aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/removed/o2cb2
-rw-r--r--Documentation/ABI/removed/raw13942
-rw-r--r--Documentation/ABI/testing/evm23
-rw-r--r--Documentation/ABI/testing/sysfs-bus-bcma8
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci-drivers-ehci_hcd46
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb15
-rw-r--r--Documentation/ABI/testing/sysfs-class-backlight-driver-adp887016
-rw-r--r--Documentation/ABI/testing/sysfs-class-devfreq52
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-mesh8
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff7
-rw-r--r--Documentation/DocBook/80211.tmpl11
-rw-r--r--Documentation/DocBook/uio-howto.tmpl2
-rw-r--r--Documentation/PCI/pci.txt2
-rw-r--r--Documentation/RCU/NMI-RCU.txt2
-rw-r--r--Documentation/RCU/lockdep-splat.txt110
-rw-r--r--Documentation/RCU/lockdep.txt34
-rw-r--r--Documentation/RCU/torture.txt137
-rw-r--r--Documentation/RCU/trace.txt38
-rw-r--r--Documentation/blackfin/bfin-gpio-notes.txt2
-rw-r--r--Documentation/block/biodoc.txt2
-rw-r--r--Documentation/bus-virt-phys-mapping.txt2
-rw-r--r--Documentation/cdrom/packet-writing.txt2
-rw-r--r--Documentation/cpu-freq/governors.txt2
-rw-r--r--Documentation/development-process/4.Coding2
-rw-r--r--Documentation/devicetree/bindings/arm/l2cc.txt44
-rw-r--r--Documentation/devicetree/bindings/gpio/led.txt2
-rw-r--r--Documentation/devicetree/bindings/net/can/fsl-flexcan.txt63
-rw-r--r--Documentation/devicetree/bindings/net/smsc911x.txt38
-rw-r--r--Documentation/devicetree/bindings/serial/rs485.txt31
-rw-r--r--Documentation/devicetree/bindings/tty/serial/atmel-usart.txt27
-rw-r--r--Documentation/devicetree/bindings/tty/serial/snps-dw-apb-uart.txt25
-rw-r--r--Documentation/driver-model/binding.txt4
-rw-r--r--Documentation/driver-model/device.txt65
-rw-r--r--Documentation/feature-removal-schedule.txt9
-rw-r--r--Documentation/filesystems/9p.txt2
-rw-r--r--Documentation/filesystems/caching/object.txt6
-rw-r--r--Documentation/filesystems/locks.txt11
-rw-r--r--Documentation/filesystems/nfs/idmapper.txt2
-rw-r--r--Documentation/filesystems/pohmelfs/design_notes.txt5
-rw-r--r--Documentation/filesystems/proc.txt2
-rw-r--r--Documentation/filesystems/sysfs.txt10
-rw-r--r--Documentation/filesystems/vfs.txt3
-rw-r--r--Documentation/frv/booting.txt6
-rw-r--r--Documentation/hwmon/ad731425
-rw-r--r--Documentation/hwmon/adm127540
-rw-r--r--Documentation/hwmon/exynos4_tmu81
-rw-r--r--Documentation/hwmon/lm7561
-rw-r--r--Documentation/hwmon/ltc2978103
-rw-r--r--Documentation/hwmon/pmbus13
-rw-r--r--Documentation/hwmon/pmbus-core283
-rw-r--r--Documentation/hwmon/zl6100125
-rw-r--r--Documentation/input/input.txt2
-rw-r--r--Documentation/kernel-docs.txt4
-rw-r--r--Documentation/kernel-parameters.txt35
-rw-r--r--Documentation/laptops/thinkpad-acpi.txt4
-rw-r--r--Documentation/media-framework.txt4
-rw-r--r--Documentation/memory-barriers.txt2
-rw-r--r--Documentation/networking/batman-adv.txt8
-rw-r--r--Documentation/networking/ip-sysctl.txt17
-rw-r--r--Documentation/networking/mac80211-injection.txt4
-rw-r--r--Documentation/networking/netdevices.txt4
-rw-r--r--Documentation/networking/scaling.txt12
-rw-r--r--Documentation/networking/stmmac.txt44
-rw-r--r--Documentation/pinctrl.txt950
-rw-r--r--Documentation/power/00-INDEX2
-rw-r--r--Documentation/power/basic-pm-debugging.txt26
-rw-r--r--Documentation/power/devices.txt8
-rw-r--r--Documentation/power/pm_qos_interface.txt92
-rw-r--r--Documentation/power/runtime_pm.txt21
-rw-r--r--Documentation/power/suspend-and-cpuhotplug.txt275
-rw-r--r--Documentation/power/userland-swsusp.txt3
-rw-r--r--Documentation/rfkill.txt3
-rw-r--r--Documentation/scsi/aic7xxx_old.txt2
-rw-r--r--Documentation/scsi/scsi_mid_low_api.txt5
-rw-r--r--Documentation/security/keys-trusted-encrypted.txt3
-rw-r--r--Documentation/serial/serial-rs485.txt8
-rw-r--r--Documentation/sound/oss/PAS163
-rw-r--r--Documentation/spi/pxa2xx4
-rw-r--r--Documentation/stable_kernel_rules.txt14
-rw-r--r--Documentation/timers/highres.txt2
-rw-r--r--Documentation/usb/dma.txt6
-rw-r--r--Documentation/usb/dwc3.txt45
-rw-r--r--Documentation/usb/power-management.txt34
-rw-r--r--Documentation/virtual/lguest/lguest.c2
-rw-r--r--Documentation/vm/numa4
-rw-r--r--Documentation/vm/slub.txt2
-rw-r--r--Documentation/zh_CN/SubmitChecklist109
-rw-r--r--MAINTAINERS290
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/include/asm/fcntl.h2
-rw-r--r--arch/alpha/kernel/srm_env.c5
-rw-r--r--arch/arm/Kconfig60
-rw-r--r--arch/arm/Kconfig.debug91
-rw-r--r--arch/arm/Makefile3
-rw-r--r--arch/arm/boot/Makefile9
-rw-r--r--arch/arm/boot/compressed/Makefile8
-rw-r--r--arch/arm/common/gic.c43
-rw-r--r--arch/arm/common/pl330.c2
-rw-r--r--arch/arm/common/vic.c4
-rw-r--r--arch/arm/configs/integrator_defconfig19
-rw-r--r--arch/arm/include/asm/Kbuild17
-rw-r--r--arch/arm/include/asm/auxvec.h4
-rw-r--r--arch/arm/include/asm/bitsperlong.h1
-rw-r--r--arch/arm/include/asm/bug.h55
-rw-r--r--arch/arm/include/asm/cachetype.h5
-rw-r--r--arch/arm/include/asm/cputime.h6
-rw-r--r--arch/arm/include/asm/cputype.h6
-rw-r--r--arch/arm/include/asm/device.h3
-rw-r--r--arch/arm/include/asm/dma-mapping.h2
-rw-r--r--arch/arm/include/asm/dma.h6
-rw-r--r--arch/arm/include/asm/ecard.h1
-rw-r--r--arch/arm/include/asm/emergency-restart.h6
-rw-r--r--arch/arm/include/asm/errno.h6
-rw-r--r--arch/arm/include/asm/exception.h19
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h42
-rw-r--r--arch/arm/include/asm/io.h40
-rw-r--r--arch/arm/include/asm/ioctl.h1
-rw-r--r--arch/arm/include/asm/irq_regs.h1
-rw-r--r--arch/arm/include/asm/kdebug.h1
-rw-r--r--arch/arm/include/asm/local.h1
-rw-r--r--arch/arm/include/asm/local64.h1
-rw-r--r--arch/arm/include/asm/localtimer.h6
-rw-r--r--arch/arm/include/asm/mach/arch.h3
-rw-r--r--arch/arm/include/asm/memory.h7
-rw-r--r--arch/arm/include/asm/mmu.h4
-rw-r--r--arch/arm/include/asm/module.h4
-rw-r--r--arch/arm/include/asm/outercache.h7
-rw-r--r--arch/arm/include/asm/page.h42
-rw-r--r--arch/arm/include/asm/percpu.h6
-rw-r--r--arch/arm/include/asm/pgalloc.h4
-rw-r--r--arch/arm/include/asm/pgtable-2level-hwdef.h93
-rw-r--r--arch/arm/include/asm/pgtable-2level-types.h67
-rw-r--r--arch/arm/include/asm/pgtable-2level.h143
-rw-r--r--arch/arm/include/asm/pgtable-hwdef.h77
-rw-r--r--arch/arm/include/asm/pgtable.h141
-rw-r--r--arch/arm/include/asm/poll.h1
-rw-r--r--arch/arm/include/asm/resource.h6
-rw-r--r--arch/arm/include/asm/sections.h1
-rw-r--r--arch/arm/include/asm/siginfo.h6
-rw-r--r--arch/arm/include/asm/sizes.h21
-rw-r--r--arch/arm/include/asm/smp.h11
-rw-r--r--arch/arm/include/asm/system.h11
-rw-r--r--arch/arm/include/asm/tlbflush.h4
-rw-r--r--arch/arm/include/asm/topology.h33
-rw-r--r--arch/arm/kernel/Makefile3
-rw-r--r--arch/arm/kernel/armksyms.c3
-rw-r--r--arch/arm/kernel/asm-offsets.c12
-rw-r--r--arch/arm/kernel/bios32.c9
-rw-r--r--arch/arm/kernel/debug.S4
-rw-r--r--arch/arm/kernel/dma.c2
-rw-r--r--arch/arm/kernel/ecard.c36
-rw-r--r--arch/arm/kernel/entry-armv.S44
-rw-r--r--arch/arm/kernel/head.S135
-rw-r--r--arch/arm/kernel/irq.c2
-rw-r--r--arch/arm/kernel/machine_kexec.c35
-rw-r--r--arch/arm/kernel/module.c2
-rw-r--r--arch/arm/kernel/perf_event_v7.c4
-rw-r--r--arch/arm/kernel/process.c2
-rw-r--r--arch/arm/kernel/setup.c37
-rw-r--r--arch/arm/kernel/smp.c61
-rw-r--r--arch/arm/kernel/smp_scu.c2
-rw-r--r--arch/arm/kernel/time.c6
-rw-r--r--arch/arm/kernel/topology.c148
-rw-r--r--arch/arm/kernel/traps.c52
-rw-r--r--arch/arm/kernel/vmlinux.lds.S3
-rw-r--r--arch/arm/lib/backtrace.S6
-rw-r--r--arch/arm/lib/div64.S8
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c1
-rw-r--r--arch/arm/mach-at91/Makefile.boot6
-rw-r--r--arch/arm/mach-bcmring/Kconfig2
-rw-r--r--arch/arm/mach-bcmring/Makefile.boot2
-rw-r--r--arch/arm/mach-bcmring/arch.c4
-rw-r--r--arch/arm/mach-bcmring/irq.c1
-rw-r--r--arch/arm/mach-bcmring/timer.c1
-rw-r--r--arch/arm/mach-clps711x/Makefile.boot2
-rw-r--r--arch/arm/mach-clps711x/clep7312.c3
-rw-r--r--arch/arm/mach-clps711x/edb7211-arch.c3
-rw-r--r--arch/arm/mach-clps711x/fortunet.c3
-rw-r--r--arch/arm/mach-clps711x/p720t.c3
-rw-r--r--arch/arm/mach-cns3xxx/Makefile.boot2
-rw-r--r--arch/arm/mach-davinci/Makefile.boot4
-rw-r--r--arch/arm/mach-dove/Makefile.boot2
-rw-r--r--arch/arm/mach-ebsa110/Makefile.boot2
-rw-r--r--arch/arm/mach-ebsa110/include/mach/io.h2
-rw-r--r--arch/arm/mach-ep93xx/Makefile.boot10
-rw-r--r--arch/arm/mach-exynos4/Kconfig1
-rw-r--r--arch/arm/mach-exynos4/Makefile.boot2
-rw-r--r--arch/arm/mach-exynos4/platsmp.c10
-rw-r--r--arch/arm/mach-footbridge/Kconfig4
-rw-r--r--arch/arm/mach-footbridge/Makefile.boot2
-rw-r--r--arch/arm/mach-footbridge/cats-hw.c3
-rw-r--r--arch/arm/mach-footbridge/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-footbridge/include/mach/io.h2
-rw-r--r--arch/arm/mach-footbridge/netwinder-hw.c17
-rw-r--r--arch/arm/mach-footbridge/netwinder-leds.c10
-rw-r--r--arch/arm/mach-gemini/Makefile.boot4
-rw-r--r--arch/arm/mach-h720x/Makefile.boot2
-rw-r--r--arch/arm/mach-imx/Makefile.boot10
-rw-r--r--arch/arm/mach-integrator/Makefile.boot2
-rw-r--r--arch/arm/mach-integrator/core.c10
-rw-r--r--arch/arm/mach-integrator/include/mach/io.h2
-rw-r--r--arch/arm/mach-integrator/include/mach/platform.h12
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c86
-rw-r--r--arch/arm/mach-integrator/pci_v3.c14
-rw-r--r--arch/arm/mach-iop13xx/Makefile.boot2
-rw-r--r--arch/arm/mach-iop32x/Makefile.boot2
-rw-r--r--arch/arm/mach-iop33x/Makefile.boot2
-rw-r--r--arch/arm/mach-ixp2000/Makefile.boot2
-rw-r--r--arch/arm/mach-ixp23xx/Makefile.boot2
-rw-r--r--arch/arm/mach-ixp4xx/Makefile.boot2
-rw-r--r--arch/arm/mach-ixp4xx/common-pci.c25
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/io.h2
-rw-r--r--arch/arm/mach-kirkwood/Makefile.boot2
-rw-r--r--arch/arm/mach-ks8695/Makefile.boot2
-rw-r--r--arch/arm/mach-lpc32xx/Makefile.boot2
-rw-r--r--arch/arm/mach-mmp/Makefile.boot2
-rw-r--r--arch/arm/mach-mmp/aspenite.c2
-rw-r--r--arch/arm/mach-mmp/include/mach/pxa168.h7
-rw-r--r--arch/arm/mach-mmp/pxa168.c46
-rw-r--r--arch/arm/mach-mmp/ttc_dkb.c2
-rw-r--r--arch/arm/mach-msm/Makefile.boot2
-rw-r--r--arch/arm/mach-msm/board-halibut.c4
-rw-r--r--arch/arm/mach-msm/board-mahimahi.c4
-rw-r--r--arch/arm/mach-msm/board-msm7x30.c22
-rw-r--r--arch/arm/mach-msm/board-msm8960.c22
-rw-r--r--arch/arm/mach-msm/board-msm8x60.c25
-rw-r--r--arch/arm/mach-msm/board-sapphire.c4
-rw-r--r--arch/arm/mach-msm/board-trout.c4
-rw-r--r--arch/arm/mach-msm/clock.c2
-rw-r--r--arch/arm/mach-msm/include/mach/memory.h6
-rw-r--r--arch/arm/mach-msm/platsmp.c6
-rw-r--r--arch/arm/mach-mv78xx0/Makefile.boot2
-rw-r--r--arch/arm/mach-mx5/Makefile.boot6
-rw-r--r--arch/arm/mach-mxs/Makefile.boot2
-rw-r--r--arch/arm/mach-netx/Makefile.boot2
-rw-r--r--arch/arm/mach-nomadik/Makefile.boot2
-rw-r--r--arch/arm/mach-nuc93x/Makefile.boot2
-rw-r--r--arch/arm/mach-nuc93x/time.c2
-rw-r--r--arch/arm/mach-omap1/Makefile.boot2
-rw-r--r--arch/arm/mach-omap1/pm_bus.c1
-rw-r--r--arch/arm/mach-omap2/Kconfig2
-rw-r--r--arch/arm/mach-omap2/Makefile11
-rw-r--r--arch/arm/mach-omap2/Makefile.boot2
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c3
-rw-r--r--arch/arm/mach-omap2/hsmmc.c12
-rw-r--r--arch/arm/mach-omap2/omap-smp.c10
-rw-r--r--arch/arm/mach-omap2/smartreflex.c2
-rw-r--r--arch/arm/mach-omap2/usb-musb.c3
-rw-r--r--arch/arm/mach-orion5x/Makefile.boot2
-rw-r--r--arch/arm/mach-orion5x/common.c4
-rw-r--r--arch/arm/mach-orion5x/common.h4
-rw-r--r--arch/arm/mach-pnx4008/Makefile.boot2
-rw-r--r--arch/arm/mach-prima2/Makefile.boot2
-rw-r--r--arch/arm/mach-pxa/Makefile.boot2
-rw-r--r--arch/arm/mach-pxa/cm-x300.c4
-rw-r--r--arch/arm/mach-pxa/corgi.c4
-rw-r--r--arch/arm/mach-pxa/eseries.c3
-rw-r--r--arch/arm/mach-pxa/eseries.h3
-rw-r--r--arch/arm/mach-pxa/irq.c2
-rw-r--r--arch/arm/mach-pxa/poodle.c4
-rw-r--r--arch/arm/mach-pxa/saar.c2
-rw-r--r--arch/arm/mach-pxa/spitz.c4
-rw-r--r--arch/arm/mach-pxa/tosa.c4
-rw-r--r--arch/arm/mach-pxa/xcep.c3
-rw-r--r--arch/arm/mach-realview/Makefile.boot4
-rw-r--r--arch/arm/mach-realview/core.c3
-rw-r--r--arch/arm/mach-realview/core.h4
-rw-r--r--arch/arm/mach-realview/include/mach/board-pb1176.h1
-rw-r--r--arch/arm/mach-realview/platsmp.c10
-rw-r--r--arch/arm/mach-realview/realview_pb1176.c48
-rw-r--r--arch/arm/mach-realview/realview_pbx.c6
-rw-r--r--arch/arm/mach-rpc/Makefile.boot2
-rw-r--r--arch/arm/mach-rpc/include/mach/hardware.h25
-rw-r--r--arch/arm/mach-rpc/include/mach/io.h193
-rw-r--r--arch/arm/mach-rpc/riscpc.c2
-rw-r--r--arch/arm/mach-s3c2410/Makefile.boot4
-rw-r--r--arch/arm/mach-s3c2410/include/mach/io.h2
-rw-r--r--arch/arm/mach-s3c2410/s3c2410.c2
-rw-r--r--arch/arm/mach-s3c2412/mach-smdk2413.c3
-rw-r--r--arch/arm/mach-s3c2412/mach-vstms.c5
-rw-r--r--arch/arm/mach-s3c2412/s3c2412.c2
-rw-r--r--arch/arm/mach-s3c2416/s3c2416.c2
-rw-r--r--arch/arm/mach-s3c2440/s3c2440.c2
-rw-r--r--arch/arm/mach-s3c2440/s3c2442.c2
-rw-r--r--arch/arm/mach-s3c64xx/Makefile.boot2
-rw-r--r--arch/arm/mach-s3c64xx/dev-uart.c60
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/irqs.h30
-rw-r--r--arch/arm/mach-s3c64xx/irq.c25
-rw-r--r--arch/arm/mach-s5p64x0/Makefile.boot2
-rw-r--r--arch/arm/mach-s5pc100/Makefile.boot2
-rw-r--r--arch/arm/mach-s5pv210/Makefile.boot2
-rw-r--r--arch/arm/mach-sa1100/Makefile1
-rw-r--r--arch/arm/mach-sa1100/Makefile.boot5
-rw-r--r--arch/arm/mach-sa1100/assabet.c3
-rw-r--r--arch/arm/mach-sa1100/include/mach/io.h6
-rw-r--r--arch/arm/mach-sa1100/include/mach/simpad.h94
-rw-r--r--arch/arm/mach-sa1100/leds-simpad.c100
-rw-r--r--arch/arm/mach-sa1100/leds.c2
-rw-r--r--arch/arm/mach-sa1100/leds.h1
-rw-r--r--arch/arm/mach-sa1100/simpad.c213
-rw-r--r--arch/arm/mach-shark/Makefile.boot2
-rw-r--r--arch/arm/mach-shark/leds.c6
-rw-r--r--arch/arm/mach-shmobile/Makefile.boot2
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c6
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c12
-rw-r--r--arch/arm/mach-shmobile/include/mach/common.h4
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh7372.h13
-rw-r--r--arch/arm/mach-shmobile/intc-sh7372.c52
-rw-r--r--arch/arm/mach-shmobile/platsmp.c6
-rw-r--r--arch/arm/mach-shmobile/pm-sh7372.c342
-rw-r--r--arch/arm/mach-shmobile/pm_runtime.c1
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c25
-rw-r--r--arch/arm/mach-shmobile/sleep-sh7372.S221
-rw-r--r--arch/arm/mach-spear3xx/Makefile.boot2
-rw-r--r--arch/arm/mach-spear6xx/Makefile.boot2
-rw-r--r--arch/arm/mach-tcc8k/Makefile.boot2
-rw-r--r--arch/arm/mach-tegra/Makefile.boot2
-rw-r--r--arch/arm/mach-tegra/board-harmony.c4
-rw-r--r--arch/arm/mach-tegra/board-paz00.c4
-rw-r--r--arch/arm/mach-tegra/board-trimslice.c4
-rw-r--r--arch/arm/mach-tegra/cpu-tegra.c1
-rw-r--r--arch/arm/mach-tegra/platsmp.c8
-rw-r--r--arch/arm/mach-u300/Kconfig2
-rw-r--r--arch/arm/mach-u300/Makefile2
-rw-r--r--arch/arm/mach-u300/Makefile.boot4
-rw-r--r--arch/arm/mach-u300/core.c84
-rw-r--r--arch/arm/mach-u300/include/mach/syscon.h136
-rw-r--r--arch/arm/mach-u300/mmc.c16
-rw-r--r--arch/arm/mach-u300/padmux.c367
-rw-r--r--arch/arm/mach-u300/padmux.h39
-rw-r--r--arch/arm/mach-u300/spi.c20
-rw-r--r--arch/arm/mach-ux500/Kconfig1
-rw-r--r--arch/arm/mach-ux500/Makefile.boot2
-rw-r--r--arch/arm/mach-ux500/platsmp.c10
-rw-r--r--arch/arm/mach-versatile/Makefile.boot2
-rw-r--r--arch/arm/mach-vexpress/Makefile.boot2
-rw-r--r--arch/arm/mach-vexpress/ct-ca9x4.c6
-rw-r--r--arch/arm/mach-vexpress/hotplug.c9
-rw-r--r--arch/arm/mach-vexpress/include/mach/io.h2
-rw-r--r--arch/arm/mach-vt8500/Makefile.boot2
-rw-r--r--arch/arm/mach-vt8500/include/mach/io.h2
-rw-r--r--arch/arm/mach-w90x900/Makefile.boot2
-rw-r--r--arch/arm/mach-w90x900/cpu.c2
-rw-r--r--arch/arm/mach-zynq/Makefile.boot2
-rw-r--r--arch/arm/mm/alignment.c20
-rw-r--r--arch/arm/mm/cache-l2x0.c262
-rw-r--r--arch/arm/mm/context.c14
-rw-r--r--arch/arm/mm/copypage-v4mc.c6
-rw-r--r--arch/arm/mm/copypage-v6.c10
-rw-r--r--arch/arm/mm/copypage-xscale.c6
-rw-r--r--arch/arm/mm/dma-mapping.c6
-rw-r--r--arch/arm/mm/fault.c1
-rw-r--r--arch/arm/mm/init.c7
-rw-r--r--arch/arm/mm/ioremap.c21
-rw-r--r--arch/arm/mm/mm.h4
-rw-r--r--arch/arm/mm/mmu.c18
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm/plat-mxc/Kconfig2
-rw-r--r--arch/arm/plat-mxc/devices.c53
-rw-r--r--arch/arm/plat-mxc/include/mach/devices-common.h16
-rw-r--r--arch/arm/plat-omap/Kconfig1
-rw-r--r--arch/arm/plat-s5p/Kconfig1
-rw-r--r--arch/arm/plat-s5p/dev-uart.c84
-rw-r--r--arch/arm/plat-s5p/include/plat/irqs.h35
-rw-r--r--arch/arm/plat-s5p/irq-gpioint.c6
-rw-r--r--arch/arm/plat-s5p/irq.c34
-rw-r--r--arch/arm/plat-samsung/Kconfig5
-rw-r--r--arch/arm/plat-samsung/Makefile1
-rw-r--r--arch/arm/plat-samsung/include/plat/regs-serial.h5
-rw-r--r--arch/arm/plat-samsung/irq-uart.c96
-rw-r--r--arch/arm/tools/mach-types20
-rw-r--r--arch/arm/vfp/Makefile2
-rw-r--r--arch/cris/Kconfig4
-rw-r--r--arch/cris/arch-v10/Kconfig12
-rw-r--r--arch/cris/arch-v10/drivers/Kconfig1
-rw-r--r--arch/cris/arch-v32/drivers/Kconfig1
-rw-r--r--arch/cris/arch-v32/lib/nand_init.S178
-rw-r--r--arch/h8300/Kconfig4
-rw-r--r--arch/ia64/configs/generic_defconfig2
-rw-r--r--arch/ia64/dig/Makefile2
-rw-r--r--arch/ia64/hp/common/sba_iommu.c12
-rw-r--r--arch/ia64/hp/sim/simeth.c2
-rw-r--r--arch/ia64/include/asm/device.h2
-rw-r--r--arch/ia64/include/asm/iommu.h6
-rw-r--r--arch/ia64/include/asm/pci.h2
-rw-r--r--arch/ia64/kernel/Makefile2
-rw-r--r--arch/ia64/kernel/acpi.c4
-rw-r--r--arch/ia64/kernel/msi_ia64.c4
-rw-r--r--arch/ia64/kernel/pci-dma.c2
-rw-r--r--arch/m68k/Kconfig195
-rw-r--r--arch/m68k/Kconfig.bus55
-rw-r--r--arch/m68k/Kconfig.cpu429
-rw-r--r--arch/m68k/Kconfig.devices123
-rw-r--r--arch/m68k/Kconfig.machine583
-rw-r--r--arch/m68k/Kconfig.mmu411
-rw-r--r--arch/m68k/Kconfig.nommu787
-rw-r--r--arch/m68k/Makefile168
-rw-r--r--arch/m68k/Makefile_mm121
-rw-r--r--arch/m68k/Makefile_no124
-rw-r--r--arch/m68k/include/asm/entry.h255
-rw-r--r--arch/m68k/include/asm/entry_mm.h128
-rw-r--r--arch/m68k/include/asm/entry_no.h181
-rw-r--r--arch/m68k/include/asm/m520xsim.h26
-rw-r--r--arch/m68k/include/asm/mcfqspi.h8
-rw-r--r--arch/m68k/include/asm/page_no.h3
-rw-r--r--arch/m68k/include/asm/processor.h6
-rw-r--r--arch/m68k/include/asm/sections.h2
-rw-r--r--arch/m68k/kernel/Makefile24
-rw-r--r--arch/m68k/kernel/Makefile_mm17
-rw-r--r--arch/m68k/kernel/Makefile_no10
-rw-r--r--arch/m68k/kernel/entry_no.S6
-rw-r--r--arch/m68k/kernel/setup_no.c4
-rw-r--r--arch/m68k/kernel/traps.c1108
-rw-r--r--arch/m68k/kernel/traps_mm.c1207
-rw-r--r--arch/m68k/kernel/traps_no.c361
-rw-r--r--arch/m68k/kernel/vectors.c145
-rw-r--r--arch/m68k/kernel/vmlinux.lds_no.S1
-rw-r--r--arch/m68k/lib/memcpy.c9
-rw-r--r--arch/m68k/mac/macints.c2
-rw-r--r--arch/m68k/mac/misc.c40
-rw-r--r--arch/m68k/mm/init_no.c21
-rw-r--r--arch/m68k/platform/520x/config.c6
-rw-r--r--arch/m68k/platform/520x/gpio.c50
-rw-r--r--arch/m68k/platform/68328/Makefile5
-rw-r--r--arch/m68k/platform/68328/entry.S18
-rw-r--r--arch/m68k/platform/68360/Makefile6
-rw-r--r--arch/m68k/platform/68360/entry.S4
-rw-r--r--arch/m68k/platform/coldfire/entry.S6
-rw-r--r--arch/m68k/q40/README2
-rw-r--r--arch/microblaze/include/asm/dma-mapping.h2
-rw-r--r--arch/microblaze/mm/init.c6
-rw-r--r--arch/mips/Kconfig14
-rw-r--r--arch/mips/alchemy/common/platform.c2
-rw-r--r--arch/mips/alchemy/common/power.c22
-rw-r--r--arch/mips/alchemy/devboards/bcsr.c4
-rw-r--r--arch/mips/alchemy/devboards/db1200/setup.c7
-rw-r--r--arch/mips/ar7/irq.c3
-rw-r--r--arch/mips/bcm47xx/Kconfig31
-rw-r--r--arch/mips/bcm47xx/Makefile3
-rw-r--r--arch/mips/bcm47xx/gpio.c82
-rw-r--r--arch/mips/bcm47xx/irq.c12
-rw-r--r--arch/mips/bcm47xx/nvram.c29
-rw-r--r--arch/mips/bcm47xx/serial.c46
-rw-r--r--arch/mips/bcm47xx/setup.c90
-rw-r--r--arch/mips/bcm47xx/time.c16
-rw-r--r--arch/mips/bcm47xx/wgt634u.c14
-rw-r--r--arch/mips/bcm63xx/irq.c1
-rw-r--r--arch/mips/cobalt/irq.c1
-rw-r--r--arch/mips/dec/setup.c4
-rw-r--r--arch/mips/emma/markeins/irq.c2
-rw-r--r--arch/mips/include/asm/lasat/lasat.h6
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx.h26
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/gpio.h108
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-powertv/dma-coherence.h1
-rw-r--r--arch/mips/include/asm/stackframe.h4
-rw-r--r--arch/mips/jz4740/gpio.c52
-rw-r--r--arch/mips/kernel/ftrace.c39
-rw-r--r--arch/mips/kernel/i8259.c3
-rw-r--r--arch/mips/kernel/linux32.c7
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/signal.c3
-rw-r--r--arch/mips/kernel/traps.c16
-rw-r--r--arch/mips/kernel/vpe.c2
-rw-r--r--arch/mips/lantiq/irq.c6
-rw-r--r--arch/mips/lantiq/xway/ebu.c1
-rw-r--r--arch/mips/lantiq/xway/pmu.c1
-rw-r--r--arch/mips/lasat/interrupt.c1
-rw-r--r--arch/mips/loongson/fuloong-2e/irq.c1
-rw-r--r--arch/mips/loongson/lemote-2f/irq.c3
-rw-r--r--arch/mips/mm/mmap.c48
-rw-r--r--arch/mips/mm/tlbex.c6
-rw-r--r--arch/mips/mti-malta/malta-int.c6
-rw-r--r--arch/mips/netlogic/xlr/Makefile2
-rw-r--r--arch/mips/pci/pci-bcm47xx.c6
-rw-r--r--arch/mips/pci/pci-lantiq.c9
-rw-r--r--arch/mips/pci/pci-rc32434.c2
-rw-r--r--arch/mips/pmc-sierra/msp71xx/msp_irq.c6
-rw-r--r--arch/mips/pmc-sierra/msp71xx/msp_serial.c73
-rw-r--r--arch/mips/pnx8550/common/int.c2
-rw-r--r--arch/mips/sgi-ip22/ip22-int.c10
-rw-r--r--arch/mips/sni/rm200.c1
-rw-r--r--arch/mips/txx9/generic/setup_tx4939.c2
-rw-r--r--arch/mips/vr41xx/common/irq.c1
-rw-r--r--arch/mn10300/Kconfig2
-rw-r--r--arch/mn10300/kernel/irq.c1
-rw-r--r--arch/openrisc/Kconfig2
-rw-r--r--arch/openrisc/include/asm/dma-mapping.h2
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/include/asm/dma-mapping.h2
-rw-r--r--arch/parisc/kernel/pci-dma.c2
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/boot/dts/p1010rdb.dts10
-rw-r--r--arch/powerpc/boot/dts/p1010si.dtsi10
-rw-r--r--arch/powerpc/configs/40x/acadia_defconfig11
-rw-r--r--arch/powerpc/configs/40x/ep405_defconfig5
-rw-r--r--arch/powerpc/configs/40x/hcu4_defconfig5
-rw-r--r--arch/powerpc/configs/40x/kilauea_defconfig9
-rw-r--r--arch/powerpc/configs/40x/makalu_defconfig9
-rw-r--r--arch/powerpc/configs/40x/walnut_defconfig5
-rw-r--r--arch/powerpc/configs/44x/arches_defconfig9
-rw-r--r--arch/powerpc/configs/44x/bamboo_defconfig5
-rw-r--r--arch/powerpc/configs/44x/bluestone_defconfig9
-rw-r--r--arch/powerpc/configs/44x/canyonlands_defconfig9
-rw-r--r--arch/powerpc/configs/44x/ebony_defconfig5
-rw-r--r--arch/powerpc/configs/44x/eiger_defconfig9
-rw-r--r--arch/powerpc/configs/44x/icon_defconfig5
-rw-r--r--arch/powerpc/configs/44x/katmai_defconfig5
-rw-r--r--arch/powerpc/configs/44x/redwood_defconfig11
-rw-r--r--arch/powerpc/configs/44x/sam440ep_defconfig5
-rw-r--r--arch/powerpc/configs/44x/sequoia_defconfig5
-rw-r--r--arch/powerpc/configs/44x/taishan_defconfig5
-rw-r--r--arch/powerpc/configs/44x/warp_defconfig5
-rw-r--r--arch/powerpc/configs/ppc40x_defconfig5
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig5
-rw-r--r--arch/powerpc/include/asm/qe.h2
-rw-r--r--arch/powerpc/include/asm/udbg.h1
-rw-r--r--arch/powerpc/kernel/udbg.c2
-rw-r--r--arch/powerpc/platforms/40x/Kconfig12
-rw-r--r--arch/powerpc/platforms/44x/Kconfig54
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/cell/Kconfig8
-rw-r--r--arch/powerpc/platforms/embedded6xx/storcenter.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c2
-rw-r--r--arch/powerpc/sysdev/uic.c24
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/hypfs/hypfs_diag.c3
-rw-r--r--arch/s390/include/asm/qdio.h78
-rw-r--r--arch/s390/kernel/suspend.c118
-rw-r--r--arch/s390/kernel/swsusp_asm64.S3
-rw-r--r--arch/sh/include/asm/sh_eth.h25
-rw-r--r--arch/sparc/include/asm/pgtsrmmu.h2
-rw-r--r--arch/sparc/kernel/pci.c3
-rw-r--r--arch/sparc/kernel/signal32.c21
-rw-r--r--arch/sparc/kernel/signal_32.c32
-rw-r--r--arch/sparc/kernel/signal_64.c32
-rw-r--r--arch/sparc/mm/leon_mm.c2
-rw-r--r--arch/tile/kernel/intvec_32.S2
-rw-r--r--arch/tile/lib/atomic_asm_32.S2
-rw-r--r--arch/um/drivers/net_kern.c2
-rw-r--r--arch/unicore32/include/asm/io.h2
-rw-r--r--arch/x86/Kconfig8
-rw-r--r--arch/x86/Kconfig.debug2
-rw-r--r--arch/x86/boot/header.S2
-rw-r--r--arch/x86/configs/x86_64_defconfig4
-rw-r--r--arch/x86/include/asm/device.h2
-rw-r--r--arch/x86/include/asm/dma-mapping.h2
-rw-r--r--arch/x86/include/asm/hw_irq.h2
-rw-r--r--arch/x86/include/asm/hyperv.h1
-rw-r--r--arch/x86/include/asm/irq_remapping.h6
-rw-r--r--arch/x86/include/asm/xen/page.h6
-rw-r--r--arch/x86/kernel/amd_gart_64.c2
-rw-r--r--arch/x86/kernel/apic/apic.c33
-rw-r--r--arch/x86/kernel/apic/io_apic.c284
-rw-r--r--arch/x86/kernel/apm_32.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_intel.c10
-rw-r--r--arch/x86/kernel/kprobes.c7
-rw-r--r--arch/x86/kernel/pci-dma.c4
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/vsyscall_64.c2
-rw-r--r--arch/x86/mm/init.c3
-rw-r--r--arch/x86/mm/mmio-mod.c1
-rw-r--r--arch/x86/oprofile/nmi_int.c4
-rw-r--r--arch/x86/pci/acpi.c11
-rw-r--r--arch/x86/pci/xen.c32
-rw-r--r--arch/x86/platform/mrst/mrst.c25
-rw-r--r--arch/x86/xen/Kconfig11
-rw-r--r--arch/x86/xen/enlighten.c1
-rw-r--r--arch/x86/xen/mmu.c52
-rw-r--r--arch/x86/xen/p2m.c128
-rw-r--r--arch/x86/xen/setup.c284
-rw-r--r--arch/xtensa/platforms/iss/network.c2
-rw-r--r--crypto/ghash-generic.c6
-rw-r--r--drivers/Kconfig6
-rw-r--r--drivers/Makefile5
-rw-r--r--drivers/acpi/processor_idle.c12
-rw-r--r--drivers/acpi/sleep.c24
-rw-r--r--drivers/atm/eni.c5
-rw-r--r--drivers/atm/idt77252.c11
-rw-r--r--drivers/atm/iphase.c265
-rw-r--r--drivers/atm/iphase.h395
-rw-r--r--drivers/atm/lanai.c3
-rw-r--r--drivers/base/class.c17
-rw-r--r--drivers/base/core.c5
-rw-r--r--drivers/base/dd.c3
-rw-r--r--drivers/base/memory.c62
-rw-r--r--drivers/base/platform.c54
-rw-r--r--drivers/base/power/Makefile4
-rw-r--r--drivers/base/power/clock_ops.c127
-rw-r--r--drivers/base/power/common.c86
-rw-r--r--drivers/base/power/domain.c352
-rw-r--r--drivers/base/power/main.c42
-rw-r--r--drivers/base/power/opp.c30
-rw-r--r--drivers/base/power/power.h10
-rw-r--r--drivers/base/power/qos.c419
-rw-r--r--drivers/base/power/runtime.c127
-rw-r--r--drivers/base/power/wakeup.c4
-rw-r--r--drivers/base/regmap/Kconfig2
-rw-r--r--drivers/base/regmap/Makefile3
-rw-r--r--drivers/base/regmap/internal.h128
-rw-r--r--drivers/base/regmap/regcache-indexed.c64
-rw-r--r--drivers/base/regmap/regcache-lzo.c361
-rw-r--r--drivers/base/regmap/regcache-rbtree.c345
-rw-r--r--drivers/base/regmap/regcache.c401
-rw-r--r--drivers/base/regmap/regmap-debugfs.c209
-rw-r--r--drivers/base/regmap/regmap-i2c.c2
-rw-r--r--drivers/base/regmap/regmap-spi.c2
-rw-r--r--drivers/base/regmap/regmap.c221
-rw-r--r--drivers/bcma/Kconfig13
-rw-r--r--drivers/bcma/Makefile2
-rw-r--r--drivers/bcma/bcma_private.h16
-rw-r--r--drivers/bcma/core.c2
-rw-r--r--drivers/bcma/driver_chipcommon.c53
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c191
-rw-r--r--drivers/bcma/driver_mips.c256
-rw-r--r--drivers/bcma/driver_pci.c16
-rw-r--r--drivers/bcma/host_soc.c183
-rw-r--r--drivers/bcma/main.c70
-rw-r--r--drivers/bcma/scan.c348
-rw-r--r--drivers/bcma/sprom.c15
-rw-r--r--drivers/block/drbd/drbd_bitmap.c5
-rw-r--r--drivers/block/drbd/drbd_int.h3
-rw-r--r--drivers/block/drbd/drbd_nl.c4
-rw-r--r--drivers/block/rbd.c2
-rw-r--r--drivers/block/xen-blkback/blkback.c2
-rw-r--r--drivers/block/xen-blkback/common.h1
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c4
-rw-r--r--drivers/bluetooth/btusb.c8
-rw-r--r--drivers/char/agp/backend.c3
-rw-r--r--drivers/char/agp/intel-gtt.c4
-rw-r--r--drivers/char/apm-emulation.c21
-rw-r--r--drivers/char/raw.c3
-rw-r--r--drivers/char/rtc.c7
-rw-r--r--drivers/char/scc.h613
-rw-r--r--drivers/char/tpm/tpm.c3
-rw-r--r--drivers/connector/cn_proc.c26
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c2
-rw-r--r--drivers/cpuidle/cpuidle.c2
-rw-r--r--drivers/cpuidle/governors/ladder.c2
-rw-r--r--drivers/cpuidle/governors/menu.c2
-rw-r--r--drivers/dca/dca-core.c78
-rw-r--r--drivers/devfreq/Kconfig75
-rw-r--r--drivers/devfreq/Makefile5
-rw-r--r--drivers/devfreq/devfreq.c601
-rw-r--r--drivers/devfreq/governor.h24
-rw-r--r--drivers/devfreq/governor_performance.c29
-rw-r--r--drivers/devfreq/governor_powersave.c29
-rw-r--r--drivers/devfreq/governor_simpleondemand.c88
-rw-r--r--drivers/devfreq/governor_userspace.c116
-rw-r--r--drivers/dma/ipu/ipu_irq.c48
-rw-r--r--drivers/gpio/gpio-omap.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c1
-rw-r--r--drivers/gpu/drm/radeon/atom.c15
-rw-r--r--drivers/gpu/drm/radeon/atom.h1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c2
-rw-r--r--drivers/hid/Kconfig42
-rw-r--r--drivers/hid/Makefile4
-rw-r--r--drivers/hid/hid-apple.c15
-rw-r--r--drivers/hid/hid-axff.c36
-rw-r--r--drivers/hid/hid-core.c79
-rw-r--r--drivers/hid/hid-debug.c5
-rw-r--r--drivers/hid/hid-ids.h21
-rw-r--r--drivers/hid/hid-input.c11
-rw-r--r--drivers/hid/hid-lg.c29
-rw-r--r--drivers/hid/hid-lg.h4
-rw-r--r--drivers/hid/hid-lg4ff.c403
-rw-r--r--drivers/hid/hid-lgff.c13
-rw-r--r--drivers/hid/hid-logitech-dj.c922
-rw-r--r--drivers/hid/hid-logitech-dj.h123
-rw-r--r--drivers/hid/hid-magicmouse.c7
-rw-r--r--drivers/hid/hid-multitouch.c93
-rw-r--r--drivers/hid/hid-picolcd.c2
-rw-r--r--drivers/hid/hid-primax.c117
-rw-r--r--drivers/hid/hid-prodikeys.c8
-rw-r--r--drivers/hid/hid-roccat-kone.c63
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c17
-rw-r--r--drivers/hid/hid-roccat-pyra.c23
-rw-r--r--drivers/hid/hid-sjoy.c75
-rw-r--r--drivers/hid/hid-wacom.c81
-rw-r--r--drivers/hid/hid-wiimote.c800
-rw-r--r--drivers/hid/hid-zydacron.c4
-rw-r--r--drivers/hid/hidraw.c11
-rw-r--r--drivers/hid/usbhid/hid-core.c9
-rw-r--r--drivers/hid/usbhid/hid-quirks.c2
-rw-r--r--drivers/hid/usbhid/hiddev.c2
-rw-r--r--drivers/hv/Kconfig14
-rw-r--r--drivers/hv/Makefile7
-rw-r--r--drivers/hv/channel.c815
-rw-r--r--drivers/hv/channel_mgmt.c (renamed from drivers/staging/hv/channel_mgmt.c)190
-rw-r--r--drivers/hv/connection.c (renamed from drivers/staging/hv/connection.c)58
-rw-r--r--drivers/hv/hv.c (renamed from drivers/staging/hv/hv.c)25
-rw-r--r--drivers/hv/hv_kvp.c (renamed from drivers/staging/hv/hv_kvp.c)49
-rw-r--r--drivers/hv/hv_kvp.h (renamed from drivers/staging/hv/hv_kvp.h)2
-rw-r--r--drivers/hv/hv_util.c354
-rw-r--r--drivers/hv/hyperv_vmbus.h (renamed from drivers/staging/hv/hyperv_vmbus.h)17
-rw-r--r--drivers/hv/ring_buffer.c (renamed from drivers/staging/hv/ring_buffer.c)42
-rw-r--r--drivers/hv/vmbus_drv.c786
-rw-r--r--drivers/hwmon/Kconfig21
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/ad7314.c186
-rw-r--r--drivers/hwmon/exynos4_tmu.c524
-rw-r--r--drivers/hwmon/f71882fg.c231
-rw-r--r--drivers/hwmon/lm75.c39
-rw-r--r--drivers/hwmon/pmbus/Kconfig28
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/adm1275.c159
-rw-r--r--drivers/hwmon/pmbus/lm25066.c17
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c408
-rw-r--r--drivers/hwmon/pmbus/max16064.c3
-rw-r--r--drivers/hwmon/pmbus/max34440.c13
-rw-r--r--drivers/hwmon/pmbus/max8688.c9
-rw-r--r--drivers/hwmon/pmbus/pmbus.c10
-rw-r--r--drivers/hwmon/pmbus/pmbus.h23
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c85
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c13
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c5
-rw-r--r--drivers/hwmon/pmbus/zl6100.c256
-rw-r--r--drivers/hwmon/w83627ehf.c252
-rw-r--r--drivers/i2c/busses/i2c-designware.c2
-rw-r--r--drivers/ide/Kconfig26
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c11
-rw-r--r--drivers/infiniband/hw/cxgb3/Makefile2
-rw-r--r--drivers/infiniband/hw/cxgb4/Makefile2
-rw-r--r--drivers/infiniband/hw/mlx4/Kconfig3
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c29
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c23
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/input/misc/rotary_encoder.c2
-rw-r--r--drivers/input/tablet/wacom_wac.c7
-rw-r--r--drivers/iommu/Kconfig25
-rw-r--r--drivers/iommu/Makefile5
-rw-r--r--drivers/iommu/dmar.c246
-rw-r--r--drivers/iommu/intel-iommu.c278
-rw-r--r--drivers/iommu/intr_remapping.c93
-rw-r--r--drivers/isdn/i4l/isdn_common.c4
-rw-r--r--drivers/isdn/i4l/isdn_tty.c2
-rw-r--r--drivers/isdn/mISDN/dsp_core.c3
-rw-r--r--drivers/isdn/mISDN/l1oip_codec.c6
-rw-r--r--drivers/leds/Kconfig2
-rw-r--r--drivers/md/dm-crypt.c2
-rw-r--r--drivers/md/dm-flakey.c4
-rw-r--r--drivers/md/dm-kcopyd.c1
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-table.c32
-rw-r--r--drivers/md/md.c22
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/raid1.c3
-rw-r--r--drivers/md/raid10.c5
-rw-r--r--drivers/md/raid5.c6
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c2
-rw-r--r--drivers/media/dvb/dvb-usb/af9005-remote.c2
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.c2
-rw-r--r--drivers/media/dvb/frontends/dib3000.h2
-rw-r--r--drivers/media/dvb/frontends/dib3000mb.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c2
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.h2
-rw-r--r--drivers/media/video/v4l2-dev.c2
-rw-r--r--drivers/media/video/via-camera.c4
-rw-r--r--drivers/mfd/Kconfig3
-rw-r--r--drivers/mfd/wm831x-core.c437
-rw-r--r--drivers/mfd/wm831x-i2c.c76
-rw-r--r--drivers/mfd/wm831x-spi.c204
-rw-r--r--drivers/mfd/wm8400-core.c106
-rw-r--r--drivers/mfd/wm8994-core.c178
-rw-r--r--drivers/misc/pch_phub.c10
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/net/Kconfig3390
-rw-r--r--drivers/net/Makefile326
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/appletalk/ltpc.c2
-rw-r--r--drivers/net/arcnet/Kconfig14
-rw-r--r--drivers/net/arcnet/Makefile1
-rw-r--r--drivers/net/arcnet/com20020.c2
-rw-r--r--drivers/net/arcnet/com20020_cs.c (renamed from drivers/net/pcmcia/com20020_cs.c)0
-rw-r--r--drivers/net/arm/Kconfig74
-rw-r--r--drivers/net/arm/Makefile14
-rw-r--r--drivers/net/benet/Kconfig6
-rw-r--r--drivers/net/bmac.c1685
-rw-r--r--drivers/net/bna/Makefile11
-rw-r--r--drivers/net/bna/bfa_defs_mfg_comm.h222
-rw-r--r--drivers/net/bna/bfa_ioc_ct.c516
-rw-r--r--drivers/net/bna/bfi.h400
-rw-r--r--drivers/net/bna/bfi_ctreg.h646
-rw-r--r--drivers/net/bna/bfi_ll.h438
-rw-r--r--drivers/net/bna/bna.h548
-rw-r--r--drivers/net/bna/bna_ctrl.c3076
-rw-r--r--drivers/net/bna/bna_hw.h1490
-rw-r--r--drivers/net/bna/bna_txrx.c4185
-rw-r--r--drivers/net/bna/bna_types.h1127
-rw-r--r--drivers/net/bna/bnad.h341
-rw-r--r--drivers/net/bna/bnad_ethtool.c1214
-rw-r--r--drivers/net/bna/cna.h80
-rw-r--r--drivers/net/bna/cna_fwimg.c64
-rw-r--r--drivers/net/bonding/bond_3ad.c7
-rw-r--r--drivers/net/bonding/bond_main.c83
-rw-r--r--drivers/net/bonding/bond_sysfs.c46
-rw-r--r--drivers/net/bonding/bonding.h8
-rw-r--r--drivers/net/caif/caif_hsi.c436
-rw-r--r--drivers/net/caif/caif_spi.c4
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/dev.c14
-rw-r--r--drivers/net/can/flexcan.c189
-rw-r--r--drivers/net/can/mscan/mscan.c37
-rw-r--r--drivers/net/can/sja1000/Kconfig14
-rw-r--r--drivers/net/can/sja1000/Makefile2
-rw-r--r--drivers/net/can/sja1000/ems_pcmcia.c331
-rw-r--r--drivers/net/can/sja1000/peak_pci.c291
-rw-r--r--drivers/net/can/sja1000/sja1000.c2
-rw-r--r--drivers/net/can/sja1000/sja1000.h2
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c2
-rw-r--r--drivers/net/can/slcan.c2
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/chelsio/sge.c2140
-rw-r--r--drivers/net/cris/eth_v10.c2
-rw-r--r--drivers/net/cxgb3/sge.c3303
-rw-r--r--drivers/net/cxgb4/sge.c2442
-rw-r--r--drivers/net/cxgb4vf/sge.c2465
-rw-r--r--drivers/net/dummy.c2
-rw-r--r--drivers/net/e1000/e1000.h361
-rw-r--r--drivers/net/e1000e/Makefile37
-rw-r--r--drivers/net/e1000e/e1000.h741
-rw-r--r--drivers/net/e1000e/es2lan.c1516
-rw-r--r--drivers/net/e1000e/ethtool.c2082
-rw-r--r--drivers/net/e1000e/netdev.c6387
-rw-r--r--drivers/net/enic/enic_pp.c264
-rw-r--r--drivers/net/enic/enic_pp.h27
-rw-r--r--drivers/net/ethernet/3com/3c501.c (renamed from drivers/net/3c501.c)2
-rw-r--r--drivers/net/ethernet/3com/3c501.h (renamed from drivers/net/3c501.h)0
-rw-r--r--drivers/net/ethernet/3com/3c509.c (renamed from drivers/net/3c509.c)2
-rw-r--r--drivers/net/ethernet/3com/3c515.c (renamed from drivers/net/3c515.c)2
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c (renamed from drivers/net/pcmcia/3c574_cs.c)2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c (renamed from drivers/net/pcmcia/3c589_cs.c)2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c (renamed from drivers/net/3c59x.c)15
-rw-r--r--drivers/net/ethernet/3com/Kconfig122
-rw-r--r--drivers/net/ethernet/3com/Makefile11
-rw-r--r--drivers/net/ethernet/3com/typhoon.c (renamed from drivers/net/typhoon.c)15
-rw-r--r--drivers/net/ethernet/3com/typhoon.h (renamed from drivers/net/typhoon.h)0
-rw-r--r--drivers/net/ethernet/8390/3c503.c (renamed from drivers/net/3c503.c)2
-rw-r--r--drivers/net/ethernet/8390/3c503.h (renamed from drivers/net/3c503.h)0
-rw-r--r--drivers/net/ethernet/8390/8390.c (renamed from drivers/net/8390.c)2
-rw-r--r--drivers/net/ethernet/8390/8390.h (renamed from drivers/net/8390.h)0
-rw-r--r--drivers/net/ethernet/8390/8390p.c (renamed from drivers/net/8390p.c)2
-rw-r--r--drivers/net/ethernet/8390/Kconfig337
-rw-r--r--drivers/net/ethernet/8390/Makefile29
-rw-r--r--drivers/net/ethernet/8390/ac3200.c (renamed from drivers/net/ac3200.c)2
-rw-r--r--drivers/net/ethernet/8390/apne.c (renamed from drivers/net/apne.c)0
-rw-r--r--drivers/net/ethernet/8390/ax88796.c (renamed from drivers/net/ax88796.c)2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c (renamed from drivers/net/pcmcia/axnet_cs.c)4
-rw-r--r--drivers/net/ethernet/8390/e2100.c (renamed from drivers/net/e2100.c)2
-rw-r--r--drivers/net/ethernet/8390/es3210.c (renamed from drivers/net/es3210.c)0
-rw-r--r--drivers/net/ethernet/8390/etherh.c (renamed from drivers/net/arm/etherh.c)4
-rw-r--r--drivers/net/ethernet/8390/hp-plus.c (renamed from drivers/net/hp-plus.c)2
-rw-r--r--drivers/net/ethernet/8390/hp.c (renamed from drivers/net/hp.c)0
-rw-r--r--drivers/net/ethernet/8390/hydra.c (renamed from drivers/net/hydra.c)2
-rw-r--r--drivers/net/ethernet/8390/lib8390.c (renamed from drivers/net/lib8390.c)0
-rw-r--r--drivers/net/ethernet/8390/lne390.c (renamed from drivers/net/lne390.c)0
-rw-r--r--drivers/net/ethernet/8390/mac8390.c (renamed from drivers/net/mac8390.c)2
-rw-r--r--drivers/net/ethernet/8390/ne-h8300.c (renamed from drivers/net/ne-h8300.c)2
-rw-r--r--drivers/net/ethernet/8390/ne.c (renamed from drivers/net/ne.c)0
-rw-r--r--drivers/net/ethernet/8390/ne2.c (renamed from drivers/net/ne2.c)0
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c (renamed from drivers/net/ne2k-pci.c)2
-rw-r--r--drivers/net/ethernet/8390/ne3210.c (renamed from drivers/net/ne3210.c)0
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c (renamed from drivers/net/pcmcia/pcnet_cs.c)4
-rw-r--r--drivers/net/ethernet/8390/smc-mca.c (renamed from drivers/net/smc-mca.c)2
-rw-r--r--drivers/net/ethernet/8390/smc-ultra.c (renamed from drivers/net/smc-ultra.c)2
-rw-r--r--drivers/net/ethernet/8390/smc-ultra32.c (renamed from drivers/net/smc-ultra32.c)2
-rw-r--r--drivers/net/ethernet/8390/stnic.c (renamed from drivers/net/stnic.c)0
-rw-r--r--drivers/net/ethernet/8390/wd.c (renamed from drivers/net/wd.c)2
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c (renamed from drivers/net/zorro8390.c)2
-rw-r--r--drivers/net/ethernet/Kconfig177
-rw-r--r--drivers/net/ethernet/Makefile74
-rw-r--r--drivers/net/ethernet/adaptec/Kconfig36
-rw-r--r--drivers/net/ethernet/adaptec/Makefile5
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c (renamed from drivers/net/starfire.c)13
-rw-r--r--drivers/net/ethernet/adi/Kconfig69
-rw-r--r--drivers/net/ethernet/adi/Makefile5
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c (renamed from drivers/net/bfin_mac.c)2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.h (renamed from drivers/net/bfin_mac.h)0
-rw-r--r--drivers/net/ethernet/aeroflex/Kconfig11
-rw-r--r--drivers/net/ethernet/aeroflex/Makefile5
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c (renamed from drivers/net/greth.c)20
-rw-r--r--drivers/net/ethernet/aeroflex/greth.h (renamed from drivers/net/greth.h)0
-rw-r--r--drivers/net/ethernet/alteon/Kconfig48
-rw-r--r--drivers/net/ethernet/alteon/Makefile5
-rw-r--r--drivers/net/ethernet/alteon/acenic.c (renamed from drivers/net/acenic.c)16
-rw-r--r--drivers/net/ethernet/alteon/acenic.h (renamed from drivers/net/acenic.h)0
-rw-r--r--drivers/net/ethernet/amd/7990.c (renamed from drivers/net/7990.c)0
-rw-r--r--drivers/net/ethernet/amd/7990.h (renamed from drivers/net/7990.h)0
-rw-r--r--drivers/net/ethernet/amd/Kconfig195
-rw-r--r--drivers/net/ethernet/amd/Makefile20
-rw-r--r--drivers/net/ethernet/amd/a2065.c (renamed from drivers/net/a2065.c)2
-rw-r--r--drivers/net/ethernet/amd/a2065.h (renamed from drivers/net/a2065.h)0
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c (renamed from drivers/net/arm/am79c961a.c)2
-rw-r--r--drivers/net/ethernet/amd/am79c961a.h (renamed from drivers/net/arm/am79c961a.h)0
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c (renamed from drivers/net/amd8111e.c)2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h (renamed from drivers/net/amd8111e.h)0
-rw-r--r--drivers/net/ethernet/amd/ariadne.c (renamed from drivers/net/ariadne.c)2
-rw-r--r--drivers/net/ethernet/amd/ariadne.h (renamed from drivers/net/ariadne.h)0
-rw-r--r--drivers/net/ethernet/amd/atarilance.c (renamed from drivers/net/atarilance.c)2
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c (renamed from drivers/net/au1000_eth.c)2
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.h (renamed from drivers/net/au1000_eth.h)0
-rw-r--r--drivers/net/ethernet/amd/declance.c (renamed from drivers/net/declance.c)2
-rw-r--r--drivers/net/ethernet/amd/depca.c (renamed from drivers/net/depca.c)2
-rw-r--r--drivers/net/ethernet/amd/depca.h (renamed from drivers/net/depca.h)2
-rw-r--r--drivers/net/ethernet/amd/hplance.c (renamed from drivers/net/hplance.c)2
-rw-r--r--drivers/net/ethernet/amd/hplance.h (renamed from drivers/net/hplance.h)0
-rw-r--r--drivers/net/ethernet/amd/lance.c (renamed from drivers/net/lance.c)2
-rw-r--r--drivers/net/ethernet/amd/mvme147.c (renamed from drivers/net/mvme147.c)2
-rw-r--r--drivers/net/ethernet/amd/ni65.c (renamed from drivers/net/ni65.c)2
-rw-r--r--drivers/net/ethernet/amd/ni65.h (renamed from drivers/net/ni65.h)0
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c (renamed from drivers/net/pcmcia/nmclan_cs.c)2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c (renamed from drivers/net/pcnet32.c)204
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c (renamed from drivers/net/sun3lance.c)2
-rw-r--r--drivers/net/ethernet/amd/sunlance.c (renamed from drivers/net/sunlance.c)2
-rw-r--r--drivers/net/ethernet/apple/Kconfig77
-rw-r--r--drivers/net/ethernet/apple/Makefile8
-rw-r--r--drivers/net/ethernet/apple/bmac.c1685
-rw-r--r--drivers/net/ethernet/apple/bmac.h (renamed from drivers/net/bmac.h)0
-rw-r--r--drivers/net/ethernet/apple/mac89x0.c (renamed from drivers/net/mac89x0.c)2
-rw-r--r--drivers/net/ethernet/apple/mace.c (renamed from drivers/net/mace.c)2
-rw-r--r--drivers/net/ethernet/apple/mace.h (renamed from drivers/net/mace.h)0
-rw-r--r--drivers/net/ethernet/apple/macmace.c (renamed from drivers/net/macmace.c)11
-rw-r--r--drivers/net/ethernet/atheros/Kconfig70
-rw-r--r--drivers/net/ethernet/atheros/Makefile8
-rw-r--r--drivers/net/ethernet/atheros/atl1c/Makefile (renamed from drivers/net/atl1c/Makefile)0
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h (renamed from drivers/net/atl1c/atl1c.h)0
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c (renamed from drivers/net/atl1c/atl1c_ethtool.c)0
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c (renamed from drivers/net/atl1c/atl1c_hw.c)0
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.h (renamed from drivers/net/atl1c/atl1c_hw.h)0
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c (renamed from drivers/net/atl1c/atl1c_main.c)13
-rw-r--r--drivers/net/ethernet/atheros/atl1e/Makefile (renamed from drivers/net/atl1e/Makefile)0
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e.h (renamed from drivers/net/atl1e/atl1e.h)0
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c (renamed from drivers/net/atl1e/atl1e_ethtool.c)0
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_hw.c (renamed from drivers/net/atl1e/atl1e_hw.c)0
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_hw.h (renamed from drivers/net/atl1e/atl1e_hw.h)0
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c (renamed from drivers/net/atl1e/atl1e_main.c)19
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_param.c (renamed from drivers/net/atl1e/atl1e_param.c)0
-rw-r--r--drivers/net/ethernet/atheros/atlx/Makefile (renamed from drivers/net/atlx/Makefile)0
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c (renamed from drivers/net/atlx/atl1.c)27
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.h (renamed from drivers/net/atlx/atl1.h)0
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c (renamed from drivers/net/atlx/atl2.c)2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.h (renamed from drivers/net/atlx/atl2.h)0
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c (renamed from drivers/net/atlx/atlx.c)0
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.h (renamed from drivers/net/atlx/atlx.h)0
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig122
-rw-r--r--drivers/net/ethernet/broadcom/Makefile11
-rw-r--r--drivers/net/ethernet/broadcom/b44.c (renamed from drivers/net/b44.c)2
-rw-r--r--drivers/net/ethernet/broadcom/b44.h (renamed from drivers/net/b44.h)0
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c (renamed from drivers/net/bcm63xx_enet.c)4
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.h (renamed from drivers/net/bcm63xx_enet.h)0
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c (renamed from drivers/net/bnx2.c)94
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h (renamed from drivers/net/bnx2.h)12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2_fw.h (renamed from drivers/net/bnx2_fw.h)0
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/Makefile (renamed from drivers/net/bnx2x/Makefile)0
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h (renamed from drivers/net/bnx2x/bnx2x.h)85
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c (renamed from drivers/net/bnx2x/bnx2x_cmn.c)65
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h (renamed from drivers/net/bnx2x/bnx2x_cmn.h)12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c (renamed from drivers/net/bnx2x/bnx2x_dcb.c)5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h (renamed from drivers/net/bnx2x/bnx2x_dcb.h)0
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h (renamed from drivers/net/bnx2x/bnx2x_dump.h)0
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c (renamed from drivers/net/bnx2x/bnx2x_ethtool.c)30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h (renamed from drivers/net/bnx2x/bnx2x_fw_defs.h)0
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h (renamed from drivers/net/bnx2x/bnx2x_fw_file_hdr.h)0
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h (renamed from drivers/net/bnx2x/bnx2x_hsi.h)2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h (renamed from drivers/net/bnx2x/bnx2x_init.h)0
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h (renamed from drivers/net/bnx2x/bnx2x_init_ops.h)0
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c (renamed from drivers/net/bnx2x/bnx2x_link.c)150
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h (renamed from drivers/net/bnx2x/bnx2x_link.h)0
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c (renamed from drivers/net/bnx2x/bnx2x_main.c)81
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h (renamed from drivers/net/bnx2x/bnx2x_reg.h)0
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c (renamed from drivers/net/bnx2x/bnx2x_sp.c)67
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h (renamed from drivers/net/bnx2x/bnx2x_sp.h)0
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c (renamed from drivers/net/bnx2x/bnx2x_stats.c)46
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h (renamed from drivers/net/bnx2x/bnx2x_stats.h)0
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c (renamed from drivers/net/cnic.c)35
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h (renamed from drivers/net/cnic.h)4
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h (renamed from drivers/net/cnic_defs.h)1
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h (renamed from drivers/net/cnic_if.h)0
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c (renamed from drivers/net/sb1250-mac.c)2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c (renamed from drivers/net/tg3.c)1446
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h (renamed from drivers/net/tg3.h)4
-rw-r--r--drivers/net/ethernet/brocade/Kconfig23
-rw-r--r--drivers/net/ethernet/brocade/Makefile5
-rw-r--r--drivers/net/ethernet/brocade/bna/Kconfig17
-rw-r--r--drivers/net/ethernet/brocade/bna/Makefile12
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c (renamed from drivers/net/bna/bfa_cee.c)5
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.h (renamed from drivers/net/bna/bfa_cee.h)0
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cs.h (renamed from drivers/net/bna/bfa_cs.h)0
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h (renamed from drivers/net/bna/bfa_defs.h)64
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_cna.h (renamed from drivers/net/bna/bfa_defs_cna.h)8
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h171
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_status.h (renamed from drivers/net/bna/bfa_defs_status.h)0
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c (renamed from drivers/net/bna/bfa_ioc.c)459
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h (renamed from drivers/net/bna/bfa_ioc.h)68
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c878
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.c669
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.h130
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h481
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_cna.h (renamed from drivers/net/bna/bfi_cna.h)0
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h901
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_reg.h452
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h575
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c2144
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h422
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c3798
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h987
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c (renamed from drivers/net/bna/bnad.c)1178
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h380
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c958
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h107
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c92
-rw-r--r--drivers/net/ethernet/cadence/Kconfig45
-rw-r--r--drivers/net/ethernet/cadence/Makefile6
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c (renamed from drivers/net/arm/at91_ether.c)2
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.h (renamed from drivers/net/arm/at91_ether.h)0
-rw-r--r--drivers/net/ethernet/cadence/macb.c (renamed from drivers/net/macb.c)2
-rw-r--r--drivers/net/ethernet/cadence/macb.h (renamed from drivers/net/macb.h)0
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig107
-rw-r--r--drivers/net/ethernet/chelsio/Makefile8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/Makefile (renamed from drivers/net/chelsio/Makefile)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h (renamed from drivers/net/chelsio/common.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cphy.h (renamed from drivers/net/chelsio/cphy.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h (renamed from drivers/net/chelsio/cpl5_cmd.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c (renamed from drivers/net/chelsio/cxgb2.c)4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/elmer0.h (renamed from drivers/net/chelsio/elmer0.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.c (renamed from drivers/net/chelsio/espi.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.h (renamed from drivers/net/chelsio/espi.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/fpga_defs.h (renamed from drivers/net/chelsio/fpga_defs.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/gmac.h (renamed from drivers/net/chelsio/gmac.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c (renamed from drivers/net/chelsio/mv88e1xxx.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h (renamed from drivers/net/chelsio/mv88e1xxx.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/mv88x201x.c (renamed from drivers/net/chelsio/mv88x201x.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/my3126.c (renamed from drivers/net/chelsio/my3126.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c (renamed from drivers/net/chelsio/pm3393.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/regs.h (renamed from drivers/net/chelsio/regs.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c2139
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h (renamed from drivers/net/chelsio/sge.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c (renamed from drivers/net/chelsio/subr.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h (renamed from drivers/net/chelsio/suni1x10gexp_regs.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/tp.c (renamed from drivers/net/chelsio/tp.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/tp.h (renamed from drivers/net/chelsio/tp.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/vsc7326.c (renamed from drivers/net/chelsio/vsc7326.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h (renamed from drivers/net/chelsio/vsc7326_reg.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/Makefile (renamed from drivers/net/cxgb3/Makefile)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/adapter.h (renamed from drivers/net/cxgb3/adapter.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/ael1002.c (renamed from drivers/net/cxgb3/ael1002.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/aq100x.c (renamed from drivers/net/cxgb3/aq100x.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/common.h (renamed from drivers/net/cxgb3/common.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h (renamed from drivers/net/cxgb3/cxgb3_ctl_defs.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h (renamed from drivers/net/cxgb3/cxgb3_defs.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h (renamed from drivers/net/cxgb3/cxgb3_ioctl.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c (renamed from drivers/net/cxgb3/cxgb3_main.c)3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c (renamed from drivers/net/cxgb3/cxgb3_offload.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h (renamed from drivers/net/cxgb3/cxgb3_offload.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h (renamed from drivers/net/cxgb3/firmware_exports.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c (renamed from drivers/net/cxgb3/l2t.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h (renamed from drivers/net/cxgb3/l2t.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/mc5.c (renamed from drivers/net/cxgb3/mc5.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/regs.h (renamed from drivers/net/cxgb3/regs.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c3303
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge_defs.h (renamed from drivers/net/cxgb3/sge_defs.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h (renamed from drivers/net/cxgb3/t3_cpl.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c (renamed from drivers/net/cxgb3/t3_hw.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3cdev.h (renamed from drivers/net/cxgb3/t3cdev.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/version.h (renamed from drivers/net/cxgb3/version.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/vsc8211.c (renamed from drivers/net/cxgb3/vsc8211.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/xgmac.c (renamed from drivers/net/cxgb3/xgmac.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile (renamed from drivers/net/cxgb4/Makefile)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h (renamed from drivers/net/cxgb4/cxgb4.h)2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c (renamed from drivers/net/cxgb4/cxgb4_main.c)5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h (renamed from drivers/net/cxgb4/cxgb4_uld.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c (renamed from drivers/net/cxgb4/l2t.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h (renamed from drivers/net/cxgb4/l2t.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2443
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c (renamed from drivers/net/cxgb4/t4_hw.c)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h (renamed from drivers/net/cxgb4/t4_hw.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h (renamed from drivers/net/cxgb4/t4_msg.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h (renamed from drivers/net/cxgb4/t4_regs.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h (renamed from drivers/net/cxgb4/t4fw_api.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/Makefile (renamed from drivers/net/cxgb4vf/Makefile)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h (renamed from drivers/net/cxgb4vf/adapter.h)2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c (renamed from drivers/net/cxgb4vf/cxgb4vf_main.c)2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2453
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h (renamed from drivers/net/cxgb4vf/t4vf_common.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h (renamed from drivers/net/cxgb4vf/t4vf_defs.h)0
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c (renamed from drivers/net/cxgb4vf/t4vf_hw.c)0
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig50
-rw-r--r--drivers/net/ethernet/cirrus/Makefile6
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c (renamed from drivers/net/cs89x0.c)2
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.h (renamed from drivers/net/cs89x0.h)0
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c (renamed from drivers/net/arm/ep93xx_eth.c)0
-rw-r--r--drivers/net/ethernet/cisco/Kconfig23
-rw-r--r--drivers/net/ethernet/cisco/Makefile5
-rw-r--r--drivers/net/ethernet/cisco/enic/Kconfig9
-rw-r--r--drivers/net/ethernet/cisco/enic/Makefile (renamed from drivers/net/enic/Makefile)0
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_desc.h (renamed from drivers/net/enic/cq_desc.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_enet_desc.h (renamed from drivers/net/enic/cq_enet_desc.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h (renamed from drivers/net/enic/enic.h)15
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.c (renamed from drivers/net/enic/enic_dev.c)0
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h (renamed from drivers/net/enic/enic_dev.h)19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c (renamed from drivers/net/enic/enic_main.c)202
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c368
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.h36
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.c (renamed from drivers/net/enic/enic_res.c)0
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.h (renamed from drivers/net/enic/enic_res.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/rq_enet_desc.h (renamed from drivers/net/enic/rq_enet_desc.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.c (renamed from drivers/net/enic/vnic_cq.c)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.h (renamed from drivers/net/enic/vnic_cq.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c (renamed from drivers/net/enic/vnic_dev.c)28
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.h (renamed from drivers/net/enic/vnic_dev.h)2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h (renamed from drivers/net/enic/vnic_devcmd.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_enet.h (renamed from drivers/net/enic/vnic_enet.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_intr.c (renamed from drivers/net/enic/vnic_intr.c)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_intr.h (renamed from drivers/net/enic/vnic_intr.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_nic.h (renamed from drivers/net/enic/vnic_nic.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_resource.h (renamed from drivers/net/enic/vnic_resource.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c (renamed from drivers/net/enic/vnic_rq.c)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.h (renamed from drivers/net/enic/vnic_rq.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rss.h (renamed from drivers/net/enic/vnic_rss.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_stats.h (renamed from drivers/net/enic/vnic_stats.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_vic.c (renamed from drivers/net/enic/vnic_vic.c)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_vic.h (renamed from drivers/net/enic/vnic_vic.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c (renamed from drivers/net/enic/vnic_wq.c)0
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h (renamed from drivers/net/enic/vnic_wq.h)0
-rw-r--r--drivers/net/ethernet/cisco/enic/wq_enet_desc.h (renamed from drivers/net/enic/wq_enet_desc.h)0
-rw-r--r--drivers/net/ethernet/davicom/Kconfig24
-rw-r--r--drivers/net/ethernet/davicom/Makefile5
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c (renamed from drivers/net/dm9000.c)13
-rw-r--r--drivers/net/ethernet/davicom/dm9000.h (renamed from drivers/net/dm9000.h)0
-rw-r--r--drivers/net/ethernet/dec/Kconfig37
-rw-r--r--drivers/net/ethernet/dec/Makefile6
-rw-r--r--drivers/net/ethernet/dec/ewrk3.c (renamed from drivers/net/ewrk3.c)2
-rw-r--r--drivers/net/ethernet/dec/ewrk3.h (renamed from drivers/net/ewrk3.h)0
-rw-r--r--drivers/net/ethernet/dec/tulip/21142.c (renamed from drivers/net/tulip/21142.c)3
-rw-r--r--drivers/net/ethernet/dec/tulip/Kconfig172
-rw-r--r--drivers/net/ethernet/dec/tulip/Makefile (renamed from drivers/net/tulip/Makefile)0
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c (renamed from drivers/net/tulip/de2104x.c)2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c (renamed from drivers/net/tulip/de4x5.c)2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.h (renamed from drivers/net/tulip/de4x5.h)2
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c (renamed from drivers/net/tulip/dmfe.c)2
-rw-r--r--drivers/net/ethernet/dec/tulip/eeprom.c (renamed from drivers/net/tulip/eeprom.c)2
-rw-r--r--drivers/net/ethernet/dec/tulip/interrupt.c (renamed from drivers/net/tulip/interrupt.c)3
-rw-r--r--drivers/net/ethernet/dec/tulip/media.c (renamed from drivers/net/tulip/media.c)3
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic.c (renamed from drivers/net/tulip/pnic.c)3
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic2.c (renamed from drivers/net/tulip/pnic2.c)3
-rw-r--r--drivers/net/ethernet/dec/tulip/timer.c (renamed from drivers/net/tulip/timer.c)3
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip.h (renamed from drivers/net/tulip/tulip.h)3
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c (renamed from drivers/net/tulip/tulip_core.c)5
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c (renamed from drivers/net/tulip/uli526x.c)2
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c (renamed from drivers/net/tulip/winbond-840.c)2
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c (renamed from drivers/net/tulip/xircom_cb.c)0
-rw-r--r--drivers/net/ethernet/dlink/Kconfig86
-rw-r--r--drivers/net/ethernet/dlink/Makefile8
-rw-r--r--drivers/net/ethernet/dlink/de600.c (renamed from drivers/net/de600.c)0
-rw-r--r--drivers/net/ethernet/dlink/de600.h (renamed from drivers/net/de600.h)0
-rw-r--r--drivers/net/ethernet/dlink/de620.c (renamed from drivers/net/de620.c)2
-rw-r--r--drivers/net/ethernet/dlink/de620.h (renamed from drivers/net/de620.h)0
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c (renamed from drivers/net/dl2k.c)107
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h (renamed from drivers/net/dl2k.h)110
-rw-r--r--drivers/net/ethernet/dlink/sundance.c (renamed from drivers/net/sundance.c)2
-rw-r--r--drivers/net/ethernet/dnet.c (renamed from drivers/net/dnet.c)0
-rw-r--r--drivers/net/ethernet/dnet.h (renamed from drivers/net/dnet.h)0
-rw-r--r--drivers/net/ethernet/emulex/Kconfig23
-rw-r--r--drivers/net/ethernet/emulex/Makefile5
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig6
-rw-r--r--drivers/net/ethernet/emulex/benet/Makefile (renamed from drivers/net/benet/Makefile)0
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h (renamed from drivers/net/benet/be.h)132
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c (renamed from drivers/net/benet/be_cmds.c)307
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h (renamed from drivers/net/benet/be_cmds.h)104
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c (renamed from drivers/net/benet/be_ethtool.c)168
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h (renamed from drivers/net/benet/be_hw.h)21
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c (renamed from drivers/net/benet/be_main.c)915
-rw-r--r--drivers/net/ethernet/ethoc.c (renamed from drivers/net/ethoc.c)2
-rw-r--r--drivers/net/ethernet/faraday/Kconfig40
-rw-r--r--drivers/net/ethernet/faraday/Makefile6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c (renamed from drivers/net/ftgmac100.c)4
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.h (renamed from drivers/net/ftgmac100.h)0
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c (renamed from drivers/net/ftmac100.c)5
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.h (renamed from drivers/net/ftmac100.h)0
-rw-r--r--drivers/net/ethernet/fealnx.c (renamed from drivers/net/fealnx.c)2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig88
-rw-r--r--drivers/net/ethernet/freescale/Makefile18
-rw-r--r--drivers/net/ethernet/freescale/fec.c (renamed from drivers/net/fec.c)117
-rw-r--r--drivers/net/ethernet/freescale/fec.h (renamed from drivers/net/fec.h)0
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c (renamed from drivers/net/fec_mpc52xx.c)2
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.h (renamed from drivers/net/fec_mpc52xx.h)0
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx_phy.c (renamed from drivers/net/fec_mpc52xx_phy.c)0
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/Kconfig35
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/Makefile (renamed from drivers/net/fs_enet/Makefile)0
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fec.h (renamed from drivers/net/fs_enet/fec.h)0
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c (renamed from drivers/net/fs_enet/fs_enet-main.c)2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h (renamed from drivers/net/fs_enet/fs_enet.h)0
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c (renamed from drivers/net/fs_enet/mac-fcc.c)0
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c (renamed from drivers/net/fs_enet/mac-fec.c)0
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c (renamed from drivers/net/fs_enet/mac-scc.c)0
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c (renamed from drivers/net/fs_enet/mii-bitbang.c)0
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c (renamed from drivers/net/fs_enet/mii-fec.c)0
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c (renamed from drivers/net/fsl_pq_mdio.c)0
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.h (renamed from drivers/net/fsl_pq_mdio.h)0
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c (renamed from drivers/net/gianfar.c)12
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h (renamed from drivers/net/gianfar.h)0
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c (renamed from drivers/net/gianfar_ethtool.c)5
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c (renamed from drivers/net/gianfar_ptp.c)0
-rw-r--r--drivers/net/ethernet/freescale/gianfar_sysfs.c (renamed from drivers/net/gianfar_sysfs.c)0
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c (renamed from drivers/net/ucc_geth.c)12
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h (renamed from drivers/net/ucc_geth.h)0
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c (renamed from drivers/net/ucc_geth_ethtool.c)0
-rw-r--r--drivers/net/ethernet/fujitsu/Kconfig54
-rw-r--r--drivers/net/ethernet/fujitsu/Makefile7
-rw-r--r--drivers/net/ethernet/fujitsu/at1700.c (renamed from drivers/net/at1700.c)2
-rw-r--r--drivers/net/ethernet/fujitsu/eth16i.c (renamed from drivers/net/eth16i.c)2
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c (renamed from drivers/net/pcmcia/fmvj18x_cs.c)2
-rw-r--r--drivers/net/ethernet/hp/Kconfig32
-rw-r--r--drivers/net/ethernet/hp/Makefile5
-rw-r--r--drivers/net/ethernet/hp/hp100.c (renamed from drivers/net/hp100.c)4
-rw-r--r--drivers/net/ethernet/hp/hp100.h (renamed from drivers/net/hp100.h)0
-rw-r--r--drivers/net/ethernet/i825xx/3c505.c (renamed from drivers/net/3c505.c)2
-rw-r--r--drivers/net/ethernet/i825xx/3c505.h (renamed from drivers/net/3c505.h)0
-rw-r--r--drivers/net/ethernet/i825xx/3c507.c (renamed from drivers/net/3c507.c)0
-rw-r--r--drivers/net/ethernet/i825xx/3c523.c (renamed from drivers/net/3c523.c)2
-rw-r--r--drivers/net/ethernet/i825xx/3c523.h (renamed from drivers/net/3c523.h)0
-rw-r--r--drivers/net/ethernet/i825xx/3c527.c (renamed from drivers/net/3c527.c)2
-rw-r--r--drivers/net/ethernet/i825xx/3c527.h (renamed from drivers/net/3c527.h)0
-rw-r--r--drivers/net/ethernet/i825xx/82596.c (renamed from drivers/net/82596.c)2
-rw-r--r--drivers/net/ethernet/i825xx/Kconfig183
-rw-r--r--drivers/net/ethernet/i825xx/Makefile20
-rw-r--r--drivers/net/ethernet/i825xx/eepro.c (renamed from drivers/net/eepro.c)2
-rw-r--r--drivers/net/ethernet/i825xx/eexpress.c (renamed from drivers/net/eexpress.c)2
-rw-r--r--drivers/net/ethernet/i825xx/eexpress.h (renamed from drivers/net/eexpress.h)0
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c (renamed from drivers/net/arm/ether1.c)2
-rw-r--r--drivers/net/ethernet/i825xx/ether1.h (renamed from drivers/net/arm/ether1.h)0
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c (renamed from drivers/net/lasi_82596.c)0
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c (renamed from drivers/net/lib82596.c)2
-rw-r--r--drivers/net/ethernet/i825xx/lp486e.c (renamed from drivers/net/lp486e.c)2
-rw-r--r--drivers/net/ethernet/i825xx/ni52.c (renamed from drivers/net/ni52.c)2
-rw-r--r--drivers/net/ethernet/i825xx/ni52.h (renamed from drivers/net/ni52.h)0
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c (renamed from drivers/net/sni_82596.c)0
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c (renamed from drivers/net/sun3_82586.c)2
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.h (renamed from drivers/net/sun3_82586.h)0
-rw-r--r--drivers/net/ethernet/i825xx/znet.c (renamed from drivers/net/znet.c)2
-rw-r--r--drivers/net/ethernet/ibm/Kconfig48
-rw-r--r--drivers/net/ethernet/ibm/Makefile8
-rw-r--r--drivers/net/ethernet/ibm/ehea/Makefile (renamed from drivers/net/ehea/Makefile)0
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea.h (renamed from drivers/net/ehea/ehea.h)21
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_ethtool.c (renamed from drivers/net/ehea/ehea_ethtool.c)33
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_hw.h (renamed from drivers/net/ehea/ehea_hw.h)25
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c (renamed from drivers/net/ehea/ehea_main.c)544
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.c (renamed from drivers/net/ehea/ehea_phyp.c)0
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.h (renamed from drivers/net/ehea/ehea_phyp.h)0
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c (renamed from drivers/net/ehea/ehea_qmr.c)0
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.h (renamed from drivers/net/ehea/ehea_qmr.h)2
-rw-r--r--drivers/net/ethernet/ibm/emac/Kconfig76
-rw-r--r--drivers/net/ethernet/ibm/emac/Makefile11
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c3074
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h462
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.c (renamed from drivers/net/ibm_newemac/debug.c)0
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.h83
-rw-r--r--drivers/net/ethernet/ibm/emac/emac.h (renamed from drivers/net/ibm_newemac/emac.h)0
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c (renamed from drivers/net/ibm_newemac/mal.c)6
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.h (renamed from drivers/net/ibm_newemac/mal.h)4
-rw-r--r--drivers/net/ethernet/ibm/emac/phy.c (renamed from drivers/net/ibm_newemac/phy.c)0
-rw-r--r--drivers/net/ethernet/ibm/emac/phy.h (renamed from drivers/net/ibm_newemac/phy.h)0
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c (renamed from drivers/net/ibm_newemac/rgmii.c)0
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.h (renamed from drivers/net/ibm_newemac/rgmii.h)4
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c (renamed from drivers/net/ibm_newemac/tah.c)0
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.h (renamed from drivers/net/ibm_newemac/tah.h)4
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c (renamed from drivers/net/ibm_newemac/zmii.c)0
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.h (renamed from drivers/net/ibm_newemac/zmii.h)4
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c (renamed from drivers/net/ibmveth.c)11
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h (renamed from drivers/net/ibmveth.h)0
-rw-r--r--drivers/net/ethernet/ibm/iseries_veth.c (renamed from drivers/net/iseries_veth.c)2
-rw-r--r--drivers/net/ethernet/icplus/Kconfig14
-rw-r--r--drivers/net/ethernet/icplus/Makefile5
-rw-r--r--drivers/net/ethernet/icplus/ipg.c (renamed from drivers/net/ipg.c)199
-rw-r--r--drivers/net/ethernet/icplus/ipg.h (renamed from drivers/net/ipg.h)0
-rw-r--r--drivers/net/ethernet/intel/Kconfig222
-rw-r--r--drivers/net/ethernet/intel/Makefile12
-rw-r--r--drivers/net/ethernet/intel/e100.c (renamed from drivers/net/e100.c)6
-rw-r--r--drivers/net/ethernet/intel/e1000/Makefile (renamed from drivers/net/e1000/Makefile)0
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h363
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c (renamed from drivers/net/e1000/e1000_ethtool.c)4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c (renamed from drivers/net/e1000/e1000_hw.c)22
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.h (renamed from drivers/net/e1000/e1000_hw.h)0
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c (renamed from drivers/net/e1000/e1000_main.c)219
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_osdep.h (renamed from drivers/net/e1000/e1000_osdep.h)0
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c (renamed from drivers/net/e1000/e1000_param.c)0
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c1515
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c (renamed from drivers/net/e1000e/82571.c)25
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile37
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h (renamed from drivers/net/e1000e/defines.h)0
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h742
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c1991
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h (renamed from drivers/net/e1000e/hw.h)0
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c (renamed from drivers/net/e1000e/ich8lan.c)69
-rw-r--r--drivers/net/ethernet/intel/e1000e/lib.c (renamed from drivers/net/e1000e/lib.c)0
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c6440
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c (renamed from drivers/net/e1000e/param.c)0
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c (renamed from drivers/net/e1000e/phy.c)0
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile (renamed from drivers/net/igb/Makefile)0
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c (renamed from drivers/net/igb/e1000_82575.c)43
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h (renamed from drivers/net/igb/e1000_82575.h)2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h (renamed from drivers/net/igb/e1000_defines.h)4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h (renamed from drivers/net/igb/e1000_hw.h)0
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c (renamed from drivers/net/igb/e1000_mac.c)72
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h (renamed from drivers/net/igb/e1000_mac.h)1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c (renamed from drivers/net/igb/e1000_mbx.c)2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h (renamed from drivers/net/igb/e1000_mbx.h)0
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c (renamed from drivers/net/igb/e1000_nvm.c)0
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h (renamed from drivers/net/igb/e1000_nvm.h)0
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c (renamed from drivers/net/igb/e1000_phy.c)6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h (renamed from drivers/net/igb/e1000_phy.h)0
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h (renamed from drivers/net/igb/e1000_regs.h)1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h450
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c (renamed from drivers/net/igb/igb_ethtool.c)47
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c (renamed from drivers/net/igb/igb_main.c)2289
-rw-r--r--drivers/net/ethernet/intel/igbvf/Makefile (renamed from drivers/net/igbvf/Makefile)0
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h (renamed from drivers/net/igbvf/defines.h)0
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c473
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h (renamed from drivers/net/igbvf/igbvf.h)0
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c350
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.h (renamed from drivers/net/igbvf/mbx.h)0
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2873
-rw-r--r--drivers/net/ethernet/intel/igbvf/regs.h (renamed from drivers/net/igbvf/regs.h)0
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c (renamed from drivers/net/igbvf/vf.c)0
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h (renamed from drivers/net/igbvf/vf.h)0
-rw-r--r--drivers/net/ethernet/intel/ixgb/Makefile (renamed from drivers/net/ixgb/Makefile)0
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h (renamed from drivers/net/ixgb/ixgb.h)2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.c (renamed from drivers/net/ixgb/ixgb_ee.c)2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.h (renamed from drivers/net/ixgb/ixgb_ee.h)4
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c (renamed from drivers/net/ixgb/ixgb_ethtool.c)102
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c (renamed from drivers/net/ixgb/ixgb_hw.c)2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h (renamed from drivers/net/ixgb/ixgb_hw.h)4
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ids.h (renamed from drivers/net/ixgb/ixgb_ids.h)0
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c (renamed from drivers/net/ixgb/ixgb_main.c)78
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_osdep.h (renamed from drivers/net/ixgb/ixgb_osdep.h)1
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_param.c (renamed from drivers/net/ixgb/ixgb_param.c)18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile (renamed from drivers/net/ixgbe/Makefile)0
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h (renamed from drivers/net/ixgbe/ixgbe.h)87
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c (renamed from drivers/net/ixgbe/ixgbe_82598.c)45
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c (renamed from drivers/net/ixgbe/ixgbe_82599.c)147
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c (renamed from drivers/net/ixgbe/ixgbe_common.c)188
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h (renamed from drivers/net/ixgbe/ixgbe_common.h)2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c (renamed from drivers/net/ixgbe/ixgbe_dcb.c)58
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h (renamed from drivers/net/ixgbe/ixgbe_dcb.h)7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c (renamed from drivers/net/ixgbe/ixgbe_dcb_82598.c)9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h (renamed from drivers/net/ixgbe/ixgbe_dcb_82598.h)0
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c (renamed from drivers/net/ixgbe/ixgbe_dcb_82599.c)56
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h (renamed from drivers/net/ixgbe/ixgbe_dcb_82599.h)2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c (renamed from drivers/net/ixgbe/ixgbe_dcb_nl.c)173
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c (renamed from drivers/net/ixgbe/ixgbe_ethtool.c)307
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c (renamed from drivers/net/ixgbe/ixgbe_fcoe.c)47
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h (renamed from drivers/net/ixgbe/ixgbe_fcoe.h)2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (renamed from drivers/net/ixgbe/ixgbe_main.c)2296
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c (renamed from drivers/net/ixgbe/ixgbe_mbx.c)2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h (renamed from drivers/net/ixgbe/ixgbe_mbx.h)0
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c (renamed from drivers/net/ixgbe/ixgbe_phy.c)33
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h (renamed from drivers/net/ixgbe/ixgbe_phy.h)0
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c926
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h (renamed from drivers/net/ixgbe/ixgbe_sriov.h)6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h (renamed from drivers/net/ixgbe/ixgbe_type.h)80
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c (renamed from drivers/net/ixgbe/ixgbe_x540.c)92
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/Makefile (renamed from drivers/net/ixgbevf/Makefile)0
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h (renamed from drivers/net/ixgbevf/defines.h)0
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c692
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h (renamed from drivers/net/ixgbevf/ixgbevf.h)8
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c (renamed from drivers/net/ixgbevf/ixgbevf_main.c)107
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c341
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h (renamed from drivers/net/ixgbevf/mbx.h)0
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h (renamed from drivers/net/ixgbevf/regs.h)0
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c (renamed from drivers/net/ixgbevf/vf.c)0
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h (renamed from drivers/net/ixgbevf/vf.h)0
-rw-r--r--drivers/net/ethernet/jme.c (renamed from drivers/net/jme.c)15
-rw-r--r--drivers/net/ethernet/jme.h (renamed from drivers/net/jme.h)1
-rw-r--r--drivers/net/ethernet/korina.c (renamed from drivers/net/korina.c)3
-rw-r--r--drivers/net/ethernet/lantiq_etop.c (renamed from drivers/net/lantiq_etop.c)2
-rw-r--r--drivers/net/ethernet/marvell/Kconfig111
-rw-r--r--drivers/net/ethernet/marvell/Makefile8
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c (renamed from drivers/net/mv643xx_eth.c)21
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c (renamed from drivers/net/pxa168_eth.c)0
-rw-r--r--drivers/net/ethernet/marvell/skge.c (renamed from drivers/net/skge.c)88
-rw-r--r--drivers/net/ethernet/marvell/skge.h (renamed from drivers/net/skge.h)0
-rw-r--r--drivers/net/ethernet/marvell/sky2.c (renamed from drivers/net/sky2.c)126
-rw-r--r--drivers/net/ethernet/marvell/sky2.h (renamed from drivers/net/sky2.h)0
-rw-r--r--drivers/net/ethernet/mellanox/Kconfig23
-rw-r--r--drivers/net/ethernet/mellanox/Makefile5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig27
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile (renamed from drivers/net/mlx4/Makefile)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c (renamed from drivers/net/mlx4/alloc.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c (renamed from drivers/net/mlx4/catas.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c (renamed from drivers/net/mlx4/cmd.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c (renamed from drivers/net/mlx4/cq.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c (renamed from drivers/net/mlx4/en_cq.c)31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c (renamed from drivers/net/mlx4/en_ethtool.c)14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c (renamed from drivers/net/mlx4/en_main.c)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c (renamed from drivers/net/mlx4/en_netdev.c)116
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c (renamed from drivers/net/mlx4/en_port.c)24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.h (renamed from drivers/net/mlx4/en_port.h)11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c (renamed from drivers/net/mlx4/en_resources.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c (renamed from drivers/net/mlx4/en_rx.c)74
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c (renamed from drivers/net/mlx4/en_selftest.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c (renamed from drivers/net/mlx4/en_tx.c)38
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c (renamed from drivers/net/mlx4/eq.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c (renamed from drivers/net/mlx4/fw.c)1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h (renamed from drivers/net/mlx4/fw.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c (renamed from drivers/net/mlx4/icm.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h (renamed from drivers/net/mlx4/icm.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c (renamed from drivers/net/mlx4/intf.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c (renamed from drivers/net/mlx4/main.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c (renamed from drivers/net/mlx4/mcg.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h (renamed from drivers/net/mlx4/mlx4.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h (renamed from drivers/net/mlx4/mlx4_en.h)22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c (renamed from drivers/net/mlx4/mr.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c (renamed from drivers/net/mlx4/pd.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c (renamed from drivers/net/mlx4/port.c)15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c (renamed from drivers/net/mlx4/profile.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c (renamed from drivers/net/mlx4/qp.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/reset.c (renamed from drivers/net/mlx4/reset.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/sense.c (renamed from drivers/net/mlx4/sense.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c (renamed from drivers/net/mlx4/srq.c)0
-rw-r--r--drivers/net/ethernet/micrel/Kconfig69
-rw-r--r--drivers/net/ethernet/micrel/Makefile9
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c (renamed from drivers/net/arm/ks8695net.c)4
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.h (renamed from drivers/net/arm/ks8695net.h)0
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c (renamed from drivers/net/ks8842.c)0
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c (renamed from drivers/net/ks8851.c)0
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h (renamed from drivers/net/ks8851.h)0
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c (renamed from drivers/net/ks8851_mll.c)0
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c (renamed from drivers/net/ksz884x.c)5
-rw-r--r--drivers/net/ethernet/microchip/Kconfig38
-rw-r--r--drivers/net/ethernet/microchip/Makefile5
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c (renamed from drivers/net/enc28j60.c)2
-rw-r--r--drivers/net/ethernet/microchip/enc28j60_hw.h (renamed from drivers/net/enc28j60_hw.h)0
-rw-r--r--drivers/net/ethernet/mipsnet.c (renamed from drivers/net/mipsnet.c)2
-rw-r--r--drivers/net/ethernet/myricom/Kconfig47
-rw-r--r--drivers/net/ethernet/myricom/Makefile5
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/Makefile (renamed from drivers/net/myri10ge/Makefile)0
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c (renamed from drivers/net/myri10ge/myri10ge.c)31
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h (renamed from drivers/net/myri10ge/myri10ge_mcp.h)0
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h (renamed from drivers/net/myri10ge/myri10ge_mcp_gen_header.h)0
-rw-r--r--drivers/net/ethernet/natsemi/Kconfig83
-rw-r--r--drivers/net/ethernet/natsemi/Makefile10
-rw-r--r--drivers/net/ethernet/natsemi/ibmlana.c (renamed from drivers/net/ibmlana.c)2
-rw-r--r--drivers/net/ethernet/natsemi/ibmlana.h (renamed from drivers/net/ibmlana.h)0
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c (renamed from drivers/net/jazzsonic.c)2
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c (renamed from drivers/net/macsonic.c)11
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c (renamed from drivers/net/natsemi.c)2
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c (renamed from drivers/net/ns83820.c)9
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c (renamed from drivers/net/sonic.c)0
-rw-r--r--drivers/net/ethernet/natsemi/sonic.h (renamed from drivers/net/sonic.h)0
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c (renamed from drivers/net/xtsonic.c)2
-rw-r--r--drivers/net/ethernet/neterion/Kconfig55
-rw-r--r--drivers/net/ethernet/neterion/Makefile6
-rw-r--r--drivers/net/ethernet/neterion/s2io-regs.h (renamed from drivers/net/s2io-regs.h)0
-rw-r--r--drivers/net/ethernet/neterion/s2io.c (renamed from drivers/net/s2io.c)22
-rw-r--r--drivers/net/ethernet/neterion/s2io.h (renamed from drivers/net/s2io.h)0
-rw-r--r--drivers/net/ethernet/neterion/vxge/Makefile (renamed from drivers/net/vxge/Makefile)0
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c (renamed from drivers/net/vxge/vxge-config.c)11
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h (renamed from drivers/net/vxge/vxge-config.h)4
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c (renamed from drivers/net/vxge/vxge-ethtool.c)0
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.h (renamed from drivers/net/vxge/vxge-ethtool.h)0
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c (renamed from drivers/net/vxge/vxge-main.c)30
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h (renamed from drivers/net/vxge/vxge-main.h)0
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-reg.h (renamed from drivers/net/vxge/vxge-reg.h)0
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c (renamed from drivers/net/vxge/vxge-traffic.c)12
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.h (renamed from drivers/net/vxge/vxge-traffic.h)0
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-version.h (renamed from drivers/net/vxge/vxge-version.h)0
-rw-r--r--drivers/net/ethernet/netx-eth.c (renamed from drivers/net/netx-eth.c)2
-rw-r--r--drivers/net/ethernet/nuvoton/Kconfig31
-rw-r--r--drivers/net/ethernet/nuvoton/Makefile5
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c (renamed from drivers/net/arm/w90p910_ether.c)2
-rw-r--r--drivers/net/ethernet/nvidia/Kconfig32
-rw-r--r--drivers/net/ethernet/nvidia/Makefile5
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c (renamed from drivers/net/forcedeth.c)40
-rw-r--r--drivers/net/ethernet/octeon/Kconfig14
-rw-r--r--drivers/net/ethernet/octeon/Makefile5
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c (renamed from drivers/net/octeon/octeon_mgmt.c)4
-rw-r--r--drivers/net/ethernet/oki-semi/Kconfig23
-rw-r--r--drivers/net/ethernet/oki-semi/Makefile5
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Kconfig22
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Makefile (renamed from drivers/net/pch_gbe/Makefile)0
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h (renamed from drivers/net/pch_gbe/pch_gbe.h)0
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c (renamed from drivers/net/pch_gbe/pch_gbe_api.c)0
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h (renamed from drivers/net/pch_gbe/pch_gbe_api.h)0
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c (renamed from drivers/net/pch_gbe/pch_gbe_ethtool.c)4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c (renamed from drivers/net/pch_gbe/pch_gbe_main.c)3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c (renamed from drivers/net/pch_gbe/pch_gbe_param.c)0
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c (renamed from drivers/net/pch_gbe/pch_gbe_phy.c)0
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h (renamed from drivers/net/pch_gbe/pch_gbe_phy.h)0
-rw-r--r--drivers/net/ethernet/packetengines/Kconfig47
-rw-r--r--drivers/net/ethernet/packetengines/Makefile6
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c (renamed from drivers/net/hamachi.c)2
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c (renamed from drivers/net/yellowfin.c)2
-rw-r--r--drivers/net/ethernet/pasemi/Kconfig30
-rw-r--r--drivers/net/ethernet/pasemi/Makefile5
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c (renamed from drivers/net/pasemi_mac.c)15
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.h (renamed from drivers/net/pasemi_mac.h)0
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c (renamed from drivers/net/pasemi_mac_ethtool.c)0
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig54
-rw-r--r--drivers/net/ethernet/qlogic/Makefile8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/Makefile (renamed from drivers/net/netxen/Makefile)0
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h (renamed from drivers/net/netxen/netxen_nic.h)10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c (renamed from drivers/net/netxen/netxen_nic_ctx.c)0
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c (renamed from drivers/net/netxen/netxen_nic_ethtool.c)3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h (renamed from drivers/net/netxen/netxen_nic_hdr.h)0
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c (renamed from drivers/net/netxen/netxen_nic_hw.c)0
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h (renamed from drivers/net/netxen/netxen_nic_hw.h)0
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c (renamed from drivers/net/netxen/netxen_nic_init.c)8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c (renamed from drivers/net/netxen/netxen_nic_main.c)178
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c (renamed from drivers/net/qla3xxx.c)14
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.h (renamed from drivers/net/qla3xxx.h)0
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/Makefile (renamed from drivers/net/qlcnic/Makefile)0
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h (renamed from drivers/net/qlcnic/qlcnic.h)32
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c (renamed from drivers/net/qlcnic/qlcnic_ctx.c)312
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c (renamed from drivers/net/qlcnic/qlcnic_ethtool.c)56
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h (renamed from drivers/net/qlcnic/qlcnic_hdr.h)4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c (renamed from drivers/net/qlcnic/qlcnic_hw.c)4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c (renamed from drivers/net/qlcnic/qlcnic_init.c)21
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c (renamed from drivers/net/qlcnic/qlcnic_main.c)185
-rw-r--r--drivers/net/ethernet/qlogic/qlge/Makefile (renamed from drivers/net/qlge/Makefile)0
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h (renamed from drivers/net/qlge/qlge.h)0
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c (renamed from drivers/net/qlge/qlge_dbg.c)0
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c (renamed from drivers/net/qlge/qlge_ethtool.c)0
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c (renamed from drivers/net/qlge/qlge_main.c)26
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_mpi.c (renamed from drivers/net/qlge/qlge_mpi.c)0
-rw-r--r--drivers/net/ethernet/racal/Kconfig33
-rw-r--r--drivers/net/ethernet/racal/Makefile5
-rw-r--r--drivers/net/ethernet/racal/ni5010.c (renamed from drivers/net/ni5010.c)2
-rw-r--r--drivers/net/ethernet/racal/ni5010.h (renamed from drivers/net/ni5010.h)0
-rw-r--r--drivers/net/ethernet/rdc/Kconfig35
-rw-r--r--drivers/net/ethernet/rdc/Makefile5
-rw-r--r--drivers/net/ethernet/rdc/r6040.c (renamed from drivers/net/r6040.c)10
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c (renamed from drivers/net/8139cp.c)19
-rw-r--r--drivers/net/ethernet/realtek/8139too.c (renamed from drivers/net/8139too.c)2
-rw-r--r--drivers/net/ethernet/realtek/Kconfig130
-rw-r--r--drivers/net/ethernet/realtek/Makefile9
-rw-r--r--drivers/net/ethernet/realtek/atp.c (renamed from drivers/net/atp.c)2
-rw-r--r--drivers/net/ethernet/realtek/atp.h (renamed from drivers/net/atp.h)0
-rw-r--r--drivers/net/ethernet/realtek/r8169.c (renamed from drivers/net/r8169.c)585
-rw-r--r--drivers/net/ethernet/realtek/sc92031.c (renamed from drivers/net/sc92031.c)10
-rw-r--r--drivers/net/ethernet/renesas/Kconfig19
-rw-r--r--drivers/net/ethernet/renesas/Makefile5
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c (renamed from drivers/net/sh_eth.c)44
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h830
-rw-r--r--drivers/net/ethernet/s6gmac.c (renamed from drivers/net/s6gmac.c)0
-rw-r--r--drivers/net/ethernet/seeq/Kconfig47
-rw-r--r--drivers/net/ethernet/seeq/Makefile7
-rw-r--r--drivers/net/ethernet/seeq/ether3.c (renamed from drivers/net/arm/ether3.c)2
-rw-r--r--drivers/net/ethernet/seeq/ether3.h (renamed from drivers/net/arm/ether3.h)0
-rw-r--r--drivers/net/ethernet/seeq/seeq8005.c (renamed from drivers/net/seeq8005.c)2
-rw-r--r--drivers/net/ethernet/seeq/seeq8005.h (renamed from drivers/net/seeq8005.h)0
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c (renamed from drivers/net/sgiseeq.c)2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.h (renamed from drivers/net/sgiseeq.h)0
-rw-r--r--drivers/net/ethernet/sfc/Kconfig21
-rw-r--r--drivers/net/ethernet/sfc/Makefile (renamed from drivers/net/sfc/Makefile)0
-rw-r--r--drivers/net/ethernet/sfc/bitfield.h (renamed from drivers/net/sfc/bitfield.h)0
-rw-r--r--drivers/net/ethernet/sfc/efx.c (renamed from drivers/net/sfc/efx.c)46
-rw-r--r--drivers/net/ethernet/sfc/efx.h (renamed from drivers/net/sfc/efx.h)7
-rw-r--r--drivers/net/ethernet/sfc/enum.h (renamed from drivers/net/sfc/enum.h)0
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c1025
-rw-r--r--drivers/net/ethernet/sfc/falcon.c (renamed from drivers/net/sfc/falcon.c)2
-rw-r--r--drivers/net/ethernet/sfc/falcon_boards.c (renamed from drivers/net/sfc/falcon_boards.c)0
-rw-r--r--drivers/net/ethernet/sfc/falcon_xmac.c (renamed from drivers/net/sfc/falcon_xmac.c)0
-rw-r--r--drivers/net/ethernet/sfc/filter.c (renamed from drivers/net/sfc/filter.c)0
-rw-r--r--drivers/net/ethernet/sfc/filter.h (renamed from drivers/net/sfc/filter.h)0
-rw-r--r--drivers/net/ethernet/sfc/io.h (renamed from drivers/net/sfc/io.h)0
-rw-r--r--drivers/net/ethernet/sfc/mac.h (renamed from drivers/net/sfc/mac.h)0
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c (renamed from drivers/net/sfc/mcdi.c)0
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h (renamed from drivers/net/sfc/mcdi.h)0
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mac.c (renamed from drivers/net/sfc/mcdi_mac.c)0
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h (renamed from drivers/net/sfc/mcdi_pcol.h)0
-rw-r--r--drivers/net/ethernet/sfc/mcdi_phy.c (renamed from drivers/net/sfc/mcdi_phy.c)0
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.c (renamed from drivers/net/sfc/mdio_10g.c)0
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.h (renamed from drivers/net/sfc/mdio_10g.h)0
-rw-r--r--drivers/net/ethernet/sfc/mtd.c (renamed from drivers/net/sfc/mtd.c)0
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h (renamed from drivers/net/sfc/net_driver.h)0
-rw-r--r--drivers/net/ethernet/sfc/nic.c (renamed from drivers/net/sfc/nic.c)0
-rw-r--r--drivers/net/ethernet/sfc/nic.h (renamed from drivers/net/sfc/nic.h)3
-rw-r--r--drivers/net/ethernet/sfc/phy.h (renamed from drivers/net/sfc/phy.h)0
-rw-r--r--drivers/net/ethernet/sfc/qt202x_phy.c (renamed from drivers/net/sfc/qt202x_phy.c)0
-rw-r--r--drivers/net/ethernet/sfc/regs.h (renamed from drivers/net/sfc/regs.h)0
-rw-r--r--drivers/net/ethernet/sfc/rx.c (renamed from drivers/net/sfc/rx.c)4
-rw-r--r--drivers/net/ethernet/sfc/selftest.c (renamed from drivers/net/sfc/selftest.c)0
-rw-r--r--drivers/net/ethernet/sfc/selftest.h (renamed from drivers/net/sfc/selftest.h)0
-rw-r--r--drivers/net/ethernet/sfc/siena.c (renamed from drivers/net/sfc/siena.c)2
-rw-r--r--drivers/net/ethernet/sfc/spi.h (renamed from drivers/net/sfc/spi.h)0
-rw-r--r--drivers/net/ethernet/sfc/tenxpress.c (renamed from drivers/net/sfc/tenxpress.c)0
-rw-r--r--drivers/net/ethernet/sfc/tx.c (renamed from drivers/net/sfc/tx.c)21
-rw-r--r--drivers/net/ethernet/sfc/txc43128_phy.c (renamed from drivers/net/sfc/txc43128_phy.c)0
-rw-r--r--drivers/net/ethernet/sfc/workarounds.h (renamed from drivers/net/sfc/workarounds.h)0
-rw-r--r--drivers/net/ethernet/sgi/Kconfig36
-rw-r--r--drivers/net/ethernet/sgi/Makefile6
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c (renamed from drivers/net/ioc3-eth.c)2
-rw-r--r--drivers/net/ethernet/sgi/meth.c (renamed from drivers/net/meth.c)0
-rw-r--r--drivers/net/ethernet/sgi/meth.h (renamed from drivers/net/meth.h)0
-rw-r--r--drivers/net/ethernet/sis/Kconfig53
-rw-r--r--drivers/net/ethernet/sis/Makefile6
-rw-r--r--drivers/net/ethernet/sis/sis190.c (renamed from drivers/net/sis190.c)2
-rw-r--r--drivers/net/ethernet/sis/sis900.c (renamed from drivers/net/sis900.c)2
-rw-r--r--drivers/net/ethernet/sis/sis900.h (renamed from drivers/net/sis900.h)0
-rw-r--r--drivers/net/ethernet/smsc/Kconfig136
-rw-r--r--drivers/net/ethernet/smsc/Makefile11
-rw-r--r--drivers/net/ethernet/smsc/epic100.c (renamed from drivers/net/epic100.c)2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c (renamed from drivers/net/smc911x.c)2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h (renamed from drivers/net/smc911x.h)0
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c (renamed from drivers/net/smc9194.c)2
-rw-r--r--drivers/net/ethernet/smsc/smc9194.h (renamed from drivers/net/smc9194.h)0
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c (renamed from drivers/net/pcmcia/smc91c92_cs.c)2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c (renamed from drivers/net/smc91x.c)2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h (renamed from drivers/net/smc91x.h)0
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c (renamed from drivers/net/smsc911x.c)91
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.h (renamed from drivers/net/smsc911x.h)0
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c (renamed from drivers/net/smsc9420.c)2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.h (renamed from drivers/net/smsc9420.h)0
-rw-r--r--drivers/net/ethernet/stmicro/Kconfig23
-rw-r--r--drivers/net/ethernet/stmicro/Makefile5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig84
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c137
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h319
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h (renamed from drivers/net/stmmac/descs.h)0
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h126
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h (renamed from drivers/net/stmmac/dwmac100.h)0
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h (renamed from drivers/net/stmmac/dwmac1000.h)0
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c (renamed from drivers/net/stmmac/dwmac1000_core.c)11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c (renamed from drivers/net/stmmac/dwmac1000_dma.c)14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c (renamed from drivers/net/stmmac/dwmac100_core.c)12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c (renamed from drivers/net/stmmac/dwmac100_dma.c)0
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h (renamed from drivers/net/stmmac/dwmac_dma.h)1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c (renamed from drivers/net/stmmac/dwmac_lib.c)0
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c (renamed from drivers/net/stmmac/enh_desc.c)22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h131
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c265
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c (renamed from drivers/net/stmmac/norm_desc.c)14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c126
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h (renamed from drivers/net/stmmac/stmmac.h)8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c477
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (renamed from drivers/net/stmmac/stmmac_main.c)479
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c (renamed from drivers/net/stmmac/stmmac_mdio.c)0
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c (renamed from drivers/net/stmmac/stmmac_timer.c)0
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h (renamed from drivers/net/stmmac/stmmac_timer.h)0
-rw-r--r--drivers/net/ethernet/sun/Kconfig88
-rw-r--r--drivers/net/ethernet/sun/Makefile11
-rw-r--r--drivers/net/ethernet/sun/cassini.c (renamed from drivers/net/cassini.c)25
-rw-r--r--drivers/net/ethernet/sun/cassini.h (renamed from drivers/net/cassini.h)0
-rw-r--r--drivers/net/ethernet/sun/niu.c (renamed from drivers/net/niu.c)34
-rw-r--r--drivers/net/ethernet/sun/niu.h (renamed from drivers/net/niu.h)0
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c (renamed from drivers/net/sunbmac.c)33
-rw-r--r--drivers/net/ethernet/sun/sunbmac.h (renamed from drivers/net/sunbmac.h)17
-rw-r--r--drivers/net/ethernet/sun/sungem.c (renamed from drivers/net/sungem.c)16
-rw-r--r--drivers/net/ethernet/sun/sungem.h (renamed from drivers/net/sungem.h)0
-rw-r--r--drivers/net/ethernet/sun/sunhme.c (renamed from drivers/net/sunhme.c)11
-rw-r--r--drivers/net/ethernet/sun/sunhme.h (renamed from drivers/net/sunhme.h)0
-rw-r--r--drivers/net/ethernet/sun/sunqe.c (renamed from drivers/net/sunqe.c)2
-rw-r--r--drivers/net/ethernet/sun/sunqe.h (renamed from drivers/net/sunqe.h)0
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c (renamed from drivers/net/sunvnet.c)2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.h (renamed from drivers/net/sunvnet.h)0
-rw-r--r--drivers/net/ethernet/tehuti/Kconfig27
-rw-r--r--drivers/net/ethernet/tehuti/Makefile5
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c (renamed from drivers/net/tehuti.c)20
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.h (renamed from drivers/net/tehuti.h)0
-rw-r--r--drivers/net/ethernet/ti/Kconfig77
-rw-r--r--drivers/net/ethernet/ti/Makefile9
-rw-r--r--drivers/net/ethernet/ti/cpmac.c (renamed from drivers/net/cpmac.c)2
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c (renamed from drivers/net/davinci_cpdma.c)0
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h (renamed from drivers/net/davinci_cpdma.h)0
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c (renamed from drivers/net/davinci_emac.c)2
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c (renamed from drivers/net/davinci_mdio.c)0
-rw-r--r--drivers/net/ethernet/ti/tlan.c (renamed from drivers/net/tlan.c)2
-rw-r--r--drivers/net/ethernet/ti/tlan.h (renamed from drivers/net/tlan.h)0
-rw-r--r--drivers/net/ethernet/tile/Kconfig15
-rw-r--r--drivers/net/ethernet/tile/Makefile (renamed from drivers/net/tile/Makefile)0
-rw-r--r--drivers/net/ethernet/tile/tilepro.c (renamed from drivers/net/tile/tilepro.c)4
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig57
-rw-r--r--drivers/net/ethernet/toshiba/Makefile10
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c (renamed from drivers/net/ps3_gelic_net.c)2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h (renamed from drivers/net/ps3_gelic_net.h)0
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c (renamed from drivers/net/ps3_gelic_wireless.c)2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.h (renamed from drivers/net/ps3_gelic_wireless.h)0
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c (renamed from drivers/net/spider_net.c)6
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h (renamed from drivers/net/spider_net.h)2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c (renamed from drivers/net/spider_net_ethtool.c)0
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c (renamed from drivers/net/tc35815.c)2
-rw-r--r--drivers/net/ethernet/tundra/Kconfig29
-rw-r--r--drivers/net/ethernet/tundra/Makefile5
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c (renamed from drivers/net/tsi108_eth.c)13
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.h (renamed from drivers/net/tsi108_eth.h)0
-rw-r--r--drivers/net/ethernet/via/Kconfig59
-rw-r--r--drivers/net/ethernet/via/Makefile6
-rw-r--r--drivers/net/ethernet/via/via-rhine.c (renamed from drivers/net/via-rhine.c)2
-rw-r--r--drivers/net/ethernet/via/via-velocity.c (renamed from drivers/net/via-velocity.c)159
-rw-r--r--drivers/net/ethernet/via/via-velocity.h (renamed from drivers/net/via-velocity.h)0
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig36
-rw-r--r--drivers/net/ethernet/xilinx/Makefile7
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h (renamed from drivers/net/ll_temac.h)0
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c (renamed from drivers/net/ll_temac_main.c)35
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c (renamed from drivers/net/ll_temac_mdio.c)0
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c (renamed from drivers/net/xilinx_emaclite.c)0
-rw-r--r--drivers/net/ethernet/xircom/Kconfig31
-rw-r--r--drivers/net/ethernet/xircom/Makefile5
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c (renamed from drivers/net/pcmcia/xirc2ps_cs.c)2
-rw-r--r--drivers/net/ethernet/xscale/Kconfig32
-rw-r--r--drivers/net/ethernet/xscale/Makefile6
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/Kconfig6
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/Makefile (renamed from drivers/net/ixp2000/Makefile)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/caleb.c (renamed from drivers/net/ixp2000/caleb.c)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/caleb.h (renamed from drivers/net/ixp2000/caleb.h)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/enp2611.c (renamed from drivers/net/ixp2000/enp2611.c)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c (renamed from drivers/net/ixp2000/ixp2400-msf.c)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h (renamed from drivers/net/ixp2000/ixp2400-msf.h)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc (renamed from drivers/net/ixp2000/ixp2400_rx.uc)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode (renamed from drivers/net/ixp2000/ixp2400_rx.ucode)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc (renamed from drivers/net/ixp2000/ixp2400_tx.uc)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode (renamed from drivers/net/ixp2000/ixp2400_tx.ucode)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev.c (renamed from drivers/net/ixp2000/ixpdev.c)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev.h (renamed from drivers/net/ixp2000/ixpdev.h)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h (renamed from drivers/net/ixp2000/ixpdev_priv.h)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/pm3386.c (renamed from drivers/net/ixp2000/pm3386.c)0
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/pm3386.h (renamed from drivers/net/ixp2000/pm3386.h)0
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c (renamed from drivers/net/arm/ixp4xx_eth.c)2
-rw-r--r--drivers/net/fddi/Kconfig77
-rw-r--r--drivers/net/fddi/Makefile6
-rw-r--r--drivers/net/fddi/defxx.c (renamed from drivers/net/defxx.c)2
-rw-r--r--drivers/net/fddi/defxx.h (renamed from drivers/net/defxx.h)0
-rw-r--r--drivers/net/fddi/skfp/Makefile (renamed from drivers/net/skfp/Makefile)0
-rw-r--r--drivers/net/fddi/skfp/cfm.c (renamed from drivers/net/skfp/cfm.c)0
-rw-r--r--drivers/net/fddi/skfp/drvfbi.c (renamed from drivers/net/skfp/drvfbi.c)0
-rw-r--r--drivers/net/fddi/skfp/ecm.c (renamed from drivers/net/skfp/ecm.c)0
-rw-r--r--drivers/net/fddi/skfp/ess.c (renamed from drivers/net/skfp/ess.c)0
-rw-r--r--drivers/net/fddi/skfp/fplustm.c (renamed from drivers/net/skfp/fplustm.c)0
-rw-r--r--drivers/net/fddi/skfp/h/cmtdef.h (renamed from drivers/net/skfp/h/cmtdef.h)4
-rw-r--r--drivers/net/fddi/skfp/h/fddi.h (renamed from drivers/net/skfp/h/fddi.h)0
-rw-r--r--drivers/net/fddi/skfp/h/fddimib.h (renamed from drivers/net/skfp/h/fddimib.h)0
-rw-r--r--drivers/net/fddi/skfp/h/fplustm.h (renamed from drivers/net/skfp/h/fplustm.h)0
-rw-r--r--drivers/net/fddi/skfp/h/hwmtm.h (renamed from drivers/net/skfp/h/hwmtm.h)2
-rw-r--r--drivers/net/fddi/skfp/h/mbuf.h (renamed from drivers/net/skfp/h/mbuf.h)0
-rw-r--r--drivers/net/fddi/skfp/h/osdef1st.h (renamed from drivers/net/skfp/h/osdef1st.h)0
-rw-r--r--drivers/net/fddi/skfp/h/sba.h (renamed from drivers/net/skfp/h/sba.h)4
-rw-r--r--drivers/net/fddi/skfp/h/sba_def.h (renamed from drivers/net/skfp/h/sba_def.h)0
-rw-r--r--drivers/net/fddi/skfp/h/skfbi.h (renamed from drivers/net/skfp/h/skfbi.h)0
-rw-r--r--drivers/net/fddi/skfp/h/skfbiinc.h (renamed from drivers/net/skfp/h/skfbiinc.h)2
-rw-r--r--drivers/net/fddi/skfp/h/smc.h (renamed from drivers/net/skfp/h/smc.h)14
-rw-r--r--drivers/net/fddi/skfp/h/smt.h (renamed from drivers/net/skfp/h/smt.h)0
-rw-r--r--drivers/net/fddi/skfp/h/smt_p.h (renamed from drivers/net/skfp/h/smt_p.h)0
-rw-r--r--drivers/net/fddi/skfp/h/smtstate.h (renamed from drivers/net/skfp/h/smtstate.h)0
-rw-r--r--drivers/net/fddi/skfp/h/supern_2.h (renamed from drivers/net/skfp/h/supern_2.h)0
-rw-r--r--drivers/net/fddi/skfp/h/targethw.h (renamed from drivers/net/skfp/h/targethw.h)6
-rw-r--r--drivers/net/fddi/skfp/h/targetos.h (renamed from drivers/net/skfp/h/targetos.h)2
-rw-r--r--drivers/net/fddi/skfp/h/types.h (renamed from drivers/net/skfp/h/types.h)0
-rw-r--r--drivers/net/fddi/skfp/hwmtm.c (renamed from drivers/net/skfp/hwmtm.c)0
-rw-r--r--drivers/net/fddi/skfp/hwt.c (renamed from drivers/net/skfp/hwt.c)0
-rw-r--r--drivers/net/fddi/skfp/pcmplc.c (renamed from drivers/net/skfp/pcmplc.c)0
-rw-r--r--drivers/net/fddi/skfp/pmf.c (renamed from drivers/net/skfp/pmf.c)0
-rw-r--r--drivers/net/fddi/skfp/queue.c (renamed from drivers/net/skfp/queue.c)0
-rw-r--r--drivers/net/fddi/skfp/rmt.c (renamed from drivers/net/skfp/rmt.c)0
-rw-r--r--drivers/net/fddi/skfp/skfddi.c (renamed from drivers/net/skfp/skfddi.c)2
-rw-r--r--drivers/net/fddi/skfp/smt.c (renamed from drivers/net/skfp/smt.c)0
-rw-r--r--drivers/net/fddi/skfp/smtdef.c (renamed from drivers/net/skfp/smtdef.c)0
-rw-r--r--drivers/net/fddi/skfp/smtinit.c (renamed from drivers/net/skfp/smtinit.c)0
-rw-r--r--drivers/net/fddi/skfp/smttimer.c (renamed from drivers/net/skfp/smttimer.c)0
-rw-r--r--drivers/net/fddi/skfp/srf.c (renamed from drivers/net/skfp/srf.c)0
-rw-r--r--drivers/net/fs_enet/Kconfig34
-rw-r--r--drivers/net/hippi/Kconfig39
-rw-r--r--drivers/net/hippi/Makefile5
-rw-r--r--drivers/net/hippi/rrunner.c (renamed from drivers/net/rrunner.c)0
-rw-r--r--drivers/net/hippi/rrunner.h (renamed from drivers/net/rrunner.h)0
-rw-r--r--drivers/net/ibm_newemac/Kconfig76
-rw-r--r--drivers/net/ibm_newemac/Makefile11
-rw-r--r--drivers/net/ibm_newemac/core.c3074
-rw-r--r--drivers/net/ibm_newemac/core.h462
-rw-r--r--drivers/net/ibm_newemac/debug.h83
-rw-r--r--drivers/net/igb/igb.h415
-rw-r--r--drivers/net/igbvf/ethtool.c534
-rw-r--r--drivers/net/igbvf/mbx.c350
-rw-r--r--drivers/net/igbvf/netdev.c2859
-rw-r--r--drivers/net/irda/Kconfig2
-rw-r--r--drivers/net/irda/sh_irda.c124
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c687
-rw-r--r--drivers/net/ixgbevf/ethtool.c742
-rw-r--r--drivers/net/ixgbevf/mbx.c341
-rw-r--r--drivers/net/ixp2000/Kconfig6
-rw-r--r--drivers/net/macvlan.c10
-rw-r--r--drivers/net/macvtap.c183
-rw-r--r--drivers/net/netconsole.c5
-rw-r--r--drivers/net/octeon/Kconfig10
-rw-r--r--drivers/net/octeon/Makefile2
-rw-r--r--drivers/net/pci-skeleton.c1923
-rw-r--r--drivers/net/pcmcia/Kconfig123
-rw-r--r--drivers/net/pcmcia/Makefile16
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/broadcom.c2
-rw-r--r--drivers/net/phy/dp83640.c214
-rw-r--r--drivers/net/phy/icplus.c100
-rw-r--r--drivers/net/phy/mdio-gpio.c2
-rw-r--r--drivers/net/phy/phy_device.c4
-rw-r--r--drivers/net/phy/vitesse.c34
-rw-r--r--drivers/net/plip/Kconfig38
-rw-r--r--drivers/net/plip/Makefile5
-rw-r--r--drivers/net/plip/plip.c (renamed from drivers/net/plip.c)0
-rw-r--r--drivers/net/ppp/Kconfig175
-rw-r--r--drivers/net/ppp/Makefile13
-rw-r--r--drivers/net/ppp/bsd_comp.c (renamed from drivers/net/bsd_comp.c)0
-rw-r--r--drivers/net/ppp/ppp_async.c (renamed from drivers/net/ppp_async.c)0
-rw-r--r--drivers/net/ppp/ppp_deflate.c (renamed from drivers/net/ppp_deflate.c)0
-rw-r--r--drivers/net/ppp/ppp_generic.c (renamed from drivers/net/ppp_generic.c)0
-rw-r--r--drivers/net/ppp/ppp_mppe.c (renamed from drivers/net/ppp_mppe.c)0
-rw-r--r--drivers/net/ppp/ppp_mppe.h (renamed from drivers/net/ppp_mppe.h)0
-rw-r--r--drivers/net/ppp/ppp_synctty.c (renamed from drivers/net/ppp_synctty.c)0
-rw-r--r--drivers/net/ppp/pppoe.c (renamed from drivers/net/pppoe.c)0
-rw-r--r--drivers/net/ppp/pppox.c (renamed from drivers/net/pppox.c)0
-rw-r--r--drivers/net/ppp/pptp.c (renamed from drivers/net/pptp.c)22
-rw-r--r--drivers/net/sfc/Kconfig21
-rw-r--r--drivers/net/sfc/ethtool.c1012
-rw-r--r--drivers/net/sh_eth.h837
-rw-r--r--drivers/net/slip/Kconfig79
-rw-r--r--drivers/net/slip/Makefile6
-rw-r--r--drivers/net/slip/slhc.c (renamed from drivers/net/slhc.c)0
-rw-r--r--drivers/net/slip/slip.c (renamed from drivers/net/slip.c)29
-rw-r--r--drivers/net/slip/slip.h (renamed from drivers/net/slip.h)9
-rw-r--r--drivers/net/stmmac/Kconfig57
-rw-r--r--drivers/net/stmmac/Makefile5
-rw-r--r--drivers/net/stmmac/common.h252
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c359
-rw-r--r--drivers/net/sungem_phy.c7
-rw-r--r--drivers/net/tokenring/3c359.c2
-rw-r--r--drivers/net/tokenring/Kconfig15
-rw-r--r--drivers/net/tokenring/Makefile21
-rw-r--r--drivers/net/tokenring/ibmtr.c2
-rw-r--r--drivers/net/tokenring/ibmtr_cs.c (renamed from drivers/net/pcmcia/ibmtr_cs.c)2
-rw-r--r--drivers/net/tokenring/lanstreamer.c2
-rw-r--r--drivers/net/tokenring/olympic.c2
-rw-r--r--drivers/net/tokenring/smctr.c2
-rw-r--r--drivers/net/tokenring/tms380tr.c2
-rw-r--r--drivers/net/tulip/Kconfig171
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/Kconfig3
-rw-r--r--drivers/net/usb/asix.c412
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/usb/int51x1.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/lg-vl600.c19
-rw-r--r--drivers/net/usb/mcs7830.c2
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/rtl8150.c113
-rw-r--r--drivers/net/usb/smsc75xx.c2
-rw-r--r--drivers/net/usb/smsc95xx.c2
-rw-r--r--drivers/net/usb/usbnet.c8
-rw-r--r--drivers/net/virtio_net.c42
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c22
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c2
-rw-r--r--drivers/net/wan/hdlc_ppp.c14
-rw-r--r--drivers/net/wan/hdlc_x25.c16
-rw-r--r--drivers/net/wan/lapbether.c3
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wan/x25_asy.c3
-rw-r--r--drivers/net/wimax/i2400m/usb.c4
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile5
-rw-r--r--drivers/net/wireless/adm8211.c3
-rw-r--r--drivers/net/wireless/airo.c4
-rw-r--r--drivers/net/wireless/at76c50x-usb.c18
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath.h53
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h98
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c18
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c37
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h55
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c15
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c10
-rw-r--r--drivers/net/wireless/ath/ath5k/gpio.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c13
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c5
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c91
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c10
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c79
-rw-r--r--drivers/net/wireless/ath/ath5k/rfkill.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/sysfs.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/trace.h3
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig15
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile37
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c689
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.h250
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c1914
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h39
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h97
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h637
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c934
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h138
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif-ops.h77
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h208
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.c2478
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h607
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_hif.c641
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_hif.h92
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c1727
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c1477
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c949
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h364
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.c167
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.h36
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c1478
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c3127
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h2282
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_initvals.h131
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c43
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h266
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c56
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h3403
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c257
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h204
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c280
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c310
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c610
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c378
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c123
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h130
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.c153
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.h28
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_1p0_initvals.h1833
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h1928
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h1673
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h34
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c80
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c493
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h68
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c272
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c169
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c241
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h78
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c257
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h83
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c81
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c59
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h57
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c629
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c57
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c44
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c109
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h83
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c894
-rw-r--r--drivers/net/wireless/ath/carl9170/Kconfig14
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h24
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c34
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h11
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c236
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c16
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h4
-rw-r--r--drivers/net/wireless/ath/main.c8
-rw-r--r--drivers/net/wireless/ath/regd.h2
-rw-r--r--drivers/net/wireless/ath/regd_common.h2
-rw-r--r--drivers/net/wireless/b43/Kconfig22
-rw-r--r--drivers/net/wireless/b43/b43.h65
-rw-r--r--drivers/net/wireless/b43/bus.c2
-rw-r--r--drivers/net/wireless/b43/dma.c157
-rw-r--r--drivers/net/wireless/b43/dma.h16
-rw-r--r--drivers/net/wireless/b43/main.c133
-rw-r--r--drivers/net/wireless/b43/phy_common.c32
-rw-r--r--drivers/net/wireless/b43/phy_common.h2
-rw-r--r--drivers/net/wireless/b43/phy_ht.c205
-rw-r--r--drivers/net/wireless/b43/phy_ht.h19
-rw-r--r--drivers/net/wireless/b43/phy_lcn.c853
-rw-r--r--drivers/net/wireless/b43/phy_lcn.h19
-rw-r--r--drivers/net/wireless/b43/phy_n.c575
-rw-r--r--drivers/net/wireless/b43/phy_n.h4
-rw-r--r--drivers/net/wireless/b43/pio.c12
-rw-r--r--drivers/net/wireless/b43/radio_2055.c1
-rw-r--r--drivers/net/wireless/b43/radio_2056.c2
-rw-r--r--drivers/net/wireless/b43/radio_2056.h26
-rw-r--r--drivers/net/wireless/b43/radio_2059.c2
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c1
-rw-r--r--drivers/net/wireless/b43/tables_phy_ht.c86
-rw-r--r--drivers/net/wireless/b43/tables_phy_ht.h4
-rw-r--r--drivers/net/wireless/b43/tables_phy_lcn.c690
-rw-r--r--drivers/net/wireless/b43/tables_phy_lcn.h18
-rw-r--r--drivers/net/wireless/b43/xmit.c138
-rw-r--r--drivers/net/wireless/b43/xmit.h72
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h5
-rw-r--r--drivers/net/wireless/b43legacy/dma.c374
-rw-r--r--drivers/net/wireless/b43legacy/dma.h107
-rw-r--r--drivers/net/wireless/b43legacy/main.c11
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig35
-rw-r--r--drivers/net/wireless/brcm80211/Makefile23
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile33
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h (renamed from drivers/staging/brcm80211/brcmfmac/bcmchip.h)0
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c371
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c626
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h776
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h57
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c498
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c895
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h58
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c1356
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h60
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c4591
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h252
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c3868
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h (renamed from drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h)91
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/Makefile51
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c2079
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h378
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c (renamed from drivers/staging/brcm80211/brcmsmac/ampdu.c)594
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.h (renamed from drivers/staging/brcm80211/brcmsmac/ampdu.h)0
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/antsel.c (renamed from drivers/staging/brcm80211/brcmsmac/antsel.c)240
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/antsel.h (renamed from drivers/staging/brcm80211/brcmsmac/antsel.h)0
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.c23
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h92
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c1591
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.h53
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/d11.h1898
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c1425
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.h120
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c1696
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h108
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c8775
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.h735
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.c (renamed from drivers/staging/brcm80211/brcmsmac/nicpci.c)181
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.h82
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/otp.c426
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/otp.h36
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c (renamed from drivers/staging/brcm80211/brcmsmac/phy/phy_cmn.c)1163
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/phy_hal.h)85
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/phy_int.h)212
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c (renamed from drivers/staging/brcm80211/brcmsmac/phy/phy_lcn.c)3478
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/phy_lcn.h)0
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c (renamed from drivers/staging/brcm80211/brcmsmac/phy/phy_n.c)15452
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_qmath.c308
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_qmath.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/phy_qmath.h)0
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_radio.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/phy_radio.h)0
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phyreg_n.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/phyreg_n.h)0
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c3250
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/phytbl_lcn.h)0
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_n.c (renamed from drivers/staging/brcm80211/brcmsmac/phy/phytbl_n.c)131
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_n.h50
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c (renamed from drivers/staging/brcm80211/brcmsmac/phy_shim.c)53
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h182
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.c (renamed from drivers/staging/brcm80211/brcmsmac/pmu.c)76
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h (renamed from drivers/staging/brcm80211/brcmsmac/pmu.h)14
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h634
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/rate.c514
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/rate.h250
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/scb.h82
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.c1298
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.h (renamed from drivers/staging/brcm80211/brcmsmac/srom.h)4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/stf.c436
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/stf.h (renamed from drivers/staging/brcm80211/brcmsmac/stf.h)6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/types.h352
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.c109
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h58
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/Makefile28
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c386
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h (renamed from drivers/staging/brcm80211/include/brcm_hw_ids.h)0
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h195
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_wifi.h239
-rw-r--r--drivers/net/wireless/brcm80211/include/chipcommon.h (renamed from drivers/staging/brcm80211/include/chipcommon.h)13
-rw-r--r--drivers/net/wireless/brcm80211/include/defs.h (renamed from drivers/staging/brcm80211/include/defs.h)32
-rw-r--r--drivers/net/wireless/brcm80211/include/soc.h (renamed from drivers/staging/brcm80211/include/soc.h)9
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c6
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c4
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c3
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_tx.c2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.c1
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c1
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.c1
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c1
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c6
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h6
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.c1
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c1
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c1
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig43
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile38
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c90
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h88
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c104
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000-hw.h81
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c93
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c133
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c299
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c1120
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c117
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rx.c1143
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c104
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c943
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.c82
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c992
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c119
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1499
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h216
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-bus.h90
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-cfg.h117
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h62
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c405
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h179
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h48
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c450
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h556
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c306
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h184
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c192
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h61
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c49
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c1029
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c764
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-shared.h534
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c832
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h138
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sv-open.c30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h82
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h436
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c1435
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c1141
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c1998
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c979
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c1038
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.c1115
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h278
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c6
-rw-r--r--drivers/net/wireless/libertas/README25
-rw-r--r--drivers/net/wireless/libertas/cfg.c120
-rw-r--r--drivers/net/wireless/libertas/cfg.h1
-rw-r--r--drivers/net/wireless/libertas/cmd.c6
-rw-r--r--drivers/net/wireless/libertas/decl.h6
-rw-r--r--drivers/net/wireless/libertas/dev.h30
-rw-r--r--drivers/net/wireless/libertas/ethtool.c1
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c277
-rw-r--r--drivers/net/wireless/libertas/if_spi.c4
-rw-r--r--drivers/net/wireless/libertas/if_usb.c34
-rw-r--r--drivers/net/wireless/libertas/main.c249
-rw-r--r--drivers/net/wireless/libertas/mesh.c79
-rw-r--r--drivers/net/wireless/libertas/mesh.h27
-rw-r--r--drivers/net/wireless/libertas/rx.c1
-rw-r--r--drivers/net/wireless/libertas/tx.c1
-rw-r--r--drivers/net/wireless/libertas_tf/deb_defs.h2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c9
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c5
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.h1
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig11
-rw-r--r--drivers/net/wireless/mwifiex/Makefile3
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c392
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.h3
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c10
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c37
-rw-r--r--drivers/net/wireless/mwifiex/decl.h9
-rw-r--r--drivers/net/wireless/mwifiex/fw.h51
-rw-r--r--drivers/net/wireless/mwifiex/init.c96
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h5
-rw-r--r--drivers/net/wireless/mwifiex/join.c35
-rw-r--r--drivers/net/wireless/mwifiex/main.c282
-rw-r--r--drivers/net/wireless/mwifiex/main.h107
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c1948
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h148
-rw-r--r--drivers/net/wireless/mwifiex/scan.c1647
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c19
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h24
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c64
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c9
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c295
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c2
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c45
-rw-r--r--drivers/net/wireless/mwifiex/util.c5
-rw-r--r--drivers/net/wireless/mwifiex/util.h9
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c36
-rw-r--r--drivers/net/wireless/mwl8k.c16
-rw-r--r--drivers/net/wireless/orinoco/main.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c2
-rw-r--r--drivers/net/wireless/orinoco/wext.c1
-rw-r--r--drivers/net/wireless/p54/eeprom.c26
-rw-r--r--drivers/net/wireless/p54/fwio.c2
-rw-r--r--drivers/net/wireless/p54/main.c116
-rw-r--r--drivers/net/wireless/p54/p54.h18
-rw-r--r--drivers/net/wireless/p54/p54spi.c2
-rw-r--r--drivers/net/wireless/p54/txrx.c72
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c49
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c42
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c198
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c61
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h33
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c22
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c53
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c41
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c7
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c5
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c5
-rw-r--r--drivers/net/wireless/rtlwifi/base.c167
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/core.c11
-rw-r--r--drivers/net/wireless/rtlwifi/debug.c2
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c62
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h153
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c77
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c116
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/def.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.h6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/def.h170
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c15
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c63
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c121
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.h6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/def.h41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/reg.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c64
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c169
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c40
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h54
-rw-r--r--drivers/net/wireless/wl1251/cmd.h2
-rw-r--r--drivers/net/wireless/wl1251/main.c3
-rw-r--r--drivers/net/wireless/wl1251/wl12xx_80211.h2
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig10
-rw-r--r--drivers/net/wireless/wl12xx/Makefile6
-rw-r--r--drivers/net/wireless/wl12xx/acx.c378
-rw-r--r--drivers/net/wireless/wl12xx/acx.h413
-rw-r--r--drivers/net/wireless/wl12xx/boot.c51
-rw-r--r--drivers/net/wireless/wl12xx/boot.h3
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c866
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h354
-rw-r--r--drivers/net/wireless/wl12xx/conf.h387
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c105
-rw-r--r--drivers/net/wireless/wl12xx/event.c56
-rw-r--r--drivers/net/wireless/wl12xx/event.h80
-rw-r--r--drivers/net/wireless/wl12xx/init.c113
-rw-r--r--drivers/net/wireless/wl12xx/io.h1
-rw-r--r--drivers/net/wireless/wl12xx/main.c1345
-rw-r--r--drivers/net/wireless/wl12xx/ps.c12
-rw-r--r--drivers/net/wireless/wl12xx/reg.h75
-rw-r--r--drivers/net/wireless/wl12xx/rx.c69
-rw-r--r--drivers/net/wireless/wl12xx/rx.h18
-rw-r--r--drivers/net/wireless/wl12xx/scan.c200
-rw-r--r--drivers/net/wireless/wl12xx/scan.h31
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c4
-rw-r--r--drivers/net/wireless/wl12xx/sdio_test.c17
-rw-r--r--drivers/net/wireless/wl12xx/spi.c4
-rw-r--r--drivers/net/wireless/wl12xx/tx.c192
-rw-r--r--drivers/net/wireless/wl12xx/tx.h26
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h174
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h27
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zd1201.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c2
-rw-r--r--drivers/net/xen-netback/netback.c58
-rw-r--r--drivers/net/xen-netfront.c38
-rw-r--r--drivers/nfc/Kconfig11
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/nfcwilink.c342
-rw-r--r--drivers/nfc/pn533.c21
-rw-r--r--drivers/oprofile/event_buffer.c4
-rw-r--r--drivers/oprofile/oprofile_perf.c4
-rw-r--r--drivers/oprofile/oprofilefs.c6
-rw-r--r--drivers/parisc/sba_iommu.c16
-rw-r--r--drivers/parport/parport_pc.c3
-rw-r--r--drivers/pci/quirks.c2
-rw-r--r--drivers/pci/xen-pcifront.c5
-rw-r--r--drivers/pcmcia/sa1100_simpad.c35
-rw-r--r--drivers/pinctrl/Kconfig43
-rw-r--r--drivers/pinctrl/Makefile8
-rw-r--r--drivers/pinctrl/core.c598
-rw-r--r--drivers/pinctrl/core.h71
-rw-r--r--drivers/pinctrl/pinmux-sirf.c1215
-rw-r--r--drivers/pinctrl/pinmux-u300.c1135
-rw-r--r--drivers/pinctrl/pinmux.c1190
-rw-r--r--drivers/pinctrl/pinmux.h47
-rw-r--r--drivers/platform/x86/Kconfig5
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/s390/cio/blacklist.c3
-rw-r--r--drivers/s390/cio/qdio.h38
-rw-r--r--drivers/s390/cio/qdio_debug.c3
-rw-r--r--drivers/s390/cio/qdio_main.c208
-rw-r--r--drivers/s390/cio/qdio_setup.c83
-rw-r--r--drivers/s390/cio/qdio_thinint.c88
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/qeth_core.h50
-rw-r--r--drivers/s390/net/qeth_core_main.c780
-rw-r--r--drivers/s390/net/qeth_l2_main.c4
-rw-r--r--drivers/s390/net/qeth_l3.h4
-rw-r--r--drivers/s390/net/qeth_l3_main.c92
-rw-r--r--drivers/s390/net/qeth_l3_sys.c110
-rw-r--r--drivers/scsi/aacraid/src.c1
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic79xx3
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic7xxx3
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dump.c2
-rw-r--r--drivers/scsi/bfa/bfad_drv.h1
-rw-r--r--drivers/scsi/bnx2fc/Kconfig3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c6
-rw-r--r--drivers/scsi/bnx2i/Kconfig3
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/Kbuild2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/Kconfig3
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c1
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/Kbuild2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/Kconfig3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c1
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c22
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h2
-rw-r--r--drivers/scsi/fcoe/fcoe.c7
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c7
-rw-r--r--drivers/scsi/isci/phy.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c10
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c1
-rw-r--r--drivers/scsi/mvsas/mv_sas.h1
-rw-r--r--drivers/scsi/pmcraid.c1
-rw-r--r--drivers/scsi/pmcraid.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c9
-rw-r--r--drivers/scsi/sg.c18
-rw-r--r--drivers/ssb/main.c24
-rw-r--r--drivers/staging/Kconfig8
-rw-r--r--drivers/staging/Makefile5
-rw-r--r--drivers/staging/altera-stapl/altera.c3
-rw-r--r--drivers/staging/ath6kl/Kconfig158
-rw-r--r--drivers/staging/ath6kl/Makefile122
-rw-r--r--drivers/staging/ath6kl/TODO25
-rw-r--r--drivers/staging/ath6kl/bmi/include/bmi_internal.h54
-rw-r--r--drivers/staging/ath6kl/bmi/src/bmi.c1010
-rw-r--r--drivers/staging/ath6kl/hif/common/hif_sdio_common.h87
-rw-r--r--drivers/staging/ath6kl/hif/sdio/linux_sdio/include/hif_internal.h131
-rw-r--r--drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c1273
-rw-r--r--drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c393
-rw-r--r--drivers/staging/ath6kl/htc2/AR6000/ar6k.c1479
-rw-r--r--drivers/staging/ath6kl/htc2/AR6000/ar6k.h401
-rw-r--r--drivers/staging/ath6kl/htc2/AR6000/ar6k_events.c783
-rw-r--r--drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox.c755
-rw-r--r--drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox_hciuart.c1284
-rw-r--r--drivers/staging/ath6kl/htc2/htc.c575
-rw-r--r--drivers/staging/ath6kl/htc2/htc_debug.h38
-rw-r--r--drivers/staging/ath6kl/htc2/htc_internal.h211
-rw-r--r--drivers/staging/ath6kl/htc2/htc_recv.c1572
-rw-r--r--drivers/staging/ath6kl/htc2/htc_send.c1018
-rw-r--r--drivers/staging/ath6kl/htc2/htc_services.c450
-rw-r--r--drivers/staging/ath6kl/include/a_config.h31
-rw-r--r--drivers/staging/ath6kl/include/a_debug.h195
-rw-r--r--drivers/staging/ath6kl/include/a_drv.h32
-rw-r--r--drivers/staging/ath6kl/include/a_drv_api.h204
-rw-r--r--drivers/staging/ath6kl/include/a_osapi.h32
-rw-r--r--drivers/staging/ath6kl/include/aggr_recv_api.h140
-rw-r--r--drivers/staging/ath6kl/include/ar3kconfig.h65
-rw-r--r--drivers/staging/ath6kl/include/ar6000_api.h32
-rw-r--r--drivers/staging/ath6kl/include/ar6000_diag.h48
-rw-r--r--drivers/staging/ath6kl/include/ar6kap_common.h44
-rw-r--r--drivers/staging/ath6kl/include/athbtfilter.h135
-rw-r--r--drivers/staging/ath6kl/include/bmi.h134
-rw-r--r--drivers/staging/ath6kl/include/common/AR6002/AR6K_version.h52
-rw-r--r--drivers/staging/ath6kl/include/common/AR6002/addrs.h90
-rw-r--r--drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_athr_wlan_map.h40
-rw-r--r--drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_map.h40
-rw-r--r--drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_host_reg.h24
-rw-r--r--drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_reg.h552
-rw-r--r--drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_host_reg.h471
-rw-r--r--drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_reg.h589
-rw-r--r--drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_reg.h187
-rw-r--r--drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_wlan_reg.h162
-rw-r--r--drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/uart_reg.h40
-rw-r--r--drivers/staging/ath6kl/include/common/athdefs.h75
-rw-r--r--drivers/staging/ath6kl/include/common/bmi_msg.h233
-rw-r--r--drivers/staging/ath6kl/include/common/cnxmgmt.h36
-rw-r--r--drivers/staging/ath6kl/include/common/dbglog.h126
-rw-r--r--drivers/staging/ath6kl/include/common/dbglog_id.h558
-rw-r--r--drivers/staging/ath6kl/include/common/discovery.h75
-rw-r--r--drivers/staging/ath6kl/include/common/epping_test.h111
-rw-r--r--drivers/staging/ath6kl/include/common/gmboxif.h70
-rw-r--r--drivers/staging/ath6kl/include/common/gpio_reg.h9
-rw-r--r--drivers/staging/ath6kl/include/common/htc.h227
-rw-r--r--drivers/staging/ath6kl/include/common/htc_services.h52
-rw-r--r--drivers/staging/ath6kl/include/common/pkt_log.h45
-rw-r--r--drivers/staging/ath6kl/include/common/roaming.h41
-rw-r--r--drivers/staging/ath6kl/include/common/targaddrs.h395
-rw-r--r--drivers/staging/ath6kl/include/common/testcmd.h185
-rw-r--r--drivers/staging/ath6kl/include/common/tlpm.h38
-rw-r--r--drivers/staging/ath6kl/include/common/wlan_defs.h79
-rw-r--r--drivers/staging/ath6kl/include/common/wmi.h3220
-rw-r--r--drivers/staging/ath6kl/include/common/wmix.h271
-rw-r--r--drivers/staging/ath6kl/include/common_drv.h104
-rw-r--r--drivers/staging/ath6kl/include/dbglog_api.h52
-rw-r--r--drivers/staging/ath6kl/include/dl_list.h153
-rw-r--r--drivers/staging/ath6kl/include/dset_api.h65
-rw-r--r--drivers/staging/ath6kl/include/hci_transport_api.h259
-rw-r--r--drivers/staging/ath6kl/include/hif.h456
-rw-r--r--drivers/staging/ath6kl/include/host_version.h52
-rw-r--r--drivers/staging/ath6kl/include/htc_api.h575
-rw-r--r--drivers/staging/ath6kl/include/htc_packet.h227
-rw-r--r--drivers/staging/ath6kl/include/wlan_api.h128
-rw-r--r--drivers/staging/ath6kl/include/wmi_api.h441
-rw-r--r--drivers/staging/ath6kl/miscdrv/ar3kconfig.c565
-rw-r--r--drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c572
-rw-r--r--drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.h75
-rw-r--r--drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c969
-rw-r--r--drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.h113
-rw-r--r--drivers/staging/ath6kl/miscdrv/common_drv.c910
-rw-r--r--drivers/staging/ath6kl/miscdrv/credit_dist.c417
-rw-r--r--drivers/staging/ath6kl/miscdrv/miscdrv.h42
-rw-r--r--drivers/staging/ath6kl/os/linux/ar6000_drv.c6267
-rw-r--r--drivers/staging/ath6kl/os/linux/ar6000_pm.c626
-rw-r--r--drivers/staging/ath6kl/os/linux/ar6000_raw_if.c455
-rw-r--r--drivers/staging/ath6kl/os/linux/cfg80211.c1892
-rw-r--r--drivers/staging/ath6kl/os/linux/export_hci_transport.c124
-rw-r--r--drivers/staging/ath6kl/os/linux/hci_bridge.c1141
-rw-r--r--drivers/staging/ath6kl/os/linux/include/ar6000_drv.h776
-rw-r--r--drivers/staging/ath6kl/os/linux/include/ar6k_pal.h36
-rw-r--r--drivers/staging/ath6kl/os/linux/include/ar6xapi_linux.h190
-rw-r--r--drivers/staging/ath6kl/os/linux/include/athdrv_linux.h1217
-rw-r--r--drivers/staging/ath6kl/os/linux/include/cfg80211.h61
-rw-r--r--drivers/staging/ath6kl/os/linux/include/config_linux.h51
-rw-r--r--drivers/staging/ath6kl/os/linux/include/debug_linux.h50
-rw-r--r--drivers/staging/ath6kl/os/linux/include/export_hci_transport.h76
-rw-r--r--drivers/staging/ath6kl/os/linux/include/ieee80211_ioctl.h177
-rw-r--r--drivers/staging/ath6kl/os/linux/include/osapi_linux.h339
-rw-r--r--drivers/staging/ath6kl/os/linux/include/wlan_config.h108
-rw-r--r--drivers/staging/ath6kl/os/linux/include/wmi_filter_linux.h300
-rw-r--r--drivers/staging/ath6kl/os/linux/netbuf.c231
-rw-r--r--drivers/staging/ath6kl/reorder/aggr_rx_internal.h117
-rw-r--r--drivers/staging/ath6kl/reorder/rcv_aggr.c661
-rw-r--r--drivers/staging/ath6kl/wlan/include/ieee80211.h397
-rw-r--r--drivers/staging/ath6kl/wlan/include/ieee80211_node.h93
-rw-r--r--drivers/staging/ath6kl/wlan/src/wlan_node.c636
-rw-r--r--drivers/staging/ath6kl/wlan/src/wlan_recv_beacon.c199
-rw-r--r--drivers/staging/ath6kl/wlan/src/wlan_utils.c58
-rw-r--r--drivers/staging/ath6kl/wmi/wmi.c6444
-rw-r--r--drivers/staging/ath6kl/wmi/wmi_host.h102
-rw-r--r--drivers/staging/bcm/Bcmchar.c3171
-rw-r--r--drivers/staging/bcm/InterfaceDld.c456
-rw-r--r--drivers/staging/bcm/InterfaceInit.c39
-rw-r--r--drivers/staging/bcm/InterfaceMisc.c304
-rw-r--r--drivers/staging/bcm/Macros.h337
-rw-r--r--drivers/staging/bcm/Misc.c1833
-rw-r--r--drivers/staging/bcm/headers.h2
-rw-r--r--drivers/staging/bcm/nvm.c13
-rw-r--r--drivers/staging/brcm80211/Kconfig40
-rw-r--r--drivers/staging/brcm80211/Makefile24
-rw-r--r--drivers/staging/brcm80211/README1
-rw-r--r--drivers/staging/brcm80211/TODO13
-rw-r--r--drivers/staging/brcm80211/brcmfmac/Makefile39
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh.c642
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c1196
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd.h904
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_bus.h78
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_cdc.c502
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_common.c1196
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_dbg.h70
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_linux.c1736
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_proto.h75
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_sdio.c6772
-rw-r--r--drivers/staging/brcm80211/brcmfmac/sdio_host.h347
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c4152
-rw-r--r--drivers/staging/brcm80211/brcmsmac/Makefile58
-rw-r--r--drivers/staging/brcm80211/brcmsmac/aiutils.c2279
-rw-r--r--drivers/staging/brcm80211/brcmsmac/aiutils.h584
-rw-r--r--drivers/staging/brcm80211/brcmsmac/alloc.c275
-rw-r--r--drivers/staging/brcm80211/brcmsmac/alloc.h19
-rw-r--r--drivers/staging/brcm80211/brcmsmac/bmac.c3593
-rw-r--r--drivers/staging/brcm80211/brcmsmac/bmac.h174
-rw-r--r--drivers/staging/brcm80211/brcmsmac/channel.c1559
-rw-r--r--drivers/staging/brcm80211/brcmsmac/channel.h132
-rw-r--r--drivers/staging/brcm80211/brcmsmac/d11.h1775
-rw-r--r--drivers/staging/brcm80211/brcmsmac/dma.c1917
-rw-r--r--drivers/staging/brcm80211/brcmsmac/dma.h250
-rw-r--r--drivers/staging/brcm80211/brcmsmac/mac80211_if.c1934
-rw-r--r--drivers/staging/brcm80211/brcmsmac/mac80211_if.h108
-rw-r--r--drivers/staging/brcm80211/brcmsmac/main.c6102
-rw-r--r--drivers/staging/brcm80211/brcmsmac/main.h1025
-rw-r--r--drivers/staging/brcm80211/brcmsmac/nicpci.h85
-rw-r--r--drivers/staging/brcm80211/brcmsmac/otp.c545
-rw-r--r--drivers/staging/brcm80211/brcmsmac/otp.h47
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phy_qmath.c294
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phytbl_lcn.c3638
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phytbl_n.h40
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy_shim.h164
-rw-r--r--drivers/staging/brcm80211/brcmsmac/pub.h665
-rw-r--r--drivers/staging/brcm80211/brcmsmac/rate.c498
-rw-r--r--drivers/staging/brcm80211/brcmsmac/rate.h173
-rw-r--r--drivers/staging/brcm80211/brcmsmac/scb.h85
-rw-r--r--drivers/staging/brcm80211/brcmsmac/srom.c1237
-rw-r--r--drivers/staging/brcm80211/brcmsmac/stf.c477
-rw-r--r--drivers/staging/brcm80211/brcmsmac/types.h399
-rw-r--r--drivers/staging/brcm80211/brcmsmac/ucode_loader.c115
-rw-r--r--drivers/staging/brcm80211/brcmsmac/ucode_loader.h52
-rw-r--r--drivers/staging/brcm80211/brcmutil/Makefile29
-rw-r--r--drivers/staging/brcm80211/brcmutil/utils.c787
-rw-r--r--drivers/staging/brcm80211/brcmutil/wifi.c131
-rw-r--r--drivers/staging/brcm80211/include/brcmu_utils.h301
-rw-r--r--drivers/staging/brcm80211/include/brcmu_wifi.h243
-rw-r--r--drivers/staging/comedi/Kconfig18
-rw-r--r--drivers/staging/comedi/comedi_fops.c2
-rw-r--r--drivers/staging/comedi/drivers/Makefile1
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c2
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c13
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c5
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio.c10
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c18
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c2
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c34
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c2880
-rw-r--r--drivers/staging/crystalhd/bc_dts_defs.h4
-rw-r--r--drivers/staging/crystalhd/bc_dts_glob_lnx.h6
-rw-r--r--drivers/staging/crystalhd/bc_dts_types.h57
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.h2
-rw-r--r--drivers/staging/crystalhd/crystalhd_fw_if.h532
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.h6
-rw-r--r--drivers/staging/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/staging/cx25821/cx25821-audio-upstream.c43
-rw-r--r--drivers/staging/cx25821/cx25821-audio-upstream.h9
-rw-r--r--drivers/staging/cx25821/cx25821-audio.h6
-rw-r--r--drivers/staging/cx25821/cx25821-cards.c26
-rw-r--r--drivers/staging/cx25821/cx25821-core.c505
-rw-r--r--drivers/staging/cx25821/cx25821-gpio.c1
-rw-r--r--drivers/staging/cx25821/cx25821-gpio.h2
-rw-r--r--drivers/staging/cx25821/cx25821-i2c.c30
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream-ch2.c81
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream-ch2.h83
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream.c18
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream.h78
-rw-r--r--drivers/staging/cx25821/cx25821-video.c1244
-rw-r--r--drivers/staging/cx25821/cx25821-video.h79
-rw-r--r--drivers/staging/cx25821/cx25821.h10
-rw-r--r--drivers/staging/cxd2099/Makefile6
-rw-r--r--drivers/staging/cxt1e1/Kconfig3
-rw-r--r--drivers/staging/cxt1e1/linux.c6
-rw-r--r--drivers/staging/cxt1e1/sbecom_inline_linux.h64
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.c122
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.h4
-rw-r--r--drivers/staging/easycap/easycap.h11
-rw-r--r--drivers/staging/easycap/easycap_ioctl.c32
-rw-r--r--drivers/staging/easycap/easycap_main.c54
-rw-r--r--drivers/staging/easycap/easycap_sound.c3
-rw-r--r--drivers/staging/et131x/Kconfig9
-rw-r--r--drivers/staging/et131x/Makefile10
-rw-r--r--drivers/staging/et131x/README15
-rw-r--r--drivers/staging/et131x/et1310_address_map.h1434
-rw-r--r--drivers/staging/et131x/et1310_eeprom.c407
-rw-r--r--drivers/staging/et131x/et1310_mac.c654
-rw-r--r--drivers/staging/et131x/et1310_phy.c979
-rw-r--r--drivers/staging/et131x/et1310_phy.h458
-rw-r--r--drivers/staging/et131x/et1310_pm.c180
-rw-r--r--drivers/staging/et131x/et1310_rx.c1152
-rw-r--r--drivers/staging/et131x/et1310_rx.h243
-rw-r--r--drivers/staging/et131x/et1310_tx.c797
-rw-r--r--drivers/staging/et131x/et1310_tx.h150
-rw-r--r--drivers/staging/et131x/et131x.c5514
-rw-r--r--drivers/staging/et131x/et131x.h1721
-rw-r--r--drivers/staging/et131x/et131x_adapter.h243
-rw-r--r--drivers/staging/et131x/et131x_defs.h126
-rw-r--r--drivers/staging/et131x/et131x_initpci.c848
-rw-r--r--drivers/staging/et131x/et131x_isr.c480
-rw-r--r--drivers/staging/et131x/et131x_netdev.c686
-rw-r--r--drivers/staging/et131x/et131x_version.h74
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000.h10
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c8
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c3
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c24
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c4
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c2
-rw-r--r--drivers/staging/gma500/accel_2d.c24
-rw-r--r--drivers/staging/gma500/cdv_device.c1
-rw-r--r--drivers/staging/gma500/framebuffer.c92
-rw-r--r--drivers/staging/gma500/gem.c5
-rw-r--r--drivers/staging/gma500/gtt.c81
-rw-r--r--drivers/staging/gma500/gtt.h3
-rw-r--r--drivers/staging/gma500/intel_opregion.c1
-rw-r--r--drivers/staging/gma500/mdfld_intel_display.c12
-rw-r--r--drivers/staging/gma500/mrst_crtc.c16
-rw-r--r--drivers/staging/gma500/power.c8
-rw-r--r--drivers/staging/gma500/psb_device.c32
-rw-r--r--drivers/staging/gma500/psb_drv.c49
-rw-r--r--drivers/staging/gma500/psb_drv.h6
-rw-r--r--drivers/staging/gma500/psb_intel_display.c4
-rw-r--r--drivers/staging/gma500/psb_intel_lvds.c61
-rw-r--r--drivers/staging/gma500/psb_irq.c71
-rw-r--r--drivers/staging/gma500/psb_lid.c2
-rw-r--r--drivers/staging/go7007/Makefile8
-rw-r--r--drivers/staging/go7007/wis-tw2804.c6
-rw-r--r--drivers/staging/hv/Kconfig35
-rw-r--r--drivers/staging/hv/Makefile11
-rw-r--r--drivers/staging/hv/TODO11
-rw-r--r--drivers/staging/hv/blkvsc_drv.c1026
-rw-r--r--drivers/staging/hv/channel.c877
-rw-r--r--drivers/staging/hv/hv_mouse.c715
-rw-r--r--drivers/staging/hv/hv_util.c306
-rw-r--r--drivers/staging/hv/hyperv_net.h7
-rw-r--r--drivers/staging/hv/hyperv_storage.h334
-rw-r--r--drivers/staging/hv/netvsc.c329
-rw-r--r--drivers/staging/hv/netvsc_drv.c156
-rw-r--r--drivers/staging/hv/rndis_filter.c146
-rw-r--r--drivers/staging/hv/storvsc.c564
-rw-r--r--drivers/staging/hv/storvsc_drv.c892
-rw-r--r--drivers/staging/hv/vmbus_drv.c802
-rw-r--r--drivers/staging/iio/Documentation/generic_buffer.c12
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h2
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio738
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio-adc-ad7280a21
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad593330
-rw-r--r--drivers/staging/iio/Kconfig35
-rw-r--r--drivers/staging/iio/Makefile11
-rw-r--r--drivers/staging/iio/accel/Kconfig33
-rw-r--r--drivers/staging/iio/accel/Makefile12
-rw-r--r--drivers/staging/iio/accel/accel.h87
-rw-r--r--drivers/staging/iio/accel/adis16201.h6
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c42
-rw-r--r--drivers/staging/iio/accel/adis16201_ring.c48
-rw-r--r--drivers/staging/iio/accel/adis16201_trigger.c13
-rw-r--r--drivers/staging/iio/accel/adis16203.h6
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c42
-rw-r--r--drivers/staging/iio/accel/adis16203_ring.c39
-rw-r--r--drivers/staging/iio/accel/adis16203_trigger.c14
-rw-r--r--drivers/staging/iio/accel/adis16204.h6
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c94
-rw-r--r--drivers/staging/iio/accel/adis16204_ring.c39
-rw-r--r--drivers/staging/iio/accel/adis16204_trigger.c14
-rw-r--r--drivers/staging/iio/accel/adis16209.h6
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c45
-rw-r--r--drivers/staging/iio/accel/adis16209_ring.c42
-rw-r--r--drivers/staging/iio/accel/adis16209_trigger.c14
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c356
-rw-r--r--drivers/staging/iio/accel/adis16240.h6
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c43
-rw-r--r--drivers/staging/iio/accel/adis16240_ring.c40
-rw-r--r--drivers/staging/iio/accel/adis16240_trigger.c14
-rw-r--r--drivers/staging/iio/accel/inclinometer.h25
-rw-r--r--drivers/staging/iio/accel/kxsd9.c270
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h26
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c114
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c124
-rw-r--r--drivers/staging/iio/accel/sca3000.h4
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c106
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c152
-rw-r--r--drivers/staging/iio/adc/Kconfig97
-rw-r--r--drivers/staging/iio/adc/Makefile15
-rw-r--r--drivers/staging/iio/adc/ad7150.c812
-rw-r--r--drivers/staging/iio/adc/ad7152.c586
-rw-r--r--drivers/staging/iio/adc/ad7192.c1179
-rw-r--r--drivers/staging/iio/adc/ad7192.h47
-rw-r--r--drivers/staging/iio/adc/ad7280a.c997
-rw-r--r--drivers/staging/iio/adc/ad7280a.h38
-rw-r--r--drivers/staging/iio/adc/ad7291.c1094
-rw-r--r--drivers/staging/iio/adc/ad7298.h6
-rw-r--r--drivers/staging/iio/adc/ad7298_core.c62
-rw-r--r--drivers/staging/iio/adc/ad7298_ring.c50
-rw-r--r--drivers/staging/iio/adc/ad7314.c281
-rw-r--r--drivers/staging/iio/adc/ad7476.h6
-rw-r--r--drivers/staging/iio/adc/ad7476_core.c81
-rw-r--r--drivers/staging/iio/adc/ad7476_ring.c50
-rw-r--r--drivers/staging/iio/adc/ad7606.h12
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c277
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c2
-rw-r--r--drivers/staging/iio/adc/ad7606_ring.c86
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c2
-rw-r--r--drivers/staging/iio/adc/ad7745.c674
-rw-r--r--drivers/staging/iio/adc/ad7780.c10
-rw-r--r--drivers/staging/iio/adc/ad7793.c275
-rw-r--r--drivers/staging/iio/adc/ad7816.c60
-rw-r--r--drivers/staging/iio/adc/ad7887.h10
-rw-r--r--drivers/staging/iio/adc/ad7887_core.c70
-rw-r--r--drivers/staging/iio/adc/ad7887_ring.c59
-rw-r--r--drivers/staging/iio/adc/ad799x.h4
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c744
-rw-r--r--drivers/staging/iio/adc/ad799x_ring.c69
-rw-r--r--drivers/staging/iio/adc/adc.h42
-rw-r--r--drivers/staging/iio/adc/adt7310.c24
-rw-r--r--drivers/staging/iio/adc/adt7410.c24
-rw-r--r--drivers/staging/iio/adc/adt75.c657
-rw-r--r--drivers/staging/iio/adc/max1363.h8
-rw-r--r--drivers/staging/iio/adc/max1363_core.c314
-rw-r--r--drivers/staging/iio/adc/max1363_ring.c63
-rw-r--r--drivers/staging/iio/addac/Kconfig5
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c3
-rw-r--r--drivers/staging/iio/addac/adt7316.c46
-rw-r--r--drivers/staging/iio/buffer_generic.h228
-rw-r--r--drivers/staging/iio/cdc/Kconfig36
-rw-r--r--drivers/staging/iio/cdc/Makefile7
-rw-r--r--drivers/staging/iio/cdc/ad7150.c676
-rw-r--r--drivers/staging/iio/cdc/ad7152.c559
-rw-r--r--drivers/staging/iio/cdc/ad7746.c807
-rw-r--r--drivers/staging/iio/cdc/ad7746.h29
-rw-r--r--drivers/staging/iio/chrdev.h54
-rw-r--r--drivers/staging/iio/dac/Kconfig27
-rw-r--r--drivers/staging/iio/dac/Makefile2
-rw-r--r--drivers/staging/iio/dac/ad5064.c463
-rw-r--r--drivers/staging/iio/dac/ad5360.c581
-rw-r--r--drivers/staging/iio/dac/ad5446.c72
-rw-r--r--drivers/staging/iio/dac/ad5504.c94
-rw-r--r--drivers/staging/iio/dac/ad5624r_spi.c83
-rw-r--r--drivers/staging/iio/dac/ad5686.c133
-rw-r--r--drivers/staging/iio/dac/ad5791.c262
-rw-r--r--drivers/staging/iio/dac/ad5791.h6
-rw-r--r--drivers/staging/iio/dac/dac.h2
-rw-r--r--drivers/staging/iio/dac/max517.c34
-rw-r--r--drivers/staging/iio/dds/Kconfig4
-rw-r--r--drivers/staging/iio/dds/ad5930.c2
-rw-r--r--drivers/staging/iio/dds/ad9832.c20
-rw-r--r--drivers/staging/iio/dds/ad9834.c37
-rw-r--r--drivers/staging/iio/dds/ad9850.c2
-rw-r--r--drivers/staging/iio/dds/ad9852.c3
-rw-r--r--drivers/staging/iio/dds/ad9910.c3
-rw-r--r--drivers/staging/iio/dds/ad9951.c3
-rw-r--r--drivers/staging/iio/gyro/Kconfig8
-rw-r--r--drivers/staging/iio/gyro/Makefile2
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c125
-rw-r--r--drivers/staging/iio/gyro/adis16080_core.c104
-rw-r--r--drivers/staging/iio/gyro/adis16130_core.c146
-rw-r--r--drivers/staging/iio/gyro/adis16260.h6
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c58
-rw-r--r--drivers/staging/iio/gyro/adis16260_ring.c39
-rw-r--r--drivers/staging/iio/gyro/adis16260_trigger.c14
-rw-r--r--drivers/staging/iio/gyro/adxrs450_core.c265
-rw-r--r--drivers/staging/iio/gyro/gyro.h85
-rw-r--r--drivers/staging/iio/iio.h248
-rw-r--r--drivers/staging/iio/iio_core.h63
-rw-r--r--drivers/staging/iio/iio_core_trigger.h47
-rw-r--r--drivers/staging/iio/iio_dummy_evgen.c217
-rw-r--r--drivers/staging/iio/iio_dummy_evgen.h2
-rw-r--r--drivers/staging/iio/iio_simple_dummy.c545
-rw-r--r--drivers/staging/iio/iio_simple_dummy.h108
-rw-r--r--drivers/staging/iio/iio_simple_dummy_buffer.c206
-rw-r--r--drivers/staging/iio/iio_simple_dummy_events.c190
-rw-r--r--drivers/staging/iio/impedance-analyzer/Kconfig18
-rw-r--r--drivers/staging/iio/impedance-analyzer/Makefile5
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c814
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.h28
-rw-r--r--drivers/staging/iio/imu/Kconfig16
-rw-r--r--drivers/staging/iio/imu/Makefile2
-rw-r--r--drivers/staging/iio/imu/adis16400.h6
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c653
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c49
-rw-r--r--drivers/staging/iio/imu/adis16400_trigger.c16
-rw-r--r--drivers/staging/iio/industrialio-buffer.c635
-rw-r--r--drivers/staging/iio/industrialio-core.c961
-rw-r--r--drivers/staging/iio/industrialio-ring.c596
-rw-r--r--drivers/staging/iio/industrialio-trigger.c174
-rw-r--r--drivers/staging/iio/kfifo_buf.c78
-rw-r--r--drivers/staging/iio/kfifo_buf.h16
-rw-r--r--drivers/staging/iio/light/Kconfig4
-rw-r--r--drivers/staging/iio/light/isl29018.c74
-rw-r--r--drivers/staging/iio/light/tsl2563.c69
-rw-r--r--drivers/staging/iio/light/tsl2583.c222
-rw-r--r--drivers/staging/iio/magnetometer/Kconfig4
-rw-r--r--drivers/staging/iio/magnetometer/ak8975.c240
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c104
-rw-r--r--drivers/staging/iio/magnetometer/magnet.h31
-rw-r--r--drivers/staging/iio/meter/Kconfig8
-rw-r--r--drivers/staging/iio/meter/Makefile2
-rw-r--r--drivers/staging/iio/meter/ade7753.c32
-rw-r--r--drivers/staging/iio/meter/ade7754.c31
-rw-r--r--drivers/staging/iio/meter/ade7758.h13
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c57
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c57
-rw-r--r--drivers/staging/iio/meter/ade7758_trigger.c16
-rw-r--r--drivers/staging/iio/meter/ade7759.c27
-rw-r--r--drivers/staging/iio/meter/ade7854-i2c.c1
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c1
-rw-r--r--drivers/staging/iio/meter/ade7854.c5
-rw-r--r--drivers/staging/iio/resolver/Kconfig9
-rw-r--r--drivers/staging/iio/resolver/Makefile2
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c188
-rw-r--r--drivers/staging/iio/resolver/ad2s120x.c177
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c223
-rw-r--r--drivers/staging/iio/resolver/ad2s90.c60
-rw-r--r--drivers/staging/iio/ring_generic.h288
-rw-r--r--drivers/staging/iio/ring_hw.h6
-rw-r--r--drivers/staging/iio/ring_sw.c69
-rw-r--r--drivers/staging/iio/ring_sw.h8
-rw-r--r--drivers/staging/iio/sysfs.h136
-rw-r--r--drivers/staging/iio/trigger.h116
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.c6
-rw-r--r--drivers/staging/iio/trigger/iio-trig-gpio.c6
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c7
-rw-r--r--drivers/staging/iio/trigger/iio-trig-sysfs.c13
-rw-r--r--drivers/staging/iio/trigger_consumer.h71
-rw-r--r--drivers/staging/intel_sst/intel_sst.c10
-rw-r--r--drivers/staging/intel_sst/intel_sst_dsp.c2
-rw-r--r--drivers/staging/intel_sst/intelmid.c1
-rw-r--r--drivers/staging/keucr/scsiglue.c5
-rw-r--r--drivers/staging/keucr/smil.h6
-rw-r--r--drivers/staging/keucr/smilsub.c1574
-rw-r--r--drivers/staging/keucr/smscsi.c117
-rw-r--r--drivers/staging/lirc/lirc_imon.c6
-rw-r--r--drivers/staging/lirc/lirc_sasem.c46
-rw-r--r--drivers/staging/lirc/lirc_serial.c2
-rw-r--r--drivers/staging/lirc/lirc_sir.c2
-rw-r--r--drivers/staging/mei/Kconfig2
-rw-r--r--drivers/staging/mei/TODO10
-rw-r--r--drivers/staging/mei/init.c25
-rw-r--r--drivers/staging/mei/interface.c4
-rw-r--r--drivers/staging/mei/interface.h8
-rw-r--r--drivers/staging/mei/interrupt.c85
-rw-r--r--drivers/staging/mei/main.c51
-rw-r--r--drivers/staging/mei/mei_dev.h24
-rw-r--r--drivers/staging/mei/wd.c242
-rw-r--r--drivers/staging/nvec/Kconfig12
-rw-r--r--drivers/staging/nvec/Makefile1
-rw-r--r--drivers/staging/nvec/TODO10
-rw-r--r--drivers/staging/nvec/nvec-keytable.h225
-rw-r--r--drivers/staging/nvec/nvec.c958
-rw-r--r--drivers/staging/nvec/nvec.h217
-rw-r--r--drivers/staging/nvec/nvec_kbd.c95
-rw-r--r--drivers/staging/nvec/nvec_leds.c114
-rw-r--r--drivers/staging/nvec/nvec_power.c352
-rw-r--r--drivers/staging/nvec/nvec_ps2.c114
-rw-r--r--drivers/staging/octeon/ethernet-rx.c3
-rw-r--r--drivers/staging/octeon/ethernet.c12
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c47
-rw-r--r--drivers/staging/panel/panel.c10
-rw-r--r--drivers/staging/pohmelfs/Kconfig8
-rw-r--r--drivers/staging/pohmelfs/trans.c6
-rw-r--r--drivers/staging/quatech_usb2/quatech_usb2.c9
-rw-r--r--drivers/staging/rtl8187se/Makefile6
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c14
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c2
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225z2.c4
-rw-r--r--drivers/staging/rtl8192e/Makefile67
-rw-r--r--drivers/staging/rtl8192e/TODO2
-rw-r--r--drivers/staging/rtl8192e/dot11d.c216
-rw-r--r--drivers/staging/rtl8192e/dot11d.h113
-rw-r--r--drivers/staging/rtl8192e/ieee80211/dot11d.c218
-rw-r--r--drivers/staging/rtl8192e/ieee80211/dot11d.h102
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211.h2636
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c244
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.h85
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c483
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c809
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c296
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_module.c352
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c2676
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c3278
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c600
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c955
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c872
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_BA.h69
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c676
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_HT.h483
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_HTProc.c1732
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_Qos.h582
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_TS.h56
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_TSProc.c627
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl_crypto.h399
-rw-r--r--drivers/staging/rtl8192e/license339
-rw-r--r--drivers/staging/rtl8192e/r8180_93cx6.c141
-rw-r--r--drivers/staging/rtl8192e/r8180_93cx6.h41
-rw-r--r--drivers/staging/rtl8192e/r8190P_def.h410
-rw-r--r--drivers/staging/rtl8192e/r8190P_rtl8256.c306
-rw-r--r--drivers/staging/rtl8192e/r8190P_rtl8256.h31
-rw-r--r--drivers/staging/rtl8192e/r8190_rtl8256.c677
-rw-r--r--drivers/staging/rtl8192e/r8190_rtl8256.h29
-rw-r--r--drivers/staging/rtl8192e/r8192E.h1148
-rw-r--r--drivers/staging/rtl8192e/r8192E_cmdpkt.c418
-rw-r--r--drivers/staging/rtl8192e/r8192E_cmdpkt.h159
-rw-r--r--drivers/staging/rtl8192e/r8192E_core.c5039
-rw-r--r--drivers/staging/rtl8192e/r8192E_dev.c2395
-rw-r--r--drivers/staging/rtl8192e/r8192E_dev.h62
-rw-r--r--drivers/staging/rtl8192e/r8192E_dm.c2554
-rw-r--r--drivers/staging/rtl8192e/r8192E_dm.h228
-rw-r--r--drivers/staging/rtl8192e/r8192E_firmware.c348
-rw-r--r--drivers/staging/rtl8192e/r8192E_firmware.h73
-rw-r--r--drivers/staging/rtl8192e/r8192E_hw.h552
-rw-r--r--drivers/staging/rtl8192e/r8192E_hwimg.c3336
-rw-r--r--drivers/staging/rtl8192e/r8192E_hwimg.h51
-rw-r--r--drivers/staging/rtl8192e/r8192E_phy.c1637
-rw-r--r--drivers/staging/rtl8192e/r8192E_phy.h120
-rw-r--r--drivers/staging/rtl8192e/r8192E_phyreg.h852
-rw-r--r--drivers/staging/rtl8192e/r8192E_wx.c1163
-rw-r--r--drivers/staging/rtl8192e/r8192E_wx.h18
-rw-r--r--drivers/staging/rtl8192e/r8192_pm.c123
-rw-r--r--drivers/staging/rtl8192e/r8192_pm.h24
-rw-r--r--drivers/staging/rtl8192e/r819xE_cmdpkt.c444
-rw-r--r--drivers/staging/rtl8192e/r819xE_cmdpkt.h207
-rw-r--r--drivers/staging/rtl8192e/r819xE_firmware.c351
-rw-r--r--drivers/staging/rtl8192e/r819xE_phy.c2225
-rw-r--r--drivers/staging/rtl8192e/r819xE_phy.h131
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BA.h77
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c566
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h475
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c1075
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h444
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TS.h73
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c548
-rw-r--r--drivers/staging/rtl8192e/rtl_cam.c304
-rw-r--r--drivers/staging/rtl8192e/rtl_cam.h46
-rw-r--r--drivers/staging/rtl8192e/rtl_core.c3198
-rw-r--r--drivers/staging/rtl8192e/rtl_core.h1124
-rw-r--r--drivers/staging/rtl8192e/rtl_crypto.h382
-rw-r--r--drivers/staging/rtl8192e/rtl_debug.c1108
-rw-r--r--drivers/staging/rtl8192e/rtl_debug.h299
-rw-r--r--drivers/staging/rtl8192e/rtl_dm.c2995
-rw-r--r--drivers/staging/rtl8192e/rtl_dm.h324
-rw-r--r--drivers/staging/rtl8192e/rtl_eeprom.c139
-rw-r--r--drivers/staging/rtl8192e/rtl_eeprom.h29
-rw-r--r--drivers/staging/rtl8192e/rtl_ethtool.c53
-rw-r--r--drivers/staging/rtl8192e/rtl_pci.c97
-rw-r--r--drivers/staging/rtl8192e/rtl_pci.h104
-rw-r--r--drivers/staging/rtl8192e/rtl_pm.c136
-rw-r--r--drivers/staging/rtl8192e/rtl_pm.h35
-rw-r--r--drivers/staging/rtl8192e/rtl_ps.c310
-rw-r--r--drivers/staging/rtl8192e/rtl_ps.h47
-rw-r--r--drivers/staging/rtl8192e/rtl_wx.c1333
-rw-r--r--drivers/staging/rtl8192e/rtl_wx.h31
-rw-r--r--drivers/staging/rtl8192e/rtllib.h3144
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt.c241
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt.h85
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c463
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c775
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c292
-rw-r--r--drivers/staging/rtl8192e/rtllib_endianfree.h160
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c289
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c2720
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c3741
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c645
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c967
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c876
-rw-r--r--drivers/staging/rtl8192u/ieee80211/Makefile109
-rw-r--r--drivers/staging/rtl8192u/ieee80211/compress.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c14
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c2
-rw-r--r--drivers/staging/rtl8712/Kconfig7
-rw-r--r--drivers/staging/rtl8712/basic_types.h25
-rw-r--r--drivers/staging/rtl8712/big_endian.h25
-rw-r--r--drivers/staging/rtl8712/drv_types.h33
-rw-r--r--drivers/staging/rtl8712/ethernet.h25
-rw-r--r--drivers/staging/rtl8712/generic.h25
-rw-r--r--drivers/staging/rtl8712/hal_init.c6
-rw-r--r--drivers/staging/rtl8712/ieee80211.c35
-rw-r--r--drivers/staging/rtl8712/ieee80211.h31
-rw-r--r--drivers/staging/rtl8712/if_ether.h25
-rw-r--r--drivers/staging/rtl8712/little_endian.h25
-rw-r--r--drivers/staging/rtl8712/mlme_osdep.h25
-rw-r--r--drivers/staging/rtl8712/mp_custom_oid.h25
-rw-r--r--drivers/staging/rtl8712/os_intfs.c33
-rw-r--r--drivers/staging/rtl8712/osdep_intf.h25
-rw-r--r--drivers/staging/rtl8712/osdep_service.h69
-rw-r--r--drivers/staging/rtl8712/recv_linux.c6
-rw-r--r--drivers/staging/rtl8712/recv_osdep.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_bitdef.h21
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c18
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.h87
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmdctrl_regdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_debugctrl_bitdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_debugctrl_regdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_edcasetting_bitdef.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_edcasetting_regdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c75
-rw-r--r--drivers/staging/rtl8712/rtl8712_event.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_fifoctrl_bitdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_fifoctrl_regdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_gp_bitdef.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_gp_regdef.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_hal.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_interrupt_bitdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_macsetting_regdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_powersave_bitdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_powersave_regdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_ratectrl_bitdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c24
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.h31
-rw-r--r--drivers/staging/rtl8712/rtl8712_regdef.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_security_bitdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_spec.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_syscfg_regdef.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_timectrl_regdef.h19
-rw-r--r--drivers/staging/rtl8712/rtl8712_wmac_bitdef.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_wmac_regdef.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c303
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.h37
-rw-r--r--drivers/staging/rtl8712/rtl871x_byteorder.h19
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c172
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h77
-rw-r--r--drivers/staging/rtl8712/rtl871x_debug.h25
-rw-r--r--drivers/staging/rtl8712/rtl871x_eeprom.h19
-rw-r--r--drivers/staging/rtl8712/rtl871x_event.h25
-rw-r--r--drivers/staging/rtl8712/rtl871x_ht.h25
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.h25
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c306
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.h25
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.h27
-rw-r--r--drivers/staging/rtl8712/rtl871x_led.h25
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c62
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.h28
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c26
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.h26
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.h29
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.h25
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_rf.h25
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c15
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.h25
-rw-r--r--drivers/staging/rtl8712/rtl871x_wlan_sme.h25
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c31
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h52
-rw-r--r--drivers/staging/rtl8712/sta_info.h29
-rw-r--r--drivers/staging/rtl8712/swab.h25
-rw-r--r--drivers/staging/rtl8712/usb_intf.c19
-rw-r--r--drivers/staging/rtl8712/usb_ops.h25
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c29
-rw-r--r--drivers/staging/rtl8712/usb_osintf.h26
-rw-r--r--drivers/staging/rtl8712/usb_vendor_req.h25
-rw-r--r--drivers/staging/rtl8712/wifi.h25
-rw-r--r--drivers/staging/rtl8712/wlan_bssdef.h25
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c27
-rw-r--r--drivers/staging/rtl8712/xmit_osdep.h26
-rw-r--r--drivers/staging/rts5139/Kconfig16
-rw-r--r--drivers/staging/rts5139/Makefile37
-rw-r--r--drivers/staging/rts5139/TODO5
-rw-r--r--drivers/staging/rts5139/debug.h46
-rw-r--r--drivers/staging/rts5139/ms.c4191
-rw-r--r--drivers/staging/rts5139/ms.h263
-rw-r--r--drivers/staging/rts5139/ms_mg.c642
-rw-r--r--drivers/staging/rts5139/ms_mg.h41
-rw-r--r--drivers/staging/rts5139/rts51x.c967
-rw-r--r--drivers/staging/rts5139/rts51x.h205
-rw-r--r--drivers/staging/rts5139/rts51x_card.c986
-rw-r--r--drivers/staging/rts5139/rts51x_card.h881
-rw-r--r--drivers/staging/rts5139/rts51x_chip.c1167
-rw-r--r--drivers/staging/rts5139/rts51x_chip.h904
-rw-r--r--drivers/staging/rts5139/rts51x_fop.c298
-rw-r--r--drivers/staging/rts5139/rts51x_fop.h62
-rw-r--r--drivers/staging/rts5139/rts51x_scsi.c2234
-rw-r--r--drivers/staging/rts5139/rts51x_scsi.h162
-rw-r--r--drivers/staging/rts5139/rts51x_sys.h54
-rw-r--r--drivers/staging/rts5139/rts51x_transport.c1000
-rw-r--r--drivers/staging/rts5139/rts51x_transport.h80
-rw-r--r--drivers/staging/rts5139/sd.c3400
-rw-r--r--drivers/staging/rts5139/sd.h304
-rw-r--r--drivers/staging/rts5139/sd_cprm.c1215
-rw-r--r--drivers/staging/rts5139/sd_cprm.h54
-rw-r--r--drivers/staging/rts5139/trace.h137
-rw-r--r--drivers/staging/rts5139/xd.c2255
-rw-r--r--drivers/staging/rts5139/xd.h193
-rw-r--r--drivers/staging/rts_pstor/Makefile2
-rw-r--r--drivers/staging/rts_pstor/rtsx.c62
-rw-r--r--drivers/staging/rts_pstor/rtsx.h10
-rw-r--r--drivers/staging/rts_pstor/rtsx_scsi.c2
-rw-r--r--drivers/staging/rts_pstor/sd.c35
-rw-r--r--drivers/staging/rts_pstor/spi.c2
-rw-r--r--drivers/staging/sep/sep_driver.c6
-rw-r--r--drivers/staging/serial/68360serial.c (renamed from drivers/tty/serial/68360serial.c)0
-rw-r--r--drivers/staging/serial/Kconfig16
-rw-r--r--drivers/staging/serial/Makefile1
-rw-r--r--drivers/staging/serial/TODO6
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c37
-rw-r--r--drivers/staging/slicoss/slicoss.c2
-rw-r--r--drivers/staging/sm7xx/smtcfb.c2
-rw-r--r--drivers/staging/spectra/ffsport.c15
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c1
-rw-r--r--drivers/staging/tidspbridge/gen/gh.c2
-rw-r--r--drivers/staging/tidspbridge/hw/hw_mmu.c2
-rw-r--r--drivers/staging/usbip/stub.h5
-rw-r--r--drivers/staging/usbip/stub_dev.c7
-rw-r--r--drivers/staging/usbip/stub_main.c1
-rw-r--r--drivers/staging/usbip/stub_rx.c5
-rw-r--r--drivers/staging/usbip/usbip_common.h8
-rw-r--r--drivers/staging/usbip/usbip_protocol.txt358
-rw-r--r--drivers/staging/usbip/userspace/libsrc/vhci_driver.c2
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_attach.c4
-rw-r--r--drivers/staging/usbip/userspace/src/utils.h1
-rw-r--r--drivers/staging/usbip/vhci.h5
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.c24
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.c24
-rw-r--r--drivers/staging/vme/devices/vme_user.c69
-rw-r--r--drivers/staging/vme/devices/vme_user.h2
-rw-r--r--drivers/staging/vme/vme.c349
-rw-r--r--drivers/staging/vme/vme.h50
-rw-r--r--drivers/staging/vme/vme_api.txt90
-rw-r--r--drivers/staging/vme/vme_bridge.h17
-rw-r--r--drivers/staging/vt6655/IEEE11h.c406
-rw-r--r--drivers/staging/vt6655/device_main.c12
-rw-r--r--drivers/staging/vt6655/dpc.c30
-rw-r--r--drivers/staging/vt6655/ioctl.c1092
-rw-r--r--drivers/staging/vt6655/wmgr.c49
-rw-r--r--drivers/staging/vt6655/wpactl.c4
-rw-r--r--drivers/staging/vt6656/dpc.c30
-rw-r--r--drivers/staging/vt6656/ioctl.c1040
-rw-r--r--drivers/staging/vt6656/main_usb.c13
-rw-r--r--drivers/staging/vt6656/wmgr.c49
-rw-r--r--drivers/staging/vt6656/wpactl.c18
-rw-r--r--drivers/staging/winbond/phy_calibration.c12
-rw-r--r--drivers/staging/winbond/wbusb.c2
-rw-r--r--drivers/staging/wlags49_h2/Makefile26
-rw-r--r--drivers/staging/wlags49_h2/debug.h61
-rw-r--r--drivers/staging/wlags49_h2/hcf.c6428
-rw-r--r--drivers/staging/wlags49_h2/hcf.h10
-rw-r--r--drivers/staging/wlags49_h2/hcfcfg.h1586
-rw-r--r--drivers/staging/wlags49_h2/hcfdef.h857
-rw-r--r--drivers/staging/wlags49_h2/mdd.h34
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.c114
-rw-r--r--drivers/staging/wlags49_h2/wl_internal.h15
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c6
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_sysfs.c18
-rw-r--r--drivers/staging/wlags49_h2/wl_version.h6
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c1184
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.h8
-rw-r--r--drivers/staging/wlags49_h25/Makefile26
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c2
-rw-r--r--drivers/staging/xgifb/TODO1
-rw-r--r--drivers/staging/xgifb/XGI_main.h238
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c1434
-rw-r--r--drivers/staging/xgifb/XGIfb.h48
-rw-r--r--drivers/staging/xgifb/vb_def.h605
-rw-r--r--drivers/staging/xgifb/vb_ext.h20
-rw-r--r--drivers/staging/xgifb/vb_init.c257
-rw-r--r--drivers/staging/xgifb/vb_init.h2
-rw-r--r--drivers/staging/xgifb/vb_setmode.c1535
-rw-r--r--drivers/staging/xgifb/vb_struct.h97
-rw-r--r--drivers/staging/xgifb/vb_table.h1200
-rw-r--r--drivers/staging/xgifb/vb_util.c5
-rw-r--r--drivers/staging/xgifb/vgatypes.h11
-rw-r--r--drivers/staging/zcache/zcache-main.c51
-rw-r--r--drivers/staging/zram/zram_drv.c96
-rw-r--r--drivers/staging/zram/zram_drv.h12
-rw-r--r--drivers/staging/zram/zram_sysfs.c22
-rw-r--r--drivers/target/Makefile1
-rw-r--r--drivers/target/iscsi/iscsi_target.c39
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c34
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c49
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c20
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h1
-rw-r--r--drivers/target/loopback/tcm_loop.c18
-rw-r--r--drivers/target/target_core_alua.c21
-rw-r--r--drivers/target/target_core_cdb.c79
-rw-r--r--drivers/target/target_core_configfs.c16
-rw-r--r--drivers/target/target_core_device.c70
-rw-r--r--drivers/target/target_core_fabric_configfs.c1
-rw-r--r--drivers/target/target_core_fabric_lib.c12
-rw-r--r--drivers/target/target_core_file.c56
-rw-r--r--drivers/target/target_core_file.h4
-rw-r--r--drivers/target/target_core_iblock.c274
-rw-r--r--drivers/target/target_core_iblock.h2
-rw-r--r--drivers/target/target_core_pr.c1
-rw-r--r--drivers/target/target_core_pscsi.c261
-rw-r--r--drivers/target/target_core_pscsi.h1
-rw-r--r--drivers/target/target_core_rd.c19
-rw-r--r--drivers/target/target_core_rd.h2
-rw-r--r--drivers/target/target_core_scdb.c105
-rw-r--r--drivers/target/target_core_scdb.h10
-rw-r--r--drivers/target/target_core_stat.c1
-rw-r--r--drivers/target/target_core_tmr.c251
-rw-r--r--drivers/target/target_core_tpg.c2
-rw-r--r--drivers/target/target_core_transport.c1477
-rw-r--r--drivers/target/target_core_ua.c1
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c18
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c13
-rw-r--r--drivers/target/tcm_fc/tfc_io.c1
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c1
-rw-r--r--drivers/tty/Kconfig40
-rw-r--r--drivers/tty/Makefile1
-rw-r--r--drivers/tty/amiserial.c12
-rw-r--r--drivers/tty/cyclades.c14
-rw-r--r--drivers/tty/ehv_bytechan.c881
-rw-r--r--drivers/tty/hvc/hvc_console.c9
-rw-r--r--drivers/tty/hvc/hvc_irq.c2
-rw-r--r--drivers/tty/hvc/hvcs.c4
-rw-r--r--drivers/tty/hvc/hvsi.c2
-rw-r--r--drivers/tty/isicom.c2
-rw-r--r--drivers/tty/mxser.c13
-rw-r--r--drivers/tty/n_gsm.c70
-rw-r--r--drivers/tty/pty.c26
-rw-r--r--drivers/tty/serial/68328serial.c37
-rw-r--r--drivers/tty/serial/68328serial.h1
-rw-r--r--drivers/tty/serial/8250.c132
-rw-r--r--drivers/tty/serial/8250_dw.c194
-rw-r--r--drivers/tty/serial/8250_pci.c42
-rw-r--r--drivers/tty/serial/Kconfig30
-rw-r--r--drivers/tty/serial/Makefile4
-rw-r--r--drivers/tty/serial/altera_jtaguart.c2
-rw-r--r--drivers/tty/serial/altera_uart.c2
-rw-r--r--drivers/tty/serial/apbuart.c1
-rw-r--r--drivers/tty/serial/atmel_serial.c128
-rw-r--r--drivers/tty/serial/bfin_5xx.c1596
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c2
-rw-r--r--drivers/tty/serial/bfin_uart.c1611
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c1
-rw-r--r--drivers/tty/serial/crisv10.c30
-rw-r--r--drivers/tty/serial/dz.c1
-rw-r--r--drivers/tty/serial/icom.c2
-rw-r--r--drivers/tty/serial/imx.c10
-rw-r--r--drivers/tty/serial/ioc3_serial.c1
-rw-r--r--drivers/tty/serial/ioc4_serial.c1
-rw-r--r--drivers/tty/serial/jsm/jsm.h10
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c19
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c29
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c94
-rw-r--r--drivers/tty/serial/lantiq.c10
-rw-r--r--drivers/tty/serial/m32r_sio.c1
-rw-r--r--drivers/tty/serial/max3100.c2
-rw-r--r--drivers/tty/serial/max3107.c2
-rw-r--r--drivers/tty/serial/mcf.c2
-rw-r--r--drivers/tty/serial/mfd.c49
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c3
-rw-r--r--drivers/tty/serial/mrst_max3110.c117
-rw-r--r--drivers/tty/serial/mrst_max3110.h1
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/tty/serial/msm_serial_hs.c2
-rw-r--r--drivers/tty/serial/mux.c2
-rw-r--r--drivers/tty/serial/nwpserial.c1
-rw-r--r--drivers/tty/serial/pch_uart.c2
-rw-r--r--drivers/tty/serial/pxa.c20
-rw-r--r--drivers/tty/serial/samsung.c107
-rw-r--r--drivers/tty/serial/samsung.h1
-rw-r--r--drivers/tty/serial/sb1250-duart.c1
-rw-r--r--drivers/tty/serial/serial_core.c72
-rw-r--r--drivers/tty/serial/serial_ks8695.c9
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c2
-rw-r--r--drivers/tty/serial/sn_console.c3
-rw-r--r--drivers/tty/serial/timbuart.c2
-rw-r--r--drivers/tty/serial/uartlite.c1
-rw-r--r--drivers/tty/serial/ucc_uart.c4
-rw-r--r--drivers/tty/serial/xilinx_uartps.c4
-rw-r--r--drivers/tty/serial/zs.c1
-rw-r--r--drivers/tty/synclink.c1
-rw-r--r--drivers/tty/synclinkmp.c6
-rw-r--r--drivers/tty/tty_io.c50
-rw-r--r--drivers/tty/tty_ioctl.c17
-rw-r--r--drivers/tty/tty_ldisc.c1
-rw-r--r--drivers/tty/tty_mutex.c12
-rw-r--r--drivers/tty/tty_port.c2
-rw-r--r--drivers/tty/vt/keyboard.c3
-rw-r--r--drivers/tty/vt/selection.c4
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/uio/uio.c14
-rw-r--r--drivers/uio/uio_pci_generic.c5
-rw-r--r--drivers/uio/uio_pdrv_genirq.c2
-rw-r--r--drivers/usb/Kconfig16
-rw-r--r--drivers/usb/Makefile4
-rw-r--r--drivers/usb/class/cdc-acm.c11
-rw-r--r--drivers/usb/class/cdc-wdm.c8
-rw-r--r--drivers/usb/class/usbtmc.c5
-rw-r--r--drivers/usb/core/config.c111
-rw-r--r--drivers/usb/core/devices.c4
-rw-r--r--drivers/usb/core/devio.c57
-rw-r--r--drivers/usb/core/driver.c25
-rw-r--r--drivers/usb/core/endpoint.c2
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.c36
-rw-r--r--drivers/usb/core/hub.c326
-rw-r--r--drivers/usb/core/message.c2
-rw-r--r--drivers/usb/core/quirks.c24
-rw-r--r--drivers/usb/core/sysfs.c59
-rw-r--r--drivers/usb/core/urb.c2
-rw-r--r--drivers/usb/core/usb.c1
-rw-r--r--drivers/usb/core/usb.h7
-rw-r--r--drivers/usb/dwc3/Kconfig25
-rw-r--r--drivers/usb/dwc3/Makefile36
-rw-r--r--drivers/usb/dwc3/core.c484
-rw-r--r--drivers/usb/dwc3/core.h768
-rw-r--r--drivers/usb/dwc3/debug.h50
-rw-r--r--drivers/usb/dwc3/debugfs.c441
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c401
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c219
-rw-r--r--drivers/usb/dwc3/ep0.c804
-rw-r--r--drivers/usb/dwc3/gadget.c2104
-rw-r--r--drivers/usb/dwc3/gadget.h211
-rw-r--r--drivers/usb/dwc3/io.h54
-rw-r--r--drivers/usb/gadget/Kconfig47
-rw-r--r--drivers/usb/gadget/Makefile4
-rw-r--r--drivers/usb/gadget/acm_ms.c256
-rw-r--r--drivers/usb/gadget/amd5536udc.c20
-rw-r--r--drivers/usb/gadget/amd5536udc.h9
-rw-r--r--drivers/usb/gadget/at91_udc.c32
-rw-r--r--drivers/usb/gadget/at91_udc.h10
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c13
-rw-r--r--drivers/usb/gadget/cdc2.c9
-rw-r--r--drivers/usb/gadget/ci13xxx_msm.c11
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c2
-rw-r--r--drivers/usb/gadget/composite.c37
-rw-r--r--drivers/usb/gadget/config.c9
-rw-r--r--drivers/usb/gadget/dbgp.c1
-rw-r--r--drivers/usb/gadget/dummy_hcd.c13
-rw-r--r--drivers/usb/gadget/epautoconf.c12
-rw-r--r--drivers/usb/gadget/ether.c9
-rw-r--r--drivers/usb/gadget/f_audio.c15
-rw-r--r--drivers/usb/gadget/f_ecm.c9
-rw-r--r--drivers/usb/gadget/f_eem.c9
-rw-r--r--drivers/usb/gadget/f_fs.c9
-rw-r--r--drivers/usb/gadget/f_hid.c9
-rw-r--r--drivers/usb/gadget/f_loopback.c9
-rw-r--r--drivers/usb/gadget/f_mass_storage.c196
-rw-r--r--drivers/usb/gadget/f_midi.c998
-rw-r--r--drivers/usb/gadget/f_ncm.c9
-rw-r--r--drivers/usb/gadget/f_obex.c9
-rw-r--r--drivers/usb/gadget/f_phonet.c10
-rw-r--r--drivers/usb/gadget/f_rndis.c9
-rw-r--r--drivers/usb/gadget/f_sourcesink.c9
-rw-r--r--drivers/usb/gadget/f_subset.c9
-rw-r--r--drivers/usb/gadget/f_uvc.c1
-rw-r--r--drivers/usb/gadget/f_uvc.h1
-rw-r--r--drivers/usb/gadget/file_storage.c254
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c55
-rw-r--r--drivers/usb/gadget/fusb300_udc.c16
-rw-r--r--drivers/usb/gadget/fusb300_udc.h10
-rw-r--r--drivers/usb/gadget/g_ffs.c9
-rw-r--r--drivers/usb/gadget/gadget_chips.h3
-rw-r--r--drivers/usb/gadget/gmidi.c1292
-rw-r--r--drivers/usb/gadget/hid.c9
-rw-r--r--drivers/usb/gadget/imx_udc.c4
-rw-r--r--drivers/usb/gadget/imx_udc.h5
-rw-r--r--drivers/usb/gadget/inode.c9
-rw-r--r--drivers/usb/gadget/langwell_udc.c149
-rw-r--r--drivers/usb/gadget/langwell_udc.h12
-rw-r--r--drivers/usb/gadget/m66592-udc.c16
-rw-r--r--drivers/usb/gadget/m66592-udc.h10
-rw-r--r--drivers/usb/gadget/mass_storage.c11
-rw-r--r--drivers/usb/gadget/multi.c9
-rw-r--r--drivers/usb/gadget/mv_udc.h26
-rw-r--r--drivers/usb/gadget/mv_udc_core.c564
-rw-r--r--drivers/usb/gadget/mv_udc_phy.c214
-rw-r--r--drivers/usb/gadget/ncm.c9
-rw-r--r--drivers/usb/gadget/ndis.h6
-rw-r--r--drivers/usb/gadget/net2272.c78
-rw-r--r--drivers/usb/gadget/net2280.c89
-rw-r--r--drivers/usb/gadget/net2280.h9
-rw-r--r--drivers/usb/gadget/omap_udc.c18
-rw-r--r--drivers/usb/gadget/pch_udc.c15
-rw-r--r--drivers/usb/gadget/printer.c25
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c30
-rw-r--r--drivers/usb/gadget/pxa25x_udc.h17
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c12
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h9
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c516
-rw-r--r--drivers/usb/gadget/r8a66597-udc.h73
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c10
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c69
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c16
-rw-r--r--drivers/usb/gadget/s3c2410_udc.h10
-rw-r--r--drivers/usb/gadget/storage_common.c185
-rw-r--r--drivers/usb/gadget/u_ether.c9
-rw-r--r--drivers/usb/gadget/u_ether.h9
-rw-r--r--drivers/usb/gadget/u_serial.c3
-rw-r--r--drivers/usb/gadget/udc-core.c21
-rw-r--r--drivers/usb/gadget/uvc.h2
-rw-r--r--drivers/usb/gadget/uvc_queue.c1
-rw-r--r--drivers/usb/gadget/uvc_v4l2.c2
-rw-r--r--drivers/usb/gadget/uvc_video.c1
-rw-r--r--drivers/usb/gadget/webcam.c2
-rw-r--r--drivers/usb/gadget/zero.c9
-rw-r--r--drivers/usb/host/Kconfig28
-rw-r--r--drivers/usb/host/Makefile3
-rw-r--r--drivers/usb/host/ehci-ath79.c2
-rw-r--r--drivers/usb/host/ehci-au1xxx.c4
-rw-r--r--drivers/usb/host/ehci-dbg.c19
-rw-r--r--drivers/usb/host/ehci-fsl.c6
-rw-r--r--drivers/usb/host/ehci-hcd.c64
-rw-r--r--drivers/usb/host/ehci-hub.c10
-rw-r--r--drivers/usb/host/ehci-mxc.c2
-rw-r--r--drivers/usb/host/ehci-octeon.c2
-rw-r--r--drivers/usb/host/ehci-omap.c2
-rw-r--r--drivers/usb/host/ehci-orion.c2
-rw-r--r--drivers/usb/host/ehci-pci.c7
-rw-r--r--drivers/usb/host/ehci-ps3.c2
-rw-r--r--drivers/usb/host/ehci-pxa168.c363
-rw-r--r--drivers/usb/host/ehci-q.c31
-rw-r--r--drivers/usb/host/ehci-s5p.c4
-rw-r--r--drivers/usb/host/ehci-sched.c41
-rw-r--r--drivers/usb/host/ehci-sh.c2
-rw-r--r--drivers/usb/host/ehci-spear.c2
-rw-r--r--drivers/usb/host/ehci-tegra.c2
-rw-r--r--drivers/usb/host/ehci-vt8500.c2
-rw-r--r--drivers/usb/host/ehci-xls.c161
-rw-r--r--drivers/usb/host/ehci.h24
-rw-r--r--drivers/usb/host/fhci-hcd.c9
-rw-r--r--drivers/usb/host/fhci-sched.c19
-rw-r--r--drivers/usb/host/imx21-hcd.c2
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c8
-rw-r--r--drivers/usb/host/isp1760-hcd.c418
-rw-r--r--drivers/usb/host/isp1760-hcd.h5
-rw-r--r--drivers/usb/host/isp1760-if.c80
-rw-r--r--drivers/usb/host/ohci-ath79.c2
-rw-r--r--drivers/usb/host/ohci-au1xxx.c2
-rw-r--r--drivers/usb/host/ohci-da8xx.c2
-rw-r--r--drivers/usb/host/ohci-ep93xx.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-hub.c5
-rw-r--r--drivers/usb/host/ohci-octeon.c2
-rw-r--r--drivers/usb/host/ohci-omap.c4
-rw-r--r--drivers/usb/host/ohci-omap3.c4
-rw-r--r--drivers/usb/host/ohci-pnx4008.c2
-rw-r--r--drivers/usb/host/ohci-pnx8550.c2
-rw-r--r--drivers/usb/host/ohci-ppc-of.c2
-rw-r--r--drivers/usb/host/ohci-ppc-soc.c2
-rw-r--r--drivers/usb/host/ohci-ps3.c2
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/usb/host/ohci-q.c4
-rw-r--r--drivers/usb/host/ohci-s3c2410.c2
-rw-r--r--drivers/usb/host/ohci-sa1111.c2
-rw-r--r--drivers/usb/host/ohci-sh.c2
-rw-r--r--drivers/usb/host/ohci-sm501.c2
-rw-r--r--drivers/usb/host/ohci-spear.c2
-rw-r--r--drivers/usb/host/ohci-ssb.c2
-rw-r--r--drivers/usb/host/ohci-tmio.c2
-rw-r--r--drivers/usb/host/ohci-xls.c151
-rw-r--r--drivers/usb/host/r8a66597-hcd.c4
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/uhci-hcd.c66
-rw-r--r--drivers/usb/host/uhci-q.c6
-rw-r--r--drivers/usb/host/xhci-ext-caps.h6
-rw-r--r--drivers/usb/host/xhci-hub.c139
-rw-r--r--drivers/usb/host/xhci-mem.c371
-rw-r--r--drivers/usb/host/xhci-pci.c107
-rw-r--r--drivers/usb/host/xhci-ring.c80
-rw-r--r--drivers/usb/host/xhci.c1151
-rw-r--r--drivers/usb/host/xhci.h187
-rw-r--r--drivers/usb/misc/adutux.c14
-rw-r--r--drivers/usb/misc/ftdi-elan.c2
-rw-r--r--drivers/usb/misc/idmouse.c2
-rw-r--r--drivers/usb/misc/iowarrior.c2
-rw-r--r--drivers/usb/misc/ldusb.c4
-rw-r--r--drivers/usb/misc/legousbtower.c4
-rw-r--r--drivers/usb/misc/usblcd.c129
-rw-r--r--drivers/usb/misc/usbled.c25
-rw-r--r--drivers/usb/misc/usbtest.c31
-rw-r--r--drivers/usb/mon/mon_bin.c2
-rw-r--r--drivers/usb/musb/musb_gadget.c73
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c8
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/musb/musbhsdma.c2
-rw-r--r--drivers/usb/otg/isp1301_omap.c2
-rw-r--r--drivers/usb/otg/twl6030-usb.c16
-rw-r--r--drivers/usb/renesas_usbhs/Kconfig2
-rw-r--r--drivers/usb/renesas_usbhs/Makefile8
-rw-r--r--drivers/usb/renesas_usbhs/common.c236
-rw-r--r--drivers/usb/renesas_usbhs/common.h60
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c222
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h22
-rw-r--r--drivers/usb/renesas_usbhs/mod.c81
-rw-r--r--drivers/usb/renesas_usbhs/mod.h57
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c130
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c1312
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c200
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h27
-rw-r--r--drivers/usb/serial/Kconfig1
-rw-r--r--drivers/usb/serial/digi_acceleport.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c91
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/io_edgeport.c4
-rw-r--r--drivers/usb/serial/ipw.c33
-rw-r--r--drivers/usb/serial/mos7720.c1
-rw-r--r--drivers/usb/serial/mos7840.c1
-rw-r--r--drivers/usb/serial/opticon.c2
-rw-r--r--drivers/usb/serial/option.c170
-rw-r--r--drivers/usb/serial/pl2303.c4
-rw-r--r--drivers/usb/serial/pl2303.h5
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/serial/sierra.c2
-rw-r--r--drivers/usb/serial/symbolserial.c2
-rw-r--r--drivers/usb/serial/usb-serial.c8
-rw-r--r--drivers/usb/serial/usb_wwan.c2
-rw-r--r--drivers/usb/storage/Kconfig2
-rw-r--r--drivers/usb/storage/realtek_cr.c117
-rw-r--r--drivers/usb/storage/transport.c34
-rw-r--r--drivers/usb/storage/usb.c16
-rw-r--r--drivers/usb/usb-common.c35
-rw-r--r--drivers/usb/usb-skeleton.c2
-rw-r--r--drivers/usb/wusbcore/wa-hc.c2
-rw-r--r--drivers/uwb/uwb-internal.h1
-rw-r--r--drivers/video/console/vgacon.c42
-rw-r--r--drivers/video/igafb.c2
-rw-r--r--drivers/virtio/virtio_ring.c10
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c27
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c2
-rw-r--r--drivers/xen/Kconfig10
-rw-r--r--drivers/xen/Makefile4
-rw-r--r--drivers/xen/balloon.c62
-rw-r--r--drivers/xen/events.c49
-rw-r--r--drivers/xen/gntdev.c39
-rw-r--r--drivers/xen/grant-table.c6
-rw-r--r--drivers/xen/pci.c105
-rw-r--r--drivers/xen/swiotlb-xen.c70
-rw-r--r--drivers/xen/xen-pciback/conf_space.c1
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c5
-rw-r--r--drivers/xen/xen-pciback/conf_space_quirks.c3
-rw-r--r--drivers/xen/xen-pciback/passthrough.c34
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c35
-rw-r--r--drivers/xen/xen-pciback/pciback.h32
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c1
-rw-r--r--drivers/xen/xen-pciback/vpci.c35
-rw-r--r--drivers/xen/xen-pciback/xenbus.c29
-rw-r--r--drivers/xen/xen-selfballoon.c67
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c101
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c121
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c17
-rw-r--r--drivers/zorro/zorro-driver.c8
-rw-r--r--fs/9p/v9fs.c33
-rw-r--r--fs/9p/vfs_dir.c14
-rw-r--r--fs/9p/vfs_inode.c2
-rw-r--r--fs/attr.c5
-rw-r--r--fs/btrfs/ioctl.c11
-rw-r--r--fs/btrfs/xattr.c50
-rw-r--r--fs/cifs/connect.c4
-rw-r--r--fs/cifs/xattr.c40
-rw-r--r--fs/coda/coda_linux.h5
-rw-r--r--fs/compat.c1
-rw-r--r--fs/configfs/inode.c3
-rw-r--r--fs/configfs/item.c2
-rw-r--r--fs/debugfs/inode.c2
-rw-r--r--fs/eventpoll.c2
-rw-r--r--fs/ext2/xattr_security.c34
-rw-r--r--fs/ext3/xattr_security.c36
-rw-r--r--fs/ext4/xattr_security.c36
-rw-r--r--fs/gfs2/inode.c38
-rw-r--r--fs/jffs2/security.c35
-rw-r--r--fs/jfs/xattr.c57
-rw-r--r--fs/lockd/host.c25
-rw-r--r--fs/lockd/svc.c2
-rw-r--r--fs/locks.c225
-rw-r--r--fs/nfs/blocklayout/blocklayout.c58
-rw-r--r--fs/nfs/blocklayout/blocklayout.h4
-rw-r--r--fs/nfs/blocklayout/blocklayoutdev.c35
-rw-r--r--fs/nfs/callback.c4
-rw-r--r--fs/nfs/client.c11
-rw-r--r--fs/nfs/delegation.c2
-rw-r--r--fs/nfs/fscache-index.c4
-rw-r--r--fs/nfs/idmap.c25
-rw-r--r--fs/nfs/inode.c16
-rw-r--r--fs/nfs/internal.h10
-rw-r--r--fs/nfs/nfs4_fs.h24
-rw-r--r--fs/nfs/nfs4filelayout.c33
-rw-r--r--fs/nfs/nfs4proc.c93
-rw-r--r--fs/nfs/pnfs.c52
-rw-r--r--fs/nfs/pnfs.h5
-rw-r--r--fs/nfs/read.c40
-rw-r--r--fs/nfs/super.c17
-rw-r--r--fs/nfs/unlink.c4
-rw-r--r--fs/nfs/write.c73
-rw-r--r--fs/nfsd/export.c16
-rw-r--r--fs/nfsd/nfs4callback.c20
-rw-r--r--fs/nfsd/nfs4proc.c374
-rw-r--r--fs/nfsd/nfs4recover.c53
-rw-r--r--fs/nfsd/nfs4state.c1794
-rw-r--r--fs/nfsd/nfs4xdr.c380
-rw-r--r--fs/nfsd/nfsctl.c1
-rw-r--r--fs/nfsd/nfsd.h33
-rw-r--r--fs/nfsd/nfsfh.c39
-rw-r--r--fs/nfsd/state.h174
-rw-r--r--fs/nfsd/vfs.c31
-rw-r--r--fs/nfsd/vfs.h29
-rw-r--r--fs/nfsd/xdr4.h28
-rw-r--r--fs/ocfs2/xattr.c38
-rw-r--r--fs/reiserfs/journal.c9
-rw-r--r--fs/reiserfs/resize.c4
-rw-r--r--fs/reiserfs/xattr_security.c4
-rw-r--r--fs/squashfs/Kconfig6
-rw-r--r--fs/sysfs/dir.c182
-rw-r--r--fs/sysfs/file.c56
-rw-r--r--fs/sysfs/inode.c16
-rw-r--r--fs/sysfs/sysfs.h17
-rw-r--r--fs/xattr.c63
-rw-r--r--fs/xfs/kmem.h7
-rw-r--r--fs/xfs/xfs_buf_item.c3
-rw-r--r--fs/xfs/xfs_dquot_item.c10
-rw-r--r--fs/xfs/xfs_inode_item.c10
-rw-r--r--fs/xfs/xfs_iops.c39
-rw-r--r--fs/xfs/xfs_linux.h2
-rw-r--r--fs/xfs/xfs_super.c13
-rw-r--r--fs/xfs/xfs_trans.h2
-rw-r--r--fs/xfs/xfs_trans_ail.c83
-rw-r--r--fs/xfs/xfs_trans_priv.h8
-rw-r--r--include/asm-generic/fcntl.h5
-rw-r--r--include/asm-generic/vmlinux.lds.h1
-rw-r--r--include/linux/atalk.h3
-rw-r--r--include/linux/ax25.h2
-rw-r--r--include/linux/bcma/bcma.h9
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h94
-rw-r--r--include/linux/bcma/bcma_driver_mips.h51
-rw-r--r--include/linux/bcma/bcma_soc.h16
-rw-r--r--include/linux/caif/caif_socket.h7
-rw-r--r--include/linux/can.h4
-rw-r--r--include/linux/can/Kbuild1
-rw-r--r--include/linux/can/bcm.h3
-rw-r--r--include/linux/can/core.h2
-rw-r--r--include/linux/can/dev.h1
-rw-r--r--include/linux/can/error.h2
-rw-r--r--include/linux/can/gw.h162
-rw-r--r--include/linux/can/netlink.h2
-rw-r--r--include/linux/can/raw.h2
-rw-r--r--include/linux/capability.h3
-rw-r--r--include/linux/ceph/messenger.h1
-rw-r--r--include/linux/cn_proc.h11
-rw-r--r--include/linux/devfreq.h238
-rw-r--r--include/linux/device-mapper.h5
-rw-r--r--include/linux/device.h12
-rw-r--r--include/linux/dma_remapping.h10
-rw-r--r--include/linux/dmar.h43
-rw-r--r--include/linux/drbd_tag_magic.h2
-rw-r--r--include/linux/dynamic_debug.h76
-rw-r--r--include/linux/ethtool.h180
-rw-r--r--include/linux/evm.h100
-rw-r--r--include/linux/filter.h2
-rw-r--r--include/linux/freezer.h4
-rw-r--r--include/linux/fs.h8
-rw-r--r--include/linux/hid.h3
-rw-r--r--include/linux/hyperv.h (renamed from drivers/staging/hv/hyperv.h)181
-rw-r--r--include/linux/ieee80211.h156
-rw-r--r--include/linux/if.h1
-rw-r--r--include/linux/if_ether.h2
-rw-r--r--include/linux/if_link.h10
-rw-r--r--include/linux/if_macvlan.h1
-rw-r--r--include/linux/if_packet.h119
-rw-r--r--include/linux/if_pppol2tp.h2
-rw-r--r--include/linux/if_pppox.h9
-rw-r--r--include/linux/ima.h13
-rw-r--r--include/linux/in.h2
-rw-r--r--include/linux/inet_diag.h3
-rw-r--r--include/linux/init_task.h2
-rw-r--r--include/linux/integrity.h39
-rw-r--r--include/linux/intel-iommu.h10
-rw-r--r--include/linux/interrupt.h41
-rw-r--r--include/linux/io-mapping.h2
-rw-r--r--include/linux/ip6_tunnel.h2
-rw-r--r--include/linux/ipx.h2
-rw-r--r--include/linux/irda.h9
-rw-r--r--include/linux/irq.h18
-rw-r--r--include/linux/irqdesc.h1
-rw-r--r--include/linux/isdn.h2
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/kprobes.h2
-rw-r--r--include/linux/l2tp.h7
-rw-r--r--include/linux/lapb.h3
-rw-r--r--include/linux/llc.h10
-rw-r--r--include/linux/lockdep.h2
-rw-r--r--include/linux/mfd/wm831x/core.h12
-rw-r--r--include/linux/mfd/wm831x/pdata.h3
-rw-r--r--include/linux/mfd/wm8400-private.h7
-rw-r--r--include/linux/mfd/wm8994/core.h9
-rw-r--r--include/linux/mii.h210
-rw-r--r--include/linux/mlx4/device.h1
-rw-r--r--include/linux/mm_types.h11
-rw-r--r--include/linux/mod_devicetable.h9
-rw-r--r--include/linux/moduleparam.h20
-rw-r--r--include/linux/net_tstamp.h13
-rw-r--r--include/linux/netdevice.h30
-rw-r--r--include/linux/netfilter/xt_connlimit.h1
-rw-r--r--include/linux/netfilter/xt_conntrack.h1
-rw-r--r--include/linux/netfilter/xt_iprange.h1
-rw-r--r--include/linux/netfilter_arp/arp_tables.h14
-rw-r--r--include/linux/netfilter_decnet.h3
-rw-r--r--include/linux/netfilter_ipv4.h3
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h20
-rw-r--r--include/linux/netfilter_ipv6.h3
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h22
-rw-r--r--include/linux/netlink.h2
-rw-r--r--include/linux/netrom.h2
-rw-r--r--include/linux/nfc.h8
-rw-r--r--include/linux/nfs4.h21
-rw-r--r--include/linux/nfs_fs.h1
-rw-r--r--include/linux/nfs_page.h1
-rw-r--r--include/linux/nfs_xdr.h5
-rw-r--r--include/linux/nfsd/Kbuild2
-rw-r--r--include/linux/nfsd/const.h55
-rw-r--r--include/linux/nfsd/export.h2
-rw-r--r--include/linux/nfsd/nfsfh.h7
-rw-r--r--include/linux/nfsd/syscall.h116
-rw-r--r--include/linux/nl80211.h238
-rw-r--r--include/linux/opp.h12
-rw-r--r--include/linux/oprofile.h2
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/percpu_counter.h2
-rw-r--r--include/linux/phonet.h5
-rw-r--r--include/linux/phy.h2
-rw-r--r--include/linux/pinctrl/machine.h107
-rw-r--r--include/linux/pinctrl/pinctrl.h133
-rw-r--r--include/linux/pinctrl/pinmux.h117
-rw-r--r--include/linux/platform_data/dwc3-omap.h47
-rw-r--r--include/linux/platform_data/exynos4_tmu.h83
-rw-r--r--include/linux/platform_data/mv_usb.h50
-rw-r--r--include/linux/platform_data/ntc_thermistor.h2
-rw-r--r--include/linux/platform_device.h48
-rw-r--r--include/linux/pm.h26
-rw-r--r--include/linux/pm_clock.h71
-rw-r--r--include/linux/pm_domain.h26
-rw-r--r--include/linux/pm_qos.h155
-rw-r--r--include/linux/pm_qos_params.h38
-rw-r--r--include/linux/pm_runtime.h42
-rw-r--r--include/linux/proportions.h6
-rw-r--r--include/linux/ratelimit.h6
-rw-r--r--include/linux/rcupdate.h300
-rw-r--r--include/linux/rcutiny.h20
-rw-r--r--include/linux/rcutree.h2
-rw-r--r--include/linux/regmap.h79
-rw-r--r--include/linux/rfkill-gpio.h4
-rw-r--r--include/linux/rose.h7
-rw-r--r--include/linux/rwsem-spinlock.h2
-rw-r--r--include/linux/rwsem.h10
-rw-r--r--include/linux/sched.h11
-rw-r--r--include/linux/security.h32
-rw-r--r--include/linux/semaphore.h4
-rw-r--r--include/linux/serial.h1
-rw-r--r--include/linux/serial_8250.h2
-rw-r--r--include/linux/serial_core.h10
-rw-r--r--include/linux/serial_reg.h1
-rw-r--r--include/linux/sh_eth.h25
-rw-r--r--include/linux/skbuff.h262
-rw-r--r--include/linux/ssb/ssb.h2
-rw-r--r--include/linux/ssb/ssb_regs.h40
-rw-r--r--include/linux/sungem_phy.h (renamed from drivers/net/sungem_phy.h)2
-rw-r--r--include/linux/sunrpc/clnt.h11
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h2
-rw-r--r--include/linux/sunrpc/svc.h32
-rw-r--r--include/linux/suspend.h95
-rw-r--r--include/linux/sysctl.h2
-rw-r--r--include/linux/sysfs.h1
-rw-r--r--include/linux/tcp.h7
-rw-r--r--include/linux/tipc_config.h4
-rw-r--r--include/linux/tty.h26
-rw-r--r--include/linux/types.h10
-rw-r--r--include/linux/uio_driver.h7
-rw-r--r--include/linux/un.h4
-rw-r--r--include/linux/usb.h20
-rw-r--r--include/linux/usb/ch9.h29
-rw-r--r--include/linux/usb/gadget.h4
-rw-r--r--include/linux/usb/hcd.h3
-rw-r--r--include/linux/usb/r8a66597.h60
-rw-r--r--include/linux/usb/renesas_usbhs.h14
-rw-r--r--include/linux/virtio.h5
-rw-r--r--include/linux/x25.h3
-rw-r--r--include/linux/xattr.h19
-rw-r--r--include/media/pwc-ioctl.h1
-rw-r--r--include/media/videobuf-dma-sg.h2
-rw-r--r--include/net/9p/9p.h14
-rw-r--r--include/net/9p/client.h8
-rw-r--r--include/net/9p/transport.h10
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/bluetooth/hci.h10
-rw-r--r--include/net/bluetooth/hci_core.h25
-rw-r--r--include/net/bluetooth/l2cap.h11
-rw-r--r--include/net/bluetooth/mgmt.h16
-rw-r--r--include/net/bluetooth/smp.h17
-rw-r--r--include/net/caif/caif_hsi.h37
-rw-r--r--include/net/cfg80211-wext.h55
-rw-r--r--include/net/cfg80211.h385
-rw-r--r--include/net/dcbevent.h18
-rw-r--r--include/net/dcbnl.h3
-rw-r--r--include/net/dst.h9
-rw-r--r--include/net/ieee80211_radiotap.h1
-rw-r--r--include/net/if_inet6.h1
-rw-r--r--include/net/inet_ecn.h8
-rw-r--r--include/net/inet_timewait_sock.h3
-rw-r--r--include/net/ip.h12
-rw-r--r--include/net/ip_vs.h1
-rw-r--r--include/net/iucv/af_iucv.h52
-rw-r--r--include/net/iucv/iucv.h36
-rw-r--r--include/net/lapb.h2
-rw-r--r--include/net/lib80211.h3
-rw-r--r--include/net/mac80211.h294
-rw-r--r--include/net/nfc/nci.h313
-rw-r--r--include/net/nfc/nci_core.h184
-rw-r--r--include/net/nfc/nfc.h (renamed from include/net/nfc.h)11
-rw-r--r--include/net/regulatory.h16
-rw-r--r--include/net/sch_generic.h24
-rw-r--r--include/net/scm.h5
-rw-r--r--include/net/sctp/structs.h1
-rw-r--r--include/net/secure_seq.h2
-rw-r--r--include/net/sock.h15
-rw-r--r--include/net/tcp.h71
-rw-r--r--include/net/udplite.h63
-rw-r--r--include/rdma/ib_addr.h6
-rw-r--r--include/sound/pcm.h4
-rw-r--r--include/target/configfs_macros.h4
-rw-r--r--include/target/target_core_base.h71
-rw-r--r--include/target/target_core_tmr.h2
-rw-r--r--include/target/target_core_transport.h75
-rw-r--r--include/trace/events/9p.h176
-rw-r--r--include/trace/events/rcu.h459
-rw-r--r--include/trace/events/regmap.h136
-rw-r--r--include/trace/events/rpm.h99
-rw-r--r--include/xen/balloon.h5
-rw-r--r--include/xen/grant_table.h1
-rw-r--r--include/xen/interface/io/xs_wire.h6
-rw-r--r--include/xen/interface/physdev.h34
-rw-r--r--include/xen/page.h12
-rw-r--r--init/Kconfig6
-rw-r--r--init/main.c4
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/async.c4
-rw-r--r--kernel/cgroup.c18
-rw-r--r--kernel/cred.c18
-rw-r--r--kernel/freezer.c2
-rw-r--r--kernel/futex.c10
-rw-r--r--kernel/irq/chip.c64
-rw-r--r--kernel/irq/internals.h19
-rw-r--r--kernel/irq/irqdesc.c32
-rw-r--r--kernel/irq/manage.c218
-rw-r--r--kernel/irq/pm.c48
-rw-r--r--kernel/irq/settings.h7
-rw-r--r--kernel/kmod.c4
-rw-r--r--kernel/kprobes.c34
-rw-r--r--kernel/latencytop.c14
-rw-r--r--kernel/lockdep.c240
-rw-r--r--kernel/params.c21
-rw-r--r--kernel/pid.c4
-rw-r--r--kernel/pm_qos_params.c481
-rw-r--r--kernel/posix-cpu-timers.c17
-rw-r--r--kernel/power/Kconfig4
-rw-r--r--kernel/power/Makefile4
-rw-r--r--kernel/power/console.c4
-rw-r--r--kernel/power/hibernate.c53
-rw-r--r--kernel/power/main.c102
-rw-r--r--kernel/power/power.h4
-rw-r--r--kernel/power/process.c30
-rw-r--r--kernel/power/qos.c490
-rw-r--r--kernel/power/snapshot.c18
-rw-r--r--kernel/power/suspend.c17
-rw-r--r--kernel/power/swap.c818
-rw-r--r--kernel/printk.c46
-rw-r--r--kernel/rcu.h85
-rw-r--r--kernel/rcupdate.c26
-rw-r--r--kernel/rcutiny.c117
-rw-r--r--kernel/rcutiny_plugin.h134
-rw-r--r--kernel/rcutorture.c77
-rw-r--r--kernel/rcutree.c290
-rw-r--r--kernel/rcutree.h17
-rw-r--r--kernel/rcutree_plugin.h150
-rw-r--r--kernel/rcutree_trace.c13
-rw-r--r--kernel/rtmutex-debug.c77
-rw-r--r--kernel/rtmutex.c8
-rw-r--r--kernel/sched.c15
-rw-r--r--kernel/sched_stats.h12
-rw-r--r--kernel/semaphore.c28
-rw-r--r--kernel/signal.c24
-rw-r--r--kernel/sys.c3
-rw-r--r--kernel/sysctl_binary.c2
-rw-r--r--kernel/time/tick-sched.c6
-rw-r--r--kernel/time/timer_stats.c6
-rw-r--r--kernel/trace/Makefile3
-rw-r--r--kernel/trace/ring_buffer.c56
-rw-r--r--kernel/trace/rpm-traces.c20
-rw-r--r--kernel/trace/trace.c10
-rw-r--r--kernel/trace/trace_irqsoff.c6
-rw-r--r--lib/Kconfig.debug44
-rw-r--r--lib/atomic64.c66
-rw-r--r--lib/dynamic_debug.c174
-rw-r--r--lib/hexdump.c15
-rw-r--r--lib/idr.c2
-rw-r--r--lib/kobject_uevent.c2
-rw-r--r--lib/percpu_counter.c18
-rw-r--r--lib/proportions.c12
-rw-r--r--lib/ratelimit.c4
-rw-r--r--lib/rwsem-spinlock.c38
-rw-r--r--lib/rwsem.c14
-rw-r--r--mm/migrate.c8
-rw-r--r--mm/page_cgroup.c3
-rw-r--r--mm/shmem.c4
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/802/garp.c4
-rw-r--r--net/802/stp.c4
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/8021q/vlan_dev.c4
-rw-r--r--net/9p/client.c469
-rw-r--r--net/9p/protocol.c99
-rw-r--r--net/9p/protocol.h4
-rw-r--r--net/9p/trans_common.c53
-rw-r--r--net/9p/trans_common.h21
-rw-r--r--net/9p/trans_virtio.c319
-rw-r--r--net/appletalk/ddp.c5
-rw-r--r--net/atm/lec.c2
-rw-r--r--net/batman-adv/Makefile2
-rw-r--r--net/batman-adv/aggregation.c293
-rw-r--r--net/batman-adv/aggregation.h46
-rw-r--r--net/batman-adv/bat_iv_ogm.c1170
-rw-r--r--net/batman-adv/bat_ogm.h35
-rw-r--r--net/batman-adv/bat_sysfs.c2
-rw-r--r--net/batman-adv/bitarray.c6
-rw-r--r--net/batman-adv/gateway_client.c10
-rw-r--r--net/batman-adv/hard-interface.c88
-rw-r--r--net/batman-adv/hard-interface.h1
-rw-r--r--net/batman-adv/hash.h25
-rw-r--r--net/batman-adv/main.c4
-rw-r--r--net/batman-adv/main.h8
-rw-r--r--net/batman-adv/originator.c21
-rw-r--r--net/batman-adv/packet.h19
-rw-r--r--net/batman-adv/routing.c669
-rw-r--r--net/batman-adv/routing.h17
-rw-r--r--net/batman-adv/send.c313
-rw-r--r--net/batman-adv/send.h9
-rw-r--r--net/batman-adv/soft-interface.c36
-rw-r--r--net/batman-adv/translation-table.c206
-rw-r--r--net/batman-adv/translation-table.h21
-rw-r--r--net/batman-adv/types.h5
-rw-r--r--net/batman-adv/unicast.c6
-rw-r--r--net/batman-adv/unicast.h2
-rw-r--r--net/batman-adv/vis.c10
-rw-r--r--net/bluetooth/af_bluetooth.c30
-rw-r--r--net/bluetooth/bnep/core.c5
-rw-r--r--net/bluetooth/bnep/netdev.c2
-rw-r--r--net/bluetooth/cmtp/core.c5
-rw-r--r--net/bluetooth/hci_conn.c16
-rw-r--r--net/bluetooth/hci_core.c57
-rw-r--r--net/bluetooth/hci_event.c36
-rw-r--r--net/bluetooth/hci_sock.c18
-rw-r--r--net/bluetooth/hci_sysfs.c2
-rw-r--r--net/bluetooth/hidp/core.c13
-rw-r--r--net/bluetooth/l2cap_core.c273
-rw-r--r--net/bluetooth/l2cap_sock.c4
-rw-r--r--net/bluetooth/mgmt.c212
-rw-r--r--net/bluetooth/rfcomm/core.c5
-rw-r--r--net/bluetooth/rfcomm/sock.c3
-rw-r--r--net/bluetooth/sco.c5
-rw-r--r--net/bluetooth/smp.c421
-rw-r--r--net/bridge/br_device.c7
-rw-r--r--net/bridge/br_fdb.c23
-rw-r--r--net/bridge/br_if.c50
-rw-r--r--net/bridge/br_input.c33
-rw-r--r--net/bridge/br_netlink.c1
-rw-r--r--net/bridge/br_private.h8
-rw-r--r--net/bridge/br_sysfs_br.c34
-rw-r--r--net/bridge/netfilter/ebtable_broute.c4
-rw-r--r--net/caif/caif_dev.c5
-rw-r--r--net/caif/cfcnfg.c38
-rw-r--r--net/caif/cfctrl.c23
-rw-r--r--net/caif/cfdbgl.c7
-rw-r--r--net/caif/cfdgml.c7
-rw-r--r--net/caif/cffrml.c7
-rw-r--r--net/caif/cfmuxl.c6
-rw-r--r--net/caif/cfrfml.c7
-rw-r--r--net/caif/cfserl.c7
-rw-r--r--net/caif/cfsrvl.c8
-rw-r--r--net/caif/cfutill.c7
-rw-r--r--net/caif/cfveil.c7
-rw-r--r--net/caif/cfvidl.c7
-rw-r--r--net/can/Kconfig11
-rw-r--r--net/can/Makefile3
-rw-r--r--net/can/af_can.c6
-rw-r--r--net/can/af_can.h2
-rw-r--r--net/can/bcm.c2
-rw-r--r--net/can/gw.c957
-rw-r--r--net/can/proc.c2
-rw-r--r--net/can/raw.c2
-rw-r--r--net/core/datagram.c24
-rw-r--r--net/core/dev.c339
-rw-r--r--net/core/dev_addr_lists.c4
-rw-r--r--net/core/dst.c15
-rw-r--r--net/core/ethtool.c20
-rw-r--r--net/core/fib_rules.c9
-rw-r--r--net/core/filter.c4
-rw-r--r--net/core/flow.c14
-rw-r--r--net/core/kmap_skb.h2
-rw-r--r--net/core/link_watch.c9
-rw-r--r--net/core/neighbour.c44
-rw-r--r--net/core/net-sysfs.c12
-rw-r--r--net/core/netpoll.c4
-rw-r--r--net/core/pktgen.c25
-rw-r--r--net/core/rtnetlink.c34
-rw-r--r--net/core/scm.c10
-rw-r--r--net/core/secure_seq.c2
-rw-r--r--net/core/skbuff.c174
-rw-r--r--net/core/sock.c24
-rw-r--r--net/core/timestamping.c12
-rw-r--r--net/core/user_dma.c6
-rw-r--r--net/dcb/dcbnl.c30
-rw-r--r--net/dccp/ccids/ccid2.c84
-rw-r--r--net/dccp/ccids/ccid2.h6
-rw-r--r--net/dccp/dccp.h1
-rw-r--r--net/dccp/feat.c202
-rw-r--r--net/dccp/feat.h1
-rw-r--r--net/dccp/proto.c1
-rw-r--r--net/decnet/dn_dev.c6
-rw-r--r--net/dsa/slave.c3
-rw-r--r--net/ieee802154/6lowpan.c891
-rw-r--r--net/ieee802154/6lowpan.h212
-rw-r--r--net/ieee802154/Kconfig6
-rw-r--r--net/ieee802154/Makefile8
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/cipso_ipv4.c2
-rw-r--r--net/ipv4/devinet.c6
-rw-r--r--net/ipv4/fib_trie.c12
-rw-r--r--net/ipv4/gre.c4
-rw-r--r--net/ipv4/icmp.c5
-rw-r--r--net/ipv4/igmp.c12
-rw-r--r--net/ipv4/inet_diag.c5
-rw-r--r--net/ipv4/inet_lro.c10
-rw-r--r--net/ipv4/inet_timewait_sock.c1
-rw-r--r--net/ipv4/ip_fragment.c40
-rw-r--r--net/ipv4/ip_gre.c2
-rw-r--r--net/ipv4/ip_output.c17
-rw-r--r--net/ipv4/ip_sockglue.c7
-rw-r--r--net/ipv4/ipip.c10
-rw-r--r--net/ipv4/ipmr.c8
-rw-r--r--net/ipv4/netfilter/nf_nat_amanda.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c24
-rw-r--r--net/ipv4/netfilter/nf_nat_ftp.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c36
-rw-r--r--net/ipv4/netfilter/nf_nat_irc.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c16
-rw-r--r--net/ipv4/netfilter/nf_nat_sip.c28
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_tftp.c4
-rw-r--r--net/ipv4/route.c53
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/tcp.c97
-rw-r--r--net/ipv4/tcp_input.c248
-rw-r--r--net/ipv4/tcp_ipv4.c68
-rw-r--r--net/ipv4/tcp_minisocks.c5
-rw-r--r--net/ipv4/tcp_output.c152
-rw-r--r--net/ipv4/tcp_timer.c2
-rw-r--r--net/ipv4/udp.c11
-rw-r--r--net/ipv4/xfrm4_policy.c14
-rw-r--r--net/ipv6/addrconf.c81
-rw-r--r--net/ipv6/af_inet6.c1
-rw-r--r--net/ipv6/datagram.c4
-rw-r--r--net/ipv6/exthdrs.c7
-rw-r--r--net/ipv6/icmp.c28
-rw-r--r--net/ipv6/inet6_connection_sock.c9
-rw-r--r--net/ipv6/ip6_fib.c4
-rw-r--r--net/ipv6/ip6_output.c28
-rw-r--r--net/ipv6/ip6_tunnel.c54
-rw-r--r--net/ipv6/ipv6_sockglue.c2
-rw-r--r--net/ipv6/ndisc.c42
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c4
-rw-r--r--net/ipv6/raw.c11
-rw-r--r--net/ipv6/reassembly.c4
-rw-r--r--net/ipv6/sit.c12
-rw-r--r--net/ipv6/syncookies.c6
-rw-r--r--net/ipv6/tcp_ipv6.c56
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/ipv6/xfrm6_output.c56
-rw-r--r--net/irda/ircomm/ircomm_tty.c2
-rw-r--r--net/irda/irlan/irlan_eth.c2
-rw-r--r--net/iucv/Kconfig14
-rw-r--r--net/iucv/af_iucv.c870
-rw-r--r--net/iucv/iucv.c23
-rw-r--r--net/l2tp/l2tp_core.c4
-rw-r--r--net/l2tp/l2tp_ppp.c9
-rw-r--r--net/lapb/lapb_iface.c29
-rw-r--r--net/mac80211/Kconfig25
-rw-r--r--net/mac80211/agg-rx.c25
-rw-r--r--net/mac80211/agg-tx.c64
-rw-r--r--net/mac80211/cfg.c497
-rw-r--r--net/mac80211/debugfs.c71
-rw-r--r--net/mac80211/debugfs_netdev.c59
-rw-r--r--net/mac80211/debugfs_sta.c37
-rw-r--r--net/mac80211/driver-ops.h91
-rw-r--r--net/mac80211/driver-trace.h117
-rw-r--r--net/mac80211/ht.c8
-rw-r--r--net/mac80211/ibss.c16
-rw-r--r--net/mac80211/ieee80211_i.h104
-rw-r--r--net/mac80211/iface.c26
-rw-r--r--net/mac80211/key.c4
-rw-r--r--net/mac80211/main.c27
-rw-r--r--net/mac80211/mesh.c213
-rw-r--r--net/mac80211/mesh.h38
-rw-r--r--net/mac80211/mesh_hwmp.c177
-rw-r--r--net/mac80211/mesh_pathtbl.c481
-rw-r--r--net/mac80211/mesh_plink.c257
-rw-r--r--net/mac80211/mlme.c140
-rw-r--r--net/mac80211/pm.c2
-rw-r--r--net/mac80211/rate.c37
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c13
-rw-r--r--net/mac80211/rx.c199
-rw-r--r--net/mac80211/scan.c6
-rw-r--r--net/mac80211/spectmgmt.c6
-rw-r--r--net/mac80211/sta_info.c977
-rw-r--r--net/mac80211/sta_info.h171
-rw-r--r--net/mac80211/status.c251
-rw-r--r--net/mac80211/tx.c557
-rw-r--r--net/mac80211/util.c280
-rw-r--r--net/mac80211/wme.c20
-rw-r--r--net/mac80211/wme.h3
-rw-r--r--net/mac80211/work.c10
-rw-r--r--net/mac80211/wpa.c3
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/core.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c133
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c6
-rw-r--r--net/netfilter/nf_conntrack_core.c12
-rw-r--r--net/netfilter/nf_conntrack_ecache.c8
-rw-r--r--net/netfilter/nf_conntrack_extend.c4
-rw-r--r--net/netfilter/nf_conntrack_helper.c6
-rw-r--r--net/netfilter/nf_conntrack_netlink.c6
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c4
-rw-r--r--net/netfilter/nf_log.c10
-rw-r--r--net/netfilter/nf_queue.c6
-rw-r--r--net/netfilter/nfnetlink.c6
-rw-r--r--net/netfilter/x_tables.c5
-rw-r--r--net/netlabel/netlabel_domainhash.c6
-rw-r--r--net/netlabel/netlabel_unlabeled.c6
-rw-r--r--net/netlink/af_netlink.c7
-rw-r--r--net/nfc/Kconfig2
-rw-r--r--net/nfc/Makefile1
-rw-r--r--net/nfc/core.c83
-rw-r--r--net/nfc/nci/Kconfig10
-rw-r--r--net/nfc/nci/Makefile7
-rw-r--r--net/nfc/nci/core.c797
-rw-r--r--net/nfc/nci/data.c247
-rw-r--r--net/nfc/nci/lib.c94
-rw-r--r--net/nfc/nci/ntf.c258
-rw-r--r--net/nfc/nci/rsp.c226
-rw-r--r--net/nfc/netlink.c56
-rw-r--r--net/nfc/nfc.h6
-rw-r--r--net/nfc/rawsock.c13
-rw-r--r--net/packet/af_packet.c987
-rw-r--r--net/phonet/af_phonet.c4
-rw-r--r--net/phonet/pn_dev.c6
-rw-r--r--net/phonet/socket.c6
-rw-r--r--net/rds/Kconfig1
-rw-r--r--net/rds/ib_cm.c6
-rw-r--r--net/rds/ib_rdma.c112
-rw-r--r--net/rds/xlist.h80
-rw-r--r--net/rfkill/core.c2
-rw-r--r--net/rfkill/rfkill-gpio.c11
-rw-r--r--net/rfkill/rfkill-regulator.c1
-rw-r--r--net/sched/cls_flow.c188
-rw-r--r--net/sched/sch_sfb.c13
-rw-r--r--net/sctp/associola.c1
-rw-r--r--net/sctp/outqueue.c4
-rw-r--r--net/sctp/protocol.c2
-rw-r--r--net/sctp/sm_make_chunk.c1
-rw-r--r--net/sctp/sm_statefuns.c5
-rw-r--r--net/socket.c4
-rw-r--r--net/sunrpc/addr.c6
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c28
-rw-r--r--net/sunrpc/clnt.c4
-rw-r--r--net/sunrpc/rpc_pipe.c23
-rw-r--r--net/sunrpc/rpcb_clnt.c6
-rw-r--r--net/sunrpc/svc.c33
-rw-r--r--net/sunrpc/svc_xprt.c13
-rw-r--r--net/sunrpc/svcsock.c23
-rw-r--r--net/tipc/bcast.c111
-rw-r--r--net/tipc/bcast.h1
-rw-r--r--net/tipc/bearer.c8
-rw-r--r--net/tipc/bearer.h4
-rw-r--r--net/tipc/config.h1
-rw-r--r--net/tipc/discover.c6
-rw-r--r--net/tipc/eth_media.c32
-rw-r--r--net/tipc/link.c111
-rw-r--r--net/tipc/link.h1
-rw-r--r--net/tipc/name_distr.c35
-rw-r--r--net/tipc/net.c11
-rw-r--r--net/tipc/node.c45
-rw-r--r--net/tipc/node.h10
-rw-r--r--net/tipc/socket.c51
-rw-r--r--net/tipc/subscr.c3
-rw-r--r--net/tipc/subscr.h6
-rw-r--r--net/unix/af_unix.c24
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/core.h6
-rw-r--r--net/wireless/lib80211.c15
-rw-r--r--net/wireless/lib80211_crypt_ccmp.c2
-rw-r--r--net/wireless/lib80211_crypt_tkip.c4
-rw-r--r--net/wireless/lib80211_crypt_wep.c4
-rw-r--r--net/wireless/mesh.c3
-rw-r--r--net/wireless/mlme.c16
-rw-r--r--net/wireless/nl80211.c405
-rw-r--r--net/wireless/nl80211.h4
-rw-r--r--net/wireless/reg.c47
-rw-r--r--net/wireless/reg.h2
-rw-r--r--net/wireless/scan.c28
-rw-r--r--net/wireless/sme.c19
-rw-r--r--net/wireless/util.c194
-rw-r--r--net/wireless/wext-compat.c137
-rw-r--r--net/wireless/wext-compat.h8
-rw-r--r--net/wireless/wext-sme.c3
-rw-r--r--net/x25/af_x25.c40
-rw-r--r--net/x25/x25_dev.c6
-rw-r--r--net/x25/x25_facilities.c10
-rw-r--r--net/x25/x25_in.c43
-rw-r--r--net/x25/x25_link.c3
-rw-r--r--net/x25/x25_subr.c14
-rw-r--r--net/xfrm/xfrm_ipcomp.c13
-rw-r--r--net/xfrm/xfrm_replay.c98
-rw-r--r--net/xfrm/xfrm_user.c4
-rwxr-xr-xscripts/checkpatch.pl4
-rw-r--r--scripts/mod/file2alias.c25
-rw-r--r--security/Kconfig6
-rw-r--r--security/Makefile4
-rw-r--r--security/apparmor/apparmorfs.c2
-rw-r--r--security/apparmor/ipc.c1
-rw-r--r--security/apparmor/lib.c1
-rw-r--r--security/apparmor/policy_unpack.c12
-rw-r--r--security/apparmor/procattr.c1
-rw-r--r--security/commoncap.c16
-rw-r--r--security/integrity/Kconfig7
-rw-r--r--security/integrity/Makefile12
-rw-r--r--security/integrity/evm/Kconfig13
-rw-r--r--security/integrity/evm/Makefile7
-rw-r--r--security/integrity/evm/evm.h38
-rw-r--r--security/integrity/evm/evm_crypto.c216
-rw-r--r--security/integrity/evm/evm_main.c384
-rw-r--r--security/integrity/evm/evm_posix_acl.c26
-rw-r--r--security/integrity/evm/evm_secfs.c108
-rw-r--r--security/integrity/iint.c172
-rw-r--r--security/integrity/ima/Kconfig1
-rw-r--r--security/integrity/ima/Makefile2
-rw-r--r--security/integrity/ima/ima.h30
-rw-r--r--security/integrity/ima/ima_api.c7
-rw-r--r--security/integrity/ima/ima_fs.c2
-rw-r--r--security/integrity/ima/ima_iint.c169
-rw-r--r--security/integrity/ima/ima_main.c13
-rw-r--r--security/integrity/integrity.h50
-rw-r--r--security/keys/Makefile2
-rw-r--r--security/keys/encrypted-keys/Makefile6
-rw-r--r--security/keys/encrypted-keys/ecryptfs_format.c (renamed from security/keys/ecryptfs_format.c)0
-rw-r--r--security/keys/encrypted-keys/ecryptfs_format.h (renamed from security/keys/ecryptfs_format.h)0
-rw-r--r--security/keys/encrypted-keys/encrypted.c (renamed from security/keys/encrypted.c)49
-rw-r--r--security/keys/encrypted-keys/encrypted.h (renamed from security/keys/encrypted.h)11
-rw-r--r--security/keys/encrypted-keys/masterkey_trusted.c45
-rw-r--r--security/keys/gc.c386
-rw-r--r--security/keys/internal.h4
-rw-r--r--security/keys/key.c121
-rw-r--r--security/keys/keyring.c3
-rw-r--r--security/keys/process_keys.c16
-rw-r--r--security/keys/trusted.c19
-rw-r--r--security/security.c77
-rw-r--r--security/selinux/exports.c1
-rw-r--r--security/selinux/hooks.c13
-rw-r--r--security/selinux/include/avc_ss.h6
-rw-r--r--security/selinux/include/security.h8
-rw-r--r--security/selinux/netlink.c2
-rw-r--r--security/selinux/nlmsgtab.c1
-rw-r--r--security/selinux/selinuxfs.c5
-rw-r--r--security/selinux/ss/conditional.c2
-rw-r--r--security/selinux/ss/conditional.h1
-rw-r--r--security/selinux/ss/policydb.c2
-rw-r--r--security/selinux/ss/services.c3
-rw-r--r--security/smack/smack.h24
-rw-r--r--security/smack/smack_access.c134
-rw-r--r--security/smack/smack_lsm.c266
-rw-r--r--security/smack/smackfs.c277
-rw-r--r--security/tomoyo/Kconfig2
-rw-r--r--security/tomoyo/Makefile4
-rw-r--r--security/tomoyo/audit.c7
-rw-r--r--security/tomoyo/common.c228
-rw-r--r--security/tomoyo/common.h189
-rw-r--r--security/tomoyo/condition.c71
-rw-r--r--security/tomoyo/domain.c209
-rw-r--r--security/tomoyo/environ.c122
-rw-r--r--security/tomoyo/file.c42
-rw-r--r--security/tomoyo/gc.c540
-rw-r--r--security/tomoyo/group.c61
-rw-r--r--security/tomoyo/memory.c39
-rw-r--r--security/tomoyo/network.c771
-rw-r--r--security/tomoyo/realpath.c32
-rw-r--r--security/tomoyo/securityfs_if.c123
-rw-r--r--security/tomoyo/tomoyo.c62
-rw-r--r--security/tomoyo/util.c80
-rw-r--r--sound/core/memalloc.c4
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/oss/Kconfig4
-rw-r--r--sound/pci/hda/hda_intel.c1
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/soc/davinci/Kconfig1
-rw-r--r--sound/usb/card.c2
-rw-r--r--tools/hv/hv_kvp_daemon.c (renamed from drivers/staging/hv/tools/hv_kvp_daemon.c)19
-rw-r--r--tools/perf/util/config.c4
-rw-r--r--virt/kvm/assigned-dev.c2
-rw-r--r--virt/kvm/iommu.c4
4327 files changed, 359917 insertions, 308021 deletions
diff --git a/Documentation/ABI/removed/o2cb b/Documentation/ABI/removed/o2cb
index 7f5daa46509..20c91adca6d 100644
--- a/Documentation/ABI/removed/o2cb
+++ b/Documentation/ABI/removed/o2cb
@@ -1,6 +1,6 @@
What: /sys/o2cb symlink
Date: May 2011
-KernelVersion: 2.6.40
+KernelVersion: 3.0
Contact: ocfs2-devel@oss.oracle.com
Description: This is a symlink: /sys/o2cb to /sys/fs/o2cb. The symlink is
removed when new versions of ocfs2-tools which know to look
diff --git a/Documentation/ABI/removed/raw1394 b/Documentation/ABI/removed/raw1394
index 490aa1efc4a..ec333e67632 100644
--- a/Documentation/ABI/removed/raw1394
+++ b/Documentation/ABI/removed/raw1394
@@ -5,7 +5,7 @@ Description:
/dev/raw1394 was a character device file that allowed low-level
access to FireWire buses. Its major drawbacks were its inability
to implement sensible device security policies, and its low level
- of abstraction that required userspace clients do duplicate much
+ of abstraction that required userspace clients to duplicate much
of the kernel's ieee1394 core functionality.
Replaced by /dev/fw*, i.e. the <linux/firewire-cdev.h> ABI of
firewire-core.
diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
new file mode 100644
index 00000000000..8374d4557e5
--- /dev/null
+++ b/Documentation/ABI/testing/evm
@@ -0,0 +1,23 @@
+What: security/evm
+Date: March 2011
+Contact: Mimi Zohar <zohar@us.ibm.com>
+Description:
+ EVM protects a file's security extended attributes(xattrs)
+ against integrity attacks. The initial method maintains an
+ HMAC-sha1 value across the extended attributes, storing the
+ value as the extended attribute 'security.evm'.
+
+ EVM depends on the Kernel Key Retention System to provide it
+ with a trusted/encrypted key for the HMAC-sha1 operation.
+ The key is loaded onto the root's keyring using keyctl. Until
+ EVM receives notification that the key has been successfully
+ loaded onto the keyring (echo 1 > <securityfs>/evm), EVM
+ can not create or validate the 'security.evm' xattr, but
+ returns INTEGRITY_UNKNOWN. Loading the key and signaling EVM
+ should be done as early as possible. Normally this is done
+ in the initramfs, which has already been measured as part
+ of the trusted boot. For more information on creating and
+ loading existing trusted/encrypted keys, refer to:
+ Documentation/keys-trusted-encrypted.txt. (A sample dracut
+ patch, which loads the trusted/encrypted key and enables
+ EVM, is available from http://linux-ima.sourceforge.net/#EVM.)
diff --git a/Documentation/ABI/testing/sysfs-bus-bcma b/Documentation/ABI/testing/sysfs-bus-bcma
index 06b62badddd..721b4aea302 100644
--- a/Documentation/ABI/testing/sysfs-bus-bcma
+++ b/Documentation/ABI/testing/sysfs-bus-bcma
@@ -1,6 +1,6 @@
What: /sys/bus/bcma/devices/.../manuf
Date: May 2011
-KernelVersion: 2.6.40
+KernelVersion: 3.0
Contact: Rafał Miłecki <zajec5@gmail.com>
Description:
Each BCMA core has it's manufacturer id. See
@@ -8,7 +8,7 @@ Description:
What: /sys/bus/bcma/devices/.../id
Date: May 2011
-KernelVersion: 2.6.40
+KernelVersion: 3.0
Contact: Rafał Miłecki <zajec5@gmail.com>
Description:
There are a few types of BCMA cores, they can be identified by
@@ -16,7 +16,7 @@ Description:
What: /sys/bus/bcma/devices/.../rev
Date: May 2011
-KernelVersion: 2.6.40
+KernelVersion: 3.0
Contact: Rafał Miłecki <zajec5@gmail.com>
Description:
BCMA cores of the same type can still slightly differ depending
@@ -24,7 +24,7 @@ Description:
What: /sys/bus/bcma/devices/.../class
Date: May 2011
-KernelVersion: 2.6.40
+KernelVersion: 3.0
Contact: Rafał Miłecki <zajec5@gmail.com>
Description:
Each BCMA core is identified by few fields, including class it
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-drivers-ehci_hcd b/Documentation/ABI/testing/sysfs-bus-pci-drivers-ehci_hcd
new file mode 100644
index 00000000000..60c60fa624b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-pci-drivers-ehci_hcd
@@ -0,0 +1,46 @@
+What: /sys/bus/pci/drivers/ehci_hcd/.../companion
+ /sys/bus/usb/devices/usbN/../companion
+Date: January 2007
+KernelVersion: 2.6.21
+Contact: Alan Stern <stern@rowland.harvard.edu>
+Description:
+ PCI-based EHCI USB controllers (i.e., high-speed USB-2.0
+ controllers) are often implemented along with a set of
+ "companion" full/low-speed USB-1.1 controllers. When a
+ high-speed device is plugged in, the connection is routed
+ to the EHCI controller; when a full- or low-speed device
+ is plugged in, the connection is routed to the companion
+ controller.
+
+ Sometimes you want to force a high-speed device to connect
+ at full speed, which can be accomplished by forcing the
+ connection to be routed to the companion controller.
+ That's what this file does. Writing a port number to the
+ file causes connections on that port to be routed to the
+ companion controller, and writing the negative of a port
+ number returns the port to normal operation.
+
+ For example: To force the high-speed device attached to
+ port 4 on bus 2 to run at full speed:
+
+ echo 4 >/sys/bus/usb/devices/usb2/../companion
+
+ To return the port to high-speed operation:
+
+ echo -4 >/sys/bus/usb/devices/usb2/../companion
+
+ Reading the file gives the list of ports currently forced
+ to the companion controller.
+
+ Note: Some EHCI controllers do not have companions; they
+ may contain an internal "transaction translator" or they
+ may be attached directly to a "rate-matching hub". This
+ mechanism will not work with such controllers. Also, it
+ cannot be used to force a port on a high-speed hub to
+ connect at full speed.
+
+ Note: When this file was first added, it appeared in a
+ different sysfs directory. The location given above is
+ correct for 2.6.35 (and probably several earlier kernel
+ versions as well).
+
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 294aa864a60..e647378e9e8 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -142,3 +142,18 @@ Description:
such devices.
Users:
usb_modeswitch
+
+What: /sys/bus/usb/devices/.../power/usb2_hardware_lpm
+Date: September 2011
+Contact: Andiry Xu <andiry.xu@amd.com>
+Description:
+ If CONFIG_USB_SUSPEND is set and a USB 2.0 lpm-capable device
+ is plugged in to a xHCI host which support link PM, it will
+ perform a LPM test; if the test is passed and host supports
+ USB2 hardware LPM (xHCI 1.0 feature), USB2 hardware LPM will
+ be enabled for the device and the USB device directory will
+ contain a file named power/usb2_hardware_lpm. The file holds
+ a string value (enable or disable) indicating whether or not
+ USB2 hardware LPM is enabled for the device. Developer can
+ write y/Y/1 or n/N/0 to the file to enable/disable the
+ feature.
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870 b/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
index aa11dbdd794..4a9c545bda4 100644
--- a/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
+++ b/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
@@ -4,8 +4,8 @@ What: /sys/class/backlight/<backlight>/l2_bright_max
What: /sys/class/backlight/<backlight>/l3_office_max
What: /sys/class/backlight/<backlight>/l4_indoor_max
What: /sys/class/backlight/<backlight>/l5_dark_max
-Date: Mai 2011
-KernelVersion: 2.6.40
+Date: May 2011
+KernelVersion: 3.0
Contact: device-drivers-devel@blackfin.uclinux.org
Description:
Control the maximum brightness for <ambient light zone>
@@ -18,8 +18,8 @@ What: /sys/class/backlight/<backlight>/l2_bright_dim
What: /sys/class/backlight/<backlight>/l3_office_dim
What: /sys/class/backlight/<backlight>/l4_indoor_dim
What: /sys/class/backlight/<backlight>/l5_dark_dim
-Date: Mai 2011
-KernelVersion: 2.6.40
+Date: May 2011
+KernelVersion: 3.0
Contact: device-drivers-devel@blackfin.uclinux.org
Description:
Control the dim brightness for <ambient light zone>
@@ -29,8 +29,8 @@ Description:
this <ambient light zone>.
What: /sys/class/backlight/<backlight>/ambient_light_level
-Date: Mai 2011
-KernelVersion: 2.6.40
+Date: May 2011
+KernelVersion: 3.0
Contact: device-drivers-devel@blackfin.uclinux.org
Description:
Get conversion value of the light sensor.
@@ -39,8 +39,8 @@ Description:
8000 (max ambient brightness)
What: /sys/class/backlight/<backlight>/ambient_light_zone
-Date: Mai 2011
-KernelVersion: 2.6.40
+Date: May 2011
+KernelVersion: 3.0
Contact: device-drivers-devel@blackfin.uclinux.org
Description:
Get/Set current ambient light zone. Reading returns
diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq
new file mode 100644
index 00000000000..23d78b5aab1
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-devfreq
@@ -0,0 +1,52 @@
+What: /sys/class/devfreq/.../
+Date: September 2011
+Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
+Description:
+ Provide a place in sysfs for the devfreq objects.
+ This allows accessing various devfreq specific variables.
+ The name of devfreq object denoted as ... is same as the
+ name of device using devfreq.
+
+What: /sys/class/devfreq/.../governor
+Date: September 2011
+Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
+Description:
+ The /sys/class/devfreq/.../governor shows the name of the
+ governor used by the corresponding devfreq object.
+
+What: /sys/class/devfreq/.../cur_freq
+Date: September 2011
+Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
+Description:
+ The /sys/class/devfreq/.../cur_freq shows the current
+ frequency of the corresponding devfreq object.
+
+What: /sys/class/devfreq/.../central_polling
+Date: September 2011
+Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
+Description:
+ The /sys/class/devfreq/.../central_polling shows whether
+ the devfreq ojbect is using devfreq-provided central
+ polling mechanism or not.
+
+What: /sys/class/devfreq/.../polling_interval
+Date: September 2011
+Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
+Description:
+ The /sys/class/devfreq/.../polling_interval shows and sets
+ the requested polling interval of the corresponding devfreq
+ object. The values are represented in ms. If the value is
+ less than 1 jiffy, it is considered to be 0, which means
+ no polling. This value is meaningless if the governor is
+ not polling; thus. If the governor is not using
+ devfreq-provided central polling
+ (/sys/class/devfreq/.../central_polling is 0), this value
+ may be useless.
+
+What: /sys/class/devfreq/.../userspace/set_freq
+Date: September 2011
+Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
+Description:
+ The /sys/class/devfreq/.../userspace/set_freq shows and
+ sets the requested frequency for the devfreq object if
+ userspace governor is in effect.
diff --git a/Documentation/ABI/testing/sysfs-class-net-mesh b/Documentation/ABI/testing/sysfs-class-net-mesh
index 748fe1701d2..b02001488ee 100644
--- a/Documentation/ABI/testing/sysfs-class-net-mesh
+++ b/Documentation/ABI/testing/sysfs-class-net-mesh
@@ -22,6 +22,14 @@ Description:
mesh will be fragmented or silently discarded if the
packet size exceeds the outgoing interface MTU.
+What: /sys/class/net/<mesh_iface>/mesh/ap_isolation
+Date: May 2011
+Contact: Antonio Quartulli <ordex@autistici.org>
+Description:
+ Indicates whether the data traffic going from a
+ wireless client to another wireless client will be
+ silently dropped.
+
What: /sys/class/net/<mesh_iface>/mesh/gw_bandwidth
Date: October 2010
Contact: Marek Lindner <lindner_marek@yahoo.de>
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff b/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
new file mode 100644
index 00000000000..9aec8ef228b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
@@ -0,0 +1,7 @@
+What: /sys/module/hid_logitech/drivers/hid:logitech/<dev>/range.
+Date: July 2011
+KernelVersion: 3.2
+Contact: Michal Malý <madcatxster@gmail.com>
+Description: Display minimum, maximum and current range of the steering
+ wheel. Writing a value within min and max boundaries sets the
+ range of the wheel.
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index 445289cd0e6..2014155c899 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -433,8 +433,18 @@
Insert notes about VLAN interfaces with hw crypto here or
in the hw crypto chapter.
</para>
+ <section id="ps-client">
+ <title>support for powersaving clients</title>
+!Pinclude/net/mac80211.h AP support for powersaving clients
+ </section>
!Finclude/net/mac80211.h ieee80211_get_buffered_bc
!Finclude/net/mac80211.h ieee80211_beacon_get
+!Finclude/net/mac80211.h ieee80211_sta_eosp_irqsafe
+!Finclude/net/mac80211.h ieee80211_frame_release_type
+!Finclude/net/mac80211.h ieee80211_sta_ps_transition
+!Finclude/net/mac80211.h ieee80211_sta_ps_transition_ni
+!Finclude/net/mac80211.h ieee80211_sta_set_buffered
+!Finclude/net/mac80211.h ieee80211_sta_block_awake
</chapter>
<chapter id="multi-iface">
@@ -460,7 +470,6 @@
!Finclude/net/mac80211.h sta_notify_cmd
!Finclude/net/mac80211.h ieee80211_find_sta
!Finclude/net/mac80211.h ieee80211_find_sta_by_ifaddr
-!Finclude/net/mac80211.h ieee80211_sta_block_awake
</chapter>
<chapter id="hardware-scan-offload">
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index 7c4b514d62b..54883de5d5f 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -529,7 +529,7 @@ memory (e.g. allocated with <function>kmalloc()</function>). There's also
</para></listitem>
<listitem><para>
-<varname>unsigned long addr</varname>: Required if the mapping is used.
+<varname>phys_addr_t addr</varname>: Required if the mapping is used.
Fill in the address of your memory block. This address is the one that
appears in sysfs.
</para></listitem>
diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt
index 6148d4080f8..aa09e5476bb 100644
--- a/Documentation/PCI/pci.txt
+++ b/Documentation/PCI/pci.txt
@@ -314,7 +314,7 @@ from the PCI device config space. Use the values in the pci_dev structure
as the PCI "bus address" might have been remapped to a "host physical"
address by the arch/chip-set specific kernel support.
-See Documentation/IO-mapping.txt for how to access device registers
+See Documentation/io-mapping.txt for how to access device registers
or device memory.
The device driver needs to call pci_request_region() to verify
diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt
index bf82851a0e5..687777f83b2 100644
--- a/Documentation/RCU/NMI-RCU.txt
+++ b/Documentation/RCU/NMI-RCU.txt
@@ -95,7 +95,7 @@ not to return until all ongoing NMI handlers exit. It is therefore safe
to free up the handler's data as soon as synchronize_sched() returns.
Important note: for this to work, the architecture in question must
-invoke irq_enter() and irq_exit() on NMI entry and exit, respectively.
+invoke nmi_enter() and nmi_exit() on NMI entry and exit, respectively.
Answer to Quick Quiz
diff --git a/Documentation/RCU/lockdep-splat.txt b/Documentation/RCU/lockdep-splat.txt
new file mode 100644
index 00000000000..bf906114282
--- /dev/null
+++ b/Documentation/RCU/lockdep-splat.txt
@@ -0,0 +1,110 @@
+Lockdep-RCU was added to the Linux kernel in early 2010
+(http://lwn.net/Articles/371986/). This facility checks for some common
+misuses of the RCU API, most notably using one of the rcu_dereference()
+family to access an RCU-protected pointer without the proper protection.
+When such misuse is detected, an lockdep-RCU splat is emitted.
+
+The usual cause of a lockdep-RCU slat is someone accessing an
+RCU-protected data structure without either (1) being in the right kind of
+RCU read-side critical section or (2) holding the right update-side lock.
+This problem can therefore be serious: it might result in random memory
+overwriting or worse. There can of course be false positives, this
+being the real world and all that.
+
+So let's look at an example RCU lockdep splat from 3.0-rc5, one that
+has long since been fixed:
+
+===============================
+[ INFO: suspicious RCU usage. ]
+-------------------------------
+block/cfq-iosched.c:2776 suspicious rcu_dereference_protected() usage!
+
+other info that might help us debug this:
+
+
+rcu_scheduler_active = 1, debug_locks = 0
+3 locks held by scsi_scan_6/1552:
+ #0: (&shost->scan_mutex){+.+.+.}, at: [<ffffffff8145efca>]
+scsi_scan_host_selected+0x5a/0x150
+ #1: (&eq->sysfs_lock){+.+...}, at: [<ffffffff812a5032>]
+elevator_exit+0x22/0x60
+ #2: (&(&q->__queue_lock)->rlock){-.-...}, at: [<ffffffff812b6233>]
+cfq_exit_queue+0x43/0x190
+
+stack backtrace:
+Pid: 1552, comm: scsi_scan_6 Not tainted 3.0.0-rc5 #17
+Call Trace:
+ [<ffffffff810abb9b>] lockdep_rcu_dereference+0xbb/0xc0
+ [<ffffffff812b6139>] __cfq_exit_single_io_context+0xe9/0x120
+ [<ffffffff812b626c>] cfq_exit_queue+0x7c/0x190
+ [<ffffffff812a5046>] elevator_exit+0x36/0x60
+ [<ffffffff812a802a>] blk_cleanup_queue+0x4a/0x60
+ [<ffffffff8145cc09>] scsi_free_queue+0x9/0x10
+ [<ffffffff81460944>] __scsi_remove_device+0x84/0xd0
+ [<ffffffff8145dca3>] scsi_probe_and_add_lun+0x353/0xb10
+ [<ffffffff817da069>] ? error_exit+0x29/0xb0
+ [<ffffffff817d98ed>] ? _raw_spin_unlock_irqrestore+0x3d/0x80
+ [<ffffffff8145e722>] __scsi_scan_target+0x112/0x680
+ [<ffffffff812c690d>] ? trace_hardirqs_off_thunk+0x3a/0x3c
+ [<ffffffff817da069>] ? error_exit+0x29/0xb0
+ [<ffffffff812bcc60>] ? kobject_del+0x40/0x40
+ [<ffffffff8145ed16>] scsi_scan_channel+0x86/0xb0
+ [<ffffffff8145f0b0>] scsi_scan_host_selected+0x140/0x150
+ [<ffffffff8145f149>] do_scsi_scan_host+0x89/0x90
+ [<ffffffff8145f170>] do_scan_async+0x20/0x160
+ [<ffffffff8145f150>] ? do_scsi_scan_host+0x90/0x90
+ [<ffffffff810975b6>] kthread+0xa6/0xb0
+ [<ffffffff817db154>] kernel_thread_helper+0x4/0x10
+ [<ffffffff81066430>] ? finish_task_switch+0x80/0x110
+ [<ffffffff817d9c04>] ? retint_restore_args+0xe/0xe
+ [<ffffffff81097510>] ? __init_kthread_worker+0x70/0x70
+ [<ffffffff817db150>] ? gs_change+0xb/0xb
+
+Line 2776 of block/cfq-iosched.c in v3.0-rc5 is as follows:
+
+ if (rcu_dereference(ioc->ioc_data) == cic) {
+
+This form says that it must be in a plain vanilla RCU read-side critical
+section, but the "other info" list above shows that this is not the
+case. Instead, we hold three locks, one of which might be RCU related.
+And maybe that lock really does protect this reference. If so, the fix
+is to inform RCU, perhaps by changing __cfq_exit_single_io_context() to
+take the struct request_queue "q" from cfq_exit_queue() as an argument,
+which would permit us to invoke rcu_dereference_protected as follows:
+
+ if (rcu_dereference_protected(ioc->ioc_data,
+ lockdep_is_held(&q->queue_lock)) == cic) {
+
+With this change, there would be no lockdep-RCU splat emitted if this
+code was invoked either from within an RCU read-side critical section
+or with the ->queue_lock held. In particular, this would have suppressed
+the above lockdep-RCU splat because ->queue_lock is held (see #2 in the
+list above).
+
+On the other hand, perhaps we really do need an RCU read-side critical
+section. In this case, the critical section must span the use of the
+return value from rcu_dereference(), or at least until there is some
+reference count incremented or some such. One way to handle this is to
+add rcu_read_lock() and rcu_read_unlock() as follows:
+
+ rcu_read_lock();
+ if (rcu_dereference(ioc->ioc_data) == cic) {
+ spin_lock(&ioc->lock);
+ rcu_assign_pointer(ioc->ioc_data, NULL);
+ spin_unlock(&ioc->lock);
+ }
+ rcu_read_unlock();
+
+With this change, the rcu_dereference() is always within an RCU
+read-side critical section, which again would have suppressed the
+above lockdep-RCU splat.
+
+But in this particular case, we don't actually deference the pointer
+returned from rcu_dereference(). Instead, that pointer is just compared
+to the cic pointer, which means that the rcu_dereference() can be replaced
+by rcu_access_pointer() as follows:
+
+ if (rcu_access_pointer(ioc->ioc_data) == cic) {
+
+Because it is legal to invoke rcu_access_pointer() without protection,
+this change would also suppress the above lockdep-RCU splat.
diff --git a/Documentation/RCU/lockdep.txt b/Documentation/RCU/lockdep.txt
index d7a49b2f699..a102d4b3724 100644
--- a/Documentation/RCU/lockdep.txt
+++ b/Documentation/RCU/lockdep.txt
@@ -32,9 +32,27 @@ checking of rcu_dereference() primitives:
srcu_dereference(p, sp):
Check for SRCU read-side critical section.
rcu_dereference_check(p, c):
- Use explicit check expression "c". This is useful in
- code that is invoked by both readers and updaters.
- rcu_dereference_raw(p)
+ Use explicit check expression "c" along with
+ rcu_read_lock_held(). This is useful in code that is
+ invoked by both RCU readers and updaters.
+ rcu_dereference_bh_check(p, c):
+ Use explicit check expression "c" along with
+ rcu_read_lock_bh_held(). This is useful in code that
+ is invoked by both RCU-bh readers and updaters.
+ rcu_dereference_sched_check(p, c):
+ Use explicit check expression "c" along with
+ rcu_read_lock_sched_held(). This is useful in code that
+ is invoked by both RCU-sched readers and updaters.
+ srcu_dereference_check(p, c):
+ Use explicit check expression "c" along with
+ srcu_read_lock_held()(). This is useful in code that
+ is invoked by both SRCU readers and updaters.
+ rcu_dereference_index_check(p, c):
+ Use explicit check expression "c", but the caller
+ must supply one of the rcu_read_lock_held() functions.
+ This is useful in code that uses RCU-protected arrays
+ that is invoked by both RCU readers and updaters.
+ rcu_dereference_raw(p):
Don't check. (Use sparingly, if at all.)
rcu_dereference_protected(p, c):
Use explicit check expression "c", and omit all barriers
@@ -48,13 +66,11 @@ checking of rcu_dereference() primitives:
value of the pointer itself, for example, against NULL.
The rcu_dereference_check() check expression can be any boolean
-expression, but would normally include one of the rcu_read_lock_held()
-family of functions and a lockdep expression. However, any boolean
-expression can be used. For a moderately ornate example, consider
-the following:
+expression, but would normally include a lockdep expression. However,
+any boolean expression can be used. For a moderately ornate example,
+consider the following:
file = rcu_dereference_check(fdt->fd[fd],
- rcu_read_lock_held() ||
lockdep_is_held(&files->file_lock) ||
atomic_read(&files->count) == 1);
@@ -62,7 +78,7 @@ This expression picks up the pointer "fdt->fd[fd]" in an RCU-safe manner,
and, if CONFIG_PROVE_RCU is configured, verifies that this expression
is used in:
-1. An RCU read-side critical section, or
+1. An RCU read-side critical section (implicit), or
2. with files->file_lock held, or
3. on an unshared files_struct.
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index 5d9016795fd..783d6c134d3 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -42,7 +42,7 @@ fqs_holdoff Holdoff time (in microseconds) between consecutive calls
fqs_stutter Wait time (in seconds) between consecutive bursts
of calls to force_quiescent_state().
-irqreaders Says to invoke RCU readers from irq level. This is currently
+irqreader Says to invoke RCU readers from irq level. This is currently
done via timers. Defaults to "1" for variants of RCU that
permit this. (Or, more accurately, variants of RCU that do
-not- permit this know to ignore this variable.)
@@ -79,19 +79,68 @@ stutter The length of time to run the test before pausing for this
Specifying "stutter=0" causes the test to run continuously
without pausing, which is the old default behavior.
+test_boost Whether or not to test the ability of RCU to do priority
+ boosting. Defaults to "test_boost=1", which performs
+ RCU priority-inversion testing only if the selected
+ RCU implementation supports priority boosting. Specifying
+ "test_boost=0" never performs RCU priority-inversion
+ testing. Specifying "test_boost=2" performs RCU
+ priority-inversion testing even if the selected RCU
+ implementation does not support RCU priority boosting,
+ which can be used to test rcutorture's ability to
+ carry out RCU priority-inversion testing.
+
+test_boost_interval
+ The number of seconds in an RCU priority-inversion test
+ cycle. Defaults to "test_boost_interval=7". It is
+ usually wise for this value to be relatively prime to
+ the value selected for "stutter".
+
+test_boost_duration
+ The number of seconds to do RCU priority-inversion testing
+ within any given "test_boost_interval". Defaults to
+ "test_boost_duration=4".
+
test_no_idle_hz Whether or not to test the ability of RCU to operate in
a kernel that disables the scheduling-clock interrupt to
idle CPUs. Boolean parameter, "1" to test, "0" otherwise.
Defaults to omitting this test.
-torture_type The type of RCU to test: "rcu" for the rcu_read_lock() API,
- "rcu_sync" for rcu_read_lock() with synchronous reclamation,
- "rcu_bh" for the rcu_read_lock_bh() API, "rcu_bh_sync" for
- rcu_read_lock_bh() with synchronous reclamation, "srcu" for
- the "srcu_read_lock()" API, "sched" for the use of
- preempt_disable() together with synchronize_sched(),
- and "sched_expedited" for the use of preempt_disable()
- with synchronize_sched_expedited().
+torture_type The type of RCU to test, with string values as follows:
+
+ "rcu": rcu_read_lock(), rcu_read_unlock() and call_rcu().
+
+ "rcu_sync": rcu_read_lock(), rcu_read_unlock(), and
+ synchronize_rcu().
+
+ "rcu_expedited": rcu_read_lock(), rcu_read_unlock(), and
+ synchronize_rcu_expedited().
+
+ "rcu_bh": rcu_read_lock_bh(), rcu_read_unlock_bh(), and
+ call_rcu_bh().
+
+ "rcu_bh_sync": rcu_read_lock_bh(), rcu_read_unlock_bh(),
+ and synchronize_rcu_bh().
+
+ "rcu_bh_expedited": rcu_read_lock_bh(), rcu_read_unlock_bh(),
+ and synchronize_rcu_bh_expedited().
+
+ "srcu": srcu_read_lock(), srcu_read_unlock() and
+ synchronize_srcu().
+
+ "srcu_expedited": srcu_read_lock(), srcu_read_unlock() and
+ synchronize_srcu_expedited().
+
+ "sched": preempt_disable(), preempt_enable(), and
+ call_rcu_sched().
+
+ "sched_sync": preempt_disable(), preempt_enable(), and
+ synchronize_sched().
+
+ "sched_expedited": preempt_disable(), preempt_enable(), and
+ synchronize_sched_expedited().
+
+ Defaults to "rcu".
verbose Enable debug printk()s. Default is disabled.
@@ -100,12 +149,12 @@ OUTPUT
The statistics output is as follows:
- rcu-torture: --- Start of test: nreaders=16 stat_interval=0 verbose=0
- rcu-torture: rtc: 0000000000000000 ver: 1916 tfle: 0 rta: 1916 rtaf: 0 rtf: 1915
- rcu-torture: Reader Pipe: 1466408 9747 0 0 0 0 0 0 0 0 0
- rcu-torture: Reader Batch: 1464477 11678 0 0 0 0 0 0 0 0
- rcu-torture: Free-Block Circulation: 1915 1915 1915 1915 1915 1915 1915 1915 1915 1915 0
- rcu-torture: --- End of test
+ rcu-torture:--- Start of test: nreaders=16 nfakewriters=4 stat_interval=30 verbose=0 test_no_idle_hz=1 shuffle_interval=3 stutter=5 irqreader=1 fqs_duration=0 fqs_holdoff=0 fqs_stutter=3 test_boost=1/0 test_boost_interval=7 test_boost_duration=4
+ rcu-torture: rtc: (null) ver: 155441 tfle: 0 rta: 155441 rtaf: 8884 rtf: 155440 rtmbe: 0 rtbke: 0 rtbre: 0 rtbf: 0 rtb: 0 nt: 3055767
+ rcu-torture: Reader Pipe: 727860534 34213 0 0 0 0 0 0 0 0 0
+ rcu-torture: Reader Batch: 727877838 17003 0 0 0 0 0 0 0 0 0
+ rcu-torture: Free-Block Circulation: 155440 155440 155440 155440 155440 155440 155440 155440 155440 155440 0
+ rcu-torture:--- End of test: SUCCESS: nreaders=16 nfakewriters=4 stat_interval=30 verbose=0 test_no_idle_hz=1 shuffle_interval=3 stutter=5 irqreader=1 fqs_duration=0 fqs_holdoff=0 fqs_stutter=3 test_boost=1/0 test_boost_interval=7 test_boost_duration=4
The command "dmesg | grep torture:" will extract this information on
most systems. On more esoteric configurations, it may be necessary to
@@ -113,26 +162,55 @@ use other commands to access the output of the printk()s used by
the RCU torture test. The printk()s use KERN_ALERT, so they should
be evident. ;-)
+The first and last lines show the rcutorture module parameters, and the
+last line shows either "SUCCESS" or "FAILURE", based on rcutorture's
+automatic determination as to whether RCU operated correctly.
+
The entries are as follows:
o "rtc": The hexadecimal address of the structure currently visible
to readers.
-o "ver": The number of times since boot that the rcutw writer task
+o "ver": The number of times since boot that the RCU writer task
has changed the structure visible to readers.
o "tfle": If non-zero, indicates that the "torture freelist"
- containing structure to be placed into the "rtc" area is empty.
+ containing structures to be placed into the "rtc" area is empty.
This condition is important, since it can fool you into thinking
that RCU is working when it is not. :-/
o "rta": Number of structures allocated from the torture freelist.
o "rtaf": Number of allocations from the torture freelist that have
- failed due to the list being empty.
+ failed due to the list being empty. It is not unusual for this
+ to be non-zero, but it is bad for it to be a large fraction of
+ the value indicated by "rta".
o "rtf": Number of frees into the torture freelist.
+o "rtmbe": A non-zero value indicates that rcutorture believes that
+ rcu_assign_pointer() and rcu_dereference() are not working
+ correctly. This value should be zero.
+
+o "rtbke": rcutorture was unable to create the real-time kthreads
+ used to force RCU priority inversion. This value should be zero.
+
+o "rtbre": Although rcutorture successfully created the kthreads
+ used to force RCU priority inversion, it was unable to set them
+ to the real-time priority level of 1. This value should be zero.
+
+o "rtbf": The number of times that RCU priority boosting failed
+ to resolve RCU priority inversion.
+
+o "rtb": The number of times that rcutorture attempted to force
+ an RCU priority inversion condition. If you are testing RCU
+ priority boosting via the "test_boost" module parameter, this
+ value should be non-zero.
+
+o "nt": The number of times rcutorture ran RCU read-side code from
+ within a timer handler. This value should be non-zero only
+ if you specified the "irqreader" module parameter.
+
o "Reader Pipe": Histogram of "ages" of structures seen by readers.
If any entries past the first two are non-zero, RCU is broken.
And rcutorture prints the error flag string "!!!" to make sure
@@ -162,26 +240,15 @@ o "Free-Block Circulation": Shows the number of torture structures
somehow gets incremented farther than it should.
Different implementations of RCU can provide implementation-specific
-additional information. For example, SRCU provides the following:
+additional information. For example, SRCU provides the following
+additional line:
- srcu-torture: rtc: f8cf46a8 ver: 355 tfle: 0 rta: 356 rtaf: 0 rtf: 346 rtmbe: 0
- srcu-torture: Reader Pipe: 559738 939 0 0 0 0 0 0 0 0 0
- srcu-torture: Reader Batch: 560434 243 0 0 0 0 0 0 0 0
- srcu-torture: Free-Block Circulation: 355 354 353 352 351 350 349 348 347 346 0
srcu-torture: per-CPU(idx=1): 0(0,1) 1(0,1) 2(0,0) 3(0,1)
-The first four lines are similar to those for RCU. The last line shows
-the per-CPU counter state. The numbers in parentheses are the values
-of the "old" and "current" counters for the corresponding CPU. The
-"idx" value maps the "old" and "current" values to the underlying array,
-and is useful for debugging.
-
-Similarly, sched_expedited RCU provides the following:
-
- sched_expedited-torture: rtc: d0000000016c1880 ver: 1090796 tfle: 0 rta: 1090796 rtaf: 0 rtf: 1090787 rtmbe: 0 nt: 27713319
- sched_expedited-torture: Reader Pipe: 12660320201 95875 0 0 0 0 0 0 0 0 0
- sched_expedited-torture: Reader Batch: 12660424885 0 0 0 0 0 0 0 0 0 0
- sched_expedited-torture: Free-Block Circulation: 1090795 1090795 1090794 1090793 1090792 1090791 1090790 1090789 1090788 1090787 0
+This line shows the per-CPU counter state. The numbers in parentheses are
+the values of the "old" and "current" counters for the corresponding CPU.
+The "idx" value maps the "old" and "current" values to the underlying
+array, and is useful for debugging.
USAGE
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 8173cec473a..aaf65f6c6cd 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -33,23 +33,23 @@ rcu/rcuboost:
The output of "cat rcu/rcudata" looks as follows:
rcu_sched:
- 0 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=545/1/0 df=50 of=0 ri=0 ql=163 qs=NRW. kt=0/W/0 ktl=ebc3 b=10 ci=153737 co=0 ca=0
- 1 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=967/1/0 df=58 of=0 ri=0 ql=634 qs=NRW. kt=0/W/1 ktl=58c b=10 ci=191037 co=0 ca=0
- 2 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=1081/1/0 df=175 of=0 ri=0 ql=74 qs=N.W. kt=0/W/2 ktl=da94 b=10 ci=75991 co=0 ca=0
- 3 c=20942 g=20943 pq=1 pqc=20942 qp=1 dt=1846/0/0 df=404 of=0 ri=0 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=72261 co=0 ca=0
- 4 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=369/1/0 df=83 of=0 ri=0 ql=48 qs=N.W. kt=0/W/4 ktl=e0e7 b=10 ci=128365 co=0 ca=0
- 5 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=381/1/0 df=64 of=0 ri=0 ql=169 qs=NRW. kt=0/W/5 ktl=fb2f b=10 ci=164360 co=0 ca=0
- 6 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=1037/1/0 df=183 of=0 ri=0 ql=62 qs=N.W. kt=0/W/6 ktl=d2ad b=10 ci=65663 co=0 ca=0
- 7 c=20897 g=20897 pq=1 pqc=20896 qp=0 dt=1572/0/0 df=382 of=0 ri=0 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=75006 co=0 ca=0
+ 0 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=545/1/0 df=50 of=0 ri=0 ql=163 qs=NRW. kt=0/W/0 ktl=ebc3 b=10 ci=153737 co=0 ca=0
+ 1 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=967/1/0 df=58 of=0 ri=0 ql=634 qs=NRW. kt=0/W/1 ktl=58c b=10 ci=191037 co=0 ca=0
+ 2 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=1081/1/0 df=175 of=0 ri=0 ql=74 qs=N.W. kt=0/W/2 ktl=da94 b=10 ci=75991 co=0 ca=0
+ 3 c=20942 g=20943 pq=1 pgp=20942 qp=1 dt=1846/0/0 df=404 of=0 ri=0 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=72261 co=0 ca=0
+ 4 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=369/1/0 df=83 of=0 ri=0 ql=48 qs=N.W. kt=0/W/4 ktl=e0e7 b=10 ci=128365 co=0 ca=0
+ 5 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=381/1/0 df=64 of=0 ri=0 ql=169 qs=NRW. kt=0/W/5 ktl=fb2f b=10 ci=164360 co=0 ca=0
+ 6 c=20972 g=20973 pq=1 pgp=20973 qp=0 dt=1037/1/0 df=183 of=0 ri=0 ql=62 qs=N.W. kt=0/W/6 ktl=d2ad b=10 ci=65663 co=0 ca=0
+ 7 c=20897 g=20897 pq=1 pgp=20896 qp=0 dt=1572/0/0 df=382 of=0 ri=0 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=75006 co=0 ca=0
rcu_bh:
- 0 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=545/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/0 ktl=ebc3 b=10 ci=0 co=0 ca=0
- 1 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=967/1/0 df=3 of=0 ri=1 ql=0 qs=.... kt=0/W/1 ktl=58c b=10 ci=151 co=0 ca=0
- 2 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=1081/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/2 ktl=da94 b=10 ci=0 co=0 ca=0
- 3 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=1846/0/0 df=8 of=0 ri=1 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=0 co=0 ca=0
- 4 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=369/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/4 ktl=e0e7 b=10 ci=0 co=0 ca=0
- 5 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=381/1/0 df=4 of=0 ri=1 ql=0 qs=.... kt=0/W/5 ktl=fb2f b=10 ci=0 co=0 ca=0
- 6 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=1037/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/6 ktl=d2ad b=10 ci=0 co=0 ca=0
- 7 c=1474 g=1474 pq=1 pqc=1473 qp=0 dt=1572/0/0 df=8 of=0 ri=1 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=0 co=0 ca=0
+ 0 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=545/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/0 ktl=ebc3 b=10 ci=0 co=0 ca=0
+ 1 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=967/1/0 df=3 of=0 ri=1 ql=0 qs=.... kt=0/W/1 ktl=58c b=10 ci=151 co=0 ca=0
+ 2 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1081/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/2 ktl=da94 b=10 ci=0 co=0 ca=0
+ 3 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1846/0/0 df=8 of=0 ri=1 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=0 co=0 ca=0
+ 4 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=369/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/4 ktl=e0e7 b=10 ci=0 co=0 ca=0
+ 5 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=381/1/0 df=4 of=0 ri=1 ql=0 qs=.... kt=0/W/5 ktl=fb2f b=10 ci=0 co=0 ca=0
+ 6 c=1480 g=1480 pq=1 pgp=1480 qp=0 dt=1037/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/6 ktl=d2ad b=10 ci=0 co=0 ca=0
+ 7 c=1474 g=1474 pq=1 pgp=1473 qp=0 dt=1572/0/0 df=8 of=0 ri=1 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=0 co=0 ca=0
The first section lists the rcu_data structures for rcu_sched, the second
for rcu_bh. Note that CONFIG_TREE_PREEMPT_RCU kernels will have an
@@ -84,7 +84,7 @@ o "pq" indicates that this CPU has passed through a quiescent state
CPU has not yet reported that fact, (2) some other CPU has not
yet reported for this grace period, or (3) both.
-o "pqc" indicates which grace period the last-observed quiescent
+o "pgp" indicates which grace period the last-observed quiescent
state for this CPU corresponds to. This is important for handling
the race between CPU 0 reporting an extended dynticks-idle
quiescent state for CPU 1 and CPU 1 suddenly waking up and
@@ -184,10 +184,14 @@ o "kt" is the per-CPU kernel-thread state. The digit preceding
The number after the final slash is the CPU that the kthread
is actually running on.
+ This field is displayed only for CONFIG_RCU_BOOST kernels.
+
o "ktl" is the low-order 16 bits (in hexadecimal) of the count of
the number of times that this CPU's per-CPU kthread has gone
through its loop servicing invoke_rcu_cpu_kthread() requests.
+ This field is displayed only for CONFIG_RCU_BOOST kernels.
+
o "b" is the batch limit for this CPU. If more than this number
of RCU callbacks is ready to invoke, then the remainder will
be deferred.
diff --git a/Documentation/blackfin/bfin-gpio-notes.txt b/Documentation/blackfin/bfin-gpio-notes.txt
index f731c1e5647..d36b01f778b 100644
--- a/Documentation/blackfin/bfin-gpio-notes.txt
+++ b/Documentation/blackfin/bfin-gpio-notes.txt
@@ -1,5 +1,5 @@
/*
- * File: Documentation/blackfin/bfin-gpio-note.txt
+ * File: Documentation/blackfin/bfin-gpio-notes.txt
* Based on:
* Author:
*
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index c6d84cfd2f5..e418dc0a708 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -186,7 +186,7 @@ a virtual address mapping (unlike the earlier scheme of virtual address
do not have a corresponding kernel virtual address space mapping) and
low-memory pages.
-Note: Please refer to Documentation/PCI/PCI-DMA-mapping.txt for a discussion
+Note: Please refer to Documentation/DMA-API-HOWTO.txt for a discussion
on PCI high mem DMA aspects and mapping of scatter gather lists, and support
for 64 bit PCI.
diff --git a/Documentation/bus-virt-phys-mapping.txt b/Documentation/bus-virt-phys-mapping.txt
index 1b5aa10df84..2bc55ff3b4d 100644
--- a/Documentation/bus-virt-phys-mapping.txt
+++ b/Documentation/bus-virt-phys-mapping.txt
@@ -1,6 +1,6 @@
[ NOTE: The virt_to_bus() and bus_to_virt() functions have been
superseded by the functionality provided by the PCI DMA interface
- (see Documentation/PCI/PCI-DMA-mapping.txt). They continue
+ (see Documentation/DMA-API-HOWTO.txt). They continue
to be documented below for historical purposes, but new code
must not use them. --davidm 00/12/12 ]
diff --git a/Documentation/cdrom/packet-writing.txt b/Documentation/cdrom/packet-writing.txt
index 13c251d5add..2834170d821 100644
--- a/Documentation/cdrom/packet-writing.txt
+++ b/Documentation/cdrom/packet-writing.txt
@@ -109,7 +109,7 @@ this interface. (see http://tom.ist-im-web.de/download/pktcdvd )
For a description of the sysfs interface look into the file:
- Documentation/ABI/testing/sysfs-block-pktcdvd
+ Documentation/ABI/testing/sysfs-class-pktcdvd
Using the pktcdvd debugfs interface
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index e74d0a2eb1c..d221781daba 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -132,7 +132,7 @@ The sampling rate is limited by the HW transition latency:
transition_latency * 100
Or by kernel restrictions:
If CONFIG_NO_HZ is set, the limit is 10ms fixed.
-If CONFIG_NO_HZ is not set or no_hz=off boot parameter is used, the
+If CONFIG_NO_HZ is not set or nohz=off boot parameter is used, the
limits depend on the CONFIG_HZ option:
HZ=1000: min=20000us (20ms)
HZ=250: min=80000us (80ms)
diff --git a/Documentation/development-process/4.Coding b/Documentation/development-process/4.Coding
index 83f5f5b365a..e3cb6a56653 100644
--- a/Documentation/development-process/4.Coding
+++ b/Documentation/development-process/4.Coding
@@ -278,7 +278,7 @@ enabled, a configurable percentage of memory allocations will be made to
fail; these failures can be restricted to a specific range of code.
Running with fault injection enabled allows the programmer to see how the
code responds when things go badly. See
-Documentation/fault-injection/fault-injection.text for more information on
+Documentation/fault-injection/fault-injection.txt for more information on
how to use this facility.
Other kinds of errors can be found with the "sparse" static analysis tool.
diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt
new file mode 100644
index 00000000000..7ca52161e7a
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/l2cc.txt
@@ -0,0 +1,44 @@
+* ARM L2 Cache Controller
+
+ARM cores often have a separate level 2 cache controller. There are various
+implementations of the L2 cache controller with compatible programming models.
+The ARM L2 cache representation in the device tree should be done as follows:
+
+Required properties:
+
+- compatible : should be one of:
+ "arm,pl310-cache"
+ "arm,l220-cache"
+ "arm,l210-cache"
+- cache-unified : Specifies the cache is a unified cache.
+- cache-level : Should be set to 2 for a level 2 cache.
+- reg : Physical base address and size of cache controller's memory mapped
+ registers.
+
+Optional properties:
+
+- arm,data-latency : Cycles of latency for Data RAM accesses. Specifies 3 cells of
+ read, write and setup latencies. Minimum valid values are 1. Controllers
+ without setup latency control should use a value of 0.
+- arm,tag-latency : Cycles of latency for Tag RAM accesses. Specifies 3 cells of
+ read, write and setup latencies. Controllers without setup latency control
+ should use 0. Controllers without separate read and write Tag RAM latency
+ values should only use the first cell.
+- arm,dirty-latency : Cycles of latency for Dirty RAMs. This is a single cell.
+- arm,filter-ranges : <start length> Starting address and length of window to
+ filter. Addresses in the filter window are directed to the M1 port. Other
+ addresses will go to the M0 port.
+- interrupts : 1 combined interrupt.
+
+Example:
+
+L2: cache-controller {
+ compatible = "arm,pl310-cache";
+ reg = <0xfff12000 0x1000>;
+ arm,data-latency = <1 1 1>;
+ arm,tag-latency = <2 2 2>;
+ arm,filter-latency = <0x80000000 0x8000000>;
+ cache-unified;
+ cache-level = <2>;
+ interrupts = <45>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/led.txt b/Documentation/devicetree/bindings/gpio/led.txt
index 064db928c3c..141087cf310 100644
--- a/Documentation/devicetree/bindings/gpio/led.txt
+++ b/Documentation/devicetree/bindings/gpio/led.txt
@@ -8,7 +8,7 @@ node's name represents the name of the corresponding LED.
LED sub-node properties:
- gpios : Should specify the LED's GPIO, see "Specifying GPIO information
- for devices" in Documentation/powerpc/booting-without-of.txt. Active
+ for devices" in Documentation/devicetree/booting-without-of.txt. Active
low LEDs should be indicated using flags in the GPIO specifier.
- label : (optional) The label for this LED. If omitted, the label is
taken from the node name (excluding the unit address).
diff --git a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
index 1a729f08986..1ad80d5865a 100644
--- a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
+++ b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
@@ -1,61 +1,24 @@
-CAN Device Tree Bindings
-------------------------
-2011 Freescale Semiconductor, Inc.
+Flexcan CAN contoller on Freescale's ARM and PowerPC system-on-a-chip (SOC).
-fsl,flexcan-v1.0 nodes
------------------------
-In addition to the required compatible-, reg- and interrupt-properties, you can
-also specify which clock source shall be used for the controller.
+Required properties:
-CPI Clock- Can Protocol Interface Clock
- This CLK_SRC bit of CTRL(control register) selects the clock source to
- the CAN Protocol Interface(CPI) to be either the peripheral clock
- (driven by the PLL) or the crystal oscillator clock. The selected clock
- is the one fed to the prescaler to generate the Serial Clock (Sclock).
- The PRESDIV field of CTRL(control register) controls a prescaler that
- generates the Serial Clock (Sclock), whose period defines the
- time quantum used to compose the CAN waveform.
+- compatible : Should be "fsl,<processor>-flexcan"
-Can Engine Clock Source
- There are two sources for CAN clock
- - Platform Clock It represents the bus clock
- - Oscillator Clock
+ An implementation should also claim any of the following compatibles
+ that it is fully backwards compatible with:
- Peripheral Clock (PLL)
- --------------
- |
- --------- -------------
- | |CPI Clock | Prescaler | Sclock
- | |---------------->| (1.. 256) |------------>
- --------- -------------
- | |
- -------------- ---------------------CLK_SRC
- Oscillator Clock
+ - fsl,p1010-flexcan
-- fsl,flexcan-clock-source : CAN Engine Clock Source.This property selects
- the peripheral clock. PLL clock is fed to the
- prescaler to generate the Serial Clock (Sclock).
- Valid values are "oscillator" and "platform"
- "oscillator": CAN engine clock source is oscillator clock.
- "platform" The CAN engine clock source is the bus clock
- (platform clock).
+- reg : Offset and length of the register set for this device
+- interrupts : Interrupt tuple for this device
+- clock-frequency : The oscillator frequency driving the flexcan device
-- fsl,flexcan-clock-divider : for the reference and system clock, an additional
- clock divider can be specified.
-- clock-frequency: frequency required to calculate the bitrate for FlexCAN.
+Example:
-Note:
- - v1.0 of flexcan-v1.0 represent the IP block version for P1010 SOC.
- - P1010 does not have oscillator as the Clock Source.So the default
- Clock Source is platform clock.
-Examples:
-
- can0@1c000 {
- compatible = "fsl,flexcan-v1.0";
+ can@1c000 {
+ compatible = "fsl,p1010-flexcan";
reg = <0x1c000 0x1000>;
interrupts = <48 0x2>;
interrupt-parent = <&mpic>;
- fsl,flexcan-clock-source = "platform";
- fsl,flexcan-clock-divider = <2>;
- clock-frequency = <fixed by u-boot>;
+ clock-frequency = <200000000>; // filled in by bootloader
};
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt
new file mode 100644
index 00000000000..adb5b5744ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/smsc911x.txt
@@ -0,0 +1,38 @@
+* Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller
+
+Required properties:
+- compatible : Should be "smsc,lan<model>", "smsc,lan9115"
+- reg : Address and length of the io space for SMSC LAN
+- interrupts : Should contain SMSC LAN interrupt line
+- interrupt-parent : Should be the phandle for the interrupt controller
+ that services interrupts for this device
+- phy-mode : String, operation mode of the PHY interface.
+ Supported values are: "mii", "gmii", "sgmii", "tbi", "rmii",
+ "rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid", "rtbi", "smii".
+
+Optional properties:
+- reg-shift : Specify the quantity to shift the register offsets by
+- reg-io-width : Specify the size (in bytes) of the IO accesses that
+ should be performed on the device. Valid value for SMSC LAN is
+ 2 or 4. If it's omitted or invalid, the size would be 2.
+- smsc,irq-active-high : Indicates the IRQ polarity is active-high
+- smsc,irq-push-pull : Indicates the IRQ type is push-pull
+- smsc,force-internal-phy : Forces SMSC LAN controller to use
+ internal PHY
+- smsc,force-external-phy : Forces SMSC LAN controller to use
+ external PHY
+- smsc,save-mac-address : Indicates that mac address needs to be saved
+ before resetting the controller
+- local-mac-address : 6 bytes, mac address
+
+Examples:
+
+lan9220@f4000000 {
+ compatible = "smsc,lan9220", "smsc,lan9115";
+ reg = <0xf4000000 0x2000000>;
+ phy-mode = "mii";
+ interrupt-parent = <&gpio1>;
+ interrupts = <31>;
+ reg-io-width = <4>;
+ smsc,irq-push-pull;
+};
diff --git a/Documentation/devicetree/bindings/serial/rs485.txt b/Documentation/devicetree/bindings/serial/rs485.txt
new file mode 100644
index 00000000000..1e753c69fc8
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/rs485.txt
@@ -0,0 +1,31 @@
+* RS485 serial communications
+
+The RTS signal is capable of automatically controlling line direction for
+the built-in half-duplex mode.
+The properties described hereafter shall be given to a half-duplex capable
+UART node.
+
+Required properties:
+- rs485-rts-delay: prop-encoded-array <a b> where:
+ * a is the delay beteween rts signal and beginning of data sent in milliseconds.
+ it corresponds to the delay before sending data.
+ * b is the delay between end of data sent and rts signal in milliseconds
+ it corresponds to the delay after sending data and actual release of the line.
+
+Optional properties:
+- linux,rs485-enabled-at-boot-time: empty property telling to enable the rs485
+ feature at boot time. It can be disabled later with proper ioctl.
+- rs485-rx-during-tx: empty property that enables the receiving of data even
+ whilst sending data.
+
+RS485 example for Atmel USART:
+ usart0: serial@fff8c000 {
+ compatible = "atmel,at91sam9260-usart";
+ reg = <0xfff8c000 0x4000>;
+ interrupts = <7>;
+ atmel,use-dma-rx;
+ atmel,use-dma-tx;
+ linux,rs485-enabled-at-boot-time;
+ rs485-rts-delay = <0 200>; // in milliseconds
+ };
+
diff --git a/Documentation/devicetree/bindings/tty/serial/atmel-usart.txt b/Documentation/devicetree/bindings/tty/serial/atmel-usart.txt
new file mode 100644
index 00000000000..a49d9a1d4cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/tty/serial/atmel-usart.txt
@@ -0,0 +1,27 @@
+* Atmel Universal Synchronous Asynchronous Receiver/Transmitter (USART)
+
+Required properties:
+- compatible: Should be "atmel,<chip>-usart"
+ The compatible <chip> indicated will be the first SoC to support an
+ additional mode or an USART new feature.
+- reg: Should contain registers location and length
+- interrupts: Should contain interrupt
+
+Optional properties:
+- atmel,use-dma-rx: use of PDC or DMA for receiving data
+- atmel,use-dma-tx: use of PDC or DMA for transmitting data
+
+<chip> compatible description:
+- at91rm9200: legacy USART support
+- at91sam9260: generic USART implementation for SAM9 SoCs
+
+Example:
+
+ usart0: serial@fff8c000 {
+ compatible = "atmel,at91sam9260-usart";
+ reg = <0xfff8c000 0x4000>;
+ interrupts = <7>;
+ atmel,use-dma-rx;
+ atmel,use-dma-tx;
+ };
+
diff --git a/Documentation/devicetree/bindings/tty/serial/snps-dw-apb-uart.txt b/Documentation/devicetree/bindings/tty/serial/snps-dw-apb-uart.txt
new file mode 100644
index 00000000000..f13f1c5be91
--- /dev/null
+++ b/Documentation/devicetree/bindings/tty/serial/snps-dw-apb-uart.txt
@@ -0,0 +1,25 @@
+* Synopsys DesignWare ABP UART
+
+Required properties:
+- compatible : "snps,dw-apb-uart"
+- reg : offset and length of the register set for the device.
+- interrupts : should contain uart interrupt.
+- clock-frequency : the input clock frequency for the UART.
+
+Optional properties:
+- reg-shift : quantity to shift the register offsets by. If this property is
+ not present then the register offsets are not shifted.
+- reg-io-width : the size (in bytes) of the IO accesses that should be
+ performed on the device. If this property is not present then single byte
+ accesses are used.
+
+Example:
+
+ uart@80230000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x80230000 0x100>;
+ clock-frequency = <3686400>;
+ interrupts = <10>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
diff --git a/Documentation/driver-model/binding.txt b/Documentation/driver-model/binding.txt
index f7ec9d625bf..abfc8e290d5 100644
--- a/Documentation/driver-model/binding.txt
+++ b/Documentation/driver-model/binding.txt
@@ -48,10 +48,6 @@ devclass_add_device is called to enumerate the device within the class
and actually register it with the class, which happens with the
class's register_dev callback.
-NOTE: The device class structures and core routines to manipulate them
-are not in the mainline kernel, so the discussion is still a bit
-speculative.
-
Driver
~~~~~~
diff --git a/Documentation/driver-model/device.txt b/Documentation/driver-model/device.txt
index bdefe728a73..1e70220d20f 100644
--- a/Documentation/driver-model/device.txt
+++ b/Documentation/driver-model/device.txt
@@ -45,33 +45,52 @@ struct device_attribute {
const char *buf, size_t count);
};
-Attributes of devices can be exported via drivers using a simple
-procfs-like interface.
+Attributes of devices can be exported by a device driver through sysfs.
Please see Documentation/filesystems/sysfs.txt for more information
on how sysfs works.
+As explained in Documentation/kobject.txt, device attributes must be be
+created before the KOBJ_ADD uevent is generated. The only way to realize
+that is by defining an attribute group.
+
Attributes are declared using a macro called DEVICE_ATTR:
#define DEVICE_ATTR(name,mode,show,store)
Example:
-DEVICE_ATTR(power,0644,show_power,store_power);
+static DEVICE_ATTR(type, 0444, show_type, NULL);
+static DEVICE_ATTR(power, 0644, show_power, store_power);
-This declares a structure of type struct device_attribute named
-'dev_attr_power'. This can then be added and removed to the device's
-directory using:
+This declares two structures of type struct device_attribute with respective
+names 'dev_attr_type' and 'dev_attr_power'. These two attributes can be
+organized as follows into a group:
-int device_create_file(struct device *device, struct device_attribute * entry);
-void device_remove_file(struct device * dev, struct device_attribute * attr);
+static struct attribute *dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_power.attr,
+ NULL,
+};
-Example:
+static struct attribute_group dev_attr_group = {
+ .attrs = dev_attrs,
+};
+
+static const struct attribute_group *dev_attr_groups[] = {
+ &dev_attr_group,
+ NULL,
+};
+
+This array of groups can then be associated with a device by setting the
+group pointer in struct device before device_register() is invoked:
-device_create_file(dev,&dev_attr_power);
-device_remove_file(dev,&dev_attr_power);
+ dev->groups = dev_attr_groups;
+ device_register(dev);
-The file name will be 'power' with a mode of 0644 (-rw-r--r--).
+The device_register() function will use the 'groups' pointer to create the
+device attributes and the device_unregister() function will use this pointer
+to remove the device attributes.
Word of warning: While the kernel allows device_create_file() and
device_remove_file() to be called on a device at any time, userspace has
@@ -84,24 +103,4 @@ not know about the new attributes.
This is important for device driver that need to publish additional
attributes for a device at driver probe time. If the device driver simply
calls device_create_file() on the device structure passed to it, then
-userspace will never be notified of the new attributes. Instead, it should
-probably use class_create() and class->dev_attrs to set up a list of
-desired attributes in the modules_init function, and then in the .probe()
-hook, and then use device_create() to create a new device as a child
-of the probed device. The new device will generate a new uevent and
-properly advertise the new attributes to userspace.
-
-For example, if a driver wanted to add the following attributes:
-struct device_attribute mydriver_attribs[] = {
- __ATTR(port_count, 0444, port_count_show),
- __ATTR(serial_number, 0444, serial_number_show),
- NULL
-};
-
-Then in the module init function is would do:
- mydriver_class = class_create(THIS_MODULE, "my_attrs");
- mydriver_class.dev_attr = mydriver_attribs;
-
-And assuming 'dev' is the struct device passed into the probe hook, the driver
-probe function would do something like:
- device_create(&mydriver_class, dev, chrdev, &private_data, "my_name");
+userspace will never be notified of the new attributes.
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 4dc46547766..d5ac362daef 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -594,9 +594,18 @@ Why: In 3.0, we can now autodetect internal 3G device and already have
Who: Lee, Chun-Yi <jlee@novell.com>
----------------------------
+
What: The XFS nodelaylog mount option
When: 3.3
Why: The delaylog mode that has been the default since 2.6.39 has proven
stable, and the old code is in the way of additional improvements in
the log code.
Who: Christoph Hellwig <hch@lst.de>
+
+----------------------------
+
+What: iwlagn alias support
+When: 3.5
+Why: The iwlagn module has been renamed iwlwifi. The alias will be around
+ for backward compatibility for several cycles and then dropped.
+Who: Don Fry <donald.h.fry@intel.com>
diff --git a/Documentation/filesystems/9p.txt b/Documentation/filesystems/9p.txt
index 13de64c7f0a..2c032144284 100644
--- a/Documentation/filesystems/9p.txt
+++ b/Documentation/filesystems/9p.txt
@@ -92,7 +92,7 @@ OPTIONS
wfdno=n the file descriptor for writing with trans=fd
- maxdata=n the number of bytes to use for 9p packet payload (msize)
+ msize=n the number of bytes to use for 9p packet payload
port=n port to connect to on the remote server
diff --git a/Documentation/filesystems/caching/object.txt b/Documentation/filesystems/caching/object.txt
index e8b0a35d8fe..58313348da8 100644
--- a/Documentation/filesystems/caching/object.txt
+++ b/Documentation/filesystems/caching/object.txt
@@ -127,9 +127,9 @@ fscache_enqueue_object()).
PROVISION OF CPU TIME
---------------------
-The work to be done by the various states is given CPU time by the threads of
-the slow work facility (see Documentation/slow-work.txt). This is used in
-preference to the workqueue facility because:
+The work to be done by the various states was given CPU time by the threads of
+the slow work facility. This was used in preference to the workqueue facility
+because:
(1) Threads may be completely occupied for very long periods of time by a
particular work item. These state actions may be doing sequences of
diff --git a/Documentation/filesystems/locks.txt b/Documentation/filesystems/locks.txt
index fab857accbd..2cf81082581 100644
--- a/Documentation/filesystems/locks.txt
+++ b/Documentation/filesystems/locks.txt
@@ -53,11 +53,12 @@ fcntl(), with all the problems that implies.
1.3 Mandatory Locking As A Mount Option
---------------------------------------
-Mandatory locking, as described in 'Documentation/filesystems/mandatory.txt'
-was prior to this release a general configuration option that was valid for
-all mounted filesystems. This had a number of inherent dangers, not the
-least of which was the ability to freeze an NFS server by asking it to read
-a file for which a mandatory lock existed.
+Mandatory locking, as described in
+'Documentation/filesystems/mandatory-locking.txt' was prior to this release a
+general configuration option that was valid for all mounted filesystems. This
+had a number of inherent dangers, not the least of which was the ability to
+freeze an NFS server by asking it to read a file for which a mandatory lock
+existed.
From this release of the kernel, mandatory locking can be turned on and off
on a per-filesystem basis, using the mount options 'mand' and 'nomand'.
diff --git a/Documentation/filesystems/nfs/idmapper.txt b/Documentation/filesystems/nfs/idmapper.txt
index 9c8fd614865..120fd3cf7fd 100644
--- a/Documentation/filesystems/nfs/idmapper.txt
+++ b/Documentation/filesystems/nfs/idmapper.txt
@@ -47,7 +47,7 @@ request-key will find the first matching line and corresponding program. In
this case, /some/other/program will handle all uid lookups and
/usr/sbin/nfs.idmap will handle gid, user, and group lookups.
-See <file:Documentation/security/keys-request-keys.txt> for more information
+See <file:Documentation/security/keys-request-key.txt> for more information
about the request-key function.
diff --git a/Documentation/filesystems/pohmelfs/design_notes.txt b/Documentation/filesystems/pohmelfs/design_notes.txt
index dcf83358716..8aef9133570 100644
--- a/Documentation/filesystems/pohmelfs/design_notes.txt
+++ b/Documentation/filesystems/pohmelfs/design_notes.txt
@@ -58,8 +58,9 @@ data transfers.
POHMELFS clients operate with a working set of servers and are capable of balancing read-only
operations (like lookups or directory listings) between them according to IO priorities.
Administrators can add or remove servers from the set at run-time via special commands (described
-in Documentation/pohmelfs/info.txt file). Writes are replicated to all servers, which are connected
-with write permission turned on. IO priority and permissions can be changed in run-time.
+in Documentation/filesystems/pohmelfs/info.txt file). Writes are replicated to all servers, which
+are connected with write permission turned on. IO priority and permissions can be changed in
+run-time.
POHMELFS is capable of full data channel encryption and/or strong crypto hashing.
One can select any kernel supported cipher, encryption mode, hash type and operation mode
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index db3b1aba32a..0ec91f03422 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1263,7 +1263,7 @@ review the kernel documentation in the directory /usr/src/linux/Documentation.
This chapter is heavily based on the documentation included in the pre 2.2
kernels, and became part of it in version 2.2.1 of the Linux kernel.
-Please see: Documentation/sysctls/ directory for descriptions of these
+Please see: Documentation/sysctl/ directory for descriptions of these
entries.
------------------------------------------------------------------------------
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index 597f728e7b4..07235caec22 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -4,7 +4,7 @@ sysfs - _The_ filesystem for exporting kernel objects.
Patrick Mochel <mochel@osdl.org>
Mike Murphy <mamurph@cs.clemson.edu>
-Revised: 15 July 2010
+Revised: 16 August 2011
Original: 10 January 2003
@@ -370,3 +370,11 @@ int driver_create_file(struct device_driver *, const struct driver_attribute *);
void driver_remove_file(struct device_driver *, const struct driver_attribute *);
+Documentation
+~~~~~~~~~~~~~
+
+The sysfs directory structure and the attributes in each directory define an
+ABI between the kernel and user space. As for any ABI, it is important that
+this ABI is stable and properly documented. All new sysfs attributes must be
+documented in Documentation/ABI. See also Documentation/ABI/README for more
+information.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 52d8fb81cff..43cbd082172 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -1053,9 +1053,6 @@ manipulate dentries:
and the dentry is returned. The caller must use dput()
to free the dentry when it finishes using it.
-For further information on dentry locking, please refer to the document
-Documentation/filesystems/dentry-locking.txt.
-
Mount Options
=============
diff --git a/Documentation/frv/booting.txt b/Documentation/frv/booting.txt
index 37c4d84a0e5..9bdf4b46e74 100644
--- a/Documentation/frv/booting.txt
+++ b/Documentation/frv/booting.txt
@@ -180,9 +180,3 @@ separated by spaces:
This tells the kernel what program to run initially. By default this is
/sbin/init, but /sbin/sash or /bin/sh are common alternatives.
-
- (*) vdc=...
-
- This option configures the MB93493 companion chip visual display
- driver. Please see Documentation/frv/mb93493/vdc.txt for more
- information.
diff --git a/Documentation/hwmon/ad7314 b/Documentation/hwmon/ad7314
new file mode 100644
index 00000000000..1912549c746
--- /dev/null
+++ b/Documentation/hwmon/ad7314
@@ -0,0 +1,25 @@
+Kernel driver ad7314
+====================
+
+Supported chips:
+ * Analog Devices AD7314
+ Prefix: 'ad7314'
+ Datasheet: Publicly available at Analog Devices website.
+ * Analog Devices ADT7301
+ Prefix: 'adt7301'
+ Datasheet: Publicly available at Analog Devices website.
+ * Analog Devices ADT7302
+ Prefix: 'adt7302'
+ Datasheet: Publicly available at Analog Devices website.
+
+Description
+-----------
+
+Driver supports the above parts. The ad7314 has a 10 bit
+sensor with 1lsb = 0.25 degrees centigrade. The adt7301 and
+adt7302 have 14 bit sensors with 1lsb = 0.03125 degrees centigrade.
+
+Notes
+-----
+
+Currently power down mode is not supported.
diff --git a/Documentation/hwmon/adm1275 b/Documentation/hwmon/adm1275
index 097b3ccc4be..ab70d96d2df 100644
--- a/Documentation/hwmon/adm1275
+++ b/Documentation/hwmon/adm1275
@@ -6,6 +6,10 @@ Supported chips:
Prefix: 'adm1275'
Addresses scanned: -
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1275.pdf
+ * Analog Devices ADM1276
+ Prefix: 'adm1276'
+ Addresses scanned: -
+ Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1276.pdf
Author: Guenter Roeck <guenter.roeck@ericsson.com>
@@ -13,13 +17,13 @@ Author: Guenter Roeck <guenter.roeck@ericsson.com>
Description
-----------
-This driver supports hardware montoring for Analog Devices ADM1275 Hot-Swap
-Controller and Digital Power Monitor.
+This driver supports hardware montoring for Analog Devices ADM1275 and ADM1276
+Hot-Swap Controller and Digital Power Monitor.
-The ADM1275 is a hot-swap controller that allows a circuit board to be removed
-from or inserted into a live backplane. It also features current and voltage
-readback via an integrated 12-bit analog-to-digital converter (ADC), accessed
-using a PMBus. interface.
+ADM1275 and ADM1276 are hot-swap controllers that allow a circuit board to be
+removed from or inserted into a live backplane. They also feature current and
+voltage readback via an integrated 12-bit analog-to-digital converter (ADC),
+accessed using a PMBus interface.
The driver is a client driver to the core PMBus driver. Please see
Documentation/hwmon/pmbus for details on PMBus client drivers.
@@ -48,17 +52,25 @@ attributes are write-only, all other attributes are read-only.
in1_label "vin1" or "vout1" depending on chip variant and
configuration.
-in1_input Measured voltage. From READ_VOUT register.
-in1_min Minumum Voltage. From VOUT_UV_WARN_LIMIT register.
-in1_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
-in1_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
-in1_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
+in1_input Measured voltage.
+in1_min Minumum Voltage.
+in1_max Maximum voltage.
+in1_min_alarm Voltage low alarm.
+in1_max_alarm Voltage high alarm.
in1_highest Historical maximum voltage.
in1_reset_history Write any value to reset history.
curr1_label "iout1"
-curr1_input Measured current. From READ_IOUT register.
-curr1_max Maximum current. From IOUT_OC_WARN_LIMIT register.
-curr1_max_alarm Current high alarm. From IOUT_OC_WARN_LIMIT register.
+curr1_input Measured current.
+curr1_max Maximum current.
+curr1_max_alarm Current high alarm.
+curr1_lcrit Critical minimum current. Depending on the chip
+ configuration, either curr1_lcrit or curr1_crit is
+ supported, but not both.
+curr1_lcrit_alarm Critical current low alarm.
+curr1_crit Critical maximum current. Depending on the chip
+ configuration, either curr1_lcrit or curr1_crit is
+ supported, but not both.
+curr1_crit_alarm Critical current high alarm.
curr1_highest Historical maximum current.
curr1_reset_history Write any value to reset history.
diff --git a/Documentation/hwmon/exynos4_tmu b/Documentation/hwmon/exynos4_tmu
new file mode 100644
index 00000000000..c3c6b41db60
--- /dev/null
+++ b/Documentation/hwmon/exynos4_tmu
@@ -0,0 +1,81 @@
+Kernel driver exynos4_tmu
+=================
+
+Supported chips:
+* ARM SAMSUNG EXYNOS4 series of SoC
+ Prefix: 'exynos4-tmu'
+ Datasheet: Not publicly available
+
+Authors: Donggeun Kim <dg77.kim@samsung.com>
+
+Description
+-----------
+
+This driver allows to read temperature inside SAMSUNG EXYNOS4 series of SoC.
+
+The chip only exposes the measured 8-bit temperature code value
+through a register.
+Temperature can be taken from the temperature code.
+There are three equations converting from temperature to temperature code.
+
+The three equations are:
+ 1. Two point trimming
+ Tc = (T - 25) * (TI2 - TI1) / (85 - 25) + TI1
+
+ 2. One point trimming
+ Tc = T + TI1 - 25
+
+ 3. No trimming
+ Tc = T + 50
+
+ Tc: Temperature code, T: Temperature,
+ TI1: Trimming info for 25 degree Celsius (stored at TRIMINFO register)
+ Temperature code measured at 25 degree Celsius which is unchanged
+ TI2: Trimming info for 85 degree Celsius (stored at TRIMINFO register)
+ Temperature code measured at 85 degree Celsius which is unchanged
+
+TMU(Thermal Management Unit) in EXYNOS4 generates interrupt
+when temperature exceeds pre-defined levels.
+The maximum number of configurable threshold is four.
+The threshold levels are defined as follows:
+ Level_0: current temperature > trigger_level_0 + threshold
+ Level_1: current temperature > trigger_level_1 + threshold
+ Level_2: current temperature > trigger_level_2 + threshold
+ Level_3: current temperature > trigger_level_3 + threshold
+
+ The threshold and each trigger_level are set
+ through the corresponding registers.
+
+When an interrupt occurs, this driver notify user space of
+one of four threshold levels for the interrupt
+through kobject_uevent_env and sysfs_notify functions.
+Although an interrupt condition for level_0 can be set,
+it is not notified to user space through sysfs_notify function.
+
+Sysfs Interface
+---------------
+name name of the temperature sensor
+ RO
+
+temp1_input temperature
+ RO
+
+temp1_max temperature for level_1 interrupt
+ RO
+
+temp1_crit temperature for level_2 interrupt
+ RO
+
+temp1_emergency temperature for level_3 interrupt
+ RO
+
+temp1_max_alarm alarm for level_1 interrupt
+ RO
+
+temp1_crit_alarm
+ alarm for level_2 interrupt
+ RO
+
+temp1_emergency_alarm
+ alarm for level_3 interrupt
+ RO
diff --git a/Documentation/hwmon/lm75 b/Documentation/hwmon/lm75
index a1790401fdd..c91a1d15fa2 100644
--- a/Documentation/hwmon/lm75
+++ b/Documentation/hwmon/lm75
@@ -12,26 +12,46 @@ Supported chips:
Addresses scanned: I2C 0x48 - 0x4f
Datasheet: Publicly available at the National Semiconductor website
http://www.national.com/
- * Dallas Semiconductor DS75
- Prefix: 'lm75'
- Addresses scanned: I2C 0x48 - 0x4f
- Datasheet: Publicly available at the Dallas Semiconductor website
- http://www.maxim-ic.com/
- * Dallas Semiconductor DS1775
- Prefix: 'lm75'
- Addresses scanned: I2C 0x48 - 0x4f
+ * Dallas Semiconductor DS75, DS1775
+ Prefixes: 'ds75', 'ds1775'
+ Addresses scanned: none
Datasheet: Publicly available at the Dallas Semiconductor website
http://www.maxim-ic.com/
* Maxim MAX6625, MAX6626
- Prefix: 'lm75'
- Addresses scanned: I2C 0x48 - 0x4b
+ Prefixes: 'max6625', 'max6626'
+ Addresses scanned: none
Datasheet: Publicly available at the Maxim website
http://www.maxim-ic.com/
* Microchip (TelCom) TCN75
Prefix: 'lm75'
- Addresses scanned: I2C 0x48 - 0x4f
+ Addresses scanned: none
+ Datasheet: Publicly available at the Microchip website
+ http://www.microchip.com/
+ * Microchip MCP9800, MCP9801, MCP9802, MCP9803
+ Prefix: 'mcp980x'
+ Addresses scanned: none
Datasheet: Publicly available at the Microchip website
http://www.microchip.com/
+ * Analog Devices ADT75
+ Prefix: 'adt75'
+ Addresses scanned: none
+ Datasheet: Publicly available at the Analog Devices website
+ http://www.analog.com/adt75
+ * ST Microelectronics STDS75
+ Prefix: 'stds75'
+ Addresses scanned: none
+ Datasheet: Publicly available at the ST website
+ http://www.st.com/internet/analog/product/121769.jsp
+ * Texas Instruments TMP100, TMP101, TMP105, TMP75, TMP175, TMP275
+ Prefixes: 'tmp100', 'tmp101', 'tmp105', 'tmp175', 'tmp75', 'tmp275'
+ Addresses scanned: none
+ Datasheet: Publicly available at the Texas Instruments website
+ http://www.ti.com/product/tmp100
+ http://www.ti.com/product/tmp101
+ http://www.ti.com/product/tmp105
+ http://www.ti.com/product/tmp75
+ http://www.ti.com/product/tmp175
+ http://www.ti.com/product/tmp275
Author: Frodo Looijaard <frodol@dds.nl>
@@ -50,21 +70,16 @@ range of -55 to +125 degrees.
The LM75 only updates its values each 1.5 seconds; reading it more often
will do no harm, but will return 'old' values.
-The LM75 is usually used in combination with LM78-like chips, to measure
-the temperature of the processor(s).
-
-The DS75, DS1775, MAX6625, and MAX6626 are supported as well.
-They are not distinguished from an LM75. While most of these chips
-have three additional bits of accuracy (12 vs. 9 for the LM75),
-the additional bits are not supported. Not only that, but these chips will
-not be detected if not in 9-bit precision mode (use the force parameter if
-needed).
-
-The TCN75 is supported as well, and is not distinguished from an LM75.
+The original LM75 was typically used in combination with LM78-like chips
+on PC motherboards, to measure the temperature of the processor(s). Clones
+are now used in various embedded designs.
The LM75 is essentially an industry standard; there may be other
LM75 clones not listed here, with or without various enhancements,
-that are supported.
+that are supported. The clones are not detected by the driver, unless
+they reproduce the exact register tricks of the original LM75, and must
+therefore be instantiated explicitly. The specific enhancements (such as
+higher resolution) are not currently supported by the driver.
The LM77 is not supported, contrary to what we pretended for a long time.
Both chips are simply not compatible, value encoding differs.
diff --git a/Documentation/hwmon/ltc2978 b/Documentation/hwmon/ltc2978
new file mode 100644
index 00000000000..c365f9beb5d
--- /dev/null
+++ b/Documentation/hwmon/ltc2978
@@ -0,0 +1,103 @@
+Kernel driver ltc2978
+=====================
+
+Supported chips:
+ * Linear Technology LTC2978
+ Prefix: 'ltc2978'
+ Addresses scanned: -
+ Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
+ * Linear Technology LTC3880
+ Prefix: 'ltc3880'
+ Addresses scanned: -
+ Datasheet: http://cds.linear.com/docs/Datasheet/3880f.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+The LTC2978 is an octal power supply monitor, supervisor, sequencer and
+margin controller. The LTC3880 is a dual, PolyPhase DC/DC synchronous
+step-down switching regulator controller.
+
+
+Usage Notes
+-----------
+
+This driver does not probe for PMBus devices. You will have to instantiate
+devices explicitly.
+
+Example: the following commands will load the driver for an LTC2978 at address
+0x60 on I2C bus #1:
+
+# modprobe ltc2978
+# echo ltc2978 0x60 > /sys/bus/i2c/devices/i2c-1/new_device
+
+
+Sysfs attributes
+----------------
+
+in1_label "vin"
+in1_input Measured input voltage.
+in1_min Minimum input voltage.
+in1_max Maximum input voltage.
+in1_lcrit Critical minimum input voltage.
+in1_crit Critical maximum input voltage.
+in1_min_alarm Input voltage low alarm.
+in1_max_alarm Input voltage high alarm.
+in1_lcrit_alarm Input voltage critical low alarm.
+in1_crit_alarm Input voltage critical high alarm.
+in1_lowest Lowest input voltage. LTC2978 only.
+in1_highest Highest input voltage.
+in1_reset_history Reset history. Writing into this attribute will reset
+ history for all attributes.
+
+in[2-9]_label "vout[1-8]". Channels 3 to 9 on LTC2978 only.
+in[2-9]_input Measured output voltage.
+in[2-9]_min Minimum output voltage.
+in[2-9]_max Maximum output voltage.
+in[2-9]_lcrit Critical minimum output voltage.
+in[2-9]_crit Critical maximum output voltage.
+in[2-9]_min_alarm Output voltage low alarm.
+in[2-9]_max_alarm Output voltage high alarm.
+in[2-9]_lcrit_alarm Output voltage critical low alarm.
+in[2-9]_crit_alarm Output voltage critical high alarm.
+in[2-9]_lowest Lowest output voltage. LTC2978 only.
+in[2-9]_highest Lowest output voltage.
+in[2-9]_reset_history Reset history. Writing into this attribute will reset
+ history for all attributes.
+
+temp[1-3]_input Measured temperature.
+ On LTC2978, only one temperature measurement is
+ supported and reflects the internal temperature.
+ On LTC3880, temp1 and temp2 report external
+ temperatures, and temp3 reports the internal
+ temperature.
+temp[1-3]_min Mimimum temperature.
+temp[1-3]_max Maximum temperature.
+temp[1-3]_lcrit Critical low temperature.
+temp[1-3]_crit Critical high temperature.
+temp[1-3]_min_alarm Chip temperature low alarm.
+temp[1-3]_max_alarm Chip temperature high alarm.
+temp[1-3]_lcrit_alarm Chip temperature critical low alarm.
+temp[1-3]_crit_alarm Chip temperature critical high alarm.
+temp[1-3]_lowest Lowest measured temperature. LTC2978 only.
+temp[1-3]_highest Highest measured temperature.
+temp[1-3]_reset_history Reset history. Writing into this attribute will reset
+ history for all attributes.
+
+power[1-2]_label "pout[1-2]". LTC3880 only.
+power[1-2]_input Measured power.
+
+curr1_label "iin". LTC3880 only.
+curr1_input Measured input current.
+curr1_max Maximum input current.
+curr1_max_alarm Input current high alarm.
+
+curr[2-3]_label "iout[1-2]". LTC3880 only.
+curr[2-3]_input Measured input current.
+curr[2-3]_max Maximum input current.
+curr[2-3]_crit Critical input current.
+curr[2-3]_max_alarm Input current high alarm.
+curr[2-3]_crit_alarm Input current critical high alarm.
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus
index c36c1c1a62b..15ac911ce51 100644
--- a/Documentation/hwmon/pmbus
+++ b/Documentation/hwmon/pmbus
@@ -8,11 +8,6 @@ Supported chips:
Addresses scanned: -
Datasheet:
http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146395
- * Linear Technology LTC2978
- Octal PMBus Power Supply Monitor and Controller
- Prefix: 'ltc2978'
- Addresses scanned: -
- Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
* ON Semiconductor ADP4000, NCP4200, NCP4208
Prefixes: 'adp4000', 'ncp4200', 'ncp4208'
Addresses scanned: -
@@ -20,6 +15,14 @@ Supported chips:
http://www.onsemi.com/pub_link/Collateral/ADP4000-D.PDF
http://www.onsemi.com/pub_link/Collateral/NCP4200-D.PDF
http://www.onsemi.com/pub_link/Collateral/JUNE%202009-%20REV.%200.PDF
+ * Lineage Power
+ Prefixes: 'pdt003', 'pdt006', 'pdt012', 'udt020'
+ Addresses scanned: -
+ Datasheets:
+ http://www.lineagepower.com/oem/pdf/PDT003A0X.pdf
+ http://www.lineagepower.com/oem/pdf/PDT006A0X.pdf
+ http://www.lineagepower.com/oem/pdf/PDT012A0X.pdf
+ http://www.lineagepower.com/oem/pdf/UDT020A0X.pdf
* Generic PMBus devices
Prefix: 'pmbus'
Addresses scanned: -
diff --git a/Documentation/hwmon/pmbus-core b/Documentation/hwmon/pmbus-core
new file mode 100644
index 00000000000..31e4720fed1
--- /dev/null
+++ b/Documentation/hwmon/pmbus-core
@@ -0,0 +1,283 @@
+PMBus core driver and internal API
+==================================
+
+Introduction
+============
+
+[from pmbus.org] The Power Management Bus (PMBus) is an open standard
+power-management protocol with a fully defined command language that facilitates
+communication with power converters and other devices in a power system. The
+protocol is implemented over the industry-standard SMBus serial interface and
+enables programming, control, and real-time monitoring of compliant power
+conversion products. This flexible and highly versatile standard allows for
+communication between devices based on both analog and digital technologies, and
+provides true interoperability which will reduce design complexity and shorten
+time to market for power system designers. Pioneered by leading power supply and
+semiconductor companies, this open power system standard is maintained and
+promoted by the PMBus Implementers Forum (PMBus-IF), comprising 30+ adopters
+with the objective to provide support to, and facilitate adoption among, users.
+
+Unfortunately, while PMBus commands are standardized, there are no mandatory
+commands, and manufacturers can add as many non-standard commands as they like.
+Also, different PMBUs devices act differently if non-supported commands are
+executed. Some devices return an error, some devices return 0xff or 0xffff and
+set a status error flag, and some devices may simply hang up.
+
+Despite all those difficulties, a generic PMBus device driver is still useful
+and supported since kernel version 2.6.39. However, it was necessary to support
+device specific extensions in addition to the core PMBus driver, since it is
+simply unknown what new device specific functionality PMBus device developers
+come up with next.
+
+To make device specific extensions as scalable as possible, and to avoid having
+to modify the core PMBus driver repeatedly for new devices, the PMBus driver was
+split into core, generic, and device specific code. The core code (in
+pmbus_core.c) provides generic functionality. The generic code (in pmbus.c)
+provides support for generic PMBus devices. Device specific code is responsible
+for device specific initialization and, if needed, maps device specific
+functionality into generic functionality. This is to some degree comparable
+to PCI code, where generic code is augmented as needed with quirks for all kinds
+of devices.
+
+PMBus device capabilities auto-detection
+========================================
+
+For generic PMBus devices, code in pmbus.c attempts to auto-detect all supported
+PMBus commands. Auto-detection is somewhat limited, since there are simply too
+many variables to consider. For example, it is almost impossible to autodetect
+which PMBus commands are paged and which commands are replicated across all
+pages (see the PMBus specification for details on multi-page PMBus devices).
+
+For this reason, it often makes sense to provide a device specific driver if not
+all commands can be auto-detected. The data structures in this driver can be
+used to inform the core driver about functionality supported by individual
+chips.
+
+Some commands are always auto-detected. This applies to all limit commands
+(lcrit, min, max, and crit attributes) as well as associated alarm attributes.
+Limits and alarm attributes are auto-detected because there are simply too many
+possible combinations to provide a manual configuration interface.
+
+PMBus internal API
+==================
+
+The API between core and device specific PMBus code is defined in
+drivers/hwmon/pmbus/pmbus.h. In addition to the internal API, pmbus.h defines
+standard PMBus commands and virtual PMBus commands.
+
+Standard PMBus commands
+-----------------------
+
+Standard PMBus commands (commands values 0x00 to 0xff) are defined in the PMBUs
+specification.
+
+Virtual PMBus commands
+----------------------
+
+Virtual PMBus commands are provided to enable support for non-standard
+functionality which has been implemented by several chip vendors and is thus
+desirable to support.
+
+Virtual PMBus commands start with command value 0x100 and can thus easily be
+distinguished from standard PMBus commands (which can not have values larger
+than 0xff). Support for virtual PMBus commands is device specific and thus has
+to be implemented in device specific code.
+
+Virtual commands are named PMBUS_VIRT_xxx and start with PMBUS_VIRT_BASE. All
+virtual commands are word sized.
+
+There are currently two types of virtual commands.
+
+- READ commands are read-only; writes are either ignored or return an error.
+- RESET commands are read/write. Reading reset registers returns zero
+ (used for detection), writing any value causes the associated history to be
+ reset.
+
+Virtual commands have to be handled in device specific driver code. Chip driver
+code returns non-negative values if a virtual command is supported, or a
+negative error code if not. The chip driver may return -ENODATA or any other
+Linux error code in this case, though an error code other than -ENODATA is
+handled more efficiently and thus preferred. Either case, the calling PMBus
+core code will abort if the chip driver returns an error code when reading
+or writing virtual registers (in other words, the PMBus core code will never
+send a virtual command to a chip).
+
+PMBus driver information
+------------------------
+
+PMBus driver information, defined in struct pmbus_driver_info, is the main means
+for device specific drivers to pass information to the core PMBus driver.
+Specifically, it provides the following information.
+
+- For devices supporting its data in Direct Data Format, it provides coefficients
+ for converting register values into normalized data. This data is usually
+ provided by chip manufacturers in device datasheets.
+- Supported chip functionality can be provided to the core driver. This may be
+ necessary for chips which react badly if non-supported commands are executed,
+ and/or to speed up device detection and initialization.
+- Several function entry points are provided to support overriding and/or
+ augmenting generic command execution. This functionality can be used to map
+ non-standard PMBus commands to standard commands, or to augment standard
+ command return values with device specific information.
+
+ API functions
+ -------------
+
+ Functions provided by chip driver
+ ---------------------------------
+
+ All functions return the command return value (read) or zero (write) if
+ successful. A return value of -ENODATA indicates that there is no manufacturer
+ specific command, but that a standard PMBus command may exist. Any other
+ negative return value indicates that the commands does not exist for this
+ chip, and that no attempt should be made to read or write the standard
+ command.
+
+ As mentioned above, an exception to this rule applies to virtual commands,
+ which _must_ be handled in driver specific code. See "Virtual PMBus Commands"
+ above for more details.
+
+ Command execution in the core PMBus driver code is as follows.
+
+ if (chip_access_function) {
+ status = chip_access_function();
+ if (status != -ENODATA)
+ return status;
+ }
+ if (command >= PMBUS_VIRT_BASE) /* For word commands/registers only */
+ return -EINVAL;
+ return generic_access();
+
+ Chip drivers may provide pointers to the following functions in struct
+ pmbus_driver_info. All functions are optional.
+
+ int (*read_byte_data)(struct i2c_client *client, int page, int reg);
+
+ Read byte from page <page>, register <reg>.
+ <page> may be -1, which means "current page".
+
+ int (*read_word_data)(struct i2c_client *client, int page, int reg);
+
+ Read word from page <page>, register <reg>.
+
+ int (*write_word_data)(struct i2c_client *client, int page, int reg,
+ u16 word);
+
+ Write word to page <page>, register <reg>.
+
+ int (*write_byte)(struct i2c_client *client, int page, u8 value);
+
+ Write byte to page <page>, register <reg>.
+ <page> may be -1, which means "current page".
+
+ int (*identify)(struct i2c_client *client, struct pmbus_driver_info *info);
+
+ Determine supported PMBus functionality. This function is only necessary
+ if a chip driver supports multiple chips, and the chip functionality is not
+ pre-determined. It is currently only used by the generic pmbus driver
+ (pmbus.c).
+
+ Functions exported by core driver
+ ---------------------------------
+
+ Chip drivers are expected to use the following functions to read or write
+ PMBus registers. Chip drivers may also use direct I2C commands. If direct I2C
+ commands are used, the chip driver code must not directly modify the current
+ page, since the selected page is cached in the core driver and the core driver
+ will assume that it is selected. Using pmbus_set_page() to select a new page
+ is mandatory.
+
+ int pmbus_set_page(struct i2c_client *client, u8 page);
+
+ Set PMBus page register to <page> for subsequent commands.
+
+ int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
+
+ Read word data from <page>, <reg>. Similar to i2c_smbus_read_word_data(), but
+ selects page first.
+
+ int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg,
+ u16 word);
+
+ Write word data to <page>, <reg>. Similar to i2c_smbus_write_word_data(), but
+ selects page first.
+
+ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
+
+ Read byte data from <page>, <reg>. Similar to i2c_smbus_read_byte_data(), but
+ selects page first. <page> may be -1, which means "current page".
+
+ int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
+
+ Write byte data to <page>, <reg>. Similar to i2c_smbus_write_byte(), but
+ selects page first. <page> may be -1, which means "current page".
+
+ void pmbus_clear_faults(struct i2c_client *client);
+
+ Execute PMBus "Clear Fault" command on all chip pages.
+ This function calls the device specific write_byte function if defined.
+ Therefore, it must _not_ be called from that function.
+
+ bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
+
+ Check if byte register exists. Return true if the register exists, false
+ otherwise.
+ This function calls the device specific write_byte function if defined to
+ obtain the chip status. Therefore, it must _not_ be called from that function.
+
+ bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
+
+ Check if word register exists. Return true if the register exists, false
+ otherwise.
+ This function calls the device specific write_byte function if defined to
+ obtain the chip status. Therefore, it must _not_ be called from that function.
+
+ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
+ struct pmbus_driver_info *info);
+
+ Execute probe function. Similar to standard probe function for other drivers,
+ with the pointer to struct pmbus_driver_info as additional argument. Calls
+ identify function if supported. Must only be called from device probe
+ function.
+
+ void pmbus_do_remove(struct i2c_client *client);
+
+ Execute driver remove function. Similar to standard driver remove function.
+
+ const struct pmbus_driver_info
+ *pmbus_get_driver_info(struct i2c_client *client);
+
+ Return pointer to struct pmbus_driver_info as passed to pmbus_do_probe().
+
+
+PMBus driver platform data
+==========================
+
+PMBus platform data is defined in include/linux/i2c/pmbus.h. Platform data
+currently only provides a flag field with a single bit used.
+
+#define PMBUS_SKIP_STATUS_CHECK (1 << 0)
+
+struct pmbus_platform_data {
+ u32 flags; /* Device specific flags */
+};
+
+
+Flags
+-----
+
+PMBUS_SKIP_STATUS_CHECK
+
+During register detection, skip checking the status register for
+communication or command errors.
+
+Some PMBus chips respond with valid data when trying to read an unsupported
+register. For such chips, checking the status register is mandatory when
+trying to determine if a chip register exists or not.
+Other PMBus chips don't support the STATUS_CML register, or report
+communication errors for no explicable reason. For such chips, checking the
+status register must be disabled.
+
+Some i2c controllers do not support single-byte commands (write commands with
+no data, i2c_smbus_write_byte()). With such controllers, clearing the status
+register is impossible, and the PMBUS_SKIP_STATUS_CHECK flag must be set.
diff --git a/Documentation/hwmon/zl6100 b/Documentation/hwmon/zl6100
new file mode 100644
index 00000000000..7617798b5c9
--- /dev/null
+++ b/Documentation/hwmon/zl6100
@@ -0,0 +1,125 @@
+Kernel driver zl6100
+====================
+
+Supported chips:
+ * Intersil / Zilker Labs ZL2004
+ Prefix: 'zl2004'
+ Addresses scanned: -
+ Datasheet: http://www.intersil.com/data/fn/fn6847.pdf
+ * Intersil / Zilker Labs ZL2006
+ Prefix: 'zl2006'
+ Addresses scanned: -
+ Datasheet: http://www.intersil.com/data/fn/fn6850.pdf
+ * Intersil / Zilker Labs ZL2008
+ Prefix: 'zl2008'
+ Addresses scanned: -
+ Datasheet: http://www.intersil.com/data/fn/fn6859.pdf
+ * Intersil / Zilker Labs ZL2105
+ Prefix: 'zl2105'
+ Addresses scanned: -
+ Datasheet: http://www.intersil.com/data/fn/fn6851.pdf
+ * Intersil / Zilker Labs ZL2106
+ Prefix: 'zl2106'
+ Addresses scanned: -
+ Datasheet: http://www.intersil.com/data/fn/fn6852.pdf
+ * Intersil / Zilker Labs ZL6100
+ Prefix: 'zl6100'
+ Addresses scanned: -
+ Datasheet: http://www.intersil.com/data/fn/fn6876.pdf
+ * Intersil / Zilker Labs ZL6105
+ Prefix: 'zl6105'
+ Addresses scanned: -
+ Datasheet: http://www.intersil.com/data/fn/fn6906.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware montoring for Intersil / Zilker Labs ZL6100 and
+compatible digital DC-DC controllers.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus and Documentation.hwmon/pmbus-core for details
+on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+WARNING: Do not access chip registers using the i2cdump command, and do not use
+any of the i2ctools commands on a command register used to save and restore
+configuration data (0x11, 0x12, 0x15, 0x16, and 0xf4). The chips supported by
+this driver interpret any access to those command registers (including read
+commands) as request to execute the command in question. Unless write accesses
+to those registers are protected, this may result in power loss, board resets,
+and/or Flash corruption. Worst case, your board may turn into a brick.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Module parameters
+-----------------
+
+delay
+-----
+
+Some Intersil/Zilker Labs DC-DC controllers require a minimum interval between
+I2C bus accesses. According to Intersil, the minimum interval is 2 ms, though
+1 ms appears to be sufficient and has not caused any problems in testing.
+The problem is known to affect ZL6100, ZL2105, and ZL2008. It is known not to
+affect ZL2004 and ZL6105. The driver automatically sets the interval to 1 ms
+except for ZL2004 and ZL6105. To enable manual override, the driver provides a
+writeable module parameter, 'delay', which can be used to set the interval to
+a value between 0 and 65,535 microseconds.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+in1_label "vin"
+in1_input Measured input voltage.
+in1_min Minimum input voltage.
+in1_max Maximum input voltage.
+in1_lcrit Critical minumum input voltage.
+in1_crit Critical maximum input voltage.
+in1_min_alarm Input voltage low alarm.
+in1_max_alarm Input voltage high alarm.
+in1_lcrit_alarm Input voltage critical low alarm.
+in1_crit_alarm Input voltage critical high alarm.
+
+in2_label "vout1"
+in2_input Measured output voltage.
+in2_lcrit Critical minumum output Voltage.
+in2_crit Critical maximum output voltage.
+in2_lcrit_alarm Critical output voltage critical low alarm.
+in2_crit_alarm Critical output voltage critical high alarm.
+
+curr1_label "iout1"
+curr1_input Measured output current.
+curr1_lcrit Critical minimum output current.
+curr1_crit Critical maximum output current.
+curr1_lcrit_alarm Output current critical low alarm.
+curr1_crit_alarm Output current critical high alarm.
+
+temp[12]_input Measured temperature.
+temp[12]_min Minimum temperature.
+temp[12]_max Maximum temperature.
+temp[12]_lcrit Critical low temperature.
+temp[12]_crit Critical high temperature.
+temp[12]_min_alarm Chip temperature low alarm.
+temp[12]_max_alarm Chip temperature high alarm.
+temp[12]_lcrit_alarm Chip temperature critical low alarm.
+temp[12]_crit_alarm Chip temperature critical high alarm.
diff --git a/Documentation/input/input.txt b/Documentation/input/input.txt
index b93c08442e3..b3d6787b4fb 100644
--- a/Documentation/input/input.txt
+++ b/Documentation/input/input.txt
@@ -111,7 +111,7 @@ LCDs and many other purposes.
The monitor and speaker controls should be easy to add to the hid/input
interface, but for the UPSs and LCDs it doesn't make much sense. For this,
-the hiddev interface was designed. See Documentation/usb/hiddev.txt
+the hiddev interface was designed. See Documentation/hid/hiddev.txt
for more information about it.
The usage of the usbhid module is very simple, it takes no parameters,
diff --git a/Documentation/kernel-docs.txt b/Documentation/kernel-docs.txt
index 0e0734b509d..eda1eb1451a 100644
--- a/Documentation/kernel-docs.txt
+++ b/Documentation/kernel-docs.txt
@@ -300,7 +300,7 @@
* Title: "The Kernel Hacking HOWTO"
Author: Various Talented People, and Rusty.
- Location: in kernel tree, Documentation/DocBook/kernel-hacking/
+ Location: in kernel tree, Documentation/DocBook/kernel-hacking.tmpl
(must be built as "make {htmldocs | psdocs | pdfdocs})
Keywords: HOWTO, kernel contexts, deadlock, locking, modules,
symbols, return conventions.
@@ -351,7 +351,7 @@
* Title: "Linux Kernel Locking HOWTO"
Author: Various Talented People, and Rusty.
- Location: in kernel tree, Documentation/DocBook/kernel-locking/
+ Location: in kernel tree, Documentation/DocBook/kernel-locking.tmpl
(must be built as "make {htmldocs | psdocs | pdfdocs})
Keywords: locks, locking, spinlock, semaphore, atomic, race
condition, bottom halves, tasklets, softirqs.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 854ed5ca7e3..a8ba119a4d5 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -49,6 +49,7 @@ parameter is applicable:
EDD BIOS Enhanced Disk Drive Services (EDD) is enabled
EFI EFI Partitioning (GPT) is enabled
EIDE EIDE/ATAPI support is enabled.
+ EVM Extended Verification Module
FB The frame buffer device is enabled.
FTRACE Function tracing enabled.
GCOV GCOV profiling is enabled.
@@ -163,7 +164,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
rsdt -- prefer RSDT over (default) XSDT
copy_dsdt -- copy DSDT to memory
- See also Documentation/power/pm.txt, pci=noacpi
+ See also Documentation/power/runtime_pm.txt, pci=noacpi
acpi_rsdp= [ACPI,EFI,KEXEC]
Pass the RSDP address to the kernel, mostly used
@@ -319,7 +320,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
amijoy.map= [HW,JOY] Amiga joystick support
Map of devices attached to JOY0DAT and JOY1DAT
Format: <a>,<b>
- See also Documentation/kernel/input/joystick.txt
+ See also Documentation/input/joystick.txt
analog.map= [HW,JOY] Analog joystick and gamepad support
Specifies type or capabilities of an analog joystick
@@ -408,7 +409,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
bttv.radio= Most important insmod options are available as
kernel args too.
bttv.pll= See Documentation/video4linux/bttv/Insmod-options
- bttv.tuner= and Documentation/video4linux/bttv/CARDLIST
+ bttv.tuner=
bulk_remove=off [PPC] This parameter disables the use of the pSeries
firmware feature for flushing multiple hpte entries
@@ -724,7 +725,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
elevator= [IOSCHED]
Format: {"cfq" | "deadline" | "noop"}
- See Documentation/block/as-iosched.txt and
+ See Documentation/block/cfq-iosched.txt and
Documentation/block/deadline-iosched.txt for details.
elfcorehdr= [IA-64,PPC,SH,X86]
@@ -760,12 +761,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
This option is obsoleted by the "netdev=" option, which
has equivalent usage. See its documentation for details.
+ evm= [EVM]
+ Format: { "fix" }
+ Permit 'security.evm' to be updated regardless of
+ current integrity status.
+
failslab=
fail_page_alloc=
fail_make_request=[KNL]
General fault injection mechanism.
Format: <interval>,<probability>,<space>,<times>
- See also /Documentation/fault-injection/.
+ See also Documentation/fault-injection/.
floppy= [HW]
See Documentation/blockdev/floppy.txt.
@@ -1014,10 +1020,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
has the capability. With this option, super page will
not be supported.
intremap= [X86-64, Intel-IOMMU]
- Format: { on (default) | off | nosid }
on enable Interrupt Remapping (default)
off disable Interrupt Remapping
nosid disable Source ID checking
+ no_x2apic_optout
+ BIOS x2APIC opt-out request will be ignored
inttest= [IA-64]
@@ -2240,6 +2247,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
in <PAGE_SIZE> units (needed only for swap files).
See Documentation/power/swsusp-and-swap-files.txt
+ resumedelay= [HIBERNATION] Delay (in seconds) to pause before attempting to
+ read the resume files
+
+ resumewait [HIBERNATION] Wait (indefinitely) for resume device to show up.
+ Useful for devices that are detected asynchronously
+ (e.g. USB and MMC devices).
+
hibernate= [HIBERNATION]
noresume Don't check if there's a hibernation image
present during boot.
@@ -2375,7 +2389,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format: <integer>
sonypi.*= [HW] Sony Programmable I/O Control Device driver
- See Documentation/sonypi.txt
+ See Documentation/laptops/sonypi.txt
specialix= [HW,SERIAL] Specialix multi-serial port adapter
See Documentation/serial/specialix.txt.
@@ -2706,10 +2720,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
functions are at fixed addresses, they make nice
targets for exploits that can control RIP.
- emulate [default] Vsyscalls turn into traps and are
- emulated reasonably safely.
+ emulate Vsyscalls turn into traps and are emulated
+ reasonably safely.
- native Vsyscalls are native syscall instructions.
+ native [default] Vsyscalls are native syscall
+ instructions.
This is a little bit faster than trapping
and makes a few dynamic recompilers work
better than they would in emulation mode.
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 61815483efa..3ff0dad62d3 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -736,7 +736,7 @@ status as "unknown". The available commands are:
sysfs notes:
The ThinkLight sysfs interface is documented by the LED class
-documentation, in Documentation/leds-class.txt. The ThinkLight LED name
+documentation, in Documentation/leds/leds-class.txt. The ThinkLight LED name
is "tpacpi::thinklight".
Due to limitations in the sysfs LED class, if the status of the ThinkLight
@@ -833,7 +833,7 @@ All of the above can be turned on and off and can be made to blink.
sysfs notes:
The ThinkPad LED sysfs interface is described in detail by the LED class
-documentation, in Documentation/leds-class.txt.
+documentation, in Documentation/leds/leds-class.txt.
The LEDs are named (in LED ID order, from 0 to 12):
"tpacpi::power", "tpacpi:orange:batt", "tpacpi:green:batt",
diff --git a/Documentation/media-framework.txt b/Documentation/media-framework.txt
index 669b5fb03a8..3a0f879533c 100644
--- a/Documentation/media-framework.txt
+++ b/Documentation/media-framework.txt
@@ -9,8 +9,8 @@ Introduction
------------
The media controller API is documented in DocBook format in
-Documentation/DocBook/v4l/media-controller.xml. This document will focus on
-the kernel-side implementation of the media framework.
+Documentation/DocBook/media/v4l/media-controller.xml. This document will focus
+on the kernel-side implementation of the media framework.
Abstract media device model
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index f0d3a8026a5..2759f7c188f 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -438,7 +438,7 @@ There are certain things that the Linux kernel memory barriers do not guarantee:
[*] For information on bus mastering DMA and coherency please read:
Documentation/PCI/pci.txt
- Documentation/PCI/PCI-DMA-mapping.txt
+ Documentation/DMA-API-HOWTO.txt
Documentation/DMA-API.txt
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 88d4afbdef9..c86d03f18a5 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -1,4 +1,4 @@
-[state: 17-04-2011]
+[state: 21-08-2011]
BATMAN-ADV
----------
@@ -68,9 +68,9 @@ All mesh wide settings can be found in batman's own interface
folder:
# ls /sys/class/net/bat0/mesh/
-# aggregated_ogms gw_bandwidth hop_penalty
-# bonding gw_mode orig_interval
-# fragmentation gw_sel_class vis_mode
+# aggregated_ogms fragmentation gw_sel_class vis_mode
+# ap_isolation gw_bandwidth hop_penalty
+# bonding gw_mode orig_interval
There is a special folder for debugging information:
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ca5cdcd0f0e..cb7f3148035 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1045,6 +1045,11 @@ conf/interface/*:
accept_ra - INTEGER
Accept Router Advertisements; autoconfigure using them.
+ It also determines whether or not to transmit Router
+ Solicitations. If and only if the functional setting is to
+ accept Router Advertisements, Router Solicitations will be
+ transmitted.
+
Possible values are:
0 Do not accept Router Advertisements.
1 Accept Router Advertisements if forwarding is disabled.
@@ -1115,14 +1120,14 @@ forwarding - INTEGER
Possible values are:
0 Forwarding disabled
1 Forwarding enabled
- 2 Forwarding enabled (Hybrid Mode)
FALSE (0):
By default, Host behaviour is assumed. This means:
1. IsRouter flag is not set in Neighbour Advertisements.
- 2. Router Solicitations are being sent when necessary.
+ 2. If accept_ra is TRUE (default), transmit Router
+ Solicitations.
3. If accept_ra is TRUE (default), accept Router
Advertisements (and do autoconfiguration).
4. If accept_redirects is TRUE (default), accept Redirects.
@@ -1133,16 +1138,10 @@ forwarding - INTEGER
This means exactly the reverse from the above:
1. IsRouter flag is set in Neighbour Advertisements.
- 2. Router Solicitations are not sent.
+ 2. Router Solicitations are not sent unless accept_ra is 2.
3. Router Advertisements are ignored unless accept_ra is 2.
4. Redirects are ignored.
- TRUE (2):
-
- Hybrid mode. Same behaviour as TRUE, except for:
-
- 2. Router Solicitations are being sent when necessary.
-
Default: 0 (disabled) if global forwarding is disabled (default),
otherwise 1 (enabled).
diff --git a/Documentation/networking/mac80211-injection.txt b/Documentation/networking/mac80211-injection.txt
index b30e81ad530..3a930072b16 100644
--- a/Documentation/networking/mac80211-injection.txt
+++ b/Documentation/networking/mac80211-injection.txt
@@ -23,6 +23,10 @@ radiotap headers and used to control injection:
IEEE80211_RADIOTAP_F_FRAG: frame will be fragmented if longer than the
current fragmentation threshold.
+ * IEEE80211_RADIOTAP_TX_FLAGS
+
+ IEEE80211_RADIOTAP_F_TX_NOACK: frame should be sent without waiting for
+ an ACK even if it is a unicast frame
The injection code can also skip all other currently defined radiotap fields
facilitating replay of captured radiotap headers directly.
diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
index 87b3d15f523..89358341682 100644
--- a/Documentation/networking/netdevices.txt
+++ b/Documentation/networking/netdevices.txt
@@ -73,7 +73,7 @@ dev->hard_start_xmit:
has to lock by itself when needed. It is recommended to use a try lock
for this and return NETDEV_TX_LOCKED when the spin lock fails.
The locking there should also properly protect against
- set_multicast_list. Note that the use of NETIF_F_LLTX is deprecated.
+ set_rx_mode. Note that the use of NETIF_F_LLTX is deprecated.
Don't use it for new drivers.
Context: Process with BHs disabled or BH (timer),
@@ -92,7 +92,7 @@ dev->tx_timeout:
Context: BHs disabled
Notes: netif_queue_stopped() is guaranteed true
-dev->set_multicast_list:
+dev->set_rx_mode:
Synchronization: netif_tx_lock spinlock.
Context: BHs disabled
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index 8ce7c30e723..a177de21d28 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -27,7 +27,7 @@ applying a filter to each packet that assigns it to one of a small number
of logical flows. Packets for each flow are steered to a separate receive
queue, which in turn can be processed by separate CPUs. This mechanism is
generally known as “Receive-side Scaling†(RSS). The goal of RSS and
-the other scaling techniques to increase performance uniformly.
+the other scaling techniques is to increase performance uniformly.
Multi-queue distribution can also be used for traffic prioritization, but
that is not the focus of these techniques.
@@ -73,7 +73,7 @@ of queues to IRQs can be determined from /proc/interrupts. By default,
an IRQ may be handled on any CPU. Because a non-negligible part of packet
processing takes place in receive interrupt handling, it is advantageous
to spread receive interrupts between CPUs. To manually adjust the IRQ
-affinity of each interrupt see Documentation/IRQ-affinity. Some systems
+affinity of each interrupt see Documentation/IRQ-affinity.txt. Some systems
will be running irqbalance, a daemon that dynamically optimizes IRQ
assignments and as a result may override any manual settings.
@@ -186,10 +186,10 @@ are steered using plain RPS. Multiple table entries may point to the
same CPU. Indeed, with many flows and few CPUs, it is very likely that
a single application thread handles flows with many different flow hashes.
-rps_sock_table is a global flow table that contains the *desired* CPU for
-flows: the CPU that is currently processing the flow in userspace. Each
-table value is a CPU index that is updated during calls to recvmsg and
-sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
+rps_sock_flow_table is a global flow table that contains the *desired* CPU
+for flows: the CPU that is currently processing the flow in userspace.
+Each table value is a CPU index that is updated during calls to recvmsg
+and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
and tcp_splice_read()).
When the scheduler moves a thread to a new CPU while it has outstanding
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 57a24108b84..8d67980fabe 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -76,7 +76,16 @@ core.
4.5) DMA descriptors
Driver handles both normal and enhanced descriptors. The latter has been only
-tested on DWC Ether MAC 10/100/1000 Universal version 3.41a.
+tested on DWC Ether MAC 10/100/1000 Universal version 3.41a and later.
+
+STMMAC supports DMA descriptor to operate both in dual buffer (RING)
+and linked-list(CHAINED) mode. In RING each descriptor points to two
+data buffer pointers whereas in CHAINED mode they point to only one data
+buffer pointer. RING mode is the default.
+
+In CHAINED mode each descriptor will have pointer to next descriptor in
+the list, hence creating the explicit chaining in the descriptor itself,
+whereas such explicit chaining is not possible in RING mode.
4.6) Ethtool support
Ethtool is supported. Driver statistics and internal errors can be taken using:
@@ -235,7 +244,38 @@ reset procedure etc).
o enh_desc.c: functions for handling enhanced descriptors
o norm_desc.c: functions for handling normal descriptors
-5) TODO:
+5) Debug Information
+
+The driver exports many information i.e. internal statistics,
+debug information, MAC and DMA registers etc.
+
+These can be read in several ways depending on the
+type of the information actually needed.
+
+For example a user can be use the ethtool support
+to get statistics: e.g. using: ethtool -S ethX
+(that shows the Management counters (MMC) if supported)
+or sees the MAC/DMA registers: e.g. using: ethtool -d ethX
+
+Compiling the Kernel with CONFIG_DEBUG_FS and enabling the
+STMMAC_DEBUG_FS option the driver will export the following
+debugfs entries:
+
+/sys/kernel/debug/stmmaceth/descriptors_status
+ To show the DMA TX/RX descriptor rings
+
+Developer can also use the "debug" module parameter to get
+further debug information.
+
+In the end, there are other macros (that cannot be enabled
+via menuconfig) to turn-on the RX/TX DMA debugging,
+specific MAC core debug printk etc. Others to enable the
+debug in the TX and RX processes.
+All these are only useful during the developing stage
+and should never enabled inside the code for general usage.
+In fact, these can generate an huge amount of debug messages.
+
+6) TODO:
o XGMAC is not supported.
o Review the timer optimisation code to use an embedded device that will be
available in new chip generations.
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
new file mode 100644
index 00000000000..b04cb7d45a1
--- /dev/null
+++ b/Documentation/pinctrl.txt
@@ -0,0 +1,950 @@
+PINCTRL (PIN CONTROL) subsystem
+This document outlines the pin control subsystem in Linux
+
+This subsystem deals with:
+
+- Enumerating and naming controllable pins
+
+- Multiplexing of pins, pads, fingers (etc) see below for details
+
+The intention is to also deal with:
+
+- Software-controlled biasing and driving mode specific pins, such as
+ pull-up/down, open drain etc, load capacitance configuration when controlled
+ by software, etc.
+
+
+Top-level interface
+===================
+
+Definition of PIN CONTROLLER:
+
+- A pin controller is a piece of hardware, usually a set of registers, that
+ can control PINs. It may be able to multiplex, bias, set load capacitance,
+ set drive strength etc for individual pins or groups of pins.
+
+Definition of PIN:
+
+- PINS are equal to pads, fingers, balls or whatever packaging input or
+ output line you want to control and these are denoted by unsigned integers
+ in the range 0..maxpin. This numberspace is local to each PIN CONTROLLER, so
+ there may be several such number spaces in a system. This pin space may
+ be sparse - i.e. there may be gaps in the space with numbers where no
+ pin exists.
+
+When a PIN CONTROLLER is instatiated, it will register a descriptor to the
+pin control framework, and this descriptor contains an array of pin descriptors
+describing the pins handled by this specific pin controller.
+
+Here is an example of a PGA (Pin Grid Array) chip seen from underneath:
+
+ A B C D E F G H
+
+ 8 o o o o o o o o
+
+ 7 o o o o o o o o
+
+ 6 o o o o o o o o
+
+ 5 o o o o o o o o
+
+ 4 o o o o o o o o
+
+ 3 o o o o o o o o
+
+ 2 o o o o o o o o
+
+ 1 o o o o o o o o
+
+To register a pin controller and name all the pins on this package we can do
+this in our driver:
+
+#include <linux/pinctrl/pinctrl.h>
+
+const struct pinctrl_pin_desc __refdata foo_pins[] = {
+ PINCTRL_PIN(0, "A1"),
+ PINCTRL_PIN(1, "A2"),
+ PINCTRL_PIN(2, "A3"),
+ ...
+ PINCTRL_PIN(61, "H6"),
+ PINCTRL_PIN(62, "H7"),
+ PINCTRL_PIN(63, "H8"),
+};
+
+static struct pinctrl_desc foo_desc = {
+ .name = "foo",
+ .pins = foo_pins,
+ .npins = ARRAY_SIZE(foo_pins),
+ .maxpin = 63,
+ .owner = THIS_MODULE,
+};
+
+int __init foo_probe(void)
+{
+ struct pinctrl_dev *pctl;
+
+ pctl = pinctrl_register(&foo_desc, <PARENT>, NULL);
+ if (IS_ERR(pctl))
+ pr_err("could not register foo pin driver\n");
+}
+
+Pins usually have fancier names than this. You can find these in the dataheet
+for your chip. Notice that the core pinctrl.h file provides a fancy macro
+called PINCTRL_PIN() to create the struct entries. As you can see I enumerated
+the pins from 0 in the upper left corner to 63 in the lower right corner,
+this enumeration was arbitrarily chosen, in practice you need to think
+through your numbering system so that it matches the layout of registers
+and such things in your driver, or the code may become complicated. You must
+also consider matching of offsets to the GPIO ranges that may be handled by
+the pin controller.
+
+For a padring with 467 pads, as opposed to actual pins, I used an enumeration
+like this, walking around the edge of the chip, which seems to be industry
+standard too (all these pads had names, too):
+
+
+ 0 ..... 104
+ 466 105
+ . .
+ . .
+ 358 224
+ 357 .... 225
+
+
+Pin groups
+==========
+
+Many controllers need to deal with groups of pins, so the pin controller
+subsystem has a mechanism for enumerating groups of pins and retrieving the
+actual enumerated pins that are part of a certain group.
+
+For example, say that we have a group of pins dealing with an SPI interface
+on { 0, 8, 16, 24 }, and a group of pins dealing with an I2C interface on pins
+on { 24, 25 }.
+
+These two groups are presented to the pin control subsystem by implementing
+some generic pinctrl_ops like this:
+
+#include <linux/pinctrl/pinctrl.h>
+
+struct foo_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned num_pins;
+};
+
+static unsigned int spi0_pins[] = { 0, 8, 16, 24 };
+static unsigned int i2c0_pins[] = { 24, 25 };
+
+static const struct foo_group foo_groups[] = {
+ {
+ .name = "spi0_grp",
+ .pins = spi0_pins,
+ .num_pins = ARRAY_SIZE(spi0_pins),
+ },
+ {
+ .name = "i2c0_grp",
+ .pins = i2c0_pins,
+ .num_pins = ARRAY_SIZE(i2c0_pins),
+ },
+};
+
+
+static int foo_list_groups(struct pinctrl_dev *pctldev, unsigned selector)
+{
+ if (selector >= ARRAY_SIZE(foo_groups))
+ return -EINVAL;
+ return 0;
+}
+
+static const char *foo_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return foo_groups[selector].name;
+}
+
+static int foo_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned ** const pins,
+ unsigned * const num_pins)
+{
+ *pins = (unsigned *) foo_groups[selector].pins;
+ *num_pins = foo_groups[selector].num_pins;
+ return 0;
+}
+
+static struct pinctrl_ops foo_pctrl_ops = {
+ .list_groups = foo_list_groups,
+ .get_group_name = foo_get_group_name,
+ .get_group_pins = foo_get_group_pins,
+};
+
+
+static struct pinctrl_desc foo_desc = {
+ ...
+ .pctlops = &foo_pctrl_ops,
+};
+
+The pin control subsystem will call the .list_groups() function repeatedly
+beginning on 0 until it returns non-zero to determine legal selectors, then
+it will call the other functions to retrieve the name and pins of the group.
+Maintaining the data structure of the groups is up to the driver, this is
+just a simple example - in practice you may need more entries in your group
+structure, for example specific register ranges associated with each group
+and so on.
+
+
+Interaction with the GPIO subsystem
+===================================
+
+The GPIO drivers may want to perform operations of various types on the same
+physical pins that are also registered as pin controller pins.
+
+Since the pin controller subsystem have its pinspace local to the pin
+controller we need a mapping so that the pin control subsystem can figure out
+which pin controller handles control of a certain GPIO pin. Since a single
+pin controller may be muxing several GPIO ranges (typically SoCs that have
+one set of pins but internally several GPIO silicon blocks, each modeled as
+a struct gpio_chip) any number of GPIO ranges can be added to a pin controller
+instance like this:
+
+struct gpio_chip chip_a;
+struct gpio_chip chip_b;
+
+static struct pinctrl_gpio_range gpio_range_a = {
+ .name = "chip a",
+ .id = 0,
+ .base = 32,
+ .npins = 16,
+ .gc = &chip_a;
+};
+
+static struct pinctrl_gpio_range gpio_range_a = {
+ .name = "chip b",
+ .id = 0,
+ .base = 48,
+ .npins = 8,
+ .gc = &chip_b;
+};
+
+
+{
+ struct pinctrl_dev *pctl;
+ ...
+ pinctrl_add_gpio_range(pctl, &gpio_range_a);
+ pinctrl_add_gpio_range(pctl, &gpio_range_b);
+}
+
+So this complex system has one pin controller handling two different
+GPIO chips. Chip a has 16 pins and chip b has 8 pins. They are mapped in
+the global GPIO pin space at:
+
+chip a: [32 .. 47]
+chip b: [48 .. 55]
+
+When GPIO-specific functions in the pin control subsystem are called, these
+ranges will be used to look up the apropriate pin controller by inspecting
+and matching the pin to the pin ranges across all controllers. When a
+pin controller handling the matching range is found, GPIO-specific functions
+will be called on that specific pin controller.
+
+For all functionalities dealing with pin biasing, pin muxing etc, the pin
+controller subsystem will subtract the range's .base offset from the passed
+in gpio pin number, and pass that on to the pin control driver, so the driver
+will get an offset into its handled number range. Further it is also passed
+the range ID value, so that the pin controller knows which range it should
+deal with.
+
+For example: if a user issues pinctrl_gpio_set_foo(50), the pin control
+subsystem will find that the second range on this pin controller matches,
+subtract the base 48 and call the
+pinctrl_driver_gpio_set_foo(pinctrl, range, 2) where the latter function has
+this signature:
+
+int pinctrl_driver_gpio_set_foo(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *rangeid,
+ unsigned offset);
+
+Now the driver knows that we want to do some GPIO-specific operation on the
+second GPIO range handled by "chip b", at offset 2 in that specific range.
+
+(If the GPIO subsystem is ever refactored to use a local per-GPIO controller
+pin space, this mapping will need to be augmented accordingly.)
+
+
+PINMUX interfaces
+=================
+
+These calls use the pinmux_* naming prefix. No other calls should use that
+prefix.
+
+
+What is pinmuxing?
+==================
+
+PINMUX, also known as padmux, ballmux, alternate functions or mission modes
+is a way for chip vendors producing some kind of electrical packages to use
+a certain physical pin (ball, pad, finger, etc) for multiple mutually exclusive
+functions, depending on the application. By "application" in this context
+we usually mean a way of soldering or wiring the package into an electronic
+system, even though the framework makes it possible to also change the function
+at runtime.
+
+Here is an example of a PGA (Pin Grid Array) chip seen from underneath:
+
+ A B C D E F G H
+ +---+
+ 8 | o | o o o o o o o
+ | |
+ 7 | o | o o o o o o o
+ | |
+ 6 | o | o o o o o o o
+ +---+---+
+ 5 | o | o | o o o o o o
+ +---+---+ +---+
+ 4 o o o o o o | o | o
+ | |
+ 3 o o o o o o | o | o
+ | |
+ 2 o o o o o o | o | o
+ +-------+-------+-------+---+---+
+ 1 | o o | o o | o o | o | o |
+ +-------+-------+-------+---+---+
+
+This is not tetris. The game to think of is chess. Not all PGA/BGA packages
+are chessboard-like, big ones have "holes" in some arrangement according to
+different design patterns, but we're using this as a simple example. Of the
+pins you see some will be taken by things like a few VCC and GND to feed power
+to the chip, and quite a few will be taken by large ports like an external
+memory interface. The remaining pins will often be subject to pin multiplexing.
+
+The example 8x8 PGA package above will have pin numbers 0 thru 63 assigned to
+its physical pins. It will name the pins { A1, A2, A3 ... H6, H7, H8 } using
+pinctrl_register_pins() and a suitable data set as shown earlier.
+
+In this 8x8 BGA package the pins { A8, A7, A6, A5 } can be used as an SPI port
+(these are four pins: CLK, RXD, TXD, FRM). In that case, pin B5 can be used as
+some general-purpose GPIO pin. However, in another setting, pins { A5, B5 } can
+be used as an I2C port (these are just two pins: SCL, SDA). Needless to say,
+we cannot use the SPI port and I2C port at the same time. However in the inside
+of the package the silicon performing the SPI logic can alternatively be routed
+out on pins { G4, G3, G2, G1 }.
+
+On the botton row at { A1, B1, C1, D1, E1, F1, G1, H1 } we have something
+special - it's an external MMC bus that can be 2, 4 or 8 bits wide, and it will
+consume 2, 4 or 8 pins respectively, so either { A1, B1 } are taken or
+{ A1, B1, C1, D1 } or all of them. If we use all 8 bits, we cannot use the SPI
+port on pins { G4, G3, G2, G1 } of course.
+
+This way the silicon blocks present inside the chip can be multiplexed "muxed"
+out on different pin ranges. Often contemporary SoC (systems on chip) will
+contain several I2C, SPI, SDIO/MMC, etc silicon blocks that can be routed to
+different pins by pinmux settings.
+
+Since general-purpose I/O pins (GPIO) are typically always in shortage, it is
+common to be able to use almost any pin as a GPIO pin if it is not currently
+in use by some other I/O port.
+
+
+Pinmux conventions
+==================
+
+The purpose of the pinmux functionality in the pin controller subsystem is to
+abstract and provide pinmux settings to the devices you choose to instantiate
+in your machine configuration. It is inspired by the clk, GPIO and regulator
+subsystems, so devices will request their mux setting, but it's also possible
+to request a single pin for e.g. GPIO.
+
+Definitions:
+
+- FUNCTIONS can be switched in and out by a driver residing with the pin
+ control subsystem in the drivers/pinctrl/* directory of the kernel. The
+ pin control driver knows the possible functions. In the example above you can
+ identify three pinmux functions, one for spi, one for i2c and one for mmc.
+
+- FUNCTIONS are assumed to be enumerable from zero in a one-dimensional array.
+ In this case the array could be something like: { spi0, i2c0, mmc0 }
+ for the three available functions.
+
+- FUNCTIONS have PIN GROUPS as defined on the generic level - so a certain
+ function is *always* associated with a certain set of pin groups, could
+ be just a single one, but could also be many. In the example above the
+ function i2c is associated with the pins { A5, B5 }, enumerated as
+ { 24, 25 } in the controller pin space.
+
+ The Function spi is associated with pin groups { A8, A7, A6, A5 }
+ and { G4, G3, G2, G1 }, which are enumerated as { 0, 8, 16, 24 } and
+ { 38, 46, 54, 62 } respectively.
+
+ Group names must be unique per pin controller, no two groups on the same
+ controller may have the same name.
+
+- The combination of a FUNCTION and a PIN GROUP determine a certain function
+ for a certain set of pins. The knowledge of the functions and pin groups
+ and their machine-specific particulars are kept inside the pinmux driver,
+ from the outside only the enumerators are known, and the driver core can:
+
+ - Request the name of a function with a certain selector (>= 0)
+ - A list of groups associated with a certain function
+ - Request that a certain group in that list to be activated for a certain
+ function
+
+ As already described above, pin groups are in turn self-descriptive, so
+ the core will retrieve the actual pin range in a certain group from the
+ driver.
+
+- FUNCTIONS and GROUPS on a certain PIN CONTROLLER are MAPPED to a certain
+ device by the board file, device tree or similar machine setup configuration
+ mechanism, similar to how regulators are connected to devices, usually by
+ name. Defining a pin controller, function and group thus uniquely identify
+ the set of pins to be used by a certain device. (If only one possible group
+ of pins is available for the function, no group name need to be supplied -
+ the core will simply select the first and only group available.)
+
+ In the example case we can define that this particular machine shall
+ use device spi0 with pinmux function fspi0 group gspi0 and i2c0 on function
+ fi2c0 group gi2c0, on the primary pin controller, we get mappings
+ like these:
+
+ {
+ {"map-spi0", spi0, pinctrl0, fspi0, gspi0},
+ {"map-i2c0", i2c0, pinctrl0, fi2c0, gi2c0}
+ }
+
+ Every map must be assigned a symbolic name, pin controller and function.
+ The group is not compulsory - if it is omitted the first group presented by
+ the driver as applicable for the function will be selected, which is
+ useful for simple cases.
+
+ The device name is present in map entries tied to specific devices. Maps
+ without device names are referred to as SYSTEM pinmuxes, such as can be taken
+ by the machine implementation on boot and not tied to any specific device.
+
+ It is possible to map several groups to the same combination of device,
+ pin controller and function. This is for cases where a certain function on
+ a certain pin controller may use different sets of pins in different
+ configurations.
+
+- PINS for a certain FUNCTION using a certain PIN GROUP on a certain
+ PIN CONTROLLER are provided on a first-come first-serve basis, so if some
+ other device mux setting or GPIO pin request has already taken your physical
+ pin, you will be denied the use of it. To get (activate) a new setting, the
+ old one has to be put (deactivated) first.
+
+Sometimes the documentation and hardware registers will be oriented around
+pads (or "fingers") rather than pins - these are the soldering surfaces on the
+silicon inside the package, and may or may not match the actual number of
+pins/balls underneath the capsule. Pick some enumeration that makes sense to
+you. Define enumerators only for the pins you can control if that makes sense.
+
+Assumptions:
+
+We assume that the number possible function maps to pin groups is limited by
+the hardware. I.e. we assume that there is no system where any function can be
+mapped to any pin, like in a phone exchange. So the available pins groups for
+a certain function will be limited to a few choices (say up to eight or so),
+not hundreds or any amount of choices. This is the characteristic we have found
+by inspecting available pinmux hardware, and a necessary assumption since we
+expect pinmux drivers to present *all* possible function vs pin group mappings
+to the subsystem.
+
+
+Pinmux drivers
+==============
+
+The pinmux core takes care of preventing conflicts on pins and calling
+the pin controller driver to execute different settings.
+
+It is the responsibility of the pinmux driver to impose further restrictions
+(say for example infer electronic limitations due to load etc) to determine
+whether or not the requested function can actually be allowed, and in case it
+is possible to perform the requested mux setting, poke the hardware so that
+this happens.
+
+Pinmux drivers are required to supply a few callback functions, some are
+optional. Usually the enable() and disable() functions are implemented,
+writing values into some certain registers to activate a certain mux setting
+for a certain pin.
+
+A simple driver for the above example will work by setting bits 0, 1, 2, 3 or 4
+into some register named MUX to select a certain function with a certain
+group of pins would work something like this:
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+struct foo_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned num_pins;
+};
+
+static const unsigned spi0_0_pins[] = { 0, 8, 16, 24 };
+static const unsigned spi0_1_pins[] = { 38, 46, 54, 62 };
+static const unsigned i2c0_pins[] = { 24, 25 };
+static const unsigned mmc0_1_pins[] = { 56, 57 };
+static const unsigned mmc0_2_pins[] = { 58, 59 };
+static const unsigned mmc0_3_pins[] = { 60, 61, 62, 63 };
+
+static const struct foo_group foo_groups[] = {
+ {
+ .name = "spi0_0_grp",
+ .pins = spi0_0_pins,
+ .num_pins = ARRAY_SIZE(spi0_0_pins),
+ },
+ {
+ .name = "spi0_1_grp",
+ .pins = spi0_1_pins,
+ .num_pins = ARRAY_SIZE(spi0_1_pins),
+ },
+ {
+ .name = "i2c0_grp",
+ .pins = i2c0_pins,
+ .num_pins = ARRAY_SIZE(i2c0_pins),
+ },
+ {
+ .name = "mmc0_1_grp",
+ .pins = mmc0_1_pins,
+ .num_pins = ARRAY_SIZE(mmc0_1_pins),
+ },
+ {
+ .name = "mmc0_2_grp",
+ .pins = mmc0_2_pins,
+ .num_pins = ARRAY_SIZE(mmc0_2_pins),
+ },
+ {
+ .name = "mmc0_3_grp",
+ .pins = mmc0_3_pins,
+ .num_pins = ARRAY_SIZE(mmc0_3_pins),
+ },
+};
+
+
+static int foo_list_groups(struct pinctrl_dev *pctldev, unsigned selector)
+{
+ if (selector >= ARRAY_SIZE(foo_groups))
+ return -EINVAL;
+ return 0;
+}
+
+static const char *foo_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return foo_groups[selector].name;
+}
+
+static int foo_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned ** const pins,
+ unsigned * const num_pins)
+{
+ *pins = (unsigned *) foo_groups[selector].pins;
+ *num_pins = foo_groups[selector].num_pins;
+ return 0;
+}
+
+static struct pinctrl_ops foo_pctrl_ops = {
+ .list_groups = foo_list_groups,
+ .get_group_name = foo_get_group_name,
+ .get_group_pins = foo_get_group_pins,
+};
+
+struct foo_pmx_func {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+};
+
+static const char * const spi0_groups[] = { "spi0_1_grp" };
+static const char * const i2c0_groups[] = { "i2c0_grp" };
+static const char * const mmc0_groups[] = { "mmc0_1_grp", "mmc0_2_grp",
+ "mmc0_3_grp" };
+
+static const struct foo_pmx_func foo_functions[] = {
+ {
+ .name = "spi0",
+ .groups = spi0_groups,
+ .num_groups = ARRAY_SIZE(spi0_groups),
+ },
+ {
+ .name = "i2c0",
+ .groups = i2c0_groups,
+ .num_groups = ARRAY_SIZE(i2c0_groups),
+ },
+ {
+ .name = "mmc0",
+ .groups = mmc0_groups,
+ .num_groups = ARRAY_SIZE(mmc0_groups),
+ },
+};
+
+int foo_list_funcs(struct pinctrl_dev *pctldev, unsigned selector)
+{
+ if (selector >= ARRAY_SIZE(foo_functions))
+ return -EINVAL;
+ return 0;
+}
+
+const char *foo_get_fname(struct pinctrl_dev *pctldev, unsigned selector)
+{
+ return myfuncs[selector].name;
+}
+
+static int foo_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = foo_functions[selector].groups;
+ *num_groups = foo_functions[selector].num_groups;
+ return 0;
+}
+
+int foo_enable(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
+{
+ u8 regbit = (1 << group);
+
+ writeb((readb(MUX)|regbit), MUX)
+ return 0;
+}
+
+int foo_disable(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
+{
+ u8 regbit = (1 << group);
+
+ writeb((readb(MUX) & ~(regbit)), MUX)
+ return 0;
+}
+
+struct pinmux_ops foo_pmxops = {
+ .list_functions = foo_list_funcs,
+ .get_function_name = foo_get_fname,
+ .get_function_groups = foo_get_groups,
+ .enable = foo_enable,
+ .disable = foo_disable,
+};
+
+/* Pinmux operations are handled by some pin controller */
+static struct pinctrl_desc foo_desc = {
+ ...
+ .pctlops = &foo_pctrl_ops,
+ .pmxops = &foo_pmxops,
+};
+
+In the example activating muxing 0 and 1 at the same time setting bits
+0 and 1, uses one pin in common so they would collide.
+
+The beauty of the pinmux subsystem is that since it keeps track of all
+pins and who is using them, it will already have denied an impossible
+request like that, so the driver does not need to worry about such
+things - when it gets a selector passed in, the pinmux subsystem makes
+sure no other device or GPIO assignment is already using the selected
+pins. Thus bits 0 and 1 in the control register will never be set at the
+same time.
+
+All the above functions are mandatory to implement for a pinmux driver.
+
+
+Pinmux interaction with the GPIO subsystem
+==========================================
+
+The function list could become long, especially if you can convert every
+individual pin into a GPIO pin independent of any other pins, and then try
+the approach to define every pin as a function.
+
+In this case, the function array would become 64 entries for each GPIO
+setting and then the device functions.
+
+For this reason there is an additional function a pinmux driver can implement
+to enable only GPIO on an individual pin: .gpio_request_enable(). The same
+.free() function as for other functions is assumed to be usable also for
+GPIO pins.
+
+This function will pass in the affected GPIO range identified by the pin
+controller core, so you know which GPIO pins are being affected by the request
+operation.
+
+Alternatively it is fully allowed to use named functions for each GPIO
+pin, the pinmux_request_gpio() will attempt to obtain the function "gpioN"
+where "N" is the global GPIO pin number if no special GPIO-handler is
+registered.
+
+
+Pinmux board/machine configuration
+==================================
+
+Boards and machines define how a certain complete running system is put
+together, including how GPIOs and devices are muxed, how regulators are
+constrained and how the clock tree looks. Of course pinmux settings are also
+part of this.
+
+A pinmux config for a machine looks pretty much like a simple regulator
+configuration, so for the example array above we want to enable i2c and
+spi on the second function mapping:
+
+#include <linux/pinctrl/machine.h>
+
+static struct pinmux_map pmx_mapping[] = {
+ {
+ .ctrl_dev_name = "pinctrl.0",
+ .function = "spi0",
+ .dev_name = "foo-spi.0",
+ },
+ {
+ .ctrl_dev_name = "pinctrl.0",
+ .function = "i2c0",
+ .dev_name = "foo-i2c.0",
+ },
+ {
+ .ctrl_dev_name = "pinctrl.0",
+ .function = "mmc0",
+ .dev_name = "foo-mmc.0",
+ },
+};
+
+The dev_name here matches to the unique device name that can be used to look
+up the device struct (just like with clockdev or regulators). The function name
+must match a function provided by the pinmux driver handling this pin range.
+
+As you can see we may have several pin controllers on the system and thus
+we need to specify which one of them that contain the functions we wish
+to map. The map can also use struct device * directly, so there is no
+inherent need to use strings to specify .dev_name or .ctrl_dev_name, these
+are for the situation where you do not have a handle to the struct device *,
+for example if they are not yet instantiated or cumbersome to obtain.
+
+You register this pinmux mapping to the pinmux subsystem by simply:
+
+ ret = pinmux_register_mappings(&pmx_mapping, ARRAY_SIZE(pmx_mapping));
+
+Since the above construct is pretty common there is a helper macro to make
+it even more compact which assumes you want to use pinctrl.0 and position
+0 for mapping, for example:
+
+static struct pinmux_map pmx_mapping[] = {
+ PINMUX_MAP_PRIMARY("I2CMAP", "i2c0", "foo-i2c.0"),
+};
+
+
+Complex mappings
+================
+
+As it is possible to map a function to different groups of pins an optional
+.group can be specified like this:
+
+...
+{
+ .name = "spi0-pos-A",
+ .ctrl_dev_name = "pinctrl.0",
+ .function = "spi0",
+ .group = "spi0_0_grp",
+ .dev_name = "foo-spi.0",
+},
+{
+ .name = "spi0-pos-B",
+ .ctrl_dev_name = "pinctrl.0",
+ .function = "spi0",
+ .group = "spi0_1_grp",
+ .dev_name = "foo-spi.0",
+},
+...
+
+This example mapping is used to switch between two positions for spi0 at
+runtime, as described further below under the heading "Runtime pinmuxing".
+
+Further it is possible to match several groups of pins to the same function
+for a single device, say for example in the mmc0 example above, where you can
+additively expand the mmc0 bus from 2 to 4 to 8 pins. If we want to use all
+three groups for a total of 2+2+4 = 8 pins (for an 8-bit MMC bus as is the
+case), we define a mapping like this:
+
+...
+{
+ .name "2bit"
+ .ctrl_dev_name = "pinctrl.0",
+ .function = "mmc0",
+ .group = "mmc0_0_grp",
+ .dev_name = "foo-mmc.0",
+},
+{
+ .name "4bit"
+ .ctrl_dev_name = "pinctrl.0",
+ .function = "mmc0",
+ .group = "mmc0_0_grp",
+ .dev_name = "foo-mmc.0",
+},
+{
+ .name "4bit"
+ .ctrl_dev_name = "pinctrl.0",
+ .function = "mmc0",
+ .group = "mmc0_1_grp",
+ .dev_name = "foo-mmc.0",
+},
+{
+ .name "8bit"
+ .ctrl_dev_name = "pinctrl.0",
+ .function = "mmc0",
+ .group = "mmc0_0_grp",
+ .dev_name = "foo-mmc.0",
+},
+{
+ .name "8bit"
+ .ctrl_dev_name = "pinctrl.0",
+ .function = "mmc0",
+ .group = "mmc0_1_grp",
+ .dev_name = "foo-mmc.0",
+},
+{
+ .name "8bit"
+ .ctrl_dev_name = "pinctrl.0",
+ .function = "mmc0",
+ .group = "mmc0_2_grp",
+ .dev_name = "foo-mmc.0",
+},
+...
+
+The result of grabbing this mapping from the device with something like
+this (see next paragraph):
+
+ pmx = pinmux_get(&device, "8bit");
+
+Will be that you activate all the three bottom records in the mapping at
+once. Since they share the same name, pin controller device, funcion and
+device, and since we allow multiple groups to match to a single device, they
+all get selected, and they all get enabled and disable simultaneously by the
+pinmux core.
+
+
+Pinmux requests from drivers
+============================
+
+Generally it is discouraged to let individual drivers get and enable pinmuxes.
+So if possible, handle the pinmuxes in platform code or some other place where
+you have access to all the affected struct device * pointers. In some cases
+where a driver needs to switch between different mux mappings at runtime
+this is not possible.
+
+A driver may request a certain mux to be activated, usually just the default
+mux like this:
+
+#include <linux/pinctrl/pinmux.h>
+
+struct foo_state {
+ struct pinmux *pmx;
+ ...
+};
+
+foo_probe()
+{
+ /* Allocate a state holder named "state" etc */
+ struct pinmux pmx;
+
+ pmx = pinmux_get(&device, NULL);
+ if IS_ERR(pmx)
+ return PTR_ERR(pmx);
+ pinmux_enable(pmx);
+
+ state->pmx = pmx;
+}
+
+foo_remove()
+{
+ pinmux_disable(state->pmx);
+ pinmux_put(state->pmx);
+}
+
+If you want to grab a specific mux mapping and not just the first one found for
+this device you can specify a specific mapping name, for example in the above
+example the second i2c0 setting: pinmux_get(&device, "spi0-pos-B");
+
+This get/enable/disable/put sequence can just as well be handled by bus drivers
+if you don't want each and every driver to handle it and you know the
+arrangement on your bus.
+
+The semantics of the get/enable respective disable/put is as follows:
+
+- pinmux_get() is called in process context to reserve the pins affected with
+ a certain mapping and set up the pinmux core and the driver. It will allocate
+ a struct from the kernel memory to hold the pinmux state.
+
+- pinmux_enable()/pinmux_disable() is quick and can be called from fastpath
+ (irq context) when you quickly want to set up/tear down the hardware muxing
+ when running a device driver. Usually it will just poke some values into a
+ register.
+
+- pinmux_disable() is called in process context to tear down the pin requests
+ and release the state holder struct for the mux setting.
+
+Usually the pinmux core handled the get/put pair and call out to the device
+drivers bookkeeping operations, like checking available functions and the
+associated pins, whereas the enable/disable pass on to the pin controller
+driver which takes care of activating and/or deactivating the mux setting by
+quickly poking some registers.
+
+The pins are allocated for your device when you issue the pinmux_get() call,
+after this you should be able to see this in the debugfs listing of all pins.
+
+
+System pinmux hogging
+=====================
+
+A system pinmux map entry, i.e. a pinmux setting that does not have a device
+associated with it, can be hogged by the core when the pin controller is
+registered. This means that the core will attempt to call pinmux_get() and
+pinmux_enable() on it immediately after the pin control device has been
+registered.
+
+This is enabled by simply setting the .hog_on_boot field in the map to true,
+like this:
+
+{
+ .name "POWERMAP"
+ .ctrl_dev_name = "pinctrl.0",
+ .function = "power_func",
+ .hog_on_boot = true,
+},
+
+Since it may be common to request the core to hog a few always-applicable
+mux settings on the primary pin controller, there is a convenience macro for
+this:
+
+PINMUX_MAP_PRIMARY_SYS_HOG("POWERMAP", "power_func")
+
+This gives the exact same result as the above construction.
+
+
+Runtime pinmuxing
+=================
+
+It is possible to mux a certain function in and out at runtime, say to move
+an SPI port from one set of pins to another set of pins. Say for example for
+spi0 in the example above, we expose two different groups of pins for the same
+function, but with different named in the mapping as described under
+"Advanced mapping" above. So we have two mappings named "spi0-pos-A" and
+"spi0-pos-B".
+
+This snippet first muxes the function in the pins defined by group A, enables
+it, disables and releases it, and muxes it in on the pins defined by group B:
+
+foo_switch()
+{
+ struct pinmux pmx;
+
+ /* Enable on position A */
+ pmx = pinmux_get(&device, "spi0-pos-A");
+ if IS_ERR(pmx)
+ return PTR_ERR(pmx);
+ pinmux_enable(pmx);
+
+ /* This releases the pins again */
+ pinmux_disable(pmx);
+ pinmux_put(pmx);
+
+ /* Enable on position B */
+ pmx = pinmux_get(&device, "spi0-pos-B");
+ if IS_ERR(pmx)
+ return PTR_ERR(pmx);
+ pinmux_enable(pmx);
+ ...
+}
+
+The above has to be done from process context.
diff --git a/Documentation/power/00-INDEX b/Documentation/power/00-INDEX
index 45e9d4a9128..a4d682f5423 100644
--- a/Documentation/power/00-INDEX
+++ b/Documentation/power/00-INDEX
@@ -26,6 +26,8 @@ s2ram.txt
- How to get suspend to ram working (and debug it when it isn't)
states.txt
- System power management states
+suspend-and-cpuhotplug.txt
+ - Explains the interaction between Suspend-to-RAM (S3) and CPU hotplug
swsusp-and-swap-files.txt
- Using swap files with software suspend (to disk)
swsusp-dmcrypt.txt
diff --git a/Documentation/power/basic-pm-debugging.txt b/Documentation/power/basic-pm-debugging.txt
index ddd78172ef7..40a4c65f380 100644
--- a/Documentation/power/basic-pm-debugging.txt
+++ b/Documentation/power/basic-pm-debugging.txt
@@ -173,7 +173,7 @@ kernel messages using the serial console. This may provide you with some
information about the reasons of the suspend (resume) failure. Alternatively,
it may be possible to use a FireWire port for debugging with firescope
(ftp://ftp.firstfloor.org/pub/ak/firescope/). On x86 it is also possible to
-use the PM_TRACE mechanism documented in Documentation/s2ram.txt .
+use the PM_TRACE mechanism documented in Documentation/power/s2ram.txt .
2. Testing suspend to RAM (STR)
@@ -201,3 +201,27 @@ case, you may be able to search for failing drivers by following the procedure
analogous to the one described in section 1. If you find some failing drivers,
you will have to unload them every time before an STR transition (ie. before
you run s2ram), and please report the problems with them.
+
+There is a debugfs entry which shows the suspend to RAM statistics. Here is an
+example of its output.
+ # mount -t debugfs none /sys/kernel/debug
+ # cat /sys/kernel/debug/suspend_stats
+ success: 20
+ fail: 5
+ failed_freeze: 0
+ failed_prepare: 0
+ failed_suspend: 5
+ failed_suspend_noirq: 0
+ failed_resume: 0
+ failed_resume_noirq: 0
+ failures:
+ last_failed_dev: alarm
+ adc
+ last_failed_errno: -16
+ -16
+ last_failed_step: suspend
+ suspend
+Field success means the success number of suspend to RAM, and field fail means
+the failure number. Others are the failure number of different steps of suspend
+to RAM. suspend_stats just lists the last 2 failed devices, error number and
+failed step of suspend.
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 3384d5996be..646a89e0c07 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -152,7 +152,9 @@ try to use its wakeup mechanism. device_set_wakeup_enable() affects this flag;
for the most part drivers should not change its value. The initial value of
should_wakeup is supposed to be false for the majority of devices; the major
exceptions are power buttons, keyboards, and Ethernet adapters whose WoL
-(wake-on-LAN) feature has been set up with ethtool.
+(wake-on-LAN) feature has been set up with ethtool. It should also default
+to true for devices that don't generate wakeup requests on their own but merely
+forward wakeup requests from one bus to another (like PCI bridges).
Whether or not a device is capable of issuing wakeup events is a hardware
matter, and the kernel is responsible for keeping track of it. By contrast,
@@ -279,10 +281,6 @@ When the system goes into the standby or memory sleep state, the phases are:
time.) Unlike the other suspend-related phases, during the prepare
phase the device tree is traversed top-down.
- In addition to that, if device drivers need to allocate additional
- memory to be able to hadle device suspend correctly, that should be
- done in the prepare phase.
-
After the prepare callback method returns, no new children may be
registered below the device. The method may also prepare the device or
driver in some way for the upcoming system power transition (for
diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt
index bfed898a03f..17e130a8034 100644
--- a/Documentation/power/pm_qos_interface.txt
+++ b/Documentation/power/pm_qos_interface.txt
@@ -4,14 +4,19 @@ This interface provides a kernel and user mode interface for registering
performance expectations by drivers, subsystems and user space applications on
one of the parameters.
-Currently we have {cpu_dma_latency, network_latency, network_throughput} as the
-initial set of pm_qos parameters.
+Two different PM QoS frameworks are available:
+1. PM QoS classes for cpu_dma_latency, network_latency, network_throughput.
+2. the per-device PM QoS framework provides the API to manage the per-device latency
+constraints.
Each parameters have defined units:
* latency: usec
* timeout: usec
* throughput: kbs (kilo bit / sec)
+
+1. PM QoS framework
+
The infrastructure exposes multiple misc device nodes one per implemented
parameter. The set of parameters implement is defined by pm_qos_power_init()
and pm_qos_params.h. This is done because having the available parameters
@@ -23,14 +28,18 @@ an aggregated target value. The aggregated target value is updated with
changes to the request list or elements of the list. Typically the
aggregated target value is simply the max or min of the request values held
in the parameter list elements.
+Note: the aggregated target value is implemented as an atomic variable so that
+reading the aggregated value does not require any locking mechanism.
+
From kernel mode the use of this interface is simple:
-handle = pm_qos_add_request(param_class, target_value):
-Will insert an element into the list for that identified PM_QOS class with the
+void pm_qos_add_request(handle, param_class, target_value):
+Will insert an element into the list for that identified PM QoS class with the
target value. Upon change to this list the new target is recomputed and any
registered notifiers are called only if the target value is now different.
-Clients of pm_qos need to save the returned handle.
+Clients of pm_qos need to save the returned handle for future use in other
+pm_qos API functions.
void pm_qos_update_request(handle, new_target_value):
Will update the list element pointed to by the handle with the new target value
@@ -42,6 +51,20 @@ Will remove the element. After removal it will update the aggregate target and
call the notification tree if the target was changed as a result of removing
the request.
+int pm_qos_request(param_class):
+Returns the aggregated value for a given PM QoS class.
+
+int pm_qos_request_active(handle):
+Returns if the request is still active, i.e. it has not been removed from a
+PM QoS class constraints list.
+
+int pm_qos_add_notifier(param_class, notifier):
+Adds a notification callback function to the PM QoS class. The callback is
+called when the aggregated value for the PM QoS class is changed.
+
+int pm_qos_remove_notifier(int param_class, notifier):
+Removes the notification callback function for the PM QoS class.
+
From user mode:
Only processes can register a pm_qos request. To provide for automatic
@@ -63,4 +86,63 @@ To remove the user mode request for a target value simply close the device
node.
+2. PM QoS per-device latency framework
+
+For each device a list of performance requests is maintained along with
+an aggregated target value. The aggregated target value is updated with
+changes to the request list or elements of the list. Typically the
+aggregated target value is simply the max or min of the request values held
+in the parameter list elements.
+Note: the aggregated target value is implemented as an atomic variable so that
+reading the aggregated value does not require any locking mechanism.
+
+
+From kernel mode the use of this interface is the following:
+
+int dev_pm_qos_add_request(device, handle, value):
+Will insert an element into the list for that identified device with the
+target value. Upon change to this list the new target is recomputed and any
+registered notifiers are called only if the target value is now different.
+Clients of dev_pm_qos need to save the handle for future use in other
+dev_pm_qos API functions.
+
+int dev_pm_qos_update_request(handle, new_value):
+Will update the list element pointed to by the handle with the new target value
+and recompute the new aggregated target, calling the notification trees if the
+target is changed.
+
+int dev_pm_qos_remove_request(handle):
+Will remove the element. After removal it will update the aggregate target and
+call the notification trees if the target was changed as a result of removing
+the request.
+
+s32 dev_pm_qos_read_value(device):
+Returns the aggregated value for a given device's constraints list.
+
+
+Notification mechanisms:
+The per-device PM QoS framework has 2 different and distinct notification trees:
+a per-device notification tree and a global notification tree.
+
+int dev_pm_qos_add_notifier(device, notifier):
+Adds a notification callback function for the device.
+The callback is called when the aggregated value of the device constraints list
+is changed.
+
+int dev_pm_qos_remove_notifier(device, notifier):
+Removes the notification callback function for the device.
+
+int dev_pm_qos_add_global_notifier(notifier):
+Adds a notification callback function in the global notification tree of the
+framework.
+The callback is called when the aggregated value for any device is changed.
+
+int dev_pm_qos_remove_global_notifier(notifier):
+Removes the notification callback function from the global notification tree
+of the framework.
+
+
+From user mode:
+No API for user space access to the per-device latency constraints is provided
+yet - still under discussion.
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 6066e3a6b9a..0e856088db7 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -43,13 +43,18 @@ struct dev_pm_ops {
...
};
-The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks are
-executed by the PM core for either the device type, or the class (if the device
-type's struct dev_pm_ops object does not exist), or the bus type (if the
-device type's and class' struct dev_pm_ops objects do not exist) of the given
-device (this allows device types to override callbacks provided by bus types or
-classes if necessary). The bus type, device type and class callbacks are
-referred to as subsystem-level callbacks in what follows.
+The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks
+are executed by the PM core for either the power domain, or the device type
+(if the device power domain's struct dev_pm_ops does not exist), or the class
+(if the device power domain's and type's struct dev_pm_ops object does not
+exist), or the bus type (if the device power domain's, type's and class'
+struct dev_pm_ops objects do not exist) of the given device, so the priority
+order of callbacks from high to low is that power domain callbacks, device
+type callbacks, class callbacks and bus type callbacks, and the high priority
+one will take precedence over low priority one. The bus type, device type and
+class callbacks are referred to as subsystem-level callbacks in what follows,
+and generally speaking, the power domain callbacks are used for representing
+power domains within a SoC.
By default, the callbacks are always invoked in process context with interrupts
enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
@@ -477,12 +482,14 @@ pm_runtime_autosuspend_expiration()
If pm_runtime_irq_safe() has been called for a device then the following helper
functions may also be used in interrupt context:
+pm_runtime_idle()
pm_runtime_suspend()
pm_runtime_autosuspend()
pm_runtime_resume()
pm_runtime_get_sync()
pm_runtime_put_sync()
pm_runtime_put_sync_suspend()
+pm_runtime_put_sync_autosuspend()
5. Runtime PM Initialization, Device Probing and Removal
diff --git a/Documentation/power/suspend-and-cpuhotplug.txt b/Documentation/power/suspend-and-cpuhotplug.txt
new file mode 100644
index 00000000000..f28f9a6f034
--- /dev/null
+++ b/Documentation/power/suspend-and-cpuhotplug.txt
@@ -0,0 +1,275 @@
+Interaction of Suspend code (S3) with the CPU hotplug infrastructure
+
+ (C) 2011 Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+
+
+I. How does the regular CPU hotplug code differ from how the Suspend-to-RAM
+ infrastructure uses it internally? And where do they share common code?
+
+Well, a picture is worth a thousand words... So ASCII art follows :-)
+
+[This depicts the current design in the kernel, and focusses only on the
+interactions involving the freezer and CPU hotplug and also tries to explain
+the locking involved. It outlines the notifications involved as well.
+But please note that here, only the call paths are illustrated, with the aim
+of describing where they take different paths and where they share code.
+What happens when regular CPU hotplug and Suspend-to-RAM race with each other
+is not depicted here.]
+
+On a high level, the suspend-resume cycle goes like this:
+
+|Freeze| -> |Disable nonboot| -> |Do suspend| -> |Enable nonboot| -> |Thaw |
+|tasks | | cpus | | | | cpus | |tasks|
+
+
+More details follow:
+
+ Suspend call path
+ -----------------
+
+ Write 'mem' to
+ /sys/power/state
+ syfs file
+ |
+ v
+ Acquire pm_mutex lock
+ |
+ v
+ Send PM_SUSPEND_PREPARE
+ notifications
+ |
+ v
+ Freeze tasks
+ |
+ |
+ v
+ disable_nonboot_cpus()
+ /* start */
+ |
+ v
+ Acquire cpu_add_remove_lock
+ |
+ v
+ Iterate over CURRENTLY
+ online CPUs
+ |
+ |
+ | ----------
+ v | L
+ ======> _cpu_down() |
+ | [This takes cpuhotplug.lock |
+ Common | before taking down the CPU |
+ code | and releases it when done] | O
+ | While it is at it, notifications |
+ | are sent when notable events occur, |
+ ======> by running all registered callbacks. |
+ | | O
+ | |
+ | |
+ v |
+ Note down these cpus in | P
+ frozen_cpus mask ----------
+ |
+ v
+ Disable regular cpu hotplug
+ by setting cpu_hotplug_disabled=1
+ |
+ v
+ Release cpu_add_remove_lock
+ |
+ v
+ /* disable_nonboot_cpus() complete */
+ |
+ v
+ Do suspend
+
+
+
+Resuming back is likewise, with the counterparts being (in the order of
+execution during resume):
+* enable_nonboot_cpus() which involves:
+ | Acquire cpu_add_remove_lock
+ | Reset cpu_hotplug_disabled to 0, thereby enabling regular cpu hotplug
+ | Call _cpu_up() [for all those cpus in the frozen_cpus mask, in a loop]
+ | Release cpu_add_remove_lock
+ v
+
+* thaw tasks
+* send PM_POST_SUSPEND notifications
+* Release pm_mutex lock.
+
+
+It is to be noted here that the pm_mutex lock is acquired at the very
+beginning, when we are just starting out to suspend, and then released only
+after the entire cycle is complete (i.e., suspend + resume).
+
+
+
+ Regular CPU hotplug call path
+ -----------------------------
+
+ Write 0 (or 1) to
+ /sys/devices/system/cpu/cpu*/online
+ sysfs file
+ |
+ |
+ v
+ cpu_down()
+ |
+ v
+ Acquire cpu_add_remove_lock
+ |
+ v
+ If cpu_hotplug_disabled is 1
+ return gracefully
+ |
+ |
+ v
+ ======> _cpu_down()
+ | [This takes cpuhotplug.lock
+ Common | before taking down the CPU
+ code | and releases it when done]
+ | While it is at it, notifications
+ | are sent when notable events occur,
+ ======> by running all registered callbacks.
+ |
+ |
+ v
+ Release cpu_add_remove_lock
+ [That's it!, for
+ regular CPU hotplug]
+
+
+
+So, as can be seen from the two diagrams (the parts marked as "Common code"),
+regular CPU hotplug and the suspend code path converge at the _cpu_down() and
+_cpu_up() functions. They differ in the arguments passed to these functions,
+in that during regular CPU hotplug, 0 is passed for the 'tasks_frozen'
+argument. But during suspend, since the tasks are already frozen by the time
+the non-boot CPUs are offlined or onlined, the _cpu_*() functions are called
+with the 'tasks_frozen' argument set to 1.
+[See below for some known issues regarding this.]
+
+
+Important files and functions/entry points:
+------------------------------------------
+
+kernel/power/process.c : freeze_processes(), thaw_processes()
+kernel/power/suspend.c : suspend_prepare(), suspend_enter(), suspend_finish()
+kernel/cpu.c: cpu_[up|down](), _cpu_[up|down](), [disable|enable]_nonboot_cpus()
+
+
+
+II. What are the issues involved in CPU hotplug?
+ -------------------------------------------
+
+There are some interesting situations involving CPU hotplug and microcode
+update on the CPUs, as discussed below:
+
+[Please bear in mind that the kernel requests the microcode images from
+userspace, using the request_firmware() function defined in
+drivers/base/firmware_class.c]
+
+
+a. When all the CPUs are identical:
+
+ This is the most common situation and it is quite straightforward: we want
+ to apply the same microcode revision to each of the CPUs.
+ To give an example of x86, the collect_cpu_info() function defined in
+ arch/x86/kernel/microcode_core.c helps in discovering the type of the CPU
+ and thereby in applying the correct microcode revision to it.
+ But note that the kernel does not maintain a common microcode image for the
+ all CPUs, in order to handle case 'b' described below.
+
+
+b. When some of the CPUs are different than the rest:
+
+ In this case since we probably need to apply different microcode revisions
+ to different CPUs, the kernel maintains a copy of the correct microcode
+ image for each CPU (after appropriate CPU type/model discovery using
+ functions such as collect_cpu_info()).
+
+
+c. When a CPU is physically hot-unplugged and a new (and possibly different
+ type of) CPU is hot-plugged into the system:
+
+ In the current design of the kernel, whenever a CPU is taken offline during
+ a regular CPU hotplug operation, upon receiving the CPU_DEAD notification
+ (which is sent by the CPU hotplug code), the microcode update driver's
+ callback for that event reacts by freeing the kernel's copy of the
+ microcode image for that CPU.
+
+ Hence, when a new CPU is brought online, since the kernel finds that it
+ doesn't have the microcode image, it does the CPU type/model discovery
+ afresh and then requests the userspace for the appropriate microcode image
+ for that CPU, which is subsequently applied.
+
+ For example, in x86, the mc_cpu_callback() function (which is the microcode
+ update driver's callback registered for CPU hotplug events) calls
+ microcode_update_cpu() which would call microcode_init_cpu() in this case,
+ instead of microcode_resume_cpu() when it finds that the kernel doesn't
+ have a valid microcode image. This ensures that the CPU type/model
+ discovery is performed and the right microcode is applied to the CPU after
+ getting it from userspace.
+
+
+d. Handling microcode update during suspend/hibernate:
+
+ Strictly speaking, during a CPU hotplug operation which does not involve
+ physically removing or inserting CPUs, the CPUs are not actually powered
+ off during a CPU offline. They are just put to the lowest C-states possible.
+ Hence, in such a case, it is not really necessary to re-apply microcode
+ when the CPUs are brought back online, since they wouldn't have lost the
+ image during the CPU offline operation.
+
+ This is the usual scenario encountered during a resume after a suspend.
+ However, in the case of hibernation, since all the CPUs are completely
+ powered off, during restore it becomes necessary to apply the microcode
+ images to all the CPUs.
+
+ [Note that we don't expect someone to physically pull out nodes and insert
+ nodes with a different type of CPUs in-between a suspend-resume or a
+ hibernate/restore cycle.]
+
+ In the current design of the kernel however, during a CPU offline operation
+ as part of the suspend/hibernate cycle (the CPU_DEAD_FROZEN notification),
+ the existing copy of microcode image in the kernel is not freed up.
+ And during the CPU online operations (during resume/restore), since the
+ kernel finds that it already has copies of the microcode images for all the
+ CPUs, it just applies them to the CPUs, avoiding any re-discovery of CPU
+ type/model and the need for validating whether the microcode revisions are
+ right for the CPUs or not (due to the above assumption that physical CPU
+ hotplug will not be done in-between suspend/resume or hibernate/restore
+ cycles).
+
+
+III. Are there any known problems when regular CPU hotplug and suspend race
+ with each other?
+
+Yes, they are listed below:
+
+1. When invoking regular CPU hotplug, the 'tasks_frozen' argument passed to
+ the _cpu_down() and _cpu_up() functions is *always* 0.
+ This might not reflect the true current state of the system, since the
+ tasks could have been frozen by an out-of-band event such as a suspend
+ operation in progress. Hence, it will lead to wrong notifications being
+ sent during the cpu online/offline events (eg, CPU_ONLINE notification
+ instead of CPU_ONLINE_FROZEN) which in turn will lead to execution of
+ inappropriate code by the callbacks registered for such CPU hotplug events.
+
+2. If a regular CPU hotplug stress test happens to race with the freezer due
+ to a suspend operation in progress at the same time, then we could hit the
+ situation described below:
+
+ * A regular cpu online operation continues its journey from userspace
+ into the kernel, since the freezing has not yet begun.
+ * Then freezer gets to work and freezes userspace.
+ * If cpu online has not yet completed the microcode update stuff by now,
+ it will now start waiting on the frozen userspace in the
+ TASK_UNINTERRUPTIBLE state, in order to get the microcode image.
+ * Now the freezer continues and tries to freeze the remaining tasks. But
+ due to this wait mentioned above, the freezer won't be able to freeze
+ the cpu online hotplug task and hence freezing of tasks fails.
+
+ As a result of this task freezing failure, the suspend operation gets
+ aborted.
diff --git a/Documentation/power/userland-swsusp.txt b/Documentation/power/userland-swsusp.txt
index 1101bee4e82..0e870825c1b 100644
--- a/Documentation/power/userland-swsusp.txt
+++ b/Documentation/power/userland-swsusp.txt
@@ -77,7 +77,8 @@ SNAPSHOT_SET_SWAP_AREA - set the resume partition and the offset (in <PAGE_SIZE>
resume_swap_area, as defined in kernel/power/suspend_ioctls.h,
containing the resume device specification and the offset); for swap
partitions the offset is always 0, but it is different from zero for
- swap files (see Documentation/swsusp-and-swap-files.txt for details).
+ swap files (see Documentation/power/swsusp-and-swap-files.txt for
+ details).
SNAPSHOT_PLATFORM_SUPPORT - enable/disable the hibernation platform support,
depending on the argument value (enable, if the argument is nonzero)
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index 83668e5dd17..03c9d9299c6 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -117,5 +117,4 @@ The contents of these variables corresponds to the "name", "state" and
"type" sysfs files explained above.
-For further details consult Documentation/ABI/stable/dev-rfkill and
-Documentation/ABI/stable/sysfs-class-rfkill.
+For further details consult Documentation/ABI/stable/sysfs-class-rfkill.
diff --git a/Documentation/scsi/aic7xxx_old.txt b/Documentation/scsi/aic7xxx_old.txt
index 7bd210ab45a..ecfc474f36a 100644
--- a/Documentation/scsi/aic7xxx_old.txt
+++ b/Documentation/scsi/aic7xxx_old.txt
@@ -444,7 +444,7 @@ linux-1.1.x and fairly stable since linux-1.2.x, and are also in FreeBSD
Kernel Compile options
------------------------------
The various kernel compile time options for this driver are now fairly
- well documented in the file Documentation/Configure.help. In order to
+ well documented in the file drivers/scsi/Kconfig. In order to
see this documentation, you need to use one of the advanced configuration
programs (menuconfig and xconfig). If you are using the "make menuconfig"
method of configuring your kernel, then you would simply highlight the
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index 5f17d29c59b..a340b18cd4e 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -55,11 +55,6 @@ or in the same directory as the C source code. For example to find a url
about the USB mass storage driver see the
/usr/src/linux/drivers/usb/storage directory.
-The Linux kernel source Documentation/DocBook/scsidrivers.tmpl file
-refers to this file. With the appropriate DocBook tool-set, this permits
-users to generate html, ps and pdf renderings of information within this
-file (e.g. the interface functions).
-
Driver structure
================
Traditionally an LLD for the SCSI subsystem has been at least two files in
diff --git a/Documentation/security/keys-trusted-encrypted.txt b/Documentation/security/keys-trusted-encrypted.txt
index 5f50ccabfc8..c9e4855ed3d 100644
--- a/Documentation/security/keys-trusted-encrypted.txt
+++ b/Documentation/security/keys-trusted-encrypted.txt
@@ -156,4 +156,5 @@ Load an encrypted key "evm" from saved blob:
Other uses for trusted and encrypted keys, such as for disk and file encryption
are anticipated. In particular the new format 'ecryptfs' has been defined in
in order to use encrypted keys to mount an eCryptfs filesystem. More details
-about the usage can be found in the file 'Documentation/keys-ecryptfs.txt'.
+about the usage can be found in the file
+'Documentation/security/keys-ecryptfs.txt'.
diff --git a/Documentation/serial/serial-rs485.txt b/Documentation/serial/serial-rs485.txt
index a4932387bbf..079cb3df62c 100644
--- a/Documentation/serial/serial-rs485.txt
+++ b/Documentation/serial/serial-rs485.txt
@@ -28,6 +28,10 @@
RS485 communications. This data structure is used to set and configure RS485
parameters in the platform data and in ioctls.
+ The device tree can also provide RS485 boot time parameters (see [2]
+ for bindings). The driver is in charge of filling this data structure from
+ the values given by the device tree.
+
Any driver for devices capable of working both as RS232 and RS485 should
provide at least the following ioctls:
@@ -104,6 +108,9 @@
rs485conf.flags |= SER_RS485_RTS_AFTER_SEND;
rs485conf.delay_rts_after_send = ...;
+ /* Set this flag if you want to receive data even whilst sending data */
+ rs485conf.flags |= SER_RS485_RX_DURING_TX;
+
if (ioctl (fd, TIOCSRS485, &rs485conf) < 0) {
/* Error handling. See errno. */
}
@@ -118,3 +125,4 @@
5. REFERENCES
[1] include/linux/serial.h
+ [2] Documentation/devicetree/bindings/serial/rs485.txt
diff --git a/Documentation/sound/oss/PAS16 b/Documentation/sound/oss/PAS16
index 951b3dce51b..3dca4b75988 100644
--- a/Documentation/sound/oss/PAS16
+++ b/Documentation/sound/oss/PAS16
@@ -60,8 +60,7 @@ With PAS16 you can use two audio device files at the same time. /dev/dsp (and
The new stuff for 2.3.99 and later
============================================================================
-The following configuration options from Documentation/Configure.help
-are relevant to configuring the PAS16:
+The following configuration options are relevant to configuring the PAS16:
Sound card support
CONFIG_SOUND
diff --git a/Documentation/spi/pxa2xx b/Documentation/spi/pxa2xx
index 00511e08db7..3352f97430e 100644
--- a/Documentation/spi/pxa2xx
+++ b/Documentation/spi/pxa2xx
@@ -2,7 +2,7 @@ PXA2xx SPI on SSP driver HOWTO
===================================================
This a mini howto on the pxa2xx_spi driver. The driver turns a PXA2xx
synchronous serial port into a SPI master controller
-(see Documentation/spi/spi_summary). The driver has the following features
+(see Documentation/spi/spi-summary). The driver has the following features
- Support for any PXA2xx SSP
- SSP PIO and SSP DMA data transfers.
@@ -85,7 +85,7 @@ Declaring Slave Devices
-----------------------
Typically each SPI slave (chip) is defined in the arch/.../mach-*/board-*.c
using the "spi_board_info" structure found in "linux/spi/spi.h". See
-"Documentation/spi/spi_summary" for additional information.
+"Documentation/spi/spi-summary" for additional information.
Each slave device attached to the PXA must provide slave specific configuration
information via the structure "pxa2xx_spi_chip" found in
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index e213f45cf9d..21fd05c28e7 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -24,10 +24,10 @@ Rules on what kind of patches are accepted, and which ones are not, into the
Procedure for submitting patches to the -stable tree:
- Send the patch, after verifying that it follows the above rules, to
- stable@kernel.org. You must note the upstream commit ID in the changelog
- of your submission.
+ stable@vger.kernel.org. You must note the upstream commit ID in the
+ changelog of your submission.
- To have the patch automatically included in the stable tree, add the tag
- Cc: stable@kernel.org
+ Cc: stable@vger.kernel.org
in the sign-off area. Once the patch is merged it will be applied to
the stable tree without anything else needing to be done by the author
or subsystem maintainer.
@@ -35,10 +35,10 @@ Procedure for submitting patches to the -stable tree:
cherry-picked than this can be specified in the following format in
the sign-off area:
- Cc: <stable@kernel.org> # .32.x: a1f84a3: sched: Check for idle
- Cc: <stable@kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
- Cc: <stable@kernel.org> # .32.x: fd21073: sched: Fix affinity logic
- Cc: <stable@kernel.org> # .32.x
+ Cc: <stable@vger.kernel.org> # .32.x: a1f84a3: sched: Check for idle
+ Cc: <stable@vger.kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
+ Cc: <stable@vger.kernel.org> # .32.x: fd21073: sched: Fix affinity logic
+ Cc: <stable@vger.kernel.org> # .32.x
Signed-off-by: Ingo Molnar <mingo@elte.hu>
The tag sequence has the meaning of:
diff --git a/Documentation/timers/highres.txt b/Documentation/timers/highres.txt
index 21332233cef..e8789976e77 100644
--- a/Documentation/timers/highres.txt
+++ b/Documentation/timers/highres.txt
@@ -30,7 +30,7 @@ hrtimer base infrastructure
---------------------------
The hrtimer base infrastructure was merged into the 2.6.16 kernel. Details of
-the base implementation are covered in Documentation/hrtimers/hrtimer.txt. See
+the base implementation are covered in Documentation/timers/hrtimers.txt. See
also figure #2 (OLS slides p. 15)
The main differences to the timer wheel, which holds the armed timer_list type
diff --git a/Documentation/usb/dma.txt b/Documentation/usb/dma.txt
index 84ef865237d..444651e70d9 100644
--- a/Documentation/usb/dma.txt
+++ b/Documentation/usb/dma.txt
@@ -7,7 +7,7 @@ API OVERVIEW
The big picture is that USB drivers can continue to ignore most DMA issues,
though they still must provide DMA-ready buffers (see
-Documentation/PCI/PCI-DMA-mapping.txt). That's how they've worked through
+Documentation/DMA-API-HOWTO.txt). That's how they've worked through
the 2.4 (and earlier) kernels.
OR: they can now be DMA-aware.
@@ -57,7 +57,7 @@ and effects like cache-trashing can impose subtle penalties.
force a consistent memory access ordering by using memory barriers. It's
not using a streaming DMA mapping, so it's good for small transfers on
systems where the I/O would otherwise thrash an IOMMU mapping. (See
- Documentation/PCI/PCI-DMA-mapping.txt for definitions of "coherent" and
+ Documentation/DMA-API-HOWTO.txt for definitions of "coherent" and
"streaming" DMA mappings.)
Asking for 1/Nth of a page (as well as asking for N pages) is reasonably
@@ -88,7 +88,7 @@ WORKING WITH EXISTING BUFFERS
Existing buffers aren't usable for DMA without first being mapped into the
DMA address space of the device. However, most buffers passed to your
driver can safely be used with such DMA mapping. (See the first section
-of Documentation/PCI/PCI-DMA-mapping.txt, titled "What memory is DMA-able?")
+of Documentation/DMA-API-HOWTO.txt, titled "What memory is DMA-able?")
- When you're using scatterlists, you can map everything at once. On some
systems, this kicks in an IOMMU and turns the scatterlists into single
diff --git a/Documentation/usb/dwc3.txt b/Documentation/usb/dwc3.txt
new file mode 100644
index 00000000000..7b590edae14
--- /dev/null
+++ b/Documentation/usb/dwc3.txt
@@ -0,0 +1,45 @@
+
+ TODO
+~~~~~~
+Please pick something while reading :)
+
+- Convert interrupt handler to per-ep-thread-irq
+
+ As it turns out some DWC3-commands ~1ms to complete. Currently we spin
+ until the command completes which is bad.
+
+ Implementation idea:
+ - dwc core implements a demultiplexing irq chip for interrupts per
+ endpoint. The interrupt numbers are allocated during probe and belong
+ to the device. If MSI provides per-endpoint interrupt this dummy
+ interrupt chip can be replaced with "real" interrupts.
+ - interrupts are requested / allocated on usb_ep_enable() and removed on
+ usb_ep_disable(). Worst case are 32 interrupts, the lower limit is two
+ for ep0/1.
+ - dwc3_send_gadget_ep_cmd() will sleep in wait_for_completion_timeout()
+ until the command completes.
+ - the interrupt handler is split into the following pieces:
+ - primary handler of the device
+ goes through every event and calls generic_handle_irq() for event
+ it. On return from generic_handle_irq() in acknowledges the event
+ counter so interrupt goes away (eventually).
+
+ - threaded handler of the device
+ none
+
+ - primary handler of the EP-interrupt
+ reads the event and tries to process it. Everything that requries
+ sleeping is handed over to the Thread. The event is saved in an
+ per-endpoint data-structure.
+ We probably have to pay attention not to process events once we
+ handed something to thread so we don't process event X prio Y
+ where X > Y.
+
+ - threaded handler of the EP-interrupt
+ handles the remaining EP work which might sleep such as waiting
+ for command completion.
+
+ Latency:
+ There should be no increase in latency since the interrupt-thread has a
+ high priority and will be run before an average task in user land
+ (except the user changed priorities).
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index c9ffa9ced7e..12511c98cc4 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -439,10 +439,10 @@ cause autosuspends to fail with -EBUSY if the driver needs to use the
device.
External suspend calls should never be allowed to fail in this way,
-only autosuspend calls. The driver can tell them apart by checking
-the PM_EVENT_AUTO bit in the message.event argument to the suspend
-method; this bit will be set for internal PM events (autosuspend) and
-clear for external PM events.
+only autosuspend calls. The driver can tell them apart by applying
+the PMSG_IS_AUTO() macro to the message argument to the suspend
+method; it will return True for internal PM events (autosuspend) and
+False for external PM events.
Mutual exclusion
@@ -487,3 +487,29 @@ succeed, it may still remain active and thus cause the system to
resume as soon as the system suspend is complete. Or the remote
wakeup may fail and get lost. Which outcome occurs depends on timing
and on the hardware and firmware design.
+
+
+ xHCI hardware link PM
+ ---------------------
+
+xHCI host controller provides hardware link power management to usb2.0
+(xHCI 1.0 feature) and usb3.0 devices which support link PM. By
+enabling hardware LPM, the host can automatically put the device into
+lower power state(L1 for usb2.0 devices, or U1/U2 for usb3.0 devices),
+which state device can enter and resume very quickly.
+
+The user interface for controlling USB2 hardware LPM is located in the
+power/ subdirectory of each USB device's sysfs directory, that is, in
+/sys/bus/usb/devices/.../power/ where "..." is the device's ID. The
+relevant attribute files is usb2_hardware_lpm.
+
+ power/usb2_hardware_lpm
+
+ When a USB2 device which support LPM is plugged to a
+ xHCI host root hub which support software LPM, the
+ host will run a software LPM test for it; if the device
+ enters L1 state and resume successfully and the host
+ supports USB2 hardware LPM, this file will show up and
+ driver will enable hardware LPM for the device. You
+ can write y/Y/1 or n/N/0 to the file to enable/disable
+ USB2 hardware LPM manually. This is for test purpose mainly.
diff --git a/Documentation/virtual/lguest/lguest.c b/Documentation/virtual/lguest/lguest.c
index d928c134dee..c095d79cae7 100644
--- a/Documentation/virtual/lguest/lguest.c
+++ b/Documentation/virtual/lguest/lguest.c
@@ -436,7 +436,7 @@ static unsigned long load_bzimage(int fd)
/*
* Go back to the start of the file and read the header. It should be
- * a Linux boot header (see Documentation/x86/i386/boot.txt)
+ * a Linux boot header (see Documentation/x86/boot.txt)
*/
lseek(fd, 0, SEEK_SET);
read(fd, &boot, sizeof(boot));
diff --git a/Documentation/vm/numa b/Documentation/vm/numa
index a200a386429..ade01274212 100644
--- a/Documentation/vm/numa
+++ b/Documentation/vm/numa
@@ -109,11 +109,11 @@ to improve NUMA locality using various CPU affinity command line interfaces,
such as taskset(1) and numactl(1), and program interfaces such as
sched_setaffinity(2). Further, one can modify the kernel's default local
allocation behavior using Linux NUMA memory policy.
-[see Documentation/vm/numa_memory_policy.]
+[see Documentation/vm/numa_memory_policy.txt.]
System administrators can restrict the CPUs and nodes' memories that a non-
privileged user can specify in the scheduling or NUMA commands and functions
-using control groups and CPUsets. [see Documentation/cgroups/CPUsets.txt]
+using control groups and CPUsets. [see Documentation/cgroups/cpusets.txt]
On architectures that do not hide memoryless nodes, Linux will include only
zones [nodes] with memory in the zonelists. This means that for a memoryless
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index 07375e73981..f464f47bc60 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -17,7 +17,7 @@ data and perform operation on the slabs. By default slabinfo only lists
slabs that have data in them. See "slabinfo -h" for more options when
running the command. slabinfo can be compiled with
-gcc -o slabinfo Documentation/vm/slabinfo.c
+gcc -o slabinfo tools/slub/slabinfo.c
Some of the modes of operation of slabinfo require that slub debugging
be enabled on the command line. F.e. no tracking information will be
diff --git a/Documentation/zh_CN/SubmitChecklist b/Documentation/zh_CN/SubmitChecklist
deleted file mode 100644
index 4c741d6bc04..00000000000
--- a/Documentation/zh_CN/SubmitChecklist
+++ /dev/null
@@ -1,109 +0,0 @@
-Chinese translated version of Documentation/SubmitChecklist
-
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
-
-Chinese maintainer: Harry Wei <harryxiyou@gmail.com>
----------------------------------------------------------------------
-Documentation/SubmitChecklist µÄÖÐÎÄ·­Òë
-
-Èç¹ûÏëÆÀÂÛ»ò¸üб¾ÎĵÄÄÚÈÝ£¬ÇëÖ±½ÓÁªÏµÔ­ÎĵµµÄά»¤Õß¡£Èç¹ûÄãʹÓÃÓ¢ÎÄ
-½»Á÷ÓÐÀ§ÄѵĻ°£¬Ò²¿ÉÒÔÏòÖÐÎÄ°æά»¤ÕßÇóÖú¡£Èç¹û±¾·­Òë¸üв»¼°Ê±»òÕß·­
-Òë´æÔÚÎÊÌ⣬ÇëÁªÏµÖÐÎÄ°æά»¤Õß¡£
-
-ÖÐÎÄ°æά»¤Õߣº ¼ÖÍþÍþ Harry Wei <harryxiyou@gmail.com>
-ÖÐÎÄ°æ·­ÒëÕߣº ¼ÖÍþÍþ Harry Wei <harryxiyou@gmail.com>
-ÖÐÎÄ°æУÒëÕߣº ¼ÖÍþÍþ Harry Wei <harryxiyou@gmail.com>
-
-
-ÒÔÏÂΪÕýÎÄ
----------------------------------------------------------------------
-LinuxÄÚºËÌá½»Çåµ¥
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-ÕâÀïÓÐһЩÄں˿ª·¢ÕßÓ¦¸Ã×öµÄ»ù±¾ÊÂÇ飬Èç¹ûËûÃÇÏë¿´µ½×Ô¼ºµÄÄں˲¹¶¡Ìá½»
-±»½ÓÊܵĸü¿ì¡£
-
-ÕâЩ¶¼Êdz¬³öDocumentation/SubmittingPatchesÎĵµÀïËùÌṩµÄÒÔ¼°ÆäËû
-¹ØÓÚÌá½»LinuxÄں˲¹¶¡µÄ˵Ã÷¡£
-
-1£ºÈç¹ûÄãʹÓÃÁËÒ»¸ö¹¦ÄÜÄÇô¾Í#include¶¨Òå/ÉùÃ÷ÄǸö¹¦ÄܵÄÄǸöÎļþ¡£
- ²»ÒªÒÀ¿¿ÆäËû¼ä½ÓÒýÈ붨Òå/ÉùÃ÷ÄǸö¹¦ÄܵÄÍ·Îļþ¡£
-
-2£º¹¹½¨¼ò½àÊÊÓûòÕ߸ü¸ÄCONFIGÑ¡Ïî =y£¬=m£¬»òÕß=n¡£
- ²»ÒªÓбàÒ뾯¸æ/´íÎó£¬ ²»ÒªÓÐÁ´½Ó¾¯¸æ/´íÎó¡£
-
-2b£ºÍ¨¹ý allnoconfig, allmodconfig
-
-2c£ºµ±Ê¹Óà 0=builddir ³É¹¦µØ¹¹½¨
-
-3£ºÍ¨¹ýʹÓñ¾µØ½»²æ±àÒ빤¾ß»òÕßÆäËûһЩ¹¹½¨²úËù£¬ÔÚ¶àCPU¿ò¼ÜÉϹ¹½¨¡£
-
-4£ºppc64 ÊÇÒ»¸öºÜºÃµÄ¼ì²é½»²æ±àÒëµÄ¿ò¼Ü£¬ÒòΪËüÍùÍù°Ñ¡®unsigned long¡¯
- µ±64λֵÀ´Ê¹Óá£
-
-5£º°´ÕÕDocumentation/CodingStyleÎļþÀïµÄÏêϸÃèÊö£¬¼ì²éÄã²¹¶¡µÄÕûÌå·ç¸ñ¡£
- ʹÓò¹¶¡·ç¸ñ¼ì²éËöËéµÄÎ¥¹æ(scripts/checkpatch.pl)£¬ÉóºËÔ±ÓÅÏÈÌá½»¡£
- ÄãÓ¦¸Ãµ÷ÕûÒÅÁôÔÚÄã²¹¶¡ÖеÄËùÓÐÎ¥¹æ¡£
-
-6£ºÈκθüлòÕ߸Ķ¯CONFIGÑ¡Ï²»ÄÜ´òÂÒÅäÖò˵¥¡£
-
-7£ºËùÓеÄKconfigÑ¡Ïî¸üж¼ÒªÓÐ˵Ã÷ÎÄ×Ö¡£
-
-8£ºÒѾ­ÈÏÕæµØ×ܽáÁËÏà¹ØµÄKconfig×éºÏ¡£ÕâÊǺÜÄÑͨ¹ý²âÊÔ×öºÃµÄ--ÄÔÁ¦ÔÚÕâÀïϽµ¡£
-
-9£º¼ì²é¾ßÓмò½àÐÔ¡£
-
-10£ºÊ¹ÓÃ'make checkstack'ºÍ'make namespacecheck'¼ì²é£¬È»ºóÐÞ¸ÄËùÕÒµ½µÄÎÊÌâ¡£
- ×¢Ò⣺¶ÑÕ»¼ì²é²»»áÃ÷È·µØ³öÏÖÎÊÌ⣬µ«ÊÇÈκεÄÒ»¸öº¯ÊýÔÚ¶ÑÕ»ÉÏʹÓöàÓÚ512×Ö½Ú
- ¶¼Òª×¼±¸Ð޸ġ£
-
-11£º°üº¬kernel-docµ½È«¾ÖÄÚºËAPIsÎļþ¡££¨²»ÒªÇó¾²Ì¬µÄº¯Êý£¬µ«ÊÇ°üº¬Ò²ÎÞËùν¡££©
- ʹÓÃ'make htmldocs'»òÕß'make mandocs'À´¼ì²ékernel-doc£¬È»ºóÐÞ¸ÄÈκÎ
- ·¢ÏÖµÄÎÊÌâ¡£
-
-12£ºÒѾ­Í¨¹ýCONFIG_PREEMPT, CONFIG_DEBUG_PREEMPT,
- CONFIG_DEBUG_SLAB, CONFIG_DEBUG_PAGEALLOC, CONFIG_DEBUG_MUTEXES,
- CONFIG_DEBUG_SPINLOCK, CONFIG_DEBUG_ATOMIC_SLEEP²âÊÔ£¬²¢ÇÒͬʱ¶¼
- ʹÄÜ¡£
-
-13£ºÒѾ­¶¼¹¹½¨²¢ÇÒʹÓûòÕß²»Ê¹Óà CONFIG_SMP ºÍ CONFIG_PREEMPT²âÊÔÖ´ÐÐʱ¼ä¡£
-
-14£ºÈç¹û²¹¶¡Ó°ÏìIO/Disk£¬µÈµÈ£ºÒѾ­Í¨¹ýʹÓûòÕß²»Ê¹Óà CONFIG_LBDAF ²âÊÔ¡£
-
-15£ºËùÓеÄcodepathsÒѾ­ÐÐʹËùÓÐlockdepÆôÓù¦ÄÜ¡£
-
-16£ºËùÓеÄ/proc¼Ç¼¸üж¼Òª×÷³ÉÎļþ·ÅÔÚDocumentation/Ŀ¼Ï¡£
-
-17£ºËùÓеÄÄÚºËÆô¶¯²ÎÊý¸üж¼±»¼Ç¼µ½Documentation/kernel-parameters.txtÎļþÖС£
-
-18£ºËùÓеÄÄ£¿é²ÎÊý¸üж¼ÓÃMODULE_PARM_DESC()¼Ç¼¡£
-
-19£ºËùÓеÄÓû§¿Õ¼ä½Ó¿Ú¸üж¼±»¼Ç¼µ½Documentation/ABI/¡£²é¿´Documentation/ABI/README
- ¿ÉÒÔ»ñµÃ¸ü¶àµÄÐÅÏ¢¡£¸Ä±äÓû§¿Õ¼ä½Ó¿ÚµÄ²¹¶¡Ó¦¸Ã±»Óʼþ³­Ë͸ølinux-api@vger.kernel.org¡£
-
-20£º¼ì²éËüÊDz»ÊǶ¼Í¨¹ý`make headers_check'¡£
-
-21£ºÒѾ­Í¨¹ýÖÁÉÙÒýÈëslabºÍpage-allocationʧ°Ü¼ì²é¡£²é¿´Documentation/fault-injection/¡£
-
-22£ºÐ¼ÓÈëµÄÔ´ÂëÒѾ­Í¨¹ý`gcc -W'£¨Ê¹ÓÃ"make EXTRA_CFLAGS=-W"£©±àÒë¡£ÕâÑù½«²úÉúºÜ¶à·³ÄÕ£¬
- µ«ÊǶÔÓÚÑ°ÕÒ©¶´ºÜÓÐÒæ´¦£¬ÀýÈç:"warning: comparison between signed and unsigned"¡£
-
-23£ºµ±Ëü±»ºÏ²¢µ½-mm²¹¶¡¼¯ºóÔÙ²âÊÔ£¬ÓÃÀ´È·¶¨ËüÊÇ·ñ»¹ºÍ²¹¶¡¶ÓÁÐÖеÄÆäËû²¹¶¡Ò»Æð¹¤×÷ÒÔ¼°ÔÚVM£¬VFS
- ºÍÆäËû×ÓϵͳÖи÷¸ö±ä»¯¡£
-
-24£ºËùÓеÄÄÚ´æÆÁÕÏ{e.g., barrier(), rmb(), wmb()}ÐèÒªÔÚÔ´´úÂëÖеÄÒ»¸ö×¢ÊÍÀ´½âÊÍËûÃǶ¼ÊǸÉʲôµÄ
- ÒÔ¼°Ô­Òò¡£
-
-25£ºÈç¹ûÓÐÈκÎÊäÈëÊä³ö¿ØÖƵIJ¹¶¡±»Ìí¼Ó£¬Ò²Òª¸üÐÂDocumentation/ioctl/ioctl-number.txt¡£
-
-26£ºÈç¹ûÄãµÄ¸ü¸Ä´úÂëÒÀ¿¿»òÕßʹÓÃÈκεÄÄÚºËAPIs»òÕßÓëÏÂÃæµÄkconfig·ûºÅÓйØϵµÄ¹¦ÄÜ£¬Äã¾ÍÒª
- ʹÓÃÏà¹ØµÄkconfig·ûºÅ¹Ø±Õ£¬ and/or =m£¨Èç¹ûÑ¡ÏîÌṩ£©[ÔÚͬһʱ¼ä²»ÊÇËùÓõĶ¼ÆôÓ㬽ö½ö¸÷¸ö»òÕß×ÔÓÉ
- ×éºÏËûÃÇ]£º
-
- CONFIG_SMP, CONFIG_SYSFS, CONFIG_PROC_FS, CONFIG_INPUT, CONFIG_PCI,
- CONFIG_BLOCK, CONFIG_PM, CONFIG_HOTPLUG, CONFIG_MAGIC_SYSRQ,
- CONFIG_NET, CONFIG_INET=n (ºóÒ»¸öʹÓà CONFIG_NET=y)
diff --git a/MAINTAINERS b/MAINTAINERS
index ace8f9c81b9..27af9c95ef4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -117,20 +117,20 @@ Maintainers List (try to look for most precise areas first)
M: Philip Blundell <philb@gnu.org>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/3c505*
+F: drivers/net/ethernet/i825xx/3c505*
3C59X NETWORK DRIVER
M: Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/vortex.txt
-F: drivers/net/3c59x.c
+F: drivers/net/ethernet/3com/3c59x.c
3CR990 NETWORK DRIVER
M: David Dillow <dave@thedillows.org>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/typhoon*
+F: drivers/net/ethernet/3com/typhoon*
3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS)
M: Adam Radford <linuxraid@lsi.com>
@@ -156,7 +156,7 @@ M: Realtek linux nic maintainers <nic_swsd@realtek.com>
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/r8169.c
+F: drivers/net/ethernet/realtek/r8169.c
8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
M: Greg Kroah-Hartman <gregkh@suse.de>
@@ -170,8 +170,7 @@ F: include/linux/serial_8250.h
8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.]
L: netdev@vger.kernel.org
S: Orphan / Obsolete
-F: drivers/net/*8390*
-F: drivers/net/ax88796.c
+F: drivers/net/ethernet/8390/
9P FILE SYSTEM
M: Eric Van Hensbergen <ericvh@gmail.com>
@@ -214,7 +213,7 @@ ACENIC DRIVER
M: Jes Sorensen <jes@trained-monkey.org>
L: linux-acenic@sunsite.dk
S: Maintained
-F: drivers/net/acenic*
+F: drivers/net/ethernet/alteon/acenic*
ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER
M: Peter Feuerer <peter@piie.net>
@@ -746,7 +745,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.arm.linux.org.uk/
S: Maintained
F: arch/arm/mach-ebsa110/
-F: drivers/net/arm/am79c961a.*
+F: drivers/net/ethernet/amd/am79c961a.*
ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)
M: Daniel Ribeiro <drwyrm@gmail.com>
@@ -1015,7 +1014,8 @@ F: arch/arm/include/asm/hardware/ioc.h
F: arch/arm/include/asm/hardware/iomd.h
F: arch/arm/include/asm/hardware/memc.h
F: arch/arm/mach-rpc/
-F: drivers/net/arm/ether*
+F: drivers/net/ethernet/i825xx/ether1*
+F: drivers/net/ethernet/seeq/ether3*
F: drivers/scsi/arm/
ARM/SHARK MACHINE SUPPORT
@@ -1127,7 +1127,7 @@ F: arch/arm/mach-nuc93x/
F: drivers/input/keyboard/w90p910_keypad.c
F: drivers/input/touchscreen/w90p910_ts.c
F: drivers/watchdog/nuc900_wdt.c
-F: drivers/net/arm/w90p910_ether.c
+F: drivers/net/ethernet/nuvoton/w90p910_ether.c
F: drivers/mtd/nand/nuc900_nand.c
F: drivers/rtc/rtc-nuc900.c
F: drivers/spi/spi_nuc900.c
@@ -1230,7 +1230,7 @@ F: Documentation/aoe/
F: drivers/block/aoe/
ATHEROS ATH GENERIC UTILITIES
-M: "Luis R. Rodriguez" <lrodriguez@atheros.com>
+M: "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/net/wireless/ath/*
@@ -1238,7 +1238,7 @@ F: drivers/net/wireless/ath/*
ATHEROS ATH5K WIRELESS DRIVER
M: Jiri Slaby <jirislaby@gmail.com>
M: Nick Kossifidis <mickflemm@gmail.com>
-M: "Luis R. Rodriguez" <lrodriguez@atheros.com>
+M: "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
M: Bob Copeland <me@bobcopeland.com>
L: linux-wireless@vger.kernel.org
L: ath5k-devel@lists.ath5k.org
@@ -1246,11 +1246,19 @@ W: http://wireless.kernel.org/en/users/Drivers/ath5k
S: Maintained
F: drivers/net/wireless/ath/ath5k/
+ATHEROS ATH6KL WIRELESS DRIVER
+M: Kalle Valo <kvalo@qca.qualcomm.com>
+L: linux-wireless@vger.kernel.org
+W: http://wireless.kernel.org/en/users/Drivers/ath6kl
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath6kl.git
+S: Supported
+F: drivers/net/wireless/ath/ath6kl/
+
ATHEROS ATH9K WIRELESS DRIVER
-M: "Luis R. Rodriguez" <lrodriguez@atheros.com>
-M: Jouni Malinen <jmalinen@atheros.com>
-M: Vasanthakumar Thiagarajan <vasanth@atheros.com>
-M: Senthil Balasubramanian <senthilkumar@atheros.com>
+M: "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
+M: Jouni Malinen <jouni@qca.qualcomm.com>
+M: Vasanthakumar Thiagarajan <vthiagar@qca.qualcomm.com>
+M: Senthil Balasubramanian <senthilb@qca.qualcomm.com>
L: linux-wireless@vger.kernel.org
L: ath9k-devel@lists.ath9k.org
W: http://wireless.kernel.org/en/users/Drivers/ath9k
@@ -1282,7 +1290,7 @@ L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/atl1
W: http://atl1.sourceforge.net
S: Maintained
-F: drivers/net/atlx/
+F: drivers/net/ethernet/atheros/
ATM
M: Chas Williams <chas@cmf.nrl.navy.mil>
@@ -1322,7 +1330,7 @@ F: include/video/atmel_lcdc.h
ATMEL MACB ETHERNET DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
S: Supported
-F: drivers/net/macb.*
+F: drivers/net/ethernet/cadence/
ATMEL SPI DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
@@ -1445,7 +1453,7 @@ BLACKFIN EMAC DRIVER
L: uclinux-dist-devel@blackfin.uclinux.org
W: http://blackfin.uclinux.org
S: Supported
-F: drivers/net/bfin_mac.*
+F: drivers/net/ethernet/adi/
BLACKFIN RTC DRIVER
M: Mike Frysinger <vapier.adi@gmail.com>
@@ -1526,27 +1534,27 @@ BROADCOM B44 10/100 ETHERNET DRIVER
M: Gary Zambrano <zambrano@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/b44.*
+F: drivers/net/ethernet/broadcom/b44.*
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
M: Michael Chan <mchan@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/bnx2.*
-F: drivers/net/bnx2_*
+F: drivers/net/ethernet/broadcom/bnx2.*
+F: drivers/net/ethernet/broadcom/bnx2_*
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
M: Eilon Greenstein <eilong@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/bnx2x/
+F: drivers/net/ethernet/broadcom/bnx2x/
BROADCOM TG3 GIGABIT ETHERNET DRIVER
M: Matt Carlson <mcarlson@broadcom.com>
M: Michael Chan <mchan@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/tg3.*
+F: drivers/net/ethernet/broadcom/tg3.*
BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
M: Brett Rudley <brudley@broadcom.com>
@@ -1575,7 +1583,7 @@ BROCADE BNA 10 GIGABIT ETHERNET DRIVER
M: Rasesh Mody <rmody@brocade.com>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/bna/
+F: drivers/net/ethernet/brocade/bna/
BSG (block layer generic sg v4 driver)
M: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
@@ -1663,7 +1671,7 @@ CAN NETWORK LAYER
M: Oliver Hartkopp <socketcan@hartkopp.net>
M: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
M: Urs Thuermann <urs.thuermann@volkswagen.de>
-L: socketcan-core@lists.berlios.de (subscribers-only)
+L: linux-can@vger.kernel.org
L: netdev@vger.kernel.org
W: http://developer.berlios.de/projects/socketcan/
S: Maintained
@@ -1675,7 +1683,7 @@ F: include/linux/can/raw.h
CAN NETWORK DRIVERS
M: Wolfgang Grandegger <wg@grandegger.com>
-L: socketcan-core@lists.berlios.de (subscribers-only)
+L: linux-can@vger.kernel.org
L: netdev@vger.kernel.org
W: http://developer.berlios.de/projects/socketcan/
S: Maintained
@@ -1759,13 +1767,13 @@ M: Christian Benvenuti <benve@cisco.com>
M: Roopa Prabhu <roprabhu@cisco.com>
M: David Wang <dwang2@cisco.com>
S: Supported
-F: drivers/net/enic/
+F: drivers/net/ethernet/cisco/enic/
CIRRUS LOGIC EP93XX ETHERNET DRIVER
M: Hartley Sweeten <hsweeten@visionengravers.com>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/arm/ep93xx_eth.c
+F: drivers/net/ethernet/cirrus/ep93xx_eth.c
CIRRUS LOGIC EP93XX OHCI USB HOST DRIVER
M: Lennert Buytenhek <kernel@wantstofly.org>
@@ -1905,7 +1913,7 @@ CPMAC ETHERNET DRIVER
M: Florian Fainelli <florian@openwrt.org>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/cpmac.c
+F: drivers/net/ethernet/ti/cpmac.c
CPU FREQUENCY DRIVERS
M: Dave Jones <davej@redhat.com>
@@ -1992,7 +2000,7 @@ M: Divy Le Ray <divy@chelsio.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
-F: drivers/net/cxgb3/
+F: drivers/net/ethernet/chelsio/cxgb3/
CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
M: Steve Wise <swise@chelsio.com>
@@ -2006,7 +2014,7 @@ M: Dimitris Michailidis <dm@chelsio.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
-F: drivers/net/cxgb4/
+F: drivers/net/ethernet/chelsio/cxgb4/
CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
M: Steve Wise <swise@chelsio.com>
@@ -2020,14 +2028,14 @@ M: Casey Leedom <leedom@chelsio.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
-F: drivers/net/cxgb4vf/
+F: drivers/net/ethernet/chelsio/cxgb4vf/
STMMAC ETHERNET DRIVER
M: Giuseppe Cavallaro <peppe.cavallaro@st.com>
L: netdev@vger.kernel.org
W: http://www.stlinux.com
S: Supported
-F: drivers/net/stmmac/
+F: drivers/net/ethernet/stmicro/stmmac/
CYBERPRO FB DRIVER
M: Russell King <linux@arm.linux.org.uk>
@@ -2071,7 +2079,7 @@ DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
L: netdev@vger.kernel.org
S: Orphan
F: Documentation/networking/dmfe.txt
-F: drivers/net/tulip/dmfe.c
+F: drivers/net/ethernet/tulip/dmfe.c
DC390/AM53C974 SCSI driver
M: Kurt Garloff <garloff@suse.de>
@@ -2110,7 +2118,7 @@ F: net/decnet/
DEFXX FDDI NETWORK DRIVER
M: "Maciej W. Rozycki" <macro@linux-mips.org>
S: Maintained
-F: drivers/net/defxx.*
+F: drivers/net/fddi/defxx.*
DELL LAPTOP DRIVER
M: Matthew Garrett <mjg59@srcf.ucam.org>
@@ -2136,6 +2144,14 @@ M: Matthew Garrett <mjg59@srcf.ucam.org>
S: Maintained
F: drivers/platform/x86/dell-wmi.c
+DESIGNWARE USB3 DRD IP DRIVER
+M: Felipe Balbi <balbi@ti.com>
+L: linux-usb@vger.kernel.org
+L: linux-omap@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
+S: Maintained
+F: drivers/usb/dwc3/
+
DEVICE NUMBER REGISTRY
M: Torben Mathiasen <device@lanana.org>
W: http://lanana.org/docs/device-list/index.html
@@ -2294,6 +2310,12 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/wan/dscc4.c
+DYNAMIC DEBUG
+M: Jason Baron <jbaron@redhat.com>
+S: Maintained
+F: lib/dynamic_debug.c
+F: include/linux/dynamic_debug.h
+
DZ DECSTATION DZ11 SERIAL DRIVER
M: "Maciej W. Rozycki" <macro@linux-mips.org>
S: Maintained
@@ -2460,10 +2482,10 @@ S: Supported
F: drivers/infiniband/hw/ehca/
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M: Breno Leitao <leitao@linux.vnet.ibm.com>
+M: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/ehea/
+F: drivers/net/ethernet/ibm/ehea/
EMBEDDED LINUX
M: Paul Gortmaker <paul.gortmaker@windriver.com>
@@ -2508,7 +2530,7 @@ ETHEREXPRESS-16 NETWORK DRIVER
M: Philip Blundell <philb@gnu.org>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/eexpress.*
+F: drivers/net/ethernet/i825xx/eexpress.*
ETHERNET BRIDGE
M: Stephen Hemminger <shemminger@linux-foundation.org>
@@ -2522,7 +2544,7 @@ F: net/bridge/
ETHERTEAM 16I DRIVER
M: Mika Kuoppala <miku@iki.fi>
S: Maintained
-F: drivers/net/eth16i.c
+F: drivers/net/ethernet/fujitsu/eth16i.c
EXT2 FILE SYSTEM
M: Jan Kara <jack@suse.cz>
@@ -2552,6 +2574,11 @@ S: Maintained
F: Documentation/filesystems/ext4.txt
F: fs/ext4/
+Extended Verification Module (EVM)
+M: Mimi Zohar <zohar@us.ibm.com>
+S: Supported
+F: security/integrity/evm/
+
F71805F HARDWARE MONITORING DRIVER
M: Jean Delvare <khali@linux-fr.org>
L: lm-sensors@lm-sensors.org
@@ -2686,7 +2713,7 @@ M: Vitaly Bordug <vbordug@ru.mvista.com>
L: linuxppc-dev@lists.ozlabs.org
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/fs_enet/
+F: drivers/net/ethernet/freescale/fs_enet/
F: include/linux/fs_enet_pd.h
FREESCALE QUICC ENGINE LIBRARY
@@ -2708,7 +2735,7 @@ M: Li Yang <leoli@freescale.com>
L: netdev@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
-F: drivers/net/ucc_geth*
+F: drivers/net/ethernet/freescale/ucc_geth*
FREESCALE QUICC ENGINE UCC UART DRIVER
M: Timur Tabi <timur@freescale.com>
@@ -2733,7 +2760,7 @@ F: fs/freevxfs/
FREEZER
M: Pavel Machek <pavel@ucw.cz>
M: "Rafael J. Wysocki" <rjw@sisk.pl>
-L: linux-pm@lists.linux-foundation.org
+L: linux-pm@vger.kernel.org
S: Supported
F: Documentation/power/freezing-of-tasks.txt
F: include/linux/freezer.h
@@ -2995,7 +3022,7 @@ F: drivers/video/hgafb.c
HIBERNATION (aka Software Suspend, aka swsusp)
M: Pavel Machek <pavel@ucw.cz>
M: "Rafael J. Wysocki" <rjw@sisk.pl>
-L: linux-pm@lists.linux-foundation.org
+L: linux-pm@vger.kernel.org
S: Supported
F: arch/x86/power/
F: drivers/base/power/
@@ -3046,6 +3073,7 @@ S: Maintained
F: include/linux/hippidevice.h
F: include/linux/if_hippi.h
F: net/802/hippi.c
+F: drivers/net/hippi/
HOST AP DRIVER
M: Jouni Malinen <j@w1.fi>
@@ -3063,7 +3091,7 @@ F: drivers/platform/x86/tc1100-wmi.c
HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series
M: Jaroslav Kysela <perex@perex.cz>
S: Maintained
-F: drivers/net/hp100.*
+F: drivers/net/ethernet/hp/hp100.*
HPET: High Precision Event Timers driver
M: Clemens Ladisch <clemens@ladisch.de>
@@ -3161,7 +3189,7 @@ IBM Power Virtual Ethernet Device Driver
M: Santiago Leon <santil@linux.vnet.ibm.com>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/ibmveth.*
+F: drivers/net/ethernet/ibm/ibmveth.*
IBM ServeRAID RAID DRIVER
P: Jack Hammer
@@ -3189,7 +3217,7 @@ F: drivers/ide/ide-cd*
IDLE-I7300
M: Andy Henroid <andrew.d.henroid@intel.com>
-L: linux-pm@lists.linux-foundation.org
+L: linux-pm@vger.kernel.org
S: Supported
F: drivers/idle/i7300_idle.c
@@ -3272,7 +3300,7 @@ F: firmware/isci/
INTEL IDLE DRIVER
M: Len Brown <lenb@kernel.org>
-L: linux-pm@lists.linux-foundation.org
+L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6.git
S: Supported
F: drivers/idle/intel_idle.c
@@ -3313,7 +3341,7 @@ M: David Woodhouse <dwmw2@infradead.org>
L: iommu@lists.linux-foundation.org
T: git git://git.infradead.org/iommu-2.6.git
S: Supported
-F: drivers/pci/intel-iommu.c
+F: drivers/iommu/intel-iommu.c
F: include/linux/intel-iommu.h
INTEL IOP-ADMA DMA DRIVER
@@ -3328,7 +3356,7 @@ F: arch/arm/mach-ixp4xx/include/mach/qmgr.h
F: arch/arm/mach-ixp4xx/include/mach/npe.h
F: arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
F: arch/arm/mach-ixp4xx/ixp4xx_npe.c
-F: drivers/net/arm/ixp4xx_eth.c
+F: drivers/net/ethernet/xscale/ixp4xx_eth.c
F: drivers/net/wan/ixp4xx_hss.c
INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT
@@ -3340,7 +3368,7 @@ INTEL IXP2000 ETHERNET DRIVER
M: Lennert Buytenhek <kernel@wantstofly.org>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/ixp2000/
+F: drivers/net/ethernet/xscale/ixp2000/
INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
@@ -3349,13 +3377,13 @@ M: Bruce Allan <bruce.w.allan@intel.com>
M: Carolyn Wyborny <carolyn.wyborny@intel.com>
M: Don Skidmore <donald.c.skidmore@intel.com>
M: Greg Rose <gregory.v.rose@intel.com>
-M: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
+M: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
M: Alex Duyck <alexander.h.duyck@intel.com>
M: John Ronciak <john.ronciak@intel.com>
L: e1000-devel@lists.sourceforge.net
W: http://e1000.sourceforge.net/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-2.6.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
S: Supported
F: Documentation/networking/e100.txt
F: Documentation/networking/e1000.txt
@@ -3365,18 +3393,11 @@ F: Documentation/networking/igbvf.txt
F: Documentation/networking/ixgb.txt
F: Documentation/networking/ixgbe.txt
F: Documentation/networking/ixgbevf.txt
-F: drivers/net/e100.c
-F: drivers/net/e1000/
-F: drivers/net/e1000e/
-F: drivers/net/igb/
-F: drivers/net/igbvf/
-F: drivers/net/ixgb/
-F: drivers/net/ixgbe/
-F: drivers/net/ixgbevf/
+F: drivers/net/ethernet/intel/
INTEL MRST PMU DRIVER
M: Len Brown <len.brown@intel.com>
-L: linux-pm@lists.linux-foundation.org
+L: linux-pm@vger.kernel.org
S: Supported
F: arch/x86/platform/mrst/pmu.*
@@ -3424,7 +3445,7 @@ M: Wey-Yi Guy <wey-yi.w.guy@intel.com>
M: Intel Linux Wireless <ilw@linux.intel.com>
L: linux-wireless@vger.kernel.org
W: http://intellinuxwireless.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
S: Supported
F: drivers/net/wireless/iwlwifi/
@@ -3440,7 +3461,7 @@ IOC3 ETHERNET DRIVER
M: Ralf Baechle <ralf@linux-mips.org>
L: linux-mips@linux-mips.org
S: Maintained
-F: drivers/net/ioc3-eth.c
+F: drivers/net/ethernet/sgi/ioc3-eth.c
IOC3 SERIAL DRIVER
M: Pat Gefre <pfg@sgi.com>
@@ -3458,7 +3479,7 @@ M: Francois Romieu <romieu@fr.zoreil.com>
M: Sorbica Shieh <sorbica@icplus.com.tw>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/ipg.*
+F: drivers/net/ethernet/icplus/ipg.*
IPATH DRIVER
M: Mike Marciniszyn <infinipath@qlogic.com>
@@ -3606,7 +3627,7 @@ JME NETWORK DRIVER
M: Guo-Fu Tseng <cooldavid@cooldavid.org>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/jme.*
+F: drivers/net/ethernet/jme.*
JOURNALLING FLASH FILE SYSTEM V2 (JFFS2)
M: David Woodhouse <dwmw2@infradead.org>
@@ -4067,7 +4088,7 @@ S: Maintained
M32R ARCHITECTURE
M: Hirokazu Takata <takata@linux-m32r.org>
-L: linux-m32r@ml.linux-m32r.org
+L: linux-m32r@ml.linux-m32r.org (moderated for non-subscribers)
L: linux-m32r-ja@ml.linux-m32r.org (in Japanese)
W: http://www.linux-m32r.org/
S: Maintained
@@ -4137,7 +4158,7 @@ MARVELL MV643XX ETHERNET DRIVER
M: Lennert Buytenhek <buytenh@wantstofly.org>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/mv643xx_eth.*
+F: drivers/net/ethernet/marvell/mv643xx_eth.*
F: include/linux/mv643xx.h
MARVELL MWIFIEX WIRELESS DRIVER
@@ -4236,7 +4257,7 @@ F: include/mtd/
MICROBLAZE ARCHITECTURE
M: Michal Simek <monstr@monstr.eu>
-L: microblaze-uclinux@itee.uq.edu.au
+L: microblaze-uclinux@itee.uq.edu.au (moderated for non-subscribers)
W: http://www.monstr.eu/fdt/
T: git git://git.monstr.eu/linux-2.6-microblaze.git
S: Supported
@@ -4351,12 +4372,12 @@ M: Andrew Gallatin <gallatin@myri.com>
L: netdev@vger.kernel.org
W: http://www.myri.com/scs/download-Myri10GE.html
S: Supported
-F: drivers/net/myri10ge/
+F: drivers/net/ethernet/myricom/myri10ge/
NATSEMI ETHERNET DRIVER (DP8381x)
M: Tim Hockin <thockin@hockin.org>
S: Maintained
-F: drivers/net/natsemi.c
+F: drivers/net/ethernet/natsemi/natsemi.c
NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER
M: Daniel Mack <zonque@gmail.com>
@@ -4396,9 +4417,8 @@ W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
S: Supported
F: Documentation/networking/s2io.txt
-F: drivers/net/s2io*
F: Documentation/networking/vxge.txt
-F: drivers/net/vxge/
+F: drivers/net/ethernet/neterion/
NETFILTER/IPTABLES/IPCHAINS
P: Rusty Russell
@@ -4512,11 +4532,23 @@ F: include/linux/if_*
F: include/linux/*device.h
NETXEN (1/10) GbE SUPPORT
-M: Amit Kumar Salecha <amit.salecha@qlogic.com>
+M: Sony Chacko <sony.chacko@qlogic.com>
+M: Rajesh Borundia <rajesh.borundia@qlogic.com>
L: netdev@vger.kernel.org
W: http://www.qlogic.com
S: Supported
-F: drivers/net/netxen/
+F: drivers/net/ethernet/qlogic/netxen/
+
+NFC SUBSYSTEM
+M: Lauro Ramos Venancio <lauro.venancio@openbossa.org>
+M: Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
+M: Samuel Ortiz <sameo@linux.intel.com>
+L: linux-wireless@vger.kernel.org
+S: Maintained
+F: net/nfc/
+F: include/linux/nfc.h
+F: include/net/nfc/
+F: drivers/nfc/
NFS, SUNRPC, AND LOCKD CLIENTS
M: Trond Myklebust <Trond.Myklebust@netapp.com>
@@ -4537,7 +4569,7 @@ M: Jan-Pascal van Best <janpascal@vanbest.org>
M: Andreas Mohr <andi@lisas.de>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/ni5010.*
+F: drivers/net/ethernet/racal/ni5010.*
NILFS2 FILESYSTEM
M: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
@@ -4740,7 +4772,7 @@ K: of_match_table
OPENRISC ARCHITECTURE
M: Jonas Bonn <jonas@southpole.se>
W: http://openrisc.net
-L: linux@lists.openrisc.net
+L: linux@lists.openrisc.net (moderated for non-subscribers)
S: Maintained
T: git git://openrisc.net/~jonas/linux
F: arch/openrisc
@@ -4803,7 +4835,7 @@ PA SEMI ETHERNET DRIVER
M: Olof Johansson <olof@lixom.net>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/pasemi_mac.*
+F: drivers/net/ethernet/pasemi/*
PA SEMI SMBUS DRIVER
M: Olof Johansson <olof@lixom.net>
@@ -4950,7 +4982,7 @@ PCNET32 NETWORK DRIVER
M: Don Fry <pcnet32@frontier.com>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/pcnet32.c
+F: drivers/net/ethernet/amd/pcnet32.c
PCRYPT PARALLEL CRYPTO ENGINE
M: Steffen Klassert <steffen.klassert@secunet.com>
@@ -5010,6 +5042,11 @@ L: linux-mtd@lists.infradead.org
S: Maintained
F: drivers/mtd/devices/phram.c
+PIN CONTROL SUBSYSTEM
+M: Linus Walleij <linus.walleij@linaro.org>
+S: Maintained
+F: drivers/pinmux/
+
PKTCDVD DRIVER
M: Peter Osterlund <petero2@telia.com>
S: Maintained
@@ -5082,7 +5119,7 @@ PPP PROTOCOL DRIVERS AND COMPRESSORS
M: Paul Mackerras <paulus@samba.org>
L: linux-ppp@vger.kernel.org
S: Maintained
-F: drivers/net/ppp_*
+F: drivers/net/ppp/ppp_*
PPP OVER ATM (RFC 2364)
M: Mitchell Blank Jr <mitch@sfgoth.com>
@@ -5093,8 +5130,8 @@ F: include/linux/atmppp.h
PPP OVER ETHERNET
M: Michal Ostrowski <mostrows@earthlink.net>
S: Maintained
-F: drivers/net/pppoe.c
-F: drivers/net/pppox.c
+F: drivers/net/ppp/pppoe.c
+F: drivers/net/ppp/pppox.c
PPP OVER L2TP
M: James Chapman <jchapman@katalix.com>
@@ -5115,7 +5152,7 @@ PPTP DRIVER
M: Dmitry Kozlov <xeb@mail.ru>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/pptp.c
+F: drivers/net/ppp/pptp.c
W: http://sourceforge.net/projects/accel-pptp
PREEMPTIBLE KERNEL
@@ -5144,7 +5181,7 @@ M: Geoff Levand <geoff@infradead.org>
L: netdev@vger.kernel.org
L: cbe-oss-dev@lists.ozlabs.org
S: Maintained
-F: drivers/net/ps3_gelic_net.*
+F: drivers/net/ethernet/toshiba/ps3_gelic_net.*
PS3 PLATFORM SUPPORT
M: Geoff Levand <geoff@infradead.org>
@@ -5262,23 +5299,24 @@ M: linux-driver@qlogic.com
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/LICENSE.qla3xxx
-F: drivers/net/qla3xxx.*
+F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Amit Kumar Salecha <amit.salecha@qlogic.com>
M: Anirban Chakraborty <anirban.chakraborty@qlogic.com>
+M: Sony Chacko <sony.chacko@qlogic.com>
M: linux-driver@qlogic.com
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/qlcnic/
+F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER
+M: Anirban Chakraborty <anirban.chakraborty@qlogic.com>
M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
M: Ron Mercer <ron.mercer@qlogic.com>
M: linux-driver@qlogic.com
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/qlge/
+F: drivers/net/ethernet/qlogic/qlge/
QNX4 FILESYSTEM
M: Anders Larsen <al@alarsen.net>
@@ -5360,7 +5398,7 @@ RDC R6040 FAST ETHERNET DRIVER
M: Florian Fainelli <florian@openwrt.org>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/r6040.c
+F: drivers/net/ethernet/rdc/r6040.c
RDS - RELIABLE DATAGRAM SOCKETS
M: Andy Grover <andy.grover@oracle.com>
@@ -5764,7 +5802,7 @@ M: Ajit Khaparde <ajit.khaparde@emulex.com>
L: netdev@vger.kernel.org
W: http://www.emulex.com
S: Supported
-F: drivers/net/benet/
+F: drivers/net/ethernet/emulex/benet/
SFC NETWORK DRIVER
M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
@@ -5772,7 +5810,7 @@ M: Steve Hodgson <shodgson@solarflare.com>
M: Ben Hutchings <bhutchings@solarflare.com>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/sfc/
+F: drivers/net/ethernet/sfc/
SGI GRU DRIVER
M: Jack Steiner <steiner@sgi.com>
@@ -5838,14 +5876,14 @@ SIS 190 ETHERNET DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/sis190.c
+F: drivers/net/ethernet/sis/sis190.c
SIS 900/7016 FAST ETHERNET DRIVER
M: Daniele Venzano <venza@brownhat.org>
W: http://www.brownhat.org/sis900.html
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/sis900.*
+F: drivers/net/ethernet/sis/sis900.*
SIS 96X I2C/SMBUS DRIVER
M: "Mark M. Hoffman" <mhoffman@lightlink.com>
@@ -5872,8 +5910,7 @@ SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
M: Stephen Hemminger <shemminger@linux-foundation.org>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/skge.*
-F: drivers/net/sky2.*
+F: drivers/net/ethernet/marvell/sk*
SLAB ALLOCATOR
M: Christoph Lameter <cl@linux-foundation.org>
@@ -5887,7 +5924,7 @@ F: mm/sl?b.c
SMC91x ETHERNET DRIVER
M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes
-F: drivers/net/smc91x.*
+F: drivers/net/ethernet/smsc/smc91x.*
SMM665 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
@@ -5922,13 +5959,13 @@ M: Steve Glendinning <steve.glendinning@smsc.com>
L: netdev@vger.kernel.org
S: Supported
F: include/linux/smsc911x.h
-F: drivers/net/smsc911x.*
+F: drivers/net/ethernet/smsc/smsc911x.*
SMSC9420 PCI ETHERNET DRIVER
M: Steve Glendinning <steve.glendinning@smsc.com>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/smsc9420.*
+F: drivers/net/ethernet/smsc/smsc9420.*
SN-IA64 (Itanium) SUB-PLATFORM
M: Jes Sorensen <jes@sgi.com>
@@ -5962,7 +5999,7 @@ SONIC NETWORK DRIVER
M: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/sonic.*
+F: drivers/net/ethernet/natsemi/sonic.*
SONICS SILICON BACKPLANE DRIVER (SSB)
M: Michael Buesch <m@bues.ch>
@@ -6103,7 +6140,7 @@ M: Jens Osterkamp <jens@de.ibm.com>
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/spider_net.txt
-F: drivers/net/spider_net*
+F: drivers/net/ethernet/toshiba/spider_net*
SPU FILE SYSTEM
M: Jeremy Kerr <jk@ozlabs.org>
@@ -6134,7 +6171,7 @@ S: Maintained
STAGING SUBSYSTEM
M: Greg Kroah-Hartman <gregkh@suse.de>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
L: devel@driverdev.osuosl.org
S: Maintained
F: drivers/staging/
@@ -6150,12 +6187,6 @@ M: Jakub Schmidtke <sjakub@gmail.com>
S: Odd Fixes
F: drivers/staging/asus_oled/
-STAGING - ATHEROS ATH6KL WIRELESS DRIVER
-M: Luis R. Rodriguez <mcgrof@gmail.com>
-M: Naveen Singh <nsingh@atheros.com>
-S: Odd Fixes
-F: drivers/staging/ath6kl/
-
STAGING - COMEDI
M: Ian Abbott <abbotti@mev.co.uk>
M: Mori Hess <fmhess@users.sourceforge.net>
@@ -6181,6 +6212,11 @@ M: David Rowe <david@rowetel.com>
S: Odd Fixes
F: drivers/staging/echo/
+STAGING - ET131X NETWORK DRIVER
+M: Mark Einon <mark.einon@gmail.com>
+S: Odd Fixes
+F: drivers/staging/et131x/
+
STAGING - FLARION FT1000 DRIVERS
M: Marek Belisko <marek.belisko@gmail.com>
S: Odd Fixes
@@ -6209,6 +6245,13 @@ W: http://www.lirc.org/
S: Odd Fixes
F: drivers/staging/lirc/
+STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
+M: Julian Andres Klode <jak@jak-linux.org>
+M: Marc Dietrich <marvin24@gmx.de>
+L: ac100@lists.launchpad.net (moderated for non-subscribers)
+S: Maintained
+F: drivers/staging/nvec/
+
STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
M: Andres Salomon <dilinger@queued.net>
M: Chris Ball <cjb@laptop.org>
@@ -6281,7 +6324,7 @@ F: drivers/staging/xgifb/
STARFIRE/DURALAN NETWORK DRIVER
M: Ion Badulescu <ionut@badula.org>
S: Odd Fixes
-F: drivers/net/starfire*
+F: drivers/net/ethernet/adaptec/starfire*
SUN3/3X
M: Sam Creasey <sammy@sammy.net>
@@ -6290,6 +6333,7 @@ S: Maintained
F: arch/m68k/kernel/*sun3*
F: arch/m68k/sun3*/
F: arch/m68k/include/asm/sun3*
+F: drivers/net/ethernet/i825xx/sun3*
SUPERH
M: Paul Mundt <lethal@linux-sh.org>
@@ -6306,7 +6350,7 @@ SUSPEND TO RAM
M: Len Brown <len.brown@intel.com>
M: Pavel Machek <pavel@ucw.cz>
M: "Rafael J. Wysocki" <rjw@sisk.pl>
-L: linux-pm@lists.linux-foundation.org
+L: linux-pm@vger.kernel.org
S: Supported
F: Documentation/power/
F: arch/x86/kernel/acpi/
@@ -6366,10 +6410,10 @@ F: net/ipv4/tcp_lp.c
TEGRA SUPPORT
M: Colin Cross <ccross@android.com>
-M: Erik Gilling <konkers@android.com>
M: Olof Johansson <olof@lixom.net>
+M: Stephen Warren <swarren@nvidia.com>
L: linux-tegra@vger.kernel.org
-T: git git://android.git.kernel.org/kernel/tegra.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/olof/tegra.git
S: Supported
F: arch/arm/mach-tegra
@@ -6377,7 +6421,7 @@ TEHUTI ETHERNET DRIVER
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/tehuti*
+F: drivers/net/ethernet/tehuti/*
Telecom Clock Driver for MCPL0010
M: Mark Gross <mark.gross@intel.com>
@@ -6428,7 +6472,7 @@ W: http://www.tilera.com/scm/
S: Supported
F: arch/tile/
F: drivers/tty/hvc/hvc_tile.c
-F: drivers/net/tile/
+F: drivers/net/ethernet/tile/
F: drivers/edac/tile_edac.c
TLAN NETWORK DRIVER
@@ -6437,7 +6481,7 @@ L: tlan-devel@lists.sourceforge.net (subscribers-only)
W: http://sourceforge.net/projects/tlan/
S: Maintained
F: Documentation/networking/tlan.txt
-F: drivers/net/tlan.*
+F: drivers/net/ethernet/ti/tlan.*
TOMOYO SECURITY MODULE
M: Kentaro Takeda <takedakn@nttdata.co.jp>
@@ -6447,7 +6491,7 @@ L: tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for users in English)
L: tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese)
L: tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese)
W: http://tomoyo.sourceforge.jp/
-T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.4.x/tomoyo-lsm/patches/
+T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.5.x/tomoyo-lsm/patches/
S: Maintained
F: security/tomoyo/
@@ -6531,7 +6575,7 @@ TULIP NETWORK DRIVERS
M: Grant Grundler <grundler@parisc-linux.org>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/tulip/
+F: drivers/net/ethernet/tulip/
TUN/TAP driver
M: Maxim Krasnyansky <maxk@qualcomm.com>
@@ -6577,7 +6621,7 @@ W: http://uclinux-h8.sourceforge.jp/
S: Supported
F: arch/h8300/
F: drivers/ide/ide-h8300.c
-F: drivers/net/ne-h8300.c
+F: drivers/net/ethernet/8390/ne-h8300.c
UDF FILESYSTEM
M: Jan Kara <jack@suse.cz>
@@ -7005,7 +7049,7 @@ F: include/linux/vhost.h
VIA RHINE NETWORK DRIVER
M: Roger Luethi <rl@hellgate.ch>
S: Maintained
-F: drivers/net/via-rhine.c
+F: drivers/net/ethernet/via/via-rhine.c
VIAPRO SMBUS DRIVER
M: Jean Delvare <khali@linux-fr.org>
@@ -7033,7 +7077,7 @@ VIA VELOCITY NETWORK DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/via-velocity.*
+F: drivers/net/ethernet/via/via-velocity.*
VLAN (802.1Q)
M: Patrick McHardy <kaber@trash.net>
@@ -7142,6 +7186,12 @@ L: linux-scsi@vger.kernel.org
S: Maintained
F: drivers/scsi/wd7000.c
+WIIMOTE HID DRIVER
+M: David Herrmann <dh.herrmann@googlemail.com>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: drivers/hid/hid-wiimote*
+
WINBOND CIR DRIVER
M: David Härdeman <david@hardeman.nu>
S: Maintained
diff --git a/Makefile b/Makefile
index 31f967c31e7..07bc92544e9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 1
SUBLEVEL = 0
-EXTRAVERSION = -rc9
+EXTRAVERSION =
NAME = "Divemaster Edition"
# *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/fcntl.h b/arch/alpha/include/asm/fcntl.h
index 1b71ca70c9f..6d9e805f18a 100644
--- a/arch/alpha/include/asm/fcntl.h
+++ b/arch/alpha/include/asm/fcntl.h
@@ -51,8 +51,6 @@
#define F_EXLCK 16 /* or 3 */
#define F_SHLCK 32 /* or 4 */
-#define F_INPROGRESS 64
-
#include <asm-generic/fcntl.h>
#endif
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
index f0df3fbd840..b9fc6c309d2 100644
--- a/arch/alpha/kernel/srm_env.c
+++ b/arch/alpha/kernel/srm_env.c
@@ -4,9 +4,8 @@
*
* (C) 2001,2002,2006 by Jan-Benedict Glaw <jbglaw@lug-owl.de>
*
- * This driver is at all a modified version of Erik Mouw's
- * Documentation/DocBook/procfs_example.c, so: thank
- * you, Erik! He can be reached via email at
+ * This driver is a modified version of Erik Mouw's example proc
+ * interface, so: thank you, Erik! He can be reached via email at
* <J.A.K.Mouw@its.tudelft.nl>. It is based on an idea
* provided by DEC^WCompaq^WIntel's "Jumpstart" CD. They
* included a patch like this as well. Thanks for idea!
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3146ed3f6ec..795126ea493 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -3,7 +3,7 @@ config ARM
default y
select HAVE_AOUT
select HAVE_DMA_API_DEBUG
- select HAVE_IDE
+ select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
@@ -195,7 +195,8 @@ config VECTORS_BASE
The base address of exception vectors.
config ARM_PATCH_PHYS_VIRT
- bool "Patch physical to virtual translations at runtime"
+ bool "Patch physical to virtual translations at runtime" if EMBEDDED
+ default y
depends on !XIP_KERNEL && MMU
depends on !ARCH_REALVIEW || !SPARSEMEM
help
@@ -204,16 +205,16 @@ config ARM_PATCH_PHYS_VIRT
kernel in system memory.
This can only be used with non-XIP MMU kernels where the base
- of physical memory is at a 16MB boundary, or theoretically 64K
- for the MSM machine class.
+ of physical memory is at a 16MB boundary.
+
+ Only disable this option if you know that you do not require
+ this feature (eg, building a kernel for a single machine) and
+ you need to shrink the kernel to the minimal size.
-config ARM_PATCH_PHYS_VIRT_16BIT
+
+config GENERIC_BUG
def_bool y
- depends on ARM_PATCH_PHYS_VIRT && ARCH_MSM
- help
- This option extends the physical to virtual translation patching
- to allow physical memory down to a theoretical minimum of 64K
- boundaries.
+ depends on BUG
source "init/Kconfig"
@@ -301,7 +302,6 @@ config ARCH_AT91
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
select CLKDEV_LOOKUP
- select ARM_PATCH_PHYS_VIRT if MMU
help
This enables support for systems based on the Atmel AT91RM9200,
AT91SAM9 and AT91CAP9 processors.
@@ -385,6 +385,7 @@ config ARCH_FOOTBRIDGE
select CPU_SA110
select FOOTBRIDGE
select GENERIC_CLOCKEVENTS
+ select HAVE_IDE
help
Support for systems based on the DC21285 companion chip
("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
@@ -631,6 +632,8 @@ config ARCH_PXA
select SPARSE_IRQ
select AUTO_ZRELADDR
select MULTI_IRQ_HANDLER
+ select ARM_CPU_SUSPEND if PM
+ select HAVE_IDE
help
Support for Intel/Marvell's PXA2xx/PXA3xx processor line.
@@ -671,6 +674,7 @@ config ARCH_RPC
select NO_IOPORT
select ARCH_SPARSEMEM_ENABLE
select ARCH_USES_GETTIMEOFFSET
+ select HAVE_IDE
help
On the Acorn Risc-PC, Linux can support the internal IDE disk and
CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -689,6 +693,7 @@ config ARCH_SA1100
select HAVE_SCHED_CLOCK
select TICK_ONESHOT
select ARCH_REQUIRE_GPIOLIB
+ select HAVE_IDE
help
Support for StrongARM 11x0 based boards.
@@ -722,7 +727,6 @@ config ARCH_S3C64XX
select ARCH_REQUIRE_GPIOLIB
select SAMSUNG_CLKSRC
select SAMSUNG_IRQ_VIC_TIMER
- select SAMSUNG_IRQ_UART
select S3C_GPIO_TRACK
select S3C_GPIO_PULL_UPDOWN
select S3C_GPIO_CFG_S3C24XX
@@ -1375,6 +1379,7 @@ config SMP
MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
ARCH_EXYNOS4 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \
ARCH_MSM_SCORPIONMP || ARCH_SHMOBILE
+ depends on MMU
select USE_GENERIC_SMP_HELPERS
select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP
help
@@ -1388,7 +1393,7 @@ config SMP
processor machines. On a single processor machine, the kernel will
run faster if you say N here.
- See also <file:Documentation/i386/IO-APIC.txt>,
+ See also <file:Documentation/x86/i386/IO-APIC.txt>,
<file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
<http://tldp.org/HOWTO/SMP-HOWTO.html>.
@@ -1407,6 +1412,31 @@ config SMP_ON_UP
If you don't know what to do here, say Y.
+config ARM_CPU_TOPOLOGY
+ bool "Support cpu topology definition"
+ depends on SMP && CPU_V7
+ default y
+ help
+ Support ARM cpu topology definition. The MPIDR register defines
+ affinity between processors which is then used to describe the cpu
+ topology of an ARM System.
+
+config SCHED_MC
+ bool "Multi-core scheduler support"
+ depends on ARM_CPU_TOPOLOGY
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+ making when dealing with multi-core CPU chips at a cost of slightly
+ increased overhead in some places. If unsure say N here.
+
+config SCHED_SMT
+ bool "SMT scheduler support"
+ depends on ARM_CPU_TOPOLOGY
+ help
+ Improves the CPU scheduler's decision making when dealing with
+ MultiThreading at a cost of slightly increased overhead in some
+ places. If unsure say N here.
+
config HAVE_ARM_SCU
bool
help
@@ -1482,6 +1512,7 @@ config THUMB2_KERNEL
depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
select AEABI
select ARM_ASM_UNIFIED
+ select ARM_UNWIND
help
By enabling this option, the kernel will be compiled in
Thumb-2 mode. A compiler/assembler that understand the unified
@@ -2101,6 +2132,9 @@ config ARCH_SUSPEND_POSSIBLE
CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE
def_bool y
+config ARM_CPU_SUSPEND
+ def_bool PM_SLEEP
+
endmenu
source "net/Kconfig"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 81cbe40c159..df3eb3ccd76 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -65,13 +65,71 @@ config DEBUG_USER
# These options are only for real kernel hackers who want to get their hands dirty.
config DEBUG_LL
- bool "Kernel low-level debugging functions"
+ bool "Kernel low-level debugging functions (read help!)"
depends on DEBUG_KERNEL
help
Say Y here to include definitions of printascii, printch, printhex
in the kernel. This is helpful if you are debugging code that
executes before the console is initialized.
+ Note that selecting this option will limit the kernel to a single
+ UART definition, as specified below. Attempting to boot the kernel
+ image on a different platform *will not work*, so this option should
+ not be enabled for kernels that are intended to be portable.
+
+choice
+ prompt "Kernel low-level debugging port"
+ depends on DEBUG_LL
+
+ config DEBUG_LL_UART_NONE
+ bool "No low-level debugging UART"
+ help
+ Say Y here if your platform doesn't provide a UART option
+ below. This relies on your platform choosing the right UART
+ definition internally in order for low-level debugging to
+ work.
+
+ config DEBUG_ICEDCC
+ bool "Kernel low-level debugging via EmbeddedICE DCC channel"
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the EmbeddedICE macrocell's DCC channel using
+ co-processor 14. This is known to work on the ARM9 style ICE
+ channel and on the XScale with the PEEDI.
+
+ Note that the system will appear to hang during boot if there
+ is nothing connected to read from the DCC.
+
+ config DEBUG_FOOTBRIDGE_COM1
+ bool "Kernel low-level debugging messages via footbridge 8250 at PCI COM1"
+ depends on FOOTBRIDGE
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the 8250 at PCI COM1.
+
+ config DEBUG_DC21285_PORT
+ bool "Kernel low-level debugging messages via footbridge serial port"
+ depends on FOOTBRIDGE
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the serial port in the DC21285 (Footbridge).
+
+ config DEBUG_CLPS711X_UART1
+ bool "Kernel low-level debugging messages via UART1"
+ depends on ARCH_CLPS711X
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the first serial port on these devices.
+
+ config DEBUG_CLPS711X_UART2
+ bool "Kernel low-level debugging messages via UART2"
+ depends on ARCH_CLPS711X
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the second serial port on these devices.
+
+endchoice
+
config EARLY_PRINTK
bool "Early printk"
depends on DEBUG_LL
@@ -80,43 +138,14 @@ config EARLY_PRINTK
kernel low-level debugging functions. Add earlyprintk to your
kernel parameters to enable this console.
-config DEBUG_ICEDCC
- bool "Kernel low-level debugging via EmbeddedICE DCC channel"
- depends on DEBUG_LL
- help
- Say Y here if you want the debug print routines to direct their
- output to the EmbeddedICE macrocell's DCC channel using
- co-processor 14. This is known to work on the ARM9 style ICE
- channel and on the XScale with the PEEDI.
-
- It does include a timeout to ensure that the system does not
- totally freeze when there is nothing connected to read.
-
config OC_ETM
bool "On-chip ETM and ETB"
- select ARM_AMBA
+ depends on ARM_AMBA
help
Enables the on-chip embedded trace macrocell and embedded trace
buffer driver that will allow you to collect traces of the
kernel code.
-config DEBUG_DC21285_PORT
- bool "Kernel low-level debugging messages via footbridge serial port"
- depends on DEBUG_LL && FOOTBRIDGE
- help
- Say Y here if you want the debug print routines to direct their
- output to the serial port in the DC21285 (Footbridge). Saying N
- will cause the debug messages to appear on the first 16550
- serial port.
-
-config DEBUG_CLPS711X_UART2
- bool "Kernel low-level debugging messages via UART2"
- depends on DEBUG_LL && ARCH_CLPS711X
- help
- Say Y here if you want the debug print routines to direct their
- output to the second serial port on these devices. Saying N will
- cause the debug messages to appear on the first serial port.
-
config DEBUG_S3C_UART
depends on PLAT_SAMSUNG
int "S3C UART to use for low-level debug"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 70c424eaf7b..5665c2a3b65 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -128,6 +128,9 @@ textofs-$(CONFIG_PM_H1940) := 0x00108000
ifeq ($(CONFIG_ARCH_SA1100),y)
textofs-$(CONFIG_SA1111) := 0x00208000
endif
+textofs-$(CONFIG_ARCH_MSM7X30) := 0x00208000
+textofs-$(CONFIG_ARCH_MSM8X60) := 0x00208000
+textofs-$(CONFIG_ARCH_MSM8960) := 0x00208000
# Machine directory name. This list is sorted alphanumerically
# by CONFIG_* macro name.
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index a1edfd5a129..176062ac7f0 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -78,7 +78,16 @@ endif
$(obj)/uImage: STARTADDR=$(LOADADDR)
+check_for_multiple_loadaddr = \
+if [ $(words $(LOADADDR)) -gt 1 ]; then \
+ echo 'multiple load addresses: $(LOADADDR)'; \
+ echo 'This is incompatible with uImages'; \
+ echo 'Specify LOADADDR on the commandline to build an uImage'; \
+ false; \
+fi
+
$(obj)/uImage: $(obj)/zImage FORCE
+ @$(check_for_multiple_loadaddr)
$(call if_changed,uimage)
@echo ' Image $@ is ready'
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 0c74a6fab95..a6b30b35ca6 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -139,8 +139,16 @@ bad_syms=$$($(CROSS_COMPILE)nm $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \
( echo "following symbols must have non local/private scope:" >&2; \
echo "$$bad_syms" >&2; rm -f $@; false )
+check_for_multiple_zreladdr = \
+if [ $(words $(ZRELADDR)) -gt 1 -a "$(CONFIG_AUTO_ZRELADDR)" = "" ]; then \
+ echo 'multiple zreladdrs: $(ZRELADDR)'; \
+ echo 'This needs CONFIG_AUTO_ZRELADDR to be set'; \
+ false; \
+fi
+
$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
$(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE
+ @$(check_for_multiple_zreladdr)
$(call if_changed,ld)
@$(check_for_bad_syms)
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 3227ca952a1..bdbb3f74f0f 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -33,7 +33,7 @@
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>
-static DEFINE_SPINLOCK(irq_controller_lock);
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
/* Address of GIC 0 CPU interface */
void __iomem *gic_cpu_base_addr __read_mostly;
@@ -82,30 +82,30 @@ static void gic_mask_irq(struct irq_data *d)
{
u32 mask = 1 << (d->irq % 32);
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
if (gic_arch_extn.irq_mask)
gic_arch_extn.irq_mask(d);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
}
static void gic_unmask_irq(struct irq_data *d)
{
u32 mask = 1 << (d->irq % 32);
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
if (gic_arch_extn.irq_unmask)
gic_arch_extn.irq_unmask(d);
writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
}
static void gic_eoi_irq(struct irq_data *d)
{
if (gic_arch_extn.irq_eoi) {
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
gic_arch_extn.irq_eoi(d);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
}
writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
@@ -129,7 +129,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
if (gic_arch_extn.irq_set_type)
gic_arch_extn.irq_set_type(d, type);
@@ -154,7 +154,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
if (enabled)
writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
return 0;
}
@@ -180,12 +180,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
return -EINVAL;
mask = 0xff << shift;
- bit = 1 << (cpu + shift);
+ bit = 1 << (cpu_logical_map(cpu) + shift);
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
val = readl_relaxed(reg) & ~mask;
writel_relaxed(val | bit, reg);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
return IRQ_SET_MASK_OK;
}
@@ -215,9 +215,9 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
chained_irq_enter(chip, desc);
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
gic_irq = (status & 0x3ff);
if (gic_irq == 1023)
@@ -259,9 +259,15 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
unsigned int irq_start)
{
unsigned int gic_irqs, irq_limit, i;
+ u32 cpumask;
void __iomem *base = gic->dist_base;
- u32 cpumask = 1 << smp_processor_id();
+ u32 cpu = 0;
+#ifdef CONFIG_SMP
+ cpu = cpu_logical_map(smp_processor_id());
+#endif
+
+ cpumask = 1 << cpu;
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
@@ -382,7 +388,12 @@ void __cpuinit gic_enable_ppi(unsigned int irq)
#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
- unsigned long map = *cpus_addr(*mask);
+ int cpu;
+ unsigned long map = 0;
+
+ /* Convert our logical CPU mask into a physical one. */
+ for_each_cpu(cpu, mask)
+ map |= 1 << cpu_logical_map(cpu);
/*
* Ensure that stores to Normal memory are visible to the
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index 97912fa4878..7129cfbdacd 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -1546,7 +1546,7 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
/* Start the next */
case PL330_OP_START:
- if (!_start(thrd))
+ if (!_thrd_active(thrd) && !_start(thrd))
ret = -EIO;
break;
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 7aa4262ada7..01f18a421b1 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -259,7 +259,6 @@ static void __init vic_disable(void __iomem *base)
writel(0, base + VIC_INT_SELECT);
writel(0, base + VIC_INT_ENABLE);
writel(~0, base + VIC_INT_ENABLE_CLEAR);
- writel(0, base + VIC_IRQ_STATUS);
writel(0, base + VIC_ITCR);
writel(~0, base + VIC_INT_SOFT_CLEAR);
}
@@ -347,7 +346,8 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
/* Identify which VIC cell this one is, by reading the ID */
for (i = 0; i < 4; i++) {
- u32 addr = ((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
+ void __iomem *addr;
+ addr = (void __iomem *)((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
cellid |= (readl(addr) & 0xff) << (8 * i);
}
vendor = (cellid >> 12) & 0xff;
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index 7196ade07e2..1103f62a196 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -1,5 +1,6 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_TINY_RCU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -8,20 +9,29 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_INTEGRATOR=y
CONFIG_ARCH_INTEGRATOR_AP=y
+CONFIG_ARCH_INTEGRATOR_CP=y
CONFIG_CPU_ARM720T=y
CONFIG_CPU_ARM920T=y
+CONFIG_CPU_ARM922T=y
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_ARM1020=y
+CONFIG_CPU_ARM1022=y
+CONFIG_CPU_ARM1026=y
CONFIG_PCI=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
CONFIG_LEDS=y
CONFIG_LEDS_CPU=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=ttyAM0,38400n8 root=/dev/nfs ip=bootp mem=32M"
+CONFIG_CMDLINE="console=ttyAM0,38400n8 root=/dev/nfs ip=bootp"
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -32,7 +42,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_AFS_PARTS=y
CONFIG_MTD_CHAR=y
@@ -40,6 +49,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_PHYSMAP=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
@@ -56,6 +66,8 @@ CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_MATROX=y
CONFIG_FB_MATROX_MILLENIUM=y
CONFIG_FB_MATROX_MYSTIQUE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL030=y
CONFIG_EXT2_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
@@ -68,4 +80,3 @@ CONFIG_NFSD_V3=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_ERRORS=y
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 6550db3aa5c..960abceb8e1 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -1,3 +1,20 @@
include include/asm-generic/Kbuild.asm
header-y += hwcap.h
+
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += cputime.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += ioctl.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += resource.h
+generic-y += sections.h
+generic-y += siginfo.h
+generic-y += sizes.h
diff --git a/arch/arm/include/asm/auxvec.h b/arch/arm/include/asm/auxvec.h
deleted file mode 100644
index c0536f6b29a..00000000000
--- a/arch/arm/include/asm/auxvec.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASMARM_AUXVEC_H
-#define __ASMARM_AUXVEC_H
-
-#endif
diff --git a/arch/arm/include/asm/bitsperlong.h b/arch/arm/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b..00000000000
--- a/arch/arm/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 4d88425a416..9abe7a07d5a 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -3,21 +3,58 @@
#ifdef CONFIG_BUG
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-extern void __bug(const char *file, int line) __attribute__((noreturn));
-
-/* give file/line information */
-#define BUG() __bug(__FILE__, __LINE__)
+/*
+ * Use a suitable undefined instruction to use for ARM/Thumb2 bug handling.
+ * We need to be careful not to conflict with those used by other modules and
+ * the register_undef_hook() system.
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+#define BUG_INSTR_VALUE 0xde02
+#define BUG_INSTR_TYPE ".hword "
#else
+#define BUG_INSTR_VALUE 0xe7f001f2
+#define BUG_INSTR_TYPE ".word "
+#endif
-/* this just causes an oops */
-#define BUG() do { *(int *)0 = 0; } while (1)
-#endif
+#define BUG() _BUG(__FILE__, __LINE__, BUG_INSTR_VALUE)
+#define _BUG(file, line, value) __BUG(file, line, value)
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+/*
+ * The extra indirection is to ensure that the __FILE__ string comes through
+ * OK. Many version of gcc do not support the asm %c parameter which would be
+ * preferable to this unpleasantness. We use mergeable string sections to
+ * avoid multiple copies of the string appearing in the kernel image.
+ */
+
+#define __BUG(__file, __line, __value) \
+do { \
+ BUILD_BUG_ON(sizeof(struct bug_entry) != 12); \
+ asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \
+ ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
+ "2:\t.asciz " #__file "\n" \
+ ".popsection\n" \
+ ".pushsection __bug_table,\"a\"\n" \
+ "3:\t.word 1b, 2b\n" \
+ "\t.hword " #__line ", 0\n" \
+ ".popsection"); \
+ unreachable(); \
+} while (0)
+
+#else /* not CONFIG_DEBUG_BUGVERBOSE */
+
+#define __BUG(__file, __line, __value) \
+do { \
+ asm volatile(BUG_INSTR_TYPE #__value); \
+ unreachable(); \
+} while (0)
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
#define HAVE_ARCH_BUG
-#endif
+#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
index c023db09fcc..7ea78144ae2 100644
--- a/arch/arm/include/asm/cachetype.h
+++ b/arch/arm/include/asm/cachetype.h
@@ -7,6 +7,7 @@
#define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
#define CACHEID_ASID_TAGGED (1 << 3)
#define CACHEID_VIPT_I_ALIASING (1 << 4)
+#define CACHEID_PIPT (1 << 5)
extern unsigned int cacheid;
@@ -16,6 +17,7 @@ extern unsigned int cacheid;
#define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING)
#define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED)
#define icache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_I_ALIASING)
+#define icache_is_pipt() cacheid_is(CACHEID_PIPT)
/*
* __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
@@ -26,7 +28,8 @@ extern unsigned int cacheid;
#if __LINUX_ARM_ARCH__ >= 7
#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING |\
CACHEID_ASID_TAGGED |\
- CACHEID_VIPT_I_ALIASING)
+ CACHEID_VIPT_I_ALIASING |\
+ CACHEID_PIPT)
#elif __LINUX_ARM_ARCH__ >= 6
#define __CACHEID_ARCH_MIN (~CACHEID_VIVT)
#else
diff --git a/arch/arm/include/asm/cputime.h b/arch/arm/include/asm/cputime.h
deleted file mode 100644
index 3a8002a5fec..00000000000
--- a/arch/arm/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARM_CPUTIME_H
-#define __ARM_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __ARM_CPUTIME_H */
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cd4458f6417..cb47d28cbe1 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -8,6 +8,7 @@
#define CPUID_CACHETYPE 1
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3
+#define CPUID_MPIDR 5
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
@@ -70,6 +71,11 @@ static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
return read_cpuid(CPUID_TCM);
}
+static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
+{
+ return read_cpuid(CPUID_MPIDR);
+}
+
/*
* Intel's XScale3 core supports some v6 features (supersections, L2)
* but advertises itself as v5 as it does not support the v6 ISA. For
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 9f390ce335c..6615f03f56a 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -10,6 +10,9 @@ struct dev_archdata {
#ifdef CONFIG_DMABOUNCE
struct dmabounce_device_info *dmabounce;
#endif
+#ifdef CONFIG_IOMMU_API
+ void *iommu; /* private IOMMU data */
+#endif
};
struct pdev_archdata {
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7a21d0bf713..28b7ee8d739 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -32,7 +32,7 @@ static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
- return (void *)__bus_to_virt(addr);
+ return (void *)__bus_to_virt((unsigned long)addr);
}
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 628670e9d7c..69a5b0b6455 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -34,18 +34,18 @@
#define DMA_MODE_CASCADE 0xc0
#define DMA_AUTOINIT 0x10
-extern spinlock_t dma_spin_lock;
+extern raw_spinlock_t dma_spin_lock;
static inline unsigned long claim_dma_lock(void)
{
unsigned long flags;
- spin_lock_irqsave(&dma_spin_lock, flags);
+ raw_spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}
static inline void release_dma_lock(unsigned long flags)
{
- spin_unlock_irqrestore(&dma_spin_lock, flags);
+ raw_spin_unlock_irqrestore(&dma_spin_lock, flags);
}
/* Clear the 'DMA Pointer Flip Flop'.
diff --git a/arch/arm/include/asm/ecard.h b/arch/arm/include/asm/ecard.h
index 29f2610efc7..eaea14676d5 100644
--- a/arch/arm/include/asm/ecard.h
+++ b/arch/arm/include/asm/ecard.h
@@ -161,7 +161,6 @@ struct expansion_card {
/* Private internal data */
const char *card_desc; /* Card description */
- CONST unsigned int podaddr; /* Base Linux address for card */
CONST loader_t loader; /* loader program */
u64 dma_mask;
};
diff --git a/arch/arm/include/asm/emergency-restart.h b/arch/arm/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42..00000000000
--- a/arch/arm/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/arm/include/asm/errno.h b/arch/arm/include/asm/errno.h
deleted file mode 100644
index 6e60f0612bb..00000000000
--- a/arch/arm/include/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ARM_ERRNO_H
-#define _ARM_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif
diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
new file mode 100644
index 00000000000..5abaf5bbd98
--- /dev/null
+++ b/arch/arm/include/asm/exception.h
@@ -0,0 +1,19 @@
+/*
+ * Annotations for marking C functions as exception handlers.
+ *
+ * These should only be used for C functions that are called from the low
+ * level exception entry code and not any intervening C code.
+ */
+#ifndef __ASM_ARM_EXCEPTION_H
+#define __ASM_ARM_EXCEPTION_H
+
+#include <linux/ftrace.h>
+
+#define __exception __attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry __irq_entry
+#else
+#define __exception_irq_entry __exception
+#endif
+
+#endif /* __ASM_ARM_EXCEPTION_H */
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 99a6ed7e1bf..434edccdf7f 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -52,6 +52,8 @@
#define L2X0_LOCKDOWN_WAY_D_BASE 0x900
#define L2X0_LOCKDOWN_WAY_I_BASE 0x904
#define L2X0_LOCKDOWN_STRIDE 0x08
+#define L2X0_ADDR_FILTER_START 0xC00
+#define L2X0_ADDR_FILTER_END 0xC04
#define L2X0_TEST_OPERATION 0xF00
#define L2X0_LINE_DATA 0xF10
#define L2X0_LINE_TAG 0xF30
@@ -65,8 +67,23 @@
#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
#define L2X0_CACHE_ID_PART_L310 (3 << 6)
+#define L2X0_CACHE_ID_RTL_MASK 0x3f
+#define L2X0_CACHE_ID_RTL_R0P0 0x0
+#define L2X0_CACHE_ID_RTL_R1P0 0x2
+#define L2X0_CACHE_ID_RTL_R2P0 0x4
+#define L2X0_CACHE_ID_RTL_R3P0 0x5
+#define L2X0_CACHE_ID_RTL_R3P1 0x6
+#define L2X0_CACHE_ID_RTL_R3P2 0x8
#define L2X0_AUX_CTRL_MASK 0xc0000fff
+#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
+#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7
+#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
+#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3)
+#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
+#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6)
+#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
+#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9)
#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
@@ -77,8 +94,33 @@
#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
+#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0
+#define L2X0_LATENCY_CTRL_RD_SHIFT 4
+#define L2X0_LATENCY_CTRL_WR_SHIFT 8
+
+#define L2X0_ADDR_FILTER_EN 1
+
#ifndef __ASSEMBLY__
extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
+extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask);
+
+struct l2x0_regs {
+ unsigned long phy_base;
+ unsigned long aux_ctrl;
+ /*
+ * Whether the following registers need to be saved/restored
+ * depends on platform
+ */
+ unsigned long tag_latency;
+ unsigned long data_latency;
+ unsigned long filter_start;
+ unsigned long filter_end;
+ unsigned long prefetch_ctrl;
+ unsigned long pwr_ctrl;
+};
+
+extern struct l2x0_regs l2x0_saved_regs;
+
#endif
#endif
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d66605dea55..065d100fa63 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -80,6 +80,7 @@ extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);
/*
@@ -110,6 +111,27 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
#include <mach/io.h>
/*
+ * This is the limit of PC card/PCI/ISA IO space, which is by default
+ * 64K if we have PC card, PCI or ISA support. Otherwise, default to
+ * zero to prevent ISA/PCI drivers claiming IO space (and potentially
+ * oopsing.)
+ *
+ * Only set this larger if you really need inb() et.al. to operate over
+ * a larger address space. Note that SOC_COMMON ioremaps each sockets
+ * IO space area, and so inb() et.al. must be defined to operate as per
+ * readb() et.al. on such platforms.
+ */
+#ifndef IO_SPACE_LIMIT
+#if defined(CONFIG_PCMCIA_SOC_COMMON) || defined(CONFIG_PCMCIA_SOC_COMMON_MODULE)
+#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff)
+#elif defined(CONFIG_PCI) || defined(CONFIG_ISA) || defined(CONFIG_PCCARD)
+#define IO_SPACE_LIMIT ((resource_size_t)0xffff)
+#else
+#define IO_SPACE_LIMIT ((resource_size_t)0)
+#endif
+#endif
+
+/*
* IO port access primitives
* -------------------------
*
@@ -189,11 +211,11 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
* IO port primitives for more information.
*/
#ifdef __mem_pci
-#define readb_relaxed(c) ({ u8 __v = __raw_readb(__mem_pci(c)); __v; })
-#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \
- __raw_readw(__mem_pci(c))); __v; })
-#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \
- __raw_readl(__mem_pci(c))); __v; })
+#define readb_relaxed(c) ({ u8 __r = __raw_readb(__mem_pci(c)); __r; })
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+ __raw_readw(__mem_pci(c))); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+ __raw_readl(__mem_pci(c))); __r; })
#define writeb_relaxed(v,c) ((void)__raw_writeb(v,__mem_pci(c)))
#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \
@@ -238,7 +260,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
* ioremap and friends.
*
* ioremap takes a PCI memory address, as specified in
- * Documentation/IO-mapping.txt.
+ * Documentation/io-mapping.txt.
*
*/
#ifndef __arch_ioremap
@@ -260,10 +282,16 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
+#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
#define iowrite8(v,p) ({ __iowmb(); (void)__raw_writeb(v, p); })
#define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); })
#define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); })
+#define iowrite16be(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_be32(v), p); })
+
#define ioread8_rep(p,d,c) __raw_readsb(p,d,c)
#define ioread16_rep(p,d,c) __raw_readsw(p,d,c)
#define ioread32_rep(p,d,c) __raw_readsl(p,d,c)
diff --git a/arch/arm/include/asm/ioctl.h b/arch/arm/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe..00000000000
--- a/arch/arm/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/arm/include/asm/irq_regs.h b/arch/arm/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b7027..00000000000
--- a/arch/arm/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/arm/include/asm/kdebug.h b/arch/arm/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b03766..00000000000
--- a/arch/arm/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/arm/include/asm/local.h b/arch/arm/include/asm/local.h
deleted file mode 100644
index c11c530f74d..00000000000
--- a/arch/arm/include/asm/local.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local.h>
diff --git a/arch/arm/include/asm/local64.h b/arch/arm/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc23..00000000000
--- a/arch/arm/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
index 080d74f8128..6fd955d34c6 100644
--- a/arch/arm/include/asm/localtimer.h
+++ b/arch/arm/include/asm/localtimer.h
@@ -10,6 +10,8 @@
#ifndef __ASM_ARM_LOCALTIMER_H
#define __ASM_ARM_LOCALTIMER_H
+#include <linux/errno.h>
+
struct clock_event_device;
/*
@@ -22,6 +24,10 @@ void percpu_timer_setup(void);
*/
asmlinkage void do_local_timer(struct pt_regs *);
+/*
+ * Called from C code
+ */
+void handle_local_timer(struct pt_regs *);
#ifdef CONFIG_LOCAL_TIMERS
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 217aa1911dd..c5699987fa9 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -34,8 +34,7 @@ struct machine_desc {
unsigned int reserve_lp1 :1; /* never has lp1 */
unsigned int reserve_lp2 :1; /* never has lp2 */
unsigned int soft_reboot :1; /* soft reboot */
- void (*fixup)(struct machine_desc *,
- struct tag *, char **,
+ void (*fixup)(struct tag *, char **,
struct meminfo *);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index b8de516e600..441fc4fe826 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -160,7 +160,6 @@
* so that all we need to do is modify the 8-bit constant field.
*/
#define __PV_BITS_31_24 0x81000000
-#define __PV_BITS_23_16 0x00810000
extern unsigned long __pv_phys_offset;
#define PHYS_OFFSET __pv_phys_offset
@@ -178,9 +177,6 @@ static inline unsigned long __virt_to_phys(unsigned long x)
{
unsigned long t;
__pv_stub(x, t, "add", __PV_BITS_31_24);
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
- __pv_stub(t, t, "add", __PV_BITS_23_16);
-#endif
return t;
}
@@ -188,9 +184,6 @@ static inline unsigned long __phys_to_virt(unsigned long x)
{
unsigned long t;
__pv_stub(x, t, "sub", __PV_BITS_31_24);
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
- __pv_stub(t, t, "sub", __PV_BITS_23_16);
-#endif
return t;
}
#else
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b4ffe9d5b52..14965658a92 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,7 +6,7 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
unsigned int id;
- spinlock_t id_lock;
+ raw_spinlock_t id_lock;
#endif
unsigned int kvm_seq;
} mm_context_t;
@@ -16,7 +16,7 @@ typedef struct {
/* init_mm.context.id_lock should be initialized. */
#define INIT_MM_CONTEXT(name) \
- .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
+ .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
#else
#define ASID(mm) (0)
#endif
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 543b44916d2..6c6809f982f 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -31,11 +31,7 @@ struct mod_arch_specific {
/* Add __virt_to_phys patching state as well */
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-#define MODULE_ARCH_VERMAGIC_P2V "p2v16 "
-#else
#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
-#endif
#else
#define MODULE_ARCH_VERMAGIC_P2V ""
#endif
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index d8387437ec5..53426c66352 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -34,6 +34,7 @@ struct outer_cache_fns {
void (*sync)(void);
#endif
void (*set_debug)(unsigned long);
+ void (*resume)(void);
};
#ifdef CONFIG_OUTER_CACHE
@@ -74,6 +75,12 @@ static inline void outer_disable(void)
outer_cache.disable();
}
+static inline void outer_resume(void)
+{
+ if (outer_cache.resume)
+ outer_cache.resume();
+}
+
#else
static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index ac75d084888..ca94653f1ec 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -151,47 +151,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
-typedef unsigned long pteval_t;
-
-#undef STRICT_MM_TYPECHECKS
-
-#ifdef STRICT_MM_TYPECHECKS
-/*
- * These are used to make use of C type-checking..
- */
-typedef struct { pteval_t pte; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
-typedef struct { unsigned long pgd[2]; } pgd_t;
-typedef struct { unsigned long pgprot; } pgprot_t;
-
-#define pte_val(x) ((x).pte)
-#define pmd_val(x) ((x).pmd)
-#define pgd_val(x) ((x).pgd[0])
-#define pgprot_val(x) ((x).pgprot)
-
-#define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-#else
-/*
- * .. while these make it easier on the compiler
- */
-typedef pteval_t pte_t;
-typedef unsigned long pmd_t;
-typedef unsigned long pgd_t[2];
-typedef unsigned long pgprot_t;
-
-#define pte_val(x) (x)
-#define pmd_val(x) (x)
-#define pgd_val(x) ((x)[0])
-#define pgprot_val(x) (x)
-
-#define __pte(x) (x)
-#define __pmd(x) (x)
-#define __pgprot(x) (x)
-
-#endif /* STRICT_MM_TYPECHECKS */
+#include <asm/pgtable-2level-types.h>
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
deleted file mode 100644
index b4e32d8ec07..00000000000
--- a/arch/arm/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARM_PERCPU
-#define __ARM_PERCPU
-
-#include <asm-generic/percpu.h>
-
-#endif
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 22de005f159..3e08fd3fbb6 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -105,9 +105,9 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
}
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
- unsigned long prot)
+ pmdval_t prot)
{
- unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot;
+ pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;
pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
flush_pmd_entry(pmdp);
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
new file mode 100644
index 00000000000..5cfba15cb40
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -0,0 +1,93 @@
+/*
+ * arch/arm/include/asm/pgtable-2level-hwdef.h
+ *
+ * Copyright (C) 1995-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_PGTABLE_2LEVEL_HWDEF_H
+#define _ASM_PGTABLE_2LEVEL_HWDEF_H
+
+/*
+ * Hardware page table definitions.
+ *
+ * + Level 1 descriptor (PMD)
+ * - common
+ */
+#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
+#define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
+#define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
+#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
+#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
+#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
+/*
+ * - section
+ */
+#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
+#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
+#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 1) << 10)
+#define PMD_SECT_AP_READ (_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_TEX(x) (_AT(pmdval_t, (x)) << 12) /* v5 */
+#define PMD_SECT_APX (_AT(pmdval_t, 1) << 15) /* v6 */
+#define PMD_SECT_S (_AT(pmdval_t, 1) << 16) /* v6 */
+#define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
+#define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
+#define PMD_SECT_AF (_AT(pmdval_t, 0))
+
+#define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
+#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
+#define PMD_SECT_WT (PMD_SECT_CACHEABLE)
+#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
+#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
+
+/*
+ * - coarse table (not used)
+ */
+
+/*
+ * + Level 2 descriptor (PTE)
+ * - common
+ */
+#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
+#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0)
+#define PTE_TYPE_LARGE (_AT(pteval_t, 1) << 0)
+#define PTE_TYPE_SMALL (_AT(pteval_t, 2) << 0)
+#define PTE_TYPE_EXT (_AT(pteval_t, 3) << 0) /* v5 */
+#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2)
+#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3)
+
+/*
+ * - extended small page/tiny page
+ */
+#define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
+#define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
+#define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
+#define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
+#define PTE_EXT_AP_UNO_SRO (_AT(pteval_t, 0) << 4)
+#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0)
+#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1)
+#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0)
+#define PTE_EXT_TEX(x) (_AT(pteval_t, (x)) << 6) /* v5 */
+#define PTE_EXT_APX (_AT(pteval_t, 1) << 9) /* v6 */
+#define PTE_EXT_COHERENT (_AT(pteval_t, 1) << 9) /* XScale3 */
+#define PTE_EXT_SHARED (_AT(pteval_t, 1) << 10) /* v6 */
+#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* v6 */
+
+/*
+ * - small page
+ */
+#define PTE_SMALL_AP_MASK (_AT(pteval_t, 0xff) << 4)
+#define PTE_SMALL_AP_UNO_SRO (_AT(pteval_t, 0x00) << 4)
+#define PTE_SMALL_AP_UNO_SRW (_AT(pteval_t, 0x55) << 4)
+#define PTE_SMALL_AP_URO_SRW (_AT(pteval_t, 0xaa) << 4)
+#define PTE_SMALL_AP_URW_SRW (_AT(pteval_t, 0xff) << 4)
+
+#define PHYS_MASK (~0UL)
+
+#endif
diff --git a/arch/arm/include/asm/pgtable-2level-types.h b/arch/arm/include/asm/pgtable-2level-types.h
new file mode 100644
index 00000000000..66cb5b0e89c
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-2level-types.h
@@ -0,0 +1,67 @@
+/*
+ * arch/arm/include/asm/pgtable-2level-types.h
+ *
+ * Copyright (C) 1995-2003 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_2LEVEL_TYPES_H
+#define _ASM_PGTABLE_2LEVEL_TYPES_H
+
+#include <asm/types.h>
+
+typedef u32 pteval_t;
+typedef u32 pmdval_t;
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { pteval_t pte; } pte_t;
+typedef struct { pmdval_t pmd; } pmd_t;
+typedef struct { pmdval_t pgd[2]; } pgd_t;
+typedef struct { pteval_t pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd[0])
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef pteval_t pte_t;
+typedef pmdval_t pmd_t;
+typedef pmdval_t pgd_t[2];
+typedef pteval_t pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) ((x)[0])
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgprot(x) (x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+#endif /* _ASM_PGTABLE_2LEVEL_TYPES_H */
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
new file mode 100644
index 00000000000..470457e1cfc
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -0,0 +1,143 @@
+/*
+ * arch/arm/include/asm/pgtable-2level.h
+ *
+ * Copyright (C) 1995-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_PGTABLE_2LEVEL_H
+#define _ASM_PGTABLE_2LEVEL_H
+
+/*
+ * Hardware-wise, we have a two level page table structure, where the first
+ * level has 4096 entries, and the second level has 256 entries. Each entry
+ * is one 32-bit word. Most of the bits in the second level entry are used
+ * by hardware, and there aren't any "accessed" and "dirty" bits.
+ *
+ * Linux on the other hand has a three level page table structure, which can
+ * be wrapped to fit a two level page table structure easily - using the PGD
+ * and PTE only. However, Linux also expects one "PTE" table per page, and
+ * at least a "dirty" bit.
+ *
+ * Therefore, we tweak the implementation slightly - we tell Linux that we
+ * have 2048 entries in the first level, each of which is 8 bytes (iow, two
+ * hardware pointers to the second level.) The second level contains two
+ * hardware PTE tables arranged contiguously, preceded by Linux versions
+ * which contain the state information Linux needs. We, therefore, end up
+ * with 512 entries in the "PTE" level.
+ *
+ * This leads to the page tables having the following layout:
+ *
+ * pgd pte
+ * | |
+ * +--------+
+ * | | +------------+ +0
+ * +- - - - + | Linux pt 0 |
+ * | | +------------+ +1024
+ * +--------+ +0 | Linux pt 1 |
+ * | |-----> +------------+ +2048
+ * +- - - - + +4 | h/w pt 0 |
+ * | |-----> +------------+ +3072
+ * +--------+ +8 | h/w pt 1 |
+ * | | +------------+ +4096
+ *
+ * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
+ * PTE_xxx for definitions of bits appearing in the "h/w pt".
+ *
+ * PMD_xxx definitions refer to bits in the first level page table.
+ *
+ * The "dirty" bit is emulated by only granting hardware write permission
+ * iff the page is marked "writable" and "dirty" in the Linux PTE. This
+ * means that a write to a clean page will cause a permission fault, and
+ * the Linux MM layer will mark the page dirty via handle_pte_fault().
+ * For the hardware to notice the permission change, the TLB entry must
+ * be flushed, and ptep_set_access_flags() does that for us.
+ *
+ * The "accessed" or "young" bit is emulated by a similar method; we only
+ * allow accesses to the page if the "young" bit is set. Accesses to the
+ * page will cause a fault, and handle_pte_fault() will set the young bit
+ * for us as long as the page is marked present in the corresponding Linux
+ * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is
+ * up to date.
+ *
+ * However, when the "young" bit is cleared, we deny access to the page
+ * by clearing the hardware PTE. Currently Linux does not flush the TLB
+ * for us in this case, which means the TLB will retain the transation
+ * until either the TLB entry is evicted under pressure, or a context
+ * switch which changes the user space mapping occurs.
+ */
+#define PTRS_PER_PTE 512
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PGD 2048
+
+#define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
+#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t))
+#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))
+
+/*
+ * PMD_SHIFT determines the size of the area a second-level page table can map
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+#define PMD_SHIFT 21
+#define PGDIR_SHIFT 21
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT 20
+#define SECTION_SIZE (1UL << SECTION_SHIFT)
+#define SECTION_MASK (~(SECTION_SIZE-1))
+
+/*
+ * ARMv6 supersection address mask and size definitions.
+ */
+#define SUPERSECTION_SHIFT 24
+#define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT)
+#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+/*
+ * "Linux" PTE definitions.
+ *
+ * We keep two sets of PTEs - the hardware and the linux version.
+ * This allows greater flexibility in the way we map the Linux bits
+ * onto the hardware tables, and allows us to have YOUNG and DIRTY
+ * bits.
+ *
+ * The PTE table pointer refers to the hardware entries; the "Linux"
+ * entries are stored 1024 bytes below.
+ */
+#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0)
+#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1)
+#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
+#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6)
+#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7)
+#define L_PTE_USER (_AT(pteval_t, 1) << 8)
+#define L_PTE_XN (_AT(pteval_t, 1) << 9)
+#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
+
+/*
+ * These are the memory types, defined to be compatible with
+ * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
+ */
+#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */
+#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */
+#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */
+#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */
+#define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */
+#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */
+#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */
+#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
+#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
+#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
+#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
+
+#endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-hwdef.h b/arch/arm/include/asm/pgtable-hwdef.h
index fd1521d5cb9..183111164ce 100644
--- a/arch/arm/include/asm/pgtable-hwdef.h
+++ b/arch/arm/include/asm/pgtable-hwdef.h
@@ -10,81 +10,6 @@
#ifndef _ASMARM_PGTABLE_HWDEF_H
#define _ASMARM_PGTABLE_HWDEF_H
-/*
- * Hardware page table definitions.
- *
- * + Level 1 descriptor (PMD)
- * - common
- */
-#define PMD_TYPE_MASK (3 << 0)
-#define PMD_TYPE_FAULT (0 << 0)
-#define PMD_TYPE_TABLE (1 << 0)
-#define PMD_TYPE_SECT (2 << 0)
-#define PMD_BIT4 (1 << 4)
-#define PMD_DOMAIN(x) ((x) << 5)
-#define PMD_PROTECTION (1 << 9) /* v5 */
-/*
- * - section
- */
-#define PMD_SECT_BUFFERABLE (1 << 2)
-#define PMD_SECT_CACHEABLE (1 << 3)
-#define PMD_SECT_XN (1 << 4) /* v6 */
-#define PMD_SECT_AP_WRITE (1 << 10)
-#define PMD_SECT_AP_READ (1 << 11)
-#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */
-#define PMD_SECT_APX (1 << 15) /* v6 */
-#define PMD_SECT_S (1 << 16) /* v6 */
-#define PMD_SECT_nG (1 << 17) /* v6 */
-#define PMD_SECT_SUPER (1 << 18) /* v6 */
-
-#define PMD_SECT_UNCACHED (0)
-#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
-#define PMD_SECT_WT (PMD_SECT_CACHEABLE)
-#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
-#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
-#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
-#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
-
-/*
- * - coarse table (not used)
- */
-
-/*
- * + Level 2 descriptor (PTE)
- * - common
- */
-#define PTE_TYPE_MASK (3 << 0)
-#define PTE_TYPE_FAULT (0 << 0)
-#define PTE_TYPE_LARGE (1 << 0)
-#define PTE_TYPE_SMALL (2 << 0)
-#define PTE_TYPE_EXT (3 << 0) /* v5 */
-#define PTE_BUFFERABLE (1 << 2)
-#define PTE_CACHEABLE (1 << 3)
-
-/*
- * - extended small page/tiny page
- */
-#define PTE_EXT_XN (1 << 0) /* v6 */
-#define PTE_EXT_AP_MASK (3 << 4)
-#define PTE_EXT_AP0 (1 << 4)
-#define PTE_EXT_AP1 (2 << 4)
-#define PTE_EXT_AP_UNO_SRO (0 << 4)
-#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0)
-#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1)
-#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0)
-#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */
-#define PTE_EXT_APX (1 << 9) /* v6 */
-#define PTE_EXT_COHERENT (1 << 9) /* XScale3 */
-#define PTE_EXT_SHARED (1 << 10) /* v6 */
-#define PTE_EXT_NG (1 << 11) /* v6 */
-
-/*
- * - small page
- */
-#define PTE_SMALL_AP_MASK (0xff << 4)
-#define PTE_SMALL_AP_UNO_SRO (0x00 << 4)
-#define PTE_SMALL_AP_UNO_SRW (0x55 << 4)
-#define PTE_SMALL_AP_URO_SRW (0xaa << 4)
-#define PTE_SMALL_AP_URW_SRW (0xff << 4)
+#include <asm/pgtable-2level-hwdef.h>
#endif
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 5750704e027..8ade1840c6f 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -24,6 +24,8 @@
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable-2level.h>
+
/*
* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
@@ -41,79 +43,6 @@
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif
-/*
- * Hardware-wise, we have a two level page table structure, where the first
- * level has 4096 entries, and the second level has 256 entries. Each entry
- * is one 32-bit word. Most of the bits in the second level entry are used
- * by hardware, and there aren't any "accessed" and "dirty" bits.
- *
- * Linux on the other hand has a three level page table structure, which can
- * be wrapped to fit a two level page table structure easily - using the PGD
- * and PTE only. However, Linux also expects one "PTE" table per page, and
- * at least a "dirty" bit.
- *
- * Therefore, we tweak the implementation slightly - we tell Linux that we
- * have 2048 entries in the first level, each of which is 8 bytes (iow, two
- * hardware pointers to the second level.) The second level contains two
- * hardware PTE tables arranged contiguously, preceded by Linux versions
- * which contain the state information Linux needs. We, therefore, end up
- * with 512 entries in the "PTE" level.
- *
- * This leads to the page tables having the following layout:
- *
- * pgd pte
- * | |
- * +--------+
- * | | +------------+ +0
- * +- - - - + | Linux pt 0 |
- * | | +------------+ +1024
- * +--------+ +0 | Linux pt 1 |
- * | |-----> +------------+ +2048
- * +- - - - + +4 | h/w pt 0 |
- * | |-----> +------------+ +3072
- * +--------+ +8 | h/w pt 1 |
- * | | +------------+ +4096
- *
- * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
- * PTE_xxx for definitions of bits appearing in the "h/w pt".
- *
- * PMD_xxx definitions refer to bits in the first level page table.
- *
- * The "dirty" bit is emulated by only granting hardware write permission
- * iff the page is marked "writable" and "dirty" in the Linux PTE. This
- * means that a write to a clean page will cause a permission fault, and
- * the Linux MM layer will mark the page dirty via handle_pte_fault().
- * For the hardware to notice the permission change, the TLB entry must
- * be flushed, and ptep_set_access_flags() does that for us.
- *
- * The "accessed" or "young" bit is emulated by a similar method; we only
- * allow accesses to the page if the "young" bit is set. Accesses to the
- * page will cause a fault, and handle_pte_fault() will set the young bit
- * for us as long as the page is marked present in the corresponding Linux
- * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is
- * up to date.
- *
- * However, when the "young" bit is cleared, we deny access to the page
- * by clearing the hardware PTE. Currently Linux does not flush the TLB
- * for us in this case, which means the TLB will retain the transation
- * until either the TLB entry is evicted under pressure, or a context
- * switch which changes the user space mapping occurs.
- */
-#define PTRS_PER_PTE 512
-#define PTRS_PER_PMD 1
-#define PTRS_PER_PGD 2048
-
-#define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
-#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t))
-#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))
-
-/*
- * PMD_SHIFT determines the size of the area a second-level page table can map
- * PGDIR_SHIFT determines what a third-level page table entry can map
- */
-#define PMD_SHIFT 21
-#define PGDIR_SHIFT 21
-
#define LIBRARY_TEXT_START 0x0c000000
#ifndef __ASSEMBLY__
@@ -124,12 +53,6 @@ extern void __pgd_error(const char *file, int line, pgd_t);
#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
-#endif /* !__ASSEMBLY__ */
-
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
/*
* This is the lowest virtual address we can permit any user space
@@ -138,60 +61,6 @@ extern void __pgd_error(const char *file, int line, pgd_t);
*/
#define FIRST_USER_ADDRESS PAGE_SIZE
-#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-
-/*
- * section address mask and size definitions.
- */
-#define SECTION_SHIFT 20
-#define SECTION_SIZE (1UL << SECTION_SHIFT)
-#define SECTION_MASK (~(SECTION_SIZE-1))
-
-/*
- * ARMv6 supersection address mask and size definitions.
- */
-#define SUPERSECTION_SHIFT 24
-#define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT)
-#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1))
-
-/*
- * "Linux" PTE definitions.
- *
- * We keep two sets of PTEs - the hardware and the linux version.
- * This allows greater flexibility in the way we map the Linux bits
- * onto the hardware tables, and allows us to have YOUNG and DIRTY
- * bits.
- *
- * The PTE table pointer refers to the hardware entries; the "Linux"
- * entries are stored 1024 bytes below.
- */
-#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0)
-#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1)
-#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
-#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6)
-#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7)
-#define L_PTE_USER (_AT(pteval_t, 1) << 8)
-#define L_PTE_XN (_AT(pteval_t, 1) << 9)
-#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
-
-/*
- * These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
- */
-#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */
-#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */
-#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */
-#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */
-#define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */
-#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */
-#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */
-#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
-#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
-#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
-#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
-
-#ifndef __ASSEMBLY__
-
/*
* The pgprot_* and protection_map entries will be fixed up in runtime
* to include the cachable and bufferable bits based on memory policy,
@@ -327,10 +196,10 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
- return __va(pmd_val(pmd) & PAGE_MASK);
+ return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}
-#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
+#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end) (end)
@@ -351,7 +220,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte) __pte_unmap(pte)
-#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
diff --git a/arch/arm/include/asm/poll.h b/arch/arm/include/asm/poll.h
deleted file mode 100644
index c98509d3149..00000000000
--- a/arch/arm/include/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/arm/include/asm/resource.h b/arch/arm/include/asm/resource.h
deleted file mode 100644
index 734b581b5b6..00000000000
--- a/arch/arm/include/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ARM_RESOURCE_H
-#define _ARM_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif
diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h
deleted file mode 100644
index 2b8c5160388..00000000000
--- a/arch/arm/include/asm/sections.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sections.h>
diff --git a/arch/arm/include/asm/siginfo.h b/arch/arm/include/asm/siginfo.h
deleted file mode 100644
index 5e21852e603..00000000000
--- a/arch/arm/include/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASMARM_SIGINFO_H
-#define _ASMARM_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h
deleted file mode 100644
index 154b89b81d3..00000000000
--- a/arch/arm/include/asm/sizes.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-/* Size definitions
- * Copyright (C) ARM Limited 1998. All rights reserved.
- */
-#include <asm-generic/sizes.h>
-
-#define SZ_48M (SZ_32M + SZ_16M)
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index e42d96a45d3..0a17b62538c 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -33,6 +33,11 @@ extern void show_ipi_list(struct seq_file *, int);
asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
/*
+ * Called from C code, this handles an IPI.
+ */
+void handle_IPI(int ipinr, struct pt_regs *regs);
+
+/*
* Setup the set of possible CPUs (via set_cpu_possible)
*/
extern void smp_init_cpus(void);
@@ -66,6 +71,12 @@ extern void platform_secondary_init(unsigned int cpu);
extern void platform_smp_prepare_cpus(unsigned int);
/*
+ * Logical CPU mapping.
+ */
+extern int __cpu_logical_map[NR_CPUS];
+#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+
+/*
* Initial data for bringing up a secondary CPU.
*/
struct secondary_data {
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 832888d0c20..984014b9264 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -57,18 +57,12 @@
#ifndef __ASSEMBLY__
+#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <asm/outercache.h>
-#define __exception __attribute__((section(".exception.text")))
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#define __exception_irq_entry __irq_entry
-#else
-#define __exception_irq_entry __exception
-#endif
-
struct thread_info;
struct task_struct;
@@ -97,14 +91,13 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
-extern int cpu_architecture(void);
+extern int __pure cpu_architecture(void);
extern void cpu_init(void);
void arm_machine_restart(char mode, const char *cmd);
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 8077145698f..02b2f820398 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -471,7 +471,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
* these operations. This is typically used when we are removing
* PMD entries.
*/
-static inline void flush_pmd_entry(pmd_t *pmd)
+static inline void flush_pmd_entry(void *pmd)
{
const unsigned int __tlb_flag = __cpu_tlb_flags;
@@ -487,7 +487,7 @@ static inline void flush_pmd_entry(pmd_t *pmd)
dsb();
}
-static inline void clean_pmd_entry(pmd_t *pmd)
+static inline void clean_pmd_entry(void *pmd)
{
const unsigned int __tlb_flag = __cpu_tlb_flags;
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index accbd7cad9b..a7e457ed27c 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -1,6 +1,39 @@
#ifndef _ASM_ARM_TOPOLOGY_H
#define _ASM_ARM_TOPOLOGY_H
+#ifdef CONFIG_ARM_CPU_TOPOLOGY
+
+#include <linux/cpumask.h>
+
+struct cputopo_arm {
+ int thread_id;
+ int core_id;
+ int socket_id;
+ cpumask_t thread_sibling;
+ cpumask_t core_sibling;
+};
+
+extern struct cputopo_arm cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
+#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+
+#define mc_capable() (cpu_topology[0].socket_id != -1)
+#define smt_capable() (cpu_topology[0].thread_id != -1)
+
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+
+#else
+
+static inline void init_cpu_topology(void) { }
+static inline void store_cpu_topology(unsigned int cpuid) { }
+
+#endif
+
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index f7887dc53c1..68036eece34 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
-obj-$(CONFIG_PM_SLEEP) += sleep.o
+obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
@@ -66,6 +66,7 @@ obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_CPU_HAS_PMU) += pmu.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
+obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
ifneq ($(CONFIG_ARCH_EBSA110),y)
obj-y += io.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index aeef960ff79..8e3c6f11b0a 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -49,9 +49,6 @@ extern void __aeabi_ulcmp(void);
extern void fpundefinstr(void);
-
-EXPORT_SYMBOL(__backtrace);
-
/* platform dependent support */
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__const_udelay);
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 16baba2e436..1429d8989fb 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -20,6 +20,7 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/procinfo.h>
+#include <asm/hardware/cache-l2x0.h>
#include <linux/kbuild.h>
/*
@@ -92,6 +93,17 @@ int main(void)
DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
+#ifdef CONFIG_CACHE_L2X0
+ DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base));
+ DEFINE(L2X0_R_AUX_CTRL, offsetof(struct l2x0_regs, aux_ctrl));
+ DEFINE(L2X0_R_TAG_LATENCY, offsetof(struct l2x0_regs, tag_latency));
+ DEFINE(L2X0_R_DATA_LATENCY, offsetof(struct l2x0_regs, data_latency));
+ DEFINE(L2X0_R_FILTER_START, offsetof(struct l2x0_regs, filter_start));
+ DEFINE(L2X0_R_FILTER_END, offsetof(struct l2x0_regs, filter_end));
+ DEFINE(L2X0_R_PREFETCH_CTRL, offsetof(struct l2x0_regs, prefetch_ctrl));
+ DEFINE(L2X0_R_PWR_CTRL, offsetof(struct l2x0_regs, pwr_ctrl));
+ BLANK();
+#endif
#ifdef CONFIG_CPU_HAS_ASID
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
BLANK();
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index d6df359408f..c0d9203fc75 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -412,6 +412,9 @@ void pcibios_fixup_bus(struct pci_bus *bus)
printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
}
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL(pcibios_fixup_bus);
+#endif
/*
* Convert from Linux-centric to bus-centric addresses for bridge devices.
@@ -431,6 +434,7 @@ pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
region->start = res->start - offset;
region->end = res->end - offset;
}
+EXPORT_SYMBOL(pcibios_resource_to_bus);
void __devinit
pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
@@ -447,12 +451,7 @@ pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
res->start = region->start + offset;
res->end = region->end + offset;
}
-
-#ifdef CONFIG_HOTPLUG
-EXPORT_SYMBOL(pcibios_fixup_bus);
-EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
-#endif
/*
* Swizzle the device pin each time we cross a bridge.
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index bcd66e00bdb..0f852d082fc 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -151,6 +151,8 @@ printhex: adr r2, hexbuf
b printascii
ENDPROC(printhex2)
+hexbuf: .space 16
+
.ltorg
ENTRY(printascii)
@@ -175,5 +177,3 @@ ENTRY(printch)
mov r0, #0
b 1b
ENDPROC(printch)
-
-hexbuf: .space 16
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index 2c4a185f92c..7b829d9663b 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -23,7 +23,7 @@
#include <asm/mach/dma.h>
-DEFINE_SPINLOCK(dma_spin_lock);
+DEFINE_RAW_SPINLOCK(dma_spin_lock);
EXPORT_SYMBOL(dma_spin_lock);
static dma_t *dma_chan[MAX_DMA_CHANNELS];
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index d16500110ee..4dd0edab6a6 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -237,7 +237,7 @@ static void ecard_init_pgtables(struct mm_struct *mm)
memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (IO_SIZE / PGDIR_SIZE));
- src_pgd = pgd_offset(mm, EASI_BASE);
+ src_pgd = pgd_offset(mm, (unsigned long)EASI_BASE);
dst_pgd = pgd_offset(mm, EASI_START);
memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
@@ -674,44 +674,37 @@ static int __init ecard_probeirqhw(void)
#define ecard_probeirqhw() (0)
#endif
-#ifndef IO_EC_MEMC8_BASE
-#define IO_EC_MEMC8_BASE 0
-#endif
-
-static unsigned int __ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
+static void __iomem *__ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
{
- unsigned long address = 0;
+ void __iomem *address = NULL;
int slot = ec->slot_no;
if (ec->slot_no == 8)
- return IO_EC_MEMC8_BASE;
+ return ECARD_MEMC8_BASE;
ectcr &= ~(1 << slot);
switch (type) {
case ECARD_MEMC:
if (slot < 4)
- address = IO_EC_MEMC_BASE + (slot << 12);
+ address = ECARD_MEMC_BASE + (slot << 14);
break;
case ECARD_IOC:
if (slot < 4)
- address = IO_EC_IOC_BASE + (slot << 12);
-#ifdef IO_EC_IOC4_BASE
+ address = ECARD_IOC_BASE + (slot << 14);
else
- address = IO_EC_IOC4_BASE + ((slot - 4) << 12);
-#endif
+ address = ECARD_IOC4_BASE + ((slot - 4) << 14);
if (address)
- address += speed << 17;
+ address += speed << 19;
break;
-#ifdef IO_EC_EASI_BASE
case ECARD_EASI:
- address = IO_EC_EASI_BASE + (slot << 22);
+ address = ECARD_EASI_BASE + (slot << 24);
if (speed == ECARD_FAST)
ectcr |= 1 << slot;
break;
-#endif
+
default:
break;
}
@@ -990,6 +983,7 @@ ecard_probe(int slot, card_type_t type)
ecard_t **ecp;
ecard_t *ec;
struct ex_ecid cid;
+ void __iomem *addr;
int i, rc;
ec = ecard_alloc_card(type, slot);
@@ -999,7 +993,7 @@ ecard_probe(int slot, card_type_t type)
}
rc = -ENODEV;
- if ((ec->podaddr = __ecard_address(ec, type, ECARD_SYNC)) == 0)
+ if ((addr = __ecard_address(ec, type, ECARD_SYNC)) == NULL)
goto nodev;
cid.r_zero = 1;
@@ -1019,7 +1013,7 @@ ecard_probe(int slot, card_type_t type)
ec->cid.fiqmask = cid.r_fiqmask;
ec->cid.fiqoff = ecard_gets24(cid.r_fiqoff);
ec->fiqaddr =
- ec->irqaddr = (void __iomem *)ioaddr(ec->podaddr);
+ ec->irqaddr = addr;
if (ec->cid.is) {
ec->irqmask = ec->cid.irqmask;
@@ -1048,10 +1042,8 @@ ecard_probe(int slot, card_type_t type)
set_irq_flags(ec->irq, IRQF_VALID);
}
-#ifdef IO_EC_MEMC8_BASE
if (slot == 8)
ec->irq = 11;
-#endif
#ifdef CONFIG_ARCH_RPC
/* On RiscPC, only first two slots have DMA capability */
if (slot < 2)
@@ -1097,9 +1089,7 @@ static int __init ecard_init(void)
ecard_probe(slot, ECARD_IOC);
}
-#ifdef IO_EC_MEMC8_BASE
ecard_probe(8, ECARD_IOC);
-#endif
irqhw = ecard_probeirqhw();
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a87cbf889ff..9ad50c4208a 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -24,6 +24,7 @@
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
+#include <asm/system.h>
#include "entry-header.S"
#include <asm/entry-macro-multi.S>
@@ -262,8 +263,7 @@ __und_svc:
ldr r0, [r4, #-4]
#else
ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
- and r9, r0, #0xf800
- cmp r9, #0xe800 @ 32-bit instruction if xx >= 0
+ cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
ldrhhs r9, [r4] @ bottom 16 bits
orrhs r0, r9, r0, lsl #16
#endif
@@ -440,18 +440,46 @@ __und_usr:
#endif
beq call_fpe
@ Thumb instruction
-#if __LINUX_ARM_ARCH__ >= 7
+#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
+/*
+ * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
+ * can never be supported in a single kernel, this code is not applicable at
+ * all when __LINUX_ARM_ARCH__ < 6. This allows simplifying assumptions to be
+ * made about .arch directives.
+ */
+#if __LINUX_ARM_ARCH__ < 7
+/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
+#define NEED_CPU_ARCHITECTURE
+ ldr r5, .LCcpu_architecture
+ ldr r5, [r5]
+ cmp r5, #CPU_ARCH_ARMv7
+ blo __und_usr_unknown
+/*
+ * The following code won't get run unless the running CPU really is v7, so
+ * coding round the lack of ldrht on older arches is pointless. Temporarily
+ * override the assembler target arch with the minimum required instead:
+ */
+ .arch armv6t2
+#endif
2:
ARM( ldrht r5, [r4], #2 )
THUMB( ldrht r5, [r4] )
THUMB( add r4, r4, #2 )
- and r0, r5, #0xf800 @ mask bits 111x x... .... ....
- cmp r0, #0xe800 @ 32bit instruction if xx != 0
+ cmp r5, #0xe800 @ 32bit instruction if xx != 0
blo __und_usr_unknown
3: ldrht r0, [r4]
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
orr r0, r0, r5, lsl #16
+
+#if __LINUX_ARM_ARCH__ < 7
+/* If the target arch was overridden, change it back: */
+#ifdef CONFIG_CPU_32v6K
+ .arch armv6k
#else
+ .arch armv6
+#endif
+#endif /* __LINUX_ARM_ARCH__ < 7 */
+#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
b __und_usr_unknown
#endif
UNWIND(.fnend )
@@ -578,6 +606,12 @@ call_fpe:
movw_pc lr @ CP#14 (Debug)
movw_pc lr @ CP#15 (Control)
+#ifdef NEED_CPU_ARCHITECTURE
+ .align 2
+.LCcpu_architecture:
+ .word __cpu_architecture
+#endif
+
#ifdef CONFIG_NEON
.align 6
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 742b6108a00..239703dbdf4 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -21,6 +21,7 @@
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/system.h>
+#include <asm/pgtable.h>
#ifdef CONFIG_DEBUG_LL
#include <mach/debug-macro.S>
@@ -38,11 +39,14 @@
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
+#define PG_DIR_SIZE 0x4000
+#define PMD_ORDER 2
+
.globl swapper_pg_dir
- .equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000
+ .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
.macro pgtbl, rd, phys
- add \rd, \phys, #TEXT_OFFSET - 0x4000
+ add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
.endm
#ifdef CONFIG_XIP_KERNEL
@@ -148,11 +152,11 @@ __create_page_tables:
pgtbl r4, r8 @ page table address
/*
- * Clear the 16K level 1 swapper page table
+ * Clear the swapper page table
*/
mov r0, r4
mov r3, #0
- add r6, r0, #0x4000
+ add r6, r0, #PG_DIR_SIZE
1: str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
@@ -171,30 +175,30 @@ __create_page_tables:
sub r0, r0, r3 @ virt->phys offset
add r5, r5, r0 @ phys __enable_mmu
add r6, r6, r0 @ phys __enable_mmu_end
- mov r5, r5, lsr #20
- mov r6, r6, lsr #20
+ mov r5, r5, lsr #SECTION_SHIFT
+ mov r6, r6, lsr #SECTION_SHIFT
-1: orr r3, r7, r5, lsl #20 @ flags + kernel base
- str r3, [r4, r5, lsl #2] @ identity mapping
- teq r5, r6
- addne r5, r5, #1 @ next section
- bne 1b
+1: orr r3, r7, r5, lsl #SECTION_SHIFT @ flags + kernel base
+ str r3, [r4, r5, lsl #PMD_ORDER] @ identity mapping
+ cmp r5, r6
+ addlo r5, r5, #1 @ next section
+ blo 1b
/*
* Now setup the pagetables for our kernel direct
* mapped region.
*/
mov r3, pc
- mov r3, r3, lsr #20
- orr r3, r7, r3, lsl #20
- add r0, r4, #(KERNEL_START & 0xff000000) >> 18
- str r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]!
+ mov r3, r3, lsr #SECTION_SHIFT
+ orr r3, r7, r3, lsl #SECTION_SHIFT
+ add r0, r4, #(KERNEL_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
+ str r3, [r0, #((KERNEL_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
ldr r6, =(KERNEL_END - 1)
- add r0, r0, #4
- add r6, r4, r6, lsr #18
+ add r0, r0, #1 << PMD_ORDER
+ add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1: cmp r0, r6
- add r3, r3, #1 << 20
- strls r3, [r0], #4
+ add r3, r3, #1 << SECTION_SHIFT
+ strls r3, [r0], #1 << PMD_ORDER
bls 1b
#ifdef CONFIG_XIP_KERNEL
@@ -203,11 +207,11 @@ __create_page_tables:
*/
add r3, r8, #TEXT_OFFSET
orr r3, r3, r7
- add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18
- str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
+ add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
+ str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> (SECTION_SHIFT - PMD_ORDER)]!
ldr r6, =(_end - 1)
add r0, r0, #4
- add r6, r4, r6, lsr #18
+ add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1: cmp r0, r6
add r3, r3, #1 << 20
strls r3, [r0], #4
@@ -218,12 +222,12 @@ __create_page_tables:
* Then map boot params address in r2 or
* the first 1MB of ram if boot params address is not specified.
*/
- mov r0, r2, lsr #20
- movs r0, r0, lsl #20
+ mov r0, r2, lsr #SECTION_SHIFT
+ movs r0, r0, lsl #SECTION_SHIFT
moveq r0, r8
sub r3, r0, r8
add r3, r3, #PAGE_OFFSET
- add r3, r4, r3, lsr #18
+ add r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
orr r6, r7, r0
str r6, [r3]
@@ -236,21 +240,21 @@ __create_page_tables:
*/
addruart r7, r3
- mov r3, r3, lsr #20
- mov r3, r3, lsl #2
+ mov r3, r3, lsr #SECTION_SHIFT
+ mov r3, r3, lsl #PMD_ORDER
add r0, r4, r3
rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long)
cmp r3, #0x0800 @ limit to 512MB
movhi r3, #0x0800
add r6, r0, r3
- mov r3, r7, lsr #20
+ mov r3, r7, lsr #SECTION_SHIFT
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
- orr r3, r7, r3, lsl #20
+ orr r3, r7, r3, lsl #SECTION_SHIFT
1: str r3, [r0], #4
- add r3, r3, #1 << 20
- teq r0, r6
- bne 1b
+ add r3, r3, #1 << SECTION_SHIFT
+ cmp r0, r6
+ blo 1b
#else /* CONFIG_DEBUG_ICEDCC */
/* we don't need any serial debugging mappings for ICEDCC */
@@ -262,7 +266,7 @@ __create_page_tables:
* If we're using the NetWinder or CATS, we also need to map
* in the 16550-type serial port for the debug messages
*/
- add r0, r4, #0xff000000 >> 18
+ add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
orr r3, r7, #0x7c000000
str r3, [r0]
#endif
@@ -272,10 +276,10 @@ __create_page_tables:
* Similar reasons here - for debug. This is
* only for Acorn RiscPC architectures.
*/
- add r0, r4, #0x02000000 >> 18
+ add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
orr r3, r7, #0x02000000
str r3, [r0]
- add r0, r4, #0xd8000000 >> 18
+ add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
str r3, [r0]
#endif
#endif
@@ -488,13 +492,8 @@ __fixup_pv_table:
add r5, r5, r3 @ adjust table end address
add r7, r7, r3 @ adjust __pv_phys_offset address
str r8, [r7] @ save computed PHYS_OFFSET to __pv_phys_offset
-#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
mov r6, r3, lsr #24 @ constant for add/sub instructions
teq r3, r6, lsl #24 @ must be 16MiB aligned
-#else
- mov r6, r3, lsr #16 @ constant for add/sub instructions
- teq r3, r6, lsl #16 @ must be 64kiB aligned
-#endif
THUMB( it ne @ cross section branch )
bne __error
str r6, [r7, #4] @ save to __pv_offset
@@ -510,20 +509,8 @@ ENDPROC(__fixup_pv_table)
.text
__fixup_a_pv_table:
#ifdef CONFIG_THUMB2_KERNEL
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
- lsls r0, r6, #24
- lsr r6, #8
- beq 1f
- clz r7, r0
- lsr r0, #24
- lsl r0, r7
- bic r0, 0x0080
- lsrs r7, #1
- orrcs r0, #0x0080
- orr r0, r0, r7, lsl #12
-#endif
-1: lsls r6, #24
- beq 4f
+ lsls r6, #24
+ beq 2f
clz r7, r6
lsr r6, #24
lsl r6, r7
@@ -532,43 +519,25 @@ __fixup_a_pv_table:
orrcs r6, #0x0080
orr r6, r6, r7, lsl #12
orr r6, #0x4000
- b 4f
-2: @ at this point the C flag is always clear
- add r7, r3
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
- ldrh ip, [r7]
- tst ip, 0x0400 @ the i bit tells us LS or MS byte
- beq 3f
- cmp r0, #0 @ set C flag, and ...
- biceq ip, 0x0400 @ immediate zero value has a special encoding
- streqh ip, [r7] @ that requires the i bit cleared
-#endif
-3: ldrh ip, [r7, #2]
+ b 2f
+1: add r7, r3
+ ldrh ip, [r7, #2]
and ip, 0x8f00
- orrcc ip, r6 @ mask in offset bits 31-24
- orrcs ip, r0 @ mask in offset bits 23-16
+ orr ip, r6 @ mask in offset bits 31-24
strh ip, [r7, #2]
-4: cmp r4, r5
+2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
- bcc 2b
+ bcc 1b
bx lr
#else
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
- and r0, r6, #255 @ offset bits 23-16
- mov r6, r6, lsr #8 @ offset bits 31-24
-#else
- mov r0, #0 @ just in case...
-#endif
- b 3f
-2: ldr ip, [r7, r3]
+ b 2f
+1: ldr ip, [r7, r3]
bic ip, ip, #0x000000ff
- tst ip, #0x400 @ rotate shift tells us LS or MS byte
- orrne ip, ip, r6 @ mask in offset bits 31-24
- orreq ip, ip, r0 @ mask in offset bits 23-16
+ orr ip, ip, r6 @ mask in offset bits 31-24
str ip, [r7, r3]
-3: cmp r4, r5
+2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
- bcc 2b
+ bcc 1b
mov pc, lr
#endif
ENDPROC(__fixup_a_pv_table)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index de3dcab8610..53919b230e8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -35,8 +35,8 @@
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
-#include <linux/ftrace.h>
+#include <asm/exception.h>
#include <asm/system.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index e59bbd496c3..c1b4463dcc8 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -32,6 +32,24 @@ static atomic_t waiting_for_crash_ipi;
int machine_kexec_prepare(struct kimage *image)
{
+ unsigned long page_list;
+ void *reboot_code_buffer;
+ page_list = image->head & PAGE_MASK;
+
+ reboot_code_buffer = page_address(image->control_code_page);
+
+ /* Prepare parameters for reboot_code_buffer*/
+ kexec_start_address = image->start;
+ kexec_indirection_page = page_list;
+ kexec_mach_type = machine_arch_type;
+ kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
+
+ /* copy our kernel relocation code to the control code page */
+ memcpy(reboot_code_buffer,
+ relocate_new_kernel, relocate_new_kernel_size);
+
+ flush_icache_range((unsigned long) reboot_code_buffer,
+ (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
return 0;
}
@@ -82,31 +100,14 @@ void (*kexec_reinit)(void);
void machine_kexec(struct kimage *image)
{
- unsigned long page_list;
unsigned long reboot_code_buffer_phys;
void *reboot_code_buffer;
-
- page_list = image->head & PAGE_MASK;
-
/* we need both effective and real address here */
reboot_code_buffer_phys =
page_to_pfn(image->control_code_page) << PAGE_SHIFT;
reboot_code_buffer = page_address(image->control_code_page);
- /* Prepare parameters for reboot_code_buffer*/
- kexec_start_address = image->start;
- kexec_indirection_page = page_list;
- kexec_mach_type = machine_arch_type;
- kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
-
- /* copy our kernel relocation code to the control code page */
- memcpy(reboot_code_buffer,
- relocate_new_kernel, relocate_new_kernel_size);
-
-
- flush_icache_range((unsigned long) reboot_code_buffer,
- (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
printk(KERN_INFO "Bye!\n");
if (kexec_reinit)
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index cc2020c2c70..1e9be5d25e5 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -33,7 +33,7 @@
* recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
*/
#undef MODULES_VADDR
-#define MODULES_VADDR (((unsigned long)_etext + ~PGDIR_MASK) & PGDIR_MASK)
+#define MODULES_VADDR (((unsigned long)_etext + ~PMD_MASK) & PMD_MASK)
#endif
#ifdef CONFIG_MMU
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4c851834f68..6be3e2e4d83 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -321,8 +321,8 @@ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] =
ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1a347f481e5..fd0814076ff 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -319,7 +319,7 @@ void show_regs(struct pt_regs * regs)
printk("\n");
printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
__show_regs(regs);
- __backtrace();
+ dump_stack();
}
ATOMIC_NOTIFIER_HEAD(thread_notify_head);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index e514c76043b..3fe93f75b55 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -29,6 +29,8 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
#include <asm/unified.h>
#include <asm/cpu.h>
@@ -42,6 +44,7 @@
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
+#include <asm/system.h>
#include <asm/prom.h>
#include <asm/mach/arch.h>
@@ -115,6 +118,13 @@ struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif
+/*
+ * Cached cpu_architecture() result for use by assembler code.
+ * C code should use the cpu_architecture() function instead of accessing this
+ * variable directly.
+ */
+int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
+
struct stack {
u32 irq[3];
u32 abt[3];
@@ -210,7 +220,7 @@ static const char *proc_arch[] = {
"?(17)",
};
-int cpu_architecture(void)
+static int __get_cpu_architecture(void)
{
int cpu_arch;
@@ -243,11 +253,22 @@ int cpu_architecture(void)
return cpu_arch;
}
+int __pure cpu_architecture(void)
+{
+ BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
+
+ return __cpu_architecture;
+}
+
static int cpu_has_aliasing_icache(unsigned int arch)
{
int aliasing_icache;
unsigned int id_reg, num_sets, line_size;
+ /* PIPT caches never alias. */
+ if (icache_is_pipt())
+ return 0;
+
/* arch specifies the register format */
switch (arch) {
case CPU_ARCH_ARMv7:
@@ -282,8 +303,14 @@ static void __init cacheid_init(void)
/* ARMv7 register format */
arch = CPU_ARCH_ARMv7;
cacheid = CACHEID_VIPT_NONALIASING;
- if ((cachetype & (3 << 14)) == 1 << 14)
+ switch (cachetype & (3 << 14)) {
+ case (1 << 14):
cacheid |= CACHEID_ASID_TAGGED;
+ break;
+ case (3 << 14):
+ cacheid |= CACHEID_PIPT;
+ break;
+ }
} else {
arch = CPU_ARCH_ARMv6;
if (cachetype & (1 << 23))
@@ -300,10 +327,11 @@ static void __init cacheid_init(void)
printk("CPU: %s data cache, %s instruction cache\n",
cache_is_vivt() ? "VIVT" :
cache_is_vipt_aliasing() ? "VIPT aliasing" :
- cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
+ cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
cache_is_vivt() ? "VIVT" :
icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
icache_is_vipt_aliasing() ? "VIPT aliasing" :
+ icache_is_pipt() ? "PIPT" :
cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
@@ -414,6 +442,7 @@ static void __init setup_processor(void)
}
cpu_name = list->cpu_name;
+ __cpu_architecture = __get_cpu_architecture();
#ifdef MULTI_CPU
processor = *list->proc;
@@ -861,7 +890,7 @@ static struct machine_desc * __init setup_machine_tags(unsigned int nr)
}
if (mdesc->fixup)
- mdesc->fixup(mdesc, tags, &from, &meminfo);
+ mdesc->fixup(tags, &from, &meminfo);
if (tags->hdr.tag == ATAG_CORE) {
if (meminfo.nr_banks != 0)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d88ff0230e8..94f34a6c861 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -16,7 +16,6 @@
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
-#include <linux/ftrace.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
@@ -31,6 +30,8 @@
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
+#include <asm/exception.h>
+#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -39,6 +40,7 @@
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
+#include <asm/smp_plat.h>
/*
* as from 2.5, kernels no longer have an init_tasks structure
@@ -259,6 +261,20 @@ void __ref cpu_die(void)
}
#endif /* CONFIG_HOTPLUG_CPU */
+int __cpu_logical_map[NR_CPUS];
+
+void __init smp_setup_processor_id(void)
+{
+ int i;
+ u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
+
+ cpu_logical_map(0) = cpu;
+ for (i = 1; i < NR_CPUS; ++i)
+ cpu_logical_map(i) = i == cpu ? 0 : i;
+
+ printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
+}
+
/*
* Called by both boot and secondaries to move global data into
* per-processor storage.
@@ -268,6 +284,8 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
cpu_info->loops_per_jiffy = loops_per_jiffy;
+
+ store_cpu_topology(cpuid);
}
/*
@@ -301,17 +319,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
*/
platform_secondary_init(cpu);
- /*
- * Enable local interrupts.
- */
notify_cpu_starting(cpu);
- local_irq_enable();
- local_fiq_enable();
-
- /*
- * Setup the percpu timer for this CPU.
- */
- percpu_timer_setup();
calibrate_delay();
@@ -323,10 +331,23 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* before we continue.
*/
set_cpu_online(cpu, true);
+
+ /*
+ * Setup the percpu timer for this CPU.
+ */
+ percpu_timer_setup();
+
while (!cpu_active(cpu))
cpu_relax();
/*
+ * cpu_active bit is set, so it's safe to enalbe interrupts
+ * now.
+ */
+ local_irq_enable();
+ local_fiq_enable();
+
+ /*
* OK, it's off to the idle thread for us
*/
cpu_idle();
@@ -358,6 +379,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned int ncores = num_possible_cpus();
+ init_cpu_topology();
+
smp_store_cpu_info(smp_processor_id());
/*
@@ -460,6 +483,11 @@ static void ipi_timer(void)
#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
{
+ handle_local_timer(regs);
+}
+
+void handle_local_timer(struct pt_regs *regs)
+{
struct pt_regs *old_regs = set_irq_regs(regs);
int cpu = smp_processor_id();
@@ -538,7 +566,7 @@ static void percpu_timer_stop(void)
}
#endif
-static DEFINE_SPINLOCK(stop_lock);
+static DEFINE_RAW_SPINLOCK(stop_lock);
/*
* ipi_cpu_stop - handle IPI from smp_send_stop()
@@ -547,10 +575,10 @@ static void ipi_cpu_stop(unsigned int cpu)
{
if (system_state == SYSTEM_BOOTING ||
system_state == SYSTEM_RUNNING) {
- spin_lock(&stop_lock);
+ raw_spin_lock(&stop_lock);
printk(KERN_CRIT "CPU%u: stopping\n", cpu);
dump_stack();
- spin_unlock(&stop_lock);
+ raw_spin_unlock(&stop_lock);
}
set_cpu_online(cpu, false);
@@ -567,6 +595,11 @@ static void ipi_cpu_stop(unsigned int cpu)
*/
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
+ handle_IPI(ipinr, regs);
+}
+
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
unsigned int cpu = smp_processor_id();
struct pt_regs *old_regs = set_irq_regs(regs);
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 7fcddb75c87..8f5dd796335 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -34,7 +34,7 @@ unsigned int __init scu_get_core_count(void __iomem *scu_base)
/*
* Enable the SCU
*/
-void __init scu_enable(void __iomem *scu_base)
+void scu_enable(void __iomem *scu_base)
{
u32 scu_ctrl;
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index cb634c3e28e..5a54b95d6bd 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -39,13 +39,11 @@
*/
static struct sys_timer *system_timer;
-#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || \
+ defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE)
/* this needs a better home */
DEFINE_SPINLOCK(rtc_lock);
-
-#ifdef CONFIG_RTC_DRV_CMOS_MODULE
EXPORT_SYMBOL(rtc_lock);
-#endif
#endif /* pc-style 'CMOS' RTC support */
/* change this if you have some constant time drift */
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
new file mode 100644
index 00000000000..1040c00405d
--- /dev/null
+++ b/arch/arm/kernel/topology.c
@@ -0,0 +1,148 @@
+/*
+ * arch/arm/kernel/topology.c
+ *
+ * Copyright (C) 2011 Linaro Limited.
+ * Written by: Vincent Guittot
+ *
+ * based on arch/sh/kernel/topology.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/sched.h>
+
+#include <asm/cputype.h>
+#include <asm/topology.h>
+
+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
+
+#define MPIDR_MT_BITMASK (0x1 << 24)
+
+/*
+ * These masks reflect the current use of the affinity levels.
+ * The affinity level can be up to 16 bits according to ARM ARM
+ */
+
+#define MPIDR_LEVEL0_MASK 0x3
+#define MPIDR_LEVEL0_SHIFT 0
+
+#define MPIDR_LEVEL1_MASK 0xF
+#define MPIDR_LEVEL1_SHIFT 8
+
+#define MPIDR_LEVEL2_MASK 0xFF
+#define MPIDR_LEVEL2_SHIFT 16
+
+struct cputopo_arm cpu_topology[NR_CPUS];
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+ return &cpu_topology[cpu].core_sibling;
+}
+
+/*
+ * store_cpu_topology is called at boot when only one cpu is running
+ * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
+ * which prevents simultaneous write access to cpu_topology array
+ */
+void store_cpu_topology(unsigned int cpuid)
+{
+ struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
+ unsigned int mpidr;
+ unsigned int cpu;
+
+ /* If the cpu topology has been already set, just return */
+ if (cpuid_topo->core_id != -1)
+ return;
+
+ mpidr = read_cpuid_mpidr();
+
+ /* create cpu topology mapping */
+ if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
+ /*
+ * This is a multiprocessor system
+ * multiprocessor format & multiprocessor mode field are set
+ */
+
+ if (mpidr & MPIDR_MT_BITMASK) {
+ /* core performance interdependency */
+ cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
+ & MPIDR_LEVEL0_MASK;
+ cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
+ & MPIDR_LEVEL1_MASK;
+ cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT)
+ & MPIDR_LEVEL2_MASK;
+ } else {
+ /* largely independent cores */
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
+ & MPIDR_LEVEL0_MASK;
+ cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
+ & MPIDR_LEVEL1_MASK;
+ }
+ } else {
+ /*
+ * This is an uniprocessor system
+ * we are in multiprocessor format but uniprocessor system
+ * or in the old uniprocessor format
+ */
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = 0;
+ cpuid_topo->socket_id = -1;
+ }
+
+ /* update core and thread sibling masks */
+ for_each_possible_cpu(cpu) {
+ struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
+
+ if (cpuid_topo->socket_id == cpu_topo->socket_id) {
+ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu,
+ &cpuid_topo->core_sibling);
+
+ if (cpuid_topo->core_id == cpu_topo->core_id) {
+ cpumask_set_cpu(cpuid,
+ &cpu_topo->thread_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu,
+ &cpuid_topo->thread_sibling);
+ }
+ }
+ }
+ smp_wmb();
+
+ printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
+ cpuid, cpu_topology[cpuid].thread_id,
+ cpu_topology[cpuid].core_id,
+ cpu_topology[cpuid].socket_id, mpidr);
+}
+
+/*
+ * init_cpu_topology is called at boot when only one cpu is running
+ * which prevent simultaneous write access to cpu_topology array
+ */
+void init_cpu_topology(void)
+{
+ unsigned int cpu;
+
+ /* init core mask */
+ for_each_possible_cpu(cpu) {
+ struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
+
+ cpu_topo->thread_id = -1;
+ cpu_topo->core_id = -1;
+ cpu_topo->socket_id = -1;
+ cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_clear(&cpu_topo->thread_sibling);
+ }
+ smp_wmb();
+}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index bc9f9da782c..99a57270250 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -21,12 +21,14 @@
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
+#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
+#include <asm/exception.h>
#include <asm/system.h>
#include <asm/unistd.h>
#include <asm/traps.h>
@@ -255,7 +257,7 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
return ret;
}
-static DEFINE_SPINLOCK(die_lock);
+static DEFINE_RAW_SPINLOCK(die_lock);
/*
* This function is protected against re-entrancy.
@@ -267,9 +269,11 @@ void die(const char *str, struct pt_regs *regs, int err)
oops_enter();
- spin_lock_irq(&die_lock);
+ raw_spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
+ if (!user_mode(regs))
+ report_bug(regs->ARM_pc, regs);
ret = __die(str, err, thread, regs);
if (regs && kexec_should_crash(thread->task))
@@ -277,7 +281,7 @@ void die(const char *str, struct pt_regs *regs, int err)
bust_spinlocks(0);
add_taint(TAINT_DIE);
- spin_unlock_irq(&die_lock);
+ raw_spin_unlock_irq(&die_lock);
oops_exit();
if (in_interrupt())
@@ -301,25 +305,43 @@ void arm_notify_die(const char *str, struct pt_regs *regs,
}
}
+#ifdef CONFIG_GENERIC_BUG
+
+int is_valid_bugaddr(unsigned long pc)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+ unsigned short bkpt;
+#else
+ unsigned long bkpt;
+#endif
+
+ if (probe_kernel_address((unsigned *)pc, bkpt))
+ return 0;
+
+ return bkpt == BUG_INSTR_VALUE;
+}
+
+#endif
+
static LIST_HEAD(undef_hook);
-static DEFINE_SPINLOCK(undef_lock);
+static DEFINE_RAW_SPINLOCK(undef_lock);
void register_undef_hook(struct undef_hook *hook)
{
unsigned long flags;
- spin_lock_irqsave(&undef_lock, flags);
+ raw_spin_lock_irqsave(&undef_lock, flags);
list_add(&hook->node, &undef_hook);
- spin_unlock_irqrestore(&undef_lock, flags);
+ raw_spin_unlock_irqrestore(&undef_lock, flags);
}
void unregister_undef_hook(struct undef_hook *hook)
{
unsigned long flags;
- spin_lock_irqsave(&undef_lock, flags);
+ raw_spin_lock_irqsave(&undef_lock, flags);
list_del(&hook->node);
- spin_unlock_irqrestore(&undef_lock, flags);
+ raw_spin_unlock_irqrestore(&undef_lock, flags);
}
static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
@@ -328,12 +350,12 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
unsigned long flags;
int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
- spin_lock_irqsave(&undef_lock, flags);
+ raw_spin_lock_irqsave(&undef_lock, flags);
list_for_each_entry(hook, &undef_hook, node)
if ((instr & hook->instr_mask) == hook->instr_val &&
(regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
fn = hook->fn;
- spin_unlock_irqrestore(&undef_lock, flags);
+ raw_spin_unlock_irqrestore(&undef_lock, flags);
return fn ? fn(regs, instr) : 1;
}
@@ -706,16 +728,6 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
arm_notify_die("unknown data abort code", regs, &info, instr, 0);
}
-void __attribute__((noreturn)) __bug(const char *file, int line)
-{
- printk(KERN_CRIT"kernel BUG at %s:%d!\n", file, line);
- *(int *)0 = 0;
-
- /* Avoid "noreturn function does return" */
- for (;;);
-}
-EXPORT_SYMBOL(__bug);
-
void __readwrite_bug(const char *fn)
{
printk("%s called, but not implemented\n", fn);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 4e66f62b8d4..20b3041e086 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -21,7 +21,8 @@
#define ARM_CPU_KEEP(x)
#endif
-#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
+#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+ defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x) x
#define ARM_EXIT_DISCARD(x)
#else
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index a673297b0cf..cd07b5814c2 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -22,15 +22,10 @@
#define mask r7
#define offset r8
-ENTRY(__backtrace)
- mov r1, #0x10
- mov r0, fp
-
ENTRY(c_backtrace)
#if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
mov pc, lr
-ENDPROC(__backtrace)
ENDPROC(c_backtrace)
#else
stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location...
@@ -107,7 +102,6 @@ for_each_frame: tst frame, mask @ Check for address exceptions
mov r1, frame
bl printk
no_frame: ldmfd sp!, {r4 - r8, pc}
-ENDPROC(__backtrace)
ENDPROC(c_backtrace)
.pushsection __ex_table,"a"
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S
index faa7748142d..e55c4842c29 100644
--- a/arch/arm/lib/div64.S
+++ b/arch/arm/lib/div64.S
@@ -13,6 +13,7 @@
*/
#include <linux/linkage.h>
+#include <asm/unwind.h>
#ifdef __ARMEB__
#define xh r0
@@ -44,6 +45,7 @@
*/
ENTRY(__do_div64)
+UNWIND(.fnstart)
@ Test for easy paths first.
subs ip, r4, #1
@@ -189,7 +191,12 @@ ENTRY(__do_div64)
moveq yh, xh
moveq xh, #0
moveq pc, lr
+UNWIND(.fnend)
+UNWIND(.fnstart)
+UNWIND(.pad #4)
+UNWIND(.save {lr})
+Ldiv0_64:
@ Division by 0:
str lr, [sp, #-8]!
bl __div0
@@ -200,4 +207,5 @@ ENTRY(__do_div64)
mov xh, #0
ldr pc, [sp], #8
+UNWIND(.fnend)
ENDPROC(__do_div64)
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 8b9b13649f8..025f742dd4d 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
+#include <linux/highmem.h>
#include <asm/current.h>
#include <asm/page.h>
diff --git a/arch/arm/mach-at91/Makefile.boot b/arch/arm/mach-at91/Makefile.boot
index 3462b815054..9ab5a3e5f4f 100644
--- a/arch/arm/mach-at91/Makefile.boot
+++ b/arch/arm/mach-at91/Makefile.boot
@@ -4,15 +4,15 @@
# INITRD_PHYS must be in RAM
ifeq ($(CONFIG_ARCH_AT91CAP9),y)
- zreladdr-y := 0x70008000
+ zreladdr-y += 0x70008000
params_phys-y := 0x70000100
initrd_phys-y := 0x70410000
else ifeq ($(CONFIG_ARCH_AT91SAM9G45),y)
- zreladdr-y := 0x70008000
+ zreladdr-y += 0x70008000
params_phys-y := 0x70000100
initrd_phys-y := 0x70410000
else
- zreladdr-y := 0x20008000
+ zreladdr-y += 0x20008000
params_phys-y := 0x20000100
initrd_phys-y := 0x20410000
endif
diff --git a/arch/arm/mach-bcmring/Kconfig b/arch/arm/mach-bcmring/Kconfig
index 457b4384913..9170d16dca5 100644
--- a/arch/arm/mach-bcmring/Kconfig
+++ b/arch/arm/mach-bcmring/Kconfig
@@ -17,5 +17,3 @@ config BCM_ZRELADDR
hex "Compressed ZREL ADDR"
endmenu
-
-# source "drivers/char/bcmring/Kconfig"
diff --git a/arch/arm/mach-bcmring/Makefile.boot b/arch/arm/mach-bcmring/Makefile.boot
index fb53b283beb..aef2467757f 100644
--- a/arch/arm/mach-bcmring/Makefile.boot
+++ b/arch/arm/mach-bcmring/Makefile.boot
@@ -1,6 +1,6 @@
# Address where decompressor will be written and eventually executed.
#
# default to SDRAM
-zreladdr-y := $(CONFIG_BCM_ZRELADDR)
+zreladdr-y += $(CONFIG_BCM_ZRELADDR)
params_phys-y := 0x00000800
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c
index a604b9ebb50..31a143592c8 100644
--- a/arch/arm/mach-bcmring/arch.c
+++ b/arch/arm/mach-bcmring/arch.c
@@ -136,8 +136,8 @@ static void __init bcmring_init_machine(void)
*
*****************************************************************************/
-static void __init bcmring_fixup(struct machine_desc *desc,
- struct tag *t, char **cmdline, struct meminfo *mi) {
+static void __init bcmring_fixup(struct tag *t, char **cmdline,
+ struct meminfo *mi) {
#ifdef CONFIG_BLK_DEV_INITRD
printk(KERN_NOTICE "bcmring_fixup\n");
t->hdr.tag = ATAG_CORE;
diff --git a/arch/arm/mach-bcmring/irq.c b/arch/arm/mach-bcmring/irq.c
index c48feaf4e8e..437fa683bcb 100644
--- a/arch/arm/mach-bcmring/irq.c
+++ b/arch/arm/mach-bcmring/irq.c
@@ -20,7 +20,6 @@
#include <linux/stddef.h>
#include <linux/list.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <linux/io.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-bcmring/timer.c b/arch/arm/mach-bcmring/timer.c
index 2d415d2a8e6..af9c3d7e2a0 100644
--- a/arch/arm/mach-bcmring/timer.c
+++ b/arch/arm/mach-bcmring/timer.c
@@ -12,7 +12,6 @@
* consent.
*****************************************************************************/
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/module.h>
#include <csp/tmrHw.h>
diff --git a/arch/arm/mach-clps711x/Makefile.boot b/arch/arm/mach-clps711x/Makefile.boot
index a51fcef64fe..9398e859b5a 100644
--- a/arch/arm/mach-clps711x/Makefile.boot
+++ b/arch/arm/mach-clps711x/Makefile.boot
@@ -1,5 +1,5 @@
# The standard locations for stuff on CLPS711x type processors
- zreladdr-y := 0xc0028000
+ zreladdr-y += 0xc0028000
params_phys-y := 0xc0000100
# Should probably have some agreement on these...
initrd_phys-$(CONFIG_ARCH_P720T) := 0xc0400000
diff --git a/arch/arm/mach-clps711x/clep7312.c b/arch/arm/mach-clps711x/clep7312.c
index 67b5abb4a60..0a2e74feb24 100644
--- a/arch/arm/mach-clps711x/clep7312.c
+++ b/arch/arm/mach-clps711x/clep7312.c
@@ -26,8 +26,7 @@
#include "common.h"
static void __init
-fixup_clep7312(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+fixup_clep7312(struct tag *tags, char **cmdline, struct meminfo *mi)
{
mi->nr_banks=1;
mi->bank[0].start = 0xc0000000;
diff --git a/arch/arm/mach-clps711x/edb7211-arch.c b/arch/arm/mach-clps711x/edb7211-arch.c
index 98ca5b2e940..725a7a54ba4 100644
--- a/arch/arm/mach-clps711x/edb7211-arch.c
+++ b/arch/arm/mach-clps711x/edb7211-arch.c
@@ -37,8 +37,7 @@ static void __init edb7211_reserve(void)
}
static void __init
-fixup_edb7211(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+fixup_edb7211(struct tag *tags, char **cmdline, struct meminfo *mi)
{
/*
* Bank start addresses are not present in the information
diff --git a/arch/arm/mach-clps711x/fortunet.c b/arch/arm/mach-clps711x/fortunet.c
index b1cb479e71e..1947b30f9b8 100644
--- a/arch/arm/mach-clps711x/fortunet.c
+++ b/arch/arm/mach-clps711x/fortunet.c
@@ -57,8 +57,7 @@ typedef struct tag_IMAGE_PARAMS
#define IMAGE_PARAMS_PHYS 0xC01F0000
static void __init
-fortunet_fixup(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+fortunet_fixup(struct tag *tags, char **cmdline, struct meminfo *mi)
{
IMAGE_PARAMS *ip = phys_to_virt(IMAGE_PARAMS_PHYS);
*cmdline = phys_to_virt(ip->command_line);
diff --git a/arch/arm/mach-clps711x/p720t.c b/arch/arm/mach-clps711x/p720t.c
index cefbce0480b..3f796e0d328 100644
--- a/arch/arm/mach-clps711x/p720t.c
+++ b/arch/arm/mach-clps711x/p720t.c
@@ -56,8 +56,7 @@ static struct map_desc p720t_io_desc[] __initdata = {
};
static void __init
-fixup_p720t(struct machine_desc *desc, struct tag *tag,
- char **cmdline, struct meminfo *mi)
+fixup_p720t(struct tag *tag, char **cmdline, struct meminfo *mi)
{
/*
* Our bootloader doesn't setup any tags (yet).
diff --git a/arch/arm/mach-cns3xxx/Makefile.boot b/arch/arm/mach-cns3xxx/Makefile.boot
index 77701286522..d079de0b6e3 100644
--- a/arch/arm/mach-cns3xxx/Makefile.boot
+++ b/arch/arm/mach-cns3xxx/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00C00000
diff --git a/arch/arm/mach-davinci/Makefile.boot b/arch/arm/mach-davinci/Makefile.boot
index db97ef2c647..04a6c4e67b1 100644
--- a/arch/arm/mach-davinci/Makefile.boot
+++ b/arch/arm/mach-davinci/Makefile.boot
@@ -2,12 +2,12 @@ ifeq ($(CONFIG_ARCH_DAVINCI_DA8XX),y)
ifeq ($(CONFIG_ARCH_DAVINCI_DMx),y)
$(error Cannot enable DaVinci and DA8XX platforms concurrently)
else
- zreladdr-y := 0xc0008000
+ zreladdr-y += 0xc0008000
params_phys-y := 0xc0000100
initrd_phys-y := 0xc0800000
endif
else
- zreladdr-y := 0x80008000
+ zreladdr-y += 0x80008000
params_phys-y := 0x80000100
initrd_phys-y := 0x80800000
endif
diff --git a/arch/arm/mach-dove/Makefile.boot b/arch/arm/mach-dove/Makefile.boot
index 67039c3e0c4..760a0efe758 100644
--- a/arch/arm/mach-dove/Makefile.boot
+++ b/arch/arm/mach-dove/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-ebsa110/Makefile.boot b/arch/arm/mach-ebsa110/Makefile.boot
index 23212604493..83cf07c38ad 100644
--- a/arch/arm/mach-ebsa110/Makefile.boot
+++ b/arch/arm/mach-ebsa110/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000400
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-ebsa110/include/mach/io.h b/arch/arm/mach-ebsa110/include/mach/io.h
index f68daa632af..44679db672f 100644
--- a/arch/arm/mach-ebsa110/include/mach/io.h
+++ b/arch/arm/mach-ebsa110/include/mach/io.h
@@ -13,8 +13,6 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-#define IO_SPACE_LIMIT 0xffff
-
u8 __inb8(unsigned int port);
void __outb8(u8 val, unsigned int port);
diff --git a/arch/arm/mach-ep93xx/Makefile.boot b/arch/arm/mach-ep93xx/Makefile.boot
index 0ad33f15c62..d3113a71cb4 100644
--- a/arch/arm/mach-ep93xx/Makefile.boot
+++ b/arch/arm/mach-ep93xx/Makefile.boot
@@ -1,14 +1,14 @@
- zreladdr-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET) := 0x00008000
+ zreladdr-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET) += 0x00008000
params_phys-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET) := 0x00000100
- zreladdr-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) := 0xc0008000
+ zreladdr-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) += 0xc0008000
params_phys-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) := 0xc0000100
- zreladdr-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) := 0xd0008000
+ zreladdr-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) += 0xd0008000
params_phys-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) := 0xd0000100
- zreladdr-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) := 0xe0008000
+ zreladdr-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) += 0xe0008000
params_phys-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) := 0xe0000100
- zreladdr-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) := 0xf0008000
+ zreladdr-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) += 0xf0008000
params_phys-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) := 0xf0000100
diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig
index 0c77ab99fa1..fc1f92dfbea 100644
--- a/arch/arm/mach-exynos4/Kconfig
+++ b/arch/arm/mach-exynos4/Kconfig
@@ -12,6 +12,7 @@ if ARCH_EXYNOS4
config CPU_EXYNOS4210
bool
select S3C_PL330_DMA
+ select ARM_CPU_SUSPEND if PM
help
Enable EXYNOS4210 CPU support
diff --git a/arch/arm/mach-exynos4/Makefile.boot b/arch/arm/mach-exynos4/Makefile.boot
index d65956ffb43..b9862e22bf1 100644
--- a/arch/arm/mach-exynos4/Makefile.boot
+++ b/arch/arm/mach-exynos4/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x40008000
+ zreladdr-y += 0x40008000
params_phys-y := 0x40000100
diff --git a/arch/arm/mach-exynos4/platsmp.c b/arch/arm/mach-exynos4/platsmp.c
index df6ef1b2f98..0c90896ad9a 100644
--- a/arch/arm/mach-exynos4/platsmp.c
+++ b/arch/arm/mach-exynos4/platsmp.c
@@ -193,12 +193,10 @@ void __init smp_init_cpus(void)
ncores = scu_base ? scu_get_core_count(scu_base) : 1;
/* sanity check */
- if (ncores > NR_CPUS) {
- printk(KERN_WARNING
- "EXYNOS4: no. of cores (%d) greater than configured "
- "maximum of %d - clipping\n",
- ncores, NR_CPUS);
- ncores = NR_CPUS;
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
}
for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index c8e7afcf14e..f643ef819da 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -4,8 +4,8 @@ menu "Footbridge Implementations"
config ARCH_CATS
bool "CATS"
- select CLKSRC_I8253
select CLKEVT_I8253
+ select CLKSRC_I8253
select FOOTBRIDGE_HOST
select ISA
select ISA_DMA
@@ -61,8 +61,8 @@ config ARCH_EBSA285_HOST
config ARCH_NETWINDER
bool "NetWinder"
- select CLKSRC_I8253
select CLKEVT_I8253
+ select CLKSRC_I8253
select FOOTBRIDGE_HOST
select ISA
select ISA_DMA
diff --git a/arch/arm/mach-footbridge/Makefile.boot b/arch/arm/mach-footbridge/Makefile.boot
index c7e75acfe6c..ff0a4b5b0a8 100644
--- a/arch/arm/mach-footbridge/Makefile.boot
+++ b/arch/arm/mach-footbridge/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-footbridge/cats-hw.c b/arch/arm/mach-footbridge/cats-hw.c
index 5b1a8db779b..206ff2f39d6 100644
--- a/arch/arm/mach-footbridge/cats-hw.c
+++ b/arch/arm/mach-footbridge/cats-hw.c
@@ -76,8 +76,7 @@ __initcall(cats_hw_init);
* hard reboots fail on early boards.
*/
static void __init
-fixup_cats(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+fixup_cats(struct tag *tags, char **cmdline, struct meminfo *mi)
{
screen_info.orig_video_lines = 25;
screen_info.orig_video_points = 16;
diff --git a/arch/arm/mach-footbridge/include/mach/hardware.h b/arch/arm/mach-footbridge/include/mach/hardware.h
index 15d54981674..e3d6ccac216 100644
--- a/arch/arm/mach-footbridge/include/mach/hardware.h
+++ b/arch/arm/mach-footbridge/include/mach/hardware.h
@@ -93,7 +93,7 @@
#define CPLD_FLASH_WR_ENABLE 1
#ifndef __ASSEMBLY__
-extern spinlock_t nw_gpio_lock;
+extern raw_spinlock_t nw_gpio_lock;
extern void nw_gpio_modify_op(unsigned int mask, unsigned int set);
extern void nw_gpio_modify_io(unsigned int mask, unsigned int in);
extern unsigned int nw_gpio_read(void);
diff --git a/arch/arm/mach-footbridge/include/mach/io.h b/arch/arm/mach-footbridge/include/mach/io.h
index 32e4cc397c2..15a70396c27 100644
--- a/arch/arm/mach-footbridge/include/mach/io.h
+++ b/arch/arm/mach-footbridge/include/mach/io.h
@@ -23,8 +23,6 @@
#define PCIO_SIZE 0x00100000
#define PCIO_BASE MMU_IO(0xff000000, 0x7c000000)
-#define IO_SPACE_LIMIT 0xffff
-
/*
* Translation of various region addresses to virtual addresses
*/
diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c
index 06e514f372d..0f7aeff486c 100644
--- a/arch/arm/mach-footbridge/netwinder-hw.c
+++ b/arch/arm/mach-footbridge/netwinder-hw.c
@@ -68,7 +68,7 @@ static inline void wb977_ww(int reg, int val)
/*
* This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE
*/
-DEFINE_SPINLOCK(nw_gpio_lock);
+DEFINE_RAW_SPINLOCK(nw_gpio_lock);
EXPORT_SYMBOL(nw_gpio_lock);
static unsigned int current_gpio_op;
@@ -327,9 +327,9 @@ static inline void wb977_init_gpio(void)
/*
* Set Group1/Group2 outputs
*/
- spin_lock_irqsave(&nw_gpio_lock, flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
nw_gpio_modify_op(-1, GPIO_RED_LED | GPIO_FAN);
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
/*
@@ -390,9 +390,9 @@ static void __init cpld_init(void)
{
unsigned long flags;
- spin_lock_irqsave(&nw_gpio_lock, flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
nw_cpld_modify(-1, CPLD_UNMUTE | CPLD_7111_DISABLE);
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
static unsigned char rwa_unlock[] __initdata =
@@ -616,9 +616,9 @@ static int __init nw_hw_init(void)
cpld_init();
rwa010_init();
- spin_lock_irqsave(&nw_gpio_lock, flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
nw_gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS);
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
return 0;
}
@@ -631,8 +631,7 @@ __initcall(nw_hw_init);
* the parameter page.
*/
static void __init
-fixup_netwinder(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+fixup_netwinder(struct tag *tags, char **cmdline, struct meminfo *mi)
{
#ifdef CONFIG_ISAPNP
extern int isapnp_disable;
diff --git a/arch/arm/mach-footbridge/netwinder-leds.c b/arch/arm/mach-footbridge/netwinder-leds.c
index 00269fe0be8..e57102e871f 100644
--- a/arch/arm/mach-footbridge/netwinder-leds.c
+++ b/arch/arm/mach-footbridge/netwinder-leds.c
@@ -31,13 +31,13 @@
static char led_state;
static char hw_led_state;
-static DEFINE_SPINLOCK(leds_lock);
+static DEFINE_RAW_SPINLOCK(leds_lock);
static void netwinder_leds_event(led_event_t evt)
{
unsigned long flags;
- spin_lock_irqsave(&leds_lock, flags);
+ raw_spin_lock_irqsave(&leds_lock, flags);
switch (evt) {
case led_start:
@@ -117,12 +117,12 @@ static void netwinder_leds_event(led_event_t evt)
break;
}
- spin_unlock_irqrestore(&leds_lock, flags);
+ raw_spin_unlock_irqrestore(&leds_lock, flags);
if (led_state & LED_STATE_ENABLED) {
- spin_lock_irqsave(&nw_gpio_lock, flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
nw_gpio_modify_op(GPIO_RED_LED | GPIO_GREEN_LED, hw_led_state);
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
}
diff --git a/arch/arm/mach-gemini/Makefile.boot b/arch/arm/mach-gemini/Makefile.boot
index 22a52c228d9..683f52b20e3 100644
--- a/arch/arm/mach-gemini/Makefile.boot
+++ b/arch/arm/mach-gemini/Makefile.boot
@@ -1,9 +1,9 @@
ifeq ($(CONFIG_GEMINI_MEM_SWAP),y)
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
else
- zreladdr-y := 0x10008000
+ zreladdr-y += 0x10008000
params_phys-y := 0x10000100
initrd_phys-y := 0x10800000
endif
diff --git a/arch/arm/mach-h720x/Makefile.boot b/arch/arm/mach-h720x/Makefile.boot
index 52984017bd9..d875a7094df 100644
--- a/arch/arm/mach-h720x/Makefile.boot
+++ b/arch/arm/mach-h720x/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-$(CONFIG_ARCH_H720X) := 0x40008000
+ zreladdr-$(CONFIG_ARCH_H720X) += 0x40008000
diff --git a/arch/arm/mach-imx/Makefile.boot b/arch/arm/mach-imx/Makefile.boot
index ebee18b3884..dbe61201bcd 100644
--- a/arch/arm/mach-imx/Makefile.boot
+++ b/arch/arm/mach-imx/Makefile.boot
@@ -1,19 +1,19 @@
-zreladdr-$(CONFIG_ARCH_MX1) := 0x08008000
+zreladdr-$(CONFIG_ARCH_MX1) += 0x08008000
params_phys-$(CONFIG_ARCH_MX1) := 0x08000100
initrd_phys-$(CONFIG_ARCH_MX1) := 0x08800000
-zreladdr-$(CONFIG_MACH_MX21) := 0xC0008000
+zreladdr-$(CONFIG_MACH_MX21) += 0xC0008000
params_phys-$(CONFIG_MACH_MX21) := 0xC0000100
initrd_phys-$(CONFIG_MACH_MX21) := 0xC0800000
-zreladdr-$(CONFIG_ARCH_MX25) := 0x80008000
+zreladdr-$(CONFIG_ARCH_MX25) += 0x80008000
params_phys-$(CONFIG_ARCH_MX25) := 0x80000100
initrd_phys-$(CONFIG_ARCH_MX25) := 0x80800000
-zreladdr-$(CONFIG_MACH_MX27) := 0xA0008000
+zreladdr-$(CONFIG_MACH_MX27) += 0xA0008000
params_phys-$(CONFIG_MACH_MX27) := 0xA0000100
initrd_phys-$(CONFIG_MACH_MX27) := 0xA0800000
-zreladdr-$(CONFIG_ARCH_MX3) := 0x80008000
+zreladdr-$(CONFIG_ARCH_MX3) += 0x80008000
params_phys-$(CONFIG_ARCH_MX3) := 0x80000100
initrd_phys-$(CONFIG_ARCH_MX3) := 0x80800000
diff --git a/arch/arm/mach-integrator/Makefile.boot b/arch/arm/mach-integrator/Makefile.boot
index c7e75acfe6c..ff0a4b5b0a8 100644
--- a/arch/arm/mach-integrator/Makefile.boot
+++ b/arch/arm/mach-integrator/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 77315b99568..4b38e13667a 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -126,6 +126,10 @@ static struct clk_lookup lookups[] = {
{ /* Bus clock */
.con_id = "apb_pclk",
.clk = &dummy_apb_pclk,
+ }, {
+ /* Integrator/AP timer frequency */
+ .dev_id = "ap_timer",
+ .clk = &clk24mhz,
}, { /* UART0 */
.dev_id = "mb:16",
.clk = &uartclk,
@@ -205,7 +209,7 @@ static struct amba_pl010_data integrator_uart_data = {
#define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_CTRL)
-static DEFINE_SPINLOCK(cm_lock);
+static DEFINE_RAW_SPINLOCK(cm_lock);
/**
* cm_control - update the CM_CTRL register.
@@ -217,10 +221,10 @@ void cm_control(u32 mask, u32 set)
unsigned long flags;
u32 val;
- spin_lock_irqsave(&cm_lock, flags);
+ raw_spin_lock_irqsave(&cm_lock, flags);
val = readl(CM_CTRL) & ~mask;
writel(val | set, CM_CTRL);
- spin_unlock_irqrestore(&cm_lock, flags);
+ raw_spin_unlock_irqrestore(&cm_lock, flags);
}
EXPORT_SYMBOL(cm_control);
diff --git a/arch/arm/mach-integrator/include/mach/io.h b/arch/arm/mach-integrator/include/mach/io.h
index f21bb5493dd..37beed3fa3e 100644
--- a/arch/arm/mach-integrator/include/mach/io.h
+++ b/arch/arm/mach-integrator/include/mach/io.h
@@ -20,8 +20,6 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-#define IO_SPACE_LIMIT 0xffff
-
/*
* WARNING: this has to mirror definitions in platform.h
*/
diff --git a/arch/arm/mach-integrator/include/mach/platform.h b/arch/arm/mach-integrator/include/mach/platform.h
index 5e6ea5cfea6..ec467baade0 100644
--- a/arch/arm/mach-integrator/include/mach/platform.h
+++ b/arch/arm/mach-integrator/include/mach/platform.h
@@ -13,9 +13,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-/* DO NOT EDIT!! - this file automatically generated
- * from .s file by awk -f s2h.awk
- */
/**************************************************************************
* * Copyright © ARM Limited 1998. All rights reserved.
* ***********************************************************************/
@@ -399,15 +396,6 @@
#define INTEGRATOR_TIMER1_BASE (INTEGRATOR_CT_BASE + 0x100)
#define INTEGRATOR_TIMER2_BASE (INTEGRATOR_CT_BASE + 0x200)
-#define TICKS_PER_uSEC 24
-
-/*
- * These are useconds NOT ticks.
- *
- */
-#define mSEC_1 1000
-#define mSEC_10 (mSEC_1 * 10)
-
#define INTEGRATOR_CSR_BASE 0x10000000
#define INTEGRATOR_CSR_SIZE 0x10000000
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 8cdc730dcb3..f2119908a0b 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -32,6 +32,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mtd/physmap.h>
+#include <linux/clk.h>
#include <video/vga.h>
#include <mach/hardware.h>
@@ -322,27 +323,16 @@ static void __init ap_init(void)
#define TIMER1_VA_BASE IO_ADDRESS(INTEGRATOR_TIMER1_BASE)
#define TIMER2_VA_BASE IO_ADDRESS(INTEGRATOR_TIMER2_BASE)
-/*
- * How long is the timer interval?
- */
-#define TIMER_INTERVAL (TICKS_PER_uSEC * mSEC_10)
-#if TIMER_INTERVAL >= 0x100000
-#define TICKS2USECS(x) (256 * (x) / TICKS_PER_uSEC)
-#elif TIMER_INTERVAL >= 0x10000
-#define TICKS2USECS(x) (16 * (x) / TICKS_PER_uSEC)
-#else
-#define TICKS2USECS(x) ((x) / TICKS_PER_uSEC)
-#endif
-
static unsigned long timer_reload;
-static void integrator_clocksource_init(u32 khz)
+static void integrator_clocksource_init(unsigned long inrate)
{
void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
+ unsigned long rate = inrate;
- if (khz >= 1500) {
- khz /= 16;
+ if (rate >= 1500000) {
+ rate /= 16;
ctrl |= TIMER_CTRL_DIV16;
}
@@ -350,7 +340,7 @@ static void integrator_clocksource_init(u32 khz)
writel(ctrl, base + TIMER_CTRL);
clocksource_mmio_init(base + TIMER_VALUE, "timer2",
- khz * 1000, 200, 16, clocksource_mmio_readl_down);
+ rate, 200, 16, clocksource_mmio_readl_down);
}
static void __iomem * const clkevt_base = (void __iomem *)TIMER1_VA_BASE;
@@ -374,15 +364,29 @@ static void clkevt_set_mode(enum clock_event_mode mode, struct clock_event_devic
{
u32 ctrl = readl(clkevt_base + TIMER_CTRL) & ~TIMER_CTRL_ENABLE;
- BUG_ON(mode == CLOCK_EVT_MODE_ONESHOT);
+ /* Disable timer */
+ writel(ctrl, clkevt_base + TIMER_CTRL);
- if (mode == CLOCK_EVT_MODE_PERIODIC) {
- writel(ctrl, clkevt_base + TIMER_CTRL);
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* Enable the timer and start the periodic tick */
writel(timer_reload, clkevt_base + TIMER_LOAD);
ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* Leave the timer disabled, .set_next_event will enable it */
+ ctrl &= ~TIMER_CTRL_PERIODIC;
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_RESUME:
+ default:
+ /* Just leave in disabled state */
+ break;
}
- writel(ctrl, clkevt_base + TIMER_CTRL);
}
static int clkevt_set_next_event(unsigned long next, struct clock_event_device *evt)
@@ -398,12 +402,10 @@ static int clkevt_set_next_event(unsigned long next, struct clock_event_device *
static struct clock_event_device integrator_clockevent = {
.name = "timer1",
- .shift = 34,
- .features = CLOCK_EVT_FEAT_PERIODIC,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = clkevt_set_mode,
.set_next_event = clkevt_set_next_event,
.rating = 300,
- .cpumask = cpu_all_mask,
};
static struct irqaction integrator_timer_irq = {
@@ -413,29 +415,27 @@ static struct irqaction integrator_timer_irq = {
.dev_id = &integrator_clockevent,
};
-static void integrator_clockevent_init(u32 khz)
+static void integrator_clockevent_init(unsigned long inrate)
{
- struct clock_event_device *evt = &integrator_clockevent;
+ unsigned long rate = inrate;
unsigned int ctrl = 0;
- if (khz * 1000 > 0x100000 * HZ) {
- khz /= 256;
+ /* Calculate and program a divisor */
+ if (rate > 0x100000 * HZ) {
+ rate /= 256;
ctrl |= TIMER_CTRL_DIV256;
- } else if (khz * 1000 > 0x10000 * HZ) {
- khz /= 16;
+ } else if (rate > 0x10000 * HZ) {
+ rate /= 16;
ctrl |= TIMER_CTRL_DIV16;
}
-
- timer_reload = khz * 1000 / HZ;
+ timer_reload = rate / HZ;
writel(ctrl, clkevt_base + TIMER_CTRL);
- evt->irq = IRQ_TIMERINT1;
- evt->mult = div_sc(khz, NSEC_PER_MSEC, evt->shift);
- evt->max_delta_ns = clockevent_delta2ns(0xffff, evt);
- evt->min_delta_ns = clockevent_delta2ns(0xf, evt);
-
setup_irq(IRQ_TIMERINT1, &integrator_timer_irq);
- clockevents_register_device(evt);
+ clockevents_config_and_register(&integrator_clockevent,
+ rate,
+ 1,
+ 0xffffU);
}
/*
@@ -443,14 +443,20 @@ static void integrator_clockevent_init(u32 khz)
*/
static void __init ap_init_timer(void)
{
- u32 khz = TICKS_PER_uSEC * 1000;
+ struct clk *clk;
+ unsigned long rate;
+
+ clk = clk_get_sys("ap_timer", NULL);
+ BUG_ON(IS_ERR(clk));
+ clk_enable(clk);
+ rate = clk_get_rate(clk);
writel(0, TIMER0_VA_BASE + TIMER_CTRL);
writel(0, TIMER1_VA_BASE + TIMER_CTRL);
writel(0, TIMER2_VA_BASE + TIMER_CTRL);
- integrator_clocksource_init(khz);
- integrator_clockevent_init(khz);
+ integrator_clocksource_init(rate);
+ integrator_clockevent_init(rate);
}
static struct sys_timer ap_timer = {
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index 11b86e5b71c..b4d8f8b8a08 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -163,7 +163,7 @@
* 7:2 register number
*
*/
-static DEFINE_SPINLOCK(v3_lock);
+static DEFINE_RAW_SPINLOCK(v3_lock);
#define PCI_BUS_NONMEM_START 0x00000000
#define PCI_BUS_NONMEM_SIZE SZ_256M
@@ -284,7 +284,7 @@ static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where,
unsigned long flags;
u32 v;
- spin_lock_irqsave(&v3_lock, flags);
+ raw_spin_lock_irqsave(&v3_lock, flags);
addr = v3_open_config_window(bus, devfn, where);
switch (size) {
@@ -302,7 +302,7 @@ static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where,
}
v3_close_config_window();
- spin_unlock_irqrestore(&v3_lock, flags);
+ raw_spin_unlock_irqrestore(&v3_lock, flags);
*val = v;
return PCIBIOS_SUCCESSFUL;
@@ -314,7 +314,7 @@ static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where,
unsigned long addr;
unsigned long flags;
- spin_lock_irqsave(&v3_lock, flags);
+ raw_spin_lock_irqsave(&v3_lock, flags);
addr = v3_open_config_window(bus, devfn, where);
switch (size) {
@@ -335,7 +335,7 @@ static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where,
}
v3_close_config_window();
- spin_unlock_irqrestore(&v3_lock, flags);
+ raw_spin_unlock_irqrestore(&v3_lock, flags);
return PCIBIOS_SUCCESSFUL;
}
@@ -513,7 +513,7 @@ void __init pci_v3_preinit(void)
hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
- spin_lock_irqsave(&v3_lock, flags);
+ raw_spin_lock_irqsave(&v3_lock, flags);
/*
* Unlock V3 registers, but only if they were previously locked.
@@ -586,7 +586,7 @@ void __init pci_v3_preinit(void)
printk(KERN_ERR "PCI: unable to grab PCI error "
"interrupt: %d\n", ret);
- spin_unlock_irqrestore(&v3_lock, flags);
+ raw_spin_unlock_irqrestore(&v3_lock, flags);
}
void __init pci_v3_postinit(void)
diff --git a/arch/arm/mach-iop13xx/Makefile.boot b/arch/arm/mach-iop13xx/Makefile.boot
index 0b0e19fdfe6..3a8c38c3189 100644
--- a/arch/arm/mach-iop13xx/Makefile.boot
+++ b/arch/arm/mach-iop13xx/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-iop32x/Makefile.boot b/arch/arm/mach-iop32x/Makefile.boot
index 47000dccd61..0a833b11e38 100644
--- a/arch/arm/mach-iop32x/Makefile.boot
+++ b/arch/arm/mach-iop32x/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0xa0008000
+ zreladdr-y += 0xa0008000
params_phys-y := 0xa0000100
initrd_phys-y := 0xa0800000
diff --git a/arch/arm/mach-iop33x/Makefile.boot b/arch/arm/mach-iop33x/Makefile.boot
index 67039c3e0c4..760a0efe758 100644
--- a/arch/arm/mach-iop33x/Makefile.boot
+++ b/arch/arm/mach-iop33x/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-ixp2000/Makefile.boot b/arch/arm/mach-ixp2000/Makefile.boot
index d84c5807a43..9c7af91d93d 100644
--- a/arch/arm/mach-ixp2000/Makefile.boot
+++ b/arch/arm/mach-ixp2000/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
diff --git a/arch/arm/mach-ixp23xx/Makefile.boot b/arch/arm/mach-ixp23xx/Makefile.boot
index d5561ad15ba..44fb4a717c3 100644
--- a/arch/arm/mach-ixp23xx/Makefile.boot
+++ b/arch/arm/mach-ixp23xx/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
diff --git a/arch/arm/mach-ixp4xx/Makefile.boot b/arch/arm/mach-ixp4xx/Makefile.boot
index d84c5807a43..9c7af91d93d 100644
--- a/arch/arm/mach-ixp4xx/Makefile.boot
+++ b/arch/arm/mach-ixp4xx/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 2131832ee6b..f72a3a893c4 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -54,7 +54,7 @@ unsigned long ixp4xx_pci_reg_base = 0;
* these transactions are atomic or we will end up
* with corrupt data on the bus or in a driver.
*/
-static DEFINE_SPINLOCK(ixp4xx_pci_lock);
+static DEFINE_RAW_SPINLOCK(ixp4xx_pci_lock);
/*
* Read from PCI config space
@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(ixp4xx_pci_lock);
static void crp_read(u32 ad_cbe, u32 *data)
{
unsigned long flags;
- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
*PCI_CRP_AD_CBE = ad_cbe;
*data = *PCI_CRP_RDATA;
- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
}
/*
@@ -74,10 +74,10 @@ static void crp_read(u32 ad_cbe, u32 *data)
static void crp_write(u32 ad_cbe, u32 data)
{
unsigned long flags;
- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
*PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe;
*PCI_CRP_WDATA = data;
- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
}
static inline int check_master_abort(void)
@@ -101,7 +101,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32 cmd, u32* data)
int retval = 0;
int i;
- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
*PCI_NP_AD = addr;
@@ -118,7 +118,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32 cmd, u32* data)
if(check_master_abort())
retval = 1;
- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
return retval;
}
@@ -127,7 +127,7 @@ int ixp4xx_pci_read_no_errata(u32 addr, u32 cmd, u32* data)
unsigned long flags;
int retval = 0;
- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
*PCI_NP_AD = addr;
@@ -140,7 +140,7 @@ int ixp4xx_pci_read_no_errata(u32 addr, u32 cmd, u32* data)
if(check_master_abort())
retval = 1;
- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
return retval;
}
@@ -149,7 +149,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data)
unsigned long flags;
int retval = 0;
- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
*PCI_NP_AD = addr;
@@ -162,7 +162,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data)
if(check_master_abort())
retval = 1;
- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
return retval;
}
@@ -397,7 +397,8 @@ void __init ixp4xx_pci_preinit(void)
local_write_config(PCI_BASE_ADDRESS_0, 4, PHYS_OFFSET);
local_write_config(PCI_BASE_ADDRESS_1, 4, PHYS_OFFSET + SZ_16M);
local_write_config(PCI_BASE_ADDRESS_2, 4, PHYS_OFFSET + SZ_32M);
- local_write_config(PCI_BASE_ADDRESS_3, 4, PHYS_OFFSET + SZ_48M);
+ local_write_config(PCI_BASE_ADDRESS_3, 4,
+ PHYS_OFFSET + SZ_32M + SZ_16M);
/*
* Enable CSR window at 64 MiB to allow PCI masters
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index 57b5410c31f..ffb9d6afb89 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -17,8 +17,6 @@
#include <mach/hardware.h>
-#define IO_SPACE_LIMIT 0x0000ffff
-
extern int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
diff --git a/arch/arm/mach-kirkwood/Makefile.boot b/arch/arm/mach-kirkwood/Makefile.boot
index 67039c3e0c4..760a0efe758 100644
--- a/arch/arm/mach-kirkwood/Makefile.boot
+++ b/arch/arm/mach-kirkwood/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-ks8695/Makefile.boot b/arch/arm/mach-ks8695/Makefile.boot
index 48eb2cb3ac7..c9b0bebcf23 100644
--- a/arch/arm/mach-ks8695/Makefile.boot
+++ b/arch/arm/mach-ks8695/Makefile.boot
@@ -3,6 +3,6 @@
# PARAMS_PHYS must be within 4MB of ZRELADDR
# INITRD_PHYS must be in RAM
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-lpc32xx/Makefile.boot b/arch/arm/mach-lpc32xx/Makefile.boot
index b796b41ebf8..2cfe0ee635c 100644
--- a/arch/arm/mach-lpc32xx/Makefile.boot
+++ b/arch/arm/mach-lpc32xx/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x80008000
+ zreladdr-y += 0x80008000
params_phys-y := 0x80000100
initrd_phys-y := 0x82000000
diff --git a/arch/arm/mach-mmp/Makefile.boot b/arch/arm/mach-mmp/Makefile.boot
index 574a4aa8321..5edf03e2bee 100644
--- a/arch/arm/mach-mmp/Makefile.boot
+++ b/arch/arm/mach-mmp/Makefile.boot
@@ -1 +1 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c
index 06b5fa853c9..49c5d6d843d 100644
--- a/arch/arm/mach-mmp/aspenite.c
+++ b/arch/arm/mach-mmp/aspenite.c
@@ -160,7 +160,7 @@ static struct mtd_partition aspenite_nand_partitions[] = {
}, {
.name = "filesystem",
.offset = MTDPART_OFS_APPEND,
- .size = SZ_48M,
+ .size = SZ_32M + SZ_16M,
.mask_flags = 0,
}
};
diff --git a/arch/arm/mach-mmp/include/mach/pxa168.h b/arch/arm/mach-mmp/include/mach/pxa168.h
index 7f005843a70..7fb568d2845 100644
--- a/arch/arm/mach-mmp/include/mach/pxa168.h
+++ b/arch/arm/mach-mmp/include/mach/pxa168.h
@@ -35,6 +35,13 @@ extern struct pxa_device_desc pxa168_device_fb;
extern struct pxa_device_desc pxa168_device_keypad;
extern struct pxa_device_desc pxa168_device_eth;
+struct pxa168_usb_pdata {
+ /* If NULL, default phy init routine for PXA168 would be called */
+ int (*phy_init)(void __iomem *usb_phy_reg_base);
+};
+/* pdata can be NULL */
+int __init pxa168_add_usb_host(struct pxa168_usb_pdata *pdata);
+
static inline int pxa168_add_uart(int id)
{
struct pxa_device_desc *d = NULL;
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c
index 0156f535dae..b2b280c517d 100644
--- a/arch/arm/mach-mmp/pxa168.c
+++ b/arch/arm/mach-mmp/pxa168.c
@@ -25,6 +25,9 @@
#include <mach/dma.h>
#include <mach/devices.h>
#include <mach/mfp.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <mach/pxa168.h>
#include "common.h"
#include "clock.h"
@@ -83,6 +86,7 @@ static APBC_CLK(keypad, PXA168_KPC, 0, 32000);
static APMU_CLK(nand, NAND, 0x19b, 156000000);
static APMU_CLK(lcd, LCD, 0x7f, 312000000);
static APMU_CLK(eth, ETH, 0x09, 0);
+static APMU_CLK(usb, USB, 0x12, 0);
/* device and clock bindings */
static struct clk_lookup pxa168_clkregs[] = {
@@ -104,6 +108,7 @@ static struct clk_lookup pxa168_clkregs[] = {
INIT_CLKREG(&clk_lcd, "pxa168-fb", NULL),
INIT_CLKREG(&clk_keypad, "pxa27x-keypad", NULL),
INIT_CLKREG(&clk_eth, "pxa168-eth", "MFUCLK"),
+ INIT_CLKREG(&clk_usb, "pxa168-ehci", "PXA168-USBCLK"),
};
static int __init pxa168_init(void)
@@ -169,3 +174,44 @@ PXA168_DEVICE(ssp5, "pxa168-ssp", 4, SSP5, 0xd4021000, 0x40, 60, 61);
PXA168_DEVICE(fb, "pxa168-fb", -1, LCD, 0xd420b000, 0x1c8);
PXA168_DEVICE(keypad, "pxa27x-keypad", -1, KEYPAD, 0xd4012000, 0x4c);
PXA168_DEVICE(eth, "pxa168-eth", -1, MFU, 0xc0800000, 0x0fff);
+
+struct resource pxa168_usb_host_resources[] = {
+ /* USB Host conroller register base */
+ [0] = {
+ .start = 0xd4209000,
+ .end = 0xd4209000 + 0x200,
+ .flags = IORESOURCE_MEM,
+ .name = "pxa168-usb-host",
+ },
+ /* USB PHY register base */
+ [1] = {
+ .start = 0xd4206000,
+ .end = 0xd4206000 + 0xff,
+ .flags = IORESOURCE_MEM,
+ .name = "pxa168-usb-phy",
+ },
+ [2] = {
+ .start = IRQ_PXA168_USB2,
+ .end = IRQ_PXA168_USB2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 pxa168_usb_host_dmamask = DMA_BIT_MASK(32);
+struct platform_device pxa168_device_usb_host = {
+ .name = "pxa168-ehci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &pxa168_usb_host_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+
+ .num_resources = ARRAY_SIZE(pxa168_usb_host_resources),
+ .resource = pxa168_usb_host_resources,
+};
+
+int __init pxa168_add_usb_host(struct pxa168_usb_pdata *pdata)
+{
+ pxa168_device_usb_host.dev.platform_data = pdata;
+ return platform_device_register(&pxa168_device_usb_host);
+}
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index 6bd37a27e5f..176515a7698 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -93,7 +93,7 @@ static struct mtd_partition ttc_dkb_onenand_partitions[] = {
}, {
.name = "filesystem",
.offset = MTDPART_OFS_APPEND,
- .size = SZ_48M,
+ .size = SZ_32M + SZ_16M,
.mask_flags = 0,
}
};
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index 24dfbf8c07c..9b803a578b4 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x10008000
+ zreladdr-y += 0x10008000
params_phys-y := 0x10000100
initrd_phys-y := 0x10800000
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 18a3c97bc86..f81ef1f9d46 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -78,8 +78,8 @@ static void __init halibut_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
}
-static void __init halibut_fixup(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+static void __init halibut_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
mi->nr_banks=1;
mi->bank[0].start = PHYS_OFFSET;
diff --git a/arch/arm/mach-msm/board-mahimahi.c b/arch/arm/mach-msm/board-mahimahi.c
index 7a9a03eb189..1df15aa3c66 100644
--- a/arch/arm/mach-msm/board-mahimahi.c
+++ b/arch/arm/mach-msm/board-mahimahi.c
@@ -53,8 +53,8 @@ static void __init mahimahi_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
}
-static void __init mahimahi_fixup(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+static void __init mahimahi_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
mi->nr_banks = 2;
mi->bank[0].start = PHYS_OFFSET;
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index b7a84966b71..d1e4cc83b1e 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -24,6 +24,7 @@
#include <linux/smsc911x.h>
#include <linux/usb/msm_hsusb.h>
#include <linux/clkdev.h>
+#include <linux/memblock.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -42,6 +43,21 @@
extern struct sys_timer msm_timer;
+static void __init msm7x30_fixup(struct machine_desc *desc, struct tag *tag,
+ char **cmdline, struct meminfo *mi)
+{
+ for (; tag->hdr.size; tag = tag_next(tag))
+ if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) {
+ tag->u.mem.start = 0;
+ tag->u.mem.size += SZ_2M;
+ }
+}
+
+static void __init msm7x30_reserve(void)
+{
+ memblock_remove(0x0, SZ_2M);
+}
+
static int hsusb_phy_init_seq[] = {
0x30, 0x32, /* Enable and set Pre-Emphasis Depth to 20% */
0x02, 0x36, /* Disable CDR Auto Reset feature */
@@ -107,6 +123,8 @@ static void __init msm7x30_map_io(void)
MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
.boot_params = PLAT_PHYS_OFFSET + 0x100,
+ .fixup = msm7x30_fixup,
+ .reserve = msm7x30_reserve,
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
@@ -115,6 +133,8 @@ MACHINE_END
MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA")
.boot_params = PLAT_PHYS_OFFSET + 0x100,
+ .fixup = msm7x30_fixup,
+ .reserve = msm7x30_reserve,
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
@@ -123,6 +143,8 @@ MACHINE_END
MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID")
.boot_params = PLAT_PHYS_OFFSET + 0x100,
+ .fixup = msm7x30_fixup,
+ .reserve = msm7x30_reserve,
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
index 35c7ceeb3f2..b04468e7d00 100644
--- a/arch/arm/mach-msm/board-msm8960.c
+++ b/arch/arm/mach-msm/board-msm8960.c
@@ -20,16 +20,34 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clkdev.h>
+#include <linux/memblock.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
+#include <asm/setup.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>
#include "devices.h"
+static void __init msm8960_fixup(struct machine_desc *desc, struct tag *tag,
+ char **cmdline, struct meminfo *mi)
+{
+ for (; tag->hdr.size; tag = tag_next(tag))
+ if (tag->hdr.tag == ATAG_MEM &&
+ tag->u.mem.start == 0x40200000) {
+ tag->u.mem.start = 0x40000000;
+ tag->u.mem.size += SZ_2M;
+ }
+}
+
+static void __init msm8960_reserve(void)
+{
+ memblock_remove(0x40000000, SZ_2M);
+}
+
static void __init msm8960_map_io(void)
{
msm_map_msm8960_io();
@@ -76,6 +94,8 @@ static void __init msm8960_rumi3_init(void)
}
MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
+ .fixup = msm8960_fixup,
+ .reserve = msm8960_reserve,
.map_io = msm8960_map_io,
.init_irq = msm8960_init_irq,
.timer = &msm_timer,
@@ -83,6 +103,8 @@ MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
MACHINE_END
MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
+ .fixup = msm8960_fixup,
+ .reserve = msm8960_reserve,
.map_io = msm8960_map_io,
.init_irq = msm8960_init_irq,
.timer = &msm_timer,
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 1163b6fd05d..9221f54778b 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -20,14 +20,31 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/memblock.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
+#include <asm/setup.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>
+static void __init msm8x60_fixup(struct machine_desc *desc, struct tag *tag,
+ char **cmdline, struct meminfo *mi)
+{
+ for (; tag->hdr.size; tag = tag_next(tag))
+ if (tag->hdr.tag == ATAG_MEM &&
+ tag->u.mem.start == 0x40200000) {
+ tag->u.mem.start = 0x40000000;
+ tag->u.mem.size += SZ_2M;
+ }
+}
+
+static void __init msm8x60_reserve(void)
+{
+ memblock_remove(0x40000000, SZ_2M);
+}
static void __init msm8x60_map_io(void)
{
@@ -65,6 +82,8 @@ static void __init msm8x60_init(void)
}
MACHINE_START(MSM8X60_RUMI3, "QCT MSM8X60 RUMI3")
+ .fixup = msm8x60_fixup,
+ .reserve = msm8x60_reserve,
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
.init_machine = msm8x60_init,
@@ -72,6 +91,8 @@ MACHINE_START(MSM8X60_RUMI3, "QCT MSM8X60 RUMI3")
MACHINE_END
MACHINE_START(MSM8X60_SURF, "QCT MSM8X60 SURF")
+ .fixup = msm8x60_fixup,
+ .reserve = msm8x60_reserve,
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
.init_machine = msm8x60_init,
@@ -79,6 +100,8 @@ MACHINE_START(MSM8X60_SURF, "QCT MSM8X60 SURF")
MACHINE_END
MACHINE_START(MSM8X60_SIM, "QCT MSM8X60 SIMULATOR")
+ .fixup = msm8x60_fixup,
+ .reserve = msm8x60_reserve,
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
.init_machine = msm8x60_init,
@@ -86,6 +109,8 @@ MACHINE_START(MSM8X60_SIM, "QCT MSM8X60 SIMULATOR")
MACHINE_END
MACHINE_START(MSM8X60_FFA, "QCT MSM8X60 FFA")
+ .fixup = msm8x60_fixup,
+ .reserve = msm8x60_reserve,
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
.init_machine = msm8x60_init,
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
index 68f930f07d7..c6e043c896a 100644
--- a/arch/arm/mach-msm/board-sapphire.c
+++ b/arch/arm/mach-msm/board-sapphire.c
@@ -77,8 +77,8 @@ static struct map_desc sapphire_io_desc[] __initdata = {
}
};
-static void __init sapphire_fixup(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+static void __init sapphire_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
int smi_sz = parse_tag_smi((const struct tag *)tags);
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
index 814386772c6..7acd2021ada 100644
--- a/arch/arm/mach-msm/board-trout.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -48,8 +48,8 @@ static void __init trout_init_irq(void)
msm_init_irq();
}
-static void __init trout_fixup(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+static void __init trout_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
mi->nr_banks = 1;
mi->bank[0].start = PHYS_OFFSET;
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
index 22a53766962..d9145dfc2a3 100644
--- a/arch/arm/mach-msm/clock.c
+++ b/arch/arm/mach-msm/clock.c
@@ -18,7 +18,7 @@
#include <linux/list.h>
#include <linux/err.h>
#include <linux/spinlock.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/string.h>
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index f2f8d299ba9..58d5e7eec43 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -22,11 +22,11 @@
#elif defined(CONFIG_ARCH_QSD8X50)
#define PLAT_PHYS_OFFSET UL(0x20000000)
#elif defined(CONFIG_ARCH_MSM7X30)
-#define PLAT_PHYS_OFFSET UL(0x00200000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#elif defined(CONFIG_ARCH_MSM8X60)
-#define PLAT_PHYS_OFFSET UL(0x40200000)
+#define PLAT_PHYS_OFFSET UL(0x40000000)
#elif defined(CONFIG_ARCH_MSM8960)
-#define PLAT_PHYS_OFFSET UL(0x40200000)
+#define PLAT_PHYS_OFFSET UL(0x40000000)
#else
#define PLAT_PHYS_OFFSET UL(0x10000000)
#endif
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 1a1af9e5625..72765952091 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -156,6 +156,12 @@ void __init smp_init_cpus(void)
{
unsigned int i, ncores = get_core_count();
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
for (i = 0; i < ncores; i++)
set_cpu_possible(i, true);
diff --git a/arch/arm/mach-mv78xx0/Makefile.boot b/arch/arm/mach-mv78xx0/Makefile.boot
index 67039c3e0c4..760a0efe758 100644
--- a/arch/arm/mach-mv78xx0/Makefile.boot
+++ b/arch/arm/mach-mv78xx0/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-mx5/Makefile.boot b/arch/arm/mach-mx5/Makefile.boot
index e928be1b675..ca207ca305e 100644
--- a/arch/arm/mach-mx5/Makefile.boot
+++ b/arch/arm/mach-mx5/Makefile.boot
@@ -1,9 +1,9 @@
- zreladdr-$(CONFIG_ARCH_MX50) := 0x70008000
+ zreladdr-$(CONFIG_ARCH_MX50) += 0x70008000
params_phys-$(CONFIG_ARCH_MX50) := 0x70000100
initrd_phys-$(CONFIG_ARCH_MX50) := 0x70800000
- zreladdr-$(CONFIG_ARCH_MX51) := 0x90008000
+ zreladdr-$(CONFIG_ARCH_MX51) += 0x90008000
params_phys-$(CONFIG_ARCH_MX51) := 0x90000100
initrd_phys-$(CONFIG_ARCH_MX51) := 0x90800000
- zreladdr-$(CONFIG_ARCH_MX53) := 0x70008000
+ zreladdr-$(CONFIG_ARCH_MX53) += 0x70008000
params_phys-$(CONFIG_ARCH_MX53) := 0x70000100
initrd_phys-$(CONFIG_ARCH_MX53) := 0x70800000
diff --git a/arch/arm/mach-mxs/Makefile.boot b/arch/arm/mach-mxs/Makefile.boot
index eb541e0291d..07b11fe6453 100644
--- a/arch/arm/mach-mxs/Makefile.boot
+++ b/arch/arm/mach-mxs/Makefile.boot
@@ -1 +1 @@
-zreladdr-y := 0x40008000
+zreladdr-y += 0x40008000
diff --git a/arch/arm/mach-netx/Makefile.boot b/arch/arm/mach-netx/Makefile.boot
index b81cf6aff0a..534a4d27055 100644
--- a/arch/arm/mach-netx/Makefile.boot
+++ b/arch/arm/mach-netx/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x80008000
+ zreladdr-y += 0x80008000
diff --git a/arch/arm/mach-nomadik/Makefile.boot b/arch/arm/mach-nomadik/Makefile.boot
index c7e75acfe6c..ff0a4b5b0a8 100644
--- a/arch/arm/mach-nomadik/Makefile.boot
+++ b/arch/arm/mach-nomadik/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-nuc93x/Makefile.boot b/arch/arm/mach-nuc93x/Makefile.boot
index a057b546b6e..6c3d421c2d1 100644
--- a/arch/arm/mach-nuc93x/Makefile.boot
+++ b/arch/arm/mach-nuc93x/Makefile.boot
@@ -1,3 +1,3 @@
-zreladdr-y := 0x00008000
+zreladdr-y += 0x00008000
params_phys-y := 0x00000100
diff --git a/arch/arm/mach-nuc93x/time.c b/arch/arm/mach-nuc93x/time.c
index 2f90f9dc6e3..f9807c029ec 100644
--- a/arch/arm/mach-nuc93x/time.c
+++ b/arch/arm/mach-nuc93x/time.c
@@ -82,7 +82,7 @@ static void nuc93x_timer_setup(void)
timer0_load = (rate / TICKS_PER_SEC);
__raw_writel(timer0_load, REG_TICR0);
- val |= (PERIOD | COUNTEN | INTEN | PRESCALE);;
+ val |= (PERIOD | COUNTEN | INTEN | PRESCALE);
__raw_writel(val, REG_TCSR0);
}
diff --git a/arch/arm/mach-omap1/Makefile.boot b/arch/arm/mach-omap1/Makefile.boot
index 292d56c5a88..13bda8dbd60 100644
--- a/arch/arm/mach-omap1/Makefile.boot
+++ b/arch/arm/mach-omap1/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x10008000
+ zreladdr-y += 0x10008000
params_phys-y := 0x10000100
initrd_phys-y := 0x10800000
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index 943072d5a1d..7868e75ad07 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/clk.h>
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 57b66d590c5..89bfb49389f 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -36,6 +36,7 @@ config ARCH_OMAP3
select ARM_L1_CACHE_SHIFT_6 if !ARCH_OMAP4
select ARCH_HAS_OPP
select PM_OPP if PM
+ select ARM_CPU_SUSPEND if PM
config ARCH_OMAP4
bool "TI OMAP4"
@@ -50,6 +51,7 @@ config ARCH_OMAP4
select ARCH_HAS_OPP
select PM_OPP if PM
select USB_ARCH_HAS_EHCI
+ select ARM_CPU_SUSPEND if PM
comment "OMAP Core Type"
depends on ARCH_OMAP2
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index f3433656043..7317a2b39dd 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -242,14 +242,11 @@ obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o \
obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o \
hsmmc.o
obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o \
- hsmmc.o \
- omap_phy_internal.o
+ hsmmc.o
obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o \
- hsmmc.o \
- omap_phy_internal.o
+ hsmmc.o
-obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o \
- omap_phy_internal.o \
+obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o
obj-$(CONFIG_MACH_CRANEBOARD) += board-am3517crane.o
@@ -260,6 +257,8 @@ obj-$(CONFIG_MACH_TI8168EVM) += board-ti8168evm.o
usbfs-$(CONFIG_ARCH_OMAP_OTG) := usb-fs.o
obj-y += $(usbfs-m) $(usbfs-y)
obj-y += usb-musb.o
+obj-y += omap_phy_internal.o
+
obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
obj-y += usb-host.o
diff --git a/arch/arm/mach-omap2/Makefile.boot b/arch/arm/mach-omap2/Makefile.boot
index 565aff7f37a..b03e562acc6 100644
--- a/arch/arm/mach-omap2/Makefile.boot
+++ b/arch/arm/mach-omap2/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x80008000
+ zreladdr-y += 0x80008000
params_phys-y := 0x80000100
initrd_phys-y := 0x80800000
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 2028464cf5b..f79b7d2a8ed 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -193,7 +193,8 @@ static int __init omap2430_i2c_init(void)
{
omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo,
ARRAY_SIZE(sdp2430_i2c1_boardinfo));
- omap2_pmic_init("twl4030", &sdp2430_twldata);
+ omap_pmic_init(2, 100, "twl4030", INT_24XX_SYS_NIRQ,
+ &sdp2430_twldata);
return 0;
}
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index a9b45c76e1d..097a42d81e5 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -137,8 +137,7 @@ static void omap4_hsmmc1_before_set_reg(struct device *dev, int slot,
*/
reg = omap4_ctrl_pad_readl(control_pbias_offset);
reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
- OMAP4_MMC1_PWRDNZ_MASK |
- OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+ OMAP4_MMC1_PWRDNZ_MASK);
omap4_ctrl_pad_writel(reg, control_pbias_offset);
}
@@ -156,8 +155,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
else
reg |= OMAP4_MMC1_PBIASLITE_VMODE_MASK;
reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
- OMAP4_MMC1_PWRDNZ_MASK |
- OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+ OMAP4_MMC1_PWRDNZ_MASK);
omap4_ctrl_pad_writel(reg, control_pbias_offset);
timeout = jiffies + msecs_to_jiffies(5);
@@ -171,16 +169,14 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) {
pr_err("Pbias Voltage is not same as LDO\n");
/* Caution : On VMODE_ERROR Power Down MMC IO */
- reg &= ~(OMAP4_MMC1_PWRDNZ_MASK |
- OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+ reg &= ~(OMAP4_MMC1_PWRDNZ_MASK);
omap4_ctrl_pad_writel(reg, control_pbias_offset);
}
} else {
reg = omap4_ctrl_pad_readl(control_pbias_offset);
reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
OMAP4_MMC1_PWRDNZ_MASK |
- OMAP4_MMC1_PBIASLITE_VMODE_MASK |
- OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+ OMAP4_MMC1_PBIASLITE_VMODE_MASK);
omap4_ctrl_pad_writel(reg, control_pbias_offset);
}
}
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index ce65e9329c7..889464dc7b2 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -109,12 +109,10 @@ void __init smp_init_cpus(void)
ncores = scu_get_core_count(scu_base);
/* sanity check */
- if (ncores > NR_CPUS) {
- printk(KERN_WARNING
- "OMAP4: no. of cores (%d) greater than configured "
- "maximum of %d - clipping\n",
- ncores, NR_CPUS);
- ncores = NR_CPUS;
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
}
for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 34c01a7de81..f49804f181d 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -247,7 +247,7 @@ static void sr_stop_vddautocomp(struct omap_sr *sr)
* driver register and sr device intializtion API's. Only one call
* will ultimately succeed.
*
- * Currently this function registers interrrupt handler for a particular SR
+ * Currently this function registers interrupt handler for a particular SR
* if smartreflex class driver is already registered and has
* requested for interrupts and the SR interrupt line in present.
*/
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index a65145b02a5..19e4dac62a8 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -137,9 +137,6 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
musb_plat.mode = board_data->mode;
musb_plat.extvbus = board_data->extvbus;
- if (cpu_is_omap44xx())
- omap4430_phy_init(dev);
-
if (cpu_is_omap3517() || cpu_is_omap3505()) {
oh_name = "am35x_otg_hs";
name = "musb-am35x";
diff --git a/arch/arm/mach-orion5x/Makefile.boot b/arch/arm/mach-orion5x/Makefile.boot
index 67039c3e0c4..760a0efe758 100644
--- a/arch/arm/mach-orion5x/Makefile.boot
+++ b/arch/arm/mach-orion5x/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 0ab531d047f..22ace0bf2f9 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -308,8 +308,8 @@ void __init orion5x_init(void)
* Many orion-based systems have buggy bootloader implementations.
* This is a common fixup for bogus memory tags.
*/
-void __init tag_fixup_mem32(struct machine_desc *mdesc, struct tag *t,
- char **from, struct meminfo *meminfo)
+void __init tag_fixup_mem32(struct tag *t, char **from,
+ struct meminfo *meminfo)
{
for (; t->hdr.size; t = tag_next(t))
if (t->hdr.tag == ATAG_MEM &&
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index 3e5499dda49..909489f4d23 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -53,11 +53,9 @@ int orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys);
struct pci_bus *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys);
int orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
-struct machine_desc;
struct meminfo;
struct tag;
-extern void __init tag_fixup_mem32(struct machine_desc *, struct tag *,
- char **, struct meminfo *);
+extern void __init tag_fixup_mem32(struct tag *, char **, struct meminfo *);
#endif
diff --git a/arch/arm/mach-pnx4008/Makefile.boot b/arch/arm/mach-pnx4008/Makefile.boot
index 44c7117e20d..9fa19baa7f2 100644
--- a/arch/arm/mach-pnx4008/Makefile.boot
+++ b/arch/arm/mach-pnx4008/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x80008000
+ zreladdr-y += 0x80008000
params_phys-y := 0x80000100
initrd_phys-y := 0x80800000
diff --git a/arch/arm/mach-prima2/Makefile.boot b/arch/arm/mach-prima2/Makefile.boot
index d023db3ae4f..c77a4883a4e 100644
--- a/arch/arm/mach-prima2/Makefile.boot
+++ b/arch/arm/mach-prima2/Makefile.boot
@@ -1,3 +1,3 @@
-zreladdr-y := 0x00008000
+zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-pxa/Makefile.boot b/arch/arm/mach-pxa/Makefile.boot
index 1ead67178ec..2c1ae92f210 100644
--- a/arch/arm/mach-pxa/Makefile.boot
+++ b/arch/arm/mach-pxa/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0xa0008000
+ zreladdr-y += 0xa0008000
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index b6a51340270..d940e8a7227 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -839,8 +839,8 @@ static void __init cm_x300_init(void)
cm_x300_init_bl();
}
-static void __init cm_x300_fixup(struct machine_desc *mdesc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+static void __init cm_x300_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
/* Make sure that mi->bank[0].start = PHYS_ADDR */
for (; tags->hdr.size; tags = tag_next(tags))
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 185a37cad25..3e9483b0605 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -705,8 +705,8 @@ static void __init corgi_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
}
-static void __init fixup_corgi(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init fixup_corgi(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
sharpsl_save_param();
mi->nr_banks=1;
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c
index b4599ec9d61..e4a1f4dc89f 100644
--- a/arch/arm/mach-pxa/eseries.c
+++ b/arch/arm/mach-pxa/eseries.c
@@ -41,8 +41,7 @@
#include "clock.h"
/* Only e800 has 128MB RAM */
-void __init eseries_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+void __init eseries_fixup(struct tag *tags, char **cmdline, struct meminfo *mi)
{
mi->nr_banks=1;
mi->bank[0].start = 0xa0000000;
diff --git a/arch/arm/mach-pxa/eseries.h b/arch/arm/mach-pxa/eseries.h
index 5930f5e2a12..be921965e91 100644
--- a/arch/arm/mach-pxa/eseries.h
+++ b/arch/arm/mach-pxa/eseries.h
@@ -1,5 +1,4 @@
-void __init eseries_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi);
+void __init eseries_fixup(struct tag *tags, char **cmdline, struct meminfo *mi);
extern struct pxa2xx_udc_mach_info e7xx_udc_mach_info;
extern struct pxaficp_platform_data e7xx_ficp_platform_data;
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index b09e848eb6c..ca607571782 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -19,6 +19,8 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include <asm/exception.h>
+
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/gpio.h>
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index a113ea9ab4a..948ce3e729f 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -454,8 +454,8 @@ static void __init poodle_init(void)
poodle_init_spi();
}
-static void __init fixup_poodle(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init fixup_poodle(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
sharpsl_save_param();
mi->nr_banks=1;
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index df4356e8aca..72001ec6e7b 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -540,7 +540,7 @@ static struct mtd_partition saar_onenand_partitions[] = {
}, {
.name = "filesystem",
.offset = MTDPART_OFS_APPEND,
- .size = SZ_48M,
+ .size = SZ_32M + SZ_16M,
.mask_flags = 0,
}
};
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 438c7b5e451..d8dec9113aa 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -970,8 +970,8 @@ static void __init spitz_init(void)
spitz_i2c_init();
}
-static void __init spitz_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init spitz_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
sharpsl_save_param();
mi->nr_banks = 1;
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 9f69a268269..402b0c96613 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -960,8 +960,8 @@ static void __init tosa_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
}
-static void __init fixup_tosa(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init fixup_tosa(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
sharpsl_save_param();
mi->nr_banks=1;
diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
index acc600f5e72..937c42845df 100644
--- a/arch/arm/mach-pxa/xcep.c
+++ b/arch/arm/mach-pxa/xcep.c
@@ -142,8 +142,7 @@ static struct platform_device *devices[] __initdata = {
/* We have to state that there are HWMON devices on the I2C bus on XCEP.
* Drivers for HWMON verify capabilities of the adapter when loading and
- * refuse to attach if the adapter doesn't support HWMON class of devices.
- * See also Documentation/i2c/porting-clients. */
+ * refuse to attach if the adapter doesn't support HWMON class of devices. */
static struct i2c_pxa_platform_data xcep_i2c_platform_data = {
.class = I2C_CLASS_HWMON
};
diff --git a/arch/arm/mach-realview/Makefile.boot b/arch/arm/mach-realview/Makefile.boot
index d97e003d3df..d2c3d788f68 100644
--- a/arch/arm/mach-realview/Makefile.boot
+++ b/arch/arm/mach-realview/Makefile.boot
@@ -1,9 +1,9 @@
ifeq ($(CONFIG_REALVIEW_HIGH_PHYS_OFFSET),y)
- zreladdr-y := 0x70008000
+ zreladdr-y += 0x70008000
params_phys-y := 0x70000100
initrd_phys-y := 0x70800000
else
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
endif
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 5c23450d2d1..d5ed5d4f77d 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -517,8 +517,7 @@ void __init realview_timer_init(unsigned int timer_irq)
/*
* Setup the memory banks.
*/
-void realview_fixup(struct machine_desc *mdesc, struct tag *tags, char **from,
- struct meminfo *meminfo)
+void realview_fixup(struct tag *tags, char **from, struct meminfo *meminfo)
{
/*
* Most RealView platforms have 512MB contiguous RAM at 0x70000000.
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h
index 5c83d1e87a0..47259c89a75 100644
--- a/arch/arm/mach-realview/core.h
+++ b/arch/arm/mach-realview/core.h
@@ -63,8 +63,8 @@ extern int realview_flash_register(struct resource *res, u32 num);
extern int realview_eth_register(const char *name, struct resource *res);
extern int realview_usb_register(struct resource *res);
extern void realview_init_early(void);
-extern void realview_fixup(struct machine_desc *mdesc, struct tag *tags,
- char **from, struct meminfo *meminfo);
+extern void realview_fixup(struct tag *tags, char **from,
+ struct meminfo *meminfo);
extern void (*realview_reset)(char);
#endif
diff --git a/arch/arm/mach-realview/include/mach/board-pb1176.h b/arch/arm/mach-realview/include/mach/board-pb1176.h
index 002ab5d8c11..2a15fef9473 100644
--- a/arch/arm/mach-realview/include/mach/board-pb1176.h
+++ b/arch/arm/mach-realview/include/mach/board-pb1176.h
@@ -70,6 +70,7 @@
#define REALVIEW_DC1176_GIC_CPU_BASE 0x10120000 /* GIC CPU interface, on devchip */
#define REALVIEW_DC1176_GIC_DIST_BASE 0x10121000 /* GIC distributor, on devchip */
+#define REALVIEW_DC1176_ROM_BASE 0x10200000 /* 16KiB NRAM preudo-ROM, on devchip */
#define REALVIEW_PB1176_GIC_CPU_BASE 0x10040000 /* GIC CPU interface, on FPGA */
#define REALVIEW_PB1176_GIC_DIST_BASE 0x10041000 /* GIC distributor, on FPGA */
#define REALVIEW_PB1176_L220_BASE 0x10110000 /* L220 registers */
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index 4ae943bafa9..e83c654a58d 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -52,12 +52,10 @@ void __init smp_init_cpus(void)
ncores = scu_base ? scu_get_core_count(scu_base) : 1;
/* sanity check */
- if (ncores > NR_CPUS) {
- printk(KERN_WARNING
- "Realview: no. of cores (%d) greater than configured "
- "maximum of %d - clipping\n",
- ncores, NR_CPUS);
- ncores = NR_CPUS;
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
}
for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index ad5671acb66..865d440fcf5 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -26,6 +26,8 @@
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
#include <linux/amba/pl022.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -204,22 +206,48 @@ static struct amba_device *amba_devs[] __initdata = {
* RealView PB1176 platform devices
*/
static struct resource realview_pb1176_flash_resources[] = {
- [0] = {
+ {
.start = REALVIEW_PB1176_FLASH_BASE,
.end = REALVIEW_PB1176_FLASH_BASE + REALVIEW_PB1176_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
},
- [1] = {
+#ifdef CONFIG_REALVIEW_PB1176_SECURE_FLASH
+ {
.start = REALVIEW_PB1176_SEC_FLASH_BASE,
.end = REALVIEW_PB1176_SEC_FLASH_BASE + REALVIEW_PB1176_SEC_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
},
-};
-#ifdef CONFIG_REALVIEW_PB1176_SECURE_FLASH
-#define PB1176_FLASH_BLOCKS 2
-#else
-#define PB1176_FLASH_BLOCKS 1
#endif
+};
+
+static struct physmap_flash_data pb1176_rom_pdata = {
+ .probe_type = "map_rom",
+ .width = 4,
+ .nr_parts = 0,
+};
+
+static struct resource pb1176_rom_resources[] = {
+ /*
+ * This exposes the PB1176 DevChip ROM as an MTD ROM mapping.
+ * The reference manual states that this is actually a pseudo-ROM
+ * programmed in NVRAM.
+ */
+ {
+ .start = REALVIEW_DC1176_ROM_BASE,
+ .end = REALVIEW_DC1176_ROM_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct platform_device pb1176_rom_device = {
+ .name = "physmap-flash",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(pb1176_rom_resources),
+ .resource = pb1176_rom_resources,
+ .dev = {
+ .platform_data = &pb1176_rom_pdata,
+ },
+};
static struct resource realview_pb1176_smsc911x_resources[] = {
[0] = {
@@ -316,8 +344,7 @@ static void realview_pb1176_reset(char mode)
__raw_writel(REALVIEW_PB1176_SYS_SOFT_RESET, reset_ctrl);
}
-static void realview_pb1176_fixup(struct machine_desc *mdesc,
- struct tag *tags, char **from,
+static void realview_pb1176_fixup(struct tag *tags, char **from,
struct meminfo *meminfo)
{
/*
@@ -338,7 +365,8 @@ static void __init realview_pb1176_init(void)
#endif
realview_flash_register(realview_pb1176_flash_resources,
- PB1176_FLASH_BLOCKS);
+ ARRAY_SIZE(realview_pb1176_flash_resources));
+ platform_device_register(&pb1176_rom_device);
realview_eth_register(NULL, realview_pb1176_smsc911x_resources);
platform_device_register(&realview_i2c_device);
realview_usb_register(realview_pb1176_isp1761_resources);
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index 363b0ab5615..3e1eb2eb813 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -319,8 +319,8 @@ static struct sys_timer realview_pbx_timer = {
.init = realview_pbx_timer_init,
};
-static void realview_pbx_fixup(struct machine_desc *mdesc, struct tag *tags,
- char **from, struct meminfo *meminfo)
+static void realview_pbx_fixup(struct tag *tags, char **from,
+ struct meminfo *meminfo)
{
#ifdef CONFIG_SPARSEMEM
/*
@@ -335,7 +335,7 @@ static void realview_pbx_fixup(struct machine_desc *mdesc, struct tag *tags,
meminfo->bank[2].size = SZ_256M;
meminfo->nr_banks = 3;
#else
- realview_fixup(mdesc, tags, from, meminfo);
+ realview_fixup(tags, from, meminfo);
#endif
}
diff --git a/arch/arm/mach-rpc/Makefile.boot b/arch/arm/mach-rpc/Makefile.boot
index 9c9e7685ec7..ae2df0d7d03 100644
--- a/arch/arm/mach-rpc/Makefile.boot
+++ b/arch/arm/mach-rpc/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x10008000
+ zreladdr-y += 0x10008000
params_phys-y := 0x10000100
initrd_phys-y := 0x18000000
diff --git a/arch/arm/mach-rpc/include/mach/hardware.h b/arch/arm/mach-rpc/include/mach/hardware.h
index dde6b3c0e29..050d63c74cc 100644
--- a/arch/arm/mach-rpc/include/mach/hardware.h
+++ b/arch/arm/mach-rpc/include/mach/hardware.h
@@ -36,7 +36,7 @@
#define EASI_SIZE 0x08000000 /* EASI I/O */
#define EASI_START 0x08000000
-#define EASI_BASE 0xe5000000
+#define EASI_BASE IOMEM(0xe5000000)
#define IO_START 0x03000000 /* I/O */
#define IO_SIZE 0x01000000
@@ -51,21 +51,20 @@
/*
* IO Addresses
*/
-#define VIDC_BASE IOMEM(0xe0400000)
-#define EXPMASK_BASE 0xe0360000
-#define IOMD_BASE IOMEM(0xe0200000)
-#define IOC_BASE IOMEM(0xe0200000)
-#define PCIO_BASE IOMEM(0xe0010000)
-#define FLOPPYDMA_BASE IOMEM(0xe002a000)
+#define ECARD_EASI_BASE (EASI_BASE)
+#define VIDC_BASE (IO_BASE + 0x00400000)
+#define EXPMASK_BASE (IO_BASE + 0x00360000)
+#define ECARD_IOC4_BASE (IO_BASE + 0x00270000)
+#define ECARD_IOC_BASE (IO_BASE + 0x00240000)
+#define IOMD_BASE (IO_BASE + 0x00200000)
+#define IOC_BASE (IO_BASE + 0x00200000)
+#define ECARD_MEMC8_BASE (IO_BASE + 0x0002b000)
+#define FLOPPYDMA_BASE (IO_BASE + 0x0002a000)
+#define PCIO_BASE (IO_BASE + 0x00010000)
+#define ECARD_MEMC_BASE (IO_BASE + 0x00000000)
#define vidc_writel(val) __raw_writel(val, VIDC_BASE)
-#define IO_EC_EASI_BASE 0x81400000
-#define IO_EC_IOC4_BASE 0x8009c000
-#define IO_EC_IOC_BASE 0x80090000
-#define IO_EC_MEMC8_BASE 0x8000ac00
-#define IO_EC_MEMC_BASE 0x80000000
-
#define NETSLOT_BASE 0x0302b000
#define NETSLOT_SIZE 0x00001000
diff --git a/arch/arm/mach-rpc/include/mach/io.h b/arch/arm/mach-rpc/include/mach/io.h
index 20da7f486e5..695f4ed2e11 100644
--- a/arch/arm/mach-rpc/include/mach/io.h
+++ b/arch/arm/mach-rpc/include/mach/io.h
@@ -15,195 +15,18 @@
#include <mach/hardware.h>
-#define IO_SPACE_LIMIT 0xffffffff
+#define IO_SPACE_LIMIT 0xffff
/*
- * We use two different types of addressing - PC style addresses, and ARM
- * addresses. PC style accesses the PC hardware with the normal PC IO
- * addresses, eg 0x3f8 for serial#1. ARM addresses are 0x80000000+
- * and are translated to the start of IO. Note that all addresses are
- * shifted left!
- */
-#define __PORT_PCIO(x) (!((x) & 0x80000000))
-
-/*
- * Dynamic IO functions.
- */
-static inline void __outb (unsigned int value, unsigned int port)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "tst %2, #0x80000000\n\t"
- "mov %0, %4\n\t"
- "addeq %0, %0, %3\n\t"
- "strb %1, [%0, %2, lsl #2] @ outb"
- : "=&r" (temp)
- : "r" (value), "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE)
- : "cc");
-}
-
-static inline void __outw (unsigned int value, unsigned int port)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "tst %2, #0x80000000\n\t"
- "mov %0, %4\n\t"
- "addeq %0, %0, %3\n\t"
- "str %1, [%0, %2, lsl #2] @ outw"
- : "=&r" (temp)
- : "r" (value|value<<16), "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE)
- : "cc");
-}
-
-static inline void __outl (unsigned int value, unsigned int port)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "tst %2, #0x80000000\n\t"
- "mov %0, %4\n\t"
- "addeq %0, %0, %3\n\t"
- "str %1, [%0, %2, lsl #2] @ outl"
- : "=&r" (temp)
- : "r" (value), "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE)
- : "cc");
-}
-
-#define DECLARE_DYN_IN(sz,fnsuffix,instr) \
-static inline unsigned sz __in##fnsuffix (unsigned int port) \
-{ \
- unsigned long temp, value; \
- __asm__ __volatile__( \
- "tst %2, #0x80000000\n\t" \
- "mov %0, %4\n\t" \
- "addeq %0, %0, %3\n\t" \
- "ldr" instr " %1, [%0, %2, lsl #2] @ in" #fnsuffix \
- : "=&r" (temp), "=r" (value) \
- : "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE) \
- : "cc"); \
- return (unsigned sz)value; \
-}
-
-static inline void __iomem *__deprecated __ioaddr(unsigned int port)
-{
- void __iomem *ret;
- if (__PORT_PCIO(port))
- ret = PCIO_BASE;
- else
- ret = IO_BASE;
- return ret + (port << 2);
-}
-
-#define DECLARE_IO(sz,fnsuffix,instr) \
- DECLARE_DYN_IN(sz,fnsuffix,instr)
-
-DECLARE_IO(char,b,"b")
-DECLARE_IO(short,w,"")
-DECLARE_IO(int,l,"")
-
-#undef DECLARE_IO
-#undef DECLARE_DYN_IN
-
-/*
- * Constant address IO functions
+ * We need PC style IO addressing for:
+ * - floppy (at 0x3f2,0x3f4,0x3f5,0x3f7)
+ * - parport (at 0x278-0x27a, 0x27b-0x27f, 0x778-0x77a)
+ * - 8250 serial (only for compile)
*
- * These have to be macros for the 'J' constraint to work -
- * +/-4096 immediate operand.
+ * These peripherals are found in an area of MMIO which looks very much
+ * like an ISA bus, but with registers at the low byte of each word.
*/
-#define __outbc(value,port) \
-({ \
- if (__PORT_PCIO((port))) \
- __asm__ __volatile__( \
- "strb %0, [%1, %2] @ outbc" \
- : : "r" (value), "r" (PCIO_BASE), "Jr" ((port) << 2)); \
- else \
- __asm__ __volatile__( \
- "strb %0, [%1, %2] @ outbc" \
- : : "r" (value), "r" (IO_BASE), "r" ((port) << 2)); \
-})
-
-#define __inbc(port) \
-({ \
- unsigned char result; \
- if (__PORT_PCIO((port))) \
- __asm__ __volatile__( \
- "ldrb %0, [%1, %2] @ inbc" \
- : "=r" (result) : "r" (PCIO_BASE), "Jr" ((port) << 2)); \
- else \
- __asm__ __volatile__( \
- "ldrb %0, [%1, %2] @ inbc" \
- : "=r" (result) : "r" (IO_BASE), "r" ((port) << 2)); \
- result; \
-})
-
-#define __outwc(value,port) \
-({ \
- unsigned long __v = value; \
- if (__PORT_PCIO((port))) \
- __asm__ __volatile__( \
- "str %0, [%1, %2] @ outwc" \
- : : "r" (__v|__v<<16), "r" (PCIO_BASE), "Jr" ((port) << 2)); \
- else \
- __asm__ __volatile__( \
- "str %0, [%1, %2] @ outwc" \
- : : "r" (__v|__v<<16), "r" (IO_BASE), "r" ((port) << 2)); \
-})
-
-#define __inwc(port) \
-({ \
- unsigned short result; \
- if (__PORT_PCIO((port))) \
- __asm__ __volatile__( \
- "ldr %0, [%1, %2] @ inwc" \
- : "=r" (result) : "r" (PCIO_BASE), "Jr" ((port) << 2)); \
- else \
- __asm__ __volatile__( \
- "ldr %0, [%1, %2] @ inwc" \
- : "=r" (result) : "r" (IO_BASE), "r" ((port) << 2)); \
- result & 0xffff; \
-})
-
-#define __outlc(value,port) \
-({ \
- unsigned long __v = value; \
- if (__PORT_PCIO((port))) \
- __asm__ __volatile__( \
- "str %0, [%1, %2] @ outlc" \
- : : "r" (__v), "r" (PCIO_BASE), "Jr" ((port) << 2)); \
- else \
- __asm__ __volatile__( \
- "str %0, [%1, %2] @ outlc" \
- : : "r" (__v), "r" (IO_BASE), "r" ((port) << 2)); \
-})
-
-#define __inlc(port) \
-({ \
- unsigned long result; \
- if (__PORT_PCIO((port))) \
- __asm__ __volatile__( \
- "ldr %0, [%1, %2] @ inlc" \
- : "=r" (result) : "r" (PCIO_BASE), "Jr" ((port) << 2)); \
- else \
- __asm__ __volatile__( \
- "ldr %0, [%1, %2] @ inlc" \
- : "=r" (result) : "r" (IO_BASE), "r" ((port) << 2)); \
- result; \
-})
-
-#define inb(p) (__builtin_constant_p((p)) ? __inbc(p) : __inb(p))
-#define inw(p) (__builtin_constant_p((p)) ? __inwc(p) : __inw(p))
-#define inl(p) (__builtin_constant_p((p)) ? __inlc(p) : __inl(p))
-#define outb(v,p) (__builtin_constant_p((p)) ? __outbc(v,p) : __outb(v,p))
-#define outw(v,p) (__builtin_constant_p((p)) ? __outwc(v,p) : __outw(v,p))
-#define outl(v,p) (__builtin_constant_p((p)) ? __outlc(v,p) : __outl(v,p))
-
-/* the following macro is deprecated */
-#define ioaddr(port) ((unsigned long)__ioaddr((port)))
-
-#define insb(p,d,l) __raw_readsb(__ioaddr(p),d,l)
-#define insw(p,d,l) __raw_readsw(__ioaddr(p),d,l)
-
-#define outsb(p,d,l) __raw_writesb(__ioaddr(p),d,l)
-#define outsw(p,d,l) __raw_writesw(__ioaddr(p),d,l)
+#define __io(a) (PCIO_BASE + ((a) << 2))
/*
* 1:1 mapping for ioremapped regions.
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index 580b3c73d2c..1e0e60d0462 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -74,7 +74,7 @@ static struct map_desc rpc_io_desc[] __initdata = {
.length = IO_SIZE ,
.type = MT_DEVICE
}, { /* EASI space */
- .virtual = EASI_BASE,
+ .virtual = (unsigned long)EASI_BASE,
.pfn = __phys_to_pfn(EASI_START),
.length = EASI_SIZE,
.type = MT_DEVICE
diff --git a/arch/arm/mach-s3c2410/Makefile.boot b/arch/arm/mach-s3c2410/Makefile.boot
index 58c1dd7f8e1..4457605ba04 100644
--- a/arch/arm/mach-s3c2410/Makefile.boot
+++ b/arch/arm/mach-s3c2410/Makefile.boot
@@ -1,7 +1,7 @@
ifeq ($(CONFIG_PM_H1940),y)
- zreladdr-y := 0x30108000
+ zreladdr-y += 0x30108000
params_phys-y := 0x30100100
else
- zreladdr-y := 0x30008000
+ zreladdr-y += 0x30008000
params_phys-y := 0x30000100
endif
diff --git a/arch/arm/mach-s3c2410/include/mach/io.h b/arch/arm/mach-s3c2410/include/mach/io.h
index 9813dbf2ae4..118749f37c4 100644
--- a/arch/arm/mach-s3c2410/include/mach/io.h
+++ b/arch/arm/mach-s3c2410/include/mach/io.h
@@ -199,8 +199,6 @@ DECLARE_IO(int,l,"")
#define outw(v,p) (__builtin_constant_p((p)) ? __outwc(v,p) : __outw(v,p))
#define outl(v,p) (__builtin_constant_p((p)) ? __outlc(v,p) : __outl(v,p))
#define __ioaddr(p) (__builtin_constant_p((p)) ? __ioaddr(p) : __ioaddrc(p))
-/* the following macro is deprecated */
-#define ioaddr(port) __ioaddr((port))
#define insb(p,d,l) __raw_readsb(__ioaddr(p),d,l)
#define insw(p,d,l) __raw_readsw(__ioaddr(p),d,l)
diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c
index f1d3bd8f6f1..343a540d86a 100644
--- a/arch/arm/mach-s3c2410/s3c2410.c
+++ b/arch/arm/mach-s3c2410/s3c2410.c
@@ -170,7 +170,9 @@ int __init s3c2410_init(void)
{
printk("S3C2410: Initialising architecture\n");
+#ifdef CONFIG_PM
register_syscore_ops(&s3c2410_pm_syscore_ops);
+#endif
register_syscore_ops(&s3c24xx_irq_syscore_ops);
return sysdev_register(&s3c2410_sysdev);
diff --git a/arch/arm/mach-s3c2412/mach-smdk2413.c b/arch/arm/mach-s3c2412/mach-smdk2413.c
index 834cfb61bcf..3391713e0c9 100644
--- a/arch/arm/mach-s3c2412/mach-smdk2413.c
+++ b/arch/arm/mach-s3c2412/mach-smdk2413.c
@@ -92,8 +92,7 @@ static struct platform_device *smdk2413_devices[] __initdata = {
&s3c_device_usbgadget,
};
-static void __init smdk2413_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline,
+static void __init smdk2413_fixup(struct tag *tags, char **cmdline,
struct meminfo *mi)
{
if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) {
diff --git a/arch/arm/mach-s3c2412/mach-vstms.c b/arch/arm/mach-s3c2412/mach-vstms.c
index 83544ebe20a..b6ed4573553 100644
--- a/arch/arm/mach-s3c2412/mach-vstms.c
+++ b/arch/arm/mach-s3c2412/mach-vstms.c
@@ -129,9 +129,8 @@ static struct platform_device *vstms_devices[] __initdata = {
&s3c_device_nand,
};
-static void __init vstms_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline,
- struct meminfo *mi)
+static void __init vstms_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) {
mi->nr_banks=1;
diff --git a/arch/arm/mach-s3c2412/s3c2412.c b/arch/arm/mach-s3c2412/s3c2412.c
index ef0958d3e5c..57a1e01e4e5 100644
--- a/arch/arm/mach-s3c2412/s3c2412.c
+++ b/arch/arm/mach-s3c2412/s3c2412.c
@@ -245,7 +245,9 @@ int __init s3c2412_init(void)
{
printk("S3C2412: Initialising architecture\n");
+#ifdef CONFIG_PM
register_syscore_ops(&s3c2412_pm_syscore_ops);
+#endif
register_syscore_ops(&s3c24xx_irq_syscore_ops);
return sysdev_register(&s3c2412_sysdev);
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c
index 494ce913dc9..20b3fdfb305 100644
--- a/arch/arm/mach-s3c2416/s3c2416.c
+++ b/arch/arm/mach-s3c2416/s3c2416.c
@@ -97,7 +97,9 @@ int __init s3c2416_init(void)
s3c_fb_setname("s3c2443-fb");
+#ifdef CONFIG_PM
register_syscore_ops(&s3c2416_pm_syscore_ops);
+#endif
register_syscore_ops(&s3c24xx_irq_syscore_ops);
return sysdev_register(&s3c2416_sysdev);
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c
index ce99ff72838..2270d336021 100644
--- a/arch/arm/mach-s3c2440/s3c2440.c
+++ b/arch/arm/mach-s3c2440/s3c2440.c
@@ -55,7 +55,9 @@ int __init s3c2440_init(void)
/* register suspend/resume handlers */
+#ifdef CONFIG_PM
register_syscore_ops(&s3c2410_pm_syscore_ops);
+#endif
register_syscore_ops(&s3c244x_pm_syscore_ops);
register_syscore_ops(&s3c24xx_irq_syscore_ops);
diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c
index 9ad99f8016a..6f2b65e6e06 100644
--- a/arch/arm/mach-s3c2440/s3c2442.c
+++ b/arch/arm/mach-s3c2440/s3c2442.c
@@ -169,7 +169,9 @@ int __init s3c2442_init(void)
{
printk("S3C2442: Initialising architecture\n");
+#ifdef CONFIG_PM
register_syscore_ops(&s3c2410_pm_syscore_ops);
+#endif
register_syscore_ops(&s3c244x_pm_syscore_ops);
register_syscore_ops(&s3c24xx_irq_syscore_ops);
diff --git a/arch/arm/mach-s3c64xx/Makefile.boot b/arch/arm/mach-s3c64xx/Makefile.boot
index ba41fdc0a58..c642333af3e 100644
--- a/arch/arm/mach-s3c64xx/Makefile.boot
+++ b/arch/arm/mach-s3c64xx/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x50008000
+ zreladdr-y += 0x50008000
params_phys-y := 0x50000100
diff --git a/arch/arm/mach-s3c64xx/dev-uart.c b/arch/arm/mach-s3c64xx/dev-uart.c
index f797f748b99..c681b99eda0 100644
--- a/arch/arm/mach-s3c64xx/dev-uart.c
+++ b/arch/arm/mach-s3c64xx/dev-uart.c
@@ -37,21 +37,10 @@ static struct resource s3c64xx_uart0_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_S3CUART_RX0,
- .end = IRQ_S3CUART_RX0,
+ .start = IRQ_UART0,
+ .end = IRQ_UART0,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- .start = IRQ_S3CUART_TX0,
- .end = IRQ_S3CUART_TX0,
- .flags = IORESOURCE_IRQ,
-
- },
- [3] = {
- .start = IRQ_S3CUART_ERR0,
- .end = IRQ_S3CUART_ERR0,
- .flags = IORESOURCE_IRQ,
- }
};
static struct resource s3c64xx_uart1_resource[] = {
@@ -61,19 +50,8 @@ static struct resource s3c64xx_uart1_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_S3CUART_RX1,
- .end = IRQ_S3CUART_RX1,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_S3CUART_TX1,
- .end = IRQ_S3CUART_TX1,
- .flags = IORESOURCE_IRQ,
-
- },
- [3] = {
- .start = IRQ_S3CUART_ERR1,
- .end = IRQ_S3CUART_ERR1,
+ .start = IRQ_UART1,
+ .end = IRQ_UART1,
.flags = IORESOURCE_IRQ,
},
};
@@ -85,19 +63,8 @@ static struct resource s3c6xx_uart2_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_S3CUART_RX2,
- .end = IRQ_S3CUART_RX2,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_S3CUART_TX2,
- .end = IRQ_S3CUART_TX2,
- .flags = IORESOURCE_IRQ,
-
- },
- [3] = {
- .start = IRQ_S3CUART_ERR2,
- .end = IRQ_S3CUART_ERR2,
+ .start = IRQ_UART2,
+ .end = IRQ_UART2,
.flags = IORESOURCE_IRQ,
},
};
@@ -109,19 +76,8 @@ static struct resource s3c64xx_uart3_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_S3CUART_RX3,
- .end = IRQ_S3CUART_RX3,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_S3CUART_TX3,
- .end = IRQ_S3CUART_TX3,
- .flags = IORESOURCE_IRQ,
-
- },
- [3] = {
- .start = IRQ_S3CUART_ERR3,
- .end = IRQ_S3CUART_ERR3,
+ .start = IRQ_UART3,
+ .end = IRQ_UART3,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mach-s3c64xx/include/mach/irqs.h b/arch/arm/mach-s3c64xx/include/mach/irqs.h
index c026f67a80d..443f85b3c20 100644
--- a/arch/arm/mach-s3c64xx/include/mach/irqs.h
+++ b/arch/arm/mach-s3c64xx/include/mach/irqs.h
@@ -27,36 +27,6 @@
#define IRQ_VIC0_BASE S3C_IRQ(0)
#define IRQ_VIC1_BASE S3C_IRQ(32)
-/* UART interrupts, each UART has 4 intterupts per channel so
- * use the space between the ISA and S3C main interrupts. Note, these
- * are not in the same order as the S3C24XX series! */
-
-#define IRQ_S3CUART_BASE0 (16)
-#define IRQ_S3CUART_BASE1 (20)
-#define IRQ_S3CUART_BASE2 (24)
-#define IRQ_S3CUART_BASE3 (28)
-
-#define UART_IRQ_RXD (0)
-#define UART_IRQ_ERR (1)
-#define UART_IRQ_TXD (2)
-#define UART_IRQ_MODEM (3)
-
-#define IRQ_S3CUART_RX0 (IRQ_S3CUART_BASE0 + UART_IRQ_RXD)
-#define IRQ_S3CUART_TX0 (IRQ_S3CUART_BASE0 + UART_IRQ_TXD)
-#define IRQ_S3CUART_ERR0 (IRQ_S3CUART_BASE0 + UART_IRQ_ERR)
-
-#define IRQ_S3CUART_RX1 (IRQ_S3CUART_BASE1 + UART_IRQ_RXD)
-#define IRQ_S3CUART_TX1 (IRQ_S3CUART_BASE1 + UART_IRQ_TXD)
-#define IRQ_S3CUART_ERR1 (IRQ_S3CUART_BASE1 + UART_IRQ_ERR)
-
-#define IRQ_S3CUART_RX2 (IRQ_S3CUART_BASE2 + UART_IRQ_RXD)
-#define IRQ_S3CUART_TX2 (IRQ_S3CUART_BASE2 + UART_IRQ_TXD)
-#define IRQ_S3CUART_ERR2 (IRQ_S3CUART_BASE2 + UART_IRQ_ERR)
-
-#define IRQ_S3CUART_RX3 (IRQ_S3CUART_BASE3 + UART_IRQ_RXD)
-#define IRQ_S3CUART_TX3 (IRQ_S3CUART_BASE3 + UART_IRQ_TXD)
-#define IRQ_S3CUART_ERR3 (IRQ_S3CUART_BASE3 + UART_IRQ_ERR)
-
/* VIC based IRQs */
#define S3C64XX_IRQ_VIC0(x) (IRQ_VIC0_BASE + (x))
diff --git a/arch/arm/mach-s3c64xx/irq.c b/arch/arm/mach-s3c64xx/irq.c
index 75d9a0e4919..b07357e9495 100644
--- a/arch/arm/mach-s3c64xx/irq.c
+++ b/arch/arm/mach-s3c64xx/irq.c
@@ -25,29 +25,6 @@
#include <plat/irq-uart.h>
#include <plat/cpu.h>
-static struct s3c_uart_irq uart_irqs[] = {
- [0] = {
- .regs = S3C_VA_UART0,
- .base_irq = IRQ_S3CUART_BASE0,
- .parent_irq = IRQ_UART0,
- },
- [1] = {
- .regs = S3C_VA_UART1,
- .base_irq = IRQ_S3CUART_BASE1,
- .parent_irq = IRQ_UART1,
- },
- [2] = {
- .regs = S3C_VA_UART2,
- .base_irq = IRQ_S3CUART_BASE2,
- .parent_irq = IRQ_UART2,
- },
- [3] = {
- .regs = S3C_VA_UART3,
- .base_irq = IRQ_S3CUART_BASE3,
- .parent_irq = IRQ_UART3,
- },
-};
-
/* setup the sources the vic should advertise resume for, even though it
* is not doing the wake (set_irq_wake needs to be valid) */
#define IRQ_VIC0_RESUME (1 << (IRQ_RTC_TIC - IRQ_VIC0_BASE))
@@ -67,6 +44,4 @@ void __init s3c64xx_init_irq(u32 vic0_valid, u32 vic1_valid)
/* add the timer sub-irqs */
s3c_init_vic_timer_irq(5, IRQ_TIMER0);
-
- s3c_init_uart_irqs(uart_irqs, ARRAY_SIZE(uart_irqs));
}
diff --git a/arch/arm/mach-s5p64x0/Makefile.boot b/arch/arm/mach-s5p64x0/Makefile.boot
index ff90aa13bd6..79ece4055b0 100644
--- a/arch/arm/mach-s5p64x0/Makefile.boot
+++ b/arch/arm/mach-s5p64x0/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x20008000
+ zreladdr-y += 0x20008000
params_phys-y := 0x20000100
diff --git a/arch/arm/mach-s5pc100/Makefile.boot b/arch/arm/mach-s5pc100/Makefile.boot
index ff90aa13bd6..79ece4055b0 100644
--- a/arch/arm/mach-s5pc100/Makefile.boot
+++ b/arch/arm/mach-s5pc100/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x20008000
+ zreladdr-y += 0x20008000
params_phys-y := 0x20000100
diff --git a/arch/arm/mach-s5pv210/Makefile.boot b/arch/arm/mach-s5pv210/Makefile.boot
index ff90aa13bd6..79ece4055b0 100644
--- a/arch/arm/mach-s5pv210/Makefile.boot
+++ b/arch/arm/mach-s5pv210/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x20008000
+ zreladdr-y += 0x20008000
params_phys-y := 0x20000100
diff --git a/arch/arm/mach-sa1100/Makefile b/arch/arm/mach-sa1100/Makefile
index 41252d22e65..00631787e80 100644
--- a/arch/arm/mach-sa1100/Makefile
+++ b/arch/arm/mach-sa1100/Makefile
@@ -45,7 +45,6 @@ obj-$(CONFIG_SA1100_PLEB) += pleb.o
obj-$(CONFIG_SA1100_SHANNON) += shannon.o
obj-$(CONFIG_SA1100_SIMPAD) += simpad.o
-led-$(CONFIG_SA1100_SIMPAD) += leds-simpad.o
# LEDs support
obj-$(CONFIG_LEDS) += $(led-y)
diff --git a/arch/arm/mach-sa1100/Makefile.boot b/arch/arm/mach-sa1100/Makefile.boot
index a56ad0417cf..5a616f6e561 100644
--- a/arch/arm/mach-sa1100/Makefile.boot
+++ b/arch/arm/mach-sa1100/Makefile.boot
@@ -1,6 +1,7 @@
- zreladdr-y := 0xc0008000
ifeq ($(CONFIG_ARCH_SA1100),y)
- zreladdr-$(CONFIG_SA1111) := 0xc0208000
+ zreladdr-$(CONFIG_SA1111) += 0xc0208000
+else
+ zreladdr-y += 0xc0008000
endif
params_phys-y := 0xc0000100
initrd_phys-y := 0xc0800000
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 26257df19b6..6290ce28b88 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -301,8 +301,7 @@ static void __init get_assabet_scr(void)
}
static void __init
-fixup_assabet(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+fixup_assabet(struct tag *tags, char **cmdline, struct meminfo *mi)
{
/* This must be done before any call to machine_has_neponset() */
map_sa1100_gpio_regs();
diff --git a/arch/arm/mach-sa1100/include/mach/io.h b/arch/arm/mach-sa1100/include/mach/io.h
index d8b43f3dcd2..dfc27ff0834 100644
--- a/arch/arm/mach-sa1100/include/mach/io.h
+++ b/arch/arm/mach-sa1100/include/mach/io.h
@@ -10,11 +10,9 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-#define IO_SPACE_LIMIT 0xffffffff
-
/*
- * We don't actually have real ISA nor PCI buses, but there is so many
- * drivers out there that might just work if we fake them...
+ * __io() is required to be an equivalent mapping to __mem_pci() for
+ * SOC_COMMON to work.
*/
#define __io(a) __typesafe_io(a)
#define __mem_pci(a) (a)
diff --git a/arch/arm/mach-sa1100/include/mach/simpad.h b/arch/arm/mach-sa1100/include/mach/simpad.h
index 9296c4513ce..db28118103e 100644
--- a/arch/arm/mach-sa1100/include/mach/simpad.h
+++ b/arch/arm/mach-sa1100/include/mach/simpad.h
@@ -48,32 +48,80 @@
#define GPIO_SMART_CARD GPIO_GPIO10
#define IRQ_GPIO_SMARD_CARD IRQ_GPIO10
-// CS3 Latch is write only, a shadow is necessary
+/*--- ucb1x00 GPIO ---*/
+#define SIMPAD_UCB1X00_GPIO_BASE (GPIO_MAX + 1)
+#define SIMPAD_UCB1X00_GPIO_PROG1 (SIMPAD_UCB1X00_GPIO_BASE)
+#define SIMPAD_UCB1X00_GPIO_PROG2 (SIMPAD_UCB1X00_GPIO_BASE + 1)
+#define SIMPAD_UCB1X00_GPIO_UP (SIMPAD_UCB1X00_GPIO_BASE + 2)
+#define SIMPAD_UCB1X00_GPIO_DOWN (SIMPAD_UCB1X00_GPIO_BASE + 3)
+#define SIMPAD_UCB1X00_GPIO_LEFT (SIMPAD_UCB1X00_GPIO_BASE + 4)
+#define SIMPAD_UCB1X00_GPIO_RIGHT (SIMPAD_UCB1X00_GPIO_BASE + 5)
+#define SIMPAD_UCB1X00_GPIO_6 (SIMPAD_UCB1X00_GPIO_BASE + 6)
+#define SIMPAD_UCB1X00_GPIO_7 (SIMPAD_UCB1X00_GPIO_BASE + 7)
+#define SIMPAD_UCB1X00_GPIO_HEADSET (SIMPAD_UCB1X00_GPIO_BASE + 8)
+#define SIMPAD_UCB1X00_GPIO_SPEAKER (SIMPAD_UCB1X00_GPIO_BASE + 9)
+
+/*--- CS3 Latch ---*/
+#define SIMPAD_CS3_GPIO_BASE (GPIO_MAX + 11)
+#define SIMPAD_CS3_VCC_5V_EN (SIMPAD_CS3_GPIO_BASE)
+#define SIMPAD_CS3_VCC_3V_EN (SIMPAD_CS3_GPIO_BASE + 1)
+#define SIMPAD_CS3_EN1 (SIMPAD_CS3_GPIO_BASE + 2)
+#define SIMPAD_CS3_EN0 (SIMPAD_CS3_GPIO_BASE + 3)
+#define SIMPAD_CS3_DISPLAY_ON (SIMPAD_CS3_GPIO_BASE + 4)
+#define SIMPAD_CS3_PCMCIA_BUFF_DIS (SIMPAD_CS3_GPIO_BASE + 5)
+#define SIMPAD_CS3_MQ_RESET (SIMPAD_CS3_GPIO_BASE + 6)
+#define SIMPAD_CS3_PCMCIA_RESET (SIMPAD_CS3_GPIO_BASE + 7)
+#define SIMPAD_CS3_DECT_POWER_ON (SIMPAD_CS3_GPIO_BASE + 8)
+#define SIMPAD_CS3_IRDA_SD (SIMPAD_CS3_GPIO_BASE + 9)
+#define SIMPAD_CS3_RS232_ON (SIMPAD_CS3_GPIO_BASE + 10)
+#define SIMPAD_CS3_SD_MEDIAQ (SIMPAD_CS3_GPIO_BASE + 11)
+#define SIMPAD_CS3_LED2_ON (SIMPAD_CS3_GPIO_BASE + 12)
+#define SIMPAD_CS3_IRDA_MODE (SIMPAD_CS3_GPIO_BASE + 13)
+#define SIMPAD_CS3_ENABLE_5V (SIMPAD_CS3_GPIO_BASE + 14)
+#define SIMPAD_CS3_RESET_SIMCARD (SIMPAD_CS3_GPIO_BASE + 15)
+
+#define SIMPAD_CS3_PCMCIA_BVD1 (SIMPAD_CS3_GPIO_BASE + 16)
+#define SIMPAD_CS3_PCMCIA_BVD2 (SIMPAD_CS3_GPIO_BASE + 17)
+#define SIMPAD_CS3_PCMCIA_VS1 (SIMPAD_CS3_GPIO_BASE + 18)
+#define SIMPAD_CS3_PCMCIA_VS2 (SIMPAD_CS3_GPIO_BASE + 19)
+#define SIMPAD_CS3_LOCK_IND (SIMPAD_CS3_GPIO_BASE + 20)
+#define SIMPAD_CS3_CHARGING_STATE (SIMPAD_CS3_GPIO_BASE + 21)
+#define SIMPAD_CS3_PCMCIA_SHORT (SIMPAD_CS3_GPIO_BASE + 22)
+#define SIMPAD_CS3_GPIO_23 (SIMPAD_CS3_GPIO_BASE + 23)
-#define CS3BUSTYPE unsigned volatile long
#define CS3_BASE 0xf1000000
-#define VCC_5V_EN 0x0001 // For 5V PCMCIA
-#define VCC_3V_EN 0x0002 // FOR 3.3V PCMCIA
-#define EN1 0x0004 // This is only for EPROM's
-#define EN0 0x0008 // Both should be enable for 3.3V or 5V
-#define DISPLAY_ON 0x0010
-#define PCMCIA_BUFF_DIS 0x0020
-#define MQ_RESET 0x0040
-#define PCMCIA_RESET 0x0080
-#define DECT_POWER_ON 0x0100
-#define IRDA_SD 0x0200 // Shutdown for powersave
-#define RS232_ON 0x0400
-#define SD_MEDIAQ 0x0800 // Shutdown for powersave
-#define LED2_ON 0x1000
-#define IRDA_MODE 0x2000 // Fast/Slow IrDA mode
-#define ENABLE_5V 0x4000 // Enable 5V circuit
-#define RESET_SIMCARD 0x8000
-
-#define RS232_ENABLE 0x0440
-#define PCMCIAMASK 0x402f
-
-
+long simpad_get_cs3_ro(void);
+long simpad_get_cs3_shadow(void);
+void simpad_set_cs3_bit(int value);
+void simpad_clear_cs3_bit(int value);
+
+#define VCC_5V_EN 0x0001 /* For 5V PCMCIA */
+#define VCC_3V_EN 0x0002 /* FOR 3.3V PCMCIA */
+#define EN1 0x0004 /* This is only for EPROM's */
+#define EN0 0x0008 /* Both should be enable for 3.3V or 5V */
+#define DISPLAY_ON 0x0010
+#define PCMCIA_BUFF_DIS 0x0020
+#define MQ_RESET 0x0040
+#define PCMCIA_RESET 0x0080
+#define DECT_POWER_ON 0x0100
+#define IRDA_SD 0x0200 /* Shutdown for powersave */
+#define RS232_ON 0x0400
+#define SD_MEDIAQ 0x0800 /* Shutdown for powersave */
+#define LED2_ON 0x1000
+#define IRDA_MODE 0x2000 /* Fast/Slow IrDA mode */
+#define ENABLE_5V 0x4000 /* Enable 5V circuit */
+#define RESET_SIMCARD 0x8000
+
+#define PCMCIA_BVD1 0x01
+#define PCMCIA_BVD2 0x02
+#define PCMCIA_VS1 0x04
+#define PCMCIA_VS2 0x08
+#define LOCK_IND 0x10
+#define CHARGING_STATE 0x20
+#define PCMCIA_SHORT 0x40
+
+/*--- Battery ---*/
struct simpad_battery {
unsigned char ac_status; /* line connected yes/no */
unsigned char status; /* battery loading yes/no */
diff --git a/arch/arm/mach-sa1100/leds-simpad.c b/arch/arm/mach-sa1100/leds-simpad.c
deleted file mode 100644
index d50f4eeaa12..00000000000
--- a/arch/arm/mach-sa1100/leds-simpad.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * linux/arch/arm/mach-sa1100/leds-simpad.c
- *
- * Author: Juergen Messerer <juergen.messerer@siemens.ch>
- */
-#include <linux/init.h>
-
-#include <mach/hardware.h>
-#include <asm/leds.h>
-#include <asm/system.h>
-#include <mach/simpad.h>
-
-#include "leds.h"
-
-
-#define LED_STATE_ENABLED 1
-#define LED_STATE_CLAIMED 2
-
-static unsigned int led_state;
-static unsigned int hw_led_state;
-
-#define LED_GREEN (1)
-#define LED_MASK (1)
-
-extern void set_cs3_bit(int value);
-extern void clear_cs3_bit(int value);
-
-void simpad_leds_event(led_event_t evt)
-{
- switch (evt)
- {
- case led_start:
- hw_led_state = LED_GREEN;
- led_state = LED_STATE_ENABLED;
- break;
-
- case led_stop:
- led_state &= ~LED_STATE_ENABLED;
- break;
-
- case led_claim:
- led_state |= LED_STATE_CLAIMED;
- hw_led_state = LED_GREEN;
- break;
-
- case led_release:
- led_state &= ~LED_STATE_CLAIMED;
- hw_led_state = LED_GREEN;
- break;
-
-#ifdef CONFIG_LEDS_TIMER
- case led_timer:
- if (!(led_state & LED_STATE_CLAIMED))
- hw_led_state ^= LED_GREEN;
- break;
-#endif
-
-#ifdef CONFIG_LEDS_CPU
- case led_idle_start:
- break;
-
- case led_idle_end:
- break;
-#endif
-
- case led_halted:
- break;
-
- case led_green_on:
- if (led_state & LED_STATE_CLAIMED)
- hw_led_state |= LED_GREEN;
- break;
-
- case led_green_off:
- if (led_state & LED_STATE_CLAIMED)
- hw_led_state &= ~LED_GREEN;
- break;
-
- case led_amber_on:
- break;
-
- case led_amber_off:
- break;
-
- case led_red_on:
- break;
-
- case led_red_off:
- break;
-
- default:
- break;
- }
-
- if (led_state & LED_STATE_ENABLED)
- set_cs3_bit(LED2_ON);
- else
- clear_cs3_bit(LED2_ON);
-}
-
diff --git a/arch/arm/mach-sa1100/leds.c b/arch/arm/mach-sa1100/leds.c
index bbfe197fb4d..5fe71a0f105 100644
--- a/arch/arm/mach-sa1100/leds.c
+++ b/arch/arm/mach-sa1100/leds.c
@@ -42,8 +42,6 @@ sa1100_leds_init(void)
leds_event = adsbitsy_leds_event;
if (machine_is_pt_system3())
leds_event = system3_leds_event;
- if (machine_is_simpad())
- leds_event = simpad_leds_event; /* what about machine registry? including led, apm... -zecke */
leds_event(led_start);
return 0;
diff --git a/arch/arm/mach-sa1100/leds.h b/arch/arm/mach-sa1100/leds.h
index 68cc9f773d6..776b6020f55 100644
--- a/arch/arm/mach-sa1100/leds.h
+++ b/arch/arm/mach-sa1100/leds.h
@@ -11,4 +11,3 @@ extern void pfs168_leds_event(led_event_t evt);
extern void graphicsmaster_leds_event(led_event_t evt);
extern void adsbitsy_leds_event(led_event_t evt);
extern void system3_leds_event(led_event_t evt);
-extern void simpad_leds_event(led_event_t evt);
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index cfb76077bd2..34659f354be 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -13,6 +13,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
+#include <linux/gpio.h>
#include <asm/irq.h>
#include <mach/hardware.h>
@@ -28,35 +29,92 @@
#include <linux/serial_core.h>
#include <linux/ioport.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+#include <linux/leds.h>
+#include <linux/i2c-gpio.h>
#include "generic.h"
-long cs3_shadow;
+/*
+ * CS3 support
+ */
-long get_cs3_shadow(void)
+static long cs3_shadow;
+static spinlock_t cs3_lock;
+static struct gpio_chip cs3_gpio;
+
+long simpad_get_cs3_ro(void)
+{
+ return readl(CS3_BASE);
+}
+EXPORT_SYMBOL(simpad_get_cs3_ro);
+
+long simpad_get_cs3_shadow(void)
{
return cs3_shadow;
}
+EXPORT_SYMBOL(simpad_get_cs3_shadow);
-void set_cs3(long value)
+static void __simpad_write_cs3(void)
{
- *(CS3BUSTYPE *)(CS3_BASE) = cs3_shadow = value;
+ writel(cs3_shadow, CS3_BASE);
}
-void set_cs3_bit(int value)
+void simpad_set_cs3_bit(int value)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs3_lock, flags);
cs3_shadow |= value;
- *(CS3BUSTYPE *)(CS3_BASE) = cs3_shadow;
+ __simpad_write_cs3();
+ spin_unlock_irqrestore(&cs3_lock, flags);
}
+EXPORT_SYMBOL(simpad_set_cs3_bit);
-void clear_cs3_bit(int value)
+void simpad_clear_cs3_bit(int value)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs3_lock, flags);
cs3_shadow &= ~value;
- *(CS3BUSTYPE *)(CS3_BASE) = cs3_shadow;
+ __simpad_write_cs3();
+ spin_unlock_irqrestore(&cs3_lock, flags);
}
+EXPORT_SYMBOL(simpad_clear_cs3_bit);
+
+static void cs3_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ if (offset > 15)
+ return;
+ if (value)
+ simpad_set_cs3_bit(1 << offset);
+ else
+ simpad_clear_cs3_bit(1 << offset);
+};
+
+static int cs3_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset > 15)
+ return simpad_get_cs3_ro() & (1 << (offset - 16));
+ return simpad_get_cs3_shadow() & (1 << offset);
+};
-EXPORT_SYMBOL(set_cs3_bit);
-EXPORT_SYMBOL(clear_cs3_bit);
+static int cs3_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset > 15)
+ return 0;
+ return -EINVAL;
+};
+
+static int cs3_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ if (offset > 15)
+ return -EINVAL;
+ cs3_gpio_set(chip, offset, value);
+ return 0;
+};
static struct map_desc simpad_io_desc[] __initdata = {
{ /* MQ200 */
@@ -64,9 +122,9 @@ static struct map_desc simpad_io_desc[] __initdata = {
.pfn = __phys_to_pfn(0x4b800000),
.length = 0x00800000,
.type = MT_DEVICE
- }, { /* Paules CS3, write only */
- .virtual = 0xf1000000,
- .pfn = __phys_to_pfn(0x18000000),
+ }, { /* Simpad CS3 */
+ .virtual = CS3_BASE,
+ .pfn = __phys_to_pfn(SA1100_CS3_PHYS),
.length = 0x00100000,
.type = MT_DEVICE
},
@@ -78,12 +136,12 @@ static void simpad_uart_pm(struct uart_port *port, u_int state, u_int oldstate)
if (port->mapbase == (u_int)&Ser1UTCR0) {
if (state)
{
- clear_cs3_bit(RS232_ON);
- clear_cs3_bit(DECT_POWER_ON);
+ simpad_clear_cs3_bit(RS232_ON);
+ simpad_clear_cs3_bit(DECT_POWER_ON);
}else
{
- set_cs3_bit(RS232_ON);
- set_cs3_bit(DECT_POWER_ON);
+ simpad_set_cs3_bit(RS232_ON);
+ simpad_set_cs3_bit(DECT_POWER_ON);
}
}
}
@@ -132,6 +190,7 @@ static struct resource simpad_flash_resources [] = {
static struct mcp_plat_data simpad_mcp_data = {
.mccr0 = MCCR0_ADM,
.sclk_rate = 11981000,
+ .gpio_base = SIMPAD_UCB1X00_GPIO_BASE,
};
@@ -142,9 +201,10 @@ static void __init simpad_map_io(void)
iotable_init(simpad_io_desc, ARRAY_SIZE(simpad_io_desc));
- set_cs3_bit (EN1 | EN0 | LED2_ON | DISPLAY_ON | RS232_ON |
- ENABLE_5V | RESET_SIMCARD | DECT_POWER_ON);
-
+ /* Initialize CS3 */
+ cs3_shadow = (EN1 | EN0 | LED2_ON | DISPLAY_ON |
+ RS232_ON | ENABLE_5V | RESET_SIMCARD | DECT_POWER_ON);
+ __simpad_write_cs3(); /* Spinlocks not yet initialized */
sa1100_register_uart_fns(&simpad_port_fns);
sa1100_register_uart(0, 3); /* serial interface */
@@ -170,13 +230,14 @@ static void __init simpad_map_io(void)
static void simpad_power_off(void)
{
- local_irq_disable(); // was cli
- set_cs3(0x800); /* only SD_MEDIAQ */
+ local_irq_disable();
+ cs3_shadow = SD_MEDIAQ;
+ __simpad_write_cs3(); /* Bypass spinlock here */
/* disable internal oscillator, float CS lines */
PCFR = (PCFR_OPDE | PCFR_FP | PCFR_FS);
- /* enable wake-up on GPIO0 (Assabet...) */
- PWER = GFER = GRER = 1;
+ /* enable wake-up on GPIO0 */
+ PWER = GFER = GRER = PWER_GPIO0;
/*
* set scratchpad to zero, just in case it is used as a
* restart address by the bootloader.
@@ -192,6 +253,91 @@ static void simpad_power_off(void)
}
+/*
+ * gpio_keys
+*/
+
+static struct gpio_keys_button simpad_button_table[] = {
+ { KEY_POWER, IRQ_GPIO_POWER_BUTTON, 1, "power button" },
+};
+
+static struct gpio_keys_platform_data simpad_keys_data = {
+ .buttons = simpad_button_table,
+ .nbuttons = ARRAY_SIZE(simpad_button_table),
+};
+
+static struct platform_device simpad_keys = {
+ .name = "gpio-keys",
+ .dev = {
+ .platform_data = &simpad_keys_data,
+ },
+};
+
+static struct gpio_keys_button simpad_polled_button_table[] = {
+ { KEY_PROG1, SIMPAD_UCB1X00_GPIO_PROG1, 1, "prog1 button" },
+ { KEY_PROG2, SIMPAD_UCB1X00_GPIO_PROG2, 1, "prog2 button" },
+ { KEY_UP, SIMPAD_UCB1X00_GPIO_UP, 1, "up button" },
+ { KEY_DOWN, SIMPAD_UCB1X00_GPIO_DOWN, 1, "down button" },
+ { KEY_LEFT, SIMPAD_UCB1X00_GPIO_LEFT, 1, "left button" },
+ { KEY_RIGHT, SIMPAD_UCB1X00_GPIO_RIGHT, 1, "right button" },
+};
+
+static struct gpio_keys_platform_data simpad_polled_keys_data = {
+ .buttons = simpad_polled_button_table,
+ .nbuttons = ARRAY_SIZE(simpad_polled_button_table),
+ .poll_interval = 50,
+};
+
+static struct platform_device simpad_polled_keys = {
+ .name = "gpio-keys-polled",
+ .dev = {
+ .platform_data = &simpad_polled_keys_data,
+ },
+};
+
+/*
+ * GPIO LEDs
+ */
+
+static struct gpio_led simpad_leds[] = {
+ {
+ .name = "simpad:power",
+ .gpio = SIMPAD_CS3_LED2_ON,
+ .active_low = 0,
+ .default_trigger = "default-on",
+ },
+};
+
+static struct gpio_led_platform_data simpad_led_data = {
+ .num_leds = ARRAY_SIZE(simpad_leds),
+ .leds = simpad_leds,
+};
+
+static struct platform_device simpad_gpio_leds = {
+ .name = "leds-gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &simpad_led_data,
+ },
+};
+
+/*
+ * i2c
+ */
+static struct i2c_gpio_platform_data simpad_i2c_data = {
+ .sda_pin = GPIO_GPIO21,
+ .scl_pin = GPIO_GPIO25,
+ .udelay = 10,
+ .timeout = HZ,
+};
+
+static struct platform_device simpad_i2c = {
+ .name = "i2c-gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &simpad_i2c_data,
+ },
+};
/*
* MediaQ Video Device
@@ -202,7 +348,11 @@ static struct platform_device simpad_mq200fb = {
};
static struct platform_device *devices[] __initdata = {
- &simpad_mq200fb
+ &simpad_keys,
+ &simpad_polled_keys,
+ &simpad_mq200fb,
+ &simpad_gpio_leds,
+ &simpad_i2c,
};
@@ -211,6 +361,19 @@ static int __init simpad_init(void)
{
int ret;
+ spin_lock_init(&cs3_lock);
+
+ cs3_gpio.label = "simpad_cs3";
+ cs3_gpio.base = SIMPAD_CS3_GPIO_BASE;
+ cs3_gpio.ngpio = 24;
+ cs3_gpio.set = cs3_gpio_set;
+ cs3_gpio.get = cs3_gpio_get;
+ cs3_gpio.direction_input = cs3_gpio_direction_input;
+ cs3_gpio.direction_output = cs3_gpio_direction_output;
+ ret = gpiochip_add(&cs3_gpio);
+ if (ret)
+ printk(KERN_WARNING "simpad: Unable to register cs3 GPIO device");
+
pm_power_off = simpad_power_off;
sa11x0_register_mtd(&simpad_flash_data, simpad_flash_resources,
diff --git a/arch/arm/mach-shark/Makefile.boot b/arch/arm/mach-shark/Makefile.boot
index 4320f8b9277..e40e24e4ca3 100644
--- a/arch/arm/mach-shark/Makefile.boot
+++ b/arch/arm/mach-shark/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x08008000
+ zreladdr-y += 0x08008000
diff --git a/arch/arm/mach-shark/leds.c b/arch/arm/mach-shark/leds.c
index c9e32de4adf..ccd49189bbd 100644
--- a/arch/arm/mach-shark/leds.c
+++ b/arch/arm/mach-shark/leds.c
@@ -36,7 +36,7 @@ static char led_state;
static short hw_led_state;
static short saved_state;
-static DEFINE_SPINLOCK(leds_lock);
+static DEFINE_RAW_SPINLOCK(leds_lock);
short sequoia_read(int addr) {
outw(addr,0x24);
@@ -52,7 +52,7 @@ static void sequoia_leds_event(led_event_t evt)
{
unsigned long flags;
- spin_lock_irqsave(&leds_lock, flags);
+ raw_spin_lock_irqsave(&leds_lock, flags);
hw_led_state = sequoia_read(0x09);
@@ -144,7 +144,7 @@ static void sequoia_leds_event(led_event_t evt)
if (led_state & LED_STATE_ENABLED)
sequoia_write(hw_led_state,0x09);
- spin_unlock_irqrestore(&leds_lock, flags);
+ raw_spin_unlock_irqrestore(&leds_lock, flags);
}
static int __init leds_init(void)
diff --git a/arch/arm/mach-shmobile/Makefile.boot b/arch/arm/mach-shmobile/Makefile.boot
index 1c08ee9de86..498efd99338 100644
--- a/arch/arm/mach-shmobile/Makefile.boot
+++ b/arch/arm/mach-shmobile/Makefile.boot
@@ -1,7 +1,7 @@
__ZRELADDR := $(shell /bin/bash -c 'printf "0x%08x" \
$$[$(CONFIG_MEMORY_START) + 0x8000]')
- zreladdr-y := $(__ZRELADDR)
+ zreladdr-y += $(__ZRELADDR)
# Unsupported legacy stuff
#
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 523f608eb8c..7e90d064ebc 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -42,6 +42,7 @@
#include <linux/leds.h>
#include <linux/input/sh_keysc.h>
#include <linux/usb/r8a66597.h>
+#include <linux/pm_clock.h>
#include <media/sh_mobile_ceu.h>
#include <media/sh_mobile_csi2.h>
@@ -1408,6 +1409,11 @@ static void __init ap4evb_init(void)
sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi1_device);
+ sh7372_add_device_to_domain(&sh7372_a4r, &ceu_device);
+
hdmi_init_pm_clock();
fsi_init_pm_clock();
sh7372_pm_init();
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 17c19dc2560..00273dad5bf 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -39,7 +39,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
#include <linux/smsc911x.h>
#include <linux/sh_intc.h>
#include <linux/tca6416_keypad.h>
@@ -810,6 +810,7 @@ static struct usbhs_private usbhs1_private = {
},
.driver_param = {
.buswait_bwait = 4,
+ .has_otg = 1,
.pipe_type = usbhs1_pipe_cfg,
.pipe_size = ARRAY_SIZE(usbhs1_pipe_cfg),
.d0_tx_id = SHDMA_SLAVE_USB1_TX,
@@ -1588,6 +1589,15 @@ static void __init mackerel_init(void)
sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device);
sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs0_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs1_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device);
+#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
+ sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi1_device);
+#endif
+ sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi2_device);
+ sh7372_add_device_to_domain(&sh7372_a4r, &ceu_device);
hdmi_init_pm_clock();
sh7372_pm_init();
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 06aecb31d9c..c0cdbf997c9 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -35,8 +35,8 @@ extern void sh7372_add_standard_devices(void);
extern void sh7372_clock_init(void);
extern void sh7372_pinmux_init(void);
extern void sh7372_pm_init(void);
-extern void sh7372_cpu_suspend(void);
-extern void sh7372_cpu_resume(void);
+extern void sh7372_resume_core_standby_a3sm(void);
+extern int sh7372_do_idle_a3sm(unsigned long unused);
extern struct clk sh7372_extal1_clk;
extern struct clk sh7372_extal2_clk;
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 24e63a85e66..84532f9629b 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -479,7 +479,12 @@ struct platform_device;
struct sh7372_pm_domain {
struct generic_pm_domain genpd;
+ struct dev_power_governor *gov;
+ void (*suspend)(void);
+ void (*resume)(void);
unsigned int bit_shift;
+ bool no_debug;
+ bool stay_on;
};
static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
@@ -491,16 +496,24 @@ static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
extern struct sh7372_pm_domain sh7372_a4lc;
extern struct sh7372_pm_domain sh7372_a4mp;
extern struct sh7372_pm_domain sh7372_d4;
+extern struct sh7372_pm_domain sh7372_a4r;
extern struct sh7372_pm_domain sh7372_a3rv;
extern struct sh7372_pm_domain sh7372_a3ri;
+extern struct sh7372_pm_domain sh7372_a3sp;
extern struct sh7372_pm_domain sh7372_a3sg;
extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd);
extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
struct platform_device *pdev);
+extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
+ struct sh7372_pm_domain *sh7372_sd);
#else
#define sh7372_init_pm_domain(pd) do { } while(0)
#define sh7372_add_device_to_domain(pd, pdev) do { } while(0)
+#define sh7372_pm_add_subdomain(pd, sd) do { } while(0)
#endif /* CONFIG_PM */
+extern void sh7372_intcs_suspend(void);
+extern void sh7372_intcs_resume(void);
+
#endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 739315e30eb..29cdc0522d9 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -606,9 +606,16 @@ static void intcs_demux(unsigned int irq, struct irq_desc *desc)
generic_handle_irq(intcs_evt2irq(evtcodeas));
}
+static void __iomem *intcs_ffd2;
+static void __iomem *intcs_ffd5;
+
void __init sh7372_init_irq(void)
{
- void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
+ void __iomem *intevtsa;
+
+ intcs_ffd2 = ioremap_nocache(0xffd20000, PAGE_SIZE);
+ intevtsa = intcs_ffd2 + 0x100;
+ intcs_ffd5 = ioremap_nocache(0xffd50000, PAGE_SIZE);
register_intc_controller(&intca_desc);
register_intc_controller(&intcs_desc);
@@ -617,3 +624,46 @@ void __init sh7372_init_irq(void)
irq_set_handler_data(evt2irq(0xf80), (void *)intevtsa);
irq_set_chained_handler(evt2irq(0xf80), intcs_demux);
}
+
+static unsigned short ffd2[0x200];
+static unsigned short ffd5[0x100];
+
+void sh7372_intcs_suspend(void)
+{
+ int k;
+
+ for (k = 0x00; k <= 0x30; k += 4)
+ ffd2[k] = __raw_readw(intcs_ffd2 + k);
+
+ for (k = 0x80; k <= 0xb0; k += 4)
+ ffd2[k] = __raw_readb(intcs_ffd2 + k);
+
+ for (k = 0x180; k <= 0x188; k += 4)
+ ffd2[k] = __raw_readb(intcs_ffd2 + k);
+
+ for (k = 0x00; k <= 0x3c; k += 4)
+ ffd5[k] = __raw_readw(intcs_ffd5 + k);
+
+ for (k = 0x80; k <= 0x9c; k += 4)
+ ffd5[k] = __raw_readb(intcs_ffd5 + k);
+}
+
+void sh7372_intcs_resume(void)
+{
+ int k;
+
+ for (k = 0x00; k <= 0x30; k += 4)
+ __raw_writew(ffd2[k], intcs_ffd2 + k);
+
+ for (k = 0x80; k <= 0xb0; k += 4)
+ __raw_writeb(ffd2[k], intcs_ffd2 + k);
+
+ for (k = 0x180; k <= 0x188; k += 4)
+ __raw_writeb(ffd2[k], intcs_ffd2 + k);
+
+ for (k = 0x00; k <= 0x3c; k += 4)
+ __raw_writew(ffd5[k], intcs_ffd5 + k);
+
+ for (k = 0x80; k <= 0x9c; k += 4)
+ __raw_writeb(ffd5[k], intcs_ffd5 + k);
+}
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index 66f980625a3..e4e485fa253 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -56,6 +56,12 @@ void __init smp_init_cpus(void)
unsigned int ncores = shmobile_smp_get_core_count();
unsigned int i;
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
for (i = 0; i < ncores; i++)
set_cpu_possible(i, true);
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 933fb411be0..79612737c5b 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -15,23 +15,61 @@
#include <linux/list.h>
#include <linux/err.h>
#include <linux/slab.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/bitrev.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
+#include <asm/suspend.h>
#include <mach/common.h>
#include <mach/sh7372.h>
-#define SMFRAM 0xe6a70000
-#define SYSTBCR 0xe6150024
-#define SBAR 0xe6180020
-#define APARMBAREA 0xe6f10020
+/* DBG */
+#define DBGREG1 0xe6100020
+#define DBGREG9 0xe6100040
+/* CPGA */
+#define SYSTBCR 0xe6150024
+#define MSTPSR0 0xe6150030
+#define MSTPSR1 0xe6150038
+#define MSTPSR2 0xe6150040
+#define MSTPSR3 0xe6150048
+#define MSTPSR4 0xe615004c
+#define PLLC01STPCR 0xe61500c8
+
+/* SYSC */
#define SPDCR 0xe6180008
#define SWUCR 0xe6180014
+#define SBAR 0xe6180020
+#define WUPRMSK 0xe6180028
+#define WUPSMSK 0xe618002c
+#define WUPSMSK2 0xe6180048
#define PSTR 0xe6180080
+#define WUPSFAC 0xe6180098
+#define IRQCR 0xe618022c
+#define IRQCR2 0xe6180238
+#define IRQCR3 0xe6180244
+#define IRQCR4 0xe6180248
+#define PDNSEL 0xe6180254
+
+/* INTC */
+#define ICR1A 0xe6900000
+#define ICR2A 0xe6900004
+#define ICR3A 0xe6900008
+#define ICR4A 0xe690000c
+#define INTMSK00A 0xe6900040
+#define INTMSK10A 0xe6900044
+#define INTMSK20A 0xe6900048
+#define INTMSK30A 0xe690004c
+
+/* MFIS */
+#define SMFRAM 0xe6a70000
+
+/* AP-System Core */
+#define APARMBAREA 0xe6f10020
#define PSTR_RETRIES 100
#define PSTR_DELAY_US 10
@@ -43,6 +81,12 @@ static int pd_power_down(struct generic_pm_domain *genpd)
struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
unsigned int mask = 1 << sh7372_pd->bit_shift;
+ if (sh7372_pd->suspend)
+ sh7372_pd->suspend();
+
+ if (sh7372_pd->stay_on)
+ return 0;
+
if (__raw_readl(PSTR) & mask) {
unsigned int retry_count;
@@ -55,8 +99,9 @@ static int pd_power_down(struct generic_pm_domain *genpd)
}
}
- pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n",
- mask, __raw_readl(PSTR));
+ if (!sh7372_pd->no_debug)
+ pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n",
+ mask, __raw_readl(PSTR));
return 0;
}
@@ -68,6 +113,9 @@ static int pd_power_up(struct generic_pm_domain *genpd)
unsigned int retry_count;
int ret = 0;
+ if (sh7372_pd->stay_on)
+ goto out;
+
if (__raw_readl(PSTR) & mask)
goto out;
@@ -84,66 +132,48 @@ static int pd_power_up(struct generic_pm_domain *genpd)
if (__raw_readl(SWUCR) & mask)
ret = -EIO;
+ if (!sh7372_pd->no_debug)
+ pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n",
+ mask, __raw_readl(PSTR));
+
out:
- pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n",
- mask, __raw_readl(PSTR));
+ if (ret == 0 && sh7372_pd->resume)
+ sh7372_pd->resume();
return ret;
}
-static int pd_power_up_a3rv(struct generic_pm_domain *genpd)
+static void sh7372_a4r_suspend(void)
{
- int ret = pd_power_up(genpd);
-
- /* force A4LC on after A3RV has been requested on */
- pm_genpd_poweron(&sh7372_a4lc.genpd);
-
- return ret;
+ sh7372_intcs_suspend();
+ __raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */
}
-static int pd_power_down_a3rv(struct generic_pm_domain *genpd)
+static bool pd_active_wakeup(struct device *dev)
{
- int ret = pd_power_down(genpd);
-
- /* try to power down A4LC after A3RV is requested off */
- genpd_queue_power_off_work(&sh7372_a4lc.genpd);
-
- return ret;
+ return true;
}
-static int pd_power_down_a4lc(struct generic_pm_domain *genpd)
+static bool sh7372_power_down_forbidden(struct dev_pm_domain *domain)
{
- /* only power down A4LC if A3RV is off */
- if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift)))
- return pd_power_down(genpd);
-
- return -EBUSY;
+ return false;
}
-static bool pd_active_wakeup(struct device *dev)
-{
- return true;
-}
+struct dev_power_governor sh7372_always_on_gov = {
+ .power_down_ok = sh7372_power_down_forbidden,
+};
void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
{
struct generic_pm_domain *genpd = &sh7372_pd->genpd;
- pm_genpd_init(genpd, NULL, false);
+ pm_genpd_init(genpd, sh7372_pd->gov, false);
genpd->stop_device = pm_clk_suspend;
genpd->start_device = pm_clk_resume;
+ genpd->dev_irq_safe = true;
genpd->active_wakeup = pd_active_wakeup;
-
- if (sh7372_pd == &sh7372_a4lc) {
- genpd->power_off = pd_power_down_a4lc;
- genpd->power_on = pd_power_up;
- } else if (sh7372_pd == &sh7372_a3rv) {
- genpd->power_off = pd_power_down_a3rv;
- genpd->power_on = pd_power_up_a3rv;
- } else {
- genpd->power_off = pd_power_down;
- genpd->power_on = pd_power_up;
- }
+ genpd->power_off = pd_power_down;
+ genpd->power_on = pd_power_up;
genpd->power_on(&sh7372_pd->genpd);
}
@@ -152,11 +182,15 @@ void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
{
struct device *dev = &pdev->dev;
- if (!dev->power.subsys_data) {
- pm_clk_init(dev);
- pm_clk_add(dev, NULL);
- }
pm_genpd_add_device(&sh7372_pd->genpd, dev);
+ if (pm_clk_no_clocks(dev))
+ pm_clk_add(dev, NULL);
+}
+
+void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
+ struct sh7372_pm_domain *sh7372_sd)
+{
+ pm_genpd_add_subdomain(&sh7372_pd->genpd, &sh7372_sd->genpd);
}
struct sh7372_pm_domain sh7372_a4lc = {
@@ -171,6 +205,14 @@ struct sh7372_pm_domain sh7372_d4 = {
.bit_shift = 3,
};
+struct sh7372_pm_domain sh7372_a4r = {
+ .bit_shift = 5,
+ .gov = &sh7372_always_on_gov,
+ .suspend = sh7372_a4r_suspend,
+ .resume = sh7372_intcs_resume,
+ .stay_on = true,
+};
+
struct sh7372_pm_domain sh7372_a3rv = {
.bit_shift = 6,
};
@@ -179,39 +221,187 @@ struct sh7372_pm_domain sh7372_a3ri = {
.bit_shift = 8,
};
+struct sh7372_pm_domain sh7372_a3sp = {
+ .bit_shift = 11,
+ .gov = &sh7372_always_on_gov,
+ .no_debug = true,
+};
+
struct sh7372_pm_domain sh7372_a3sg = {
.bit_shift = 13,
};
#endif /* CONFIG_PM */
+#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
+static int sh7372_do_idle_core_standby(unsigned long unused)
+{
+ cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */
+ return 0;
+}
+
static void sh7372_enter_core_standby(void)
{
- void __iomem *smfram = (void __iomem *)SMFRAM;
+ /* set reset vector, translate 4k */
+ __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
+ __raw_writel(0, APARMBAREA);
- __raw_writel(0, APARMBAREA); /* translate 4k */
- __raw_writel(__pa(sh7372_cpu_resume), SBAR); /* set reset vector */
- __raw_writel(0x10, SYSTBCR); /* enable core standby */
+ /* enter sleep mode with SYSTBCR to 0x10 */
+ __raw_writel(0x10, SYSTBCR);
+ cpu_suspend(0, sh7372_do_idle_core_standby);
+ __raw_writel(0, SYSTBCR);
- __raw_writel(0, smfram + 0x3c); /* clear page table address */
+ /* disable reset vector translation */
+ __raw_writel(0, SBAR);
+}
+#endif
+
+#ifdef CONFIG_SUSPEND
+static void sh7372_enter_a3sm_common(int pllc0_on)
+{
+ /* set reset vector, translate 4k */
+ __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
+ __raw_writel(0, APARMBAREA);
+
+ if (pllc0_on)
+ __raw_writel(0, PLLC01STPCR);
+ else
+ __raw_writel(1 << 28, PLLC01STPCR);
+
+ __raw_writel(0, PDNSEL); /* power-down A3SM only, not A4S */
+ __raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */
+ cpu_suspend(0, sh7372_do_idle_a3sm);
+ __raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */
+
+ /* disable reset vector translation */
+ __raw_writel(0, SBAR);
+}
+
+static int sh7372_a3sm_valid(unsigned long *mskp, unsigned long *msk2p)
+{
+ unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4;
+ unsigned long msk, msk2;
+
+ /* check active clocks to determine potential wakeup sources */
+
+ mstpsr0 = __raw_readl(MSTPSR0);
+ if ((mstpsr0 & 0x00000003) != 0x00000003) {
+ pr_debug("sh7372 mstpsr0 0x%08lx\n", mstpsr0);
+ return 0;
+ }
+
+ mstpsr1 = __raw_readl(MSTPSR1);
+ if ((mstpsr1 & 0xff079b7f) != 0xff079b7f) {
+ pr_debug("sh7372 mstpsr1 0x%08lx\n", mstpsr1);
+ return 0;
+ }
- sh7372_cpu_suspend();
- cpu_init();
+ mstpsr2 = __raw_readl(MSTPSR2);
+ if ((mstpsr2 & 0x000741ff) != 0x000741ff) {
+ pr_debug("sh7372 mstpsr2 0x%08lx\n", mstpsr2);
+ return 0;
+ }
- /* if page table address is non-NULL then we have been powered down */
- if (__raw_readl(smfram + 0x3c)) {
- __raw_writel(__raw_readl(smfram + 0x40),
- __va(__raw_readl(smfram + 0x3c)));
+ mstpsr3 = __raw_readl(MSTPSR3);
+ if ((mstpsr3 & 0x1a60f010) != 0x1a60f010) {
+ pr_debug("sh7372 mstpsr3 0x%08lx\n", mstpsr3);
+ return 0;
+ }
- flush_tlb_all();
- set_cr(__raw_readl(smfram + 0x38));
+ mstpsr4 = __raw_readl(MSTPSR4);
+ if ((mstpsr4 & 0x00008cf0) != 0x00008cf0) {
+ pr_debug("sh7372 mstpsr4 0x%08lx\n", mstpsr4);
+ return 0;
}
- __raw_writel(0, SYSTBCR); /* disable core standby */
- __raw_writel(0, SBAR); /* disable reset vector translation */
+ msk = 0;
+ msk2 = 0;
+
+ /* make bitmaps of limited number of wakeup sources */
+
+ if ((mstpsr2 & (1 << 23)) == 0) /* SPU2 */
+ msk |= 1 << 31;
+
+ if ((mstpsr2 & (1 << 12)) == 0) /* MFI_MFIM */
+ msk |= 1 << 21;
+
+ if ((mstpsr4 & (1 << 3)) == 0) /* KEYSC */
+ msk |= 1 << 2;
+
+ if ((mstpsr1 & (1 << 24)) == 0) /* CMT0 */
+ msk |= 1 << 1;
+
+ if ((mstpsr3 & (1 << 29)) == 0) /* CMT1 */
+ msk |= 1 << 1;
+
+ if ((mstpsr4 & (1 << 0)) == 0) /* CMT2 */
+ msk |= 1 << 1;
+
+ if ((mstpsr2 & (1 << 13)) == 0) /* MFI_MFIS */
+ msk2 |= 1 << 17;
+
+ *mskp = msk;
+ *msk2p = msk2;
+
+ return 1;
+}
+
+static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p)
+{
+ u16 tmp, irqcr1, irqcr2;
+ int k;
+
+ irqcr1 = 0;
+ irqcr2 = 0;
+
+ /* convert INTCA ICR register layout to SYSC IRQCR+IRQCR2 */
+ for (k = 0; k <= 7; k++) {
+ tmp = (icr >> ((7 - k) * 4)) & 0xf;
+ irqcr1 |= (tmp & 0x03) << (k * 2);
+ irqcr2 |= (tmp >> 2) << (k * 2);
+ }
+
+ *irqcr1p = irqcr1;
+ *irqcr2p = irqcr2;
+}
+
+static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
+{
+ u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high;
+ unsigned long tmp;
+
+ /* read IRQ0A -> IRQ15A mask */
+ tmp = bitrev8(__raw_readb(INTMSK00A));
+ tmp |= bitrev8(__raw_readb(INTMSK10A)) << 8;
+
+ /* setup WUPSMSK from clocks and external IRQ mask */
+ msk = (~msk & 0xc030000f) | (tmp << 4);
+ __raw_writel(msk, WUPSMSK);
+
+ /* propage level/edge trigger for external IRQ 0->15 */
+ sh7372_icr_to_irqcr(__raw_readl(ICR1A), &irqcrx_low, &irqcry_low);
+ sh7372_icr_to_irqcr(__raw_readl(ICR2A), &irqcrx_high, &irqcry_high);
+ __raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR);
+ __raw_writel((irqcry_high << 16) | irqcry_low, IRQCR2);
+
+ /* read IRQ16A -> IRQ31A mask */
+ tmp = bitrev8(__raw_readb(INTMSK20A));
+ tmp |= bitrev8(__raw_readb(INTMSK30A)) << 8;
+
+ /* setup WUPSMSK2 from clocks and external IRQ mask */
+ msk2 = (~msk2 & 0x00030000) | tmp;
+ __raw_writel(msk2, WUPSMSK2);
+
+ /* propage level/edge trigger for external IRQ 16->31 */
+ sh7372_icr_to_irqcr(__raw_readl(ICR3A), &irqcrx_low, &irqcry_low);
+ sh7372_icr_to_irqcr(__raw_readl(ICR4A), &irqcrx_high, &irqcry_high);
+ __raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3);
+ __raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4);
}
+#endif
#ifdef CONFIG_CPU_IDLE
+
static void sh7372_cpuidle_setup(struct cpuidle_device *dev)
{
struct cpuidle_state *state;
@@ -239,9 +429,25 @@ static void sh7372_cpuidle_init(void) {}
#endif
#ifdef CONFIG_SUSPEND
+
static int sh7372_enter_suspend(suspend_state_t suspend_state)
{
- sh7372_enter_core_standby();
+ unsigned long msk, msk2;
+
+ /* check active clocks to determine potential wakeup sources */
+ if (sh7372_a3sm_valid(&msk, &msk2)) {
+
+ /* convert INTC mask and sense to SYSC mask and sense */
+ sh7372_setup_a3sm(msk, msk2);
+
+ /* enter A3SM sleep with PLLC0 off */
+ pr_debug("entering A3SM\n");
+ sh7372_enter_a3sm_common(0);
+ } else {
+ /* default to Core Standby that supports all wakeup sources */
+ pr_debug("entering Core Standby\n");
+ sh7372_enter_core_standby();
+ }
return 0;
}
@@ -253,9 +459,6 @@ static void sh7372_suspend_init(void)
static void sh7372_suspend_init(void) {}
#endif
-#define DBGREG1 0xe6100020
-#define DBGREG9 0xe6100040
-
void __init sh7372_pm_init(void)
{
/* enable DBG hardware block to kick SYSC */
@@ -263,6 +466,9 @@ void __init sh7372_pm_init(void)
__raw_writel(0x0000a501, DBGREG9);
__raw_writel(0x00000000, DBGREG1);
+ /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
+ __raw_writel(0, PDNSEL);
+
sh7372_suspend_init();
sh7372_cpuidle_init();
}
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
index 6ec454e1e06..bd5c6a3b8c5 100644
--- a/arch/arm/mach-shmobile/pm_runtime.c
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
+#include <linux/pm_clock.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 2d9b1b1a253..2380389e6ac 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -30,6 +30,7 @@
#include <linux/sh_dma.h>
#include <linux/sh_intc.h>
#include <linux/sh_timer.h>
+#include <linux/pm_domain.h>
#include <mach/hardware.h>
#include <mach/sh7372.h>
#include <asm/mach-types.h>
@@ -990,9 +991,14 @@ void __init sh7372_add_standard_devices(void)
sh7372_init_pm_domain(&sh7372_a4lc);
sh7372_init_pm_domain(&sh7372_a4mp);
sh7372_init_pm_domain(&sh7372_d4);
+ sh7372_init_pm_domain(&sh7372_a4r);
sh7372_init_pm_domain(&sh7372_a3rv);
sh7372_init_pm_domain(&sh7372_a3ri);
sh7372_init_pm_domain(&sh7372_a3sg);
+ sh7372_init_pm_domain(&sh7372_a3sp);
+
+ sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv);
+ sh7372_pm_add_subdomain(&sh7372_a4r, &sh7372_a4lc);
platform_add_devices(sh7372_early_devices,
ARRAY_SIZE(sh7372_early_devices));
@@ -1003,6 +1009,25 @@ void __init sh7372_add_standard_devices(void)
sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device);
sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device);
sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &scif0_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &scif1_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &scif2_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &scif3_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &scif4_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &scif5_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &scif6_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &iic1_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &dma0_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &dma1_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &dma2_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &usb_dma0_device);
+ sh7372_add_device_to_domain(&sh7372_a3sp, &usb_dma1_device);
+ sh7372_add_device_to_domain(&sh7372_a4r, &iic0_device);
+ sh7372_add_device_to_domain(&sh7372_a4r, &veu0_device);
+ sh7372_add_device_to_domain(&sh7372_a4r, &veu1_device);
+ sh7372_add_device_to_domain(&sh7372_a4r, &veu2_device);
+ sh7372_add_device_to_domain(&sh7372_a4r, &veu3_device);
+ sh7372_add_device_to_domain(&sh7372_a4r, &jpu_device);
}
void __init sh7372_add_early_devices(void)
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
index d37d3ca4d18..f3ab3c5810e 100644
--- a/arch/arm/mach-shmobile/sleep-sh7372.S
+++ b/arch/arm/mach-shmobile/sleep-sh7372.S
@@ -30,58 +30,20 @@
*/
#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/memory.h>
#include <asm/assembler.h>
-#define SMFRAM 0xe6a70000
-
- .align
-kernel_flush:
- .word v7_flush_dcache_all
-
- .align 3
-ENTRY(sh7372_cpu_suspend)
- stmfd sp!, {r0-r12, lr} @ save registers on stack
-
- ldr r8, =SMFRAM
-
- mov r4, sp @ Store sp
- mrs r5, spsr @ Store spsr
- mov r6, lr @ Store lr
- stmia r8!, {r4-r6}
-
- mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register
- mrc p15, 0, r5, c2, c0, 0 @ TTBR0
- mrc p15, 0, r6, c2, c0, 1 @ TTBR1
- mrc p15, 0, r7, c2, c0, 2 @ TTBCR
- stmia r8!, {r4-r7}
-
- mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register
- mrc p15, 0, r5, c10, c2, 0 @ PRRR
- mrc p15, 0, r6, c10, c2, 1 @ NMRR
- stmia r8!,{r4-r6}
-
- mrc p15, 0, r4, c13, c0, 1 @ Context ID
- mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
- mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
- mrs r7, cpsr @ Store current cpsr
- stmia r8!, {r4-r7}
-
- mrc p15, 0, r4, c1, c0, 0 @ save control register
- stmia r8!, {r4}
-
- /*
- * jump out to kernel flush routine
- * - reuse that code is better
- * - it executes in a cached space so is faster than refetch per-block
- * - should be faster and will change with kernel
- * - 'might' have to copy address, load and jump to it
- * Flush all data from the L1 data cache before disabling
- * SCTLR.C bit.
- */
- ldr r1, kernel_flush
- mov lr, pc
- bx r1
+#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
+ .align 12
+ .text
+ .global sh7372_resume_core_standby_a3sm
+sh7372_resume_core_standby_a3sm:
+ ldr pc, 1f
+1: .long cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET
+ .global sh7372_do_idle_a3sm
+sh7372_do_idle_a3sm:
/*
* Clear the SCTLR.C bit to prevent further data cache
* allocation. Clearing SCTLR.C would make all the data accesses
@@ -92,10 +54,13 @@ ENTRY(sh7372_cpu_suspend)
mcr p15, 0, r0, c1, c0, 0
isb
+ /* disable L2 cache in the aux control register */
+ mrc p15, 0, r10, c1, c0, 1
+ bic r10, r10, #2
+ mcr p15, 0, r10, c1, c0, 1
+
/*
- * Invalidate L1 data cache. Even though only invalidate is
- * necessary exported flush API is used here. Doing clean
- * on already clean cache would be almost NOP.
+ * Invalidate data cache again.
*/
ldr r1, kernel_flush
blx r1
@@ -115,146 +80,16 @@ ENTRY(sh7372_cpu_suspend)
dsb
dmb
-/*
- * ===================================
- * == WFI instruction => Enter idle ==
- * ===================================
- */
- wfi @ wait for interrupt
-
-/*
- * ===================================
- * == Resume path for non-OFF modes ==
- * ===================================
- */
- mrc p15, 0, r0, c1, c0, 0
- tst r0, #(1 << 2) @ Check C bit enabled?
- orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared
- mcreq p15, 0, r0, c1, c0, 0
- isb
-
-/*
- * ===================================
- * == Exit point from non-OFF modes ==
- * ===================================
- */
- ldmfd sp!, {r0-r12, pc} @ restore regs and return
+#define SPDCR 0xe6180008
+#define A3SM (1 << 12)
- .pool
+ /* A3SM power down */
+ ldr r0, =SPDCR
+ ldr r1, =A3SM
+ str r1, [r0]
+1:
+ b 1b
- .align 12
- .text
- .global sh7372_cpu_resume
-sh7372_cpu_resume:
-
- mov r1, #0
- /*
- * Invalidate all instruction caches to PoU
- * and flush branch target cache
- */
- mcr p15, 0, r1, c7, c5, 0
-
- ldr r3, =SMFRAM
-
- ldmia r3!, {r4-r6}
- mov sp, r4 @ Restore sp
- msr spsr_cxsf, r5 @ Restore spsr
- mov lr, r6 @ Restore lr
-
- ldmia r3!, {r4-r7}
- mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register
- mcr p15, 0, r5, c2, c0, 0 @ TTBR0
- mcr p15, 0, r6, c2, c0, 1 @ TTBR1
- mcr p15, 0, r7, c2, c0, 2 @ TTBCR
-
- ldmia r3!,{r4-r6}
- mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register
- mcr p15, 0, r5, c10, c2, 0 @ PRRR
- mcr p15, 0, r6, c10, c2, 1 @ NMRR
-
- ldmia r3!,{r4-r7}
- mcr p15, 0, r4, c13, c0, 1 @ Context ID
- mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
- mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
- msr cpsr, r7 @ store cpsr
-
- /* Starting to enable MMU here */
- mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl
- /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
- and r7, #0x7
- cmp r7, #0x0
- beq usettbr0
-ttbr_error:
- /*
- * More work needs to be done to support N[0:2] value other than 0
- * So looping here so that the error can be detected
- */
- b ttbr_error
-
- .align
-cache_pred_disable_mask:
- .word 0xFFFFE7FB
-ttbrbit_mask:
- .word 0xFFFFC000
-table_index_mask:
- .word 0xFFF00000
-table_entry:
- .word 0x00000C02
-usettbr0:
-
- mrc p15, 0, r2, c2, c0, 0
- ldr r5, ttbrbit_mask
- and r2, r5
- mov r4, pc
- ldr r5, table_index_mask
- and r4, r5 @ r4 = 31 to 20 bits of pc
- /* Extract the value to be written to table entry */
- ldr r6, table_entry
- /* r6 has the value to be written to table entry */
- add r6, r6, r4
- /* Getting the address of table entry to modify */
- lsr r4, #18
- /* r2 has the location which needs to be modified */
- add r2, r4
- ldr r4, [r2]
- str r6, [r2] /* modify the table entry */
-
- mov r7, r6
- mov r5, r2
- mov r6, r4
- /* r5 = original page table address */
- /* r6 = original page table data */
-
- mov r0, #0
- mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
- mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array
- mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB
- mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB
-
- /*
- * Restore control register. This enables the MMU.
- * The caches and prediction are not enabled here, they
- * will be enabled after restoring the MMU table entry.
- */
- ldmia r3!, {r4}
- stmia r3!, {r5} /* save original page table address */
- stmia r3!, {r6} /* save original page table data */
- stmia r3!, {r7} /* save modified page table data */
-
- ldr r2, cache_pred_disable_mask
- and r4, r2
- mcr p15, 0, r4, c1, c0, 0
- dsb
- isb
-
- ldr r0, =restoremmu_on
- bx r0
-
-/*
- * ==============================
- * == Exit point from OFF mode ==
- * ==============================
- */
-restoremmu_on:
-
- ldmfd sp!, {r0-r12, pc} @ restore regs and return
+kernel_flush:
+ .word v7_flush_dcache_all
+#endif
diff --git a/arch/arm/mach-spear3xx/Makefile.boot b/arch/arm/mach-spear3xx/Makefile.boot
index 7a1f3c0eadb..4674a4c221d 100644
--- a/arch/arm/mach-spear3xx/Makefile.boot
+++ b/arch/arm/mach-spear3xx/Makefile.boot
@@ -1,3 +1,3 @@
-zreladdr-y := 0x00008000
+zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-spear6xx/Makefile.boot b/arch/arm/mach-spear6xx/Makefile.boot
index 7a1f3c0eadb..4674a4c221d 100644
--- a/arch/arm/mach-spear6xx/Makefile.boot
+++ b/arch/arm/mach-spear6xx/Makefile.boot
@@ -1,3 +1,3 @@
-zreladdr-y := 0x00008000
+zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-tcc8k/Makefile.boot b/arch/arm/mach-tcc8k/Makefile.boot
index f135c9deae1..5e02d4156b0 100644
--- a/arch/arm/mach-tcc8k/Makefile.boot
+++ b/arch/arm/mach-tcc8k/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x20008000
+ zreladdr-y += 0x20008000
params_phys-y := 0x20000100
initrd_phys-y := 0x20800000
diff --git a/arch/arm/mach-tegra/Makefile.boot b/arch/arm/mach-tegra/Makefile.boot
index 428ad122be0..5e870d29eca 100644
--- a/arch/arm/mach-tegra/Makefile.boot
+++ b/arch/arm/mach-tegra/Makefile.boot
@@ -1,4 +1,4 @@
-zreladdr-$(CONFIG_ARCH_TEGRA_2x_SOC) := 0x00008000
+zreladdr-$(CONFIG_ARCH_TEGRA_2x_SOC) += 0x00008000
params_phys-$(CONFIG_ARCH_TEGRA_2x_SOC) := 0x00000100
initrd_phys-$(CONFIG_ARCH_TEGRA_2x_SOC) := 0x00800000
diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c
index 846cd7d69e3..c78ce41cca1 100644
--- a/arch/arm/mach-tegra/board-harmony.c
+++ b/arch/arm/mach-tegra/board-harmony.c
@@ -123,8 +123,8 @@ static struct platform_device *harmony_devices[] __initdata = {
&harmony_audio_device,
};
-static void __init tegra_harmony_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init tegra_harmony_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
mi->nr_banks = 2;
mi->bank[0].start = PHYS_OFFSET;
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index ea2f79c9879..5e6bc771964 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -84,8 +84,8 @@ static void paz00_usb_init(void)
platform_device_register(&tegra_ehci3_device);
}
-static void __init tegra_paz00_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init tegra_paz00_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
mi->nr_banks = 1;
mi->bank[0].start = PHYS_OFFSET;
diff --git a/arch/arm/mach-tegra/board-trimslice.c b/arch/arm/mach-tegra/board-trimslice.c
index 89a6d2adc1d..652c3404d0e 100644
--- a/arch/arm/mach-tegra/board-trimslice.c
+++ b/arch/arm/mach-tegra/board-trimslice.c
@@ -126,8 +126,8 @@ static void trimslice_usb_init(void)
platform_device_register(&tegra_ehci1_device);
}
-static void __init tegra_trimslice_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init tegra_trimslice_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
mi->nr_banks = 2;
mi->bank[0].start = PHYS_OFFSET;
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
index 0e1016a827a..0e0fd4d889b 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -32,7 +32,6 @@
#include <asm/system.h>
-#include <mach/hardware.h>
#include <mach/clk.h>
/* Frequency table index must be sequential starting at 0 */
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 0886cbccdde..7d2b5d03c1d 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -114,10 +114,10 @@ void __init smp_init_cpus(void)
{
unsigned int i, ncores = scu_get_core_count(scu_base);
- if (ncores > NR_CPUS) {
- printk(KERN_ERR "Tegra: no. of cores (%u) greater than configured (%u), clipping\n",
- ncores, NR_CPUS);
- ncores = NR_CPUS;
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
}
for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index 32a7b0f7e9f..449fd6a8dbd 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -6,6 +6,8 @@ comment "ST-Ericsson Mobile Platform Products"
config MACH_U300
bool "U300"
+ select PINCTRL
+ select PINMUX_U300
comment "ST-Ericsson U300/U330/U335/U365 Feature Selections"
diff --git a/arch/arm/mach-u300/Makefile b/arch/arm/mach-u300/Makefile
index 8fd354aaf0a..285538124e5 100644
--- a/arch/arm/mach-u300/Makefile
+++ b/arch/arm/mach-u300/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux kernel, U300 machine.
#
-obj-y := core.o clock.o timer.o padmux.o
+obj-y := core.o clock.o timer.o
obj-m :=
obj-n :=
obj- :=
diff --git a/arch/arm/mach-u300/Makefile.boot b/arch/arm/mach-u300/Makefile.boot
index 6fbfc6ea2d3..69357affbd7 100644
--- a/arch/arm/mach-u300/Makefile.boot
+++ b/arch/arm/mach-u300/Makefile.boot
@@ -4,10 +4,10 @@
# INITRD_PHYS must be in RAM
ifdef CONFIG_MACH_U300_SINGLE_RAM
- zreladdr-y := 0x28E08000
+ zreladdr-y += 0x28E08000
params_phys-y := 0x28E00100
else
- zreladdr-y := 0x48008000
+ zreladdr-y += 0x48008000
params_phys-y := 0x48000100
endif
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index 399c89f14df..2f5929bdeaa 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -25,6 +25,8 @@
#include <linux/err.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/fsmc.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinmux.h>
#include <asm/types.h>
#include <asm/setup.h>
@@ -1535,6 +1537,14 @@ static struct coh901318_platform coh901318_platform = {
.max_channels = U300_DMA_CHANNELS,
};
+static struct resource pinmux_resources[] = {
+ {
+ .start = U300_SYSCON_BASE,
+ .end = U300_SYSCON_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
static struct platform_device wdog_device = {
.name = "coh901327_wdog",
.id = -1,
@@ -1630,6 +1640,72 @@ static struct platform_device dma_device = {
},
};
+static struct platform_device pinmux_device = {
+ .name = "pinmux-u300",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(pinmux_resources),
+ .resource = pinmux_resources,
+};
+
+/* Pinmux settings */
+static struct pinmux_map u300_pinmux_map[] = {
+ /* anonymous maps for chip power and EMIFs */
+ PINMUX_MAP_PRIMARY_SYS_HOG("POWER", "power"),
+ PINMUX_MAP_PRIMARY_SYS_HOG("EMIF0", "emif0"),
+ PINMUX_MAP_PRIMARY_SYS_HOG("EMIF1", "emif1"),
+ /* per-device maps for MMC/SD, SPI and UART */
+ PINMUX_MAP_PRIMARY("MMCSD", "mmc0", "mmci"),
+ PINMUX_MAP_PRIMARY("SPI", "spi0", "pl022"),
+ PINMUX_MAP_PRIMARY("UART0", "uart0", "uart0"),
+};
+
+struct u300_mux_hog {
+ const char *name;
+ struct device *dev;
+ struct pinmux *pmx;
+};
+
+static struct u300_mux_hog u300_mux_hogs[] = {
+ {
+ .name = "uart0",
+ .dev = &uart0_device.dev,
+ },
+ {
+ .name = "spi0",
+ .dev = &pl022_device.dev,
+ },
+ {
+ .name = "mmc0",
+ .dev = &mmcsd_device.dev,
+ },
+};
+
+static int __init u300_pinmux_fetch(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(u300_mux_hogs); i++) {
+ struct pinmux *pmx;
+ int ret;
+
+ pmx = pinmux_get(u300_mux_hogs[i].dev, NULL);
+ if (IS_ERR(pmx)) {
+ pr_err("u300: could not get pinmux hog %s\n",
+ u300_mux_hogs[i].name);
+ continue;
+ }
+ ret = pinmux_enable(pmx);
+ if (ret) {
+ pr_err("u300: could enable pinmux hog %s\n",
+ u300_mux_hogs[i].name);
+ continue;
+ }
+ u300_mux_hogs[i].pmx = pmx;
+ }
+ return 0;
+}
+subsys_initcall(u300_pinmux_fetch);
+
/*
* Notice that AMBA devices are initialized before platform devices.
*
@@ -1643,10 +1719,10 @@ static struct platform_device *platform_devs[] __initdata = {
&gpio_device,
&nand_device,
&wdog_device,
- &ave_device
+ &ave_device,
+ &pinmux_device,
};
-
/*
* Interrupts: the U300 platforms have two pl190 ARM PrimeCells connected
* together so some interrupts are connected to the first one and some
@@ -1828,6 +1904,10 @@ void __init u300_init_devices(void)
u300_assign_physmem();
+ /* Initialize pinmuxing */
+ pinmux_register_mappings(u300_pinmux_map,
+ ARRAY_SIZE(u300_pinmux_map));
+
/* Register subdevices on the I2C buses */
u300_i2c_register_board_devices();
diff --git a/arch/arm/mach-u300/include/mach/syscon.h b/arch/arm/mach-u300/include/mach/syscon.h
index 7444f5c7da9..6e84f07a7c6 100644
--- a/arch/arm/mach-u300/include/mach/syscon.h
+++ b/arch/arm/mach-u300/include/mach/syscon.h
@@ -234,91 +234,6 @@
#define U300_SYSCON_ECCR_EMIF_1_RET_OUT_CLK_EN_N_DISABLE (0x0004)
#define U300_SYSCON_ECCR_EMIF_MEMCLK_RET_EN_N_DISABLE (0x0002)
#define U300_SYSCON_ECCR_EMIF_SDRCLK_RET_EN_N_DISABLE (0x0001)
-/* PAD MUX Control register 1 (LOW) 16bit (R/W) */
-#define U300_SYSCON_PMC1LR (0x007C)
-#define U300_SYSCON_PMC1LR_MASK (0xFFFF)
-#define U300_SYSCON_PMC1LR_CDI_MASK (0xC000)
-#define U300_SYSCON_PMC1LR_CDI_CDI (0x0000)
-#define U300_SYSCON_PMC1LR_CDI_EMIF (0x4000)
-#ifdef CONFIG_MACH_U300_BS335
-#define U300_SYSCON_PMC1LR_CDI_CDI2 (0x8000)
-#define U300_SYSCON_PMC1LR_CDI_WCDMA_APP_GPIO (0xC000)
-#elif CONFIG_MACH_U300_BS365
-#define U300_SYSCON_PMC1LR_CDI_GPIO (0x8000)
-#define U300_SYSCON_PMC1LR_CDI_WCDMA (0xC000)
-#endif
-#define U300_SYSCON_PMC1LR_PDI_MASK (0x3000)
-#define U300_SYSCON_PMC1LR_PDI_PDI (0x0000)
-#define U300_SYSCON_PMC1LR_PDI_EGG (0x1000)
-#define U300_SYSCON_PMC1LR_PDI_WCDMA (0x3000)
-#define U300_SYSCON_PMC1LR_MMCSD_MASK (0x0C00)
-#define U300_SYSCON_PMC1LR_MMCSD_MMCSD (0x0000)
-#define U300_SYSCON_PMC1LR_MMCSD_MSPRO (0x0400)
-#define U300_SYSCON_PMC1LR_MMCSD_DSP (0x0800)
-#define U300_SYSCON_PMC1LR_MMCSD_WCDMA (0x0C00)
-#define U300_SYSCON_PMC1LR_ETM_MASK (0x0300)
-#define U300_SYSCON_PMC1LR_ETM_ACC (0x0000)
-#define U300_SYSCON_PMC1LR_ETM_APP (0x0100)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_MASK (0x00C0)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_STATIC (0x0000)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_NFIF (0x0040)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_SDRAM (0x0080)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_STATIC_2GB (0x00C0)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_MASK (0x0030)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_STATIC (0x0000)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_NFIF (0x0010)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_SDRAM (0x0020)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_SEMI (0x0030)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_MASK (0x000C)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_STATIC (0x0000)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_NFIF (0x0004)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_SDRAM (0x0008)
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_SEMI (0x000C)
-#define U300_SYSCON_PMC1LR_EMIF_1_MASK (0x0003)
-#define U300_SYSCON_PMC1LR_EMIF_1_STATIC (0x0000)
-#define U300_SYSCON_PMC1LR_EMIF_1_SDRAM0 (0x0001)
-#define U300_SYSCON_PMC1LR_EMIF_1_SDRAM1 (0x0002)
-#define U300_SYSCON_PMC1LR_EMIF_1 (0x0003)
-/* PAD MUX Control register 2 (HIGH) 16bit (R/W) */
-#define U300_SYSCON_PMC1HR (0x007E)
-#define U300_SYSCON_PMC1HR_MASK (0xFFFF)
-#define U300_SYSCON_PMC1HR_MISC_2_MASK (0xC000)
-#define U300_SYSCON_PMC1HR_MISC_2_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC1HR_MISC_2_MSPRO (0x4000)
-#define U300_SYSCON_PMC1HR_MISC_2_DSP (0x8000)
-#define U300_SYSCON_PMC1HR_MISC_2_AAIF (0xC000)
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_MASK (0x3000)
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_NFIF (0x1000)
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_DSP (0x2000)
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_AAIF (0x3000)
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_MASK (0x0C00)
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_MMC (0x0400)
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_DSP (0x0800)
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_AAIF (0x0C00)
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK (0x0300)
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI (0x0100)
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_AAIF (0x0300)
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK (0x00C0)
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI (0x0040)
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_AAIF (0x00C0)
-#define U300_SYSCON_PMC1HR_APP_SPI_2_MASK (0x0030)
-#define U300_SYSCON_PMC1HR_APP_SPI_2_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC1HR_APP_SPI_2_SPI (0x0010)
-#define U300_SYSCON_PMC1HR_APP_SPI_2_DSP (0x0020)
-#define U300_SYSCON_PMC1HR_APP_SPI_2_AAIF (0x0030)
-#define U300_SYSCON_PMC1HR_APP_UART0_2_MASK (0x000C)
-#define U300_SYSCON_PMC1HR_APP_UART0_2_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC1HR_APP_UART0_2_UART0 (0x0004)
-#define U300_SYSCON_PMC1HR_APP_UART0_2_NFIF_CS (0x0008)
-#define U300_SYSCON_PMC1HR_APP_UART0_2_AAIF (0x000C)
-#define U300_SYSCON_PMC1HR_APP_UART0_1_MASK (0x0003)
-#define U300_SYSCON_PMC1HR_APP_UART0_1_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC1HR_APP_UART0_1_UART0 (0x0001)
-#define U300_SYSCON_PMC1HR_APP_UART0_1_AAIF (0x0003)
/* Step one for killing the applications system 16bit (-/W) */
#define U300_SYSCON_KA1R (0x0080)
#define U300_SYSCON_KA1R_MASK (0xFFFF)
@@ -357,57 +272,6 @@
#define U300_SYSCON_PUCR_EMIF_1_16BIT_PU_ENABLE (0x0080)
#define U300_SYSCON_PUCR_EMIF_1_8BIT_PU_ENABLE (0x0040)
#define U300_SYSCON_PUCR_KEY_IN_PU_EN_MASK (0x003F)
-/* Padmux 2 control */
-#define U300_SYSCON_PMC2R (0x100)
-#define U300_SYSCON_PMC2R_APP_MISC_0_MASK (0x00C0)
-#define U300_SYSCON_PMC2R_APP_MISC_0_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC2R_APP_MISC_0_EMIF_SDRAM (0x0040)
-#define U300_SYSCON_PMC2R_APP_MISC_0_MMC (0x0080)
-#define U300_SYSCON_PMC2R_APP_MISC_0_CDI2 (0x00C0)
-#define U300_SYSCON_PMC2R_APP_MISC_1_MASK (0x0300)
-#define U300_SYSCON_PMC2R_APP_MISC_1_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC2R_APP_MISC_1_EMIF_SDRAM (0x0100)
-#define U300_SYSCON_PMC2R_APP_MISC_1_MMC (0x0200)
-#define U300_SYSCON_PMC2R_APP_MISC_1_CDI2 (0x0300)
-#define U300_SYSCON_PMC2R_APP_MISC_2_MASK (0x0C00)
-#define U300_SYSCON_PMC2R_APP_MISC_2_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC2R_APP_MISC_2_EMIF_SDRAM (0x0400)
-#define U300_SYSCON_PMC2R_APP_MISC_2_MMC (0x0800)
-#define U300_SYSCON_PMC2R_APP_MISC_2_CDI2 (0x0C00)
-#define U300_SYSCON_PMC2R_APP_MISC_3_MASK (0x3000)
-#define U300_SYSCON_PMC2R_APP_MISC_3_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC2R_APP_MISC_3_EMIF_SDRAM (0x1000)
-#define U300_SYSCON_PMC2R_APP_MISC_3_MMC (0x2000)
-#define U300_SYSCON_PMC2R_APP_MISC_3_CDI2 (0x3000)
-#define U300_SYSCON_PMC2R_APP_MISC_4_MASK (0xC000)
-#define U300_SYSCON_PMC2R_APP_MISC_4_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC2R_APP_MISC_4_EMIF_SDRAM (0x4000)
-#define U300_SYSCON_PMC2R_APP_MISC_4_MMC (0x8000)
-#define U300_SYSCON_PMC2R_APP_MISC_4_ACC_GPIO (0xC000)
-/* TODO: More SYSCON registers missing */
-#define U300_SYSCON_PMC3R (0x10c)
-#define U300_SYSCON_PMC3R_APP_MISC_11_MASK (0xc000)
-#define U300_SYSCON_PMC3R_APP_MISC_11_SPI (0x4000)
-#define U300_SYSCON_PMC3R_APP_MISC_10_MASK (0x3000)
-#define U300_SYSCON_PMC3R_APP_MISC_10_SPI (0x1000)
-/* TODO: Missing other configs */
-#define U300_SYSCON_PMC4R (0x168)
-#define U300_SYSCON_PMC4R_APP_MISC_12_MASK (0x0003)
-#define U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO (0x0000)
-#define U300_SYSCON_PMC4R_APP_MISC_13_MASK (0x000C)
-#define U300_SYSCON_PMC4R_APP_MISC_13_CDI (0x0000)
-#define U300_SYSCON_PMC4R_APP_MISC_13_SMIA (0x0004)
-#define U300_SYSCON_PMC4R_APP_MISC_13_SMIA2 (0x0008)
-#define U300_SYSCON_PMC4R_APP_MISC_13_APP_GPIO (0x000C)
-#define U300_SYSCON_PMC4R_APP_MISC_14_MASK (0x0030)
-#define U300_SYSCON_PMC4R_APP_MISC_14_CDI (0x0000)
-#define U300_SYSCON_PMC4R_APP_MISC_14_SMIA (0x0010)
-#define U300_SYSCON_PMC4R_APP_MISC_14_CDI2 (0x0020)
-#define U300_SYSCON_PMC4R_APP_MISC_14_APP_GPIO (0x0030)
-#define U300_SYSCON_PMC4R_APP_MISC_16_MASK (0x0300)
-#define U300_SYSCON_PMC4R_APP_MISC_16_APP_GPIO_13 (0x0000)
-#define U300_SYSCON_PMC4R_APP_MISC_16_APP_UART1_CTS (0x0100)
-#define U300_SYSCON_PMC4R_APP_MISC_16_EMIF_1_STATIC_CS5_N (0x0200)
/* SYS_0_CLK_CONTROL first clock control 16bit (R/W) */
#define U300_SYSCON_S0CCR (0x120)
#define U300_SYSCON_S0CCR_FIELD_MASK (0x43FF)
diff --git a/arch/arm/mach-u300/mmc.c b/arch/arm/mach-u300/mmc.c
index 677ccef5cd3..d5e4a98a9ab 100644
--- a/arch/arm/mach-u300/mmc.c
+++ b/arch/arm/mach-u300/mmc.c
@@ -21,7 +21,6 @@
#include <mach/dma_channels.h>
#include "mmc.h"
-#include "padmux.h"
static struct mmci_platform_data mmc0_plat_data = {
/*
@@ -45,24 +44,9 @@ static struct mmci_platform_data mmc0_plat_data = {
int __devinit mmc_init(struct amba_device *adev)
{
struct device *mmcsd_device = &adev->dev;
- struct pmx *pmx;
int ret = 0;
mmcsd_device->platform_data = &mmc0_plat_data;
- /*
- * Setup padmuxing for MMC. Since this must always be
- * compiled into the kernel, pmx is never released.
- */
- pmx = pmx_get(mmcsd_device, U300_APP_PMX_MMC_SETTING);
-
- if (IS_ERR(pmx))
- pr_warning("Could not get padmux handle\n");
- else {
- ret = pmx_activate(mmcsd_device, pmx);
- if (IS_ERR_VALUE(ret))
- pr_warning("Could not activate padmuxing\n");
- }
-
return ret;
}
diff --git a/arch/arm/mach-u300/padmux.c b/arch/arm/mach-u300/padmux.c
deleted file mode 100644
index 4c93c6cefd3..00000000000
--- a/arch/arm/mach-u300/padmux.c
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- *
- * arch/arm/mach-u300/padmux.c
- *
- *
- * Copyright (C) 2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * U300 PADMUX functions
- * Author: Martin Persson <martin.persson@stericsson.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-#include <linux/string.h>
-#include <linux/bug.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <mach/u300-regs.h>
-#include <mach/syscon.h>
-#include "padmux.h"
-
-static DEFINE_MUTEX(pmx_mutex);
-
-const u32 pmx_registers[] = {
- (U300_SYSCON_VBASE + U300_SYSCON_PMC1LR),
- (U300_SYSCON_VBASE + U300_SYSCON_PMC1HR),
- (U300_SYSCON_VBASE + U300_SYSCON_PMC2R),
- (U300_SYSCON_VBASE + U300_SYSCON_PMC3R),
- (U300_SYSCON_VBASE + U300_SYSCON_PMC4R)
-};
-
-/* High level functionality */
-
-/* Lazy dog:
- * onmask = {
- * {"PMC1LR" mask, "PMC1LR" value},
- * {"PMC1HR" mask, "PMC1HR" value},
- * {"PMC2R" mask, "PMC2R" value},
- * {"PMC3R" mask, "PMC3R" value},
- * {"PMC4R" mask, "PMC4R" value}
- * }
- */
-static struct pmx mmc_setting = {
- .setting = U300_APP_PMX_MMC_SETTING,
- .default_on = false,
- .activated = false,
- .name = "MMC",
- .onmask = {
- {U300_SYSCON_PMC1LR_MMCSD_MASK,
- U300_SYSCON_PMC1LR_MMCSD_MMCSD},
- {0, 0},
- {0, 0},
- {0, 0},
- {U300_SYSCON_PMC4R_APP_MISC_12_MASK,
- U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO}
- },
-};
-
-static struct pmx spi_setting = {
- .setting = U300_APP_PMX_SPI_SETTING,
- .default_on = false,
- .activated = false,
- .name = "SPI",
- .onmask = {{0, 0},
- {U300_SYSCON_PMC1HR_APP_SPI_2_MASK |
- U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK |
- U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK,
- U300_SYSCON_PMC1HR_APP_SPI_2_SPI |
- U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI |
- U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI},
- {0, 0},
- {0, 0},
- {0, 0}
- },
-};
-
-/* Available padmux settings */
-static struct pmx *pmx_settings[] = {
- &mmc_setting,
- &spi_setting,
-};
-
-static void update_registers(struct pmx *pmx, bool activate)
-{
- u16 regval, val, mask;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pmx_registers); i++) {
- if (activate)
- val = pmx->onmask[i].val;
- else
- val = 0;
-
- mask = pmx->onmask[i].mask;
- if (mask != 0) {
- regval = readw(pmx_registers[i]);
- regval &= ~mask;
- regval |= val;
- writew(regval, pmx_registers[i]);
- }
- }
-}
-
-struct pmx *pmx_get(struct device *dev, enum pmx_settings setting)
-{
- int i;
- struct pmx *pmx = ERR_PTR(-ENOENT);
-
- if (dev == NULL)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&pmx_mutex);
- for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
-
- if (setting == pmx_settings[i]->setting) {
-
- if (pmx_settings[i]->dev != NULL) {
- WARN(1, "padmux: required setting "
- "in use by another consumer\n");
- } else {
- pmx = pmx_settings[i];
- pmx->dev = dev;
- dev_dbg(dev, "padmux: setting nr %d is now "
- "bound to %s and ready to use\n",
- setting, dev_name(dev));
- break;
- }
- }
- }
- mutex_unlock(&pmx_mutex);
-
- return pmx;
-}
-EXPORT_SYMBOL(pmx_get);
-
-int pmx_put(struct device *dev, struct pmx *pmx)
-{
- int i;
- int ret = -ENOENT;
-
- if (pmx == NULL || dev == NULL)
- return -EINVAL;
-
- mutex_lock(&pmx_mutex);
- for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
-
- if (pmx->setting == pmx_settings[i]->setting) {
-
- if (dev != pmx->dev) {
- WARN(1, "padmux: cannot release handle as "
- "it is bound to another consumer\n");
- ret = -EINVAL;
- break;
- } else {
- pmx_settings[i]->dev = NULL;
- ret = 0;
- break;
- }
- }
- }
- mutex_unlock(&pmx_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(pmx_put);
-
-int pmx_activate(struct device *dev, struct pmx *pmx)
-{
- int i, j, ret;
- ret = 0;
-
- if (pmx == NULL || dev == NULL)
- return -EINVAL;
-
- mutex_lock(&pmx_mutex);
-
- /* Make sure the required bits are not used */
- for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
-
- if (pmx_settings[i]->dev == NULL || pmx_settings[i] == pmx)
- continue;
-
- for (j = 0; j < ARRAY_SIZE(pmx_registers); j++) {
-
- if (pmx_settings[i]->onmask[j].mask & pmx->
- onmask[j].mask) {
- /* More than one entry on the same bits */
- WARN(1, "padmux: cannot activate "
- "setting. Bit conflict with "
- "an active setting\n");
-
- ret = -EUSERS;
- goto exit;
- }
- }
- }
- update_registers(pmx, true);
- pmx->activated = true;
- dev_dbg(dev, "padmux: setting nr %d is activated\n",
- pmx->setting);
-
-exit:
- mutex_unlock(&pmx_mutex);
- return ret;
-}
-EXPORT_SYMBOL(pmx_activate);
-
-int pmx_deactivate(struct device *dev, struct pmx *pmx)
-{
- int i;
- int ret = -ENOENT;
-
- if (pmx == NULL || dev == NULL)
- return -EINVAL;
-
- mutex_lock(&pmx_mutex);
- for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
-
- if (pmx_settings[i]->dev == NULL)
- continue;
-
- if (pmx->setting == pmx_settings[i]->setting) {
-
- if (dev != pmx->dev) {
- WARN(1, "padmux: cannot deactivate "
- "pmx setting as it was activated "
- "by another consumer\n");
-
- ret = -EBUSY;
- continue;
- } else {
- update_registers(pmx, false);
- pmx_settings[i]->dev = NULL;
- pmx->activated = false;
- ret = 0;
- dev_dbg(dev, "padmux: setting nr %d is deactivated",
- pmx->setting);
- break;
- }
- }
- }
- mutex_unlock(&pmx_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(pmx_deactivate);
-
-/*
- * For internal use only. If it is to be exported,
- * it should be reentrant. Notice that pmx_activate
- * (i.e. runtime settings) always override default settings.
- */
-static int pmx_set_default(void)
-{
- /* Used to identify several entries on the same bits */
- u16 modbits[ARRAY_SIZE(pmx_registers)];
-
- int i, j;
-
- memset(modbits, 0, ARRAY_SIZE(pmx_registers) * sizeof(u16));
-
- for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
-
- if (!pmx_settings[i]->default_on)
- continue;
-
- for (j = 0; j < ARRAY_SIZE(pmx_registers); j++) {
-
- /* Make sure there is only one entry on the same bits */
- if (modbits[j] & pmx_settings[i]->onmask[j].mask) {
- BUG();
- return -EUSERS;
- }
- modbits[j] |= pmx_settings[i]->onmask[j].mask;
- }
- update_registers(pmx_settings[i], true);
- }
- return 0;
-}
-
-#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
-static int pmx_show(struct seq_file *s, void *data)
-{
- int i;
- seq_printf(s, "-------------------------------------------------\n");
- seq_printf(s, "SETTING BOUND TO DEVICE STATE\n");
- seq_printf(s, "-------------------------------------------------\n");
- mutex_lock(&pmx_mutex);
- for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
- /* Format pmx and device name nicely */
- char cdp[33];
- int chars;
-
- chars = snprintf(&cdp[0], 17, "%s", pmx_settings[i]->name);
- while (chars < 16) {
- cdp[chars] = ' ';
- chars++;
- }
- chars = snprintf(&cdp[16], 17, "%s", pmx_settings[i]->dev ?
- dev_name(pmx_settings[i]->dev) : "N/A");
- while (chars < 16) {
- cdp[chars+16] = ' ';
- chars++;
- }
- cdp[32] = '\0';
-
- seq_printf(s,
- "%s\t%s\n",
- &cdp[0],
- pmx_settings[i]->activated ?
- "ACTIVATED" : "DEACTIVATED"
- );
-
- }
- mutex_unlock(&pmx_mutex);
- return 0;
-}
-
-static int pmx_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pmx_show, NULL);
-}
-
-static const struct file_operations pmx_operations = {
- .owner = THIS_MODULE,
- .open = pmx_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init init_pmx_read_debugfs(void)
-{
- /* Expose a simple debugfs interface to view pmx settings */
- (void) debugfs_create_file("padmux", S_IFREG | S_IRUGO,
- NULL, NULL,
- &pmx_operations);
- return 0;
-}
-
-/*
- * This needs to come in after the core_initcall(),
- * because debugfs is not available until
- * the subsystems come up.
- */
-module_init(init_pmx_read_debugfs);
-#endif
-
-static int __init pmx_init(void)
-{
- int ret;
-
- ret = pmx_set_default();
-
- if (IS_ERR_VALUE(ret))
- pr_crit("padmux: default settings could not be set\n");
-
- return 0;
-}
-
-/* Should be initialized before consumers */
-core_initcall(pmx_init);
diff --git a/arch/arm/mach-u300/padmux.h b/arch/arm/mach-u300/padmux.h
deleted file mode 100644
index 6e8b8606409..00000000000
--- a/arch/arm/mach-u300/padmux.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- *
- * arch/arm/mach-u300/padmux.h
- *
- *
- * Copyright (C) 2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * U300 PADMUX API
- * Author: Martin Persson <martin.persson@stericsson.com>
- */
-
-#ifndef __MACH_U300_PADMUX_H
-#define __MACH_U300_PADMUX_H
-
-enum pmx_settings {
- U300_APP_PMX_MMC_SETTING,
- U300_APP_PMX_SPI_SETTING
-};
-
-struct pmx_onmask {
- u16 mask; /* Mask bits */
- u16 val; /* Value when active */
-};
-
-struct pmx {
- struct device *dev;
- enum pmx_settings setting;
- char *name;
- bool activated;
- bool default_on;
- struct pmx_onmask onmask[];
-};
-
-struct pmx *pmx_get(struct device *dev, enum pmx_settings setting);
-int pmx_put(struct device *dev, struct pmx *pmx);
-int pmx_activate(struct device *dev, struct pmx *pmx);
-int pmx_deactivate(struct device *dev, struct pmx *pmx);
-
-#endif
diff --git a/arch/arm/mach-u300/spi.c b/arch/arm/mach-u300/spi.c
index 7b597e2b19e..a1affacfa59 100644
--- a/arch/arm/mach-u300/spi.c
+++ b/arch/arm/mach-u300/spi.c
@@ -14,8 +14,6 @@
#include <mach/coh901318.h>
#include <mach/dma_channels.h>
-#include "padmux.h"
-
/*
* The following is for the actual devices on the SSP/SPI bus
*/
@@ -95,25 +93,7 @@ static struct pl022_ssp_controller ssp_platform_data = {
void __init u300_spi_init(struct amba_device *adev)
{
- struct pmx *pmx;
-
adev->dev.platform_data = &ssp_platform_data;
- /*
- * Setup padmuxing for SPI. Since this must always be
- * compiled into the kernel, pmx is never released.
- */
- pmx = pmx_get(&adev->dev, U300_APP_PMX_SPI_SETTING);
-
- if (IS_ERR(pmx))
- dev_warn(&adev->dev, "Could not get padmux handle\n");
- else {
- int ret;
-
- ret = pmx_activate(&adev->dev, pmx);
- if (IS_ERR_VALUE(ret))
- dev_warn(&adev->dev, "Could not activate padmuxing\n");
- }
-
}
void __init u300_spi_register_board_devices(void)
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 4210cb434db..a3e0c8692f0 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -6,6 +6,7 @@ config UX500_SOC_COMMON
select ARM_GIC
select HAS_MTU
select ARM_ERRATA_753970
+ select ARM_ERRATA_754322
menu "Ux500 SoC"
diff --git a/arch/arm/mach-ux500/Makefile.boot b/arch/arm/mach-ux500/Makefile.boot
index c7e75acfe6c..ff0a4b5b0a8 100644
--- a/arch/arm/mach-ux500/Makefile.boot
+++ b/arch/arm/mach-ux500/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index a33df5f4c27..eb5199102cf 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -156,12 +156,10 @@ void __init smp_init_cpus(void)
ncores = scu_base ? scu_get_core_count(scu_base) : 1;
/* sanity check */
- if (ncores > NR_CPUS) {
- printk(KERN_WARNING
- "U8500: no. of cores (%d) greater than configured "
- "maximum of %d - clipping\n",
- ncores, NR_CPUS);
- ncores = NR_CPUS;
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
}
for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-versatile/Makefile.boot b/arch/arm/mach-versatile/Makefile.boot
index c7e75acfe6c..ff0a4b5b0a8 100644
--- a/arch/arm/mach-versatile/Makefile.boot
+++ b/arch/arm/mach-versatile/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-vexpress/Makefile.boot b/arch/arm/mach-vexpress/Makefile.boot
index 07c2d9c457e..8630b3d10a4 100644
--- a/arch/arm/mach-vexpress/Makefile.boot
+++ b/arch/arm/mach-vexpress/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x60008000
+ zreladdr-y += 0x60008000
params_phys-y := 0x60000100
initrd_phys-y := 0x60800000
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index bfd32f52c2d..2b1e836a76e 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -221,6 +221,12 @@ static void ct_ca9x4_init_cpu_map(void)
{
int i, ncores = scu_get_core_count(MMIO_P2V(A9_MPCORE_SCU));
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
for (i = 0; i < ncores; ++i)
set_cpu_possible(i, true);
diff --git a/arch/arm/mach-vexpress/hotplug.c b/arch/arm/mach-vexpress/hotplug.c
index ea4cbfb90a6..3668cf91d2d 100644
--- a/arch/arm/mach-vexpress/hotplug.c
+++ b/arch/arm/mach-vexpress/hotplug.c
@@ -13,6 +13,7 @@
#include <linux/smp.h>
#include <asm/cacheflush.h>
+#include <asm/system.h>
extern volatile int pen_release;
@@ -62,13 +63,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
* code will have already disabled interrupts
*/
for (;;) {
- /*
- * here's the WFI
- */
- asm(".word 0xe320f003\n"
- :
- :
- : "memory", "cc");
+ wfi();
if (pen_release == cpu) {
/*
diff --git a/arch/arm/mach-vexpress/include/mach/io.h b/arch/arm/mach-vexpress/include/mach/io.h
index 748bb524ee7..13522d86685 100644
--- a/arch/arm/mach-vexpress/include/mach/io.h
+++ b/arch/arm/mach-vexpress/include/mach/io.h
@@ -20,8 +20,6 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-#define IO_SPACE_LIMIT 0xffffffff
-
#define __io(a) __typesafe_io(a)
#define __mem_pci(a) (a)
diff --git a/arch/arm/mach-vt8500/Makefile.boot b/arch/arm/mach-vt8500/Makefile.boot
index a8acc4e2490..b79c41cdfdf 100644
--- a/arch/arm/mach-vt8500/Makefile.boot
+++ b/arch/arm/mach-vt8500/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x01000000
diff --git a/arch/arm/mach-vt8500/include/mach/io.h b/arch/arm/mach-vt8500/include/mach/io.h
index 9077239f78c..46181eecf27 100644
--- a/arch/arm/mach-vt8500/include/mach/io.h
+++ b/arch/arm/mach-vt8500/include/mach/io.h
@@ -20,8 +20,6 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-#define IO_SPACE_LIMIT 0xffff
-
#define __io(a) __typesafe_io((a) + 0xf0000000)
#define __mem_pci(a) (a)
diff --git a/arch/arm/mach-w90x900/Makefile.boot b/arch/arm/mach-w90x900/Makefile.boot
index a057b546b6e..6c3d421c2d1 100644
--- a/arch/arm/mach-w90x900/Makefile.boot
+++ b/arch/arm/mach-w90x900/Makefile.boot
@@ -1,3 +1,3 @@
-zreladdr-y := 0x00008000
+zreladdr-y += 0x00008000
params_phys-y := 0x00000100
diff --git a/arch/arm/mach-w90x900/cpu.c b/arch/arm/mach-w90x900/cpu.c
index 83c56324a47..0a235e50233 100644
--- a/arch/arm/mach-w90x900/cpu.c
+++ b/arch/arm/mach-w90x900/cpu.c
@@ -60,7 +60,7 @@ static DEFINE_CLK(emc, 7);
static DEFINE_SUBCLK(rmii, 2);
static DEFINE_CLK(usbd, 8);
static DEFINE_CLK(usbh, 9);
-static DEFINE_CLK(g2d, 10);;
+static DEFINE_CLK(g2d, 10);
static DEFINE_CLK(pwm, 18);
static DEFINE_CLK(ps2, 24);
static DEFINE_CLK(kpi, 25);
diff --git a/arch/arm/mach-zynq/Makefile.boot b/arch/arm/mach-zynq/Makefile.boot
index 67039c3e0c4..760a0efe758 100644
--- a/arch/arm/mach-zynq/Makefile.boot
+++ b/arch/arm/mach-zynq/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index cfbcf8b9559..c335c76e0d8 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -86,16 +86,6 @@ core_param(alignment, ai_usermode, int, 0600);
#define UM_FIXUP (1 << 1)
#define UM_SIGNAL (1 << 2)
-#ifdef CONFIG_PROC_FS
-static const char *usermode_action[] = {
- "ignored",
- "warn",
- "fixup",
- "fixup+warn",
- "signal",
- "signal+warn"
-};
-
/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
@@ -123,6 +113,16 @@ static int safe_usermode(int new_usermode, bool warn)
return new_usermode;
}
+#ifdef CONFIG_PROC_FS
+static const char *usermode_action[] = {
+ "ignored",
+ "warn",
+ "fixup",
+ "fixup+warn",
+ "signal",
+ "signal+warn"
+};
+
static int alignment_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "User:\t\t%lu\n", ai_user);
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9ecfdb51195..8ac9e9f8479 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -16,9 +16,12 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
@@ -26,15 +29,23 @@
#define CACHE_LINE_SIZE 32
static void __iomem *l2x0_base;
-static DEFINE_SPINLOCK(l2x0_lock);
+static DEFINE_RAW_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
static uint32_t l2x0_size;
+struct l2x0_regs l2x0_saved_regs;
+
+struct l2x0_of_data {
+ void (*setup)(const struct device_node *, __u32 *, __u32 *);
+ void (*save)(void);
+ void (*resume)(void);
+};
+
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
/* wait for cache operation by line or way to complete */
while (readl_relaxed(reg) & mask)
- ;
+ cpu_relax();
}
#ifdef CONFIG_CACHE_PL310
@@ -115,9 +126,9 @@ static void l2x0_cache_sync(void)
{
unsigned long flags;
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void __l2x0_flush_all(void)
@@ -134,9 +145,9 @@ static void l2x0_flush_all(void)
unsigned long flags;
/* clean all ways */
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_all(void)
@@ -144,11 +155,11 @@ static void l2x0_clean_all(void)
unsigned long flags;
/* clean all ways */
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_all(void)
@@ -156,13 +167,13 @@ static void l2x0_inv_all(void)
unsigned long flags;
/* invalidate all ways */
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
/* Invalidating when L2 is enabled is a nono */
BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_range(unsigned long start, unsigned long end)
@@ -170,7 +181,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
void __iomem *base = l2x0_base;
unsigned long flags;
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
debug_writel(0x03);
@@ -195,13 +206,13 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
}
if (blk_end < end) {
- spin_unlock_irqrestore(&l2x0_lock, flags);
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_INV_LINE_PA, 1);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
@@ -214,7 +225,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
return;
}
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
@@ -225,13 +236,13 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
}
if (blk_end < end) {
- spin_unlock_irqrestore(&l2x0_lock, flags);
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
@@ -244,7 +255,7 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
return;
}
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
@@ -257,27 +268,27 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
debug_writel(0x00);
if (blk_end < end) {
- spin_unlock_irqrestore(&l2x0_lock, flags);
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_disable(void)
{
unsigned long flags;
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
writel_relaxed(0, l2x0_base + L2X0_CTRL);
dsb();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
-static void __init l2x0_unlock(__u32 cache_id)
+static void l2x0_unlock(__u32 cache_id)
{
int lockregs;
int i;
@@ -353,6 +364,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
/* l2x0 controller is disabled */
writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
+ l2x0_saved_regs.aux_ctrl = aux;
+
l2x0_inv_all();
/* enable L2X0 */
@@ -372,3 +385,202 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
ways, cache_id, aux, l2x0_size);
}
+
+#ifdef CONFIG_OF
+static void __init l2x0_of_setup(const struct device_node *np,
+ __u32 *aux_val, __u32 *aux_mask)
+{
+ u32 data[2] = { 0, 0 };
+ u32 tag = 0;
+ u32 dirty = 0;
+ u32 val = 0, mask = 0;
+
+ of_property_read_u32(np, "arm,tag-latency", &tag);
+ if (tag) {
+ mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
+ val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
+ }
+
+ of_property_read_u32_array(np, "arm,data-latency",
+ data, ARRAY_SIZE(data));
+ if (data[0] && data[1]) {
+ mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
+ L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
+ val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
+ ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
+ }
+
+ of_property_read_u32(np, "arm,dirty-latency", &dirty);
+ if (dirty) {
+ mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
+ val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
+ }
+
+ *aux_val &= ~mask;
+ *aux_val |= val;
+ *aux_mask &= ~mask;
+}
+
+static void __init pl310_of_setup(const struct device_node *np,
+ __u32 *aux_val, __u32 *aux_mask)
+{
+ u32 data[3] = { 0, 0, 0 };
+ u32 tag[3] = { 0, 0, 0 };
+ u32 filter[2] = { 0, 0 };
+
+ of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
+ if (tag[0] && tag[1] && tag[2])
+ writel_relaxed(
+ ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+ ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+ ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+ l2x0_base + L2X0_TAG_LATENCY_CTRL);
+
+ of_property_read_u32_array(np, "arm,data-latency",
+ data, ARRAY_SIZE(data));
+ if (data[0] && data[1] && data[2])
+ writel_relaxed(
+ ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+ ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+ ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+ l2x0_base + L2X0_DATA_LATENCY_CTRL);
+
+ of_property_read_u32_array(np, "arm,filter-ranges",
+ filter, ARRAY_SIZE(filter));
+ if (filter[1]) {
+ writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
+ l2x0_base + L2X0_ADDR_FILTER_END);
+ writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
+ l2x0_base + L2X0_ADDR_FILTER_START);
+ }
+}
+
+static void __init pl310_save(void)
+{
+ u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+ L2X0_CACHE_ID_RTL_MASK;
+
+ l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
+ L2X0_TAG_LATENCY_CTRL);
+ l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
+ L2X0_DATA_LATENCY_CTRL);
+ l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
+ L2X0_ADDR_FILTER_END);
+ l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
+ L2X0_ADDR_FILTER_START);
+
+ if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+ /*
+ * From r2p0, there is Prefetch offset/control register
+ */
+ l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
+ L2X0_PREFETCH_CTRL);
+ /*
+ * From r3p0, there is Power control register
+ */
+ if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+ l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
+ L2X0_POWER_CTRL);
+ }
+}
+
+static void l2x0_resume(void)
+{
+ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+ /* restore aux ctrl and enable l2 */
+ l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
+
+ writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
+ L2X0_AUX_CTRL);
+
+ l2x0_inv_all();
+
+ writel_relaxed(1, l2x0_base + L2X0_CTRL);
+ }
+}
+
+static void pl310_resume(void)
+{
+ u32 l2x0_revision;
+
+ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+ /* restore pl310 setup */
+ writel_relaxed(l2x0_saved_regs.tag_latency,
+ l2x0_base + L2X0_TAG_LATENCY_CTRL);
+ writel_relaxed(l2x0_saved_regs.data_latency,
+ l2x0_base + L2X0_DATA_LATENCY_CTRL);
+ writel_relaxed(l2x0_saved_regs.filter_end,
+ l2x0_base + L2X0_ADDR_FILTER_END);
+ writel_relaxed(l2x0_saved_regs.filter_start,
+ l2x0_base + L2X0_ADDR_FILTER_START);
+
+ l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+ L2X0_CACHE_ID_RTL_MASK;
+
+ if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+ writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
+ l2x0_base + L2X0_PREFETCH_CTRL);
+ if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+ writel_relaxed(l2x0_saved_regs.pwr_ctrl,
+ l2x0_base + L2X0_POWER_CTRL);
+ }
+ }
+
+ l2x0_resume();
+}
+
+static const struct l2x0_of_data pl310_data = {
+ pl310_of_setup,
+ pl310_save,
+ pl310_resume,
+};
+
+static const struct l2x0_of_data l2x0_data = {
+ l2x0_of_setup,
+ NULL,
+ l2x0_resume,
+};
+
+static const struct of_device_id l2x0_ids[] __initconst = {
+ { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
+ { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
+ { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
+ {}
+};
+
+int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
+{
+ struct device_node *np;
+ struct l2x0_of_data *data;
+ struct resource res;
+
+ np = of_find_matching_node(NULL, l2x0_ids);
+ if (!np)
+ return -ENODEV;
+
+ if (of_address_to_resource(np, 0, &res))
+ return -ENODEV;
+
+ l2x0_base = ioremap(res.start, resource_size(&res));
+ if (!l2x0_base)
+ return -ENOMEM;
+
+ l2x0_saved_regs.phy_base = res.start;
+
+ data = of_match_node(l2x0_ids, np)->data;
+
+ /* L2 configuration can only be changed if the cache is disabled */
+ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+ if (data->setup)
+ data->setup(np, &aux_val, &aux_mask);
+ }
+
+ if (data->save)
+ data->save();
+
+ l2x0_init(l2x0_base, aux_val, aux_mask);
+
+ outer_cache.resume = data->resume;
+ return 0;
+}
+#endif
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba3cfa..93aac068da9 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -16,7 +16,7 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
-static DEFINE_SPINLOCK(cpu_asid_lock);
+static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(struct mm_struct *, current_mm);
@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context.id = 0;
- spin_lock_init(&mm->context.id_lock);
+ raw_spin_lock_init(&mm->context.id_lock);
}
static void flush_context(void)
@@ -58,7 +58,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid)
* the broadcast. This function is also called via IPI so the
* mm->context.id_lock has to be IRQ-safe.
*/
- spin_lock_irqsave(&mm->context.id_lock, flags);
+ raw_spin_lock_irqsave(&mm->context.id_lock, flags);
if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
/*
* Old version of ASID found. Set the new one and
@@ -67,7 +67,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid)
mm->context.id = asid;
cpumask_clear(mm_cpumask(mm));
}
- spin_unlock_irqrestore(&mm->context.id_lock, flags);
+ raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
/*
* Set the mm_cpumask(mm) bit for the current CPU.
@@ -117,7 +117,7 @@ void __new_context(struct mm_struct *mm)
{
unsigned int asid;
- spin_lock(&cpu_asid_lock);
+ raw_spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
/*
* Check the ASID again, in case the change was broadcast from
@@ -125,7 +125,7 @@ void __new_context(struct mm_struct *mm)
*/
if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
- spin_unlock(&cpu_asid_lock);
+ raw_spin_unlock(&cpu_asid_lock);
return;
}
#endif
@@ -153,5 +153,5 @@ void __new_context(struct mm_struct *mm)
}
set_mm_context(mm, asid);
- spin_unlock(&cpu_asid_lock);
+ raw_spin_unlock(&cpu_asid_lock);
}
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index b8061519ce7..7d0a8c23034 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -30,7 +30,7 @@
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
L_PTE_MT_MINICACHE)
-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);
/*
* ARMv4 mini-dcache optimised copy_user_highpage
@@ -76,14 +76,14 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
- spin_lock(&minicache_lock);
+ raw_spin_lock(&minicache_lock);
set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
flush_tlb_kernel_page(0xffff8000);
mc_copy_user_page((void *)0xffff8000, kto);
- spin_unlock(&minicache_lock);
+ raw_spin_unlock(&minicache_lock);
kunmap_atomic(kto, KM_USER1);
}
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 63cca009713..3d9a1552cef 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -27,7 +27,7 @@
#define from_address (0xffff8000)
#define to_address (0xffffc000)
-static DEFINE_SPINLOCK(v6_lock);
+static DEFINE_RAW_SPINLOCK(v6_lock);
/*
* Copy the user page. No aliasing to deal with so we can just
@@ -88,7 +88,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
* Now copy the page using the same cache colour as the
* pages ultimate destination.
*/
- spin_lock(&v6_lock);
+ raw_spin_lock(&v6_lock);
set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
@@ -101,7 +101,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
copy_page((void *)kto, (void *)kfrom);
- spin_unlock(&v6_lock);
+ raw_spin_unlock(&v6_lock);
}
/*
@@ -121,13 +121,13 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
* Now clear the page using the same cache colour as
* the pages ultimate destination.
*/
- spin_lock(&v6_lock);
+ raw_spin_lock(&v6_lock);
set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
flush_tlb_kernel_page(to);
clear_page((void *)to);
- spin_unlock(&v6_lock);
+ raw_spin_unlock(&v6_lock);
}
struct cpu_user_fns v6_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 649bbcd325b..610c24ced31 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -32,7 +32,7 @@
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
L_PTE_MT_MINICACHE)
-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);
/*
* XScale mini-dcache optimised copy_user_highpage
@@ -98,14 +98,14 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
- spin_lock(&minicache_lock);
+ raw_spin_lock(&minicache_lock);
set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
flush_tlb_kernel_page(COPYPAGE_MINICACHE);
mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
- spin_unlock(&minicache_lock);
+ raw_spin_unlock(&minicache_lock);
kunmap_atomic(kto, KM_USER1);
}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c3ff82f92d9..235eb775fc7 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -123,8 +123,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
#endif
#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
-#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PMD_SHIFT)
+#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PMD_SHIFT)
/*
* These are the page tables (2MB each) covering uncached, DMA consistent allocations
@@ -183,7 +183,7 @@ static int __init consistent_init(void)
}
consistent_pte[i++] = pte;
- base += (1 << PGDIR_SHIFT);
+ base += PMD_SIZE;
} while (base < CONSISTENT_END);
return ret;
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 3b5ea68acbb..aa33949fef6 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -20,6 +20,7 @@
#include <linux/highmem.h>
#include <linux/perf_event.h>
+#include <asm/exception.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index cc7e2d8be9a..f8037ba338a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -496,6 +496,13 @@ static void __init free_unused_memmap(struct meminfo *mi)
*/
bank_start = min(bank_start,
ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#else
+ /*
+ * Align down here since the VM subsystem insists that the
+ * memmap entries are valid from the bank start aligned to
+ * MAX_ORDER_NR_PAGES.
+ */
+ bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
/*
* If we had a previous bank, and there is a space
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ab506272b2d..bdb248c4f55 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -289,6 +289,27 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
}
EXPORT_SYMBOL(__arm_ioremap);
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space as memory. Needed when the kernel wants to execute
+ * code in external memory. This is needed for reprogramming source
+ * clocks that would affect normal memory for example. Please see
+ * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
+ */
+void __iomem *
+__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
+{
+ unsigned int mtype;
+
+ if (cached)
+ mtype = MT_MEMORY;
+ else
+ mtype = MT_MEMORY_NONCACHED;
+
+ return __arm_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
+}
+
void __iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 010566799c8..ad7cce3bc43 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -12,8 +12,8 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
struct mem_type {
pteval_t prot_pte;
- unsigned int prot_l1;
- unsigned int prot_sect;
+ pmdval_t prot_l1;
+ pmdval_t prot_sect;
unsigned int domain;
};
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 594d677b92c..226f1804be1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(pgprot_kernel);
struct cachepolicy {
const char policy[16];
unsigned int cr_mask;
- unsigned int pmd;
+ pmdval_t pmd;
pteval_t pte;
};
@@ -288,7 +288,7 @@ static void __init build_mem_type_table(void)
{
struct cachepolicy *cp;
unsigned int cr = get_cr();
- unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
+ pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -863,14 +863,14 @@ static inline void prepare_page_table(void)
/*
* Clear out all the mappings below the kernel image.
*/
- for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
+ for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
#ifdef CONFIG_XIP_KERNEL
/* The XIP kernel is mapped in the module area -- skip over it */
- addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
+ addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
- for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
+ for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*
@@ -885,10 +885,12 @@ static inline void prepare_page_table(void)
* memory bank, up to the end of the vmalloc region.
*/
for (addr = __phys_to_virt(end);
- addr < VMALLOC_END; addr += PGDIR_SIZE)
+ addr < VMALLOC_END; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
}
+#define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
+
/*
* Reserve the special regions of memory
*/
@@ -898,7 +900,7 @@ void __init arm_mm_memblock_reserve(void)
* Reserve the page tables. These are already in use,
* and can only be in node 0.
*/
- memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
+ memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
#ifdef CONFIG_SA1111
/*
@@ -926,7 +928,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
*/
vectors_page = early_alloc(PAGE_SIZE);
- for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
+ for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 9049c0764db..9591c8e9fb8 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -218,7 +218,7 @@ ENDPROC(cpu_v7_set_pte_ext)
/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
.globl cpu_v7_suspend_size
.equ cpu_v7_suspend_size, 4 * 9
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7_do_suspend)
stmfd sp!, {r4 - r11, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index a5353fc0793..4c8fdbcc946 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -39,7 +39,7 @@ config ARCH_MX503
select ARCH_MX50_SUPPORTED
select ARCH_MX53_SUPPORTED
help
- This enables support for machines using Freescale's i.MX50 and i.MX51
+ This enables support for machines using Freescale's i.MX50 and i.MX53
processors.
config ARCH_MX51
diff --git a/arch/arm/plat-mxc/devices.c b/arch/arm/plat-mxc/devices.c
index 0d6ed31bdbf..a34b2ae895f 100644
--- a/arch/arm/plat-mxc/devices.c
+++ b/arch/arm/plat-mxc/devices.c
@@ -37,59 +37,6 @@ int __init mxc_register_device(struct platform_device *pdev, void *data)
return ret;
}
-struct platform_device *__init imx_add_platform_device_dmamask(
- const char *name, int id,
- const struct resource *res, unsigned int num_resources,
- const void *data, size_t size_data, u64 dmamask)
-{
- int ret = -ENOMEM;
- struct platform_device *pdev;
-
- pdev = platform_device_alloc(name, id);
- if (!pdev)
- goto err;
-
- if (dmamask) {
- /*
- * This memory isn't freed when the device is put,
- * I don't have a nice idea for that though. Conceptually
- * dma_mask in struct device should not be a pointer.
- * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
- */
- pdev->dev.dma_mask =
- kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
- if (!pdev->dev.dma_mask)
- /* ret is still -ENOMEM; */
- goto err;
-
- *pdev->dev.dma_mask = dmamask;
- pdev->dev.coherent_dma_mask = dmamask;
- }
-
- if (res) {
- ret = platform_device_add_resources(pdev, res, num_resources);
- if (ret)
- goto err;
- }
-
- if (data) {
- ret = platform_device_add_data(pdev, data, size_data);
- if (ret)
- goto err;
- }
-
- ret = platform_device_add(pdev);
- if (ret) {
-err:
- if (dmamask)
- kfree(pdev->dev.dma_mask);
- platform_device_put(pdev);
- return ERR_PTR(ret);
- }
-
- return pdev;
-}
-
struct device mxc_aips_bus = {
.init_name = "mxc_aips",
.parent = &platform_bus,
diff --git a/arch/arm/plat-mxc/include/mach/devices-common.h b/arch/arm/plat-mxc/include/mach/devices-common.h
index 524538aabc4..543525d76a6 100644
--- a/arch/arm/plat-mxc/include/mach/devices-common.h
+++ b/arch/arm/plat-mxc/include/mach/devices-common.h
@@ -14,10 +14,22 @@
extern struct device mxc_aips_bus;
extern struct device mxc_ahb_bus;
-struct platform_device *imx_add_platform_device_dmamask(
+static inline struct platform_device *imx_add_platform_device_dmamask(
const char *name, int id,
const struct resource *res, unsigned int num_resources,
- const void *data, size_t size_data, u64 dmamask);
+ const void *data, size_t size_data, u64 dmamask)
+{
+ struct platform_device_info pdevinfo = {
+ .name = name,
+ .id = id,
+ .res = res,
+ .num_res = num_resources,
+ .data = data,
+ .size_data = size_data,
+ .dma_mask = dmamask,
+ };
+ return platform_device_register_full(&pdevinfo);
+}
static inline struct platform_device *imx_add_platform_device(
const char *name, int id,
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index bb8f4a6b3e3..5b605a9eb09 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -14,6 +14,7 @@ config ARCH_OMAP1
select CLKDEV_LOOKUP
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
+ select HAVE_IDE
help
"Systems based on omap7xx, omap15xx or omap16xx"
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
index 9843c954c04..9a197e55f66 100644
--- a/arch/arm/plat-s5p/Kconfig
+++ b/arch/arm/plat-s5p/Kconfig
@@ -22,7 +22,6 @@ config PLAT_S5P
select PLAT_SAMSUNG
select SAMSUNG_CLKSRC
select SAMSUNG_IRQ_VIC_TIMER
- select SAMSUNG_IRQ_UART
help
Base platform code for Samsung's S5P series SoC.
diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-s5p/dev-uart.c
index afaf87fdb93..c9308db3618 100644
--- a/arch/arm/plat-s5p/dev-uart.c
+++ b/arch/arm/plat-s5p/dev-uart.c
@@ -32,20 +32,10 @@ static struct resource s5p_uart0_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_S5P_UART_RX0,
- .end = IRQ_S5P_UART_RX0,
+ .start = IRQ_UART0,
+ .end = IRQ_UART0,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- .start = IRQ_S5P_UART_TX0,
- .end = IRQ_S5P_UART_TX0,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- .start = IRQ_S5P_UART_ERR0,
- .end = IRQ_S5P_UART_ERR0,
- .flags = IORESOURCE_IRQ,
- }
};
static struct resource s5p_uart1_resource[] = {
@@ -55,18 +45,8 @@ static struct resource s5p_uart1_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_S5P_UART_RX1,
- .end = IRQ_S5P_UART_RX1,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_S5P_UART_TX1,
- .end = IRQ_S5P_UART_TX1,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- .start = IRQ_S5P_UART_ERR1,
- .end = IRQ_S5P_UART_ERR1,
+ .start = IRQ_UART1,
+ .end = IRQ_UART1,
.flags = IORESOURCE_IRQ,
},
};
@@ -78,18 +58,8 @@ static struct resource s5p_uart2_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_S5P_UART_RX2,
- .end = IRQ_S5P_UART_RX2,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_S5P_UART_TX2,
- .end = IRQ_S5P_UART_TX2,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- .start = IRQ_S5P_UART_ERR2,
- .end = IRQ_S5P_UART_ERR2,
+ .start = IRQ_UART2,
+ .end = IRQ_UART2,
.flags = IORESOURCE_IRQ,
},
};
@@ -102,18 +72,8 @@ static struct resource s5p_uart3_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_S5P_UART_RX3,
- .end = IRQ_S5P_UART_RX3,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_S5P_UART_TX3,
- .end = IRQ_S5P_UART_TX3,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- .start = IRQ_S5P_UART_ERR3,
- .end = IRQ_S5P_UART_ERR3,
+ .start = IRQ_UART3,
+ .end = IRQ_UART3,
.flags = IORESOURCE_IRQ,
},
#endif
@@ -127,18 +87,8 @@ static struct resource s5p_uart4_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_S5P_UART_RX4,
- .end = IRQ_S5P_UART_RX4,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_S5P_UART_TX4,
- .end = IRQ_S5P_UART_TX4,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- .start = IRQ_S5P_UART_ERR4,
- .end = IRQ_S5P_UART_ERR4,
+ .start = IRQ_UART4,
+ .end = IRQ_UART4,
.flags = IORESOURCE_IRQ,
},
#endif
@@ -152,18 +102,8 @@ static struct resource s5p_uart5_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_S5P_UART_RX5,
- .end = IRQ_S5P_UART_RX5,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_S5P_UART_TX5,
- .end = IRQ_S5P_UART_TX5,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- .start = IRQ_S5P_UART_ERR5,
- .end = IRQ_S5P_UART_ERR5,
+ .start = IRQ_UART5,
+ .end = IRQ_UART5,
.flags = IORESOURCE_IRQ,
},
#endif
diff --git a/arch/arm/plat-s5p/include/plat/irqs.h b/arch/arm/plat-s5p/include/plat/irqs.h
index ba9121c60a2..144dbfc6506 100644
--- a/arch/arm/plat-s5p/include/plat/irqs.h
+++ b/arch/arm/plat-s5p/include/plat/irqs.h
@@ -37,41 +37,6 @@
#define IRQ_VIC1_BASE S5P_VIC1_BASE
#define IRQ_VIC2_BASE S5P_VIC2_BASE
-/* UART interrupts, each UART has 4 intterupts per channel so
- * use the space between the ISA and S3C main interrupts. Note, these
- * are not in the same order as the S3C24XX series! */
-
-#define IRQ_S5P_UART_BASE0 (16)
-#define IRQ_S5P_UART_BASE1 (20)
-#define IRQ_S5P_UART_BASE2 (24)
-#define IRQ_S5P_UART_BASE3 (28)
-
-#define UART_IRQ_RXD (0)
-#define UART_IRQ_ERR (1)
-#define UART_IRQ_TXD (2)
-
-#define IRQ_S5P_UART_RX0 (IRQ_S5P_UART_BASE0 + UART_IRQ_RXD)
-#define IRQ_S5P_UART_TX0 (IRQ_S5P_UART_BASE0 + UART_IRQ_TXD)
-#define IRQ_S5P_UART_ERR0 (IRQ_S5P_UART_BASE0 + UART_IRQ_ERR)
-
-#define IRQ_S5P_UART_RX1 (IRQ_S5P_UART_BASE1 + UART_IRQ_RXD)
-#define IRQ_S5P_UART_TX1 (IRQ_S5P_UART_BASE1 + UART_IRQ_TXD)
-#define IRQ_S5P_UART_ERR1 (IRQ_S5P_UART_BASE1 + UART_IRQ_ERR)
-
-#define IRQ_S5P_UART_RX2 (IRQ_S5P_UART_BASE2 + UART_IRQ_RXD)
-#define IRQ_S5P_UART_TX2 (IRQ_S5P_UART_BASE2 + UART_IRQ_TXD)
-#define IRQ_S5P_UART_ERR2 (IRQ_S5P_UART_BASE2 + UART_IRQ_ERR)
-
-#define IRQ_S5P_UART_RX3 (IRQ_S5P_UART_BASE3 + UART_IRQ_RXD)
-#define IRQ_S5P_UART_TX3 (IRQ_S5P_UART_BASE3 + UART_IRQ_TXD)
-#define IRQ_S5P_UART_ERR3 (IRQ_S5P_UART_BASE3 + UART_IRQ_ERR)
-
-/* S3C compatibilty defines */
-#define IRQ_S3CUART_RX0 IRQ_S5P_UART_RX0
-#define IRQ_S3CUART_RX1 IRQ_S5P_UART_RX1
-#define IRQ_S3CUART_RX2 IRQ_S5P_UART_RX2
-#define IRQ_S3CUART_RX3 IRQ_S5P_UART_RX3
-
/* VIC based IRQs */
#define S5P_IRQ_VIC0(x) (S5P_VIC0_BASE + (x))
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index f88216d2399..c65eb791d1b 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -163,9 +163,9 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
ct->chip.irq_mask = irq_gc_mask_set_bit;
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
ct->chip.irq_set_type = s5p_gpioint_set_type,
- ct->regs.ack = PEND_OFFSET + REG_OFFSET(chip->group);
- ct->regs.mask = MASK_OFFSET + REG_OFFSET(chip->group);
- ct->regs.type = CON_OFFSET + REG_OFFSET(chip->group);
+ ct->regs.ack = PEND_OFFSET + REG_OFFSET(group - bank->start);
+ ct->regs.mask = MASK_OFFSET + REG_OFFSET(group - bank->start);
+ ct->regs.type = CON_OFFSET + REG_OFFSET(group - bank->start);
irq_setup_generic_chip(gc, IRQ_MSK(chip->chip.ngpio),
IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
diff --git a/arch/arm/plat-s5p/irq.c b/arch/arm/plat-s5p/irq.c
index a97c08957f4..afdaa1082b9 100644
--- a/arch/arm/plat-s5p/irq.c
+++ b/arch/arm/plat-s5p/irq.c
@@ -17,42 +17,10 @@
#include <asm/hardware/vic.h>
-#include <linux/serial_core.h>
#include <mach/map.h>
#include <plat/regs-timer.h>
-#include <plat/regs-serial.h>
#include <plat/cpu.h>
#include <plat/irq-vic-timer.h>
-#include <plat/irq-uart.h>
-
-/*
- * Note, we make use of the fact that the parent IRQs, IRQ_UART[0..3]
- * are consecutive when looking up the interrupt in the demux routines.
- */
-static struct s3c_uart_irq uart_irqs[] = {
- [0] = {
- .regs = S5P_VA_UART0,
- .base_irq = IRQ_S5P_UART_BASE0,
- .parent_irq = IRQ_UART0,
- },
- [1] = {
- .regs = S5P_VA_UART1,
- .base_irq = IRQ_S5P_UART_BASE1,
- .parent_irq = IRQ_UART1,
- },
- [2] = {
- .regs = S5P_VA_UART2,
- .base_irq = IRQ_S5P_UART_BASE2,
- .parent_irq = IRQ_UART2,
- },
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
- [3] = {
- .regs = S5P_VA_UART3,
- .base_irq = IRQ_S5P_UART_BASE3,
- .parent_irq = IRQ_UART3,
- },
-#endif
-};
void __init s5p_init_irq(u32 *vic, u32 num_vic)
{
@@ -65,6 +33,4 @@ void __init s5p_init_irq(u32 *vic, u32 num_vic)
#endif
s3c_init_vic_timer_irq(5, IRQ_TIMER0);
-
- s3c_init_uart_irqs(uart_irqs, ARRAY_SIZE(uart_irqs));
}
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index b3e10659e4b..dffa37bc4a0 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -65,11 +65,6 @@ config SAMSUNG_IRQ_VIC_TIMER
help
Internal configuration to build the VIC timer interrupt code.
-config SAMSUNG_IRQ_UART
- bool
- help
- Internal configuration to build the IRQ UART demux code.
-
# options for gpio configuration support
config SAMSUNG_GPIOLIB_4BIT
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 853764ba8cc..1105922342f 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -21,7 +21,6 @@ obj-y += dev-asocdma.o
obj-$(CONFIG_SAMSUNG_CLKSRC) += clock-clksrc.o
-obj-$(CONFIG_SAMSUNG_IRQ_UART) += irq-uart.o
obj-$(CONFIG_SAMSUNG_IRQ_VIC_TIMER) += irq-vic-timer.o
# ADC
diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h
index bac36fa3bec..72073484702 100644
--- a/arch/arm/plat-samsung/include/plat/regs-serial.h
+++ b/arch/arm/plat-samsung/include/plat/regs-serial.h
@@ -186,6 +186,11 @@
#define S3C64XX_UINTSP 0x34
#define S3C64XX_UINTM 0x38
+#define S3C64XX_UINTM_RXD (0)
+#define S3C64XX_UINTM_TXD (2)
+#define S3C64XX_UINTM_RXD_MSK (1 << S3C64XX_UINTM_RXD)
+#define S3C64XX_UINTM_TXD_MSK (1 << S3C64XX_UINTM_TXD)
+
/* Following are specific to S5PV210 */
#define S5PV210_UCON_CLKMASK (1<<10)
#define S5PV210_UCON_PCLK (0<<10)
diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c
deleted file mode 100644
index 3014c7226bd..00000000000
--- a/arch/arm/plat-samsung/irq-uart.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/* arch/arm/plat-samsung/irq-uart.c
- * originally part of arch/arm/plat-s3c64xx/irq.c
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * Samsung- UART Interrupt handling
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/serial_core.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <asm/mach/irq.h>
-
-#include <mach/map.h>
-#include <plat/irq-uart.h>
-#include <plat/regs-serial.h>
-#include <plat/cpu.h>
-
-/* Note, we make use of the fact that the parent IRQs, IRQ_UART[0..3]
- * are consecutive when looking up the interrupt in the demux routines.
- */
-static void s3c_irq_demux_uart(unsigned int irq, struct irq_desc *desc)
-{
- struct s3c_uart_irq *uirq = desc->irq_data.handler_data;
- struct irq_chip *chip = irq_get_chip(irq);
- u32 pend = __raw_readl(uirq->regs + S3C64XX_UINTP);
- int base = uirq->base_irq;
-
- chained_irq_enter(chip, desc);
-
- if (pend & (1 << 0))
- generic_handle_irq(base);
- if (pend & (1 << 1))
- generic_handle_irq(base + 1);
- if (pend & (1 << 2))
- generic_handle_irq(base + 2);
- if (pend & (1 << 3))
- generic_handle_irq(base + 3);
-
- chained_irq_exit(chip, desc);
-}
-
-static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
-{
- void __iomem *reg_base = uirq->regs;
- struct irq_chip_generic *gc;
- struct irq_chip_type *ct;
-
- /* mask all interrupts at the start. */
- __raw_writel(0xf, reg_base + S3C64XX_UINTM);
-
- gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base,
- handle_level_irq);
-
- if (!gc) {
- pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
- __func__, uirq->base_irq);
- return;
- }
-
- ct = gc->chip_types;
- ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->chip.irq_mask = irq_gc_mask_set_bit;
- ct->chip.irq_unmask = irq_gc_mask_clr_bit;
- ct->regs.ack = S3C64XX_UINTP;
- ct->regs.mask = S3C64XX_UINTM;
- irq_setup_generic_chip(gc, IRQ_MSK(4), IRQ_GC_INIT_MASK_CACHE,
- IRQ_NOREQUEST | IRQ_NOPROBE, 0);
-
- irq_set_handler_data(uirq->parent_irq, uirq);
- irq_set_chained_handler(uirq->parent_irq, s3c_irq_demux_uart);
-}
-
-/**
- * s3c_init_uart_irqs() - initialise UART IRQs and the necessary demuxing
- * @irq: The interrupt data for registering
- * @nr_irqs: The number of interrupt descriptions in @irq.
- *
- * Register the UART interrupts specified by @irq including the demuxing
- * routines. This supports the S3C6400 and newer style of devices.
- */
-void __init s3c_init_uart_irqs(struct s3c_uart_irq *irq, unsigned int nr_irqs)
-{
- for (; nr_irqs > 0; nr_irqs--, irq++)
- s3c_init_uart_irq(irq);
-}
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 62cc8f98117..5bdeef96984 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,10 +12,9 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# XXX: This is a cut-down version of the file; it contains only machines that
-# XXX: are in mainline or have been submitted to the machine database within
-# XXX: the last 12 months. If your entry is missing please email rmk at
-# XXX: <linux@arm.linux.org.uk>
+# This is a cut-down version of the file; it contains only machines that
+# are merged into mainline or have been edited in the machine database
+# within the last 12 months. References to machine_is_NAME() do not count!
#
# Last update: Sat May 7 08:48:24 2011
#
@@ -65,6 +64,7 @@ h7201 ARCH_H7201 H7201 161
h7202 ARCH_H7202 H7202 162
iq80321 ARCH_IQ80321 IQ80321 169
ks8695 ARCH_KS8695 KS8695 180
+karo ARCH_KARO KARO 190
smdk2410 ARCH_SMDK2410 SMDK2410 193
ceiva ARCH_CEIVA CEIVA 200
voiceblue MACH_VOICEBLUE VOICEBLUE 218
@@ -188,6 +188,7 @@ omap_2430sdp MACH_OMAP_2430SDP OMAP_2430SDP 900
davinci_evm MACH_DAVINCI_EVM DAVINCI_EVM 901
palmz72 MACH_PALMZ72 PALMZ72 904
nxdb500 MACH_NXDB500 NXDB500 905
+apf9328 MACH_APF9328 APF9328 906
palmt5 MACH_PALMT5 PALMT5 917
palmtc MACH_PALMTC PALMTC 918
omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919
@@ -271,10 +272,12 @@ pcm038 MACH_PCM038 PCM038 1551
ts_x09 MACH_TS209 TS209 1565
at91cap9adk MACH_AT91CAP9ADK AT91CAP9ADK 1566
mx31moboard MACH_MX31MOBOARD MX31MOBOARD 1574
+vision_ep9307 MACH_VISION_EP9307 VISION_EP9307 1578
terastation_pro2 MACH_TERASTATION_PRO2 TERASTATION_PRO2 1584
linkstation_pro MACH_LINKSTATION_PRO LINKSTATION_PRO 1585
e350 MACH_E350 E350 1596
ts409 MACH_TS409 TS409 1601
+rsi_ews MACH_RSI_EWS RSI_EWS 1609
cm_x300 MACH_CM_X300 CM_X300 1616
at91sam9g20ek MACH_AT91SAM9G20EK AT91SAM9G20EK 1624
smdk6410 MACH_SMDK6410 SMDK6410 1626
@@ -331,6 +334,7 @@ smdkc100 MACH_SMDKC100 SMDKC100 1826
tavorevb MACH_TAVOREVB TAVOREVB 1827
saar MACH_SAAR SAAR 1828
at91sam9m10g45ek MACH_AT91SAM9M10G45EK AT91SAM9M10G45EK 1830
+usb_a9g20 MACH_USB_A9G20 USB_A9G20 1841
mxlads MACH_MXLADS MXLADS 1851
linkstation_mini MACH_LINKSTATION_MINI LINKSTATION_MINI 1858
afeb9260 MACH_AFEB9260 AFEB9260 1859
@@ -369,6 +373,7 @@ pcm043 MACH_PCM043 PCM043 2072
sheevaplug MACH_SHEEVAPLUG SHEEVAPLUG 2097
avengers_lite MACH_AVENGERS_LITE AVENGERS_LITE 2104
mx51_babbage MACH_MX51_BABBAGE MX51_BABBAGE 2125
+tx37 MACH_TX37 TX37 2127
rd78x00_masa MACH_RD78X00_MASA RD78X00_MASA 2135
dm355_leopard MACH_DM355_LEOPARD DM355_LEOPARD 2138
ts219 MACH_TS219 TS219 2139
@@ -379,6 +384,7 @@ omap_4430sdp MACH_OMAP_4430SDP OMAP_4430SDP 2160
magx_zn5 MACH_MAGX_ZN5 MAGX_ZN5 2162
btmavb101 MACH_BTMAVB101 BTMAVB101 2172
btmawb101 MACH_BTMAWB101 BTMAWB101 2173
+tx25 MACH_TX25 TX25 2177
omap3_torpedo MACH_OMAP3_TORPEDO OMAP3_TORPEDO 2178
anw6410 MACH_ANW6410 ANW6410 2183
imx27_visstrim_m10 MACH_IMX27_VISSTRIM_M10 IMX27_VISSTRIM_M10 2187
@@ -423,6 +429,7 @@ raumfeld_rc MACH_RAUMFELD_RC RAUMFELD_RC 2413
raumfeld_connector MACH_RAUMFELD_CONNECTOR RAUMFELD_CONNECTOR 2414
raumfeld_speaker MACH_RAUMFELD_SPEAKER RAUMFELD_SPEAKER 2415
tnetv107x MACH_TNETV107X TNETV107X 2418
+mx51_m2id MACH_MX51_M2ID MX51_M2ID 2428
smdkv210 MACH_SMDKV210 SMDKV210 2456
omap_zoom3 MACH_OMAP_ZOOM3 OMAP_ZOOM3 2464
omap_3630sdp MACH_OMAP_3630SDP OMAP_3630SDP 2465
@@ -433,14 +440,17 @@ omapl138_hawkboard MACH_OMAPL138_HAWKBOARD OMAPL138_HAWKBOARD 2495
ts41x MACH_TS41X TS41X 2502
phy3250 MACH_PHY3250 PHY3250 2511
mini6410 MACH_MINI6410 MINI6410 2520
+tx51 MACH_TX51 TX51 2529
mx28evk MACH_MX28EVK MX28EVK 2531
smartq5 MACH_SMARTQ5 SMARTQ5 2534
davinci_dm6467tevm MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM 2548
mxt_td60 MACH_MXT_TD60 MXT_TD60 2550
riot_bei2 MACH_RIOT_BEI2 RIOT_BEI2 2576
riot_x37 MACH_RIOT_X37 RIOT_X37 2578
+pca101 MACH_PCA101 PCA101 2595
capc7117 MACH_CAPC7117 CAPC7117 2612
icontrol MACH_ICONTROL ICONTROL 2624
+gplugd MACH_GPLUGD GPLUGD 2625
qsd8x50a_st1_5 MACH_QSD8X50A_ST1_5 QSD8X50A_ST1_5 2627
mx23evk MACH_MX23EVK MX23EVK 2629
ap4evb MACH_AP4EVB AP4EVB 2630
@@ -1113,3 +1123,5 @@ blissc MACH_BLISSC BLISSC 3491
thales_adc MACH_THALES_ADC THALES_ADC 3492
ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493
atdgp318 MACH_ATDGP318 ATDGP318 3494
+smdk4212 MACH_SMDK4212 SMDK4212 3638
+smdk4412 MACH_SMDK4412 SMDK4412 3765
diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile
index 6de73aab019..a81404c09d5 100644
--- a/arch/arm/vfp/Makefile
+++ b/arch/arm/vfp/Makefile
@@ -7,7 +7,7 @@
# ccflags-y := -DDEBUG
# asflags-y := -DDEBUG
-KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp)
+KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp -mfloat-abi=soft)
LDFLAGS +=--no-warn-mismatch
obj-y += vfp.o
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 17addacb169..408b055c585 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -282,8 +282,8 @@ config ETRAX_RTC
Enables drivers for the Real-Time Clock battery-backed chips on
some products. The kernel reads the time when booting, and
the date can be set using ioctl(fd, RTC_SET_TIME, &rt) with rt a
- rtc_time struct (see <file:include/asm-cris/rtc.h>) on the /dev/rtc
- device. You can check the time with cat /proc/rtc, but
+ rtc_time struct (see <file:arch/cris/include/asm/rtc.h>) on the
+ /dev/rtc device. You can check the time with cat /proc/rtc, but
normal time reading should be done using libc function time and
friends.
diff --git a/arch/cris/arch-v10/Kconfig b/arch/cris/arch-v10/Kconfig
index adc164e9933..df9a38b4f18 100644
--- a/arch/cris/arch-v10/Kconfig
+++ b/arch/cris/arch-v10/Kconfig
@@ -24,8 +24,8 @@ config ETRAX_PA_LEDS
help
The ETRAX network driver is responsible for flashing LED's when
packets arrive and are sent. It uses macros defined in
- <file:include/asm-cris/io.h>, and those macros are defined after what
- YOU choose in this option. The actual bits used are configured
+ <file:arch/cris/include/asm/io.h>, and those macros are defined after
+ what YOU choose in this option. The actual bits used are configured
separately. Select this if the LEDs are on port PA. Some products
put the leds on PB or a memory-mapped latch (CSP0) instead.
@@ -34,8 +34,8 @@ config ETRAX_PB_LEDS
help
The ETRAX network driver is responsible for flashing LED's when
packets arrive and are sent. It uses macros defined in
- <file:include/asm-cris/io.h>, and those macros are defined after what
- YOU choose in this option. The actual bits used are configured
+ <file:arch/cris/include/asm/io.h>, and those macros are defined after
+ what YOU choose in this option. The actual bits used are configured
separately. Select this if the LEDs are on port PB. Some products
put the leds on PA or a memory-mapped latch (CSP0) instead.
@@ -44,8 +44,8 @@ config ETRAX_CSP0_LEDS
help
The ETRAX network driver is responsible for flashing LED's when
packets arrive and are sent. It uses macros defined in
- <file:include/asm-cris/io.h>, and those macros are defined after what
- YOU choose in this option. The actual bits used are configured
+ <file:arch/cris/include/asm/io.h>, and those macros are defined after
+ what YOU choose in this option. The actual bits used are configured
separately. Select this if the LEDs are on a memory-mapped latch
using chip select CSP0, this is mapped at 0x90000000.
Some products put the leds on PA or PB instead.
diff --git a/arch/cris/arch-v10/drivers/Kconfig b/arch/cris/arch-v10/drivers/Kconfig
index 0d722177992..32d90867a98 100644
--- a/arch/cris/arch-v10/drivers/Kconfig
+++ b/arch/cris/arch-v10/drivers/Kconfig
@@ -4,6 +4,7 @@ config ETRAX_ETHERNET
bool "Ethernet support"
depends on ETRAX_ARCH_V10
select NET_ETHERNET
+ select NET_CORE
select MII
help
This option enables the ETRAX 100LX built-in 10/100Mbit Ethernet
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 41a2732e8b9..e47e9c3401b 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -4,6 +4,7 @@ config ETRAX_ETHERNET
bool "Ethernet support"
depends on ETRAX_ARCH_V32
select NET_ETHERNET
+ select NET_CORE
select MII
help
This option enables the ETRAX FS built-in 10/100Mbit Ethernet
diff --git a/arch/cris/arch-v32/lib/nand_init.S b/arch/cris/arch-v32/lib/nand_init.S
deleted file mode 100644
index d671fed451c..00000000000
--- a/arch/cris/arch-v32/lib/nand_init.S
+++ /dev/null
@@ -1,178 +0,0 @@
-##=============================================================================
-##
-## nand_init.S
-##
-## The bootrom copies data from the NAND flash to the internal RAM but
-## due to a bug/feature we can only trust the 256 first bytes. So this
-## code copies more data from NAND flash to internal RAM. Obvioulsy this
-## code must fit in the first 256 bytes so alter with care.
-##
-## Some notes about the bug/feature for future reference:
-## The bootrom copies the first 127 KB from NAND flash to internal
-## memory. The problem is that it does a bytewise copy. NAND flashes
-## does autoincrement on the address so for a 16-bite device each
-## read/write increases the address by two. So the copy loop in the
-## bootrom will discard every second byte. This is solved by inserting
-## zeroes in every second byte in the first erase block.
-##
-## The bootrom also incorrectly assumes that it can read the flash
-## linear with only one read command but the flash will actually
-## switch between normal area and spare area if you do that so we
-## can't trust more than the first 256 bytes.
-##
-##=============================================================================
-
-#include <arch/hwregs/asm/reg_map_asm.h>
-#include <arch/hwregs/asm/gio_defs_asm.h>
-#include <arch/hwregs/asm/pinmux_defs_asm.h>
-#include <arch/hwregs/asm/bif_core_defs_asm.h>
-#include <arch/hwregs/asm/config_defs_asm.h>
-
-;; There are 8-bit NAND flashes and 16-bit NAND flashes.
-;; We need to treat them slightly different.
-#if CONFIG_ETRAX_FLASH_BUSWIDTH==2
-#define PAGE_SIZE 256
-#else
-#error 2
-#define PAGE_SIZE 512
-#endif
-#define ERASE_BLOCK 16384
-
-;; GPIO pins connected to NAND flash
-#define CE 4
-#define CLE 5
-#define ALE 6
-#define BY 7
-
-;; Address space for NAND flash
-#define NAND_RD_ADDR 0x90000000
-#define NAND_WR_ADDR 0x94000000
-
-#define READ_CMD 0x00
-
-;; Readability macros
-#define CSP_MASK \
- REG_MASK(bif_core, rw_grp3_cfg, gated_csp0) | \
- REG_MASK(bif_core, rw_grp3_cfg, gated_csp1)
-#define CSP_VAL \
- REG_STATE(bif_core, rw_grp3_cfg, gated_csp0, rd) | \
- REG_STATE(bif_core, rw_grp3_cfg, gated_csp1, wr)
-
-;;----------------------------------------------------------------------------
-;; Macros to set/clear GPIO bits
-
-.macro SET x
- or.b (1<<\x),$r9
- move.d $r9, [$r2]
-.endm
-
-.macro CLR x
- and.b ~(1<<\x),$r9
- move.d $r9, [$r2]
-.endm
-
-;;----------------------------------------------------------------------------
-
-nand_boot:
- ;; Check if nand boot was selected
- move.d REG_ADDR(config, regi_config, r_bootsel), $r0
- move.d [$r0], $r0
- and.d REG_MASK(config, r_bootsel, boot_mode), $r0
- cmp.d REG_STATE(config, r_bootsel, boot_mode, nand), $r0
- bne normal_boot ; No NAND boot
- nop
-
-copy_nand_to_ram:
- ;; copy_nand_to_ram
- ;; Arguments
- ;; r10 - destination
- ;; r11 - source offset
- ;; r12 - size
- ;; r13 - Address to jump to after completion
- ;; Note : r10-r12 are clobbered on return
- ;; Registers used:
- ;; r0 - NAND_RD_ADDR
- ;; r1 - NAND_WR_ADDR
- ;; r2 - reg_gio_rw_pa_dout
- ;; r3 - reg_gio_r_pa_din
- ;; r4 - tmp
- ;; r5 - byte counter within a page
- ;; r6 - reg_pinmux_rw_pa
- ;; r7 - reg_gio_rw_pa_oe
- ;; r8 - reg_bif_core_rw_grp3_cfg
- ;; r9 - reg_gio_rw_pa_dout shadow
- move.d 0x90000000, $r0
- move.d 0x94000000, $r1
- move.d REG_ADDR(gio, regi_gio, rw_pa_dout), $r2
- move.d REG_ADDR(gio, regi_gio, r_pa_din), $r3
- move.d REG_ADDR(pinmux, regi_pinmux, rw_pa), $r6
- move.d REG_ADDR(gio, regi_gio, rw_pa_oe), $r7
- move.d REG_ADDR(bif_core, regi_bif_core, rw_grp3_cfg), $r8
-
-#if CONFIG_ETRAX_FLASH_BUSWIDTH==2
- lsrq 1, $r11
-#endif
- ;; Set up GPIO
- move.d [$r2], $r9
- move.d [$r7], $r4
- or.b (1<<ALE) | (1 << CLE) | (1<<CE), $r4
- move.d $r4, [$r7]
-
- ;; Set up bif
- move.d [$r8], $r4
- and.d CSP_MASK, $r4
- or.d CSP_VAL, $r4
- move.d $r4, [$r8]
-
-1: ;; Copy one page
- CLR CE
- SET CLE
- moveq READ_CMD, $r4
- move.b $r4, [$r1]
- moveq 20, $r4
-2: bne 2b
- subq 1, $r4
- CLR CLE
- SET ALE
- clear.w [$r1] ; Column address = 0
- move.d $r11, $r4
- lsrq 8, $r4
- move.b $r4, [$r1] ; Row address
- lsrq 8, $r4
- move.b $r4, [$r1] ; Row address
- moveq 20, $r4
-2: bne 2b
- subq 1, $r4
- CLR ALE
-2: move.d [$r3], $r4
- and.d 1 << BY, $r4
- beq 2b
- movu.w PAGE_SIZE, $r5
-2: ; Copy one byte/word
-#if CONFIG_ETRAX_FLASH_BUSWIDTH==2
- move.w [$r0], $r4
-#else
- move.b [$r0], $r4
-#endif
- subq 1, $r5
- bne 2b
-#if CONFIG_ETRAX_FLASH_BUSWIDTH==2
- move.w $r4, [$r10+]
- subu.w PAGE_SIZE*2, $r12
-#else
- move.b $r4, [$r10+]
- subu.w PAGE_SIZE, $r12
-#endif
- bpl 1b
- addu.w PAGE_SIZE, $r11
-
- ;; End of copy
- jump $r13
- nop
-
- ;; This will warn if the code above is too large. If you consider
- ;; to remove this you don't understand the bug/feature.
- .org 256
- .org ERASE_BLOCK
-
-normal_boot:
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 091ed6192ae..d1f377f5d3b 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -160,7 +160,7 @@ config VT_CONSOLE
config HW_CONSOLE
bool
- depends on VT && !S390 && !UM
+ depends on VT
default y
comment "Unix98 PTY support"
@@ -195,7 +195,7 @@ config UNIX98_PTYS
source "drivers/char/pcmcia/Kconfig"
-source "drivers/serial/Kconfig"
+source "drivers/tty/serial/Kconfig"
source "drivers/i2c/Kconfig"
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 0e5cd1405e0..43ab1cd097a 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -234,4 +234,4 @@ CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_T10DIF=y
CONFIG_MISC_DEVICES=y
-CONFIG_DMAR=y
+CONFIG_INTEL_IOMMU=y
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile
index 2f7caddf093..ae16ec4f630 100644
--- a/arch/ia64/dig/Makefile
+++ b/arch/ia64/dig/Makefile
@@ -6,7 +6,7 @@
#
obj-y := setup.o
-ifeq ($(CONFIG_DMAR), y)
+ifeq ($(CONFIG_INTEL_IOMMU), y)
obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o
else
obj-$(CONFIG_IA64_GENERIC) += machvec.o
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 80241fe03f5..f5f4ef149aa 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -915,7 +915,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
* @dir: R/W or both.
* @attrs: optional dma attributes
*
- * See Documentation/PCI/PCI-DMA-mapping.txt
+ * See Documentation/DMA-API-HOWTO.txt
*/
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
unsigned long poff, size_t size,
@@ -1044,7 +1044,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
* @dir: R/W or both.
* @attrs: optional dma attributes
*
- * See Documentation/PCI/PCI-DMA-mapping.txt
+ * See Documentation/DMA-API-HOWTO.txt
*/
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
enum dma_data_direction dir, struct dma_attrs *attrs)
@@ -1127,7 +1127,7 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
* @size: number of bytes mapped in driver buffer.
* @dma_handle: IOVA of new buffer.
*
- * See Documentation/PCI/PCI-DMA-mapping.txt
+ * See Documentation/DMA-API-HOWTO.txt
*/
static void *
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
@@ -1190,7 +1190,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
* @vaddr: virtual address IOVA of "consistent" buffer.
* @dma_handler: IO virtual address of "consistent" buffer.
*
- * See Documentation/PCI/PCI-DMA-mapping.txt
+ * See Documentation/DMA-API-HOWTO.txt
*/
static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
@@ -1453,7 +1453,7 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
* @dir: R/W or both.
* @attrs: optional dma attributes
*
- * See Documentation/PCI/PCI-DMA-mapping.txt
+ * See Documentation/DMA-API-HOWTO.txt
*/
static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir,
@@ -1549,7 +1549,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
* @dir: R/W or both.
* @attrs: optional dma attributes
*
- * See Documentation/PCI/PCI-DMA-mapping.txt
+ * See Documentation/DMA-API-HOWTO.txt
*/
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir,
diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c
index 7e81966ce48..47afcc61f6e 100644
--- a/arch/ia64/hp/sim/simeth.c
+++ b/arch/ia64/hp/sim/simeth.c
@@ -172,7 +172,7 @@ static const struct net_device_ops simeth_netdev_ops = {
.ndo_stop = simeth_close,
.ndo_start_xmit = simeth_tx,
.ndo_get_stats = simeth_get_stats,
- .ndo_set_multicast_list = set_multicast_list, /* not yet used */
+ .ndo_set_rx_mode = set_multicast_list, /* not yet used */
};
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
index d66d446b127..d05e78f6db9 100644
--- a/arch/ia64/include/asm/device.h
+++ b/arch/ia64/include/asm/device.h
@@ -10,7 +10,7 @@ struct dev_archdata {
#ifdef CONFIG_ACPI
void *acpi_handle;
#endif
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
void *iommu; /* hook for IOMMU specific extension */
#endif
};
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index 745e095fe82..105c93b00b1 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -7,12 +7,14 @@
extern void pci_iommu_shutdown(void);
extern void no_iommu_init(void);
+#ifdef CONFIG_INTEL_IOMMU
extern int force_iommu, no_iommu;
-extern int iommu_detected;
-#ifdef CONFIG_DMAR
extern int iommu_pass_through;
+extern int iommu_detected;
#else
#define iommu_pass_through (0)
+#define no_iommu (1)
+#define iommu_detected (0)
#endif
extern void iommu_dma_init(void);
extern void machvec_init(const char *name);
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 73b5f785e70..127dd7be346 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -139,7 +139,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
}
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
extern void pci_iommu_alloc(void);
#endif
#endif /* _ASM_IA64_PCI_H */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 395c2f216dd..d959c84904b 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_IA64_ESI) += esi.o
ifneq ($(CONFIG_IA64_ESI),)
obj-y += esi_stub.o # must be in kernel proper
endif
-obj-$(CONFIG_DMAR) += pci-dma.o
+obj-$(CONFIG_INTEL_IOMMU) += pci-dma.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_BINFMT_ELF) += elfcore.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 3be485a300b..bfb4d01e0e5 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -88,7 +88,7 @@ acpi_get_sysname(void)
struct acpi_table_rsdp *rsdp;
struct acpi_table_xsdt *xsdt;
struct acpi_table_header *hdr;
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
u64 i, nentries;
#endif
@@ -125,7 +125,7 @@ acpi_get_sysname(void)
return "xen";
}
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
/* Look for Intel IOMMU */
nentries = (hdr->length - sizeof(*hdr)) /
sizeof(xsdt->table_offset_entry[0]);
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 009df5434a7..94e0db72d4a 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -131,7 +131,7 @@ void arch_teardown_msi_irq(unsigned int irq)
return ia64_teardown_msi_irq(irq);
}
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
#ifdef CONFIG_SMP
static int dmar_msi_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
@@ -210,5 +210,5 @@ int arch_setup_dmar_msi(unsigned int irq)
"edge");
return 0;
}
-#endif /* CONFIG_DMAR */
+#endif /* CONFIG_INTEL_IOMMU */
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index f6b1ff0aea7..c16162c7086 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -14,7 +14,7 @@
#include <asm/system.h>
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
#include <linux/kernel.h>
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 9e8ee9d2b8c..6c28582fb98 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -21,6 +21,15 @@ config ARCH_HAS_ILOG2_U32
config ARCH_HAS_ILOG2_U64
bool
+config GENERIC_CLOCKEVENTS
+ bool
+
+config GENERIC_CMOS_UPDATE
+ def_bool !MMU
+
+config GENERIC_GPIO
+ bool
+
config GENERIC_HWEIGHT
bool
default y
@@ -29,10 +38,16 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
+config GENERIC_IOMAP
+ def_bool MMU
+
config TIME_LOW_RES
bool
default y
+config ARCH_USES_GETTIMEOFFSET
+ def_bool MMU
+
config NO_IOPORT
def_bool y
@@ -62,13 +77,31 @@ config MMU
Select if you want MMU-based virtualised addressing space
support by paged memory management. If unsure, say 'Y'.
-menu "Platform dependent setup"
+config MMU_MOTOROLA
+ bool
+
+config MMU_SUN3
+ bool
+ depends on MMU && !MMU_MOTOROLA
+
+menu "Platform setup"
+
+source arch/m68k/Kconfig.cpu
+
+source arch/m68k/Kconfig.machine
+
+source arch/m68k/Kconfig.bus
+
+endmenu
+
+menu "Kernel Features"
-if MMU
-source arch/m68k/Kconfig.mmu
+if COLDFIRE
+source "kernel/Kconfig.preempt"
endif
-if !MMU
-source arch/m68k/Kconfig.nommu
+
+if !MMU || COLDFIRE
+source "kernel/time/Kconfig"
endif
source "mm/Kconfig"
@@ -85,9 +118,9 @@ if !MMU
menu "Power management options"
config PM
- bool "Power Management support"
- help
- Support processor power management modes
+ bool "Power Management support"
+ help
+ Support processor power management modes
endmenu
endif
@@ -96,151 +129,7 @@ source "net/Kconfig"
source "drivers/Kconfig"
-if MMU
-
-menu "Character devices"
-
-config ATARI_MFPSER
- tristate "Atari MFP serial support"
- depends on ATARI
- ---help---
- If you like to use the MFP serial ports ("Modem1", "Serial1") under
- Linux, say Y. The driver equally supports all kinds of MFP serial
- ports and automatically detects whether Serial1 is available.
-
- To compile this driver as a module, choose M here.
-
- Note for Falcon users: You also have an MFP port, it's just not
- wired to the outside... But you could use the port under Linux.
-
-config ATARI_MIDI
- tristate "Atari MIDI serial support"
- depends on ATARI
- help
- If you want to use your Atari's MIDI port in Linux, say Y.
-
- To compile this driver as a module, choose M here.
-
-config ATARI_DSP56K
- tristate "Atari DSP56k support (EXPERIMENTAL)"
- depends on ATARI && EXPERIMENTAL
- help
- If you want to be able to use the DSP56001 in Falcons, say Y. This
- driver is still experimental, and if you don't know what it is, or
- if you don't have this processor, just say N.
-
- To compile this driver as a module, choose M here.
-
-config AMIGA_BUILTIN_SERIAL
- tristate "Amiga builtin serial support"
- depends on AMIGA
- help
- If you want to use your Amiga's built-in serial port in Linux,
- answer Y.
-
- To compile this driver as a module, choose M here.
-
-config MULTIFACE_III_TTY
- tristate "Multiface Card III serial support"
- depends on AMIGA
- help
- If you want to use a Multiface III card's serial port in Linux,
- answer Y.
-
- To compile this driver as a module, choose M here.
-
-config GVPIOEXT
- tristate "GVP IO-Extender support"
- depends on PARPORT=n && ZORRO
- help
- If you want to use a GVP IO-Extender serial card in Linux, say Y.
- Otherwise, say N.
-
-config GVPIOEXT_LP
- tristate "GVP IO-Extender parallel printer support"
- depends on GVPIOEXT
- help
- Say Y to enable driving a printer from the parallel port on your
- GVP IO-Extender card, N otherwise.
-
-config GVPIOEXT_PLIP
- tristate "GVP IO-Extender PLIP support"
- depends on GVPIOEXT
- help
- Say Y to enable doing IP over the parallel port on your GVP
- IO-Extender card, N otherwise.
-
-config MAC_HID
- bool
- depends on INPUT_ADBHID
- default y
-
-config HPDCA
- tristate "HP DCA serial support"
- depends on DIO && SERIAL_8250
- help
- If you want to use the internal "DCA" serial ports on an HP300
- machine, say Y here.
-
-config HPAPCI
- tristate "HP APCI serial support"
- depends on HP300 && SERIAL_8250 && EXPERIMENTAL
- help
- If you want to use the internal "APCI" serial ports on an HP400
- machine, say Y here.
-
-config MVME147_SCC
- bool "SCC support for MVME147 serial ports"
- depends on MVME147 && BROKEN
- help
- This is the driver for the serial ports on the Motorola MVME147
- boards. Everyone using one of these boards should say Y here.
-
-config MVME162_SCC
- bool "SCC support for MVME162 serial ports"
- depends on MVME16x && BROKEN
- help
- This is the driver for the serial ports on the Motorola MVME162 and
- 172 boards. Everyone using one of these boards should say Y here.
-
-config BVME6000_SCC
- bool "SCC support for BVME6000 serial ports"
- depends on BVME6000 && BROKEN
- help
- This is the driver for the serial ports on the BVME4000 and BVME6000
- boards from BVM Ltd. Everyone using one of these boards should say
- Y here.
-
-config DN_SERIAL
- bool "Support for DN serial port (dummy)"
- depends on APOLLO
-
-config SERIAL_CONSOLE
- bool "Support for serial port console"
- depends on (AMIGA || ATARI || SUN3 || SUN3X || VME || APOLLO) && (ATARI_MFPSER=y || ATARI_MIDI=y || AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y || SERIAL=y || MVME147_SCC || SERIAL167 || MVME162_SCC || BVME6000_SCC || DN_SERIAL)
- ---help---
- If you say Y here, it will be possible to use a serial port as the
- system console (the system console is the device which receives all
- kernel messages and warnings and which allows logins in single user
- mode). This could be useful if some terminal or printer is connected
- to that serial port.
-
- Even if you say Y here, the currently visible virtual console
- (/dev/tty0) will still be used as the system console by default, but
- you can alter that using a kernel command line option such as
- "console=ttyS1". (Try "man bootparam" or see the documentation of
- your boot loader (lilo or loadlin) about how to pass options to the
- kernel at boot time.)
-
- If you don't have a VGA card installed and you say Y here, the
- kernel will automatically use the first serial line, /dev/ttyS0, as
- system console.
-
- If unsure, say N.
-
-endmenu
-
-endif
+source "arch/m68k/Kconfig.devices"
source "fs/Kconfig"
diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus
new file mode 100644
index 00000000000..8294f0c1785
--- /dev/null
+++ b/arch/m68k/Kconfig.bus
@@ -0,0 +1,55 @@
+if MMU
+
+comment "Bus Support"
+
+config NUBUS
+ bool
+ depends on MAC
+ default y
+
+config ZORRO
+ bool "Amiga Zorro (AutoConfig) bus support"
+ depends on AMIGA
+ help
+ This enables support for the Zorro bus in the Amiga. If you have
+ expansion cards in your Amiga that conform to the Amiga
+ AutoConfig(tm) specification, say Y, otherwise N. Note that even
+ expansion cards that do not fit in the Zorro slots but fit in e.g.
+ the CPU slot may fall in this category, so you have to say Y to let
+ Linux use these.
+
+config AMIGA_PCMCIA
+ bool "Amiga 1200/600 PCMCIA support (EXPERIMENTAL)"
+ depends on AMIGA && EXPERIMENTAL
+ help
+ Include support in the kernel for pcmcia on Amiga 1200 and Amiga
+ 600. If you intend to use pcmcia cards say Y; otherwise say N.
+
+config ISA
+ bool
+ depends on Q40 || AMIGA_PCMCIA
+ default y
+ help
+ Find out whether you have ISA slots on your motherboard. ISA is the
+ name of a bus system, i.e. the way the CPU talks to the other stuff
+ inside your box. Other bus systems are PCI, EISA, MicroChannel
+ (MCA) or VESA. ISA is an older system, now being displaced by PCI;
+ newer boards don't support it. If you have ISA, say Y, otherwise N.
+
+config GENERIC_ISA_DMA
+ def_bool ISA
+
+source "drivers/pci/Kconfig"
+
+source "drivers/zorro/Kconfig"
+
+endif
+
+if !MMU
+
+config ISA_DMA_API
+ def_bool !M5272
+
+source "drivers/pcmcia/Kconfig"
+
+endif
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
new file mode 100644
index 00000000000..e632b2d1210
--- /dev/null
+++ b/arch/m68k/Kconfig.cpu
@@ -0,0 +1,429 @@
+comment "Processor Type"
+
+config M68000
+ bool
+ select CPU_HAS_NO_BITFIELDS
+ help
+ The Freescale (was Motorola) 68000 CPU is the first generation of
+ the well known M68K family of processors. The CPU core as well as
+ being available as a stand alone CPU was also used in many
+ System-On-Chip devices (eg 68328, 68302, etc). It does not contain
+ a paging MMU.
+
+config MCPU32
+ bool
+ select CPU_HAS_NO_BITFIELDS
+ help
+ The Freescale (was then Motorola) CPU32 is a CPU core that is
+ based on the 68020 processor. For the most part it is used in
+ System-On-Chip parts, and does not contain a paging MMU.
+
+config COLDFIRE
+ bool
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
+ select CPU_HAS_NO_BITFIELDS
+ help
+ The Freescale ColdFire family of processors is a modern derivitive
+ of the 68000 processor family. They are mainly targeted at embedded
+ applications, and are all System-On-Chip (SOC) devices, as opposed
+ to stand alone CPUs. They implement a subset of the original 68000
+ processor instruction set.
+
+config M68020
+ bool "68020 support"
+ depends on MMU
+ help
+ If you anticipate running this kernel on a computer with a MC68020
+ processor, say Y. Otherwise, say N. Note that the 68020 requires a
+ 68851 MMU (Memory Management Unit) to run Linux/m68k, except on the
+ Sun 3, which provides its own version.
+
+config M68030
+ bool "68030 support"
+ depends on MMU && !MMU_SUN3
+ help
+ If you anticipate running this kernel on a computer with a MC68030
+ processor, say Y. Otherwise, say N. Note that a MC68EC030 will not
+ work, as it does not include an MMU (Memory Management Unit).
+
+config M68040
+ bool "68040 support"
+ depends on MMU && !MMU_SUN3
+ help
+ If you anticipate running this kernel on a computer with a MC68LC040
+ or MC68040 processor, say Y. Otherwise, say N. Note that an
+ MC68EC040 will not work, as it does not include an MMU (Memory
+ Management Unit).
+
+config M68060
+ bool "68060 support"
+ depends on MMU && !MMU_SUN3
+ help
+ If you anticipate running this kernel on a computer with a MC68060
+ processor, say Y. Otherwise, say N.
+
+config M68328
+ bool "MC68328"
+ depends on !MMU
+ select M68000
+ help
+ Motorola 68328 processor support.
+
+config M68EZ328
+ bool "MC68EZ328"
+ depends on !MMU
+ select M68000
+ help
+ Motorola 68EX328 processor support.
+
+config M68VZ328
+ bool "MC68VZ328"
+ depends on !MMU
+ select M68000
+ help
+ Motorola 68VZ328 processor support.
+
+config M68360
+ bool "MC68360"
+ depends on !MMU
+ select MCPU32
+ help
+ Motorola 68360 processor support.
+
+config M5206
+ bool "MCF5206"
+ depends on !MMU
+ select COLDFIRE
+ select COLDFIRE_SW_A7
+ select HAVE_MBAR
+ help
+ Motorola ColdFire 5206 processor support.
+
+config M5206e
+ bool "MCF5206e"
+ depends on !MMU
+ select COLDFIRE
+ select COLDFIRE_SW_A7
+ select HAVE_MBAR
+ help
+ Motorola ColdFire 5206e processor support.
+
+config M520x
+ bool "MCF520x"
+ depends on !MMU
+ select COLDFIRE
+ select GENERIC_CLOCKEVENTS
+ select HAVE_CACHE_SPLIT
+ help
+ Freescale Coldfire 5207/5208 processor support.
+
+config M523x
+ bool "MCF523x"
+ depends on !MMU
+ select COLDFIRE
+ select GENERIC_CLOCKEVENTS
+ select HAVE_CACHE_SPLIT
+ select HAVE_IPSBAR
+ help
+ Freescale Coldfire 5230/1/2/4/5 processor support
+
+config M5249
+ bool "MCF5249"
+ depends on !MMU
+ select COLDFIRE
+ select COLDFIRE_SW_A7
+ select HAVE_MBAR
+ help
+ Motorola ColdFire 5249 processor support.
+
+config M527x
+ bool
+
+config M5271
+ bool "MCF5271"
+ depends on !MMU
+ select COLDFIRE
+ select M527x
+ select HAVE_CACHE_SPLIT
+ select HAVE_IPSBAR
+ select GENERIC_CLOCKEVENTS
+ help
+ Freescale (Motorola) ColdFire 5270/5271 processor support.
+
+config M5272
+ bool "MCF5272"
+ depends on !MMU
+ select COLDFIRE
+ select COLDFIRE_SW_A7
+ select HAVE_MBAR
+ help
+ Motorola ColdFire 5272 processor support.
+
+config M5275
+ bool "MCF5275"
+ depends on !MMU
+ select COLDFIRE
+ select M527x
+ select HAVE_CACHE_SPLIT
+ select HAVE_IPSBAR
+ select GENERIC_CLOCKEVENTS
+ help
+ Freescale (Motorola) ColdFire 5274/5275 processor support.
+
+config M528x
+ bool "MCF528x"
+ depends on !MMU
+ select COLDFIRE
+ select GENERIC_CLOCKEVENTS
+ select HAVE_CACHE_SPLIT
+ select HAVE_IPSBAR
+ help
+ Motorola ColdFire 5280/5282 processor support.
+
+config M5307
+ bool "MCF5307"
+ depends on !MMU
+ select COLDFIRE
+ select COLDFIRE_SW_A7
+ select HAVE_CACHE_CB
+ select HAVE_MBAR
+ help
+ Motorola ColdFire 5307 processor support.
+
+config M532x
+ bool "MCF532x"
+ depends on !MMU
+ select COLDFIRE
+ select HAVE_CACHE_CB
+ help
+ Freescale (Motorola) ColdFire 532x processor support.
+
+config M5407
+ bool "MCF5407"
+ depends on !MMU
+ select COLDFIRE
+ select COLDFIRE_SW_A7
+ select HAVE_CACHE_CB
+ select HAVE_MBAR
+ help
+ Motorola ColdFire 5407 processor support.
+
+config M54xx
+ bool
+
+config M547x
+ bool "MCF547x"
+ depends on !MMU
+ select COLDFIRE
+ select M54xx
+ select HAVE_CACHE_CB
+ select HAVE_MBAR
+ help
+ Freescale ColdFire 5470/5471/5472/5473/5474/5475 processor support.
+
+config M548x
+ bool "MCF548x"
+ depends on !MMU
+ select COLDFIRE
+ select M54xx
+ select HAVE_CACHE_CB
+ select HAVE_MBAR
+ help
+ Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support.
+
+
+comment "Processor Specific Options"
+
+config M68KFPU_EMU
+ bool "Math emulation support (EXPERIMENTAL)"
+ depends on MMU
+ depends on EXPERIMENTAL
+ help
+ At some point in the future, this will cause floating-point math
+ instructions to be emulated by the kernel on machines that lack a
+ floating-point math coprocessor. Thrill-seekers and chronically
+ sleep-deprived psychotic hacker types can say Y now, everyone else
+ should probably wait a while.
+
+config M68KFPU_EMU_EXTRAPREC
+ bool "Math emulation extra precision"
+ depends on M68KFPU_EMU
+ help
+ The fpu uses normally a few bit more during calculations for
+ correct rounding, the emulator can (often) do the same but this
+ extra calculation can cost quite some time, so you can disable
+ it here. The emulator will then "only" calculate with a 64 bit
+ mantissa and round slightly incorrect, what is more than enough
+ for normal usage.
+
+config M68KFPU_EMU_ONLY
+ bool "Math emulation only kernel"
+ depends on M68KFPU_EMU
+ help
+ This option prevents any floating-point instructions from being
+ compiled into the kernel, thereby the kernel doesn't save any
+ floating point context anymore during task switches, so this
+ kernel will only be usable on machines without a floating-point
+ math coprocessor. This makes the kernel a bit faster as no tests
+ needs to be executed whether a floating-point instruction in the
+ kernel should be executed or not.
+
+config ADVANCED
+ bool "Advanced configuration options"
+ depends on MMU
+ ---help---
+ This gives you access to some advanced options for the CPU. The
+ defaults should be fine for most users, but these options may make
+ it possible for you to improve performance somewhat if you know what
+ you are doing.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about these options.
+
+ Most users should say N to this question.
+
+config RMW_INSNS
+ bool "Use read-modify-write instructions"
+ depends on ADVANCED
+ ---help---
+ This allows to use certain instructions that work with indivisible
+ read-modify-write bus cycles. While this is faster than the
+ workaround of disabling interrupts, it can conflict with DMA
+ ( = direct memory access) on many Amiga systems, and it is also said
+ to destabilize other machines. It is very likely that this will
+ cause serious problems on any Amiga or Atari Medusa if set. The only
+ configuration where it should work are 68030-based Ataris, where it
+ apparently improves performance. But you've been warned! Unless you
+ really know what you are doing, say N. Try Y only if you're quite
+ adventurous.
+
+config SINGLE_MEMORY_CHUNK
+ bool "Use one physical chunk of memory only" if ADVANCED && !SUN3
+ depends on MMU
+ default y if SUN3
+ select NEED_MULTIPLE_NODES
+ help
+ Ignore all but the first contiguous chunk of physical memory for VM
+ purposes. This will save a few bytes kernel size and may speed up
+ some operations. Say N if not sure.
+
+config ARCH_DISCONTIGMEM_ENABLE
+ def_bool MMU && !SINGLE_MEMORY_CHUNK
+
+config 060_WRITETHROUGH
+ bool "Use write-through caching for 68060 supervisor accesses"
+ depends on ADVANCED && M68060
+ ---help---
+ The 68060 generally uses copyback caching of recently accessed data.
+ Copyback caching means that memory writes will be held in an on-chip
+ cache and only written back to memory some time later. Saying Y
+ here will force supervisor (kernel) accesses to use writethrough
+ caching. Writethrough caching means that data is written to memory
+ straight away, so that cache and memory data always agree.
+ Writethrough caching is less efficient, but is needed for some
+ drivers on 68060 based systems where the 68060 bus snooping signal
+ is hardwired on. The 53c710 SCSI driver is known to suffer from
+ this problem.
+
+config M68K_L2_CACHE
+ bool
+ depends on MAC
+ default y
+
+config NODES_SHIFT
+ int
+ default "3"
+ depends on !SINGLE_MEMORY_CHUNK
+
+config FPU
+ bool
+
+config COLDFIRE_SW_A7
+ bool
+
+config HAVE_CACHE_SPLIT
+ bool
+
+config HAVE_CACHE_CB
+ bool
+
+config HAVE_MBAR
+ bool
+
+config HAVE_IPSBAR
+ bool
+
+config CLOCK_SET
+ bool "Enable setting the CPU clock frequency"
+ depends on COLDFIRE
+ default n
+ help
+ On some CPU's you do not need to know what the core CPU clock
+ frequency is. On these you can disable clock setting. On some
+ traditional 68K parts, and on all ColdFire parts you need to set
+ the appropriate CPU clock frequency. On these devices many of the
+ onboard peripherals derive their timing from the master CPU clock
+ frequency.
+
+config CLOCK_FREQ
+ int "Set the core clock frequency"
+ default "66666666"
+ depends on CLOCK_SET
+ help
+ Define the CPU clock frequency in use. This is the core clock
+ frequency, it may or may not be the same as the external clock
+ crystal fitted to your board. Some processors have an internal
+ PLL and can have their frequency programmed at run time, others
+ use internal dividers. In general the kernel won't setup a PLL
+ if it is fitted (there are some exceptions). This value will be
+ specific to the exact CPU that you are using.
+
+config OLDMASK
+ bool "Old mask 5307 (1H55J) silicon"
+ depends on M5307
+ help
+ Build support for the older revision ColdFire 5307 silicon.
+ Specifically this is the 1H55J mask revision.
+
+if HAVE_CACHE_SPLIT
+choice
+ prompt "Split Cache Configuration"
+ default CACHE_I
+
+config CACHE_I
+ bool "Instruction"
+ help
+ Use all of the ColdFire CPU cache memory as an instruction cache.
+
+config CACHE_D
+ bool "Data"
+ help
+ Use all of the ColdFire CPU cache memory as a data cache.
+
+config CACHE_BOTH
+ bool "Both"
+ help
+ Split the ColdFire CPU cache, and use half as an instruction cache
+ and half as a data cache.
+endchoice
+endif
+
+if HAVE_CACHE_CB
+choice
+ prompt "Data cache mode"
+ default CACHE_WRITETHRU
+
+config CACHE_WRITETHRU
+ bool "Write-through"
+ help
+ The ColdFire CPU cache is set into Write-through mode.
+
+config CACHE_COPYBACK
+ bool "Copy-back"
+ help
+ The ColdFire CPU cache is set into Copy-back mode.
+endchoice
+endif
+
diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices
new file mode 100644
index 00000000000..d214034be6a
--- /dev/null
+++ b/arch/m68k/Kconfig.devices
@@ -0,0 +1,123 @@
+if MMU
+
+config ARCH_MAY_HAVE_PC_FDC
+ bool
+ depends on BROKEN && (Q40 || SUN3X)
+ default y
+
+menu "Platform devices"
+
+config HEARTBEAT
+ bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || MAC ||Q40
+ default y if !AMIGA && !APOLLO && !ATARI && !MAC && !Q40 && HP300
+ help
+ Use the power-on LED on your machine as a load meter. The exact
+ behavior is platform-dependent, but normally the flash frequency is
+ a hyperbolic function of the 5-minute load average.
+
+# We have a dedicated heartbeat LED. :-)
+config PROC_HARDWARE
+ bool "/proc/hardware support"
+ help
+ Say Y here to support the /proc/hardware file, which gives you
+ access to information about the machine you're running on,
+ including the model, CPU, MMU, clock speed, BogoMIPS rating,
+ and memory size.
+
+endmenu
+
+menu "Character devices"
+
+config ATARI_MFPSER
+ tristate "Atari MFP serial support"
+ depends on ATARI
+ ---help---
+ If you like to use the MFP serial ports ("Modem1", "Serial1") under
+ Linux, say Y. The driver equally supports all kinds of MFP serial
+ ports and automatically detects whether Serial1 is available.
+
+ To compile this driver as a module, choose M here.
+
+ Note for Falcon users: You also have an MFP port, it's just not
+ wired to the outside... But you could use the port under Linux.
+
+config ATARI_MIDI
+ tristate "Atari MIDI serial support"
+ depends on ATARI
+ help
+ If you want to use your Atari's MIDI port in Linux, say Y.
+
+ To compile this driver as a module, choose M here.
+
+config ATARI_DSP56K
+ tristate "Atari DSP56k support (EXPERIMENTAL)"
+ depends on ATARI && EXPERIMENTAL
+ help
+ If you want to be able to use the DSP56001 in Falcons, say Y. This
+ driver is still experimental, and if you don't know what it is, or
+ if you don't have this processor, just say N.
+
+ To compile this driver as a module, choose M here.
+
+config AMIGA_BUILTIN_SERIAL
+ tristate "Amiga builtin serial support"
+ depends on AMIGA
+ help
+ If you want to use your Amiga's built-in serial port in Linux,
+ answer Y.
+
+ To compile this driver as a module, choose M here.
+
+config MULTIFACE_III_TTY
+ tristate "Multiface Card III serial support"
+ depends on AMIGA
+ help
+ If you want to use a Multiface III card's serial port in Linux,
+ answer Y.
+
+ To compile this driver as a module, choose M here.
+
+config HPDCA
+ tristate "HP DCA serial support"
+ depends on DIO && SERIAL_8250
+ help
+ If you want to use the internal "DCA" serial ports on an HP300
+ machine, say Y here.
+
+config HPAPCI
+ tristate "HP APCI serial support"
+ depends on HP300 && SERIAL_8250 && EXPERIMENTAL
+ help
+ If you want to use the internal "APCI" serial ports on an HP400
+ machine, say Y here.
+
+config DN_SERIAL
+ bool "Support for DN serial port (dummy)"
+ depends on APOLLO
+
+config SERIAL_CONSOLE
+ bool "Support for serial port console"
+ depends on (AMIGA || ATARI || SUN3 || SUN3X || VME || APOLLO) && (ATARI_MFPSER=y || ATARI_MIDI=y || AMIGA_BUILTIN_SERIAL=y || MULTIFACE_III_TTY=y || SERIAL=y || SERIAL167 || DN_SERIAL)
+ ---help---
+ If you say Y here, it will be possible to use a serial port as the
+ system console (the system console is the device which receives all
+ kernel messages and warnings and which allows logins in single user
+ mode). This could be useful if some terminal or printer is connected
+ to that serial port.
+
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttyS1". (Try "man bootparam" or see the documentation of
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
+ If you don't have a VGA card installed and you say Y here, the
+ kernel will automatically use the first serial line, /dev/ttyS0, as
+ system console.
+
+ If unsure, say N.
+
+endmenu
+
+endif
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
new file mode 100644
index 00000000000..ef4a26aff78
--- /dev/null
+++ b/arch/m68k/Kconfig.machine
@@ -0,0 +1,583 @@
+comment "Machine Types"
+
+config AMIGA
+ bool "Amiga support"
+ depends on MMU
+ select MMU_MOTOROLA if MMU
+ help
+ This option enables support for the Amiga series of computers. If
+ you plan to use this kernel on an Amiga, say Y here and browse the
+ material available in <file:Documentation/m68k>; otherwise say N.
+
+config ATARI
+ bool "Atari support"
+ depends on MMU
+ select MMU_MOTOROLA if MMU
+ help
+ This option enables support for the 68000-based Atari series of
+ computers (including the TT, Falcon and Medusa). If you plan to use
+ this kernel on an Atari, say Y here and browse the material
+ available in <file:Documentation/m68k>; otherwise say N.
+
+config MAC
+ bool "Macintosh support"
+ depends on MMU
+ select MMU_MOTOROLA if MMU
+ help
+ This option enables support for the Apple Macintosh series of
+ computers (yes, there is experimental support now, at least for part
+ of the series).
+
+ Say N unless you're willing to code the remaining necessary support.
+ ;)
+
+config APOLLO
+ bool "Apollo support"
+ depends on MMU
+ select MMU_MOTOROLA if MMU
+ help
+ Say Y here if you want to run Linux on an MC680x0-based Apollo
+ Domain workstation such as the DN3500.
+
+config VME
+ bool "VME (Motorola and BVM) support"
+ depends on MMU
+ select MMU_MOTOROLA if MMU
+ help
+ Say Y here if you want to build a kernel for a 680x0 based VME
+ board. Boards currently supported include Motorola boards MVME147,
+ MVME162, MVME166, MVME167, MVME172, and MVME177. BVME4000 and
+ BVME6000 boards from BVM Ltd are also supported.
+
+config MVME147
+ bool "MVME147 support"
+ depends on MMU
+ depends on VME
+ help
+ Say Y to include support for early Motorola VME boards. This will
+ build a kernel which can run on MVME147 single-board computers. If
+ you select this option you will have to select the appropriate
+ drivers for SCSI, Ethernet and serial ports later on.
+
+config MVME16x
+ bool "MVME162, 166 and 167 support"
+ depends on MMU
+ depends on VME
+ help
+ Say Y to include support for Motorola VME boards. This will build a
+ kernel which can run on MVME162, MVME166, MVME167, MVME172, and
+ MVME177 boards. If you select this option you will have to select
+ the appropriate drivers for SCSI, Ethernet and serial ports later
+ on.
+
+config BVME6000
+ bool "BVME4000 and BVME6000 support"
+ depends on MMU
+ depends on VME
+ help
+ Say Y to include support for VME boards from BVM Ltd. This will
+ build a kernel which can run on BVME4000 and BVME6000 boards. If
+ you select this option you will have to select the appropriate
+ drivers for SCSI, Ethernet and serial ports later on.
+
+config HP300
+ bool "HP9000/300 and HP9000/400 support"
+ depends on MMU
+ select MMU_MOTOROLA if MMU
+ help
+ This option enables support for the HP9000/300 and HP9000/400 series
+ of workstations. Support for these machines is still somewhat
+ experimental. If you plan to try to use the kernel on such a machine
+ say Y here.
+ Everybody else says N.
+
+config SUN3X
+ bool "Sun3x support"
+ depends on MMU
+ select MMU_MOTOROLA if MMU
+ select M68030
+ help
+ This option enables support for the Sun 3x series of workstations.
+ Be warned that this support is very experimental.
+ Note that Sun 3x kernels are not compatible with Sun 3 hardware.
+ General Linux information on the Sun 3x series (now discontinued)
+ is at <http://www.angelfire.com/ca2/tech68k/sun3.html>.
+
+ If you don't want to compile a kernel for a Sun 3x, say N.
+
+config Q40
+ bool "Q40/Q60 support"
+ depends on MMU
+ select MMU_MOTOROLA if MMU
+ help
+ The Q40 is a Motorola 68040-based successor to the Sinclair QL
+ manufactured in Germany. There is an official Q40 home page at
+ <http://www.q40.de/>. This option enables support for the Q40 and
+ Q60. Select your CPU below. For 68LC060 don't forget to enable FPU
+ emulation.
+
+config SUN3
+ bool "Sun3 support"
+ depends on MMU
+ depends on !MMU_MOTOROLA
+ select MMU_SUN3 if MMU
+ select M68020
+ help
+ This option enables support for the Sun 3 series of workstations
+ (3/50, 3/60, 3/1xx, 3/2xx systems). Enabling this option requires
+ that all other hardware types must be disabled, as Sun 3 kernels
+ are incompatible with all other m68k targets (including Sun 3x!).
+
+ If you don't want to compile a kernel exclusively for a Sun 3, say N.
+
+config PILOT
+ bool
+
+config PILOT3
+ bool "Pilot 1000/5000, PalmPilot Personal/Pro, or PalmIII support"
+ depends on M68328
+ select PILOT
+ help
+ Support for the Palm Pilot 1000/5000, Personal/Pro and PalmIII.
+
+config XCOPILOT_BUGS
+ bool "(X)Copilot support"
+ depends on PILOT3
+ help
+ Support the bugs of Xcopilot.
+
+config UC5272
+ bool "Arcturus Networks uC5272 dimm board support"
+ depends on M5272
+ help
+ Support for the Arcturus Networks uC5272 dimm board.
+
+config UC5282
+ bool "Arcturus Networks uC5282 board support"
+ depends on M528x
+ help
+ Support for the Arcturus Networks uC5282 dimm board.
+
+config UCSIMM
+ bool "uCsimm module support"
+ depends on M68EZ328
+ help
+ Support for the Arcturus Networks uCsimm module.
+
+config UCDIMM
+ bool "uDsimm module support"
+ depends on M68VZ328
+ help
+ Support for the Arcturus Networks uDsimm module.
+
+config DRAGEN2
+ bool "DragenEngine II board support"
+ depends on M68VZ328
+ help
+ Support for the DragenEngine II board.
+
+config DIRECT_IO_ACCESS
+ bool "Allow user to access IO directly"
+ depends on (UCSIMM || UCDIMM || DRAGEN2)
+ help
+ Disable the CPU internal registers protection in user mode,
+ to allow a user application to read/write them.
+
+config INIT_LCD
+ bool "Initialize LCD"
+ depends on (UCSIMM || UCDIMM || DRAGEN2)
+ help
+ Initialize the LCD controller of the 68x328 processor.
+
+config MEMORY_RESERVE
+ int "Memory reservation (MiB)"
+ depends on (UCSIMM || UCDIMM)
+ help
+ Reserve certain memory regions on 68x328 based boards.
+
+config UCQUICC
+ bool "Lineo uCquicc board support"
+ depends on M68360
+ help
+ Support for the Lineo uCquicc board.
+
+config ARNEWSH
+ bool
+
+config ARN5206
+ bool "Arnewsh 5206 board support"
+ depends on M5206
+ select ARNEWSH
+ help
+ Support for the Arnewsh 5206 board.
+
+config FREESCALE
+ bool
+
+config M5206eC3
+ bool "Motorola M5206eC3 board support"
+ depends on M5206e
+ select FREESCALE
+ help
+ Support for the Motorola M5206eC3 board.
+
+config ELITE
+ bool "Motorola M5206eLITE board support"
+ depends on M5206e
+ help
+ Support for the Motorola M5206eLITE board.
+
+config M5208EVB
+ bool "Freescale M5208EVB board support"
+ depends on M520x
+ select FREESCALE
+ help
+ Support for the Freescale Coldfire M5208EVB.
+
+config M5235EVB
+ bool "Freescale M5235EVB support"
+ depends on M523x
+ select FREESCALE
+ help
+ Support for the Freescale M5235EVB board.
+
+config M5249C3
+ bool "Motorola M5249C3 board support"
+ depends on M5249
+ select FREESCALE
+ help
+ Support for the Motorola M5249C3 board.
+
+config M5271EVB
+ bool "Freescale (Motorola) M5271EVB board support"
+ depends on M5271
+ select FREESCALE
+ help
+ Support for the Freescale (Motorola) M5271EVB board.
+
+config M5275EVB
+ bool "Freescale (Motorola) M5275EVB board support"
+ depends on M5275
+ select FREESCALE
+ help
+ Support for the Freescale (Motorola) M5275EVB board.
+
+config M5272C3
+ bool "Motorola M5272C3 board support"
+ depends on M5272
+ select FREESCALE
+ help
+ Support for the Motorola M5272C3 board.
+
+config senTec
+ bool
+
+config COBRA5272
+ bool "senTec COBRA5272 board support"
+ depends on M5272
+ select senTec
+ help
+ Support for the senTec COBRA5272 board.
+
+config AVNET
+ bool
+
+config AVNET5282
+ bool "Avnet 5282 board support"
+ depends on M528x
+ select AVNET
+ help
+ Support for the Avnet 5282 board.
+
+config M5282EVB
+ bool "Motorola M5282EVB board support"
+ depends on M528x
+ select FREESCALE
+ help
+ Support for the Motorola M5282EVB board.
+
+config COBRA5282
+ bool "senTec COBRA5282 board support"
+ depends on M528x
+ select senTec
+ help
+ Support for the senTec COBRA5282 board.
+
+config SOM5282EM
+ bool "EMAC.Inc SOM5282EM board support"
+ depends on M528x
+ select EMAC_INC
+ help
+ Support for the EMAC.Inc SOM5282EM module.
+
+config WILDFIRE
+ bool "Intec Automation Inc. WildFire board support"
+ depends on M528x
+ help
+ Support for the Intec Automation Inc. WildFire.
+
+config WILDFIREMOD
+ bool "Intec Automation Inc. WildFire module support"
+ depends on M528x
+ help
+ Support for the Intec Automation Inc. WildFire module.
+
+config ARN5307
+ bool "Arnewsh 5307 board support"
+ depends on M5307
+ select ARNEWSH
+ help
+ Support for the Arnewsh 5307 board.
+
+config M5307C3
+ bool "Motorola M5307C3 board support"
+ depends on M5307
+ select FREESCALE
+ help
+ Support for the Motorola M5307C3 board.
+
+config SECUREEDGEMP3
+ bool "SnapGear SecureEdge/MP3 platform support"
+ depends on M5307
+ help
+ Support for the SnapGear SecureEdge/MP3 platform.
+
+config M5329EVB
+ bool "Freescale (Motorola) M5329EVB board support"
+ depends on M532x
+ select FREESCALE
+ help
+ Support for the Freescale (Motorola) M5329EVB board.
+
+config COBRA5329
+ bool "senTec COBRA5329 board support"
+ depends on M532x
+ help
+ Support for the senTec COBRA5329 board.
+
+config M5407C3
+ bool "Motorola M5407C3 board support"
+ depends on M5407
+ select FREESCALE
+ help
+ Support for the Motorola M5407C3 board.
+
+config FIREBEE
+ bool "FireBee board support"
+ depends on M547x
+ help
+ Support for the FireBee ColdFire 5475 based board.
+
+config CLEOPATRA
+ bool "Feith CLEOPATRA board support"
+ depends on (M5307 || M5407)
+ help
+ Support for the Feith Cleopatra boards.
+
+config CANCam
+ bool "Feith CANCam board support"
+ depends on M5272
+ help
+ Support for the Feith CANCam board.
+
+config SCALES
+ bool "Feith SCALES board support"
+ depends on M5272
+ help
+ Support for the Feith SCALES board.
+
+config NETtel
+ bool "SecureEdge/NETtel board support"
+ depends on (M5206e || M5272 || M5307)
+ help
+ Support for the SnapGear NETtel/SecureEdge/SnapGear boards.
+
+config SNAPGEAR
+ bool "SnapGear router board support"
+ depends on NETtel
+ help
+ Special additional support for SnapGear router boards.
+
+config SNEHA
+ bool
+
+config CPU16B
+ bool "Sneha Technologies S.L. Sarasvati board support"
+ depends on M5272
+ select SNEHA
+ help
+ Support for the SNEHA CPU16B board.
+
+config MOD5272
+ bool "Netburner MOD-5272 board support"
+ depends on M5272
+ help
+ Support for the Netburner MOD-5272 board.
+
+config SAVANT
+ bool
+
+config SAVANTrosie1
+ bool "Savant Rosie1 board support"
+ depends on M523x
+ select SAVANT
+ help
+ Support for the Savant Rosie1 board.
+
+
+if !MMU || COLDFIRE
+
+comment "Machine Options"
+
+config UBOOT
+ bool "Support for U-Boot command line parameters"
+ help
+ If you say Y here kernel will try to collect command
+ line parameters from the initial u-boot stack.
+ default n
+
+config 4KSTACKS
+ bool "Use 4Kb for kernel stacks instead of 8Kb"
+ default y
+ help
+ If you say Y here the kernel will use a 4Kb stacksize for the
+ kernel stack attached to each process/thread. This facilitates
+ running more threads on a system and also reduces the pressure
+ on the VM subsystem for higher order allocations.
+
+comment "RAM configuration"
+
+config RAMBASE
+ hex "Address of the base of RAM"
+ default "0"
+ help
+ Define the address that RAM starts at. On many platforms this is
+ 0, the base of the address space. And this is the default. Some
+ platforms choose to setup their RAM at other addresses within the
+ processor address space.
+
+config RAMSIZE
+ hex "Size of RAM (in bytes), or 0 for automatic"
+ default "0x400000"
+ help
+ Define the size of the system RAM. If you select 0 then the
+ kernel will try to probe the RAM size at runtime. This is not
+ supported on all CPU types.
+
+config VECTORBASE
+ hex "Address of the base of system vectors"
+ default "0"
+ help
+ Define the address of the system vectors. Commonly this is
+ put at the start of RAM, but it doesn't have to be. On ColdFire
+ platforms this address is programmed into the VBR register, thus
+ actually setting the address to use.
+
+config MBAR
+ hex "Address of the MBAR (internal peripherals)"
+ default "0x10000000"
+ depends on HAVE_MBAR
+ help
+ Define the address of the internal system peripherals. This value
+ is set in the processors MBAR register. This is generally setup by
+ the boot loader, and will not be written by the kernel. By far most
+ ColdFire boards use the default 0x10000000 value, so if unsure then
+ use this.
+
+config IPSBAR
+ hex "Address of the IPSBAR (internal peripherals)"
+ default "0x40000000"
+ depends on HAVE_IPSBAR
+ help
+ Define the address of the internal system peripherals. This value
+ is set in the processors IPSBAR register. This is generally setup by
+ the boot loader, and will not be written by the kernel. By far most
+ ColdFire boards use the default 0x40000000 value, so if unsure then
+ use this.
+
+config KERNELBASE
+ hex "Address of the base of kernel code"
+ default "0x400"
+ help
+ Typically on m68k systems the kernel will not start at the base
+ of RAM, but usually some small offset from it. Define the start
+ address of the kernel here. The most common setup will have the
+ processor vectors at the base of RAM and then the start of the
+ kernel. On some platforms some RAM is reserved for boot loaders
+ and the kernel starts after that. The 0x400 default was based on
+ a system with the RAM based at address 0, and leaving enough room
+ for the theoretical maximum number of 256 vectors.
+
+comment "ROM configuration"
+
+config ROM
+ bool "Specify ROM linker regions"
+ default n
+ help
+ Define a ROM region for the linker script. This creates a kernel
+ that can be stored in flash, with possibly the text, and data
+ regions being copied out to RAM at startup.
+
+config ROMBASE
+ hex "Address of the base of ROM device"
+ default "0"
+ depends on ROM
+ help
+ Define the address that the ROM region starts at. Some platforms
+ use this to set their chip select region accordingly for the boot
+ device.
+
+config ROMVEC
+ hex "Address of the base of the ROM vectors"
+ default "0"
+ depends on ROM
+ help
+ This is almost always the same as the base of the ROM. Since on all
+ 68000 type variants the vectors are at the base of the boot device
+ on system startup.
+
+config ROMVECSIZE
+ hex "Size of ROM vector region (in bytes)"
+ default "0x400"
+ depends on ROM
+ help
+ Define the size of the vector region in ROM. For most 68000
+ variants this would be 0x400 bytes in size. Set to 0 if you do
+ not want a vector region at the start of the ROM.
+
+config ROMSTART
+ hex "Address of the base of system image in ROM"
+ default "0x400"
+ depends on ROM
+ help
+ Define the start address of the system image in ROM. Commonly this
+ is strait after the ROM vectors.
+
+config ROMSIZE
+ hex "Size of the ROM device"
+ default "0x100000"
+ depends on ROM
+ help
+ Size of the ROM device. On some platforms this is used to setup
+ the chip select that controls the boot ROM device.
+
+choice
+ prompt "Kernel executes from"
+ ---help---
+ Choose the memory type that the kernel will be running in.
+
+config RAMKERNEL
+ bool "RAM"
+ help
+ The kernel will be resident in RAM when running.
+
+config ROMKERNEL
+ bool "ROM"
+ help
+ The kernel will be resident in FLASH/ROM when running. This is
+ often referred to as Execute-in-Place (XIP), since the kernel
+ code executes from the position it is stored in the FLASH/ROM.
+
+endchoice
+
+endif
diff --git a/arch/m68k/Kconfig.mmu b/arch/m68k/Kconfig.mmu
deleted file mode 100644
index 13e20bbc407..00000000000
--- a/arch/m68k/Kconfig.mmu
+++ /dev/null
@@ -1,411 +0,0 @@
-config GENERIC_IOMAP
- bool
- default y
-
-config ARCH_MAY_HAVE_PC_FDC
- bool
- depends on BROKEN && (Q40 || SUN3X)
- default y
-
-config ARCH_USES_GETTIMEOFFSET
- def_bool y
-
-config EISA
- bool
- ---help---
- The Extended Industry Standard Architecture (EISA) bus was
- developed as an open alternative to the IBM MicroChannel bus.
-
- The EISA bus provided some of the features of the IBM MicroChannel
- bus while maintaining backward compatibility with cards made for
- the older ISA bus. The EISA bus saw limited use between 1988 and
- 1995 when it was made obsolete by the PCI bus.
-
- Say Y here if you are building a kernel for an EISA-based machine.
-
- Otherwise, say N.
-
-config MCA
- bool
- help
- MicroChannel Architecture is found in some IBM PS/2 machines and
- laptops. It is a bus system similar to PCI or ISA. See
- <file:Documentation/mca.txt> (and especially the web page given
- there) before attempting to build an MCA bus kernel.
-
-config PCMCIA
- tristate
- ---help---
- Say Y here if you want to attach PCMCIA- or PC-cards to your Linux
- computer. These are credit-card size devices such as network cards,
- modems or hard drives often used with laptops computers. There are
- actually two varieties of these cards: the older 16 bit PCMCIA cards
- and the newer 32 bit CardBus cards. If you want to use CardBus
- cards, you need to say Y here and also to "CardBus support" below.
-
- To use your PC-cards, you will need supporting software from David
- Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
- for location). Please also read the PCMCIA-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as modules, choose M here: the
- modules will be called pcmcia_core and ds.
-
-config AMIGA
- bool "Amiga support"
- select MMU_MOTOROLA if MMU
- help
- This option enables support for the Amiga series of computers. If
- you plan to use this kernel on an Amiga, say Y here and browse the
- material available in <file:Documentation/m68k>; otherwise say N.
-
-config ATARI
- bool "Atari support"
- select MMU_MOTOROLA if MMU
- help
- This option enables support for the 68000-based Atari series of
- computers (including the TT, Falcon and Medusa). If you plan to use
- this kernel on an Atari, say Y here and browse the material
- available in <file:Documentation/m68k>; otherwise say N.
-
-config MAC
- bool "Macintosh support"
- select MMU_MOTOROLA if MMU
- help
- This option enables support for the Apple Macintosh series of
- computers (yes, there is experimental support now, at least for part
- of the series).
-
- Say N unless you're willing to code the remaining necessary support.
- ;)
-
-config NUBUS
- bool
- depends on MAC
- default y
-
-config M68K_L2_CACHE
- bool
- depends on MAC
- default y
-
-config APOLLO
- bool "Apollo support"
- select MMU_MOTOROLA if MMU
- help
- Say Y here if you want to run Linux on an MC680x0-based Apollo
- Domain workstation such as the DN3500.
-
-config VME
- bool "VME (Motorola and BVM) support"
- select MMU_MOTOROLA if MMU
- help
- Say Y here if you want to build a kernel for a 680x0 based VME
- board. Boards currently supported include Motorola boards MVME147,
- MVME162, MVME166, MVME167, MVME172, and MVME177. BVME4000 and
- BVME6000 boards from BVM Ltd are also supported.
-
-config MVME147
- bool "MVME147 support"
- depends on VME
- help
- Say Y to include support for early Motorola VME boards. This will
- build a kernel which can run on MVME147 single-board computers. If
- you select this option you will have to select the appropriate
- drivers for SCSI, Ethernet and serial ports later on.
-
-config MVME16x
- bool "MVME162, 166 and 167 support"
- depends on VME
- help
- Say Y to include support for Motorola VME boards. This will build a
- kernel which can run on MVME162, MVME166, MVME167, MVME172, and
- MVME177 boards. If you select this option you will have to select
- the appropriate drivers for SCSI, Ethernet and serial ports later
- on.
-
-config BVME6000
- bool "BVME4000 and BVME6000 support"
- depends on VME
- help
- Say Y to include support for VME boards from BVM Ltd. This will
- build a kernel which can run on BVME4000 and BVME6000 boards. If
- you select this option you will have to select the appropriate
- drivers for SCSI, Ethernet and serial ports later on.
-
-config HP300
- bool "HP9000/300 and HP9000/400 support"
- select MMU_MOTOROLA if MMU
- help
- This option enables support for the HP9000/300 and HP9000/400 series
- of workstations. Support for these machines is still somewhat
- experimental. If you plan to try to use the kernel on such a machine
- say Y here.
- Everybody else says N.
-
-config DIO
- bool "DIO bus support"
- depends on HP300
- default y
- help
- Say Y here to enable support for the "DIO" expansion bus used in
- HP300 machines. If you are using such a system you almost certainly
- want this.
-
-config SUN3X
- bool "Sun3x support"
- select MMU_MOTOROLA if MMU
- select M68030
- help
- This option enables support for the Sun 3x series of workstations.
- Be warned that this support is very experimental.
- Note that Sun 3x kernels are not compatible with Sun 3 hardware.
- General Linux information on the Sun 3x series (now discontinued)
- is at <http://www.angelfire.com/ca2/tech68k/sun3.html>.
-
- If you don't want to compile a kernel for a Sun 3x, say N.
-
-config Q40
- bool "Q40/Q60 support"
- select MMU_MOTOROLA if MMU
- help
- The Q40 is a Motorola 68040-based successor to the Sinclair QL
- manufactured in Germany. There is an official Q40 home page at
- <http://www.q40.de/>. This option enables support for the Q40 and
- Q60. Select your CPU below. For 68LC060 don't forget to enable FPU
- emulation.
-
-config SUN3
- bool "Sun3 support"
- depends on !MMU_MOTOROLA
- select MMU_SUN3 if MMU
- select M68020
- help
- This option enables support for the Sun 3 series of workstations
- (3/50, 3/60, 3/1xx, 3/2xx systems). Enabling this option requires
- that all other hardware types must be disabled, as Sun 3 kernels
- are incompatible with all other m68k targets (including Sun 3x!).
-
- If you don't want to compile a kernel exclusively for a Sun 3, say N.
-
-config NATFEAT
- bool "ARAnyM emulator support"
- depends on ATARI
- help
- This option enables support for ARAnyM native features, such as
- access to a disk image as /dev/hda.
-
-config NFBLOCK
- tristate "NatFeat block device support"
- depends on BLOCK && NATFEAT
- help
- Say Y to include support for the ARAnyM NatFeat block device
- which allows direct access to the hard drives without using
- the hardware emulation.
-
-config NFCON
- tristate "NatFeat console driver"
- depends on NATFEAT
- help
- Say Y to include support for the ARAnyM NatFeat console driver
- which allows the console output to be redirected to the stderr
- output of ARAnyM.
-
-config NFETH
- tristate "NatFeat Ethernet support"
- depends on NET_ETHERNET && NATFEAT
- help
- Say Y to include support for the ARAnyM NatFeat network device
- which will emulate a regular ethernet device while presenting an
- ethertap device to the host system.
-
-comment "Processor type"
-
-config M68020
- bool "68020 support"
- help
- If you anticipate running this kernel on a computer with a MC68020
- processor, say Y. Otherwise, say N. Note that the 68020 requires a
- 68851 MMU (Memory Management Unit) to run Linux/m68k, except on the
- Sun 3, which provides its own version.
-
-config M68030
- bool "68030 support"
- depends on !MMU_SUN3
- help
- If you anticipate running this kernel on a computer with a MC68030
- processor, say Y. Otherwise, say N. Note that a MC68EC030 will not
- work, as it does not include an MMU (Memory Management Unit).
-
-config M68040
- bool "68040 support"
- depends on !MMU_SUN3
- help
- If you anticipate running this kernel on a computer with a MC68LC040
- or MC68040 processor, say Y. Otherwise, say N. Note that an
- MC68EC040 will not work, as it does not include an MMU (Memory
- Management Unit).
-
-config M68060
- bool "68060 support"
- depends on !MMU_SUN3
- help
- If you anticipate running this kernel on a computer with a MC68060
- processor, say Y. Otherwise, say N.
-
-config MMU_MOTOROLA
- bool
-
-config MMU_SUN3
- bool
- depends on MMU && !MMU_MOTOROLA
-
-config M68KFPU_EMU
- bool "Math emulation support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- help
- At some point in the future, this will cause floating-point math
- instructions to be emulated by the kernel on machines that lack a
- floating-point math coprocessor. Thrill-seekers and chronically
- sleep-deprived psychotic hacker types can say Y now, everyone else
- should probably wait a while.
-
-config M68KFPU_EMU_EXTRAPREC
- bool "Math emulation extra precision"
- depends on M68KFPU_EMU
- help
- The fpu uses normally a few bit more during calculations for
- correct rounding, the emulator can (often) do the same but this
- extra calculation can cost quite some time, so you can disable
- it here. The emulator will then "only" calculate with a 64 bit
- mantissa and round slightly incorrect, what is more than enough
- for normal usage.
-
-config M68KFPU_EMU_ONLY
- bool "Math emulation only kernel"
- depends on M68KFPU_EMU
- help
- This option prevents any floating-point instructions from being
- compiled into the kernel, thereby the kernel doesn't save any
- floating point context anymore during task switches, so this
- kernel will only be usable on machines without a floating-point
- math coprocessor. This makes the kernel a bit faster as no tests
- needs to be executed whether a floating-point instruction in the
- kernel should be executed or not.
-
-config ADVANCED
- bool "Advanced configuration options"
- ---help---
- This gives you access to some advanced options for the CPU. The
- defaults should be fine for most users, but these options may make
- it possible for you to improve performance somewhat if you know what
- you are doing.
-
- Note that the answer to this question won't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about these options.
-
- Most users should say N to this question.
-
-config RMW_INSNS
- bool "Use read-modify-write instructions"
- depends on ADVANCED
- ---help---
- This allows to use certain instructions that work with indivisible
- read-modify-write bus cycles. While this is faster than the
- workaround of disabling interrupts, it can conflict with DMA
- ( = direct memory access) on many Amiga systems, and it is also said
- to destabilize other machines. It is very likely that this will
- cause serious problems on any Amiga or Atari Medusa if set. The only
- configuration where it should work are 68030-based Ataris, where it
- apparently improves performance. But you've been warned! Unless you
- really know what you are doing, say N. Try Y only if you're quite
- adventurous.
-
-config SINGLE_MEMORY_CHUNK
- bool "Use one physical chunk of memory only" if ADVANCED && !SUN3
- default y if SUN3
- select NEED_MULTIPLE_NODES
- help
- Ignore all but the first contiguous chunk of physical memory for VM
- purposes. This will save a few bytes kernel size and may speed up
- some operations. Say N if not sure.
-
-config 060_WRITETHROUGH
- bool "Use write-through caching for 68060 supervisor accesses"
- depends on ADVANCED && M68060
- ---help---
- The 68060 generally uses copyback caching of recently accessed data.
- Copyback caching means that memory writes will be held in an on-chip
- cache and only written back to memory some time later. Saying Y
- here will force supervisor (kernel) accesses to use writethrough
- caching. Writethrough caching means that data is written to memory
- straight away, so that cache and memory data always agree.
- Writethrough caching is less efficient, but is needed for some
- drivers on 68060 based systems where the 68060 bus snooping signal
- is hardwired on. The 53c710 SCSI driver is known to suffer from
- this problem.
-
-config ARCH_DISCONTIGMEM_ENABLE
- def_bool !SINGLE_MEMORY_CHUNK
-
-config NODES_SHIFT
- int
- default "3"
- depends on !SINGLE_MEMORY_CHUNK
-
-config ZORRO
- bool "Amiga Zorro (AutoConfig) bus support"
- depends on AMIGA
- help
- This enables support for the Zorro bus in the Amiga. If you have
- expansion cards in your Amiga that conform to the Amiga
- AutoConfig(tm) specification, say Y, otherwise N. Note that even
- expansion cards that do not fit in the Zorro slots but fit in e.g.
- the CPU slot may fall in this category, so you have to say Y to let
- Linux use these.
-
-config AMIGA_PCMCIA
- bool "Amiga 1200/600 PCMCIA support (EXPERIMENTAL)"
- depends on AMIGA && EXPERIMENTAL
- help
- Include support in the kernel for pcmcia on Amiga 1200 and Amiga
- 600. If you intend to use pcmcia cards say Y; otherwise say N.
-
-config HEARTBEAT
- bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || MAC ||Q40
- default y if !AMIGA && !APOLLO && !ATARI && !MAC && !Q40 && HP300
- help
- Use the power-on LED on your machine as a load meter. The exact
- behavior is platform-dependent, but normally the flash frequency is
- a hyperbolic function of the 5-minute load average.
-
-# We have a dedicated heartbeat LED. :-)
-config PROC_HARDWARE
- bool "/proc/hardware support"
- help
- Say Y here to support the /proc/hardware file, which gives you
- access to information about the machine you're running on,
- including the model, CPU, MMU, clock speed, BogoMIPS rating,
- and memory size.
-
-config ISA
- bool
- depends on Q40 || AMIGA_PCMCIA
- default y
- help
- Find out whether you have ISA slots on your motherboard. ISA is the
- name of a bus system, i.e. the way the CPU talks to the other stuff
- inside your box. Other bus systems are PCI, EISA, MicroChannel
- (MCA) or VESA. ISA is an older system, now being displaced by PCI;
- newer boards don't support it. If you have ISA, say Y, otherwise N.
-
-config GENERIC_ISA_DMA
- bool
- depends on Q40 || AMIGA_PCMCIA
- default y
-
-source "drivers/pci/Kconfig"
-
-source "drivers/zorro/Kconfig"
-
diff --git a/arch/m68k/Kconfig.nommu b/arch/m68k/Kconfig.nommu
deleted file mode 100644
index ff46383112a..00000000000
--- a/arch/m68k/Kconfig.nommu
+++ /dev/null
@@ -1,787 +0,0 @@
-config FPU
- bool
- default n
-
-config GENERIC_GPIO
- bool
- default n
-
-config GENERIC_CMOS_UPDATE
- bool
- default y
-
-config GENERIC_CLOCKEVENTS
- bool
- default n
-
-config M68000
- bool
- select CPU_HAS_NO_BITFIELDS
- help
- The Freescale (was Motorola) 68000 CPU is the first generation of
- the well known M68K family of processors. The CPU core as well as
- being available as a stand alone CPU was also used in many
- System-On-Chip devices (eg 68328, 68302, etc). It does not contain
- a paging MMU.
-
-config MCPU32
- bool
- select CPU_HAS_NO_BITFIELDS
- help
- The Freescale (was then Motorola) CPU32 is a CPU core that is
- based on the 68020 processor. For the most part it is used in
- System-On-Chip parts, and does not contain a paging MMU.
-
-config COLDFIRE
- bool
- select GENERIC_GPIO
- select ARCH_REQUIRE_GPIOLIB
- select CPU_HAS_NO_BITFIELDS
- help
- The Freescale ColdFire family of processors is a modern derivitive
- of the 68000 processor family. They are mainly targeted at embedded
- applications, and are all System-On-Chip (SOC) devices, as opposed
- to stand alone CPUs. They implement a subset of the original 68000
- processor instruction set.
-
-config COLDFIRE_SW_A7
- bool
- default n
-
-config HAVE_CACHE_SPLIT
- bool
-
-config HAVE_CACHE_CB
- bool
-
-config HAVE_MBAR
- bool
-
-config HAVE_IPSBAR
- bool
-
-choice
- prompt "CPU"
- default M68EZ328
-
-config M68328
- bool "MC68328"
- select M68000
- help
- Motorola 68328 processor support.
-
-config M68EZ328
- bool "MC68EZ328"
- select M68000
- help
- Motorola 68EX328 processor support.
-
-config M68VZ328
- bool "MC68VZ328"
- select M68000
- help
- Motorola 68VZ328 processor support.
-
-config M68360
- bool "MC68360"
- select MCPU32
- help
- Motorola 68360 processor support.
-
-config M5206
- bool "MCF5206"
- select COLDFIRE
- select COLDFIRE_SW_A7
- select HAVE_MBAR
- help
- Motorola ColdFire 5206 processor support.
-
-config M5206e
- bool "MCF5206e"
- select COLDFIRE
- select COLDFIRE_SW_A7
- select HAVE_MBAR
- help
- Motorola ColdFire 5206e processor support.
-
-config M520x
- bool "MCF520x"
- select COLDFIRE
- select GENERIC_CLOCKEVENTS
- select HAVE_CACHE_SPLIT
- help
- Freescale Coldfire 5207/5208 processor support.
-
-config M523x
- bool "MCF523x"
- select COLDFIRE
- select GENERIC_CLOCKEVENTS
- select HAVE_CACHE_SPLIT
- select HAVE_IPSBAR
- help
- Freescale Coldfire 5230/1/2/4/5 processor support
-
-config M5249
- bool "MCF5249"
- select COLDFIRE
- select COLDFIRE_SW_A7
- select HAVE_MBAR
- help
- Motorola ColdFire 5249 processor support.
-
-config M5271
- bool "MCF5271"
- select COLDFIRE
- select HAVE_CACHE_SPLIT
- select HAVE_IPSBAR
- help
- Freescale (Motorola) ColdFire 5270/5271 processor support.
-
-config M5272
- bool "MCF5272"
- select COLDFIRE
- select COLDFIRE_SW_A7
- select HAVE_MBAR
- help
- Motorola ColdFire 5272 processor support.
-
-config M5275
- bool "MCF5275"
- select COLDFIRE
- select HAVE_CACHE_SPLIT
- select HAVE_IPSBAR
- help
- Freescale (Motorola) ColdFire 5274/5275 processor support.
-
-config M528x
- bool "MCF528x"
- select COLDFIRE
- select GENERIC_CLOCKEVENTS
- select HAVE_CACHE_SPLIT
- select HAVE_IPSBAR
- help
- Motorola ColdFire 5280/5282 processor support.
-
-config M5307
- bool "MCF5307"
- select COLDFIRE
- select COLDFIRE_SW_A7
- select HAVE_CACHE_CB
- select HAVE_MBAR
- help
- Motorola ColdFire 5307 processor support.
-
-config M532x
- bool "MCF532x"
- select COLDFIRE
- select HAVE_CACHE_CB
- help
- Freescale (Motorola) ColdFire 532x processor support.
-
-config M5407
- bool "MCF5407"
- select COLDFIRE
- select COLDFIRE_SW_A7
- select HAVE_CACHE_CB
- select HAVE_MBAR
- help
- Motorola ColdFire 5407 processor support.
-
-config M547x
- bool "MCF547x"
- select COLDFIRE
- select HAVE_CACHE_CB
- select HAVE_MBAR
- help
- Freescale ColdFire 5470/5471/5472/5473/5474/5475 processor support.
-
-config M548x
- bool "MCF548x"
- select COLDFIRE
- select HAVE_CACHE_CB
- select HAVE_MBAR
- help
- Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support.
-
-endchoice
-
-config M527x
- bool
- depends on (M5271 || M5275)
- select GENERIC_CLOCKEVENTS
- default y
-
-config M54xx
- bool
- depends on (M548x || M547x)
- default y
-
-config CLOCK_SET
- bool "Enable setting the CPU clock frequency"
- default n
- help
- On some CPU's you do not need to know what the core CPU clock
- frequency is. On these you can disable clock setting. On some
- traditional 68K parts, and on all ColdFire parts you need to set
- the appropriate CPU clock frequency. On these devices many of the
- onboard peripherals derive their timing from the master CPU clock
- frequency.
-
-config CLOCK_FREQ
- int "Set the core clock frequency"
- default "66666666"
- depends on CLOCK_SET
- help
- Define the CPU clock frequency in use. This is the core clock
- frequency, it may or may not be the same as the external clock
- crystal fitted to your board. Some processors have an internal
- PLL and can have their frequency programmed at run time, others
- use internal dividers. In general the kernel won't setup a PLL
- if it is fitted (there are some exceptions). This value will be
- specific to the exact CPU that you are using.
-
-config OLDMASK
- bool "Old mask 5307 (1H55J) silicon"
- depends on M5307
- help
- Build support for the older revision ColdFire 5307 silicon.
- Specifically this is the 1H55J mask revision.
-
-if HAVE_CACHE_SPLIT
-choice
- prompt "Split Cache Configuration"
- default CACHE_I
-
-config CACHE_I
- bool "Instruction"
- help
- Use all of the ColdFire CPU cache memory as an instruction cache.
-
-config CACHE_D
- bool "Data"
- help
- Use all of the ColdFire CPU cache memory as a data cache.
-
-config CACHE_BOTH
- bool "Both"
- help
- Split the ColdFire CPU cache, and use half as an instruction cache
- and half as a data cache.
-endchoice
-endif
-
-if HAVE_CACHE_CB
-choice
- prompt "Data cache mode"
- default CACHE_WRITETHRU
-
-config CACHE_WRITETHRU
- bool "Write-through"
- help
- The ColdFire CPU cache is set into Write-through mode.
-
-config CACHE_COPYBACK
- bool "Copy-back"
- help
- The ColdFire CPU cache is set into Copy-back mode.
-endchoice
-endif
-
-comment "Platform"
-
-config PILOT3
- bool "Pilot 1000/5000, PalmPilot Personal/Pro, or PalmIII support"
- depends on M68328
- help
- Support for the Palm Pilot 1000/5000, Personal/Pro and PalmIII.
-
-config XCOPILOT_BUGS
- bool "(X)Copilot support"
- depends on PILOT3
- help
- Support the bugs of Xcopilot.
-
-config UC5272
- bool 'Arcturus Networks uC5272 dimm board support'
- depends on M5272
- help
- Support for the Arcturus Networks uC5272 dimm board.
-
-config UC5282
- bool "Arcturus Networks uC5282 board support"
- depends on M528x
- help
- Support for the Arcturus Networks uC5282 dimm board.
-
-config UCSIMM
- bool "uCsimm module support"
- depends on M68EZ328
- help
- Support for the Arcturus Networks uCsimm module.
-
-config UCDIMM
- bool "uDsimm module support"
- depends on M68VZ328
- help
- Support for the Arcturus Networks uDsimm module.
-
-config DRAGEN2
- bool "DragenEngine II board support"
- depends on M68VZ328
- help
- Support for the DragenEngine II board.
-
-config DIRECT_IO_ACCESS
- bool "Allow user to access IO directly"
- depends on (UCSIMM || UCDIMM || DRAGEN2)
- help
- Disable the CPU internal registers protection in user mode,
- to allow a user application to read/write them.
-
-config INIT_LCD
- bool "Initialize LCD"
- depends on (UCSIMM || UCDIMM || DRAGEN2)
- help
- Initialize the LCD controller of the 68x328 processor.
-
-config MEMORY_RESERVE
- int "Memory reservation (MiB)"
- depends on (UCSIMM || UCDIMM)
- help
- Reserve certain memory regions on 68x328 based boards.
-
-config UCQUICC
- bool "Lineo uCquicc board support"
- depends on M68360
- help
- Support for the Lineo uCquicc board.
-
-config ARN5206
- bool "Arnewsh 5206 board support"
- depends on M5206
- help
- Support for the Arnewsh 5206 board.
-
-config M5206eC3
- bool "Motorola M5206eC3 board support"
- depends on M5206e
- help
- Support for the Motorola M5206eC3 board.
-
-config ELITE
- bool "Motorola M5206eLITE board support"
- depends on M5206e
- help
- Support for the Motorola M5206eLITE board.
-
-config M5208EVB
- bool "Freescale M5208EVB board support"
- depends on M520x
- help
- Support for the Freescale Coldfire M5208EVB.
-
-config M5235EVB
- bool "Freescale M5235EVB support"
- depends on M523x
- help
- Support for the Freescale M5235EVB board.
-
-config M5249C3
- bool "Motorola M5249C3 board support"
- depends on M5249
- help
- Support for the Motorola M5249C3 board.
-
-config M5271EVB
- bool "Freescale (Motorola) M5271EVB board support"
- depends on M5271
- help
- Support for the Freescale (Motorola) M5271EVB board.
-
-config M5275EVB
- bool "Freescale (Motorola) M5275EVB board support"
- depends on M5275
- help
- Support for the Freescale (Motorola) M5275EVB board.
-
-config M5272C3
- bool "Motorola M5272C3 board support"
- depends on M5272
- help
- Support for the Motorola M5272C3 board.
-
-config COBRA5272
- bool "senTec COBRA5272 board support"
- depends on M5272
- help
- Support for the senTec COBRA5272 board.
-
-config AVNET5282
- bool "Avnet 5282 board support"
- depends on M528x
- help
- Support for the Avnet 5282 board.
-
-config M5282EVB
- bool "Motorola M5282EVB board support"
- depends on M528x
- help
- Support for the Motorola M5282EVB board.
-
-config COBRA5282
- bool "senTec COBRA5282 board support"
- depends on M528x
- help
- Support for the senTec COBRA5282 board.
-
-config SOM5282EM
- bool "EMAC.Inc SOM5282EM board support"
- depends on M528x
- help
- Support for the EMAC.Inc SOM5282EM module.
-
-config WILDFIRE
- bool "Intec Automation Inc. WildFire board support"
- depends on M528x
- help
- Support for the Intec Automation Inc. WildFire.
-
-config WILDFIREMOD
- bool "Intec Automation Inc. WildFire module support"
- depends on M528x
- help
- Support for the Intec Automation Inc. WildFire module.
-
-config ARN5307
- bool "Arnewsh 5307 board support"
- depends on M5307
- help
- Support for the Arnewsh 5307 board.
-
-config M5307C3
- bool "Motorola M5307C3 board support"
- depends on M5307
- help
- Support for the Motorola M5307C3 board.
-
-config SECUREEDGEMP3
- bool "SnapGear SecureEdge/MP3 platform support"
- depends on M5307
- help
- Support for the SnapGear SecureEdge/MP3 platform.
-
-config M5329EVB
- bool "Freescale (Motorola) M5329EVB board support"
- depends on M532x
- help
- Support for the Freescale (Motorola) M5329EVB board.
-
-config COBRA5329
- bool "senTec COBRA5329 board support"
- depends on M532x
- help
- Support for the senTec COBRA5329 board.
-
-config M5407C3
- bool "Motorola M5407C3 board support"
- depends on M5407
- help
- Support for the Motorola M5407C3 board.
-
-config FIREBEE
- bool "FireBee board support"
- depends on M547x
- help
- Support for the FireBee ColdFire 5475 based board.
-
-config CLEOPATRA
- bool "Feith CLEOPATRA board support"
- depends on (M5307 || M5407)
- help
- Support for the Feith Cleopatra boards.
-
-config CANCam
- bool "Feith CANCam board support"
- depends on M5272
- help
- Support for the Feith CANCam board.
-
-config SCALES
- bool "Feith SCALES board support"
- depends on M5272
- help
- Support for the Feith SCALES board.
-
-config NETtel
- bool "SecureEdge/NETtel board support"
- depends on (M5206e || M5272 || M5307)
- help
- Support for the SnapGear NETtel/SecureEdge/SnapGear boards.
-
-config SNAPGEAR
- bool "SnapGear router board support"
- depends on NETtel
- help
- Special additional support for SnapGear router boards.
-
-config CPU16B
- bool "Sneha Technologies S.L. Sarasvati board support"
- depends on M5272
- help
- Support for the SNEHA CPU16B board.
-
-config MOD5272
- bool "Netburner MOD-5272 board support"
- depends on M5272
- help
- Support for the Netburner MOD-5272 board.
-
-config SAVANTrosie1
- bool "Savant Rosie1 board support"
- depends on M523x
- help
- Support for the Savant Rosie1 board.
-
-config ROMFS_FROM_ROM
- bool "ROMFS image not RAM resident"
- depends on (NETtel || SNAPGEAR)
- help
- The ROMfs filesystem will stay resident in the FLASH/ROM, not be
- moved into RAM.
-
-config PILOT
- bool
- default y
- depends on (PILOT3 || PILOT5)
-
-config ARNEWSH
- bool
- default y
- depends on (ARN5206 || ARN5307)
-
-config FREESCALE
- bool
- default y
- depends on (M5206eC3 || M5208EVB || M5235EVB || M5249C3 || M5271EVB || M5272C3 || M5275EVB || M5282EVB || M5307C3 || M5329EVB || M5407C3)
-
-config HW_FEITH
- bool
- default y
- depends on (CLEOPATRA || CANCam || SCALES)
-
-config senTec
- bool
- default y
- depends on (COBRA5272 || COBRA5282)
-
-config EMAC_INC
- bool
- default y
- depends on (SOM5282EM)
-
-config SNEHA
- bool
- default y
- depends on CPU16B
-
-config SAVANT
- bool
- default y
- depends on SAVANTrosie1
-
-config AVNET
- bool
- default y
- depends on (AVNET5282)
-
-config UBOOT
- bool "Support for U-Boot command line parameters"
- help
- If you say Y here kernel will try to collect command
- line parameters from the initial u-boot stack.
- default n
-
-config 4KSTACKS
- bool "Use 4Kb for kernel stacks instead of 8Kb"
- default y
- help
- If you say Y here the kernel will use a 4Kb stacksize for the
- kernel stack attached to each process/thread. This facilitates
- running more threads on a system and also reduces the pressure
- on the VM subsystem for higher order allocations.
-
-comment "RAM configuration"
-
-config RAMBASE
- hex "Address of the base of RAM"
- default "0"
- help
- Define the address that RAM starts at. On many platforms this is
- 0, the base of the address space. And this is the default. Some
- platforms choose to setup their RAM at other addresses within the
- processor address space.
-
-config RAMSIZE
- hex "Size of RAM (in bytes), or 0 for automatic"
- default "0x400000"
- help
- Define the size of the system RAM. If you select 0 then the
- kernel will try to probe the RAM size at runtime. This is not
- supported on all CPU types.
-
-config VECTORBASE
- hex "Address of the base of system vectors"
- default "0"
- help
- Define the address of the system vectors. Commonly this is
- put at the start of RAM, but it doesn't have to be. On ColdFire
- platforms this address is programmed into the VBR register, thus
- actually setting the address to use.
-
-config MBAR
- hex "Address of the MBAR (internal peripherals)"
- default "0x10000000"
- depends on HAVE_MBAR
- help
- Define the address of the internal system peripherals. This value
- is set in the processors MBAR register. This is generally setup by
- the boot loader, and will not be written by the kernel. By far most
- ColdFire boards use the default 0x10000000 value, so if unsure then
- use this.
-
-config IPSBAR
- hex "Address of the IPSBAR (internal peripherals)"
- default "0x40000000"
- depends on HAVE_IPSBAR
- help
- Define the address of the internal system peripherals. This value
- is set in the processors IPSBAR register. This is generally setup by
- the boot loader, and will not be written by the kernel. By far most
- ColdFire boards use the default 0x40000000 value, so if unsure then
- use this.
-
-config KERNELBASE
- hex "Address of the base of kernel code"
- default "0x400"
- help
- Typically on m68k systems the kernel will not start at the base
- of RAM, but usually some small offset from it. Define the start
- address of the kernel here. The most common setup will have the
- processor vectors at the base of RAM and then the start of the
- kernel. On some platforms some RAM is reserved for boot loaders
- and the kernel starts after that. The 0x400 default was based on
- a system with the RAM based at address 0, and leaving enough room
- for the theoretical maximum number of 256 vectors.
-
-choice
- prompt "RAM bus width"
- default RAMAUTOBIT
-
-config RAMAUTOBIT
- bool "AUTO"
- help
- Select the physical RAM data bus size. Not needed on most platforms,
- so you can generally choose AUTO.
-
-config RAM8BIT
- bool "8bit"
- help
- Configure RAM bus to be 8 bits wide.
-
-config RAM16BIT
- bool "16bit"
- help
- Configure RAM bus to be 16 bits wide.
-
-config RAM32BIT
- bool "32bit"
- help
- Configure RAM bus to be 32 bits wide.
-
-endchoice
-
-comment "ROM configuration"
-
-config ROM
- bool "Specify ROM linker regions"
- default n
- help
- Define a ROM region for the linker script. This creates a kernel
- that can be stored in flash, with possibly the text, and data
- regions being copied out to RAM at startup.
-
-config ROMBASE
- hex "Address of the base of ROM device"
- default "0"
- depends on ROM
- help
- Define the address that the ROM region starts at. Some platforms
- use this to set their chip select region accordingly for the boot
- device.
-
-config ROMVEC
- hex "Address of the base of the ROM vectors"
- default "0"
- depends on ROM
- help
- This is almost always the same as the base of the ROM. Since on all
- 68000 type variants the vectors are at the base of the boot device
- on system startup.
-
-config ROMVECSIZE
- hex "Size of ROM vector region (in bytes)"
- default "0x400"
- depends on ROM
- help
- Define the size of the vector region in ROM. For most 68000
- variants this would be 0x400 bytes in size. Set to 0 if you do
- not want a vector region at the start of the ROM.
-
-config ROMSTART
- hex "Address of the base of system image in ROM"
- default "0x400"
- depends on ROM
- help
- Define the start address of the system image in ROM. Commonly this
- is strait after the ROM vectors.
-
-config ROMSIZE
- hex "Size of the ROM device"
- default "0x100000"
- depends on ROM
- help
- Size of the ROM device. On some platforms this is used to setup
- the chip select that controls the boot ROM device.
-
-choice
- prompt "Kernel executes from"
- ---help---
- Choose the memory type that the kernel will be running in.
-
-config RAMKERNEL
- bool "RAM"
- help
- The kernel will be resident in RAM when running.
-
-config ROMKERNEL
- bool "ROM"
- help
- The kernel will be resident in FLASH/ROM when running. This is
- often referred to as Execute-in-Place (XIP), since the kernel
- code executes from the position it is stored in the FLASH/ROM.
-
-endchoice
-
-if COLDFIRE
-source "kernel/Kconfig.preempt"
-endif
-
-source "kernel/time/Kconfig"
-
-config ISA_DMA_API
- bool
- depends on !M5272
- default y
-
-source "drivers/pcmcia/Kconfig"
-
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index be46cadd401..cf318f20c64 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -1,7 +1,171 @@
+#
+# m68k/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to do have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Hamish Macdonald
+# Copyright (C) 2002,2011 Greg Ungerer <gerg@snapgear.com>
+#
+
KBUILD_DEFCONFIG := multi_defconfig
+#
+# Enable processor type. Ordering of these is important - we want to
+# use the minimum processor type of the range we support. The logic
+# for 680x0 will only allow use of the -m68060 or -m68040 if no other
+# 680x0 type is specified - and no option is specified for 68030 or
+# 68020. The other m68k/ColdFire types always specify some type of
+# compiler cpu type flag.
+#
+ifndef CONFIG_M68040
+cpuflags-$(CONFIG_M68060) := -m68060
+endif
+ifndef CONFIG_M68060
+cpuflags-$(CONFIG_M68040) := -m68040
+endif
+cpuflags-$(CONFIG_M68030) :=
+cpuflags-$(CONFIG_M68020) :=
+cpuflags-$(CONFIG_M68360) := -m68332
+cpuflags-$(CONFIG_M68000) := -m68000
+cpuflags-$(CONFIG_M54xx) := $(call cc-option,-mcpu=5475,-m5200)
+cpuflags-$(CONFIG_M5407) := $(call cc-option,-mcpu=5407,-m5200)
+cpuflags-$(CONFIG_M532x) := $(call cc-option,-mcpu=532x,-m5307)
+cpuflags-$(CONFIG_M5307) := $(call cc-option,-mcpu=5307,-m5200)
+cpuflags-$(CONFIG_M528x) := $(call cc-option,-mcpu=528x,-m5307)
+cpuflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307)
+cpuflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5272,-m5307)
+cpuflags-$(CONFIG_M5271) := $(call cc-option,-mcpu=5271,-m5307)
+cpuflags-$(CONFIG_M523x) := $(call cc-option,-mcpu=523x,-m5307)
+cpuflags-$(CONFIG_M5249) := $(call cc-option,-mcpu=5249,-m5200)
+cpuflags-$(CONFIG_M520x) := $(call cc-option,-mcpu=5208,-m5200)
+cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200)
+cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200)
+
+KBUILD_AFLAGS += $(cpuflags-y)
+KBUILD_CFLAGS += $(cpuflags-y) -pipe
ifdef CONFIG_MMU
-include $(srctree)/arch/m68k/Makefile_mm
+# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
+KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2
+else
+# we can use a m68k-linux-gcc toolchain with these in place
+KBUILD_CFLAGS += -DUTS_SYSNAME=\"uClinux\"
+KBUILD_CFLAGS += -D__uClinux__
+KBUILD_AFLAGS += -D__uClinux__
+endif
+
+LDFLAGS := -m m68kelf
+KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds
+ifneq ($(SUBARCH),$(ARCH))
+ ifeq ($(CROSS_COMPILE),)
+ CROSS_COMPILE := $(call cc-cross-prefix, \
+ m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-)
+ endif
+endif
+
+ifdef CONFIG_SUN3
+LDFLAGS_vmlinux = -N
+endif
+
+CHECKFLAGS += -D__mc68000__
+
+
+ifdef CONFIG_KGDB
+# If configured for kgdb support, include debugging infos and keep the
+# frame pointer
+KBUILD_CFLAGS := $(subst -fomit-frame-pointer,,$(KBUILD_CFLAGS)) -g
+endif
+
+#
+# Select the assembler head startup code. Order is important. The default
+# head code is first, processor specific selections can override it after.
+#
+head-y := arch/m68k/kernel/head.o
+head-$(CONFIG_SUN3) := arch/m68k/kernel/sun3-head.o
+head-$(CONFIG_M68360) := arch/m68k/platform/68360/head.o
+head-$(CONFIG_M68000) := arch/m68k/platform/68328/head.o
+head-$(CONFIG_COLDFIRE) := arch/m68k/platform/coldfire/head.o
+
+core-y += arch/m68k/kernel/ arch/m68k/mm/
+libs-y += arch/m68k/lib/
+
+core-$(CONFIG_Q40) += arch/m68k/q40/
+core-$(CONFIG_AMIGA) += arch/m68k/amiga/
+core-$(CONFIG_ATARI) += arch/m68k/atari/
+core-$(CONFIG_MAC) += arch/m68k/mac/
+core-$(CONFIG_HP300) += arch/m68k/hp300/
+core-$(CONFIG_APOLLO) += arch/m68k/apollo/
+core-$(CONFIG_MVME147) += arch/m68k/mvme147/
+core-$(CONFIG_MVME16x) += arch/m68k/mvme16x/
+core-$(CONFIG_BVME6000) += arch/m68k/bvme6000/
+core-$(CONFIG_SUN3X) += arch/m68k/sun3x/ arch/m68k/sun3/
+core-$(CONFIG_SUN3) += arch/m68k/sun3/ arch/m68k/sun3/prom/
+core-$(CONFIG_NATFEAT) += arch/m68k/emu/
+core-$(CONFIG_M68040) += arch/m68k/fpsp040/
+core-$(CONFIG_M68060) += arch/m68k/ifpsp060/
+core-$(CONFIG_M68KFPU_EMU) += arch/m68k/math-emu/
+core-$(CONFIG_M68360) += arch/m68k/platform/68360/
+core-$(CONFIG_M68000) += arch/m68k/platform/68328/
+core-$(CONFIG_M68EZ328) += arch/m68k/platform/68EZ328/
+core-$(CONFIG_M68VZ328) += arch/m68k/platform/68VZ328/
+core-$(CONFIG_COLDFIRE) += arch/m68k/platform/coldfire/
+core-$(CONFIG_M5206) += arch/m68k/platform/5206/
+core-$(CONFIG_M5206e) += arch/m68k/platform/5206/
+core-$(CONFIG_M520x) += arch/m68k/platform/520x/
+core-$(CONFIG_M523x) += arch/m68k/platform/523x/
+core-$(CONFIG_M5249) += arch/m68k/platform/5249/
+core-$(CONFIG_M527x) += arch/m68k/platform/527x/
+core-$(CONFIG_M5272) += arch/m68k/platform/5272/
+core-$(CONFIG_M528x) += arch/m68k/platform/528x/
+core-$(CONFIG_M5307) += arch/m68k/platform/5307/
+core-$(CONFIG_M532x) += arch/m68k/platform/532x/
+core-$(CONFIG_M5407) += arch/m68k/platform/5407/
+core-$(CONFIG_M54xx) += arch/m68k/platform/54xx/
+
+
+all: zImage
+
+lilo: vmlinux
+ if [ -f $(INSTALL_PATH)/vmlinux ]; then mv -f $(INSTALL_PATH)/vmlinux $(INSTALL_PATH)/vmlinux.old; fi
+ if [ -f $(INSTALL_PATH)/System.map ]; then mv -f $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
+ cat vmlinux > $(INSTALL_PATH)/vmlinux
+ cp System.map $(INSTALL_PATH)/System.map
+ if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
+
+zImage compressed: vmlinux.gz
+
+vmlinux.gz: vmlinux
+
+ifndef CONFIG_KGDB
+ cp vmlinux vmlinux.tmp
+ $(STRIP) vmlinux.tmp
+ gzip -9c vmlinux.tmp >vmlinux.gz
+ rm vmlinux.tmp
else
-include $(srctree)/arch/m68k/Makefile_no
+ gzip -9c vmlinux >vmlinux.gz
endif
+
+bzImage: vmlinux.bz2
+
+vmlinux.bz2: vmlinux
+
+ifndef CONFIG_KGDB
+ cp vmlinux vmlinux.tmp
+ $(STRIP) vmlinux.tmp
+ bzip2 -1c vmlinux.tmp >vmlinux.bz2
+ rm vmlinux.tmp
+else
+ bzip2 -1c vmlinux >vmlinux.bz2
+endif
+
+archclean:
+ rm -f vmlinux.gz vmlinux.bz2
+
+install:
+ sh $(srctree)/arch/m68k/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/m68k/Makefile_mm b/arch/m68k/Makefile_mm
deleted file mode 100644
index d449b6d5aec..00000000000
--- a/arch/m68k/Makefile_mm
+++ /dev/null
@@ -1,121 +0,0 @@
-#
-# m68k/Makefile
-#
-# This file is included by the global makefile so that you can add your own
-# architecture-specific flags and dependencies. Remember to do have actions
-# for "archclean" and "archdep" for cleaning up and making dependencies for
-# this architecture
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# Copyright (C) 1994 by Hamish Macdonald
-#
-
-# override top level makefile
-AS += -m68020
-LDFLAGS := -m m68kelf
-KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds
-ifneq ($(SUBARCH),$(ARCH))
- ifeq ($(CROSS_COMPILE),)
- CROSS_COMPILE := $(call cc-cross-prefix, \
- m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-)
- endif
-endif
-
-ifdef CONFIG_SUN3
-LDFLAGS_vmlinux = -N
-endif
-
-CHECKFLAGS += -D__mc68000__
-
-# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
-KBUILD_CFLAGS += -pipe -fno-strength-reduce -ffixed-a2
-
-# enable processor switch if compiled only for a single cpu
-ifndef CONFIG_M68020
-ifndef CONFIG_M68030
-
-ifndef CONFIG_M68060
-KBUILD_CFLAGS += -m68040
-endif
-
-ifndef CONFIG_M68040
-KBUILD_CFLAGS += -m68060
-endif
-
-endif
-endif
-
-ifdef CONFIG_KGDB
-# If configured for kgdb support, include debugging infos and keep the
-# frame pointer
-KBUILD_CFLAGS := $(subst -fomit-frame-pointer,,$(KBUILD_CFLAGS)) -g
-endif
-
-ifndef CONFIG_SUN3
-head-y := arch/m68k/kernel/head.o
-else
-head-y := arch/m68k/kernel/sun3-head.o
-endif
-
-core-y += arch/m68k/kernel/ arch/m68k/mm/
-libs-y += arch/m68k/lib/
-
-core-$(CONFIG_Q40) += arch/m68k/q40/
-core-$(CONFIG_AMIGA) += arch/m68k/amiga/
-core-$(CONFIG_ATARI) += arch/m68k/atari/
-core-$(CONFIG_MAC) += arch/m68k/mac/
-core-$(CONFIG_HP300) += arch/m68k/hp300/
-core-$(CONFIG_APOLLO) += arch/m68k/apollo/
-core-$(CONFIG_MVME147) += arch/m68k/mvme147/
-core-$(CONFIG_MVME16x) += arch/m68k/mvme16x/
-core-$(CONFIG_BVME6000) += arch/m68k/bvme6000/
-core-$(CONFIG_SUN3X) += arch/m68k/sun3x/ arch/m68k/sun3/
-core-$(CONFIG_SUN3) += arch/m68k/sun3/ arch/m68k/sun3/prom/
-core-$(CONFIG_NATFEAT) += arch/m68k/emu/
-core-$(CONFIG_M68040) += arch/m68k/fpsp040/
-core-$(CONFIG_M68060) += arch/m68k/ifpsp060/
-core-$(CONFIG_M68KFPU_EMU) += arch/m68k/math-emu/
-
-all: zImage
-
-lilo: vmlinux
- if [ -f $(INSTALL_PATH)/vmlinux ]; then mv -f $(INSTALL_PATH)/vmlinux $(INSTALL_PATH)/vmlinux.old; fi
- if [ -f $(INSTALL_PATH)/System.map ]; then mv -f $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
- cat vmlinux > $(INSTALL_PATH)/vmlinux
- cp System.map $(INSTALL_PATH)/System.map
- if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
-
-zImage compressed: vmlinux.gz
-
-vmlinux.gz: vmlinux
-
-ifndef CONFIG_KGDB
- cp vmlinux vmlinux.tmp
- $(STRIP) vmlinux.tmp
- gzip -9c vmlinux.tmp >vmlinux.gz
- rm vmlinux.tmp
-else
- gzip -9c vmlinux >vmlinux.gz
-endif
-
-bzImage: vmlinux.bz2
-
-vmlinux.bz2: vmlinux
-
-ifndef CONFIG_KGDB
- cp vmlinux vmlinux.tmp
- $(STRIP) vmlinux.tmp
- bzip2 -1c vmlinux.tmp >vmlinux.bz2
- rm vmlinux.tmp
-else
- bzip2 -1c vmlinux >vmlinux.bz2
-endif
-
-archclean:
- rm -f vmlinux.gz vmlinux.bz2
-
-install:
- sh $(srctree)/arch/m68k/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/m68k/Makefile_no b/arch/m68k/Makefile_no
deleted file mode 100644
index 844d3f17226..00000000000
--- a/arch/m68k/Makefile_no
+++ /dev/null
@@ -1,124 +0,0 @@
-#
-# arch/m68k/Makefile
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# (C) Copyright 2002, Greg Ungerer <gerg@snapgear.com>
-#
-
-platform-$(CONFIG_M68328) := 68328
-platform-$(CONFIG_M68EZ328) := 68EZ328
-platform-$(CONFIG_M68VZ328) := 68VZ328
-platform-$(CONFIG_M68360) := 68360
-platform-$(CONFIG_M5206) := 5206
-platform-$(CONFIG_M5206e) := 5206
-platform-$(CONFIG_M520x) := 520x
-platform-$(CONFIG_M523x) := 523x
-platform-$(CONFIG_M5249) := 5249
-platform-$(CONFIG_M527x) := 527x
-platform-$(CONFIG_M5272) := 5272
-platform-$(CONFIG_M528x) := 528x
-platform-$(CONFIG_M5307) := 5307
-platform-$(CONFIG_M532x) := 532x
-platform-$(CONFIG_M5407) := 5407
-platform-$(CONFIG_M54xx) := 54xx
-PLATFORM := $(platform-y)
-
-board-$(CONFIG_PILOT) := pilot
-board-$(CONFIG_UC5272) := UC5272
-board-$(CONFIG_UC5282) := UC5282
-board-$(CONFIG_UCSIMM) := ucsimm
-board-$(CONFIG_UCDIMM) := ucdimm
-board-$(CONFIG_UCQUICC) := uCquicc
-board-$(CONFIG_DRAGEN2) := de2
-board-$(CONFIG_ARNEWSH) := ARNEWSH
-board-$(CONFIG_FREESCALE) := FREESCALE
-board-$(CONFIG_M5235EVB) := M5235EVB
-board-$(CONFIG_M5271EVB) := M5271EVB
-board-$(CONFIG_M5275EVB) := M5275EVB
-board-$(CONFIG_M5282EVB) := M5282EVB
-board-$(CONFIG_ELITE) := eLITE
-board-$(CONFIG_NETtel) := NETtel
-board-$(CONFIG_SECUREEDGEMP3) := MP3
-board-$(CONFIG_CLEOPATRA) := CLEOPATRA
-board-$(CONFIG_senTec) := senTec
-board-$(CONFIG_SNEHA) := SNEHA
-board-$(CONFIG_M5208EVB) := M5208EVB
-board-$(CONFIG_MOD5272) := MOD5272
-board-$(CONFIG_AVNET) := AVNET
-board-$(CONFIG_SAVANT) := SAVANT
-BOARD := $(board-y)
-
-model-$(CONFIG_RAMKERNEL) := ram
-model-$(CONFIG_ROMKERNEL) := rom
-MODEL := $(model-y)
-
-#
-# Some code support is grouped together for a common cpu-subclass (for
-# example all ColdFire cpu's are very similar). Determine the sub-class
-# for the selected cpu. ONLY need to define this for the non-base member
-# of the family.
-#
-cpuclass-$(CONFIG_M5206) := coldfire
-cpuclass-$(CONFIG_M5206e) := coldfire
-cpuclass-$(CONFIG_M520x) := coldfire
-cpuclass-$(CONFIG_M523x) := coldfire
-cpuclass-$(CONFIG_M5249) := coldfire
-cpuclass-$(CONFIG_M527x) := coldfire
-cpuclass-$(CONFIG_M5272) := coldfire
-cpuclass-$(CONFIG_M528x) := coldfire
-cpuclass-$(CONFIG_M5307) := coldfire
-cpuclass-$(CONFIG_M532x) := coldfire
-cpuclass-$(CONFIG_M5407) := coldfire
-cpuclass-$(CONFIG_M54xx) := coldfire
-cpuclass-$(CONFIG_M68328) := 68328
-cpuclass-$(CONFIG_M68EZ328) := 68328
-cpuclass-$(CONFIG_M68VZ328) := 68328
-cpuclass-$(CONFIG_M68360) := 68360
-CPUCLASS := $(cpuclass-y)
-
-ifneq ($(CPUCLASS),$(PLATFORM))
-CLASSDIR := arch/m68k/platform/$(cpuclass-y)/
-endif
-
-export PLATFORM BOARD MODEL CPUCLASS
-
-#
-# Some CFLAG additions based on specific CPU type.
-#
-cflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200)
-cflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200)
-cflags-$(CONFIG_M520x) := $(call cc-option,-mcpu=5208,-m5200)
-cflags-$(CONFIG_M523x) := $(call cc-option,-mcpu=523x,-m5307)
-cflags-$(CONFIG_M5249) := $(call cc-option,-mcpu=5249,-m5200)
-cflags-$(CONFIG_M5271) := $(call cc-option,-mcpu=5271,-m5307)
-cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5272,-m5307)
-cflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307)
-cflags-$(CONFIG_M528x) := $(call cc-option,-mcpu=528x,-m5307)
-cflags-$(CONFIG_M5307) := $(call cc-option,-mcpu=5307,-m5200)
-cflags-$(CONFIG_M532x) := $(call cc-option,-mcpu=532x,-m5307)
-cflags-$(CONFIG_M5407) := $(call cc-option,-mcpu=5407,-m5200)
-cflags-$(CONFIG_M54xx) := $(call cc-option,-mcpu=5475,-m5200)
-cflags-$(CONFIG_M68328) := -m68000
-cflags-$(CONFIG_M68EZ328) := -m68000
-cflags-$(CONFIG_M68VZ328) := -m68000
-cflags-$(CONFIG_M68360) := -m68332
-
-KBUILD_AFLAGS += $(cflags-y)
-
-KBUILD_CFLAGS += $(cflags-y)
-KBUILD_CFLAGS += -D__linux__
-KBUILD_CFLAGS += -DUTS_SYSNAME=\"uClinux\"
-
-head-y := arch/m68k/platform/$(cpuclass-y)/head.o
-
-core-y += arch/m68k/kernel/ \
- arch/m68k/mm/ \
- $(CLASSDIR) \
- arch/m68k/platform/$(PLATFORM)/
-libs-y += arch/m68k/lib/
-
-archclean:
-
diff --git a/arch/m68k/include/asm/entry.h b/arch/m68k/include/asm/entry.h
index 876eec6f2b5..c3c5a8643e1 100644
--- a/arch/m68k/include/asm/entry.h
+++ b/arch/m68k/include/asm/entry.h
@@ -1,5 +1,254 @@
-#ifdef __uClinux__
-#include "entry_no.h"
+#ifndef __M68K_ENTRY_H
+#define __M68K_ENTRY_H
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#ifdef __ASSEMBLY__
+#include <asm/thread_info.h>
+#endif
+
+/*
+ * Stack layout in 'ret_from_exception':
+ *
+ * This allows access to the syscall arguments in registers d1-d5
+ *
+ * 0(sp) - d1
+ * 4(sp) - d2
+ * 8(sp) - d3
+ * C(sp) - d4
+ * 10(sp) - d5
+ * 14(sp) - a0
+ * 18(sp) - a1
+ * 1C(sp) - a2
+ * 20(sp) - d0
+ * 24(sp) - orig_d0
+ * 28(sp) - stack adjustment
+ * 2C(sp) - [ sr ] [ format & vector ]
+ * 2E(sp) - [ pc-hiword ] [ sr ]
+ * 30(sp) - [ pc-loword ] [ pc-hiword ]
+ * 32(sp) - [ format & vector ] [ pc-loword ]
+ * ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^
+ * M68K COLDFIRE
+ */
+
+/* the following macro is used when enabling interrupts */
+#if defined(MACH_ATARI_ONLY)
+ /* block out HSYNC on the atari */
+#define ALLOWINT (~0x400)
+#define MAX_NOINT_IPL 3
#else
-#include "entry_mm.h"
+ /* portable version */
+#define ALLOWINT (~0x700)
+#define MAX_NOINT_IPL 0
+#endif /* machine compilation types */
+
+#ifdef __ASSEMBLY__
+/*
+ * This defines the normal kernel pt-regs layout.
+ *
+ * regs a3-a6 and d6-d7 are preserved by C code
+ * the kernel doesn't mess with usp unless it needs to
+ */
+#define SWITCH_STACK_SIZE (6*4+4) /* includes return address */
+
+#ifdef CONFIG_COLDFIRE
+#ifdef CONFIG_COLDFIRE_SW_A7
+/*
+ * This is made a little more tricky on older ColdFires. There is no
+ * separate supervisor and user stack pointers. Need to artificially
+ * construct a usp in software... When doing this we need to disable
+ * interrupts, otherwise bad things will happen.
+ */
+.globl sw_usp
+.globl sw_ksp
+
+.macro SAVE_ALL_SYS
+ move #0x2700,%sr /* disable intrs */
+ btst #5,%sp@(2) /* from user? */
+ bnes 6f /* no, skip */
+ movel %sp,sw_usp /* save user sp */
+ addql #8,sw_usp /* remove exception */
+ movel sw_ksp,%sp /* kernel sp */
+ subql #8,%sp /* room for exception */
+ clrl %sp@- /* stkadj */
+ movel %d0,%sp@- /* orig d0 */
+ movel %d0,%sp@- /* d0 */
+ lea %sp@(-32),%sp /* space for 8 regs */
+ moveml %d1-%d5/%a0-%a2,%sp@
+ movel sw_usp,%a0 /* get usp */
+ movel %a0@-,%sp@(PT_OFF_PC) /* copy exception program counter */
+ movel %a0@-,%sp@(PT_OFF_FORMATVEC)/*copy exception format/vector/sr */
+ bra 7f
+ 6:
+ clrl %sp@- /* stkadj */
+ movel %d0,%sp@- /* orig d0 */
+ movel %d0,%sp@- /* d0 */
+ lea %sp@(-32),%sp /* space for 8 regs */
+ moveml %d1-%d5/%a0-%a2,%sp@
+ 7:
+.endm
+
+.macro SAVE_ALL_INT
+ SAVE_ALL_SYS
+ moveq #-1,%d0 /* not system call entry */
+ movel %d0,%sp@(PT_OFF_ORIG_D0)
+.endm
+
+.macro RESTORE_USER
+ move #0x2700,%sr /* disable intrs */
+ movel sw_usp,%a0 /* get usp */
+ movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */
+ movel %sp@(PT_OFF_FORMATVEC),%a0@-/*copy exception format/vector/sr */
+ moveml %sp@,%d1-%d5/%a0-%a2
+ lea %sp@(32),%sp /* space for 8 regs */
+ movel %sp@+,%d0
+ addql #4,%sp /* orig d0 */
+ addl %sp@+,%sp /* stkadj */
+ addql #8,%sp /* remove exception */
+ movel %sp,sw_ksp /* save ksp */
+ subql #8,sw_usp /* set exception */
+ movel sw_usp,%sp /* restore usp */
+ rte
+.endm
+
+.macro RDUSP
+ movel sw_usp,%a3
+.endm
+
+.macro WRUSP
+ movel %a3,sw_usp
+.endm
+
+#else /* !CONFIG_COLDFIRE_SW_A7 */
+/*
+ * Modern ColdFire parts have separate supervisor and user stack
+ * pointers. Simple load and restore macros for this case.
+ */
+.macro SAVE_ALL_SYS
+ move #0x2700,%sr /* disable intrs */
+ clrl %sp@- /* stkadj */
+ movel %d0,%sp@- /* orig d0 */
+ movel %d0,%sp@- /* d0 */
+ lea %sp@(-32),%sp /* space for 8 regs */
+ moveml %d1-%d5/%a0-%a2,%sp@
+.endm
+
+.macro SAVE_ALL_INT
+ move #0x2700,%sr /* disable intrs */
+ clrl %sp@- /* stkadj */
+ pea -1:w /* orig d0 */
+ movel %d0,%sp@- /* d0 */
+ lea %sp@(-32),%sp /* space for 8 regs */
+ moveml %d1-%d5/%a0-%a2,%sp@
+.endm
+
+.macro RESTORE_USER
+ moveml %sp@,%d1-%d5/%a0-%a2
+ lea %sp@(32),%sp /* space for 8 regs */
+ movel %sp@+,%d0
+ addql #4,%sp /* orig d0 */
+ addl %sp@+,%sp /* stkadj */
+ rte
+.endm
+
+.macro RDUSP
+ /*move %usp,%a3*/
+ .word 0x4e6b
+.endm
+
+.macro WRUSP
+ /*move %a3,%usp*/
+ .word 0x4e63
+.endm
+
+#endif /* !CONFIG_COLDFIRE_SW_A7 */
+
+.macro SAVE_SWITCH_STACK
+ lea %sp@(-24),%sp /* 6 regs */
+ moveml %a3-%a6/%d6-%d7,%sp@
+.endm
+
+.macro RESTORE_SWITCH_STACK
+ moveml %sp@,%a3-%a6/%d6-%d7
+ lea %sp@(24),%sp /* 6 regs */
+.endm
+
+#else /* !CONFIG_COLDFIRE */
+
+/*
+ * All other types of m68k parts (68000, 680x0, CPU32) have the same
+ * entry and exit code.
+ */
+
+/*
+ * a -1 in the orig_d0 field signifies
+ * that the stack frame is NOT for syscall
+ */
+.macro SAVE_ALL_INT
+ clrl %sp@- /* stk_adj */
+ pea -1:w /* orig d0 */
+ movel %d0,%sp@- /* d0 */
+ moveml %d1-%d5/%a0-%a2,%sp@-
+.endm
+
+.macro SAVE_ALL_SYS
+ clrl %sp@- /* stk_adj */
+ movel %d0,%sp@- /* orig d0 */
+ movel %d0,%sp@- /* d0 */
+ moveml %d1-%d5/%a0-%a2,%sp@-
+.endm
+
+.macro RESTORE_ALL
+ moveml %sp@+,%a0-%a2/%d1-%d5
+ movel %sp@+,%d0
+ addql #4,%sp /* orig d0 */
+ addl %sp@+,%sp /* stk adj */
+ rte
+.endm
+
+
+.macro SAVE_SWITCH_STACK
+ moveml %a3-%a6/%d6-%d7,%sp@-
+.endm
+
+.macro RESTORE_SWITCH_STACK
+ moveml %sp@+,%a3-%a6/%d6-%d7
+.endm
+
+#endif /* !CONFIG_COLDFIRE */
+
+/*
+ * Register %a2 is reserved and set to current task on MMU enabled systems.
+ * Non-MMU systems do not reserve %a2 in this way, and this definition is
+ * not used for them.
+ */
+#define curptr a2
+
+#define GET_CURRENT(tmp) get_current tmp
+.macro get_current reg=%d0
+ movel %sp,\reg
+ andw #-THREAD_SIZE,\reg
+ movel \reg,%curptr
+ movel %curptr@,%curptr
+.endm
+
+#else /* C source */
+
+#define STR(X) STR1(X)
+#define STR1(X) #X
+
+#define SAVE_ALL_INT \
+ "clrl %%sp@-;" /* stk_adj */ \
+ "pea -1:w;" /* orig d0 = -1 */ \
+ "movel %%d0,%%sp@-;" /* d0 */ \
+ "moveml %%d1-%%d5/%%a0-%%a2,%%sp@-"
+
+#define GET_CURRENT(tmp) \
+ "movel %%sp,"#tmp"\n\t" \
+ "andw #-"STR(THREAD_SIZE)","#tmp"\n\t" \
+ "movel "#tmp",%%a2\n\t" \
+ "movel %%a2@,%%a2"
+
#endif
+
+#endif /* __M68K_ENTRY_H */
diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h
deleted file mode 100644
index 73b8c8fbed9..00000000000
--- a/arch/m68k/include/asm/entry_mm.h
+++ /dev/null
@@ -1,128 +0,0 @@
-#ifndef __M68K_ENTRY_H
-#define __M68K_ENTRY_H
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#ifdef __ASSEMBLY__
-#include <asm/thread_info.h>
-#endif
-
-/*
- * Stack layout in 'ret_from_exception':
- *
- * This allows access to the syscall arguments in registers d1-d5
- *
- * 0(sp) - d1
- * 4(sp) - d2
- * 8(sp) - d3
- * C(sp) - d4
- * 10(sp) - d5
- * 14(sp) - a0
- * 18(sp) - a1
- * 1C(sp) - a2
- * 20(sp) - d0
- * 24(sp) - orig_d0
- * 28(sp) - stack adjustment
- * 2C(sp) - sr
- * 2E(sp) - pc
- * 32(sp) - format & vector
- */
-
-/*
- * 97/05/14 Andreas: Register %a2 is now set to the current task throughout
- * the whole kernel.
- */
-
-/* the following macro is used when enabling interrupts */
-#if defined(MACH_ATARI_ONLY)
- /* block out HSYNC on the atari */
-#define ALLOWINT (~0x400)
-#define MAX_NOINT_IPL 3
-#else
- /* portable version */
-#define ALLOWINT (~0x700)
-#define MAX_NOINT_IPL 0
-#endif /* machine compilation types */
-
-#ifdef __ASSEMBLY__
-
-#define curptr a2
-
-LFLUSH_I_AND_D = 0x00000808
-
-#define SAVE_ALL_INT save_all_int
-#define SAVE_ALL_SYS save_all_sys
-#define RESTORE_ALL restore_all
-/*
- * This defines the normal kernel pt-regs layout.
- *
- * regs a3-a6 and d6-d7 are preserved by C code
- * the kernel doesn't mess with usp unless it needs to
- */
-
-/*
- * a -1 in the orig_d0 field signifies
- * that the stack frame is NOT for syscall
- */
-.macro save_all_int
- clrl %sp@- | stk_adj
- pea -1:w | orig d0
- movel %d0,%sp@- | d0
- moveml %d1-%d5/%a0-%a1/%curptr,%sp@-
-.endm
-
-.macro save_all_sys
- clrl %sp@- | stk_adj
- movel %d0,%sp@- | orig d0
- movel %d0,%sp@- | d0
- moveml %d1-%d5/%a0-%a1/%curptr,%sp@-
-.endm
-
-.macro restore_all
- moveml %sp@+,%a0-%a1/%curptr/%d1-%d5
- movel %sp@+,%d0
- addql #4,%sp | orig d0
- addl %sp@+,%sp | stk adj
- rte
-.endm
-
-#define SWITCH_STACK_SIZE (6*4+4) /* includes return address */
-
-#define SAVE_SWITCH_STACK save_switch_stack
-#define RESTORE_SWITCH_STACK restore_switch_stack
-#define GET_CURRENT(tmp) get_current tmp
-
-.macro save_switch_stack
- moveml %a3-%a6/%d6-%d7,%sp@-
-.endm
-
-.macro restore_switch_stack
- moveml %sp@+,%a3-%a6/%d6-%d7
-.endm
-
-.macro get_current reg=%d0
- movel %sp,\reg
- andw #-THREAD_SIZE,\reg
- movel \reg,%curptr
- movel %curptr@,%curptr
-.endm
-
-#else /* C source */
-
-#define STR(X) STR1(X)
-#define STR1(X) #X
-
-#define SAVE_ALL_INT \
- "clrl %%sp@-;" /* stk_adj */ \
- "pea -1:w;" /* orig d0 = -1 */ \
- "movel %%d0,%%sp@-;" /* d0 */ \
- "moveml %%d1-%%d5/%%a0-%%a2,%%sp@-"
-#define GET_CURRENT(tmp) \
- "movel %%sp,"#tmp"\n\t" \
- "andw #-"STR(THREAD_SIZE)","#tmp"\n\t" \
- "movel "#tmp",%%a2\n\t" \
- "movel %%a2@,%%a2"
-
-#endif
-
-#endif /* __M68K_ENTRY_H */
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
deleted file mode 100644
index 68611e3dbb1..00000000000
--- a/arch/m68k/include/asm/entry_no.h
+++ /dev/null
@@ -1,181 +0,0 @@
-#ifndef __M68KNOMMU_ENTRY_H
-#define __M68KNOMMU_ENTRY_H
-
-#include <asm/setup.h>
-#include <asm/page.h>
-
-/*
- * Stack layout in 'ret_from_exception':
- *
- * This allows access to the syscall arguments in registers d1-d5
- *
- * 0(sp) - d1
- * 4(sp) - d2
- * 8(sp) - d3
- * C(sp) - d4
- * 10(sp) - d5
- * 14(sp) - a0
- * 18(sp) - a1
- * 1C(sp) - a2
- * 20(sp) - d0
- * 24(sp) - orig_d0
- * 28(sp) - stack adjustment
- * 2C(sp) - [ sr ] [ format & vector ]
- * 2E(sp) - [ pc-hiword ] [ sr ]
- * 30(sp) - [ pc-loword ] [ pc-hiword ]
- * 32(sp) - [ format & vector ] [ pc-loword ]
- * ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^
- * M68K COLDFIRE
- */
-
-#define ALLOWINT (~0x700)
-
-#ifdef __ASSEMBLY__
-
-#define SWITCH_STACK_SIZE (6*4+4) /* Includes return address */
-
-/*
- * This defines the normal kernel pt-regs layout.
- *
- * regs are a2-a6 and d6-d7 preserved by C code
- * the kernel doesn't mess with usp unless it needs to
- */
-
-#ifdef CONFIG_COLDFIRE
-#ifdef CONFIG_COLDFIRE_SW_A7
-/*
- * This is made a little more tricky on older ColdFires. There is no
- * separate supervisor and user stack pointers. Need to artificially
- * construct a usp in software... When doing this we need to disable
- * interrupts, otherwise bad things will happen.
- */
-.globl sw_usp
-.globl sw_ksp
-
-.macro SAVE_ALL
- move #0x2700,%sr /* disable intrs */
- btst #5,%sp@(2) /* from user? */
- bnes 6f /* no, skip */
- movel %sp,sw_usp /* save user sp */
- addql #8,sw_usp /* remove exception */
- movel sw_ksp,%sp /* kernel sp */
- subql #8,%sp /* room for exception */
- clrl %sp@- /* stkadj */
- movel %d0,%sp@- /* orig d0 */
- movel %d0,%sp@- /* d0 */
- lea %sp@(-32),%sp /* space for 8 regs */
- moveml %d1-%d5/%a0-%a2,%sp@
- movel sw_usp,%a0 /* get usp */
- movel %a0@-,%sp@(PT_OFF_PC) /* copy exception program counter */
- movel %a0@-,%sp@(PT_OFF_FORMATVEC)/*copy exception format/vector/sr */
- bra 7f
- 6:
- clrl %sp@- /* stkadj */
- movel %d0,%sp@- /* orig d0 */
- movel %d0,%sp@- /* d0 */
- lea %sp@(-32),%sp /* space for 8 regs */
- moveml %d1-%d5/%a0-%a2,%sp@
- 7:
-.endm
-
-.macro RESTORE_USER
- move #0x2700,%sr /* disable intrs */
- movel sw_usp,%a0 /* get usp */
- movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */
- movel %sp@(PT_OFF_FORMATVEC),%a0@-/*copy exception format/vector/sr */
- moveml %sp@,%d1-%d5/%a0-%a2
- lea %sp@(32),%sp /* space for 8 regs */
- movel %sp@+,%d0
- addql #4,%sp /* orig d0 */
- addl %sp@+,%sp /* stkadj */
- addql #8,%sp /* remove exception */
- movel %sp,sw_ksp /* save ksp */
- subql #8,sw_usp /* set exception */
- movel sw_usp,%sp /* restore usp */
- rte
-.endm
-
-.macro RDUSP
- movel sw_usp,%a3
-.endm
-
-.macro WRUSP
- movel %a3,sw_usp
-.endm
-
-#else /* !CONFIG_COLDFIRE_SW_A7 */
-/*
- * Modern ColdFire parts have separate supervisor and user stack
- * pointers. Simple load and restore macros for this case.
- */
-.macro SAVE_ALL
- move #0x2700,%sr /* disable intrs */
- clrl %sp@- /* stkadj */
- movel %d0,%sp@- /* orig d0 */
- movel %d0,%sp@- /* d0 */
- lea %sp@(-32),%sp /* space for 8 regs */
- moveml %d1-%d5/%a0-%a2,%sp@
-.endm
-
-.macro RESTORE_USER
- moveml %sp@,%d1-%d5/%a0-%a2
- lea %sp@(32),%sp /* space for 8 regs */
- movel %sp@+,%d0
- addql #4,%sp /* orig d0 */
- addl %sp@+,%sp /* stkadj */
- rte
-.endm
-
-.macro RDUSP
- /*move %usp,%a3*/
- .word 0x4e6b
-.endm
-
-.macro WRUSP
- /*move %a3,%usp*/
- .word 0x4e63
-.endm
-
-#endif /* !CONFIG_COLDFIRE_SW_A7 */
-
-.macro SAVE_SWITCH_STACK
- lea %sp@(-24),%sp /* 6 regs */
- moveml %a3-%a6/%d6-%d7,%sp@
-.endm
-
-.macro RESTORE_SWITCH_STACK
- moveml %sp@,%a3-%a6/%d6-%d7
- lea %sp@(24),%sp /* 6 regs */
-.endm
-
-#else /* !CONFIG_COLDFIRE */
-
-/*
- * Standard 68k interrupt entry and exit macros.
- */
-.macro SAVE_ALL
- clrl %sp@- /* stkadj */
- movel %d0,%sp@- /* orig d0 */
- movel %d0,%sp@- /* d0 */
- moveml %d1-%d5/%a0-%a2,%sp@-
-.endm
-
-.macro RESTORE_ALL
- moveml %sp@+,%a0-%a2/%d1-%d5
- movel %sp@+,%d0
- addql #4,%sp /* orig d0 */
- addl %sp@+,%sp /* stkadj */
- rte
-.endm
-
-.macro SAVE_SWITCH_STACK
- moveml %a3-%a6/%d6-%d7,%sp@-
-.endm
-
-.macro RESTORE_SWITCH_STACK
- moveml %sp@+,%a3-%a6/%d6-%d7
-.endm
-
-#endif /* !COLDFIRE_SW_A7 */
-#endif /* __ASSEMBLY__ */
-#endif /* __M68KNOMMU_ENTRY_H */
diff --git a/arch/m68k/include/asm/m520xsim.h b/arch/m68k/include/asm/m520xsim.h
index b6bf2c518ba..eda62de7e60 100644
--- a/arch/m68k/include/asm/m520xsim.h
+++ b/arch/m68k/include/asm/m520xsim.h
@@ -90,15 +90,13 @@
#define MCFGPIO_PDDR_FECH 0xFC0A4013
#define MCFGPIO_PDDR_FECL 0xFC0A4014
-#define MCFGPIO_PPDSDR_BUSCTL 0xFC0A401A
-#define MCFGPIO_PPDSDR_BE 0xFC0A401B
-#define MCFGPIO_PPDSDR_CS 0xFC0A401C
-#define MCFGPIO_PPDSDR_FECI2C 0xFC0A401D
-#define MCFGPIO_PPDSDR_QSPI 0xFC0A401E
-#define MCFGPIO_PPDSDR_TIMER 0xFC0A401F
-#define MCFGPIO_PPDSDR_UART 0xFC0A4021
-#define MCFGPIO_PPDSDR_FECH 0xFC0A4021
-#define MCFGPIO_PPDSDR_FECL 0xFC0A4022
+#define MCFGPIO_PPDSDR_CS 0xFC0A401A
+#define MCFGPIO_PPDSDR_FECI2C 0xFC0A401B
+#define MCFGPIO_PPDSDR_QSPI 0xFC0A401C
+#define MCFGPIO_PPDSDR_TIMER 0xFC0A401D
+#define MCFGPIO_PPDSDR_UART 0xFC0A401E
+#define MCFGPIO_PPDSDR_FECH 0xFC0A401F
+#define MCFGPIO_PPDSDR_FECL 0xFC0A4020
#define MCFGPIO_PCLRR_BUSCTL 0xFC0A4024
#define MCFGPIO_PCLRR_BE 0xFC0A4025
@@ -113,11 +111,11 @@
/*
* Generic GPIO support
*/
-#define MCFGPIO_PODR MCFGPIO_PODR_BUSCTL
-#define MCFGPIO_PDDR MCFGPIO_PDDR_BUSCTL
-#define MCFGPIO_PPDR MCFGPIO_PPDSDR_BUSCTL
-#define MCFGPIO_SETR MCFGPIO_PPDSDR_BUSCTL
-#define MCFGPIO_CLRR MCFGPIO_PCLRR_BUSCTL
+#define MCFGPIO_PODR MCFGPIO_PODR_CS
+#define MCFGPIO_PDDR MCFGPIO_PDDR_CS
+#define MCFGPIO_PPDR MCFGPIO_PPDSDR_CS
+#define MCFGPIO_SETR MCFGPIO_PPDSDR_CS
+#define MCFGPIO_CLRR MCFGPIO_PCLRR_CS
#define MCFGPIO_PIN_MAX 80
#define MCFGPIO_IRQ_MAX 8
diff --git a/arch/m68k/include/asm/mcfqspi.h b/arch/m68k/include/asm/mcfqspi.h
index 39d90d51111..7fe631972f1 100644
--- a/arch/m68k/include/asm/mcfqspi.h
+++ b/arch/m68k/include/asm/mcfqspi.h
@@ -24,9 +24,11 @@
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
#define MCFQSPI_IOBASE (MCF_IPSBAR + 0x340)
#elif defined(CONFIG_M5249)
-#define MCFQSPI_IOBASE (MCF_MBAR + 0x300)
-#elif defined(CONFIG_M520x) || defined(CONFIG_M532x)
-#define MCFQSPI_IOBASE 0xFC058000
+#define MCFQSPI_IOBASE (MCF_MBAR + 0x300)
+#elif defined(CONFIG_M520x)
+#define MCFQSPI_IOBASE 0xFC05C000
+#elif defined(CONFIG_M532x)
+#define MCFQSPI_IOBASE 0xFC058000
#endif
#define MCFQSPI_IOSIZE 0x40
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index 90595721185..a8d1c60eb9c 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -5,6 +5,9 @@
extern unsigned long memory_start;
extern unsigned long memory_end;
+extern unsigned long _rambase;
+extern unsigned long _ramstart;
+extern unsigned long _ramend;
#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
#define free_user_page(page, addr) free_page(addr)
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index d8ef53ac03f..568facf3027 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -135,6 +135,12 @@ do { \
wrusp(_usp); \
} while(0)
+static inline int handle_kernel_fault(struct pt_regs *regs)
+{
+ /* Any fault in kernel is fatal on non-mmu */
+ return 0;
+}
+
#endif
/* Forward declaration, a strange C thing */
diff --git a/arch/m68k/include/asm/sections.h b/arch/m68k/include/asm/sections.h
index d64967ecfec..5277e52715e 100644
--- a/arch/m68k/include/asm/sections.h
+++ b/arch/m68k/include/asm/sections.h
@@ -3,4 +3,6 @@
#include <asm-generic/sections.h>
+extern char _sbss[], _ebss[];
+
#endif /* _ASM_M68K_SECTIONS_H */
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index c482ebc9dd5..e7f0f2e5ad4 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -1,5 +1,21 @@
-ifdef CONFIG_MMU
-include arch/m68k/kernel/Makefile_mm
-else
-include arch/m68k/kernel/Makefile_no
+#
+# Makefile for the linux kernel.
+#
+
+extra-$(CONFIG_MMU) := head.o
+extra-$(CONFIG_SUN3) := sun3-head.o
+extra-y += vmlinux.lds
+
+obj-y := entry.o m68k_ksyms.o module.o process.o ptrace.o setup.o signal.o \
+ sys_m68k.o syscalltable.o time.o traps.o
+
+obj-$(CONFIG_MMU) += ints.o devres.o vectors.o
+devres-$(CONFIG_MMU) = ../../../kernel/irq/devres.o
+
+ifndef CONFIG_MMU_SUN3
+obj-y += dma.o
endif
+ifndef CONFIG_MMU
+obj-y += init_task.o irq.o
+endif
+
diff --git a/arch/m68k/kernel/Makefile_mm b/arch/m68k/kernel/Makefile_mm
deleted file mode 100644
index aced6780457..00000000000
--- a/arch/m68k/kernel/Makefile_mm
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-ifndef CONFIG_SUN3
- extra-y := head.o
-else
- extra-y := sun3-head.o
-endif
-extra-y += vmlinux.lds
-
-obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
- sys_m68k.o time.o setup.o m68k_ksyms.o devres.o syscalltable.o
-
-devres-y = ../../../kernel/irq/devres.o
-
-obj-y$(CONFIG_MMU_SUN3) += dma.o # no, it's not a typo
diff --git a/arch/m68k/kernel/Makefile_no b/arch/m68k/kernel/Makefile_no
deleted file mode 100644
index 37c3fc074c0..00000000000
--- a/arch/m68k/kernel/Makefile_no
+++ /dev/null
@@ -1,10 +0,0 @@
-#
-# Makefile for arch/m68knommu/kernel.
-#
-
-extra-y := vmlinux.lds
-
-obj-y += dma.o entry.o init_task.o irq.o m68k_ksyms.o process.o ptrace.o \
- setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o
-
-obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/m68k/kernel/entry_no.S b/arch/m68k/kernel/entry_no.S
index 5f0f6b598b5..1b4289061a6 100644
--- a/arch/m68k/kernel/entry_no.S
+++ b/arch/m68k/kernel/entry_no.S
@@ -43,7 +43,7 @@
.globl sys_vfork
ENTRY(buserr)
- SAVE_ALL
+ SAVE_ALL_INT
moveq #-1,%d0
movel %d0,%sp@(PT_OFF_ORIG_D0)
movel %sp,%sp@- /* stack frame pointer argument */
@@ -52,7 +52,7 @@ ENTRY(buserr)
jra ret_from_exception
ENTRY(trap)
- SAVE_ALL
+ SAVE_ALL_INT
moveq #-1,%d0
movel %d0,%sp@(PT_OFF_ORIG_D0)
movel %sp,%sp@- /* stack frame pointer argument */
@@ -64,7 +64,7 @@ ENTRY(trap)
.globl dbginterrupt
ENTRY(dbginterrupt)
- SAVE_ALL
+ SAVE_ALL_INT
moveq #-1,%d0
movel %d0,%sp@(PT_OFF_ORIG_D0)
movel %sp,%sp@- /* stack frame pointer argument */
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 16b2de7f510..2ed8c0fb151 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -36,6 +36,7 @@
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/pgtable.h>
+#include <asm/sections.h>
unsigned long memory_start;
unsigned long memory_end;
@@ -80,9 +81,6 @@ void (*mach_power_off)(void);
#define CPU_INSTR_PER_JIFFY 16
#endif
-extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end;
-extern int _ramstart, _ramend;
-
#if defined(CONFIG_UBOOT)
/*
* parse_uboot_commandline
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index c98add3f5f0..89362f2bb56 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -1,5 +1,1107 @@
-#ifdef CONFIG_MMU
-#include "traps_mm.c"
+/*
+ * linux/arch/m68k/kernel/traps.c
+ *
+ * Copyright (C) 1993, 1994 by Hamish Macdonald
+ *
+ * 68040 fixes by Michael Rausch
+ * 68040 fixes by Martin Apel
+ * 68040 fixes and writeback by Richard Zidlicky
+ * 68060 fixes by Roman Hodek
+ * 68060 fixes by Jesper Skov
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Sets up all exception vectors
+ */
+
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/user.h>
+#include <linux/string.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/kallsyms.h>
+
+#include <asm/setup.h>
+#include <asm/fpu.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/traps.h>
+#include <asm/pgalloc.h>
+#include <asm/machdep.h>
+#include <asm/siginfo.h>
+
+
+static const char *vec_names[] = {
+ [VEC_RESETSP] = "RESET SP",
+ [VEC_RESETPC] = "RESET PC",
+ [VEC_BUSERR] = "BUS ERROR",
+ [VEC_ADDRERR] = "ADDRESS ERROR",
+ [VEC_ILLEGAL] = "ILLEGAL INSTRUCTION",
+ [VEC_ZERODIV] = "ZERO DIVIDE",
+ [VEC_CHK] = "CHK",
+ [VEC_TRAP] = "TRAPcc",
+ [VEC_PRIV] = "PRIVILEGE VIOLATION",
+ [VEC_TRACE] = "TRACE",
+ [VEC_LINE10] = "LINE 1010",
+ [VEC_LINE11] = "LINE 1111",
+ [VEC_RESV12] = "UNASSIGNED RESERVED 12",
+ [VEC_COPROC] = "COPROCESSOR PROTOCOL VIOLATION",
+ [VEC_FORMAT] = "FORMAT ERROR",
+ [VEC_UNINT] = "UNINITIALIZED INTERRUPT",
+ [VEC_RESV16] = "UNASSIGNED RESERVED 16",
+ [VEC_RESV17] = "UNASSIGNED RESERVED 17",
+ [VEC_RESV18] = "UNASSIGNED RESERVED 18",
+ [VEC_RESV19] = "UNASSIGNED RESERVED 19",
+ [VEC_RESV20] = "UNASSIGNED RESERVED 20",
+ [VEC_RESV21] = "UNASSIGNED RESERVED 21",
+ [VEC_RESV22] = "UNASSIGNED RESERVED 22",
+ [VEC_RESV23] = "UNASSIGNED RESERVED 23",
+ [VEC_SPUR] = "SPURIOUS INTERRUPT",
+ [VEC_INT1] = "LEVEL 1 INT",
+ [VEC_INT2] = "LEVEL 2 INT",
+ [VEC_INT3] = "LEVEL 3 INT",
+ [VEC_INT4] = "LEVEL 4 INT",
+ [VEC_INT5] = "LEVEL 5 INT",
+ [VEC_INT6] = "LEVEL 6 INT",
+ [VEC_INT7] = "LEVEL 7 INT",
+ [VEC_SYS] = "SYSCALL",
+ [VEC_TRAP1] = "TRAP #1",
+ [VEC_TRAP2] = "TRAP #2",
+ [VEC_TRAP3] = "TRAP #3",
+ [VEC_TRAP4] = "TRAP #4",
+ [VEC_TRAP5] = "TRAP #5",
+ [VEC_TRAP6] = "TRAP #6",
+ [VEC_TRAP7] = "TRAP #7",
+ [VEC_TRAP8] = "TRAP #8",
+ [VEC_TRAP9] = "TRAP #9",
+ [VEC_TRAP10] = "TRAP #10",
+ [VEC_TRAP11] = "TRAP #11",
+ [VEC_TRAP12] = "TRAP #12",
+ [VEC_TRAP13] = "TRAP #13",
+ [VEC_TRAP14] = "TRAP #14",
+ [VEC_TRAP15] = "TRAP #15",
+ [VEC_FPBRUC] = "FPCP BSUN",
+ [VEC_FPIR] = "FPCP INEXACT",
+ [VEC_FPDIVZ] = "FPCP DIV BY 0",
+ [VEC_FPUNDER] = "FPCP UNDERFLOW",
+ [VEC_FPOE] = "FPCP OPERAND ERROR",
+ [VEC_FPOVER] = "FPCP OVERFLOW",
+ [VEC_FPNAN] = "FPCP SNAN",
+ [VEC_FPUNSUP] = "FPCP UNSUPPORTED OPERATION",
+ [VEC_MMUCFG] = "MMU CONFIGURATION ERROR",
+ [VEC_MMUILL] = "MMU ILLEGAL OPERATION ERROR",
+ [VEC_MMUACC] = "MMU ACCESS LEVEL VIOLATION ERROR",
+ [VEC_RESV59] = "UNASSIGNED RESERVED 59",
+ [VEC_UNIMPEA] = "UNASSIGNED RESERVED 60",
+ [VEC_UNIMPII] = "UNASSIGNED RESERVED 61",
+ [VEC_RESV62] = "UNASSIGNED RESERVED 62",
+ [VEC_RESV63] = "UNASSIGNED RESERVED 63",
+};
+
+static const char *space_names[] = {
+ [0] = "Space 0",
+ [USER_DATA] = "User Data",
+ [USER_PROGRAM] = "User Program",
+#ifndef CONFIG_SUN3
+ [3] = "Space 3",
#else
-#include "traps_no.c"
+ [FC_CONTROL] = "Control",
+#endif
+ [4] = "Space 4",
+ [SUPER_DATA] = "Super Data",
+ [SUPER_PROGRAM] = "Super Program",
+ [CPU_SPACE] = "CPU"
+};
+
+void die_if_kernel(char *,struct pt_regs *,int);
+asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code);
+int send_fault_sig(struct pt_regs *regs);
+
+asmlinkage void trap_c(struct frame *fp);
+
+#if defined (CONFIG_M68060)
+static inline void access_error060 (struct frame *fp)
+{
+ unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */
+
+#ifdef DEBUG
+ printk("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);
+#endif
+
+ if (fslw & MMU060_BPE) {
+ /* branch prediction error -> clear branch cache */
+ __asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
+ "orl #0x00400000,%/d0\n\t"
+ "movec %/d0,%/cacr"
+ : : : "d0" );
+ /* return if there's no other error */
+ if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE))
+ return;
+ }
+
+ if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) {
+ unsigned long errorcode;
+ unsigned long addr = fp->un.fmt4.effaddr;
+
+ if (fslw & MMU060_MA)
+ addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;
+
+ errorcode = 1;
+ if (fslw & MMU060_DESC_ERR) {
+ __flush_tlb040_one(addr);
+ errorcode = 0;
+ }
+ if (fslw & MMU060_W)
+ errorcode |= 2;
+#ifdef DEBUG
+ printk("errorcode = %d\n", errorcode );
+#endif
+ do_page_fault(&fp->ptregs, addr, errorcode);
+ } else if (fslw & (MMU060_SEE)){
+ /* Software Emulation Error.
+ * fault during mem_read/mem_write in ifpsp060/os.S
+ */
+ send_fault_sig(&fp->ptregs);
+ } else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
+ send_fault_sig(&fp->ptregs) > 0) {
+ printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr);
+ printk( "68060 access error, fslw=%lx\n", fslw );
+ trap_c( fp );
+ }
+}
+#endif /* CONFIG_M68060 */
+
+#if defined (CONFIG_M68040)
+static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
+{
+ unsigned long mmusr;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(MAKE_MM_SEG(wbs));
+
+ if (iswrite)
+ asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
+ else
+ asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr));
+
+ asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
+
+ set_fs(old_fs);
+
+ return mmusr;
+}
+
+static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
+ unsigned long wbd)
+{
+ int res = 0;
+ mm_segment_t old_fs = get_fs();
+
+ /* set_fs can not be moved, otherwise put_user() may oops */
+ set_fs(MAKE_MM_SEG(wbs));
+
+ switch (wbs & WBSIZ_040) {
+ case BA_SIZE_BYTE:
+ res = put_user(wbd & 0xff, (char __user *)wba);
+ break;
+ case BA_SIZE_WORD:
+ res = put_user(wbd & 0xffff, (short __user *)wba);
+ break;
+ case BA_SIZE_LONG:
+ res = put_user(wbd, (int __user *)wba);
+ break;
+ }
+
+ /* set_fs can not be moved, otherwise put_user() may oops */
+ set_fs(old_fs);
+
+
+#ifdef DEBUG
+ printk("do_040writeback1, res=%d\n",res);
+#endif
+
+ return res;
+}
+
+/* after an exception in a writeback the stack frame corresponding
+ * to that exception is discarded, set a few bits in the old frame
+ * to simulate what it should look like
+ */
+static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs)
+{
+ fp->un.fmt7.faddr = wba;
+ fp->un.fmt7.ssw = wbs & 0xff;
+ if (wba != current->thread.faddr)
+ fp->un.fmt7.ssw |= MA_040;
+}
+
+static inline void do_040writebacks(struct frame *fp)
+{
+ int res = 0;
+#if 0
+ if (fp->un.fmt7.wb1s & WBV_040)
+ printk("access_error040: cannot handle 1st writeback. oops.\n");
+#endif
+
+ if ((fp->un.fmt7.wb2s & WBV_040) &&
+ !(fp->un.fmt7.wb2s & WBTT_040)) {
+ res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a,
+ fp->un.fmt7.wb2d);
+ if (res)
+ fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s);
+ else
+ fp->un.fmt7.wb2s = 0;
+ }
+
+ /* do the 2nd wb only if the first one was successful (except for a kernel wb) */
+ if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) {
+ res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a,
+ fp->un.fmt7.wb3d);
+ if (res)
+ {
+ fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s);
+
+ fp->un.fmt7.wb2s = fp->un.fmt7.wb3s;
+ fp->un.fmt7.wb3s &= (~WBV_040);
+ fp->un.fmt7.wb2a = fp->un.fmt7.wb3a;
+ fp->un.fmt7.wb2d = fp->un.fmt7.wb3d;
+ }
+ else
+ fp->un.fmt7.wb3s = 0;
+ }
+
+ if (res)
+ send_fault_sig(&fp->ptregs);
+}
+
+/*
+ * called from sigreturn(), must ensure userspace code didn't
+ * manipulate exception frame to circumvent protection, then complete
+ * pending writebacks
+ * we just clear TM2 to turn it into a userspace access
+ */
+asmlinkage void berr_040cleanup(struct frame *fp)
+{
+ fp->un.fmt7.wb2s &= ~4;
+ fp->un.fmt7.wb3s &= ~4;
+
+ do_040writebacks(fp);
+}
+
+static inline void access_error040(struct frame *fp)
+{
+ unsigned short ssw = fp->un.fmt7.ssw;
+ unsigned long mmusr;
+
+#ifdef DEBUG
+ printk("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
+ printk("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
+ fp->un.fmt7.wb2s, fp->un.fmt7.wb3s);
+ printk ("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
+ fp->un.fmt7.wb2a, fp->un.fmt7.wb3a,
+ fp->un.fmt7.wb2d, fp->un.fmt7.wb3d);
+#endif
+
+ if (ssw & ATC_040) {
+ unsigned long addr = fp->un.fmt7.faddr;
+ unsigned long errorcode;
+
+ /*
+ * The MMU status has to be determined AFTER the address
+ * has been corrected if there was a misaligned access (MA).
+ */
+ if (ssw & MA_040)
+ addr = (addr + 7) & -8;
+
+ /* MMU error, get the MMUSR info for this access */
+ mmusr = probe040(!(ssw & RW_040), addr, ssw);
+#ifdef DEBUG
+ printk("mmusr = %lx\n", mmusr);
+#endif
+ errorcode = 1;
+ if (!(mmusr & MMU_R_040)) {
+ /* clear the invalid atc entry */
+ __flush_tlb040_one(addr);
+ errorcode = 0;
+ }
+
+ /* despite what documentation seems to say, RMW
+ * accesses have always both the LK and RW bits set */
+ if (!(ssw & RW_040) || (ssw & LK_040))
+ errorcode |= 2;
+
+ if (do_page_fault(&fp->ptregs, addr, errorcode)) {
+#ifdef DEBUG
+ printk("do_page_fault() !=0\n");
+#endif
+ if (user_mode(&fp->ptregs)){
+ /* delay writebacks after signal delivery */
+#ifdef DEBUG
+ printk(".. was usermode - return\n");
+#endif
+ return;
+ }
+ /* disable writeback into user space from kernel
+ * (if do_page_fault didn't fix the mapping,
+ * the writeback won't do good)
+ */
+disable_wb:
+#ifdef DEBUG
+ printk(".. disabling wb2\n");
+#endif
+ if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
+ fp->un.fmt7.wb2s &= ~WBV_040;
+ if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr)
+ fp->un.fmt7.wb3s &= ~WBV_040;
+ }
+ } else {
+ /* In case of a bus error we either kill the process or expect
+ * the kernel to catch the fault, which then is also responsible
+ * for cleaning up the mess.
+ */
+ current->thread.signo = SIGBUS;
+ current->thread.faddr = fp->un.fmt7.faddr;
+ if (send_fault_sig(&fp->ptregs) >= 0)
+ printk("68040 bus error (ssw=%x, faddr=%lx)\n", ssw,
+ fp->un.fmt7.faddr);
+ goto disable_wb;
+ }
+
+ do_040writebacks(fp);
+}
+#endif /* CONFIG_M68040 */
+
+#if defined(CONFIG_SUN3)
+#include <asm/sun3mmu.h>
+
+extern int mmu_emu_handle_fault (unsigned long, int, int);
+
+/* sun3 version of bus_error030 */
+
+static inline void bus_error030 (struct frame *fp)
+{
+ unsigned char buserr_type = sun3_get_buserr ();
+ unsigned long addr, errorcode;
+ unsigned short ssw = fp->un.fmtb.ssw;
+ extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
+
+#ifdef DEBUG
+ if (ssw & (FC | FB))
+ printk ("Instruction fault at %#010lx\n",
+ ssw & FC ?
+ fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
+ :
+ fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
+ if (ssw & DF)
+ printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ ssw & RW ? "read" : "write",
+ fp->un.fmtb.daddr,
+ space_names[ssw & DFC], fp->ptregs.pc);
+#endif
+
+ /*
+ * Check if this page should be demand-mapped. This needs to go before
+ * the testing for a bad kernel-space access (demand-mapping applies
+ * to kernel accesses too).
+ */
+
+ if ((ssw & DF)
+ && (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) {
+ if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0))
+ return;
+ }
+
+ /* Check for kernel-space pagefault (BAD). */
+ if (fp->ptregs.sr & PS_S) {
+ /* kernel fault must be a data fault to user space */
+ if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) {
+ // try checking the kernel mappings before surrender
+ if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1))
+ return;
+ /* instruction fault or kernel data fault! */
+ if (ssw & (FC | FB))
+ printk ("Instruction fault at %#010lx\n",
+ fp->ptregs.pc);
+ if (ssw & DF) {
+ /* was this fault incurred testing bus mappings? */
+ if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) &&
+ (fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) {
+ send_fault_sig(&fp->ptregs);
+ return;
+ }
+
+ printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ ssw & RW ? "read" : "write",
+ fp->un.fmtb.daddr,
+ space_names[ssw & DFC], fp->ptregs.pc);
+ }
+ printk ("BAD KERNEL BUSERR\n");
+
+ die_if_kernel("Oops", &fp->ptregs,0);
+ force_sig(SIGKILL, current);
+ return;
+ }
+ } else {
+ /* user fault */
+ if (!(ssw & (FC | FB)) && !(ssw & DF))
+ /* not an instruction fault or data fault! BAD */
+ panic ("USER BUSERR w/o instruction or data fault");
+ }
+
+
+ /* First handle the data fault, if any. */
+ if (ssw & DF) {
+ addr = fp->un.fmtb.daddr;
+
+// errorcode bit 0: 0 -> no page 1 -> protection fault
+// errorcode bit 1: 0 -> read fault 1 -> write fault
+
+// (buserr_type & SUN3_BUSERR_PROTERR) -> protection fault
+// (buserr_type & SUN3_BUSERR_INVALID) -> invalid page fault
+
+ if (buserr_type & SUN3_BUSERR_PROTERR)
+ errorcode = 0x01;
+ else if (buserr_type & SUN3_BUSERR_INVALID)
+ errorcode = 0x00;
+ else {
+#ifdef DEBUG
+ printk ("*** unexpected busfault type=%#04x\n", buserr_type);
+ printk ("invalid %s access at %#lx from pc %#lx\n",
+ !(ssw & RW) ? "write" : "read", addr,
+ fp->ptregs.pc);
+#endif
+ die_if_kernel ("Oops", &fp->ptregs, buserr_type);
+ force_sig (SIGBUS, current);
+ return;
+ }
+
+//todo: wtf is RM bit? --m
+ if (!(ssw & RW) || ssw & RM)
+ errorcode |= 0x02;
+
+ /* Handle page fault. */
+ do_page_fault (&fp->ptregs, addr, errorcode);
+
+ /* Retry the data fault now. */
+ return;
+ }
+
+ /* Now handle the instruction fault. */
+
+ /* Get the fault address. */
+ if (fp->ptregs.format == 0xA)
+ addr = fp->ptregs.pc + 4;
+ else
+ addr = fp->un.fmtb.baddr;
+ if (ssw & FC)
+ addr -= 2;
+
+ if (buserr_type & SUN3_BUSERR_INVALID) {
+ if (!mmu_emu_handle_fault (fp->un.fmtb.daddr, 1, 0))
+ do_page_fault (&fp->ptregs, addr, 0);
+ } else {
+#ifdef DEBUG
+ printk ("protection fault on insn access (segv).\n");
+#endif
+ force_sig (SIGSEGV, current);
+ }
+}
+#else
+#if defined(CPU_M68020_OR_M68030)
+static inline void bus_error030 (struct frame *fp)
+{
+ volatile unsigned short temp;
+ unsigned short mmusr;
+ unsigned long addr, errorcode;
+ unsigned short ssw = fp->un.fmtb.ssw;
+#ifdef DEBUG
+ unsigned long desc;
+
+ printk ("pid = %x ", current->pid);
+ printk ("SSW=%#06x ", ssw);
+
+ if (ssw & (FC | FB))
+ printk ("Instruction fault at %#010lx\n",
+ ssw & FC ?
+ fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
+ :
+ fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
+ if (ssw & DF)
+ printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ ssw & RW ? "read" : "write",
+ fp->un.fmtb.daddr,
+ space_names[ssw & DFC], fp->ptregs.pc);
+#endif
+
+ /* ++andreas: If a data fault and an instruction fault happen
+ at the same time map in both pages. */
+
+ /* First handle the data fault, if any. */
+ if (ssw & DF) {
+ addr = fp->un.fmtb.daddr;
+
+#ifdef DEBUG
+ asm volatile ("ptestr %3,%2@,#7,%0\n\t"
+ "pmove %%psr,%1@"
+ : "=a&" (desc)
+ : "a" (&temp), "a" (addr), "d" (ssw));
+#else
+ asm volatile ("ptestr %2,%1@,#7\n\t"
+ "pmove %%psr,%0@"
+ : : "a" (&temp), "a" (addr), "d" (ssw));
+#endif
+ mmusr = temp;
+
+#ifdef DEBUG
+ printk("mmusr is %#x for addr %#lx in task %p\n",
+ mmusr, addr, current);
+ printk("descriptor address is %#lx, contents %#lx\n",
+ __va(desc), *(unsigned long *)__va(desc));
+#endif
+
+ errorcode = (mmusr & MMU_I) ? 0 : 1;
+ if (!(ssw & RW) || (ssw & RM))
+ errorcode |= 2;
+
+ if (mmusr & (MMU_I | MMU_WP)) {
+ if (ssw & 4) {
+ printk("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ ssw & RW ? "read" : "write",
+ fp->un.fmtb.daddr,
+ space_names[ssw & DFC], fp->ptregs.pc);
+ goto buserr;
+ }
+ /* Don't try to do anything further if an exception was
+ handled. */
+ if (do_page_fault (&fp->ptregs, addr, errorcode) < 0)
+ return;
+ } else if (!(mmusr & MMU_I)) {
+ /* probably a 020 cas fault */
+ if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
+ printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr);
+ } else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
+ printk("invalid %s access at %#lx from pc %#lx\n",
+ !(ssw & RW) ? "write" : "read", addr,
+ fp->ptregs.pc);
+ die_if_kernel("Oops",&fp->ptregs,mmusr);
+ force_sig(SIGSEGV, current);
+ return;
+ } else {
+#if 0
+ static volatile long tlong;
+#endif
+
+ printk("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
+ !(ssw & RW) ? "write" : "read", addr,
+ fp->ptregs.pc, ssw);
+ asm volatile ("ptestr #1,%1@,#0\n\t"
+ "pmove %%psr,%0@"
+ : /* no outputs */
+ : "a" (&temp), "a" (addr));
+ mmusr = temp;
+
+ printk ("level 0 mmusr is %#x\n", mmusr);
+#if 0
+ asm volatile ("pmove %%tt0,%0@"
+ : /* no outputs */
+ : "a" (&tlong));
+ printk("tt0 is %#lx, ", tlong);
+ asm volatile ("pmove %%tt1,%0@"
+ : /* no outputs */
+ : "a" (&tlong));
+ printk("tt1 is %#lx\n", tlong);
+#endif
+#ifdef DEBUG
+ printk("Unknown SIGSEGV - 1\n");
+#endif
+ die_if_kernel("Oops",&fp->ptregs,mmusr);
+ force_sig(SIGSEGV, current);
+ return;
+ }
+
+ /* setup an ATC entry for the access about to be retried */
+ if (!(ssw & RW) || (ssw & RM))
+ asm volatile ("ploadw %1,%0@" : /* no outputs */
+ : "a" (addr), "d" (ssw));
+ else
+ asm volatile ("ploadr %1,%0@" : /* no outputs */
+ : "a" (addr), "d" (ssw));
+ }
+
+ /* Now handle the instruction fault. */
+
+ if (!(ssw & (FC|FB)))
+ return;
+
+ if (fp->ptregs.sr & PS_S) {
+ printk("Instruction fault at %#010lx\n",
+ fp->ptregs.pc);
+ buserr:
+ printk ("BAD KERNEL BUSERR\n");
+ die_if_kernel("Oops",&fp->ptregs,0);
+ force_sig(SIGKILL, current);
+ return;
+ }
+
+ /* get the fault address */
+ if (fp->ptregs.format == 10)
+ addr = fp->ptregs.pc + 4;
+ else
+ addr = fp->un.fmtb.baddr;
+ if (ssw & FC)
+ addr -= 2;
+
+ if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0)
+ /* Insn fault on same page as data fault. But we
+ should still create the ATC entry. */
+ goto create_atc_entry;
+
+#ifdef DEBUG
+ asm volatile ("ptestr #1,%2@,#7,%0\n\t"
+ "pmove %%psr,%1@"
+ : "=a&" (desc)
+ : "a" (&temp), "a" (addr));
+#else
+ asm volatile ("ptestr #1,%1@,#7\n\t"
+ "pmove %%psr,%0@"
+ : : "a" (&temp), "a" (addr));
+#endif
+ mmusr = temp;
+
+#ifdef DEBUG
+ printk ("mmusr is %#x for addr %#lx in task %p\n",
+ mmusr, addr, current);
+ printk ("descriptor address is %#lx, contents %#lx\n",
+ __va(desc), *(unsigned long *)__va(desc));
+#endif
+
+ if (mmusr & MMU_I)
+ do_page_fault (&fp->ptregs, addr, 0);
+ else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
+ printk ("invalid insn access at %#lx from pc %#lx\n",
+ addr, fp->ptregs.pc);
+#ifdef DEBUG
+ printk("Unknown SIGSEGV - 2\n");
+#endif
+ die_if_kernel("Oops",&fp->ptregs,mmusr);
+ force_sig(SIGSEGV, current);
+ return;
+ }
+
+create_atc_entry:
+ /* setup an ATC entry for the access about to be retried */
+ asm volatile ("ploadr #2,%0@" : /* no outputs */
+ : "a" (addr));
+}
+#endif /* CPU_M68020_OR_M68030 */
+#endif /* !CONFIG_SUN3 */
+
+asmlinkage void buserr_c(struct frame *fp)
+{
+ /* Only set esp0 if coming from user mode */
+ if (user_mode(&fp->ptregs))
+ current->thread.esp0 = (unsigned long) fp;
+
+#ifdef DEBUG
+ printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
+#endif
+
+ switch (fp->ptregs.format) {
+#if defined (CONFIG_M68060)
+ case 4: /* 68060 access error */
+ access_error060 (fp);
+ break;
+#endif
+#if defined (CONFIG_M68040)
+ case 0x7: /* 68040 access error */
+ access_error040 (fp);
+ break;
+#endif
+#if defined (CPU_M68020_OR_M68030)
+ case 0xa:
+ case 0xb:
+ bus_error030 (fp);
+ break;
+#endif
+ default:
+ die_if_kernel("bad frame format",&fp->ptregs,0);
+#ifdef DEBUG
+ printk("Unknown SIGSEGV - 4\n");
+#endif
+ force_sig(SIGSEGV, current);
+ }
+}
+
+
+static int kstack_depth_to_print = 48;
+
+void show_trace(unsigned long *stack)
+{
+ unsigned long *endstack;
+ unsigned long addr;
+ int i;
+
+ printk("Call Trace:");
+ addr = (unsigned long)stack + THREAD_SIZE - 1;
+ endstack = (unsigned long *)(addr & -THREAD_SIZE);
+ i = 0;
+ while (stack + 1 <= endstack) {
+ addr = *stack++;
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+ if (__kernel_text_address(addr)) {
+#ifndef CONFIG_KALLSYMS
+ if (i % 5 == 0)
+ printk("\n ");
+#endif
+ printk(" [<%08lx>] %pS\n", addr, (void *)addr);
+ i++;
+ }
+ }
+ printk("\n");
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ struct frame *fp = (struct frame *)regs;
+ mm_segment_t old_fs = get_fs();
+ u16 c, *cp;
+ unsigned long addr;
+ int i;
+
+ print_modules();
+ printk("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc);
+ printk("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2);
+ printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
+ regs->d0, regs->d1, regs->d2, regs->d3);
+ printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
+ regs->d4, regs->d5, regs->a0, regs->a1);
+
+ printk("Process %s (pid: %d, task=%p)\n",
+ current->comm, task_pid_nr(current), current);
+ addr = (unsigned long)&fp->un;
+ printk("Frame format=%X ", regs->format);
+ switch (regs->format) {
+ case 0x2:
+ printk("instr addr=%08lx\n", fp->un.fmt2.iaddr);
+ addr += sizeof(fp->un.fmt2);
+ break;
+ case 0x3:
+ printk("eff addr=%08lx\n", fp->un.fmt3.effaddr);
+ addr += sizeof(fp->un.fmt3);
+ break;
+ case 0x4:
+ printk((CPU_IS_060 ? "fault addr=%08lx fslw=%08lx\n"
+ : "eff addr=%08lx pc=%08lx\n"),
+ fp->un.fmt4.effaddr, fp->un.fmt4.pc);
+ addr += sizeof(fp->un.fmt4);
+ break;
+ case 0x7:
+ printk("eff addr=%08lx ssw=%04x faddr=%08lx\n",
+ fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
+ printk("wb 1 stat/addr/data: %04x %08lx %08lx\n",
+ fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
+ printk("wb 2 stat/addr/data: %04x %08lx %08lx\n",
+ fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
+ printk("wb 3 stat/addr/data: %04x %08lx %08lx\n",
+ fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
+ printk("push data: %08lx %08lx %08lx %08lx\n",
+ fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
+ fp->un.fmt7.pd3);
+ addr += sizeof(fp->un.fmt7);
+ break;
+ case 0x9:
+ printk("instr addr=%08lx\n", fp->un.fmt9.iaddr);
+ addr += sizeof(fp->un.fmt9);
+ break;
+ case 0xa:
+ printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
+ fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
+ fp->un.fmta.daddr, fp->un.fmta.dobuf);
+ addr += sizeof(fp->un.fmta);
+ break;
+ case 0xb:
+ printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
+ fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
+ fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
+ printk("baddr=%08lx dibuf=%08lx ver=%x\n",
+ fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
+ addr += sizeof(fp->un.fmtb);
+ break;
+ default:
+ printk("\n");
+ }
+ show_stack(NULL, (unsigned long *)addr);
+
+ printk("Code:");
+ set_fs(KERNEL_DS);
+ cp = (u16 *)regs->pc;
+ for (i = -8; i < 16; i++) {
+ if (get_user(c, cp + i) && i >= 0) {
+ printk(" Bad PC value.");
+ break;
+ }
+ printk(i ? " %04x" : " <%04x>", c);
+ }
+ set_fs(old_fs);
+ printk ("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+ unsigned long *p;
+ unsigned long *endstack;
+ int i;
+
+ if (!stack) {
+ if (task)
+ stack = (unsigned long *)task->thread.esp0;
+ else
+ stack = (unsigned long *)&stack;
+ }
+ endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
+
+ printk("Stack from %08lx:", (unsigned long)stack);
+ p = stack;
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (p + 1 > endstack)
+ break;
+ if (i % 8 == 0)
+ printk("\n ");
+ printk(" %08lx", *p++);
+ }
+ printk("\n");
+ show_trace(stack);
+}
+
+/*
+ * The architecture-independent backtrace generator
+ */
+void dump_stack(void)
+{
+ unsigned long stack;
+
+ show_trace(&stack);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+/*
+ * The vector number returned in the frame pointer may also contain
+ * the "fs" (Fault Status) bits on ColdFire. These are in the bottom
+ * 2 bits, and upper 2 bits. So we need to mask out the real vector
+ * number before using it in comparisons. You don't need to do this on
+ * real 68k parts, but it won't hurt either.
+ */
+
+void bad_super_trap (struct frame *fp)
+{
+ int vector = (fp->ptregs.vector >> 2) & 0xff;
+
+ console_verbose();
+ if (vector < ARRAY_SIZE(vec_names))
+ printk ("*** %s *** FORMAT=%X\n",
+ vec_names[vector],
+ fp->ptregs.format);
+ else
+ printk ("*** Exception %d *** FORMAT=%X\n",
+ vector, fp->ptregs.format);
+ if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) {
+ unsigned short ssw = fp->un.fmtb.ssw;
+
+ printk ("SSW=%#06x ", ssw);
+
+ if (ssw & RC)
+ printk ("Pipe stage C instruction fault at %#010lx\n",
+ (fp->ptregs.format) == 0xA ?
+ fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2);
+ if (ssw & RB)
+ printk ("Pipe stage B instruction fault at %#010lx\n",
+ (fp->ptregs.format) == 0xA ?
+ fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
+ if (ssw & DF)
+ printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ ssw & RW ? "read" : "write",
+ fp->un.fmtb.daddr, space_names[ssw & DFC],
+ fp->ptregs.pc);
+ }
+ printk ("Current process id is %d\n", task_pid_nr(current));
+ die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
+}
+
+asmlinkage void trap_c(struct frame *fp)
+{
+ int sig;
+ int vector = (fp->ptregs.vector >> 2) & 0xff;
+ siginfo_t info;
+
+ if (fp->ptregs.sr & PS_S) {
+ if (vector == VEC_TRACE) {
+ /* traced a trapping instruction on a 68020/30,
+ * real exception will be executed afterwards.
+ */
+ } else if (!handle_kernel_fault(&fp->ptregs))
+ bad_super_trap(fp);
+ return;
+ }
+
+ /* send the appropriate signal to the user program */
+ switch (vector) {
+ case VEC_ADDRERR:
+ info.si_code = BUS_ADRALN;
+ sig = SIGBUS;
+ break;
+ case VEC_ILLEGAL:
+ case VEC_LINE10:
+ case VEC_LINE11:
+ info.si_code = ILL_ILLOPC;
+ sig = SIGILL;
+ break;
+ case VEC_PRIV:
+ info.si_code = ILL_PRVOPC;
+ sig = SIGILL;
+ break;
+ case VEC_COPROC:
+ info.si_code = ILL_COPROC;
+ sig = SIGILL;
+ break;
+ case VEC_TRAP1:
+ case VEC_TRAP2:
+ case VEC_TRAP3:
+ case VEC_TRAP4:
+ case VEC_TRAP5:
+ case VEC_TRAP6:
+ case VEC_TRAP7:
+ case VEC_TRAP8:
+ case VEC_TRAP9:
+ case VEC_TRAP10:
+ case VEC_TRAP11:
+ case VEC_TRAP12:
+ case VEC_TRAP13:
+ case VEC_TRAP14:
+ info.si_code = ILL_ILLTRP;
+ sig = SIGILL;
+ break;
+ case VEC_FPBRUC:
+ case VEC_FPOE:
+ case VEC_FPNAN:
+ info.si_code = FPE_FLTINV;
+ sig = SIGFPE;
+ break;
+ case VEC_FPIR:
+ info.si_code = FPE_FLTRES;
+ sig = SIGFPE;
+ break;
+ case VEC_FPDIVZ:
+ info.si_code = FPE_FLTDIV;
+ sig = SIGFPE;
+ break;
+ case VEC_FPUNDER:
+ info.si_code = FPE_FLTUND;
+ sig = SIGFPE;
+ break;
+ case VEC_FPOVER:
+ info.si_code = FPE_FLTOVF;
+ sig = SIGFPE;
+ break;
+ case VEC_ZERODIV:
+ info.si_code = FPE_INTDIV;
+ sig = SIGFPE;
+ break;
+ case VEC_CHK:
+ case VEC_TRAP:
+ info.si_code = FPE_INTOVF;
+ sig = SIGFPE;
+ break;
+ case VEC_TRACE: /* ptrace single step */
+ info.si_code = TRAP_TRACE;
+ sig = SIGTRAP;
+ break;
+ case VEC_TRAP15: /* breakpoint */
+ info.si_code = TRAP_BRKPT;
+ sig = SIGTRAP;
+ break;
+ default:
+ info.si_code = ILL_ILLOPC;
+ sig = SIGILL;
+ break;
+ }
+ info.si_signo = sig;
+ info.si_errno = 0;
+ switch (fp->ptregs.format) {
+ default:
+ info.si_addr = (void *) fp->ptregs.pc;
+ break;
+ case 2:
+ info.si_addr = (void *) fp->un.fmt2.iaddr;
+ break;
+ case 7:
+ info.si_addr = (void *) fp->un.fmt7.effaddr;
+ break;
+ case 9:
+ info.si_addr = (void *) fp->un.fmt9.iaddr;
+ break;
+ case 10:
+ info.si_addr = (void *) fp->un.fmta.daddr;
+ break;
+ case 11:
+ info.si_addr = (void *) fp->un.fmtb.daddr;
+ break;
+ }
+ force_sig_info (sig, &info, current);
+}
+
+void die_if_kernel (char *str, struct pt_regs *fp, int nr)
+{
+ if (!(fp->sr & PS_S))
+ return;
+
+ console_verbose();
+ printk("%s: %08x\n",str,nr);
+ show_registers(fp);
+ add_taint(TAINT_DIE);
+ do_exit(SIGSEGV);
+}
+
+asmlinkage void set_esp0(unsigned long ssp)
+{
+ current->thread.esp0 = ssp;
+}
+
+/*
+ * This function is called if an error occur while accessing
+ * user-space from the fpsp040 code.
+ */
+asmlinkage void fpsp040_die(void)
+{
+ do_exit(SIGSEGV);
+}
+
+#ifdef CONFIG_M68KFPU_EMU
+asmlinkage void fpemu_signal(int signal, int code, void *addr)
+{
+ siginfo_t info;
+
+ info.si_signo = signal;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = addr;
+ force_sig_info(signal, &info, current);
+}
#endif
diff --git a/arch/m68k/kernel/traps_mm.c b/arch/m68k/kernel/traps_mm.c
deleted file mode 100644
index 4022bbc2887..00000000000
--- a/arch/m68k/kernel/traps_mm.c
+++ /dev/null
@@ -1,1207 +0,0 @@
-/*
- * linux/arch/m68k/kernel/traps.c
- *
- * Copyright (C) 1993, 1994 by Hamish Macdonald
- *
- * 68040 fixes by Michael Rausch
- * 68040 fixes by Martin Apel
- * 68040 fixes and writeback by Richard Zidlicky
- * 68060 fixes by Roman Hodek
- * 68060 fixes by Jesper Skov
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-/*
- * Sets up all exception vectors
- */
-
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/user.h>
-#include <linux/string.h>
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/kallsyms.h>
-
-#include <asm/setup.h>
-#include <asm/fpu.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/traps.h>
-#include <asm/pgalloc.h>
-#include <asm/machdep.h>
-#include <asm/siginfo.h>
-
-/* assembler routines */
-asmlinkage void system_call(void);
-asmlinkage void buserr(void);
-asmlinkage void trap(void);
-asmlinkage void nmihandler(void);
-#ifdef CONFIG_M68KFPU_EMU
-asmlinkage void fpu_emu(void);
-#endif
-
-e_vector vectors[256];
-
-/* nmi handler for the Amiga */
-asm(".text\n"
- __ALIGN_STR "\n"
- "nmihandler: rte");
-
-/*
- * this must be called very early as the kernel might
- * use some instruction that are emulated on the 060
- * and so we're prepared for early probe attempts (e.g. nf_init).
- */
-void __init base_trap_init(void)
-{
- if (MACH_IS_SUN3X) {
- extern e_vector *sun3x_prom_vbr;
-
- __asm__ volatile ("movec %%vbr, %0" : "=r" (sun3x_prom_vbr));
- }
-
- /* setup the exception vector table */
- __asm__ volatile ("movec %0,%%vbr" : : "r" ((void*)vectors));
-
- if (CPU_IS_060) {
- /* set up ISP entry points */
- asmlinkage void unimp_vec(void) asm ("_060_isp_unimp");
-
- vectors[VEC_UNIMPII] = unimp_vec;
- }
-
- vectors[VEC_BUSERR] = buserr;
- vectors[VEC_ILLEGAL] = trap;
- vectors[VEC_SYS] = system_call;
-}
-
-void __init trap_init (void)
-{
- int i;
-
- for (i = VEC_SPUR; i <= VEC_INT7; i++)
- vectors[i] = bad_inthandler;
-
- for (i = 0; i < VEC_USER; i++)
- if (!vectors[i])
- vectors[i] = trap;
-
- for (i = VEC_USER; i < 256; i++)
- vectors[i] = bad_inthandler;
-
-#ifdef CONFIG_M68KFPU_EMU
- if (FPU_IS_EMU)
- vectors[VEC_LINE11] = fpu_emu;
-#endif
-
- if (CPU_IS_040 && !FPU_IS_EMU) {
- /* set up FPSP entry points */
- asmlinkage void dz_vec(void) asm ("dz");
- asmlinkage void inex_vec(void) asm ("inex");
- asmlinkage void ovfl_vec(void) asm ("ovfl");
- asmlinkage void unfl_vec(void) asm ("unfl");
- asmlinkage void snan_vec(void) asm ("snan");
- asmlinkage void operr_vec(void) asm ("operr");
- asmlinkage void bsun_vec(void) asm ("bsun");
- asmlinkage void fline_vec(void) asm ("fline");
- asmlinkage void unsupp_vec(void) asm ("unsupp");
-
- vectors[VEC_FPDIVZ] = dz_vec;
- vectors[VEC_FPIR] = inex_vec;
- vectors[VEC_FPOVER] = ovfl_vec;
- vectors[VEC_FPUNDER] = unfl_vec;
- vectors[VEC_FPNAN] = snan_vec;
- vectors[VEC_FPOE] = operr_vec;
- vectors[VEC_FPBRUC] = bsun_vec;
- vectors[VEC_LINE11] = fline_vec;
- vectors[VEC_FPUNSUP] = unsupp_vec;
- }
-
- if (CPU_IS_060 && !FPU_IS_EMU) {
- /* set up IFPSP entry points */
- asmlinkage void snan_vec6(void) asm ("_060_fpsp_snan");
- asmlinkage void operr_vec6(void) asm ("_060_fpsp_operr");
- asmlinkage void ovfl_vec6(void) asm ("_060_fpsp_ovfl");
- asmlinkage void unfl_vec6(void) asm ("_060_fpsp_unfl");
- asmlinkage void dz_vec6(void) asm ("_060_fpsp_dz");
- asmlinkage void inex_vec6(void) asm ("_060_fpsp_inex");
- asmlinkage void fline_vec6(void) asm ("_060_fpsp_fline");
- asmlinkage void unsupp_vec6(void) asm ("_060_fpsp_unsupp");
- asmlinkage void effadd_vec6(void) asm ("_060_fpsp_effadd");
-
- vectors[VEC_FPNAN] = snan_vec6;
- vectors[VEC_FPOE] = operr_vec6;
- vectors[VEC_FPOVER] = ovfl_vec6;
- vectors[VEC_FPUNDER] = unfl_vec6;
- vectors[VEC_FPDIVZ] = dz_vec6;
- vectors[VEC_FPIR] = inex_vec6;
- vectors[VEC_LINE11] = fline_vec6;
- vectors[VEC_FPUNSUP] = unsupp_vec6;
- vectors[VEC_UNIMPEA] = effadd_vec6;
- }
-
- /* if running on an amiga, make the NMI interrupt do nothing */
- if (MACH_IS_AMIGA) {
- vectors[VEC_INT7] = nmihandler;
- }
-}
-
-
-static const char *vec_names[] = {
- [VEC_RESETSP] = "RESET SP",
- [VEC_RESETPC] = "RESET PC",
- [VEC_BUSERR] = "BUS ERROR",
- [VEC_ADDRERR] = "ADDRESS ERROR",
- [VEC_ILLEGAL] = "ILLEGAL INSTRUCTION",
- [VEC_ZERODIV] = "ZERO DIVIDE",
- [VEC_CHK] = "CHK",
- [VEC_TRAP] = "TRAPcc",
- [VEC_PRIV] = "PRIVILEGE VIOLATION",
- [VEC_TRACE] = "TRACE",
- [VEC_LINE10] = "LINE 1010",
- [VEC_LINE11] = "LINE 1111",
- [VEC_RESV12] = "UNASSIGNED RESERVED 12",
- [VEC_COPROC] = "COPROCESSOR PROTOCOL VIOLATION",
- [VEC_FORMAT] = "FORMAT ERROR",
- [VEC_UNINT] = "UNINITIALIZED INTERRUPT",
- [VEC_RESV16] = "UNASSIGNED RESERVED 16",
- [VEC_RESV17] = "UNASSIGNED RESERVED 17",
- [VEC_RESV18] = "UNASSIGNED RESERVED 18",
- [VEC_RESV19] = "UNASSIGNED RESERVED 19",
- [VEC_RESV20] = "UNASSIGNED RESERVED 20",
- [VEC_RESV21] = "UNASSIGNED RESERVED 21",
- [VEC_RESV22] = "UNASSIGNED RESERVED 22",
- [VEC_RESV23] = "UNASSIGNED RESERVED 23",
- [VEC_SPUR] = "SPURIOUS INTERRUPT",
- [VEC_INT1] = "LEVEL 1 INT",
- [VEC_INT2] = "LEVEL 2 INT",
- [VEC_INT3] = "LEVEL 3 INT",
- [VEC_INT4] = "LEVEL 4 INT",
- [VEC_INT5] = "LEVEL 5 INT",
- [VEC_INT6] = "LEVEL 6 INT",
- [VEC_INT7] = "LEVEL 7 INT",
- [VEC_SYS] = "SYSCALL",
- [VEC_TRAP1] = "TRAP #1",
- [VEC_TRAP2] = "TRAP #2",
- [VEC_TRAP3] = "TRAP #3",
- [VEC_TRAP4] = "TRAP #4",
- [VEC_TRAP5] = "TRAP #5",
- [VEC_TRAP6] = "TRAP #6",
- [VEC_TRAP7] = "TRAP #7",
- [VEC_TRAP8] = "TRAP #8",
- [VEC_TRAP9] = "TRAP #9",
- [VEC_TRAP10] = "TRAP #10",
- [VEC_TRAP11] = "TRAP #11",
- [VEC_TRAP12] = "TRAP #12",
- [VEC_TRAP13] = "TRAP #13",
- [VEC_TRAP14] = "TRAP #14",
- [VEC_TRAP15] = "TRAP #15",
- [VEC_FPBRUC] = "FPCP BSUN",
- [VEC_FPIR] = "FPCP INEXACT",
- [VEC_FPDIVZ] = "FPCP DIV BY 0",
- [VEC_FPUNDER] = "FPCP UNDERFLOW",
- [VEC_FPOE] = "FPCP OPERAND ERROR",
- [VEC_FPOVER] = "FPCP OVERFLOW",
- [VEC_FPNAN] = "FPCP SNAN",
- [VEC_FPUNSUP] = "FPCP UNSUPPORTED OPERATION",
- [VEC_MMUCFG] = "MMU CONFIGURATION ERROR",
- [VEC_MMUILL] = "MMU ILLEGAL OPERATION ERROR",
- [VEC_MMUACC] = "MMU ACCESS LEVEL VIOLATION ERROR",
- [VEC_RESV59] = "UNASSIGNED RESERVED 59",
- [VEC_UNIMPEA] = "UNASSIGNED RESERVED 60",
- [VEC_UNIMPII] = "UNASSIGNED RESERVED 61",
- [VEC_RESV62] = "UNASSIGNED RESERVED 62",
- [VEC_RESV63] = "UNASSIGNED RESERVED 63",
-};
-
-static const char *space_names[] = {
- [0] = "Space 0",
- [USER_DATA] = "User Data",
- [USER_PROGRAM] = "User Program",
-#ifndef CONFIG_SUN3
- [3] = "Space 3",
-#else
- [FC_CONTROL] = "Control",
-#endif
- [4] = "Space 4",
- [SUPER_DATA] = "Super Data",
- [SUPER_PROGRAM] = "Super Program",
- [CPU_SPACE] = "CPU"
-};
-
-void die_if_kernel(char *,struct pt_regs *,int);
-asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
- unsigned long error_code);
-int send_fault_sig(struct pt_regs *regs);
-
-asmlinkage void trap_c(struct frame *fp);
-
-#if defined (CONFIG_M68060)
-static inline void access_error060 (struct frame *fp)
-{
- unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */
-
-#ifdef DEBUG
- printk("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);
-#endif
-
- if (fslw & MMU060_BPE) {
- /* branch prediction error -> clear branch cache */
- __asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
- "orl #0x00400000,%/d0\n\t"
- "movec %/d0,%/cacr"
- : : : "d0" );
- /* return if there's no other error */
- if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE))
- return;
- }
-
- if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) {
- unsigned long errorcode;
- unsigned long addr = fp->un.fmt4.effaddr;
-
- if (fslw & MMU060_MA)
- addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;
-
- errorcode = 1;
- if (fslw & MMU060_DESC_ERR) {
- __flush_tlb040_one(addr);
- errorcode = 0;
- }
- if (fslw & MMU060_W)
- errorcode |= 2;
-#ifdef DEBUG
- printk("errorcode = %d\n", errorcode );
-#endif
- do_page_fault(&fp->ptregs, addr, errorcode);
- } else if (fslw & (MMU060_SEE)){
- /* Software Emulation Error.
- * fault during mem_read/mem_write in ifpsp060/os.S
- */
- send_fault_sig(&fp->ptregs);
- } else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
- send_fault_sig(&fp->ptregs) > 0) {
- printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr);
- printk( "68060 access error, fslw=%lx\n", fslw );
- trap_c( fp );
- }
-}
-#endif /* CONFIG_M68060 */
-
-#if defined (CONFIG_M68040)
-static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
-{
- unsigned long mmusr;
- mm_segment_t old_fs = get_fs();
-
- set_fs(MAKE_MM_SEG(wbs));
-
- if (iswrite)
- asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
- else
- asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr));
-
- asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
-
- set_fs(old_fs);
-
- return mmusr;
-}
-
-static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
- unsigned long wbd)
-{
- int res = 0;
- mm_segment_t old_fs = get_fs();
-
- /* set_fs can not be moved, otherwise put_user() may oops */
- set_fs(MAKE_MM_SEG(wbs));
-
- switch (wbs & WBSIZ_040) {
- case BA_SIZE_BYTE:
- res = put_user(wbd & 0xff, (char __user *)wba);
- break;
- case BA_SIZE_WORD:
- res = put_user(wbd & 0xffff, (short __user *)wba);
- break;
- case BA_SIZE_LONG:
- res = put_user(wbd, (int __user *)wba);
- break;
- }
-
- /* set_fs can not be moved, otherwise put_user() may oops */
- set_fs(old_fs);
-
-
-#ifdef DEBUG
- printk("do_040writeback1, res=%d\n",res);
-#endif
-
- return res;
-}
-
-/* after an exception in a writeback the stack frame corresponding
- * to that exception is discarded, set a few bits in the old frame
- * to simulate what it should look like
- */
-static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs)
-{
- fp->un.fmt7.faddr = wba;
- fp->un.fmt7.ssw = wbs & 0xff;
- if (wba != current->thread.faddr)
- fp->un.fmt7.ssw |= MA_040;
-}
-
-static inline void do_040writebacks(struct frame *fp)
-{
- int res = 0;
-#if 0
- if (fp->un.fmt7.wb1s & WBV_040)
- printk("access_error040: cannot handle 1st writeback. oops.\n");
-#endif
-
- if ((fp->un.fmt7.wb2s & WBV_040) &&
- !(fp->un.fmt7.wb2s & WBTT_040)) {
- res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a,
- fp->un.fmt7.wb2d);
- if (res)
- fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s);
- else
- fp->un.fmt7.wb2s = 0;
- }
-
- /* do the 2nd wb only if the first one was successful (except for a kernel wb) */
- if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) {
- res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a,
- fp->un.fmt7.wb3d);
- if (res)
- {
- fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s);
-
- fp->un.fmt7.wb2s = fp->un.fmt7.wb3s;
- fp->un.fmt7.wb3s &= (~WBV_040);
- fp->un.fmt7.wb2a = fp->un.fmt7.wb3a;
- fp->un.fmt7.wb2d = fp->un.fmt7.wb3d;
- }
- else
- fp->un.fmt7.wb3s = 0;
- }
-
- if (res)
- send_fault_sig(&fp->ptregs);
-}
-
-/*
- * called from sigreturn(), must ensure userspace code didn't
- * manipulate exception frame to circumvent protection, then complete
- * pending writebacks
- * we just clear TM2 to turn it into a userspace access
- */
-asmlinkage void berr_040cleanup(struct frame *fp)
-{
- fp->un.fmt7.wb2s &= ~4;
- fp->un.fmt7.wb3s &= ~4;
-
- do_040writebacks(fp);
-}
-
-static inline void access_error040(struct frame *fp)
-{
- unsigned short ssw = fp->un.fmt7.ssw;
- unsigned long mmusr;
-
-#ifdef DEBUG
- printk("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
- printk("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
- fp->un.fmt7.wb2s, fp->un.fmt7.wb3s);
- printk ("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
- fp->un.fmt7.wb2a, fp->un.fmt7.wb3a,
- fp->un.fmt7.wb2d, fp->un.fmt7.wb3d);
-#endif
-
- if (ssw & ATC_040) {
- unsigned long addr = fp->un.fmt7.faddr;
- unsigned long errorcode;
-
- /*
- * The MMU status has to be determined AFTER the address
- * has been corrected if there was a misaligned access (MA).
- */
- if (ssw & MA_040)
- addr = (addr + 7) & -8;
-
- /* MMU error, get the MMUSR info for this access */
- mmusr = probe040(!(ssw & RW_040), addr, ssw);
-#ifdef DEBUG
- printk("mmusr = %lx\n", mmusr);
-#endif
- errorcode = 1;
- if (!(mmusr & MMU_R_040)) {
- /* clear the invalid atc entry */
- __flush_tlb040_one(addr);
- errorcode = 0;
- }
-
- /* despite what documentation seems to say, RMW
- * accesses have always both the LK and RW bits set */
- if (!(ssw & RW_040) || (ssw & LK_040))
- errorcode |= 2;
-
- if (do_page_fault(&fp->ptregs, addr, errorcode)) {
-#ifdef DEBUG
- printk("do_page_fault() !=0\n");
-#endif
- if (user_mode(&fp->ptregs)){
- /* delay writebacks after signal delivery */
-#ifdef DEBUG
- printk(".. was usermode - return\n");
-#endif
- return;
- }
- /* disable writeback into user space from kernel
- * (if do_page_fault didn't fix the mapping,
- * the writeback won't do good)
- */
-disable_wb:
-#ifdef DEBUG
- printk(".. disabling wb2\n");
-#endif
- if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
- fp->un.fmt7.wb2s &= ~WBV_040;
- if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr)
- fp->un.fmt7.wb3s &= ~WBV_040;
- }
- } else {
- /* In case of a bus error we either kill the process or expect
- * the kernel to catch the fault, which then is also responsible
- * for cleaning up the mess.
- */
- current->thread.signo = SIGBUS;
- current->thread.faddr = fp->un.fmt7.faddr;
- if (send_fault_sig(&fp->ptregs) >= 0)
- printk("68040 bus error (ssw=%x, faddr=%lx)\n", ssw,
- fp->un.fmt7.faddr);
- goto disable_wb;
- }
-
- do_040writebacks(fp);
-}
-#endif /* CONFIG_M68040 */
-
-#if defined(CONFIG_SUN3)
-#include <asm/sun3mmu.h>
-
-extern int mmu_emu_handle_fault (unsigned long, int, int);
-
-/* sun3 version of bus_error030 */
-
-static inline void bus_error030 (struct frame *fp)
-{
- unsigned char buserr_type = sun3_get_buserr ();
- unsigned long addr, errorcode;
- unsigned short ssw = fp->un.fmtb.ssw;
- extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
-
-#ifdef DEBUG
- if (ssw & (FC | FB))
- printk ("Instruction fault at %#010lx\n",
- ssw & FC ?
- fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
- :
- fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
- if (ssw & DF)
- printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
- ssw & RW ? "read" : "write",
- fp->un.fmtb.daddr,
- space_names[ssw & DFC], fp->ptregs.pc);
-#endif
-
- /*
- * Check if this page should be demand-mapped. This needs to go before
- * the testing for a bad kernel-space access (demand-mapping applies
- * to kernel accesses too).
- */
-
- if ((ssw & DF)
- && (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) {
- if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0))
- return;
- }
-
- /* Check for kernel-space pagefault (BAD). */
- if (fp->ptregs.sr & PS_S) {
- /* kernel fault must be a data fault to user space */
- if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) {
- // try checking the kernel mappings before surrender
- if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1))
- return;
- /* instruction fault or kernel data fault! */
- if (ssw & (FC | FB))
- printk ("Instruction fault at %#010lx\n",
- fp->ptregs.pc);
- if (ssw & DF) {
- /* was this fault incurred testing bus mappings? */
- if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) &&
- (fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) {
- send_fault_sig(&fp->ptregs);
- return;
- }
-
- printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
- ssw & RW ? "read" : "write",
- fp->un.fmtb.daddr,
- space_names[ssw & DFC], fp->ptregs.pc);
- }
- printk ("BAD KERNEL BUSERR\n");
-
- die_if_kernel("Oops", &fp->ptregs,0);
- force_sig(SIGKILL, current);
- return;
- }
- } else {
- /* user fault */
- if (!(ssw & (FC | FB)) && !(ssw & DF))
- /* not an instruction fault or data fault! BAD */
- panic ("USER BUSERR w/o instruction or data fault");
- }
-
-
- /* First handle the data fault, if any. */
- if (ssw & DF) {
- addr = fp->un.fmtb.daddr;
-
-// errorcode bit 0: 0 -> no page 1 -> protection fault
-// errorcode bit 1: 0 -> read fault 1 -> write fault
-
-// (buserr_type & SUN3_BUSERR_PROTERR) -> protection fault
-// (buserr_type & SUN3_BUSERR_INVALID) -> invalid page fault
-
- if (buserr_type & SUN3_BUSERR_PROTERR)
- errorcode = 0x01;
- else if (buserr_type & SUN3_BUSERR_INVALID)
- errorcode = 0x00;
- else {
-#ifdef DEBUG
- printk ("*** unexpected busfault type=%#04x\n", buserr_type);
- printk ("invalid %s access at %#lx from pc %#lx\n",
- !(ssw & RW) ? "write" : "read", addr,
- fp->ptregs.pc);
-#endif
- die_if_kernel ("Oops", &fp->ptregs, buserr_type);
- force_sig (SIGBUS, current);
- return;
- }
-
-//todo: wtf is RM bit? --m
- if (!(ssw & RW) || ssw & RM)
- errorcode |= 0x02;
-
- /* Handle page fault. */
- do_page_fault (&fp->ptregs, addr, errorcode);
-
- /* Retry the data fault now. */
- return;
- }
-
- /* Now handle the instruction fault. */
-
- /* Get the fault address. */
- if (fp->ptregs.format == 0xA)
- addr = fp->ptregs.pc + 4;
- else
- addr = fp->un.fmtb.baddr;
- if (ssw & FC)
- addr -= 2;
-
- if (buserr_type & SUN3_BUSERR_INVALID) {
- if (!mmu_emu_handle_fault (fp->un.fmtb.daddr, 1, 0))
- do_page_fault (&fp->ptregs, addr, 0);
- } else {
-#ifdef DEBUG
- printk ("protection fault on insn access (segv).\n");
-#endif
- force_sig (SIGSEGV, current);
- }
-}
-#else
-#if defined(CPU_M68020_OR_M68030)
-static inline void bus_error030 (struct frame *fp)
-{
- volatile unsigned short temp;
- unsigned short mmusr;
- unsigned long addr, errorcode;
- unsigned short ssw = fp->un.fmtb.ssw;
-#ifdef DEBUG
- unsigned long desc;
-
- printk ("pid = %x ", current->pid);
- printk ("SSW=%#06x ", ssw);
-
- if (ssw & (FC | FB))
- printk ("Instruction fault at %#010lx\n",
- ssw & FC ?
- fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
- :
- fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
- if (ssw & DF)
- printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
- ssw & RW ? "read" : "write",
- fp->un.fmtb.daddr,
- space_names[ssw & DFC], fp->ptregs.pc);
-#endif
-
- /* ++andreas: If a data fault and an instruction fault happen
- at the same time map in both pages. */
-
- /* First handle the data fault, if any. */
- if (ssw & DF) {
- addr = fp->un.fmtb.daddr;
-
-#ifdef DEBUG
- asm volatile ("ptestr %3,%2@,#7,%0\n\t"
- "pmove %%psr,%1@"
- : "=a&" (desc)
- : "a" (&temp), "a" (addr), "d" (ssw));
-#else
- asm volatile ("ptestr %2,%1@,#7\n\t"
- "pmove %%psr,%0@"
- : : "a" (&temp), "a" (addr), "d" (ssw));
-#endif
- mmusr = temp;
-
-#ifdef DEBUG
- printk("mmusr is %#x for addr %#lx in task %p\n",
- mmusr, addr, current);
- printk("descriptor address is %#lx, contents %#lx\n",
- __va(desc), *(unsigned long *)__va(desc));
-#endif
-
- errorcode = (mmusr & MMU_I) ? 0 : 1;
- if (!(ssw & RW) || (ssw & RM))
- errorcode |= 2;
-
- if (mmusr & (MMU_I | MMU_WP)) {
- if (ssw & 4) {
- printk("Data %s fault at %#010lx in %s (pc=%#lx)\n",
- ssw & RW ? "read" : "write",
- fp->un.fmtb.daddr,
- space_names[ssw & DFC], fp->ptregs.pc);
- goto buserr;
- }
- /* Don't try to do anything further if an exception was
- handled. */
- if (do_page_fault (&fp->ptregs, addr, errorcode) < 0)
- return;
- } else if (!(mmusr & MMU_I)) {
- /* probably a 020 cas fault */
- if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
- printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr);
- } else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
- printk("invalid %s access at %#lx from pc %#lx\n",
- !(ssw & RW) ? "write" : "read", addr,
- fp->ptregs.pc);
- die_if_kernel("Oops",&fp->ptregs,mmusr);
- force_sig(SIGSEGV, current);
- return;
- } else {
-#if 0
- static volatile long tlong;
-#endif
-
- printk("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
- !(ssw & RW) ? "write" : "read", addr,
- fp->ptregs.pc, ssw);
- asm volatile ("ptestr #1,%1@,#0\n\t"
- "pmove %%psr,%0@"
- : /* no outputs */
- : "a" (&temp), "a" (addr));
- mmusr = temp;
-
- printk ("level 0 mmusr is %#x\n", mmusr);
-#if 0
- asm volatile ("pmove %%tt0,%0@"
- : /* no outputs */
- : "a" (&tlong));
- printk("tt0 is %#lx, ", tlong);
- asm volatile ("pmove %%tt1,%0@"
- : /* no outputs */
- : "a" (&tlong));
- printk("tt1 is %#lx\n", tlong);
-#endif
-#ifdef DEBUG
- printk("Unknown SIGSEGV - 1\n");
-#endif
- die_if_kernel("Oops",&fp->ptregs,mmusr);
- force_sig(SIGSEGV, current);
- return;
- }
-
- /* setup an ATC entry for the access about to be retried */
- if (!(ssw & RW) || (ssw & RM))
- asm volatile ("ploadw %1,%0@" : /* no outputs */
- : "a" (addr), "d" (ssw));
- else
- asm volatile ("ploadr %1,%0@" : /* no outputs */
- : "a" (addr), "d" (ssw));
- }
-
- /* Now handle the instruction fault. */
-
- if (!(ssw & (FC|FB)))
- return;
-
- if (fp->ptregs.sr & PS_S) {
- printk("Instruction fault at %#010lx\n",
- fp->ptregs.pc);
- buserr:
- printk ("BAD KERNEL BUSERR\n");
- die_if_kernel("Oops",&fp->ptregs,0);
- force_sig(SIGKILL, current);
- return;
- }
-
- /* get the fault address */
- if (fp->ptregs.format == 10)
- addr = fp->ptregs.pc + 4;
- else
- addr = fp->un.fmtb.baddr;
- if (ssw & FC)
- addr -= 2;
-
- if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0)
- /* Insn fault on same page as data fault. But we
- should still create the ATC entry. */
- goto create_atc_entry;
-
-#ifdef DEBUG
- asm volatile ("ptestr #1,%2@,#7,%0\n\t"
- "pmove %%psr,%1@"
- : "=a&" (desc)
- : "a" (&temp), "a" (addr));
-#else
- asm volatile ("ptestr #1,%1@,#7\n\t"
- "pmove %%psr,%0@"
- : : "a" (&temp), "a" (addr));
-#endif
- mmusr = temp;
-
-#ifdef DEBUG
- printk ("mmusr is %#x for addr %#lx in task %p\n",
- mmusr, addr, current);
- printk ("descriptor address is %#lx, contents %#lx\n",
- __va(desc), *(unsigned long *)__va(desc));
-#endif
-
- if (mmusr & MMU_I)
- do_page_fault (&fp->ptregs, addr, 0);
- else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
- printk ("invalid insn access at %#lx from pc %#lx\n",
- addr, fp->ptregs.pc);
-#ifdef DEBUG
- printk("Unknown SIGSEGV - 2\n");
-#endif
- die_if_kernel("Oops",&fp->ptregs,mmusr);
- force_sig(SIGSEGV, current);
- return;
- }
-
-create_atc_entry:
- /* setup an ATC entry for the access about to be retried */
- asm volatile ("ploadr #2,%0@" : /* no outputs */
- : "a" (addr));
-}
-#endif /* CPU_M68020_OR_M68030 */
-#endif /* !CONFIG_SUN3 */
-
-asmlinkage void buserr_c(struct frame *fp)
-{
- /* Only set esp0 if coming from user mode */
- if (user_mode(&fp->ptregs))
- current->thread.esp0 = (unsigned long) fp;
-
-#ifdef DEBUG
- printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
-#endif
-
- switch (fp->ptregs.format) {
-#if defined (CONFIG_M68060)
- case 4: /* 68060 access error */
- access_error060 (fp);
- break;
-#endif
-#if defined (CONFIG_M68040)
- case 0x7: /* 68040 access error */
- access_error040 (fp);
- break;
-#endif
-#if defined (CPU_M68020_OR_M68030)
- case 0xa:
- case 0xb:
- bus_error030 (fp);
- break;
-#endif
- default:
- die_if_kernel("bad frame format",&fp->ptregs,0);
-#ifdef DEBUG
- printk("Unknown SIGSEGV - 4\n");
-#endif
- force_sig(SIGSEGV, current);
- }
-}
-
-
-static int kstack_depth_to_print = 48;
-
-void show_trace(unsigned long *stack)
-{
- unsigned long *endstack;
- unsigned long addr;
- int i;
-
- printk("Call Trace:");
- addr = (unsigned long)stack + THREAD_SIZE - 1;
- endstack = (unsigned long *)(addr & -THREAD_SIZE);
- i = 0;
- while (stack + 1 <= endstack) {
- addr = *stack++;
- /*
- * If the address is either in the text segment of the
- * kernel, or in the region which contains vmalloc'ed
- * memory, it *may* be the address of a calling
- * routine; if so, print it so that someone tracing
- * down the cause of the crash will be able to figure
- * out the call path that was taken.
- */
- if (__kernel_text_address(addr)) {
-#ifndef CONFIG_KALLSYMS
- if (i % 5 == 0)
- printk("\n ");
-#endif
- printk(" [<%08lx>] %pS\n", addr, (void *)addr);
- i++;
- }
- }
- printk("\n");
-}
-
-void show_registers(struct pt_regs *regs)
-{
- struct frame *fp = (struct frame *)regs;
- mm_segment_t old_fs = get_fs();
- u16 c, *cp;
- unsigned long addr;
- int i;
-
- print_modules();
- printk("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc);
- printk("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2);
- printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
- regs->d0, regs->d1, regs->d2, regs->d3);
- printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
- regs->d4, regs->d5, regs->a0, regs->a1);
-
- printk("Process %s (pid: %d, task=%p)\n",
- current->comm, task_pid_nr(current), current);
- addr = (unsigned long)&fp->un;
- printk("Frame format=%X ", regs->format);
- switch (regs->format) {
- case 0x2:
- printk("instr addr=%08lx\n", fp->un.fmt2.iaddr);
- addr += sizeof(fp->un.fmt2);
- break;
- case 0x3:
- printk("eff addr=%08lx\n", fp->un.fmt3.effaddr);
- addr += sizeof(fp->un.fmt3);
- break;
- case 0x4:
- printk((CPU_IS_060 ? "fault addr=%08lx fslw=%08lx\n"
- : "eff addr=%08lx pc=%08lx\n"),
- fp->un.fmt4.effaddr, fp->un.fmt4.pc);
- addr += sizeof(fp->un.fmt4);
- break;
- case 0x7:
- printk("eff addr=%08lx ssw=%04x faddr=%08lx\n",
- fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
- printk("wb 1 stat/addr/data: %04x %08lx %08lx\n",
- fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
- printk("wb 2 stat/addr/data: %04x %08lx %08lx\n",
- fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
- printk("wb 3 stat/addr/data: %04x %08lx %08lx\n",
- fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
- printk("push data: %08lx %08lx %08lx %08lx\n",
- fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
- fp->un.fmt7.pd3);
- addr += sizeof(fp->un.fmt7);
- break;
- case 0x9:
- printk("instr addr=%08lx\n", fp->un.fmt9.iaddr);
- addr += sizeof(fp->un.fmt9);
- break;
- case 0xa:
- printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
- fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
- fp->un.fmta.daddr, fp->un.fmta.dobuf);
- addr += sizeof(fp->un.fmta);
- break;
- case 0xb:
- printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
- fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
- fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
- printk("baddr=%08lx dibuf=%08lx ver=%x\n",
- fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
- addr += sizeof(fp->un.fmtb);
- break;
- default:
- printk("\n");
- }
- show_stack(NULL, (unsigned long *)addr);
-
- printk("Code:");
- set_fs(KERNEL_DS);
- cp = (u16 *)regs->pc;
- for (i = -8; i < 16; i++) {
- if (get_user(c, cp + i) && i >= 0) {
- printk(" Bad PC value.");
- break;
- }
- printk(i ? " %04x" : " <%04x>", c);
- }
- set_fs(old_fs);
- printk ("\n");
-}
-
-void show_stack(struct task_struct *task, unsigned long *stack)
-{
- unsigned long *p;
- unsigned long *endstack;
- int i;
-
- if (!stack) {
- if (task)
- stack = (unsigned long *)task->thread.esp0;
- else
- stack = (unsigned long *)&stack;
- }
- endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
-
- printk("Stack from %08lx:", (unsigned long)stack);
- p = stack;
- for (i = 0; i < kstack_depth_to_print; i++) {
- if (p + 1 > endstack)
- break;
- if (i % 8 == 0)
- printk("\n ");
- printk(" %08lx", *p++);
- }
- printk("\n");
- show_trace(stack);
-}
-
-/*
- * The architecture-independent backtrace generator
- */
-void dump_stack(void)
-{
- unsigned long stack;
-
- show_trace(&stack);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
-void bad_super_trap (struct frame *fp)
-{
- console_verbose();
- if (fp->ptregs.vector < 4 * ARRAY_SIZE(vec_names))
- printk ("*** %s *** FORMAT=%X\n",
- vec_names[(fp->ptregs.vector) >> 2],
- fp->ptregs.format);
- else
- printk ("*** Exception %d *** FORMAT=%X\n",
- (fp->ptregs.vector) >> 2,
- fp->ptregs.format);
- if (fp->ptregs.vector >> 2 == VEC_ADDRERR && CPU_IS_020_OR_030) {
- unsigned short ssw = fp->un.fmtb.ssw;
-
- printk ("SSW=%#06x ", ssw);
-
- if (ssw & RC)
- printk ("Pipe stage C instruction fault at %#010lx\n",
- (fp->ptregs.format) == 0xA ?
- fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2);
- if (ssw & RB)
- printk ("Pipe stage B instruction fault at %#010lx\n",
- (fp->ptregs.format) == 0xA ?
- fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
- if (ssw & DF)
- printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
- ssw & RW ? "read" : "write",
- fp->un.fmtb.daddr, space_names[ssw & DFC],
- fp->ptregs.pc);
- }
- printk ("Current process id is %d\n", task_pid_nr(current));
- die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
-}
-
-asmlinkage void trap_c(struct frame *fp)
-{
- int sig;
- siginfo_t info;
-
- if (fp->ptregs.sr & PS_S) {
- if (fp->ptregs.vector == VEC_TRACE << 2) {
- /* traced a trapping instruction on a 68020/30,
- * real exception will be executed afterwards.
- */
- } else if (!handle_kernel_fault(&fp->ptregs))
- bad_super_trap(fp);
- return;
- }
-
- /* send the appropriate signal to the user program */
- switch ((fp->ptregs.vector) >> 2) {
- case VEC_ADDRERR:
- info.si_code = BUS_ADRALN;
- sig = SIGBUS;
- break;
- case VEC_ILLEGAL:
- case VEC_LINE10:
- case VEC_LINE11:
- info.si_code = ILL_ILLOPC;
- sig = SIGILL;
- break;
- case VEC_PRIV:
- info.si_code = ILL_PRVOPC;
- sig = SIGILL;
- break;
- case VEC_COPROC:
- info.si_code = ILL_COPROC;
- sig = SIGILL;
- break;
- case VEC_TRAP1:
- case VEC_TRAP2:
- case VEC_TRAP3:
- case VEC_TRAP4:
- case VEC_TRAP5:
- case VEC_TRAP6:
- case VEC_TRAP7:
- case VEC_TRAP8:
- case VEC_TRAP9:
- case VEC_TRAP10:
- case VEC_TRAP11:
- case VEC_TRAP12:
- case VEC_TRAP13:
- case VEC_TRAP14:
- info.si_code = ILL_ILLTRP;
- sig = SIGILL;
- break;
- case VEC_FPBRUC:
- case VEC_FPOE:
- case VEC_FPNAN:
- info.si_code = FPE_FLTINV;
- sig = SIGFPE;
- break;
- case VEC_FPIR:
- info.si_code = FPE_FLTRES;
- sig = SIGFPE;
- break;
- case VEC_FPDIVZ:
- info.si_code = FPE_FLTDIV;
- sig = SIGFPE;
- break;
- case VEC_FPUNDER:
- info.si_code = FPE_FLTUND;
- sig = SIGFPE;
- break;
- case VEC_FPOVER:
- info.si_code = FPE_FLTOVF;
- sig = SIGFPE;
- break;
- case VEC_ZERODIV:
- info.si_code = FPE_INTDIV;
- sig = SIGFPE;
- break;
- case VEC_CHK:
- case VEC_TRAP:
- info.si_code = FPE_INTOVF;
- sig = SIGFPE;
- break;
- case VEC_TRACE: /* ptrace single step */
- info.si_code = TRAP_TRACE;
- sig = SIGTRAP;
- break;
- case VEC_TRAP15: /* breakpoint */
- info.si_code = TRAP_BRKPT;
- sig = SIGTRAP;
- break;
- default:
- info.si_code = ILL_ILLOPC;
- sig = SIGILL;
- break;
- }
- info.si_signo = sig;
- info.si_errno = 0;
- switch (fp->ptregs.format) {
- default:
- info.si_addr = (void *) fp->ptregs.pc;
- break;
- case 2:
- info.si_addr = (void *) fp->un.fmt2.iaddr;
- break;
- case 7:
- info.si_addr = (void *) fp->un.fmt7.effaddr;
- break;
- case 9:
- info.si_addr = (void *) fp->un.fmt9.iaddr;
- break;
- case 10:
- info.si_addr = (void *) fp->un.fmta.daddr;
- break;
- case 11:
- info.si_addr = (void *) fp->un.fmtb.daddr;
- break;
- }
- force_sig_info (sig, &info, current);
-}
-
-void die_if_kernel (char *str, struct pt_regs *fp, int nr)
-{
- if (!(fp->sr & PS_S))
- return;
-
- console_verbose();
- printk("%s: %08x\n",str,nr);
- show_registers(fp);
- add_taint(TAINT_DIE);
- do_exit(SIGSEGV);
-}
-
-/*
- * This function is called if an error occur while accessing
- * user-space from the fpsp040 code.
- */
-asmlinkage void fpsp040_die(void)
-{
- do_exit(SIGSEGV);
-}
-
-#ifdef CONFIG_M68KFPU_EMU
-asmlinkage void fpemu_signal(int signal, int code, void *addr)
-{
- siginfo_t info;
-
- info.si_signo = signal;
- info.si_errno = 0;
- info.si_code = code;
- info.si_addr = addr;
- force_sig_info(signal, &info, current);
-}
-#endif
diff --git a/arch/m68k/kernel/traps_no.c b/arch/m68k/kernel/traps_no.c
deleted file mode 100644
index e67b8c80695..00000000000
--- a/arch/m68k/kernel/traps_no.c
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * linux/arch/m68knommu/kernel/traps.c
- *
- * Copyright (C) 1993, 1994 by Hamish Macdonald
- *
- * 68040 fixes by Michael Rausch
- * 68040 fixes by Martin Apel
- * 68060 fixes by Roman Hodek
- * 68060 fixes by Jesper Skov
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-/*
- * Sets up all exception vectors
- */
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/user.h>
-#include <linux/string.h>
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/kallsyms.h>
-
-#include <asm/setup.h>
-#include <asm/fpu.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/traps.h>
-#include <asm/pgtable.h>
-#include <asm/machdep.h>
-#include <asm/siginfo.h>
-
-static char const * const vec_names[] = {
- "RESET SP", "RESET PC", "BUS ERROR", "ADDRESS ERROR",
- "ILLEGAL INSTRUCTION", "ZERO DIVIDE", "CHK", "TRAPcc",
- "PRIVILEGE VIOLATION", "TRACE", "LINE 1010", "LINE 1111",
- "UNASSIGNED RESERVED 12", "COPROCESSOR PROTOCOL VIOLATION",
- "FORMAT ERROR", "UNINITIALIZED INTERRUPT",
- "UNASSIGNED RESERVED 16", "UNASSIGNED RESERVED 17",
- "UNASSIGNED RESERVED 18", "UNASSIGNED RESERVED 19",
- "UNASSIGNED RESERVED 20", "UNASSIGNED RESERVED 21",
- "UNASSIGNED RESERVED 22", "UNASSIGNED RESERVED 23",
- "SPURIOUS INTERRUPT", "LEVEL 1 INT", "LEVEL 2 INT", "LEVEL 3 INT",
- "LEVEL 4 INT", "LEVEL 5 INT", "LEVEL 6 INT", "LEVEL 7 INT",
- "SYSCALL", "TRAP #1", "TRAP #2", "TRAP #3",
- "TRAP #4", "TRAP #5", "TRAP #6", "TRAP #7",
- "TRAP #8", "TRAP #9", "TRAP #10", "TRAP #11",
- "TRAP #12", "TRAP #13", "TRAP #14", "TRAP #15",
- "FPCP BSUN", "FPCP INEXACT", "FPCP DIV BY 0", "FPCP UNDERFLOW",
- "FPCP OPERAND ERROR", "FPCP OVERFLOW", "FPCP SNAN",
- "FPCP UNSUPPORTED OPERATION",
- "MMU CONFIGURATION ERROR"
-};
-
-void die_if_kernel(char *str, struct pt_regs *fp, int nr)
-{
- if (!(fp->sr & PS_S))
- return;
-
- console_verbose();
- printk(KERN_EMERG "%s: %08x\n",str,nr);
- printk(KERN_EMERG "PC: [<%08lx>]\nSR: %04x SP: %p a2: %08lx\n",
- fp->pc, fp->sr, fp, fp->a2);
- printk(KERN_EMERG "d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
- fp->d0, fp->d1, fp->d2, fp->d3);
- printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
- fp->d4, fp->d5, fp->a0, fp->a1);
-
- printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
- current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
- show_stack(NULL, (unsigned long *)(fp + 1));
- add_taint(TAINT_DIE);
- do_exit(SIGSEGV);
-}
-
-asmlinkage void buserr_c(struct frame *fp)
-{
- /* Only set esp0 if coming from user mode */
- if (user_mode(&fp->ptregs))
- current->thread.esp0 = (unsigned long) fp;
-
-#if defined(DEBUG)
- printk (KERN_DEBUG "*** Bus Error *** Format is %x\n", fp->ptregs.format);
-#endif
-
- die_if_kernel("bad frame format",&fp->ptregs,0);
-#if defined(DEBUG)
- printk(KERN_DEBUG "Unknown SIGSEGV - 4\n");
-#endif
- force_sig(SIGSEGV, current);
-}
-
-static void print_this_address(unsigned long addr, int i)
-{
-#ifdef CONFIG_KALLSYMS
- printk(KERN_EMERG " [%08lx] ", addr);
- print_symbol(KERN_CONT "%s\n", addr);
-#else
- if (i % 5)
- printk(KERN_CONT " [%08lx] ", addr);
- else
- printk(KERN_EMERG " [%08lx] ", addr);
- i++;
-#endif
-}
-
-int kstack_depth_to_print = 48;
-
-static void __show_stack(struct task_struct *task, unsigned long *stack)
-{
- unsigned long *endstack, addr;
-#ifdef CONFIG_FRAME_POINTER
- unsigned long *last_stack;
-#endif
- int i;
-
- if (!stack)
- stack = (unsigned long *)task->thread.ksp;
-
- addr = (unsigned long) stack;
- endstack = (unsigned long *) PAGE_ALIGN(addr);
-
- printk(KERN_EMERG "Stack from %08lx:", (unsigned long)stack);
- for (i = 0; i < kstack_depth_to_print; i++) {
- if (stack + 1 + i > endstack)
- break;
- if (i % 8 == 0)
- printk(KERN_EMERG " ");
- printk(KERN_CONT " %08lx", *(stack + i));
- }
- printk("\n");
- i = 0;
-
-#ifdef CONFIG_FRAME_POINTER
- printk(KERN_EMERG "Call Trace:\n");
-
- last_stack = stack - 1;
- while (stack <= endstack && stack > last_stack) {
-
- addr = *(stack + 1);
- print_this_address(addr, i);
- i++;
-
- last_stack = stack;
- stack = (unsigned long *)*stack;
- }
- printk("\n");
-#else
- printk(KERN_EMERG "Call Trace with CONFIG_FRAME_POINTER disabled:\n");
- while (stack <= endstack) {
- addr = *stack++;
- /*
- * If the address is either in the text segment of the kernel,
- * or in a region which is occupied by a module then it *may*
- * be the address of a calling routine; if so, print it so that
- * someone tracing down the cause of the crash will be able to
- * figure out the call path that was taken.
- */
- if (__kernel_text_address(addr)) {
- print_this_address(addr, i);
- i++;
- }
- }
- printk(KERN_CONT "\n");
-#endif
-}
-
-void bad_super_trap(struct frame *fp)
-{
- int vector = (fp->ptregs.vector >> 2) & 0xff;
-
- console_verbose();
- if (vector < ARRAY_SIZE(vec_names))
- printk (KERN_WARNING "*** %s *** FORMAT=%X\n",
- vec_names[vector],
- fp->ptregs.format);
- else
- printk (KERN_WARNING "*** Exception %d *** FORMAT=%X\n",
- vector,
- fp->ptregs.format);
- printk (KERN_WARNING "Current process id is %d\n", current->pid);
- die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
-}
-
-asmlinkage void trap_c(struct frame *fp)
-{
- int sig;
- int vector = (fp->ptregs.vector >> 2) & 0xff;
- siginfo_t info;
-
- if (fp->ptregs.sr & PS_S) {
- if (vector == VEC_TRACE) {
- /* traced a trapping instruction */
- } else
- bad_super_trap(fp);
- return;
- }
-
- /* send the appropriate signal to the user program */
- switch (vector) {
- case VEC_ADDRERR:
- info.si_code = BUS_ADRALN;
- sig = SIGBUS;
- break;
- case VEC_ILLEGAL:
- case VEC_LINE10:
- case VEC_LINE11:
- info.si_code = ILL_ILLOPC;
- sig = SIGILL;
- break;
- case VEC_PRIV:
- info.si_code = ILL_PRVOPC;
- sig = SIGILL;
- break;
- case VEC_COPROC:
- info.si_code = ILL_COPROC;
- sig = SIGILL;
- break;
- case VEC_TRAP1: /* gdbserver breakpoint */
- fp->ptregs.pc -= 2;
- info.si_code = TRAP_TRACE;
- sig = SIGTRAP;
- break;
- case VEC_TRAP2:
- case VEC_TRAP3:
- case VEC_TRAP4:
- case VEC_TRAP5:
- case VEC_TRAP6:
- case VEC_TRAP7:
- case VEC_TRAP8:
- case VEC_TRAP9:
- case VEC_TRAP10:
- case VEC_TRAP11:
- case VEC_TRAP12:
- case VEC_TRAP13:
- case VEC_TRAP14:
- info.si_code = ILL_ILLTRP;
- sig = SIGILL;
- break;
- case VEC_FPBRUC:
- case VEC_FPOE:
- case VEC_FPNAN:
- info.si_code = FPE_FLTINV;
- sig = SIGFPE;
- break;
- case VEC_FPIR:
- info.si_code = FPE_FLTRES;
- sig = SIGFPE;
- break;
- case VEC_FPDIVZ:
- info.si_code = FPE_FLTDIV;
- sig = SIGFPE;
- break;
- case VEC_FPUNDER:
- info.si_code = FPE_FLTUND;
- sig = SIGFPE;
- break;
- case VEC_FPOVER:
- info.si_code = FPE_FLTOVF;
- sig = SIGFPE;
- break;
- case VEC_ZERODIV:
- info.si_code = FPE_INTDIV;
- sig = SIGFPE;
- break;
- case VEC_CHK:
- case VEC_TRAP:
- info.si_code = FPE_INTOVF;
- sig = SIGFPE;
- break;
- case VEC_TRACE: /* ptrace single step */
- info.si_code = TRAP_TRACE;
- sig = SIGTRAP;
- break;
- case VEC_TRAP15: /* breakpoint */
- info.si_code = TRAP_BRKPT;
- sig = SIGTRAP;
- break;
- default:
- info.si_code = ILL_ILLOPC;
- sig = SIGILL;
- break;
- }
- info.si_signo = sig;
- info.si_errno = 0;
- switch (fp->ptregs.format) {
- default:
- info.si_addr = (void *) fp->ptregs.pc;
- break;
- case 2:
- info.si_addr = (void *) fp->un.fmt2.iaddr;
- break;
- case 7:
- info.si_addr = (void *) fp->un.fmt7.effaddr;
- break;
- case 9:
- info.si_addr = (void *) fp->un.fmt9.iaddr;
- break;
- case 10:
- info.si_addr = (void *) fp->un.fmta.daddr;
- break;
- case 11:
- info.si_addr = (void *) fp->un.fmtb.daddr;
- break;
- }
- force_sig_info (sig, &info, current);
-}
-
-asmlinkage void set_esp0(unsigned long ssp)
-{
- current->thread.esp0 = ssp;
-}
-
-/*
- * The architecture-independent backtrace generator
- */
-void dump_stack(void)
-{
- /*
- * We need frame pointers for this little trick, which works as follows:
- *
- * +------------+ 0x00
- * | Next SP | -> 0x0c
- * +------------+ 0x04
- * | Caller |
- * +------------+ 0x08
- * | Local vars | -> our stack var
- * +------------+ 0x0c
- * | Next SP | -> 0x18, that is what we pass to show_stack()
- * +------------+ 0x10
- * | Caller |
- * +------------+ 0x14
- * | Local vars |
- * +------------+ 0x18
- * | ... |
- * +------------+
- */
-
- unsigned long *stack;
-
- stack = (unsigned long *)&stack;
- stack++;
- __show_stack(current, stack);
-}
-EXPORT_SYMBOL(dump_stack);
-
-void show_stack(struct task_struct *task, unsigned long *stack)
-{
- if (!stack && !task)
- dump_stack();
- else
- __show_stack(task, stack);
-}
diff --git a/arch/m68k/kernel/vectors.c b/arch/m68k/kernel/vectors.c
new file mode 100644
index 00000000000..147b03fbc71
--- /dev/null
+++ b/arch/m68k/kernel/vectors.c
@@ -0,0 +1,145 @@
+/*
+ * vectors.c
+ *
+ * Copyright (C) 1993, 1994 by Hamish Macdonald
+ *
+ * 68040 fixes by Michael Rausch
+ * 68040 fixes by Martin Apel
+ * 68040 fixes and writeback by Richard Zidlicky
+ * 68060 fixes by Roman Hodek
+ * 68060 fixes by Jesper Skov
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Sets up all exception vectors
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/kallsyms.h>
+
+#include <asm/setup.h>
+#include <asm/fpu.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+
+/* assembler routines */
+asmlinkage void system_call(void);
+asmlinkage void buserr(void);
+asmlinkage void trap(void);
+asmlinkage void nmihandler(void);
+#ifdef CONFIG_M68KFPU_EMU
+asmlinkage void fpu_emu(void);
+#endif
+
+e_vector vectors[256];
+
+/* nmi handler for the Amiga */
+asm(".text\n"
+ __ALIGN_STR "\n"
+ "nmihandler: rte");
+
+/*
+ * this must be called very early as the kernel might
+ * use some instruction that are emulated on the 060
+ * and so we're prepared for early probe attempts (e.g. nf_init).
+ */
+void __init base_trap_init(void)
+{
+ if (MACH_IS_SUN3X) {
+ extern e_vector *sun3x_prom_vbr;
+
+ __asm__ volatile ("movec %%vbr, %0" : "=r" (sun3x_prom_vbr));
+ }
+
+ /* setup the exception vector table */
+ __asm__ volatile ("movec %0,%%vbr" : : "r" ((void*)vectors));
+
+ if (CPU_IS_060) {
+ /* set up ISP entry points */
+ asmlinkage void unimp_vec(void) asm ("_060_isp_unimp");
+
+ vectors[VEC_UNIMPII] = unimp_vec;
+ }
+
+ vectors[VEC_BUSERR] = buserr;
+ vectors[VEC_ILLEGAL] = trap;
+ vectors[VEC_SYS] = system_call;
+}
+
+void __init trap_init (void)
+{
+ int i;
+
+ for (i = VEC_SPUR; i <= VEC_INT7; i++)
+ vectors[i] = bad_inthandler;
+
+ for (i = 0; i < VEC_USER; i++)
+ if (!vectors[i])
+ vectors[i] = trap;
+
+ for (i = VEC_USER; i < 256; i++)
+ vectors[i] = bad_inthandler;
+
+#ifdef CONFIG_M68KFPU_EMU
+ if (FPU_IS_EMU)
+ vectors[VEC_LINE11] = fpu_emu;
+#endif
+
+ if (CPU_IS_040 && !FPU_IS_EMU) {
+ /* set up FPSP entry points */
+ asmlinkage void dz_vec(void) asm ("dz");
+ asmlinkage void inex_vec(void) asm ("inex");
+ asmlinkage void ovfl_vec(void) asm ("ovfl");
+ asmlinkage void unfl_vec(void) asm ("unfl");
+ asmlinkage void snan_vec(void) asm ("snan");
+ asmlinkage void operr_vec(void) asm ("operr");
+ asmlinkage void bsun_vec(void) asm ("bsun");
+ asmlinkage void fline_vec(void) asm ("fline");
+ asmlinkage void unsupp_vec(void) asm ("unsupp");
+
+ vectors[VEC_FPDIVZ] = dz_vec;
+ vectors[VEC_FPIR] = inex_vec;
+ vectors[VEC_FPOVER] = ovfl_vec;
+ vectors[VEC_FPUNDER] = unfl_vec;
+ vectors[VEC_FPNAN] = snan_vec;
+ vectors[VEC_FPOE] = operr_vec;
+ vectors[VEC_FPBRUC] = bsun_vec;
+ vectors[VEC_LINE11] = fline_vec;
+ vectors[VEC_FPUNSUP] = unsupp_vec;
+ }
+
+ if (CPU_IS_060 && !FPU_IS_EMU) {
+ /* set up IFPSP entry points */
+ asmlinkage void snan_vec6(void) asm ("_060_fpsp_snan");
+ asmlinkage void operr_vec6(void) asm ("_060_fpsp_operr");
+ asmlinkage void ovfl_vec6(void) asm ("_060_fpsp_ovfl");
+ asmlinkage void unfl_vec6(void) asm ("_060_fpsp_unfl");
+ asmlinkage void dz_vec6(void) asm ("_060_fpsp_dz");
+ asmlinkage void inex_vec6(void) asm ("_060_fpsp_inex");
+ asmlinkage void fline_vec6(void) asm ("_060_fpsp_fline");
+ asmlinkage void unsupp_vec6(void) asm ("_060_fpsp_unsupp");
+ asmlinkage void effadd_vec6(void) asm ("_060_fpsp_effadd");
+
+ vectors[VEC_FPNAN] = snan_vec6;
+ vectors[VEC_FPOE] = operr_vec6;
+ vectors[VEC_FPOVER] = ovfl_vec6;
+ vectors[VEC_FPUNDER] = unfl_vec6;
+ vectors[VEC_FPDIVZ] = dz_vec6;
+ vectors[VEC_FPIR] = inex_vec6;
+ vectors[VEC_LINE11] = fline_vec6;
+ vectors[VEC_FPUNSUP] = unsupp_vec6;
+ vectors[VEC_UNIMPEA] = effadd_vec6;
+ }
+
+ /* if running on an amiga, make the NMI interrupt do nothing */
+ if (MACH_IS_AMIGA) {
+ vectors[VEC_INT7] = nmihandler;
+ }
+}
+
diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux.lds_no.S
index 7dc4087a954..4e238934083 100644
--- a/arch/m68k/kernel/vmlinux.lds_no.S
+++ b/arch/m68k/kernel/vmlinux.lds_no.S
@@ -77,7 +77,6 @@ SECTIONS {
*(.rodata) *(.rodata.*)
*(__vermagic) /* Kernel version magic */
- *(__markers_strings)
*(.rodata1)
*(.rodata.str1.1)
diff --git a/arch/m68k/lib/memcpy.c b/arch/m68k/lib/memcpy.c
index 06488931697..10ca051d56b 100644
--- a/arch/m68k/lib/memcpy.c
+++ b/arch/m68k/lib/memcpy.c
@@ -22,6 +22,15 @@ void *memcpy(void *to, const void *from, size_t n)
from = cfrom;
n--;
}
+#if defined(CONFIG_M68000)
+ if ((long)from & 1) {
+ char *cto = to;
+ const char *cfrom = from;
+ for (; n; n--)
+ *cto++ = *cfrom++;
+ return xto;
+ }
+#endif
if (n > 2 && (long)to & 2) {
short *sto = to;
const short *sfrom = from;
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
index 900d899f332..f92190c159b 100644
--- a/arch/m68k/mac/macints.c
+++ b/arch/m68k/mac/macints.c
@@ -370,7 +370,7 @@ int mac_irq_pending(unsigned int irq)
break;
case 4:
if (psc_present)
- psc_irq_pending(irq);
+ return psc_irq_pending(irq);
break;
}
return 0;
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index e023fc6b37e..eb915551de6 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -304,35 +304,41 @@ static void via_write_pram(int offset, __u8 data)
static long via_read_time(void)
{
union {
- __u8 cdata[4];
- long idata;
+ __u8 cdata[4];
+ long idata;
} result, last_result;
- int ct;
+ int count = 1;
+
+ via_pram_command(0x81, &last_result.cdata[3]);
+ via_pram_command(0x85, &last_result.cdata[2]);
+ via_pram_command(0x89, &last_result.cdata[1]);
+ via_pram_command(0x8D, &last_result.cdata[0]);
/*
* The NetBSD guys say to loop until you get the same reading
* twice in a row.
*/
- ct = 0;
- do {
- if (++ct > 10) {
- printk("via_read_time: couldn't get valid time, "
- "last read = 0x%08lx and 0x%08lx\n",
- last_result.idata, result.idata);
- break;
- }
-
- last_result.idata = result.idata;
- result.idata = 0;
-
+ while (1) {
via_pram_command(0x81, &result.cdata[3]);
via_pram_command(0x85, &result.cdata[2]);
via_pram_command(0x89, &result.cdata[1]);
via_pram_command(0x8D, &result.cdata[0]);
- } while (result.idata != last_result.idata);
- return result.idata - RTC_OFFSET;
+ if (result.idata == last_result.idata)
+ return result.idata - RTC_OFFSET;
+
+ if (++count > 10)
+ break;
+
+ last_result.idata = result.idata;
+ }
+
+ pr_err("via_read_time: failed to read a stable value; "
+ "got 0x%08lx then 0x%08lx\n",
+ last_result.idata, result.idata);
+
+ return 0;
}
/*
diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c
index 50cd12cf28d..1e33d39ca9a 100644
--- a/arch/m68k/mm/init_no.c
+++ b/arch/m68k/mm/init_no.c
@@ -32,6 +32,7 @@
#include <linux/gfp.h>
#include <asm/setup.h>
+#include <asm/sections.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -44,9 +45,6 @@
*/
void *empty_zero_page;
-extern unsigned long memory_start;
-extern unsigned long memory_end;
-
/*
* paging_init() continues the virtual memory environment setup which
* was begun by the code in arch/head.S.
@@ -78,8 +76,6 @@ void __init mem_init(void)
{
int codek = 0, datak = 0, initk = 0;
unsigned long tmp;
- extern char _etext, _stext, _sdata, _ebss, __init_begin, __init_end;
- extern unsigned int _ramend, _rambase;
unsigned long len = _ramend - _rambase;
unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
unsigned long end_mem = memory_end; /* DAVIDM - this must not include kernel stack at top */
@@ -95,9 +91,9 @@ void __init mem_init(void)
/* this will put all memory onto the freelists */
totalram_pages = free_all_bootmem();
- codek = (&_etext - &_stext) >> 10;
- datak = (&_ebss - &_sdata) >> 10;
- initk = (&__init_begin - &__init_end) >> 10;
+ codek = (_etext - _stext) >> 10;
+ datak = (_ebss - _sdata) >> 10;
+ initk = (__init_begin - __init_end) >> 10;
tmp = nr_free_pages() << PAGE_SHIFT;
printk(KERN_INFO "Memory available: %luk/%luk RAM, (%dk kernel code, %dk data)\n",
@@ -129,22 +125,21 @@ void free_initmem(void)
{
#ifdef CONFIG_RAMKERNEL
unsigned long addr;
- extern char __init_begin, __init_end;
/*
* The following code should be cool even if these sections
* are not page aligned.
*/
- addr = PAGE_ALIGN((unsigned long)(&__init_begin));
+ addr = PAGE_ALIGN((unsigned long) __init_begin);
/* next to check that the page we free is not a partial page */
- for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) {
+ for (; addr + PAGE_SIZE < ((unsigned long) __init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
pr_notice("Freeing unused kernel memory: %luk freed (0x%x - 0x%x)\n",
- (addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
- (int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
+ (addr - PAGE_ALIGN((unsigned long) __init_begin)) >> 10,
+ (int)(PAGE_ALIGN((unsigned long) __init_begin)),
(int)(addr - PAGE_SIZE));
#endif
}
diff --git a/arch/m68k/platform/520x/config.c b/arch/m68k/platform/520x/config.c
index 621238f1a21..8a98683f1b1 100644
--- a/arch/m68k/platform/520x/config.c
+++ b/arch/m68k/platform/520x/config.c
@@ -91,9 +91,9 @@ static struct resource m520x_qspi_resources[] = {
},
};
-#define MCFQSPI_CS0 62
-#define MCFQSPI_CS1 63
-#define MCFQSPI_CS2 44
+#define MCFQSPI_CS0 46
+#define MCFQSPI_CS1 47
+#define MCFQSPI_CS2 27
static int m520x_cs_setup(struct mcfqspi_cs_control *cs_control)
{
diff --git a/arch/m68k/platform/520x/gpio.c b/arch/m68k/platform/520x/gpio.c
index d757328563d..9bcc3e4b60c 100644
--- a/arch/m68k/platform/520x/gpio.c
+++ b/arch/m68k/platform/520x/gpio.c
@@ -38,42 +38,6 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = {
},
{
.gpio_chip = {
- .label = "BUSCTL",
- .request = mcf_gpio_request,
- .free = mcf_gpio_free,
- .direction_input = mcf_gpio_direction_input,
- .direction_output = mcf_gpio_direction_output,
- .get = mcf_gpio_get_value,
- .set = mcf_gpio_set_value_fast,
- .base = 8,
- .ngpio = 4,
- },
- .pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL,
- .podr = (void __iomem *) MCFGPIO_PODR_BUSCTL,
- .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
- .setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
- .clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL,
- },
- {
- .gpio_chip = {
- .label = "BE",
- .request = mcf_gpio_request,
- .free = mcf_gpio_free,
- .direction_input = mcf_gpio_direction_input,
- .direction_output = mcf_gpio_direction_output,
- .get = mcf_gpio_get_value,
- .set = mcf_gpio_set_value_fast,
- .base = 16,
- .ngpio = 4,
- },
- .pddr = (void __iomem *) MCFGPIO_PDDR_BE,
- .podr = (void __iomem *) MCFGPIO_PODR_BE,
- .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BE,
- .setr = (void __iomem *) MCFGPIO_PPDSDR_BE,
- .clrr = (void __iomem *) MCFGPIO_PCLRR_BE,
- },
- {
- .gpio_chip = {
.label = "CS",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
@@ -81,7 +45,7 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = {
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
- .base = 25,
+ .base = 9,
.ngpio = 3,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_CS,
@@ -99,7 +63,7 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = {
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
- .base = 32,
+ .base = 16,
.ngpio = 4,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C,
@@ -117,7 +81,7 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = {
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
- .base = 40,
+ .base = 24,
.ngpio = 4,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_QSPI,
@@ -135,7 +99,7 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = {
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
- .base = 48,
+ .base = 32,
.ngpio = 4,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_TIMER,
@@ -153,7 +117,7 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = {
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
- .base = 56,
+ .base = 40,
.ngpio = 8,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_UART,
@@ -171,7 +135,7 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = {
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
- .base = 64,
+ .base = 48,
.ngpio = 8,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_FECH,
@@ -189,7 +153,7 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = {
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
- .base = 72,
+ .base = 56,
.ngpio = 8,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_FECL,
diff --git a/arch/m68k/platform/68328/Makefile b/arch/m68k/platform/68328/Makefile
index 5e5435552d5..e4dfd8fde06 100644
--- a/arch/m68k/platform/68328/Makefile
+++ b/arch/m68k/platform/68328/Makefile
@@ -2,7 +2,10 @@
# Makefile for arch/m68knommu/platform/68328.
#
-head-y = head-$(MODEL).o
+model-y := ram
+model-$(CONFIG_ROMKERNEL) := rom
+
+head-y = head-$(model-y).o
head-$(CONFIG_PILOT) = head-pilot.o
head-$(CONFIG_DRAGEN2) = head-de2.o
diff --git a/arch/m68k/platform/68328/entry.S b/arch/m68k/platform/68328/entry.S
index 293e1eba9ac..5c39b80ed7d 100644
--- a/arch/m68k/platform/68328/entry.S
+++ b/arch/m68k/platform/68328/entry.S
@@ -67,7 +67,7 @@ ret_from_signal:
jra ret_from_exception
ENTRY(system_call)
- SAVE_ALL
+ SAVE_ALL_SYS
/* save top of frame*/
pea %sp@
@@ -129,7 +129,7 @@ Lsignal_return:
* This is the main interrupt handler, responsible for calling process_int()
*/
inthandler1:
- SAVE_ALL
+ SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
@@ -140,7 +140,7 @@ inthandler1:
bra ret_from_interrupt
inthandler2:
- SAVE_ALL
+ SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
@@ -151,7 +151,7 @@ inthandler2:
bra ret_from_interrupt
inthandler3:
- SAVE_ALL
+ SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
@@ -162,7 +162,7 @@ inthandler3:
bra ret_from_interrupt
inthandler4:
- SAVE_ALL
+ SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
@@ -173,7 +173,7 @@ inthandler4:
bra ret_from_interrupt
inthandler5:
- SAVE_ALL
+ SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
@@ -184,7 +184,7 @@ inthandler5:
bra ret_from_interrupt
inthandler6:
- SAVE_ALL
+ SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
@@ -195,7 +195,7 @@ inthandler6:
bra ret_from_interrupt
inthandler7:
- SAVE_ALL
+ SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
@@ -206,7 +206,7 @@ inthandler7:
bra ret_from_interrupt
inthandler:
- SAVE_ALL
+ SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
diff --git a/arch/m68k/platform/68360/Makefile b/arch/m68k/platform/68360/Makefile
index cf5af73a578..f6f43438304 100644
--- a/arch/m68k/platform/68360/Makefile
+++ b/arch/m68k/platform/68360/Makefile
@@ -1,10 +1,12 @@
#
# Makefile for arch/m68knommu/platform/68360.
#
+model-y := ram
+model-$(CONFIG_ROMKERNEL) := rom
obj-y := config.o commproc.o entry.o ints.o
extra-y := head.o
-$(obj)/head.o: $(obj)/head-$(MODEL).o
- ln -sf head-$(MODEL).o $(obj)/head.o
+$(obj)/head.o: $(obj)/head-$(model-y).o
+ ln -sf head-$(model-y).o $(obj)/head.o
diff --git a/arch/m68k/platform/68360/entry.S b/arch/m68k/platform/68360/entry.S
index abbb89672ea..aa47d1d4992 100644
--- a/arch/m68k/platform/68360/entry.S
+++ b/arch/m68k/platform/68360/entry.S
@@ -63,7 +63,7 @@ ret_from_signal:
jra ret_from_exception
ENTRY(system_call)
- SAVE_ALL
+ SAVE_ALL_SYS
/* save top of frame*/
pea %sp@
@@ -125,7 +125,7 @@ Lsignal_return:
* This is the main interrupt handler, responsible for calling do_IRQ()
*/
inthandler:
- SAVE_ALL
+ SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and.l #0x3ff, %d0
lsr.l #0x02, %d0
diff --git a/arch/m68k/platform/coldfire/entry.S b/arch/m68k/platform/coldfire/entry.S
index bd27242c2f4..3157461a8d1 100644
--- a/arch/m68k/platform/coldfire/entry.S
+++ b/arch/m68k/platform/coldfire/entry.S
@@ -61,7 +61,7 @@ enosys:
bra 1f
ENTRY(system_call)
- SAVE_ALL
+ SAVE_ALL_SYS
move #0x2000,%sr /* enable intrs again */
cmpl #NR_syscalls,%d0
@@ -165,9 +165,7 @@ Lsignal_return:
* sources). Calls up to high level code to do all the work.
*/
ENTRY(inthandler)
- SAVE_ALL
- moveq #-1,%d0
- movel %d0,%sp@(PT_OFF_ORIG_D0)
+ SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
andl #0x03fc,%d0 /* mask out vector only */
diff --git a/arch/m68k/q40/README b/arch/m68k/q40/README
index b26d5f55e91..93f4c4cd3c4 100644
--- a/arch/m68k/q40/README
+++ b/arch/m68k/q40/README
@@ -31,7 +31,7 @@ drivers used by the Q40, apart from the very obvious (console etc.):
char/joystick/* # most of this should work, not
# in default config.in
block/q40ide.c # startup for ide
- ide* # see Documentation/ide.txt
+ ide* # see Documentation/ide/ide.txt
floppy.c # normal PC driver, DMA emu in asm/floppy.h
# and arch/m68k/kernel/entry.S
# see drivers/block/README.fd
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 8fbb0ec1023..a569514cf19 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -16,7 +16,7 @@
#define _ASM_MICROBLAZE_DMA_MAPPING_H
/*
- * See Documentation/PCI/PCI-DMA-mapping.txt and
+ * See Documentation/DMA-API-HOWTO.txt and
* Documentation/DMA-API.txt for documentation.
*/
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 213f2d67166..36a133e5ee3 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -304,11 +304,11 @@ asmlinkage void __init mmu_init(void)
/* Map in all of RAM starting at CONFIG_KERNEL_START */
mapin_ram();
-#ifdef HIGHMEM_START_BOOL
- ioremap_base = HIGHMEM_START;
+#ifdef CONFIG_HIGHMEM_START_BOOL
+ ioremap_base = CONFIG_HIGHMEM_START;
#else
ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
-#endif /* CONFIG_HIGHMEM */
+#endif /* CONFIG_HIGHMEM_START_BOOL */
ioremap_bot = ioremap_base;
/* Initialize the context management stuff */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 177cdaf8356..4cbc6d8de21 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -24,6 +24,7 @@ config MIPS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select HAVE_ARCH_JUMP_LABEL
+ select IRQ_FORCED_THREADING
menu "Machine selection"
@@ -91,15 +92,8 @@ config BCM47XX
select DMA_NONCOHERENT
select HW_HAS_PCI
select IRQ_CPU
- select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
- select SSB
- select SSB_DRIVER_MIPS
- select SSB_DRIVER_EXTIF
- select SSB_EMBEDDED
- select SSB_B43_PCI_BRIDGE if PCI
- select SSB_PCICORE_HOSTMODE if PCI
select GENERIC_GPIO
select SYS_HAS_EARLY_PRINTK
select CFE
@@ -722,6 +716,7 @@ config CAVIUM_OCTEON_SIMULATOR
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_HOTPLUG_CPU
select SYS_HAS_CPU_CAVIUM_OCTEON
+ select HOLES_IN_ZONE
help
The Octeon simulator is software performance model of the Cavium
Octeon Processor. It supports simulating Octeon processors on x86
@@ -744,6 +739,7 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
select ZONE_DMA32
select USB_ARCH_HAS_OHCI
select USB_ARCH_HAS_EHCI
+ select HOLES_IN_ZONE
help
This option supports all of the Octeon reference boards from Cavium
Networks. It builds a kernel that dynamically determines the Octeon
@@ -788,6 +784,7 @@ endchoice
source "arch/mips/alchemy/Kconfig"
source "arch/mips/ath79/Kconfig"
+source "arch/mips/bcm47xx/Kconfig"
source "arch/mips/bcm63xx/Kconfig"
source "arch/mips/jazz/Kconfig"
source "arch/mips/jz4740/Kconfig"
@@ -973,6 +970,9 @@ config ISA_DMA_API
config GENERIC_GPIO
bool
+config HOLES_IN_ZONE
+ bool
+
#
# Endianess selection. Sufficiently obscure so many users don't know what to
# answer,so we try hard to limit the available choices. Also the use of a
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index 3b2c18b1434..f72c48d4804 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -492,7 +492,7 @@ static void __init alchemy_setup_macs(int ctype)
memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6);
ret = platform_device_register(&au1xxx_eth0_device);
- if (!ret)
+ if (ret)
printk(KERN_INFO "Alchemy: failed to register MAC0\n");
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index 647e518c90b..b86324a4260 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -158,15 +158,21 @@ static void restore_core_regs(void)
void au_sleep(void)
{
- int cpuid = alchemy_get_cputype();
- if (cpuid != ALCHEMY_CPU_UNKNOWN) {
- save_core_regs();
- if (cpuid <= ALCHEMY_CPU_AU1500)
- alchemy_sleep_au1000();
- else if (cpuid <= ALCHEMY_CPU_AU1200)
- alchemy_sleep_au1550();
- restore_core_regs();
+ save_core_regs();
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ alchemy_sleep_au1000();
+ break;
+ case ALCHEMY_CPU_AU1550:
+ case ALCHEMY_CPU_AU1200:
+ alchemy_sleep_au1550();
+ break;
}
+
+ restore_core_regs();
}
#endif /* CONFIG_PM */
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
index 596ad00e7f0..463d2c4d944 100644
--- a/arch/mips/alchemy/devboards/bcsr.c
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -89,8 +89,12 @@ static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
{
unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
+ disable_irq_nosync(irq);
+
for ( ; bisr; bisr &= bisr - 1)
generic_handle_irq(bcsr_csc_base + __ffs(bisr));
+
+ enable_irq(irq);
}
/* NOTE: both the enable and mask bits must be cleared, otherwise the
diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c
index 1dac4f27d33..4a8980027ec 100644
--- a/arch/mips/alchemy/devboards/db1200/setup.c
+++ b/arch/mips/alchemy/devboards/db1200/setup.c
@@ -23,13 +23,6 @@ void __init board_setup(void)
unsigned long freq0, clksrc, div, pfc;
unsigned short whoami;
- /* Set Config[OD] (disable overlapping bus transaction):
- * This gets rid of a _lot_ of spurious interrupts (especially
- * wrt. IDE); but incurs ~10% performance hit in some
- * cpu-bound applications.
- */
- set_c0_config(1 << 19);
-
bcsr_init(DB1200_BCSR_PHYS_ADDR,
DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS);
diff --git a/arch/mips/ar7/irq.c b/arch/mips/ar7/irq.c
index 03db3daadbd..88c4babfdb5 100644
--- a/arch/mips/ar7/irq.c
+++ b/arch/mips/ar7/irq.c
@@ -98,7 +98,8 @@ static struct irq_chip ar7_sec_irq_type = {
static struct irqaction ar7_cascade_action = {
.handler = no_action,
- .name = "AR7 cascade interrupt"
+ .name = "AR7 cascade interrupt",
+ .flags = IRQF_NO_THREAD,
};
static void __init ar7_irq_init(int base)
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
new file mode 100644
index 00000000000..6210b8d8410
--- /dev/null
+++ b/arch/mips/bcm47xx/Kconfig
@@ -0,0 +1,31 @@
+if BCM47XX
+
+config BCM47XX_SSB
+ bool "SSB Support for Broadcom BCM47XX"
+ select SYS_HAS_CPU_MIPS32_R1
+ select SSB
+ select SSB_DRIVER_MIPS
+ select SSB_DRIVER_EXTIF
+ select SSB_EMBEDDED
+ select SSB_B43_PCI_BRIDGE if PCI
+ select SSB_PCICORE_HOSTMODE if PCI
+ default y
+ help
+ Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
+
+ This will generate an image with support for SSB and MIPS32 R1 instruction set.
+
+config BCM47XX_BCMA
+ bool "BCMA Support for Broadcom BCM47XX"
+ select SYS_HAS_CPU_MIPS32_R2
+ select BCMA
+ select BCMA_HOST_SOC
+ select BCMA_DRIVER_MIPS
+ select BCMA_DRIVER_PCI_HOSTMODE if PCI
+ default y
+ help
+ Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
+
+ This will generate an image with support for BCMA and MIPS32 R2 instruction set.
+
+endif
diff --git a/arch/mips/bcm47xx/Makefile b/arch/mips/bcm47xx/Makefile
index 7465e8a72d9..4add17349ff 100644
--- a/arch/mips/bcm47xx/Makefile
+++ b/arch/mips/bcm47xx/Makefile
@@ -3,4 +3,5 @@
# under Linux.
#
-obj-y := gpio.o irq.o nvram.o prom.o serial.o setup.o time.o wgt634u.o
+obj-y += gpio.o irq.o nvram.o prom.o serial.o setup.o time.o
+obj-$(CONFIG_BCM47XX_SSB) += wgt634u.o
diff --git a/arch/mips/bcm47xx/gpio.c b/arch/mips/bcm47xx/gpio.c
index e4a5ee9c972..57b425fd4d4 100644
--- a/arch/mips/bcm47xx/gpio.c
+++ b/arch/mips/bcm47xx/gpio.c
@@ -20,42 +20,82 @@ static DECLARE_BITMAP(gpio_in_use, BCM47XX_EXTIF_GPIO_LINES);
int gpio_request(unsigned gpio, const char *tag)
{
- if (ssb_chipco_available(&ssb_bcm47xx.chipco) &&
- ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES))
- return -EINVAL;
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco) &&
+ ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES))
+ return -EINVAL;
- if (ssb_extif_available(&ssb_bcm47xx.extif) &&
- ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES))
- return -EINVAL;
+ if (ssb_extif_available(&bcm47xx_bus.ssb.extif) &&
+ ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES))
+ return -EINVAL;
- if (test_and_set_bit(gpio, gpio_in_use))
- return -EBUSY;
+ if (test_and_set_bit(gpio, gpio_in_use))
+ return -EBUSY;
- return 0;
+ return 0;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ if (gpio >= BCM47XX_CHIPCO_GPIO_LINES)
+ return -EINVAL;
+
+ if (test_and_set_bit(gpio, gpio_in_use))
+ return -EBUSY;
+
+ return 0;
+#endif
+ }
+ return -EINVAL;
}
EXPORT_SYMBOL(gpio_request);
void gpio_free(unsigned gpio)
{
- if (ssb_chipco_available(&ssb_bcm47xx.chipco) &&
- ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES))
- return;
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco) &&
+ ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES))
+ return;
+
+ if (ssb_extif_available(&bcm47xx_bus.ssb.extif) &&
+ ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES))
+ return;
- if (ssb_extif_available(&ssb_bcm47xx.extif) &&
- ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES))
+ clear_bit(gpio, gpio_in_use);
return;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ if (gpio >= BCM47XX_CHIPCO_GPIO_LINES)
+ return;
- clear_bit(gpio, gpio_in_use);
+ clear_bit(gpio, gpio_in_use);
+ return;
+#endif
+ }
}
EXPORT_SYMBOL(gpio_free);
int gpio_to_irq(unsigned gpio)
{
- if (ssb_chipco_available(&ssb_bcm47xx.chipco))
- return ssb_mips_irq(ssb_bcm47xx.chipco.dev) + 2;
- else if (ssb_extif_available(&ssb_bcm47xx.extif))
- return ssb_mips_irq(ssb_bcm47xx.extif.dev) + 2;
- else
- return -EINVAL;
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco))
+ return ssb_mips_irq(bcm47xx_bus.ssb.chipco.dev) + 2;
+ else if (ssb_extif_available(&bcm47xx_bus.ssb.extif))
+ return ssb_mips_irq(bcm47xx_bus.ssb.extif.dev) + 2;
+ else
+ return -EINVAL;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ return bcma_core_mips_irq(bcm47xx_bus.bcma.bus.drv_cc.core) + 2;
+#endif
+ }
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(gpio_to_irq);
diff --git a/arch/mips/bcm47xx/irq.c b/arch/mips/bcm47xx/irq.c
index 325757acd02..8cf3833b2d2 100644
--- a/arch/mips/bcm47xx/irq.c
+++ b/arch/mips/bcm47xx/irq.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq_cpu.h>
+#include <bcm47xx.h>
void plat_irq_dispatch(void)
{
@@ -51,5 +52,16 @@ void plat_irq_dispatch(void)
void __init arch_init_irq(void)
{
+#ifdef CONFIG_BCM47XX_BCMA
+ if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
+ bcma_write32(bcm47xx_bus.bcma.bus.drv_mips.core,
+ BCMA_MIPS_MIPS74K_INTMASK(5), 1 << 31);
+ /*
+ * the kernel reads the timer irq from some register and thinks
+ * it's #5, but we offset it by 2 and route to #7
+ */
+ cp0_compare_irq = 7;
+ }
+#endif
mips_cpu_irq_init();
}
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index 54db815bc86..a84e3bb7387 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -26,14 +26,35 @@ static char nvram_buf[NVRAM_SPACE];
/* Probe for NVRAM header */
static void early_nvram_init(void)
{
- struct ssb_mipscore *mcore = &ssb_bcm47xx.mipscore;
+#ifdef CONFIG_BCM47XX_SSB
+ struct ssb_mipscore *mcore_ssb;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ struct bcma_drv_cc *bcma_cc;
+#endif
struct nvram_header *header;
int i;
- u32 base, lim, off;
+ u32 base = 0;
+ u32 lim = 0;
+ u32 off;
u32 *src, *dst;
- base = mcore->flash_window;
- lim = mcore->flash_window_size;
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ mcore_ssb = &bcm47xx_bus.ssb.mipscore;
+ base = mcore_ssb->flash_window;
+ lim = mcore_ssb->flash_window_size;
+ break;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ bcma_cc = &bcm47xx_bus.bcma.bus.drv_cc;
+ base = bcma_cc->pflash.window;
+ lim = bcma_cc->pflash.window_size;
+ break;
+#endif
+ }
off = FLASH_MIN;
while (off <= lim) {
diff --git a/arch/mips/bcm47xx/serial.c b/arch/mips/bcm47xx/serial.c
index 59c11afdb2a..57981e4fe2b 100644
--- a/arch/mips/bcm47xx/serial.c
+++ b/arch/mips/bcm47xx/serial.c
@@ -23,10 +23,11 @@ static struct platform_device uart8250_device = {
},
};
-static int __init uart8250_init(void)
+#ifdef CONFIG_BCM47XX_SSB
+static int __init uart8250_init_ssb(void)
{
int i;
- struct ssb_mipscore *mcore = &(ssb_bcm47xx.mipscore);
+ struct ssb_mipscore *mcore = &(bcm47xx_bus.ssb.mipscore);
memset(&uart8250_data, 0, sizeof(uart8250_data));
@@ -44,6 +45,47 @@ static int __init uart8250_init(void)
}
return platform_device_register(&uart8250_device);
}
+#endif
+
+#ifdef CONFIG_BCM47XX_BCMA
+static int __init uart8250_init_bcma(void)
+{
+ int i;
+ struct bcma_drv_cc *cc = &(bcm47xx_bus.bcma.bus.drv_cc);
+
+ memset(&uart8250_data, 0, sizeof(uart8250_data));
+
+ for (i = 0; i < cc->nr_serial_ports; i++) {
+ struct plat_serial8250_port *p = &(uart8250_data[i]);
+ struct bcma_serial_port *bcma_port;
+ bcma_port = &(cc->serial_ports[i]);
+
+ p->mapbase = (unsigned int) bcma_port->regs;
+ p->membase = (void *) bcma_port->regs;
+ p->irq = bcma_port->irq + 2;
+ p->uartclk = bcma_port->baud_base;
+ p->regshift = bcma_port->reg_shift;
+ p->iotype = UPIO_MEM;
+ p->flags = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
+ }
+ return platform_device_register(&uart8250_device);
+}
+#endif
+
+static int __init uart8250_init(void)
+{
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ return uart8250_init_ssb();
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ return uart8250_init_bcma();
+#endif
+ }
+ return -EINVAL;
+}
module_init(uart8250_init);
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index cfae81571de..17c3d14d7c4 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -29,21 +29,36 @@
#include <linux/types.h>
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_embedded.h>
+#include <linux/bcma/bcma_soc.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/time.h>
#include <bcm47xx.h>
#include <asm/mach-bcm47xx/nvram.h>
-struct ssb_bus ssb_bcm47xx;
-EXPORT_SYMBOL(ssb_bcm47xx);
+union bcm47xx_bus bcm47xx_bus;
+EXPORT_SYMBOL(bcm47xx_bus);
+
+enum bcm47xx_bus_type bcm47xx_bus_type;
+EXPORT_SYMBOL(bcm47xx_bus_type);
static void bcm47xx_machine_restart(char *command)
{
printk(KERN_ALERT "Please stand by while rebooting the system...\n");
local_irq_disable();
/* Set the watchdog timer to reset immediately */
- ssb_watchdog_timer_set(&ssb_bcm47xx, 1);
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 1);
+ break;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 1);
+ break;
+#endif
+ }
while (1)
cpu_relax();
}
@@ -52,11 +67,23 @@ static void bcm47xx_machine_halt(void)
{
/* Disable interrupts and watchdog and spin forever */
local_irq_disable();
- ssb_watchdog_timer_set(&ssb_bcm47xx, 0);
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 0);
+ break;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 0);
+ break;
+#endif
+ }
while (1)
cpu_relax();
}
+#ifdef CONFIG_BCM47XX_SSB
#define READ_FROM_NVRAM(_outvar, name, buf) \
if (nvram_getprefix(prefix, name, buf, sizeof(buf)) >= 0)\
sprom->_outvar = simple_strtoul(buf, NULL, 0);
@@ -247,7 +274,7 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus,
return 0;
}
-void __init plat_mem_setup(void)
+static void __init bcm47xx_register_ssb(void)
{
int err;
char buf[100];
@@ -258,12 +285,12 @@ void __init plat_mem_setup(void)
printk(KERN_WARNING "bcm47xx: someone else already registered"
" a ssb SPROM callback handler (err %d)\n", err);
- err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE,
+ err = ssb_bus_ssbbus_register(&(bcm47xx_bus.ssb), SSB_ENUM_BASE,
bcm47xx_get_invariants);
if (err)
panic("Failed to initialize SSB bus (err %d)\n", err);
- mcore = &ssb_bcm47xx.mipscore;
+ mcore = &bcm47xx_bus.ssb.mipscore;
if (nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) {
if (strstr(buf, "console=ttyS1")) {
struct ssb_serial_port port;
@@ -276,8 +303,57 @@ void __init plat_mem_setup(void)
memcpy(&mcore->serial_ports[1], &port, sizeof(port));
}
}
+}
+#endif
+
+#ifdef CONFIG_BCM47XX_BCMA
+static void __init bcm47xx_register_bcma(void)
+{
+ int err;
+
+ err = bcma_host_soc_register(&bcm47xx_bus.bcma);
+ if (err)
+ panic("Failed to initialize BCMA bus (err %d)\n", err);
+}
+#endif
+
+void __init plat_mem_setup(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ if (c->cputype == CPU_74K) {
+ printk(KERN_INFO "bcm47xx: using bcma bus\n");
+#ifdef CONFIG_BCM47XX_BCMA
+ bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
+ bcm47xx_register_bcma();
+#endif
+ } else {
+ printk(KERN_INFO "bcm47xx: using ssb bus\n");
+#ifdef CONFIG_BCM47XX_SSB
+ bcm47xx_bus_type = BCM47XX_BUS_TYPE_SSB;
+ bcm47xx_register_ssb();
+#endif
+ }
_machine_restart = bcm47xx_machine_restart;
_machine_halt = bcm47xx_machine_halt;
pm_power_off = bcm47xx_machine_halt;
}
+
+static int __init bcm47xx_register_bus_complete(void)
+{
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ /* Nothing to do */
+ break;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ bcma_bus_register(&bcm47xx_bus.bcma.bus);
+ break;
+#endif
+ }
+ return 0;
+}
+device_initcall(bcm47xx_register_bus_complete);
diff --git a/arch/mips/bcm47xx/time.c b/arch/mips/bcm47xx/time.c
index 0c6f47b3fd9..536374dcba7 100644
--- a/arch/mips/bcm47xx/time.c
+++ b/arch/mips/bcm47xx/time.c
@@ -30,7 +30,7 @@
void __init plat_time_init(void)
{
- unsigned long hz;
+ unsigned long hz = 0;
/*
* Use deterministic values for initial counter interrupt
@@ -39,7 +39,19 @@ void __init plat_time_init(void)
write_c0_count(0);
write_c0_compare(0xffff);
- hz = ssb_cpu_clock(&ssb_bcm47xx.mipscore) / 2;
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ hz = ssb_cpu_clock(&bcm47xx_bus.ssb.mipscore) / 2;
+ break;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ hz = bcma_cpu_clock(&bcm47xx_bus.bcma.bus.drv_mips) / 2;
+ break;
+#endif
+ }
+
if (!hz)
hz = 100000000;
diff --git a/arch/mips/bcm47xx/wgt634u.c b/arch/mips/bcm47xx/wgt634u.c
index 74d06965326..e9f9ec8d443 100644
--- a/arch/mips/bcm47xx/wgt634u.c
+++ b/arch/mips/bcm47xx/wgt634u.c
@@ -108,7 +108,7 @@ static irqreturn_t gpio_interrupt(int irq, void *ignored)
/* Interrupts are shared, check if the current one is
a GPIO interrupt. */
- if (!ssb_chipco_irq_status(&ssb_bcm47xx.chipco,
+ if (!ssb_chipco_irq_status(&bcm47xx_bus.ssb.chipco,
SSB_CHIPCO_IRQ_GPIO))
return IRQ_NONE;
@@ -132,22 +132,26 @@ static int __init wgt634u_init(void)
* machine. Use the MAC address as an heuristic. Netgear Inc. has
* been allocated ranges 00:09:5b:xx:xx:xx and 00:0f:b5:xx:xx:xx.
*/
+ u8 *et0mac;
- u8 *et0mac = ssb_bcm47xx.sprom.et0mac;
+ if (bcm47xx_bus_type != BCM47XX_BUS_TYPE_SSB)
+ return -ENODEV;
+
+ et0mac = bcm47xx_bus.ssb.sprom.et0mac;
if (et0mac[0] == 0x00 &&
((et0mac[1] == 0x09 && et0mac[2] == 0x5b) ||
(et0mac[1] == 0x0f && et0mac[2] == 0xb5))) {
- struct ssb_mipscore *mcore = &ssb_bcm47xx.mipscore;
+ struct ssb_mipscore *mcore = &bcm47xx_bus.ssb.mipscore;
printk(KERN_INFO "WGT634U machine detected.\n");
if (!request_irq(gpio_to_irq(WGT634U_GPIO_RESET),
gpio_interrupt, IRQF_SHARED,
- "WGT634U GPIO", &ssb_bcm47xx.chipco)) {
+ "WGT634U GPIO", &bcm47xx_bus.ssb.chipco)) {
gpio_direction_input(WGT634U_GPIO_RESET);
gpio_intmask(WGT634U_GPIO_RESET, 1);
- ssb_chipco_irq_mask(&ssb_bcm47xx.chipco,
+ ssb_chipco_irq_mask(&bcm47xx_bus.ssb.chipco,
SSB_CHIPCO_IRQ_GPIO,
SSB_CHIPCO_IRQ_GPIO);
}
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index cea6021cb8d..162e11b4ed7 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -222,6 +222,7 @@ static struct irq_chip bcm63xx_external_irq_chip = {
static struct irqaction cpu_ip2_cascade_action = {
.handler = no_action,
.name = "cascade_ip2",
+ .flags = IRQF_NO_THREAD,
};
void __init arch_init_irq(void)
diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c
index cb9bf820fe5..965c777d356 100644
--- a/arch/mips/cobalt/irq.c
+++ b/arch/mips/cobalt/irq.c
@@ -48,6 +48,7 @@ asmlinkage void plat_irq_dispatch(void)
static struct irqaction cascade = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
void __init arch_init_irq(void)
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index fa45e924be0..f7b7ba6d5c4 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -101,20 +101,24 @@ int cpu_fpu_mask = DEC_CPU_IRQ_MASK(DEC_CPU_INR_FPU);
static struct irqaction ioirq = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
static struct irqaction fpuirq = {
.handler = no_action,
.name = "fpu",
+ .flags = IRQF_NO_THREAD,
};
static struct irqaction busirq = {
.flags = IRQF_DISABLED,
.name = "bus error",
+ .flags = IRQF_NO_THREAD,
};
static struct irqaction haltirq = {
.handler = dec_intr_halt,
.name = "halt",
+ .flags = IRQF_NO_THREAD,
};
diff --git a/arch/mips/emma/markeins/irq.c b/arch/mips/emma/markeins/irq.c
index 3dbd7a5a6ad..7798887a128 100644
--- a/arch/mips/emma/markeins/irq.c
+++ b/arch/mips/emma/markeins/irq.c
@@ -169,7 +169,7 @@ void emma2rh_gpio_irq_init(void)
static struct irqaction irq_cascade = {
.handler = no_action,
- .flags = 0,
+ .flags = IRQF_NO_THREAD,
.name = "cascade",
.dev_id = NULL,
.next = NULL,
diff --git a/arch/mips/include/asm/lasat/lasat.h b/arch/mips/include/asm/lasat/lasat.h
index a1ada1c27c1..e8ff70f80e1 100644
--- a/arch/mips/include/asm/lasat/lasat.h
+++ b/arch/mips/include/asm/lasat/lasat.h
@@ -41,10 +41,8 @@ enum lasat_mtdparts {
/*
* The format of the data record in the EEPROM.
- * See Documentation/LASAT/eeprom.txt for a detailed description
- * of the fields in this struct, and the LASAT Hardware Configuration
- * field specification for a detailed description of the config
- * field.
+ * See the LASAT Hardware Configuration field specification for a detailed
+ * description of the config field.
*/
#include <linux/types.h>
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
index d008f47a28b..de95e0723e2 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
@@ -19,7 +19,29 @@
#ifndef __ASM_BCM47XX_H
#define __ASM_BCM47XX_H
-/* SSB bus */
-extern struct ssb_bus ssb_bcm47xx;
+#include <linux/ssb/ssb.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_soc.h>
+
+enum bcm47xx_bus_type {
+#ifdef CONFIG_BCM47XX_SSB
+ BCM47XX_BUS_TYPE_SSB,
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ BCM47XX_BUS_TYPE_BCMA,
+#endif
+};
+
+union bcm47xx_bus {
+#ifdef CONFIG_BCM47XX_SSB
+ struct ssb_bus ssb;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ struct bcma_soc bcma;
+#endif
+};
+
+extern union bcm47xx_bus bcm47xx_bus;
+extern enum bcm47xx_bus_type bcm47xx_bus_type;
#endif /* __ASM_BCM47XX_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/gpio.h b/arch/mips/include/asm/mach-bcm47xx/gpio.h
index 98504142124..76961cabeed 100644
--- a/arch/mips/include/asm/mach-bcm47xx/gpio.h
+++ b/arch/mips/include/asm/mach-bcm47xx/gpio.h
@@ -10,6 +10,7 @@
#define __BCM47XX_GPIO_H
#include <linux/ssb/ssb_embedded.h>
+#include <linux/bcma/bcma.h>
#include <asm/mach-bcm47xx/bcm47xx.h>
#define BCM47XX_EXTIF_GPIO_LINES 5
@@ -21,41 +22,118 @@ extern int gpio_to_irq(unsigned gpio);
static inline int gpio_get_value(unsigned gpio)
{
- return ssb_gpio_in(&ssb_bcm47xx, 1 << gpio);
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ return ssb_gpio_in(&bcm47xx_bus.ssb, 1 << gpio);
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ return bcma_chipco_gpio_in(&bcm47xx_bus.bcma.bus.drv_cc,
+ 1 << gpio);
+#endif
+ }
+ return -EINVAL;
}
static inline void gpio_set_value(unsigned gpio, int value)
{
- ssb_gpio_out(&ssb_bcm47xx, 1 << gpio, value ? 1 << gpio : 0);
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ ssb_gpio_out(&bcm47xx_bus.ssb, 1 << gpio,
+ value ? 1 << gpio : 0);
+ return;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ bcma_chipco_gpio_out(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
+ value ? 1 << gpio : 0);
+ return;
+#endif
+ }
}
static inline int gpio_direction_input(unsigned gpio)
{
- ssb_gpio_outen(&ssb_bcm47xx, 1 << gpio, 0);
- return 0;
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ ssb_gpio_outen(&bcm47xx_bus.ssb, 1 << gpio, 0);
+ return 0;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ bcma_chipco_gpio_outen(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
+ 0);
+ return 0;
+#endif
+ }
+ return -EINVAL;
}
static inline int gpio_direction_output(unsigned gpio, int value)
{
- /* first set the gpio out value */
- ssb_gpio_out(&ssb_bcm47xx, 1 << gpio, value ? 1 << gpio : 0);
- /* then set the gpio mode */
- ssb_gpio_outen(&ssb_bcm47xx, 1 << gpio, 1 << gpio);
- return 0;
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ /* first set the gpio out value */
+ ssb_gpio_out(&bcm47xx_bus.ssb, 1 << gpio,
+ value ? 1 << gpio : 0);
+ /* then set the gpio mode */
+ ssb_gpio_outen(&bcm47xx_bus.ssb, 1 << gpio, 1 << gpio);
+ return 0;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ /* first set the gpio out value */
+ bcma_chipco_gpio_out(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
+ value ? 1 << gpio : 0);
+ /* then set the gpio mode */
+ bcma_chipco_gpio_outen(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
+ 1 << gpio);
+ return 0;
+#endif
+ }
+ return -EINVAL;
}
static inline int gpio_intmask(unsigned gpio, int value)
{
- ssb_gpio_intmask(&ssb_bcm47xx, 1 << gpio,
- value ? 1 << gpio : 0);
- return 0;
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ ssb_gpio_intmask(&bcm47xx_bus.ssb, 1 << gpio,
+ value ? 1 << gpio : 0);
+ return 0;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ bcma_chipco_gpio_intmask(&bcm47xx_bus.bcma.bus.drv_cc,
+ 1 << gpio, value ? 1 << gpio : 0);
+ return 0;
+#endif
+ }
+ return -EINVAL;
}
static inline int gpio_polarity(unsigned gpio, int value)
{
- ssb_gpio_polarity(&ssb_bcm47xx, 1 << gpio,
- value ? 1 << gpio : 0);
- return 0;
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ ssb_gpio_polarity(&bcm47xx_bus.ssb, 1 << gpio,
+ value ? 1 << gpio : 0);
+ return 0;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ bcma_chipco_gpio_polarity(&bcm47xx_bus.bcma.bus.drv_cc,
+ 1 << gpio, value ? 1 << gpio : 0);
+ return 0;
+#endif
+ }
+ return -EINVAL;
}
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index 0d5a42b5f47..a58addb98cf 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -54,7 +54,6 @@
#define cpu_has_mips_r2_exec_hazard 0
#define cpu_has_dsp 0
#define cpu_has_mipsmt 0
-#define cpu_has_userlocal 0
#define cpu_has_vint 0
#define cpu_has_veic 0
#define cpu_hwrena_impl_bits 0xc0000000
diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h
index 62c09408594..35371641575 100644
--- a/arch/mips/include/asm/mach-powertv/dma-coherence.h
+++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h
@@ -13,7 +13,6 @@
#define __ASM_MACH_POWERTV_DMA_COHERENCE_H
#include <linux/sched.h>
-#include <linux/version.h>
#include <linux/device.h>
#include <asm/mach-powertv/asic.h>
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index b4ba2449444..cb41af5f340 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -195,9 +195,9 @@
* to cover the pipeline delay.
*/
.set mips32
- mfc0 v1, CP0_TCSTATUS
+ mfc0 k0, CP0_TCSTATUS
.set mips0
- LONG_S v1, PT_TCSTATUS(sp)
+ LONG_S k0, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_S $4, PT_R4(sp)
LONG_S $5, PT_R5(sp)
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 73031f7fc82..4397972949f 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/delay.h>
@@ -86,7 +86,6 @@ struct jz_gpio_chip {
spinlock_t lock;
struct gpio_chip gpio_chip;
- struct sys_device sysdev;
};
static struct jz_gpio_chip jz4740_gpio_chips[];
@@ -459,49 +458,47 @@ static struct jz_gpio_chip jz4740_gpio_chips[] = {
JZ4740_GPIO_CHIP(D),
};
-static inline struct jz_gpio_chip *sysdev_to_chip(struct sys_device *dev)
+static void jz4740_gpio_suspend_chip(struct jz_gpio_chip *chip)
{
- return container_of(dev, struct jz_gpio_chip, sysdev);
+ chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK);
+ writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET);
+ writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR);
}
-static int jz4740_gpio_suspend(struct sys_device *dev, pm_message_t state)
+static int jz4740_gpio_suspend(void)
{
- struct jz_gpio_chip *chip = sysdev_to_chip(dev);
+ int i;
- chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK);
- writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET);
- writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR);
+ for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); i++)
+ jz4740_gpio_suspend_chip(&jz4740_gpio_chips[i]);
return 0;
}
-static int jz4740_gpio_resume(struct sys_device *dev)
+static void jz4740_gpio_resume_chip(struct jz_gpio_chip *chip)
{
- struct jz_gpio_chip *chip = sysdev_to_chip(dev);
uint32_t mask = chip->suspend_mask;
writel(~mask, chip->base + JZ_REG_GPIO_MASK_CLEAR);
writel(mask, chip->base + JZ_REG_GPIO_MASK_SET);
+}
- return 0;
+static void jz4740_gpio_resume(void)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(jz4740_gpio_chips) - 1; i >= 0 ; i--)
+ jz4740_gpio_resume_chip(&jz4740_gpio_chips[i]);
}
-static struct sysdev_class jz4740_gpio_sysdev_class = {
- .name = "gpio",
+static struct syscore_ops jz4740_gpio_syscore_ops = {
.suspend = jz4740_gpio_suspend,
.resume = jz4740_gpio_resume,
};
-static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
+static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
{
- int ret, irq;
-
- chip->sysdev.id = id;
- chip->sysdev.cls = &jz4740_gpio_sysdev_class;
- ret = sysdev_register(&chip->sysdev);
-
- if (ret)
- return ret;
+ int irq;
spin_lock_init(&chip->lock);
@@ -519,22 +516,17 @@ static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
irq_set_chip_and_handler(irq, &jz_gpio_irq_chip,
handle_level_irq);
}
-
- return 0;
}
static int __init jz4740_gpio_init(void)
{
unsigned int i;
- int ret;
-
- ret = sysdev_class_register(&jz4740_gpio_sysdev_class);
- if (ret)
- return ret;
for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);
+ register_syscore_ops(&jz4740_gpio_syscore_ops);
+
printk(KERN_INFO "JZ4740 GPIO initialized\n");
return 0;
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index feb8021a305..6a2d758dd8e 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -19,6 +19,26 @@
#include <asm-generic/sections.h>
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+
+/*
+ * Check if the address is in kernel space
+ *
+ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
+ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+ if (ip >= (unsigned long)_stext &&
+ ip <= (unsigned long)_etext)
+ return 1;
+ return 0;
+}
+
#ifdef CONFIG_DYNAMIC_FTRACE
#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
@@ -54,20 +74,6 @@ static inline void ftrace_dyn_arch_init_insns(void)
#endif
}
-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
- if (ip >= (unsigned long)_stext &&
- ip <= (unsigned long)_etext)
- return 1;
- return 0;
-}
-
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
int faulted;
@@ -112,11 +118,6 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
* 1: offset = 4 instructions
*/
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-#define MCOUNT_OFFSET_INSNS 5
-#else
-#define MCOUNT_OFFSET_INSNS 4
-#endif
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
int ftrace_make_nop(struct module *mod,
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 5c74eb797f0..32b397b646e 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -229,7 +229,7 @@ static void i8259A_shutdown(void)
*/
if (i8259A_auto_eoi >= 0) {
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
- outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
}
}
@@ -295,6 +295,7 @@ static void init_8259A(int auto_eoi)
static struct irqaction irq2 = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
static struct resource pic1_io_resource = {
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 876a75cc376..922a554cd10 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -349,3 +349,10 @@ SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
dfd, pathname);
}
+
+SYSCALL_DEFINE6(32_futex, u32 __user *, uaddr, int, op, u32, val,
+ struct compat_timespec __user *, utime, u32 __user *, uaddr2,
+ u32, val3)
+{
+ return compat_sys_futex(uaddr, op, val, utime, uaddr2, val3);
+}
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f9296e894e4..6de1f598346 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -315,7 +315,7 @@ EXPORT(sysn32_call_table)
PTR sys_fremovexattr
PTR sys_tkill
PTR sys_ni_syscall
- PTR compat_sys_futex
+ PTR sys_32_futex
PTR compat_sys_sched_setaffinity /* 6195 */
PTR compat_sys_sched_getaffinity
PTR sys_cacheflush
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 4d7c9827706..1d813169e45 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -441,7 +441,7 @@ sys_call_table:
PTR sys_fremovexattr /* 4235 */
PTR sys_tkill
PTR sys_sendfile64
- PTR compat_sys_futex
+ PTR sys_32_futex
PTR compat_sys_sched_setaffinity
PTR compat_sys_sched_getaffinity /* 4240 */
PTR compat_sys_io_setup
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index dbbe0ce48d8..f8524003676 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -8,6 +8,7 @@
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/cache.h>
+#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
@@ -658,6 +659,8 @@ static void do_signal(struct pt_regs *regs)
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
__u32 thread_info_flags)
{
+ local_irq_enable();
+
/* deal with pending signal delivery */
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
do_signal(regs);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index b7517e3abc8..cbea618af0b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -14,6 +14,7 @@
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -364,21 +365,26 @@ static int regs_to_trapnr(struct pt_regs *regs)
return (regs->cp0_cause >> 2) & 0x1f;
}
-static DEFINE_SPINLOCK(die_lock);
+static DEFINE_RAW_SPINLOCK(die_lock);
void __noreturn die(const char *str, struct pt_regs *regs)
{
static int die_counter;
int sig = SIGSEGV;
#ifdef CONFIG_MIPS_MT_SMTC
- unsigned long dvpret = dvpe();
+ unsigned long dvpret;
#endif /* CONFIG_MIPS_MT_SMTC */
+ oops_enter();
+
if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
sig = 0;
console_verbose();
- spin_lock_irq(&die_lock);
+ raw_spin_lock_irq(&die_lock);
+#ifdef CONFIG_MIPS_MT_SMTC
+ dvpret = dvpe();
+#endif /* CONFIG_MIPS_MT_SMTC */
bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
mips_mt_regdump(dvpret);
@@ -387,7 +393,9 @@ void __noreturn die(const char *str, struct pt_regs *regs)
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
add_taint(TAINT_DIE);
- spin_unlock_irq(&die_lock);
+ raw_spin_unlock_irq(&die_lock);
+
+ oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 2cd50ad0d5c..3efcb065f78 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -192,7 +192,7 @@ static struct tc *get_tc(int index)
}
spin_unlock(&vpecontrol.tc_list_lock);
- return NULL;
+ return res;
}
/* allocate a vpe and associate it with this minor (or index) */
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index fc89795cafd..f9737bb3c5a 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -123,11 +123,10 @@ void ltq_enable_irq(struct irq_data *d)
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
int i;
- int irq_nr = d->irq - INT_NUM_IRQ0;
ltq_enable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
- if (irq_nr == ltq_eiu_irq[i]) {
+ if (d->irq == ltq_eiu_irq[i]) {
/* low level - we should really handle set_type */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
(0x6 << (i * 4)), LTQ_EIU_EXIN_C);
@@ -147,11 +146,10 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
int i;
- int irq_nr = d->irq - INT_NUM_IRQ0;
ltq_disable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
- if (irq_nr == ltq_eiu_irq[i]) {
+ if (d->irq == ltq_eiu_irq[i]) {
/* disable */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
LTQ_EIU_EXIN_INEN);
diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c
index 66eb52fa50a..033b3184c7a 100644
--- a/arch/mips/lantiq/xway/ebu.c
+++ b/arch/mips/lantiq/xway/ebu.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/ioport.h>
#include <lantiq_soc.h>
diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c
index 9d69f01e352..39f0d2641cb 100644
--- a/arch/mips/lantiq/xway/pmu.c
+++ b/arch/mips/lantiq/xway/pmu.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/ioport.h>
#include <lantiq_soc.h>
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c
index de4c165515d..d608b6ef0ed 100644
--- a/arch/mips/lasat/interrupt.c
+++ b/arch/mips/lasat/interrupt.c
@@ -105,6 +105,7 @@ asmlinkage void plat_irq_dispatch(void)
static struct irqaction cascade = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
void __init arch_init_irq(void)
diff --git a/arch/mips/loongson/fuloong-2e/irq.c b/arch/mips/loongson/fuloong-2e/irq.c
index d61a04222b8..3cf1fef29f0 100644
--- a/arch/mips/loongson/fuloong-2e/irq.c
+++ b/arch/mips/loongson/fuloong-2e/irq.c
@@ -42,6 +42,7 @@ asmlinkage void mach_irq_dispatch(unsigned int pending)
static struct irqaction cascade_irqaction = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
void __init mach_init_irq(void)
diff --git a/arch/mips/loongson/lemote-2f/irq.c b/arch/mips/loongson/lemote-2f/irq.c
index 081db102bb9..14b081841b6 100644
--- a/arch/mips/loongson/lemote-2f/irq.c
+++ b/arch/mips/loongson/lemote-2f/irq.c
@@ -96,12 +96,13 @@ static irqreturn_t ip6_action(int cpl, void *dev_id)
struct irqaction ip6_irqaction = {
.handler = ip6_action,
.name = "cascade",
- .flags = IRQF_SHARED,
+ .flags = IRQF_SHARED | IRQF_NO_THREAD,
};
struct irqaction cascade_irqaction = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
void __init mach_init_irq(void)
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 9ff5d0fac55..302d779d5b0 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -6,6 +6,7 @@
* Copyright (C) 2011 Wind River Systems,
* written by Ralf Baechle <ralf@linux-mips.org>
*/
+#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
@@ -15,12 +16,11 @@
#include <linux/sched.h>
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
-
EXPORT_SYMBOL(shm_align_mask);
/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
+#define MAX_GAP ((TASK_SIZE)/6*5)
static int mmap_is_legacy(void)
{
@@ -57,13 +57,13 @@ static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
return base - off;
}
-#define COLOUR_ALIGN(addr,pgoff) \
+#define COLOUR_ALIGN(addr, pgoff) \
((((addr) + shm_align_mask) & ~shm_align_mask) + \
(((pgoff) << PAGE_SHIFT) & shm_align_mask))
enum mmap_allocation_direction {UP, DOWN};
-static unsigned long arch_get_unmapped_area_foo(struct file *filp,
+static unsigned long arch_get_unmapped_area_common(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags, enum mmap_allocation_direction dir)
{
@@ -103,16 +103,16 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
if (dir == UP) {
addr = mm->mmap_base;
- if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(addr);
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
@@ -131,28 +131,30 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
mm->free_area_cache = mm->mmap_base;
}
- /* either no address requested or can't fit in requested address hole */
+ /*
+ * either no address requested, or the mapping can't fit into
+ * the requested address hole
+ */
addr = mm->free_area_cache;
- if (do_color_align) {
- unsigned long base =
- COLOUR_ALIGN_DOWN(addr - len, pgoff);
-
+ if (do_color_align) {
+ unsigned long base =
+ COLOUR_ALIGN_DOWN(addr - len, pgoff);
addr = base + len;
- }
+ }
/* make sure it can fit in the remaining address space */
if (likely(addr > len)) {
vma = find_vma(mm, addr - len);
if (!vma || addr <= vma->vm_start) {
- /* remember the address as a hint for next time */
- return mm->free_area_cache = addr-len;
+ /* cache the address as a hint for next time */
+ return mm->free_area_cache = addr - len;
}
}
if (unlikely(mm->mmap_base < len))
goto bottomup;
- addr = mm->mmap_base-len;
+ addr = mm->mmap_base - len;
if (do_color_align)
addr = COLOUR_ALIGN_DOWN(addr, pgoff);
@@ -163,8 +165,8 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
* return with success:
*/
vma = find_vma(mm, addr);
- if (likely(!vma || addr+len <= vma->vm_start)) {
- /* remember the address as a hint for next time */
+ if (likely(!vma || addr + len <= vma->vm_start)) {
+ /* cache the address as a hint for next time */
return mm->free_area_cache = addr;
}
@@ -173,7 +175,7 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
mm->cached_hole_size = vma->vm_start - addr;
/* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
+ addr = vma->vm_start - len;
if (do_color_align)
addr = COLOUR_ALIGN_DOWN(addr, pgoff);
} while (likely(len < vma->vm_start));
@@ -201,7 +203,7 @@ bottomup:
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
- return arch_get_unmapped_area_foo(filp,
+ return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, UP);
}
@@ -213,7 +215,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
- return arch_get_unmapped_area_foo(filp,
+ return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, DOWN);
}
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index b6e1cff5066..e06370f58ef 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1759,14 +1759,13 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
u32 *p = handle_tlbm;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
- struct work_registers wr;
memset(handle_tlbm, 0, sizeof(handle_tlbm));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
+ build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
uasm_i_nop(&p); /* load delay */
build_make_write(&p, &r, K0, K1);
build_r3000_pte_reload_tlbwi(&p, K0, K1);
@@ -1963,7 +1962,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
uasm_i_andi(&p, wr.r3, wr.r3, 2);
uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
}
-
+ if (PM_DEFAULT_MASK == 0)
+ uasm_i_nop(&p);
/*
* We clobbered C0_PAGEMASK, restore it. On the other branch
* it is restored in build_huge_tlb_write_entry.
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 1d36c511a7a..d53ff91b277 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -350,12 +350,14 @@ unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
static struct irqaction i8259irq = {
.handler = no_action,
- .name = "XT-PIC cascade"
+ .name = "XT-PIC cascade",
+ .flags = IRQF_NO_THREAD,
};
static struct irqaction corehi_irqaction = {
.handler = no_action,
- .name = "CoreHi"
+ .name = "CoreHi",
+ .flags = IRQF_NO_THREAD,
};
static msc_irqmap_t __initdata msc_irqmap[] = {
diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile
index 9bd3f731f62..2dca585dd2f 100644
--- a/arch/mips/netlogic/xlr/Makefile
+++ b/arch/mips/netlogic/xlr/Makefile
@@ -2,4 +2,4 @@ obj-y += setup.o platform.o irq.o setup.o time.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o
-EXTRA_CFLAGS += -Werror
+ccflags-y += -Werror
diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c
index 455f8e50a00..400535a955d 100644
--- a/arch/mips/pci/pci-bcm47xx.c
+++ b/arch/mips/pci/pci-bcm47xx.c
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/ssb/ssb.h>
+#include <bcm47xx.h>
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
@@ -33,9 +34,13 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
int pcibios_plat_dev_init(struct pci_dev *dev)
{
+#ifdef CONFIG_BCM47XX_SSB
int res;
u8 slot, pin;
+ if (bcm47xx_bus_type != BCM47XX_BUS_TYPE_SSB)
+ return 0;
+
res = ssb_pcibios_plat_dev_init(dev);
if (res < 0) {
printk(KERN_ALERT "PCI: Failed to init device %s\n",
@@ -55,5 +60,6 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
}
dev->irq = res;
+#endif
return 0;
}
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 603d7493e96..8656388b34b 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -171,8 +171,13 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
u32 temp_buffer;
/* set clock to 33Mhz */
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
+ if (ltq_is_ar9()) {
+ ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR);
+ ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR);
+ } else {
+ ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
+ ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
+ }
/* external or internal clock ? */
if (conf->clock) {
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c
index 764362ce5e4..5f3a69cebad 100644
--- a/arch/mips/pci/pci-rc32434.c
+++ b/arch/mips/pci/pci-rc32434.c
@@ -215,7 +215,7 @@ static int __init rc32434_pci_init(void)
rc32434_pcibridge_init();
io_map_base = ioremap(rc32434_res_pci_io1.start,
- resource_size(&rcrc32434_res_pci_io1));
+ resource_size(&rc32434_res_pci_io1));
if (!io_map_base)
return -ENOMEM;
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq.c b/arch/mips/pmc-sierra/msp71xx/msp_irq.c
index 4531c4a514b..d3c3d81757a 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq.c
@@ -108,12 +108,14 @@ asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
static struct irqaction cic_cascade_msp = {
.handler = no_action,
- .name = "MSP CIC cascade"
+ .name = "MSP CIC cascade",
+ .flags = IRQF_NO_THREAD,
};
static struct irqaction per_cascade_msp = {
.handler = no_action,
- .name = "MSP PER cascade"
+ .name = "MSP PER cascade",
+ .flags = IRQF_NO_THREAD,
};
void __init arch_init_irq(void)
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_serial.c b/arch/mips/pmc-sierra/msp71xx/msp_serial.c
index f7261628d8a..a1c7c7da233 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_serial.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_serial.c
@@ -27,6 +27,7 @@
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
+#include <linux/slab.h>
#include <asm/bootinfo.h>
#include <asm/io.h>
@@ -38,6 +39,55 @@
#include <msp_int.h>
#include <msp_regs.h>
+struct msp_uart_data {
+ int last_lcr;
+};
+
+static void msp_serial_out(struct uart_port *p, int offset, int value)
+{
+ struct msp_uart_data *d = p->private_data;
+
+ if (offset == UART_LCR)
+ d->last_lcr = value;
+
+ offset <<= p->regshift;
+ writeb(value, p->membase + offset);
+}
+
+static unsigned int msp_serial_in(struct uart_port *p, int offset)
+{
+ offset <<= p->regshift;
+
+ return readb(p->membase + offset);
+}
+
+static int msp_serial_handle_irq(struct uart_port *p)
+{
+ struct msp_uart_data *d = p->private_data;
+ unsigned int iir = readb(p->membase + (UART_IIR << p->regshift));
+
+ if (serial8250_handle_irq(p, iir)) {
+ return 1;
+ } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
+ /*
+ * The DesignWare APB UART has an Busy Detect (0x07) interrupt
+ * meaning an LCR write attempt occurred while the UART was
+ * busy. The interrupt must be cleared by reading the UART
+ * status register (USR) and the LCR re-written.
+ *
+ * Note: MSP reserves 0x20 bytes of address space for the UART
+ * and the USR is mapped in a separate block at an offset of
+ * 0xc0 from the start of the UART.
+ */
+ (void)readb(p->membase + 0xc0);
+ writeb(d->last_lcr, p->membase + (UART_LCR << p->regshift));
+
+ return 1;
+ }
+
+ return 0;
+}
+
void __init msp_serial_setup(void)
{
char *s;
@@ -59,13 +109,22 @@ void __init msp_serial_setup(void)
up.irq = MSP_INT_UART0;
up.uartclk = uartclk;
up.regshift = 2;
- up.iotype = UPIO_DWAPB; /* UPIO_MEM like */
+ up.iotype = UPIO_MEM;
up.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
up.type = PORT_16550A;
up.line = 0;
- up.private_data = (void*)UART0_STATUS_REG;
- if (early_serial_setup(&up))
- printk(KERN_ERR "Early serial init of port 0 failed\n");
+ up.serial_out = msp_serial_out;
+ up.serial_in = msp_serial_in;
+ up.handle_irq = msp_serial_handle_irq;
+ up.private_data = kzalloc(sizeof(struct msp_uart_data), GFP_KERNEL);
+ if (!up.private_data) {
+ pr_err("failed to allocate uart private data\n");
+ return;
+ }
+ if (early_serial_setup(&up)) {
+ kfree(up.private_data);
+ pr_err("Early serial init of port 0 failed\n");
+ }
/* Initialize the second serial port, if one exists */
switch (mips_machtype) {
@@ -88,6 +147,8 @@ void __init msp_serial_setup(void)
up.irq = MSP_INT_UART1;
up.line = 1;
up.private_data = (void*)UART1_STATUS_REG;
- if (early_serial_setup(&up))
- printk(KERN_ERR "Early serial init of port 1 failed\n");
+ if (early_serial_setup(&up)) {
+ kfree(up.private_data);
+ pr_err("Early serial init of port 1 failed\n");
+ }
}
diff --git a/arch/mips/pnx8550/common/int.c b/arch/mips/pnx8550/common/int.c
index 6b93c81779c..1ebe22bdadc 100644
--- a/arch/mips/pnx8550/common/int.c
+++ b/arch/mips/pnx8550/common/int.c
@@ -167,7 +167,7 @@ static struct irq_chip level_irq_type = {
static struct irqaction gic_action = {
.handler = no_action,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "GIC",
};
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index b4d08e4d2ea..f72c336ea27 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -155,32 +155,32 @@ static void __irq_entry indy_buserror_irq(void)
static struct irqaction local0_cascade = {
.handler = no_action,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "local0 cascade",
};
static struct irqaction local1_cascade = {
.handler = no_action,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "local1 cascade",
};
static struct irqaction buserr = {
.handler = no_action,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "Bus Error",
};
static struct irqaction map0_cascade = {
.handler = no_action,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "mapable0 cascade",
};
#ifdef USE_LIO3_IRQ
static struct irqaction map1_cascade = {
.handler = no_action,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "mapable1 cascade",
};
#define SGI_INTERRUPTS SGINT_END
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index a7e5a6d917b..3ab5b5d25b0 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -359,6 +359,7 @@ void sni_rm200_init_8259A(void)
static struct irqaction sni_rm200_irq2 = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
static struct resource sni_rm200_pic1_resource = {
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
index e9f95dcde37..ba3cec3155d 100644
--- a/arch/mips/txx9/generic/setup_tx4939.c
+++ b/arch/mips/txx9/generic/setup_tx4939.c
@@ -321,7 +321,7 @@ void __init tx4939_sio_init(unsigned int sclk, unsigned int cts_mask)
static u32 tx4939_get_eth_speed(struct net_device *dev)
{
struct ethtool_cmd cmd;
- if (dev_ethtool_get_settings(dev, &cmd))
+ if (__ethtool_get_settings(dev, &cmd))
return 100; /* default 100Mbps */
return ethtool_cmd_speed(&cmd);
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index 70a3b85f375..fad2bef432c 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -34,6 +34,7 @@ static irq_cascade_t irq_cascade[NR_IRQS] __cacheline_aligned;
static struct irqaction cascade_irqaction = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int))
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 1f870340ebd..f093b3a8a4a 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -195,7 +195,7 @@ config SMP
singleprocessor machines. On a singleprocessor machine, the kernel
will run faster if you say N here.
- See also <file:Documentation/i386/IO-APIC.txt>,
+ See also <file:Documentation/x86/i386/IO-APIC.txt>,
<file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
<http://www.tldp.org/docs.html#howto>.
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 2623d19f4f4..2381df83bd0 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -260,7 +260,6 @@ void set_intr_level(int irq, u16 level)
/*
* mark an interrupt to be ACK'd after interrupt handlers have been run rather
* than before
- * - see Documentation/mn10300/features.txt
*/
void mn10300_set_lateack_irq_type(int irq)
{
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 4558bafbd1a..9460e1c266d 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -1,6 +1,6 @@
#
# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/config-language.txt.
+# see Documentation/kbuild/kconfig-language.txt.
#
config OPENRISC
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
index 60b47223390..b206ba4608b 100644
--- a/arch/openrisc/include/asm/dma-mapping.h
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -18,7 +18,7 @@
#define __ASM_OPENRISC_DMA_MAPPING_H
/*
- * See Documentation/PCI/PCI-DMA-mapping.txt and
+ * See Documentation/DMA-API-HOWTO.txt and
* Documentation/DMA-API.txt for documentation.
*
* This file is written with the intention of eventually moving over
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index e077b0bf56c..fdfd8be29e9 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -169,9 +169,7 @@ config 64BIT
choice
prompt "Kernel page size"
- default PARISC_PAGE_SIZE_4KB if !64BIT
- default PARISC_PAGE_SIZE_4KB if 64BIT
-# default PARISC_PAGE_SIZE_16KB if 64BIT
+ default PARISC_PAGE_SIZE_4KB
config PARISC_PAGE_SIZE_4KB
bool "4KB"
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 890531e32fe..467bbd510ea 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -5,7 +5,7 @@
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>
-/* See Documentation/PCI/PCI-DMA-mapping.txt */
+/* See Documentation/DMA-API-HOWTO.txt */
struct hppa_dma_ops {
int (*dma_supported)(struct device *dev, u64 mask);
void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index a029f74a3c5..d047edea250 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -2,7 +2,7 @@
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
-** See Documentation/PCI/PCI-DMA-mapping.txt for interface definitions.
+** See Documentation/DMA-API-HOWTO.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6926b61acfe..47682b67fd3 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -656,6 +656,8 @@ config SBUS
config FSL_SOC
bool
+ select HAVE_CAN_FLEXCAN if NET && CAN
+ select PPC_CLOCK if CAN_FLEXCAN
config FSL_PCI
bool
diff --git a/arch/powerpc/boot/dts/p1010rdb.dts b/arch/powerpc/boot/dts/p1010rdb.dts
index 6b33b73a5ba..d6c669c888e 100644
--- a/arch/powerpc/boot/dts/p1010rdb.dts
+++ b/arch/powerpc/boot/dts/p1010rdb.dts
@@ -23,6 +23,8 @@
ethernet2 = &enet2;
pci0 = &pci0;
pci1 = &pci1;
+ can0 = &can0;
+ can1 = &can1;
};
memory {
@@ -169,14 +171,6 @@
};
};
- can0@1c000 {
- fsl,flexcan-clock-source = "platform";
- };
-
- can1@1d000 {
- fsl,flexcan-clock-source = "platform";
- };
-
usb@22000 {
phy_type = "utmi";
};
diff --git a/arch/powerpc/boot/dts/p1010si.dtsi b/arch/powerpc/boot/dts/p1010si.dtsi
index 7f51104f2e3..cabe0a453ae 100644
--- a/arch/powerpc/boot/dts/p1010si.dtsi
+++ b/arch/powerpc/boot/dts/p1010si.dtsi
@@ -140,20 +140,18 @@
interrupt-parent = <&mpic>;
};
- can0@1c000 {
- compatible = "fsl,flexcan-v1.0";
+ can0: can@1c000 {
+ compatible = "fsl,p1010-flexcan";
reg = <0x1c000 0x1000>;
interrupts = <48 0x2>;
interrupt-parent = <&mpic>;
- fsl,flexcan-clock-divider = <2>;
};
- can1@1d000 {
- compatible = "fsl,flexcan-v1.0";
+ can1: can@1d000 {
+ compatible = "fsl,p1010-flexcan";
reg = <0x1d000 0x1000>;
interrupts = <61 0x2>;
interrupt-parent = <&mpic>;
- fsl,flexcan-clock-divider = <2>;
};
L2: l2-cache-controller@20000 {
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
index 4182c772340..ed3bab72a83 100644
--- a/arch/powerpc/configs/40x/acadia_defconfig
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -44,12 +44,13 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
CONFIG_MII=y
-CONFIG_IBM_NEW_EMAC=y
-CONFIG_IBM_NEW_EMAC_RXB=256
-CONFIG_IBM_NEW_EMAC_TXB=256
-CONFIG_IBM_NEW_EMAC_DEBUG=y
+CONFIG_IBM_EMAC=y
+CONFIG_IBM_EMAC_RXB=256
+CONFIG_IBM_EMAC_TXB=256
+CONFIG_IBM_EMAC_DEBUG=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig
index 2dbb293163f..17582a3420f 100644
--- a/arch/powerpc/configs/40x/ep405_defconfig
+++ b/arch/powerpc/configs/40x/ep405_defconfig
@@ -42,8 +42,9 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/powerpc/configs/40x/hcu4_defconfig b/arch/powerpc/configs/40x/hcu4_defconfig
index ebeb4accad6..dba263c1d3a 100644
--- a/arch/powerpc/configs/40x/hcu4_defconfig
+++ b/arch/powerpc/configs/40x/hcu4_defconfig
@@ -43,8 +43,9 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index 532ea9d93a1..f2d4be936e0 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -51,10 +51,11 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
-CONFIG_IBM_NEW_EMAC_RXB=256
-CONFIG_IBM_NEW_EMAC_TXB=256
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
+CONFIG_IBM_EMAC_RXB=256
+CONFIG_IBM_EMAC_TXB=256
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig
index 3c142ac1b34..42b979355f9 100644
--- a/arch/powerpc/configs/40x/makalu_defconfig
+++ b/arch/powerpc/configs/40x/makalu_defconfig
@@ -43,10 +43,11 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
-CONFIG_IBM_NEW_EMAC_RXB=256
-CONFIG_IBM_NEW_EMAC_TXB=256
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
+CONFIG_IBM_EMAC_RXB=256
+CONFIG_IBM_EMAC_TXB=256
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
diff --git a/arch/powerpc/configs/40x/walnut_defconfig b/arch/powerpc/configs/40x/walnut_defconfig
index ff57d4828ff..aa1a4cac370 100644
--- a/arch/powerpc/configs/40x/walnut_defconfig
+++ b/arch/powerpc/configs/40x/walnut_defconfig
@@ -40,8 +40,9 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/powerpc/configs/44x/arches_defconfig b/arch/powerpc/configs/44x/arches_defconfig
index 3ed16d5c909..329f9a3b892 100644
--- a/arch/powerpc/configs/44x/arches_defconfig
+++ b/arch/powerpc/configs/44x/arches_defconfig
@@ -44,10 +44,11 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
-CONFIG_IBM_NEW_EMAC_RXB=256
-CONFIG_IBM_NEW_EMAC_TXB=256
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
+CONFIG_IBM_EMAC_RXB=256
+CONFIG_IBM_EMAC_TXB=256
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig
index b1b7d2c5c05..cef7d62560c 100644
--- a/arch/powerpc/configs/44x/bamboo_defconfig
+++ b/arch/powerpc/configs/44x/bamboo_defconfig
@@ -32,8 +32,9 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/powerpc/configs/44x/bluestone_defconfig b/arch/powerpc/configs/44x/bluestone_defconfig
index 30a0a8e08fd..20c8d26d7fc 100644
--- a/arch/powerpc/configs/44x/bluestone_defconfig
+++ b/arch/powerpc/configs/44x/bluestone_defconfig
@@ -38,10 +38,11 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
-CONFIG_IBM_NEW_EMAC_RXB=256
-CONFIG_IBM_NEW_EMAC_TXB=256
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
+CONFIG_IBM_EMAC_RXB=256
+CONFIG_IBM_EMAC_TXB=256
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index a46942aac69..d5be93e6e92 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -49,10 +49,11 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
-CONFIG_IBM_NEW_EMAC_RXB=256
-CONFIG_IBM_NEW_EMAC_TXB=256
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
+CONFIG_IBM_EMAC_RXB=256
+CONFIG_IBM_EMAC_TXB=256
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
diff --git a/arch/powerpc/configs/44x/ebony_defconfig b/arch/powerpc/configs/44x/ebony_defconfig
index 07d77e51f1b..f9269fc4ffc 100644
--- a/arch/powerpc/configs/44x/ebony_defconfig
+++ b/arch/powerpc/configs/44x/ebony_defconfig
@@ -40,8 +40,9 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig
index 2ce7e9aff09..9be089038fd 100644
--- a/arch/powerpc/configs/44x/eiger_defconfig
+++ b/arch/powerpc/configs/44x/eiger_defconfig
@@ -55,10 +55,11 @@ CONFIG_FUSION=y
CONFIG_FUSION_SAS=y
CONFIG_I2O=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
-CONFIG_IBM_NEW_EMAC_RXB=256
-CONFIG_IBM_NEW_EMAC_TXB=256
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
+CONFIG_IBM_EMAC_RXB=256
+CONFIG_IBM_EMAC_TXB=256
CONFIG_E1000E=y
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig
index 18730ff9de7..82f73035a7c 100644
--- a/arch/powerpc/configs/44x/icon_defconfig
+++ b/arch/powerpc/configs/44x/icon_defconfig
@@ -56,8 +56,9 @@ CONFIG_FUSION_SAS=y
CONFIG_FUSION_CTL=y
CONFIG_FUSION_LOGGING=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig
index 34c09144a69..109562c3c6b 100644
--- a/arch/powerpc/configs/44x/katmai_defconfig
+++ b/arch/powerpc/configs/44x/katmai_defconfig
@@ -42,8 +42,9 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_MACINTOSH_DRIVERS=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig
index 01cc2b1a7f9..48802811da7 100644
--- a/arch/powerpc/configs/44x/redwood_defconfig
+++ b/arch/powerpc/configs/44x/redwood_defconfig
@@ -53,11 +53,12 @@ CONFIG_FUSION=y
CONFIG_FUSION_SAS=y
CONFIG_I2O=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
-CONFIG_IBM_NEW_EMAC_RXB=256
-CONFIG_IBM_NEW_EMAC_TXB=256
-CONFIG_IBM_NEW_EMAC_DEBUG=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
+CONFIG_IBM_EMAC_RXB=256
+CONFIG_IBM_EMAC_TXB=256
+CONFIG_IBM_EMAC_DEBUG=y
CONFIG_E1000E=y
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index dfcffede16a..ca088cd581a 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -44,8 +44,9 @@ CONFIG_ATA=y
# CONFIG_SATA_PMP is not set
CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
CONFIG_INPUT_FF_MEMLESS=m
diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig
index 47e399f2892..b7a653b626d 100644
--- a/arch/powerpc/configs/44x/sequoia_defconfig
+++ b/arch/powerpc/configs/44x/sequoia_defconfig
@@ -46,8 +46,9 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig
index a6a002ed568..30de97f158a 100644
--- a/arch/powerpc/configs/44x/taishan_defconfig
+++ b/arch/powerpc/configs/44x/taishan_defconfig
@@ -40,8 +40,9 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_MACINTOSH_DRIVERS=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index abf74dc1f79..105bc56f4b2 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -54,9 +54,10 @@ CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SPI_ATTRS=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
CONFIG_MII=y
-CONFIG_IBM_NEW_EMAC=y
+CONFIG_IBM_EMAC=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig
index bfd634b5ada..7cb703b948b 100644
--- a/arch/powerpc/configs/ppc40x_defconfig
+++ b/arch/powerpc/configs/ppc40x_defconfig
@@ -50,8 +50,9 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_XILINX_SYSACE=m
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
CONFIG_SERIO=m
# CONFIG_SERIO_I8042 is not set
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 47133202a62..6cdf1c0d2c8 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -63,8 +63,9 @@ CONFIG_BLK_DEV_SD=m
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_IBM_NEW_EMAC=y
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
CONFIG_SERIO=m
# CONFIG_SERIO_I8042 is not set
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
index 0947b36e534..5e0b6d511e1 100644
--- a/arch/powerpc/include/asm/qe.h
+++ b/arch/powerpc/include/asm/qe.h
@@ -196,7 +196,7 @@ static inline int qe_alive_during_sleep(void)
/* Structure that defines QE firmware binary files.
*
- * See Documentation/powerpc/qe-firmware.txt for a description of these
+ * See Documentation/powerpc/qe_firmware.txt for a description of these
* fields.
*/
struct qe_firmware {
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 93e05d1b34b..5354ae91bdd 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -54,6 +54,7 @@ extern void __init udbg_init_40x_realmode(void);
extern void __init udbg_init_cpm(void);
extern void __init udbg_init_usbgecko(void);
extern void __init udbg_init_wsp(void);
+extern void __init udbg_init_ehv_bc(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UDBG_H */
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index faa82c1f3f6..b4607a91d1f 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -67,6 +67,8 @@ void __init udbg_early_init(void)
udbg_init_usbgecko();
#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
udbg_init_wsp();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC)
+ udbg_init_ehv_bc();
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index d733d7ca939..b5d87067a58 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -130,21 +130,21 @@ config 405GP
bool
select IBM405_ERR77
select IBM405_ERR51
- select IBM_NEW_EMAC_ZMII
+ select IBM_EMAC_ZMII
config 405EP
bool
config 405EX
bool
- select IBM_NEW_EMAC_EMAC4
- select IBM_NEW_EMAC_RGMII
+ select IBM_EMAC_EMAC4
+ select IBM_EMAC_RGMII
config 405EZ
bool
- select IBM_NEW_EMAC_NO_FLOW_CTRL
- select IBM_NEW_EMAC_MAL_CLR_ICINTSTAT
- select IBM_NEW_EMAC_MAL_COMMON_ERR
+ select IBM_EMAC_NO_FLOW_CTRL
+ select IBM_EMAC_MAL_CLR_ICINTSTAT
+ select IBM_EMAC_MAL_COMMON_ERR
config 405GPR
bool
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index e958b6f48ec..762322ce24a 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -23,7 +23,7 @@ config BLUESTONE
default n
select PPC44x_SIMPLE
select APM821xx
- select IBM_NEW_EMAC_RGMII
+ select IBM_EMAC_RGMII
help
This option enables support for the APM APM821xx Evaluation board.
@@ -122,8 +122,8 @@ config CANYONLANDS
select PPC4xx_PCI_EXPRESS
select PCI_MSI
select PPC4xx_MSI
- select IBM_NEW_EMAC_RGMII
- select IBM_NEW_EMAC_ZMII
+ select IBM_EMAC_RGMII
+ select IBM_EMAC_ZMII
help
This option enables support for the AMCC PPC460EX evaluation board.
@@ -135,8 +135,8 @@ config GLACIER
select 460EX # Odd since it uses 460GT but the effects are the same
select PCI
select PPC4xx_PCI_EXPRESS
- select IBM_NEW_EMAC_RGMII
- select IBM_NEW_EMAC_ZMII
+ select IBM_EMAC_RGMII
+ select IBM_EMAC_ZMII
help
This option enables support for the AMCC PPC460GT evaluation board.
@@ -161,7 +161,7 @@ config EIGER
select 460SX
select PCI
select PPC4xx_PCI_EXPRESS
- select IBM_NEW_EMAC_RGMII
+ select IBM_EMAC_RGMII
help
This option enables support for the AMCC PPC460SX evaluation board.
@@ -260,59 +260,59 @@ config 440EP
bool
select PPC_FPU
select IBM440EP_ERR42
- select IBM_NEW_EMAC_ZMII
+ select IBM_EMAC_ZMII
select USB_ARCH_HAS_OHCI
config 440EPX
bool
select PPC_FPU
- select IBM_NEW_EMAC_EMAC4
- select IBM_NEW_EMAC_RGMII
- select IBM_NEW_EMAC_ZMII
+ select IBM_EMAC_EMAC4
+ select IBM_EMAC_RGMII
+ select IBM_EMAC_ZMII
config 440GRX
bool
- select IBM_NEW_EMAC_EMAC4
- select IBM_NEW_EMAC_RGMII
- select IBM_NEW_EMAC_ZMII
+ select IBM_EMAC_EMAC4
+ select IBM_EMAC_RGMII
+ select IBM_EMAC_ZMII
config 440GP
bool
- select IBM_NEW_EMAC_ZMII
+ select IBM_EMAC_ZMII
config 440GX
bool
- select IBM_NEW_EMAC_EMAC4
- select IBM_NEW_EMAC_RGMII
- select IBM_NEW_EMAC_ZMII #test only
- select IBM_NEW_EMAC_TAH #test only
+ select IBM_EMAC_EMAC4
+ select IBM_EMAC_RGMII
+ select IBM_EMAC_ZMII #test only
+ select IBM_EMAC_TAH #test only
config 440SP
bool
config 440SPe
bool
- select IBM_NEW_EMAC_EMAC4
+ select IBM_EMAC_EMAC4
config 460EX
bool
select PPC_FPU
- select IBM_NEW_EMAC_EMAC4
- select IBM_NEW_EMAC_TAH
+ select IBM_EMAC_EMAC4
+ select IBM_EMAC_TAH
config 460SX
bool
select PPC_FPU
- select IBM_NEW_EMAC_EMAC4
- select IBM_NEW_EMAC_RGMII
- select IBM_NEW_EMAC_ZMII
- select IBM_NEW_EMAC_TAH
+ select IBM_EMAC_EMAC4
+ select IBM_EMAC_RGMII
+ select IBM_EMAC_ZMII
+ select IBM_EMAC_TAH
config APM821xx
bool
select PPC_FPU
- select IBM_NEW_EMAC_EMAC4
- select IBM_NEW_EMAC_TAH
+ select IBM_EMAC_EMAC4
+ select IBM_EMAC_TAH
# 44x errata/workaround config symbols, selected by the CPU models above
config IBM440EP_ERR42
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 498534cd526..12f5932dadc 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -80,7 +80,7 @@ config P1010_RDB
config P1022_DS
bool "Freescale P1022 DS"
select DEFAULT_UIMAGE
- select CONFIG_PHYS_64BIT # The DTS has 36-bit addresses
+ select PHYS_64BIT # The DTS has 36-bit addresses
select SWIOTLB
help
This option enables support for the Freescale P1022DS reference board.
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 67d5009b4e8..2e7ff0c5cf4 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -17,10 +17,10 @@ config PPC_CELL_NATIVE
select PPC_CELL_COMMON
select MPIC
select PPC_IO_WORKAROUNDS
- select IBM_NEW_EMAC_EMAC4
- select IBM_NEW_EMAC_RGMII
- select IBM_NEW_EMAC_ZMII #test only
- select IBM_NEW_EMAC_TAH #test only
+ select IBM_EMAC_EMAC4
+ select IBM_EMAC_RGMII
+ select IBM_EMAC_ZMII #test only
+ select IBM_EMAC_TAH #test only
default n
config PPC_IBM_CELL_BLADE
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index 613070e9ddb..f1eebcae9bf 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -77,7 +77,7 @@ static void __init storcenter_setup_arch(void)
}
/*
- * Interrupt setup and service. Interrrupts on the turbostation come
+ * Interrupt setup and service. Interrupts on the turbostation come
* from the four PCI slots plus onboard 8241 devices: I2C, DUART.
*/
static void __init storcenter_init_IRQ(void)
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 904c6cbaf45..3363fbc964f 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -382,7 +382,7 @@ static void qe_upload_microcode(const void *base,
/*
* Upload a microcode to the I-RAM at a specific address.
*
- * See Documentation/powerpc/qe-firmware.txt for information on QE microcode
+ * See Documentation/powerpc/qe_firmware.txt for information on QE microcode
* uploading.
*
* Currently, only version 1 is supported, so the 'version' field must be
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 984cd202915..3330feca750 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -47,7 +47,7 @@ struct uic {
int index;
int dcrbase;
- spinlock_t lock;
+ raw_spinlock_t lock;
/* The remapper for this UIC */
struct irq_host *irqhost;
@@ -61,14 +61,14 @@ static void uic_unmask_irq(struct irq_data *d)
u32 er, sr;
sr = 1 << (31-src);
- spin_lock_irqsave(&uic->lock, flags);
+ raw_spin_lock_irqsave(&uic->lock, flags);
/* ack level-triggered interrupts here */
if (irqd_is_level_type(d))
mtdcr(uic->dcrbase + UIC_SR, sr);
er = mfdcr(uic->dcrbase + UIC_ER);
er |= sr;
mtdcr(uic->dcrbase + UIC_ER, er);
- spin_unlock_irqrestore(&uic->lock, flags);
+ raw_spin_unlock_irqrestore(&uic->lock, flags);
}
static void uic_mask_irq(struct irq_data *d)
@@ -78,11 +78,11 @@ static void uic_mask_irq(struct irq_data *d)
unsigned long flags;
u32 er;
- spin_lock_irqsave(&uic->lock, flags);
+ raw_spin_lock_irqsave(&uic->lock, flags);
er = mfdcr(uic->dcrbase + UIC_ER);
er &= ~(1 << (31 - src));
mtdcr(uic->dcrbase + UIC_ER, er);
- spin_unlock_irqrestore(&uic->lock, flags);
+ raw_spin_unlock_irqrestore(&uic->lock, flags);
}
static void uic_ack_irq(struct irq_data *d)
@@ -91,9 +91,9 @@ static void uic_ack_irq(struct irq_data *d)
unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
- spin_lock_irqsave(&uic->lock, flags);
+ raw_spin_lock_irqsave(&uic->lock, flags);
mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src));
- spin_unlock_irqrestore(&uic->lock, flags);
+ raw_spin_unlock_irqrestore(&uic->lock, flags);
}
static void uic_mask_ack_irq(struct irq_data *d)
@@ -104,7 +104,7 @@ static void uic_mask_ack_irq(struct irq_data *d)
u32 er, sr;
sr = 1 << (31-src);
- spin_lock_irqsave(&uic->lock, flags);
+ raw_spin_lock_irqsave(&uic->lock, flags);
er = mfdcr(uic->dcrbase + UIC_ER);
er &= ~sr;
mtdcr(uic->dcrbase + UIC_ER, er);
@@ -118,7 +118,7 @@ static void uic_mask_ack_irq(struct irq_data *d)
*/
if (!irqd_is_level_type(d))
mtdcr(uic->dcrbase + UIC_SR, sr);
- spin_unlock_irqrestore(&uic->lock, flags);
+ raw_spin_unlock_irqrestore(&uic->lock, flags);
}
static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
@@ -152,7 +152,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
mask = ~(1 << (31 - src));
- spin_lock_irqsave(&uic->lock, flags);
+ raw_spin_lock_irqsave(&uic->lock, flags);
tr = mfdcr(uic->dcrbase + UIC_TR);
pr = mfdcr(uic->dcrbase + UIC_PR);
tr = (tr & mask) | (trigger << (31-src));
@@ -161,7 +161,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
mtdcr(uic->dcrbase + UIC_PR, pr);
mtdcr(uic->dcrbase + UIC_TR, tr);
- spin_unlock_irqrestore(&uic->lock, flags);
+ raw_spin_unlock_irqrestore(&uic->lock, flags);
return 0;
}
@@ -254,7 +254,7 @@ static struct uic * __init uic_init_one(struct device_node *node)
if (! uic)
return NULL; /* FIXME: panic? */
- spin_lock_init(&uic->lock);
+ raw_spin_lock_init(&uic->lock);
indexp = of_get_property(node, "cell-index", &len);
if (!indexp || (len != sizeof(u32))) {
printk(KERN_ERR "uic: Device node %s has missing or invalid "
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ed5cb5af528..6b99fc3f9b6 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -91,6 +91,7 @@ config S390
select HAVE_ARCH_MUTEX_CPU_RELAX
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
select HAVE_RCU_TABLE_FREE if SMP
+ select ARCH_SAVE_PAGE_KEYS if HIBERNATION
select ARCH_INLINE_SPIN_TRYLOCK
select ARCH_INLINE_SPIN_TRYLOCK_BH
select ARCH_INLINE_SPIN_LOCK
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 6023c6dc1fb..74c8f5e76ce 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -562,10 +562,9 @@ static int dbfs_d204_create(void **data, void **data_free_ptr, size_t *size)
void *base;
buf_size = PAGE_SIZE * (diag204_buf_pages + 1) + sizeof(d204->hdr);
- base = vmalloc(buf_size);
+ base = vzalloc(buf_size);
if (!base)
return -ENOMEM;
- memset(base, 0, buf_size);
d204 = page_align_ptr(base + sizeof(d204->hdr)) - sizeof(d204->hdr);
rc = diag204_do_store(d204->buf, diag204_buf_pages);
if (rc) {
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 15c97625df8..21993623da9 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -123,6 +123,40 @@ struct slibe {
};
/**
+ * struct qaob - queue asynchronous operation block
+ * @res0: reserved parameters
+ * @res1: reserved parameter
+ * @res2: reserved parameter
+ * @res3: reserved parameter
+ * @aorc: asynchronous operation return code
+ * @flags: internal flags
+ * @cbtbs: control block type
+ * @sb_count: number of storage blocks
+ * @sba: storage block element addresses
+ * @dcount: size of storage block elements
+ * @user0: user defineable value
+ * @res4: reserved paramater
+ * @user1: user defineable value
+ * @user2: user defineable value
+ */
+struct qaob {
+ u64 res0[6];
+ u8 res1;
+ u8 res2;
+ u8 res3;
+ u8 aorc;
+ u8 flags;
+ u16 cbtbs;
+ u8 sb_count;
+ u64 sba[QDIO_MAX_ELEMENTS_PER_BUFFER];
+ u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER];
+ u64 user0;
+ u64 res4[2];
+ u64 user1;
+ u64 user2;
+} __attribute__ ((packed, aligned(256)));
+
+/**
* struct slib - storage list information block (SLIB)
* @nsliba: next SLIB address (if any)
* @sla: SL address
@@ -225,6 +259,41 @@ struct slsb {
#define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010
#define CHSC_AC2_DATA_DIV_ENABLED 0x0002
+/**
+ * struct qdio_outbuf_state - SBAL related asynchronous operation information
+ * (for communication with upper layer programs)
+ * (only required for use with completion queues)
+ * @flags: flags indicating state of buffer
+ * @aob: pointer to QAOB used for the particular SBAL
+ * @user: pointer to upper layer program's state information related to SBAL
+ * (stored in user1 data of QAOB)
+ */
+struct qdio_outbuf_state {
+ u8 flags;
+ struct qaob *aob;
+ void *user;
+};
+
+#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
+#define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
+
+#define CHSC_AC1_INITIATE_INPUTQ 0x80
+
+
+/* qdio adapter-characteristics-1 flag */
+#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */
+#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */
+#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */
+#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */
+#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */
+#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
+#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
+
+#define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010
+#define CHSC_AC2_DATA_DIV_ENABLED 0x0002
+
+#define CHSC_AC3_FORMAT2_CQ_AVAILABLE 0x8000
+
struct qdio_ssqd_desc {
u8 flags;
u8:8;
@@ -243,8 +312,7 @@ struct qdio_ssqd_desc {
u64 sch_token;
u8 mro;
u8 mri;
- u8:8;
- u8 sbalic;
+ u16 qdioac3;
u16:16;
u8:8;
u8 mmwc;
@@ -280,9 +348,11 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
* @no_output_qs: number of output queues
* @input_handler: handler to be called for input queues
* @output_handler: handler to be called for output queues
+ * @queue_start_poll: polling handlers (one per input queue or NULL)
* @int_parm: interruption parameter
* @input_sbal_addr_array: address of no_input_qs * 128 pointers
* @output_sbal_addr_array: address of no_output_qs * 128 pointers
+ * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL)
*/
struct qdio_initialize {
struct ccw_device *cdev;
@@ -297,11 +367,12 @@ struct qdio_initialize {
unsigned int no_output_qs;
qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
- void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
+ void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
int scan_threshold;
unsigned long int_parm;
void **input_sbal_addr_array;
void **output_sbal_addr_array;
+ struct qdio_outbuf_state *output_sbal_state_array;
};
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
@@ -316,6 +387,7 @@ struct qdio_initialize {
extern int qdio_allocate(struct qdio_initialize *);
extern int qdio_establish(struct qdio_initialize *);
extern int qdio_activate(struct ccw_device *);
+extern void qdio_release_aob(struct qaob *);
extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
unsigned int);
extern int qdio_start_irq(struct ccw_device *, int);
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index cf9e5c6d552..b6f9afed74e 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -7,6 +7,7 @@
*/
#include <linux/pfn.h>
+#include <linux/mm.h>
#include <asm/system.h>
/*
@@ -14,6 +15,123 @@
*/
extern const void __nosave_begin, __nosave_end;
+/*
+ * The restore of the saved pages in an hibernation image will set
+ * the change and referenced bits in the storage key for each page.
+ * Overindication of the referenced bits after an hibernation cycle
+ * does not cause any harm but the overindication of the change bits
+ * would cause trouble.
+ * Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each
+ * page to the most significant byte of the associated page frame
+ * number in the hibernation image.
+ */
+
+/*
+ * Key storage is allocated as a linked list of pages.
+ * The size of the keys array is (PAGE_SIZE - sizeof(long))
+ */
+struct page_key_data {
+ struct page_key_data *next;
+ unsigned char data[];
+};
+
+#define PAGE_KEY_DATA_SIZE (PAGE_SIZE - sizeof(struct page_key_data *))
+
+static struct page_key_data *page_key_data;
+static struct page_key_data *page_key_rp, *page_key_wp;
+static unsigned long page_key_rx, page_key_wx;
+
+/*
+ * For each page in the hibernation image one additional byte is
+ * stored in the most significant byte of the page frame number.
+ * On suspend no additional memory is required but on resume the
+ * keys need to be memorized until the page data has been restored.
+ * Only then can the storage keys be set to their old state.
+ */
+unsigned long page_key_additional_pages(unsigned long pages)
+{
+ return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
+}
+
+/*
+ * Free page_key_data list of arrays.
+ */
+void page_key_free(void)
+{
+ struct page_key_data *pkd;
+
+ while (page_key_data) {
+ pkd = page_key_data;
+ page_key_data = pkd->next;
+ free_page((unsigned long) pkd);
+ }
+}
+
+/*
+ * Allocate page_key_data list of arrays with enough room to store
+ * one byte for each page in the hibernation image.
+ */
+int page_key_alloc(unsigned long pages)
+{
+ struct page_key_data *pk;
+ unsigned long size;
+
+ size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
+ while (size--) {
+ pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL);
+ if (!pk) {
+ page_key_free();
+ return -ENOMEM;
+ }
+ pk->next = page_key_data;
+ page_key_data = pk;
+ }
+ page_key_rp = page_key_wp = page_key_data;
+ page_key_rx = page_key_wx = 0;
+ return 0;
+}
+
+/*
+ * Save the storage key into the upper 8 bits of the page frame number.
+ */
+void page_key_read(unsigned long *pfn)
+{
+ unsigned long addr;
+
+ addr = (unsigned long) page_address(pfn_to_page(*pfn));
+ *(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr);
+}
+
+/*
+ * Extract the storage key from the upper 8 bits of the page frame number
+ * and store it in the page_key_data list of arrays.
+ */
+void page_key_memorize(unsigned long *pfn)
+{
+ page_key_wp->data[page_key_wx] = *(unsigned char *) pfn;
+ *(unsigned char *) pfn = 0;
+ if (++page_key_wx < PAGE_KEY_DATA_SIZE)
+ return;
+ page_key_wp = page_key_wp->next;
+ page_key_wx = 0;
+}
+
+/*
+ * Get the next key from the page_key_data list of arrays and set the
+ * storage key of the page referred by @address. If @address refers to
+ * a "safe" page the swsusp_arch_resume code will transfer the storage
+ * key from the buffer page to the original page.
+ */
+void page_key_write(void *address)
+{
+ page_set_storage_key((unsigned long) address,
+ page_key_rp->data[page_key_rx], 0);
+ if (++page_key_rx >= PAGE_KEY_DATA_SIZE)
+ return;
+ page_key_rp = page_key_rp->next;
+ page_key_rx = 0;
+}
+
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 51bcdb50a23..acb78cdee89 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -136,11 +136,14 @@ ENTRY(swsusp_arch_resume)
0:
lg %r2,8(%r1)
lg %r4,0(%r1)
+ iske %r0,%r4
lghi %r3,PAGE_SIZE
lghi %r5,PAGE_SIZE
1:
mvcle %r2,%r4,0
jo 1b
+ lg %r2,8(%r1)
+ sske %r0,%r2
lg %r1,16(%r1)
ltgr %r1,%r1
jnz 0b
diff --git a/arch/sh/include/asm/sh_eth.h b/arch/sh/include/asm/sh_eth.h
deleted file mode 100644
index 0f325da0f92..00000000000
--- a/arch/sh/include/asm/sh_eth.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __ASM_SH_ETH_H__
-#define __ASM_SH_ETH_H__
-
-#include <linux/phy.h>
-
-enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
-enum {
- SH_ETH_REG_GIGABIT,
- SH_ETH_REG_FAST_SH4,
- SH_ETH_REG_FAST_SH3_SH2
-};
-
-struct sh_eth_plat_data {
- int phy;
- int edmac_endian;
- int register_type;
- phy_interface_t phy_interface;
- void (*set_mdio_gate)(unsigned long addr);
-
- unsigned char mac_addr[6];
- unsigned no_ether_link:1;
- unsigned ether_link_active_low:1;
-};
-
-#endif
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index 1407c07bdad..f6ae2b2b687 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -280,7 +280,7 @@ static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
return retval;
}
#else
-#define srmmu_hwprobe(addr) (srmmu_swprobe(addr, 0) & SRMMU_PTE_PMASK)
+#define srmmu_hwprobe(addr) srmmu_swprobe(addr, 0)
#endif
static inline int
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 1e94f946570..8aa0d440858 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -230,7 +230,8 @@ static void pci_parse_of_addrs(struct platform_device *op,
res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
} else if (i == dev->rom_base_reg) {
res = &dev->resource[PCI_ROM_RESOURCE];
- flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
+ flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE
+ | IORESOURCE_SIZEALIGN;
} else {
printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
continue;
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 1ba95aff5d5..2caa556db86 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -273,10 +273,7 @@ void do_sigreturn32(struct pt_regs *regs)
case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
}
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
return;
segv:
@@ -377,10 +374,7 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
}
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
return;
segv:
force_sig(SIGSEGV, current);
@@ -782,6 +776,7 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
+ sigset_t blocked;
int err;
if (ka->sa.sa_flags & SA_SIGINFO)
@@ -792,12 +787,10 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
if (err)
return err;
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NOMASK))
- sigaddset(&current->blocked,signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ sigaddset(&blocked, signr);
+ set_current_blocked(&blocked);
tracehook_signal_handler(signr, info, ka, regs, 0);
@@ -881,7 +874,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
*/
if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ set_current_blocked(&current->saved_sigmask);
}
}
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 04ede8f04ad..8ce247ac04c 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -62,12 +62,13 @@ struct rt_signal_frame {
static int _sigpause_common(old_sigset_t set)
{
- set &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
+ sigset_t blocked;
+
current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, set);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+
+ set &= _BLOCKABLE;
+ siginitset(&blocked, set);
+ set_current_blocked(&blocked);
current->state = TASK_INTERRUPTIBLE;
schedule();
@@ -139,10 +140,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
goto segv_and_exit;
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
return;
segv_and_exit:
@@ -209,10 +207,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
}
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
return;
segv:
force_sig(SIGSEGV, current);
@@ -470,6 +465,7 @@ static inline int
handle_signal(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
+ sigset_t blocked;
int err;
if (ka->sa.sa_flags & SA_SIGINFO)
@@ -480,12 +476,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
if (err)
return err;
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NOMASK))
- sigaddset(&current->blocked, signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ sigaddset(&blocked, signr);
+ set_current_blocked(&blocked);
tracehook_signal_handler(signr, info, ka, regs, 0);
@@ -581,7 +575,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ set_current_blocked(&current->saved_sigmask);
}
}
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 47509df3b89..a2b81598d90 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -70,10 +70,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
goto do_sigsegv;
}
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
}
if (test_thread_flag(TIF_32BIT)) {
pc &= 0xffffffff;
@@ -242,12 +239,13 @@ struct rt_signal_frame {
static long _sigpause_common(old_sigset_t set)
{
- set &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
+ sigset_t blocked;
+
current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, set);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+
+ set &= _BLOCKABLE;
+ siginitset(&blocked, set);
+ set_current_blocked(&blocked);
current->state = TASK_INTERRUPTIBLE;
schedule();
@@ -327,10 +325,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
pt_regs_clear_syscall(regs);
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
return;
segv:
force_sig(SIGSEGV, current);
@@ -484,18 +479,17 @@ static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
+ sigset_t blocked;
int err;
err = setup_rt_frame(ka, regs, signr, oldset,
(ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
if (err)
return err;
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NOMASK))
- sigaddset(&current->blocked,signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ sigaddset(&blocked, signr);
+ set_current_blocked(&blocked);
tracehook_signal_handler(signr, info, ka, regs, 0);
@@ -601,7 +595,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
*/
if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ set_current_blocked(&current->saved_sigmask);
}
}
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index e485a680499..13c2169822a 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -162,7 +162,7 @@ ready:
printk(KERN_INFO "swprobe: padde %x\n", paddr_calc);
if (paddr)
*paddr = paddr_calc;
- return paddrbase;
+ return pte;
}
void leon_flush_icache_all(void)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index fc94607f0bd..aecc8ed5f39 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -21,7 +21,7 @@
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
-#include <linux/atomic.h>
+#include <asm/atomic_32.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/abi.h>
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 1f75a2a5610..30638042691 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -70,7 +70,7 @@
*/
#include <linux/linkage.h>
-#include <linux/atomic.h>
+#include <asm/atomic_32.h>
#include <asm/page.h>
#include <asm/processor.h>
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 22745b47c82..a492e59883a 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -368,7 +368,7 @@ static const struct net_device_ops uml_netdev_ops = {
.ndo_open = uml_net_open,
.ndo_stop = uml_net_close,
.ndo_start_xmit = uml_net_start_xmit,
- .ndo_set_multicast_list = uml_net_set_multicast_list,
+ .ndo_set_rx_mode = uml_net_set_multicast_list,
.ndo_tx_timeout = uml_net_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = uml_net_change_mtu,
diff --git a/arch/unicore32/include/asm/io.h b/arch/unicore32/include/asm/io.h
index 4bd87f3d13d..1a5c5a5eb39 100644
--- a/arch/unicore32/include/asm/io.h
+++ b/arch/unicore32/include/asm/io.h
@@ -32,7 +32,7 @@ extern void __uc32_iounmap(volatile void __iomem *addr);
* ioremap and friends.
*
* ioremap takes a PCI memory address, as specified in
- * Documentation/IO-mapping.txt.
+ * Documentation/io-mapping.txt.
*
*/
#define ioremap(cookie, size) __uc32_ioremap(cookie, size)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a47bb22657..9037289617a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -130,7 +130,7 @@ config SBUS
bool
config NEED_DMA_MAP_STATE
- def_bool (X86_64 || DMAR || DMA_API_DEBUG)
+ def_bool (X86_64 || INTEL_IOMMU || DMA_API_DEBUG)
config NEED_SG_DMA_LENGTH
def_bool y
@@ -220,7 +220,7 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
config HAVE_INTEL_TXT
def_bool y
- depends on EXPERIMENTAL && DMAR && ACPI
+ depends on EXPERIMENTAL && INTEL_IOMMU && ACPI
config X86_32_SMP
def_bool y
@@ -279,7 +279,7 @@ config SMP
Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
Management" code will be disabled if you say Y here.
- See also <file:Documentation/i386/IO-APIC.txt>,
+ See also <file:Documentation/x86/i386/IO-APIC.txt>,
<file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
<http://www.tldp.org/docs.html#howto>.
@@ -287,7 +287,7 @@ config SMP
config X86_X2APIC
bool "Support x2apic"
- depends on X86_LOCAL_APIC && X86_64 && INTR_REMAP
+ depends on X86_LOCAL_APIC && X86_64 && IRQ_REMAP
---help---
This enables x2apic support on CPUs that have this feature.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index c0f8a5c8891..bf56e179327 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -139,7 +139,7 @@ config IOMMU_DEBUG
code. When you use it make sure you have a big enough
IOMMU/AGP aperture. Most of the options enabled by this can
be set more finegrained using the iommu= command line
- options. See Documentation/x86_64/boot-options.txt for more
+ options. See Documentation/x86/x86_64/boot-options.txt for more
details.
config IOMMU_STRESS
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 93e689f4bd8..bdb4d458ec8 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -129,7 +129,7 @@ start_sys_seg: .word SYSSEG # obsolete and meaningless, but just
type_of_loader: .byte 0 # 0 means ancient bootloader, newer
# bootloaders know to change this.
- # See Documentation/i386/boot.txt for
+ # See Documentation/x86/boot.txt for
# assigned ids
# flags, unused bits must be zero (RFU) bit within loadflags
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 22a0dc8e51d..058a35b8286 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -67,8 +67,8 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_X86_ACPI_CPUFREQ=y
CONFIG_PCI_MMCONFIG=y
-CONFIG_DMAR=y
-# CONFIG_DMAR_DEFAULT_ON is not set
+CONFIG_INTEL_IOMMU=y
+# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
CONFIG_PCIEPORTBUS=y
CONFIG_PCCARD=y
CONFIG_YENTA=y
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 029f230ab63..63a2a03d7d5 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -8,7 +8,7 @@ struct dev_archdata {
#ifdef CONFIG_X86_64
struct dma_map_ops *dma_ops;
#endif
-#if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU)
+#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
void *iommu; /* hook for IOMMU specific extension */
#endif
};
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index d4c419f883a..ed3065fd631 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -2,7 +2,7 @@
#define _ASM_X86_DMA_MAPPING_H
/*
- * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
+ * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
* Documentation/DMA-API.txt for documentation.
*/
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 09199052060..eb92a6ed2be 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -119,7 +119,7 @@ struct irq_cfg {
cpumask_var_t old_domain;
u8 vector;
u8 move_in_progress : 1;
-#ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_IRQ_REMAP
struct irq_2_iommu irq_2_iommu;
#endif
};
diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h
index 5df477ac3af..b80420bcd09 100644
--- a/arch/x86/include/asm/hyperv.h
+++ b/arch/x86/include/asm/hyperv.h
@@ -189,5 +189,6 @@
#define HV_STATUS_INVALID_HYPERCALL_CODE 2
#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
#define HV_STATUS_INVALID_ALIGNMENT 4
+#define HV_STATUS_INSUFFICIENT_BUFFERS 19
#endif
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 1c23360fb2d..47d99934580 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -3,7 +3,8 @@
#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
-#ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_IRQ_REMAP
+static void irq_remap_modify_chip_defaults(struct irq_chip *chip);
static inline void prepare_irte(struct irte *irte, int vector,
unsigned int dest)
{
@@ -36,6 +37,9 @@ static inline bool irq_remapped(struct irq_cfg *cfg)
{
return false;
}
+static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
+{
+}
#endif
#endif /* _ASM_X86_IRQ_REMAPPING_H */
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 7ff4669580c..c34f96c2f7a 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -12,6 +12,7 @@
#include <asm/pgtable.h>
#include <xen/interface/xen.h>
+#include <xen/grant_table.h>
#include <xen/features.h>
/* Xen machine address */
@@ -48,14 +49,11 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e);
extern int m2p_add_override(unsigned long mfn, struct page *page,
- bool clear_pte);
+ struct gnttab_map_grant_ref *kmap_op);
extern int m2p_remove_override(struct page *page, bool clear_pte);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
-#ifdef CONFIG_XEN_DEBUG_FS
-extern int p2m_dump_show(struct seq_file *m, void *v);
-#endif
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
unsigned long mfn;
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 8a439d364b9..b1e7c7f7a0a 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -5,7 +5,7 @@
* This allows to use PCI devices that only support 32bit addresses on systems
* with more than 4GB.
*
- * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
+ * See Documentation/DMA-API-HOWTO.txt for the interface specification.
*
* Copyright 2002 Andi Kleen, SuSE Labs.
* Subject to the GNU General Public License v2 only.
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 52fa56399a5..a2fd72e0ab3 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1437,27 +1437,21 @@ void enable_x2apic(void)
int __init enable_IR(void)
{
-#ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_IRQ_REMAP
if (!intr_remapping_supported()) {
pr_debug("intr-remapping not supported\n");
- return 0;
+ return -1;
}
if (!x2apic_preenabled && skip_ioapic_setup) {
pr_info("Skipped enabling intr-remap because of skipping "
"io-apic setup\n");
- return 0;
+ return -1;
}
- if (enable_intr_remapping(x2apic_supported()))
- return 0;
-
- pr_info("Enabled Interrupt-remapping\n");
-
- return 1;
-
+ return enable_intr_remapping();
#endif
- return 0;
+ return -1;
}
void __init enable_IR_x2apic(void)
@@ -1481,11 +1475,11 @@ void __init enable_IR_x2apic(void)
mask_ioapic_entries();
if (dmar_table_init_ret)
- ret = 0;
+ ret = -1;
else
ret = enable_IR();
- if (!ret) {
+ if (ret < 0) {
/* IR is required if there is APIC ID > 255 even when running
* under KVM
*/
@@ -1499,6 +1493,9 @@ void __init enable_IR_x2apic(void)
x2apic_force_phys();
}
+ if (ret == IRQ_REMAP_XAPIC_MODE)
+ goto nox2apic;
+
x2apic_enabled = 1;
if (x2apic_supported() && !x2apic_mode) {
@@ -1508,19 +1505,21 @@ void __init enable_IR_x2apic(void)
}
nox2apic:
- if (!ret) /* IR enabling failed */
+ if (ret < 0) /* IR enabling failed */
restore_ioapic_entries();
legacy_pic->restore_mask();
local_irq_restore(flags);
out:
- if (x2apic_enabled)
+ if (x2apic_enabled || !x2apic_supported())
return;
if (x2apic_preenabled)
panic("x2apic: enabled by BIOS but kernel init failed.");
- else if (cpu_has_x2apic)
- pr_info("Not enabling x2apic, Intr-remapping init failed.\n");
+ else if (ret == IRQ_REMAP_XAPIC_MODE)
+ pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n");
+ else if (ret < 0)
+ pr_info("x2apic not enabled, IRQ remapping init failed\n");
}
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 8eb863e27ea..229e19f3eb5 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -394,13 +394,21 @@ union entry_union {
struct IO_APIC_route_entry entry;
};
+static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
+{
+ union entry_union eu;
+
+ eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
+ eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
+ return eu.entry;
+}
+
static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
union entry_union eu;
unsigned long flags;
raw_spin_lock_irqsave(&ioapic_lock, flags);
- eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
- eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
+ eu.entry = __ioapic_read_entry(apic, pin);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return eu.entry;
}
@@ -529,18 +537,6 @@ static void io_apic_modify_irq(struct irq_cfg *cfg,
__io_apic_modify_irq(entry, mask_and, mask_or, final);
}
-static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
-{
- __io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
- IO_APIC_REDIR_MASKED, NULL);
-}
-
-static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
-{
- __io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
- IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
-}
-
static void io_apic_sync(struct irq_pin_list *entry)
{
/*
@@ -585,6 +581,66 @@ static void unmask_ioapic_irq(struct irq_data *data)
unmask_ioapic(data->chip_data);
}
+/*
+ * IO-APIC versions below 0x20 don't support EOI register.
+ * For the record, here is the information about various versions:
+ * 0Xh 82489DX
+ * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
+ * 2Xh I/O(x)APIC which is PCI 2.2 Compliant
+ * 30h-FFh Reserved
+ *
+ * Some of the Intel ICH Specs (ICH2 to ICH5) documents the io-apic
+ * version as 0x2. This is an error with documentation and these ICH chips
+ * use io-apic's of version 0x20.
+ *
+ * For IO-APIC's with EOI register, we use that to do an explicit EOI.
+ * Otherwise, we simulate the EOI message manually by changing the trigger
+ * mode to edge and then back to level, with RTE being masked during this.
+ */
+static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg)
+{
+ if (mpc_ioapic_ver(apic) >= 0x20) {
+ /*
+ * Intr-remapping uses pin number as the virtual vector
+ * in the RTE. Actual vector is programmed in
+ * intr-remapping table entry. Hence for the io-apic
+ * EOI we use the pin number.
+ */
+ if (cfg && irq_remapped(cfg))
+ io_apic_eoi(apic, pin);
+ else
+ io_apic_eoi(apic, vector);
+ } else {
+ struct IO_APIC_route_entry entry, entry1;
+
+ entry = entry1 = __ioapic_read_entry(apic, pin);
+
+ /*
+ * Mask the entry and change the trigger mode to edge.
+ */
+ entry1.mask = 1;
+ entry1.trigger = IOAPIC_EDGE;
+
+ __ioapic_write_entry(apic, pin, entry1);
+
+ /*
+ * Restore the previous level triggered entry.
+ */
+ __ioapic_write_entry(apic, pin, entry);
+ }
+}
+
+static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
+{
+ struct irq_pin_list *entry;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
+ for_each_irq_pin(entry, cfg->irq_2_pin)
+ __eoi_ioapic_pin(entry->apic, entry->pin, cfg->vector, cfg);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
struct IO_APIC_route_entry entry;
@@ -593,10 +649,44 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
entry = ioapic_read_entry(apic, pin);
if (entry.delivery_mode == dest_SMI)
return;
+
+ /*
+ * Make sure the entry is masked and re-read the contents to check
+ * if it is a level triggered pin and if the remote-IRR is set.
+ */
+ if (!entry.mask) {
+ entry.mask = 1;
+ ioapic_write_entry(apic, pin, entry);
+ entry = ioapic_read_entry(apic, pin);
+ }
+
+ if (entry.irr) {
+ unsigned long flags;
+
+ /*
+ * Make sure the trigger mode is set to level. Explicit EOI
+ * doesn't clear the remote-IRR if the trigger mode is not
+ * set to level.
+ */
+ if (!entry.trigger) {
+ entry.trigger = IOAPIC_LEVEL;
+ ioapic_write_entry(apic, pin, entry);
+ }
+
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
+ __eoi_ioapic_pin(apic, pin, entry.vector, NULL);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+
/*
- * Disable it in the IO-APIC irq-routing table:
+ * Clear the rest of the bits in the IO-APIC RTE except for the mask
+ * bit.
*/
ioapic_mask_entry(apic, pin);
+ entry = ioapic_read_entry(apic, pin);
+ if (entry.irr)
+ printk(KERN_ERR "Unable to reset IRR for apic: %d, pin :%d\n",
+ mpc_ioapic_id(apic), pin);
}
static void clear_IO_APIC (void)
@@ -1202,7 +1292,6 @@ void __setup_vector_irq(int cpu)
}
static struct irq_chip ioapic_chip;
-static struct irq_chip ir_ioapic_chip;
#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
@@ -1246,7 +1335,7 @@ static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
if (irq_remapped(cfg)) {
irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
- chip = &ir_ioapic_chip;
+ irq_remap_modify_chip_defaults(chip);
fasteoi = trigger != 0;
}
@@ -2255,7 +2344,7 @@ ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
return ret;
}
-#ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_IRQ_REMAP
/*
* Migrate the IO-APIC irq in the presence of intr-remapping.
@@ -2267,6 +2356,9 @@ ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
* updated vector information), by using a virtual vector (io-apic pin number).
* Real vector that is used for interrupting cpu will be coming from
* the interrupt-remapping table entry.
+ *
+ * As the migration is a simple atomic update of IRTE, the same mechanism
+ * is used to migrate MSI irq's in the presence of interrupt-remapping.
*/
static int
ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
@@ -2291,10 +2383,16 @@ ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
irte.dest_id = IRTE_DEST(dest);
/*
- * Modified the IRTE and flushes the Interrupt entry cache.
+ * Atomically updates the IRTE with the new destination, vector
+ * and flushes the interrupt entry cache.
*/
modify_irte(irq, &irte);
+ /*
+ * After this point, all the interrupts will start arriving
+ * at the new destination. So, time to cleanup the previous
+ * vector allocation.
+ */
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
@@ -2407,48 +2505,6 @@ static void ack_apic_edge(struct irq_data *data)
atomic_t irq_mis_count;
-/*
- * IO-APIC versions below 0x20 don't support EOI register.
- * For the record, here is the information about various versions:
- * 0Xh 82489DX
- * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
- * 2Xh I/O(x)APIC which is PCI 2.2 Compliant
- * 30h-FFh Reserved
- *
- * Some of the Intel ICH Specs (ICH2 to ICH5) documents the io-apic
- * version as 0x2. This is an error with documentation and these ICH chips
- * use io-apic's of version 0x20.
- *
- * For IO-APIC's with EOI register, we use that to do an explicit EOI.
- * Otherwise, we simulate the EOI message manually by changing the trigger
- * mode to edge and then back to level, with RTE being masked during this.
-*/
-static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
-{
- struct irq_pin_list *entry;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- for_each_irq_pin(entry, cfg->irq_2_pin) {
- if (mpc_ioapic_ver(entry->apic) >= 0x20) {
- /*
- * Intr-remapping uses pin number as the virtual vector
- * in the RTE. Actual vector is programmed in
- * intr-remapping table entry. Hence for the io-apic
- * EOI we use the pin number.
- */
- if (irq_remapped(cfg))
- io_apic_eoi(entry->apic, entry->pin);
- else
- io_apic_eoi(entry->apic, cfg->vector);
- } else {
- __mask_and_edge_IO_APIC_irq(entry);
- __unmask_and_level_IO_APIC_irq(entry);
- }
- }
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
static void ack_apic_level(struct irq_data *data)
{
struct irq_cfg *cfg = data->chip_data;
@@ -2552,7 +2608,7 @@ static void ack_apic_level(struct irq_data *data)
}
}
-#ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_IRQ_REMAP
static void ir_ack_apic_edge(struct irq_data *data)
{
ack_APIC_irq();
@@ -2563,7 +2619,23 @@ static void ir_ack_apic_level(struct irq_data *data)
ack_APIC_irq();
eoi_ioapic_irq(data->irq, data->chip_data);
}
-#endif /* CONFIG_INTR_REMAP */
+
+static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
+{
+ seq_printf(p, " IR-%s", data->chip->name);
+}
+
+static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
+{
+ chip->irq_print_chip = ir_print_prefix;
+ chip->irq_ack = ir_ack_apic_edge;
+ chip->irq_eoi = ir_ack_apic_level;
+
+#ifdef CONFIG_SMP
+ chip->irq_set_affinity = ir_ioapic_set_affinity;
+#endif
+}
+#endif /* CONFIG_IRQ_REMAP */
static struct irq_chip ioapic_chip __read_mostly = {
.name = "IO-APIC",
@@ -2578,21 +2650,6 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_retrigger = ioapic_retrigger_irq,
};
-static struct irq_chip ir_ioapic_chip __read_mostly = {
- .name = "IR-IO-APIC",
- .irq_startup = startup_ioapic_irq,
- .irq_mask = mask_ioapic_irq,
- .irq_unmask = unmask_ioapic_irq,
-#ifdef CONFIG_INTR_REMAP
- .irq_ack = ir_ack_apic_edge,
- .irq_eoi = ir_ack_apic_level,
-#ifdef CONFIG_SMP
- .irq_set_affinity = ir_ioapic_set_affinity,
-#endif
-#endif
- .irq_retrigger = ioapic_retrigger_irq,
-};
-
static inline void init_IO_APIC_traps(void)
{
struct irq_cfg *cfg;
@@ -3144,45 +3201,6 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
return 0;
}
-#ifdef CONFIG_INTR_REMAP
-/*
- * Migrate the MSI irq to another cpumask. This migration is
- * done in the process context using interrupt-remapping hardware.
- */
-static int
-ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
-{
- struct irq_cfg *cfg = data->chip_data;
- unsigned int dest, irq = data->irq;
- struct irte irte;
-
- if (get_irte(irq, &irte))
- return -1;
-
- if (__ioapic_set_affinity(data, mask, &dest))
- return -1;
-
- irte.vector = cfg->vector;
- irte.dest_id = IRTE_DEST(dest);
-
- /*
- * atomically update the IRTE with the new destination and vector.
- */
- modify_irte(irq, &irte);
-
- /*
- * After this point, all the interrupts will start arriving
- * at the new destination. So, time to cleanup the previous
- * vector allocation.
- */
- if (cfg->move_in_progress)
- send_cleanup_vector(cfg);
-
- return 0;
-}
-
-#endif
#endif /* CONFIG_SMP */
/*
@@ -3200,19 +3218,6 @@ static struct irq_chip msi_chip = {
.irq_retrigger = ioapic_retrigger_irq,
};
-static struct irq_chip msi_ir_chip = {
- .name = "IR-PCI-MSI",
- .irq_unmask = unmask_msi_irq,
- .irq_mask = mask_msi_irq,
-#ifdef CONFIG_INTR_REMAP
- .irq_ack = ir_ack_apic_edge,
-#ifdef CONFIG_SMP
- .irq_set_affinity = ir_msi_set_affinity,
-#endif
-#endif
- .irq_retrigger = ioapic_retrigger_irq,
-};
-
/*
* Map the PCI dev to the corresponding remapping hardware unit
* and allocate 'nvec' consecutive interrupt-remapping table entries
@@ -3255,7 +3260,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
if (irq_remapped(irq_get_chip_data(irq))) {
irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
- chip = &msi_ir_chip;
+ irq_remap_modify_chip_defaults(chip);
}
irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
@@ -3328,7 +3333,7 @@ void native_teardown_msi_irq(unsigned int irq)
destroy_irq(irq);
}
-#if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
+#ifdef CONFIG_DMAR_TABLE
#ifdef CONFIG_SMP
static int
dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
@@ -3409,19 +3414,6 @@ static int hpet_msi_set_affinity(struct irq_data *data,
#endif /* CONFIG_SMP */
-static struct irq_chip ir_hpet_msi_type = {
- .name = "IR-HPET_MSI",
- .irq_unmask = hpet_msi_unmask,
- .irq_mask = hpet_msi_mask,
-#ifdef CONFIG_INTR_REMAP
- .irq_ack = ir_ack_apic_edge,
-#ifdef CONFIG_SMP
- .irq_set_affinity = ir_msi_set_affinity,
-#endif
-#endif
- .irq_retrigger = ioapic_retrigger_irq,
-};
-
static struct irq_chip hpet_msi_type = {
.name = "HPET_MSI",
.irq_unmask = hpet_msi_unmask,
@@ -3458,7 +3450,7 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
hpet_msi_write(irq_get_handler_data(irq), &msg);
irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
if (irq_remapped(irq_get_chip_data(irq)))
- chip = &ir_hpet_msi_type;
+ irq_remap_modify_chip_defaults(chip);
irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
return 0;
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 0371c484bb8..a46bd383953 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -249,8 +249,6 @@ extern int (*console_blank_hook)(int);
#define APM_MINOR_DEV 134
/*
- * See Documentation/Config.help for the configuration options.
- *
* Various options can be changed at boot time as follows:
* (We allow underscores for compatibility with the modules code)
* apm=on/off enable/disable APM
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 8694ef56459..38e49bc95ff 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
* cmci_discover_lock protects against parallel discovery attempts
* which could race against each other.
*/
-static DEFINE_SPINLOCK(cmci_discover_lock);
+static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
#define CMCI_THRESHOLD 1
@@ -85,7 +85,7 @@ static void cmci_discover(int banks, int boot)
int hdr = 0;
int i;
- spin_lock_irqsave(&cmci_discover_lock, flags);
+ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
for (i = 0; i < banks; i++) {
u64 val;
@@ -116,7 +116,7 @@ static void cmci_discover(int banks, int boot)
WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
}
}
- spin_unlock_irqrestore(&cmci_discover_lock, flags);
+ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
if (hdr)
printk(KERN_CONT "\n");
}
@@ -150,7 +150,7 @@ void cmci_clear(void)
if (!cmci_supported(&banks))
return;
- spin_lock_irqsave(&cmci_discover_lock, flags);
+ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
for (i = 0; i < banks; i++) {
if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
continue;
@@ -160,7 +160,7 @@ void cmci_clear(void)
wrmsrl(MSR_IA32_MCx_CTL2(i), val);
__clear_bit(i, __get_cpu_var(mce_banks_owned));
}
- spin_unlock_irqrestore(&cmci_discover_lock, flags);
+ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
/*
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index c0ed3d9c3b2..7da647d8b64 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -75,10 +75,11 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
/*
* Undefined/reserved opcodes, conditional jump, Opcode Extension
* Groups, and some special opcodes can not boost.
- * This is volatile to keep gcc from statically optimizing it out, as
- * variable_test_bit makes gcc think only *(unsigned long*) is used.
+ * This is non-const and volatile to keep gcc from statically
+ * optimizing it out, as variable_test_bit makes gcc think only
+ * *(unsigned long*) is used.
*/
-static volatile const u32 twobyte_is_boostable[256 / 32] = {
+static volatile u32 twobyte_is_boostable[256 / 32] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ---------------------------------------------- */
W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index b49d00da2ae..622872054fb 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -117,8 +117,8 @@ again:
}
/*
- * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
- * documentation.
+ * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
+ * parameter documentation.
*/
static __init int iommu_setup(char *p)
{
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 46ff054ebaa..795b79f984c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -264,7 +264,7 @@ EXPORT_SYMBOL_GPL(start_thread);
/*
- * switch_to(x,yn) should switch tasks from x to y.
+ * switch_to(x,y) should switch tasks from x to y.
*
* We fsave/fwait so that an exception goes off at the right time
* (as a call from the fsave or fwait in effect) rather than to
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 18ae83dd1cd..b56c65de384 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -56,7 +56,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
};
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
+static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
static int __init vsyscall_setup(char *str)
{
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 30326443ab8..87488b93a65 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -63,9 +63,8 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
#ifdef CONFIG_X86_32
/* for fixmap */
tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-
- good_end = max_pfn_mapped << PAGE_SHIFT;
#endif
+ good_end = max_pfn_mapped << PAGE_SHIFT;
base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
if (base == MEMBLOCK_ERROR)
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 67421f38a21..de54b9b278a 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
-#include <linux/version.h>
#include <linux/kallsyms.h>
#include <asm/pgtable.h>
#include <linux/mmiotrace.h>
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index c04dc145a4b..75f9528e037 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -344,10 +344,10 @@ static void nmi_cpu_setup(void *dummy)
int cpu = smp_processor_id();
struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
nmi_cpu_save_registers(msrs);
- spin_lock(&oprofilefs_lock);
+ raw_spin_lock(&oprofilefs_lock);
model->setup_ctrs(model, msrs);
nmi_cpu_setup_mux(cpu, msrs);
- spin_unlock(&oprofilefs_lock);
+ raw_spin_unlock(&oprofilefs_lock);
per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 039d91315bc..404f21a3ff9 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -43,6 +43,17 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
},
},
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
+ /* 2006 AMD HT/VIA system with two host bridges */
+ {
+ .callback = set_use_crs,
+ .ident = "ASUS M2V-MX SE",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+ },
+ },
{}
};
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 1017c7bee38..492ade8c978 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -175,8 +175,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
"pcifront-msi-x" :
"pcifront-msi",
DOMID_SELF);
- if (irq < 0)
+ if (irq < 0) {
+ ret = irq;
goto free;
+ }
i++;
}
kfree(v);
@@ -221,8 +223,10 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (msg.data != XEN_PIRQ_MSI_DATA ||
xen_irq_from_pirq(pirq) < 0) {
pirq = xen_allocate_pirq_msi(dev, msidesc);
- if (pirq < 0)
+ if (pirq < 0) {
+ irq = -ENODEV;
goto error;
+ }
xen_msi_compose_msg(dev, pirq, &msg);
__write_msi_msg(msidesc, &msg);
dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
@@ -244,10 +248,12 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
error:
dev_err(&dev->dev,
"Xen PCI frontend has not registered MSI/MSI-X support!\n");
- return -ENODEV;
+ return irq;
}
#ifdef CONFIG_XEN_DOM0
+static bool __read_mostly pci_seg_supported = true;
+
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
int ret = 0;
@@ -265,10 +271,11 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
memset(&map_irq, 0, sizeof(map_irq));
map_irq.domid = domid;
- map_irq.type = MAP_PIRQ_TYPE_MSI;
+ map_irq.type = MAP_PIRQ_TYPE_MSI_SEG;
map_irq.index = -1;
map_irq.pirq = -1;
- map_irq.bus = dev->bus->number;
+ map_irq.bus = dev->bus->number |
+ (pci_domain_nr(dev->bus) << 16);
map_irq.devfn = dev->devfn;
if (type == PCI_CAP_ID_MSIX) {
@@ -285,7 +292,20 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
}
- ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+ ret = -EINVAL;
+ if (pci_seg_supported)
+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
+ &map_irq);
+ if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
+ map_irq.type = MAP_PIRQ_TYPE_MSI;
+ map_irq.index = -1;
+ map_irq.pirq = -1;
+ map_irq.bus = dev->bus->number;
+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
+ &map_irq);
+ if (ret != -EINVAL)
+ pci_seg_supported = false;
+ }
if (ret) {
dev_warn(&dev->dev, "xen map irq failed %d for %d domain\n",
ret, domid);
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 58425adc22c..e6379526675 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -14,6 +14,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
#include <linux/sfi.h>
#include <linux/intel_pmic_gpio.h>
#include <linux/spi/spi.h>
@@ -392,6 +394,7 @@ static void __init *max3111_platform_data(void *info)
struct spi_board_info *spi_info = info;
int intr = get_gpio_by_name("max3111_int");
+ spi_info->mode = SPI_MODE_0;
if (intr == -1)
return NULL;
spi_info->irq = intr + MRST_IRQ_OFFSET;
@@ -678,38 +681,40 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
pentry = (struct sfi_device_table_entry *)sb->pentry;
for (i = 0; i < num; i++, pentry++) {
- if (pentry->irq != (u8)0xff) { /* native RTE case */
+ int irq = pentry->irq;
+
+ if (irq != (u8)0xff) { /* native RTE case */
/* these SPI2 devices are not exposed to system as PCI
* devices, but they have separate RTE entry in IOAPIC
* so we have to enable them one by one here
*/
- ioapic = mp_find_ioapic(pentry->irq);
+ ioapic = mp_find_ioapic(irq);
irq_attr.ioapic = ioapic;
- irq_attr.ioapic_pin = pentry->irq;
+ irq_attr.ioapic_pin = irq;
irq_attr.trigger = 1;
irq_attr.polarity = 1;
- io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr);
+ io_apic_set_pci_routing(NULL, irq, &irq_attr);
} else
- pentry->irq = 0; /* No irq */
+ irq = 0; /* No irq */
switch (pentry->type) {
case SFI_DEV_TYPE_IPC:
/* ID as IRQ is a hack that will go away */
- pdev = platform_device_alloc(pentry->name, pentry->irq);
+ pdev = platform_device_alloc(pentry->name, irq);
if (pdev == NULL) {
pr_err("out of memory for SFI platform device '%s'.\n",
pentry->name);
continue;
}
- install_irq_resource(pdev, pentry->irq);
+ install_irq_resource(pdev, irq);
pr_debug("info[%2d]: IPC bus, name = %16.16s, "
- "irq = 0x%2x\n", i, pentry->name, pentry->irq);
+ "irq = 0x%2x\n", i, pentry->name, irq);
sfi_handle_ipc_dev(pdev);
break;
case SFI_DEV_TYPE_SPI:
memset(&spi_info, 0, sizeof(spi_info));
strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
- spi_info.irq = pentry->irq;
+ spi_info.irq = irq;
spi_info.bus_num = pentry->host_num;
spi_info.chip_select = pentry->addr;
spi_info.max_speed_hz = pentry->max_freq;
@@ -726,7 +731,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
memset(&i2c_info, 0, sizeof(i2c_info));
bus = pentry->host_num;
strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
- i2c_info.irq = pentry->irq;
+ i2c_info.irq = irq;
i2c_info.addr = pentry->addr;
pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
"irq = 0x%2x, addr = 0x%x\n", i, bus,
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 5cc821cb2e0..26c731a106a 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -25,8 +25,7 @@ config XEN_PRIVILEGED_GUEST
config XEN_PVHVM
def_bool y
- depends on XEN
- depends on X86_LOCAL_APIC
+ depends on XEN && PCI && X86_LOCAL_APIC
config XEN_MAX_DOMAIN_MEMORY
int
@@ -49,11 +48,3 @@ config XEN_DEBUG_FS
help
Enable statistics output and various tuning options in debugfs.
Enabling this option may incur a significant performance overhead.
-
-config XEN_DEBUG
- bool "Enable Xen debug checks"
- depends on XEN
- default n
- help
- Enable various WARN_ON checks in the Xen MMU code.
- Enabling this option WILL incur a significant performance overhead.
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2d69617950f..da8afd576a6 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -251,6 +251,7 @@ static void __init xen_init_cpuid_mask(void)
~((1 << X86_FEATURE_APIC) | /* disable local APIC */
(1 << X86_FEATURE_ACPI)); /* disable ACPI */
ax = 1;
+ cx = 0;
xen_cpuid(&ax, &bx, &cx, &dx);
xsave_mask =
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3dd53f997b1..87f6673b120 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -495,41 +495,6 @@ static pte_t xen_make_pte(pteval_t pte)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
-#ifdef CONFIG_XEN_DEBUG
-pte_t xen_make_pte_debug(pteval_t pte)
-{
- phys_addr_t addr = (pte & PTE_PFN_MASK);
- phys_addr_t other_addr;
- bool io_page = false;
- pte_t _pte;
-
- if (pte & _PAGE_IOMAP)
- io_page = true;
-
- _pte = xen_make_pte(pte);
-
- if (!addr)
- return _pte;
-
- if (io_page &&
- (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
- other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
- WARN_ONCE(addr != other_addr,
- "0x%lx is using VM_IO, but it is 0x%lx!\n",
- (unsigned long)addr, (unsigned long)other_addr);
- } else {
- pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
- other_addr = (_pte.pte & PTE_PFN_MASK);
- WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
- "0x%lx is missing VM_IO (and wasn't fixed)!\n",
- (unsigned long)addr);
- }
-
- return _pte;
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
-#endif
-
static pgd_t xen_make_pgd(pgdval_t pgd)
{
pgd = pte_pfn_to_mfn(pgd);
@@ -1992,9 +1957,6 @@ void __init xen_ident_map_ISA(void)
static void __init xen_post_allocator_init(void)
{
-#ifdef CONFIG_XEN_DEBUG
- pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
-#endif
pv_mmu_ops.set_pte = xen_set_pte;
pv_mmu_ops.set_pmd = xen_set_pmd;
pv_mmu_ops.set_pud = xen_set_pud;
@@ -2404,17 +2366,3 @@ out:
return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
-
-#ifdef CONFIG_XEN_DEBUG_FS
-static int p2m_dump_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, p2m_dump_show, NULL);
-}
-
-static const struct file_operations p2m_dump_fops = {
- .open = p2m_dump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif /* CONFIG_XEN_DEBUG_FS */
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 58efeb9d544..1b267e75158 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -161,7 +161,9 @@
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
+#include <xen/grant_table.h>
+#include "multicalls.h"
#include "xen-ops.h"
static void __init m2p_override_init(void);
@@ -676,7 +678,8 @@ static unsigned long mfn_hash(unsigned long mfn)
}
/* Add an MFN override for a particular page */
-int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
+int m2p_add_override(unsigned long mfn, struct page *page,
+ struct gnttab_map_grant_ref *kmap_op)
{
unsigned long flags;
unsigned long pfn;
@@ -692,16 +695,28 @@ int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
"m2p_add_override: pfn %lx not mapped", pfn))
return -EINVAL;
}
-
- page->private = mfn;
+ WARN_ON(PagePrivate(page));
+ SetPagePrivate(page);
+ set_page_private(page, mfn);
page->index = pfn_to_mfn(pfn);
if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
return -ENOMEM;
- if (clear_pte && !PageHighMem(page))
- /* Just zap old mapping for now */
- pte_clear(&init_mm, address, ptep);
+ if (kmap_op != NULL) {
+ if (!PageHighMem(page)) {
+ struct multicall_space mcs =
+ xen_mc_entry(sizeof(*kmap_op));
+
+ MULTI_grant_table_op(mcs.mc,
+ GNTTABOP_map_grant_ref, kmap_op, 1);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+ }
+ /* let's use dev_bus_addr to record the old mfn instead */
+ kmap_op->dev_bus_addr = page->index;
+ page->index = (unsigned long) kmap_op;
+ }
spin_lock_irqsave(&m2p_override_lock, flags);
list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
spin_unlock_irqrestore(&m2p_override_lock, flags);
@@ -735,13 +750,56 @@ int m2p_remove_override(struct page *page, bool clear_pte)
spin_lock_irqsave(&m2p_override_lock, flags);
list_del(&page->lru);
spin_unlock_irqrestore(&m2p_override_lock, flags);
- set_phys_to_machine(pfn, page->index);
+ WARN_ON(!PagePrivate(page));
+ ClearPagePrivate(page);
- if (clear_pte && !PageHighMem(page))
- set_pte_at(&init_mm, address, ptep,
- pfn_pte(pfn, PAGE_KERNEL));
- /* No tlb flush necessary because the caller already
- * left the pte unmapped. */
+ if (clear_pte) {
+ struct gnttab_map_grant_ref *map_op =
+ (struct gnttab_map_grant_ref *) page->index;
+ set_phys_to_machine(pfn, map_op->dev_bus_addr);
+ if (!PageHighMem(page)) {
+ struct multicall_space mcs;
+ struct gnttab_unmap_grant_ref *unmap_op;
+
+ /*
+ * It might be that we queued all the m2p grant table
+ * hypercalls in a multicall, then m2p_remove_override
+ * get called before the multicall has actually been
+ * issued. In this case handle is going to -1 because
+ * it hasn't been modified yet.
+ */
+ if (map_op->handle == -1)
+ xen_mc_flush();
+ /*
+ * Now if map_op->handle is negative it means that the
+ * hypercall actually returned an error.
+ */
+ if (map_op->handle == GNTST_general_error) {
+ printk(KERN_WARNING "m2p_remove_override: "
+ "pfn %lx mfn %lx, failed to modify kernel mappings",
+ pfn, mfn);
+ return -1;
+ }
+
+ mcs = xen_mc_entry(
+ sizeof(struct gnttab_unmap_grant_ref));
+ unmap_op = mcs.args;
+ unmap_op->host_addr = map_op->host_addr;
+ unmap_op->handle = map_op->handle;
+ unmap_op->dev_bus_addr = 0;
+
+ MULTI_grant_table_op(mcs.mc,
+ GNTTABOP_unmap_grant_ref, unmap_op, 1);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ set_pte_at(&init_mm, address, ptep,
+ pfn_pte(pfn, PAGE_KERNEL));
+ __flush_tlb_single(address);
+ map_op->host_addr = 0;
+ }
+ } else
+ set_phys_to_machine(pfn, page->index);
return 0;
}
@@ -758,7 +816,7 @@ struct page *m2p_find_override(unsigned long mfn)
spin_lock_irqsave(&m2p_override_lock, flags);
list_for_each_entry(p, bucket, lru) {
- if (p->private == mfn) {
+ if (page_private(p) == mfn) {
ret = p;
break;
}
@@ -782,17 +840,21 @@ unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
#ifdef CONFIG_XEN_DEBUG_FS
-
-int p2m_dump_show(struct seq_file *m, void *v)
+#include <linux/debugfs.h>
+#include "debugfs.h"
+static int p2m_dump_show(struct seq_file *m, void *v)
{
static const char * const level_name[] = { "top", "middle",
- "entry", "abnormal" };
- static const char * const type_name[] = { "identity", "missing",
- "pfn", "abnormal"};
+ "entry", "abnormal", "error"};
#define TYPE_IDENTITY 0
#define TYPE_MISSING 1
#define TYPE_PFN 2
#define TYPE_UNKNOWN 3
+ static const char * const type_name[] = {
+ [TYPE_IDENTITY] = "identity",
+ [TYPE_MISSING] = "missing",
+ [TYPE_PFN] = "pfn",
+ [TYPE_UNKNOWN] = "abnormal"};
unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0;
unsigned int uninitialized_var(prev_level);
unsigned int uninitialized_var(prev_type);
@@ -856,4 +918,32 @@ int p2m_dump_show(struct seq_file *m, void *v)
#undef TYPE_PFN
#undef TYPE_UNKNOWN
}
-#endif
+
+static int p2m_dump_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, p2m_dump_show, NULL);
+}
+
+static const struct file_operations p2m_dump_fops = {
+ .open = p2m_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *d_mmu_debug;
+
+static int __init xen_p2m_debugfs(void)
+{
+ struct dentry *d_xen = xen_init_debugfs();
+
+ if (d_xen == NULL)
+ return -ENOMEM;
+
+ d_mmu_debug = debugfs_create_dir("mmu", d_xen);
+
+ debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
+ return 0;
+}
+fs_initcall(xen_p2m_debugfs);
+#endif /* CONFIG_XEN_DEBUG_FS */
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 46d6d21dbdb..38d0af4fefe 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -37,7 +37,10 @@ extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */
-phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
+
+/* Number of pages released from the initial allocation. */
+unsigned long xen_released_pages;
/*
* The maximum amount of extra memory compared to the base size. The
@@ -51,48 +54,47 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
*/
#define EXTRA_MEM_RATIO (10)
-static void __init xen_add_extra_mem(unsigned long pages)
+static void __init xen_add_extra_mem(u64 start, u64 size)
{
unsigned long pfn;
+ int i;
- u64 size = (u64)pages * PAGE_SIZE;
- u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
-
- if (!pages)
- return;
-
- e820_add_region(extra_start, size, E820_RAM);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-
- memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ /* Add new region. */
+ if (xen_extra_mem[i].size == 0) {
+ xen_extra_mem[i].start = start;
+ xen_extra_mem[i].size = size;
+ break;
+ }
+ /* Append to existing region. */
+ if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
+ xen_extra_mem[i].size += size;
+ break;
+ }
+ }
+ if (i == XEN_EXTRA_MEM_MAX_REGIONS)
+ printk(KERN_WARNING "Warning: not enough extra memory regions\n");
- xen_extra_mem_size += size;
+ memblock_x86_reserve_range(start, start + size, "XEN EXTRA");
- xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+ xen_max_p2m_pfn = PFN_DOWN(start + size);
- for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
+ for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
-static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
- phys_addr_t end_addr)
+static unsigned long __init xen_release_chunk(unsigned long start,
+ unsigned long end)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = 0,
.domid = DOMID_SELF
};
- unsigned long start, end;
unsigned long len = 0;
unsigned long pfn;
int ret;
- start = PFN_UP(start_addr);
- end = PFN_DOWN(end_addr);
-
- if (end <= start)
- return 0;
-
for(pfn = start; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
@@ -117,72 +119,52 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
return len;
}
-static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
- const struct e820map *e820)
+static unsigned long __init xen_set_identity_and_release(
+ const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
- phys_addr_t max_addr = PFN_PHYS(max_pfn);
- phys_addr_t last_end = ISA_END_ADDRESS;
+ phys_addr_t start = 0;
unsigned long released = 0;
- int i;
-
- /* Free any unused memory above the low 1Mbyte. */
- for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
- phys_addr_t end = e820->map[i].addr;
- end = min(max_addr, end);
-
- if (last_end < end)
- released += xen_release_chunk(last_end, end);
- last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
- }
-
- if (last_end < max_addr)
- released += xen_release_chunk(last_end, max_addr);
-
- printk(KERN_INFO "released %lu pages of unused memory\n", released);
- return released;
-}
-
-static unsigned long __init xen_set_identity(const struct e820entry *list,
- ssize_t map_size)
-{
- phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
- phys_addr_t start_pci = last;
- const struct e820entry *entry;
unsigned long identity = 0;
+ const struct e820entry *entry;
int i;
+ /*
+ * Combine non-RAM regions and gaps until a RAM region (or the
+ * end of the map) is reached, then set the 1:1 map and
+ * release the pages (if available) in those non-RAM regions.
+ *
+ * The combined non-RAM regions are rounded to a whole number
+ * of pages so any partial pages are accessible via the 1:1
+ * mapping. This is needed for some BIOSes that put (for
+ * example) the DMI tables in a reserved region that begins on
+ * a non-page boundary.
+ */
for (i = 0, entry = list; i < map_size; i++, entry++) {
- phys_addr_t start = entry->addr;
- phys_addr_t end = start + entry->size;
+ phys_addr_t end = entry->addr + entry->size;
- if (start < last)
- start = last;
+ if (entry->type == E820_RAM || i == map_size - 1) {
+ unsigned long start_pfn = PFN_DOWN(start);
+ unsigned long end_pfn = PFN_UP(end);
- if (end <= start)
- continue;
+ if (entry->type == E820_RAM)
+ end_pfn = PFN_UP(entry->addr);
- /* Skip over the 1MB region. */
- if (last > end)
- continue;
+ if (start_pfn < end_pfn) {
+ if (start_pfn < nr_pages)
+ released += xen_release_chunk(
+ start_pfn, min(end_pfn, nr_pages));
- if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
- if (start > start_pci)
identity += set_phys_range_identity(
- PFN_UP(start_pci), PFN_DOWN(start));
-
- /* Without saving 'last' we would gooble RAM too
- * at the end of the loop. */
- last = end;
- start_pci = end;
- continue;
+ start_pfn, end_pfn);
+ }
+ start = end;
}
- start_pci = min(start, start_pci);
- last = end;
}
- if (last > start_pci)
- identity += set_phys_range_identity(
- PFN_UP(start_pci), PFN_DOWN(last));
- return identity;
+
+ printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+ printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+
+ return released;
}
static unsigned long __init xen_get_max_pages(void)
@@ -197,21 +179,32 @@ static unsigned long __init xen_get_max_pages(void)
return min(max_pages, MAX_DOMAIN_PAGES);
}
+static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
+{
+ u64 end = start + size;
+
+ /* Align RAM regions to page boundaries. */
+ if (type == E820_RAM) {
+ start = PAGE_ALIGN(start);
+ end &= ~((u64)PAGE_SIZE - 1);
+ }
+
+ e820_add_region(start, end - start, type);
+}
+
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
char * __init xen_memory_setup(void)
{
static struct e820entry map[E820MAX] __initdata;
- static struct e820entry map_raw[E820MAX] __initdata;
unsigned long max_pfn = xen_start_info->nr_pages;
unsigned long long mem_end;
int rc;
struct xen_memory_map memmap;
+ unsigned long max_pages;
unsigned long extra_pages = 0;
- unsigned long extra_limit;
- unsigned long identity_pages = 0;
int i;
int op;
@@ -237,58 +230,65 @@ char * __init xen_memory_setup(void)
}
BUG_ON(rc);
- memcpy(map_raw, map, sizeof(map));
- e820.nr_map = 0;
- xen_extra_mem_start = mem_end;
- for (i = 0; i < memmap.nr_entries; i++) {
- unsigned long long end;
-
- /* Guard against non-page aligned E820 entries. */
- if (map[i].type == E820_RAM)
- map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
-
- end = map[i].addr + map[i].size;
- if (map[i].type == E820_RAM && end > mem_end) {
- /* RAM off the end - may be partially included */
- u64 delta = min(map[i].size, end - mem_end);
-
- map[i].size -= delta;
- end -= delta;
-
- extra_pages += PFN_DOWN(delta);
- /*
- * Set RAM below 4GB that is not for us to be unusable.
- * This prevents "System RAM" address space from being
- * used as potential resource for I/O address (happens
- * when 'allocate_resource' is called).
- */
- if (delta &&
- (xen_initial_domain() && end < 0x100000000ULL))
- e820_add_region(end, delta, E820_UNUSABLE);
+ /* Make sure the Xen-supplied memory map is well-ordered. */
+ sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
+
+ max_pages = xen_get_max_pages();
+ if (max_pages > max_pfn)
+ extra_pages += max_pages - max_pfn;
+
+ /*
+ * Set P2M for all non-RAM pages and E820 gaps to be identity
+ * type PFNs. Any RAM pages that would be made inaccesible by
+ * this are first released.
+ */
+ xen_released_pages = xen_set_identity_and_release(
+ map, memmap.nr_entries, max_pfn);
+ extra_pages += xen_released_pages;
+
+ /*
+ * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+ * factor the base size. On non-highmem systems, the base
+ * size is the full initial memory allocation; on highmem it
+ * is limited to the max size of lowmem, so that it doesn't
+ * get completely filled.
+ *
+ * In principle there could be a problem in lowmem systems if
+ * the initial memory is also very large with respect to
+ * lowmem, but we won't try to deal with that here.
+ */
+ extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+ extra_pages);
+
+ i = 0;
+ while (i < memmap.nr_entries) {
+ u64 addr = map[i].addr;
+ u64 size = map[i].size;
+ u32 type = map[i].type;
+
+ if (type == E820_RAM) {
+ if (addr < mem_end) {
+ size = min(size, mem_end - addr);
+ } else if (extra_pages) {
+ size = min(size, (u64)extra_pages * PAGE_SIZE);
+ extra_pages -= size / PAGE_SIZE;
+ xen_add_extra_mem(addr, size);
+ } else
+ type = E820_UNUSABLE;
}
- if (map[i].size > 0 && end > xen_extra_mem_start)
- xen_extra_mem_start = end;
+ xen_align_and_add_e820_region(addr, size, type);
- /* Add region if any remains */
- if (map[i].size > 0)
- e820_add_region(map[i].addr, map[i].size, map[i].type);
+ map[i].addr += size;
+ map[i].size -= size;
+ if (map[i].size == 0)
+ i++;
}
- /* Align the balloon area so that max_low_pfn does not get set
- * to be at the _end_ of the PCI gap at the far end (fee01000).
- * Note that xen_extra_mem_start gets set in the loop above to be
- * past the last E820 region. */
- if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
- xen_extra_mem_start = (1ULL<<32);
/*
* In domU, the ISA region is normal, usable memory, but we
* reserve ISA memory anyway because too many things poke
* about in there.
- *
- * In Dom0, the host E820 information can leave gaps in the
- * ISA range, which would cause us to release those pages. To
- * avoid this, we unconditionally reserve them here.
*/
e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
E820_RESERVED);
@@ -305,44 +305,6 @@ char * __init xen_memory_setup(void)
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- extra_limit = xen_get_max_pages();
- if (max_pfn + extra_pages > extra_limit) {
- if (extra_limit > max_pfn)
- extra_pages = extra_limit - max_pfn;
- else
- extra_pages = 0;
- }
-
- extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
-
- /*
- * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
- * factor the base size. On non-highmem systems, the base
- * size is the full initial memory allocation; on highmem it
- * is limited to the max size of lowmem, so that it doesn't
- * get completely filled.
- *
- * In principle there could be a problem in lowmem systems if
- * the initial memory is also very large with respect to
- * lowmem, but we won't try to deal with that here.
- */
- extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
- max_pfn + extra_pages);
-
- if (extra_limit >= max_pfn)
- extra_pages = extra_limit - max_pfn;
- else
- extra_pages = 0;
-
- xen_add_extra_mem(extra_pages);
-
- /*
- * Set P2M for all non-RAM pages and E820 gaps to be identity
- * type PFNs. We supply it with the non-sanitized version
- * of the E820.
- */
- identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
- printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
return "Xen";
}
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index f717e20d961..7dde2445642 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -633,7 +633,7 @@ static const struct net_device_ops iss_netdev_ops = {
.ndo_set_mac_address = iss_net_set_mac,
//.ndo_do_ioctl = iss_net_ioctl,
.ndo_tx_timeout = iss_net_tx_timeout,
- .ndo_set_multicast_list = iss_net_set_multicast_list,
+ .ndo_set_rx_mode = iss_net_set_multicast_list,
};
static int iss_net_configure(int index, char *init)
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index be442561693..7835b8fc94d 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -67,6 +67,9 @@ static int ghash_update(struct shash_desc *desc,
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
+ if (!ctx->gf128)
+ return -ENOKEY;
+
if (dctx->bytes) {
int n = min(srclen, dctx->bytes);
u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -119,6 +122,9 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
+ if (!ctx->gf128)
+ return -ENOKEY;
+
ghash_flush(ctx, dctx);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 95b9e7eefad..b5e6f243f74 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -56,6 +56,8 @@ source "drivers/pps/Kconfig"
source "drivers/ptp/Kconfig"
+source "drivers/pinctrl/Kconfig"
+
source "drivers/gpio/Kconfig"
source "drivers/w1/Kconfig"
@@ -130,4 +132,8 @@ source "drivers/iommu/Kconfig"
source "drivers/virt/Kconfig"
+source "drivers/hv/Kconfig"
+
+source "drivers/devfreq/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 7fa433a7030..1b3142127bf 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -5,6 +5,8 @@
# Rewritten to use lists instead of if-statements.
#
+# GPIO must come after pinctrl as gpios may need to mux pins etc
+obj-y += pinctrl/
obj-y += gpio/
obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_PARISC) += parisc/
@@ -127,3 +129,6 @@ obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
# Virtualization drivers
obj-$(CONFIG_VIRT_DRIVERS) += virt/
+obj-$(CONFIG_HYPERV) += hv/
+
+obj-$(CONFIG_PM_DEVFREQ) += devfreq/
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 431ab11c8c1..9b88f9828d8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -37,7 +37,7 @@
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h> /* need_resched() */
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/irqflags.h>
@@ -852,7 +852,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
}
static int c3_cpu_count;
-static DEFINE_SPINLOCK(c3_lock);
+static DEFINE_RAW_SPINLOCK(c3_lock);
/**
* acpi_idle_enter_bm - enters C3 with proper BM handling
@@ -930,12 +930,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
* without doing anything.
*/
if (pr->flags.bm_check && pr->flags.bm_control) {
- spin_lock(&c3_lock);
+ raw_spin_lock(&c3_lock);
c3_cpu_count++;
/* Disable bus master arbitration when all CPUs are in C3 */
if (c3_cpu_count == num_online_cpus())
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
- spin_unlock(&c3_lock);
+ raw_spin_unlock(&c3_lock);
} else if (!pr->flags.bm_check) {
ACPI_FLUSH_CPU_CACHE();
}
@@ -944,10 +944,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
/* Re-enable bus master arbitration */
if (pr->flags.bm_check && pr->flags.bm_control) {
- spin_lock(&c3_lock);
+ raw_spin_lock(&c3_lock);
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
c3_cpu_count--;
- spin_unlock(&c3_lock);
+ raw_spin_unlock(&c3_lock);
}
kt2 = ktime_get_real();
idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 3ed80b2ca90..0e46faef1d3 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -390,6 +390,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
{
.callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-FW21E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-SR11M",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
@@ -444,6 +452,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
},
},
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-SR26GN_P",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-FW520F",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 93071417315..956e9accb05 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1134,8 +1134,9 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
skb_headlen(skb));
else
put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
- skb_shinfo(skb)->frags[i].page + skb_shinfo(skb)->frags[i].page_offset,
- skb_shinfo(skb)->frags[i].size);
+ skb_frag_page(&skb_shinfo(skb)->frags[i]) +
+ skb_shinfo(skb)->frags[i].page_offset,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]));
}
if (skb->len & 3)
put_dma(tx->index,eni_dev->dma,&j,zeroes,4-(skb->len & 3));
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index db06f34419c..1c052127548 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3416,27 +3416,28 @@ init_card(struct atm_dev *dev)
size = sizeof(struct vc_map *) * card->tct_size;
IPRINTK("%s: allocate %d byte for VC map.\n", card->name, size);
- if (NULL == (card->vcs = vmalloc(size))) {
+ card->vcs = vzalloc(size);
+ if (!card->vcs) {
printk("%s: memory allocation failure.\n", card->name);
deinit_card(card);
return -1;
}
- memset(card->vcs, 0, size);
size = sizeof(struct vc_map *) * card->scd_size;
IPRINTK("%s: allocate %d byte for SCD to VC mapping.\n",
card->name, size);
- if (NULL == (card->scd2vc = vmalloc(size))) {
+ card->scd2vc = vzalloc(size);
+ if (!card->scd2vc) {
printk("%s: memory allocation failure.\n", card->name);
deinit_card(card);
return -1;
}
- memset(card->scd2vc, 0, size);
size = sizeof(struct tst_info) * (card->tst_size - 2);
IPRINTK("%s: allocate %d byte for TST to VC mapping.\n",
card->name, size);
- if (NULL == (card->soft_tst = vmalloc(size))) {
+ card->soft_tst = vmalloc(size);
+ if (!card->soft_tst) {
printk("%s: memory allocation failure.\n", card->name);
deinit_card(card);
return -1;
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index cb90f7a3e07..3d0c2b0fed9 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -818,127 +818,152 @@ static void ia_hw_type(IADEV *iadev) {
}
-static void IaFrontEndIntr(IADEV *iadev) {
- volatile IA_SUNI *suni;
- volatile ia_mb25_t *mb25;
- volatile suni_pm7345_t *suni_pm7345;
-
- if(iadev->phy_type & FE_25MBIT_PHY) {
- mb25 = (ia_mb25_t*)iadev->phy;
- iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
- } else if (iadev->phy_type & FE_DS3_PHY) {
- suni_pm7345 = (suni_pm7345_t *)iadev->phy;
- /* clear FRMR interrupts */
- (void) suni_pm7345->suni_ds3_frm_intr_stat;
- iadev->carrier_detect =
- Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
- } else if (iadev->phy_type & FE_E3_PHY ) {
- suni_pm7345 = (suni_pm7345_t *)iadev->phy;
- (void) suni_pm7345->suni_e3_frm_maint_intr_ind;
- iadev->carrier_detect =
- Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
- }
- else {
- suni = (IA_SUNI *)iadev->phy;
- (void) suni->suni_rsop_status;
- iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
- }
- if (iadev->carrier_detect)
- printk("IA: SUNI carrier detected\n");
- else
- printk("IA: SUNI carrier lost signal\n");
- return;
+static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
+{
+ return readl(ia->phy + (reg >> 2));
+}
+
+static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
+{
+ writel(val, ia->phy + (reg >> 2));
+}
+
+static void ia_frontend_intr(struct iadev_priv *iadev)
+{
+ u32 status;
+
+ if (iadev->phy_type & FE_25MBIT_PHY) {
+ status = ia_phy_read32(iadev, MB25_INTR_STATUS);
+ iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
+ } else if (iadev->phy_type & FE_DS3_PHY) {
+ ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
+ status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
+ iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
+ } else if (iadev->phy_type & FE_E3_PHY) {
+ ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
+ status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
+ iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
+ } else {
+ status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
+ iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
+ }
+
+ printk(KERN_INFO "IA: SUNI carrier %s\n",
+ iadev->carrier_detect ? "detected" : "lost signal");
}
-static void ia_mb25_init (IADEV *iadev)
+static void ia_mb25_init(struct iadev_priv *iadev)
{
- volatile ia_mb25_t *mb25 = (ia_mb25_t*)iadev->phy;
#if 0
mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
- mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
- mb25->mb25_diag_control = 0;
- /*
- * Initialize carrier detect state
- */
- iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
- return;
-}
+ ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
+ ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);
+
+ iadev->carrier_detect =
+ (ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
+}
-static void ia_suni_pm7345_init (IADEV *iadev)
+struct ia_reg {
+ u16 reg;
+ u16 val;
+};
+
+static void ia_phy_write(struct iadev_priv *iadev,
+ const struct ia_reg *regs, int len)
{
- volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
- if (iadev->phy_type & FE_DS3_PHY)
- {
- iadev->carrier_detect =
- Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
- suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
- suni_pm7345->suni_ds3_frm_cfg = 1;
- suni_pm7345->suni_ds3_tran_cfg = 1;
- suni_pm7345->suni_config = 0;
- suni_pm7345->suni_splr_cfg = 0;
- suni_pm7345->suni_splt_cfg = 0;
- }
- else
- {
- iadev->carrier_detect =
- Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
- suni_pm7345->suni_e3_frm_fram_options = 0x4;
- suni_pm7345->suni_e3_frm_maint_options = 0x20;
- suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
- suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
- suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
- suni_pm7345->suni_e3_tran_fram_options = 0x1;
- suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
- suni_pm7345->suni_splr_cfg = 0x41;
- suni_pm7345->suni_splt_cfg = 0x41;
- }
- /*
- * Enable RSOP loss of signal interrupt.
- */
- suni_pm7345->suni_intr_enbl = 0x28;
-
- /*
- * Clear error counters
- */
- suni_pm7345->suni_id_reset = 0;
-
- /*
- * Clear "PMCTST" in master test register.
- */
- suni_pm7345->suni_master_test = 0;
-
- suni_pm7345->suni_rxcp_ctrl = 0x2c;
- suni_pm7345->suni_rxcp_fctrl = 0x81;
-
- suni_pm7345->suni_rxcp_idle_pat_h1 =
- suni_pm7345->suni_rxcp_idle_pat_h2 =
- suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
- suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
-
- suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
- suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
- suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
- suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
-
- suni_pm7345->suni_rxcp_cell_pat_h1 =
- suni_pm7345->suni_rxcp_cell_pat_h2 =
- suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
- suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
-
- suni_pm7345->suni_rxcp_cell_mask_h1 =
- suni_pm7345->suni_rxcp_cell_mask_h2 =
- suni_pm7345->suni_rxcp_cell_mask_h3 =
- suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
-
- suni_pm7345->suni_txcp_ctrl = 0xa4;
- suni_pm7345->suni_txcp_intr_en_sts = 0x10;
- suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
-
- suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
- SUNI_PM7345_CLB |
- SUNI_PM7345_DLB |
- SUNI_PM7345_PLB);
+ while (len--) {
+ ia_phy_write32(iadev, regs->reg, regs->val);
+ regs++;
+ }
+}
+
+static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
+{
+ static const struct ia_reg suni_ds3_init [] = {
+ { SUNI_DS3_FRM_INTR_ENBL, 0x17 },
+ { SUNI_DS3_FRM_CFG, 0x01 },
+ { SUNI_DS3_TRAN_CFG, 0x01 },
+ { SUNI_CONFIG, 0 },
+ { SUNI_SPLR_CFG, 0 },
+ { SUNI_SPLT_CFG, 0 }
+ };
+ u32 status;
+
+ status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
+ iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
+
+ ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
+}
+
+static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
+{
+ static const struct ia_reg suni_e3_init [] = {
+ { SUNI_E3_FRM_FRAM_OPTIONS, 0x04 },
+ { SUNI_E3_FRM_MAINT_OPTIONS, 0x20 },
+ { SUNI_E3_FRM_FRAM_INTR_ENBL, 0x1d },
+ { SUNI_E3_FRM_MAINT_INTR_ENBL, 0x30 },
+ { SUNI_E3_TRAN_STAT_DIAG_OPTIONS, 0 },
+ { SUNI_E3_TRAN_FRAM_OPTIONS, 0x01 },
+ { SUNI_CONFIG, SUNI_PM7345_E3ENBL },
+ { SUNI_SPLR_CFG, 0x41 },
+ { SUNI_SPLT_CFG, 0x41 }
+ };
+ u32 status;
+
+ status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
+ iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
+ ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
+}
+
+static void ia_suni_pm7345_init(struct iadev_priv *iadev)
+{
+ static const struct ia_reg suni_init [] = {
+ /* Enable RSOP loss of signal interrupt. */
+ { SUNI_INTR_ENBL, 0x28 },
+ /* Clear error counters. */
+ { SUNI_ID_RESET, 0 },
+ /* Clear "PMCTST" in master test register. */
+ { SUNI_MASTER_TEST, 0 },
+
+ { SUNI_RXCP_CTRL, 0x2c },
+ { SUNI_RXCP_FCTRL, 0x81 },
+
+ { SUNI_RXCP_IDLE_PAT_H1, 0 },
+ { SUNI_RXCP_IDLE_PAT_H2, 0 },
+ { SUNI_RXCP_IDLE_PAT_H3, 0 },
+ { SUNI_RXCP_IDLE_PAT_H4, 0x01 },
+
+ { SUNI_RXCP_IDLE_MASK_H1, 0xff },
+ { SUNI_RXCP_IDLE_MASK_H2, 0xff },
+ { SUNI_RXCP_IDLE_MASK_H3, 0xff },
+ { SUNI_RXCP_IDLE_MASK_H4, 0xfe },
+
+ { SUNI_RXCP_CELL_PAT_H1, 0 },
+ { SUNI_RXCP_CELL_PAT_H2, 0 },
+ { SUNI_RXCP_CELL_PAT_H3, 0 },
+ { SUNI_RXCP_CELL_PAT_H4, 0x01 },
+
+ { SUNI_RXCP_CELL_MASK_H1, 0xff },
+ { SUNI_RXCP_CELL_MASK_H2, 0xff },
+ { SUNI_RXCP_CELL_MASK_H3, 0xff },
+ { SUNI_RXCP_CELL_MASK_H4, 0xff },
+
+ { SUNI_TXCP_CTRL, 0xa4 },
+ { SUNI_TXCP_INTR_EN_STS, 0x10 },
+ { SUNI_TXCP_IDLE_PAT_H5, 0x55 }
+ };
+
+ if (iadev->phy_type & FE_DS3_PHY)
+ ia_suni_pm7345_init_ds3(iadev);
+ else
+ ia_suni_pm7345_init_e3(iadev);
+
+ ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));
+
+ ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
+ ~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
+ SUNI_PM7345_DLB | SUNI_PM7345_PLB));
#ifdef __SNMP__
suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
@@ -1425,10 +1450,10 @@ static int rx_init(struct atm_dev *dev)
iadev->dma + IPHASE5575_RX_LIST_ADDR);
IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
iadev->dma+IPHASE5575_TX_LIST_ADDR,
- *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));
+ readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
iadev->dma+IPHASE5575_RX_LIST_ADDR,
- *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)
+ readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
writew(0, iadev->reass_reg+MODE_REG);
@@ -2208,7 +2233,7 @@ static irqreturn_t ia_int(int irq, void *dev_id)
if (status & STAT_DLERINT)
{
/* Clear this bit by writing a 1 to it. */
- *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
+ writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
rx_dle_intr(dev);
}
if (status & STAT_SEGINT)
@@ -2219,13 +2244,13 @@ static irqreturn_t ia_int(int irq, void *dev_id)
}
if (status & STAT_DLETINT)
{
- *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;
+ writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
tx_dle_intr(dev);
}
if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
{
if (status & STAT_FEINT)
- IaFrontEndIntr(iadev);
+ ia_frontend_intr(iadev);
}
}
return IRQ_RETVAL(handled);
@@ -2556,7 +2581,7 @@ static int __devinit ia_start(struct atm_dev *dev)
goto err_free_rx;
}
/* Get iadev->carrier_detect status */
- IaFrontEndIntr(iadev);
+ ia_frontend_intr(iadev);
}
return 0;
@@ -2827,7 +2852,7 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
case 0xb:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
- IaFrontEndIntr(iadev);
+ ia_frontend_intr(iadev);
break;
case 0xa:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index 077735e0e04..6a0955e6d4f 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -889,79 +889,71 @@ typedef struct ia_rtn_q {
} IARTN_Q;
#define SUNI_LOSV 0x04
-typedef struct {
- u32 suni_master_reset; /* SUNI Master Reset and Identity */
- u32 suni_master_config; /* SUNI Master Configuration */
- u32 suni_master_intr_stat; /* SUNI Master Interrupt Status */
- u32 suni_reserved1; /* Reserved */
- u32 suni_master_clk_monitor;/* SUNI Master Clock Monitor */
- u32 suni_master_control; /* SUNI Master Clock Monitor */
- u32 suni_reserved2[10]; /* Reserved */
-
- u32 suni_rsop_control; /* RSOP Control/Interrupt Enable */
- u32 suni_rsop_status; /* RSOP Status/Interrupt States */
- u32 suni_rsop_section_bip8l;/* RSOP Section BIP-8 LSB */
- u32 suni_rsop_section_bip8m;/* RSOP Section BIP-8 MSB */
-
- u32 suni_tsop_control; /* TSOP Control */
- u32 suni_tsop_diag; /* TSOP Disgnostics */
- u32 suni_tsop_reserved[2]; /* TSOP Reserved */
-
- u32 suni_rlop_cs; /* RLOP Control/Status */
- u32 suni_rlop_intr; /* RLOP Interrupt Enable/Status */
- u32 suni_rlop_line_bip24l; /* RLOP Line BIP-24 LSB */
- u32 suni_rlop_line_bip24; /* RLOP Line BIP-24 */
- u32 suni_rlop_line_bip24m; /* RLOP Line BIP-24 MSB */
- u32 suni_rlop_line_febel; /* RLOP Line FEBE LSB */
- u32 suni_rlop_line_febe; /* RLOP Line FEBE */
- u32 suni_rlop_line_febem; /* RLOP Line FEBE MSB */
-
- u32 suni_tlop_control; /* TLOP Control */
- u32 suni_tlop_disg; /* TLOP Disgnostics */
- u32 suni_tlop_reserved[14]; /* TLOP Reserved */
-
- u32 suni_rpop_cs; /* RPOP Status/Control */
- u32 suni_rpop_intr; /* RPOP Interrupt/Status */
- u32 suni_rpop_reserved; /* RPOP Reserved */
- u32 suni_rpop_intr_ena; /* RPOP Interrupt Enable */
- u32 suni_rpop_reserved1[3]; /* RPOP Reserved */
- u32 suni_rpop_path_sig; /* RPOP Path Signal Label */
- u32 suni_rpop_bip8l; /* RPOP Path BIP-8 LSB */
- u32 suni_rpop_bip8m; /* RPOP Path BIP-8 MSB */
- u32 suni_rpop_febel; /* RPOP Path FEBE LSB */
- u32 suni_rpop_febem; /* RPOP Path FEBE MSB */
- u32 suni_rpop_reserved2[4]; /* RPOP Reserved */
-
- u32 suni_tpop_cntrl_daig; /* TPOP Control/Disgnostics */
- u32 suni_tpop_pointer_ctrl; /* TPOP Pointer Control */
- u32 suni_tpop_sourcer_ctrl; /* TPOP Source Control */
- u32 suni_tpop_reserved1[2]; /* TPOP Reserved */
- u32 suni_tpop_arb_prtl; /* TPOP Arbitrary Pointer LSB */
- u32 suni_tpop_arb_prtm; /* TPOP Arbitrary Pointer MSB */
- u32 suni_tpop_reserved2; /* TPOP Reserved */
- u32 suni_tpop_path_sig; /* TPOP Path Signal Lable */
- u32 suni_tpop_path_status; /* TPOP Path Status */
- u32 suni_tpop_reserved3[6]; /* TPOP Reserved */
-
- u32 suni_racp_cs; /* RACP Control/Status */
- u32 suni_racp_intr; /* RACP Interrupt Enable/Status */
- u32 suni_racp_hdr_pattern; /* RACP Match Header Pattern */
- u32 suni_racp_hdr_mask; /* RACP Match Header Mask */
- u32 suni_racp_corr_hcs; /* RACP Correctable HCS Error Count */
- u32 suni_racp_uncorr_hcs; /* RACP Uncorrectable HCS Error Count */
- u32 suni_racp_reserved[10]; /* RACP Reserved */
-
- u32 suni_tacp_control; /* TACP Control */
- u32 suni_tacp_idle_hdr_pat; /* TACP Idle Cell Header Pattern */
- u32 suni_tacp_idle_pay_pay; /* TACP Idle Cell Payld Octet Pattern */
- u32 suni_tacp_reserved[5]; /* TACP Reserved */
-
- u32 suni_reserved3[24]; /* Reserved */
-
- u32 suni_master_test; /* SUNI Master Test */
- u32 suni_reserved_test; /* SUNI Reserved for Test */
-} IA_SUNI;
-
+enum ia_suni {
+ SUNI_MASTER_RESET = 0x000, /* SUNI Master Reset and Identity */
+ SUNI_MASTER_CONFIG = 0x004, /* SUNI Master Configuration */
+ SUNI_MASTER_INTR_STAT = 0x008, /* SUNI Master Interrupt Status */
+ SUNI_RESERVED1 = 0x00c, /* Reserved */
+ SUNI_MASTER_CLK_MONITOR = 0x010, /* SUNI Master Clock Monitor */
+ SUNI_MASTER_CONTROL = 0x014, /* SUNI Master Clock Monitor */
+ /* Reserved (10) */
+ SUNI_RSOP_CONTROL = 0x040, /* RSOP Control/Interrupt Enable */
+ SUNI_RSOP_STATUS = 0x044, /* RSOP Status/Interrupt States */
+ SUNI_RSOP_SECTION_BIP8L = 0x048, /* RSOP Section BIP-8 LSB */
+ SUNI_RSOP_SECTION_BIP8M = 0x04c, /* RSOP Section BIP-8 MSB */
+
+ SUNI_TSOP_CONTROL = 0x050, /* TSOP Control */
+ SUNI_TSOP_DIAG = 0x054, /* TSOP Disgnostics */
+ /* Reserved (2) */
+ SUNI_RLOP_CS = 0x060, /* RLOP Control/Status */
+ SUNI_RLOP_INTR = 0x064, /* RLOP Interrupt Enable/Status */
+ SUNI_RLOP_LINE_BIP24L = 0x068, /* RLOP Line BIP-24 LSB */
+ SUNI_RLOP_LINE_BIP24 = 0x06c, /* RLOP Line BIP-24 */
+ SUNI_RLOP_LINE_BIP24M = 0x070, /* RLOP Line BIP-24 MSB */
+ SUNI_RLOP_LINE_FEBEL = 0x074, /* RLOP Line FEBE LSB */
+ SUNI_RLOP_LINE_FEBE = 0x078, /* RLOP Line FEBE */
+ SUNI_RLOP_LINE_FEBEM = 0x07c, /* RLOP Line FEBE MSB */
+
+ SUNI_TLOP_CONTROL = 0x080, /* TLOP Control */
+ SUNI_TLOP_DISG = 0x084, /* TLOP Disgnostics */
+ /* Reserved (14) */
+ SUNI_RPOP_CS = 0x0c0, /* RPOP Status/Control */
+ SUNI_RPOP_INTR = 0x0c4, /* RPOP Interrupt/Status */
+ SUNI_RPOP_RESERVED = 0x0c8, /* RPOP Reserved */
+ SUNI_RPOP_INTR_ENA = 0x0cc, /* RPOP Interrupt Enable */
+ /* Reserved (3) */
+ SUNI_RPOP_PATH_SIG = 0x0dc, /* RPOP Path Signal Label */
+ SUNI_RPOP_BIP8L = 0x0e0, /* RPOP Path BIP-8 LSB */
+ SUNI_RPOP_BIP8M = 0x0e4, /* RPOP Path BIP-8 MSB */
+ SUNI_RPOP_FEBEL = 0x0e8, /* RPOP Path FEBE LSB */
+ SUNI_RPOP_FEBEM = 0x0ec, /* RPOP Path FEBE MSB */
+ /* Reserved (4) */
+ SUNI_TPOP_CNTRL_DAIG = 0x100, /* TPOP Control/Disgnostics */
+ SUNI_TPOP_POINTER_CTRL = 0x104, /* TPOP Pointer Control */
+ SUNI_TPOP_SOURCER_CTRL = 0x108, /* TPOP Source Control */
+ /* Reserved (2) */
+ SUNI_TPOP_ARB_PRTL = 0x114, /* TPOP Arbitrary Pointer LSB */
+ SUNI_TPOP_ARB_PRTM = 0x118, /* TPOP Arbitrary Pointer MSB */
+ SUNI_TPOP_RESERVED2 = 0x11c, /* TPOP Reserved */
+ SUNI_TPOP_PATH_SIG = 0x120, /* TPOP Path Signal Lable */
+ SUNI_TPOP_PATH_STATUS = 0x124, /* TPOP Path Status */
+ /* Reserved (6) */
+ SUNI_RACP_CS = 0x140, /* RACP Control/Status */
+ SUNI_RACP_INTR = 0x144, /* RACP Interrupt Enable/Status */
+ SUNI_RACP_HDR_PATTERN = 0x148, /* RACP Match Header Pattern */
+ SUNI_RACP_HDR_MASK = 0x14c, /* RACP Match Header Mask */
+ SUNI_RACP_CORR_HCS = 0x150, /* RACP Correctable HCS Error Count */
+ SUNI_RACP_UNCORR_HCS = 0x154, /* RACP Uncorrectable HCS Err Count */
+ /* Reserved (10) */
+ SUNI_TACP_CONTROL = 0x180, /* TACP Control */
+ SUNI_TACP_IDLE_HDR_PAT = 0x184, /* TACP Idle Cell Header Pattern */
+ SUNI_TACP_IDLE_PAY_PAY = 0x188, /* TACP Idle Cell Payld Octet Patrn */
+ /* Reserved (5) */
+ /* Reserved (24) */
+ /* FIXME: unused but name conflicts.
+ * SUNI_MASTER_TEST = 0x200, SUNI Master Test */
+ SUNI_RESERVED_TEST = 0x204 /* SUNI Reserved for Test */
+};
typedef struct _SUNI_STATS_
{
@@ -993,13 +985,11 @@ typedef struct _SUNI_STATS_
u32 racp_uchcs_count; // uncorrectable HCS error count
} IA_SUNI_STATS;
-typedef struct iadev_t {
+typedef struct iadev_priv {
/*-----base pointers into (i)chipSAR+ address space */
- u32 __iomem *phy; /* base pointer into phy(SUNI) */
- u32 __iomem *dma; /* base pointer into DMA control
- registers */
- u32 __iomem *reg; /* base pointer to SAR registers
- - Bus Interface Control Regs */
+ u32 __iomem *phy; /* Base pointer into phy (SUNI). */
+ u32 __iomem *dma; /* Base pointer into DMA control registers. */
+ u32 __iomem *reg; /* Base pointer to SAR registers. */
u32 __iomem *seg_reg; /* base pointer to segmentation engine
internal registers */
u32 __iomem *reass_reg; /* base pointer to reassemble engine
@@ -1071,14 +1061,14 @@ typedef struct iadev_t {
#define INPH_IA_VCC(v) ((struct ia_vcc *) (v)->dev_data)
/******************* IDT77105 25MB/s PHY DEFINE *****************************/
-typedef struct {
- u_int mb25_master_ctrl; /* Master control */
- u_int mb25_intr_status; /* Interrupt status */
- u_int mb25_diag_control; /* Diagnostic control */
- u_int mb25_led_hec; /* LED driver and HEC status/control */
- u_int mb25_low_byte_counter; /* Low byte counter */
- u_int mb25_high_byte_counter; /* High byte counter */
-} ia_mb25_t;
+enum ia_mb25 {
+ MB25_MASTER_CTRL = 0x00, /* Master control */
+ MB25_INTR_STATUS = 0x04, /* Interrupt status */
+ MB25_DIAG_CONTROL = 0x08, /* Diagnostic control */
+ MB25_LED_HEC = 0x0c, /* LED driver and HEC status/control */
+ MB25_LOW_BYTE_COUNTER = 0x10,
+ MB25_HIGH_BYTE_COUNTER = 0x14
+};
/*
* Master Control
@@ -1127,122 +1117,121 @@ typedef struct {
#define FE_E3_PHY 0x0090 /* E3 */
/*********************** SUNI_PM7345 PHY DEFINE HERE *********************/
-typedef struct _suni_pm7345_t
-{
- u_int suni_config; /* SUNI Configuration */
- u_int suni_intr_enbl; /* SUNI Interrupt Enable */
- u_int suni_intr_stat; /* SUNI Interrupt Status */
- u_int suni_control; /* SUNI Control */
- u_int suni_id_reset; /* SUNI Reset and Identity */
- u_int suni_data_link_ctrl;
- u_int suni_rboc_conf_intr_enbl;
- u_int suni_rboc_stat;
- u_int suni_ds3_frm_cfg;
- u_int suni_ds3_frm_intr_enbl;
- u_int suni_ds3_frm_intr_stat;
- u_int suni_ds3_frm_stat;
- u_int suni_rfdl_cfg;
- u_int suni_rfdl_enbl_stat;
- u_int suni_rfdl_stat;
- u_int suni_rfdl_data;
- u_int suni_pmon_chng;
- u_int suni_pmon_intr_enbl_stat;
- u_int suni_reserved1[0x13-0x11];
- u_int suni_pmon_lcv_evt_cnt_lsb;
- u_int suni_pmon_lcv_evt_cnt_msb;
- u_int suni_pmon_fbe_evt_cnt_lsb;
- u_int suni_pmon_fbe_evt_cnt_msb;
- u_int suni_pmon_sez_det_cnt_lsb;
- u_int suni_pmon_sez_det_cnt_msb;
- u_int suni_pmon_pe_evt_cnt_lsb;
- u_int suni_pmon_pe_evt_cnt_msb;
- u_int suni_pmon_ppe_evt_cnt_lsb;
- u_int suni_pmon_ppe_evt_cnt_msb;
- u_int suni_pmon_febe_evt_cnt_lsb;
- u_int suni_pmon_febe_evt_cnt_msb;
- u_int suni_ds3_tran_cfg;
- u_int suni_ds3_tran_diag;
- u_int suni_reserved2[0x23-0x21];
- u_int suni_xfdl_cfg;
- u_int suni_xfdl_intr_st;
- u_int suni_xfdl_xmit_data;
- u_int suni_xboc_code;
- u_int suni_splr_cfg;
- u_int suni_splr_intr_en;
- u_int suni_splr_intr_st;
- u_int suni_splr_status;
- u_int suni_splt_cfg;
- u_int suni_splt_cntl;
- u_int suni_splt_diag_g1;
- u_int suni_splt_f1;
- u_int suni_cppm_loc_meters;
- u_int suni_cppm_chng_of_cppm_perf_meter;
- u_int suni_cppm_b1_err_cnt_lsb;
- u_int suni_cppm_b1_err_cnt_msb;
- u_int suni_cppm_framing_err_cnt_lsb;
- u_int suni_cppm_framing_err_cnt_msb;
- u_int suni_cppm_febe_cnt_lsb;
- u_int suni_cppm_febe_cnt_msb;
- u_int suni_cppm_hcs_err_cnt_lsb;
- u_int suni_cppm_hcs_err_cnt_msb;
- u_int suni_cppm_idle_un_cell_cnt_lsb;
- u_int suni_cppm_idle_un_cell_cnt_msb;
- u_int suni_cppm_rcv_cell_cnt_lsb;
- u_int suni_cppm_rcv_cell_cnt_msb;
- u_int suni_cppm_xmit_cell_cnt_lsb;
- u_int suni_cppm_xmit_cell_cnt_msb;
- u_int suni_rxcp_ctrl;
- u_int suni_rxcp_fctrl;
- u_int suni_rxcp_intr_en_sts;
- u_int suni_rxcp_idle_pat_h1;
- u_int suni_rxcp_idle_pat_h2;
- u_int suni_rxcp_idle_pat_h3;
- u_int suni_rxcp_idle_pat_h4;
- u_int suni_rxcp_idle_mask_h1;
- u_int suni_rxcp_idle_mask_h2;
- u_int suni_rxcp_idle_mask_h3;
- u_int suni_rxcp_idle_mask_h4;
- u_int suni_rxcp_cell_pat_h1;
- u_int suni_rxcp_cell_pat_h2;
- u_int suni_rxcp_cell_pat_h3;
- u_int suni_rxcp_cell_pat_h4;
- u_int suni_rxcp_cell_mask_h1;
- u_int suni_rxcp_cell_mask_h2;
- u_int suni_rxcp_cell_mask_h3;
- u_int suni_rxcp_cell_mask_h4;
- u_int suni_rxcp_hcs_cs;
- u_int suni_rxcp_lcd_cnt_threshold;
- u_int suni_reserved3[0x57-0x54];
- u_int suni_txcp_ctrl;
- u_int suni_txcp_intr_en_sts;
- u_int suni_txcp_idle_pat_h1;
- u_int suni_txcp_idle_pat_h2;
- u_int suni_txcp_idle_pat_h3;
- u_int suni_txcp_idle_pat_h4;
- u_int suni_txcp_idle_pat_h5;
- u_int suni_txcp_idle_payload;
- u_int suni_e3_frm_fram_options;
- u_int suni_e3_frm_maint_options;
- u_int suni_e3_frm_fram_intr_enbl;
- u_int suni_e3_frm_fram_intr_ind_stat;
- u_int suni_e3_frm_maint_intr_enbl;
- u_int suni_e3_frm_maint_intr_ind;
- u_int suni_e3_frm_maint_stat;
- u_int suni_reserved4;
- u_int suni_e3_tran_fram_options;
- u_int suni_e3_tran_stat_diag_options;
- u_int suni_e3_tran_bip_8_err_mask;
- u_int suni_e3_tran_maint_adapt_options;
- u_int suni_ttb_ctrl;
- u_int suni_ttb_trail_trace_id_stat;
- u_int suni_ttb_ind_addr;
- u_int suni_ttb_ind_data;
- u_int suni_ttb_exp_payload_type;
- u_int suni_ttb_payload_type_ctrl_stat;
- u_int suni_pad5[0x7f-0x71];
- u_int suni_master_test;
- u_int suni_pad6[0xff-0x80];
-}suni_pm7345_t;
+enum suni_pm7345 {
+ SUNI_CONFIG = 0x000, /* SUNI Configuration */
+ SUNI_INTR_ENBL = 0x004, /* SUNI Interrupt Enable */
+ SUNI_INTR_STAT = 0x008, /* SUNI Interrupt Status */
+ SUNI_CONTROL = 0x00c, /* SUNI Control */
+ SUNI_ID_RESET = 0x010, /* SUNI Reset and Identity */
+ SUNI_DATA_LINK_CTRL = 0x014,
+ SUNI_RBOC_CONF_INTR_ENBL = 0x018,
+ SUNI_RBOC_STAT = 0x01c,
+ SUNI_DS3_FRM_CFG = 0x020,
+ SUNI_DS3_FRM_INTR_ENBL = 0x024,
+ SUNI_DS3_FRM_INTR_STAT = 0x028,
+ SUNI_DS3_FRM_STAT = 0x02c,
+ SUNI_RFDL_CFG = 0x030,
+ SUNI_RFDL_ENBL_STAT = 0x034,
+ SUNI_RFDL_STAT = 0x038,
+ SUNI_RFDL_DATA = 0x03c,
+ SUNI_PMON_CHNG = 0x040,
+ SUNI_PMON_INTR_ENBL_STAT = 0x044,
+ /* SUNI_RESERVED1 (0x13 - 0x11) */
+ SUNI_PMON_LCV_EVT_CNT_LSB = 0x050,
+ SUNI_PMON_LCV_EVT_CNT_MSB = 0x054,
+ SUNI_PMON_FBE_EVT_CNT_LSB = 0x058,
+ SUNI_PMON_FBE_EVT_CNT_MSB = 0x05c,
+ SUNI_PMON_SEZ_DET_CNT_LSB = 0x060,
+ SUNI_PMON_SEZ_DET_CNT_MSB = 0x064,
+ SUNI_PMON_PE_EVT_CNT_LSB = 0x068,
+ SUNI_PMON_PE_EVT_CNT_MSB = 0x06c,
+ SUNI_PMON_PPE_EVT_CNT_LSB = 0x070,
+ SUNI_PMON_PPE_EVT_CNT_MSB = 0x074,
+ SUNI_PMON_FEBE_EVT_CNT_LSB = 0x078,
+ SUNI_PMON_FEBE_EVT_CNT_MSB = 0x07c,
+ SUNI_DS3_TRAN_CFG = 0x080,
+ SUNI_DS3_TRAN_DIAG = 0x084,
+ /* SUNI_RESERVED2 (0x23 - 0x21) */
+ SUNI_XFDL_CFG = 0x090,
+ SUNI_XFDL_INTR_ST = 0x094,
+ SUNI_XFDL_XMIT_DATA = 0x098,
+ SUNI_XBOC_CODE = 0x09c,
+ SUNI_SPLR_CFG = 0x0a0,
+ SUNI_SPLR_INTR_EN = 0x0a4,
+ SUNI_SPLR_INTR_ST = 0x0a8,
+ SUNI_SPLR_STATUS = 0x0ac,
+ SUNI_SPLT_CFG = 0x0b0,
+ SUNI_SPLT_CNTL = 0x0b4,
+ SUNI_SPLT_DIAG_G1 = 0x0b8,
+ SUNI_SPLT_F1 = 0x0bc,
+ SUNI_CPPM_LOC_METERS = 0x0c0,
+ SUNI_CPPM_CHG_OF_CPPM_PERF_METR = 0x0c4,
+ SUNI_CPPM_B1_ERR_CNT_LSB = 0x0c8,
+ SUNI_CPPM_B1_ERR_CNT_MSB = 0x0cc,
+ SUNI_CPPM_FRAMING_ERR_CNT_LSB = 0x0d0,
+ SUNI_CPPM_FRAMING_ERR_CNT_MSB = 0x0d4,
+ SUNI_CPPM_FEBE_CNT_LSB = 0x0d8,
+ SUNI_CPPM_FEBE_CNT_MSB = 0x0dc,
+ SUNI_CPPM_HCS_ERR_CNT_LSB = 0x0e0,
+ SUNI_CPPM_HCS_ERR_CNT_MSB = 0x0e4,
+ SUNI_CPPM_IDLE_UN_CELL_CNT_LSB = 0x0e8,
+ SUNI_CPPM_IDLE_UN_CELL_CNT_MSB = 0x0ec,
+ SUNI_CPPM_RCV_CELL_CNT_LSB = 0x0f0,
+ SUNI_CPPM_RCV_CELL_CNT_MSB = 0x0f4,
+ SUNI_CPPM_XMIT_CELL_CNT_LSB = 0x0f8,
+ SUNI_CPPM_XMIT_CELL_CNT_MSB = 0x0fc,
+ SUNI_RXCP_CTRL = 0x100,
+ SUNI_RXCP_FCTRL = 0x104,
+ SUNI_RXCP_INTR_EN_STS = 0x108,
+ SUNI_RXCP_IDLE_PAT_H1 = 0x10c,
+ SUNI_RXCP_IDLE_PAT_H2 = 0x110,
+ SUNI_RXCP_IDLE_PAT_H3 = 0x114,
+ SUNI_RXCP_IDLE_PAT_H4 = 0x118,
+ SUNI_RXCP_IDLE_MASK_H1 = 0x11c,
+ SUNI_RXCP_IDLE_MASK_H2 = 0x120,
+ SUNI_RXCP_IDLE_MASK_H3 = 0x124,
+ SUNI_RXCP_IDLE_MASK_H4 = 0x128,
+ SUNI_RXCP_CELL_PAT_H1 = 0x12c,
+ SUNI_RXCP_CELL_PAT_H2 = 0x130,
+ SUNI_RXCP_CELL_PAT_H3 = 0x134,
+ SUNI_RXCP_CELL_PAT_H4 = 0x138,
+ SUNI_RXCP_CELL_MASK_H1 = 0x13c,
+ SUNI_RXCP_CELL_MASK_H2 = 0x140,
+ SUNI_RXCP_CELL_MASK_H3 = 0x144,
+ SUNI_RXCP_CELL_MASK_H4 = 0x148,
+ SUNI_RXCP_HCS_CS = 0x14c,
+ SUNI_RXCP_LCD_CNT_THRESHOLD = 0x150,
+ /* SUNI_RESERVED3 (0x57 - 0x54) */
+ SUNI_TXCP_CTRL = 0x160,
+ SUNI_TXCP_INTR_EN_STS = 0x164,
+ SUNI_TXCP_IDLE_PAT_H1 = 0x168,
+ SUNI_TXCP_IDLE_PAT_H2 = 0x16c,
+ SUNI_TXCP_IDLE_PAT_H3 = 0x170,
+ SUNI_TXCP_IDLE_PAT_H4 = 0x174,
+ SUNI_TXCP_IDLE_PAT_H5 = 0x178,
+ SUNI_TXCP_IDLE_PAYLOAD = 0x17c,
+ SUNI_E3_FRM_FRAM_OPTIONS = 0x180,
+ SUNI_E3_FRM_MAINT_OPTIONS = 0x184,
+ SUNI_E3_FRM_FRAM_INTR_ENBL = 0x188,
+ SUNI_E3_FRM_FRAM_INTR_IND_STAT = 0x18c,
+ SUNI_E3_FRM_MAINT_INTR_ENBL = 0x190,
+ SUNI_E3_FRM_MAINT_INTR_IND = 0x194,
+ SUNI_E3_FRM_MAINT_STAT = 0x198,
+ SUNI_RESERVED4 = 0x19c,
+ SUNI_E3_TRAN_FRAM_OPTIONS = 0x1a0,
+ SUNI_E3_TRAN_STAT_DIAG_OPTIONS = 0x1a4,
+ SUNI_E3_TRAN_BIP_8_ERR_MASK = 0x1a8,
+ SUNI_E3_TRAN_MAINT_ADAPT_OPTS = 0x1ac,
+ SUNI_TTB_CTRL = 0x1b0,
+ SUNI_TTB_TRAIL_TRACE_ID_STAT = 0x1b4,
+ SUNI_TTB_IND_ADDR = 0x1b8,
+ SUNI_TTB_IND_DATA = 0x1bc,
+ SUNI_TTB_EXP_PAYLOAD_TYPE = 0x1c0,
+ SUNI_TTB_PAYLOAD_TYPE_CTRL_STAT = 0x1c4,
+ /* SUNI_PAD5 (0x7f - 0x71) */
+ SUNI_MASTER_TEST = 0x200,
+ /* SUNI_PAD6 (0xff - 0x80) */
+};
#define SUNI_PM7345_T suni_pm7345_t
#define SUNI_PM7345 0x20 /* Suni chip type */
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index e828c548749..f5569699f31 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1457,10 +1457,9 @@ static int __devinit vcc_table_allocate(struct lanai_dev *lanai)
return (lanai->vccs == NULL) ? -ENOMEM : 0;
#else
int bytes = (lanai->num_vci) * sizeof(struct lanai_vcc *);
- lanai->vccs = (struct lanai_vcc **) vmalloc(bytes);
+ lanai->vccs = vzalloc(bytes);
if (unlikely(lanai->vccs == NULL))
return -ENOMEM;
- memset(lanai->vccs, 0, bytes);
return 0;
#endif
}
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 4f1df2e8fd7..b80d91cc8c3 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -47,6 +47,18 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
return ret;
}
+static const void *class_attr_namespace(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ struct class_attribute *class_attr = to_class_attr(attr);
+ struct subsys_private *cp = to_subsys_private(kobj);
+ const void *ns = NULL;
+
+ if (class_attr->namespace)
+ ns = class_attr->namespace(cp->class, class_attr);
+ return ns;
+}
+
static void class_release(struct kobject *kobj)
{
struct subsys_private *cp = to_subsys_private(kobj);
@@ -72,8 +84,9 @@ static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject
}
static const struct sysfs_ops class_sysfs_ops = {
- .show = class_attr_show,
- .store = class_attr_store,
+ .show = class_attr_show,
+ .store = class_attr_store,
+ .namespace = class_attr_namespace,
};
static struct kobj_type class_ktype = {
diff --git a/drivers/base/core.c b/drivers/base/core.c
index bc8729d603a..82c865452c7 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1764,8 +1764,8 @@ void device_shutdown(void)
#ifdef CONFIG_PRINTK
-static int __dev_printk(const char *level, const struct device *dev,
- struct va_format *vaf)
+int __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf)
{
if (!dev)
return printk("%s(NULL device *): %pV", level, vaf);
@@ -1773,6 +1773,7 @@ static int __dev_printk(const char *level, const struct device *dev,
return printk("%s%s %s: %pV",
level, dev_driver_string(dev), dev_name(dev), vaf);
}
+EXPORT_SYMBOL(__dev_printk);
int dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 6658da743c3..142e3d600f1 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -147,6 +147,9 @@ probe_failed:
printk(KERN_WARNING
"%s: probe of %s failed with error %d\n",
drv->name, dev_name(dev), ret);
+ } else {
+ pr_debug("%s: probe of %s rejects match %d\n",
+ drv->name, dev_name(dev), ret);
}
/*
* Ignore errors returned by ->probe so that the next driver can try
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2840ed4668c..8272d92d22c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -224,13 +224,48 @@ int memory_isolate_notify(unsigned long val, void *v)
}
/*
+ * The probe routines leave the pages reserved, just as the bootmem code does.
+ * Make sure they're still that way.
+ */
+static bool pages_correctly_reserved(unsigned long start_pfn,
+ unsigned long nr_pages)
+{
+ int i, j;
+ struct page *page;
+ unsigned long pfn = start_pfn;
+
+ /*
+ * memmap between sections is not contiguous except with
+ * SPARSEMEM_VMEMMAP. We lookup the page once per section
+ * and assume memmap is contiguous within each section
+ */
+ for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
+ if (WARN_ON_ONCE(!pfn_valid(pfn)))
+ return false;
+ page = pfn_to_page(pfn);
+
+ for (j = 0; j < PAGES_PER_SECTION; j++) {
+ if (PageReserved(page + j))
+ continue;
+
+ printk(KERN_WARNING "section number %ld page number %d "
+ "not reserved, was it already online?\n",
+ pfn_to_section_nr(pfn), j);
+
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
* OK to have direct references to sparsemem variables in here.
*/
static int
memory_block_action(unsigned long phys_index, unsigned long action)
{
- int i;
unsigned long start_pfn, start_paddr;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
struct page *first_page;
@@ -238,26 +273,13 @@ memory_block_action(unsigned long phys_index, unsigned long action)
first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);
- /*
- * The probe routines leave the pages reserved, just
- * as the bootmem code does. Make sure they're still
- * that way.
- */
- if (action == MEM_ONLINE) {
- for (i = 0; i < nr_pages; i++) {
- if (PageReserved(first_page+i))
- continue;
-
- printk(KERN_WARNING "section number %ld page number %d "
- "not reserved, was it already online?\n",
- phys_index, i);
- return -EBUSY;
- }
- }
-
switch (action) {
case MEM_ONLINE:
start_pfn = page_to_pfn(first_page);
+
+ if (!pages_correctly_reserved(start_pfn, nr_pages))
+ return -EBUSY;
+
ret = online_pages(start_pfn, nr_pages);
break;
case MEM_OFFLINE:
@@ -380,9 +402,13 @@ memory_probe_store(struct class *class, struct class_attribute *attr,
u64 phys_addr;
int nid;
int i, ret;
+ unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
phys_addr = simple_strtoull(buf, NULL, 0);
+ if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
+ return -EINVAL;
+
for (i = 0; i < sections_per_block; i++) {
nid = memory_add_physaddr_to_nid(phys_addr);
ret = add_memory(nid, phys_addr,
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 99a5272d7c2..7a24895543e 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -375,52 +375,64 @@ void platform_device_unregister(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(platform_device_unregister);
/**
- * platform_device_register_resndata - add a platform-level device with
+ * platform_device_register_full - add a platform-level device with
* resources and platform-specific data
*
- * @parent: parent device for the device we're adding
- * @name: base name of the device we're adding
- * @id: instance id
- * @res: set of resources that needs to be allocated for the device
- * @num: number of resources
- * @data: platform specific data for this platform device
- * @size: size of platform specific data
+ * @pdevinfo: data used to create device
*
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
-struct platform_device *platform_device_register_resndata(
- struct device *parent,
- const char *name, int id,
- const struct resource *res, unsigned int num,
- const void *data, size_t size)
+struct platform_device *platform_device_register_full(
+ struct platform_device_info *pdevinfo)
{
int ret = -ENOMEM;
struct platform_device *pdev;
- pdev = platform_device_alloc(name, id);
+ pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
if (!pdev)
- goto err;
-
- pdev->dev.parent = parent;
+ goto err_alloc;
+
+ pdev->dev.parent = pdevinfo->parent;
+
+ if (pdevinfo->dma_mask) {
+ /*
+ * This memory isn't freed when the device is put,
+ * I don't have a nice idea for that though. Conceptually
+ * dma_mask in struct device should not be a pointer.
+ * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
+ */
+ pdev->dev.dma_mask =
+ kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
+ if (!pdev->dev.dma_mask)
+ goto err;
+
+ *pdev->dev.dma_mask = pdevinfo->dma_mask;
+ pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
+ }
- ret = platform_device_add_resources(pdev, res, num);
+ ret = platform_device_add_resources(pdev,
+ pdevinfo->res, pdevinfo->num_res);
if (ret)
goto err;
- ret = platform_device_add_data(pdev, data, size);
+ ret = platform_device_add_data(pdev,
+ pdevinfo->data, pdevinfo->size_data);
if (ret)
goto err;
ret = platform_device_add(pdev);
if (ret) {
err:
+ kfree(pdev->dev.dma_mask);
+
+err_alloc:
platform_device_put(pdev);
return ERR_PTR(ret);
}
return pdev;
}
-EXPORT_SYMBOL_GPL(platform_device_register_resndata);
+EXPORT_SYMBOL_GPL(platform_device_register_full);
static int platform_drv_probe(struct device *_dev)
{
@@ -614,7 +626,7 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
return rc;
add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
- (pdev->id_entry) ? pdev->id_entry->name : pdev->name);
+ pdev->name);
return 0;
}
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 2639ae79a37..81676dd1790 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_PM) += sysfs.o generic_ops.o
+obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
@@ -6,4 +6,4 @@ obj-$(CONFIG_PM_OPP) += opp.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
-ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG \ No newline at end of file
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index b97294e2d95..5f0f85d5c57 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -10,18 +10,13 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/err.h>
#ifdef CONFIG_PM
-struct pm_clk_data {
- struct list_head clock_list;
- spinlock_t lock;
-};
-
enum pce_status {
PCE_STATUS_NONE = 0,
PCE_STATUS_ACQUIRED,
@@ -36,11 +31,6 @@ struct pm_clock_entry {
enum pce_status status;
};
-static struct pm_clk_data *__to_pcd(struct device *dev)
-{
- return dev ? dev->power.subsys_data : NULL;
-}
-
/**
* pm_clk_acquire - Acquire a device clock.
* @dev: Device whose clock is to be acquired.
@@ -67,10 +57,10 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
*/
int pm_clk_add(struct device *dev, const char *con_id)
{
- struct pm_clk_data *pcd = __to_pcd(dev);
+ struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
- if (!pcd)
+ if (!psd)
return -EINVAL;
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -91,9 +81,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
pm_clk_acquire(dev, ce);
- spin_lock_irq(&pcd->lock);
- list_add_tail(&ce->node, &pcd->clock_list);
- spin_unlock_irq(&pcd->lock);
+ spin_lock_irq(&psd->lock);
+ list_add_tail(&ce->node, &psd->clock_list);
+ spin_unlock_irq(&psd->lock);
return 0;
}
@@ -114,9 +104,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
clk_put(ce->clk);
}
- if (ce->con_id)
- kfree(ce->con_id);
-
+ kfree(ce->con_id);
kfree(ce);
}
@@ -130,15 +118,15 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
*/
void pm_clk_remove(struct device *dev, const char *con_id)
{
- struct pm_clk_data *pcd = __to_pcd(dev);
+ struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
- if (!pcd)
+ if (!psd)
return;
- spin_lock_irq(&pcd->lock);
+ spin_lock_irq(&psd->lock);
- list_for_each_entry(ce, &pcd->clock_list, node) {
+ list_for_each_entry(ce, &psd->clock_list, node) {
if (!con_id && !ce->con_id)
goto remove;
else if (!con_id || !ce->con_id)
@@ -147,12 +135,12 @@ void pm_clk_remove(struct device *dev, const char *con_id)
goto remove;
}
- spin_unlock_irq(&pcd->lock);
+ spin_unlock_irq(&psd->lock);
return;
remove:
list_del(&ce->node);
- spin_unlock_irq(&pcd->lock);
+ spin_unlock_irq(&psd->lock);
__pm_clk_remove(ce);
}
@@ -161,23 +149,27 @@ void pm_clk_remove(struct device *dev, const char *con_id)
* pm_clk_init - Initialize a device's list of power management clocks.
* @dev: Device to initialize the list of PM clocks for.
*
- * Allocate a struct pm_clk_data object, initialize its lock member and
- * make the @dev's power.subsys_data field point to it.
+ * Initialize the lock and clock_list members of the device's pm_subsys_data
+ * object.
*/
-int pm_clk_init(struct device *dev)
+void pm_clk_init(struct device *dev)
{
- struct pm_clk_data *pcd;
-
- pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
- if (!pcd) {
- dev_err(dev, "Not enough memory for PM clock data.\n");
- return -ENOMEM;
- }
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ if (psd)
+ INIT_LIST_HEAD(&psd->clock_list);
+}
- INIT_LIST_HEAD(&pcd->clock_list);
- spin_lock_init(&pcd->lock);
- dev->power.subsys_data = pcd;
- return 0;
+/**
+ * pm_clk_create - Create and initialize a device's list of PM clocks.
+ * @dev: Device to create and initialize the list of PM clocks for.
+ *
+ * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
+ * members and make the @dev's power.subsys_data field point to it.
+ */
+int pm_clk_create(struct device *dev)
+{
+ int ret = dev_pm_get_subsys_data(dev);
+ return ret < 0 ? ret : 0;
}
/**
@@ -185,29 +177,28 @@ int pm_clk_init(struct device *dev)
* @dev: Device to destroy the list of PM clocks for.
*
* Clear the @dev's power.subsys_data field, remove the list of clock entries
- * from the struct pm_clk_data object pointed to by it before and free
+ * from the struct pm_subsys_data object pointed to by it before and free
* that object.
*/
void pm_clk_destroy(struct device *dev)
{
- struct pm_clk_data *pcd = __to_pcd(dev);
+ struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce, *c;
struct list_head list;
- if (!pcd)
+ if (!psd)
return;
- dev->power.subsys_data = NULL;
INIT_LIST_HEAD(&list);
- spin_lock_irq(&pcd->lock);
+ spin_lock_irq(&psd->lock);
- list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
+ list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
list_move(&ce->node, &list);
- spin_unlock_irq(&pcd->lock);
+ spin_unlock_irq(&psd->lock);
- kfree(pcd);
+ dev_pm_put_subsys_data(dev);
list_for_each_entry_safe_reverse(ce, c, &list, node) {
list_del(&ce->node);
@@ -225,25 +216,25 @@ void pm_clk_destroy(struct device *dev)
*/
int pm_clk_suspend(struct device *dev)
{
- struct pm_clk_data *pcd = __to_pcd(dev);
+ struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
dev_dbg(dev, "%s()\n", __func__);
- if (!pcd)
+ if (!psd)
return 0;
- spin_lock_irqsave(&pcd->lock, flags);
+ spin_lock_irqsave(&psd->lock, flags);
- list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
+ list_for_each_entry_reverse(ce, &psd->clock_list, node) {
if (ce->status < PCE_STATUS_ERROR) {
clk_disable(ce->clk);
ce->status = PCE_STATUS_ACQUIRED;
}
}
- spin_unlock_irqrestore(&pcd->lock, flags);
+ spin_unlock_irqrestore(&psd->lock, flags);
return 0;
}
@@ -254,25 +245,25 @@ int pm_clk_suspend(struct device *dev)
*/
int pm_clk_resume(struct device *dev)
{
- struct pm_clk_data *pcd = __to_pcd(dev);
+ struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
dev_dbg(dev, "%s()\n", __func__);
- if (!pcd)
+ if (!psd)
return 0;
- spin_lock_irqsave(&pcd->lock, flags);
+ spin_lock_irqsave(&psd->lock, flags);
- list_for_each_entry(ce, &pcd->clock_list, node) {
+ list_for_each_entry(ce, &psd->clock_list, node) {
if (ce->status < PCE_STATUS_ERROR) {
clk_enable(ce->clk);
ce->status = PCE_STATUS_ENABLED;
}
}
- spin_unlock_irqrestore(&pcd->lock, flags);
+ spin_unlock_irqrestore(&psd->lock, flags);
return 0;
}
@@ -310,7 +301,7 @@ static int pm_clk_notify(struct notifier_block *nb,
if (dev->pm_domain)
break;
- error = pm_clk_init(dev);
+ error = pm_clk_create(dev);
if (error)
break;
@@ -345,22 +336,22 @@ static int pm_clk_notify(struct notifier_block *nb,
*/
int pm_clk_suspend(struct device *dev)
{
- struct pm_clk_data *pcd = __to_pcd(dev);
+ struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
dev_dbg(dev, "%s()\n", __func__);
/* If there is no driver, the clocks are already disabled. */
- if (!pcd || !dev->driver)
+ if (!psd || !dev->driver)
return 0;
- spin_lock_irqsave(&pcd->lock, flags);
+ spin_lock_irqsave(&psd->lock, flags);
- list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+ list_for_each_entry_reverse(ce, &psd->clock_list, node)
clk_disable(ce->clk);
- spin_unlock_irqrestore(&pcd->lock, flags);
+ spin_unlock_irqrestore(&psd->lock, flags);
return 0;
}
@@ -371,22 +362,22 @@ int pm_clk_suspend(struct device *dev)
*/
int pm_clk_resume(struct device *dev)
{
- struct pm_clk_data *pcd = __to_pcd(dev);
+ struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
dev_dbg(dev, "%s()\n", __func__);
/* If there is no driver, the clocks should remain disabled. */
- if (!pcd || !dev->driver)
+ if (!psd || !dev->driver)
return 0;
- spin_lock_irqsave(&pcd->lock, flags);
+ spin_lock_irqsave(&psd->lock, flags);
- list_for_each_entry(ce, &pcd->clock_list, node)
+ list_for_each_entry(ce, &psd->clock_list, node)
clk_enable(ce->clk);
- spin_unlock_irqrestore(&pcd->lock, flags);
+ spin_unlock_irqrestore(&psd->lock, flags);
return 0;
}
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
new file mode 100644
index 00000000000..29820c39618
--- /dev/null
+++ b/drivers/base/power/common.c
@@ -0,0 +1,86 @@
+/*
+ * drivers/base/power/common.c - Common device power management code.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pm_clock.h>
+
+/**
+ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+ * @dev: Device to handle.
+ *
+ * If power.subsys_data is NULL, point it to a new object, otherwise increment
+ * its reference counter. Return 1 if a new object has been created, otherwise
+ * return 0 or error code.
+ */
+int dev_pm_get_subsys_data(struct device *dev)
+{
+ struct pm_subsys_data *psd;
+ int ret = 0;
+
+ psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+ if (!psd)
+ return -ENOMEM;
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.subsys_data) {
+ dev->power.subsys_data->refcount++;
+ } else {
+ spin_lock_init(&psd->lock);
+ psd->refcount = 1;
+ dev->power.subsys_data = psd;
+ pm_clk_init(dev);
+ psd = NULL;
+ ret = 1;
+ }
+
+ spin_unlock_irq(&dev->power.lock);
+
+ /* kfree() verifies that its argument is nonzero. */
+ kfree(psd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
+
+/**
+ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
+ * @dev: Device to handle.
+ *
+ * If the reference counter of power.subsys_data is zero after dropping the
+ * reference, power.subsys_data is removed. Return 1 if that happens or 0
+ * otherwise.
+ */
+int dev_pm_put_subsys_data(struct device *dev)
+{
+ struct pm_subsys_data *psd;
+ int ret = 0;
+
+ spin_lock_irq(&dev->power.lock);
+
+ psd = dev_to_psd(dev);
+ if (!psd) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (--psd->refcount == 0) {
+ dev->power.subsys_data = NULL;
+ kfree(psd);
+ ret = 1;
+ }
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 1c374579407..6790cf7eba5 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -29,10 +29,20 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
return pd_to_genpd(dev->pm_domain);
}
-static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
- if (!WARN_ON(genpd->sd_count == 0))
- genpd->sd_count--;
+ bool ret = false;
+
+ if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
+ ret = !!atomic_dec_and_test(&genpd->sd_count);
+
+ return ret;
+}
+
+static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
+{
+ atomic_inc(&genpd->sd_count);
+ smp_mb__after_atomic_inc();
}
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
@@ -71,81 +81,119 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
}
/**
- * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+ * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
*
- * Restore power to @genpd and all of its parents so that it is possible to
+ * Restore power to @genpd and all of its masters so that it is possible to
* resume a device belonging to it.
*/
-int pm_genpd_poweron(struct generic_pm_domain *genpd)
+int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
{
- struct generic_pm_domain *parent = genpd->parent;
+ struct gpd_link *link;
+ DEFINE_WAIT(wait);
int ret = 0;
- start:
- if (parent) {
- genpd_acquire_lock(parent);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
- } else {
+ /* If the domain's master is being waited for, we have to wait too. */
+ for (;;) {
+ prepare_to_wait(&genpd->status_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (genpd->status != GPD_STATE_WAIT_MASTER)
+ break;
+ mutex_unlock(&genpd->lock);
+
+ schedule();
+
mutex_lock(&genpd->lock);
}
+ finish_wait(&genpd->status_wait_queue, &wait);
if (genpd->status == GPD_STATE_ACTIVE
|| (genpd->prepared_count > 0 && genpd->suspend_power_off))
- goto out;
+ return 0;
if (genpd->status != GPD_STATE_POWER_OFF) {
genpd_set_active(genpd);
- goto out;
+ return 0;
}
- if (parent && parent->status != GPD_STATE_ACTIVE) {
+ /*
+ * The list is guaranteed not to change while the loop below is being
+ * executed, unless one of the masters' .power_on() callbacks fiddles
+ * with it.
+ */
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ genpd_sd_counter_inc(link->master);
+ genpd->status = GPD_STATE_WAIT_MASTER;
+
mutex_unlock(&genpd->lock);
- genpd_release_lock(parent);
- ret = pm_genpd_poweron(parent);
- if (ret)
- return ret;
+ ret = pm_genpd_poweron(link->master);
- goto start;
+ mutex_lock(&genpd->lock);
+
+ /*
+ * The "wait for parent" status is guaranteed not to change
+ * while the master is powering on.
+ */
+ genpd->status = GPD_STATE_POWER_OFF;
+ wake_up_all(&genpd->status_wait_queue);
+ if (ret) {
+ genpd_sd_counter_dec(link->master);
+ goto err;
+ }
}
if (genpd->power_on) {
ret = genpd->power_on(genpd);
if (ret)
- goto out;
+ goto err;
}
genpd_set_active(genpd);
- if (parent)
- parent->sd_count++;
- out:
- mutex_unlock(&genpd->lock);
- if (parent)
- genpd_release_lock(parent);
+ return 0;
+
+ err:
+ list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+ genpd_sd_counter_dec(link->master);
return ret;
}
+/**
+ * pm_genpd_poweron - Restore power to a given PM domain and its masters.
+ * @genpd: PM domain to power up.
+ */
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+ int ret;
+
+ mutex_lock(&genpd->lock);
+ ret = __pm_genpd_poweron(genpd);
+ mutex_unlock(&genpd->lock);
+ return ret;
+}
+
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_RUNTIME
/**
* __pm_genpd_save_device - Save the pre-suspend state of a device.
- * @dle: Device list entry of the device to save the state of.
+ * @pdd: Domain data of the device to save the state of.
* @genpd: PM domain the device belongs to.
*/
-static int __pm_genpd_save_device(struct dev_list_entry *dle,
+static int __pm_genpd_save_device(struct pm_domain_data *pdd,
struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
- struct device *dev = dle->dev;
+ struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+ struct device *dev = pdd->dev;
struct device_driver *drv = dev->driver;
int ret = 0;
- if (dle->need_restore)
+ if (gpd_data->need_restore)
return 0;
mutex_unlock(&genpd->lock);
@@ -163,24 +211,25 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
mutex_lock(&genpd->lock);
if (!ret)
- dle->need_restore = true;
+ gpd_data->need_restore = true;
return ret;
}
/**
* __pm_genpd_restore_device - Restore the pre-suspend state of a device.
- * @dle: Device list entry of the device to restore the state of.
+ * @pdd: Domain data of the device to restore the state of.
* @genpd: PM domain the device belongs to.
*/
-static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
- struct device *dev = dle->dev;
+ struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+ struct device *dev = pdd->dev;
struct device_driver *drv = dev->driver;
- if (!dle->need_restore)
+ if (!gpd_data->need_restore)
return;
mutex_unlock(&genpd->lock);
@@ -197,7 +246,7 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
mutex_lock(&genpd->lock);
- dle->need_restore = false;
+ gpd_data->need_restore = false;
}
/**
@@ -211,7 +260,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
*/
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
- return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+ return genpd->status == GPD_STATE_WAIT_MASTER
+ || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}
/**
@@ -238,8 +288,8 @@ void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
- struct generic_pm_domain *parent;
- struct dev_list_entry *dle;
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
unsigned int not_suspended;
int ret = 0;
@@ -247,19 +297,22 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
/*
* Do not try to power off the domain in the following situations:
* (1) The domain is already in the "power off" state.
- * (2) System suspend is in progress.
+ * (2) The domain is waiting for its master to power up.
* (3) One of the domain's devices is being resumed right now.
+ * (4) System suspend is in progress.
*/
- if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
- || genpd->resume_count > 0)
+ if (genpd->status == GPD_STATE_POWER_OFF
+ || genpd->status == GPD_STATE_WAIT_MASTER
+ || genpd->resume_count > 0 || genpd->prepared_count > 0)
return 0;
- if (genpd->sd_count > 0)
+ if (atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
not_suspended = 0;
- list_for_each_entry(dle, &genpd->dev_list, node)
- if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
+ list_for_each_entry(pdd, &genpd->dev_list, list_node)
+ if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
+ || pdd->dev->power.irq_safe))
not_suspended++;
if (not_suspended > genpd->in_progress)
@@ -282,54 +335,50 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
genpd->status = GPD_STATE_BUSY;
genpd->poweroff_task = current;
- list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
- ret = __pm_genpd_save_device(dle, genpd);
+ list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+ ret = atomic_read(&genpd->sd_count) == 0 ?
+ __pm_genpd_save_device(pdd, genpd) : -EBUSY;
+
+ if (genpd_abort_poweroff(genpd))
+ goto out;
+
if (ret) {
genpd_set_active(genpd);
goto out;
}
- if (genpd_abort_poweroff(genpd))
- goto out;
-
if (genpd->status == GPD_STATE_REPEAT) {
genpd->poweroff_task = NULL;
goto start;
}
}
- parent = genpd->parent;
- if (parent) {
- mutex_unlock(&genpd->lock);
-
- genpd_acquire_lock(parent);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-
- if (genpd_abort_poweroff(genpd)) {
- genpd_release_lock(parent);
+ if (genpd->power_off) {
+ if (atomic_read(&genpd->sd_count) > 0) {
+ ret = -EBUSY;
goto out;
}
- }
- if (genpd->power_off) {
+ /*
+ * If sd_count > 0 at this point, one of the subdomains hasn't
+ * managed to call pm_genpd_poweron() for the master yet after
+ * incrementing it. In that case pm_genpd_poweron() will wait
+ * for us to drop the lock, so we can call .power_off() and let
+ * the pm_genpd_poweron() restore power for us (this shouldn't
+ * happen very often).
+ */
ret = genpd->power_off(genpd);
if (ret == -EBUSY) {
genpd_set_active(genpd);
- if (parent)
- genpd_release_lock(parent);
-
goto out;
}
}
genpd->status = GPD_STATE_POWER_OFF;
- if (parent) {
- genpd_sd_counter_dec(parent);
- if (parent->sd_count == 0)
- genpd_queue_power_off_work(parent);
-
- genpd_release_lock(parent);
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ genpd_sd_counter_dec(link->master);
+ genpd_queue_power_off_work(link->master);
}
out:
@@ -371,12 +420,21 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
+ might_sleep_if(!genpd->dev_irq_safe);
+
if (genpd->stop_device) {
int ret = genpd->stop_device(dev);
if (ret)
return ret;
}
+ /*
+ * If power.irq_safe is set, this routine will be run with interrupts
+ * off, so it can't use mutexes.
+ */
+ if (dev->power.irq_safe)
+ return 0;
+
mutex_lock(&genpd->lock);
genpd->in_progress++;
pm_genpd_poweroff(genpd);
@@ -387,24 +445,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
}
/**
- * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
- * @dev: Device to resume.
- * @genpd: PM domain the device belongs to.
- */
-static void __pm_genpd_runtime_resume(struct device *dev,
- struct generic_pm_domain *genpd)
-{
- struct dev_list_entry *dle;
-
- list_for_each_entry(dle, &genpd->dev_list, node) {
- if (dle->dev == dev) {
- __pm_genpd_restore_device(dle, genpd);
- break;
- }
- }
-}
-
-/**
* pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
* @dev: Device to resume.
*
@@ -424,11 +464,18 @@ static int pm_genpd_runtime_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- ret = pm_genpd_poweron(genpd);
- if (ret)
- return ret;
+ might_sleep_if(!genpd->dev_irq_safe);
+
+ /* If power.irq_safe, the PM domain is never powered off. */
+ if (dev->power.irq_safe)
+ goto out;
mutex_lock(&genpd->lock);
+ ret = __pm_genpd_poweron(genpd);
+ if (ret) {
+ mutex_unlock(&genpd->lock);
+ return ret;
+ }
genpd->status = GPD_STATE_BUSY;
genpd->resume_count++;
for (;;) {
@@ -448,12 +495,13 @@ static int pm_genpd_runtime_resume(struct device *dev)
mutex_lock(&genpd->lock);
}
finish_wait(&genpd->status_wait_queue, &wait);
- __pm_genpd_runtime_resume(dev, genpd);
+ __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
genpd->resume_count--;
genpd_set_active(genpd);
wake_up_all(&genpd->status_wait_queue);
mutex_unlock(&genpd->lock);
+ out:
if (genpd->start_device)
genpd->start_device(dev);
@@ -478,8 +526,6 @@ void pm_genpd_poweroff_unused(void)
#else
static inline void genpd_power_off_work_fn(struct work_struct *work) {}
-static inline void __pm_genpd_runtime_resume(struct device *dev,
- struct generic_pm_domain *genpd) {}
#define pm_genpd_runtime_suspend NULL
#define pm_genpd_runtime_resume NULL
@@ -489,11 +535,11 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
#ifdef CONFIG_PM_SLEEP
/**
- * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
+ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
*
* Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so. Also, in that case propagate to its parent.
+ * hibernation) and do that if so. Also, in that case propagate to its masters.
*
* This function is only called in "noirq" stages of system power transitions,
* so it need not acquire locks (all of the "noirq" callbacks are executed
@@ -501,21 +547,23 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
*/
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
- struct generic_pm_domain *parent = genpd->parent;
+ struct gpd_link *link;
if (genpd->status == GPD_STATE_POWER_OFF)
return;
- if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+ if (genpd->suspended_count != genpd->device_count
+ || atomic_read(&genpd->sd_count) > 0)
return;
if (genpd->power_off)
genpd->power_off(genpd);
genpd->status = GPD_STATE_POWER_OFF;
- if (parent) {
- genpd_sd_counter_dec(parent);
- pm_genpd_sync_poweroff(parent);
+
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ genpd_sd_counter_dec(link->master);
+ pm_genpd_sync_poweroff(link->master);
}
}
@@ -666,7 +714,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
if (ret)
return ret;
- if (device_may_wakeup(dev)
+ if (dev->power.wakeup_path
&& genpd->active_wakeup && genpd->active_wakeup(dev))
return 0;
@@ -890,7 +938,7 @@ static int pm_genpd_dev_poweroff_noirq(struct device *dev)
if (ret)
return ret;
- if (device_may_wakeup(dev)
+ if (dev->power.wakeup_path
&& genpd->active_wakeup && genpd->active_wakeup(dev))
return 0;
@@ -1034,7 +1082,8 @@ static void pm_genpd_complete(struct device *dev)
*/
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
- struct dev_list_entry *dle;
+ struct generic_pm_domain_data *gpd_data;
+ struct pm_domain_data *pdd;
int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -1054,26 +1103,26 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
goto out;
}
- list_for_each_entry(dle, &genpd->dev_list, node)
- if (dle->dev == dev) {
+ list_for_each_entry(pdd, &genpd->dev_list, list_node)
+ if (pdd->dev == dev) {
ret = -EINVAL;
goto out;
}
- dle = kzalloc(sizeof(*dle), GFP_KERNEL);
- if (!dle) {
+ gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+ if (!gpd_data) {
ret = -ENOMEM;
goto out;
}
- dle->dev = dev;
- dle->need_restore = false;
- list_add_tail(&dle->node, &genpd->dev_list);
genpd->device_count++;
- spin_lock_irq(&dev->power.lock);
dev->pm_domain = &genpd->domain;
- spin_unlock_irq(&dev->power.lock);
+ dev_pm_get_subsys_data(dev);
+ dev->power.subsys_data->domain_data = &gpd_data->base;
+ gpd_data->base.dev = dev;
+ gpd_data->need_restore = false;
+ list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
out:
genpd_release_lock(genpd);
@@ -1089,7 +1138,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
struct device *dev)
{
- struct dev_list_entry *dle;
+ struct pm_domain_data *pdd;
int ret = -EINVAL;
dev_dbg(dev, "%s()\n", __func__);
@@ -1104,17 +1153,17 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
goto out;
}
- list_for_each_entry(dle, &genpd->dev_list, node) {
- if (dle->dev != dev)
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ if (pdd->dev != dev)
continue;
- spin_lock_irq(&dev->power.lock);
+ list_del_init(&pdd->list_node);
+ pdd->dev = NULL;
+ dev_pm_put_subsys_data(dev);
dev->pm_domain = NULL;
- spin_unlock_irq(&dev->power.lock);
+ kfree(to_gpd_data(pdd));
genpd->device_count--;
- list_del(&dle->node);
- kfree(dle);
ret = 0;
break;
@@ -1129,48 +1178,55 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
* @genpd: Master PM domain to add the subdomain to.
- * @new_subdomain: Subdomain to be added.
+ * @subdomain: Subdomain to be added.
*/
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
- struct generic_pm_domain *new_subdomain)
+ struct generic_pm_domain *subdomain)
{
- struct generic_pm_domain *subdomain;
+ struct gpd_link *link;
int ret = 0;
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
return -EINVAL;
start:
genpd_acquire_lock(genpd);
- mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+ mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
- if (new_subdomain->status != GPD_STATE_POWER_OFF
- && new_subdomain->status != GPD_STATE_ACTIVE) {
- mutex_unlock(&new_subdomain->lock);
+ if (subdomain->status != GPD_STATE_POWER_OFF
+ && subdomain->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&subdomain->lock);
genpd_release_lock(genpd);
goto start;
}
if (genpd->status == GPD_STATE_POWER_OFF
- && new_subdomain->status != GPD_STATE_POWER_OFF) {
+ && subdomain->status != GPD_STATE_POWER_OFF) {
ret = -EINVAL;
goto out;
}
- list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
- if (subdomain == new_subdomain) {
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ if (link->slave == subdomain && link->master == genpd) {
ret = -EINVAL;
goto out;
}
}
- list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
- new_subdomain->parent = genpd;
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ link->master = genpd;
+ list_add_tail(&link->master_node, &genpd->master_links);
+ link->slave = subdomain;
+ list_add_tail(&link->slave_node, &subdomain->slave_links);
if (subdomain->status != GPD_STATE_POWER_OFF)
- genpd->sd_count++;
+ genpd_sd_counter_inc(genpd);
out:
- mutex_unlock(&new_subdomain->lock);
+ mutex_unlock(&subdomain->lock);
genpd_release_lock(genpd);
return ret;
@@ -1179,22 +1235,22 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
/**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
* @genpd: Master PM domain to remove the subdomain from.
- * @target: Subdomain to be removed.
+ * @subdomain: Subdomain to be removed.
*/
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
- struct generic_pm_domain *target)
+ struct generic_pm_domain *subdomain)
{
- struct generic_pm_domain *subdomain;
+ struct gpd_link *link;
int ret = -EINVAL;
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
return -EINVAL;
start:
genpd_acquire_lock(genpd);
- list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
- if (subdomain != target)
+ list_for_each_entry(link, &genpd->master_links, master_node) {
+ if (link->slave != subdomain)
continue;
mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
@@ -1206,8 +1262,9 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
goto start;
}
- list_del(&subdomain->sd_node);
- subdomain->parent = NULL;
+ list_del(&link->master_node);
+ list_del(&link->slave_node);
+ kfree(link);
if (subdomain->status != GPD_STATE_POWER_OFF)
genpd_sd_counter_dec(genpd);
@@ -1234,15 +1291,14 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd))
return;
- INIT_LIST_HEAD(&genpd->sd_node);
- genpd->parent = NULL;
+ INIT_LIST_HEAD(&genpd->master_links);
+ INIT_LIST_HEAD(&genpd->slave_links);
INIT_LIST_HEAD(&genpd->dev_list);
- INIT_LIST_HEAD(&genpd->sd_list);
mutex_init(&genpd->lock);
genpd->gov = gov;
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
genpd->in_progress = 0;
- genpd->sd_count = 0;
+ atomic_set(&genpd->sd_count, 0);
genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
init_waitqueue_head(&genpd->status_wait_queue);
genpd->poweroff_task = NULL;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a85459126bc..59f8ab23548 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -46,6 +46,7 @@ LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);
+struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
@@ -65,6 +66,7 @@ void device_pm_init(struct device *dev)
spin_lock_init(&dev->power.lock);
pm_runtime_init(dev);
INIT_LIST_HEAD(&dev->power.entry);
+ dev->power.power_state = PMSG_INVALID;
}
/**
@@ -96,6 +98,7 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
+ dev_pm_qos_constraints_init(dev);
mutex_unlock(&dpm_list_mtx);
}
@@ -109,6 +112,7 @@ void device_pm_remove(struct device *dev)
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
+ dev_pm_qos_constraints_destroy(dev);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
@@ -464,8 +468,12 @@ void dpm_resume_noirq(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
error = device_resume_noirq(dev, state);
- if (error)
+ if (error) {
+ suspend_stats.failed_resume_noirq++;
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, " early", error);
+ }
mutex_lock(&dpm_list_mtx);
put_device(dev);
@@ -626,8 +634,12 @@ void dpm_resume(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
error = device_resume(dev, state, false);
- if (error)
+ if (error) {
+ suspend_stats.failed_resume++;
+ dpm_save_failed_step(SUSPEND_RESUME);
+ dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, "", error);
+ }
mutex_lock(&dpm_list_mtx);
}
@@ -802,6 +814,9 @@ int dpm_suspend_noirq(pm_message_t state)
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " late", error);
+ suspend_stats.failed_suspend_noirq++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
+ dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
}
@@ -902,7 +917,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
End:
- dev->power.is_suspended = !error;
+ if (!error) {
+ dev->power.is_suspended = true;
+ if (dev->power.wakeup_path && dev->parent)
+ dev->parent->power.wakeup_path = true;
+ }
device_unlock(dev);
complete_all(&dev->power.completion);
@@ -923,8 +942,10 @@ static void async_suspend(void *data, async_cookie_t cookie)
int error;
error = __device_suspend(dev, pm_transition, true);
- if (error)
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, pm_transition, " async", error);
+ }
put_device(dev);
}
@@ -967,6 +988,7 @@ int dpm_suspend(pm_message_t state)
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, "", error);
+ dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
}
@@ -980,7 +1002,10 @@ int dpm_suspend(pm_message_t state)
async_synchronize_full();
if (!error)
error = async_error;
- if (!error)
+ if (error) {
+ suspend_stats.failed_suspend++;
+ dpm_save_failed_step(SUSPEND_SUSPEND);
+ } else
dpm_show_time(starttime, state, NULL);
return error;
}
@@ -999,6 +1024,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
+ dev->power.wakeup_path = device_may_wakeup(dev);
+
if (dev->pm_domain) {
pm_dev_dbg(dev, state, "preparing power domain ");
if (dev->pm_domain->ops.prepare)
@@ -1088,7 +1115,10 @@ int dpm_suspend_start(pm_message_t state)
int error;
error = dpm_prepare(state);
- if (!error)
+ if (error) {
+ suspend_stats.failed_prepare++;
+ dpm_save_failed_step(SUSPEND_PREPARE);
+ } else
error = dpm_suspend(state);
return error;
}
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index b23de185cb0..434a6c01167 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -73,6 +73,7 @@ struct opp {
* RCU usage: nodes are not modified in the list of device_opp,
* however addition is possible and is secured by dev_opp_list_lock
* @dev: device pointer
+ * @head: notifier head to notify the OPP availability changes.
* @opp_list: list of opps
*
* This is an internal data structure maintaining the link to opps attached to
@@ -83,6 +84,7 @@ struct device_opp {
struct list_head node;
struct device *dev;
+ struct srcu_notifier_head head;
struct list_head opp_list;
};
@@ -404,6 +406,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
}
dev_opp->dev = dev;
+ srcu_init_notifier_head(&dev_opp->head);
INIT_LIST_HEAD(&dev_opp->opp_list);
/* Secure the device list modification */
@@ -428,6 +431,11 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
list_add_rcu(&new_opp->node, head);
mutex_unlock(&dev_opp_list_lock);
+ /*
+ * Notify the changes in the availability of the operable
+ * frequency/voltage list.
+ */
+ srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
return 0;
}
@@ -504,6 +512,14 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
mutex_unlock(&dev_opp_list_lock);
synchronize_rcu();
+ /* Notify the change of the OPP availability */
+ if (availability_req)
+ srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
+ new_opp);
+ else
+ srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
+ new_opp);
+
/* clean up old opp */
new_opp = opp;
goto out;
@@ -643,3 +659,17 @@ void opp_free_cpufreq_table(struct device *dev,
*table = NULL;
}
#endif /* CONFIG_CPU_FREQ */
+
+/**
+ * opp_get_notifier() - find notifier_head of the device with opp
+ * @dev: device pointer used to lookup device OPPs.
+ */
+struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+{
+ struct device_opp *dev_opp = find_device_opp(dev);
+
+ if (IS_ERR(dev_opp))
+ return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */
+
+ return &dev_opp->head;
+}
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index f2a25f18fde..9bf62323aaf 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,3 +1,5 @@
+#include <linux/pm_qos.h>
+
#ifdef CONFIG_PM_RUNTIME
extern void pm_runtime_init(struct device *dev);
@@ -35,15 +37,21 @@ extern void device_pm_move_last(struct device *);
static inline void device_pm_init(struct device *dev)
{
spin_lock_init(&dev->power.lock);
+ dev->power.power_state = PMSG_INVALID;
pm_runtime_init(dev);
}
+static inline void device_pm_add(struct device *dev)
+{
+ dev_pm_qos_constraints_init(dev);
+}
+
static inline void device_pm_remove(struct device *dev)
{
+ dev_pm_qos_constraints_destroy(dev);
pm_runtime_remove(dev);
}
-static inline void device_pm_add(struct device *dev) {}
static inline void device_pm_move_before(struct device *deva,
struct device *devb) {}
static inline void device_pm_move_after(struct device *deva,
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
new file mode 100644
index 00000000000..91e06141738
--- /dev/null
+++ b/drivers/base/power/qos.c
@@ -0,0 +1,419 @@
+/*
+ * Devices PM QoS constraints management
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * This module exposes the interface to kernel space for specifying
+ * per-device PM QoS dependencies. It provides infrastructure for registration
+ * of:
+ *
+ * Dependents on a QoS value : register requests
+ * Watchers of QoS value : get notified when target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ * Watchers can register different types of notification callbacks:
+ * . a per-device notification callback using the dev_pm_qos_*_notifier API.
+ * The notification chain data is stored in the per-device constraint
+ * data struct.
+ * . a system-wide notification callback using the dev_pm_qos_*_global_notifier
+ * API. The notification chain data is stored in a static variable.
+ *
+ * Note about the per-device constraint data struct allocation:
+ * . The per-device constraints data struct ptr is tored into the device
+ * dev_pm_info.
+ * . To minimize the data usage by the per-device constraints, the data struct
+ * is only allocated at the first call to dev_pm_qos_add_request.
+ * . The data is later free'd when the device is removed from the system.
+ * . A global mutex protects the constraints users from the data being
+ * allocated and free'd.
+ */
+
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+
+static DEFINE_MUTEX(dev_pm_qos_mtx);
+
+static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ */
+s32 dev_pm_qos_read_value(struct device *dev)
+{
+ struct pm_qos_constraints *c;
+ unsigned long flags;
+ s32 ret = 0;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ c = dev->power.constraints;
+ if (c)
+ ret = pm_qos_read_value(c);
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return ret;
+}
+
+/*
+ * apply_constraint
+ * @req: constraint request to apply
+ * @action: action to perform add/update/remove, of type enum pm_qos_req_action
+ * @value: defines the qos request
+ *
+ * Internal function to update the constraints list using the PM QoS core
+ * code and if needed call the per-device and the global notification
+ * callbacks
+ */
+static int apply_constraint(struct dev_pm_qos_request *req,
+ enum pm_qos_req_action action, int value)
+{
+ int ret, curr_value;
+
+ ret = pm_qos_update_target(req->dev->power.constraints,
+ &req->node, action, value);
+
+ if (ret) {
+ /* Call the global callbacks if needed */
+ curr_value = pm_qos_read_value(req->dev->power.constraints);
+ blocking_notifier_call_chain(&dev_pm_notifiers,
+ (unsigned long)curr_value,
+ req);
+ }
+
+ return ret;
+}
+
+/*
+ * dev_pm_qos_constraints_allocate
+ * @dev: device to allocate data for
+ *
+ * Called at the first call to add_request, for constraint data allocation
+ * Must be called with the dev_pm_qos_mtx mutex held
+ */
+static int dev_pm_qos_constraints_allocate(struct device *dev)
+{
+ struct pm_qos_constraints *c;
+ struct blocking_notifier_head *n;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n) {
+ kfree(c);
+ return -ENOMEM;
+ }
+ BLOCKING_INIT_NOTIFIER_HEAD(n);
+
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+ c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+ c->type = PM_QOS_MIN;
+ c->notifiers = n;
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.constraints = c;
+ spin_unlock_irq(&dev->power.lock);
+
+ return 0;
+}
+
+/**
+ * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
+ * @dev: target device
+ *
+ * Called from the device PM subsystem during device insertion under
+ * device_pm_lock().
+ */
+void dev_pm_qos_constraints_init(struct device *dev)
+{
+ mutex_lock(&dev_pm_qos_mtx);
+ dev->power.constraints = NULL;
+ dev->power.power_state = PMSG_ON;
+ mutex_unlock(&dev_pm_qos_mtx);
+}
+
+/**
+ * dev_pm_qos_constraints_destroy
+ * @dev: target device
+ *
+ * Called from the device PM subsystem on device removal under device_pm_lock().
+ */
+void dev_pm_qos_constraints_destroy(struct device *dev)
+{
+ struct dev_pm_qos_request *req, *tmp;
+ struct pm_qos_constraints *c;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ dev->power.power_state = PMSG_INVALID;
+ c = dev->power.constraints;
+ if (!c)
+ goto out;
+
+ /* Flush the constraints list for the device */
+ plist_for_each_entry_safe(req, tmp, &c->list, node) {
+ /*
+ * Update constraints list and call the notification
+ * callbacks if needed
+ */
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.constraints = NULL;
+ spin_unlock_irq(&dev->power.lock);
+
+ kfree(c->notifiers);
+ kfree(c);
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+}
+
+/**
+ * dev_pm_qos_add_request - inserts new qos request into the list
+ * @dev: target device for the constraint
+ * @req: pointer to a preallocated handle
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the device constraints list of
+ * requested qos performance characteristics. It recomputes the aggregate
+ * QoS expectations of parameters and initializes the dev_pm_qos_request
+ * handle. Caller needs to save this handle for later use in updates and
+ * removal.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
+ * to allocate for data structures, -ENODEV if the device has just been removed
+ * from the system.
+ */
+int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+ s32 value)
+{
+ int ret = 0;
+
+ if (!dev || !req) /*guard against callers passing in null */
+ return -EINVAL;
+
+ if (dev_pm_qos_request_active(req)) {
+ WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already "
+ "added request\n");
+ return -EINVAL;
+ }
+
+ req->dev = dev;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (!dev->power.constraints) {
+ if (dev->power.power_state.event == PM_EVENT_INVALID) {
+ /* The device has been removed from the system. */
+ req->dev = NULL;
+ ret = -ENODEV;
+ goto out;
+ } else {
+ /*
+ * Allocate the constraints data on the first call to
+ * add_request, i.e. only if the data is not already
+ * allocated and if the device has not been removed.
+ */
+ ret = dev_pm_qos_constraints_allocate(dev);
+ }
+ }
+
+ if (!ret)
+ ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
+
+/**
+ * dev_pm_qos_update_request - modifies an existing qos request
+ * @req : handle to list element holding a dev_pm_qos request to use
+ * @new_value: defines the qos request
+ *
+ * Updates an existing dev PM qos request along with updating the
+ * target value.
+ *
+ * Attempts are made to make this code callable on hot code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ */
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+ s32 new_value)
+{
+ int ret = 0;
+
+ if (!req) /*guard against callers passing in null */
+ return -EINVAL;
+
+ if (!dev_pm_qos_request_active(req)) {
+ WARN(1, KERN_ERR "dev_pm_qos_update_request() called for "
+ "unknown object\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (req->dev->power.constraints) {
+ if (new_value != req->node.prio)
+ ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
+ new_value);
+ } else {
+ /* Return if the device has been removed */
+ ret = -ENODEV;
+ }
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+/**
+ * dev_pm_qos_remove_request - modifies an existing qos request
+ * @req: handle to request list element
+ *
+ * Will remove pm qos request from the list of constraints and
+ * recompute the current target value. Call this on slow code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ */
+int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+ int ret = 0;
+
+ if (!req) /*guard against callers passing in null */
+ return -EINVAL;
+
+ if (!dev_pm_qos_request_active(req)) {
+ WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for "
+ "unknown object\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (req->dev->power.constraints) {
+ ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
+ PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ } else {
+ /* Return if the device has been removed */
+ ret = -ENODEV;
+ }
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
+
+/**
+ * dev_pm_qos_add_notifier - sets notification entry for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block managed by caller.
+ *
+ * Will register the notifier into a notification chain that gets called
+ * upon changes to the target value for the device.
+ */
+int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
+{
+ int retval = 0;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ /* Silently return if the constraints object is not present. */
+ if (dev->power.constraints)
+ retval = blocking_notifier_chain_register(
+ dev->power.constraints->notifiers,
+ notifier);
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
+
+/**
+ * dev_pm_qos_remove_notifier - deletes notification for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block to be removed.
+ *
+ * Will remove the notifier from the notification chain that gets called
+ * upon changes to the target value.
+ */
+int dev_pm_qos_remove_notifier(struct device *dev,
+ struct notifier_block *notifier)
+{
+ int retval = 0;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ /* Silently return if the constraints object is not present. */
+ if (dev->power.constraints)
+ retval = blocking_notifier_chain_unregister(
+ dev->power.constraints->notifiers,
+ notifier);
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
+
+/**
+ * dev_pm_qos_add_global_notifier - sets notification entry for changes to
+ * target value of the PM QoS constraints for any device
+ *
+ * @notifier: notifier block managed by caller.
+ *
+ * Will register the notifier into a notification chain that gets called
+ * upon changes to the target value for any device.
+ */
+int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
+{
+ return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
+
+/**
+ * dev_pm_qos_remove_global_notifier - deletes notification for changes to
+ * target value of PM QoS constraints for any device
+ *
+ * @notifier: notifier block to be removed.
+ *
+ * Will remove the notifier from the notification chain that gets called
+ * upon changes to the target value for any device.
+ */
+int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
+{
+ return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index acb3f83b807..6bb3aafa85e 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/pm_runtime.h>
+#include <trace/events/rpm.h>
#include "power.h"
static int rpm_resume(struct device *dev, int rpmflags);
@@ -155,6 +156,31 @@ static int rpm_check_suspend_allowed(struct device *dev)
}
/**
+ * __rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
+ */
+static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ __releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+ int retval;
+
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = cb(dev);
+
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
+
+ return retval;
+}
+
+/**
* rpm_idle - Notify device bus type if the device can be suspended.
* @dev: Device to notify the bus type about.
* @rpmflags: Flag bits.
@@ -171,6 +197,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
int (*callback)(struct device *);
int retval;
+ trace_rpm_idle(dev, rpmflags);
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
; /* Conditions are wrong. */
@@ -225,24 +252,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
else
callback = NULL;
- if (callback) {
- if (dev->power.irq_safe)
- spin_unlock(&dev->power.lock);
- else
- spin_unlock_irq(&dev->power.lock);
-
- callback(dev);
-
- if (dev->power.irq_safe)
- spin_lock(&dev->power.lock);
- else
- spin_lock_irq(&dev->power.lock);
- }
+ if (callback)
+ __rpm_callback(callback, dev);
dev->power.idle_notification = false;
wake_up_all(&dev->power.wait_queue);
out:
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
@@ -252,22 +269,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
* @dev: Device to run the callback for.
*/
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
- __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
int retval;
if (!cb)
return -ENOSYS;
- if (dev->power.irq_safe) {
- retval = cb(dev);
- } else {
- spin_unlock_irq(&dev->power.lock);
-
- retval = cb(dev);
+ retval = __rpm_callback(cb, dev);
- spin_lock_irq(&dev->power.lock);
- }
dev->power.runtime_error = retval;
return retval != -EACCES ? retval : -EIO;
}
@@ -277,14 +286,16 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
- * Check if the device's runtime PM status allows it to be suspended. If
- * another suspend has been started earlier, either return immediately or wait
- * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
- * pending idle notification. If the RPM_ASYNC flag is set then queue a
- * suspend request; otherwise run the ->runtime_suspend() callback directly.
- * If a deferred resume was requested while the callback was running then carry
- * it out; otherwise send an idle notification for the device (if the suspend
- * failed) or for its parent (if the suspend succeeded).
+ * Check if the device's runtime PM status allows it to be suspended.
+ * Cancel a pending idle notification, autosuspend or suspend. If
+ * another suspend has been started earlier, either return immediately
+ * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
+ * flags. If the RPM_ASYNC flag is set then queue a suspend request;
+ * otherwise run the ->runtime_suspend() callback directly. When
+ * ->runtime_suspend succeeded, if a deferred resume was requested while
+ * the callback was running then carry it out, otherwise send an idle
+ * notification for its parent (if the suspend succeeded and both
+ * ignore_children of parent->power and irq_safe of dev->power are not set).
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
@@ -295,7 +306,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval;
- dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
+ trace_rpm_suspend(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
@@ -347,6 +358,15 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
+ if (dev->power.irq_safe) {
+ spin_unlock(&dev->power.lock);
+
+ cpu_relax();
+
+ spin_lock(&dev->power.lock);
+ goto repeat;
+ }
+
/* Wait for the other suspend running in parallel with us. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -400,15 +420,16 @@ static int rpm_suspend(struct device *dev, int rpmflags)
dev->power.runtime_error = 0;
else
pm_runtime_cancel_pending(dev);
- } else {
+ wake_up_all(&dev->power.wait_queue);
+ goto out;
+ }
no_callback:
- __update_runtime_status(dev, RPM_SUSPENDED);
- pm_runtime_deactivate_timer(dev);
+ __update_runtime_status(dev, RPM_SUSPENDED);
+ pm_runtime_deactivate_timer(dev);
- if (dev->parent) {
- parent = dev->parent;
- atomic_add_unless(&parent->power.child_count, -1, 0);
- }
+ if (dev->parent) {
+ parent = dev->parent;
+ atomic_add_unless(&parent->power.child_count, -1, 0);
}
wake_up_all(&dev->power.wait_queue);
@@ -430,7 +451,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
out:
- dev_dbg(dev, "%s returns %d\n", __func__, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
@@ -459,7 +480,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval = 0;
- dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
+ trace_rpm_resume(dev, rpmflags);
repeat:
if (dev->power.runtime_error)
@@ -496,6 +517,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
goto out;
}
+ if (dev->power.irq_safe) {
+ spin_unlock(&dev->power.lock);
+
+ cpu_relax();
+
+ spin_lock(&dev->power.lock);
+ goto repeat;
+ }
+
/* Wait for the operation carried out in parallel with us. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -615,7 +645,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock_irq(&dev->power.lock);
}
- dev_dbg(dev, "%s returns %d\n", __func__, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
@@ -732,13 +762,16 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend);
* return immediately if it is larger than zero. Then carry out an idle
* notification, either synchronous or asynchronous.
*
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
@@ -761,13 +794,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
* return immediately if it is larger than zero. Then carry out a suspend,
* either synchronous or asynchronous.
*
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
@@ -789,13 +825,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
* If the RPM_GET_PUT flag is set, increment the device's usage count. Then
* carry out a resume, either synchronous or asynchronous.
*
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT)
atomic_inc(&dev->power.usage_count);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 84f7c7d5a09..14ee07e9cc4 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -276,7 +276,9 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
*
* By default, most devices should leave wakeup disabled. The exceptions are
* devices that everyone expects to be wakeup sources: keyboards, power buttons,
- * possibly network interfaces, etc.
+ * possibly network interfaces, etc. Also, devices that don't generate their
+ * own wakeup requests but merely forward requests from one bus to another
+ * (like PCI bridges) should have wakeup enabled by default.
*/
int device_init_wakeup(struct device *dev, bool enable)
{
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index fabbf6cc536..2fc6a66f39a 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -4,6 +4,8 @@
config REGMAP
default y if (REGMAP_I2C || REGMAP_SPI)
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
bool
config REGMAP_I2C
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index f476f457129..0573c8a9dac 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,3 +1,4 @@
-obj-$(CONFIG_REGMAP) += regmap.o
+obj-$(CONFIG_REGMAP) += regmap.o regcache.o regcache-indexed.o regcache-rbtree.o regcache-lzo.o
+obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
new file mode 100644
index 00000000000..348ff02eb93
--- /dev/null
+++ b/drivers/base/regmap/internal.h
@@ -0,0 +1,128 @@
+/*
+ * Register map access API internal header
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _REGMAP_INTERNAL_H
+#define _REGMAP_INTERNAL_H
+
+#include <linux/regmap.h>
+#include <linux/fs.h>
+
+struct regmap;
+struct regcache_ops;
+
+struct regmap_format {
+ size_t buf_size;
+ size_t reg_bytes;
+ size_t val_bytes;
+ void (*format_write)(struct regmap *map,
+ unsigned int reg, unsigned int val);
+ void (*format_reg)(void *buf, unsigned int reg);
+ void (*format_val)(void *buf, unsigned int val);
+ unsigned int (*parse_val)(void *buf);
+};
+
+struct regmap {
+ struct mutex lock;
+
+ struct device *dev; /* Device we do I/O on */
+ void *work_buf; /* Scratch buffer used to format I/O */
+ struct regmap_format format; /* Buffer format */
+ const struct regmap_bus *bus;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
+
+ unsigned int max_register;
+ bool (*writeable_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+ bool (*volatile_reg)(struct device *dev, unsigned int reg);
+ bool (*precious_reg)(struct device *dev, unsigned int reg);
+
+ u8 read_flag_mask;
+ u8 write_flag_mask;
+
+ /* regcache specific members */
+ const struct regcache_ops *cache_ops;
+ enum regcache_type cache_type;
+
+ /* number of bytes in reg_defaults_raw */
+ unsigned int cache_size_raw;
+ /* number of bytes per word in reg_defaults_raw */
+ unsigned int cache_word_size;
+ /* number of entries in reg_defaults */
+ unsigned int num_reg_defaults;
+ /* number of entries in reg_defaults_raw */
+ unsigned int num_reg_defaults_raw;
+
+ /* if set, only the cache is modified not the HW */
+ unsigned int cache_only:1;
+ /* if set, only the HW is modified not the cache */
+ unsigned int cache_bypass:1;
+ /* if set, remember to free reg_defaults_raw */
+ unsigned int cache_free:1;
+
+ struct reg_default *reg_defaults;
+ const void *reg_defaults_raw;
+ void *cache;
+};
+
+struct regcache_ops {
+ const char *name;
+ enum regcache_type type;
+ int (*init)(struct regmap *map);
+ int (*exit)(struct regmap *map);
+ int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
+ int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
+ int (*sync)(struct regmap *map);
+};
+
+bool regmap_writeable(struct regmap *map, unsigned int reg);
+bool regmap_readable(struct regmap *map, unsigned int reg);
+bool regmap_volatile(struct regmap *map, unsigned int reg);
+bool regmap_precious(struct regmap *map, unsigned int reg);
+
+int _regmap_write(struct regmap *map, unsigned int reg,
+ unsigned int val);
+
+#ifdef CONFIG_DEBUG_FS
+extern void regmap_debugfs_initcall(void);
+extern void regmap_debugfs_init(struct regmap *map);
+extern void regmap_debugfs_exit(struct regmap *map);
+#else
+static inline void regmap_debugfs_initcall(void) { }
+static inline void regmap_debugfs_init(struct regmap *map) { }
+static inline void regmap_debugfs_exit(struct regmap *map) { }
+#endif
+
+/* regcache core declarations */
+int regcache_init(struct regmap *map);
+void regcache_exit(struct regmap *map);
+int regcache_read(struct regmap *map,
+ unsigned int reg, unsigned int *value);
+int regcache_write(struct regmap *map,
+ unsigned int reg, unsigned int value);
+int regcache_sync(struct regmap *map);
+
+unsigned int regcache_get_val(const void *base, unsigned int idx,
+ unsigned int word_size);
+bool regcache_set_val(void *base, unsigned int idx,
+ unsigned int val, unsigned int word_size);
+int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+int regcache_insert_reg(struct regmap *map, unsigned int reg,
+ unsigned int val);
+
+extern struct regcache_ops regcache_indexed_ops;
+extern struct regcache_ops regcache_rbtree_ops;
+extern struct regcache_ops regcache_lzo_ops;
+
+#endif
diff --git a/drivers/base/regmap/regcache-indexed.c b/drivers/base/regmap/regcache-indexed.c
new file mode 100644
index 00000000000..507731ad8ec
--- /dev/null
+++ b/drivers/base/regmap/regcache-indexed.c
@@ -0,0 +1,64 @@
+/*
+ * Register cache access API - indexed caching support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+
+#include "internal.h"
+
+static int regcache_indexed_read(struct regmap *map, unsigned int reg,
+ unsigned int *value)
+{
+ int ret;
+
+ ret = regcache_lookup_reg(map, reg);
+ if (ret >= 0)
+ *value = map->reg_defaults[ret].def;
+
+ return ret;
+}
+
+static int regcache_indexed_write(struct regmap *map, unsigned int reg,
+ unsigned int value)
+{
+ int ret;
+
+ ret = regcache_lookup_reg(map, reg);
+ if (ret < 0)
+ return regcache_insert_reg(map, reg, value);
+ map->reg_defaults[ret].def = value;
+ return 0;
+}
+
+static int regcache_indexed_sync(struct regmap *map)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ ret = _regmap_write(map, map->reg_defaults[i].reg,
+ map->reg_defaults[i].def);
+ if (ret < 0)
+ return ret;
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+ map->reg_defaults[i].reg,
+ map->reg_defaults[i].def);
+ }
+ return 0;
+}
+
+struct regcache_ops regcache_indexed_ops = {
+ .type = REGCACHE_INDEXED,
+ .name = "indexed",
+ .read = regcache_indexed_read,
+ .write = regcache_indexed_write,
+ .sync = regcache_indexed_sync
+};
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
new file mode 100644
index 00000000000..066aeece362
--- /dev/null
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -0,0 +1,361 @@
+/*
+ * Register cache access API - LZO caching support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/lzo.h>
+
+#include "internal.h"
+
+struct regcache_lzo_ctx {
+ void *wmem;
+ void *dst;
+ const void *src;
+ size_t src_len;
+ size_t dst_len;
+ size_t decompressed_size;
+ unsigned long *sync_bmp;
+ int sync_bmp_nbits;
+};
+
+#define LZO_BLOCK_NUM 8
+static int regcache_lzo_block_count(void)
+{
+ return LZO_BLOCK_NUM;
+}
+
+static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
+{
+ lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!lzo_ctx->wmem)
+ return -ENOMEM;
+ return 0;
+}
+
+static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
+{
+ size_t compress_size;
+ int ret;
+
+ ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
+ lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
+ if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
+ return -EINVAL;
+ lzo_ctx->dst_len = compress_size;
+ return 0;
+}
+
+static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
+{
+ size_t dst_len;
+ int ret;
+
+ dst_len = lzo_ctx->dst_len;
+ ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
+ lzo_ctx->dst, &dst_len);
+ if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
+ return -EINVAL;
+ return 0;
+}
+
+static int regcache_lzo_compress_cache_block(struct regmap *map,
+ struct regcache_lzo_ctx *lzo_ctx)
+{
+ int ret;
+
+ lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
+ lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+ if (!lzo_ctx->dst) {
+ lzo_ctx->dst_len = 0;
+ return -ENOMEM;
+ }
+
+ ret = regcache_lzo_compress(lzo_ctx);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int regcache_lzo_decompress_cache_block(struct regmap *map,
+ struct regcache_lzo_ctx *lzo_ctx)
+{
+ int ret;
+
+ lzo_ctx->dst_len = lzo_ctx->decompressed_size;
+ lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+ if (!lzo_ctx->dst) {
+ lzo_ctx->dst_len = 0;
+ return -ENOMEM;
+ }
+
+ ret = regcache_lzo_decompress(lzo_ctx);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static inline int regcache_lzo_get_blkindex(struct regmap *map,
+ unsigned int reg)
+{
+ return (reg * map->cache_word_size) /
+ DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
+}
+
+static inline int regcache_lzo_get_blkpos(struct regmap *map,
+ unsigned int reg)
+{
+ return reg % (DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count()) /
+ map->cache_word_size);
+}
+
+static inline int regcache_lzo_get_blksize(struct regmap *map)
+{
+ return DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
+}
+
+static int regcache_lzo_init(struct regmap *map)
+{
+ struct regcache_lzo_ctx **lzo_blocks;
+ size_t bmp_size;
+ int ret, i, blksize, blkcount;
+ const char *p, *end;
+ unsigned long *sync_bmp;
+
+ ret = 0;
+
+ blkcount = regcache_lzo_block_count();
+ map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
+ GFP_KERNEL);
+ if (!map->cache)
+ return -ENOMEM;
+ lzo_blocks = map->cache;
+
+ /*
+ * allocate a bitmap to be used when syncing the cache with
+ * the hardware. Each time a register is modified, the corresponding
+ * bit is set in the bitmap, so we know that we have to sync
+ * that register.
+ */
+ bmp_size = map->num_reg_defaults_raw;
+ sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
+ GFP_KERNEL);
+ if (!sync_bmp) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ bitmap_zero(sync_bmp, bmp_size);
+
+ /* allocate the lzo blocks and initialize them */
+ for (i = 0; i < blkcount; i++) {
+ lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
+ GFP_KERNEL);
+ if (!lzo_blocks[i]) {
+ kfree(sync_bmp);
+ ret = -ENOMEM;
+ goto err;
+ }
+ lzo_blocks[i]->sync_bmp = sync_bmp;
+ lzo_blocks[i]->sync_bmp_nbits = bmp_size;
+ /* alloc the working space for the compressed block */
+ ret = regcache_lzo_prepare(lzo_blocks[i]);
+ if (ret < 0)
+ goto err;
+ }
+
+ blksize = regcache_lzo_get_blksize(map);
+ p = map->reg_defaults_raw;
+ end = map->reg_defaults_raw + map->cache_size_raw;
+ /* compress the register map and fill the lzo blocks */
+ for (i = 0; i < blkcount; i++, p += blksize) {
+ lzo_blocks[i]->src = p;
+ if (p + blksize > end)
+ lzo_blocks[i]->src_len = end - p;
+ else
+ lzo_blocks[i]->src_len = blksize;
+ ret = regcache_lzo_compress_cache_block(map,
+ lzo_blocks[i]);
+ if (ret < 0)
+ goto err;
+ lzo_blocks[i]->decompressed_size =
+ lzo_blocks[i]->src_len;
+ }
+
+ return 0;
+err:
+ regcache_exit(map);
+ return ret;
+}
+
+static int regcache_lzo_exit(struct regmap *map)
+{
+ struct regcache_lzo_ctx **lzo_blocks;
+ int i, blkcount;
+
+ lzo_blocks = map->cache;
+ if (!lzo_blocks)
+ return 0;
+
+ blkcount = regcache_lzo_block_count();
+ /*
+ * the pointer to the bitmap used for syncing the cache
+ * is shared amongst all lzo_blocks. Ensure it is freed
+ * only once.
+ */
+ if (lzo_blocks[0])
+ kfree(lzo_blocks[0]->sync_bmp);
+ for (i = 0; i < blkcount; i++) {
+ if (lzo_blocks[i]) {
+ kfree(lzo_blocks[i]->wmem);
+ kfree(lzo_blocks[i]->dst);
+ }
+ /* each lzo_block is a pointer returned by kmalloc or NULL */
+ kfree(lzo_blocks[i]);
+ }
+ kfree(lzo_blocks);
+ map->cache = NULL;
+ return 0;
+}
+
+static int regcache_lzo_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
+{
+ struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
+ int ret, blkindex, blkpos;
+ size_t blksize, tmp_dst_len;
+ void *tmp_dst;
+
+ /* index of the compressed lzo block */
+ blkindex = regcache_lzo_get_blkindex(map, reg);
+ /* register index within the decompressed block */
+ blkpos = regcache_lzo_get_blkpos(map, reg);
+ /* size of the compressed block */
+ blksize = regcache_lzo_get_blksize(map);
+ lzo_blocks = map->cache;
+ lzo_block = lzo_blocks[blkindex];
+
+ /* save the pointer and length of the compressed block */
+ tmp_dst = lzo_block->dst;
+ tmp_dst_len = lzo_block->dst_len;
+
+ /* prepare the source to be the compressed block */
+ lzo_block->src = lzo_block->dst;
+ lzo_block->src_len = lzo_block->dst_len;
+
+ /* decompress the block */
+ ret = regcache_lzo_decompress_cache_block(map, lzo_block);
+ if (ret >= 0)
+ /* fetch the value from the cache */
+ *value = regcache_get_val(lzo_block->dst, blkpos,
+ map->cache_word_size);
+
+ kfree(lzo_block->dst);
+ /* restore the pointer and length of the compressed block */
+ lzo_block->dst = tmp_dst;
+ lzo_block->dst_len = tmp_dst_len;
+
+ return ret;
+}
+
+static int regcache_lzo_write(struct regmap *map,
+ unsigned int reg, unsigned int value)
+{
+ struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
+ int ret, blkindex, blkpos;
+ size_t blksize, tmp_dst_len;
+ void *tmp_dst;
+
+ /* index of the compressed lzo block */
+ blkindex = regcache_lzo_get_blkindex(map, reg);
+ /* register index within the decompressed block */
+ blkpos = regcache_lzo_get_blkpos(map, reg);
+ /* size of the compressed block */
+ blksize = regcache_lzo_get_blksize(map);
+ lzo_blocks = map->cache;
+ lzo_block = lzo_blocks[blkindex];
+
+ /* save the pointer and length of the compressed block */
+ tmp_dst = lzo_block->dst;
+ tmp_dst_len = lzo_block->dst_len;
+
+ /* prepare the source to be the compressed block */
+ lzo_block->src = lzo_block->dst;
+ lzo_block->src_len = lzo_block->dst_len;
+
+ /* decompress the block */
+ ret = regcache_lzo_decompress_cache_block(map, lzo_block);
+ if (ret < 0) {
+ kfree(lzo_block->dst);
+ goto out;
+ }
+
+ /* write the new value to the cache */
+ if (regcache_set_val(lzo_block->dst, blkpos, value,
+ map->cache_word_size)) {
+ kfree(lzo_block->dst);
+ goto out;
+ }
+
+ /* prepare the source to be the decompressed block */
+ lzo_block->src = lzo_block->dst;
+ lzo_block->src_len = lzo_block->dst_len;
+
+ /* compress the block */
+ ret = regcache_lzo_compress_cache_block(map, lzo_block);
+ if (ret < 0) {
+ kfree(lzo_block->dst);
+ kfree(lzo_block->src);
+ goto out;
+ }
+
+ /* set the bit so we know we have to sync this register */
+ set_bit(reg, lzo_block->sync_bmp);
+ kfree(tmp_dst);
+ kfree(lzo_block->src);
+ return 0;
+out:
+ lzo_block->dst = tmp_dst;
+ lzo_block->dst_len = tmp_dst_len;
+ return ret;
+}
+
+static int regcache_lzo_sync(struct regmap *map)
+{
+ struct regcache_lzo_ctx **lzo_blocks;
+ unsigned int val;
+ int i;
+ int ret;
+
+ lzo_blocks = map->cache;
+ for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
+ ret = regcache_read(map, i, &val);
+ if (ret)
+ return ret;
+ map->cache_bypass = 1;
+ ret = _regmap_write(map, i, val);
+ map->cache_bypass = 0;
+ if (ret)
+ return ret;
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+ i, val);
+ }
+
+ return 0;
+}
+
+struct regcache_ops regcache_lzo_ops = {
+ .type = REGCACHE_LZO,
+ .name = "lzo",
+ .init = regcache_lzo_init,
+ .exit = regcache_lzo_exit,
+ .read = regcache_lzo_read,
+ .write = regcache_lzo_write,
+ .sync = regcache_lzo_sync
+};
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
new file mode 100644
index 00000000000..e31498499b0
--- /dev/null
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -0,0 +1,345 @@
+/*
+ * Register cache access API - rbtree caching support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+
+#include "internal.h"
+
+static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
+ unsigned int value);
+
+struct regcache_rbtree_node {
+ /* the actual rbtree node holding this block */
+ struct rb_node node;
+ /* base register handled by this block */
+ unsigned int base_reg;
+ /* block of adjacent registers */
+ void *block;
+ /* number of registers available in the block */
+ unsigned int blklen;
+} __attribute__ ((packed));
+
+struct regcache_rbtree_ctx {
+ struct rb_root root;
+ struct regcache_rbtree_node *cached_rbnode;
+};
+
+static inline void regcache_rbtree_get_base_top_reg(
+ struct regcache_rbtree_node *rbnode,
+ unsigned int *base, unsigned int *top)
+{
+ *base = rbnode->base_reg;
+ *top = rbnode->base_reg + rbnode->blklen - 1;
+}
+
+static unsigned int regcache_rbtree_get_register(
+ struct regcache_rbtree_node *rbnode, unsigned int idx,
+ unsigned int word_size)
+{
+ return regcache_get_val(rbnode->block, idx, word_size);
+}
+
+static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
+ unsigned int idx, unsigned int val,
+ unsigned int word_size)
+{
+ regcache_set_val(rbnode->block, idx, val, word_size);
+}
+
+static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
+ unsigned int reg)
+{
+ struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
+ struct rb_node *node;
+ struct regcache_rbtree_node *rbnode;
+ unsigned int base_reg, top_reg;
+
+ rbnode = rbtree_ctx->cached_rbnode;
+ if (rbnode) {
+ regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
+ if (reg >= base_reg && reg <= top_reg)
+ return rbnode;
+ }
+
+ node = rbtree_ctx->root.rb_node;
+ while (node) {
+ rbnode = container_of(node, struct regcache_rbtree_node, node);
+ regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
+ if (reg >= base_reg && reg <= top_reg) {
+ rbtree_ctx->cached_rbnode = rbnode;
+ return rbnode;
+ } else if (reg > top_reg) {
+ node = node->rb_right;
+ } else if (reg < base_reg) {
+ node = node->rb_left;
+ }
+ }
+
+ return NULL;
+}
+
+static int regcache_rbtree_insert(struct rb_root *root,
+ struct regcache_rbtree_node *rbnode)
+{
+ struct rb_node **new, *parent;
+ struct regcache_rbtree_node *rbnode_tmp;
+ unsigned int base_reg_tmp, top_reg_tmp;
+ unsigned int base_reg;
+
+ parent = NULL;
+ new = &root->rb_node;
+ while (*new) {
+ rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
+ node);
+ /* base and top registers of the current rbnode */
+ regcache_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
+ &top_reg_tmp);
+ /* base register of the rbnode to be added */
+ base_reg = rbnode->base_reg;
+ parent = *new;
+ /* if this register has already been inserted, just return */
+ if (base_reg >= base_reg_tmp &&
+ base_reg <= top_reg_tmp)
+ return 0;
+ else if (base_reg > top_reg_tmp)
+ new = &((*new)->rb_right);
+ else if (base_reg < base_reg_tmp)
+ new = &((*new)->rb_left);
+ }
+
+ /* insert the node into the rbtree */
+ rb_link_node(&rbnode->node, parent, new);
+ rb_insert_color(&rbnode->node, root);
+
+ return 1;
+}
+
+static int regcache_rbtree_init(struct regmap *map)
+{
+ struct regcache_rbtree_ctx *rbtree_ctx;
+ int i;
+ int ret;
+
+ map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
+ if (!map->cache)
+ return -ENOMEM;
+
+ rbtree_ctx = map->cache;
+ rbtree_ctx->root = RB_ROOT;
+ rbtree_ctx->cached_rbnode = NULL;
+
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ ret = regcache_rbtree_write(map,
+ map->reg_defaults[i].reg,
+ map->reg_defaults[i].def);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ regcache_exit(map);
+ return ret;
+}
+
+static int regcache_rbtree_exit(struct regmap *map)
+{
+ struct rb_node *next;
+ struct regcache_rbtree_ctx *rbtree_ctx;
+ struct regcache_rbtree_node *rbtree_node;
+
+ /* if we've already been called then just return */
+ rbtree_ctx = map->cache;
+ if (!rbtree_ctx)
+ return 0;
+
+ /* free up the rbtree */
+ next = rb_first(&rbtree_ctx->root);
+ while (next) {
+ rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
+ next = rb_next(&rbtree_node->node);
+ rb_erase(&rbtree_node->node, &rbtree_ctx->root);
+ kfree(rbtree_node->block);
+ kfree(rbtree_node);
+ }
+
+ /* release the resources */
+ kfree(map->cache);
+ map->cache = NULL;
+
+ return 0;
+}
+
+static int regcache_rbtree_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
+{
+ struct regcache_rbtree_node *rbnode;
+ unsigned int reg_tmp;
+
+ rbnode = regcache_rbtree_lookup(map, reg);
+ if (rbnode) {
+ reg_tmp = reg - rbnode->base_reg;
+ *value = regcache_rbtree_get_register(rbnode, reg_tmp,
+ map->cache_word_size);
+ } else {
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+
+static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
+ unsigned int pos, unsigned int reg,
+ unsigned int value, unsigned int word_size)
+{
+ u8 *blk;
+
+ blk = krealloc(rbnode->block,
+ (rbnode->blklen + 1) * word_size, GFP_KERNEL);
+ if (!blk)
+ return -ENOMEM;
+
+ /* insert the register value in the correct place in the rbnode block */
+ memmove(blk + (pos + 1) * word_size,
+ blk + pos * word_size,
+ (rbnode->blklen - pos) * word_size);
+
+ /* update the rbnode block, its size and the base register */
+ rbnode->block = blk;
+ rbnode->blklen++;
+ if (!pos)
+ rbnode->base_reg = reg;
+
+ regcache_rbtree_set_register(rbnode, pos, value, word_size);
+ return 0;
+}
+
+static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
+ unsigned int value)
+{
+ struct regcache_rbtree_ctx *rbtree_ctx;
+ struct regcache_rbtree_node *rbnode, *rbnode_tmp;
+ struct rb_node *node;
+ unsigned int val;
+ unsigned int reg_tmp;
+ unsigned int pos;
+ int i;
+ int ret;
+
+ rbtree_ctx = map->cache;
+ /* if we can't locate it in the cached rbnode we'll have
+ * to traverse the rbtree looking for it.
+ */
+ rbnode = regcache_rbtree_lookup(map, reg);
+ if (rbnode) {
+ reg_tmp = reg - rbnode->base_reg;
+ val = regcache_rbtree_get_register(rbnode, reg_tmp,
+ map->cache_word_size);
+ if (val == value)
+ return 0;
+ regcache_rbtree_set_register(rbnode, reg_tmp, value,
+ map->cache_word_size);
+ } else {
+ /* look for an adjacent register to the one we are about to add */
+ for (node = rb_first(&rbtree_ctx->root); node;
+ node = rb_next(node)) {
+ rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node);
+ for (i = 0; i < rbnode_tmp->blklen; i++) {
+ reg_tmp = rbnode_tmp->base_reg + i;
+ if (abs(reg_tmp - reg) != 1)
+ continue;
+ /* decide where in the block to place our register */
+ if (reg_tmp + 1 == reg)
+ pos = i + 1;
+ else
+ pos = i;
+ ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
+ reg, value,
+ map->cache_word_size);
+ if (ret)
+ return ret;
+ rbtree_ctx->cached_rbnode = rbnode_tmp;
+ return 0;
+ }
+ }
+ /* we did not manage to find a place to insert it in an existing
+ * block so create a new rbnode with a single register in its block.
+ * This block will get populated further if any other adjacent
+ * registers get modified in the future.
+ */
+ rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
+ if (!rbnode)
+ return -ENOMEM;
+ rbnode->blklen = 1;
+ rbnode->base_reg = reg;
+ rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
+ GFP_KERNEL);
+ if (!rbnode->block) {
+ kfree(rbnode);
+ return -ENOMEM;
+ }
+ regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
+ regcache_rbtree_insert(&rbtree_ctx->root, rbnode);
+ rbtree_ctx->cached_rbnode = rbnode;
+ }
+
+ return 0;
+}
+
+static int regcache_rbtree_sync(struct regmap *map)
+{
+ struct regcache_rbtree_ctx *rbtree_ctx;
+ struct rb_node *node;
+ struct regcache_rbtree_node *rbnode;
+ unsigned int regtmp;
+ unsigned int val;
+ int ret;
+ int i;
+
+ rbtree_ctx = map->cache;
+ for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
+ rbnode = rb_entry(node, struct regcache_rbtree_node, node);
+ for (i = 0; i < rbnode->blklen; i++) {
+ regtmp = rbnode->base_reg + i;
+ val = regcache_rbtree_get_register(rbnode, i,
+ map->cache_word_size);
+
+ /* Is this the hardware default? If so skip. */
+ ret = regcache_lookup_reg(map, i);
+ if (ret > 0 && val == map->reg_defaults[ret].def)
+ continue;
+
+ map->cache_bypass = 1;
+ ret = _regmap_write(map, regtmp, val);
+ map->cache_bypass = 0;
+ if (ret)
+ return ret;
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+ regtmp, val);
+ }
+ }
+
+ return 0;
+}
+
+struct regcache_ops regcache_rbtree_ops = {
+ .type = REGCACHE_RBTREE,
+ .name = "rbtree",
+ .init = regcache_rbtree_init,
+ .exit = regcache_rbtree_exit,
+ .read = regcache_rbtree_read,
+ .write = regcache_rbtree_write,
+ .sync = regcache_rbtree_sync
+};
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
new file mode 100644
index 00000000000..afcfef83826
--- /dev/null
+++ b/drivers/base/regmap/regcache.c
@@ -0,0 +1,401 @@
+/*
+ * Register cache access API
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <trace/events/regmap.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
+
+#include "internal.h"
+
+static const struct regcache_ops *cache_types[] = {
+ &regcache_indexed_ops,
+ &regcache_rbtree_ops,
+ &regcache_lzo_ops,
+};
+
+static int regcache_hw_init(struct regmap *map)
+{
+ int i, j;
+ int ret;
+ int count;
+ unsigned int val;
+ void *tmp_buf;
+
+ if (!map->num_reg_defaults_raw)
+ return -EINVAL;
+
+ if (!map->reg_defaults_raw) {
+ dev_warn(map->dev, "No cache defaults, reading back from HW\n");
+ tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
+ if (!tmp_buf)
+ return -EINVAL;
+ ret = regmap_bulk_read(map, 0, tmp_buf,
+ map->num_reg_defaults_raw);
+ if (ret < 0) {
+ kfree(tmp_buf);
+ return ret;
+ }
+ map->reg_defaults_raw = tmp_buf;
+ map->cache_free = 1;
+ }
+
+ /* calculate the size of reg_defaults */
+ for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
+ val = regcache_get_val(map->reg_defaults_raw,
+ i, map->cache_word_size);
+ if (!val)
+ continue;
+ count++;
+ }
+
+ map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
+ GFP_KERNEL);
+ if (!map->reg_defaults)
+ return -ENOMEM;
+
+ /* fill the reg_defaults */
+ map->num_reg_defaults = count;
+ for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
+ val = regcache_get_val(map->reg_defaults_raw,
+ i, map->cache_word_size);
+ if (!val)
+ continue;
+ map->reg_defaults[j].reg = i;
+ map->reg_defaults[j].def = val;
+ j++;
+ }
+
+ return 0;
+}
+
+int regcache_init(struct regmap *map)
+{
+ int ret;
+ int i;
+ void *tmp_buf;
+
+ if (map->cache_type == REGCACHE_NONE) {
+ map->cache_bypass = true;
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cache_types); i++)
+ if (cache_types[i]->type == map->cache_type)
+ break;
+
+ if (i == ARRAY_SIZE(cache_types)) {
+ dev_err(map->dev, "Could not match compress type: %d\n",
+ map->cache_type);
+ return -EINVAL;
+ }
+
+ map->cache = NULL;
+ map->cache_ops = cache_types[i];
+
+ if (!map->cache_ops->read ||
+ !map->cache_ops->write ||
+ !map->cache_ops->name)
+ return -EINVAL;
+
+ /* We still need to ensure that the reg_defaults
+ * won't vanish from under us. We'll need to make
+ * a copy of it.
+ */
+ if (map->reg_defaults) {
+ if (!map->num_reg_defaults)
+ return -EINVAL;
+ tmp_buf = kmemdup(map->reg_defaults, map->num_reg_defaults *
+ sizeof(struct reg_default), GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+ map->reg_defaults = tmp_buf;
+ } else if (map->num_reg_defaults_raw) {
+ /* Some devices such as PMICs don't have cache defaults,
+ * we cope with this by reading back the HW registers and
+ * crafting the cache defaults by hand.
+ */
+ ret = regcache_hw_init(map);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!map->max_register)
+ map->max_register = map->num_reg_defaults_raw;
+
+ if (map->cache_ops->init) {
+ dev_dbg(map->dev, "Initializing %s cache\n",
+ map->cache_ops->name);
+ return map->cache_ops->init(map);
+ }
+ return 0;
+}
+
+void regcache_exit(struct regmap *map)
+{
+ if (map->cache_type == REGCACHE_NONE)
+ return;
+
+ BUG_ON(!map->cache_ops);
+
+ kfree(map->reg_defaults);
+ if (map->cache_free)
+ kfree(map->reg_defaults_raw);
+
+ if (map->cache_ops->exit) {
+ dev_dbg(map->dev, "Destroying %s cache\n",
+ map->cache_ops->name);
+ map->cache_ops->exit(map);
+ }
+}
+
+/**
+ * regcache_read: Fetch the value of a given register from the cache.
+ *
+ * @map: map to configure.
+ * @reg: The register index.
+ * @value: The value to be returned.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
+{
+ if (map->cache_type == REGCACHE_NONE)
+ return -ENOSYS;
+
+ BUG_ON(!map->cache_ops);
+
+ if (!regmap_readable(map, reg))
+ return -EIO;
+
+ if (!regmap_volatile(map, reg))
+ return map->cache_ops->read(map, reg, value);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regcache_read);
+
+/**
+ * regcache_write: Set the value of a given register in the cache.
+ *
+ * @map: map to configure.
+ * @reg: The register index.
+ * @value: The new register value.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_write(struct regmap *map,
+ unsigned int reg, unsigned int value)
+{
+ if (map->cache_type == REGCACHE_NONE)
+ return 0;
+
+ BUG_ON(!map->cache_ops);
+
+ if (!regmap_writeable(map, reg))
+ return -EIO;
+
+ if (!regmap_volatile(map, reg))
+ return map->cache_ops->write(map, reg, value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regcache_write);
+
+/**
+ * regcache_sync: Sync the register cache with the hardware.
+ *
+ * @map: map to configure.
+ *
+ * Any registers that should not be synced should be marked as
+ * volatile. In general drivers can choose not to use the provided
+ * syncing functionality if they so require.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_sync(struct regmap *map)
+{
+ int ret = 0;
+ unsigned int val;
+ unsigned int i;
+ const char *name;
+ unsigned int bypass;
+
+ BUG_ON(!map->cache_ops);
+
+ mutex_lock(&map->lock);
+ /* Remember the initial bypass state */
+ bypass = map->cache_bypass;
+ dev_dbg(map->dev, "Syncing %s cache\n",
+ map->cache_ops->name);
+ name = map->cache_ops->name;
+ trace_regcache_sync(map->dev, name, "start");
+ if (map->cache_ops->sync) {
+ ret = map->cache_ops->sync(map);
+ } else {
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ ret = regcache_read(map, i, &val);
+ if (ret < 0)
+ goto out;
+ map->cache_bypass = 1;
+ ret = _regmap_write(map, i, val);
+ map->cache_bypass = 0;
+ if (ret < 0)
+ goto out;
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+ map->reg_defaults[i].reg,
+ map->reg_defaults[i].def);
+ }
+
+ }
+out:
+ trace_regcache_sync(map->dev, name, "stop");
+ /* Restore the bypass state */
+ map->cache_bypass = bypass;
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_sync);
+
+/**
+ * regcache_cache_only: Put a register map into cache only mode
+ *
+ * @map: map to configure
+ * @cache_only: flag if changes should be written to the hardware
+ *
+ * When a register map is marked as cache only writes to the register
+ * map API will only update the register cache, they will not cause
+ * any hardware changes. This is useful for allowing portions of
+ * drivers to act as though the device were functioning as normal when
+ * it is disabled for power saving reasons.
+ */
+void regcache_cache_only(struct regmap *map, bool enable)
+{
+ mutex_lock(&map->lock);
+ WARN_ON(map->cache_bypass && enable);
+ map->cache_only = enable;
+ mutex_unlock(&map->lock);
+}
+EXPORT_SYMBOL_GPL(regcache_cache_only);
+
+/**
+ * regcache_cache_bypass: Put a register map into cache bypass mode
+ *
+ * @map: map to configure
+ * @cache_bypass: flag if changes should not be written to the hardware
+ *
+ * When a register map is marked with the cache bypass option, writes
+ * to the register map API will only update the hardware and not the
+ * the cache directly. This is useful when syncing the cache back to
+ * the hardware.
+ */
+void regcache_cache_bypass(struct regmap *map, bool enable)
+{
+ mutex_lock(&map->lock);
+ WARN_ON(map->cache_only && enable);
+ map->cache_bypass = enable;
+ mutex_unlock(&map->lock);
+}
+EXPORT_SYMBOL_GPL(regcache_cache_bypass);
+
+bool regcache_set_val(void *base, unsigned int idx,
+ unsigned int val, unsigned int word_size)
+{
+ switch (word_size) {
+ case 1: {
+ u8 *cache = base;
+ if (cache[idx] == val)
+ return true;
+ cache[idx] = val;
+ break;
+ }
+ case 2: {
+ u16 *cache = base;
+ if (cache[idx] == val)
+ return true;
+ cache[idx] = val;
+ break;
+ }
+ default:
+ BUG();
+ }
+ /* unreachable */
+ return false;
+}
+
+unsigned int regcache_get_val(const void *base, unsigned int idx,
+ unsigned int word_size)
+{
+ if (!base)
+ return -EINVAL;
+
+ switch (word_size) {
+ case 1: {
+ const u8 *cache = base;
+ return cache[idx];
+ }
+ case 2: {
+ const u16 *cache = base;
+ return cache[idx];
+ }
+ default:
+ BUG();
+ }
+ /* unreachable */
+ return -1;
+}
+
+static int regcache_default_cmp(const void *a, const void *b)
+{
+ const struct reg_default *_a = a;
+ const struct reg_default *_b = b;
+
+ return _a->reg - _b->reg;
+}
+
+int regcache_lookup_reg(struct regmap *map, unsigned int reg)
+{
+ struct reg_default key;
+ struct reg_default *r;
+
+ key.reg = reg;
+ key.def = 0;
+
+ r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
+ sizeof(struct reg_default), regcache_default_cmp);
+
+ if (r)
+ return r - map->reg_defaults;
+ else
+ return -ENOENT;
+}
+
+int regcache_insert_reg(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ void *tmp;
+
+ tmp = krealloc(map->reg_defaults,
+ (map->num_reg_defaults + 1) * sizeof(struct reg_default),
+ GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ map->reg_defaults = tmp;
+ map->num_reg_defaults++;
+ map->reg_defaults[map->num_reg_defaults - 1].reg = reg;
+ map->reg_defaults[map->num_reg_defaults - 1].def = val;
+ sort(map->reg_defaults, map->num_reg_defaults,
+ sizeof(struct reg_default), regcache_default_cmp, NULL);
+ return 0;
+}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
new file mode 100644
index 00000000000..6f397476e27
--- /dev/null
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -0,0 +1,209 @@
+/*
+ * Register map access API - debugfs
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include "internal.h"
+
+static struct dentry *regmap_debugfs_root;
+
+/* Calculate the length of a fixed format */
+static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
+{
+ snprintf(buf, buf_size, "%x", max_val);
+ return strlen(buf);
+}
+
+static int regmap_open_file(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int reg_len, val_len, tot_len;
+ size_t buf_pos = 0;
+ loff_t p = 0;
+ ssize_t ret;
+ int i;
+ struct regmap *map = file->private_data;
+ char *buf;
+ unsigned int val;
+
+ if (*ppos < 0 || !count)
+ return -EINVAL;
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Calculate the length of a fixed format */
+ reg_len = regmap_calc_reg_len(map->max_register, buf, count);
+ val_len = 2 * map->format.val_bytes;
+ tot_len = reg_len + val_len + 3; /* : \n */
+
+ for (i = 0; i < map->max_register + 1; i++) {
+ if (!regmap_readable(map, i))
+ continue;
+
+ if (regmap_precious(map, i))
+ continue;
+
+ /* If we're in the region the user is trying to read */
+ if (p >= *ppos) {
+ /* ...but not beyond it */
+ if (buf_pos >= count - 1 - tot_len)
+ break;
+
+ /* Format the register */
+ snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
+ reg_len, i);
+ buf_pos += reg_len + 2;
+
+ /* Format the value, write all X if we can't read */
+ ret = regmap_read(map, i, &val);
+ if (ret == 0)
+ snprintf(buf + buf_pos, count - buf_pos,
+ "%.*x", val_len, val);
+ else
+ memset(buf + buf_pos, 'X', val_len);
+ buf_pos += 2 * map->format.val_bytes;
+
+ buf[buf_pos++] = '\n';
+ }
+ p += tot_len;
+ }
+
+ ret = buf_pos;
+
+ if (copy_to_user(user_buf, buf, buf_pos)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ *ppos += buf_pos;
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations regmap_map_fops = {
+ .open = regmap_open_file,
+ .read = regmap_map_read_file,
+ .llseek = default_llseek,
+};
+
+static ssize_t regmap_access_read_file(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ int reg_len, tot_len;
+ size_t buf_pos = 0;
+ loff_t p = 0;
+ ssize_t ret;
+ int i;
+ struct regmap *map = file->private_data;
+ char *buf;
+
+ if (*ppos < 0 || !count)
+ return -EINVAL;
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Calculate the length of a fixed format */
+ reg_len = regmap_calc_reg_len(map->max_register, buf, count);
+ tot_len = reg_len + 10; /* ': R W V P\n' */
+
+ for (i = 0; i < map->max_register + 1; i++) {
+ /* Ignore registers which are neither readable nor writable */
+ if (!regmap_readable(map, i) && !regmap_writeable(map, i))
+ continue;
+
+ /* If we're in the region the user is trying to read */
+ if (p >= *ppos) {
+ /* ...but not beyond it */
+ if (buf_pos >= count - 1 - tot_len)
+ break;
+
+ /* Format the register */
+ snprintf(buf + buf_pos, count - buf_pos,
+ "%.*x: %c %c %c %c\n",
+ reg_len, i,
+ regmap_readable(map, i) ? 'y' : 'n',
+ regmap_writeable(map, i) ? 'y' : 'n',
+ regmap_volatile(map, i) ? 'y' : 'n',
+ regmap_precious(map, i) ? 'y' : 'n');
+
+ buf_pos += tot_len;
+ }
+ p += tot_len;
+ }
+
+ ret = buf_pos;
+
+ if (copy_to_user(user_buf, buf, buf_pos)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ *ppos += buf_pos;
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations regmap_access_fops = {
+ .open = regmap_open_file,
+ .read = regmap_access_read_file,
+ .llseek = default_llseek,
+};
+
+void regmap_debugfs_init(struct regmap *map)
+{
+ map->debugfs = debugfs_create_dir(dev_name(map->dev),
+ regmap_debugfs_root);
+ if (!map->debugfs) {
+ dev_warn(map->dev, "Failed to create debugfs directory\n");
+ return;
+ }
+
+ if (map->max_register) {
+ debugfs_create_file("registers", 0400, map->debugfs,
+ map, &regmap_map_fops);
+ debugfs_create_file("access", 0400, map->debugfs,
+ map, &regmap_access_fops);
+ }
+}
+
+void regmap_debugfs_exit(struct regmap *map)
+{
+ debugfs_remove_recursive(map->debugfs);
+}
+
+void regmap_debugfs_initcall(void)
+{
+ regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
+ if (!regmap_debugfs_root) {
+ pr_warn("regmap: Failed to create debugfs root\n");
+ return;
+ }
+}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index c4f7a45cd2c..38621ec87c0 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -90,11 +90,9 @@ static int regmap_i2c_read(struct device *dev,
}
static struct regmap_bus regmap_i2c = {
- .type = &i2c_bus_type,
.write = regmap_i2c_write,
.gather_write = regmap_i2c_gather_write,
.read = regmap_i2c_read,
- .owner = THIS_MODULE,
};
/**
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index f8396945d6e..2560658de34 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -48,11 +48,9 @@ static int regmap_spi_read(struct device *dev,
}
static struct regmap_bus regmap_spi = {
- .type = &spi_bus_type,
.write = regmap_spi_write,
.gather_write = regmap_spi_gather_write,
.read = regmap_spi_read,
- .owner = THIS_MODULE,
.read_flag_mask = 0x80,
};
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 20663f8dae4..bf441db1ee9 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -15,29 +15,54 @@
#include <linux/mutex.h>
#include <linux/err.h>
-#include <linux/regmap.h>
-
-struct regmap;
-
-struct regmap_format {
- size_t buf_size;
- size_t reg_bytes;
- size_t val_bytes;
- void (*format_write)(struct regmap *map,
- unsigned int reg, unsigned int val);
- void (*format_reg)(void *buf, unsigned int reg);
- void (*format_val)(void *buf, unsigned int val);
- unsigned int (*parse_val)(void *buf);
-};
-
-struct regmap {
- struct mutex lock;
-
- struct device *dev; /* Device we do I/O on */
- void *work_buf; /* Scratch buffer used to format I/O */
- struct regmap_format format; /* Buffer format */
- const struct regmap_bus *bus;
-};
+#define CREATE_TRACE_POINTS
+#include <trace/events/regmap.h>
+
+#include "internal.h"
+
+bool regmap_writeable(struct regmap *map, unsigned int reg)
+{
+ if (map->max_register && reg > map->max_register)
+ return false;
+
+ if (map->writeable_reg)
+ return map->writeable_reg(map->dev, reg);
+
+ return true;
+}
+
+bool regmap_readable(struct regmap *map, unsigned int reg)
+{
+ if (map->max_register && reg > map->max_register)
+ return false;
+
+ if (map->readable_reg)
+ return map->readable_reg(map->dev, reg);
+
+ return true;
+}
+
+bool regmap_volatile(struct regmap *map, unsigned int reg)
+{
+ if (map->max_register && reg > map->max_register)
+ return false;
+
+ if (map->volatile_reg)
+ return map->volatile_reg(map->dev, reg);
+
+ return true;
+}
+
+bool regmap_precious(struct regmap *map, unsigned int reg)
+{
+ if (map->max_register && reg > map->max_register)
+ return false;
+
+ if (map->precious_reg)
+ return map->precious_reg(map->dev, reg);
+
+ return false;
+}
static void regmap_format_4_12_write(struct regmap *map,
unsigned int reg, unsigned int val)
@@ -116,6 +141,25 @@ struct regmap *regmap_init(struct device *dev,
map->format.val_bytes = config->val_bits / 8;
map->dev = dev;
map->bus = bus;
+ map->max_register = config->max_register;
+ map->writeable_reg = config->writeable_reg;
+ map->readable_reg = config->readable_reg;
+ map->volatile_reg = config->volatile_reg;
+ map->precious_reg = config->precious_reg;
+ map->cache_type = config->cache_type;
+ map->reg_defaults = config->reg_defaults;
+ map->num_reg_defaults = config->num_reg_defaults;
+ map->num_reg_defaults_raw = config->num_reg_defaults_raw;
+ map->reg_defaults_raw = config->reg_defaults_raw;
+ map->cache_size_raw = (config->val_bits / 8) * config->num_reg_defaults_raw;
+ map->cache_word_size = config->val_bits / 8;
+
+ if (config->read_flag_mask || config->write_flag_mask) {
+ map->read_flag_mask = config->read_flag_mask;
+ map->write_flag_mask = config->write_flag_mask;
+ } else {
+ map->read_flag_mask = bus->read_flag_mask;
+ }
switch (config->reg_bits) {
case 4:
@@ -171,6 +215,12 @@ struct regmap *regmap_init(struct device *dev,
goto err_map;
}
+ ret = regcache_init(map);
+ if (ret < 0)
+ goto err_map;
+
+ regmap_debugfs_init(map);
+
return map;
err_map:
@@ -185,6 +235,8 @@ EXPORT_SYMBOL_GPL(regmap_init);
*/
void regmap_exit(struct regmap *map)
{
+ regcache_exit(map);
+ regmap_debugfs_exit(map);
kfree(map->work_buf);
kfree(map);
}
@@ -193,19 +245,38 @@ EXPORT_SYMBOL_GPL(regmap_exit);
static int _regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
+ u8 *u8 = map->work_buf;
void *buf;
int ret = -ENOTSUPP;
size_t len;
+ int i;
+
+ /* Check for unwritable registers before we start */
+ if (map->writeable_reg)
+ for (i = 0; i < val_len / map->format.val_bytes; i++)
+ if (!map->writeable_reg(map->dev, reg + i))
+ return -EINVAL;
map->format.format_reg(map->work_buf, reg);
- /* Try to do a gather write if we can */
- if (map->bus->gather_write)
+ u8[0] |= map->write_flag_mask;
+
+ trace_regmap_hw_write_start(map->dev, reg,
+ val_len / map->format.val_bytes);
+
+ /* If we're doing a single register write we can probably just
+ * send the work_buf directly, otherwise try to do a gather
+ * write.
+ */
+ if (val == map->work_buf + map->format.reg_bytes)
+ ret = map->bus->write(map->dev, map->work_buf,
+ map->format.reg_bytes + val_len);
+ else if (map->bus->gather_write)
ret = map->bus->gather_write(map->dev, map->work_buf,
map->format.reg_bytes,
val, val_len);
- /* Otherwise fall back on linearising by hand. */
+ /* If that didn't work fall back on linearising by hand. */
if (ret == -ENOTSUPP) {
len = map->format.reg_bytes + val_len;
buf = kmalloc(len, GFP_KERNEL);
@@ -219,19 +290,39 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
kfree(buf);
}
+ trace_regmap_hw_write_done(map->dev, reg,
+ val_len / map->format.val_bytes);
+
return ret;
}
-static int _regmap_write(struct regmap *map, unsigned int reg,
- unsigned int val)
+int _regmap_write(struct regmap *map, unsigned int reg,
+ unsigned int val)
{
+ int ret;
BUG_ON(!map->format.format_write && !map->format.format_val);
+ if (!map->cache_bypass) {
+ ret = regcache_write(map, reg, val);
+ if (ret != 0)
+ return ret;
+ if (map->cache_only)
+ return 0;
+ }
+
+ trace_regmap_reg_write(map->dev, reg, val);
+
if (map->format.format_write) {
map->format.format_write(map, reg, val);
- return map->bus->write(map->dev, map->work_buf,
- map->format.buf_size);
+ trace_regmap_hw_write_start(map->dev, reg, 1);
+
+ ret = map->bus->write(map->dev, map->work_buf,
+ map->format.buf_size);
+
+ trace_regmap_hw_write_done(map->dev, reg, 1);
+
+ return ret;
} else {
map->format.format_val(map->work_buf + map->format.reg_bytes,
val);
@@ -286,6 +377,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
{
int ret;
+ WARN_ON(map->cache_type != REGCACHE_NONE);
+
mutex_lock(&map->lock);
ret = _regmap_raw_write(map, reg, val, val_len);
@@ -305,20 +398,23 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
map->format.format_reg(map->work_buf, reg);
/*
- * Some buses flag reads by setting the high bits in the
+ * Some buses or devices flag reads by setting the high bits in the
* register addresss; since it's always the high bits for all
* current formats we can do this here rather than in
* formatting. This may break if we get interesting formats.
*/
- if (map->bus->read_flag_mask)
- u8[0] |= map->bus->read_flag_mask;
+ u8[0] |= map->read_flag_mask;
+
+ trace_regmap_hw_read_start(map->dev, reg,
+ val_len / map->format.val_bytes);
ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes,
val, val_len);
- if (ret != 0)
- return ret;
- return 0;
+ trace_regmap_hw_read_done(map->dev, reg,
+ val_len / map->format.val_bytes);
+
+ return ret;
}
static int _regmap_read(struct regmap *map, unsigned int reg,
@@ -329,9 +425,20 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
if (!map->format.parse_val)
return -EINVAL;
+ if (!map->cache_bypass) {
+ ret = regcache_read(map, reg, val);
+ if (ret == 0)
+ return 0;
+ }
+
+ if (map->cache_only)
+ return -EBUSY;
+
ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
- if (ret == 0)
+ if (ret == 0) {
*val = map->format.parse_val(map->work_buf);
+ trace_regmap_reg_read(map->dev, reg, *val);
+ }
return ret;
}
@@ -375,6 +482,14 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
size_t val_len)
{
int ret;
+ int i;
+ bool vol = true;
+
+ for (i = 0; i < val_len / map->format.val_bytes; i++)
+ if (!regmap_volatile(map, reg + i))
+ vol = false;
+
+ WARN_ON(!vol && map->cache_type != REGCACHE_NONE);
mutex_lock(&map->lock);
@@ -402,16 +517,30 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
{
int ret, i;
size_t val_bytes = map->format.val_bytes;
+ bool vol = true;
if (!map->format.parse_val)
return -EINVAL;
- ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
- if (ret != 0)
- return ret;
+ /* Is this a block of volatile registers? */
+ for (i = 0; i < val_count; i++)
+ if (!regmap_volatile(map, reg + i))
+ vol = false;
+
+ if (vol || map->cache_type == REGCACHE_NONE) {
+ ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
+ if (ret != 0)
+ return ret;
- for (i = 0; i < val_count * val_bytes; i += val_bytes)
- map->format.parse_val(val + i);
+ for (i = 0; i < val_count * val_bytes; i += val_bytes)
+ map->format.parse_val(val + i);
+ } else {
+ for (i = 0; i < val_count; i++) {
+ ret = regmap_read(map, reg + i, val + (i * val_bytes));
+ if (ret != 0)
+ return ret;
+ }
+ }
return 0;
}
@@ -450,3 +579,11 @@ out:
return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits);
+
+static int __init regmap_initcall(void)
+{
+ regmap_debugfs_initcall();
+
+ return 0;
+}
+postcore_initcall(regmap_initcall);
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index ae0a02e1b80..c1172dafdff 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -33,6 +33,19 @@ config BCMA_DRIVER_PCI_HOSTMODE
help
PCI core hostmode operation (external PCI bus).
+config BCMA_HOST_SOC
+ bool
+ depends on BCMA_DRIVER_MIPS
+
+config BCMA_DRIVER_MIPS
+ bool "BCMA Broadcom MIPS core driver"
+ depends on BCMA && MIPS
+ help
+ Driver for the Broadcom MIPS core attached to Broadcom specific
+ Advanced Microcontroller Bus.
+
+ If unsure, say N
+
config BCMA_DEBUG
bool "BCMA debugging"
depends on BCMA
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index a2161cceafb..82de24e5340 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -2,7 +2,9 @@ bcma-y += main.o scan.o core.o sprom.o
bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o
bcma-y += driver_pci.o
bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
+bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o
bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o
+bcma-$(CONFIG_BCMA_HOST_SOC) += host_soc.o
obj-$(CONFIG_BCMA) += bcma.o
ccflags-$(CONFIG_BCMA_DEBUG) := -DDEBUG
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index e02ff21835c..30a3085d335 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -15,13 +15,29 @@ struct bcma_bus;
/* main.c */
int bcma_bus_register(struct bcma_bus *bus);
void bcma_bus_unregister(struct bcma_bus *bus);
+int __init bcma_bus_early_register(struct bcma_bus *bus,
+ struct bcma_device *core_cc,
+ struct bcma_device *core_mips);
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
+int __init bcma_bus_scan_early(struct bcma_bus *bus,
+ struct bcma_device_id *match,
+ struct bcma_device *core);
+void bcma_init_bus(struct bcma_bus *bus);
/* sprom.c */
int bcma_sprom_get(struct bcma_bus *bus);
+/* driver_chipcommon.c */
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
+
+/* driver_chipcommon_pmu.c */
+u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc);
+u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
+
#ifdef CONFIG_BCMA_HOST_PCI
/* host_pci.c */
extern int __init bcma_host_pci_init(void);
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index 4a04a49cc06..189a97b51be 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -110,6 +110,8 @@ EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
u32 bcma_core_dma_translation(struct bcma_device *core)
{
switch (core->bus->hosttype) {
+ case BCMA_HOSTTYPE_SOC:
+ return 0;
case BCMA_HOSTTYPE_PCI:
if (bcma_aread32(core, BCMA_IOST) & BCMA_IOST_DMA64)
return BCMA_DMA_TRANSLATION_DMA64_CMT;
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index 851e05bc948..47cce9d6963 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -26,6 +26,9 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
u32 leddc_on = 10;
u32 leddc_off = 90;
+ if (cc->setup_done)
+ return;
+
if (cc->core->id.rev >= 11)
cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
@@ -52,6 +55,8 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
((leddc_on << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
(leddc_off << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT)));
}
+
+ cc->setup_done = true;
}
/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */
@@ -101,3 +106,51 @@ u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
return bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
}
+
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+{
+ unsigned int irq;
+ u32 baud_base;
+ u32 i;
+ unsigned int ccrev = cc->core->id.rev;
+ struct bcma_serial_port *ports = cc->serial_ports;
+
+ if (ccrev >= 11 && ccrev != 15) {
+ /* Fixed ALP clock */
+ baud_base = bcma_pmu_alp_clock(cc);
+ if (ccrev >= 21) {
+ /* Turn off UART clock before switching clocksource. */
+ bcma_cc_write32(cc, BCMA_CC_CORECTL,
+ bcma_cc_read32(cc, BCMA_CC_CORECTL)
+ & ~BCMA_CC_CORECTL_UARTCLKEN);
+ }
+ /* Set the override bit so we don't divide it */
+ bcma_cc_write32(cc, BCMA_CC_CORECTL,
+ bcma_cc_read32(cc, BCMA_CC_CORECTL)
+ | BCMA_CC_CORECTL_UARTCLK0);
+ if (ccrev >= 21) {
+ /* Re-enable the UART clock. */
+ bcma_cc_write32(cc, BCMA_CC_CORECTL,
+ bcma_cc_read32(cc, BCMA_CC_CORECTL)
+ | BCMA_CC_CORECTL_UARTCLKEN);
+ }
+ } else {
+ pr_err("serial not supported on this device ccrev: 0x%x\n",
+ ccrev);
+ return;
+ }
+
+ irq = bcma_core_mips_irq(cc->core);
+
+ /* Determine the registers of the UARTs */
+ cc->nr_serial_ports = (cc->capabilities & BCMA_CC_CAP_NRUART);
+ for (i = 0; i < cc->nr_serial_ports; i++) {
+ ports[i].regs = cc->core->io_addr + BCMA_CC_UART0_DATA +
+ (i * 256);
+ ports[i].irq = irq;
+ ports[i].baud_base = baud_base;
+ ports[i].reg_shift = 0;
+ }
+}
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index fcc63db0ce7..2968d809d49 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -11,20 +11,47 @@
#include "bcma_private.h"
#include <linux/bcma/bcma.h>
-static void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
- u32 offset, u32 mask, u32 set)
+static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
{
- u32 value;
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
+ return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+}
- bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
+void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
+{
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_write);
+
+void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
+ u32 set)
+{
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
+ bcma_cc_maskset32(cc, BCMA_CC_PLLCTL_DATA, mask, set);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset);
+
+void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
+ u32 offset, u32 mask, u32 set)
+{
bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset);
bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
- value = bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA);
- value &= mask;
- value |= set;
- bcma_cc_write32(cc, BCMA_CC_CHIPCTL_DATA, value);
- bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA);
+ bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL_DATA, mask, set);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset);
+
+void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
+ u32 set)
+{
+ bcma_cc_write32(cc, BCMA_CC_REGCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_REGCTL_ADDR);
+ bcma_cc_maskset32(cc, BCMA_CC_REGCTL_DATA, mask, set);
}
+EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
{
@@ -83,6 +110,24 @@ void bcma_pmu_swreg_init(struct bcma_drv_cc *cc)
}
}
+/* Disable to allow reading SPROM. Don't know the adventages of enabling it. */
+void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
+{
+ struct bcma_bus *bus = cc->core->bus;
+ u32 val;
+
+ val = bcma_cc_read32(cc, BCMA_CC_CHIPCTL);
+ if (enable) {
+ val |= BCMA_CHIPCTL_4331_EXTPA_EN;
+ if (bus->chipinfo.pkg == 9 || bus->chipinfo.pkg == 11)
+ val |= BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
+ } else {
+ val &= ~BCMA_CHIPCTL_4331_EXTPA_EN;
+ val &= ~BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
+ }
+ bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
+}
+
void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
@@ -92,7 +137,7 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
break;
case 0x4331:
- pr_err("Enabling Ext PA lines not implemented\n");
+ /* BCM4331 workaround is SPROM-related, we put it in sprom.c */
break;
case 43224:
if (bus->chipinfo.rev == 0) {
@@ -136,3 +181,129 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
bcma_pmu_swreg_init(cc);
bcma_pmu_workarounds(cc);
}
+
+u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc)
+{
+ struct bcma_bus *bus = cc->core->bus;
+
+ switch (bus->chipinfo.id) {
+ case 0x4716:
+ case 0x4748:
+ case 47162:
+ case 0x4313:
+ case 0x5357:
+ case 0x4749:
+ case 53572:
+ /* always 20Mhz */
+ return 20000 * 1000;
+ case 0x5356:
+ case 0x5300:
+ /* always 25Mhz */
+ return 25000 * 1000;
+ default:
+ pr_warn("No ALP clock specified for %04X device, "
+ "pmu rev. %d, using default %d Hz\n",
+ bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
+ }
+ return BCMA_CC_PMU_ALP_CLOCK;
+}
+
+/* Find the output of the "m" pll divider given pll controls that start with
+ * pllreg "pll0" i.e. 12 for main 6 for phy, 0 for misc.
+ */
+static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
+{
+ u32 tmp, div, ndiv, p1, p2, fc;
+ struct bcma_bus *bus = cc->core->bus;
+
+ BUG_ON((pll0 & 3) || (pll0 > BCMA_CC_PMU4716_MAINPLL_PLL0));
+
+ BUG_ON(!m || m > 4);
+
+ if (bus->chipinfo.id == 0x5357 || bus->chipinfo.id == 0x4749) {
+ /* Detect failure in clock setting */
+ tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
+ if (tmp & 0x40000)
+ return 133 * 1000000;
+ }
+
+ tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_P1P2_OFF);
+ p1 = (tmp & BCMA_CC_PPL_P1_MASK) >> BCMA_CC_PPL_P1_SHIFT;
+ p2 = (tmp & BCMA_CC_PPL_P2_MASK) >> BCMA_CC_PPL_P2_SHIFT;
+
+ tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_M14_OFF);
+ div = (tmp >> ((m - 1) * BCMA_CC_PPL_MDIV_WIDTH)) &
+ BCMA_CC_PPL_MDIV_MASK;
+
+ tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_NM5_OFF);
+ ndiv = (tmp & BCMA_CC_PPL_NDIV_MASK) >> BCMA_CC_PPL_NDIV_SHIFT;
+
+ /* Do calculation in Mhz */
+ fc = bcma_pmu_alp_clock(cc) / 1000000;
+ fc = (p1 * ndiv * fc) / p2;
+
+ /* Return clock in Hertz */
+ return (fc / div) * 1000000;
+}
+
+/* query bus clock frequency for PMU-enabled chipcommon */
+u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
+{
+ struct bcma_bus *bus = cc->core->bus;
+
+ switch (bus->chipinfo.id) {
+ case 0x4716:
+ case 0x4748:
+ case 47162:
+ return bcma_pmu_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
+ case 0x5356:
+ return bcma_pmu_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
+ case 0x5357:
+ case 0x4749:
+ return bcma_pmu_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
+ case 0x5300:
+ return bcma_pmu_clock(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
+ case 53572:
+ return 75000000;
+ default:
+ pr_warn("No backplane clock specified for %04X device, "
+ "pmu rev. %d, using default %d Hz\n",
+ bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
+ }
+ return BCMA_CC_PMU_HT_CLOCK;
+}
+
+/* query cpu clock frequency for PMU-enabled chipcommon */
+u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
+{
+ struct bcma_bus *bus = cc->core->bus;
+
+ if (bus->chipinfo.id == 53572)
+ return 300000000;
+
+ if (cc->pmu.rev >= 5) {
+ u32 pll;
+ switch (bus->chipinfo.id) {
+ case 0x5356:
+ pll = BCMA_CC_PMU5356_MAINPLL_PLL0;
+ break;
+ case 0x5357:
+ case 0x4749:
+ pll = BCMA_CC_PMU5357_MAINPLL_PLL0;
+ break;
+ default:
+ pll = BCMA_CC_PMU4716_MAINPLL_PLL0;
+ break;
+ }
+
+ /* TODO: if (bus->chipinfo.id == 0x5300)
+ return si_4706_pmu_clock(sih, osh, cc, PMU4706_MAINPLL_PLL0, PMU5_MAINPLL_CPU); */
+ return bcma_pmu_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
+ }
+
+ return bcma_pmu_get_clockcontrol(cc);
+}
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
new file mode 100644
index 00000000000..c3e9dff4224
--- /dev/null
+++ b/drivers/bcma/driver_mips.c
@@ -0,0 +1,256 @@
+/*
+ * Broadcom specific AMBA
+ * Broadcom MIPS32 74K core driver
+ *
+ * Copyright 2009, Broadcom Corporation
+ * Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2010, Bernhard Loos <bernhardloos@googlemail.com>
+ * Copyright 2011, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+
+#include <linux/bcma/bcma.h>
+
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/time.h>
+
+/* The 47162a0 hangs when reading MIPS DMP registers registers */
+static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
+{
+ return dev->bus->chipinfo.id == 47162 && dev->bus->chipinfo.rev == 0 &&
+ dev->id.id == BCMA_CORE_MIPS_74K;
+}
+
+/* The 5357b0 hangs when reading USB20H DMP registers */
+static inline bool bcma_core_mips_bcm5357b0_quirk(struct bcma_device *dev)
+{
+ return (dev->bus->chipinfo.id == 0x5357 ||
+ dev->bus->chipinfo.id == 0x4749) &&
+ dev->bus->chipinfo.pkg == 11 &&
+ dev->id.id == BCMA_CORE_USB20_HOST;
+}
+
+static inline u32 mips_read32(struct bcma_drv_mips *mcore,
+ u16 offset)
+{
+ return bcma_read32(mcore->core, offset);
+}
+
+static inline void mips_write32(struct bcma_drv_mips *mcore,
+ u16 offset,
+ u32 value)
+{
+ bcma_write32(mcore->core, offset, value);
+}
+
+static const u32 ipsflag_irq_mask[] = {
+ 0,
+ BCMA_MIPS_IPSFLAG_IRQ1,
+ BCMA_MIPS_IPSFLAG_IRQ2,
+ BCMA_MIPS_IPSFLAG_IRQ3,
+ BCMA_MIPS_IPSFLAG_IRQ4,
+};
+
+static const u32 ipsflag_irq_shift[] = {
+ 0,
+ BCMA_MIPS_IPSFLAG_IRQ1_SHIFT,
+ BCMA_MIPS_IPSFLAG_IRQ2_SHIFT,
+ BCMA_MIPS_IPSFLAG_IRQ3_SHIFT,
+ BCMA_MIPS_IPSFLAG_IRQ4_SHIFT,
+};
+
+static u32 bcma_core_mips_irqflag(struct bcma_device *dev)
+{
+ u32 flag;
+
+ if (bcma_core_mips_bcm47162a0_quirk(dev))
+ return dev->core_index;
+ if (bcma_core_mips_bcm5357b0_quirk(dev))
+ return dev->core_index;
+ flag = bcma_aread32(dev, BCMA_MIPS_OOBSELOUTA30);
+
+ return flag & 0x1F;
+}
+
+/* Get the MIPS IRQ assignment for a specified device.
+ * If unassigned, 0 is returned.
+ */
+unsigned int bcma_core_mips_irq(struct bcma_device *dev)
+{
+ struct bcma_device *mdev = dev->bus->drv_mips.core;
+ u32 irqflag;
+ unsigned int irq;
+
+ irqflag = bcma_core_mips_irqflag(dev);
+
+ for (irq = 1; irq <= 4; irq++)
+ if (bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq)) &
+ (1 << irqflag))
+ return irq;
+
+ return 0;
+}
+EXPORT_SYMBOL(bcma_core_mips_irq);
+
+static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
+{
+ unsigned int oldirq = bcma_core_mips_irq(dev);
+ struct bcma_bus *bus = dev->bus;
+ struct bcma_device *mdev = bus->drv_mips.core;
+ u32 irqflag;
+
+ irqflag = bcma_core_mips_irqflag(dev);
+ BUG_ON(oldirq == 6);
+
+ dev->irq = irq + 2;
+
+ /* clear the old irq */
+ if (oldirq == 0)
+ bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0),
+ bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) &
+ ~(1 << irqflag));
+ else
+ bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq), 0);
+
+ /* assign the new one */
+ if (irq == 0) {
+ bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0),
+ bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) |
+ (1 << irqflag));
+ } else {
+ u32 oldirqflag = bcma_read32(mdev,
+ BCMA_MIPS_MIPS74K_INTMASK(irq));
+ if (oldirqflag) {
+ struct bcma_device *core;
+
+ /* backplane irq line is in use, find out who uses
+ * it and set user to irq 0
+ */
+ list_for_each_entry_reverse(core, &bus->cores, list) {
+ if ((1 << bcma_core_mips_irqflag(core)) ==
+ oldirqflag) {
+ bcma_core_mips_set_irq(core, 0);
+ break;
+ }
+ }
+ }
+ bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq),
+ 1 << irqflag);
+ }
+
+ pr_info("set_irq: core 0x%04x, irq %d => %d\n",
+ dev->id.id, oldirq + 2, irq + 2);
+}
+
+static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
+{
+ int i;
+ static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
+ printk(KERN_INFO KBUILD_MODNAME ": core 0x%04x, irq :", dev->id.id);
+ for (i = 0; i <= 6; i++)
+ printk(" %s%s", irq_name[i], i == irq ? "*" : " ");
+ printk("\n");
+}
+
+static void bcma_core_mips_dump_irq(struct bcma_bus *bus)
+{
+ struct bcma_device *core;
+
+ list_for_each_entry_reverse(core, &bus->cores, list) {
+ bcma_core_mips_print_irq(core, bcma_core_mips_irq(core));
+ }
+}
+
+u32 bcma_cpu_clock(struct bcma_drv_mips *mcore)
+{
+ struct bcma_bus *bus = mcore->core->bus;
+
+ if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU)
+ return bcma_pmu_get_clockcpu(&bus->drv_cc);
+
+ pr_err("No PMU available, need this to get the cpu clock\n");
+ return 0;
+}
+EXPORT_SYMBOL(bcma_cpu_clock);
+
+static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
+{
+ struct bcma_bus *bus = mcore->core->bus;
+
+ switch (bus->drv_cc.capabilities & BCMA_CC_CAP_FLASHT) {
+ case BCMA_CC_FLASHT_STSER:
+ case BCMA_CC_FLASHT_ATSER:
+ pr_err("Serial flash not supported.\n");
+ break;
+ case BCMA_CC_FLASHT_PARA:
+ pr_info("found parallel flash.\n");
+ bus->drv_cc.pflash.window = 0x1c000000;
+ bus->drv_cc.pflash.window_size = 0x02000000;
+
+ if ((bcma_read32(bus->drv_cc.core, BCMA_CC_FLASH_CFG) &
+ BCMA_CC_FLASH_CFG_DS) == 0)
+ bus->drv_cc.pflash.buswidth = 1;
+ else
+ bus->drv_cc.pflash.buswidth = 2;
+ break;
+ default:
+ pr_err("flash not supported.\n");
+ }
+}
+
+void bcma_core_mips_init(struct bcma_drv_mips *mcore)
+{
+ struct bcma_bus *bus;
+ struct bcma_device *core;
+ bus = mcore->core->bus;
+
+ pr_info("Initializing MIPS core...\n");
+
+ if (!mcore->setup_done)
+ mcore->assigned_irqs = 1;
+
+ /* Assign IRQs to all cores on the bus */
+ list_for_each_entry_reverse(core, &bus->cores, list) {
+ int mips_irq;
+ if (core->irq)
+ continue;
+
+ mips_irq = bcma_core_mips_irq(core);
+ if (mips_irq > 4)
+ core->irq = 0;
+ else
+ core->irq = mips_irq + 2;
+ if (core->irq > 5)
+ continue;
+ switch (core->id.id) {
+ case BCMA_CORE_PCI:
+ case BCMA_CORE_PCIE:
+ case BCMA_CORE_ETHERNET:
+ case BCMA_CORE_ETHERNET_GBIT:
+ case BCMA_CORE_MAC_GBIT:
+ case BCMA_CORE_80211:
+ case BCMA_CORE_USB20_HOST:
+ /* These devices get their own IRQ line if available,
+ * the rest goes on IRQ0
+ */
+ if (mcore->assigned_irqs <= 4)
+ bcma_core_mips_set_irq(core,
+ mcore->assigned_irqs++);
+ break;
+ }
+ }
+ pr_info("IRQ reconfiguration done\n");
+ bcma_core_mips_dump_irq(bus);
+
+ if (mcore->setup_done)
+ return;
+
+ bcma_chipco_serial_init(&bus->drv_cc);
+ bcma_core_mips_flash_detect(mcore);
+ mcore->setup_done = true;
+}
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index 25f3ddf3382..81f3d0a4b85 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -173,7 +173,7 @@ static bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
return false;
#ifdef CONFIG_SSB_DRIVER_PCICORE
- if (bus->sprom.boardflags_lo & SSB_PCICORE_BFL_NOPCI)
+ if (bus->sprom.boardflags_lo & SSB_BFL_NOPCI)
return false;
#endif /* CONFIG_SSB_DRIVER_PCICORE */
@@ -189,6 +189,9 @@ static bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
void bcma_core_pci_init(struct bcma_drv_pci *pc)
{
+ if (pc->setup_done)
+ return;
+
if (bcma_core_pci_is_in_hostmode(pc)) {
#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
bcma_core_pci_hostmode_init(pc);
@@ -198,6 +201,8 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc)
} else {
bcma_core_pci_clientmode_init(pc);
}
+
+ pc->setup_done = true;
}
int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
@@ -205,7 +210,14 @@ int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
{
struct pci_dev *pdev = pc->core->bus->host_pci;
u32 coremask, tmp;
- int err;
+ int err = 0;
+
+ if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
+ /* This bcma device is not on a PCI host-bus. So the IRQs are
+ * not routed through the PCI core.
+ * So we must not enable routing through the PCI core. */
+ goto out;
+ }
err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
if (err)
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
new file mode 100644
index 00000000000..3c381fb8f9c
--- /dev/null
+++ b/drivers/bcma/host_soc.c
@@ -0,0 +1,183 @@
+/*
+ * Broadcom specific AMBA
+ * System on Chip (SoC) Host
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include "scan.h"
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_soc.h>
+
+static u8 bcma_host_soc_read8(struct bcma_device *core, u16 offset)
+{
+ return readb(core->io_addr + offset);
+}
+
+static u16 bcma_host_soc_read16(struct bcma_device *core, u16 offset)
+{
+ return readw(core->io_addr + offset);
+}
+
+static u32 bcma_host_soc_read32(struct bcma_device *core, u16 offset)
+{
+ return readl(core->io_addr + offset);
+}
+
+static void bcma_host_soc_write8(struct bcma_device *core, u16 offset,
+ u8 value)
+{
+ writeb(value, core->io_addr + offset);
+}
+
+static void bcma_host_soc_write16(struct bcma_device *core, u16 offset,
+ u16 value)
+{
+ writew(value, core->io_addr + offset);
+}
+
+static void bcma_host_soc_write32(struct bcma_device *core, u16 offset,
+ u32 value)
+{
+ writel(value, core->io_addr + offset);
+}
+
+#ifdef CONFIG_BCMA_BLOCKIO
+static void bcma_host_soc_block_read(struct bcma_device *core, void *buffer,
+ size_t count, u16 offset, u8 reg_width)
+{
+ void __iomem *addr = core->io_addr + offset;
+
+ switch (reg_width) {
+ case sizeof(u8): {
+ u8 *buf = buffer;
+
+ while (count) {
+ *buf = __raw_readb(addr);
+ buf++;
+ count--;
+ }
+ break;
+ }
+ case sizeof(u16): {
+ __le16 *buf = buffer;
+
+ WARN_ON(count & 1);
+ while (count) {
+ *buf = (__force __le16)__raw_readw(addr);
+ buf++;
+ count -= 2;
+ }
+ break;
+ }
+ case sizeof(u32): {
+ __le32 *buf = buffer;
+
+ WARN_ON(count & 3);
+ while (count) {
+ *buf = (__force __le32)__raw_readl(addr);
+ buf++;
+ count -= 4;
+ }
+ break;
+ }
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void bcma_host_soc_block_write(struct bcma_device *core,
+ const void *buffer,
+ size_t count, u16 offset, u8 reg_width)
+{
+ void __iomem *addr = core->io_addr + offset;
+
+ switch (reg_width) {
+ case sizeof(u8): {
+ const u8 *buf = buffer;
+
+ while (count) {
+ __raw_writeb(*buf, addr);
+ buf++;
+ count--;
+ }
+ break;
+ }
+ case sizeof(u16): {
+ const __le16 *buf = buffer;
+
+ WARN_ON(count & 1);
+ while (count) {
+ __raw_writew((__force u16)(*buf), addr);
+ buf++;
+ count -= 2;
+ }
+ break;
+ }
+ case sizeof(u32): {
+ const __le32 *buf = buffer;
+
+ WARN_ON(count & 3);
+ while (count) {
+ __raw_writel((__force u32)(*buf), addr);
+ buf++;
+ count -= 4;
+ }
+ break;
+ }
+ default:
+ WARN_ON(1);
+ }
+}
+#endif /* CONFIG_BCMA_BLOCKIO */
+
+static u32 bcma_host_soc_aread32(struct bcma_device *core, u16 offset)
+{
+ return readl(core->io_wrap + offset);
+}
+
+static void bcma_host_soc_awrite32(struct bcma_device *core, u16 offset,
+ u32 value)
+{
+ writel(value, core->io_wrap + offset);
+}
+
+const struct bcma_host_ops bcma_host_soc_ops = {
+ .read8 = bcma_host_soc_read8,
+ .read16 = bcma_host_soc_read16,
+ .read32 = bcma_host_soc_read32,
+ .write8 = bcma_host_soc_write8,
+ .write16 = bcma_host_soc_write16,
+ .write32 = bcma_host_soc_write32,
+#ifdef CONFIG_BCMA_BLOCKIO
+ .block_read = bcma_host_soc_block_read,
+ .block_write = bcma_host_soc_block_write,
+#endif
+ .aread32 = bcma_host_soc_aread32,
+ .awrite32 = bcma_host_soc_awrite32,
+};
+
+int __init bcma_host_soc_register(struct bcma_soc *soc)
+{
+ struct bcma_bus *bus = &soc->bus;
+ int err;
+
+ /* iomap only first core. We have to read some register on this core
+ * to scan the bus.
+ */
+ bus->mmio = ioremap_nocache(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1);
+ if (!bus->mmio)
+ return -ENOMEM;
+
+ /* Host specific */
+ bus->hosttype = BCMA_HOSTTYPE_SOC;
+ bus->ops = &bcma_host_soc_ops;
+
+ /* Register */
+ err = bcma_bus_early_register(bus, &soc->core_cc, &soc->core_mips);
+ if (err)
+ iounmap(bus->mmio);
+
+ return err;
+}
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 73b7b1a18fa..8c09c3e547c 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -68,6 +68,10 @@ static struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
static void bcma_release_core_dev(struct device *dev)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+ if (core->io_addr)
+ iounmap(core->io_addr);
+ if (core->io_wrap)
+ iounmap(core->io_wrap);
kfree(core);
}
@@ -82,6 +86,7 @@ static int bcma_register_cores(struct bcma_bus *bus)
case BCMA_CORE_CHIPCOMMON:
case BCMA_CORE_PCI:
case BCMA_CORE_PCIE:
+ case BCMA_CORE_MIPS_74K:
continue;
}
@@ -95,7 +100,10 @@ static int bcma_register_cores(struct bcma_bus *bus)
core->dma_dev = &bus->host_pci->dev;
core->irq = bus->host_pci->irq;
break;
- case BCMA_HOSTTYPE_NONE:
+ case BCMA_HOSTTYPE_SOC:
+ core->dev.dma_mask = &core->dev.coherent_dma_mask;
+ core->dma_dev = &core->dev;
+ break;
case BCMA_HOSTTYPE_SDIO:
break;
}
@@ -142,6 +150,13 @@ int bcma_bus_register(struct bcma_bus *bus)
bcma_core_chipcommon_init(&bus->drv_cc);
}
+ /* Init MIPS core */
+ core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
+ if (core) {
+ bus->drv_mips.core = core;
+ bcma_core_mips_init(&bus->drv_mips);
+ }
+
/* Init PCIE core */
core = bcma_find_core(bus, BCMA_CORE_PCIE);
if (core) {
@@ -171,6 +186,59 @@ void bcma_bus_unregister(struct bcma_bus *bus)
bcma_unregister_cores(bus);
}
+int __init bcma_bus_early_register(struct bcma_bus *bus,
+ struct bcma_device *core_cc,
+ struct bcma_device *core_mips)
+{
+ int err;
+ struct bcma_device *core;
+ struct bcma_device_id match;
+
+ bcma_init_bus(bus);
+
+ match.manuf = BCMA_MANUF_BCM;
+ match.id = BCMA_CORE_CHIPCOMMON;
+ match.class = BCMA_CL_SIM;
+ match.rev = BCMA_ANY_REV;
+
+ /* Scan for chip common core */
+ err = bcma_bus_scan_early(bus, &match, core_cc);
+ if (err) {
+ pr_err("Failed to scan for common core: %d\n", err);
+ return -1;
+ }
+
+ match.manuf = BCMA_MANUF_MIPS;
+ match.id = BCMA_CORE_MIPS_74K;
+ match.class = BCMA_CL_SIM;
+ match.rev = BCMA_ANY_REV;
+
+ /* Scan for mips core */
+ err = bcma_bus_scan_early(bus, &match, core_mips);
+ if (err) {
+ pr_err("Failed to scan for mips core: %d\n", err);
+ return -1;
+ }
+
+ /* Init CC core */
+ core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+ if (core) {
+ bus->drv_cc.core = core;
+ bcma_core_chipcommon_init(&bus->drv_cc);
+ }
+
+ /* Init MIPS core */
+ core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
+ if (core) {
+ bus->drv_mips.core = core;
+ bcma_core_mips_init(&bus->drv_mips);
+ }
+
+ pr_info("Early bus registered\n");
+
+ return 0;
+}
+
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
drv->drv.name = drv->name;
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 40d7dcce893..cad99485768 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -200,18 +200,162 @@ static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 **eromptr,
return addrl;
}
-int bcma_bus_scan(struct bcma_bus *bus)
+static struct bcma_device *bcma_find_core_by_index(struct bcma_bus *bus,
+ u16 index)
{
- u32 erombase;
- u32 __iomem *eromptr, *eromend;
+ struct bcma_device *core;
+ list_for_each_entry(core, &bus->cores, list) {
+ if (core->core_index == index)
+ return core;
+ }
+ return NULL;
+}
+
+static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
+ struct bcma_device_id *match, int core_num,
+ struct bcma_device *core)
+{
+ s32 tmp;
+ u8 i, j;
s32 cia, cib;
u8 ports[2], wrappers[2];
+ /* get CIs */
+ cia = bcma_erom_get_ci(bus, eromptr);
+ if (cia < 0) {
+ bcma_erom_push_ent(eromptr);
+ if (bcma_erom_is_end(bus, eromptr))
+ return -ESPIPE;
+ return -EILSEQ;
+ }
+ cib = bcma_erom_get_ci(bus, eromptr);
+ if (cib < 0)
+ return -EILSEQ;
+
+ /* parse CIs */
+ core->id.class = (cia & SCAN_CIA_CLASS) >> SCAN_CIA_CLASS_SHIFT;
+ core->id.id = (cia & SCAN_CIA_ID) >> SCAN_CIA_ID_SHIFT;
+ core->id.manuf = (cia & SCAN_CIA_MANUF) >> SCAN_CIA_MANUF_SHIFT;
+ ports[0] = (cib & SCAN_CIB_NMP) >> SCAN_CIB_NMP_SHIFT;
+ ports[1] = (cib & SCAN_CIB_NSP) >> SCAN_CIB_NSP_SHIFT;
+ wrappers[0] = (cib & SCAN_CIB_NMW) >> SCAN_CIB_NMW_SHIFT;
+ wrappers[1] = (cib & SCAN_CIB_NSW) >> SCAN_CIB_NSW_SHIFT;
+ core->id.rev = (cib & SCAN_CIB_REV) >> SCAN_CIB_REV_SHIFT;
+
+ if (((core->id.manuf == BCMA_MANUF_ARM) &&
+ (core->id.id == 0xFFF)) ||
+ (ports[1] == 0)) {
+ bcma_erom_skip_component(bus, eromptr);
+ return -ENXIO;
+ }
+
+ /* check if component is a core at all */
+ if (wrappers[0] + wrappers[1] == 0) {
+ /* we could save addrl of the router
+ if (cid == BCMA_CORE_OOB_ROUTER)
+ */
+ bcma_erom_skip_component(bus, eromptr);
+ return -ENXIO;
+ }
+
+ if (bcma_erom_is_bridge(bus, eromptr)) {
+ bcma_erom_skip_component(bus, eromptr);
+ return -ENXIO;
+ }
+
+ if (bcma_find_core_by_index(bus, core_num)) {
+ bcma_erom_skip_component(bus, eromptr);
+ return -ENODEV;
+ }
+
+ if (match && ((match->manuf != BCMA_ANY_MANUF &&
+ match->manuf != core->id.manuf) ||
+ (match->id != BCMA_ANY_ID && match->id != core->id.id) ||
+ (match->rev != BCMA_ANY_REV && match->rev != core->id.rev) ||
+ (match->class != BCMA_ANY_CLASS && match->class != core->id.class)
+ )) {
+ bcma_erom_skip_component(bus, eromptr);
+ return -ENODEV;
+ }
+
+ /* get & parse master ports */
+ for (i = 0; i < ports[0]; i++) {
+ s32 mst_port_d = bcma_erom_get_mst_port(bus, eromptr);
+ if (mst_port_d < 0)
+ return -EILSEQ;
+ }
+
+ /* get & parse slave ports */
+ for (i = 0; i < ports[1]; i++) {
+ for (j = 0; ; j++) {
+ tmp = bcma_erom_get_addr_desc(bus, eromptr,
+ SCAN_ADDR_TYPE_SLAVE, i);
+ if (tmp < 0) {
+ /* no more entries for port _i_ */
+ /* pr_debug("erom: slave port %d "
+ * "has %d descriptors\n", i, j); */
+ break;
+ } else {
+ if (i == 0 && j == 0)
+ core->addr = tmp;
+ }
+ }
+ }
+
+ /* get & parse master wrappers */
+ for (i = 0; i < wrappers[0]; i++) {
+ for (j = 0; ; j++) {
+ tmp = bcma_erom_get_addr_desc(bus, eromptr,
+ SCAN_ADDR_TYPE_MWRAP, i);
+ if (tmp < 0) {
+ /* no more entries for port _i_ */
+ /* pr_debug("erom: master wrapper %d "
+ * "has %d descriptors\n", i, j); */
+ break;
+ } else {
+ if (i == 0 && j == 0)
+ core->wrap = tmp;
+ }
+ }
+ }
+
+ /* get & parse slave wrappers */
+ for (i = 0; i < wrappers[1]; i++) {
+ u8 hack = (ports[1] == 1) ? 0 : 1;
+ for (j = 0; ; j++) {
+ tmp = bcma_erom_get_addr_desc(bus, eromptr,
+ SCAN_ADDR_TYPE_SWRAP, i + hack);
+ if (tmp < 0) {
+ /* no more entries for port _i_ */
+ /* pr_debug("erom: master wrapper %d "
+ * has %d descriptors\n", i, j); */
+ break;
+ } else {
+ if (wrappers[0] == 0 && !i && !j)
+ core->wrap = tmp;
+ }
+ }
+ }
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
+ core->io_addr = ioremap_nocache(core->addr, BCMA_CORE_SIZE);
+ if (!core->io_addr)
+ return -ENOMEM;
+ core->io_wrap = ioremap_nocache(core->wrap, BCMA_CORE_SIZE);
+ if (!core->io_wrap) {
+ iounmap(core->io_addr);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+void bcma_init_bus(struct bcma_bus *bus)
+{
s32 tmp;
- u8 i, j;
- int err;
+ if (bus->init_done)
+ return;
INIT_LIST_HEAD(&bus->cores);
bus->nr_cores = 0;
@@ -222,9 +366,27 @@ int bcma_bus_scan(struct bcma_bus *bus)
bus->chipinfo.id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
bus->chipinfo.rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
bus->chipinfo.pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
+ bus->init_done = true;
+}
+
+int bcma_bus_scan(struct bcma_bus *bus)
+{
+ u32 erombase;
+ u32 __iomem *eromptr, *eromend;
+
+ int err, core_num = 0;
+
+ bcma_init_bus(bus);
erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
- eromptr = bus->mmio;
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
+ eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE);
+ if (!eromptr)
+ return -ENOMEM;
+ } else {
+ eromptr = bus->mmio;
+ }
+
eromend = eromptr + BCMA_CORE_SIZE / sizeof(u32);
bcma_scan_switch_core(bus, erombase);
@@ -236,125 +398,89 @@ int bcma_bus_scan(struct bcma_bus *bus)
INIT_LIST_HEAD(&core->list);
core->bus = bus;
- /* get CIs */
- cia = bcma_erom_get_ci(bus, &eromptr);
- if (cia < 0) {
- bcma_erom_push_ent(&eromptr);
- if (bcma_erom_is_end(bus, &eromptr))
- break;
- err= -EILSEQ;
- goto out;
- }
- cib = bcma_erom_get_ci(bus, &eromptr);
- if (cib < 0) {
- err= -EILSEQ;
- goto out;
- }
-
- /* parse CIs */
- core->id.class = (cia & SCAN_CIA_CLASS) >> SCAN_CIA_CLASS_SHIFT;
- core->id.id = (cia & SCAN_CIA_ID) >> SCAN_CIA_ID_SHIFT;
- core->id.manuf = (cia & SCAN_CIA_MANUF) >> SCAN_CIA_MANUF_SHIFT;
- ports[0] = (cib & SCAN_CIB_NMP) >> SCAN_CIB_NMP_SHIFT;
- ports[1] = (cib & SCAN_CIB_NSP) >> SCAN_CIB_NSP_SHIFT;
- wrappers[0] = (cib & SCAN_CIB_NMW) >> SCAN_CIB_NMW_SHIFT;
- wrappers[1] = (cib & SCAN_CIB_NSW) >> SCAN_CIB_NSW_SHIFT;
- core->id.rev = (cib & SCAN_CIB_REV) >> SCAN_CIB_REV_SHIFT;
-
- if (((core->id.manuf == BCMA_MANUF_ARM) &&
- (core->id.id == 0xFFF)) ||
- (ports[1] == 0)) {
- bcma_erom_skip_component(bus, &eromptr);
+ err = bcma_get_next_core(bus, &eromptr, NULL, core_num, core);
+ if (err == -ENODEV) {
+ core_num++;
continue;
- }
-
- /* check if component is a core at all */
- if (wrappers[0] + wrappers[1] == 0) {
- /* we could save addrl of the router
- if (cid == BCMA_CORE_OOB_ROUTER)
- */
- bcma_erom_skip_component(bus, &eromptr);
+ } else if (err == -ENXIO)
continue;
- }
+ else if (err == -ESPIPE)
+ break;
+ else if (err < 0)
+ return err;
- if (bcma_erom_is_bridge(bus, &eromptr)) {
- bcma_erom_skip_component(bus, &eromptr);
- continue;
- }
+ core->core_index = core_num++;
+ bus->nr_cores++;
- /* get & parse master ports */
- for (i = 0; i < ports[0]; i++) {
- u32 mst_port_d = bcma_erom_get_mst_port(bus, &eromptr);
- if (mst_port_d < 0) {
- err= -EILSEQ;
- goto out;
- }
- }
+ pr_info("Core %d found: %s "
+ "(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
+ core->core_index, bcma_device_name(&core->id),
+ core->id.manuf, core->id.id, core->id.rev,
+ core->id.class);
- /* get & parse slave ports */
- for (i = 0; i < ports[1]; i++) {
- for (j = 0; ; j++) {
- tmp = bcma_erom_get_addr_desc(bus, &eromptr,
- SCAN_ADDR_TYPE_SLAVE, i);
- if (tmp < 0) {
- /* no more entries for port _i_ */
- /* pr_debug("erom: slave port %d "
- * "has %d descriptors\n", i, j); */
- break;
- } else {
- if (i == 0 && j == 0)
- core->addr = tmp;
- }
- }
- }
+ list_add(&core->list, &bus->cores);
+ }
- /* get & parse master wrappers */
- for (i = 0; i < wrappers[0]; i++) {
- for (j = 0; ; j++) {
- tmp = bcma_erom_get_addr_desc(bus, &eromptr,
- SCAN_ADDR_TYPE_MWRAP, i);
- if (tmp < 0) {
- /* no more entries for port _i_ */
- /* pr_debug("erom: master wrapper %d "
- * "has %d descriptors\n", i, j); */
- break;
- } else {
- if (i == 0 && j == 0)
- core->wrap = tmp;
- }
- }
- }
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+ iounmap(eromptr);
- /* get & parse slave wrappers */
- for (i = 0; i < wrappers[1]; i++) {
- u8 hack = (ports[1] == 1) ? 0 : 1;
- for (j = 0; ; j++) {
- tmp = bcma_erom_get_addr_desc(bus, &eromptr,
- SCAN_ADDR_TYPE_SWRAP, i + hack);
- if (tmp < 0) {
- /* no more entries for port _i_ */
- /* pr_debug("erom: master wrapper %d "
- * has %d descriptors\n", i, j); */
- break;
- } else {
- if (wrappers[0] == 0 && !i && !j)
- core->wrap = tmp;
- }
- }
- }
+ return 0;
+}
+
+int __init bcma_bus_scan_early(struct bcma_bus *bus,
+ struct bcma_device_id *match,
+ struct bcma_device *core)
+{
+ u32 erombase;
+ u32 __iomem *eromptr, *eromend;
+
+ int err = -ENODEV;
+ int core_num = 0;
+
+ erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
+ eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE);
+ if (!eromptr)
+ return -ENOMEM;
+ } else {
+ eromptr = bus->mmio;
+ }
+
+ eromend = eromptr + BCMA_CORE_SIZE / sizeof(u32);
+
+ bcma_scan_switch_core(bus, erombase);
+
+ while (eromptr < eromend) {
+ memset(core, 0, sizeof(*core));
+ INIT_LIST_HEAD(&core->list);
+ core->bus = bus;
+ err = bcma_get_next_core(bus, &eromptr, match, core_num, core);
+ if (err == -ENODEV) {
+ core_num++;
+ continue;
+ } else if (err == -ENXIO)
+ continue;
+ else if (err == -ESPIPE)
+ break;
+ else if (err < 0)
+ return err;
+
+ core->core_index = core_num++;
+ bus->nr_cores++;
pr_info("Core %d found: %s "
"(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
- bus->nr_cores, bcma_device_name(&core->id),
+ core->core_index, bcma_device_name(&core->id),
core->id.manuf, core->id.id, core->id.rev,
core->id.class);
- core->core_index = bus->nr_cores++;
list_add(&core->list, &bus->cores);
- continue;
-out:
- return err;
+ err = 0;
+ break;
}
- return 0;
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+ iounmap(eromptr);
+
+ return err;
}
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 8b5b7856abe..d7292390d23 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -133,6 +133,15 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
*(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
}
+
+ bus->sprom.board_rev = sprom[SPOFF(SSB_SPROM8_BOARDREV)];
+
+ bus->sprom.boardflags_lo = sprom[SPOFF(SSB_SPROM8_BFLLO)];
+ bus->sprom.boardflags_hi = sprom[SPOFF(SSB_SPROM8_BFLHI)];
+ bus->sprom.boardflags2_lo = sprom[SPOFF(SSB_SPROM8_BFL2LO)];
+ bus->sprom.boardflags2_hi = sprom[SPOFF(SSB_SPROM8_BFL2HI)];
+
+ bus->sprom.country_code = sprom[SPOFF(SSB_SPROM8_CCODE)];
}
int bcma_sprom_get(struct bcma_bus *bus)
@@ -152,6 +161,9 @@ int bcma_sprom_get(struct bcma_bus *bus)
if (!sprom)
return -ENOMEM;
+ if (bus->chipinfo.id == 0x4331)
+ bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
+
/* Most cards have SPROM moved by additional offset 0x30 (48 dwords).
* According to brcm80211 this applies to cards with PCIe rev >= 6
* TODO: understand this condition and use it */
@@ -159,6 +171,9 @@ int bcma_sprom_get(struct bcma_bus *bus)
BCMA_CC_SPROM_PCIE6;
bcma_sprom_read(bus, offset, sprom);
+ if (bus->chipinfo.id == 0x4331)
+ bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
+
err = bcma_sprom_valid(sprom);
if (err)
goto out;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 7b976296b56..912f585a760 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -378,15 +378,14 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
* thread. As we have no disk yet, we are not in the IO path,
* not even the IO path of the peer. */
bytes = sizeof(struct page *)*want;
- new_pages = kmalloc(bytes, GFP_KERNEL);
+ new_pages = kzalloc(bytes, GFP_KERNEL);
if (!new_pages) {
- new_pages = vmalloc(bytes);
+ new_pages = vzalloc(bytes);
if (!new_pages)
return NULL;
vmalloced = 1;
}
- memset(new_pages, 0, bytes);
if (want >= have) {
for (i = 0; i < have; i++)
new_pages[i] = old_pages[i];
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index ef2ceed3be4..1706d60b8c9 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -28,7 +28,6 @@
#include <linux/compiler.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
@@ -928,7 +927,7 @@ struct drbd_md {
#define NL_INT64(pn,pr,member) __u64 member;
#define NL_BIT(pn,pr,member) unsigned member:1;
#define NL_STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len;
-#include "linux/drbd_nl.h"
+#include <linux/drbd_nl.h>
struct drbd_backing_dev {
struct block_device *backing_bdev;
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 0feab261e29..af2a25049bc 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -94,7 +94,7 @@ static int name ## _from_tags(struct drbd_conf *mdev, \
arg->member ## _len = dlen; \
memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
break;
-#include "linux/drbd_nl.h"
+#include <linux/drbd_nl.h>
/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
@@ -129,7 +129,7 @@ name ## _to_tags(struct drbd_conf *mdev, \
put_unaligned(arg->member ## _len, tags++); \
memcpy(tags, arg->member, arg->member ## _len); \
tags = (unsigned short *)((char *)tags + arg->member ## _len);
-#include "linux/drbd_nl.h"
+#include <linux/drbd_nl.h>
void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 15f65b5f3fc..fe3c3249cec 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -192,7 +192,7 @@ static ssize_t rbd_snap_add(struct device *dev,
const char *buf,
size_t count);
static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev,
- struct rbd_snap *snap);;
+ struct rbd_snap *snap);
static struct rbd_device *dev_to_rbd(struct device *dev)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 2330a9ad5e9..1540792b1e5 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -396,7 +396,7 @@ static int xen_blkbk_map(struct blkif_request *req,
continue;
ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
- blkbk->pending_page(pending_req, i), false);
+ blkbk->pending_page(pending_req, i), NULL);
if (ret) {
pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
(unsigned long)map[i].dev_bus_addr, ret);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 00c57c90e2d..c4bd34063ec 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -27,7 +27,6 @@
#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 7f521d4ac65..c827d737cce 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -81,7 +81,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
.io_port_2 = 0x7a,
};
-static const struct btmrvl_sdio_device btmrvl_sdio_sd6888 = {
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
.helper = "sd8688_helper.bin",
.firmware = "sd8688.bin",
.reg = &btmrvl_reg_8688,
@@ -98,7 +98,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8688 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
- .driver_data = (unsigned long) &btmrvl_sdio_sd6888 },
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8688 },
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 9cbac6b445e..f9b726091ad 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -60,6 +60,9 @@ static struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
+ /* Broadcom SoftSailing reporting vendor specific */
+ { USB_DEVICE(0x05ac, 0x21e1) },
+
/* Apple MacBookPro 7,1 */
{ USB_DEVICE(0x05ac, 0x8213) },
@@ -708,8 +711,7 @@ static int btusb_send_frame(struct sk_buff *skb)
break;
case HCI_ACLDATA_PKT:
- if (!data->bulk_tx_ep || (hdev->conn_hash.acl_num < 1 &&
- hdev->conn_hash.le_num < 1))
+ if (!data->bulk_tx_ep)
return -ENODEV;
urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -1116,7 +1118,7 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
spin_lock_irq(&data->txlock);
- if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) {
+ if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) {
set_bit(BTUSB_SUSPENDING, &data->flags);
spin_unlock_irq(&data->txlock);
} else {
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index f27d0d0816d..4b71647782d 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -171,7 +171,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
}
got_gatt = 1;
- bridge->key_list = vmalloc(PAGE_SIZE * 4);
+ bridge->key_list = vzalloc(PAGE_SIZE * 4);
if (bridge->key_list == NULL) {
dev_err(&bridge->dev->dev,
"can't allocate memory for key lists\n");
@@ -181,7 +181,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
got_keylist = 1;
/* FIXME vmalloc'd memory not guaranteed contiguous */
- memset(bridge->key_list, 0, PAGE_SIZE * 4);
if (bridge->driver->configure()) {
dev_err(&bridge->dev->dev, "error configuring host chipset\n");
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 85151019dde..2774ac1086d 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -30,10 +30,10 @@
/*
* If we have Intel graphics, we're not going to have anything other than
* an Intel IOMMU. So make the correct use of the PCI DMA API contingent
- * on the Intel IOMMU support (CONFIG_DMAR).
+ * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
* Only newer chipsets need to bother with this, of course.
*/
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index a7346ab97a3..f4837a893df 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -40,10 +40,7 @@
#define APM_MINOR_DEV 134
/*
- * See Documentation/Config.help for the configuration options.
- *
- * Various options can be changed at boot time as follows:
- * (We allow underscores for compatibility with the modules code)
+ * One option can be changed at boot time as follows:
* apm=on/off enable/disable APM
*/
@@ -300,17 +297,13 @@ apm_ioctl(struct file *filp, u_int cmd, u_long arg)
/*
* Wait for the suspend/resume to complete. If there
* are pending acknowledges, we wait here for them.
+ * wait_event_freezable() is interruptible and pending
+ * signal can cause busy looping. We aren't doing
+ * anything critical, chill a bit on each iteration.
*/
- freezer_do_not_count();
-
- wait_event(apm_suspend_waitqueue,
- as->suspend_state == SUSPEND_DONE);
-
- /*
- * Since we are waiting until the suspend is done, the
- * try_to_freeze() in freezer_count() will not trigger
- */
- freezer_count();
+ while (wait_event_freezable(apm_suspend_waitqueue,
+ as->suspend_state == SUSPEND_DONE))
+ msleep(10);
break;
case SUSPEND_ACKTO:
as->suspend_result = -ETIMEDOUT;
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index b33e8ea314e..b6de2c04714 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -324,13 +324,12 @@ static int __init raw_init(void)
max_raw_minors = MAX_RAW_MINORS;
}
- raw_devices = vmalloc(sizeof(struct raw_device_data) * max_raw_minors);
+ raw_devices = vzalloc(sizeof(struct raw_device_data) * max_raw_minors);
if (!raw_devices) {
printk(KERN_ERR "Not enough memory for raw device structures\n");
ret = -ENOMEM;
goto error;
}
- memset(raw_devices, 0, sizeof(struct raw_device_data) * max_raw_minors);
ret = register_chrdev_region(dev, max_raw_minors, "raw");
if (ret)
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index dfa8b3062fd..ccd124ab7ca 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -80,6 +80,7 @@
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
+#include <linux/ratelimit.h>
#include <asm/current.h>
#include <asm/system.h>
@@ -1195,10 +1196,8 @@ static void rtc_dropped_irq(unsigned long data)
spin_unlock_irq(&rtc_lock);
- if (printk_ratelimit()) {
- printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
- freq);
- }
+ printk_ratelimited(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
+ freq);
/* Now we have new data */
wake_up_interruptible(&rtc_wait);
diff --git a/drivers/char/scc.h b/drivers/char/scc.h
deleted file mode 100644
index 341b1142bea..00000000000
--- a/drivers/char/scc.h
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * atari_SCC.h: Definitions for the Am8530 Serial Communications Controller
- *
- * Copyright 1994 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
- */
-
-
-#ifndef _SCC_H
-#define _SCC_H
-
-#include <linux/delay.h>
-
-/* Special configuration ioctls for the Atari SCC5380 Serial
- * Communications Controller
- */
-
-/* ioctl command codes */
-
-#define TIOCGATSCC 0x54c0 /* get SCC configuration */
-#define TIOCSATSCC 0x54c1 /* set SCC configuration */
-#define TIOCDATSCC 0x54c2 /* reset configuration to defaults */
-
-/* Clock sources */
-
-#define CLK_RTxC 0
-#define CLK_TRxC 1
-#define CLK_PCLK 2
-
-/* baud_bases for the common clocks in the Atari. These are the real
- * frequencies divided by 16.
- */
-
-#define SCC_BAUD_BASE_TIMC 19200 /* 0.3072 MHz from TT-MFP, Timer C */
-#define SCC_BAUD_BASE_BCLK 153600 /* 2.4576 MHz */
-#define SCC_BAUD_BASE_PCLK4 229500 /* 3.6720 MHz */
-#define SCC_BAUD_BASE_PCLK 503374 /* 8.0539763 MHz */
-#define SCC_BAUD_BASE_NONE 0 /* for not connected or unused
- * clock sources */
-
-/* The SCC clock configuration structure */
-
-struct scc_clock_config {
- unsigned RTxC_base; /* base_baud of RTxC */
- unsigned TRxC_base; /* base_baud of TRxC */
- unsigned PCLK_base; /* base_baud of PCLK, both channels! */
- struct {
- unsigned clksrc; /* CLK_RTxC, CLK_TRxC or CLK_PCLK */
- unsigned divisor; /* divisor for base baud, valid values:
- * see below */
- } baud_table[17]; /* For 50, 75, 110, 135, 150, 200, 300,
- * 600, 1200, 1800, 2400, 4800, 9600,
- * 19200, 38400, 57600 and 115200 bps.
- * The last two could be replaced by
- * other rates > 38400 if they're not
- * possible.
- */
-};
-
-/* The following divisors are valid:
- *
- * - CLK_RTxC: 1 or even (1, 2 and 4 are the direct modes, > 4 use
- * the BRG)
- *
- * - CLK_TRxC: 1, 2 or 4 (no BRG, only direct modes possible)
- *
- * - CLK_PCLK: >= 4 and even (no direct modes, only BRG)
- *
- */
-
-struct scc_port {
- struct gs_port gs;
- volatile unsigned char *ctrlp;
- volatile unsigned char *datap;
- int x_char; /* xon/xoff character */
- int c_dcd;
- int channel;
- struct scc_port *port_a; /* Reference to port A and B */
- struct scc_port *port_b; /* structs for reg access */
-};
-
-#define SCC_MAGIC 0x52696368
-
-/***********************************************************************/
-/* */
-/* Register Names */
-/* */
-/***********************************************************************/
-
-/* The SCC documentation gives no explicit names to the registers,
- * they're just called WR0..15 and RR0..15. To make the source code
- * better readable and make the transparent write reg read access (see
- * below) possible, I christen them here with self-invented names.
- * Note that (real) read registers are assigned numbers 16..31. WR7'
- * has number 33.
- */
-
-#define COMMAND_REG 0 /* wo */
-#define INT_AND_DMA_REG 1 /* wo */
-#define INT_VECTOR_REG 2 /* rw, common to both channels */
-#define RX_CTRL_REG 3 /* rw */
-#define AUX1_CTRL_REG 4 /* rw */
-#define TX_CTRL_REG 5 /* rw */
-#define SYNC_ADR_REG 6 /* wo */
-#define SYNC_CHAR_REG 7 /* wo */
-#define SDLC_OPTION_REG 33 /* wo */
-#define TX_DATA_REG 8 /* wo */
-#define MASTER_INT_CTRL 9 /* wo, common to both channels */
-#define AUX2_CTRL_REG 10 /* rw */
-#define CLK_CTRL_REG 11 /* wo */
-#define TIMER_LOW_REG 12 /* rw */
-#define TIMER_HIGH_REG 13 /* rw */
-#define DPLL_CTRL_REG 14 /* wo */
-#define INT_CTRL_REG 15 /* rw */
-
-#define STATUS_REG 16 /* ro */
-#define SPCOND_STATUS_REG 17 /* wo */
-/* RR2 is WR2 for Channel A, Channel B gives vector + current status: */
-#define CURR_VECTOR_REG 18 /* Ch. B only, Ch. A for rw */
-#define INT_PENDING_REG 19 /* Channel A only! */
-/* RR4 is WR4, if b6(MR7') == 1 */
-/* RR5 is WR5, if b6(MR7') == 1 */
-#define FS_FIFO_LOW_REG 22 /* ro */
-#define FS_FIFO_HIGH_REG 23 /* ro */
-#define RX_DATA_REG 24 /* ro */
-/* RR9 is WR3, if b6(MR7') == 1 */
-#define DPLL_STATUS_REG 26 /* ro */
-/* RR11 is WR10, if b6(MR7') == 1 */
-/* RR12 is WR12 */
-/* RR13 is WR13 */
-/* RR14 not present */
-/* RR15 is WR15 */
-
-
-/***********************************************************************/
-/* */
-/* Register Values */
-/* */
-/***********************************************************************/
-
-
-/* WR0: COMMAND_REG "CR" */
-
-#define CR_RX_CRC_RESET 0x40
-#define CR_TX_CRC_RESET 0x80
-#define CR_TX_UNDERRUN_RESET 0xc0
-
-#define CR_EXTSTAT_RESET 0x10
-#define CR_SEND_ABORT 0x18
-#define CR_ENAB_INT_NEXT_RX 0x20
-#define CR_TX_PENDING_RESET 0x28
-#define CR_ERROR_RESET 0x30
-#define CR_HIGHEST_IUS_RESET 0x38
-
-
-/* WR1: INT_AND_DMA_REG "IDR" */
-
-#define IDR_EXTSTAT_INT_ENAB 0x01
-#define IDR_TX_INT_ENAB 0x02
-#define IDR_PARERR_AS_SPCOND 0x04
-
-#define IDR_RX_INT_DISAB 0x00
-#define IDR_RX_INT_FIRST 0x08
-#define IDR_RX_INT_ALL 0x10
-#define IDR_RX_INT_SPCOND 0x18
-#define IDR_RX_INT_MASK 0x18
-
-#define IDR_WAITREQ_RX 0x20
-#define IDR_WAITREQ_IS_REQ 0x40
-#define IDR_WAITREQ_ENAB 0x80
-
-
-/* WR3: RX_CTRL_REG "RCR" */
-
-#define RCR_RX_ENAB 0x01
-#define RCR_DISCARD_SYNC_CHARS 0x02
-#define RCR_ADDR_SEARCH 0x04
-#define RCR_CRC_ENAB 0x08
-#define RCR_SEARCH_MODE 0x10
-#define RCR_AUTO_ENAB_MODE 0x20
-
-#define RCR_CHSIZE_MASK 0xc0
-#define RCR_CHSIZE_5 0x00
-#define RCR_CHSIZE_6 0x40
-#define RCR_CHSIZE_7 0x80
-#define RCR_CHSIZE_8 0xc0
-
-
-/* WR4: AUX1_CTRL_REG "A1CR" */
-
-#define A1CR_PARITY_MASK 0x03
-#define A1CR_PARITY_NONE 0x00
-#define A1CR_PARITY_ODD 0x01
-#define A1CR_PARITY_EVEN 0x03
-
-#define A1CR_MODE_MASK 0x0c
-#define A1CR_MODE_SYNCR 0x00
-#define A1CR_MODE_ASYNC_1 0x04
-#define A1CR_MODE_ASYNC_15 0x08
-#define A1CR_MODE_ASYNC_2 0x0c
-
-#define A1CR_SYNCR_MODE_MASK 0x30
-#define A1CR_SYNCR_MONOSYNC 0x00
-#define A1CR_SYNCR_BISYNC 0x10
-#define A1CR_SYNCR_SDLC 0x20
-#define A1CR_SYNCR_EXTCSYNC 0x30
-
-#define A1CR_CLKMODE_MASK 0xc0
-#define A1CR_CLKMODE_x1 0x00
-#define A1CR_CLKMODE_x16 0x40
-#define A1CR_CLKMODE_x32 0x80
-#define A1CR_CLKMODE_x64 0xc0
-
-
-/* WR5: TX_CTRL_REG "TCR" */
-
-#define TCR_TX_CRC_ENAB 0x01
-#define TCR_RTS 0x02
-#define TCR_USE_CRC_CCITT 0x00
-#define TCR_USE_CRC_16 0x04
-#define TCR_TX_ENAB 0x08
-#define TCR_SEND_BREAK 0x10
-
-#define TCR_CHSIZE_MASK 0x60
-#define TCR_CHSIZE_5 0x00
-#define TCR_CHSIZE_6 0x20
-#define TCR_CHSIZE_7 0x40
-#define TCR_CHSIZE_8 0x60
-
-#define TCR_DTR 0x80
-
-
-/* WR7': SLDC_OPTION_REG "SOR" */
-
-#define SOR_AUTO_TX_ENAB 0x01
-#define SOR_AUTO_EOM_RESET 0x02
-#define SOR_AUTO_RTS_MODE 0x04
-#define SOR_NRZI_DISAB_HIGH 0x08
-#define SOR_ALT_DTRREQ_TIMING 0x10
-#define SOR_READ_CRC_CHARS 0x20
-#define SOR_EXTENDED_REG_ACCESS 0x40
-
-
-/* WR9: MASTER_INT_CTRL "MIC" */
-
-#define MIC_VEC_INCL_STAT 0x01
-#define MIC_NO_VECTOR 0x02
-#define MIC_DISAB_LOWER_CHAIN 0x04
-#define MIC_MASTER_INT_ENAB 0x08
-#define MIC_STATUS_HIGH 0x10
-#define MIC_IGN_INTACK 0x20
-
-#define MIC_NO_RESET 0x00
-#define MIC_CH_A_RESET 0x40
-#define MIC_CH_B_RESET 0x80
-#define MIC_HARD_RESET 0xc0
-
-
-/* WR10: AUX2_CTRL_REG "A2CR" */
-
-#define A2CR_SYNC_6 0x01
-#define A2CR_LOOP_MODE 0x02
-#define A2CR_ABORT_ON_UNDERRUN 0x04
-#define A2CR_MARK_IDLE 0x08
-#define A2CR_GO_ACTIVE_ON_POLL 0x10
-
-#define A2CR_CODING_MASK 0x60
-#define A2CR_CODING_NRZ 0x00
-#define A2CR_CODING_NRZI 0x20
-#define A2CR_CODING_FM1 0x40
-#define A2CR_CODING_FM0 0x60
-
-#define A2CR_PRESET_CRC_1 0x80
-
-
-/* WR11: CLK_CTRL_REG "CCR" */
-
-#define CCR_TRxCOUT_MASK 0x03
-#define CCR_TRxCOUT_XTAL 0x00
-#define CCR_TRxCOUT_TXCLK 0x01
-#define CCR_TRxCOUT_BRG 0x02
-#define CCR_TRxCOUT_DPLL 0x03
-
-#define CCR_TRxC_OUTPUT 0x04
-
-#define CCR_TXCLK_MASK 0x18
-#define CCR_TXCLK_RTxC 0x00
-#define CCR_TXCLK_TRxC 0x08
-#define CCR_TXCLK_BRG 0x10
-#define CCR_TXCLK_DPLL 0x18
-
-#define CCR_RXCLK_MASK 0x60
-#define CCR_RXCLK_RTxC 0x00
-#define CCR_RXCLK_TRxC 0x20
-#define CCR_RXCLK_BRG 0x40
-#define CCR_RXCLK_DPLL 0x60
-
-#define CCR_RTxC_XTAL 0x80
-
-
-/* WR14: DPLL_CTRL_REG "DCR" */
-
-#define DCR_BRG_ENAB 0x01
-#define DCR_BRG_USE_PCLK 0x02
-#define DCR_DTRREQ_IS_REQ 0x04
-#define DCR_AUTO_ECHO 0x08
-#define DCR_LOCAL_LOOPBACK 0x10
-
-#define DCR_DPLL_EDGE_SEARCH 0x20
-#define DCR_DPLL_ERR_RESET 0x40
-#define DCR_DPLL_DISAB 0x60
-#define DCR_DPLL_CLK_BRG 0x80
-#define DCR_DPLL_CLK_RTxC 0xa0
-#define DCR_DPLL_FM 0xc0
-#define DCR_DPLL_NRZI 0xe0
-
-
-/* WR15: INT_CTRL_REG "ICR" */
-
-#define ICR_OPTIONREG_SELECT 0x01
-#define ICR_ENAB_BRG_ZERO_INT 0x02
-#define ICR_USE_FS_FIFO 0x04
-#define ICR_ENAB_DCD_INT 0x08
-#define ICR_ENAB_SYNC_INT 0x10
-#define ICR_ENAB_CTS_INT 0x20
-#define ICR_ENAB_UNDERRUN_INT 0x40
-#define ICR_ENAB_BREAK_INT 0x80
-
-
-/* RR0: STATUS_REG "SR" */
-
-#define SR_CHAR_AVAIL 0x01
-#define SR_BRG_ZERO 0x02
-#define SR_TX_BUF_EMPTY 0x04
-#define SR_DCD 0x08
-#define SR_SYNC_ABORT 0x10
-#define SR_CTS 0x20
-#define SR_TX_UNDERRUN 0x40
-#define SR_BREAK 0x80
-
-
-/* RR1: SPCOND_STATUS_REG "SCSR" */
-
-#define SCSR_ALL_SENT 0x01
-#define SCSR_RESIDUAL_MASK 0x0e
-#define SCSR_PARITY_ERR 0x10
-#define SCSR_RX_OVERRUN 0x20
-#define SCSR_CRC_FRAME_ERR 0x40
-#define SCSR_END_OF_FRAME 0x80
-
-
-/* RR3: INT_PENDING_REG "IPR" */
-
-#define IPR_B_EXTSTAT 0x01
-#define IPR_B_TX 0x02
-#define IPR_B_RX 0x04
-#define IPR_A_EXTSTAT 0x08
-#define IPR_A_TX 0x10
-#define IPR_A_RX 0x20
-
-
-/* RR7: FS_FIFO_HIGH_REG "FFHR" */
-
-#define FFHR_CNT_MASK 0x3f
-#define FFHR_IS_FROM_FIFO 0x40
-#define FFHR_FIFO_OVERRUN 0x80
-
-
-/* RR10: DPLL_STATUS_REG "DSR" */
-
-#define DSR_ON_LOOP 0x02
-#define DSR_ON_LOOP_SENDING 0x10
-#define DSR_TWO_CLK_MISSING 0x40
-#define DSR_ONE_CLK_MISSING 0x80
-
-/***********************************************************************/
-/* */
-/* Register Access */
-/* */
-/***********************************************************************/
-
-
-/* The SCC needs 3.5 PCLK cycles recovery time between to register
- * accesses. PCLK runs with 8 MHz on an Atari, so this delay is 3.5 *
- * 125 ns = 437.5 ns. This is too short for udelay().
- * 10/16/95: A tstb st_mfp.par_dt_reg takes 600ns (sure?) and thus should be
- * quite right
- */
-
-#define scc_reg_delay() \
- do { \
- if (MACH_IS_MVME16x || MACH_IS_BVME6000 || MACH_IS_MVME147) \
- __asm__ __volatile__ ( " nop; nop"); \
- else if (MACH_IS_ATARI) \
- __asm__ __volatile__ ( "tstb %0" : : "g" (*_scc_del) : "cc" );\
- } while (0)
-
-static unsigned char scc_shadow[2][16];
-
-/* The following functions should relax the somehow complicated
- * register access of the SCC. _SCCwrite() stores all written values
- * (except for WR0 and WR8) in shadow registers for later recall. This
- * removes the burden of remembering written values as needed. The
- * extra work of storing the value doesn't count, since a delay is
- * needed after a SCC access anyway. Additionally, _SCCwrite() manages
- * writes to WR0 and WR8 differently, because these can be accessed
- * directly with less overhead. Another special case are WR7 and WR7'.
- * _SCCwrite automatically checks what of this registers is selected
- * and changes b0 of WR15 if needed.
- *
- * _SCCread() for standard read registers is straightforward, except
- * for RR2 (split into two "virtual" registers: one for the value
- * written to WR2 (from the shadow) and one for the vector including
- * status from RR2, Ch. B) and RR3. The latter must be read from
- * Channel A, because it reads as all zeros on Ch. B. RR0 and RR8 can
- * be accessed directly as before.
- *
- * The two inline function contain complicated switch statements. But
- * I rely on regno and final_delay being constants, so gcc can reduce
- * the whole stuff to just some assembler statements.
- *
- * _SCCwrite and _SCCread aren't intended to be used directly under
- * normal circumstances. The macros SCCread[_ND] and SCCwrite[_ND] are
- * for that purpose. They assume that a local variable 'port' is
- * declared and pointing to the port's scc_struct entry. The
- * variants with "_NB" appended should be used if no other SCC
- * accesses follow immediately (within 0.5 usecs). They just skip the
- * final delay nops.
- *
- * Please note that accesses to SCC registers should only take place
- * when interrupts are turned off (at least if SCC interrupts are
- * enabled). Otherwise, an interrupt could interfere with the
- * two-stage accessing process.
- *
- */
-
-
-static __inline__ void _SCCwrite(
- struct scc_port *port,
- unsigned char *shadow,
- volatile unsigned char *_scc_del,
- int regno,
- unsigned char val, int final_delay )
-{
- switch( regno ) {
-
- case COMMAND_REG:
- /* WR0 can be written directly without pointing */
- *port->ctrlp = val;
- break;
-
- case SYNC_CHAR_REG:
- /* For WR7, first set b0 of WR15 to 0, if needed */
- if (shadow[INT_CTRL_REG] & ICR_OPTIONREG_SELECT) {
- *port->ctrlp = 15;
- shadow[INT_CTRL_REG] &= ~ICR_OPTIONREG_SELECT;
- scc_reg_delay();
- *port->ctrlp = shadow[INT_CTRL_REG];
- scc_reg_delay();
- }
- goto normal_case;
-
- case SDLC_OPTION_REG:
- /* For WR7', first set b0 of WR15 to 1, if needed */
- if (!(shadow[INT_CTRL_REG] & ICR_OPTIONREG_SELECT)) {
- *port->ctrlp = 15;
- shadow[INT_CTRL_REG] |= ICR_OPTIONREG_SELECT;
- scc_reg_delay();
- *port->ctrlp = shadow[INT_CTRL_REG];
- scc_reg_delay();
- }
- *port->ctrlp = 7;
- shadow[8] = val; /* WR7' shadowed at WR8 */
- scc_reg_delay();
- *port->ctrlp = val;
- break;
-
- case TX_DATA_REG: /* WR8 */
- /* TX_DATA_REG can be accessed directly on some h/w */
- if (MACH_IS_MVME16x || MACH_IS_BVME6000 || MACH_IS_MVME147)
- {
- *port->ctrlp = regno;
- scc_reg_delay();
- *port->ctrlp = val;
- }
- else
- *port->datap = val;
- break;
-
- case MASTER_INT_CTRL:
- *port->ctrlp = regno;
- val &= 0x3f; /* bits 6..7 are the reset commands */
- scc_shadow[0][regno] = val;
- scc_reg_delay();
- *port->ctrlp = val;
- break;
-
- case DPLL_CTRL_REG:
- *port->ctrlp = regno;
- val &= 0x1f; /* bits 5..7 are the DPLL commands */
- shadow[regno] = val;
- scc_reg_delay();
- *port->ctrlp = val;
- break;
-
- case 1 ... 6:
- case 10 ... 13:
- case 15:
- normal_case:
- *port->ctrlp = regno;
- shadow[regno] = val;
- scc_reg_delay();
- *port->ctrlp = val;
- break;
-
- default:
- printk( "Bad SCC write access to WR%d\n", regno );
- break;
-
- }
-
- if (final_delay)
- scc_reg_delay();
-}
-
-
-static __inline__ unsigned char _SCCread(
- struct scc_port *port,
- unsigned char *shadow,
- volatile unsigned char *_scc_del,
- int regno, int final_delay )
-{
- unsigned char rv;
-
- switch( regno ) {
-
- /* --- real read registers --- */
- case STATUS_REG:
- rv = *port->ctrlp;
- break;
-
- case INT_PENDING_REG:
- /* RR3: read only from Channel A! */
- port = port->port_a;
- goto normal_case;
-
- case RX_DATA_REG:
- /* RR8 can be accessed directly on some h/w */
- if (MACH_IS_MVME16x || MACH_IS_BVME6000 || MACH_IS_MVME147)
- {
- *port->ctrlp = 8;
- scc_reg_delay();
- rv = *port->ctrlp;
- }
- else
- rv = *port->datap;
- break;
-
- case CURR_VECTOR_REG:
- /* RR2 (vector including status) from Ch. B */
- port = port->port_b;
- goto normal_case;
-
- /* --- reading write registers: access the shadow --- */
- case 1 ... 7:
- case 10 ... 15:
- return shadow[regno]; /* no final delay! */
-
- /* WR7' is special, because it is shadowed at the place of WR8 */
- case SDLC_OPTION_REG:
- return shadow[8]; /* no final delay! */
-
- /* WR9 is special too, because it is common for both channels */
- case MASTER_INT_CTRL:
- return scc_shadow[0][9]; /* no final delay! */
-
- default:
- printk( "Bad SCC read access to %cR%d\n", (regno & 16) ? 'R' : 'W',
- regno & ~16 );
- break;
-
- case SPCOND_STATUS_REG:
- case FS_FIFO_LOW_REG:
- case FS_FIFO_HIGH_REG:
- case DPLL_STATUS_REG:
- normal_case:
- *port->ctrlp = regno & 0x0f;
- scc_reg_delay();
- rv = *port->ctrlp;
- break;
-
- }
-
- if (final_delay)
- scc_reg_delay();
- return rv;
-}
-
-#define SCC_ACCESS_INIT(port) \
- unsigned char *_scc_shadow = &scc_shadow[port->channel][0]
-
-#define SCCwrite(reg,val) _SCCwrite(port,_scc_shadow,scc_del,(reg),(val),1)
-#define SCCwrite_NB(reg,val) _SCCwrite(port,_scc_shadow,scc_del,(reg),(val),0)
-#define SCCread(reg) _SCCread(port,_scc_shadow,scc_del,(reg),1)
-#define SCCread_NB(reg) _SCCread(port,_scc_shadow,scc_del,(reg),0)
-
-#define SCCmod(reg,and,or) SCCwrite((reg),(SCCread(reg)&(and))|(or))
-
-#endif /* _SCC_H */
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 9ca5c021d0b..361a1dff8f7 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -966,6 +966,9 @@ ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
{
struct tpm_chip *chip = dev_get_drvdata(dev);
+ if (chip->vendor.duration[TPM_LONG] == 0)
+ return 0;
+
return sprintf(buf, "%d %d %d [%s]\n",
jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index e55814bc0d0..77e1e6cd66c 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -205,6 +205,32 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
+void proc_comm_connector(struct task_struct *task)
+{
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+ __u8 buffer[CN_PROC_MSG_SIZE];
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->what = PROC_EVENT_COMM;
+ ev->event_data.comm.process_pid = task->pid;
+ ev->event_data.comm.process_tgid = task->tgid;
+ get_task_comm(ev->event_data.comm.comm, task);
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+}
+
void proc_exit_connector(struct task_struct *task)
{
struct cn_msg *msg;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 891360edecd..629b3ec698e 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -725,7 +725,7 @@ static int __init cpufreq_gov_dbs_init(void)
dbs_tuners_ins.down_differential =
MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
/*
- * In no_hz/micro accounting case we set the minimum frequency
+ * In nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred
* timer might skip some samples if idle/sleeping as needed.
*/
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index d4c54237288..0df01411009 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -12,7 +12,7 @@
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 12c98900dcf..f62fde21e96 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/cpuidle.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index c47f3d09c1e..3600f1955e4 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/cpuidle.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 4abd089a094..25ec0bb0519 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -35,7 +35,7 @@ MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
-static DEFINE_SPINLOCK(dca_lock);
+static DEFINE_RAW_SPINLOCK(dca_lock);
static LIST_HEAD(dca_domains);
@@ -101,10 +101,10 @@ static void unregister_dca_providers(void)
INIT_LIST_HEAD(&unregistered_providers);
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
if (list_empty(&dca_domains)) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return;
}
@@ -116,7 +116,7 @@ static void unregister_dca_providers(void)
dca_free_domain(domain);
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
dca_sysfs_remove_provider(dca);
@@ -144,13 +144,8 @@ static struct dca_domain *dca_get_domain(struct device *dev)
domain = dca_find_domain(rc);
if (!domain) {
- if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+ if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
dca_providers_blocked = 1;
- } else {
- domain = dca_allocate_domain(rc);
- if (domain)
- list_add(&domain->node, &dca_domains);
- }
}
return domain;
@@ -198,19 +193,19 @@ int dca_add_requester(struct device *dev)
if (!dev)
return -EFAULT;
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
/* check if the requester has not been added already */
dca = dca_find_provider_by_dev(dev);
if (dca) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return -EEXIST;
}
pci_rc = dca_pci_rc_from_dev(dev);
domain = dca_find_domain(pci_rc);
if (!domain) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return -ENODEV;
}
@@ -220,17 +215,17 @@ int dca_add_requester(struct device *dev)
break;
}
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
if (slot < 0)
return slot;
err = dca_sysfs_add_req(dca, dev, slot);
if (err) {
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
if (dca == dca_find_provider_by_dev(dev))
dca->ops->remove_requester(dca, dev);
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return err;
}
@@ -251,14 +246,14 @@ int dca_remove_requester(struct device *dev)
if (!dev)
return -EFAULT;
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
dca = dca_find_provider_by_dev(dev);
if (!dca) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return -ENODEV;
}
slot = dca->ops->remove_requester(dca, dev);
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
if (slot < 0)
return slot;
@@ -280,16 +275,16 @@ u8 dca_common_get_tag(struct device *dev, int cpu)
u8 tag;
unsigned long flags;
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
dca = dca_find_provider_by_dev(dev);
if (!dca) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return -ENODEV;
}
tag = dca->ops->get_tag(dca, dev, cpu);
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return tag;
}
@@ -360,36 +355,51 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
int err;
unsigned long flags;
- struct dca_domain *domain;
+ struct dca_domain *domain, *newdomain = NULL;
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
if (dca_providers_blocked) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return -ENODEV;
}
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
err = dca_sysfs_add_provider(dca, dev);
if (err)
return err;
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
domain = dca_get_domain(dev);
if (!domain) {
+ struct pci_bus *rc;
+
if (dca_providers_blocked) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
dca_sysfs_remove_provider(dca);
unregister_dca_providers();
- } else {
- spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ rc = dca_pci_rc_from_dev(dev);
+ newdomain = dca_allocate_domain(rc);
+ if (!newdomain)
+ return -ENODEV;
+ raw_spin_lock_irqsave(&dca_lock, flags);
+ /* Recheck, we might have raced after dropping the lock */
+ domain = dca_get_domain(dev);
+ if (!domain) {
+ domain = newdomain;
+ newdomain = NULL;
+ list_add(&domain->node, &dca_domains);
}
- return -ENODEV;
}
list_add(&dca->node, &domain->dca_providers);
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
blocking_notifier_call_chain(&dca_provider_chain,
DCA_PROVIDER_ADD, NULL);
+ kfree(newdomain);
return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
@@ -407,7 +417,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
blocking_notifier_call_chain(&dca_provider_chain,
DCA_PROVIDER_REMOVE, NULL);
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
list_del(&dca->node);
@@ -416,7 +426,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
if (list_empty(&domain->dca_providers))
dca_free_domain(domain);
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
dca_sysfs_remove_provider(dca);
}
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
new file mode 100644
index 00000000000..643b055ed3c
--- /dev/null
+++ b/drivers/devfreq/Kconfig
@@ -0,0 +1,75 @@
+config ARCH_HAS_DEVFREQ
+ bool
+ depends on ARCH_HAS_OPP
+ help
+ Denotes that the architecture supports DEVFREQ. If the architecture
+ supports multiple OPP entries per device and the frequency of the
+ devices with OPPs may be altered dynamically, the architecture
+ supports DEVFREQ.
+
+menuconfig PM_DEVFREQ
+ bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
+ depends on PM_OPP && ARCH_HAS_DEVFREQ
+ help
+ With OPP support, a device may have a list of frequencies and
+ voltages available. DEVFREQ, a generic DVFS framework can be
+ registered for a device with OPP support in order to let the
+ governor provided to DEVFREQ choose an operating frequency
+ based on the OPP's list and the policy given with DEVFREQ.
+
+ Each device may have its own governor and policy. DEVFREQ can
+ reevaluate the device state periodically and/or based on the
+ OPP list changes (each frequency/voltage pair in OPP may be
+ disabled or enabled).
+
+ Like some CPUs with CPUFREQ, a device may have multiple clocks.
+ However, because the clock frequencies of a single device are
+ determined by the single device's state, an instance of DEVFREQ
+ is attached to a single device and returns a "representative"
+ clock frequency from the OPP of the device, which is also attached
+ to a device by 1-to-1. The device registering DEVFREQ takes the
+ responsiblity to "interpret" the frequency listed in OPP and
+ to set its every clock accordingly with the "target" callback
+ given to DEVFREQ.
+
+if PM_DEVFREQ
+
+comment "DEVFREQ Governors"
+
+config DEVFREQ_GOV_SIMPLE_ONDEMAND
+ bool "Simple Ondemand"
+ help
+ Chooses frequency based on the recent load on the device. Works
+ similar as ONDEMAND governor of CPUFREQ does. A device with
+ Simple-Ondemand should be able to provide busy/total counter
+ values that imply the usage rate. A device may provide tuned
+ values to the governor with data field at devfreq_add_device().
+
+config DEVFREQ_GOV_PERFORMANCE
+ bool "Performance"
+ help
+ Sets the frequency at the maximum available frequency.
+ This governor always returns UINT_MAX as frequency so that
+ the DEVFREQ framework returns the highest frequency available
+ at any time.
+
+config DEVFREQ_GOV_POWERSAVE
+ bool "Powersave"
+ help
+ Sets the frequency at the minimum available frequency.
+ This governor always returns 0 as frequency so that
+ the DEVFREQ framework returns the lowest frequency available
+ at any time.
+
+config DEVFREQ_GOV_USERSPACE
+ bool "Userspace"
+ help
+ Sets the frequency at the user specified one.
+ This governor returns the user configured frequency if there
+ has been an input to /sys/devices/.../power/devfreq_set_freq.
+ Otherwise, the governor does not change the frequnecy
+ given at the initialization.
+
+comment "DEVFREQ Drivers"
+
+endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
new file mode 100644
index 00000000000..4564a89e970
--- /dev/null
+++ b/drivers/devfreq/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_PM_DEVFREQ) += devfreq.o
+obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
+obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
+obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
+obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
new file mode 100644
index 00000000000..5d15b812377
--- /dev/null
+++ b/drivers/devfreq/devfreq.c
@@ -0,0 +1,601 @@
+/*
+ * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
+ * for Non-CPU Devices.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/opp.h>
+#include <linux/devfreq.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/printk.h>
+#include <linux/hrtimer.h>
+#include "governor.h"
+
+struct class *devfreq_class;
+
+/*
+ * devfreq_work periodically monitors every registered device.
+ * The minimum polling interval is one jiffy. The polling interval is
+ * determined by the minimum polling period among all polling devfreq
+ * devices. The resolution of polling interval is one jiffy.
+ */
+static bool polling;
+static struct workqueue_struct *devfreq_wq;
+static struct delayed_work devfreq_work;
+
+/* wait removing if this is to be removed */
+static struct devfreq *wait_remove_device;
+
+/* The list of all device-devfreq */
+static LIST_HEAD(devfreq_list);
+static DEFINE_MUTEX(devfreq_list_lock);
+
+/**
+ * find_device_devfreq() - find devfreq struct using device pointer
+ * @dev: device pointer used to lookup device devfreq.
+ *
+ * Search the list of device devfreqs and return the matched device's
+ * devfreq info. devfreq_list_lock should be held by the caller.
+ */
+static struct devfreq *find_device_devfreq(struct device *dev)
+{
+ struct devfreq *tmp_devfreq;
+
+ if (unlikely(IS_ERR_OR_NULL(dev))) {
+ pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ WARN(!mutex_is_locked(&devfreq_list_lock),
+ "devfreq_list_lock must be locked.");
+
+ list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
+ if (tmp_devfreq->dev.parent == dev)
+ return tmp_devfreq;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+/**
+ * update_devfreq() - Reevaluate the device and configure frequency.
+ * @devfreq: the devfreq instance.
+ *
+ * Note: Lock devfreq->lock before calling update_devfreq
+ * This function is exported for governors.
+ */
+int update_devfreq(struct devfreq *devfreq)
+{
+ unsigned long freq;
+ int err = 0;
+
+ if (!mutex_is_locked(&devfreq->lock)) {
+ WARN(true, "devfreq->lock must be locked by the caller.\n");
+ return -EINVAL;
+ }
+
+ /* Reevaluate the proper frequency */
+ err = devfreq->governor->get_target_freq(devfreq, &freq);
+ if (err)
+ return err;
+
+ err = devfreq->profile->target(devfreq->dev.parent, &freq);
+ if (err)
+ return err;
+
+ devfreq->previous_freq = freq;
+ return err;
+}
+
+/**
+ * devfreq_notifier_call() - Notify that the device frequency requirements
+ * has been changed out of devfreq framework.
+ * @nb the notifier_block (supposed to be devfreq->nb)
+ * @type not used
+ * @devp not used
+ *
+ * Called by a notifier that uses devfreq->nb.
+ */
+static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
+ void *devp)
+{
+ struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
+ int ret;
+
+ mutex_lock(&devfreq->lock);
+ ret = update_devfreq(devfreq);
+ mutex_unlock(&devfreq->lock);
+
+ return ret;
+}
+
+/**
+ * _remove_devfreq() - Remove devfreq from the device.
+ * @devfreq: the devfreq struct
+ * @skip: skip calling device_unregister().
+ *
+ * Note that the caller should lock devfreq->lock before calling
+ * this. _remove_devfreq() will unlock it and free devfreq
+ * internally. devfreq_list_lock should be locked by the caller
+ * as well (not relased at return)
+ *
+ * Lock usage:
+ * devfreq->lock: locked before call.
+ * unlocked at return (and freed)
+ * devfreq_list_lock: locked before call.
+ * kept locked at return.
+ * if devfreq is centrally polled.
+ *
+ * Freed memory:
+ * devfreq
+ */
+static void _remove_devfreq(struct devfreq *devfreq, bool skip)
+{
+ if (!mutex_is_locked(&devfreq->lock)) {
+ WARN(true, "devfreq->lock must be locked by the caller.\n");
+ return;
+ }
+ if (!devfreq->governor->no_central_polling &&
+ !mutex_is_locked(&devfreq_list_lock)) {
+ WARN(true, "devfreq_list_lock must be locked by the caller.\n");
+ return;
+ }
+
+ if (devfreq->being_removed)
+ return;
+
+ devfreq->being_removed = true;
+
+ if (devfreq->profile->exit)
+ devfreq->profile->exit(devfreq->dev.parent);
+
+ if (devfreq->governor->exit)
+ devfreq->governor->exit(devfreq);
+
+ if (!skip && get_device(&devfreq->dev)) {
+ device_unregister(&devfreq->dev);
+ put_device(&devfreq->dev);
+ }
+
+ if (!devfreq->governor->no_central_polling)
+ list_del(&devfreq->node);
+
+ mutex_unlock(&devfreq->lock);
+ mutex_destroy(&devfreq->lock);
+
+ kfree(devfreq);
+}
+
+/**
+ * devfreq_dev_release() - Callback for struct device to release the device.
+ * @dev: the devfreq device
+ *
+ * This calls _remove_devfreq() if _remove_devfreq() is not called.
+ * Note that devfreq_dev_release() could be called by _remove_devfreq() as
+ * well as by others unregistering the device.
+ */
+static void devfreq_dev_release(struct device *dev)
+{
+ struct devfreq *devfreq = to_devfreq(dev);
+ bool central_polling = !devfreq->governor->no_central_polling;
+
+ /*
+ * If devfreq_dev_release() was called by device_unregister() of
+ * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
+ * being_removed is already set. This also partially checks the case
+ * where devfreq_dev_release() is called from a thread other than
+ * the one called _remove_devfreq(); however, this case is
+ * dealt completely with another following being_removed check.
+ *
+ * Because being_removed is never being
+ * unset, we do not need to worry about race conditions on
+ * being_removed.
+ */
+ if (devfreq->being_removed)
+ return;
+
+ if (central_polling)
+ mutex_lock(&devfreq_list_lock);
+
+ mutex_lock(&devfreq->lock);
+
+ /*
+ * Check being_removed flag again for the case where
+ * devfreq_dev_release() was called in a thread other than the one
+ * possibly called _remove_devfreq().
+ */
+ if (devfreq->being_removed) {
+ mutex_unlock(&devfreq->lock);
+ goto out;
+ }
+
+ /* devfreq->lock is unlocked and removed in _removed_devfreq() */
+ _remove_devfreq(devfreq, true);
+
+out:
+ if (central_polling)
+ mutex_unlock(&devfreq_list_lock);
+}
+
+/**
+ * devfreq_monitor() - Periodically poll devfreq objects.
+ * @work: the work struct used to run devfreq_monitor periodically.
+ *
+ */
+static void devfreq_monitor(struct work_struct *work)
+{
+ static unsigned long last_polled_at;
+ struct devfreq *devfreq, *tmp;
+ int error;
+ unsigned long jiffies_passed;
+ unsigned long next_jiffies = ULONG_MAX, now = jiffies;
+ struct device *dev;
+
+ /* Initially last_polled_at = 0, polling every device at bootup */
+ jiffies_passed = now - last_polled_at;
+ last_polled_at = now;
+ if (jiffies_passed == 0)
+ jiffies_passed = 1;
+
+ mutex_lock(&devfreq_list_lock);
+ list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
+ mutex_lock(&devfreq->lock);
+ dev = devfreq->dev.parent;
+
+ /* Do not remove tmp for a while */
+ wait_remove_device = tmp;
+
+ if (devfreq->governor->no_central_polling ||
+ devfreq->next_polling == 0) {
+ mutex_unlock(&devfreq->lock);
+ continue;
+ }
+ mutex_unlock(&devfreq_list_lock);
+
+ /*
+ * Reduce more next_polling if devfreq_wq took an extra
+ * delay. (i.e., CPU has been idled.)
+ */
+ if (devfreq->next_polling <= jiffies_passed) {
+ error = update_devfreq(devfreq);
+
+ /* Remove a devfreq with an error. */
+ if (error && error != -EAGAIN) {
+
+ dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
+ error, devfreq->governor->name);
+
+ /*
+ * Unlock devfreq before locking the list
+ * in order to avoid deadlock with
+ * find_device_devfreq or others
+ */
+ mutex_unlock(&devfreq->lock);
+ mutex_lock(&devfreq_list_lock);
+ /* Check if devfreq is already removed */
+ if (IS_ERR(find_device_devfreq(dev)))
+ continue;
+ mutex_lock(&devfreq->lock);
+ /* This unlocks devfreq->lock and free it */
+ _remove_devfreq(devfreq, false);
+ continue;
+ }
+ devfreq->next_polling = devfreq->polling_jiffies;
+ } else {
+ devfreq->next_polling -= jiffies_passed;
+ }
+
+ if (devfreq->next_polling)
+ next_jiffies = (next_jiffies > devfreq->next_polling) ?
+ devfreq->next_polling : next_jiffies;
+
+ mutex_unlock(&devfreq->lock);
+ mutex_lock(&devfreq_list_lock);
+ }
+ wait_remove_device = NULL;
+ mutex_unlock(&devfreq_list_lock);
+
+ if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
+ polling = true;
+ queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
+ } else {
+ polling = false;
+ }
+}
+
+/**
+ * devfreq_add_device() - Add devfreq feature to the device
+ * @dev: the device to add devfreq feature.
+ * @profile: device-specific profile to run devfreq.
+ * @governor: the policy to choose frequency.
+ * @data: private data for the governor. The devfreq framework does not
+ * touch this value.
+ */
+struct devfreq *devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const struct devfreq_governor *governor,
+ void *data)
+{
+ struct devfreq *devfreq;
+ int err = 0;
+
+ if (!dev || !profile || !governor) {
+ dev_err(dev, "%s: Invalid parameters.\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+
+ if (!governor->no_central_polling) {
+ mutex_lock(&devfreq_list_lock);
+ devfreq = find_device_devfreq(dev);
+ mutex_unlock(&devfreq_list_lock);
+ if (!IS_ERR(devfreq)) {
+ dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
+ if (!devfreq) {
+ dev_err(dev, "%s: Unable to create devfreq for the device\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ mutex_init(&devfreq->lock);
+ mutex_lock(&devfreq->lock);
+ devfreq->dev.parent = dev;
+ devfreq->dev.class = devfreq_class;
+ devfreq->dev.release = devfreq_dev_release;
+ devfreq->profile = profile;
+ devfreq->governor = governor;
+ devfreq->previous_freq = profile->initial_freq;
+ devfreq->data = data;
+ devfreq->next_polling = devfreq->polling_jiffies
+ = msecs_to_jiffies(devfreq->profile->polling_ms);
+ devfreq->nb.notifier_call = devfreq_notifier_call;
+
+ dev_set_name(&devfreq->dev, dev_name(dev));
+ err = device_register(&devfreq->dev);
+ if (err) {
+ put_device(&devfreq->dev);
+ goto err_dev;
+ }
+
+ if (governor->init)
+ err = governor->init(devfreq);
+ if (err)
+ goto err_init;
+
+ mutex_unlock(&devfreq->lock);
+
+ if (governor->no_central_polling)
+ goto out;
+
+ mutex_lock(&devfreq_list_lock);
+
+ list_add(&devfreq->node, &devfreq_list);
+
+ if (devfreq_wq && devfreq->next_polling && !polling) {
+ polling = true;
+ queue_delayed_work(devfreq_wq, &devfreq_work,
+ devfreq->next_polling);
+ }
+ mutex_unlock(&devfreq_list_lock);
+ goto out;
+err_init:
+ device_unregister(&devfreq->dev);
+err_dev:
+ mutex_unlock(&devfreq->lock);
+ kfree(devfreq);
+out:
+ if (err)
+ return ERR_PTR(err);
+ else
+ return devfreq;
+}
+
+/**
+ * devfreq_remove_device() - Remove devfreq feature from a device.
+ * @devfreq the devfreq instance to be removed
+ */
+int devfreq_remove_device(struct devfreq *devfreq)
+{
+ if (!devfreq)
+ return -EINVAL;
+
+ if (!devfreq->governor->no_central_polling) {
+ mutex_lock(&devfreq_list_lock);
+ while (wait_remove_device == devfreq) {
+ mutex_unlock(&devfreq_list_lock);
+ schedule();
+ mutex_lock(&devfreq_list_lock);
+ }
+ }
+
+ mutex_lock(&devfreq->lock);
+ _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
+
+ if (!devfreq->governor->no_central_polling)
+ mutex_unlock(&devfreq_list_lock);
+
+ return 0;
+}
+
+static ssize_t show_governor(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
+}
+
+static ssize_t show_freq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
+}
+
+static ssize_t show_polling_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
+}
+
+static ssize_t store_polling_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *df = to_devfreq(dev);
+ unsigned int value;
+ int ret;
+
+ ret = sscanf(buf, "%u", &value);
+ if (ret != 1)
+ goto out;
+
+ mutex_lock(&df->lock);
+ df->profile->polling_ms = value;
+ df->next_polling = df->polling_jiffies
+ = msecs_to_jiffies(value);
+ mutex_unlock(&df->lock);
+
+ ret = count;
+
+ if (df->governor->no_central_polling)
+ goto out;
+
+ mutex_lock(&devfreq_list_lock);
+ if (df->next_polling > 0 && !polling) {
+ polling = true;
+ queue_delayed_work(devfreq_wq, &devfreq_work,
+ df->next_polling);
+ }
+ mutex_unlock(&devfreq_list_lock);
+out:
+ return ret;
+}
+
+static ssize_t show_central_polling(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n",
+ !to_devfreq(dev)->governor->no_central_polling);
+}
+
+static struct device_attribute devfreq_attrs[] = {
+ __ATTR(governor, S_IRUGO, show_governor, NULL),
+ __ATTR(cur_freq, S_IRUGO, show_freq, NULL),
+ __ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
+ __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
+ store_polling_interval),
+ { },
+};
+
+/**
+ * devfreq_start_polling() - Initialize data structure for devfreq framework and
+ * start polling registered devfreq devices.
+ */
+static int __init devfreq_start_polling(void)
+{
+ mutex_lock(&devfreq_list_lock);
+ polling = false;
+ devfreq_wq = create_freezable_workqueue("devfreq_wq");
+ INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
+ mutex_unlock(&devfreq_list_lock);
+
+ devfreq_monitor(&devfreq_work.work);
+ return 0;
+}
+late_initcall(devfreq_start_polling);
+
+static int __init devfreq_init(void)
+{
+ devfreq_class = class_create(THIS_MODULE, "devfreq");
+ if (IS_ERR(devfreq_class)) {
+ pr_err("%s: couldn't create class\n", __FILE__);
+ return PTR_ERR(devfreq_class);
+ }
+ devfreq_class->dev_attrs = devfreq_attrs;
+ return 0;
+}
+subsys_initcall(devfreq_init);
+
+static void __exit devfreq_exit(void)
+{
+ class_destroy(devfreq_class);
+}
+module_exit(devfreq_exit);
+
+/*
+ * The followings are helper functions for devfreq user device drivers with
+ * OPP framework.
+ */
+
+/**
+ * devfreq_recommended_opp() - Helper function to get proper OPP for the
+ * freq value given to target callback.
+ * @dev The devfreq user device. (parent of devfreq)
+ * @freq The frequency given to target function
+ *
+ */
+struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq)
+{
+ struct opp *opp = opp_find_freq_ceil(dev, freq);
+
+ if (opp == ERR_PTR(-ENODEV))
+ opp = opp_find_freq_floor(dev, freq);
+ return opp;
+}
+
+/**
+ * devfreq_register_opp_notifier() - Helper function to get devfreq notified
+ * for any changes in the OPP availability
+ * changes
+ * @dev The devfreq user device. (parent of devfreq)
+ * @devfreq The devfreq object.
+ */
+int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
+{
+ struct srcu_notifier_head *nh = opp_get_notifier(dev);
+
+ if (IS_ERR(nh))
+ return PTR_ERR(nh);
+ return srcu_notifier_chain_register(nh, &devfreq->nb);
+}
+
+/**
+ * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
+ * notified for any changes in the OPP
+ * availability changes anymore.
+ * @dev The devfreq user device. (parent of devfreq)
+ * @devfreq The devfreq object.
+ *
+ * At exit() callback of devfreq_dev_profile, this must be included if
+ * devfreq_recommended_opp is used.
+ */
+int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
+{
+ struct srcu_notifier_head *nh = opp_get_notifier(dev);
+
+ if (IS_ERR(nh))
+ return PTR_ERR(nh);
+ return srcu_notifier_chain_unregister(nh, &devfreq->nb);
+}
+
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("devfreq class support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
new file mode 100644
index 00000000000..ea7f13c58de
--- /dev/null
+++ b/drivers/devfreq/governor.h
@@ -0,0 +1,24 @@
+/*
+ * governor.h - internal header for devfreq governors.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This header is for devfreq governors in drivers/devfreq/
+ */
+
+#ifndef _GOVERNOR_H
+#define _GOVERNOR_H
+
+#include <linux/devfreq.h>
+
+#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
+
+/* Caution: devfreq->lock must be locked before calling update_devfreq */
+extern int update_devfreq(struct devfreq *devfreq);
+
+#endif /* _GOVERNOR_H */
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
new file mode 100644
index 00000000000..c0596b29176
--- /dev/null
+++ b/drivers/devfreq/governor_performance.c
@@ -0,0 +1,29 @@
+/*
+ * linux/drivers/devfreq/governor_performance.c
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/devfreq.h>
+
+static int devfreq_performance_func(struct devfreq *df,
+ unsigned long *freq)
+{
+ /*
+ * target callback should be able to get floor value as
+ * said in devfreq.h
+ */
+ *freq = UINT_MAX;
+ return 0;
+}
+
+const struct devfreq_governor devfreq_performance = {
+ .name = "performance",
+ .get_target_freq = devfreq_performance_func,
+ .no_central_polling = true,
+};
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
new file mode 100644
index 00000000000..2483a85a266
--- /dev/null
+++ b/drivers/devfreq/governor_powersave.c
@@ -0,0 +1,29 @@
+/*
+ * linux/drivers/devfreq/governor_powersave.c
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/devfreq.h>
+
+static int devfreq_powersave_func(struct devfreq *df,
+ unsigned long *freq)
+{
+ /*
+ * target callback should be able to get ceiling value as
+ * said in devfreq.h
+ */
+ *freq = 0;
+ return 0;
+}
+
+const struct devfreq_governor devfreq_powersave = {
+ .name = "powersave",
+ .get_target_freq = devfreq_powersave_func,
+ .no_central_polling = true,
+};
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
new file mode 100644
index 00000000000..efad8dcf902
--- /dev/null
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -0,0 +1,88 @@
+/*
+ * linux/drivers/devfreq/governor_simpleondemand.c
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/devfreq.h>
+#include <linux/math64.h>
+
+/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
+#define DFSO_UPTHRESHOLD (90)
+#define DFSO_DOWNDIFFERENCTIAL (5)
+static int devfreq_simple_ondemand_func(struct devfreq *df,
+ unsigned long *freq)
+{
+ struct devfreq_dev_status stat;
+ int err = df->profile->get_dev_status(df->dev.parent, &stat);
+ unsigned long long a, b;
+ unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
+ unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
+ struct devfreq_simple_ondemand_data *data = df->data;
+
+ if (err)
+ return err;
+
+ if (data) {
+ if (data->upthreshold)
+ dfso_upthreshold = data->upthreshold;
+ if (data->downdifferential)
+ dfso_downdifferential = data->downdifferential;
+ }
+ if (dfso_upthreshold > 100 ||
+ dfso_upthreshold < dfso_downdifferential)
+ return -EINVAL;
+
+ /* Assume MAX if it is going to be divided by zero */
+ if (stat.total_time == 0) {
+ *freq = UINT_MAX;
+ return 0;
+ }
+
+ /* Prevent overflow */
+ if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) {
+ stat.busy_time >>= 7;
+ stat.total_time >>= 7;
+ }
+
+ /* Set MAX if it's busy enough */
+ if (stat.busy_time * 100 >
+ stat.total_time * dfso_upthreshold) {
+ *freq = UINT_MAX;
+ return 0;
+ }
+
+ /* Set MAX if we do not know the initial frequency */
+ if (stat.current_frequency == 0) {
+ *freq = UINT_MAX;
+ return 0;
+ }
+
+ /* Keep the current frequency */
+ if (stat.busy_time * 100 >
+ stat.total_time * (dfso_upthreshold - dfso_downdifferential)) {
+ *freq = stat.current_frequency;
+ return 0;
+ }
+
+ /* Set the desired frequency based on the load */
+ a = stat.busy_time;
+ a *= stat.current_frequency;
+ b = div_u64(a, stat.total_time);
+ b *= 100;
+ b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
+ *freq = (unsigned long) b;
+
+ return 0;
+}
+
+const struct devfreq_governor devfreq_simple_ondemand = {
+ .name = "simple_ondemand",
+ .get_target_freq = devfreq_simple_ondemand_func,
+};
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
new file mode 100644
index 00000000000..4f8b563da78
--- /dev/null
+++ b/drivers/devfreq/governor_userspace.c
@@ -0,0 +1,116 @@
+/*
+ * linux/drivers/devfreq/governor_simpleondemand.c
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include <linux/pm.h>
+#include <linux/mutex.h>
+#include "governor.h"
+
+struct userspace_data {
+ unsigned long user_frequency;
+ bool valid;
+};
+
+static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
+{
+ struct userspace_data *data = df->data;
+
+ if (!data->valid)
+ *freq = df->previous_freq; /* No user freq specified yet */
+ else
+ *freq = data->user_frequency;
+ return 0;
+}
+
+static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *devfreq = to_devfreq(dev);
+ struct userspace_data *data;
+ unsigned long wanted;
+ int err = 0;
+
+
+ mutex_lock(&devfreq->lock);
+ data = devfreq->data;
+
+ sscanf(buf, "%lu", &wanted);
+ data->user_frequency = wanted;
+ data->valid = true;
+ err = update_devfreq(devfreq);
+ if (err == 0)
+ err = count;
+ mutex_unlock(&devfreq->lock);
+ return err;
+}
+
+static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *devfreq = to_devfreq(dev);
+ struct userspace_data *data;
+ int err = 0;
+
+ mutex_lock(&devfreq->lock);
+ data = devfreq->data;
+
+ if (data->valid)
+ err = sprintf(buf, "%lu\n", data->user_frequency);
+ else
+ err = sprintf(buf, "undefined\n");
+ mutex_unlock(&devfreq->lock);
+ return err;
+}
+
+static DEVICE_ATTR(set_freq, 0644, show_freq, store_freq);
+static struct attribute *dev_entries[] = {
+ &dev_attr_set_freq.attr,
+ NULL,
+};
+static struct attribute_group dev_attr_group = {
+ .name = "userspace",
+ .attrs = dev_entries,
+};
+
+static int userspace_init(struct devfreq *devfreq)
+{
+ int err = 0;
+ struct userspace_data *data = kzalloc(sizeof(struct userspace_data),
+ GFP_KERNEL);
+
+ if (!data) {
+ err = -ENOMEM;
+ goto out;
+ }
+ data->valid = false;
+ devfreq->data = data;
+
+ err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
+out:
+ return err;
+}
+
+static void userspace_exit(struct devfreq *devfreq)
+{
+ sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+ kfree(devfreq->data);
+ devfreq->data = NULL;
+}
+
+const struct devfreq_governor devfreq_userspace = {
+ .name = "userspace",
+ .get_target_freq = devfreq_userspace_func,
+ .init = userspace_init,
+ .exit = userspace_exit,
+ .no_central_polling = true,
+};
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index ab8a4eff072..a71f55e72be 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -81,7 +81,7 @@ static struct ipu_irq_map irq_map[CONFIG_MX3_IPU_IRQS];
/* Protects allocations from the above array of maps */
static DEFINE_MUTEX(map_lock);
/* Protects register accesses and individual mappings */
-static DEFINE_SPINLOCK(bank_lock);
+static DEFINE_RAW_SPINLOCK(bank_lock);
static struct ipu_irq_map *src2map(unsigned int src)
{
@@ -101,11 +101,11 @@ static void ipu_irq_unmask(struct irq_data *d)
uint32_t reg;
unsigned long lock_flags;
- spin_lock_irqsave(&bank_lock, lock_flags);
+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
bank = map->bank;
if (!bank) {
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
return;
}
@@ -114,7 +114,7 @@ static void ipu_irq_unmask(struct irq_data *d)
reg |= (1UL << (map->source & 31));
ipu_write_reg(bank->ipu, reg, bank->control);
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
}
static void ipu_irq_mask(struct irq_data *d)
@@ -124,11 +124,11 @@ static void ipu_irq_mask(struct irq_data *d)
uint32_t reg;
unsigned long lock_flags;
- spin_lock_irqsave(&bank_lock, lock_flags);
+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
bank = map->bank;
if (!bank) {
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
return;
}
@@ -137,7 +137,7 @@ static void ipu_irq_mask(struct irq_data *d)
reg &= ~(1UL << (map->source & 31));
ipu_write_reg(bank->ipu, reg, bank->control);
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
}
static void ipu_irq_ack(struct irq_data *d)
@@ -146,17 +146,17 @@ static void ipu_irq_ack(struct irq_data *d)
struct ipu_irq_bank *bank;
unsigned long lock_flags;
- spin_lock_irqsave(&bank_lock, lock_flags);
+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
bank = map->bank;
if (!bank) {
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
return;
}
ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
}
/**
@@ -172,11 +172,11 @@ bool ipu_irq_status(unsigned int irq)
unsigned long lock_flags;
bool ret;
- spin_lock_irqsave(&bank_lock, lock_flags);
+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
bank = map->bank;
ret = bank && ipu_read_reg(bank->ipu, bank->status) &
(1UL << (map->source & 31));
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
return ret;
}
@@ -213,10 +213,10 @@ int ipu_irq_map(unsigned int source)
if (irq_map[i].source < 0) {
unsigned long lock_flags;
- spin_lock_irqsave(&bank_lock, lock_flags);
+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
irq_map[i].source = source;
irq_map[i].bank = irq_bank + source / 32;
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
ret = irq_map[i].irq;
pr_debug("IPU: mapped source %u to IRQ %u\n",
@@ -252,10 +252,10 @@ int ipu_irq_unmap(unsigned int source)
pr_debug("IPU: unmapped source %u from IRQ %u\n",
source, irq_map[i].irq);
- spin_lock_irqsave(&bank_lock, lock_flags);
+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
irq_map[i].source = -EINVAL;
irq_map[i].bank = NULL;
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
ret = 0;
break;
@@ -276,7 +276,7 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc)
for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) {
struct ipu_irq_bank *bank = irq_bank + i;
- spin_lock(&bank_lock);
+ raw_spin_lock(&bank_lock);
status = ipu_read_reg(ipu, bank->status);
/*
* Don't think we have to clear all interrupts here, they will
@@ -284,18 +284,18 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc)
* might want to clear unhandled interrupts after the loop...
*/
status &= ipu_read_reg(ipu, bank->control);
- spin_unlock(&bank_lock);
+ raw_spin_unlock(&bank_lock);
while ((line = ffs(status))) {
struct ipu_irq_map *map;
line--;
status &= ~(1UL << line);
- spin_lock(&bank_lock);
+ raw_spin_lock(&bank_lock);
map = src2map(32 * i + line);
if (map)
irq = map->irq;
- spin_unlock(&bank_lock);
+ raw_spin_unlock(&bank_lock);
if (!map) {
pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
@@ -317,22 +317,22 @@ static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc)
for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
struct ipu_irq_bank *bank = irq_bank + i;
- spin_lock(&bank_lock);
+ raw_spin_lock(&bank_lock);
status = ipu_read_reg(ipu, bank->status);
/* Not clearing all interrupts, see above */
status &= ipu_read_reg(ipu, bank->control);
- spin_unlock(&bank_lock);
+ raw_spin_unlock(&bank_lock);
while ((line = ffs(status))) {
struct ipu_irq_map *map;
line--;
status &= ~(1UL << line);
- spin_lock(&bank_lock);
+ raw_spin_lock(&bank_lock);
map = src2map(32 * i + line);
if (map)
irq = map->irq;
- spin_unlock(&bank_lock);
+ raw_spin_unlock(&bank_lock);
if (!map) {
pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0599854e221..118ec12d2d5 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -34,8 +34,8 @@ struct gpio_bank {
u16 irq;
u16 virtual_irq_start;
int method;
-#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
u32 suspend_wakeup;
+#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
u32 saved_wakeup;
#endif
u32 non_wakeup_gpios;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index c43b8ff626a..0550dcb8581 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -577,6 +577,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
void
pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
+ *gpio_base = -1;
}
#endif
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index e88c64417a8..14cc88aaf3a 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -277,7 +277,12 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
- val = gctx->scratch[((gctx->fb_base + idx) / 4)];
+ if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+ DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
+ gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+ val = 0;
+ } else
+ val = gctx->scratch[(gctx->fb_base / 4) + idx];
if (print)
DEBUG("FB[0x%02X]", idx);
break;
@@ -531,7 +536,11 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
- gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
+ if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+ DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
+ gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+ } else
+ gctx->scratch[(gctx->fb_base / 4) + idx] = val;
DEBUG("FB[0x%02X]", idx);
break;
case ATOM_ARG_PLL:
@@ -1370,11 +1379,13 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
}
+ ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
usage_bytes = 20 * 1024;
/* allocate some scratch memory */
ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
if (!ctx->scratch)
return -ENOMEM;
+ ctx->scratch_size_bytes = usage_bytes;
return 0;
}
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index a589a55b223..93cfe2086ba 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -137,6 +137,7 @@ struct atom_context {
int cs_equal, cs_above;
int io_mode;
uint32_t *scratch;
+ int scratch_size_bytes;
};
extern int atom_debug;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c742944d380..a515b2a09d8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -466,7 +466,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
return;
}
args.v2.ucEnable = enable;
- if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
+ if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
args.v2.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE3(rdev)) {
args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 4da23889fea..79e8ebc0530 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -129,7 +129,9 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, NULL, 0, delay, &ack);
- if (ret < 0)
+ if (ret == -EBUSY)
+ continue;
+ else if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return send_bytes;
@@ -160,7 +162,9 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, recv, recv_bytes, delay, &ack);
- if (ret < 0)
+ if (ret == -EBUSY)
+ continue;
+ else if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return ret;
@@ -236,7 +240,9 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(auxch,
msg, msg_bytes, reply, reply_bytes, 0, &ack);
- if (ret < 0) {
+ if (ret == -EBUSY)
+ continue;
+ else if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index bce63fd329d..449c3d8c683 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1303,23 +1303,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
/* get the DPCD from the bridge */
radeon_dp_getdpcd(radeon_connector);
- if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
- ret = connector_status_connected;
- else {
- /* need to setup ddc on the bridge */
- if (encoder)
- radeon_atom_ext_encoder_setup_ddc(encoder);
+ if (encoder) {
+ /* setup ddc on the bridge */
+ radeon_atom_ext_encoder_setup_ddc(encoder);
if (radeon_ddc_probe(radeon_connector,
- radeon_connector->requires_extended_probe))
+ radeon_connector->requires_extended_probe)) /* try DDC */
ret = connector_status_connected;
- }
-
- if ((ret == connector_status_disconnected) &&
- radeon_connector->dac_load_detect) {
- struct drm_encoder *encoder = radeon_best_single_encoder(connector);
- struct drm_encoder_helper_funcs *encoder_funcs;
- if (encoder) {
- encoder_funcs = encoder->helper_private;
+ else if (radeon_connector->dac_load_detect) { /* try load detection */
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 13690f3eb4a..eb3f6dc6df8 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1638,7 +1638,17 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
break;
case 2:
args.v2.ucCRTC = radeon_crtc->crtc_id;
- args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ if (radeon_encoder_is_dp_bridge(encoder)) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+ else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+ else
+ args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ } else
+ args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
@@ -1755,9 +1765,17 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
/* DCE4/5 */
if (ASIC_IS_DCE4(rdev)) {
dig = radeon_encoder->enc_priv;
- if (ASIC_IS_DCE41(rdev))
- return radeon_crtc->crtc_id;
- else {
+ if (ASIC_IS_DCE41(rdev)) {
+ /* ontario follows DCE4 */
+ if (rdev->family == CHIP_PALM) {
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ } else
+ /* llano follows DCE3.2 */
+ return radeon_crtc->crtc_id;
+ } else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7fd4e3e5ad5..3475a09f946 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -48,7 +48,7 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);;
+ rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
} else
WREG32(rdev->fence_drv.scratch_reg, seq);
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ae3c6f5dd2b..082fcaea583 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -321,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg old_copy;
+ struct ttm_mem_reg old_copy = *old_mem;
void *old_iomap;
void *new_iomap;
int ret;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 1130a898712..22a4a051f22 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -69,7 +69,7 @@ config HID_ACRUX
Say Y here if you want to enable support for ACRUX game controllers.
config HID_ACRUX_FF
- tristate "ACRUX force feedback support"
+ bool "ACRUX force feedback support"
depends on HID_ACRUX
select INPUT_FF_MEMLESS
---help---
@@ -245,6 +245,15 @@ config HID_LOGITECH
---help---
Support for Logitech devices that are not fully compliant with HID standard.
+config HID_LOGITECH_DJ
+ tristate "Logitech Unifying receivers full support"
+ depends on HID_LOGITECH
+ default m
+ ---help---
+ Say Y if you want support for Logitech Unifying receivers and devices.
+ Unifying receivers are capable of pairing up to 6 Logitech compliant
+ devices to the same receiver.
+
config LOGITECH_FF
bool "Logitech force feedback support"
depends on HID_LOGITECH
@@ -278,13 +287,21 @@ config LOGIG940_FF
Say Y here if you want to enable force feedback support for Logitech
Flight System G940 devices.
-config LOGIWII_FF
- bool "Logitech Speed Force Wireless force feedback support"
+config LOGIWHEELS_FF
+ bool "Logitech wheels configuration and force feedback support"
depends on HID_LOGITECH
select INPUT_FF_MEMLESS
+ default LOGITECH_FF
help
- Say Y here if you want to enable force feedback support for Logitech
- Speed Force Wireless (Wii) devices.
+ Say Y here if you want to enable force feedback and range setting
+ support for following Logitech wheels:
+ - Logitech Driving Force
+ - Logitech Driving Force Pro
+ - Logitech Driving Force GT
+ - Logitech G25
+ - Logitech G27
+ - Logitech MOMO/MOMO 2
+ - Logitech Formula Force EX
config HID_MAGICMOUSE
tristate "Apple MagicMouse multi-touch support"
@@ -328,6 +345,7 @@ config HID_MULTITOUCH
- Hanvon dual touch panels
- Ilitek dual touch panels
- IrTouch Infrared USB panels
+ - LG Display panels (Dell ST2220Tc)
- Lumio CrystalTouch panels
- MosArt dual-touch panels
- PenMount dual touch panels
@@ -441,6 +459,13 @@ config HID_PICOLCD_LEDS
---help---
Provide access to PicoLCD's GPO pins via leds class.
+config HID_PRIMAX
+ tristate "Primax non-fully HID-compliant devices"
+ depends on USB_HID
+ ---help---
+ Support for Primax devices that are not fully compliant with the
+ HID standard.
+
config HID_QUANTA
tristate "Quanta Optical Touch panels"
depends on USB_HID
@@ -539,7 +564,11 @@ config HID_SMARTJOYPLUS
tristate "SmartJoy PLUS PS2/USB adapter support"
depends on USB_HID
---help---
- Support for SmartJoy PLUS PS2/USB adapter.
+ Support for SmartJoy PLUS PS2/USB adapter, Super Dual Box,
+ Super Joy Box 3 Pro, Super Dual Box Pro, and Super Joy Box 5 Pro.
+
+ Note that DDR (Dance Dance Revolution) mode is not supported, nor
+ is pressure sensitive buttons on the pro models.
config SMARTJOYPLUS_FF
bool "SmartJoy PLUS PS2/USB adapter force feedback support"
@@ -590,6 +619,7 @@ config HID_WIIMOTE
tristate "Nintendo Wii Remote support"
depends on BT_HIDP
depends on LEDS_CLASS
+ select POWER_SUPPLY
---help---
Support for the Nintendo Wii Remote bluetooth device.
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 0a0a38e9fd2..1e0d2a638b2 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -21,7 +21,7 @@ endif
ifdef CONFIG_LOGIG940_FF
hid-logitech-y += hid-lg3ff.o
endif
-ifdef CONFIG_LOGIWII_FF
+ifdef CONFIG_LOGIWHEELS_FF
hid-logitech-y += hid-lg4ff.o
endif
@@ -43,6 +43,7 @@ obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
+obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
@@ -54,6 +55,7 @@ obj-$(CONFIG_HID_QUANTA) += hid-quanta.o
obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
+obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
obj-$(CONFIG_HID_ROCCAT_COMMON) += hid-roccat-common.o
obj-$(CONFIG_HID_ROCCAT_ARVO) += hid-roccat-arvo.o
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 18b3bc646bf..9bc7b03269d 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -183,6 +183,9 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
table = macbookair_fn_keys;
+ else if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI &&
+ hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING6_JIS)
+ table = macbookair_fn_keys;
else if (hid->product < 0x21d || hid->product >= 0x300)
table = powerbook_fn_keys;
else
@@ -493,6 +496,18 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index 121514149e0..3bdb4500f95 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -6,7 +6,7 @@
* Xbox 360 controller.
*
* 1a34:0802 "ACRUX USB GAMEPAD 8116"
- * - tested with a EXEQ EQ-PCU-02090 game controller.
+ * - tested with an EXEQ EQ-PCU-02090 game controller.
*
* Copyright (c) 2010 Sergei Kolzun <x0r@dv-life.ru>
*/
@@ -45,7 +45,10 @@ static int axff_play(struct input_dev *dev, void *data, struct ff_effect *effect
{
struct hid_device *hid = input_get_drvdata(dev);
struct axff_device *axff = data;
+ struct hid_report *report = axff->report;
+ int field_count = 0;
int left, right;
+ int i, j;
left = effect->u.rumble.strong_magnitude;
right = effect->u.rumble.weak_magnitude;
@@ -55,10 +58,14 @@ static int axff_play(struct input_dev *dev, void *data, struct ff_effect *effect
left = left * 0xff / 0xffff;
right = right * 0xff / 0xffff;
- axff->report->field[0]->value[0] = left;
- axff->report->field[1]->value[0] = right;
- axff->report->field[2]->value[0] = left;
- axff->report->field[3]->value[0] = right;
+ for (i = 0; i < report->maxfield; i++) {
+ for (j = 0; j < report->field[i]->report_count; j++) {
+ report->field[i]->value[j] =
+ field_count % 2 ? right : left;
+ field_count++;
+ }
+ }
+
dbg_hid("running with 0x%02x 0x%02x", left, right);
usbhid_submit_report(hid, axff->report, USB_DIR_OUT);
@@ -72,6 +79,8 @@ static int axff_init(struct hid_device *hid)
struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
+ int field_count = 0;
+ int i, j;
int error;
if (list_empty(report_list)) {
@@ -80,9 +89,16 @@ static int axff_init(struct hid_device *hid)
}
report = list_first_entry(report_list, struct hid_report, list);
+ for (i = 0; i < report->maxfield; i++) {
+ for (j = 0; j < report->field[i]->report_count; j++) {
+ report->field[i]->value[j] = 0x00;
+ field_count++;
+ }
+ }
- if (report->maxfield < 4) {
- hid_err(hid, "no fields in the report: %d\n", report->maxfield);
+ if (field_count < 4) {
+ hid_err(hid, "not enough fields in the report: %d\n",
+ field_count);
return -ENODEV;
}
@@ -97,13 +113,9 @@ static int axff_init(struct hid_device *hid)
goto err_free_mem;
axff->report = report;
- axff->report->field[0]->value[0] = 0x00;
- axff->report->field[1]->value[0] = 0x00;
- axff->report->field[2]->value[0] = 0x00;
- axff->report->field[3]->value[0] = 0x00;
usbhid_submit_report(hid, axff->report, USB_DIR_OUT);
- hid_info(hid, "Force Feedback for ACRUX game controllers by Sergei Kolzun<x0r@dv-life.ru>\n");
+ hid_info(hid, "Force Feedback for ACRUX game controllers by Sergei Kolzun <x0r@dv-life.ru>\n");
return 0;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 242353df3dc..91adcc5bad2 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -29,6 +29,7 @@
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
+#include <linux/semaphore.h>
#include <linux/hid.h>
#include <linux/hiddev.h>
@@ -1085,16 +1086,25 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
struct hid_report *report;
char *buf;
unsigned int i;
- int ret;
+ int ret = 0;
- if (!hid || !hid->driver)
+ if (!hid)
return -ENODEV;
+
+ if (down_trylock(&hid->driver_lock))
+ return -EBUSY;
+
+ if (!hid->driver) {
+ ret = -ENODEV;
+ goto unlock;
+ }
report_enum = hid->report_enum + type;
hdrv = hid->driver;
if (!size) {
dbg_hid("empty report\n");
- return -1;
+ ret = -1;
+ goto unlock;
}
buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
@@ -1118,18 +1128,24 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
nomem:
report = hid_get_report(report_enum, data);
- if (!report)
- return -1;
+ if (!report) {
+ ret = -1;
+ goto unlock;
+ }
if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
ret = hdrv->raw_event(hid, report, data, size);
- if (ret != 0)
- return ret < 0 ? ret : 0;
+ if (ret != 0) {
+ ret = ret < 0 ? ret : 0;
+ goto unlock;
+ }
}
hid_report_raw_event(hid, type, data, size, interrupt);
- return 0;
+unlock:
+ up(&hid->driver_lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(hid_input_report);
@@ -1212,6 +1228,12 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
connect_mask & HID_CONNECT_HIDINPUT_FORCE))
hdev->claimed |= HID_CLAIMED_INPUT;
+ if (hdev->quirks & HID_QUIRK_MULTITOUCH) {
+ /* this device should be handled by hid-multitouch, skip it */
+ hdev->quirks &= ~HID_QUIRK_MULTITOUCH;
+ return -ENODEV;
+ }
+
if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
!hdev->hiddev_connect(hdev,
connect_mask & HID_CONNECT_HIDDEV_FORCE))
@@ -1343,6 +1365,12 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1391,6 +1419,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS, USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
@@ -1399,6 +1428,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
@@ -1420,8 +1450,11 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
@@ -1461,6 +1494,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_PCI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
@@ -1501,6 +1535,10 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0709) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
@@ -1620,10 +1658,15 @@ static int hid_device_probe(struct device *dev)
const struct hid_device_id *id;
int ret = 0;
+ if (down_interruptible(&hdev->driver_lock))
+ return -EINTR;
+
if (!hdev->driver) {
id = hid_match_device(hdev, hdrv);
- if (id == NULL)
- return -ENODEV;
+ if (id == NULL) {
+ ret = -ENODEV;
+ goto unlock;
+ }
hdev->driver = hdrv;
if (hdrv->probe) {
@@ -1636,14 +1679,20 @@ static int hid_device_probe(struct device *dev)
if (ret)
hdev->driver = NULL;
}
+unlock:
+ up(&hdev->driver_lock);
return ret;
}
static int hid_device_remove(struct device *dev)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct hid_driver *hdrv = hdev->driver;
+ struct hid_driver *hdrv;
+
+ if (down_interruptible(&hdev->driver_lock))
+ return -EINTR;
+ hdrv = hdev->driver;
if (hdrv) {
if (hdrv->remove)
hdrv->remove(hdev);
@@ -1652,6 +1701,7 @@ static int hid_device_remove(struct device *dev)
hdev->driver = NULL;
}
+ up(&hdev->driver_lock);
return 0;
}
@@ -1892,6 +1942,12 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
@@ -1999,6 +2055,7 @@ struct hid_device *hid_allocate_device(void)
init_waitqueue_head(&hdev->debug_wait);
INIT_LIST_HEAD(&hdev->debug_list);
+ sema_init(&hdev->driver_lock, 1);
return hdev;
err:
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index bae48745bb4..9a243ca96e6 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -450,6 +450,11 @@ void hid_dump_field(struct hid_field *field, int n, struct seq_file *f) {
seq_printf(f, "Logical(");
hid_resolv_usage(field->logical, f); seq_printf(f, ")\n");
}
+ if (field->application) {
+ tab(n, f);
+ seq_printf(f, "Application(");
+ hid_resolv_usage(field->application, f); seq_printf(f, ")\n");
+ }
tab(n, f); seq_printf(f, "Usage(%d)\n", field->maxusage);
for (j = 0; j < field->maxusage; j++) {
tab(n+2, f); hid_resolv_usage(field->usage[j].hid, f); seq_printf(f, "\n");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7484e1b6724..1680e99b481 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -112,6 +112,12 @@
#define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f
#define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250
#define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b
+#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
+#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
+#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
@@ -351,6 +357,9 @@
#define USB_DEVICE_ID_UGCI_FLYING 0x0020
#define USB_DEVICE_ID_UGCI_FIGHTING 0x0030
+#define USB_VENDOR_ID_IDEACOM 0x1cb6
+#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650
+
#define USB_VENDOR_ID_ILITEK 0x222a
#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001
@@ -423,6 +432,9 @@
#define USB_DEVICE_ID_LD_HYBRID 0x2090
#define USB_DEVICE_ID_LD_HEATCONTROL 0x20A0
+#define USB_VENDOR_ID_LG 0x1fd2
+#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
+
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
@@ -440,6 +452,7 @@
#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295
#define USB_DEVICE_ID_LOGITECH_DFP_WHEEL 0xc298
#define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299
+#define USB_DEVICE_ID_LOGITECH_DFGT_WHEEL 0xc29a
#define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b
#define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c
#define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a
@@ -447,6 +460,8 @@
#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
+#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER 0xc52b
+#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
@@ -678,6 +693,9 @@
#define USB_VENDOR_ID_WISEGROUP_LTD 0x6666
#define USB_VENDOR_ID_WISEGROUP_LTD2 0x6677
#define USB_DEVICE_ID_SMARTJOY_DUAL_PLUS 0x8802
+#define USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO 0x8801
+#define USB_DEVICE_ID_SUPER_DUAL_BOX_PRO 0x8802
+#define USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO 0x8804
#define USB_VENDOR_ID_X_TENSIONS 0x1ae7
#define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001
@@ -693,4 +711,7 @@
#define USB_VENDOR_ID_ZYDACRON 0x13EC
#define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL 0x0006
+#define USB_VENDOR_ID_PRIMAX 0x0461
+#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+
#endif
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 6559e2e3364..f333139d1a4 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -474,6 +474,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
map_key_clear(BTN_STYLUS2);
break;
+ case 0x51: /* ContactID */
+ device->quirks |= HID_QUIRK_MULTITOUCH;
+ goto unknown;
+
default: goto unknown;
}
break;
@@ -978,6 +982,13 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
}
}
+ if (hid->quirks & HID_QUIRK_MULTITOUCH) {
+ /* generic hid does not know how to handle multitouch devices */
+ if (hidinput)
+ goto out_cleanup;
+ goto out_unwind;
+ }
+
if (hidinput && input_register_device(hidinput->input))
goto out_cleanup;
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index a7f916e8fc3..e7a7bd1eb34 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -363,7 +363,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_free;
}
- if (quirks & (LG_FF | LG_FF2 | LG_FF3))
+ if (quirks & (LG_FF | LG_FF2 | LG_FF3 | LG_FF4))
connect_mask &= ~HID_CONNECT_FF;
ret = hid_hw_start(hdev, connect_mask);
@@ -372,7 +372,8 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_free;
}
- if (quirks & LG_FF4) {
+ /* Setup wireless link with Logitech Wii wheel */
+ if(hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
ret = hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
@@ -405,6 +406,15 @@ err_free:
return ret;
}
+static void lg_remove(struct hid_device *hdev)
+{
+ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ if(quirks & LG_FF4)
+ lg4ff_deinit(hdev);
+
+ hid_hw_stop(hdev);
+}
+
static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
.driver_data = LG_RDESC | LG_WIRELESS },
@@ -431,7 +441,7 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D),
.driver_data = LG_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL),
- .driver_data = LG_NOGET | LG_FF },
+ .driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD),
.driver_data = LG_FF2 },
@@ -444,15 +454,17 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO),
.driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL),
- .driver_data = LG_FF },
+ .driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2),
- .driver_data = LG_FF },
+ .driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
- .driver_data = LG_FF },
+ .driver_data = LG_FF4 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL),
+ .driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL),
- .driver_data = LG_FF },
+ .driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL),
- .driver_data = LG_NOGET | LG_FF },
+ .driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL),
.driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ),
@@ -478,6 +490,7 @@ static struct hid_driver lg_driver = {
.input_mapped = lg_input_mapped,
.event = lg_event,
.probe = lg_probe,
+ .remove = lg_remove,
};
static int __init lg_init(void)
diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h
index b0100ba2ae0..4b097286dc7 100644
--- a/drivers/hid/hid-lg.h
+++ b/drivers/hid/hid-lg.h
@@ -19,10 +19,12 @@ int lg3ff_init(struct hid_device *hdev);
static inline int lg3ff_init(struct hid_device *hdev) { return -1; }
#endif
-#ifdef CONFIG_LOGIWII_FF
+#ifdef CONFIG_LOGIWHEELS_FF
int lg4ff_init(struct hid_device *hdev);
+int lg4ff_deinit(struct hid_device *hdev);
#else
static inline int lg4ff_init(struct hid_device *hdev) { return -1; }
+static inline int lg4ff_deinit(struct hid_device *hdev) { return -1; }
#endif
#endif
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index fa550c8e1d1..103f30d93f7 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -29,19 +29,108 @@
#include "usbhid/usbhid.h"
#include "hid-lg.h"
+#include "hid-ids.h"
-struct lg4ff_device {
- struct hid_report *report;
+#define DFGT_REV_MAJ 0x13
+#define DFGT_REV_MIN 0x22
+#define DFP_REV_MAJ 0x11
+#define DFP_REV_MIN 0x06
+#define FFEX_REV_MAJ 0x21
+#define FFEX_REV_MIN 0x00
+#define G25_REV_MAJ 0x12
+#define G25_REV_MIN 0x22
+#define G27_REV_MAJ 0x12
+#define G27_REV_MIN 0x38
+
+#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
+
+static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
+static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range);
+static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf);
+static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
+
+static DEVICE_ATTR(range, S_IRWXU | S_IRWXG | S_IRWXO, lg4ff_range_show, lg4ff_range_store);
+
+static bool list_inited;
+
+struct lg4ff_device_entry {
+ char *device_id; /* Use name in respective kobject structure's address as the ID */
+ __u16 range;
+ __u16 min_range;
+ __u16 max_range;
+ __u8 leds;
+ struct list_head list;
+ void (*set_range)(struct hid_device *hid, u16 range);
};
-static const signed short ff4_wheel_ac[] = {
+static struct lg4ff_device_entry device_list;
+
+static const signed short lg4ff_wheel_effects[] = {
FF_CONSTANT,
FF_AUTOCENTER,
-1
};
-static int hid_lg4ff_play(struct input_dev *dev, void *data,
- struct ff_effect *effect)
+struct lg4ff_wheel {
+ const __u32 product_id;
+ const signed short *ff_effects;
+ const __u16 min_range;
+ const __u16 max_range;
+ void (*set_range)(struct hid_device *hid, u16 range);
+};
+
+static const struct lg4ff_wheel lg4ff_devices[] = {
+ {USB_DEVICE_ID_LOGITECH_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
+ {USB_DEVICE_ID_LOGITECH_MOMO_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
+ {USB_DEVICE_ID_LOGITECH_DFP_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_dfp},
+ {USB_DEVICE_ID_LOGITECH_G25_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
+ {USB_DEVICE_ID_LOGITECH_DFGT_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
+ {USB_DEVICE_ID_LOGITECH_G27_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
+ {USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2, lg4ff_wheel_effects, 40, 270, NULL},
+ {USB_DEVICE_ID_LOGITECH_WII_WHEEL, lg4ff_wheel_effects, 40, 270, NULL}
+};
+
+struct lg4ff_native_cmd {
+ const __u8 cmd_num; /* Number of commands to send */
+ const __u8 cmd[];
+};
+
+struct lg4ff_usb_revision {
+ const __u16 rev_maj;
+ const __u16 rev_min;
+ const struct lg4ff_native_cmd *command;
+};
+
+static const struct lg4ff_native_cmd native_dfp = {
+ 1,
+ {0xf8, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct lg4ff_native_cmd native_dfgt = {
+ 2,
+ {0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1st command */
+ 0xf8, 0x09, 0x03, 0x01, 0x00, 0x00, 0x00} /* 2nd command */
+};
+
+static const struct lg4ff_native_cmd native_g25 = {
+ 1,
+ {0xf8, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct lg4ff_native_cmd native_g27 = {
+ 2,
+ {0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1st command */
+ 0xf8, 0x09, 0x04, 0x01, 0x00, 0x00, 0x00} /* 2nd command */
+};
+
+static const struct lg4ff_usb_revision lg4ff_revs[] = {
+ {DFGT_REV_MAJ, DFGT_REV_MIN, &native_dfgt}, /* Driving Force GT */
+ {DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */
+ {G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */
+ {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */
+};
+
+static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
{
struct hid_device *hid = input_get_drvdata(dev);
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
@@ -55,13 +144,12 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data,
x = effect->u.ramp.start_level + 0x80; /* 0x80 is no force */
CLAMP(x);
report->field[0]->value[0] = 0x11; /* Slot 1 */
- report->field[0]->value[1] = 0x10;
+ report->field[0]->value[1] = 0x08;
report->field[0]->value[2] = x;
- report->field[0]->value[3] = 0x00;
+ report->field[0]->value[3] = 0x80;
report->field[0]->value[4] = 0x00;
- report->field[0]->value[5] = 0x08;
+ report->field[0]->value[5] = 0x00;
report->field[0]->value[6] = 0x00;
- dbg_hid("Autocenter, x=0x%02X\n", x);
usbhid_submit_report(hid, report, USB_DIR_OUT);
break;
@@ -69,24 +157,184 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data,
return 0;
}
-static void hid_lg4ff_set_autocenter(struct input_dev *dev, u16 magnitude)
+/* Sends default autocentering command compatible with
+ * all wheels except Formula Force EX */
+static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude)
{
struct hid_device *hid = input_get_drvdata(dev);
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
- __s32 *value = report->field[0]->value;
- *value++ = 0xfe;
- *value++ = 0x0d;
- *value++ = 0x07;
- *value++ = 0x07;
- *value++ = (magnitude >> 8) & 0xff;
- *value++ = 0x00;
- *value = 0x00;
+ report->field[0]->value[0] = 0xfe;
+ report->field[0]->value[1] = 0x0d;
+ report->field[0]->value[2] = magnitude >> 13;
+ report->field[0]->value[3] = magnitude >> 13;
+ report->field[0]->value[4] = magnitude >> 8;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+}
+
+/* Sends autocentering command compatible with Formula Force EX */
+static void hid_lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ magnitude = magnitude * 90 / 65535;
+
+
+ report->field[0]->value[0] = 0xfe;
+ report->field[0]->value[1] = 0x03;
+ report->field[0]->value[2] = magnitude >> 14;
+ report->field[0]->value[3] = magnitude >> 14;
+ report->field[0]->value[4] = magnitude;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+}
+
+/* Sends command to set range compatible with G25/G27/Driving Force GT */
+static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range)
+{
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ dbg_hid("G25/G27/DFGT: setting range to %u\n", range);
+
+ report->field[0]->value[0] = 0xf8;
+ report->field[0]->value[1] = 0x81;
+ report->field[0]->value[2] = range & 0x00ff;
+ report->field[0]->value[3] = (range & 0xff00) >> 8;
+ report->field[0]->value[4] = 0x00;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+}
+
+/* Sends commands to set range compatible with Driving Force Pro wheel */
+static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range)
+{
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ int start_left, start_right, full_range;
+ dbg_hid("Driving Force Pro: setting range to %u\n", range);
+
+ /* Prepare "coarse" limit command */
+ report->field[0]->value[0] = 0xf8;
+ report->field[0]->value[1] = 0x00; /* Set later */
+ report->field[0]->value[2] = 0x00;
+ report->field[0]->value[3] = 0x00;
+ report->field[0]->value[4] = 0x00;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ if (range > 200) {
+ report->field[0]->value[1] = 0x03;
+ full_range = 900;
+ } else {
+ report->field[0]->value[1] = 0x02;
+ full_range = 200;
+ }
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+
+ /* Prepare "fine" limit command */
+ report->field[0]->value[0] = 0x81;
+ report->field[0]->value[1] = 0x0b;
+ report->field[0]->value[2] = 0x00;
+ report->field[0]->value[3] = 0x00;
+ report->field[0]->value[4] = 0x00;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ if (range == 200 || range == 900) { /* Do not apply any fine limit */
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+ return;
+ }
+
+ /* Construct fine limit command */
+ start_left = (((full_range - range + 1) * 2047) / full_range);
+ start_right = 0xfff - start_left;
+
+ report->field[0]->value[2] = start_left >> 4;
+ report->field[0]->value[3] = start_right >> 4;
+ report->field[0]->value[4] = 0xff;
+ report->field[0]->value[5] = (start_right & 0xe) << 4 | (start_left & 0xe);
+ report->field[0]->value[6] = 0xff;
usbhid_submit_report(hid, report, USB_DIR_OUT);
}
+static void hid_lg4ff_switch_native(struct hid_device *hid, const struct lg4ff_native_cmd *cmd)
+{
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ __u8 i, j;
+
+ j = 0;
+ while (j < 7*cmd->cmd_num) {
+ for (i = 0; i < 7; i++)
+ report->field[0]->value[i] = cmd->cmd[j++];
+
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+ }
+}
+
+/* Read current range and display it in terminal */
+static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct lg4ff_device_entry *uninitialized_var(entry);
+ struct list_head *h;
+ struct hid_device *hid = to_hid_device(dev);
+ size_t count;
+
+ list_for_each(h, &device_list.list) {
+ entry = list_entry(h, struct lg4ff_device_entry, list);
+ if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0)
+ break;
+ }
+ if (h == &device_list.list) {
+ dbg_hid("Device not found!");
+ return 0;
+ }
+
+ count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->range);
+ return count;
+}
+
+/* Set range to user specified value, call appropriate function
+ * according to the type of the wheel */
+static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct lg4ff_device_entry *uninitialized_var(entry);
+ struct list_head *h;
+ struct hid_device *hid = to_hid_device(dev);
+ __u16 range = simple_strtoul(buf, NULL, 10);
+
+ list_for_each(h, &device_list.list) {
+ entry = list_entry(h, struct lg4ff_device_entry, list);
+ if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0)
+ break;
+ }
+ if (h == &device_list.list) {
+ dbg_hid("Device not found!");
+ return count;
+ }
+
+ if (range == 0)
+ range = entry->max_range;
+
+ /* Check if the wheel supports range setting
+ * and that the range is within limits for the wheel */
+ if (entry->set_range != NULL && range >= entry->min_range && range <= entry->max_range) {
+ entry->set_range(hid, range);
+ entry->range = range;
+ }
+
+ return count;
+}
int lg4ff_init(struct hid_device *hid)
{
@@ -95,9 +343,10 @@ int lg4ff_init(struct hid_device *hid)
struct input_dev *dev = hidinput->input;
struct hid_report *report;
struct hid_field *field;
- const signed short *ff_bits = ff4_wheel_ac;
- int error;
- int i;
+ struct lg4ff_device_entry *entry;
+ struct usb_device_descriptor *udesc;
+ int error, i, j;
+ __u16 bcdDevice, rev_maj, rev_min;
/* Find the report to use */
if (list_empty(report_list)) {
@@ -118,18 +367,122 @@ int lg4ff_init(struct hid_device *hid)
return -1;
}
- for (i = 0; ff_bits[i] >= 0; i++)
- set_bit(ff_bits[i], dev->ffbit);
+ /* Check what wheel has been connected */
+ for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
+ if (hid->product == lg4ff_devices[i].product_id) {
+ dbg_hid("Found compatible device, product ID %04X\n", lg4ff_devices[i].product_id);
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(lg4ff_devices)) {
+ hid_err(hid, "Device is not supported by lg4ff driver. If you think it should be, consider reporting a bug to"
+ "LKML, Simon Wood <simon@mungewell.org> or Michal Maly <madcatxster@gmail.com>\n");
+ return -1;
+ }
+
+ /* Attempt to switch wheel to native mode when applicable */
+ udesc = &(hid_to_usb_dev(hid)->descriptor);
+ if (!udesc) {
+ hid_err(hid, "NULL USB device descriptor\n");
+ return -1;
+ }
+ bcdDevice = le16_to_cpu(udesc->bcdDevice);
+ rev_maj = bcdDevice >> 8;
+ rev_min = bcdDevice & 0xff;
+
+ if (lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_WHEEL) {
+ dbg_hid("Generic wheel detected, can it do native?\n");
+ dbg_hid("USB revision: %2x.%02x\n", rev_maj, rev_min);
+
+ for (j = 0; j < ARRAY_SIZE(lg4ff_revs); j++) {
+ if (lg4ff_revs[j].rev_maj == rev_maj && lg4ff_revs[j].rev_min == rev_min) {
+ hid_lg4ff_switch_native(hid, lg4ff_revs[j].command);
+ hid_info(hid, "Switched to native mode\n");
+ }
+ }
+ }
+
+ /* Set supported force feedback capabilities */
+ for (j = 0; lg4ff_devices[i].ff_effects[j] >= 0; j++)
+ set_bit(lg4ff_devices[i].ff_effects[j], dev->ffbit);
error = input_ff_create_memless(dev, NULL, hid_lg4ff_play);
if (error)
return error;
- if (test_bit(FF_AUTOCENTER, dev->ffbit))
- dev->ff->set_autocenter = hid_lg4ff_set_autocenter;
+ /* Check if autocentering is available and
+ * set the centering force to zero by default */
+ if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
+ if(rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */
+ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
+ else
+ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
+
+ dev->ff->set_autocenter(dev, 0);
+ }
+
+ /* Initialize device_list if this is the first device to handle by lg4ff */
+ if (!list_inited) {
+ INIT_LIST_HEAD(&device_list.list);
+ list_inited = 1;
+ }
+
+ /* Add the device to device_list */
+ entry = (struct lg4ff_device_entry *)kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
+ if (!entry) {
+ hid_err(hid, "Cannot add device, insufficient memory.\n");
+ return -ENOMEM;
+ }
+ entry->device_id = kstrdup((&hid->dev)->kobj.name, GFP_KERNEL);
+ if (!entry->device_id) {
+ hid_err(hid, "Cannot set device_id, insufficient memory.\n");
+ kfree(entry);
+ return -ENOMEM;
+ }
+ entry->min_range = lg4ff_devices[i].min_range;
+ entry->max_range = lg4ff_devices[i].max_range;
+ entry->set_range = lg4ff_devices[i].set_range;
+ list_add(&entry->list, &device_list.list);
+
+ /* Create sysfs interface */
+ error = device_create_file(&hid->dev, &dev_attr_range);
+ if (error)
+ return error;
+ dbg_hid("sysfs interface created\n");
+
+ /* Set the maximum range to start with */
+ entry->range = entry->max_range;
+ if (entry->set_range != NULL)
+ entry->set_range(hid, entry->range);
hid_info(hid, "Force feedback for Logitech Speed Force Wireless by Simon Wood <simon@mungewell.org>\n");
return 0;
}
+int lg4ff_deinit(struct hid_device *hid)
+{
+ bool found = 0;
+ struct lg4ff_device_entry *entry;
+ struct list_head *h, *g;
+ list_for_each_safe(h, g, &device_list.list) {
+ entry = list_entry(h, struct lg4ff_device_entry, list);
+ if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0) {
+ list_del(h);
+ kfree(entry->device_id);
+ kfree(entry);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ dbg_hid("Device entry not found!\n");
+ return -1;
+ }
+
+ device_remove_file(&hid->dev, &dev_attr_range);
+ dbg_hid("Device successfully unregistered\n");
+ return 0;
+}
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index 088f8504929..27bc54f92f4 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -58,12 +58,6 @@ static const signed short ff_joystick_ac[] = {
-1
};
-static const signed short ff_wheel[] = {
- FF_CONSTANT,
- FF_AUTOCENTER,
- -1
-};
-
static const struct dev_type devices[] = {
{ 0x046d, 0xc211, ff_rumble },
{ 0x046d, 0xc219, ff_rumble },
@@ -71,14 +65,7 @@ static const struct dev_type devices[] = {
{ 0x046d, 0xc286, ff_joystick_ac },
{ 0x046d, 0xc287, ff_joystick_ac },
{ 0x046d, 0xc293, ff_joystick },
- { 0x046d, 0xc294, ff_wheel },
- { 0x046d, 0xc298, ff_wheel },
- { 0x046d, 0xc299, ff_wheel },
- { 0x046d, 0xc29b, ff_wheel },
{ 0x046d, 0xc295, ff_joystick },
- { 0x046d, 0xc298, ff_wheel },
- { 0x046d, 0xc299, ff_wheel },
- { 0x046d, 0xca03, ff_wheel },
};
static int hid_lgff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
new file mode 100644
index 00000000000..38b12e45780
--- /dev/null
+++ b/drivers/hid/hid-logitech-dj.c
@@ -0,0 +1,922 @@
+/*
+ * HID driver for Logitech Unifying receivers
+ *
+ * Copyright (c) 2011 Logitech
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+#include "hid-logitech-dj.h"
+
+/* Keyboard descriptor (1) */
+static const char kbd_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (generic Desktop) */
+ 0x09, 0x06, /* USAGE (Keyboard) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x01, /* REPORT_ID (1) */
+ 0x95, 0x08, /* REPORT_COUNT (8) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x25, 0x01, /* LOGICAL_MAXIMUM (1) */
+ 0x05, 0x07, /* USAGE_PAGE (Keyboard) */
+ 0x19, 0xE0, /* USAGE_MINIMUM (Left Control) */
+ 0x29, 0xE7, /* USAGE_MAXIMUM (Right GUI) */
+ 0x81, 0x02, /* INPUT (Data,Var,Abs) */
+ 0x95, 0x05, /* REPORT COUNT (5) */
+ 0x05, 0x08, /* USAGE PAGE (LED page) */
+ 0x19, 0x01, /* USAGE MINIMUM (1) */
+ 0x29, 0x05, /* USAGE MAXIMUM (5) */
+ 0x91, 0x02, /* OUTPUT (Data, Variable, Absolute) */
+ 0x95, 0x01, /* REPORT COUNT (1) */
+ 0x75, 0x03, /* REPORT SIZE (3) */
+ 0x91, 0x01, /* OUTPUT (Constant) */
+ 0x95, 0x06, /* REPORT_COUNT (6) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x26, 0xFF, 0x00, /* LOGICAL_MAXIMUM (255) */
+ 0x05, 0x07, /* USAGE_PAGE (Keyboard) */
+ 0x19, 0x00, /* USAGE_MINIMUM (no event) */
+ 0x2A, 0xFF, 0x00, /* USAGE_MAXIMUM (reserved) */
+ 0x81, 0x00, /* INPUT (Data,Ary,Abs) */
+ 0xC0
+};
+
+/* Mouse descriptor (2) */
+static const char mse_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID = 2 */
+ 0x09, 0x01, /* USAGE (pointer) */
+ 0xA1, 0x00, /* COLLECTION (physical) */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x29, 0x10, /* USAGE_MAX (16) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x95, 0x10, /* REPORT_COUNT (16) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (data var abs) */
+ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */
+ 0x16, 0x01, 0xF8, /* LOGICAL_MIN (-2047) */
+ 0x26, 0xFF, 0x07, /* LOGICAL_MAX (2047) */
+ 0x75, 0x0C, /* REPORT_SIZE (12) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x81, 0x06, /* INPUT */
+ 0x15, 0x81, /* LOGICAL_MIN (-127) */
+ 0x25, 0x7F, /* LOGICAL_MAX (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x09, 0x38, /* USAGE (wheel) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x0C, /* USAGE_PAGE(consumer) */
+ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x81, 0x06, /* INPUT */
+ 0xC0, /* END_COLLECTION */
+ 0xC0, /* END_COLLECTION */
+};
+
+/* Consumer Control descriptor (3) */
+static const char consumer_descriptor[] = {
+ 0x05, 0x0C, /* USAGE_PAGE (Consumer Devices) */
+ 0x09, 0x01, /* USAGE (Consumer Control) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x03, /* REPORT_ID = 3 */
+ 0x75, 0x10, /* REPORT_SIZE (16) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x15, 0x01, /* LOGICAL_MIN (1) */
+ 0x26, 0x8C, 0x02, /* LOGICAL_MAX (652) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x2A, 0x8C, 0x02, /* USAGE_MAX (652) */
+ 0x81, 0x00, /* INPUT (Data Ary Abs) */
+ 0xC0, /* END_COLLECTION */
+}; /* */
+
+/* System control descriptor (4) */
+static const char syscontrol_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x80, /* USAGE (System Control) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x04, /* REPORT_ID = 4 */
+ 0x75, 0x02, /* REPORT_SIZE (2) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x15, 0x01, /* LOGICAL_MIN (1) */
+ 0x25, 0x03, /* LOGICAL_MAX (3) */
+ 0x09, 0x82, /* USAGE (System Sleep) */
+ 0x09, 0x81, /* USAGE (System Power Down) */
+ 0x09, 0x83, /* USAGE (System Wake Up) */
+ 0x81, 0x60, /* INPUT (Data Ary Abs NPrf Null) */
+ 0x75, 0x06, /* REPORT_SIZE (6) */
+ 0x81, 0x03, /* INPUT (Cnst Var Abs) */
+ 0xC0, /* END_COLLECTION */
+};
+
+/* Media descriptor (8) */
+static const char media_descriptor[] = {
+ 0x06, 0xbc, 0xff, /* Usage Page 0xffbc */
+ 0x09, 0x88, /* Usage 0x0088 */
+ 0xa1, 0x01, /* BeginCollection */
+ 0x85, 0x08, /* Report ID 8 */
+ 0x19, 0x01, /* Usage Min 0x0001 */
+ 0x29, 0xff, /* Usage Max 0x00ff */
+ 0x15, 0x01, /* Logical Min 1 */
+ 0x26, 0xff, 0x00, /* Logical Max 255 */
+ 0x75, 0x08, /* Report Size 8 */
+ 0x95, 0x01, /* Report Count 1 */
+ 0x81, 0x00, /* Input */
+ 0xc0, /* EndCollection */
+}; /* */
+
+/* Maximum size of all defined hid reports in bytes (including report id) */
+#define MAX_REPORT_SIZE 8
+
+/* Number of possible hid report types that can be created by this driver.
+ *
+ * Right now, RF report types have the same report types (or report id's)
+ * than the hid report created from those RF reports. In the future
+ * this doesnt have to be true.
+ *
+ * For instance, RF report type 0x01 which has a size of 8 bytes, corresponds
+ * to hid report id 0x01, this is standard keyboard. Same thing applies to mice
+ * reports and consumer control, etc. If a new RF report is created, it doesn't
+ * has to have the same report id as its corresponding hid report, so an
+ * translation may have to take place for future report types.
+ */
+#define NUMBER_OF_HID_REPORTS 32
+static const u8 hid_reportid_size_map[NUMBER_OF_HID_REPORTS] = {
+ [1] = 8, /* Standard keyboard */
+ [2] = 8, /* Standard mouse */
+ [3] = 5, /* Consumer control */
+ [4] = 2, /* System control */
+ [8] = 2, /* Media Center */
+};
+
+
+#define LOGITECH_DJ_INTERFACE_NUMBER 0x02
+
+static struct hid_ll_driver logi_dj_ll_driver;
+
+static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
+ size_t count,
+ unsigned char report_type);
+
+static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* Called in delayed work context */
+ struct dj_device *dj_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ dj_dev = djrcv_dev->paired_dj_devices[dj_report->device_index];
+ djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+ if (dj_dev != NULL) {
+ hid_destroy_device(dj_dev->hdev);
+ kfree(dj_dev);
+ } else {
+ dev_err(&djrcv_dev->hdev->dev, "%s: can't destroy a NULL device\n",
+ __func__);
+ }
+}
+
+static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* Called in delayed work context */
+ struct hid_device *djrcv_hdev = djrcv_dev->hdev;
+ struct usb_interface *intf = to_usb_interface(djrcv_hdev->dev.parent);
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct hid_device *dj_hiddev;
+ struct dj_device *dj_dev;
+
+ /* Device index goes from 1 to 6, we need 3 bytes to store the
+ * semicolon, the index, and a null terminator
+ */
+ unsigned char tmpstr[3];
+
+ if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
+ SPFUNCTION_DEVICE_LIST_EMPTY) {
+ dbg_hid("%s: device list is empty\n", __func__);
+ return;
+ }
+
+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+ dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
+ __func__, dj_report->device_index);
+ return;
+ }
+
+ dj_hiddev = hid_allocate_device();
+ if (IS_ERR(dj_hiddev)) {
+ dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
+ __func__);
+ return;
+ }
+
+ dj_hiddev->ll_driver = &logi_dj_ll_driver;
+ dj_hiddev->hid_output_raw_report = logi_dj_output_hidraw_report;
+
+ dj_hiddev->dev.parent = &djrcv_hdev->dev;
+ dj_hiddev->bus = BUS_USB;
+ dj_hiddev->vendor = le16_to_cpu(usbdev->descriptor.idVendor);
+ dj_hiddev->product = le16_to_cpu(usbdev->descriptor.idProduct);
+ snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
+ "Logitech Unifying Device. Wireless PID:%02x%02x",
+ dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB],
+ dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB]);
+
+ usb_make_path(usbdev, dj_hiddev->phys, sizeof(dj_hiddev->phys));
+ snprintf(tmpstr, sizeof(tmpstr), ":%d", dj_report->device_index);
+ strlcat(dj_hiddev->phys, tmpstr, sizeof(dj_hiddev->phys));
+
+ dj_dev = kzalloc(sizeof(struct dj_device), GFP_KERNEL);
+
+ if (!dj_dev) {
+ dev_err(&djrcv_hdev->dev, "%s: failed allocating dj_device\n",
+ __func__);
+ goto dj_device_allocate_fail;
+ }
+
+ dj_dev->reports_supported = le32_to_cpu(
+ dj_report->report_params[DEVICE_PAIRED_RF_REPORT_TYPE]);
+ dj_dev->hdev = dj_hiddev;
+ dj_dev->dj_receiver_dev = djrcv_dev;
+ dj_dev->device_index = dj_report->device_index;
+ dj_hiddev->driver_data = dj_dev;
+
+ djrcv_dev->paired_dj_devices[dj_report->device_index] = dj_dev;
+
+ if (hid_add_device(dj_hiddev)) {
+ dev_err(&djrcv_hdev->dev, "%s: failed adding dj_device\n",
+ __func__);
+ goto hid_add_device_fail;
+ }
+
+ return;
+
+hid_add_device_fail:
+ djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+ kfree(dj_dev);
+dj_device_allocate_fail:
+ hid_destroy_device(dj_hiddev);
+}
+
+static void delayedwork_callback(struct work_struct *work)
+{
+ struct dj_receiver_dev *djrcv_dev =
+ container_of(work, struct dj_receiver_dev, work);
+
+ struct dj_report dj_report;
+ unsigned long flags;
+ int count;
+
+ dbg_hid("%s\n", __func__);
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+
+ count = kfifo_out(&djrcv_dev->notif_fifo, &dj_report,
+ sizeof(struct dj_report));
+
+ if (count != sizeof(struct dj_report)) {
+ dev_err(&djrcv_dev->hdev->dev, "%s: workitem triggered without "
+ "notifications available\n", __func__);
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+ return;
+ }
+
+ if (!kfifo_is_empty(&djrcv_dev->notif_fifo)) {
+ if (schedule_work(&djrcv_dev->work) == 0) {
+ dbg_hid("%s: did not schedule the work item, was "
+ "already queued\n", __func__);
+ }
+ }
+
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+ switch (dj_report.report_type) {
+ case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+ logi_dj_recv_add_djhid_device(djrcv_dev, &dj_report);
+ break;
+ case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+ logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
+ break;
+ default:
+ dbg_hid("%s: unexpected report type\n", __func__);
+ }
+}
+
+static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* We are called from atomic context (tasklet && djrcv->lock held) */
+
+ kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
+
+ if (schedule_work(&djrcv_dev->work) == 0) {
+ dbg_hid("%s: did not schedule the work item, was already "
+ "queued\n", __func__);
+ }
+}
+
+static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* We are called from atomic context (tasklet && djrcv->lock held) */
+ unsigned int i;
+ u8 reportbuffer[MAX_REPORT_SIZE];
+ struct dj_device *djdev;
+
+ djdev = djrcv_dev->paired_dj_devices[dj_report->device_index];
+
+ if (!djdev) {
+ dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
+ " is NULL, index %d\n", dj_report->device_index);
+ return;
+ }
+
+ memset(reportbuffer, 0, sizeof(reportbuffer));
+
+ for (i = 0; i < NUMBER_OF_HID_REPORTS; i++) {
+ if (djdev->reports_supported & (1 << i)) {
+ reportbuffer[0] = i;
+ if (hid_input_report(djdev->hdev,
+ HID_INPUT_REPORT,
+ reportbuffer,
+ hid_reportid_size_map[i], 1)) {
+ dbg_hid("hid_input_report error sending null "
+ "report\n");
+ }
+ }
+ }
+}
+
+static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* We are called from atomic context (tasklet && djrcv->lock held) */
+ struct dj_device *dj_device;
+
+ dj_device = djrcv_dev->paired_dj_devices[dj_report->device_index];
+
+ if (dj_device == NULL) {
+ dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
+ " is NULL, index %d\n", dj_report->device_index);
+ return;
+ }
+
+ if ((dj_report->report_type > ARRAY_SIZE(hid_reportid_size_map) - 1) ||
+ (hid_reportid_size_map[dj_report->report_type] == 0)) {
+ dbg_hid("invalid report type:%x\n", dj_report->report_type);
+ return;
+ }
+
+ if (hid_input_report(dj_device->hdev,
+ HID_INPUT_REPORT, &dj_report->report_type,
+ hid_reportid_size_map[dj_report->report_type], 1)) {
+ dbg_hid("hid_input_report error\n");
+ }
+}
+
+
+static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ struct hid_device *hdev = djrcv_dev->hdev;
+ int sent_bytes;
+
+ if (!hdev->hid_output_raw_report) {
+ dev_err(&hdev->dev, "%s:"
+ "hid_output_raw_report is null\n", __func__);
+ return -ENODEV;
+ }
+
+ sent_bytes = hdev->hid_output_raw_report(hdev, (u8 *) dj_report,
+ sizeof(struct dj_report),
+ HID_OUTPUT_REPORT);
+
+ return (sent_bytes < 0) ? sent_bytes : 0;
+}
+
+static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
+{
+ struct dj_report dj_report;
+
+ memset(&dj_report, 0, sizeof(dj_report));
+ dj_report.report_id = REPORT_ID_DJ_SHORT;
+ dj_report.device_index = 0xFF;
+ dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
+ return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+}
+
+static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ unsigned timeout)
+{
+ struct dj_report dj_report;
+
+ memset(&dj_report, 0, sizeof(dj_report));
+ dj_report.report_id = REPORT_ID_DJ_SHORT;
+ dj_report.device_index = 0xFF;
+ dj_report.report_type = REPORT_TYPE_CMD_SWITCH;
+ dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x1F;
+ dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+ return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+}
+
+
+static int logi_dj_ll_open(struct hid_device *hid)
+{
+ dbg_hid("%s:%s\n", __func__, hid->phys);
+ return 0;
+
+}
+
+static void logi_dj_ll_close(struct hid_device *hid)
+{
+ dbg_hid("%s:%s\n", __func__, hid->phys);
+}
+
+static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
+ size_t count,
+ unsigned char report_type)
+{
+ /* Called by hid raw to send data */
+ dbg_hid("%s\n", __func__);
+
+ return 0;
+}
+
+static int logi_dj_ll_parse(struct hid_device *hid)
+{
+ struct dj_device *djdev = hid->driver_data;
+ int retval;
+
+ dbg_hid("%s\n", __func__);
+
+ djdev->hdev->version = 0x0111;
+ djdev->hdev->country = 0x00;
+
+ if (djdev->reports_supported & STD_KEYBOARD) {
+ dbg_hid("%s: sending a kbd descriptor, reports_supported: %x\n",
+ __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) kbd_descriptor,
+ sizeof(kbd_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a kbd descriptor, hid_parse failed"
+ " error: %d\n", __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & STD_MOUSE) {
+ dbg_hid("%s: sending a mouse descriptor, reports_supported: "
+ "%x\n", __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) mse_descriptor,
+ sizeof(mse_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a mouse descriptor, hid_parse "
+ "failed error: %d\n", __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & MULTIMEDIA) {
+ dbg_hid("%s: sending a multimedia report descriptor: %x\n",
+ __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) consumer_descriptor,
+ sizeof(consumer_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a consumer_descriptor, hid_parse "
+ "failed error: %d\n", __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & POWER_KEYS) {
+ dbg_hid("%s: sending a power keys report descriptor: %x\n",
+ __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) syscontrol_descriptor,
+ sizeof(syscontrol_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a syscontrol_descriptor, "
+ "hid_parse failed error: %d\n",
+ __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & MEDIA_CENTER) {
+ dbg_hid("%s: sending a media center report descriptor: %x\n",
+ __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) media_descriptor,
+ sizeof(media_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a media_descriptor, hid_parse "
+ "failed error: %d\n", __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & KBD_LEDS) {
+ dbg_hid("%s: need to send kbd leds report descriptor: %x\n",
+ __func__, djdev->reports_supported);
+ }
+
+ return 0;
+}
+
+static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
+{
+ /* Sent by the input layer to handle leds and Force Feedback */
+ struct hid_device *dj_hiddev = input_get_drvdata(dev);
+ struct dj_device *dj_dev = dj_hiddev->driver_data;
+
+ struct dj_receiver_dev *djrcv_dev =
+ dev_get_drvdata(dj_hiddev->dev.parent);
+ struct hid_device *dj_rcv_hiddev = djrcv_dev->hdev;
+ struct hid_report_enum *output_report_enum;
+
+ struct hid_field *field;
+ struct hid_report *report;
+ unsigned char data[8];
+ int offset;
+
+ dbg_hid("%s: %s, type:%d | code:%d | value:%d\n",
+ __func__, dev->phys, type, code, value);
+
+ if (type != EV_LED)
+ return -1;
+
+ offset = hidinput_find_field(dj_hiddev, type, code, &field);
+
+ if (offset == -1) {
+ dev_warn(&dev->dev, "event field not found\n");
+ return -1;
+ }
+ hid_set_field(field, offset, value);
+ hid_output_report(field->report, &data[0]);
+
+ output_report_enum = &dj_rcv_hiddev->report_enum[HID_OUTPUT_REPORT];
+ report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
+ hid_set_field(report->field[0], 0, dj_dev->device_index);
+ hid_set_field(report->field[0], 1, REPORT_TYPE_LEDS);
+ hid_set_field(report->field[0], 2, data[1]);
+
+ usbhid_submit_report(dj_rcv_hiddev, report, USB_DIR_OUT);
+
+ return 0;
+
+}
+
+static int logi_dj_ll_start(struct hid_device *hid)
+{
+ dbg_hid("%s\n", __func__);
+ return 0;
+}
+
+static void logi_dj_ll_stop(struct hid_device *hid)
+{
+ dbg_hid("%s\n", __func__);
+}
+
+
+static struct hid_ll_driver logi_dj_ll_driver = {
+ .parse = logi_dj_ll_parse,
+ .start = logi_dj_ll_start,
+ .stop = logi_dj_ll_stop,
+ .open = logi_dj_ll_open,
+ .close = logi_dj_ll_close,
+ .hidinput_input_event = logi_dj_ll_input_event,
+};
+
+
+static int logi_dj_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data,
+ int size)
+{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ struct dj_report *dj_report = (struct dj_report *) data;
+ unsigned long flags;
+ bool report_processed = false;
+
+ dbg_hid("%s, size:%d\n", __func__, size);
+
+ /* Here we receive all data coming from iface 2, there are 4 cases:
+ *
+ * 1) Data should continue its normal processing i.e. data does not
+ * come from the DJ collection, in which case we do nothing and
+ * return 0, so hid-core can continue normal processing (will forward
+ * to associated hidraw device)
+ *
+ * 2) Data is from DJ collection, and is intended for this driver i. e.
+ * data contains arrival, departure, etc notifications, in which case
+ * we queue them for delayed processing by the work queue. We return 1
+ * to hid-core as no further processing is required from it.
+ *
+ * 3) Data is from DJ collection, and informs a connection change,
+ * if the change means rf link loss, then we must send a null report
+ * to the upper layer to discard potentially pressed keys that may be
+ * repeated forever by the input layer. Return 1 to hid-core as no
+ * further processing is required.
+ *
+ * 4) Data is from DJ collection and is an actual input event from
+ * a paired DJ device in which case we forward it to the correct hid
+ * device (via hid_input_report() ) and return 1 so hid-core does not do
+ * anything else with it.
+ */
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
+ switch (dj_report->report_type) {
+ case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+ case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+ logi_dj_recv_queue_notification(djrcv_dev, dj_report);
+ break;
+ case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
+ if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
+ STATUS_LINKLOSS) {
+ logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
+ }
+ break;
+ default:
+ logi_dj_recv_forward_report(djrcv_dev, dj_report);
+ }
+ report_processed = true;
+ }
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+ return report_processed;
+}
+
+static int logi_dj_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct dj_receiver_dev *djrcv_dev;
+ int retval;
+
+ if (is_dj_device((struct dj_device *)hdev->driver_data))
+ return -ENODEV;
+
+ dbg_hid("%s called for ifnum %d\n", __func__,
+ intf->cur_altsetting->desc.bInterfaceNumber);
+
+ /* Ignore interfaces 0 and 1, they will not carry any data, dont create
+ * any hid_device for them */
+ if (intf->cur_altsetting->desc.bInterfaceNumber !=
+ LOGITECH_DJ_INTERFACE_NUMBER) {
+ dbg_hid("%s: ignoring ifnum %d\n", __func__,
+ intf->cur_altsetting->desc.bInterfaceNumber);
+ return -ENODEV;
+ }
+
+ /* Treat interface 2 */
+
+ djrcv_dev = kzalloc(sizeof(struct dj_receiver_dev), GFP_KERNEL);
+ if (!djrcv_dev) {
+ dev_err(&hdev->dev,
+ "%s:failed allocating dj_receiver_dev\n", __func__);
+ return -ENOMEM;
+ }
+ djrcv_dev->hdev = hdev;
+ INIT_WORK(&djrcv_dev->work, delayedwork_callback);
+ spin_lock_init(&djrcv_dev->lock);
+ if (kfifo_alloc(&djrcv_dev->notif_fifo,
+ DJ_MAX_NUMBER_NOTIFICATIONS * sizeof(struct dj_report),
+ GFP_KERNEL)) {
+ dev_err(&hdev->dev,
+ "%s:failed allocating notif_fifo\n", __func__);
+ kfree(djrcv_dev);
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, djrcv_dev);
+
+ /* Call to usbhid to fetch the HID descriptors of interface 2 and
+ * subsequently call to the hid/hid-core to parse the fetched
+ * descriptors, this will in turn create the hidraw and hiddev nodes
+ * for interface 2 of the receiver */
+ retval = hid_parse(hdev);
+ if (retval) {
+ dev_err(&hdev->dev,
+ "%s:parse of interface 2 failed\n", __func__);
+ goto hid_parse_fail;
+ }
+
+ /* Starts the usb device and connects to upper interfaces hiddev and
+ * hidraw */
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ dev_err(&hdev->dev,
+ "%s:hid_hw_start returned error\n", __func__);
+ goto hid_hw_start_fail;
+ }
+
+ retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
+ if (retval < 0) {
+ dev_err(&hdev->dev,
+ "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+ __func__, retval);
+ goto switch_to_dj_mode_fail;
+ }
+
+ /* This is enabling the polling urb on the IN endpoint */
+ retval = hdev->ll_driver->open(hdev);
+ if (retval < 0) {
+ dev_err(&hdev->dev, "%s:hdev->ll_driver->open returned "
+ "error:%d\n", __func__, retval);
+ goto llopen_failed;
+ }
+
+ retval = logi_dj_recv_query_paired_devices(djrcv_dev);
+ if (retval < 0) {
+ dev_err(&hdev->dev, "%s:logi_dj_recv_query_paired_devices "
+ "error:%d\n", __func__, retval);
+ goto logi_dj_recv_query_paired_devices_failed;
+ }
+
+ return retval;
+
+logi_dj_recv_query_paired_devices_failed:
+ hdev->ll_driver->close(hdev);
+
+llopen_failed:
+switch_to_dj_mode_fail:
+ hid_hw_stop(hdev);
+
+hid_hw_start_fail:
+hid_parse_fail:
+ kfifo_free(&djrcv_dev->notif_fifo);
+ kfree(djrcv_dev);
+ hid_set_drvdata(hdev, NULL);
+ return retval;
+
+}
+
+#ifdef CONFIG_PM
+static int logi_dj_reset_resume(struct hid_device *hdev)
+{
+ int retval;
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+
+ retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
+ if (retval < 0) {
+ dev_err(&hdev->dev,
+ "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+ __func__, retval);
+ }
+
+ return 0;
+}
+#endif
+
+static void logi_dj_remove(struct hid_device *hdev)
+{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ struct dj_device *dj_dev;
+ int i;
+
+ dbg_hid("%s\n", __func__);
+
+ cancel_work_sync(&djrcv_dev->work);
+
+ hdev->ll_driver->close(hdev);
+ hid_hw_stop(hdev);
+
+ /* I suppose that at this point the only context that can access
+ * the djrecv_data is this thread as the work item is guaranteed to
+ * have finished and no more raw_event callbacks should arrive after
+ * the remove callback was triggered so no locks are put around the
+ * code below */
+ for (i = 0; i < (DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN); i++) {
+ dj_dev = djrcv_dev->paired_dj_devices[i];
+ if (dj_dev != NULL) {
+ hid_destroy_device(dj_dev->hdev);
+ kfree(dj_dev);
+ djrcv_dev->paired_dj_devices[i] = NULL;
+ }
+ }
+
+ kfifo_free(&djrcv_dev->notif_fifo);
+ kfree(djrcv_dev);
+ hid_set_drvdata(hdev, NULL);
+}
+
+static int logi_djdevice_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+ struct dj_device *dj_dev = hdev->driver_data;
+
+ if (!is_dj_device(dj_dev))
+ return -ENODEV;
+
+ ret = hid_parse(hdev);
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ return ret;
+}
+
+static const struct hid_device_id logi_dj_receivers[] = {
+ {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
+ {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(hid, logi_dj_receivers);
+
+static struct hid_driver logi_djreceiver_driver = {
+ .name = "logitech-djreceiver",
+ .id_table = logi_dj_receivers,
+ .probe = logi_dj_probe,
+ .remove = logi_dj_remove,
+ .raw_event = logi_dj_raw_event,
+#ifdef CONFIG_PM
+ .reset_resume = logi_dj_reset_resume,
+#endif
+};
+
+
+static const struct hid_device_id logi_dj_devices[] = {
+ {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
+ {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)},
+ {}
+};
+
+static struct hid_driver logi_djdevice_driver = {
+ .name = "logitech-djdevice",
+ .id_table = logi_dj_devices,
+ .probe = logi_djdevice_probe,
+};
+
+
+static int __init logi_dj_init(void)
+{
+ int retval;
+
+ dbg_hid("Logitech-DJ:%s\n", __func__);
+
+ retval = hid_register_driver(&logi_djreceiver_driver);
+ if (retval)
+ return retval;
+
+ retval = hid_register_driver(&logi_djdevice_driver);
+ if (retval)
+ hid_unregister_driver(&logi_djreceiver_driver);
+
+ return retval;
+
+}
+
+static void __exit logi_dj_exit(void)
+{
+ dbg_hid("Logitech-DJ:%s\n", __func__);
+
+ hid_unregister_driver(&logi_djdevice_driver);
+ hid_unregister_driver(&logi_djreceiver_driver);
+
+}
+
+module_init(logi_dj_init);
+module_exit(logi_dj_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Logitech");
+MODULE_AUTHOR("Nestor Lopez Casado");
+MODULE_AUTHOR("nlopezcasad@logitech.com");
diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
new file mode 100644
index 00000000000..fd28a5e0ca3
--- /dev/null
+++ b/drivers/hid/hid-logitech-dj.h
@@ -0,0 +1,123 @@
+#ifndef __HID_LOGITECH_DJ_H
+#define __HID_LOGITECH_DJ_H
+
+/*
+ * HID driver for Logitech Unifying receivers
+ *
+ * Copyright (c) 2011 Logitech
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kfifo.h>
+
+#define DJ_MAX_PAIRED_DEVICES 6
+#define DJ_MAX_NUMBER_NOTIFICATIONS 8
+#define DJ_DEVICE_INDEX_MIN 1
+#define DJ_DEVICE_INDEX_MAX 6
+
+#define DJREPORT_SHORT_LENGTH 15
+#define DJREPORT_LONG_LENGTH 32
+
+#define REPORT_ID_DJ_SHORT 0x20
+#define REPORT_ID_DJ_LONG 0x21
+
+#define REPORT_TYPE_RFREPORT_FIRST 0x01
+#define REPORT_TYPE_RFREPORT_LAST 0x1F
+
+/* Command Switch to DJ mode */
+#define REPORT_TYPE_CMD_SWITCH 0x80
+#define CMD_SWITCH_PARAM_DEVBITFIELD 0x00
+#define CMD_SWITCH_PARAM_TIMEOUT_SECONDS 0x01
+#define TIMEOUT_NO_KEEPALIVE 0x00
+
+/* Command to Get the list of Paired devices */
+#define REPORT_TYPE_CMD_GET_PAIRED_DEVICES 0x81
+
+/* Device Paired Notification */
+#define REPORT_TYPE_NOTIF_DEVICE_PAIRED 0x41
+#define SPFUNCTION_MORE_NOTIF_EXPECTED 0x01
+#define SPFUNCTION_DEVICE_LIST_EMPTY 0x02
+#define DEVICE_PAIRED_PARAM_SPFUNCTION 0x00
+#define DEVICE_PAIRED_PARAM_EQUAD_ID_LSB 0x01
+#define DEVICE_PAIRED_PARAM_EQUAD_ID_MSB 0x02
+#define DEVICE_PAIRED_RF_REPORT_TYPE 0x03
+
+/* Device Un-Paired Notification */
+#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED 0x40
+
+
+/* Connection Status Notification */
+#define REPORT_TYPE_NOTIF_CONNECTION_STATUS 0x42
+#define CONNECTION_STATUS_PARAM_STATUS 0x00
+#define STATUS_LINKLOSS 0x01
+
+/* Error Notification */
+#define REPORT_TYPE_NOTIF_ERROR 0x7F
+#define NOTIF_ERROR_PARAM_ETYPE 0x00
+#define ETYPE_KEEPALIVE_TIMEOUT 0x01
+
+/* supported DJ HID && RF report types */
+#define REPORT_TYPE_KEYBOARD 0x01
+#define REPORT_TYPE_MOUSE 0x02
+#define REPORT_TYPE_CONSUMER_CONTROL 0x03
+#define REPORT_TYPE_SYSTEM_CONTROL 0x04
+#define REPORT_TYPE_MEDIA_CENTER 0x08
+#define REPORT_TYPE_LEDS 0x0E
+
+/* RF Report types bitfield */
+#define STD_KEYBOARD 0x00000002
+#define STD_MOUSE 0x00000004
+#define MULTIMEDIA 0x00000008
+#define POWER_KEYS 0x00000010
+#define MEDIA_CENTER 0x00000100
+#define KBD_LEDS 0x00004000
+
+struct dj_report {
+ u8 report_id;
+ u8 device_index;
+ u8 report_type;
+ u8 report_params[DJREPORT_SHORT_LENGTH - 3];
+};
+
+struct dj_receiver_dev {
+ struct hid_device *hdev;
+ struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES +
+ DJ_DEVICE_INDEX_MIN];
+ struct work_struct work;
+ struct kfifo notif_fifo;
+ spinlock_t lock;
+};
+
+struct dj_device {
+ struct hid_device *hdev;
+ struct dj_receiver_dev *dj_receiver_dev;
+ u32 reports_supported;
+ u8 device_index;
+};
+
+/**
+ * is_dj_device - know if the given dj_device is not the receiver.
+ * @dj_dev: the dj device to test
+ *
+ * This macro tests if a struct dj_device pointer is a device created
+ * by the bus enumarator.
+ */
+#define is_dj_device(dj_dev) \
+ (&(dj_dev)->dj_receiver_dev->hdev->dev == (dj_dev)->hdev->dev.parent)
+
+#endif
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index f0fbd7bd239..2ab71758e2e 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -405,6 +405,13 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
__set_bit(REL_HWHEEL, input->relbit);
}
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ /* input->keybit is initialized with incorrect button info
+ * for Magic Trackpad. There really is only one physical
+ * button (BTN_LEFT == BTN_MOUSE). Make sure we don't
+ * advertise buttons that don't exist...
+ */
+ __clear_bit(BTN_RIGHT, input->keybit);
+ __clear_bit(BTN_MIDDLE, input->keybit);
__set_bit(BTN_MOUSE, input->keybit);
__set_bit(BTN_TOOL_FINGER, input->keybit);
__set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 58d0e7aaf08..fa5d7a1ffa9 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -47,10 +47,11 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_SLOT_IS_CONTACTID (1 << 1)
#define MT_QUIRK_CYPRESS (1 << 2)
#define MT_QUIRK_SLOT_IS_CONTACTNUMBER (1 << 3)
-#define MT_QUIRK_VALID_IS_INRANGE (1 << 4)
-#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 5)
-#define MT_QUIRK_EGALAX_XYZ_FIXUP (1 << 6)
-#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 7)
+#define MT_QUIRK_ALWAYS_VALID (1 << 4)
+#define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
+#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
+#define MT_QUIRK_EGALAX_XYZ_FIXUP (1 << 7)
+#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
struct mt_slot {
__s32 x, y, p, w, h;
@@ -86,11 +87,12 @@ struct mt_class {
/* classes of device behavior */
#define MT_CLS_DEFAULT 0x0001
-#define MT_CLS_CONFIDENCE 0x0002
-#define MT_CLS_CONFIDENCE_MINUS_ONE 0x0003
-#define MT_CLS_DUAL_INRANGE_CONTACTID 0x0004
-#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0005
-#define MT_CLS_DUAL_NSMU_CONTACTID 0x0006
+#define MT_CLS_SERIAL 0x0002
+#define MT_CLS_CONFIDENCE 0x0003
+#define MT_CLS_CONFIDENCE_MINUS_ONE 0x0004
+#define MT_CLS_DUAL_INRANGE_CONTACTID 0x0005
+#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0006
+#define MT_CLS_DUAL_NSMU_CONTACTID 0x0007
/* vendor specific classes */
#define MT_CLS_3M 0x0101
@@ -134,6 +136,8 @@ static int find_slot_from_contactid(struct mt_device *td)
struct mt_class mt_classes[] = {
{ .name = MT_CLS_DEFAULT,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP },
+ { .name = MT_CLS_SERIAL,
+ .quirks = MT_QUIRK_ALWAYS_VALID},
{ .name = MT_CLS_CONFIDENCE,
.quirks = MT_QUIRK_VALID_IS_CONFIDENCE },
{ .name = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -213,6 +217,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct mt_class *cls = td->mtclass;
__s32 quirks = cls->quirks;
+ /* Only map fields from TouchScreen or TouchPad collections.
+ * We need to ignore fields that belong to other collections
+ * such as Mouse that might have the same GenericDesktop usages. */
+ if (field->application == HID_DG_TOUCHSCREEN)
+ set_bit(INPUT_PROP_DIRECT, hi->input->propbit);
+ else if (field->application == HID_DG_TOUCHPAD)
+ set_bit(INPUT_PROP_POINTER, hi->input->propbit);
+ else
+ return 0;
+
switch (usage->hid & HID_USAGE_PAGE) {
case HID_UP_GENDESK:
@@ -277,6 +291,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
td->last_slot_field = usage->hid;
td->last_field_index = field->index;
td->last_mt_collection = usage->collection_index;
+ hdev->quirks &= ~HID_QUIRK_MULTITOUCH;
return 1;
case HID_DG_WIDTH:
hid_map_usage(hi, usage, bit, max,
@@ -435,7 +450,9 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
if (hid->claimed & HID_CLAIMED_INPUT && td->slots) {
switch (usage->hid) {
case HID_DG_INRANGE:
- if (quirks & MT_QUIRK_VALID_IS_INRANGE)
+ if (quirks & MT_QUIRK_ALWAYS_VALID)
+ td->curvalid = true;
+ else if (quirks & MT_QUIRK_VALID_IS_INRANGE)
td->curvalid = value;
break;
case HID_DG_TIPSWITCH:
@@ -513,12 +530,44 @@ static void mt_set_input_mode(struct hid_device *hdev)
}
}
+/* a list of devices for which there is a specialized multitouch driver */
+static const struct hid_device_id mt_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, 0x0001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, 0x0006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
+ { }
+};
+
+static bool mt_match_one_id(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ return id->bus == hdev->bus &&
+ (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
+ (id->product == HID_ANY_ID || id->product == hdev->product);
+}
+
+static const struct hid_device_id *mt_match_id(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ for (; id->bus; id++)
+ if (mt_match_one_id(hdev, id))
+ return id;
+
+ return NULL;
+}
+
static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret, i;
struct mt_device *td;
struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
+ if (mt_match_id(hdev, mt_have_special_driver))
+ return -ENODEV;
+
for (i = 0; mt_classes[i].name ; i++) {
if (id->driver_data == mt_classes[i].name) {
mtclass = &(mt_classes[i]);
@@ -526,10 +575,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
- /* This allows the driver to correctly support devices
- * that emit events over several HID messages.
- */
- hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
td = kzalloc(sizeof(struct mt_device), GFP_KERNEL);
if (!td) {
@@ -545,10 +590,16 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret != 0)
goto fail;
+ hdev->quirks |= HID_QUIRK_MULTITOUCH;
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret)
goto fail;
+ /* This allows the driver to correctly support devices
+ * that emit events over several HID messages.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),
GFP_KERNEL);
if (!td->slots) {
@@ -662,6 +713,11 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
USB_DEVICE_ID_GOODTOUCH_000f) },
+ /* Ideacom panel */
+ { .driver_data = MT_CLS_SERIAL,
+ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
+ USB_DEVICE_ID_IDEACOM_IDC6650) },
+
/* Ilitek dual touch panel */
{ .driver_data = MT_CLS_DEFAULT,
HID_USB_DEVICE(USB_VENDOR_ID_ILITEK,
@@ -672,6 +728,11 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS,
USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
+ /* LG Display panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_LG,
+ USB_DEVICE_ID_LG_MULTITOUCH) },
+
/* Lumio panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
@@ -732,6 +793,10 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_XAT,
USB_DEVICE_ID_XAT_CSR) },
+ /* Rest of the world */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+
{ }
};
MODULE_DEVICE_TABLE(hid, mt_devices);
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 9d8710f8bc7..1782693819f 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -2409,7 +2409,7 @@ static int picolcd_raw_event(struct hid_device *hdev,
#ifdef CONFIG_PM
static int picolcd_suspend(struct hid_device *hdev, pm_message_t message)
{
- if (message.event & PM_EVENT_AUTO)
+ if (PMSG_IS_AUTO(message))
return 0;
picolcd_suspend_backlight(hid_get_drvdata(hdev));
diff --git a/drivers/hid/hid-primax.c b/drivers/hid/hid-primax.c
new file mode 100644
index 00000000000..4d3c60d8831
--- /dev/null
+++ b/drivers/hid/hid-primax.c
@@ -0,0 +1,117 @@
+/*
+ * HID driver for primax and similar keyboards with in-band modifiers
+ *
+ * Copyright 2011 Google Inc. All Rights Reserved
+ *
+ * Author:
+ * Terry Lambert <tlambert@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+static int px_raw_event(struct hid_device *hid, struct hid_report *report,
+ u8 *data, int size)
+{
+ int idx = size;
+
+ switch (report->id) {
+ case 0: /* keyboard input */
+ /*
+ * Convert in-band modifier key values into out of band
+ * modifier bits and pull the key strokes from the report.
+ * Thus a report data set which looked like:
+ *
+ * [00][00][E0][30][00][00][00][00]
+ * (no modifier bits + "Left Shift" key + "1" key)
+ *
+ * Would be converted to:
+ *
+ * [01][00][00][30][00][00][00][00]
+ * (Left Shift modifier bit + "1" key)
+ *
+ * As long as it's in the size range, the upper level
+ * drivers don't particularly care if there are in-band
+ * 0-valued keys, so they don't stop parsing.
+ */
+ while (--idx > 1) {
+ if (data[idx] < 0xE0 || data[idx] > 0xE7)
+ continue;
+ data[0] |= (1 << (data[idx] - 0xE0));
+ data[idx] = 0;
+ }
+ hid_report_raw_event(hid, HID_INPUT_REPORT, data, size, 0);
+ return 1;
+
+ default: /* unknown report */
+ /* Unknown report type; pass upstream */
+ hid_info(hid, "unknown report type %d\n", report->id);
+ break;
+ }
+
+ return 0;
+}
+
+static int px_probe(struct hid_device *hid, const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hid);
+ if (ret) {
+ hid_err(hid, "parse failed\n");
+ goto fail;
+ }
+
+ ret = hid_hw_start(hid, HID_CONNECT_DEFAULT);
+ if (ret)
+ hid_err(hid, "hw start failed\n");
+
+fail:
+ return ret;
+}
+
+static void px_remove(struct hid_device *hid)
+{
+ hid_hw_stop(hid);
+}
+
+static const struct hid_device_id px_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, px_devices);
+
+static struct hid_driver px_driver = {
+ .name = "primax",
+ .id_table = px_devices,
+ .raw_event = px_raw_event,
+ .probe = px_probe,
+ .remove = px_remove,
+};
+
+static int __init px_init(void)
+{
+ return hid_register_driver(&px_driver);
+}
+
+static void __exit px_exit(void)
+{
+ hid_unregister_driver(&px_driver);
+}
+
+module_init(px_init);
+module_exit(px_exit);
+MODULE_AUTHOR("Terry Lambert <tlambert@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 158b389d0fb..f779009104e 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -816,7 +816,7 @@ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (pm == NULL) {
hid_err(hdev, "can't alloc descriptor\n");
ret = -ENOMEM;
- goto err_free;
+ goto err_free_pk;
}
pm->pk = pk;
@@ -849,10 +849,10 @@ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
err_stop:
hid_hw_stop(hdev);
err_free:
- if (pm != NULL)
- kfree(pm);
-
+ kfree(pm);
+err_free_pk:
kfree(pk);
+
return ret;
}
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 2b8f3a31ffb..e2072afb34b 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -37,6 +37,21 @@
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
+static void kone_profile_activated(struct kone_device *kone, uint new_profile)
+{
+ kone->actual_profile = new_profile;
+ kone->actual_dpi = kone->profiles[new_profile - 1].startup_dpi;
+}
+
+static void kone_profile_report(struct kone_device *kone, uint new_profile)
+{
+ struct kone_roccat_report roccat_report;
+ roccat_report.event = kone_mouse_event_switch_profile;
+ roccat_report.value = new_profile;
+ roccat_report.key = 0;
+ roccat_report_event(kone->chrdev_minor, (uint8_t *)&roccat_report);
+}
+
static int kone_receive(struct usb_device *usb_dev, uint usb_command,
void *data, uint size)
{
@@ -283,7 +298,7 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
container_of(kobj, struct device, kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0, difference;
+ int retval = 0, difference, old_profile;
/* I need to get my data in one piece */
if (off != 0 || count != sizeof(struct kone_settings))
@@ -294,21 +309,20 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
if (difference) {
retval = kone_set_settings(usb_dev,
(struct kone_settings const *)buf);
- if (!retval)
- memcpy(&kone->settings, buf,
- sizeof(struct kone_settings));
- }
- mutex_unlock(&kone->kone_lock);
+ if (retval) {
+ mutex_unlock(&kone->kone_lock);
+ return retval;
+ }
- if (retval)
- return retval;
+ old_profile = kone->settings.startup_profile;
+ memcpy(&kone->settings, buf, sizeof(struct kone_settings));
- /*
- * If we get here, treat settings as okay and update actual values
- * according to startup_profile
- */
- kone->actual_profile = kone->settings.startup_profile;
- kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+ kone_profile_activated(kone, kone->settings.startup_profile);
+
+ if (kone->settings.startup_profile != old_profile)
+ kone_profile_report(kone, kone->settings.startup_profile);
+ }
+ mutex_unlock(&kone->kone_lock);
return sizeof(struct kone_settings);
}
@@ -501,6 +515,8 @@ static ssize_t kone_sysfs_set_tcu(struct device *dev,
goto exit_no_settings;
goto exit_unlock;
}
+ /* calibration resets profile */
+ kone_profile_activated(kone, kone->settings.startup_profile);
}
retval = size;
@@ -544,16 +560,16 @@ static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
kone_set_settings_checksum(&kone->settings);
retval = kone_set_settings(usb_dev, &kone->settings);
-
- mutex_unlock(&kone->kone_lock);
-
- if (retval)
+ if (retval) {
+ mutex_unlock(&kone->kone_lock);
return retval;
+ }
/* changing the startup profile immediately activates this profile */
- kone->actual_profile = new_startup_profile;
- kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+ kone_profile_activated(kone, new_startup_profile);
+ kone_profile_report(kone, new_startup_profile);
+ mutex_unlock(&kone->kone_lock);
return size;
}
@@ -665,8 +681,7 @@ static int kone_init_kone_device_struct(struct usb_device *usb_dev,
if (retval)
return retval;
- kone->actual_profile = kone->settings.startup_profile;
- kone->actual_dpi = kone->profiles[kone->actual_profile].startup_dpi;
+ kone_profile_activated(kone, kone->settings.startup_profile);
return 0;
}
@@ -776,10 +791,10 @@ static void kone_keep_values_up_to_date(struct kone_device *kone,
{
switch (event->event) {
case kone_mouse_event_switch_profile:
+ kone->actual_dpi = kone->profiles[event->value - 1].
+ startup_dpi;
case kone_mouse_event_osd_profile:
kone->actual_profile = event->value;
- kone->actual_dpi = kone->profiles[kone->actual_profile - 1].
- startup_dpi;
break;
case kone_mouse_event_switch_dpi:
case kone_mouse_event_osd_dpi:
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 1f8336e3f58..112d934132c 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -323,6 +323,7 @@ static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,
struct usb_device *usb_dev;
unsigned long profile;
int retval;
+ struct kovaplus_roccat_report roccat_report;
dev = dev->parent->parent;
kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
@@ -337,10 +338,22 @@ static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,
mutex_lock(&kovaplus->kovaplus_lock);
retval = kovaplus_set_actual_profile(usb_dev, profile);
+ if (retval) {
+ mutex_unlock(&kovaplus->kovaplus_lock);
+ return retval;
+ }
+
kovaplus_profile_activated(kovaplus, profile);
+
+ roccat_report.type = KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1;
+ roccat_report.profile = profile + 1;
+ roccat_report.button = 0;
+ roccat_report.data1 = profile + 1;
+ roccat_report.data2 = 0;
+ roccat_report_event(kovaplus->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+
mutex_unlock(&kovaplus->kovaplus_lock);
- if (retval)
- return retval;
return size;
}
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 8140776bd8c..df05c1b1064 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -298,6 +298,7 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0;
int difference;
+ struct pyra_roccat_report roccat_report;
if (off != 0 || count != sizeof(struct pyra_settings))
return -EINVAL;
@@ -307,17 +308,23 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
if (difference) {
retval = pyra_set_settings(usb_dev,
(struct pyra_settings const *)buf);
- if (!retval)
- memcpy(&pyra->settings, buf,
- sizeof(struct pyra_settings));
- }
- mutex_unlock(&pyra->pyra_lock);
+ if (retval) {
+ mutex_unlock(&pyra->pyra_lock);
+ return retval;
+ }
- if (retval)
- return retval;
+ memcpy(&pyra->settings, buf,
+ sizeof(struct pyra_settings));
- profile_activated(pyra, pyra->settings.startup_profile);
+ profile_activated(pyra, pyra->settings.startup_profile);
+ roccat_report.type = PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2;
+ roccat_report.value = pyra->settings.startup_profile + 1;
+ roccat_report.key = 0;
+ roccat_report_event(pyra->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+ }
+ mutex_unlock(&pyra->pyra_lock);
return sizeof(struct pyra_settings);
}
diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
index 16f7cafc969..670da9109f8 100644
--- a/drivers/hid/hid-sjoy.c
+++ b/drivers/hid/hid-sjoy.c
@@ -65,8 +65,7 @@ static int sjoyff_init(struct hid_device *hid)
{
struct sjoyff_device *sjoyff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct list_head *report_ptr = report_list;
@@ -78,43 +77,45 @@ static int sjoyff_init(struct hid_device *hid)
return -ENODEV;
}
- report_ptr = report_ptr->next;
+ list_for_each_entry(hidinput, &hid->inputs, list) {
+ report_ptr = report_ptr->next;
- if (report_ptr == report_list) {
- hid_err(hid, "required output report is missing\n");
- return -ENODEV;
- }
+ if (report_ptr == report_list) {
+ hid_err(hid, "required output report is missing\n");
+ return -ENODEV;
+ }
- report = list_entry(report_ptr, struct hid_report, list);
- if (report->maxfield < 1) {
- hid_err(hid, "no fields in the report\n");
- return -ENODEV;
- }
+ report = list_entry(report_ptr, struct hid_report, list);
+ if (report->maxfield < 1) {
+ hid_err(hid, "no fields in the report\n");
+ return -ENODEV;
+ }
- if (report->field[0]->report_count < 3) {
- hid_err(hid, "not enough values in the field\n");
- return -ENODEV;
- }
+ if (report->field[0]->report_count < 3) {
+ hid_err(hid, "not enough values in the field\n");
+ return -ENODEV;
+ }
- sjoyff = kzalloc(sizeof(struct sjoyff_device), GFP_KERNEL);
- if (!sjoyff)
- return -ENOMEM;
+ sjoyff = kzalloc(sizeof(struct sjoyff_device), GFP_KERNEL);
+ if (!sjoyff)
+ return -ENOMEM;
- dev = hidinput->input;
+ dev = hidinput->input;
- set_bit(FF_RUMBLE, dev->ffbit);
+ set_bit(FF_RUMBLE, dev->ffbit);
- error = input_ff_create_memless(dev, sjoyff, hid_sjoyff_play);
- if (error) {
- kfree(sjoyff);
- return error;
- }
+ error = input_ff_create_memless(dev, sjoyff, hid_sjoyff_play);
+ if (error) {
+ kfree(sjoyff);
+ return error;
+ }
- sjoyff->report = report;
- sjoyff->report->field[0]->value[0] = 0x01;
- sjoyff->report->field[0]->value[1] = 0x00;
- sjoyff->report->field[0]->value[2] = 0x00;
- usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT);
+ sjoyff->report = report;
+ sjoyff->report->field[0]->value[0] = 0x01;
+ sjoyff->report->field[0]->value[1] = 0x00;
+ sjoyff->report->field[0]->value[2] = 0x00;
+ usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT);
+ }
hid_info(hid, "Force feedback for SmartJoy PLUS PS2/USB adapter\n");
@@ -131,6 +132,8 @@ static int sjoy_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
+ hdev->quirks |= id->driver_data;
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
@@ -151,7 +154,17 @@ err:
}
static const struct hid_device_id sjoy_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO),
+ .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET |
+ HID_QUIRK_SKIP_OUTPUT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO),
+ .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET |
+ HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD),
+ .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET |
+ HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ }
};
MODULE_DEVICE_TABLE(hid, sjoy_devices);
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 72ca689b647..17bb88f782b 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -304,11 +304,51 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
return 1;
}
+static int wacom_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage, unsigned long **bit,
+ int *max)
+{
+ struct input_dev *input = hi->input;
+
+ __set_bit(INPUT_PROP_POINTER, input->propbit);
+
+ /* Basics */
+ input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL);
+
+ __set_bit(REL_WHEEL, input->relbit);
+
+ __set_bit(BTN_TOOL_PEN, input->keybit);
+ __set_bit(BTN_TOUCH, input->keybit);
+ __set_bit(BTN_STYLUS, input->keybit);
+ __set_bit(BTN_STYLUS2, input->keybit);
+ __set_bit(BTN_LEFT, input->keybit);
+ __set_bit(BTN_RIGHT, input->keybit);
+ __set_bit(BTN_MIDDLE, input->keybit);
+
+ /* Pad */
+ input->evbit[0] |= BIT(EV_MSC);
+
+ __set_bit(MSC_SERIAL, input->mscbit);
+
+ __set_bit(BTN_0, input->keybit);
+ __set_bit(BTN_1, input->keybit);
+ __set_bit(BTN_TOOL_FINGER, input->keybit);
+
+ /* Distance, rubber and mouse */
+ __set_bit(BTN_TOOL_RUBBER, input->keybit);
+ __set_bit(BTN_TOOL_MOUSE, input->keybit);
+
+ input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
+ input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
+ input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
+
+ return 0;
+}
+
static int wacom_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- struct hid_input *hidinput;
- struct input_dev *input;
struct wacom_data *wdata;
int ret;
@@ -370,42 +410,6 @@ static int wacom_probe(struct hid_device *hdev,
goto err_ac;
}
#endif
- hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
- input = hidinput->input;
-
- __set_bit(INPUT_PROP_POINTER, input->propbit);
-
- /* Basics */
- input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL);
-
- __set_bit(REL_WHEEL, input->relbit);
-
- __set_bit(BTN_TOOL_PEN, input->keybit);
- __set_bit(BTN_TOUCH, input->keybit);
- __set_bit(BTN_STYLUS, input->keybit);
- __set_bit(BTN_STYLUS2, input->keybit);
- __set_bit(BTN_LEFT, input->keybit);
- __set_bit(BTN_RIGHT, input->keybit);
- __set_bit(BTN_MIDDLE, input->keybit);
-
- /* Pad */
- input->evbit[0] |= BIT(EV_MSC);
-
- __set_bit(MSC_SERIAL, input->mscbit);
-
- __set_bit(BTN_0, input->keybit);
- __set_bit(BTN_1, input->keybit);
- __set_bit(BTN_TOOL_FINGER, input->keybit);
-
- /* Distance, rubber and mouse */
- __set_bit(BTN_TOOL_RUBBER, input->keybit);
- __set_bit(BTN_TOOL_MOUSE, input->keybit);
-
- input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
- input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
- input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
- input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
-
return 0;
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
@@ -448,6 +452,7 @@ static struct hid_driver wacom_driver = {
.probe = wacom_probe,
.remove = wacom_remove,
.raw_event = wacom_raw_event,
+ .input_mapped = wacom_input_mapped,
};
static int __init wacom_init(void)
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c
index 85a02e5f9fe..76739c07fa3 100644
--- a/drivers/hid/hid-wiimote.c
+++ b/drivers/hid/hid-wiimote.c
@@ -10,15 +10,18 @@
* any later version.
*/
+#include <linux/completion.h>
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
#include <linux/spinlock.h>
#include "hid-ids.h"
-#define WIIMOTE_VERSION "0.1"
+#define WIIMOTE_VERSION "0.2"
#define WIIMOTE_NAME "Nintendo Wii Remote"
#define WIIMOTE_BUFSIZE 32
@@ -30,12 +33,26 @@ struct wiimote_buf {
struct wiimote_state {
spinlock_t lock;
__u8 flags;
+ __u8 accel_split[2];
+
+ /* synchronous cmd requests */
+ struct mutex sync;
+ struct completion ready;
+ int cmd;
+ __u32 opt;
+
+ /* results of synchronous requests */
+ __u8 cmd_battery;
+ __u8 cmd_err;
};
struct wiimote_data {
struct hid_device *hdev;
struct input_dev *input;
struct led_classdev *leds[4];
+ struct input_dev *accel;
+ struct input_dev *ir;
+ struct power_supply battery;
spinlock_t qlock;
__u8 head;
@@ -46,23 +63,47 @@ struct wiimote_data {
struct wiimote_state state;
};
-#define WIIPROTO_FLAG_LED1 0x01
-#define WIIPROTO_FLAG_LED2 0x02
-#define WIIPROTO_FLAG_LED3 0x04
-#define WIIPROTO_FLAG_LED4 0x08
+#define WIIPROTO_FLAG_LED1 0x01
+#define WIIPROTO_FLAG_LED2 0x02
+#define WIIPROTO_FLAG_LED3 0x04
+#define WIIPROTO_FLAG_LED4 0x08
+#define WIIPROTO_FLAG_RUMBLE 0x10
+#define WIIPROTO_FLAG_ACCEL 0x20
+#define WIIPROTO_FLAG_IR_BASIC 0x40
+#define WIIPROTO_FLAG_IR_EXT 0x80
+#define WIIPROTO_FLAG_IR_FULL 0xc0 /* IR_BASIC | IR_EXT */
#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
+#define WIIPROTO_FLAGS_IR (WIIPROTO_FLAG_IR_BASIC | WIIPROTO_FLAG_IR_EXT | \
+ WIIPROTO_FLAG_IR_FULL)
/* return flag for led \num */
#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
enum wiiproto_reqs {
WIIPROTO_REQ_NULL = 0x0,
+ WIIPROTO_REQ_RUMBLE = 0x10,
WIIPROTO_REQ_LED = 0x11,
WIIPROTO_REQ_DRM = 0x12,
+ WIIPROTO_REQ_IR1 = 0x13,
+ WIIPROTO_REQ_SREQ = 0x15,
+ WIIPROTO_REQ_WMEM = 0x16,
+ WIIPROTO_REQ_RMEM = 0x17,
+ WIIPROTO_REQ_IR2 = 0x1a,
WIIPROTO_REQ_STATUS = 0x20,
+ WIIPROTO_REQ_DATA = 0x21,
WIIPROTO_REQ_RETURN = 0x22,
WIIPROTO_REQ_DRM_K = 0x30,
+ WIIPROTO_REQ_DRM_KA = 0x31,
+ WIIPROTO_REQ_DRM_KE = 0x32,
+ WIIPROTO_REQ_DRM_KAI = 0x33,
+ WIIPROTO_REQ_DRM_KEE = 0x34,
+ WIIPROTO_REQ_DRM_KAE = 0x35,
+ WIIPROTO_REQ_DRM_KIE = 0x36,
+ WIIPROTO_REQ_DRM_KAIE = 0x37,
+ WIIPROTO_REQ_DRM_E = 0x3d,
+ WIIPROTO_REQ_DRM_SKAI1 = 0x3e,
+ WIIPROTO_REQ_DRM_SKAI2 = 0x3f,
};
enum wiiproto_keys {
@@ -94,6 +135,56 @@ static __u16 wiiproto_keymap[] = {
BTN_MODE, /* WIIPROTO_KEY_HOME */
};
+static enum power_supply_property wiimote_battery_props[] = {
+ POWER_SUPPLY_PROP_CAPACITY
+};
+
+/* requires the state.lock spinlock to be held */
+static inline bool wiimote_cmd_pending(struct wiimote_data *wdata, int cmd,
+ __u32 opt)
+{
+ return wdata->state.cmd == cmd && wdata->state.opt == opt;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_complete(struct wiimote_data *wdata)
+{
+ wdata->state.cmd = WIIPROTO_REQ_NULL;
+ complete(&wdata->state.ready);
+}
+
+static inline int wiimote_cmd_acquire(struct wiimote_data *wdata)
+{
+ return mutex_lock_interruptible(&wdata->state.sync) ? -ERESTARTSYS : 0;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
+ __u32 opt)
+{
+ INIT_COMPLETION(wdata->state.ready);
+ wdata->state.cmd = cmd;
+ wdata->state.opt = opt;
+}
+
+static inline void wiimote_cmd_release(struct wiimote_data *wdata)
+{
+ mutex_unlock(&wdata->state.sync);
+}
+
+static inline int wiimote_cmd_wait(struct wiimote_data *wdata)
+{
+ int ret;
+
+ ret = wait_for_completion_interruptible_timeout(&wdata->state.ready, HZ);
+ if (ret < 0)
+ return -ERESTARTSYS;
+ else if (ret == 0)
+ return -EIO;
+ else
+ return 0;
+}
+
static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
size_t count)
{
@@ -172,6 +263,39 @@ static void wiimote_queue(struct wiimote_data *wdata, const __u8 *buffer,
spin_unlock_irqrestore(&wdata->qlock, flags);
}
+/*
+ * This sets the rumble bit on the given output report if rumble is
+ * currently enabled.
+ * \cmd1 must point to the second byte in the output report => &cmd[1]
+ * This must be called on nearly every output report before passing it
+ * into the output queue!
+ */
+static inline void wiiproto_keep_rumble(struct wiimote_data *wdata, __u8 *cmd1)
+{
+ if (wdata->state.flags & WIIPROTO_FLAG_RUMBLE)
+ *cmd1 |= 0x01;
+}
+
+static void wiiproto_req_rumble(struct wiimote_data *wdata, __u8 rumble)
+{
+ __u8 cmd[2];
+
+ rumble = !!rumble;
+ if (rumble == !!(wdata->state.flags & WIIPROTO_FLAG_RUMBLE))
+ return;
+
+ if (rumble)
+ wdata->state.flags |= WIIPROTO_FLAG_RUMBLE;
+ else
+ wdata->state.flags &= ~WIIPROTO_FLAG_RUMBLE;
+
+ cmd[0] = WIIPROTO_REQ_RUMBLE;
+ cmd[1] = 0;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
{
__u8 cmd[2];
@@ -193,6 +317,7 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
if (leds & WIIPROTO_FLAG_LED4)
cmd[1] |= 0x80;
+ wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
@@ -203,7 +328,23 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
*/
static __u8 select_drm(struct wiimote_data *wdata)
{
- return WIIPROTO_REQ_DRM_K;
+ __u8 ir = wdata->state.flags & WIIPROTO_FLAGS_IR;
+
+ if (ir == WIIPROTO_FLAG_IR_BASIC) {
+ if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
+ return WIIPROTO_REQ_DRM_KAIE;
+ else
+ return WIIPROTO_REQ_DRM_KIE;
+ } else if (ir == WIIPROTO_FLAG_IR_EXT) {
+ return WIIPROTO_REQ_DRM_KAI;
+ } else if (ir == WIIPROTO_FLAG_IR_FULL) {
+ return WIIPROTO_REQ_DRM_SKAI1;
+ } else {
+ if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
+ return WIIPROTO_REQ_DRM_KA;
+ else
+ return WIIPROTO_REQ_DRM_K;
+ }
}
static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
@@ -217,9 +358,256 @@ static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
cmd[1] = 0;
cmd[2] = drm;
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_status(struct wiimote_data *wdata)
+{
+ __u8 cmd[2];
+
+ cmd[0] = WIIPROTO_REQ_SREQ;
+ cmd[1] = 0;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_accel(struct wiimote_data *wdata, __u8 accel)
+{
+ accel = !!accel;
+ if (accel == !!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
+ return;
+
+ if (accel)
+ wdata->state.flags |= WIIPROTO_FLAG_ACCEL;
+ else
+ wdata->state.flags &= ~WIIPROTO_FLAG_ACCEL;
+
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+}
+
+static void wiiproto_req_ir1(struct wiimote_data *wdata, __u8 flags)
+{
+ __u8 cmd[2];
+
+ cmd[0] = WIIPROTO_REQ_IR1;
+ cmd[1] = flags;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_ir2(struct wiimote_data *wdata, __u8 flags)
+{
+ __u8 cmd[2];
+
+ cmd[0] = WIIPROTO_REQ_IR2;
+ cmd[1] = flags;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+#define wiiproto_req_wreg(wdata, os, buf, sz) \
+ wiiproto_req_wmem((wdata), false, (os), (buf), (sz))
+
+#define wiiproto_req_weeprom(wdata, os, buf, sz) \
+ wiiproto_req_wmem((wdata), true, (os), (buf), (sz))
+
+static void wiiproto_req_wmem(struct wiimote_data *wdata, bool eeprom,
+ __u32 offset, const __u8 *buf, __u8 size)
+{
+ __u8 cmd[22];
+
+ if (size > 16 || size == 0) {
+ hid_warn(wdata->hdev, "Invalid length %d wmem request\n", size);
+ return;
+ }
+
+ memset(cmd, 0, sizeof(cmd));
+ cmd[0] = WIIPROTO_REQ_WMEM;
+ cmd[2] = (offset >> 16) & 0xff;
+ cmd[3] = (offset >> 8) & 0xff;
+ cmd[4] = offset & 0xff;
+ cmd[5] = size;
+ memcpy(&cmd[6], buf, size);
+
+ if (!eeprom)
+ cmd[1] |= 0x04;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
+/* requries the cmd-mutex to be held */
+static int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
+ const __u8 *wmem, __u8 size)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_WMEM, 0);
+ wiiproto_req_wreg(wdata, offset, wmem, size);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ if (!ret && wdata->state.cmd_err)
+ ret = -EIO;
+
+ return ret;
+}
+
+static int wiimote_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wiimote_data *wdata = container_of(psy,
+ struct wiimote_data, battery);
+ int ret = 0, state;
+ unsigned long flags;
+
+ ret = wiimote_cmd_acquire(wdata);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_SREQ, 0);
+ wiiproto_req_status(wdata);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ state = wdata->state.cmd_battery;
+ wiimote_cmd_release(wdata);
+
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = state * 100 / 255;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int wiimote_init_ir(struct wiimote_data *wdata, __u16 mode)
+{
+ int ret;
+ unsigned long flags;
+ __u8 format = 0;
+ static const __u8 data_enable[] = { 0x01 };
+ static const __u8 data_sens1[] = { 0x02, 0x00, 0x00, 0x71, 0x01,
+ 0x00, 0xaa, 0x00, 0x64 };
+ static const __u8 data_sens2[] = { 0x63, 0x03 };
+ static const __u8 data_fin[] = { 0x08 };
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+
+ if (mode == (wdata->state.flags & WIIPROTO_FLAGS_IR)) {
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+ return 0;
+ }
+
+ if (mode == 0) {
+ wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
+ wiiproto_req_ir1(wdata, 0);
+ wiiproto_req_ir2(wdata, 0);
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+ return 0;
+ }
+
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_acquire(wdata);
+ if (ret)
+ return ret;
+
+ /* send PIXEL CLOCK ENABLE cmd first */
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_IR1, 0);
+ wiiproto_req_ir1(wdata, 0x06);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ if (ret)
+ goto unlock;
+ if (wdata->state.cmd_err) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* enable IR LOGIC */
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_IR2, 0);
+ wiiproto_req_ir2(wdata, 0x06);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ if (ret)
+ goto unlock;
+ if (wdata->state.cmd_err) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* enable IR cam but do not make it send data, yet */
+ ret = wiimote_cmd_write(wdata, 0xb00030, data_enable,
+ sizeof(data_enable));
+ if (ret)
+ goto unlock;
+
+ /* write first sensitivity block */
+ ret = wiimote_cmd_write(wdata, 0xb00000, data_sens1,
+ sizeof(data_sens1));
+ if (ret)
+ goto unlock;
+
+ /* write second sensitivity block */
+ ret = wiimote_cmd_write(wdata, 0xb0001a, data_sens2,
+ sizeof(data_sens2));
+ if (ret)
+ goto unlock;
+
+ /* put IR cam into desired state */
+ switch (mode) {
+ case WIIPROTO_FLAG_IR_FULL:
+ format = 5;
+ break;
+ case WIIPROTO_FLAG_IR_EXT:
+ format = 3;
+ break;
+ case WIIPROTO_FLAG_IR_BASIC:
+ format = 1;
+ break;
+ }
+ ret = wiimote_cmd_write(wdata, 0xb00033, &format, sizeof(format));
+ if (ret)
+ goto unlock;
+
+ /* make IR cam send data */
+ ret = wiimote_cmd_write(wdata, 0xb00030, data_fin, sizeof(data_fin));
+ if (ret)
+ goto unlock;
+
+ /* request new DRM mode compatible to IR mode */
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
+ wdata->state.flags |= mode & WIIPROTO_FLAGS_IR;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+unlock:
+ wiimote_cmd_release(wdata);
+ return ret;
+}
+
static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev)
{
struct wiimote_data *wdata;
@@ -268,9 +656,28 @@ static void wiimote_leds_set(struct led_classdev *led_dev,
}
}
-static int wiimote_input_event(struct input_dev *dev, unsigned int type,
- unsigned int code, int value)
+static int wiimote_ff_play(struct input_dev *dev, void *data,
+ struct ff_effect *eff)
{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ __u8 value;
+ unsigned long flags;
+
+ /*
+ * The wiimote supports only a single rumble motor so if any magnitude
+ * is set to non-zero then we start the rumble motor. If both are set to
+ * zero, we stop the rumble motor.
+ */
+
+ if (eff->u.rumble.strong_magnitude || eff->u.rumble.weak_magnitude)
+ value = 1;
+ else
+ value = 0;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiiproto_req_rumble(wdata, value);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
return 0;
}
@@ -288,6 +695,61 @@ static void wiimote_input_close(struct input_dev *dev)
hid_hw_close(wdata->hdev);
}
+static int wiimote_accel_open(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ int ret;
+ unsigned long flags;
+
+ ret = hid_hw_open(wdata->hdev);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiiproto_req_accel(wdata, true);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+}
+
+static void wiimote_accel_close(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiiproto_req_accel(wdata, false);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ hid_hw_close(wdata->hdev);
+}
+
+static int wiimote_ir_open(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ int ret;
+
+ ret = hid_hw_open(wdata->hdev);
+ if (ret)
+ return ret;
+
+ ret = wiimote_init_ir(wdata, WIIPROTO_FLAG_IR_BASIC);
+ if (ret) {
+ hid_hw_close(wdata->hdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void wiimote_ir_close(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+
+ wiimote_init_ir(wdata, 0);
+ hid_hw_close(wdata->hdev);
+}
+
static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
{
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_LEFT],
@@ -315,12 +777,100 @@ static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
input_sync(wdata->input);
}
+static void handler_accel(struct wiimote_data *wdata, const __u8 *payload)
+{
+ __u16 x, y, z;
+
+ if (!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
+ return;
+
+ /*
+ * payload is: BB BB XX YY ZZ
+ * Accelerometer data is encoded into 3 10bit values. XX, YY and ZZ
+ * contain the upper 8 bits of each value. The lower 2 bits are
+ * contained in the buttons data BB BB.
+ * Bits 6 and 7 of the first buttons byte BB is the lower 2 bits of the
+ * X accel value. Bit 5 of the second buttons byte is the 2nd bit of Y
+ * accel value and bit 6 is the second bit of the Z value.
+ * The first bit of Y and Z values is not available and always set to 0.
+ * 0x200 is returned on no movement.
+ */
+
+ x = payload[2] << 2;
+ y = payload[3] << 2;
+ z = payload[4] << 2;
+
+ x |= (payload[0] >> 5) & 0x3;
+ y |= (payload[1] >> 4) & 0x2;
+ z |= (payload[1] >> 5) & 0x2;
+
+ input_report_abs(wdata->accel, ABS_RX, x - 0x200);
+ input_report_abs(wdata->accel, ABS_RY, y - 0x200);
+ input_report_abs(wdata->accel, ABS_RZ, z - 0x200);
+ input_sync(wdata->accel);
+}
+
+#define ir_to_input0(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+ ABS_HAT0X, ABS_HAT0Y)
+#define ir_to_input1(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+ ABS_HAT1X, ABS_HAT1Y)
+#define ir_to_input2(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+ ABS_HAT2X, ABS_HAT2Y)
+#define ir_to_input3(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+ ABS_HAT3X, ABS_HAT3Y)
+
+static void __ir_to_input(struct wiimote_data *wdata, const __u8 *ir,
+ bool packed, __u8 xid, __u8 yid)
+{
+ __u16 x, y;
+
+ if (!(wdata->state.flags & WIIPROTO_FLAGS_IR))
+ return;
+
+ /*
+ * Basic IR data is encoded into 3 bytes. The first two bytes are the
+ * upper 8 bit of the X/Y data, the 3rd byte contains the lower 2 bits
+ * of both.
+ * If data is packed, then the 3rd byte is put first and slightly
+ * reordered. This allows to interleave packed and non-packed data to
+ * have two IR sets in 5 bytes instead of 6.
+ * The resulting 10bit X/Y values are passed to the ABS_HATXY input dev.
+ */
+
+ if (packed) {
+ x = ir[1] << 2;
+ y = ir[2] << 2;
+
+ x |= ir[0] & 0x3;
+ y |= (ir[0] >> 2) & 0x3;
+ } else {
+ x = ir[0] << 2;
+ y = ir[1] << 2;
+
+ x |= (ir[2] >> 4) & 0x3;
+ y |= (ir[2] >> 6) & 0x3;
+ }
+
+ input_report_abs(wdata->ir, xid, x);
+ input_report_abs(wdata->ir, yid, y);
+}
+
static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
/* on status reports the drm is reset so we need to resend the drm */
wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+
+ if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_SREQ, 0)) {
+ wdata->state.cmd_battery = payload[5];
+ wiimote_cmd_complete(wdata);
+ }
+}
+
+static void handler_data(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
}
static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
@@ -330,9 +880,105 @@ static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
handler_keys(wdata, payload);
- if (err)
+ if (wiimote_cmd_pending(wdata, cmd, 0)) {
+ wdata->state.cmd_err = err;
+ wiimote_cmd_complete(wdata);
+ } else if (err) {
hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
cmd);
+ }
+}
+
+static void handler_drm_KA(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ handler_accel(wdata, payload);
+}
+
+static void handler_drm_KE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+}
+
+static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ handler_accel(wdata, payload);
+ ir_to_input0(wdata, &payload[5], false);
+ ir_to_input1(wdata, &payload[8], false);
+ ir_to_input2(wdata, &payload[11], false);
+ ir_to_input3(wdata, &payload[14], false);
+ input_sync(wdata->ir);
+}
+
+static void handler_drm_KEE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+}
+
+static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ ir_to_input0(wdata, &payload[2], false);
+ ir_to_input1(wdata, &payload[4], true);
+ ir_to_input2(wdata, &payload[7], false);
+ ir_to_input3(wdata, &payload[9], true);
+ input_sync(wdata->ir);
+}
+
+static void handler_drm_KAE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ handler_accel(wdata, payload);
+}
+
+static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ handler_accel(wdata, payload);
+ ir_to_input0(wdata, &payload[5], false);
+ ir_to_input1(wdata, &payload[7], true);
+ ir_to_input2(wdata, &payload[10], false);
+ ir_to_input3(wdata, &payload[12], true);
+ input_sync(wdata->ir);
+}
+
+static void handler_drm_E(struct wiimote_data *wdata, const __u8 *payload)
+{
+}
+
+static void handler_drm_SKAI1(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+
+ wdata->state.accel_split[0] = payload[2];
+ wdata->state.accel_split[1] = (payload[0] >> 1) & (0x10 | 0x20);
+ wdata->state.accel_split[1] |= (payload[1] << 1) & (0x40 | 0x80);
+
+ ir_to_input0(wdata, &payload[3], false);
+ ir_to_input1(wdata, &payload[12], false);
+ input_sync(wdata->ir);
+}
+
+static void handler_drm_SKAI2(struct wiimote_data *wdata, const __u8 *payload)
+{
+ __u8 buf[5];
+
+ handler_keys(wdata, payload);
+
+ wdata->state.accel_split[1] |= (payload[0] >> 5) & (0x01 | 0x02);
+ wdata->state.accel_split[1] |= (payload[1] >> 3) & (0x04 | 0x08);
+
+ buf[0] = 0;
+ buf[1] = 0;
+ buf[2] = wdata->state.accel_split[0];
+ buf[3] = payload[2];
+ buf[4] = wdata->state.accel_split[1];
+ handler_accel(wdata, buf);
+
+ ir_to_input2(wdata, &payload[3], false);
+ ir_to_input3(wdata, &payload[12], false);
+ input_sync(wdata->ir);
}
struct wiiproto_handler {
@@ -343,8 +989,19 @@ struct wiiproto_handler {
static struct wiiproto_handler handlers[] = {
{ .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status },
+ { .id = WIIPROTO_REQ_DATA, .size = 21, .func = handler_data },
{ .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return },
{ .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
+ { .id = WIIPROTO_REQ_DRM_KA, .size = 5, .func = handler_drm_KA },
+ { .id = WIIPROTO_REQ_DRM_KE, .size = 10, .func = handler_drm_KE },
+ { .id = WIIPROTO_REQ_DRM_KAI, .size = 17, .func = handler_drm_KAI },
+ { .id = WIIPROTO_REQ_DRM_KEE, .size = 21, .func = handler_drm_KEE },
+ { .id = WIIPROTO_REQ_DRM_KAE, .size = 21, .func = handler_drm_KAE },
+ { .id = WIIPROTO_REQ_DRM_KIE, .size = 21, .func = handler_drm_KIE },
+ { .id = WIIPROTO_REQ_DRM_KAIE, .size = 21, .func = handler_drm_KAIE },
+ { .id = WIIPROTO_REQ_DRM_E, .size = 21, .func = handler_drm_E },
+ { .id = WIIPROTO_REQ_DRM_SKAI1, .size = 21, .func = handler_drm_SKAI1 },
+ { .id = WIIPROTO_REQ_DRM_SKAI2, .size = 21, .func = handler_drm_SKAI2 },
{ .id = 0 }
};
@@ -355,6 +1012,7 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
struct wiiproto_handler *h;
int i;
unsigned long flags;
+ bool handled = false;
if (size < 1)
return -EINVAL;
@@ -363,10 +1021,16 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
for (i = 0; handlers[i].id; ++i) {
h = &handlers[i];
- if (h->id == raw_data[0] && h->size < size)
+ if (h->id == raw_data[0] && h->size < size) {
h->func(wdata, &raw_data[1]);
+ handled = true;
+ }
}
+ if (!handled)
+ hid_warn(hdev, "Unhandled report %hhu size %d\n", raw_data[0],
+ size);
+
spin_unlock_irqrestore(&wdata->state.lock, flags);
return 0;
@@ -434,16 +1098,13 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
return NULL;
wdata->input = input_allocate_device();
- if (!wdata->input) {
- kfree(wdata);
- return NULL;
- }
+ if (!wdata->input)
+ goto err;
wdata->hdev = hdev;
hid_set_drvdata(hdev, wdata);
input_set_drvdata(wdata->input, wdata);
- wdata->input->event = wiimote_input_event;
wdata->input->open = wiimote_input_open;
wdata->input->close = wiimote_input_close;
wdata->input->dev.parent = &wdata->hdev->dev;
@@ -457,18 +1118,89 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
for (i = 0; i < WIIPROTO_KEY_COUNT; ++i)
set_bit(wiiproto_keymap[i], wdata->input->keybit);
+ set_bit(FF_RUMBLE, wdata->input->ffbit);
+ if (input_ff_create_memless(wdata->input, NULL, wiimote_ff_play))
+ goto err_input;
+
+ wdata->accel = input_allocate_device();
+ if (!wdata->accel)
+ goto err_input;
+
+ input_set_drvdata(wdata->accel, wdata);
+ wdata->accel->open = wiimote_accel_open;
+ wdata->accel->close = wiimote_accel_close;
+ wdata->accel->dev.parent = &wdata->hdev->dev;
+ wdata->accel->id.bustype = wdata->hdev->bus;
+ wdata->accel->id.vendor = wdata->hdev->vendor;
+ wdata->accel->id.product = wdata->hdev->product;
+ wdata->accel->id.version = wdata->hdev->version;
+ wdata->accel->name = WIIMOTE_NAME " Accelerometer";
+
+ set_bit(EV_ABS, wdata->accel->evbit);
+ set_bit(ABS_RX, wdata->accel->absbit);
+ set_bit(ABS_RY, wdata->accel->absbit);
+ set_bit(ABS_RZ, wdata->accel->absbit);
+ input_set_abs_params(wdata->accel, ABS_RX, -500, 500, 2, 4);
+ input_set_abs_params(wdata->accel, ABS_RY, -500, 500, 2, 4);
+ input_set_abs_params(wdata->accel, ABS_RZ, -500, 500, 2, 4);
+
+ wdata->ir = input_allocate_device();
+ if (!wdata->ir)
+ goto err_ir;
+
+ input_set_drvdata(wdata->ir, wdata);
+ wdata->ir->open = wiimote_ir_open;
+ wdata->ir->close = wiimote_ir_close;
+ wdata->ir->dev.parent = &wdata->hdev->dev;
+ wdata->ir->id.bustype = wdata->hdev->bus;
+ wdata->ir->id.vendor = wdata->hdev->vendor;
+ wdata->ir->id.product = wdata->hdev->product;
+ wdata->ir->id.version = wdata->hdev->version;
+ wdata->ir->name = WIIMOTE_NAME " IR";
+
+ set_bit(EV_ABS, wdata->ir->evbit);
+ set_bit(ABS_HAT0X, wdata->ir->absbit);
+ set_bit(ABS_HAT0Y, wdata->ir->absbit);
+ set_bit(ABS_HAT1X, wdata->ir->absbit);
+ set_bit(ABS_HAT1Y, wdata->ir->absbit);
+ set_bit(ABS_HAT2X, wdata->ir->absbit);
+ set_bit(ABS_HAT2Y, wdata->ir->absbit);
+ set_bit(ABS_HAT3X, wdata->ir->absbit);
+ set_bit(ABS_HAT3Y, wdata->ir->absbit);
+ input_set_abs_params(wdata->ir, ABS_HAT0X, 0, 1023, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT0Y, 0, 767, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT1X, 0, 1023, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT1Y, 0, 767, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT2X, 0, 1023, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT2Y, 0, 767, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT3X, 0, 1023, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT3Y, 0, 767, 2, 4);
+
spin_lock_init(&wdata->qlock);
INIT_WORK(&wdata->worker, wiimote_worker);
spin_lock_init(&wdata->state.lock);
+ init_completion(&wdata->state.ready);
+ mutex_init(&wdata->state.sync);
return wdata;
+
+err_ir:
+ input_free_device(wdata->accel);
+err_input:
+ input_free_device(wdata->input);
+err:
+ kfree(wdata);
+ return NULL;
}
static void wiimote_destroy(struct wiimote_data *wdata)
{
wiimote_leds_destroy(wdata);
+ power_supply_unregister(&wdata->battery);
+ input_unregister_device(wdata->accel);
+ input_unregister_device(wdata->ir);
input_unregister_device(wdata->input);
cancel_work_sync(&wdata->worker);
hid_hw_stop(wdata->hdev);
@@ -500,12 +1232,37 @@ static int wiimote_hid_probe(struct hid_device *hdev,
goto err;
}
- ret = input_register_device(wdata->input);
+ ret = input_register_device(wdata->accel);
if (ret) {
hid_err(hdev, "Cannot register input device\n");
goto err_stop;
}
+ ret = input_register_device(wdata->ir);
+ if (ret) {
+ hid_err(hdev, "Cannot register input device\n");
+ goto err_ir;
+ }
+
+ ret = input_register_device(wdata->input);
+ if (ret) {
+ hid_err(hdev, "Cannot register input device\n");
+ goto err_input;
+ }
+
+ wdata->battery.properties = wiimote_battery_props;
+ wdata->battery.num_properties = ARRAY_SIZE(wiimote_battery_props);
+ wdata->battery.get_property = wiimote_battery_get_property;
+ wdata->battery.name = "wiimote_battery";
+ wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ wdata->battery.use_for_apm = 0;
+
+ ret = power_supply_register(&wdata->hdev->dev, &wdata->battery);
+ if (ret) {
+ hid_err(hdev, "Cannot register battery device\n");
+ goto err_battery;
+ }
+
ret = wiimote_leds_create(wdata);
if (ret)
goto err_free;
@@ -523,9 +1280,20 @@ err_free:
wiimote_destroy(wdata);
return ret;
+err_battery:
+ input_unregister_device(wdata->input);
+ wdata->input = NULL;
+err_input:
+ input_unregister_device(wdata->ir);
+ wdata->ir = NULL;
+err_ir:
+ input_unregister_device(wdata->accel);
+ wdata->accel = NULL;
err_stop:
hid_hw_stop(hdev);
err:
+ input_free_device(wdata->ir);
+ input_free_device(wdata->accel);
input_free_device(wdata->input);
kfree(wdata);
return ret;
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
index e90371508fd..1ad85f2257b 100644
--- a/drivers/hid/hid-zydacron.c
+++ b/drivers/hid/hid-zydacron.c
@@ -201,9 +201,7 @@ static void zc_remove(struct hid_device *hdev)
struct zc_device *zc = hid_get_drvdata(hdev);
hid_hw_stop(hdev);
-
- if (NULL != zc)
- kfree(zc);
+ kfree(zc);
}
static const struct hid_device_id zc_devices[] = {
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index c79578b5a78..cf7d6d58e79 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -259,7 +259,6 @@ static int hidraw_open(struct inode *inode, struct file *file)
mutex_lock(&minors_lock);
if (!hidraw_table[minor]) {
- kfree(list);
err = -ENODEV;
goto out_unlock;
}
@@ -272,8 +271,10 @@ static int hidraw_open(struct inode *inode, struct file *file)
dev = hidraw_table[minor];
if (!dev->open++) {
err = hid_hw_power(dev->hid, PM_HINT_FULLON);
- if (err < 0)
+ if (err < 0) {
+ dev->open--;
goto out_unlock;
+ }
err = hid_hw_open(dev->hid);
if (err < 0) {
@@ -285,6 +286,8 @@ static int hidraw_open(struct inode *inode, struct file *file)
out_unlock:
mutex_unlock(&minors_lock);
out:
+ if (err < 0)
+ kfree(list);
return err;
}
@@ -510,13 +513,12 @@ void hidraw_disconnect(struct hid_device *hid)
{
struct hidraw *hidraw = hid->hidraw;
+ mutex_lock(&minors_lock);
hidraw->exist = 0;
device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
- mutex_lock(&minors_lock);
hidraw_table[hidraw->minor] = NULL;
- mutex_unlock(&minors_lock);
if (hidraw->open) {
hid_hw_close(hid);
@@ -524,6 +526,7 @@ void hidraw_disconnect(struct hid_device *hid)
} else {
kfree(hidraw);
}
+ mutex_unlock(&minors_lock);
}
EXPORT_SYMBOL_GPL(hidraw_disconnect);
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ad978f5748d..b403fcef0b8 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1270,7 +1270,7 @@ static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid)
static void hid_cease_io(struct usbhid_device *usbhid)
{
- del_timer(&usbhid->io_retry);
+ del_timer_sync(&usbhid->io_retry);
usb_kill_urb(usbhid->urbin);
usb_kill_urb(usbhid->urbctrl);
usb_kill_urb(usbhid->urbout);
@@ -1332,7 +1332,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
struct usbhid_device *usbhid = hid->driver_data;
int status;
- if (message.event & PM_EVENT_AUTO) {
+ if (PMSG_IS_AUTO(message)) {
spin_lock_irq(&usbhid->lock); /* Sync with error handler */
if (!test_bit(HID_RESET_PENDING, &usbhid->iofl)
&& !test_bit(HID_CLEAR_HALT, &usbhid->iofl)
@@ -1367,7 +1367,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
return -EIO;
}
- if (!ignoreled && (message.event & PM_EVENT_AUTO)) {
+ if (!ignoreled && PMSG_IS_AUTO(message)) {
spin_lock_irq(&usbhid->lock);
if (test_bit(HID_LED_ON, &usbhid->iofl)) {
spin_unlock_irq(&usbhid->lock);
@@ -1380,8 +1380,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
hid_cancel_delayed_stuff(usbhid);
hid_cease_io(usbhid);
- if ((message.event & PM_EVENT_AUTO) &&
- test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
+ if (PMSG_IS_AUTO(message) && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
/* lost race against keypresses */
status = hid_start_in(hid);
if (status < 0)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 3146fdcda27..4ea464151c3 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -80,10 +80,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 7c1188b53c3..4ef02b269a7 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -641,6 +641,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct usb_device *dev = hid_to_usb_dev(hid);
struct usbhid_device *usbhid = hid->driver_data;
+ memset(&dinfo, 0, sizeof(dinfo));
+
dinfo.bustype = BUS_USB;
dinfo.busnum = dev->bus->busnum;
dinfo.devnum = dev->devnum;
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
new file mode 100644
index 00000000000..9fa09ac000a
--- /dev/null
+++ b/drivers/hv/Kconfig
@@ -0,0 +1,14 @@
+config HYPERV
+ tristate "Microsoft Hyper-V client drivers"
+ depends on X86 && ACPI && PCI
+ help
+ Select this option to run Linux as a Hyper-V client operating
+ system.
+
+config HYPERV_UTILS
+ tristate "Microsoft Hyper-V Utilities driver"
+ depends on HYPERV && CONNECTOR && NLS
+ help
+ Select this option to enable the Hyper-V Utilities.
+
+
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
new file mode 100644
index 00000000000..a23938b991c
--- /dev/null
+++ b/drivers/hv/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_HYPERV) += hv_vmbus.o
+obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
+
+hv_vmbus-y := vmbus_drv.o \
+ hv.o connection.o channel.o \
+ channel_mgmt.o ring_buffer.o
+hv_utils-y := hv_util.o hv_kvp.o
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
new file mode 100644
index 00000000000..406537420ff
--- /dev/null
+++ b/drivers/hv/channel.c
@@ -0,0 +1,815 @@
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/hyperv.h>
+
+#include "hyperv_vmbus.h"
+
+#define NUM_PAGES_SPANNED(addr, len) \
+((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
+
+/* Internal routines */
+static int create_gpadl_header(
+ void *kbuffer, /* must be phys and virt contiguous */
+ u32 size, /* page-size multiple */
+ struct vmbus_channel_msginfo **msginfo,
+ u32 *messagecount);
+static void vmbus_setevent(struct vmbus_channel *channel);
+
+/*
+ * vmbus_setevent- Trigger an event notification on the specified
+ * channel.
+ */
+static void vmbus_setevent(struct vmbus_channel *channel)
+{
+ struct hv_monitor_page *monitorpage;
+
+ if (channel->offermsg.monitor_allocated) {
+ /* Each u32 represents 32 channels */
+ sync_set_bit(channel->offermsg.child_relid & 31,
+ (unsigned long *) vmbus_connection.send_int_page +
+ (channel->offermsg.child_relid >> 5));
+
+ monitorpage = vmbus_connection.monitor_pages;
+ monitorpage++; /* Get the child to parent monitor page */
+
+ sync_set_bit(channel->monitor_bit,
+ (unsigned long *)&monitorpage->trigger_group
+ [channel->monitor_grp].pending);
+
+ } else {
+ vmbus_set_event(channel->offermsg.child_relid);
+ }
+}
+
+/*
+ * vmbus_get_debug_info -Retrieve various channel debug info
+ */
+void vmbus_get_debug_info(struct vmbus_channel *channel,
+ struct vmbus_channel_debug_info *debuginfo)
+{
+ struct hv_monitor_page *monitorpage;
+ u8 monitor_group = (u8)channel->offermsg.monitorid / 32;
+ u8 monitor_offset = (u8)channel->offermsg.monitorid % 32;
+
+ debuginfo->relid = channel->offermsg.child_relid;
+ debuginfo->state = channel->state;
+ memcpy(&debuginfo->interfacetype,
+ &channel->offermsg.offer.if_type, sizeof(uuid_le));
+ memcpy(&debuginfo->interface_instance,
+ &channel->offermsg.offer.if_instance,
+ sizeof(uuid_le));
+
+ monitorpage = (struct hv_monitor_page *)vmbus_connection.monitor_pages;
+
+ debuginfo->monitorid = channel->offermsg.monitorid;
+
+ debuginfo->servermonitor_pending =
+ monitorpage->trigger_group[monitor_group].pending;
+ debuginfo->servermonitor_latency =
+ monitorpage->latency[monitor_group][monitor_offset];
+ debuginfo->servermonitor_connectionid =
+ monitorpage->parameter[monitor_group]
+ [monitor_offset].connectionid.u.id;
+
+ monitorpage++;
+
+ debuginfo->clientmonitor_pending =
+ monitorpage->trigger_group[monitor_group].pending;
+ debuginfo->clientmonitor_latency =
+ monitorpage->latency[monitor_group][monitor_offset];
+ debuginfo->clientmonitor_connectionid =
+ monitorpage->parameter[monitor_group]
+ [monitor_offset].connectionid.u.id;
+
+ hv_ringbuffer_get_debuginfo(&channel->inbound, &debuginfo->inbound);
+ hv_ringbuffer_get_debuginfo(&channel->outbound, &debuginfo->outbound);
+}
+
+/*
+ * vmbus_open - Open the specified channel.
+ */
+int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ struct vmbus_channel_open_channel *open_msg;
+ struct vmbus_channel_msginfo *open_info = NULL;
+ void *in, *out;
+ unsigned long flags;
+ int ret, t, err = 0;
+
+ newchannel->onchannel_callback = onchannelcallback;
+ newchannel->channel_callback_context = context;
+
+ /* Allocate the ring buffer */
+ out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+ get_order(send_ringbuffer_size + recv_ringbuffer_size));
+
+ if (!out)
+ return -ENOMEM;
+
+
+ in = (void *)((unsigned long)out + send_ringbuffer_size);
+
+ newchannel->ringbuffer_pages = out;
+ newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
+ recv_ringbuffer_size) >> PAGE_SHIFT;
+
+ ret = hv_ringbuffer_init(
+ &newchannel->outbound, out, send_ringbuffer_size);
+
+ if (ret != 0) {
+ err = ret;
+ goto errorout;
+ }
+
+ ret = hv_ringbuffer_init(
+ &newchannel->inbound, in, recv_ringbuffer_size);
+ if (ret != 0) {
+ err = ret;
+ goto errorout;
+ }
+
+
+ /* Establish the gpadl for the ring buffer */
+ newchannel->ringbuffer_gpadlhandle = 0;
+
+ ret = vmbus_establish_gpadl(newchannel,
+ newchannel->outbound.ring_buffer,
+ send_ringbuffer_size +
+ recv_ringbuffer_size,
+ &newchannel->ringbuffer_gpadlhandle);
+
+ if (ret != 0) {
+ err = ret;
+ goto errorout;
+ }
+
+ /* Create and init the channel open message */
+ open_info = kmalloc(sizeof(*open_info) +
+ sizeof(struct vmbus_channel_open_channel),
+ GFP_KERNEL);
+ if (!open_info) {
+ err = -ENOMEM;
+ goto errorout;
+ }
+
+ init_completion(&open_info->waitevent);
+
+ open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
+ open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
+ open_msg->openid = newchannel->offermsg.child_relid;
+ open_msg->child_relid = newchannel->offermsg.child_relid;
+ open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
+ open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
+ PAGE_SHIFT;
+ open_msg->server_contextarea_gpadlhandle = 0;
+
+ if (userdatalen > MAX_USER_DEFINED_BYTES) {
+ err = -EINVAL;
+ goto errorout;
+ }
+
+ if (userdatalen)
+ memcpy(open_msg->userdata, userdata, userdatalen);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&open_info->msglistentry,
+ &vmbus_connection.chn_msg_list);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ ret = vmbus_post_msg(open_msg,
+ sizeof(struct vmbus_channel_open_channel));
+
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
+ if (t == 0) {
+ err = -ETIMEDOUT;
+ goto errorout;
+ }
+
+
+ if (open_info->response.open_result.status)
+ err = open_info->response.open_result.status;
+
+cleanup:
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ kfree(open_info);
+ return err;
+
+errorout:
+ hv_ringbuffer_cleanup(&newchannel->outbound);
+ hv_ringbuffer_cleanup(&newchannel->inbound);
+ free_pages((unsigned long)out,
+ get_order(send_ringbuffer_size + recv_ringbuffer_size));
+ kfree(open_info);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vmbus_open);
+
+/*
+ * create_gpadl_header - Creates a gpadl for the specified buffer
+ */
+static int create_gpadl_header(void *kbuffer, u32 size,
+ struct vmbus_channel_msginfo **msginfo,
+ u32 *messagecount)
+{
+ int i;
+ int pagecount;
+ unsigned long long pfn;
+ struct vmbus_channel_gpadl_header *gpadl_header;
+ struct vmbus_channel_gpadl_body *gpadl_body;
+ struct vmbus_channel_msginfo *msgheader;
+ struct vmbus_channel_msginfo *msgbody = NULL;
+ u32 msgsize;
+
+ int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
+
+ pagecount = size >> PAGE_SHIFT;
+ pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;
+
+ /* do we need a gpadl body msg */
+ pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
+ sizeof(struct vmbus_channel_gpadl_header) -
+ sizeof(struct gpa_range);
+ pfncount = pfnsize / sizeof(u64);
+
+ if (pagecount > pfncount) {
+ /* we need a gpadl body */
+ /* fill in the header */
+ msgsize = sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_gpadl_header) +
+ sizeof(struct gpa_range) + pfncount * sizeof(u64);
+ msgheader = kzalloc(msgsize, GFP_KERNEL);
+ if (!msgheader)
+ goto nomem;
+
+ INIT_LIST_HEAD(&msgheader->submsglist);
+ msgheader->msgsize = msgsize;
+
+ gpadl_header = (struct vmbus_channel_gpadl_header *)
+ msgheader->msg;
+ gpadl_header->rangecount = 1;
+ gpadl_header->range_buflen = sizeof(struct gpa_range) +
+ pagecount * sizeof(u64);
+ gpadl_header->range[0].byte_offset = 0;
+ gpadl_header->range[0].byte_count = size;
+ for (i = 0; i < pfncount; i++)
+ gpadl_header->range[0].pfn_array[i] = pfn+i;
+ *msginfo = msgheader;
+ *messagecount = 1;
+
+ pfnsum = pfncount;
+ pfnleft = pagecount - pfncount;
+
+ /* how many pfns can we fit */
+ pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
+ sizeof(struct vmbus_channel_gpadl_body);
+ pfncount = pfnsize / sizeof(u64);
+
+ /* fill in the body */
+ while (pfnleft) {
+ if (pfnleft > pfncount)
+ pfncurr = pfncount;
+ else
+ pfncurr = pfnleft;
+
+ msgsize = sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_gpadl_body) +
+ pfncurr * sizeof(u64);
+ msgbody = kzalloc(msgsize, GFP_KERNEL);
+
+ if (!msgbody) {
+ struct vmbus_channel_msginfo *pos = NULL;
+ struct vmbus_channel_msginfo *tmp = NULL;
+ /*
+ * Free up all the allocated messages.
+ */
+ list_for_each_entry_safe(pos, tmp,
+ &msgheader->submsglist,
+ msglistentry) {
+
+ list_del(&pos->msglistentry);
+ kfree(pos);
+ }
+
+ goto nomem;
+ }
+
+ msgbody->msgsize = msgsize;
+ (*messagecount)++;
+ gpadl_body =
+ (struct vmbus_channel_gpadl_body *)msgbody->msg;
+
+ /*
+ * Gpadl is u32 and we are using a pointer which could
+ * be 64-bit
+ * This is governed by the guest/host protocol and
+ * so the hypervisor gurantees that this is ok.
+ */
+ for (i = 0; i < pfncurr; i++)
+ gpadl_body->pfn[i] = pfn + pfnsum + i;
+
+ /* add to msg header */
+ list_add_tail(&msgbody->msglistentry,
+ &msgheader->submsglist);
+ pfnsum += pfncurr;
+ pfnleft -= pfncurr;
+ }
+ } else {
+ /* everything fits in a header */
+ msgsize = sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_gpadl_header) +
+ sizeof(struct gpa_range) + pagecount * sizeof(u64);
+ msgheader = kzalloc(msgsize, GFP_KERNEL);
+ if (msgheader == NULL)
+ goto nomem;
+ msgheader->msgsize = msgsize;
+
+ gpadl_header = (struct vmbus_channel_gpadl_header *)
+ msgheader->msg;
+ gpadl_header->rangecount = 1;
+ gpadl_header->range_buflen = sizeof(struct gpa_range) +
+ pagecount * sizeof(u64);
+ gpadl_header->range[0].byte_offset = 0;
+ gpadl_header->range[0].byte_count = size;
+ for (i = 0; i < pagecount; i++)
+ gpadl_header->range[0].pfn_array[i] = pfn+i;
+
+ *msginfo = msgheader;
+ *messagecount = 1;
+ }
+
+ return 0;
+nomem:
+ kfree(msgheader);
+ kfree(msgbody);
+ return -ENOMEM;
+}
+
+/*
+ * vmbus_establish_gpadl - Estabish a GPADL for the specified buffer
+ *
+ * @channel: a channel
+ * @kbuffer: from kmalloc
+ * @size: page-size multiple
+ * @gpadl_handle: some funky thing
+ */
+int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ u32 size, u32 *gpadl_handle)
+{
+ struct vmbus_channel_gpadl_header *gpadlmsg;
+ struct vmbus_channel_gpadl_body *gpadl_body;
+ struct vmbus_channel_msginfo *msginfo = NULL;
+ struct vmbus_channel_msginfo *submsginfo;
+ u32 msgcount;
+ struct list_head *curr;
+ u32 next_gpadl_handle;
+ unsigned long flags;
+ int ret = 0;
+ int t;
+
+ next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
+ atomic_inc(&vmbus_connection.next_gpadl_handle);
+
+ ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
+ if (ret)
+ return ret;
+
+ init_completion(&msginfo->waitevent);
+
+ gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
+ gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
+ gpadlmsg->child_relid = channel->offermsg.child_relid;
+ gpadlmsg->gpadl = next_gpadl_handle;
+
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&msginfo->msglistentry,
+ &vmbus_connection.chn_msg_list);
+
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
+ sizeof(*msginfo));
+ if (ret != 0)
+ goto cleanup;
+
+ if (msgcount > 1) {
+ list_for_each(curr, &msginfo->submsglist) {
+
+ submsginfo = (struct vmbus_channel_msginfo *)curr;
+ gpadl_body =
+ (struct vmbus_channel_gpadl_body *)submsginfo->msg;
+
+ gpadl_body->header.msgtype =
+ CHANNELMSG_GPADL_BODY;
+ gpadl_body->gpadl = next_gpadl_handle;
+
+ ret = vmbus_post_msg(gpadl_body,
+ submsginfo->msgsize -
+ sizeof(*submsginfo));
+ if (ret != 0)
+ goto cleanup;
+
+ }
+ }
+ t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+ BUG_ON(t == 0);
+
+
+ /* At this point, we received the gpadl created msg */
+ *gpadl_handle = gpadlmsg->gpadl;
+
+cleanup:
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ kfree(msginfo);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
+
+/*
+ * vmbus_teardown_gpadl -Teardown the specified GPADL handle
+ */
+int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+{
+ struct vmbus_channel_gpadl_teardown *msg;
+ struct vmbus_channel_msginfo *info;
+ unsigned long flags;
+ int ret, t;
+
+ info = kmalloc(sizeof(*info) +
+ sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ init_completion(&info->waitevent);
+
+ msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
+
+ msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
+ msg->child_relid = channel->offermsg.child_relid;
+ msg->gpadl = gpadl_handle;
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&info->msglistentry,
+ &vmbus_connection.chn_msg_list);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ ret = vmbus_post_msg(msg,
+ sizeof(struct vmbus_channel_gpadl_teardown));
+
+ BUG_ON(ret != 0);
+ t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
+ BUG_ON(t == 0);
+
+ /* Received a torndown response */
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ kfree(info);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
+
+/*
+ * vmbus_close - Close the specified channel
+ */
+void vmbus_close(struct vmbus_channel *channel)
+{
+ struct vmbus_channel_close_channel *msg;
+ int ret;
+ unsigned long flags;
+
+ /* Stop callback and cancel the timer asap */
+ spin_lock_irqsave(&channel->inbound_lock, flags);
+ channel->onchannel_callback = NULL;
+ spin_unlock_irqrestore(&channel->inbound_lock, flags);
+
+ /* Send a closing message */
+
+ msg = &channel->close_msg.msg;
+
+ msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
+ msg->child_relid = channel->offermsg.child_relid;
+
+ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
+
+ BUG_ON(ret != 0);
+ /* Tear down the gpadl for the channel's ring buffer */
+ if (channel->ringbuffer_gpadlhandle)
+ vmbus_teardown_gpadl(channel,
+ channel->ringbuffer_gpadlhandle);
+
+ /* Cleanup the ring buffers for this channel */
+ hv_ringbuffer_cleanup(&channel->outbound);
+ hv_ringbuffer_cleanup(&channel->inbound);
+
+ free_pages((unsigned long)channel->ringbuffer_pages,
+ get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+
+
+}
+EXPORT_SYMBOL_GPL(vmbus_close);
+
+/**
+ * vmbus_sendpacket() - Send the specified buffer on the given channel
+ * @channel: Pointer to vmbus_channel structure.
+ * @buffer: Pointer to the buffer you want to receive the data into.
+ * @bufferlen: Maximum size of what the the buffer will hold
+ * @requestid: Identifier of the request
+ * @type: Type of packet that is being send e.g. negotiate, time
+ * packet etc.
+ *
+ * Sends data in @buffer directly to hyper-v via the vmbus
+ * This will send the data unparsed to hyper-v.
+ *
+ * Mainly used by Hyper-V drivers.
+ */
+int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
+ u32 bufferlen, u64 requestid,
+ enum vmbus_packet_type type, u32 flags)
+{
+ struct vmpacket_descriptor desc;
+ u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
+ u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
+ struct scatterlist bufferlist[3];
+ u64 aligned_data = 0;
+ int ret;
+
+
+ /* Setup the descriptor */
+ desc.type = type; /* VmbusPacketTypeDataInBand; */
+ desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
+ /* in 8-bytes granularity */
+ desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
+ desc.len8 = (u16)(packetlen_aligned >> 3);
+ desc.trans_id = requestid;
+
+ sg_init_table(bufferlist, 3);
+ sg_set_buf(&bufferlist[0], &desc, sizeof(struct vmpacket_descriptor));
+ sg_set_buf(&bufferlist[1], buffer, bufferlen);
+ sg_set_buf(&bufferlist[2], &aligned_data,
+ packetlen_aligned - packetlen);
+
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+
+ if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ vmbus_setevent(channel);
+
+ return ret;
+}
+EXPORT_SYMBOL(vmbus_sendpacket);
+
+/*
+ * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
+ * packets using a GPADL Direct packet type.
+ */
+int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
+ struct hv_page_buffer pagebuffers[],
+ u32 pagecount, void *buffer, u32 bufferlen,
+ u64 requestid)
+{
+ int ret;
+ int i;
+ struct vmbus_channel_packet_page_buffer desc;
+ u32 descsize;
+ u32 packetlen;
+ u32 packetlen_aligned;
+ struct scatterlist bufferlist[3];
+ u64 aligned_data = 0;
+
+ if (pagecount > MAX_PAGE_BUFFER_COUNT)
+ return -EINVAL;
+
+
+ /*
+ * Adjust the size down since vmbus_channel_packet_page_buffer is the
+ * largest size we support
+ */
+ descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
+ ((MAX_PAGE_BUFFER_COUNT - pagecount) *
+ sizeof(struct hv_page_buffer));
+ packetlen = descsize + bufferlen;
+ packetlen_aligned = ALIGN(packetlen, sizeof(u64));
+
+ /* Setup the descriptor */
+ desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
+ desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
+ desc.dataoffset8 = descsize >> 3; /* in 8-bytes grandularity */
+ desc.length8 = (u16)(packetlen_aligned >> 3);
+ desc.transactionid = requestid;
+ desc.rangecount = pagecount;
+
+ for (i = 0; i < pagecount; i++) {
+ desc.range[i].len = pagebuffers[i].len;
+ desc.range[i].offset = pagebuffers[i].offset;
+ desc.range[i].pfn = pagebuffers[i].pfn;
+ }
+
+ sg_init_table(bufferlist, 3);
+ sg_set_buf(&bufferlist[0], &desc, descsize);
+ sg_set_buf(&bufferlist[1], buffer, bufferlen);
+ sg_set_buf(&bufferlist[2], &aligned_data,
+ packetlen_aligned - packetlen);
+
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+
+ if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ vmbus_setevent(channel);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
+
+/*
+ * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
+ * using a GPADL Direct packet type.
+ */
+int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
+ struct hv_multipage_buffer *multi_pagebuffer,
+ void *buffer, u32 bufferlen, u64 requestid)
+{
+ int ret;
+ struct vmbus_channel_packet_multipage_buffer desc;
+ u32 descsize;
+ u32 packetlen;
+ u32 packetlen_aligned;
+ struct scatterlist bufferlist[3];
+ u64 aligned_data = 0;
+ u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
+ multi_pagebuffer->len);
+
+
+ if ((pfncount < 0) || (pfncount > MAX_MULTIPAGE_BUFFER_COUNT))
+ return -EINVAL;
+
+ /*
+ * Adjust the size down since vmbus_channel_packet_multipage_buffer is
+ * the largest size we support
+ */
+ descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
+ ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
+ sizeof(u64));
+ packetlen = descsize + bufferlen;
+ packetlen_aligned = ALIGN(packetlen, sizeof(u64));
+
+
+ /* Setup the descriptor */
+ desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
+ desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
+ desc.dataoffset8 = descsize >> 3; /* in 8-bytes grandularity */
+ desc.length8 = (u16)(packetlen_aligned >> 3);
+ desc.transactionid = requestid;
+ desc.rangecount = 1;
+
+ desc.range.len = multi_pagebuffer->len;
+ desc.range.offset = multi_pagebuffer->offset;
+
+ memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
+ pfncount * sizeof(u64));
+
+ sg_init_table(bufferlist, 3);
+ sg_set_buf(&bufferlist[0], &desc, descsize);
+ sg_set_buf(&bufferlist[1], buffer, bufferlen);
+ sg_set_buf(&bufferlist[2], &aligned_data,
+ packetlen_aligned - packetlen);
+
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+
+ if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ vmbus_setevent(channel);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
+
+/**
+ * vmbus_recvpacket() - Retrieve the user packet on the specified channel
+ * @channel: Pointer to vmbus_channel structure.
+ * @buffer: Pointer to the buffer you want to receive the data into.
+ * @bufferlen: Maximum size of what the the buffer will hold
+ * @buffer_actual_len: The actual size of the data after it was received
+ * @requestid: Identifier of the request
+ *
+ * Receives directly from the hyper-v vmbus and puts the data it received
+ * into Buffer. This will receive the data unparsed from hyper-v.
+ *
+ * Mainly used by Hyper-V drivers.
+ */
+int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
+ u32 bufferlen, u32 *buffer_actual_len, u64 *requestid)
+{
+ struct vmpacket_descriptor desc;
+ u32 packetlen;
+ u32 userlen;
+ int ret;
+
+ *buffer_actual_len = 0;
+ *requestid = 0;
+
+
+ ret = hv_ringbuffer_peek(&channel->inbound, &desc,
+ sizeof(struct vmpacket_descriptor));
+ if (ret != 0)
+ return 0;
+
+ packetlen = desc.len8 << 3;
+ userlen = packetlen - (desc.offset8 << 3);
+
+ *buffer_actual_len = userlen;
+
+ if (userlen > bufferlen) {
+
+ pr_err("Buffer too small - got %d needs %d\n",
+ bufferlen, userlen);
+ return -ETOOSMALL;
+ }
+
+ *requestid = desc.trans_id;
+
+ /* Copy over the packet to the user buffer */
+ ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
+ (desc.offset8 << 3));
+
+
+ return 0;
+}
+EXPORT_SYMBOL(vmbus_recvpacket);
+
+/*
+ * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
+ */
+int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
+ u32 bufferlen, u32 *buffer_actual_len,
+ u64 *requestid)
+{
+ struct vmpacket_descriptor desc;
+ u32 packetlen;
+ u32 userlen;
+ int ret;
+
+ *buffer_actual_len = 0;
+ *requestid = 0;
+
+
+ ret = hv_ringbuffer_peek(&channel->inbound, &desc,
+ sizeof(struct vmpacket_descriptor));
+ if (ret != 0)
+ return 0;
+
+
+ packetlen = desc.len8 << 3;
+ userlen = packetlen - (desc.offset8 << 3);
+
+ *buffer_actual_len = packetlen;
+
+ if (packetlen > bufferlen) {
+ pr_err("Buffer too small - needed %d bytes but "
+ "got space for only %d bytes\n",
+ packetlen, bufferlen);
+ return -ENOBUFS;
+ }
+
+ *requestid = desc.trans_id;
+
+ /* Copy over the entire packet to the user buffer */
+ ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
diff --git a/drivers/staging/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index bf011f3fb85..12b85ff957f 100644
--- a/drivers/staging/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -28,8 +28,8 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
+#include <linux/hyperv.h>
-#include "hyperv.h"
#include "hyperv_vmbus.h"
struct vmbus_channel_message_table_entry {
@@ -40,12 +40,12 @@ struct vmbus_channel_message_table_entry {
#define MAX_MSG_TYPES 4
#define MAX_NUM_DEVICE_CLASSES_SUPPORTED 8
-static const struct hv_guid
+static const uuid_le
supported_device_classes[MAX_NUM_DEVICE_CLASSES_SUPPORTED] = {
/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
/* Storage - SCSI */
{
- .data = {
+ .b = {
0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f
}
@@ -54,7 +54,7 @@ static const struct hv_guid
/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
/* Network */
{
- .data = {
+ .b = {
0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E
}
@@ -63,7 +63,7 @@ static const struct hv_guid
/* {CFA8B69E-5B4A-4cc0-B98B-8BA1A1F3F95A} */
/* Input */
{
- .data = {
+ .b = {
0x9E, 0xB6, 0xA8, 0xCF, 0x4A, 0x5B, 0xc0, 0x4c,
0xB9, 0x8B, 0x8B, 0xA1, 0xA1, 0xF3, 0xF9, 0x5A
}
@@ -72,7 +72,7 @@ static const struct hv_guid
/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
/* IDE */
{
- .data = {
+ .b = {
0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5
}
@@ -80,7 +80,7 @@ static const struct hv_guid
/* 0E0B6031-5213-4934-818B-38D90CED39DB */
/* Shutdown */
{
- .data = {
+ .b = {
0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49,
0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB
}
@@ -88,7 +88,7 @@ static const struct hv_guid
/* {9527E630-D0AE-497b-ADCE-E80AB0175CAF} */
/* TimeSync */
{
- .data = {
+ .b = {
0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf
}
@@ -96,7 +96,7 @@ static const struct hv_guid
/* {57164f39-9115-4e78-ab55-382f3bd5422d} */
/* Heartbeat */
{
- .data = {
+ .b = {
0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d
}
@@ -104,7 +104,7 @@ static const struct hv_guid
/* {A9A0F4E7-5A45-4d96-B827-8A841E8C03E6} */
/* KVP */
{
- .data = {
+ .b = {
0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6
}
@@ -114,7 +114,7 @@ static const struct hv_guid
/**
- * prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
+ * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
* @icmsghdrp: Pointer to msg header structure
* @icmsg_negotiate: Pointer to negotiate message structure
* @buf: Raw buffer channel data
@@ -128,9 +128,8 @@ static const struct hv_guid
*
* Mainly used by Hyper-V drivers.
*/
-void prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
- struct icmsg_negotiate *negop,
- u8 *buf)
+void vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
+ struct icmsg_negotiate *negop, u8 *buf)
{
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
icmsghdrp->icmsgsize = 0x10;
@@ -156,119 +155,7 @@ void prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
negop->icmsg_vercnt = 1;
}
}
-EXPORT_SYMBOL(prep_negotiate_resp);
-
-/**
- * chn_cb_negotiate() - Default handler for non IDE/SCSI/NETWORK
- * Hyper-V requests
- * @context: Pointer to argument structure.
- *
- * Set up the default handler for non device driver specific requests
- * from Hyper-V. This stub responds to the default negotiate messages
- * that come in for every non IDE/SCSI/Network request.
- * This behavior is normally overwritten in the hv_utils driver. That
- * driver handles requests like graceful shutdown, heartbeats etc.
- *
- * Mainly used by Hyper-V drivers.
- */
-void chn_cb_negotiate(void *context)
-{
- struct vmbus_channel *channel = context;
- u8 *buf;
- u32 buflen, recvlen;
- u64 requestid;
-
- struct icmsg_hdr *icmsghdrp;
- struct icmsg_negotiate *negop = NULL;
-
- if (channel->util_index >= 0) {
- /*
- * This is a properly initialized util channel.
- * Route this callback appropriately and setup state
- * so that we don't need to reroute again.
- */
- if (hv_cb_utils[channel->util_index].callback != NULL) {
- /*
- * The util driver has established a handler for
- * this service; do the magic.
- */
- channel->onchannel_callback =
- hv_cb_utils[channel->util_index].callback;
- (hv_cb_utils[channel->util_index].callback)(channel);
- return;
- }
- }
-
- buflen = PAGE_SIZE;
- buf = kmalloc(buflen, GFP_ATOMIC);
-
- vmbus_recvpacket(channel, buf, buflen, &recvlen, &requestid);
-
- if (recvlen > 0) {
- icmsghdrp = (struct icmsg_hdr *)&buf[
- sizeof(struct vmbuspipe_hdr)];
-
- prep_negotiate_resp(icmsghdrp, negop, buf);
-
- icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
- | ICMSGHDRFLAG_RESPONSE;
-
- vmbus_sendpacket(channel, buf,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
- }
-
- kfree(buf);
-}
-EXPORT_SYMBOL(chn_cb_negotiate);
-
-/*
- * Function table used for message responses for non IDE/SCSI/Network type
- * messages. (Such as KVP/Shutdown etc)
- */
-struct hyperv_service_callback hv_cb_utils[MAX_MSG_TYPES] = {
- /* 0E0B6031-5213-4934-818B-38D90CED39DB */
- /* Shutdown */
- {
- .msg_type = HV_SHUTDOWN_MSG,
- .data = {
- 0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49,
- 0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB
- },
- .log_msg = "Shutdown channel functionality initialized"
- },
-
- /* {9527E630-D0AE-497b-ADCE-E80AB0175CAF} */
- /* TimeSync */
- {
- .msg_type = HV_TIMESYNC_MSG,
- .data = {
- 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
- 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf
- },
- .log_msg = "Timesync channel functionality initialized"
- },
- /* {57164f39-9115-4e78-ab55-382f3bd5422d} */
- /* Heartbeat */
- {
- .msg_type = HV_HEARTBEAT_MSG,
- .data = {
- 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
- 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d
- },
- .log_msg = "Heartbeat channel functionality initialized"
- },
- /* {A9A0F4E7-5A45-4d96-B827-8A841E8C03E6} */
- /* KVP */
- {
- .data = {
- 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
- 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6
- },
- .log_msg = "KVP channel functionality initialized"
- },
-};
-EXPORT_SYMBOL(hv_cb_utils);
+EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
/*
* alloc_channel - Allocate and initialize a vmbus channel object
@@ -309,7 +196,7 @@ static void release_channel(struct work_struct *work)
/*
* free_channel - Release the resources used by the vmbus channel object
*/
-void free_channel(struct vmbus_channel *channel)
+static void free_channel(struct vmbus_channel *channel)
{
/*
@@ -333,7 +220,7 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
struct vmbus_channel,
work);
- vmbus_child_device_unregister(channel->device_obj);
+ vmbus_device_unregister(channel->device_obj);
}
/*
@@ -348,7 +235,6 @@ static void vmbus_process_offer(struct work_struct *work)
struct vmbus_channel *channel;
bool fnew = true;
int ret;
- int cnt;
unsigned long flags;
/* The next possible work is rescind handling */
@@ -358,12 +244,10 @@ static void vmbus_process_offer(struct work_struct *work)
spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
- if (!memcmp(&channel->offermsg.offer.if_type,
- &newchannel->offermsg.offer.if_type,
- sizeof(struct hv_guid)) &&
- !memcmp(&channel->offermsg.offer.if_instance,
- &newchannel->offermsg.offer.if_instance,
- sizeof(struct hv_guid))) {
+ if (!uuid_le_cmp(channel->offermsg.offer.if_type,
+ newchannel->offermsg.offer.if_type) &&
+ !uuid_le_cmp(channel->offermsg.offer.if_instance,
+ newchannel->offermsg.offer.if_instance)) {
fnew = false;
break;
}
@@ -385,7 +269,7 @@ static void vmbus_process_offer(struct work_struct *work)
* We need to set the DeviceObject field before calling
* vmbus_child_dev_add()
*/
- newchannel->device_obj = vmbus_child_device_create(
+ newchannel->device_obj = vmbus_device_create(
&newchannel->offermsg.offer.if_type,
&newchannel->offermsg.offer.if_instance,
newchannel);
@@ -395,7 +279,7 @@ static void vmbus_process_offer(struct work_struct *work)
* binding which eventually invokes the device driver's AddDevice()
* method.
*/
- ret = vmbus_child_device_register(newchannel->device_obj);
+ ret = vmbus_device_register(newchannel->device_obj);
if (ret != 0) {
pr_err("unable to add child device object (relid %d)\n",
newchannel->offermsg.child_relid);
@@ -412,48 +296,26 @@ static void vmbus_process_offer(struct work_struct *work)
* can cleanup properly
*/
newchannel->state = CHANNEL_OPEN_STATE;
- newchannel->util_index = -1; /* Invalid index */
-
- /* Open IC channels */
- for (cnt = 0; cnt < MAX_MSG_TYPES; cnt++) {
- if (memcmp(&newchannel->offermsg.offer.if_type,
- &hv_cb_utils[cnt].data,
- sizeof(struct hv_guid)) == 0 &&
- vmbus_open(newchannel, 2 * PAGE_SIZE,
- 2 * PAGE_SIZE, NULL, 0,
- chn_cb_negotiate,
- newchannel) == 0) {
- hv_cb_utils[cnt].channel = newchannel;
- newchannel->util_index = cnt;
-
- pr_info("%s\n", hv_cb_utils[cnt].log_msg);
-
- }
- }
}
}
/*
* vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
*
- * We ignore all offers except network and storage offers. For each network and
- * storage offers, we create a channel object and queue a work item to the
- * channel object to process the offer synchronously
*/
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_offer_channel *offer;
struct vmbus_channel *newchannel;
- struct hv_guid *guidtype;
- struct hv_guid *guidinstance;
+ uuid_le *guidtype;
+ uuid_le *guidinstance;
int i;
int fsupported = 0;
offer = (struct vmbus_channel_offer_channel *)hdr;
for (i = 0; i < MAX_NUM_DEVICE_CLASSES_SUPPORTED; i++) {
- if (memcmp(&offer->offer.if_type,
- &supported_device_classes[i],
- sizeof(struct hv_guid)) == 0) {
+ if (!uuid_le_cmp(offer->offer.if_type,
+ supported_device_classes[i])) {
fsupported = 1;
break;
}
diff --git a/drivers/staging/hv/connection.c b/drivers/hv/connection.c
index e6b40392e08..650c9f0b664 100644
--- a/drivers/staging/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -25,11 +25,12 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
+#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-
-#include "hyperv.h"
+#include <linux/hyperv.h>
+#include <asm/hyperv.h>
#include "hyperv_vmbus.h"
@@ -49,10 +50,6 @@ int vmbus_connect(void)
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
- /* Make sure we are not connecting or connected */
- if (vmbus_connection.conn_state != DISCONNECTED)
- return -EISCONN;
-
/* Initialize the vmbus connection */
vmbus_connection.conn_state = CONNECTING;
vmbus_connection.work_queue = create_workqueue("hv_vmbus_con");
@@ -214,8 +211,7 @@ struct vmbus_channel *relid2channel(u32 relid)
static void process_chn_event(u32 relid)
{
struct vmbus_channel *channel;
-
- /* ASSERT(relId > 0); */
+ unsigned long flags;
/*
* Find the channel based on this relid and invokes the
@@ -223,11 +219,27 @@ static void process_chn_event(u32 relid)
*/
channel = relid2channel(relid);
- if (channel) {
- channel->onchannel_callback(channel->channel_callback_context);
- } else {
+ if (!channel) {
pr_err("channel not found for relid - %u\n", relid);
+ return;
}
+
+ /*
+ * A channel once created is persistent even when there
+ * is no driver handling the device. An unloading driver
+ * sets the onchannel_callback to NULL under the
+ * protection of the channel inbound_lock. Thus, checking
+ * and invoking the driver specific callback takes care of
+ * orderly unloading of the driver.
+ */
+
+ spin_lock_irqsave(&channel->inbound_lock, flags);
+ if (channel->onchannel_callback != NULL)
+ channel->onchannel_callback(channel->channel_callback_context);
+ else
+ pr_err("no channel callback for relid - %u\n", relid);
+
+ spin_unlock_irqrestore(&channel->inbound_lock, flags);
}
/*
@@ -248,16 +260,17 @@ void vmbus_on_event(unsigned long data)
if (!recv_int_page[dword])
continue;
for (bit = 0; bit < 32; bit++) {
- if (sync_test_and_clear_bit(bit, (unsigned long *)&recv_int_page[dword])) {
+ if (sync_test_and_clear_bit(bit,
+ (unsigned long *)&recv_int_page[dword])) {
relid = (dword << 5) + bit;
- if (relid == 0) {
+ if (relid == 0)
/*
* Special case - vmbus
* channel protocol msg
*/
continue;
- }
+
process_chn_event(relid);
}
}
@@ -270,10 +283,25 @@ void vmbus_on_event(unsigned long data)
int vmbus_post_msg(void *buffer, size_t buflen)
{
union hv_connection_id conn_id;
+ int ret = 0;
+ int retries = 0;
conn_id.asu32 = 0;
conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;
- return hv_post_message(conn_id, 1, buffer, buflen);
+
+ /*
+ * hv_post_message() can have transient failures because of
+ * insufficient resources. Retry the operation a couple of
+ * times before giving up.
+ */
+ while (retries < 3) {
+ ret = hv_post_message(conn_id, 1, buffer, buflen);
+ if (ret != HV_STATUS_INSUFFICIENT_BUFFERS)
+ return ret;
+ retries++;
+ msleep(100);
+ }
+ return ret;
}
/*
diff --git a/drivers/staging/hv/hv.c b/drivers/hv/hv.c
index 824f81679ae..0fb100ed91a 100644
--- a/drivers/staging/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -25,8 +25,8 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-
-#include "hyperv.h"
+#include <linux/hyperv.h>
+#include <asm/hyperv.h>
#include "hyperv_vmbus.h"
/* The one and only */
@@ -111,7 +111,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
u64 hv_status = 0;
u64 input_address = (input) ? virt_to_phys(input) : 0;
u64 output_address = (output) ? virt_to_phys(output) : 0;
- volatile void *hypercall_page = hv_context.hypercall_page;
+ void *hypercall_page = hv_context.hypercall_page;
__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
__asm__ __volatile__("call *%3" : "=a" (hv_status) :
@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
u64 output_address = (output) ? virt_to_phys(output) : 0;
u32 output_address_hi = output_address >> 32;
u32 output_address_lo = output_address & 0xFFFFFFFF;
- volatile void *hypercall_page = hv_context.hypercall_page;
+ void *hypercall_page = hv_context.hypercall_page;
__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
"=a"(hv_status_lo) : "d" (control_hi),
@@ -151,7 +151,6 @@ static u64 do_hypercall(u64 control, void *input, void *output)
*/
int hv_init(void)
{
- int ret = 0;
int max_leaf;
union hv_x64_msr_hypercall_contents hypercall_msr;
void *virtaddr = NULL;
@@ -164,11 +163,7 @@ int hv_init(void)
goto cleanup;
max_leaf = query_hypervisor_info();
- /* HvQueryHypervisorFeatures(maxLeaf); */
- /*
- * We only support running on top of Hyper-V
- */
rdmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);
if (hv_context.guestid != 0)
@@ -181,10 +176,6 @@ int hv_init(void)
/* See if the hypercall page is already set */
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
- /*
- * Allocate the hypercall page memory
- * virtaddr = osd_page_alloc(1);
- */
virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
if (!virtaddr)
@@ -222,7 +213,7 @@ int hv_init(void)
hv_context.signal_event_param->flag_number = 0;
hv_context.signal_event_param->rsvdz = 0;
- return ret;
+ return 0;
cleanup:
if (virtaddr) {
@@ -233,8 +224,8 @@ cleanup:
vfree(virtaddr);
}
- ret = -1;
- return ret;
+
+ return -ENOTSUPP;
}
/*
@@ -378,7 +369,7 @@ void hv_synic_init(void *irqarg)
shared_sint.as_uint64 = 0;
shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
shared_sint.masked = false;
- shared_sint.auto_eoi = true;
+ shared_sint.auto_eoi = false;
wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
diff --git a/drivers/staging/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 13b0ecf7d5d..89f52440fcf 100644
--- a/drivers/staging/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -26,8 +26,8 @@
#include <linux/nls.h>
#include <linux/connector.h>
#include <linux/workqueue.h>
+#include <linux/hyperv.h>
-#include "hyperv.h"
#include "hv_kvp.h"
@@ -44,21 +44,24 @@
static struct {
bool active; /* transaction status - active or not */
int recv_len; /* number of bytes received. */
+ int index; /* current index */
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
} kvp_transaction;
-static int kvp_send_key(int index);
+static void kvp_send_key(struct work_struct *dummy);
+
+#define TIMEOUT_FIRED 1
static void kvp_respond_to_host(char *key, char *value, int error);
static void kvp_work_func(struct work_struct *dummy);
static void kvp_register(void);
static DECLARE_DELAYED_WORK(kvp_work, kvp_work_func);
+static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
static struct cb_id kvp_id = { CN_KVP_IDX, CN_KVP_VAL };
static const char kvp_name[] = "kvp_kernel_module";
-static int timeout_fired;
static u8 *recv_buffer;
/*
* Register the kernel component with the user-level daemon.
@@ -90,8 +93,7 @@ kvp_work_func(struct work_struct *dummy)
* If the timer fires, the user-mode component has not responded;
* process the pending transaction.
*/
- kvp_respond_to_host("Unknown key", "Guest timed out", timeout_fired);
- timeout_fired = 1;
+ kvp_respond_to_host("Unknown key", "Guest timed out", TIMEOUT_FIRED);
}
/*
@@ -121,10 +123,11 @@ kvp_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
}
}
-static int
-kvp_send_key(int index)
+static void
+kvp_send_key(struct work_struct *dummy)
{
struct cn_msg *msg;
+ int index = kvp_transaction.index;
msg = kzalloc(sizeof(*msg) + sizeof(struct hv_kvp_msg) , GFP_ATOMIC);
@@ -136,9 +139,8 @@ kvp_send_key(int index)
msg->len = sizeof(struct hv_ku_msg);
cn_netlink_send(msg, 0, GFP_ATOMIC);
kfree(msg);
- return 0;
}
- return 1;
+ return;
}
/*
@@ -177,6 +179,15 @@ kvp_respond_to_host(char *key, char *value, int error)
channel = kvp_transaction.recv_channel;
req_id = kvp_transaction.recv_req_id;
+ kvp_transaction.active = false;
+
+ if (channel->onchannel_callback == NULL)
+ /*
+ * We have raced with util driver being unloaded;
+ * silently return.
+ */
+ return;
+
icmsghdrp = (struct icmsg_hdr *)
&recv_buffer[sizeof(struct vmbuspipe_hdr)];
kvp_msg = (struct hv_kvp_msg *)
@@ -217,7 +228,6 @@ response_done:
vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
VM_PKT_DATA_INBAND, 0);
- kvp_transaction.active = false;
}
/*
@@ -243,10 +253,6 @@ void hv_kvp_onchannelcallback(void *context)
struct icmsg_negotiate *negop = NULL;
- if (kvp_transaction.active)
- return;
-
-
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
@@ -254,7 +260,7 @@ void hv_kvp_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- prep_negotiate_resp(icmsghdrp, negop, recv_buffer);
+ vmbus_prep_negotiate_resp(icmsghdrp, negop, recv_buffer);
} else {
kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
sizeof(struct vmbuspipe_hdr) +
@@ -282,6 +288,7 @@ void hv_kvp_onchannelcallback(void *context)
kvp_transaction.recv_channel = channel;
kvp_transaction.recv_req_id = requestid;
kvp_transaction.active = true;
+ kvp_transaction.index = kvp_data->index;
/*
* Get the information from the
@@ -292,8 +299,8 @@ void hv_kvp_onchannelcallback(void *context)
* Set a timeout to deal with
* user-mode not responding.
*/
- kvp_send_key(kvp_data->index);
- schedule_delayed_work(&kvp_work, 100);
+ schedule_work(&kvp_sendkey_work);
+ schedule_delayed_work(&kvp_work, 5*HZ);
return;
@@ -312,16 +319,14 @@ callback_done:
}
int
-hv_kvp_init(void)
+hv_kvp_init(struct hv_util_service *srv)
{
int err;
err = cn_add_callback(&kvp_id, kvp_name, kvp_cn_callback);
if (err)
return err;
- recv_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!recv_buffer)
- return -ENOMEM;
+ recv_buffer = srv->recv_buffer;
return 0;
}
@@ -330,5 +335,5 @@ void hv_kvp_deinit(void)
{
cn_del_callback(&kvp_id);
cancel_delayed_work_sync(&kvp_work);
- kfree(recv_buffer);
+ cancel_work_sync(&kvp_sendkey_work);
}
diff --git a/drivers/staging/hv/hv_kvp.h b/drivers/hv/hv_kvp.h
index 8c402f357d3..9b765d7df83 100644
--- a/drivers/staging/hv/hv_kvp.h
+++ b/drivers/hv/hv_kvp.h
@@ -175,7 +175,7 @@ struct hv_kvp_msg {
struct hv_kvp_msg_enumerate kvp_data;
};
-int hv_kvp_init(void);
+int hv_kvp_init(struct hv_util_service *);
void hv_kvp_deinit(void);
void hv_kvp_onchannelcallback(void *);
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
new file mode 100644
index 00000000000..55d58f21e6d
--- /dev/null
+++ b/drivers/hv/hv_util.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2010, Microsoft Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+#include <linux/reboot.h>
+#include <linux/hyperv.h>
+
+#include "hv_kvp.h"
+
+
+static void shutdown_onchannelcallback(void *context);
+static struct hv_util_service util_shutdown = {
+ .util_cb = shutdown_onchannelcallback,
+};
+
+static void timesync_onchannelcallback(void *context);
+static struct hv_util_service util_timesynch = {
+ .util_cb = timesync_onchannelcallback,
+};
+
+static void heartbeat_onchannelcallback(void *context);
+static struct hv_util_service util_heartbeat = {
+ .util_cb = heartbeat_onchannelcallback,
+};
+
+static struct hv_util_service util_kvp = {
+ .util_cb = hv_kvp_onchannelcallback,
+ .util_init = hv_kvp_init,
+ .util_deinit = hv_kvp_deinit,
+};
+
+static void shutdown_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u32 recvlen;
+ u64 requestid;
+ u8 execute_shutdown = false;
+ u8 *shut_txf_buf = util_shutdown.recv_buffer;
+
+ struct shutdown_msg_data *shutdown_msg;
+
+ struct icmsg_hdr *icmsghdrp;
+ struct icmsg_negotiate *negop = NULL;
+
+ vmbus_recvpacket(channel, shut_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
+
+ if (recvlen > 0) {
+ icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
+ sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ vmbus_prep_negotiate_resp(icmsghdrp, negop, shut_txf_buf);
+ } else {
+ shutdown_msg =
+ (struct shutdown_msg_data *)&shut_txf_buf[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+
+ switch (shutdown_msg->flags) {
+ case 0:
+ case 1:
+ icmsghdrp->status = HV_S_OK;
+ execute_shutdown = true;
+
+ pr_info("Shutdown request received -"
+ " graceful shutdown initiated\n");
+ break;
+ default:
+ icmsghdrp->status = HV_E_FAIL;
+ execute_shutdown = false;
+
+ pr_info("Shutdown request received -"
+ " Invalid request\n");
+ break;
+ }
+ }
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, shut_txf_buf,
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+ }
+
+ if (execute_shutdown == true)
+ orderly_poweroff(true);
+}
+
+/*
+ * Set guest time to host UTC time.
+ */
+static inline void do_adj_guesttime(u64 hosttime)
+{
+ s64 host_tns;
+ struct timespec host_ts;
+
+ host_tns = (hosttime - WLTIMEDELTA) * 100;
+ host_ts = ns_to_timespec(host_tns);
+
+ do_settimeofday(&host_ts);
+}
+
+/*
+ * Set the host time in a process context.
+ */
+
+struct adj_time_work {
+ struct work_struct work;
+ u64 host_time;
+};
+
+static void hv_set_host_time(struct work_struct *work)
+{
+ struct adj_time_work *wrk;
+
+ wrk = container_of(work, struct adj_time_work, work);
+ do_adj_guesttime(wrk->host_time);
+ kfree(wrk);
+}
+
+/*
+ * Synchronize time with host after reboot, restore, etc.
+ *
+ * ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
+ * After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
+ * message after the timesync channel is opened. Since the hv_utils module is
+ * loaded after hv_vmbus, the first message is usually missed. The other
+ * thing is, systime is automatically set to emulated hardware clock which may
+ * not be UTC time or in the same time zone. So, to override these effects, we
+ * use the first 50 time samples for initial system time setting.
+ */
+static inline void adj_guesttime(u64 hosttime, u8 flags)
+{
+ struct adj_time_work *wrk;
+ static s32 scnt = 50;
+
+ wrk = kmalloc(sizeof(struct adj_time_work), GFP_ATOMIC);
+ if (wrk == NULL)
+ return;
+
+ wrk->host_time = hosttime;
+ if ((flags & ICTIMESYNCFLAG_SYNC) != 0) {
+ INIT_WORK(&wrk->work, hv_set_host_time);
+ schedule_work(&wrk->work);
+ return;
+ }
+
+ if ((flags & ICTIMESYNCFLAG_SAMPLE) != 0 && scnt > 0) {
+ scnt--;
+ INIT_WORK(&wrk->work, hv_set_host_time);
+ schedule_work(&wrk->work);
+ } else
+ kfree(wrk);
+}
+
+/*
+ * Time Sync Channel message handler.
+ */
+static void timesync_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u32 recvlen;
+ u64 requestid;
+ struct icmsg_hdr *icmsghdrp;
+ struct ictimesync_data *timedatap;
+ u8 *time_txf_buf = util_timesynch.recv_buffer;
+
+ vmbus_recvpacket(channel, time_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
+
+ if (recvlen > 0) {
+ icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
+ sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf);
+ } else {
+ timedatap = (struct ictimesync_data *)&time_txf_buf[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+ adj_guesttime(timedatap->parenttime, timedatap->flags);
+ }
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, time_txf_buf,
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+ }
+}
+
+/*
+ * Heartbeat functionality.
+ * Every two seconds, Hyper-V send us a heartbeat request message.
+ * we respond to this message, and Hyper-V knows we are alive.
+ */
+static void heartbeat_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u32 recvlen;
+ u64 requestid;
+ struct icmsg_hdr *icmsghdrp;
+ struct heartbeat_msg_data *heartbeat_msg;
+ u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
+
+ vmbus_recvpacket(channel, hbeat_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
+
+ if (recvlen > 0) {
+ icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
+ sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ vmbus_prep_negotiate_resp(icmsghdrp, NULL, hbeat_txf_buf);
+ } else {
+ heartbeat_msg =
+ (struct heartbeat_msg_data *)&hbeat_txf_buf[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+
+ heartbeat_msg->seq_num += 1;
+ }
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, hbeat_txf_buf,
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+ }
+}
+
+static int util_probe(struct hv_device *dev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ struct hv_util_service *srv =
+ (struct hv_util_service *)dev_id->driver_data;
+ int ret;
+
+ srv->recv_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!srv->recv_buffer)
+ return -ENOMEM;
+ if (srv->util_init) {
+ ret = srv->util_init(srv);
+ if (ret) {
+ ret = -ENODEV;
+ goto error1;
+ }
+ }
+
+ ret = vmbus_open(dev->channel, 2 * PAGE_SIZE, 2 * PAGE_SIZE, NULL, 0,
+ srv->util_cb, dev->channel);
+ if (ret)
+ goto error;
+
+ hv_set_drvdata(dev, srv);
+ return 0;
+
+error:
+ if (srv->util_deinit)
+ srv->util_deinit();
+error1:
+ kfree(srv->recv_buffer);
+ return ret;
+}
+
+static int util_remove(struct hv_device *dev)
+{
+ struct hv_util_service *srv = hv_get_drvdata(dev);
+
+ vmbus_close(dev->channel);
+ if (srv->util_deinit)
+ srv->util_deinit();
+ kfree(srv->recv_buffer);
+
+ return 0;
+}
+
+static const struct hv_vmbus_device_id id_table[] = {
+ /* Shutdown guid */
+ { VMBUS_DEVICE(0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49,
+ 0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB)
+ .driver_data = (unsigned long)&util_shutdown },
+ /* Time synch guid */
+ { VMBUS_DEVICE(0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
+ 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
+ .driver_data = (unsigned long)&util_timesynch },
+ /* Heartbeat guid */
+ { VMBUS_DEVICE(0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
+ 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
+ .driver_data = (unsigned long)&util_heartbeat },
+ /* KVP guid */
+ { VMBUS_DEVICE(0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
+ 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6)
+ .driver_data = (unsigned long)&util_kvp },
+ { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
+/* The one and only one */
+static struct hv_driver util_drv = {
+ .name = "hv_util",
+ .id_table = id_table,
+ .probe = util_probe,
+ .remove = util_remove,
+};
+
+static int __init init_hyperv_utils(void)
+{
+ pr_info("Registering HyperV Utility Driver\n");
+
+ return vmbus_driver_register(&util_drv);
+}
+
+static void exit_hyperv_utils(void)
+{
+ pr_info("De-Registered HyperV Utility Driver\n");
+
+ vmbus_driver_unregister(&util_drv);
+}
+
+module_init(init_hyperv_utils);
+module_exit(exit_hyperv_utils);
+
+MODULE_DESCRIPTION("Hyper-V Utilities");
+MODULE_VERSION(HV_DRV_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 349ad80ce32..0aee1122734 100644
--- a/drivers/staging/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -28,8 +28,7 @@
#include <linux/list.h>
#include <asm/sync_bitops.h>
#include <linux/atomic.h>
-
-#include "hyperv.h"
+#include <linux/hyperv.h>
/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
@@ -451,8 +450,8 @@ enum {
/* #define VMBUS_PORT_ID 11 */
/* 628180B8-308D-4c5e-B7DB-1BEB62E62EF4 */
-static const struct hv_guid VMBUS_SERVICE_ID = {
- .data = {
+static const uuid_le VMBUS_SERVICE_ID = {
+ .b = {
0xb8, 0x80, 0x81, 0x62, 0x8d, 0x30, 0x5e, 0x4c,
0xb7, 0xdb, 0x1b, 0xeb, 0x62, 0xe6, 0x2e, 0xf4
},
@@ -530,8 +529,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info,
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *ring_info);
-void hv_dump_ring_info(struct hv_ring_buffer_info *ring_info, char *prefix);
-
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
@@ -601,12 +598,12 @@ extern struct vmbus_connection vmbus_connection;
/* General vmbus interface */
-struct hv_device *vmbus_child_device_create(struct hv_guid *type,
- struct hv_guid *instance,
+struct hv_device *vmbus_device_create(uuid_le *type,
+ uuid_le *instance,
struct vmbus_channel *channel);
-int vmbus_child_device_register(struct hv_device *child_device_obj);
-void vmbus_child_device_unregister(struct hv_device *device_obj);
+int vmbus_device_register(struct hv_device *child_device_obj);
+void vmbus_device_unregister(struct hv_device *device_obj);
/* static void */
/* VmbusChildDeviceDestroy( */
diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 42f76728429..8af25a097d7 100644
--- a/drivers/staging/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -25,8 +25,8 @@
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/hyperv.h>
-#include "hyperv.h"
#include "hyperv_vmbus.h"
@@ -34,7 +34,8 @@
/* Amount of space to write to */
-#define BYTES_AVAIL_TO_WRITE(r, w, z) ((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w))
+#define BYTES_AVAIL_TO_WRITE(r, w, z) \
+ ((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w))
/*
@@ -171,37 +172,6 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
return (u64)ring_info->ring_buffer->write_index << 32;
}
-
-/*
- *
- * hv_dump_ring_info()
- *
- * Dump out to console the ring buffer info
- *
- */
-void hv_dump_ring_info(struct hv_ring_buffer_info *ring_info, char *prefix)
-{
- u32 bytes_avail_towrite;
- u32 bytes_avail_toread;
-
- hv_get_ringbuffer_availbytes(ring_info,
- &bytes_avail_toread,
- &bytes_avail_towrite);
-
- DPRINT(VMBUS,
- DEBUG_RING_LVL,
- "%s <<ringinfo %p buffer %p avail write %u "
- "avail read %u read idx %u write idx %u>>",
- prefix,
- ring_info,
- ring_info->ring_buffer->buffer,
- bytes_avail_towrite,
- bytes_avail_toread,
- ring_info->ring_buffer->read_index,
- ring_info->ring_buffer->write_index);
-}
-
-
/*
*
* hv_copyfrom_ringbuffer()
@@ -390,7 +360,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
/* is empty since the read index == write index */
if (bytes_avail_towrite <= totalbytes_towrite) {
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
- return -1;
+ return -EAGAIN;
}
/* Write to the ring buffer */
@@ -450,7 +420,7 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
- return -1;
+ return -EAGAIN;
}
/* Convert to byte offset */
@@ -496,7 +466,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
if (bytes_avail_toread < buflen) {
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
- return -1;
+ return -EAGAIN;
}
next_read_location =
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
new file mode 100644
index 00000000000..0c048dd8013
--- /dev/null
+++ b/drivers/hv/vmbus_drv.c
@@ -0,0 +1,786 @@
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ * K. Y. Srinivasan <kys@microsoft.com>
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/sysctl.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <linux/completion.h>
+#include <linux/hyperv.h>
+#include <asm/hyperv.h>
+#include "hyperv_vmbus.h"
+
+
+static struct acpi_device *hv_acpi_dev;
+
+static struct tasklet_struct msg_dpc;
+static struct tasklet_struct event_dpc;
+static struct completion probe_event;
+static int irq;
+
+struct hv_device_info {
+ u32 chn_id;
+ u32 chn_state;
+ uuid_le chn_type;
+ uuid_le chn_instance;
+
+ u32 monitor_id;
+ u32 server_monitor_pending;
+ u32 server_monitor_latency;
+ u32 server_monitor_conn_id;
+ u32 client_monitor_pending;
+ u32 client_monitor_latency;
+ u32 client_monitor_conn_id;
+
+ struct hv_dev_port_info inbound;
+ struct hv_dev_port_info outbound;
+};
+
+
+static void get_channel_info(struct hv_device *device,
+ struct hv_device_info *info)
+{
+ struct vmbus_channel_debug_info debug_info;
+
+ if (!device->channel)
+ return;
+
+ vmbus_get_debug_info(device->channel, &debug_info);
+
+ info->chn_id = debug_info.relid;
+ info->chn_state = debug_info.state;
+ memcpy(&info->chn_type, &debug_info.interfacetype,
+ sizeof(uuid_le));
+ memcpy(&info->chn_instance, &debug_info.interface_instance,
+ sizeof(uuid_le));
+
+ info->monitor_id = debug_info.monitorid;
+
+ info->server_monitor_pending = debug_info.servermonitor_pending;
+ info->server_monitor_latency = debug_info.servermonitor_latency;
+ info->server_monitor_conn_id = debug_info.servermonitor_connectionid;
+
+ info->client_monitor_pending = debug_info.clientmonitor_pending;
+ info->client_monitor_latency = debug_info.clientmonitor_latency;
+ info->client_monitor_conn_id = debug_info.clientmonitor_connectionid;
+
+ info->inbound.int_mask = debug_info.inbound.current_interrupt_mask;
+ info->inbound.read_idx = debug_info.inbound.current_read_index;
+ info->inbound.write_idx = debug_info.inbound.current_write_index;
+ info->inbound.bytes_avail_toread =
+ debug_info.inbound.bytes_avail_toread;
+ info->inbound.bytes_avail_towrite =
+ debug_info.inbound.bytes_avail_towrite;
+
+ info->outbound.int_mask =
+ debug_info.outbound.current_interrupt_mask;
+ info->outbound.read_idx = debug_info.outbound.current_read_index;
+ info->outbound.write_idx = debug_info.outbound.current_write_index;
+ info->outbound.bytes_avail_toread =
+ debug_info.outbound.bytes_avail_toread;
+ info->outbound.bytes_avail_towrite =
+ debug_info.outbound.bytes_avail_towrite;
+}
+
+#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
+static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
+{
+ int i;
+ for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
+ sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
+}
+
+/*
+ * vmbus_show_device_attr - Show the device attribute in sysfs.
+ *
+ * This is invoked when user does a
+ * "cat /sys/bus/vmbus/devices/<busdevice>/<attr name>"
+ */
+static ssize_t vmbus_show_device_attr(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_device_info *device_info;
+ char alias_name[VMBUS_ALIAS_LEN + 1];
+ int ret = 0;
+
+ device_info = kzalloc(sizeof(struct hv_device_info), GFP_KERNEL);
+ if (!device_info)
+ return ret;
+
+ get_channel_info(hv_dev, device_info);
+
+ if (!strcmp(dev_attr->attr.name, "class_id")) {
+ ret = sprintf(buf, "{%02x%02x%02x%02x-%02x%02x-%02x%02x-"
+ "%02x%02x%02x%02x%02x%02x%02x%02x}\n",
+ device_info->chn_type.b[3],
+ device_info->chn_type.b[2],
+ device_info->chn_type.b[1],
+ device_info->chn_type.b[0],
+ device_info->chn_type.b[5],
+ device_info->chn_type.b[4],
+ device_info->chn_type.b[7],
+ device_info->chn_type.b[6],
+ device_info->chn_type.b[8],
+ device_info->chn_type.b[9],
+ device_info->chn_type.b[10],
+ device_info->chn_type.b[11],
+ device_info->chn_type.b[12],
+ device_info->chn_type.b[13],
+ device_info->chn_type.b[14],
+ device_info->chn_type.b[15]);
+ } else if (!strcmp(dev_attr->attr.name, "device_id")) {
+ ret = sprintf(buf, "{%02x%02x%02x%02x-%02x%02x-%02x%02x-"
+ "%02x%02x%02x%02x%02x%02x%02x%02x}\n",
+ device_info->chn_instance.b[3],
+ device_info->chn_instance.b[2],
+ device_info->chn_instance.b[1],
+ device_info->chn_instance.b[0],
+ device_info->chn_instance.b[5],
+ device_info->chn_instance.b[4],
+ device_info->chn_instance.b[7],
+ device_info->chn_instance.b[6],
+ device_info->chn_instance.b[8],
+ device_info->chn_instance.b[9],
+ device_info->chn_instance.b[10],
+ device_info->chn_instance.b[11],
+ device_info->chn_instance.b[12],
+ device_info->chn_instance.b[13],
+ device_info->chn_instance.b[14],
+ device_info->chn_instance.b[15]);
+ } else if (!strcmp(dev_attr->attr.name, "modalias")) {
+ print_alias_name(hv_dev, alias_name);
+ ret = sprintf(buf, "vmbus:%s\n", alias_name);
+ } else if (!strcmp(dev_attr->attr.name, "state")) {
+ ret = sprintf(buf, "%d\n", device_info->chn_state);
+ } else if (!strcmp(dev_attr->attr.name, "id")) {
+ ret = sprintf(buf, "%d\n", device_info->chn_id);
+ } else if (!strcmp(dev_attr->attr.name, "out_intr_mask")) {
+ ret = sprintf(buf, "%d\n", device_info->outbound.int_mask);
+ } else if (!strcmp(dev_attr->attr.name, "out_read_index")) {
+ ret = sprintf(buf, "%d\n", device_info->outbound.read_idx);
+ } else if (!strcmp(dev_attr->attr.name, "out_write_index")) {
+ ret = sprintf(buf, "%d\n", device_info->outbound.write_idx);
+ } else if (!strcmp(dev_attr->attr.name, "out_read_bytes_avail")) {
+ ret = sprintf(buf, "%d\n",
+ device_info->outbound.bytes_avail_toread);
+ } else if (!strcmp(dev_attr->attr.name, "out_write_bytes_avail")) {
+ ret = sprintf(buf, "%d\n",
+ device_info->outbound.bytes_avail_towrite);
+ } else if (!strcmp(dev_attr->attr.name, "in_intr_mask")) {
+ ret = sprintf(buf, "%d\n", device_info->inbound.int_mask);
+ } else if (!strcmp(dev_attr->attr.name, "in_read_index")) {
+ ret = sprintf(buf, "%d\n", device_info->inbound.read_idx);
+ } else if (!strcmp(dev_attr->attr.name, "in_write_index")) {
+ ret = sprintf(buf, "%d\n", device_info->inbound.write_idx);
+ } else if (!strcmp(dev_attr->attr.name, "in_read_bytes_avail")) {
+ ret = sprintf(buf, "%d\n",
+ device_info->inbound.bytes_avail_toread);
+ } else if (!strcmp(dev_attr->attr.name, "in_write_bytes_avail")) {
+ ret = sprintf(buf, "%d\n",
+ device_info->inbound.bytes_avail_towrite);
+ } else if (!strcmp(dev_attr->attr.name, "monitor_id")) {
+ ret = sprintf(buf, "%d\n", device_info->monitor_id);
+ } else if (!strcmp(dev_attr->attr.name, "server_monitor_pending")) {
+ ret = sprintf(buf, "%d\n", device_info->server_monitor_pending);
+ } else if (!strcmp(dev_attr->attr.name, "server_monitor_latency")) {
+ ret = sprintf(buf, "%d\n", device_info->server_monitor_latency);
+ } else if (!strcmp(dev_attr->attr.name, "server_monitor_conn_id")) {
+ ret = sprintf(buf, "%d\n",
+ device_info->server_monitor_conn_id);
+ } else if (!strcmp(dev_attr->attr.name, "client_monitor_pending")) {
+ ret = sprintf(buf, "%d\n", device_info->client_monitor_pending);
+ } else if (!strcmp(dev_attr->attr.name, "client_monitor_latency")) {
+ ret = sprintf(buf, "%d\n", device_info->client_monitor_latency);
+ } else if (!strcmp(dev_attr->attr.name, "client_monitor_conn_id")) {
+ ret = sprintf(buf, "%d\n",
+ device_info->client_monitor_conn_id);
+ }
+
+ kfree(device_info);
+ return ret;
+}
+
+/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
+static struct device_attribute vmbus_device_attrs[] = {
+ __ATTR(id, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(state, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(class_id, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(device_id, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(monitor_id, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(modalias, S_IRUGO, vmbus_show_device_attr, NULL),
+
+ __ATTR(server_monitor_pending, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(server_monitor_latency, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(server_monitor_conn_id, S_IRUGO, vmbus_show_device_attr, NULL),
+
+ __ATTR(client_monitor_pending, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(client_monitor_latency, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(client_monitor_conn_id, S_IRUGO, vmbus_show_device_attr, NULL),
+
+ __ATTR(out_intr_mask, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(out_read_index, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(out_write_index, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(out_read_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(out_write_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
+
+ __ATTR(in_intr_mask, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(in_read_index, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(in_write_index, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(in_read_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR(in_write_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
+ __ATTR_NULL
+};
+
+
+/*
+ * vmbus_uevent - add uevent for our device
+ *
+ * This routine is invoked when a device is added or removed on the vmbus to
+ * generate a uevent to udev in the userspace. The udev will then look at its
+ * rule and the uevent generated here to load the appropriate driver
+ *
+ * The alias string will be of the form vmbus:guid where guid is the string
+ * representation of the device guid (each byte of the guid will be
+ * represented with two hex characters.
+ */
+static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
+{
+ struct hv_device *dev = device_to_hv_device(device);
+ int ret;
+ char alias_name[VMBUS_ALIAS_LEN + 1];
+
+ print_alias_name(dev, alias_name);
+ ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
+ return ret;
+}
+
+static uuid_le null_guid;
+
+static inline bool is_null_guid(const __u8 *guid)
+{
+ if (memcmp(guid, &null_guid, sizeof(uuid_le)))
+ return false;
+ return true;
+}
+
+/*
+ * Return a matching hv_vmbus_device_id pointer.
+ * If there is no match, return NULL.
+ */
+static const struct hv_vmbus_device_id *hv_vmbus_get_id(
+ const struct hv_vmbus_device_id *id,
+ __u8 *guid)
+{
+ for (; !is_null_guid(id->guid); id++)
+ if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
+ return id;
+
+ return NULL;
+}
+
+
+
+/*
+ * vmbus_match - Attempt to match the specified device to the specified driver
+ */
+static int vmbus_match(struct device *device, struct device_driver *driver)
+{
+ struct hv_driver *drv = drv_to_hv_drv(driver);
+ struct hv_device *hv_dev = device_to_hv_device(device);
+
+ if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * vmbus_probe - Add the new vmbus's child device
+ */
+static int vmbus_probe(struct device *child_device)
+{
+ int ret = 0;
+ struct hv_driver *drv =
+ drv_to_hv_drv(child_device->driver);
+ struct hv_device *dev = device_to_hv_device(child_device);
+ const struct hv_vmbus_device_id *dev_id;
+
+ dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b);
+ if (drv->probe) {
+ ret = drv->probe(dev, dev_id);
+ if (ret != 0)
+ pr_err("probe failed for device %s (%d)\n",
+ dev_name(child_device), ret);
+
+ } else {
+ pr_err("probe not set for driver %s\n",
+ dev_name(child_device));
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+/*
+ * vmbus_remove - Remove a vmbus device
+ */
+static int vmbus_remove(struct device *child_device)
+{
+ struct hv_driver *drv = drv_to_hv_drv(child_device->driver);
+ struct hv_device *dev = device_to_hv_device(child_device);
+
+ if (drv->remove)
+ drv->remove(dev);
+ else
+ pr_err("remove not set for driver %s\n",
+ dev_name(child_device));
+
+ return 0;
+}
+
+
+/*
+ * vmbus_shutdown - Shutdown a vmbus device
+ */
+static void vmbus_shutdown(struct device *child_device)
+{
+ struct hv_driver *drv;
+ struct hv_device *dev = device_to_hv_device(child_device);
+
+
+ /* The device may not be attached yet */
+ if (!child_device->driver)
+ return;
+
+ drv = drv_to_hv_drv(child_device->driver);
+
+ if (drv->shutdown)
+ drv->shutdown(dev);
+
+ return;
+}
+
+
+/*
+ * vmbus_device_release - Final callback release of the vmbus child device
+ */
+static void vmbus_device_release(struct device *device)
+{
+ struct hv_device *hv_dev = device_to_hv_device(device);
+
+ kfree(hv_dev);
+
+}
+
+/* The one and only one */
+static struct bus_type hv_bus = {
+ .name = "vmbus",
+ .match = vmbus_match,
+ .shutdown = vmbus_shutdown,
+ .remove = vmbus_remove,
+ .probe = vmbus_probe,
+ .uevent = vmbus_uevent,
+ .dev_attrs = vmbus_device_attrs,
+};
+
+static const char *driver_name = "hyperv";
+
+
+struct onmessage_work_context {
+ struct work_struct work;
+ struct hv_message msg;
+};
+
+static void vmbus_onmessage_work(struct work_struct *work)
+{
+ struct onmessage_work_context *ctx;
+
+ ctx = container_of(work, struct onmessage_work_context,
+ work);
+ vmbus_onmessage(&ctx->msg);
+ kfree(ctx);
+}
+
+static void vmbus_on_msg_dpc(unsigned long data)
+{
+ int cpu = smp_processor_id();
+ void *page_addr = hv_context.synic_message_page[cpu];
+ struct hv_message *msg = (struct hv_message *)page_addr +
+ VMBUS_MESSAGE_SINT;
+ struct onmessage_work_context *ctx;
+
+ while (1) {
+ if (msg->header.message_type == HVMSG_NONE) {
+ /* no msg */
+ break;
+ } else {
+ ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (ctx == NULL)
+ continue;
+ INIT_WORK(&ctx->work, vmbus_onmessage_work);
+ memcpy(&ctx->msg, msg, sizeof(*msg));
+ queue_work(vmbus_connection.work_queue, &ctx->work);
+ }
+
+ msg->header.message_type = HVMSG_NONE;
+
+ /*
+ * Make sure the write to MessageType (ie set to
+ * HVMSG_NONE) happens before we read the
+ * MessagePending and EOMing. Otherwise, the EOMing
+ * will not deliver any more messages since there is
+ * no empty slot
+ */
+ smp_mb();
+
+ if (msg->header.message_flags.msg_pending) {
+ /*
+ * This will cause message queue rescan to
+ * possibly deliver another msg from the
+ * hypervisor
+ */
+ wrmsrl(HV_X64_MSR_EOM, 0);
+ }
+ }
+}
+
+static irqreturn_t vmbus_isr(int irq, void *dev_id)
+{
+ int cpu = smp_processor_id();
+ void *page_addr;
+ struct hv_message *msg;
+ union hv_synic_event_flags *event;
+ bool handled = false;
+
+ /*
+ * Check for events before checking for messages. This is the order
+ * in which events and messages are checked in Windows guests on
+ * Hyper-V, and the Windows team suggested we do the same.
+ */
+
+ page_addr = hv_context.synic_event_page[cpu];
+ event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
+
+ /* Since we are a child, we only need to check bit 0 */
+ if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
+ handled = true;
+ tasklet_schedule(&event_dpc);
+ }
+
+ page_addr = hv_context.synic_message_page[cpu];
+ msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
+
+ /* Check if there are actual msgs to be processed */
+ if (msg->header.message_type != HVMSG_NONE) {
+ handled = true;
+ tasklet_schedule(&msg_dpc);
+ }
+
+ if (handled)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+/*
+ * vmbus_bus_init -Main vmbus driver initialization routine.
+ *
+ * Here, we
+ * - initialize the vmbus driver context
+ * - invoke the vmbus hv main init routine
+ * - get the irq resource
+ * - retrieve the channel offers
+ */
+static int vmbus_bus_init(int irq)
+{
+ int ret;
+ unsigned int vector;
+
+ /* Hypervisor initialization...setup hypercall page..etc */
+ ret = hv_init();
+ if (ret != 0) {
+ pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
+ return ret;
+ }
+
+ tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
+ tasklet_init(&event_dpc, vmbus_on_event, 0);
+
+ ret = bus_register(&hv_bus);
+ if (ret)
+ goto err_cleanup;
+
+ ret = request_irq(irq, vmbus_isr, IRQF_SAMPLE_RANDOM,
+ driver_name, hv_acpi_dev);
+
+ if (ret != 0) {
+ pr_err("Unable to request IRQ %d\n",
+ irq);
+ goto err_unregister;
+ }
+
+ vector = IRQ0_VECTOR + irq;
+
+ /*
+ * Notify the hypervisor of our irq and
+ * connect to the host.
+ */
+ on_each_cpu(hv_synic_init, (void *)&vector, 1);
+ ret = vmbus_connect();
+ if (ret)
+ goto err_irq;
+
+ vmbus_request_offers();
+
+ return 0;
+
+err_irq:
+ free_irq(irq, hv_acpi_dev);
+
+err_unregister:
+ bus_unregister(&hv_bus);
+
+err_cleanup:
+ hv_cleanup();
+
+ return ret;
+}
+
+/**
+ * __vmbus_child_driver_register - Register a vmbus's driver
+ * @drv: Pointer to driver structure you want to register
+ * @owner: owner module of the drv
+ * @mod_name: module name string
+ *
+ * Registers the given driver with Linux through the 'driver_register()' call
+ * and sets up the hyper-v vmbus handling for this driver.
+ * It will return the state of the 'driver_register()' call.
+ *
+ */
+int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
+{
+ int ret;
+
+ pr_info("registering driver %s\n", hv_driver->name);
+
+ hv_driver->driver.name = hv_driver->name;
+ hv_driver->driver.owner = owner;
+ hv_driver->driver.mod_name = mod_name;
+ hv_driver->driver.bus = &hv_bus;
+
+ ret = driver_register(&hv_driver->driver);
+
+ vmbus_request_offers();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__vmbus_driver_register);
+
+/**
+ * vmbus_driver_unregister() - Unregister a vmbus's driver
+ * @drv: Pointer to driver structure you want to un-register
+ *
+ * Un-register the given driver that was previous registered with a call to
+ * vmbus_driver_register()
+ */
+void vmbus_driver_unregister(struct hv_driver *hv_driver)
+{
+ pr_info("unregistering driver %s\n", hv_driver->name);
+
+ driver_unregister(&hv_driver->driver);
+
+}
+EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
+
+/*
+ * vmbus_device_create - Creates and registers a new child device
+ * on the vmbus.
+ */
+struct hv_device *vmbus_device_create(uuid_le *type,
+ uuid_le *instance,
+ struct vmbus_channel *channel)
+{
+ struct hv_device *child_device_obj;
+
+ child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
+ if (!child_device_obj) {
+ pr_err("Unable to allocate device object for child device\n");
+ return NULL;
+ }
+
+ child_device_obj->channel = channel;
+ memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
+ memcpy(&child_device_obj->dev_instance, instance,
+ sizeof(uuid_le));
+
+
+ return child_device_obj;
+}
+
+/*
+ * vmbus_device_register - Register the child device
+ */
+int vmbus_device_register(struct hv_device *child_device_obj)
+{
+ int ret = 0;
+
+ static atomic_t device_num = ATOMIC_INIT(0);
+
+ dev_set_name(&child_device_obj->device, "vmbus_0_%d",
+ atomic_inc_return(&device_num));
+
+ child_device_obj->device.bus = &hv_bus;
+ child_device_obj->device.parent = &hv_acpi_dev->dev;
+ child_device_obj->device.release = vmbus_device_release;
+
+ /*
+ * Register with the LDM. This will kick off the driver/device
+ * binding...which will eventually call vmbus_match() and vmbus_probe()
+ */
+ ret = device_register(&child_device_obj->device);
+
+ if (ret)
+ pr_err("Unable to register child device\n");
+ else
+ pr_info("child device %s registered\n",
+ dev_name(&child_device_obj->device));
+
+ return ret;
+}
+
+/*
+ * vmbus_device_unregister - Remove the specified child device
+ * from the vmbus.
+ */
+void vmbus_device_unregister(struct hv_device *device_obj)
+{
+ /*
+ * Kick off the process of unregistering the device.
+ * This will call vmbus_remove() and eventually vmbus_device_release()
+ */
+ device_unregister(&device_obj->device);
+
+ pr_info("child device %s unregistered\n",
+ dev_name(&device_obj->device));
+}
+
+
+/*
+ * VMBUS is an acpi enumerated device. Get the the IRQ information
+ * from DSDT.
+ */
+
+static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq)
+{
+
+ if (res->type == ACPI_RESOURCE_TYPE_IRQ) {
+ struct acpi_resource_irq *irqp;
+ irqp = &res->data.irq;
+
+ *((unsigned int *)irq) = irqp->interrupts[0];
+ }
+
+ return AE_OK;
+}
+
+static int vmbus_acpi_add(struct acpi_device *device)
+{
+ acpi_status result;
+
+ hv_acpi_dev = device;
+
+ result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+ vmbus_walk_resources, &irq);
+
+ if (ACPI_FAILURE(result)) {
+ complete(&probe_event);
+ return -ENODEV;
+ }
+ complete(&probe_event);
+ return 0;
+}
+
+static const struct acpi_device_id vmbus_acpi_device_ids[] = {
+ {"VMBUS", 0},
+ {"VMBus", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
+
+static struct acpi_driver vmbus_acpi_driver = {
+ .name = "vmbus",
+ .ids = vmbus_acpi_device_ids,
+ .ops = {
+ .add = vmbus_acpi_add,
+ },
+};
+
+static int __init hv_acpi_init(void)
+{
+ int ret, t;
+
+ init_completion(&probe_event);
+
+ /*
+ * Get irq resources first.
+ */
+
+ ret = acpi_bus_register_driver(&vmbus_acpi_driver);
+
+ if (ret)
+ return ret;
+
+ t = wait_for_completion_timeout(&probe_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (irq <= 0) {
+ ret = -ENODEV;
+ goto cleanup;
+ }
+
+ ret = vmbus_bus_init(irq);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ acpi_bus_unregister_driver(&vmbus_acpi_driver);
+ return ret;
+}
+
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(HV_DRV_VERSION);
+
+module_init(hv_acpi_init);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 0b62c3c6b7c..9b347acf155 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -68,6 +68,16 @@ config SENSORS_ABITUGURU3
This driver can also be built as a module. If so, the module
will be called abituguru3.
+config SENSORS_AD7314
+ tristate "Analog Devices AD7314 and compatibles"
+ depends on SPI && EXPERIMENTAL
+ help
+ If you say yes here you get support for the Analog Devices
+ AD7314, ADT7301 and ADT7302 temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called ad7314.
+
config SENSORS_AD7414
tristate "Analog Devices AD7414"
depends on I2C && EXPERIMENTAL
@@ -303,6 +313,16 @@ config SENSORS_DS1621
This driver can also be built as a module. If so, the module
will be called ds1621.
+config SENSORS_EXYNOS4_TMU
+ tristate "Temperature sensor on Samsung EXYNOS4"
+ depends on EXYNOS4_DEV_TMU
+ help
+ If you say yes here you get support for TMU (Thermal Managment
+ Unit) on SAMSUNG EXYNOS4 series of SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called exynos4-tmu.
+
config SENSORS_I5K_AMB
tristate "FB-DIMM AMB temperature sensor on Intel 5000 series chipsets"
depends on PCI && EXPERIMENTAL
@@ -531,6 +551,7 @@ config SENSORS_LM75
If you say yes here you get support for one common type of
temperature sensor chip, with models including:
+ - Analog Devices ADT75
- Dallas Semiconductor DS75 and DS1775
- Maxim MAX6625 and MAX6626
- Microchip MCP980x
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 3c9ccefea79..8251ce8cd03 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_SENSORS_W83791D) += w83791d.o
obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o
obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
+obj-$(CONFIG_SENSORS_AD7314) += ad7314.o
obj-$(CONFIG_SENSORS_AD7414) += ad7414.o
obj-$(CONFIG_SENSORS_AD7418) += ad7418.o
obj-$(CONFIG_SENSORS_ADCXX) += adcxx.o
@@ -47,6 +48,7 @@ obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
obj-$(CONFIG_SENSORS_EMC2103) += emc2103.o
obj-$(CONFIG_SENSORS_EMC6W201) += emc6w201.o
+obj-$(CONFIG_SENSORS_EXYNOS4_TMU) += exynos4_tmu.o
obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
obj-$(CONFIG_SENSORS_F75375S) += f75375s.o
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
new file mode 100644
index 00000000000..318e38e8537
--- /dev/null
+++ b/drivers/hwmon/ad7314.c
@@ -0,0 +1,186 @@
+/*
+ * AD7314 digital temperature sensor driver for AD7314, ADT7301 and ADT7302
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ * Conversion to hwmon from IIO done by Jonathan Cameron <jic23@cam.ac.uk>
+ */
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+/*
+ * AD7314 power mode
+ */
+#define AD7314_PD 0x2000
+
+/*
+ * AD7314 temperature masks
+ */
+#define AD7314_TEMP_SIGN 0x200
+#define AD7314_TEMP_MASK 0x7FE0
+#define AD7314_TEMP_OFFSET 5
+
+/*
+ * ADT7301 and ADT7302 temperature masks
+ */
+#define ADT7301_TEMP_SIGN 0x2000
+#define ADT7301_TEMP_MASK 0x3FFF
+
+enum ad7314_variant {
+ adt7301,
+ adt7302,
+ ad7314,
+};
+
+struct ad7314_data {
+ struct spi_device *spi_dev;
+ struct device *hwmon_dev;
+ u16 rx ____cacheline_aligned;
+};
+
+static int ad7314_spi_read(struct ad7314_data *chip, s16 *data)
+{
+ int ret;
+
+ ret = spi_read(chip->spi_dev, (u8 *)&chip->rx, sizeof(chip->rx));
+ if (ret < 0) {
+ dev_err(&chip->spi_dev->dev, "SPI read error\n");
+ return ret;
+ }
+
+ *data = be16_to_cpu(chip->rx);
+
+ return ret;
+}
+
+static ssize_t ad7314_show_temperature(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ad7314_data *chip = dev_get_drvdata(dev);
+ s16 data;
+ int ret;
+
+ ret = ad7314_spi_read(chip, &data);
+ if (ret < 0)
+ return ret;
+ switch (spi_get_device_id(chip->spi_dev)->driver_data) {
+ case ad7314:
+ data = (data & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET;
+ data = (data << 6) >> 6;
+
+ return sprintf(buf, "%d\n", 250 * data);
+ case adt7301:
+ case adt7302:
+ /*
+ * Documented as a 13 bit twos complement register
+ * with a sign bit - which is a 14 bit 2's complement
+ * register. 1lsb - 31.25 milli degrees centigrade
+ */
+ data &= ADT7301_TEMP_MASK;
+ data = (data << 2) >> 2;
+
+ return sprintf(buf, "%d\n",
+ DIV_ROUND_CLOSEST(data * 3125, 100));
+ default:
+ return -EINVAL;
+ }
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ ad7314_show_temperature, NULL, 0);
+
+static struct attribute *ad7314_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad7314_group = {
+ .attrs = ad7314_attributes,
+};
+
+static int __devinit ad7314_probe(struct spi_device *spi_dev)
+{
+ int ret;
+ struct ad7314_data *chip;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (chip == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ dev_set_drvdata(&spi_dev->dev, chip);
+
+ ret = sysfs_create_group(&spi_dev->dev.kobj, &ad7314_group);
+ if (ret < 0)
+ goto error_free_chip;
+ chip->hwmon_dev = hwmon_device_register(&spi_dev->dev);
+ if (IS_ERR(chip->hwmon_dev)) {
+ ret = PTR_ERR(chip->hwmon_dev);
+ goto error_remove_group;
+ }
+
+ return 0;
+error_remove_group:
+ sysfs_remove_group(&spi_dev->dev.kobj, &ad7314_group);
+error_free_chip:
+ kfree(chip);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad7314_remove(struct spi_device *spi_dev)
+{
+ struct ad7314_data *chip = dev_get_drvdata(&spi_dev->dev);
+
+ hwmon_device_unregister(chip->hwmon_dev);
+ sysfs_remove_group(&spi_dev->dev.kobj, &ad7314_group);
+ kfree(chip);
+
+ return 0;
+}
+
+static const struct spi_device_id ad7314_id[] = {
+ { "adt7301", adt7301 },
+ { "adt7302", adt7302 },
+ { "ad7314", ad7314 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad7314_id);
+
+static struct spi_driver ad7314_driver = {
+ .driver = {
+ .name = "ad7314",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7314_probe,
+ .remove = __devexit_p(ad7314_remove),
+ .id_table = ad7314_id,
+};
+
+static __init int ad7314_init(void)
+{
+ return spi_register_driver(&ad7314_driver);
+}
+module_init(ad7314_init);
+
+static __exit void ad7314_exit(void)
+{
+ spi_unregister_driver(&ad7314_driver);
+}
+module_exit(ad7314_exit);
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7314, ADT7301 and ADT7302 digital"
+ " temperature sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
new file mode 100644
index 00000000000..faa0884f61f
--- /dev/null
+++ b/drivers/hwmon/exynos4_tmu.c
@@ -0,0 +1,524 @@
+/*
+ * exynos4_tmu.c - Samsung EXYNOS4 TMU (Thermal Management Unit)
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * Donggeun Kim <dg77.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#include <linux/platform_data/exynos4_tmu.h>
+
+#define EXYNOS4_TMU_REG_TRIMINFO 0x0
+#define EXYNOS4_TMU_REG_CONTROL 0x20
+#define EXYNOS4_TMU_REG_STATUS 0x28
+#define EXYNOS4_TMU_REG_CURRENT_TEMP 0x40
+#define EXYNOS4_TMU_REG_THRESHOLD_TEMP 0x44
+#define EXYNOS4_TMU_REG_TRIG_LEVEL0 0x50
+#define EXYNOS4_TMU_REG_TRIG_LEVEL1 0x54
+#define EXYNOS4_TMU_REG_TRIG_LEVEL2 0x58
+#define EXYNOS4_TMU_REG_TRIG_LEVEL3 0x5C
+#define EXYNOS4_TMU_REG_PAST_TEMP0 0x60
+#define EXYNOS4_TMU_REG_PAST_TEMP1 0x64
+#define EXYNOS4_TMU_REG_PAST_TEMP2 0x68
+#define EXYNOS4_TMU_REG_PAST_TEMP3 0x6C
+#define EXYNOS4_TMU_REG_INTEN 0x70
+#define EXYNOS4_TMU_REG_INTSTAT 0x74
+#define EXYNOS4_TMU_REG_INTCLEAR 0x78
+
+#define EXYNOS4_TMU_GAIN_SHIFT 8
+#define EXYNOS4_TMU_REF_VOLTAGE_SHIFT 24
+
+#define EXYNOS4_TMU_TRIM_TEMP_MASK 0xff
+#define EXYNOS4_TMU_CORE_ON 3
+#define EXYNOS4_TMU_CORE_OFF 2
+#define EXYNOS4_TMU_DEF_CODE_TO_TEMP_OFFSET 50
+#define EXYNOS4_TMU_TRIG_LEVEL0_MASK 0x1
+#define EXYNOS4_TMU_TRIG_LEVEL1_MASK 0x10
+#define EXYNOS4_TMU_TRIG_LEVEL2_MASK 0x100
+#define EXYNOS4_TMU_TRIG_LEVEL3_MASK 0x1000
+#define EXYNOS4_TMU_INTCLEAR_VAL 0x1111
+
+struct exynos4_tmu_data {
+ struct exynos4_tmu_platform_data *pdata;
+ struct device *hwmon_dev;
+ struct resource *mem;
+ void __iomem *base;
+ int irq;
+ struct work_struct irq_work;
+ struct mutex lock;
+ struct clk *clk;
+ u8 temp_error1, temp_error2;
+};
+
+/*
+ * TMU treats temperature as a mapped temperature code.
+ * The temperature is converted differently depending on the calibration type.
+ */
+static int temp_to_code(struct exynos4_tmu_data *data, u8 temp)
+{
+ struct exynos4_tmu_platform_data *pdata = data->pdata;
+ int temp_code;
+
+ /* temp should range between 25 and 125 */
+ if (temp < 25 || temp > 125) {
+ temp_code = -EINVAL;
+ goto out;
+ }
+
+ switch (pdata->cal_type) {
+ case TYPE_TWO_POINT_TRIMMING:
+ temp_code = (temp - 25) *
+ (data->temp_error2 - data->temp_error1) /
+ (85 - 25) + data->temp_error1;
+ break;
+ case TYPE_ONE_POINT_TRIMMING:
+ temp_code = temp + data->temp_error1 - 25;
+ break;
+ default:
+ temp_code = temp + EXYNOS4_TMU_DEF_CODE_TO_TEMP_OFFSET;
+ break;
+ }
+out:
+ return temp_code;
+}
+
+/*
+ * Calculate a temperature value from a temperature code.
+ * The unit of the temperature is degree Celsius.
+ */
+static int code_to_temp(struct exynos4_tmu_data *data, u8 temp_code)
+{
+ struct exynos4_tmu_platform_data *pdata = data->pdata;
+ int temp;
+
+ /* temp_code should range between 75 and 175 */
+ if (temp_code < 75 || temp_code > 175) {
+ temp = -ENODATA;
+ goto out;
+ }
+
+ switch (pdata->cal_type) {
+ case TYPE_TWO_POINT_TRIMMING:
+ temp = (temp_code - data->temp_error1) * (85 - 25) /
+ (data->temp_error2 - data->temp_error1) + 25;
+ break;
+ case TYPE_ONE_POINT_TRIMMING:
+ temp = temp_code - data->temp_error1 + 25;
+ break;
+ default:
+ temp = temp_code - EXYNOS4_TMU_DEF_CODE_TO_TEMP_OFFSET;
+ break;
+ }
+out:
+ return temp;
+}
+
+static int exynos4_tmu_initialize(struct platform_device *pdev)
+{
+ struct exynos4_tmu_data *data = platform_get_drvdata(pdev);
+ struct exynos4_tmu_platform_data *pdata = data->pdata;
+ unsigned int status, trim_info;
+ int ret = 0, threshold_code;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ status = readb(data->base + EXYNOS4_TMU_REG_STATUS);
+ if (!status) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Save trimming info in order to perform calibration */
+ trim_info = readl(data->base + EXYNOS4_TMU_REG_TRIMINFO);
+ data->temp_error1 = trim_info & EXYNOS4_TMU_TRIM_TEMP_MASK;
+ data->temp_error2 = ((trim_info >> 8) & EXYNOS4_TMU_TRIM_TEMP_MASK);
+
+ /* Write temperature code for threshold */
+ threshold_code = temp_to_code(data, pdata->threshold);
+ if (threshold_code < 0) {
+ ret = threshold_code;
+ goto out;
+ }
+ writeb(threshold_code,
+ data->base + EXYNOS4_TMU_REG_THRESHOLD_TEMP);
+
+ writeb(pdata->trigger_levels[0],
+ data->base + EXYNOS4_TMU_REG_TRIG_LEVEL0);
+ writeb(pdata->trigger_levels[1],
+ data->base + EXYNOS4_TMU_REG_TRIG_LEVEL1);
+ writeb(pdata->trigger_levels[2],
+ data->base + EXYNOS4_TMU_REG_TRIG_LEVEL2);
+ writeb(pdata->trigger_levels[3],
+ data->base + EXYNOS4_TMU_REG_TRIG_LEVEL3);
+
+ writel(EXYNOS4_TMU_INTCLEAR_VAL,
+ data->base + EXYNOS4_TMU_REG_INTCLEAR);
+out:
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static void exynos4_tmu_control(struct platform_device *pdev, bool on)
+{
+ struct exynos4_tmu_data *data = platform_get_drvdata(pdev);
+ struct exynos4_tmu_platform_data *pdata = data->pdata;
+ unsigned int con, interrupt_en;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ con = pdata->reference_voltage << EXYNOS4_TMU_REF_VOLTAGE_SHIFT |
+ pdata->gain << EXYNOS4_TMU_GAIN_SHIFT;
+ if (on) {
+ con |= EXYNOS4_TMU_CORE_ON;
+ interrupt_en = pdata->trigger_level3_en << 12 |
+ pdata->trigger_level2_en << 8 |
+ pdata->trigger_level1_en << 4 |
+ pdata->trigger_level0_en;
+ } else {
+ con |= EXYNOS4_TMU_CORE_OFF;
+ interrupt_en = 0; /* Disable all interrupts */
+ }
+ writel(interrupt_en, data->base + EXYNOS4_TMU_REG_INTEN);
+ writel(con, data->base + EXYNOS4_TMU_REG_CONTROL);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+}
+
+static int exynos4_tmu_read(struct exynos4_tmu_data *data)
+{
+ u8 temp_code;
+ int temp;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ temp_code = readb(data->base + EXYNOS4_TMU_REG_CURRENT_TEMP);
+ temp = code_to_temp(data, temp_code);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+
+ return temp;
+}
+
+static void exynos4_tmu_work(struct work_struct *work)
+{
+ struct exynos4_tmu_data *data = container_of(work,
+ struct exynos4_tmu_data, irq_work);
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ writel(EXYNOS4_TMU_INTCLEAR_VAL, data->base + EXYNOS4_TMU_REG_INTCLEAR);
+
+ kobject_uevent(&data->hwmon_dev->kobj, KOBJ_CHANGE);
+
+ enable_irq(data->irq);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+}
+
+static irqreturn_t exynos4_tmu_irq(int irq, void *id)
+{
+ struct exynos4_tmu_data *data = id;
+
+ disable_irq_nosync(irq);
+ schedule_work(&data->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t exynos4_tmu_show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "exynos4-tmu\n");
+}
+
+static ssize_t exynos4_tmu_show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct exynos4_tmu_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = exynos4_tmu_read(data);
+ if (ret < 0)
+ return ret;
+
+ /* convert from degree Celsius to millidegree Celsius */
+ return sprintf(buf, "%d\n", ret * 1000);
+}
+
+static ssize_t exynos4_tmu_show_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct exynos4_tmu_data *data = dev_get_drvdata(dev);
+ struct exynos4_tmu_platform_data *pdata = data->pdata;
+ int temp;
+ unsigned int trigger_level;
+
+ temp = exynos4_tmu_read(data);
+ if (temp < 0)
+ return temp;
+
+ trigger_level = pdata->threshold + pdata->trigger_levels[attr->index];
+
+ return sprintf(buf, "%d\n", !!(temp > trigger_level));
+}
+
+static ssize_t exynos4_tmu_show_level(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct exynos4_tmu_data *data = dev_get_drvdata(dev);
+ struct exynos4_tmu_platform_data *pdata = data->pdata;
+ unsigned int temp = pdata->threshold +
+ pdata->trigger_levels[attr->index];
+
+ return sprintf(buf, "%u\n", temp * 1000);
+}
+
+static DEVICE_ATTR(name, S_IRUGO, exynos4_tmu_show_name, NULL);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, exynos4_tmu_show_temp, NULL, 0);
+
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
+ exynos4_tmu_show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
+ exynos4_tmu_show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO,
+ exynos4_tmu_show_alarm, NULL, 3);
+
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, exynos4_tmu_show_level, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, exynos4_tmu_show_level, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO,
+ exynos4_tmu_show_level, NULL, 3);
+
+static struct attribute *exynos4_tmu_attributes[] = {
+ &dev_attr_name.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group exynos4_tmu_attr_group = {
+ .attrs = exynos4_tmu_attributes,
+};
+
+static int __devinit exynos4_tmu_probe(struct platform_device *pdev)
+{
+ struct exynos4_tmu_data *data;
+ struct exynos4_tmu_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform init data supplied.\n");
+ return -ENODEV;
+ }
+
+ data = kzalloc(sizeof(struct exynos4_tmu_data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "Failed to allocate driver structure\n");
+ return -ENOMEM;
+ }
+
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0) {
+ ret = data->irq;
+ dev_err(&pdev->dev, "Failed to get platform irq\n");
+ goto err_free;
+ }
+
+ INIT_WORK(&data->irq_work, exynos4_tmu_work);
+
+ data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!data->mem) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ goto err_free;
+ }
+
+ data->mem = request_mem_region(data->mem->start,
+ resource_size(data->mem), pdev->name);
+ if (!data->mem) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "Failed to request memory region\n");
+ goto err_free;
+ }
+
+ data->base = ioremap(data->mem->start, resource_size(data->mem));
+ if (!data->base) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "Failed to ioremap memory\n");
+ goto err_mem_region;
+ }
+
+ ret = request_irq(data->irq, exynos4_tmu_irq,
+ IRQF_TRIGGER_RISING,
+ "exynos4-tmu", data);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
+ goto err_io_remap;
+ }
+
+ data->clk = clk_get(NULL, "tmu_apbif");
+ if (IS_ERR(data->clk)) {
+ ret = PTR_ERR(data->clk);
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ goto err_irq;
+ }
+
+ data->pdata = pdata;
+ platform_set_drvdata(pdev, data);
+ mutex_init(&data->lock);
+
+ ret = exynos4_tmu_initialize(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize TMU\n");
+ goto err_clk;
+ }
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &exynos4_tmu_attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to create sysfs group\n");
+ goto err_clk;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ dev_err(&pdev->dev, "Failed to register hwmon device\n");
+ goto err_create_group;
+ }
+
+ exynos4_tmu_control(pdev, true);
+
+ return 0;
+
+err_create_group:
+ sysfs_remove_group(&pdev->dev.kobj, &exynos4_tmu_attr_group);
+err_clk:
+ platform_set_drvdata(pdev, NULL);
+ clk_put(data->clk);
+err_irq:
+ free_irq(data->irq, data);
+err_io_remap:
+ iounmap(data->base);
+err_mem_region:
+ release_mem_region(data->mem->start, resource_size(data->mem));
+err_free:
+ kfree(data);
+
+ return ret;
+}
+
+static int __devexit exynos4_tmu_remove(struct platform_device *pdev)
+{
+ struct exynos4_tmu_data *data = platform_get_drvdata(pdev);
+
+ exynos4_tmu_control(pdev, false);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &exynos4_tmu_attr_group);
+
+ clk_put(data->clk);
+
+ free_irq(data->irq, data);
+
+ iounmap(data->base);
+ release_mem_region(data->mem->start, resource_size(data->mem));
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(data);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int exynos4_tmu_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ exynos4_tmu_control(pdev, false);
+
+ return 0;
+}
+
+static int exynos4_tmu_resume(struct platform_device *pdev)
+{
+ exynos4_tmu_initialize(pdev);
+ exynos4_tmu_control(pdev, true);
+
+ return 0;
+}
+#else
+#define exynos4_tmu_suspend NULL
+#define exynos4_tmu_resume NULL
+#endif
+
+static struct platform_driver exynos4_tmu_driver = {
+ .driver = {
+ .name = "exynos4-tmu",
+ .owner = THIS_MODULE,
+ },
+ .probe = exynos4_tmu_probe,
+ .remove = __devexit_p(exynos4_tmu_remove),
+ .suspend = exynos4_tmu_suspend,
+ .resume = exynos4_tmu_resume,
+};
+
+static int __init exynos4_tmu_driver_init(void)
+{
+ return platform_driver_register(&exynos4_tmu_driver);
+}
+module_init(exynos4_tmu_driver_init);
+
+static void __exit exynos4_tmu_driver_exit(void)
+{
+ platform_driver_unregister(&exynos4_tmu_driver);
+}
+module_exit(exynos4_tmu_driver_exit);
+
+MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
+MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:exynos4-tmu");
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 2d96ed2bf8e..59dd881c71d 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -605,7 +605,7 @@ static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = {
/* PWM attr for the f71862fg, fewer pwms and fewer zones per pwm than the
standard models */
-static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[] = {
+static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[3][7] = { {
SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
store_pwm_auto_point_channel, 0, 0),
@@ -627,7 +627,7 @@ static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[] = {
0, 0),
SENSOR_ATTR_2(pwm1_auto_point2_temp_hyst, S_IRUGO,
show_pwm_auto_point_temp_hyst, NULL, 3, 0),
-
+}, {
SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
store_pwm_auto_point_channel, 0, 1),
@@ -649,7 +649,7 @@ static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[] = {
0, 1),
SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO,
show_pwm_auto_point_temp_hyst, NULL, 3, 1),
-
+}, {
SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
store_pwm_auto_point_channel, 0, 2),
@@ -671,12 +671,12 @@ static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[] = {
0, 2),
SENSOR_ATTR_2(pwm3_auto_point2_temp_hyst, S_IRUGO,
show_pwm_auto_point_temp_hyst, NULL, 3, 2),
-};
+} };
/* PWM attr for the f71808e/f71869, almost identical to the f71862fg, but the
pwm setting when the temperature is above the pwmX_auto_point1_temp can be
programmed instead of being hardcoded to 0xff */
-static struct sensor_device_attribute_2 f71869_auto_pwm_attr[] = {
+static struct sensor_device_attribute_2 f71869_auto_pwm_attr[3][8] = { {
SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
store_pwm_auto_point_channel, 0, 0),
@@ -701,7 +701,7 @@ static struct sensor_device_attribute_2 f71869_auto_pwm_attr[] = {
0, 0),
SENSOR_ATTR_2(pwm1_auto_point2_temp_hyst, S_IRUGO,
show_pwm_auto_point_temp_hyst, NULL, 3, 0),
-
+}, {
SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
store_pwm_auto_point_channel, 0, 1),
@@ -726,7 +726,7 @@ static struct sensor_device_attribute_2 f71869_auto_pwm_attr[] = {
0, 1),
SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO,
show_pwm_auto_point_temp_hyst, NULL, 3, 1),
-
+}, {
SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
store_pwm_auto_point_channel, 0, 2),
@@ -751,7 +751,7 @@ static struct sensor_device_attribute_2 f71869_auto_pwm_attr[] = {
0, 2),
SENSOR_ATTR_2(pwm3_auto_point2_temp_hyst, S_IRUGO,
show_pwm_auto_point_temp_hyst, NULL, 3, 2),
-};
+} };
/* PWM attr for the standard models */
static struct sensor_device_attribute_2 fxxxx_auto_pwm_attr[4][14] = { {
@@ -928,7 +928,7 @@ static struct sensor_device_attribute_2 f8000_fan_attr[] = {
/* PWM attr for the f8000, zones mapped to temp instead of to pwm!
Also the register block at offset A0 maps to TEMP1 (so our temp2, as the
F8000 starts counting temps at 0), B0 maps the TEMP2 and C0 maps to TEMP0 */
-static struct sensor_device_attribute_2 f8000_auto_pwm_attr[] = {
+static struct sensor_device_attribute_2 f8000_auto_pwm_attr[3][14] = { {
SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
store_pwm_auto_point_channel, 0, 0),
@@ -969,7 +969,7 @@ static struct sensor_device_attribute_2 f8000_auto_pwm_attr[] = {
show_pwm_auto_point_temp_hyst, NULL, 2, 2),
SENSOR_ATTR_2(temp1_auto_point4_temp_hyst, S_IRUGO,
show_pwm_auto_point_temp_hyst, NULL, 3, 2),
-
+}, {
SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
store_pwm_auto_point_channel, 0, 1),
@@ -1010,7 +1010,7 @@ static struct sensor_device_attribute_2 f8000_auto_pwm_attr[] = {
show_pwm_auto_point_temp_hyst, NULL, 2, 0),
SENSOR_ATTR_2(temp2_auto_point4_temp_hyst, S_IRUGO,
show_pwm_auto_point_temp_hyst, NULL, 3, 0),
-
+}, {
SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
store_pwm_auto_point_channel, 0, 2),
@@ -1051,7 +1051,7 @@ static struct sensor_device_attribute_2 f8000_auto_pwm_attr[] = {
show_pwm_auto_point_temp_hyst, NULL, 2, 1),
SENSOR_ATTR_2(temp3_auto_point4_temp_hyst, S_IRUGO,
show_pwm_auto_point_temp_hyst, NULL, 3, 1),
-};
+} };
/* Super I/O functions */
static inline int superio_inb(int base, int reg)
@@ -2154,6 +2154,104 @@ static void f71882fg_remove_sysfs_files(struct platform_device *pdev,
device_remove_file(&pdev->dev, &attr[i].dev_attr);
}
+static int __devinit f71882fg_create_fan_sysfs_files(
+ struct platform_device *pdev, int idx)
+{
+ struct f71882fg_data *data = platform_get_drvdata(pdev);
+ int err;
+
+ /* Sanity check the pwm setting */
+ err = 0;
+ switch (data->type) {
+ case f71858fg:
+ if (((data->pwm_enable >> (idx * 2)) & 3) == 3)
+ err = 1;
+ break;
+ case f71862fg:
+ if (((data->pwm_enable >> (idx * 2)) & 1) != 1)
+ err = 1;
+ break;
+ case f8000:
+ if (idx == 2)
+ err = data->pwm_enable & 0x20;
+ break;
+ default:
+ break;
+ }
+ if (err) {
+ dev_err(&pdev->dev,
+ "Invalid (reserved) pwm settings: 0x%02x, "
+ "skipping fan %d\n",
+ (data->pwm_enable >> (idx * 2)) & 3, idx + 1);
+ return 0; /* This is a non fatal condition */
+ }
+
+ err = f71882fg_create_sysfs_files(pdev, &fxxxx_fan_attr[idx][0],
+ ARRAY_SIZE(fxxxx_fan_attr[0]));
+ if (err)
+ return err;
+
+ if (f71882fg_fan_has_beep[data->type]) {
+ err = f71882fg_create_sysfs_files(pdev,
+ &fxxxx_fan_beep_attr[idx],
+ 1);
+ if (err)
+ return err;
+ }
+
+ dev_info(&pdev->dev, "Fan: %d is in %s mode\n", idx + 1,
+ (data->pwm_enable & (1 << (2 * idx))) ? "duty-cycle" : "RPM");
+
+ /* Check for unsupported auto pwm settings */
+ switch (data->type) {
+ case f71808e:
+ case f71808a:
+ case f71869:
+ case f71869a:
+ case f71889fg:
+ case f71889ed:
+ case f71889a:
+ data->pwm_auto_point_mapping[idx] =
+ f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(idx));
+ if ((data->pwm_auto_point_mapping[idx] & 0x80) ||
+ (data->pwm_auto_point_mapping[idx] & 3) == 0) {
+ dev_warn(&pdev->dev,
+ "Auto pwm controlled by raw digital "
+ "data, disabling pwm auto_point "
+ "sysfs attributes for fan %d\n", idx + 1);
+ return 0; /* This is a non fatal condition */
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (data->type) {
+ case f71862fg:
+ err = f71882fg_create_sysfs_files(pdev,
+ &f71862fg_auto_pwm_attr[idx][0],
+ ARRAY_SIZE(f71862fg_auto_pwm_attr[0]));
+ break;
+ case f71808e:
+ case f71869:
+ err = f71882fg_create_sysfs_files(pdev,
+ &f71869_auto_pwm_attr[idx][0],
+ ARRAY_SIZE(f71869_auto_pwm_attr[0]));
+ break;
+ case f8000:
+ err = f71882fg_create_sysfs_files(pdev,
+ &f8000_auto_pwm_attr[idx][0],
+ ARRAY_SIZE(f8000_auto_pwm_attr[0]));
+ break;
+ default:
+ err = f71882fg_create_sysfs_files(pdev,
+ &fxxxx_auto_pwm_attr[idx][0],
+ ARRAY_SIZE(fxxxx_auto_pwm_attr[0]));
+ }
+
+ return err;
+}
+
static int __devinit f71882fg_probe(struct platform_device *pdev)
{
struct f71882fg_data *data;
@@ -2272,117 +2370,29 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
data->pwm_enable =
f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
- /* Sanity check the pwm settings */
- switch (data->type) {
- case f71858fg:
- err = 0;
- for (i = 0; i < nr_fans; i++)
- if (((data->pwm_enable >> (i * 2)) & 3) == 3)
- err = 1;
- break;
- case f71862fg:
- err = (data->pwm_enable & 0x15) != 0x15;
- break;
- case f8000:
- err = data->pwm_enable & 0x20;
- break;
- default:
- err = 0;
- break;
- }
- if (err) {
- dev_err(&pdev->dev,
- "Invalid (reserved) pwm settings: 0x%02x\n",
- (unsigned int)data->pwm_enable);
- err = -ENODEV;
- goto exit_unregister_sysfs;
- }
-
- err = f71882fg_create_sysfs_files(pdev, &fxxxx_fan_attr[0][0],
- ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans);
- if (err)
- goto exit_unregister_sysfs;
-
- if (f71882fg_fan_has_beep[data->type]) {
- err = f71882fg_create_sysfs_files(pdev,
- fxxxx_fan_beep_attr, nr_fans);
+ for (i = 0; i < nr_fans; i++) {
+ err = f71882fg_create_fan_sysfs_files(pdev, i);
if (err)
goto exit_unregister_sysfs;
}
- switch (data->type) {
- case f71808e:
- case f71808a:
- case f71869:
- case f71869a:
- case f71889fg:
- case f71889ed:
- case f71889a:
- for (i = 0; i < nr_fans; i++) {
- data->pwm_auto_point_mapping[i] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_MAPPING(i));
- if ((data->pwm_auto_point_mapping[i] & 0x80) ||
- (data->pwm_auto_point_mapping[i] & 3) == 0)
- break;
- }
- if (i != nr_fans) {
- dev_warn(&pdev->dev,
- "Auto pwm controlled by raw digital "
- "data, disabling pwm auto_point "
- "sysfs attributes\n");
- goto no_pwm_auto_point;
- }
- break;
- default:
- break;
- }
-
+ /* Some types have 1 extra fan with limited functionality */
switch (data->type) {
case f71808a:
err = f71882fg_create_sysfs_files(pdev,
- &fxxxx_auto_pwm_attr[0][0],
- ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
- if (err)
- goto exit_unregister_sysfs;
- err = f71882fg_create_sysfs_files(pdev,
f71808a_fan3_attr,
ARRAY_SIZE(f71808a_fan3_attr));
break;
- case f71862fg:
- err = f71882fg_create_sysfs_files(pdev,
- f71862fg_auto_pwm_attr,
- ARRAY_SIZE(f71862fg_auto_pwm_attr));
- break;
- case f71808e:
- case f71869:
- err = f71882fg_create_sysfs_files(pdev,
- f71869_auto_pwm_attr,
- ARRAY_SIZE(f71869_auto_pwm_attr));
- break;
case f8000:
err = f71882fg_create_sysfs_files(pdev,
f8000_fan_attr,
ARRAY_SIZE(f8000_fan_attr));
- if (err)
- goto exit_unregister_sysfs;
- err = f71882fg_create_sysfs_files(pdev,
- f8000_auto_pwm_attr,
- ARRAY_SIZE(f8000_auto_pwm_attr));
break;
default:
- err = f71882fg_create_sysfs_files(pdev,
- &fxxxx_auto_pwm_attr[0][0],
- ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
+ break;
}
if (err)
goto exit_unregister_sysfs;
-
-no_pwm_auto_point:
- for (i = 0; i < nr_fans; i++)
- dev_info(&pdev->dev, "Fan: %d is in %s mode\n", i + 1,
- (data->pwm_enable & (1 << 2 * i)) ?
- "duty-cycle" : "RPM");
}
data->hwmon_dev = hwmon_device_register(&pdev->dev);
@@ -2476,22 +2486,23 @@ static int f71882fg_remove(struct platform_device *pdev)
break;
case f71862fg:
f71882fg_remove_sysfs_files(pdev,
- f71862fg_auto_pwm_attr,
- ARRAY_SIZE(f71862fg_auto_pwm_attr));
+ &f71862fg_auto_pwm_attr[0][0],
+ ARRAY_SIZE(f71862fg_auto_pwm_attr[0]) *
+ nr_fans);
break;
case f71808e:
case f71869:
f71882fg_remove_sysfs_files(pdev,
- f71869_auto_pwm_attr,
- ARRAY_SIZE(f71869_auto_pwm_attr));
+ &f71869_auto_pwm_attr[0][0],
+ ARRAY_SIZE(f71869_auto_pwm_attr[0]) * nr_fans);
break;
case f8000:
f71882fg_remove_sysfs_files(pdev,
f8000_fan_attr,
ARRAY_SIZE(f8000_fan_attr));
f71882fg_remove_sysfs_files(pdev,
- f8000_auto_pwm_attr,
- ARRAY_SIZE(f8000_auto_pwm_attr));
+ &f8000_auto_pwm_attr[0][0],
+ ARRAY_SIZE(f8000_auto_pwm_attr[0]) * nr_fans);
break;
default:
f71882fg_remove_sysfs_files(pdev,
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index ef902d5d06a..90126a2a1e4 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -35,6 +35,7 @@
*/
enum lm75_type { /* keep sorted in alphabetical order */
+ adt75,
ds1775,
ds75,
lm75,
@@ -213,6 +214,7 @@ static int lm75_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm75_ids[] = {
+ { "adt75", adt75, },
{ "ds1775", ds1775, },
{ "ds75", ds75, },
{ "lm75", lm75, },
@@ -247,19 +249,30 @@ static int lm75_detect(struct i2c_client *new_client,
I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
- /* Now, we do the remaining detection. There is no identification-
- dedicated register so we have to rely on several tricks:
- unused bits, registers cycling over 8-address boundaries,
- addresses 0x04-0x07 returning the last read value.
- The cycling+unused addresses combination is not tested,
- since it would significantly slow the detection down and would
- hardly add any value.
-
- The National Semiconductor LM75A is different than earlier
- LM75s. It has an ID byte of 0xaX (where X is the chip
- revision, with 1 being the only revision in existence) in
- register 7, and unused registers return 0xff rather than the
- last read value. */
+ /*
+ * Now, we do the remaining detection. There is no identification-
+ * dedicated register so we have to rely on several tricks:
+ * unused bits, registers cycling over 8-address boundaries,
+ * addresses 0x04-0x07 returning the last read value.
+ * The cycling+unused addresses combination is not tested,
+ * since it would significantly slow the detection down and would
+ * hardly add any value.
+ *
+ * The National Semiconductor LM75A is different than earlier
+ * LM75s. It has an ID byte of 0xaX (where X is the chip
+ * revision, with 1 being the only revision in existence) in
+ * register 7, and unused registers return 0xff rather than the
+ * last read value.
+ *
+ * Note that this function only detects the original National
+ * Semiconductor LM75 and the LM75A. Clones from other vendors
+ * aren't detected, on purpose, because they are typically never
+ * found on PC hardware. They are found on embedded designs where
+ * they can be instantiated explicitly so detection is not needed.
+ * The absence of identification registers on all these clones
+ * would make their exhaustive detection very difficult and weak,
+ * and odds are that the driver would bind to unsupported devices.
+ */
/* Unused bits */
conf = i2c_smbus_read_byte_data(new_client, 1);
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index c9237b9dcff..4b26f51920b 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -20,17 +20,18 @@ config SENSORS_PMBUS
help
If you say yes here you get hardware monitoring support for generic
PMBus devices, including but not limited to ADP4000, BMR450, BMR451,
- BMR453, BMR454, LTC2978, NCP4200, and NCP4208.
+ BMR453, BMR454, NCP4200, and NCP4208.
This driver can also be built as a module. If so, the module will
be called pmbus.
config SENSORS_ADM1275
- tristate "Analog Devices ADM1275"
+ tristate "Analog Devices ADM1275 and compatibles"
default n
help
If you say yes here you get hardware monitoring support for Analog
- Devices ADM1275 Hot-Swap Controller and Digital Power Monitor.
+ Devices ADM1275 and ADM1276 Hot-Swap Controller and Digital Power
+ Monitor.
This driver can also be built as a module. If so, the module will
be called adm1275.
@@ -45,6 +46,16 @@ config SENSORS_LM25066
This driver can also be built as a module. If so, the module will
be called lm25066.
+config SENSORS_LTC2978
+ tristate "Linear Technologies LTC2978 and LTC3880"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Linear
+ Technology LTC2978 and LTC3880.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2978.
+
config SENSORS_MAX16064
tristate "Maxim MAX16064"
default n
@@ -97,4 +108,15 @@ config SENSORS_UCD9200
This driver can also be built as a module. If so, the module will
be called ucd9200.
+config SENSORS_ZL6100
+ tristate "Intersil ZL6100 and compatibles"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Intersil
+ ZL2004, ZL2006, ZL2008, ZL2105, ZL2106, ZL6100, and ZL6105 Digital
+ DC/DC Controllers.
+
+ This driver can also be built as a module. If so, the module will
+ be called zl6100.
+
endif # PMBUS
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 623eedb1ed9..789376c85db 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -6,8 +6,10 @@ obj-$(CONFIG_PMBUS) += pmbus_core.o
obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
+obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
+obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index c936e278230..980a4d9d502 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -23,6 +23,8 @@
#include <linux/i2c.h>
#include "pmbus.h"
+enum chips { adm1275, adm1276 };
+
#define ADM1275_PEAK_IOUT 0xd0
#define ADM1275_PEAK_VIN 0xd1
#define ADM1275_PEAK_VOUT 0xd2
@@ -31,14 +33,47 @@
#define ADM1275_VIN_VOUT_SELECT (1 << 6)
#define ADM1275_VRANGE (1 << 5)
+#define ADM1275_IOUT_WARN2_LIMIT 0xd7
+#define ADM1275_DEVICE_CONFIG 0xd8
+
+#define ADM1275_IOUT_WARN2_SELECT (1 << 4)
+
+#define ADM1276_PEAK_PIN 0xda
+
+#define ADM1275_MFR_STATUS_IOUT_WARN2 (1 << 0)
+
+struct adm1275_data {
+ int id;
+ bool have_oc_fault;
+ struct pmbus_driver_info info;
+};
+
+#define to_adm1275_data(x) container_of(x, struct adm1275_data, info)
+
static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
{
- int ret;
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct adm1275_data *data = to_adm1275_data(info);
+ int ret = 0;
if (page)
- return -EINVAL;
+ return -ENXIO;
switch (reg) {
+ case PMBUS_IOUT_UC_FAULT_LIMIT:
+ if (data->have_oc_fault) {
+ ret = -ENXIO;
+ break;
+ }
+ ret = pmbus_read_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT);
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ if (!data->have_oc_fault) {
+ ret = -ENXIO;
+ break;
+ }
+ ret = pmbus_read_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT);
+ break;
case PMBUS_VIRT_READ_IOUT_MAX:
ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_IOUT);
break;
@@ -48,10 +83,20 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_READ_VIN_MAX:
ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VIN);
break;
+ case PMBUS_VIRT_READ_PIN_MAX:
+ if (data->id != adm1276) {
+ ret = -ENXIO;
+ break;
+ }
+ ret = pmbus_read_word_data(client, 0, ADM1276_PEAK_PIN);
+ break;
case PMBUS_VIRT_RESET_IOUT_HISTORY:
case PMBUS_VIRT_RESET_VOUT_HISTORY:
case PMBUS_VIRT_RESET_VIN_HISTORY:
- ret = 0;
+ break;
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ if (data->id != adm1276)
+ ret = -ENXIO;
break;
default:
ret = -ENODATA;
@@ -66,9 +111,14 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
int ret;
if (page)
- return -EINVAL;
+ return -ENXIO;
switch (reg) {
+ case PMBUS_IOUT_UC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ ret = pmbus_write_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT,
+ word);
+ break;
case PMBUS_VIRT_RESET_IOUT_HISTORY:
ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_IOUT, 0);
break;
@@ -78,6 +128,41 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
case PMBUS_VIRT_RESET_VIN_HISTORY:
ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_VIN, 0);
break;
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ ret = pmbus_write_word_data(client, 0, ADM1276_PEAK_PIN, 0);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int adm1275_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct adm1275_data *data = to_adm1275_data(info);
+ int mfr_status, ret;
+
+ if (page > 0)
+ return -ENXIO;
+
+ switch (reg) {
+ case PMBUS_STATUS_IOUT:
+ ret = pmbus_read_byte_data(client, page, PMBUS_STATUS_IOUT);
+ if (ret < 0)
+ break;
+ mfr_status = pmbus_read_byte_data(client, page,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (mfr_status < 0) {
+ ret = mfr_status;
+ break;
+ }
+ if (mfr_status & ADM1275_MFR_STATUS_IOUT_WARN2) {
+ ret |= data->have_oc_fault ?
+ PB_IOUT_OC_FAULT : PB_IOUT_UC_FAULT;
+ }
+ break;
default:
ret = -ENODATA;
break;
@@ -88,16 +173,17 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int config;
+ int config, device_config;
int ret;
struct pmbus_driver_info *info;
+ struct adm1275_data *data;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA))
return -ENODEV;
- info = kzalloc(sizeof(struct pmbus_driver_info), GFP_KERNEL);
- if (!info)
+ data = kzalloc(sizeof(struct adm1275_data), GFP_KERNEL);
+ if (!data)
return -ENOMEM;
config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
@@ -106,6 +192,15 @@ static int adm1275_probe(struct i2c_client *client,
goto err_mem;
}
+ device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG);
+ if (device_config < 0) {
+ ret = device_config;
+ goto err_mem;
+ }
+
+ data->id = id->driver_data;
+ info = &data->info;
+
info->pages = 1;
info->format[PSC_VOLTAGE_IN] = direct;
info->format[PSC_VOLTAGE_OUT] = direct;
@@ -116,6 +211,7 @@ static int adm1275_probe(struct i2c_client *client,
info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
info->read_word_data = adm1275_read_word_data;
+ info->read_byte_data = adm1275_read_byte_data;
info->write_word_data = adm1275_write_word_data;
if (config & ADM1275_VRANGE) {
@@ -134,10 +230,36 @@ static int adm1275_probe(struct i2c_client *client,
info->R[PSC_VOLTAGE_OUT] = -1;
}
- if (config & ADM1275_VIN_VOUT_SELECT)
- info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
- else
- info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
+ if (device_config & ADM1275_IOUT_WARN2_SELECT)
+ data->have_oc_fault = true;
+
+ switch (id->driver_data) {
+ case adm1275:
+ if (config & ADM1275_VIN_VOUT_SELECT)
+ info->func[0] |=
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+ else
+ info->func[0] |=
+ PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
+ break;
+ case adm1276:
+ info->format[PSC_POWER] = direct;
+ info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_PIN
+ | PMBUS_HAVE_STATUS_INPUT;
+ if (config & ADM1275_VIN_VOUT_SELECT)
+ info->func[0] |=
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+ if (config & ADM1275_VRANGE) {
+ info->m[PSC_POWER] = 6043;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -2;
+ } else {
+ info->m[PSC_POWER] = 2115;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -1;
+ }
+ break;
+ }
ret = pmbus_do_probe(client, id, info);
if (ret)
@@ -145,22 +267,23 @@ static int adm1275_probe(struct i2c_client *client,
return 0;
err_mem:
- kfree(info);
+ kfree(data);
return ret;
}
static int adm1275_remove(struct i2c_client *client)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
- int ret;
+ const struct adm1275_data *data = to_adm1275_data(info);
- ret = pmbus_do_remove(client);
- kfree(info);
- return ret;
+ pmbus_do_remove(client);
+ kfree(data);
+ return 0;
}
static const struct i2c_device_id adm1275_id[] = {
- {"adm1275", 0},
+ { "adm1275", adm1275 },
+ { "adm1276", adm1276 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1275_id);
@@ -185,7 +308,7 @@ static void __exit adm1275_exit(void)
}
MODULE_AUTHOR("Guenter Roeck");
-MODULE_DESCRIPTION("PMBus driver for Analog Devices ADM1275");
+MODULE_DESCRIPTION("PMBus driver for Analog Devices ADM1275 and compatibles");
MODULE_LICENSE("GPL");
module_init(adm1275_init);
module_exit(adm1275_exit);
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index ac254fba551..84a37f0c8db 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -57,7 +57,7 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
int ret;
if (page > 1)
- return -EINVAL;
+ return -ENXIO;
/* Map READ_VAUX into READ_VOUT register on page 1 */
if (page == 1) {
@@ -85,7 +85,7 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
break;
default:
/* No other valid registers on page 1 */
- ret = -EINVAL;
+ ret = -ENXIO;
break;
}
goto done;
@@ -138,7 +138,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
int ret;
if (page > 1)
- return -EINVAL;
+ return -ENXIO;
switch (reg) {
case PMBUS_IIN_OC_WARN_LIMIT:
@@ -164,10 +164,10 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
static int lm25066_write_byte(struct i2c_client *client, int page, u8 value)
{
if (page > 1)
- return -EINVAL;
+ return -ENXIO;
- if (page == 0)
- return pmbus_write_byte(client, 0, value);
+ if (page <= 0)
+ return pmbus_write_byte(client, page, value);
return 0;
}
@@ -309,11 +309,10 @@ static int lm25066_remove(struct i2c_client *client)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
const struct lm25066_data *data = to_lm25066_data(info);
- int ret;
- ret = pmbus_do_remove(client);
+ pmbus_do_remove(client);
kfree(data);
- return ret;
+ return 0;
}
static const struct i2c_device_id lm25066_id[] = {
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
new file mode 100644
index 00000000000..820fff48910
--- /dev/null
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -0,0 +1,408 @@
+/*
+ * Hardware monitoring driver for LTC2978 and LTC3880
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+enum chips { ltc2978, ltc3880 };
+
+/* LTC2978 and LTC3880 */
+#define LTC2978_MFR_VOUT_PEAK 0xdd
+#define LTC2978_MFR_VIN_PEAK 0xde
+#define LTC2978_MFR_TEMPERATURE_PEAK 0xdf
+#define LTC2978_MFR_SPECIAL_ID 0xe7
+
+/* LTC2978 only */
+#define LTC2978_MFR_VOUT_MIN 0xfb
+#define LTC2978_MFR_VIN_MIN 0xfc
+#define LTC2978_MFR_TEMPERATURE_MIN 0xfd
+
+/* LTC3880 only */
+#define LTC3880_MFR_IOUT_PEAK 0xd7
+#define LTC3880_MFR_CLEAR_PEAKS 0xe3
+#define LTC3880_MFR_TEMPERATURE2_PEAK 0xf4
+
+#define LTC2978_ID_REV1 0x0121
+#define LTC2978_ID_REV2 0x0122
+#define LTC3880_ID 0x4000
+#define LTC3880_ID_MASK 0xff00
+
+/*
+ * LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which
+ * happens pretty much each time chip data is updated. Raw peak data therefore
+ * does not provide much value. To be able to provide useful peak data, keep an
+ * internal cache of measured peak data, which is only cleared if an explicit
+ * "clear peak" command is executed for the sensor in question.
+ */
+struct ltc2978_data {
+ enum chips id;
+ int vin_min, vin_max;
+ int temp_min, temp_max;
+ int vout_min[8], vout_max[8];
+ int iout_max[2];
+ int temp2_max[2];
+ struct pmbus_driver_info info;
+};
+
+#define to_ltc2978_data(x) container_of(x, struct ltc2978_data, info)
+
+static inline int lin11_to_val(int data)
+{
+ s16 e = ((s16)data) >> 11;
+ s32 m = (((s16)(data << 5)) >> 5);
+
+ /*
+ * mantissa is 10 bit + sign, exponent adds up to 15 bit.
+ * Add 6 bit to exponent for maximum accuracy (10 + 15 + 6 = 31).
+ */
+ e += 6;
+ return (e < 0 ? m >> -e : m << e);
+}
+
+static int ltc2978_read_word_data_common(struct i2c_client *client, int page,
+ int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ltc2978_data *data = to_ltc2978_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VIN_MAX:
+ ret = pmbus_read_word_data(client, page, LTC2978_MFR_VIN_PEAK);
+ if (ret >= 0) {
+ if (lin11_to_val(ret) > lin11_to_val(data->vin_max))
+ data->vin_max = ret;
+ ret = data->vin_max;
+ }
+ break;
+ case PMBUS_VIRT_READ_VOUT_MAX:
+ ret = pmbus_read_word_data(client, page, LTC2978_MFR_VOUT_PEAK);
+ if (ret >= 0) {
+ /*
+ * VOUT is 16 bit unsigned with fixed exponent,
+ * so we can compare it directly
+ */
+ if (ret > data->vout_max[page])
+ data->vout_max[page] = ret;
+ ret = data->vout_max[page];
+ }
+ break;
+ case PMBUS_VIRT_READ_TEMP_MAX:
+ ret = pmbus_read_word_data(client, page,
+ LTC2978_MFR_TEMPERATURE_PEAK);
+ if (ret >= 0) {
+ if (lin11_to_val(ret) > lin11_to_val(data->temp_max))
+ data->temp_max = ret;
+ ret = data->temp_max;
+ }
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int ltc2978_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ltc2978_data *data = to_ltc2978_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VIN_MIN:
+ ret = pmbus_read_word_data(client, page, LTC2978_MFR_VIN_MIN);
+ if (ret >= 0) {
+ if (lin11_to_val(ret) < lin11_to_val(data->vin_min))
+ data->vin_min = ret;
+ ret = data->vin_min;
+ }
+ break;
+ case PMBUS_VIRT_READ_VOUT_MIN:
+ ret = pmbus_read_word_data(client, page, LTC2978_MFR_VOUT_MIN);
+ if (ret >= 0) {
+ /*
+ * VOUT_MIN is known to not be supported on some lots
+ * of LTC2978 revision 1, and will return the maximum
+ * possible voltage if read. If VOUT_MAX is valid and
+ * lower than the reading of VOUT_MIN, use it instead.
+ */
+ if (data->vout_max[page] && ret > data->vout_max[page])
+ ret = data->vout_max[page];
+ if (ret < data->vout_min[page])
+ data->vout_min[page] = ret;
+ ret = data->vout_min[page];
+ }
+ break;
+ case PMBUS_VIRT_READ_TEMP_MIN:
+ ret = pmbus_read_word_data(client, page,
+ LTC2978_MFR_TEMPERATURE_MIN);
+ if (ret >= 0) {
+ if (lin11_to_val(ret)
+ < lin11_to_val(data->temp_min))
+ data->temp_min = ret;
+ ret = data->temp_min;
+ }
+ break;
+ case PMBUS_VIRT_READ_IOUT_MAX:
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ case PMBUS_VIRT_READ_TEMP2_MAX:
+ case PMBUS_VIRT_RESET_TEMP2_HISTORY:
+ ret = -ENXIO;
+ break;
+ default:
+ ret = ltc2978_read_word_data_common(client, page, reg);
+ break;
+ }
+ return ret;
+}
+
+static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ltc2978_data *data = to_ltc2978_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_IOUT_MAX:
+ ret = pmbus_read_word_data(client, page, LTC3880_MFR_IOUT_PEAK);
+ if (ret >= 0) {
+ if (lin11_to_val(ret)
+ > lin11_to_val(data->iout_max[page]))
+ data->iout_max[page] = ret;
+ ret = data->iout_max[page];
+ }
+ break;
+ case PMBUS_VIRT_READ_TEMP2_MAX:
+ ret = pmbus_read_word_data(client, page,
+ LTC3880_MFR_TEMPERATURE2_PEAK);
+ if (ret >= 0) {
+ if (lin11_to_val(ret)
+ > lin11_to_val(data->temp2_max[page]))
+ data->temp2_max[page] = ret;
+ ret = data->temp2_max[page];
+ }
+ break;
+ case PMBUS_VIRT_READ_VIN_MIN:
+ case PMBUS_VIRT_READ_VOUT_MIN:
+ case PMBUS_VIRT_READ_TEMP_MIN:
+ ret = -ENXIO;
+ break;
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ case PMBUS_VIRT_RESET_TEMP2_HISTORY:
+ ret = 0;
+ break;
+ default:
+ ret = ltc2978_read_word_data_common(client, page, reg);
+ break;
+ }
+ return ret;
+}
+
+static int ltc2978_clear_peaks(struct i2c_client *client, int page,
+ enum chips id)
+{
+ int ret;
+
+ if (id == ltc2978)
+ ret = pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
+ else
+ ret = pmbus_write_byte(client, 0, LTC3880_MFR_CLEAR_PEAKS);
+
+ return ret;
+}
+
+static int ltc2978_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ltc2978_data *data = to_ltc2978_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ data->iout_max[page] = 0x7fff;
+ ret = ltc2978_clear_peaks(client, page, data->id);
+ break;
+ case PMBUS_VIRT_RESET_TEMP2_HISTORY:
+ data->temp2_max[page] = 0x7fff;
+ ret = ltc2978_clear_peaks(client, page, data->id);
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ data->vout_min[page] = 0xffff;
+ data->vout_max[page] = 0;
+ ret = ltc2978_clear_peaks(client, page, data->id);
+ break;
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ data->vin_min = 0x7bff;
+ data->vin_max = 0;
+ ret = ltc2978_clear_peaks(client, page, data->id);
+ break;
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ data->temp_min = 0x7bff;
+ data->temp_max = 0x7fff;
+ ret = ltc2978_clear_peaks(client, page, data->id);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static const struct i2c_device_id ltc2978_id[] = {
+ {"ltc2978", ltc2978},
+ {"ltc3880", ltc3880},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ltc2978_id);
+
+static int ltc2978_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int chip_id, ret, i;
+ struct ltc2978_data *data;
+ struct pmbus_driver_info *info;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(struct ltc2978_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ chip_id = i2c_smbus_read_word_data(client, LTC2978_MFR_SPECIAL_ID);
+ if (chip_id < 0) {
+ ret = chip_id;
+ goto err_mem;
+ }
+
+ if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2) {
+ data->id = ltc2978;
+ } else if ((chip_id & LTC3880_ID_MASK) == LTC3880_ID) {
+ data->id = ltc3880;
+ } else {
+ dev_err(&client->dev, "Unsupported chip ID 0x%x\n", chip_id);
+ ret = -ENODEV;
+ goto err_mem;
+ }
+ if (data->id != id->driver_data)
+ dev_warn(&client->dev,
+ "Device mismatch: Configured %s, detected %s\n",
+ id->name,
+ ltc2978_id[data->id].name);
+
+ info = &data->info;
+ info->write_word_data = ltc2978_write_word_data;
+
+ data->vout_min[0] = 0xffff;
+ data->vin_min = 0x7bff;
+ data->temp_min = 0x7bff;
+ data->temp_max = 0x7fff;
+
+ switch (id->driver_data) {
+ case ltc2978:
+ info->read_word_data = ltc2978_read_word_data;
+ info->pages = 8;
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ for (i = 1; i < 8; i++) {
+ info->func[i] = PMBUS_HAVE_VOUT
+ | PMBUS_HAVE_STATUS_VOUT;
+ data->vout_min[i] = 0xffff;
+ }
+ break;
+ case ltc3880:
+ info->read_word_data = ltc3880_read_word_data;
+ info->pages = 2;
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
+ info->func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ data->vout_min[1] = 0xffff;
+ break;
+ default:
+ ret = -ENODEV;
+ goto err_mem;
+ }
+
+ ret = pmbus_do_probe(client, id, info);
+ if (ret)
+ goto err_mem;
+ return 0;
+
+err_mem:
+ kfree(data);
+ return ret;
+}
+
+static int ltc2978_remove(struct i2c_client *client)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct ltc2978_data *data = to_ltc2978_data(info);
+
+ pmbus_do_remove(client);
+ kfree(data);
+ return 0;
+}
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ltc2978_driver = {
+ .driver = {
+ .name = "ltc2978",
+ },
+ .probe = ltc2978_probe,
+ .remove = ltc2978_remove,
+ .id_table = ltc2978_id,
+};
+
+static int __init ltc2978_init(void)
+{
+ return i2c_add_driver(&ltc2978_driver);
+}
+
+static void __exit ltc2978_exit(void)
+{
+ i2c_del_driver(&ltc2978_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for LTC2978 and LTC3880");
+MODULE_LICENSE("GPL");
+module_init(ltc2978_init);
+module_exit(ltc2978_exit);
diff --git a/drivers/hwmon/pmbus/max16064.c b/drivers/hwmon/pmbus/max16064.c
index e50b296e8db..1d77cf4d2d4 100644
--- a/drivers/hwmon/pmbus/max16064.c
+++ b/drivers/hwmon/pmbus/max16064.c
@@ -105,7 +105,8 @@ static int max16064_probe(struct i2c_client *client,
static int max16064_remove(struct i2c_client *client)
{
- return pmbus_do_remove(client);
+ pmbus_do_remove(client);
+ return 0;
}
static const struct i2c_device_id max16064_id[] = {
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index fda621d2e45..beaf5a8d9c4 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -93,12 +93,14 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
{
- int ret;
+ int ret = 0;
int mfg_status;
- ret = pmbus_set_page(client, page);
- if (ret < 0)
- return ret;
+ if (page >= 0) {
+ ret = pmbus_set_page(client, page);
+ if (ret < 0)
+ return ret;
+ }
switch (reg) {
case PMBUS_STATUS_IOUT:
@@ -224,7 +226,8 @@ static int max34440_probe(struct i2c_client *client,
static int max34440_remove(struct i2c_client *client)
{
- return pmbus_do_remove(client);
+ pmbus_do_remove(client);
+ return 0;
}
static const struct i2c_device_id max34440_id[] = {
diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
index c3e72f1a3cf..e2b74bb399b 100644
--- a/drivers/hwmon/pmbus/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -45,7 +45,7 @@ static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
int ret;
if (page)
- return -EINVAL;
+ return -ENXIO;
switch (reg) {
case PMBUS_VIRT_READ_VOUT_MAX:
@@ -101,8 +101,8 @@ static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
int ret = 0;
int mfg_status;
- if (page)
- return -EINVAL;
+ if (page > 0)
+ return -ENXIO;
switch (reg) {
case PMBUS_STATUS_VOUT:
@@ -182,7 +182,8 @@ static int max8688_probe(struct i2c_client *client,
static int max8688_remove(struct i2c_client *client)
{
- return pmbus_do_remove(client);
+ pmbus_do_remove(client);
+ return 0;
}
static const struct i2c_device_id max8688_id[] = {
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 73de9f1f319..995e873197e 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -187,13 +187,12 @@ out:
static int pmbus_remove(struct i2c_client *client)
{
- int ret;
const struct pmbus_driver_info *info;
info = pmbus_get_driver_info(client);
- ret = pmbus_do_remove(client);
+ pmbus_do_remove(client);
kfree(info);
- return ret;
+ return 0;
}
/*
@@ -205,10 +204,13 @@ static const struct i2c_device_id pmbus_id[] = {
{"bmr451", 1},
{"bmr453", 1},
{"bmr454", 1},
- {"ltc2978", 8},
{"ncp4200", 1},
{"ncp4208", 1},
+ {"pdt003", 1},
+ {"pdt006", 1},
+ {"pdt012", 1},
{"pmbus", 0},
+ {"udt020", 1},
{}
};
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index a6ae20ffef6..5d31d1c2c0f 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -134,8 +134,16 @@
* Semantics:
* Virtual registers are all word size.
* READ registers are read-only; writes are either ignored or return an error.
- * RESET registers are read/write. Reading returns zero (used for detection),
- * writing any value causes the associated history to be reset.
+ * RESET registers are read/write. Reading reset registers returns zero
+ * (used for detection), writing any value causes the associated history to be
+ * reset.
+ * Virtual registers have to be handled in device specific driver code. Chip
+ * driver code returns non-negative register values if a virtual register is
+ * supported, or a negative error code if not. The chip driver may return
+ * -ENODATA or any other error code in this case, though an error code other
+ * than -ENODATA is handled more efficiently and thus preferred. Either case,
+ * the calling PMBus core code will abort if the chip driver returns an error
+ * code when reading or writing virtual registers.
*/
#define PMBUS_VIRT_BASE 0x100
#define PMBUS_VIRT_READ_TEMP_MIN (PMBUS_VIRT_BASE + 0)
@@ -160,6 +168,9 @@
#define PMBUS_VIRT_READ_IOUT_MIN (PMBUS_VIRT_BASE + 19)
#define PMBUS_VIRT_READ_IOUT_MAX (PMBUS_VIRT_BASE + 20)
#define PMBUS_VIRT_RESET_IOUT_HISTORY (PMBUS_VIRT_BASE + 21)
+#define PMBUS_VIRT_READ_TEMP2_MIN (PMBUS_VIRT_BASE + 22)
+#define PMBUS_VIRT_READ_TEMP2_MAX (PMBUS_VIRT_BASE + 23)
+#define PMBUS_VIRT_RESET_TEMP2_HISTORY (PMBUS_VIRT_BASE + 24)
/*
* CAPABILITY
@@ -320,6 +331,12 @@ struct pmbus_driver_info {
* The following functions map manufacturing specific register values
* to PMBus standard register values. Specify only if mapping is
* necessary.
+ * Functions return the register value (read) or zero (write) if
+ * successful. A return value of -ENODATA indicates that there is no
+ * manufacturer specific register, but that a standard PMBus register
+ * may exist. Any other negative return value indicates that the
+ * register does not exist, and that no attempt should be made to read
+ * the standard register.
*/
int (*read_byte_data)(struct i2c_client *client, int page, int reg);
int (*read_word_data)(struct i2c_client *client, int page, int reg);
@@ -347,7 +364,7 @@ bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
struct pmbus_driver_info *info);
-int pmbus_do_remove(struct i2c_client *client);
+void pmbus_do_remove(struct i2c_client *client);
const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client
*client);
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 397fc59b568..00460d8d842 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -160,7 +160,7 @@ int pmbus_set_page(struct i2c_client *client, u8 page)
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
if (newpage != page)
- rv = -EINVAL;
+ rv = -EIO;
else
data->currpage = page;
}
@@ -229,7 +229,7 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
return status;
}
if (reg >= PMBUS_VIRT_BASE)
- return -EINVAL;
+ return -ENXIO;
return pmbus_write_word_data(client, page, reg, word);
}
@@ -261,7 +261,7 @@ static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg)
return status;
}
if (reg >= PMBUS_VIRT_BASE)
- return -EINVAL;
+ return -ENXIO;
return pmbus_read_word_data(client, page, reg);
}
@@ -316,11 +316,11 @@ static int pmbus_check_status_cml(struct i2c_client *client)
{
int status, status2;
- status = pmbus_read_byte_data(client, -1, PMBUS_STATUS_BYTE);
+ status = _pmbus_read_byte_data(client, -1, PMBUS_STATUS_BYTE);
if (status < 0 || (status & PB_STATUS_CML)) {
- status2 = pmbus_read_byte_data(client, -1, PMBUS_STATUS_CML);
+ status2 = _pmbus_read_byte_data(client, -1, PMBUS_STATUS_CML);
if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
- return -EINVAL;
+ return -EIO;
}
return 0;
}
@@ -371,8 +371,8 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
for (i = 0; i < info->pages; i++)
data->status[PB_STATUS_BASE + i]
- = pmbus_read_byte_data(client, i,
- PMBUS_STATUS_BYTE);
+ = _pmbus_read_byte_data(client, i,
+ PMBUS_STATUS_BYTE);
for (i = 0; i < info->pages; i++) {
if (!(info->func[i] & PMBUS_HAVE_STATUS_VOUT))
continue;
@@ -445,13 +445,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
exponent = data->exponent;
mantissa = (u16) sensor->data;
} else { /* LINEAR11 */
- exponent = (sensor->data >> 11) & 0x001f;
- mantissa = sensor->data & 0x07ff;
-
- if (exponent > 0x0f)
- exponent |= 0xffe0; /* sign extend exponent */
- if (mantissa > 0x03ff)
- mantissa |= 0xfffff800; /* sign extend mantissa */
+ exponent = ((s16)sensor->data) >> 11;
+ mantissa = ((s16)((sensor->data & 0x7ff) << 5)) >> 5;
}
val = mantissa;
@@ -1401,7 +1396,42 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {
}
};
-static const struct pmbus_limit_attr temp_limit_attrs23[] = {
+static const struct pmbus_limit_attr temp_limit_attrs2[] = {
+ {
+ .reg = PMBUS_UT_WARN_LIMIT,
+ .low = true,
+ .attr = "min",
+ .alarm = "min_alarm",
+ .sbit = PB_TEMP_UT_WARNING,
+ }, {
+ .reg = PMBUS_UT_FAULT_LIMIT,
+ .low = true,
+ .attr = "lcrit",
+ .alarm = "lcrit_alarm",
+ .sbit = PB_TEMP_UT_FAULT,
+ }, {
+ .reg = PMBUS_OT_WARN_LIMIT,
+ .attr = "max",
+ .alarm = "max_alarm",
+ .sbit = PB_TEMP_OT_WARNING,
+ }, {
+ .reg = PMBUS_OT_FAULT_LIMIT,
+ .attr = "crit",
+ .alarm = "crit_alarm",
+ .sbit = PB_TEMP_OT_FAULT,
+ }, {
+ .reg = PMBUS_VIRT_READ_TEMP2_MIN,
+ .attr = "lowest",
+ }, {
+ .reg = PMBUS_VIRT_READ_TEMP2_MAX,
+ .attr = "highest",
+ }, {
+ .reg = PMBUS_VIRT_RESET_TEMP2_HISTORY,
+ .attr = "reset_history",
+ }
+};
+
+static const struct pmbus_limit_attr temp_limit_attrs3[] = {
{
.reg = PMBUS_UT_WARN_LIMIT,
.low = true,
@@ -1450,8 +1480,8 @@ static const struct pmbus_sensor_attr temp_attributes[] = {
.sfunc = PMBUS_HAVE_STATUS_TEMP,
.sbase = PB_STATUS_TEMP_BASE,
.gbit = PB_STATUS_TEMPERATURE,
- .limit = temp_limit_attrs23,
- .nlimit = ARRAY_SIZE(temp_limit_attrs23),
+ .limit = temp_limit_attrs2,
+ .nlimit = ARRAY_SIZE(temp_limit_attrs2),
}, {
.reg = PMBUS_READ_TEMPERATURE_3,
.class = PSC_TEMPERATURE,
@@ -1462,8 +1492,8 @@ static const struct pmbus_sensor_attr temp_attributes[] = {
.sfunc = PMBUS_HAVE_STATUS_TEMP,
.sbase = PB_STATUS_TEMP_BASE,
.gbit = PB_STATUS_TEMPERATURE,
- .limit = temp_limit_attrs23,
- .nlimit = ARRAY_SIZE(temp_limit_attrs23),
+ .limit = temp_limit_attrs3,
+ .nlimit = ARRAY_SIZE(temp_limit_attrs3),
}
};
@@ -1593,10 +1623,10 @@ static void pmbus_find_attributes(struct i2c_client *client,
static int pmbus_identify_common(struct i2c_client *client,
struct pmbus_data *data)
{
- int vout_mode = -1, exponent;
+ int vout_mode = -1;
if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE))
- vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ vout_mode = _pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
if (vout_mode >= 0 && vout_mode != 0xff) {
/*
* Not all chips support the VOUT_MODE command,
@@ -1607,11 +1637,7 @@ static int pmbus_identify_common(struct i2c_client *client,
if (data->info->format[PSC_VOLTAGE_OUT] != linear)
return -ENODEV;
- exponent = vout_mode & 0x1f;
- /* and sign-extend it */
- if (exponent & 0x10)
- exponent |= ~0x1f;
- data->exponent = exponent;
+ data->exponent = ((s8)(vout_mode << 3)) >> 3;
break;
case 1: /* VID mode */
if (data->info->format[PSC_VOLTAGE_OUT] != vid)
@@ -1682,7 +1708,7 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
if (info->pages <= 0 || info->pages > PMBUS_PAGES) {
dev_err(&client->dev, "Bad number of PMBus pages: %d\n",
info->pages);
- ret = -EINVAL;
+ ret = -ENODEV;
goto out_data;
}
@@ -1764,7 +1790,7 @@ out_data:
}
EXPORT_SYMBOL_GPL(pmbus_do_probe);
-int pmbus_do_remove(struct i2c_client *client)
+void pmbus_do_remove(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
@@ -1774,7 +1800,6 @@ int pmbus_do_remove(struct i2c_client *client)
kfree(data->booleans);
kfree(data->sensors);
kfree(data);
- return 0;
}
EXPORT_SYMBOL_GPL(pmbus_do_remove);
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index d0ddb60155c..4ff6cf289f8 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -74,8 +74,8 @@ static int ucd9000_read_byte_data(struct i2c_client *client, int page, int reg)
switch (reg) {
case PMBUS_FAN_CONFIG_12:
- if (page)
- return -EINVAL;
+ if (page > 0)
+ return -ENXIO;
ret = ucd9000_get_fan_config(client, 0);
if (ret < 0)
@@ -88,8 +88,8 @@ static int ucd9000_read_byte_data(struct i2c_client *client, int page, int reg)
ret = fan_config;
break;
case PMBUS_FAN_CONFIG_34:
- if (page)
- return -EINVAL;
+ if (page > 0)
+ return -ENXIO;
ret = ucd9000_get_fan_config(client, 2);
if (ret < 0)
@@ -239,13 +239,12 @@ out:
static int ucd9000_remove(struct i2c_client *client)
{
- int ret;
struct ucd9000_data *data;
data = to_ucd9000_data(pmbus_get_driver_info(client));
- ret = pmbus_do_remove(client);
+ pmbus_do_remove(client);
kfree(data);
- return ret;
+ return 0;
}
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index c65e9da707c..6e1c1a80ab8 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -171,13 +171,12 @@ out:
static int ucd9200_remove(struct i2c_client *client)
{
- int ret;
const struct pmbus_driver_info *info;
info = pmbus_get_driver_info(client);
- ret = pmbus_do_remove(client);
+ pmbus_do_remove(client);
kfree(info);
- return ret;
+ return 0;
}
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
new file mode 100644
index 00000000000..2bc980006f8
--- /dev/null
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -0,0 +1,256 @@
+/*
+ * Hardware monitoring driver for ZL6100 and compatibles
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/ktime.h>
+#include <linux/delay.h>
+#include "pmbus.h"
+
+enum chips { zl2004, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105 };
+
+struct zl6100_data {
+ int id;
+ ktime_t access; /* chip access time */
+ struct pmbus_driver_info info;
+};
+
+#define to_zl6100_data(x) container_of(x, struct zl6100_data, info)
+
+#define ZL6100_DEVICE_ID 0xe4
+
+#define ZL6100_WAIT_TIME 1000 /* uS */
+
+static ushort delay = ZL6100_WAIT_TIME;
+module_param(delay, ushort, 0644);
+MODULE_PARM_DESC(delay, "Delay between chip accesses in uS");
+
+/* Some chips need a delay between accesses */
+static inline void zl6100_wait(const struct zl6100_data *data)
+{
+ if (delay) {
+ s64 delta = ktime_us_delta(ktime_get(), data->access);
+ if (delta < delay)
+ udelay(delay - delta);
+ }
+}
+
+static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct zl6100_data *data = to_zl6100_data(info);
+ int ret;
+
+ if (page || reg >= PMBUS_VIRT_BASE)
+ return -ENXIO;
+
+ zl6100_wait(data);
+ ret = pmbus_read_word_data(client, page, reg);
+ data->access = ktime_get();
+
+ return ret;
+}
+
+static int zl6100_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct zl6100_data *data = to_zl6100_data(info);
+ int ret;
+
+ if (page > 0)
+ return -ENXIO;
+
+ zl6100_wait(data);
+ ret = pmbus_read_byte_data(client, page, reg);
+ data->access = ktime_get();
+
+ return ret;
+}
+
+static int zl6100_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct zl6100_data *data = to_zl6100_data(info);
+ int ret;
+
+ if (page || reg >= PMBUS_VIRT_BASE)
+ return -ENXIO;
+
+ zl6100_wait(data);
+ ret = pmbus_write_word_data(client, page, reg, word);
+ data->access = ktime_get();
+
+ return ret;
+}
+
+static int zl6100_write_byte(struct i2c_client *client, int page, u8 value)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct zl6100_data *data = to_zl6100_data(info);
+ int ret;
+
+ if (page > 0)
+ return -ENXIO;
+
+ zl6100_wait(data);
+ ret = pmbus_write_byte(client, page, value);
+ data->access = ktime_get();
+
+ return ret;
+}
+
+static const struct i2c_device_id zl6100_id[] = {
+ {"zl2004", zl2004},
+ {"zl2006", zl2006},
+ {"zl2008", zl2008},
+ {"zl2105", zl2105},
+ {"zl2106", zl2106},
+ {"zl6100", zl6100},
+ {"zl6105", zl6105},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, zl6100_id);
+
+static int zl6100_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct zl6100_data *data;
+ struct pmbus_driver_info *info;
+ u8 device_id[I2C_SMBUS_BLOCK_MAX + 1];
+ const struct i2c_device_id *mid;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA
+ | I2C_FUNC_SMBUS_READ_BLOCK_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_block_data(client, ZL6100_DEVICE_ID,
+ device_id);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read device ID\n");
+ return ret;
+ }
+ device_id[ret] = '\0';
+ dev_info(&client->dev, "Device ID %s\n", device_id);
+
+ mid = NULL;
+ for (mid = zl6100_id; mid->name[0]; mid++) {
+ if (!strncasecmp(mid->name, device_id, strlen(mid->name)))
+ break;
+ }
+ if (!mid->name[0]) {
+ dev_err(&client->dev, "Unsupported device\n");
+ return -ENODEV;
+ }
+ if (id->driver_data != mid->driver_data)
+ dev_notice(&client->dev,
+ "Device mismatch: Configured %s, detected %s\n",
+ id->name, mid->name);
+
+ data = kzalloc(sizeof(struct zl6100_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->id = mid->driver_data;
+
+ /*
+ * ZL2008, ZL2105, and ZL6100 are known to require a wait time
+ * between I2C accesses. ZL2004 and ZL6105 are known to be safe.
+ *
+ * Only clear the wait time for chips known to be safe. The wait time
+ * can be cleared later for additional chips if tests show that it
+ * is not needed (in other words, better be safe than sorry).
+ */
+ if (data->id == zl2004 || data->id == zl6105)
+ delay = 0;
+
+ /*
+ * Since there was a direct I2C device access above, wait before
+ * accessing the chip again.
+ * Set the timestamp, wait, then set it again. This should provide
+ * enough buffer time to be safe.
+ */
+ data->access = ktime_get();
+ zl6100_wait(data);
+ data->access = ktime_get();
+
+ info = &data->info;
+
+ info->pages = 1;
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
+
+ info->read_word_data = zl6100_read_word_data;
+ info->read_byte_data = zl6100_read_byte_data;
+ info->write_word_data = zl6100_write_word_data;
+ info->write_byte = zl6100_write_byte;
+
+ ret = pmbus_do_probe(client, mid, info);
+ if (ret)
+ goto err_mem;
+ return 0;
+
+err_mem:
+ kfree(data);
+ return ret;
+}
+
+static int zl6100_remove(struct i2c_client *client)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct zl6100_data *data = to_zl6100_data(info);
+
+ pmbus_do_remove(client);
+ kfree(data);
+ return 0;
+}
+
+static struct i2c_driver zl6100_driver = {
+ .driver = {
+ .name = "zl6100",
+ },
+ .probe = zl6100_probe,
+ .remove = zl6100_remove,
+ .id_table = zl6100_id,
+};
+
+static int __init zl6100_init(void)
+{
+ return i2c_add_driver(&zl6100_driver);
+}
+
+static void __exit zl6100_exit(void)
+{
+ i2c_del_driver(&zl6100_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for ZL6100 and compatibles");
+MODULE_LICENSE("GPL");
+module_init(zl6100_init);
+module_exit(zl6100_exit);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index f2b377c56a3..98aab4bea34 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -197,6 +197,9 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 };
#define W83627EHF_REG_ALARM2 0x45A
#define W83627EHF_REG_ALARM3 0x45B
+#define W83627EHF_REG_CASEOPEN_DET 0x42 /* SMI STATUS #2 */
+#define W83627EHF_REG_CASEOPEN_CLR 0x46 /* SMI MASK #3 */
+
/* SmartFan registers */
#define W83627EHF_REG_FAN_STEPUP_TIME 0x0f
#define W83627EHF_REG_FAN_STEPDOWN_TIME 0x0e
@@ -316,7 +319,7 @@ static const char *const nct6776_temp_label[] = {
#define NUM_REG_TEMP ARRAY_SIZE(NCT6775_REG_TEMP)
-static inline int is_word_sized(u16 reg)
+static int is_word_sized(u16 reg)
{
return ((((reg & 0xff00) == 0x100
|| (reg & 0xff00) == 0x200)
@@ -385,22 +388,6 @@ div_from_reg(u8 reg)
return 1 << reg;
}
-static inline int
-temp_from_reg(u16 reg, s16 regval)
-{
- if (is_word_sized(reg))
- return LM75_TEMP_FROM_REG(regval);
- return regval * 1000;
-}
-
-static inline u16
-temp_to_reg(u16 reg, long temp)
-{
- if (is_word_sized(reg))
- return LM75_TEMP_TO_REG(temp);
- return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000);
-}
-
/* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */
static u8 scale_in[10] = { 8, 8, 16, 16, 8, 8, 8, 16, 16, 8 };
@@ -468,6 +455,7 @@ struct w83627ehf_data {
s16 temp_max[9];
s16 temp_max_hyst[9];
u32 alarms;
+ u8 caseopen;
u8 pwm_mode[4]; /* 0->DC variable voltage, 1->PWM variable duty cycle */
u8 pwm_enable[4]; /* 1->manual
@@ -556,6 +544,26 @@ static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg,
return 0;
}
+/* We left-align 8-bit temperature values to make the code simpler */
+static u16 w83627ehf_read_temp(struct w83627ehf_data *data, u16 reg)
+{
+ u16 res;
+
+ res = w83627ehf_read_value(data, reg);
+ if (!is_word_sized(reg))
+ res <<= 8;
+
+ return res;
+}
+
+static int w83627ehf_write_temp(struct w83627ehf_data *data, u16 reg,
+ u16 value)
+{
+ if (!is_word_sized(reg))
+ value >>= 8;
+ return w83627ehf_write_value(data, reg, value);
+}
+
/* This function assumes that the caller holds data->update_lock */
static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr)
{
@@ -770,6 +778,9 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
/* Measured voltages and limits */
for (i = 0; i < data->in_num; i++) {
+ if ((i == 6) && data->in6_skip)
+ continue;
+
data->in[i] = w83627ehf_read_value(data,
W83627EHF_REG_IN(i));
data->in_min[i] = w83627ehf_read_value(data,
@@ -854,15 +865,15 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
for (i = 0; i < NUM_REG_TEMP; i++) {
if (!(data->have_temp & (1 << i)))
continue;
- data->temp[i] = w83627ehf_read_value(data,
+ data->temp[i] = w83627ehf_read_temp(data,
data->reg_temp[i]);
if (data->reg_temp_over[i])
data->temp_max[i]
- = w83627ehf_read_value(data,
+ = w83627ehf_read_temp(data,
data->reg_temp_over[i]);
if (data->reg_temp_hyst[i])
data->temp_max_hyst[i]
- = w83627ehf_read_value(data,
+ = w83627ehf_read_temp(data,
data->reg_temp_hyst[i]);
}
@@ -873,6 +884,9 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
(w83627ehf_read_value(data,
W83627EHF_REG_ALARM3) << 16);
+ data->caseopen = w83627ehf_read_value(data,
+ W83627EHF_REG_CASEOPEN_DET);
+
data->last_updated = jiffies;
data->valid = 1;
}
@@ -1155,8 +1169,7 @@ show_##reg(struct device *dev, struct device_attribute *attr, \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", \
- temp_from_reg(data->addr[nr], data->reg[nr])); \
+ return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->reg[nr])); \
}
show_temp_reg(reg_temp, temp);
show_temp_reg(reg_temp_over, temp_max);
@@ -1177,9 +1190,8 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
if (err < 0) \
return err; \
mutex_lock(&data->update_lock); \
- data->reg[nr] = temp_to_reg(data->addr[nr], val); \
- w83627ehf_write_value(data, data->addr[nr], \
- data->reg[nr]); \
+ data->reg[nr] = LM75_TEMP_TO_REG(val); \
+ w83627ehf_write_temp(data, data->addr[nr], data->reg[nr]); \
mutex_unlock(&data->update_lock); \
return count; \
}
@@ -1654,6 +1666,48 @@ show_vid(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+
+/* Case open detection */
+
+static ssize_t
+show_caseopen(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+
+ return sprintf(buf, "%d\n",
+ !!(data->caseopen & to_sensor_dev_attr_2(attr)->index));
+}
+
+static ssize_t
+clear_caseopen(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ u16 reg, mask;
+
+ if (strict_strtoul(buf, 10, &val) || val != 0)
+ return -EINVAL;
+
+ mask = to_sensor_dev_attr_2(attr)->nr;
+
+ mutex_lock(&data->update_lock);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_CASEOPEN_CLR);
+ w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg | mask);
+ w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg & ~mask);
+ data->valid = 0; /* Force cache refresh */
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static struct sensor_device_attribute_2 sda_caseopen[] = {
+ SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_caseopen,
+ clear_caseopen, 0x80, 0x10),
+ SENSOR_ATTR_2(intrusion1_alarm, S_IWUSR | S_IRUGO, show_caseopen,
+ clear_caseopen, 0x40, 0x40),
+};
+
/*
* Driver and device management
*/
@@ -1710,12 +1764,16 @@ static void w83627ehf_device_remove_files(struct device *dev)
device_remove_file(dev, &sda_temp_type[i].dev_attr);
}
+ device_remove_file(dev, &sda_caseopen[0].dev_attr);
+ device_remove_file(dev, &sda_caseopen[1].dev_attr);
+
device_remove_file(dev, &dev_attr_name);
device_remove_file(dev, &dev_attr_cpu0_vid);
}
/* Get the monitoring functions started */
-static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
+static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data,
+ enum kinds kind)
{
int i;
u8 tmp, diode;
@@ -1746,10 +1804,16 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01);
/* Get thermal sensor types */
- diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
+ switch (kind) {
+ case w83627ehf:
+ diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
+ break;
+ default:
+ diode = 0x70;
+ }
for (i = 0; i < 3; i++) {
if ((tmp & (0x02 << i)))
- data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2;
+ data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3;
else
data->temp_type[i] = 4; /* thermistor */
}
@@ -1781,13 +1845,78 @@ static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
data->reg_temp_config[r2] = tmp;
}
+static void __devinit
+w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
+ struct w83627ehf_data *data)
+{
+ int fan3pin, fan4pin, fan4min, fan5pin, regval;
+
+ superio_enter(sio_data->sioreg);
+
+ /* fan4 and fan5 share some pins with the GPIO and serial flash */
+ if (sio_data->kind == nct6775) {
+ /* On NCT6775, fan4 shares pins with the fdc interface */
+ fan3pin = 1;
+ fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80);
+ fan4min = 0;
+ fan5pin = 0;
+ } else if (sio_data->kind == nct6776) {
+ fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
+ fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
+ fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
+ fan4min = fan4pin;
+ } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+ fan3pin = 1;
+ fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40;
+ fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
+ fan4min = fan4pin;
+ } else {
+ fan3pin = 1;
+ fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06);
+ fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
+ fan4min = fan4pin;
+ }
+
+ superio_exit(sio_data->sioreg);
+
+ data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */
+ data->has_fan |= (fan3pin << 2);
+ data->has_fan_min |= (fan3pin << 2);
+
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ /*
+ * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1
+ * register
+ */
+ data->has_fan |= (fan4pin << 3) | (fan5pin << 4);
+ data->has_fan_min |= (fan4min << 3) | (fan5pin << 4);
+ } else {
+ /*
+ * It looks like fan4 and fan5 pins can be alternatively used
+ * as fan on/off switches, but fan5 control is write only :/
+ * We assume that if the serial interface is disabled, designers
+ * connected fan5 as input unless they are emitting log 1, which
+ * is not the default.
+ */
+ regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
+ if ((regval & (1 << 2)) && fan4pin) {
+ data->has_fan |= (1 << 3);
+ data->has_fan_min |= (1 << 3);
+ }
+ if (!(regval & (1 << 1)) && fan5pin) {
+ data->has_fan |= (1 << 4);
+ data->has_fan_min |= (1 << 4);
+ }
+ }
+}
+
static int __devinit w83627ehf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct w83627ehf_sio_data *sio_data = dev->platform_data;
struct w83627ehf_data *data;
struct resource *res;
- u8 fan3pin, fan4pin, fan4min, fan5pin, en_vrm10;
+ u8 en_vrm10;
int i, err = 0;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -2016,7 +2145,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
}
/* Initialize the chip */
- w83627ehf_init_device(data);
+ w83627ehf_init_device(data, sio_data->kind);
data->vrm = vid_which_vrm();
superio_enter(sio_data->sioreg);
@@ -2072,30 +2201,6 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
}
}
- /* fan4 and fan5 share some pins with the GPIO and serial flash */
- if (sio_data->kind == nct6775) {
- /* On NCT6775, fan4 shares pins with the fdc interface */
- fan3pin = 1;
- fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80);
- fan4min = 0;
- fan5pin = 0;
- } else if (sio_data->kind == nct6776) {
- fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
- fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
- fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
- fan4min = fan4pin;
- } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
- fan3pin = 1;
- fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40;
- fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
- fan4min = fan4pin;
- } else {
- fan3pin = 1;
- fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06);
- fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
- fan4min = fan4pin;
- }
-
if (fan_debounce &&
(sio_data->kind == nct6775 || sio_data->kind == nct6776)) {
u8 tmp;
@@ -2113,34 +2218,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
superio_exit(sio_data->sioreg);
- /* It looks like fan4 and fan5 pins can be alternatively used
- as fan on/off switches, but fan5 control is write only :/
- We assume that if the serial interface is disabled, designers
- connected fan5 as input unless they are emitting log 1, which
- is not the default. */
-
- data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */
-
- data->has_fan |= (fan3pin << 2);
- data->has_fan_min |= (fan3pin << 2);
-
- /*
- * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1 register
- */
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- data->has_fan |= (fan4pin << 3) | (fan5pin << 4);
- data->has_fan_min |= (fan4min << 3) | (fan5pin << 4);
- } else {
- i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
- if ((i & (1 << 2)) && fan4pin) {
- data->has_fan |= (1 << 3);
- data->has_fan_min |= (1 << 3);
- }
- if (!(i & (1 << 1)) && fan5pin) {
- data->has_fan |= (1 << 4);
- data->has_fan_min |= (1 << 4);
- }
- }
+ w83627ehf_check_fan_inputs(sio_data, data);
/* Read fan clock dividers immediately */
w83627ehf_update_fan_div_common(dev, data);
@@ -2261,6 +2339,16 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
goto exit_remove;
}
+ err = device_create_file(dev, &sda_caseopen[0].dev_attr);
+ if (err)
+ goto exit_remove;
+
+ if (sio_data->kind == nct6776) {
+ err = device_create_file(dev, &sda_caseopen[1].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+
err = device_create_file(dev, &dev_attr_name);
if (err)
goto exit_remove;
diff --git a/drivers/i2c/busses/i2c-designware.c b/drivers/i2c/busses/i2c-designware.c
index b7a51c43b18..1b42b50b599 100644
--- a/drivers/i2c/busses/i2c-designware.c
+++ b/drivers/i2c/busses/i2c-designware.c
@@ -390,7 +390,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
int tx_limit, rx_limit;
u32 addr = msgs[dev->msg_write_idx].addr;
u32 buf_len = dev->tx_buf_len;
- u8 *buf = dev->tx_buf;;
+ u8 *buf = dev->tx_buf;
intr_mask = DW_IC_INTR_DEFAULT_MASK;
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 9827c5e686c..76b6d98bd29 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -327,7 +327,7 @@ config BLK_DEV_OPTI621
select BLK_DEV_IDEPCI
help
This is a driver for the OPTi 82C621 EIDE controller.
- Please read the comments at the top of <file:drivers/ide/pci/opti621.c>.
+ Please read the comments at the top of <file:drivers/ide/opti621.c>.
config BLK_DEV_RZ1000
tristate "RZ1000 chipset bugfix/support"
@@ -365,7 +365,7 @@ config BLK_DEV_ALI15X3
normal dual channel support.
Please read the comments at the top of
- <file:drivers/ide/pci/alim15x3.c>.
+ <file:drivers/ide/alim15x3.c>.
If unsure, say N.
@@ -528,7 +528,7 @@ config BLK_DEV_NS87415
This driver adds detection and support for the NS87415 chip
(used mainly on SPARC64 and PA-RISC machines).
- Please read the comments at the top of <file:drivers/ide/pci/ns87415.c>.
+ Please read the comments at the top of <file:drivers/ide/ns87415.c>.
config BLK_DEV_PDC202XX_OLD
tristate "PROMISE PDC202{46|62|65|67} support"
@@ -547,7 +547,7 @@ config BLK_DEV_PDC202XX_OLD
for more than one card.
Please read the comments at the top of
- <file:drivers/ide/pci/pdc202xx_old.c>.
+ <file:drivers/ide/pdc202xx_old.c>.
If unsure, say N.
@@ -593,7 +593,7 @@ config BLK_DEV_SIS5513
ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740,
SiS745, SiS750
- Please read the comments at the top of <file:drivers/ide/pci/sis5513.c>.
+ Please read the comments at the top of <file:drivers/ide/sis5513.c>.
config BLK_DEV_SL82C105
tristate "Winbond SL82c105 support"
@@ -616,7 +616,7 @@ config BLK_DEV_SLC90E66
look-a-like to the PIIX4 it should be a nice addition.
Please read the comments at the top of
- <file:drivers/ide/pci/slc90e66.c>.
+ <file:drivers/ide/slc90e66.c>.
config BLK_DEV_TRM290
tristate "Tekram TRM290 chipset support"
@@ -625,7 +625,7 @@ config BLK_DEV_TRM290
This driver adds support for bus master DMA transfers
using the Tekram TRM290 PCI IDE chip. Volunteers are
needed for further tweaking and development.
- Please read the comments at the top of <file:drivers/ide/pci/trm290.c>.
+ Please read the comments at the top of <file:drivers/ide/trm290.c>.
config BLK_DEV_VIA82CXXX
tristate "VIA82CXXX chipset support"
@@ -681,7 +681,7 @@ config BLK_DEV_IDE_AU1XXX
select IDE_XFER_MODE
choice
prompt "IDE Mode for AMD Alchemy Au1200"
- default CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
+ default BLK_DEV_IDE_AU1XXX_PIO_DBDMA
depends on SOC_AU1200 && BLK_DEV_IDE_AU1XXX
config BLK_DEV_IDE_AU1XXX_PIO_DBDMA
@@ -836,7 +836,7 @@ config BLK_DEV_ALI14XX
of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster
I/O speeds to be set as well.
See the files <file:Documentation/ide/ide.txt> and
- <file:drivers/ide/legacy/ali14xx.c> for more info.
+ <file:drivers/ide/ali14xx.c> for more info.
config BLK_DEV_DTC2278
tristate "DTC-2278 support"
@@ -847,7 +847,7 @@ config BLK_DEV_DTC2278
boot parameter. It enables support for the secondary IDE interface
of the DTC-2278 card, and permits faster I/O speeds to be set as
well. See the <file:Documentation/ide/ide.txt> and
- <file:drivers/ide/legacy/dtc2278.c> files for more info.
+ <file:drivers/ide/dtc2278.c> files for more info.
config BLK_DEV_HT6560B
tristate "Holtek HT6560B support"
@@ -858,7 +858,7 @@ config BLK_DEV_HT6560B
boot parameter. It enables support for the secondary IDE interface
of the Holtek card, and permits faster I/O speeds to be set as well.
See the <file:Documentation/ide/ide.txt> and
- <file:drivers/ide/legacy/ht6560b.c> files for more info.
+ <file:drivers/ide/ht6560b.c> files for more info.
config BLK_DEV_QD65XX
tristate "QDI QD65xx support"
@@ -867,7 +867,7 @@ config BLK_DEV_QD65XX
help
This driver is enabled at runtime using the "qd65xx.probe" kernel
boot parameter. It permits faster I/O speeds to be set. See the
- <file:Documentation/ide/ide.txt> and <file:drivers/ide/legacy/qd65xx.c>
+ <file:Documentation/ide/ide.txt> and <file:drivers/ide/qd65xx.c>
for more info.
config BLK_DEV_UMC8672
@@ -879,7 +879,7 @@ config BLK_DEV_UMC8672
boot parameter. It enables support for the secondary IDE interface
of the UMC-8672, and permits faster I/O speeds to be set as well.
See the files <file:Documentation/ide/ide.txt> and
- <file:drivers/ide/legacy/umc8672.c> for more info.
+ <file:drivers/ide/umc8672.c> for more info.
endif
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 444470a28de..5ce7b9e8bff 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -800,13 +800,10 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* Loop thru additional data fragments and queue them */
if (skb_shinfo(skb)->nr_frags) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- maplen = frag->size;
- mapaddr =
- pci_map_page(c2dev->pcidev, frag->page,
- frag->page_offset, maplen,
- PCI_DMA_TODEVICE);
-
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ maplen = skb_frag_size(frag);
+ mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag,
+ 0, maplen, DMA_TO_DEVICE);
elem = elem->next;
elem->skb = NULL;
elem->mapaddr = mapaddr;
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
index 621619c794e..2761364185a 100644
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ b/drivers/infiniband/hw/cxgb3/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Idrivers/net/cxgb3
+ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb3
obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
index cd20b1342ae..46b878ca2c3 100644
--- a/drivers/infiniband/hw/cxgb4/Makefile
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Idrivers/net/cxgb4
+ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index bd995b2b50d..24ab11a9ad1 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,6 +1,7 @@
config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
- depends on NETDEVICES && NETDEV_10000 && PCI
+ depends on NETDEVICES && ETHERNET && PCI
+ select NET_VENDOR_MELLANOX
select MLX4_CORE
---help---
This driver provides low-level InfiniBand support for
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 9d7ffebff21..47b2ee4c01e 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -441,13 +441,13 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
nesnic->tx_skb[nesnic->sq_head] = skb;
for (skb_fragment_index = 0; skb_fragment_index < skb_shinfo(skb)->nr_frags;
skb_fragment_index++) {
- bus_address = pci_map_page( nesdev->pcidev,
- skb_shinfo(skb)->frags[skb_fragment_index].page,
- skb_shinfo(skb)->frags[skb_fragment_index].page_offset,
- skb_shinfo(skb)->frags[skb_fragment_index].size,
- PCI_DMA_TODEVICE);
+ skb_frag_t *frag =
+ &skb_shinfo(skb)->frags[skb_fragment_index];
+ bus_address = skb_frag_dma_map(&nesdev->pcidev->dev,
+ frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
wqe_fragment_length[wqe_fragment_index] =
- cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size);
+ cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[skb_fragment_index]));
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
bus_address);
wqe_fragment_index++;
@@ -561,11 +561,12 @@ tso_sq_no_longer_full:
/* Map all the buffers */
for (tso_frag_count=0; tso_frag_count < skb_shinfo(skb)->nr_frags;
tso_frag_count++) {
- tso_bus_address[tso_frag_count] = pci_map_page( nesdev->pcidev,
- skb_shinfo(skb)->frags[tso_frag_count].page,
- skb_shinfo(skb)->frags[tso_frag_count].page_offset,
- skb_shinfo(skb)->frags[tso_frag_count].size,
- PCI_DMA_TODEVICE);
+ skb_frag_t *frag =
+ &skb_shinfo(skb)->frags[tso_frag_count];
+ tso_bus_address[tso_frag_count] =
+ skb_frag_dma_map(&nesdev->pcidev->dev,
+ frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
}
tso_frag_index = 0;
@@ -636,11 +637,11 @@ tso_sq_no_longer_full:
}
while (wqe_fragment_index < 5) {
wqe_fragment_length[wqe_fragment_index] =
- cpu_to_le16(skb_shinfo(skb)->frags[tso_frag_index].size);
+ cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index]));
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
(u64)tso_bus_address[tso_frag_index]);
wqe_fragment_index++;
- tso_wqe_length += skb_shinfo(skb)->frags[tso_frag_index++].size;
+ tso_wqe_length += skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index++]);
if (wqe_fragment_index < 5)
wqe_fragment_length[wqe_fragment_index] = 0;
if (tso_frag_index == tso_frag_count)
@@ -1638,7 +1639,7 @@ static const struct net_device_ops nes_netdev_ops = {
.ndo_get_stats = nes_netdev_get_stats,
.ndo_tx_timeout = nes_netdev_tx_timeout,
.ndo_set_mac_address = nes_netdev_set_mac_address,
- .ndo_set_multicast_list = nes_netdev_set_multicast_list,
+ .ndo_set_rx_mode = nes_netdev_set_multicast_list,
.ndo_change_mtu = nes_netdev_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_fix_features = nes_fix_features,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 39913a065f9..c74548a586e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -169,7 +169,7 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
goto partial_error;
skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
- mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
+ mapping[i + 1] = ib_dma_map_page(priv->ca, page,
0, PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
goto partial_error;
@@ -537,12 +537,13 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
if (length == 0) {
/* don't need this page */
- skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
+ skb_fill_page_desc(toskb, i, skb_frag_page(frag),
+ 0, PAGE_SIZE);
--skb_shinfo(skb)->nr_frags;
} else {
size = min(length, (unsigned) PAGE_SIZE);
- frag->size = size;
+ skb_frag_size_set(frag, size);
skb->data_len += size;
skb->truesize += size;
skb->len += size;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 81ae61d68a2..2b060f45bec 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -117,7 +117,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
size = length - IPOIB_UD_HEAD_SIZE;
- frag->size = size;
+ skb_frag_size_set(frag, size);
skb->data_len += size;
skb->truesize += size;
} else
@@ -182,7 +182,7 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
goto partial_error;
skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
mapping[1] =
- ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
+ ib_dma_map_page(priv->ca, page,
0, PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
goto partial_error;
@@ -322,9 +322,10 @@ static int ipoib_dma_map_tx(struct ib_device *ca,
off = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- mapping[i + off] = ib_dma_map_page(ca, frag->page,
- frag->page_offset, frag->size,
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ mapping[i + off] = ib_dma_map_page(ca,
+ skb_frag_page(frag),
+ frag->page_offset, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
goto partial_error;
@@ -333,8 +334,9 @@ static int ipoib_dma_map_tx(struct ib_device *ca,
partial_error:
for (; i > 0; --i) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
}
if (off)
@@ -358,8 +360,9 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
off = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- ib_dma_unmap_page(ca, mapping[i + off], frag->size,
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
DMA_TO_DEVICE);
}
}
@@ -509,7 +512,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
for (i = 0; i < nr_frags; ++i) {
priv->tx_sge[i + off].addr = mapping[i + off];
- priv->tx_sge[i + off].length = frags[i].size;
+ priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
}
priv->tx_wr.num_sge = nr_frags + off;
priv->tx_wr.wr_id = wr_id;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index fe89c4660d5..7567b600023 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -998,7 +998,7 @@ static const struct net_device_ops ipoib_netdev_ops = {
.ndo_fix_features = ipoib_fix_features,
.ndo_start_xmit = ipoib_start_xmit,
.ndo_tx_timeout = ipoib_timeout,
- .ndo_set_multicast_list = ipoib_set_mcast_list,
+ .ndo_set_rx_mode = ipoib_set_mcast_list,
.ndo_neigh_setup = ipoib_neigh_setup_dev,
};
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 2c8b84dd9da..2be21694fac 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -7,7 +7,7 @@
* state machine code inspired by code from Tim Ruetz
*
* A generic driver for rotary encoders connected to GPIO lines.
- * See file:Documentation/input/rotary_encoder.txt for more information
+ * See file:Documentation/input/rotary-encoder.txt for more information
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 0dc97ec15c2..9dea71849f4 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1124,11 +1124,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
for (i = 0; i < 8; i++)
__set_bit(BTN_0 + i, input_dev->keybit);
- if (wacom_wac->features.type != WACOM_21UX2) {
- input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
- input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
- }
-
+ input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
+ input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index b57b3fa492f..7d7eaa15e77 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -59,10 +59,14 @@ config AMD_IOMMU_STATS
If unsure, say N.
# Intel IOMMU support
-config DMAR
- bool "Support for DMA Remapping Devices"
+config DMAR_TABLE
+ bool
+
+config INTEL_IOMMU
+ bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
select IOMMU_API
+ select DMAR_TABLE
help
DMA remapping (DMAR) devices support enables independent address
translations for Direct Memory Access (DMA) from devices.
@@ -70,18 +74,18 @@ config DMAR
and include PCI device scope covered by these DMA
remapping devices.
-config DMAR_DEFAULT_ON
+config INTEL_IOMMU_DEFAULT_ON
def_bool y
- prompt "Enable DMA Remapping Devices by default"
- depends on DMAR
+ prompt "Enable Intel DMA Remapping Devices by default"
+ depends on INTEL_IOMMU
help
Selecting this option will enable a DMAR device at boot time if
one is found. If this option is not selected, DMAR support can
be enabled by passing intel_iommu=on to the kernel.
-config DMAR_BROKEN_GFX_WA
+config INTEL_IOMMU_BROKEN_GFX_WA
bool "Workaround broken graphics drivers (going away soon)"
- depends on DMAR && BROKEN && X86
+ depends on INTEL_IOMMU && BROKEN && X86
---help---
Current Graphics drivers tend to use physical address
for DMA and avoid using DMA APIs. Setting this config
@@ -90,18 +94,19 @@ config DMAR_BROKEN_GFX_WA
to use physical addresses for DMA, at least until this
option is removed in the 2.6.32 kernel.
-config DMAR_FLOPPY_WA
+config INTEL_IOMMU_FLOPPY_WA
def_bool y
- depends on DMAR && X86
+ depends on INTEL_IOMMU && X86
---help---
Floppy disk drivers are known to bypass DMA API calls
thereby failing to work when IOMMU is enabled. This
workaround will setup a 1:1 mapping for the first
16MiB to make floppy (an ISA device) work.
-config INTR_REMAP
+config IRQ_REMAP
bool "Support for Interrupt Remapping (EXPERIMENTAL)"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
+ select DMAR_TABLE
---help---
Supports Interrupt remapping for IO-APIC and MSI devices.
To use x2apic mode in the CPU's which support x2APIC enhancements or
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 4d4d77df7ca..6394994a2b9 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
-obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
-obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
+obj-$(CONFIG_DMAR_TABLE) += dmar.o
+obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
+obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 6dcc7e2d54d..35c1e17fce1 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -46,7 +46,7 @@
*/
LIST_HEAD(dmar_drhd_units);
-static struct acpi_table_header * __initdata dmar_tbl;
+struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
@@ -118,8 +118,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
return 0;
}
-static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
- struct pci_dev ***devices, u16 segment)
+int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
+ struct pci_dev ***devices, u16 segment)
{
struct acpi_dmar_device_scope *scope;
void * tmp = start;
@@ -217,133 +217,6 @@ static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
return ret;
}
-#ifdef CONFIG_DMAR
-LIST_HEAD(dmar_rmrr_units);
-
-static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
-{
- list_add(&rmrr->list, &dmar_rmrr_units);
-}
-
-
-static int __init
-dmar_parse_one_rmrr(struct acpi_dmar_header *header)
-{
- struct acpi_dmar_reserved_memory *rmrr;
- struct dmar_rmrr_unit *rmrru;
-
- rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
- if (!rmrru)
- return -ENOMEM;
-
- rmrru->hdr = header;
- rmrr = (struct acpi_dmar_reserved_memory *)header;
- rmrru->base_address = rmrr->base_address;
- rmrru->end_address = rmrr->end_address;
-
- dmar_register_rmrr_unit(rmrru);
- return 0;
-}
-
-static int __init
-rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
-{
- struct acpi_dmar_reserved_memory *rmrr;
- int ret;
-
- rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
- ret = dmar_parse_dev_scope((void *)(rmrr + 1),
- ((void *)rmrr) + rmrr->header.length,
- &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
-
- if (ret || (rmrru->devices_cnt == 0)) {
- list_del(&rmrru->list);
- kfree(rmrru);
- }
- return ret;
-}
-
-static LIST_HEAD(dmar_atsr_units);
-
-static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
-{
- struct acpi_dmar_atsr *atsr;
- struct dmar_atsr_unit *atsru;
-
- atsr = container_of(hdr, struct acpi_dmar_atsr, header);
- atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
- if (!atsru)
- return -ENOMEM;
-
- atsru->hdr = hdr;
- atsru->include_all = atsr->flags & 0x1;
-
- list_add(&atsru->list, &dmar_atsr_units);
-
- return 0;
-}
-
-static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
-{
- int rc;
- struct acpi_dmar_atsr *atsr;
-
- if (atsru->include_all)
- return 0;
-
- atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
- rc = dmar_parse_dev_scope((void *)(atsr + 1),
- (void *)atsr + atsr->header.length,
- &atsru->devices_cnt, &atsru->devices,
- atsr->segment);
- if (rc || !atsru->devices_cnt) {
- list_del(&atsru->list);
- kfree(atsru);
- }
-
- return rc;
-}
-
-int dmar_find_matched_atsr_unit(struct pci_dev *dev)
-{
- int i;
- struct pci_bus *bus;
- struct acpi_dmar_atsr *atsr;
- struct dmar_atsr_unit *atsru;
-
- dev = pci_physfn(dev);
-
- list_for_each_entry(atsru, &dmar_atsr_units, list) {
- atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
- if (atsr->segment == pci_domain_nr(dev->bus))
- goto found;
- }
-
- return 0;
-
-found:
- for (bus = dev->bus; bus; bus = bus->parent) {
- struct pci_dev *bridge = bus->self;
-
- if (!bridge || !pci_is_pcie(bridge) ||
- bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
-
- if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
- for (i = 0; i < atsru->devices_cnt; i++)
- if (atsru->devices[i] == bridge)
- return 1;
- break;
- }
- }
-
- if (atsru->include_all)
- return 1;
-
- return 0;
-}
-#endif
-
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
@@ -484,14 +357,10 @@ parse_dmar_table(void)
ret = dmar_parse_one_drhd(entry_header);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
-#ifdef CONFIG_DMAR
ret = dmar_parse_one_rmrr(entry_header);
-#endif
break;
case ACPI_DMAR_TYPE_ATSR:
-#ifdef CONFIG_DMAR
ret = dmar_parse_one_atsr(entry_header);
-#endif
break;
case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
@@ -557,34 +426,31 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
int __init dmar_dev_scope_init(void)
{
+ static int dmar_dev_scope_initialized;
struct dmar_drhd_unit *drhd, *drhd_n;
int ret = -ENODEV;
+ if (dmar_dev_scope_initialized)
+ return dmar_dev_scope_initialized;
+
+ if (list_empty(&dmar_drhd_units))
+ goto fail;
+
list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
ret = dmar_parse_dev(drhd);
if (ret)
- return ret;
+ goto fail;
}
-#ifdef CONFIG_DMAR
- {
- struct dmar_rmrr_unit *rmrr, *rmrr_n;
- struct dmar_atsr_unit *atsr, *atsr_n;
-
- list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
- ret = rmrr_parse_dev(rmrr);
- if (ret)
- return ret;
- }
+ ret = dmar_parse_rmrr_atsr_dev();
+ if (ret)
+ goto fail;
- list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
- ret = atsr_parse_dev(atsr);
- if (ret)
- return ret;
- }
- }
-#endif
+ dmar_dev_scope_initialized = 1;
+ return 0;
+fail:
+ dmar_dev_scope_initialized = ret;
return ret;
}
@@ -611,14 +477,6 @@ int __init dmar_table_init(void)
return -ENODEV;
}
-#ifdef CONFIG_DMAR
- if (list_empty(&dmar_rmrr_units))
- printk(KERN_INFO PREFIX "No RMRR found\n");
-
- if (list_empty(&dmar_atsr_units))
- printk(KERN_INFO PREFIX "No ATSR found\n");
-#endif
-
return 0;
}
@@ -682,9 +540,6 @@ int __init check_zero_address(void)
return 1;
failed:
-#ifdef CONFIG_DMAR
- dmar_disabled = 1;
-#endif
return 0;
}
@@ -696,22 +551,21 @@ int __init detect_intel_iommu(void)
if (ret)
ret = check_zero_address();
{
-#ifdef CONFIG_INTR_REMAP
struct acpi_table_dmar *dmar;
dmar = (struct acpi_table_dmar *) dmar_tbl;
- if (ret && cpu_has_x2apic && dmar->flags & 0x1)
+
+ if (ret && intr_remapping_enabled && cpu_has_x2apic &&
+ dmar->flags & 0x1)
printk(KERN_INFO
- "Queued invalidation will be enabled to support "
- "x2apic and Intr-remapping.\n");
-#endif
-#ifdef CONFIG_DMAR
+ "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+
if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
/* Make sure ACS will be enabled */
pci_request_acs();
}
-#endif
+
#ifdef CONFIG_X86
if (ret)
x86_init.iommu.iommu_init = intel_iommu_init;
@@ -758,7 +612,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
goto err_unmap;
}
-#ifdef CONFIG_DMAR
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
printk(KERN_ERR
@@ -773,7 +626,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->seq_id);
goto err_unmap;
}
-#endif
iommu->agaw = agaw;
iommu->msagaw = msagaw;
@@ -800,7 +652,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
(unsigned long long)iommu->cap,
(unsigned long long)iommu->ecap);
- spin_lock_init(&iommu->register_lock);
+ raw_spin_lock_init(&iommu->register_lock);
drhd->iommu = iommu;
return 0;
@@ -817,9 +669,7 @@ void free_iommu(struct intel_iommu *iommu)
if (!iommu)
return;
-#ifdef CONFIG_DMAR
free_dmar_iommu(iommu);
-#endif
if (iommu->reg)
iounmap(iommu->reg);
@@ -921,11 +771,11 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
restart:
rc = 0;
- spin_lock_irqsave(&qi->q_lock, flags);
+ raw_spin_lock_irqsave(&qi->q_lock, flags);
while (qi->free_cnt < 3) {
- spin_unlock_irqrestore(&qi->q_lock, flags);
+ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
cpu_relax();
- spin_lock_irqsave(&qi->q_lock, flags);
+ raw_spin_lock_irqsave(&qi->q_lock, flags);
}
index = qi->free_head;
@@ -965,15 +815,15 @@ restart:
if (rc)
break;
- spin_unlock(&qi->q_lock);
+ raw_spin_unlock(&qi->q_lock);
cpu_relax();
- spin_lock(&qi->q_lock);
+ raw_spin_lock(&qi->q_lock);
}
qi->desc_status[index] = QI_DONE;
reclaim_free_desc(qi);
- spin_unlock_irqrestore(&qi->q_lock, flags);
+ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
if (rc == -EAGAIN)
goto restart;
@@ -1062,7 +912,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
if (!ecap_qis(iommu->ecap))
return;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
if (!(sts & DMA_GSTS_QIES))
@@ -1082,7 +932,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
!(sts & DMA_GSTS_QIES), sts);
end:
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
@@ -1097,7 +947,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
qi->free_head = qi->free_tail = 0;
qi->free_cnt = QI_LENGTH;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
/* write zero to the tail reg */
writel(0, iommu->reg + DMAR_IQT_REG);
@@ -1110,7 +960,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
@@ -1159,7 +1009,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
qi->free_head = qi->free_tail = 0;
qi->free_cnt = QI_LENGTH;
- spin_lock_init(&qi->q_lock);
+ raw_spin_lock_init(&qi->q_lock);
__dmar_enable_qi(iommu);
@@ -1225,11 +1075,11 @@ void dmar_msi_unmask(struct irq_data *data)
unsigned long flag;
/* unmask it */
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(0, iommu->reg + DMAR_FECTL_REG);
/* Read a reg to force flush the post write */
readl(iommu->reg + DMAR_FECTL_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_mask(struct irq_data *data)
@@ -1238,11 +1088,11 @@ void dmar_msi_mask(struct irq_data *data)
struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
/* mask it */
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
/* Read a reg to force flush the post write */
readl(iommu->reg + DMAR_FECTL_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
@@ -1250,11 +1100,11 @@ void dmar_msi_write(int irq, struct msi_msg *msg)
struct intel_iommu *iommu = irq_get_handler_data(irq);
unsigned long flag;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_read(int irq, struct msi_msg *msg)
@@ -1262,11 +1112,11 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
struct intel_iommu *iommu = irq_get_handler_data(irq);
unsigned long flag;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
@@ -1303,7 +1153,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
u32 fault_status;
unsigned long flag;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
fault_status = readl(iommu->reg + DMAR_FSTS_REG);
if (fault_status)
printk(KERN_ERR "DRHD: handling fault status reg %x\n",
@@ -1342,7 +1192,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
writel(DMA_FRCD_F, iommu->reg + reg +
fault_index * PRIMARY_FAULT_REG_LEN + 12);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
dmar_fault_do_one(iommu, type, fault_reason,
source_id, guest_addr);
@@ -1350,14 +1200,14 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
fault_index++;
if (fault_index >= cap_num_fault_regs(iommu->cap))
fault_index = 0;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
}
clear_rest:
/* clear all the other faults */
fault_status = readl(iommu->reg + DMAR_FSTS_REG);
writel(fault_status, iommu->reg + DMAR_FSTS_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
return IRQ_HANDLED;
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c621c98c99d..be1953c239b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -306,6 +306,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
return (pte->val & 3) != 0;
}
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+ return (pte->val & (1 << 7));
+}
+
static inline int first_pte_in_page(struct dma_pte *pte)
{
return !((unsigned long)pte & ~VTD_PAGE_MASK);
@@ -393,17 +398,20 @@ static long list_size;
static void domain_remove_dev_info(struct dmar_domain *domain);
-#ifdef CONFIG_DMAR_DEFAULT_ON
+#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
-#endif /*CONFIG_DMAR_DEFAULT_ON*/
+#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
+int intel_iommu_gfx_mapped;
+EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
+
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
@@ -577,17 +585,18 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
- int i, mask = 0xf;
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+ int mask = 0xf;
if (!intel_iommu_superpage) {
domain->iommu_superpage = 0;
return;
}
- domain->iommu_superpage = 4; /* 1TiB */
-
- for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
- mask |= cap_super_page_val(g_iommus[i]->cap);
+ /* set iommu_superpage to the smallest common denominator */
+ for_each_active_iommu(iommu, drhd) {
+ mask &= cap_super_page_val(iommu->cap);
if (!mask) {
break;
}
@@ -730,29 +739,23 @@ out:
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
- unsigned long pfn, int large_level)
+ unsigned long pfn, int target_level)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
struct dma_pte *parent, *pte = NULL;
int level = agaw_to_level(domain->agaw);
- int offset, target_level;
+ int offset;
BUG_ON(!domain->pgd);
BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
parent = domain->pgd;
- /* Search pte */
- if (!large_level)
- target_level = 1;
- else
- target_level = large_level;
-
while (level > 0) {
void *tmp_page;
offset = pfn_level_offset(pfn, level);
pte = &parent[offset];
- if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
+ if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
break;
if (level == target_level)
break;
@@ -816,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
}
/* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain,
+static int dma_pte_clear_range(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned int large_page = 1;
struct dma_pte *first_pte, *pte;
+ int order;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -846,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
(void *)pte - (void *)first_pte);
} while (start_pfn && start_pfn <= last_pfn);
+
+ order = (large_page - 1) * 9;
+ return order;
}
/* free page table pages. last level pte should already be cleared */
@@ -932,7 +939,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
addr = iommu->root_entry;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
@@ -941,7 +948,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_RTPS), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
@@ -952,14 +959,14 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
if (!rwbf_quirk && !cap_rwbf(iommu->cap))
return;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (!(val & DMA_GSTS_WBFS)), val);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determine if we need a write buffer flush */
@@ -986,14 +993,14 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
}
val |= DMA_CCMD_ICC;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
dmar_readq, (!(val & DMA_CCMD_ICC)), val);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determine if we need a write buffer flush */
@@ -1032,7 +1039,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
if (cap_write_drain(iommu->cap))
val |= DMA_TLB_WRITE_DRAIN;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
/* Note: Only uses first TLB reg currently */
if (val_iva)
dmar_writeq(iommu->reg + tlb_offset, val_iva);
@@ -1042,7 +1049,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
IOMMU_WAIT_OP(iommu, tlb_offset + 8,
dmar_readq, (!(val & DMA_TLB_IVT)), val);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
/* check IOTLB invalidation granularity */
if (DMA_TLB_IAIG(val) == 0)
@@ -1158,7 +1165,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
u32 pmen;
unsigned long flags;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
pmen = readl(iommu->reg + DMAR_PMEN_REG);
pmen &= ~DMA_PMEN_EPM;
writel(pmen, iommu->reg + DMAR_PMEN_REG);
@@ -1167,7 +1174,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
readl, !(pmen & DMA_PMEN_PRS), pmen);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int iommu_enable_translation(struct intel_iommu *iommu)
@@ -1175,7 +1182,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
u32 sts;
unsigned long flags;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
iommu->gcmd |= DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
@@ -1183,7 +1190,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_TES), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
return 0;
}
@@ -1192,7 +1199,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
u32 sts;
unsigned long flag;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
iommu->gcmd &= ~DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
@@ -1200,7 +1207,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (!(sts & DMA_GSTS_TES)), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
return 0;
}
@@ -2150,7 +2157,7 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
rmrr->end_address);
}
-#ifdef CONFIG_DMAR_FLOPPY_WA
+#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
struct pci_dev *pdev;
@@ -2173,7 +2180,7 @@ static inline void iommu_prepare_isa(void)
{
return;
}
-#endif /* !CONFIG_DMAR_FLPY_WA */
+#endif /* !CONFIG_INTEL_IOMMU_FLPY_WA */
static int md_domain_init(struct dmar_domain *domain, int guest_width);
@@ -2484,7 +2491,7 @@ static int __init init_dmars(void)
if (iommu_pass_through)
iommu_identity_mapping |= IDENTMAP_ALL;
-#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
iommu_identity_mapping |= IDENTMAP_GFX;
#endif
@@ -3226,9 +3233,6 @@ static void __init init_no_remapping_devices(void)
}
}
- if (dmar_map_gfx)
- return;
-
for_each_drhd_unit(drhd) {
int i;
if (drhd->ignored || drhd->include_all)
@@ -3236,18 +3240,23 @@ static void __init init_no_remapping_devices(void)
for (i = 0; i < drhd->devices_cnt; i++)
if (drhd->devices[i] &&
- !IS_GFX_DEVICE(drhd->devices[i]))
+ !IS_GFX_DEVICE(drhd->devices[i]))
break;
if (i < drhd->devices_cnt)
continue;
- /* bypass IOMMU if it is just for gfx devices */
- drhd->ignored = 1;
- for (i = 0; i < drhd->devices_cnt; i++) {
- if (!drhd->devices[i])
- continue;
- drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+ /* This IOMMU has *only* gfx devices. Either bypass it or
+ set the gfx_mapped flag, as appropriate */
+ if (dmar_map_gfx) {
+ intel_iommu_gfx_mapped = 1;
+ } else {
+ drhd->ignored = 1;
+ for (i = 0; i < drhd->devices_cnt; i++) {
+ if (!drhd->devices[i])
+ continue;
+ drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+ }
}
}
}
@@ -3320,7 +3329,7 @@ static int iommu_suspend(void)
for_each_active_iommu(iommu, drhd) {
iommu_disable_translation(iommu);
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
iommu->iommu_state[SR_DMAR_FECTL_REG] =
readl(iommu->reg + DMAR_FECTL_REG);
@@ -3331,7 +3340,7 @@ static int iommu_suspend(void)
iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
readl(iommu->reg + DMAR_FEUADDR_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
return 0;
@@ -3358,7 +3367,7 @@ static void iommu_resume(void)
for_each_active_iommu(iommu, drhd) {
- spin_lock_irqsave(&iommu->register_lock, flag);
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
iommu->reg + DMAR_FECTL_REG);
@@ -3369,7 +3378,7 @@ static void iommu_resume(void)
writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
iommu->reg + DMAR_FEUADDR_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
for_each_active_iommu(iommu, drhd)
@@ -3390,6 +3399,151 @@ static void __init init_iommu_pm_ops(void)
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_PM */
+LIST_HEAD(dmar_rmrr_units);
+
+static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
+{
+ list_add(&rmrr->list, &dmar_rmrr_units);
+}
+
+
+int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+{
+ struct acpi_dmar_reserved_memory *rmrr;
+ struct dmar_rmrr_unit *rmrru;
+
+ rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
+ if (!rmrru)
+ return -ENOMEM;
+
+ rmrru->hdr = header;
+ rmrr = (struct acpi_dmar_reserved_memory *)header;
+ rmrru->base_address = rmrr->base_address;
+ rmrru->end_address = rmrr->end_address;
+
+ dmar_register_rmrr_unit(rmrru);
+ return 0;
+}
+
+static int __init
+rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
+{
+ struct acpi_dmar_reserved_memory *rmrr;
+ int ret;
+
+ rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
+ ret = dmar_parse_dev_scope((void *)(rmrr + 1),
+ ((void *)rmrr) + rmrr->header.length,
+ &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
+
+ if (ret || (rmrru->devices_cnt == 0)) {
+ list_del(&rmrru->list);
+ kfree(rmrru);
+ }
+ return ret;
+}
+
+static LIST_HEAD(dmar_atsr_units);
+
+int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+{
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+ if (!atsru)
+ return -ENOMEM;
+
+ atsru->hdr = hdr;
+ atsru->include_all = atsr->flags & 0x1;
+
+ list_add(&atsru->list, &dmar_atsr_units);
+
+ return 0;
+}
+
+static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
+{
+ int rc;
+ struct acpi_dmar_atsr *atsr;
+
+ if (atsru->include_all)
+ return 0;
+
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ rc = dmar_parse_dev_scope((void *)(atsr + 1),
+ (void *)atsr + atsr->header.length,
+ &atsru->devices_cnt, &atsru->devices,
+ atsr->segment);
+ if (rc || !atsru->devices_cnt) {
+ list_del(&atsru->list);
+ kfree(atsru);
+ }
+
+ return rc;
+}
+
+int dmar_find_matched_atsr_unit(struct pci_dev *dev)
+{
+ int i;
+ struct pci_bus *bus;
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ dev = pci_physfn(dev);
+
+ list_for_each_entry(atsru, &dmar_atsr_units, list) {
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ if (atsr->segment == pci_domain_nr(dev->bus))
+ goto found;
+ }
+
+ return 0;
+
+found:
+ for (bus = dev->bus; bus; bus = bus->parent) {
+ struct pci_dev *bridge = bus->self;
+
+ if (!bridge || !pci_is_pcie(bridge) ||
+ bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
+ for (i = 0; i < atsru->devices_cnt; i++)
+ if (atsru->devices[i] == bridge)
+ return 1;
+ break;
+ }
+ }
+
+ if (atsru->include_all)
+ return 1;
+
+ return 0;
+}
+
+int dmar_parse_rmrr_atsr_dev(void)
+{
+ struct dmar_rmrr_unit *rmrr, *rmrr_n;
+ struct dmar_atsr_unit *atsr, *atsr_n;
+ int ret = 0;
+
+ list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
+ ret = rmrr_parse_dev(rmrr);
+ if (ret)
+ return ret;
+ }
+
+ list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+ ret = atsr_parse_dev(atsr);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
/*
* Here we only respond to action of unbound device from driver.
*
@@ -3439,16 +3593,12 @@ int __init intel_iommu_init(void)
return -ENODEV;
}
- if (dmar_dev_scope_init()) {
+ if (dmar_dev_scope_init() < 0) {
if (force_on)
panic("tboot: Failed to initialize DMAR device scope\n");
return -ENODEV;
}
- /*
- * Check the need for DMA-remapping initialization now.
- * Above initialization will also be used by Interrupt-remapping.
- */
if (no_iommu || dmar_disabled)
return -ENODEV;
@@ -3458,6 +3608,12 @@ int __init intel_iommu_init(void)
return -ENODEV;
}
+ if (list_empty(&dmar_rmrr_units))
+ printk(KERN_INFO "DMAR: No RMRR found\n");
+
+ if (list_empty(&dmar_atsr_units))
+ printk(KERN_INFO "DMAR: No ATSR found\n");
+
if (dmar_init_reserved_ranges()) {
if (force_on)
panic("tboot: Failed to reserve iommu ranges\n");
@@ -3568,6 +3724,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
found = 1;
}
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
if (found == 0) {
unsigned long tmp_flags;
spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
@@ -3584,8 +3742,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
spin_unlock_irqrestore(&iommu->lock, tmp_flags);
}
}
-
- spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
@@ -3739,6 +3895,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
vm_domain_exit(dmar_domain);
return -ENOMEM;
}
+ domain_update_iommu_cap(dmar_domain);
domain->priv = dmar_domain;
return 0;
@@ -3864,14 +4021,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
{
struct dmar_domain *dmar_domain = domain->priv;
size_t size = PAGE_SIZE << gfp_order;
+ int order;
- dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
(iova + size - 1) >> VTD_PAGE_SHIFT);
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
- return gfp_order;
+ return order;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3950,7 +4108,11 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
dmar_map_gfx = 0;
- }
+ } else if (dmar_map_gfx) {
+ /* we have to ensure the gfx device is idle before we flush */
+ printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
+ intel_iommu_strict = 1;
+ }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 1a89d4a2cad..07c9f189f31 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -21,6 +21,7 @@ int intr_remapping_enabled;
static int disable_intremap;
static int disable_sourceid_checking;
+static int no_x2apic_optout;
static __init int setup_nointremap(char *str)
{
@@ -34,18 +35,26 @@ static __init int setup_intremap(char *str)
if (!str)
return -EINVAL;
- if (!strncmp(str, "on", 2))
- disable_intremap = 0;
- else if (!strncmp(str, "off", 3))
- disable_intremap = 1;
- else if (!strncmp(str, "nosid", 5))
- disable_sourceid_checking = 1;
+ while (*str) {
+ if (!strncmp(str, "on", 2))
+ disable_intremap = 0;
+ else if (!strncmp(str, "off", 3))
+ disable_intremap = 1;
+ else if (!strncmp(str, "nosid", 5))
+ disable_sourceid_checking = 1;
+ else if (!strncmp(str, "no_x2apic_optout", 16))
+ no_x2apic_optout = 1;
+
+ str += strcspn(str, ",");
+ while (*str == ',')
+ str++;
+ }
return 0;
}
early_param("intremap", setup_intremap);
-static DEFINE_SPINLOCK(irq_2_ir_lock);
+static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
@@ -62,12 +71,12 @@ int get_irte(int irq, struct irte *entry)
if (!entry || !irq_iommu)
return -1;
- spin_lock_irqsave(&irq_2_ir_lock, flags);
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
index = irq_iommu->irte_index + irq_iommu->sub_handle;
*entry = *(irq_iommu->iommu->ir_table->base + index);
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return 0;
}
@@ -101,7 +110,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
return -1;
}
- spin_lock_irqsave(&irq_2_ir_lock, flags);
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
do {
for (i = index; i < index + count; i++)
if (table->base[i].present)
@@ -113,7 +122,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
if (index == start_index) {
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
printk(KERN_ERR "can't allocate an IRTE\n");
return -1;
}
@@ -127,7 +136,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = mask;
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return index;
}
@@ -152,10 +161,10 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
if (!irq_iommu)
return -1;
- spin_lock_irqsave(&irq_2_ir_lock, flags);
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
*sub_handle = irq_iommu->sub_handle;
index = irq_iommu->irte_index;
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return index;
}
@@ -167,14 +176,14 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
if (!irq_iommu)
return -1;
- spin_lock_irqsave(&irq_2_ir_lock, flags);
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = subhandle;
irq_iommu->irte_mask = 0;
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return 0;
}
@@ -190,7 +199,7 @@ int modify_irte(int irq, struct irte *irte_modified)
if (!irq_iommu)
return -1;
- spin_lock_irqsave(&irq_2_ir_lock, flags);
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
iommu = irq_iommu->iommu;
@@ -202,7 +211,7 @@ int modify_irte(int irq, struct irte *irte_modified)
__iommu_flush_cache(iommu, irte, sizeof(*irte));
rc = qi_flush_iec(iommu, index, 0);
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return rc;
}
@@ -270,7 +279,7 @@ int free_irte(int irq)
if (!irq_iommu)
return -1;
- spin_lock_irqsave(&irq_2_ir_lock, flags);
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
rc = clear_entries(irq_iommu);
@@ -279,7 +288,7 @@ int free_irte(int irq)
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = 0;
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return rc;
}
@@ -409,7 +418,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
addr = virt_to_phys((void *)iommu->ir_table->base);
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writeq(iommu->reg + DMAR_IRTA_REG,
(addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
@@ -420,7 +429,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRTPS), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
/*
* global invalidation of interrupt entry cache before enabling
@@ -428,7 +437,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
*/
qi_global_iec(iommu);
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
/* Enable interrupt-remapping */
iommu->gcmd |= DMA_GCMD_IRE;
@@ -437,7 +446,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
@@ -485,7 +494,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
*/
qi_global_iec(iommu);
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
if (!(sts & DMA_GSTS_IRES))
@@ -498,7 +507,16 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
readl, !(sts & DMA_GSTS_IRES), sts);
end:
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+static int __init dmar_x2apic_optout(void)
+{
+ struct acpi_table_dmar *dmar;
+ dmar = (struct acpi_table_dmar *)dmar_tbl;
+ if (!dmar || no_x2apic_optout)
+ return 0;
+ return dmar->flags & DMAR_X2APIC_OPT_OUT;
}
int __init intr_remapping_supported(void)
@@ -521,16 +539,25 @@ int __init intr_remapping_supported(void)
return 1;
}
-int __init enable_intr_remapping(int eim)
+int __init enable_intr_remapping(void)
{
struct dmar_drhd_unit *drhd;
int setup = 0;
+ int eim = 0;
if (parse_ioapics_under_ir() != 1) {
printk(KERN_INFO "Not enable interrupt remapping\n");
return -1;
}
+ if (x2apic_supported()) {
+ eim = !dmar_x2apic_optout();
+ WARN(!eim, KERN_WARNING
+ "Your BIOS is broken and requested that x2apic be disabled\n"
+ "This will leave your machine vulnerable to irq-injection attacks\n"
+ "Use 'intremap=no_x2apic_optout' to override BIOS request\n");
+ }
+
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;
@@ -606,8 +633,9 @@ int __init enable_intr_remapping(int eim)
goto error;
intr_remapping_enabled = 1;
+ pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");
- return 0;
+ return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
error:
/*
@@ -745,6 +773,15 @@ int __init parse_ioapics_under_ir(void)
return ir_supported;
}
+int ir_dev_scope_init(void)
+{
+ if (!intr_remapping_enabled)
+ return 0;
+
+ return dmar_dev_scope_init();
+}
+rootfs_initcall(ir_dev_scope_init);
+
void disable_intr_remapping(void)
{
struct dmar_drhd_unit *drhd;
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 6ed82add6ff..6ddb795e31c 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -2308,11 +2308,11 @@ static int __init isdn_init(void)
int i;
char tmprev[50];
- if (!(dev = vmalloc(sizeof(isdn_dev)))) {
+ dev = vzalloc(sizeof(isdn_dev));
+ if (!dev) {
printk(KERN_WARNING "isdn: Could not allocate device-struct.\n");
return -EIO;
}
- memset((char *) dev, 0, sizeof(isdn_dev));
init_timer(&dev->timer);
dev->timer.function = isdn_timer_funct;
spin_lock_init(&dev->lock);
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index d8504279e50..e5546cb3ac6 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1693,7 +1693,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
* line status register.
*/
if (info->flags & ISDN_ASYNC_INITIALIZED) {
- tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */
+ tty_wait_until_sent_from_close(tty, 3000); /* 30 seconds timeout */
/*
* Before we drop DTR, make sure the UART transmitter
* has completely drained; this is especially
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 2877291a9ed..0c41553ce68 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -1052,12 +1052,11 @@ dspcreate(struct channel_req *crq)
if (crq->protocol != ISDN_P_B_L2DSP
&& crq->protocol != ISDN_P_B_L2DSPHDLC)
return -EPROTONOSUPPORT;
- ndsp = vmalloc(sizeof(struct dsp));
+ ndsp = vzalloc(sizeof(struct dsp));
if (!ndsp) {
printk(KERN_ERR "%s: vmalloc struct dsp failed\n", __func__);
return -ENOMEM;
}
- memset(ndsp, 0, sizeof(struct dsp));
if (dsp_debug & DEBUG_DSP_CTRL)
printk(KERN_DEBUG "%s: creating new dsp instance\n", __func__);
diff --git a/drivers/isdn/mISDN/l1oip_codec.c b/drivers/isdn/mISDN/l1oip_codec.c
index bbfd1b863ed..5a89972624d 100644
--- a/drivers/isdn/mISDN/l1oip_codec.c
+++ b/drivers/isdn/mISDN/l1oip_codec.c
@@ -330,14 +330,12 @@ l1oip_4bit_alloc(int ulaw)
return 0;
/* alloc conversion tables */
- table_com = vmalloc(65536);
- table_dec = vmalloc(512);
+ table_com = vzalloc(65536);
+ table_dec = vzalloc(512);
if (!table_com || !table_dec) {
l1oip_4bit_free();
return -ENOMEM;
}
- memset(table_com, 0, 65536);
- memset(table_dec, 0, 512);
/* generate compression table */
i1 = 0;
while (i1 < 256) {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b591e726a6f..807c875f1c2 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -400,7 +400,7 @@ config LEDS_TRIGGER_TIMER
This allows LEDs to be controlled by a programmable timer
via sysfs. Some LED hardware can be programmed to start
blinking the LED without any further software interaction.
- For more details read Documentation/leds-class.txt.
+ For more details read Documentation/leds/leds-class.txt.
If unsure, say Y.
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 49da55c1528..8c2a000cf3f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->num_flush_requests = 1;
+ ti->discard_zeroes_data_unsupported = 1;
+
return 0;
bad:
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 89f73ca22cf..f84c08029b2 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
* corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
*/
if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
- if (!argc)
+ if (!argc) {
ti->error = "Feature corrupt_bio_byte requires parameters";
+ return -EINVAL;
+ }
r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
if (r)
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index f8214702963..32ac70861d6 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -628,6 +628,7 @@ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
job->kc = kc;
job->fn = fn;
job->context = context;
+ job->master_job = job;
atomic_inc(&kc->nr_jobs);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index a002dd85db1..86df8b2cf92 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "write_mostly option is only valid for RAID1";
return -EINVAL;
}
- if (value > rs->md.raid_disks) {
+ if (value >= rs->md.raid_disks) {
rs->ti->error = "Invalid write_mostly drive index given";
return -EINVAL;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 986b8754bb0..bc04518e9d8 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t)
return;
template_disk = dm_table_get_integrity_disk(t, true);
- if (!template_disk &&
- blk_integrity_is_initialized(dm_disk(t->md))) {
+ if (template_disk)
+ blk_integrity_register(dm_disk(t->md),
+ blk_get_integrity(template_disk));
+ else if (blk_integrity_is_initialized(dm_disk(t->md)))
DMWARN("%s: device no longer has a valid integrity profile",
dm_device_name(t->md));
- return;
- }
- blk_integrity_register(dm_disk(t->md),
- blk_get_integrity(template_disk));
+ else
+ DMWARN("%s: unable to establish an integrity profile",
+ dm_device_name(t->md));
}
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
return 0;
}
+static bool dm_table_discard_zeroes_data(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ /* Ensure that all targets supports discard_zeroes_data. */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (ti->discard_zeroes_data_unsupported)
+ return 0;
+ }
+
+ return 1;
+}
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_flush(q, flush);
+ if (!dm_table_discard_zeroes_data(t))
+ q->limits.discard_zeroes_data = 0;
+
dm_table_set_integrity(t);
/*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5404b229582..5c95ccb5950 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -61,6 +61,11 @@
static void autostart_arrays(int part);
#endif
+/* pers_list is a list of registered personalities protected
+ * by pers_lock.
+ * pers_lock does extra service to protect accesses to
+ * mddev->thread when the mutex cannot be held.
+ */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
@@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev)
} else
mutex_unlock(&mddev->reconfig_mutex);
+ /* was we've dropped the mutex we need a spinlock to
+ * make sur the thread doesn't disappear
+ */
+ spin_lock(&pers_lock);
md_wakeup_thread(mddev->thread);
+ spin_unlock(&pers_lock);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
@@ -6429,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
return thread;
}
-void md_unregister_thread(mdk_thread_t *thread)
+void md_unregister_thread(mdk_thread_t **threadp)
{
+ mdk_thread_t *thread = *threadp;
if (!thread)
return;
dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
+ /* Locking ensures that mddev_unlock does not wake_up a
+ * non-existent thread
+ */
+ spin_lock(&pers_lock);
+ *threadp = NULL;
+ spin_unlock(&pers_lock);
kthread_stop(thread->tsk);
kfree(thread);
@@ -7340,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev)
mdk_rdev_t *rdev;
/* resync has finished, collect result */
- md_unregister_thread(mddev->sync_thread);
- mddev->sync_thread = NULL;
+ md_unregister_thread(&mddev->sync_thread);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1e586bb4452..0a309dc29b4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);
extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
mddev_t *mddev, const char *name);
-extern void md_unregister_thread(mdk_thread_t *thread);
+extern void md_unregister_thread(mdk_thread_t **threadp);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev);
extern void md_write_start(mddev_t *mddev, struct bio *bi);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3535c23af28..d5b5fb30017 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev)
{
multipath_conf_t *conf = mddev->private;
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
mempool_destroy(conf->pool);
kfree(conf->multipaths);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f4622dd8fc5..d9587dffe53 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2562,8 +2562,7 @@ static int stop(mddev_t *mddev)
raise_barrier(conf);
lower_barrier(conf);
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
if (conf->r1bio_pool)
mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d7a8468ddea..0cd9672cf9c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2955,7 +2955,7 @@ static int run(mddev_t *mddev)
return 0;
out_free_conf:
- md_unregister_thread(mddev->thread);
+ md_unregister_thread(&mddev->thread);
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
safe_put_page(conf->tmppage);
@@ -2973,8 +2973,7 @@ static int stop(mddev_t *mddev)
raise_barrier(conf, 0);
lower_barrier(conf);
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 43709fa6b6d..ac5e8b57e50 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev)
return 0;
abort:
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
if (conf) {
print_raid5_conf(conf);
free_conf(conf);
@@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev)
{
raid5_conf_t *conf = mddev->private;
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
if (mddev->queue)
mddev->queue->backing_dev_info.congested_fn = NULL;
free_conf(conf);
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 51752a9ef7a..93d9869e0f1 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -1230,7 +1230,7 @@ static const struct net_device_ops dvb_netdev_ops = {
.ndo_open = dvb_net_open,
.ndo_stop = dvb_net_stop,
.ndo_start_xmit = dvb_net_tx,
- .ndo_set_multicast_list = dvb_net_set_multicast_list,
+ .ndo_set_rx_mode = dvb_net_set_multicast_list,
.ndo_set_mac_address = dvb_net_set_mac,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/media/dvb/dvb-usb/af9005-remote.c b/drivers/media/dvb/dvb-usb/af9005-remote.c
index c3bc64ed405..7e3961d0db6 100644
--- a/drivers/media/dvb/dvb-usb/af9005-remote.c
+++ b/drivers/media/dvb/dvb-usb/af9005-remote.c
@@ -21,7 +21,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * see Documentation/dvb/REDME.dvb-usb for more information
+ * see Documentation/dvb/README.dvb-usb for more information
*/
#include "af9005.h"
/* debug */
diff --git a/drivers/media/dvb/dvb-usb/af9005.c b/drivers/media/dvb/dvb-usb/af9005.c
index 51f6439dcfd..0351c0e52dd 100644
--- a/drivers/media/dvb/dvb-usb/af9005.c
+++ b/drivers/media/dvb/dvb-usb/af9005.c
@@ -19,7 +19,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * see Documentation/dvb/REDME.dvb-usb for more information
+ * see Documentation/dvb/README.dvb-usb for more information
*/
#include "af9005.h"
diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
index ba917359fa6..404f63a6f26 100644
--- a/drivers/media/dvb/frontends/dib3000.h
+++ b/drivers/media/dvb/frontends/dib3000.h
@@ -17,7 +17,7 @@
* Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver
* sources, on which this driver (and the dvb-dibusb) are based.
*
- * see Documentation/dvb/README.dibusb for more information
+ * see Documentation/dvb/README.dvb-usb for more information
*
*/
diff --git a/drivers/media/dvb/frontends/dib3000mb.c b/drivers/media/dvb/frontends/dib3000mb.c
index e80c5979636..437904cbf3e 100644
--- a/drivers/media/dvb/frontends/dib3000mb.c
+++ b/drivers/media/dvb/frontends/dib3000mb.c
@@ -17,7 +17,7 @@
* Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver
* sources, on which this driver (and the dvb-dibusb) are based.
*
- * see Documentation/dvb/README.dibusb for more information
+ * see Documentation/dvb/README.dvb-usb for more information
*
*/
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index 8c0e1927697..ec1d52f3890 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -402,7 +402,7 @@ static int fm_v4l2_vidioc_s_hw_freq_seek(struct file *file, void *priv,
static int fm_v4l2_vidioc_g_modulator(struct file *file, void *priv,
struct v4l2_modulator *mod)
{
- struct fmdev *fmdev = video_drvdata(file);;
+ struct fmdev *fmdev = video_drvdata(file);
if (mod->index != 0)
return -EINVAL;
diff --git a/drivers/media/video/cx18/cx18-mailbox.h b/drivers/media/video/cx18/cx18-mailbox.h
index 05fe6bdbe06..b63fdfaac49 100644
--- a/drivers/media/video/cx18/cx18-mailbox.h
+++ b/drivers/media/video/cx18/cx18-mailbox.h
@@ -69,7 +69,7 @@ struct cx18_mailbox {
/* Each command can have up to 6 arguments */
u32 args[MAX_MB_ARGUMENTS];
/* The return code can be one of the codes in the file cx23418.h. If the
- command is completed successfuly, the error will be ERR_SYS_SUCCESS.
+ command is completed successfully, the error will be ERR_SYS_SUCCESS.
If it is pending, the code is ERR_SYS_PENDING. If it failed, the error
code would indicate the task from which the error originated and will
be one of the errors in cx23418.h. In that case, the following
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index d7215651772..a5c9ed128b9 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -181,7 +181,7 @@ static void v4l2_device_release(struct device *cd)
* TODO: In the long run all drivers that use v4l2_device should use the
* v4l2_device release callback. This check will then be unnecessary.
*/
- if (v4l2_dev->release == NULL)
+ if (v4l2_dev && v4l2_dev->release == NULL)
v4l2_dev = NULL;
/* Release video_device and perform other
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index bb7f17f2a33..cbf13d09b4a 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -21,7 +21,7 @@
#include <media/videobuf-dma-sg.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/via-core.h>
#include <linux/via-gpio.h>
#include <linux/via_i2c.h>
@@ -69,7 +69,7 @@ struct via_camera {
struct mutex lock;
enum viacam_opstate opstate;
unsigned long flags;
- struct pm_qos_request_list qos_request;
+ struct pm_qos_request qos_request;
/*
* GPIO info for power/reset management
*/
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 21574bdf485..a67adcbd0fa 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -404,6 +404,7 @@ config MFD_WM831X_I2C
bool "Support Wolfson Microelectronics WM831x/2x PMICs with I2C"
select MFD_CORE
select MFD_WM831X
+ select REGMAP_I2C
depends on I2C=y && GENERIC_HARDIRQS
help
Support for the Wolfson Microelecronics WM831x and WM832x PMICs
@@ -415,6 +416,7 @@ config MFD_WM831X_SPI
bool "Support Wolfson Microelectronics WM831x/2x PMICs with SPI"
select MFD_CORE
select MFD_WM831X
+ select REGMAP_SPI
depends on SPI_MASTER && GENERIC_HARDIRQS
help
Support for the Wolfson Microelecronics WM831x and WM832x PMICs
@@ -488,6 +490,7 @@ config MFD_WM8350_I2C
config MFD_WM8994
bool "Support Wolfson Microelectronics WM8994"
select MFD_CORE
+ select REGMAP_I2C
depends on I2C=y && GENERIC_HARDIRQS
help
The WM8994 is a highly integrated hi-fi CODEC designed for
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 282e76ab678..0a2b8d41a70 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -18,12 +18,14 @@
#include <linux/delay.h>
#include <linux/mfd/core.h>
#include <linux/slab.h>
+#include <linux/err.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
#include <linux/mfd/wm831x/irq.h>
#include <linux/mfd/wm831x/auxadc.h>
#include <linux/mfd/wm831x/otp.h>
+#include <linux/mfd/wm831x/pmu.h>
#include <linux/mfd/wm831x/regulator.h>
/* Current settings - values are 2*2^(reg_val/4) microamps. These are
@@ -160,27 +162,350 @@ int wm831x_reg_unlock(struct wm831x *wm831x)
}
EXPORT_SYMBOL_GPL(wm831x_reg_unlock);
-static int wm831x_read(struct wm831x *wm831x, unsigned short reg,
- int bytes, void *dest)
+static bool wm831x_reg_readable(struct device *dev, unsigned int reg)
{
- int ret, i;
- u16 *buf = dest;
-
- BUG_ON(bytes % 2);
- BUG_ON(bytes <= 0);
+ switch (reg) {
+ case WM831X_RESET_ID:
+ case WM831X_REVISION:
+ case WM831X_PARENT_ID:
+ case WM831X_SYSVDD_CONTROL:
+ case WM831X_THERMAL_MONITORING:
+ case WM831X_POWER_STATE:
+ case WM831X_WATCHDOG:
+ case WM831X_ON_PIN_CONTROL:
+ case WM831X_RESET_CONTROL:
+ case WM831X_CONTROL_INTERFACE:
+ case WM831X_SECURITY_KEY:
+ case WM831X_SOFTWARE_SCRATCH:
+ case WM831X_OTP_CONTROL:
+ case WM831X_GPIO_LEVEL:
+ case WM831X_SYSTEM_STATUS:
+ case WM831X_ON_SOURCE:
+ case WM831X_OFF_SOURCE:
+ case WM831X_SYSTEM_INTERRUPTS:
+ case WM831X_INTERRUPT_STATUS_1:
+ case WM831X_INTERRUPT_STATUS_2:
+ case WM831X_INTERRUPT_STATUS_3:
+ case WM831X_INTERRUPT_STATUS_4:
+ case WM831X_INTERRUPT_STATUS_5:
+ case WM831X_IRQ_CONFIG:
+ case WM831X_SYSTEM_INTERRUPTS_MASK:
+ case WM831X_INTERRUPT_STATUS_1_MASK:
+ case WM831X_INTERRUPT_STATUS_2_MASK:
+ case WM831X_INTERRUPT_STATUS_3_MASK:
+ case WM831X_INTERRUPT_STATUS_4_MASK:
+ case WM831X_INTERRUPT_STATUS_5_MASK:
+ case WM831X_RTC_WRITE_COUNTER:
+ case WM831X_RTC_TIME_1:
+ case WM831X_RTC_TIME_2:
+ case WM831X_RTC_ALARM_1:
+ case WM831X_RTC_ALARM_2:
+ case WM831X_RTC_CONTROL:
+ case WM831X_RTC_TRIM:
+ case WM831X_TOUCH_CONTROL_1:
+ case WM831X_TOUCH_CONTROL_2:
+ case WM831X_TOUCH_DATA_X:
+ case WM831X_TOUCH_DATA_Y:
+ case WM831X_TOUCH_DATA_Z:
+ case WM831X_AUXADC_DATA:
+ case WM831X_AUXADC_CONTROL:
+ case WM831X_AUXADC_SOURCE:
+ case WM831X_COMPARATOR_CONTROL:
+ case WM831X_COMPARATOR_1:
+ case WM831X_COMPARATOR_2:
+ case WM831X_COMPARATOR_3:
+ case WM831X_COMPARATOR_4:
+ case WM831X_GPIO1_CONTROL:
+ case WM831X_GPIO2_CONTROL:
+ case WM831X_GPIO3_CONTROL:
+ case WM831X_GPIO4_CONTROL:
+ case WM831X_GPIO5_CONTROL:
+ case WM831X_GPIO6_CONTROL:
+ case WM831X_GPIO7_CONTROL:
+ case WM831X_GPIO8_CONTROL:
+ case WM831X_GPIO9_CONTROL:
+ case WM831X_GPIO10_CONTROL:
+ case WM831X_GPIO11_CONTROL:
+ case WM831X_GPIO12_CONTROL:
+ case WM831X_GPIO13_CONTROL:
+ case WM831X_GPIO14_CONTROL:
+ case WM831X_GPIO15_CONTROL:
+ case WM831X_GPIO16_CONTROL:
+ case WM831X_CHARGER_CONTROL_1:
+ case WM831X_CHARGER_CONTROL_2:
+ case WM831X_CHARGER_STATUS:
+ case WM831X_BACKUP_CHARGER_CONTROL:
+ case WM831X_STATUS_LED_1:
+ case WM831X_STATUS_LED_2:
+ case WM831X_CURRENT_SINK_1:
+ case WM831X_CURRENT_SINK_2:
+ case WM831X_DCDC_ENABLE:
+ case WM831X_LDO_ENABLE:
+ case WM831X_DCDC_STATUS:
+ case WM831X_LDO_STATUS:
+ case WM831X_DCDC_UV_STATUS:
+ case WM831X_LDO_UV_STATUS:
+ case WM831X_DC1_CONTROL_1:
+ case WM831X_DC1_CONTROL_2:
+ case WM831X_DC1_ON_CONFIG:
+ case WM831X_DC1_SLEEP_CONTROL:
+ case WM831X_DC1_DVS_CONTROL:
+ case WM831X_DC2_CONTROL_1:
+ case WM831X_DC2_CONTROL_2:
+ case WM831X_DC2_ON_CONFIG:
+ case WM831X_DC2_SLEEP_CONTROL:
+ case WM831X_DC2_DVS_CONTROL:
+ case WM831X_DC3_CONTROL_1:
+ case WM831X_DC3_CONTROL_2:
+ case WM831X_DC3_ON_CONFIG:
+ case WM831X_DC3_SLEEP_CONTROL:
+ case WM831X_DC4_CONTROL:
+ case WM831X_DC4_SLEEP_CONTROL:
+ case WM831X_EPE1_CONTROL:
+ case WM831X_EPE2_CONTROL:
+ case WM831X_LDO1_CONTROL:
+ case WM831X_LDO1_ON_CONTROL:
+ case WM831X_LDO1_SLEEP_CONTROL:
+ case WM831X_LDO2_CONTROL:
+ case WM831X_LDO2_ON_CONTROL:
+ case WM831X_LDO2_SLEEP_CONTROL:
+ case WM831X_LDO3_CONTROL:
+ case WM831X_LDO3_ON_CONTROL:
+ case WM831X_LDO3_SLEEP_CONTROL:
+ case WM831X_LDO4_CONTROL:
+ case WM831X_LDO4_ON_CONTROL:
+ case WM831X_LDO4_SLEEP_CONTROL:
+ case WM831X_LDO5_CONTROL:
+ case WM831X_LDO5_ON_CONTROL:
+ case WM831X_LDO5_SLEEP_CONTROL:
+ case WM831X_LDO6_CONTROL:
+ case WM831X_LDO6_ON_CONTROL:
+ case WM831X_LDO6_SLEEP_CONTROL:
+ case WM831X_LDO7_CONTROL:
+ case WM831X_LDO7_ON_CONTROL:
+ case WM831X_LDO7_SLEEP_CONTROL:
+ case WM831X_LDO8_CONTROL:
+ case WM831X_LDO8_ON_CONTROL:
+ case WM831X_LDO8_SLEEP_CONTROL:
+ case WM831X_LDO9_CONTROL:
+ case WM831X_LDO9_ON_CONTROL:
+ case WM831X_LDO9_SLEEP_CONTROL:
+ case WM831X_LDO10_CONTROL:
+ case WM831X_LDO10_ON_CONTROL:
+ case WM831X_LDO10_SLEEP_CONTROL:
+ case WM831X_LDO11_ON_CONTROL:
+ case WM831X_LDO11_SLEEP_CONTROL:
+ case WM831X_POWER_GOOD_SOURCE_1:
+ case WM831X_POWER_GOOD_SOURCE_2:
+ case WM831X_CLOCK_CONTROL_1:
+ case WM831X_CLOCK_CONTROL_2:
+ case WM831X_FLL_CONTROL_1:
+ case WM831X_FLL_CONTROL_2:
+ case WM831X_FLL_CONTROL_3:
+ case WM831X_FLL_CONTROL_4:
+ case WM831X_FLL_CONTROL_5:
+ case WM831X_UNIQUE_ID_1:
+ case WM831X_UNIQUE_ID_2:
+ case WM831X_UNIQUE_ID_3:
+ case WM831X_UNIQUE_ID_4:
+ case WM831X_UNIQUE_ID_5:
+ case WM831X_UNIQUE_ID_6:
+ case WM831X_UNIQUE_ID_7:
+ case WM831X_UNIQUE_ID_8:
+ case WM831X_FACTORY_OTP_ID:
+ case WM831X_FACTORY_OTP_1:
+ case WM831X_FACTORY_OTP_2:
+ case WM831X_FACTORY_OTP_3:
+ case WM831X_FACTORY_OTP_4:
+ case WM831X_FACTORY_OTP_5:
+ case WM831X_CUSTOMER_OTP_ID:
+ case WM831X_DC1_OTP_CONTROL:
+ case WM831X_DC2_OTP_CONTROL:
+ case WM831X_DC3_OTP_CONTROL:
+ case WM831X_LDO1_2_OTP_CONTROL:
+ case WM831X_LDO3_4_OTP_CONTROL:
+ case WM831X_LDO5_6_OTP_CONTROL:
+ case WM831X_LDO7_8_OTP_CONTROL:
+ case WM831X_LDO9_10_OTP_CONTROL:
+ case WM831X_LDO11_EPE_CONTROL:
+ case WM831X_GPIO1_OTP_CONTROL:
+ case WM831X_GPIO2_OTP_CONTROL:
+ case WM831X_GPIO3_OTP_CONTROL:
+ case WM831X_GPIO4_OTP_CONTROL:
+ case WM831X_GPIO5_OTP_CONTROL:
+ case WM831X_GPIO6_OTP_CONTROL:
+ case WM831X_DBE_CHECK_DATA:
+ return true;
+ default:
+ return false;
+ }
+}
- ret = wm831x->read_dev(wm831x, reg, bytes, dest);
- if (ret < 0)
- return ret;
+static bool wm831x_reg_writeable(struct device *dev, unsigned int reg)
+{
+ struct wm831x *wm831x = dev_get_drvdata(dev);
- for (i = 0; i < bytes / 2; i++) {
- buf[i] = be16_to_cpu(buf[i]);
+ if (wm831x_reg_locked(wm831x, reg))
+ return false;
- dev_vdbg(wm831x->dev, "Read %04x from R%d(0x%x)\n",
- buf[i], reg + i, reg + i);
+ switch (reg) {
+ case WM831X_SYSVDD_CONTROL:
+ case WM831X_THERMAL_MONITORING:
+ case WM831X_POWER_STATE:
+ case WM831X_WATCHDOG:
+ case WM831X_ON_PIN_CONTROL:
+ case WM831X_RESET_CONTROL:
+ case WM831X_CONTROL_INTERFACE:
+ case WM831X_SECURITY_KEY:
+ case WM831X_SOFTWARE_SCRATCH:
+ case WM831X_OTP_CONTROL:
+ case WM831X_GPIO_LEVEL:
+ case WM831X_INTERRUPT_STATUS_1:
+ case WM831X_INTERRUPT_STATUS_2:
+ case WM831X_INTERRUPT_STATUS_3:
+ case WM831X_INTERRUPT_STATUS_4:
+ case WM831X_INTERRUPT_STATUS_5:
+ case WM831X_IRQ_CONFIG:
+ case WM831X_SYSTEM_INTERRUPTS_MASK:
+ case WM831X_INTERRUPT_STATUS_1_MASK:
+ case WM831X_INTERRUPT_STATUS_2_MASK:
+ case WM831X_INTERRUPT_STATUS_3_MASK:
+ case WM831X_INTERRUPT_STATUS_4_MASK:
+ case WM831X_INTERRUPT_STATUS_5_MASK:
+ case WM831X_RTC_TIME_1:
+ case WM831X_RTC_TIME_2:
+ case WM831X_RTC_ALARM_1:
+ case WM831X_RTC_ALARM_2:
+ case WM831X_RTC_CONTROL:
+ case WM831X_RTC_TRIM:
+ case WM831X_TOUCH_CONTROL_1:
+ case WM831X_TOUCH_CONTROL_2:
+ case WM831X_AUXADC_CONTROL:
+ case WM831X_AUXADC_SOURCE:
+ case WM831X_COMPARATOR_CONTROL:
+ case WM831X_COMPARATOR_1:
+ case WM831X_COMPARATOR_2:
+ case WM831X_COMPARATOR_3:
+ case WM831X_COMPARATOR_4:
+ case WM831X_GPIO1_CONTROL:
+ case WM831X_GPIO2_CONTROL:
+ case WM831X_GPIO3_CONTROL:
+ case WM831X_GPIO4_CONTROL:
+ case WM831X_GPIO5_CONTROL:
+ case WM831X_GPIO6_CONTROL:
+ case WM831X_GPIO7_CONTROL:
+ case WM831X_GPIO8_CONTROL:
+ case WM831X_GPIO9_CONTROL:
+ case WM831X_GPIO10_CONTROL:
+ case WM831X_GPIO11_CONTROL:
+ case WM831X_GPIO12_CONTROL:
+ case WM831X_GPIO13_CONTROL:
+ case WM831X_GPIO14_CONTROL:
+ case WM831X_GPIO15_CONTROL:
+ case WM831X_GPIO16_CONTROL:
+ case WM831X_CHARGER_CONTROL_1:
+ case WM831X_CHARGER_CONTROL_2:
+ case WM831X_CHARGER_STATUS:
+ case WM831X_BACKUP_CHARGER_CONTROL:
+ case WM831X_STATUS_LED_1:
+ case WM831X_STATUS_LED_2:
+ case WM831X_CURRENT_SINK_1:
+ case WM831X_CURRENT_SINK_2:
+ case WM831X_DCDC_ENABLE:
+ case WM831X_LDO_ENABLE:
+ case WM831X_DC1_CONTROL_1:
+ case WM831X_DC1_CONTROL_2:
+ case WM831X_DC1_ON_CONFIG:
+ case WM831X_DC1_SLEEP_CONTROL:
+ case WM831X_DC1_DVS_CONTROL:
+ case WM831X_DC2_CONTROL_1:
+ case WM831X_DC2_CONTROL_2:
+ case WM831X_DC2_ON_CONFIG:
+ case WM831X_DC2_SLEEP_CONTROL:
+ case WM831X_DC2_DVS_CONTROL:
+ case WM831X_DC3_CONTROL_1:
+ case WM831X_DC3_CONTROL_2:
+ case WM831X_DC3_ON_CONFIG:
+ case WM831X_DC3_SLEEP_CONTROL:
+ case WM831X_DC4_CONTROL:
+ case WM831X_DC4_SLEEP_CONTROL:
+ case WM831X_EPE1_CONTROL:
+ case WM831X_EPE2_CONTROL:
+ case WM831X_LDO1_CONTROL:
+ case WM831X_LDO1_ON_CONTROL:
+ case WM831X_LDO1_SLEEP_CONTROL:
+ case WM831X_LDO2_CONTROL:
+ case WM831X_LDO2_ON_CONTROL:
+ case WM831X_LDO2_SLEEP_CONTROL:
+ case WM831X_LDO3_CONTROL:
+ case WM831X_LDO3_ON_CONTROL:
+ case WM831X_LDO3_SLEEP_CONTROL:
+ case WM831X_LDO4_CONTROL:
+ case WM831X_LDO4_ON_CONTROL:
+ case WM831X_LDO4_SLEEP_CONTROL:
+ case WM831X_LDO5_CONTROL:
+ case WM831X_LDO5_ON_CONTROL:
+ case WM831X_LDO5_SLEEP_CONTROL:
+ case WM831X_LDO6_CONTROL:
+ case WM831X_LDO6_ON_CONTROL:
+ case WM831X_LDO6_SLEEP_CONTROL:
+ case WM831X_LDO7_CONTROL:
+ case WM831X_LDO7_ON_CONTROL:
+ case WM831X_LDO7_SLEEP_CONTROL:
+ case WM831X_LDO8_CONTROL:
+ case WM831X_LDO8_ON_CONTROL:
+ case WM831X_LDO8_SLEEP_CONTROL:
+ case WM831X_LDO9_CONTROL:
+ case WM831X_LDO9_ON_CONTROL:
+ case WM831X_LDO9_SLEEP_CONTROL:
+ case WM831X_LDO10_CONTROL:
+ case WM831X_LDO10_ON_CONTROL:
+ case WM831X_LDO10_SLEEP_CONTROL:
+ case WM831X_LDO11_ON_CONTROL:
+ case WM831X_LDO11_SLEEP_CONTROL:
+ case WM831X_POWER_GOOD_SOURCE_1:
+ case WM831X_POWER_GOOD_SOURCE_2:
+ case WM831X_CLOCK_CONTROL_1:
+ case WM831X_CLOCK_CONTROL_2:
+ case WM831X_FLL_CONTROL_1:
+ case WM831X_FLL_CONTROL_2:
+ case WM831X_FLL_CONTROL_3:
+ case WM831X_FLL_CONTROL_4:
+ case WM831X_FLL_CONTROL_5:
+ return true;
+ default:
+ return false;
}
+}
- return 0;
+static bool wm831x_reg_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM831X_SYSTEM_STATUS:
+ case WM831X_ON_SOURCE:
+ case WM831X_OFF_SOURCE:
+ case WM831X_GPIO_LEVEL:
+ case WM831X_SYSTEM_INTERRUPTS:
+ case WM831X_INTERRUPT_STATUS_1:
+ case WM831X_INTERRUPT_STATUS_2:
+ case WM831X_INTERRUPT_STATUS_3:
+ case WM831X_INTERRUPT_STATUS_4:
+ case WM831X_INTERRUPT_STATUS_5:
+ case WM831X_RTC_TIME_1:
+ case WM831X_RTC_TIME_2:
+ case WM831X_TOUCH_DATA_X:
+ case WM831X_TOUCH_DATA_Y:
+ case WM831X_TOUCH_DATA_Z:
+ case WM831X_AUXADC_DATA:
+ case WM831X_CHARGER_STATUS:
+ case WM831X_DCDC_STATUS:
+ case WM831X_LDO_STATUS:
+ case WM831X_DCDC_UV_STATUS:
+ case WM831X_LDO_UV_STATUS:
+ return true;
+ default:
+ return false;
+ }
}
/**
@@ -191,14 +516,10 @@ static int wm831x_read(struct wm831x *wm831x, unsigned short reg,
*/
int wm831x_reg_read(struct wm831x *wm831x, unsigned short reg)
{
- unsigned short val;
+ unsigned int val;
int ret;
- mutex_lock(&wm831x->io_lock);
-
- ret = wm831x_read(wm831x, reg, 2, &val);
-
- mutex_unlock(&wm831x->io_lock);
+ ret = regmap_read(wm831x->regmap, reg, &val);
if (ret < 0)
return ret;
@@ -218,15 +539,7 @@ EXPORT_SYMBOL_GPL(wm831x_reg_read);
int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
int count, u16 *buf)
{
- int ret;
-
- mutex_lock(&wm831x->io_lock);
-
- ret = wm831x_read(wm831x, reg, count * 2, buf);
-
- mutex_unlock(&wm831x->io_lock);
-
- return ret;
+ return regmap_bulk_read(wm831x->regmap, reg, buf, count);
}
EXPORT_SYMBOL_GPL(wm831x_bulk_read);
@@ -234,7 +547,7 @@ static int wm831x_write(struct wm831x *wm831x, unsigned short reg,
int bytes, void *src)
{
u16 *buf = src;
- int i;
+ int i, ret;
BUG_ON(bytes % 2);
BUG_ON(bytes <= 0);
@@ -245,11 +558,10 @@ static int wm831x_write(struct wm831x *wm831x, unsigned short reg,
dev_vdbg(wm831x->dev, "Write %04x to R%d(0x%x)\n",
buf[i], reg + i, reg + i);
-
- buf[i] = cpu_to_be16(buf[i]);
+ ret = regmap_write(wm831x->regmap, reg + i, buf[i]);
}
- return wm831x->write_dev(wm831x, reg, bytes, src);
+ return 0;
}
/**
@@ -286,20 +598,14 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
unsigned short mask, unsigned short val)
{
int ret;
- u16 r;
mutex_lock(&wm831x->io_lock);
- ret = wm831x_read(wm831x, reg, 2, &r);
- if (ret < 0)
- goto out;
-
- r &= ~mask;
- r |= val & mask;
-
- ret = wm831x_write(wm831x, reg, 2, &r);
+ if (!wm831x_reg_locked(wm831x, reg))
+ ret = regmap_update_bits(wm831x->regmap, reg, mask, val);
+ else
+ ret = -EPERM;
-out:
mutex_unlock(&wm831x->io_lock);
return ret;
@@ -1292,6 +1598,19 @@ static struct mfd_cell backlight_devs[] = {
},
};
+struct regmap_config wm831x_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .max_register = WM831X_DBE_CHECK_DATA,
+ .readable_reg = wm831x_reg_readable,
+ .writeable_reg = wm831x_reg_writeable,
+ .volatile_reg = wm831x_reg_volatile,
+};
+EXPORT_SYMBOL_GPL(wm831x_regmap_config);
+
/*
* Instantiate the generic non-control parts of the device.
*/
@@ -1305,11 +1624,12 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
mutex_init(&wm831x->io_lock);
mutex_init(&wm831x->key_lock);
dev_set_drvdata(wm831x->dev, wm831x);
+ wm831x->soft_shutdown = pdata->soft_shutdown;
ret = wm831x_reg_read(wm831x, WM831X_PARENT_ID);
if (ret < 0) {
dev_err(wm831x->dev, "Failed to read parent ID: %d\n", ret);
- goto err;
+ goto err_regmap;
}
switch (ret) {
case 0x6204:
@@ -1318,20 +1638,20 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
default:
dev_err(wm831x->dev, "Device is not a WM831x: ID %x\n", ret);
ret = -EINVAL;
- goto err;
+ goto err_regmap;
}
ret = wm831x_reg_read(wm831x, WM831X_REVISION);
if (ret < 0) {
dev_err(wm831x->dev, "Failed to read revision: %d\n", ret);
- goto err;
+ goto err_regmap;
}
rev = (ret & WM831X_PARENT_REV_MASK) >> WM831X_PARENT_REV_SHIFT;
ret = wm831x_reg_read(wm831x, WM831X_RESET_ID);
if (ret < 0) {
dev_err(wm831x->dev, "Failed to read device ID: %d\n", ret);
- goto err;
+ goto err_regmap;
}
/* Some engineering samples do not have the ID set, rely on
@@ -1406,7 +1726,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
default:
dev_err(wm831x->dev, "Unknown WM831x device %04x\n", ret);
ret = -EINVAL;
- goto err;
+ goto err_regmap;
}
/* This will need revisiting in future but is OK for all
@@ -1420,7 +1740,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
ret = wm831x_reg_read(wm831x, WM831X_SECURITY_KEY);
if (ret < 0) {
dev_err(wm831x->dev, "Failed to read security key: %d\n", ret);
- goto err;
+ goto err_regmap;
}
if (ret != 0) {
dev_warn(wm831x->dev, "Security key had non-zero value %x\n",
@@ -1433,7 +1753,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
ret = pdata->pre_init(wm831x);
if (ret != 0) {
dev_err(wm831x->dev, "pre_init() failed: %d\n", ret);
- goto err;
+ goto err_regmap;
}
}
@@ -1456,7 +1776,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
ret = wm831x_irq_init(wm831x, irq);
if (ret != 0)
- goto err;
+ goto err_regmap;
wm831x_auxadc_init(wm831x);
@@ -1552,8 +1872,9 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
err_irq:
wm831x_irq_exit(wm831x);
-err:
+err_regmap:
mfd_remove_devices(wm831x->dev);
+ regmap_exit(wm831x->regmap);
kfree(wm831x);
return ret;
}
@@ -1565,6 +1886,7 @@ void wm831x_device_exit(struct wm831x *wm831x)
if (wm831x->irq_base)
free_irq(wm831x->irq_base + WM831X_IRQ_AUXADC_DATA, wm831x);
wm831x_irq_exit(wm831x);
+ regmap_exit(wm831x->regmap);
kfree(wm831x);
}
@@ -1604,6 +1926,15 @@ int wm831x_device_suspend(struct wm831x *wm831x)
return 0;
}
+void wm831x_device_shutdown(struct wm831x *wm831x)
+{
+ if (wm831x->soft_shutdown) {
+ dev_info(wm831x->dev, "Initiating shutdown...\n");
+ wm831x_set_bits(wm831x, WM831X_POWER_STATE, WM831X_CHIP_ON, 0);
+ }
+}
+EXPORT_SYMBOL_GPL(wm831x_device_shutdown);
+
MODULE_DESCRIPTION("Core support for the WM831X AudioPlus PMIC");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Brown");
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index a06cbc73971..ac8da1d439d 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -18,67 +18,17 @@
#include <linux/delay.h>
#include <linux/mfd/core.h>
#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
-static int wm831x_i2c_read_device(struct wm831x *wm831x, unsigned short reg,
- int bytes, void *dest)
-{
- struct i2c_client *i2c = wm831x->control_data;
- int ret;
- u16 r = cpu_to_be16(reg);
-
- ret = i2c_master_send(i2c, (unsigned char *)&r, 2);
- if (ret < 0)
- return ret;
- if (ret != 2)
- return -EIO;
-
- ret = i2c_master_recv(i2c, dest, bytes);
- if (ret < 0)
- return ret;
- if (ret != bytes)
- return -EIO;
- return 0;
-}
-
-/* Currently we allocate the write buffer on the stack; this is OK for
- * small writes - if we need to do large writes this will need to be
- * revised.
- */
-static int wm831x_i2c_write_device(struct wm831x *wm831x, unsigned short reg,
- int bytes, void *src)
-{
- struct i2c_client *i2c = wm831x->control_data;
- struct i2c_msg xfer[2];
- int ret;
-
- reg = cpu_to_be16(reg);
-
- xfer[0].addr = i2c->addr;
- xfer[0].flags = 0;
- xfer[0].len = 2;
- xfer[0].buf = (char *)&reg;
-
- xfer[1].addr = i2c->addr;
- xfer[1].flags = I2C_M_NOSTART;
- xfer[1].len = bytes;
- xfer[1].buf = (char *)src;
-
- ret = i2c_transfer(i2c->adapter, xfer, 2);
- if (ret < 0)
- return ret;
- if (ret != 2)
- return -EIO;
-
- return 0;
-}
-
static int wm831x_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm831x *wm831x;
+ int ret;
wm831x = kzalloc(sizeof(struct wm831x), GFP_KERNEL);
if (wm831x == NULL)
@@ -86,9 +36,15 @@ static int wm831x_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, wm831x);
wm831x->dev = &i2c->dev;
- wm831x->control_data = i2c;
- wm831x->read_dev = wm831x_i2c_read_device;
- wm831x->write_dev = wm831x_i2c_write_device;
+
+ wm831x->regmap = regmap_init_i2c(i2c, &wm831x_regmap_config);
+ if (IS_ERR(wm831x->regmap)) {
+ ret = PTR_ERR(wm831x->regmap);
+ dev_err(wm831x->dev, "Failed to allocate register map: %d\n",
+ ret);
+ kfree(wm831x);
+ return ret;
+ }
return wm831x_device_init(wm831x, id->driver_data, i2c->irq);
}
@@ -109,6 +65,13 @@ static int wm831x_i2c_suspend(struct device *dev)
return wm831x_device_suspend(wm831x);
}
+static void wm831x_i2c_shutdown(struct i2c_client *i2c)
+{
+ struct wm831x *wm831x = i2c_get_clientdata(i2c);
+
+ wm831x_device_shutdown(wm831x);
+}
+
static const struct i2c_device_id wm831x_i2c_id[] = {
{ "wm8310", WM8310 },
{ "wm8311", WM8311 },
@@ -133,6 +96,7 @@ static struct i2c_driver wm831x_i2c_driver = {
},
.probe = wm831x_i2c_probe,
.remove = wm831x_i2c_remove,
+ .shutdown = wm831x_i2c_shutdown,
.id_table = wm831x_i2c_id,
};
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index eed8e4f7a5a..8d6a9a969db 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -16,78 +16,19 @@
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+#include <linux/err.h>
#include <linux/mfd/wm831x/core.h>
-static int wm831x_spi_read_device(struct wm831x *wm831x, unsigned short reg,
- int bytes, void *dest)
-{
- u16 tx_val;
- u16 *d = dest;
- int r, ret;
-
- /* Go register at a time */
- for (r = reg; r < reg + (bytes / 2); r++) {
- tx_val = r | 0x8000;
-
- ret = spi_write_then_read(wm831x->control_data,
- (u8 *)&tx_val, 2, (u8 *)d, 2);
- if (ret != 0)
- return ret;
-
- *d = be16_to_cpu(*d);
-
- d++;
- }
-
- return 0;
-}
-
-static int wm831x_spi_write_device(struct wm831x *wm831x, unsigned short reg,
- int bytes, void *src)
-{
- struct spi_device *spi = wm831x->control_data;
- u16 *s = src;
- u16 data[2];
- int ret, r;
-
- /* Go register at a time */
- for (r = reg; r < reg + (bytes / 2); r++) {
- data[0] = r;
- data[1] = *s++;
-
- ret = spi_write(spi, (char *)&data, sizeof(data));
- if (ret != 0)
- return ret;
- }
-
- return 0;
-}
-
static int __devinit wm831x_spi_probe(struct spi_device *spi)
{
+ const struct spi_device_id *id = spi_get_device_id(spi);
struct wm831x *wm831x;
enum wm831x_parent type;
+ int ret;
- /* Currently SPI support for ID tables is unmerged, we're faking it */
- if (strcmp(spi->modalias, "wm8310") == 0)
- type = WM8310;
- else if (strcmp(spi->modalias, "wm8311") == 0)
- type = WM8311;
- else if (strcmp(spi->modalias, "wm8312") == 0)
- type = WM8312;
- else if (strcmp(spi->modalias, "wm8320") == 0)
- type = WM8320;
- else if (strcmp(spi->modalias, "wm8321") == 0)
- type = WM8321;
- else if (strcmp(spi->modalias, "wm8325") == 0)
- type = WM8325;
- else if (strcmp(spi->modalias, "wm8326") == 0)
- type = WM8326;
- else {
- dev_err(&spi->dev, "Unknown device type\n");
- return -EINVAL;
- }
+ type = (enum wm831x_parent)id->driver_data;
wm831x = kzalloc(sizeof(struct wm831x), GFP_KERNEL);
if (wm831x == NULL)
@@ -98,9 +39,15 @@ static int __devinit wm831x_spi_probe(struct spi_device *spi)
dev_set_drvdata(&spi->dev, wm831x);
wm831x->dev = &spi->dev;
- wm831x->control_data = spi;
- wm831x->read_dev = wm831x_spi_read_device;
- wm831x->write_dev = wm831x_spi_write_device;
+
+ wm831x->regmap = regmap_init_spi(spi, &wm831x_regmap_config);
+ if (IS_ERR(wm831x->regmap)) {
+ ret = PTR_ERR(wm831x->regmap);
+ dev_err(wm831x->dev, "Failed to allocate register map: %d\n",
+ ret);
+ kfree(wm831x);
+ return ret;
+ }
return wm831x_device_init(wm831x, type, spi->irq);
}
@@ -121,119 +68,50 @@ static int wm831x_spi_suspend(struct device *dev)
return wm831x_device_suspend(wm831x);
}
+static void wm831x_spi_shutdown(struct spi_device *spi)
+{
+ struct wm831x *wm831x = dev_get_drvdata(&spi->dev);
+
+ wm831x_device_shutdown(wm831x);
+}
+
static const struct dev_pm_ops wm831x_spi_pm = {
.freeze = wm831x_spi_suspend,
.suspend = wm831x_spi_suspend,
};
-static struct spi_driver wm8310_spi_driver = {
- .driver = {
- .name = "wm8310",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
- .pm = &wm831x_spi_pm,
- },
- .probe = wm831x_spi_probe,
- .remove = __devexit_p(wm831x_spi_remove),
+static const struct spi_device_id wm831x_spi_ids[] = {
+ { "wm8310", WM8310 },
+ { "wm8311", WM8311 },
+ { "wm8312", WM8312 },
+ { "wm8320", WM8320 },
+ { "wm8321", WM8321 },
+ { "wm8325", WM8325 },
+ { "wm8326", WM8326 },
+ { },
};
+MODULE_DEVICE_TABLE(spi, wm831x_spi_id);
-static struct spi_driver wm8311_spi_driver = {
+static struct spi_driver wm831x_spi_driver = {
.driver = {
- .name = "wm8311",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
- .pm = &wm831x_spi_pm,
- },
- .probe = wm831x_spi_probe,
- .remove = __devexit_p(wm831x_spi_remove),
-};
-
-static struct spi_driver wm8312_spi_driver = {
- .driver = {
- .name = "wm8312",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
- .pm = &wm831x_spi_pm,
- },
- .probe = wm831x_spi_probe,
- .remove = __devexit_p(wm831x_spi_remove),
-};
-
-static struct spi_driver wm8320_spi_driver = {
- .driver = {
- .name = "wm8320",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
- .pm = &wm831x_spi_pm,
- },
- .probe = wm831x_spi_probe,
- .remove = __devexit_p(wm831x_spi_remove),
-};
-
-static struct spi_driver wm8321_spi_driver = {
- .driver = {
- .name = "wm8321",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
- .pm = &wm831x_spi_pm,
- },
- .probe = wm831x_spi_probe,
- .remove = __devexit_p(wm831x_spi_remove),
-};
-
-static struct spi_driver wm8325_spi_driver = {
- .driver = {
- .name = "wm8325",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
- .pm = &wm831x_spi_pm,
- },
- .probe = wm831x_spi_probe,
- .remove = __devexit_p(wm831x_spi_remove),
-};
-
-static struct spi_driver wm8326_spi_driver = {
- .driver = {
- .name = "wm8326",
+ .name = "wm831x",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &wm831x_spi_pm,
},
+ .id_table = wm831x_spi_ids,
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
+ .shutdown = wm831x_spi_shutdown,
};
static int __init wm831x_spi_init(void)
{
int ret;
- ret = spi_register_driver(&wm8310_spi_driver);
- if (ret != 0)
- pr_err("Failed to register WM8310 SPI driver: %d\n", ret);
-
- ret = spi_register_driver(&wm8311_spi_driver);
- if (ret != 0)
- pr_err("Failed to register WM8311 SPI driver: %d\n", ret);
-
- ret = spi_register_driver(&wm8312_spi_driver);
- if (ret != 0)
- pr_err("Failed to register WM8312 SPI driver: %d\n", ret);
-
- ret = spi_register_driver(&wm8320_spi_driver);
- if (ret != 0)
- pr_err("Failed to register WM8320 SPI driver: %d\n", ret);
-
- ret = spi_register_driver(&wm8321_spi_driver);
- if (ret != 0)
- pr_err("Failed to register WM8321 SPI driver: %d\n", ret);
-
- ret = spi_register_driver(&wm8325_spi_driver);
- if (ret != 0)
- pr_err("Failed to register WM8325 SPI driver: %d\n", ret);
-
- ret = spi_register_driver(&wm8326_spi_driver);
+ ret = spi_register_driver(&wm831x_spi_driver);
if (ret != 0)
- pr_err("Failed to register WM8326 SPI driver: %d\n", ret);
+ pr_err("Failed to register WM831x SPI driver: %d\n", ret);
return 0;
}
@@ -241,13 +119,7 @@ subsys_initcall(wm831x_spi_init);
static void __exit wm831x_spi_exit(void)
{
- spi_unregister_driver(&wm8326_spi_driver);
- spi_unregister_driver(&wm8325_spi_driver);
- spi_unregister_driver(&wm8321_spi_driver);
- spi_unregister_driver(&wm8320_spi_driver);
- spi_unregister_driver(&wm8312_spi_driver);
- spi_unregister_driver(&wm8311_spi_driver);
- spi_unregister_driver(&wm8310_spi_driver);
+ spi_unregister_driver(&wm831x_spi_driver);
}
module_exit(wm831x_spi_exit);
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 597f82edaca..e06ba9440cd 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -13,11 +13,13 @@
*/
#include <linux/bug.h>
+#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/mfd/wm8400-private.h>
#include <linux/mfd/wm8400-audio.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
static struct {
@@ -123,14 +125,9 @@ static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest)
/* If there are any volatile reads then read back the entire block */
for (i = reg; i < reg + num_regs; i++)
if (reg_data[i].vol) {
- ret = wm8400->read_dev(wm8400->io_data, reg,
- num_regs, dest);
- if (ret != 0)
- return ret;
- for (i = 0; i < num_regs; i++)
- dest[i] = be16_to_cpu(dest[i]);
-
- return 0;
+ ret = regmap_bulk_read(wm8400->regmap, reg, dest,
+ num_regs);
+ return ret;
}
/* Otherwise use the cache */
@@ -149,14 +146,11 @@ static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
for (i = 0; i < num_regs; i++) {
BUG_ON(!reg_data[reg + i].writable);
wm8400->reg_cache[reg + i] = src[i];
- src[i] = cpu_to_be16(src[i]);
+ ret = regmap_write(wm8400->regmap, reg, src[i]);
+ if (ret != 0)
+ return ret;
}
- /* Do the actual I/O */
- ret = wm8400->write_dev(wm8400->io_data, reg, num_regs, src);
- if (ret != 0)
- return -EIO;
-
return 0;
}
@@ -270,14 +264,14 @@ static int wm8400_init(struct wm8400 *wm8400,
dev_set_drvdata(wm8400->dev, wm8400);
/* Check that this is actually a WM8400 */
- ret = wm8400->read_dev(wm8400->io_data, WM8400_RESET_ID, 1, &reg);
+ ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &i);
if (ret != 0) {
dev_err(wm8400->dev, "Chip ID register read failed\n");
return -EIO;
}
- if (be16_to_cpu(reg) != reg_data[WM8400_RESET_ID].default_val) {
+ if (i != reg_data[WM8400_RESET_ID].default_val) {
dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n",
- be16_to_cpu(reg));
+ reg);
return -ENODEV;
}
@@ -285,9 +279,8 @@ static int wm8400_init(struct wm8400 *wm8400,
* is a PMIC we can't reset it safely so initialise the register
* cache from the hardware.
*/
- ret = wm8400->read_dev(wm8400->io_data, 0,
- ARRAY_SIZE(wm8400->reg_cache),
- wm8400->reg_cache);
+ ret = regmap_raw_read(wm8400->regmap, 0, wm8400->reg_cache,
+ ARRAY_SIZE(wm8400->reg_cache));
if (ret != 0) {
dev_err(wm8400->dev, "Register cache read failed\n");
return -EIO;
@@ -337,60 +330,13 @@ static void wm8400_release(struct wm8400 *wm8400)
mfd_remove_devices(wm8400->dev);
}
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static int wm8400_i2c_read(void *io_data, char reg, int count, u16 *dest)
-{
- struct i2c_client *i2c = io_data;
- struct i2c_msg xfer[2];
- int ret;
-
- /* Write register */
- xfer[0].addr = i2c->addr;
- xfer[0].flags = 0;
- xfer[0].len = 1;
- xfer[0].buf = &reg;
-
- /* Read data */
- xfer[1].addr = i2c->addr;
- xfer[1].flags = I2C_M_RD;
- xfer[1].len = count * sizeof(u16);
- xfer[1].buf = (u8 *)dest;
-
- ret = i2c_transfer(i2c->adapter, xfer, 2);
- if (ret == 2)
- ret = 0;
- else if (ret >= 0)
- ret = -EIO;
-
- return ret;
-}
-
-static int wm8400_i2c_write(void *io_data, char reg, int count, const u16 *src)
-{
- struct i2c_client *i2c = io_data;
- u8 *msg;
- int ret;
-
- /* We add 1 byte for device register - ideally I2C would gather. */
- msg = kmalloc((count * sizeof(u16)) + 1, GFP_KERNEL);
- if (msg == NULL)
- return -ENOMEM;
-
- msg[0] = reg;
- memcpy(&msg[1], src, count * sizeof(u16));
-
- ret = i2c_master_send(i2c, msg, (count * sizeof(u16)) + 1);
-
- if (ret == (count * 2) + 1)
- ret = 0;
- else if (ret >= 0)
- ret = -EIO;
-
- kfree(msg);
-
- return ret;
-}
+static const struct regmap_config wm8400_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = WM8400_REGISTER_COUNT - 1,
+};
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
static int wm8400_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -403,18 +349,23 @@ static int wm8400_i2c_probe(struct i2c_client *i2c,
goto err;
}
- wm8400->io_data = i2c;
- wm8400->read_dev = wm8400_i2c_read;
- wm8400->write_dev = wm8400_i2c_write;
+ wm8400->regmap = regmap_init_i2c(i2c, &wm8400_regmap_config);
+ if (IS_ERR(wm8400->regmap)) {
+ ret = PTR_ERR(wm8400->regmap);
+ goto struct_err;
+ }
+
wm8400->dev = &i2c->dev;
i2c_set_clientdata(i2c, wm8400);
ret = wm8400_init(wm8400, i2c->dev.platform_data);
if (ret != 0)
- goto struct_err;
+ goto map_err;
return 0;
+map_err:
+ regmap_exit(wm8400->regmap);
struct_err:
kfree(wm8400);
err:
@@ -426,6 +377,7 @@ static int wm8400_i2c_remove(struct i2c_client *i2c)
struct wm8400 *wm8400 = i2c_get_clientdata(i2c);
wm8400_release(wm8400);
+ regmap_exit(wm8400->regmap);
kfree(wm8400);
return 0;
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 96479c9b172..bfde4e8ec63 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -16,9 +16,11 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/err.h>
#include <linux/delay.h>
#include <linux/mfd/core.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/machine.h>
@@ -29,22 +31,7 @@
static int wm8994_read(struct wm8994 *wm8994, unsigned short reg,
int bytes, void *dest)
{
- int ret, i;
- u16 *buf = dest;
-
- BUG_ON(bytes % 2);
- BUG_ON(bytes <= 0);
-
- ret = wm8994->read_dev(wm8994, reg, bytes, dest);
- if (ret < 0)
- return ret;
-
- for (i = 0; i < bytes / 2; i++) {
- dev_vdbg(wm8994->dev, "Read %04x from R%d(0x%x)\n",
- be16_to_cpu(buf[i]), reg + i, reg + i);
- }
-
- return 0;
+ return regmap_raw_read(wm8994->regmap, reg, dest, bytes);
}
/**
@@ -55,19 +42,15 @@ static int wm8994_read(struct wm8994 *wm8994, unsigned short reg,
*/
int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg)
{
- unsigned short val;
+ unsigned int val;
int ret;
- mutex_lock(&wm8994->io_lock);
-
- ret = wm8994_read(wm8994, reg, 2, &val);
-
- mutex_unlock(&wm8994->io_lock);
+ ret = regmap_read(wm8994->regmap, reg, &val);
if (ret < 0)
return ret;
else
- return be16_to_cpu(val);
+ return val;
}
EXPORT_SYMBOL_GPL(wm8994_reg_read);
@@ -82,33 +65,13 @@ EXPORT_SYMBOL_GPL(wm8994_reg_read);
int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
int count, u16 *buf)
{
- int ret;
-
- mutex_lock(&wm8994->io_lock);
-
- ret = wm8994_read(wm8994, reg, count * 2, buf);
-
- mutex_unlock(&wm8994->io_lock);
-
- return ret;
+ return regmap_bulk_read(wm8994->regmap, reg, buf, count);
}
-EXPORT_SYMBOL_GPL(wm8994_bulk_read);
static int wm8994_write(struct wm8994 *wm8994, unsigned short reg,
int bytes, const void *src)
{
- const u16 *buf = src;
- int i;
-
- BUG_ON(bytes % 2);
- BUG_ON(bytes <= 0);
-
- for (i = 0; i < bytes / 2; i++) {
- dev_vdbg(wm8994->dev, "Write %04x to R%d(0x%x)\n",
- be16_to_cpu(buf[i]), reg + i, reg + i);
- }
-
- return wm8994->write_dev(wm8994, reg, bytes, src);
+ return regmap_raw_write(wm8994->regmap, reg, src, bytes);
}
/**
@@ -121,17 +84,7 @@ static int wm8994_write(struct wm8994 *wm8994, unsigned short reg,
int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
unsigned short val)
{
- int ret;
-
- val = cpu_to_be16(val);
-
- mutex_lock(&wm8994->io_lock);
-
- ret = wm8994_write(wm8994, reg, 2, &val);
-
- mutex_unlock(&wm8994->io_lock);
-
- return ret;
+ return regmap_write(wm8994->regmap, reg, val);
}
EXPORT_SYMBOL_GPL(wm8994_reg_write);
@@ -146,15 +99,7 @@ EXPORT_SYMBOL_GPL(wm8994_reg_write);
int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
int count, const u16 *buf)
{
- int ret;
-
- mutex_lock(&wm8994->io_lock);
-
- ret = wm8994_write(wm8994, reg, count * 2, buf);
-
- mutex_unlock(&wm8994->io_lock);
-
- return ret;
+ return regmap_raw_write(wm8994->regmap, reg, buf, count * sizeof(u16));
}
EXPORT_SYMBOL_GPL(wm8994_bulk_write);
@@ -169,28 +114,7 @@ EXPORT_SYMBOL_GPL(wm8994_bulk_write);
int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
unsigned short mask, unsigned short val)
{
- int ret;
- u16 r;
-
- mutex_lock(&wm8994->io_lock);
-
- ret = wm8994_read(wm8994, reg, 2, &r);
- if (ret < 0)
- goto out;
-
- r = be16_to_cpu(r);
-
- r &= ~mask;
- r |= val;
-
- r = cpu_to_be16(r);
-
- ret = wm8994_write(wm8994, reg, 2, &r);
-
-out:
- mutex_unlock(&wm8994->io_lock);
-
- return ret;
+ return regmap_update_bits(wm8994->regmap, reg, mask, val);
}
EXPORT_SYMBOL_GPL(wm8994_set_bits);
@@ -378,6 +302,11 @@ static int wm8994_ldo_in_use(struct wm8994_pdata *pdata, int ldo)
}
#endif
+static struct regmap_config wm8994_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+};
+
/*
* Instantiate the generic non-control parts of the device.
*/
@@ -387,7 +316,6 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
const char *devname;
int ret, i;
- mutex_init(&wm8994->io_lock);
dev_set_drvdata(wm8994->dev, wm8994);
/* Add the on-chip regulators first for bootstrapping */
@@ -397,7 +325,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
NULL, 0);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to add children: %d\n", ret);
- goto err;
+ goto err_regmap;
}
switch (wm8994->type) {
@@ -409,7 +337,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
default:
BUG();
- goto err;
+ goto err_regmap;
}
wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
@@ -417,7 +345,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
GFP_KERNEL);
if (!wm8994->supplies) {
ret = -ENOMEM;
- goto err;
+ goto err_regmap;
}
switch (wm8994->type) {
@@ -431,7 +359,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
default:
BUG();
- goto err;
+ goto err_regmap;
}
ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
@@ -554,7 +482,8 @@ err_get:
regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
err_supplies:
kfree(wm8994->supplies);
-err:
+err_regmap:
+ regmap_exit(wm8994->regmap);
mfd_remove_devices(wm8994->dev);
kfree(wm8994);
return ret;
@@ -569,62 +498,15 @@ static void wm8994_device_exit(struct wm8994 *wm8994)
wm8994->supplies);
regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
kfree(wm8994->supplies);
+ regmap_exit(wm8994->regmap);
kfree(wm8994);
}
-static int wm8994_i2c_read_device(struct wm8994 *wm8994, unsigned short reg,
- int bytes, void *dest)
-{
- struct i2c_client *i2c = wm8994->control_data;
- int ret;
- u16 r = cpu_to_be16(reg);
-
- ret = i2c_master_send(i2c, (unsigned char *)&r, 2);
- if (ret < 0)
- return ret;
- if (ret != 2)
- return -EIO;
-
- ret = i2c_master_recv(i2c, dest, bytes);
- if (ret < 0)
- return ret;
- if (ret != bytes)
- return -EIO;
- return 0;
-}
-
-static int wm8994_i2c_write_device(struct wm8994 *wm8994, unsigned short reg,
- int bytes, const void *src)
-{
- struct i2c_client *i2c = wm8994->control_data;
- struct i2c_msg xfer[2];
- int ret;
-
- reg = cpu_to_be16(reg);
-
- xfer[0].addr = i2c->addr;
- xfer[0].flags = 0;
- xfer[0].len = 2;
- xfer[0].buf = (char *)&reg;
-
- xfer[1].addr = i2c->addr;
- xfer[1].flags = I2C_M_NOSTART;
- xfer[1].len = bytes;
- xfer[1].buf = (char *)src;
-
- ret = i2c_transfer(i2c->adapter, xfer, 2);
- if (ret < 0)
- return ret;
- if (ret != 2)
- return -EIO;
-
- return 0;
-}
-
static int wm8994_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8994 *wm8994;
+ int ret;
wm8994 = kzalloc(sizeof(struct wm8994), GFP_KERNEL);
if (wm8994 == NULL)
@@ -632,12 +514,18 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, wm8994);
wm8994->dev = &i2c->dev;
- wm8994->control_data = i2c;
- wm8994->read_dev = wm8994_i2c_read_device;
- wm8994->write_dev = wm8994_i2c_write_device;
wm8994->irq = i2c->irq;
wm8994->type = id->driver_data;
+ wm8994->regmap = regmap_init_i2c(i2c, &wm8994_regmap_config);
+ if (IS_ERR(wm8994->regmap)) {
+ ret = PTR_ERR(wm8994->regmap);
+ dev_err(wm8994->dev, "Failed to allocate register map: %d\n",
+ ret);
+ kfree(wm8994);
+ return ret;
+ }
+
return wm8994_device_init(wm8994, i2c->irq);
}
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 0fd7e77bee2..dee33addcae 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -90,6 +90,7 @@
#define PCH_PHUB_INTPIN_REG_WPERMIT_REG3 0x002C
#define PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE 0x0040
#define CLKCFG_REG_OFFSET 0x500
+#define FUNCSEL_REG_OFFSET 0x508
#define PCH_PHUB_OROM_SIZE 15360
@@ -108,6 +109,7 @@
* @intpin_reg_wpermit_reg3: INTPIN_REG_WPERMIT register 3 val
* @int_reduce_control_reg: INT_REDUCE_CONTROL registers val
* @clkcfg_reg: CLK CFG register val
+ * @funcsel_reg: Function select register value
* @pch_phub_base_address: Register base address
* @pch_phub_extrom_base_address: external rom base address
* @pch_mac_start_address: MAC address area start address
@@ -128,6 +130,7 @@ struct pch_phub_reg {
u32 intpin_reg_wpermit_reg3;
u32 int_reduce_control_reg[MAX_NUM_INT_REDUCE_CONTROL_REG];
u32 clkcfg_reg;
+ u32 funcsel_reg;
void __iomem *pch_phub_base_address;
void __iomem *pch_phub_extrom_base_address;
u32 pch_mac_start_address;
@@ -211,6 +214,8 @@ static void pch_phub_save_reg_conf(struct pci_dev *pdev)
__func__, i, chip->int_reduce_control_reg[i]);
}
chip->clkcfg_reg = ioread32(p + CLKCFG_REG_OFFSET);
+ if ((chip->ioh_type == 2) || (chip->ioh_type == 4))
+ chip->funcsel_reg = ioread32(p + FUNCSEL_REG_OFFSET);
}
/* pch_phub_restore_reg_conf - restore register configuration */
@@ -271,6 +276,8 @@ static void pch_phub_restore_reg_conf(struct pci_dev *pdev)
}
iowrite32(chip->clkcfg_reg, p + CLKCFG_REG_OFFSET);
+ if ((chip->ioh_type == 2) || (chip->ioh_type == 4))
+ iowrite32(chip->funcsel_reg, p + FUNCSEL_REG_OFFSET);
}
/**
@@ -594,8 +601,7 @@ static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr,
pch_phub_read_gbe_mac_addr(chip, mac);
- return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
- mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ return sprintf(buf, "%pM\n", mac);
}
static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 4be8373d43e..66b616ebe53 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -142,7 +142,7 @@ config MTD_OF_PARTS
help
This provides a partition parsing function which derives
the partition map from the children of the flash node,
- as described in Documentation/powerpc/booting-without-of.txt.
+ as described in Documentation/devicetree/booting-without-of.txt.
config MTD_AR7_PARTS
tristate "TI AR7 partitioning support"
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a44874e24f2..583f66cd5bb 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2,9 +2,6 @@
# Network device configuration
#
-config HAVE_NET_MACB
- bool
-
menuconfig NETDEVICES
default y if UML
depends on NET
@@ -28,18 +25,32 @@ menuconfig NETDEVICES
# that for each of the symbols.
if NETDEVICES
-config IFB
- tristate "Intermediate Functional Block support"
- depends on NET_CLS_ACT
+config NET_CORE
+ default y
+ bool "Network core driver support"
---help---
- This is an intermediate driver that allows sharing of
- resources.
+ You can say N here if you do not intend to use any of the
+ networking core drivers (i.e. VLAN, bridging, bonding, etc.)
+
+if NET_CORE
+
+config BONDING
+ tristate "Bonding driver support"
+ depends on INET
+ depends on IPV6 || IPV6=n
+ ---help---
+ Say 'Y' or 'M' if you wish to be able to 'bond' multiple Ethernet
+ Channels together. This is called 'Etherchannel' by Cisco,
+ 'Trunking' by Sun, 802.3ad by the IEEE, and 'Bonding' in Linux.
+
+ The driver supports multiple bonding modes to allow for both high
+ performance and high availability operation.
+
+ Refer to <file:Documentation/networking/bonding.txt> for more
+ information.
+
To compile this driver as a module, choose M here: the module
- will be called ifb. If you want to use more than one ifb
- device at a time, you need to compile this driver as a module.
- Instead of 'ifb', the devices will then be called 'ifb0',
- 'ifb1' etc.
- Look at the iproute2 documentation directory for usage etc
+ will be called bonding.
config DUMMY
tristate "Dummy net driver support"
@@ -60,23 +71,59 @@ config DUMMY
Instead of 'dummy', the devices will then be called 'dummy0',
'dummy1' etc.
-config BONDING
- tristate "Bonding driver support"
- depends on INET
- depends on IPV6 || IPV6=n
+config EQUALIZER
+ tristate "EQL (serial line load balancing) support"
---help---
- Say 'Y' or 'M' if you wish to be able to 'bond' multiple Ethernet
- Channels together. This is called 'Etherchannel' by Cisco,
- 'Trunking' by Sun, 802.3ad by the IEEE, and 'Bonding' in Linux.
+ If you have two serial connections to some other computer (this
+ usually requires two modems and two telephone lines) and you use
+ SLIP (the protocol for sending Internet traffic over telephone
+ lines) or PPP (a better SLIP) on them, you can make them behave like
+ one double speed connection using this driver. Naturally, this has
+ to be supported at the other end as well, either with a similar EQL
+ Linux driver or with a Livingston Portmaster 2e.
- The driver supports multiple bonding modes to allow for both high
- performance and high availability operation.
+ Say Y if you want this and read
+ <file:Documentation/networking/eql.txt>. You may also want to read
+ section 6.2 of the NET-3-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
- Refer to <file:Documentation/networking/bonding.txt> for more
- information.
+ To compile this driver as a module, choose M here: the module
+ will be called eql. If unsure, say N.
+
+config NET_FC
+ bool "Fibre Channel driver support"
+ depends on SCSI && PCI
+ help
+ Fibre Channel is a high speed serial protocol mainly used to connect
+ large storage devices to the computer; it is compatible with and
+ intended to replace SCSI.
+
+ If you intend to use Fibre Channel, you need to have a Fibre channel
+ adaptor card in your computer; say Y here and to the driver for your
+ adaptor below. You also should have said Y to "SCSI support" and
+ "SCSI generic support".
+
+config MII
+ tristate "Generic Media Independent Interface device support"
+ help
+ Most ethernet controllers have MII transceiver either as an external
+ or internal device. It is safe to say Y or M here even if your
+ ethernet card lacks MII.
+source "drivers/ieee802154/Kconfig"
+
+config IFB
+ tristate "Intermediate Functional Block support"
+ depends on NET_CLS_ACT
+ ---help---
+ This is an intermediate driver that allows sharing of
+ resources.
To compile this driver as a module, choose M here: the module
- will be called bonding.
+ will be called ifb. If you want to use more than one ifb
+ device at a time, you need to compile this driver as a module.
+ Instead of 'ifb', the devices will then be called 'ifb0',
+ 'ifb1' etc.
+ Look at the iproute2 documentation directory for usage etc
config MACVLAN
tristate "MAC-VLAN support (EXPERIMENTAL)"
@@ -105,24 +152,46 @@ config MACVTAP
To compile this driver as a module, choose M here: the module
will be called macvtap.
-config EQUALIZER
- tristate "EQL (serial line load balancing) support"
+config NETCONSOLE
+ tristate "Network console logging support"
---help---
- If you have two serial connections to some other computer (this
- usually requires two modems and two telephone lines) and you use
- SLIP (the protocol for sending Internet traffic over telephone
- lines) or PPP (a better SLIP) on them, you can make them behave like
- one double speed connection using this driver. Naturally, this has
- to be supported at the other end as well, either with a similar EQL
- Linux driver or with a Livingston Portmaster 2e.
+ If you want to log kernel messages over the network, enable this.
+ See <file:Documentation/networking/netconsole.txt> for details.
- Say Y if you want this and read
- <file:Documentation/networking/eql.txt>. You may also want to read
- section 6.2 of the NET-3-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+config NETCONSOLE_DYNAMIC
+ bool "Dynamic reconfiguration of logging targets"
+ depends on NETCONSOLE && SYSFS && CONFIGFS_FS && \
+ !(NETCONSOLE=y && CONFIGFS_FS=m)
+ help
+ This option enables the ability to dynamically reconfigure target
+ parameters (interface, IP addresses, port numbers, MAC addresses)
+ at runtime through a userspace interface exported using configfs.
+ See <file:Documentation/networking/netconsole.txt> for details.
- To compile this driver as a module, choose M here: the module
- will be called eql. If unsure, say N.
+config NETPOLL
+ def_bool NETCONSOLE
+
+config NETPOLL_TRAP
+ bool "Netpoll traffic trapping"
+ default n
+ depends on NETPOLL
+
+config NET_POLL_CONTROLLER
+ def_bool NETPOLL
+
+config RIONET
+ tristate "RapidIO Ethernet over messaging driver support"
+ depends on RAPIDIO
+
+config RIONET_TX_SIZE
+ int "Number of outbound queue entries"
+ depends on RIONET
+ default "128"
+
+config RIONET_RX_SIZE
+ int "Number of inbound queue entries"
+ depends on RIONET
+ default "128"
config TUN
tristate "Universal TUN/TAP device driver support"
@@ -154,6 +223,28 @@ config VETH
When one end receives the packet it appears on its pair and vice
versa.
+config VIRTIO_NET
+ tristate "Virtio network driver (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && VIRTIO
+ ---help---
+ This is the virtual network driver for virtio. It can be used with
+ lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
+
+endif # NET_CORE
+
+config SUNGEM_PHY
+ tristate
+
+source "drivers/net/arcnet/Kconfig"
+
+source "drivers/atm/Kconfig"
+
+source "drivers/net/caif/Kconfig"
+
+source "drivers/net/ethernet/Kconfig"
+
+source "drivers/net/fddi/Kconfig"
+
config NET_SB1000
tristate "General Instruments Surfboard 1000"
depends on PNP
@@ -178,2791 +269,26 @@ config NET_SB1000
If you don't have this card, of course say N.
-source "drivers/net/arcnet/Kconfig"
-
-config MII
- tristate "Generic Media Independent Interface device support"
- help
- Most ethernet controllers have MII transceiver either as an external
- or internal device. It is safe to say Y or M here even if your
- ethernet card lacks MII.
-
source "drivers/net/phy/Kconfig"
-#
-# Ethernet
-#
+source "drivers/net/plip/Kconfig"
-menuconfig NET_ETHERNET
- bool "Ethernet (10 or 100Mbit)"
- depends on !UML
- ---help---
- Ethernet (also called IEEE 802.3 or ISO 8802-2) is the most common
- type of Local Area Network (LAN) in universities and companies.
-
- Common varieties of Ethernet are: 10BASE-2 or Thinnet (10 Mbps over
- coaxial cable, linking computers in a chain), 10BASE-T or twisted
- pair (10 Mbps over twisted pair cable, linking computers to central
- hubs), 10BASE-F (10 Mbps over optical fiber links, using hubs),
- 100BASE-TX (100 Mbps over two twisted pair cables, using hubs),
- 100BASE-T4 (100 Mbps over 4 standard voice-grade twisted pair
- cables, using hubs), 100BASE-FX (100 Mbps over optical fiber links)
- [the 100BASE varieties are also known as Fast Ethernet], and Gigabit
- Ethernet (1 Gbps over optical fiber or short copper links).
-
- If your Linux machine will be connected to an Ethernet and you have
- an Ethernet network interface card (NIC) installed in your computer,
- say Y here and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. You will then also have
- to say Y to the driver for your particular NIC.
-
- Note that the answer to this question won't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about Ethernet network cards. If unsure, say N.
-
-if NET_ETHERNET
-
-config MACB
- tristate "Atmel MACB support"
- depends on HAVE_NET_MACB
- select PHYLIB
- help
- The Atmel MACB ethernet interface is found on many AT32 and AT91
- parts. Say Y to include support for the MACB chip.
+source "drivers/net/ppp/Kconfig"
- To compile this driver as a module, choose M here: the module
- will be called macb.
+source "drivers/net/slip/Kconfig"
-source "drivers/net/arm/Kconfig"
-
-config AX88796
- tristate "ASIX AX88796 NE2000 clone support"
- depends on ARM || MIPS || SUPERH
- select PHYLIB
- select MDIO_BITBANG
- help
- AX88796 driver, using platform bus to provide
- chip detection and resources
-
-config AX88796_93CX6
- bool "ASIX AX88796 external 93CX6 eeprom support"
- depends on AX88796
- select EEPROM_93CX6
- help
- Select this if your platform comes with an external 93CX6 eeprom.
-
-config MACE
- tristate "MACE (Power Mac ethernet) support"
- depends on PPC_PMAC && PPC32
- select CRC32
- help
- Power Macintoshes and clones with Ethernet built-in on the
- motherboard will usually use a MACE (Medium Access Control for
- Ethernet) interface. Say Y to include support for the MACE chip.
-
- To compile this driver as a module, choose M here: the module
- will be called mace.
-
-config MACE_AAUI_PORT
- bool "Use AAUI port instead of TP by default"
- depends on MACE
- help
- Some Apple machines (notably the Apple Network Server) which use the
- MACE ethernet chip have an Apple AUI port (small 15-pin connector),
- instead of an 8-pin RJ45 connector for twisted-pair ethernet. Say
- Y here if you have such a machine. If unsure, say N.
- The driver will default to AAUI on ANS anyway, and if you use it as
- a module, you can provide the port_aaui=0|1 to force the driver.
-
-config BMAC
- tristate "BMAC (G3 ethernet) support"
- depends on PPC_PMAC && PPC32
- select CRC32
- help
- Say Y for support of BMAC Ethernet interfaces. These are used on G3
- computers.
-
- To compile this driver as a module, choose M here: the module
- will be called bmac.
-
-config ARIADNE
- tristate "Ariadne support"
- depends on ZORRO
- help
- If you have a Village Tronic Ariadne Ethernet adapter, say Y.
- Otherwise, say N.
-
- To compile this driver as a module, choose M here: the module
- will be called ariadne.
-
-config A2065
- tristate "A2065 support"
- depends on ZORRO
- select CRC32
- help
- If you have a Commodore A2065 Ethernet adapter, say Y. Otherwise,
- say N.
-
- To compile this driver as a module, choose M here: the module
- will be called a2065.
-
-config HYDRA
- tristate "Hydra support"
- depends on ZORRO
- select CRC32
- help
- If you have a Hydra Ethernet adapter, say Y. Otherwise, say N.
-
- To compile this driver as a module, choose M here: the module
- will be called hydra.
-
-config ZORRO8390
- tristate "Zorro NS8390-based Ethernet support"
- depends on ZORRO
- select CRC32
- help
- This driver is for Zorro Ethernet cards using an NS8390-compatible
- chipset, like the Village Tronic Ariadne II and the Individual
- Computers X-Surf Ethernet cards. If you have such a card, say Y.
- Otherwise, say N.
-
- To compile this driver as a module, choose M here: the module
- will be called zorro8390.
-
-config APNE
- tristate "PCMCIA NE2000 support"
- depends on AMIGA_PCMCIA
- select CRC32
- help
- If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise,
- say N.
-
- To compile this driver as a module, choose M here: the module
- will be called apne.
-
-config MAC8390
- bool "Macintosh NS 8390 based ethernet cards"
- depends on MAC
- select CRC32
- help
- If you want to include a driver to support Nubus or LC-PDS
- Ethernet cards using an NS8390 chipset or its equivalent, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
-config MAC89x0
- tristate "Macintosh CS89x0 based ethernet cards"
- depends on MAC
- ---help---
- Support for CS89x0 chipset based Ethernet cards. If you have a
- Nubus or LC-PDS network (Ethernet) card of this type, say Y and
- read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. This module will
- be called mac89x0.
-
-config MACSONIC
- tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)"
- depends on MAC
- ---help---
- Support for NatSemi SONIC based Ethernet devices. This includes
- the onboard Ethernet in many Quadras as well as some LC-PDS,
- a few Nubus and all known Comm Slot Ethernet cards. If you have
- one of these say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. This module will
- be called macsonic.
-
-config MACMACE
- bool "Macintosh (AV) onboard MACE ethernet"
- depends on MAC
- select CRC32
- help
- Support for the onboard AMD 79C940 MACE Ethernet controller used in
- the 660AV and 840AV Macintosh. If you have one of these Macintoshes
- say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
-config MVME147_NET
- tristate "MVME147 (Lance) Ethernet support"
- depends on MVME147
- select CRC32
- help
- Support for the on-board Ethernet interface on the Motorola MVME147
- single-board computer. Say Y here to include the
- driver for this chip in your kernel.
- To compile this driver as a module, choose M here.
-
-config MVME16x_NET
- tristate "MVME16x Ethernet support"
- depends on MVME16x
- help
- This is the driver for the Ethernet interface on the Motorola
- MVME162, 166, 167, 172 and 177 boards. Say Y here to include the
- driver for this chip in your kernel.
- To compile this driver as a module, choose M here.
-
-config BVME6000_NET
- tristate "BVME6000 Ethernet support"
- depends on BVME6000
- help
- This is the driver for the Ethernet interface on BVME4000 and
- BVME6000 VME boards. Say Y here to include the driver for this chip
- in your kernel.
- To compile this driver as a module, choose M here.
-
-config ATARILANCE
- tristate "Atari Lance support"
- depends on ATARI
- help
- Say Y to include support for several Atari Ethernet adapters based
- on the AMD Lance chipset: RieblCard (with or without battery), or
- PAMCard VME (also the version by Rhotron, with different addresses).
-
-config SUN3LANCE
- tristate "Sun3/Sun3x on-board LANCE support"
- depends on SUN3 || SUN3X
- help
- Most Sun3 and Sun3x motherboards (including the 3/50, 3/60 and 3/80)
- featured an AMD Lance 10Mbit Ethernet controller on board; say Y
- here to compile in the Linux driver for this and enable Ethernet.
- General Linux information on the Sun 3 and 3x series (now
- discontinued) is at
- <http://www.angelfire.com/ca2/tech68k/sun3.html>.
-
- If you're not building a kernel for a Sun 3, say N.
-
-config SUN3_82586
- bool "Sun3 on-board Intel 82586 support"
- depends on SUN3
- help
- This driver enables support for the on-board Intel 82586 based
- Ethernet adapter found on Sun 3/1xx and 3/2xx motherboards. Note
- that this driver does not support 82586-based adapters on additional
- VME boards.
-
-config HPLANCE
- bool "HP on-board LANCE support"
- depends on DIO
- select CRC32
- help
- If you want to use the builtin "LANCE" Ethernet controller on an
- HP300 machine, say Y here.
-
-config LASI_82596
- tristate "Lasi ethernet"
- depends on GSC
- help
- Say Y here to support the builtin Intel 82596 ethernet controller
- found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet.
-
-config SNI_82596
- tristate "SNI RM ethernet"
- depends on NET_ETHERNET && SNI_RM
- help
- Say Y here to support the on-board Intel 82596 ethernet controller
- built into SNI RM machines.
-
-config KORINA
- tristate "Korina (IDT RC32434) Ethernet support"
- depends on NET_ETHERNET && MIKROTIK_RB532
- help
- If you have a Mikrotik RouterBoard 500 or IDT RC32434
- based system say Y. Otherwise say N.
-
-config MIPS_JAZZ_SONIC
- tristate "MIPS JAZZ onboard SONIC Ethernet support"
- depends on MACH_JAZZ
- help
- This is the driver for the onboard card of MIPS Magnum 4000,
- Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
-
-config XTENSA_XT2000_SONIC
- tristate "Xtensa XT2000 onboard SONIC Ethernet support"
- depends on XTENSA_PLATFORM_XT2000
- help
- This is the driver for the onboard card of the Xtensa XT2000 board.
-
-config MIPS_AU1X00_ENET
- tristate "MIPS AU1000 Ethernet support"
- depends on MIPS_ALCHEMY
- select PHYLIB
- select CRC32
- help
- If you have an Alchemy Semi AU1X00 based system
- say Y. Otherwise, say N.
-
-config SGI_IOC3_ETH
- bool "SGI IOC3 Ethernet"
- depends on PCI && SGI_IP27
- select CRC32
- select MII
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
-config MIPS_SIM_NET
- tristate "MIPS simulator Network device"
- depends on MIPS_SIM
- help
- The MIPSNET device is a simple Ethernet network device which is
- emulated by the MIPS Simulator.
- If you are not using a MIPSsim or are unsure, say N.
-
-config SGI_O2MACE_ETH
- tristate "SGI O2 MACE Fast Ethernet support"
- depends on SGI_IP32=y
-
-config STNIC
- tristate "National DP83902AV support"
- depends on SUPERH
- select CRC32
- help
- Support for cards based on the National Semiconductor DP83902AV
- ST-NIC Serial Network Interface Controller for Twisted Pair. This
- is a 10Mbit/sec Ethernet controller. Product overview and specs at
- <http://www.national.com/pf/DP/DP83902A.html>.
-
- If unsure, say N.
-
-config SH_ETH
- tristate "Renesas SuperH Ethernet support"
- depends on SUPERH && \
- (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
- CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
- CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7757)
- select CRC32
- select MII
- select MDIO_BITBANG
- select PHYLIB
- help
- Renesas SuperH Ethernet device driver.
- This driver supporting CPUs are:
- - SH7710, SH7712, SH7763, SH7619, SH7724, and SH7757.
-
-config SUNLANCE
- tristate "Sun LANCE support"
- depends on SBUS
- select CRC32
- help
- This driver supports the "le" interface present on all 32-bit Sparc
- systems, on some older Ultra systems and as an Sbus option. These
- cards are based on the AMD Lance chipset, which is better known
- via the NE2100 cards.
-
- To compile this driver as a module, choose M here: the module
- will be called sunlance.
-
-config HAPPYMEAL
- tristate "Sun Happy Meal 10/100baseT support"
- depends on SBUS || PCI
- select CRC32
- help
- This driver supports the "hme" interface present on most Ultra
- systems and as an option on older Sbus systems. This driver supports
- both PCI and Sbus devices. This driver also supports the "qfe" quad
- 100baseT device available in both PCI and Sbus configurations.
-
- To compile this driver as a module, choose M here: the module
- will be called sunhme.
-
-config SUNBMAC
- tristate "Sun BigMAC 10/100baseT support (EXPERIMENTAL)"
- depends on SBUS && EXPERIMENTAL
- select CRC32
- help
- This driver supports the "be" interface available as an Sbus option.
- This is Sun's older 100baseT Ethernet device.
-
- To compile this driver as a module, choose M here: the module
- will be called sunbmac.
-
-config SUNQE
- tristate "Sun QuadEthernet support"
- depends on SBUS
- select CRC32
- help
- This driver supports the "qe" 10baseT Ethernet device, available as
- an Sbus option. Note that this is not the same as Quad FastEthernet
- "qfe" which is supported by the Happy Meal driver instead.
-
- To compile this driver as a module, choose M here: the module
- will be called sunqe.
-
-config SUNGEM
- tristate "Sun GEM support"
- depends on PCI
- select CRC32
- help
- Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also
- <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>.
-
-config CASSINI
- tristate "Sun Cassini support"
- depends on PCI
- select CRC32
- help
- Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
- <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
-
-config SUNVNET
- tristate "Sun Virtual Network support"
- depends on SUN_LDOMS
- help
- Support for virtual network devices under Sun Logical Domains.
-
-config NET_VENDOR_3COM
- bool "3COM cards"
- depends on ISA || EISA || MCA || PCI
- help
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about 3COM cards. If you say Y, you will be asked for
- your specific card in the following questions.
-
-config EL1
- tristate "3c501 \"EtherLink\" support"
- depends on NET_VENDOR_3COM && ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Also, consider buying a
- new card, since the 3c501 is slow, broken, and obsolete: you will
- have problems. Some people suggest to ping ("man ping") a nearby
- machine every minute ("man cron") when using this card.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c501.
-
-config EL2
- tristate "3c503 \"EtherLink II\" support"
- depends on NET_VENDOR_3COM && ISA
- select CRC32
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c503.
-
-config ELPLUS
- tristate "3c505 \"EtherLink Plus\" support"
- depends on NET_VENDOR_3COM && ISA && ISA_DMA_API
- ---help---
- Information about this network (Ethernet) card can be found in
- <file:Documentation/networking/3c505.txt>. If you have a card of
- this type, say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c505.
-
-config EL16
- tristate "3c507 \"EtherLink 16\" support (EXPERIMENTAL)"
- depends on NET_VENDOR_3COM && ISA && EXPERIMENTAL
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c507.
-
-config EL3
- tristate "3c509/3c529 (MCA)/3c579 \"EtherLink III\" support"
- depends on NET_VENDOR_3COM && (ISA || EISA || MCA)
- ---help---
- If you have a network (Ethernet) card belonging to the 3Com
- EtherLinkIII series, say Y and read the Ethernet-HOWTO, available
- from <http://www.tldp.org/docs.html#howto>.
-
- If your card is not working you may need to use the DOS
- setup disk to disable Plug & Play mode, and to select the default
- media type.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c509.
-
-config 3C515
- tristate "3c515 ISA \"Fast EtherLink\""
- depends on NET_VENDOR_3COM && (ISA || EISA) && ISA_DMA_API
- help
- If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
- network card, say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c515.
-
-config ELMC
- tristate "3c523 \"EtherLink/MC\" support"
- depends on NET_VENDOR_3COM && MCA_LEGACY
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c523.
-
-config ELMC_II
- tristate "3c527 \"EtherLink/MC 32\" support (EXPERIMENTAL)"
- depends on NET_VENDOR_3COM && MCA && MCA_LEGACY
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c527.
-
-config VORTEX
- tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support"
- depends on NET_VENDOR_3COM && (PCI || EISA)
- select MII
- ---help---
- This option enables driver support for a large number of 10Mbps and
- 10/100Mbps EISA, PCI and PCMCIA 3Com network cards:
-
- "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
- "Boomerang" (EtherLink XL 3c900 or 3c905) PCI
- "Cyclone" (3c540/3c900/3c905/3c980/3c575/3c656) PCI and Cardbus
- "Tornado" (3c905) PCI
- "Hurricane" (3c555/3cSOHO) PCI
-
- If you have such a card, say Y and read the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>. More
- specific information is in
- <file:Documentation/networking/vortex.txt> and in the comments at
- the beginning of <file:drivers/net/3c59x.c>.
-
- To compile this support as a module, choose M here.
-
-config TYPHOON
- tristate "3cr990 series \"Typhoon\" support"
- depends on NET_VENDOR_3COM && PCI
- select CRC32
- ---help---
- This option enables driver support for the 3cr990 series of cards:
-
- 3C990-TX, 3CR990-TX-95, 3CR990-TX-97, 3CR990-FX-95, 3CR990-FX-97,
- 3CR990SVR, 3CR990SVR95, 3CR990SVR97, 3CR990-FX-95 Server,
- 3CR990-FX-97 Server, 3C990B-TX-M, 3C990BSVR
-
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called typhoon.
-
-config LANCE
- tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
- depends on ISA && ISA_DMA_API
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Some LinkSys cards are
- of this type.
-
- To compile this driver as a module, choose M here: the module
- will be called lance. This is recommended.
-
-config NET_VENDOR_SMC
- bool "Western Digital/SMC cards"
- depends on ISA || MCA || EISA || MAC
- help
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about Western Digital cards. If you say Y, you will be
- asked for your specific card in the following questions.
-
-config WD80x3
- tristate "WD80*3 support"
- depends on NET_VENDOR_SMC && ISA
- select CRC32
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called wd.
-
-config ULTRAMCA
- tristate "SMC Ultra MCA support"
- depends on NET_VENDOR_SMC && MCA
- select CRC32
- help
- If you have a network (Ethernet) card of this type and are running
- an MCA based system (PS/2), say Y and read the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called smc-mca.
-
-config ULTRA
- tristate "SMC Ultra support"
- depends on NET_VENDOR_SMC && ISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- Important: There have been many reports that, with some motherboards
- mixing an SMC Ultra and an Adaptec AHA154x SCSI card (or compatible,
- such as some BusLogic models) causes corruption problems with many
- operating systems. The Linux smc-ultra driver has a work-around for
- this but keep it in mind if you have such a SCSI card and have
- problems.
-
- To compile this driver as a module, choose M here. The module
- will be called smc-ultra.
-
-config ULTRA32
- tristate "SMC Ultra32 EISA support"
- depends on NET_VENDOR_SMC && EISA
- select CRC32
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called smc-ultra32.
-
-config BFIN_MAC
- tristate "Blackfin on-chip MAC support"
- depends on NET_ETHERNET && (BF516 || BF518 || BF526 || BF527 || BF536 || BF537)
- select CRC32
- select MII
- select PHYLIB
- select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
- help
- This is the driver for Blackfin on-chip mac device. Say Y if you want it
- compiled into the kernel. This driver is also available as a module
- ( = code which can be inserted in and removed from the running kernel
- whenever you want). The module will be called bfin_mac.
-
-config BFIN_MAC_USE_L1
- bool "Use L1 memory for rx/tx packets"
- depends on BFIN_MAC && (BF527 || BF537)
- default y
- help
- To get maximum network performance, you should use L1 memory as rx/tx buffers.
- Say N here if you want to reserve L1 memory for other uses.
-
-config BFIN_TX_DESC_NUM
- int "Number of transmit buffer packets"
- depends on BFIN_MAC
- range 6 10 if BFIN_MAC_USE_L1
- range 10 100
- default "10"
- help
- Set the number of buffer packets used in driver.
-
-config BFIN_RX_DESC_NUM
- int "Number of receive buffer packets"
- depends on BFIN_MAC
- range 20 100 if BFIN_MAC_USE_L1
- range 20 800
- default "20"
- help
- Set the number of buffer packets used in driver.
-
-config BFIN_MAC_USE_HWSTAMP
- bool "Use IEEE 1588 hwstamp"
- depends on BFIN_MAC && BF518
- default y
- help
- To support the IEEE 1588 Precision Time Protocol (PTP), select y here
-
-config SMC9194
- tristate "SMC 9194 support"
- depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN)
- select CRC32
- ---help---
- This is support for the SMC9xxx based Ethernet cards. Choose this
- option if you have a DELL laptop with the docking station, or
- another SMC9192/9194 based chipset. Say Y if you want it compiled
- into the kernel, and read the file
- <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called smc9194.
-
-config SMC91X
- tristate "SMC 91C9x/91C1xxx support"
- select CRC32
- select MII
- depends on ARM || M32R || SUPERH || \
- MIPS || BLACKFIN || MN10300 || COLDFIRE
- help
- This is a driver for SMC's 91x series of Ethernet chipsets,
- including the SMC91C94 and the SMC91C111. Say Y if you want it
- compiled into the kernel, and read the file
- <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- This driver is also available as a module ( = code which can be
- inserted in and removed from the running kernel whenever you want).
- The module will be called smc91x. If you want to compile it as a
- module, say M here and read <file:Documentation/kbuild/modules.txt>.
-
-config PXA168_ETH
- tristate "Marvell pxa168 ethernet support"
- depends on CPU_PXA168
- select PHYLIB
- help
- This driver supports the pxa168 Ethernet ports.
-
- To compile this driver as a module, choose M here. The module
- will be called pxa168_eth.
-
-config NET_NETX
- tristate "NetX Ethernet support"
- select MII
- depends on ARCH_NETX
- help
- This is support for the Hilscher netX builtin Ethernet ports
-
- To compile this driver as a module, choose M here. The module
- will be called netx-eth.
-
-config TI_DAVINCI_EMAC
- tristate "TI DaVinci EMAC Support"
- depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
- select TI_DAVINCI_MDIO
- select TI_DAVINCI_CPDMA
- select PHYLIB
- help
- This driver supports TI's DaVinci Ethernet .
-
- To compile this driver as a module, choose M here: the module
- will be called davinci_emac_driver. This is recommended.
-
-config TI_DAVINCI_MDIO
- tristate "TI DaVinci MDIO Support"
- depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
- select PHYLIB
- help
- This driver supports TI's DaVinci MDIO module.
-
- To compile this driver as a module, choose M here: the module
- will be called davinci_mdio. This is recommended.
-
-config TI_DAVINCI_CPDMA
- tristate "TI DaVinci CPDMA Support"
- depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
- help
- This driver supports TI's DaVinci CPDMA dma engine.
-
- To compile this driver as a module, choose M here: the module
- will be called davinci_cpdma. This is recommended.
-
-config DM9000
- tristate "DM9000 support"
- depends on ARM || BLACKFIN || MIPS
- select CRC32
- select MII
- ---help---
- Support for DM9000 chipset.
-
- To compile this driver as a module, choose M here. The module
- will be called dm9000.
-
-config DM9000_DEBUGLEVEL
- int "DM9000 maximum debug level"
- depends on DM9000
- default 4
- help
- The maximum level of debugging code compiled into the DM9000
- driver.
-
-config DM9000_FORCE_SIMPLE_PHY_POLL
- bool "Force simple NSR based PHY polling"
- depends on DM9000
- ---help---
- This configuration forces the DM9000 to use the NSR's LinkStatus
- bit to determine if the link is up or down instead of the more
- costly MII PHY reads. Note, this will not work if the chip is
- operating with an external PHY.
-
-config ENC28J60
- tristate "ENC28J60 support"
- depends on EXPERIMENTAL && SPI && NET_ETHERNET
- select CRC32
- ---help---
- Support for the Microchip EN28J60 ethernet chip.
-
- To compile this driver as a module, choose M here. The module will be
- called enc28j60.
-
-config ENC28J60_WRITEVERIFY
- bool "Enable write verify"
- depends on ENC28J60
- ---help---
- Enable the verify after the buffer write useful for debugging purpose.
- If unsure, say N.
-
-config ETHOC
- tristate "OpenCores 10/100 Mbps Ethernet MAC support"
- depends on NET_ETHERNET && HAS_IOMEM && HAS_DMA
- select MII
- select PHYLIB
- select CRC32
- select BITREVERSE
- help
- Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
-
-config GRETH
- tristate "Aeroflex Gaisler GRETH Ethernet MAC support"
- depends on SPARC
- select PHYLIB
- select CRC32
- help
- Say Y here if you want to use the Aeroflex Gaisler GRETH Ethernet MAC.
-
-config SMC911X
- tristate "SMSC LAN911[5678] support"
- select CRC32
- select MII
- depends on ARM || SUPERH || MN10300
- help
- This is a driver for SMSC's LAN911x series of Ethernet chipsets
- including the new LAN9115, LAN9116, LAN9117, and LAN9118.
- Say Y if you want it compiled into the kernel,
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- This driver is also available as a module. The module will be
- called smc911x. If you want to compile it as a module, say M
- here and read <file:Documentation/kbuild/modules.txt>
-
-config SMSC911X
- tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
- depends on ARM || SUPERH || BLACKFIN || MIPS || MN10300
- select CRC32
- select MII
- select PHYLIB
- ---help---
- Say Y here if you want support for SMSC LAN911x and LAN921x families
- of ethernet controllers.
-
- To compile this driver as a module, choose M here and read
- <file:Documentation/networking/net-modules.txt>. The module
- will be called smsc911x.
-
-config SMSC911X_ARCH_HOOKS
- def_bool n
- depends on SMSC911X
- help
- If the arch enables this, it allows the arch to implement various
- hooks for more comprehensive interrupt control and also to override
- the source of the MAC address.
-
-config NET_VENDOR_RACAL
- bool "Racal-Interlan (Micom) NI cards"
- depends on ISA
- help
- If you have a network (Ethernet) card belonging to this class, such
- as the NI5010, NI5210 or NI6210, say Y and read the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about NI cards. If you say Y, you will be asked for
- your specific card in the following questions.
-
-config NI5010
- tristate "NI5010 support (EXPERIMENTAL)"
- depends on NET_VENDOR_RACAL && ISA && EXPERIMENTAL && BROKEN_ON_SMP
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Note that this is still
- experimental code.
-
- To compile this driver as a module, choose M here. The module
- will be called ni5010.
-
-config NI52
- tristate "NI5210 support"
- depends on NET_VENDOR_RACAL && ISA
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ni52.
-
-config NI65
- tristate "NI6510 support"
- depends on NET_VENDOR_RACAL && ISA && ISA_DMA_API
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ni65.
-
-config DNET
- tristate "Dave ethernet support (DNET)"
- depends on NET_ETHERNET && HAS_IOMEM
- select PHYLIB
- help
- The Dave ethernet interface (DNET) is found on Qong Board FPGA.
- Say Y to include support for the DNET chip.
-
- To compile this driver as a module, choose M here: the module
- will be called dnet.
-
-source "drivers/net/tulip/Kconfig"
-
-config AT1700
- tristate "AT1700/1720 support (EXPERIMENTAL)"
- depends on (ISA || MCA_LEGACY) && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called at1700.
-
-config DEPCA
- tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support"
- depends on ISA || EISA || MCA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto> as well as
- <file:drivers/net/depca.c>.
-
- To compile this driver as a module, choose M here. The module
- will be called depca.
-
-config HP100
- tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support"
- depends on ISA || EISA || PCI
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called hp100.
-
-config NET_ISA
- bool "Other ISA cards"
- depends on ISA
- ---help---
- If your network (Ethernet) card hasn't been mentioned yet and its
- bus system (that's the way the cards talks to the other components
- of your computer) is ISA (as opposed to EISA, VLB or PCI), say Y.
- Make sure you know the name of your card. Read the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- If unsure, say Y.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the remaining ISA network card questions. If you say Y, you will be
- asked for your specific card in the following questions.
-
-config E2100
- tristate "Cabletron E21xx support"
- depends on NET_ISA
- select CRC32
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called e2100.
-
-config EWRK3
- tristate "EtherWORKS 3 (DE203, DE204, DE205) support"
- depends on NET_ISA
- select CRC32
- ---help---
- This driver supports the DE203, DE204 and DE205 network (Ethernet)
- cards. If this is for you, say Y and read
- <file:Documentation/networking/ewrk3.txt> in the kernel source as
- well as the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ewrk3.
-
-config EEXPRESS
- tristate "EtherExpress 16 support"
- depends on NET_ISA
- ---help---
- If you have an EtherExpress16 network (Ethernet) card, say Y and
- read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Note that the Intel
- EtherExpress16 card used to be regarded as a very poor choice
- because the driver was very unreliable. We now have a new driver
- that should do better.
-
- To compile this driver as a module, choose M here. The module
- will be called eexpress.
-
-config EEXPRESS_PRO
- tristate "EtherExpressPro support/EtherExpress 10 (i82595) support"
- depends on NET_ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y. This
- driver supports Intel i82595{FX,TX} based boards. Note however
- that the EtherExpress PRO/100 Ethernet card has its own separate
- driver. Please read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called eepro.
-
-config HPLAN_PLUS
- tristate "HP PCLAN+ (27247B and 27252A) support"
- depends on NET_ISA
- select CRC32
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called hp-plus.
-
-config HPLAN
- tristate "HP PCLAN (27245 and other 27xxx series) support"
- depends on NET_ISA
- select CRC32
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called hp.
-
-config LP486E
- tristate "LP486E on board Ethernet"
- depends on NET_ISA
- help
- Say Y here to support the 82596-based on-board Ethernet controller
- for the Panther motherboard, which is one of the two shipped in the
- Intel Professional Workstation.
-
-config ETH16I
- tristate "ICL EtherTeam 16i/32 support"
- depends on NET_ISA
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called eth16i.
-
-config NE2000
- tristate "NE2000/NE1000 support"
- depends on NET_ISA || (Q40 && m) || M32R || MACH_TX49XX
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Many Ethernet cards
- without a specific driver are compatible with NE2000.
-
- If you have a PCI NE2000 card however, say N here and Y to "PCI
- NE2000 and clone support" under "EISA, VLB, PCI and on board
- controllers" below. If you have a NE2000 card and are running on
- an MCA system (a bus system used on some IBM PS/2 computers and
- laptops), say N here and Y to "NE/2 (ne2000 MCA version) support",
- below.
-
- To compile this driver as a module, choose M here. The module
- will be called ne.
-
-config ZNET
- tristate "Zenith Z-Note support (EXPERIMENTAL)"
- depends on NET_ISA && EXPERIMENTAL && ISA_DMA_API
- help
- The Zenith Z-Note notebook computer has a built-in network
- (Ethernet) card, and this is the Linux driver for it. Note that the
- IBM Thinkpad 300 is compatible with the Z-Note and is also supported
- by this driver. Read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
-config SEEQ8005
- tristate "SEEQ8005 support (EXPERIMENTAL)"
- depends on NET_ISA && EXPERIMENTAL
- help
- This is a driver for the SEEQ 8005 network (Ethernet) card. If this
- is for you, read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called seeq8005.
-
-config NE2_MCA
- tristate "NE/2 (ne2000 MCA version) support"
- depends on MCA_LEGACY
- select CRC32
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ne2.
-
-config IBMLANA
- tristate "IBM LAN Adapter/A support"
- depends on MCA
- ---help---
- This is a Micro Channel Ethernet adapter. You need to set
- CONFIG_MCA to use this driver. It is both available as an in-kernel
- driver and as a module.
-
- To compile this driver as a module, choose M here. The only
- currently supported card is the IBM LAN Adapter/A for Ethernet. It
- will both support 16K and 32K memory windows, however a 32K window
- gives a better security against packet losses. Usage of multiple
- boards with this driver should be possible, but has not been tested
- up to now due to lack of hardware.
-
-config IBMVETH
- tristate "IBM LAN Virtual Ethernet support"
- depends on PPC_PSERIES
- ---help---
- This driver supports virtual ethernet adapters on newer IBM iSeries
- and pSeries systems.
-
- To compile this driver as a module, choose M here. The module will
- be called ibmveth.
-
-source "drivers/net/ibm_newemac/Kconfig"
-
-config NET_PCI
- bool "EISA, VLB, PCI and on board controllers"
- depends on ISA || EISA || PCI
- help
- This is another class of network cards which attach directly to the
- bus. If you have one of those, say Y and read the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about this class of network cards. If you say Y, you
- will be asked for your specific card in the following questions. If
- you are unsure, say Y.
-
-config PCNET32
- tristate "AMD PCnet32 PCI support"
- depends on NET_PCI && PCI
- select CRC32
- select MII
- help
- If you have a PCnet32 or PCnetPCI based network (Ethernet) card,
- answer Y here and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called pcnet32.
-
-config AMD8111_ETH
- tristate "AMD 8111 (new PCI lance) support"
- depends on NET_PCI && PCI
- select CRC32
- select MII
- help
- If you have an AMD 8111-based PCI lance ethernet card,
- answer Y here and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called amd8111e.
-
-config ADAPTEC_STARFIRE
- tristate "Adaptec Starfire/DuraLAN support"
- depends on NET_PCI && PCI
- select CRC32
- select MII
- help
- Say Y here if you have an Adaptec Starfire (or DuraLAN) PCI network
- adapter. The DuraLAN chip is used on the 64 bit PCI boards from
- Adaptec e.g. the ANA-6922A. The older 32 bit boards use the tulip
- driver.
-
- To compile this driver as a module, choose M here: the module
- will be called starfire. This is recommended.
-
-config AC3200
- tristate "Ansel Communications EISA 3200 support (EXPERIMENTAL)"
- depends on NET_PCI && (ISA || EISA) && EXPERIMENTAL
- select CRC32
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ac3200.
-
-config KSZ884X_PCI
- tristate "Micrel KSZ8841/2 PCI"
- depends on NET_PCI && PCI
- select MII
- select CRC32
- help
- This PCI driver is for Micrel KSZ8841/KSZ8842 PCI Ethernet chip.
-
- To compile this driver as a module, choose M here. The module
- will be called ksz884x.
-
-config APRICOT
- tristate "Apricot Xen-II on board Ethernet"
- depends on NET_PCI && ISA
- help
- If you have a network (Ethernet) controller of this type, say Y and
- read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called apricot.
-
-config B44
- tristate "Broadcom 440x/47xx ethernet support"
- depends on SSB_POSSIBLE && HAS_DMA
- select SSB
- select MII
- help
- If you have a network (Ethernet) controller of this type, say Y
- or M and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called b44.
-
-# Auto-select SSB PCI-HOST support, if possible
-config B44_PCI_AUTOSELECT
- bool
- depends on B44 && SSB_PCIHOST_POSSIBLE
- select SSB_PCIHOST
- default y
-
-# Auto-select SSB PCICORE driver, if possible
-config B44_PCICORE_AUTOSELECT
- bool
- depends on B44 && SSB_DRIVER_PCICORE_POSSIBLE
- select SSB_DRIVER_PCICORE
- default y
-
-config B44_PCI
- bool
- depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
- default y
-
-config FORCEDETH
- tristate "nForce Ethernet support"
- depends on NET_PCI && PCI
- help
- If you have a network (Ethernet) controller of this type, say Y and
- read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called forcedeth.
-
-config CS89x0
- tristate "CS89x0 support"
- depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
- || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440)
- ---help---
- Support for CS89x0 chipset based Ethernet cards. If you have a
- network (Ethernet) card of this type, say Y and read the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto> as well as
- <file:Documentation/networking/cs89x0.txt>.
-
- To compile this driver as a module, choose M here. The module
- will be called cs89x0.
-
-config CS89x0_NONISA_IRQ
- def_bool y
- depends on CS89x0 != n
- depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
-
-config TC35815
- tristate "TOSHIBA TC35815 Ethernet support"
- depends on NET_PCI && PCI && MIPS
- select PHYLIB
-
-config E100
- tristate "Intel(R) PRO/100+ support"
- depends on NET_PCI && PCI
- select MII
- ---help---
- This driver supports Intel(R) PRO/100 family of adapters.
- To verify that your adapter is supported, find the board ID number
- on the adapter. Look for a label that has a barcode and a number
- in the format 123456-001 (six digits hyphen three digits).
-
- Use the above information and the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- to identify the adapter.
-
- For the latest Intel PRO/100 network driver for Linux, see:
-
- <http://www.intel.com/p/en_US/support/highlights/network/pro100plus>
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/e100.txt>.
-
- To compile this driver as a module, choose M here. The module
- will be called e100.
-
-config LNE390
- tristate "Mylex EISA LNE390A/B support (EXPERIMENTAL)"
- depends on NET_PCI && EISA && EXPERIMENTAL
- select CRC32
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called lne390.
-
-config FEALNX
- tristate "Myson MTD-8xx PCI Ethernet support"
- depends on NET_PCI && PCI
- select CRC32
- select MII
- help
- Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
- cards. <http://www.myson.com.tw/>
-
-config NATSEMI
- tristate "National Semiconductor DP8381x series PCI Ethernet support"
- depends on NET_PCI && PCI
- select CRC32
- help
- This driver is for the National Semiconductor DP83810 series,
- which is used in cards from PureData, NetGear, Linksys
- and others, including the 83815 chip.
- More specific information and updates are available from
- <http://www.scyld.com/network/natsemi.html>.
-
-config NE2K_PCI
- tristate "PCI NE2000 and clones support (see help)"
- depends on NET_PCI && PCI
- select CRC32
- ---help---
- This driver is for NE2000 compatible PCI cards. It will not work
- with ISA NE2000 cards (they have their own driver, "NE2000/NE1000
- support" below). If you have a PCI NE2000 network (Ethernet) card,
- say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- This driver also works for the following NE2000 clone cards:
- RealTek RTL-8029 Winbond 89C940 Compex RL2000 KTI ET32P2
- NetVin NV5000SC Via 86C926 SureCom NE34 Winbond
- Holtek HT80232 Holtek HT80229
-
- To compile this driver as a module, choose M here. The module
- will be called ne2k-pci.
-
-config NE3210
- tristate "Novell/Eagle/Microdyne NE3210 EISA support (EXPERIMENTAL)"
- depends on NET_PCI && EISA && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Note that this driver
- will NOT WORK for NE3200 cards as they are completely different.
-
- To compile this driver as a module, choose M here. The module
- will be called ne3210.
-
-config ES3210
- tristate "Racal-Interlan EISA ES3210 support (EXPERIMENTAL)"
- depends on NET_PCI && EISA && EXPERIMENTAL
- select CRC32
- help
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called es3210.
-
-config 8139CP
- tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support (EXPERIMENTAL)"
- depends on NET_PCI && PCI && EXPERIMENTAL
- select CRC32
- select MII
- help
- This is a driver for the Fast Ethernet PCI network cards based on
- the RTL8139C+ chips. If you have one of those, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here: the module
- will be called 8139cp. This is recommended.
-
-config 8139TOO
- tristate "RealTek RTL-8129/8130/8139 PCI Fast Ethernet Adapter support"
- depends on NET_PCI && PCI
- select CRC32
- select MII
- ---help---
- This is a driver for the Fast Ethernet PCI network cards based on
- the RTL 8129/8130/8139 chips. If you have one of those, say Y and
- read the Ethernet-HOWTO <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here: the module
- will be called 8139too. This is recommended.
-
-config 8139TOO_PIO
- bool "Use PIO instead of MMIO"
- default y
- depends on 8139TOO
- help
- This instructs the driver to use programmed I/O ports (PIO) instead
- of PCI shared memory (MMIO). This can possibly solve some problems
- in case your mainboard has memory consistency issues. If unsure,
- say N.
-
-config 8139TOO_TUNE_TWISTER
- bool "Support for uncommon RTL-8139 rev. K (automatic channel equalization)"
- depends on 8139TOO
- help
- This implements a function which might come in handy in case you
- are using low quality on long cabling. It is required for RealTek
- RTL-8139 revision K boards, and totally unused otherwise. It tries
- to match the transceiver to the cable characteristics. This is
- experimental since hardly documented by the manufacturer.
- If unsure, say Y.
-
-config 8139TOO_8129
- bool "Support for older RTL-8129/8130 boards"
- depends on 8139TOO
- help
- This enables support for the older and uncommon RTL-8129 and
- RTL-8130 chips, which support MII via an external transceiver,
- instead of an internal one. Disabling this option will save some
- memory by making the code size smaller. If unsure, say Y.
-
-config 8139_OLD_RX_RESET
- bool "Use older RX-reset method"
- depends on 8139TOO
- help
- The 8139too driver was recently updated to contain a more rapid
- reset sequence, in the face of severe receive errors. This "new"
- RX-reset method should be adequate for all boards. But if you
- experience problems, you can enable this option to restore the
- old RX-reset behavior. If unsure, say N.
-
-config R6040
- tristate "RDC R6040 Fast Ethernet Adapter support"
- depends on NET_PCI && PCI
- select CRC32
- select MII
- select PHYLIB
- help
- This is a driver for the R6040 Fast Ethernet MACs found in the
- the RDC R-321x System-on-chips.
-
- To compile this driver as a module, choose M here: the module
- will be called r6040. This is recommended.
-
-config SIS900
- tristate "SiS 900/7016 PCI Fast Ethernet Adapter support"
- depends on NET_PCI && PCI
- select CRC32
- select MII
- ---help---
- This is a driver for the Fast Ethernet PCI network cards based on
- the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in
- SiS 630 and SiS 540 chipsets.
-
- This driver also supports AMD 79C901 HomePNA so that you can use
- your phone line as a network cable.
-
- To compile this driver as a module, choose M here: the module
- will be called sis900. This is recommended.
-
-config EPIC100
- tristate "SMC EtherPower II"
- depends on NET_PCI && PCI
- select CRC32
- select MII
- help
- This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC,
- which is based on the SMC83c17x (EPIC/100).
- More specific information and updates are available from
- <http://www.scyld.com/network/epic100.html>.
-
-config SMSC9420
- tristate "SMSC LAN9420 PCI ethernet adapter support"
- depends on NET_PCI && PCI
- select CRC32
- select PHYLIB
- select SMSC_PHY
- help
- This is a driver for SMSC's LAN9420 PCI ethernet adapter.
- Say Y if you want it compiled into the kernel,
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- This driver is also available as a module. The module will be
- called smsc9420. If you want to compile it as a module, say M
- here and read <file:Documentation/kbuild/modules.txt>
-
-config SUNDANCE
- tristate "Sundance Alta support"
- depends on NET_PCI && PCI
- select CRC32
- select MII
- help
- This driver is for the Sundance "Alta" chip.
- More specific information and updates are available from
- <http://www.scyld.com/network/sundance.html>.
-
-config SUNDANCE_MMIO
- bool "Use MMIO instead of PIO"
- depends on SUNDANCE
- help
- Enable memory-mapped I/O for interaction with Sundance NIC registers.
- Do NOT enable this by default, PIO (enabled when MMIO is disabled)
- is known to solve bugs on certain chips.
-
- If unsure, say N.
-
-config TLAN
- tristate "TI ThunderLAN support"
- depends on NET_PCI && (PCI || EISA)
- ---help---
- If you have a PCI Ethernet network card based on the ThunderLAN chip
- which is supported by this driver, say Y and read the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- Devices currently supported by this driver are Compaq Netelligent,
- Compaq NetFlex and Olicom cards. Please read the file
- <file:Documentation/networking/tlan.txt> for more details.
-
- To compile this driver as a module, choose M here. The module
- will be called tlan.
-
- Please email feedback to <torben.mathiasen@compaq.com>.
-
-config KS8842
- tristate "Micrel KSZ8841/42 with generic bus interface"
- depends on HAS_IOMEM && DMA_ENGINE
- help
- This platform driver is for KSZ8841(1-port) / KS8842(2-port)
- ethernet switch chip (managed, VLAN, QoS) from Micrel or
- Timberdale(FPGA).
-
-config KS8851
- tristate "Micrel KS8851 SPI"
- depends on SPI
- select MII
- select CRC32
- help
- SPI driver for Micrel KS8851 SPI attached network chip.
-
-config KS8851_MLL
- tristate "Micrel KS8851 MLL"
- depends on HAS_IOMEM
- select MII
- help
- This platform driver is for Micrel KS8851 Address/data bus
- multiplexed network chip.
-
-config VIA_RHINE
- tristate "VIA Rhine support"
- depends on NET_PCI && PCI
- select CRC32
- select MII
- help
- If you have a VIA "Rhine" based network card (Rhine-I (VT86C100A),
- Rhine-II (VT6102), or Rhine-III (VT6105)), say Y here. Rhine-type
- Ethernet functions can also be found integrated on South Bridges
- (e.g. VT8235).
-
- To compile this driver as a module, choose M here. The module
- will be called via-rhine.
-
-config VIA_RHINE_MMIO
- bool "Use MMIO instead of PIO"
- depends on VIA_RHINE
- help
- This instructs the driver to use PCI shared memory (MMIO) instead of
- programmed I/O ports (PIO). Enabling this gives an improvement in
- processing time in parts of the driver.
-
- If unsure, say Y.
-
-config SC92031
- tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
- depends on NET_PCI && PCI && EXPERIMENTAL
- select CRC32
- ---help---
- This is a driver for the Fast Ethernet PCI network cards based on
- the Silan SC92031 chip (sometimes also called Rsltek 8139D). If you
- have one of these, say Y here.
-
- To compile this driver as a module, choose M here: the module
- will be called sc92031. This is recommended.
-
-config CPMAC
- tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
- depends on NET_ETHERNET && EXPERIMENTAL && AR7
- select PHYLIB
- help
- TI AR7 CPMAC Ethernet support
-
-config NET_POCKET
- bool "Pocket and portable adapters"
- depends on PARPORT
- ---help---
- Cute little network (Ethernet) devices which attach to the parallel
- port ("pocket adapters"), commonly used with laptops. If you have
- one of those, say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- If you want to plug a network (or some other) card into the PCMCIA
- (or PC-card) slot of your laptop instead (PCMCIA is the standard for
- credit card size extension cards used by all modern laptops), you
- need the pcmcia-cs package (location contained in the file
- <file:Documentation/Changes>) and you can say N here.
-
- Laptop users should read the Linux Laptop home page at
- <http://www.linux-on-laptops.com/> or
- Tuxmobil - Linux on Mobile Computers at <http://www.tuxmobil.org/>.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about this class of network devices. If you say Y, you
- will be asked for your specific device in the following questions.
-
-config ATP
- tristate "AT-LAN-TEC/RealTek pocket adapter support"
- depends on NET_POCKET && PARPORT && X86
- select CRC32
- ---help---
- This is a network (Ethernet) device which attaches to your parallel
- port. Read <file:drivers/net/atp.c> as well as the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>, if you
- want to use this. If you intend to use this driver, you should have
- said N to the "Parallel printer support", because the two drivers
- don't like each other.
-
- To compile this driver as a module, choose M here: the module
- will be called atp.
-
-config DE600
- tristate "D-Link DE600 pocket adapter support"
- depends on NET_POCKET && PARPORT
- ---help---
- This is a network (Ethernet) device which attaches to your parallel
- port. Read <file:Documentation/networking/DLINK.txt> as well as the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, if you want to use
- this. It is possible to have several devices share a single parallel
- port and it is safe to compile the corresponding drivers into the
- kernel.
-
- To compile this driver as a module, choose M here: the module
- will be called de600.
-
-config DE620
- tristate "D-Link DE620 pocket adapter support"
- depends on NET_POCKET && PARPORT
- ---help---
- This is a network (Ethernet) device which attaches to your parallel
- port. Read <file:Documentation/networking/DLINK.txt> as well as the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, if you want to use
- this. It is possible to have several devices share a single parallel
- port and it is safe to compile the corresponding drivers into the
- kernel.
-
- To compile this driver as a module, choose M here: the module
- will be called de620.
-
-config SGISEEQ
- tristate "SGI Seeq ethernet controller support"
- depends on SGI_HAS_SEEQ
- help
- Say Y here if you have an Seeq based Ethernet network card. This is
- used in many Silicon Graphics machines.
-
-config DECLANCE
- tristate "DEC LANCE ethernet controller support"
- depends on MACH_DECSTATION
- select CRC32
- help
- This driver is for the series of Ethernet controllers produced by
- DEC (now Compaq) based on the AMD Lance chipset, including the
- DEPCA series. (This chipset is better known via the NE2100 cards.)
-
-config FEC
- bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
- depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
- IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC
- default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM
- select PHYLIB
- help
- Say Y here if you want to use the built-in 10/100 Fast ethernet
- controller on some Motorola ColdFire and Freescale i.MX processors.
-
-config FEC_MPC52xx
- tristate "MPC52xx FEC driver"
- depends on PPC_MPC52xx && PPC_BESTCOMM
- select CRC32
- select PHYLIB
- select PPC_BESTCOMM_FEC
- ---help---
- This option enables support for the MPC5200's on-chip
- Fast Ethernet Controller
- If compiled as module, it will be called fec_mpc52xx.
-
-config FEC_MPC52xx_MDIO
- bool "MPC52xx FEC MDIO bus driver"
- depends on FEC_MPC52xx
- default y
- ---help---
- The MPC5200's FEC can connect to the Ethernet either with
- an external MII PHY chip or 10 Mbps 7-wire interface
- (Motorola? industry standard).
- If your board uses an external PHY connected to FEC, enable this.
- If not sure, enable.
- If compiled as module, it will be called fec_mpc52xx_phy.
-
-config NE_H8300
- tristate "NE2000 compatible support for H8/300"
- depends on H8300
- help
- Say Y here if you want to use the NE2000 compatible
- controller on the Renesas H8/300 processor.
-
-config ATL2
- tristate "Atheros L2 Fast Ethernet support"
- depends on PCI
- select CRC32
- select MII
- help
- This driver supports the Atheros L2 fast ethernet adapter.
-
- To compile this driver as a module, choose M here. The module
- will be called atl2.
-
-config XILINX_EMACLITE
- tristate "Xilinx 10/100 Ethernet Lite support"
- depends on PPC32 || MICROBLAZE
- select PHYLIB
- help
- This driver supports the 10/100 Ethernet Lite from Xilinx.
-
-config BCM63XX_ENET
- tristate "Broadcom 63xx internal mac support"
- depends on BCM63XX
- select MII
- select PHYLIB
- help
- This driver supports the ethernet MACs in the Broadcom 63xx
- MIPS chipset family (BCM63XX).
-
-config FTMAC100
- tristate "Faraday FTMAC100 10/100 Ethernet support"
- depends on ARM
- select MII
- help
- This driver supports the FTMAC100 10/100 Ethernet controller
- from Faraday. It is used on Faraday A320, Andes AG101 and some
- other ARM/NDS32 SoC's.
-
-config LANTIQ_ETOP
- tristate "Lantiq SoC ETOP driver"
- depends on SOC_TYPE_XWAY
- help
- Support for the MII0 inside the Lantiq SoC
-
-
-source "drivers/net/fs_enet/Kconfig"
-
-source "drivers/net/octeon/Kconfig"
-
-endif # NET_ETHERNET
-
-#
-# Gigabit Ethernet
-#
-
-menuconfig NETDEV_1000
- bool "Ethernet (1000 Mbit)"
- depends on !UML
- default y
- ---help---
- Ethernet (also called IEEE 802.3 or ISO 8802-2) is the most common
- type of Local Area Network (LAN) in universities and companies.
-
- Say Y here to get to see options for Gigabit Ethernet drivers.
- This option alone does not add any kernel code.
- Note that drivers supporting both 100 and 1000 MBit may be listed
- under "Ethernet (10 or 100MBit)" instead.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if NETDEV_1000
-
-config ACENIC
- tristate "Alteon AceNIC/3Com 3C985/NetGear GA620 Gigabit support"
- depends on PCI
- ---help---
- Say Y here if you have an Alteon AceNIC, 3Com 3C985(B), NetGear
- GA620, SGI Gigabit or Farallon PN9000-SX PCI Gigabit Ethernet
- adapter. The driver allows for using the Jumbo Frame option (9000
- bytes/frame) however it requires that your switches can handle this
- as well. To enable Jumbo Frames, add `mtu 9000' to your ifconfig
- line.
-
- To compile this driver as a module, choose M here: the
- module will be called acenic.
-
-config ACENIC_OMIT_TIGON_I
- bool "Omit support for old Tigon I based AceNICs"
- depends on ACENIC
- help
- Say Y here if you only have Tigon II based AceNICs and want to leave
- out support for the older Tigon I based cards which are no longer
- being sold (ie. the original Alteon AceNIC and 3Com 3C985 (non B
- version)). This will reduce the size of the driver object by
- app. 100KB. If you are not sure whether your card is a Tigon I or a
- Tigon II, say N here.
-
- The safe and default value for this is N.
-
-config DL2K
- tristate "DL2000/TC902x-based Gigabit Ethernet support"
- depends on PCI
- select CRC32
- help
- This driver supports DL2000/TC902x-based Gigabit ethernet cards,
- which includes
- D-Link DGE-550T Gigabit Ethernet Adapter.
- D-Link DL2000-based Gigabit Ethernet Adapter.
- Sundance/Tamarack TC902x Gigabit Ethernet Adapter.
-
- To compile this driver as a module, choose M here: the
- module will be called dl2k.
-
-config E1000
- tristate "Intel(R) PRO/1000 Gigabit Ethernet support"
- depends on PCI
- ---help---
- This driver supports Intel(R) PRO/1000 gigabit ethernet family of
- adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
-
- <http://support.intel.com>
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.txt>.
-
- To compile this driver as a module, choose M here. The module
- will be called e1000.
-
-config E1000E
- tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
- depends on PCI && (!SPARC32 || BROKEN)
- select CRC32
- ---help---
- This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
- ethernet family of adapters. For PCI or PCI-X e1000 adapters,
- use the regular e1000 driver For more information on how to
- identify your adapter, go to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
-
- <http://support.intel.com>
-
- To compile this driver as a module, choose M here. The module
- will be called e1000e.
-
-config IP1000
- tristate "IP1000 Gigabit Ethernet support"
- depends on PCI && EXPERIMENTAL
- select MII
- ---help---
- This driver supports IP1000 gigabit Ethernet cards.
-
- To compile this driver as a module, choose M here: the module
- will be called ipg. This is recommended.
-
-config IGB
- tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
- depends on PCI
- ---help---
- This driver supports Intel(R) 82575/82576 gigabit ethernet family of
- adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
-
- <http://support.intel.com>
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.txt>.
-
- To compile this driver as a module, choose M here. The module
- will be called igb.
-
-config IGB_DCA
- bool "Direct Cache Access (DCA) Support"
- default y
- depends on IGB && DCA && !(IGB=y && DCA=m)
- ---help---
- Say Y here if you want to use Direct Cache Access (DCA) in the
- driver. DCA is a method for warming the CPU cache before data
- is used, with the intent of lessening the impact of cache misses.
-
-config IGBVF
- tristate "Intel(R) 82576 Virtual Function Ethernet support"
- depends on PCI
- ---help---
- This driver supports Intel(R) 82576 virtual functions. For more
- information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
-
- <http://support.intel.com>
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.txt>.
-
- To compile this driver as a module, choose M here. The module
- will be called igbvf.
-
-source "drivers/net/ixp2000/Kconfig"
-
-config NS83820
- tristate "National Semiconductor DP83820 support"
- depends on PCI
- help
- This is a driver for the National Semiconductor DP83820 series
- of gigabit ethernet MACs. Cards using this chipset include
- the D-Link DGE-500T, PureData's PDP8023Z-TG, SMC's SMC9462TX,
- SOHO-GA2000T, SOHO-GA2500T. The driver supports the use of
- zero copy.
-
-config HAMACHI
- tristate "Packet Engines Hamachi GNIC-II support"
- depends on PCI
- select MII
- help
- If you have a Gigabit Ethernet card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module will be
- called hamachi.
-
-config YELLOWFIN
- tristate "Packet Engines Yellowfin Gigabit-NIC support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
- select CRC32
- ---help---
- Say Y here if you have a Packet Engines G-NIC PCI Gigabit Ethernet
- adapter or the SYM53C885 Ethernet controller. The Gigabit adapter is
- used by the Beowulf Linux cluster project. See
- <http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html> for more
- information about this driver in particular and Beowulf in general.
-
- To compile this driver as a module, choose M here: the module
- will be called yellowfin. This is recommended.
-
-config R8169
- tristate "Realtek 8169 gigabit ethernet support"
- depends on PCI
- select FW_LOADER
- select CRC32
- select MII
- ---help---
- Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
-
- To compile this driver as a module, choose M here: the module
- will be called r8169. This is recommended.
-
-config SB1250_MAC
- tristate "SB1250 Gigabit Ethernet support"
- depends on SIBYTE_SB1xxx_SOC
- select PHYLIB
- ---help---
- This driver supports Gigabit Ethernet interfaces based on the
- Broadcom SiByte family of System-On-a-Chip parts. They include
- the BCM1120, BCM1125, BCM1125H, BCM1250, BCM1255, BCM1280, BCM1455
- and BCM1480 chips.
-
- To compile this driver as a module, choose M here: the module
- will be called sb1250-mac.
-
-config SIS190
- tristate "SiS190/SiS191 gigabit ethernet support"
- depends on PCI
- select CRC32
- select MII
- ---help---
- Say Y here if you have a SiS 190 PCI Fast Ethernet adapter or
- a SiS 191 PCI Gigabit Ethernet adapter. Both are expected to
- appear in lan on motherboard designs which are based on SiS 965
- and SiS 966 south bridge.
-
- To compile this driver as a module, choose M here: the module
- will be called sis190. This is recommended.
-
-config SKGE
- tristate "Marvell Yukon Gigabit Ethernet support"
- depends on PCI
- select CRC32
- ---help---
- This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
- and related Gigabit Ethernet adapters. It is a new smaller driver
- with better performance and more complete ethtool support.
-
- It does not support the link failover and network management
- features that "portable" vendor supplied sk98lin driver does.
-
- This driver supports adapters based on the original Yukon chipset:
- Marvell 88E8001, Belkin F5D5005, CNet GigaCard, DLink DGE-530T,
- Linksys EG1032/EG1064, 3Com 3C940/3C940B, SysKonnect SK-9871/9872.
-
- It does not support the newer Yukon2 chipset: a separate driver,
- sky2, is provided for these adapters.
-
- To compile this driver as a module, choose M here: the module
- will be called skge. This is recommended.
-
-config SKGE_DEBUG
- bool "Debugging interface"
- depends on SKGE && DEBUG_FS
- help
- This option adds the ability to dump driver state for debugging.
- The file /sys/kernel/debug/skge/ethX displays the state of the internal
- transmit and receive rings.
-
- If unsure, say N.
-
-config SKGE_GENESIS
- bool "Support for older SysKonnect Genesis boards"
- depends on SKGE
- help
- This enables support for the older and uncommon SysKonnect Genesis
- chips, which support MII via an external transceiver, instead of
- an internal one. Disabling this option will save some memory
- by making code smaller. If unsure say Y.
-
-config SKY2
- tristate "Marvell Yukon 2 support"
- depends on PCI
- select CRC32
- ---help---
- This driver supports Gigabit Ethernet adapters based on the
- Marvell Yukon 2 chipset:
- Marvell 88E8021/88E8022/88E8035/88E8036/88E8038/88E8050/88E8052/
- 88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21
-
- There is companion driver for the older Marvell Yukon and
- SysKonnect Genesis based adapters: skge.
-
- To compile this driver as a module, choose M here: the module
- will be called sky2. This is recommended.
-
-config SKY2_DEBUG
- bool "Debugging interface"
- depends on SKY2 && DEBUG_FS
- help
- This option adds the ability to dump driver state for debugging.
- The file /sys/kernel/debug/sky2/ethX displays the state of the internal
- transmit and receive rings.
-
- If unsure, say N.
-
-config VIA_VELOCITY
- tristate "VIA Velocity support"
- depends on PCI
- select CRC32
- select CRC_CCITT
- select MII
- help
- If you have a VIA "Velocity" based network card say Y here.
-
- To compile this driver as a module, choose M here. The module
- will be called via-velocity.
-
-config TIGON3
- tristate "Broadcom Tigon3 support"
- depends on PCI
- select PHYLIB
- help
- This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
-
- To compile this driver as a module, choose M here: the module
- will be called tg3. This is recommended.
-
-config BNX2
- tristate "Broadcom NetXtremeII support"
- depends on PCI
- select CRC32
- select FW_LOADER
- help
- This driver supports Broadcom NetXtremeII gigabit Ethernet cards.
-
- To compile this driver as a module, choose M here: the module
- will be called bnx2. This is recommended.
-
-config CNIC
- tristate "Broadcom CNIC support"
- depends on PCI
- select BNX2
- select UIO
- help
- This driver supports offload features of Broadcom NetXtremeII
- gigabit Ethernet cards.
-
- To compile this driver as a module, choose M here: the module
- will be called cnic. This is recommended.
-
-config SPIDER_NET
- tristate "Spider Gigabit Ethernet driver"
- depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
- select FW_LOADER
- help
- This driver supports the Gigabit Ethernet chips present on the
- Cell Processor-Based Blades from IBM.
-
-config TSI108_ETH
- tristate "Tundra TSI108 gigabit Ethernet support"
- depends on TSI108_BRIDGE
- help
- This driver supports Tundra TSI108 gigabit Ethernet ports.
- To compile this driver as a module, choose M here: the module
- will be called tsi108_eth.
-
-config GELIC_NET
- tristate "PS3 Gigabit Ethernet driver"
- depends on PPC_PS3
- select PS3_SYS_MANAGER
- help
- This driver supports the network device on the PS3 game
- console. This driver has built-in support for Ethernet.
-
- To compile this driver as a module, choose M here: the
- module will be called ps3_gelic.
-
-config GELIC_WIRELESS
- bool "PS3 Wireless support"
- depends on WLAN
- depends on GELIC_NET
- select WIRELESS_EXT
- help
- This option adds the support for the wireless feature of PS3.
- If you have the wireless-less model of PS3 or have no plan to
- use wireless feature, disabling this option saves memory. As
- the driver automatically distinguishes the models, you can
- safely enable this option even if you have a wireless-less model.
-
-config FSL_PQ_MDIO
- tristate "Freescale PQ MDIO"
- depends on FSL_SOC
- select PHYLIB
- help
- This driver supports the MDIO bus used by the gianfar and UCC drivers.
-
-config GIANFAR
- tristate "Gianfar Ethernet"
- depends on FSL_SOC
- select FSL_PQ_MDIO
- select PHYLIB
- select CRC32
- help
- This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
- and MPC86xx family of chips, and the FEC on the 8540.
-
-config UCC_GETH
- tristate "Freescale QE Gigabit Ethernet"
- depends on QUICC_ENGINE
- select FSL_PQ_MDIO
- select PHYLIB
- help
- This driver supports the Gigabit Ethernet mode of the QUICC Engine,
- which is available on some Freescale SOCs.
-
-config UGETH_TX_ON_DEMAND
- bool "Transmit on Demand support"
- depends on UCC_GETH
-
-config MV643XX_ETH
- tristate "Marvell Discovery (643XX) and Orion ethernet support"
- depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
- select INET_LRO
- select PHYLIB
- help
- This driver supports the gigabit ethernet MACs in the
- Marvell Discovery PPC/MIPS chipset family (MV643XX) and
- in the Marvell Orion ARM SoC family.
-
- Some boards that use the Discovery chipset are the Momenco
- Ocelot C and Jaguar ATX and Pegasos II.
-
-config XILINX_LL_TEMAC
- tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
- depends on PPC || MICROBLAZE
- select PHYLIB
- help
- This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
- core used in Xilinx Spartan and Virtex FPGAs
-
-config QLA3XXX
- tristate "QLogic QLA3XXX Network Driver Support"
- depends on PCI
- help
- This driver supports QLogic ISP3XXX gigabit Ethernet cards.
-
- To compile this driver as a module, choose M here: the module
- will be called qla3xxx.
-
-config ATL1
- tristate "Atheros/Attansic L1 Gigabit Ethernet support"
- depends on PCI
- select CRC32
- select MII
- help
- This driver supports the Atheros/Attansic L1 gigabit ethernet
- adapter.
-
- To compile this driver as a module, choose M here. The module
- will be called atl1.
-
-config ATL1E
- tristate "Atheros L1E Gigabit Ethernet support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
- select CRC32
- select MII
- help
- This driver supports the Atheros L1E gigabit ethernet adapter.
-
- To compile this driver as a module, choose M here. The module
- will be called atl1e.
-
-config ATL1C
- tristate "Atheros L1C Gigabit Ethernet support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
- select CRC32
- select MII
- help
- This driver supports the Atheros L1C gigabit ethernet adapter.
-
- To compile this driver as a module, choose M here. The module
- will be called atl1c.
-
-config JME
- tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
- depends on PCI
- select CRC32
- select MII
- ---help---
- This driver supports the PCI-Express gigabit ethernet adapters
- based on JMicron JMC250 chipset.
-
- To compile this driver as a module, choose M here. The module
- will be called jme.
-
-config S6GMAC
- tristate "S6105 GMAC ethernet support"
- depends on XTENSA_VARIANT_S6000
- select PHYLIB
- help
- This driver supports the on chip ethernet device on the
- S6105 xtensa processor.
-
- To compile this driver as a module, choose M here. The module
- will be called s6gmac.
-
-source "drivers/net/stmmac/Kconfig"
-
-config PCH_GBE
- tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
- depends on PCI
- select MII
- ---help---
- This is a gigabit ethernet driver for EG20T PCH.
- EG20T PCH is the platform controller hub that is used in Intel's
- general embedded platform.
- EG20T PCH has Gigabit Ethernet interface.
- Using this interface, it is able to access system devices connected
- to Gigabit Ethernet.
- This driver enables Gigabit Ethernet function.
-
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7223/ML7831.
- ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
- purpose use.
- ML7223/ML7831 is companion chip for Intel Atom E6xx series.
- ML7223/ML7831 is completely compatible for Intel EG20T PCH.
-
-config FTGMAC100
- tristate "Faraday FTGMAC100 Gigabit Ethernet support"
- depends on ARM
- select PHYLIB
- help
- This driver supports the FTGMAC100 Gigabit Ethernet controller
- from Faraday. It is used on Faraday A369, Andes AG102 and some
- other ARM/NDS32 SoC's.
-
-endif # NETDEV_1000
-
-#
-# 10 Gigabit Ethernet
-#
-
-menuconfig NETDEV_10000
- bool "Ethernet (10000 Mbit)"
- depends on !UML
- default y
- ---help---
- Say Y here to get to see options for 10 Gigabit Ethernet drivers.
- This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if NETDEV_10000
-
-config MDIO
- tristate
-
-config CHELSIO_T1
- tristate "Chelsio 10Gb Ethernet support"
- depends on PCI
- select CRC32
- select MDIO
- help
- This driver supports Chelsio gigabit and 10-gigabit
- Ethernet cards. More information about adapter features and
- performance tuning is in <file:Documentation/networking/cxgb.txt>.
-
- For general information about Chelsio and our products, visit
- our website at <http://www.chelsio.com>.
-
- For customer support, please visit our customer support page at
- <http://www.chelsio.com/support.html>.
-
- Please send feedback to <linux-bugs@chelsio.com>.
-
- To compile this driver as a module, choose M here: the module
- will be called cxgb.
-
-config CHELSIO_T1_1G
- bool "Chelsio gigabit Ethernet support"
- depends on CHELSIO_T1
- help
- Enables support for Chelsio's gigabit Ethernet PCI cards. If you
- are using only 10G cards say 'N' here.
-
-config CHELSIO_T3
- tristate "Chelsio Communications T3 10Gb Ethernet support"
- depends on PCI && INET
- select FW_LOADER
- select MDIO
- help
- This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
- adapters.
-
- For general information about Chelsio and our products, visit
- our website at <http://www.chelsio.com>.
-
- For customer support, please visit our customer support page at
- <http://www.chelsio.com/support.html>.
-
- Please send feedback to <linux-bugs@chelsio.com>.
-
- To compile this driver as a module, choose M here: the module
- will be called cxgb3.
-
-config CHELSIO_T4
- tristate "Chelsio Communications T4 Ethernet support"
- depends on PCI
- select FW_LOADER
- select MDIO
- help
- This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
- adapters.
-
- For general information about Chelsio and our products, visit
- our website at <http://www.chelsio.com>.
-
- For customer support, please visit our customer support page at
- <http://www.chelsio.com/support.html>.
-
- Please send feedback to <linux-bugs@chelsio.com>.
-
- To compile this driver as a module choose M here; the module
- will be called cxgb4.
-
-config CHELSIO_T4VF
- tristate "Chelsio Communications T4 Virtual Function Ethernet support"
- depends on PCI
- help
- This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
- adapters with PCI-E SR-IOV Virtual Functions.
-
- For general information about Chelsio and our products, visit
- our website at <http://www.chelsio.com>.
-
- For customer support, please visit our customer support page at
- <http://www.chelsio.com/support.html>.
-
- Please send feedback to <linux-bugs@chelsio.com>.
-
- To compile this driver as a module choose M here; the module
- will be called cxgb4vf.
-
-config EHEA
- tristate "eHEA Ethernet support"
- depends on IBMEBUS && INET && SPARSEMEM
- select INET_LRO
- ---help---
- This driver supports the IBM pSeries eHEA ethernet adapter.
-
- To compile the driver as a module, choose M here. The module
- will be called ehea.
-
-config ENIC
- tristate "Cisco VIC Ethernet NIC Support"
- depends on PCI && INET
- help
- This enables the support for the Cisco VIC Ethernet card.
-
-config IXGBE
- tristate "Intel(R) 10GbE PCI Express adapters support"
- depends on PCI && INET
- select MDIO
- ---help---
- This driver supports Intel(R) 10GbE PCI Express family of
- adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
-
- <http://support.intel.com>
-
- To compile this driver as a module, choose M here. The module
- will be called ixgbe.
-
-config IXGBE_DCA
- bool "Direct Cache Access (DCA) Support"
- default y
- depends on IXGBE && DCA && !(IXGBE=y && DCA=m)
- ---help---
- Say Y here if you want to use Direct Cache Access (DCA) in the
- driver. DCA is a method for warming the CPU cache before data
- is used, with the intent of lessening the impact of cache misses.
-
-config IXGBE_DCB
- bool "Data Center Bridging (DCB) Support"
- default n
- depends on IXGBE && DCB
- ---help---
- Say Y here if you want to use Data Center Bridging (DCB) in the
- driver.
-
- If unsure, say N.
-
-config IXGBEVF
- tristate "Intel(R) 82599 Virtual Function Ethernet support"
- depends on PCI_MSI
- ---help---
- This driver supports Intel(R) 82599 virtual functions. For more
- information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
-
- <http://support.intel.com/support/network/sb/CS-008441.htm>
-
- For general information and support, go to the Intel support
- website at:
-
- <http://support.intel.com>
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/ixgbevf.txt>.
-
- To compile this driver as a module, choose M here. The module
- will be called ixgbevf. MSI-X interrupt support is required
- for this driver to work correctly.
-
-config IXGB
- tristate "Intel(R) PRO/10GbE support"
- depends on PCI
- ---help---
- This driver supports Intel(R) PRO/10GbE family of adapters for
- PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
- instead. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
-
- <http://support.intel.com>
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/ixgb.txt>.
-
- To compile this driver as a module, choose M here. The module
- will be called ixgb.
-
-config S2IO
- tristate "Exar Xframe 10Gb Ethernet Adapter"
- depends on PCI
- ---help---
- This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/s2io.txt>.
-
- To compile this driver as a module, choose M here. The module
- will be called s2io.
-
-config VXGE
- tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
- depends on PCI && INET
- ---help---
- This driver supports Exar Corp's X3100 Series 10 GbE PCIe
- I/O Virtualized Server Adapter.
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/vxge.txt>.
-
- To compile this driver as a module, choose M here. The module
- will be called vxge.
-
-config VXGE_DEBUG_TRACE_ALL
- bool "Enabling All Debug trace statments in driver"
- default n
- depends on VXGE
- ---help---
- Say Y here if you want to enabling all the debug trace statements in
- the vxge driver. By default only few debug trace statements are
- enabled.
-
-config MYRI10GE
- tristate "Myricom Myri-10G Ethernet support"
- depends on PCI && INET
- select FW_LOADER
- select CRC32
- select INET_LRO
- ---help---
- This driver supports Myricom Myri-10G Dual Protocol interface in
- Ethernet mode. If the eeprom on your board is not recent enough,
- you will need a newer firmware image.
- You may get this image or more information, at:
-
- <http://www.myri.com/scs/download-Myri10GE.html>
-
- To compile this driver as a module, choose M here. The module
- will be called myri10ge.
-
-config MYRI10GE_DCA
- bool "Direct Cache Access (DCA) Support"
- default y
- depends on MYRI10GE && DCA && !(MYRI10GE=y && DCA=m)
- ---help---
- Say Y here if you want to use Direct Cache Access (DCA) in the
- driver. DCA is a method for warming the CPU cache before data
- is used, with the intent of lessening the impact of cache misses.
-
-config NETXEN_NIC
- tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
- depends on PCI
- select FW_LOADER
- help
- This enables the support for NetXen's Gigabit Ethernet card.
-
-config NIU
- tristate "Sun Neptune 10Gbit Ethernet support"
- depends on PCI
- select CRC32
- help
- This enables support for cards based upon Sun's
- Neptune chipset.
-
-config PASEMI_MAC
- tristate "PA Semi 1/10Gbit MAC"
- depends on PPC_PASEMI && PCI && INET
- select PHYLIB
- select INET_LRO
- help
- This driver supports the on-chip 1/10Gbit Ethernet controller on
- PA Semi's PWRficient line of chips.
-
-config MLX4_EN
- tristate "Mellanox Technologies 10Gbit Ethernet support"
- depends on PCI && INET
- select MLX4_CORE
- select INET_LRO
- help
- This driver supports Mellanox Technologies ConnectX Ethernet
- devices.
-
-config MLX4_CORE
- tristate
- depends on PCI
- default n
-
-config MLX4_DEBUG
- bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
- depends on MLX4_CORE
- default y
- ---help---
- This option causes debugging code to be compiled into the
- mlx4_core driver. The output can be turned on via the
- debug_level module parameter (which can also be set after
- the driver is loaded through sysfs).
-
-config TEHUTI
- tristate "Tehuti Networks 10G Ethernet"
- depends on PCI
- help
- Tehuti Networks 10G Ethernet NIC
-
-config BNX2X
- tristate "Broadcom NetXtremeII 10Gb support"
- depends on PCI
- select FW_LOADER
- select ZLIB_INFLATE
- select LIBCRC32C
- select MDIO
- help
- This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards.
- To compile this driver as a module, choose M here: the module
- will be called bnx2x. This is recommended.
-
-config QLCNIC
- tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
- depends on PCI
- select FW_LOADER
- help
- This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
- devices.
-
-config QLGE
- tristate "QLogic QLGE 10Gb Ethernet Driver Support"
- depends on PCI
- help
- This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
-
- To compile this driver as a module, choose M here: the module
- will be called qlge.
-
-config BNA
- tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
- depends on PCI
- ---help---
- This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
- cards.
- To compile this driver as a module, choose M here: the module
- will be called bna.
-
- For general information and support, go to the Brocade support
- website at:
-
- <http://support.brocade.com>
-
-source "drivers/net/sfc/Kconfig"
-
-source "drivers/net/benet/Kconfig"
-
-endif # NETDEV_10000
+source "drivers/s390/net/Kconfig"
source "drivers/net/tokenring/Kconfig"
+source "drivers/net/usb/Kconfig"
+
source "drivers/net/wireless/Kconfig"
source "drivers/net/wimax/Kconfig"
-source "drivers/net/usb/Kconfig"
-
-source "drivers/net/pcmcia/Kconfig"
-
source "drivers/net/wan/Kconfig"
-source "drivers/atm/Kconfig"
-
-source "drivers/ieee802154/Kconfig"
-
-source "drivers/s390/net/Kconfig"
-
-source "drivers/net/caif/Kconfig"
-
-config TILE_NET
- tristate "Tilera GBE/XGBE network driver support"
- depends on TILE
- default y
- select CRC32
- help
- This is a standard Linux network device driver for the
- on-chip Tilera Gigabit Ethernet and XAUI interfaces.
-
- To compile this driver as a module, choose M here: the module
- will be called tile_net.
-
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
@@ -3002,448 +328,6 @@ config XEN_NETDEV_BACKEND
compile this driver as a module, chose M here: the module
will be called xen-netback.
-config ISERIES_VETH
- tristate "iSeries Virtual Ethernet driver support"
- depends on PPC_ISERIES
-
-config RIONET
- tristate "RapidIO Ethernet over messaging driver support"
- depends on RAPIDIO
-
-config RIONET_TX_SIZE
- int "Number of outbound queue entries"
- depends on RIONET
- default "128"
-
-config RIONET_RX_SIZE
- int "Number of inbound queue entries"
- depends on RIONET
- default "128"
-
-config FDDI
- tristate "FDDI driver support"
- depends on (PCI || EISA || TC)
- help
- Fiber Distributed Data Interface is a high speed local area network
- design; essentially a replacement for high speed Ethernet. FDDI can
- run over copper or fiber. If you are connected to such a network and
- want a driver for the FDDI card in your computer, say Y here (and
- then also Y to the driver for your FDDI card, below). Most people
- will say N.
-
-config DEFXX
- tristate "Digital DEFTA/DEFEA/DEFPA adapter support"
- depends on FDDI && (PCI || EISA || TC)
- ---help---
- This is support for the DIGITAL series of TURBOchannel (DEFTA),
- EISA (DEFEA) and PCI (DEFPA) controllers which can connect you
- to a local FDDI network.
-
- To compile this driver as a module, choose M here: the module
- will be called defxx. If unsure, say N.
-
-config DEFXX_MMIO
- bool
- prompt "Use MMIO instead of PIO" if PCI || EISA
- depends on DEFXX
- default n if PCI || EISA
- default y
- ---help---
- This instructs the driver to use EISA or PCI memory-mapped I/O
- (MMIO) as appropriate instead of programmed I/O ports (PIO).
- Enabling this gives an improvement in processing time in parts
- of the driver, but it may cause problems with EISA (DEFEA)
- adapters. TURBOchannel does not have the concept of I/O ports,
- so MMIO is always used for these (DEFTA) adapters.
-
- If unsure, say N.
-
-config SKFP
- tristate "SysKonnect FDDI PCI support"
- depends on FDDI && PCI
- select BITREVERSE
- ---help---
- Say Y here if you have a SysKonnect FDDI PCI adapter.
- The following adapters are supported by this driver:
- - SK-5521 (SK-NET FDDI-UP)
- - SK-5522 (SK-NET FDDI-UP DAS)
- - SK-5541 (SK-NET FDDI-FP)
- - SK-5543 (SK-NET FDDI-LP)
- - SK-5544 (SK-NET FDDI-LP DAS)
- - SK-5821 (SK-NET FDDI-UP64)
- - SK-5822 (SK-NET FDDI-UP64 DAS)
- - SK-5841 (SK-NET FDDI-FP64)
- - SK-5843 (SK-NET FDDI-LP64)
- - SK-5844 (SK-NET FDDI-LP64 DAS)
- - Netelligent 100 FDDI DAS Fibre SC
- - Netelligent 100 FDDI SAS Fibre SC
- - Netelligent 100 FDDI DAS UTP
- - Netelligent 100 FDDI SAS UTP
- - Netelligent 100 FDDI SAS Fibre MIC
-
- Read <file:Documentation/networking/skfp.txt> for information about
- the driver.
-
- Questions concerning this driver can be addressed to:
- <linux@syskonnect.de>
-
- To compile this driver as a module, choose M here: the module
- will be called skfp. This is recommended.
-
-config HIPPI
- bool "HIPPI driver support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && INET && PCI
- help
- HIgh Performance Parallel Interface (HIPPI) is a 800Mbit/sec and
- 1600Mbit/sec dual-simplex switched or point-to-point network. HIPPI
- can run over copper (25m) or fiber (300m on multi-mode or 10km on
- single-mode). HIPPI networks are commonly used for clusters and to
- connect to super computers. If you are connected to a HIPPI network
- and have a HIPPI network card in your computer that you want to use
- under Linux, say Y here (you must also remember to enable the driver
- for your HIPPI card below). Most people will say N here.
-
-config ROADRUNNER
- tristate "Essential RoadRunner HIPPI PCI adapter support (EXPERIMENTAL)"
- depends on HIPPI && PCI
- help
- Say Y here if this is your PCI HIPPI network card.
-
- To compile this driver as a module, choose M here: the module
- will be called rrunner. If unsure, say N.
-
-config ROADRUNNER_LARGE_RINGS
- bool "Use large TX/RX rings (EXPERIMENTAL)"
- depends on ROADRUNNER
- help
- If you say Y here, the RoadRunner driver will preallocate up to 2 MB
- of additional memory to allow for fastest operation, both for
- transmitting and receiving. This memory cannot be used by any other
- kernel code or by user space programs. Say Y here only if you have
- the memory.
-
-config PLIP
- tristate "PLIP (parallel port) support"
- depends on PARPORT
- ---help---
- PLIP (Parallel Line Internet Protocol) is used to create a
- reasonably fast mini network consisting of two (or, rarely, more)
- local machines. A PLIP link from a Linux box is a popular means to
- install a Linux distribution on a machine which doesn't have a
- CD-ROM drive (a minimal system has to be transferred with floppies
- first). The kernels on both machines need to have this PLIP option
- enabled for this to work.
-
- The PLIP driver has two modes, mode 0 and mode 1. The parallel
- ports (the connectors at the computers with 25 holes) are connected
- with "null printer" or "Turbo Laplink" cables which can transmit 4
- bits at a time (mode 0) or with special PLIP cables, to be used on
- bidirectional parallel ports only, which can transmit 8 bits at a
- time (mode 1); you can find the wiring of these cables in
- <file:Documentation/networking/PLIP.txt>. The cables can be up to
- 15m long. Mode 0 works also if one of the machines runs DOS/Windows
- and has some PLIP software installed, e.g. the Crynwr PLIP packet
- driver (<http://oak.oakland.edu/simtel.net/msdos/pktdrvr-pre.html>)
- and winsock or NCSA's telnet.
-
- If you want to use PLIP, say Y and read the PLIP mini-HOWTO as well
- as the NET-3-HOWTO, both available from
- <http://www.tldp.org/docs.html#howto>. Note that the PLIP
- protocol has been changed and this PLIP driver won't work together
- with the PLIP support in Linux versions 1.0.x. This option enlarges
- your kernel by about 8 KB.
-
- To compile this driver as a module, choose M here. The module
- will be called plip. If unsure, say Y or M, in case you buy
- a laptop later.
-
-config PPP
- tristate "PPP (point-to-point protocol) support"
- select SLHC
- ---help---
- PPP (Point to Point Protocol) is a newer and better SLIP. It serves
- the same purpose: sending Internet traffic over telephone (and other
- serial) lines. Ask your access provider if they support it, because
- otherwise you can't use it; most Internet access providers these
- days support PPP rather than SLIP.
-
- To use PPP, you need an additional program called pppd as described
- in the PPP-HOWTO, available at
- <http://www.tldp.org/docs.html#howto>. Make sure that you have
- the version of pppd recommended in <file:Documentation/Changes>.
- The PPP option enlarges your kernel by about 16 KB.
-
- There are actually two versions of PPP: the traditional PPP for
- asynchronous lines, such as regular analog phone lines, and
- synchronous PPP which can be used over digital ISDN lines for
- example. If you want to use PPP over phone lines or other
- asynchronous serial lines, you need to say Y (or M) here and also to
- the next option, "PPP support for async serial ports". For PPP over
- synchronous lines, you should say Y (or M) here and to "Support
- synchronous PPP", below.
-
- If you said Y to "Version information on all symbols" above, then
- you cannot compile the PPP driver into the kernel; you can then only
- compile it as a module. To compile this driver as a module, choose M
- here. The module will be called ppp_generic.
-
-config PPP_MULTILINK
- bool "PPP multilink support (EXPERIMENTAL)"
- depends on PPP && EXPERIMENTAL
- help
- PPP multilink is a protocol (defined in RFC 1990) which allows you
- to combine several (logical or physical) lines into one logical PPP
- connection, so that you can utilize your full bandwidth.
-
- This has to be supported at the other end as well and you need a
- version of the pppd daemon which understands the multilink protocol.
-
- If unsure, say N.
-
-config PPP_FILTER
- bool "PPP filtering"
- depends on PPP
- help
- Say Y here if you want to be able to filter the packets passing over
- PPP interfaces. This allows you to control which packets count as
- activity (i.e. which packets will reset the idle timer or bring up
- a demand-dialed link) and which packets are to be dropped entirely.
- You need to say Y here if you wish to use the pass-filter and
- active-filter options to pppd.
-
- If unsure, say N.
-
-config PPP_ASYNC
- tristate "PPP support for async serial ports"
- depends on PPP
- select CRC_CCITT
- ---help---
- Say Y (or M) here if you want to be able to use PPP over standard
- asynchronous serial ports, such as COM1 or COM2 on a PC. If you use
- a modem (not a synchronous or ISDN modem) to contact your ISP, you
- need this option.
-
- To compile this driver as a module, choose M here.
-
- If unsure, say Y.
-
-config PPP_SYNC_TTY
- tristate "PPP support for sync tty ports"
- depends on PPP
- help
- Say Y (or M) here if you want to be able to use PPP over synchronous
- (HDLC) tty devices, such as the SyncLink adapter. These devices
- are often used for high-speed leased lines like T1/E1.
-
- To compile this driver as a module, choose M here.
-
-config PPP_DEFLATE
- tristate "PPP Deflate compression"
- depends on PPP
- select ZLIB_INFLATE
- select ZLIB_DEFLATE
- ---help---
- Support for the Deflate compression method for PPP, which uses the
- Deflate algorithm (the same algorithm that gzip uses) to compress
- each PPP packet before it is sent over the wire. The machine at the
- other end of the PPP link (usually your ISP) has to support the
- Deflate compression method as well for this to be useful. Even if
- they don't support it, it is safe to say Y here.
-
- To compile this driver as a module, choose M here.
-
-config PPP_BSDCOMP
- tristate "PPP BSD-Compress compression"
- depends on PPP
- ---help---
- Support for the BSD-Compress compression method for PPP, which uses
- the LZW compression method to compress each PPP packet before it is
- sent over the wire. The machine at the other end of the PPP link
- (usually your ISP) has to support the BSD-Compress compression
- method as well for this to be useful. Even if they don't support it,
- it is safe to say Y here.
-
- The PPP Deflate compression method ("PPP Deflate compression",
- above) is preferable to BSD-Compress, because it compresses better
- and is patent-free.
-
- Note that the BSD compression code will always be compiled as a
- module; it is called bsd_comp and will show up in the directory
- modules once you have said "make modules". If unsure, say N.
-
-config PPP_MPPE
- tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
- depends on PPP && EXPERIMENTAL
- select CRYPTO
- select CRYPTO_SHA1
- select CRYPTO_ARC4
- select CRYPTO_ECB
- ---help---
- Support for the MPPE Encryption protocol, as employed by the
- Microsoft Point-to-Point Tunneling Protocol.
-
- See http://pptpclient.sourceforge.net/ for information on
- configuring PPTP clients and servers to utilize this method.
-
-config PPPOE
- tristate "PPP over Ethernet (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PPP
- help
- Support for PPP over Ethernet.
-
- This driver requires the latest version of pppd from the CVS
- repository at cvs.samba.org. Alternatively, see the
- RoaringPenguin package (<http://www.roaringpenguin.com/pppoe>)
- which contains instruction on how to use this driver (under
- the heading "Kernel mode PPPoE").
-
-config PPTP
- tristate "PPP over IPv4 (PPTP) (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PPP && NET_IPGRE_DEMUX
- help
- Support for PPP over IPv4.(Point-to-Point Tunneling Protocol)
-
- This driver requires pppd plugin to work in client mode or
- modified pptpd (poptop) to work in server mode.
- See http://accel-pptp.sourceforge.net/ for information how to
- utilize this module.
-
-config PPPOATM
- tristate "PPP over ATM"
- depends on ATM && PPP
- help
- Support PPP (Point to Point Protocol) encapsulated in ATM frames.
- This implementation does not yet comply with section 8 of RFC2364,
- which can lead to bad results if the ATM peer loses state and
- changes its encapsulation unilaterally.
-
-config PPPOL2TP
- tristate "PPP over L2TP (EXPERIMENTAL)"
- depends on EXPERIMENTAL && L2TP && PPP
- help
- Support for PPP-over-L2TP socket family. L2TP is a protocol
- used by ISPs and enterprises to tunnel PPP traffic over UDP
- tunnels. L2TP is replacing PPTP for VPN uses.
-
-config SLIP
- tristate "SLIP (serial line) support"
- ---help---
- Say Y if you intend to use SLIP or CSLIP (compressed SLIP) to
- connect to your Internet service provider or to connect to some
- other local Unix box or if you want to configure your Linux box as a
- Slip/CSlip server for other people to dial in. SLIP (Serial Line
- Internet Protocol) is a protocol used to send Internet traffic over
- serial connections such as telephone lines or null modem cables;
- nowadays, the protocol PPP is more commonly used for this same
- purpose.
-
- Normally, your access provider has to support SLIP in order for you
- to be able to use it, but there is now a SLIP emulator called SLiRP
- around (available from
- <ftp://ibiblio.org/pub/Linux/system/network/serial/>) which
- allows you to use SLIP over a regular dial up shell connection. If
- you plan to use SLiRP, make sure to say Y to CSLIP, below. The
- NET-3-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, explains how to
- configure SLIP. Note that you don't need this option if you just
- want to run term (term is a program which gives you almost full
- Internet connectivity if you have a regular dial up shell account on
- some Internet connected Unix computer. Read
- <http://www.bart.nl/~patrickr/term-howto/Term-HOWTO.html>). SLIP
- support will enlarge your kernel by about 4 KB. If unsure, say N.
-
- To compile this driver as a module, choose M here. The module
- will be called slip.
-
-config SLIP_COMPRESSED
- bool "CSLIP compressed headers"
- depends on SLIP
- select SLHC
- ---help---
- This protocol is faster than SLIP because it uses compression on the
- TCP/IP headers (not on the data itself), but it has to be supported
- on both ends. Ask your access provider if you are not sure and
- answer Y, just in case. You will still be able to use plain SLIP. If
- you plan to use SLiRP, the SLIP emulator (available from
- <ftp://ibiblio.org/pub/Linux/system/network/serial/>) which
- allows you to use SLIP over a regular dial up shell connection, you
- definitely want to say Y here. The NET-3-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, explains how to configure
- CSLIP. This won't enlarge your kernel.
-
-config SLHC
- tristate
- help
- This option enables Van Jacobsen serial line header compression
- routines.
-
-config SLIP_SMART
- bool "Keepalive and linefill"
- depends on SLIP
- help
- Adds additional capabilities to the SLIP driver to support the
- RELCOM line fill and keepalive monitoring. Ideal on poor quality
- analogue lines.
-
-config SLIP_MODE_SLIP6
- bool "Six bit SLIP encapsulation"
- depends on SLIP
- help
- Just occasionally you may need to run IP over hostile serial
- networks that don't pass all control characters or are only seven
- bit. Saying Y here adds an extra mode you can use with SLIP:
- "slip6". In this mode, SLIP will only send normal ASCII symbols over
- the serial device. Naturally, this has to be supported at the other
- end of the link as well. It's good enough, for example, to run IP
- over the async ports of a Camtec JNT Pad. If unsure, say N.
-
-config NET_FC
- bool "Fibre Channel driver support"
- depends on SCSI && PCI
- help
- Fibre Channel is a high speed serial protocol mainly used to connect
- large storage devices to the computer; it is compatible with and
- intended to replace SCSI.
-
- If you intend to use Fibre Channel, you need to have a Fibre channel
- adaptor card in your computer; say Y here and to the driver for your
- adaptor below. You also should have said Y to "SCSI support" and
- "SCSI generic support".
-
-config NETCONSOLE
- tristate "Network console logging support"
- ---help---
- If you want to log kernel messages over the network, enable this.
- See <file:Documentation/networking/netconsole.txt> for details.
-
-config NETCONSOLE_DYNAMIC
- bool "Dynamic reconfiguration of logging targets"
- depends on NETCONSOLE && SYSFS && CONFIGFS_FS && \
- !(NETCONSOLE=y && CONFIGFS_FS=m)
- help
- This option enables the ability to dynamically reconfigure target
- parameters (interface, IP addresses, port numbers, MAC addresses)
- at runtime through a userspace interface exported using configfs.
- See <file:Documentation/networking/netconsole.txt> for details.
-
-config NETPOLL
- def_bool NETCONSOLE
-
-config NETPOLL_TRAP
- bool "Netpoll traffic trapping"
- default n
- depends on NETPOLL
-
-config NET_POLL_CONTROLLER
- def_bool NETPOLL
-
-config VIRTIO_NET
- tristate "Virtio network driver (EXPERIMENTAL)"
- depends on EXPERIMENTAL && VIRTIO
- ---help---
- This is the virtual network driver for virtio. It can be used with
- lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
-
config VMXNET3
tristate "VMware VMXNET3 ethernet driver"
depends on PCI && INET
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e1eca2ab505..fa877cd2b13 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -1,278 +1,61 @@
#
-# Makefile for the Linux network (ethercard) device drivers.
+# Makefile for the Linux network device drivers.
#
-obj-$(CONFIG_MII) += mii.o
-obj-$(CONFIG_MDIO) += mdio.o
-obj-$(CONFIG_PHYLIB) += phy/
-
-obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
-obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
-obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
-
-obj-$(CONFIG_E1000) += e1000/
-obj-$(CONFIG_E1000E) += e1000e/
-obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
-obj-$(CONFIG_IGB) += igb/
-obj-$(CONFIG_IGBVF) += igbvf/
-obj-$(CONFIG_IXGBE) += ixgbe/
-obj-$(CONFIG_IXGBEVF) += ixgbevf/
-obj-$(CONFIG_IXGB) += ixgb/
-obj-$(CONFIG_IP1000) += ipg.o
-obj-$(CONFIG_CHELSIO_T1) += chelsio/
-obj-$(CONFIG_CHELSIO_T3) += cxgb3/
-obj-$(CONFIG_CHELSIO_T4) += cxgb4/
-obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/
-obj-$(CONFIG_EHEA) += ehea/
-obj-$(CONFIG_CAN) += can/
-obj-$(CONFIG_BONDING) += bonding/
-obj-$(CONFIG_ATL1) += atlx/
-obj-$(CONFIG_ATL2) += atlx/
-obj-$(CONFIG_ATL1E) += atl1e/
-obj-$(CONFIG_ATL1C) += atl1c/
-obj-$(CONFIG_GIANFAR) += gianfar_driver.o
-obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
-obj-$(CONFIG_TEHUTI) += tehuti.o
-obj-$(CONFIG_ENIC) += enic/
-obj-$(CONFIG_JME) += jme.o
-obj-$(CONFIG_BE2NET) += benet/
-obj-$(CONFIG_VMXNET3) += vmxnet3/
-obj-$(CONFIG_BNA) += bna/
-
-gianfar_driver-objs := gianfar.o \
- gianfar_ethtool.o \
- gianfar_sysfs.o
-
-obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
-ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
-
-obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
-
-#
-# link order important here
#
-obj-$(CONFIG_PLIP) += plip.o
-
-obj-$(CONFIG_ROADRUNNER) += rrunner.o
-
-obj-$(CONFIG_HAPPYMEAL) += sunhme.o
-obj-$(CONFIG_SUNLANCE) += sunlance.o
-obj-$(CONFIG_SUNQE) += sunqe.o
-obj-$(CONFIG_SUNBMAC) += sunbmac.o
-obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o
-obj-$(CONFIG_CASSINI) += cassini.o
-obj-$(CONFIG_SUNVNET) += sunvnet.o
-
-obj-$(CONFIG_MACE) += mace.o
-obj-$(CONFIG_BMAC) += bmac.o
-
-obj-$(CONFIG_VORTEX) += 3c59x.o
-obj-$(CONFIG_TYPHOON) += typhoon.o
-obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
-obj-$(CONFIG_PCNET32) += pcnet32.o
-obj-$(CONFIG_E100) += e100.o
-obj-$(CONFIG_TLAN) += tlan.o
-obj-$(CONFIG_EPIC100) += epic100.o
-obj-$(CONFIG_SMSC9420) += smsc9420.o
-obj-$(CONFIG_SIS190) += sis190.o
-obj-$(CONFIG_SIS900) += sis900.o
-obj-$(CONFIG_R6040) += r6040.o
-obj-$(CONFIG_YELLOWFIN) += yellowfin.o
-obj-$(CONFIG_ACENIC) += acenic.o
-obj-$(CONFIG_ISERIES_VETH) += iseries_veth.o
-obj-$(CONFIG_NATSEMI) += natsemi.o
-obj-$(CONFIG_NS83820) += ns83820.o
-obj-$(CONFIG_STNIC) += stnic.o 8390.o
-obj-$(CONFIG_FEALNX) += fealnx.o
-obj-$(CONFIG_TIGON3) += tg3.o
-obj-$(CONFIG_BNX2) += bnx2.o
-obj-$(CONFIG_CNIC) += cnic.o
-obj-$(CONFIG_BNX2X) += bnx2x/
-spidernet-y += spider_net.o spider_net_ethtool.o
-obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o
-obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
-gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
-ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
-obj-$(CONFIG_TC35815) += tc35815.o
-obj-$(CONFIG_SKGE) += skge.o
-obj-$(CONFIG_SKY2) += sky2.o
-obj-$(CONFIG_SKFP) += skfp/
-obj-$(CONFIG_KS8842) += ks8842.o
-obj-$(CONFIG_KS8851) += ks8851.o
-obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
-obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
-obj-$(CONFIG_VIA_RHINE) += via-rhine.o
-obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
-obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
-obj-$(CONFIG_RIONET) += rionet.o
-obj-$(CONFIG_SH_ETH) += sh_eth.o
-obj-$(CONFIG_STMMAC_ETH) += stmmac/
-
+# Networking Core Drivers
#
-# end link order section
-#
-
-obj-$(CONFIG_SUNDANCE) += sundance.o
-obj-$(CONFIG_HAMACHI) += hamachi.o
-obj-$(CONFIG_NET) += Space.o loopback.o
-obj-$(CONFIG_SEEQ8005) += seeq8005.o
-obj-$(CONFIG_NET_SB1000) += sb1000.o
-obj-$(CONFIG_MAC8390) += mac8390.o
-obj-$(CONFIG_APNE) += apne.o 8390.o
-obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
-obj-$(CONFIG_HP100) += hp100.o
-obj-$(CONFIG_SMC9194) += smc9194.o
-obj-$(CONFIG_FEC) += fec.o
-obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
-ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
- obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
-endif
-obj-$(CONFIG_WD80x3) += wd.o 8390.o
-obj-$(CONFIG_EL2) += 3c503.o 8390p.o
-obj-$(CONFIG_NE2000) += ne.o 8390p.o
-obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
-obj-$(CONFIG_HPLAN) += hp.o 8390p.o
-obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
-obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
-obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
-obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
-obj-$(CONFIG_E2100) += e2100.o 8390.o
-obj-$(CONFIG_ES3210) += es3210.o 8390.o
-obj-$(CONFIG_LNE390) += lne390.o 8390.o
-obj-$(CONFIG_NE3210) += ne3210.o 8390.o
-obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
-obj-$(CONFIG_B44) += b44.o
-obj-$(CONFIG_FORCEDETH) += forcedeth.o
-obj-$(CONFIG_NE_H8300) += ne-h8300.o
-obj-$(CONFIG_AX88796) += ax88796.o
-obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
-obj-$(CONFIG_FTGMAC100) += ftgmac100.o
-obj-$(CONFIG_FTMAC100) += ftmac100.o
-
-obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
-obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
-ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
-obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
-obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
-obj-$(CONFIG_QLA3XXX) += qla3xxx.o
-obj-$(CONFIG_QLCNIC) += qlcnic/
-obj-$(CONFIG_QLGE) += qlge/
-
-obj-$(CONFIG_PPP) += ppp_generic.o
-obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
-obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
-obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
-obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
-obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
-obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
-obj-$(CONFIG_PPPOL2TP) += pppox.o
-obj-$(CONFIG_PPTP) += pppox.o pptp.o
-
-obj-$(CONFIG_SLIP) += slip.o
-obj-$(CONFIG_SLHC) += slhc.o
-
-obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
-obj-$(CONFIG_XEN_NETDEV_BACKEND) += xen-netback/
-
+obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_DUMMY) += dummy.o
+obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACVLAN) += macvlan.o
obj-$(CONFIG_MACVTAP) += macvtap.o
-obj-$(CONFIG_DE600) += de600.o
-obj-$(CONFIG_DE620) += de620.o
-obj-$(CONFIG_LANCE) += lance.o
-obj-$(CONFIG_SUN3_82586) += sun3_82586.o
-obj-$(CONFIG_SUN3LANCE) += sun3lance.o
-obj-$(CONFIG_DEFXX) += defxx.o
-obj-$(CONFIG_SGISEEQ) += sgiseeq.o
-obj-$(CONFIG_SGI_O2MACE_ETH) += meth.o
-obj-$(CONFIG_AT1700) += at1700.o
-obj-$(CONFIG_EL1) += 3c501.o
-obj-$(CONFIG_EL16) += 3c507.o
-obj-$(CONFIG_ELMC) += 3c523.o
-obj-$(CONFIG_IBMLANA) += ibmlana.o
-obj-$(CONFIG_ELMC_II) += 3c527.o
-obj-$(CONFIG_EL3) += 3c509.o
-obj-$(CONFIG_3C515) += 3c515.o
-obj-$(CONFIG_EEXPRESS) += eexpress.o
-obj-$(CONFIG_EEXPRESS_PRO) += eepro.o
-obj-$(CONFIG_8139CP) += 8139cp.o
-obj-$(CONFIG_8139TOO) += 8139too.o
-obj-$(CONFIG_ZNET) += znet.o
-obj-$(CONFIG_CPMAC) += cpmac.o
-obj-$(CONFIG_DEPCA) += depca.o
-obj-$(CONFIG_EWRK3) += ewrk3.o
-obj-$(CONFIG_ATP) += atp.o
-obj-$(CONFIG_NI5010) += ni5010.o
-obj-$(CONFIG_NI52) += ni52.o
-obj-$(CONFIG_NI65) += ni65.o
-obj-$(CONFIG_ELPLUS) += 3c505.o
-obj-$(CONFIG_AC3200) += ac3200.o 8390.o
-obj-$(CONFIG_APRICOT) += 82596.o
-obj-$(CONFIG_LASI_82596) += lasi_82596.o
-obj-$(CONFIG_SNI_82596) += sni_82596.o
-obj-$(CONFIG_MVME16x_NET) += 82596.o
-obj-$(CONFIG_BVME6000_NET) += 82596.o
-obj-$(CONFIG_SC92031) += sc92031.o
-
-# This is also a 82596 and should probably be merged
-obj-$(CONFIG_LP486E) += lp486e.o
-
-obj-$(CONFIG_ETH16I) += eth16i.o
-obj-$(CONFIG_ZORRO8390) += zorro8390.o
-obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
-obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
-obj-$(CONFIG_EQUALIZER) += eql.o
-obj-$(CONFIG_KORINA) += korina.o
-obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
-obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
-obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
-obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
-obj-$(CONFIG_DECLANCE) += declance.o
-obj-$(CONFIG_ATARILANCE) += atarilance.o
-obj-$(CONFIG_A2065) += a2065.o
-obj-$(CONFIG_HYDRA) += hydra.o
-obj-$(CONFIG_ARIADNE) += ariadne.o
-obj-$(CONFIG_CS89x0) += cs89x0.o
-obj-$(CONFIG_MACSONIC) += macsonic.o
-obj-$(CONFIG_MACMACE) += macmace.o
-obj-$(CONFIG_MAC89x0) += mac89x0.o
+obj-$(CONFIG_MII) += mii.o
+obj-$(CONFIG_MDIO) += mdio.o
+obj-$(CONFIG_NET) += Space.o loopback.o
+obj-$(CONFIG_NETCONSOLE) += netconsole.o
+obj-$(CONFIG_PHYLIB) += phy/
+obj-$(CONFIG_RIONET) += rionet.o
obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_VETH) += veth.o
-obj-$(CONFIG_NET_NETX) += netx-eth.o
-obj-$(CONFIG_DL2K) += dl2k.o
-obj-$(CONFIG_R8169) += r8169.o
-obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
-obj-$(CONFIG_IBMVETH) += ibmveth.o
-obj-$(CONFIG_S2IO) += s2io.o
-obj-$(CONFIG_VXGE) += vxge/
-obj-$(CONFIG_MYRI10GE) += myri10ge/
-obj-$(CONFIG_SMC91X) += smc91x.o
-obj-$(CONFIG_SMC911X) += smc911x.o
-obj-$(CONFIG_SMSC911X) += smsc911x.o
-obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
-obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
-obj-$(CONFIG_DM9000) += dm9000.o
-obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
-pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
-obj-$(CONFIG_MLX4_CORE) += mlx4/
-obj-$(CONFIG_ENC28J60) += enc28j60.o
-obj-$(CONFIG_ETHOC) += ethoc.o
-obj-$(CONFIG_GRETH) += greth.o
-obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
-
-obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
-
-obj-$(CONFIG_DNET) += dnet.o
-obj-$(CONFIG_MACB) += macb.o
-obj-$(CONFIG_S6GMAC) += s6gmac.o
+obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
-obj-$(CONFIG_ARM) += arm/
+#
+# Networking Drivers
+#
+obj-$(CONFIG_ARCNET) += arcnet/
obj-$(CONFIG_DEV_APPLETALK) += appletalk/
+obj-$(CONFIG_CAIF) += caif/
+obj-$(CONFIG_CAN) += can/
+obj-$(CONFIG_ETRAX_ETHERNET) += cris/
+obj-$(CONFIG_ETHERNET) += ethernet/
+obj-$(CONFIG_FDDI) += fddi/
+obj-$(CONFIG_HIPPI) += hippi/
+obj-$(CONFIG_HAMRADIO) += hamradio/
+obj-$(CONFIG_IRDA) += irda/
+obj-$(CONFIG_PLIP) += plip/
+obj-$(CONFIG_PPP) += ppp/
+obj-$(CONFIG_PPP_ASYNC) += ppp/
+obj-$(CONFIG_PPP_BSDCOMP) += ppp/
+obj-$(CONFIG_PPP_DEFLATE) += ppp/
+obj-$(CONFIG_PPP_MPPE) += ppp/
+obj-$(CONFIG_PPP_SYNC_TTY) += ppp/
+obj-$(CONFIG_PPPOE) += ppp/
+obj-$(CONFIG_PPPOL2TP) += ppp/
+obj-$(CONFIG_PPTP) += ppp/
+obj-$(CONFIG_SLIP) += slip/
+obj-$(CONFIG_SLHC) += slip/
+obj-$(CONFIG_NET_SB1000) += sb1000.o
+obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
obj-$(CONFIG_TR) += tokenring/
obj-$(CONFIG_WAN) += wan/
-obj-$(CONFIG_ARCNET) += arcnet/
-obj-$(CONFIG_NET_PCMCIA) += pcmcia/
+obj-$(CONFIG_WLAN) += wireless/
+obj-$(CONFIG_WIMAX) += wimax/
+
+obj-$(CONFIG_VMXNET3) += vmxnet3/
+obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
+obj-$(CONFIG_XEN_NETDEV_BACKEND) += xen-netback/
obj-$(CONFIG_USB_CATC) += usb/
obj-$(CONFIG_USB_KAWETH) += usb/
@@ -283,26 +66,3 @@ obj-$(CONFIG_USB_USBNET) += usb/
obj-$(CONFIG_USB_ZD1201) += usb/
obj-$(CONFIG_USB_IPHETH) += usb/
obj-$(CONFIG_USB_CDC_PHONET) += usb/
-
-obj-$(CONFIG_WLAN) += wireless/
-obj-$(CONFIG_NET_TULIP) += tulip/
-obj-$(CONFIG_HAMRADIO) += hamradio/
-obj-$(CONFIG_IRDA) += irda/
-obj-$(CONFIG_ETRAX_ETHERNET) += cris/
-obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
-
-obj-$(CONFIG_NETCONSOLE) += netconsole.o
-
-obj-$(CONFIG_FS_ENET) += fs_enet/
-
-obj-$(CONFIG_NETXEN_NIC) += netxen/
-obj-$(CONFIG_NIU) += niu.o
-obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
-obj-$(CONFIG_SFC) += sfc/
-
-obj-$(CONFIG_WIMAX) += wimax/
-obj-$(CONFIG_CAIF) += caif/
-
-obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
-obj-$(CONFIG_PCH_GBE) += pch_gbe/
-obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 748c9f526e7..9abd4eb86dc 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -264,7 +264,7 @@ static const struct net_device_ops cops_netdev_ops = {
.ndo_start_xmit = cops_send_packet,
.ndo_tx_timeout = cops_timeout,
.ndo_do_ioctl = cops_ioctl,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
};
/*
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 34ffb542262..6057b30417a 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1014,7 +1014,7 @@ static int __init ltpc_probe_dma(int base, int dma)
static const struct net_device_ops ltpc_netdev = {
.ndo_start_xmit = ltpc_xmit,
.ndo_do_ioctl = ltpc_ioctl,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
};
struct net_device * __init ltpc_probe(void)
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index 3b2f7f11546..a73d9dc80ff 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -3,8 +3,8 @@
#
menuconfig ARCNET
- depends on NETDEVICES && (ISA || PCI)
- tristate "ARCnet support"
+ depends on NETDEVICES && (ISA || PCI || PCMCIA)
+ bool "ARCnet support"
---help---
If you have a network card of this type, say Y and check out the
(arguably) beautiful poetry in
@@ -123,4 +123,14 @@ config ARCNET_COM20020_PCI
tristate "Support for COM20020 on PCI"
depends on ARCNET_COM20020 && PCI
+config ARCNET_COM20020_CS
+ tristate "COM20020 ARCnet PCMCIA support"
+ depends on ARCNET_COM20020 && PCMCIA
+ help
+ Say Y here if you intend to attach this type of ARCnet PCMCIA card
+ to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called com20020_cs. If unsure, say N.
+
endif # ARCNET
diff --git a/drivers/net/arcnet/Makefile b/drivers/net/arcnet/Makefile
index 5861af543d4..5ce8ee63e43 100644
--- a/drivers/net/arcnet/Makefile
+++ b/drivers/net/arcnet/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_ARCNET_RIM_I) += arc-rimi.o
obj-$(CONFIG_ARCNET_COM20020) += com20020.o
obj-$(CONFIG_ARCNET_COM20020_ISA) += com20020-isa.o
obj-$(CONFIG_ARCNET_COM20020_PCI) += com20020-pci.o
+obj-$(CONFIG_ARCNET_COM20020_CS) += com20020_cs.o
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 7bfb91f3285..7b96c5f47e8 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -154,7 +154,7 @@ const struct net_device_ops com20020_netdev_ops = {
.ndo_stop = arcnet_close,
.ndo_start_xmit = arcnet_send_packet,
.ndo_tx_timeout = arcnet_timeout,
- .ndo_set_multicast_list = com20020_set_mc_list,
+ .ndo_set_rx_mode = com20020_set_mc_list,
};
/* Set up the struct net_device associated with this card. Called after
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index 980e65c1493..980e65c1493 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
deleted file mode 100644
index 39e1c0d3947..00000000000
--- a/drivers/net/arm/Kconfig
+++ /dev/null
@@ -1,74 +0,0 @@
-#
-# Acorn Network device configuration
-# These are for Acorn's Expansion card network interfaces
-#
-config ARM_AM79C961A
- bool "ARM EBSA110 AM79C961A support"
- depends on ARM && ARCH_EBSA110
- select CRC32
- help
- If you wish to compile a kernel for the EBSA-110, then you should
- always answer Y to this.
-
-config ARM_ETHER1
- tristate "Acorn Ether1 support"
- depends on ARM && ARCH_ACORN
- help
- If you have an Acorn system with one of these (AKA25) network cards,
- you should say Y to this option if you wish to use it with Linux.
-
-config ARM_ETHER3
- tristate "Acorn/ANT Ether3 support"
- depends on ARM && ARCH_ACORN
- help
- If you have an Acorn system with one of these network cards, you
- should say Y to this option if you wish to use it with Linux.
-
-config ARM_ETHERH
- tristate "I-cubed EtherH/ANT EtherM support"
- depends on ARM && ARCH_ACORN
- select CRC32
- help
- If you have an Acorn system with one of these network cards, you
- should say Y to this option if you wish to use it with Linux.
-
-config ARM_AT91_ETHER
- tristate "AT91RM9200 Ethernet support"
- depends on ARM && ARCH_AT91RM9200
- select MII
- help
- If you wish to compile a kernel for the AT91RM9200 and enable
- ethernet support, then you should always answer Y to this.
-
-config ARM_KS8695_ETHER
- tristate "KS8695 Ethernet support"
- depends on ARM && ARCH_KS8695
- select MII
- help
- If you wish to compile a kernel for the KS8695 and want to
- use the internal ethernet then you should answer Y to this.
-
-config EP93XX_ETH
- tristate "EP93xx Ethernet support"
- depends on ARM && ARCH_EP93XX
- select MII
- help
- This is a driver for the ethernet hardware included in EP93xx CPUs.
- Say Y if you are building a kernel for EP93xx based devices.
-
-config IXP4XX_ETH
- tristate "Intel IXP4xx Ethernet support"
- depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
- select PHYLIB
- help
- Say Y here if you want to use built-in Ethernet ports
- on IXP4xx processor.
-
-config W90P910_ETH
- tristate "Nuvoton w90p910 Ethernet support"
- depends on ARM && ARCH_W90X900
- select PHYLIB
- select MII
- help
- Say Y here if you want to use built-in Ethernet ports
- on w90p910 processor.
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile
deleted file mode 100644
index 303171f589e..00000000000
--- a/drivers/net/arm/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# File: drivers/net/arm/Makefile
-#
-# Makefile for the ARM network device drivers
-#
-
-obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
-obj-$(CONFIG_ARM_ETHERH) += etherh.o
-obj-$(CONFIG_ARM_ETHER3) += ether3.o
-obj-$(CONFIG_ARM_ETHER1) += ether1.o
-obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
-obj-$(CONFIG_ARM_KS8695_ETHER) += ks8695net.o
-obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o
-obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
-obj-$(CONFIG_W90P910_ETH) += w90p910_ether.o
diff --git a/drivers/net/benet/Kconfig b/drivers/net/benet/Kconfig
deleted file mode 100644
index 1a41a49bb61..00000000000
--- a/drivers/net/benet/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config BE2NET
- tristate "ServerEngines' 10Gbps NIC - BladeEngine"
- depends on PCI && INET
- help
- This driver implements the NIC functionality for ServerEngines'
- 10Gbps network adapter - BladeEngine.
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
deleted file mode 100644
index 45e45e8d3d6..00000000000
--- a/drivers/net/bmac.c
+++ /dev/null
@@ -1,1685 +0,0 @@
-/*
- * Network device driver for the BMAC ethernet controller on
- * Apple Powermacs. Assumes it's under a DBDMA controller.
- *
- * Copyright (C) 1998 Randy Gobbel.
- *
- * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
- * dynamic procfs inode.
- */
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/proc_fs.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/crc32.h>
-#include <linux/bitrev.h>
-#include <linux/ethtool.h>
-#include <linux/slab.h>
-#include <asm/prom.h>
-#include <asm/dbdma.h>
-#include <asm/io.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/macio.h>
-#include <asm/irq.h>
-
-#include "bmac.h"
-
-#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
-#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
-
-/*
- * CRC polynomial - used in working out multicast filter bits.
- */
-#define ENET_CRCPOLY 0x04c11db7
-
-/* switch to use multicast code lifted from sunhme driver */
-#define SUNHME_MULTICAST
-
-#define N_RX_RING 64
-#define N_TX_RING 32
-#define MAX_TX_ACTIVE 1
-#define ETHERCRC 4
-#define ETHERMINPACKET 64
-#define ETHERMTU 1500
-#define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2)
-#define TX_TIMEOUT HZ /* 1 second */
-
-/* Bits in transmit DMA status */
-#define TX_DMA_ERR 0x80
-
-#define XXDEBUG(args)
-
-struct bmac_data {
- /* volatile struct bmac *bmac; */
- struct sk_buff_head *queue;
- volatile struct dbdma_regs __iomem *tx_dma;
- int tx_dma_intr;
- volatile struct dbdma_regs __iomem *rx_dma;
- int rx_dma_intr;
- volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
- volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
- struct macio_dev *mdev;
- int is_bmac_plus;
- struct sk_buff *rx_bufs[N_RX_RING];
- int rx_fill;
- int rx_empty;
- struct sk_buff *tx_bufs[N_TX_RING];
- int tx_fill;
- int tx_empty;
- unsigned char tx_fullup;
- struct timer_list tx_timeout;
- int timeout_active;
- int sleeping;
- int opened;
- unsigned short hash_use_count[64];
- unsigned short hash_table_mask[4];
- spinlock_t lock;
-};
-
-#if 0 /* Move that to ethtool */
-
-typedef struct bmac_reg_entry {
- char *name;
- unsigned short reg_offset;
-} bmac_reg_entry_t;
-
-#define N_REG_ENTRIES 31
-
-static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
- {"MEMADD", MEMADD},
- {"MEMDATAHI", MEMDATAHI},
- {"MEMDATALO", MEMDATALO},
- {"TXPNTR", TXPNTR},
- {"RXPNTR", RXPNTR},
- {"IPG1", IPG1},
- {"IPG2", IPG2},
- {"ALIMIT", ALIMIT},
- {"SLOT", SLOT},
- {"PALEN", PALEN},
- {"PAPAT", PAPAT},
- {"TXSFD", TXSFD},
- {"JAM", JAM},
- {"TXCFG", TXCFG},
- {"TXMAX", TXMAX},
- {"TXMIN", TXMIN},
- {"PAREG", PAREG},
- {"DCNT", DCNT},
- {"NCCNT", NCCNT},
- {"NTCNT", NTCNT},
- {"EXCNT", EXCNT},
- {"LTCNT", LTCNT},
- {"TXSM", TXSM},
- {"RXCFG", RXCFG},
- {"RXMAX", RXMAX},
- {"RXMIN", RXMIN},
- {"FRCNT", FRCNT},
- {"AECNT", AECNT},
- {"FECNT", FECNT},
- {"RXSM", RXSM},
- {"RXCV", RXCV}
-};
-
-#endif
-
-static unsigned char *bmac_emergency_rxbuf;
-
-/*
- * Number of bytes of private data per BMAC: allow enough for
- * the rx and tx dma commands plus a branch dma command each,
- * and another 16 bytes to allow us to align the dma command
- * buffers on a 16 byte boundary.
- */
-#define PRIV_BYTES (sizeof(struct bmac_data) \
- + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
- + sizeof(struct sk_buff_head))
-
-static int bmac_open(struct net_device *dev);
-static int bmac_close(struct net_device *dev);
-static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
-static void bmac_set_multicast(struct net_device *dev);
-static void bmac_reset_and_enable(struct net_device *dev);
-static void bmac_start_chip(struct net_device *dev);
-static void bmac_init_chip(struct net_device *dev);
-static void bmac_init_registers(struct net_device *dev);
-static void bmac_enable_and_reset_chip(struct net_device *dev);
-static int bmac_set_address(struct net_device *dev, void *addr);
-static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
-static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
-static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
-static void bmac_set_timeout(struct net_device *dev);
-static void bmac_tx_timeout(unsigned long data);
-static int bmac_output(struct sk_buff *skb, struct net_device *dev);
-static void bmac_start(struct net_device *dev);
-
-#define DBDMA_SET(x) ( ((x) | (x) << 16) )
-#define DBDMA_CLEAR(x) ( (x) << 16)
-
-static inline void
-dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
-{
- __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
-}
-
-static inline unsigned long
-dbdma_ld32(volatile __u32 __iomem *a)
-{
- __u32 swap;
- __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));
- return swap;
-}
-
-static void
-dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
-{
- dbdma_st32(&dmap->control,
- DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
- eieio();
-}
-
-static void
-dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
-{
- dbdma_st32(&dmap->control,
- DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
- eieio();
- while (dbdma_ld32(&dmap->status) & RUN)
- eieio();
-}
-
-static void
-dbdma_setcmd(volatile struct dbdma_cmd *cp,
- unsigned short cmd, unsigned count, unsigned long addr,
- unsigned long cmd_dep)
-{
- out_le16(&cp->command, cmd);
- out_le16(&cp->req_count, count);
- out_le32(&cp->phy_addr, addr);
- out_le32(&cp->cmd_dep, cmd_dep);
- out_le16(&cp->xfer_status, 0);
- out_le16(&cp->res_count, 0);
-}
-
-static inline
-void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
-{
- out_le16((void __iomem *)dev->base_addr + reg_offset, data);
-}
-
-
-static inline
-unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
-{
- return in_le16((void __iomem *)dev->base_addr + reg_offset);
-}
-
-static void
-bmac_enable_and_reset_chip(struct net_device *dev)
-{
- struct bmac_data *bp = netdev_priv(dev);
- volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
- volatile struct dbdma_regs __iomem *td = bp->tx_dma;
-
- if (rd)
- dbdma_reset(rd);
- if (td)
- dbdma_reset(td);
-
- pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
-}
-
-#define MIFDELAY udelay(10)
-
-static unsigned int
-bmac_mif_readbits(struct net_device *dev, int nb)
-{
- unsigned int val = 0;
-
- while (--nb >= 0) {
- bmwrite(dev, MIFCSR, 0);
- MIFDELAY;
- if (bmread(dev, MIFCSR) & 8)
- val |= 1 << nb;
- bmwrite(dev, MIFCSR, 1);
- MIFDELAY;
- }
- bmwrite(dev, MIFCSR, 0);
- MIFDELAY;
- bmwrite(dev, MIFCSR, 1);
- MIFDELAY;
- return val;
-}
-
-static void
-bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
-{
- int b;
-
- while (--nb >= 0) {
- b = (val & (1 << nb))? 6: 4;
- bmwrite(dev, MIFCSR, b);
- MIFDELAY;
- bmwrite(dev, MIFCSR, b|1);
- MIFDELAY;
- }
-}
-
-static unsigned int
-bmac_mif_read(struct net_device *dev, unsigned int addr)
-{
- unsigned int val;
-
- bmwrite(dev, MIFCSR, 4);
- MIFDELAY;
- bmac_mif_writebits(dev, ~0U, 32);
- bmac_mif_writebits(dev, 6, 4);
- bmac_mif_writebits(dev, addr, 10);
- bmwrite(dev, MIFCSR, 2);
- MIFDELAY;
- bmwrite(dev, MIFCSR, 1);
- MIFDELAY;
- val = bmac_mif_readbits(dev, 17);
- bmwrite(dev, MIFCSR, 4);
- MIFDELAY;
- return val;
-}
-
-static void
-bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
-{
- bmwrite(dev, MIFCSR, 4);
- MIFDELAY;
- bmac_mif_writebits(dev, ~0U, 32);
- bmac_mif_writebits(dev, 5, 4);
- bmac_mif_writebits(dev, addr, 10);
- bmac_mif_writebits(dev, 2, 2);
- bmac_mif_writebits(dev, val, 16);
- bmac_mif_writebits(dev, 3, 2);
-}
-
-static void
-bmac_init_registers(struct net_device *dev)
-{
- struct bmac_data *bp = netdev_priv(dev);
- volatile unsigned short regValue;
- unsigned short *pWord16;
- int i;
-
- /* XXDEBUG(("bmac: enter init_registers\n")); */
-
- bmwrite(dev, RXRST, RxResetValue);
- bmwrite(dev, TXRST, TxResetBit);
-
- i = 100;
- do {
- --i;
- udelay(10000);
- regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
- } while ((regValue & TxResetBit) && i > 0);
-
- if (!bp->is_bmac_plus) {
- regValue = bmread(dev, XCVRIF);
- regValue |= ClkBit | SerialMode | COLActiveLow;
- bmwrite(dev, XCVRIF, regValue);
- udelay(10000);
- }
-
- bmwrite(dev, RSEED, (unsigned short)0x1968);
-
- regValue = bmread(dev, XIFC);
- regValue |= TxOutputEnable;
- bmwrite(dev, XIFC, regValue);
-
- bmread(dev, PAREG);
-
- /* set collision counters to 0 */
- bmwrite(dev, NCCNT, 0);
- bmwrite(dev, NTCNT, 0);
- bmwrite(dev, EXCNT, 0);
- bmwrite(dev, LTCNT, 0);
-
- /* set rx counters to 0 */
- bmwrite(dev, FRCNT, 0);
- bmwrite(dev, LECNT, 0);
- bmwrite(dev, AECNT, 0);
- bmwrite(dev, FECNT, 0);
- bmwrite(dev, RXCV, 0);
-
- /* set tx fifo information */
- bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */
-
- bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */
- bmwrite(dev, TXFIFOCSR, TxFIFOEnable );
-
- /* set rx fifo information */
- bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
- bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
-
- //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */
- bmread(dev, STATUS); /* read it just to clear it */
-
- /* zero out the chip Hash Filter registers */
- for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
- bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
- bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
- bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
- bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
-
- pWord16 = (unsigned short *)dev->dev_addr;
- bmwrite(dev, MADD0, *pWord16++);
- bmwrite(dev, MADD1, *pWord16++);
- bmwrite(dev, MADD2, *pWord16);
-
- bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
-
- bmwrite(dev, INTDISABLE, EnableNormal);
-}
-
-#if 0
-static void
-bmac_disable_interrupts(struct net_device *dev)
-{
- bmwrite(dev, INTDISABLE, DisableAll);
-}
-
-static void
-bmac_enable_interrupts(struct net_device *dev)
-{
- bmwrite(dev, INTDISABLE, EnableNormal);
-}
-#endif
-
-
-static void
-bmac_start_chip(struct net_device *dev)
-{
- struct bmac_data *bp = netdev_priv(dev);
- volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
- unsigned short oldConfig;
-
- /* enable rx dma channel */
- dbdma_continue(rd);
-
- oldConfig = bmread(dev, TXCFG);
- bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
-
- /* turn on rx plus any other bits already on (promiscuous possibly) */
- oldConfig = bmread(dev, RXCFG);
- bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
- udelay(20000);
-}
-
-static void
-bmac_init_phy(struct net_device *dev)
-{
- unsigned int addr;
- struct bmac_data *bp = netdev_priv(dev);
-
- printk(KERN_DEBUG "phy registers:");
- for (addr = 0; addr < 32; ++addr) {
- if ((addr & 7) == 0)
- printk(KERN_DEBUG);
- printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
- }
- printk(KERN_CONT "\n");
-
- if (bp->is_bmac_plus) {
- unsigned int capable, ctrl;
-
- ctrl = bmac_mif_read(dev, 0);
- capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
- if (bmac_mif_read(dev, 4) != capable ||
- (ctrl & 0x1000) == 0) {
- bmac_mif_write(dev, 4, capable);
- bmac_mif_write(dev, 0, 0x1200);
- } else
- bmac_mif_write(dev, 0, 0x1000);
- }
-}
-
-static void bmac_init_chip(struct net_device *dev)
-{
- bmac_init_phy(dev);
- bmac_init_registers(dev);
-}
-
-#ifdef CONFIG_PM
-static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
-{
- struct net_device* dev = macio_get_drvdata(mdev);
- struct bmac_data *bp = netdev_priv(dev);
- unsigned long flags;
- unsigned short config;
- int i;
-
- netif_device_detach(dev);
- /* prolly should wait for dma to finish & turn off the chip */
- spin_lock_irqsave(&bp->lock, flags);
- if (bp->timeout_active) {
- del_timer(&bp->tx_timeout);
- bp->timeout_active = 0;
- }
- disable_irq(dev->irq);
- disable_irq(bp->tx_dma_intr);
- disable_irq(bp->rx_dma_intr);
- bp->sleeping = 1;
- spin_unlock_irqrestore(&bp->lock, flags);
- if (bp->opened) {
- volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
- volatile struct dbdma_regs __iomem *td = bp->tx_dma;
-
- config = bmread(dev, RXCFG);
- bmwrite(dev, RXCFG, (config & ~RxMACEnable));
- config = bmread(dev, TXCFG);
- bmwrite(dev, TXCFG, (config & ~TxMACEnable));
- bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
- /* disable rx and tx dma */
- st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
- st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
- /* free some skb's */
- for (i=0; i<N_RX_RING; i++) {
- if (bp->rx_bufs[i] != NULL) {
- dev_kfree_skb(bp->rx_bufs[i]);
- bp->rx_bufs[i] = NULL;
- }
- }
- for (i = 0; i<N_TX_RING; i++) {
- if (bp->tx_bufs[i] != NULL) {
- dev_kfree_skb(bp->tx_bufs[i]);
- bp->tx_bufs[i] = NULL;
- }
- }
- }
- pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
- return 0;
-}
-
-static int bmac_resume(struct macio_dev *mdev)
-{
- struct net_device* dev = macio_get_drvdata(mdev);
- struct bmac_data *bp = netdev_priv(dev);
-
- /* see if this is enough */
- if (bp->opened)
- bmac_reset_and_enable(dev);
-
- enable_irq(dev->irq);
- enable_irq(bp->tx_dma_intr);
- enable_irq(bp->rx_dma_intr);
- netif_device_attach(dev);
-
- return 0;
-}
-#endif /* CONFIG_PM */
-
-static int bmac_set_address(struct net_device *dev, void *addr)
-{
- struct bmac_data *bp = netdev_priv(dev);
- unsigned char *p = addr;
- unsigned short *pWord16;
- unsigned long flags;
- int i;
-
- XXDEBUG(("bmac: enter set_address\n"));
- spin_lock_irqsave(&bp->lock, flags);
-
- for (i = 0; i < 6; ++i) {
- dev->dev_addr[i] = p[i];
- }
- /* load up the hardware address */
- pWord16 = (unsigned short *)dev->dev_addr;
- bmwrite(dev, MADD0, *pWord16++);
- bmwrite(dev, MADD1, *pWord16++);
- bmwrite(dev, MADD2, *pWord16);
-
- spin_unlock_irqrestore(&bp->lock, flags);
- XXDEBUG(("bmac: exit set_address\n"));
- return 0;
-}
-
-static inline void bmac_set_timeout(struct net_device *dev)
-{
- struct bmac_data *bp = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&bp->lock, flags);
- if (bp->timeout_active)
- del_timer(&bp->tx_timeout);
- bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
- bp->tx_timeout.function = bmac_tx_timeout;
- bp->tx_timeout.data = (unsigned long) dev;
- add_timer(&bp->tx_timeout);
- bp->timeout_active = 1;
- spin_unlock_irqrestore(&bp->lock, flags);
-}
-
-static void
-bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
-{
- void *vaddr;
- unsigned long baddr;
- unsigned long len;
-
- len = skb->len;
- vaddr = skb->data;
- baddr = virt_to_bus(vaddr);
-
- dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
-}
-
-static void
-bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
-{
- unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
-
- dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
- virt_to_bus(addr), 0);
-}
-
-static void
-bmac_init_tx_ring(struct bmac_data *bp)
-{
- volatile struct dbdma_regs __iomem *td = bp->tx_dma;
-
- memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));
-
- bp->tx_empty = 0;
- bp->tx_fill = 0;
- bp->tx_fullup = 0;
-
- /* put a branch at the end of the tx command list */
- dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
- (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));
-
- /* reset tx dma */
- dbdma_reset(td);
- out_le32(&td->wait_sel, 0x00200020);
- out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
-}
-
-static int
-bmac_init_rx_ring(struct bmac_data *bp)
-{
- volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
- int i;
- struct sk_buff *skb;
-
- /* initialize list of sk_buffs for receiving and set up recv dma */
- memset((char *)bp->rx_cmds, 0,
- (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
- for (i = 0; i < N_RX_RING; i++) {
- if ((skb = bp->rx_bufs[i]) == NULL) {
- bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
- if (skb != NULL)
- skb_reserve(skb, 2);
- }
- bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
- }
-
- bp->rx_empty = 0;
- bp->rx_fill = i;
-
- /* Put a branch back to the beginning of the receive command list */
- dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
- (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));
-
- /* start rx dma */
- dbdma_reset(rd);
- out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));
-
- return 1;
-}
-
-
-static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
-{
- struct bmac_data *bp = netdev_priv(dev);
- volatile struct dbdma_regs __iomem *td = bp->tx_dma;
- int i;
-
- /* see if there's a free slot in the tx ring */
- /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
- /* bp->tx_empty, bp->tx_fill)); */
- i = bp->tx_fill + 1;
- if (i >= N_TX_RING)
- i = 0;
- if (i == bp->tx_empty) {
- netif_stop_queue(dev);
- bp->tx_fullup = 1;
- XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
- return -1; /* can't take it at the moment */
- }
-
- dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);
-
- bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);
-
- bp->tx_bufs[bp->tx_fill] = skb;
- bp->tx_fill = i;
-
- dev->stats.tx_bytes += skb->len;
-
- dbdma_continue(td);
-
- return 0;
-}
-
-static int rxintcount;
-
-static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *) dev_id;
- struct bmac_data *bp = netdev_priv(dev);
- volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
- volatile struct dbdma_cmd *cp;
- int i, nb, stat;
- struct sk_buff *skb;
- unsigned int residual;
- int last;
- unsigned long flags;
-
- spin_lock_irqsave(&bp->lock, flags);
-
- if (++rxintcount < 10) {
- XXDEBUG(("bmac_rxdma_intr\n"));
- }
-
- last = -1;
- i = bp->rx_empty;
-
- while (1) {
- cp = &bp->rx_cmds[i];
- stat = ld_le16(&cp->xfer_status);
- residual = ld_le16(&cp->res_count);
- if ((stat & ACTIVE) == 0)
- break;
- nb = RX_BUFLEN - residual - 2;
- if (nb < (ETHERMINPACKET - ETHERCRC)) {
- skb = NULL;
- dev->stats.rx_length_errors++;
- dev->stats.rx_errors++;
- } else {
- skb = bp->rx_bufs[i];
- bp->rx_bufs[i] = NULL;
- }
- if (skb != NULL) {
- nb -= ETHERCRC;
- skb_put(skb, nb);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- ++dev->stats.rx_packets;
- dev->stats.rx_bytes += nb;
- } else {
- ++dev->stats.rx_dropped;
- }
- if ((skb = bp->rx_bufs[i]) == NULL) {
- bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
- if (skb != NULL)
- skb_reserve(bp->rx_bufs[i], 2);
- }
- bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
- st_le16(&cp->res_count, 0);
- st_le16(&cp->xfer_status, 0);
- last = i;
- if (++i >= N_RX_RING) i = 0;
- }
-
- if (last != -1) {
- bp->rx_fill = last;
- bp->rx_empty = i;
- }
-
- dbdma_continue(rd);
- spin_unlock_irqrestore(&bp->lock, flags);
-
- if (rxintcount < 10) {
- XXDEBUG(("bmac_rxdma_intr done\n"));
- }
- return IRQ_HANDLED;
-}
-
-static int txintcount;
-
-static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *) dev_id;
- struct bmac_data *bp = netdev_priv(dev);
- volatile struct dbdma_cmd *cp;
- int stat;
- unsigned long flags;
-
- spin_lock_irqsave(&bp->lock, flags);
-
- if (txintcount++ < 10) {
- XXDEBUG(("bmac_txdma_intr\n"));
- }
-
- /* del_timer(&bp->tx_timeout); */
- /* bp->timeout_active = 0; */
-
- while (1) {
- cp = &bp->tx_cmds[bp->tx_empty];
- stat = ld_le16(&cp->xfer_status);
- if (txintcount < 10) {
- XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
- }
- if (!(stat & ACTIVE)) {
- /*
- * status field might not have been filled by DBDMA
- */
- if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
- break;
- }
-
- if (bp->tx_bufs[bp->tx_empty]) {
- ++dev->stats.tx_packets;
- dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
- }
- bp->tx_bufs[bp->tx_empty] = NULL;
- bp->tx_fullup = 0;
- netif_wake_queue(dev);
- if (++bp->tx_empty >= N_TX_RING)
- bp->tx_empty = 0;
- if (bp->tx_empty == bp->tx_fill)
- break;
- }
-
- spin_unlock_irqrestore(&bp->lock, flags);
-
- if (txintcount < 10) {
- XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
- }
-
- bmac_start(dev);
- return IRQ_HANDLED;
-}
-
-#ifndef SUNHME_MULTICAST
-/* Real fast bit-reversal algorithm, 6-bit values */
-static int reverse6[64] = {
- 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
- 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
- 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
- 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
- 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
- 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
- 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
- 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
-};
-
-static unsigned int
-crc416(unsigned int curval, unsigned short nxtval)
-{
- register unsigned int counter, cur = curval, next = nxtval;
- register int high_crc_set, low_data_set;
-
- /* Swap bytes */
- next = ((next & 0x00FF) << 8) | (next >> 8);
-
- /* Compute bit-by-bit */
- for (counter = 0; counter < 16; ++counter) {
- /* is high CRC bit set? */
- if ((cur & 0x80000000) == 0) high_crc_set = 0;
- else high_crc_set = 1;
-
- cur = cur << 1;
-
- if ((next & 0x0001) == 0) low_data_set = 0;
- else low_data_set = 1;
-
- next = next >> 1;
-
- /* do the XOR */
- if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
- }
- return cur;
-}
-
-static unsigned int
-bmac_crc(unsigned short *address)
-{
- unsigned int newcrc;
-
- XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
- newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
- newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
- newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
-
- return(newcrc);
-}
-
-/*
- * Add requested mcast addr to BMac's hash table filter.
- *
- */
-
-static void
-bmac_addhash(struct bmac_data *bp, unsigned char *addr)
-{
- unsigned int crc;
- unsigned short mask;
-
- if (!(*addr)) return;
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
- if (bp->hash_use_count[crc]++) return; /* This bit is already set */
- mask = crc % 16;
- mask = (unsigned char)1 << mask;
- bp->hash_use_count[crc/16] |= mask;
-}
-
-static void
-bmac_removehash(struct bmac_data *bp, unsigned char *addr)
-{
- unsigned int crc;
- unsigned char mask;
-
- /* Now, delete the address from the filter copy, as indicated */
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
- if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
- if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
- mask = crc % 16;
- mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
- bp->hash_table_mask[crc/16] &= mask;
-}
-
-/*
- * Sync the adapter with the software copy of the multicast mask
- * (logical address filter).
- */
-
-static void
-bmac_rx_off(struct net_device *dev)
-{
- unsigned short rx_cfg;
-
- rx_cfg = bmread(dev, RXCFG);
- rx_cfg &= ~RxMACEnable;
- bmwrite(dev, RXCFG, rx_cfg);
- do {
- rx_cfg = bmread(dev, RXCFG);
- } while (rx_cfg & RxMACEnable);
-}
-
-unsigned short
-bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
-{
- unsigned short rx_cfg;
-
- rx_cfg = bmread(dev, RXCFG);
- rx_cfg |= RxMACEnable;
- if (hash_enable) rx_cfg |= RxHashFilterEnable;
- else rx_cfg &= ~RxHashFilterEnable;
- if (promisc_enable) rx_cfg |= RxPromiscEnable;
- else rx_cfg &= ~RxPromiscEnable;
- bmwrite(dev, RXRST, RxResetValue);
- bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
- bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
- bmwrite(dev, RXCFG, rx_cfg );
- return rx_cfg;
-}
-
-static void
-bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
-{
- bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
- bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
- bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
- bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
-}
-
-#if 0
-static void
-bmac_add_multi(struct net_device *dev,
- struct bmac_data *bp, unsigned char *addr)
-{
- /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
- bmac_addhash(bp, addr);
- bmac_rx_off(dev);
- bmac_update_hash_table_mask(dev, bp);
- bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
- /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
-}
-
-static void
-bmac_remove_multi(struct net_device *dev,
- struct bmac_data *bp, unsigned char *addr)
-{
- bmac_removehash(bp, addr);
- bmac_rx_off(dev);
- bmac_update_hash_table_mask(dev, bp);
- bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
-}
-#endif
-
-/* Set or clear the multicast filter for this adaptor.
- num_addrs == -1 Promiscuous mode, receive all packets
- num_addrs == 0 Normal mode, clear multicast list
- num_addrs > 0 Multicast mode, receive normal and MC packets, and do
- best-effort filtering.
- */
-static void bmac_set_multicast(struct net_device *dev)
-{
- struct netdev_hw_addr *ha;
- struct bmac_data *bp = netdev_priv(dev);
- int num_addrs = netdev_mc_count(dev);
- unsigned short rx_cfg;
- int i;
-
- if (bp->sleeping)
- return;
-
- XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
-
- if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
- for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
- bmac_update_hash_table_mask(dev, bp);
- rx_cfg = bmac_rx_on(dev, 1, 0);
- XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n"));
- } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
- rx_cfg = bmread(dev, RXCFG);
- rx_cfg |= RxPromiscEnable;
- bmwrite(dev, RXCFG, rx_cfg);
- rx_cfg = bmac_rx_on(dev, 0, 1);
- XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
- } else {
- for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
- for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
- if (num_addrs == 0) {
- rx_cfg = bmac_rx_on(dev, 0, 0);
- XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
- } else {
- netdev_for_each_mc_addr(ha, dev)
- bmac_addhash(bp, ha->addr);
- bmac_update_hash_table_mask(dev, bp);
- rx_cfg = bmac_rx_on(dev, 1, 0);
- XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
- }
- }
- /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
-}
-#else /* ifdef SUNHME_MULTICAST */
-
-/* The version of set_multicast below was lifted from sunhme.c */
-
-static void bmac_set_multicast(struct net_device *dev)
-{
- struct netdev_hw_addr *ha;
- int i;
- unsigned short rx_cfg;
- u32 crc;
-
- if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
- bmwrite(dev, BHASH0, 0xffff);
- bmwrite(dev, BHASH1, 0xffff);
- bmwrite(dev, BHASH2, 0xffff);
- bmwrite(dev, BHASH3, 0xffff);
- } else if(dev->flags & IFF_PROMISC) {
- rx_cfg = bmread(dev, RXCFG);
- rx_cfg |= RxPromiscEnable;
- bmwrite(dev, RXCFG, rx_cfg);
- } else {
- u16 hash_table[4];
-
- rx_cfg = bmread(dev, RXCFG);
- rx_cfg &= ~RxPromiscEnable;
- bmwrite(dev, RXCFG, rx_cfg);
-
- for(i = 0; i < 4; i++) hash_table[i] = 0;
-
- netdev_for_each_mc_addr(ha, dev) {
- crc = ether_crc_le(6, ha->addr);
- crc >>= 26;
- hash_table[crc >> 4] |= 1 << (crc & 0xf);
- }
- bmwrite(dev, BHASH0, hash_table[0]);
- bmwrite(dev, BHASH1, hash_table[1]);
- bmwrite(dev, BHASH2, hash_table[2]);
- bmwrite(dev, BHASH3, hash_table[3]);
- }
-}
-#endif /* SUNHME_MULTICAST */
-
-static int miscintcount;
-
-static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *) dev_id;
- unsigned int status = bmread(dev, STATUS);
- if (miscintcount++ < 10) {
- XXDEBUG(("bmac_misc_intr\n"));
- }
- /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
- /* bmac_txdma_intr_inner(irq, dev_id); */
- /* if (status & FrameReceived) dev->stats.rx_dropped++; */
- if (status & RxErrorMask) dev->stats.rx_errors++;
- if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
- if (status & RxLenCntExp) dev->stats.rx_length_errors++;
- if (status & RxOverFlow) dev->stats.rx_over_errors++;
- if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
-
- /* if (status & FrameSent) dev->stats.tx_dropped++; */
- if (status & TxErrorMask) dev->stats.tx_errors++;
- if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
- if (status & TxNormalCollExp) dev->stats.collisions++;
- return IRQ_HANDLED;
-}
-
-/*
- * Procedure for reading EEPROM
- */
-#define SROMAddressLength 5
-#define DataInOn 0x0008
-#define DataInOff 0x0000
-#define Clk 0x0002
-#define ChipSelect 0x0001
-#define SDIShiftCount 3
-#define SD0ShiftCount 2
-#define DelayValue 1000 /* number of microseconds */
-#define SROMStartOffset 10 /* this is in words */
-#define SROMReadCount 3 /* number of words to read from SROM */
-#define SROMAddressBits 6
-#define EnetAddressOffset 20
-
-static unsigned char
-bmac_clock_out_bit(struct net_device *dev)
-{
- unsigned short data;
- unsigned short val;
-
- bmwrite(dev, SROMCSR, ChipSelect | Clk);
- udelay(DelayValue);
-
- data = bmread(dev, SROMCSR);
- udelay(DelayValue);
- val = (data >> SD0ShiftCount) & 1;
-
- bmwrite(dev, SROMCSR, ChipSelect);
- udelay(DelayValue);
-
- return val;
-}
-
-static void
-bmac_clock_in_bit(struct net_device *dev, unsigned int val)
-{
- unsigned short data;
-
- if (val != 0 && val != 1) return;
-
- data = (val << SDIShiftCount);
- bmwrite(dev, SROMCSR, data | ChipSelect );
- udelay(DelayValue);
-
- bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
- udelay(DelayValue);
-
- bmwrite(dev, SROMCSR, data | ChipSelect);
- udelay(DelayValue);
-}
-
-static void
-reset_and_select_srom(struct net_device *dev)
-{
- /* first reset */
- bmwrite(dev, SROMCSR, 0);
- udelay(DelayValue);
-
- /* send it the read command (110) */
- bmac_clock_in_bit(dev, 1);
- bmac_clock_in_bit(dev, 1);
- bmac_clock_in_bit(dev, 0);
-}
-
-static unsigned short
-read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
-{
- unsigned short data, val;
- int i;
-
- /* send out the address we want to read from */
- for (i = 0; i < addr_len; i++) {
- val = addr >> (addr_len-i-1);
- bmac_clock_in_bit(dev, val & 1);
- }
-
- /* Now read in the 16-bit data */
- data = 0;
- for (i = 0; i < 16; i++) {
- val = bmac_clock_out_bit(dev);
- data <<= 1;
- data |= val;
- }
- bmwrite(dev, SROMCSR, 0);
-
- return data;
-}
-
-/*
- * It looks like Cogent and SMC use different methods for calculating
- * checksums. What a pain..
- */
-
-static int
-bmac_verify_checksum(struct net_device *dev)
-{
- unsigned short data, storedCS;
-
- reset_and_select_srom(dev);
- data = read_srom(dev, 3, SROMAddressBits);
- storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);
-
- return 0;
-}
-
-
-static void
-bmac_get_station_address(struct net_device *dev, unsigned char *ea)
-{
- int i;
- unsigned short data;
-
- for (i = 0; i < 6; i++)
- {
- reset_and_select_srom(dev);
- data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
- ea[2*i] = bitrev8(data & 0x0ff);
- ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
- }
-}
-
-static void bmac_reset_and_enable(struct net_device *dev)
-{
- struct bmac_data *bp = netdev_priv(dev);
- unsigned long flags;
- struct sk_buff *skb;
- unsigned char *data;
-
- spin_lock_irqsave(&bp->lock, flags);
- bmac_enable_and_reset_chip(dev);
- bmac_init_tx_ring(bp);
- bmac_init_rx_ring(bp);
- bmac_init_chip(dev);
- bmac_start_chip(dev);
- bmwrite(dev, INTDISABLE, EnableNormal);
- bp->sleeping = 0;
-
- /*
- * It seems that the bmac can't receive until it's transmitted
- * a packet. So we give it a dummy packet to transmit.
- */
- skb = dev_alloc_skb(ETHERMINPACKET);
- if (skb != NULL) {
- data = skb_put(skb, ETHERMINPACKET);
- memset(data, 0, ETHERMINPACKET);
- memcpy(data, dev->dev_addr, 6);
- memcpy(data+6, dev->dev_addr, 6);
- bmac_transmit_packet(skb, dev);
- }
- spin_unlock_irqrestore(&bp->lock, flags);
-}
-
-static const struct ethtool_ops bmac_ethtool_ops = {
- .get_link = ethtool_op_get_link,
-};
-
-static const struct net_device_ops bmac_netdev_ops = {
- .ndo_open = bmac_open,
- .ndo_stop = bmac_close,
- .ndo_start_xmit = bmac_output,
- .ndo_set_multicast_list = bmac_set_multicast,
- .ndo_set_mac_address = bmac_set_address,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
-{
- int j, rev, ret;
- struct bmac_data *bp;
- const unsigned char *prop_addr;
- unsigned char addr[6];
- struct net_device *dev;
- int is_bmac_plus = ((int)match->data) != 0;
-
- if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
- printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
- return -ENODEV;
- }
- prop_addr = of_get_property(macio_get_of_node(mdev),
- "mac-address", NULL);
- if (prop_addr == NULL) {
- prop_addr = of_get_property(macio_get_of_node(mdev),
- "local-mac-address", NULL);
- if (prop_addr == NULL) {
- printk(KERN_ERR "BMAC: Can't get mac-address\n");
- return -ENODEV;
- }
- }
- memcpy(addr, prop_addr, sizeof(addr));
-
- dev = alloc_etherdev(PRIV_BYTES);
- if (!dev) {
- printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
- return -ENOMEM;
- }
-
- bp = netdev_priv(dev);
- SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
- macio_set_drvdata(mdev, dev);
-
- bp->mdev = mdev;
- spin_lock_init(&bp->lock);
-
- if (macio_request_resources(mdev, "bmac")) {
- printk(KERN_ERR "BMAC: can't request IO resource !\n");
- goto out_free;
- }
-
- dev->base_addr = (unsigned long)
- ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
- if (dev->base_addr == 0)
- goto out_release;
-
- dev->irq = macio_irq(mdev, 0);
-
- bmac_enable_and_reset_chip(dev);
- bmwrite(dev, INTDISABLE, DisableAll);
-
- rev = addr[0] == 0 && addr[1] == 0xA0;
- for (j = 0; j < 6; ++j)
- dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
-
- /* Enable chip without interrupts for now */
- bmac_enable_and_reset_chip(dev);
- bmwrite(dev, INTDISABLE, DisableAll);
-
- dev->netdev_ops = &bmac_netdev_ops;
- dev->ethtool_ops = &bmac_ethtool_ops;
-
- bmac_get_station_address(dev, addr);
- if (bmac_verify_checksum(dev) != 0)
- goto err_out_iounmap;
-
- bp->is_bmac_plus = is_bmac_plus;
- bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
- if (!bp->tx_dma)
- goto err_out_iounmap;
- bp->tx_dma_intr = macio_irq(mdev, 1);
- bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
- if (!bp->rx_dma)
- goto err_out_iounmap_tx;
- bp->rx_dma_intr = macio_irq(mdev, 2);
-
- bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
- bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
-
- bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
- skb_queue_head_init(bp->queue);
-
- init_timer(&bp->tx_timeout);
-
- ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
- if (ret) {
- printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
- goto err_out_iounmap_rx;
- }
- ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
- if (ret) {
- printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
- goto err_out_irq0;
- }
- ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
- if (ret) {
- printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
- goto err_out_irq1;
- }
-
- /* Mask chip interrupts and disable chip, will be
- * re-enabled on open()
- */
- disable_irq(dev->irq);
- pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
-
- if (register_netdev(dev) != 0) {
- printk(KERN_ERR "BMAC: Ethernet registration failed\n");
- goto err_out_irq2;
- }
-
- printk(KERN_INFO "%s: BMAC%s at %pM",
- dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
- XXDEBUG((", base_addr=%#0lx", dev->base_addr));
- printk("\n");
-
- return 0;
-
-err_out_irq2:
- free_irq(bp->rx_dma_intr, dev);
-err_out_irq1:
- free_irq(bp->tx_dma_intr, dev);
-err_out_irq0:
- free_irq(dev->irq, dev);
-err_out_iounmap_rx:
- iounmap(bp->rx_dma);
-err_out_iounmap_tx:
- iounmap(bp->tx_dma);
-err_out_iounmap:
- iounmap((void __iomem *)dev->base_addr);
-out_release:
- macio_release_resources(mdev);
-out_free:
- pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
- free_netdev(dev);
-
- return -ENODEV;
-}
-
-static int bmac_open(struct net_device *dev)
-{
- struct bmac_data *bp = netdev_priv(dev);
- /* XXDEBUG(("bmac: enter open\n")); */
- /* reset the chip */
- bp->opened = 1;
- bmac_reset_and_enable(dev);
- enable_irq(dev->irq);
- return 0;
-}
-
-static int bmac_close(struct net_device *dev)
-{
- struct bmac_data *bp = netdev_priv(dev);
- volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
- volatile struct dbdma_regs __iomem *td = bp->tx_dma;
- unsigned short config;
- int i;
-
- bp->sleeping = 1;
-
- /* disable rx and tx */
- config = bmread(dev, RXCFG);
- bmwrite(dev, RXCFG, (config & ~RxMACEnable));
-
- config = bmread(dev, TXCFG);
- bmwrite(dev, TXCFG, (config & ~TxMACEnable));
-
- bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
-
- /* disable rx and tx dma */
- st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
- st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
-
- /* free some skb's */
- XXDEBUG(("bmac: free rx bufs\n"));
- for (i=0; i<N_RX_RING; i++) {
- if (bp->rx_bufs[i] != NULL) {
- dev_kfree_skb(bp->rx_bufs[i]);
- bp->rx_bufs[i] = NULL;
- }
- }
- XXDEBUG(("bmac: free tx bufs\n"));
- for (i = 0; i<N_TX_RING; i++) {
- if (bp->tx_bufs[i] != NULL) {
- dev_kfree_skb(bp->tx_bufs[i]);
- bp->tx_bufs[i] = NULL;
- }
- }
- XXDEBUG(("bmac: all bufs freed\n"));
-
- bp->opened = 0;
- disable_irq(dev->irq);
- pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
-
- return 0;
-}
-
-static void
-bmac_start(struct net_device *dev)
-{
- struct bmac_data *bp = netdev_priv(dev);
- int i;
- struct sk_buff *skb;
- unsigned long flags;
-
- if (bp->sleeping)
- return;
-
- spin_lock_irqsave(&bp->lock, flags);
- while (1) {
- i = bp->tx_fill + 1;
- if (i >= N_TX_RING)
- i = 0;
- if (i == bp->tx_empty)
- break;
- skb = skb_dequeue(bp->queue);
- if (skb == NULL)
- break;
- bmac_transmit_packet(skb, dev);
- }
- spin_unlock_irqrestore(&bp->lock, flags);
-}
-
-static int
-bmac_output(struct sk_buff *skb, struct net_device *dev)
-{
- struct bmac_data *bp = netdev_priv(dev);
- skb_queue_tail(bp->queue, skb);
- bmac_start(dev);
- return NETDEV_TX_OK;
-}
-
-static void bmac_tx_timeout(unsigned long data)
-{
- struct net_device *dev = (struct net_device *) data;
- struct bmac_data *bp = netdev_priv(dev);
- volatile struct dbdma_regs __iomem *td = bp->tx_dma;
- volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
- volatile struct dbdma_cmd *cp;
- unsigned long flags;
- unsigned short config, oldConfig;
- int i;
-
- XXDEBUG(("bmac: tx_timeout called\n"));
- spin_lock_irqsave(&bp->lock, flags);
- bp->timeout_active = 0;
-
- /* update various counters */
-/* bmac_handle_misc_intrs(bp, 0); */
-
- cp = &bp->tx_cmds[bp->tx_empty];
-/* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
-/* ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
-/* mb->pr, mb->xmtfs, mb->fifofc)); */
-
- /* turn off both tx and rx and reset the chip */
- config = bmread(dev, RXCFG);
- bmwrite(dev, RXCFG, (config & ~RxMACEnable));
- config = bmread(dev, TXCFG);
- bmwrite(dev, TXCFG, (config & ~TxMACEnable));
- out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
- printk(KERN_ERR "bmac: transmit timeout - resetting\n");
- bmac_enable_and_reset_chip(dev);
-
- /* restart rx dma */
- cp = bus_to_virt(ld_le32(&rd->cmdptr));
- out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
- out_le16(&cp->xfer_status, 0);
- out_le32(&rd->cmdptr, virt_to_bus(cp));
- out_le32(&rd->control, DBDMA_SET(RUN|WAKE));
-
- /* fix up the transmit side */
- XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
- bp->tx_empty, bp->tx_fill, bp->tx_fullup));
- i = bp->tx_empty;
- ++dev->stats.tx_errors;
- if (i != bp->tx_fill) {
- dev_kfree_skb(bp->tx_bufs[i]);
- bp->tx_bufs[i] = NULL;
- if (++i >= N_TX_RING) i = 0;
- bp->tx_empty = i;
- }
- bp->tx_fullup = 0;
- netif_wake_queue(dev);
- if (i != bp->tx_fill) {
- cp = &bp->tx_cmds[i];
- out_le16(&cp->xfer_status, 0);
- out_le16(&cp->command, OUTPUT_LAST);
- out_le32(&td->cmdptr, virt_to_bus(cp));
- out_le32(&td->control, DBDMA_SET(RUN));
- /* bmac_set_timeout(dev); */
- XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
- }
-
- /* turn it back on */
- oldConfig = bmread(dev, RXCFG);
- bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
- oldConfig = bmread(dev, TXCFG);
- bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
-
- spin_unlock_irqrestore(&bp->lock, flags);
-}
-
-#if 0
-static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
-{
- int i,*ip;
-
- for (i=0;i< count;i++) {
- ip = (int*)(cp+i);
-
- printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
- ld_le32(ip+0),
- ld_le32(ip+1),
- ld_le32(ip+2),
- ld_le32(ip+3));
- }
-
-}
-#endif
-
-#if 0
-static int
-bmac_proc_info(char *buffer, char **start, off_t offset, int length)
-{
- int len = 0;
- off_t pos = 0;
- off_t begin = 0;
- int i;
-
- if (bmac_devs == NULL)
- return -ENOSYS;
-
- len += sprintf(buffer, "BMAC counters & registers\n");
-
- for (i = 0; i<N_REG_ENTRIES; i++) {
- len += sprintf(buffer + len, "%s: %#08x\n",
- reg_entries[i].name,
- bmread(bmac_devs, reg_entries[i].reg_offset));
- pos = begin + len;
-
- if (pos < offset) {
- len = 0;
- begin = pos;
- }
-
- if (pos > offset+length) break;
- }
-
- *start = buffer + (offset - begin);
- len -= (offset - begin);
-
- if (len > length) len = length;
-
- return len;
-}
-#endif
-
-static int __devexit bmac_remove(struct macio_dev *mdev)
-{
- struct net_device *dev = macio_get_drvdata(mdev);
- struct bmac_data *bp = netdev_priv(dev);
-
- unregister_netdev(dev);
-
- free_irq(dev->irq, dev);
- free_irq(bp->tx_dma_intr, dev);
- free_irq(bp->rx_dma_intr, dev);
-
- iounmap((void __iomem *)dev->base_addr);
- iounmap(bp->tx_dma);
- iounmap(bp->rx_dma);
-
- macio_release_resources(mdev);
-
- free_netdev(dev);
-
- return 0;
-}
-
-static struct of_device_id bmac_match[] =
-{
- {
- .name = "bmac",
- .data = (void *)0,
- },
- {
- .type = "network",
- .compatible = "bmac+",
- .data = (void *)1,
- },
- {},
-};
-MODULE_DEVICE_TABLE (of, bmac_match);
-
-static struct macio_driver bmac_driver =
-{
- .driver = {
- .name = "bmac",
- .owner = THIS_MODULE,
- .of_match_table = bmac_match,
- },
- .probe = bmac_probe,
- .remove = bmac_remove,
-#ifdef CONFIG_PM
- .suspend = bmac_suspend,
- .resume = bmac_resume,
-#endif
-};
-
-
-static int __init bmac_init(void)
-{
- if (bmac_emergency_rxbuf == NULL) {
- bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
- if (bmac_emergency_rxbuf == NULL) {
- printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
- return -ENOMEM;
- }
- }
-
- return macio_register_driver(&bmac_driver);
-}
-
-static void __exit bmac_exit(void)
-{
- macio_unregister_driver(&bmac_driver);
-
- kfree(bmac_emergency_rxbuf);
- bmac_emergency_rxbuf = NULL;
-}
-
-MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
-MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
-MODULE_LICENSE("GPL");
-
-module_init(bmac_init);
-module_exit(bmac_exit);
diff --git a/drivers/net/bna/Makefile b/drivers/net/bna/Makefile
deleted file mode 100644
index a5d604de7fe..00000000000
--- a/drivers/net/bna/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
-# All rights reserved.
-#
-
-obj-$(CONFIG_BNA) += bna.o
-
-bna-objs := bnad.o bnad_ethtool.o bna_ctrl.o bna_txrx.o
-bna-objs += bfa_ioc.o bfa_ioc_ct.o bfa_cee.o cna_fwimg.o
-
-EXTRA_CFLAGS := -Idrivers/net/bna
diff --git a/drivers/net/bna/bfa_defs_mfg_comm.h b/drivers/net/bna/bfa_defs_mfg_comm.h
deleted file mode 100644
index 885ef3afdd4..00000000000
--- a/drivers/net/bna/bfa_defs_mfg_comm.h
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-#ifndef __BFA_DEFS_MFG_COMM_H__
-#define __BFA_DEFS_MFG_COMM_H__
-
-#include "cna.h"
-
-/**
- * Manufacturing block version
- */
-#define BFA_MFG_VERSION 2
-#define BFA_MFG_VERSION_UNINIT 0xFF
-
-/**
- * Manufacturing block encrypted version
- */
-#define BFA_MFG_ENC_VER 2
-
-/**
- * Manufacturing block version 1 length
- */
-#define BFA_MFG_VER1_LEN 128
-
-/**
- * Manufacturing block header length
- */
-#define BFA_MFG_HDR_LEN 4
-
-#define BFA_MFG_SERIALNUM_SIZE 11
-#define STRSZ(_n) (((_n) + 4) & ~3)
-
-/**
- * Manufacturing card type
- */
-enum {
- BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
- BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
- BFA_MFG_TYPE_FC8P1 = 815, /*!< 8G 1port FC card */
- BFA_MFG_TYPE_FC4P2 = 425, /*!< 4G 2port FC card */
- BFA_MFG_TYPE_FC4P1 = 415, /*!< 4G 1port FC card */
- BFA_MFG_TYPE_CNA10P2 = 1020, /*!< 10G 2port CNA card */
- BFA_MFG_TYPE_CNA10P1 = 1010, /*!< 10G 1port CNA card */
- BFA_MFG_TYPE_JAYHAWK = 804, /*!< Jayhawk mezz card */
- BFA_MFG_TYPE_WANCHESE = 1007, /*!< Wanchese mezz card */
- BFA_MFG_TYPE_ASTRA = 807, /*!< Astra mezz card */
- BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old */
- BFA_MFG_TYPE_LIGHTNING = 1741, /*!< Lightning mezz card */
- BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
-};
-
-#pragma pack(1)
-
-/**
- * Check if 1-port card
- */
-#define bfa_mfg_is_1port(type) (( \
- (type) == BFA_MFG_TYPE_FC8P1 || \
- (type) == BFA_MFG_TYPE_FC4P1 || \
- (type) == BFA_MFG_TYPE_CNA10P1))
-
-/**
- * Check if Mezz card
- */
-#define bfa_mfg_is_mezz(type) (( \
- (type) == BFA_MFG_TYPE_JAYHAWK || \
- (type) == BFA_MFG_TYPE_WANCHESE || \
- (type) == BFA_MFG_TYPE_ASTRA || \
- (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
- (type) == BFA_MFG_TYPE_LIGHTNING))
-
-/**
- * Check if card type valid
- */
-#define bfa_mfg_is_card_type_valid(type) (( \
- (type) == BFA_MFG_TYPE_FC8P2 || \
- (type) == BFA_MFG_TYPE_FC8P1 || \
- (type) == BFA_MFG_TYPE_FC4P2 || \
- (type) == BFA_MFG_TYPE_FC4P1 || \
- (type) == BFA_MFG_TYPE_CNA10P2 || \
- (type) == BFA_MFG_TYPE_CNA10P1 || \
- bfa_mfg_is_mezz(type)))
-
-#define bfa_mfg_adapter_prop_init_flash(card_type, prop) \
-do { \
- switch ((card_type)) { \
- case BFA_MFG_TYPE_FC8P2: \
- case BFA_MFG_TYPE_JAYHAWK: \
- case BFA_MFG_TYPE_ASTRA: \
- (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
- BFI_ADAPTER_SETP(SPEED, 8); \
- break; \
- case BFA_MFG_TYPE_FC8P1: \
- (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
- BFI_ADAPTER_SETP(SPEED, 8); \
- break; \
- case BFA_MFG_TYPE_FC4P2: \
- (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
- BFI_ADAPTER_SETP(SPEED, 4); \
- break; \
- case BFA_MFG_TYPE_FC4P1: \
- (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
- BFI_ADAPTER_SETP(SPEED, 4); \
- break; \
- case BFA_MFG_TYPE_CNA10P2: \
- case BFA_MFG_TYPE_WANCHESE: \
- case BFA_MFG_TYPE_LIGHTNING_P0: \
- case BFA_MFG_TYPE_LIGHTNING: \
- (prop) = BFI_ADAPTER_SETP(NPORTS, 2); \
- (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
- break; \
- case BFA_MFG_TYPE_CNA10P1: \
- (prop) = BFI_ADAPTER_SETP(NPORTS, 1); \
- (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
- break; \
- default: \
- (prop) = BFI_ADAPTER_UNSUPP; \
- } \
-} while (0)
-
-enum {
- CB_GPIO_TTV = (1), /*!< TTV debug capable cards */
- CB_GPIO_FC8P2 = (2), /*!< 8G 2port FC card */
- CB_GPIO_FC8P1 = (3), /*!< 8G 1port FC card */
- CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
- CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
- CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
- CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
-};
-
-#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \
-do { \
- if ((gpio) & CB_GPIO_PROTO) { \
- (prop) |= BFI_ADAPTER_PROTO; \
- (gpio) &= ~CB_GPIO_PROTO; \
- } \
- switch ((gpio)) { \
- case CB_GPIO_TTV: \
- (prop) |= BFI_ADAPTER_TTV; \
- case CB_GPIO_DFLY: \
- case CB_GPIO_FC8P2: \
- (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
- (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
- (card_type) = BFA_MFG_TYPE_FC8P2; \
- break; \
- case CB_GPIO_FC8P1: \
- (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
- (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
- (card_type) = BFA_MFG_TYPE_FC8P1; \
- break; \
- case CB_GPIO_FC4P2: \
- (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
- (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
- (card_type) = BFA_MFG_TYPE_FC4P2; \
- break; \
- case CB_GPIO_FC4P1: \
- (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
- (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
- (card_type) = BFA_MFG_TYPE_FC4P1; \
- break; \
- default: \
- (prop) |= BFI_ADAPTER_UNSUPP; \
- (card_type) = BFA_MFG_TYPE_INVALID; \
- } \
-} while (0)
-
-/**
- * VPD data length
- */
-#define BFA_MFG_VPD_LEN 512
-#define BFA_MFG_VPD_LEN_INVALID 0
-
-#define BFA_MFG_VPD_PCI_HDR_OFF 137
-#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
-#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
-
-/**
- * VPD vendor tag
- */
-enum {
- BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
- BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
- BFA_MFG_VPD_HP = 2, /*!< vendor HP */
- BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
- BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
- BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
- BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
- BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
-};
-
-/**
- * @brief BFA adapter flash vpd data definition.
- *
- * All numerical fields are in big-endian format.
- */
-struct bfa_mfg_vpd {
- u8 version; /*!< vpd data version */
- u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */
- u8 chksum; /*!< u8 checksum */
- u8 vendor; /*!< vendor */
- u8 len; /*!< vpd data length excluding header */
- u8 rsv;
- u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
-};
-
-#pragma pack()
-
-#endif /* __BFA_DEFS_MFG_H__ */
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
deleted file mode 100644
index 87aecdf22cf..00000000000
--- a/drivers/net/bna/bfa_ioc_ct.c
+++ /dev/null
@@ -1,516 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-
-#include "bfa_ioc.h"
-#include "cna.h"
-#include "bfi.h"
-#include "bfi_ctreg.h"
-#include "bfa_defs.h"
-
-#define bfa_ioc_ct_sync_pos(__ioc) \
- ((u32) (1 << bfa_ioc_pcifn(__ioc)))
-#define BFA_IOC_SYNC_REQD_SH 16
-#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
-#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
-#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
-#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
- (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
-
-/*
- * forward declarations
- */
-static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
-static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
-static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
-static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
-static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
-static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
-static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
-static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
-static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
-static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
-static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
-static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
-static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
-
-static struct bfa_ioc_hwif nw_hwif_ct;
-
-/**
- * Called from bfa_ioc_attach() to map asic specific calls.
- */
-void
-bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
-{
- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
-
- ioc->ioc_hwif = &nw_hwif_ct;
-}
-
-/**
- * Return true if firmware of current driver matches the running firmware.
- */
-static bool
-bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
-{
- enum bfi_ioc_state ioc_fwstate;
- u32 usecnt;
- struct bfi_ioc_image_hdr fwhdr;
-
- /**
- * Firmware match check is relevant only for CNA.
- */
- if (!ioc->cna)
- return true;
-
- /**
- * If bios boot (flash based) -- do not increment usage count
- */
- if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
- BFA_IOC_FWIMG_MINSZ)
- return true;
-
- bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
- usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
-
- /**
- * If usage count is 0, always return TRUE.
- */
- if (usecnt == 0) {
- writel(1, ioc->ioc_regs.ioc_usage_reg);
- bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
- writel(0, ioc->ioc_regs.ioc_fail_sync);
- return true;
- }
-
- ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
-
- /**
- * Use count cannot be non-zero and chip in uninitialized state.
- */
- BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));
-
- /**
- * Check if another driver with a different firmware is active
- */
- bfa_nw_ioc_fwver_get(ioc, &fwhdr);
- if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
- bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
- return false;
- }
-
- /**
- * Same firmware version. Increment the reference count.
- */
- usecnt++;
- writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
- bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
- return true;
-}
-
-static void
-bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
-{
- u32 usecnt;
-
- /**
- * Firmware lock is relevant only for CNA.
- */
- if (!ioc->cna)
- return;
-
- /**
- * If bios boot (flash based) -- do not decrement usage count
- */
- if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
- BFA_IOC_FWIMG_MINSZ)
- return;
-
- /**
- * decrement usage count
- */
- bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
- usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
- BUG_ON(!(usecnt > 0));
-
- usecnt--;
- writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
-
- bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
-}
-
-/**
- * Notify other functions on HB failure.
- */
-static void
-bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
-{
- if (ioc->cna) {
- writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
- writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
- /* Wait for halt to take effect */
- readl(ioc->ioc_regs.ll_halt);
- readl(ioc->ioc_regs.alt_ll_halt);
- } else {
- writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
- readl(ioc->ioc_regs.err_set);
- }
-}
-
-/**
- * Host to LPU mailbox message addresses
- */
-static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
- { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
- { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
- { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
- { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
-};
-
-/**
- * Host <-> LPU mailbox command/status registers - port 0
- */
-static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
- { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
- { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
- { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
- { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
-};
-
-/**
- * Host <-> LPU mailbox command/status registers - port 1
- */
-static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
- { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
- { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
- { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
- { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
-};
-
-static void
-bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
-{
- void __iomem *rb;
- int pcifn = bfa_ioc_pcifn(ioc);
-
- rb = bfa_ioc_bar0(ioc);
-
- ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
- ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
- ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
-
- if (ioc->port_id == 0) {
- ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
- ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
- ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
- ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
- ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
- ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
- ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
- } else {
- ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
- ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
- ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
- ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
- ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
- ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
- ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
- }
-
- /*
- * PSS control registers
- */
- ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
- ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
- ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
- ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
-
- /*
- * IOC semaphore registers and serialization
- */
- ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
- ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
- ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
- ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
- ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
-
- /**
- * sram memory access
- */
- ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
- ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
-
- /*
- * err set reg : for notification of hb failure in fcmode
- */
- ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
-}
-
-/**
- * Initialize IOC to port mapping.
- */
-
-#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
-static void
-bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
-{
- void __iomem *rb = ioc->pcidev.pci_bar_kva;
- u32 r32;
-
- /**
- * For catapult, base port id on personality register and IOC type
- */
- r32 = readl(rb + FNC_PERS_REG);
- r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
- ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
-
-}
-
-/**
- * Set interrupt mode for a function: INTX or MSIX
- */
-static void
-bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
-{
- void __iomem *rb = ioc->pcidev.pci_bar_kva;
- u32 r32, mode;
-
- r32 = readl(rb + FNC_PERS_REG);
-
- mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
- __F0_INTX_STATUS;
-
- /**
- * If already in desired mode, do not change anything
- */
- if (!msix && mode)
- return;
-
- if (msix)
- mode = __F0_INTX_STATUS_MSIX;
- else
- mode = __F0_INTX_STATUS_INTA;
-
- r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
- r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
-
- writel(r32, rb + FNC_PERS_REG);
-}
-
-/**
- * Cleanup hw semaphore and usecnt registers
- */
-static void
-bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
-{
- if (ioc->cna) {
- bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
- writel(0, ioc->ioc_regs.ioc_usage_reg);
- bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
- }
-
- /*
- * Read the hw sem reg to make sure that it is locked
- * before we clear it. If it is not locked, writing 1
- * will lock it instead of clearing it.
- */
- readl(ioc->ioc_regs.ioc_sem_reg);
- bfa_nw_ioc_hw_sem_release(ioc);
-}
-
-/**
- * Synchronized IOC failure processing routines
- */
-static bool
-bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
-{
- u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
- u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
-
- /*
- * Driver load time. If the sync required bit for this PCI fn
- * is set, it is due to an unclean exit by the driver for this
- * PCI fn in the previous incarnation. Whoever comes here first
- * should clean it up, no matter which PCI fn.
- */
-
- if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
- writel(0, ioc->ioc_regs.ioc_fail_sync);
- writel(1, ioc->ioc_regs.ioc_usage_reg);
- writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
- return true;
- }
-
- return bfa_ioc_ct_sync_complete(ioc);
-}
-/**
- * Synchronized IOC failure processing routines
- */
-static void
-bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
-{
- u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
- u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
-
- writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
-}
-
-static void
-bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
-{
- u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
- u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
- bfa_ioc_ct_sync_pos(ioc);
-
- writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
-}
-
-static void
-bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
-{
- u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
-
- writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
-}
-
-static bool
-bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
-{
- u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
- u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
- u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
- u32 tmp_ackd;
-
- if (sync_ackd == 0)
- return true;
-
- /**
- * The check below is to see whether any other PCI fn
- * has reinitialized the ASIC (reset sync_ackd bits)
- * and failed again while this IOC was waiting for hw
- * semaphore (in bfa_iocpf_sm_semwait()).
- */
- tmp_ackd = sync_ackd;
- if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
- !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
- sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
-
- if (sync_reqd == sync_ackd) {
- writel(bfa_ioc_ct_clear_sync_ackd(r32),
- ioc->ioc_regs.ioc_fail_sync);
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
- return true;
- }
-
- /**
- * If another PCI fn reinitialized and failed again while
- * this IOC was waiting for hw sem, the sync_ackd bit for
- * this IOC need to be set again to allow reinitialization.
- */
- if (tmp_ackd != sync_ackd)
- writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
-
- return false;
-}
-
-static enum bfa_status
-bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
-{
- u32 pll_sclk, pll_fclk, r32;
-
- pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
- __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
- __APP_PLL_312_JITLMT0_1(3U) |
- __APP_PLL_312_CNTLMT0_1(1U);
- pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
- __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
- __APP_PLL_425_JITLMT0_1(3U) |
- __APP_PLL_425_CNTLMT0_1(1U);
- if (fcmode) {
- writel(0, (rb + OP_MODE));
- writel(__APP_EMS_CMLCKSEL |
- __APP_EMS_REFCKBUFEN2 |
- __APP_EMS_CHANNEL_SEL,
- (rb + ETH_MAC_SER_REG));
- } else {
- writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
- writel(__APP_EMS_REFCKBUFEN1,
- (rb + ETH_MAC_SER_REG));
- }
- writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
- writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
- writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
- writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
- writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
- writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
- writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
- writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
- writel(pll_sclk |
- __APP_PLL_312_LOGIC_SOFT_RESET,
- rb + APP_PLL_312_CTL_REG);
- writel(pll_fclk |
- __APP_PLL_425_LOGIC_SOFT_RESET,
- rb + APP_PLL_425_CTL_REG);
- writel(pll_sclk |
- __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
- rb + APP_PLL_312_CTL_REG);
- writel(pll_fclk |
- __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
- rb + APP_PLL_425_CTL_REG);
- readl(rb + HOSTFN0_INT_MSK);
- udelay(2000);
- writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
- writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
- writel(pll_sclk |
- __APP_PLL_312_ENABLE,
- rb + APP_PLL_312_CTL_REG);
- writel(pll_fclk |
- __APP_PLL_425_ENABLE,
- rb + APP_PLL_425_CTL_REG);
- if (!fcmode) {
- writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
- writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
- }
- r32 = readl((rb + PSS_CTL_REG));
- r32 &= ~__PSS_LMEM_RESET;
- writel(r32, (rb + PSS_CTL_REG));
- udelay(1000);
- if (!fcmode) {
- writel(0, (rb + PMM_1T_RESET_REG_P0));
- writel(0, (rb + PMM_1T_RESET_REG_P1));
- }
-
- writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
- udelay(1000);
- r32 = readl((rb + MBIST_STAT_REG));
- writel(0, (rb + MBIST_CTL_REG));
- return BFA_STATUS_OK;
-}
diff --git a/drivers/net/bna/bfi.h b/drivers/net/bna/bfi.h
deleted file mode 100644
index 088211c2724..00000000000
--- a/drivers/net/bna/bfi.h
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-
-#ifndef __BFI_H__
-#define __BFI_H__
-
-#include "bfa_defs.h"
-
-#pragma pack(1)
-
-/**
- * BFI FW image type
- */
-#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
-#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
-enum {
- BFI_IMAGE_CB_FC,
- BFI_IMAGE_CT_FC,
- BFI_IMAGE_CT_CNA,
- BFI_IMAGE_MAX,
-};
-
-/**
- * Msg header common to all msgs
- */
-struct bfi_mhdr {
- u8 msg_class; /*!< @ref enum bfi_mclass */
- u8 msg_id; /*!< msg opcode with in the class */
- union {
- struct {
- u8 rsvd;
- u8 lpu_id; /*!< msg destination */
- } h2i;
- u16 i2htok; /*!< token in msgs to host */
- } mtag;
-};
-
-#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
- (_mh).msg_class = (_mc); \
- (_mh).msg_id = (_op); \
- (_mh).mtag.h2i.lpu_id = (_lpuid); \
-} while (0)
-
-#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
- (_mh).msg_class = (_mc); \
- (_mh).msg_id = (_op); \
- (_mh).mtag.i2htok = (_i2htok); \
-} while (0)
-
-/*
- * Message opcodes: 0-127 to firmware, 128-255 to host
- */
-#define BFI_I2H_OPCODE_BASE 128
-#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
-
-/**
- ****************************************************************************
- *
- * Scatter Gather Element and Page definition
- *
- ****************************************************************************
- */
-
-#define BFI_SGE_INLINE 1
-#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
-
-/**
- * SG Flags
- */
-enum {
- BFI_SGE_DATA = 0, /*!< data address, not last */
- BFI_SGE_DATA_CPL = 1, /*!< data addr, last in current page */
- BFI_SGE_DATA_LAST = 3, /*!< data address, last */
- BFI_SGE_LINK = 2, /*!< link address */
- BFI_SGE_PGDLEN = 2, /*!< cumulative data length for page */
-};
-
-/**
- * DMA addresses
- */
-union bfi_addr_u {
- struct {
- u32 addr_lo;
- u32 addr_hi;
- } a32;
-};
-
-/**
- * Scatter Gather Element
- */
-struct bfi_sge {
-#ifdef __BIGENDIAN
- u32 flags:2,
- rsvd:2,
- sg_len:28;
-#else
- u32 sg_len:28,
- rsvd:2,
- flags:2;
-#endif
- union bfi_addr_u sga;
-};
-
-/**
- * Scatter Gather Page
- */
-#define BFI_SGPG_DATA_SGES 7
-#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
-#define BFI_SGPG_RSVD_WD_LEN 8
-struct bfi_sgpg {
- struct bfi_sge sges[BFI_SGPG_SGES_MAX];
- u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
-};
-
-/*
- * Large Message structure - 128 Bytes size Msgs
- */
-#define BFI_LMSG_SZ 128
-#define BFI_LMSG_PL_WSZ \
- ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
-
-struct bfi_msg {
- struct bfi_mhdr mhdr;
- u32 pl[BFI_LMSG_PL_WSZ];
-};
-
-/**
- * Mailbox message structure
- */
-#define BFI_MBMSG_SZ 7
-struct bfi_mbmsg {
- struct bfi_mhdr mh;
- u32 pl[BFI_MBMSG_SZ];
-};
-
-/**
- * Message Classes
- */
-enum bfi_mclass {
- BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
- BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
- BFI_MC_FLASH = 3, /*!< Flash message class */
- BFI_MC_CEE = 4, /*!< CEE */
- BFI_MC_FCPORT = 5, /*!< FC port */
- BFI_MC_IOCFC = 6, /*!< FC - IO Controller (IOC) */
- BFI_MC_LL = 7, /*!< Link Layer */
- BFI_MC_UF = 8, /*!< Unsolicited frame receive */
- BFI_MC_FCXP = 9, /*!< FC Transport */
- BFI_MC_LPS = 10, /*!< lport fc login services */
- BFI_MC_RPORT = 11, /*!< Remote port */
- BFI_MC_ITNIM = 12, /*!< I-T nexus (Initiator mode) */
- BFI_MC_IOIM_READ = 13, /*!< read IO (Initiator mode) */
- BFI_MC_IOIM_WRITE = 14, /*!< write IO (Initiator mode) */
- BFI_MC_IOIM_IO = 15, /*!< IO (Initiator mode) */
- BFI_MC_IOIM = 16, /*!< IO (Initiator mode) */
- BFI_MC_IOIM_IOCOM = 17, /*!< good IO completion */
- BFI_MC_TSKIM = 18, /*!< Initiator Task management */
- BFI_MC_SBOOT = 19, /*!< SAN boot services */
- BFI_MC_IPFC = 20, /*!< IP over FC Msgs */
- BFI_MC_PORT = 21, /*!< Physical port */
- BFI_MC_SFP = 22, /*!< SFP module */
- BFI_MC_MSGQ = 23, /*!< MSGQ */
- BFI_MC_ENET = 24, /*!< ENET commands/responses */
- BFI_MC_MAX = 32
-};
-
-#define BFI_IOC_MAX_CQS 4
-#define BFI_IOC_MAX_CQS_ASIC 8
-#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
-
-#define BFI_BOOT_TYPE_OFF 8
-#define BFI_BOOT_LOADER_OFF 12
-
-#define BFI_BOOT_TYPE_NORMAL 0
-#define BFI_BOOT_TYPE_FLASH 1
-#define BFI_BOOT_TYPE_MEMTEST 2
-
-#define BFI_BOOT_LOADER_OS 0
-
-#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
-#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
-
-/**
- *----------------------------------------------------------------------
- * IOC
- *----------------------------------------------------------------------
- */
-
-enum bfi_ioc_h2i_msgs {
- BFI_IOC_H2I_ENABLE_REQ = 1,
- BFI_IOC_H2I_DISABLE_REQ = 2,
- BFI_IOC_H2I_GETATTR_REQ = 3,
- BFI_IOC_H2I_DBG_SYNC = 4,
- BFI_IOC_H2I_DBG_DUMP = 5,
-};
-
-enum bfi_ioc_i2h_msgs {
- BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
- BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
- BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
- BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
- BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
-};
-
-/**
- * BFI_IOC_H2I_GETATTR_REQ message
- */
-struct bfi_ioc_getattr_req {
- struct bfi_mhdr mh;
- union bfi_addr_u attr_addr;
-};
-
-struct bfi_ioc_attr {
- u64 mfg_pwwn; /*!< Mfg port wwn */
- u64 mfg_nwwn; /*!< Mfg node wwn */
- mac_t mfg_mac; /*!< Mfg mac */
- u16 rsvd_a;
- u64 pwwn;
- u64 nwwn;
- mac_t mac; /*!< PBC or Mfg mac */
- u16 rsvd_b;
- mac_t fcoe_mac;
- u16 rsvd_c;
- char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
- u8 pcie_gen;
- u8 pcie_lanes_orig;
- u8 pcie_lanes;
- u8 rx_bbcredit; /*!< receive buffer credits */
- u32 adapter_prop; /*!< adapter properties */
- u16 maxfrsize; /*!< max receive frame size */
- char asic_rev;
- u8 rsvd_d;
- char fw_version[BFA_VERSION_LEN];
- char optrom_version[BFA_VERSION_LEN];
- struct bfa_mfg_vpd vpd;
- u32 card_type; /*!< card type */
-};
-
-/**
- * BFI_IOC_I2H_GETATTR_REPLY message
- */
-struct bfi_ioc_getattr_reply {
- struct bfi_mhdr mh; /*!< Common msg header */
- u8 status; /*!< cfg reply status */
- u8 rsvd[3];
-};
-
-/**
- * Firmware memory page offsets
- */
-#define BFI_IOC_SMEM_PG0_CB (0x40)
-#define BFI_IOC_SMEM_PG0_CT (0x180)
-
-/**
- * Firmware statistic offset
- */
-#define BFI_IOC_FWSTATS_OFF (0x6B40)
-#define BFI_IOC_FWSTATS_SZ (4096)
-
-/**
- * Firmware trace offset
- */
-#define BFI_IOC_TRC_OFF (0x4b00)
-#define BFI_IOC_TRC_ENTS 256
-
-#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
-#define BFI_IOC_MD5SUM_SZ 4
-struct bfi_ioc_image_hdr {
- u32 signature; /*!< constant signature */
- u32 rsvd_a;
- u32 exec; /*!< exec vector */
- u32 param; /*!< parameters */
- u32 rsvd_b[4];
- u32 md5sum[BFI_IOC_MD5SUM_SZ];
-};
-
-enum bfi_fwboot_type {
- BFI_FWBOOT_TYPE_NORMAL = 0,
- BFI_FWBOOT_TYPE_FLASH = 1,
- BFI_FWBOOT_TYPE_MEMTEST = 2,
-};
-
-/**
- * BFI_IOC_I2H_READY_EVENT message
- */
-struct bfi_ioc_rdy_event {
- struct bfi_mhdr mh; /*!< common msg header */
- u8 init_status; /*!< init event status */
- u8 rsvd[3];
-};
-
-struct bfi_ioc_hbeat {
- struct bfi_mhdr mh; /*!< common msg header */
- u32 hb_count; /*!< current heart beat count */
-};
-
-/**
- * IOC hardware/firmware state
- */
-enum bfi_ioc_state {
- BFI_IOC_UNINIT = 0, /*!< not initialized */
- BFI_IOC_INITING = 1, /*!< h/w is being initialized */
- BFI_IOC_HWINIT = 2, /*!< h/w is initialized */
- BFI_IOC_CFG = 3, /*!< IOC configuration in progress */
- BFI_IOC_OP = 4, /*!< IOC is operational */
- BFI_IOC_DISABLING = 5, /*!< IOC is being disabled */
- BFI_IOC_DISABLED = 6, /*!< IOC is disabled */
- BFI_IOC_CFG_DISABLED = 7, /*!< IOC is being disabled;transient */
- BFI_IOC_FAIL = 8, /*!< IOC heart-beat failure */
- BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */
-};
-
-#define BFI_IOC_ENDIAN_SIG 0x12345678
-
-enum {
- BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */
- BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */
- BFI_ADAPTER_TYPE_SH = 16, /*!< adapter type shift */
- BFI_ADAPTER_NPORTS_MK = 0xff00, /*!< number of ports mask */
- BFI_ADAPTER_NPORTS_SH = 8, /*!< number of ports shift */
- BFI_ADAPTER_SPEED_MK = 0xff, /*!< adapter speed mask */
- BFI_ADAPTER_SPEED_SH = 0, /*!< adapter speed shift */
- BFI_ADAPTER_PROTO = 0x100000, /*!< prototype adapaters */
- BFI_ADAPTER_TTV = 0x200000, /*!< TTV debug capable */
- BFI_ADAPTER_UNSUPP = 0x400000, /*!< unknown adapter type */
-};
-
-#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
- (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
- BFI_ADAPTER_ ## __prop ## _SH)
-#define BFI_ADAPTER_SETP(__prop, __val) \
- ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
-#define BFI_ADAPTER_IS_PROTO(__adap_type) \
- ((__adap_type) & BFI_ADAPTER_PROTO)
-#define BFI_ADAPTER_IS_TTV(__adap_type) \
- ((__adap_type) & BFI_ADAPTER_TTV)
-#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
- ((__adap_type) & BFI_ADAPTER_UNSUPP)
-#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
- ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
- BFI_ADAPTER_UNSUPP))
-
-/**
- * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
- */
-struct bfi_ioc_ctrl_req {
- struct bfi_mhdr mh;
- u8 ioc_class;
- u8 rsvd[3];
- u32 tv_sec;
-};
-
-/**
- * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
- */
-struct bfi_ioc_ctrl_reply {
- struct bfi_mhdr mh; /*!< Common msg header */
- u8 status; /*!< enable/disable status */
- u8 rsvd[3];
-};
-
-#define BFI_IOC_MSGSZ 8
-/**
- * H2I Messages
- */
-union bfi_ioc_h2i_msg_u {
- struct bfi_mhdr mh;
- struct bfi_ioc_ctrl_req enable_req;
- struct bfi_ioc_ctrl_req disable_req;
- struct bfi_ioc_getattr_req getattr_req;
- u32 mboxmsg[BFI_IOC_MSGSZ];
-};
-
-/**
- * I2H Messages
- */
-union bfi_ioc_i2h_msg_u {
- struct bfi_mhdr mh;
- struct bfi_ioc_rdy_event rdy_event;
- u32 mboxmsg[BFI_IOC_MSGSZ];
-};
-
-#pragma pack()
-
-#endif /* __BFI_H__ */
diff --git a/drivers/net/bna/bfi_ctreg.h b/drivers/net/bna/bfi_ctreg.h
deleted file mode 100644
index 5130d791866..00000000000
--- a/drivers/net/bna/bfi_ctreg.h
+++ /dev/null
@@ -1,646 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-
-/*
- * bfi_ctreg.h catapult host block register definitions
- *
- * !!! Do not edit. Auto generated. !!!
- */
-
-#ifndef __BFI_CTREG_H__
-#define __BFI_CTREG_H__
-
-#define HOSTFN0_LPU_MBOX0_0 0x00019200
-#define HOSTFN1_LPU_MBOX0_8 0x00019260
-#define LPU_HOSTFN0_MBOX0_0 0x00019280
-#define LPU_HOSTFN1_MBOX0_8 0x000192e0
-#define HOSTFN2_LPU_MBOX0_0 0x00019400
-#define HOSTFN3_LPU_MBOX0_8 0x00019460
-#define LPU_HOSTFN2_MBOX0_0 0x00019480
-#define LPU_HOSTFN3_MBOX0_8 0x000194e0
-#define HOSTFN0_INT_STATUS 0x00014000
-#define __HOSTFN0_HALT_OCCURRED 0x01000000
-#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
-#define __HOSTFN0_INT_STATUS_LVL_SH 20
-#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
-#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000
-#define __HOSTFN0_INT_STATUS_P_SH 16
-#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH)
-#define __HOSTFN0_INT_STATUS_F 0x0000ffff
-#define HOSTFN0_INT_MSK 0x00014004
-#define HOST_PAGE_NUM_FN0 0x00014008
-#define __HOST_PAGE_NUM_FN 0x000001ff
-#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c
-#define __MSIX_ERR_INDEX_FN 0x000001ff
-#define HOSTFN1_INT_STATUS 0x00014100
-#define __HOSTFN1_HALT_OCCURRED 0x01000000
-#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000
-#define __HOSTFN1_INT_STATUS_LVL_SH 20
-#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
-#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000
-#define __HOSTFN1_INT_STATUS_P_SH 16
-#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH)
-#define __HOSTFN1_INT_STATUS_F 0x0000ffff
-#define HOSTFN1_INT_MSK 0x00014104
-#define HOST_PAGE_NUM_FN1 0x00014108
-#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c
-#define APP_PLL_425_CTL_REG 0x00014204
-#define __P_425_PLL_LOCK 0x80000000
-#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000
-#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000
-#define __APP_PLL_425_RESET_TIMER_SH 17
-#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH)
-#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
-#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000
-#define __APP_PLL_425_CNTLMT0_1_SH 14
-#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
-#define __APP_PLL_425_JITLMT0_1_MK 0x00003000
-#define __APP_PLL_425_JITLMT0_1_SH 12
-#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH)
-#define __APP_PLL_425_HREF 0x00000800
-#define __APP_PLL_425_HDIV 0x00000400
-#define __APP_PLL_425_P0_1_MK 0x00000300
-#define __APP_PLL_425_P0_1_SH 8
-#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH)
-#define __APP_PLL_425_Z0_2_MK 0x000000e0
-#define __APP_PLL_425_Z0_2_SH 5
-#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH)
-#define __APP_PLL_425_RSEL200500 0x00000010
-#define __APP_PLL_425_ENARST 0x00000008
-#define __APP_PLL_425_BYPASS 0x00000004
-#define __APP_PLL_425_LRESETN 0x00000002
-#define __APP_PLL_425_ENABLE 0x00000001
-#define APP_PLL_312_CTL_REG 0x00014208
-#define __P_312_PLL_LOCK 0x80000000
-#define __ENABLE_MAC_AHB_1 0x00800000
-#define __ENABLE_MAC_AHB_0 0x00400000
-#define __ENABLE_MAC_1 0x00200000
-#define __ENABLE_MAC_0 0x00100000
-#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000
-#define __APP_PLL_312_RESET_TIMER_SH 17
-#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH)
-#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
-#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000
-#define __APP_PLL_312_CNTLMT0_1_SH 14
-#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
-#define __APP_PLL_312_JITLMT0_1_MK 0x00003000
-#define __APP_PLL_312_JITLMT0_1_SH 12
-#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH)
-#define __APP_PLL_312_HREF 0x00000800
-#define __APP_PLL_312_HDIV 0x00000400
-#define __APP_PLL_312_P0_1_MK 0x00000300
-#define __APP_PLL_312_P0_1_SH 8
-#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH)
-#define __APP_PLL_312_Z0_2_MK 0x000000e0
-#define __APP_PLL_312_Z0_2_SH 5
-#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH)
-#define __APP_PLL_312_RSEL200500 0x00000010
-#define __APP_PLL_312_ENARST 0x00000008
-#define __APP_PLL_312_BYPASS 0x00000004
-#define __APP_PLL_312_LRESETN 0x00000002
-#define __APP_PLL_312_ENABLE 0x00000001
-#define MBIST_CTL_REG 0x00014220
-#define __EDRAM_BISTR_START 0x00000004
-#define __MBIST_RESET 0x00000002
-#define __MBIST_START 0x00000001
-#define MBIST_STAT_REG 0x00014224
-#define __EDRAM_BISTR_STATUS 0x00000008
-#define __EDRAM_BISTR_DONE 0x00000004
-#define __MEM_BIT_STATUS 0x00000002
-#define __MBIST_DONE 0x00000001
-#define HOST_SEM0_REG 0x00014230
-#define __HOST_SEMAPHORE 0x00000001
-#define HOST_SEM1_REG 0x00014234
-#define HOST_SEM2_REG 0x00014238
-#define HOST_SEM3_REG 0x0001423c
-#define HOST_SEM0_INFO_REG 0x00014240
-#define HOST_SEM1_INFO_REG 0x00014244
-#define HOST_SEM2_INFO_REG 0x00014248
-#define HOST_SEM3_INFO_REG 0x0001424c
-#define ETH_MAC_SER_REG 0x00014288
-#define __APP_EMS_CKBUFAMPIN 0x00000020
-#define __APP_EMS_REFCLKSEL 0x00000010
-#define __APP_EMS_CMLCKSEL 0x00000008
-#define __APP_EMS_REFCKBUFEN2 0x00000004
-#define __APP_EMS_REFCKBUFEN1 0x00000002
-#define __APP_EMS_CHANNEL_SEL 0x00000001
-#define HOSTFN2_INT_STATUS 0x00014300
-#define __HOSTFN2_HALT_OCCURRED 0x01000000
-#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000
-#define __HOSTFN2_INT_STATUS_LVL_SH 20
-#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
-#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000
-#define __HOSTFN2_INT_STATUS_P_SH 16
-#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH)
-#define __HOSTFN2_INT_STATUS_F 0x0000ffff
-#define HOSTFN2_INT_MSK 0x00014304
-#define HOST_PAGE_NUM_FN2 0x00014308
-#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c
-#define HOSTFN3_INT_STATUS 0x00014400
-#define __HALT_OCCURRED 0x01000000
-#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000
-#define __HOSTFN3_INT_STATUS_LVL_SH 20
-#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
-#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000
-#define __HOSTFN3_INT_STATUS_P_SH 16
-#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH)
-#define __HOSTFN3_INT_STATUS_F 0x0000ffff
-#define HOSTFN3_INT_MSK 0x00014404
-#define HOST_PAGE_NUM_FN3 0x00014408
-#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c
-#define FNC_ID_REG 0x00014600
-#define __FUNCTION_NUMBER 0x00000007
-#define FNC_PERS_REG 0x00014604
-#define __F3_FUNCTION_ACTIVE 0x80000000
-#define __F3_FUNCTION_MODE 0x40000000
-#define __F3_PORT_MAP_MK 0x30000000
-#define __F3_PORT_MAP_SH 28
-#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
-#define __F3_VM_MODE 0x08000000
-#define __F3_INTX_STATUS_MK 0x07000000
-#define __F3_INTX_STATUS_SH 24
-#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
-#define __F2_FUNCTION_ACTIVE 0x00800000
-#define __F2_FUNCTION_MODE 0x00400000
-#define __F2_PORT_MAP_MK 0x00300000
-#define __F2_PORT_MAP_SH 20
-#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
-#define __F2_VM_MODE 0x00080000
-#define __F2_INTX_STATUS_MK 0x00070000
-#define __F2_INTX_STATUS_SH 16
-#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
-#define __F1_FUNCTION_ACTIVE 0x00008000
-#define __F1_FUNCTION_MODE 0x00004000
-#define __F1_PORT_MAP_MK 0x00003000
-#define __F1_PORT_MAP_SH 12
-#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
-#define __F1_VM_MODE 0x00000800
-#define __F1_INTX_STATUS_MK 0x00000700
-#define __F1_INTX_STATUS_SH 8
-#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
-#define __F0_FUNCTION_ACTIVE 0x00000080
-#define __F0_FUNCTION_MODE 0x00000040
-#define __F0_PORT_MAP_MK 0x00000030
-#define __F0_PORT_MAP_SH 4
-#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
-#define __F0_VM_MODE 0x00000008
-#define __F0_INTX_STATUS 0x00000007
-enum {
- __F0_INTX_STATUS_MSIX = 0x0,
- __F0_INTX_STATUS_INTA = 0x1,
- __F0_INTX_STATUS_INTB = 0x2,
- __F0_INTX_STATUS_INTC = 0x3,
- __F0_INTX_STATUS_INTD = 0x4,
-};
-#define OP_MODE 0x0001460c
-#define __APP_ETH_CLK_LOWSPEED 0x00000004
-#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
-#define __GLOBAL_FCOE_MODE 0x00000001
-#define HOST_SEM4_REG 0x00014610
-#define HOST_SEM5_REG 0x00014614
-#define HOST_SEM6_REG 0x00014618
-#define HOST_SEM7_REG 0x0001461c
-#define HOST_SEM4_INFO_REG 0x00014620
-#define HOST_SEM5_INFO_REG 0x00014624
-#define HOST_SEM6_INFO_REG 0x00014628
-#define HOST_SEM7_INFO_REG 0x0001462c
-#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000
-#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1
-#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004
-#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1
-#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008
-#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
-#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1
-#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c
-#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
-#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1
-#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010
-#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1
-#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014
-#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1
-#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018
-#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
-#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1
-#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c
-#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
-#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1
-#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150
-#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1
-#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154
-#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1
-#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158
-#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
-#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1
-#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c
-#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
-#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1
-#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160
-#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1
-#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164
-#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1
-#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168
-#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
-#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1
-#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c
-#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
-#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1
-#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
-#define FW_INIT_HALT_P0 0x000191ac
-#define __FW_INIT_HALT_P 0x00000001
-#define FW_INIT_HALT_P1 0x000191bc
-#define CPE_PI_PTR_Q0 0x00038000
-#define __CPE_PI_UNUSED_MK 0xffff0000
-#define __CPE_PI_UNUSED_SH 16
-#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH)
-#define __CPE_PI_PTR 0x0000ffff
-#define CPE_PI_PTR_Q1 0x00038040
-#define CPE_CI_PTR_Q0 0x00038004
-#define __CPE_CI_UNUSED_MK 0xffff0000
-#define __CPE_CI_UNUSED_SH 16
-#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH)
-#define __CPE_CI_PTR 0x0000ffff
-#define CPE_CI_PTR_Q1 0x00038044
-#define CPE_DEPTH_Q0 0x00038008
-#define __CPE_DEPTH_UNUSED_MK 0xf8000000
-#define __CPE_DEPTH_UNUSED_SH 27
-#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH)
-#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000
-#define __CPE_MSIX_VEC_INDEX_SH 16
-#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH)
-#define __CPE_DEPTH 0x0000ffff
-#define CPE_DEPTH_Q1 0x00038048
-#define CPE_QCTRL_Q0 0x0003800c
-#define __CPE_CTRL_UNUSED30_MK 0xfc000000
-#define __CPE_CTRL_UNUSED30_SH 26
-#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH)
-#define __CPE_FUNC_INT_CTRL_MK 0x03000000
-#define __CPE_FUNC_INT_CTRL_SH 24
-#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH)
-enum {
- __CPE_FUNC_INT_CTRL_DISABLE = 0x0,
- __CPE_FUNC_INT_CTRL_F2NF = 0x1,
- __CPE_FUNC_INT_CTRL_3QUART = 0x2,
- __CPE_FUNC_INT_CTRL_HALF = 0x3,
-};
-#define __CPE_CTRL_UNUSED20_MK 0x00f00000
-#define __CPE_CTRL_UNUSED20_SH 20
-#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH)
-#define __CPE_SCI_TH_MK 0x000f0000
-#define __CPE_SCI_TH_SH 16
-#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH)
-#define __CPE_CTRL_UNUSED10_MK 0x0000c000
-#define __CPE_CTRL_UNUSED10_SH 14
-#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH)
-#define __CPE_ACK_PENDING 0x00002000
-#define __CPE_CTRL_UNUSED40_MK 0x00001c00
-#define __CPE_CTRL_UNUSED40_SH 10
-#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH)
-#define __CPE_PCIEID_MK 0x00000300
-#define __CPE_PCIEID_SH 8
-#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH)
-#define __CPE_CTRL_UNUSED00_MK 0x000000fe
-#define __CPE_CTRL_UNUSED00_SH 1
-#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH)
-#define __CPE_ESIZE 0x00000001
-#define CPE_QCTRL_Q1 0x0003804c
-#define __CPE_CTRL_UNUSED31_MK 0xfc000000
-#define __CPE_CTRL_UNUSED31_SH 26
-#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH)
-#define __CPE_CTRL_UNUSED21_MK 0x00f00000
-#define __CPE_CTRL_UNUSED21_SH 20
-#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH)
-#define __CPE_CTRL_UNUSED11_MK 0x0000c000
-#define __CPE_CTRL_UNUSED11_SH 14
-#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH)
-#define __CPE_CTRL_UNUSED41_MK 0x00001c00
-#define __CPE_CTRL_UNUSED41_SH 10
-#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH)
-#define __CPE_CTRL_UNUSED01_MK 0x000000fe
-#define __CPE_CTRL_UNUSED01_SH 1
-#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH)
-#define RME_PI_PTR_Q0 0x00038020
-#define __LATENCY_TIME_STAMP_MK 0xffff0000
-#define __LATENCY_TIME_STAMP_SH 16
-#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH)
-#define __RME_PI_PTR 0x0000ffff
-#define RME_PI_PTR_Q1 0x00038060
-#define RME_CI_PTR_Q0 0x00038024
-#define __DELAY_TIME_STAMP_MK 0xffff0000
-#define __DELAY_TIME_STAMP_SH 16
-#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH)
-#define __RME_CI_PTR 0x0000ffff
-#define RME_CI_PTR_Q1 0x00038064
-#define RME_DEPTH_Q0 0x00038028
-#define __RME_DEPTH_UNUSED_MK 0xf8000000
-#define __RME_DEPTH_UNUSED_SH 27
-#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH)
-#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000
-#define __RME_MSIX_VEC_INDEX_SH 16
-#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH)
-#define __RME_DEPTH 0x0000ffff
-#define RME_DEPTH_Q1 0x00038068
-#define RME_QCTRL_Q0 0x0003802c
-#define __RME_INT_LATENCY_TIMER_MK 0xff000000
-#define __RME_INT_LATENCY_TIMER_SH 24
-#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH)
-#define __RME_INT_DELAY_TIMER_MK 0x00ff0000
-#define __RME_INT_DELAY_TIMER_SH 16
-#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH)
-#define __RME_INT_DELAY_DISABLE 0x00008000
-#define __RME_DLY_DELAY_DISABLE 0x00004000
-#define __RME_ACK_PENDING 0x00002000
-#define __RME_FULL_INTERRUPT_DISABLE 0x00001000
-#define __RME_CTRL_UNUSED10_MK 0x00000c00
-#define __RME_CTRL_UNUSED10_SH 10
-#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH)
-#define __RME_PCIEID_MK 0x00000300
-#define __RME_PCIEID_SH 8
-#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH)
-#define __RME_CTRL_UNUSED00_MK 0x000000fe
-#define __RME_CTRL_UNUSED00_SH 1
-#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH)
-#define __RME_ESIZE 0x00000001
-#define RME_QCTRL_Q1 0x0003806c
-#define __RME_CTRL_UNUSED11_MK 0x00000c00
-#define __RME_CTRL_UNUSED11_SH 10
-#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH)
-#define __RME_CTRL_UNUSED01_MK 0x000000fe
-#define __RME_CTRL_UNUSED01_SH 1
-#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH)
-#define PSS_CTL_REG 0x00018800
-#define __PSS_I2C_CLK_DIV_MK 0x007f0000
-#define __PSS_I2C_CLK_DIV_SH 16
-#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
-#define __PSS_LMEM_INIT_DONE 0x00001000
-#define __PSS_LMEM_RESET 0x00000200
-#define __PSS_LMEM_INIT_EN 0x00000100
-#define __PSS_LPU1_RESET 0x00000002
-#define __PSS_LPU0_RESET 0x00000001
-#define PSS_ERR_STATUS_REG 0x00018810
-#define __PSS_LPU1_TCM_READ_ERR 0x00200000
-#define __PSS_LPU0_TCM_READ_ERR 0x00100000
-#define __PSS_LMEM5_CORR_ERR 0x00080000
-#define __PSS_LMEM4_CORR_ERR 0x00040000
-#define __PSS_LMEM3_CORR_ERR 0x00020000
-#define __PSS_LMEM2_CORR_ERR 0x00010000
-#define __PSS_LMEM1_CORR_ERR 0x00008000
-#define __PSS_LMEM0_CORR_ERR 0x00004000
-#define __PSS_LMEM5_UNCORR_ERR 0x00002000
-#define __PSS_LMEM4_UNCORR_ERR 0x00001000
-#define __PSS_LMEM3_UNCORR_ERR 0x00000800
-#define __PSS_LMEM2_UNCORR_ERR 0x00000400
-#define __PSS_LMEM1_UNCORR_ERR 0x00000200
-#define __PSS_LMEM0_UNCORR_ERR 0x00000100
-#define __PSS_BAL_PERR 0x00000080
-#define __PSS_DIP_IF_ERR 0x00000040
-#define __PSS_IOH_IF_ERR 0x00000020
-#define __PSS_TDS_IF_ERR 0x00000010
-#define __PSS_RDS_IF_ERR 0x00000008
-#define __PSS_SGM_IF_ERR 0x00000004
-#define __PSS_LPU1_RAM_ERR 0x00000002
-#define __PSS_LPU0_RAM_ERR 0x00000001
-#define ERR_SET_REG 0x00018818
-#define __PSS_ERR_STATUS_SET 0x003fffff
-#define PMM_1T_RESET_REG_P0 0x0002381c
-#define __PMM_1T_RESET_P 0x00000001
-#define PMM_1T_RESET_REG_P1 0x00023c1c
-#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
-#define __RXQ0_ADD_VECTORS_P 0x80000000
-#define __RXQ0_STOP_P 0x40000000
-#define __RXQ0_PRD_PTR_P 0x0000ffff
-#define HQM_QSET1_RXQ_DRBL_P0 0x00038080
-#define __RXQ1_ADD_VECTORS_P 0x80000000
-#define __RXQ1_STOP_P 0x40000000
-#define __RXQ1_PRD_PTR_P 0x0000ffff
-#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000
-#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080
-#define HQM_QSET0_TXQ_DRBL_P0 0x00038020
-#define __TXQ0_ADD_VECTORS_P 0x80000000
-#define __TXQ0_STOP_P 0x40000000
-#define __TXQ0_PRD_PTR_P 0x0000ffff
-#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0
-#define __TXQ1_ADD_VECTORS_P 0x80000000
-#define __TXQ1_STOP_P 0x40000000
-#define __TXQ1_PRD_PTR_P 0x0000ffff
-#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020
-#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0
-#define HQM_QSET0_IB_DRBL_1_P0 0x00038040
-#define __IB1_0_ACK_P 0x80000000
-#define __IB1_0_DISABLE_P 0x40000000
-#define __IB1_0_COALESCING_CFG_P_MK 0x00ff0000
-#define __IB1_0_COALESCING_CFG_P_SH 16
-#define __IB1_0_COALESCING_CFG_P(_v) ((_v) << __IB1_0_COALESCING_CFG_P_SH)
-#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
-#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0
-#define __IB1_1_ACK_P 0x80000000
-#define __IB1_1_DISABLE_P 0x40000000
-#define __IB1_1_COALESCING_CFG_P_MK 0x00ff0000
-#define __IB1_1_COALESCING_CFG_P_SH 16
-#define __IB1_1_COALESCING_CFG_P(_v) ((_v) << __IB1_1_COALESCING_CFG_P_SH)
-#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
-#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040
-#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0
-#define HQM_QSET0_IB_DRBL_2_P0 0x00038060
-#define __IB2_0_ACK_P 0x80000000
-#define __IB2_0_DISABLE_P 0x40000000
-#define __IB2_0_COALESCING_CFG_P_MK 0x00ff0000
-#define __IB2_0_COALESCING_CFG_P_SH 16
-#define __IB2_0_COALESCING_CFG_P(_v) ((_v) << __IB2_0_COALESCING_CFG_P_SH)
-#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
-#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0
-#define __IB2_1_ACK_P 0x80000000
-#define __IB2_1_DISABLE_P 0x40000000
-#define __IB2_1_COALESCING_CFG_P_MK 0x00ff0000
-#define __IB2_1_COALESCING_CFG_P_SH 16
-#define __IB2_1_COALESCING_CFG_P(_v) ((_v) << __IB2_1_COALESCING_CFG_P_SH)
-#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
-#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060
-#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0
-
-/*
- * These definitions are either in error/missing in spec. Its auto-generated
- * from hard coded values in regparse.pl.
- */
-#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
-#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
-#define __EMPHPRE_AT_4G_FIX 0x00000003
-#define __SFP_TXRATE_EN_FIX 0x00000100
-#define __SFP_RXRATE_EN_FIX 0x00000080
-
-/*
- * These register definitions are auto-generated from hard coded values
- * in regparse.pl.
- */
-
-/*
- * These register mapping definitions are auto-generated from mapping tables
- * in regparse.pl.
- */
-#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
-#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
-#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
-#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
-#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
-#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
-
-#define CPE_DEPTH_Q(__n) \
- (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
-#define CPE_QCTRL_Q(__n) \
- (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
-#define CPE_PI_PTR_Q(__n) \
- (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
-#define CPE_CI_PTR_Q(__n) \
- (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
-#define RME_DEPTH_Q(__n) \
- (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
-#define RME_QCTRL_Q(__n) \
- (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
-#define RME_PI_PTR_Q(__n) \
- (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
-#define RME_CI_PTR_Q(__n) \
- (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
-#define HQM_QSET_RXQ_DRBL_P0(__n) \
- (HQM_QSET0_RXQ_DRBL_P0 + (__n) * \
- (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
-#define HQM_QSET_TXQ_DRBL_P0(__n) \
- (HQM_QSET0_TXQ_DRBL_P0 + (__n) * \
- (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
-#define HQM_QSET_IB_DRBL_1_P0(__n) \
- (HQM_QSET0_IB_DRBL_1_P0 + (__n) * \
- (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
-#define HQM_QSET_IB_DRBL_2_P0(__n) \
- (HQM_QSET0_IB_DRBL_2_P0 + (__n) * \
- (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
-#define HQM_QSET_RXQ_DRBL_P1(__n) \
- (HQM_QSET0_RXQ_DRBL_P1 + (__n) * \
- (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
-#define HQM_QSET_TXQ_DRBL_P1(__n) \
- (HQM_QSET0_TXQ_DRBL_P1 + (__n) * \
- (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
-#define HQM_QSET_IB_DRBL_1_P1(__n) \
- (HQM_QSET0_IB_DRBL_1_P1 + (__n) * \
- (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
-#define HQM_QSET_IB_DRBL_2_P1(__n) \
- (HQM_QSET0_IB_DRBL_2_P1 + (__n) * \
- (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
-
-#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
-#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
-#define CPE_Q_MASK(__q) ((__q) & 0x3)
-#define RME_Q_MASK(__q) ((__q) & 0x3)
-
-/*
- * PCI MSI-X vector defines
- */
-enum {
- BFA_MSIX_CPE_Q0 = 0,
- BFA_MSIX_CPE_Q1 = 1,
- BFA_MSIX_CPE_Q2 = 2,
- BFA_MSIX_CPE_Q3 = 3,
- BFA_MSIX_RME_Q0 = 4,
- BFA_MSIX_RME_Q1 = 5,
- BFA_MSIX_RME_Q2 = 6,
- BFA_MSIX_RME_Q3 = 7,
- BFA_MSIX_LPU_ERR = 8,
- BFA_MSIX_CT_MAX = 9,
-};
-
-/*
- * And corresponding host interrupt status bit field defines
- */
-#define __HFN_INT_CPE_Q0 0x00000001U
-#define __HFN_INT_CPE_Q1 0x00000002U
-#define __HFN_INT_CPE_Q2 0x00000004U
-#define __HFN_INT_CPE_Q3 0x00000008U
-#define __HFN_INT_CPE_Q4 0x00000010U
-#define __HFN_INT_CPE_Q5 0x00000020U
-#define __HFN_INT_CPE_Q6 0x00000040U
-#define __HFN_INT_CPE_Q7 0x00000080U
-#define __HFN_INT_RME_Q0 0x00000100U
-#define __HFN_INT_RME_Q1 0x00000200U
-#define __HFN_INT_RME_Q2 0x00000400U
-#define __HFN_INT_RME_Q3 0x00000800U
-#define __HFN_INT_RME_Q4 0x00001000U
-#define __HFN_INT_RME_Q5 0x00002000U
-#define __HFN_INT_RME_Q6 0x00004000U
-#define __HFN_INT_RME_Q7 0x00008000U
-#define __HFN_INT_ERR_EMC 0x00010000U
-#define __HFN_INT_ERR_LPU0 0x00020000U
-#define __HFN_INT_ERR_LPU1 0x00040000U
-#define __HFN_INT_ERR_PSS 0x00080000U
-#define __HFN_INT_MBOX_LPU0 0x00100000U
-#define __HFN_INT_MBOX_LPU1 0x00200000U
-#define __HFN_INT_MBOX1_LPU0 0x00400000U
-#define __HFN_INT_MBOX1_LPU1 0x00800000U
-#define __HFN_INT_LL_HALT 0x01000000U
-#define __HFN_INT_CPE_MASK 0x000000ffU
-#define __HFN_INT_RME_MASK 0x0000ff00U
-
-/*
- * catapult memory map.
- */
-#define LL_PGN_HQM0 0x0096
-#define LL_PGN_HQM1 0x0097
-#define PSS_SMEM_PAGE_START 0x8000
-#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
-#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
-
-/*
- * End of catapult memory map
- */
-
-#endif /* __BFI_CTREG_H__ */
diff --git a/drivers/net/bna/bfi_ll.h b/drivers/net/bna/bfi_ll.h
deleted file mode 100644
index bee4d054066..00000000000
--- a/drivers/net/bna/bfi_ll.h
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-#ifndef __BFI_LL_H__
-#define __BFI_LL_H__
-
-#include "bfi.h"
-
-#pragma pack(1)
-
-/**
- * @brief
- * "enums" for all LL mailbox messages other than IOC
- */
-enum {
- BFI_LL_H2I_MAC_UCAST_SET_REQ = 1,
- BFI_LL_H2I_MAC_UCAST_ADD_REQ = 2,
- BFI_LL_H2I_MAC_UCAST_DEL_REQ = 3,
-
- BFI_LL_H2I_MAC_MCAST_ADD_REQ = 4,
- BFI_LL_H2I_MAC_MCAST_DEL_REQ = 5,
- BFI_LL_H2I_MAC_MCAST_FILTER_REQ = 6,
- BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ = 7,
-
- BFI_LL_H2I_PORT_ADMIN_REQ = 8,
- BFI_LL_H2I_STATS_GET_REQ = 9,
- BFI_LL_H2I_STATS_CLEAR_REQ = 10,
-
- BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ = 11,
- BFI_LL_H2I_RXF_DEFAULT_SET_REQ = 12,
-
- BFI_LL_H2I_TXQ_STOP_REQ = 13,
- BFI_LL_H2I_RXQ_STOP_REQ = 14,
-
- BFI_LL_H2I_DIAG_LOOPBACK_REQ = 15,
-
- BFI_LL_H2I_SET_PAUSE_REQ = 16,
- BFI_LL_H2I_MTU_INFO_REQ = 17,
-
- BFI_LL_H2I_RX_REQ = 18,
-} ;
-
-enum {
- BFI_LL_I2H_MAC_UCAST_SET_RSP = BFA_I2HM(1),
- BFI_LL_I2H_MAC_UCAST_ADD_RSP = BFA_I2HM(2),
- BFI_LL_I2H_MAC_UCAST_DEL_RSP = BFA_I2HM(3),
-
- BFI_LL_I2H_MAC_MCAST_ADD_RSP = BFA_I2HM(4),
- BFI_LL_I2H_MAC_MCAST_DEL_RSP = BFA_I2HM(5),
- BFI_LL_I2H_MAC_MCAST_FILTER_RSP = BFA_I2HM(6),
- BFI_LL_I2H_MAC_MCAST_DEL_ALL_RSP = BFA_I2HM(7),
-
- BFI_LL_I2H_PORT_ADMIN_RSP = BFA_I2HM(8),
- BFI_LL_I2H_STATS_GET_RSP = BFA_I2HM(9),
- BFI_LL_I2H_STATS_CLEAR_RSP = BFA_I2HM(10),
-
- BFI_LL_I2H_RXF_PROMISCUOUS_SET_RSP = BFA_I2HM(11),
- BFI_LL_I2H_RXF_DEFAULT_SET_RSP = BFA_I2HM(12),
-
- BFI_LL_I2H_TXQ_STOP_RSP = BFA_I2HM(13),
- BFI_LL_I2H_RXQ_STOP_RSP = BFA_I2HM(14),
-
- BFI_LL_I2H_DIAG_LOOPBACK_RSP = BFA_I2HM(15),
-
- BFI_LL_I2H_SET_PAUSE_RSP = BFA_I2HM(16),
-
- BFI_LL_I2H_MTU_INFO_RSP = BFA_I2HM(17),
- BFI_LL_I2H_RX_RSP = BFA_I2HM(18),
-
- BFI_LL_I2H_LINK_DOWN_AEN = BFA_I2HM(19),
- BFI_LL_I2H_LINK_UP_AEN = BFA_I2HM(20),
-
- BFI_LL_I2H_PORT_ENABLE_AEN = BFA_I2HM(21),
- BFI_LL_I2H_PORT_DISABLE_AEN = BFA_I2HM(22),
-} ;
-
-/**
- * @brief bfi_ll_mac_addr_req is used by:
- * BFI_LL_H2I_MAC_UCAST_SET_REQ
- * BFI_LL_H2I_MAC_UCAST_ADD_REQ
- * BFI_LL_H2I_MAC_UCAST_DEL_REQ
- * BFI_LL_H2I_MAC_MCAST_ADD_REQ
- * BFI_LL_H2I_MAC_MCAST_DEL_REQ
- */
-struct bfi_ll_mac_addr_req {
- struct bfi_mhdr mh; /*!< common msg header */
- u8 rxf_id;
- u8 rsvd1[3];
- mac_t mac_addr;
- u8 rsvd2[2];
-};
-
-/**
- * @brief bfi_ll_mcast_filter_req is used by:
- * BFI_LL_H2I_MAC_MCAST_FILTER_REQ
- */
-struct bfi_ll_mcast_filter_req {
- struct bfi_mhdr mh; /*!< common msg header */
- u8 rxf_id;
- u8 enable;
- u8 rsvd[2];
-};
-
-/**
- * @brief bfi_ll_mcast_del_all is used by:
- * BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ
- */
-struct bfi_ll_mcast_del_all_req {
- struct bfi_mhdr mh; /*!< common msg header */
- u8 rxf_id;
- u8 rsvd[3];
-};
-
-/**
- * @brief bfi_ll_q_stop_req is used by:
- * BFI_LL_H2I_TXQ_STOP_REQ
- * BFI_LL_H2I_RXQ_STOP_REQ
- */
-struct bfi_ll_q_stop_req {
- struct bfi_mhdr mh; /*!< common msg header */
- u32 q_id_mask[2]; /* !< bit-mask for queue ids */
-};
-
-/**
- * @brief bfi_ll_stats_req is used by:
- * BFI_LL_I2H_STATS_GET_REQ
- * BFI_LL_I2H_STATS_CLEAR_REQ
- */
-struct bfi_ll_stats_req {
- struct bfi_mhdr mh; /*!< common msg header */
- u16 stats_mask; /* !< bit-mask for non-function statistics */
- u8 rsvd[2];
- u32 rxf_id_mask[2]; /* !< bit-mask for RxF Statistics */
- u32 txf_id_mask[2]; /* !< bit-mask for TxF Statistics */
- union bfi_addr_u host_buffer; /* !< where statistics are returned */
-};
-
-/**
- * @brief defines for "stats_mask" above.
- */
-#define BFI_LL_STATS_MAC (1 << 0) /* !< MAC Statistics */
-#define BFI_LL_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
-#define BFI_LL_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
-#define BFI_LL_STATS_RX_FC (1 << 3) /* !< Rx FC Stats from RxA */
-#define BFI_LL_STATS_TX_FC (1 << 4) /* !< Tx FC Stats from TxA */
-
-#define BFI_LL_STATS_ALL 0x1f
-
-/**
- * @brief bfi_ll_port_admin_req
- */
-struct bfi_ll_port_admin_req {
- struct bfi_mhdr mh; /*!< common msg header */
- u8 up;
- u8 rsvd[3];
-};
-
-/**
- * @brief bfi_ll_rxf_req is used by:
- * BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
- * BFI_LL_H2I_RXF_DEFAULT_SET_REQ
- */
-struct bfi_ll_rxf_req {
- struct bfi_mhdr mh; /*!< common msg header */
- u8 rxf_id;
- u8 enable;
- u8 rsvd[2];
-};
-
-/**
- * @brief bfi_ll_rxf_multi_req is used by:
- * BFI_LL_H2I_RX_REQ
- */
-struct bfi_ll_rxf_multi_req {
- struct bfi_mhdr mh; /*!< common msg header */
- u32 rxf_id_mask[2];
- u8 enable;
- u8 rsvd[3];
-};
-
-/**
- * @brief enum for Loopback opmodes
- */
-enum {
- BFI_LL_DIAG_LB_OPMODE_EXT = 0,
- BFI_LL_DIAG_LB_OPMODE_CBL = 1,
-};
-
-/**
- * @brief bfi_ll_set_pause_req is used by:
- * BFI_LL_H2I_SET_PAUSE_REQ
- */
-struct bfi_ll_set_pause_req {
- struct bfi_mhdr mh;
- u8 tx_pause; /* 1 = enable, 0 = disable */
- u8 rx_pause; /* 1 = enable, 0 = disable */
- u8 rsvd[2];
-};
-
-/**
- * @brief bfi_ll_mtu_info_req is used by:
- * BFI_LL_H2I_MTU_INFO_REQ
- */
-struct bfi_ll_mtu_info_req {
- struct bfi_mhdr mh;
- u16 mtu;
- u8 rsvd[2];
-};
-
-/**
- * @brief
- * Response header format used by all responses
- * For both responses and asynchronous notifications
- */
-struct bfi_ll_rsp {
- struct bfi_mhdr mh; /*!< common msg header */
- u8 error;
- u8 rsvd[3];
-};
-
-/**
- * @brief bfi_ll_cee_aen is used by:
- * BFI_LL_I2H_LINK_DOWN_AEN
- * BFI_LL_I2H_LINK_UP_AEN
- */
-struct bfi_ll_aen {
- struct bfi_mhdr mh; /*!< common msg header */
- u32 reason;
- u8 cee_linkup;
- u8 prio_map; /*!< LL priority bit-map */
- u8 rsvd[2];
-};
-
-/**
- * @brief
- * The following error codes can be returned
- * by the mbox commands
- */
-enum {
- BFI_LL_CMD_OK = 0,
- BFI_LL_CMD_FAIL = 1,
- BFI_LL_CMD_DUP_ENTRY = 2, /* !< Duplicate entry in CAM */
- BFI_LL_CMD_CAM_FULL = 3, /* !< CAM is full */
- BFI_LL_CMD_NOT_OWNER = 4, /* !< Not permitted, b'cos not owner */
- BFI_LL_CMD_NOT_EXEC = 5, /* !< Was not sent to f/w at all */
- BFI_LL_CMD_WAITING = 6, /* !< Waiting for completion (VMware) */
- BFI_LL_CMD_PORT_DISABLED = 7, /* !< port in disabled state */
-} ;
-
-/* Statistics */
-#define BFI_LL_TXF_ID_MAX 64
-#define BFI_LL_RXF_ID_MAX 64
-
-/* TxF Frame Statistics */
-struct bfi_ll_stats_txf {
- u64 ucast_octets;
- u64 ucast;
- u64 ucast_vlan;
-
- u64 mcast_octets;
- u64 mcast;
- u64 mcast_vlan;
-
- u64 bcast_octets;
- u64 bcast;
- u64 bcast_vlan;
-
- u64 errors;
- u64 filter_vlan; /* frames filtered due to VLAN */
- u64 filter_mac_sa; /* frames filtered due to SA check */
-};
-
-/* RxF Frame Statistics */
-struct bfi_ll_stats_rxf {
- u64 ucast_octets;
- u64 ucast;
- u64 ucast_vlan;
-
- u64 mcast_octets;
- u64 mcast;
- u64 mcast_vlan;
-
- u64 bcast_octets;
- u64 bcast;
- u64 bcast_vlan;
- u64 frame_drops;
-};
-
-/* FC Tx Frame Statistics */
-struct bfi_ll_stats_fc_tx {
- u64 txf_ucast_octets;
- u64 txf_ucast;
- u64 txf_ucast_vlan;
-
- u64 txf_mcast_octets;
- u64 txf_mcast;
- u64 txf_mcast_vlan;
-
- u64 txf_bcast_octets;
- u64 txf_bcast;
- u64 txf_bcast_vlan;
-
- u64 txf_parity_errors;
- u64 txf_timeout;
- u64 txf_fid_parity_errors;
-};
-
-/* FC Rx Frame Statistics */
-struct bfi_ll_stats_fc_rx {
- u64 rxf_ucast_octets;
- u64 rxf_ucast;
- u64 rxf_ucast_vlan;
-
- u64 rxf_mcast_octets;
- u64 rxf_mcast;
- u64 rxf_mcast_vlan;
-
- u64 rxf_bcast_octets;
- u64 rxf_bcast;
- u64 rxf_bcast_vlan;
-};
-
-/* RAD Frame Statistics */
-struct bfi_ll_stats_rad {
- u64 rx_frames;
- u64 rx_octets;
- u64 rx_vlan_frames;
-
- u64 rx_ucast;
- u64 rx_ucast_octets;
- u64 rx_ucast_vlan;
-
- u64 rx_mcast;
- u64 rx_mcast_octets;
- u64 rx_mcast_vlan;
-
- u64 rx_bcast;
- u64 rx_bcast_octets;
- u64 rx_bcast_vlan;
-
- u64 rx_drops;
-};
-
-/* BPC Tx Registers */
-struct bfi_ll_stats_bpc {
- /* transmit stats */
- u64 tx_pause[8];
- u64 tx_zero_pause[8]; /*!< Pause cancellation */
- /*!<Pause initiation rather than retention */
- u64 tx_first_pause[8];
-
- /* receive stats */
- u64 rx_pause[8];
- u64 rx_zero_pause[8]; /*!< Pause cancellation */
- /*!<Pause initiation rather than retention */
- u64 rx_first_pause[8];
-};
-
-/* MAC Rx Statistics */
-struct bfi_ll_stats_mac {
- u64 frame_64; /* both rx and tx counter */
- u64 frame_65_127; /* both rx and tx counter */
- u64 frame_128_255; /* both rx and tx counter */
- u64 frame_256_511; /* both rx and tx counter */
- u64 frame_512_1023; /* both rx and tx counter */
- u64 frame_1024_1518; /* both rx and tx counter */
- u64 frame_1519_1522; /* both rx and tx counter */
-
- /* receive stats */
- u64 rx_bytes;
- u64 rx_packets;
- u64 rx_fcs_error;
- u64 rx_multicast;
- u64 rx_broadcast;
- u64 rx_control_frames;
- u64 rx_pause;
- u64 rx_unknown_opcode;
- u64 rx_alignment_error;
- u64 rx_frame_length_error;
- u64 rx_code_error;
- u64 rx_carrier_sense_error;
- u64 rx_undersize;
- u64 rx_oversize;
- u64 rx_fragments;
- u64 rx_jabber;
- u64 rx_drop;
-
- /* transmit stats */
- u64 tx_bytes;
- u64 tx_packets;
- u64 tx_multicast;
- u64 tx_broadcast;
- u64 tx_pause;
- u64 tx_deferral;
- u64 tx_excessive_deferral;
- u64 tx_single_collision;
- u64 tx_muliple_collision;
- u64 tx_late_collision;
- u64 tx_excessive_collision;
- u64 tx_total_collision;
- u64 tx_pause_honored;
- u64 tx_drop;
- u64 tx_jabber;
- u64 tx_fcs_error;
- u64 tx_control_frame;
- u64 tx_oversize;
- u64 tx_undersize;
- u64 tx_fragments;
-};
-
-/* Complete statistics */
-struct bfi_ll_stats {
- struct bfi_ll_stats_mac mac_stats;
- struct bfi_ll_stats_bpc bpc_stats;
- struct bfi_ll_stats_rad rad_stats;
- struct bfi_ll_stats_fc_rx fc_rx_stats;
- struct bfi_ll_stats_fc_tx fc_tx_stats;
- struct bfi_ll_stats_rxf rxf_stats[BFI_LL_RXF_ID_MAX];
- struct bfi_ll_stats_txf txf_stats[BFI_LL_TXF_ID_MAX];
-};
-
-#pragma pack()
-
-#endif /* __BFI_LL_H__ */
diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
deleted file mode 100644
index 21e9155d6e5..00000000000
--- a/drivers/net/bna/bna.h
+++ /dev/null
@@ -1,548 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-#ifndef __BNA_H__
-#define __BNA_H__
-
-#include "bfa_cs.h"
-#include "bfa_ioc.h"
-#include "cna.h"
-#include "bfi_ll.h"
-#include "bna_types.h"
-
-extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
-
-/**
- *
- * Macros and constants
- *
- */
-
-#define BNA_IOC_TIMER_FREQ 200
-
-/* Log string size */
-#define BNA_MESSAGE_SIZE 256
-
-/* MBOX API for PORT, TX, RX */
-#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \
-do { \
- memcpy(&((_qe)->cmd.msg[0]), (_cmd), (_cmd_len)); \
- (_qe)->cbfn = (_cbfn); \
- (_qe)->cbarg = (_cbarg); \
-} while (0)
-
-#define bna_is_small_rxq(rcb) ((rcb)->id == 1)
-
-#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
- (!memcmp((_mac1), (_mac2), sizeof(mac_t)))
-
-#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
-
-#define BNA_TO_POWER_OF_2(x) \
-do { \
- int _shift = 0; \
- while ((x) && (x) != 1) { \
- (x) >>= 1; \
- _shift++; \
- } \
- (x) <<= _shift; \
-} while (0)
-
-#define BNA_TO_POWER_OF_2_HIGH(x) \
-do { \
- int n = 1; \
- while (n < (x)) \
- n <<= 1; \
- (x) = n; \
-} while (0)
-
-/*
- * input : _addr-> os dma addr in host endian format,
- * output : _bna_dma_addr-> pointer to hw dma addr
- */
-#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \
-do { \
- u64 tmp_addr = \
- cpu_to_be64((u64)(_addr)); \
- (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
- (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
-} while (0)
-
-/*
- * input : _bna_dma_addr-> pointer to hw dma addr
- * output : _addr-> os dma addr in host endian format
- */
-#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \
-do { \
- (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \
- | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
-} while (0)
-
-#define containing_rec(addr, type, field) \
- ((type *)((unsigned char *)(addr) - \
- (unsigned char *)(&((type *)0)->field)))
-
-#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
-
-/* TxQ element is 64 bytes */
-#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
-#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
-
-#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{ \
- unsigned int page_index; /* index within a page */ \
- void *page_addr; \
- page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
- page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
- (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
-}
-
-/* RxQ element is 8 bytes */
-#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
-#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
-
-#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{ \
- unsigned int page_index; /* index within a page */ \
- void *page_addr; \
- page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
- page_addr = (_qpt_ptr)[((_qe_idx) >> \
- BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
- (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
-}
-
-/* CQ element is 16 bytes */
-#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
-#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
-
-#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{ \
- unsigned int page_index; /* index within a page */ \
- void *page_addr; \
- \
- page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
- page_addr = (_qpt_ptr)[((_qe_idx) >> \
- BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
- (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
-}
-
-#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
- (&((_cast *)(_q_base))[(_qe_idx)])
-
-#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
-
-#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
- ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
-
-#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
- (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
-
-#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
- (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
- ((_q_depth) - 1))
-
-#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
- ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
- (_q_depth - 1))
-
-#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
-
-#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
-
-#define BNA_Q_PI_ADD(_q_ptr, _num) \
- (_q_ptr)->q.producer_index = \
- (((_q_ptr)->q.producer_index + (_num)) & \
- ((_q_ptr)->q.q_depth - 1))
-
-#define BNA_Q_CI_ADD(_q_ptr, _num) \
- (_q_ptr)->q.consumer_index = \
- (((_q_ptr)->q.consumer_index + (_num)) \
- & ((_q_ptr)->q.q_depth - 1))
-
-#define BNA_Q_FREE_COUNT(_q_ptr) \
- (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
-
-#define BNA_Q_IN_USE_COUNT(_q_ptr) \
- (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
-
-/* These macros build the data portion of the TxQ/RxQ doorbell */
-#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
-#define BNA_DOORBELL_Q_STOP (0x40000000)
-
-/* These macros build the data portion of the IB doorbell */
-#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
- (0x80000000 | ((_timeout) << 16) | (_events))
-#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
-
-/* Set the coalescing timer for the given ib */
-#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \
- ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0));
-
-/* Acks 'events' # of events for a given ib */
-#define bna_ib_ack(_i_dbell, _events) \
- (writel(((_i_dbell)->doorbell_ack | (_events)), \
- (_i_dbell)->doorbell_addr));
-
-#define bna_txq_prod_indx_doorbell(_tcb) \
- (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \
- (_tcb)->q_dbell));
-
-#define bna_rxq_prod_indx_doorbell(_rcb) \
- (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
- (_rcb)->q_dbell));
-
-#define BNA_LARGE_PKT_SIZE 1000
-
-#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
-do { \
- if ((_len) > BNA_LARGE_PKT_SIZE) { \
- (_pkt)->large_pkt_cnt++; \
- } else { \
- (_pkt)->small_pkt_cnt++; \
- } \
-} while (0)
-
-#define call_rxf_stop_cbfn(rxf, status) \
- if ((rxf)->stop_cbfn) { \
- (*(rxf)->stop_cbfn)((rxf)->stop_cbarg, (status)); \
- (rxf)->stop_cbfn = NULL; \
- (rxf)->stop_cbarg = NULL; \
- }
-
-#define call_rxf_start_cbfn(rxf, status) \
- if ((rxf)->start_cbfn) { \
- (*(rxf)->start_cbfn)((rxf)->start_cbarg, (status)); \
- (rxf)->start_cbfn = NULL; \
- (rxf)->start_cbarg = NULL; \
- }
-
-#define call_rxf_cam_fltr_cbfn(rxf, status) \
- if ((rxf)->cam_fltr_cbfn) { \
- (*(rxf)->cam_fltr_cbfn)((rxf)->cam_fltr_cbarg, rxf->rx, \
- (status)); \
- (rxf)->cam_fltr_cbfn = NULL; \
- (rxf)->cam_fltr_cbarg = NULL; \
- }
-
-#define call_rxf_pause_cbfn(rxf, status) \
- if ((rxf)->oper_state_cbfn) { \
- (*(rxf)->oper_state_cbfn)((rxf)->oper_state_cbarg, rxf->rx,\
- (status)); \
- (rxf)->rxf_flags &= ~BNA_RXF_FL_OPERSTATE_CHANGED; \
- (rxf)->oper_state_cbfn = NULL; \
- (rxf)->oper_state_cbarg = NULL; \
- }
-
-#define call_rxf_resume_cbfn(rxf, status) call_rxf_pause_cbfn(rxf, status)
-
-#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))
-
-#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))
-
-#define xxx_enable(mode, bitmask, xxx) \
-do { \
- bitmask |= xxx; \
- mode |= xxx; \
-} while (0)
-
-#define xxx_disable(mode, bitmask, xxx) \
-do { \
- bitmask |= xxx; \
- mode &= ~xxx; \
-} while (0)
-
-#define xxx_inactive(mode, bitmask, xxx) \
-do { \
- bitmask &= ~xxx; \
- mode &= ~xxx; \
-} while (0)
-
-#define is_promisc_enable(mode, bitmask) \
- is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
-
-#define is_promisc_disable(mode, bitmask) \
- is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
-
-#define promisc_enable(mode, bitmask) \
- xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
-
-#define promisc_disable(mode, bitmask) \
- xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
-
-#define promisc_inactive(mode, bitmask) \
- xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)
-
-#define is_default_enable(mode, bitmask) \
- is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
-
-#define is_default_disable(mode, bitmask) \
- is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
-
-#define default_enable(mode, bitmask) \
- xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
-
-#define default_disable(mode, bitmask) \
- xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
-
-#define default_inactive(mode, bitmask) \
- xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)
-
-#define is_allmulti_enable(mode, bitmask) \
- is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
-
-#define is_allmulti_disable(mode, bitmask) \
- is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
-
-#define allmulti_enable(mode, bitmask) \
- xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
-
-#define allmulti_disable(mode, bitmask) \
- xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
-
-#define allmulti_inactive(mode, bitmask) \
- xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
-
-#define GET_RXQS(rxp, q0, q1) do { \
- switch ((rxp)->type) { \
- case BNA_RXP_SINGLE: \
- (q0) = rxp->rxq.single.only; \
- (q1) = NULL; \
- break; \
- case BNA_RXP_SLR: \
- (q0) = rxp->rxq.slr.large; \
- (q1) = rxp->rxq.slr.small; \
- break; \
- case BNA_RXP_HDS: \
- (q0) = rxp->rxq.hds.data; \
- (q1) = rxp->rxq.hds.hdr; \
- break; \
- } \
-} while (0)
-
-/**
- *
- * Function prototypes
- *
- */
-
-/**
- * BNA
- */
-
-/* APIs for BNAD */
-void bna_res_req(struct bna_res_info *res_info);
-void bna_init(struct bna *bna, struct bnad *bnad,
- struct bfa_pcidev *pcidev,
- struct bna_res_info *res_info);
-void bna_uninit(struct bna *bna);
-void bna_stats_get(struct bna *bna);
-void bna_get_perm_mac(struct bna *bna, u8 *mac);
-
-/* APIs for Rx */
-int bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size);
-
-/* APIs for RxF */
-struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
-void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
- struct bna_mac *mac);
-struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
-void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
- struct bna_mac *mac);
-struct bna_rit_segment *
-bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size);
-void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
- struct bna_rit_segment *seg);
-
-/**
- * DEVICE
- */
-
-/* APIs for BNAD */
-void bna_device_enable(struct bna_device *device);
-void bna_device_disable(struct bna_device *device,
- enum bna_cleanup_type type);
-
-/**
- * MBOX
- */
-
-/* APIs for PORT, TX, RX */
-void bna_mbox_handler(struct bna *bna, u32 intr_status);
-void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
-
-/**
- * PORT
- */
-
-/* API for RX */
-int bna_port_mtu_get(struct bna_port *port);
-void bna_llport_rx_started(struct bna_llport *llport);
-void bna_llport_rx_stopped(struct bna_llport *llport);
-
-/* API for BNAD */
-void bna_port_enable(struct bna_port *port);
-void bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
- void (*cbfn)(void *, enum bna_cb_status));
-void bna_port_pause_config(struct bna_port *port,
- struct bna_pause_config *pause_config,
- void (*cbfn)(struct bnad *, enum bna_cb_status));
-void bna_port_mtu_set(struct bna_port *port, int mtu,
- void (*cbfn)(struct bnad *, enum bna_cb_status));
-void bna_port_mac_get(struct bna_port *port, mac_t *mac);
-
-/* Callbacks for TX, RX */
-void bna_port_cb_tx_stopped(struct bna_port *port,
- enum bna_cb_status status);
-void bna_port_cb_rx_stopped(struct bna_port *port,
- enum bna_cb_status status);
-
-/**
- * IB
- */
-
-/* APIs for BNA */
-void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
- struct bna_res_info *res_info);
-void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);
-
-/**
- * TX MODULE AND TX
- */
-
-/* APIs for BNA */
-void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
- struct bna_res_info *res_info);
-void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
-int bna_tx_state_get(struct bna_tx *tx);
-
-/* APIs for PORT */
-void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
-void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
-void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
-void bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio);
-void bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link);
-
-/* APIs for BNAD */
-void bna_tx_res_req(int num_txq, int txq_depth,
- struct bna_res_info *res_info);
-struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
- struct bna_tx_config *tx_cfg,
- struct bna_tx_event_cbfn *tx_cbfn,
- struct bna_res_info *res_info, void *priv);
-void bna_tx_destroy(struct bna_tx *tx);
-void bna_tx_enable(struct bna_tx *tx);
-void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
- void (*cbfn)(void *, struct bna_tx *,
- enum bna_cb_status));
-void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
-
-/**
- * RX MODULE, RX, RXF
- */
-
-/* Internal APIs */
-void rxf_cb_cam_fltr_mbox_cmd(void *arg, int status);
-void rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
- const struct bna_mac *mac_addr);
-void __rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status);
-void bna_rxf_adv_init(struct bna_rxf *rxf,
- struct bna_rx *rx,
- struct bna_rx_config *q_config);
-int rxf_process_packet_filter_ucast(struct bna_rxf *rxf);
-int rxf_process_packet_filter_promisc(struct bna_rxf *rxf);
-int rxf_process_packet_filter_default(struct bna_rxf *rxf);
-int rxf_process_packet_filter_allmulti(struct bna_rxf *rxf);
-int rxf_clear_packet_filter_ucast(struct bna_rxf *rxf);
-int rxf_clear_packet_filter_promisc(struct bna_rxf *rxf);
-int rxf_clear_packet_filter_default(struct bna_rxf *rxf);
-int rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf);
-void rxf_reset_packet_filter_ucast(struct bna_rxf *rxf);
-void rxf_reset_packet_filter_promisc(struct bna_rxf *rxf);
-void rxf_reset_packet_filter_default(struct bna_rxf *rxf);
-void rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf);
-
-/* APIs for BNA */
-void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
- struct bna_res_info *res_info);
-void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
-int bna_rx_state_get(struct bna_rx *rx);
-int bna_rxf_state_get(struct bna_rxf *rxf);
-
-/* APIs for PORT */
-void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
-void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
-void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);
-
-/* APIs for BNAD */
-void bna_rx_res_req(struct bna_rx_config *rx_config,
- struct bna_res_info *res_info);
-struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
- struct bna_rx_config *rx_cfg,
- struct bna_rx_event_cbfn *rx_cbfn,
- struct bna_res_info *res_info, void *priv);
-void bna_rx_destroy(struct bna_rx *rx);
-void bna_rx_enable(struct bna_rx *rx);
-void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
- void (*cbfn)(void *, struct bna_rx *,
- enum bna_cb_status));
-void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
-void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
-void bna_rx_dim_update(struct bna_ccb *ccb);
-enum bna_cb_status
-bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *,
- enum bna_cb_status));
-enum bna_cb_status
-bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
- void (*cbfn)(struct bnad *, struct bna_rx *,
- enum bna_cb_status));
-enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
- void (*cbfn)(struct bnad *, struct bna_rx *,
- enum bna_cb_status));
-enum bna_cb_status
-bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
- enum bna_rxmode bitmask,
- void (*cbfn)(struct bnad *, struct bna_rx *,
- enum bna_cb_status));
-void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
-void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
-void bna_rx_vlanfilter_enable(struct bna_rx *rx);
-void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
- void (*cbfn)(struct bnad *, struct bna_rx *,
- enum bna_cb_status));
-void bna_rx_hds_disable(struct bna_rx *rx,
- void (*cbfn)(struct bnad *, struct bna_rx *,
- enum bna_cb_status));
-
-/**
- * BNAD
- */
-
-/* Callbacks for BNA */
-void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
- struct bna_stats *stats);
-
-/* Callbacks for DEVICE */
-void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
-void bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status);
-void bnad_cb_device_enable_mbox_intr(struct bnad *bnad);
-void bnad_cb_device_disable_mbox_intr(struct bnad *bnad);
-
-/* Callbacks for port */
-void bnad_cb_port_link_status(struct bnad *bnad,
- enum bna_link_status status);
-
-#endif /* __BNA_H__ */
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
deleted file mode 100644
index cb2594c564d..00000000000
--- a/drivers/net/bna/bna_ctrl.c
+++ /dev/null
@@ -1,3076 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-#include "bna.h"
-#include "bfa_cs.h"
-
-static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
-
-static void
-bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
- int status)
-{
- int i;
- u8 prio_map;
-
- port->llport.link_status = BNA_LINK_UP;
- if (aen->cee_linkup)
- port->llport.link_status = BNA_CEE_UP;
-
- /* Compute the priority */
- prio_map = aen->prio_map;
- if (prio_map) {
- for (i = 0; i < 8; i++) {
- if ((prio_map >> i) & 0x1)
- break;
- }
- port->priority = i;
- } else
- port->priority = 0;
-
- /* Dispatch events */
- bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
- bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
- port->link_cbfn(port->bna->bnad, port->llport.link_status);
-}
-
-static void
-bna_port_cb_link_down(struct bna_port *port, int status)
-{
- port->llport.link_status = BNA_LINK_DOWN;
-
- /* Dispatch events */
- bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
- port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
-}
-
-static inline int
-llport_can_be_up(struct bna_llport *llport)
-{
- int ready = 0;
- if (llport->type == BNA_PORT_T_REGULAR)
- ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
- (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
- (llport->flags & BNA_LLPORT_F_PORT_ENABLED));
- else
- ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
- (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
- !(llport->flags & BNA_LLPORT_F_PORT_ENABLED));
- return ready;
-}
-
-#define llport_is_up llport_can_be_up
-
-enum bna_llport_event {
- LLPORT_E_START = 1,
- LLPORT_E_STOP = 2,
- LLPORT_E_FAIL = 3,
- LLPORT_E_UP = 4,
- LLPORT_E_DOWN = 5,
- LLPORT_E_FWRESP_UP_OK = 6,
- LLPORT_E_FWRESP_UP_FAIL = 7,
- LLPORT_E_FWRESP_DOWN = 8
-};
-
-static void
-bna_llport_cb_port_enabled(struct bna_llport *llport)
-{
- llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
-
- if (llport_can_be_up(llport))
- bfa_fsm_send_event(llport, LLPORT_E_UP);
-}
-
-static void
-bna_llport_cb_port_disabled(struct bna_llport *llport)
-{
- int llport_up = llport_is_up(llport);
-
- llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
-
- if (llport_up)
- bfa_fsm_send_event(llport, LLPORT_E_DOWN);
-}
-
-/**
- * MBOX
- */
-static int
-bna_is_aen(u8 msg_id)
-{
- switch (msg_id) {
- case BFI_LL_I2H_LINK_DOWN_AEN:
- case BFI_LL_I2H_LINK_UP_AEN:
- case BFI_LL_I2H_PORT_ENABLE_AEN:
- case BFI_LL_I2H_PORT_DISABLE_AEN:
- return 1;
-
- default:
- return 0;
- }
-}
-
-static void
-bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
-{
- struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);
-
- switch (aen->mh.msg_id) {
- case BFI_LL_I2H_LINK_UP_AEN:
- bna_port_cb_link_up(&bna->port, aen, aen->reason);
- break;
- case BFI_LL_I2H_LINK_DOWN_AEN:
- bna_port_cb_link_down(&bna->port, aen->reason);
- break;
- case BFI_LL_I2H_PORT_ENABLE_AEN:
- bna_llport_cb_port_enabled(&bna->port.llport);
- break;
- case BFI_LL_I2H_PORT_DISABLE_AEN:
- bna_llport_cb_port_disabled(&bna->port.llport);
- break;
- default:
- break;
- }
-}
-
-static void
-bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
-{
- struct bna *bna = (struct bna *)(llarg);
- struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
- struct bfi_mhdr *cmd_h, *rsp_h;
- struct bna_mbox_qe *mb_qe = NULL;
- int to_post = 0;
- u8 aen = 0;
- char message[BNA_MESSAGE_SIZE];
-
- aen = bna_is_aen(mb_rsp->mh.msg_id);
-
- if (!aen) {
- mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
- cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
- rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);
-
- if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
- (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
- /* Remove the request from posted_q, update state */
- list_del(&mb_qe->qe);
- bna->mbox_mod.msg_pending--;
- if (list_empty(&bna->mbox_mod.posted_q))
- bna->mbox_mod.state = BNA_MBOX_FREE;
- else
- to_post = 1;
-
- /* Dispatch the cbfn */
- if (mb_qe->cbfn)
- mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);
-
- /* Post the next entry, if needed */
- if (to_post) {
- mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
- bfa_nw_ioc_mbox_queue(&bna->device.ioc,
- &mb_qe->cmd);
- }
- } else {
- snprintf(message, BNA_MESSAGE_SIZE,
- "No matching rsp for [%d:%d:%d]\n",
- mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
- mb_rsp->mh.mtag.i2htok);
- pr_info("%s", message);
- }
-
- } else
- bna_mbox_aen_callback(bna, msg);
-}
-
-static void
-bna_err_handler(struct bna *bna, u32 intr_status)
-{
- u32 init_halt;
-
- if (intr_status & __HALT_STATUS_BITS) {
- init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
- init_halt &= ~__FW_INIT_HALT_P;
- writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
- }
-
- bfa_nw_ioc_error_isr(&bna->device.ioc);
-}
-
-void
-bna_mbox_handler(struct bna *bna, u32 intr_status)
-{
- if (BNA_IS_ERR_INTR(intr_status)) {
- bna_err_handler(bna, intr_status);
- return;
- }
- if (BNA_IS_MBOX_INTR(intr_status))
- bfa_nw_ioc_mbox_isr(&bna->device.ioc);
-}
-
-void
-bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
-{
- struct bfi_mhdr *mh;
-
- mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);
-
- mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
- bna->mbox_mod.msg_ctr++;
- bna->mbox_mod.msg_pending++;
- if (bna->mbox_mod.state == BNA_MBOX_FREE) {
- list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
- bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
- bna->mbox_mod.state = BNA_MBOX_POSTED;
- } else {
- list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
- }
-}
-
-static void
-bna_mbox_flush_q(struct bna *bna, struct list_head *q)
-{
- struct bna_mbox_qe *mb_qe = NULL;
- struct list_head *mb_q;
- void (*cbfn)(void *arg, int status);
- void *cbarg;
-
- mb_q = &bna->mbox_mod.posted_q;
-
- while (!list_empty(mb_q)) {
- bfa_q_deq(mb_q, &mb_qe);
- cbfn = mb_qe->cbfn;
- cbarg = mb_qe->cbarg;
- bfa_q_qe_init(mb_qe);
- bna->mbox_mod.msg_pending--;
-
- if (cbfn)
- cbfn(cbarg, BNA_CB_NOT_EXEC);
- }
-
- bna->mbox_mod.state = BNA_MBOX_FREE;
-}
-
-static void
-bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
-{
-}
-
-static void
-bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
-{
- bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
-}
-
-static void
-bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
-{
- bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
- mbox_mod->state = BNA_MBOX_FREE;
- mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
- INIT_LIST_HEAD(&mbox_mod->posted_q);
- mbox_mod->bna = bna;
-}
-
-static void
-bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
-{
- mbox_mod->bna = NULL;
-}
-
-/**
- * LLPORT
- */
-#define call_llport_stop_cbfn(llport, status)\
-do {\
- if ((llport)->stop_cbfn)\
- (llport)->stop_cbfn(&(llport)->bna->port, status);\
- (llport)->stop_cbfn = NULL;\
-} while (0)
-
-static void bna_fw_llport_up(struct bna_llport *llport);
-static void bna_fw_cb_llport_up(void *arg, int status);
-static void bna_fw_llport_down(struct bna_llport *llport);
-static void bna_fw_cb_llport_down(void *arg, int status);
-static void bna_llport_start(struct bna_llport *llport);
-static void bna_llport_stop(struct bna_llport *llport);
-static void bna_llport_fail(struct bna_llport *llport);
-
-enum bna_llport_state {
- BNA_LLPORT_STOPPED = 1,
- BNA_LLPORT_DOWN = 2,
- BNA_LLPORT_UP_RESP_WAIT = 3,
- BNA_LLPORT_DOWN_RESP_WAIT = 4,
- BNA_LLPORT_UP = 5,
- BNA_LLPORT_LAST_RESP_WAIT = 6
-};
-
-bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
- enum bna_llport_event);
-bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
- enum bna_llport_event);
-bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
- enum bna_llport_event);
-bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
- enum bna_llport_event);
-bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
- enum bna_llport_event);
-bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
- enum bna_llport_event);
-
-static struct bfa_sm_table llport_sm_table[] = {
- {BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
- {BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
- {BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
- {BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
- {BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
- {BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
-};
-
-static void
-bna_llport_sm_stopped_entry(struct bna_llport *llport)
-{
- llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
- call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
-}
-
-static void
-bna_llport_sm_stopped(struct bna_llport *llport,
- enum bna_llport_event event)
-{
- switch (event) {
- case LLPORT_E_START:
- bfa_fsm_set_state(llport, bna_llport_sm_down);
- break;
-
- case LLPORT_E_STOP:
- call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
- break;
-
- case LLPORT_E_FAIL:
- break;
-
- case LLPORT_E_DOWN:
- /* This event is received due to Rx objects failing */
- /* No-op */
- break;
-
- case LLPORT_E_FWRESP_UP_OK:
- case LLPORT_E_FWRESP_DOWN:
- /**
- * These events are received due to flushing of mbox when
- * device fails
- */
- /* No-op */
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_llport_sm_down_entry(struct bna_llport *llport)
-{
- bnad_cb_port_link_status((llport)->bna->bnad, BNA_LINK_DOWN);
-}
-
-static void
-bna_llport_sm_down(struct bna_llport *llport,
- enum bna_llport_event event)
-{
- switch (event) {
- case LLPORT_E_STOP:
- bfa_fsm_set_state(llport, bna_llport_sm_stopped);
- break;
-
- case LLPORT_E_FAIL:
- bfa_fsm_set_state(llport, bna_llport_sm_stopped);
- break;
-
- case LLPORT_E_UP:
- bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
- bna_fw_llport_up(llport);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
-{
- BUG_ON(!llport_can_be_up(llport));
- /**
- * NOTE: Do not call bna_fw_llport_up() here. That will over step
- * mbox due to down_resp_wait -> up_resp_wait transition on event
- * LLPORT_E_UP
- */
-}
-
-static void
-bna_llport_sm_up_resp_wait(struct bna_llport *llport,
- enum bna_llport_event event)
-{
- switch (event) {
- case LLPORT_E_STOP:
- bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
- break;
-
- case LLPORT_E_FAIL:
- bfa_fsm_set_state(llport, bna_llport_sm_stopped);
- break;
-
- case LLPORT_E_DOWN:
- bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
- break;
-
- case LLPORT_E_FWRESP_UP_OK:
- bfa_fsm_set_state(llport, bna_llport_sm_up);
- break;
-
- case LLPORT_E_FWRESP_UP_FAIL:
- bfa_fsm_set_state(llport, bna_llport_sm_down);
- break;
-
- case LLPORT_E_FWRESP_DOWN:
- /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
- bna_fw_llport_up(llport);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
-{
- /**
- * NOTE: Do not call bna_fw_llport_down() here. That will over step
- * mbox due to up_resp_wait -> down_resp_wait transition on event
- * LLPORT_E_DOWN
- */
-}
-
-static void
-bna_llport_sm_down_resp_wait(struct bna_llport *llport,
- enum bna_llport_event event)
-{
- switch (event) {
- case LLPORT_E_STOP:
- bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
- break;
-
- case LLPORT_E_FAIL:
- bfa_fsm_set_state(llport, bna_llport_sm_stopped);
- break;
-
- case LLPORT_E_UP:
- bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
- break;
-
- case LLPORT_E_FWRESP_UP_OK:
- /* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
- bna_fw_llport_down(llport);
- break;
-
- case LLPORT_E_FWRESP_UP_FAIL:
- case LLPORT_E_FWRESP_DOWN:
- bfa_fsm_set_state(llport, bna_llport_sm_down);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_llport_sm_up_entry(struct bna_llport *llport)
-{
-}
-
-static void
-bna_llport_sm_up(struct bna_llport *llport,
- enum bna_llport_event event)
-{
- switch (event) {
- case LLPORT_E_STOP:
- bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
- bna_fw_llport_down(llport);
- break;
-
- case LLPORT_E_FAIL:
- bfa_fsm_set_state(llport, bna_llport_sm_stopped);
- break;
-
- case LLPORT_E_DOWN:
- bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
- bna_fw_llport_down(llport);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
-{
-}
-
-static void
-bna_llport_sm_last_resp_wait(struct bna_llport *llport,
- enum bna_llport_event event)
-{
- switch (event) {
- case LLPORT_E_FAIL:
- bfa_fsm_set_state(llport, bna_llport_sm_stopped);
- break;
-
- case LLPORT_E_DOWN:
- /**
- * This event is received due to Rx objects stopping in
- * parallel to llport
- */
- /* No-op */
- break;
-
- case LLPORT_E_FWRESP_UP_OK:
- /* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */
- bna_fw_llport_down(llport);
- break;
-
- case LLPORT_E_FWRESP_UP_FAIL:
- case LLPORT_E_FWRESP_DOWN:
- bfa_fsm_set_state(llport, bna_llport_sm_stopped);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_fw_llport_admin_up(struct bna_llport *llport)
-{
- struct bfi_ll_port_admin_req ll_req;
-
- memset(&ll_req, 0, sizeof(ll_req));
- ll_req.mh.msg_class = BFI_MC_LL;
- ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
- ll_req.mh.mtag.h2i.lpu_id = 0;
-
- ll_req.up = BNA_STATUS_T_ENABLED;
-
- bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
- bna_fw_cb_llport_up, llport);
-
- bna_mbox_send(llport->bna, &llport->mbox_qe);
-}
-
-static void
-bna_fw_llport_up(struct bna_llport *llport)
-{
- if (llport->type == BNA_PORT_T_REGULAR)
- bna_fw_llport_admin_up(llport);
-}
-
-static void
-bna_fw_cb_llport_up(void *arg, int status)
-{
- struct bna_llport *llport = (struct bna_llport *)arg;
-
- bfa_q_qe_init(&llport->mbox_qe.qe);
- if (status == BFI_LL_CMD_FAIL) {
- if (llport->type == BNA_PORT_T_REGULAR)
- llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
- else
- llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
- bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL);
- } else
- bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK);
-}
-
-static void
-bna_fw_llport_admin_down(struct bna_llport *llport)
-{
- struct bfi_ll_port_admin_req ll_req;
-
- memset(&ll_req, 0, sizeof(ll_req));
- ll_req.mh.msg_class = BFI_MC_LL;
- ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
- ll_req.mh.mtag.h2i.lpu_id = 0;
-
- ll_req.up = BNA_STATUS_T_DISABLED;
-
- bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
- bna_fw_cb_llport_down, llport);
-
- bna_mbox_send(llport->bna, &llport->mbox_qe);
-}
-
-static void
-bna_fw_llport_down(struct bna_llport *llport)
-{
- if (llport->type == BNA_PORT_T_REGULAR)
- bna_fw_llport_admin_down(llport);
-}
-
-static void
-bna_fw_cb_llport_down(void *arg, int status)
-{
- struct bna_llport *llport = (struct bna_llport *)arg;
-
- bfa_q_qe_init(&llport->mbox_qe.qe);
- bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
-}
-
-static void
-bna_port_cb_llport_stopped(struct bna_port *port,
- enum bna_cb_status status)
-{
- bfa_wc_down(&port->chld_stop_wc);
-}
-
-static void
-bna_llport_init(struct bna_llport *llport, struct bna *bna)
-{
- llport->flags |= BNA_LLPORT_F_ADMIN_UP;
- llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
- llport->type = BNA_PORT_T_REGULAR;
- llport->bna = bna;
-
- llport->link_status = BNA_LINK_DOWN;
-
- llport->rx_started_count = 0;
-
- llport->stop_cbfn = NULL;
-
- bfa_q_qe_init(&llport->mbox_qe.qe);
-
- bfa_fsm_set_state(llport, bna_llport_sm_stopped);
-}
-
-static void
-bna_llport_uninit(struct bna_llport *llport)
-{
- llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
- llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
-
- llport->bna = NULL;
-}
-
-static void
-bna_llport_start(struct bna_llport *llport)
-{
- bfa_fsm_send_event(llport, LLPORT_E_START);
-}
-
-static void
-bna_llport_stop(struct bna_llport *llport)
-{
- llport->stop_cbfn = bna_port_cb_llport_stopped;
-
- bfa_fsm_send_event(llport, LLPORT_E_STOP);
-}
-
-static void
-bna_llport_fail(struct bna_llport *llport)
-{
- /* Reset the physical port status to enabled */
- llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
- bfa_fsm_send_event(llport, LLPORT_E_FAIL);
-}
-
-static int
-bna_llport_state_get(struct bna_llport *llport)
-{
- return bfa_sm_to_state(llport_sm_table, llport->fsm);
-}
-
-void
-bna_llport_rx_started(struct bna_llport *llport)
-{
- llport->rx_started_count++;
-
- if (llport->rx_started_count == 1) {
-
- llport->flags |= BNA_LLPORT_F_RX_STARTED;
-
- if (llport_can_be_up(llport))
- bfa_fsm_send_event(llport, LLPORT_E_UP);
- }
-}
-
-void
-bna_llport_rx_stopped(struct bna_llport *llport)
-{
- int llport_up = llport_is_up(llport);
-
- llport->rx_started_count--;
-
- if (llport->rx_started_count == 0) {
-
- llport->flags &= ~BNA_LLPORT_F_RX_STARTED;
-
- if (llport_up)
- bfa_fsm_send_event(llport, LLPORT_E_DOWN);
- }
-}
-
-/**
- * PORT
- */
-#define bna_port_chld_start(port)\
-do {\
- enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
- BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
- enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
- BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
- bna_llport_start(&(port)->llport);\
- bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
- bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
-} while (0)
-
-#define bna_port_chld_stop(port)\
-do {\
- enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
- BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
- enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
- BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
- bfa_wc_up(&(port)->chld_stop_wc);\
- bfa_wc_up(&(port)->chld_stop_wc);\
- bfa_wc_up(&(port)->chld_stop_wc);\
- bna_llport_stop(&(port)->llport);\
- bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
- bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
-} while (0)
-
-#define bna_port_chld_fail(port)\
-do {\
- bna_llport_fail(&(port)->llport);\
- bna_tx_mod_fail(&(port)->bna->tx_mod);\
- bna_rx_mod_fail(&(port)->bna->rx_mod);\
-} while (0)
-
-#define bna_port_rx_start(port)\
-do {\
- enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
- BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
- bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
-} while (0)
-
-#define bna_port_rx_stop(port)\
-do {\
- enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
- BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
- bfa_wc_up(&(port)->chld_stop_wc);\
- bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
-} while (0)
-
-#define call_port_stop_cbfn(port, status)\
-do {\
- if ((port)->stop_cbfn)\
- (port)->stop_cbfn((port)->stop_cbarg, status);\
- (port)->stop_cbfn = NULL;\
- (port)->stop_cbarg = NULL;\
-} while (0)
-
-#define call_port_pause_cbfn(port, status)\
-do {\
- if ((port)->pause_cbfn)\
- (port)->pause_cbfn((port)->bna->bnad, status);\
- (port)->pause_cbfn = NULL;\
-} while (0)
-
-#define call_port_mtu_cbfn(port, status)\
-do {\
- if ((port)->mtu_cbfn)\
- (port)->mtu_cbfn((port)->bna->bnad, status);\
- (port)->mtu_cbfn = NULL;\
-} while (0)
-
-static void bna_fw_pause_set(struct bna_port *port);
-static void bna_fw_cb_pause_set(void *arg, int status);
-static void bna_fw_mtu_set(struct bna_port *port);
-static void bna_fw_cb_mtu_set(void *arg, int status);
-
-enum bna_port_event {
- PORT_E_START = 1,
- PORT_E_STOP = 2,
- PORT_E_FAIL = 3,
- PORT_E_PAUSE_CFG = 4,
- PORT_E_MTU_CFG = 5,
- PORT_E_CHLD_STOPPED = 6,
- PORT_E_FWRESP_PAUSE = 7,
- PORT_E_FWRESP_MTU = 8
-};
-
-enum bna_port_state {
- BNA_PORT_STOPPED = 1,
- BNA_PORT_MTU_INIT_WAIT = 2,
- BNA_PORT_PAUSE_INIT_WAIT = 3,
- BNA_PORT_LAST_RESP_WAIT = 4,
- BNA_PORT_STARTED = 5,
- BNA_PORT_PAUSE_CFG_WAIT = 6,
- BNA_PORT_RX_STOP_WAIT = 7,
- BNA_PORT_MTU_CFG_WAIT = 8,
- BNA_PORT_CHLD_STOP_WAIT = 9
-};
-
-bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
- enum bna_port_event);
-bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
- enum bna_port_event);
-bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
- enum bna_port_event);
-bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
- enum bna_port_event);
-bfa_fsm_state_decl(bna_port, started, struct bna_port,
- enum bna_port_event);
-bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
- enum bna_port_event);
-bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
- enum bna_port_event);
-bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
- enum bna_port_event);
-bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
- enum bna_port_event);
-
-static struct bfa_sm_table port_sm_table[] = {
- {BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
- {BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
- {BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
- {BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
- {BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
- {BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
- {BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
- {BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
- {BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
-};
-
-static void
-bna_port_sm_stopped_entry(struct bna_port *port)
-{
- call_port_pause_cbfn(port, BNA_CB_SUCCESS);
- call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
- call_port_stop_cbfn(port, BNA_CB_SUCCESS);
-}
-
-static void
-bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
-{
- switch (event) {
- case PORT_E_START:
- bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
- break;
-
- case PORT_E_STOP:
- call_port_stop_cbfn(port, BNA_CB_SUCCESS);
- break;
-
- case PORT_E_FAIL:
- /* No-op */
- break;
-
- case PORT_E_PAUSE_CFG:
- call_port_pause_cbfn(port, BNA_CB_SUCCESS);
- break;
-
- case PORT_E_MTU_CFG:
- call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
- break;
-
- case PORT_E_CHLD_STOPPED:
- /**
- * This event is received due to LLPort, Tx and Rx objects
- * failing
- */
- /* No-op */
- break;
-
- case PORT_E_FWRESP_PAUSE:
- case PORT_E_FWRESP_MTU:
- /**
- * These events are received due to flushing of mbox when
- * device fails
- */
- /* No-op */
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
-{
- bna_fw_mtu_set(port);
-}
-
-static void
-bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
-{
- switch (event) {
- case PORT_E_STOP:
- bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
- break;
-
- case PORT_E_FAIL:
- bfa_fsm_set_state(port, bna_port_sm_stopped);
- break;
-
- case PORT_E_PAUSE_CFG:
- /* No-op */
- break;
-
- case PORT_E_MTU_CFG:
- port->flags |= BNA_PORT_F_MTU_CHANGED;
- break;
-
- case PORT_E_FWRESP_MTU:
- if (port->flags & BNA_PORT_F_MTU_CHANGED) {
- port->flags &= ~BNA_PORT_F_MTU_CHANGED;
- bna_fw_mtu_set(port);
- } else {
- bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
- }
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_port_sm_pause_init_wait_entry(struct bna_port *port)
-{
- bna_fw_pause_set(port);
-}
-
-static void
-bna_port_sm_pause_init_wait(struct bna_port *port,
- enum bna_port_event event)
-{
- switch (event) {
- case PORT_E_STOP:
- bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
- break;
-
- case PORT_E_FAIL:
- bfa_fsm_set_state(port, bna_port_sm_stopped);
- break;
-
- case PORT_E_PAUSE_CFG:
- port->flags |= BNA_PORT_F_PAUSE_CHANGED;
- break;
-
- case PORT_E_MTU_CFG:
- port->flags |= BNA_PORT_F_MTU_CHANGED;
- break;
-
- case PORT_E_FWRESP_PAUSE:
- if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
- port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
- bna_fw_pause_set(port);
- } else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
- port->flags &= ~BNA_PORT_F_MTU_CHANGED;
- bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
- } else {
- bfa_fsm_set_state(port, bna_port_sm_started);
- bna_port_chld_start(port);
- }
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_port_sm_last_resp_wait_entry(struct bna_port *port)
-{
-}
-
-static void
-bna_port_sm_last_resp_wait(struct bna_port *port,
- enum bna_port_event event)
-{
- switch (event) {
- case PORT_E_FAIL:
- case PORT_E_FWRESP_PAUSE:
- case PORT_E_FWRESP_MTU:
- bfa_fsm_set_state(port, bna_port_sm_stopped);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_port_sm_started_entry(struct bna_port *port)
-{
- /**
- * NOTE: Do not call bna_port_chld_start() here, since it will be
- * inadvertently called during pause_cfg_wait->started transition
- * as well
- */
- call_port_pause_cbfn(port, BNA_CB_SUCCESS);
- call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
-}
-
-static void
-bna_port_sm_started(struct bna_port *port,
- enum bna_port_event event)
-{
- switch (event) {
- case PORT_E_STOP:
- bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
- break;
-
- case PORT_E_FAIL:
- bfa_fsm_set_state(port, bna_port_sm_stopped);
- bna_port_chld_fail(port);
- break;
-
- case PORT_E_PAUSE_CFG:
- bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
- break;
-
- case PORT_E_MTU_CFG:
- bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
-{
- bna_fw_pause_set(port);
-}
-
-static void
-bna_port_sm_pause_cfg_wait(struct bna_port *port,
- enum bna_port_event event)
-{
- switch (event) {
- case PORT_E_FAIL:
- bfa_fsm_set_state(port, bna_port_sm_stopped);
- bna_port_chld_fail(port);
- break;
-
- case PORT_E_FWRESP_PAUSE:
- bfa_fsm_set_state(port, bna_port_sm_started);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
-{
- bna_port_rx_stop(port);
-}
-
-static void
-bna_port_sm_rx_stop_wait(struct bna_port *port,
- enum bna_port_event event)
-{
- switch (event) {
- case PORT_E_FAIL:
- bfa_fsm_set_state(port, bna_port_sm_stopped);
- bna_port_chld_fail(port);
- break;
-
- case PORT_E_CHLD_STOPPED:
- bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
-{
- bna_fw_mtu_set(port);
-}
-
-static void
-bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
-{
- switch (event) {
- case PORT_E_FAIL:
- bfa_fsm_set_state(port, bna_port_sm_stopped);
- bna_port_chld_fail(port);
- break;
-
- case PORT_E_FWRESP_MTU:
- bfa_fsm_set_state(port, bna_port_sm_started);
- bna_port_rx_start(port);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
-{
- bna_port_chld_stop(port);
-}
-
-static void
-bna_port_sm_chld_stop_wait(struct bna_port *port,
- enum bna_port_event event)
-{
- switch (event) {
- case PORT_E_FAIL:
- bfa_fsm_set_state(port, bna_port_sm_stopped);
- bna_port_chld_fail(port);
- break;
-
- case PORT_E_CHLD_STOPPED:
- bfa_fsm_set_state(port, bna_port_sm_stopped);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_fw_pause_set(struct bna_port *port)
-{
- struct bfi_ll_set_pause_req ll_req;
-
- memset(&ll_req, 0, sizeof(ll_req));
- ll_req.mh.msg_class = BFI_MC_LL;
- ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
- ll_req.mh.mtag.h2i.lpu_id = 0;
-
- ll_req.tx_pause = port->pause_config.tx_pause;
- ll_req.rx_pause = port->pause_config.rx_pause;
-
- bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
- bna_fw_cb_pause_set, port);
-
- bna_mbox_send(port->bna, &port->mbox_qe);
-}
-
-static void
-bna_fw_cb_pause_set(void *arg, int status)
-{
- struct bna_port *port = (struct bna_port *)arg;
-
- bfa_q_qe_init(&port->mbox_qe.qe);
- bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
-}
-
-void
-bna_fw_mtu_set(struct bna_port *port)
-{
- struct bfi_ll_mtu_info_req ll_req;
-
- bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
- ll_req.mtu = htons((u16)port->mtu);
-
- bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
- bna_fw_cb_mtu_set, port);
- bna_mbox_send(port->bna, &port->mbox_qe);
-}
-
-void
-bna_fw_cb_mtu_set(void *arg, int status)
-{
- struct bna_port *port = (struct bna_port *)arg;
-
- bfa_q_qe_init(&port->mbox_qe.qe);
- bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);
-}
-
-static void
-bna_port_cb_chld_stopped(void *arg)
-{
- struct bna_port *port = (struct bna_port *)arg;
-
- bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
-}
-
-static void
-bna_port_init(struct bna_port *port, struct bna *bna)
-{
- port->bna = bna;
- port->flags = 0;
- port->mtu = 0;
- port->type = BNA_PORT_T_REGULAR;
-
- port->link_cbfn = bnad_cb_port_link_status;
-
- port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
- port->chld_stop_wc.wc_cbarg = port;
- port->chld_stop_wc.wc_count = 0;
-
- port->stop_cbfn = NULL;
- port->stop_cbarg = NULL;
-
- port->pause_cbfn = NULL;
-
- port->mtu_cbfn = NULL;
-
- bfa_q_qe_init(&port->mbox_qe.qe);
-
- bfa_fsm_set_state(port, bna_port_sm_stopped);
-
- bna_llport_init(&port->llport, bna);
-}
-
-static void
-bna_port_uninit(struct bna_port *port)
-{
- bna_llport_uninit(&port->llport);
-
- port->flags = 0;
-
- port->bna = NULL;
-}
-
-static int
-bna_port_state_get(struct bna_port *port)
-{
- return bfa_sm_to_state(port_sm_table, port->fsm);
-}
-
-static void
-bna_port_start(struct bna_port *port)
-{
- port->flags |= BNA_PORT_F_DEVICE_READY;
- if (port->flags & BNA_PORT_F_ENABLED)
- bfa_fsm_send_event(port, PORT_E_START);
-}
-
-static void
-bna_port_stop(struct bna_port *port)
-{
- port->stop_cbfn = bna_device_cb_port_stopped;
- port->stop_cbarg = &port->bna->device;
-
- port->flags &= ~BNA_PORT_F_DEVICE_READY;
- bfa_fsm_send_event(port, PORT_E_STOP);
-}
-
-static void
-bna_port_fail(struct bna_port *port)
-{
- port->flags &= ~BNA_PORT_F_DEVICE_READY;
- bfa_fsm_send_event(port, PORT_E_FAIL);
-}
-
-void
-bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
-{
- bfa_wc_down(&port->chld_stop_wc);
-}
-
-void
-bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
-{
- bfa_wc_down(&port->chld_stop_wc);
-}
-
-int
-bna_port_mtu_get(struct bna_port *port)
-{
- return port->mtu;
-}
-
-void
-bna_port_enable(struct bna_port *port)
-{
- if (port->fsm != (bfa_sm_t)bna_port_sm_stopped)
- return;
-
- port->flags |= BNA_PORT_F_ENABLED;
-
- if (port->flags & BNA_PORT_F_DEVICE_READY)
- bfa_fsm_send_event(port, PORT_E_START);
-}
-
-void
-bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
- void (*cbfn)(void *, enum bna_cb_status))
-{
- if (type == BNA_SOFT_CLEANUP) {
- (*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);
- return;
- }
-
- port->stop_cbfn = cbfn;
- port->stop_cbarg = port->bna->bnad;
-
- port->flags &= ~BNA_PORT_F_ENABLED;
-
- bfa_fsm_send_event(port, PORT_E_STOP);
-}
-
-void
-bna_port_pause_config(struct bna_port *port,
- struct bna_pause_config *pause_config,
- void (*cbfn)(struct bnad *, enum bna_cb_status))
-{
- port->pause_config = *pause_config;
-
- port->pause_cbfn = cbfn;
-
- bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);
-}
-
-void
-bna_port_mtu_set(struct bna_port *port, int mtu,
- void (*cbfn)(struct bnad *, enum bna_cb_status))
-{
- port->mtu = mtu;
-
- port->mtu_cbfn = cbfn;
-
- bfa_fsm_send_event(port, PORT_E_MTU_CFG);
-}
-
-void
-bna_port_mac_get(struct bna_port *port, mac_t *mac)
-{
- *mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
-}
-
-/**
- * DEVICE
- */
-#define enable_mbox_intr(_device)\
-do {\
- u32 intr_status;\
- bna_intr_status_get((_device)->bna, intr_status);\
- bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
- bna_mbox_intr_enable((_device)->bna);\
-} while (0)
-
-#define disable_mbox_intr(_device)\
-do {\
- bna_mbox_intr_disable((_device)->bna);\
- bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
-} while (0)
-
-static const struct bna_chip_regs_offset reg_offset[] =
-{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
- HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
-{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
- HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
-{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
- HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
-{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
- HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
-};
-
-enum bna_device_event {
- DEVICE_E_ENABLE = 1,
- DEVICE_E_DISABLE = 2,
- DEVICE_E_IOC_READY = 3,
- DEVICE_E_IOC_FAILED = 4,
- DEVICE_E_IOC_DISABLED = 5,
- DEVICE_E_IOC_RESET = 6,
- DEVICE_E_PORT_STOPPED = 7,
-};
-
-enum bna_device_state {
- BNA_DEVICE_STOPPED = 1,
- BNA_DEVICE_IOC_READY_WAIT = 2,
- BNA_DEVICE_READY = 3,
- BNA_DEVICE_PORT_STOP_WAIT = 4,
- BNA_DEVICE_IOC_DISABLE_WAIT = 5,
- BNA_DEVICE_FAILED = 6
-};
-
-bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
- enum bna_device_event);
-bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
- enum bna_device_event);
-bfa_fsm_state_decl(bna_device, ready, struct bna_device,
- enum bna_device_event);
-bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
- enum bna_device_event);
-bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
- enum bna_device_event);
-bfa_fsm_state_decl(bna_device, failed, struct bna_device,
- enum bna_device_event);
-
-static struct bfa_sm_table device_sm_table[] = {
- {BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
- {BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
- {BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
- {BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
- {BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
- {BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
-};
-
-static void
-bna_device_sm_stopped_entry(struct bna_device *device)
-{
- if (device->stop_cbfn)
- device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);
-
- device->stop_cbfn = NULL;
- device->stop_cbarg = NULL;
-}
-
-static void
-bna_device_sm_stopped(struct bna_device *device,
- enum bna_device_event event)
-{
- switch (event) {
- case DEVICE_E_ENABLE:
- if (device->intr_type == BNA_INTR_T_MSIX)
- bna_mbox_msix_idx_set(device);
- bfa_nw_ioc_enable(&device->ioc);
- bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
- break;
-
- case DEVICE_E_DISABLE:
- bfa_fsm_set_state(device, bna_device_sm_stopped);
- break;
-
- case DEVICE_E_IOC_RESET:
- enable_mbox_intr(device);
- break;
-
- case DEVICE_E_IOC_FAILED:
- bfa_fsm_set_state(device, bna_device_sm_failed);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
-{
- /**
- * Do not call bfa_ioc_enable() here. It must be called in the
- * previous state due to failed -> ioc_ready_wait transition.
- */
-}
-
-static void
-bna_device_sm_ioc_ready_wait(struct bna_device *device,
- enum bna_device_event event)
-{
- switch (event) {
- case DEVICE_E_DISABLE:
- if (device->ready_cbfn)
- device->ready_cbfn(device->ready_cbarg,
- BNA_CB_INTERRUPT);
- device->ready_cbfn = NULL;
- device->ready_cbarg = NULL;
- bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
- break;
-
- case DEVICE_E_IOC_READY:
- bfa_fsm_set_state(device, bna_device_sm_ready);
- break;
-
- case DEVICE_E_IOC_FAILED:
- bfa_fsm_set_state(device, bna_device_sm_failed);
- break;
-
- case DEVICE_E_IOC_RESET:
- enable_mbox_intr(device);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_device_sm_ready_entry(struct bna_device *device)
-{
- bna_mbox_mod_start(&device->bna->mbox_mod);
- bna_port_start(&device->bna->port);
-
- if (device->ready_cbfn)
- device->ready_cbfn(device->ready_cbarg,
- BNA_CB_SUCCESS);
- device->ready_cbfn = NULL;
- device->ready_cbarg = NULL;
-}
-
-static void
-bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
-{
- switch (event) {
- case DEVICE_E_DISABLE:
- bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
- break;
-
- case DEVICE_E_IOC_FAILED:
- bfa_fsm_set_state(device, bna_device_sm_failed);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_device_sm_port_stop_wait_entry(struct bna_device *device)
-{
- bna_port_stop(&device->bna->port);
-}
-
-static void
-bna_device_sm_port_stop_wait(struct bna_device *device,
- enum bna_device_event event)
-{
- switch (event) {
- case DEVICE_E_PORT_STOPPED:
- bna_mbox_mod_stop(&device->bna->mbox_mod);
- bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
- break;
-
- case DEVICE_E_IOC_FAILED:
- disable_mbox_intr(device);
- bna_port_fail(&device->bna->port);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
-{
- bfa_nw_ioc_disable(&device->ioc);
-}
-
-static void
-bna_device_sm_ioc_disable_wait(struct bna_device *device,
- enum bna_device_event event)
-{
- switch (event) {
- case DEVICE_E_IOC_DISABLED:
- disable_mbox_intr(device);
- bfa_fsm_set_state(device, bna_device_sm_stopped);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_device_sm_failed_entry(struct bna_device *device)
-{
- disable_mbox_intr(device);
- bna_port_fail(&device->bna->port);
- bna_mbox_mod_stop(&device->bna->mbox_mod);
-
- if (device->ready_cbfn)
- device->ready_cbfn(device->ready_cbarg,
- BNA_CB_FAIL);
- device->ready_cbfn = NULL;
- device->ready_cbarg = NULL;
-}
-
-static void
-bna_device_sm_failed(struct bna_device *device,
- enum bna_device_event event)
-{
- switch (event) {
- case DEVICE_E_DISABLE:
- bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
- break;
-
- case DEVICE_E_IOC_RESET:
- enable_mbox_intr(device);
- bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-/* IOC callback functions */
-
-static void
-bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
-{
- struct bna_device *device = (struct bna_device *)dev;
-
- if (error)
- bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
- else
- bfa_fsm_send_event(device, DEVICE_E_IOC_READY);
-}
-
-static void
-bna_device_cb_iocll_disabled(void *dev)
-{
- struct bna_device *device = (struct bna_device *)dev;
-
- bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);
-}
-
-static void
-bna_device_cb_iocll_failed(void *dev)
-{
- struct bna_device *device = (struct bna_device *)dev;
-
- bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
-}
-
-static void
-bna_device_cb_iocll_reset(void *dev)
-{
- struct bna_device *device = (struct bna_device *)dev;
-
- bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);
-}
-
-static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
- bna_device_cb_iocll_ready,
- bna_device_cb_iocll_disabled,
- bna_device_cb_iocll_failed,
- bna_device_cb_iocll_reset
-};
-
-/* device */
-static void
-bna_adv_device_init(struct bna_device *device, struct bna *bna,
- struct bna_res_info *res_info)
-{
- u8 *kva;
- u64 dma;
-
- device->bna = bna;
-
- kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
-
- /**
- * Attach common modules (Diag, SFP, CEE, Port) and claim respective
- * DMA memory.
- */
- BNA_GET_DMA_ADDR(
- &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
- kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
-
- bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
- bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
- kva += bfa_nw_cee_meminfo();
- dma += bfa_nw_cee_meminfo();
-
-}
-
-static void
-bna_device_init(struct bna_device *device, struct bna *bna,
- struct bna_res_info *res_info)
-{
- u64 dma;
-
- device->bna = bna;
-
- /**
- * Attach IOC and claim:
- * 1. DMA memory for IOC attributes
- * 2. Kernel memory for FW trace
- */
- bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
- bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
-
- BNA_GET_DMA_ADDR(
- &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
- bfa_nw_ioc_mem_claim(&device->ioc,
- res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
- dma);
-
- bna_adv_device_init(device, bna, res_info);
- /*
- * Initialize mbox_mod only after IOC, so that mbox handler
- * registration goes through
- */
- device->intr_type =
- res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
- device->vector =
- res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
- bna_mbox_mod_init(&bna->mbox_mod, bna);
-
- device->ready_cbfn = device->stop_cbfn = NULL;
- device->ready_cbarg = device->stop_cbarg = NULL;
-
- bfa_fsm_set_state(device, bna_device_sm_stopped);
-}
-
-static void
-bna_device_uninit(struct bna_device *device)
-{
- bna_mbox_mod_uninit(&device->bna->mbox_mod);
-
- bfa_nw_ioc_detach(&device->ioc);
-
- device->bna = NULL;
-}
-
-static void
-bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
-{
- struct bna_device *device = (struct bna_device *)arg;
-
- bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
-}
-
-static int
-bna_device_status_get(struct bna_device *device)
-{
- return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
-}
-
-void
-bna_device_enable(struct bna_device *device)
-{
- if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
- bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);
- return;
- }
-
- device->ready_cbfn = bnad_cb_device_enabled;
- device->ready_cbarg = device->bna->bnad;
-
- bfa_fsm_send_event(device, DEVICE_E_ENABLE);
-}
-
-void
-bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
-{
- if (type == BNA_SOFT_CLEANUP) {
- bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);
- return;
- }
-
- device->stop_cbfn = bnad_cb_device_disabled;
- device->stop_cbarg = device->bna->bnad;
-
- bfa_fsm_send_event(device, DEVICE_E_DISABLE);
-}
-
-static int
-bna_device_state_get(struct bna_device *device)
-{
- return bfa_sm_to_state(device_sm_table, device->fsm);
-}
-
-const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
- {12, 12},
- {6, 10},
- {5, 10},
- {4, 8},
- {3, 6},
- {3, 6},
- {2, 4},
- {1, 2},
-};
-
-/* utils */
-
-static void
-bna_adv_res_req(struct bna_res_info *res_info)
-{
- /* DMA memory for COMMON_MODULE */
- res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
- res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
- bfa_nw_cee_meminfo(), PAGE_SIZE);
-
- /* Virtual memory for retreiving fw_trc */
- res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
- res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
-
- /* DMA memory for retreiving stats */
- res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
- res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
- ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);
-
- /* Virtual memory for soft stats */
- res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
- sizeof(struct bna_sw_stats);
-}
-
-static void
-bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
-{
- struct bna_tx *tx;
- struct bna_txq *txq;
- struct bna_rx *rx;
- struct bna_rxp *rxp;
- struct list_head *qe;
- struct list_head *txq_qe;
- struct list_head *rxp_qe;
- struct list_head *mac_qe;
- int i;
-
- sw_stats->device_state = bna_device_state_get(&bna->device);
- sw_stats->port_state = bna_port_state_get(&bna->port);
- sw_stats->port_flags = bna->port.flags;
- sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
- sw_stats->priority = bna->port.priority;
-
- i = 0;
- list_for_each(qe, &bna->tx_mod.tx_active_q) {
- tx = (struct bna_tx *)qe;
- sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
- sw_stats->tx_stats[i].tx_flags = tx->flags;
-
- sw_stats->tx_stats[i].num_txqs = 0;
- sw_stats->tx_stats[i].txq_bmap[0] = 0;
- sw_stats->tx_stats[i].txq_bmap[1] = 0;
- list_for_each(txq_qe, &tx->txq_q) {
- txq = (struct bna_txq *)txq_qe;
- if (txq->txq_id < 32)
- sw_stats->tx_stats[i].txq_bmap[0] |=
- ((u32)1 << txq->txq_id);
- else
- sw_stats->tx_stats[i].txq_bmap[1] |=
- ((u32)
- 1 << (txq->txq_id - 32));
- sw_stats->tx_stats[i].num_txqs++;
- }
-
- sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;
-
- i++;
- }
- sw_stats->num_active_tx = i;
-
- i = 0;
- list_for_each(qe, &bna->rx_mod.rx_active_q) {
- rx = (struct bna_rx *)qe;
- sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
- sw_stats->rx_stats[i].rx_flags = rx->rx_flags;
-
- sw_stats->rx_stats[i].num_rxps = 0;
- sw_stats->rx_stats[i].num_rxqs = 0;
- sw_stats->rx_stats[i].rxq_bmap[0] = 0;
- sw_stats->rx_stats[i].rxq_bmap[1] = 0;
- sw_stats->rx_stats[i].cq_bmap[0] = 0;
- sw_stats->rx_stats[i].cq_bmap[1] = 0;
- list_for_each(rxp_qe, &rx->rxp_q) {
- rxp = (struct bna_rxp *)rxp_qe;
-
- sw_stats->rx_stats[i].num_rxqs += 1;
-
- if (rxp->type == BNA_RXP_SINGLE) {
- if (rxp->rxq.single.only->rxq_id < 32) {
- sw_stats->rx_stats[i].rxq_bmap[0] |=
- ((u32)1 <<
- rxp->rxq.single.only->rxq_id);
- } else {
- sw_stats->rx_stats[i].rxq_bmap[1] |=
- ((u32)1 <<
- (rxp->rxq.single.only->rxq_id - 32));
- }
- } else {
- if (rxp->rxq.slr.large->rxq_id < 32) {
- sw_stats->rx_stats[i].rxq_bmap[0] |=
- ((u32)1 <<
- rxp->rxq.slr.large->rxq_id);
- } else {
- sw_stats->rx_stats[i].rxq_bmap[1] |=
- ((u32)1 <<
- (rxp->rxq.slr.large->rxq_id - 32));
- }
-
- if (rxp->rxq.slr.small->rxq_id < 32) {
- sw_stats->rx_stats[i].rxq_bmap[0] |=
- ((u32)1 <<
- rxp->rxq.slr.small->rxq_id);
- } else {
- sw_stats->rx_stats[i].rxq_bmap[1] |=
- ((u32)1 <<
- (rxp->rxq.slr.small->rxq_id - 32));
- }
- sw_stats->rx_stats[i].num_rxqs += 1;
- }
-
- if (rxp->cq.cq_id < 32)
- sw_stats->rx_stats[i].cq_bmap[0] |=
- (1 << rxp->cq.cq_id);
- else
- sw_stats->rx_stats[i].cq_bmap[1] |=
- (1 << (rxp->cq.cq_id - 32));
-
- sw_stats->rx_stats[i].num_rxps++;
- }
-
- sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
- sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
- sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;
-
- sw_stats->rx_stats[i].num_active_ucast = 0;
- if (rx->rxf.ucast_active_mac)
- sw_stats->rx_stats[i].num_active_ucast++;
- list_for_each(mac_qe, &rx->rxf.ucast_active_q)
- sw_stats->rx_stats[i].num_active_ucast++;
-
- sw_stats->rx_stats[i].num_active_mcast = 0;
- list_for_each(mac_qe, &rx->rxf.mcast_active_q)
- sw_stats->rx_stats[i].num_active_mcast++;
-
- sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
- sw_stats->rx_stats[i].vlan_filter_status =
- rx->rxf.vlan_filter_status;
- memcpy(sw_stats->rx_stats[i].vlan_filter_table,
- rx->rxf.vlan_filter_table,
- sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));
-
- sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
- sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;
-
- i++;
- }
- sw_stats->num_active_rx = i;
-}
-
-static void
-bna_fw_cb_stats_get(void *arg, int status)
-{
- struct bna *bna = (struct bna *)arg;
- u64 *p_stats;
- int i, count;
- int rxf_count, txf_count;
- u64 rxf_bmap, txf_bmap;
-
- bfa_q_qe_init(&bna->mbox_qe.qe);
-
- if (status == 0) {
- p_stats = (u64 *)bna->stats.hw_stats;
- count = sizeof(struct bfi_ll_stats) / sizeof(u64);
- for (i = 0; i < count; i++)
- p_stats[i] = cpu_to_be64(p_stats[i]);
-
- rxf_count = 0;
- rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
- ((u64)bna->stats.rxf_bmap[1] << 32);
- for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
- if (rxf_bmap & ((u64)1 << i))
- rxf_count++;
-
- txf_count = 0;
- txf_bmap = (u64)bna->stats.txf_bmap[0] |
- ((u64)bna->stats.txf_bmap[1] << 32);
- for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
- if (txf_bmap & ((u64)1 << i))
- txf_count++;
-
- p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
- ((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
- txf_count * sizeof(struct bfi_ll_stats_txf))/
- sizeof(u64));
-
- /* Populate the TXF stats from the firmware DMAed copy */
- for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
- if (txf_bmap & ((u64)1 << i)) {
- p_stats -= sizeof(struct bfi_ll_stats_txf)/
- sizeof(u64);
- memcpy(&bna->stats.hw_stats->txf_stats[i],
- p_stats,
- sizeof(struct bfi_ll_stats_txf));
- }
-
- /* Populate the RXF stats from the firmware DMAed copy */
- for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
- if (rxf_bmap & ((u64)1 << i)) {
- p_stats -= sizeof(struct bfi_ll_stats_rxf)/
- sizeof(u64);
- memcpy(&bna->stats.hw_stats->rxf_stats[i],
- p_stats,
- sizeof(struct bfi_ll_stats_rxf));
- }
-
- bna_sw_stats_get(bna, bna->stats.sw_stats);
- bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
- } else
- bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
-}
-
-static void
-bna_fw_stats_get(struct bna *bna)
-{
- struct bfi_ll_stats_req ll_req;
-
- bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
- ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
-
- ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
- ll_req.rxf_id_mask[1] = htonl(bna->rx_mod.rxf_bmap[1]);
- ll_req.txf_id_mask[0] = htonl(bna->tx_mod.txf_bmap[0]);
- ll_req.txf_id_mask[1] = htonl(bna->tx_mod.txf_bmap[1]);
-
- ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
- ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;
-
- bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
- bna_fw_cb_stats_get, bna);
- bna_mbox_send(bna, &bna->mbox_qe);
-
- bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
- bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
- bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
- bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
-}
-
-void
-bna_stats_get(struct bna *bna)
-{
- if (bna_device_status_get(&bna->device))
- bna_fw_stats_get(bna);
- else
- bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
-}
-
-/* IB */
-static void
-bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
-{
- ib->ib_config.coalescing_timeo = coalescing_timeo;
-
- if (ib->start_count)
- ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
- (u32)ib->ib_config.coalescing_timeo, 0);
-}
-
-/* RxF */
-void
-bna_rxf_adv_init(struct bna_rxf *rxf,
- struct bna_rx *rx,
- struct bna_rx_config *q_config)
-{
- switch (q_config->rxp_type) {
- case BNA_RXP_SINGLE:
- /* No-op */
- break;
- case BNA_RXP_SLR:
- rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
- break;
- case BNA_RXP_HDS:
- rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
- rxf->hds_cfg.header_size =
- q_config->hds_config.header_size;
- rxf->forced_offset = 0;
- break;
- default:
- break;
- }
-
- if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
- rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
- rxf->rss_cfg.hash_type = q_config->rss_config.hash_type;
- rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask;
- memcpy(&rxf->rss_cfg.toeplitz_hash_key[0],
- &q_config->rss_config.toeplitz_hash_key[0],
- sizeof(rxf->rss_cfg.toeplitz_hash_key));
- }
-}
-
-static void
-rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
-{
- struct bfi_ll_rxf_req req;
-
- bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
-
- req.rxf_id = rxf->rxf_id;
- req.enable = status;
-
- bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
- rxf_cb_cam_fltr_mbox_cmd, rxf);
-
- bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
-}
-
-int
-rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
-{
- struct bna_mac *mac = NULL;
- struct list_head *qe;
-
- /* Add additional MAC entries */
- if (!list_empty(&rxf->ucast_pending_add_q)) {
- bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac);
- list_add_tail(&mac->qe, &rxf->ucast_active_q);
- return 1;
- }
-
- /* Delete MAC addresses previousely added */
- if (!list_empty(&rxf->ucast_pending_del_q)) {
- bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
- return 1;
- }
-
- return 0;
-}
-
-int
-rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
-{
- struct bna *bna = rxf->rx->bna;
-
- /* Enable/disable promiscuous mode */
- if (is_promisc_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- /* move promisc configuration from pending -> active */
- promisc_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active |= BNA_RXMODE_PROMISC;
-
- /* Disable VLAN filter to allow all VLANs */
- __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
- rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
- BNA_STATUS_T_ENABLED);
- return 1;
- } else if (is_promisc_disable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- /* move promisc configuration from pending -> active */
- promisc_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
- bna->rxf_promisc_id = BFI_MAX_RXF;
-
- /* Revert VLAN filter */
- __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
- rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
- BNA_STATUS_T_DISABLED);
- return 1;
- }
-
- return 0;
-}
-
-int
-rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
-{
- /* Enable/disable allmulti mode */
- if (is_allmulti_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- /* move allmulti configuration from pending -> active */
- allmulti_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
-
- rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
- BNA_STATUS_T_ENABLED);
- return 1;
- } else if (is_allmulti_disable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- /* move allmulti configuration from pending -> active */
- allmulti_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
-
- rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
- BNA_STATUS_T_DISABLED);
- return 1;
- }
-
- return 0;
-}
-
-int
-rxf_clear_packet_filter_ucast(struct bna_rxf *rxf)
-{
- struct bna_mac *mac = NULL;
- struct list_head *qe;
-
- /* 1. delete pending ucast entries */
- if (!list_empty(&rxf->ucast_pending_del_q)) {
- bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
- return 1;
- }
-
- /* 2. clear active ucast entries; move them to pending_add_q */
- if (!list_empty(&rxf->ucast_active_q)) {
- bfa_q_deq(&rxf->ucast_active_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
- list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
- return 1;
- }
-
- return 0;
-}
-
-int
-rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
-{
- struct bna *bna = rxf->rx->bna;
-
- /* 6. Execute pending promisc mode disable command */
- if (is_promisc_disable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- /* move promisc configuration from pending -> active */
- promisc_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
- bna->rxf_promisc_id = BFI_MAX_RXF;
-
- /* Revert VLAN filter */
- __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
- rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
- BNA_STATUS_T_DISABLED);
- return 1;
- }
-
- /* 7. Clear active promisc mode; move it to pending enable */
- if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
- /* move promisc configuration from active -> pending */
- promisc_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
-
- /* Revert VLAN filter */
- __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
- rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
- BNA_STATUS_T_DISABLED);
- return 1;
- }
-
- return 0;
-}
-
-int
-rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
-{
- /* 10. Execute pending allmulti mode disable command */
- if (is_allmulti_disable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- /* move allmulti configuration from pending -> active */
- allmulti_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
- rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
- BNA_STATUS_T_DISABLED);
- return 1;
- }
-
- /* 11. Clear active allmulti mode; move it to pending enable */
- if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
- /* move allmulti configuration from active -> pending */
- allmulti_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
- rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
- BNA_STATUS_T_DISABLED);
- return 1;
- }
-
- return 0;
-}
-
-void
-rxf_reset_packet_filter_ucast(struct bna_rxf *rxf)
-{
- struct list_head *qe;
- struct bna_mac *mac;
-
- /* 1. Move active ucast entries to pending_add_q */
- while (!list_empty(&rxf->ucast_active_q)) {
- bfa_q_deq(&rxf->ucast_active_q, &qe);
- bfa_q_qe_init(qe);
- list_add_tail(qe, &rxf->ucast_pending_add_q);
- }
-
- /* 2. Throw away delete pending ucast entries */
- while (!list_empty(&rxf->ucast_pending_del_q)) {
- bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
- }
-}
-
-void
-rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
-{
- struct bna *bna = rxf->rx->bna;
-
- /* 6. Clear pending promisc mode disable */
- if (is_promisc_disable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- promisc_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
- bna->rxf_promisc_id = BFI_MAX_RXF;
- }
-
- /* 7. Move promisc mode config from active -> pending */
- if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
- promisc_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
- }
-
-}
-
-void
-rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
-{
- /* 10. Clear pending allmulti mode disable */
- if (is_allmulti_disable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- allmulti_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
- }
-
- /* 11. Move allmulti mode config from active -> pending */
- if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
- allmulti_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
- }
-}
-
-/**
- * Should only be called by bna_rxf_mode_set.
- * Helps deciding if h/w configuration is needed or not.
- * Returns:
- * 0 = no h/w change
- * 1 = need h/w change
- */
-static int
-rxf_promisc_enable(struct bna_rxf *rxf)
-{
- struct bna *bna = rxf->rx->bna;
- int ret = 0;
-
- /* There can not be any pending disable command */
-
- /* Do nothing if pending enable or already enabled */
- if (is_promisc_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask) ||
- (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
- /* Schedule enable */
- } else {
- /* Promisc mode should not be active in the system */
- promisc_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- bna->rxf_promisc_id = rxf->rxf_id;
- ret = 1;
- }
-
- return ret;
-}
-
-/**
- * Should only be called by bna_rxf_mode_set.
- * Helps deciding if h/w configuration is needed or not.
- * Returns:
- * 0 = no h/w change
- * 1 = need h/w change
- */
-static int
-rxf_promisc_disable(struct bna_rxf *rxf)
-{
- struct bna *bna = rxf->rx->bna;
- int ret = 0;
-
- /* There can not be any pending disable */
-
- /* Turn off pending enable command , if any */
- if (is_promisc_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- /* Promisc mode should not be active */
- /* system promisc state should be pending */
- promisc_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- /* Remove the promisc state from the system */
- bna->rxf_promisc_id = BFI_MAX_RXF;
-
- /* Schedule disable */
- } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
- /* Promisc mode should be active in the system */
- promisc_disable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- ret = 1;
-
- /* Do nothing if already disabled */
- } else {
- }
-
- return ret;
-}
-
-/**
- * Should only be called by bna_rxf_mode_set.
- * Helps deciding if h/w configuration is needed or not.
- * Returns:
- * 0 = no h/w change
- * 1 = need h/w change
- */
-static int
-rxf_allmulti_enable(struct bna_rxf *rxf)
-{
- int ret = 0;
-
- /* There can not be any pending disable command */
-
- /* Do nothing if pending enable or already enabled */
- if (is_allmulti_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask) ||
- (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
- /* Schedule enable */
- } else {
- allmulti_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- ret = 1;
- }
-
- return ret;
-}
-
-/**
- * Should only be called by bna_rxf_mode_set.
- * Helps deciding if h/w configuration is needed or not.
- * Returns:
- * 0 = no h/w change
- * 1 = need h/w change
- */
-static int
-rxf_allmulti_disable(struct bna_rxf *rxf)
-{
- int ret = 0;
-
- /* There can not be any pending disable */
-
- /* Turn off pending enable command , if any */
- if (is_allmulti_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- /* Allmulti mode should not be active */
- allmulti_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
-
- /* Schedule disable */
- } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
- allmulti_disable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- ret = 1;
- }
-
- return ret;
-}
-
-/* RxF <- bnad */
-enum bna_cb_status
-bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
- enum bna_rxmode bitmask,
- void (*cbfn)(struct bnad *, struct bna_rx *,
- enum bna_cb_status))
-{
- struct bna_rxf *rxf = &rx->rxf;
- int need_hw_config = 0;
-
- /* Process the commands */
-
- if (is_promisc_enable(new_mode, bitmask)) {
- /* If promisc mode is already enabled elsewhere in the system */
- if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
- (rx->bna->rxf_promisc_id != rxf->rxf_id))
- goto err_return;
- if (rxf_promisc_enable(rxf))
- need_hw_config = 1;
- } else if (is_promisc_disable(new_mode, bitmask)) {
- if (rxf_promisc_disable(rxf))
- need_hw_config = 1;
- }
-
- if (is_allmulti_enable(new_mode, bitmask)) {
- if (rxf_allmulti_enable(rxf))
- need_hw_config = 1;
- } else if (is_allmulti_disable(new_mode, bitmask)) {
- if (rxf_allmulti_disable(rxf))
- need_hw_config = 1;
- }
-
- /* Trigger h/w if needed */
-
- if (need_hw_config) {
- rxf->cam_fltr_cbfn = cbfn;
- rxf->cam_fltr_cbarg = rx->bna->bnad;
- bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
- } else if (cbfn)
- (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-
- return BNA_CB_SUCCESS;
-
-err_return:
- return BNA_CB_FAIL;
-}
-
-void
-/* RxF <- bnad */
-bna_rx_vlanfilter_enable(struct bna_rx *rx)
-{
- struct bna_rxf *rxf = &rx->rxf;
-
- if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
- rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
- rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
- bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
- }
-}
-
-/* Rx */
-
-/* Rx <- bnad */
-void
-bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
-{
- struct bna_rxp *rxp;
- struct list_head *qe;
-
- list_for_each(qe, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe;
- rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
- bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
- }
-}
-
-/* Rx <- bnad */
-void
-bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
-{
- int i, j;
-
- for (i = 0; i < BNA_LOAD_T_MAX; i++)
- for (j = 0; j < BNA_BIAS_T_MAX; j++)
- bna->rx_mod.dim_vector[i][j] = vector[i][j];
-}
-
-/* Rx <- bnad */
-void
-bna_rx_dim_update(struct bna_ccb *ccb)
-{
- struct bna *bna = ccb->cq->rx->bna;
- u32 load, bias;
- u32 pkt_rt, small_rt, large_rt;
- u8 coalescing_timeo;
-
- if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
- (ccb->pkt_rate.large_pkt_cnt == 0))
- return;
-
- /* Arrive at preconfigured coalescing timeo value based on pkt rate */
-
- small_rt = ccb->pkt_rate.small_pkt_cnt;
- large_rt = ccb->pkt_rate.large_pkt_cnt;
-
- pkt_rt = small_rt + large_rt;
-
- if (pkt_rt < BNA_PKT_RATE_10K)
- load = BNA_LOAD_T_LOW_4;
- else if (pkt_rt < BNA_PKT_RATE_20K)
- load = BNA_LOAD_T_LOW_3;
- else if (pkt_rt < BNA_PKT_RATE_30K)
- load = BNA_LOAD_T_LOW_2;
- else if (pkt_rt < BNA_PKT_RATE_40K)
- load = BNA_LOAD_T_LOW_1;
- else if (pkt_rt < BNA_PKT_RATE_50K)
- load = BNA_LOAD_T_HIGH_1;
- else if (pkt_rt < BNA_PKT_RATE_60K)
- load = BNA_LOAD_T_HIGH_2;
- else if (pkt_rt < BNA_PKT_RATE_80K)
- load = BNA_LOAD_T_HIGH_3;
- else
- load = BNA_LOAD_T_HIGH_4;
-
- if (small_rt > (large_rt << 1))
- bias = 0;
- else
- bias = 1;
-
- ccb->pkt_rate.small_pkt_cnt = 0;
- ccb->pkt_rate.large_pkt_cnt = 0;
-
- coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
- ccb->rx_coalescing_timeo = coalescing_timeo;
-
- /* Set it to IB */
- bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
-}
-
-/* Tx */
-/* TX <- bnad */
-void
-bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
-{
- struct bna_txq *txq;
- struct list_head *qe;
-
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
- }
-}
-
-/*
- * Private data
- */
-
-struct bna_ritseg_pool_cfg {
- u32 pool_size;
- u32 pool_entry_size;
-};
-init_ritseg_pool(ritseg_pool_cfg);
-
-/*
- * Private functions
- */
-static void
-bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
- struct bna_res_info *res_info)
-{
- int i;
-
- ucam_mod->ucmac = (struct bna_mac *)
- res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
-
- INIT_LIST_HEAD(&ucam_mod->free_q);
- for (i = 0; i < BFI_MAX_UCMAC; i++) {
- bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
- list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
- }
-
- ucam_mod->bna = bna;
-}
-
-static void
-bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
-{
- struct list_head *qe;
- int i = 0;
-
- list_for_each(qe, &ucam_mod->free_q)
- i++;
-
- ucam_mod->bna = NULL;
-}
-
-static void
-bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
- struct bna_res_info *res_info)
-{
- int i;
-
- mcam_mod->mcmac = (struct bna_mac *)
- res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
-
- INIT_LIST_HEAD(&mcam_mod->free_q);
- for (i = 0; i < BFI_MAX_MCMAC; i++) {
- bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
- list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
- }
-
- mcam_mod->bna = bna;
-}
-
-static void
-bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
-{
- struct list_head *qe;
- int i = 0;
-
- list_for_each(qe, &mcam_mod->free_q)
- i++;
-
- mcam_mod->bna = NULL;
-}
-
-static void
-bna_rit_mod_init(struct bna_rit_mod *rit_mod,
- struct bna_res_info *res_info)
-{
- int i;
- int j;
- int count;
- int offset;
-
- rit_mod->rit = (struct bna_rit_entry *)
- res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
- rit_mod->rit_segment = (struct bna_rit_segment *)
- res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;
-
- count = 0;
- offset = 0;
- for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
- INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
- for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
- bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
- rit_mod->rit_segment[count].max_rit_size =
- ritseg_pool_cfg[i].pool_entry_size;
- rit_mod->rit_segment[count].rit_offset = offset;
- rit_mod->rit_segment[count].rit =
- &rit_mod->rit[offset];
- list_add_tail(&rit_mod->rit_segment[count].qe,
- &rit_mod->rit_seg_pool[i]);
- count++;
- offset += ritseg_pool_cfg[i].pool_entry_size;
- }
- }
-}
-
-/*
- * Public functions
- */
-
-/* Called during probe(), before calling bna_init() */
-void
-bna_res_req(struct bna_res_info *res_info)
-{
- bna_adv_res_req(res_info);
-
- /* DMA memory for retrieving IOC attributes */
- res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
- res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
- ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
-
- /* DMA memory for index segment of an IB */
- res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
- res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
- BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
- res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;
-
- /* Virtual memory for IB objects - stored by IB module */
- res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
- BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
- BFI_MAX_IB * sizeof(struct bna_ib);
-
- /* Virtual memory for intr objects - stored by IB module */
- res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
- BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
- BFI_MAX_IB * sizeof(struct bna_intr);
-
- /* Virtual memory for idx_seg objects - stored by IB module */
- res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
- BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
- BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);
-
- /* Virtual memory for Tx objects - stored by Tx module */
- res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
- BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
- BFI_MAX_TXQ * sizeof(struct bna_tx);
-
- /* Virtual memory for TxQ - stored by Tx module */
- res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
- BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
- BFI_MAX_TXQ * sizeof(struct bna_txq);
-
- /* Virtual memory for Rx objects - stored by Rx module */
- res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
- BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
- BFI_MAX_RXQ * sizeof(struct bna_rx);
-
- /* Virtual memory for RxPath - stored by Rx module */
- res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
- BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
- BFI_MAX_RXQ * sizeof(struct bna_rxp);
-
- /* Virtual memory for RxQ - stored by Rx module */
- res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
- BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
- BFI_MAX_RXQ * sizeof(struct bna_rxq);
-
- /* Virtual memory for Unicast MAC address - stored by ucam module */
- res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
- BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
- BFI_MAX_UCMAC * sizeof(struct bna_mac);
-
- /* Virtual memory for Multicast MAC address - stored by mcam module */
- res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
- BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
- BFI_MAX_MCMAC * sizeof(struct bna_mac);
-
- /* Virtual memory for RIT entries */
- res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
- BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
- BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);
-
- /* Virtual memory for RIT segment table */
- res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
- res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
- BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
- res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
- BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);
-
- /* Interrupt resource for mailbox interrupt */
- res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
- res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
- BNA_INTR_T_MSIX;
- res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
-}
-
-/* Called during probe() */
-void
-bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
- struct bna_res_info *res_info)
-{
- bna->bnad = bnad;
- bna->pcidev = *pcidev;
-
- bna->stats.hw_stats = (struct bfi_ll_stats *)
- res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
- bna->hw_stats_dma.msb =
- res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
- bna->hw_stats_dma.lsb =
- res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
- bna->stats.sw_stats = (struct bna_sw_stats *)
- res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;
-
- bna->regs.page_addr = bna->pcidev.pci_bar_kva +
- reg_offset[bna->pcidev.pci_func].page_addr;
- bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
- reg_offset[bna->pcidev.pci_func].fn_int_status;
- bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
- reg_offset[bna->pcidev.pci_func].fn_int_mask;
-
- if (bna->pcidev.pci_func < 3)
- bna->port_num = 0;
- else
- bna->port_num = 1;
-
- /* Also initializes diag, cee, sfp, phy_port and mbox_mod */
- bna_device_init(&bna->device, bna, res_info);
-
- bna_port_init(&bna->port, bna);
-
- bna_tx_mod_init(&bna->tx_mod, bna, res_info);
-
- bna_rx_mod_init(&bna->rx_mod, bna, res_info);
-
- bna_ib_mod_init(&bna->ib_mod, bna, res_info);
-
- bna_rit_mod_init(&bna->rit_mod, res_info);
-
- bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
-
- bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
-
- bna->rxf_promisc_id = BFI_MAX_RXF;
-
- /* Mbox q element for posting stat request to f/w */
- bfa_q_qe_init(&bna->mbox_qe.qe);
-}
-
-void
-bna_uninit(struct bna *bna)
-{
- bna_mcam_mod_uninit(&bna->mcam_mod);
-
- bna_ucam_mod_uninit(&bna->ucam_mod);
-
- bna_ib_mod_uninit(&bna->ib_mod);
-
- bna_rx_mod_uninit(&bna->rx_mod);
-
- bna_tx_mod_uninit(&bna->tx_mod);
-
- bna_port_uninit(&bna->port);
-
- bna_device_uninit(&bna->device);
-
- bna->bnad = NULL;
-}
-
-struct bna_mac *
-bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
-{
- struct list_head *qe;
-
- if (list_empty(&ucam_mod->free_q))
- return NULL;
-
- bfa_q_deq(&ucam_mod->free_q, &qe);
-
- return (struct bna_mac *)qe;
-}
-
-void
-bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
-{
- list_add_tail(&mac->qe, &ucam_mod->free_q);
-}
-
-struct bna_mac *
-bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
-{
- struct list_head *qe;
-
- if (list_empty(&mcam_mod->free_q))
- return NULL;
-
- bfa_q_deq(&mcam_mod->free_q, &qe);
-
- return (struct bna_mac *)qe;
-}
-
-void
-bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
-{
- list_add_tail(&mac->qe, &mcam_mod->free_q);
-}
-
-/**
- * Note: This should be called in the same locking context as the call to
- * bna_rit_mod_seg_get()
- */
-int
-bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
-{
- int i;
-
- /* Select the pool for seg_size */
- for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
- if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
- break;
- }
-
- if (i == BFI_RIT_SEG_TOTAL_POOLS)
- return 0;
-
- if (list_empty(&rit_mod->rit_seg_pool[i]))
- return 0;
-
- return 1;
-}
-
-struct bna_rit_segment *
-bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
-{
- struct bna_rit_segment *seg;
- struct list_head *qe;
- int i;
-
- /* Select the pool for seg_size */
- for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
- if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
- break;
- }
-
- if (i == BFI_RIT_SEG_TOTAL_POOLS)
- return NULL;
-
- if (list_empty(&rit_mod->rit_seg_pool[i]))
- return NULL;
-
- bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
- seg = (struct bna_rit_segment *)qe;
- bfa_q_qe_init(&seg->qe);
- seg->rit_size = seg_size;
-
- return seg;
-}
-
-void
-bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
- struct bna_rit_segment *seg)
-{
- int i;
-
- /* Select the pool for seg->max_rit_size */
- for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
- if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
- break;
- }
-
- seg->rit_size = 0;
- list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
-}
diff --git a/drivers/net/bna/bna_hw.h b/drivers/net/bna/bna_hw.h
deleted file mode 100644
index cad233da843..00000000000
--- a/drivers/net/bna/bna_hw.h
+++ /dev/null
@@ -1,1490 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * File for interrupt macros and functions
- */
-
-#ifndef __BNA_HW_H__
-#define __BNA_HW_H__
-
-#include "bfi_ctreg.h"
-
-/**
- *
- * SW imposed limits
- *
- */
-
-#ifndef BNA_BIOS_BUILD
-
-#define BFI_MAX_TXQ 64
-#define BFI_MAX_RXQ 64
-#define BFI_MAX_RXF 64
-#define BFI_MAX_IB 128
-#define BFI_MAX_RIT_SIZE 256
-#define BFI_RSS_RIT_SIZE 64
-#define BFI_NONRSS_RIT_SIZE 1
-#define BFI_MAX_UCMAC 256
-#define BFI_MAX_MCMAC 512
-#define BFI_IBIDX_SIZE 4
-#define BFI_MAX_VLAN 4095
-
-/**
- * There are 2 free IB index pools:
- * pool1: 120 segments of 1 index each
- * pool8: 1 segment of 8 indexes
- */
-#define BFI_IBIDX_POOL1_SIZE 116
-#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
-#define BFI_IBIDX_POOL2_SIZE 2
-#define BFI_IBIDX_POOL2_ENTRY_SIZE 2
-#define BFI_IBIDX_POOL8_SIZE 1
-#define BFI_IBIDX_POOL8_ENTRY_SIZE 8
-#define BFI_IBIDX_TOTAL_POOLS 3
-#define BFI_IBIDX_TOTAL_SEGS 119 /* (POOL1 + POOL2 + POOL8)_SIZE */
-#define BFI_IBIDX_MAX_SEGSIZE 8
-#define init_ibidx_pool(name) \
-static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
-{ \
- { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE }, \
- { BFI_IBIDX_POOL2_SIZE, BFI_IBIDX_POOL2_ENTRY_SIZE }, \
- { BFI_IBIDX_POOL8_SIZE, BFI_IBIDX_POOL8_ENTRY_SIZE } \
-}
-
-/**
- * There are 2 free RIT segment pools:
- * Pool1: 192 segments of 1 RIT entry each
- * Pool2: 1 segment of 64 RIT entry
- */
-#define BFI_RIT_SEG_POOL1_SIZE 192
-#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
-#define BFI_RIT_SEG_POOLRSS_SIZE 1
-#define BFI_RIT_SEG_POOLRSS_ENTRY_SIZE 64
-#define BFI_RIT_SEG_TOTAL_POOLS 2
-#define BFI_RIT_TOTAL_SEGS 193 /* POOL1_SIZE + POOLRSS_SIZE */
-#define init_ritseg_pool(name) \
-static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
-{ \
- { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE }, \
- { BFI_RIT_SEG_POOLRSS_SIZE, BFI_RIT_SEG_POOLRSS_ENTRY_SIZE } \
-}
-
-#else /* BNA_BIOS_BUILD */
-
-#define BFI_MAX_TXQ 1
-#define BFI_MAX_RXQ 1
-#define BFI_MAX_RXF 1
-#define BFI_MAX_IB 2
-#define BFI_MAX_RIT_SIZE 2
-#define BFI_RSS_RIT_SIZE 64
-#define BFI_NONRSS_RIT_SIZE 1
-#define BFI_MAX_UCMAC 1
-#define BFI_MAX_MCMAC 8
-#define BFI_IBIDX_SIZE 4
-#define BFI_MAX_VLAN 4095
-/* There is one free pool: 2 segments of 1 index each */
-#define BFI_IBIDX_POOL1_SIZE 2
-#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
-#define BFI_IBIDX_TOTAL_POOLS 1
-#define BFI_IBIDX_TOTAL_SEGS 2 /* POOL1_SIZE */
-#define BFI_IBIDX_MAX_SEGSIZE 1
-#define init_ibidx_pool(name) \
-static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
-{ \
- { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE } \
-}
-
-#define BFI_RIT_SEG_POOL1_SIZE 1
-#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
-#define BFI_RIT_SEG_TOTAL_POOLS 1
-#define BFI_RIT_TOTAL_SEGS 1 /* POOL1_SIZE */
-#define init_ritseg_pool(name) \
-static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
-{ \
- { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE } \
-}
-
-#endif /* BNA_BIOS_BUILD */
-
-#define BFI_RSS_HASH_KEY_LEN 10
-
-#define BFI_COALESCING_TIMER_UNIT 5 /* 5us */
-#define BFI_MAX_COALESCING_TIMEO 0xFF /* in 5us units */
-#define BFI_MAX_INTERPKT_COUNT 0xFF
-#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */
-#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */
-#define BFI_TX_INTERPKT_COUNT 32
-#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */
-#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */
-#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */
-
-#define BFI_TXQ_WI_SIZE 64 /* bytes */
-#define BFI_RXQ_WI_SIZE 8 /* bytes */
-#define BFI_CQ_WI_SIZE 16 /* bytes */
-#define BFI_TX_MAX_WRR_QUOTA 0xFFF
-
-#define BFI_TX_MAX_VECTORS_PER_WI 4
-#define BFI_TX_MAX_VECTORS_PER_PKT 0xFF
-#define BFI_TX_MAX_DATA_PER_VECTOR 0xFFFF
-#define BFI_TX_MAX_DATA_PER_PKT 0xFFFFFF
-
-/* Small Q buffer size */
-#define BFI_SMALL_RXBUF_SIZE 128
-
-/* Defined separately since BFA_FLASH_DMA_BUF_SZ is in bfa_flash.c */
-#define BFI_FLASH_DMA_BUF_SZ 0x010000 /* 64K DMA */
-#define BFI_HW_STATS_SIZE 0x4000 /* 16K DMA */
-
-/**
- *
- * HW register offsets, macros
- *
- */
-
-/* DMA Block Register Host Window Start Address */
-#define DMA_BLK_REG_ADDR 0x00013000
-
-/* DMA Block Internal Registers */
-#define DMA_CTRL_REG0 (DMA_BLK_REG_ADDR + 0x000)
-#define DMA_CTRL_REG1 (DMA_BLK_REG_ADDR + 0x004)
-#define DMA_ERR_INT_STATUS (DMA_BLK_REG_ADDR + 0x008)
-#define DMA_ERR_INT_ENABLE (DMA_BLK_REG_ADDR + 0x00c)
-#define DMA_ERR_INT_STATUS_SET (DMA_BLK_REG_ADDR + 0x010)
-
-/* APP Block Register Address Offset from BAR0 */
-#define APP_BLK_REG_ADDR 0x00014000
-
-/* Host Function Interrupt Mask Registers */
-#define HOSTFN0_INT_MASK (APP_BLK_REG_ADDR + 0x004)
-#define HOSTFN1_INT_MASK (APP_BLK_REG_ADDR + 0x104)
-#define HOSTFN2_INT_MASK (APP_BLK_REG_ADDR + 0x304)
-#define HOSTFN3_INT_MASK (APP_BLK_REG_ADDR + 0x404)
-
-/**
- * Host Function PCIe Error Registers
- * Duplicates "Correctable" & "Uncorrectable"
- * registers in PCIe Config space.
- */
-#define FN0_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x014)
-#define FN1_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x114)
-#define FN2_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x314)
-#define FN3_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x414)
-
-/* Host Function Error Type Status Registers */
-#define FN0_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x018)
-#define FN1_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x118)
-#define FN2_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x318)
-#define FN3_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x418)
-
-/* Host Function Error Type Mask Registers */
-#define FN0_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x01c)
-#define FN1_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x11c)
-#define FN2_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x31c)
-#define FN3_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x41c)
-
-/* Catapult Host Semaphore Status Registers (App block) */
-#define HOST_SEM_STS0_REG (APP_BLK_REG_ADDR + 0x630)
-#define HOST_SEM_STS1_REG (APP_BLK_REG_ADDR + 0x634)
-#define HOST_SEM_STS2_REG (APP_BLK_REG_ADDR + 0x638)
-#define HOST_SEM_STS3_REG (APP_BLK_REG_ADDR + 0x63c)
-#define HOST_SEM_STS4_REG (APP_BLK_REG_ADDR + 0x640)
-#define HOST_SEM_STS5_REG (APP_BLK_REG_ADDR + 0x644)
-#define HOST_SEM_STS6_REG (APP_BLK_REG_ADDR + 0x648)
-#define HOST_SEM_STS7_REG (APP_BLK_REG_ADDR + 0x64c)
-
-/* PCIe Misc Register */
-#define PCIE_MISC_REG (APP_BLK_REG_ADDR + 0x200)
-
-/* Temp Sensor Control Registers */
-#define TEMPSENSE_CNTL_REG (APP_BLK_REG_ADDR + 0x250)
-#define TEMPSENSE_STAT_REG (APP_BLK_REG_ADDR + 0x254)
-
-/* APP Block local error registers */
-#define APP_LOCAL_ERR_STAT (APP_BLK_REG_ADDR + 0x258)
-#define APP_LOCAL_ERR_MSK (APP_BLK_REG_ADDR + 0x25c)
-
-/* PCIe Link Error registers */
-#define PCIE_LNK_ERR_STAT (APP_BLK_REG_ADDR + 0x260)
-#define PCIE_LNK_ERR_MSK (APP_BLK_REG_ADDR + 0x264)
-
-/**
- * FCoE/FIP Ethertype Register
- * 31:16 -- Chip wide value for FIP type
- * 15:0 -- Chip wide value for FCoE type
- */
-#define FCOE_FIP_ETH_TYPE (APP_BLK_REG_ADDR + 0x280)
-
-/**
- * Reserved Ethertype Register
- * 31:16 -- Reserved
- * 15:0 -- Other ethertype
- */
-#define RESV_ETH_TYPE (APP_BLK_REG_ADDR + 0x284)
-
-/**
- * Host Command Status Registers
- * Each set consists of 3 registers :
- * clear, set, cmd
- * 16 such register sets in all
- * See catapult_spec.pdf for detailed functionality
- * Put each type in a single macro accessed by _num ?
- */
-#define HOST_CMDSTS0_CLR_REG (APP_BLK_REG_ADDR + 0x500)
-#define HOST_CMDSTS0_SET_REG (APP_BLK_REG_ADDR + 0x504)
-#define HOST_CMDSTS0_REG (APP_BLK_REG_ADDR + 0x508)
-#define HOST_CMDSTS1_CLR_REG (APP_BLK_REG_ADDR + 0x510)
-#define HOST_CMDSTS1_SET_REG (APP_BLK_REG_ADDR + 0x514)
-#define HOST_CMDSTS1_REG (APP_BLK_REG_ADDR + 0x518)
-#define HOST_CMDSTS2_CLR_REG (APP_BLK_REG_ADDR + 0x520)
-#define HOST_CMDSTS2_SET_REG (APP_BLK_REG_ADDR + 0x524)
-#define HOST_CMDSTS2_REG (APP_BLK_REG_ADDR + 0x528)
-#define HOST_CMDSTS3_CLR_REG (APP_BLK_REG_ADDR + 0x530)
-#define HOST_CMDSTS3_SET_REG (APP_BLK_REG_ADDR + 0x534)
-#define HOST_CMDSTS3_REG (APP_BLK_REG_ADDR + 0x538)
-#define HOST_CMDSTS4_CLR_REG (APP_BLK_REG_ADDR + 0x540)
-#define HOST_CMDSTS4_SET_REG (APP_BLK_REG_ADDR + 0x544)
-#define HOST_CMDSTS4_REG (APP_BLK_REG_ADDR + 0x548)
-#define HOST_CMDSTS5_CLR_REG (APP_BLK_REG_ADDR + 0x550)
-#define HOST_CMDSTS5_SET_REG (APP_BLK_REG_ADDR + 0x554)
-#define HOST_CMDSTS5_REG (APP_BLK_REG_ADDR + 0x558)
-#define HOST_CMDSTS6_CLR_REG (APP_BLK_REG_ADDR + 0x560)
-#define HOST_CMDSTS6_SET_REG (APP_BLK_REG_ADDR + 0x564)
-#define HOST_CMDSTS6_REG (APP_BLK_REG_ADDR + 0x568)
-#define HOST_CMDSTS7_CLR_REG (APP_BLK_REG_ADDR + 0x570)
-#define HOST_CMDSTS7_SET_REG (APP_BLK_REG_ADDR + 0x574)
-#define HOST_CMDSTS7_REG (APP_BLK_REG_ADDR + 0x578)
-#define HOST_CMDSTS8_CLR_REG (APP_BLK_REG_ADDR + 0x580)
-#define HOST_CMDSTS8_SET_REG (APP_BLK_REG_ADDR + 0x584)
-#define HOST_CMDSTS8_REG (APP_BLK_REG_ADDR + 0x588)
-#define HOST_CMDSTS9_CLR_REG (APP_BLK_REG_ADDR + 0x590)
-#define HOST_CMDSTS9_SET_REG (APP_BLK_REG_ADDR + 0x594)
-#define HOST_CMDSTS9_REG (APP_BLK_REG_ADDR + 0x598)
-#define HOST_CMDSTS10_CLR_REG (APP_BLK_REG_ADDR + 0x5A0)
-#define HOST_CMDSTS10_SET_REG (APP_BLK_REG_ADDR + 0x5A4)
-#define HOST_CMDSTS10_REG (APP_BLK_REG_ADDR + 0x5A8)
-#define HOST_CMDSTS11_CLR_REG (APP_BLK_REG_ADDR + 0x5B0)
-#define HOST_CMDSTS11_SET_REG (APP_BLK_REG_ADDR + 0x5B4)
-#define HOST_CMDSTS11_REG (APP_BLK_REG_ADDR + 0x5B8)
-#define HOST_CMDSTS12_CLR_REG (APP_BLK_REG_ADDR + 0x5C0)
-#define HOST_CMDSTS12_SET_REG (APP_BLK_REG_ADDR + 0x5C4)
-#define HOST_CMDSTS12_REG (APP_BLK_REG_ADDR + 0x5C8)
-#define HOST_CMDSTS13_CLR_REG (APP_BLK_REG_ADDR + 0x5D0)
-#define HOST_CMDSTS13_SET_REG (APP_BLK_REG_ADDR + 0x5D4)
-#define HOST_CMDSTS13_REG (APP_BLK_REG_ADDR + 0x5D8)
-#define HOST_CMDSTS14_CLR_REG (APP_BLK_REG_ADDR + 0x5E0)
-#define HOST_CMDSTS14_SET_REG (APP_BLK_REG_ADDR + 0x5E4)
-#define HOST_CMDSTS14_REG (APP_BLK_REG_ADDR + 0x5E8)
-#define HOST_CMDSTS15_CLR_REG (APP_BLK_REG_ADDR + 0x5F0)
-#define HOST_CMDSTS15_SET_REG (APP_BLK_REG_ADDR + 0x5F4)
-#define HOST_CMDSTS15_REG (APP_BLK_REG_ADDR + 0x5F8)
-
-/**
- * LPU0 Block Register Address Offset from BAR0
- * Range 0x18000 - 0x18033
- */
-#define LPU0_BLK_REG_ADDR 0x00018000
-
-/**
- * LPU0 Registers
- * Should they be directly used from host,
- * except for diagnostics ?
- * CTL_REG : Control register
- * CMD_REG : Triggers exec. of cmd. in
- * Mailbox memory
- */
-#define LPU0_MBOX_CTL_REG (LPU0_BLK_REG_ADDR + 0x000)
-#define LPU0_MBOX_CMD_REG (LPU0_BLK_REG_ADDR + 0x004)
-#define LPU0_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x008)
-#define LPU1_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x00c)
-#define LPU0_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x010)
-#define LPU1_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x014)
-#define LPU0_ERR_STATUS_REG (LPU0_BLK_REG_ADDR + 0x018)
-#define LPU0_ERR_SET_REG (LPU0_BLK_REG_ADDR + 0x020)
-
-/**
- * LPU1 Block Register Address Offset from BAR0
- * Range 0x18400 - 0x18433
- */
-#define LPU1_BLK_REG_ADDR 0x00018400
-
-/**
- * LPU1 Registers
- * Same as LPU0 registers above
- */
-#define LPU1_MBOX_CTL_REG (LPU1_BLK_REG_ADDR + 0x000)
-#define LPU1_MBOX_CMD_REG (LPU1_BLK_REG_ADDR + 0x004)
-#define LPU0_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x008)
-#define LPU1_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x00c)
-#define LPU0_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x010)
-#define LPU1_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x014)
-#define LPU1_ERR_STATUS_REG (LPU1_BLK_REG_ADDR + 0x018)
-#define LPU1_ERR_SET_REG (LPU1_BLK_REG_ADDR + 0x020)
-
-/**
- * PSS Block Register Address Offset from BAR0
- * Range 0x18800 - 0x188DB
- */
-#define PSS_BLK_REG_ADDR 0x00018800
-
-/**
- * PSS Registers
- * For details, see catapult_spec.pdf
- * ERR_STATUS_REG : Indicates error in PSS module
- * RAM_ERR_STATUS_REG : Indicates RAM module that detected error
- */
-#define ERR_STATUS_SET (PSS_BLK_REG_ADDR + 0x018)
-#define PSS_RAM_ERR_STATUS_REG (PSS_BLK_REG_ADDR + 0x01C)
-
-/**
- * PSS Semaphore Lock Registers, total 16
- * First read when unlocked returns 0,
- * and is set to 1, atomically.
- * Subsequent reads returns 1.
- * To clear set the value to 0.
- * Range : 0x20 to 0x5c
- */
-#define PSS_SEM_LOCK_REG(_num) \
- (PSS_BLK_REG_ADDR + 0x020 + ((_num) << 2))
-
-/**
- * PSS Semaphore Status Registers,
- * corresponding to the lock registers above
- */
-#define PSS_SEM_STATUS_REG(_num) \
- (PSS_BLK_REG_ADDR + 0x060 + ((_num) << 2))
-
-/**
- * Catapult CPQ Registers
- * Defines for Mailbox Registers
- * Used to send mailbox commands to firmware from
- * host. The data part is written to the MBox
- * memory, registers are used to indicate that
- * a commnad is resident in memory.
- *
- * Note : LPU0<->LPU1 mailboxes are not listed here
- */
-#define CPQ_BLK_REG_ADDR 0x00019000
-
-#define HOSTFN0_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x130)
-#define HOSTFN0_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x134)
-#define LPU0_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x138)
-#define LPU1_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x13C)
-
-#define HOSTFN1_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x140)
-#define HOSTFN1_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x144)
-#define LPU0_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x148)
-#define LPU1_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x14C)
-
-#define HOSTFN2_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x170)
-#define HOSTFN2_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x174)
-#define LPU0_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x178)
-#define LPU1_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x17C)
-
-#define HOSTFN3_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x180)
-#define HOSTFN3_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x184)
-#define LPU0_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x188)
-#define LPU1_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x18C)
-
-/* Host Function Force Parity Error Registers */
-#define HOSTFN0_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x120)
-#define HOSTFN1_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x124)
-#define HOSTFN2_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x128)
-#define HOSTFN3_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x12C)
-
-/* LL Port[0|1] Halt Mask Registers */
-#define LL_HALT_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1A0)
-#define LL_HALT_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1B0)
-
-/* LL Port[0|1] Error Mask Registers */
-#define LL_ERR_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1D0)
-#define LL_ERR_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1D4)
-
-/* EMC FLI (Flash Controller) Block Register Address Offset from BAR0 */
-#define FLI_BLK_REG_ADDR 0x0001D000
-
-/* EMC FLI Registers */
-#define FLI_CMD_REG (FLI_BLK_REG_ADDR + 0x000)
-#define FLI_ADDR_REG (FLI_BLK_REG_ADDR + 0x004)
-#define FLI_CTL_REG (FLI_BLK_REG_ADDR + 0x008)
-#define FLI_WRDATA_REG (FLI_BLK_REG_ADDR + 0x00C)
-#define FLI_RDDATA_REG (FLI_BLK_REG_ADDR + 0x010)
-#define FLI_DEV_STATUS_REG (FLI_BLK_REG_ADDR + 0x014)
-#define FLI_SIG_WD_REG (FLI_BLK_REG_ADDR + 0x018)
-
-/**
- * RO register
- * 31:16 -- Vendor Id
- * 15:0 -- Device Id
- */
-#define FLI_DEV_VENDOR_REG (FLI_BLK_REG_ADDR + 0x01C)
-#define FLI_ERR_STATUS_REG (FLI_BLK_REG_ADDR + 0x020)
-
-/**
- * RAD (RxAdm) Block Register Address Offset from BAR0
- * RAD0 Range : 0x20000 - 0x203FF
- * RAD1 Range : 0x20400 - 0x207FF
- */
-#define RAD0_BLK_REG_ADDR 0x00020000
-#define RAD1_BLK_REG_ADDR 0x00020400
-
-/* RAD0 Registers */
-#define RAD0_CTL_REG (RAD0_BLK_REG_ADDR + 0x000)
-#define RAD0_PE_PARM_REG (RAD0_BLK_REG_ADDR + 0x004)
-#define RAD0_BCN_REG (RAD0_BLK_REG_ADDR + 0x008)
-
-/* Default function ID register */
-#define RAD0_DEFAULT_REG (RAD0_BLK_REG_ADDR + 0x00C)
-
-/* Default promiscuous ID register */
-#define RAD0_PROMISC_REG (RAD0_BLK_REG_ADDR + 0x010)
-
-#define RAD0_BCNQ_REG (RAD0_BLK_REG_ADDR + 0x014)
-
-/*
- * This register selects 1 of 8 PM Q's using
- * VLAN pri, for non-BCN packets without a VLAN tag
- */
-#define RAD0_DEFAULTQ_REG (RAD0_BLK_REG_ADDR + 0x018)
-
-#define RAD0_ERR_STS (RAD0_BLK_REG_ADDR + 0x01C)
-#define RAD0_SET_ERR_STS (RAD0_BLK_REG_ADDR + 0x020)
-#define RAD0_ERR_INT_EN (RAD0_BLK_REG_ADDR + 0x024)
-#define RAD0_FIRST_ERR (RAD0_BLK_REG_ADDR + 0x028)
-#define RAD0_FORCE_ERR (RAD0_BLK_REG_ADDR + 0x02C)
-
-#define RAD0_IF_RCVD (RAD0_BLK_REG_ADDR + 0x030)
-#define RAD0_IF_RCVD_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x034)
-#define RAD0_IF_RCVD_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x038)
-#define RAD0_IF_RCVD_VLAN (RAD0_BLK_REG_ADDR + 0x03C)
-#define RAD0_IF_RCVD_UCAST (RAD0_BLK_REG_ADDR + 0x040)
-#define RAD0_IF_RCVD_UCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x044)
-#define RAD0_IF_RCVD_UCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x048)
-#define RAD0_IF_RCVD_UCAST_VLAN (RAD0_BLK_REG_ADDR + 0x04C)
-#define RAD0_IF_RCVD_MCAST (RAD0_BLK_REG_ADDR + 0x050)
-#define RAD0_IF_RCVD_MCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x054)
-#define RAD0_IF_RCVD_MCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x058)
-#define RAD0_IF_RCVD_MCAST_VLAN (RAD0_BLK_REG_ADDR + 0x05C)
-#define RAD0_IF_RCVD_BCAST (RAD0_BLK_REG_ADDR + 0x060)
-#define RAD0_IF_RCVD_BCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x064)
-#define RAD0_IF_RCVD_BCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x068)
-#define RAD0_IF_RCVD_BCAST_VLAN (RAD0_BLK_REG_ADDR + 0x06C)
-#define RAD0_DROPPED_FRAMES (RAD0_BLK_REG_ADDR + 0x070)
-
-#define RAD0_MAC_MAN_1H (RAD0_BLK_REG_ADDR + 0x080)
-#define RAD0_MAC_MAN_1L (RAD0_BLK_REG_ADDR + 0x084)
-#define RAD0_MAC_MAN_2H (RAD0_BLK_REG_ADDR + 0x088)
-#define RAD0_MAC_MAN_2L (RAD0_BLK_REG_ADDR + 0x08C)
-#define RAD0_MAC_MAN_3H (RAD0_BLK_REG_ADDR + 0x090)
-#define RAD0_MAC_MAN_3L (RAD0_BLK_REG_ADDR + 0x094)
-#define RAD0_MAC_MAN_4H (RAD0_BLK_REG_ADDR + 0x098)
-#define RAD0_MAC_MAN_4L (RAD0_BLK_REG_ADDR + 0x09C)
-
-#define RAD0_LAST4_IP (RAD0_BLK_REG_ADDR + 0x100)
-
-/* RAD1 Registers */
-#define RAD1_CTL_REG (RAD1_BLK_REG_ADDR + 0x000)
-#define RAD1_PE_PARM_REG (RAD1_BLK_REG_ADDR + 0x004)
-#define RAD1_BCN_REG (RAD1_BLK_REG_ADDR + 0x008)
-
-/* Default function ID register */
-#define RAD1_DEFAULT_REG (RAD1_BLK_REG_ADDR + 0x00C)
-
-/* Promiscuous function ID register */
-#define RAD1_PROMISC_REG (RAD1_BLK_REG_ADDR + 0x010)
-
-#define RAD1_BCNQ_REG (RAD1_BLK_REG_ADDR + 0x014)
-
-/*
- * This register selects 1 of 8 PM Q's using
- * VLAN pri, for non-BCN packets without a VLAN tag
- */
-#define RAD1_DEFAULTQ_REG (RAD1_BLK_REG_ADDR + 0x018)
-
-#define RAD1_ERR_STS (RAD1_BLK_REG_ADDR + 0x01C)
-#define RAD1_SET_ERR_STS (RAD1_BLK_REG_ADDR + 0x020)
-#define RAD1_ERR_INT_EN (RAD1_BLK_REG_ADDR + 0x024)
-
-/**
- * TXA Block Register Address Offset from BAR0
- * TXA0 Range : 0x21000 - 0x213FF
- * TXA1 Range : 0x21400 - 0x217FF
- */
-#define TXA0_BLK_REG_ADDR 0x00021000
-#define TXA1_BLK_REG_ADDR 0x00021400
-
-/* TXA Registers */
-#define TXA0_CTRL_REG (TXA0_BLK_REG_ADDR + 0x000)
-#define TXA1_CTRL_REG (TXA1_BLK_REG_ADDR + 0x000)
-
-/**
- * TSO Sequence # Registers (RO)
- * Total 8 (for 8 queues)
- * Holds the last seq.# for TSO frames
- * See catapult_spec.pdf for more details
- */
-#define TXA0_TSO_TCP_SEQ_REG(_num) \
- (TXA0_BLK_REG_ADDR + 0x020 + ((_num) << 2))
-
-#define TXA1_TSO_TCP_SEQ_REG(_num) \
- (TXA1_BLK_REG_ADDR + 0x020 + ((_num) << 2))
-
-/**
- * TSO IP ID # Registers (RO)
- * Total 8 (for 8 queues)
- * Holds the last IP ID for TSO frames
- * See catapult_spec.pdf for more details
- */
-#define TXA0_TSO_IP_INFO_REG(_num) \
- (TXA0_BLK_REG_ADDR + 0x040 + ((_num) << 2))
-
-#define TXA1_TSO_IP_INFO_REG(_num) \
- (TXA1_BLK_REG_ADDR + 0x040 + ((_num) << 2))
-
-/**
- * RXA Block Register Address Offset from BAR0
- * RXA0 Range : 0x21800 - 0x21BFF
- * RXA1 Range : 0x21C00 - 0x21FFF
- */
-#define RXA0_BLK_REG_ADDR 0x00021800
-#define RXA1_BLK_REG_ADDR 0x00021C00
-
-/* RXA Registers */
-#define RXA0_CTL_REG (RXA0_BLK_REG_ADDR + 0x040)
-#define RXA1_CTL_REG (RXA1_BLK_REG_ADDR + 0x040)
-
-/**
- * PPLB Block Register Address Offset from BAR0
- * PPLB0 Range : 0x22000 - 0x223FF
- * PPLB1 Range : 0x22400 - 0x227FF
- */
-#define PLB0_BLK_REG_ADDR 0x00022000
-#define PLB1_BLK_REG_ADDR 0x00022400
-
-/**
- * PLB Registers
- * Holds RL timer used time stamps in RLT tagged frames
- */
-#define PLB0_ECM_TIMER_REG (PLB0_BLK_REG_ADDR + 0x05C)
-#define PLB1_ECM_TIMER_REG (PLB1_BLK_REG_ADDR + 0x05C)
-
-/* Controls the rate-limiter on each of the priority class */
-#define PLB0_RL_CTL (PLB0_BLK_REG_ADDR + 0x060)
-#define PLB1_RL_CTL (PLB1_BLK_REG_ADDR + 0x060)
-
-/**
- * Max byte register, total 8, 0-7
- * see catapult_spec.pdf for details
- */
-#define PLB0_RL_MAX_BC(_num) \
- (PLB0_BLK_REG_ADDR + 0x064 + ((_num) << 2))
-#define PLB1_RL_MAX_BC(_num) \
- (PLB1_BLK_REG_ADDR + 0x064 + ((_num) << 2))
-
-/**
- * RL Time Unit Register for priority 0-7
- * 4 bits per priority
- * (2^rl_unit)*1us is the actual time period
- */
-#define PLB0_RL_TU_PRIO (PLB0_BLK_REG_ADDR + 0x084)
-#define PLB1_RL_TU_PRIO (PLB1_BLK_REG_ADDR + 0x084)
-
-/**
- * RL byte count register,
- * bytes transmitted in (rl_unit*1)us time period
- * 1 per priority, 8 in all, 0-7.
- */
-#define PLB0_RL_BYTE_CNT(_num) \
- (PLB0_BLK_REG_ADDR + 0x088 + ((_num) << 2))
-#define PLB1_RL_BYTE_CNT(_num) \
- (PLB1_BLK_REG_ADDR + 0x088 + ((_num) << 2))
-
-/**
- * RL Min factor register
- * 2 bits per priority,
- * 4 factors possible: 1, 0.5, 0.25, 0
- * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
- */
-#define PLB0_RL_MIN_REG (PLB0_BLK_REG_ADDR + 0x0A8)
-#define PLB1_RL_MIN_REG (PLB1_BLK_REG_ADDR + 0x0A8)
-
-/**
- * RL Max factor register
- * 2 bits per priority,
- * 4 factors possible: 1, 0.5, 0.25, 0
- * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
- */
-#define PLB0_RL_MAX_REG (PLB0_BLK_REG_ADDR + 0x0AC)
-#define PLB1_RL_MAX_REG (PLB1_BLK_REG_ADDR + 0x0AC)
-
-/* MAC SERDES Address Paging register */
-#define PLB0_EMS_ADD_REG (PLB0_BLK_REG_ADDR + 0xD0)
-#define PLB1_EMS_ADD_REG (PLB1_BLK_REG_ADDR + 0xD0)
-
-/* LL EMS Registers */
-#define LL_EMS0_BLK_REG_ADDR 0x00026800
-#define LL_EMS1_BLK_REG_ADDR 0x00026C00
-
-/**
- * BPC Block Register Address Offset from BAR0
- * BPC0 Range : 0x23000 - 0x233FF
- * BPC1 Range : 0x23400 - 0x237FF
- */
-#define BPC0_BLK_REG_ADDR 0x00023000
-#define BPC1_BLK_REG_ADDR 0x00023400
-
-/**
- * PMM Block Register Address Offset from BAR0
- * PMM0 Range : 0x23800 - 0x23BFF
- * PMM1 Range : 0x23C00 - 0x23FFF
- */
-#define PMM0_BLK_REG_ADDR 0x00023800
-#define PMM1_BLK_REG_ADDR 0x00023C00
-
-/**
- * HQM Block Register Address Offset from BAR0
- * HQM0 Range : 0x24000 - 0x243FF
- * HQM1 Range : 0x24400 - 0x247FF
- */
-#define HQM0_BLK_REG_ADDR 0x00024000
-#define HQM1_BLK_REG_ADDR 0x00024400
-
-/**
- * HQM Control Register
- * Controls some aspects of IB
- * See catapult_spec.pdf for details
- */
-#define HQM0_CTL_REG (HQM0_BLK_REG_ADDR + 0x000)
-#define HQM1_CTL_REG (HQM1_BLK_REG_ADDR + 0x000)
-
-/**
- * HQM Stop Q Semaphore Registers.
- * Only one Queue resource can be stopped at
- * any given time. This register controls access
- * to the single stop Q resource.
- * See catapult_spec.pdf for details
- */
-#define HQM0_RXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x028)
-#define HQM0_TXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x02C)
-#define HQM1_RXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x028)
-#define HQM1_TXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x02C)
-
-/**
- * LUT Block Register Address Offset from BAR0
- * LUT0 Range : 0x25800 - 0x25BFF
- * LUT1 Range : 0x25C00 - 0x25FFF
- */
-#define LUT0_BLK_REG_ADDR 0x00025800
-#define LUT1_BLK_REG_ADDR 0x00025C00
-
-/**
- * LUT Registers
- * See catapult_spec.pdf for details
- */
-#define LUT0_ERR_STS (LUT0_BLK_REG_ADDR + 0x000)
-#define LUT1_ERR_STS (LUT1_BLK_REG_ADDR + 0x000)
-#define LUT0_SET_ERR_STS (LUT0_BLK_REG_ADDR + 0x004)
-#define LUT1_SET_ERR_STS (LUT1_BLK_REG_ADDR + 0x004)
-
-/**
- * TRC (Debug/Trace) Register Offset from BAR0
- * Range : 0x26000 -- 0x263FFF
- */
-#define TRC_BLK_REG_ADDR 0x00026000
-
-/**
- * TRC Registers
- * See catapult_spec.pdf for details of each
- */
-#define TRC_CTL_REG (TRC_BLK_REG_ADDR + 0x000)
-#define TRC_MODS_REG (TRC_BLK_REG_ADDR + 0x004)
-#define TRC_TRGC_REG (TRC_BLK_REG_ADDR + 0x008)
-#define TRC_CNT1_REG (TRC_BLK_REG_ADDR + 0x010)
-#define TRC_CNT2_REG (TRC_BLK_REG_ADDR + 0x014)
-#define TRC_NXTS_REG (TRC_BLK_REG_ADDR + 0x018)
-#define TRC_DIRR_REG (TRC_BLK_REG_ADDR + 0x01C)
-
-/**
- * TRC Trigger match filters, total 10
- * Determines the trigger condition
- */
-#define TRC_TRGM_REG(_num) \
- (TRC_BLK_REG_ADDR + 0x040 + ((_num) << 2))
-
-/**
- * TRC Next State filters, total 10
- * Determines the next state conditions
- */
-#define TRC_NXTM_REG(_num) \
- (TRC_BLK_REG_ADDR + 0x080 + ((_num) << 2))
-
-/**
- * TRC Store Match filters, total 10
- * Determines the store conditions
- */
-#define TRC_STRM_REG(_num) \
- (TRC_BLK_REG_ADDR + 0x0C0 + ((_num) << 2))
-
-/* DOORBELLS ACCESS */
-
-/**
- * Catapult doorbells
- * Each doorbell-queue set has
- * 1 RxQ, 1 TxQ, 2 IBs in that order
- * Size of each entry in 32 bytes, even though only 1 word
- * is used. For Non-VM case each doorbell-q set is
- * separated by 128 bytes, for VM case it is separated
- * by 4K bytes
- * Non VM case Range : 0x38000 - 0x39FFF
- * VM case Range : 0x100000 - 0x11FFFF
- * The range applies to both HQMs
- */
-#define HQM_DOORBELL_BLK_BASE_ADDR 0x00038000
-#define HQM_DOORBELL_VM_BLK_BASE_ADDR 0x00100000
-
-/* MEMORY ACCESS */
-
-/**
- * Catapult H/W Block Memory Access Address
- * To the host a memory space of 32K (page) is visible
- * at a time. The address range is from 0x08000 to 0x0FFFF
- */
-#define HW_BLK_HOST_MEM_ADDR 0x08000
-
-/**
- * Catapult LUT Memory Access Page Numbers
- * Range : LUT0 0xa0-0xa1
- * LUT1 0xa2-0xa3
- */
-#define LUT0_MEM_BLK_BASE_PG_NUM 0x000000A0
-#define LUT1_MEM_BLK_BASE_PG_NUM 0x000000A2
-
-/**
- * Catapult RxFn Database Memory Block Base Offset
- *
- * The Rx function database exists in LUT block.
- * In PCIe space this is accessible as a 256x32
- * bit block. Each entry in this database is 4
- * (4 byte) words. Max. entries is 64.
- * Address of an entry corresponding to a function
- * = base_addr + (function_no. * 16)
- */
-#define RX_FNDB_RAM_BASE_OFFSET 0x0000B400
-
-/**
- * Catapult TxFn Database Memory Block Base Offset Address
- *
- * The Tx function database exists in LUT block.
- * In PCIe space this is accessible as a 64x32
- * bit block. Each entry in this database is 1
- * (4 byte) word. Max. entries is 64.
- * Address of an entry corresponding to a function
- * = base_addr + (function_no. * 4)
- */
-#define TX_FNDB_RAM_BASE_OFFSET 0x0000B800
-
-/**
- * Catapult Unicast CAM Base Offset Address
- *
- * Exists in LUT memory space.
- * Shared by both the LL & FCoE driver.
- * Size is 256x48 bits; mapped to PCIe space
- * 512x32 bit blocks. For each address, bits
- * are written in the order : [47:32] and then
- * [31:0].
- */
-#define UCAST_CAM_BASE_OFFSET 0x0000A800
-
-/**
- * Catapult Unicast RAM Base Offset Address
- *
- * Exists in LUT memory space.
- * Shared by both the LL & FCoE driver.
- * Size is 256x9 bits.
- */
-#define UCAST_RAM_BASE_OFFSET 0x0000B000
-
-/**
- * Catapult Mulicast CAM Base Offset Address
- *
- * Exists in LUT memory space.
- * Shared by both the LL & FCoE driver.
- * Size is 256x48 bits; mapped to PCIe space
- * 512x32 bit blocks. For each address, bits
- * are written in the order : [47:32] and then
- * [31:0].
- */
-#define MCAST_CAM_BASE_OFFSET 0x0000A000
-
-/**
- * Catapult VLAN RAM Base Offset Address
- *
- * Exists in LUT memory space.
- * Size is 4096x66 bits; mapped to PCIe space as
- * 8192x32 bit blocks.
- * All the 4K entries are within the address range
- * 0x0000 to 0x8000, so in the first LUT page.
- */
-#define VLAN_RAM_BASE_OFFSET 0x00000000
-
-/**
- * Catapult Tx Stats RAM Base Offset Address
- *
- * Exists in LUT memory space.
- * Size is 1024x33 bits;
- * Each Tx function has 64 bytes of space
- */
-#define TX_STATS_RAM_BASE_OFFSET 0x00009000
-
-/**
- * Catapult Rx Stats RAM Base Offset Address
- *
- * Exists in LUT memory space.
- * Size is 1024x33 bits;
- * Each Rx function has 64 bytes of space
- */
-#define RX_STATS_RAM_BASE_OFFSET 0x00008000
-
-/* Catapult RXA Memory Access Page Numbers */
-#define RXA0_MEM_BLK_BASE_PG_NUM 0x0000008C
-#define RXA1_MEM_BLK_BASE_PG_NUM 0x0000008D
-
-/**
- * Catapult Multicast Vector Table Base Offset Address
- *
- * Exists in RxA memory space.
- * Organized as 512x65 bit block.
- * However for each entry 16 bytes allocated (power of 2)
- * Total size 512*16 bytes.
- * There are two logical divisions, 256 entries each :
- * a) Entries 0x00 to 0xff (256) -- Approx. MVT
- * Offset 0x000 to 0xFFF
- * b) Entries 0x100 to 0x1ff (256) -- Exact MVT
- * Offsets 0x1000 to 0x1FFF
- */
-#define MCAST_APPROX_MVT_BASE_OFFSET 0x00000000
-#define MCAST_EXACT_MVT_BASE_OFFSET 0x00001000
-
-/**
- * Catapult RxQ Translate Table (RIT) Base Offset Address
- *
- * Exists in RxA memory space
- * Total no. of entries 64
- * Each entry is 1 (4 byte) word.
- * 31:12 -- Reserved
- * 11:0 -- Two 6 bit RxQ Ids
- */
-#define FUNCTION_TO_RXQ_TRANSLATE 0x00002000
-
-/* Catapult RxAdm (RAD) Memory Access Page Numbers */
-#define RAD0_MEM_BLK_BASE_PG_NUM 0x00000086
-#define RAD1_MEM_BLK_BASE_PG_NUM 0x00000087
-
-/**
- * Catapult RSS Table Base Offset Address
- *
- * Exists in RAD memory space.
- * Each entry is 352 bits, but aligned on
- * 64 byte (512 bit) boundary. Accessed
- * 4 byte words, the whole entry can be
- * broken into 11 word accesses.
- */
-#define RSS_TABLE_BASE_OFFSET 0x00000800
-
-/**
- * Catapult CPQ Block Page Number
- * This value is written to the page number registers
- * to access the memory associated with the mailboxes.
- */
-#define CPQ_BLK_PG_NUM 0x00000005
-
-/**
- * Clarification :
- * LL functions are 2 & 3; can HostFn0/HostFn1
- * <-> LPU0/LPU1 memories be used ?
- */
-/**
- * Catapult HostFn0/HostFn1 to LPU0/LPU1 Mbox memory
- * Per catapult_spec.pdf, the offset of the mbox
- * memory is in the register space at an offset of 0x200
- */
-#define CPQ_BLK_REG_MBOX_ADDR (CPQ_BLK_REG_ADDR + 0x200)
-
-#define HOSTFN_LPU_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x000)
-
-/* Catapult LPU0/LPU1 to HostFn0/HostFn1 Mbox memory */
-#define LPU_HOSTFN_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x080)
-
-/**
- * Catapult HQM Block Page Number
- * This is written to the page number register for
- * the appropriate function to access the memory
- * associated with HQM
- */
-#define HQM0_BLK_PG_NUM 0x00000096
-#define HQM1_BLK_PG_NUM 0x00000097
-
-/**
- * Note that TxQ and RxQ entries are interlaced
- * the HQM memory, i.e RXQ0, TXQ0, RXQ1, TXQ1.. etc.
- */
-
-#define HQM_RXTX_Q_RAM_BASE_OFFSET 0x00004000
-
-/**
- * CQ Memory
- * Exists in HQM Memory space
- * Each entry is 16 (4 byte) words of which
- * only 12 words are used for configuration
- * Total 64 entries per HQM memory space
- */
-#define HQM_CQ_RAM_BASE_OFFSET 0x00006000
-
-/**
- * Interrupt Block (IB) Memory
- * Exists in HQM Memory space
- * Each entry is 8 (4 byte) words of which
- * only 5 words are used for configuration
- * Total 128 entries per HQM memory space
- */
-#define HQM_IB_RAM_BASE_OFFSET 0x00001000
-
-/**
- * Index Table (IT) Memory
- * Exists in HQM Memory space
- * Each entry is 1 (4 byte) word which
- * is used for configuration
- * Total 128 entries per HQM memory space
- */
-#define HQM_INDX_TBL_RAM_BASE_OFFSET 0x00002000
-
-/**
- * PSS Block Memory Page Number
- * This is written to the appropriate page number
- * register to access the CPU memory.
- * Also known as the PSS secondary memory (SMEM).
- * Range : 0x180 to 0x1CF
- * See catapult_spec.pdf for details
- */
-#define PSS_BLK_PG_NUM 0x00000180
-
-/**
- * Offsets of different instances of PSS SMEM
- * 2.5M of continuous 1T memory space : 2 blocks
- * of 1M each (32 pages each, page=32KB) and 4 smaller
- * blocks of 128K each (4 pages each, page=32KB)
- * PSS_LMEM_INST0 is used for firmware download
- */
-#define PSS_LMEM_INST0 0x00000000
-#define PSS_LMEM_INST1 0x00100000
-#define PSS_LMEM_INST2 0x00200000
-#define PSS_LMEM_INST3 0x00220000
-#define PSS_LMEM_INST4 0x00240000
-#define PSS_LMEM_INST5 0x00260000
-
-#define BNA_PCI_REG_CT_ADDRSZ (0x40000)
-
-#define BNA_GET_PAGE_NUM(_base_page, _offset) \
- ((_base_page) + ((_offset) >> 15))
-
-#define BNA_GET_PAGE_OFFSET(_offset) \
- ((_offset) & 0x7fff)
-
-#define BNA_GET_MEM_BASE_ADDR(_bar0, _base_offset) \
- ((_bar0) + HW_BLK_HOST_MEM_ADDR \
- + BNA_GET_PAGE_OFFSET((_base_offset)))
-
-#define BNA_GET_VLAN_MEM_ENTRY_ADDR(_bar0, _fn_id, _vlan_id)\
- (_bar0 + (HW_BLK_HOST_MEM_ADDR) \
- + (BNA_GET_PAGE_OFFSET(VLAN_RAM_BASE_OFFSET)) \
- + (((_fn_id) & 0x3f) << 9) \
- + (((_vlan_id) & 0xfe0) >> 3))
-
-/**
- *
- * Interrupt related bits, flags and macros
- *
- */
-
-#define __LPU02HOST_MBOX0_STATUS_BITS 0x00100000
-#define __LPU12HOST_MBOX0_STATUS_BITS 0x00200000
-#define __LPU02HOST_MBOX1_STATUS_BITS 0x00400000
-#define __LPU12HOST_MBOX1_STATUS_BITS 0x00800000
-
-#define __LPU02HOST_MBOX0_MASK_BITS 0x00100000
-#define __LPU12HOST_MBOX0_MASK_BITS 0x00200000
-#define __LPU02HOST_MBOX1_MASK_BITS 0x00400000
-#define __LPU12HOST_MBOX1_MASK_BITS 0x00800000
-
-#define __LPU2HOST_MBOX_MASK_BITS \
- (__LPU02HOST_MBOX0_MASK_BITS | __LPU02HOST_MBOX1_MASK_BITS | \
- __LPU12HOST_MBOX0_MASK_BITS | __LPU12HOST_MBOX1_MASK_BITS)
-
-#define __LPU2HOST_IB_STATUS_BITS 0x0000ffff
-
-#define BNA_IS_LPU0_MBOX_INTR(_intr_status) \
- ((_intr_status) & (__LPU02HOST_MBOX0_STATUS_BITS | \
- __LPU02HOST_MBOX1_STATUS_BITS))
-
-#define BNA_IS_LPU1_MBOX_INTR(_intr_status) \
- ((_intr_status) & (__LPU12HOST_MBOX0_STATUS_BITS | \
- __LPU12HOST_MBOX1_STATUS_BITS))
-
-#define BNA_IS_MBOX_INTR(_intr_status) \
- ((_intr_status) & \
- (__LPU02HOST_MBOX0_STATUS_BITS | \
- __LPU02HOST_MBOX1_STATUS_BITS | \
- __LPU12HOST_MBOX0_STATUS_BITS | \
- __LPU12HOST_MBOX1_STATUS_BITS))
-
-#define __EMC_ERROR_STATUS_BITS 0x00010000
-#define __LPU0_ERROR_STATUS_BITS 0x00020000
-#define __LPU1_ERROR_STATUS_BITS 0x00040000
-#define __PSS_ERROR_STATUS_BITS 0x00080000
-
-#define __HALT_STATUS_BITS 0x01000000
-
-#define __EMC_ERROR_MASK_BITS 0x00010000
-#define __LPU0_ERROR_MASK_BITS 0x00020000
-#define __LPU1_ERROR_MASK_BITS 0x00040000
-#define __PSS_ERROR_MASK_BITS 0x00080000
-
-#define __HALT_MASK_BITS 0x01000000
-
-#define __ERROR_MASK_BITS \
- (__EMC_ERROR_MASK_BITS | __LPU0_ERROR_MASK_BITS | \
- __LPU1_ERROR_MASK_BITS | __PSS_ERROR_MASK_BITS | \
- __HALT_MASK_BITS)
-
-#define BNA_IS_ERR_INTR(_intr_status) \
- ((_intr_status) & \
- (__EMC_ERROR_STATUS_BITS | \
- __LPU0_ERROR_STATUS_BITS | \
- __LPU1_ERROR_STATUS_BITS | \
- __PSS_ERROR_STATUS_BITS | \
- __HALT_STATUS_BITS))
-
-#define BNA_IS_MBOX_ERR_INTR(_intr_status) \
- (BNA_IS_MBOX_INTR((_intr_status)) | \
- BNA_IS_ERR_INTR((_intr_status)))
-
-#define BNA_IS_INTX_DATA_INTR(_intr_status) \
- ((_intr_status) & __LPU2HOST_IB_STATUS_BITS)
-
-#define BNA_INTR_STATUS_MBOX_CLR(_intr_status) \
-do { \
- (_intr_status) &= ~(__LPU02HOST_MBOX0_STATUS_BITS | \
- __LPU02HOST_MBOX1_STATUS_BITS | \
- __LPU12HOST_MBOX0_STATUS_BITS | \
- __LPU12HOST_MBOX1_STATUS_BITS); \
-} while (0)
-
-#define BNA_INTR_STATUS_ERR_CLR(_intr_status) \
-do { \
- (_intr_status) &= ~(__EMC_ERROR_STATUS_BITS | \
- __LPU0_ERROR_STATUS_BITS | \
- __LPU1_ERROR_STATUS_BITS | \
- __PSS_ERROR_STATUS_BITS | \
- __HALT_STATUS_BITS); \
-} while (0)
-
-#define bna_intx_disable(_bna, _cur_mask) \
-{ \
- (_cur_mask) = readl((_bna)->regs.fn_int_mask);\
- writel(0xffffffff, (_bna)->regs.fn_int_mask);\
-}
-
-#define bna_intx_enable(bna, new_mask) \
- writel((new_mask), (bna)->regs.fn_int_mask)
-
-#define bna_mbox_intr_disable(bna) \
- writel((readl((bna)->regs.fn_int_mask) | \
- (__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
- (bna)->regs.fn_int_mask)
-
-#define bna_mbox_intr_enable(bna) \
- writel((readl((bna)->regs.fn_int_mask) & \
- ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
- (bna)->regs.fn_int_mask)
-
-#define bna_intr_status_get(_bna, _status) \
-{ \
- (_status) = readl((_bna)->regs.fn_int_status); \
- if ((_status)) { \
- writel((_status) & ~(__LPU02HOST_MBOX0_STATUS_BITS |\
- __LPU02HOST_MBOX1_STATUS_BITS |\
- __LPU12HOST_MBOX0_STATUS_BITS |\
- __LPU12HOST_MBOX1_STATUS_BITS), \
- (_bna)->regs.fn_int_status);\
- } \
-}
-
-#define bna_intr_status_get_no_clr(_bna, _status) \
- (_status) = readl((_bna)->regs.fn_int_status)
-
-#define bna_intr_mask_get(bna, mask) \
- (*mask) = readl((bna)->regs.fn_int_mask)
-
-#define bna_intr_ack(bna, intr_bmap) \
- writel((intr_bmap), (bna)->regs.fn_int_status)
-
-#define bna_ib_intx_disable(bna, ib_id) \
- writel(readl((bna)->regs.fn_int_mask) | \
- (1 << (ib_id)), \
- (bna)->regs.fn_int_mask)
-
-#define bna_ib_intx_enable(bna, ib_id) \
- writel(readl((bna)->regs.fn_int_mask) & \
- ~(1 << (ib_id)), \
- (bna)->regs.fn_int_mask)
-
-#define bna_mbox_msix_idx_set(_device) \
-do {\
- writel(((_device)->vector & 0x000001FF), \
- (_device)->bna->pcidev.pci_bar_kva + \
- reg_offset[(_device)->bna->pcidev.pci_func].msix_idx);\
-} while (0)
-
-/**
- *
- * TxQ, RxQ, CQ related bits, offsets, macros
- *
- */
-
-#define BNA_Q_IDLE_STATE 0x00008001
-
-#define BNA_GET_DOORBELL_BASE_ADDR(_bar0) \
- ((_bar0) + HQM_DOORBELL_BLK_BASE_ADDR)
-
-#define BNA_GET_DOORBELL_ENTRY_OFFSET(_entry) \
- ((HQM_DOORBELL_BLK_BASE_ADDR) \
- + (_entry << 7))
-
-#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
- (0x80000000 | ((_timeout) << 16) | (_events))
-
-#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
-
-/* TxQ Entry Opcodes */
-#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
-#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
-#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
-
-/* TxQ Entry Control Flags */
-#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
-#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
-#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
-#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
-#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
-#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
-#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
-
-#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
- (((_hdr_size) << 10) | ((_offset) & 0x3FF))
-
-/*
- * Completion Q defines
- */
-/* CQ Entry Flags */
-#define BNA_CQ_EF_MAC_ERROR (1 << 0)
-#define BNA_CQ_EF_FCS_ERROR (1 << 1)
-#define BNA_CQ_EF_TOO_LONG (1 << 2)
-#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
-
-#define BNA_CQ_EF_RSVD1 (1 << 4)
-#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
-#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
-#define BNA_CQ_EF_HDS_HEADER (1 << 7)
-
-#define BNA_CQ_EF_UDP (1 << 8)
-#define BNA_CQ_EF_TCP (1 << 9)
-#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
-#define BNA_CQ_EF_IPV6 (1 << 11)
-
-#define BNA_CQ_EF_IPV4 (1 << 12)
-#define BNA_CQ_EF_VLAN (1 << 13)
-#define BNA_CQ_EF_RSS (1 << 14)
-#define BNA_CQ_EF_RSVD2 (1 << 15)
-
-#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
-#define BNA_CQ_EF_MCAST (1 << 17)
-#define BNA_CQ_EF_BCAST (1 << 18)
-#define BNA_CQ_EF_REMOTE (1 << 19)
-
-#define BNA_CQ_EF_LOCAL (1 << 20)
-
-/**
- *
- * Data structures
- *
- */
-
-enum txf_flags {
- BFI_TXF_CF_ENABLE = 1 << 0,
- BFI_TXF_CF_VLAN_FILTER = 1 << 8,
- BFI_TXF_CF_VLAN_ADMIT = 1 << 9,
- BFI_TXF_CF_VLAN_INSERT = 1 << 10,
- BFI_TXF_CF_RSVD1 = 1 << 11,
- BFI_TXF_CF_MAC_SA_CHECK = 1 << 12,
- BFI_TXF_CF_VLAN_WI_BASED = 1 << 13,
- BFI_TXF_CF_VSWITCH_MCAST = 1 << 14,
- BFI_TXF_CF_VSWITCH_UCAST = 1 << 15,
- BFI_TXF_CF_RSVD2 = 0x7F << 1
-};
-
-enum ib_flags {
- BFI_IB_CF_MASTER_ENABLE = (1 << 0),
- BFI_IB_CF_MSIX_MODE = (1 << 1),
- BFI_IB_CF_COALESCING_MODE = (1 << 2),
- BFI_IB_CF_INTER_PKT_ENABLE = (1 << 3),
- BFI_IB_CF_INT_ENABLE = (1 << 4),
- BFI_IB_CF_INTER_PKT_DMA = (1 << 5),
- BFI_IB_CF_ACK_PENDING = (1 << 6),
- BFI_IB_CF_RESERVED1 = (1 << 7)
-};
-
-enum rss_hash_type {
- BFI_RSS_T_V4_TCP = (1 << 11),
- BFI_RSS_T_V4_IP = (1 << 10),
- BFI_RSS_T_V6_TCP = (1 << 9),
- BFI_RSS_T_V6_IP = (1 << 8)
-};
-enum hds_header_type {
- BNA_HDS_T_V4_TCP = (1 << 11),
- BNA_HDS_T_V4_UDP = (1 << 10),
- BNA_HDS_T_V6_TCP = (1 << 9),
- BNA_HDS_T_V6_UDP = (1 << 8),
- BNA_HDS_FORCED = (1 << 7),
-};
-enum rxf_flags {
- BNA_RXF_CF_SM_LG_RXQ = (1 << 15),
- BNA_RXF_CF_DEFAULT_VLAN = (1 << 14),
- BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE = (1 << 13),
- BNA_RXF_CF_VLAN_STRIP = (1 << 12),
- BNA_RXF_CF_RSS_ENABLE = (1 << 8)
-};
-struct bna_chip_regs_offset {
- u32 page_addr;
- u32 fn_int_status;
- u32 fn_int_mask;
- u32 msix_idx;
-};
-
-struct bna_chip_regs {
- void __iomem *page_addr;
- void __iomem *fn_int_status;
- void __iomem *fn_int_mask;
-};
-
-struct bna_txq_mem {
- u32 pg_tbl_addr_lo;
- u32 pg_tbl_addr_hi;
- u32 cur_q_entry_lo;
- u32 cur_q_entry_hi;
- u32 reserved1;
- u32 reserved2;
- u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
- /* 15:0 ->producer pointer (index?) */
- u32 entry_n_pg_size; /* 31:16->entry size */
- /* 15:0 ->page size */
- u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
- /* 23:16->Int Blk Offset */
- /* 15:0 ->consumer pointer(index?) */
- u32 cns_ptr2_n_q_state; /* 31:16->cons. ptr 2; 15:0-> Q state */
- u32 nxt_qid_n_fid_n_pri; /* 17:10->next */
- /* QId;9:3->FID;2:0->Priority */
- u32 wvc_n_cquota_n_rquota; /* 31:24->WI Vector Count; */
- /* 23:12->Cfg Quota; */
- /* 11:0 ->Run Quota */
- u32 reserved3[4];
-};
-
-struct bna_rxq_mem {
- u32 pg_tbl_addr_lo;
- u32 pg_tbl_addr_hi;
- u32 cur_q_entry_lo;
- u32 cur_q_entry_hi;
- u32 reserved1;
- u32 reserved2;
- u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
- /* 15:0 ->producer pointer (index?) */
- u32 entry_n_pg_size; /* 31:16->entry size */
- /* 15:0 ->page size */
- u32 sg_n_cq_n_cns_ptr; /* 31:28->reserved; 27:24->sg count */
- /* 23:16->CQ; */
- /* 15:0->consumer pointer(index?) */
- u32 buf_sz_n_q_state; /* 31:16->buffer size; 15:0-> Q state */
- u32 next_qid; /* 17:10->next QId */
- u32 reserved3;
- u32 reserved4[4];
-};
-
-struct bna_rxtx_q_mem {
- struct bna_rxq_mem rxq;
- struct bna_txq_mem txq;
-};
-
-struct bna_cq_mem {
- u32 pg_tbl_addr_lo;
- u32 pg_tbl_addr_hi;
- u32 cur_q_entry_lo;
- u32 cur_q_entry_hi;
-
- u32 reserved1;
- u32 reserved2;
- u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
- /* 15:0 ->producer pointer (index?) */
- u32 entry_n_pg_size; /* 31:16->entry size */
- /* 15:0 ->page size */
- u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
- /* 23:16->Int Blk Offset */
- /* 15:0 ->consumer pointer(index?) */
- u32 q_state; /* 31:16->reserved; 15:0-> Q state */
- u32 reserved3[2];
- u32 reserved4[4];
-};
-
-struct bna_ib_blk_mem {
- u32 host_addr_lo;
- u32 host_addr_hi;
- u32 clsc_n_ctrl_n_msix; /* 31:24->coalescing; */
- /* 23:16->coalescing cfg; */
- /* 15:8 ->control; */
- /* 7:0 ->msix; */
- u32 ipkt_n_ent_n_idxof;
- u32 ipkt_cnt_cfg_n_unacked;
-
- u32 reserved[3];
-};
-
-struct bna_idx_tbl_mem {
- u32 idx; /* !< 31:16->res;15:0->idx; */
-};
-
-struct bna_doorbell_qset {
- u32 rxq[0x20 >> 2];
- u32 txq[0x20 >> 2];
- u32 ib0[0x20 >> 2];
- u32 ib1[0x20 >> 2];
-};
-
-struct bna_rx_fndb_ram {
- u32 rss_prop;
- u32 size_routing_props;
- u32 rit_hds_mcastq;
- u32 control_flags;
-};
-
-struct bna_tx_fndb_ram {
- u32 vlan_n_ctrl_flags;
-};
-
-/**
- * @brief
- * Structure which maps to RxFn Indirection Table (RIT)
- * Size : 1 word
- * See catapult_spec.pdf, RxA for details
- */
-struct bna_rit_mem {
- u32 rxq_ids; /* !< 31:12->res;11:0->two 6 bit RxQ Ids */
-};
-
-/**
- * @brief
- * Structure which maps to RSS Table entry
- * Size : 16 words
- * See catapult_spec.pdf, RAD for details
- */
-struct bna_rss_mem {
- /*
- * 31:12-> res
- * 11:8 -> protocol type
- * 7:0 -> hash index
- */
- u32 type_n_hash;
- u32 hash_key[10]; /* !< 40 byte Toeplitz hash key */
- u32 reserved[5];
-};
-
-/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
-struct bna_dma_addr {
- u32 msb;
- u32 lsb;
-};
-
-struct bna_txq_wi_vector {
- u16 reserved;
- u16 length; /* Only 14 LSB are valid */
- struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
-};
-
-typedef u16 bna_txq_wi_opcode_t;
-
-typedef u16 bna_txq_wi_ctrl_flag_t;
-
-/**
- * TxQ Entry Structure
- *
- * BEWARE: Load values into this structure with correct endianess.
- */
-struct bna_txq_entry {
- union {
- struct {
- u8 reserved;
- u8 num_vectors; /* number of vectors present */
- bna_txq_wi_opcode_t opcode; /* Either */
- /* BNA_TXQ_WI_SEND or */
- /* BNA_TXQ_WI_SEND_LSO */
- bna_txq_wi_ctrl_flag_t flags; /* OR of all the flags */
- u16 l4_hdr_size_n_offset;
- u16 vlan_tag;
- u16 lso_mss; /* Only 14 LSB are valid */
- u32 frame_length; /* Only 24 LSB are valid */
- } wi;
-
- struct {
- u16 reserved;
- bna_txq_wi_opcode_t opcode; /* Must be */
- /* BNA_TXQ_WI_EXTENSION */
- u32 reserved2[3]; /* Place holder for */
- /* removed vector (12 bytes) */
- } wi_ext;
- } hdr;
- struct bna_txq_wi_vector vector[4];
-};
-#define wi_hdr hdr.wi
-#define wi_ext_hdr hdr.wi_ext
-
-/* RxQ Entry Structure */
-struct bna_rxq_entry { /* Rx-Buffer */
- struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */
-};
-
-typedef u32 bna_cq_e_flag_t;
-
-/* CQ Entry Structure */
-struct bna_cq_entry {
- bna_cq_e_flag_t flags;
- u16 vlan_tag;
- u16 length;
- u32 rss_hash;
- u8 valid;
- u8 reserved1;
- u8 reserved2;
- u8 rxq_id;
-};
-
-#endif /* __BNA_HW_H__ */
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
deleted file mode 100644
index f0983c83244..00000000000
--- a/drivers/net/bna/bna_txrx.c
+++ /dev/null
@@ -1,4185 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-#include "bna.h"
-#include "bfa_cs.h"
-#include "bfi.h"
-
-/**
- * IB
- */
-#define bna_ib_find_free_ibidx(_mask, _pos)\
-do {\
- (_pos) = 0;\
- while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
- ((1 << (_pos)) & (_mask)))\
- (_pos)++;\
-} while (0)
-
-#define bna_ib_count_ibidx(_mask, _count)\
-do {\
- int pos = 0;\
- (_count) = 0;\
- while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
- if ((1 << pos) & (_mask))\
- (_count) = pos + 1;\
- pos++;\
- } \
-} while (0)
-
-#define bna_ib_select_segpool(_count, _q_idx)\
-do {\
- int i;\
- (_q_idx) = -1;\
- for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
- if ((_count <= ibidx_pool[i].pool_entry_size)) {\
- (_q_idx) = i;\
- break;\
- } \
- } \
-} while (0)
-
-struct bna_ibidx_pool {
- int pool_size;
- int pool_entry_size;
-};
-init_ibidx_pool(ibidx_pool);
-
-static struct bna_intr *
-bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
- int vector)
-{
- struct bna_intr *intr;
- struct list_head *qe;
-
- list_for_each(qe, &ib_mod->intr_active_q) {
- intr = (struct bna_intr *)qe;
-
- if ((intr->intr_type == intr_type) &&
- (intr->vector == vector)) {
- intr->ref_count++;
- return intr;
- }
- }
-
- if (list_empty(&ib_mod->intr_free_q))
- return NULL;
-
- bfa_q_deq(&ib_mod->intr_free_q, &intr);
- bfa_q_qe_init(&intr->qe);
-
- intr->ref_count = 1;
- intr->intr_type = intr_type;
- intr->vector = vector;
-
- list_add_tail(&intr->qe, &ib_mod->intr_active_q);
-
- return intr;
-}
-
-static void
-bna_intr_put(struct bna_ib_mod *ib_mod,
- struct bna_intr *intr)
-{
- intr->ref_count--;
-
- if (intr->ref_count == 0) {
- intr->ib = NULL;
- list_del(&intr->qe);
- bfa_q_qe_init(&intr->qe);
- list_add_tail(&intr->qe, &ib_mod->intr_free_q);
- }
-}
-
-void
-bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
- struct bna_res_info *res_info)
-{
- int i;
- int j;
- int count;
- u8 offset;
- struct bna_doorbell_qset *qset;
- unsigned long off;
-
- ib_mod->bna = bna;
-
- ib_mod->ib = (struct bna_ib *)
- res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
- ib_mod->intr = (struct bna_intr *)
- res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
- ib_mod->idx_seg = (struct bna_ibidx_seg *)
- res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;
-
- INIT_LIST_HEAD(&ib_mod->ib_free_q);
- INIT_LIST_HEAD(&ib_mod->intr_free_q);
- INIT_LIST_HEAD(&ib_mod->intr_active_q);
-
- for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
- INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);
-
- for (i = 0; i < BFI_MAX_IB; i++) {
- ib_mod->ib[i].ib_id = i;
-
- ib_mod->ib[i].ib_seg_host_addr_kva =
- res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
- ib_mod->ib[i].ib_seg_host_addr.lsb =
- res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
- ib_mod->ib[i].ib_seg_host_addr.msb =
- res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
-
- qset = (struct bna_doorbell_qset *)0;
- off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
- * (0x20 >> 2)]);
- ib_mod->ib[i].door_bell.doorbell_addr = off +
- BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
-
- bfa_q_qe_init(&ib_mod->ib[i].qe);
- list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);
-
- bfa_q_qe_init(&ib_mod->intr[i].qe);
- list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
- }
-
- count = 0;
- offset = 0;
- for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
- for (j = 0; j < ibidx_pool[i].pool_size; j++) {
- bfa_q_qe_init(&ib_mod->idx_seg[count]);
- ib_mod->idx_seg[count].ib_seg_size =
- ibidx_pool[i].pool_entry_size;
- ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
- list_add_tail(&ib_mod->idx_seg[count].qe,
- &ib_mod->ibidx_seg_pool[i]);
- count++;
- offset += ibidx_pool[i].pool_entry_size;
- }
- }
-}
-
-void
-bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
-{
- int i;
- int j;
- struct list_head *qe;
-
- i = 0;
- list_for_each(qe, &ib_mod->ib_free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &ib_mod->intr_free_q)
- i++;
-
- for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
- j = 0;
- list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
- j++;
- }
-
- ib_mod->bna = NULL;
-}
-
-static struct bna_ib *
-bna_ib_get(struct bna_ib_mod *ib_mod,
- enum bna_intr_type intr_type,
- int vector)
-{
- struct bna_ib *ib;
- struct bna_intr *intr;
-
- if (intr_type == BNA_INTR_T_INTX)
- vector = (1 << vector);
-
- intr = bna_intr_get(ib_mod, intr_type, vector);
- if (intr == NULL)
- return NULL;
-
- if (intr->ib) {
- if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
- bna_intr_put(ib_mod, intr);
- return NULL;
- }
- intr->ib->ref_count++;
- return intr->ib;
- }
-
- if (list_empty(&ib_mod->ib_free_q)) {
- bna_intr_put(ib_mod, intr);
- return NULL;
- }
-
- bfa_q_deq(&ib_mod->ib_free_q, &ib);
- bfa_q_qe_init(&ib->qe);
-
- ib->ref_count = 1;
- ib->start_count = 0;
- ib->idx_mask = 0;
-
- ib->intr = intr;
- ib->idx_seg = NULL;
- intr->ib = ib;
-
- ib->bna = ib_mod->bna;
-
- return ib;
-}
-
-static void
-bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
-{
- bna_intr_put(ib_mod, ib->intr);
-
- ib->ref_count--;
-
- if (ib->ref_count == 0) {
- ib->intr = NULL;
- ib->bna = NULL;
- list_add_tail(&ib->qe, &ib_mod->ib_free_q);
- }
-}
-
-/* Returns index offset - starting from 0 */
-static int
-bna_ib_reserve_idx(struct bna_ib *ib)
-{
- struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
- struct bna_ibidx_seg *idx_seg;
- int idx;
- int num_idx;
- int q_idx;
-
- /* Find the first free index position */
- bna_ib_find_free_ibidx(ib->idx_mask, idx);
- if (idx == BFI_IBIDX_MAX_SEGSIZE)
- return -1;
-
- /*
- * Calculate the total number of indexes held by this IB,
- * including the index newly reserved above.
- */
- bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);
-
- /* See if there is a free space in the index segment held by this IB */
- if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
- ib->idx_mask |= (1 << idx);
- return idx;
- }
-
- if (ib->start_count)
- return -1;
-
- /* Allocate a new segment */
- bna_ib_select_segpool(num_idx, q_idx);
- while (1) {
- if (q_idx == BFI_IBIDX_TOTAL_POOLS)
- return -1;
- if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
- break;
- q_idx++;
- }
- bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
- bfa_q_qe_init(&idx_seg->qe);
-
- /* Free the old segment */
- if (ib->idx_seg) {
- bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
- list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
- }
-
- ib->idx_seg = idx_seg;
-
- ib->idx_mask |= (1 << idx);
-
- return idx;
-}
-
-static void
-bna_ib_release_idx(struct bna_ib *ib, int idx)
-{
- struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
- struct bna_ibidx_seg *idx_seg;
- int num_idx;
- int cur_q_idx;
- int new_q_idx;
-
- ib->idx_mask &= ~(1 << idx);
-
- if (ib->start_count)
- return;
-
- bna_ib_count_ibidx(ib->idx_mask, num_idx);
-
- /*
- * Free the segment, if there are no more indexes in the segment
- * held by this IB
- */
- if (!num_idx) {
- bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
- list_add_tail(&ib->idx_seg->qe,
- &ib_mod->ibidx_seg_pool[cur_q_idx]);
- ib->idx_seg = NULL;
- return;
- }
-
- /* See if we can move to a smaller segment */
- bna_ib_select_segpool(num_idx, new_q_idx);
- bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
- while (new_q_idx < cur_q_idx) {
- if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
- break;
- new_q_idx++;
- }
- if (new_q_idx < cur_q_idx) {
- /* Select the new smaller segment */
- bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
- bfa_q_qe_init(&idx_seg->qe);
- /* Free the old segment */
- list_add_tail(&ib->idx_seg->qe,
- &ib_mod->ibidx_seg_pool[cur_q_idx]);
- ib->idx_seg = idx_seg;
- }
-}
-
-static int
-bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
-{
- if (ib->start_count)
- return -1;
-
- ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
- ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
- ib->ib_config.interpkt_count = ib_config->interpkt_count;
- ib->ib_config.ctrl_flags = ib_config->ctrl_flags;
-
- ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
- if (ib->intr->intr_type == BNA_INTR_T_MSIX)
- ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
-
- return 0;
-}
-
-static void
-bna_ib_start(struct bna_ib *ib)
-{
- struct bna_ib_blk_mem ib_cfg;
- struct bna_ib_blk_mem *ib_mem;
- u32 pg_num;
- u32 intx_mask;
- int i;
- void __iomem *base_addr;
- unsigned long off;
-
- ib->start_count++;
-
- if (ib->start_count > 1)
- return;
-
- ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
- ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);
-
- ib_cfg.clsc_n_ctrl_n_msix = (((u32)
- ib->ib_config.coalescing_timeo << 16) |
- ((u32)ib->ib_config.ctrl_flags << 8) |
- (ib->intr->vector));
- ib_cfg.ipkt_n_ent_n_idxof =
- ((u32)
- (ib->ib_config.interpkt_timeo & 0xf) << 16) |
- ((u32)ib->idx_seg->ib_seg_size << 8) |
- (ib->idx_seg->ib_idx_tbl_offset);
- ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
- ib->ib_config.interpkt_count << 24);
-
- pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
- HQM_IB_RAM_BASE_OFFSET);
- writel(pg_num, ib->bna->regs.page_addr);
-
- base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
- HQM_IB_RAM_BASE_OFFSET);
-
- ib_mem = (struct bna_ib_blk_mem *)0;
- off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
- writel(htonl(ib_cfg.host_addr_lo), base_addr + off);
-
- off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
- writel(htonl(ib_cfg.host_addr_hi), base_addr + off);
-
- off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
- writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);
-
- off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
- writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);
-
- off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
- writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);
-
- ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
- (u32)ib->ib_config.coalescing_timeo, 0);
-
- pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
- HQM_INDX_TBL_RAM_BASE_OFFSET);
- writel(pg_num, ib->bna->regs.page_addr);
-
- base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
- HQM_INDX_TBL_RAM_BASE_OFFSET);
- for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
- off = (unsigned long)
- ((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
- writel(0, base_addr + off);
- }
-
- if (ib->intr->intr_type == BNA_INTR_T_INTX) {
- bna_intx_disable(ib->bna, intx_mask);
- intx_mask &= ~(ib->intr->vector);
- bna_intx_enable(ib->bna, intx_mask);
- }
-}
-
-static void
-bna_ib_stop(struct bna_ib *ib)
-{
- u32 intx_mask;
-
- ib->start_count--;
-
- if (ib->start_count == 0) {
- writel(BNA_DOORBELL_IB_INT_DISABLE,
- ib->door_bell.doorbell_addr);
- if (ib->intr->intr_type == BNA_INTR_T_INTX) {
- bna_intx_disable(ib->bna, intx_mask);
- intx_mask |= (ib->intr->vector);
- bna_intx_enable(ib->bna, intx_mask);
- }
- }
-}
-
-static void
-bna_ib_fail(struct bna_ib *ib)
-{
- ib->start_count = 0;
-}
-
-/**
- * RXF
- */
-static void rxf_enable(struct bna_rxf *rxf);
-static void rxf_disable(struct bna_rxf *rxf);
-static void __rxf_config_set(struct bna_rxf *rxf);
-static void __rxf_rit_set(struct bna_rxf *rxf);
-static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
-static int rxf_process_packet_filter(struct bna_rxf *rxf);
-static int rxf_clear_packet_filter(struct bna_rxf *rxf);
-static void rxf_reset_packet_filter(struct bna_rxf *rxf);
-static void rxf_cb_enabled(void *arg, int status);
-static void rxf_cb_disabled(void *arg, int status);
-static void bna_rxf_cb_stats_cleared(void *arg, int status);
-static void __rxf_enable(struct bna_rxf *rxf);
-static void __rxf_disable(struct bna_rxf *rxf);
-
-bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
- enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
- enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
- enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
- enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
- enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
- enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
- enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
- enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
- enum bna_rxf_event);
-
-static struct bfa_sm_table rxf_sm_table[] = {
- {BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
- {BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
- {BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
- {BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
- {BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
- {BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
- {BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
- {BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
- {BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
-};
-
-static void
-bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
-{
- call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
-}
-
-static void
-bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_START:
- bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
- break;
-
- case RXF_E_STOP:
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_FAIL:
- /* No-op */
- break;
-
- case RXF_E_CAM_FLTR_MOD:
- call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
- break;
-
- case RXF_E_STARTED:
- case RXF_E_STOPPED:
- case RXF_E_CAM_FLTR_RESP:
- /**
- * These events are received due to flushing of mbox
- * when device fails
- */
- /* No-op */
- break;
-
- case RXF_E_PAUSE:
- rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
- call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
- break;
-
- case RXF_E_RESUME:
- rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
- call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
-{
- __rxf_config_set(rxf);
- __rxf_rit_set(rxf);
- rxf_enable(rxf);
-}
-
-static void
-bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_STOP:
- /**
- * STOP is originated from bnad. When this happens,
- * it can not be waiting for filter update
- */
- call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
- bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
- break;
-
- case RXF_E_FAIL:
- call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
- call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_CAM_FLTR_MOD:
- /* No-op */
- break;
-
- case RXF_E_STARTED:
- /**
- * Force rxf_process_filter() to go through initial
- * config
- */
- if ((rxf->ucast_active_mac != NULL) &&
- (rxf->ucast_pending_set == 0))
- rxf->ucast_pending_set = 1;
-
- if (rxf->rss_status == BNA_STATUS_T_ENABLED)
- rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-
- rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
-
- bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
- break;
-
- case RXF_E_PAUSE:
- case RXF_E_RESUME:
- rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
-{
- if (!rxf_process_packet_filter(rxf)) {
- /* No more pending CAM entries to update */
- bfa_fsm_set_state(rxf, bna_rxf_sm_started);
- }
-}
-
-static void
-bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_STOP:
- /**
- * STOP is originated from bnad. When this happens,
- * it can not be waiting for filter update
- */
- call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
- bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
- break;
-
- case RXF_E_FAIL:
- rxf_reset_packet_filter(rxf);
- call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
- call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_CAM_FLTR_MOD:
- /* No-op */
- break;
-
- case RXF_E_CAM_FLTR_RESP:
- if (!rxf_process_packet_filter(rxf)) {
- /* No more pending CAM entries to update */
- call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
- bfa_fsm_set_state(rxf, bna_rxf_sm_started);
- }
- break;
-
- case RXF_E_PAUSE:
- case RXF_E_RESUME:
- rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_started_entry(struct bna_rxf *rxf)
-{
- call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);
-
- if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
- if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
- bfa_fsm_send_event(rxf, RXF_E_PAUSE);
- else
- bfa_fsm_send_event(rxf, RXF_E_RESUME);
- }
-
-}
-
-static void
-bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_STOP:
- bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
- /* Hack to get FSM start clearing CAM entries */
- bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
- break;
-
- case RXF_E_FAIL:
- rxf_reset_packet_filter(rxf);
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_CAM_FLTR_MOD:
- bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
- break;
-
- case RXF_E_PAUSE:
- bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
- break;
-
- case RXF_E_RESUME:
- bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
-{
- /**
- * Note: Do not add rxf_clear_packet_filter here.
- * It will overstep mbox when this transition happens:
- * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
- */
-}
-
-static void
-bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_FAIL:
- /**
- * FSM was in the process of stopping, initiated by
- * bnad. When this happens, no one can be waiting for
- * start or filter update
- */
- rxf_reset_packet_filter(rxf);
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_CAM_FLTR_RESP:
- if (!rxf_clear_packet_filter(rxf)) {
- /* No more pending CAM entries to clear */
- bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
- rxf_disable(rxf);
- }
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
-{
- /**
- * NOTE: Do not add rxf_disable here.
- * It will overstep mbox when this transition happens:
- * start_wait -> stop_wait on RXF_E_STOP event
- */
-}
-
-static void
-bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_FAIL:
- /**
- * FSM was in the process of stopping, initiated by
- * bnad. When this happens, no one can be waiting for
- * start or filter update
- */
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_STARTED:
- /**
- * This event is received due to abrupt transition from
- * bna_rxf_sm_start_wait state on receiving
- * RXF_E_STOP event
- */
- rxf_disable(rxf);
- break;
-
- case RXF_E_STOPPED:
- /**
- * FSM was in the process of stopping, initiated by
- * bnad. When this happens, no one can be waiting for
- * start or filter update
- */
- bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
- break;
-
- case RXF_E_PAUSE:
- rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
- break;
-
- case RXF_E_RESUME:
- rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
-{
- rxf->rxf_flags &=
- ~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
- __rxf_disable(rxf);
-}
-
-static void
-bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_FAIL:
- /**
- * FSM was in the process of disabling rxf, initiated by
- * bnad.
- */
- call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_STOPPED:
- rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
- call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
- bfa_fsm_set_state(rxf, bna_rxf_sm_started);
- break;
-
- /*
- * Since PAUSE/RESUME can only be sent by bnad, we don't expect
- * any other event during these states
- */
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
-{
- rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
- rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
- __rxf_enable(rxf);
-}
-
-static void
-bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_FAIL:
- /**
- * FSM was in the process of disabling rxf, initiated by
- * bnad.
- */
- call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_STARTED:
- rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
- call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
- bfa_fsm_set_state(rxf, bna_rxf_sm_started);
- break;
-
- /*
- * Since PAUSE/RESUME can only be sent by bnad, we don't expect
- * any other event during these states
- */
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
-{
- __bna_rxf_stat_clr(rxf);
-}
-
-static void
-bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_FAIL:
- case RXF_E_STAT_CLEARED:
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-__rxf_enable(struct bna_rxf *rxf)
-{
- struct bfi_ll_rxf_multi_req ll_req;
- u32 bm[2] = {0, 0};
-
- if (rxf->rxf_id < 32)
- bm[0] = 1 << rxf->rxf_id;
- else
- bm[1] = 1 << (rxf->rxf_id - 32);
-
- bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
- ll_req.rxf_id_mask[0] = htonl(bm[0]);
- ll_req.rxf_id_mask[1] = htonl(bm[1]);
- ll_req.enable = 1;
-
- bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
- rxf_cb_enabled, rxf);
-
- bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
-}
-
-static void
-__rxf_disable(struct bna_rxf *rxf)
-{
- struct bfi_ll_rxf_multi_req ll_req;
- u32 bm[2] = {0, 0};
-
- if (rxf->rxf_id < 32)
- bm[0] = 1 << rxf->rxf_id;
- else
- bm[1] = 1 << (rxf->rxf_id - 32);
-
- bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
- ll_req.rxf_id_mask[0] = htonl(bm[0]);
- ll_req.rxf_id_mask[1] = htonl(bm[1]);
- ll_req.enable = 0;
-
- bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
- rxf_cb_disabled, rxf);
-
- bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
-}
-
-static void
-__rxf_config_set(struct bna_rxf *rxf)
-{
- u32 i;
- struct bna_rss_mem *rss_mem;
- struct bna_rx_fndb_ram *rx_fndb_ram;
- struct bna *bna = rxf->rx->bna;
- void __iomem *base_addr;
- unsigned long off;
-
- base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
- RSS_TABLE_BASE_OFFSET);
-
- rss_mem = (struct bna_rss_mem *)0;
-
- /* Configure RSS if required */
- if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
- /* configure RSS Table */
- writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
- bna->port_num, RSS_TABLE_BASE_OFFSET),
- bna->regs.page_addr);
-
- /* temporarily disable RSS, while hash value is written */
- off = (unsigned long)&rss_mem[0].type_n_hash;
- writel(0, base_addr + off);
-
- for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
- off = (unsigned long)
- &rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
- writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
- base_addr + off);
- }
-
- off = (unsigned long)&rss_mem[0].type_n_hash;
- writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
- base_addr + off);
- }
-
- /* Configure RxF */
- writel(BNA_GET_PAGE_NUM(
- LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
- RX_FNDB_RAM_BASE_OFFSET),
- bna->regs.page_addr);
-
- base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
- RX_FNDB_RAM_BASE_OFFSET);
-
- rx_fndb_ram = (struct bna_rx_fndb_ram *)0;
-
- /* We always use RSS table 0 */
- off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
- writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
- base_addr + off);
-
- /* small large buffer enable/disable */
- off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
- writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
- base_addr + off);
-
- /* RIT offset, HDS forced offset, multicast RxQ Id */
- off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
- writel((rxf->rit_segment->rit_offset << 16) |
- (rxf->forced_offset << 8) |
- (rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
- base_addr + off);
-
- /*
- * default vlan tag, default function enable, strip vlan bytes,
- * HDS type, header size
- */
-
- off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
- writel(((u32)rxf->default_vlan_tag << 16) |
- (rxf->ctrl_flags &
- (BNA_RXF_CF_DEFAULT_VLAN |
- BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
- BNA_RXF_CF_VLAN_STRIP)) |
- (rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
- rxf->hds_cfg.header_size,
- base_addr + off);
-}
-
-void
-__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
-{
- struct bna *bna = rxf->rx->bna;
- int i;
-
- writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
- (bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
- bna->regs.page_addr);
-
- if (status == BNA_STATUS_T_ENABLED) {
- /* enable VLAN filtering on this function */
- for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
- writel(rxf->vlan_filter_table[i],
- BNA_GET_VLAN_MEM_ENTRY_ADDR
- (bna->pcidev.pci_bar_kva, rxf->rxf_id,
- i * 32));
- }
- } else {
- /* disable VLAN filtering on this function */
- for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
- writel(0xffffffff,
- BNA_GET_VLAN_MEM_ENTRY_ADDR
- (bna->pcidev.pci_bar_kva, rxf->rxf_id,
- i * 32));
- }
- }
-}
-
-static void
-__rxf_rit_set(struct bna_rxf *rxf)
-{
- struct bna *bna = rxf->rx->bna;
- struct bna_rit_mem *rit_mem;
- int i;
- void __iomem *base_addr;
- unsigned long off;
-
- base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
- FUNCTION_TO_RXQ_TRANSLATE);
-
- rit_mem = (struct bna_rit_mem *)0;
-
- writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
- FUNCTION_TO_RXQ_TRANSLATE),
- bna->regs.page_addr);
-
- for (i = 0; i < rxf->rit_segment->rit_size; i++) {
- off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
- writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
- rxf->rit_segment->rit[i].small_rxq_id,
- base_addr + off);
- }
-}
-
-static void
-__bna_rxf_stat_clr(struct bna_rxf *rxf)
-{
- struct bfi_ll_stats_req ll_req;
- u32 bm[2] = {0, 0};
-
- if (rxf->rxf_id < 32)
- bm[0] = 1 << rxf->rxf_id;
- else
- bm[1] = 1 << (rxf->rxf_id - 32);
-
- bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
- ll_req.stats_mask = 0;
- ll_req.txf_id_mask[0] = 0;
- ll_req.txf_id_mask[1] = 0;
-
- ll_req.rxf_id_mask[0] = htonl(bm[0]);
- ll_req.rxf_id_mask[1] = htonl(bm[1]);
-
- bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
- bna_rxf_cb_stats_cleared, rxf);
- bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
-}
-
-static void
-rxf_enable(struct bna_rxf *rxf)
-{
- if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
- bfa_fsm_send_event(rxf, RXF_E_STARTED);
- else {
- rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
- __rxf_enable(rxf);
- }
-}
-
-static void
-rxf_cb_enabled(void *arg, int status)
-{
- struct bna_rxf *rxf = (struct bna_rxf *)arg;
-
- bfa_q_qe_init(&rxf->mbox_qe.qe);
- bfa_fsm_send_event(rxf, RXF_E_STARTED);
-}
-
-static void
-rxf_disable(struct bna_rxf *rxf)
-{
- if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
- bfa_fsm_send_event(rxf, RXF_E_STOPPED);
- else
- rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
- __rxf_disable(rxf);
-}
-
-static void
-rxf_cb_disabled(void *arg, int status)
-{
- struct bna_rxf *rxf = (struct bna_rxf *)arg;
-
- bfa_q_qe_init(&rxf->mbox_qe.qe);
- bfa_fsm_send_event(rxf, RXF_E_STOPPED);
-}
-
-void
-rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
-{
- struct bna_rxf *rxf = (struct bna_rxf *)arg;
-
- bfa_q_qe_init(&rxf->mbox_qe.qe);
-
- bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
-}
-
-static void
-bna_rxf_cb_stats_cleared(void *arg, int status)
-{
- struct bna_rxf *rxf = (struct bna_rxf *)arg;
-
- bfa_q_qe_init(&rxf->mbox_qe.qe);
- bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
-}
-
-void
-rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
- const struct bna_mac *mac_addr)
-{
- struct bfi_ll_mac_addr_req req;
-
- bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
-
- req.rxf_id = rxf->rxf_id;
- memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);
-
- bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
- rxf_cb_cam_fltr_mbox_cmd, rxf);
-
- bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
-}
-
-static int
-rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
-{
- struct bna_mac *mac = NULL;
- struct list_head *qe;
-
- /* Add multicast entries */
- if (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
- list_add_tail(&mac->qe, &rxf->mcast_active_q);
- return 1;
- }
-
- /* Delete multicast entries previousely added */
- if (!list_empty(&rxf->mcast_pending_del_q)) {
- bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
- return 1;
- }
-
- return 0;
-}
-
-static int
-rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
-{
- /* Apply the VLAN filter */
- if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
- rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
- if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
- __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
- }
-
- /* Apply RSS configuration */
- if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
- rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
- if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
- /* RSS is being disabled */
- rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
- __rxf_rit_set(rxf);
- __rxf_config_set(rxf);
- } else {
- /* RSS is being enabled or reconfigured */
- rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
- __rxf_rit_set(rxf);
- __rxf_config_set(rxf);
- }
- }
-
- return 0;
-}
-
-/**
- * Processes pending ucast, mcast entry addition/deletion and issues mailbox
- * command. Also processes pending filter configuration - promiscuous mode,
- * default mode, allmutli mode and issues mailbox command or directly applies
- * to h/w
- */
-static int
-rxf_process_packet_filter(struct bna_rxf *rxf)
-{
- /* Set the default MAC first */
- if (rxf->ucast_pending_set > 0) {
- rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
- rxf->ucast_active_mac);
- rxf->ucast_pending_set--;
- return 1;
- }
-
- if (rxf_process_packet_filter_ucast(rxf))
- return 1;
-
- if (rxf_process_packet_filter_mcast(rxf))
- return 1;
-
- if (rxf_process_packet_filter_promisc(rxf))
- return 1;
-
- if (rxf_process_packet_filter_allmulti(rxf))
- return 1;
-
- if (rxf_process_packet_filter_vlan(rxf))
- return 1;
-
- return 0;
-}
-
-static int
-rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
-{
- struct bna_mac *mac = NULL;
- struct list_head *qe;
-
- /* 3. delete pending mcast entries */
- if (!list_empty(&rxf->mcast_pending_del_q)) {
- bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
- return 1;
- }
-
- /* 4. clear active mcast entries; move them to pending_add_q */
- if (!list_empty(&rxf->mcast_active_q)) {
- bfa_q_deq(&rxf->mcast_active_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
- list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
- return 1;
- }
-
- return 0;
-}
-
-/**
- * In the rxf stop path, processes pending ucast/mcast delete queue and issues
- * the mailbox command. Moves the active ucast/mcast entries to pending add q,
- * so that they are added to CAM again in the rxf start path. Moves the current
- * filter settings - promiscuous, default, allmutli - to pending filter
- * configuration
- */
-static int
-rxf_clear_packet_filter(struct bna_rxf *rxf)
-{
- if (rxf_clear_packet_filter_ucast(rxf))
- return 1;
-
- if (rxf_clear_packet_filter_mcast(rxf))
- return 1;
-
- /* 5. clear active default MAC in the CAM */
- if (rxf->ucast_pending_set > 0)
- rxf->ucast_pending_set = 0;
-
- if (rxf_clear_packet_filter_promisc(rxf))
- return 1;
-
- if (rxf_clear_packet_filter_allmulti(rxf))
- return 1;
-
- return 0;
-}
-
-static void
-rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
-{
- struct list_head *qe;
- struct bna_mac *mac;
-
- /* 3. Move active mcast entries to pending_add_q */
- while (!list_empty(&rxf->mcast_active_q)) {
- bfa_q_deq(&rxf->mcast_active_q, &qe);
- bfa_q_qe_init(qe);
- list_add_tail(qe, &rxf->mcast_pending_add_q);
- }
-
- /* 4. Throw away delete pending mcast entries */
- while (!list_empty(&rxf->mcast_pending_del_q)) {
- bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
- }
-}
-
-/**
- * In the rxf fail path, throws away the ucast/mcast entries pending for
- * deletion, moves all active ucast/mcast entries to pending queue so that
- * they are added back to CAM in the rxf start path. Also moves the current
- * filter configuration to pending filter configuration.
- */
-static void
-rxf_reset_packet_filter(struct bna_rxf *rxf)
-{
- rxf_reset_packet_filter_ucast(rxf);
-
- rxf_reset_packet_filter_mcast(rxf);
-
- /* 5. Turn off ucast set flag */
- rxf->ucast_pending_set = 0;
-
- rxf_reset_packet_filter_promisc(rxf);
-
- rxf_reset_packet_filter_allmulti(rxf);
-}
-
-static void
-bna_rxf_init(struct bna_rxf *rxf,
- struct bna_rx *rx,
- struct bna_rx_config *q_config)
-{
- struct list_head *qe;
- struct bna_rxp *rxp;
-
- /* rxf_id is initialized during rx_mod init */
- rxf->rx = rx;
-
- INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
- INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
- rxf->ucast_pending_set = 0;
- INIT_LIST_HEAD(&rxf->ucast_active_q);
- rxf->ucast_active_mac = NULL;
-
- INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
- INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
- INIT_LIST_HEAD(&rxf->mcast_active_q);
-
- bfa_q_qe_init(&rxf->mbox_qe.qe);
-
- if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
- rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;
-
- rxf->rxf_oper_state = (q_config->paused) ?
- BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;
-
- bna_rxf_adv_init(rxf, rx, q_config);
-
- rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
- q_config->num_paths);
-
- list_for_each(qe, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe;
- if (q_config->rxp_type == BNA_RXP_SINGLE)
- rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
- else
- rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
- break;
- }
-
- rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
- memset(rxf->vlan_filter_table, 0,
- (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
-
- /* Set up VLAN 0 for pure priority tagged packets */
- rxf->vlan_filter_table[0] |= 1;
-
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
-}
-
-static void
-bna_rxf_uninit(struct bna_rxf *rxf)
-{
- struct bna *bna = rxf->rx->bna;
- struct bna_mac *mac;
-
- bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
- rxf->rit_segment = NULL;
-
- rxf->ucast_pending_set = 0;
-
- while (!list_empty(&rxf->ucast_pending_add_q)) {
- bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
- bfa_q_qe_init(&mac->qe);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
- }
-
- if (rxf->ucast_active_mac) {
- bfa_q_qe_init(&rxf->ucast_active_mac->qe);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
- rxf->ucast_active_mac);
- rxf->ucast_active_mac = NULL;
- }
-
- while (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
- bfa_q_qe_init(&mac->qe);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
- }
-
- /* Turn off pending promisc mode */
- if (is_promisc_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- /* system promisc state should be pending */
- BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id));
- promisc_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- bna->rxf_promisc_id = BFI_MAX_RXF;
- }
- /* Promisc mode should not be active */
- BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);
-
- /* Turn off pending all-multi mode */
- if (is_allmulti_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- allmulti_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- }
- /* Allmulti mode should not be active */
- BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);
-
- rxf->rx = NULL;
-}
-
-static void
-bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
-{
- bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
- if (rx->rxf.rxf_id < 32)
- rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
- else
- rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
- 1 << (rx->rxf.rxf_id - 32));
-}
-
-static void
-bna_rxf_start(struct bna_rxf *rxf)
-{
- rxf->start_cbfn = bna_rx_cb_rxf_started;
- rxf->start_cbarg = rxf->rx;
- rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
- bfa_fsm_send_event(rxf, RXF_E_START);
-}
-
-static void
-bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
-{
- bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
- if (rx->rxf.rxf_id < 32)
- rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
- else
- rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
- 1 << (rx->rxf.rxf_id - 32);
-}
-
-static void
-bna_rxf_stop(struct bna_rxf *rxf)
-{
- rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
- rxf->stop_cbarg = rxf->rx;
- bfa_fsm_send_event(rxf, RXF_E_STOP);
-}
-
-static void
-bna_rxf_fail(struct bna_rxf *rxf)
-{
- rxf->rxf_flags |= BNA_RXF_FL_FAILED;
- bfa_fsm_send_event(rxf, RXF_E_FAIL);
-}
-
-int
-bna_rxf_state_get(struct bna_rxf *rxf)
-{
- return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
-}
-
-enum bna_cb_status
-bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *,
- enum bna_cb_status))
-{
- struct bna_rxf *rxf = &rx->rxf;
-
- if (rxf->ucast_active_mac == NULL) {
- rxf->ucast_active_mac =
- bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
- if (rxf->ucast_active_mac == NULL)
- return BNA_CB_UCAST_CAM_FULL;
- bfa_q_qe_init(&rxf->ucast_active_mac->qe);
- }
-
- memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
- rxf->ucast_pending_set++;
- rxf->cam_fltr_cbfn = cbfn;
- rxf->cam_fltr_cbarg = rx->bna->bnad;
-
- bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-
- return BNA_CB_SUCCESS;
-}
-
-enum bna_cb_status
-bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
- void (*cbfn)(struct bnad *, struct bna_rx *,
- enum bna_cb_status))
-{
- struct bna_rxf *rxf = &rx->rxf;
- struct list_head *qe;
- struct bna_mac *mac;
-
- /* Check if already added */
- list_for_each(qe, &rxf->mcast_active_q) {
- mac = (struct bna_mac *)qe;
- if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
- if (cbfn)
- (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
- return BNA_CB_SUCCESS;
- }
- }
-
- /* Check if pending addition */
- list_for_each(qe, &rxf->mcast_pending_add_q) {
- mac = (struct bna_mac *)qe;
- if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
- if (cbfn)
- (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
- return BNA_CB_SUCCESS;
- }
- }
-
- mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
- if (mac == NULL)
- return BNA_CB_MCAST_LIST_FULL;
- bfa_q_qe_init(&mac->qe);
- memcpy(mac->addr, addr, ETH_ALEN);
- list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
-
- rxf->cam_fltr_cbfn = cbfn;
- rxf->cam_fltr_cbarg = rx->bna->bnad;
-
- bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-
- return BNA_CB_SUCCESS;
-}
-
-enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
- void (*cbfn)(struct bnad *, struct bna_rx *,
- enum bna_cb_status))
-{
- struct bna_rxf *rxf = &rx->rxf;
- struct list_head list_head;
- struct list_head *qe;
- u8 *mcaddr;
- struct bna_mac *mac;
- struct bna_mac *mac1;
- int skip;
- int delete;
- int need_hw_config = 0;
- int i;
-
- /* Allocate nodes */
- INIT_LIST_HEAD(&list_head);
- for (i = 0, mcaddr = mclist; i < count; i++) {
- mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
- if (mac == NULL)
- goto err_return;
- bfa_q_qe_init(&mac->qe);
- memcpy(mac->addr, mcaddr, ETH_ALEN);
- list_add_tail(&mac->qe, &list_head);
-
- mcaddr += ETH_ALEN;
- }
-
- /* Schedule for addition */
- while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
-
- skip = 0;
-
- /* Skip if already added */
- list_for_each(qe, &rxf->mcast_active_q) {
- mac1 = (struct bna_mac *)qe;
- if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
- mac);
- skip = 1;
- break;
- }
- }
-
- if (skip)
- continue;
-
- /* Skip if pending addition */
- list_for_each(qe, &rxf->mcast_pending_add_q) {
- mac1 = (struct bna_mac *)qe;
- if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
- mac);
- skip = 1;
- break;
- }
- }
-
- if (skip)
- continue;
-
- need_hw_config = 1;
- list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
- }
-
- /**
- * Delete the entries that are in the pending_add_q but not
- * in the new list
- */
- while (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
- if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
- delete = 0;
- break;
- }
- mcaddr += ETH_ALEN;
- }
- if (delete)
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
- else
- list_add_tail(&mac->qe, &list_head);
- }
- while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
- }
-
- /**
- * Schedule entries for deletion that are in the active_q but not
- * in the new list
- */
- while (!list_empty(&rxf->mcast_active_q)) {
- bfa_q_deq(&rxf->mcast_active_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
- if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
- delete = 0;
- break;
- }
- mcaddr += ETH_ALEN;
- }
- if (delete) {
- list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
- need_hw_config = 1;
- } else {
- list_add_tail(&mac->qe, &list_head);
- }
- }
- while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- list_add_tail(&mac->qe, &rxf->mcast_active_q);
- }
-
- if (need_hw_config) {
- rxf->cam_fltr_cbfn = cbfn;
- rxf->cam_fltr_cbarg = rx->bna->bnad;
- bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
- } else if (cbfn)
- (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-
- return BNA_CB_SUCCESS;
-
-err_return:
- while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
- }
-
- return BNA_CB_MCAST_LIST_FULL;
-}
-
-void
-bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
-{
- struct bna_rxf *rxf = &rx->rxf;
- int index = (vlan_id >> 5);
- int bit = (1 << (vlan_id & 0x1F));
-
- rxf->vlan_filter_table[index] |= bit;
- if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
- rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
- bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
- }
-}
-
-void
-bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
-{
- struct bna_rxf *rxf = &rx->rxf;
- int index = (vlan_id >> 5);
- int bit = (1 << (vlan_id & 0x1F));
-
- rxf->vlan_filter_table[index] &= ~bit;
- if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
- rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
- bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
- }
-}
-
-/**
- * RX
- */
-#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
- struct bna_doorbell_qset *_qset; \
- unsigned long off; \
- (q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
- (q)->rcb->q_depth = (qdepth); \
- (q)->rcb->unmap_q = unmapq_mem; \
- (q)->rcb->rxq = (q); \
- (q)->rcb->cq = &(rxp)->cq; \
- (q)->rcb->bnad = (bna)->bnad; \
- _qset = (struct bna_doorbell_qset *)0; \
- off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
- (q)->rcb->q_dbell = off + \
- BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
- (q)->rcb->id = _id; \
-} while (0)
-
-#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
- (qcfg)->num_paths : ((qcfg)->num_paths * 2))
-
-#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
- (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
-
-#define call_rx_stop_callback(rx, status) \
- if ((rx)->stop_cbfn) { \
- (*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \
- (rx)->stop_cbfn = NULL; \
- (rx)->stop_cbarg = NULL; \
- }
-
-/*
- * Since rx_enable is synchronous callback, there is no start_cbfn required.
- * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
- * for each rxpath.
- */
-
-#define call_rx_disable_cbfn(rx, status) \
- if ((rx)->disable_cbfn) { \
- (*(rx)->disable_cbfn)((rx)->disable_cbarg, \
- status); \
- (rx)->disable_cbfn = NULL; \
- (rx)->disable_cbarg = NULL; \
- } \
-
-#define rxqs_reqd(type, num_rxqs) \
- (((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))
-
-#define rx_ib_fail(rx) \
-do { \
- struct bna_rxp *rxp; \
- struct list_head *qe; \
- list_for_each(qe, &(rx)->rxp_q) { \
- rxp = (struct bna_rxp *)qe; \
- bna_ib_fail(rxp->cq.ib); \
- } \
-} while (0)
-
-static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
-static void __bna_rxq_start(struct bna_rxq *rxq);
-static void __bna_cq_start(struct bna_cq *cq);
-static void bna_rit_create(struct bna_rx *rx);
-static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
-static void bna_rx_cb_rxq_stopped_all(void *arg);
-
-bfa_fsm_state_decl(bna_rx, stopped,
- struct bna_rx, enum bna_rx_event);
-bfa_fsm_state_decl(bna_rx, rxf_start_wait,
- struct bna_rx, enum bna_rx_event);
-bfa_fsm_state_decl(bna_rx, started,
- struct bna_rx, enum bna_rx_event);
-bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
- struct bna_rx, enum bna_rx_event);
-bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
- struct bna_rx, enum bna_rx_event);
-
-static const struct bfa_sm_table rx_sm_table[] = {
- {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
- {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
- {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
- {BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
- {BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
-};
-
-static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
-{
- struct bna_rxp *rxp;
- struct list_head *qe_rxp;
-
- list_for_each(qe_rxp, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe_rxp;
- rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
- }
-
- call_rx_stop_callback(rx, BNA_CB_SUCCESS);
-}
-
-static void bna_rx_sm_stopped(struct bna_rx *rx,
- enum bna_rx_event event)
-{
- switch (event) {
- case RX_E_START:
- bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
- break;
- case RX_E_STOP:
- call_rx_stop_callback(rx, BNA_CB_SUCCESS);
- break;
- case RX_E_FAIL:
- /* no-op */
- break;
- default:
- bfa_sm_fault(event);
- break;
- }
-
-}
-
-static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
-{
- struct bna_rxp *rxp;
- struct list_head *qe_rxp;
- struct bna_rxq *q0 = NULL, *q1 = NULL;
-
- /* Setup the RIT */
- bna_rit_create(rx);
-
- list_for_each(qe_rxp, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe_rxp;
- bna_ib_start(rxp->cq.ib);
- GET_RXQS(rxp, q0, q1);
- q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
- __bna_rxq_start(q0);
- rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
- if (q1) {
- __bna_rxq_start(q1);
- rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
- }
- __bna_cq_start(&rxp->cq);
- }
-
- bna_rxf_start(&rx->rxf);
-}
-
-static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
- enum bna_rx_event event)
-{
- switch (event) {
- case RX_E_STOP:
- bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
- break;
- case RX_E_FAIL:
- bfa_fsm_set_state(rx, bna_rx_sm_stopped);
- rx_ib_fail(rx);
- bna_rxf_fail(&rx->rxf);
- break;
- case RX_E_RXF_STARTED:
- bfa_fsm_set_state(rx, bna_rx_sm_started);
- break;
- default:
- bfa_sm_fault(event);
- break;
- }
-}
-
-void
-bna_rx_sm_started_entry(struct bna_rx *rx)
-{
- struct bna_rxp *rxp;
- struct list_head *qe_rxp;
-
- /* Start IB */
- list_for_each(qe_rxp, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe_rxp;
- bna_ib_ack(&rxp->cq.ib->door_bell, 0);
- }
-
- bna_llport_rx_started(&rx->bna->port.llport);
-}
-
-void
-bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
-{
- switch (event) {
- case RX_E_FAIL:
- bna_llport_rx_stopped(&rx->bna->port.llport);
- bfa_fsm_set_state(rx, bna_rx_sm_stopped);
- rx_ib_fail(rx);
- bna_rxf_fail(&rx->rxf);
- break;
- case RX_E_STOP:
- bna_llport_rx_stopped(&rx->bna->port.llport);
- bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
- break;
- default:
- bfa_sm_fault(event);
- break;
- }
-}
-
-void
-bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
-{
- bna_rxf_stop(&rx->rxf);
-}
-
-void
-bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
-{
- switch (event) {
- case RX_E_RXF_STOPPED:
- bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
- break;
- case RX_E_RXF_STARTED:
- /**
- * RxF was in the process of starting up when
- * RXF_E_STOP was issued. Ignore this event
- */
- break;
- case RX_E_FAIL:
- bfa_fsm_set_state(rx, bna_rx_sm_stopped);
- rx_ib_fail(rx);
- bna_rxf_fail(&rx->rxf);
- break;
- default:
- bfa_sm_fault(event);
- break;
- }
-
-}
-
-void
-bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
-{
- struct bna_rxp *rxp = NULL;
- struct bna_rxq *q0 = NULL;
- struct bna_rxq *q1 = NULL;
- struct list_head *qe;
- u32 rxq_mask[2] = {0, 0};
-
- /* Only one call to multi-rxq-stop for all RXPs in this RX */
- bfa_wc_up(&rx->rxq_stop_wc);
- list_for_each(qe, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe;
- GET_RXQS(rxp, q0, q1);
- if (q0->rxq_id < 32)
- rxq_mask[0] |= ((u32)1 << q0->rxq_id);
- else
- rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
- if (q1) {
- if (q1->rxq_id < 32)
- rxq_mask[0] |= ((u32)1 << q1->rxq_id);
- else
- rxq_mask[1] |= ((u32)
- 1 << (q1->rxq_id - 32));
- }
- }
-
- __bna_multi_rxq_stop(rxp, rxq_mask);
-}
-
-void
-bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
-{
- struct bna_rxp *rxp = NULL;
- struct list_head *qe;
-
- switch (event) {
- case RX_E_RXQ_STOPPED:
- list_for_each(qe, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe;
- bna_ib_stop(rxp->cq.ib);
- }
- /* Fall through */
- case RX_E_FAIL:
- bfa_fsm_set_state(rx, bna_rx_sm_stopped);
- break;
- default:
- bfa_sm_fault(event);
- break;
- }
-}
-
-void
-__bna_multi_rxq_stop(struct bna_rxp *rxp, u32 * rxq_id_mask)
-{
- struct bfi_ll_q_stop_req ll_req;
-
- bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
- ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
- ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
- bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
- bna_rx_cb_multi_rxq_stopped, rxp);
- bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
-}
-
-void
-__bna_rxq_start(struct bna_rxq *rxq)
-{
- struct bna_rxtx_q_mem *q_mem;
- struct bna_rxq_mem rxq_cfg, *rxq_mem;
- struct bna_dma_addr cur_q_addr;
- /* struct bna_doorbell_qset *qset; */
- struct bna_qpt *qpt;
- u32 pg_num;
- struct bna *bna = rxq->rx->bna;
- void __iomem *base_addr;
- unsigned long off;
-
- qpt = &rxq->qpt;
- cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
-
- rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
- rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
- rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
- rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;
-
- rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
- rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
- (qpt->page_size >> 2);
- rxq_cfg.sg_n_cq_n_cns_ptr =
- ((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
- rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
- BNA_Q_IDLE_STATE;
- rxq_cfg.next_qid = 0x0 | (0x3 << 8);
-
- /* Write the page number register */
- pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
- HQM_RXTX_Q_RAM_BASE_OFFSET);
- writel(pg_num, bna->regs.page_addr);
-
- /* Write to h/w */
- base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
- HQM_RXTX_Q_RAM_BASE_OFFSET);
-
- q_mem = (struct bna_rxtx_q_mem *)0;
- rxq_mem = &q_mem[rxq->rxq_id].rxq;
-
- off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
- writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);
-
- off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
- writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);
-
- off = (unsigned long)&rxq_mem->cur_q_entry_lo;
- writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);
-
- off = (unsigned long)&rxq_mem->cur_q_entry_hi;
- writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);
-
- off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
- writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
-
- off = (unsigned long)&rxq_mem->entry_n_pg_size;
- writel(rxq_cfg.entry_n_pg_size, base_addr + off);
-
- off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
- writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);
-
- off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
- writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);
-
- off = (unsigned long)&rxq_mem->next_qid;
- writel(rxq_cfg.next_qid, base_addr + off);
-
- rxq->rcb->producer_index = 0;
- rxq->rcb->consumer_index = 0;
-}
-
-void
-__bna_cq_start(struct bna_cq *cq)
-{
- struct bna_cq_mem cq_cfg, *cq_mem;
- const struct bna_qpt *qpt;
- struct bna_dma_addr cur_q_addr;
- u32 pg_num;
- struct bna *bna = cq->rx->bna;
- void __iomem *base_addr;
- unsigned long off;
-
- qpt = &cq->qpt;
- cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
-
- /*
- * Fill out structure, to be subsequently written
- * to hardware
- */
- cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
- cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
- cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
- cq_cfg.cur_q_entry_hi = cur_q_addr.msb;
-
- cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
- cq_cfg.entry_n_pg_size =
- ((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
- cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
- ((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
- cq_cfg.q_state = BNA_Q_IDLE_STATE;
-
- /* Write the page number register */
- pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
- HQM_CQ_RAM_BASE_OFFSET);
-
- writel(pg_num, bna->regs.page_addr);
-
- /* H/W write */
- base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
- HQM_CQ_RAM_BASE_OFFSET);
-
- cq_mem = (struct bna_cq_mem *)0;
-
- off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
- writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);
-
- off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
- writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);
-
- off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
- writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);
-
- off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
- writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);
-
- off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
- writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
-
- off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
- writel(cq_cfg.entry_n_pg_size, base_addr + off);
-
- off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
- writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);
-
- off = (unsigned long)&cq_mem[cq->cq_id].q_state;
- writel(cq_cfg.q_state, base_addr + off);
-
- cq->ccb->producer_index = 0;
- *(cq->ccb->hw_producer_index) = 0;
-}
-
-void
-bna_rit_create(struct bna_rx *rx)
-{
- struct list_head *qe_rxp;
- struct bna_rxp *rxp;
- struct bna_rxq *q0 = NULL;
- struct bna_rxq *q1 = NULL;
- int offset;
-
- offset = 0;
- list_for_each(qe_rxp, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe_rxp;
- GET_RXQS(rxp, q0, q1);
- rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
- rx->rxf.rit_segment->rit[offset].small_rxq_id =
- (q1 ? q1->rxq_id : 0);
- offset++;
- }
-}
-
-static int
-_rx_can_satisfy(struct bna_rx_mod *rx_mod,
- struct bna_rx_config *rx_cfg)
-{
- if ((rx_mod->rx_free_count == 0) ||
- (rx_mod->rxp_free_count == 0) ||
- (rx_mod->rxq_free_count == 0))
- return 0;
-
- if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
- if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
- (rx_mod->rxq_free_count < rx_cfg->num_paths))
- return 0;
- } else {
- if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
- (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
- return 0;
- }
-
- if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
- return 0;
-
- return 1;
-}
-
-static struct bna_rxq *
-_get_free_rxq(struct bna_rx_mod *rx_mod)
-{
- struct bna_rxq *rxq = NULL;
- struct list_head *qe = NULL;
-
- bfa_q_deq(&rx_mod->rxq_free_q, &qe);
- if (qe) {
- rx_mod->rxq_free_count--;
- rxq = (struct bna_rxq *)qe;
- }
- return rxq;
-}
-
-static void
-_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
-{
- bfa_q_qe_init(&rxq->qe);
- list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
- rx_mod->rxq_free_count++;
-}
-
-static struct bna_rxp *
-_get_free_rxp(struct bna_rx_mod *rx_mod)
-{
- struct list_head *qe = NULL;
- struct bna_rxp *rxp = NULL;
-
- bfa_q_deq(&rx_mod->rxp_free_q, &qe);
- if (qe) {
- rx_mod->rxp_free_count--;
-
- rxp = (struct bna_rxp *)qe;
- }
-
- return rxp;
-}
-
-static void
-_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
-{
- bfa_q_qe_init(&rxp->qe);
- list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
- rx_mod->rxp_free_count++;
-}
-
-static struct bna_rx *
-_get_free_rx(struct bna_rx_mod *rx_mod)
-{
- struct list_head *qe = NULL;
- struct bna_rx *rx = NULL;
-
- bfa_q_deq(&rx_mod->rx_free_q, &qe);
- if (qe) {
- rx_mod->rx_free_count--;
-
- rx = (struct bna_rx *)qe;
- bfa_q_qe_init(qe);
- list_add_tail(&rx->qe, &rx_mod->rx_active_q);
- }
-
- return rx;
-}
-
-static void
-_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
-{
- bfa_q_qe_init(&rx->qe);
- list_add_tail(&rx->qe, &rx_mod->rx_free_q);
- rx_mod->rx_free_count++;
-}
-
-static void
-_rx_init(struct bna_rx *rx, struct bna *bna)
-{
- rx->bna = bna;
- rx->rx_flags = 0;
-
- INIT_LIST_HEAD(&rx->rxp_q);
-
- rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
- rx->rxq_stop_wc.wc_cbarg = rx;
- rx->rxq_stop_wc.wc_count = 0;
-
- rx->stop_cbfn = NULL;
- rx->stop_cbarg = NULL;
-}
-
-static void
-_rxp_add_rxqs(struct bna_rxp *rxp,
- struct bna_rxq *q0,
- struct bna_rxq *q1)
-{
- switch (rxp->type) {
- case BNA_RXP_SINGLE:
- rxp->rxq.single.only = q0;
- rxp->rxq.single.reserved = NULL;
- break;
- case BNA_RXP_SLR:
- rxp->rxq.slr.large = q0;
- rxp->rxq.slr.small = q1;
- break;
- case BNA_RXP_HDS:
- rxp->rxq.hds.data = q0;
- rxp->rxq.hds.hdr = q1;
- break;
- default:
- break;
- }
-}
-
-static void
-_rxq_qpt_init(struct bna_rxq *rxq,
- struct bna_rxp *rxp,
- u32 page_count,
- u32 page_size,
- struct bna_mem_descr *qpt_mem,
- struct bna_mem_descr *swqpt_mem,
- struct bna_mem_descr *page_mem)
-{
- int i;
-
- rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
- rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
- rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
- rxq->qpt.page_count = page_count;
- rxq->qpt.page_size = page_size;
-
- rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
-
- for (i = 0; i < rxq->qpt.page_count; i++) {
- rxq->rcb->sw_qpt[i] = page_mem[i].kva;
- ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
- page_mem[i].dma.lsb;
- ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
- page_mem[i].dma.msb;
-
- }
-}
-
-static void
-_rxp_cqpt_setup(struct bna_rxp *rxp,
- u32 page_count,
- u32 page_size,
- struct bna_mem_descr *qpt_mem,
- struct bna_mem_descr *swqpt_mem,
- struct bna_mem_descr *page_mem)
-{
- int i;
-
- rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
- rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
- rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
- rxp->cq.qpt.page_count = page_count;
- rxp->cq.qpt.page_size = page_size;
-
- rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
-
- for (i = 0; i < rxp->cq.qpt.page_count; i++) {
- rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
-
- ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
- page_mem[i].dma.lsb;
- ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
- page_mem[i].dma.msb;
-
- }
-}
-
-static void
-_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
-{
- list_add_tail(&rxp->qe, &rx->rxp_q);
-}
-
-static void
-_init_rxmod_queues(struct bna_rx_mod *rx_mod)
-{
- INIT_LIST_HEAD(&rx_mod->rx_free_q);
- INIT_LIST_HEAD(&rx_mod->rxq_free_q);
- INIT_LIST_HEAD(&rx_mod->rxp_free_q);
- INIT_LIST_HEAD(&rx_mod->rx_active_q);
-
- rx_mod->rx_free_count = 0;
- rx_mod->rxq_free_count = 0;
- rx_mod->rxp_free_count = 0;
-}
-
-static void
-_rx_ctor(struct bna_rx *rx, int id)
-{
- bfa_q_qe_init(&rx->qe);
- INIT_LIST_HEAD(&rx->rxp_q);
- rx->bna = NULL;
-
- rx->rxf.rxf_id = id;
-
- /* FIXME: mbox_qe ctor()?? */
- bfa_q_qe_init(&rx->mbox_qe.qe);
-
- rx->stop_cbfn = NULL;
- rx->stop_cbarg = NULL;
-}
-
-void
-bna_rx_cb_multi_rxq_stopped(void *arg, int status)
-{
- struct bna_rxp *rxp = (struct bna_rxp *)arg;
-
- bfa_wc_down(&rxp->rx->rxq_stop_wc);
-}
-
-void
-bna_rx_cb_rxq_stopped_all(void *arg)
-{
- struct bna_rx *rx = (struct bna_rx *)arg;
-
- bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
-}
-
-static void
-bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
- enum bna_cb_status status)
-{
- struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
-
- bfa_wc_down(&rx_mod->rx_stop_wc);
-}
-
-static void
-bna_rx_mod_cb_rx_stopped_all(void *arg)
-{
- struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
-
- if (rx_mod->stop_cbfn)
- rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
- rx_mod->stop_cbfn = NULL;
-}
-
-static void
-bna_rx_start(struct bna_rx *rx)
-{
- rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
- if (rx->rx_flags & BNA_RX_F_ENABLE)
- bfa_fsm_send_event(rx, RX_E_START);
-}
-
-static void
-bna_rx_stop(struct bna_rx *rx)
-{
- rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
- if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
- bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
- else {
- rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
- rx->stop_cbarg = &rx->bna->rx_mod;
- bfa_fsm_send_event(rx, RX_E_STOP);
- }
-}
-
-static void
-bna_rx_fail(struct bna_rx *rx)
-{
- /* Indicate port is not enabled, and failed */
- rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
- rx->rx_flags |= BNA_RX_F_PORT_FAILED;
- bfa_fsm_send_event(rx, RX_E_FAIL);
-}
-
-void
-bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
-{
- struct bna_rx *rx;
- struct list_head *qe;
-
- rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
- if (type == BNA_RX_T_LOOPBACK)
- rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;
-
- list_for_each(qe, &rx_mod->rx_active_q) {
- rx = (struct bna_rx *)qe;
- if (rx->type == type)
- bna_rx_start(rx);
- }
-}
-
-void
-bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
-{
- struct bna_rx *rx;
- struct list_head *qe;
-
- rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
- rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
-
- rx_mod->stop_cbfn = bna_port_cb_rx_stopped;
-
- /**
- * Before calling bna_rx_stop(), increment rx_stop_wc as many times
- * as we are going to call bna_rx_stop
- */
- list_for_each(qe, &rx_mod->rx_active_q) {
- rx = (struct bna_rx *)qe;
- if (rx->type == type)
- bfa_wc_up(&rx_mod->rx_stop_wc);
- }
-
- if (rx_mod->rx_stop_wc.wc_count == 0) {
- rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
- rx_mod->stop_cbfn = NULL;
- return;
- }
-
- list_for_each(qe, &rx_mod->rx_active_q) {
- rx = (struct bna_rx *)qe;
- if (rx->type == type)
- bna_rx_stop(rx);
- }
-}
-
-void
-bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
-{
- struct bna_rx *rx;
- struct list_head *qe;
-
- rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
- rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
-
- list_for_each(qe, &rx_mod->rx_active_q) {
- rx = (struct bna_rx *)qe;
- bna_rx_fail(rx);
- }
-}
-
-void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
- struct bna_res_info *res_info)
-{
- int index;
- struct bna_rx *rx_ptr;
- struct bna_rxp *rxp_ptr;
- struct bna_rxq *rxq_ptr;
-
- rx_mod->bna = bna;
- rx_mod->flags = 0;
-
- rx_mod->rx = (struct bna_rx *)
- res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
- rx_mod->rxp = (struct bna_rxp *)
- res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
- rx_mod->rxq = (struct bna_rxq *)
- res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
-
- /* Initialize the queues */
- _init_rxmod_queues(rx_mod);
-
- /* Build RX queues */
- for (index = 0; index < BFI_MAX_RXQ; index++) {
- rx_ptr = &rx_mod->rx[index];
- _rx_ctor(rx_ptr, index);
- list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
- rx_mod->rx_free_count++;
- }
-
- /* build RX-path queue */
- for (index = 0; index < BFI_MAX_RXQ; index++) {
- rxp_ptr = &rx_mod->rxp[index];
- rxp_ptr->cq.cq_id = index;
- bfa_q_qe_init(&rxp_ptr->qe);
- list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
- rx_mod->rxp_free_count++;
- }
-
- /* build RXQ queue */
- for (index = 0; index < BFI_MAX_RXQ; index++) {
- rxq_ptr = &rx_mod->rxq[index];
- rxq_ptr->rxq_id = index;
-
- bfa_q_qe_init(&rxq_ptr->qe);
- list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
- rx_mod->rxq_free_count++;
- }
-
- rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
- rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
- rx_mod->rx_stop_wc.wc_count = 0;
-}
-
-void
-bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
-{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &rx_mod->rx_free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &rx_mod->rxp_free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &rx_mod->rxq_free_q)
- i++;
-
- rx_mod->bna = NULL;
-}
-
-int
-bna_rx_state_get(struct bna_rx *rx)
-{
- return bfa_sm_to_state(rx_sm_table, rx->fsm);
-}
-
-void
-bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
-{
- u32 cq_size, hq_size, dq_size;
- u32 cpage_count, hpage_count, dpage_count;
- struct bna_mem_info *mem_info;
- u32 cq_depth;
- u32 hq_depth;
- u32 dq_depth;
-
- dq_depth = q_cfg->q_depth;
- hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
- cq_depth = dq_depth + hq_depth;
-
- BNA_TO_POWER_OF_2_HIGH(cq_depth);
- cq_size = cq_depth * BFI_CQ_WI_SIZE;
- cq_size = ALIGN(cq_size, PAGE_SIZE);
- cpage_count = SIZE_TO_PAGES(cq_size);
-
- BNA_TO_POWER_OF_2_HIGH(dq_depth);
- dq_size = dq_depth * BFI_RXQ_WI_SIZE;
- dq_size = ALIGN(dq_size, PAGE_SIZE);
- dpage_count = SIZE_TO_PAGES(dq_size);
-
- if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
- BNA_TO_POWER_OF_2_HIGH(hq_depth);
- hq_size = hq_depth * BFI_RXQ_WI_SIZE;
- hq_size = ALIGN(hq_size, PAGE_SIZE);
- hpage_count = SIZE_TO_PAGES(hq_size);
- } else {
- hpage_count = 0;
- }
-
- /* CCB structures */
- res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_KVA;
- mem_info->len = sizeof(struct bna_ccb);
- mem_info->num = q_cfg->num_paths;
-
- /* RCB structures */
- res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_KVA;
- mem_info->len = sizeof(struct bna_rcb);
- mem_info->num = BNA_GET_RXQS(q_cfg);
-
- /* Completion QPT */
- res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_DMA;
- mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
- mem_info->num = q_cfg->num_paths;
-
- /* Completion s/w QPT */
- res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_KVA;
- mem_info->len = cpage_count * sizeof(void *);
- mem_info->num = q_cfg->num_paths;
-
- /* Completion QPT pages */
- res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_DMA;
- mem_info->len = PAGE_SIZE;
- mem_info->num = cpage_count * q_cfg->num_paths;
-
- /* Data QPTs */
- res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_DMA;
- mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
- mem_info->num = q_cfg->num_paths;
-
- /* Data s/w QPTs */
- res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_KVA;
- mem_info->len = dpage_count * sizeof(void *);
- mem_info->num = q_cfg->num_paths;
-
- /* Data QPT pages */
- res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_DMA;
- mem_info->len = PAGE_SIZE;
- mem_info->num = dpage_count * q_cfg->num_paths;
-
- /* Hdr QPTs */
- res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_DMA;
- mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
- mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
-
- /* Hdr s/w QPTs */
- res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_KVA;
- mem_info->len = hpage_count * sizeof(void *);
- mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
-
- /* Hdr QPT pages */
- res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_DMA;
- mem_info->len = (hpage_count ? PAGE_SIZE : 0);
- mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
-
- /* RX Interrupts */
- res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
- res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
- res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
-}
-
-struct bna_rx *
-bna_rx_create(struct bna *bna, struct bnad *bnad,
- struct bna_rx_config *rx_cfg,
- struct bna_rx_event_cbfn *rx_cbfn,
- struct bna_res_info *res_info,
- void *priv)
-{
- struct bna_rx_mod *rx_mod = &bna->rx_mod;
- struct bna_rx *rx;
- struct bna_rxp *rxp;
- struct bna_rxq *q0;
- struct bna_rxq *q1;
- struct bna_intr_info *intr_info;
- u32 page_count;
- struct bna_mem_descr *ccb_mem;
- struct bna_mem_descr *rcb_mem;
- struct bna_mem_descr *unmapq_mem;
- struct bna_mem_descr *cqpt_mem;
- struct bna_mem_descr *cswqpt_mem;
- struct bna_mem_descr *cpage_mem;
- struct bna_mem_descr *hqpt_mem; /* Header/Small Q qpt */
- struct bna_mem_descr *dqpt_mem; /* Data/Large Q qpt */
- struct bna_mem_descr *hsqpt_mem; /* s/w qpt for hdr */
- struct bna_mem_descr *dsqpt_mem; /* s/w qpt for data */
- struct bna_mem_descr *hpage_mem; /* hdr page mem */
- struct bna_mem_descr *dpage_mem; /* data page mem */
- int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
- int dpage_count, hpage_count, rcb_idx;
- struct bna_ib_config ibcfg;
- /* Fail if we don't have enough RXPs, RXQs */
- if (!_rx_can_satisfy(rx_mod, rx_cfg))
- return NULL;
-
- /* Initialize resource pointers */
- intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
- ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
- rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
- unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
- cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
- cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
- cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
- hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
- dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
- hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
- dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
- hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
- dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
-
- /* Compute q depth & page count */
- page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
- rx_cfg->num_paths;
-
- dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
- rx_cfg->num_paths;
-
- hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
- rx_cfg->num_paths;
- /* Get RX pointer */
- rx = _get_free_rx(rx_mod);
- _rx_init(rx, bna);
- rx->priv = priv;
- rx->type = rx_cfg->rx_type;
-
- rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
- rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
- rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
- rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
- /* Following callbacks are mandatory */
- rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
- rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
-
- if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
- switch (rx->type) {
- case BNA_RX_T_REGULAR:
- if (!(rx->bna->rx_mod.flags &
- BNA_RX_MOD_F_PORT_LOOPBACK))
- rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
- break;
- case BNA_RX_T_LOOPBACK:
- if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
- rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
- break;
- }
- }
-
- for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
- rxp = _get_free_rxp(rx_mod);
- rxp->type = rx_cfg->rxp_type;
- rxp->rx = rx;
- rxp->cq.rx = rx;
-
- /* Get required RXQs, and queue them to rx-path */
- q0 = _get_free_rxq(rx_mod);
- if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
- q1 = NULL;
- else
- q1 = _get_free_rxq(rx_mod);
-
- /* Initialize IB */
- if (1 == intr_info->num) {
- rxp->cq.ib = bna_ib_get(&bna->ib_mod,
- intr_info->intr_type,
- intr_info->idl[0].vector);
- rxp->vector = intr_info->idl[0].vector;
- } else {
- rxp->cq.ib = bna_ib_get(&bna->ib_mod,
- intr_info->intr_type,
- intr_info->idl[i].vector);
-
- /* Map the MSI-x vector used for this RXP */
- rxp->vector = intr_info->idl[i].vector;
- }
-
- rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);
-
- ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
- ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
- ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
- ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
-
- bna_ib_config(rxp->cq.ib, &ibcfg);
-
- /* Link rxqs to rxp */
- _rxp_add_rxqs(rxp, q0, q1);
-
- /* Link rxp to rx */
- _rx_add_rxp(rx, rxp);
-
- q0->rx = rx;
- q0->rxp = rxp;
-
- /* Initialize RCB for the large / data q */
- q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
- RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
- (void *)unmapq_mem[rcb_idx].kva);
- rcb_idx++;
- (q0)->rx_packets = (q0)->rx_bytes = 0;
- (q0)->rx_packets_with_error = (q0)->rxbuf_alloc_failed = 0;
-
- /* Initialize RXQs */
- _rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
- &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
- q0->rcb->page_idx = dpage_idx;
- q0->rcb->page_count = dpage_count;
- dpage_idx += dpage_count;
-
- /* Call bnad to complete rcb setup */
- if (rx->rcb_setup_cbfn)
- rx->rcb_setup_cbfn(bnad, q0->rcb);
-
- if (q1) {
- q1->rx = rx;
- q1->rxp = rxp;
-
- q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
- RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
- (void *)unmapq_mem[rcb_idx].kva);
- rcb_idx++;
- (q1)->buffer_size = (rx_cfg)->small_buff_size;
- (q1)->rx_packets = (q1)->rx_bytes = 0;
- (q1)->rx_packets_with_error =
- (q1)->rxbuf_alloc_failed = 0;
-
- _rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
- &hqpt_mem[i], &hsqpt_mem[i],
- &hpage_mem[hpage_idx]);
- q1->rcb->page_idx = hpage_idx;
- q1->rcb->page_count = hpage_count;
- hpage_idx += hpage_count;
-
- /* Call bnad to complete rcb setup */
- if (rx->rcb_setup_cbfn)
- rx->rcb_setup_cbfn(bnad, q1->rcb);
- }
- /* Setup RXP::CQ */
- rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
- _rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
- &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
- rxp->cq.ccb->page_idx = cpage_idx;
- rxp->cq.ccb->page_count = page_count;
- cpage_idx += page_count;
-
- rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
- rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
-
- rxp->cq.ccb->producer_index = 0;
- rxp->cq.ccb->q_depth = rx_cfg->q_depth +
- ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
- 0 : rx_cfg->q_depth);
- rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
- rxp->cq.ccb->rcb[0] = q0->rcb;
- if (q1)
- rxp->cq.ccb->rcb[1] = q1->rcb;
- rxp->cq.ccb->cq = &rxp->cq;
- rxp->cq.ccb->bnad = bna->bnad;
- rxp->cq.ccb->hw_producer_index =
- ((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
- (rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
- *(rxp->cq.ccb->hw_producer_index) = 0;
- rxp->cq.ccb->intr_type = intr_info->intr_type;
- rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
- intr_info->idl[0].vector :
- intr_info->idl[i].vector;
- rxp->cq.ccb->rx_coalescing_timeo =
- rxp->cq.ib->ib_config.coalescing_timeo;
- rxp->cq.ccb->id = i;
-
- /* Call bnad to complete CCB setup */
- if (rx->ccb_setup_cbfn)
- rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
-
- } /* for each rx-path */
-
- bna_rxf_init(&rx->rxf, rx, rx_cfg);
-
- bfa_fsm_set_state(rx, bna_rx_sm_stopped);
-
- return rx;
-}
-
-void
-bna_rx_destroy(struct bna_rx *rx)
-{
- struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
- struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
- struct bna_rxq *q0 = NULL;
- struct bna_rxq *q1 = NULL;
- struct bna_rxp *rxp;
- struct list_head *qe;
-
- bna_rxf_uninit(&rx->rxf);
-
- while (!list_empty(&rx->rxp_q)) {
- bfa_q_deq(&rx->rxp_q, &rxp);
- GET_RXQS(rxp, q0, q1);
- /* Callback to bnad for destroying RCB */
- if (rx->rcb_destroy_cbfn)
- rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
- q0->rcb = NULL;
- q0->rxp = NULL;
- q0->rx = NULL;
- _put_free_rxq(rx_mod, q0);
- if (q1) {
- /* Callback to bnad for destroying RCB */
- if (rx->rcb_destroy_cbfn)
- rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
- q1->rcb = NULL;
- q1->rxp = NULL;
- q1->rx = NULL;
- _put_free_rxq(rx_mod, q1);
- }
- rxp->rxq.slr.large = NULL;
- rxp->rxq.slr.small = NULL;
- if (rxp->cq.ib) {
- if (rxp->cq.ib_seg_offset != 0xff)
- bna_ib_release_idx(rxp->cq.ib,
- rxp->cq.ib_seg_offset);
- bna_ib_put(ib_mod, rxp->cq.ib);
- rxp->cq.ib = NULL;
- }
- /* Callback to bnad for destroying CCB */
- if (rx->ccb_destroy_cbfn)
- rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
- rxp->cq.ccb = NULL;
- rxp->rx = NULL;
- _put_free_rxp(rx_mod, rxp);
- }
-
- list_for_each(qe, &rx_mod->rx_active_q) {
- if (qe == &rx->qe) {
- list_del(&rx->qe);
- bfa_q_qe_init(&rx->qe);
- break;
- }
- }
-
- rx->bna = NULL;
- rx->priv = NULL;
- _put_free_rx(rx_mod, rx);
-}
-
-void
-bna_rx_enable(struct bna_rx *rx)
-{
- if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
- return;
-
- rx->rx_flags |= BNA_RX_F_ENABLE;
- if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
- bfa_fsm_send_event(rx, RX_E_START);
-}
-
-void
-bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
- void (*cbfn)(void *, struct bna_rx *,
- enum bna_cb_status))
-{
- if (type == BNA_SOFT_CLEANUP) {
- /* h/w should not be accessed. Treat we're stopped */
- (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
- } else {
- rx->stop_cbfn = cbfn;
- rx->stop_cbarg = rx->bna->bnad;
-
- rx->rx_flags &= ~BNA_RX_F_ENABLE;
-
- bfa_fsm_send_event(rx, RX_E_STOP);
- }
-}
-
-/**
- * TX
- */
-#define call_tx_stop_cbfn(tx, status)\
-do {\
- if ((tx)->stop_cbfn)\
- (tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
- (tx)->stop_cbfn = NULL;\
- (tx)->stop_cbarg = NULL;\
-} while (0)
-
-#define call_tx_prio_change_cbfn(tx, status)\
-do {\
- if ((tx)->prio_change_cbfn)\
- (tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
- (tx)->prio_change_cbfn = NULL;\
-} while (0)
-
-static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
- enum bna_cb_status status);
-static void bna_tx_cb_txq_stopped(void *arg, int status);
-static void bna_tx_cb_stats_cleared(void *arg, int status);
-static void __bna_tx_stop(struct bna_tx *tx);
-static void __bna_tx_start(struct bna_tx *tx);
-static void __bna_txf_stat_clr(struct bna_tx *tx);
-
-enum bna_tx_event {
- TX_E_START = 1,
- TX_E_STOP = 2,
- TX_E_FAIL = 3,
- TX_E_TXQ_STOPPED = 4,
- TX_E_PRIO_CHANGE = 5,
- TX_E_STAT_CLEARED = 6,
-};
-
-enum bna_tx_state {
- BNA_TX_STOPPED = 1,
- BNA_TX_STARTED = 2,
- BNA_TX_TXQ_STOP_WAIT = 3,
- BNA_TX_PRIO_STOP_WAIT = 4,
- BNA_TX_STAT_CLR_WAIT = 5,
-};
-
-bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
- enum bna_tx_event);
-bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
- enum bna_tx_event);
-bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
- enum bna_tx_event);
-bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
- enum bna_tx_event);
-bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
- enum bna_tx_event);
-
-static struct bfa_sm_table tx_sm_table[] = {
- {BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
- {BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
- {BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
- {BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
- {BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
-};
-
-static void
-bna_tx_sm_stopped_entry(struct bna_tx *tx)
-{
- struct bna_txq *txq;
- struct list_head *qe;
-
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
- }
-
- call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
-}
-
-static void
-bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
-{
- switch (event) {
- case TX_E_START:
- bfa_fsm_set_state(tx, bna_tx_sm_started);
- break;
-
- case TX_E_STOP:
- bfa_fsm_set_state(tx, bna_tx_sm_stopped);
- break;
-
- case TX_E_FAIL:
- /* No-op */
- break;
-
- case TX_E_PRIO_CHANGE:
- call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
- break;
-
- case TX_E_TXQ_STOPPED:
- /**
- * This event is received due to flushing of mbox when
- * device fails
- */
- /* No-op */
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_tx_sm_started_entry(struct bna_tx *tx)
-{
- struct bna_txq *txq;
- struct list_head *qe;
-
- __bna_tx_start(tx);
-
- /* Start IB */
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- bna_ib_ack(&txq->ib->door_bell, 0);
- }
-}
-
-static void
-bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
-{
- struct bna_txq *txq;
- struct list_head *qe;
-
- switch (event) {
- case TX_E_STOP:
- bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
- __bna_tx_stop(tx);
- break;
-
- case TX_E_FAIL:
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- bna_ib_fail(txq->ib);
- (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
- }
- bfa_fsm_set_state(tx, bna_tx_sm_stopped);
- break;
-
- case TX_E_PRIO_CHANGE:
- bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
-{
-}
-
-static void
-bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
-{
- struct bna_txq *txq;
- struct list_head *qe;
-
- switch (event) {
- case TX_E_FAIL:
- bfa_fsm_set_state(tx, bna_tx_sm_stopped);
- break;
-
- case TX_E_TXQ_STOPPED:
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- bna_ib_stop(txq->ib);
- }
- bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
- break;
-
- case TX_E_PRIO_CHANGE:
- /* No-op */
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
-{
- __bna_tx_stop(tx);
-}
-
-static void
-bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
-{
- struct bna_txq *txq;
- struct list_head *qe;
-
- switch (event) {
- case TX_E_STOP:
- bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
- break;
-
- case TX_E_FAIL:
- call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
- bfa_fsm_set_state(tx, bna_tx_sm_stopped);
- break;
-
- case TX_E_TXQ_STOPPED:
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- bna_ib_stop(txq->ib);
- (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
- }
- call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
- bfa_fsm_set_state(tx, bna_tx_sm_started);
- break;
-
- case TX_E_PRIO_CHANGE:
- /* No-op */
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
-{
- __bna_txf_stat_clr(tx);
-}
-
-static void
-bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
-{
- switch (event) {
- case TX_E_FAIL:
- case TX_E_STAT_CLEARED:
- bfa_fsm_set_state(tx, bna_tx_sm_stopped);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
-{
- struct bna_rxtx_q_mem *q_mem;
- struct bna_txq_mem txq_cfg;
- struct bna_txq_mem *txq_mem;
- struct bna_dma_addr cur_q_addr;
- u32 pg_num;
- void __iomem *base_addr;
- unsigned long off;
-
- /* Fill out structure, to be subsequently written to hardware */
- txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
- txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
- cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
- txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
- txq_cfg.cur_q_entry_hi = cur_q_addr.msb;
-
- txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;
-
- txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
- (txq->qpt.page_size >> 2);
- txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
- ((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);
-
- txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
- txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
- (txq->priority & 0x7));
- txq_cfg.wvc_n_cquota_n_rquota =
- ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
- (BFI_TX_MAX_WRR_QUOTA & 0xfff));
-
- /* Setup the page and write to H/W */
-
- pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
- HQM_RXTX_Q_RAM_BASE_OFFSET);
- writel(pg_num, tx->bna->regs.page_addr);
-
- base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
- HQM_RXTX_Q_RAM_BASE_OFFSET);
- q_mem = (struct bna_rxtx_q_mem *)0;
- txq_mem = &q_mem[txq->txq_id].txq;
-
- /*
- * The following 4 lines, is a hack b'cos the H/W needs to read
- * these DMA addresses as little endian
- */
-
- off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
- writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);
-
- off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
- writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);
-
- off = (unsigned long)&txq_mem->cur_q_entry_lo;
- writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);
-
- off = (unsigned long)&txq_mem->cur_q_entry_hi;
- writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);
-
- off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
- writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
-
- off = (unsigned long)&txq_mem->entry_n_pg_size;
- writel(txq_cfg.entry_n_pg_size, base_addr + off);
-
- off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
- writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);
-
- off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
- writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);
-
- off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
- writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);
-
- off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
- writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);
-
- txq->tcb->producer_index = 0;
- txq->tcb->consumer_index = 0;
- *(txq->tcb->hw_consumer_index) = 0;
-
-}
-
-static void
-__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
-{
- struct bfi_ll_q_stop_req ll_req;
- u32 bit_mask[2] = {0, 0};
- if (txq->txq_id < 32)
- bit_mask[0] = (u32)1 << txq->txq_id;
- else
- bit_mask[1] = (u32)1 << (txq->txq_id - 32);
-
- memset(&ll_req, 0, sizeof(ll_req));
- ll_req.mh.msg_class = BFI_MC_LL;
- ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
- ll_req.mh.mtag.h2i.lpu_id = 0;
- ll_req.q_id_mask[0] = htonl(bit_mask[0]);
- ll_req.q_id_mask[1] = htonl(bit_mask[1]);
-
- bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
- bna_tx_cb_txq_stopped, tx);
-
- bna_mbox_send(tx->bna, &tx->mbox_qe);
-}
-
-static void
-__bna_txf_start(struct bna_tx *tx)
-{
- struct bna_tx_fndb_ram *tx_fndb;
- struct bna_txf *txf = &tx->txf;
- void __iomem *base_addr;
- unsigned long off;
-
- writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
- (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
- tx->bna->regs.page_addr);
-
- base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
- TX_FNDB_RAM_BASE_OFFSET);
-
- tx_fndb = (struct bna_tx_fndb_ram *)0;
- off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
-
- writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
- base_addr + off);
-
- if (tx->txf.txf_id < 32)
- tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
- else
- tx->bna->tx_mod.txf_bmap[1] |= ((u32)
- 1 << (tx->txf.txf_id - 32));
-}
-
-static void
-__bna_txf_stop(struct bna_tx *tx)
-{
- struct bna_tx_fndb_ram *tx_fndb;
- u32 page_num;
- u32 ctl_flags;
- struct bna_txf *txf = &tx->txf;
- void __iomem *base_addr;
- unsigned long off;
-
- /* retrieve the running txf_flags & turn off enable bit */
- page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
- (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
- writel(page_num, tx->bna->regs.page_addr);
-
- base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
- TX_FNDB_RAM_BASE_OFFSET);
- tx_fndb = (struct bna_tx_fndb_ram *)0;
- off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
-
- ctl_flags = readl(base_addr + off);
- ctl_flags &= ~BFI_TXF_CF_ENABLE;
-
- writel(ctl_flags, base_addr + off);
-
- if (tx->txf.txf_id < 32)
- tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
- else
- tx->bna->tx_mod.txf_bmap[0] &= ~((u32)
- 1 << (tx->txf.txf_id - 32));
-}
-
-static void
-__bna_txf_stat_clr(struct bna_tx *tx)
-{
- struct bfi_ll_stats_req ll_req;
- u32 txf_bmap[2] = {0, 0};
- if (tx->txf.txf_id < 32)
- txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
- else
- txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));
- bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
- ll_req.stats_mask = 0;
- ll_req.rxf_id_mask[0] = 0;
- ll_req.rxf_id_mask[1] = 0;
- ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
- ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);
-
- bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
- bna_tx_cb_stats_cleared, tx);
- bna_mbox_send(tx->bna, &tx->mbox_qe);
-}
-
-static void
-__bna_tx_start(struct bna_tx *tx)
-{
- struct bna_txq *txq;
- struct list_head *qe;
-
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- bna_ib_start(txq->ib);
- __bna_txq_start(tx, txq);
- }
-
- __bna_txf_start(tx);
-
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- txq->tcb->priority = txq->priority;
- (tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
- }
-}
-
-static void
-__bna_tx_stop(struct bna_tx *tx)
-{
- struct bna_txq *txq;
- struct list_head *qe;
-
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
- }
-
- __bna_txf_stop(tx);
-
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- bfa_wc_up(&tx->txq_stop_wc);
- }
-
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- __bna_txq_stop(tx, txq);
- }
-}
-
-static void
-bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
- struct bna_mem_descr *qpt_mem,
- struct bna_mem_descr *swqpt_mem,
- struct bna_mem_descr *page_mem)
-{
- int i;
-
- txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
- txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
- txq->qpt.kv_qpt_ptr = qpt_mem->kva;
- txq->qpt.page_count = page_count;
- txq->qpt.page_size = page_size;
-
- txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
-
- for (i = 0; i < page_count; i++) {
- txq->tcb->sw_qpt[i] = page_mem[i].kva;
-
- ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
- page_mem[i].dma.lsb;
- ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
- page_mem[i].dma.msb;
-
- }
-}
-
-static void
-bna_tx_free(struct bna_tx *tx)
-{
- struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
- struct bna_txq *txq;
- struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
- struct list_head *qe;
-
- while (!list_empty(&tx->txq_q)) {
- bfa_q_deq(&tx->txq_q, &txq);
- bfa_q_qe_init(&txq->qe);
- if (txq->ib) {
- if (txq->ib_seg_offset != -1)
- bna_ib_release_idx(txq->ib,
- txq->ib_seg_offset);
- bna_ib_put(ib_mod, txq->ib);
- txq->ib = NULL;
- }
- txq->tcb = NULL;
- txq->tx = NULL;
- list_add_tail(&txq->qe, &tx_mod->txq_free_q);
- }
-
- list_for_each(qe, &tx_mod->tx_active_q) {
- if (qe == &tx->qe) {
- list_del(&tx->qe);
- bfa_q_qe_init(&tx->qe);
- break;
- }
- }
-
- tx->bna = NULL;
- tx->priv = NULL;
- list_add_tail(&tx->qe, &tx_mod->tx_free_q);
-}
-
-static void
-bna_tx_cb_txq_stopped(void *arg, int status)
-{
- struct bna_tx *tx = (struct bna_tx *)arg;
-
- bfa_q_qe_init(&tx->mbox_qe.qe);
- bfa_wc_down(&tx->txq_stop_wc);
-}
-
-static void
-bna_tx_cb_txq_stopped_all(void *arg)
-{
- struct bna_tx *tx = (struct bna_tx *)arg;
-
- bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
-}
-
-static void
-bna_tx_cb_stats_cleared(void *arg, int status)
-{
- struct bna_tx *tx = (struct bna_tx *)arg;
-
- bfa_q_qe_init(&tx->mbox_qe.qe);
-
- bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
-}
-
-static void
-bna_tx_start(struct bna_tx *tx)
-{
- tx->flags |= BNA_TX_F_PORT_STARTED;
- if (tx->flags & BNA_TX_F_ENABLED)
- bfa_fsm_send_event(tx, TX_E_START);
-}
-
-static void
-bna_tx_stop(struct bna_tx *tx)
-{
- tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
- tx->stop_cbarg = &tx->bna->tx_mod;
-
- tx->flags &= ~BNA_TX_F_PORT_STARTED;
- bfa_fsm_send_event(tx, TX_E_STOP);
-}
-
-static void
-bna_tx_fail(struct bna_tx *tx)
-{
- tx->flags &= ~BNA_TX_F_PORT_STARTED;
- bfa_fsm_send_event(tx, TX_E_FAIL);
-}
-
-static void
-bna_tx_prio_changed(struct bna_tx *tx, int prio)
-{
- struct bna_txq *txq;
- struct list_head *qe;
-
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- txq->priority = prio;
- }
-
- bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
-}
-
-static void
-bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
-{
- if (cee_link)
- tx->flags |= BNA_TX_F_PRIO_LOCK;
- else
- tx->flags &= ~BNA_TX_F_PRIO_LOCK;
-}
-
-static void
-bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
- enum bna_cb_status status)
-{
- struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
-
- bfa_wc_down(&tx_mod->tx_stop_wc);
-}
-
-static void
-bna_tx_mod_cb_tx_stopped_all(void *arg)
-{
- struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
-
- if (tx_mod->stop_cbfn)
- tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
- tx_mod->stop_cbfn = NULL;
-}
-
-void
-bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
-{
- u32 q_size;
- u32 page_count;
- struct bna_mem_info *mem_info;
-
- res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_KVA;
- mem_info->len = sizeof(struct bna_tcb);
- mem_info->num = num_txq;
-
- q_size = txq_depth * BFI_TXQ_WI_SIZE;
- q_size = ALIGN(q_size, PAGE_SIZE);
- page_count = q_size >> PAGE_SHIFT;
-
- res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_DMA;
- mem_info->len = page_count * sizeof(struct bna_dma_addr);
- mem_info->num = num_txq;
-
- res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_KVA;
- mem_info->len = page_count * sizeof(void *);
- mem_info->num = num_txq;
-
- res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
- mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
- mem_info->mem_type = BNA_MEM_T_DMA;
- mem_info->len = PAGE_SIZE;
- mem_info->num = num_txq * page_count;
-
- res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
- res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
- BNA_INTR_T_MSIX;
- res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
-}
-
-struct bna_tx *
-bna_tx_create(struct bna *bna, struct bnad *bnad,
- struct bna_tx_config *tx_cfg,
- struct bna_tx_event_cbfn *tx_cbfn,
- struct bna_res_info *res_info, void *priv)
-{
- struct bna_intr_info *intr_info;
- struct bna_tx_mod *tx_mod = &bna->tx_mod;
- struct bna_tx *tx;
- struct bna_txq *txq;
- struct list_head *qe;
- struct bna_ib_mod *ib_mod = &bna->ib_mod;
- struct bna_doorbell_qset *qset;
- struct bna_ib_config ib_config;
- int page_count;
- int page_size;
- int page_idx;
- int i;
- unsigned long off;
-
- intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
- page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
- tx_cfg->num_txq;
- page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
-
- /**
- * Get resources
- */
-
- if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
- return NULL;
-
- /* Tx */
-
- if (list_empty(&tx_mod->tx_free_q))
- return NULL;
- bfa_q_deq(&tx_mod->tx_free_q, &tx);
- bfa_q_qe_init(&tx->qe);
-
- /* TxQs */
-
- INIT_LIST_HEAD(&tx->txq_q);
- for (i = 0; i < tx_cfg->num_txq; i++) {
- if (list_empty(&tx_mod->txq_free_q))
- goto err_return;
-
- bfa_q_deq(&tx_mod->txq_free_q, &txq);
- bfa_q_qe_init(&txq->qe);
- list_add_tail(&txq->qe, &tx->txq_q);
- txq->ib = NULL;
- txq->ib_seg_offset = -1;
- txq->tx = tx;
- }
-
- /* IBs */
- i = 0;
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
-
- if (intr_info->num == 1)
- txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
- intr_info->idl[0].vector);
- else
- txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
- intr_info->idl[i].vector);
-
- if (txq->ib == NULL)
- goto err_return;
-
- txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
- if (txq->ib_seg_offset == -1)
- goto err_return;
-
- i++;
- }
-
- /*
- * Initialize
- */
-
- /* Tx */
-
- tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
- tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
- /* Following callbacks are mandatory */
- tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
- tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
- tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
-
- list_add_tail(&tx->qe, &tx_mod->tx_active_q);
- tx->bna = bna;
- tx->priv = priv;
- tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
- tx->txq_stop_wc.wc_cbarg = tx;
- tx->txq_stop_wc.wc_count = 0;
-
- tx->type = tx_cfg->tx_type;
-
- tx->flags = 0;
- if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
- switch (tx->type) {
- case BNA_TX_T_REGULAR:
- if (!(tx->bna->tx_mod.flags &
- BNA_TX_MOD_F_PORT_LOOPBACK))
- tx->flags |= BNA_TX_F_PORT_STARTED;
- break;
- case BNA_TX_T_LOOPBACK:
- if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
- tx->flags |= BNA_TX_F_PORT_STARTED;
- break;
- }
- }
- if (tx->bna->tx_mod.cee_link)
- tx->flags |= BNA_TX_F_PRIO_LOCK;
-
- /* TxQ */
-
- i = 0;
- page_idx = 0;
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- txq->priority = tx_mod->priority;
- txq->tcb = (struct bna_tcb *)
- res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
- txq->tx_packets = 0;
- txq->tx_bytes = 0;
-
- /* IB */
-
- ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
- ib_config.interpkt_timeo = 0; /* Not used */
- ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
- ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
- BFI_IB_CF_INT_ENABLE |
- BFI_IB_CF_COALESCING_MODE);
- bna_ib_config(txq->ib, &ib_config);
-
- /* TCB */
-
- txq->tcb->producer_index = 0;
- txq->tcb->consumer_index = 0;
- txq->tcb->hw_consumer_index = (volatile u32 *)
- ((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
- (txq->ib_seg_offset * BFI_IBIDX_SIZE));
- *(txq->tcb->hw_consumer_index) = 0;
- txq->tcb->q_depth = tx_cfg->txq_depth;
- txq->tcb->unmap_q = (void *)
- res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
- qset = (struct bna_doorbell_qset *)0;
- off = (unsigned long)&qset[txq->txq_id].txq[0];
- txq->tcb->q_dbell = off +
- BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
- txq->tcb->i_dbell = &txq->ib->door_bell;
- txq->tcb->intr_type = intr_info->intr_type;
- txq->tcb->intr_vector = (intr_info->num == 1) ?
- intr_info->idl[0].vector :
- intr_info->idl[i].vector;
- txq->tcb->txq = txq;
- txq->tcb->bnad = bnad;
- txq->tcb->id = i;
-
- /* QPT, SWQPT, Pages */
- bna_txq_qpt_setup(txq, page_count, page_size,
- &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
- &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
- &res_info[BNA_TX_RES_MEM_T_PAGE].
- res_u.mem_info.mdl[page_idx]);
- txq->tcb->page_idx = page_idx;
- txq->tcb->page_count = page_count;
- page_idx += page_count;
-
- /* Callback to bnad for setting up TCB */
- if (tx->tcb_setup_cbfn)
- (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
-
- i++;
- }
-
- /* TxF */
-
- tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
- tx->txf.vlan = 0;
-
- /* Mbox element */
- bfa_q_qe_init(&tx->mbox_qe.qe);
-
- bfa_fsm_set_state(tx, bna_tx_sm_stopped);
-
- return tx;
-
-err_return:
- bna_tx_free(tx);
- return NULL;
-}
-
-void
-bna_tx_destroy(struct bna_tx *tx)
-{
- /* Callback to bnad for destroying TCB */
- if (tx->tcb_destroy_cbfn) {
- struct bna_txq *txq;
- struct list_head *qe;
-
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
- (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
- }
- }
-
- bna_tx_free(tx);
-}
-
-void
-bna_tx_enable(struct bna_tx *tx)
-{
- if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
- return;
-
- tx->flags |= BNA_TX_F_ENABLED;
-
- if (tx->flags & BNA_TX_F_PORT_STARTED)
- bfa_fsm_send_event(tx, TX_E_START);
-}
-
-void
-bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
- void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
-{
- if (type == BNA_SOFT_CLEANUP) {
- (*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
- return;
- }
-
- tx->stop_cbfn = cbfn;
- tx->stop_cbarg = tx->bna->bnad;
-
- tx->flags &= ~BNA_TX_F_ENABLED;
-
- bfa_fsm_send_event(tx, TX_E_STOP);
-}
-
-int
-bna_tx_state_get(struct bna_tx *tx)
-{
- return bfa_sm_to_state(tx_sm_table, tx->fsm);
-}
-
-void
-bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
- struct bna_res_info *res_info)
-{
- int i;
-
- tx_mod->bna = bna;
- tx_mod->flags = 0;
-
- tx_mod->tx = (struct bna_tx *)
- res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
- tx_mod->txq = (struct bna_txq *)
- res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
-
- INIT_LIST_HEAD(&tx_mod->tx_free_q);
- INIT_LIST_HEAD(&tx_mod->tx_active_q);
-
- INIT_LIST_HEAD(&tx_mod->txq_free_q);
-
- for (i = 0; i < BFI_MAX_TXQ; i++) {
- tx_mod->tx[i].txf.txf_id = i;
- bfa_q_qe_init(&tx_mod->tx[i].qe);
- list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
-
- tx_mod->txq[i].txq_id = i;
- bfa_q_qe_init(&tx_mod->txq[i].qe);
- list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
- }
-
- tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
- tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
- tx_mod->tx_stop_wc.wc_count = 0;
-}
-
-void
-bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
-{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &tx_mod->tx_free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &tx_mod->txq_free_q)
- i++;
-
- tx_mod->bna = NULL;
-}
-
-void
-bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
-{
- struct bna_tx *tx;
- struct list_head *qe;
-
- tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
- if (type == BNA_TX_T_LOOPBACK)
- tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;
-
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
- if (tx->type == type)
- bna_tx_start(tx);
- }
-}
-
-void
-bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
-{
- struct bna_tx *tx;
- struct list_head *qe;
-
- tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
- tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
-
- tx_mod->stop_cbfn = bna_port_cb_tx_stopped;
-
- /**
- * Before calling bna_tx_stop(), increment tx_stop_wc as many times
- * as we are going to call bna_tx_stop
- */
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
- if (tx->type == type)
- bfa_wc_up(&tx_mod->tx_stop_wc);
- }
-
- if (tx_mod->tx_stop_wc.wc_count == 0) {
- tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
- tx_mod->stop_cbfn = NULL;
- return;
- }
-
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
- if (tx->type == type)
- bna_tx_stop(tx);
- }
-}
-
-void
-bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
-{
- struct bna_tx *tx;
- struct list_head *qe;
-
- tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
- tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
-
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
- bna_tx_fail(tx);
- }
-}
-
-void
-bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
-{
- struct bna_tx *tx;
- struct list_head *qe;
-
- if (prio != tx_mod->priority) {
- tx_mod->priority = prio;
-
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
- bna_tx_prio_changed(tx, prio);
- }
- }
-}
-
-void
-bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
-{
- struct bna_tx *tx;
- struct list_head *qe;
-
- tx_mod->cee_link = cee_link;
-
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
- bna_tx_cee_link_status(tx, cee_link);
- }
-}
diff --git a/drivers/net/bna/bna_types.h b/drivers/net/bna/bna_types.h
deleted file mode 100644
index 2f89cb23524..00000000000
--- a/drivers/net/bna/bna_types.h
+++ /dev/null
@@ -1,1127 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-#ifndef __BNA_TYPES_H__
-#define __BNA_TYPES_H__
-
-#include "cna.h"
-#include "bna_hw.h"
-#include "bfa_cee.h"
-
-/**
- *
- * Forward declarations
- *
- */
-
-struct bna_txq;
-struct bna_tx;
-struct bna_rxq;
-struct bna_cq;
-struct bna_rx;
-struct bna_rxf;
-struct bna_port;
-struct bna;
-struct bnad;
-
-/**
- *
- * Enums, primitive data types
- *
- */
-
-enum bna_status {
- BNA_STATUS_T_DISABLED = 0,
- BNA_STATUS_T_ENABLED = 1
-};
-
-enum bna_cleanup_type {
- BNA_HARD_CLEANUP = 0,
- BNA_SOFT_CLEANUP = 1
-};
-
-enum bna_cb_status {
- BNA_CB_SUCCESS = 0,
- BNA_CB_FAIL = 1,
- BNA_CB_INTERRUPT = 2,
- BNA_CB_BUSY = 3,
- BNA_CB_INVALID_MAC = 4,
- BNA_CB_MCAST_LIST_FULL = 5,
- BNA_CB_UCAST_CAM_FULL = 6,
- BNA_CB_WAITING = 7,
- BNA_CB_NOT_EXEC = 8
-};
-
-enum bna_res_type {
- BNA_RES_T_MEM = 1,
- BNA_RES_T_INTR = 2
-};
-
-enum bna_mem_type {
- BNA_MEM_T_KVA = 1,
- BNA_MEM_T_DMA = 2
-};
-
-enum bna_intr_type {
- BNA_INTR_T_INTX = 1,
- BNA_INTR_T_MSIX = 2
-};
-
-enum bna_res_req_type {
- BNA_RES_MEM_T_COM = 0,
- BNA_RES_MEM_T_ATTR = 1,
- BNA_RES_MEM_T_FWTRC = 2,
- BNA_RES_MEM_T_STATS = 3,
- BNA_RES_MEM_T_SWSTATS = 4,
- BNA_RES_MEM_T_IBIDX = 5,
- BNA_RES_MEM_T_IB_ARRAY = 6,
- BNA_RES_MEM_T_INTR_ARRAY = 7,
- BNA_RES_MEM_T_IDXSEG_ARRAY = 8,
- BNA_RES_MEM_T_TX_ARRAY = 9,
- BNA_RES_MEM_T_TXQ_ARRAY = 10,
- BNA_RES_MEM_T_RX_ARRAY = 11,
- BNA_RES_MEM_T_RXP_ARRAY = 12,
- BNA_RES_MEM_T_RXQ_ARRAY = 13,
- BNA_RES_MEM_T_UCMAC_ARRAY = 14,
- BNA_RES_MEM_T_MCMAC_ARRAY = 15,
- BNA_RES_MEM_T_RIT_ENTRY = 16,
- BNA_RES_MEM_T_RIT_SEGMENT = 17,
- BNA_RES_INTR_T_MBOX = 18,
- BNA_RES_T_MAX
-};
-
-enum bna_tx_res_req_type {
- BNA_TX_RES_MEM_T_TCB = 0,
- BNA_TX_RES_MEM_T_UNMAPQ = 1,
- BNA_TX_RES_MEM_T_QPT = 2,
- BNA_TX_RES_MEM_T_SWQPT = 3,
- BNA_TX_RES_MEM_T_PAGE = 4,
- BNA_TX_RES_INTR_T_TXCMPL = 5,
- BNA_TX_RES_T_MAX,
-};
-
-enum bna_rx_mem_type {
- BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */
- BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */
- BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
- BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
- BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
- BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
- BNA_RX_RES_MEM_T_HQPT = 6, /* RX QPT */
- BNA_RX_RES_MEM_T_DQPT = 7, /* RX QPT */
- BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_DPAGE = 10, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_HPAGE = 11, /* RX s/w QPT */
- BNA_RX_RES_T_INTR = 12, /* Rx interrupts */
- BNA_RX_RES_T_MAX = 13
-};
-
-enum bna_mbox_state {
- BNA_MBOX_FREE = 0,
- BNA_MBOX_POSTED = 1
-};
-
-enum bna_tx_type {
- BNA_TX_T_REGULAR = 0,
- BNA_TX_T_LOOPBACK = 1,
-};
-
-enum bna_tx_flags {
- BNA_TX_F_PORT_STARTED = 1,
- BNA_TX_F_ENABLED = 2,
- BNA_TX_F_PRIO_LOCK = 4,
-};
-
-enum bna_tx_mod_flags {
- BNA_TX_MOD_F_PORT_STARTED = 1,
- BNA_TX_MOD_F_PORT_LOOPBACK = 2,
-};
-
-enum bna_rx_type {
- BNA_RX_T_REGULAR = 0,
- BNA_RX_T_LOOPBACK = 1,
-};
-
-enum bna_rxp_type {
- BNA_RXP_SINGLE = 1,
- BNA_RXP_SLR = 2,
- BNA_RXP_HDS = 3
-};
-
-enum bna_rxmode {
- BNA_RXMODE_PROMISC = 1,
- BNA_RXMODE_ALLMULTI = 2
-};
-
-enum bna_rx_event {
- RX_E_START = 1,
- RX_E_STOP = 2,
- RX_E_FAIL = 3,
- RX_E_RXF_STARTED = 4,
- RX_E_RXF_STOPPED = 5,
- RX_E_RXQ_STOPPED = 6,
-};
-
-enum bna_rx_state {
- BNA_RX_STOPPED = 1,
- BNA_RX_RXF_START_WAIT = 2,
- BNA_RX_STARTED = 3,
- BNA_RX_RXF_STOP_WAIT = 4,
- BNA_RX_RXQ_STOP_WAIT = 5,
-};
-
-enum bna_rx_flags {
- BNA_RX_F_ENABLE = 0x01, /* bnad enabled rxf */
- BNA_RX_F_PORT_ENABLED = 0x02, /* Port object is enabled */
- BNA_RX_F_PORT_FAILED = 0x04, /* Port in failed state */
-};
-
-enum bna_rx_mod_flags {
- BNA_RX_MOD_F_PORT_STARTED = 1,
- BNA_RX_MOD_F_PORT_LOOPBACK = 2,
-};
-
-enum bna_rxf_oper_state {
- BNA_RXF_OPER_STATE_RUNNING = 0x01, /* rxf operational */
- BNA_RXF_OPER_STATE_PAUSED = 0x02, /* rxf in PAUSED state */
-};
-
-enum bna_rxf_flags {
- BNA_RXF_FL_STOP_PENDING = 0x01,
- BNA_RXF_FL_FAILED = 0x02,
- BNA_RXF_FL_RSS_CONFIG_PENDING = 0x04,
- BNA_RXF_FL_OPERSTATE_CHANGED = 0x08,
- BNA_RXF_FL_RXF_ENABLED = 0x10,
- BNA_RXF_FL_VLAN_CONFIG_PENDING = 0x20,
-};
-
-enum bna_rxf_event {
- RXF_E_START = 1,
- RXF_E_STOP = 2,
- RXF_E_FAIL = 3,
- RXF_E_CAM_FLTR_MOD = 4,
- RXF_E_STARTED = 5,
- RXF_E_STOPPED = 6,
- RXF_E_CAM_FLTR_RESP = 7,
- RXF_E_PAUSE = 8,
- RXF_E_RESUME = 9,
- RXF_E_STAT_CLEARED = 10,
-};
-
-enum bna_rxf_state {
- BNA_RXF_STOPPED = 1,
- BNA_RXF_START_WAIT = 2,
- BNA_RXF_CAM_FLTR_MOD_WAIT = 3,
- BNA_RXF_STARTED = 4,
- BNA_RXF_CAM_FLTR_CLR_WAIT = 5,
- BNA_RXF_STOP_WAIT = 6,
- BNA_RXF_PAUSE_WAIT = 7,
- BNA_RXF_RESUME_WAIT = 8,
- BNA_RXF_STAT_CLR_WAIT = 9,
-};
-
-enum bna_port_type {
- BNA_PORT_T_REGULAR = 0,
- BNA_PORT_T_LOOPBACK_INTERNAL = 1,
- BNA_PORT_T_LOOPBACK_EXTERNAL = 2,
-};
-
-enum bna_link_status {
- BNA_LINK_DOWN = 0,
- BNA_LINK_UP = 1,
- BNA_CEE_UP = 2
-};
-
-enum bna_llport_flags {
- BNA_LLPORT_F_ADMIN_UP = 1,
- BNA_LLPORT_F_PORT_ENABLED = 2,
- BNA_LLPORT_F_RX_STARTED = 4
-};
-
-enum bna_port_flags {
- BNA_PORT_F_DEVICE_READY = 1,
- BNA_PORT_F_ENABLED = 2,
- BNA_PORT_F_PAUSE_CHANGED = 4,
- BNA_PORT_F_MTU_CHANGED = 8
-};
-
-enum bna_pkt_rates {
- BNA_PKT_RATE_10K = 10000,
- BNA_PKT_RATE_20K = 20000,
- BNA_PKT_RATE_30K = 30000,
- BNA_PKT_RATE_40K = 40000,
- BNA_PKT_RATE_50K = 50000,
- BNA_PKT_RATE_60K = 60000,
- BNA_PKT_RATE_70K = 70000,
- BNA_PKT_RATE_80K = 80000,
-};
-
-enum bna_dim_load_types {
- BNA_LOAD_T_HIGH_4 = 0, /* 80K <= r */
- BNA_LOAD_T_HIGH_3 = 1, /* 60K <= r < 80K */
- BNA_LOAD_T_HIGH_2 = 2, /* 50K <= r < 60K */
- BNA_LOAD_T_HIGH_1 = 3, /* 40K <= r < 50K */
- BNA_LOAD_T_LOW_1 = 4, /* 30K <= r < 40K */
- BNA_LOAD_T_LOW_2 = 5, /* 20K <= r < 30K */
- BNA_LOAD_T_LOW_3 = 6, /* 10K <= r < 20K */
- BNA_LOAD_T_LOW_4 = 7, /* r < 10K */
- BNA_LOAD_T_MAX = 8
-};
-
-enum bna_dim_bias_types {
- BNA_BIAS_T_SMALL = 0, /* small pkts > (large pkts * 2) */
- BNA_BIAS_T_LARGE = 1, /* Not BNA_BIAS_T_SMALL */
- BNA_BIAS_T_MAX = 2
-};
-
-struct bna_mac {
- /* This should be the first one */
- struct list_head qe;
- u8 addr[ETH_ALEN];
-};
-
-struct bna_mem_descr {
- u32 len;
- void *kva;
- struct bna_dma_addr dma;
-};
-
-struct bna_mem_info {
- enum bna_mem_type mem_type;
- u32 len;
- u32 num;
- u32 align_sz; /* 0/1 = no alignment */
- struct bna_mem_descr *mdl;
- void *cookie; /* For bnad to unmap dma later */
-};
-
-struct bna_intr_descr {
- int vector;
-};
-
-struct bna_intr_info {
- enum bna_intr_type intr_type;
- int num;
- struct bna_intr_descr *idl;
-};
-
-union bna_res_u {
- struct bna_mem_info mem_info;
- struct bna_intr_info intr_info;
-};
-
-struct bna_res_info {
- enum bna_res_type res_type;
- union bna_res_u res_u;
-};
-
-/* HW QPT */
-struct bna_qpt {
- struct bna_dma_addr hw_qpt_ptr;
- void *kv_qpt_ptr;
- u32 page_count;
- u32 page_size;
-};
-
-/**
- *
- * Device
- *
- */
-
-struct bna_device {
- bfa_fsm_t fsm;
- struct bfa_ioc ioc;
-
- enum bna_intr_type intr_type;
- int vector;
-
- void (*ready_cbfn)(struct bnad *bnad, enum bna_cb_status status);
- struct bnad *ready_cbarg;
-
- void (*stop_cbfn)(struct bnad *bnad, enum bna_cb_status status);
- struct bnad *stop_cbarg;
-
- struct bna *bna;
-};
-
-/**
- *
- * Mail box
- *
- */
-
-struct bna_mbox_qe {
- /* This should be the first one */
- struct list_head qe;
-
- struct bfa_mbox_cmd cmd;
- u32 cmd_len;
- /* Callback for port, tx, rx, rxf */
- void (*cbfn)(void *arg, int status);
- void *cbarg;
-};
-
-struct bna_mbox_mod {
- enum bna_mbox_state state;
- struct list_head posted_q;
- u32 msg_pending;
- u32 msg_ctr;
- struct bna *bna;
-};
-
-/**
- *
- * Port
- *
- */
-
-/* Pause configuration */
-struct bna_pause_config {
- enum bna_status tx_pause;
- enum bna_status rx_pause;
-};
-
-struct bna_llport {
- bfa_fsm_t fsm;
- enum bna_llport_flags flags;
-
- enum bna_port_type type;
-
- enum bna_link_status link_status;
-
- int rx_started_count;
-
- void (*stop_cbfn)(struct bna_port *, enum bna_cb_status);
-
- struct bna_mbox_qe mbox_qe;
-
- struct bna *bna;
-};
-
-struct bna_port {
- bfa_fsm_t fsm;
- enum bna_port_flags flags;
-
- enum bna_port_type type;
-
- struct bna_llport llport;
-
- struct bna_pause_config pause_config;
- u8 priority;
- int mtu;
-
- /* Callback for bna_port_disable(), port_stop() */
- void (*stop_cbfn)(void *, enum bna_cb_status);
- void *stop_cbarg;
-
- /* Callback for bna_port_pause_config() */
- void (*pause_cbfn)(struct bnad *, enum bna_cb_status);
-
- /* Callback for bna_port_mtu_set() */
- void (*mtu_cbfn)(struct bnad *, enum bna_cb_status);
-
- void (*link_cbfn)(struct bnad *, enum bna_link_status);
-
- struct bfa_wc chld_stop_wc;
-
- struct bna_mbox_qe mbox_qe;
-
- struct bna *bna;
-};
-
-/**
- *
- * Interrupt Block
- *
- */
-
-/* IB index segment structure */
-struct bna_ibidx_seg {
- /* This should be the first one */
- struct list_head qe;
-
- u8 ib_seg_size;
- u8 ib_idx_tbl_offset;
-};
-
-/* Interrupt structure */
-struct bna_intr {
- /* This should be the first one */
- struct list_head qe;
- int ref_count;
-
- enum bna_intr_type intr_type;
- int vector;
-
- struct bna_ib *ib;
-};
-
-/* Doorbell structure */
-struct bna_ib_dbell {
- void *__iomem doorbell_addr;
- u32 doorbell_ack;
-};
-
-/* Interrupt timer configuration */
-struct bna_ib_config {
- u8 coalescing_timeo; /* Unit is 5usec. */
-
- int interpkt_count;
- int interpkt_timeo;
-
- enum ib_flags ctrl_flags;
-};
-
-/* IB structure */
-struct bna_ib {
- /* This should be the first one */
- struct list_head qe;
-
- int ib_id;
-
- int ref_count;
- int start_count;
-
- struct bna_dma_addr ib_seg_host_addr;
- void *ib_seg_host_addr_kva;
- u32 idx_mask; /* Size >= BNA_IBIDX_MAX_SEGSIZE */
-
- struct bna_ibidx_seg *idx_seg;
-
- struct bna_ib_dbell door_bell;
-
- struct bna_intr *intr;
-
- struct bna_ib_config ib_config;
-
- struct bna *bna;
-};
-
-/* IB module - keeps track of IBs and interrupts */
-struct bna_ib_mod {
- struct bna_ib *ib; /* BFI_MAX_IB entries */
- struct bna_intr *intr; /* BFI_MAX_IB entries */
- struct bna_ibidx_seg *idx_seg; /* BNA_IBIDX_TOTAL_SEGS */
-
- struct list_head ib_free_q;
-
- struct list_head ibidx_seg_pool[BFI_IBIDX_TOTAL_POOLS];
-
- struct list_head intr_free_q;
- struct list_head intr_active_q;
-
- struct bna *bna;
-};
-
-/**
- *
- * Tx object
- *
- */
-
-/* Tx datapath control structure */
-#define BNA_Q_NAME_SIZE 16
-struct bna_tcb {
- /* Fast path */
- void **sw_qpt;
- void *unmap_q;
- u32 producer_index;
- u32 consumer_index;
- volatile u32 *hw_consumer_index;
- u32 q_depth;
- void *__iomem q_dbell;
- struct bna_ib_dbell *i_dbell;
- int page_idx;
- int page_count;
- /* Control path */
- struct bna_txq *txq;
- struct bnad *bnad;
- enum bna_intr_type intr_type;
- int intr_vector;
- u8 priority; /* Current priority */
- unsigned long flags; /* Used by bnad as required */
- int id;
- char name[BNA_Q_NAME_SIZE];
-};
-
-/* TxQ QPT and configuration */
-struct bna_txq {
- /* This should be the first one */
- struct list_head qe;
-
- int txq_id;
-
- u8 priority;
-
- struct bna_qpt qpt;
- struct bna_tcb *tcb;
- struct bna_ib *ib;
- int ib_seg_offset;
-
- struct bna_tx *tx;
-
- u64 tx_packets;
- u64 tx_bytes;
-};
-
-/* TxF structure (hardware Tx Function) */
-struct bna_txf {
- int txf_id;
- enum txf_flags ctrl_flags;
- u16 vlan;
-};
-
-/* Tx object */
-struct bna_tx {
- /* This should be the first one */
- struct list_head qe;
-
- bfa_fsm_t fsm;
- enum bna_tx_flags flags;
-
- enum bna_tx_type type;
-
- struct list_head txq_q;
- struct bna_txf txf;
-
- /* Tx event handlers */
- void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
- void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
- void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
- void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
- void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
-
- /* callback for bna_tx_disable(), bna_tx_stop() */
- void (*stop_cbfn)(void *arg, struct bna_tx *tx,
- enum bna_cb_status status);
- void *stop_cbarg;
-
- /* callback for bna_tx_prio_set() */
- void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx,
- enum bna_cb_status status);
-
- struct bfa_wc txq_stop_wc;
-
- struct bna_mbox_qe mbox_qe;
-
- struct bna *bna;
- void *priv; /* bnad's cookie */
-};
-
-struct bna_tx_config {
- int num_txq;
- int txq_depth;
- enum bna_tx_type tx_type;
-};
-
-struct bna_tx_event_cbfn {
- /* Optional */
- void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
- void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
- /* Mandatory */
- void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
- void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
- void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
-};
-
-/* Tx module - keeps track of free, active tx objects */
-struct bna_tx_mod {
- struct bna_tx *tx; /* BFI_MAX_TXQ entries */
- struct bna_txq *txq; /* BFI_MAX_TXQ entries */
-
- struct list_head tx_free_q;
- struct list_head tx_active_q;
-
- struct list_head txq_free_q;
-
- /* callback for bna_tx_mod_stop() */
- void (*stop_cbfn)(struct bna_port *port,
- enum bna_cb_status status);
-
- struct bfa_wc tx_stop_wc;
-
- enum bna_tx_mod_flags flags;
-
- int priority;
- int cee_link;
-
- u32 txf_bmap[2];
-
- struct bna *bna;
-};
-
-/**
- *
- * Receive Indirection Table
- *
- */
-
-/* One row of RIT table */
-struct bna_rit_entry {
- u8 large_rxq_id; /* used for either large or data buffers */
- u8 small_rxq_id; /* used for either small or header buffers */
-};
-
-/* RIT segment */
-struct bna_rit_segment {
- struct list_head qe;
-
- u32 rit_offset;
- u32 rit_size;
- /**
- * max_rit_size: Varies per RIT segment depending on how RIT is
- * partitioned
- */
- u32 max_rit_size;
-
- struct bna_rit_entry *rit;
-};
-
-struct bna_rit_mod {
- struct bna_rit_entry *rit;
- struct bna_rit_segment *rit_segment;
-
- struct list_head rit_seg_pool[BFI_RIT_SEG_TOTAL_POOLS];
-};
-
-/**
- *
- * Rx object
- *
- */
-
-/* Rx datapath control structure */
-struct bna_rcb {
- /* Fast path */
- void **sw_qpt;
- void *unmap_q;
- u32 producer_index;
- u32 consumer_index;
- u32 q_depth;
- void *__iomem q_dbell;
- int page_idx;
- int page_count;
- /* Control path */
- struct bna_rxq *rxq;
- struct bna_cq *cq;
- struct bnad *bnad;
- unsigned long flags;
- int id;
-};
-
-/* RxQ structure - QPT, configuration */
-struct bna_rxq {
- struct list_head qe;
- int rxq_id;
-
- int buffer_size;
- int q_depth;
-
- struct bna_qpt qpt;
- struct bna_rcb *rcb;
-
- struct bna_rxp *rxp;
- struct bna_rx *rx;
-
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_packets_with_error;
- u64 rxbuf_alloc_failed;
-};
-
-/* RxQ pair */
-union bna_rxq_u {
- struct {
- struct bna_rxq *hdr;
- struct bna_rxq *data;
- } hds;
- struct {
- struct bna_rxq *small;
- struct bna_rxq *large;
- } slr;
- struct {
- struct bna_rxq *only;
- struct bna_rxq *reserved;
- } single;
-};
-
-/* Packet rate for Dynamic Interrupt Moderation */
-struct bna_pkt_rate {
- u32 small_pkt_cnt;
- u32 large_pkt_cnt;
-};
-
-/* Completion control structure */
-struct bna_ccb {
- /* Fast path */
- void **sw_qpt;
- u32 producer_index;
- volatile u32 *hw_producer_index;
- u32 q_depth;
- struct bna_ib_dbell *i_dbell;
- struct bna_rcb *rcb[2];
- void *ctrl; /* For bnad */
- struct bna_pkt_rate pkt_rate;
- int page_idx;
- int page_count;
-
- /* Control path */
- struct bna_cq *cq;
- struct bnad *bnad;
- enum bna_intr_type intr_type;
- int intr_vector;
- u8 rx_coalescing_timeo; /* For NAPI */
- int id;
- char name[BNA_Q_NAME_SIZE];
-};
-
-/* CQ QPT, configuration */
-struct bna_cq {
- int cq_id;
-
- struct bna_qpt qpt;
- struct bna_ccb *ccb;
-
- struct bna_ib *ib;
- u8 ib_seg_offset;
-
- struct bna_rx *rx;
-};
-
-struct bna_rss_config {
- enum rss_hash_type hash_type;
- u8 hash_mask;
- u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
-};
-
-struct bna_hds_config {
- enum hds_header_type hdr_type;
- int header_size;
-};
-
-/* This structure is used during RX creation */
-struct bna_rx_config {
- enum bna_rx_type rx_type;
- int num_paths;
- enum bna_rxp_type rxp_type;
- int paused;
- int q_depth;
- /*
- * Small/Large (or Header/Data) buffer size to be configured
- * for SLR and HDS queue type. Large buffer size comes from
- * port->mtu.
- */
- int small_buff_size;
-
- enum bna_status rss_status;
- struct bna_rss_config rss_config;
-
- enum bna_status hds_status;
- struct bna_hds_config hds_config;
-
- enum bna_status vlan_strip_status;
-};
-
-/* Rx Path structure - one per MSIX vector/CPU */
-struct bna_rxp {
- /* This should be the first one */
- struct list_head qe;
-
- enum bna_rxp_type type;
- union bna_rxq_u rxq;
- struct bna_cq cq;
-
- struct bna_rx *rx;
-
- /* MSI-x vector number for configuring RSS */
- int vector;
-
- struct bna_mbox_qe mbox_qe;
-};
-
-/* HDS configuration structure */
-struct bna_rxf_hds {
- enum hds_header_type hdr_type;
- int header_size;
-};
-
-/* RSS configuration structure */
-struct bna_rxf_rss {
- enum rss_hash_type hash_type;
- u8 hash_mask;
- u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
-};
-
-/* RxF structure (hardware Rx Function) */
-struct bna_rxf {
- bfa_fsm_t fsm;
- int rxf_id;
- enum rxf_flags ctrl_flags;
- u16 default_vlan_tag;
- enum bna_rxf_oper_state rxf_oper_state;
- enum bna_status hds_status;
- struct bna_rxf_hds hds_cfg;
- enum bna_status rss_status;
- struct bna_rxf_rss rss_cfg;
- struct bna_rit_segment *rit_segment;
- struct bna_rx *rx;
- u32 forced_offset;
- struct bna_mbox_qe mbox_qe;
- int mcast_rxq_id;
-
- /* callback for bna_rxf_start() */
- void (*start_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
- struct bna_rx *start_cbarg;
-
- /* callback for bna_rxf_stop() */
- void (*stop_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
- struct bna_rx *stop_cbarg;
-
- /* callback for bna_rxf_receive_enable() / bna_rxf_receive_disable() */
- void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx,
- enum bna_cb_status status);
- struct bnad *oper_state_cbarg;
-
- /**
- * callback for:
- * bna_rxf_ucast_set()
- * bna_rxf_{ucast/mcast}_add(),
- * bna_rxf_{ucast/mcast}_del(),
- * bna_rxf_mode_set()
- */
- void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx,
- enum bna_cb_status status);
- struct bnad *cam_fltr_cbarg;
-
- enum bna_rxf_flags rxf_flags;
-
- /* List of unicast addresses yet to be applied to h/w */
- struct list_head ucast_pending_add_q;
- struct list_head ucast_pending_del_q;
- int ucast_pending_set;
- /* ucast addresses applied to the h/w */
- struct list_head ucast_active_q;
- struct bna_mac *ucast_active_mac;
-
- /* List of multicast addresses yet to be applied to h/w */
- struct list_head mcast_pending_add_q;
- struct list_head mcast_pending_del_q;
- /* multicast addresses applied to the h/w */
- struct list_head mcast_active_q;
-
- /* Rx modes yet to be applied to h/w */
- enum bna_rxmode rxmode_pending;
- enum bna_rxmode rxmode_pending_bitmask;
- /* Rx modes applied to h/w */
- enum bna_rxmode rxmode_active;
-
- enum bna_status vlan_filter_status;
- u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
-};
-
-/* Rx object */
-struct bna_rx {
- /* This should be the first one */
- struct list_head qe;
-
- bfa_fsm_t fsm;
-
- enum bna_rx_type type;
-
- /* list-head for RX path objects */
- struct list_head rxp_q;
-
- struct bna_rxf rxf;
-
- enum bna_rx_flags rx_flags;
-
- struct bna_mbox_qe mbox_qe;
-
- struct bfa_wc rxq_stop_wc;
-
- /* Rx event handlers */
- void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
- void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
- void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
- void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
- void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
- void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
-
- /* callback for bna_rx_disable(), bna_rx_stop() */
- void (*stop_cbfn)(void *arg, struct bna_rx *rx,
- enum bna_cb_status status);
- void *stop_cbarg;
-
- struct bna *bna;
- void *priv; /* bnad's cookie */
-};
-
-struct bna_rx_event_cbfn {
- /* Optional */
- void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
- void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
- void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
- void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
- /* Mandatory */
- void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
- void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
-};
-
-/* Rx module - keeps track of free, active rx objects */
-struct bna_rx_mod {
- struct bna *bna; /* back pointer to parent */
- struct bna_rx *rx; /* BFI_MAX_RXQ entries */
- struct bna_rxp *rxp; /* BFI_MAX_RXQ entries */
- struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */
-
- struct list_head rx_free_q;
- struct list_head rx_active_q;
- int rx_free_count;
-
- struct list_head rxp_free_q;
- int rxp_free_count;
-
- struct list_head rxq_free_q;
- int rxq_free_count;
-
- enum bna_rx_mod_flags flags;
-
- /* callback for bna_rx_mod_stop() */
- void (*stop_cbfn)(struct bna_port *port,
- enum bna_cb_status status);
-
- struct bfa_wc rx_stop_wc;
- u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
- u32 rxf_bmap[2];
-};
-
-/**
- *
- * CAM
- *
- */
-
-struct bna_ucam_mod {
- struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
- struct list_head free_q;
-
- struct bna *bna;
-};
-
-struct bna_mcam_mod {
- struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
- struct list_head free_q;
-
- struct bna *bna;
-};
-
-/**
- *
- * Statistics
- *
- */
-
-struct bna_tx_stats {
- int tx_state;
- int tx_flags;
- int num_txqs;
- u32 txq_bmap[2];
- int txf_id;
-};
-
-struct bna_rx_stats {
- int rx_state;
- int rx_flags;
- int num_rxps;
- int num_rxqs;
- u32 rxq_bmap[2];
- u32 cq_bmap[2];
- int rxf_id;
- int rxf_state;
- int rxf_oper_state;
- int num_active_ucast;
- int num_active_mcast;
- int rxmode_active;
- int vlan_filter_status;
- u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
- int rss_status;
- int hds_status;
-};
-
-struct bna_sw_stats {
- int device_state;
- int port_state;
- int port_flags;
- int llport_state;
- int priority;
- int num_active_tx;
- int num_active_rx;
- struct bna_tx_stats tx_stats[BFI_MAX_TXQ];
- struct bna_rx_stats rx_stats[BFI_MAX_RXQ];
-};
-
-struct bna_stats {
- u32 txf_bmap[2];
- u32 rxf_bmap[2];
- struct bfi_ll_stats *hw_stats;
- struct bna_sw_stats *sw_stats;
-};
-
-/**
- *
- * BNA
- *
- */
-
-struct bna {
- struct bfa_pcidev pcidev;
-
- int port_num;
-
- struct bna_chip_regs regs;
-
- struct bna_dma_addr hw_stats_dma;
- struct bna_stats stats;
-
- struct bna_device device;
- struct bfa_cee cee;
-
- struct bna_mbox_mod mbox_mod;
-
- struct bna_port port;
-
- struct bna_tx_mod tx_mod;
-
- struct bna_rx_mod rx_mod;
-
- struct bna_ib_mod ib_mod;
-
- struct bna_ucam_mod ucam_mod;
- struct bna_mcam_mod mcam_mod;
-
- struct bna_rit_mod rit_mod;
-
- int rxf_promisc_id;
-
- struct bna_mbox_qe mbox_qe;
-
- struct bnad *bnad;
-};
-
-#endif /* __BNA_TYPES_H__ */
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
deleted file mode 100644
index 458eb30371b..00000000000
--- a/drivers/net/bna/bnad.h
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-#ifndef __BNAD_H__
-#define __BNAD_H__
-
-#include <linux/rtnetlink.h>
-#include <linux/workqueue.h>
-#include <linux/ipv6.h>
-#include <linux/etherdevice.h>
-#include <linux/mutex.h>
-#include <linux/firmware.h>
-#include <linux/if_vlan.h>
-
-/* Fix for IA64 */
-#include <asm/checksum.h>
-#include <net/ip6_checksum.h>
-
-#include <net/ip.h>
-#include <net/tcp.h>
-
-#include "bna.h"
-
-#define BNAD_TXQ_DEPTH 2048
-#define BNAD_RXQ_DEPTH 2048
-
-#define BNAD_MAX_TXS 1
-#define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */
-#define BNAD_TXQ_NUM 1
-
-#define BNAD_MAX_RXS 1
-#define BNAD_MAX_RXPS_PER_RX 16
-
-/*
- * Control structure pointed to ccb->ctrl, which
- * determines the NAPI / LRO behavior CCB
- * There is 1:1 corres. between ccb & ctrl
- */
-struct bnad_rx_ctrl {
- struct bna_ccb *ccb;
- unsigned long flags;
- struct napi_struct napi;
-};
-
-#define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC
-
-#define BNAD_GET_TX_ID(_skb) (0)
-
-/*
- * GLOBAL #defines (CONSTANTS)
- */
-#define BNAD_NAME "bna"
-#define BNAD_NAME_LEN 64
-
-#define BNAD_VERSION "2.3.2.3"
-
-#define BNAD_MAILBOX_MSIX_INDEX 0
-#define BNAD_MAILBOX_MSIX_VECTORS 1
-#define BNAD_INTX_TX_IB_BITMASK 0x1
-#define BNAD_INTX_RX_IB_BITMASK 0x2
-
-#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
-#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
-
-#define BNAD_MAX_Q_DEPTH 0x10000
-#define BNAD_MIN_Q_DEPTH 0x200
-
-#define BNAD_JUMBO_MTU 9000
-
-#define BNAD_NETIF_WAKE_THRESHOLD 8
-
-#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT 3
-
-/* Bit positions for tcb->flags */
-#define BNAD_TXQ_FREE_SENT 0
-#define BNAD_TXQ_TX_STARTED 1
-
-/* Bit positions for rcb->flags */
-#define BNAD_RXQ_REFILL 0
-#define BNAD_RXQ_STARTED 1
-
-/*
- * DATA STRUCTURES
- */
-
-/* enums */
-enum bnad_intr_source {
- BNAD_INTR_TX = 1,
- BNAD_INTR_RX = 2
-};
-
-enum bnad_link_state {
- BNAD_LS_DOWN = 0,
- BNAD_LS_UP = 1
-};
-
-struct bnad_completion {
- struct completion ioc_comp;
- struct completion ucast_comp;
- struct completion mcast_comp;
- struct completion tx_comp;
- struct completion rx_comp;
- struct completion stats_comp;
- struct completion port_comp;
-
- u8 ioc_comp_status;
- u8 ucast_comp_status;
- u8 mcast_comp_status;
- u8 tx_comp_status;
- u8 rx_comp_status;
- u8 stats_comp_status;
- u8 port_comp_status;
-};
-
-/* Tx Rx Control Stats */
-struct bnad_drv_stats {
- u64 netif_queue_stop;
- u64 netif_queue_wakeup;
- u64 netif_queue_stopped;
- u64 tso4;
- u64 tso6;
- u64 tso_err;
- u64 tcpcsum_offload;
- u64 udpcsum_offload;
- u64 csum_help;
- u64 csum_help_err;
-
- u64 hw_stats_updates;
- u64 netif_rx_schedule;
- u64 netif_rx_complete;
- u64 netif_rx_dropped;
-
- u64 link_toggle;
- u64 cee_up;
-
- u64 rxp_info_alloc_failed;
- u64 mbox_intr_disabled;
- u64 mbox_intr_enabled;
- u64 tx_unmap_q_alloc_failed;
- u64 rx_unmap_q_alloc_failed;
-
- u64 rxbuf_alloc_failed;
-};
-
-/* Complete driver stats */
-struct bnad_stats {
- struct bnad_drv_stats drv_stats;
- struct bna_stats *bna_stats;
-};
-
-/* Tx / Rx Resources */
-struct bnad_tx_res_info {
- struct bna_res_info res_info[BNA_TX_RES_T_MAX];
-};
-
-struct bnad_rx_res_info {
- struct bna_res_info res_info[BNA_RX_RES_T_MAX];
-};
-
-struct bnad_tx_info {
- struct bna_tx *tx; /* 1:1 between tx_info & tx */
- struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
-} ____cacheline_aligned;
-
-struct bnad_rx_info {
- struct bna_rx *rx; /* 1:1 between rx_info & rx */
-
- struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX];
-} ____cacheline_aligned;
-
-/* Unmap queues for Tx / Rx cleanup */
-struct bnad_skb_unmap {
- struct sk_buff *skb;
- DEFINE_DMA_UNMAP_ADDR(dma_addr);
-};
-
-struct bnad_unmap_q {
- u32 producer_index;
- u32 consumer_index;
- u32 q_depth;
- /* This should be the last one */
- struct bnad_skb_unmap unmap_array[1];
-};
-
-/* Bit mask values for bnad->cfg_flags */
-#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
-#define BNAD_CF_PROMISC 0x02
-#define BNAD_CF_ALLMULTI 0x04
-#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
-
-/* Defines for run_flags bit-mask */
-/* Set, tested & cleared using xxx_bit() functions */
-/* Values indicated bit positions */
-#define BNAD_RF_CEE_RUNNING 1
-#define BNAD_RF_MBOX_IRQ_DISABLED 2
-#define BNAD_RF_RX_STARTED 3
-#define BNAD_RF_DIM_TIMER_RUNNING 4
-#define BNAD_RF_STATS_TIMER_RUNNING 5
-#define BNAD_RF_TX_SHUTDOWN_DELAYED 6
-#define BNAD_RF_RX_SHUTDOWN_DELAYED 7
-
-struct bnad {
- struct net_device *netdev;
-
- /* Data path */
- struct bnad_tx_info tx_info[BNAD_MAX_TXS];
- struct bnad_rx_info rx_info[BNAD_MAX_RXS];
-
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- /*
- * These q numbers are global only because
- * they are used to calculate MSIx vectors.
- * Actually the exact # of queues are per Tx/Rx
- * object.
- */
- u32 num_tx;
- u32 num_rx;
- u32 num_txq_per_tx;
- u32 num_rxp_per_rx;
-
- u32 txq_depth;
- u32 rxq_depth;
-
- u8 tx_coalescing_timeo;
- u8 rx_coalescing_timeo;
-
- struct bna_rx_config rx_config[BNAD_MAX_RXS];
- struct bna_tx_config tx_config[BNAD_MAX_TXS];
-
- void __iomem *bar0; /* BAR0 address */
-
- struct bna bna;
-
- u32 cfg_flags;
- unsigned long run_flags;
-
- struct pci_dev *pcidev;
- u64 mmio_start;
- u64 mmio_len;
-
- u32 msix_num;
- struct msix_entry *msix_table;
-
- struct mutex conf_mutex;
- spinlock_t bna_lock ____cacheline_aligned;
-
- /* Timers */
- struct timer_list ioc_timer;
- struct timer_list dim_timer;
- struct timer_list stats_timer;
-
- /* Control path resources, memory & irq */
- struct bna_res_info res_info[BNA_RES_T_MAX];
- struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS];
- struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS];
-
- struct bnad_completion bnad_completions;
-
- /* Burnt in MAC address */
- mac_t perm_addr;
-
- struct tasklet_struct tx_free_tasklet;
-
- /* Statistics */
- struct bnad_stats stats;
-
- struct bnad_diag *diag;
-
- char adapter_name[BNAD_NAME_LEN];
- char port_name[BNAD_NAME_LEN];
- char mbox_irq_name[BNAD_NAME_LEN];
-};
-
-/*
- * EXTERN VARIABLES
- */
-extern struct firmware *bfi_fw;
-extern u32 bnad_rxqs_per_cq;
-
-/*
- * EXTERN PROTOTYPES
- */
-extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
-/* Netdev entry point prototypes */
-extern void bnad_set_ethtool_ops(struct net_device *netdev);
-
-/* Configuration & setup */
-extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
-extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
-
-extern int bnad_setup_rx(struct bnad *bnad, uint rx_id);
-extern int bnad_setup_tx(struct bnad *bnad, uint tx_id);
-extern void bnad_cleanup_tx(struct bnad *bnad, uint tx_id);
-extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
-
-/* Timer start/stop protos */
-extern void bnad_dim_timer_start(struct bnad *bnad);
-
-/* Statistics */
-extern void bnad_netdev_qstats_fill(struct bnad *bnad,
- struct rtnl_link_stats64 *stats);
-extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
- struct rtnl_link_stats64 *stats);
-
-/**
- * MACROS
- */
-/* To set & get the stats counters */
-#define BNAD_UPDATE_CTR(_bnad, _ctr) \
- (((_bnad)->stats.drv_stats._ctr)++)
-
-#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr)
-
-#define bnad_enable_rx_irq_unsafe(_ccb) \
-{ \
- if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) {\
- bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
- (_ccb)->rx_coalescing_timeo); \
- bna_ib_ack((_ccb)->i_dbell, 0); \
- } \
-}
-
-#define bnad_dim_timer_running(_bnad) \
- (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && \
- (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags))))
-
-#endif /* __BNAD_H__ */
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
deleted file mode 100644
index fea07f19a5d..00000000000
--- a/drivers/net/bna/bnad_ethtool.c
+++ /dev/null
@@ -1,1214 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-
-#include "cna.h"
-
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/ethtool.h>
-#include <linux/rtnetlink.h>
-
-#include "bna.h"
-
-#include "bnad.h"
-
-#define BNAD_NUM_TXF_COUNTERS 12
-#define BNAD_NUM_RXF_COUNTERS 10
-#define BNAD_NUM_CQ_COUNTERS 3
-#define BNAD_NUM_RXQ_COUNTERS 6
-#define BNAD_NUM_TXQ_COUNTERS 5
-
-#define BNAD_ETHTOOL_STATS_NUM \
- (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
- sizeof(struct bnad_drv_stats) / sizeof(u64) + \
- offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64))
-
-static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
- "rx_packets",
- "tx_packets",
- "rx_bytes",
- "tx_bytes",
- "rx_errors",
- "tx_errors",
- "rx_dropped",
- "tx_dropped",
- "multicast",
- "collisions",
-
- "rx_length_errors",
- "rx_over_errors",
- "rx_crc_errors",
- "rx_frame_errors",
- "rx_fifo_errors",
- "rx_missed_errors",
-
- "tx_aborted_errors",
- "tx_carrier_errors",
- "tx_fifo_errors",
- "tx_heartbeat_errors",
- "tx_window_errors",
-
- "rx_compressed",
- "tx_compressed",
-
- "netif_queue_stop",
- "netif_queue_wakeup",
- "netif_queue_stopped",
- "tso4",
- "tso6",
- "tso_err",
- "tcpcsum_offload",
- "udpcsum_offload",
- "csum_help",
- "csum_help_err",
- "hw_stats_updates",
- "netif_rx_schedule",
- "netif_rx_complete",
- "netif_rx_dropped",
-
- "link_toggle",
- "cee_up",
-
- "rxp_info_alloc_failed",
- "mbox_intr_disabled",
- "mbox_intr_enabled",
- "tx_unmap_q_alloc_failed",
- "rx_unmap_q_alloc_failed",
- "rxbuf_alloc_failed",
-
- "mac_frame_64",
- "mac_frame_65_127",
- "mac_frame_128_255",
- "mac_frame_256_511",
- "mac_frame_512_1023",
- "mac_frame_1024_1518",
- "mac_frame_1518_1522",
- "mac_rx_bytes",
- "mac_rx_packets",
- "mac_rx_fcs_error",
- "mac_rx_multicast",
- "mac_rx_broadcast",
- "mac_rx_control_frames",
- "mac_rx_pause",
- "mac_rx_unknown_opcode",
- "mac_rx_alignment_error",
- "mac_rx_frame_length_error",
- "mac_rx_code_error",
- "mac_rx_carrier_sense_error",
- "mac_rx_undersize",
- "mac_rx_oversize",
- "mac_rx_fragments",
- "mac_rx_jabber",
- "mac_rx_drop",
-
- "mac_tx_bytes",
- "mac_tx_packets",
- "mac_tx_multicast",
- "mac_tx_broadcast",
- "mac_tx_pause",
- "mac_tx_deferral",
- "mac_tx_excessive_deferral",
- "mac_tx_single_collision",
- "mac_tx_muliple_collision",
- "mac_tx_late_collision",
- "mac_tx_excessive_collision",
- "mac_tx_total_collision",
- "mac_tx_pause_honored",
- "mac_tx_drop",
- "mac_tx_jabber",
- "mac_tx_fcs_error",
- "mac_tx_control_frame",
- "mac_tx_oversize",
- "mac_tx_undersize",
- "mac_tx_fragments",
-
- "bpc_tx_pause_0",
- "bpc_tx_pause_1",
- "bpc_tx_pause_2",
- "bpc_tx_pause_3",
- "bpc_tx_pause_4",
- "bpc_tx_pause_5",
- "bpc_tx_pause_6",
- "bpc_tx_pause_7",
- "bpc_tx_zero_pause_0",
- "bpc_tx_zero_pause_1",
- "bpc_tx_zero_pause_2",
- "bpc_tx_zero_pause_3",
- "bpc_tx_zero_pause_4",
- "bpc_tx_zero_pause_5",
- "bpc_tx_zero_pause_6",
- "bpc_tx_zero_pause_7",
- "bpc_tx_first_pause_0",
- "bpc_tx_first_pause_1",
- "bpc_tx_first_pause_2",
- "bpc_tx_first_pause_3",
- "bpc_tx_first_pause_4",
- "bpc_tx_first_pause_5",
- "bpc_tx_first_pause_6",
- "bpc_tx_first_pause_7",
-
- "bpc_rx_pause_0",
- "bpc_rx_pause_1",
- "bpc_rx_pause_2",
- "bpc_rx_pause_3",
- "bpc_rx_pause_4",
- "bpc_rx_pause_5",
- "bpc_rx_pause_6",
- "bpc_rx_pause_7",
- "bpc_rx_zero_pause_0",
- "bpc_rx_zero_pause_1",
- "bpc_rx_zero_pause_2",
- "bpc_rx_zero_pause_3",
- "bpc_rx_zero_pause_4",
- "bpc_rx_zero_pause_5",
- "bpc_rx_zero_pause_6",
- "bpc_rx_zero_pause_7",
- "bpc_rx_first_pause_0",
- "bpc_rx_first_pause_1",
- "bpc_rx_first_pause_2",
- "bpc_rx_first_pause_3",
- "bpc_rx_first_pause_4",
- "bpc_rx_first_pause_5",
- "bpc_rx_first_pause_6",
- "bpc_rx_first_pause_7",
-
- "rad_rx_frames",
- "rad_rx_octets",
- "rad_rx_vlan_frames",
- "rad_rx_ucast",
- "rad_rx_ucast_octets",
- "rad_rx_ucast_vlan",
- "rad_rx_mcast",
- "rad_rx_mcast_octets",
- "rad_rx_mcast_vlan",
- "rad_rx_bcast",
- "rad_rx_bcast_octets",
- "rad_rx_bcast_vlan",
- "rad_rx_drops",
-
- "fc_rx_ucast_octets",
- "fc_rx_ucast",
- "fc_rx_ucast_vlan",
- "fc_rx_mcast_octets",
- "fc_rx_mcast",
- "fc_rx_mcast_vlan",
- "fc_rx_bcast_octets",
- "fc_rx_bcast",
- "fc_rx_bcast_vlan",
-
- "fc_tx_ucast_octets",
- "fc_tx_ucast",
- "fc_tx_ucast_vlan",
- "fc_tx_mcast_octets",
- "fc_tx_mcast",
- "fc_tx_mcast_vlan",
- "fc_tx_bcast_octets",
- "fc_tx_bcast",
- "fc_tx_bcast_vlan",
- "fc_tx_parity_errors",
- "fc_tx_timeout",
- "fc_tx_fid_parity_errors",
-};
-
-static int
-bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
-{
- cmd->supported = SUPPORTED_10000baseT_Full;
- cmd->advertising = ADVERTISED_10000baseT_Full;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_FIBRE;
- cmd->port = PORT_FIBRE;
- cmd->phy_address = 0;
-
- if (netif_carrier_ok(netdev)) {
- ethtool_cmd_speed_set(cmd, SPEED_10000);
- cmd->duplex = DUPLEX_FULL;
- } else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
- }
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
-
- return 0;
-}
-
-static int
-bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
-{
- /* 10G full duplex setting supported only */
- if (cmd->autoneg == AUTONEG_ENABLE)
- return -EOPNOTSUPP; else {
- if ((ethtool_cmd_speed(cmd) == SPEED_10000)
- && (cmd->duplex == DUPLEX_FULL))
- return 0;
- }
-
- return -EOPNOTSUPP;
-}
-
-static void
-bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
-{
- struct bnad *bnad = netdev_priv(netdev);
- struct bfa_ioc_attr *ioc_attr;
- unsigned long flags;
-
- strcpy(drvinfo->driver, BNAD_NAME);
- strcpy(drvinfo->version, BNAD_VERSION);
-
- ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
- if (ioc_attr) {
- spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
- strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
- sizeof(drvinfo->fw_version) - 1);
- kfree(ioc_attr);
- }
-
- strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
-}
-
-static int
-get_regs(struct bnad *bnad, u32 * regs)
-{
- int num = 0, i;
- u32 reg_addr;
- unsigned long flags;
-
-#define BNAD_GET_REG(addr) \
-do { \
- if (regs) \
- regs[num++] = readl(bnad->bar0 + (addr)); \
- else \
- num++; \
-} while (0)
-
- spin_lock_irqsave(&bnad->bna_lock, flags);
-
- /* DMA Block Internal Registers */
- BNAD_GET_REG(DMA_CTRL_REG0);
- BNAD_GET_REG(DMA_CTRL_REG1);
- BNAD_GET_REG(DMA_ERR_INT_STATUS);
- BNAD_GET_REG(DMA_ERR_INT_ENABLE);
- BNAD_GET_REG(DMA_ERR_INT_STATUS_SET);
-
- /* APP Block Register Address Offset from BAR0 */
- BNAD_GET_REG(HOSTFN0_INT_STATUS);
- BNAD_GET_REG(HOSTFN0_INT_MASK);
- BNAD_GET_REG(HOST_PAGE_NUM_FN0);
- BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN0);
- BNAD_GET_REG(FN0_PCIE_ERR_REG);
- BNAD_GET_REG(FN0_ERR_TYPE_STATUS_REG);
- BNAD_GET_REG(FN0_ERR_TYPE_MSK_STATUS_REG);
-
- BNAD_GET_REG(HOSTFN1_INT_STATUS);
- BNAD_GET_REG(HOSTFN1_INT_MASK);
- BNAD_GET_REG(HOST_PAGE_NUM_FN1);
- BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN1);
- BNAD_GET_REG(FN1_PCIE_ERR_REG);
- BNAD_GET_REG(FN1_ERR_TYPE_STATUS_REG);
- BNAD_GET_REG(FN1_ERR_TYPE_MSK_STATUS_REG);
-
- BNAD_GET_REG(PCIE_MISC_REG);
-
- BNAD_GET_REG(HOST_SEM0_INFO_REG);
- BNAD_GET_REG(HOST_SEM1_INFO_REG);
- BNAD_GET_REG(HOST_SEM2_INFO_REG);
- BNAD_GET_REG(HOST_SEM3_INFO_REG);
-
- BNAD_GET_REG(TEMPSENSE_CNTL_REG);
- BNAD_GET_REG(TEMPSENSE_STAT_REG);
-
- BNAD_GET_REG(APP_LOCAL_ERR_STAT);
- BNAD_GET_REG(APP_LOCAL_ERR_MSK);
-
- BNAD_GET_REG(PCIE_LNK_ERR_STAT);
- BNAD_GET_REG(PCIE_LNK_ERR_MSK);
-
- BNAD_GET_REG(FCOE_FIP_ETH_TYPE);
- BNAD_GET_REG(RESV_ETH_TYPE);
-
- BNAD_GET_REG(HOSTFN2_INT_STATUS);
- BNAD_GET_REG(HOSTFN2_INT_MASK);
- BNAD_GET_REG(HOST_PAGE_NUM_FN2);
- BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN2);
- BNAD_GET_REG(FN2_PCIE_ERR_REG);
- BNAD_GET_REG(FN2_ERR_TYPE_STATUS_REG);
- BNAD_GET_REG(FN2_ERR_TYPE_MSK_STATUS_REG);
-
- BNAD_GET_REG(HOSTFN3_INT_STATUS);
- BNAD_GET_REG(HOSTFN3_INT_MASK);
- BNAD_GET_REG(HOST_PAGE_NUM_FN3);
- BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN3);
- BNAD_GET_REG(FN3_PCIE_ERR_REG);
- BNAD_GET_REG(FN3_ERR_TYPE_STATUS_REG);
- BNAD_GET_REG(FN3_ERR_TYPE_MSK_STATUS_REG);
-
- /* Host Command Status Registers */
- reg_addr = HOST_CMDSTS0_CLR_REG;
- for (i = 0; i < 16; i++) {
- BNAD_GET_REG(reg_addr);
- BNAD_GET_REG(reg_addr + 4);
- BNAD_GET_REG(reg_addr + 8);
- reg_addr += 0x10;
- }
-
- /* Function ID register */
- BNAD_GET_REG(FNC_ID_REG);
-
- /* Function personality register */
- BNAD_GET_REG(FNC_PERS_REG);
-
- /* Operation mode register */
- BNAD_GET_REG(OP_MODE);
-
- /* LPU0 Registers */
- BNAD_GET_REG(LPU0_MBOX_CTL_REG);
- BNAD_GET_REG(LPU0_MBOX_CMD_REG);
- BNAD_GET_REG(LPU0_MBOX_LINK_0REG);
- BNAD_GET_REG(LPU1_MBOX_LINK_0REG);
- BNAD_GET_REG(LPU0_MBOX_STATUS_0REG);
- BNAD_GET_REG(LPU1_MBOX_STATUS_0REG);
- BNAD_GET_REG(LPU0_ERR_STATUS_REG);
- BNAD_GET_REG(LPU0_ERR_SET_REG);
-
- /* LPU1 Registers */
- BNAD_GET_REG(LPU1_MBOX_CTL_REG);
- BNAD_GET_REG(LPU1_MBOX_CMD_REG);
- BNAD_GET_REG(LPU0_MBOX_LINK_1REG);
- BNAD_GET_REG(LPU1_MBOX_LINK_1REG);
- BNAD_GET_REG(LPU0_MBOX_STATUS_1REG);
- BNAD_GET_REG(LPU1_MBOX_STATUS_1REG);
- BNAD_GET_REG(LPU1_ERR_STATUS_REG);
- BNAD_GET_REG(LPU1_ERR_SET_REG);
-
- /* PSS Registers */
- BNAD_GET_REG(PSS_CTL_REG);
- BNAD_GET_REG(PSS_ERR_STATUS_REG);
- BNAD_GET_REG(ERR_STATUS_SET);
- BNAD_GET_REG(PSS_RAM_ERR_STATUS_REG);
-
- /* Catapult CPQ Registers */
- BNAD_GET_REG(HOSTFN0_LPU0_MBOX0_CMD_STAT);
- BNAD_GET_REG(HOSTFN0_LPU1_MBOX0_CMD_STAT);
- BNAD_GET_REG(LPU0_HOSTFN0_MBOX0_CMD_STAT);
- BNAD_GET_REG(LPU1_HOSTFN0_MBOX0_CMD_STAT);
-
- BNAD_GET_REG(HOSTFN0_LPU0_MBOX1_CMD_STAT);
- BNAD_GET_REG(HOSTFN0_LPU1_MBOX1_CMD_STAT);
- BNAD_GET_REG(LPU0_HOSTFN0_MBOX1_CMD_STAT);
- BNAD_GET_REG(LPU1_HOSTFN0_MBOX1_CMD_STAT);
-
- BNAD_GET_REG(HOSTFN1_LPU0_MBOX0_CMD_STAT);
- BNAD_GET_REG(HOSTFN1_LPU1_MBOX0_CMD_STAT);
- BNAD_GET_REG(LPU0_HOSTFN1_MBOX0_CMD_STAT);
- BNAD_GET_REG(LPU1_HOSTFN1_MBOX0_CMD_STAT);
-
- BNAD_GET_REG(HOSTFN1_LPU0_MBOX1_CMD_STAT);
- BNAD_GET_REG(HOSTFN1_LPU1_MBOX1_CMD_STAT);
- BNAD_GET_REG(LPU0_HOSTFN1_MBOX1_CMD_STAT);
- BNAD_GET_REG(LPU1_HOSTFN1_MBOX1_CMD_STAT);
-
- BNAD_GET_REG(HOSTFN2_LPU0_MBOX0_CMD_STAT);
- BNAD_GET_REG(HOSTFN2_LPU1_MBOX0_CMD_STAT);
- BNAD_GET_REG(LPU0_HOSTFN2_MBOX0_CMD_STAT);
- BNAD_GET_REG(LPU1_HOSTFN2_MBOX0_CMD_STAT);
-
- BNAD_GET_REG(HOSTFN2_LPU0_MBOX1_CMD_STAT);
- BNAD_GET_REG(HOSTFN2_LPU1_MBOX1_CMD_STAT);
- BNAD_GET_REG(LPU0_HOSTFN2_MBOX1_CMD_STAT);
- BNAD_GET_REG(LPU1_HOSTFN2_MBOX1_CMD_STAT);
-
- BNAD_GET_REG(HOSTFN3_LPU0_MBOX0_CMD_STAT);
- BNAD_GET_REG(HOSTFN3_LPU1_MBOX0_CMD_STAT);
- BNAD_GET_REG(LPU0_HOSTFN3_MBOX0_CMD_STAT);
- BNAD_GET_REG(LPU1_HOSTFN3_MBOX0_CMD_STAT);
-
- BNAD_GET_REG(HOSTFN3_LPU0_MBOX1_CMD_STAT);
- BNAD_GET_REG(HOSTFN3_LPU1_MBOX1_CMD_STAT);
- BNAD_GET_REG(LPU0_HOSTFN3_MBOX1_CMD_STAT);
- BNAD_GET_REG(LPU1_HOSTFN3_MBOX1_CMD_STAT);
-
- /* Host Function Force Parity Error Registers */
- BNAD_GET_REG(HOSTFN0_LPU_FORCE_PERR);
- BNAD_GET_REG(HOSTFN1_LPU_FORCE_PERR);
- BNAD_GET_REG(HOSTFN2_LPU_FORCE_PERR);
- BNAD_GET_REG(HOSTFN3_LPU_FORCE_PERR);
-
- /* LL Port[0|1] Halt Mask Registers */
- BNAD_GET_REG(LL_HALT_MSK_P0);
- BNAD_GET_REG(LL_HALT_MSK_P1);
-
- /* LL Port[0|1] Error Mask Registers */
- BNAD_GET_REG(LL_ERR_MSK_P0);
- BNAD_GET_REG(LL_ERR_MSK_P1);
-
- /* EMC FLI Registers */
- BNAD_GET_REG(FLI_CMD_REG);
- BNAD_GET_REG(FLI_ADDR_REG);
- BNAD_GET_REG(FLI_CTL_REG);
- BNAD_GET_REG(FLI_WRDATA_REG);
- BNAD_GET_REG(FLI_RDDATA_REG);
- BNAD_GET_REG(FLI_DEV_STATUS_REG);
- BNAD_GET_REG(FLI_SIG_WD_REG);
-
- BNAD_GET_REG(FLI_DEV_VENDOR_REG);
- BNAD_GET_REG(FLI_ERR_STATUS_REG);
-
- /* RxAdm 0 Registers */
- BNAD_GET_REG(RAD0_CTL_REG);
- BNAD_GET_REG(RAD0_PE_PARM_REG);
- BNAD_GET_REG(RAD0_BCN_REG);
- BNAD_GET_REG(RAD0_DEFAULT_REG);
- BNAD_GET_REG(RAD0_PROMISC_REG);
- BNAD_GET_REG(RAD0_BCNQ_REG);
- BNAD_GET_REG(RAD0_DEFAULTQ_REG);
-
- BNAD_GET_REG(RAD0_ERR_STS);
- BNAD_GET_REG(RAD0_SET_ERR_STS);
- BNAD_GET_REG(RAD0_ERR_INT_EN);
- BNAD_GET_REG(RAD0_FIRST_ERR);
- BNAD_GET_REG(RAD0_FORCE_ERR);
-
- BNAD_GET_REG(RAD0_MAC_MAN_1H);
- BNAD_GET_REG(RAD0_MAC_MAN_1L);
- BNAD_GET_REG(RAD0_MAC_MAN_2H);
- BNAD_GET_REG(RAD0_MAC_MAN_2L);
- BNAD_GET_REG(RAD0_MAC_MAN_3H);
- BNAD_GET_REG(RAD0_MAC_MAN_3L);
- BNAD_GET_REG(RAD0_MAC_MAN_4H);
- BNAD_GET_REG(RAD0_MAC_MAN_4L);
-
- BNAD_GET_REG(RAD0_LAST4_IP);
-
- /* RxAdm 1 Registers */
- BNAD_GET_REG(RAD1_CTL_REG);
- BNAD_GET_REG(RAD1_PE_PARM_REG);
- BNAD_GET_REG(RAD1_BCN_REG);
- BNAD_GET_REG(RAD1_DEFAULT_REG);
- BNAD_GET_REG(RAD1_PROMISC_REG);
- BNAD_GET_REG(RAD1_BCNQ_REG);
- BNAD_GET_REG(RAD1_DEFAULTQ_REG);
-
- BNAD_GET_REG(RAD1_ERR_STS);
- BNAD_GET_REG(RAD1_SET_ERR_STS);
- BNAD_GET_REG(RAD1_ERR_INT_EN);
-
- /* TxA0 Registers */
- BNAD_GET_REG(TXA0_CTRL_REG);
- /* TxA0 TSO Sequence # Registers (RO) */
- for (i = 0; i < 8; i++) {
- BNAD_GET_REG(TXA0_TSO_TCP_SEQ_REG(i));
- BNAD_GET_REG(TXA0_TSO_IP_INFO_REG(i));
- }
-
- /* TxA1 Registers */
- BNAD_GET_REG(TXA1_CTRL_REG);
- /* TxA1 TSO Sequence # Registers (RO) */
- for (i = 0; i < 8; i++) {
- BNAD_GET_REG(TXA1_TSO_TCP_SEQ_REG(i));
- BNAD_GET_REG(TXA1_TSO_IP_INFO_REG(i));
- }
-
- /* RxA Registers */
- BNAD_GET_REG(RXA0_CTL_REG);
- BNAD_GET_REG(RXA1_CTL_REG);
-
- /* PLB0 Registers */
- BNAD_GET_REG(PLB0_ECM_TIMER_REG);
- BNAD_GET_REG(PLB0_RL_CTL);
- for (i = 0; i < 8; i++)
- BNAD_GET_REG(PLB0_RL_MAX_BC(i));
- BNAD_GET_REG(PLB0_RL_TU_PRIO);
- for (i = 0; i < 8; i++)
- BNAD_GET_REG(PLB0_RL_BYTE_CNT(i));
- BNAD_GET_REG(PLB0_RL_MIN_REG);
- BNAD_GET_REG(PLB0_RL_MAX_REG);
- BNAD_GET_REG(PLB0_EMS_ADD_REG);
-
- /* PLB1 Registers */
- BNAD_GET_REG(PLB1_ECM_TIMER_REG);
- BNAD_GET_REG(PLB1_RL_CTL);
- for (i = 0; i < 8; i++)
- BNAD_GET_REG(PLB1_RL_MAX_BC(i));
- BNAD_GET_REG(PLB1_RL_TU_PRIO);
- for (i = 0; i < 8; i++)
- BNAD_GET_REG(PLB1_RL_BYTE_CNT(i));
- BNAD_GET_REG(PLB1_RL_MIN_REG);
- BNAD_GET_REG(PLB1_RL_MAX_REG);
- BNAD_GET_REG(PLB1_EMS_ADD_REG);
-
- /* HQM Control Register */
- BNAD_GET_REG(HQM0_CTL_REG);
- BNAD_GET_REG(HQM0_RXQ_STOP_SEM);
- BNAD_GET_REG(HQM0_TXQ_STOP_SEM);
- BNAD_GET_REG(HQM1_CTL_REG);
- BNAD_GET_REG(HQM1_RXQ_STOP_SEM);
- BNAD_GET_REG(HQM1_TXQ_STOP_SEM);
-
- /* LUT Registers */
- BNAD_GET_REG(LUT0_ERR_STS);
- BNAD_GET_REG(LUT0_SET_ERR_STS);
- BNAD_GET_REG(LUT1_ERR_STS);
- BNAD_GET_REG(LUT1_SET_ERR_STS);
-
- /* TRC Registers */
- BNAD_GET_REG(TRC_CTL_REG);
- BNAD_GET_REG(TRC_MODS_REG);
- BNAD_GET_REG(TRC_TRGC_REG);
- BNAD_GET_REG(TRC_CNT1_REG);
- BNAD_GET_REG(TRC_CNT2_REG);
- BNAD_GET_REG(TRC_NXTS_REG);
- BNAD_GET_REG(TRC_DIRR_REG);
- for (i = 0; i < 10; i++)
- BNAD_GET_REG(TRC_TRGM_REG(i));
- for (i = 0; i < 10; i++)
- BNAD_GET_REG(TRC_NXTM_REG(i));
- for (i = 0; i < 10; i++)
- BNAD_GET_REG(TRC_STRM_REG(i));
-
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
-#undef BNAD_GET_REG
- return num;
-}
-static int
-bnad_get_regs_len(struct net_device *netdev)
-{
- int ret = get_regs(netdev_priv(netdev), NULL) * sizeof(u32);
- return ret;
-}
-
-static void
-bnad_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
-{
- memset(buf, 0, bnad_get_regs_len(netdev));
- get_regs(netdev_priv(netdev), buf);
-}
-
-static void
-bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
-{
- wolinfo->supported = 0;
- wolinfo->wolopts = 0;
-}
-
-static int
-bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
-{
- struct bnad *bnad = netdev_priv(netdev);
- unsigned long flags;
-
- /* Lock rqd. to access bnad->bna_lock */
- spin_lock_irqsave(&bnad->bna_lock, flags);
- coalesce->use_adaptive_rx_coalesce =
- (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
- coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
- BFI_COALESCING_TIMER_UNIT;
- coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
- BFI_COALESCING_TIMER_UNIT;
- coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;
-
- return 0;
-}
-
-static int
-bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
-{
- struct bnad *bnad = netdev_priv(netdev);
- unsigned long flags;
- int dim_timer_del = 0;
-
- if (coalesce->rx_coalesce_usecs == 0 ||
- coalesce->rx_coalesce_usecs >
- BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
- return -EINVAL;
-
- if (coalesce->tx_coalesce_usecs == 0 ||
- coalesce->tx_coalesce_usecs >
- BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
- return -EINVAL;
-
- mutex_lock(&bnad->conf_mutex);
- /*
- * Do not need to store rx_coalesce_usecs here
- * Every time DIM is disabled, we can get it from the
- * stack.
- */
- spin_lock_irqsave(&bnad->bna_lock, flags);
- if (coalesce->use_adaptive_rx_coalesce) {
- if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
- bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
- bnad_dim_timer_start(bnad);
- }
- } else {
- if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
- bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
- dim_timer_del = bnad_dim_timer_running(bnad);
- if (dim_timer_del) {
- clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
- &bnad->run_flags);
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
- del_timer_sync(&bnad->dim_timer);
- spin_lock_irqsave(&bnad->bna_lock, flags);
- }
- bnad_rx_coalescing_timeo_set(bnad);
- }
- }
- if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
- BFI_COALESCING_TIMER_UNIT) {
- bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
- BFI_COALESCING_TIMER_UNIT;
- bnad_tx_coalescing_timeo_set(bnad);
- }
-
- if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
- BFI_COALESCING_TIMER_UNIT) {
- bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
- BFI_COALESCING_TIMER_UNIT;
-
- if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
- bnad_rx_coalescing_timeo_set(bnad);
-
- }
-
- /* Add Tx Inter-pkt DMA count? */
-
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
- mutex_unlock(&bnad->conf_mutex);
- return 0;
-}
-
-static void
-bnad_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
-{
- struct bnad *bnad = netdev_priv(netdev);
-
- ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
- ringparam->rx_mini_max_pending = 0;
- ringparam->rx_jumbo_max_pending = 0;
- ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;
-
- ringparam->rx_pending = bnad->rxq_depth;
- ringparam->rx_mini_max_pending = 0;
- ringparam->rx_jumbo_max_pending = 0;
- ringparam->tx_pending = bnad->txq_depth;
-}
-
-static int
-bnad_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
-{
- int i, current_err, err = 0;
- struct bnad *bnad = netdev_priv(netdev);
-
- mutex_lock(&bnad->conf_mutex);
- if (ringparam->rx_pending == bnad->rxq_depth &&
- ringparam->tx_pending == bnad->txq_depth) {
- mutex_unlock(&bnad->conf_mutex);
- return 0;
- }
-
- if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
- ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
- !BNA_POWER_OF_2(ringparam->rx_pending)) {
- mutex_unlock(&bnad->conf_mutex);
- return -EINVAL;
- }
- if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
- ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
- !BNA_POWER_OF_2(ringparam->tx_pending)) {
- mutex_unlock(&bnad->conf_mutex);
- return -EINVAL;
- }
-
- if (ringparam->rx_pending != bnad->rxq_depth) {
- bnad->rxq_depth = ringparam->rx_pending;
- for (i = 0; i < bnad->num_rx; i++) {
- if (!bnad->rx_info[i].rx)
- continue;
- bnad_cleanup_rx(bnad, i);
- current_err = bnad_setup_rx(bnad, i);
- if (current_err && !err)
- err = current_err;
- }
- }
- if (ringparam->tx_pending != bnad->txq_depth) {
- bnad->txq_depth = ringparam->tx_pending;
- for (i = 0; i < bnad->num_tx; i++) {
- if (!bnad->tx_info[i].tx)
- continue;
- bnad_cleanup_tx(bnad, i);
- current_err = bnad_setup_tx(bnad, i);
- if (current_err && !err)
- err = current_err;
- }
- }
-
- mutex_unlock(&bnad->conf_mutex);
- return err;
-}
-
-static void
-bnad_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pauseparam)
-{
- struct bnad *bnad = netdev_priv(netdev);
-
- pauseparam->autoneg = 0;
- pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause;
- pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause;
-}
-
-static int
-bnad_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pauseparam)
-{
- struct bnad *bnad = netdev_priv(netdev);
- struct bna_pause_config pause_config;
- unsigned long flags;
-
- if (pauseparam->autoneg == AUTONEG_ENABLE)
- return -EINVAL;
-
- mutex_lock(&bnad->conf_mutex);
- if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause ||
- pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) {
- pause_config.rx_pause = pauseparam->rx_pause;
- pause_config.tx_pause = pauseparam->tx_pause;
- spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
- }
- mutex_unlock(&bnad->conf_mutex);
- return 0;
-}
-
-static void
-bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
-{
- struct bnad *bnad = netdev_priv(netdev);
- int i, j, q_num;
- u64 bmap;
-
- mutex_lock(&bnad->conf_mutex);
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
- BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
- ETH_GSTRING_LEN));
- memcpy(string, bnad_net_stats_strings[i],
- ETH_GSTRING_LEN);
- string += ETH_GSTRING_LEN;
- }
- bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
- ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
- for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
- if (bmap & 1) {
- sprintf(string, "txf%d_ucast_octets", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_ucast", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_ucast_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_mcast_octets", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_mcast", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_mcast_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_bcast_octets", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_bcast", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_bcast_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_errors", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_filter_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_filter_mac_sa", i);
- string += ETH_GSTRING_LEN;
- }
- bmap >>= 1;
- }
-
- bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
- ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
- for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
- if (bmap & 1) {
- sprintf(string, "rxf%d_ucast_octets", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_ucast", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_ucast_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_mcast_octets", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_mcast", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_mcast_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_bcast_octets", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_bcast", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_bcast_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_frame_drops", i);
- string += ETH_GSTRING_LEN;
- }
- bmap >>= 1;
- }
-
- q_num = 0;
- for (i = 0; i < bnad->num_rx; i++) {
- if (!bnad->rx_info[i].rx)
- continue;
- for (j = 0; j < bnad->num_rxp_per_rx; j++) {
- sprintf(string, "cq%d_producer_index", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "cq%d_consumer_index", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "cq%d_hw_producer_index",
- q_num);
- string += ETH_GSTRING_LEN;
- q_num++;
- }
- }
-
- q_num = 0;
- for (i = 0; i < bnad->num_rx; i++) {
- if (!bnad->rx_info[i].rx)
- continue;
- for (j = 0; j < bnad->num_rxp_per_rx; j++) {
- sprintf(string, "rxq%d_packets", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_bytes", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_packets_with_error",
- q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_allocbuf_failed", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_producer_index", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_consumer_index", q_num);
- string += ETH_GSTRING_LEN;
- q_num++;
- if (bnad->rx_info[i].rx_ctrl[j].ccb &&
- bnad->rx_info[i].rx_ctrl[j].ccb->
- rcb[1] &&
- bnad->rx_info[i].rx_ctrl[j].ccb->
- rcb[1]->rxq) {
- sprintf(string, "rxq%d_packets", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_bytes", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string,
- "rxq%d_packets_with_error", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_allocbuf_failed",
- q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_producer_index",
- q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_consumer_index",
- q_num);
- string += ETH_GSTRING_LEN;
- q_num++;
- }
- }
- }
-
- q_num = 0;
- for (i = 0; i < bnad->num_tx; i++) {
- if (!bnad->tx_info[i].tx)
- continue;
- for (j = 0; j < bnad->num_txq_per_tx; j++) {
- sprintf(string, "txq%d_packets", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txq%d_bytes", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txq%d_producer_index", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txq%d_consumer_index", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txq%d_hw_consumer_index",
- q_num);
- string += ETH_GSTRING_LEN;
- q_num++;
- }
- }
-
- break;
-
- default:
- break;
- }
-
- mutex_unlock(&bnad->conf_mutex);
-}
-
-static int
-bnad_get_stats_count_locked(struct net_device *netdev)
-{
- struct bnad *bnad = netdev_priv(netdev);
- int i, j, count, rxf_active_num = 0, txf_active_num = 0;
- u64 bmap;
-
- bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
- ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
- for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
- if (bmap & 1)
- txf_active_num++;
- bmap >>= 1;
- }
- bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
- ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
- for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
- if (bmap & 1)
- rxf_active_num++;
- bmap >>= 1;
- }
- count = BNAD_ETHTOOL_STATS_NUM +
- txf_active_num * BNAD_NUM_TXF_COUNTERS +
- rxf_active_num * BNAD_NUM_RXF_COUNTERS;
-
- for (i = 0; i < bnad->num_rx; i++) {
- if (!bnad->rx_info[i].rx)
- continue;
- count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
- count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
- for (j = 0; j < bnad->num_rxp_per_rx; j++)
- if (bnad->rx_info[i].rx_ctrl[j].ccb &&
- bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
- bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
- count += BNAD_NUM_RXQ_COUNTERS;
- }
-
- for (i = 0; i < bnad->num_tx; i++) {
- if (!bnad->tx_info[i].tx)
- continue;
- count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
- }
- return count;
-}
-
-static int
-bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
-{
- int i, j;
- struct bna_rcb *rcb = NULL;
- struct bna_tcb *tcb = NULL;
-
- for (i = 0; i < bnad->num_rx; i++) {
- if (!bnad->rx_info[i].rx)
- continue;
- for (j = 0; j < bnad->num_rxp_per_rx; j++)
- if (bnad->rx_info[i].rx_ctrl[j].ccb &&
- bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
- bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
- buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
- ccb->producer_index;
- buf[bi++] = 0; /* ccb->consumer_index */
- buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
- ccb->hw_producer_index);
- }
- }
- for (i = 0; i < bnad->num_rx; i++) {
- if (!bnad->rx_info[i].rx)
- continue;
- for (j = 0; j < bnad->num_rxp_per_rx; j++)
- if (bnad->rx_info[i].rx_ctrl[j].ccb) {
- if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
- bnad->rx_info[i].rx_ctrl[j].ccb->
- rcb[0]->rxq) {
- rcb = bnad->rx_info[i].rx_ctrl[j].
- ccb->rcb[0];
- buf[bi++] = rcb->rxq->rx_packets;
- buf[bi++] = rcb->rxq->rx_bytes;
- buf[bi++] = rcb->rxq->
- rx_packets_with_error;
- buf[bi++] = rcb->rxq->
- rxbuf_alloc_failed;
- buf[bi++] = rcb->producer_index;
- buf[bi++] = rcb->consumer_index;
- }
- if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
- bnad->rx_info[i].rx_ctrl[j].ccb->
- rcb[1]->rxq) {
- rcb = bnad->rx_info[i].rx_ctrl[j].
- ccb->rcb[1];
- buf[bi++] = rcb->rxq->rx_packets;
- buf[bi++] = rcb->rxq->rx_bytes;
- buf[bi++] = rcb->rxq->
- rx_packets_with_error;
- buf[bi++] = rcb->rxq->
- rxbuf_alloc_failed;
- buf[bi++] = rcb->producer_index;
- buf[bi++] = rcb->consumer_index;
- }
- }
- }
-
- for (i = 0; i < bnad->num_tx; i++) {
- if (!bnad->tx_info[i].tx)
- continue;
- for (j = 0; j < bnad->num_txq_per_tx; j++)
- if (bnad->tx_info[i].tcb[j] &&
- bnad->tx_info[i].tcb[j]->txq) {
- tcb = bnad->tx_info[i].tcb[j];
- buf[bi++] = tcb->txq->tx_packets;
- buf[bi++] = tcb->txq->tx_bytes;
- buf[bi++] = tcb->producer_index;
- buf[bi++] = tcb->consumer_index;
- buf[bi++] = *(tcb->hw_consumer_index);
- }
- }
-
- return bi;
-}
-
-static void
-bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
- u64 *buf)
-{
- struct bnad *bnad = netdev_priv(netdev);
- int i, j, bi;
- unsigned long flags;
- struct rtnl_link_stats64 *net_stats64;
- u64 *stats64;
- u64 bmap;
-
- mutex_lock(&bnad->conf_mutex);
- if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
- mutex_unlock(&bnad->conf_mutex);
- return;
- }
-
- /*
- * Used bna_lock to sync reads from bna_stats, which is written
- * under the same lock
- */
- spin_lock_irqsave(&bnad->bna_lock, flags);
- bi = 0;
- memset(buf, 0, stats->n_stats * sizeof(u64));
-
- net_stats64 = (struct rtnl_link_stats64 *)buf;
- bnad_netdev_qstats_fill(bnad, net_stats64);
- bnad_netdev_hwstats_fill(bnad, net_stats64);
-
- bi = sizeof(*net_stats64) / sizeof(u64);
-
- /* Get netif_queue_stopped from stack */
- bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
-
- /* Fill driver stats into ethtool buffers */
- stats64 = (u64 *)&bnad->stats.drv_stats;
- for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
- buf[bi++] = stats64[i];
-
- /* Fill hardware stats excluding the rxf/txf into ethtool bufs */
- stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
- for (i = 0;
- i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
- i++)
- buf[bi++] = stats64[i];
-
- /* Fill txf stats into ethtool buffers */
- bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
- ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
- for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
- if (bmap & 1) {
- stats64 = (u64 *)&bnad->stats.bna_stats->
- hw_stats->txf_stats[i];
- for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
- sizeof(u64); j++)
- buf[bi++] = stats64[j];
- }
- bmap >>= 1;
- }
-
- /* Fill rxf stats into ethtool buffers */
- bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
- ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
- for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
- if (bmap & 1) {
- stats64 = (u64 *)&bnad->stats.bna_stats->
- hw_stats->rxf_stats[i];
- for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
- sizeof(u64); j++)
- buf[bi++] = stats64[j];
- }
- bmap >>= 1;
- }
-
- /* Fill per Q stats into ethtool buffers */
- bi = bnad_per_q_stats_fill(bnad, buf, bi);
-
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
- mutex_unlock(&bnad->conf_mutex);
-}
-
-static int
-bnad_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return bnad_get_stats_count_locked(netdev);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static struct ethtool_ops bnad_ethtool_ops = {
- .get_settings = bnad_get_settings,
- .set_settings = bnad_set_settings,
- .get_drvinfo = bnad_get_drvinfo,
- .get_regs_len = bnad_get_regs_len,
- .get_regs = bnad_get_regs,
- .get_wol = bnad_get_wol,
- .get_link = ethtool_op_get_link,
- .get_coalesce = bnad_get_coalesce,
- .set_coalesce = bnad_set_coalesce,
- .get_ringparam = bnad_get_ringparam,
- .set_ringparam = bnad_set_ringparam,
- .get_pauseparam = bnad_get_pauseparam,
- .set_pauseparam = bnad_set_pauseparam,
- .get_strings = bnad_get_strings,
- .get_ethtool_stats = bnad_get_ethtool_stats,
- .get_sset_count = bnad_get_sset_count
-};
-
-void
-bnad_set_ethtool_ops(struct net_device *netdev)
-{
- SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
-}
diff --git a/drivers/net/bna/cna.h b/drivers/net/bna/cna.h
deleted file mode 100644
index a679e038747..00000000000
--- a/drivers/net/bna/cna.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2006-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-
-#ifndef __CNA_H__
-#define __CNA_H__
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <asm/page.h>
-#include <asm/io.h>
-#include <asm/string.h>
-
-#include <linux/list.h>
-
-#define bfa_sm_fault(__event) do { \
- pr_err("SM Assertion failure: %s: %d: event = %d", __FILE__, __LINE__, \
- __event); \
-} while (0)
-
-extern char bfa_version[];
-
-#define CNA_FW_FILE_CT "ctfw_cna.bin"
-#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
-
-#pragma pack(1)
-
-#define MAC_ADDRLEN (6)
-typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
-
-#pragma pack()
-
-#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
-#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
-#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
-
-/*
- * bfa_q_qe_init - to initialize a queue element
- */
-#define bfa_q_qe_init(_qe) { \
- bfa_q_next(_qe) = (struct list_head *) NULL; \
- bfa_q_prev(_qe) = (struct list_head *) NULL; \
-}
-
-/*
- * bfa_q_deq - dequeue an element from head of the queue
- */
-#define bfa_q_deq(_q, _qe) { \
- if (!list_empty(_q)) { \
- (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
- bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
- (struct list_head *) (_q); \
- bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
- bfa_q_qe_init(*((struct list_head **) _qe)); \
- } else { \
- *((struct list_head **)(_qe)) = NULL; \
- } \
-}
-
-#endif /* __CNA_H__ */
diff --git a/drivers/net/bna/cna_fwimg.c b/drivers/net/bna/cna_fwimg.c
deleted file mode 100644
index e8f4ecd9ebb..00000000000
--- a/drivers/net/bna/cna_fwimg.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-#include <linux/firmware.h>
-#include "cna.h"
-
-const struct firmware *bfi_fw;
-static u32 *bfi_image_ct_cna;
-static u32 bfi_image_ct_cna_size;
-
-static u32 *
-cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
- u32 *bfi_image_size, char *fw_name)
-{
- const struct firmware *fw;
-
- if (request_firmware(&fw, fw_name, &pdev->dev)) {
- pr_alert("Can't locate firmware %s\n", fw_name);
- goto error;
- }
-
- *bfi_image = (u32 *)fw->data;
- *bfi_image_size = fw->size/sizeof(u32);
- bfi_fw = fw;
-
- return *bfi_image;
-error:
- return NULL;
-}
-
-u32 *
-cna_get_firmware_buf(struct pci_dev *pdev)
-{
- if (bfi_image_ct_cna_size == 0)
- cna_read_firmware(pdev, &bfi_image_ct_cna,
- &bfi_image_ct_cna_size, CNA_FW_FILE_CT);
- return bfi_image_ct_cna;
-}
-
-u32 *
-bfa_cb_image_get_chunk(int type, u32 off)
-{
- return (u32 *)(bfi_image_ct_cna + off);
-}
-
-u32
-bfa_cb_image_get_size(int type)
-{
- return bfi_image_ct_cna_size;
-}
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 47b928ed08f..b33c099d65a 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1135,13 +1135,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
__record_pdu(lacpdu, port);
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
- // verify that if the aggregator is enabled, the port is enabled too.
- //(because if the link goes down for a short time, the 802.3ad will not
- // catch it, and the port will continue to be disabled)
- if (port->aggregator
- && port->aggregator->is_active
- && !__port_is_enabled(port))
- __enable_port(port);
break;
default: //to silence the compiler
break;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6d79b78cfc7..c5944f1a4f9 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -395,7 +395,6 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *slave_dev)
{
skb->dev = slave_dev;
- skb->priority = 1;
skb->queue_mapping = bond_queue_mapping(skb);
@@ -557,7 +556,7 @@ down:
static int bond_update_speed_duplex(struct slave *slave)
{
struct net_device *slave_dev = slave->dev;
- struct ethtool_cmd etool = { .cmd = ETHTOOL_GSET };
+ struct ethtool_cmd ecmd;
u32 slave_speed;
int res;
@@ -565,18 +564,15 @@ static int bond_update_speed_duplex(struct slave *slave)
slave->speed = SPEED_100;
slave->duplex = DUPLEX_FULL;
- if (!slave_dev->ethtool_ops || !slave_dev->ethtool_ops->get_settings)
- return -1;
-
- res = slave_dev->ethtool_ops->get_settings(slave_dev, &etool);
+ res = __ethtool_get_settings(slave_dev, &ecmd);
if (res < 0)
return -1;
- slave_speed = ethtool_cmd_speed(&etool);
+ slave_speed = ethtool_cmd_speed(&ecmd);
if (slave_speed == 0 || slave_speed == ((__u32) -1))
return -1;
- switch (etool.duplex) {
+ switch (ecmd.duplex) {
case DUPLEX_FULL:
case DUPLEX_HALF:
break;
@@ -585,7 +581,7 @@ static int bond_update_speed_duplex(struct slave *slave)
}
slave->speed = slave_speed;
- slave->duplex = etool.duplex;
+ slave->duplex = ecmd.duplex;
return 0;
}
@@ -1435,6 +1431,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
struct sk_buff *skb = *pskb;
struct slave *slave;
struct bonding *bond;
+ void (*recv_probe)(struct sk_buff *, struct bonding *,
+ struct slave *);
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
@@ -1448,11 +1446,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
if (bond->params.arp_interval)
slave->dev->last_rx = jiffies;
- if (bond->recv_probe) {
+ recv_probe = ACCESS_ONCE(bond->recv_probe);
+ if (recv_probe) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
if (likely(nskb)) {
- bond->recv_probe(nskb, bond, slave);
+ recv_probe(nskb, bond, slave);
dev_kfree_skb(nskb);
}
}
@@ -3707,44 +3706,27 @@ static bool bond_addr_in_mc_list(unsigned char *addr,
return false;
}
-static void bond_set_multicast_list(struct net_device *bond_dev)
+static void bond_change_rx_flags(struct net_device *bond_dev, int change)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct netdev_hw_addr *ha;
- bool found;
- /*
- * Do promisc before checking multicast_mode
- */
- if ((bond_dev->flags & IFF_PROMISC) && !(bond->flags & IFF_PROMISC))
- /*
- * FIXME: Need to handle the error when one of the multi-slaves
- * encounters error.
- */
- bond_set_promiscuity(bond, 1);
-
-
- if (!(bond_dev->flags & IFF_PROMISC) && (bond->flags & IFF_PROMISC))
- bond_set_promiscuity(bond, -1);
-
-
- /* set allmulti flag to slaves */
- if ((bond_dev->flags & IFF_ALLMULTI) && !(bond->flags & IFF_ALLMULTI))
- /*
- * FIXME: Need to handle the error when one of the multi-slaves
- * encounters error.
- */
- bond_set_allmulti(bond, 1);
+ if (change & IFF_PROMISC)
+ bond_set_promiscuity(bond,
+ bond_dev->flags & IFF_PROMISC ? 1 : -1);
+ if (change & IFF_ALLMULTI)
+ bond_set_allmulti(bond,
+ bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
+}
- if (!(bond_dev->flags & IFF_ALLMULTI) && (bond->flags & IFF_ALLMULTI))
- bond_set_allmulti(bond, -1);
-
+static void bond_set_multicast_list(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct netdev_hw_addr *ha;
+ bool found;
read_lock(&bond->lock);
- bond->flags = bond_dev->flags;
-
/* looking for addresses to add to slaves' mc list */
netdev_for_each_mc_addr(ha, bond_dev) {
found = bond_addr_in_mc_list(ha->addr, &bond->mc_list,
@@ -4303,7 +4285,8 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_select_queue = bond_select_queue,
.ndo_get_stats64 = bond_get_stats,
.ndo_do_ioctl = bond_do_ioctl,
- .ndo_set_multicast_list = bond_set_multicast_list,
+ .ndo_change_rx_flags = bond_change_rx_flags,
+ .ndo_set_rx_mode = bond_set_multicast_list,
.ndo_change_mtu = bond_change_mtu,
.ndo_set_mac_address = bond_set_mac_address,
.ndo_neigh_setup = bond_neigh_setup,
@@ -4849,11 +4832,20 @@ static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
+static int bond_get_tx_queues(struct net *net, struct nlattr *tb[],
+ unsigned int *num_queues,
+ unsigned int *real_num_queues)
+{
+ *num_queues = tx_queues;
+ return 0;
+}
+
static struct rtnl_link_ops bond_link_ops __read_mostly = {
.kind = "bond",
.priv_size = sizeof(struct bonding),
.setup = bond_setup,
.validate = bond_validate,
+ .get_tx_queues = bond_get_tx_queues,
};
/* Create a new bond based on the specified name and bonding parameters.
@@ -4898,6 +4890,7 @@ static int __net_init bond_net_init(struct net *net)
INIT_LIST_HEAD(&bn->dev_list);
bond_create_proc_dir(bn);
+ bond_create_sysfs(bn);
return 0;
}
@@ -4906,6 +4899,7 @@ static void __net_exit bond_net_exit(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
+ bond_destroy_sysfs(bn);
bond_destroy_proc_dir(bn);
}
@@ -4943,10 +4937,6 @@ static int __init bonding_init(void)
goto err;
}
- res = bond_create_sysfs();
- if (res)
- goto err;
-
register_netdevice_notifier(&bond_netdev_notifier);
register_inetaddr_notifier(&bond_inetaddr_notifier);
out:
@@ -4964,7 +4954,6 @@ static void __exit bonding_exit(void)
unregister_netdevice_notifier(&bond_netdev_notifier);
unregister_inetaddr_notifier(&bond_inetaddr_notifier);
- bond_destroy_sysfs();
bond_destroy_debugfs();
rtnl_link_unregister(&bond_link_ops);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 2dfb4bf9008..5a20804fdec 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -55,8 +55,8 @@ static ssize_t bonding_show_bonds(struct class *cls,
struct class_attribute *attr,
char *buf)
{
- struct net *net = current->nsproxy->net_ns;
- struct bond_net *bn = net_generic(net, bond_net_id);
+ struct bond_net *bn =
+ container_of(attr, struct bond_net, class_attr_bonding_masters);
int res = 0;
struct bonding *bond;
@@ -79,9 +79,8 @@ static ssize_t bonding_show_bonds(struct class *cls,
return res;
}
-static struct net_device *bond_get_by_name(struct net *net, const char *ifname)
+static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifname)
{
- struct bond_net *bn = net_generic(net, bond_net_id);
struct bonding *bond;
list_for_each_entry(bond, &bn->dev_list, bond_list) {
@@ -103,7 +102,8 @@ static ssize_t bonding_store_bonds(struct class *cls,
struct class_attribute *attr,
const char *buffer, size_t count)
{
- struct net *net = current->nsproxy->net_ns;
+ struct bond_net *bn =
+ container_of(attr, struct bond_net, class_attr_bonding_masters);
char command[IFNAMSIZ + 1] = {0, };
char *ifname;
int rv, res = count;
@@ -116,7 +116,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
if (command[0] == '+') {
pr_info("%s is being created...\n", ifname);
- rv = bond_create(net, ifname);
+ rv = bond_create(bn->net, ifname);
if (rv) {
if (rv == -EEXIST)
pr_info("%s already exists.\n", ifname);
@@ -128,7 +128,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
struct net_device *bond_dev;
rtnl_lock();
- bond_dev = bond_get_by_name(net, ifname);
+ bond_dev = bond_get_by_name(bn, ifname);
if (bond_dev) {
pr_info("%s is being deleted...\n", ifname);
unregister_netdevice(bond_dev);
@@ -150,9 +150,24 @@ err_no_cmd:
return -EPERM;
}
+static const void *bonding_namespace(struct class *cls,
+ const struct class_attribute *attr)
+{
+ const struct bond_net *bn =
+ container_of(attr, struct bond_net, class_attr_bonding_masters);
+ return bn->net;
+}
+
/* class attribute for bond_masters file. This ends up in /sys/class/net */
-static CLASS_ATTR(bonding_masters, S_IWUSR | S_IRUGO,
- bonding_show_bonds, bonding_store_bonds);
+static const struct class_attribute class_attr_bonding_masters = {
+ .attr = {
+ .name = "bonding_masters",
+ .mode = S_IWUSR | S_IRUGO,
+ },
+ .show = bonding_show_bonds,
+ .store = bonding_store_bonds,
+ .namespace = bonding_namespace,
+};
int bond_create_slave_symlinks(struct net_device *master,
struct net_device *slave)
@@ -1655,11 +1670,14 @@ static struct attribute_group bonding_group = {
* Initialize sysfs. This sets up the bonding_masters file in
* /sys/class/net.
*/
-int bond_create_sysfs(void)
+int bond_create_sysfs(struct bond_net *bn)
{
int ret;
- ret = netdev_class_create_file(&class_attr_bonding_masters);
+ bn->class_attr_bonding_masters = class_attr_bonding_masters;
+ sysfs_attr_init(&bn->class_attr_bonding_masters.attr);
+
+ ret = netdev_class_create_file(&bn->class_attr_bonding_masters);
/*
* Permit multiple loads of the module by ignoring failures to
* create the bonding_masters sysfs file. Bonding devices
@@ -1673,7 +1691,7 @@ int bond_create_sysfs(void)
*/
if (ret == -EEXIST) {
/* Is someone being kinky and naming a device bonding_master? */
- if (__dev_get_by_name(&init_net,
+ if (__dev_get_by_name(bn->net,
class_attr_bonding_masters.attr.name))
pr_err("network device named %s already exists in sysfs",
class_attr_bonding_masters.attr.name);
@@ -1687,9 +1705,9 @@ int bond_create_sysfs(void)
/*
* Remove /sys/class/net/bonding_masters.
*/
-void bond_destroy_sysfs(void)
+void bond_destroy_sysfs(struct bond_net *bn)
{
- netdev_class_remove_file(&class_attr_bonding_masters);
+ netdev_class_remove_file(&bn->class_attr_bonding_masters);
}
/*
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 43526a2d275..82fec5fc75d 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -234,7 +234,6 @@ struct bonding {
struct netdev_hw_addr_list mc_list;
int (*xmit_hash_policy)(struct sk_buff *, int);
__be32 master_ip;
- u16 flags;
u16 rr_tx_counter;
struct ad_bond_info ad_info;
struct alb_bond_info alb_info;
@@ -380,11 +379,13 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
return slave->inactive;
}
+struct bond_net;
+
struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
int bond_create(struct net *net, const char *name);
-int bond_create_sysfs(void);
-void bond_destroy_sysfs(void);
+int bond_create_sysfs(struct bond_net *net);
+void bond_destroy_sysfs(struct bond_net *net);
void bond_prepare_sysfs_group(struct bonding *bond);
int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
@@ -410,6 +411,7 @@ struct bond_net {
#ifdef CONFIG_PROC_FS
struct proc_dir_entry * proc_dir;
#endif
+ struct class_attribute class_attr_bonding_masters;
};
#ifdef CONFIG_PROC_FS
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index b41c2fced0a..073352517ad 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
+#include <linux/rtnetlink.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>
@@ -29,6 +30,10 @@ MODULE_DESCRIPTION("CAIF HSI driver");
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
(((pow)-((x)&((pow)-1)))))
+static int inactivity_timeout = 1000;
+module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
+
/*
* HSI padding options.
* Warning: must be a base of 2 (& operation used) and can not be zero !
@@ -98,7 +103,8 @@ static void cfhsi_abort_tx(struct cfhsi *cfhsi)
}
cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- mod_timer(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
+ mod_timer(&cfhsi->timer,
+ jiffies + cfhsi->inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
}
@@ -145,7 +151,7 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
}
ret = 5 * HZ;
- wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
+ ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
!test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
if (ret < 0) {
@@ -178,6 +184,9 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
if (!skb)
return 0;
+ /* Clear offset. */
+ desc->offset = 0;
+
/* Check if we can embed a CAIF frame. */
if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
struct caif_payload_info *info;
@@ -206,9 +215,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
consume_skb(skb);
skb = NULL;
}
- } else
- /* Clear offset. */
- desc->offset = 0;
+ }
/* Create payload CAIF frames. */
pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
@@ -271,16 +278,13 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
return CFHSI_DESC_SZ + pld_len;
}
-static void cfhsi_tx_done_work(struct work_struct *work)
+static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
- struct cfhsi *cfhsi = NULL;
struct cfhsi_desc *desc = NULL;
int len = 0;
int res;
- cfhsi = container_of(work, struct cfhsi, tx_done_work);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
- __func__);
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
@@ -303,14 +307,22 @@ static void cfhsi_tx_done_work(struct work_struct *work)
spin_unlock_bh(&cfhsi->lock);
/* Create HSI frame. */
- len = cfhsi_tx_frm(desc, cfhsi);
- if (!len) {
- cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
- /* Start inactivity timer. */
- mod_timer(&cfhsi->timer,
- jiffies + CFHSI_INACTIVITY_TOUT);
- break;
- }
+ do {
+ len = cfhsi_tx_frm(desc, cfhsi);
+ if (!len) {
+ spin_lock_bh(&cfhsi->lock);
+ if (unlikely(skb_peek(&cfhsi->qhead))) {
+ spin_unlock_bh(&cfhsi->lock);
+ continue;
+ }
+ cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
+ /* Start inactivity timer. */
+ mod_timer(&cfhsi->timer,
+ jiffies + cfhsi->inactivity_timeout);
+ spin_unlock_bh(&cfhsi->lock);
+ goto done;
+ }
+ } while (!len);
/* Set up new transfer. */
res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
@@ -319,6 +331,9 @@ static void cfhsi_tx_done_work(struct work_struct *work)
__func__, res);
}
} while (res < 0);
+
+done:
+ return;
}
static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
@@ -331,8 +346,7 @@ static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
-
- queue_work(cfhsi->wq, &cfhsi->tx_done_work);
+ cfhsi_tx_done(cfhsi);
}
static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
@@ -346,14 +360,14 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
__func__);
- return 0;
+ return -EPROTO;
}
/* Check for embedded CAIF frame. */
if (desc->offset) {
struct sk_buff *skb;
u8 *dst = NULL;
- int len = 0, retries = 0;
+ int len = 0;
pfrm = ((u8 *)desc) + desc->offset;
/* Remove offset padding. */
@@ -364,26 +378,19 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
len |= ((*(pfrm+1)) << 8) & 0xFF00;
len += 2; /* Add FCS fields. */
+ /* Sanity check length of CAIF frame. */
+ if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
+ dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
+ __func__);
+ return -EPROTO;
+ }
/* Allocate SKB (OK even in IRQ context). */
- skb = alloc_skb(len + 1, GFP_KERNEL);
- while (!skb) {
- retries++;
- schedule_timeout(1);
- skb = alloc_skb(len + 1, GFP_KERNEL);
- if (skb) {
- printk(KERN_WARNING "%s: slept for %u "
- "before getting memory\n",
- __func__, retries);
- break;
- }
- if (retries > HZ) {
- printk(KERN_ERR "%s: slept for 1HZ and "
- "did not get memory\n",
- __func__);
- cfhsi->ndev->stats.rx_dropped++;
- goto drop_frame;
- }
+ skb = alloc_skb(len + 1, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+ __func__);
+ return -ENOMEM;
}
caif_assert(skb != NULL);
@@ -409,7 +416,6 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
cfhsi->ndev->stats.rx_bytes += len;
}
-drop_frame:
/* Calculate transfer length. */
plen = desc->cffrm_len;
while (nfrms < CFHSI_MAX_PKTS && *plen) {
@@ -422,13 +428,12 @@ drop_frame:
if (desc->header & CFHSI_PIGGY_DESC)
xfer_sz += CFHSI_DESC_SZ;
- if (xfer_sz % 4) {
+ if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
dev_err(&cfhsi->ndev->dev,
"%s: Invalid payload len: %d, ignored.\n",
__func__, xfer_sz);
- xfer_sz = 0;
+ return -EPROTO;
}
-
return xfer_sz;
}
@@ -444,23 +449,27 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
__func__);
- return -EINVAL;
+ return -EPROTO;
}
/* Set frame pointer to start of payload. */
pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
plen = desc->cffrm_len;
+
+ /* Skip already processed frames. */
+ while (nfrms < cfhsi->rx_state.nfrms) {
+ pfrm += *plen;
+ rx_sz += *plen;
+ plen++;
+ nfrms++;
+ }
+
+ /* Parse payload. */
while (nfrms < CFHSI_MAX_PKTS && *plen) {
struct sk_buff *skb;
u8 *dst = NULL;
u8 *pcffrm = NULL;
- int len = 0, retries = 0;
-
- if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n",
- __func__);
- return -EINVAL;
- }
+ int len = 0;
/* CAIF frame starts after head padding. */
pcffrm = pfrm + *pfrm + 1;
@@ -470,25 +479,20 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
len += 2; /* Add FCS fields. */
+ /* Sanity check length of CAIF frames. */
+ if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
+ dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
+ __func__);
+ return -EPROTO;
+ }
+
/* Allocate SKB (OK even in IRQ context). */
- skb = alloc_skb(len + 1, GFP_KERNEL);
- while (!skb) {
- retries++;
- schedule_timeout(1);
- skb = alloc_skb(len + 1, GFP_KERNEL);
- if (skb) {
- printk(KERN_WARNING "%s: slept for %u "
- "before getting memory\n",
- __func__, retries);
- break;
- }
- if (retries > HZ) {
- printk(KERN_ERR "%s: slept for 1HZ "
- "and did not get memory\n",
- __func__);
- cfhsi->ndev->stats.rx_dropped++;
- goto drop_frame;
- }
+ skb = alloc_skb(len + 1, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+ __func__);
+ cfhsi->rx_state.nfrms = nfrms;
+ return -ENOMEM;
}
caif_assert(skb != NULL);
@@ -512,7 +516,6 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
cfhsi->ndev->stats.rx_packets++;
cfhsi->ndev->stats.rx_bytes += len;
-drop_frame:
pfrm += *plen;
rx_sz += *plen;
plen++;
@@ -522,40 +525,56 @@ drop_frame:
return rx_sz;
}
-static void cfhsi_rx_done_work(struct work_struct *work)
+static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
int res;
int desc_pld_len = 0;
- struct cfhsi *cfhsi = NULL;
struct cfhsi_desc *desc = NULL;
- cfhsi = container_of(work, struct cfhsi, rx_done_work);
desc = (struct cfhsi_desc *)cfhsi->rx_buf;
- dev_dbg(&cfhsi->ndev->dev, "%s: Kick timer if pending.\n",
- __func__);
+ dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
/* Update inactivity timer if pending. */
- mod_timer_pending(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
+ spin_lock_bh(&cfhsi->lock);
+ mod_timer_pending(&cfhsi->timer,
+ jiffies + cfhsi->inactivity_timeout);
+ spin_unlock_bh(&cfhsi->lock);
- if (cfhsi->rx_state == CFHSI_RX_STATE_DESC) {
+ if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
+ if (desc_pld_len == -ENOMEM)
+ goto restart;
+ if (desc_pld_len == -EPROTO)
+ goto out_of_sync;
} else {
int pld_len;
- pld_len = cfhsi_rx_pld(desc, cfhsi);
+ if (!cfhsi->rx_state.piggy_desc) {
+ pld_len = cfhsi_rx_pld(desc, cfhsi);
+ if (pld_len == -ENOMEM)
+ goto restart;
+ if (pld_len == -EPROTO)
+ goto out_of_sync;
+ cfhsi->rx_state.pld_len = pld_len;
+ } else {
+ pld_len = cfhsi->rx_state.pld_len;
+ }
if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
struct cfhsi_desc *piggy_desc;
piggy_desc = (struct cfhsi_desc *)
(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
pld_len);
+ cfhsi->rx_state.piggy_desc = true;
/* Extract piggy-backed descriptor. */
desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);
+ if (desc_pld_len == -ENOMEM)
+ goto restart;
/*
* Copy needed information from the piggy-backed
@@ -563,19 +582,22 @@ static void cfhsi_rx_done_work(struct work_struct *work)
*/
memcpy((u8 *)desc, (u8 *)piggy_desc,
CFHSI_DESC_SHORT_SZ);
+
+ if (desc_pld_len == -EPROTO)
+ goto out_of_sync;
}
}
+ memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
if (desc_pld_len) {
- cfhsi->rx_state = CFHSI_RX_STATE_PAYLOAD;
+ cfhsi->rx_state.state = CFHSI_RX_STATE_PAYLOAD;
cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
cfhsi->rx_len = desc_pld_len;
} else {
- cfhsi->rx_state = CFHSI_RX_STATE_DESC;
+ cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
cfhsi->rx_ptr = cfhsi->rx_buf;
cfhsi->rx_len = CFHSI_DESC_SZ;
}
- clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
/* Set up new transfer. */
@@ -590,6 +612,33 @@ static void cfhsi_rx_done_work(struct work_struct *work)
cfhsi->ndev->stats.rx_dropped++;
}
}
+ return;
+
+restart:
+ if (++cfhsi->rx_state.retries > CFHSI_MAX_RX_RETRIES) {
+ dev_err(&cfhsi->ndev->dev, "%s: No memory available "
+ "in %d iterations.\n",
+ __func__, CFHSI_MAX_RX_RETRIES);
+ BUG();
+ }
+ mod_timer(&cfhsi->rx_slowpath_timer, jiffies + 1);
+ return;
+
+out_of_sync:
+ dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__);
+ print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
+ cfhsi->rx_buf, CFHSI_DESC_SZ);
+ schedule_work(&cfhsi->out_of_sync_work);
+}
+
+static void cfhsi_rx_slowpath(unsigned long arg)
+{
+ struct cfhsi *cfhsi = (struct cfhsi *)arg;
+
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ __func__);
+
+ cfhsi_rx_done(cfhsi);
}
static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
@@ -603,12 +652,10 @@ static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
- set_bit(CFHSI_PENDING_RX, &cfhsi->bits);
-
if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
wake_up_interruptible(&cfhsi->flush_fifo_wait);
else
- queue_work(cfhsi->wq, &cfhsi->rx_done_work);
+ cfhsi_rx_done(cfhsi);
}
static void cfhsi_wake_up(struct work_struct *work)
@@ -627,6 +674,7 @@ static void cfhsi_wake_up(struct work_struct *work)
/* It happenes when wakeup is requested by
* both ends at the same time. */
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+ clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
return;
}
@@ -637,25 +685,53 @@ static void cfhsi_wake_up(struct work_struct *work)
__func__);
/* Wait for acknowledge. */
- ret = CFHSI_WAKEUP_TOUT;
- wait_event_interruptible_timeout(cfhsi->wake_up_wait,
- test_bit(CFHSI_WAKE_UP_ACK,
+ ret = CFHSI_WAKE_TOUT;
+ ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
+ test_and_clear_bit(CFHSI_WAKE_UP_ACK,
&cfhsi->bits), ret);
if (unlikely(ret < 0)) {
/* Interrupted by signal. */
- dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+ dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
__func__, ret);
+
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
return;
} else if (!ret) {
+ bool ca_wake = false;
+ size_t fifo_occupancy = 0;
+
/* Wakeup timeout */
dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
__func__);
+
+ /* Check FIFO to check if modem has sent something. */
+ WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ &fifo_occupancy));
+
+ dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
+ __func__, (unsigned) fifo_occupancy);
+
+ /* Check if we misssed the interrupt. */
+ WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
+ &ca_wake));
+
+ if (ca_wake) {
+ dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
+ __func__);
+
+ /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
+ clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
+
+ /* Continue execution. */
+ goto wake_ack;
+ }
+
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
return;
}
+wake_ack:
dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
__func__);
@@ -664,16 +740,11 @@ static void cfhsi_wake_up(struct work_struct *work)
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
/* Resume read operation. */
- if (!test_bit(CFHSI_PENDING_RX, &cfhsi->bits)) {
- dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
- __func__);
- res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr,
- cfhsi->rx_len, cfhsi->dev);
- if (WARN_ON(res < 0)) {
- dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
- __func__, res);
- }
- }
+ dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
+ res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);
+
+ if (WARN_ON(res < 0))
+ dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);
/* Clear power up acknowledment. */
clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -686,7 +757,7 @@ static void cfhsi_wake_up(struct work_struct *work)
__func__);
/* Start inactivity timer. */
mod_timer(&cfhsi->timer,
- jiffies + CFHSI_INACTIVITY_TOUT);
+ jiffies + cfhsi->inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
return;
}
@@ -712,79 +783,81 @@ static void cfhsi_wake_up(struct work_struct *work)
"%s: Failed to create HSI frame: %d.\n",
__func__, len);
}
-
}
static void cfhsi_wake_down(struct work_struct *work)
{
long ret;
struct cfhsi *cfhsi = NULL;
- size_t fifo_occupancy;
+ size_t fifo_occupancy = 0;
+ int retry = CFHSI_WAKE_TOUT;
cfhsi = container_of(work, struct cfhsi, wake_down_work);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
- __func__);
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
- /* Check if there is something in FIFO. */
- if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
- &fifo_occupancy)))
- fifo_occupancy = 0;
-
- if (fifo_occupancy) {
- dev_dbg(&cfhsi->ndev->dev,
- "%s: %u words in RX FIFO, restart timer.\n",
- __func__, (unsigned) fifo_occupancy);
- spin_lock_bh(&cfhsi->lock);
- mod_timer(&cfhsi->timer,
- jiffies + CFHSI_INACTIVITY_TOUT);
- spin_unlock_bh(&cfhsi->lock);
- return;
- }
-
- /* Cancel pending RX requests */
- cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
-
/* Deactivate wake line. */
cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
/* Wait for acknowledge. */
- ret = CFHSI_WAKEUP_TOUT;
+ ret = CFHSI_WAKE_TOUT;
ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
- test_bit(CFHSI_WAKE_DOWN_ACK,
- &cfhsi->bits),
- ret);
+ test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
+ &cfhsi->bits), ret);
if (ret < 0) {
/* Interrupted by signal. */
- dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+ dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
__func__, ret);
return;
} else if (!ret) {
+ bool ca_wake = true;
+
/* Timeout */
- dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
- __func__);
+ dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);
+
+ /* Check if we misssed the interrupt. */
+ WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
+ &ca_wake));
+ if (!ca_wake)
+ dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
+ __func__);
}
- /* Clear power down acknowledment. */
- clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
+ /* Check FIFO occupancy. */
+ while (retry) {
+ WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ &fifo_occupancy));
+
+ if (!fifo_occupancy)
+ break;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ retry--;
+ }
+
+ if (!retry)
+ dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);
+
+ /* Clear AWAKE condition. */
clear_bit(CFHSI_AWAKE, &cfhsi->bits);
- /* Check if there is something in FIFO. */
- if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
- &fifo_occupancy)))
- fifo_occupancy = 0;
+ /* Cancel pending RX requests. */
+ cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
- if (fifo_occupancy) {
- dev_dbg(&cfhsi->ndev->dev,
- "%s: %u words in RX FIFO, wakeup forced.\n",
- __func__, (unsigned) fifo_occupancy);
- if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
- queue_work(cfhsi->wq, &cfhsi->wake_up_work);
- } else
- dev_dbg(&cfhsi->ndev->dev, "%s: Done.\n",
- __func__);
+}
+
+static void cfhsi_out_of_sync(struct work_struct *work)
+{
+ struct cfhsi *cfhsi = NULL;
+
+ cfhsi = container_of(work, struct cfhsi, out_of_sync_work);
+
+ rtnl_lock();
+ dev_close(cfhsi->ndev);
+ rtnl_unlock();
}
static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
@@ -854,17 +927,15 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
start_xfer = 1;
}
- spin_unlock_bh(&cfhsi->lock);
-
- if (!start_xfer)
+ if (!start_xfer) {
+ spin_unlock_bh(&cfhsi->lock);
return 0;
+ }
/* Delete inactivity timer if started. */
-#ifdef CONFIG_SMP
timer_active = del_timer_sync(&cfhsi->timer);
-#else
- timer_active = del_timer(&cfhsi->timer);
-#endif /* CONFIG_SMP */
+
+ spin_unlock_bh(&cfhsi->lock);
if (timer_active) {
struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
@@ -937,11 +1008,8 @@ int cfhsi_probe(struct platform_device *pdev)
int res;
ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
- if (!ndev) {
- dev_err(&pdev->dev, "%s: alloc_netdev failed.\n",
- __func__);
+ if (!ndev)
return -ENODEV;
- }
cfhsi = netdev_priv(ndev);
cfhsi->ndev = ndev;
@@ -949,7 +1017,7 @@ int cfhsi_probe(struct platform_device *pdev)
/* Initialize state vaiables. */
cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
- cfhsi->rx_state = CFHSI_RX_STATE_DESC;
+ cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
/* Set flow info */
cfhsi->flow_off_sent = 0;
@@ -969,8 +1037,6 @@ int cfhsi_probe(struct platform_device *pdev)
*/
cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
if (!cfhsi->tx_buf) {
- dev_err(&ndev->dev, "%s: Failed to allocate TX buffer.\n",
- __func__);
res = -ENODEV;
goto err_alloc_tx;
}
@@ -981,13 +1047,23 @@ int cfhsi_probe(struct platform_device *pdev)
*/
cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
if (!cfhsi->rx_buf) {
- dev_err(&ndev->dev, "%s: Failed to allocate RX buffer.\n",
- __func__);
res = -ENODEV;
goto err_alloc_rx;
}
- /* Initialize receive variables. */
+ /* Pre-calculate inactivity timeout. */
+ if (inactivity_timeout != -1) {
+ cfhsi->inactivity_timeout =
+ inactivity_timeout * HZ / 1000;
+ if (!cfhsi->inactivity_timeout)
+ cfhsi->inactivity_timeout = 1;
+ else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
+ cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
+ } else {
+ cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
+ }
+
+ /* Initialize recieve vaiables. */
cfhsi->rx_ptr = cfhsi->rx_buf;
cfhsi->rx_len = CFHSI_DESC_SZ;
@@ -997,19 +1073,19 @@ int cfhsi_probe(struct platform_device *pdev)
/* Set up the driver. */
cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
+ cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
+ cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;
/* Initialize the work queues. */
INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
- INIT_WORK(&cfhsi->rx_done_work, cfhsi_rx_done_work);
- INIT_WORK(&cfhsi->tx_done_work, cfhsi_tx_done_work);
+ INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);
/* Clear all bit fields. */
clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
clear_bit(CFHSI_AWAKE, &cfhsi->bits);
- clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
/* Create work thread. */
cfhsi->wq = create_singlethread_workqueue(pdev->name);
@@ -1029,6 +1105,10 @@ int cfhsi_probe(struct platform_device *pdev)
init_timer(&cfhsi->timer);
cfhsi->timer.data = (unsigned long)cfhsi;
cfhsi->timer.function = cfhsi_inactivity_tout;
+ /* Setup the slowpath RX timer. */
+ init_timer(&cfhsi->rx_slowpath_timer);
+ cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
+ cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
/* Add CAIF HSI device to list. */
spin_lock(&cfhsi_list_lock);
@@ -1052,9 +1132,6 @@ int cfhsi_probe(struct platform_device *pdev)
goto err_net_reg;
}
- cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
- cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;
-
/* Register network device. */
res = register_netdev(ndev);
if (res) {
@@ -1081,7 +1158,7 @@ int cfhsi_probe(struct platform_device *pdev)
return res;
}
-static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev)
+static void cfhsi_shutdown(struct cfhsi *cfhsi)
{
u8 *tx_buf, *rx_buf;
@@ -1091,28 +1168,17 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev)
/* going to shutdown driver */
set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
- if (remove_platform_dev) {
- /* Flush workqueue */
- flush_workqueue(cfhsi->wq);
-
- /* Notify device. */
- platform_device_unregister(cfhsi->pdev);
- }
-
/* Flush workqueue */
flush_workqueue(cfhsi->wq);
- /* Delete timer if pending */
-#ifdef CONFIG_SMP
+ /* Delete timers if pending */
del_timer_sync(&cfhsi->timer);
-#else
- del_timer(&cfhsi->timer);
-#endif /* CONFIG_SMP */
+ del_timer_sync(&cfhsi->rx_slowpath_timer);
/* Cancel pending RX request (if any) */
cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
- /* Flush again and destroy workqueue */
+ /* Destroy workqueue */
destroy_workqueue(cfhsi->wq);
/* Store bufferes: will be freed later. */
@@ -1151,7 +1217,7 @@ int cfhsi_remove(struct platform_device *pdev)
spin_unlock(&cfhsi_list_lock);
/* Shutdown driver. */
- cfhsi_shutdown(cfhsi, false);
+ cfhsi_shutdown(cfhsi);
return 0;
}
@@ -1184,7 +1250,7 @@ static void __exit cfhsi_exit_module(void)
spin_unlock(&cfhsi_list_lock);
/* Shutdown driver. */
- cfhsi_shutdown(cfhsi, true);
+ cfhsi_shutdown(cfhsi);
spin_lock(&cfhsi_list_lock);
}
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 0f8defc7330..05e791f46ae 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -664,8 +664,6 @@ int cfspi_spi_probe(struct platform_device *pdev)
/* Allocate DMA buffers. */
cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx);
if (!cfspi->xfer.va_tx) {
- printk(KERN_WARNING
- "CFSPI: failed to allocate dma TX buffer.\n");
res = -ENODEV;
goto err_dma_alloc_tx;
}
@@ -673,8 +671,6 @@ int cfspi_spi_probe(struct platform_device *pdev)
cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
if (!cfspi->xfer.va_rx) {
- printk(KERN_WARNING
- "CFSPI: failed to allocate dma TX buffer.\n");
res = -ENODEV;
goto err_dma_alloc_rx;
}
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 121ede663e2..044ea0647b0 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -8,8 +8,6 @@
* Public License ("GPL") version 2 as distributed in the 'COPYING'
* file from the main directory of the linux kernel source.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*
* Your platform definition file should specify something like:
*
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 9bf1116e5b5..25695bde054 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -150,7 +150,19 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
bt->prop_seg = tseg1 / 2;
bt->phase_seg1 = tseg1 - bt->prop_seg;
bt->phase_seg2 = tseg2;
- bt->sjw = 1;
+
+ /* check for sjw user settings */
+ if (!bt->sjw || !btc->sjw_max)
+ bt->sjw = 1;
+ else {
+ /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
+ if (bt->sjw > btc->sjw_max)
+ bt->sjw = btc->sjw_max;
+ /* bt->sjw must not be higher than tseg2 */
+ if (tseg2 < bt->sjw)
+ bt->sjw = tseg2;
+ }
+
bt->brp = best_brp;
/* real bit-rate */
bt->bitrate = priv->clock.freq / (bt->brp * (tseg1 + tseg2 + 1));
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 17678117ed6..e02337953f4 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -33,10 +33,9 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <mach/clock.h>
-
#define DRV_NAME "flexcan"
/* 8 for RX fifo and 2 error handling */
@@ -192,6 +191,31 @@ static struct can_bittiming_const flexcan_bittiming_const = {
};
/*
+ * Abstract off the read/write for arm versus ppc.
+ */
+#if defined(__BIG_ENDIAN)
+static inline u32 flexcan_read(void __iomem *addr)
+{
+ return in_be32(addr);
+}
+
+static inline void flexcan_write(u32 val, void __iomem *addr)
+{
+ out_be32(addr, val);
+}
+#else
+static inline u32 flexcan_read(void __iomem *addr)
+{
+ return readl(addr);
+}
+
+static inline void flexcan_write(u32 val, void __iomem *addr)
+{
+ writel(val, addr);
+}
+#endif
+
+/*
* Swtich transceiver on or off
*/
static void flexcan_transceiver_switch(const struct flexcan_priv *priv, int on)
@@ -212,9 +236,9 @@ static inline void flexcan_chip_enable(struct flexcan_priv *priv)
struct flexcan_regs __iomem *regs = priv->base;
u32 reg;
- reg = readl(&regs->mcr);
+ reg = flexcan_read(&regs->mcr);
reg &= ~FLEXCAN_MCR_MDIS;
- writel(reg, &regs->mcr);
+ flexcan_write(reg, &regs->mcr);
udelay(10);
}
@@ -224,9 +248,9 @@ static inline void flexcan_chip_disable(struct flexcan_priv *priv)
struct flexcan_regs __iomem *regs = priv->base;
u32 reg;
- reg = readl(&regs->mcr);
+ reg = flexcan_read(&regs->mcr);
reg |= FLEXCAN_MCR_MDIS;
- writel(reg, &regs->mcr);
+ flexcan_write(reg, &regs->mcr);
}
static int flexcan_get_berr_counter(const struct net_device *dev,
@@ -234,7 +258,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
{
const struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->base;
- u32 reg = readl(&regs->ecr);
+ u32 reg = flexcan_read(&regs->ecr);
bec->txerr = (reg >> 0) & 0xff;
bec->rxerr = (reg >> 8) & 0xff;
@@ -268,15 +292,15 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (cf->can_dlc > 0) {
u32 data = be32_to_cpup((__be32 *)&cf->data[0]);
- writel(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[0]);
+ flexcan_write(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[0]);
}
if (cf->can_dlc > 3) {
u32 data = be32_to_cpup((__be32 *)&cf->data[4]);
- writel(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[1]);
+ flexcan_write(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[1]);
}
- writel(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
- writel(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+ flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
+ flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
kfree_skb(skb);
@@ -464,8 +488,8 @@ static void flexcan_read_fifo(const struct net_device *dev,
struct flexcan_mb __iomem *mb = &regs->cantxfg[0];
u32 reg_ctrl, reg_id;
- reg_ctrl = readl(&mb->can_ctrl);
- reg_id = readl(&mb->can_id);
+ reg_ctrl = flexcan_read(&mb->can_ctrl);
+ reg_id = flexcan_read(&mb->can_id);
if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
@@ -475,12 +499,12 @@ static void flexcan_read_fifo(const struct net_device *dev,
cf->can_id |= CAN_RTR_FLAG;
cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
- *(__be32 *)(cf->data + 0) = cpu_to_be32(readl(&mb->data[0]));
- *(__be32 *)(cf->data + 4) = cpu_to_be32(readl(&mb->data[1]));
+ *(__be32 *)(cf->data + 0) = cpu_to_be32(flexcan_read(&mb->data[0]));
+ *(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1]));
/* mark as read */
- writel(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
- readl(&regs->timer);
+ flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
+ flexcan_read(&regs->timer);
}
static int flexcan_read_frame(struct net_device *dev)
@@ -516,17 +540,17 @@ static int flexcan_poll(struct napi_struct *napi, int quota)
* The error bits are cleared on read,
* use saved value from irq handler.
*/
- reg_esr = readl(&regs->esr) | priv->reg_esr;
+ reg_esr = flexcan_read(&regs->esr) | priv->reg_esr;
/* handle state changes */
work_done += flexcan_poll_state(dev, reg_esr);
/* handle RX-FIFO */
- reg_iflag1 = readl(&regs->iflag1);
+ reg_iflag1 = flexcan_read(&regs->iflag1);
while (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE &&
work_done < quota) {
work_done += flexcan_read_frame(dev);
- reg_iflag1 = readl(&regs->iflag1);
+ reg_iflag1 = flexcan_read(&regs->iflag1);
}
/* report bus errors */
@@ -536,8 +560,8 @@ static int flexcan_poll(struct napi_struct *napi, int quota)
if (work_done < quota) {
napi_complete(napi);
/* enable IRQs */
- writel(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
- writel(priv->reg_ctrl_default, &regs->ctrl);
+ flexcan_write(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
+ flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
}
return work_done;
@@ -551,9 +575,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
struct flexcan_regs __iomem *regs = priv->base;
u32 reg_iflag1, reg_esr;
- reg_iflag1 = readl(&regs->iflag1);
- reg_esr = readl(&regs->esr);
- writel(FLEXCAN_ESR_ERR_INT, &regs->esr); /* ACK err IRQ */
+ reg_iflag1 = flexcan_read(&regs->iflag1);
+ reg_esr = flexcan_read(&regs->esr);
+ flexcan_write(FLEXCAN_ESR_ERR_INT, &regs->esr); /* ACK err IRQ */
/*
* schedule NAPI in case of:
@@ -569,16 +593,16 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
* save them for later use.
*/
priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS;
- writel(FLEXCAN_IFLAG_DEFAULT & ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE,
- &regs->imask1);
- writel(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+ flexcan_write(FLEXCAN_IFLAG_DEFAULT &
+ ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->imask1);
+ flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
&regs->ctrl);
napi_schedule(&priv->napi);
}
/* FIFO overflow */
if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
- writel(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
+ flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
dev->stats.rx_over_errors++;
dev->stats.rx_errors++;
}
@@ -587,7 +611,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) {
/* tx_bytes is incremented in flexcan_start_xmit */
stats->tx_packets++;
- writel((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
+ flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
netif_wake_queue(dev);
}
@@ -601,7 +625,7 @@ static void flexcan_set_bittiming(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->base;
u32 reg;
- reg = readl(&regs->ctrl);
+ reg = flexcan_read(&regs->ctrl);
reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) |
FLEXCAN_CTRL_RJW(0x3) |
FLEXCAN_CTRL_PSEG1(0x7) |
@@ -625,11 +649,11 @@ static void flexcan_set_bittiming(struct net_device *dev)
reg |= FLEXCAN_CTRL_SMP;
dev_info(dev->dev.parent, "writing ctrl=0x%08x\n", reg);
- writel(reg, &regs->ctrl);
+ flexcan_write(reg, &regs->ctrl);
/* print chip status */
dev_dbg(dev->dev.parent, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
- readl(&regs->mcr), readl(&regs->ctrl));
+ flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
}
/*
@@ -650,10 +674,10 @@ static int flexcan_chip_start(struct net_device *dev)
flexcan_chip_enable(priv);
/* soft reset */
- writel(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+ flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
udelay(10);
- reg_mcr = readl(&regs->mcr);
+ reg_mcr = flexcan_read(&regs->mcr);
if (reg_mcr & FLEXCAN_MCR_SOFTRST) {
dev_err(dev->dev.parent,
"Failed to softreset can module (mcr=0x%08x)\n",
@@ -675,12 +699,12 @@ static int flexcan_chip_start(struct net_device *dev)
* choose format C
*
*/
- reg_mcr = readl(&regs->mcr);
+ reg_mcr = flexcan_read(&regs->mcr);
reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
FLEXCAN_MCR_IDAM_C;
dev_dbg(dev->dev.parent, "%s: writing mcr=0x%08x", __func__, reg_mcr);
- writel(reg_mcr, &regs->mcr);
+ flexcan_write(reg_mcr, &regs->mcr);
/*
* CTRL
@@ -698,7 +722,7 @@ static int flexcan_chip_start(struct net_device *dev)
* (FLEXCAN_CTRL_ERR_MSK), too. Otherwise we don't get any
* warning or bus passive interrupts.
*/
- reg_ctrl = readl(&regs->ctrl);
+ reg_ctrl = flexcan_read(&regs->ctrl);
reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
FLEXCAN_CTRL_ERR_STATE | FLEXCAN_CTRL_ERR_MSK;
@@ -706,38 +730,39 @@ static int flexcan_chip_start(struct net_device *dev)
/* save for later use */
priv->reg_ctrl_default = reg_ctrl;
dev_dbg(dev->dev.parent, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
- writel(reg_ctrl, &regs->ctrl);
+ flexcan_write(reg_ctrl, &regs->ctrl);
for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) {
- writel(0, &regs->cantxfg[i].can_ctrl);
- writel(0, &regs->cantxfg[i].can_id);
- writel(0, &regs->cantxfg[i].data[0]);
- writel(0, &regs->cantxfg[i].data[1]);
+ flexcan_write(0, &regs->cantxfg[i].can_ctrl);
+ flexcan_write(0, &regs->cantxfg[i].can_id);
+ flexcan_write(0, &regs->cantxfg[i].data[0]);
+ flexcan_write(0, &regs->cantxfg[i].data[1]);
/* put MB into rx queue */
- writel(FLEXCAN_MB_CNT_CODE(0x4), &regs->cantxfg[i].can_ctrl);
+ flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
+ &regs->cantxfg[i].can_ctrl);
}
/* acceptance mask/acceptance code (accept everything) */
- writel(0x0, &regs->rxgmask);
- writel(0x0, &regs->rx14mask);
- writel(0x0, &regs->rx15mask);
+ flexcan_write(0x0, &regs->rxgmask);
+ flexcan_write(0x0, &regs->rx14mask);
+ flexcan_write(0x0, &regs->rx15mask);
flexcan_transceiver_switch(priv, 1);
/* synchronize with the can bus */
- reg_mcr = readl(&regs->mcr);
+ reg_mcr = flexcan_read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_HALT;
- writel(reg_mcr, &regs->mcr);
+ flexcan_write(reg_mcr, &regs->mcr);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
/* enable FIFO interrupts */
- writel(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
+ flexcan_write(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
/* print chip status */
dev_dbg(dev->dev.parent, "%s: reading mcr=0x%08x ctrl=0x%08x\n",
- __func__, readl(&regs->mcr), readl(&regs->ctrl));
+ __func__, flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
return 0;
@@ -759,12 +784,12 @@ static void flexcan_chip_stop(struct net_device *dev)
u32 reg;
/* Disable all interrupts */
- writel(0, &regs->imask1);
+ flexcan_write(0, &regs->imask1);
/* Disable + halt module */
- reg = readl(&regs->mcr);
+ reg = flexcan_read(&regs->mcr);
reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
- writel(reg, &regs->mcr);
+ flexcan_write(reg, &regs->mcr);
flexcan_transceiver_switch(priv, 0);
priv->can.state = CAN_STATE_STOPPED;
@@ -856,24 +881,24 @@ static int __devinit register_flexcandev(struct net_device *dev)
/* select "bus clock", chip must be disabled */
flexcan_chip_disable(priv);
- reg = readl(&regs->ctrl);
+ reg = flexcan_read(&regs->ctrl);
reg |= FLEXCAN_CTRL_CLK_SRC;
- writel(reg, &regs->ctrl);
+ flexcan_write(reg, &regs->ctrl);
flexcan_chip_enable(priv);
/* set freeze, halt and activate FIFO, restrict register access */
- reg = readl(&regs->mcr);
+ reg = flexcan_read(&regs->mcr);
reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
- writel(reg, &regs->mcr);
+ flexcan_write(reg, &regs->mcr);
/*
* Currently we only support newer versions of this core
* featuring a RX FIFO. Older cores found on some Coldfire
* derivates are not yet supported.
*/
- reg = readl(&regs->mcr);
+ reg = flexcan_read(&regs->mcr);
if (!(reg & FLEXCAN_MCR_FEN)) {
dev_err(dev->dev.parent,
"Could not enable RX FIFO, unsupported core\n");
@@ -901,16 +926,29 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
struct net_device *dev;
struct flexcan_priv *priv;
struct resource *mem;
- struct clk *clk;
+ struct clk *clk = NULL;
void __iomem *base;
resource_size_t mem_size;
int err, irq;
+ u32 clock_freq = 0;
+
+ if (pdev->dev.of_node) {
+ const u32 *clock_freq_p;
- clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "no clock defined\n");
- err = PTR_ERR(clk);
- goto failed_clock;
+ clock_freq_p = of_get_property(pdev->dev.of_node,
+ "clock-frequency", NULL);
+ if (clock_freq_p)
+ clock_freq = *clock_freq_p;
+ }
+
+ if (!clock_freq) {
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "no clock defined\n");
+ err = PTR_ERR(clk);
+ goto failed_clock;
+ }
+ clock_freq = clk_get_rate(clk);
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -943,7 +981,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
dev->flags |= IFF_ECHO; /* we support local echo in hardware */
priv = netdev_priv(dev);
- priv->can.clock.freq = clk_get_rate(clk);
+ priv->can.clock.freq = clock_freq;
priv->can.bittiming_const = &flexcan_bittiming_const;
priv->can.do_set_mode = flexcan_set_mode;
priv->can.do_get_berr_counter = flexcan_get_berr_counter;
@@ -978,7 +1016,8 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
failed_map:
release_mem_region(mem->start, mem_size);
failed_get:
- clk_put(clk);
+ if (clk)
+ clk_put(clk);
failed_clock:
return err;
}
@@ -996,15 +1035,27 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
- clk_put(priv->clk);
+ if (priv->clk)
+ clk_put(priv->clk);
free_candev(dev);
return 0;
}
+static struct of_device_id flexcan_of_match[] = {
+ {
+ .compatible = "fsl,p1010-flexcan",
+ },
+ {},
+};
+
static struct platform_driver flexcan_driver = {
- .driver.name = DRV_NAME,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = flexcan_of_match,
+ },
.probe = flexcan_probe,
.remove = __devexit_p(flexcan_remove),
};
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 92feac68b66..ec4a3119e2c 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -62,7 +62,7 @@ static enum can_state state_map[] = {
static int mscan_set_mode(struct net_device *dev, u8 mode)
{
struct mscan_priv *priv = netdev_priv(dev);
- struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+ struct mscan_regs __iomem *regs = priv->reg_base;
int ret = 0;
int i;
u8 canctl1;
@@ -138,7 +138,7 @@ static int mscan_set_mode(struct net_device *dev, u8 mode)
static int mscan_start(struct net_device *dev)
{
struct mscan_priv *priv = netdev_priv(dev);
- struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+ struct mscan_regs __iomem *regs = priv->reg_base;
u8 canrflg;
int err;
@@ -178,7 +178,7 @@ static int mscan_restart(struct net_device *dev)
struct mscan_priv *priv = netdev_priv(dev);
if (priv->type == MSCAN_TYPE_MPC5121) {
- struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+ struct mscan_regs __iomem *regs = priv->reg_base;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
@@ -199,7 +199,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct can_frame *frame = (struct can_frame *)skb->data;
struct mscan_priv *priv = netdev_priv(dev);
- struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+ struct mscan_regs __iomem *regs = priv->reg_base;
int i, rtr, buf_id;
u32 can_id;
@@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
void __iomem *data = &regs->tx.dsr1_0;
u16 *payload = (u16 *)frame->data;
- /* It is safe to write into dsr[dlc+1] */
- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
+ for (i = 0; i < frame->can_dlc / 2; i++) {
out_be16(data, *payload++);
data += 2 + _MSCAN_RESERVED_DSR_SIZE;
}
+ /* write remaining byte if necessary */
+ if (frame->can_dlc & 1)
+ out_8(data, frame->data[frame->can_dlc - 1]);
}
out_8(&regs->tx.dlr, frame->can_dlc);
@@ -305,7 +307,7 @@ static enum can_state check_set_state(struct net_device *dev, u8 canrflg)
static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
{
struct mscan_priv *priv = netdev_priv(dev);
- struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+ struct mscan_regs __iomem *regs = priv->reg_base;
u32 can_id;
int i;
@@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
void __iomem *data = &regs->rx.dsr1_0;
u16 *payload = (u16 *)frame->data;
- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
+ for (i = 0; i < frame->can_dlc / 2; i++) {
*payload++ = in_be16(data);
data += 2 + _MSCAN_RESERVED_DSR_SIZE;
}
+ /* read remaining byte if necessary */
+ if (frame->can_dlc & 1)
+ frame->data[frame->can_dlc - 1] = in_8(data);
}
out_8(&regs->canrflg, MSCAN_RXF);
@@ -343,7 +348,7 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
u8 canrflg)
{
struct mscan_priv *priv = netdev_priv(dev);
- struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+ struct mscan_regs __iomem *regs = priv->reg_base;
struct net_device_stats *stats = &dev->stats;
enum can_state old_state;
@@ -406,7 +411,7 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
{
struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
struct net_device *dev = napi->dev;
- struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+ struct mscan_regs __iomem *regs = priv->reg_base;
struct net_device_stats *stats = &dev->stats;
int npackets = 0;
int ret = 1;
@@ -453,7 +458,7 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct mscan_priv *priv = netdev_priv(dev);
- struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+ struct mscan_regs __iomem *regs = priv->reg_base;
struct net_device_stats *stats = &dev->stats;
u8 cantier, cantflg, canrflg;
irqreturn_t ret = IRQ_NONE;
@@ -537,7 +542,7 @@ static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
static int mscan_do_set_bittiming(struct net_device *dev)
{
struct mscan_priv *priv = netdev_priv(dev);
- struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+ struct mscan_regs __iomem *regs = priv->reg_base;
struct can_bittiming *bt = &priv->can.bittiming;
u8 btr0, btr1;
@@ -559,7 +564,7 @@ static int mscan_open(struct net_device *dev)
{
int ret;
struct mscan_priv *priv = netdev_priv(dev);
- struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+ struct mscan_regs __iomem *regs = priv->reg_base;
/* common open */
ret = open_candev(dev);
@@ -598,7 +603,7 @@ exit_napi_disable:
static int mscan_close(struct net_device *dev)
{
struct mscan_priv *priv = netdev_priv(dev);
- struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+ struct mscan_regs __iomem *regs = priv->reg_base;
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -622,7 +627,7 @@ static const struct net_device_ops mscan_netdev_ops = {
int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
struct mscan_priv *priv = netdev_priv(dev);
- struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+ struct mscan_regs __iomem *regs = priv->reg_base;
u8 ctl1;
ctl1 = in_8(&regs->canctl1);
@@ -659,7 +664,7 @@ int register_mscandev(struct net_device *dev, int mscan_clksrc)
void unregister_mscandev(struct net_device *dev)
{
struct mscan_priv *priv = netdev_priv(dev);
- struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+ struct mscan_regs __iomem *regs = priv->reg_base;
mscan_set_mode(dev, MSCAN_INIT_MODE);
clrbits8(&regs->canctl1, MSCAN_CANE);
unregister_candev(dev);
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 6fdc031daaa..fe9e64d476e 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -29,6 +29,13 @@ config CAN_SJA1000_OF_PLATFORM
OpenFirmware bindings, e.g. if you have a PowerPC based system
you may want to enable this option.
+config CAN_EMS_PCMCIA
+ tristate "EMS CPC-CARD Card"
+ depends on PCMCIA
+ ---help---
+ This driver is for the one or two channel CPC-CARD cards from
+ EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
+
config CAN_EMS_PCI
tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
depends on PCI
@@ -37,6 +44,13 @@ config CAN_EMS_PCI
CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
(http://www.ems-wuensche.de).
+config CAN_PEAK_PCI
+ tristate "PEAK PCAN PCI/PCIe Cards"
+ depends on PCI
+ ---help---
+ This driver is for the PCAN PCI/PCIe cards (1, 2, 3 or 4 channels)
+ from PEAK Systems (http://www.peak-system.com).
+
config CAN_KVASER_PCI
tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
depends on PCI
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index 2c591eb321c..0604f240c8b 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -6,8 +6,10 @@ obj-$(CONFIG_CAN_SJA1000) += sja1000.o
obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o
obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
+obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
+obj-$(CONFIG_CAN_PEAK_PCI) += peak_pci.o
obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c
new file mode 100644
index 00000000000..075a5457a19
--- /dev/null
+++ b/drivers/net/can/sja1000/ems_pcmcia.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) 2008 Sebastian Haas (initial chardev implementation)
+ * Copyright (C) 2010 Markus Plessing <plessing@ems-wuensche.com>
+ * Rework for mainline by Oliver Hartkopp <socketcan@hartkopp.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include "sja1000.h"
+
+#define DRV_NAME "ems_pcmcia"
+
+MODULE_AUTHOR("Markus Plessing <plessing@ems-wuensche.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-CARD cards");
+MODULE_SUPPORTED_DEVICE("EMS CPC-CARD CAN card");
+MODULE_LICENSE("GPL v2");
+
+#define EMS_PCMCIA_MAX_CHAN 2
+
+struct ems_pcmcia_card {
+ int channels;
+ struct pcmcia_device *pcmcia_dev;
+ struct net_device *net_dev[EMS_PCMCIA_MAX_CHAN];
+ void __iomem *base_addr;
+};
+
+#define EMS_PCMCIA_CAN_CLOCK (16000000 / 2)
+
+/*
+ * The board configuration is probably following:
+ * RX1 is connected to ground.
+ * TX1 is not connected.
+ * CLKO is not connected.
+ * Setting the OCR register to 0xDA is a good idea.
+ * This means normal output mode , push-pull and the correct polarity.
+ */
+#define EMS_PCMCIA_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ * You will probably also want to set the clock divider value to 7
+ * (meaning direct oscillator output) because the second SJA1000 chip
+ * is driven by the first one CLKOUT output.
+ */
+#define EMS_PCMCIA_CDR (CDR_CBP | CDR_CLKOUT_MASK)
+#define EMS_PCMCIA_MEM_SIZE 4096 /* Size of the remapped io-memory */
+#define EMS_PCMCIA_CAN_BASE_OFFSET 0x100 /* Offset where controllers starts */
+#define EMS_PCMCIA_CAN_CTRL_SIZE 0x80 /* Memory size for each controller */
+
+#define EMS_CMD_RESET 0x00 /* Perform a reset of the card */
+#define EMS_CMD_MAP 0x03 /* Map CAN controllers into card' memory */
+#define EMS_CMD_UMAP 0x02 /* Unmap CAN controllers from card' memory */
+
+static struct pcmcia_device_id ems_pcmcia_tbl[] = {
+ PCMCIA_DEVICE_PROD_ID123("EMS_T_W", "CPC-Card", "V2.0", 0xeab1ea23,
+ 0xa338573f, 0xe4575800),
+ PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, ems_pcmcia_tbl);
+
+static u8 ems_pcmcia_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return readb(priv->reg_base + port);
+}
+
+static void ems_pcmcia_write_reg(const struct sja1000_priv *priv, int port,
+ u8 val)
+{
+ writeb(val, priv->reg_base + port);
+}
+
+static irqreturn_t ems_pcmcia_interrupt(int irq, void *dev_id)
+{
+ struct ems_pcmcia_card *card = dev_id;
+ struct net_device *dev;
+ irqreturn_t retval = IRQ_NONE;
+ int i, again;
+
+ /* Card not present */
+ if (readw(card->base_addr) != 0xAA55)
+ return IRQ_HANDLED;
+
+ do {
+ again = 0;
+
+ /* Check interrupt for each channel */
+ for (i = 0; i < card->channels; i++) {
+ dev = card->net_dev[i];
+ if (!dev)
+ continue;
+
+ if (sja1000_interrupt(irq, dev) == IRQ_HANDLED)
+ again = 1;
+ }
+ /* At least one channel handled the interrupt */
+ if (again)
+ retval = IRQ_HANDLED;
+
+ } while (again);
+
+ return retval;
+}
+
+/*
+ * Check if a CAN controller is present at the specified location
+ * by trying to set 'em into the PeliCAN mode
+ */
+static inline int ems_pcmcia_check_chan(struct sja1000_priv *priv)
+{
+ /* Make sure SJA1000 is in reset mode */
+ ems_pcmcia_write_reg(priv, REG_MOD, 1);
+ ems_pcmcia_write_reg(priv, REG_CDR, CDR_PELICAN);
+
+ /* read reset-values */
+ if (ems_pcmcia_read_reg(priv, REG_CDR) == CDR_PELICAN)
+ return 1;
+
+ return 0;
+}
+
+static void ems_pcmcia_del_card(struct pcmcia_device *pdev)
+{
+ struct ems_pcmcia_card *card = pdev->priv;
+ struct net_device *dev;
+ int i;
+
+ free_irq(pdev->irq, card);
+
+ for (i = 0; i < card->channels; i++) {
+ dev = card->net_dev[i];
+ if (!dev)
+ continue;
+
+ printk(KERN_INFO "%s: removing %s on channel #%d\n",
+ DRV_NAME, dev->name, i);
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
+ }
+
+ writeb(EMS_CMD_UMAP, card->base_addr);
+ iounmap(card->base_addr);
+ kfree(card);
+
+ pdev->priv = NULL;
+}
+
+/*
+ * Probe PCI device for EMS CAN signature and register each available
+ * CAN channel to SJA1000 Socket-CAN subsystem.
+ */
+static int __devinit ems_pcmcia_add_card(struct pcmcia_device *pdev,
+ unsigned long base)
+{
+ struct sja1000_priv *priv;
+ struct net_device *dev;
+ struct ems_pcmcia_card *card;
+ int err, i;
+
+ /* Allocating card structures to hold addresses, ... */
+ card = kzalloc(sizeof(struct ems_pcmcia_card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ pdev->priv = card;
+ card->channels = 0;
+
+ card->base_addr = ioremap(base, EMS_PCMCIA_MEM_SIZE);
+ if (!card->base_addr) {
+ err = -ENOMEM;
+ goto failure_cleanup;
+ }
+
+ /* Check for unique EMS CAN signature */
+ if (readw(card->base_addr) != 0xAA55) {
+ err = -ENODEV;
+ goto failure_cleanup;
+ }
+
+ /* Request board reset */
+ writeb(EMS_CMD_RESET, card->base_addr);
+
+ /* Make sure CAN controllers are mapped into card's memory space */
+ writeb(EMS_CMD_MAP, card->base_addr);
+
+ /* Detect available channels */
+ for (i = 0; i < EMS_PCMCIA_MAX_CHAN; i++) {
+ dev = alloc_sja1000dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto failure_cleanup;
+ }
+
+ card->net_dev[i] = dev;
+ priv = netdev_priv(dev);
+ priv->priv = card;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ priv->irq_flags = IRQF_SHARED;
+ dev->irq = pdev->irq;
+ priv->reg_base = card->base_addr + EMS_PCMCIA_CAN_BASE_OFFSET +
+ (i * EMS_PCMCIA_CAN_CTRL_SIZE);
+
+ /* Check if channel is present */
+ if (ems_pcmcia_check_chan(priv)) {
+ priv->read_reg = ems_pcmcia_read_reg;
+ priv->write_reg = ems_pcmcia_write_reg;
+ priv->can.clock.freq = EMS_PCMCIA_CAN_CLOCK;
+ priv->ocr = EMS_PCMCIA_OCR;
+ priv->cdr = EMS_PCMCIA_CDR;
+ priv->flags |= SJA1000_CUSTOM_IRQ_HANDLER;
+
+ /* Register SJA1000 device */
+ err = register_sja1000dev(dev);
+ if (err) {
+ free_sja1000dev(dev);
+ goto failure_cleanup;
+ }
+
+ card->channels++;
+
+ printk(KERN_INFO "%s: registered %s on channel "
+ "#%d at 0x%p, irq %d\n", DRV_NAME, dev->name,
+ i, priv->reg_base, dev->irq);
+ } else
+ free_sja1000dev(dev);
+ }
+
+ err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
+ DRV_NAME, card);
+ if (!err)
+ return 0;
+
+failure_cleanup:
+ ems_pcmcia_del_card(pdev);
+ return err;
+}
+
+/*
+ * Setup PCMCIA socket and probe for EMS CPC-CARD
+ */
+static int __devinit ems_pcmcia_probe(struct pcmcia_device *dev)
+{
+ int csval;
+
+ /* General socket configuration */
+ dev->config_flags |= CONF_ENABLE_IRQ;
+ dev->config_index = 1;
+ dev->config_regs = PRESENT_OPTION;
+
+ /* The io structure describes IO port mapping */
+ dev->resource[0]->end = 16;
+ dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ dev->resource[1]->end = 16;
+ dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_16;
+ dev->io_lines = 5;
+
+ /* Allocate a memory window */
+ dev->resource[2]->flags =
+ (WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE);
+ dev->resource[2]->start = dev->resource[2]->end = 0;
+
+ csval = pcmcia_request_window(dev, dev->resource[2], 0);
+ if (csval) {
+ dev_err(&dev->dev, "pcmcia_request_window failed (err=%d)\n",
+ csval);
+ return 0;
+ }
+
+ csval = pcmcia_map_mem_page(dev, dev->resource[2], dev->config_base);
+ if (csval) {
+ dev_err(&dev->dev, "pcmcia_map_mem_page failed (err=%d)\n",
+ csval);
+ return 0;
+ }
+
+ csval = pcmcia_enable_device(dev);
+ if (csval) {
+ dev_err(&dev->dev, "pcmcia_enable_device failed (err=%d)\n",
+ csval);
+ return 0;
+ }
+
+ ems_pcmcia_add_card(dev, dev->resource[2]->start);
+ return 0;
+}
+
+/*
+ * Release claimed resources
+ */
+static void ems_pcmcia_remove(struct pcmcia_device *dev)
+{
+ ems_pcmcia_del_card(dev);
+ pcmcia_disable_device(dev);
+}
+
+static struct pcmcia_driver ems_pcmcia_driver = {
+ .name = DRV_NAME,
+ .probe = ems_pcmcia_probe,
+ .remove = ems_pcmcia_remove,
+ .id_table = ems_pcmcia_tbl,
+};
+
+static int __init ems_pcmcia_init(void)
+{
+ return pcmcia_register_driver(&ems_pcmcia_driver);
+}
+module_init(ems_pcmcia_init);
+
+static void __exit ems_pcmcia_exit(void)
+{
+ pcmcia_unregister_driver(&ems_pcmcia_driver);
+}
+module_exit(ems_pcmcia_exit);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
new file mode 100644
index 00000000000..905bce0b3a4
--- /dev/null
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Derived from the PCAN project file driver/src/pcan_pci.c:
+ *
+ * Copyright (C) 2001-2006 PEAK System-Technik GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#include "sja1000.h"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI/PCIe cards");
+MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe CAN card");
+MODULE_LICENSE("GPL v2");
+
+#define DRV_NAME "peak_pci"
+
+struct peak_pci_chan {
+ void __iomem *cfg_base; /* Common for all channels */
+ struct net_device *next_dev; /* Chain of network devices */
+ u16 icr_mask; /* Interrupt mask for fast ack */
+};
+
+#define PEAK_PCI_CAN_CLOCK (16000000 / 2)
+
+#define PEAK_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
+#define PEAK_PCI_OCR OCR_TX0_PUSHPULL
+
+/*
+ * Important PITA registers
+ */
+#define PITA_ICR 0x00 /* Interrupt control register */
+#define PITA_GPIOICR 0x18 /* GPIO interface control register */
+#define PITA_MISC 0x1C /* Miscellaneous register */
+
+#define PEAK_PCI_CFG_SIZE 0x1000 /* Size of the config PCI bar */
+#define PEAK_PCI_CHAN_SIZE 0x0400 /* Size used by the channel */
+
+#define PEAK_PCI_VENDOR_ID 0x001C /* The PCI device and vendor IDs */
+#define PEAK_PCI_DEVICE_ID 0x0001 /* for PCI/PCIe slot cards */
+
+static const u16 peak_pci_icr_masks[] = {0x02, 0x01, 0x40, 0x80};
+
+static DEFINE_PCI_DEVICE_TABLE(peak_pci_tbl) = {
+ {PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, peak_pci_tbl);
+
+static u8 peak_pci_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return readb(priv->reg_base + (port << 2));
+}
+
+static void peak_pci_write_reg(const struct sja1000_priv *priv,
+ int port, u8 val)
+{
+ writeb(val, priv->reg_base + (port << 2));
+}
+
+static void peak_pci_post_irq(const struct sja1000_priv *priv)
+{
+ struct peak_pci_chan *chan = priv->priv;
+ u16 icr;
+
+ /* Select and clear in PITA stored interrupt */
+ icr = readw(chan->cfg_base + PITA_ICR);
+ if (icr & chan->icr_mask)
+ writew(chan->icr_mask, chan->cfg_base + PITA_ICR);
+}
+
+static int __devinit peak_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct sja1000_priv *priv;
+ struct peak_pci_chan *chan;
+ struct net_device *dev, *dev0 = NULL;
+ void __iomem *cfg_base, *reg_base;
+ u16 sub_sys_id, icr;
+ int i, err, channels;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err)
+ goto failure_disable_pci;
+
+ err = pci_read_config_word(pdev, 0x2e, &sub_sys_id);
+ if (err)
+ goto failure_release_regions;
+
+ dev_dbg(&pdev->dev, "probing device %04x:%04x:%04x\n",
+ pdev->vendor, pdev->device, sub_sys_id);
+
+ err = pci_write_config_word(pdev, 0x44, 0);
+ if (err)
+ goto failure_release_regions;
+
+ if (sub_sys_id >= 12)
+ channels = 4;
+ else if (sub_sys_id >= 10)
+ channels = 3;
+ else if (sub_sys_id >= 4)
+ channels = 2;
+ else
+ channels = 1;
+
+ cfg_base = pci_iomap(pdev, 0, PEAK_PCI_CFG_SIZE);
+ if (!cfg_base) {
+ dev_err(&pdev->dev, "failed to map PCI resource #0\n");
+ goto failure_release_regions;
+ }
+
+ reg_base = pci_iomap(pdev, 1, PEAK_PCI_CHAN_SIZE * channels);
+ if (!reg_base) {
+ dev_err(&pdev->dev, "failed to map PCI resource #1\n");
+ goto failure_unmap_cfg_base;
+ }
+
+ /* Set GPIO control register */
+ writew(0x0005, cfg_base + PITA_GPIOICR + 2);
+ /* Enable all channels of this card */
+ writeb(0x00, cfg_base + PITA_GPIOICR);
+ /* Toggle reset */
+ writeb(0x05, cfg_base + PITA_MISC + 3);
+ mdelay(5);
+ /* Leave parport mux mode */
+ writeb(0x04, cfg_base + PITA_MISC + 3);
+
+ icr = readw(cfg_base + PITA_ICR + 2);
+
+ for (i = 0; i < channels; i++) {
+ dev = alloc_sja1000dev(sizeof(struct peak_pci_chan));
+ if (!dev) {
+ err = -ENOMEM;
+ goto failure_remove_channels;
+ }
+
+ priv = netdev_priv(dev);
+ chan = priv->priv;
+
+ chan->cfg_base = cfg_base;
+ priv->reg_base = reg_base + i * PEAK_PCI_CHAN_SIZE;
+
+ priv->read_reg = peak_pci_read_reg;
+ priv->write_reg = peak_pci_write_reg;
+ priv->post_irq = peak_pci_post_irq;
+
+ priv->can.clock.freq = PEAK_PCI_CAN_CLOCK;
+ priv->ocr = PEAK_PCI_OCR;
+ priv->cdr = PEAK_PCI_CDR;
+ /* Neither a slave nor a single device distributes the clock */
+ if (channels == 1 || i > 0)
+ priv->cdr |= CDR_CLK_OFF;
+
+ /* Setup interrupt handling */
+ priv->irq_flags = IRQF_SHARED;
+ dev->irq = pdev->irq;
+
+ chan->icr_mask = peak_pci_icr_masks[i];
+ icr |= chan->icr_mask;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = register_sja1000dev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register device\n");
+ free_sja1000dev(dev);
+ goto failure_remove_channels;
+ }
+
+ /* Create chain of SJA1000 devices */
+ if (i == 0)
+ dev0 = dev;
+ else
+ chan->next_dev = dev;
+
+ dev_info(&pdev->dev,
+ "%s at reg_base=0x%p cfg_base=0x%p irq=%d\n",
+ dev->name, priv->reg_base, chan->cfg_base, dev->irq);
+ }
+
+ pci_set_drvdata(pdev, dev0);
+
+ /* Enable interrupts */
+ writew(icr, cfg_base + PITA_ICR + 2);
+
+ return 0;
+
+failure_remove_channels:
+ /* Disable interrupts */
+ writew(0x0, cfg_base + PITA_ICR + 2);
+
+ for (dev = dev0; dev; dev = chan->next_dev) {
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
+ priv = netdev_priv(dev);
+ chan = priv->priv;
+ dev = chan->next_dev;
+ }
+
+ pci_iounmap(pdev, reg_base);
+
+failure_unmap_cfg_base:
+ pci_iounmap(pdev, cfg_base);
+
+failure_release_regions:
+ pci_release_regions(pdev);
+
+failure_disable_pci:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void __devexit peak_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev); /* First device */
+ struct sja1000_priv *priv = netdev_priv(dev);
+ struct peak_pci_chan *chan = priv->priv;
+ void __iomem *cfg_base = chan->cfg_base;
+ void __iomem *reg_base = priv->reg_base;
+
+ /* Disable interrupts */
+ writew(0x0, cfg_base + PITA_ICR + 2);
+
+ /* Loop over all registered devices */
+ while (1) {
+ dev_info(&pdev->dev, "removing device %s\n", dev->name);
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
+ dev = chan->next_dev;
+ if (!dev)
+ break;
+ priv = netdev_priv(dev);
+ chan = priv->priv;
+ }
+
+ pci_iounmap(pdev, reg_base);
+ pci_iounmap(pdev, cfg_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver peak_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = peak_pci_tbl,
+ .probe = peak_pci_probe,
+ .remove = __devexit_p(peak_pci_remove),
+};
+
+static int __init peak_pci_init(void)
+{
+ return pci_register_driver(&peak_pci_driver);
+}
+module_init(peak_pci_init);
+
+static void __exit peak_pci_exit(void)
+{
+ pci_unregister_driver(&peak_pci_driver);
+}
+module_exit(peak_pci_exit);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index f501bba1fc6..04a3f1b756a 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -40,8 +40,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 78bd4ecac14..23fff06875f 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -40,8 +40,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef SJA1000_DEV_H
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index cee6ba2b8b5..c3dd9d09be5 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -29,7 +29,7 @@
* nxp,external-clock-frequency = <16000000>;
* };
*
- * See "Documentation/powerpc/dts-bindings/can/sja1000.txt" for further
+ * See "Documentation/devicetree/bindings/net/can/sja1000.txt" for further
* information.
*/
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 4b70b7e8bde..a979b006f45 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -35,8 +35,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index a30b8f480f6..f93e2d6fc88 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -37,8 +37,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
deleted file mode 100644
index e9a03fffef1..00000000000
--- a/drivers/net/chelsio/sge.c
+++ /dev/null
@@ -1,2140 +0,0 @@
-/*****************************************************************************
- * *
- * File: sge.c *
- * $Revision: 1.26 $ *
- * $Date: 2005/06/21 18:29:48 $ *
- * Description: *
- * DMA engine. *
- * part of the Chelsio 10Gb Ethernet Driver. *
- * *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
- * *
- * http://www.chelsio.com *
- * *
- * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
- * All rights reserved. *
- * *
- * Maintainers: maintainers@chelsio.com *
- * *
- * Authors: Dimitrios Michailidis <dm@chelsio.com> *
- * Tina Yang <tainay@chelsio.com> *
- * Felix Marti <felix@chelsio.com> *
- * Scott Bardone <sbardone@chelsio.com> *
- * Kurt Ottaway <kottaway@chelsio.com> *
- * Frank DiMambro <frank@chelsio.com> *
- * *
- * History: *
- * *
- ****************************************************************************/
-
-#include "common.h"
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/ktime.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/tcp.h>
-#include <linux/ip.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/slab.h>
-#include <linux/prefetch.h>
-
-#include "cpl5_cmd.h"
-#include "sge.h"
-#include "regs.h"
-#include "espi.h"
-
-/* This belongs in if_ether.h */
-#define ETH_P_CPL5 0xf
-
-#define SGE_CMDQ_N 2
-#define SGE_FREELQ_N 2
-#define SGE_CMDQ0_E_N 1024
-#define SGE_CMDQ1_E_N 128
-#define SGE_FREEL_SIZE 4096
-#define SGE_JUMBO_FREEL_SIZE 512
-#define SGE_FREEL_REFILL_THRESH 16
-#define SGE_RESPQ_E_N 1024
-#define SGE_INTRTIMER_NRES 1000
-#define SGE_RX_SM_BUF_SIZE 1536
-#define SGE_TX_DESC_MAX_PLEN 16384
-
-#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
-
-/*
- * Period of the TX buffer reclaim timer. This timer does not need to run
- * frequently as TX buffers are usually reclaimed by new TX packets.
- */
-#define TX_RECLAIM_PERIOD (HZ / 4)
-
-#define M_CMD_LEN 0x7fffffff
-#define V_CMD_LEN(v) (v)
-#define G_CMD_LEN(v) ((v) & M_CMD_LEN)
-#define V_CMD_GEN1(v) ((v) << 31)
-#define V_CMD_GEN2(v) (v)
-#define F_CMD_DATAVALID (1 << 1)
-#define F_CMD_SOP (1 << 2)
-#define V_CMD_EOP(v) ((v) << 3)
-
-/*
- * Command queue, receive buffer list, and response queue descriptors.
- */
-#if defined(__BIG_ENDIAN_BITFIELD)
-struct cmdQ_e {
- u32 addr_lo;
- u32 len_gen;
- u32 flags;
- u32 addr_hi;
-};
-
-struct freelQ_e {
- u32 addr_lo;
- u32 len_gen;
- u32 gen2;
- u32 addr_hi;
-};
-
-struct respQ_e {
- u32 Qsleeping : 4;
- u32 Cmdq1CreditReturn : 5;
- u32 Cmdq1DmaComplete : 5;
- u32 Cmdq0CreditReturn : 5;
- u32 Cmdq0DmaComplete : 5;
- u32 FreelistQid : 2;
- u32 CreditValid : 1;
- u32 DataValid : 1;
- u32 Offload : 1;
- u32 Eop : 1;
- u32 Sop : 1;
- u32 GenerationBit : 1;
- u32 BufferLength;
-};
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-struct cmdQ_e {
- u32 len_gen;
- u32 addr_lo;
- u32 addr_hi;
- u32 flags;
-};
-
-struct freelQ_e {
- u32 len_gen;
- u32 addr_lo;
- u32 addr_hi;
- u32 gen2;
-};
-
-struct respQ_e {
- u32 BufferLength;
- u32 GenerationBit : 1;
- u32 Sop : 1;
- u32 Eop : 1;
- u32 Offload : 1;
- u32 DataValid : 1;
- u32 CreditValid : 1;
- u32 FreelistQid : 2;
- u32 Cmdq0DmaComplete : 5;
- u32 Cmdq0CreditReturn : 5;
- u32 Cmdq1DmaComplete : 5;
- u32 Cmdq1CreditReturn : 5;
- u32 Qsleeping : 4;
-} ;
-#endif
-
-/*
- * SW Context Command and Freelist Queue Descriptors
- */
-struct cmdQ_ce {
- struct sk_buff *skb;
- DEFINE_DMA_UNMAP_ADDR(dma_addr);
- DEFINE_DMA_UNMAP_LEN(dma_len);
-};
-
-struct freelQ_ce {
- struct sk_buff *skb;
- DEFINE_DMA_UNMAP_ADDR(dma_addr);
- DEFINE_DMA_UNMAP_LEN(dma_len);
-};
-
-/*
- * SW command, freelist and response rings
- */
-struct cmdQ {
- unsigned long status; /* HW DMA fetch status */
- unsigned int in_use; /* # of in-use command descriptors */
- unsigned int size; /* # of descriptors */
- unsigned int processed; /* total # of descs HW has processed */
- unsigned int cleaned; /* total # of descs SW has reclaimed */
- unsigned int stop_thres; /* SW TX queue suspend threshold */
- u16 pidx; /* producer index (SW) */
- u16 cidx; /* consumer index (HW) */
- u8 genbit; /* current generation (=valid) bit */
- u8 sop; /* is next entry start of packet? */
- struct cmdQ_e *entries; /* HW command descriptor Q */
- struct cmdQ_ce *centries; /* SW command context descriptor Q */
- dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
- spinlock_t lock; /* Lock to protect cmdQ enqueuing */
-};
-
-struct freelQ {
- unsigned int credits; /* # of available RX buffers */
- unsigned int size; /* free list capacity */
- u16 pidx; /* producer index (SW) */
- u16 cidx; /* consumer index (HW) */
- u16 rx_buffer_size; /* Buffer size on this free list */
- u16 dma_offset; /* DMA offset to align IP headers */
- u16 recycleq_idx; /* skb recycle q to use */
- u8 genbit; /* current generation (=valid) bit */
- struct freelQ_e *entries; /* HW freelist descriptor Q */
- struct freelQ_ce *centries; /* SW freelist context descriptor Q */
- dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
-};
-
-struct respQ {
- unsigned int credits; /* credits to be returned to SGE */
- unsigned int size; /* # of response Q descriptors */
- u16 cidx; /* consumer index (SW) */
- u8 genbit; /* current generation(=valid) bit */
- struct respQ_e *entries; /* HW response descriptor Q */
- dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
-};
-
-/* Bit flags for cmdQ.status */
-enum {
- CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
- CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
-};
-
-/* T204 TX SW scheduler */
-
-/* Per T204 TX port */
-struct sched_port {
- unsigned int avail; /* available bits - quota */
- unsigned int drain_bits_per_1024ns; /* drain rate */
- unsigned int speed; /* drain rate, mbps */
- unsigned int mtu; /* mtu size */
- struct sk_buff_head skbq; /* pending skbs */
-};
-
-/* Per T204 device */
-struct sched {
- ktime_t last_updated; /* last time quotas were computed */
- unsigned int max_avail; /* max bits to be sent to any port */
- unsigned int port; /* port index (round robin ports) */
- unsigned int num; /* num skbs in per port queues */
- struct sched_port p[MAX_NPORTS];
- struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
-};
-static void restart_sched(unsigned long);
-
-
-/*
- * Main SGE data structure
- *
- * Interrupts are handled by a single CPU and it is likely that on a MP system
- * the application is migrated to another CPU. In that scenario, we try to
- * separate the RX(in irq context) and TX state in order to decrease memory
- * contention.
- */
-struct sge {
- struct adapter *adapter; /* adapter backpointer */
- struct net_device *netdev; /* netdevice backpointer */
- struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
- struct respQ respQ; /* response Q */
- unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
- unsigned int rx_pkt_pad; /* RX padding for L2 packets */
- unsigned int jumbo_fl; /* jumbo freelist Q index */
- unsigned int intrtimer_nres; /* no-resource interrupt timer */
- unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
- struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
- struct timer_list espibug_timer;
- unsigned long espibug_timeout;
- struct sk_buff *espibug_skb[MAX_NPORTS];
- u32 sge_control; /* shadow value of sge control reg */
- struct sge_intr_counts stats;
- struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
- struct sched *tx_sched;
- struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
-};
-
-static const u8 ch_mac_addr[ETH_ALEN] = {
- 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
-};
-
-/*
- * stop tasklet and free all pending skb's
- */
-static void tx_sched_stop(struct sge *sge)
-{
- struct sched *s = sge->tx_sched;
- int i;
-
- tasklet_kill(&s->sched_tsk);
-
- for (i = 0; i < MAX_NPORTS; i++)
- __skb_queue_purge(&s->p[s->port].skbq);
-}
-
-/*
- * t1_sched_update_parms() is called when the MTU or link speed changes. It
- * re-computes scheduler parameters to scope with the change.
- */
-unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
- unsigned int mtu, unsigned int speed)
-{
- struct sched *s = sge->tx_sched;
- struct sched_port *p = &s->p[port];
- unsigned int max_avail_segs;
-
- pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
- if (speed)
- p->speed = speed;
- if (mtu)
- p->mtu = mtu;
-
- if (speed || mtu) {
- unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
- do_div(drain, (p->mtu + 50) * 1000);
- p->drain_bits_per_1024ns = (unsigned int) drain;
-
- if (p->speed < 1000)
- p->drain_bits_per_1024ns =
- 90 * p->drain_bits_per_1024ns / 100;
- }
-
- if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
- p->drain_bits_per_1024ns -= 16;
- s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
- max_avail_segs = max(1U, 4096 / (p->mtu - 40));
- } else {
- s->max_avail = 16384;
- max_avail_segs = max(1U, 9000 / (p->mtu - 40));
- }
-
- pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
- "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
- p->speed, s->max_avail, max_avail_segs,
- p->drain_bits_per_1024ns);
-
- return max_avail_segs * (p->mtu - 40);
-}
-
-#if 0
-
-/*
- * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
- * data that can be pushed per port.
- */
-void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
-{
- struct sched *s = sge->tx_sched;
- unsigned int i;
-
- s->max_avail = val;
- for (i = 0; i < MAX_NPORTS; i++)
- t1_sched_update_parms(sge, i, 0, 0);
-}
-
-/*
- * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
- * is draining.
- */
-void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
- unsigned int val)
-{
- struct sched *s = sge->tx_sched;
- struct sched_port *p = &s->p[port];
- p->drain_bits_per_1024ns = val * 1024 / 1000;
- t1_sched_update_parms(sge, port, 0, 0);
-}
-
-#endif /* 0 */
-
-
-/*
- * get_clock() implements a ns clock (see ktime_get)
- */
-static inline ktime_t get_clock(void)
-{
- struct timespec ts;
-
- ktime_get_ts(&ts);
- return timespec_to_ktime(ts);
-}
-
-/*
- * tx_sched_init() allocates resources and does basic initialization.
- */
-static int tx_sched_init(struct sge *sge)
-{
- struct sched *s;
- int i;
-
- s = kzalloc(sizeof (struct sched), GFP_KERNEL);
- if (!s)
- return -ENOMEM;
-
- pr_debug("tx_sched_init\n");
- tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
- sge->tx_sched = s;
-
- for (i = 0; i < MAX_NPORTS; i++) {
- skb_queue_head_init(&s->p[i].skbq);
- t1_sched_update_parms(sge, i, 1500, 1000);
- }
-
- return 0;
-}
-
-/*
- * sched_update_avail() computes the delta since the last time it was called
- * and updates the per port quota (number of bits that can be sent to the any
- * port).
- */
-static inline int sched_update_avail(struct sge *sge)
-{
- struct sched *s = sge->tx_sched;
- ktime_t now = get_clock();
- unsigned int i;
- long long delta_time_ns;
-
- delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
-
- pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
- if (delta_time_ns < 15000)
- return 0;
-
- for (i = 0; i < MAX_NPORTS; i++) {
- struct sched_port *p = &s->p[i];
- unsigned int delta_avail;
-
- delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
- p->avail = min(p->avail + delta_avail, s->max_avail);
- }
-
- s->last_updated = now;
-
- return 1;
-}
-
-/*
- * sched_skb() is called from two different places. In the tx path, any
- * packet generating load on an output port will call sched_skb()
- * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
- * context (skb == NULL).
- * The scheduler only returns a skb (which will then be sent) if the
- * length of the skb is <= the current quota of the output port.
- */
-static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
- unsigned int credits)
-{
- struct sched *s = sge->tx_sched;
- struct sk_buff_head *skbq;
- unsigned int i, len, update = 1;
-
- pr_debug("sched_skb %p\n", skb);
- if (!skb) {
- if (!s->num)
- return NULL;
- } else {
- skbq = &s->p[skb->dev->if_port].skbq;
- __skb_queue_tail(skbq, skb);
- s->num++;
- skb = NULL;
- }
-
- if (credits < MAX_SKB_FRAGS + 1)
- goto out;
-
-again:
- for (i = 0; i < MAX_NPORTS; i++) {
- s->port = (s->port + 1) & (MAX_NPORTS - 1);
- skbq = &s->p[s->port].skbq;
-
- skb = skb_peek(skbq);
-
- if (!skb)
- continue;
-
- len = skb->len;
- if (len <= s->p[s->port].avail) {
- s->p[s->port].avail -= len;
- s->num--;
- __skb_unlink(skb, skbq);
- goto out;
- }
- skb = NULL;
- }
-
- if (update-- && sched_update_avail(sge))
- goto again;
-
-out:
- /* If there are more pending skbs, we use the hardware to schedule us
- * again.
- */
- if (s->num && !skb) {
- struct cmdQ *q = &sge->cmdQ[0];
- clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
- if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
- set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
- writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
- }
- }
- pr_debug("sched_skb ret %p\n", skb);
-
- return skb;
-}
-
-/*
- * PIO to indicate that memory mapped Q contains valid descriptor(s).
- */
-static inline void doorbell_pio(struct adapter *adapter, u32 val)
-{
- wmb();
- writel(val, adapter->regs + A_SG_DOORBELL);
-}
-
-/*
- * Frees all RX buffers on the freelist Q. The caller must make sure that
- * the SGE is turned off before calling this function.
- */
-static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
-{
- unsigned int cidx = q->cidx;
-
- while (q->credits--) {
- struct freelQ_ce *ce = &q->centries[cidx];
-
- pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len),
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb(ce->skb);
- ce->skb = NULL;
- if (++cidx == q->size)
- cidx = 0;
- }
-}
-
-/*
- * Free RX free list and response queue resources.
- */
-static void free_rx_resources(struct sge *sge)
-{
- struct pci_dev *pdev = sge->adapter->pdev;
- unsigned int size, i;
-
- if (sge->respQ.entries) {
- size = sizeof(struct respQ_e) * sge->respQ.size;
- pci_free_consistent(pdev, size, sge->respQ.entries,
- sge->respQ.dma_addr);
- }
-
- for (i = 0; i < SGE_FREELQ_N; i++) {
- struct freelQ *q = &sge->freelQ[i];
-
- if (q->centries) {
- free_freelQ_buffers(pdev, q);
- kfree(q->centries);
- }
- if (q->entries) {
- size = sizeof(struct freelQ_e) * q->size;
- pci_free_consistent(pdev, size, q->entries,
- q->dma_addr);
- }
- }
-}
-
-/*
- * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
- * response queue.
- */
-static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
-{
- struct pci_dev *pdev = sge->adapter->pdev;
- unsigned int size, i;
-
- for (i = 0; i < SGE_FREELQ_N; i++) {
- struct freelQ *q = &sge->freelQ[i];
-
- q->genbit = 1;
- q->size = p->freelQ_size[i];
- q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
- size = sizeof(struct freelQ_e) * q->size;
- q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
- if (!q->entries)
- goto err_no_mem;
-
- size = sizeof(struct freelQ_ce) * q->size;
- q->centries = kzalloc(size, GFP_KERNEL);
- if (!q->centries)
- goto err_no_mem;
- }
-
- /*
- * Calculate the buffer sizes for the two free lists. FL0 accommodates
- * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
- * including all the sk_buff overhead.
- *
- * Note: For T2 FL0 and FL1 are reversed.
- */
- sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
- sizeof(struct cpl_rx_data) +
- sge->freelQ[!sge->jumbo_fl].dma_offset;
-
- size = (16 * 1024) -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
- sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
-
- /*
- * Setup which skb recycle Q should be used when recycling buffers from
- * each free list.
- */
- sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
- sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
-
- sge->respQ.genbit = 1;
- sge->respQ.size = SGE_RESPQ_E_N;
- sge->respQ.credits = 0;
- size = sizeof(struct respQ_e) * sge->respQ.size;
- sge->respQ.entries =
- pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
- if (!sge->respQ.entries)
- goto err_no_mem;
- return 0;
-
-err_no_mem:
- free_rx_resources(sge);
- return -ENOMEM;
-}
-
-/*
- * Reclaims n TX descriptors and frees the buffers associated with them.
- */
-static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
-{
- struct cmdQ_ce *ce;
- struct pci_dev *pdev = sge->adapter->pdev;
- unsigned int cidx = q->cidx;
-
- q->in_use -= n;
- ce = &q->centries[cidx];
- while (n--) {
- if (likely(dma_unmap_len(ce, dma_len))) {
- pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len),
- PCI_DMA_TODEVICE);
- if (q->sop)
- q->sop = 0;
- }
- if (ce->skb) {
- dev_kfree_skb_any(ce->skb);
- q->sop = 1;
- }
- ce++;
- if (++cidx == q->size) {
- cidx = 0;
- ce = q->centries;
- }
- }
- q->cidx = cidx;
-}
-
-/*
- * Free TX resources.
- *
- * Assumes that SGE is stopped and all interrupts are disabled.
- */
-static void free_tx_resources(struct sge *sge)
-{
- struct pci_dev *pdev = sge->adapter->pdev;
- unsigned int size, i;
-
- for (i = 0; i < SGE_CMDQ_N; i++) {
- struct cmdQ *q = &sge->cmdQ[i];
-
- if (q->centries) {
- if (q->in_use)
- free_cmdQ_buffers(sge, q, q->in_use);
- kfree(q->centries);
- }
- if (q->entries) {
- size = sizeof(struct cmdQ_e) * q->size;
- pci_free_consistent(pdev, size, q->entries,
- q->dma_addr);
- }
- }
-}
-
-/*
- * Allocates basic TX resources, consisting of memory mapped command Qs.
- */
-static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
-{
- struct pci_dev *pdev = sge->adapter->pdev;
- unsigned int size, i;
-
- for (i = 0; i < SGE_CMDQ_N; i++) {
- struct cmdQ *q = &sge->cmdQ[i];
-
- q->genbit = 1;
- q->sop = 1;
- q->size = p->cmdQ_size[i];
- q->in_use = 0;
- q->status = 0;
- q->processed = q->cleaned = 0;
- q->stop_thres = 0;
- spin_lock_init(&q->lock);
- size = sizeof(struct cmdQ_e) * q->size;
- q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
- if (!q->entries)
- goto err_no_mem;
-
- size = sizeof(struct cmdQ_ce) * q->size;
- q->centries = kzalloc(size, GFP_KERNEL);
- if (!q->centries)
- goto err_no_mem;
- }
-
- /*
- * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
- * only. For queue 0 set the stop threshold so we can handle one more
- * packet from each port, plus reserve an additional 24 entries for
- * Ethernet packets only. Queue 1 never suspends nor do we reserve
- * space for Ethernet packets.
- */
- sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
- (MAX_SKB_FRAGS + 1);
- return 0;
-
-err_no_mem:
- free_tx_resources(sge);
- return -ENOMEM;
-}
-
-static inline void setup_ring_params(struct adapter *adapter, u64 addr,
- u32 size, int base_reg_lo,
- int base_reg_hi, int size_reg)
-{
- writel((u32)addr, adapter->regs + base_reg_lo);
- writel(addr >> 32, adapter->regs + base_reg_hi);
- writel(size, adapter->regs + size_reg);
-}
-
-/*
- * Enable/disable VLAN acceleration.
- */
-void t1_vlan_mode(struct adapter *adapter, u32 features)
-{
- struct sge *sge = adapter->sge;
-
- if (features & NETIF_F_HW_VLAN_RX)
- sge->sge_control |= F_VLAN_XTRACT;
- else
- sge->sge_control &= ~F_VLAN_XTRACT;
- if (adapter->open_device_map) {
- writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
- readl(adapter->regs + A_SG_CONTROL); /* flush */
- }
-}
-
-/*
- * Programs the various SGE registers. However, the engine is not yet enabled,
- * but sge->sge_control is setup and ready to go.
- */
-static void configure_sge(struct sge *sge, struct sge_params *p)
-{
- struct adapter *ap = sge->adapter;
-
- writel(0, ap->regs + A_SG_CONTROL);
- setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
- A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
- setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
- A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
- setup_ring_params(ap, sge->freelQ[0].dma_addr,
- sge->freelQ[0].size, A_SG_FL0BASELWR,
- A_SG_FL0BASEUPR, A_SG_FL0SIZE);
- setup_ring_params(ap, sge->freelQ[1].dma_addr,
- sge->freelQ[1].size, A_SG_FL1BASELWR,
- A_SG_FL1BASEUPR, A_SG_FL1SIZE);
-
- /* The threshold comparison uses <. */
- writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
-
- setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
- A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
- writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
-
- sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
- F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
- V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
- V_RX_PKT_OFFSET(sge->rx_pkt_pad);
-
-#if defined(__BIG_ENDIAN_BITFIELD)
- sge->sge_control |= F_ENABLE_BIG_ENDIAN;
-#endif
-
- /* Initialize no-resource timer */
- sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
-
- t1_sge_set_coalesce_params(sge, p);
-}
-
-/*
- * Return the payload capacity of the jumbo free-list buffers.
- */
-static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
-{
- return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
- sge->freelQ[sge->jumbo_fl].dma_offset -
- sizeof(struct cpl_rx_data);
-}
-
-/*
- * Frees all SGE related resources and the sge structure itself
- */
-void t1_sge_destroy(struct sge *sge)
-{
- int i;
-
- for_each_port(sge->adapter, i)
- free_percpu(sge->port_stats[i]);
-
- kfree(sge->tx_sched);
- free_tx_resources(sge);
- free_rx_resources(sge);
- kfree(sge);
-}
-
-/*
- * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
- * context Q) until the Q is full or alloc_skb fails.
- *
- * It is possible that the generation bits already match, indicating that the
- * buffer is already valid and nothing needs to be done. This happens when we
- * copied a received buffer into a new sk_buff during the interrupt processing.
- *
- * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
- * we specify a RX_OFFSET in order to make sure that the IP header is 4B
- * aligned.
- */
-static void refill_free_list(struct sge *sge, struct freelQ *q)
-{
- struct pci_dev *pdev = sge->adapter->pdev;
- struct freelQ_ce *ce = &q->centries[q->pidx];
- struct freelQ_e *e = &q->entries[q->pidx];
- unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
-
- while (q->credits < q->size) {
- struct sk_buff *skb;
- dma_addr_t mapping;
-
- skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
- if (!skb)
- break;
-
- skb_reserve(skb, q->dma_offset);
- mapping = pci_map_single(pdev, skb->data, dma_len,
- PCI_DMA_FROMDEVICE);
- skb_reserve(skb, sge->rx_pkt_pad);
-
- ce->skb = skb;
- dma_unmap_addr_set(ce, dma_addr, mapping);
- dma_unmap_len_set(ce, dma_len, dma_len);
- e->addr_lo = (u32)mapping;
- e->addr_hi = (u64)mapping >> 32;
- e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
- wmb();
- e->gen2 = V_CMD_GEN2(q->genbit);
-
- e++;
- ce++;
- if (++q->pidx == q->size) {
- q->pidx = 0;
- q->genbit ^= 1;
- ce = q->centries;
- e = q->entries;
- }
- q->credits++;
- }
-}
-
-/*
- * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
- * of both rings, we go into 'few interrupt mode' in order to give the system
- * time to free up resources.
- */
-static void freelQs_empty(struct sge *sge)
-{
- struct adapter *adapter = sge->adapter;
- u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
- u32 irqholdoff_reg;
-
- refill_free_list(sge, &sge->freelQ[0]);
- refill_free_list(sge, &sge->freelQ[1]);
-
- if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
- sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
- irq_reg |= F_FL_EXHAUSTED;
- irqholdoff_reg = sge->fixed_intrtimer;
- } else {
- /* Clear the F_FL_EXHAUSTED interrupts for now */
- irq_reg &= ~F_FL_EXHAUSTED;
- irqholdoff_reg = sge->intrtimer_nres;
- }
- writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
- writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
-
- /* We reenable the Qs to force a freelist GTS interrupt later */
- doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
-}
-
-#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
-#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
-#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
- F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
-
-/*
- * Disable SGE Interrupts
- */
-void t1_sge_intr_disable(struct sge *sge)
-{
- u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
-
- writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
- writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
-}
-
-/*
- * Enable SGE interrupts.
- */
-void t1_sge_intr_enable(struct sge *sge)
-{
- u32 en = SGE_INT_ENABLE;
- u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
-
- if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
- en &= ~F_PACKET_TOO_BIG;
- writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
- writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
-}
-
-/*
- * Clear SGE interrupts.
- */
-void t1_sge_intr_clear(struct sge *sge)
-{
- writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
- writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
-}
-
-/*
- * SGE 'Error' interrupt handler
- */
-int t1_sge_intr_error_handler(struct sge *sge)
-{
- struct adapter *adapter = sge->adapter;
- u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
-
- if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
- cause &= ~F_PACKET_TOO_BIG;
- if (cause & F_RESPQ_EXHAUSTED)
- sge->stats.respQ_empty++;
- if (cause & F_RESPQ_OVERFLOW) {
- sge->stats.respQ_overflow++;
- pr_alert("%s: SGE response queue overflow\n",
- adapter->name);
- }
- if (cause & F_FL_EXHAUSTED) {
- sge->stats.freelistQ_empty++;
- freelQs_empty(sge);
- }
- if (cause & F_PACKET_TOO_BIG) {
- sge->stats.pkt_too_big++;
- pr_alert("%s: SGE max packet size exceeded\n",
- adapter->name);
- }
- if (cause & F_PACKET_MISMATCH) {
- sge->stats.pkt_mismatch++;
- pr_alert("%s: SGE packet mismatch\n", adapter->name);
- }
- if (cause & SGE_INT_FATAL)
- t1_fatal_err(adapter);
-
- writel(cause, adapter->regs + A_SG_INT_CAUSE);
- return 0;
-}
-
-const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
-{
- return &sge->stats;
-}
-
-void t1_sge_get_port_stats(const struct sge *sge, int port,
- struct sge_port_stats *ss)
-{
- int cpu;
-
- memset(ss, 0, sizeof(*ss));
- for_each_possible_cpu(cpu) {
- struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
-
- ss->rx_cso_good += st->rx_cso_good;
- ss->tx_cso += st->tx_cso;
- ss->tx_tso += st->tx_tso;
- ss->tx_need_hdrroom += st->tx_need_hdrroom;
- ss->vlan_xtract += st->vlan_xtract;
- ss->vlan_insert += st->vlan_insert;
- }
-}
-
-/**
- * recycle_fl_buf - recycle a free list buffer
- * @fl: the free list
- * @idx: index of buffer to recycle
- *
- * Recycles the specified buffer on the given free list by adding it at
- * the next available slot on the list.
- */
-static void recycle_fl_buf(struct freelQ *fl, int idx)
-{
- struct freelQ_e *from = &fl->entries[idx];
- struct freelQ_e *to = &fl->entries[fl->pidx];
-
- fl->centries[fl->pidx] = fl->centries[idx];
- to->addr_lo = from->addr_lo;
- to->addr_hi = from->addr_hi;
- to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
- wmb();
- to->gen2 = V_CMD_GEN2(fl->genbit);
- fl->credits++;
-
- if (++fl->pidx == fl->size) {
- fl->pidx = 0;
- fl->genbit ^= 1;
- }
-}
-
-static int copybreak __read_mostly = 256;
-module_param(copybreak, int, 0);
-MODULE_PARM_DESC(copybreak, "Receive copy threshold");
-
-/**
- * get_packet - return the next ingress packet buffer
- * @pdev: the PCI device that received the packet
- * @fl: the SGE free list holding the packet
- * @len: the actual packet length, excluding any SGE padding
- *
- * Get the next packet from a free list and complete setup of the
- * sk_buff. If the packet is small we make a copy and recycle the
- * original buffer, otherwise we use the original buffer itself. If a
- * positive drop threshold is supplied packets are dropped and their
- * buffers recycled if (a) the number of remaining buffers is under the
- * threshold and the packet is too big to copy, or (b) the packet should
- * be copied but there is no memory for the copy.
- */
-static inline struct sk_buff *get_packet(struct pci_dev *pdev,
- struct freelQ *fl, unsigned int len)
-{
- struct sk_buff *skb;
- const struct freelQ_ce *ce = &fl->centries[fl->cidx];
-
- if (len < copybreak) {
- skb = alloc_skb(len + 2, GFP_ATOMIC);
- if (!skb)
- goto use_orig_buf;
-
- skb_reserve(skb, 2); /* align IP header */
- skb_put(skb, len);
- pci_dma_sync_single_for_cpu(pdev,
- dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len),
- PCI_DMA_FROMDEVICE);
- skb_copy_from_linear_data(ce->skb, skb->data, len);
- pci_dma_sync_single_for_device(pdev,
- dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len),
- PCI_DMA_FROMDEVICE);
- recycle_fl_buf(fl, fl->cidx);
- return skb;
- }
-
-use_orig_buf:
- if (fl->credits < 2) {
- recycle_fl_buf(fl, fl->cidx);
- return NULL;
- }
-
- pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
- skb = ce->skb;
- prefetch(skb->data);
-
- skb_put(skb, len);
- return skb;
-}
-
-/**
- * unexpected_offload - handle an unexpected offload packet
- * @adapter: the adapter
- * @fl: the free list that received the packet
- *
- * Called when we receive an unexpected offload packet (e.g., the TOE
- * function is disabled or the card is a NIC). Prints a message and
- * recycles the buffer.
- */
-static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
-{
- struct freelQ_ce *ce = &fl->centries[fl->cidx];
- struct sk_buff *skb = ce->skb;
-
- pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
- pr_err("%s: unexpected offload packet, cmd %u\n",
- adapter->name, *skb->data);
- recycle_fl_buf(fl, fl->cidx);
-}
-
-/*
- * T1/T2 SGE limits the maximum DMA size per TX descriptor to
- * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
- * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
- * Note that the *_large_page_tx_descs stuff will be optimized out when
- * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
- *
- * compute_large_page_descs() computes how many additional descriptors are
- * required to break down the stack's request.
- */
-static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
-{
- unsigned int count = 0;
-
- if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
- unsigned int nfrags = skb_shinfo(skb)->nr_frags;
- unsigned int i, len = skb_headlen(skb);
- while (len > SGE_TX_DESC_MAX_PLEN) {
- count++;
- len -= SGE_TX_DESC_MAX_PLEN;
- }
- for (i = 0; nfrags--; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
- while (len > SGE_TX_DESC_MAX_PLEN) {
- count++;
- len -= SGE_TX_DESC_MAX_PLEN;
- }
- }
- }
- return count;
-}
-
-/*
- * Write a cmdQ entry.
- *
- * Since this function writes the 'flags' field, it must not be used to
- * write the first cmdQ entry.
- */
-static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
- unsigned int len, unsigned int gen,
- unsigned int eop)
-{
- BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
-
- e->addr_lo = (u32)mapping;
- e->addr_hi = (u64)mapping >> 32;
- e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
- e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
-}
-
-/*
- * See comment for previous function.
- *
- * write_tx_descs_large_page() writes additional SGE tx descriptors if
- * *desc_len exceeds HW's capability.
- */
-static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
- struct cmdQ_e **e,
- struct cmdQ_ce **ce,
- unsigned int *gen,
- dma_addr_t *desc_mapping,
- unsigned int *desc_len,
- unsigned int nfrags,
- struct cmdQ *q)
-{
- if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
- struct cmdQ_e *e1 = *e;
- struct cmdQ_ce *ce1 = *ce;
-
- while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
- *desc_len -= SGE_TX_DESC_MAX_PLEN;
- write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
- *gen, nfrags == 0 && *desc_len == 0);
- ce1->skb = NULL;
- dma_unmap_len_set(ce1, dma_len, 0);
- *desc_mapping += SGE_TX_DESC_MAX_PLEN;
- if (*desc_len) {
- ce1++;
- e1++;
- if (++pidx == q->size) {
- pidx = 0;
- *gen ^= 1;
- ce1 = q->centries;
- e1 = q->entries;
- }
- }
- }
- *e = e1;
- *ce = ce1;
- }
- return pidx;
-}
-
-/*
- * Write the command descriptors to transmit the given skb starting at
- * descriptor pidx with the given generation.
- */
-static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
- unsigned int pidx, unsigned int gen,
- struct cmdQ *q)
-{
- dma_addr_t mapping, desc_mapping;
- struct cmdQ_e *e, *e1;
- struct cmdQ_ce *ce;
- unsigned int i, flags, first_desc_len, desc_len,
- nfrags = skb_shinfo(skb)->nr_frags;
-
- e = e1 = &q->entries[pidx];
- ce = &q->centries[pidx];
-
- mapping = pci_map_single(adapter->pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
-
- desc_mapping = mapping;
- desc_len = skb_headlen(skb);
-
- flags = F_CMD_DATAVALID | F_CMD_SOP |
- V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
- V_CMD_GEN2(gen);
- first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
- desc_len : SGE_TX_DESC_MAX_PLEN;
- e->addr_lo = (u32)desc_mapping;
- e->addr_hi = (u64)desc_mapping >> 32;
- e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
- ce->skb = NULL;
- dma_unmap_len_set(ce, dma_len, 0);
-
- if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
- desc_len > SGE_TX_DESC_MAX_PLEN) {
- desc_mapping += first_desc_len;
- desc_len -= first_desc_len;
- e1++;
- ce++;
- if (++pidx == q->size) {
- pidx = 0;
- gen ^= 1;
- e1 = q->entries;
- ce = q->centries;
- }
- pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
- &desc_mapping, &desc_len,
- nfrags, q);
-
- if (likely(desc_len))
- write_tx_desc(e1, desc_mapping, desc_len, gen,
- nfrags == 0);
- }
-
- ce->skb = NULL;
- dma_unmap_addr_set(ce, dma_addr, mapping);
- dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
-
- for (i = 0; nfrags--; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- e1++;
- ce++;
- if (++pidx == q->size) {
- pidx = 0;
- gen ^= 1;
- e1 = q->entries;
- ce = q->centries;
- }
-
- mapping = pci_map_page(adapter->pdev, frag->page,
- frag->page_offset, frag->size,
- PCI_DMA_TODEVICE);
- desc_mapping = mapping;
- desc_len = frag->size;
-
- pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
- &desc_mapping, &desc_len,
- nfrags, q);
- if (likely(desc_len))
- write_tx_desc(e1, desc_mapping, desc_len, gen,
- nfrags == 0);
- ce->skb = NULL;
- dma_unmap_addr_set(ce, dma_addr, mapping);
- dma_unmap_len_set(ce, dma_len, frag->size);
- }
- ce->skb = skb;
- wmb();
- e->flags = flags;
-}
-
-/*
- * Clean up completed Tx buffers.
- */
-static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
-{
- unsigned int reclaim = q->processed - q->cleaned;
-
- if (reclaim) {
- pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
- q->processed, q->cleaned);
- free_cmdQ_buffers(sge, q, reclaim);
- q->cleaned += reclaim;
- }
-}
-
-/*
- * Called from tasklet. Checks the scheduler for any
- * pending skbs that can be sent.
- */
-static void restart_sched(unsigned long arg)
-{
- struct sge *sge = (struct sge *) arg;
- struct adapter *adapter = sge->adapter;
- struct cmdQ *q = &sge->cmdQ[0];
- struct sk_buff *skb;
- unsigned int credits, queued_skb = 0;
-
- spin_lock(&q->lock);
- reclaim_completed_tx(sge, q);
-
- credits = q->size - q->in_use;
- pr_debug("restart_sched credits=%d\n", credits);
- while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
- unsigned int genbit, pidx, count;
- count = 1 + skb_shinfo(skb)->nr_frags;
- count += compute_large_page_tx_descs(skb);
- q->in_use += count;
- genbit = q->genbit;
- pidx = q->pidx;
- q->pidx += count;
- if (q->pidx >= q->size) {
- q->pidx -= q->size;
- q->genbit ^= 1;
- }
- write_tx_descs(adapter, skb, pidx, genbit, q);
- credits = q->size - q->in_use;
- queued_skb = 1;
- }
-
- if (queued_skb) {
- clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
- if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
- set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
- writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
- }
- }
- spin_unlock(&q->lock);
-}
-
-/**
- * sge_rx - process an ingress ethernet packet
- * @sge: the sge structure
- * @fl: the free list that contains the packet buffer
- * @len: the packet length
- *
- * Process an ingress ethernet pakcet and deliver it to the stack.
- */
-static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
-{
- struct sk_buff *skb;
- const struct cpl_rx_pkt *p;
- struct adapter *adapter = sge->adapter;
- struct sge_port_stats *st;
- struct net_device *dev;
-
- skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
- if (unlikely(!skb)) {
- sge->stats.rx_drops++;
- return;
- }
-
- p = (const struct cpl_rx_pkt *) skb->data;
- if (p->iff >= adapter->params.nports) {
- kfree_skb(skb);
- return;
- }
- __skb_pull(skb, sizeof(*p));
-
- st = this_cpu_ptr(sge->port_stats[p->iff]);
- dev = adapter->port[p->iff].dev;
-
- skb->protocol = eth_type_trans(skb, dev);
- if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
- skb->protocol == htons(ETH_P_IP) &&
- (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
- ++st->rx_cso_good;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else
- skb_checksum_none_assert(skb);
-
- if (p->vlan_valid) {
- st->vlan_xtract++;
- __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
- }
- netif_receive_skb(skb);
-}
-
-/*
- * Returns true if a command queue has enough available descriptors that
- * we can resume Tx operation after temporarily disabling its packet queue.
- */
-static inline int enough_free_Tx_descs(const struct cmdQ *q)
-{
- unsigned int r = q->processed - q->cleaned;
-
- return q->in_use - r < (q->size >> 1);
-}
-
-/*
- * Called when sufficient space has become available in the SGE command queues
- * after the Tx packet schedulers have been suspended to restart the Tx path.
- */
-static void restart_tx_queues(struct sge *sge)
-{
- struct adapter *adap = sge->adapter;
- int i;
-
- if (!enough_free_Tx_descs(&sge->cmdQ[0]))
- return;
-
- for_each_port(adap, i) {
- struct net_device *nd = adap->port[i].dev;
-
- if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
- netif_running(nd)) {
- sge->stats.cmdQ_restarted[2]++;
- netif_wake_queue(nd);
- }
- }
-}
-
-/*
- * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
- * information.
- */
-static unsigned int update_tx_info(struct adapter *adapter,
- unsigned int flags,
- unsigned int pr0)
-{
- struct sge *sge = adapter->sge;
- struct cmdQ *cmdq = &sge->cmdQ[0];
-
- cmdq->processed += pr0;
- if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
- freelQs_empty(sge);
- flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
- }
- if (flags & F_CMDQ0_ENABLE) {
- clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
-
- if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
- !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
- set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
- writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
- }
- if (sge->tx_sched)
- tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
-
- flags &= ~F_CMDQ0_ENABLE;
- }
-
- if (unlikely(sge->stopped_tx_queues != 0))
- restart_tx_queues(sge);
-
- return flags;
-}
-
-/*
- * Process SGE responses, up to the supplied budget. Returns the number of
- * responses processed. A negative budget is effectively unlimited.
- */
-static int process_responses(struct adapter *adapter, int budget)
-{
- struct sge *sge = adapter->sge;
- struct respQ *q = &sge->respQ;
- struct respQ_e *e = &q->entries[q->cidx];
- int done = 0;
- unsigned int flags = 0;
- unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
-
- while (done < budget && e->GenerationBit == q->genbit) {
- flags |= e->Qsleeping;
-
- cmdq_processed[0] += e->Cmdq0CreditReturn;
- cmdq_processed[1] += e->Cmdq1CreditReturn;
-
- /* We batch updates to the TX side to avoid cacheline
- * ping-pong of TX state information on MP where the sender
- * might run on a different CPU than this function...
- */
- if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
- flags = update_tx_info(adapter, flags, cmdq_processed[0]);
- cmdq_processed[0] = 0;
- }
-
- if (unlikely(cmdq_processed[1] > 16)) {
- sge->cmdQ[1].processed += cmdq_processed[1];
- cmdq_processed[1] = 0;
- }
-
- if (likely(e->DataValid)) {
- struct freelQ *fl = &sge->freelQ[e->FreelistQid];
-
- BUG_ON(!e->Sop || !e->Eop);
- if (unlikely(e->Offload))
- unexpected_offload(adapter, fl);
- else
- sge_rx(sge, fl, e->BufferLength);
-
- ++done;
-
- /*
- * Note: this depends on each packet consuming a
- * single free-list buffer; cf. the BUG above.
- */
- if (++fl->cidx == fl->size)
- fl->cidx = 0;
- prefetch(fl->centries[fl->cidx].skb);
-
- if (unlikely(--fl->credits <
- fl->size - SGE_FREEL_REFILL_THRESH))
- refill_free_list(sge, fl);
- } else
- sge->stats.pure_rsps++;
-
- e++;
- if (unlikely(++q->cidx == q->size)) {
- q->cidx = 0;
- q->genbit ^= 1;
- e = q->entries;
- }
- prefetch(e);
-
- if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
- writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
- q->credits = 0;
- }
- }
-
- flags = update_tx_info(adapter, flags, cmdq_processed[0]);
- sge->cmdQ[1].processed += cmdq_processed[1];
-
- return done;
-}
-
-static inline int responses_pending(const struct adapter *adapter)
-{
- const struct respQ *Q = &adapter->sge->respQ;
- const struct respQ_e *e = &Q->entries[Q->cidx];
-
- return e->GenerationBit == Q->genbit;
-}
-
-/*
- * A simpler version of process_responses() that handles only pure (i.e.,
- * non data-carrying) responses. Such respones are too light-weight to justify
- * calling a softirq when using NAPI, so we handle them specially in hard
- * interrupt context. The function is called with a pointer to a response,
- * which the caller must ensure is a valid pure response. Returns 1 if it
- * encounters a valid data-carrying response, 0 otherwise.
- */
-static int process_pure_responses(struct adapter *adapter)
-{
- struct sge *sge = adapter->sge;
- struct respQ *q = &sge->respQ;
- struct respQ_e *e = &q->entries[q->cidx];
- const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
- unsigned int flags = 0;
- unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
-
- prefetch(fl->centries[fl->cidx].skb);
- if (e->DataValid)
- return 1;
-
- do {
- flags |= e->Qsleeping;
-
- cmdq_processed[0] += e->Cmdq0CreditReturn;
- cmdq_processed[1] += e->Cmdq1CreditReturn;
-
- e++;
- if (unlikely(++q->cidx == q->size)) {
- q->cidx = 0;
- q->genbit ^= 1;
- e = q->entries;
- }
- prefetch(e);
-
- if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
- writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
- q->credits = 0;
- }
- sge->stats.pure_rsps++;
- } while (e->GenerationBit == q->genbit && !e->DataValid);
-
- flags = update_tx_info(adapter, flags, cmdq_processed[0]);
- sge->cmdQ[1].processed += cmdq_processed[1];
-
- return e->GenerationBit == q->genbit;
-}
-
-/*
- * Handler for new data events when using NAPI. This does not need any locking
- * or protection from interrupts as data interrupts are off at this point and
- * other adapter interrupts do not interfere.
- */
-int t1_poll(struct napi_struct *napi, int budget)
-{
- struct adapter *adapter = container_of(napi, struct adapter, napi);
- int work_done = process_responses(adapter, budget);
-
- if (likely(work_done < budget)) {
- napi_complete(napi);
- writel(adapter->sge->respQ.cidx,
- adapter->regs + A_SG_SLEEPING);
- }
- return work_done;
-}
-
-irqreturn_t t1_interrupt(int irq, void *data)
-{
- struct adapter *adapter = data;
- struct sge *sge = adapter->sge;
- int handled;
-
- if (likely(responses_pending(adapter))) {
- writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
-
- if (napi_schedule_prep(&adapter->napi)) {
- if (process_pure_responses(adapter))
- __napi_schedule(&adapter->napi);
- else {
- /* no data, no NAPI needed */
- writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
- /* undo schedule_prep */
- napi_enable(&adapter->napi);
- }
- }
- return IRQ_HANDLED;
- }
-
- spin_lock(&adapter->async_lock);
- handled = t1_slow_intr_handler(adapter);
- spin_unlock(&adapter->async_lock);
-
- if (!handled)
- sge->stats.unhandled_irqs++;
-
- return IRQ_RETVAL(handled != 0);
-}
-
-/*
- * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
- *
- * The code figures out how many entries the sk_buff will require in the
- * cmdQ and updates the cmdQ data structure with the state once the enqueue
- * has complete. Then, it doesn't access the global structure anymore, but
- * uses the corresponding fields on the stack. In conjunction with a spinlock
- * around that code, we can make the function reentrant without holding the
- * lock when we actually enqueue (which might be expensive, especially on
- * architectures with IO MMUs).
- *
- * This runs with softirqs disabled.
- */
-static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
- unsigned int qid, struct net_device *dev)
-{
- struct sge *sge = adapter->sge;
- struct cmdQ *q = &sge->cmdQ[qid];
- unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
-
- if (!spin_trylock(&q->lock))
- return NETDEV_TX_LOCKED;
-
- reclaim_completed_tx(sge, q);
-
- pidx = q->pidx;
- credits = q->size - q->in_use;
- count = 1 + skb_shinfo(skb)->nr_frags;
- count += compute_large_page_tx_descs(skb);
-
- /* Ethernet packet */
- if (unlikely(credits < count)) {
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
- set_bit(dev->if_port, &sge->stopped_tx_queues);
- sge->stats.cmdQ_full[2]++;
- pr_err("%s: Tx ring full while queue awake!\n",
- adapter->name);
- }
- spin_unlock(&q->lock);
- return NETDEV_TX_BUSY;
- }
-
- if (unlikely(credits - count < q->stop_thres)) {
- netif_stop_queue(dev);
- set_bit(dev->if_port, &sge->stopped_tx_queues);
- sge->stats.cmdQ_full[2]++;
- }
-
- /* T204 cmdQ0 skbs that are destined for a certain port have to go
- * through the scheduler.
- */
- if (sge->tx_sched && !qid && skb->dev) {
-use_sched:
- use_sched_skb = 1;
- /* Note that the scheduler might return a different skb than
- * the one passed in.
- */
- skb = sched_skb(sge, skb, credits);
- if (!skb) {
- spin_unlock(&q->lock);
- return NETDEV_TX_OK;
- }
- pidx = q->pidx;
- count = 1 + skb_shinfo(skb)->nr_frags;
- count += compute_large_page_tx_descs(skb);
- }
-
- q->in_use += count;
- genbit = q->genbit;
- pidx = q->pidx;
- q->pidx += count;
- if (q->pidx >= q->size) {
- q->pidx -= q->size;
- q->genbit ^= 1;
- }
- spin_unlock(&q->lock);
-
- write_tx_descs(adapter, skb, pidx, genbit, q);
-
- /*
- * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
- * the doorbell if the Q is asleep. There is a natural race, where
- * the hardware is going to sleep just after we checked, however,
- * then the interrupt handler will detect the outstanding TX packet
- * and ring the doorbell for us.
- */
- if (qid)
- doorbell_pio(adapter, F_CMDQ1_ENABLE);
- else {
- clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
- if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
- set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
- writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
- }
- }
-
- if (use_sched_skb) {
- if (spin_trylock(&q->lock)) {
- credits = q->size - q->in_use;
- skb = NULL;
- goto use_sched;
- }
- }
- return NETDEV_TX_OK;
-}
-
-#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
-
-/*
- * eth_hdr_len - return the length of an Ethernet header
- * @data: pointer to the start of the Ethernet header
- *
- * Returns the length of an Ethernet header, including optional VLAN tag.
- */
-static inline int eth_hdr_len(const void *data)
-{
- const struct ethhdr *e = data;
-
- return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
-}
-
-/*
- * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
- */
-netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct adapter *adapter = dev->ml_priv;
- struct sge *sge = adapter->sge;
- struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
- struct cpl_tx_pkt *cpl;
- struct sk_buff *orig_skb = skb;
- int ret;
-
- if (skb->protocol == htons(ETH_P_CPL5))
- goto send;
-
- /*
- * We are using a non-standard hard_header_len.
- * Allocate more header room in the rare cases it is not big enough.
- */
- if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
- skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
- ++st->tx_need_hdrroom;
- dev_kfree_skb_any(orig_skb);
- if (!skb)
- return NETDEV_TX_OK;
- }
-
- if (skb_shinfo(skb)->gso_size) {
- int eth_type;
- struct cpl_tx_pkt_lso *hdr;
-
- ++st->tx_tso;
-
- eth_type = skb_network_offset(skb) == ETH_HLEN ?
- CPL_ETH_II : CPL_ETH_II_VLAN;
-
- hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
- hdr->opcode = CPL_TX_PKT_LSO;
- hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
- hdr->ip_hdr_words = ip_hdr(skb)->ihl;
- hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
- hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
- skb_shinfo(skb)->gso_size));
- hdr->len = htonl(skb->len - sizeof(*hdr));
- cpl = (struct cpl_tx_pkt *)hdr;
- } else {
- /*
- * Packets shorter than ETH_HLEN can break the MAC, drop them
- * early. Also, we may get oversized packets because some
- * parts of the kernel don't handle our unusual hard_header_len
- * right, drop those too.
- */
- if (unlikely(skb->len < ETH_HLEN ||
- skb->len > dev->mtu + eth_hdr_len(skb->data))) {
- pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
- skb->len, eth_hdr_len(skb->data), dev->mtu);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (skb->ip_summed == CHECKSUM_PARTIAL &&
- ip_hdr(skb)->protocol == IPPROTO_UDP) {
- if (unlikely(skb_checksum_help(skb))) {
- pr_debug("%s: unable to do udp checksum\n", dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
- }
-
- /* Hmmm, assuming to catch the gratious arp... and we'll use
- * it to flush out stuck espi packets...
- */
- if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
- if (skb->protocol == htons(ETH_P_ARP) &&
- arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
- adapter->sge->espibug_skb[dev->if_port] = skb;
- /* We want to re-use this skb later. We
- * simply bump the reference count and it
- * will not be freed...
- */
- skb = skb_get(skb);
- }
- }
-
- cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
- cpl->opcode = CPL_TX_PKT;
- cpl->ip_csum_dis = 1; /* SW calculates IP csum */
- cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
- /* the length field isn't used so don't bother setting it */
-
- st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
- }
- cpl->iff = dev->if_port;
-
- if (vlan_tx_tag_present(skb)) {
- cpl->vlan_valid = 1;
- cpl->vlan = htons(vlan_tx_tag_get(skb));
- st->vlan_insert++;
- } else
- cpl->vlan_valid = 0;
-
-send:
- ret = t1_sge_tx(skb, adapter, 0, dev);
-
- /* If transmit busy, and we reallocated skb's due to headroom limit,
- * then silently discard to avoid leak.
- */
- if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
- dev_kfree_skb_any(skb);
- ret = NETDEV_TX_OK;
- }
- return ret;
-}
-
-/*
- * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
- */
-static void sge_tx_reclaim_cb(unsigned long data)
-{
- int i;
- struct sge *sge = (struct sge *)data;
-
- for (i = 0; i < SGE_CMDQ_N; ++i) {
- struct cmdQ *q = &sge->cmdQ[i];
-
- if (!spin_trylock(&q->lock))
- continue;
-
- reclaim_completed_tx(sge, q);
- if (i == 0 && q->in_use) { /* flush pending credits */
- writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
- }
- spin_unlock(&q->lock);
- }
- mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
-}
-
-/*
- * Propagate changes of the SGE coalescing parameters to the HW.
- */
-int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
-{
- sge->fixed_intrtimer = p->rx_coalesce_usecs *
- core_ticks_per_usec(sge->adapter);
- writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
- return 0;
-}
-
-/*
- * Allocates both RX and TX resources and configures the SGE. However,
- * the hardware is not enabled yet.
- */
-int t1_sge_configure(struct sge *sge, struct sge_params *p)
-{
- if (alloc_rx_resources(sge, p))
- return -ENOMEM;
- if (alloc_tx_resources(sge, p)) {
- free_rx_resources(sge);
- return -ENOMEM;
- }
- configure_sge(sge, p);
-
- /*
- * Now that we have sized the free lists calculate the payload
- * capacity of the large buffers. Other parts of the driver use
- * this to set the max offload coalescing size so that RX packets
- * do not overflow our large buffers.
- */
- p->large_buf_capacity = jumbo_payload_capacity(sge);
- return 0;
-}
-
-/*
- * Disables the DMA engine.
- */
-void t1_sge_stop(struct sge *sge)
-{
- int i;
- writel(0, sge->adapter->regs + A_SG_CONTROL);
- readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
-
- if (is_T2(sge->adapter))
- del_timer_sync(&sge->espibug_timer);
-
- del_timer_sync(&sge->tx_reclaim_timer);
- if (sge->tx_sched)
- tx_sched_stop(sge);
-
- for (i = 0; i < MAX_NPORTS; i++)
- kfree_skb(sge->espibug_skb[i]);
-}
-
-/*
- * Enables the DMA engine.
- */
-void t1_sge_start(struct sge *sge)
-{
- refill_free_list(sge, &sge->freelQ[0]);
- refill_free_list(sge, &sge->freelQ[1]);
-
- writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
- doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
- readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
-
- mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
-
- if (is_T2(sge->adapter))
- mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
-}
-
-/*
- * Callback for the T2 ESPI 'stuck packet feature' workaorund
- */
-static void espibug_workaround_t204(unsigned long data)
-{
- struct adapter *adapter = (struct adapter *)data;
- struct sge *sge = adapter->sge;
- unsigned int nports = adapter->params.nports;
- u32 seop[MAX_NPORTS];
-
- if (adapter->open_device_map & PORT_MASK) {
- int i;
-
- if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
- return;
-
- for (i = 0; i < nports; i++) {
- struct sk_buff *skb = sge->espibug_skb[i];
-
- if (!netif_running(adapter->port[i].dev) ||
- netif_queue_stopped(adapter->port[i].dev) ||
- !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
- continue;
-
- if (!skb->cb[0]) {
- skb_copy_to_linear_data_offset(skb,
- sizeof(struct cpl_tx_pkt),
- ch_mac_addr,
- ETH_ALEN);
- skb_copy_to_linear_data_offset(skb,
- skb->len - 10,
- ch_mac_addr,
- ETH_ALEN);
- skb->cb[0] = 0xff;
- }
-
- /* bump the reference count to avoid freeing of
- * the skb once the DMA has completed.
- */
- skb = skb_get(skb);
- t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
- }
- }
- mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
-}
-
-static void espibug_workaround(unsigned long data)
-{
- struct adapter *adapter = (struct adapter *)data;
- struct sge *sge = adapter->sge;
-
- if (netif_running(adapter->port[0].dev)) {
- struct sk_buff *skb = sge->espibug_skb[0];
- u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
-
- if ((seop & 0xfff0fff) == 0xfff && skb) {
- if (!skb->cb[0]) {
- skb_copy_to_linear_data_offset(skb,
- sizeof(struct cpl_tx_pkt),
- ch_mac_addr,
- ETH_ALEN);
- skb_copy_to_linear_data_offset(skb,
- skb->len - 10,
- ch_mac_addr,
- ETH_ALEN);
- skb->cb[0] = 0xff;
- }
-
- /* bump the reference count to avoid freeing of the
- * skb once the DMA has completed.
- */
- skb = skb_get(skb);
- t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
- }
- }
- mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
-}
-
-/*
- * Creates a t1_sge structure and returns suggested resource parameters.
- */
-struct sge * __devinit t1_sge_create(struct adapter *adapter,
- struct sge_params *p)
-{
- struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
- int i;
-
- if (!sge)
- return NULL;
-
- sge->adapter = adapter;
- sge->netdev = adapter->port[0].dev;
- sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
- sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
-
- for_each_port(adapter, i) {
- sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
- if (!sge->port_stats[i])
- goto nomem_port;
- }
-
- init_timer(&sge->tx_reclaim_timer);
- sge->tx_reclaim_timer.data = (unsigned long)sge;
- sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
-
- if (is_T2(sge->adapter)) {
- init_timer(&sge->espibug_timer);
-
- if (adapter->params.nports > 1) {
- tx_sched_init(sge);
- sge->espibug_timer.function = espibug_workaround_t204;
- } else
- sge->espibug_timer.function = espibug_workaround;
- sge->espibug_timer.data = (unsigned long)sge->adapter;
-
- sge->espibug_timeout = 1;
- /* for T204, every 10ms */
- if (adapter->params.nports > 1)
- sge->espibug_timeout = HZ/100;
- }
-
-
- p->cmdQ_size[0] = SGE_CMDQ0_E_N;
- p->cmdQ_size[1] = SGE_CMDQ1_E_N;
- p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
- p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
- if (sge->tx_sched) {
- if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
- p->rx_coalesce_usecs = 15;
- else
- p->rx_coalesce_usecs = 50;
- } else
- p->rx_coalesce_usecs = 50;
-
- p->coalesce_enable = 0;
- p->sample_interval_usecs = 0;
-
- return sge;
-nomem_port:
- while (i >= 0) {
- free_percpu(sge->port_stats[i]);
- --i;
- }
- kfree(sge);
- return NULL;
-
-}
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index e66aceb57ce..7cb2785e209 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -261,7 +261,7 @@ static const struct net_device_ops e100_netdev_ops = {
.ndo_start_xmit = e100_send_packet,
.ndo_tx_timeout = e100_tx_timeout,
.ndo_get_stats = e100_get_stats,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_do_ioctl = e100_ioctl,
.ndo_set_mac_address = e100_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
deleted file mode 100644
index d6fa1777a34..00000000000
--- a/drivers/net/cxgb3/sge.c
+++ /dev/null
@@ -1,3303 +0,0 @@
-/*
- * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/prefetch.h>
-#include <net/arp.h>
-#include "common.h"
-#include "regs.h"
-#include "sge_defs.h"
-#include "t3_cpl.h"
-#include "firmware_exports.h"
-#include "cxgb3_offload.h"
-
-#define USE_GTS 0
-
-#define SGE_RX_SM_BUF_SIZE 1536
-
-#define SGE_RX_COPY_THRES 256
-#define SGE_RX_PULL_LEN 128
-
-#define SGE_PG_RSVD SMP_CACHE_BYTES
-/*
- * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
- * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
- * directly.
- */
-#define FL0_PG_CHUNK_SIZE 2048
-#define FL0_PG_ORDER 0
-#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
-#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
-#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
-#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
-
-#define SGE_RX_DROP_THRES 16
-#define RX_RECLAIM_PERIOD (HZ/4)
-
-/*
- * Max number of Rx buffers we replenish at a time.
- */
-#define MAX_RX_REFILL 16U
-/*
- * Period of the Tx buffer reclaim timer. This timer does not need to run
- * frequently as Tx buffers are usually reclaimed by new Tx packets.
- */
-#define TX_RECLAIM_PERIOD (HZ / 4)
-#define TX_RECLAIM_TIMER_CHUNK 64U
-#define TX_RECLAIM_CHUNK 16U
-
-/* WR size in bytes */
-#define WR_LEN (WR_FLITS * 8)
-
-/*
- * Types of Tx queues in each queue set. Order here matters, do not change.
- */
-enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
-
-/* Values for sge_txq.flags */
-enum {
- TXQ_RUNNING = 1 << 0, /* fetch engine is running */
- TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
-};
-
-struct tx_desc {
- __be64 flit[TX_DESC_FLITS];
-};
-
-struct rx_desc {
- __be32 addr_lo;
- __be32 len_gen;
- __be32 gen2;
- __be32 addr_hi;
-};
-
-struct tx_sw_desc { /* SW state per Tx descriptor */
- struct sk_buff *skb;
- u8 eop; /* set if last descriptor for packet */
- u8 addr_idx; /* buffer index of first SGL entry in descriptor */
- u8 fragidx; /* first page fragment associated with descriptor */
- s8 sflit; /* start flit of first SGL entry in descriptor */
-};
-
-struct rx_sw_desc { /* SW state per Rx descriptor */
- union {
- struct sk_buff *skb;
- struct fl_pg_chunk pg_chunk;
- };
- DEFINE_DMA_UNMAP_ADDR(dma_addr);
-};
-
-struct rsp_desc { /* response queue descriptor */
- struct rss_header rss_hdr;
- __be32 flags;
- __be32 len_cq;
- u8 imm_data[47];
- u8 intr_gen;
-};
-
-/*
- * Holds unmapping information for Tx packets that need deferred unmapping.
- * This structure lives at skb->head and must be allocated by callers.
- */
-struct deferred_unmap_info {
- struct pci_dev *pdev;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
-};
-
-/*
- * Maps a number of flits to the number of Tx descriptors that can hold them.
- * The formula is
- *
- * desc = 1 + (flits - 2) / (WR_FLITS - 1).
- *
- * HW allows up to 4 descriptors to be combined into a WR.
- */
-static u8 flit_desc_map[] = {
- 0,
-#if SGE_NUM_GENBITS == 1
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
-#elif SGE_NUM_GENBITS == 2
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
-#else
-# error "SGE_NUM_GENBITS must be 1 or 2"
-#endif
-};
-
-static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
-{
- return container_of(q, struct sge_qset, fl[qidx]);
-}
-
-static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
-{
- return container_of(q, struct sge_qset, rspq);
-}
-
-static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
-{
- return container_of(q, struct sge_qset, txq[qidx]);
-}
-
-/**
- * refill_rspq - replenish an SGE response queue
- * @adapter: the adapter
- * @q: the response queue to replenish
- * @credits: how many new responses to make available
- *
- * Replenishes a response queue by making the supplied number of responses
- * available to HW.
- */
-static inline void refill_rspq(struct adapter *adapter,
- const struct sge_rspq *q, unsigned int credits)
-{
- rmb();
- t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
- V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
-}
-
-/**
- * need_skb_unmap - does the platform need unmapping of sk_buffs?
- *
- * Returns true if the platform needs sk_buff unmapping. The compiler
- * optimizes away unnecessary code if this returns true.
- */
-static inline int need_skb_unmap(void)
-{
-#ifdef CONFIG_NEED_DMA_MAP_STATE
- return 1;
-#else
- return 0;
-#endif
-}
-
-/**
- * unmap_skb - unmap a packet main body and its page fragments
- * @skb: the packet
- * @q: the Tx queue containing Tx descriptors for the packet
- * @cidx: index of Tx descriptor
- * @pdev: the PCI device
- *
- * Unmap the main body of an sk_buff and its page fragments, if any.
- * Because of the fairly complicated structure of our SGLs and the desire
- * to conserve space for metadata, the information necessary to unmap an
- * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
- * descriptors (the physical addresses of the various data buffers), and
- * the SW descriptor state (assorted indices). The send functions
- * initialize the indices for the first packet descriptor so we can unmap
- * the buffers held in the first Tx descriptor here, and we have enough
- * information at this point to set the state for the next Tx descriptor.
- *
- * Note that it is possible to clean up the first descriptor of a packet
- * before the send routines have written the next descriptors, but this
- * race does not cause any problem. We just end up writing the unmapping
- * info for the descriptor first.
- */
-static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
- unsigned int cidx, struct pci_dev *pdev)
-{
- const struct sg_ent *sgp;
- struct tx_sw_desc *d = &q->sdesc[cidx];
- int nfrags, frag_idx, curflit, j = d->addr_idx;
-
- sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
- frag_idx = d->fragidx;
-
- if (frag_idx == 0 && skb_headlen(skb)) {
- pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
- skb_headlen(skb), PCI_DMA_TODEVICE);
- j = 1;
- }
-
- curflit = d->sflit + 1 + j;
- nfrags = skb_shinfo(skb)->nr_frags;
-
- while (frag_idx < nfrags && curflit < WR_FLITS) {
- pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
- skb_shinfo(skb)->frags[frag_idx].size,
- PCI_DMA_TODEVICE);
- j ^= 1;
- if (j == 0) {
- sgp++;
- curflit++;
- }
- curflit++;
- frag_idx++;
- }
-
- if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
- d = cidx + 1 == q->size ? q->sdesc : d + 1;
- d->fragidx = frag_idx;
- d->addr_idx = j;
- d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
- }
-}
-
-/**
- * free_tx_desc - reclaims Tx descriptors and their buffers
- * @adapter: the adapter
- * @q: the Tx queue to reclaim descriptors from
- * @n: the number of descriptors to reclaim
- *
- * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
- * Tx buffers. Called with the Tx queue lock held.
- */
-static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
- unsigned int n)
-{
- struct tx_sw_desc *d;
- struct pci_dev *pdev = adapter->pdev;
- unsigned int cidx = q->cidx;
-
- const int need_unmap = need_skb_unmap() &&
- q->cntxt_id >= FW_TUNNEL_SGEEC_START;
-
- d = &q->sdesc[cidx];
- while (n--) {
- if (d->skb) { /* an SGL is present */
- if (need_unmap)
- unmap_skb(d->skb, q, cidx, pdev);
- if (d->eop) {
- kfree_skb(d->skb);
- d->skb = NULL;
- }
- }
- ++d;
- if (++cidx == q->size) {
- cidx = 0;
- d = q->sdesc;
- }
- }
- q->cidx = cidx;
-}
-
-/**
- * reclaim_completed_tx - reclaims completed Tx descriptors
- * @adapter: the adapter
- * @q: the Tx queue to reclaim completed descriptors from
- * @chunk: maximum number of descriptors to reclaim
- *
- * Reclaims Tx descriptors that the SGE has indicated it has processed,
- * and frees the associated buffers if possible. Called with the Tx
- * queue's lock held.
- */
-static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
- struct sge_txq *q,
- unsigned int chunk)
-{
- unsigned int reclaim = q->processed - q->cleaned;
-
- reclaim = min(chunk, reclaim);
- if (reclaim) {
- free_tx_desc(adapter, q, reclaim);
- q->cleaned += reclaim;
- q->in_use -= reclaim;
- }
- return q->processed - q->cleaned;
-}
-
-/**
- * should_restart_tx - are there enough resources to restart a Tx queue?
- * @q: the Tx queue
- *
- * Checks if there are enough descriptors to restart a suspended Tx queue.
- */
-static inline int should_restart_tx(const struct sge_txq *q)
-{
- unsigned int r = q->processed - q->cleaned;
-
- return q->in_use - r < (q->size >> 1);
-}
-
-static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
- struct rx_sw_desc *d)
-{
- if (q->use_pages && d->pg_chunk.page) {
- (*d->pg_chunk.p_cnt)--;
- if (!*d->pg_chunk.p_cnt)
- pci_unmap_page(pdev,
- d->pg_chunk.mapping,
- q->alloc_size, PCI_DMA_FROMDEVICE);
-
- put_page(d->pg_chunk.page);
- d->pg_chunk.page = NULL;
- } else {
- pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
- q->buf_size, PCI_DMA_FROMDEVICE);
- kfree_skb(d->skb);
- d->skb = NULL;
- }
-}
-
-/**
- * free_rx_bufs - free the Rx buffers on an SGE free list
- * @pdev: the PCI device associated with the adapter
- * @rxq: the SGE free list to clean up
- *
- * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
- * this queue should be stopped before calling this function.
- */
-static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
-{
- unsigned int cidx = q->cidx;
-
- while (q->credits--) {
- struct rx_sw_desc *d = &q->sdesc[cidx];
-
-
- clear_rx_desc(pdev, q, d);
- if (++cidx == q->size)
- cidx = 0;
- }
-
- if (q->pg_chunk.page) {
- __free_pages(q->pg_chunk.page, q->order);
- q->pg_chunk.page = NULL;
- }
-}
-
-/**
- * add_one_rx_buf - add a packet buffer to a free-buffer list
- * @va: buffer start VA
- * @len: the buffer length
- * @d: the HW Rx descriptor to write
- * @sd: the SW Rx descriptor to write
- * @gen: the generation bit value
- * @pdev: the PCI device associated with the adapter
- *
- * Add a buffer of the given length to the supplied HW and SW Rx
- * descriptors.
- */
-static inline int add_one_rx_buf(void *va, unsigned int len,
- struct rx_desc *d, struct rx_sw_desc *sd,
- unsigned int gen, struct pci_dev *pdev)
-{
- dma_addr_t mapping;
-
- mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(pdev, mapping)))
- return -ENOMEM;
-
- dma_unmap_addr_set(sd, dma_addr, mapping);
-
- d->addr_lo = cpu_to_be32(mapping);
- d->addr_hi = cpu_to_be32((u64) mapping >> 32);
- wmb();
- d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
- d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
- return 0;
-}
-
-static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
- unsigned int gen)
-{
- d->addr_lo = cpu_to_be32(mapping);
- d->addr_hi = cpu_to_be32((u64) mapping >> 32);
- wmb();
- d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
- d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
- return 0;
-}
-
-static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
- struct rx_sw_desc *sd, gfp_t gfp,
- unsigned int order)
-{
- if (!q->pg_chunk.page) {
- dma_addr_t mapping;
-
- q->pg_chunk.page = alloc_pages(gfp, order);
- if (unlikely(!q->pg_chunk.page))
- return -ENOMEM;
- q->pg_chunk.va = page_address(q->pg_chunk.page);
- q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
- SGE_PG_RSVD;
- q->pg_chunk.offset = 0;
- mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
- 0, q->alloc_size, PCI_DMA_FROMDEVICE);
- q->pg_chunk.mapping = mapping;
- }
- sd->pg_chunk = q->pg_chunk;
-
- prefetch(sd->pg_chunk.p_cnt);
-
- q->pg_chunk.offset += q->buf_size;
- if (q->pg_chunk.offset == (PAGE_SIZE << order))
- q->pg_chunk.page = NULL;
- else {
- q->pg_chunk.va += q->buf_size;
- get_page(q->pg_chunk.page);
- }
-
- if (sd->pg_chunk.offset == 0)
- *sd->pg_chunk.p_cnt = 1;
- else
- *sd->pg_chunk.p_cnt += 1;
-
- return 0;
-}
-
-static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
-{
- if (q->pend_cred >= q->credits / 4) {
- q->pend_cred = 0;
- wmb();
- t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
- }
-}
-
-/**
- * refill_fl - refill an SGE free-buffer list
- * @adapter: the adapter
- * @q: the free-list to refill
- * @n: the number of new buffers to allocate
- * @gfp: the gfp flags for allocating new buffers
- *
- * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
- * allocated with the supplied gfp flags. The caller must assure that
- * @n does not exceed the queue's capacity.
- */
-static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
-{
- struct rx_sw_desc *sd = &q->sdesc[q->pidx];
- struct rx_desc *d = &q->desc[q->pidx];
- unsigned int count = 0;
-
- while (n--) {
- dma_addr_t mapping;
- int err;
-
- if (q->use_pages) {
- if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
- q->order))) {
-nomem: q->alloc_failed++;
- break;
- }
- mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
- dma_unmap_addr_set(sd, dma_addr, mapping);
-
- add_one_rx_chunk(mapping, d, q->gen);
- pci_dma_sync_single_for_device(adap->pdev, mapping,
- q->buf_size - SGE_PG_RSVD,
- PCI_DMA_FROMDEVICE);
- } else {
- void *buf_start;
-
- struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
- if (!skb)
- goto nomem;
-
- sd->skb = skb;
- buf_start = skb->data;
- err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
- q->gen, adap->pdev);
- if (unlikely(err)) {
- clear_rx_desc(adap->pdev, q, sd);
- break;
- }
- }
-
- d++;
- sd++;
- if (++q->pidx == q->size) {
- q->pidx = 0;
- q->gen ^= 1;
- sd = q->sdesc;
- d = q->desc;
- }
- count++;
- }
-
- q->credits += count;
- q->pend_cred += count;
- ring_fl_db(adap, q);
-
- return count;
-}
-
-static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
-{
- refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
- GFP_ATOMIC | __GFP_COMP);
-}
-
-/**
- * recycle_rx_buf - recycle a receive buffer
- * @adapter: the adapter
- * @q: the SGE free list
- * @idx: index of buffer to recycle
- *
- * Recycles the specified buffer on the given free list by adding it at
- * the next available slot on the list.
- */
-static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
- unsigned int idx)
-{
- struct rx_desc *from = &q->desc[idx];
- struct rx_desc *to = &q->desc[q->pidx];
-
- q->sdesc[q->pidx] = q->sdesc[idx];
- to->addr_lo = from->addr_lo; /* already big endian */
- to->addr_hi = from->addr_hi; /* likewise */
- wmb();
- to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
- to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
-
- if (++q->pidx == q->size) {
- q->pidx = 0;
- q->gen ^= 1;
- }
-
- q->credits++;
- q->pend_cred++;
- ring_fl_db(adap, q);
-}
-
-/**
- * alloc_ring - allocate resources for an SGE descriptor ring
- * @pdev: the PCI device
- * @nelem: the number of descriptors
- * @elem_size: the size of each descriptor
- * @sw_size: the size of the SW state associated with each ring element
- * @phys: the physical address of the allocated ring
- * @metadata: address of the array holding the SW state for the ring
- *
- * Allocates resources for an SGE descriptor ring, such as Tx queues,
- * free buffer lists, or response queues. Each SGE ring requires
- * space for its HW descriptors plus, optionally, space for the SW state
- * associated with each HW entry (the metadata). The function returns
- * three values: the virtual address for the HW ring (the return value
- * of the function), the physical address of the HW ring, and the address
- * of the SW ring.
- */
-static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
- size_t sw_size, dma_addr_t * phys, void *metadata)
-{
- size_t len = nelem * elem_size;
- void *s = NULL;
- void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
-
- if (!p)
- return NULL;
- if (sw_size && metadata) {
- s = kcalloc(nelem, sw_size, GFP_KERNEL);
-
- if (!s) {
- dma_free_coherent(&pdev->dev, len, p, *phys);
- return NULL;
- }
- *(void **)metadata = s;
- }
- memset(p, 0, len);
- return p;
-}
-
-/**
- * t3_reset_qset - reset a sge qset
- * @q: the queue set
- *
- * Reset the qset structure.
- * the NAPI structure is preserved in the event of
- * the qset's reincarnation, for example during EEH recovery.
- */
-static void t3_reset_qset(struct sge_qset *q)
-{
- if (q->adap &&
- !(q->adap->flags & NAPI_INIT)) {
- memset(q, 0, sizeof(*q));
- return;
- }
-
- q->adap = NULL;
- memset(&q->rspq, 0, sizeof(q->rspq));
- memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
- memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
- q->txq_stopped = 0;
- q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
- q->rx_reclaim_timer.function = NULL;
- q->nomem = 0;
- napi_free_frags(&q->napi);
-}
-
-
-/**
- * free_qset - free the resources of an SGE queue set
- * @adapter: the adapter owning the queue set
- * @q: the queue set
- *
- * Release the HW and SW resources associated with an SGE queue set, such
- * as HW contexts, packet buffers, and descriptor rings. Traffic to the
- * queue set must be quiesced prior to calling this.
- */
-static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
-{
- int i;
- struct pci_dev *pdev = adapter->pdev;
-
- for (i = 0; i < SGE_RXQ_PER_SET; ++i)
- if (q->fl[i].desc) {
- spin_lock_irq(&adapter->sge.reg_lock);
- t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
- spin_unlock_irq(&adapter->sge.reg_lock);
- free_rx_bufs(pdev, &q->fl[i]);
- kfree(q->fl[i].sdesc);
- dma_free_coherent(&pdev->dev,
- q->fl[i].size *
- sizeof(struct rx_desc), q->fl[i].desc,
- q->fl[i].phys_addr);
- }
-
- for (i = 0; i < SGE_TXQ_PER_SET; ++i)
- if (q->txq[i].desc) {
- spin_lock_irq(&adapter->sge.reg_lock);
- t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
- spin_unlock_irq(&adapter->sge.reg_lock);
- if (q->txq[i].sdesc) {
- free_tx_desc(adapter, &q->txq[i],
- q->txq[i].in_use);
- kfree(q->txq[i].sdesc);
- }
- dma_free_coherent(&pdev->dev,
- q->txq[i].size *
- sizeof(struct tx_desc),
- q->txq[i].desc, q->txq[i].phys_addr);
- __skb_queue_purge(&q->txq[i].sendq);
- }
-
- if (q->rspq.desc) {
- spin_lock_irq(&adapter->sge.reg_lock);
- t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
- spin_unlock_irq(&adapter->sge.reg_lock);
- dma_free_coherent(&pdev->dev,
- q->rspq.size * sizeof(struct rsp_desc),
- q->rspq.desc, q->rspq.phys_addr);
- }
-
- t3_reset_qset(q);
-}
-
-/**
- * init_qset_cntxt - initialize an SGE queue set context info
- * @qs: the queue set
- * @id: the queue set id
- *
- * Initializes the TIDs and context ids for the queues of a queue set.
- */
-static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
-{
- qs->rspq.cntxt_id = id;
- qs->fl[0].cntxt_id = 2 * id;
- qs->fl[1].cntxt_id = 2 * id + 1;
- qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
- qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
- qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
- qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
- qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
-}
-
-/**
- * sgl_len - calculates the size of an SGL of the given capacity
- * @n: the number of SGL entries
- *
- * Calculates the number of flits needed for a scatter/gather list that
- * can hold the given number of entries.
- */
-static inline unsigned int sgl_len(unsigned int n)
-{
- /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
- return (3 * n) / 2 + (n & 1);
-}
-
-/**
- * flits_to_desc - returns the num of Tx descriptors for the given flits
- * @n: the number of flits
- *
- * Calculates the number of Tx descriptors needed for the supplied number
- * of flits.
- */
-static inline unsigned int flits_to_desc(unsigned int n)
-{
- BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
- return flit_desc_map[n];
-}
-
-/**
- * get_packet - return the next ingress packet buffer from a free list
- * @adap: the adapter that received the packet
- * @fl: the SGE free list holding the packet
- * @len: the packet length including any SGE padding
- * @drop_thres: # of remaining buffers before we start dropping packets
- *
- * Get the next packet from a free list and complete setup of the
- * sk_buff. If the packet is small we make a copy and recycle the
- * original buffer, otherwise we use the original buffer itself. If a
- * positive drop threshold is supplied packets are dropped and their
- * buffers recycled if (a) the number of remaining buffers is under the
- * threshold and the packet is too big to copy, or (b) the packet should
- * be copied but there is no memory for the copy.
- */
-static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
- unsigned int len, unsigned int drop_thres)
-{
- struct sk_buff *skb = NULL;
- struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-
- prefetch(sd->skb->data);
- fl->credits--;
-
- if (len <= SGE_RX_COPY_THRES) {
- skb = alloc_skb(len, GFP_ATOMIC);
- if (likely(skb != NULL)) {
- __skb_put(skb, len);
- pci_dma_sync_single_for_cpu(adap->pdev,
- dma_unmap_addr(sd, dma_addr), len,
- PCI_DMA_FROMDEVICE);
- memcpy(skb->data, sd->skb->data, len);
- pci_dma_sync_single_for_device(adap->pdev,
- dma_unmap_addr(sd, dma_addr), len,
- PCI_DMA_FROMDEVICE);
- } else if (!drop_thres)
- goto use_orig_buf;
-recycle:
- recycle_rx_buf(adap, fl, fl->cidx);
- return skb;
- }
-
- if (unlikely(fl->credits < drop_thres) &&
- refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
- GFP_ATOMIC | __GFP_COMP) == 0)
- goto recycle;
-
-use_orig_buf:
- pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
- fl->buf_size, PCI_DMA_FROMDEVICE);
- skb = sd->skb;
- skb_put(skb, len);
- __refill_fl(adap, fl);
- return skb;
-}
-
-/**
- * get_packet_pg - return the next ingress packet buffer from a free list
- * @adap: the adapter that received the packet
- * @fl: the SGE free list holding the packet
- * @len: the packet length including any SGE padding
- * @drop_thres: # of remaining buffers before we start dropping packets
- *
- * Get the next packet from a free list populated with page chunks.
- * If the packet is small we make a copy and recycle the original buffer,
- * otherwise we attach the original buffer as a page fragment to a fresh
- * sk_buff. If a positive drop threshold is supplied packets are dropped
- * and their buffers recycled if (a) the number of remaining buffers is
- * under the threshold and the packet is too big to copy, or (b) there's
- * no system memory.
- *
- * Note: this function is similar to @get_packet but deals with Rx buffers
- * that are page chunks rather than sk_buffs.
- */
-static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
- struct sge_rspq *q, unsigned int len,
- unsigned int drop_thres)
-{
- struct sk_buff *newskb, *skb;
- struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-
- dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
-
- newskb = skb = q->pg_skb;
- if (!skb && (len <= SGE_RX_COPY_THRES)) {
- newskb = alloc_skb(len, GFP_ATOMIC);
- if (likely(newskb != NULL)) {
- __skb_put(newskb, len);
- pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
- PCI_DMA_FROMDEVICE);
- memcpy(newskb->data, sd->pg_chunk.va, len);
- pci_dma_sync_single_for_device(adap->pdev, dma_addr,
- len,
- PCI_DMA_FROMDEVICE);
- } else if (!drop_thres)
- return NULL;
-recycle:
- fl->credits--;
- recycle_rx_buf(adap, fl, fl->cidx);
- q->rx_recycle_buf++;
- return newskb;
- }
-
- if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
- goto recycle;
-
- prefetch(sd->pg_chunk.p_cnt);
-
- if (!skb)
- newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
-
- if (unlikely(!newskb)) {
- if (!drop_thres)
- return NULL;
- goto recycle;
- }
-
- pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
- PCI_DMA_FROMDEVICE);
- (*sd->pg_chunk.p_cnt)--;
- if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
- pci_unmap_page(adap->pdev,
- sd->pg_chunk.mapping,
- fl->alloc_size,
- PCI_DMA_FROMDEVICE);
- if (!skb) {
- __skb_put(newskb, SGE_RX_PULL_LEN);
- memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
- skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
- sd->pg_chunk.offset + SGE_RX_PULL_LEN,
- len - SGE_RX_PULL_LEN);
- newskb->len = len;
- newskb->data_len = len - SGE_RX_PULL_LEN;
- newskb->truesize += newskb->data_len;
- } else {
- skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
- sd->pg_chunk.page,
- sd->pg_chunk.offset, len);
- newskb->len += len;
- newskb->data_len += len;
- newskb->truesize += len;
- }
-
- fl->credits--;
- /*
- * We do not refill FLs here, we let the caller do it to overlap a
- * prefetch.
- */
- return newskb;
-}
-
-/**
- * get_imm_packet - return the next ingress packet buffer from a response
- * @resp: the response descriptor containing the packet data
- *
- * Return a packet containing the immediate data of the given response.
- */
-static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
-{
- struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
-
- if (skb) {
- __skb_put(skb, IMMED_PKT_SIZE);
- skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
- }
- return skb;
-}
-
-/**
- * calc_tx_descs - calculate the number of Tx descriptors for a packet
- * @skb: the packet
- *
- * Returns the number of Tx descriptors needed for the given Ethernet
- * packet. Ethernet packets require addition of WR and CPL headers.
- */
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
-{
- unsigned int flits;
-
- if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
- return 1;
-
- flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
- if (skb_shinfo(skb)->gso_size)
- flits++;
- return flits_to_desc(flits);
-}
-
-/**
- * make_sgl - populate a scatter/gather list for a packet
- * @skb: the packet
- * @sgp: the SGL to populate
- * @start: start address of skb main body data to include in the SGL
- * @len: length of skb main body data to include in the SGL
- * @pdev: the PCI device
- *
- * Generates a scatter/gather list for the buffers that make up a packet
- * and returns the SGL size in 8-byte words. The caller must size the SGL
- * appropriately.
- */
-static inline unsigned int make_sgl(const struct sk_buff *skb,
- struct sg_ent *sgp, unsigned char *start,
- unsigned int len, struct pci_dev *pdev)
-{
- dma_addr_t mapping;
- unsigned int i, j = 0, nfrags;
-
- if (len) {
- mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
- sgp->len[0] = cpu_to_be32(len);
- sgp->addr[0] = cpu_to_be64(mapping);
- j = 1;
- }
-
- nfrags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < nfrags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- mapping = pci_map_page(pdev, frag->page, frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
- sgp->len[j] = cpu_to_be32(frag->size);
- sgp->addr[j] = cpu_to_be64(mapping);
- j ^= 1;
- if (j == 0)
- ++sgp;
- }
- if (j)
- sgp->len[j] = 0;
- return ((nfrags + (len != 0)) * 3) / 2 + j;
-}
-
-/**
- * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
- * @adap: the adapter
- * @q: the Tx queue
- *
- * Ring the doorbel if a Tx queue is asleep. There is a natural race,
- * where the HW is going to sleep just after we checked, however,
- * then the interrupt handler will detect the outstanding TX packet
- * and ring the doorbell for us.
- *
- * When GTS is disabled we unconditionally ring the doorbell.
- */
-static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
-{
-#if USE_GTS
- clear_bit(TXQ_LAST_PKT_DB, &q->flags);
- if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
- set_bit(TXQ_LAST_PKT_DB, &q->flags);
- t3_write_reg(adap, A_SG_KDOORBELL,
- F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
- }
-#else
- wmb(); /* write descriptors before telling HW */
- t3_write_reg(adap, A_SG_KDOORBELL,
- F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
-#endif
-}
-
-static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
-{
-#if SGE_NUM_GENBITS == 2
- d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
-#endif
-}
-
-/**
- * write_wr_hdr_sgl - write a WR header and, optionally, SGL
- * @ndesc: number of Tx descriptors spanned by the SGL
- * @skb: the packet corresponding to the WR
- * @d: first Tx descriptor to be written
- * @pidx: index of above descriptors
- * @q: the SGE Tx queue
- * @sgl: the SGL
- * @flits: number of flits to the start of the SGL in the first descriptor
- * @sgl_flits: the SGL size in flits
- * @gen: the Tx descriptor generation
- * @wr_hi: top 32 bits of WR header based on WR type (big endian)
- * @wr_lo: low 32 bits of WR header based on WR type (big endian)
- *
- * Write a work request header and an associated SGL. If the SGL is
- * small enough to fit into one Tx descriptor it has already been written
- * and we just need to write the WR header. Otherwise we distribute the
- * SGL across the number of descriptors it spans.
- */
-static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
- struct tx_desc *d, unsigned int pidx,
- const struct sge_txq *q,
- const struct sg_ent *sgl,
- unsigned int flits, unsigned int sgl_flits,
- unsigned int gen, __be32 wr_hi,
- __be32 wr_lo)
-{
- struct work_request_hdr *wrp = (struct work_request_hdr *)d;
- struct tx_sw_desc *sd = &q->sdesc[pidx];
-
- sd->skb = skb;
- if (need_skb_unmap()) {
- sd->fragidx = 0;
- sd->addr_idx = 0;
- sd->sflit = flits;
- }
-
- if (likely(ndesc == 1)) {
- sd->eop = 1;
- wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
- V_WR_SGLSFLT(flits)) | wr_hi;
- wmb();
- wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
- V_WR_GEN(gen)) | wr_lo;
- wr_gen2(d, gen);
- } else {
- unsigned int ogen = gen;
- const u64 *fp = (const u64 *)sgl;
- struct work_request_hdr *wp = wrp;
-
- wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
- V_WR_SGLSFLT(flits)) | wr_hi;
-
- while (sgl_flits) {
- unsigned int avail = WR_FLITS - flits;
-
- if (avail > sgl_flits)
- avail = sgl_flits;
- memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
- sgl_flits -= avail;
- ndesc--;
- if (!sgl_flits)
- break;
-
- fp += avail;
- d++;
- sd->eop = 0;
- sd++;
- if (++pidx == q->size) {
- pidx = 0;
- gen ^= 1;
- d = q->desc;
- sd = q->sdesc;
- }
-
- sd->skb = skb;
- wrp = (struct work_request_hdr *)d;
- wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
- V_WR_SGLSFLT(1)) | wr_hi;
- wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
- sgl_flits + 1)) |
- V_WR_GEN(gen)) | wr_lo;
- wr_gen2(d, gen);
- flits = 1;
- }
- sd->eop = 1;
- wrp->wr_hi |= htonl(F_WR_EOP);
- wmb();
- wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
- wr_gen2((struct tx_desc *)wp, ogen);
- WARN_ON(ndesc != 0);
- }
-}
-
-/**
- * write_tx_pkt_wr - write a TX_PKT work request
- * @adap: the adapter
- * @skb: the packet to send
- * @pi: the egress interface
- * @pidx: index of the first Tx descriptor to write
- * @gen: the generation value to use
- * @q: the Tx queue
- * @ndesc: number of descriptors the packet will occupy
- * @compl: the value of the COMPL bit to use
- *
- * Generate a TX_PKT work request to send the supplied packet.
- */
-static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
- const struct port_info *pi,
- unsigned int pidx, unsigned int gen,
- struct sge_txq *q, unsigned int ndesc,
- unsigned int compl)
-{
- unsigned int flits, sgl_flits, cntrl, tso_info;
- struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
- struct tx_desc *d = &q->desc[pidx];
- struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
-
- cpl->len = htonl(skb->len);
- cntrl = V_TXPKT_INTF(pi->port_id);
-
- if (vlan_tx_tag_present(skb))
- cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
-
- tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
- if (tso_info) {
- int eth_type;
- struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
-
- d->flit[2] = 0;
- cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
- hdr->cntrl = htonl(cntrl);
- eth_type = skb_network_offset(skb) == ETH_HLEN ?
- CPL_ETH_II : CPL_ETH_II_VLAN;
- tso_info |= V_LSO_ETH_TYPE(eth_type) |
- V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
- V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
- hdr->lso_info = htonl(tso_info);
- flits = 3;
- } else {
- cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
- cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
- cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
- cpl->cntrl = htonl(cntrl);
-
- if (skb->len <= WR_LEN - sizeof(*cpl)) {
- q->sdesc[pidx].skb = NULL;
- if (!skb->data_len)
- skb_copy_from_linear_data(skb, &d->flit[2],
- skb->len);
- else
- skb_copy_bits(skb, 0, &d->flit[2], skb->len);
-
- flits = (skb->len + 7) / 8 + 2;
- cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
- V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
- | F_WR_SOP | F_WR_EOP | compl);
- wmb();
- cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
- V_WR_TID(q->token));
- wr_gen2(d, gen);
- kfree_skb(skb);
- return;
- }
-
- flits = 2;
- }
-
- sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
-
- write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
- htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
- htonl(V_WR_TID(q->token)));
-}
-
-static inline void t3_stop_tx_queue(struct netdev_queue *txq,
- struct sge_qset *qs, struct sge_txq *q)
-{
- netif_tx_stop_queue(txq);
- set_bit(TXQ_ETH, &qs->txq_stopped);
- q->stops++;
-}
-
-/**
- * eth_xmit - add a packet to the Ethernet Tx queue
- * @skb: the packet
- * @dev: the egress net device
- *
- * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
- */
-netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- int qidx;
- unsigned int ndesc, pidx, credits, gen, compl;
- const struct port_info *pi = netdev_priv(dev);
- struct adapter *adap = pi->adapter;
- struct netdev_queue *txq;
- struct sge_qset *qs;
- struct sge_txq *q;
-
- /*
- * The chip min packet length is 9 octets but play safe and reject
- * anything shorter than an Ethernet header.
- */
- if (unlikely(skb->len < ETH_HLEN)) {
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- qidx = skb_get_queue_mapping(skb);
- qs = &pi->qs[qidx];
- q = &qs->txq[TXQ_ETH];
- txq = netdev_get_tx_queue(dev, qidx);
-
- reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
-
- credits = q->size - q->in_use;
- ndesc = calc_tx_descs(skb);
-
- if (unlikely(credits < ndesc)) {
- t3_stop_tx_queue(txq, qs, q);
- dev_err(&adap->pdev->dev,
- "%s: Tx ring %u full while queue awake!\n",
- dev->name, q->cntxt_id & 7);
- return NETDEV_TX_BUSY;
- }
-
- q->in_use += ndesc;
- if (unlikely(credits - ndesc < q->stop_thres)) {
- t3_stop_tx_queue(txq, qs, q);
-
- if (should_restart_tx(q) &&
- test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
- q->restarts++;
- netif_tx_start_queue(txq);
- }
- }
-
- gen = q->gen;
- q->unacked += ndesc;
- compl = (q->unacked & 8) << (S_WR_COMPL - 3);
- q->unacked &= 7;
- pidx = q->pidx;
- q->pidx += ndesc;
- if (q->pidx >= q->size) {
- q->pidx -= q->size;
- q->gen ^= 1;
- }
-
- /* update port statistics */
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- qs->port_stats[SGE_PSTAT_TX_CSUM]++;
- if (skb_shinfo(skb)->gso_size)
- qs->port_stats[SGE_PSTAT_TSO]++;
- if (vlan_tx_tag_present(skb))
- qs->port_stats[SGE_PSTAT_VLANINS]++;
-
- /*
- * We do not use Tx completion interrupts to free DMAd Tx packets.
- * This is good for performance but means that we rely on new Tx
- * packets arriving to run the destructors of completed packets,
- * which open up space in their sockets' send queues. Sometimes
- * we do not get such new packets causing Tx to stall. A single
- * UDP transmitter is a good example of this situation. We have
- * a clean up timer that periodically reclaims completed packets
- * but it doesn't run often enough (nor do we want it to) to prevent
- * lengthy stalls. A solution to this problem is to run the
- * destructor early, after the packet is queued but before it's DMAd.
- * A cons is that we lie to socket memory accounting, but the amount
- * of extra memory is reasonable (limited by the number of Tx
- * descriptors), the packets do actually get freed quickly by new
- * packets almost always, and for protocols like TCP that wait for
- * acks to really free up the data the extra memory is even less.
- * On the positive side we run the destructors on the sending CPU
- * rather than on a potentially different completing CPU, usually a
- * good thing. We also run them without holding our Tx queue lock,
- * unlike what reclaim_completed_tx() would otherwise do.
- *
- * Run the destructor before telling the DMA engine about the packet
- * to make sure it doesn't complete and get freed prematurely.
- */
- if (likely(!skb_shared(skb)))
- skb_orphan(skb);
-
- write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
- check_ring_tx_db(adap, q);
- return NETDEV_TX_OK;
-}
-
-/**
- * write_imm - write a packet into a Tx descriptor as immediate data
- * @d: the Tx descriptor to write
- * @skb: the packet
- * @len: the length of packet data to write as immediate data
- * @gen: the generation bit value to write
- *
- * Writes a packet as immediate data into a Tx descriptor. The packet
- * contains a work request at its beginning. We must write the packet
- * carefully so the SGE doesn't read it accidentally before it's written
- * in its entirety.
- */
-static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
- unsigned int len, unsigned int gen)
-{
- struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
- struct work_request_hdr *to = (struct work_request_hdr *)d;
-
- if (likely(!skb->data_len))
- memcpy(&to[1], &from[1], len - sizeof(*from));
- else
- skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
-
- to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
- V_WR_BCNTLFLT(len & 7));
- wmb();
- to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
- V_WR_LEN((len + 7) / 8));
- wr_gen2(d, gen);
- kfree_skb(skb);
-}
-
-/**
- * check_desc_avail - check descriptor availability on a send queue
- * @adap: the adapter
- * @q: the send queue
- * @skb: the packet needing the descriptors
- * @ndesc: the number of Tx descriptors needed
- * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
- *
- * Checks if the requested number of Tx descriptors is available on an
- * SGE send queue. If the queue is already suspended or not enough
- * descriptors are available the packet is queued for later transmission.
- * Must be called with the Tx queue locked.
- *
- * Returns 0 if enough descriptors are available, 1 if there aren't
- * enough descriptors and the packet has been queued, and 2 if the caller
- * needs to retry because there weren't enough descriptors at the
- * beginning of the call but some freed up in the mean time.
- */
-static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
- struct sk_buff *skb, unsigned int ndesc,
- unsigned int qid)
-{
- if (unlikely(!skb_queue_empty(&q->sendq))) {
- addq_exit:__skb_queue_tail(&q->sendq, skb);
- return 1;
- }
- if (unlikely(q->size - q->in_use < ndesc)) {
- struct sge_qset *qs = txq_to_qset(q, qid);
-
- set_bit(qid, &qs->txq_stopped);
- smp_mb__after_clear_bit();
-
- if (should_restart_tx(q) &&
- test_and_clear_bit(qid, &qs->txq_stopped))
- return 2;
-
- q->stops++;
- goto addq_exit;
- }
- return 0;
-}
-
-/**
- * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
- * @q: the SGE control Tx queue
- *
- * This is a variant of reclaim_completed_tx() that is used for Tx queues
- * that send only immediate data (presently just the control queues) and
- * thus do not have any sk_buffs to release.
- */
-static inline void reclaim_completed_tx_imm(struct sge_txq *q)
-{
- unsigned int reclaim = q->processed - q->cleaned;
-
- q->in_use -= reclaim;
- q->cleaned += reclaim;
-}
-
-static inline int immediate(const struct sk_buff *skb)
-{
- return skb->len <= WR_LEN;
-}
-
-/**
- * ctrl_xmit - send a packet through an SGE control Tx queue
- * @adap: the adapter
- * @q: the control queue
- * @skb: the packet
- *
- * Send a packet through an SGE control Tx queue. Packets sent through
- * a control queue must fit entirely as immediate data in a single Tx
- * descriptor and have no page fragments.
- */
-static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
- struct sk_buff *skb)
-{
- int ret;
- struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
-
- if (unlikely(!immediate(skb))) {
- WARN_ON(1);
- dev_kfree_skb(skb);
- return NET_XMIT_SUCCESS;
- }
-
- wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
- wrp->wr_lo = htonl(V_WR_TID(q->token));
-
- spin_lock(&q->lock);
- again:reclaim_completed_tx_imm(q);
-
- ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
- if (unlikely(ret)) {
- if (ret == 1) {
- spin_unlock(&q->lock);
- return NET_XMIT_CN;
- }
- goto again;
- }
-
- write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
-
- q->in_use++;
- if (++q->pidx >= q->size) {
- q->pidx = 0;
- q->gen ^= 1;
- }
- spin_unlock(&q->lock);
- wmb();
- t3_write_reg(adap, A_SG_KDOORBELL,
- F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
- return NET_XMIT_SUCCESS;
-}
-
-/**
- * restart_ctrlq - restart a suspended control queue
- * @qs: the queue set cotaining the control queue
- *
- * Resumes transmission on a suspended Tx control queue.
- */
-static void restart_ctrlq(unsigned long data)
-{
- struct sk_buff *skb;
- struct sge_qset *qs = (struct sge_qset *)data;
- struct sge_txq *q = &qs->txq[TXQ_CTRL];
-
- spin_lock(&q->lock);
- again:reclaim_completed_tx_imm(q);
-
- while (q->in_use < q->size &&
- (skb = __skb_dequeue(&q->sendq)) != NULL) {
-
- write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
-
- if (++q->pidx >= q->size) {
- q->pidx = 0;
- q->gen ^= 1;
- }
- q->in_use++;
- }
-
- if (!skb_queue_empty(&q->sendq)) {
- set_bit(TXQ_CTRL, &qs->txq_stopped);
- smp_mb__after_clear_bit();
-
- if (should_restart_tx(q) &&
- test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
- goto again;
- q->stops++;
- }
-
- spin_unlock(&q->lock);
- wmb();
- t3_write_reg(qs->adap, A_SG_KDOORBELL,
- F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
-}
-
-/*
- * Send a management message through control queue 0
- */
-int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
-{
- int ret;
- local_bh_disable();
- ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- local_bh_enable();
-
- return ret;
-}
-
-/**
- * deferred_unmap_destructor - unmap a packet when it is freed
- * @skb: the packet
- *
- * This is the packet destructor used for Tx packets that need to remain
- * mapped until they are freed rather than until their Tx descriptors are
- * freed.
- */
-static void deferred_unmap_destructor(struct sk_buff *skb)
-{
- int i;
- const dma_addr_t *p;
- const struct skb_shared_info *si;
- const struct deferred_unmap_info *dui;
-
- dui = (struct deferred_unmap_info *)skb->head;
- p = dui->addr;
-
- if (skb->tail - skb->transport_header)
- pci_unmap_single(dui->pdev, *p++,
- skb->tail - skb->transport_header,
- PCI_DMA_TODEVICE);
-
- si = skb_shinfo(skb);
- for (i = 0; i < si->nr_frags; i++)
- pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
- PCI_DMA_TODEVICE);
-}
-
-static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
- const struct sg_ent *sgl, int sgl_flits)
-{
- dma_addr_t *p;
- struct deferred_unmap_info *dui;
-
- dui = (struct deferred_unmap_info *)skb->head;
- dui->pdev = pdev;
- for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
- *p++ = be64_to_cpu(sgl->addr[0]);
- *p++ = be64_to_cpu(sgl->addr[1]);
- }
- if (sgl_flits)
- *p = be64_to_cpu(sgl->addr[0]);
-}
-
-/**
- * write_ofld_wr - write an offload work request
- * @adap: the adapter
- * @skb: the packet to send
- * @q: the Tx queue
- * @pidx: index of the first Tx descriptor to write
- * @gen: the generation value to use
- * @ndesc: number of descriptors the packet will occupy
- *
- * Write an offload work request to send the supplied packet. The packet
- * data already carry the work request with most fields populated.
- */
-static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
- struct sge_txq *q, unsigned int pidx,
- unsigned int gen, unsigned int ndesc)
-{
- unsigned int sgl_flits, flits;
- struct work_request_hdr *from;
- struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
- struct tx_desc *d = &q->desc[pidx];
-
- if (immediate(skb)) {
- q->sdesc[pidx].skb = NULL;
- write_imm(d, skb, skb->len, gen);
- return;
- }
-
- /* Only TX_DATA builds SGLs */
-
- from = (struct work_request_hdr *)skb->data;
- memcpy(&d->flit[1], &from[1],
- skb_transport_offset(skb) - sizeof(*from));
-
- flits = skb_transport_offset(skb) / 8;
- sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
- skb->tail - skb->transport_header,
- adap->pdev);
- if (need_skb_unmap()) {
- setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
- skb->destructor = deferred_unmap_destructor;
- }
-
- write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
- gen, from->wr_hi, from->wr_lo);
-}
-
-/**
- * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
- * @skb: the packet
- *
- * Returns the number of Tx descriptors needed for the given offload
- * packet. These packets are already fully constructed.
- */
-static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
-{
- unsigned int flits, cnt;
-
- if (skb->len <= WR_LEN)
- return 1; /* packet fits as immediate data */
-
- flits = skb_transport_offset(skb) / 8; /* headers */
- cnt = skb_shinfo(skb)->nr_frags;
- if (skb->tail != skb->transport_header)
- cnt++;
- return flits_to_desc(flits + sgl_len(cnt));
-}
-
-/**
- * ofld_xmit - send a packet through an offload queue
- * @adap: the adapter
- * @q: the Tx offload queue
- * @skb: the packet
- *
- * Send an offload packet through an SGE offload queue.
- */
-static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
- struct sk_buff *skb)
-{
- int ret;
- unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
-
- spin_lock(&q->lock);
-again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
-
- ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
- if (unlikely(ret)) {
- if (ret == 1) {
- skb->priority = ndesc; /* save for restart */
- spin_unlock(&q->lock);
- return NET_XMIT_CN;
- }
- goto again;
- }
-
- gen = q->gen;
- q->in_use += ndesc;
- pidx = q->pidx;
- q->pidx += ndesc;
- if (q->pidx >= q->size) {
- q->pidx -= q->size;
- q->gen ^= 1;
- }
- spin_unlock(&q->lock);
-
- write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
- check_ring_tx_db(adap, q);
- return NET_XMIT_SUCCESS;
-}
-
-/**
- * restart_offloadq - restart a suspended offload queue
- * @qs: the queue set cotaining the offload queue
- *
- * Resumes transmission on a suspended Tx offload queue.
- */
-static void restart_offloadq(unsigned long data)
-{
- struct sk_buff *skb;
- struct sge_qset *qs = (struct sge_qset *)data;
- struct sge_txq *q = &qs->txq[TXQ_OFLD];
- const struct port_info *pi = netdev_priv(qs->netdev);
- struct adapter *adap = pi->adapter;
-
- spin_lock(&q->lock);
-again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
-
- while ((skb = skb_peek(&q->sendq)) != NULL) {
- unsigned int gen, pidx;
- unsigned int ndesc = skb->priority;
-
- if (unlikely(q->size - q->in_use < ndesc)) {
- set_bit(TXQ_OFLD, &qs->txq_stopped);
- smp_mb__after_clear_bit();
-
- if (should_restart_tx(q) &&
- test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
- goto again;
- q->stops++;
- break;
- }
-
- gen = q->gen;
- q->in_use += ndesc;
- pidx = q->pidx;
- q->pidx += ndesc;
- if (q->pidx >= q->size) {
- q->pidx -= q->size;
- q->gen ^= 1;
- }
- __skb_unlink(skb, &q->sendq);
- spin_unlock(&q->lock);
-
- write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
- spin_lock(&q->lock);
- }
- spin_unlock(&q->lock);
-
-#if USE_GTS
- set_bit(TXQ_RUNNING, &q->flags);
- set_bit(TXQ_LAST_PKT_DB, &q->flags);
-#endif
- wmb();
- t3_write_reg(adap, A_SG_KDOORBELL,
- F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
-}
-
-/**
- * queue_set - return the queue set a packet should use
- * @skb: the packet
- *
- * Maps a packet to the SGE queue set it should use. The desired queue
- * set is carried in bits 1-3 in the packet's priority.
- */
-static inline int queue_set(const struct sk_buff *skb)
-{
- return skb->priority >> 1;
-}
-
-/**
- * is_ctrl_pkt - return whether an offload packet is a control packet
- * @skb: the packet
- *
- * Determines whether an offload packet should use an OFLD or a CTRL
- * Tx queue. This is indicated by bit 0 in the packet's priority.
- */
-static inline int is_ctrl_pkt(const struct sk_buff *skb)
-{
- return skb->priority & 1;
-}
-
-/**
- * t3_offload_tx - send an offload packet
- * @tdev: the offload device to send to
- * @skb: the packet
- *
- * Sends an offload packet. We use the packet priority to select the
- * appropriate Tx queue as follows: bit 0 indicates whether the packet
- * should be sent as regular or control, bits 1-3 select the queue set.
- */
-int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
-{
- struct adapter *adap = tdev2adap(tdev);
- struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
-
- if (unlikely(is_ctrl_pkt(skb)))
- return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
-
- return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
-}
-
-/**
- * offload_enqueue - add an offload packet to an SGE offload receive queue
- * @q: the SGE response queue
- * @skb: the packet
- *
- * Add a new offload packet to an SGE response queue's offload packet
- * queue. If the packet is the first on the queue it schedules the RX
- * softirq to process the queue.
- */
-static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
-{
- int was_empty = skb_queue_empty(&q->rx_queue);
-
- __skb_queue_tail(&q->rx_queue, skb);
-
- if (was_empty) {
- struct sge_qset *qs = rspq_to_qset(q);
-
- napi_schedule(&qs->napi);
- }
-}
-
-/**
- * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
- * @tdev: the offload device that will be receiving the packets
- * @q: the SGE response queue that assembled the bundle
- * @skbs: the partial bundle
- * @n: the number of packets in the bundle
- *
- * Delivers a (partial) bundle of Rx offload packets to an offload device.
- */
-static inline void deliver_partial_bundle(struct t3cdev *tdev,
- struct sge_rspq *q,
- struct sk_buff *skbs[], int n)
-{
- if (n) {
- q->offload_bundles++;
- tdev->recv(tdev, skbs, n);
- }
-}
-
-/**
- * ofld_poll - NAPI handler for offload packets in interrupt mode
- * @dev: the network device doing the polling
- * @budget: polling budget
- *
- * The NAPI handler for offload packets when a response queue is serviced
- * by the hard interrupt handler, i.e., when it's operating in non-polling
- * mode. Creates small packet batches and sends them through the offload
- * receive handler. Batches need to be of modest size as we do prefetches
- * on the packets in each.
- */
-static int ofld_poll(struct napi_struct *napi, int budget)
-{
- struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
- struct sge_rspq *q = &qs->rspq;
- struct adapter *adapter = qs->adap;
- int work_done = 0;
-
- while (work_done < budget) {
- struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
- struct sk_buff_head queue;
- int ngathered;
-
- spin_lock_irq(&q->lock);
- __skb_queue_head_init(&queue);
- skb_queue_splice_init(&q->rx_queue, &queue);
- if (skb_queue_empty(&queue)) {
- napi_complete(napi);
- spin_unlock_irq(&q->lock);
- return work_done;
- }
- spin_unlock_irq(&q->lock);
-
- ngathered = 0;
- skb_queue_walk_safe(&queue, skb, tmp) {
- if (work_done >= budget)
- break;
- work_done++;
-
- __skb_unlink(skb, &queue);
- prefetch(skb->data);
- skbs[ngathered] = skb;
- if (++ngathered == RX_BUNDLE_SIZE) {
- q->offload_bundles++;
- adapter->tdev.recv(&adapter->tdev, skbs,
- ngathered);
- ngathered = 0;
- }
- }
- if (!skb_queue_empty(&queue)) {
- /* splice remaining packets back onto Rx queue */
- spin_lock_irq(&q->lock);
- skb_queue_splice(&queue, &q->rx_queue);
- spin_unlock_irq(&q->lock);
- }
- deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
- }
-
- return work_done;
-}
-
-/**
- * rx_offload - process a received offload packet
- * @tdev: the offload device receiving the packet
- * @rq: the response queue that received the packet
- * @skb: the packet
- * @rx_gather: a gather list of packets if we are building a bundle
- * @gather_idx: index of the next available slot in the bundle
- *
- * Process an ingress offload pakcet and add it to the offload ingress
- * queue. Returns the index of the next available slot in the bundle.
- */
-static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
- struct sk_buff *skb, struct sk_buff *rx_gather[],
- unsigned int gather_idx)
-{
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- if (rq->polling) {
- rx_gather[gather_idx++] = skb;
- if (gather_idx == RX_BUNDLE_SIZE) {
- tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
- gather_idx = 0;
- rq->offload_bundles++;
- }
- } else
- offload_enqueue(rq, skb);
-
- return gather_idx;
-}
-
-/**
- * restart_tx - check whether to restart suspended Tx queues
- * @qs: the queue set to resume
- *
- * Restarts suspended Tx queues of an SGE queue set if they have enough
- * free resources to resume operation.
- */
-static void restart_tx(struct sge_qset *qs)
-{
- if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
- should_restart_tx(&qs->txq[TXQ_ETH]) &&
- test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
- qs->txq[TXQ_ETH].restarts++;
- if (netif_running(qs->netdev))
- netif_tx_wake_queue(qs->tx_q);
- }
-
- if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
- should_restart_tx(&qs->txq[TXQ_OFLD]) &&
- test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
- qs->txq[TXQ_OFLD].restarts++;
- tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
- }
- if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
- should_restart_tx(&qs->txq[TXQ_CTRL]) &&
- test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
- qs->txq[TXQ_CTRL].restarts++;
- tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
- }
-}
-
-/**
- * cxgb3_arp_process - process an ARP request probing a private IP address
- * @adapter: the adapter
- * @skb: the skbuff containing the ARP request
- *
- * Check if the ARP request is probing the private IP address
- * dedicated to iSCSI, generate an ARP reply if so.
- */
-static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
-{
- struct net_device *dev = skb->dev;
- struct arphdr *arp;
- unsigned char *arp_ptr;
- unsigned char *sha;
- __be32 sip, tip;
-
- if (!dev)
- return;
-
- skb_reset_network_header(skb);
- arp = arp_hdr(skb);
-
- if (arp->ar_op != htons(ARPOP_REQUEST))
- return;
-
- arp_ptr = (unsigned char *)(arp + 1);
- sha = arp_ptr;
- arp_ptr += dev->addr_len;
- memcpy(&sip, arp_ptr, sizeof(sip));
- arp_ptr += sizeof(sip);
- arp_ptr += dev->addr_len;
- memcpy(&tip, arp_ptr, sizeof(tip));
-
- if (tip != pi->iscsi_ipv4addr)
- return;
-
- arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
- pi->iscsic.mac_addr, sha);
-
-}
-
-static inline int is_arp(struct sk_buff *skb)
-{
- return skb->protocol == htons(ETH_P_ARP);
-}
-
-static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
- struct sk_buff *skb)
-{
- if (is_arp(skb)) {
- cxgb3_arp_process(pi, skb);
- return;
- }
-
- if (pi->iscsic.recv)
- pi->iscsic.recv(pi, skb);
-
-}
-
-/**
- * rx_eth - process an ingress ethernet packet
- * @adap: the adapter
- * @rq: the response queue that received the packet
- * @skb: the packet
- * @pad: amount of padding at the start of the buffer
- *
- * Process an ingress ethernet pakcet and deliver it to the stack.
- * The padding is 2 if the packet was delivered in an Rx buffer and 0
- * if it was immediate data in a response.
- */
-static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
- struct sk_buff *skb, int pad, int lro)
-{
- struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
- struct sge_qset *qs = rspq_to_qset(rq);
- struct port_info *pi;
-
- skb_pull(skb, sizeof(*p) + pad);
- skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
- pi = netdev_priv(skb->dev);
- if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
- p->csum == htons(0xffff) && !p->fragment) {
- qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else
- skb_checksum_none_assert(skb);
- skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
-
- if (p->vlan_valid) {
- qs->port_stats[SGE_PSTAT_VLANEX]++;
- __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
- }
- if (rq->polling) {
- if (lro)
- napi_gro_receive(&qs->napi, skb);
- else {
- if (unlikely(pi->iscsic.flags))
- cxgb3_process_iscsi_prov_pack(pi, skb);
- netif_receive_skb(skb);
- }
- } else
- netif_rx(skb);
-}
-
-static inline int is_eth_tcp(u32 rss)
-{
- return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
-}
-
-/**
- * lro_add_page - add a page chunk to an LRO session
- * @adap: the adapter
- * @qs: the associated queue set
- * @fl: the free list containing the page chunk to add
- * @len: packet length
- * @complete: Indicates the last fragment of a frame
- *
- * Add a received packet contained in a page chunk to an existing LRO
- * session.
- */
-static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
- struct sge_fl *fl, int len, int complete)
-{
- struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
- struct port_info *pi = netdev_priv(qs->netdev);
- struct sk_buff *skb = NULL;
- struct cpl_rx_pkt *cpl;
- struct skb_frag_struct *rx_frag;
- int nr_frags;
- int offset = 0;
-
- if (!qs->nomem) {
- skb = napi_get_frags(&qs->napi);
- qs->nomem = !skb;
- }
-
- fl->credits--;
-
- pci_dma_sync_single_for_cpu(adap->pdev,
- dma_unmap_addr(sd, dma_addr),
- fl->buf_size - SGE_PG_RSVD,
- PCI_DMA_FROMDEVICE);
-
- (*sd->pg_chunk.p_cnt)--;
- if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
- pci_unmap_page(adap->pdev,
- sd->pg_chunk.mapping,
- fl->alloc_size,
- PCI_DMA_FROMDEVICE);
-
- if (!skb) {
- put_page(sd->pg_chunk.page);
- if (complete)
- qs->nomem = 0;
- return;
- }
-
- rx_frag = skb_shinfo(skb)->frags;
- nr_frags = skb_shinfo(skb)->nr_frags;
-
- if (!nr_frags) {
- offset = 2 + sizeof(struct cpl_rx_pkt);
- cpl = qs->lro_va = sd->pg_chunk.va + 2;
-
- if ((qs->netdev->features & NETIF_F_RXCSUM) &&
- cpl->csum_valid && cpl->csum == htons(0xffff)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
- } else
- skb->ip_summed = CHECKSUM_NONE;
- } else
- cpl = qs->lro_va;
-
- len -= offset;
-
- rx_frag += nr_frags;
- rx_frag->page = sd->pg_chunk.page;
- rx_frag->page_offset = sd->pg_chunk.offset + offset;
- rx_frag->size = len;
-
- skb->len += len;
- skb->data_len += len;
- skb->truesize += len;
- skb_shinfo(skb)->nr_frags++;
-
- if (!complete)
- return;
-
- skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
-
- if (cpl->vlan_valid)
- __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
- napi_gro_frags(&qs->napi);
-}
-
-/**
- * handle_rsp_cntrl_info - handles control information in a response
- * @qs: the queue set corresponding to the response
- * @flags: the response control flags
- *
- * Handles the control information of an SGE response, such as GTS
- * indications and completion credits for the queue set's Tx queues.
- * HW coalesces credits, we don't do any extra SW coalescing.
- */
-static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
-{
- unsigned int credits;
-
-#if USE_GTS
- if (flags & F_RSPD_TXQ0_GTS)
- clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
-#endif
-
- credits = G_RSPD_TXQ0_CR(flags);
- if (credits)
- qs->txq[TXQ_ETH].processed += credits;
-
- credits = G_RSPD_TXQ2_CR(flags);
- if (credits)
- qs->txq[TXQ_CTRL].processed += credits;
-
-# if USE_GTS
- if (flags & F_RSPD_TXQ1_GTS)
- clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
-# endif
- credits = G_RSPD_TXQ1_CR(flags);
- if (credits)
- qs->txq[TXQ_OFLD].processed += credits;
-}
-
-/**
- * check_ring_db - check if we need to ring any doorbells
- * @adapter: the adapter
- * @qs: the queue set whose Tx queues are to be examined
- * @sleeping: indicates which Tx queue sent GTS
- *
- * Checks if some of a queue set's Tx queues need to ring their doorbells
- * to resume transmission after idling while they still have unprocessed
- * descriptors.
- */
-static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
- unsigned int sleeping)
-{
- if (sleeping & F_RSPD_TXQ0_GTS) {
- struct sge_txq *txq = &qs->txq[TXQ_ETH];
-
- if (txq->cleaned + txq->in_use != txq->processed &&
- !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
- set_bit(TXQ_RUNNING, &txq->flags);
- t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
- V_EGRCNTX(txq->cntxt_id));
- }
- }
-
- if (sleeping & F_RSPD_TXQ1_GTS) {
- struct sge_txq *txq = &qs->txq[TXQ_OFLD];
-
- if (txq->cleaned + txq->in_use != txq->processed &&
- !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
- set_bit(TXQ_RUNNING, &txq->flags);
- t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
- V_EGRCNTX(txq->cntxt_id));
- }
- }
-}
-
-/**
- * is_new_response - check if a response is newly written
- * @r: the response descriptor
- * @q: the response queue
- *
- * Returns true if a response descriptor contains a yet unprocessed
- * response.
- */
-static inline int is_new_response(const struct rsp_desc *r,
- const struct sge_rspq *q)
-{
- return (r->intr_gen & F_RSPD_GEN2) == q->gen;
-}
-
-static inline void clear_rspq_bufstate(struct sge_rspq * const q)
-{
- q->pg_skb = NULL;
- q->rx_recycle_buf = 0;
-}
-
-#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
-#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
- V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
- V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
- V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
-
-/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
-#define NOMEM_INTR_DELAY 2500
-
-/**
- * process_responses - process responses from an SGE response queue
- * @adap: the adapter
- * @qs: the queue set to which the response queue belongs
- * @budget: how many responses can be processed in this round
- *
- * Process responses from an SGE response queue up to the supplied budget.
- * Responses include received packets as well as credits and other events
- * for the queues that belong to the response queue's queue set.
- * A negative budget is effectively unlimited.
- *
- * Additionally choose the interrupt holdoff time for the next interrupt
- * on this queue. If the system is under memory shortage use a fairly
- * long delay to help recovery.
- */
-static int process_responses(struct adapter *adap, struct sge_qset *qs,
- int budget)
-{
- struct sge_rspq *q = &qs->rspq;
- struct rsp_desc *r = &q->desc[q->cidx];
- int budget_left = budget;
- unsigned int sleeping = 0;
- struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
- int ngathered = 0;
-
- q->next_holdoff = q->holdoff_tmr;
-
- while (likely(budget_left && is_new_response(r, q))) {
- int packet_complete, eth, ethpad = 2;
- int lro = !!(qs->netdev->features & NETIF_F_GRO);
- struct sk_buff *skb = NULL;
- u32 len, flags;
- __be32 rss_hi, rss_lo;
-
- rmb();
- eth = r->rss_hdr.opcode == CPL_RX_PKT;
- rss_hi = *(const __be32 *)r;
- rss_lo = r->rss_hdr.rss_hash_val;
- flags = ntohl(r->flags);
-
- if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
- skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
- if (!skb)
- goto no_mem;
-
- memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
- skb->data[0] = CPL_ASYNC_NOTIF;
- rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
- q->async_notif++;
- } else if (flags & F_RSPD_IMM_DATA_VALID) {
- skb = get_imm_packet(r);
- if (unlikely(!skb)) {
-no_mem:
- q->next_holdoff = NOMEM_INTR_DELAY;
- q->nomem++;
- /* consume one credit since we tried */
- budget_left--;
- break;
- }
- q->imm_data++;
- ethpad = 0;
- } else if ((len = ntohl(r->len_cq)) != 0) {
- struct sge_fl *fl;
-
- lro &= eth && is_eth_tcp(rss_hi);
-
- fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
- if (fl->use_pages) {
- void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
-
- prefetch(addr);
-#if L1_CACHE_BYTES < 128
- prefetch(addr + L1_CACHE_BYTES);
-#endif
- __refill_fl(adap, fl);
- if (lro > 0) {
- lro_add_page(adap, qs, fl,
- G_RSPD_LEN(len),
- flags & F_RSPD_EOP);
- goto next_fl;
- }
-
- skb = get_packet_pg(adap, fl, q,
- G_RSPD_LEN(len),
- eth ?
- SGE_RX_DROP_THRES : 0);
- q->pg_skb = skb;
- } else
- skb = get_packet(adap, fl, G_RSPD_LEN(len),
- eth ? SGE_RX_DROP_THRES : 0);
- if (unlikely(!skb)) {
- if (!eth)
- goto no_mem;
- q->rx_drops++;
- } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
- __skb_pull(skb, 2);
-next_fl:
- if (++fl->cidx == fl->size)
- fl->cidx = 0;
- } else
- q->pure_rsps++;
-
- if (flags & RSPD_CTRL_MASK) {
- sleeping |= flags & RSPD_GTS_MASK;
- handle_rsp_cntrl_info(qs, flags);
- }
-
- r++;
- if (unlikely(++q->cidx == q->size)) {
- q->cidx = 0;
- q->gen ^= 1;
- r = q->desc;
- }
- prefetch(r);
-
- if (++q->credits >= (q->size / 4)) {
- refill_rspq(adap, q, q->credits);
- q->credits = 0;
- }
-
- packet_complete = flags &
- (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
- F_RSPD_ASYNC_NOTIF);
-
- if (skb != NULL && packet_complete) {
- if (eth)
- rx_eth(adap, q, skb, ethpad, lro);
- else {
- q->offload_pkts++;
- /* Preserve the RSS info in csum & priority */
- skb->csum = rss_hi;
- skb->priority = rss_lo;
- ngathered = rx_offload(&adap->tdev, q, skb,
- offload_skbs,
- ngathered);
- }
-
- if (flags & F_RSPD_EOP)
- clear_rspq_bufstate(q);
- }
- --budget_left;
- }
-
- deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
-
- if (sleeping)
- check_ring_db(adap, qs, sleeping);
-
- smp_mb(); /* commit Tx queue .processed updates */
- if (unlikely(qs->txq_stopped != 0))
- restart_tx(qs);
-
- budget -= budget_left;
- return budget;
-}
-
-static inline int is_pure_response(const struct rsp_desc *r)
-{
- __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
-
- return (n | r->len_cq) == 0;
-}
-
-/**
- * napi_rx_handler - the NAPI handler for Rx processing
- * @napi: the napi instance
- * @budget: how many packets we can process in this round
- *
- * Handler for new data events when using NAPI.
- */
-static int napi_rx_handler(struct napi_struct *napi, int budget)
-{
- struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
- struct adapter *adap = qs->adap;
- int work_done = process_responses(adap, qs, budget);
-
- if (likely(work_done < budget)) {
- napi_complete(napi);
-
- /*
- * Because we don't atomically flush the following
- * write it is possible that in very rare cases it can
- * reach the device in a way that races with a new
- * response being written plus an error interrupt
- * causing the NAPI interrupt handler below to return
- * unhandled status to the OS. To protect against
- * this would require flushing the write and doing
- * both the write and the flush with interrupts off.
- * Way too expensive and unjustifiable given the
- * rarity of the race.
- *
- * The race cannot happen at all with MSI-X.
- */
- t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
- V_NEWTIMER(qs->rspq.next_holdoff) |
- V_NEWINDEX(qs->rspq.cidx));
- }
- return work_done;
-}
-
-/*
- * Returns true if the device is already scheduled for polling.
- */
-static inline int napi_is_scheduled(struct napi_struct *napi)
-{
- return test_bit(NAPI_STATE_SCHED, &napi->state);
-}
-
-/**
- * process_pure_responses - process pure responses from a response queue
- * @adap: the adapter
- * @qs: the queue set owning the response queue
- * @r: the first pure response to process
- *
- * A simpler version of process_responses() that handles only pure (i.e.,
- * non data-carrying) responses. Such respones are too light-weight to
- * justify calling a softirq under NAPI, so we handle them specially in
- * the interrupt handler. The function is called with a pointer to a
- * response, which the caller must ensure is a valid pure response.
- *
- * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
- */
-static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
- struct rsp_desc *r)
-{
- struct sge_rspq *q = &qs->rspq;
- unsigned int sleeping = 0;
-
- do {
- u32 flags = ntohl(r->flags);
-
- r++;
- if (unlikely(++q->cidx == q->size)) {
- q->cidx = 0;
- q->gen ^= 1;
- r = q->desc;
- }
- prefetch(r);
-
- if (flags & RSPD_CTRL_MASK) {
- sleeping |= flags & RSPD_GTS_MASK;
- handle_rsp_cntrl_info(qs, flags);
- }
-
- q->pure_rsps++;
- if (++q->credits >= (q->size / 4)) {
- refill_rspq(adap, q, q->credits);
- q->credits = 0;
- }
- if (!is_new_response(r, q))
- break;
- rmb();
- } while (is_pure_response(r));
-
- if (sleeping)
- check_ring_db(adap, qs, sleeping);
-
- smp_mb(); /* commit Tx queue .processed updates */
- if (unlikely(qs->txq_stopped != 0))
- restart_tx(qs);
-
- return is_new_response(r, q);
-}
-
-/**
- * handle_responses - decide what to do with new responses in NAPI mode
- * @adap: the adapter
- * @q: the response queue
- *
- * This is used by the NAPI interrupt handlers to decide what to do with
- * new SGE responses. If there are no new responses it returns -1. If
- * there are new responses and they are pure (i.e., non-data carrying)
- * it handles them straight in hard interrupt context as they are very
- * cheap and don't deliver any packets. Finally, if there are any data
- * signaling responses it schedules the NAPI handler. Returns 1 if it
- * schedules NAPI, 0 if all new responses were pure.
- *
- * The caller must ascertain NAPI is not already running.
- */
-static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
-{
- struct sge_qset *qs = rspq_to_qset(q);
- struct rsp_desc *r = &q->desc[q->cidx];
-
- if (!is_new_response(r, q))
- return -1;
- rmb();
- if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
- t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
- V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
- return 0;
- }
- napi_schedule(&qs->napi);
- return 1;
-}
-
-/*
- * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
- * (i.e., response queue serviced in hard interrupt).
- */
-static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
-{
- struct sge_qset *qs = cookie;
- struct adapter *adap = qs->adap;
- struct sge_rspq *q = &qs->rspq;
-
- spin_lock(&q->lock);
- if (process_responses(adap, qs, -1) == 0)
- q->unhandled_irqs++;
- t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
- V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
- spin_unlock(&q->lock);
- return IRQ_HANDLED;
-}
-
-/*
- * The MSI-X interrupt handler for an SGE response queue for the NAPI case
- * (i.e., response queue serviced by NAPI polling).
- */
-static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
-{
- struct sge_qset *qs = cookie;
- struct sge_rspq *q = &qs->rspq;
-
- spin_lock(&q->lock);
-
- if (handle_responses(qs->adap, q) < 0)
- q->unhandled_irqs++;
- spin_unlock(&q->lock);
- return IRQ_HANDLED;
-}
-
-/*
- * The non-NAPI MSI interrupt handler. This needs to handle data events from
- * SGE response queues as well as error and other async events as they all use
- * the same MSI vector. We use one SGE response queue per port in this mode
- * and protect all response queues with queue 0's lock.
- */
-static irqreturn_t t3_intr_msi(int irq, void *cookie)
-{
- int new_packets = 0;
- struct adapter *adap = cookie;
- struct sge_rspq *q = &adap->sge.qs[0].rspq;
-
- spin_lock(&q->lock);
-
- if (process_responses(adap, &adap->sge.qs[0], -1)) {
- t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
- V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
- new_packets = 1;
- }
-
- if (adap->params.nports == 2 &&
- process_responses(adap, &adap->sge.qs[1], -1)) {
- struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
-
- t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
- V_NEWTIMER(q1->next_holdoff) |
- V_NEWINDEX(q1->cidx));
- new_packets = 1;
- }
-
- if (!new_packets && t3_slow_intr_handler(adap) == 0)
- q->unhandled_irqs++;
-
- spin_unlock(&q->lock);
- return IRQ_HANDLED;
-}
-
-static int rspq_check_napi(struct sge_qset *qs)
-{
- struct sge_rspq *q = &qs->rspq;
-
- if (!napi_is_scheduled(&qs->napi) &&
- is_new_response(&q->desc[q->cidx], q)) {
- napi_schedule(&qs->napi);
- return 1;
- }
- return 0;
-}
-
-/*
- * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
- * by NAPI polling). Handles data events from SGE response queues as well as
- * error and other async events as they all use the same MSI vector. We use
- * one SGE response queue per port in this mode and protect all response
- * queues with queue 0's lock.
- */
-static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
-{
- int new_packets;
- struct adapter *adap = cookie;
- struct sge_rspq *q = &adap->sge.qs[0].rspq;
-
- spin_lock(&q->lock);
-
- new_packets = rspq_check_napi(&adap->sge.qs[0]);
- if (adap->params.nports == 2)
- new_packets += rspq_check_napi(&adap->sge.qs[1]);
- if (!new_packets && t3_slow_intr_handler(adap) == 0)
- q->unhandled_irqs++;
-
- spin_unlock(&q->lock);
- return IRQ_HANDLED;
-}
-
-/*
- * A helper function that processes responses and issues GTS.
- */
-static inline int process_responses_gts(struct adapter *adap,
- struct sge_rspq *rq)
-{
- int work;
-
- work = process_responses(adap, rspq_to_qset(rq), -1);
- t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
- V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
- return work;
-}
-
-/*
- * The legacy INTx interrupt handler. This needs to handle data events from
- * SGE response queues as well as error and other async events as they all use
- * the same interrupt pin. We use one SGE response queue per port in this mode
- * and protect all response queues with queue 0's lock.
- */
-static irqreturn_t t3_intr(int irq, void *cookie)
-{
- int work_done, w0, w1;
- struct adapter *adap = cookie;
- struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
- struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
-
- spin_lock(&q0->lock);
-
- w0 = is_new_response(&q0->desc[q0->cidx], q0);
- w1 = adap->params.nports == 2 &&
- is_new_response(&q1->desc[q1->cidx], q1);
-
- if (likely(w0 | w1)) {
- t3_write_reg(adap, A_PL_CLI, 0);
- t3_read_reg(adap, A_PL_CLI); /* flush */
-
- if (likely(w0))
- process_responses_gts(adap, q0);
-
- if (w1)
- process_responses_gts(adap, q1);
-
- work_done = w0 | w1;
- } else
- work_done = t3_slow_intr_handler(adap);
-
- spin_unlock(&q0->lock);
- return IRQ_RETVAL(work_done != 0);
-}
-
-/*
- * Interrupt handler for legacy INTx interrupts for T3B-based cards.
- * Handles data events from SGE response queues as well as error and other
- * async events as they all use the same interrupt pin. We use one SGE
- * response queue per port in this mode and protect all response queues with
- * queue 0's lock.
- */
-static irqreturn_t t3b_intr(int irq, void *cookie)
-{
- u32 map;
- struct adapter *adap = cookie;
- struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
-
- t3_write_reg(adap, A_PL_CLI, 0);
- map = t3_read_reg(adap, A_SG_DATA_INTR);
-
- if (unlikely(!map)) /* shared interrupt, most likely */
- return IRQ_NONE;
-
- spin_lock(&q0->lock);
-
- if (unlikely(map & F_ERRINTR))
- t3_slow_intr_handler(adap);
-
- if (likely(map & 1))
- process_responses_gts(adap, q0);
-
- if (map & 2)
- process_responses_gts(adap, &adap->sge.qs[1].rspq);
-
- spin_unlock(&q0->lock);
- return IRQ_HANDLED;
-}
-
-/*
- * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
- * Handles data events from SGE response queues as well as error and other
- * async events as they all use the same interrupt pin. We use one SGE
- * response queue per port in this mode and protect all response queues with
- * queue 0's lock.
- */
-static irqreturn_t t3b_intr_napi(int irq, void *cookie)
-{
- u32 map;
- struct adapter *adap = cookie;
- struct sge_qset *qs0 = &adap->sge.qs[0];
- struct sge_rspq *q0 = &qs0->rspq;
-
- t3_write_reg(adap, A_PL_CLI, 0);
- map = t3_read_reg(adap, A_SG_DATA_INTR);
-
- if (unlikely(!map)) /* shared interrupt, most likely */
- return IRQ_NONE;
-
- spin_lock(&q0->lock);
-
- if (unlikely(map & F_ERRINTR))
- t3_slow_intr_handler(adap);
-
- if (likely(map & 1))
- napi_schedule(&qs0->napi);
-
- if (map & 2)
- napi_schedule(&adap->sge.qs[1].napi);
-
- spin_unlock(&q0->lock);
- return IRQ_HANDLED;
-}
-
-/**
- * t3_intr_handler - select the top-level interrupt handler
- * @adap: the adapter
- * @polling: whether using NAPI to service response queues
- *
- * Selects the top-level interrupt handler based on the type of interrupts
- * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
- * response queues.
- */
-irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
-{
- if (adap->flags & USING_MSIX)
- return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
- if (adap->flags & USING_MSI)
- return polling ? t3_intr_msi_napi : t3_intr_msi;
- if (adap->params.rev > 0)
- return polling ? t3b_intr_napi : t3b_intr;
- return t3_intr;
-}
-
-#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
- F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
- V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
- F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
- F_HIRCQPARITYERROR)
-#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
-#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
- F_RSPQDISABLED)
-
-/**
- * t3_sge_err_intr_handler - SGE async event interrupt handler
- * @adapter: the adapter
- *
- * Interrupt handler for SGE asynchronous (non-data) events.
- */
-void t3_sge_err_intr_handler(struct adapter *adapter)
-{
- unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
- ~F_FLEMPTY;
-
- if (status & SGE_PARERR)
- CH_ALERT(adapter, "SGE parity error (0x%x)\n",
- status & SGE_PARERR);
- if (status & SGE_FRAMINGERR)
- CH_ALERT(adapter, "SGE framing error (0x%x)\n",
- status & SGE_FRAMINGERR);
-
- if (status & F_RSPQCREDITOVERFOW)
- CH_ALERT(adapter, "SGE response queue credit overflow\n");
-
- if (status & F_RSPQDISABLED) {
- v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
-
- CH_ALERT(adapter,
- "packet delivered to disabled response queue "
- "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
- }
-
- if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
- queue_work(cxgb3_wq, &adapter->db_drop_task);
-
- if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
- queue_work(cxgb3_wq, &adapter->db_full_task);
-
- if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
- queue_work(cxgb3_wq, &adapter->db_empty_task);
-
- t3_write_reg(adapter, A_SG_INT_CAUSE, status);
- if (status & SGE_FATALERR)
- t3_fatal_err(adapter);
-}
-
-/**
- * sge_timer_tx - perform periodic maintenance of an SGE qset
- * @data: the SGE queue set to maintain
- *
- * Runs periodically from a timer to perform maintenance of an SGE queue
- * set. It performs two tasks:
- *
- * Cleans up any completed Tx descriptors that may still be pending.
- * Normal descriptor cleanup happens when new packets are added to a Tx
- * queue so this timer is relatively infrequent and does any cleanup only
- * if the Tx queue has not seen any new packets in a while. We make a
- * best effort attempt to reclaim descriptors, in that we don't wait
- * around if we cannot get a queue's lock (which most likely is because
- * someone else is queueing new packets and so will also handle the clean
- * up). Since control queues use immediate data exclusively we don't
- * bother cleaning them up here.
- *
- */
-static void sge_timer_tx(unsigned long data)
-{
- struct sge_qset *qs = (struct sge_qset *)data;
- struct port_info *pi = netdev_priv(qs->netdev);
- struct adapter *adap = pi->adapter;
- unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
- unsigned long next_period;
-
- if (__netif_tx_trylock(qs->tx_q)) {
- tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
- TX_RECLAIM_TIMER_CHUNK);
- __netif_tx_unlock(qs->tx_q);
- }
-
- if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
- tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
- TX_RECLAIM_TIMER_CHUNK);
- spin_unlock(&qs->txq[TXQ_OFLD].lock);
- }
-
- next_period = TX_RECLAIM_PERIOD >>
- (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
- TX_RECLAIM_TIMER_CHUNK);
- mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
-}
-
-/*
- * sge_timer_rx - perform periodic maintenance of an SGE qset
- * @data: the SGE queue set to maintain
- *
- * a) Replenishes Rx queues that have run out due to memory shortage.
- * Normally new Rx buffers are added when existing ones are consumed but
- * when out of memory a queue can become empty. We try to add only a few
- * buffers here, the queue will be replenished fully as these new buffers
- * are used up if memory shortage has subsided.
- *
- * b) Return coalesced response queue credits in case a response queue is
- * starved.
- *
- */
-static void sge_timer_rx(unsigned long data)
-{
- spinlock_t *lock;
- struct sge_qset *qs = (struct sge_qset *)data;
- struct port_info *pi = netdev_priv(qs->netdev);
- struct adapter *adap = pi->adapter;
- u32 status;
-
- lock = adap->params.rev > 0 ?
- &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
-
- if (!spin_trylock_irq(lock))
- goto out;
-
- if (napi_is_scheduled(&qs->napi))
- goto unlock;
-
- if (adap->params.rev < 4) {
- status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
-
- if (status & (1 << qs->rspq.cntxt_id)) {
- qs->rspq.starved++;
- if (qs->rspq.credits) {
- qs->rspq.credits--;
- refill_rspq(adap, &qs->rspq, 1);
- qs->rspq.restarted++;
- t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
- 1 << qs->rspq.cntxt_id);
- }
- }
- }
-
- if (qs->fl[0].credits < qs->fl[0].size)
- __refill_fl(adap, &qs->fl[0]);
- if (qs->fl[1].credits < qs->fl[1].size)
- __refill_fl(adap, &qs->fl[1]);
-
-unlock:
- spin_unlock_irq(lock);
-out:
- mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
-}
-
-/**
- * t3_update_qset_coalesce - update coalescing settings for a queue set
- * @qs: the SGE queue set
- * @p: new queue set parameters
- *
- * Update the coalescing settings for an SGE queue set. Nothing is done
- * if the queue set is not initialized yet.
- */
-void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
-{
- qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
- qs->rspq.polling = p->polling;
- qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
-}
-
-/**
- * t3_sge_alloc_qset - initialize an SGE queue set
- * @adapter: the adapter
- * @id: the queue set id
- * @nports: how many Ethernet ports will be using this queue set
- * @irq_vec_idx: the IRQ vector index for response queue interrupts
- * @p: configuration parameters for this queue set
- * @ntxq: number of Tx queues for the queue set
- * @netdev: net device associated with this queue set
- * @netdevq: net device TX queue associated with this queue set
- *
- * Allocate resources and initialize an SGE queue set. A queue set
- * comprises a response queue, two Rx free-buffer queues, and up to 3
- * Tx queues. The Tx queues are assigned roles in the order Ethernet
- * queue, offload queue, and control queue.
- */
-int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
- int irq_vec_idx, const struct qset_params *p,
- int ntxq, struct net_device *dev,
- struct netdev_queue *netdevq)
-{
- int i, avail, ret = -ENOMEM;
- struct sge_qset *q = &adapter->sge.qs[id];
-
- init_qset_cntxt(q, id);
- setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
- setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
-
- q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
- sizeof(struct rx_desc),
- sizeof(struct rx_sw_desc),
- &q->fl[0].phys_addr, &q->fl[0].sdesc);
- if (!q->fl[0].desc)
- goto err;
-
- q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
- sizeof(struct rx_desc),
- sizeof(struct rx_sw_desc),
- &q->fl[1].phys_addr, &q->fl[1].sdesc);
- if (!q->fl[1].desc)
- goto err;
-
- q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
- sizeof(struct rsp_desc), 0,
- &q->rspq.phys_addr, NULL);
- if (!q->rspq.desc)
- goto err;
-
- for (i = 0; i < ntxq; ++i) {
- /*
- * The control queue always uses immediate data so does not
- * need to keep track of any sk_buffs.
- */
- size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
-
- q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
- sizeof(struct tx_desc), sz,
- &q->txq[i].phys_addr,
- &q->txq[i].sdesc);
- if (!q->txq[i].desc)
- goto err;
-
- q->txq[i].gen = 1;
- q->txq[i].size = p->txq_size[i];
- spin_lock_init(&q->txq[i].lock);
- skb_queue_head_init(&q->txq[i].sendq);
- }
-
- tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
- (unsigned long)q);
- tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
- (unsigned long)q);
-
- q->fl[0].gen = q->fl[1].gen = 1;
- q->fl[0].size = p->fl_size;
- q->fl[1].size = p->jumbo_size;
-
- q->rspq.gen = 1;
- q->rspq.size = p->rspq_size;
- spin_lock_init(&q->rspq.lock);
- skb_queue_head_init(&q->rspq.rx_queue);
-
- q->txq[TXQ_ETH].stop_thres = nports *
- flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
-
-#if FL0_PG_CHUNK_SIZE > 0
- q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
-#else
- q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
-#endif
-#if FL1_PG_CHUNK_SIZE > 0
- q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
-#else
- q->fl[1].buf_size = is_offload(adapter) ?
- (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
- MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
-#endif
-
- q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
- q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
- q->fl[0].order = FL0_PG_ORDER;
- q->fl[1].order = FL1_PG_ORDER;
- q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
- q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
-
- spin_lock_irq(&adapter->sge.reg_lock);
-
- /* FL threshold comparison uses < */
- ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
- q->rspq.phys_addr, q->rspq.size,
- q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
- if (ret)
- goto err_unlock;
-
- for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
- ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
- q->fl[i].phys_addr, q->fl[i].size,
- q->fl[i].buf_size - SGE_PG_RSVD,
- p->cong_thres, 1, 0);
- if (ret)
- goto err_unlock;
- }
-
- ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
- SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
- q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
- 1, 0);
- if (ret)
- goto err_unlock;
-
- if (ntxq > 1) {
- ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
- USE_GTS, SGE_CNTXT_OFLD, id,
- q->txq[TXQ_OFLD].phys_addr,
- q->txq[TXQ_OFLD].size, 0, 1, 0);
- if (ret)
- goto err_unlock;
- }
-
- if (ntxq > 2) {
- ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
- SGE_CNTXT_CTRL, id,
- q->txq[TXQ_CTRL].phys_addr,
- q->txq[TXQ_CTRL].size,
- q->txq[TXQ_CTRL].token, 1, 0);
- if (ret)
- goto err_unlock;
- }
-
- spin_unlock_irq(&adapter->sge.reg_lock);
-
- q->adap = adapter;
- q->netdev = dev;
- q->tx_q = netdevq;
- t3_update_qset_coalesce(q, p);
-
- avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
- GFP_KERNEL | __GFP_COMP);
- if (!avail) {
- CH_ALERT(adapter, "free list queue 0 initialization failed\n");
- goto err;
- }
- if (avail < q->fl[0].size)
- CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
- avail);
-
- avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
- GFP_KERNEL | __GFP_COMP);
- if (avail < q->fl[1].size)
- CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
- avail);
- refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
-
- t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
- V_NEWTIMER(q->rspq.holdoff_tmr));
-
- return 0;
-
-err_unlock:
- spin_unlock_irq(&adapter->sge.reg_lock);
-err:
- t3_free_qset(adapter, q);
- return ret;
-}
-
-/**
- * t3_start_sge_timers - start SGE timer call backs
- * @adap: the adapter
- *
- * Starts each SGE queue set's timer call back
- */
-void t3_start_sge_timers(struct adapter *adap)
-{
- int i;
-
- for (i = 0; i < SGE_QSETS; ++i) {
- struct sge_qset *q = &adap->sge.qs[i];
-
- if (q->tx_reclaim_timer.function)
- mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
-
- if (q->rx_reclaim_timer.function)
- mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
- }
-}
-
-/**
- * t3_stop_sge_timers - stop SGE timer call backs
- * @adap: the adapter
- *
- * Stops each SGE queue set's timer call back
- */
-void t3_stop_sge_timers(struct adapter *adap)
-{
- int i;
-
- for (i = 0; i < SGE_QSETS; ++i) {
- struct sge_qset *q = &adap->sge.qs[i];
-
- if (q->tx_reclaim_timer.function)
- del_timer_sync(&q->tx_reclaim_timer);
- if (q->rx_reclaim_timer.function)
- del_timer_sync(&q->rx_reclaim_timer);
- }
-}
-
-/**
- * t3_free_sge_resources - free SGE resources
- * @adap: the adapter
- *
- * Frees resources used by the SGE queue sets.
- */
-void t3_free_sge_resources(struct adapter *adap)
-{
- int i;
-
- for (i = 0; i < SGE_QSETS; ++i)
- t3_free_qset(adap, &adap->sge.qs[i]);
-}
-
-/**
- * t3_sge_start - enable SGE
- * @adap: the adapter
- *
- * Enables the SGE for DMAs. This is the last step in starting packet
- * transfers.
- */
-void t3_sge_start(struct adapter *adap)
-{
- t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
-}
-
-/**
- * t3_sge_stop - disable SGE operation
- * @adap: the adapter
- *
- * Disables the DMA engine. This can be called in emeregencies (e.g.,
- * from error interrupts) or from normal process context. In the latter
- * case it also disables any pending queue restart tasklets. Note that
- * if it is called in interrupt context it cannot disable the restart
- * tasklets as it cannot wait, however the tasklets will have no effect
- * since the doorbells are disabled and the driver will call this again
- * later from process context, at which time the tasklets will be stopped
- * if they are still running.
- */
-void t3_sge_stop(struct adapter *adap)
-{
- t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
- if (!in_interrupt()) {
- int i;
-
- for (i = 0; i < SGE_QSETS; ++i) {
- struct sge_qset *qs = &adap->sge.qs[i];
-
- tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
- tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
- }
- }
-}
-
-/**
- * t3_sge_init - initialize SGE
- * @adap: the adapter
- * @p: the SGE parameters
- *
- * Performs SGE initialization needed every time after a chip reset.
- * We do not initialize any of the queue sets here, instead the driver
- * top-level must request those individually. We also do not enable DMA
- * here, that should be done after the queues have been set up.
- */
-void t3_sge_init(struct adapter *adap, struct sge_params *p)
-{
- unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
-
- ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
- F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
- V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
- V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
-#if SGE_NUM_GENBITS == 1
- ctrl |= F_EGRGENCTRL;
-#endif
- if (adap->params.rev > 0) {
- if (!(adap->flags & (USING_MSIX | USING_MSI)))
- ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
- }
- t3_write_reg(adap, A_SG_CONTROL, ctrl);
- t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
- V_LORCQDRBTHRSH(512));
- t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
- t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
- V_TIMEOUT(200 * core_ticks_per_usec(adap)));
- t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
- adap->params.rev < T3_REV_C ? 1000 : 500);
- t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
- t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
- t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
- t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
- t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
-}
-
-/**
- * t3_sge_prep - one-time SGE initialization
- * @adap: the associated adapter
- * @p: SGE parameters
- *
- * Performs one-time initialization of SGE SW state. Includes determining
- * defaults for the assorted SGE parameters, which admins can change until
- * they are used to initialize the SGE.
- */
-void t3_sge_prep(struct adapter *adap, struct sge_params *p)
-{
- int i;
-
- p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
- for (i = 0; i < SGE_QSETS; ++i) {
- struct qset_params *q = p->qset + i;
-
- q->polling = adap->params.rev > 0;
- q->coalesce_usecs = 5;
- q->rspq_size = 1024;
- q->fl_size = 1024;
- q->jumbo_size = 512;
- q->txq_size[TXQ_ETH] = 1024;
- q->txq_size[TXQ_OFLD] = 1024;
- q->txq_size[TXQ_CTRL] = 256;
- q->cong_thres = 0;
- }
-
- spin_lock_init(&adap->sge.reg_lock);
-}
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
deleted file mode 100644
index 56adf448b9f..00000000000
--- a/drivers/net/cxgb4/sge.c
+++ /dev/null
@@ -1,2442 +0,0 @@
-/*
- * This file is part of the Chelsio T4 Ethernet driver for Linux.
- *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#include <linux/dma-mapping.h>
-#include <linux/jiffies.h>
-#include <linux/prefetch.h>
-#include <net/ipv6.h>
-#include <net/tcp.h>
-#include "cxgb4.h"
-#include "t4_regs.h"
-#include "t4_msg.h"
-#include "t4fw_api.h"
-
-/*
- * Rx buffer size. We use largish buffers if possible but settle for single
- * pages under memory shortage.
- */
-#if PAGE_SHIFT >= 16
-# define FL_PG_ORDER 0
-#else
-# define FL_PG_ORDER (16 - PAGE_SHIFT)
-#endif
-
-/* RX_PULL_LEN should be <= RX_COPY_THRES */
-#define RX_COPY_THRES 256
-#define RX_PULL_LEN 128
-
-/*
- * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
- * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
- */
-#define RX_PKT_SKB_LEN 512
-
-/* Ethernet header padding prepended to RX_PKTs */
-#define RX_PKT_PAD 2
-
-/*
- * Max number of Tx descriptors we clean up at a time. Should be modest as
- * freeing skbs isn't cheap and it happens while holding locks. We just need
- * to free packets faster than they arrive, we eventually catch up and keep
- * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
- */
-#define MAX_TX_RECLAIM 16
-
-/*
- * Max number of Rx buffers we replenish at a time. Again keep this modest,
- * allocating buffers isn't cheap either.
- */
-#define MAX_RX_REFILL 16U
-
-/*
- * Period of the Rx queue check timer. This timer is infrequent as it has
- * something to do only when the system experiences severe memory shortage.
- */
-#define RX_QCHECK_PERIOD (HZ / 2)
-
-/*
- * Period of the Tx queue check timer.
- */
-#define TX_QCHECK_PERIOD (HZ / 2)
-
-/*
- * Max number of Tx descriptors to be reclaimed by the Tx timer.
- */
-#define MAX_TIMER_TX_RECLAIM 100
-
-/*
- * Timer index used when backing off due to memory shortage.
- */
-#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
-
-/*
- * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
- * attempt to refill it.
- */
-#define FL_STARVE_THRES 4
-
-/*
- * Suspend an Ethernet Tx queue with fewer available descriptors than this.
- * This is the same as calc_tx_descs() for a TSO packet with
- * nr_frags == MAX_SKB_FRAGS.
- */
-#define ETHTXQ_STOP_THRES \
- (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
-
-/*
- * Suspension threshold for non-Ethernet Tx queues. We require enough room
- * for a full sized WR.
- */
-#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
-
-/*
- * Max Tx descriptor space we allow for an Ethernet packet to be inlined
- * into a WR.
- */
-#define MAX_IMM_TX_PKT_LEN 128
-
-/*
- * Max size of a WR sent through a control Tx queue.
- */
-#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
-
-enum {
- /* packet alignment in FL buffers */
- FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
- /* egress status entry size */
- STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
-};
-
-struct tx_sw_desc { /* SW state per Tx descriptor */
- struct sk_buff *skb;
- struct ulptx_sgl *sgl;
-};
-
-struct rx_sw_desc { /* SW state per Rx descriptor */
- struct page *page;
- dma_addr_t dma_addr;
-};
-
-/*
- * The low bits of rx_sw_desc.dma_addr have special meaning.
- */
-enum {
- RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */
- RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
-};
-
-static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
-{
- return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
-}
-
-static inline bool is_buf_mapped(const struct rx_sw_desc *d)
-{
- return !(d->dma_addr & RX_UNMAPPED_BUF);
-}
-
-/**
- * txq_avail - return the number of available slots in a Tx queue
- * @q: the Tx queue
- *
- * Returns the number of descriptors in a Tx queue available to write new
- * packets.
- */
-static inline unsigned int txq_avail(const struct sge_txq *q)
-{
- return q->size - 1 - q->in_use;
-}
-
-/**
- * fl_cap - return the capacity of a free-buffer list
- * @fl: the FL
- *
- * Returns the capacity of a free-buffer list. The capacity is less than
- * the size because one descriptor needs to be left unpopulated, otherwise
- * HW will think the FL is empty.
- */
-static inline unsigned int fl_cap(const struct sge_fl *fl)
-{
- return fl->size - 8; /* 1 descriptor = 8 buffers */
-}
-
-static inline bool fl_starving(const struct sge_fl *fl)
-{
- return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
-}
-
-static int map_skb(struct device *dev, const struct sk_buff *skb,
- dma_addr_t *addr)
-{
- const skb_frag_t *fp, *end;
- const struct skb_shared_info *si;
-
- *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
- if (dma_mapping_error(dev, *addr))
- goto out_err;
-
- si = skb_shinfo(skb);
- end = &si->frags[si->nr_frags];
-
- for (fp = si->frags; fp < end; fp++) {
- *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, *addr))
- goto unwind;
- }
- return 0;
-
-unwind:
- while (fp-- > si->frags)
- dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
-
- dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
-out_err:
- return -ENOMEM;
-}
-
-#ifdef CONFIG_NEED_DMA_MAP_STATE
-static void unmap_skb(struct device *dev, const struct sk_buff *skb,
- const dma_addr_t *addr)
-{
- const skb_frag_t *fp, *end;
- const struct skb_shared_info *si;
-
- dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
-
- si = skb_shinfo(skb);
- end = &si->frags[si->nr_frags];
- for (fp = si->frags; fp < end; fp++)
- dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE);
-}
-
-/**
- * deferred_unmap_destructor - unmap a packet when it is freed
- * @skb: the packet
- *
- * This is the packet destructor used for Tx packets that need to remain
- * mapped until they are freed rather than until their Tx descriptors are
- * freed.
- */
-static void deferred_unmap_destructor(struct sk_buff *skb)
-{
- unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
-}
-#endif
-
-static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
- const struct ulptx_sgl *sgl, const struct sge_txq *q)
-{
- const struct ulptx_sge_pair *p;
- unsigned int nfrags = skb_shinfo(skb)->nr_frags;
-
- if (likely(skb_headlen(skb)))
- dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
- DMA_TO_DEVICE);
- else {
- dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
- DMA_TO_DEVICE);
- nfrags--;
- }
-
- /*
- * the complexity below is because of the possibility of a wrap-around
- * in the middle of an SGL
- */
- for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
- if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
-unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
- ntohl(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
- ntohl(p->len[1]), DMA_TO_DEVICE);
- p++;
- } else if ((u8 *)p == (u8 *)q->stat) {
- p = (const struct ulptx_sge_pair *)q->desc;
- goto unmap;
- } else if ((u8 *)p + 8 == (u8 *)q->stat) {
- const __be64 *addr = (const __be64 *)q->desc;
-
- dma_unmap_page(dev, be64_to_cpu(addr[0]),
- ntohl(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(addr[1]),
- ntohl(p->len[1]), DMA_TO_DEVICE);
- p = (const struct ulptx_sge_pair *)&addr[2];
- } else {
- const __be64 *addr = (const __be64 *)q->desc;
-
- dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
- ntohl(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(addr[0]),
- ntohl(p->len[1]), DMA_TO_DEVICE);
- p = (const struct ulptx_sge_pair *)&addr[1];
- }
- }
- if (nfrags) {
- __be64 addr;
-
- if ((u8 *)p == (u8 *)q->stat)
- p = (const struct ulptx_sge_pair *)q->desc;
- addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
- *(const __be64 *)q->desc;
- dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
- DMA_TO_DEVICE);
- }
-}
-
-/**
- * free_tx_desc - reclaims Tx descriptors and their buffers
- * @adapter: the adapter
- * @q: the Tx queue to reclaim descriptors from
- * @n: the number of descriptors to reclaim
- * @unmap: whether the buffers should be unmapped for DMA
- *
- * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
- * Tx buffers. Called with the Tx queue lock held.
- */
-static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
- unsigned int n, bool unmap)
-{
- struct tx_sw_desc *d;
- unsigned int cidx = q->cidx;
- struct device *dev = adap->pdev_dev;
-
- d = &q->sdesc[cidx];
- while (n--) {
- if (d->skb) { /* an SGL is present */
- if (unmap)
- unmap_sgl(dev, d->skb, d->sgl, q);
- kfree_skb(d->skb);
- d->skb = NULL;
- }
- ++d;
- if (++cidx == q->size) {
- cidx = 0;
- d = q->sdesc;
- }
- }
- q->cidx = cidx;
-}
-
-/*
- * Return the number of reclaimable descriptors in a Tx queue.
- */
-static inline int reclaimable(const struct sge_txq *q)
-{
- int hw_cidx = ntohs(q->stat->cidx);
- hw_cidx -= q->cidx;
- return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
-}
-
-/**
- * reclaim_completed_tx - reclaims completed Tx descriptors
- * @adap: the adapter
- * @q: the Tx queue to reclaim completed descriptors from
- * @unmap: whether the buffers should be unmapped for DMA
- *
- * Reclaims Tx descriptors that the SGE has indicated it has processed,
- * and frees the associated buffers if possible. Called with the Tx
- * queue locked.
- */
-static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
- bool unmap)
-{
- int avail = reclaimable(q);
-
- if (avail) {
- /*
- * Limit the amount of clean up work we do at a time to keep
- * the Tx lock hold time O(1).
- */
- if (avail > MAX_TX_RECLAIM)
- avail = MAX_TX_RECLAIM;
-
- free_tx_desc(adap, q, avail, unmap);
- q->in_use -= avail;
- }
-}
-
-static inline int get_buf_size(const struct rx_sw_desc *d)
-{
-#if FL_PG_ORDER > 0
- return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
- PAGE_SIZE;
-#else
- return PAGE_SIZE;
-#endif
-}
-
-/**
- * free_rx_bufs - free the Rx buffers on an SGE free list
- * @adap: the adapter
- * @q: the SGE free list to free buffers from
- * @n: how many buffers to free
- *
- * Release the next @n buffers on an SGE free-buffer Rx queue. The
- * buffers must be made inaccessible to HW before calling this function.
- */
-static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
-{
- while (n--) {
- struct rx_sw_desc *d = &q->sdesc[q->cidx];
-
- if (is_buf_mapped(d))
- dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
- get_buf_size(d), PCI_DMA_FROMDEVICE);
- put_page(d->page);
- d->page = NULL;
- if (++q->cidx == q->size)
- q->cidx = 0;
- q->avail--;
- }
-}
-
-/**
- * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
- * @adap: the adapter
- * @q: the SGE free list
- *
- * Unmap the current buffer on an SGE free-buffer Rx queue. The
- * buffer must be made inaccessible to HW before calling this function.
- *
- * This is similar to @free_rx_bufs above but does not free the buffer.
- * Do note that the FL still loses any further access to the buffer.
- */
-static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
-{
- struct rx_sw_desc *d = &q->sdesc[q->cidx];
-
- if (is_buf_mapped(d))
- dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
- get_buf_size(d), PCI_DMA_FROMDEVICE);
- d->page = NULL;
- if (++q->cidx == q->size)
- q->cidx = 0;
- q->avail--;
-}
-
-static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
-{
- if (q->pend_cred >= 8) {
- wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
- QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
- q->pend_cred &= 7;
- }
-}
-
-static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
- dma_addr_t mapping)
-{
- sd->page = pg;
- sd->dma_addr = mapping; /* includes size low bits */
-}
-
-/**
- * refill_fl - refill an SGE Rx buffer ring
- * @adap: the adapter
- * @q: the ring to refill
- * @n: the number of new buffers to allocate
- * @gfp: the gfp flags for the allocations
- *
- * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
- * allocated with the supplied gfp flags. The caller must assure that
- * @n does not exceed the queue's capacity. If afterwards the queue is
- * found critically low mark it as starving in the bitmap of starving FLs.
- *
- * Returns the number of buffers allocated.
- */
-static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
- gfp_t gfp)
-{
- struct page *pg;
- dma_addr_t mapping;
- unsigned int cred = q->avail;
- __be64 *d = &q->desc[q->pidx];
- struct rx_sw_desc *sd = &q->sdesc[q->pidx];
-
- gfp |= __GFP_NOWARN; /* failures are expected */
-
-#if FL_PG_ORDER > 0
- /*
- * Prefer large buffers
- */
- while (n) {
- pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
- if (unlikely(!pg)) {
- q->large_alloc_failed++;
- break; /* fall back to single pages */
- }
-
- mapping = dma_map_page(adap->pdev_dev, pg, 0,
- PAGE_SIZE << FL_PG_ORDER,
- PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
- __free_pages(pg, FL_PG_ORDER);
- goto out; /* do not try small pages for this error */
- }
- mapping |= RX_LARGE_BUF;
- *d++ = cpu_to_be64(mapping);
-
- set_rx_sw_desc(sd, pg, mapping);
- sd++;
-
- q->avail++;
- if (++q->pidx == q->size) {
- q->pidx = 0;
- sd = q->sdesc;
- d = q->desc;
- }
- n--;
- }
-#endif
-
- while (n--) {
- pg = __netdev_alloc_page(adap->port[0], gfp);
- if (unlikely(!pg)) {
- q->alloc_failed++;
- break;
- }
-
- mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
- netdev_free_page(adap->port[0], pg);
- goto out;
- }
- *d++ = cpu_to_be64(mapping);
-
- set_rx_sw_desc(sd, pg, mapping);
- sd++;
-
- q->avail++;
- if (++q->pidx == q->size) {
- q->pidx = 0;
- sd = q->sdesc;
- d = q->desc;
- }
- }
-
-out: cred = q->avail - cred;
- q->pend_cred += cred;
- ring_fl_db(adap, q);
-
- if (unlikely(fl_starving(q))) {
- smp_wmb();
- set_bit(q->cntxt_id - adap->sge.egr_start,
- adap->sge.starving_fl);
- }
-
- return cred;
-}
-
-static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
-{
- refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
- GFP_ATOMIC);
-}
-
-/**
- * alloc_ring - allocate resources for an SGE descriptor ring
- * @dev: the PCI device's core device
- * @nelem: the number of descriptors
- * @elem_size: the size of each descriptor
- * @sw_size: the size of the SW state associated with each ring element
- * @phys: the physical address of the allocated ring
- * @metadata: address of the array holding the SW state for the ring
- * @stat_size: extra space in HW ring for status information
- * @node: preferred node for memory allocations
- *
- * Allocates resources for an SGE descriptor ring, such as Tx queues,
- * free buffer lists, or response queues. Each SGE ring requires
- * space for its HW descriptors plus, optionally, space for the SW state
- * associated with each HW entry (the metadata). The function returns
- * three values: the virtual address for the HW ring (the return value
- * of the function), the bus address of the HW ring, and the address
- * of the SW ring.
- */
-static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
- size_t sw_size, dma_addr_t *phys, void *metadata,
- size_t stat_size, int node)
-{
- size_t len = nelem * elem_size + stat_size;
- void *s = NULL;
- void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
-
- if (!p)
- return NULL;
- if (sw_size) {
- s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
-
- if (!s) {
- dma_free_coherent(dev, len, p, *phys);
- return NULL;
- }
- }
- if (metadata)
- *(void **)metadata = s;
- memset(p, 0, len);
- return p;
-}
-
-/**
- * sgl_len - calculates the size of an SGL of the given capacity
- * @n: the number of SGL entries
- *
- * Calculates the number of flits needed for a scatter/gather list that
- * can hold the given number of entries.
- */
-static inline unsigned int sgl_len(unsigned int n)
-{
- n--;
- return (3 * n) / 2 + (n & 1) + 2;
-}
-
-/**
- * flits_to_desc - returns the num of Tx descriptors for the given flits
- * @n: the number of flits
- *
- * Returns the number of Tx descriptors needed for the supplied number
- * of flits.
- */
-static inline unsigned int flits_to_desc(unsigned int n)
-{
- BUG_ON(n > SGE_MAX_WR_LEN / 8);
- return DIV_ROUND_UP(n, 8);
-}
-
-/**
- * is_eth_imm - can an Ethernet packet be sent as immediate data?
- * @skb: the packet
- *
- * Returns whether an Ethernet packet is small enough to fit as
- * immediate data.
- */
-static inline int is_eth_imm(const struct sk_buff *skb)
-{
- return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
-}
-
-/**
- * calc_tx_flits - calculate the number of flits for a packet Tx WR
- * @skb: the packet
- *
- * Returns the number of flits needed for a Tx WR for the given Ethernet
- * packet, including the needed WR and CPL headers.
- */
-static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
-{
- unsigned int flits;
-
- if (is_eth_imm(skb))
- return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
-
- flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
- if (skb_shinfo(skb)->gso_size)
- flits += 2;
- return flits;
-}
-
-/**
- * calc_tx_descs - calculate the number of Tx descriptors for a packet
- * @skb: the packet
- *
- * Returns the number of Tx descriptors needed for the given Ethernet
- * packet, including the needed WR and CPL headers.
- */
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
-{
- return flits_to_desc(calc_tx_flits(skb));
-}
-
-/**
- * write_sgl - populate a scatter/gather list for a packet
- * @skb: the packet
- * @q: the Tx queue we are writing into
- * @sgl: starting location for writing the SGL
- * @end: points right after the end of the SGL
- * @start: start offset into skb main-body data to include in the SGL
- * @addr: the list of bus addresses for the SGL elements
- *
- * Generates a gather list for the buffers that make up a packet.
- * The caller must provide adequate space for the SGL that will be written.
- * The SGL includes all of the packet's page fragments and the data in its
- * main body except for the first @start bytes. @sgl must be 16-byte
- * aligned and within a Tx descriptor with available space. @end points
- * right after the end of the SGL but does not account for any potential
- * wrap around, i.e., @end > @sgl.
- */
-static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
- struct ulptx_sgl *sgl, u64 *end, unsigned int start,
- const dma_addr_t *addr)
-{
- unsigned int i, len;
- struct ulptx_sge_pair *to;
- const struct skb_shared_info *si = skb_shinfo(skb);
- unsigned int nfrags = si->nr_frags;
- struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
-
- len = skb_headlen(skb) - start;
- if (likely(len)) {
- sgl->len0 = htonl(len);
- sgl->addr0 = cpu_to_be64(addr[0] + start);
- nfrags++;
- } else {
- sgl->len0 = htonl(si->frags[0].size);
- sgl->addr0 = cpu_to_be64(addr[1]);
- }
-
- sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
- if (likely(--nfrags == 0))
- return;
- /*
- * Most of the complexity below deals with the possibility we hit the
- * end of the queue in the middle of writing the SGL. For this case
- * only we create the SGL in a temporary buffer and then copy it.
- */
- to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
-
- for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
- to->len[0] = cpu_to_be32(si->frags[i].size);
- to->len[1] = cpu_to_be32(si->frags[++i].size);
- to->addr[0] = cpu_to_be64(addr[i]);
- to->addr[1] = cpu_to_be64(addr[++i]);
- }
- if (nfrags) {
- to->len[0] = cpu_to_be32(si->frags[i].size);
- to->len[1] = cpu_to_be32(0);
- to->addr[0] = cpu_to_be64(addr[i + 1]);
- }
- if (unlikely((u8 *)end > (u8 *)q->stat)) {
- unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
-
- if (likely(part0))
- memcpy(sgl->sge, buf, part0);
- part1 = (u8 *)end - (u8 *)q->stat;
- memcpy(q->desc, (u8 *)buf + part0, part1);
- end = (void *)q->desc + part1;
- }
- if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
- *(u64 *)end = 0;
-}
-
-/**
- * ring_tx_db - check and potentially ring a Tx queue's doorbell
- * @adap: the adapter
- * @q: the Tx queue
- * @n: number of new descriptors to give to HW
- *
- * Ring the doorbel for a Tx queue.
- */
-static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
-{
- wmb(); /* write descriptors before telling HW */
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(q->cntxt_id) | PIDX(n));
-}
-
-/**
- * inline_tx_skb - inline a packet's data into Tx descriptors
- * @skb: the packet
- * @q: the Tx queue where the packet will be inlined
- * @pos: starting position in the Tx queue where to inline the packet
- *
- * Inline a packet's contents directly into Tx descriptors, starting at
- * the given position within the Tx DMA ring.
- * Most of the complexity of this operation is dealing with wrap arounds
- * in the middle of the packet we want to inline.
- */
-static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
- void *pos)
-{
- u64 *p;
- int left = (void *)q->stat - pos;
-
- if (likely(skb->len <= left)) {
- if (likely(!skb->data_len))
- skb_copy_from_linear_data(skb, pos, skb->len);
- else
- skb_copy_bits(skb, 0, pos, skb->len);
- pos += skb->len;
- } else {
- skb_copy_bits(skb, 0, pos, left);
- skb_copy_bits(skb, left, q->desc, skb->len - left);
- pos = (void *)q->desc + (skb->len - left);
- }
-
- /* 0-pad to multiple of 16 */
- p = PTR_ALIGN(pos, 8);
- if ((uintptr_t)p & 8)
- *p = 0;
-}
-
-/*
- * Figure out what HW csum a packet wants and return the appropriate control
- * bits.
- */
-static u64 hwcsum(const struct sk_buff *skb)
-{
- int csum_type;
- const struct iphdr *iph = ip_hdr(skb);
-
- if (iph->version == 4) {
- if (iph->protocol == IPPROTO_TCP)
- csum_type = TX_CSUM_TCPIP;
- else if (iph->protocol == IPPROTO_UDP)
- csum_type = TX_CSUM_UDPIP;
- else {
-nocsum: /*
- * unknown protocol, disable HW csum
- * and hope a bad packet is detected
- */
- return TXPKT_L4CSUM_DIS;
- }
- } else {
- /*
- * this doesn't work with extension headers
- */
- const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
-
- if (ip6h->nexthdr == IPPROTO_TCP)
- csum_type = TX_CSUM_TCPIP6;
- else if (ip6h->nexthdr == IPPROTO_UDP)
- csum_type = TX_CSUM_UDPIP6;
- else
- goto nocsum;
- }
-
- if (likely(csum_type >= TX_CSUM_TCPIP))
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
- TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
- else {
- int start = skb_transport_offset(skb);
-
- return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
- TXPKT_CSUM_LOC(start + skb->csum_offset);
- }
-}
-
-static void eth_txq_stop(struct sge_eth_txq *q)
-{
- netif_tx_stop_queue(q->txq);
- q->q.stops++;
-}
-
-static inline void txq_advance(struct sge_txq *q, unsigned int n)
-{
- q->in_use += n;
- q->pidx += n;
- if (q->pidx >= q->size)
- q->pidx -= q->size;
-}
-
-/**
- * t4_eth_xmit - add a packet to an Ethernet Tx queue
- * @skb: the packet
- * @dev: the egress net device
- *
- * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
- */
-netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- u32 wr_mid;
- u64 cntrl, *end;
- int qidx, credits;
- unsigned int flits, ndesc;
- struct adapter *adap;
- struct sge_eth_txq *q;
- const struct port_info *pi;
- struct fw_eth_tx_pkt_wr *wr;
- struct cpl_tx_pkt_core *cpl;
- const struct skb_shared_info *ssi;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
-
- /*
- * The chip min packet length is 10 octets but play safe and reject
- * anything shorter than an Ethernet header.
- */
- if (unlikely(skb->len < ETH_HLEN)) {
-out_free: dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- pi = netdev_priv(dev);
- adap = pi->adapter;
- qidx = skb_get_queue_mapping(skb);
- q = &adap->sge.ethtxq[qidx + pi->first_qset];
-
- reclaim_completed_tx(adap, &q->q, true);
-
- flits = calc_tx_flits(skb);
- ndesc = flits_to_desc(flits);
- credits = txq_avail(&q->q) - ndesc;
-
- if (unlikely(credits < 0)) {
- eth_txq_stop(q);
- dev_err(adap->pdev_dev,
- "%s: Tx ring %u full while queue awake!\n",
- dev->name, qidx);
- return NETDEV_TX_BUSY;
- }
-
- if (!is_eth_imm(skb) &&
- unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
- q->mapping_err++;
- goto out_free;
- }
-
- wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
- if (unlikely(credits < ETHTXQ_STOP_THRES)) {
- eth_txq_stop(q);
- wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
- }
-
- wr = (void *)&q->q.desc[q->q.pidx];
- wr->equiq_to_len16 = htonl(wr_mid);
- wr->r3 = cpu_to_be64(0);
- end = (u64 *)wr + flits;
-
- ssi = skb_shinfo(skb);
- if (ssi->gso_size) {
- struct cpl_tx_pkt_lso *lso = (void *)wr;
- bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
- int l3hdr_len = skb_network_header_len(skb);
- int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
-
- wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
- FW_WR_IMMDLEN(sizeof(*lso)));
- lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE | LSO_LAST_SLICE |
- LSO_IPV6(v6) |
- LSO_ETHHDR_LEN(eth_xtra_len / 4) |
- LSO_IPHDR_LEN(l3hdr_len / 4) |
- LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
- lso->c.ipid_ofst = htons(0);
- lso->c.mss = htons(ssi->gso_size);
- lso->c.seqno_offset = htonl(0);
- lso->c.len = htonl(skb->len);
- cpl = (void *)(lso + 1);
- cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN(l3hdr_len) |
- TXPKT_ETHHDR_LEN(eth_xtra_len);
- q->tso++;
- q->tx_cso += ssi->gso_segs;
- } else {
- int len;
-
- len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
- wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
- FW_WR_IMMDLEN(len));
- cpl = (void *)(wr + 1);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
- q->tx_cso++;
- } else
- cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
- }
-
- if (vlan_tx_tag_present(skb)) {
- q->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
- }
-
- cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
- TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
- cpl->pack = htons(0);
- cpl->len = htons(skb->len);
- cpl->ctrl1 = cpu_to_be64(cntrl);
-
- if (is_eth_imm(skb)) {
- inline_tx_skb(skb, &q->q, cpl + 1);
- dev_kfree_skb(skb);
- } else {
- int last_desc;
-
- write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
- addr);
- skb_orphan(skb);
-
- last_desc = q->q.pidx + ndesc - 1;
- if (last_desc >= q->q.size)
- last_desc -= q->q.size;
- q->q.sdesc[last_desc].skb = skb;
- q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
- }
-
- txq_advance(&q->q, ndesc);
-
- ring_tx_db(adap, &q->q, ndesc);
- return NETDEV_TX_OK;
-}
-
-/**
- * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
- * @q: the SGE control Tx queue
- *
- * This is a variant of reclaim_completed_tx() that is used for Tx queues
- * that send only immediate data (presently just the control queues) and
- * thus do not have any sk_buffs to release.
- */
-static inline void reclaim_completed_tx_imm(struct sge_txq *q)
-{
- int hw_cidx = ntohs(q->stat->cidx);
- int reclaim = hw_cidx - q->cidx;
-
- if (reclaim < 0)
- reclaim += q->size;
-
- q->in_use -= reclaim;
- q->cidx = hw_cidx;
-}
-
-/**
- * is_imm - check whether a packet can be sent as immediate data
- * @skb: the packet
- *
- * Returns true if a packet can be sent as a WR with immediate data.
- */
-static inline int is_imm(const struct sk_buff *skb)
-{
- return skb->len <= MAX_CTRL_WR_LEN;
-}
-
-/**
- * ctrlq_check_stop - check if a control queue is full and should stop
- * @q: the queue
- * @wr: most recent WR written to the queue
- *
- * Check if a control queue has become full and should be stopped.
- * We clean up control queue descriptors very lazily, only when we are out.
- * If the queue is still full after reclaiming any completed descriptors
- * we suspend it and have the last WR wake it up.
- */
-static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
-{
- reclaim_completed_tx_imm(&q->q);
- if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
- wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
- q->q.stops++;
- q->full = 1;
- }
-}
-
-/**
- * ctrl_xmit - send a packet through an SGE control Tx queue
- * @q: the control queue
- * @skb: the packet
- *
- * Send a packet through an SGE control Tx queue. Packets sent through
- * a control queue must fit entirely as immediate data.
- */
-static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
-{
- unsigned int ndesc;
- struct fw_wr_hdr *wr;
-
- if (unlikely(!is_imm(skb))) {
- WARN_ON(1);
- dev_kfree_skb(skb);
- return NET_XMIT_DROP;
- }
-
- ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
- spin_lock(&q->sendq.lock);
-
- if (unlikely(q->full)) {
- skb->priority = ndesc; /* save for restart */
- __skb_queue_tail(&q->sendq, skb);
- spin_unlock(&q->sendq.lock);
- return NET_XMIT_CN;
- }
-
- wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
- inline_tx_skb(skb, &q->q, wr);
-
- txq_advance(&q->q, ndesc);
- if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
- ctrlq_check_stop(q, wr);
-
- ring_tx_db(q->adap, &q->q, ndesc);
- spin_unlock(&q->sendq.lock);
-
- kfree_skb(skb);
- return NET_XMIT_SUCCESS;
-}
-
-/**
- * restart_ctrlq - restart a suspended control queue
- * @data: the control queue to restart
- *
- * Resumes transmission on a suspended Tx control queue.
- */
-static void restart_ctrlq(unsigned long data)
-{
- struct sk_buff *skb;
- unsigned int written = 0;
- struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
-
- spin_lock(&q->sendq.lock);
- reclaim_completed_tx_imm(&q->q);
- BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
-
- while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
- struct fw_wr_hdr *wr;
- unsigned int ndesc = skb->priority; /* previously saved */
-
- /*
- * Write descriptors and free skbs outside the lock to limit
- * wait times. q->full is still set so new skbs will be queued.
- */
- spin_unlock(&q->sendq.lock);
-
- wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
- inline_tx_skb(skb, &q->q, wr);
- kfree_skb(skb);
-
- written += ndesc;
- txq_advance(&q->q, ndesc);
- if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
- unsigned long old = q->q.stops;
-
- ctrlq_check_stop(q, wr);
- if (q->q.stops != old) { /* suspended anew */
- spin_lock(&q->sendq.lock);
- goto ringdb;
- }
- }
- if (written > 16) {
- ring_tx_db(q->adap, &q->q, written);
- written = 0;
- }
- spin_lock(&q->sendq.lock);
- }
- q->full = 0;
-ringdb: if (written)
- ring_tx_db(q->adap, &q->q, written);
- spin_unlock(&q->sendq.lock);
-}
-
-/**
- * t4_mgmt_tx - send a management message
- * @adap: the adapter
- * @skb: the packet containing the management message
- *
- * Send a management message through control queue 0.
- */
-int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
-{
- int ret;
-
- local_bh_disable();
- ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
- local_bh_enable();
- return ret;
-}
-
-/**
- * is_ofld_imm - check whether a packet can be sent as immediate data
- * @skb: the packet
- *
- * Returns true if a packet can be sent as an offload WR with immediate
- * data. We currently use the same limit as for Ethernet packets.
- */
-static inline int is_ofld_imm(const struct sk_buff *skb)
-{
- return skb->len <= MAX_IMM_TX_PKT_LEN;
-}
-
-/**
- * calc_tx_flits_ofld - calculate # of flits for an offload packet
- * @skb: the packet
- *
- * Returns the number of flits needed for the given offload packet.
- * These packets are already fully constructed and no additional headers
- * will be added.
- */
-static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
-{
- unsigned int flits, cnt;
-
- if (is_ofld_imm(skb))
- return DIV_ROUND_UP(skb->len, 8);
-
- flits = skb_transport_offset(skb) / 8U; /* headers */
- cnt = skb_shinfo(skb)->nr_frags;
- if (skb->tail != skb->transport_header)
- cnt++;
- return flits + sgl_len(cnt);
-}
-
-/**
- * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
- * @adap: the adapter
- * @q: the queue to stop
- *
- * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
- * inability to map packets. A periodic timer attempts to restart
- * queues so marked.
- */
-static void txq_stop_maperr(struct sge_ofld_txq *q)
-{
- q->mapping_err++;
- q->q.stops++;
- set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
- q->adap->sge.txq_maperr);
-}
-
-/**
- * ofldtxq_stop - stop an offload Tx queue that has become full
- * @q: the queue to stop
- * @skb: the packet causing the queue to become full
- *
- * Stops an offload Tx queue that has become full and modifies the packet
- * being written to request a wakeup.
- */
-static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
-{
- struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
-
- wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
- q->q.stops++;
- q->full = 1;
-}
-
-/**
- * service_ofldq - restart a suspended offload queue
- * @q: the offload queue
- *
- * Services an offload Tx queue by moving packets from its packet queue
- * to the HW Tx ring. The function starts and ends with the queue locked.
- */
-static void service_ofldq(struct sge_ofld_txq *q)
-{
- u64 *pos;
- int credits;
- struct sk_buff *skb;
- unsigned int written = 0;
- unsigned int flits, ndesc;
-
- while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
- /*
- * We drop the lock but leave skb on sendq, thus retaining
- * exclusive access to the state of the queue.
- */
- spin_unlock(&q->sendq.lock);
-
- reclaim_completed_tx(q->adap, &q->q, false);
-
- flits = skb->priority; /* previously saved */
- ndesc = flits_to_desc(flits);
- credits = txq_avail(&q->q) - ndesc;
- BUG_ON(credits < 0);
- if (unlikely(credits < TXQ_STOP_THRES))
- ofldtxq_stop(q, skb);
-
- pos = (u64 *)&q->q.desc[q->q.pidx];
- if (is_ofld_imm(skb))
- inline_tx_skb(skb, &q->q, pos);
- else if (map_skb(q->adap->pdev_dev, skb,
- (dma_addr_t *)skb->head)) {
- txq_stop_maperr(q);
- spin_lock(&q->sendq.lock);
- break;
- } else {
- int last_desc, hdr_len = skb_transport_offset(skb);
-
- memcpy(pos, skb->data, hdr_len);
- write_sgl(skb, &q->q, (void *)pos + hdr_len,
- pos + flits, hdr_len,
- (dma_addr_t *)skb->head);
-#ifdef CONFIG_NEED_DMA_MAP_STATE
- skb->dev = q->adap->port[0];
- skb->destructor = deferred_unmap_destructor;
-#endif
- last_desc = q->q.pidx + ndesc - 1;
- if (last_desc >= q->q.size)
- last_desc -= q->q.size;
- q->q.sdesc[last_desc].skb = skb;
- }
-
- txq_advance(&q->q, ndesc);
- written += ndesc;
- if (unlikely(written > 32)) {
- ring_tx_db(q->adap, &q->q, written);
- written = 0;
- }
-
- spin_lock(&q->sendq.lock);
- __skb_unlink(skb, &q->sendq);
- if (is_ofld_imm(skb))
- kfree_skb(skb);
- }
- if (likely(written))
- ring_tx_db(q->adap, &q->q, written);
-}
-
-/**
- * ofld_xmit - send a packet through an offload queue
- * @q: the Tx offload queue
- * @skb: the packet
- *
- * Send an offload packet through an SGE offload queue.
- */
-static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
-{
- skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
- spin_lock(&q->sendq.lock);
- __skb_queue_tail(&q->sendq, skb);
- if (q->sendq.qlen == 1)
- service_ofldq(q);
- spin_unlock(&q->sendq.lock);
- return NET_XMIT_SUCCESS;
-}
-
-/**
- * restart_ofldq - restart a suspended offload queue
- * @data: the offload queue to restart
- *
- * Resumes transmission on a suspended Tx offload queue.
- */
-static void restart_ofldq(unsigned long data)
-{
- struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
-
- spin_lock(&q->sendq.lock);
- q->full = 0; /* the queue actually is completely empty now */
- service_ofldq(q);
- spin_unlock(&q->sendq.lock);
-}
-
-/**
- * skb_txq - return the Tx queue an offload packet should use
- * @skb: the packet
- *
- * Returns the Tx queue an offload packet should use as indicated by bits
- * 1-15 in the packet's queue_mapping.
- */
-static inline unsigned int skb_txq(const struct sk_buff *skb)
-{
- return skb->queue_mapping >> 1;
-}
-
-/**
- * is_ctrl_pkt - return whether an offload packet is a control packet
- * @skb: the packet
- *
- * Returns whether an offload packet should use an OFLD or a CTRL
- * Tx queue as indicated by bit 0 in the packet's queue_mapping.
- */
-static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
-{
- return skb->queue_mapping & 1;
-}
-
-static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
-{
- unsigned int idx = skb_txq(skb);
-
- if (unlikely(is_ctrl_pkt(skb)))
- return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
- return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
-}
-
-/**
- * t4_ofld_send - send an offload packet
- * @adap: the adapter
- * @skb: the packet
- *
- * Sends an offload packet. We use the packet queue_mapping to select the
- * appropriate Tx queue as follows: bit 0 indicates whether the packet
- * should be sent as regular or control, bits 1-15 select the queue.
- */
-int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
-{
- int ret;
-
- local_bh_disable();
- ret = ofld_send(adap, skb);
- local_bh_enable();
- return ret;
-}
-
-/**
- * cxgb4_ofld_send - send an offload packet
- * @dev: the net device
- * @skb: the packet
- *
- * Sends an offload packet. This is an exported version of @t4_ofld_send,
- * intended for ULDs.
- */
-int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
-{
- return t4_ofld_send(netdev2adap(dev), skb);
-}
-EXPORT_SYMBOL(cxgb4_ofld_send);
-
-static inline void copy_frags(struct skb_shared_info *ssi,
- const struct pkt_gl *gl, unsigned int offset)
-{
- unsigned int n;
-
- /* usually there's just one frag */
- ssi->frags[0].page = gl->frags[0].page;
- ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
- ssi->frags[0].size = gl->frags[0].size - offset;
- ssi->nr_frags = gl->nfrags;
- n = gl->nfrags - 1;
- if (n)
- memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
-
- /* get a reference to the last page, we don't own it */
- get_page(gl->frags[n].page);
-}
-
-/**
- * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
- * @gl: the gather list
- * @skb_len: size of sk_buff main body if it carries fragments
- * @pull_len: amount of data to move to the sk_buff's main body
- *
- * Builds an sk_buff from the given packet gather list. Returns the
- * sk_buff or %NULL if sk_buff allocation failed.
- */
-struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
- unsigned int skb_len, unsigned int pull_len)
-{
- struct sk_buff *skb;
-
- /*
- * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
- * size, which is expected since buffers are at least PAGE_SIZEd.
- * In this case packets up to RX_COPY_THRES have only one fragment.
- */
- if (gl->tot_len <= RX_COPY_THRES) {
- skb = dev_alloc_skb(gl->tot_len);
- if (unlikely(!skb))
- goto out;
- __skb_put(skb, gl->tot_len);
- skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
- } else {
- skb = dev_alloc_skb(skb_len);
- if (unlikely(!skb))
- goto out;
- __skb_put(skb, pull_len);
- skb_copy_to_linear_data(skb, gl->va, pull_len);
-
- copy_frags(skb_shinfo(skb), gl, pull_len);
- skb->len = gl->tot_len;
- skb->data_len = skb->len - pull_len;
- skb->truesize += skb->data_len;
- }
-out: return skb;
-}
-EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
-
-/**
- * t4_pktgl_free - free a packet gather list
- * @gl: the gather list
- *
- * Releases the pages of a packet gather list. We do not own the last
- * page on the list and do not free it.
- */
-static void t4_pktgl_free(const struct pkt_gl *gl)
-{
- int n;
- const skb_frag_t *p;
-
- for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
- put_page(p->page);
-}
-
-/*
- * Process an MPS trace packet. Give it an unused protocol number so it won't
- * be delivered to anyone and send it to the stack for capture.
- */
-static noinline int handle_trace_pkt(struct adapter *adap,
- const struct pkt_gl *gl)
-{
- struct sk_buff *skb;
- struct cpl_trace_pkt *p;
-
- skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
- if (unlikely(!skb)) {
- t4_pktgl_free(gl);
- return 0;
- }
-
- p = (struct cpl_trace_pkt *)skb->data;
- __skb_pull(skb, sizeof(*p));
- skb_reset_mac_header(skb);
- skb->protocol = htons(0xffff);
- skb->dev = adap->port[0];
- netif_receive_skb(skb);
- return 0;
-}
-
-static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
- const struct cpl_rx_pkt *pkt)
-{
- int ret;
- struct sk_buff *skb;
-
- skb = napi_get_frags(&rxq->rspq.napi);
- if (unlikely(!skb)) {
- t4_pktgl_free(gl);
- rxq->stats.rx_drops++;
- return;
- }
-
- copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD);
- skb->len = gl->tot_len - RX_PKT_PAD;
- skb->data_len = skb->len;
- skb->truesize += skb->data_len;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb_record_rx_queue(skb, rxq->rspq.idx);
- if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
- skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
-
- if (unlikely(pkt->vlan_ex)) {
- __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
- rxq->stats.vlan_ex++;
- }
- ret = napi_gro_frags(&rxq->rspq.napi);
- if (ret == GRO_HELD)
- rxq->stats.lro_pkts++;
- else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
- rxq->stats.lro_merged++;
- rxq->stats.pkts++;
- rxq->stats.rx_cso++;
-}
-
-/**
- * t4_ethrx_handler - process an ingress ethernet packet
- * @q: the response queue that received the packet
- * @rsp: the response queue descriptor holding the RX_PKT message
- * @si: the gather list of packet fragments
- *
- * Process an ingress ethernet packet and deliver it to the stack.
- */
-int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
- const struct pkt_gl *si)
-{
- bool csum_ok;
- struct sk_buff *skb;
- const struct cpl_rx_pkt *pkt;
- struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
-
- if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
- return handle_trace_pkt(q->adap, si);
-
- pkt = (const struct cpl_rx_pkt *)rsp;
- csum_ok = pkt->csum_calc && !pkt->err_vec;
- if ((pkt->l2info & htonl(RXF_TCP)) &&
- (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
- do_gro(rxq, si, pkt);
- return 0;
- }
-
- skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
- if (unlikely(!skb)) {
- t4_pktgl_free(si);
- rxq->stats.rx_drops++;
- return 0;
- }
-
- __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */
- skb->protocol = eth_type_trans(skb, q->netdev);
- skb_record_rx_queue(skb, q->idx);
- if (skb->dev->features & NETIF_F_RXHASH)
- skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
-
- rxq->stats.pkts++;
-
- if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
- (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
- if (!pkt->ip_frag) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- rxq->stats.rx_cso++;
- } else if (pkt->l2info & htonl(RXF_IP)) {
- __sum16 c = (__force __sum16)pkt->csum;
- skb->csum = csum_unfold(c);
- skb->ip_summed = CHECKSUM_COMPLETE;
- rxq->stats.rx_cso++;
- }
- } else
- skb_checksum_none_assert(skb);
-
- if (unlikely(pkt->vlan_ex)) {
- __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
- rxq->stats.vlan_ex++;
- }
- netif_receive_skb(skb);
- return 0;
-}
-
-/**
- * restore_rx_bufs - put back a packet's Rx buffers
- * @si: the packet gather list
- * @q: the SGE free list
- * @frags: number of FL buffers to restore
- *
- * Puts back on an FL the Rx buffers associated with @si. The buffers
- * have already been unmapped and are left unmapped, we mark them so to
- * prevent further unmapping attempts.
- *
- * This function undoes a series of @unmap_rx_buf calls when we find out
- * that the current packet can't be processed right away afterall and we
- * need to come back to it later. This is a very rare event and there's
- * no effort to make this particularly efficient.
- */
-static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
- int frags)
-{
- struct rx_sw_desc *d;
-
- while (frags--) {
- if (q->cidx == 0)
- q->cidx = q->size - 1;
- else
- q->cidx--;
- d = &q->sdesc[q->cidx];
- d->page = si->frags[frags].page;
- d->dma_addr |= RX_UNMAPPED_BUF;
- q->avail++;
- }
-}
-
-/**
- * is_new_response - check if a response is newly written
- * @r: the response descriptor
- * @q: the response queue
- *
- * Returns true if a response descriptor contains a yet unprocessed
- * response.
- */
-static inline bool is_new_response(const struct rsp_ctrl *r,
- const struct sge_rspq *q)
-{
- return RSPD_GEN(r->type_gen) == q->gen;
-}
-
-/**
- * rspq_next - advance to the next entry in a response queue
- * @q: the queue
- *
- * Updates the state of a response queue to advance it to the next entry.
- */
-static inline void rspq_next(struct sge_rspq *q)
-{
- q->cur_desc = (void *)q->cur_desc + q->iqe_len;
- if (unlikely(++q->cidx == q->size)) {
- q->cidx = 0;
- q->gen ^= 1;
- q->cur_desc = q->desc;
- }
-}
-
-/**
- * process_responses - process responses from an SGE response queue
- * @q: the ingress queue to process
- * @budget: how many responses can be processed in this round
- *
- * Process responses from an SGE response queue up to the supplied budget.
- * Responses include received packets as well as control messages from FW
- * or HW.
- *
- * Additionally choose the interrupt holdoff time for the next interrupt
- * on this queue. If the system is under memory shortage use a fairly
- * long delay to help recovery.
- */
-static int process_responses(struct sge_rspq *q, int budget)
-{
- int ret, rsp_type;
- int budget_left = budget;
- const struct rsp_ctrl *rc;
- struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
-
- while (likely(budget_left)) {
- rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
- if (!is_new_response(rc, q))
- break;
-
- rmb();
- rsp_type = RSPD_TYPE(rc->type_gen);
- if (likely(rsp_type == RSP_TYPE_FLBUF)) {
- skb_frag_t *fp;
- struct pkt_gl si;
- const struct rx_sw_desc *rsd;
- u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
-
- if (len & RSPD_NEWBUF) {
- if (likely(q->offset > 0)) {
- free_rx_bufs(q->adap, &rxq->fl, 1);
- q->offset = 0;
- }
- len = RSPD_LEN(len);
- }
- si.tot_len = len;
-
- /* gather packet fragments */
- for (frags = 0, fp = si.frags; ; frags++, fp++) {
- rsd = &rxq->fl.sdesc[rxq->fl.cidx];
- bufsz = get_buf_size(rsd);
- fp->page = rsd->page;
- fp->page_offset = q->offset;
- fp->size = min(bufsz, len);
- len -= fp->size;
- if (!len)
- break;
- unmap_rx_buf(q->adap, &rxq->fl);
- }
-
- /*
- * Last buffer remains mapped so explicitly make it
- * coherent for CPU access.
- */
- dma_sync_single_for_cpu(q->adap->pdev_dev,
- get_buf_addr(rsd),
- fp->size, DMA_FROM_DEVICE);
-
- si.va = page_address(si.frags[0].page) +
- si.frags[0].page_offset;
- prefetch(si.va);
-
- si.nfrags = frags + 1;
- ret = q->handler(q, q->cur_desc, &si);
- if (likely(ret == 0))
- q->offset += ALIGN(fp->size, FL_ALIGN);
- else
- restore_rx_bufs(&si, &rxq->fl, frags);
- } else if (likely(rsp_type == RSP_TYPE_CPL)) {
- ret = q->handler(q, q->cur_desc, NULL);
- } else {
- ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
- }
-
- if (unlikely(ret)) {
- /* couldn't process descriptor, back off for recovery */
- q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
- break;
- }
-
- rspq_next(q);
- budget_left--;
- }
-
- if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
- __refill_fl(q->adap, &rxq->fl);
- return budget - budget_left;
-}
-
-/**
- * napi_rx_handler - the NAPI handler for Rx processing
- * @napi: the napi instance
- * @budget: how many packets we can process in this round
- *
- * Handler for new data events when using NAPI. This does not need any
- * locking or protection from interrupts as data interrupts are off at
- * this point and other adapter interrupts do not interfere (the latter
- * in not a concern at all with MSI-X as non-data interrupts then have
- * a separate handler).
- */
-static int napi_rx_handler(struct napi_struct *napi, int budget)
-{
- unsigned int params;
- struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
- int work_done = process_responses(q, budget);
-
- if (likely(work_done < budget)) {
- napi_complete(napi);
- params = q->next_intr_params;
- q->next_intr_params = q->intr_params;
- } else
- params = QINTR_TIMER_IDX(7);
-
- t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
- INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
- return work_done;
-}
-
-/*
- * The MSI-X interrupt handler for an SGE response queue.
- */
-irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
-{
- struct sge_rspq *q = cookie;
-
- napi_schedule(&q->napi);
- return IRQ_HANDLED;
-}
-
-/*
- * Process the indirect interrupt entries in the interrupt queue and kick off
- * NAPI for each queue that has generated an entry.
- */
-static unsigned int process_intrq(struct adapter *adap)
-{
- unsigned int credits;
- const struct rsp_ctrl *rc;
- struct sge_rspq *q = &adap->sge.intrq;
-
- spin_lock(&adap->sge.intrq_lock);
- for (credits = 0; ; credits++) {
- rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
- if (!is_new_response(rc, q))
- break;
-
- rmb();
- if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
- unsigned int qid = ntohl(rc->pldbuflen_qid);
-
- qid -= adap->sge.ingr_start;
- napi_schedule(&adap->sge.ingr_map[qid]->napi);
- }
-
- rspq_next(q);
- }
-
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
- INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
- spin_unlock(&adap->sge.intrq_lock);
- return credits;
-}
-
-/*
- * The MSI interrupt handler, which handles data events from SGE response queues
- * as well as error and other async events as they all use the same MSI vector.
- */
-static irqreturn_t t4_intr_msi(int irq, void *cookie)
-{
- struct adapter *adap = cookie;
-
- t4_slow_intr_handler(adap);
- process_intrq(adap);
- return IRQ_HANDLED;
-}
-
-/*
- * Interrupt handler for legacy INTx interrupts.
- * Handles data events from SGE response queues as well as error and other
- * async events as they all use the same interrupt line.
- */
-static irqreturn_t t4_intr_intx(int irq, void *cookie)
-{
- struct adapter *adap = cookie;
-
- t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
- if (t4_slow_intr_handler(adap) | process_intrq(adap))
- return IRQ_HANDLED;
- return IRQ_NONE; /* probably shared interrupt */
-}
-
-/**
- * t4_intr_handler - select the top-level interrupt handler
- * @adap: the adapter
- *
- * Selects the top-level interrupt handler based on the type of interrupts
- * (MSI-X, MSI, or INTx).
- */
-irq_handler_t t4_intr_handler(struct adapter *adap)
-{
- if (adap->flags & USING_MSIX)
- return t4_sge_intr_msix;
- if (adap->flags & USING_MSI)
- return t4_intr_msi;
- return t4_intr_intx;
-}
-
-static void sge_rx_timer_cb(unsigned long data)
-{
- unsigned long m;
- unsigned int i, cnt[2];
- struct adapter *adap = (struct adapter *)data;
- struct sge *s = &adap->sge;
-
- for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
- for (m = s->starving_fl[i]; m; m &= m - 1) {
- struct sge_eth_rxq *rxq;
- unsigned int id = __ffs(m) + i * BITS_PER_LONG;
- struct sge_fl *fl = s->egr_map[id];
-
- clear_bit(id, s->starving_fl);
- smp_mb__after_clear_bit();
-
- if (fl_starving(fl)) {
- rxq = container_of(fl, struct sge_eth_rxq, fl);
- if (napi_reschedule(&rxq->rspq.napi))
- fl->starving++;
- else
- set_bit(id, s->starving_fl);
- }
- }
-
- t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
- cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
- cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
-
- for (i = 0; i < 2; i++)
- if (cnt[i] >= s->starve_thres) {
- if (s->idma_state[i] || cnt[i] == 0xffffffff)
- continue;
- s->idma_state[i] = 1;
- t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
- m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
- dev_warn(adap->pdev_dev,
- "SGE idma%u starvation detected for "
- "queue %lu\n", i, m & 0xffff);
- } else if (s->idma_state[i])
- s->idma_state[i] = 0;
-
- mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
-}
-
-static void sge_tx_timer_cb(unsigned long data)
-{
- unsigned long m;
- unsigned int i, budget;
- struct adapter *adap = (struct adapter *)data;
- struct sge *s = &adap->sge;
-
- for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
- for (m = s->txq_maperr[i]; m; m &= m - 1) {
- unsigned long id = __ffs(m) + i * BITS_PER_LONG;
- struct sge_ofld_txq *txq = s->egr_map[id];
-
- clear_bit(id, s->txq_maperr);
- tasklet_schedule(&txq->qresume_tsk);
- }
-
- budget = MAX_TIMER_TX_RECLAIM;
- i = s->ethtxq_rover;
- do {
- struct sge_eth_txq *q = &s->ethtxq[i];
-
- if (q->q.in_use &&
- time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
- __netif_tx_trylock(q->txq)) {
- int avail = reclaimable(&q->q);
-
- if (avail) {
- if (avail > budget)
- avail = budget;
-
- free_tx_desc(adap, &q->q, avail, true);
- q->q.in_use -= avail;
- budget -= avail;
- }
- __netif_tx_unlock(q->txq);
- }
-
- if (++i >= s->ethqsets)
- i = 0;
- } while (budget && i != s->ethtxq_rover);
- s->ethtxq_rover = i;
- mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
-}
-
-int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
- struct net_device *dev, int intr_idx,
- struct sge_fl *fl, rspq_handler_t hnd)
-{
- int ret, flsz = 0;
- struct fw_iq_cmd c;
- struct port_info *pi = netdev_priv(dev);
-
- /* Size needs to be multiple of 16, including status entry. */
- iq->size = roundup(iq->size, 16);
-
- iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
- &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
- if (!iq->desc)
- return -ENOMEM;
-
- memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
- c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
- FW_LEN16(c));
- c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
- FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
- FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
- FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
- -intr_idx - 1));
- c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
- FW_IQ_CMD_IQGTSMODE |
- FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
- FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
- c.iqsize = htons(iq->size);
- c.iqaddr = cpu_to_be64(iq->phys_addr);
-
- if (fl) {
- fl->size = roundup(fl->size, 8);
- fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
- sizeof(struct rx_sw_desc), &fl->addr,
- &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
- if (!fl->desc)
- goto fl_nomem;
-
- flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
- FW_IQ_CMD_FL0FETCHRO(1) |
- FW_IQ_CMD_FL0DATARO(1) |
- FW_IQ_CMD_FL0PADEN);
- c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
- FW_IQ_CMD_FL0FBMAX(3));
- c.fl0size = htons(flsz);
- c.fl0addr = cpu_to_be64(fl->addr);
- }
-
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
- if (ret)
- goto err;
-
- netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
- iq->cur_desc = iq->desc;
- iq->cidx = 0;
- iq->gen = 1;
- iq->next_intr_params = iq->intr_params;
- iq->cntxt_id = ntohs(c.iqid);
- iq->abs_id = ntohs(c.physiqid);
- iq->size--; /* subtract status entry */
- iq->adap = adap;
- iq->netdev = dev;
- iq->handler = hnd;
-
- /* set offset to -1 to distinguish ingress queues without FL */
- iq->offset = fl ? 0 : -1;
-
- adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
-
- if (fl) {
- fl->cntxt_id = ntohs(c.fl0id);
- fl->avail = fl->pend_cred = 0;
- fl->pidx = fl->cidx = 0;
- fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
- adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
- refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
- }
- return 0;
-
-fl_nomem:
- ret = -ENOMEM;
-err:
- if (iq->desc) {
- dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
- iq->desc, iq->phys_addr);
- iq->desc = NULL;
- }
- if (fl && fl->desc) {
- kfree(fl->sdesc);
- fl->sdesc = NULL;
- dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
- fl->desc, fl->addr);
- fl->desc = NULL;
- }
- return ret;
-}
-
-static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
-{
- q->in_use = 0;
- q->cidx = q->pidx = 0;
- q->stops = q->restarts = 0;
- q->stat = (void *)&q->desc[q->size];
- q->cntxt_id = id;
- adap->sge.egr_map[id - adap->sge.egr_start] = q;
-}
-
-int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
- struct net_device *dev, struct netdev_queue *netdevq,
- unsigned int iqid)
-{
- int ret, nentries;
- struct fw_eq_eth_cmd c;
- struct port_info *pi = netdev_priv(dev);
-
- /* Add status entries */
- nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
-
- txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
- sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
- netdev_queue_numa_node_read(netdevq));
- if (!txq->q.desc)
- return -ENOMEM;
-
- memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
- c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
- FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
- c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
- c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
- FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
- FW_EQ_ETH_CMD_FETCHRO(1) |
- FW_EQ_ETH_CMD_IQID(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
- FW_EQ_ETH_CMD_FBMAX(3) |
- FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
- FW_EQ_ETH_CMD_EQSIZE(nentries));
- c.eqaddr = cpu_to_be64(txq->q.phys_addr);
-
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
- if (ret) {
- kfree(txq->q.sdesc);
- txq->q.sdesc = NULL;
- dma_free_coherent(adap->pdev_dev,
- nentries * sizeof(struct tx_desc),
- txq->q.desc, txq->q.phys_addr);
- txq->q.desc = NULL;
- return ret;
- }
-
- init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
- txq->txq = netdevq;
- txq->tso = txq->tx_cso = txq->vlan_ins = 0;
- txq->mapping_err = 0;
- return 0;
-}
-
-int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
- struct net_device *dev, unsigned int iqid,
- unsigned int cmplqid)
-{
- int ret, nentries;
- struct fw_eq_ctrl_cmd c;
- struct port_info *pi = netdev_priv(dev);
-
- /* Add status entries */
- nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
-
- txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
- sizeof(struct tx_desc), 0, &txq->q.phys_addr,
- NULL, 0, NUMA_NO_NODE);
- if (!txq->q.desc)
- return -ENOMEM;
-
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_EQ_CTRL_CMD_PFN(adap->fn) |
- FW_EQ_CTRL_CMD_VFN(0));
- c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
- FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
- c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
- c.physeqid_pkd = htonl(0);
- c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
- FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
- FW_EQ_CTRL_CMD_FETCHRO |
- FW_EQ_CTRL_CMD_IQID(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
- FW_EQ_CTRL_CMD_FBMAX(3) |
- FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
- FW_EQ_CTRL_CMD_EQSIZE(nentries));
- c.eqaddr = cpu_to_be64(txq->q.phys_addr);
-
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
- if (ret) {
- dma_free_coherent(adap->pdev_dev,
- nentries * sizeof(struct tx_desc),
- txq->q.desc, txq->q.phys_addr);
- txq->q.desc = NULL;
- return ret;
- }
-
- init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
- txq->adap = adap;
- skb_queue_head_init(&txq->sendq);
- tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
- txq->full = 0;
- return 0;
-}
-
-int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
- struct net_device *dev, unsigned int iqid)
-{
- int ret, nentries;
- struct fw_eq_ofld_cmd c;
- struct port_info *pi = netdev_priv(dev);
-
- /* Add status entries */
- nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
-
- txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
- sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
- NUMA_NO_NODE);
- if (!txq->q.desc)
- return -ENOMEM;
-
- memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_EQ_OFLD_CMD_PFN(adap->fn) |
- FW_EQ_OFLD_CMD_VFN(0));
- c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
- FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
- c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
- FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
- FW_EQ_OFLD_CMD_FETCHRO(1) |
- FW_EQ_OFLD_CMD_IQID(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
- FW_EQ_OFLD_CMD_FBMAX(3) |
- FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
- FW_EQ_OFLD_CMD_EQSIZE(nentries));
- c.eqaddr = cpu_to_be64(txq->q.phys_addr);
-
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
- if (ret) {
- kfree(txq->q.sdesc);
- txq->q.sdesc = NULL;
- dma_free_coherent(adap->pdev_dev,
- nentries * sizeof(struct tx_desc),
- txq->q.desc, txq->q.phys_addr);
- txq->q.desc = NULL;
- return ret;
- }
-
- init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
- txq->adap = adap;
- skb_queue_head_init(&txq->sendq);
- tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
- txq->full = 0;
- txq->mapping_err = 0;
- return 0;
-}
-
-static void free_txq(struct adapter *adap, struct sge_txq *q)
-{
- dma_free_coherent(adap->pdev_dev,
- q->size * sizeof(struct tx_desc) + STAT_LEN,
- q->desc, q->phys_addr);
- q->cntxt_id = 0;
- q->sdesc = NULL;
- q->desc = NULL;
-}
-
-static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
- struct sge_fl *fl)
-{
- unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
-
- adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
- t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
- rq->cntxt_id, fl_id, 0xffff);
- dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
- rq->desc, rq->phys_addr);
- netif_napi_del(&rq->napi);
- rq->netdev = NULL;
- rq->cntxt_id = rq->abs_id = 0;
- rq->desc = NULL;
-
- if (fl) {
- free_rx_bufs(adap, fl, fl->avail);
- dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
- fl->desc, fl->addr);
- kfree(fl->sdesc);
- fl->sdesc = NULL;
- fl->cntxt_id = 0;
- fl->desc = NULL;
- }
-}
-
-/**
- * t4_free_sge_resources - free SGE resources
- * @adap: the adapter
- *
- * Frees resources used by the SGE queue sets.
- */
-void t4_free_sge_resources(struct adapter *adap)
-{
- int i;
- struct sge_eth_rxq *eq = adap->sge.ethrxq;
- struct sge_eth_txq *etq = adap->sge.ethtxq;
- struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
-
- /* clean up Ethernet Tx/Rx queues */
- for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
- if (eq->rspq.desc)
- free_rspq_fl(adap, &eq->rspq, &eq->fl);
- if (etq->q.desc) {
- t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
- etq->q.cntxt_id);
- free_tx_desc(adap, &etq->q, etq->q.in_use, true);
- kfree(etq->q.sdesc);
- free_txq(adap, &etq->q);
- }
- }
-
- /* clean up RDMA and iSCSI Rx queues */
- for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
- if (oq->rspq.desc)
- free_rspq_fl(adap, &oq->rspq, &oq->fl);
- }
- for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
- if (oq->rspq.desc)
- free_rspq_fl(adap, &oq->rspq, &oq->fl);
- }
-
- /* clean up offload Tx queues */
- for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
- struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
-
- if (q->q.desc) {
- tasklet_kill(&q->qresume_tsk);
- t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
- q->q.cntxt_id);
- free_tx_desc(adap, &q->q, q->q.in_use, false);
- kfree(q->q.sdesc);
- __skb_queue_purge(&q->sendq);
- free_txq(adap, &q->q);
- }
- }
-
- /* clean up control Tx queues */
- for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
- struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
-
- if (cq->q.desc) {
- tasklet_kill(&cq->qresume_tsk);
- t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
- cq->q.cntxt_id);
- __skb_queue_purge(&cq->sendq);
- free_txq(adap, &cq->q);
- }
- }
-
- if (adap->sge.fw_evtq.desc)
- free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
-
- if (adap->sge.intrq.desc)
- free_rspq_fl(adap, &adap->sge.intrq, NULL);
-
- /* clear the reverse egress queue map */
- memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
-}
-
-void t4_sge_start(struct adapter *adap)
-{
- adap->sge.ethtxq_rover = 0;
- mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
- mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
-}
-
-/**
- * t4_sge_stop - disable SGE operation
- * @adap: the adapter
- *
- * Stop tasklets and timers associated with the DMA engine. Note that
- * this is effective only if measures have been taken to disable any HW
- * events that may restart them.
- */
-void t4_sge_stop(struct adapter *adap)
-{
- int i;
- struct sge *s = &adap->sge;
-
- if (in_interrupt()) /* actions below require waiting */
- return;
-
- if (s->rx_timer.function)
- del_timer_sync(&s->rx_timer);
- if (s->tx_timer.function)
- del_timer_sync(&s->tx_timer);
-
- for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
- struct sge_ofld_txq *q = &s->ofldtxq[i];
-
- if (q->q.desc)
- tasklet_kill(&q->qresume_tsk);
- }
- for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
- struct sge_ctrl_txq *cq = &s->ctrlq[i];
-
- if (cq->q.desc)
- tasklet_kill(&cq->qresume_tsk);
- }
-}
-
-/**
- * t4_sge_init - initialize SGE
- * @adap: the adapter
- *
- * Performs SGE initialization needed every time after a chip reset.
- * We do not initialize any of the queues here, instead the driver
- * top-level must request them individually.
- */
-void t4_sge_init(struct adapter *adap)
-{
- unsigned int i, v;
- struct sge *s = &adap->sge;
- unsigned int fl_align_log = ilog2(FL_ALIGN);
-
- t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
- INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
- INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
- RXPKTCPLMODE |
- (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
-
- for (i = v = 0; i < 32; i += 4)
- v |= (PAGE_SHIFT - 10) << i;
- t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
-#if FL_PG_ORDER > 0
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
-#endif
- t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
- THRESHOLD_0(s->counter_val[0]) |
- THRESHOLD_1(s->counter_val[1]) |
- THRESHOLD_2(s->counter_val[2]) |
- THRESHOLD_3(s->counter_val[3]));
- t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
- TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
- TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
- t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
- TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
- TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
- t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
- TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
- TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
- setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
- setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
- s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
- s->idma_state[0] = s->idma_state[1] = 0;
- spin_lock_init(&s->intrq_lock);
-}
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
deleted file mode 100644
index cffb328c46c..00000000000
--- a/drivers/net/cxgb4vf/sge.c
+++ /dev/null
@@ -1,2465 +0,0 @@
-/*
- * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
- * driver for Linux.
- *
- * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#include <net/ipv6.h>
-#include <net/tcp.h>
-#include <linux/dma-mapping.h>
-#include <linux/prefetch.h>
-
-#include "t4vf_common.h"
-#include "t4vf_defs.h"
-
-#include "../cxgb4/t4_regs.h"
-#include "../cxgb4/t4fw_api.h"
-#include "../cxgb4/t4_msg.h"
-
-/*
- * Decoded Adapter Parameters.
- */
-static u32 FL_PG_ORDER; /* large page allocation size */
-static u32 STAT_LEN; /* length of status page at ring end */
-static u32 PKTSHIFT; /* padding between CPL and packet data */
-static u32 FL_ALIGN; /* response queue message alignment */
-
-/*
- * Constants ...
- */
-enum {
- /*
- * Egress Queue sizes, producer and consumer indices are all in units
- * of Egress Context Units bytes. Note that as far as the hardware is
- * concerned, the free list is an Egress Queue (the host produces free
- * buffers which the hardware consumes) and free list entries are
- * 64-bit PCI DMA addresses.
- */
- EQ_UNIT = SGE_EQ_IDXSIZE,
- FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
- TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
-
- /*
- * Max number of TX descriptors we clean up at a time. Should be
- * modest as freeing skbs isn't cheap and it happens while holding
- * locks. We just need to free packets faster than they arrive, we
- * eventually catch up and keep the amortized cost reasonable.
- */
- MAX_TX_RECLAIM = 16,
-
- /*
- * Max number of Rx buffers we replenish at a time. Again keep this
- * modest, allocating buffers isn't cheap either.
- */
- MAX_RX_REFILL = 16,
-
- /*
- * Period of the Rx queue check timer. This timer is infrequent as it
- * has something to do only when the system experiences severe memory
- * shortage.
- */
- RX_QCHECK_PERIOD = (HZ / 2),
-
- /*
- * Period of the TX queue check timer and the maximum number of TX
- * descriptors to be reclaimed by the TX timer.
- */
- TX_QCHECK_PERIOD = (HZ / 2),
- MAX_TIMER_TX_RECLAIM = 100,
-
- /*
- * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
- * timer will attempt to refill it.
- */
- FL_STARVE_THRES = 4,
-
- /*
- * Suspend an Ethernet TX queue with fewer available descriptors than
- * this. We always want to have room for a maximum sized packet:
- * inline immediate data + MAX_SKB_FRAGS. This is the same as
- * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
- * (see that function and its helpers for a description of the
- * calculation).
- */
- ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
- ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
- ((ETHTXQ_MAX_FRAGS-1) & 1) +
- 2),
- ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
- sizeof(struct cpl_tx_pkt_lso_core) +
- sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
- ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
-
- ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
-
- /*
- * Max TX descriptor space we allow for an Ethernet packet to be
- * inlined into a WR. This is limited by the maximum value which
- * we can specify for immediate data in the firmware Ethernet TX
- * Work Request.
- */
- MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK,
-
- /*
- * Max size of a WR sent through a control TX queue.
- */
- MAX_CTRL_WR_LEN = 256,
-
- /*
- * Maximum amount of data which we'll ever need to inline into a
- * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
- */
- MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
- ? MAX_IMM_TX_PKT_LEN
- : MAX_CTRL_WR_LEN),
-
- /*
- * For incoming packets less than RX_COPY_THRES, we copy the data into
- * an skb rather than referencing the data. We allocate enough
- * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
- * of the data (header).
- */
- RX_COPY_THRES = 256,
- RX_PULL_LEN = 128,
-
- /*
- * Main body length for sk_buffs used for RX Ethernet packets with
- * fragments. Should be >= RX_PULL_LEN but possibly bigger to give
- * pskb_may_pull() some room.
- */
- RX_SKB_LEN = 512,
-};
-
-/*
- * Software state per TX descriptor.
- */
-struct tx_sw_desc {
- struct sk_buff *skb; /* socket buffer of TX data source */
- struct ulptx_sgl *sgl; /* scatter/gather list in TX Queue */
-};
-
-/*
- * Software state per RX Free List descriptor. We keep track of the allocated
- * FL page, its size, and its PCI DMA address (if the page is mapped). The FL
- * page size and its PCI DMA mapped state are stored in the low bits of the
- * PCI DMA address as per below.
- */
-struct rx_sw_desc {
- struct page *page; /* Free List page buffer */
- dma_addr_t dma_addr; /* PCI DMA address (if mapped) */
- /* and flags (see below) */
-};
-
-/*
- * The low bits of rx_sw_desc.dma_addr have special meaning. Note that the
- * SGE also uses the low 4 bits to determine the size of the buffer. It uses
- * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
- * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
- * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
- * to the SGE. Thus, our software state of "is the buffer mapped for DMA" is
- * maintained in an inverse sense so the hardware never sees that bit high.
- */
-enum {
- RX_LARGE_BUF = 1 << 0, /* buffer is SGE_FL_BUFFER_SIZE[1] */
- RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
-};
-
-/**
- * get_buf_addr - return DMA buffer address of software descriptor
- * @sdesc: pointer to the software buffer descriptor
- *
- * Return the DMA buffer address of a software descriptor (stripping out
- * our low-order flag bits).
- */
-static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
-{
- return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
-}
-
-/**
- * is_buf_mapped - is buffer mapped for DMA?
- * @sdesc: pointer to the software buffer descriptor
- *
- * Determine whether the buffer associated with a software descriptor in
- * mapped for DMA or not.
- */
-static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
-{
- return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
-}
-
-/**
- * need_skb_unmap - does the platform need unmapping of sk_buffs?
- *
- * Returns true if the platform needs sk_buff unmapping. The compiler
- * optimizes away unnecessary code if this returns true.
- */
-static inline int need_skb_unmap(void)
-{
-#ifdef CONFIG_NEED_DMA_MAP_STATE
- return 1;
-#else
- return 0;
-#endif
-}
-
-/**
- * txq_avail - return the number of available slots in a TX queue
- * @tq: the TX queue
- *
- * Returns the number of available descriptors in a TX queue.
- */
-static inline unsigned int txq_avail(const struct sge_txq *tq)
-{
- return tq->size - 1 - tq->in_use;
-}
-
-/**
- * fl_cap - return the capacity of a Free List
- * @fl: the Free List
- *
- * Returns the capacity of a Free List. The capacity is less than the
- * size because an Egress Queue Index Unit worth of descriptors needs to
- * be left unpopulated, otherwise the Producer and Consumer indices PIDX
- * and CIDX will match and the hardware will think the FL is empty.
- */
-static inline unsigned int fl_cap(const struct sge_fl *fl)
-{
- return fl->size - FL_PER_EQ_UNIT;
-}
-
-/**
- * fl_starving - return whether a Free List is starving.
- * @fl: the Free List
- *
- * Tests specified Free List to see whether the number of buffers
- * available to the hardware has falled below our "starvation"
- * threshold.
- */
-static inline bool fl_starving(const struct sge_fl *fl)
-{
- return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
-}
-
-/**
- * map_skb - map an skb for DMA to the device
- * @dev: the egress net device
- * @skb: the packet to map
- * @addr: a pointer to the base of the DMA mapping array
- *
- * Map an skb for DMA to the device and return an array of DMA addresses.
- */
-static int map_skb(struct device *dev, const struct sk_buff *skb,
- dma_addr_t *addr)
-{
- const skb_frag_t *fp, *end;
- const struct skb_shared_info *si;
-
- *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
- if (dma_mapping_error(dev, *addr))
- goto out_err;
-
- si = skb_shinfo(skb);
- end = &si->frags[si->nr_frags];
- for (fp = si->frags; fp < end; fp++) {
- *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, *addr))
- goto unwind;
- }
- return 0;
-
-unwind:
- while (fp-- > si->frags)
- dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
- dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
-
-out_err:
- return -ENOMEM;
-}
-
-static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
- const struct ulptx_sgl *sgl, const struct sge_txq *tq)
-{
- const struct ulptx_sge_pair *p;
- unsigned int nfrags = skb_shinfo(skb)->nr_frags;
-
- if (likely(skb_headlen(skb)))
- dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
- be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
- else {
- dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
- be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
- nfrags--;
- }
-
- /*
- * the complexity below is because of the possibility of a wrap-around
- * in the middle of an SGL
- */
- for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
- if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
-unmap:
- dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
- be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
- be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
- p++;
- } else if ((u8 *)p == (u8 *)tq->stat) {
- p = (const struct ulptx_sge_pair *)tq->desc;
- goto unmap;
- } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
- const __be64 *addr = (const __be64 *)tq->desc;
-
- dma_unmap_page(dev, be64_to_cpu(addr[0]),
- be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(addr[1]),
- be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
- p = (const struct ulptx_sge_pair *)&addr[2];
- } else {
- const __be64 *addr = (const __be64 *)tq->desc;
-
- dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
- be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(addr[0]),
- be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
- p = (const struct ulptx_sge_pair *)&addr[1];
- }
- }
- if (nfrags) {
- __be64 addr;
-
- if ((u8 *)p == (u8 *)tq->stat)
- p = (const struct ulptx_sge_pair *)tq->desc;
- addr = ((u8 *)p + 16 <= (u8 *)tq->stat
- ? p->addr[0]
- : *(const __be64 *)tq->desc);
- dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
- DMA_TO_DEVICE);
- }
-}
-
-/**
- * free_tx_desc - reclaims TX descriptors and their buffers
- * @adapter: the adapter
- * @tq: the TX queue to reclaim descriptors from
- * @n: the number of descriptors to reclaim
- * @unmap: whether the buffers should be unmapped for DMA
- *
- * Reclaims TX descriptors from an SGE TX queue and frees the associated
- * TX buffers. Called with the TX queue lock held.
- */
-static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
- unsigned int n, bool unmap)
-{
- struct tx_sw_desc *sdesc;
- unsigned int cidx = tq->cidx;
- struct device *dev = adapter->pdev_dev;
-
- const int need_unmap = need_skb_unmap() && unmap;
-
- sdesc = &tq->sdesc[cidx];
- while (n--) {
- /*
- * If we kept a reference to the original TX skb, we need to
- * unmap it from PCI DMA space (if required) and free it.
- */
- if (sdesc->skb) {
- if (need_unmap)
- unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
- kfree_skb(sdesc->skb);
- sdesc->skb = NULL;
- }
-
- sdesc++;
- if (++cidx == tq->size) {
- cidx = 0;
- sdesc = tq->sdesc;
- }
- }
- tq->cidx = cidx;
-}
-
-/*
- * Return the number of reclaimable descriptors in a TX queue.
- */
-static inline int reclaimable(const struct sge_txq *tq)
-{
- int hw_cidx = be16_to_cpu(tq->stat->cidx);
- int reclaimable = hw_cidx - tq->cidx;
- if (reclaimable < 0)
- reclaimable += tq->size;
- return reclaimable;
-}
-
-/**
- * reclaim_completed_tx - reclaims completed TX descriptors
- * @adapter: the adapter
- * @tq: the TX queue to reclaim completed descriptors from
- * @unmap: whether the buffers should be unmapped for DMA
- *
- * Reclaims TX descriptors that the SGE has indicated it has processed,
- * and frees the associated buffers if possible. Called with the TX
- * queue locked.
- */
-static inline void reclaim_completed_tx(struct adapter *adapter,
- struct sge_txq *tq,
- bool unmap)
-{
- int avail = reclaimable(tq);
-
- if (avail) {
- /*
- * Limit the amount of clean up work we do at a time to keep
- * the TX lock hold time O(1).
- */
- if (avail > MAX_TX_RECLAIM)
- avail = MAX_TX_RECLAIM;
-
- free_tx_desc(adapter, tq, avail, unmap);
- tq->in_use -= avail;
- }
-}
-
-/**
- * get_buf_size - return the size of an RX Free List buffer.
- * @sdesc: pointer to the software buffer descriptor
- */
-static inline int get_buf_size(const struct rx_sw_desc *sdesc)
-{
- return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
- ? (PAGE_SIZE << FL_PG_ORDER)
- : PAGE_SIZE;
-}
-
-/**
- * free_rx_bufs - free RX buffers on an SGE Free List
- * @adapter: the adapter
- * @fl: the SGE Free List to free buffers from
- * @n: how many buffers to free
- *
- * Release the next @n buffers on an SGE Free List RX queue. The
- * buffers must be made inaccessible to hardware before calling this
- * function.
- */
-static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
-{
- while (n--) {
- struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
-
- if (is_buf_mapped(sdesc))
- dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
- get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
- put_page(sdesc->page);
- sdesc->page = NULL;
- if (++fl->cidx == fl->size)
- fl->cidx = 0;
- fl->avail--;
- }
-}
-
-/**
- * unmap_rx_buf - unmap the current RX buffer on an SGE Free List
- * @adapter: the adapter
- * @fl: the SGE Free List
- *
- * Unmap the current buffer on an SGE Free List RX queue. The
- * buffer must be made inaccessible to HW before calling this function.
- *
- * This is similar to @free_rx_bufs above but does not free the buffer.
- * Do note that the FL still loses any further access to the buffer.
- * This is used predominantly to "transfer ownership" of an FL buffer
- * to another entity (typically an skb's fragment list).
- */
-static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
-{
- struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
-
- if (is_buf_mapped(sdesc))
- dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
- get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
- sdesc->page = NULL;
- if (++fl->cidx == fl->size)
- fl->cidx = 0;
- fl->avail--;
-}
-
-/**
- * ring_fl_db - righ doorbell on free list
- * @adapter: the adapter
- * @fl: the Free List whose doorbell should be rung ...
- *
- * Tell the Scatter Gather Engine that there are new free list entries
- * available.
- */
-static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
-{
- /*
- * The SGE keeps track of its Producer and Consumer Indices in terms
- * of Egress Queue Units so we can only tell it about integral numbers
- * of multiples of Free List Entries per Egress Queue Units ...
- */
- if (fl->pend_cred >= FL_PER_EQ_UNIT) {
- wmb();
- t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- DBPRIO |
- QID(fl->cntxt_id) |
- PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
- fl->pend_cred %= FL_PER_EQ_UNIT;
- }
-}
-
-/**
- * set_rx_sw_desc - initialize software RX buffer descriptor
- * @sdesc: pointer to the softwore RX buffer descriptor
- * @page: pointer to the page data structure backing the RX buffer
- * @dma_addr: PCI DMA address (possibly with low-bit flags)
- */
-static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
- dma_addr_t dma_addr)
-{
- sdesc->page = page;
- sdesc->dma_addr = dma_addr;
-}
-
-/*
- * Support for poisoning RX buffers ...
- */
-#define POISON_BUF_VAL -1
-
-static inline void poison_buf(struct page *page, size_t sz)
-{
-#if POISON_BUF_VAL >= 0
- memset(page_address(page), POISON_BUF_VAL, sz);
-#endif
-}
-
-/**
- * refill_fl - refill an SGE RX buffer ring
- * @adapter: the adapter
- * @fl: the Free List ring to refill
- * @n: the number of new buffers to allocate
- * @gfp: the gfp flags for the allocations
- *
- * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
- * allocated with the supplied gfp flags. The caller must assure that
- * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
- * EGRESS QUEUE UNITS_ indicates an empty Free List! Returns the number
- * of buffers allocated. If afterwards the queue is found critically low,
- * mark it as starving in the bitmap of starving FLs.
- */
-static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
- int n, gfp_t gfp)
-{
- struct page *page;
- dma_addr_t dma_addr;
- unsigned int cred = fl->avail;
- __be64 *d = &fl->desc[fl->pidx];
- struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
-
- /*
- * Sanity: ensure that the result of adding n Free List buffers
- * won't result in wrapping the SGE's Producer Index around to
- * it's Consumer Index thereby indicating an empty Free List ...
- */
- BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
-
- /*
- * If we support large pages, prefer large buffers and fail over to
- * small pages if we can't allocate large pages to satisfy the refill.
- * If we don't support large pages, drop directly into the small page
- * allocation code.
- */
- if (FL_PG_ORDER == 0)
- goto alloc_small_pages;
-
- while (n) {
- page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- FL_PG_ORDER);
- if (unlikely(!page)) {
- /*
- * We've failed inour attempt to allocate a "large
- * page". Fail over to the "small page" allocation
- * below.
- */
- fl->large_alloc_failed++;
- break;
- }
- poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
-
- dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
- PAGE_SIZE << FL_PG_ORDER,
- PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
- /*
- * We've run out of DMA mapping space. Free up the
- * buffer and return with what we've managed to put
- * into the free list. We don't want to fail over to
- * the small page allocation below in this case
- * because DMA mapping resources are typically
- * critical resources once they become scarse.
- */
- __free_pages(page, FL_PG_ORDER);
- goto out;
- }
- dma_addr |= RX_LARGE_BUF;
- *d++ = cpu_to_be64(dma_addr);
-
- set_rx_sw_desc(sdesc, page, dma_addr);
- sdesc++;
-
- fl->avail++;
- if (++fl->pidx == fl->size) {
- fl->pidx = 0;
- sdesc = fl->sdesc;
- d = fl->desc;
- }
- n--;
- }
-
-alloc_small_pages:
- while (n--) {
- page = __netdev_alloc_page(adapter->port[0],
- gfp | __GFP_NOWARN);
- if (unlikely(!page)) {
- fl->alloc_failed++;
- break;
- }
- poison_buf(page, PAGE_SIZE);
-
- dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
- netdev_free_page(adapter->port[0], page);
- break;
- }
- *d++ = cpu_to_be64(dma_addr);
-
- set_rx_sw_desc(sdesc, page, dma_addr);
- sdesc++;
-
- fl->avail++;
- if (++fl->pidx == fl->size) {
- fl->pidx = 0;
- sdesc = fl->sdesc;
- d = fl->desc;
- }
- }
-
-out:
- /*
- * Update our accounting state to incorporate the new Free List
- * buffers, tell the hardware about them and return the number of
- * bufers which we were able to allocate.
- */
- cred = fl->avail - cred;
- fl->pend_cred += cred;
- ring_fl_db(adapter, fl);
-
- if (unlikely(fl_starving(fl))) {
- smp_wmb();
- set_bit(fl->cntxt_id, adapter->sge.starving_fl);
- }
-
- return cred;
-}
-
-/*
- * Refill a Free List to its capacity or the Maximum Refill Increment,
- * whichever is smaller ...
- */
-static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
-{
- refill_fl(adapter, fl,
- min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
- GFP_ATOMIC);
-}
-
-/**
- * alloc_ring - allocate resources for an SGE descriptor ring
- * @dev: the PCI device's core device
- * @nelem: the number of descriptors
- * @hwsize: the size of each hardware descriptor
- * @swsize: the size of each software descriptor
- * @busaddrp: the physical PCI bus address of the allocated ring
- * @swringp: return address pointer for software ring
- * @stat_size: extra space in hardware ring for status information
- *
- * Allocates resources for an SGE descriptor ring, such as TX queues,
- * free buffer lists, response queues, etc. Each SGE ring requires
- * space for its hardware descriptors plus, optionally, space for software
- * state associated with each hardware entry (the metadata). The function
- * returns three values: the virtual address for the hardware ring (the
- * return value of the function), the PCI bus address of the hardware
- * ring (in *busaddrp), and the address of the software ring (in swringp).
- * Both the hardware and software rings are returned zeroed out.
- */
-static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
- size_t swsize, dma_addr_t *busaddrp, void *swringp,
- size_t stat_size)
-{
- /*
- * Allocate the hardware ring and PCI DMA bus address space for said.
- */
- size_t hwlen = nelem * hwsize + stat_size;
- void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
-
- if (!hwring)
- return NULL;
-
- /*
- * If the caller wants a software ring, allocate it and return a
- * pointer to it in *swringp.
- */
- BUG_ON((swsize != 0) != (swringp != NULL));
- if (swsize) {
- void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
-
- if (!swring) {
- dma_free_coherent(dev, hwlen, hwring, *busaddrp);
- return NULL;
- }
- *(void **)swringp = swring;
- }
-
- /*
- * Zero out the hardware ring and return its address as our function
- * value.
- */
- memset(hwring, 0, hwlen);
- return hwring;
-}
-
-/**
- * sgl_len - calculates the size of an SGL of the given capacity
- * @n: the number of SGL entries
- *
- * Calculates the number of flits (8-byte units) needed for a Direct
- * Scatter/Gather List that can hold the given number of entries.
- */
-static inline unsigned int sgl_len(unsigned int n)
-{
- /*
- * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
- * addresses. The DSGL Work Request starts off with a 32-bit DSGL
- * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
- * repeated sequences of { Length[i], Length[i+1], Address[i],
- * Address[i+1] } (this ensures that all addresses are on 64-bit
- * boundaries). If N is even, then Length[N+1] should be set to 0 and
- * Address[N+1] is omitted.
- *
- * The following calculation incorporates all of the above. It's
- * somewhat hard to follow but, briefly: the "+2" accounts for the
- * first two flits which include the DSGL header, Length0 and
- * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
- * flits for every pair of the remaining N) +1 if (n-1) is odd; and
- * finally the "+((n-1)&1)" adds the one remaining flit needed if
- * (n-1) is odd ...
- */
- n--;
- return (3 * n) / 2 + (n & 1) + 2;
-}
-
-/**
- * flits_to_desc - returns the num of TX descriptors for the given flits
- * @flits: the number of flits
- *
- * Returns the number of TX descriptors needed for the supplied number
- * of flits.
- */
-static inline unsigned int flits_to_desc(unsigned int flits)
-{
- BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
- return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
-}
-
-/**
- * is_eth_imm - can an Ethernet packet be sent as immediate data?
- * @skb: the packet
- *
- * Returns whether an Ethernet packet is small enough to fit completely as
- * immediate data.
- */
-static inline int is_eth_imm(const struct sk_buff *skb)
-{
- /*
- * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
- * which does not accommodate immediate data. We could dike out all
- * of the support code for immediate data but that would tie our hands
- * too much if we ever want to enhace the firmware. It would also
- * create more differences between the PF and VF Drivers.
- */
- return false;
-}
-
-/**
- * calc_tx_flits - calculate the number of flits for a packet TX WR
- * @skb: the packet
- *
- * Returns the number of flits needed for a TX Work Request for the
- * given Ethernet packet, including the needed WR and CPL headers.
- */
-static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
-{
- unsigned int flits;
-
- /*
- * If the skb is small enough, we can pump it out as a work request
- * with only immediate data. In that case we just have to have the
- * TX Packet header plus the skb data in the Work Request.
- */
- if (is_eth_imm(skb))
- return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
- sizeof(__be64));
-
- /*
- * Otherwise, we're going to have to construct a Scatter gather list
- * of the skb body and fragments. We also include the flits necessary
- * for the TX Packet Work Request and CPL. We always have a firmware
- * Write Header (incorporated as part of the cpl_tx_pkt_lso and
- * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
- * message or, if we're doing a Large Send Offload, an LSO CPL message
- * with an embeded TX Packet Write CPL message.
- */
- flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
- if (skb_shinfo(skb)->gso_size)
- flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
- sizeof(struct cpl_tx_pkt_lso_core) +
- sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
- else
- flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
- sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
- return flits;
-}
-
-/**
- * write_sgl - populate a Scatter/Gather List for a packet
- * @skb: the packet
- * @tq: the TX queue we are writing into
- * @sgl: starting location for writing the SGL
- * @end: points right after the end of the SGL
- * @start: start offset into skb main-body data to include in the SGL
- * @addr: the list of DMA bus addresses for the SGL elements
- *
- * Generates a Scatter/Gather List for the buffers that make up a packet.
- * The caller must provide adequate space for the SGL that will be written.
- * The SGL includes all of the packet's page fragments and the data in its
- * main body except for the first @start bytes. @pos must be 16-byte
- * aligned and within a TX descriptor with available space. @end points
- * write after the end of the SGL but does not account for any potential
- * wrap around, i.e., @end > @tq->stat.
- */
-static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
- struct ulptx_sgl *sgl, u64 *end, unsigned int start,
- const dma_addr_t *addr)
-{
- unsigned int i, len;
- struct ulptx_sge_pair *to;
- const struct skb_shared_info *si = skb_shinfo(skb);
- unsigned int nfrags = si->nr_frags;
- struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
-
- len = skb_headlen(skb) - start;
- if (likely(len)) {
- sgl->len0 = htonl(len);
- sgl->addr0 = cpu_to_be64(addr[0] + start);
- nfrags++;
- } else {
- sgl->len0 = htonl(si->frags[0].size);
- sgl->addr0 = cpu_to_be64(addr[1]);
- }
-
- sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
- ULPTX_NSGE(nfrags));
- if (likely(--nfrags == 0))
- return;
- /*
- * Most of the complexity below deals with the possibility we hit the
- * end of the queue in the middle of writing the SGL. For this case
- * only we create the SGL in a temporary buffer and then copy it.
- */
- to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
-
- for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
- to->len[0] = cpu_to_be32(si->frags[i].size);
- to->len[1] = cpu_to_be32(si->frags[++i].size);
- to->addr[0] = cpu_to_be64(addr[i]);
- to->addr[1] = cpu_to_be64(addr[++i]);
- }
- if (nfrags) {
- to->len[0] = cpu_to_be32(si->frags[i].size);
- to->len[1] = cpu_to_be32(0);
- to->addr[0] = cpu_to_be64(addr[i + 1]);
- }
- if (unlikely((u8 *)end > (u8 *)tq->stat)) {
- unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
-
- if (likely(part0))
- memcpy(sgl->sge, buf, part0);
- part1 = (u8 *)end - (u8 *)tq->stat;
- memcpy(tq->desc, (u8 *)buf + part0, part1);
- end = (void *)tq->desc + part1;
- }
- if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
- *(u64 *)end = 0;
-}
-
-/**
- * check_ring_tx_db - check and potentially ring a TX queue's doorbell
- * @adapter: the adapter
- * @tq: the TX queue
- * @n: number of new descriptors to give to HW
- *
- * Ring the doorbel for a TX queue.
- */
-static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
- int n)
-{
- /*
- * Warn if we write doorbells with the wrong priority and write
- * descriptors before telling HW.
- */
- WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO);
- wmb();
- t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- QID(tq->cntxt_id) | PIDX(n));
-}
-
-/**
- * inline_tx_skb - inline a packet's data into TX descriptors
- * @skb: the packet
- * @tq: the TX queue where the packet will be inlined
- * @pos: starting position in the TX queue to inline the packet
- *
- * Inline a packet's contents directly into TX descriptors, starting at
- * the given position within the TX DMA ring.
- * Most of the complexity of this operation is dealing with wrap arounds
- * in the middle of the packet we want to inline.
- */
-static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
- void *pos)
-{
- u64 *p;
- int left = (void *)tq->stat - pos;
-
- if (likely(skb->len <= left)) {
- if (likely(!skb->data_len))
- skb_copy_from_linear_data(skb, pos, skb->len);
- else
- skb_copy_bits(skb, 0, pos, skb->len);
- pos += skb->len;
- } else {
- skb_copy_bits(skb, 0, pos, left);
- skb_copy_bits(skb, left, tq->desc, skb->len - left);
- pos = (void *)tq->desc + (skb->len - left);
- }
-
- /* 0-pad to multiple of 16 */
- p = PTR_ALIGN(pos, 8);
- if ((uintptr_t)p & 8)
- *p = 0;
-}
-
-/*
- * Figure out what HW csum a packet wants and return the appropriate control
- * bits.
- */
-static u64 hwcsum(const struct sk_buff *skb)
-{
- int csum_type;
- const struct iphdr *iph = ip_hdr(skb);
-
- if (iph->version == 4) {
- if (iph->protocol == IPPROTO_TCP)
- csum_type = TX_CSUM_TCPIP;
- else if (iph->protocol == IPPROTO_UDP)
- csum_type = TX_CSUM_UDPIP;
- else {
-nocsum:
- /*
- * unknown protocol, disable HW csum
- * and hope a bad packet is detected
- */
- return TXPKT_L4CSUM_DIS;
- }
- } else {
- /*
- * this doesn't work with extension headers
- */
- const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
-
- if (ip6h->nexthdr == IPPROTO_TCP)
- csum_type = TX_CSUM_TCPIP6;
- else if (ip6h->nexthdr == IPPROTO_UDP)
- csum_type = TX_CSUM_UDPIP6;
- else
- goto nocsum;
- }
-
- if (likely(csum_type >= TX_CSUM_TCPIP))
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
- TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
- else {
- int start = skb_transport_offset(skb);
-
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_CSUM_START(start) |
- TXPKT_CSUM_LOC(start + skb->csum_offset);
- }
-}
-
-/*
- * Stop an Ethernet TX queue and record that state change.
- */
-static void txq_stop(struct sge_eth_txq *txq)
-{
- netif_tx_stop_queue(txq->txq);
- txq->q.stops++;
-}
-
-/*
- * Advance our software state for a TX queue by adding n in use descriptors.
- */
-static inline void txq_advance(struct sge_txq *tq, unsigned int n)
-{
- tq->in_use += n;
- tq->pidx += n;
- if (tq->pidx >= tq->size)
- tq->pidx -= tq->size;
-}
-
-/**
- * t4vf_eth_xmit - add a packet to an Ethernet TX queue
- * @skb: the packet
- * @dev: the egress net device
- *
- * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
- */
-int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- u32 wr_mid;
- u64 cntrl, *end;
- int qidx, credits;
- unsigned int flits, ndesc;
- struct adapter *adapter;
- struct sge_eth_txq *txq;
- const struct port_info *pi;
- struct fw_eth_tx_pkt_vm_wr *wr;
- struct cpl_tx_pkt_core *cpl;
- const struct skb_shared_info *ssi;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
- const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
- sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) +
- sizeof(wr->vlantci));
-
- /*
- * The chip minimum packet length is 10 octets but the firmware
- * command that we are using requires that we copy the Ethernet header
- * (including the VLAN tag) into the header so we reject anything
- * smaller than that ...
- */
- if (unlikely(skb->len < fw_hdr_copy_len))
- goto out_free;
-
- /*
- * Figure out which TX Queue we're going to use.
- */
- pi = netdev_priv(dev);
- adapter = pi->adapter;
- qidx = skb_get_queue_mapping(skb);
- BUG_ON(qidx >= pi->nqsets);
- txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
-
- /*
- * Take this opportunity to reclaim any TX Descriptors whose DMA
- * transfers have completed.
- */
- reclaim_completed_tx(adapter, &txq->q, true);
-
- /*
- * Calculate the number of flits and TX Descriptors we're going to
- * need along with how many TX Descriptors will be left over after
- * we inject our Work Request.
- */
- flits = calc_tx_flits(skb);
- ndesc = flits_to_desc(flits);
- credits = txq_avail(&txq->q) - ndesc;
-
- if (unlikely(credits < 0)) {
- /*
- * Not enough room for this packet's Work Request. Stop the
- * TX Queue and return a "busy" condition. The queue will get
- * started later on when the firmware informs us that space
- * has opened up.
- */
- txq_stop(txq);
- dev_err(adapter->pdev_dev,
- "%s: TX ring %u full while queue awake!\n",
- dev->name, qidx);
- return NETDEV_TX_BUSY;
- }
-
- if (!is_eth_imm(skb) &&
- unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
- /*
- * We need to map the skb into PCI DMA space (because it can't
- * be in-lined directly into the Work Request) and the mapping
- * operation failed. Record the error and drop the packet.
- */
- txq->mapping_err++;
- goto out_free;
- }
-
- wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
- if (unlikely(credits < ETHTXQ_STOP_THRES)) {
- /*
- * After we're done injecting the Work Request for this
- * packet, we'll be below our "stop threshold" so stop the TX
- * Queue now and schedule a request for an SGE Egress Queue
- * Update message. The queue will get started later on when
- * the firmware processes this Work Request and sends us an
- * Egress Queue Status Update message indicating that space
- * has opened up.
- */
- txq_stop(txq);
- wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
- }
-
- /*
- * Start filling in our Work Request. Note that we do _not_ handle
- * the WR Header wrapping around the TX Descriptor Ring. If our
- * maximum header size ever exceeds one TX Descriptor, we'll need to
- * do something else here.
- */
- BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
- wr = (void *)&txq->q.desc[txq->q.pidx];
- wr->equiq_to_len16 = cpu_to_be32(wr_mid);
- wr->r3[0] = cpu_to_be64(0);
- wr->r3[1] = cpu_to_be64(0);
- skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
- end = (u64 *)wr + flits;
-
- /*
- * If this is a Large Send Offload packet we'll put in an LSO CPL
- * message with an encapsulated TX Packet CPL message. Otherwise we
- * just use a TX Packet CPL message.
- */
- ssi = skb_shinfo(skb);
- if (ssi->gso_size) {
- struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
- bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
- int l3hdr_len = skb_network_header_len(skb);
- int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
-
- wr->op_immdlen =
- cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
- FW_WR_IMMDLEN(sizeof(*lso) +
- sizeof(*cpl)));
- /*
- * Fill in the LSO CPL message.
- */
- lso->lso_ctrl =
- cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE |
- LSO_LAST_SLICE |
- LSO_IPV6(v6) |
- LSO_ETHHDR_LEN(eth_xtra_len/4) |
- LSO_IPHDR_LEN(l3hdr_len/4) |
- LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
- lso->ipid_ofst = cpu_to_be16(0);
- lso->mss = cpu_to_be16(ssi->gso_size);
- lso->seqno_offset = cpu_to_be32(0);
- lso->len = cpu_to_be32(skb->len);
-
- /*
- * Set up TX Packet CPL pointer, control word and perform
- * accounting.
- */
- cpl = (void *)(lso + 1);
- cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN(l3hdr_len) |
- TXPKT_ETHHDR_LEN(eth_xtra_len));
- txq->tso++;
- txq->tx_cso += ssi->gso_segs;
- } else {
- int len;
-
- len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
- wr->op_immdlen =
- cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
- FW_WR_IMMDLEN(len));
-
- /*
- * Set up TX Packet CPL pointer, control word and perform
- * accounting.
- */
- cpl = (void *)(wr + 1);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
- txq->tx_cso++;
- } else
- cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
- }
-
- /*
- * If there's a VLAN tag present, add that to the list of things to
- * do in this Work Request.
- */
- if (vlan_tx_tag_present(skb)) {
- txq->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
- }
-
- /*
- * Fill in the TX Packet CPL message header.
- */
- cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
- TXPKT_INTF(pi->port_id) |
- TXPKT_PF(0));
- cpl->pack = cpu_to_be16(0);
- cpl->len = cpu_to_be16(skb->len);
- cpl->ctrl1 = cpu_to_be64(cntrl);
-
-#ifdef T4_TRACE
- T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
- "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
- ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
-#endif
-
- /*
- * Fill in the body of the TX Packet CPL message with either in-lined
- * data or a Scatter/Gather List.
- */
- if (is_eth_imm(skb)) {
- /*
- * In-line the packet's data and free the skb since we don't
- * need it any longer.
- */
- inline_tx_skb(skb, &txq->q, cpl + 1);
- dev_kfree_skb(skb);
- } else {
- /*
- * Write the skb's Scatter/Gather list into the TX Packet CPL
- * message and retain a pointer to the skb so we can free it
- * later when its DMA completes. (We store the skb pointer
- * in the Software Descriptor corresponding to the last TX
- * Descriptor used by the Work Request.)
- *
- * The retained skb will be freed when the corresponding TX
- * Descriptors are reclaimed after their DMAs complete.
- * However, this could take quite a while since, in general,
- * the hardware is set up to be lazy about sending DMA
- * completion notifications to us and we mostly perform TX
- * reclaims in the transmit routine.
- *
- * This is good for performamce but means that we rely on new
- * TX packets arriving to run the destructors of completed
- * packets, which open up space in their sockets' send queues.
- * Sometimes we do not get such new packets causing TX to
- * stall. A single UDP transmitter is a good example of this
- * situation. We have a clean up timer that periodically
- * reclaims completed packets but it doesn't run often enough
- * (nor do we want it to) to prevent lengthy stalls. A
- * solution to this problem is to run the destructor early,
- * after the packet is queued but before it's DMAd. A con is
- * that we lie to socket memory accounting, but the amount of
- * extra memory is reasonable (limited by the number of TX
- * descriptors), the packets do actually get freed quickly by
- * new packets almost always, and for protocols like TCP that
- * wait for acks to really free up the data the extra memory
- * is even less. On the positive side we run the destructors
- * on the sending CPU rather than on a potentially different
- * completing CPU, usually a good thing.
- *
- * Run the destructor before telling the DMA engine about the
- * packet to make sure it doesn't complete and get freed
- * prematurely.
- */
- struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
- struct sge_txq *tq = &txq->q;
- int last_desc;
-
- /*
- * If the Work Request header was an exact multiple of our TX
- * Descriptor length, then it's possible that the starting SGL
- * pointer lines up exactly with the end of our TX Descriptor
- * ring. If that's the case, wrap around to the beginning
- * here ...
- */
- if (unlikely((void *)sgl == (void *)tq->stat)) {
- sgl = (void *)tq->desc;
- end = (void *)((void *)tq->desc +
- ((void *)end - (void *)tq->stat));
- }
-
- write_sgl(skb, tq, sgl, end, 0, addr);
- skb_orphan(skb);
-
- last_desc = tq->pidx + ndesc - 1;
- if (last_desc >= tq->size)
- last_desc -= tq->size;
- tq->sdesc[last_desc].skb = skb;
- tq->sdesc[last_desc].sgl = sgl;
- }
-
- /*
- * Advance our internal TX Queue state, tell the hardware about
- * the new TX descriptors and return success.
- */
- txq_advance(&txq->q, ndesc);
- dev->trans_start = jiffies;
- ring_tx_db(adapter, &txq->q, ndesc);
- return NETDEV_TX_OK;
-
-out_free:
- /*
- * An error of some sort happened. Free the TX skb and tell the
- * OS that we've "dealt" with the packet ...
- */
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/**
- * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
- * @gl: the gather list
- * @skb_len: size of sk_buff main body if it carries fragments
- * @pull_len: amount of data to move to the sk_buff's main body
- *
- * Builds an sk_buff from the given packet gather list. Returns the
- * sk_buff or %NULL if sk_buff allocation failed.
- */
-struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
- unsigned int skb_len, unsigned int pull_len)
-{
- struct sk_buff *skb;
- struct skb_shared_info *ssi;
-
- /*
- * If the ingress packet is small enough, allocate an skb large enough
- * for all of the data and copy it inline. Otherwise, allocate an skb
- * with enough room to pull in the header and reference the rest of
- * the data via the skb fragment list.
- *
- * Below we rely on RX_COPY_THRES being less than the smallest Rx
- * buff! size, which is expected since buffers are at least
- * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one
- * fragment.
- */
- if (gl->tot_len <= RX_COPY_THRES) {
- /* small packets have only one fragment */
- skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
- if (unlikely(!skb))
- goto out;
- __skb_put(skb, gl->tot_len);
- skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
- } else {
- skb = alloc_skb(skb_len, GFP_ATOMIC);
- if (unlikely(!skb))
- goto out;
- __skb_put(skb, pull_len);
- skb_copy_to_linear_data(skb, gl->va, pull_len);
-
- ssi = skb_shinfo(skb);
- ssi->frags[0].page = gl->frags[0].page;
- ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
- ssi->frags[0].size = gl->frags[0].size - pull_len;
- if (gl->nfrags > 1)
- memcpy(&ssi->frags[1], &gl->frags[1],
- (gl->nfrags-1) * sizeof(skb_frag_t));
- ssi->nr_frags = gl->nfrags;
-
- skb->len = gl->tot_len;
- skb->data_len = skb->len - pull_len;
- skb->truesize += skb->data_len;
-
- /* Get a reference for the last page, we don't own it */
- get_page(gl->frags[gl->nfrags - 1].page);
- }
-
-out:
- return skb;
-}
-
-/**
- * t4vf_pktgl_free - free a packet gather list
- * @gl: the gather list
- *
- * Releases the pages of a packet gather list. We do not own the last
- * page on the list and do not free it.
- */
-void t4vf_pktgl_free(const struct pkt_gl *gl)
-{
- int frag;
-
- frag = gl->nfrags - 1;
- while (frag--)
- put_page(gl->frags[frag].page);
-}
-
-/**
- * copy_frags - copy fragments from gather list into skb_shared_info
- * @si: destination skb shared info structure
- * @gl: source internal packet gather list
- * @offset: packet start offset in first page
- *
- * Copy an internal packet gather list into a Linux skb_shared_info
- * structure.
- */
-static inline void copy_frags(struct skb_shared_info *si,
- const struct pkt_gl *gl,
- unsigned int offset)
-{
- unsigned int n;
-
- /* usually there's just one frag */
- si->frags[0].page = gl->frags[0].page;
- si->frags[0].page_offset = gl->frags[0].page_offset + offset;
- si->frags[0].size = gl->frags[0].size - offset;
- si->nr_frags = gl->nfrags;
-
- n = gl->nfrags - 1;
- if (n)
- memcpy(&si->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
-
- /* get a reference to the last page, we don't own it */
- get_page(gl->frags[n].page);
-}
-
-/**
- * do_gro - perform Generic Receive Offload ingress packet processing
- * @rxq: ingress RX Ethernet Queue
- * @gl: gather list for ingress packet
- * @pkt: CPL header for last packet fragment
- *
- * Perform Generic Receive Offload (GRO) ingress packet processing.
- * We use the standard Linux GRO interfaces for this.
- */
-static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
- const struct cpl_rx_pkt *pkt)
-{
- int ret;
- struct sk_buff *skb;
-
- skb = napi_get_frags(&rxq->rspq.napi);
- if (unlikely(!skb)) {
- t4vf_pktgl_free(gl);
- rxq->stats.rx_drops++;
- return;
- }
-
- copy_frags(skb_shinfo(skb), gl, PKTSHIFT);
- skb->len = gl->tot_len - PKTSHIFT;
- skb->data_len = skb->len;
- skb->truesize += skb->data_len;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb_record_rx_queue(skb, rxq->rspq.idx);
-
- if (pkt->vlan_ex)
- __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
- ret = napi_gro_frags(&rxq->rspq.napi);
-
- if (ret == GRO_HELD)
- rxq->stats.lro_pkts++;
- else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
- rxq->stats.lro_merged++;
- rxq->stats.pkts++;
- rxq->stats.rx_cso++;
-}
-
-/**
- * t4vf_ethrx_handler - process an ingress ethernet packet
- * @rspq: the response queue that received the packet
- * @rsp: the response queue descriptor holding the RX_PKT message
- * @gl: the gather list of packet fragments
- *
- * Process an ingress ethernet packet and deliver it to the stack.
- */
-int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
- const struct pkt_gl *gl)
-{
- struct sk_buff *skb;
- const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
- bool csum_ok = pkt->csum_calc && !pkt->err_vec;
- struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
-
- /*
- * If this is a good TCP packet and we have Generic Receive Offload
- * enabled, handle the packet in the GRO path.
- */
- if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
- (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
- !pkt->ip_frag) {
- do_gro(rxq, gl, pkt);
- return 0;
- }
-
- /*
- * Convert the Packet Gather List into an skb.
- */
- skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
- if (unlikely(!skb)) {
- t4vf_pktgl_free(gl);
- rxq->stats.rx_drops++;
- return 0;
- }
- __skb_pull(skb, PKTSHIFT);
- skb->protocol = eth_type_trans(skb, rspq->netdev);
- skb_record_rx_queue(skb, rspq->idx);
- rxq->stats.pkts++;
-
- if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
- !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
- if (!pkt->ip_frag)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else {
- __sum16 c = (__force __sum16)pkt->csum;
- skb->csum = csum_unfold(c);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
- rxq->stats.rx_cso++;
- } else
- skb_checksum_none_assert(skb);
-
- if (pkt->vlan_ex) {
- rxq->stats.vlan_ex++;
- __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
- }
-
- netif_receive_skb(skb);
-
- return 0;
-}
-
-/**
- * is_new_response - check if a response is newly written
- * @rc: the response control descriptor
- * @rspq: the response queue
- *
- * Returns true if a response descriptor contains a yet unprocessed
- * response.
- */
-static inline bool is_new_response(const struct rsp_ctrl *rc,
- const struct sge_rspq *rspq)
-{
- return RSPD_GEN(rc->type_gen) == rspq->gen;
-}
-
-/**
- * restore_rx_bufs - put back a packet's RX buffers
- * @gl: the packet gather list
- * @fl: the SGE Free List
- * @nfrags: how many fragments in @si
- *
- * Called when we find out that the current packet, @si, can't be
- * processed right away for some reason. This is a very rare event and
- * there's no effort to make this suspension/resumption process
- * particularly efficient.
- *
- * We implement the suspension by putting all of the RX buffers associated
- * with the current packet back on the original Free List. The buffers
- * have already been unmapped and are left unmapped, we mark them as
- * unmapped in order to prevent further unmapping attempts. (Effectively
- * this function undoes the series of @unmap_rx_buf calls which were done
- * to create the current packet's gather list.) This leaves us ready to
- * restart processing of the packet the next time we start processing the
- * RX Queue ...
- */
-static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
- int frags)
-{
- struct rx_sw_desc *sdesc;
-
- while (frags--) {
- if (fl->cidx == 0)
- fl->cidx = fl->size - 1;
- else
- fl->cidx--;
- sdesc = &fl->sdesc[fl->cidx];
- sdesc->page = gl->frags[frags].page;
- sdesc->dma_addr |= RX_UNMAPPED_BUF;
- fl->avail++;
- }
-}
-
-/**
- * rspq_next - advance to the next entry in a response queue
- * @rspq: the queue
- *
- * Updates the state of a response queue to advance it to the next entry.
- */
-static inline void rspq_next(struct sge_rspq *rspq)
-{
- rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
- if (unlikely(++rspq->cidx == rspq->size)) {
- rspq->cidx = 0;
- rspq->gen ^= 1;
- rspq->cur_desc = rspq->desc;
- }
-}
-
-/**
- * process_responses - process responses from an SGE response queue
- * @rspq: the ingress response queue to process
- * @budget: how many responses can be processed in this round
- *
- * Process responses from a Scatter Gather Engine response queue up to
- * the supplied budget. Responses include received packets as well as
- * control messages from firmware or hardware.
- *
- * Additionally choose the interrupt holdoff time for the next interrupt
- * on this queue. If the system is under memory shortage use a fairly
- * long delay to help recovery.
- */
-int process_responses(struct sge_rspq *rspq, int budget)
-{
- struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
- int budget_left = budget;
-
- while (likely(budget_left)) {
- int ret, rsp_type;
- const struct rsp_ctrl *rc;
-
- rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
- if (!is_new_response(rc, rspq))
- break;
-
- /*
- * Figure out what kind of response we've received from the
- * SGE.
- */
- rmb();
- rsp_type = RSPD_TYPE(rc->type_gen);
- if (likely(rsp_type == RSP_TYPE_FLBUF)) {
- skb_frag_t *fp;
- struct pkt_gl gl;
- const struct rx_sw_desc *sdesc;
- u32 bufsz, frag;
- u32 len = be32_to_cpu(rc->pldbuflen_qid);
-
- /*
- * If we get a "new buffer" message from the SGE we
- * need to move on to the next Free List buffer.
- */
- if (len & RSPD_NEWBUF) {
- /*
- * We get one "new buffer" message when we
- * first start up a queue so we need to ignore
- * it when our offset into the buffer is 0.
- */
- if (likely(rspq->offset > 0)) {
- free_rx_bufs(rspq->adapter, &rxq->fl,
- 1);
- rspq->offset = 0;
- }
- len = RSPD_LEN(len);
- }
- gl.tot_len = len;
-
- /*
- * Gather packet fragments.
- */
- for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
- BUG_ON(frag >= MAX_SKB_FRAGS);
- BUG_ON(rxq->fl.avail == 0);
- sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
- bufsz = get_buf_size(sdesc);
- fp->page = sdesc->page;
- fp->page_offset = rspq->offset;
- fp->size = min(bufsz, len);
- len -= fp->size;
- if (!len)
- break;
- unmap_rx_buf(rspq->adapter, &rxq->fl);
- }
- gl.nfrags = frag+1;
-
- /*
- * Last buffer remains mapped so explicitly make it
- * coherent for CPU access and start preloading first
- * cache line ...
- */
- dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
- get_buf_addr(sdesc),
- fp->size, DMA_FROM_DEVICE);
- gl.va = (page_address(gl.frags[0].page) +
- gl.frags[0].page_offset);
- prefetch(gl.va);
-
- /*
- * Hand the new ingress packet to the handler for
- * this Response Queue.
- */
- ret = rspq->handler(rspq, rspq->cur_desc, &gl);
- if (likely(ret == 0))
- rspq->offset += ALIGN(fp->size, FL_ALIGN);
- else
- restore_rx_bufs(&gl, &rxq->fl, frag);
- } else if (likely(rsp_type == RSP_TYPE_CPL)) {
- ret = rspq->handler(rspq, rspq->cur_desc, NULL);
- } else {
- WARN_ON(rsp_type > RSP_TYPE_CPL);
- ret = 0;
- }
-
- if (unlikely(ret)) {
- /*
- * Couldn't process descriptor, back off for recovery.
- * We use the SGE's last timer which has the longest
- * interrupt coalescing value ...
- */
- const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
- rspq->next_intr_params =
- QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
- break;
- }
-
- rspq_next(rspq);
- budget_left--;
- }
-
- /*
- * If this is a Response Queue with an associated Free List and
- * at least two Egress Queue units available in the Free List
- * for new buffer pointers, refill the Free List.
- */
- if (rspq->offset >= 0 &&
- rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
- __refill_fl(rspq->adapter, &rxq->fl);
- return budget - budget_left;
-}
-
-/**
- * napi_rx_handler - the NAPI handler for RX processing
- * @napi: the napi instance
- * @budget: how many packets we can process in this round
- *
- * Handler for new data events when using NAPI. This does not need any
- * locking or protection from interrupts as data interrupts are off at
- * this point and other adapter interrupts do not interfere (the latter
- * in not a concern at all with MSI-X as non-data interrupts then have
- * a separate handler).
- */
-static int napi_rx_handler(struct napi_struct *napi, int budget)
-{
- unsigned int intr_params;
- struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
- int work_done = process_responses(rspq, budget);
-
- if (likely(work_done < budget)) {
- napi_complete(napi);
- intr_params = rspq->next_intr_params;
- rspq->next_intr_params = rspq->intr_params;
- } else
- intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
-
- if (unlikely(work_done == 0))
- rspq->unhandled_irqs++;
-
- t4_write_reg(rspq->adapter,
- T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- CIDXINC(work_done) |
- INGRESSQID((u32)rspq->cntxt_id) |
- SEINTARM(intr_params));
- return work_done;
-}
-
-/*
- * The MSI-X interrupt handler for an SGE response queue for the NAPI case
- * (i.e., response queue serviced by NAPI polling).
- */
-irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
-{
- struct sge_rspq *rspq = cookie;
-
- napi_schedule(&rspq->napi);
- return IRQ_HANDLED;
-}
-
-/*
- * Process the indirect interrupt entries in the interrupt queue and kick off
- * NAPI for each queue that has generated an entry.
- */
-static unsigned int process_intrq(struct adapter *adapter)
-{
- struct sge *s = &adapter->sge;
- struct sge_rspq *intrq = &s->intrq;
- unsigned int work_done;
-
- spin_lock(&adapter->sge.intrq_lock);
- for (work_done = 0; ; work_done++) {
- const struct rsp_ctrl *rc;
- unsigned int qid, iq_idx;
- struct sge_rspq *rspq;
-
- /*
- * Grab the next response from the interrupt queue and bail
- * out if it's not a new response.
- */
- rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
- if (!is_new_response(rc, intrq))
- break;
-
- /*
- * If the response isn't a forwarded interrupt message issue a
- * error and go on to the next response message. This should
- * never happen ...
- */
- rmb();
- if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
- dev_err(adapter->pdev_dev,
- "Unexpected INTRQ response type %d\n",
- RSPD_TYPE(rc->type_gen));
- continue;
- }
-
- /*
- * Extract the Queue ID from the interrupt message and perform
- * sanity checking to make sure it really refers to one of our
- * Ingress Queues which is active and matches the queue's ID.
- * None of these error conditions should ever happen so we may
- * want to either make them fatal and/or conditionalized under
- * DEBUG.
- */
- qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
- iq_idx = IQ_IDX(s, qid);
- if (unlikely(iq_idx >= MAX_INGQ)) {
- dev_err(adapter->pdev_dev,
- "Ingress QID %d out of range\n", qid);
- continue;
- }
- rspq = s->ingr_map[iq_idx];
- if (unlikely(rspq == NULL)) {
- dev_err(adapter->pdev_dev,
- "Ingress QID %d RSPQ=NULL\n", qid);
- continue;
- }
- if (unlikely(rspq->abs_id != qid)) {
- dev_err(adapter->pdev_dev,
- "Ingress QID %d refers to RSPQ %d\n",
- qid, rspq->abs_id);
- continue;
- }
-
- /*
- * Schedule NAPI processing on the indicated Response Queue
- * and move on to the next entry in the Forwarded Interrupt
- * Queue.
- */
- napi_schedule(&rspq->napi);
- rspq_next(intrq);
- }
-
- t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- CIDXINC(work_done) |
- INGRESSQID(intrq->cntxt_id) |
- SEINTARM(intrq->intr_params));
-
- spin_unlock(&adapter->sge.intrq_lock);
-
- return work_done;
-}
-
-/*
- * The MSI interrupt handler handles data events from SGE response queues as
- * well as error and other async events as they all use the same MSI vector.
- */
-irqreturn_t t4vf_intr_msi(int irq, void *cookie)
-{
- struct adapter *adapter = cookie;
-
- process_intrq(adapter);
- return IRQ_HANDLED;
-}
-
-/**
- * t4vf_intr_handler - select the top-level interrupt handler
- * @adapter: the adapter
- *
- * Selects the top-level interrupt handler based on the type of interrupts
- * (MSI-X or MSI).
- */
-irq_handler_t t4vf_intr_handler(struct adapter *adapter)
-{
- BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
- if (adapter->flags & USING_MSIX)
- return t4vf_sge_intr_msix;
- else
- return t4vf_intr_msi;
-}
-
-/**
- * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
- * @data: the adapter
- *
- * Runs periodically from a timer to perform maintenance of SGE RX queues.
- *
- * a) Replenishes RX queues that have run out due to memory shortage.
- * Normally new RX buffers are added when existing ones are consumed but
- * when out of memory a queue can become empty. We schedule NAPI to do
- * the actual refill.
- */
-static void sge_rx_timer_cb(unsigned long data)
-{
- struct adapter *adapter = (struct adapter *)data;
- struct sge *s = &adapter->sge;
- unsigned int i;
-
- /*
- * Scan the "Starving Free Lists" flag array looking for any Free
- * Lists in need of more free buffers. If we find one and it's not
- * being actively polled, then bump its "starving" counter and attempt
- * to refill it. If we're successful in adding enough buffers to push
- * the Free List over the starving threshold, then we can clear its
- * "starving" status.
- */
- for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
- unsigned long m;
-
- for (m = s->starving_fl[i]; m; m &= m - 1) {
- unsigned int id = __ffs(m) + i * BITS_PER_LONG;
- struct sge_fl *fl = s->egr_map[id];
-
- clear_bit(id, s->starving_fl);
- smp_mb__after_clear_bit();
-
- /*
- * Since we are accessing fl without a lock there's a
- * small probability of a false positive where we
- * schedule napi but the FL is no longer starving.
- * No biggie.
- */
- if (fl_starving(fl)) {
- struct sge_eth_rxq *rxq;
-
- rxq = container_of(fl, struct sge_eth_rxq, fl);
- if (napi_reschedule(&rxq->rspq.napi))
- fl->starving++;
- else
- set_bit(id, s->starving_fl);
- }
- }
- }
-
- /*
- * Reschedule the next scan for starving Free Lists ...
- */
- mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
-}
-
-/**
- * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
- * @data: the adapter
- *
- * Runs periodically from a timer to perform maintenance of SGE TX queues.
- *
- * b) Reclaims completed Tx packets for the Ethernet queues. Normally
- * packets are cleaned up by new Tx packets, this timer cleans up packets
- * when no new packets are being submitted. This is essential for pktgen,
- * at least.
- */
-static void sge_tx_timer_cb(unsigned long data)
-{
- struct adapter *adapter = (struct adapter *)data;
- struct sge *s = &adapter->sge;
- unsigned int i, budget;
-
- budget = MAX_TIMER_TX_RECLAIM;
- i = s->ethtxq_rover;
- do {
- struct sge_eth_txq *txq = &s->ethtxq[i];
-
- if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
- int avail = reclaimable(&txq->q);
-
- if (avail > budget)
- avail = budget;
-
- free_tx_desc(adapter, &txq->q, avail, true);
- txq->q.in_use -= avail;
- __netif_tx_unlock(txq->txq);
-
- budget -= avail;
- if (!budget)
- break;
- }
-
- i++;
- if (i >= s->ethqsets)
- i = 0;
- } while (i != s->ethtxq_rover);
- s->ethtxq_rover = i;
-
- /*
- * If we found too many reclaimable packets schedule a timer in the
- * near future to continue where we left off. Otherwise the next timer
- * will be at its normal interval.
- */
- mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
-}
-
-/**
- * t4vf_sge_alloc_rxq - allocate an SGE RX Queue
- * @adapter: the adapter
- * @rspq: pointer to to the new rxq's Response Queue to be filled in
- * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
- * @dev: the network device associated with the new rspq
- * @intr_dest: MSI-X vector index (overriden in MSI mode)
- * @fl: pointer to the new rxq's Free List to be filled in
- * @hnd: the interrupt handler to invoke for the rspq
- */
-int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
- bool iqasynch, struct net_device *dev,
- int intr_dest,
- struct sge_fl *fl, rspq_handler_t hnd)
-{
- struct port_info *pi = netdev_priv(dev);
- struct fw_iq_cmd cmd, rpl;
- int ret, iqandst, flsz = 0;
-
- /*
- * If we're using MSI interrupts and we're not initializing the
- * Forwarded Interrupt Queue itself, then set up this queue for
- * indirect interrupts to the Forwarded Interrupt Queue. Obviously
- * the Forwarded Interrupt Queue must be set up before any other
- * ingress queue ...
- */
- if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
- iqandst = SGE_INTRDST_IQ;
- intr_dest = adapter->sge.intrq.abs_id;
- } else
- iqandst = SGE_INTRDST_PCI;
-
- /*
- * Allocate the hardware ring for the Response Queue. The size needs
- * to be a multiple of 16 which includes the mandatory status entry
- * (regardless of whether the Status Page capabilities are enabled or
- * not).
- */
- rspq->size = roundup(rspq->size, 16);
- rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
- 0, &rspq->phys_addr, NULL, 0);
- if (!rspq->desc)
- return -ENOMEM;
-
- /*
- * Fill in the Ingress Queue Command. Note: Ideally this code would
- * be in t4vf_hw.c but there are so many parameters and dependencies
- * on our Linux SGE state that we would end up having to pass tons of
- * parameters. We'll have to think about how this might be migrated
- * into OS-independent common code ...
- */
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_CMD_EXEC);
- cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC |
- FW_IQ_CMD_IQSTART(1) |
- FW_LEN16(cmd));
- cmd.type_to_iqandstindex =
- cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
- FW_IQ_CMD_IQASYNCH(iqasynch) |
- FW_IQ_CMD_VIID(pi->viid) |
- FW_IQ_CMD_IQANDST(iqandst) |
- FW_IQ_CMD_IQANUS(1) |
- FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) |
- FW_IQ_CMD_IQANDSTINDEX(intr_dest));
- cmd.iqdroprss_to_iqesize =
- cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) |
- FW_IQ_CMD_IQGTSMODE |
- FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) |
- FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4));
- cmd.iqsize = cpu_to_be16(rspq->size);
- cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
-
- if (fl) {
- /*
- * Allocate the ring for the hardware free list (with space
- * for its status page) along with the associated software
- * descriptor ring. The free list size needs to be a multiple
- * of the Egress Queue Unit.
- */
- fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
- fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
- sizeof(__be64), sizeof(struct rx_sw_desc),
- &fl->addr, &fl->sdesc, STAT_LEN);
- if (!fl->desc) {
- ret = -ENOMEM;
- goto err;
- }
-
- /*
- * Calculate the size of the hardware free list ring plus
- * Status Page (which the SGE will place after the end of the
- * free list ring) in Egress Queue Units.
- */
- flsz = (fl->size / FL_PER_EQ_UNIT +
- STAT_LEN / EQ_UNIT);
-
- /*
- * Fill in all the relevant firmware Ingress Queue Command
- * fields for the free list.
- */
- cmd.iqns_to_fl0congen =
- cpu_to_be32(
- FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) |
- FW_IQ_CMD_FL0PACKEN |
- FW_IQ_CMD_FL0PADEN);
- cmd.fl0dcaen_to_fl0cidxfthresh =
- cpu_to_be16(
- FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) |
- FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B));
- cmd.fl0size = cpu_to_be16(flsz);
- cmd.fl0addr = cpu_to_be64(fl->addr);
- }
-
- /*
- * Issue the firmware Ingress Queue Command and extract the results if
- * it completes successfully.
- */
- ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
- if (ret)
- goto err;
-
- netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
- rspq->cur_desc = rspq->desc;
- rspq->cidx = 0;
- rspq->gen = 1;
- rspq->next_intr_params = rspq->intr_params;
- rspq->cntxt_id = be16_to_cpu(rpl.iqid);
- rspq->abs_id = be16_to_cpu(rpl.physiqid);
- rspq->size--; /* subtract status entry */
- rspq->adapter = adapter;
- rspq->netdev = dev;
- rspq->handler = hnd;
-
- /* set offset to -1 to distinguish ingress queues without FL */
- rspq->offset = fl ? 0 : -1;
-
- if (fl) {
- fl->cntxt_id = be16_to_cpu(rpl.fl0id);
- fl->avail = 0;
- fl->pend_cred = 0;
- fl->pidx = 0;
- fl->cidx = 0;
- fl->alloc_failed = 0;
- fl->large_alloc_failed = 0;
- fl->starving = 0;
- refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
- }
-
- return 0;
-
-err:
- /*
- * An error occurred. Clean up our partial allocation state and
- * return the error.
- */
- if (rspq->desc) {
- dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
- rspq->desc, rspq->phys_addr);
- rspq->desc = NULL;
- }
- if (fl && fl->desc) {
- kfree(fl->sdesc);
- fl->sdesc = NULL;
- dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
- fl->desc, fl->addr);
- fl->desc = NULL;
- }
- return ret;
-}
-
-/**
- * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
- * @adapter: the adapter
- * @txq: pointer to the new txq to be filled in
- * @devq: the network TX queue associated with the new txq
- * @iqid: the relative ingress queue ID to which events relating to
- * the new txq should be directed
- */
-int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
- struct net_device *dev, struct netdev_queue *devq,
- unsigned int iqid)
-{
- int ret, nentries;
- struct fw_eq_eth_cmd cmd, rpl;
- struct port_info *pi = netdev_priv(dev);
-
- /*
- * Calculate the size of the hardware TX Queue (including the Status
- * Page on the end of the TX Queue) in units of TX Descriptors.
- */
- nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
-
- /*
- * Allocate the hardware ring for the TX ring (with space for its
- * status page) along with the associated software descriptor ring.
- */
- txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
- sizeof(struct tx_desc),
- sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
- if (!txq->q.desc)
- return -ENOMEM;
-
- /*
- * Fill in the Egress Queue Command. Note: As with the direct use of
- * the firmware Ingress Queue COmmand above in our RXQ allocation
- * routine, ideally, this code would be in t4vf_hw.c. Again, we'll
- * have to see if there's some reasonable way to parameterize it
- * into the common code ...
- */
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_CMD_EXEC);
- cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
- FW_EQ_ETH_CMD_EQSTART |
- FW_LEN16(cmd));
- cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid));
- cmd.fetchszm_to_iqid =
- cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
- FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
- FW_EQ_ETH_CMD_IQID(iqid));
- cmd.dcaen_to_eqsize =
- cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) |
- FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) |
- FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) |
- FW_EQ_ETH_CMD_EQSIZE(nentries));
- cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
-
- /*
- * Issue the firmware Egress Queue Command and extract the results if
- * it completes successfully.
- */
- ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
- if (ret) {
- /*
- * The girmware Ingress Queue Command failed for some reason.
- * Free up our partial allocation state and return the error.
- */
- kfree(txq->q.sdesc);
- txq->q.sdesc = NULL;
- dma_free_coherent(adapter->pdev_dev,
- nentries * sizeof(struct tx_desc),
- txq->q.desc, txq->q.phys_addr);
- txq->q.desc = NULL;
- return ret;
- }
-
- txq->q.in_use = 0;
- txq->q.cidx = 0;
- txq->q.pidx = 0;
- txq->q.stat = (void *)&txq->q.desc[txq->q.size];
- txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd));
- txq->q.abs_id =
- FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd));
- txq->txq = devq;
- txq->tso = 0;
- txq->tx_cso = 0;
- txq->vlan_ins = 0;
- txq->q.stops = 0;
- txq->q.restarts = 0;
- txq->mapping_err = 0;
- return 0;
-}
-
-/*
- * Free the DMA map resources associated with a TX queue.
- */
-static void free_txq(struct adapter *adapter, struct sge_txq *tq)
-{
- dma_free_coherent(adapter->pdev_dev,
- tq->size * sizeof(*tq->desc) + STAT_LEN,
- tq->desc, tq->phys_addr);
- tq->cntxt_id = 0;
- tq->sdesc = NULL;
- tq->desc = NULL;
-}
-
-/*
- * Free the resources associated with a response queue (possibly including a
- * free list).
- */
-static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
- struct sge_fl *fl)
-{
- unsigned int flid = fl ? fl->cntxt_id : 0xffff;
-
- t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
- rspq->cntxt_id, flid, 0xffff);
- dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
- rspq->desc, rspq->phys_addr);
- netif_napi_del(&rspq->napi);
- rspq->netdev = NULL;
- rspq->cntxt_id = 0;
- rspq->abs_id = 0;
- rspq->desc = NULL;
-
- if (fl) {
- free_rx_bufs(adapter, fl, fl->avail);
- dma_free_coherent(adapter->pdev_dev,
- fl->size * sizeof(*fl->desc) + STAT_LEN,
- fl->desc, fl->addr);
- kfree(fl->sdesc);
- fl->sdesc = NULL;
- fl->cntxt_id = 0;
- fl->desc = NULL;
- }
-}
-
-/**
- * t4vf_free_sge_resources - free SGE resources
- * @adapter: the adapter
- *
- * Frees resources used by the SGE queue sets.
- */
-void t4vf_free_sge_resources(struct adapter *adapter)
-{
- struct sge *s = &adapter->sge;
- struct sge_eth_rxq *rxq = s->ethrxq;
- struct sge_eth_txq *txq = s->ethtxq;
- struct sge_rspq *evtq = &s->fw_evtq;
- struct sge_rspq *intrq = &s->intrq;
- int qs;
-
- for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
- if (rxq->rspq.desc)
- free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
- if (txq->q.desc) {
- t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
- free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
- kfree(txq->q.sdesc);
- free_txq(adapter, &txq->q);
- }
- }
- if (evtq->desc)
- free_rspq_fl(adapter, evtq, NULL);
- if (intrq->desc)
- free_rspq_fl(adapter, intrq, NULL);
-}
-
-/**
- * t4vf_sge_start - enable SGE operation
- * @adapter: the adapter
- *
- * Start tasklets and timers associated with the DMA engine.
- */
-void t4vf_sge_start(struct adapter *adapter)
-{
- adapter->sge.ethtxq_rover = 0;
- mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
- mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
-}
-
-/**
- * t4vf_sge_stop - disable SGE operation
- * @adapter: the adapter
- *
- * Stop tasklets and timers associated with the DMA engine. Note that
- * this is effective only if measures have been taken to disable any HW
- * events that may restart them.
- */
-void t4vf_sge_stop(struct adapter *adapter)
-{
- struct sge *s = &adapter->sge;
-
- if (s->rx_timer.function)
- del_timer_sync(&s->rx_timer);
- if (s->tx_timer.function)
- del_timer_sync(&s->tx_timer);
-}
-
-/**
- * t4vf_sge_init - initialize SGE
- * @adapter: the adapter
- *
- * Performs SGE initialization needed every time after a chip reset.
- * We do not initialize any of the queue sets here, instead the driver
- * top-level must request those individually. We also do not enable DMA
- * here, that should be done after the queues have been set up.
- */
-int t4vf_sge_init(struct adapter *adapter)
-{
- struct sge_params *sge_params = &adapter->params.sge;
- u32 fl0 = sge_params->sge_fl_buffer_size[0];
- u32 fl1 = sge_params->sge_fl_buffer_size[1];
- struct sge *s = &adapter->sge;
-
- /*
- * Start by vetting the basic SGE parameters which have been set up by
- * the Physical Function Driver. Ideally we should be able to deal
- * with _any_ configuration. Practice is different ...
- */
- if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
- dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
- fl0, fl1);
- return -EINVAL;
- }
- if ((sge_params->sge_control & RXPKTCPLMODE) == 0) {
- dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
- return -EINVAL;
- }
-
- /*
- * Now translate the adapter parameters into our internal forms.
- */
- if (fl1)
- FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
- STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64);
- PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
- FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
- SGE_INGPADBOUNDARY_SHIFT);
-
- /*
- * Set up tasklet timers.
- */
- setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
- setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
-
- /*
- * Initialize Forwarded Interrupt Queue lock.
- */
- spin_lock_init(&s->intrq_lock);
-
- return 0;
-}
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 39cf9b9bd67..a7c5e8831e8 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -116,7 +116,7 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
.ndo_start_xmit = dummy_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_set_mac_address = dummy_set_address,
.ndo_get_stats64 = dummy_get_stats64,
};
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
deleted file mode 100644
index 24f41da8c4b..00000000000
--- a/drivers/net/e1000/e1000.h
+++ /dev/null
@@ -1,361 +0,0 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-
-/* Linux PRO/1000 Ethernet Driver main header file */
-
-#ifndef _E1000_H_
-#define _E1000_H_
-
-#include <linux/stddef.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-#include <linux/pagemap.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <linux/capability.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <net/pkt_sched.h>
-#include <linux/list.h>
-#include <linux/reboot.h>
-#include <net/checksum.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-
-#define BAR_0 0
-#define BAR_1 1
-#define BAR_5 5
-
-#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
-
-struct e1000_adapter;
-
-#include "e1000_hw.h"
-
-#define E1000_MAX_INTR 10
-
-/* TX/RX descriptor defines */
-#define E1000_DEFAULT_TXD 256
-#define E1000_MAX_TXD 256
-#define E1000_MIN_TXD 48
-#define E1000_MAX_82544_TXD 4096
-
-#define E1000_DEFAULT_RXD 256
-#define E1000_MAX_RXD 256
-#define E1000_MIN_RXD 48
-#define E1000_MAX_82544_RXD 4096
-
-#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
-#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
-
-/* this is the size past which hardware will drop packets when setting LPE=0 */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
-
-/* Supported Rx Buffer Sizes */
-#define E1000_RXBUFFER_128 128 /* Used for packet split */
-#define E1000_RXBUFFER_256 256 /* Used for packet split */
-#define E1000_RXBUFFER_512 512
-#define E1000_RXBUFFER_1024 1024
-#define E1000_RXBUFFER_2048 2048
-#define E1000_RXBUFFER_4096 4096
-#define E1000_RXBUFFER_8192 8192
-#define E1000_RXBUFFER_16384 16384
-
-/* SmartSpeed delimiters */
-#define E1000_SMARTSPEED_DOWNSHIFT 3
-#define E1000_SMARTSPEED_MAX 15
-
-/* Packet Buffer allocations */
-#define E1000_PBA_BYTES_SHIFT 0xA
-#define E1000_TX_HEAD_ADDR_SHIFT 7
-#define E1000_PBA_TX_MASK 0xFFFF0000
-
-/* Flow Control Watermarks */
-#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
-#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
-
-#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
-
-/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define E1000_TX_QUEUE_WAKE 16
-/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-
-#define AUTO_ALL_MODES 0
-#define E1000_EEPROM_82544_APM 0x0004
-#define E1000_EEPROM_APME 0x0400
-
-#ifndef E1000_MASTER_SLAVE
-/* Switch to override PHY master/slave setting */
-#define E1000_MASTER_SLAVE e1000_ms_hw_default
-#endif
-
-#define E1000_MNG_VLAN_NONE (-1)
-
-/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
-struct e1000_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
- struct page *page;
- unsigned long time_stamp;
- u16 length;
- u16 next_to_watch;
- u16 mapped_as_page;
-};
-
-struct e1000_tx_ring {
- /* pointer to the descriptor ring memory */
- void *desc;
- /* physical address of the descriptor ring */
- dma_addr_t dma;
- /* length of descriptor ring in bytes */
- unsigned int size;
- /* number of descriptors in the ring */
- unsigned int count;
- /* next descriptor to associate a buffer with */
- unsigned int next_to_use;
- /* next descriptor to check for DD status bit */
- unsigned int next_to_clean;
- /* array of buffer information structs */
- struct e1000_buffer *buffer_info;
-
- u16 tdh;
- u16 tdt;
- bool last_tx_tso;
-};
-
-struct e1000_rx_ring {
- /* pointer to the descriptor ring memory */
- void *desc;
- /* physical address of the descriptor ring */
- dma_addr_t dma;
- /* length of descriptor ring in bytes */
- unsigned int size;
- /* number of descriptors in the ring */
- unsigned int count;
- /* next descriptor to associate a buffer with */
- unsigned int next_to_use;
- /* next descriptor to check for DD status bit */
- unsigned int next_to_clean;
- /* array of buffer information structs */
- struct e1000_buffer *buffer_info;
- struct sk_buff *rx_skb_top;
-
- /* cpu for rx queue */
- int cpu;
-
- u16 rdh;
- u16 rdt;
-};
-
-#define E1000_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) \
- ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
-
-#define E1000_RX_DESC_EXT(R, i) \
- (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
-#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
-#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
-#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
-#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
-
-/* board specific private data structure */
-
-struct e1000_adapter {
- struct timer_list tx_fifo_stall_timer;
- struct timer_list watchdog_timer;
- struct timer_list phy_info_timer;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u16 mng_vlan_id;
- u32 bd_number;
- u32 rx_buffer_len;
- u32 wol;
- u32 smartspeed;
- u32 en_mng_pt;
- u16 link_speed;
- u16 link_duplex;
- spinlock_t stats_lock;
- unsigned int total_tx_bytes;
- unsigned int total_tx_packets;
- unsigned int total_rx_bytes;
- unsigned int total_rx_packets;
- /* Interrupt Throttle Rate */
- u32 itr;
- u32 itr_setting;
- u16 tx_itr;
- u16 rx_itr;
-
- struct work_struct reset_task;
- u8 fc_autoneg;
-
- /* TX */
- struct e1000_tx_ring *tx_ring; /* One per active queue */
- unsigned int restart_queue;
- u32 txd_cmd;
- u32 tx_int_delay;
- u32 tx_abs_int_delay;
- u32 gotcl;
- u64 gotcl_old;
- u64 tpt_old;
- u64 colc_old;
- u32 tx_timeout_count;
- u32 tx_fifo_head;
- u32 tx_head_addr;
- u32 tx_fifo_size;
- u8 tx_timeout_factor;
- atomic_t tx_fifo_stall;
- bool pcix_82544;
- bool detect_tx_hung;
-
- /* RX */
- bool (*clean_rx)(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring,
- int *work_done, int work_to_do);
- void (*alloc_rx_buf)(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring,
- int cleaned_count);
- struct e1000_rx_ring *rx_ring; /* One per active queue */
- struct napi_struct napi;
-
- int num_tx_queues;
- int num_rx_queues;
-
- u64 hw_csum_err;
- u64 hw_csum_good;
- u32 alloc_rx_buff_failed;
- u32 rx_int_delay;
- u32 rx_abs_int_delay;
- bool rx_csum;
- u32 gorcl;
- u64 gorcl_old;
-
- /* OS defined structs */
- struct net_device *netdev;
- struct pci_dev *pdev;
-
- /* structs defined in e1000_hw.h */
- struct e1000_hw hw;
- struct e1000_hw_stats stats;
- struct e1000_phy_info phy_info;
- struct e1000_phy_stats phy_stats;
-
- u32 test_icr;
- struct e1000_tx_ring test_tx_ring;
- struct e1000_rx_ring test_rx_ring;
-
- int msg_enable;
-
- /* to not mess up cache alignment, always add to the bottom */
- bool tso_force;
- bool smart_power_down; /* phy smart power down */
- bool quad_port_a;
- unsigned long flags;
- u32 eeprom_wol;
-
- /* for ioport free */
- int bars;
- int need_ioport;
-
- bool discarding;
-
- struct work_struct fifo_stall_task;
- struct work_struct phy_info_task;
-};
-
-enum e1000_state_t {
- __E1000_TESTING,
- __E1000_RESETTING,
- __E1000_DOWN
-};
-
-#undef pr_fmt
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
-#define e_dbg(format, arg...) \
- netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
-#define e_err(msglvl, format, arg...) \
- netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
-#define e_info(msglvl, format, arg...) \
- netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
-#define e_warn(msglvl, format, arg...) \
- netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
-#define e_notice(msglvl, format, arg...) \
- netif_notice(adapter, msglvl, adapter->netdev, format, ## arg)
-#define e_dev_info(format, arg...) \
- dev_info(&adapter->pdev->dev, format, ## arg)
-#define e_dev_warn(format, arg...) \
- dev_warn(&adapter->pdev->dev, format, ## arg)
-#define e_dev_err(format, arg...) \
- dev_err(&adapter->pdev->dev, format, ## arg)
-
-extern char e1000_driver_name[];
-extern const char e1000_driver_version[];
-
-extern int e1000_up(struct e1000_adapter *adapter);
-extern void e1000_down(struct e1000_adapter *adapter);
-extern void e1000_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
-extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
-extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
-extern void e1000_power_up_phy(struct e1000_adapter *);
-extern void e1000_set_ethtool_ops(struct net_device *netdev);
-extern void e1000_check_options(struct e1000_adapter *adapter);
-extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
-
-#endif /* _E1000_H_ */
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile
deleted file mode 100644
index 28519acacd2..00000000000
--- a/drivers/net/e1000e/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
-################################################################################
-#
-# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2011 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Linux NICS <linux.nics@intel.com>
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
-
-#
-# Makefile for the Intel(R) PRO/1000 ethernet driver
-#
-
-obj-$(CONFIG_E1000E) += e1000e.o
-
-e1000e-objs := 82571.o ich8lan.o es2lan.o \
- lib.o phy.o param.o ethtool.o netdev.o
-
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
deleted file mode 100644
index 8533ad7f355..00000000000
--- a/drivers/net/e1000e/e1000.h
+++ /dev/null
@@ -1,741 +0,0 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/* Linux PRO/1000 Ethernet Driver main header file */
-
-#ifndef _E1000_H_
-#define _E1000_H_
-
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/crc32.h>
-#include <linux/if_vlan.h>
-
-#include "hw.h"
-
-struct e1000_info;
-
-#define e_dbg(format, arg...) \
- netdev_dbg(hw->adapter->netdev, format, ## arg)
-#define e_err(format, arg...) \
- netdev_err(adapter->netdev, format, ## arg)
-#define e_info(format, arg...) \
- netdev_info(adapter->netdev, format, ## arg)
-#define e_warn(format, arg...) \
- netdev_warn(adapter->netdev, format, ## arg)
-#define e_notice(format, arg...) \
- netdev_notice(adapter->netdev, format, ## arg)
-
-
-/* Interrupt modes, as used by the IntMode parameter */
-#define E1000E_INT_MODE_LEGACY 0
-#define E1000E_INT_MODE_MSI 1
-#define E1000E_INT_MODE_MSIX 2
-
-/* Tx/Rx descriptor defines */
-#define E1000_DEFAULT_TXD 256
-#define E1000_MAX_TXD 4096
-#define E1000_MIN_TXD 64
-
-#define E1000_DEFAULT_RXD 256
-#define E1000_MAX_RXD 4096
-#define E1000_MIN_RXD 64
-
-#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
-#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
-
-/* Early Receive defines */
-#define E1000_ERT_2048 0x100
-
-#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
-
-/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-
-#define AUTO_ALL_MODES 0
-#define E1000_EEPROM_APME 0x0400
-
-#define E1000_MNG_VLAN_NONE (-1)
-
-/* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
-
-#define DEFAULT_JUMBO 9234
-
-/* BM/HV Specific Registers */
-#define BM_PORT_CTRL_PAGE 769
-
-#define PHY_UPPER_SHIFT 21
-#define BM_PHY_REG(page, reg) \
- (((reg) & MAX_PHY_REG_ADDRESS) |\
- (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
- (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
-
-/* PHY Wakeup Registers and defines */
-#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
-#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
-#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
-#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
-#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
-#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
-#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
-#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
-#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
-#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
-
-#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
-#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
-#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
-#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
-#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
-#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
-#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
-
-#define HV_STATS_PAGE 778
-#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
-#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
-#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
-#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
-#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
-#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
-#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
-#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
-#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
-#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
-#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
-#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
-#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
-#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
-
-#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
-
-/* BM PHY Copper Specific Status */
-#define BM_CS_STATUS 17
-#define BM_CS_STATUS_LINK_UP 0x0400
-#define BM_CS_STATUS_RESOLVED 0x0800
-#define BM_CS_STATUS_SPEED_MASK 0xC000
-#define BM_CS_STATUS_SPEED_1000 0x8000
-
-/* 82577 Mobile Phy Status Register */
-#define HV_M_STATUS 26
-#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
-#define HV_M_STATUS_SPEED_MASK 0x0300
-#define HV_M_STATUS_SPEED_1000 0x0200
-#define HV_M_STATUS_LINK_UP 0x0040
-
-#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
-#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
-
-/* Time to wait before putting the device into D3 if there's no link (in ms). */
-#define LINK_TIMEOUT 100
-
-#define DEFAULT_RDTR 0
-#define DEFAULT_RADV 8
-#define BURST_RDTR 0x20
-#define BURST_RADV 0x20
-
-/*
- * in the case of WTHRESH, it appears at least the 82571/2 hardware
- * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
- * WTHRESH=4, and since we want 64 bytes at a time written back, set
- * it to 5
- */
-#define E1000_TXDCTL_DMA_BURST_ENABLE \
- (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
- E1000_TXDCTL_COUNT_DESC | \
- (5 << 16) | /* wthresh must be +1 more than desired */\
- (1 << 8) | /* hthresh */ \
- 0x1f) /* pthresh */
-
-#define E1000_RXDCTL_DMA_BURST_ENABLE \
- (0x01000000 | /* set descriptor granularity */ \
- (4 << 16) | /* set writeback threshold */ \
- (4 << 8) | /* set prefetch threshold */ \
- 0x20) /* set hthresh */
-
-#define E1000_TIDV_FPD (1 << 31)
-#define E1000_RDTR_FPD (1 << 31)
-
-enum e1000_boards {
- board_82571,
- board_82572,
- board_82573,
- board_82574,
- board_82583,
- board_80003es2lan,
- board_ich8lan,
- board_ich9lan,
- board_ich10lan,
- board_pchlan,
- board_pch2lan,
-};
-
-struct e1000_ps_page {
- struct page *page;
- u64 dma; /* must be u64 - written to hw */
-};
-
-/*
- * wrappers around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer
- */
-struct e1000_buffer {
- dma_addr_t dma;
- struct sk_buff *skb;
- union {
- /* Tx */
- struct {
- unsigned long time_stamp;
- u16 length;
- u16 next_to_watch;
- unsigned int segs;
- unsigned int bytecount;
- u16 mapped_as_page;
- };
- /* Rx */
- struct {
- /* arrays of page information for packet split */
- struct e1000_ps_page *ps_pages;
- struct page *page;
- };
- };
-};
-
-struct e1000_ring {
- void *desc; /* pointer to ring memory */
- dma_addr_t dma; /* phys address of ring */
- unsigned int size; /* length of ring in bytes */
- unsigned int count; /* number of desc. in ring */
-
- u16 next_to_use;
- u16 next_to_clean;
-
- u16 head;
- u16 tail;
-
- /* array of buffer information structs */
- struct e1000_buffer *buffer_info;
-
- char name[IFNAMSIZ + 5];
- u32 ims_val;
- u32 itr_val;
- u16 itr_register;
- int set_itr;
-
- struct sk_buff *rx_skb_top;
-};
-
-/* PHY register snapshot values */
-struct e1000_phy_regs {
- u16 bmcr; /* basic mode control register */
- u16 bmsr; /* basic mode status register */
- u16 advertise; /* auto-negotiation advertisement */
- u16 lpa; /* link partner ability register */
- u16 expansion; /* auto-negotiation expansion reg */
- u16 ctrl1000; /* 1000BASE-T control register */
- u16 stat1000; /* 1000BASE-T status register */
- u16 estatus; /* extended status register */
-};
-
-/* board specific private data structure */
-struct e1000_adapter {
- struct timer_list watchdog_timer;
- struct timer_list phy_info_timer;
- struct timer_list blink_timer;
-
- struct work_struct reset_task;
- struct work_struct watchdog_task;
-
- const struct e1000_info *ei;
-
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u32 bd_number;
- u32 rx_buffer_len;
- u16 mng_vlan_id;
- u16 link_speed;
- u16 link_duplex;
- u16 eeprom_vers;
-
- /* track device up/down/testing state */
- unsigned long state;
-
- /* Interrupt Throttle Rate */
- u32 itr;
- u32 itr_setting;
- u16 tx_itr;
- u16 rx_itr;
-
- /*
- * Tx
- */
- struct e1000_ring *tx_ring /* One per active queue */
- ____cacheline_aligned_in_smp;
-
- struct napi_struct napi;
-
- unsigned int restart_queue;
- u32 txd_cmd;
-
- bool detect_tx_hung;
- u8 tx_timeout_factor;
-
- u32 tx_int_delay;
- u32 tx_abs_int_delay;
-
- unsigned int total_tx_bytes;
- unsigned int total_tx_packets;
- unsigned int total_rx_bytes;
- unsigned int total_rx_packets;
-
- /* Tx stats */
- u64 tpt_old;
- u64 colc_old;
- u32 gotc;
- u64 gotc_old;
- u32 tx_timeout_count;
- u32 tx_fifo_head;
- u32 tx_head_addr;
- u32 tx_fifo_size;
- u32 tx_dma_failed;
-
- /*
- * Rx
- */
- bool (*clean_rx) (struct e1000_adapter *adapter,
- int *work_done, int work_to_do)
- ____cacheline_aligned_in_smp;
- void (*alloc_rx_buf) (struct e1000_adapter *adapter,
- int cleaned_count, gfp_t gfp);
- struct e1000_ring *rx_ring;
-
- u32 rx_int_delay;
- u32 rx_abs_int_delay;
-
- /* Rx stats */
- u64 hw_csum_err;
- u64 hw_csum_good;
- u64 rx_hdr_split;
- u32 gorc;
- u64 gorc_old;
- u32 alloc_rx_buff_failed;
- u32 rx_dma_failed;
-
- unsigned int rx_ps_pages;
- u16 rx_ps_bsize0;
- u32 max_frame_size;
- u32 min_frame_size;
-
- /* OS defined structs */
- struct net_device *netdev;
- struct pci_dev *pdev;
-
- /* structs defined in e1000_hw.h */
- struct e1000_hw hw;
-
- spinlock_t stats64_lock;
- struct e1000_hw_stats stats;
- struct e1000_phy_info phy_info;
- struct e1000_phy_stats phy_stats;
-
- /* Snapshot of PHY registers */
- struct e1000_phy_regs phy_regs;
-
- struct e1000_ring test_tx_ring;
- struct e1000_ring test_rx_ring;
- u32 test_icr;
-
- u32 msg_enable;
- unsigned int num_vectors;
- struct msix_entry *msix_entries;
- int int_mode;
- u32 eiac_mask;
-
- u32 eeprom_wol;
- u32 wol;
- u32 pba;
- u32 max_hw_frame_size;
-
- bool fc_autoneg;
-
- unsigned int flags;
- unsigned int flags2;
- struct work_struct downshift_task;
- struct work_struct update_phy_task;
- struct work_struct print_hang_task;
-
- bool idle_check;
- int phy_hang_count;
-};
-
-struct e1000_info {
- enum e1000_mac_type mac;
- unsigned int flags;
- unsigned int flags2;
- u32 pba;
- u32 max_hw_frame_size;
- s32 (*get_variants)(struct e1000_adapter *);
- struct e1000_mac_operations *mac_ops;
- struct e1000_phy_operations *phy_ops;
- struct e1000_nvm_operations *nvm_ops;
-};
-
-/* hardware capability, feature, and workaround flags */
-#define FLAG_HAS_AMT (1 << 0)
-#define FLAG_HAS_FLASH (1 << 1)
-#define FLAG_HAS_HW_VLAN_FILTER (1 << 2)
-#define FLAG_HAS_WOL (1 << 3)
-#define FLAG_HAS_ERT (1 << 4)
-#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
-#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
-#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
-#define FLAG_READ_ONLY_NVM (1 << 8)
-#define FLAG_IS_ICH (1 << 9)
-#define FLAG_HAS_MSIX (1 << 10)
-#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
-#define FLAG_IS_QUAD_PORT_A (1 << 12)
-#define FLAG_IS_QUAD_PORT (1 << 13)
-#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14)
-#define FLAG_APME_IN_WUC (1 << 15)
-#define FLAG_APME_IN_CTRL3 (1 << 16)
-#define FLAG_APME_CHECK_PORT_B (1 << 17)
-#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18)
-#define FLAG_NO_WAKE_UCAST (1 << 19)
-#define FLAG_MNG_PT_ENABLED (1 << 20)
-#define FLAG_RESET_OVERWRITES_LAA (1 << 21)
-#define FLAG_TARC_SPEED_MODE_BIT (1 << 22)
-#define FLAG_TARC_SET_BIT_ZERO (1 << 23)
-#define FLAG_RX_NEEDS_RESTART (1 << 24)
-#define FLAG_LSC_GIG_SPEED_DROP (1 << 25)
-#define FLAG_SMART_POWER_DOWN (1 << 26)
-#define FLAG_MSI_ENABLED (1 << 27)
-#define FLAG_RX_CSUM_ENABLED (1 << 28)
-#define FLAG_TSO_FORCE (1 << 29)
-#define FLAG_RX_RESTART_NOW (1 << 30)
-#define FLAG_MSI_TEST_FAILED (1 << 31)
-
-/* CRC Stripping defines */
-#define FLAG2_CRC_STRIPPING (1 << 0)
-#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
-#define FLAG2_IS_DISCARDING (1 << 2)
-#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
-#define FLAG2_HAS_PHY_STATS (1 << 4)
-#define FLAG2_HAS_EEE (1 << 5)
-#define FLAG2_DMA_BURST (1 << 6)
-#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
-#define FLAG2_DISABLE_AIM (1 << 8)
-#define FLAG2_CHECK_PHY_HANG (1 << 9)
-#define FLAG2_NO_DISABLE_RX (1 << 10)
-#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
-
-#define E1000_RX_DESC_PS(R, i) \
- (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
-#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
-#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
-#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
-#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
-
-enum e1000_state_t {
- __E1000_TESTING,
- __E1000_RESETTING,
- __E1000_DOWN
-};
-
-enum latency_range {
- lowest_latency = 0,
- low_latency = 1,
- bulk_latency = 2,
- latency_invalid = 255
-};
-
-extern char e1000e_driver_name[];
-extern const char e1000e_driver_version[];
-
-extern void e1000e_check_options(struct e1000_adapter *adapter);
-extern void e1000e_set_ethtool_ops(struct net_device *netdev);
-
-extern int e1000e_up(struct e1000_adapter *adapter);
-extern void e1000e_down(struct e1000_adapter *adapter);
-extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000e_reset(struct e1000_adapter *adapter);
-extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
-extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
-extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
-extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
-extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
-extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64
- *stats);
-extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
-
-extern unsigned int copybreak;
-
-extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw);
-
-extern struct e1000_info e1000_82571_info;
-extern struct e1000_info e1000_82572_info;
-extern struct e1000_info e1000_82573_info;
-extern struct e1000_info e1000_82574_info;
-extern struct e1000_info e1000_82583_info;
-extern struct e1000_info e1000_ich8_info;
-extern struct e1000_info e1000_ich9_info;
-extern struct e1000_info e1000_ich10_info;
-extern struct e1000_info e1000_pch_info;
-extern struct e1000_info e1000_pch2_info;
-extern struct e1000_info e1000_es2_info;
-
-extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
- u32 pba_num_size);
-
-extern s32 e1000e_commit_phy(struct e1000_hw *hw);
-
-extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
-
-extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
-extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
-
-extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
-extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
- bool state);
-extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
-extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
-extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
-extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
-extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
-
-extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
-extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
-extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
-extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
-extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
-extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
-extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
-extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
-extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
-extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
-extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
-extern s32 e1000e_id_led_init(struct e1000_hw *hw);
-extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
-extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
-extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
-extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
-extern s32 e1000e_setup_link(struct e1000_hw *hw);
-extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
-extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
-extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count);
-extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
-extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
-extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
-extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
-extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
-extern void e1000e_config_collision_dist(struct e1000_hw *hw);
-extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
-extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
-extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
-extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
-extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
-extern void e1000e_reset_adaptive(struct e1000_hw *hw);
-extern void e1000e_update_adaptive(struct e1000_hw *hw);
-
-extern s32 e1000e_setup_copper_link(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_id(struct e1000_hw *hw);
-extern void e1000e_put_hw_semaphore(struct e1000_hw *hw);
-extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
-extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
-extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
-extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
-extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
-extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
-extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
-extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
-extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
-extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
-extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
-extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
-extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
- u16 *phy_reg);
-extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
- u16 *phy_reg);
-extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
-extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
-extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
- u32 usec_interval, bool *success);
-extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
-extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
-extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
-extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_check_downshift(struct e1000_hw *hw);
-extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
-extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
-extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
-extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
-extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
-
-extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
-extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
-extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
-extern bool e1000_check_phy_82574(struct e1000_hw *hw);
-
-static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
-{
- return hw->phy.ops.reset(hw);
-}
-
-static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
-{
- return hw->phy.ops.check_reset_block(hw);
-}
-
-static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- return hw->phy.ops.read_reg(hw, offset, data);
-}
-
-static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
-{
- return hw->phy.ops.write_reg(hw, offset, data);
-}
-
-static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
-{
- return hw->phy.ops.get_cable_length(hw);
-}
-
-extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
-extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
-extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
-extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
-extern void e1000e_release_nvm(struct e1000_hw *hw);
-extern void e1000e_reload_nvm(struct e1000_hw *hw);
-extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
-
-static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
-{
- if (hw->mac.ops.read_mac_addr)
- return hw->mac.ops.read_mac_addr(hw);
-
- return e1000_read_mac_addr_generic(hw);
-}
-
-static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
-{
- return hw->nvm.ops.validate(hw);
-}
-
-static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
-{
- return hw->nvm.ops.update(hw);
-}
-
-static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
- return hw->nvm.ops.read(hw, offset, words, data);
-}
-
-static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
- return hw->nvm.ops.write(hw, offset, words, data);
-}
-
-static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
-{
- return hw->phy.ops.get_info(hw);
-}
-
-static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
-{
- return hw->mac.ops.check_mng_mode(hw);
-}
-
-extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
-extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
-extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
-
-static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
-{
- return readl(hw->hw_addr + reg);
-}
-
-static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
-{
- writel(val, hw->hw_addr + reg);
-}
-
-#endif /* _E1000_H_ */
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
deleted file mode 100644
index e4f42257c24..00000000000
--- a/drivers/net/e1000e/es2lan.c
+++ /dev/null
@@ -1,1516 +0,0 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/*
- * 80003ES2LAN Gigabit Ethernet Controller (Copper)
- * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
- */
-
-#include "e1000.h"
-
-#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
-#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
-#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
-#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
-
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
-#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
-
-#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
-#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
-#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
-
-#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
-#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
-
-#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
-#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
-
-#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
-#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
-
-/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
-#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */
-#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
-#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
-#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
-#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
-
-/* PHY Specific Control Register 2 (Page 0, Register 26) */
-#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000
- /* 1=Reverse Auto-Negotiation */
-
-/* MAC Specific Control Register (Page 2, Register 21) */
-/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
-#define GG82563_MSCR_TX_CLK_MASK 0x0007
-#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
-#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
-#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
-
-#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
-
-/* DSP Distance Register (Page 5, Register 26) */
-#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M
- 1 = 50-80M
- 2 = 80-110M
- 3 = 110-140M
- 4 = >140M */
-
-/* Kumeran Mode Control Register (Page 193, Register 16) */
-#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
-
-/* Max number of times Kumeran read/write should be validated */
-#define GG82563_MAX_KMRN_RETRY 0x5
-
-/* Power Management Control Register (Page 193, Register 20) */
-#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
- /* 1=Enable SERDES Electrical Idle */
-
-/* In-Band Control Register (Page 194, Register 18) */
-#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
-
-/*
- * A table for the GG82563 cable length where the range is defined
- * with a lower bound at "index" and the upper bound at
- * "index + 5".
- */
-static const u16 e1000_gg82563_cable_length_table[] = {
- 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
-#define GG82563_CABLE_LENGTH_TABLE_SIZE \
- ARRAY_SIZE(e1000_gg82563_cable_length_table)
-
-static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
-static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
-static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
-static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
-static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
-static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
-static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
-static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
-static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
- u16 *data);
-static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
- u16 data);
-static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
-
-/**
- * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
- * @hw: pointer to the HW structure
- **/
-static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
-
- if (hw->phy.media_type != e1000_media_type_copper) {
- phy->type = e1000_phy_none;
- return 0;
- } else {
- phy->ops.power_up = e1000_power_up_phy_copper;
- phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
- }
-
- phy->addr = 1;
- phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- phy->reset_delay_us = 100;
- phy->type = e1000_phy_gg82563;
-
- /* This can only be done after all function pointers are setup. */
- ret_val = e1000e_get_phy_id(hw);
-
- /* Verify phy id */
- if (phy->id != GG82563_E_PHY_ID)
- return -E1000_ERR_PHY;
-
- return ret_val;
-}
-
-/**
- * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
- * @hw: pointer to the HW structure
- **/
-static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- u32 eecd = er32(EECD);
- u16 size;
-
- nvm->opcode_bits = 8;
- nvm->delay_usec = 1;
- switch (nvm->override) {
- case e1000_nvm_override_spi_large:
- nvm->page_size = 32;
- nvm->address_bits = 16;
- break;
- case e1000_nvm_override_spi_small:
- nvm->page_size = 8;
- nvm->address_bits = 8;
- break;
- default:
- nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
- nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
- break;
- }
-
- nvm->type = e1000_nvm_eeprom_spi;
-
- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
-
- /*
- * Added to a constant, "size" becomes the left-shift value
- * for setting word_size.
- */
- size += NVM_WORD_SIZE_BASE_SHIFT;
-
- /* EEPROM access above 16k is unsupported */
- if (size > 14)
- size = 14;
- nvm->word_size = 1 << size;
-
- return 0;
-}
-
-/**
- * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
- * @hw: pointer to the HW structure
- **/
-static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_mac_info *mac = &hw->mac;
- struct e1000_mac_operations *func = &mac->ops;
-
- /* Set media type */
- switch (adapter->pdev->device) {
- case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
- hw->phy.media_type = e1000_media_type_internal_serdes;
- break;
- default:
- hw->phy.media_type = e1000_media_type_copper;
- break;
- }
-
- /* Set mta register count */
- mac->mta_reg_count = 128;
- /* Set rar entry count */
- mac->rar_entry_count = E1000_RAR_ENTRIES;
- /* FWSM register */
- mac->has_fwsm = true;
- /* ARC supported; valid only if manageability features are enabled. */
- mac->arc_subsystem_valid =
- (er32(FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
- /* Adaptive IFS not supported */
- mac->adaptive_ifs = false;
-
- /* check for link */
- switch (hw->phy.media_type) {
- case e1000_media_type_copper:
- func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
- func->check_for_link = e1000e_check_for_copper_link;
- break;
- case e1000_media_type_fiber:
- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
- func->check_for_link = e1000e_check_for_fiber_link;
- break;
- case e1000_media_type_internal_serdes:
- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
- func->check_for_link = e1000e_check_for_serdes_link;
- break;
- default:
- return -E1000_ERR_CONFIG;
- break;
- }
-
- /* set lan id for port to determine which phy lock to use */
- hw->mac.ops.set_lan_id(hw);
-
- return 0;
-}
-
-static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- s32 rc;
-
- rc = e1000_init_mac_params_80003es2lan(adapter);
- if (rc)
- return rc;
-
- rc = e1000_init_nvm_params_80003es2lan(hw);
- if (rc)
- return rc;
-
- rc = e1000_init_phy_params_80003es2lan(hw);
- if (rc)
- return rc;
-
- return 0;
-}
-
-/**
- * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
- * @hw: pointer to the HW structure
- *
- * A wrapper to acquire access rights to the correct PHY.
- **/
-static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
-{
- u16 mask;
-
- mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
- return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
-}
-
-/**
- * e1000_release_phy_80003es2lan - Release rights to access PHY
- * @hw: pointer to the HW structure
- *
- * A wrapper to release access rights to the correct PHY.
- **/
-static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
-{
- u16 mask;
-
- mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
- e1000_release_swfw_sync_80003es2lan(hw, mask);
-}
-
-/**
- * e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register
- * @hw: pointer to the HW structure
- *
- * Acquire the semaphore to access the Kumeran interface.
- *
- **/
-static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
-{
- u16 mask;
-
- mask = E1000_SWFW_CSR_SM;
-
- return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
-}
-
-/**
- * e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register
- * @hw: pointer to the HW structure
- *
- * Release the semaphore used to access the Kumeran interface
- **/
-static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
-{
- u16 mask;
-
- mask = E1000_SWFW_CSR_SM;
-
- e1000_release_swfw_sync_80003es2lan(hw, mask);
-}
-
-/**
- * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
- * @hw: pointer to the HW structure
- *
- * Acquire the semaphore to access the EEPROM.
- **/
-static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
-{
- s32 ret_val;
-
- ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000e_acquire_nvm(hw);
-
- if (ret_val)
- e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
-
- return ret_val;
-}
-
-/**
- * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
- * @hw: pointer to the HW structure
- *
- * Release the semaphore used to access the EEPROM.
- **/
-static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
-{
- e1000e_release_nvm(hw);
- e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
-}
-
-/**
- * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
- * @hw: pointer to the HW structure
- * @mask: specifies which semaphore to acquire
- *
- * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
- * will also specify which port we're acquiring the lock for.
- **/
-static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
-{
- u32 swfw_sync;
- u32 swmask = mask;
- u32 fwmask = mask << 16;
- s32 i = 0;
- s32 timeout = 50;
-
- while (i < timeout) {
- if (e1000e_get_hw_semaphore(hw))
- return -E1000_ERR_SWFW_SYNC;
-
- swfw_sync = er32(SW_FW_SYNC);
- if (!(swfw_sync & (fwmask | swmask)))
- break;
-
- /*
- * Firmware currently using resource (fwmask)
- * or other software thread using resource (swmask)
- */
- e1000e_put_hw_semaphore(hw);
- mdelay(5);
- i++;
- }
-
- if (i == timeout) {
- e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
- return -E1000_ERR_SWFW_SYNC;
- }
-
- swfw_sync |= swmask;
- ew32(SW_FW_SYNC, swfw_sync);
-
- e1000e_put_hw_semaphore(hw);
-
- return 0;
-}
-
-/**
- * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
- * @hw: pointer to the HW structure
- * @mask: specifies which semaphore to acquire
- *
- * Release the SW/FW semaphore used to access the PHY or NVM. The mask
- * will also specify which port we're releasing the lock for.
- **/
-static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
-{
- u32 swfw_sync;
-
- while (e1000e_get_hw_semaphore(hw) != 0)
- ; /* Empty */
-
- swfw_sync = er32(SW_FW_SYNC);
- swfw_sync &= ~mask;
- ew32(SW_FW_SYNC, swfw_sync);
-
- e1000e_put_hw_semaphore(hw);
-}
-
-/**
- * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
- * @hw: pointer to the HW structure
- * @offset: offset of the register to read
- * @data: pointer to the data returned from the operation
- *
- * Read the GG82563 PHY register.
- **/
-static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
- u32 offset, u16 *data)
-{
- s32 ret_val;
- u32 page_select;
- u16 temp;
-
- ret_val = e1000_acquire_phy_80003es2lan(hw);
- if (ret_val)
- return ret_val;
-
- /* Select Configuration Page */
- if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
- page_select = GG82563_PHY_PAGE_SELECT;
- } else {
- /*
- * Use Alternative Page Select register to access
- * registers 30 and 31
- */
- page_select = GG82563_PHY_PAGE_SELECT_ALT;
- }
-
- temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
- ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
- if (ret_val) {
- e1000_release_phy_80003es2lan(hw);
- return ret_val;
- }
-
- if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) {
- /*
- * The "ready" bit in the MDIC register may be incorrectly set
- * before the device has completed the "Page Select" MDI
- * transaction. So we wait 200us after each MDI command...
- */
- udelay(200);
-
- /* ...and verify the command was successful. */
- ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
-
- if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
- ret_val = -E1000_ERR_PHY;
- e1000_release_phy_80003es2lan(hw);
- return ret_val;
- }
-
- udelay(200);
-
- ret_val = e1000e_read_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
-
- udelay(200);
- } else {
- ret_val = e1000e_read_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
- }
-
- e1000_release_phy_80003es2lan(hw);
-
- return ret_val;
-}
-
-/**
- * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
- * @hw: pointer to the HW structure
- * @offset: offset of the register to read
- * @data: value to write to the register
- *
- * Write to the GG82563 PHY register.
- **/
-static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
- u32 offset, u16 data)
-{
- s32 ret_val;
- u32 page_select;
- u16 temp;
-
- ret_val = e1000_acquire_phy_80003es2lan(hw);
- if (ret_val)
- return ret_val;
-
- /* Select Configuration Page */
- if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
- page_select = GG82563_PHY_PAGE_SELECT;
- } else {
- /*
- * Use Alternative Page Select register to access
- * registers 30 and 31
- */
- page_select = GG82563_PHY_PAGE_SELECT_ALT;
- }
-
- temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
- ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
- if (ret_val) {
- e1000_release_phy_80003es2lan(hw);
- return ret_val;
- }
-
- if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) {
- /*
- * The "ready" bit in the MDIC register may be incorrectly set
- * before the device has completed the "Page Select" MDI
- * transaction. So we wait 200us after each MDI command...
- */
- udelay(200);
-
- /* ...and verify the command was successful. */
- ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
-
- if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
- e1000_release_phy_80003es2lan(hw);
- return -E1000_ERR_PHY;
- }
-
- udelay(200);
-
- ret_val = e1000e_write_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
-
- udelay(200);
- } else {
- ret_val = e1000e_write_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
- }
-
- e1000_release_phy_80003es2lan(hw);
-
- return ret_val;
-}
-
-/**
- * e1000_write_nvm_80003es2lan - Write to ESB2 NVM
- * @hw: pointer to the HW structure
- * @offset: offset of the register to read
- * @words: number of words to write
- * @data: buffer of data to write to the NVM
- *
- * Write "words" of data to the ESB2 NVM.
- **/
-static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
- u16 words, u16 *data)
-{
- return e1000e_write_nvm_spi(hw, offset, words, data);
-}
-
-/**
- * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
- * @hw: pointer to the HW structure
- *
- * Wait a specific amount of time for manageability processes to complete.
- * This is a function pointer entry point called by the phy module.
- **/
-static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
-{
- s32 timeout = PHY_CFG_TIMEOUT;
- u32 mask = E1000_NVM_CFG_DONE_PORT_0;
-
- if (hw->bus.func == 1)
- mask = E1000_NVM_CFG_DONE_PORT_1;
-
- while (timeout) {
- if (er32(EEMNGCTL) & mask)
- break;
- usleep_range(1000, 2000);
- timeout--;
- }
- if (!timeout) {
- e_dbg("MNG configuration cycle has not completed.\n");
- return -E1000_ERR_RESET;
- }
-
- return 0;
-}
-
-/**
- * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
- * @hw: pointer to the HW structure
- *
- * Force the speed and duplex settings onto the PHY. This is a
- * function pointer entry point called by the phy module.
- **/
-static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 phy_data;
- bool link;
-
- /*
- * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
- * forced whenever speed and duplex are forced.
- */
- ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
- ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- return ret_val;
-
- e_dbg("GG82563 PSCR: %X\n", phy_data);
-
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
- if (ret_val)
- return ret_val;
-
- e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
-
- /* Reset the phy to commit changes. */
- phy_data |= MII_CR_RESET;
-
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
- if (ret_val)
- return ret_val;
-
- udelay(1);
-
- if (hw->phy.autoneg_wait_to_complete) {
- e_dbg("Waiting for forced speed/duplex link "
- "on GG82563 phy.\n");
-
- ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
- if (ret_val)
- return ret_val;
-
- if (!link) {
- /*
- * We didn't get link.
- * Reset the DSP and cross our fingers.
- */
- ret_val = e1000e_phy_reset_dsp(hw);
- if (ret_val)
- return ret_val;
- }
-
- /* Try once more */
- ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
- if (ret_val)
- return ret_val;
- }
-
- ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- /*
- * Resetting the phy means we need to verify the TX_CLK corresponds
- * to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
- */
- phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
- if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
- phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
- else
- phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
-
- /*
- * In addition, we must re-enable CRS on Tx for both half and full
- * duplex.
- */
- phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
- ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
-
- return ret_val;
-}
-
-/**
- * e1000_get_cable_length_80003es2lan - Set approximate cable length
- * @hw: pointer to the HW structure
- *
- * Find the approximate cable length as measured by the GG82563 PHY.
- * This is a function pointer entry point called by the phy module.
- **/
-static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
- u16 phy_data, index;
-
- ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
- if (ret_val)
- goto out;
-
- index = phy_data & GG82563_DSPD_CABLE_LENGTH;
-
- if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) {
- ret_val = -E1000_ERR_PHY;
- goto out;
- }
-
- phy->min_cable_length = e1000_gg82563_cable_length_table[index];
- phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
-
- phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_get_link_up_info_80003es2lan - Report speed and duplex
- * @hw: pointer to the HW structure
- * @speed: pointer to speed buffer
- * @duplex: pointer to duplex buffer
- *
- * Retrieve the current speed and duplex configuration.
- **/
-static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
- u16 *duplex)
-{
- s32 ret_val;
-
- if (hw->phy.media_type == e1000_media_type_copper) {
- ret_val = e1000e_get_speed_and_duplex_copper(hw,
- speed,
- duplex);
- hw->phy.ops.cfg_on_link_up(hw);
- } else {
- ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
- speed,
- duplex);
- }
-
- return ret_val;
-}
-
-/**
- * e1000_reset_hw_80003es2lan - Reset the ESB2 controller
- * @hw: pointer to the HW structure
- *
- * Perform a global reset to the ESB2 controller.
- **/
-static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
-{
- u32 ctrl;
- s32 ret_val;
-
- /*
- * Prevent the PCI-E bus from sticking if there is no TLP connection
- * on the last TLP read/write transaction when MAC is reset.
- */
- ret_val = e1000e_disable_pcie_master(hw);
- if (ret_val)
- e_dbg("PCI-E Master disable polling has failed.\n");
-
- e_dbg("Masking off all interrupts\n");
- ew32(IMC, 0xffffffff);
-
- ew32(RCTL, 0);
- ew32(TCTL, E1000_TCTL_PSP);
- e1e_flush();
-
- usleep_range(10000, 20000);
-
- ctrl = er32(CTRL);
-
- ret_val = e1000_acquire_phy_80003es2lan(hw);
- e_dbg("Issuing a global reset to MAC\n");
- ew32(CTRL, ctrl | E1000_CTRL_RST);
- e1000_release_phy_80003es2lan(hw);
-
- ret_val = e1000e_get_auto_rd_done(hw);
- if (ret_val)
- /* We don't want to continue accessing MAC registers. */
- return ret_val;
-
- /* Clear any pending interrupt events. */
- ew32(IMC, 0xffffffff);
- er32(ICR);
-
- ret_val = e1000_check_alt_mac_addr_generic(hw);
-
- return ret_val;
-}
-
-/**
- * e1000_init_hw_80003es2lan - Initialize the ESB2 controller
- * @hw: pointer to the HW structure
- *
- * Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
- **/
-static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- u32 reg_data;
- s32 ret_val;
- u16 kum_reg_data;
- u16 i;
-
- e1000_initialize_hw_bits_80003es2lan(hw);
-
- /* Initialize identification LED */
- ret_val = e1000e_id_led_init(hw);
- if (ret_val)
- e_dbg("Error initializing identification LED\n");
- /* This is not fatal and we should not stop init due to this */
-
- /* Disabling VLAN filtering */
- e_dbg("Initializing the IEEE VLAN\n");
- mac->ops.clear_vfta(hw);
-
- /* Setup the receive address. */
- e1000e_init_rx_addrs(hw, mac->rar_entry_count);
-
- /* Zero out the Multicast HASH table */
- e_dbg("Zeroing the MTA\n");
- for (i = 0; i < mac->mta_reg_count; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
-
- /* Setup link and flow control */
- ret_val = e1000e_setup_link(hw);
-
- /* Disable IBIST slave mode (far-end loopback) */
- e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- &kum_reg_data);
- kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
- e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- kum_reg_data);
-
- /* Set the transmit descriptor write-back policy */
- reg_data = er32(TXDCTL(0));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
- ew32(TXDCTL(0), reg_data);
-
- /* ...for both queues. */
- reg_data = er32(TXDCTL(1));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
- ew32(TXDCTL(1), reg_data);
-
- /* Enable retransmit on late collisions */
- reg_data = er32(TCTL);
- reg_data |= E1000_TCTL_RTLC;
- ew32(TCTL, reg_data);
-
- /* Configure Gigabit Carry Extend Padding */
- reg_data = er32(TCTL_EXT);
- reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
- reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
- ew32(TCTL_EXT, reg_data);
-
- /* Configure Transmit Inter-Packet Gap */
- reg_data = er32(TIPG);
- reg_data &= ~E1000_TIPG_IPGT_MASK;
- reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
- ew32(TIPG, reg_data);
-
- reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
- reg_data &= ~0x00100000;
- E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
-
- /* default to true to enable the MDIC W/A */
- hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
-
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET >>
- E1000_KMRNCTRLSTA_OFFSET_SHIFT,
- &i);
- if (!ret_val) {
- if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
- E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
- hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
- }
-
- /*
- * Clear all of the statistics registers (clear on read). It is
- * important that we do this after we have tried to establish link
- * because the symbol error count will increment wildly if there
- * is no link.
- */
- e1000_clear_hw_cntrs_80003es2lan(hw);
-
- return ret_val;
-}
-
-/**
- * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
- * @hw: pointer to the HW structure
- *
- * Initializes required hardware-dependent bits needed for normal operation.
- **/
-static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
-{
- u32 reg;
-
- /* Transmit Descriptor Control 0 */
- reg = er32(TXDCTL(0));
- reg |= (1 << 22);
- ew32(TXDCTL(0), reg);
-
- /* Transmit Descriptor Control 1 */
- reg = er32(TXDCTL(1));
- reg |= (1 << 22);
- ew32(TXDCTL(1), reg);
-
- /* Transmit Arbitration Control 0 */
- reg = er32(TARC(0));
- reg &= ~(0xF << 27); /* 30:27 */
- if (hw->phy.media_type != e1000_media_type_copper)
- reg &= ~(1 << 20);
- ew32(TARC(0), reg);
-
- /* Transmit Arbitration Control 1 */
- reg = er32(TARC(1));
- if (er32(TCTL) & E1000_TCTL_MULR)
- reg &= ~(1 << 28);
- else
- reg |= (1 << 28);
- ew32(TARC(1), reg);
-}
-
-/**
- * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
- * @hw: pointer to the HW structure
- *
- * Setup some GG82563 PHY registers for obtaining link
- **/
-static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u32 ctrl_ext;
- u16 data;
-
- ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
- if (ret_val)
- return ret_val;
-
- data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
- /* Use 25MHz for both link down and 1000Base-T for Tx clock. */
- data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
-
- ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data);
- if (ret_val)
- return ret_val;
-
- /*
- * Options:
- * MDI/MDI-X = 0 (default)
- * 0 - Auto for all speeds
- * 1 - MDI mode
- * 2 - MDI-X mode
- * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
- */
- ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data);
- if (ret_val)
- return ret_val;
-
- data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
-
- switch (phy->mdix) {
- case 1:
- data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
- break;
- case 2:
- data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
- break;
- case 0:
- default:
- data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
- break;
- }
-
- /*
- * Options:
- * disable_polarity_correction = 0 (default)
- * Automatic Correction for Reversed Cable Polarity
- * 0 - Disabled
- * 1 - Enabled
- */
- data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
- if (phy->disable_polarity_correction)
- data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
-
- ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data);
- if (ret_val)
- return ret_val;
-
- /* SW Reset the PHY so all changes take effect */
- ret_val = e1000e_commit_phy(hw);
- if (ret_val) {
- e_dbg("Error Resetting the PHY\n");
- return ret_val;
- }
-
- /* Bypass Rx and Tx FIFO's */
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
- E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
- E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
- &data);
- if (ret_val)
- return ret_val;
- data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
- data);
- if (ret_val)
- return ret_val;
-
- ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data);
- if (ret_val)
- return ret_val;
-
- data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
- ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data);
- if (ret_val)
- return ret_val;
-
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
- ew32(CTRL_EXT, ctrl_ext);
-
- ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
- if (ret_val)
- return ret_val;
-
- /*
- * Do not init these registers when the HW is in IAMT mode, since the
- * firmware will have already initialized them. We only initialize
- * them if the HW is not in IAMT mode.
- */
- if (!e1000e_check_mng_mode(hw)) {
- /* Enable Electrical Idle on the PHY */
- data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
- ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
- if (ret_val)
- return ret_val;
-
- ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
- if (ret_val)
- return ret_val;
-
- data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
- ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
- if (ret_val)
- return ret_val;
- }
-
- /*
- * Workaround: Disable padding in Kumeran interface in the MAC
- * and in the PHY to avoid CRC errors.
- */
- ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
- if (ret_val)
- return ret_val;
-
- data |= GG82563_ICR_DIS_PADDING;
- ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data);
- if (ret_val)
- return ret_val;
-
- return 0;
-}
-
-/**
- * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
- * @hw: pointer to the HW structure
- *
- * Essentially a wrapper for setting up all things "copper" related.
- * This is a function pointer entry point called by the mac module.
- **/
-static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
-{
- u32 ctrl;
- s32 ret_val;
- u16 reg_data;
-
- ctrl = er32(CTRL);
- ctrl |= E1000_CTRL_SLU;
- ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
- ew32(CTRL, ctrl);
-
- /*
- * Set the mac to wait the maximum time between each
- * iteration and increase the max iterations when
- * polling the phy; this fixes erroneous timeouts at 10Mbps.
- */
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
- 0xFFFF);
- if (ret_val)
- return ret_val;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- &reg_data);
- if (ret_val)
- return ret_val;
- reg_data |= 0x3F;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- reg_data);
- if (ret_val)
- return ret_val;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
- &reg_data);
- if (ret_val)
- return ret_val;
- reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
- reg_data);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000e_setup_copper_link(hw);
-
- return 0;
-}
-
-/**
- * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
- * @hw: pointer to the HW structure
- * @duplex: current duplex setting
- *
- * Configure the KMRN interface by applying last minute quirks for
- * 10/100 operation.
- **/
-static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
-{
- s32 ret_val = 0;
- u16 speed;
- u16 duplex;
-
- if (hw->phy.media_type == e1000_media_type_copper) {
- ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
- &duplex);
- if (ret_val)
- return ret_val;
-
- if (speed == SPEED_1000)
- ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
- else
- ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
- }
-
- return ret_val;
-}
-
-/**
- * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
- * @hw: pointer to the HW structure
- * @duplex: current duplex setting
- *
- * Configure the KMRN interface by applying last minute quirks for
- * 10/100 operation.
- **/
-static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
-{
- s32 ret_val;
- u32 tipg;
- u32 i = 0;
- u16 reg_data, reg_data2;
-
- reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
- reg_data);
- if (ret_val)
- return ret_val;
-
- /* Configure Transmit Inter-Packet Gap */
- tipg = er32(TIPG);
- tipg &= ~E1000_TIPG_IPGT_MASK;
- tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
- ew32(TIPG, tipg);
-
- do {
- ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
- if (ret_val)
- return ret_val;
-
- ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
- if (ret_val)
- return ret_val;
- i++;
- } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
-
- if (duplex == HALF_DUPLEX)
- reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
- else
- reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
-
- ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
-
- return 0;
-}
-
-/**
- * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
- * @hw: pointer to the HW structure
- *
- * Configure the KMRN interface by applying last minute quirks for
- * gigabit operation.
- **/
-static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 reg_data, reg_data2;
- u32 tipg;
- u32 i = 0;
-
- reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
- reg_data);
- if (ret_val)
- return ret_val;
-
- /* Configure Transmit Inter-Packet Gap */
- tipg = er32(TIPG);
- tipg &= ~E1000_TIPG_IPGT_MASK;
- tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
- ew32(TIPG, tipg);
-
- do {
- ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
- if (ret_val)
- return ret_val;
-
- ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
- if (ret_val)
- return ret_val;
- i++;
- } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
-
- reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
- ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
-
- return ret_val;
-}
-
-/**
- * e1000_read_kmrn_reg_80003es2lan - Read kumeran register
- * @hw: pointer to the HW structure
- * @offset: register offset to be read
- * @data: pointer to the read data
- *
- * Acquire semaphore, then read the PHY register at offset
- * using the kumeran interface. The information retrieved is stored in data.
- * Release the semaphore before exiting.
- **/
-static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
- u16 *data)
-{
- u32 kmrnctrlsta;
- s32 ret_val = 0;
-
- ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
- if (ret_val)
- return ret_val;
-
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
- ew32(KMRNCTRLSTA, kmrnctrlsta);
- e1e_flush();
-
- udelay(2);
-
- kmrnctrlsta = er32(KMRNCTRLSTA);
- *data = (u16)kmrnctrlsta;
-
- e1000_release_mac_csr_80003es2lan(hw);
-
- return ret_val;
-}
-
-/**
- * e1000_write_kmrn_reg_80003es2lan - Write kumeran register
- * @hw: pointer to the HW structure
- * @offset: register offset to write to
- * @data: data to write at register offset
- *
- * Acquire semaphore, then write the data to PHY register
- * at the offset using the kumeran interface. Release semaphore
- * before exiting.
- **/
-static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
- u16 data)
-{
- u32 kmrnctrlsta;
- s32 ret_val = 0;
-
- ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
- if (ret_val)
- return ret_val;
-
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | data;
- ew32(KMRNCTRLSTA, kmrnctrlsta);
- e1e_flush();
-
- udelay(2);
-
- e1000_release_mac_csr_80003es2lan(hw);
-
- return ret_val;
-}
-
-/**
- * e1000_read_mac_addr_80003es2lan - Read device MAC address
- * @hw: pointer to the HW structure
- **/
-static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
-{
- s32 ret_val = 0;
-
- /*
- * If there's an alternate MAC address place it in RAR0
- * so that it will override the Si installed default perm
- * address.
- */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
- if (ret_val)
- goto out;
-
- ret_val = e1000_read_mac_addr_generic(hw);
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
- * @hw: pointer to the HW structure
- *
- * In the case of a PHY power down to save power, or to turn off link during a
- * driver unload, or wake on lan is not enabled, remove the link.
- **/
-static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
-{
- /* If the management interface is not enabled, then power down */
- if (!(hw->mac.ops.check_mng_mode(hw) ||
- hw->phy.ops.check_reset_block(hw)))
- e1000_power_down_phy_copper(hw);
-}
-
-/**
- * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
- * @hw: pointer to the HW structure
- *
- * Clears the hardware counters by reading the counter registers.
- **/
-static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
-{
- e1000e_clear_hw_cntrs_base(hw);
-
- er32(PRC64);
- er32(PRC127);
- er32(PRC255);
- er32(PRC511);
- er32(PRC1023);
- er32(PRC1522);
- er32(PTC64);
- er32(PTC127);
- er32(PTC255);
- er32(PTC511);
- er32(PTC1023);
- er32(PTC1522);
-
- er32(ALGNERRC);
- er32(RXERRC);
- er32(TNCRS);
- er32(CEXTERR);
- er32(TSCTC);
- er32(TSCTFC);
-
- er32(MGTPRC);
- er32(MGTPDC);
- er32(MGTPTC);
-
- er32(IAC);
- er32(ICRXOC);
-
- er32(ICRXPTC);
- er32(ICRXATC);
- er32(ICTXPTC);
- er32(ICTXATC);
- er32(ICTXQEC);
- er32(ICTXQMTC);
- er32(ICRXDMTC);
-}
-
-static struct e1000_mac_operations es2_mac_ops = {
- .read_mac_addr = e1000_read_mac_addr_80003es2lan,
- .id_led_init = e1000e_id_led_init,
- .blink_led = e1000e_blink_led_generic,
- .check_mng_mode = e1000e_check_mng_mode_generic,
- /* check_for_link dependent on media type */
- .cleanup_led = e1000e_cleanup_led_generic,
- .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
- .get_bus_info = e1000e_get_bus_info_pcie,
- .set_lan_id = e1000_set_lan_id_multi_port_pcie,
- .get_link_up_info = e1000_get_link_up_info_80003es2lan,
- .led_on = e1000e_led_on_generic,
- .led_off = e1000e_led_off_generic,
- .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
- .write_vfta = e1000_write_vfta_generic,
- .clear_vfta = e1000_clear_vfta_generic,
- .reset_hw = e1000_reset_hw_80003es2lan,
- .init_hw = e1000_init_hw_80003es2lan,
- .setup_link = e1000e_setup_link,
- /* setup_physical_interface dependent on media type */
- .setup_led = e1000e_setup_led_generic,
-};
-
-static struct e1000_phy_operations es2_phy_ops = {
- .acquire = e1000_acquire_phy_80003es2lan,
- .check_polarity = e1000_check_polarity_m88,
- .check_reset_block = e1000e_check_reset_block_generic,
- .commit = e1000e_phy_sw_reset,
- .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
- .get_cfg_done = e1000_get_cfg_done_80003es2lan,
- .get_cable_length = e1000_get_cable_length_80003es2lan,
- .get_info = e1000e_get_phy_info_m88,
- .read_reg = e1000_read_phy_reg_gg82563_80003es2lan,
- .release = e1000_release_phy_80003es2lan,
- .reset = e1000e_phy_hw_reset_generic,
- .set_d0_lplu_state = NULL,
- .set_d3_lplu_state = e1000e_set_d3_lplu_state,
- .write_reg = e1000_write_phy_reg_gg82563_80003es2lan,
- .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
-};
-
-static struct e1000_nvm_operations es2_nvm_ops = {
- .acquire = e1000_acquire_nvm_80003es2lan,
- .read = e1000e_read_nvm_eerd,
- .release = e1000_release_nvm_80003es2lan,
- .update = e1000e_update_nvm_checksum_generic,
- .valid_led_default = e1000e_valid_led_default,
- .validate = e1000e_validate_nvm_checksum_generic,
- .write = e1000_write_nvm_80003es2lan,
-};
-
-struct e1000_info e1000_es2_info = {
- .mac = e1000_80003es2lan,
- .flags = FLAG_HAS_HW_VLAN_FILTER
- | FLAG_HAS_JUMBO_FRAMES
- | FLAG_HAS_WOL
- | FLAG_APME_IN_CTRL3
- | FLAG_RX_CSUM_ENABLED
- | FLAG_HAS_CTRLEXT_ON_LOAD
- | FLAG_RX_NEEDS_RESTART /* errata */
- | FLAG_TARC_SET_BIT_ZERO /* errata */
- | FLAG_APME_CHECK_PORT_B
- | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
- | FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
- .flags2 = FLAG2_DMA_BURST,
- .pba = 38,
- .max_hw_frame_size = DEFAULT_JUMBO,
- .get_variants = e1000_get_variants_80003es2lan,
- .mac_ops = &es2_mac_ops,
- .phy_ops = &es2_phy_ops,
- .nvm_ops = &es2_nvm_ops,
-};
-
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
deleted file mode 100644
index 6a0526a59a8..00000000000
--- a/drivers/net/e1000e/ethtool.c
+++ /dev/null
@@ -1,2082 +0,0 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/* ethtool support for e1000 */
-
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-#include <linux/ethtool.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-
-#include "e1000.h"
-
-enum {NETDEV_STATS, E1000_STATS};
-
-struct e1000_stats {
- char stat_string[ETH_GSTRING_LEN];
- int type;
- int sizeof_stat;
- int stat_offset;
-};
-
-#define E1000_STAT(str, m) { \
- .stat_string = str, \
- .type = E1000_STATS, \
- .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
- .stat_offset = offsetof(struct e1000_adapter, m) }
-#define E1000_NETDEV_STAT(str, m) { \
- .stat_string = str, \
- .type = NETDEV_STATS, \
- .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
- .stat_offset = offsetof(struct rtnl_link_stats64, m) }
-
-static const struct e1000_stats e1000_gstrings_stats[] = {
- E1000_STAT("rx_packets", stats.gprc),
- E1000_STAT("tx_packets", stats.gptc),
- E1000_STAT("rx_bytes", stats.gorc),
- E1000_STAT("tx_bytes", stats.gotc),
- E1000_STAT("rx_broadcast", stats.bprc),
- E1000_STAT("tx_broadcast", stats.bptc),
- E1000_STAT("rx_multicast", stats.mprc),
- E1000_STAT("tx_multicast", stats.mptc),
- E1000_NETDEV_STAT("rx_errors", rx_errors),
- E1000_NETDEV_STAT("tx_errors", tx_errors),
- E1000_NETDEV_STAT("tx_dropped", tx_dropped),
- E1000_STAT("multicast", stats.mprc),
- E1000_STAT("collisions", stats.colc),
- E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
- E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
- E1000_STAT("rx_crc_errors", stats.crcerrs),
- E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
- E1000_STAT("rx_no_buffer_count", stats.rnbc),
- E1000_STAT("rx_missed_errors", stats.mpc),
- E1000_STAT("tx_aborted_errors", stats.ecol),
- E1000_STAT("tx_carrier_errors", stats.tncrs),
- E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
- E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
- E1000_STAT("tx_window_errors", stats.latecol),
- E1000_STAT("tx_abort_late_coll", stats.latecol),
- E1000_STAT("tx_deferred_ok", stats.dc),
- E1000_STAT("tx_single_coll_ok", stats.scc),
- E1000_STAT("tx_multi_coll_ok", stats.mcc),
- E1000_STAT("tx_timeout_count", tx_timeout_count),
- E1000_STAT("tx_restart_queue", restart_queue),
- E1000_STAT("rx_long_length_errors", stats.roc),
- E1000_STAT("rx_short_length_errors", stats.ruc),
- E1000_STAT("rx_align_errors", stats.algnerrc),
- E1000_STAT("tx_tcp_seg_good", stats.tsctc),
- E1000_STAT("tx_tcp_seg_failed", stats.tsctfc),
- E1000_STAT("rx_flow_control_xon", stats.xonrxc),
- E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
- E1000_STAT("tx_flow_control_xon", stats.xontxc),
- E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
- E1000_STAT("rx_long_byte_count", stats.gorc),
- E1000_STAT("rx_csum_offload_good", hw_csum_good),
- E1000_STAT("rx_csum_offload_errors", hw_csum_err),
- E1000_STAT("rx_header_split", rx_hdr_split),
- E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
- E1000_STAT("tx_smbus", stats.mgptc),
- E1000_STAT("rx_smbus", stats.mgprc),
- E1000_STAT("dropped_smbus", stats.mgpdc),
- E1000_STAT("rx_dma_failed", rx_dma_failed),
- E1000_STAT("tx_dma_failed", tx_dma_failed),
-};
-
-#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
-#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN)
-static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
- "Register test (offline)", "Eeprom test (offline)",
- "Interrupt test (offline)", "Loopback test (offline)",
- "Link test (on/offline)"
-};
-#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
-
-static int e1000_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 speed;
-
- if (hw->phy.media_type == e1000_media_type_copper) {
-
- ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_TP);
- if (hw->phy.type == e1000_phy_ife)
- ecmd->supported &= ~SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_TP;
-
- if (hw->mac.autoneg == 1) {
- ecmd->advertising |= ADVERTISED_Autoneg;
- /* the e1000 autoneg seems to match ethtool nicely */
- ecmd->advertising |= hw->phy.autoneg_advertised;
- }
-
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy.addr;
- ecmd->transceiver = XCVR_INTERNAL;
-
- } else {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg);
-
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg);
-
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
- }
-
- speed = -1;
- ecmd->duplex = -1;
-
- if (netif_running(netdev)) {
- if (netif_carrier_ok(netdev)) {
- speed = adapter->link_speed;
- ecmd->duplex = adapter->link_duplex - 1;
- }
- } else {
- u32 status = er32(STATUS);
- if (status & E1000_STATUS_LU) {
- if (status & E1000_STATUS_SPEED_1000)
- speed = SPEED_1000;
- else if (status & E1000_STATUS_SPEED_100)
- speed = SPEED_100;
- else
- speed = SPEED_10;
-
- if (status & E1000_STATUS_FD)
- ecmd->duplex = DUPLEX_FULL;
- else
- ecmd->duplex = DUPLEX_HALF;
- }
- }
-
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
- hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
-
- /* MDI-X => 2; MDI =>1; Invalid =>0 */
- if ((hw->phy.media_type == e1000_media_type_copper) &&
- netif_carrier_ok(netdev))
- ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
- ETH_TP_MDI;
- else
- ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
-
- return 0;
-}
-
-static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
-{
- struct e1000_mac_info *mac = &adapter->hw.mac;
-
- mac->autoneg = 0;
-
- /* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work */
- if ((spd & 1) || (dplx & ~1))
- goto err_inval;
-
- /* Fiber NICs only allow 1000 gbps Full duplex */
- if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
- spd != SPEED_1000 &&
- dplx != DUPLEX_FULL) {
- goto err_inval;
- }
-
- switch (spd + dplx) {
- case SPEED_10 + DUPLEX_HALF:
- mac->forced_speed_duplex = ADVERTISE_10_HALF;
- break;
- case SPEED_10 + DUPLEX_FULL:
- mac->forced_speed_duplex = ADVERTISE_10_FULL;
- break;
- case SPEED_100 + DUPLEX_HALF:
- mac->forced_speed_duplex = ADVERTISE_100_HALF;
- break;
- case SPEED_100 + DUPLEX_FULL:
- mac->forced_speed_duplex = ADVERTISE_100_FULL;
- break;
- case SPEED_1000 + DUPLEX_FULL:
- mac->autoneg = 1;
- adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
- break;
- case SPEED_1000 + DUPLEX_HALF: /* not supported */
- default:
- goto err_inval;
- }
- return 0;
-
-err_inval:
- e_err("Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
-}
-
-static int e1000_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- /*
- * When SoL/IDER sessions are active, autoneg/speed/duplex
- * cannot be changed
- */
- if (e1000_check_reset_block(hw)) {
- e_err("Cannot change link characteristics when SoL/IDER is "
- "active.\n");
- return -EINVAL;
- }
-
- while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
-
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- hw->mac.autoneg = 1;
- if (hw->phy.media_type == e1000_media_type_fiber)
- hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg;
- else
- hw->phy.autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
- ecmd->advertising = hw->phy.autoneg_advertised;
- if (adapter->fc_autoneg)
- hw->fc.requested_mode = e1000_fc_default;
- } else {
- u32 speed = ethtool_cmd_speed(ecmd);
- if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
- clear_bit(__E1000_RESETTING, &adapter->state);
- return -EINVAL;
- }
- }
-
- /* reset the link */
-
- if (netif_running(adapter->netdev)) {
- e1000e_down(adapter);
- e1000e_up(adapter);
- } else {
- e1000e_reset(adapter);
- }
-
- clear_bit(__E1000_RESETTING, &adapter->state);
- return 0;
-}
-
-static void e1000_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- pause->autoneg =
- (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
-
- if (hw->fc.current_mode == e1000_fc_rx_pause) {
- pause->rx_pause = 1;
- } else if (hw->fc.current_mode == e1000_fc_tx_pause) {
- pause->tx_pause = 1;
- } else if (hw->fc.current_mode == e1000_fc_full) {
- pause->rx_pause = 1;
- pause->tx_pause = 1;
- }
-}
-
-static int e1000_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- int retval = 0;
-
- adapter->fc_autoneg = pause->autoneg;
-
- while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
-
- if (adapter->fc_autoneg == AUTONEG_ENABLE) {
- hw->fc.requested_mode = e1000_fc_default;
- if (netif_running(adapter->netdev)) {
- e1000e_down(adapter);
- e1000e_up(adapter);
- } else {
- e1000e_reset(adapter);
- }
- } else {
- if (pause->rx_pause && pause->tx_pause)
- hw->fc.requested_mode = e1000_fc_full;
- else if (pause->rx_pause && !pause->tx_pause)
- hw->fc.requested_mode = e1000_fc_rx_pause;
- else if (!pause->rx_pause && pause->tx_pause)
- hw->fc.requested_mode = e1000_fc_tx_pause;
- else if (!pause->rx_pause && !pause->tx_pause)
- hw->fc.requested_mode = e1000_fc_none;
-
- hw->fc.current_mode = hw->fc.requested_mode;
-
- if (hw->phy.media_type == e1000_media_type_fiber) {
- retval = hw->mac.ops.setup_link(hw);
- /* implicit goto out */
- } else {
- retval = e1000e_force_mac_fc(hw);
- if (retval)
- goto out;
- e1000e_set_fc_watermarks(hw);
- }
- }
-
-out:
- clear_bit(__E1000_RESETTING, &adapter->state);
- return retval;
-}
-
-static u32 e1000_get_rx_csum(struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- return adapter->flags & FLAG_RX_CSUM_ENABLED;
-}
-
-static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- if (data)
- adapter->flags |= FLAG_RX_CSUM_ENABLED;
- else
- adapter->flags &= ~FLAG_RX_CSUM_ENABLED;
-
- if (netif_running(netdev))
- e1000e_reinit_locked(adapter);
- else
- e1000e_reset(adapter);
- return 0;
-}
-
-static u32 e1000_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_HW_CSUM) != 0;
-}
-
-static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
-{
- if (data)
- netdev->features |= NETIF_F_HW_CSUM;
- else
- netdev->features &= ~NETIF_F_HW_CSUM;
-
- return 0;
-}
-
-static int e1000_set_tso(struct net_device *netdev, u32 data)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- if (data) {
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- } else {
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- }
-
- adapter->flags |= FLAG_TSO_FORCE;
- return 0;
-}
-
-static u32 e1000_get_msglevel(struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- return adapter->msg_enable;
-}
-
-static void e1000_set_msglevel(struct net_device *netdev, u32 data)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- adapter->msg_enable = data;
-}
-
-static int e1000_get_regs_len(struct net_device *netdev)
-{
-#define E1000_REGS_LEN 32 /* overestimate */
- return E1000_REGS_LEN * sizeof(u32);
-}
-
-static void e1000_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 *regs_buff = p;
- u16 phy_data;
-
- memset(p, 0, E1000_REGS_LEN * sizeof(u32));
-
- regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
- adapter->pdev->device;
-
- regs_buff[0] = er32(CTRL);
- regs_buff[1] = er32(STATUS);
-
- regs_buff[2] = er32(RCTL);
- regs_buff[3] = er32(RDLEN);
- regs_buff[4] = er32(RDH);
- regs_buff[5] = er32(RDT);
- regs_buff[6] = er32(RDTR);
-
- regs_buff[7] = er32(TCTL);
- regs_buff[8] = er32(TDLEN);
- regs_buff[9] = er32(TDH);
- regs_buff[10] = er32(TDT);
- regs_buff[11] = er32(TIDV);
-
- regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
-
- /* ethtool doesn't use anything past this point, so all this
- * code is likely legacy junk for apps that may or may not
- * exist */
- if (hw->phy.type == e1000_phy_m88) {
- e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
- regs_buff[13] = (u32)phy_data; /* cable length */
- regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
- regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
- regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
- e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
- regs_buff[18] = regs_buff[13]; /* cable polarity */
- regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
- regs_buff[20] = regs_buff[17]; /* polarity correction */
- /* phy receive errors */
- regs_buff[22] = adapter->phy_stats.receive_errors;
- regs_buff[23] = regs_buff[13]; /* mdix mode */
- }
- regs_buff[21] = 0; /* was idle_errors */
- e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
- regs_buff[24] = (u32)phy_data; /* phy local receiver status */
- regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
-}
-
-static int e1000_get_eeprom_len(struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- return adapter->hw.nvm.word_size * 2;
-}
-
-static int e1000_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u16 *eeprom_buff;
- int first_word;
- int last_word;
- int ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EINVAL;
-
- eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16);
-
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
-
- eeprom_buff = kmalloc(sizeof(u16) *
- (last_word - first_word + 1), GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
-
- if (hw->nvm.type == e1000_nvm_eeprom_spi) {
- ret_val = e1000_read_nvm(hw, first_word,
- last_word - first_word + 1,
- eeprom_buff);
- } else {
- for (i = 0; i < last_word - first_word + 1; i++) {
- ret_val = e1000_read_nvm(hw, first_word + i, 1,
- &eeprom_buff[i]);
- if (ret_val)
- break;
- }
- }
-
- if (ret_val) {
- /* a read error occurred, throw away the result */
- memset(eeprom_buff, 0xff, sizeof(u16) *
- (last_word - first_word + 1));
- } else {
- /* Device's eeprom is always little-endian, word addressable */
- for (i = 0; i < last_word - first_word + 1; i++)
- le16_to_cpus(&eeprom_buff[i]);
- }
-
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
- kfree(eeprom_buff);
-
- return ret_val;
-}
-
-static int e1000_set_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u16 *eeprom_buff;
- void *ptr;
- int max_len;
- int first_word;
- int last_word;
- int ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EOPNOTSUPP;
-
- if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16)))
- return -EFAULT;
-
- if (adapter->flags & FLAG_READ_ONLY_NVM)
- return -EINVAL;
-
- max_len = hw->nvm.word_size * 2;
-
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(max_len, GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
-
- ptr = (void *)eeprom_buff;
-
- if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
- ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]);
- ptr++;
- }
- if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0))
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
- ret_val = e1000_read_nvm(hw, last_word, 1,
- &eeprom_buff[last_word - first_word]);
-
- if (ret_val)
- goto out;
-
- /* Device's eeprom is always little-endian, word addressable */
- for (i = 0; i < last_word - first_word + 1; i++)
- le16_to_cpus(&eeprom_buff[i]);
-
- memcpy(ptr, bytes, eeprom->len);
-
- for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
-
- ret_val = e1000_write_nvm(hw, first_word,
- last_word - first_word + 1, eeprom_buff);
-
- if (ret_val)
- goto out;
-
- /*
- * Update the checksum over the first part of the EEPROM if needed
- * and flush shadow RAM for applicable controllers
- */
- if ((first_word <= NVM_CHECKSUM_REG) ||
- (hw->mac.type == e1000_82583) ||
- (hw->mac.type == e1000_82574) ||
- (hw->mac.type == e1000_82573))
- ret_val = e1000e_update_nvm_checksum(hw);
-
-out:
- kfree(eeprom_buff);
- return ret_val;
-}
-
-static void e1000_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
-
- strncpy(drvinfo->driver, e1000e_driver_name,
- sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, e1000e_driver_version,
- sizeof(drvinfo->version) - 1);
-
- /*
- * EEPROM image version # is reported as firmware version # for
- * PCI-E controllers
- */
- snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
- (adapter->eeprom_vers & 0xF000) >> 12,
- (adapter->eeprom_vers & 0x0FF0) >> 4,
- (adapter->eeprom_vers & 0x000F));
-
- strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
- drvinfo->regdump_len = e1000_get_regs_len(netdev);
- drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
-}
-
-static void e1000_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_ring *tx_ring = adapter->tx_ring;
- struct e1000_ring *rx_ring = adapter->rx_ring;
-
- ring->rx_max_pending = E1000_MAX_RXD;
- ring->tx_max_pending = E1000_MAX_TXD;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
-}
-
-static int e1000_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_ring *tx_ring, *tx_old;
- struct e1000_ring *rx_ring, *rx_old;
- int err;
-
- if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
- return -EINVAL;
-
- while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
-
- if (netif_running(adapter->netdev))
- e1000e_down(adapter);
-
- tx_old = adapter->tx_ring;
- rx_old = adapter->rx_ring;
-
- err = -ENOMEM;
- tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
- if (!tx_ring)
- goto err_alloc_tx;
-
- rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
- if (!rx_ring)
- goto err_alloc_rx;
-
- adapter->tx_ring = tx_ring;
- adapter->rx_ring = rx_ring;
-
- rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
- rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD));
- rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);
-
- tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
- tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD));
- tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
-
- if (netif_running(adapter->netdev)) {
- /* Try to get new resources before deleting old */
- err = e1000e_setup_rx_resources(adapter);
- if (err)
- goto err_setup_rx;
- err = e1000e_setup_tx_resources(adapter);
- if (err)
- goto err_setup_tx;
-
- /*
- * restore the old in order to free it,
- * then add in the new
- */
- adapter->rx_ring = rx_old;
- adapter->tx_ring = tx_old;
- e1000e_free_rx_resources(adapter);
- e1000e_free_tx_resources(adapter);
- kfree(tx_old);
- kfree(rx_old);
- adapter->rx_ring = rx_ring;
- adapter->tx_ring = tx_ring;
- err = e1000e_up(adapter);
- if (err)
- goto err_setup;
- }
-
- clear_bit(__E1000_RESETTING, &adapter->state);
- return 0;
-err_setup_tx:
- e1000e_free_rx_resources(adapter);
-err_setup_rx:
- adapter->rx_ring = rx_old;
- adapter->tx_ring = tx_old;
- kfree(rx_ring);
-err_alloc_rx:
- kfree(tx_ring);
-err_alloc_tx:
- e1000e_up(adapter);
-err_setup:
- clear_bit(__E1000_RESETTING, &adapter->state);
- return err;
-}
-
-static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
- int reg, int offset, u32 mask, u32 write)
-{
- u32 pat, val;
- static const u32 test[] = {
- 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
- for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
- E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
- (test[pat] & write));
- val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
- if (val != (test[pat] & write & mask)) {
- e_err("pattern test reg %04X failed: got 0x%08X "
- "expected 0x%08X\n", reg + offset, val,
- (test[pat] & write & mask));
- *data = reg;
- return 1;
- }
- }
- return 0;
-}
-
-static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
- int reg, u32 mask, u32 write)
-{
- u32 val;
- __ew32(&adapter->hw, reg, write & mask);
- val = __er32(&adapter->hw, reg);
- if ((write & mask) != (val & mask)) {
- e_err("set/check reg %04X test failed: got 0x%08X "
- "expected 0x%08X\n", reg, (val & mask), (write & mask));
- *data = reg;
- return 1;
- }
- return 0;
-}
-#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
- do { \
- if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
- return 1; \
- } while (0)
-#define REG_PATTERN_TEST(reg, mask, write) \
- REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)
-
-#define REG_SET_AND_CHECK(reg, mask, write) \
- do { \
- if (reg_set_and_check(adapter, data, reg, mask, write)) \
- return 1; \
- } while (0)
-
-static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_mac_info *mac = &adapter->hw.mac;
- u32 value;
- u32 before;
- u32 after;
- u32 i;
- u32 toggle;
- u32 mask;
-
- /*
- * The status register is Read Only, so a write should fail.
- * Some bits that get toggled are ignored.
- */
- switch (mac->type) {
- /* there are several bits on newer hardware that are r/w */
- case e1000_82571:
- case e1000_82572:
- case e1000_80003es2lan:
- toggle = 0x7FFFF3FF;
- break;
- default:
- toggle = 0x7FFFF033;
- break;
- }
-
- before = er32(STATUS);
- value = (er32(STATUS) & toggle);
- ew32(STATUS, toggle);
- after = er32(STATUS) & toggle;
- if (value != after) {
- e_err("failed STATUS register test got: 0x%08X expected: "
- "0x%08X\n", after, value);
- *data = 1;
- return 1;
- }
- /* restore previous status */
- ew32(STATUS, before);
-
- if (!(adapter->flags & FLAG_IS_ICH)) {
- REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
- }
-
- REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF);
- REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF);
- REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF);
- REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
- REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
- REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
- REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF);
-
- REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
-
- before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
- REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
- REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
-
- REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
- REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
- if (!(adapter->flags & FLAG_IS_ICH))
- REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
- REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
- REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
- mask = 0x8003FFFF;
- switch (mac->type) {
- case e1000_ich10lan:
- case e1000_pchlan:
- case e1000_pch2lan:
- mask |= (1 << 18);
- break;
- default:
- break;
- }
- for (i = 0; i < mac->rar_entry_count; i++)
- REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
- mask, 0xFFFFFFFF);
-
- for (i = 0; i < mac->mta_reg_count; i++)
- REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
-
- *data = 0;
- return 0;
-}
-
-static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
-{
- u16 temp;
- u16 checksum = 0;
- u16 i;
-
- *data = 0;
- /* Read and add up the contents of the EEPROM */
- for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
- if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
- *data = 1;
- return *data;
- }
- checksum += temp;
- }
-
- /* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16) NVM_SUM) && !(*data))
- *data = 2;
-
- return *data;
-}
-
-static irqreturn_t e1000_test_intr(int irq, void *data)
-{
- struct net_device *netdev = (struct net_device *) data;
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- adapter->test_icr |= er32(ICR);
-
- return IRQ_HANDLED;
-}
-
-static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
-{
- struct net_device *netdev = adapter->netdev;
- struct e1000_hw *hw = &adapter->hw;
- u32 mask;
- u32 shared_int = 1;
- u32 irq = adapter->pdev->irq;
- int i;
- int ret_val = 0;
- int int_mode = E1000E_INT_MODE_LEGACY;
-
- *data = 0;
-
- /* NOTE: we don't test MSI/MSI-X interrupts here, yet */
- if (adapter->int_mode == E1000E_INT_MODE_MSIX) {
- int_mode = adapter->int_mode;
- e1000e_reset_interrupt_capability(adapter);
- adapter->int_mode = E1000E_INT_MODE_LEGACY;
- e1000e_set_interrupt_capability(adapter);
- }
- /* Hook up test interrupt handler just for this test */
- if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
- netdev)) {
- shared_int = 0;
- } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
- *data = 1;
- ret_val = -1;
- goto out;
- }
- e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
-
- /* Disable all the interrupts */
- ew32(IMC, 0xFFFFFFFF);
- e1e_flush();
- usleep_range(10000, 20000);
-
- /* Test each interrupt */
- for (i = 0; i < 10; i++) {
- /* Interrupt to test */
- mask = 1 << i;
-
- if (adapter->flags & FLAG_IS_ICH) {
- switch (mask) {
- case E1000_ICR_RXSEQ:
- continue;
- case 0x00000100:
- if (adapter->hw.mac.type == e1000_ich8lan ||
- adapter->hw.mac.type == e1000_ich9lan)
- continue;
- break;
- default:
- break;
- }
- }
-
- if (!shared_int) {
- /*
- * Disable the interrupt to be reported in
- * the cause register and then force the same
- * interrupt and see if one gets posted. If
- * an interrupt was posted to the bus, the
- * test failed.
- */
- adapter->test_icr = 0;
- ew32(IMC, mask);
- ew32(ICS, mask);
- e1e_flush();
- usleep_range(10000, 20000);
-
- if (adapter->test_icr & mask) {
- *data = 3;
- break;
- }
- }
-
- /*
- * Enable the interrupt to be reported in
- * the cause register and then force the same
- * interrupt and see if one gets posted. If
- * an interrupt was not posted to the bus, the
- * test failed.
- */
- adapter->test_icr = 0;
- ew32(IMS, mask);
- ew32(ICS, mask);
- e1e_flush();
- usleep_range(10000, 20000);
-
- if (!(adapter->test_icr & mask)) {
- *data = 4;
- break;
- }
-
- if (!shared_int) {
- /*
- * Disable the other interrupts to be reported in
- * the cause register and then force the other
- * interrupts and see if any get posted. If
- * an interrupt was posted to the bus, the
- * test failed.
- */
- adapter->test_icr = 0;
- ew32(IMC, ~mask & 0x00007FFF);
- ew32(ICS, ~mask & 0x00007FFF);
- e1e_flush();
- usleep_range(10000, 20000);
-
- if (adapter->test_icr) {
- *data = 5;
- break;
- }
- }
- }
-
- /* Disable all the interrupts */
- ew32(IMC, 0xFFFFFFFF);
- e1e_flush();
- usleep_range(10000, 20000);
-
- /* Unhook test interrupt handler */
- free_irq(irq, netdev);
-
-out:
- if (int_mode == E1000E_INT_MODE_MSIX) {
- e1000e_reset_interrupt_capability(adapter);
- adapter->int_mode = int_mode;
- e1000e_set_interrupt_capability(adapter);
- }
-
- return ret_val;
-}
-
-static void e1000_free_desc_rings(struct e1000_adapter *adapter)
-{
- struct e1000_ring *tx_ring = &adapter->test_tx_ring;
- struct e1000_ring *rx_ring = &adapter->test_rx_ring;
- struct pci_dev *pdev = adapter->pdev;
- int i;
-
- if (tx_ring->desc && tx_ring->buffer_info) {
- for (i = 0; i < tx_ring->count; i++) {
- if (tx_ring->buffer_info[i].dma)
- dma_unmap_single(&pdev->dev,
- tx_ring->buffer_info[i].dma,
- tx_ring->buffer_info[i].length,
- DMA_TO_DEVICE);
- if (tx_ring->buffer_info[i].skb)
- dev_kfree_skb(tx_ring->buffer_info[i].skb);
- }
- }
-
- if (rx_ring->desc && rx_ring->buffer_info) {
- for (i = 0; i < rx_ring->count; i++) {
- if (rx_ring->buffer_info[i].dma)
- dma_unmap_single(&pdev->dev,
- rx_ring->buffer_info[i].dma,
- 2048, DMA_FROM_DEVICE);
- if (rx_ring->buffer_info[i].skb)
- dev_kfree_skb(rx_ring->buffer_info[i].skb);
- }
- }
-
- if (tx_ring->desc) {
- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
- tx_ring->desc = NULL;
- }
- if (rx_ring->desc) {
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
- rx_ring->desc = NULL;
- }
-
- kfree(tx_ring->buffer_info);
- tx_ring->buffer_info = NULL;
- kfree(rx_ring->buffer_info);
- rx_ring->buffer_info = NULL;
-}
-
-static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
-{
- struct e1000_ring *tx_ring = &adapter->test_tx_ring;
- struct e1000_ring *rx_ring = &adapter->test_rx_ring;
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
- int i;
- int ret_val;
-
- /* Setup Tx descriptor ring and Tx buffers */
-
- if (!tx_ring->count)
- tx_ring->count = E1000_DEFAULT_TXD;
-
- tx_ring->buffer_info = kcalloc(tx_ring->count,
- sizeof(struct e1000_buffer),
- GFP_KERNEL);
- if (!(tx_ring->buffer_info)) {
- ret_val = 1;
- goto err_nomem;
- }
-
- tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
- tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
- if (!tx_ring->desc) {
- ret_val = 2;
- goto err_nomem;
- }
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-
- ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
- ew32(TDBAH, ((u64) tx_ring->dma >> 32));
- ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc));
- ew32(TDH, 0);
- ew32(TDT, 0);
- ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
- E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
- E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
-
- for (i = 0; i < tx_ring->count; i++) {
- struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
- struct sk_buff *skb;
- unsigned int skb_size = 1024;
-
- skb = alloc_skb(skb_size, GFP_KERNEL);
- if (!skb) {
- ret_val = 3;
- goto err_nomem;
- }
- skb_put(skb, skb_size);
- tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[i].length = skb->len;
- tx_ring->buffer_info[i].dma =
- dma_map_single(&pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev,
- tx_ring->buffer_info[i].dma)) {
- ret_val = 4;
- goto err_nomem;
- }
- tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
- tx_desc->lower.data = cpu_to_le32(skb->len);
- tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
- E1000_TXD_CMD_IFCS |
- E1000_TXD_CMD_RS);
- tx_desc->upper.data = 0;
- }
-
- /* Setup Rx descriptor ring and Rx buffers */
-
- if (!rx_ring->count)
- rx_ring->count = E1000_DEFAULT_RXD;
-
- rx_ring->buffer_info = kcalloc(rx_ring->count,
- sizeof(struct e1000_buffer),
- GFP_KERNEL);
- if (!(rx_ring->buffer_info)) {
- ret_val = 5;
- goto err_nomem;
- }
-
- rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
- if (!rx_ring->desc) {
- ret_val = 6;
- goto err_nomem;
- }
- rx_ring->next_to_use = 0;
- rx_ring->next_to_clean = 0;
-
- rctl = er32(RCTL);
- if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
- ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
- ew32(RDBAH, ((u64) rx_ring->dma >> 32));
- ew32(RDLEN, rx_ring->size);
- ew32(RDH, 0);
- ew32(RDT, 0);
- rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
- E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
- E1000_RCTL_SBP | E1000_RCTL_SECRC |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
- ew32(RCTL, rctl);
-
- for (i = 0; i < rx_ring->count; i++) {
- struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
- struct sk_buff *skb;
-
- skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
- if (!skb) {
- ret_val = 7;
- goto err_nomem;
- }
- skb_reserve(skb, NET_IP_ALIGN);
- rx_ring->buffer_info[i].skb = skb;
- rx_ring->buffer_info[i].dma =
- dma_map_single(&pdev->dev, skb->data, 2048,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev,
- rx_ring->buffer_info[i].dma)) {
- ret_val = 8;
- goto err_nomem;
- }
- rx_desc->buffer_addr =
- cpu_to_le64(rx_ring->buffer_info[i].dma);
- memset(skb->data, 0x00, skb->len);
- }
-
- return 0;
-
-err_nomem:
- e1000_free_desc_rings(adapter);
- return ret_val;
-}
-
-static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
-{
- /* Write out to PHY registers 29 and 30 to disable the Receiver. */
- e1e_wphy(&adapter->hw, 29, 0x001F);
- e1e_wphy(&adapter->hw, 30, 0x8FFC);
- e1e_wphy(&adapter->hw, 29, 0x001A);
- e1e_wphy(&adapter->hw, 30, 0x8FF0);
-}
-
-static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl_reg = 0;
- u16 phy_reg = 0;
- s32 ret_val = 0;
-
- hw->mac.autoneg = 0;
-
- if (hw->phy.type == e1000_phy_ife) {
- /* force 100, set loopback */
- e1e_wphy(hw, PHY_CONTROL, 0x6100);
-
- /* Now set up the MAC to the same speed/duplex as the PHY. */
- ctrl_reg = er32(CTRL);
- ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
- ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
- E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
- E1000_CTRL_SPD_100 |/* Force Speed to 100 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
-
- ew32(CTRL, ctrl_reg);
- e1e_flush();
- udelay(500);
-
- return 0;
- }
-
- /* Specific PHY configuration for loopback */
- switch (hw->phy.type) {
- case e1000_phy_m88:
- /* Auto-MDI/MDIX Off */
- e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
- /* reset to update Auto-MDI/MDIX */
- e1e_wphy(hw, PHY_CONTROL, 0x9140);
- /* autoneg off */
- e1e_wphy(hw, PHY_CONTROL, 0x8140);
- break;
- case e1000_phy_gg82563:
- e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
- break;
- case e1000_phy_bm:
- /* Set Default MAC Interface speed to 1GB */
- e1e_rphy(hw, PHY_REG(2, 21), &phy_reg);
- phy_reg &= ~0x0007;
- phy_reg |= 0x006;
- e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
- /* Assert SW reset for above settings to take effect */
- e1000e_commit_phy(hw);
- mdelay(1);
- /* Force Full Duplex */
- e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
- e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
- /* Set Link Up (in force link) */
- e1e_rphy(hw, PHY_REG(776, 16), &phy_reg);
- e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040);
- /* Force Link */
- e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
- e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040);
- /* Set Early Link Enable */
- e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
- e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
- break;
- case e1000_phy_82577:
- case e1000_phy_82578:
- /* Workaround: K1 must be disabled for stable 1Gbps operation */
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val) {
- e_err("Cannot setup 1Gbps loopback.\n");
- return ret_val;
- }
- e1000_configure_k1_ich8lan(hw, false);
- hw->phy.ops.release(hw);
- break;
- case e1000_phy_82579:
- /* Disable PHY energy detect power down */
- e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
- e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
- /* Disable full chip energy detect */
- e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
- e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
- /* Enable loopback on the PHY */
-#define I82577_PHY_LBK_CTRL 19
- e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
- break;
- default:
- break;
- }
-
- /* force 1000, set loopback */
- e1e_wphy(hw, PHY_CONTROL, 0x4140);
- mdelay(250);
-
- /* Now set up the MAC to the same speed/duplex as the PHY. */
- ctrl_reg = er32(CTRL);
- ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
- ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
- E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
- E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
-
- if (adapter->flags & FLAG_IS_ICH)
- ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
-
- if (hw->phy.media_type == e1000_media_type_copper &&
- hw->phy.type == e1000_phy_m88) {
- ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
- } else {
- /*
- * Set the ILOS bit on the fiber Nic if half duplex link is
- * detected.
- */
- if ((er32(STATUS) & E1000_STATUS_FD) == 0)
- ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
- }
-
- ew32(CTRL, ctrl_reg);
-
- /*
- * Disable the receiver on the PHY so when a cable is plugged in, the
- * PHY does not begin to autoneg when a cable is reconnected to the NIC.
- */
- if (hw->phy.type == e1000_phy_m88)
- e1000_phy_disable_receiver(adapter);
-
- udelay(500);
-
- return 0;
-}
-
-static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl = er32(CTRL);
- int link = 0;
-
- /* special requirements for 82571/82572 fiber adapters */
-
- /*
- * jump through hoops to make sure link is up because serdes
- * link is hardwired up
- */
- ctrl |= E1000_CTRL_SLU;
- ew32(CTRL, ctrl);
-
- /* disable autoneg */
- ctrl = er32(TXCW);
- ctrl &= ~(1 << 31);
- ew32(TXCW, ctrl);
-
- link = (er32(STATUS) & E1000_STATUS_LU);
-
- if (!link) {
- /* set invert loss of signal */
- ctrl = er32(CTRL);
- ctrl |= E1000_CTRL_ILOS;
- ew32(CTRL, ctrl);
- }
-
- /*
- * special write to serdes control register to enable SerDes analog
- * loopback
- */
-#define E1000_SERDES_LB_ON 0x410
- ew32(SCTL, E1000_SERDES_LB_ON);
- e1e_flush();
- usleep_range(10000, 20000);
-
- return 0;
-}
-
-/* only call this for fiber/serdes connections to es2lan */
-static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrlext = er32(CTRL_EXT);
- u32 ctrl = er32(CTRL);
-
- /*
- * save CTRL_EXT to restore later, reuse an empty variable (unused
- * on mac_type 80003es2lan)
- */
- adapter->tx_fifo_head = ctrlext;
-
- /* clear the serdes mode bits, putting the device into mac loopback */
- ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
- ew32(CTRL_EXT, ctrlext);
-
- /* force speed to 1000/FD, link up */
- ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
- ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
- E1000_CTRL_SPD_1000 | E1000_CTRL_FD);
- ew32(CTRL, ctrl);
-
- /* set mac loopback */
- ctrl = er32(RCTL);
- ctrl |= E1000_RCTL_LBM_MAC;
- ew32(RCTL, ctrl);
-
- /* set testing mode parameters (no need to reset later) */
-#define KMRNCTRLSTA_OPMODE (0x1F << 16)
-#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
- ew32(KMRNCTRLSTA,
- (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
-
- return 0;
-}
-
-static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
-
- if (hw->phy.media_type == e1000_media_type_fiber ||
- hw->phy.media_type == e1000_media_type_internal_serdes) {
- switch (hw->mac.type) {
- case e1000_80003es2lan:
- return e1000_set_es2lan_mac_loopback(adapter);
- break;
- case e1000_82571:
- case e1000_82572:
- return e1000_set_82571_fiber_loopback(adapter);
- break;
- default:
- rctl = er32(RCTL);
- rctl |= E1000_RCTL_LBM_TCVR;
- ew32(RCTL, rctl);
- return 0;
- }
- } else if (hw->phy.media_type == e1000_media_type_copper) {
- return e1000_integrated_phy_loopback(adapter);
- }
-
- return 7;
-}
-
-static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
- u16 phy_reg;
-
- rctl = er32(RCTL);
- rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
- ew32(RCTL, rctl);
-
- switch (hw->mac.type) {
- case e1000_80003es2lan:
- if (hw->phy.media_type == e1000_media_type_fiber ||
- hw->phy.media_type == e1000_media_type_internal_serdes) {
- /* restore CTRL_EXT, stealing space from tx_fifo_head */
- ew32(CTRL_EXT, adapter->tx_fifo_head);
- adapter->tx_fifo_head = 0;
- }
- /* fall through */
- case e1000_82571:
- case e1000_82572:
- if (hw->phy.media_type == e1000_media_type_fiber ||
- hw->phy.media_type == e1000_media_type_internal_serdes) {
-#define E1000_SERDES_LB_OFF 0x400
- ew32(SCTL, E1000_SERDES_LB_OFF);
- e1e_flush();
- usleep_range(10000, 20000);
- break;
- }
- /* Fall Through */
- default:
- hw->mac.autoneg = 1;
- if (hw->phy.type == e1000_phy_gg82563)
- e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180);
- e1e_rphy(hw, PHY_CONTROL, &phy_reg);
- if (phy_reg & MII_CR_LOOPBACK) {
- phy_reg &= ~MII_CR_LOOPBACK;
- e1e_wphy(hw, PHY_CONTROL, phy_reg);
- e1000e_commit_phy(hw);
- }
- break;
- }
-}
-
-static void e1000_create_lbtest_frame(struct sk_buff *skb,
- unsigned int frame_size)
-{
- memset(skb->data, 0xFF, frame_size);
- frame_size &= ~1;
- memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
- memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
- memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
-}
-
-static int e1000_check_lbtest_frame(struct sk_buff *skb,
- unsigned int frame_size)
-{
- frame_size &= ~1;
- if (*(skb->data + 3) == 0xFF)
- if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
- (*(skb->data + frame_size / 2 + 12) == 0xAF))
- return 0;
- return 13;
-}
-
-static int e1000_run_loopback_test(struct e1000_adapter *adapter)
-{
- struct e1000_ring *tx_ring = &adapter->test_tx_ring;
- struct e1000_ring *rx_ring = &adapter->test_rx_ring;
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
- int i, j, k, l;
- int lc;
- int good_cnt;
- int ret_val = 0;
- unsigned long time;
-
- ew32(RDT, rx_ring->count - 1);
-
- /*
- * Calculate the loop count based on the largest descriptor ring
- * The idea is to wrap the largest ring a number of times using 64
- * send/receive pairs during each loop
- */
-
- if (rx_ring->count <= tx_ring->count)
- lc = ((tx_ring->count / 64) * 2) + 1;
- else
- lc = ((rx_ring->count / 64) * 2) + 1;
-
- k = 0;
- l = 0;
- for (j = 0; j <= lc; j++) { /* loop count loop */
- for (i = 0; i < 64; i++) { /* send the packets */
- e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
- 1024);
- dma_sync_single_for_device(&pdev->dev,
- tx_ring->buffer_info[k].dma,
- tx_ring->buffer_info[k].length,
- DMA_TO_DEVICE);
- k++;
- if (k == tx_ring->count)
- k = 0;
- }
- ew32(TDT, k);
- e1e_flush();
- msleep(200);
- time = jiffies; /* set the start time for the receive */
- good_cnt = 0;
- do { /* receive the sent packets */
- dma_sync_single_for_cpu(&pdev->dev,
- rx_ring->buffer_info[l].dma, 2048,
- DMA_FROM_DEVICE);
-
- ret_val = e1000_check_lbtest_frame(
- rx_ring->buffer_info[l].skb, 1024);
- if (!ret_val)
- good_cnt++;
- l++;
- if (l == rx_ring->count)
- l = 0;
- /*
- * time + 20 msecs (200 msecs on 2.4) is more than
- * enough time to complete the receives, if it's
- * exceeded, break and error off
- */
- } while ((good_cnt < 64) && !time_after(jiffies, time + 20));
- if (good_cnt != 64) {
- ret_val = 13; /* ret_val is the same as mis-compare */
- break;
- }
- if (jiffies >= (time + 20)) {
- ret_val = 14; /* error code for time out error */
- break;
- }
- } /* end loop count loop */
- return ret_val;
-}
-
-static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
-{
- /*
- * PHY loopback cannot be performed if SoL/IDER
- * sessions are active
- */
- if (e1000_check_reset_block(&adapter->hw)) {
- e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
- *data = 0;
- goto out;
- }
-
- *data = e1000_setup_desc_rings(adapter);
- if (*data)
- goto out;
-
- *data = e1000_setup_loopback_test(adapter);
- if (*data)
- goto err_loopback;
-
- *data = e1000_run_loopback_test(adapter);
- e1000_loopback_cleanup(adapter);
-
-err_loopback:
- e1000_free_desc_rings(adapter);
-out:
- return *data;
-}
-
-static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- *data = 0;
- if (hw->phy.media_type == e1000_media_type_internal_serdes) {
- int i = 0;
- hw->mac.serdes_has_link = false;
-
- /*
- * On some blade server designs, link establishment
- * could take as long as 2-3 minutes
- */
- do {
- hw->mac.ops.check_for_link(hw);
- if (hw->mac.serdes_has_link)
- return *data;
- msleep(20);
- } while (i++ < 3750);
-
- *data = 1;
- } else {
- hw->mac.ops.check_for_link(hw);
- if (hw->mac.autoneg)
- /*
- * On some Phy/switch combinations, link establishment
- * can take a few seconds more than expected.
- */
- msleep(5000);
-
- if (!(er32(STATUS) & E1000_STATUS_LU))
- *data = 1;
- }
- return *data;
-}
-
-static int e1000e_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_TEST:
- return E1000_TEST_LEN;
- case ETH_SS_STATS:
- return E1000_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void e1000_diag_test(struct net_device *netdev,
- struct ethtool_test *eth_test, u64 *data)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- u16 autoneg_advertised;
- u8 forced_speed_duplex;
- u8 autoneg;
- bool if_running = netif_running(netdev);
-
- set_bit(__E1000_TESTING, &adapter->state);
-
- if (!if_running) {
- /* Get control of and reset hardware */
- if (adapter->flags & FLAG_HAS_AMT)
- e1000e_get_hw_control(adapter);
-
- e1000e_power_up_phy(adapter);
-
- adapter->hw.phy.autoneg_wait_to_complete = 1;
- e1000e_reset(adapter);
- adapter->hw.phy.autoneg_wait_to_complete = 0;
- }
-
- if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
- /* Offline tests */
-
- /* save speed, duplex, autoneg settings */
- autoneg_advertised = adapter->hw.phy.autoneg_advertised;
- forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
- autoneg = adapter->hw.mac.autoneg;
-
- e_info("offline testing starting\n");
-
- if (if_running)
- /* indicate we're in test mode */
- dev_close(netdev);
-
- if (e1000_reg_test(adapter, &data[0]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- e1000e_reset(adapter);
- if (e1000_eeprom_test(adapter, &data[1]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- e1000e_reset(adapter);
- if (e1000_intr_test(adapter, &data[2]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- e1000e_reset(adapter);
- if (e1000_loopback_test(adapter, &data[3]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- /* force this routine to wait until autoneg complete/timeout */
- adapter->hw.phy.autoneg_wait_to_complete = 1;
- e1000e_reset(adapter);
- adapter->hw.phy.autoneg_wait_to_complete = 0;
-
- if (e1000_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- /* restore speed, duplex, autoneg settings */
- adapter->hw.phy.autoneg_advertised = autoneg_advertised;
- adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
- adapter->hw.mac.autoneg = autoneg;
- e1000e_reset(adapter);
-
- clear_bit(__E1000_TESTING, &adapter->state);
- if (if_running)
- dev_open(netdev);
- } else {
- /* Online tests */
-
- e_info("online testing starting\n");
-
- /* register, eeprom, intr and loopback tests not run online */
- data[0] = 0;
- data[1] = 0;
- data[2] = 0;
- data[3] = 0;
-
- if (e1000_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- clear_bit(__E1000_TESTING, &adapter->state);
- }
-
- if (!if_running) {
- e1000e_reset(adapter);
-
- if (adapter->flags & FLAG_HAS_AMT)
- e1000e_release_hw_control(adapter);
- }
-
- msleep_interruptible(4 * 1000);
-}
-
-static void e1000_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wol)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (!(adapter->flags & FLAG_HAS_WOL) ||
- !device_can_wakeup(&adapter->pdev->dev))
- return;
-
- wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC | WAKE_PHY;
-
- /* apply any specific unsupported masks here */
- if (adapter->flags & FLAG_NO_WAKE_UCAST) {
- wol->supported &= ~WAKE_UCAST;
-
- if (adapter->wol & E1000_WUFC_EX)
- e_err("Interface does not support directed (unicast) "
- "frame wake-up packets\n");
- }
-
- if (adapter->wol & E1000_WUFC_EX)
- wol->wolopts |= WAKE_UCAST;
- if (adapter->wol & E1000_WUFC_MC)
- wol->wolopts |= WAKE_MCAST;
- if (adapter->wol & E1000_WUFC_BC)
- wol->wolopts |= WAKE_BCAST;
- if (adapter->wol & E1000_WUFC_MAG)
- wol->wolopts |= WAKE_MAGIC;
- if (adapter->wol & E1000_WUFC_LNKC)
- wol->wolopts |= WAKE_PHY;
-}
-
-static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- if (!(adapter->flags & FLAG_HAS_WOL) ||
- !device_can_wakeup(&adapter->pdev->dev) ||
- (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
- WAKE_MAGIC | WAKE_PHY)))
- return -EOPNOTSUPP;
-
- /* these settings will always override what we currently have */
- adapter->wol = 0;
-
- if (wol->wolopts & WAKE_UCAST)
- adapter->wol |= E1000_WUFC_EX;
- if (wol->wolopts & WAKE_MCAST)
- adapter->wol |= E1000_WUFC_MC;
- if (wol->wolopts & WAKE_BCAST)
- adapter->wol |= E1000_WUFC_BC;
- if (wol->wolopts & WAKE_MAGIC)
- adapter->wol |= E1000_WUFC_MAG;
- if (wol->wolopts & WAKE_PHY)
- adapter->wol |= E1000_WUFC_LNKC;
-
- device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
-
- return 0;
-}
-
-static int e1000_set_phys_id(struct net_device *netdev,
- enum ethtool_phys_id_state state)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- if (!hw->mac.ops.blink_led)
- return 2; /* cycle on/off twice per second */
-
- hw->mac.ops.blink_led(hw);
- break;
-
- case ETHTOOL_ID_INACTIVE:
- if (hw->phy.type == e1000_phy_ife)
- e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
- hw->mac.ops.led_off(hw);
- hw->mac.ops.cleanup_led(hw);
- break;
-
- case ETHTOOL_ID_ON:
- adapter->hw.mac.ops.led_on(&adapter->hw);
- break;
-
- case ETHTOOL_ID_OFF:
- adapter->hw.mac.ops.led_off(&adapter->hw);
- break;
- }
- return 0;
-}
-
-static int e1000_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- if (adapter->itr_setting <= 4)
- ec->rx_coalesce_usecs = adapter->itr_setting;
- else
- ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
-
- return 0;
-}
-
-static int e1000_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 4) &&
- (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
- (ec->rx_coalesce_usecs == 2))
- return -EINVAL;
-
- if (ec->rx_coalesce_usecs == 4) {
- adapter->itr = adapter->itr_setting = 4;
- } else if (ec->rx_coalesce_usecs <= 3) {
- adapter->itr = 20000;
- adapter->itr_setting = ec->rx_coalesce_usecs;
- } else {
- adapter->itr = (1000000 / ec->rx_coalesce_usecs);
- adapter->itr_setting = adapter->itr & ~3;
- }
-
- if (adapter->itr_setting != 0)
- ew32(ITR, 1000000000 / (adapter->itr * 256));
- else
- ew32(ITR, 0);
-
- return 0;
-}
-
-static int e1000_nway_reset(struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- if (!netif_running(netdev))
- return -EAGAIN;
-
- if (!adapter->hw.mac.autoneg)
- return -EINVAL;
-
- e1000e_reinit_locked(adapter);
-
- return 0;
-}
-
-static void e1000_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats,
- u64 *data)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct rtnl_link_stats64 net_stats;
- int i;
- char *p = NULL;
-
- e1000e_get_stats64(netdev, &net_stats);
- for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
- switch (e1000_gstrings_stats[i].type) {
- case NETDEV_STATS:
- p = (char *) &net_stats +
- e1000_gstrings_stats[i].stat_offset;
- break;
- case E1000_STATS:
- p = (char *) adapter +
- e1000_gstrings_stats[i].stat_offset;
- break;
- default:
- data[i] = 0;
- continue;
- }
-
- data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
-}
-
-static void e1000_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
-{
- u8 *p = data;
- int i;
-
- switch (stringset) {
- case ETH_SS_TEST:
- memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
- break;
- case ETH_SS_STATS:
- for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
- memcpy(p, e1000_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static int e1000e_set_flags(struct net_device *netdev, u32 data)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- bool need_reset = false;
- int rc;
-
- need_reset = (data & ETH_FLAG_RXVLAN) !=
- (netdev->features & NETIF_F_HW_VLAN_RX);
-
- rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN |
- ETH_FLAG_TXVLAN);
-
- if (rc)
- return rc;
-
- if (need_reset) {
- if (netif_running(netdev))
- e1000e_reinit_locked(adapter);
- else
- e1000e_reset(adapter);
- }
-
- return 0;
-}
-
-static const struct ethtool_ops e1000_ethtool_ops = {
- .get_settings = e1000_get_settings,
- .set_settings = e1000_set_settings,
- .get_drvinfo = e1000_get_drvinfo,
- .get_regs_len = e1000_get_regs_len,
- .get_regs = e1000_get_regs,
- .get_wol = e1000_get_wol,
- .set_wol = e1000_set_wol,
- .get_msglevel = e1000_get_msglevel,
- .set_msglevel = e1000_set_msglevel,
- .nway_reset = e1000_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_eeprom_len = e1000_get_eeprom_len,
- .get_eeprom = e1000_get_eeprom,
- .set_eeprom = e1000_set_eeprom,
- .get_ringparam = e1000_get_ringparam,
- .set_ringparam = e1000_set_ringparam,
- .get_pauseparam = e1000_get_pauseparam,
- .set_pauseparam = e1000_set_pauseparam,
- .get_rx_csum = e1000_get_rx_csum,
- .set_rx_csum = e1000_set_rx_csum,
- .get_tx_csum = e1000_get_tx_csum,
- .set_tx_csum = e1000_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = e1000_set_tso,
- .self_test = e1000_diag_test,
- .get_strings = e1000_get_strings,
- .set_phys_id = e1000_set_phys_id,
- .get_ethtool_stats = e1000_get_ethtool_stats,
- .get_sset_count = e1000e_get_sset_count,
- .get_coalesce = e1000_get_coalesce,
- .set_coalesce = e1000_set_coalesce,
- .get_flags = ethtool_op_get_flags,
- .set_flags = e1000e_set_flags,
-};
-
-void e1000e_set_ethtool_ops(struct net_device *netdev)
-{
- SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
-}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
deleted file mode 100644
index 2198e615f24..00000000000
--- a/drivers/net/e1000e/netdev.c
+++ /dev/null
@@ -1,6387 +0,0 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-#include <linux/tcp.h>
-#include <linux/ipv6.h>
-#include <linux/slab.h>
-#include <net/checksum.h>
-#include <net/ip6_checksum.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#include <linux/pm_qos_params.h>
-#include <linux/pm_runtime.h>
-#include <linux/aer.h>
-#include <linux/prefetch.h>
-
-#include "e1000.h"
-
-#define DRV_EXTRAVERSION "-k"
-
-#define DRV_VERSION "1.4.4" DRV_EXTRAVERSION
-char e1000e_driver_name[] = "e1000e";
-const char e1000e_driver_version[] = DRV_VERSION;
-
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
-
-static const struct e1000_info *e1000_info_tbl[] = {
- [board_82571] = &e1000_82571_info,
- [board_82572] = &e1000_82572_info,
- [board_82573] = &e1000_82573_info,
- [board_82574] = &e1000_82574_info,
- [board_82583] = &e1000_82583_info,
- [board_80003es2lan] = &e1000_es2_info,
- [board_ich8lan] = &e1000_ich8_info,
- [board_ich9lan] = &e1000_ich9_info,
- [board_ich10lan] = &e1000_ich10_info,
- [board_pchlan] = &e1000_pch_info,
- [board_pch2lan] = &e1000_pch2_info,
-};
-
-struct e1000_reg_info {
- u32 ofs;
- char *name;
-};
-
-#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
-#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
-#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
-#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
-#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
-
-#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
-#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
-
-static const struct e1000_reg_info e1000_reg_info_tbl[] = {
-
- /* General Registers */
- {E1000_CTRL, "CTRL"},
- {E1000_STATUS, "STATUS"},
- {E1000_CTRL_EXT, "CTRL_EXT"},
-
- /* Interrupt Registers */
- {E1000_ICR, "ICR"},
-
- /* Rx Registers */
- {E1000_RCTL, "RCTL"},
- {E1000_RDLEN, "RDLEN"},
- {E1000_RDH, "RDH"},
- {E1000_RDT, "RDT"},
- {E1000_RDTR, "RDTR"},
- {E1000_RXDCTL(0), "RXDCTL"},
- {E1000_ERT, "ERT"},
- {E1000_RDBAL, "RDBAL"},
- {E1000_RDBAH, "RDBAH"},
- {E1000_RDFH, "RDFH"},
- {E1000_RDFT, "RDFT"},
- {E1000_RDFHS, "RDFHS"},
- {E1000_RDFTS, "RDFTS"},
- {E1000_RDFPC, "RDFPC"},
-
- /* Tx Registers */
- {E1000_TCTL, "TCTL"},
- {E1000_TDBAL, "TDBAL"},
- {E1000_TDBAH, "TDBAH"},
- {E1000_TDLEN, "TDLEN"},
- {E1000_TDH, "TDH"},
- {E1000_TDT, "TDT"},
- {E1000_TIDV, "TIDV"},
- {E1000_TXDCTL(0), "TXDCTL"},
- {E1000_TADV, "TADV"},
- {E1000_TARC(0), "TARC"},
- {E1000_TDFH, "TDFH"},
- {E1000_TDFT, "TDFT"},
- {E1000_TDFHS, "TDFHS"},
- {E1000_TDFTS, "TDFTS"},
- {E1000_TDFPC, "TDFPC"},
-
- /* List Terminator */
- {}
-};
-
-/*
- * e1000_regdump - register printout routine
- */
-static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
-{
- int n = 0;
- char rname[16];
- u32 regs[8];
-
- switch (reginfo->ofs) {
- case E1000_RXDCTL(0):
- for (n = 0; n < 2; n++)
- regs[n] = __er32(hw, E1000_RXDCTL(n));
- break;
- case E1000_TXDCTL(0):
- for (n = 0; n < 2; n++)
- regs[n] = __er32(hw, E1000_TXDCTL(n));
- break;
- case E1000_TARC(0):
- for (n = 0; n < 2; n++)
- regs[n] = __er32(hw, E1000_TARC(n));
- break;
- default:
- printk(KERN_INFO "%-15s %08x\n",
- reginfo->name, __er32(hw, reginfo->ofs));
- return;
- }
-
- snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
- printk(KERN_INFO "%-15s ", rname);
- for (n = 0; n < 2; n++)
- printk(KERN_CONT "%08x ", regs[n]);
- printk(KERN_CONT "\n");
-}
-
-/*
- * e1000e_dump - Print registers, Tx-ring and Rx-ring
- */
-static void e1000e_dump(struct e1000_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_reg_info *reginfo;
- struct e1000_ring *tx_ring = adapter->tx_ring;
- struct e1000_tx_desc *tx_desc;
- struct my_u0 {
- u64 a;
- u64 b;
- } *u0;
- struct e1000_buffer *buffer_info;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- union e1000_rx_desc_packet_split *rx_desc_ps;
- struct e1000_rx_desc *rx_desc;
- struct my_u1 {
- u64 a;
- u64 b;
- u64 c;
- u64 d;
- } *u1;
- u32 staterr;
- int i = 0;
-
- if (!netif_msg_hw(adapter))
- return;
-
- /* Print netdevice Info */
- if (netdev) {
- dev_info(&adapter->pdev->dev, "Net device Info\n");
- printk(KERN_INFO "Device Name state "
- "trans_start last_rx\n");
- printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name, netdev->state, netdev->trans_start,
- netdev->last_rx);
- }
-
- /* Print Registers */
- dev_info(&adapter->pdev->dev, "Register Dump\n");
- printk(KERN_INFO " Register Name Value\n");
- for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
- reginfo->name; reginfo++) {
- e1000_regdump(hw, reginfo);
- }
-
- /* Print Tx Ring Summary */
- if (!netdev || !netif_running(netdev))
- goto exit;
-
- dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
- " leng ntw timestamp\n");
- buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
- printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
- 0, tx_ring->next_to_use, tx_ring->next_to_clean,
- (unsigned long long)buffer_info->dma,
- buffer_info->length,
- buffer_info->next_to_watch,
- (unsigned long long)buffer_info->time_stamp);
-
- /* Print Tx Ring */
- if (!netif_msg_tx_done(adapter))
- goto rx_ring_summary;
-
- dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
-
- /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
- *
- * Legacy Transmit Descriptor
- * +--------------------------------------------------------------+
- * 0 | Buffer Address [63:0] (Reserved on Write Back) |
- * +--------------------------------------------------------------+
- * 8 | Special | CSS | Status | CMD | CSO | Length |
- * +--------------------------------------------------------------+
- * 63 48 47 36 35 32 31 24 23 16 15 0
- *
- * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
- * 63 48 47 40 39 32 31 16 15 8 7 0
- * +----------------------------------------------------------------+
- * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
- * +----------------------------------------------------------------+
- * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
- * +----------------------------------------------------------------+
- * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
- *
- * Extended Data Descriptor (DTYP=0x1)
- * +----------------------------------------------------------------+
- * 0 | Buffer Address [63:0] |
- * +----------------------------------------------------------------+
- * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
- * +----------------------------------------------------------------+
- * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
- */
- printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Legacy format\n");
- printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Context format\n");
- printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Data format\n");
- for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
- tx_desc = E1000_TX_DESC(*tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
- u0 = (struct my_u0 *)tx_desc;
- printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
- "%04X %3X %016llX %p",
- (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
- ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
- (unsigned long long)le64_to_cpu(u0->a),
- (unsigned long long)le64_to_cpu(u0->b),
- (unsigned long long)buffer_info->dma,
- buffer_info->length, buffer_info->next_to_watch,
- (unsigned long long)buffer_info->time_stamp,
- buffer_info->skb);
- if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC/U\n");
- else if (i == tx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
-
- if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
- 16, 1, phys_to_virt(buffer_info->dma),
- buffer_info->length, true);
- }
-
- /* Print Rx Ring Summary */
-rx_ring_summary:
- dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC]\n");
- printk(KERN_INFO " %5d %5X %5X\n", 0,
- rx_ring->next_to_use, rx_ring->next_to_clean);
-
- /* Print Rx Ring */
- if (!netif_msg_rx_status(adapter))
- goto exit;
-
- dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
- switch (adapter->rx_ps_pages) {
- case 1:
- case 2:
- case 3:
- /* [Extended] Packet Split Receive Descriptor Format
- *
- * +-----------------------------------------------------+
- * 0 | Buffer Address 0 [63:0] |
- * +-----------------------------------------------------+
- * 8 | Buffer Address 1 [63:0] |
- * +-----------------------------------------------------+
- * 16 | Buffer Address 2 [63:0] |
- * +-----------------------------------------------------+
- * 24 | Buffer Address 3 [63:0] |
- * +-----------------------------------------------------+
- */
- printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
- "[buffer 1 63:0 ] "
- "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
- "[bi->skb] <-- Ext Pkt Split format\n");
- /* [Extended] Receive Descriptor (Write-Back) Format
- *
- * 63 48 47 32 31 13 12 8 7 4 3 0
- * +------------------------------------------------------+
- * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
- * | Checksum | Ident | | Queue | | Type |
- * +------------------------------------------------------+
- * 8 | VLAN Tag | Length | Extended Error | Extended Status |
- * +------------------------------------------------------+
- * 63 48 47 32 31 20 19 0
- */
- printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
- "[vl l0 ee es] "
- "[ l3 l2 l1 hs] [reserved ] ---------------- "
- "[bi->skb] <-- Ext Rx Write-Back format\n");
- for (i = 0; i < rx_ring->count; i++) {
- buffer_info = &rx_ring->buffer_info[i];
- rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
- u1 = (struct my_u1 *)rx_desc_ps;
- staterr =
- le32_to_cpu(rx_desc_ps->wb.middle.status_error);
- if (staterr & E1000_RXD_STAT_DD) {
- /* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX %016llX %016llX "
- "---------------- %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- buffer_info->skb);
- } else {
- printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %016llX %016llX %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
-
- if (netif_msg_pktdata(adapter))
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS, 16, 1,
- phys_to_virt(buffer_info->dma),
- adapter->rx_ps_bsize0, true);
- }
-
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
- }
- break;
- default:
- case 0:
- /* Legacy Receive Descriptor Format
- *
- * +-----------------------------------------------------+
- * | Buffer Address [63:0] |
- * +-----------------------------------------------------+
- * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
- * +-----------------------------------------------------+
- * 63 48 47 40 39 32 31 16 15 0
- */
- printk(KERN_INFO "Rl[desc] [address 63:0 ] "
- "[vl er S cks ln] [bi->dma ] [bi->skb] "
- "<-- Legacy format\n");
- for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
- rx_desc = E1000_RX_DESC(*rx_ring, i);
- buffer_info = &rx_ring->buffer_info[i];
- u0 = (struct my_u0 *)rx_desc;
- printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
- "%016llX %p", i,
- (unsigned long long)le64_to_cpu(u0->a),
- (unsigned long long)le64_to_cpu(u0->b),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
-
- if (netif_msg_pktdata(adapter))
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1,
- phys_to_virt(buffer_info->dma),
- adapter->rx_buffer_len, true);
- }
- }
-
-exit:
- return;
-}
-
-/**
- * e1000_desc_unused - calculate if we have unused descriptors
- **/
-static int e1000_desc_unused(struct e1000_ring *ring)
-{
- if (ring->next_to_clean > ring->next_to_use)
- return ring->next_to_clean - ring->next_to_use - 1;
-
- return ring->count + ring->next_to_clean - ring->next_to_use - 1;
-}
-
-/**
- * e1000_receive_skb - helper function to handle Rx indications
- * @adapter: board private structure
- * @status: descriptor status field as written by hardware
- * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
- * @skb: pointer to sk_buff to be indicated to stack
- **/
-static void e1000_receive_skb(struct e1000_adapter *adapter,
- struct net_device *netdev, struct sk_buff *skb,
- u8 status, __le16 vlan)
-{
- u16 tag = le16_to_cpu(vlan);
- skb->protocol = eth_type_trans(skb, netdev);
-
- if (status & E1000_RXD_STAT_VP)
- __vlan_hwaccel_put_tag(skb, tag);
-
- napi_gro_receive(&adapter->napi, skb);
-}
-
-/**
- * e1000_rx_checksum - Receive Checksum Offload
- * @adapter: board private structure
- * @status_err: receive descriptor status and error fields
- * @csum: receive descriptor csum field
- * @sk_buff: socket buffer with received data
- **/
-static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
- u32 csum, struct sk_buff *skb)
-{
- u16 status = (u16)status_err;
- u8 errors = (u8)(status_err >> 24);
-
- skb_checksum_none_assert(skb);
-
- /* Ignore Checksum bit is set */
- if (status & E1000_RXD_STAT_IXSM)
- return;
- /* TCP/UDP checksum error bit is set */
- if (errors & E1000_RXD_ERR_TCPE) {
- /* let the stack verify checksum errors */
- adapter->hw_csum_err++;
- return;
- }
-
- /* TCP/UDP Checksum has not been calculated */
- if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
- return;
-
- /* It must be a TCP or UDP packet with a valid checksum */
- if (status & E1000_RXD_STAT_TCPCS) {
- /* TCP checksum is good */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- /*
- * IP fragment with UDP payload
- * Hardware complements the payload checksum, so we undo it
- * and then put the value in host order for further stack use.
- */
- __sum16 sum = (__force __sum16)htons(csum);
- skb->csum = csum_unfold(~sum);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
- adapter->hw_csum_good++;
-}
-
-/**
- * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
- * @hw: pointer to the HW structure
- * @tail: address of tail descriptor register
- * @i: value to write to tail descriptor register
- *
- * When updating the tail register, the ME could be accessing Host CSR
- * registers at the same time. Normally, this is handled in h/w by an
- * arbiter but on some parts there is a bug that acknowledges Host accesses
- * later than it should which could result in the descriptor register to
- * have an incorrect value. Workaround this by checking the FWSM register
- * which has bit 24 set while ME is accessing Host CSR registers, wait
- * if it is set and try again a number of times.
- **/
-static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
- unsigned int i)
-{
- unsigned int j = 0;
-
- while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
- (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
- udelay(50);
-
- writel(i, tail);
-
- if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
- return E1000_ERR_SWFW_SYNC;
-
- return 0;
-}
-
-static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
-{
- u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
- struct e1000_hw *hw = &adapter->hw;
-
- if (e1000e_update_tail_wa(hw, tail, i)) {
- u32 rctl = er32(RCTL);
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
- e_err("ME firmware caused invalid RDT - resetting\n");
- schedule_work(&adapter->reset_task);
- }
-}
-
-static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
-{
- u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
- struct e1000_hw *hw = &adapter->hw;
-
- if (e1000e_update_tail_wa(hw, tail, i)) {
- u32 tctl = er32(TCTL);
- ew32(TCTL, tctl & ~E1000_TCTL_EN);
- e_err("ME firmware caused invalid TDT - resetting\n");
- schedule_work(&adapter->reset_task);
- }
-}
-
-/**
- * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
- * @adapter: address of board private structure
- **/
-static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
- int cleaned_count, gfp_t gfp)
-{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- struct e1000_rx_desc *rx_desc;
- struct e1000_buffer *buffer_info;
- struct sk_buff *skb;
- unsigned int i;
- unsigned int bufsz = adapter->rx_buffer_len;
-
- i = rx_ring->next_to_use;
- buffer_info = &rx_ring->buffer_info[i];
-
- while (cleaned_count--) {
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- goto map_skb;
- }
-
- skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
- if (!skb) {
- /* Better luck next round */
- adapter->alloc_rx_buff_failed++;
- break;
- }
-
- buffer_info->skb = skb;
-map_skb:
- buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
- adapter->rx_buffer_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- dev_err(&pdev->dev, "Rx DMA map failed\n");
- adapter->rx_dma_failed++;
- break;
- }
-
- rx_desc = E1000_RX_DESC(*rx_ring, i);
- rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
-
- if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_rdt_wa(adapter, i);
- else
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
- }
- i++;
- if (i == rx_ring->count)
- i = 0;
- buffer_info = &rx_ring->buffer_info[i];
- }
-
- rx_ring->next_to_use = i;
-}
-
-/**
- * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @adapter: address of board private structure
- **/
-static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
- int cleaned_count, gfp_t gfp)
-{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- union e1000_rx_desc_packet_split *rx_desc;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- struct e1000_buffer *buffer_info;
- struct e1000_ps_page *ps_page;
- struct sk_buff *skb;
- unsigned int i, j;
-
- i = rx_ring->next_to_use;
- buffer_info = &rx_ring->buffer_info[i];
-
- while (cleaned_count--) {
- rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
-
- for (j = 0; j < PS_PAGE_BUFFERS; j++) {
- ps_page = &buffer_info->ps_pages[j];
- if (j >= adapter->rx_ps_pages) {
- /* all unused desc entries get hw null ptr */
- rx_desc->read.buffer_addr[j + 1] =
- ~cpu_to_le64(0);
- continue;
- }
- if (!ps_page->page) {
- ps_page->page = alloc_page(gfp);
- if (!ps_page->page) {
- adapter->alloc_rx_buff_failed++;
- goto no_buffers;
- }
- ps_page->dma = dma_map_page(&pdev->dev,
- ps_page->page,
- 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev,
- ps_page->dma)) {
- dev_err(&adapter->pdev->dev,
- "Rx DMA page map failed\n");
- adapter->rx_dma_failed++;
- goto no_buffers;
- }
- }
- /*
- * Refresh the desc even if buffer_addrs
- * didn't change because each write-back
- * erases this info.
- */
- rx_desc->read.buffer_addr[j + 1] =
- cpu_to_le64(ps_page->dma);
- }
-
- skb = __netdev_alloc_skb_ip_align(netdev,
- adapter->rx_ps_bsize0,
- gfp);
-
- if (!skb) {
- adapter->alloc_rx_buff_failed++;
- break;
- }
-
- buffer_info->skb = skb;
- buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
- adapter->rx_ps_bsize0,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- dev_err(&pdev->dev, "Rx DMA map failed\n");
- adapter->rx_dma_failed++;
- /* cleanup skb */
- dev_kfree_skb_any(skb);
- buffer_info->skb = NULL;
- break;
- }
-
- rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
-
- if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_rdt_wa(adapter, i << 1);
- else
- writel(i << 1,
- adapter->hw.hw_addr + rx_ring->tail);
- }
-
- i++;
- if (i == rx_ring->count)
- i = 0;
- buffer_info = &rx_ring->buffer_info[i];
- }
-
-no_buffers:
- rx_ring->next_to_use = i;
-}
-
-/**
- * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
- * @adapter: address of board private structure
- * @cleaned_count: number of buffers to allocate this pass
- **/
-
-static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
- int cleaned_count, gfp_t gfp)
-{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_rx_desc *rx_desc;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- struct e1000_buffer *buffer_info;
- struct sk_buff *skb;
- unsigned int i;
- unsigned int bufsz = 256 - 16 /* for skb_reserve */;
-
- i = rx_ring->next_to_use;
- buffer_info = &rx_ring->buffer_info[i];
-
- while (cleaned_count--) {
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- goto check_page;
- }
-
- skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
- if (unlikely(!skb)) {
- /* Better luck next round */
- adapter->alloc_rx_buff_failed++;
- break;
- }
-
- buffer_info->skb = skb;
-check_page:
- /* allocate a new page if necessary */
- if (!buffer_info->page) {
- buffer_info->page = alloc_page(gfp);
- if (unlikely(!buffer_info->page)) {
- adapter->alloc_rx_buff_failed++;
- break;
- }
- }
-
- if (!buffer_info->dma)
- buffer_info->dma = dma_map_page(&pdev->dev,
- buffer_info->page, 0,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
-
- rx_desc = E1000_RX_DESC(*rx_ring, i);
- rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
-
- if (unlikely(++i == rx_ring->count))
- i = 0;
- buffer_info = &rx_ring->buffer_info[i];
- }
-
- if (likely(rx_ring->next_to_use != i)) {
- rx_ring->next_to_use = i;
- if (unlikely(i-- == 0))
- i = (rx_ring->count - 1);
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64). */
- wmb();
- if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_rdt_wa(adapter, i);
- else
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
- }
-}
-
-/**
- * e1000_clean_rx_irq - Send received data up the network stack; legacy
- * @adapter: board private structure
- *
- * the return value indicates whether actual cleaning was done, there
- * is no guarantee that everything was cleaned
- **/
-static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
- int *work_done, int work_to_do)
-{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- struct e1000_rx_desc *rx_desc, *next_rxd;
- struct e1000_buffer *buffer_info, *next_buffer;
- u32 length;
- unsigned int i;
- int cleaned_count = 0;
- bool cleaned = 0;
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-
- i = rx_ring->next_to_clean;
- rx_desc = E1000_RX_DESC(*rx_ring, i);
- buffer_info = &rx_ring->buffer_info[i];
-
- while (rx_desc->status & E1000_RXD_STAT_DD) {
- struct sk_buff *skb;
- u8 status;
-
- if (*work_done >= work_to_do)
- break;
- (*work_done)++;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
-
- status = rx_desc->status;
- skb = buffer_info->skb;
- buffer_info->skb = NULL;
-
- prefetch(skb->data - NET_IP_ALIGN);
-
- i++;
- if (i == rx_ring->count)
- i = 0;
- next_rxd = E1000_RX_DESC(*rx_ring, i);
- prefetch(next_rxd);
-
- next_buffer = &rx_ring->buffer_info[i];
-
- cleaned = 1;
- cleaned_count++;
- dma_unmap_single(&pdev->dev,
- buffer_info->dma,
- adapter->rx_buffer_len,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
-
- length = le16_to_cpu(rx_desc->length);
-
- /*
- * !EOP means multiple descriptors were used to store a single
- * packet, if that's the case we need to toss it. In fact, we
- * need to toss every packet with the EOP bit clear and the
- * next frame that _does_ have the EOP bit set, as it is by
- * definition only a frame fragment
- */
- if (unlikely(!(status & E1000_RXD_STAT_EOP)))
- adapter->flags2 |= FLAG2_IS_DISCARDING;
-
- if (adapter->flags2 & FLAG2_IS_DISCARDING) {
- /* All receives must fit into a single buffer */
- e_dbg("Receive packet consumed multiple buffers\n");
- /* recycle */
- buffer_info->skb = skb;
- if (status & E1000_RXD_STAT_EOP)
- adapter->flags2 &= ~FLAG2_IS_DISCARDING;
- goto next_desc;
- }
-
- if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
- /* recycle */
- buffer_info->skb = skb;
- goto next_desc;
- }
-
- /* adjust length to remove Ethernet CRC */
- if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
- length -= 4;
-
- total_rx_bytes += length;
- total_rx_packets++;
-
- /*
- * code added for copybreak, this should improve
- * performance for small packets with large amounts
- * of reassembly being done in the stack
- */
- if (length < copybreak) {
- struct sk_buff *new_skb =
- netdev_alloc_skb_ip_align(netdev, length);
- if (new_skb) {
- skb_copy_to_linear_data_offset(new_skb,
- -NET_IP_ALIGN,
- (skb->data -
- NET_IP_ALIGN),
- (length +
- NET_IP_ALIGN));
- /* save the skb in buffer_info as good */
- buffer_info->skb = skb;
- skb = new_skb;
- }
- /* else just continue with the old one */
- }
- /* end copybreak code */
- skb_put(skb, length);
-
- /* Receive Checksum Offload */
- e1000_rx_checksum(adapter,
- (u32)(status) |
- ((u32)(rx_desc->errors) << 24),
- le16_to_cpu(rx_desc->csum), skb);
-
- e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special);
-
-next_desc:
- rx_desc->status = 0;
-
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
- adapter->alloc_rx_buf(adapter, cleaned_count,
- GFP_ATOMIC);
- cleaned_count = 0;
- }
-
- /* use prefetched values */
- rx_desc = next_rxd;
- buffer_info = next_buffer;
- }
- rx_ring->next_to_clean = i;
-
- cleaned_count = e1000_desc_unused(rx_ring);
- if (cleaned_count)
- adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
-
- adapter->total_rx_bytes += total_rx_bytes;
- adapter->total_rx_packets += total_rx_packets;
- return cleaned;
-}
-
-static void e1000_put_txbuf(struct e1000_adapter *adapter,
- struct e1000_buffer *buffer_info)
-{
- if (buffer_info->dma) {
- if (buffer_info->mapped_as_page)
- dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
- buffer_info->length, DMA_TO_DEVICE);
- else
- dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
- buffer_info->length, DMA_TO_DEVICE);
- buffer_info->dma = 0;
- }
- if (buffer_info->skb) {
- dev_kfree_skb_any(buffer_info->skb);
- buffer_info->skb = NULL;
- }
- buffer_info->time_stamp = 0;
-}
-
-static void e1000_print_hw_hang(struct work_struct *work)
-{
- struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter,
- print_hang_task);
- struct e1000_ring *tx_ring = adapter->tx_ring;
- unsigned int i = tx_ring->next_to_clean;
- unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
- struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
- struct e1000_hw *hw = &adapter->hw;
- u16 phy_status, phy_1000t_status, phy_ext_status;
- u16 pci_status;
-
- if (test_bit(__E1000_DOWN, &adapter->state))
- return;
-
- e1e_rphy(hw, PHY_STATUS, &phy_status);
- e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
- e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
-
- pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
-
- /* detected Hardware unit hang */
- e_err("Detected Hardware Unit Hang:\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]:\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n"
- "MAC Status <%x>\n"
- "PHY Status <%x>\n"
- "PHY 1000BASE-T Status <%x>\n"
- "PHY Extended Status <%x>\n"
- "PCI Status <%x>\n",
- readl(adapter->hw.hw_addr + tx_ring->head),
- readl(adapter->hw.hw_addr + tx_ring->tail),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
- eop,
- jiffies,
- eop_desc->upper.fields.status,
- er32(STATUS),
- phy_status,
- phy_1000t_status,
- phy_ext_status,
- pci_status);
-}
-
-/**
- * e1000_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
- *
- * the return value indicates whether actual cleaning was done, there
- * is no guarantee that everything was cleaned
- **/
-static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_ring *tx_ring = adapter->tx_ring;
- struct e1000_tx_desc *tx_desc, *eop_desc;
- struct e1000_buffer *buffer_info;
- unsigned int i, eop;
- unsigned int count = 0;
- unsigned int total_tx_bytes = 0, total_tx_packets = 0;
-
- i = tx_ring->next_to_clean;
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = E1000_TX_DESC(*tx_ring, eop);
-
- while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
- (count < tx_ring->count)) {
- bool cleaned = false;
- rmb(); /* read buffer_info after eop_desc */
- for (; !cleaned; count++) {
- tx_desc = E1000_TX_DESC(*tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
- cleaned = (i == eop);
-
- if (cleaned) {
- total_tx_packets += buffer_info->segs;
- total_tx_bytes += buffer_info->bytecount;
- }
-
- e1000_put_txbuf(adapter, buffer_info);
- tx_desc->upper.data = 0;
-
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
-
- if (i == tx_ring->next_to_use)
- break;
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = E1000_TX_DESC(*tx_ring, eop);
- }
-
- tx_ring->next_to_clean = i;
-
-#define TX_WAKE_THRESHOLD 32
- if (count && netif_carrier_ok(netdev) &&
- e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
- /* Make sure that anybody stopping the queue after this
- * sees the new next_to_clean.
- */
- smp_mb();
-
- if (netif_queue_stopped(netdev) &&
- !(test_bit(__E1000_DOWN, &adapter->state))) {
- netif_wake_queue(netdev);
- ++adapter->restart_queue;
- }
- }
-
- if (adapter->detect_tx_hung) {
- /*
- * Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i
- */
- adapter->detect_tx_hung = 0;
- if (tx_ring->buffer_info[i].time_stamp &&
- time_after(jiffies, tx_ring->buffer_info[i].time_stamp
- + (adapter->tx_timeout_factor * HZ)) &&
- !(er32(STATUS) & E1000_STATUS_TXOFF)) {
- schedule_work(&adapter->print_hang_task);
- netif_stop_queue(netdev);
- }
- }
- adapter->total_tx_bytes += total_tx_bytes;
- adapter->total_tx_packets += total_tx_packets;
- return count < tx_ring->count;
-}
-
-/**
- * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
- * @adapter: board private structure
- *
- * the return value indicates whether actual cleaning was done, there
- * is no guarantee that everything was cleaned
- **/
-static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
- int *work_done, int work_to_do)
-{
- struct e1000_hw *hw = &adapter->hw;
- union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- struct e1000_buffer *buffer_info, *next_buffer;
- struct e1000_ps_page *ps_page;
- struct sk_buff *skb;
- unsigned int i, j;
- u32 length, staterr;
- int cleaned_count = 0;
- bool cleaned = 0;
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-
- i = rx_ring->next_to_clean;
- rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
- staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
- buffer_info = &rx_ring->buffer_info[i];
-
- while (staterr & E1000_RXD_STAT_DD) {
- if (*work_done >= work_to_do)
- break;
- (*work_done)++;
- skb = buffer_info->skb;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
-
- /* in the packet split case this is header only */
- prefetch(skb->data - NET_IP_ALIGN);
-
- i++;
- if (i == rx_ring->count)
- i = 0;
- next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
- prefetch(next_rxd);
-
- next_buffer = &rx_ring->buffer_info[i];
-
- cleaned = 1;
- cleaned_count++;
- dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
- buffer_info->dma = 0;
-
- /* see !EOP comment in other Rx routine */
- if (!(staterr & E1000_RXD_STAT_EOP))
- adapter->flags2 |= FLAG2_IS_DISCARDING;
-
- if (adapter->flags2 & FLAG2_IS_DISCARDING) {
- e_dbg("Packet Split buffers didn't pick up the full "
- "packet\n");
- dev_kfree_skb_irq(skb);
- if (staterr & E1000_RXD_STAT_EOP)
- adapter->flags2 &= ~FLAG2_IS_DISCARDING;
- goto next_desc;
- }
-
- if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
- dev_kfree_skb_irq(skb);
- goto next_desc;
- }
-
- length = le16_to_cpu(rx_desc->wb.middle.length0);
-
- if (!length) {
- e_dbg("Last part of the packet spanning multiple "
- "descriptors\n");
- dev_kfree_skb_irq(skb);
- goto next_desc;
- }
-
- /* Good Receive */
- skb_put(skb, length);
-
- {
- /*
- * this looks ugly, but it seems compiler issues make it
- * more efficient than reusing j
- */
- int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
-
- /*
- * page alloc/put takes too long and effects small packet
- * throughput, so unsplit small packets and save the alloc/put
- * only valid in softirq (napi) context to call kmap_*
- */
- if (l1 && (l1 <= copybreak) &&
- ((length + l1) <= adapter->rx_ps_bsize0)) {
- u8 *vaddr;
-
- ps_page = &buffer_info->ps_pages[0];
-
- /*
- * there is no documentation about how to call
- * kmap_atomic, so we can't hold the mapping
- * very long
- */
- dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
- vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
- memcpy(skb_tail_pointer(skb), vaddr, l1);
- kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
- dma_sync_single_for_device(&pdev->dev, ps_page->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- /* remove the CRC */
- if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
- l1 -= 4;
-
- skb_put(skb, l1);
- goto copydone;
- } /* if */
- }
-
- for (j = 0; j < PS_PAGE_BUFFERS; j++) {
- length = le16_to_cpu(rx_desc->wb.upper.length[j]);
- if (!length)
- break;
-
- ps_page = &buffer_info->ps_pages[j];
- dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
- ps_page->dma = 0;
- skb_fill_page_desc(skb, j, ps_page->page, 0, length);
- ps_page->page = NULL;
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
- }
-
- /* strip the ethernet crc, problem is we're using pages now so
- * this whole operation can get a little cpu intensive
- */
- if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
- pskb_trim(skb, skb->len - 4);
-
-copydone:
- total_rx_bytes += skb->len;
- total_rx_packets++;
-
- e1000_rx_checksum(adapter, staterr, le16_to_cpu(
- rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
-
- if (rx_desc->wb.upper.header_status &
- cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
- adapter->rx_hdr_split++;
-
- e1000_receive_skb(adapter, netdev, skb,
- staterr, rx_desc->wb.middle.vlan);
-
-next_desc:
- rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
- buffer_info->skb = NULL;
-
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
- adapter->alloc_rx_buf(adapter, cleaned_count,
- GFP_ATOMIC);
- cleaned_count = 0;
- }
-
- /* use prefetched values */
- rx_desc = next_rxd;
- buffer_info = next_buffer;
-
- staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
- }
- rx_ring->next_to_clean = i;
-
- cleaned_count = e1000_desc_unused(rx_ring);
- if (cleaned_count)
- adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
-
- adapter->total_rx_bytes += total_rx_bytes;
- adapter->total_rx_packets += total_rx_packets;
- return cleaned;
-}
-
-/**
- * e1000_consume_page - helper function
- **/
-static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
- u16 length)
-{
- bi->page = NULL;
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
-}
-
-/**
- * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
- * @adapter: board private structure
- *
- * the return value indicates whether actual cleaning was done, there
- * is no guarantee that everything was cleaned
- **/
-
-static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
- int *work_done, int work_to_do)
-{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- struct e1000_rx_desc *rx_desc, *next_rxd;
- struct e1000_buffer *buffer_info, *next_buffer;
- u32 length;
- unsigned int i;
- int cleaned_count = 0;
- bool cleaned = false;
- unsigned int total_rx_bytes=0, total_rx_packets=0;
-
- i = rx_ring->next_to_clean;
- rx_desc = E1000_RX_DESC(*rx_ring, i);
- buffer_info = &rx_ring->buffer_info[i];
-
- while (rx_desc->status & E1000_RXD_STAT_DD) {
- struct sk_buff *skb;
- u8 status;
-
- if (*work_done >= work_to_do)
- break;
- (*work_done)++;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
-
- status = rx_desc->status;
- skb = buffer_info->skb;
- buffer_info->skb = NULL;
-
- ++i;
- if (i == rx_ring->count)
- i = 0;
- next_rxd = E1000_RX_DESC(*rx_ring, i);
- prefetch(next_rxd);
-
- next_buffer = &rx_ring->buffer_info[i];
-
- cleaned = true;
- cleaned_count++;
- dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
-
- length = le16_to_cpu(rx_desc->length);
-
- /* errors is only valid for DD + EOP descriptors */
- if (unlikely((status & E1000_RXD_STAT_EOP) &&
- (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
- /* recycle both page and skb */
- buffer_info->skb = skb;
- /* an error means any chain goes out the window
- * too */
- if (rx_ring->rx_skb_top)
- dev_kfree_skb_irq(rx_ring->rx_skb_top);
- rx_ring->rx_skb_top = NULL;
- goto next_desc;
- }
-
-#define rxtop (rx_ring->rx_skb_top)
- if (!(status & E1000_RXD_STAT_EOP)) {
- /* this descriptor is only the beginning (or middle) */
- if (!rxtop) {
- /* this is the beginning of a chain */
- rxtop = skb;
- skb_fill_page_desc(rxtop, 0, buffer_info->page,
- 0, length);
- } else {
- /* this is the middle of a chain */
- skb_fill_page_desc(rxtop,
- skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
- /* re-use the skb, only consumed the page */
- buffer_info->skb = skb;
- }
- e1000_consume_page(buffer_info, rxtop, length);
- goto next_desc;
- } else {
- if (rxtop) {
- /* end of the chain */
- skb_fill_page_desc(rxtop,
- skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
- /* re-use the current skb, we only consumed the
- * page */
- buffer_info->skb = skb;
- skb = rxtop;
- rxtop = NULL;
- e1000_consume_page(buffer_info, skb, length);
- } else {
- /* no chain, got EOP, this buf is the packet
- * copybreak to save the put_page/alloc_page */
- if (length <= copybreak &&
- skb_tailroom(skb) >= length) {
- u8 *vaddr;
- vaddr = kmap_atomic(buffer_info->page,
- KM_SKB_DATA_SOFTIRQ);
- memcpy(skb_tail_pointer(skb), vaddr,
- length);
- kunmap_atomic(vaddr,
- KM_SKB_DATA_SOFTIRQ);
- /* re-use the page, so don't erase
- * buffer_info->page */
- skb_put(skb, length);
- } else {
- skb_fill_page_desc(skb, 0,
- buffer_info->page, 0,
- length);
- e1000_consume_page(buffer_info, skb,
- length);
- }
- }
- }
-
- /* Receive Checksum Offload XXX recompute due to CRC strip? */
- e1000_rx_checksum(adapter,
- (u32)(status) |
- ((u32)(rx_desc->errors) << 24),
- le16_to_cpu(rx_desc->csum), skb);
-
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
- total_rx_packets++;
-
- /* eth type trans needs skb->data to point to something */
- if (!pskb_may_pull(skb, ETH_HLEN)) {
- e_err("pskb_may_pull failed.\n");
- dev_kfree_skb_irq(skb);
- goto next_desc;
- }
-
- e1000_receive_skb(adapter, netdev, skb, status,
- rx_desc->special);
-
-next_desc:
- rx_desc->status = 0;
-
- /* return some buffers to hardware, one at a time is too slow */
- if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
- adapter->alloc_rx_buf(adapter, cleaned_count,
- GFP_ATOMIC);
- cleaned_count = 0;
- }
-
- /* use prefetched values */
- rx_desc = next_rxd;
- buffer_info = next_buffer;
- }
- rx_ring->next_to_clean = i;
-
- cleaned_count = e1000_desc_unused(rx_ring);
- if (cleaned_count)
- adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
-
- adapter->total_rx_bytes += total_rx_bytes;
- adapter->total_rx_packets += total_rx_packets;
- return cleaned;
-}
-
-/**
- * e1000_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
- **/
-static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
-{
- struct e1000_ring *rx_ring = adapter->rx_ring;
- struct e1000_buffer *buffer_info;
- struct e1000_ps_page *ps_page;
- struct pci_dev *pdev = adapter->pdev;
- unsigned int i, j;
-
- /* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- buffer_info = &rx_ring->buffer_info[i];
- if (buffer_info->dma) {
- if (adapter->clean_rx == e1000_clean_rx_irq)
- dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_buffer_len,
- DMA_FROM_DEVICE);
- else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
- dma_unmap_page(&pdev->dev, buffer_info->dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
- dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_ps_bsize0,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
- }
-
- if (buffer_info->page) {
- put_page(buffer_info->page);
- buffer_info->page = NULL;
- }
-
- if (buffer_info->skb) {
- dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
- }
-
- for (j = 0; j < PS_PAGE_BUFFERS; j++) {
- ps_page = &buffer_info->ps_pages[j];
- if (!ps_page->page)
- break;
- dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
- ps_page->dma = 0;
- put_page(ps_page->page);
- ps_page->page = NULL;
- }
- }
-
- /* there also may be some cached data from a chained receive */
- if (rx_ring->rx_skb_top) {
- dev_kfree_skb(rx_ring->rx_skb_top);
- rx_ring->rx_skb_top = NULL;
- }
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
- adapter->flags2 &= ~FLAG2_IS_DISCARDING;
-
- writel(0, adapter->hw.hw_addr + rx_ring->head);
- writel(0, adapter->hw.hw_addr + rx_ring->tail);
-}
-
-static void e1000e_downshift_workaround(struct work_struct *work)
-{
- struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter, downshift_task);
-
- if (test_bit(__E1000_DOWN, &adapter->state))
- return;
-
- e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
-}
-
-/**
- * e1000_intr_msi - Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-static irqreturn_t e1000_intr_msi(int irq, void *data)
-{
- struct net_device *netdev = data;
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 icr = er32(ICR);
-
- /*
- * read ICR disables interrupts using IAM
- */
-
- if (icr & E1000_ICR_LSC) {
- hw->mac.get_link_status = 1;
- /*
- * ICH8 workaround-- Call gig speed drop workaround on cable
- * disconnect (LSC) before accessing any PHY registers
- */
- if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
- (!(er32(STATUS) & E1000_STATUS_LU)))
- schedule_work(&adapter->downshift_task);
-
- /*
- * 80003ES2LAN workaround-- For packet buffer work-around on
- * link down event; disable receives here in the ISR and reset
- * adapter in watchdog
- */
- if (netif_carrier_ok(netdev) &&
- adapter->flags & FLAG_RX_NEEDS_RESTART) {
- /* disable receives */
- u32 rctl = er32(RCTL);
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
- adapter->flags |= FLAG_RX_RESTART_NOW;
- }
- /* guard against interrupt when we're going down */
- if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
- }
-
- if (napi_schedule_prep(&adapter->napi)) {
- adapter->total_tx_bytes = 0;
- adapter->total_tx_packets = 0;
- adapter->total_rx_bytes = 0;
- adapter->total_rx_packets = 0;
- __napi_schedule(&adapter->napi);
- }
-
- return IRQ_HANDLED;
-}
-
-/**
- * e1000_intr - Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-static irqreturn_t e1000_intr(int irq, void *data)
-{
- struct net_device *netdev = data;
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 rctl, icr = er32(ICR);
-
- if (!icr || test_bit(__E1000_DOWN, &adapter->state))
- return IRQ_NONE; /* Not our interrupt */
-
- /*
- * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
- * not set, then the adapter didn't send an interrupt
- */
- if (!(icr & E1000_ICR_INT_ASSERTED))
- return IRQ_NONE;
-
- /*
- * Interrupt Auto-Mask...upon reading ICR,
- * interrupts are masked. No need for the
- * IMC write
- */
-
- if (icr & E1000_ICR_LSC) {
- hw->mac.get_link_status = 1;
- /*
- * ICH8 workaround-- Call gig speed drop workaround on cable
- * disconnect (LSC) before accessing any PHY registers
- */
- if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
- (!(er32(STATUS) & E1000_STATUS_LU)))
- schedule_work(&adapter->downshift_task);
-
- /*
- * 80003ES2LAN workaround--
- * For packet buffer work-around on link down event;
- * disable receives here in the ISR and
- * reset adapter in watchdog
- */
- if (netif_carrier_ok(netdev) &&
- (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
- /* disable receives */
- rctl = er32(RCTL);
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
- adapter->flags |= FLAG_RX_RESTART_NOW;
- }
- /* guard against interrupt when we're going down */
- if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
- }
-
- if (napi_schedule_prep(&adapter->napi)) {
- adapter->total_tx_bytes = 0;
- adapter->total_tx_packets = 0;
- adapter->total_rx_bytes = 0;
- adapter->total_rx_packets = 0;
- __napi_schedule(&adapter->napi);
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t e1000_msix_other(int irq, void *data)
-{
- struct net_device *netdev = data;
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 icr = er32(ICR);
-
- if (!(icr & E1000_ICR_INT_ASSERTED)) {
- if (!test_bit(__E1000_DOWN, &adapter->state))
- ew32(IMS, E1000_IMS_OTHER);
- return IRQ_NONE;
- }
-
- if (icr & adapter->eiac_mask)
- ew32(ICS, (icr & adapter->eiac_mask));
-
- if (icr & E1000_ICR_OTHER) {
- if (!(icr & E1000_ICR_LSC))
- goto no_link_interrupt;
- hw->mac.get_link_status = 1;
- /* guard against interrupt when we're going down */
- if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
- }
-
-no_link_interrupt:
- if (!test_bit(__E1000_DOWN, &adapter->state))
- ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
-
- return IRQ_HANDLED;
-}
-
-
-static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
-{
- struct net_device *netdev = data;
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_ring *tx_ring = adapter->tx_ring;
-
-
- adapter->total_tx_bytes = 0;
- adapter->total_tx_packets = 0;
-
- if (!e1000_clean_tx_irq(adapter))
- /* Ring was not completely cleaned, so fire another interrupt */
- ew32(ICS, tx_ring->ims_val);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
-{
- struct net_device *netdev = data;
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- /* Write the ITR value calculated at the end of the
- * previous interrupt.
- */
- if (adapter->rx_ring->set_itr) {
- writel(1000000000 / (adapter->rx_ring->itr_val * 256),
- adapter->hw.hw_addr + adapter->rx_ring->itr_register);
- adapter->rx_ring->set_itr = 0;
- }
-
- if (napi_schedule_prep(&adapter->napi)) {
- adapter->total_rx_bytes = 0;
- adapter->total_rx_packets = 0;
- __napi_schedule(&adapter->napi);
- }
- return IRQ_HANDLED;
-}
-
-/**
- * e1000_configure_msix - Configure MSI-X hardware
- *
- * e1000_configure_msix sets up the hardware to properly
- * generate MSI-X interrupts.
- **/
-static void e1000_configure_msix(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- struct e1000_ring *tx_ring = adapter->tx_ring;
- int vector = 0;
- u32 ctrl_ext, ivar = 0;
-
- adapter->eiac_mask = 0;
-
- /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
- if (hw->mac.type == e1000_82574) {
- u32 rfctl = er32(RFCTL);
- rfctl |= E1000_RFCTL_ACK_DIS;
- ew32(RFCTL, rfctl);
- }
-
-#define E1000_IVAR_INT_ALLOC_VALID 0x8
- /* Configure Rx vector */
- rx_ring->ims_val = E1000_IMS_RXQ0;
- adapter->eiac_mask |= rx_ring->ims_val;
- if (rx_ring->itr_val)
- writel(1000000000 / (rx_ring->itr_val * 256),
- hw->hw_addr + rx_ring->itr_register);
- else
- writel(1, hw->hw_addr + rx_ring->itr_register);
- ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
-
- /* Configure Tx vector */
- tx_ring->ims_val = E1000_IMS_TXQ0;
- vector++;
- if (tx_ring->itr_val)
- writel(1000000000 / (tx_ring->itr_val * 256),
- hw->hw_addr + tx_ring->itr_register);
- else
- writel(1, hw->hw_addr + tx_ring->itr_register);
- adapter->eiac_mask |= tx_ring->ims_val;
- ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
-
- /* set vector for Other Causes, e.g. link changes */
- vector++;
- ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
- if (rx_ring->itr_val)
- writel(1000000000 / (rx_ring->itr_val * 256),
- hw->hw_addr + E1000_EITR_82574(vector));
- else
- writel(1, hw->hw_addr + E1000_EITR_82574(vector));
-
- /* Cause Tx interrupts on every write back */
- ivar |= (1 << 31);
-
- ew32(IVAR, ivar);
-
- /* enable MSI-X PBA support */
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
-
- /* Auto-Mask Other interrupts upon ICR read */
-#define E1000_EIAC_MASK_82574 0x01F00000
- ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
- ctrl_ext |= E1000_CTRL_EXT_EIAME;
- ew32(CTRL_EXT, ctrl_ext);
- e1e_flush();
-}
-
-void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
-{
- if (adapter->msix_entries) {
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- } else if (adapter->flags & FLAG_MSI_ENABLED) {
- pci_disable_msi(adapter->pdev);
- adapter->flags &= ~FLAG_MSI_ENABLED;
- }
-}
-
-/**
- * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
- *
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
-{
- int err;
- int i;
-
- switch (adapter->int_mode) {
- case E1000E_INT_MODE_MSIX:
- if (adapter->flags & FLAG_HAS_MSIX) {
- adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
- adapter->msix_entries = kcalloc(adapter->num_vectors,
- sizeof(struct msix_entry),
- GFP_KERNEL);
- if (adapter->msix_entries) {
- for (i = 0; i < adapter->num_vectors; i++)
- adapter->msix_entries[i].entry = i;
-
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries,
- adapter->num_vectors);
- if (err == 0)
- return;
- }
- /* MSI-X failed, so fall through and try MSI */
- e_err("Failed to initialize MSI-X interrupts. "
- "Falling back to MSI interrupts.\n");
- e1000e_reset_interrupt_capability(adapter);
- }
- adapter->int_mode = E1000E_INT_MODE_MSI;
- /* Fall through */
- case E1000E_INT_MODE_MSI:
- if (!pci_enable_msi(adapter->pdev)) {
- adapter->flags |= FLAG_MSI_ENABLED;
- } else {
- adapter->int_mode = E1000E_INT_MODE_LEGACY;
- e_err("Failed to initialize MSI interrupts. Falling "
- "back to legacy interrupts.\n");
- }
- /* Fall through */
- case E1000E_INT_MODE_LEGACY:
- /* Don't do anything; this is the system default */
- break;
- }
-
- /* store the number of vectors being used */
- adapter->num_vectors = 1;
-}
-
-/**
- * e1000_request_msix - Initialize MSI-X interrupts
- *
- * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
- * kernel.
- **/
-static int e1000_request_msix(struct e1000_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int err = 0, vector = 0;
-
- if (strlen(netdev->name) < (IFNAMSIZ - 5))
- snprintf(adapter->rx_ring->name,
- sizeof(adapter->rx_ring->name) - 1,
- "%s-rx-0", netdev->name);
- else
- memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
- err = request_irq(adapter->msix_entries[vector].vector,
- e1000_intr_msix_rx, 0, adapter->rx_ring->name,
- netdev);
- if (err)
- goto out;
- adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
- adapter->rx_ring->itr_val = adapter->itr;
- vector++;
-
- if (strlen(netdev->name) < (IFNAMSIZ - 5))
- snprintf(adapter->tx_ring->name,
- sizeof(adapter->tx_ring->name) - 1,
- "%s-tx-0", netdev->name);
- else
- memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
- err = request_irq(adapter->msix_entries[vector].vector,
- e1000_intr_msix_tx, 0, adapter->tx_ring->name,
- netdev);
- if (err)
- goto out;
- adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
- adapter->tx_ring->itr_val = adapter->itr;
- vector++;
-
- err = request_irq(adapter->msix_entries[vector].vector,
- e1000_msix_other, 0, netdev->name, netdev);
- if (err)
- goto out;
-
- e1000_configure_msix(adapter);
- return 0;
-out:
- return err;
-}
-
-/**
- * e1000_request_irq - initialize interrupts
- *
- * Attempts to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-static int e1000_request_irq(struct e1000_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int err;
-
- if (adapter->msix_entries) {
- err = e1000_request_msix(adapter);
- if (!err)
- return err;
- /* fall back to MSI */
- e1000e_reset_interrupt_capability(adapter);
- adapter->int_mode = E1000E_INT_MODE_MSI;
- e1000e_set_interrupt_capability(adapter);
- }
- if (adapter->flags & FLAG_MSI_ENABLED) {
- err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
- netdev->name, netdev);
- if (!err)
- return err;
-
- /* fall back to legacy interrupt */
- e1000e_reset_interrupt_capability(adapter);
- adapter->int_mode = E1000E_INT_MODE_LEGACY;
- }
-
- err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
- netdev->name, netdev);
- if (err)
- e_err("Unable to allocate interrupt, Error: %d\n", err);
-
- return err;
-}
-
-static void e1000_free_irq(struct e1000_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
-
- if (adapter->msix_entries) {
- int vector = 0;
-
- free_irq(adapter->msix_entries[vector].vector, netdev);
- vector++;
-
- free_irq(adapter->msix_entries[vector].vector, netdev);
- vector++;
-
- /* Other Causes interrupt vector */
- free_irq(adapter->msix_entries[vector].vector, netdev);
- return;
- }
-
- free_irq(adapter->pdev->irq, netdev);
-}
-
-/**
- * e1000_irq_disable - Mask off interrupt generation on the NIC
- **/
-static void e1000_irq_disable(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- ew32(IMC, ~0);
- if (adapter->msix_entries)
- ew32(EIAC_82574, 0);
- e1e_flush();
-
- if (adapter->msix_entries) {
- int i;
- for (i = 0; i < adapter->num_vectors; i++)
- synchronize_irq(adapter->msix_entries[i].vector);
- } else {
- synchronize_irq(adapter->pdev->irq);
- }
-}
-
-/**
- * e1000_irq_enable - Enable default interrupt generation settings
- **/
-static void e1000_irq_enable(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- if (adapter->msix_entries) {
- ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
- ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
- } else {
- ew32(IMS, IMS_ENABLE_MASK);
- }
- e1e_flush();
-}
-
-/**
- * e1000e_get_hw_control - get control of the h/w from f/w
- * @adapter: address of board private structure
- *
- * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that
- * the driver is loaded. For AMT version (only with 82573)
- * of the f/w this means that the network i/f is open.
- **/
-void e1000e_get_hw_control(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl_ext;
- u32 swsm;
-
- /* Let firmware know the driver has taken over */
- if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
- swsm = er32(SWSM);
- ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
- } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
- ctrl_ext = er32(CTRL_EXT);
- ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
- }
-}
-
-/**
- * e1000e_release_hw_control - release control of the h/w to f/w
- * @adapter: address of board private structure
- *
- * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that the
- * driver is no longer loaded. For AMT version (only with 82573) i
- * of the f/w this means that the network i/f is closed.
- *
- **/
-void e1000e_release_hw_control(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl_ext;
- u32 swsm;
-
- /* Let firmware taken over control of h/w */
- if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
- swsm = er32(SWSM);
- ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
- } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
- ctrl_ext = er32(CTRL_EXT);
- ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
- }
-}
-
-/**
- * @e1000_alloc_ring - allocate memory for a ring structure
- **/
-static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
- struct e1000_ring *ring)
-{
- struct pci_dev *pdev = adapter->pdev;
-
- ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
- GFP_KERNEL);
- if (!ring->desc)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
- *
- * Return 0 on success, negative on failure
- **/
-int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
-{
- struct e1000_ring *tx_ring = adapter->tx_ring;
- int err = -ENOMEM, size;
-
- size = sizeof(struct e1000_buffer) * tx_ring->count;
- tx_ring->buffer_info = vzalloc(size);
- if (!tx_ring->buffer_info)
- goto err;
-
- /* round up to nearest 4K */
- tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
- tx_ring->size = ALIGN(tx_ring->size, 4096);
-
- err = e1000_alloc_ring_dma(adapter, tx_ring);
- if (err)
- goto err;
-
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-
- return 0;
-err:
- vfree(tx_ring->buffer_info);
- e_err("Unable to allocate memory for the transmit descriptor ring\n");
- return err;
-}
-
-/**
- * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
- *
- * Returns 0 on success, negative on failure
- **/
-int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
-{
- struct e1000_ring *rx_ring = adapter->rx_ring;
- struct e1000_buffer *buffer_info;
- int i, size, desc_len, err = -ENOMEM;
-
- size = sizeof(struct e1000_buffer) * rx_ring->count;
- rx_ring->buffer_info = vzalloc(size);
- if (!rx_ring->buffer_info)
- goto err;
-
- for (i = 0; i < rx_ring->count; i++) {
- buffer_info = &rx_ring->buffer_info[i];
- buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
- sizeof(struct e1000_ps_page),
- GFP_KERNEL);
- if (!buffer_info->ps_pages)
- goto err_pages;
- }
-
- desc_len = sizeof(union e1000_rx_desc_packet_split);
-
- /* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * desc_len;
- rx_ring->size = ALIGN(rx_ring->size, 4096);
-
- err = e1000_alloc_ring_dma(adapter, rx_ring);
- if (err)
- goto err_pages;
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
- rx_ring->rx_skb_top = NULL;
-
- return 0;
-
-err_pages:
- for (i = 0; i < rx_ring->count; i++) {
- buffer_info = &rx_ring->buffer_info[i];
- kfree(buffer_info->ps_pages);
- }
-err:
- vfree(rx_ring->buffer_info);
- e_err("Unable to allocate memory for the receive descriptor ring\n");
- return err;
-}
-
-/**
- * e1000_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
- **/
-static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
-{
- struct e1000_ring *tx_ring = adapter->tx_ring;
- struct e1000_buffer *buffer_info;
- unsigned long size;
- unsigned int i;
-
- for (i = 0; i < tx_ring->count; i++) {
- buffer_info = &tx_ring->buffer_info[i];
- e1000_put_txbuf(adapter, buffer_info);
- }
-
- size = sizeof(struct e1000_buffer) * tx_ring->count;
- memset(tx_ring->buffer_info, 0, size);
-
- memset(tx_ring->desc, 0, tx_ring->size);
-
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-
- writel(0, adapter->hw.hw_addr + tx_ring->head);
- writel(0, adapter->hw.hw_addr + tx_ring->tail);
-}
-
-/**
- * e1000e_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
- *
- * Free all transmit software resources
- **/
-void e1000e_free_tx_resources(struct e1000_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *tx_ring = adapter->tx_ring;
-
- e1000_clean_tx_ring(adapter);
-
- vfree(tx_ring->buffer_info);
- tx_ring->buffer_info = NULL;
-
- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
- tx_ring->desc = NULL;
-}
-
-/**
- * e1000e_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
- *
- * Free all receive software resources
- **/
-
-void e1000e_free_rx_resources(struct e1000_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- int i;
-
- e1000_clean_rx_ring(adapter);
-
- for (i = 0; i < rx_ring->count; i++)
- kfree(rx_ring->buffer_info[i].ps_pages);
-
- vfree(rx_ring->buffer_info);
- rx_ring->buffer_info = NULL;
-
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
- rx_ring->desc = NULL;
-}
-
-/**
- * e1000_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
- * @itr_setting: current adapter->itr
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
- *
- * Stores a new ITR value based on packets and byte
- * counts during the last interrupt. The advantage of per interrupt
- * computation is faster updates and more accurate ITR for the current
- * traffic pattern. Constants in this function were computed
- * based on theoretical maximum wire speed and thresholds were set based
- * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput. This functionality is controlled
- * by the InterruptThrottleRate module parameter.
- **/
-static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
- u16 itr_setting, int packets,
- int bytes)
-{
- unsigned int retval = itr_setting;
-
- if (packets == 0)
- goto update_itr_done;
-
- switch (itr_setting) {
- case lowest_latency:
- /* handle TSO and jumbo frames */
- if (bytes/packets > 8000)
- retval = bulk_latency;
- else if ((packets < 5) && (bytes > 512))
- retval = low_latency;
- break;
- case low_latency: /* 50 usec aka 20000 ints/s */
- if (bytes > 10000) {
- /* this if handles the TSO accounting */
- if (bytes/packets > 8000)
- retval = bulk_latency;
- else if ((packets < 10) || ((bytes/packets) > 1200))
- retval = bulk_latency;
- else if ((packets > 35))
- retval = lowest_latency;
- } else if (bytes/packets > 2000) {
- retval = bulk_latency;
- } else if (packets <= 2 && bytes < 512) {
- retval = lowest_latency;
- }
- break;
- case bulk_latency: /* 250 usec aka 4000 ints/s */
- if (bytes > 25000) {
- if (packets > 35)
- retval = low_latency;
- } else if (bytes < 6000) {
- retval = low_latency;
- }
- break;
- }
-
-update_itr_done:
- return retval;
-}
-
-static void e1000_set_itr(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u16 current_itr;
- u32 new_itr = adapter->itr;
-
- /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
- if (adapter->link_speed != SPEED_1000) {
- current_itr = 0;
- new_itr = 4000;
- goto set_itr_now;
- }
-
- if (adapter->flags2 & FLAG2_DISABLE_AIM) {
- new_itr = 0;
- goto set_itr_now;
- }
-
- adapter->tx_itr = e1000_update_itr(adapter,
- adapter->tx_itr,
- adapter->total_tx_packets,
- adapter->total_tx_bytes);
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
- adapter->tx_itr = low_latency;
-
- adapter->rx_itr = e1000_update_itr(adapter,
- adapter->rx_itr,
- adapter->total_rx_packets,
- adapter->total_rx_bytes);
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
- adapter->rx_itr = low_latency;
-
- current_itr = max(adapter->rx_itr, adapter->tx_itr);
-
- switch (current_itr) {
- /* counts and packets in update_itr are dependent on these numbers */
- case lowest_latency:
- new_itr = 70000;
- break;
- case low_latency:
- new_itr = 20000; /* aka hwitr = ~200 */
- break;
- case bulk_latency:
- new_itr = 4000;
- break;
- default:
- break;
- }
-
-set_itr_now:
- if (new_itr != adapter->itr) {
- /*
- * this attempts to bias the interrupt rate towards Bulk
- * by adding intermediate steps when interrupt rate is
- * increasing
- */
- new_itr = new_itr > adapter->itr ?
- min(adapter->itr + (new_itr >> 2), new_itr) :
- new_itr;
- adapter->itr = new_itr;
- adapter->rx_ring->itr_val = new_itr;
- if (adapter->msix_entries)
- adapter->rx_ring->set_itr = 1;
- else
- if (new_itr)
- ew32(ITR, 1000000000 / (new_itr * 256));
- else
- ew32(ITR, 0);
- }
-}
-
-/**
- * e1000_alloc_queues - Allocate memory for all rings
- * @adapter: board private structure to initialize
- **/
-static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
-{
- adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- goto err;
-
- adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
- if (!adapter->rx_ring)
- goto err;
-
- return 0;
-err:
- e_err("Unable to allocate memory for queues\n");
- kfree(adapter->rx_ring);
- kfree(adapter->tx_ring);
- return -ENOMEM;
-}
-
-/**
- * e1000_clean - NAPI Rx polling callback
- * @napi: struct associated with this polling callback
- * @budget: amount of packets driver is allowed to process this poll
- **/
-static int e1000_clean(struct napi_struct *napi, int budget)
-{
- struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
- struct e1000_hw *hw = &adapter->hw;
- struct net_device *poll_dev = adapter->netdev;
- int tx_cleaned = 1, work_done = 0;
-
- adapter = netdev_priv(poll_dev);
-
- if (adapter->msix_entries &&
- !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
- goto clean_rx;
-
- tx_cleaned = e1000_clean_tx_irq(adapter);
-
-clean_rx:
- adapter->clean_rx(adapter, &work_done, budget);
-
- if (!tx_cleaned)
- work_done = budget;
-
- /* If budget not fully consumed, exit the polling mode */
- if (work_done < budget) {
- if (adapter->itr_setting & 3)
- e1000_set_itr(adapter);
- napi_complete(napi);
- if (!test_bit(__E1000_DOWN, &adapter->state)) {
- if (adapter->msix_entries)
- ew32(IMS, adapter->rx_ring->ims_val);
- else
- e1000_irq_enable(adapter);
- }
- }
-
- return work_done;
-}
-
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 vfta, index;
-
- /* don't update vlan cookie if already programmed */
- if ((adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
- (vid == adapter->mng_vlan_id))
- return;
-
- /* add VID to filter table */
- if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
- index = (vid >> 5) & 0x7F;
- vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
- vfta |= (1 << (vid & 0x1F));
- hw->mac.ops.write_vfta(hw, index, vfta);
- }
-
- set_bit(vid, adapter->active_vlans);
-}
-
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 vfta, index;
-
- if ((adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
- (vid == adapter->mng_vlan_id)) {
- /* release control to f/w */
- e1000e_release_hw_control(adapter);
- return;
- }
-
- /* remove VID from filter table */
- if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
- index = (vid >> 5) & 0x7F;
- vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
- vfta &= ~(1 << (vid & 0x1F));
- hw->mac.ops.write_vfta(hw, index, vfta);
- }
-
- clear_bit(vid, adapter->active_vlans);
-}
-
-/**
- * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
- * @adapter: board private structure to initialize
- **/
-static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
-
- if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
- /* disable VLAN receive filtering */
- rctl = er32(RCTL);
- rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
- ew32(RCTL, rctl);
-
- if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
- e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
- adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- }
- }
-}
-
-/**
- * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
- * @adapter: board private structure to initialize
- **/
-static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
-
- if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
- /* enable VLAN receive filtering */
- rctl = er32(RCTL);
- rctl |= E1000_RCTL_VFE;
- rctl &= ~E1000_RCTL_CFIEN;
- ew32(RCTL, rctl);
- }
-}
-
-/**
- * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
- * @adapter: board private structure to initialize
- **/
-static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl;
-
- /* disable VLAN tag insert/strip */
- ctrl = er32(CTRL);
- ctrl &= ~E1000_CTRL_VME;
- ew32(CTRL, ctrl);
-}
-
-/**
- * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
- * @adapter: board private structure to initialize
- **/
-static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl;
-
- /* enable VLAN tag insert/strip */
- ctrl = er32(CTRL);
- ctrl |= E1000_CTRL_VME;
- ew32(CTRL, ctrl);
-}
-
-static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- u16 vid = adapter->hw.mng_cookie.vlan_id;
- u16 old_vid = adapter->mng_vlan_id;
-
- if (adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
- e1000_vlan_rx_add_vid(netdev, vid);
- adapter->mng_vlan_id = vid;
- }
-
- if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
- e1000_vlan_rx_kill_vid(netdev, old_vid);
-}
-
-static void e1000_restore_vlan(struct e1000_adapter *adapter)
-{
- u16 vid;
-
- e1000_vlan_rx_add_vid(adapter->netdev, 0);
-
- for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- e1000_vlan_rx_add_vid(adapter->netdev, vid);
-}
-
-static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 manc, manc2h, mdef, i, j;
-
- if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
- return;
-
- manc = er32(MANC);
-
- /*
- * enable receiving management packets to the host. this will probably
- * generate destination unreachable messages from the host OS, but
- * the packets will be handled on SMBUS
- */
- manc |= E1000_MANC_EN_MNG2HOST;
- manc2h = er32(MANC2H);
-
- switch (hw->mac.type) {
- default:
- manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
- break;
- case e1000_82574:
- case e1000_82583:
- /*
- * Check if IPMI pass-through decision filter already exists;
- * if so, enable it.
- */
- for (i = 0, j = 0; i < 8; i++) {
- mdef = er32(MDEF(i));
-
- /* Ignore filters with anything other than IPMI ports */
- if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
- continue;
-
- /* Enable this decision filter in MANC2H */
- if (mdef)
- manc2h |= (1 << i);
-
- j |= mdef;
- }
-
- if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
- break;
-
- /* Create new decision filter in an empty filter */
- for (i = 0, j = 0; i < 8; i++)
- if (er32(MDEF(i)) == 0) {
- ew32(MDEF(i), (E1000_MDEF_PORT_623 |
- E1000_MDEF_PORT_664));
- manc2h |= (1 << 1);
- j++;
- break;
- }
-
- if (!j)
- e_warn("Unable to create IPMI pass-through filter\n");
- break;
- }
-
- ew32(MANC2H, manc2h);
- ew32(MANC, manc);
-}
-
-/**
- * e1000_configure_tx - Configure Transmit Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Tx unit of the MAC after a reset.
- **/
-static void e1000_configure_tx(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_ring *tx_ring = adapter->tx_ring;
- u64 tdba;
- u32 tdlen, tctl, tipg, tarc;
- u32 ipgr1, ipgr2;
-
- /* Setup the HW Tx Head and Tail descriptor pointers */
- tdba = tx_ring->dma;
- tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
- ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
- ew32(TDBAH, (tdba >> 32));
- ew32(TDLEN, tdlen);
- ew32(TDH, 0);
- ew32(TDT, 0);
- tx_ring->head = E1000_TDH;
- tx_ring->tail = E1000_TDT;
-
- /* Set the default values for the Tx Inter Packet Gap timer */
- tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
- ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
- ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
-
- if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
- ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
-
- tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
- tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
- ew32(TIPG, tipg);
-
- /* Set the Tx Interrupt Delay register */
- ew32(TIDV, adapter->tx_int_delay);
- /* Tx irq moderation */
- ew32(TADV, adapter->tx_abs_int_delay);
-
- if (adapter->flags2 & FLAG2_DMA_BURST) {
- u32 txdctl = er32(TXDCTL(0));
- txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
- E1000_TXDCTL_WTHRESH);
- /*
- * set up some performance related parameters to encourage the
- * hardware to use the bus more efficiently in bursts, depends
- * on the tx_int_delay to be enabled,
- * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
- * hthresh = 1 ==> prefetch when one or more available
- * pthresh = 0x1f ==> prefetch if internal cache 31 or less
- * BEWARE: this seems to work but should be considered first if
- * there are Tx hangs or other Tx related bugs
- */
- txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
- ew32(TXDCTL(0), txdctl);
- /* erratum work around: set txdctl the same for both queues */
- ew32(TXDCTL(1), txdctl);
- }
-
- /* Program the Transmit Control Register */
- tctl = er32(TCTL);
- tctl &= ~E1000_TCTL_CT;
- tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
- (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
-
- if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
- tarc = er32(TARC(0));
- /*
- * set the speed mode bit, we'll clear it if we're not at
- * gigabit link later
- */
-#define SPEED_MODE_BIT (1 << 21)
- tarc |= SPEED_MODE_BIT;
- ew32(TARC(0), tarc);
- }
-
- /* errata: program both queues to unweighted RR */
- if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
- tarc = er32(TARC(0));
- tarc |= 1;
- ew32(TARC(0), tarc);
- tarc = er32(TARC(1));
- tarc |= 1;
- ew32(TARC(1), tarc);
- }
-
- /* Setup Transmit Descriptor Settings for eop descriptor */
- adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
-
- /* only set IDE if we are delaying interrupts using the timers */
- if (adapter->tx_int_delay)
- adapter->txd_cmd |= E1000_TXD_CMD_IDE;
-
- /* enable Report Status bit */
- adapter->txd_cmd |= E1000_TXD_CMD_RS;
-
- ew32(TCTL, tctl);
-
- e1000e_config_collision_dist(hw);
-}
-
-/**
- * e1000_setup_rctl - configure the receive control registers
- * @adapter: Board private structure
- **/
-#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
- (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
-static void e1000_setup_rctl(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 rctl, rfctl;
- u32 pages = 0;
-
- /* Workaround Si errata on 82579 - configure jumbo frame flow */
- if (hw->mac.type == e1000_pch2lan) {
- s32 ret_val;
-
- if (adapter->netdev->mtu > ETH_DATA_LEN)
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
- else
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
-
- if (ret_val)
- e_dbg("failed to enable jumbo frame workaround mode\n");
- }
-
- /* Program MC offset vector base */
- rctl = er32(RCTL);
- rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
- rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
-
- /* Do not Store bad packets */
- rctl &= ~E1000_RCTL_SBP;
-
- /* Enable Long Packet receive */
- if (adapter->netdev->mtu <= ETH_DATA_LEN)
- rctl &= ~E1000_RCTL_LPE;
- else
- rctl |= E1000_RCTL_LPE;
-
- /* Some systems expect that the CRC is included in SMBUS traffic. The
- * hardware strips the CRC before sending to both SMBUS (BMC) and to
- * host memory when this is enabled
- */
- if (adapter->flags2 & FLAG2_CRC_STRIPPING)
- rctl |= E1000_RCTL_SECRC;
-
- /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
- if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
- u16 phy_data;
-
- e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
- phy_data &= 0xfff8;
- phy_data |= (1 << 2);
- e1e_wphy(hw, PHY_REG(770, 26), phy_data);
-
- e1e_rphy(hw, 22, &phy_data);
- phy_data &= 0x0fff;
- phy_data |= (1 << 14);
- e1e_wphy(hw, 0x10, 0x2823);
- e1e_wphy(hw, 0x11, 0x0003);
- e1e_wphy(hw, 22, phy_data);
- }
-
- /* Setup buffer sizes */
- rctl &= ~E1000_RCTL_SZ_4096;
- rctl |= E1000_RCTL_BSEX;
- switch (adapter->rx_buffer_len) {
- case 2048:
- default:
- rctl |= E1000_RCTL_SZ_2048;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case 4096:
- rctl |= E1000_RCTL_SZ_4096;
- break;
- case 8192:
- rctl |= E1000_RCTL_SZ_8192;
- break;
- case 16384:
- rctl |= E1000_RCTL_SZ_16384;
- break;
- }
-
- /*
- * 82571 and greater support packet-split where the protocol
- * header is placed in skb->data and the packet data is
- * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
- * In the case of a non-split, skb->data is linearly filled,
- * followed by the page buffers. Therefore, skb->data is
- * sized to hold the largest protocol header.
- *
- * allocations using alloc_page take too long for regular MTU
- * so only enable packet split for jumbo frames
- *
- * Using pages when the page size is greater than 16k wastes
- * a lot of memory, since we allocate 3 pages at all times
- * per packet.
- */
- pages = PAGE_USE_COUNT(adapter->netdev->mtu);
- if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
- (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
- adapter->rx_ps_pages = pages;
- else
- adapter->rx_ps_pages = 0;
-
- if (adapter->rx_ps_pages) {
- u32 psrctl = 0;
-
- /* Configure extra packet-split registers */
- rfctl = er32(RFCTL);
- rfctl |= E1000_RFCTL_EXTEN;
- /*
- * disable packet split support for IPv6 extension headers,
- * because some malformed IPv6 headers can hang the Rx
- */
- rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
- E1000_RFCTL_NEW_IPV6_EXT_DIS);
-
- ew32(RFCTL, rfctl);
-
- /* Enable Packet split descriptors */
- rctl |= E1000_RCTL_DTYP_PS;
-
- psrctl |= adapter->rx_ps_bsize0 >>
- E1000_PSRCTL_BSIZE0_SHIFT;
-
- switch (adapter->rx_ps_pages) {
- case 3:
- psrctl |= PAGE_SIZE <<
- E1000_PSRCTL_BSIZE3_SHIFT;
- case 2:
- psrctl |= PAGE_SIZE <<
- E1000_PSRCTL_BSIZE2_SHIFT;
- case 1:
- psrctl |= PAGE_SIZE >>
- E1000_PSRCTL_BSIZE1_SHIFT;
- break;
- }
-
- ew32(PSRCTL, psrctl);
- }
-
- ew32(RCTL, rctl);
- /* just started the receive unit, no need to restart */
- adapter->flags &= ~FLAG_RX_RESTART_NOW;
-}
-
-/**
- * e1000_configure_rx - Configure Receive Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Rx unit of the MAC after a reset.
- **/
-static void e1000_configure_rx(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- u64 rdba;
- u32 rdlen, rctl, rxcsum, ctrl_ext;
-
- if (adapter->rx_ps_pages) {
- /* this is a 32 byte descriptor */
- rdlen = rx_ring->count *
- sizeof(union e1000_rx_desc_packet_split);
- adapter->clean_rx = e1000_clean_rx_irq_ps;
- adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
- } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
- rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
- adapter->clean_rx = e1000_clean_jumbo_rx_irq;
- adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
- } else {
- rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
- adapter->clean_rx = e1000_clean_rx_irq;
- adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
- }
-
- /* disable receives while setting up the descriptors */
- rctl = er32(RCTL);
- if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
- e1e_flush();
- usleep_range(10000, 20000);
-
- if (adapter->flags2 & FLAG2_DMA_BURST) {
- /*
- * set the writeback threshold (only takes effect if the RDTR
- * is set). set GRAN=1 and write back up to 0x4 worth, and
- * enable prefetching of 0x20 Rx descriptors
- * granularity = 01
- * wthresh = 04,
- * hthresh = 04,
- * pthresh = 0x20
- */
- ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
- ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
-
- /*
- * override the delay timers for enabling bursting, only if
- * the value was not set by the user via module options
- */
- if (adapter->rx_int_delay == DEFAULT_RDTR)
- adapter->rx_int_delay = BURST_RDTR;
- if (adapter->rx_abs_int_delay == DEFAULT_RADV)
- adapter->rx_abs_int_delay = BURST_RADV;
- }
-
- /* set the Receive Delay Timer Register */
- ew32(RDTR, adapter->rx_int_delay);
-
- /* irq moderation */
- ew32(RADV, adapter->rx_abs_int_delay);
- if ((adapter->itr_setting != 0) && (adapter->itr != 0))
- ew32(ITR, 1000000000 / (adapter->itr * 256));
-
- ctrl_ext = er32(CTRL_EXT);
- /* Auto-Mask interrupts upon ICR access */
- ctrl_ext |= E1000_CTRL_EXT_IAME;
- ew32(IAM, 0xffffffff);
- ew32(CTRL_EXT, ctrl_ext);
- e1e_flush();
-
- /*
- * Setup the HW Rx Head and Tail Descriptor Pointers and
- * the Base and Length of the Rx Descriptor Ring
- */
- rdba = rx_ring->dma;
- ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
- ew32(RDBAH, (rdba >> 32));
- ew32(RDLEN, rdlen);
- ew32(RDH, 0);
- ew32(RDT, 0);
- rx_ring->head = E1000_RDH;
- rx_ring->tail = E1000_RDT;
-
- /* Enable Receive Checksum Offload for TCP and UDP */
- rxcsum = er32(RXCSUM);
- if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
- rxcsum |= E1000_RXCSUM_TUOFL;
-
- /*
- * IPv4 payload checksum for UDP fragments must be
- * used in conjunction with packet-split.
- */
- if (adapter->rx_ps_pages)
- rxcsum |= E1000_RXCSUM_IPPCSE;
- } else {
- rxcsum &= ~E1000_RXCSUM_TUOFL;
- /* no need to clear IPPCSE as it defaults to 0 */
- }
- ew32(RXCSUM, rxcsum);
-
- /*
- * Enable early receives on supported devices, only takes effect when
- * packet size is equal or larger than the specified value (in 8 byte
- * units), e.g. using jumbo frames when setting to E1000_ERT_2048
- */
- if ((adapter->flags & FLAG_HAS_ERT) ||
- (adapter->hw.mac.type == e1000_pch2lan)) {
- if (adapter->netdev->mtu > ETH_DATA_LEN) {
- u32 rxdctl = er32(RXDCTL(0));
- ew32(RXDCTL(0), rxdctl | 0x3);
- if (adapter->flags & FLAG_HAS_ERT)
- ew32(ERT, E1000_ERT_2048 | (1 << 13));
- /*
- * With jumbo frames and early-receive enabled,
- * excessive C-state transition latencies result in
- * dropped transactions.
- */
- pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
- } else {
- pm_qos_update_request(&adapter->netdev->pm_qos_req,
- PM_QOS_DEFAULT_VALUE);
- }
- }
-
- /* Enable Receives */
- ew32(RCTL, rctl);
-}
-
-/**
- * e1000_update_mc_addr_list - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- *
- * Updates the Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
- **/
-static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count)
-{
- hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
-}
-
-/**
- * e1000_set_multi - Multicast and Promiscuous mode set
- * @netdev: network interface device structure
- *
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast,
- * promiscuous mode, and all-multi behavior.
- **/
-static void e1000_set_multi(struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct netdev_hw_addr *ha;
- u8 *mta_list;
- u32 rctl;
-
- /* Check for Promiscuous and All Multicast modes */
-
- rctl = er32(RCTL);
-
- if (netdev->flags & IFF_PROMISC) {
- rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
- rctl &= ~E1000_RCTL_VFE;
- /* Do not hardware filter VLANs in promisc mode */
- e1000e_vlan_filter_disable(adapter);
- } else {
- if (netdev->flags & IFF_ALLMULTI) {
- rctl |= E1000_RCTL_MPE;
- rctl &= ~E1000_RCTL_UPE;
- } else {
- rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
- }
- e1000e_vlan_filter_enable(adapter);
- }
-
- ew32(RCTL, rctl);
-
- if (!netdev_mc_empty(netdev)) {
- int i = 0;
-
- mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
- if (!mta_list)
- return;
-
- /* prepare a packed array of only addresses. */
- netdev_for_each_mc_addr(ha, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
-
- e1000_update_mc_addr_list(hw, mta_list, i);
- kfree(mta_list);
- } else {
- /*
- * if we're called from probe, we might not have
- * anything to do here, so clear out the list
- */
- e1000_update_mc_addr_list(hw, NULL, 0);
- }
-
- if (netdev->features & NETIF_F_HW_VLAN_RX)
- e1000e_vlan_strip_enable(adapter);
- else
- e1000e_vlan_strip_disable(adapter);
-}
-
-/**
- * e1000_configure - configure the hardware for Rx and Tx
- * @adapter: private board structure
- **/
-static void e1000_configure(struct e1000_adapter *adapter)
-{
- e1000_set_multi(adapter->netdev);
-
- e1000_restore_vlan(adapter);
- e1000_init_manageability_pt(adapter);
-
- e1000_configure_tx(adapter);
- e1000_setup_rctl(adapter);
- e1000_configure_rx(adapter);
- adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
- GFP_KERNEL);
-}
-
-/**
- * e1000e_power_up_phy - restore link in case the phy was powered down
- * @adapter: address of board private structure
- *
- * The phy may be powered down to save power and turn off link when the
- * driver is unloaded and wake on lan is not enabled (among others)
- * *** this routine MUST be followed by a call to e1000e_reset ***
- **/
-void e1000e_power_up_phy(struct e1000_adapter *adapter)
-{
- if (adapter->hw.phy.ops.power_up)
- adapter->hw.phy.ops.power_up(&adapter->hw);
-
- adapter->hw.mac.ops.setup_link(&adapter->hw);
-}
-
-/**
- * e1000_power_down_phy - Power down the PHY
- *
- * Power down the PHY so no link is implied when interface is down.
- * The PHY cannot be powered down if management or WoL is active.
- */
-static void e1000_power_down_phy(struct e1000_adapter *adapter)
-{
- /* WoL is enabled */
- if (adapter->wol)
- return;
-
- if (adapter->hw.phy.ops.power_down)
- adapter->hw.phy.ops.power_down(&adapter->hw);
-}
-
-/**
- * e1000e_reset - bring the hardware into a known good state
- *
- * This function boots the hardware and enables some settings that
- * require a configuration cycle of the hardware - those cannot be
- * set/changed during runtime. After reset the device needs to be
- * properly configured for Rx, Tx etc.
- */
-void e1000e_reset(struct e1000_adapter *adapter)
-{
- struct e1000_mac_info *mac = &adapter->hw.mac;
- struct e1000_fc_info *fc = &adapter->hw.fc;
- struct e1000_hw *hw = &adapter->hw;
- u32 tx_space, min_tx_space, min_rx_space;
- u32 pba = adapter->pba;
- u16 hwm;
-
- /* reset Packet Buffer Allocation to default */
- ew32(PBA, pba);
-
- if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
- /*
- * To maintain wire speed transmits, the Tx FIFO should be
- * large enough to accommodate two full transmit packets,
- * rounded up to the next 1KB and expressed in KB. Likewise,
- * the Rx FIFO should be large enough to accommodate at least
- * one full receive packet and is similarly rounded up and
- * expressed in KB.
- */
- pba = er32(PBA);
- /* upper 16 bits has Tx packet buffer allocation size in KB */
- tx_space = pba >> 16;
- /* lower 16 bits has Rx packet buffer allocation size in KB */
- pba &= 0xffff;
- /*
- * the Tx fifo also stores 16 bytes of information about the Tx
- * but don't include ethernet FCS because hardware appends it
- */
- min_tx_space = (adapter->max_frame_size +
- sizeof(struct e1000_tx_desc) -
- ETH_FCS_LEN) * 2;
- min_tx_space = ALIGN(min_tx_space, 1024);
- min_tx_space >>= 10;
- /* software strips receive CRC, so leave room for it */
- min_rx_space = adapter->max_frame_size;
- min_rx_space = ALIGN(min_rx_space, 1024);
- min_rx_space >>= 10;
-
- /*
- * If current Tx allocation is less than the min Tx FIFO size,
- * and the min Tx FIFO size is less than the current Rx FIFO
- * allocation, take space away from current Rx allocation
- */
- if ((tx_space < min_tx_space) &&
- ((min_tx_space - tx_space) < pba)) {
- pba -= min_tx_space - tx_space;
-
- /*
- * if short on Rx space, Rx wins and must trump Tx
- * adjustment or use Early Receive if available
- */
- if ((pba < min_rx_space) &&
- (!(adapter->flags & FLAG_HAS_ERT)))
- /* ERT enabled in e1000_configure_rx */
- pba = min_rx_space;
- }
-
- ew32(PBA, pba);
- }
-
- /*
- * flow control settings
- *
- * The high water mark must be low enough to fit one full frame
- * (or the size used for early receive) above it in the Rx FIFO.
- * Set it to the lower of:
- * - 90% of the Rx FIFO size, and
- * - the full Rx FIFO size minus the early receive size (for parts
- * with ERT support assuming ERT set to E1000_ERT_2048), or
- * - the full Rx FIFO size minus one full frame
- */
- if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
- fc->pause_time = 0xFFFF;
- else
- fc->pause_time = E1000_FC_PAUSE_TIME;
- fc->send_xon = 1;
- fc->current_mode = fc->requested_mode;
-
- switch (hw->mac.type) {
- default:
- if ((adapter->flags & FLAG_HAS_ERT) &&
- (adapter->netdev->mtu > ETH_DATA_LEN))
- hwm = min(((pba << 10) * 9 / 10),
- ((pba << 10) - (E1000_ERT_2048 << 3)));
- else
- hwm = min(((pba << 10) * 9 / 10),
- ((pba << 10) - adapter->max_frame_size));
-
- fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
- fc->low_water = fc->high_water - 8;
- break;
- case e1000_pchlan:
- /*
- * Workaround PCH LOM adapter hangs with certain network
- * loads. If hangs persist, try disabling Tx flow control.
- */
- if (adapter->netdev->mtu > ETH_DATA_LEN) {
- fc->high_water = 0x3500;
- fc->low_water = 0x1500;
- } else {
- fc->high_water = 0x5000;
- fc->low_water = 0x3000;
- }
- fc->refresh_time = 0x1000;
- break;
- case e1000_pch2lan:
- fc->high_water = 0x05C20;
- fc->low_water = 0x05048;
- fc->pause_time = 0x0650;
- fc->refresh_time = 0x0400;
- if (adapter->netdev->mtu > ETH_DATA_LEN) {
- pba = 14;
- ew32(PBA, pba);
- }
- break;
- }
-
- /*
- * Disable Adaptive Interrupt Moderation if 2 full packets cannot
- * fit in receive buffer and early-receive not supported.
- */
- if (adapter->itr_setting & 0x3) {
- if (((adapter->max_frame_size * 2) > (pba << 10)) &&
- !(adapter->flags & FLAG_HAS_ERT)) {
- if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
- dev_info(&adapter->pdev->dev,
- "Interrupt Throttle Rate turned off\n");
- adapter->flags2 |= FLAG2_DISABLE_AIM;
- ew32(ITR, 0);
- }
- } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
- dev_info(&adapter->pdev->dev,
- "Interrupt Throttle Rate turned on\n");
- adapter->flags2 &= ~FLAG2_DISABLE_AIM;
- adapter->itr = 20000;
- ew32(ITR, 1000000000 / (adapter->itr * 256));
- }
- }
-
- /* Allow time for pending master requests to run */
- mac->ops.reset_hw(hw);
-
- /*
- * For parts with AMT enabled, let the firmware know
- * that the network interface is in control
- */
- if (adapter->flags & FLAG_HAS_AMT)
- e1000e_get_hw_control(adapter);
-
- ew32(WUC, 0);
-
- if (mac->ops.init_hw(hw))
- e_err("Hardware Error\n");
-
- e1000_update_mng_vlan(adapter);
-
- /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
- ew32(VET, ETH_P_8021Q);
-
- e1000e_reset_adaptive(hw);
-
- if (!netif_running(adapter->netdev) &&
- !test_bit(__E1000_TESTING, &adapter->state)) {
- e1000_power_down_phy(adapter);
- return;
- }
-
- e1000_get_phy_info(hw);
-
- if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
- !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
- u16 phy_data = 0;
- /*
- * speed up time to link by disabling smart power down, ignore
- * the return value of this function because there is nothing
- * different we would do if it failed
- */
- e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
- phy_data &= ~IGP02E1000_PM_SPD;
- e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
- }
-}
-
-int e1000e_up(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- /* hardware has been reset, we need to reload some things */
- e1000_configure(adapter);
-
- clear_bit(__E1000_DOWN, &adapter->state);
-
- napi_enable(&adapter->napi);
- if (adapter->msix_entries)
- e1000_configure_msix(adapter);
- e1000_irq_enable(adapter);
-
- netif_start_queue(adapter->netdev);
-
- /* fire a link change interrupt to start the watchdog */
- if (adapter->msix_entries)
- ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
- else
- ew32(ICS, E1000_ICS_LSC);
-
- return 0;
-}
-
-static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- if (!(adapter->flags2 & FLAG2_DMA_BURST))
- return;
-
- /* flush pending descriptor writebacks to memory */
- ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
- ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
-
- /* execute the writes immediately */
- e1e_flush();
-}
-
-static void e1000e_update_stats(struct e1000_adapter *adapter);
-
-void e1000e_down(struct e1000_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct e1000_hw *hw = &adapter->hw;
- u32 tctl, rctl;
-
- /*
- * signal that we're down so the interrupt handler does not
- * reschedule our watchdog timer
- */
- set_bit(__E1000_DOWN, &adapter->state);
-
- /* disable receives in the hardware */
- rctl = er32(RCTL);
- if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
- /* flush and sleep below */
-
- netif_stop_queue(netdev);
-
- /* disable transmits in the hardware */
- tctl = er32(TCTL);
- tctl &= ~E1000_TCTL_EN;
- ew32(TCTL, tctl);
-
- /* flush both disables and wait for them to finish */
- e1e_flush();
- usleep_range(10000, 20000);
-
- napi_disable(&adapter->napi);
- e1000_irq_disable(adapter);
-
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
-
- netif_carrier_off(netdev);
-
- spin_lock(&adapter->stats64_lock);
- e1000e_update_stats(adapter);
- spin_unlock(&adapter->stats64_lock);
-
- e1000e_flush_descriptors(adapter);
- e1000_clean_tx_ring(adapter);
- e1000_clean_rx_ring(adapter);
-
- adapter->link_speed = 0;
- adapter->link_duplex = 0;
-
- if (!pci_channel_offline(adapter->pdev))
- e1000e_reset(adapter);
-
- /*
- * TODO: for power management, we could drop the link and
- * pci_disable_device here.
- */
-}
-
-void e1000e_reinit_locked(struct e1000_adapter *adapter)
-{
- might_sleep();
- while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
- e1000e_down(adapter);
- e1000e_up(adapter);
- clear_bit(__E1000_RESETTING, &adapter->state);
-}
-
-/**
- * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
- * @adapter: board private structure to initialize
- *
- * e1000_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
- **/
-static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
-
- adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
- adapter->rx_ps_bsize0 = 128;
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
-
- spin_lock_init(&adapter->stats64_lock);
-
- e1000e_set_interrupt_capability(adapter);
-
- if (e1000_alloc_queues(adapter))
- return -ENOMEM;
-
- /* Explicitly disable IRQ since the NIC can be in any state. */
- e1000_irq_disable(adapter);
-
- set_bit(__E1000_DOWN, &adapter->state);
- return 0;
-}
-
-/**
- * e1000_intr_msi_test - Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-static irqreturn_t e1000_intr_msi_test(int irq, void *data)
-{
- struct net_device *netdev = data;
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 icr = er32(ICR);
-
- e_dbg("icr is %08X\n", icr);
- if (icr & E1000_ICR_RXSEQ) {
- adapter->flags &= ~FLAG_MSI_TEST_FAILED;
- wmb();
- }
-
- return IRQ_HANDLED;
-}
-
-/**
- * e1000_test_msi_interrupt - Returns 0 for successful test
- * @adapter: board private struct
- *
- * code flow taken from tg3.c
- **/
-static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct e1000_hw *hw = &adapter->hw;
- int err;
-
- /* poll_enable hasn't been called yet, so don't need disable */
- /* clear any pending events */
- er32(ICR);
-
- /* free the real vector and request a test handler */
- e1000_free_irq(adapter);
- e1000e_reset_interrupt_capability(adapter);
-
- /* Assume that the test fails, if it succeeds then the test
- * MSI irq handler will unset this flag */
- adapter->flags |= FLAG_MSI_TEST_FAILED;
-
- err = pci_enable_msi(adapter->pdev);
- if (err)
- goto msi_test_failed;
-
- err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
- netdev->name, netdev);
- if (err) {
- pci_disable_msi(adapter->pdev);
- goto msi_test_failed;
- }
-
- wmb();
-
- e1000_irq_enable(adapter);
-
- /* fire an unusual interrupt on the test handler */
- ew32(ICS, E1000_ICS_RXSEQ);
- e1e_flush();
- msleep(50);
-
- e1000_irq_disable(adapter);
-
- rmb();
-
- if (adapter->flags & FLAG_MSI_TEST_FAILED) {
- adapter->int_mode = E1000E_INT_MODE_LEGACY;
- e_info("MSI interrupt test failed, using legacy interrupt.\n");
- } else
- e_dbg("MSI interrupt test succeeded!\n");
-
- free_irq(adapter->pdev->irq, netdev);
- pci_disable_msi(adapter->pdev);
-
-msi_test_failed:
- e1000e_set_interrupt_capability(adapter);
- return e1000_request_irq(adapter);
-}
-
-/**
- * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
- * @adapter: board private struct
- *
- * code flow taken from tg3.c, called with e1000 interrupts disabled.
- **/
-static int e1000_test_msi(struct e1000_adapter *adapter)
-{
- int err;
- u16 pci_cmd;
-
- if (!(adapter->flags & FLAG_MSI_ENABLED))
- return 0;
-
- /* disable SERR in case the MSI write causes a master abort */
- pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
- if (pci_cmd & PCI_COMMAND_SERR)
- pci_write_config_word(adapter->pdev, PCI_COMMAND,
- pci_cmd & ~PCI_COMMAND_SERR);
-
- err = e1000_test_msi_interrupt(adapter);
-
- /* re-enable SERR */
- if (pci_cmd & PCI_COMMAND_SERR) {
- pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
- pci_cmd |= PCI_COMMAND_SERR;
- pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
- }
-
- return err;
-}
-
-/**
- * e1000_open - Called when a network interface is made active
- * @netdev: network interface device structure
- *
- * Returns 0 on success, negative value on failure
- *
- * The open entry point is called when a network interface is made
- * active by the system (IFF_UP). At this point all resources needed
- * for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
- * and the stack is notified that the interface is ready.
- **/
-static int e1000_open(struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
- int err;
-
- /* disallow open during test */
- if (test_bit(__E1000_TESTING, &adapter->state))
- return -EBUSY;
-
- pm_runtime_get_sync(&pdev->dev);
-
- netif_carrier_off(netdev);
-
- /* allocate transmit descriptors */
- err = e1000e_setup_tx_resources(adapter);
- if (err)
- goto err_setup_tx;
-
- /* allocate receive descriptors */
- err = e1000e_setup_rx_resources(adapter);
- if (err)
- goto err_setup_rx;
-
- /*
- * If AMT is enabled, let the firmware know that the network
- * interface is now open and reset the part to a known state.
- */
- if (adapter->flags & FLAG_HAS_AMT) {
- e1000e_get_hw_control(adapter);
- e1000e_reset(adapter);
- }
-
- e1000e_power_up_phy(adapter);
-
- adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- if ((adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
- e1000_update_mng_vlan(adapter);
-
- /* DMA latency requirement to workaround early-receive/jumbo issue */
- if ((adapter->flags & FLAG_HAS_ERT) ||
- (adapter->hw.mac.type == e1000_pch2lan))
- pm_qos_add_request(&adapter->netdev->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-
- /*
- * before we allocate an interrupt, we must be ready to handle it.
- * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
- * as soon as we call pci_request_irq, so we have to setup our
- * clean_rx handler before we do so.
- */
- e1000_configure(adapter);
-
- err = e1000_request_irq(adapter);
- if (err)
- goto err_req_irq;
-
- /*
- * Work around PCIe errata with MSI interrupts causing some chipsets to
- * ignore e1000e MSI messages, which means we need to test our MSI
- * interrupt now
- */
- if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
- err = e1000_test_msi(adapter);
- if (err) {
- e_err("Interrupt allocation failed\n");
- goto err_req_irq;
- }
- }
-
- /* From here on the code is the same as e1000e_up() */
- clear_bit(__E1000_DOWN, &adapter->state);
-
- napi_enable(&adapter->napi);
-
- e1000_irq_enable(adapter);
-
- netif_start_queue(netdev);
-
- adapter->idle_check = true;
- pm_runtime_put(&pdev->dev);
-
- /* fire a link status change interrupt to start the watchdog */
- if (adapter->msix_entries)
- ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
- else
- ew32(ICS, E1000_ICS_LSC);
-
- return 0;
-
-err_req_irq:
- e1000e_release_hw_control(adapter);
- e1000_power_down_phy(adapter);
- e1000e_free_rx_resources(adapter);
-err_setup_rx:
- e1000e_free_tx_resources(adapter);
-err_setup_tx:
- e1000e_reset(adapter);
- pm_runtime_put_sync(&pdev->dev);
-
- return err;
-}
-
-/**
- * e1000_close - Disables a network interface
- * @netdev: network interface device structure
- *
- * Returns 0, this is not allowed to fail
- *
- * The close entry point is called when an interface is de-activated
- * by the OS. The hardware is still under the drivers control, but
- * needs to be disabled. A global MAC reset is issued to stop the
- * hardware, and all transmit and receive resources are freed.
- **/
-static int e1000_close(struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
-
- WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
-
- pm_runtime_get_sync(&pdev->dev);
-
- if (!test_bit(__E1000_DOWN, &adapter->state)) {
- e1000e_down(adapter);
- e1000_free_irq(adapter);
- }
- e1000_power_down_phy(adapter);
-
- e1000e_free_tx_resources(adapter);
- e1000e_free_rx_resources(adapter);
-
- /*
- * kill manageability vlan ID if supported, but not if a vlan with
- * the same ID is registered on the host OS (let 8021q kill it)
- */
- if (adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
- e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-
- /*
- * If AMT is enabled, let the firmware know that the network
- * interface is now closed
- */
- if ((adapter->flags & FLAG_HAS_AMT) &&
- !test_bit(__E1000_TESTING, &adapter->state))
- e1000e_release_hw_control(adapter);
-
- if ((adapter->flags & FLAG_HAS_ERT) ||
- (adapter->hw.mac.type == e1000_pch2lan))
- pm_qos_remove_request(&adapter->netdev->pm_qos_req);
-
- pm_runtime_put_sync(&pdev->dev);
-
- return 0;
-}
-/**
- * e1000_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int e1000_set_mac(struct net_device *netdev, void *p)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
-
- e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
-
- if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
- /* activate the work around */
- e1000e_set_laa_state_82571(&adapter->hw, 1);
-
- /*
- * Hold a copy of the LAA in RAR[14] This is done so that
- * between the time RAR[0] gets clobbered and the time it
- * gets fixed (in e1000_watchdog), the actual LAA is in one
- * of the RARs and no incoming packets directed to this port
- * are dropped. Eventually the LAA will be in RAR[0] and
- * RAR[14]
- */
- e1000e_rar_set(&adapter->hw,
- adapter->hw.mac.addr,
- adapter->hw.mac.rar_entry_count - 1);
- }
-
- return 0;
-}
-
-/**
- * e1000e_update_phy_task - work thread to update phy
- * @work: pointer to our work struct
- *
- * this worker thread exists because we must acquire a
- * semaphore to read the phy, which we could msleep while
- * waiting for it, and we can't msleep in a timer.
- **/
-static void e1000e_update_phy_task(struct work_struct *work)
-{
- struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter, update_phy_task);
-
- if (test_bit(__E1000_DOWN, &adapter->state))
- return;
-
- e1000_get_phy_info(&adapter->hw);
-}
-
-/*
- * Need to wait a few seconds after link up to get diagnostic information from
- * the phy
- */
-static void e1000_update_phy_info(unsigned long data)
-{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
-
- if (test_bit(__E1000_DOWN, &adapter->state))
- return;
-
- schedule_work(&adapter->update_phy_task);
-}
-
-/**
- * e1000e_update_phy_stats - Update the PHY statistics counters
- * @adapter: board private structure
- *
- * Read/clear the upper 16-bit PHY registers and read/accumulate lower
- **/
-static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- s32 ret_val;
- u16 phy_data;
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return;
-
- /*
- * A page set is expensive so check if already on desired page.
- * If not, set to the page with the PHY status registers.
- */
- hw->phy.addr = 1;
- ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
- &phy_data);
- if (ret_val)
- goto release;
- if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
- ret_val = hw->phy.ops.set_page(hw,
- HV_STATS_PAGE << IGP_PAGE_SHIFT);
- if (ret_val)
- goto release;
- }
-
- /* Single Collision Count */
- hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
- ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
- if (!ret_val)
- adapter->stats.scc += phy_data;
-
- /* Excessive Collision Count */
- hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
- ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
- if (!ret_val)
- adapter->stats.ecol += phy_data;
-
- /* Multiple Collision Count */
- hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
- ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
- if (!ret_val)
- adapter->stats.mcc += phy_data;
-
- /* Late Collision Count */
- hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
- ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
- if (!ret_val)
- adapter->stats.latecol += phy_data;
-
- /* Collision Count - also used for adaptive IFS */
- hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
- ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
- if (!ret_val)
- hw->mac.collision_delta = phy_data;
-
- /* Defer Count */
- hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
- ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
- if (!ret_val)
- adapter->stats.dc += phy_data;
-
- /* Transmit with no CRS */
- hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
- ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
- if (!ret_val)
- adapter->stats.tncrs += phy_data;
-
-release:
- hw->phy.ops.release(hw);
-}
-
-/**
- * e1000e_update_stats - Update the board statistics counters
- * @adapter: board private structure
- **/
-static void e1000e_update_stats(struct e1000_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct e1000_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
-
- /*
- * Prevent stats update while adapter is being reset, or if the pci
- * connection is down.
- */
- if (adapter->link_speed == 0)
- return;
- if (pci_channel_offline(pdev))
- return;
-
- adapter->stats.crcerrs += er32(CRCERRS);
- adapter->stats.gprc += er32(GPRC);
- adapter->stats.gorc += er32(GORCL);
- er32(GORCH); /* Clear gorc */
- adapter->stats.bprc += er32(BPRC);
- adapter->stats.mprc += er32(MPRC);
- adapter->stats.roc += er32(ROC);
-
- adapter->stats.mpc += er32(MPC);
-
- /* Half-duplex statistics */
- if (adapter->link_duplex == HALF_DUPLEX) {
- if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
- e1000e_update_phy_stats(adapter);
- } else {
- adapter->stats.scc += er32(SCC);
- adapter->stats.ecol += er32(ECOL);
- adapter->stats.mcc += er32(MCC);
- adapter->stats.latecol += er32(LATECOL);
- adapter->stats.dc += er32(DC);
-
- hw->mac.collision_delta = er32(COLC);
-
- if ((hw->mac.type != e1000_82574) &&
- (hw->mac.type != e1000_82583))
- adapter->stats.tncrs += er32(TNCRS);
- }
- adapter->stats.colc += hw->mac.collision_delta;
- }
-
- adapter->stats.xonrxc += er32(XONRXC);
- adapter->stats.xontxc += er32(XONTXC);
- adapter->stats.xoffrxc += er32(XOFFRXC);
- adapter->stats.xofftxc += er32(XOFFTXC);
- adapter->stats.gptc += er32(GPTC);
- adapter->stats.gotc += er32(GOTCL);
- er32(GOTCH); /* Clear gotc */
- adapter->stats.rnbc += er32(RNBC);
- adapter->stats.ruc += er32(RUC);
-
- adapter->stats.mptc += er32(MPTC);
- adapter->stats.bptc += er32(BPTC);
-
- /* used for adaptive IFS */
-
- hw->mac.tx_packet_delta = er32(TPT);
- adapter->stats.tpt += hw->mac.tx_packet_delta;
-
- adapter->stats.algnerrc += er32(ALGNERRC);
- adapter->stats.rxerrc += er32(RXERRC);
- adapter->stats.cexterr += er32(CEXTERR);
- adapter->stats.tsctc += er32(TSCTC);
- adapter->stats.tsctfc += er32(TSCTFC);
-
- /* Fill out the OS statistics structure */
- netdev->stats.multicast = adapter->stats.mprc;
- netdev->stats.collisions = adapter->stats.colc;
-
- /* Rx Errors */
-
- /*
- * RLEC on some newer hardware can be incorrect so build
- * our own version based on RUC and ROC
- */
- netdev->stats.rx_errors = adapter->stats.rxerrc +
- adapter->stats.crcerrs + adapter->stats.algnerrc +
- adapter->stats.ruc + adapter->stats.roc +
- adapter->stats.cexterr;
- netdev->stats.rx_length_errors = adapter->stats.ruc +
- adapter->stats.roc;
- netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
- netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
- netdev->stats.rx_missed_errors = adapter->stats.mpc;
-
- /* Tx Errors */
- netdev->stats.tx_errors = adapter->stats.ecol +
- adapter->stats.latecol;
- netdev->stats.tx_aborted_errors = adapter->stats.ecol;
- netdev->stats.tx_window_errors = adapter->stats.latecol;
- netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
-
- /* Tx Dropped needs to be maintained elsewhere */
-
- /* Management Stats */
- adapter->stats.mgptc += er32(MGTPTC);
- adapter->stats.mgprc += er32(MGTPRC);
- adapter->stats.mgpdc += er32(MGTPDC);
-}
-
-/**
- * e1000_phy_read_status - Update the PHY register status snapshot
- * @adapter: board private structure
- **/
-static void e1000_phy_read_status(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_phy_regs *phy = &adapter->phy_regs;
-
- if ((er32(STATUS) & E1000_STATUS_LU) &&
- (adapter->hw.phy.media_type == e1000_media_type_copper)) {
- int ret_val;
-
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
- ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
- ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
- ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
- ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
- ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
- ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
- ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
- if (ret_val)
- e_warn("Error reading PHY register\n");
- } else {
- /*
- * Do not read PHY registers if link is not up
- * Set values to typical power-on defaults
- */
- phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
- phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
- BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
- BMSR_ERCAP);
- phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
- ADVERTISE_ALL | ADVERTISE_CSMA);
- phy->lpa = 0;
- phy->expansion = EXPANSION_ENABLENPAGE;
- phy->ctrl1000 = ADVERTISE_1000FULL;
- phy->stat1000 = 0;
- phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
- }
-}
-
-static void e1000_print_link_info(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl = er32(CTRL);
-
- /* Link status message must follow this format for user tools */
- printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
- adapter->netdev->name,
- adapter->link_speed,
- (adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
- "Rx/Tx" :
- ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
- ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
-}
-
-static bool e1000e_has_link(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- bool link_active = 0;
- s32 ret_val = 0;
-
- /*
- * get_link_status is set on LSC (link status) interrupt or
- * Rx sequence error interrupt. get_link_status will stay
- * false until the check_for_link establishes link
- * for copper adapters ONLY
- */
- switch (hw->phy.media_type) {
- case e1000_media_type_copper:
- if (hw->mac.get_link_status) {
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
- } else {
- link_active = 1;
- }
- break;
- case e1000_media_type_fiber:
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = !!(er32(STATUS) & E1000_STATUS_LU);
- break;
- case e1000_media_type_internal_serdes:
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = adapter->hw.mac.serdes_has_link;
- break;
- default:
- case e1000_media_type_unknown:
- break;
- }
-
- if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
- (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
- /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
- e_info("Gigabit has been disabled, downgrading speed\n");
- }
-
- return link_active;
-}
-
-static void e1000e_enable_receives(struct e1000_adapter *adapter)
-{
- /* make sure the receive unit is started */
- if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
- (adapter->flags & FLAG_RX_RESTART_NOW)) {
- struct e1000_hw *hw = &adapter->hw;
- u32 rctl = er32(RCTL);
- ew32(RCTL, rctl | E1000_RCTL_EN);
- adapter->flags &= ~FLAG_RX_RESTART_NOW;
- }
-}
-
-static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- /*
- * With 82574 controllers, PHY needs to be checked periodically
- * for hung state and reset, if two calls return true
- */
- if (e1000_check_phy_82574(hw))
- adapter->phy_hang_count++;
- else
- adapter->phy_hang_count = 0;
-
- if (adapter->phy_hang_count > 1) {
- adapter->phy_hang_count = 0;
- schedule_work(&adapter->reset_task);
- }
-}
-
-/**
- * e1000_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
- **/
-static void e1000_watchdog(unsigned long data)
-{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
-
- /* Do the rest outside of interrupt context */
- schedule_work(&adapter->watchdog_task);
-
- /* TODO: make this use queue_delayed_work() */
-}
-
-static void e1000_watchdog_task(struct work_struct *work)
-{
- struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter, watchdog_task);
- struct net_device *netdev = adapter->netdev;
- struct e1000_mac_info *mac = &adapter->hw.mac;
- struct e1000_phy_info *phy = &adapter->hw.phy;
- struct e1000_ring *tx_ring = adapter->tx_ring;
- struct e1000_hw *hw = &adapter->hw;
- u32 link, tctl;
-
- if (test_bit(__E1000_DOWN, &adapter->state))
- return;
-
- link = e1000e_has_link(adapter);
- if ((netif_carrier_ok(netdev)) && link) {
- /* Cancel scheduled suspend requests. */
- pm_runtime_resume(netdev->dev.parent);
-
- e1000e_enable_receives(adapter);
- goto link_up;
- }
-
- if ((e1000e_enable_tx_pkt_filtering(hw)) &&
- (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
- e1000_update_mng_vlan(adapter);
-
- if (link) {
- if (!netif_carrier_ok(netdev)) {
- bool txb2b = 1;
-
- /* Cancel scheduled suspend requests. */
- pm_runtime_resume(netdev->dev.parent);
-
- /* update snapshot of PHY registers on LSC */
- e1000_phy_read_status(adapter);
- mac->ops.get_link_up_info(&adapter->hw,
- &adapter->link_speed,
- &adapter->link_duplex);
- e1000_print_link_info(adapter);
- /*
- * On supported PHYs, check for duplex mismatch only
- * if link has autonegotiated at 10/100 half
- */
- if ((hw->phy.type == e1000_phy_igp_3 ||
- hw->phy.type == e1000_phy_bm) &&
- (hw->mac.autoneg == true) &&
- (adapter->link_speed == SPEED_10 ||
- adapter->link_speed == SPEED_100) &&
- (adapter->link_duplex == HALF_DUPLEX)) {
- u16 autoneg_exp;
-
- e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
-
- if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
- e_info("Autonegotiated half duplex but"
- " link partner cannot autoneg. "
- " Try forcing full duplex if "
- "link gets many collisions.\n");
- }
-
- /* adjust timeout factor according to speed/duplex */
- adapter->tx_timeout_factor = 1;
- switch (adapter->link_speed) {
- case SPEED_10:
- txb2b = 0;
- adapter->tx_timeout_factor = 16;
- break;
- case SPEED_100:
- txb2b = 0;
- adapter->tx_timeout_factor = 10;
- break;
- }
-
- /*
- * workaround: re-program speed mode bit after
- * link-up event
- */
- if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
- !txb2b) {
- u32 tarc0;
- tarc0 = er32(TARC(0));
- tarc0 &= ~SPEED_MODE_BIT;
- ew32(TARC(0), tarc0);
- }
-
- /*
- * disable TSO for pcie and 10/100 speeds, to avoid
- * some hardware issues
- */
- if (!(adapter->flags & FLAG_TSO_FORCE)) {
- switch (adapter->link_speed) {
- case SPEED_10:
- case SPEED_100:
- e_info("10/100 speed: disabling TSO\n");
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- break;
- case SPEED_1000:
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- break;
- default:
- /* oops */
- break;
- }
- }
-
- /*
- * enable transmits in the hardware, need to do this
- * after setting TARC(0)
- */
- tctl = er32(TCTL);
- tctl |= E1000_TCTL_EN;
- ew32(TCTL, tctl);
-
- /*
- * Perform any post-link-up configuration before
- * reporting link up.
- */
- if (phy->ops.cfg_on_link_up)
- phy->ops.cfg_on_link_up(hw);
-
- netif_carrier_on(netdev);
-
- if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_timer(&adapter->phy_info_timer,
- round_jiffies(jiffies + 2 * HZ));
- }
- } else {
- if (netif_carrier_ok(netdev)) {
- adapter->link_speed = 0;
- adapter->link_duplex = 0;
- /* Link status message must follow this format */
- printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
- adapter->netdev->name);
- netif_carrier_off(netdev);
- if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_timer(&adapter->phy_info_timer,
- round_jiffies(jiffies + 2 * HZ));
-
- if (adapter->flags & FLAG_RX_NEEDS_RESTART)
- schedule_work(&adapter->reset_task);
- else
- pm_schedule_suspend(netdev->dev.parent,
- LINK_TIMEOUT);
- }
- }
-
-link_up:
- spin_lock(&adapter->stats64_lock);
- e1000e_update_stats(adapter);
-
- mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
- adapter->tpt_old = adapter->stats.tpt;
- mac->collision_delta = adapter->stats.colc - adapter->colc_old;
- adapter->colc_old = adapter->stats.colc;
-
- adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
- adapter->gorc_old = adapter->stats.gorc;
- adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
- adapter->gotc_old = adapter->stats.gotc;
- spin_unlock(&adapter->stats64_lock);
-
- e1000e_update_adaptive(&adapter->hw);
-
- if (!netif_carrier_ok(netdev) &&
- (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
- /*
- * We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context).
- */
- schedule_work(&adapter->reset_task);
- /* return immediately since reset is imminent */
- return;
- }
-
- /* Simple mode for Interrupt Throttle Rate (ITR) */
- if (adapter->itr_setting == 4) {
- /*
- * Symmetric Tx/Rx gets a reduced ITR=2000;
- * Total asymmetrical Tx or Rx gets ITR=8000;
- * everyone else is between 2000-8000.
- */
- u32 goc = (adapter->gotc + adapter->gorc) / 10000;
- u32 dif = (adapter->gotc > adapter->gorc ?
- adapter->gotc - adapter->gorc :
- adapter->gorc - adapter->gotc) / 10000;
- u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
-
- ew32(ITR, 1000000000 / (itr * 256));
- }
-
- /* Cause software interrupt to ensure Rx ring is cleaned */
- if (adapter->msix_entries)
- ew32(ICS, adapter->rx_ring->ims_val);
- else
- ew32(ICS, E1000_ICS_RXDMT0);
-
- /* flush pending descriptors to memory before detecting Tx hang */
- e1000e_flush_descriptors(adapter);
-
- /* Force detection of hung controller every watchdog period */
- adapter->detect_tx_hung = 1;
-
- /*
- * With 82571 controllers, LAA may be overwritten due to controller
- * reset from the other port. Set the appropriate LAA in RAR[0]
- */
- if (e1000e_get_laa_state_82571(hw))
- e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
-
- if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
- e1000e_check_82574_phy_workaround(adapter);
-
- /* Reset the timer */
- if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 2 * HZ));
-}
-
-#define E1000_TX_FLAGS_CSUM 0x00000001
-#define E1000_TX_FLAGS_VLAN 0x00000002
-#define E1000_TX_FLAGS_TSO 0x00000004
-#define E1000_TX_FLAGS_IPV4 0x00000008
-#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
-#define E1000_TX_FLAGS_VLAN_SHIFT 16
-
-static int e1000_tso(struct e1000_adapter *adapter,
- struct sk_buff *skb)
-{
- struct e1000_ring *tx_ring = adapter->tx_ring;
- struct e1000_context_desc *context_desc;
- struct e1000_buffer *buffer_info;
- unsigned int i;
- u32 cmd_length = 0;
- u16 ipcse = 0, tucse, mss;
- u8 ipcss, ipcso, tucss, tucso, hdr_len;
-
- if (!skb_is_gso(skb))
- return 0;
-
- if (skb_header_cloned(skb)) {
- int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-
- if (err)
- return err;
- }
-
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- mss = skb_shinfo(skb)->gso_size;
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- 0, IPPROTO_TCP, 0);
- cmd_length = E1000_TXD_CMD_IP;
- ipcse = skb_transport_offset(skb) - 1;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- ipcse = 0;
- }
- ipcss = skb_network_offset(skb);
- ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
- tucss = skb_transport_offset(skb);
- tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
- tucse = 0;
-
- cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
- E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
-
- i = tx_ring->next_to_use;
- context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
-
- context_desc->lower_setup.ip_fields.ipcss = ipcss;
- context_desc->lower_setup.ip_fields.ipcso = ipcso;
- context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
- context_desc->upper_setup.tcp_fields.tucss = tucss;
- context_desc->upper_setup.tcp_fields.tucso = tucso;
- context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
- context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
- context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
- context_desc->cmd_and_length = cpu_to_le32(cmd_length);
-
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
-
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
-
- return 1;
-}
-
-static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
-{
- struct e1000_ring *tx_ring = adapter->tx_ring;
- struct e1000_context_desc *context_desc;
- struct e1000_buffer *buffer_info;
- unsigned int i;
- u8 css;
- u32 cmd_len = E1000_TXD_CMD_DEXT;
- __be16 protocol;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
-
- if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
- protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
-
- switch (protocol) {
- case cpu_to_be16(ETH_P_IP):
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- cmd_len |= E1000_TXD_CMD_TCP;
- break;
- case cpu_to_be16(ETH_P_IPV6):
- /* XXX not handling all IPV6 headers */
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- cmd_len |= E1000_TXD_CMD_TCP;
- break;
- default:
- if (unlikely(net_ratelimit()))
- e_warn("checksum_partial proto=%x!\n",
- be16_to_cpu(protocol));
- break;
- }
-
- css = skb_checksum_start_offset(skb);
-
- i = tx_ring->next_to_use;
- buffer_info = &tx_ring->buffer_info[i];
- context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
-
- context_desc->lower_setup.ip_config = 0;
- context_desc->upper_setup.tcp_fields.tucss = css;
- context_desc->upper_setup.tcp_fields.tucso =
- css + skb->csum_offset;
- context_desc->upper_setup.tcp_fields.tucse = 0;
- context_desc->tcp_seg_setup.data = 0;
- context_desc->cmd_and_length = cpu_to_le32(cmd_len);
-
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
-
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
-
- return 1;
-}
-
-#define E1000_MAX_PER_TXD 8192
-#define E1000_MAX_TXD_PWR 12
-
-static int e1000_tx_map(struct e1000_adapter *adapter,
- struct sk_buff *skb, unsigned int first,
- unsigned int max_per_txd, unsigned int nr_frags,
- unsigned int mss)
-{
- struct e1000_ring *tx_ring = adapter->tx_ring;
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_buffer *buffer_info;
- unsigned int len = skb_headlen(skb);
- unsigned int offset = 0, size, count = 0, i;
- unsigned int f, bytecount, segs;
-
- i = tx_ring->next_to_use;
-
- while (len) {
- buffer_info = &tx_ring->buffer_info[i];
- size = min(len, max_per_txd);
-
- buffer_info->length = size;
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
- buffer_info->dma = dma_map_single(&pdev->dev,
- skb->data + offset,
- size, DMA_TO_DEVICE);
- buffer_info->mapped_as_page = false;
- if (dma_mapping_error(&pdev->dev, buffer_info->dma))
- goto dma_error;
-
- len -= size;
- offset += size;
- count++;
-
- if (len) {
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
- }
-
- for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
- offset = frag->page_offset;
-
- while (len) {
- i++;
- if (i == tx_ring->count)
- i = 0;
-
- buffer_info = &tx_ring->buffer_info[i];
- size = min(len, max_per_txd);
-
- buffer_info->length = size;
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
- buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
- offset, size,
- DMA_TO_DEVICE);
- buffer_info->mapped_as_page = true;
- if (dma_mapping_error(&pdev->dev, buffer_info->dma))
- goto dma_error;
-
- len -= size;
- offset += size;
- count++;
- }
- }
-
- segs = skb_shinfo(skb)->gso_segs ? : 1;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
-
- tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[i].segs = segs;
- tx_ring->buffer_info[i].bytecount = bytecount;
- tx_ring->buffer_info[first].next_to_watch = i;
-
- return count;
-
-dma_error:
- dev_err(&pdev->dev, "Tx DMA map failed\n");
- buffer_info->dma = 0;
- if (count)
- count--;
-
- while (count--) {
- if (i == 0)
- i += tx_ring->count;
- i--;
- buffer_info = &tx_ring->buffer_info[i];
- e1000_put_txbuf(adapter, buffer_info);
- }
-
- return 0;
-}
-
-static void e1000_tx_queue(struct e1000_adapter *adapter,
- int tx_flags, int count)
-{
- struct e1000_ring *tx_ring = adapter->tx_ring;
- struct e1000_tx_desc *tx_desc = NULL;
- struct e1000_buffer *buffer_info;
- u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
- unsigned int i;
-
- if (tx_flags & E1000_TX_FLAGS_TSO) {
- txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
- E1000_TXD_CMD_TSE;
- txd_upper |= E1000_TXD_POPTS_TXSM << 8;
-
- if (tx_flags & E1000_TX_FLAGS_IPV4)
- txd_upper |= E1000_TXD_POPTS_IXSM << 8;
- }
-
- if (tx_flags & E1000_TX_FLAGS_CSUM) {
- txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
- txd_upper |= E1000_TXD_POPTS_TXSM << 8;
- }
-
- if (tx_flags & E1000_TX_FLAGS_VLAN) {
- txd_lower |= E1000_TXD_CMD_VLE;
- txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
- }
-
- i = tx_ring->next_to_use;
-
- do {
- buffer_info = &tx_ring->buffer_info[i];
- tx_desc = E1000_TX_DESC(*tx_ring, i);
- tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
- tx_desc->lower.data =
- cpu_to_le32(txd_lower | buffer_info->length);
- tx_desc->upper.data = cpu_to_le32(txd_upper);
-
- i++;
- if (i == tx_ring->count)
- i = 0;
- } while (--count > 0);
-
- tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
-
- tx_ring->next_to_use = i;
-
- if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_tdt_wa(adapter, i);
- else
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
-
- /*
- * we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
-}
-
-#define MINIMUM_DHCP_PACKET_SIZE 282
-static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
- struct sk_buff *skb)
-{
- struct e1000_hw *hw = &adapter->hw;
- u16 length, offset;
-
- if (vlan_tx_tag_present(skb)) {
- if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
- (adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
- return 0;
- }
-
- if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
- return 0;
-
- if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
- return 0;
-
- {
- const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
- struct udphdr *udp;
-
- if (ip->protocol != IPPROTO_UDP)
- return 0;
-
- udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
- if (ntohs(udp->dest) != 67)
- return 0;
-
- offset = (u8 *)udp + 8 - skb->data;
- length = skb->len - offset;
- return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
- }
-
- return 0;
-}
-
-static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- netif_stop_queue(netdev);
- /*
- * Herbert's original patch had:
- * smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it.
- */
- smp_mb();
-
- /*
- * We need to check again in a case another CPU has just
- * made room available.
- */
- if (e1000_desc_unused(adapter->tx_ring) < size)
- return -EBUSY;
-
- /* A reprieve! */
- netif_start_queue(netdev);
- ++adapter->restart_queue;
- return 0;
-}
-
-static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- if (e1000_desc_unused(adapter->tx_ring) >= size)
- return 0;
- return __e1000_maybe_stop_tx(netdev, size);
-}
-
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
-static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_ring *tx_ring = adapter->tx_ring;
- unsigned int first;
- unsigned int max_per_txd = E1000_MAX_PER_TXD;
- unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
- unsigned int tx_flags = 0;
- unsigned int len = skb_headlen(skb);
- unsigned int nr_frags;
- unsigned int mss;
- int count = 0;
- int tso;
- unsigned int f;
-
- if (test_bit(__E1000_DOWN, &adapter->state)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (skb->len <= 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- mss = skb_shinfo(skb)->gso_size;
- /*
- * The controller does a simple calculation to
- * make sure there is enough room in the FIFO before
- * initiating the DMA for each buffer. The calc is:
- * 4 = ceil(buffer len/mss). To make sure we don't
- * overrun the FIFO, adjust the max buffer len if mss
- * drops.
- */
- if (mss) {
- u8 hdr_len;
- max_per_txd = min(mss << 2, max_per_txd);
- max_txd_pwr = fls(max_per_txd) - 1;
-
- /*
- * TSO Workaround for 82571/2/3 Controllers -- if skb->data
- * points to just header, pull a few bytes of payload from
- * frags into skb->data
- */
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- /*
- * we do this workaround for ES2LAN, but it is un-necessary,
- * avoiding it could save a lot of cycles
- */
- if (skb->data_len && (hdr_len == len)) {
- unsigned int pull_size;
-
- pull_size = min((unsigned int)4, skb->data_len);
- if (!__pskb_pull_tail(skb, pull_size)) {
- e_err("__pskb_pull_tail failed.\n");
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
- len = skb_headlen(skb);
- }
- }
-
- /* reserve a descriptor for the offload context */
- if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
- count++;
- count++;
-
- count += TXD_USE_COUNT(len, max_txd_pwr);
-
- nr_frags = skb_shinfo(skb)->nr_frags;
- for (f = 0; f < nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
- max_txd_pwr);
-
- if (adapter->hw.mac.tx_pkt_filtering)
- e1000_transfer_dhcp_info(adapter, skb);
-
- /*
- * need: count + 2 desc gap to keep tail from touching
- * head, otherwise try next time
- */
- if (e1000_maybe_stop_tx(netdev, count + 2))
- return NETDEV_TX_BUSY;
-
- if (vlan_tx_tag_present(skb)) {
- tx_flags |= E1000_TX_FLAGS_VLAN;
- tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
- }
-
- first = tx_ring->next_to_use;
-
- tso = e1000_tso(adapter, skb);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (tso)
- tx_flags |= E1000_TX_FLAGS_TSO;
- else if (e1000_tx_csum(adapter, skb))
- tx_flags |= E1000_TX_FLAGS_CSUM;
-
- /*
- * Old method was to assume IPv4 packet by default if TSO was enabled.
- * 82571 hardware supports TSO capabilities for IPv6 as well...
- * no longer assume, we must.
- */
- if (skb->protocol == htons(ETH_P_IP))
- tx_flags |= E1000_TX_FLAGS_IPV4;
-
- /* if count is 0 then mapping error has occurred */
- count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
- if (count) {
- e1000_tx_queue(adapter, tx_flags, count);
- /* Make sure there is space in the ring for the next send. */
- e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
-
- } else {
- dev_kfree_skb_any(skb);
- tx_ring->buffer_info[first].time_stamp = 0;
- tx_ring->next_to_use = first;
- }
-
- return NETDEV_TX_OK;
-}
-
-/**
- * e1000_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
- **/
-static void e1000_tx_timeout(struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- /* Do the reset outside of interrupt context */
- adapter->tx_timeout_count++;
- schedule_work(&adapter->reset_task);
-}
-
-static void e1000_reset_task(struct work_struct *work)
-{
- struct e1000_adapter *adapter;
- adapter = container_of(work, struct e1000_adapter, reset_task);
-
- /* don't run the task if already down */
- if (test_bit(__E1000_DOWN, &adapter->state))
- return;
-
- if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
- (adapter->flags & FLAG_RX_RESTART_NOW))) {
- e1000e_dump(adapter);
- e_err("Reset adapter\n");
- }
- e1000e_reinit_locked(adapter);
-}
-
-/**
- * e1000_get_stats64 - Get System Network Statistics
- * @netdev: network interface device structure
- * @stats: rtnl_link_stats64 pointer
- *
- * Returns the address of the device statistics structure.
- **/
-struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- memset(stats, 0, sizeof(struct rtnl_link_stats64));
- spin_lock(&adapter->stats64_lock);
- e1000e_update_stats(adapter);
- /* Fill out the OS statistics structure */
- stats->rx_bytes = adapter->stats.gorc;
- stats->rx_packets = adapter->stats.gprc;
- stats->tx_bytes = adapter->stats.gotc;
- stats->tx_packets = adapter->stats.gptc;
- stats->multicast = adapter->stats.mprc;
- stats->collisions = adapter->stats.colc;
-
- /* Rx Errors */
-
- /*
- * RLEC on some newer hardware can be incorrect so build
- * our own version based on RUC and ROC
- */
- stats->rx_errors = adapter->stats.rxerrc +
- adapter->stats.crcerrs + adapter->stats.algnerrc +
- adapter->stats.ruc + adapter->stats.roc +
- adapter->stats.cexterr;
- stats->rx_length_errors = adapter->stats.ruc +
- adapter->stats.roc;
- stats->rx_crc_errors = adapter->stats.crcerrs;
- stats->rx_frame_errors = adapter->stats.algnerrc;
- stats->rx_missed_errors = adapter->stats.mpc;
-
- /* Tx Errors */
- stats->tx_errors = adapter->stats.ecol +
- adapter->stats.latecol;
- stats->tx_aborted_errors = adapter->stats.ecol;
- stats->tx_window_errors = adapter->stats.latecol;
- stats->tx_carrier_errors = adapter->stats.tncrs;
-
- /* Tx Dropped needs to be maintained elsewhere */
-
- spin_unlock(&adapter->stats64_lock);
- return stats;
-}
-
-/**
- * e1000_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
- * @new_mtu: new value for maximum frame size
- *
- * Returns 0 on success, negative on failure
- **/
-static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
-
- /* Jumbo frame support */
- if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
- !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
- e_err("Jumbo Frames not supported.\n");
- return -EINVAL;
- }
-
- /* Supported frame sizes */
- if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
- (max_frame > adapter->max_hw_frame_size)) {
- e_err("Unsupported MTU setting\n");
- return -EINVAL;
- }
-
- /* Jumbo frame workaround on 82579 requires CRC be stripped */
- if ((adapter->hw.mac.type == e1000_pch2lan) &&
- !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
- (new_mtu > ETH_DATA_LEN)) {
- e_err("Jumbo Frames not supported on 82579 when CRC "
- "stripping is disabled.\n");
- return -EINVAL;
- }
-
- /* 82573 Errata 17 */
- if (((adapter->hw.mac.type == e1000_82573) ||
- (adapter->hw.mac.type == e1000_82574)) &&
- (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
- adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
- e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
- }
-
- while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
- /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
- adapter->max_frame_size = max_frame;
- e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
- if (netif_running(netdev))
- e1000e_down(adapter);
-
- /*
- * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
- * means we reserve 2 more, this pushes us to allocate from the next
- * larger slab size.
- * i.e. RXBUFFER_2048 --> size-4096 slab
- * However with the new *_jumbo_rx* routines, jumbo receives will use
- * fragmented skbs
- */
-
- if (max_frame <= 2048)
- adapter->rx_buffer_len = 2048;
- else
- adapter->rx_buffer_len = 4096;
-
- /* adjust allocation if LPE protects us, and we aren't using SBP */
- if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
- (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
- adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
- + ETH_FCS_LEN;
-
- if (netif_running(netdev))
- e1000e_up(adapter);
- else
- e1000e_reset(adapter);
-
- clear_bit(__E1000_RESETTING, &adapter->state);
-
- return 0;
-}
-
-static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- if (adapter->hw.phy.media_type != e1000_media_type_copper)
- return -EOPNOTSUPP;
-
- switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = adapter->hw.phy.addr;
- break;
- case SIOCGMIIREG:
- e1000_phy_read_status(adapter);
-
- switch (data->reg_num & 0x1F) {
- case MII_BMCR:
- data->val_out = adapter->phy_regs.bmcr;
- break;
- case MII_BMSR:
- data->val_out = adapter->phy_regs.bmsr;
- break;
- case MII_PHYSID1:
- data->val_out = (adapter->hw.phy.id >> 16);
- break;
- case MII_PHYSID2:
- data->val_out = (adapter->hw.phy.id & 0xFFFF);
- break;
- case MII_ADVERTISE:
- data->val_out = adapter->phy_regs.advertise;
- break;
- case MII_LPA:
- data->val_out = adapter->phy_regs.lpa;
- break;
- case MII_EXPANSION:
- data->val_out = adapter->phy_regs.expansion;
- break;
- case MII_CTRL1000:
- data->val_out = adapter->phy_regs.ctrl1000;
- break;
- case MII_STAT1000:
- data->val_out = adapter->phy_regs.stat1000;
- break;
- case MII_ESTATUS:
- data->val_out = adapter->phy_regs.estatus;
- break;
- default:
- return -EIO;
- }
- break;
- case SIOCSMIIREG:
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return e1000_mii_ioctl(netdev, ifr, cmd);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 i, mac_reg;
- u16 phy_reg, wuc_enable;
- int retval = 0;
-
- /* copy MAC RARs to PHY RARs */
- e1000_copy_rx_addrs_to_phy_ich8lan(hw);
-
- retval = hw->phy.ops.acquire(hw);
- if (retval) {
- e_err("Could not acquire PHY\n");
- return retval;
- }
-
- /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
- retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
- if (retval)
- goto out;
-
- /* copy MAC MTA to PHY MTA - only needed for pchlan */
- for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
- mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
- hw->phy.ops.write_reg_page(hw, BM_MTA(i),
- (u16)(mac_reg & 0xFFFF));
- hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
- (u16)((mac_reg >> 16) & 0xFFFF));
- }
-
- /* configure PHY Rx Control register */
- hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
- mac_reg = er32(RCTL);
- if (mac_reg & E1000_RCTL_UPE)
- phy_reg |= BM_RCTL_UPE;
- if (mac_reg & E1000_RCTL_MPE)
- phy_reg |= BM_RCTL_MPE;
- phy_reg &= ~(BM_RCTL_MO_MASK);
- if (mac_reg & E1000_RCTL_MO_3)
- phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
- << BM_RCTL_MO_SHIFT);
- if (mac_reg & E1000_RCTL_BAM)
- phy_reg |= BM_RCTL_BAM;
- if (mac_reg & E1000_RCTL_PMCF)
- phy_reg |= BM_RCTL_PMCF;
- mac_reg = er32(CTRL);
- if (mac_reg & E1000_CTRL_RFCE)
- phy_reg |= BM_RCTL_RFCE;
- hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
-
- /* enable PHY wakeup in MAC register */
- ew32(WUFC, wufc);
- ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
-
- /* configure and enable PHY wakeup in PHY registers */
- hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
- hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
-
- /* activate PHY wakeup */
- wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
- retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
- if (retval)
- e_err("Could not set PHY Host Wakeup bit\n");
-out:
- hw->phy.ops.release(hw);
-
- return retval;
-}
-
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
- bool runtime)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl, ctrl_ext, rctl, status;
- /* Runtime suspend should only enable wakeup for link changes */
- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
- int retval = 0;
-
- netif_device_detach(netdev);
-
- if (netif_running(netdev)) {
- WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
- e1000e_down(adapter);
- e1000_free_irq(adapter);
- }
- e1000e_reset_interrupt_capability(adapter);
-
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- status = er32(STATUS);
- if (status & E1000_STATUS_LU)
- wufc &= ~E1000_WUFC_LNKC;
-
- if (wufc) {
- e1000_setup_rctl(adapter);
- e1000_set_multi(netdev);
-
- /* turn on all-multi mode if wake on multicast is enabled */
- if (wufc & E1000_WUFC_MC) {
- rctl = er32(RCTL);
- rctl |= E1000_RCTL_MPE;
- ew32(RCTL, rctl);
- }
-
- ctrl = er32(CTRL);
- /* advertise wake from D3Cold */
- #define E1000_CTRL_ADVD3WUC 0x00100000
- /* phy power management enable */
- #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
- ctrl |= E1000_CTRL_ADVD3WUC;
- if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
- ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
- ew32(CTRL, ctrl);
-
- if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
- adapter->hw.phy.media_type ==
- e1000_media_type_internal_serdes) {
- /* keep the laser running in D3 */
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
- ew32(CTRL_EXT, ctrl_ext);
- }
-
- if (adapter->flags & FLAG_IS_ICH)
- e1000_suspend_workarounds_ich8lan(&adapter->hw);
-
- /* Allow time for pending master requests to run */
- e1000e_disable_pcie_master(&adapter->hw);
-
- if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
- /* enable wakeup by the PHY */
- retval = e1000_init_phy_wakeup(adapter, wufc);
- if (retval)
- return retval;
- } else {
- /* enable wakeup by the MAC */
- ew32(WUFC, wufc);
- ew32(WUC, E1000_WUC_PME_EN);
- }
- } else {
- ew32(WUC, 0);
- ew32(WUFC, 0);
- }
-
- *enable_wake = !!wufc;
-
- /* make sure adapter isn't asleep if manageability is enabled */
- if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
- (hw->mac.ops.check_mng_mode(hw)))
- *enable_wake = true;
-
- if (adapter->hw.phy.type == e1000_phy_igp_3)
- e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
-
- /*
- * Release control of h/w to f/w. If f/w is AMT enabled, this
- * would have already happened in close and is redundant.
- */
- e1000e_release_hw_control(adapter);
-
- pci_disable_device(pdev);
-
- return 0;
-}
-
-static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
-{
- if (sleep && wake) {
- pci_prepare_to_sleep(pdev);
- return;
- }
-
- pci_wake_from_d3(pdev, wake);
- pci_set_power_state(pdev, PCI_D3hot);
-}
-
-static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
- bool wake)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- /*
- * The pci-e switch on some quad port adapters will report a
- * correctable error when the MAC transitions from D0 to D3. To
- * prevent this we need to mask off the correctable errors on the
- * downstream port of the pci-e switch.
- */
- if (adapter->flags & FLAG_IS_QUAD_PORT) {
- struct pci_dev *us_dev = pdev->bus->self;
- int pos = pci_pcie_cap(us_dev);
- u16 devctl;
-
- pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
- pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
- (devctl & ~PCI_EXP_DEVCTL_CERE));
-
- e1000_power_off(pdev, sleep, wake);
-
- pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
- } else {
- e1000_power_off(pdev, sleep, wake);
- }
-}
-
-#ifdef CONFIG_PCIEASPM
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
-{
- pci_disable_link_state_locked(pdev, state);
-}
-#else
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
-{
- int pos;
- u16 reg16;
-
- /*
- * Both device and parent should have the same ASPM setting.
- * Disable ASPM in downstream component first and then upstream.
- */
- pos = pci_pcie_cap(pdev);
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- reg16 &= ~state;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
-
- if (!pdev->bus->self)
- return;
-
- pos = pci_pcie_cap(pdev->bus->self);
- pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
- reg16 &= ~state;
- pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
-}
-#endif
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
-{
- dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
- (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
- (state & PCIE_LINK_STATE_L1) ? "L1" : "");
-
- __e1000e_disable_aspm(pdev, state);
-}
-
-#ifdef CONFIG_PM
-static bool e1000e_pm_ready(struct e1000_adapter *adapter)
-{
- return !!adapter->tx_ring->buffer_info;
-}
-
-static int __e1000_resume(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u16 aspm_disable_flag = 0;
- u32 err;
-
- if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
- aspm_disable_flag = PCIE_LINK_STATE_L0S;
- if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
- aspm_disable_flag |= PCIE_LINK_STATE_L1;
- if (aspm_disable_flag)
- e1000e_disable_aspm(pdev, aspm_disable_flag);
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_save_state(pdev);
-
- e1000e_set_interrupt_capability(adapter);
- if (netif_running(netdev)) {
- err = e1000_request_irq(adapter);
- if (err)
- return err;
- }
-
- if (hw->mac.type == e1000_pch2lan)
- e1000_resume_workarounds_pchlan(&adapter->hw);
-
- e1000e_power_up_phy(adapter);
-
- /* report the system wakeup cause from S3/S4 */
- if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
- u16 phy_data;
-
- e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
- if (phy_data) {
- e_info("PHY Wakeup cause - %s\n",
- phy_data & E1000_WUS_EX ? "Unicast Packet" :
- phy_data & E1000_WUS_MC ? "Multicast Packet" :
- phy_data & E1000_WUS_BC ? "Broadcast Packet" :
- phy_data & E1000_WUS_MAG ? "Magic Packet" :
- phy_data & E1000_WUS_LNKC ? "Link Status "
- " Change" : "other");
- }
- e1e_wphy(&adapter->hw, BM_WUS, ~0);
- } else {
- u32 wus = er32(WUS);
- if (wus) {
- e_info("MAC Wakeup cause - %s\n",
- wus & E1000_WUS_EX ? "Unicast Packet" :
- wus & E1000_WUS_MC ? "Multicast Packet" :
- wus & E1000_WUS_BC ? "Broadcast Packet" :
- wus & E1000_WUS_MAG ? "Magic Packet" :
- wus & E1000_WUS_LNKC ? "Link Status Change" :
- "other");
- }
- ew32(WUS, ~0);
- }
-
- e1000e_reset(adapter);
-
- e1000_init_manageability_pt(adapter);
-
- if (netif_running(netdev))
- e1000e_up(adapter);
-
- netif_device_attach(netdev);
-
- /*
- * If the controller has AMT, do not set DRV_LOAD until the interface
- * is up. For all other cases, let the f/w know that the h/w is now
- * under the control of the driver.
- */
- if (!(adapter->flags & FLAG_HAS_AMT))
- e1000e_get_hw_control(adapter);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int e1000_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int retval;
- bool wake;
-
- retval = __e1000_shutdown(pdev, &wake, false);
- if (!retval)
- e1000_complete_shutdown(pdev, true, wake);
-
- return retval;
-}
-
-static int e1000_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- if (e1000e_pm_ready(adapter))
- adapter->idle_check = true;
-
- return __e1000_resume(pdev);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_PM_RUNTIME
-static int e1000_runtime_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- if (e1000e_pm_ready(adapter)) {
- bool wake;
-
- __e1000_shutdown(pdev, &wake, true);
- }
-
- return 0;
-}
-
-static int e1000_idle(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- if (!e1000e_pm_ready(adapter))
- return 0;
-
- if (adapter->idle_check) {
- adapter->idle_check = false;
- if (!e1000e_has_link(adapter))
- pm_schedule_suspend(dev, MSEC_PER_SEC);
- }
-
- return -EBUSY;
-}
-
-static int e1000_runtime_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- if (!e1000e_pm_ready(adapter))
- return 0;
-
- adapter->idle_check = !dev->power.runtime_auto;
- return __e1000_resume(pdev);
-}
-#endif /* CONFIG_PM_RUNTIME */
-#endif /* CONFIG_PM */
-
-static void e1000_shutdown(struct pci_dev *pdev)
-{
- bool wake = false;
-
- __e1000_shutdown(pdev, &wake, false);
-
- if (system_state == SYSTEM_POWER_OFF)
- e1000_complete_shutdown(pdev, false, wake);
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-static irqreturn_t e1000_intr_msix(int irq, void *data)
-{
- struct net_device *netdev = data;
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- if (adapter->msix_entries) {
- int vector, msix_irq;
-
- vector = 0;
- msix_irq = adapter->msix_entries[vector].vector;
- disable_irq(msix_irq);
- e1000_intr_msix_rx(msix_irq, netdev);
- enable_irq(msix_irq);
-
- vector++;
- msix_irq = adapter->msix_entries[vector].vector;
- disable_irq(msix_irq);
- e1000_intr_msix_tx(msix_irq, netdev);
- enable_irq(msix_irq);
-
- vector++;
- msix_irq = adapter->msix_entries[vector].vector;
- disable_irq(msix_irq);
- e1000_msix_other(msix_irq, netdev);
- enable_irq(msix_irq);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void e1000_netpoll(struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- switch (adapter->int_mode) {
- case E1000E_INT_MODE_MSIX:
- e1000_intr_msix(adapter->pdev->irq, netdev);
- break;
- case E1000E_INT_MODE_MSI:
- disable_irq(adapter->pdev->irq);
- e1000_intr_msi(adapter->pdev->irq, netdev);
- enable_irq(adapter->pdev->irq);
- break;
- default: /* E1000E_INT_MODE_LEGACY */
- disable_irq(adapter->pdev->irq);
- e1000_intr(adapter->pdev->irq, netdev);
- enable_irq(adapter->pdev->irq);
- break;
- }
-}
-#endif
-
-/**
- * e1000_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
- *
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
-static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- if (netif_running(netdev))
- e1000e_down(adapter);
- pci_disable_device(pdev);
-
- /* Request a slot slot reset. */
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * e1000_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the e1000_resume routine.
- */
-static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u16 aspm_disable_flag = 0;
- int err;
- pci_ers_result_t result;
-
- if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
- aspm_disable_flag = PCIE_LINK_STATE_L0S;
- if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
- aspm_disable_flag |= PCIE_LINK_STATE_L1;
- if (aspm_disable_flag)
- e1000e_disable_aspm(pdev, aspm_disable_flag);
-
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "Cannot re-enable PCI device after reset.\n");
- result = PCI_ERS_RESULT_DISCONNECT;
- } else {
- pci_set_master(pdev);
- pdev->state_saved = true;
- pci_restore_state(pdev);
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
- e1000e_reset(adapter);
- ew32(WUS, ~0);
- result = PCI_ERS_RESULT_RECOVERED;
- }
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
- return result;
-}
-
-/**
- * e1000_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells us that
- * its OK to resume normal operation. Implementation resembles the
- * second-half of the e1000_resume routine.
- */
-static void e1000_io_resume(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- e1000_init_manageability_pt(adapter);
-
- if (netif_running(netdev)) {
- if (e1000e_up(adapter)) {
- dev_err(&pdev->dev,
- "can't bring device back up after reset\n");
- return;
- }
- }
-
- netif_device_attach(netdev);
-
- /*
- * If the controller has AMT, do not set DRV_LOAD until the interface
- * is up. For all other cases, let the f/w know that the h/w is now
- * under the control of the driver.
- */
- if (!(adapter->flags & FLAG_HAS_AMT))
- e1000e_get_hw_control(adapter);
-
-}
-
-static void e1000_print_device_info(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- u32 ret_val;
- u8 pba_str[E1000_PBANUM_LENGTH];
-
- /* print bus type/speed/width info */
- e_info("(PCI Express:2.5GT/s:%s) %pM\n",
- /* bus width */
- ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
- "Width x1"),
- /* MAC address */
- netdev->dev_addr);
- e_info("Intel(R) PRO/%s Network Connection\n",
- (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
- ret_val = e1000_read_pba_string_generic(hw, pba_str,
- E1000_PBANUM_LENGTH);
- if (ret_val)
- strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
- e_info("MAC: %d, PHY: %d, PBA No: %s\n",
- hw->mac.type, hw->phy.type, pba_str);
-}
-
-static void e1000_eeprom_checks(struct e1000_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- int ret_val;
- u16 buf = 0;
-
- if (hw->mac.type != e1000_82573)
- return;
-
- ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
- if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
- /* Deep Smart Power Down (DSPD) */
- dev_warn(&adapter->pdev->dev,
- "Warning: detected DSPD enabled in EEPROM\n");
- }
-}
-
-static const struct net_device_ops e1000e_netdev_ops = {
- .ndo_open = e1000_open,
- .ndo_stop = e1000_close,
- .ndo_start_xmit = e1000_xmit_frame,
- .ndo_get_stats64 = e1000e_get_stats64,
- .ndo_set_multicast_list = e1000_set_multi,
- .ndo_set_mac_address = e1000_set_mac,
- .ndo_change_mtu = e1000_change_mtu,
- .ndo_do_ioctl = e1000_ioctl,
- .ndo_tx_timeout = e1000_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
-
- .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = e1000_netpoll,
-#endif
-};
-
-/**
- * e1000_probe - Device Initialization Routine
- * @pdev: PCI device information struct
- * @ent: entry in e1000_pci_tbl
- *
- * Returns 0 on success, negative on failure
- *
- * e1000_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
- * and a hardware reset occur.
- **/
-static int __devinit e1000_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct net_device *netdev;
- struct e1000_adapter *adapter;
- struct e1000_hw *hw;
- const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
- resource_size_t mmio_start, mmio_len;
- resource_size_t flash_start, flash_len;
-
- static int cards_found;
- u16 aspm_disable_flag = 0;
- int i, err, pci_using_dac;
- u16 eeprom_data = 0;
- u16 eeprom_apme_mask = E1000_EEPROM_APME;
-
- if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
- aspm_disable_flag = PCIE_LINK_STATE_L0S;
- if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
- aspm_disable_flag |= PCIE_LINK_STATE_L1;
- if (aspm_disable_flag)
- e1000e_disable_aspm(pdev, aspm_disable_flag);
-
- err = pci_enable_device_mem(pdev);
- if (err)
- return err;
-
- pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
- goto err_dma;
- }
- }
- }
-
- err = pci_request_selected_regions_exclusive(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM),
- e1000e_driver_name);
- if (err)
- goto err_pci_reg;
-
- /* AER (Advanced Error Reporting) hooks */
- pci_enable_pcie_error_reporting(pdev);
-
- pci_set_master(pdev);
- /* PCI config space info */
- err = pci_save_state(pdev);
- if (err)
- goto err_alloc_etherdev;
-
- err = -ENOMEM;
- netdev = alloc_etherdev(sizeof(struct e1000_adapter));
- if (!netdev)
- goto err_alloc_etherdev;
-
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- netdev->irq = pdev->irq;
-
- pci_set_drvdata(pdev, netdev);
- adapter = netdev_priv(netdev);
- hw = &adapter->hw;
- adapter->netdev = netdev;
- adapter->pdev = pdev;
- adapter->ei = ei;
- adapter->pba = ei->pba;
- adapter->flags = ei->flags;
- adapter->flags2 = ei->flags2;
- adapter->hw.adapter = adapter;
- adapter->hw.mac.type = ei->mac;
- adapter->max_hw_frame_size = ei->max_hw_frame_size;
- adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
-
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
-
- err = -EIO;
- adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
- if (!adapter->hw.hw_addr)
- goto err_ioremap;
-
- if ((adapter->flags & FLAG_HAS_FLASH) &&
- (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
- flash_start = pci_resource_start(pdev, 1);
- flash_len = pci_resource_len(pdev, 1);
- adapter->hw.flash_address = ioremap(flash_start, flash_len);
- if (!adapter->hw.flash_address)
- goto err_flashmap;
- }
-
- /* construct the net_device struct */
- netdev->netdev_ops = &e1000e_netdev_ops;
- e1000e_set_ethtool_ops(netdev);
- netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
-
- netdev->mem_start = mmio_start;
- netdev->mem_end = mmio_start + mmio_len;
-
- adapter->bd_number = cards_found++;
-
- e1000e_check_options(adapter);
-
- /* setup adapter struct */
- err = e1000_sw_init(adapter);
- if (err)
- goto err_sw_init;
-
- memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
- memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
- memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
-
- err = ei->get_variants(adapter);
- if (err)
- goto err_hw_init;
-
- if ((adapter->flags & FLAG_IS_ICH) &&
- (adapter->flags & FLAG_READ_ONLY_NVM))
- e1000e_write_protect_nvm_ich8lan(&adapter->hw);
-
- hw->mac.ops.get_bus_info(&adapter->hw);
-
- adapter->hw.phy.autoneg_wait_to_complete = 0;
-
- /* Copper options */
- if (adapter->hw.phy.media_type == e1000_media_type_copper) {
- adapter->hw.phy.mdix = AUTO_ALL_MODES;
- adapter->hw.phy.disable_polarity_correction = 0;
- adapter->hw.phy.ms_type = e1000_ms_hw_default;
- }
-
- if (e1000_check_reset_block(&adapter->hw))
- e_info("PHY reset is blocked due to SOL/IDER session.\n");
-
- netdev->features = NETIF_F_SG |
- NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX;
-
- if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
- netdev->features |= NETIF_F_HW_VLAN_FILTER;
-
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
-
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_TSO6;
- netdev->vlan_features |= NETIF_F_HW_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
-
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
-
- if (e1000e_enable_mng_pass_thru(&adapter->hw))
- adapter->flags |= FLAG_MNG_PT_ENABLED;
-
- /*
- * before reading the NVM, reset the controller to
- * put the device in a known good starting state
- */
- adapter->hw.mac.ops.reset_hw(&adapter->hw);
-
- /*
- * systems with ASPM and others may see the checksum fail on the first
- * attempt. Let's give it a few tries
- */
- for (i = 0;; i++) {
- if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
- break;
- if (i == 2) {
- e_err("The NVM Checksum Is Not Valid\n");
- err = -EIO;
- goto err_eeprom;
- }
- }
-
- e1000_eeprom_checks(adapter);
-
- /* copy the MAC address */
- if (e1000e_read_mac_addr(&adapter->hw))
- e_err("NVM Read Error while reading MAC address\n");
-
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
-
- if (!is_valid_ether_addr(netdev->perm_addr)) {
- e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
- err = -EIO;
- goto err_eeprom;
- }
-
- init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = e1000_watchdog;
- adapter->watchdog_timer.data = (unsigned long) adapter;
-
- init_timer(&adapter->phy_info_timer);
- adapter->phy_info_timer.function = e1000_update_phy_info;
- adapter->phy_info_timer.data = (unsigned long) adapter;
-
- INIT_WORK(&adapter->reset_task, e1000_reset_task);
- INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
- INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
- INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
- INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
-
- /* Initialize link parameters. User can change them with ethtool */
- adapter->hw.mac.autoneg = 1;
- adapter->fc_autoneg = 1;
- adapter->hw.fc.requested_mode = e1000_fc_default;
- adapter->hw.fc.current_mode = e1000_fc_default;
- adapter->hw.phy.autoneg_advertised = 0x2f;
-
- /* ring size defaults */
- adapter->rx_ring->count = 256;
- adapter->tx_ring->count = 256;
-
- /*
- * Initial Wake on LAN setting - If APM wake is enabled in
- * the EEPROM, enable the ACPI Magic Packet filter
- */
- if (adapter->flags & FLAG_APME_IN_WUC) {
- /* APME bit in EEPROM is mapped to WUC.APME */
- eeprom_data = er32(WUC);
- eeprom_apme_mask = E1000_WUC_APME;
- if ((hw->mac.type > e1000_ich10lan) &&
- (eeprom_data & E1000_WUC_PHY_WAKE))
- adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
- } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
- if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
- (adapter->hw.bus.func == 1))
- e1000_read_nvm(&adapter->hw,
- NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
- else
- e1000_read_nvm(&adapter->hw,
- NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- }
-
- /* fetch WoL from EEPROM */
- if (eeprom_data & eeprom_apme_mask)
- adapter->eeprom_wol |= E1000_WUFC_MAG;
-
- /*
- * now that we have the eeprom settings, apply the special cases
- * where the eeprom may be wrong or the board simply won't support
- * wake on lan on a particular port
- */
- if (!(adapter->flags & FLAG_HAS_WOL))
- adapter->eeprom_wol = 0;
-
- /* initialize the wol settings based on the eeprom settings */
- adapter->wol = adapter->eeprom_wol;
- device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
-
- /* save off EEPROM version number */
- e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
-
- /* reset the hardware with the new settings */
- e1000e_reset(adapter);
-
- /*
- * If the controller has AMT, do not set DRV_LOAD until the interface
- * is up. For all other cases, let the f/w know that the h/w is now
- * under the control of the driver.
- */
- if (!(adapter->flags & FLAG_HAS_AMT))
- e1000e_get_hw_control(adapter);
-
- strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
- err = register_netdev(netdev);
- if (err)
- goto err_register;
-
- /* carrier off reporting is important to ethtool even BEFORE open */
- netif_carrier_off(netdev);
-
- e1000_print_device_info(adapter);
-
- if (pci_dev_run_wake(pdev))
- pm_runtime_put_noidle(&pdev->dev);
-
- return 0;
-
-err_register:
- if (!(adapter->flags & FLAG_HAS_AMT))
- e1000e_release_hw_control(adapter);
-err_eeprom:
- if (!e1000_check_reset_block(&adapter->hw))
- e1000_phy_hw_reset(&adapter->hw);
-err_hw_init:
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
-err_sw_init:
- if (adapter->hw.flash_address)
- iounmap(adapter->hw.flash_address);
- e1000e_reset_interrupt_capability(adapter);
-err_flashmap:
- iounmap(adapter->hw.hw_addr);
-err_ioremap:
- free_netdev(netdev);
-err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
-err_pci_reg:
-err_dma:
- pci_disable_device(pdev);
- return err;
-}
-
-/**
- * e1000_remove - Device Removal Routine
- * @pdev: PCI device information struct
- *
- * e1000_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. The could be caused by a
- * Hot-Plug event, or because the driver is going to be removed from
- * memory.
- **/
-static void __devexit e1000_remove(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
- bool down = test_bit(__E1000_DOWN, &adapter->state);
-
- /*
- * The timers may be rescheduled, so explicitly disable them
- * from being rescheduled.
- */
- if (!down)
- set_bit(__E1000_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
-
- cancel_work_sync(&adapter->reset_task);
- cancel_work_sync(&adapter->watchdog_task);
- cancel_work_sync(&adapter->downshift_task);
- cancel_work_sync(&adapter->update_phy_task);
- cancel_work_sync(&adapter->print_hang_task);
-
- if (!(netdev->flags & IFF_UP))
- e1000_power_down_phy(adapter);
-
- /* Don't lie to e1000_close() down the road. */
- if (!down)
- clear_bit(__E1000_DOWN, &adapter->state);
- unregister_netdev(netdev);
-
- if (pci_dev_run_wake(pdev))
- pm_runtime_get_noresume(&pdev->dev);
-
- /*
- * Release control of h/w to f/w. If f/w is AMT enabled, this
- * would have already happened in close and is redundant.
- */
- e1000e_release_hw_control(adapter);
-
- e1000e_reset_interrupt_capability(adapter);
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
-
- iounmap(adapter->hw.hw_addr);
- if (adapter->hw.flash_address)
- iounmap(adapter->hw.flash_address);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
-
- free_netdev(netdev);
-
- /* AER disable */
- pci_disable_pcie_error_reporting(pdev);
-
- pci_disable_device(pdev);
-}
-
-/* PCI Error Recovery (ERS) */
-static struct pci_error_handlers e1000_err_handler = {
- .error_detected = e1000_io_error_detected,
- .slot_reset = e1000_io_slot_reset,
- .resume = e1000_io_resume,
-};
-
-static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
-
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
-
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
-
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
-
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
- board_80003es2lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
- board_80003es2lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
- board_80003es2lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
- board_80003es2lan },
-
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
-
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
-
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
-
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
-
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
-
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
-
- { } /* terminate list */
-};
-MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
-
-#ifdef CONFIG_PM
-static const struct dev_pm_ops e1000_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
- SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
- e1000_runtime_resume, e1000_idle)
-};
-#endif
-
-/* PCI Device API Driver */
-static struct pci_driver e1000_driver = {
- .name = e1000e_driver_name,
- .id_table = e1000_pci_tbl,
- .probe = e1000_probe,
- .remove = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM
- .driver.pm = &e1000_pm_ops,
-#endif
- .shutdown = e1000_shutdown,
- .err_handler = &e1000_err_handler
-};
-
-/**
- * e1000_init_module - Driver Registration Routine
- *
- * e1000_init_module is the first routine called when the driver is
- * loaded. All it does is register with the PCI subsystem.
- **/
-static int __init e1000_init_module(void)
-{
- int ret;
- pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
- e1000e_driver_version);
- pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
- ret = pci_register_driver(&e1000_driver);
-
- return ret;
-}
-module_init(e1000_init_module);
-
-/**
- * e1000_exit_module - Driver Exit Cleanup Routine
- *
- * e1000_exit_module is called just before the driver is removed
- * from memory.
- **/
-static void __exit e1000_exit_module(void)
-{
- pci_unregister_driver(&e1000_driver);
-}
-module_exit(e1000_exit_module);
-
-
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-/* e1000_main.c */
diff --git a/drivers/net/enic/enic_pp.c b/drivers/net/enic/enic_pp.c
deleted file mode 100644
index ffaa75dd1de..00000000000
--- a/drivers/net/enic/enic_pp.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/rtnetlink.h>
-#include <net/ip.h>
-
-#include "vnic_vic.h"
-#include "enic_res.h"
-#include "enic.h"
-#include "enic_dev.h"
-
-static int enic_set_port_profile(struct enic *enic)
-{
- struct net_device *netdev = enic->netdev;
- struct vic_provinfo *vp;
- const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
- const u16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
- char uuid_str[38];
- char client_mac_str[18];
- u8 *client_mac;
- int err;
-
- if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
- return -EINVAL;
-
- vp = vic_provinfo_alloc(GFP_KERNEL, oui,
- VIC_PROVINFO_GENERIC_TYPE);
- if (!vp)
- return -ENOMEM;
-
- VIC_PROVINFO_ADD_TLV(vp,
- VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
- strlen(enic->pp.name) + 1, enic->pp.name);
-
- if (!is_zero_ether_addr(enic->pp.mac_addr))
- client_mac = enic->pp.mac_addr;
- else
- client_mac = netdev->dev_addr;
-
- VIC_PROVINFO_ADD_TLV(vp,
- VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
- ETH_ALEN, client_mac);
-
- snprintf(client_mac_str, sizeof(client_mac_str), "%pM", client_mac);
- VIC_PROVINFO_ADD_TLV(vp,
- VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
- sizeof(client_mac_str), client_mac_str);
-
- if (enic->pp.set & ENIC_SET_INSTANCE) {
- sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
- VIC_PROVINFO_ADD_TLV(vp,
- VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
- sizeof(uuid_str), uuid_str);
- }
-
- if (enic->pp.set & ENIC_SET_HOST) {
- sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
- VIC_PROVINFO_ADD_TLV(vp,
- VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
- sizeof(uuid_str), uuid_str);
- }
-
- VIC_PROVINFO_ADD_TLV(vp,
- VIC_GENERIC_PROV_TLV_OS_TYPE,
- sizeof(os_type), &os_type);
-
- err = enic_dev_status_to_errno(enic_dev_init_prov2(enic, vp));
-
-add_tlv_failure:
- vic_provinfo_free(vp);
-
- return err;
-}
-
-static int enic_unset_port_profile(struct enic *enic)
-{
- int err;
-
- err = enic_vnic_dev_deinit(enic);
- if (err)
- return enic_dev_status_to_errno(err);
-
- enic_reset_addr_lists(enic);
-
- return 0;
-}
-
-static int enic_are_pp_different(struct enic_port_profile *pp1,
- struct enic_port_profile *pp2)
-{
- return strcmp(pp1->name, pp2->name) | !!memcmp(pp1->instance_uuid,
- pp2->instance_uuid, PORT_UUID_MAX) |
- !!memcmp(pp1->host_uuid, pp2->host_uuid, PORT_UUID_MAX) |
- !!memcmp(pp1->mac_addr, pp2->mac_addr, ETH_ALEN);
-}
-
-static int enic_pp_preassociate(struct enic *enic,
- struct enic_port_profile *prev_pp, int *restore_pp);
-static int enic_pp_disassociate(struct enic *enic,
- struct enic_port_profile *prev_pp, int *restore_pp);
-static int enic_pp_preassociate_rr(struct enic *enic,
- struct enic_port_profile *prev_pp, int *restore_pp);
-static int enic_pp_associate(struct enic *enic,
- struct enic_port_profile *prev_pp, int *restore_pp);
-
-static int (*enic_pp_handlers[])(struct enic *enic,
- struct enic_port_profile *prev_state, int *restore_pp) = {
- [PORT_REQUEST_PREASSOCIATE] = enic_pp_preassociate,
- [PORT_REQUEST_PREASSOCIATE_RR] = enic_pp_preassociate_rr,
- [PORT_REQUEST_ASSOCIATE] = enic_pp_associate,
- [PORT_REQUEST_DISASSOCIATE] = enic_pp_disassociate,
-};
-
-static const int enic_pp_handlers_count =
- sizeof(enic_pp_handlers)/sizeof(*enic_pp_handlers);
-
-static int enic_pp_preassociate(struct enic *enic,
- struct enic_port_profile *prev_pp, int *restore_pp)
-{
- return -EOPNOTSUPP;
-}
-
-static int enic_pp_disassociate(struct enic *enic,
- struct enic_port_profile *prev_pp, int *restore_pp)
-{
- return enic_unset_port_profile(enic);
-}
-
-static int enic_pp_preassociate_rr(struct enic *enic,
- struct enic_port_profile *prev_pp, int *restore_pp)
-{
- int err;
- int active = 0;
-
- if (enic->pp.request != PORT_REQUEST_ASSOCIATE) {
- /* If pre-associate is not part of an associate.
- We always disassociate first */
- err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](enic,
- prev_pp, restore_pp);
- if (err)
- return err;
-
- *restore_pp = 0;
- }
-
- *restore_pp = 0;
-
- err = enic_set_port_profile(enic);
- if (err)
- return err;
-
- /* If pre-associate is not part of an associate. */
- if (enic->pp.request != PORT_REQUEST_ASSOCIATE)
- err = enic_dev_status_to_errno(enic_dev_enable2(enic, active));
-
- return err;
-}
-
-static int enic_pp_associate(struct enic *enic,
- struct enic_port_profile *prev_pp, int *restore_pp)
-{
- int err;
- int active = 1;
-
- /* Check if a pre-associate was called before */
- if (prev_pp->request != PORT_REQUEST_PREASSOCIATE_RR ||
- (prev_pp->request == PORT_REQUEST_PREASSOCIATE_RR &&
- enic_are_pp_different(prev_pp, &enic->pp))) {
- err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](
- enic, prev_pp, restore_pp);
- if (err)
- return err;
-
- *restore_pp = 0;
- }
-
- err = enic_pp_handlers[PORT_REQUEST_PREASSOCIATE_RR](
- enic, prev_pp, restore_pp);
- if (err)
- return err;
-
- *restore_pp = 0;
-
- return enic_dev_status_to_errno(enic_dev_enable2(enic, active));
-}
-
-int enic_process_set_pp_request(struct enic *enic,
- struct enic_port_profile *prev_pp, int *restore_pp)
-{
- if (enic->pp.request < enic_pp_handlers_count
- && enic_pp_handlers[enic->pp.request])
- return enic_pp_handlers[enic->pp.request](enic,
- prev_pp, restore_pp);
- else
- return -EOPNOTSUPP;
-}
-
-int enic_process_get_pp_request(struct enic *enic, int request,
- u16 *response)
-{
- int err, status = ERR_SUCCESS;
-
- switch (request) {
-
- case PORT_REQUEST_PREASSOCIATE_RR:
- case PORT_REQUEST_ASSOCIATE:
- err = enic_dev_enable2_done(enic, &status);
- break;
-
- case PORT_REQUEST_DISASSOCIATE:
- err = enic_dev_deinit_done(enic, &status);
- break;
-
- default:
- return -EINVAL;
- }
-
- if (err)
- status = err;
-
- switch (status) {
- case ERR_SUCCESS:
- *response = PORT_PROFILE_RESPONSE_SUCCESS;
- break;
- case ERR_EINVAL:
- *response = PORT_PROFILE_RESPONSE_INVALID;
- break;
- case ERR_EBADSTATE:
- *response = PORT_PROFILE_RESPONSE_BADSTATE;
- break;
- case ERR_ENOMEM:
- *response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
- break;
- case ERR_EINPROGRESS:
- *response = PORT_PROFILE_RESPONSE_INPROGRESS;
- break;
- default:
- *response = PORT_PROFILE_RESPONSE_ERROR;
- break;
- }
-
- return 0;
-}
diff --git a/drivers/net/enic/enic_pp.h b/drivers/net/enic/enic_pp.h
deleted file mode 100644
index 699e365a944..00000000000
--- a/drivers/net/enic/enic_pp.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef _ENIC_PP_H_
-#define _ENIC_PP_H_
-
-int enic_process_set_pp_request(struct enic *enic,
- struct enic_port_profile *prev_pp, int *restore_pp);
-int enic_process_get_pp_request(struct enic *enic, int request,
- u16 *response);
-
-#endif /* _ENIC_PP_H_ */
diff --git a/drivers/net/3c501.c b/drivers/net/ethernet/3com/3c501.c
index 5420f6de27d..68da81d476f 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/ethernet/3com/3c501.c
@@ -201,7 +201,7 @@ static const struct net_device_ops el_netdev_ops = {
.ndo_stop = el1_close,
.ndo_start_xmit = el_start_xmit,
.ndo_tx_timeout = el_timeout,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/3c501.h b/drivers/net/ethernet/3com/3c501.h
index 183fd55f03c..183fd55f03c 100644
--- a/drivers/net/3c501.h
+++ b/drivers/net/ethernet/3com/3c501.h
diff --git a/drivers/net/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 44b28b2d700..92053e6fc98 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -545,7 +545,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_stop = el3_close,
.ndo_start_xmit = el3_start_xmit,
.ndo_get_stats = el3_get_stats,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_tx_timeout = el3_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/3c515.c b/drivers/net/ethernet/3com/3c515.c
index d2bb4b254c5..f67a5d3a200 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -569,7 +569,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = corkscrew_start_xmit,
.ndo_tx_timeout = corkscrew_timeout,
.ndo_get_stats = corkscrew_get_stats,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 34c5e1cbf65..9c01bc9235b 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -255,7 +255,7 @@ static const struct net_device_ops el3_netdev_ops = {
.ndo_tx_timeout = el3_tx_timeout,
.ndo_get_stats = el3_get_stats,
.ndo_do_ioctl = el3_ioctl,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 4a1a3580980..972f80ecc51 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -184,7 +184,7 @@ static const struct net_device_ops el3_netdev_ops = {
.ndo_tx_timeout = el3_tx_timeout,
.ndo_set_config = el3_config,
.ndo_get_stats = el3_get_stats,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 8cc22568ebd..b42c06baba8 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1055,7 +1055,7 @@ static const struct net_device_ops boomrang_netdev_ops = {
#ifdef CONFIG_PCI
.ndo_do_ioctl = vortex_ioctl,
#endif
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -1073,7 +1073,7 @@ static const struct net_device_ops vortex_netdev_ops = {
#ifdef CONFIG_PCI
.ndo_do_ioctl = vortex_ioctl,
#endif
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -2179,14 +2179,15 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
vp->tx_ring[entry].frag[i+1].addr =
- cpu_to_le32(pci_map_single(VORTEX_PCI(vp),
- (void*)page_address(frag->page) + frag->page_offset,
- frag->size, PCI_DMA_TODEVICE));
+ cpu_to_le32(pci_map_single(
+ VORTEX_PCI(vp),
+ (void *)skb_frag_address(frag),
+ skb_frag_size(frag), PCI_DMA_TODEVICE));
if (i == skb_shinfo(skb)->nr_frags-1)
- vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
+ vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
else
- vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
+ vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
}
}
#else
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
new file mode 100644
index 00000000000..a8bb30cf512
--- /dev/null
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -0,0 +1,122 @@
+#
+# 3Com Ethernet device configuration
+#
+
+config NET_VENDOR_3COM
+ bool "3Com devices"
+ default y
+ depends on ISA || EISA || MCA || PCI || PCMCIA
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about 3Com cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_3COM
+
+config EL1
+ tristate "3c501 \"EtherLink\" support"
+ depends on ISA
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Also, consider buying a
+ new card, since the 3c501 is slow, broken, and obsolete: you will
+ have problems. Some people suggest to ping ("man ping") a nearby
+ machine every minute ("man cron") when using this card.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c501.
+
+config EL3
+ tristate "3c509/3c529 (MCA)/3c579 \"EtherLink III\" support"
+ depends on (ISA || EISA || MCA)
+ ---help---
+ If you have a network (Ethernet) card belonging to the 3Com
+ EtherLinkIII series, say Y and read the Ethernet-HOWTO, available
+ from <http://www.tldp.org/docs.html#howto>.
+
+ If your card is not working you may need to use the DOS
+ setup disk to disable Plug & Play mode, and to select the default
+ media type.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c509.
+
+config 3C515
+ tristate "3c515 ISA \"Fast EtherLink\""
+ depends on (ISA || EISA) && ISA_DMA_API
+ ---help---
+ If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
+ network card, say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c515.
+
+config PCMCIA_3C574
+ tristate "3Com 3c574 PCMCIA support"
+ depends on PCMCIA
+ ---help---
+ Say Y here if you intend to attach a 3Com 3c574 or compatible PCMCIA
+ (PC-card) Fast Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called 3c574_cs. If unsure, say N.
+
+config PCMCIA_3C589
+ tristate "3Com 3c589 PCMCIA support"
+ depends on PCMCIA
+ ---help---
+ Say Y here if you intend to attach a 3Com 3c589 or compatible PCMCIA
+ (PC-card) Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called 3c589_cs. If unsure, say N.
+
+config VORTEX
+ tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support"
+ depends on (PCI || EISA)
+ select NET_CORE
+ select MII
+ ---help---
+ This option enables driver support for a large number of 10Mbps and
+ 10/100Mbps EISA, PCI and PCMCIA 3Com network cards:
+
+ "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
+ "Boomerang" (EtherLink XL 3c900 or 3c905) PCI
+ "Cyclone" (3c540/3c900/3c905/3c980/3c575/3c656) PCI and Cardbus
+ "Tornado" (3c905) PCI
+ "Hurricane" (3c555/3cSOHO) PCI
+
+ If you have such a card, say Y and read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>. More
+ specific information is in
+ <file:Documentation/networking/vortex.txt> and in the comments at
+ the beginning of <file:drivers/net/3c59x.c>.
+
+ To compile this support as a module, choose M here.
+
+config TYPHOON
+ tristate "3cr990 series \"Typhoon\" support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This option enables driver support for the 3cr990 series of cards:
+
+ 3C990-TX, 3CR990-TX-95, 3CR990-TX-97, 3CR990-FX-95, 3CR990-FX-97,
+ 3CR990SVR, 3CR990SVR95, 3CR990SVR97, 3CR990-FX-95 Server,
+ 3CR990-FX-97 Server, 3C990B-TX-M, 3C990BSVR
+
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called typhoon.
+
+endif # NET_VENDOR_3COM
diff --git a/drivers/net/ethernet/3com/Makefile b/drivers/net/ethernet/3com/Makefile
new file mode 100644
index 00000000000..1e5382a30ea
--- /dev/null
+++ b/drivers/net/ethernet/3com/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the 3Com Ethernet device drivers
+#
+
+obj-$(CONFIG_EL1) += 3c501.o
+obj-$(CONFIG_EL3) += 3c509.o
+obj-$(CONFIG_3C515) += 3c515.o
+obj-$(CONFIG_PCMCIA_3C589) += 3c589_cs.o
+obj-$(CONFIG_PCMCIA_3C574) += 3c574_cs.o
+obj-$(CONFIG_VORTEX) += 3c59x.o
+obj-$(CONFIG_TYPHOON) += typhoon.o
diff --git a/drivers/net/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 1d5091a1e49..20ea07508ac 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -810,17 +810,16 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
txd->frag.addrHi = 0;
first_txd->numDesc++;
- for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *frag_addr;
txd = (struct tx_desc *) (txRing->ringBase +
txRing->lastWrite);
typhoon_inc_tx_index(&txRing->lastWrite, 1);
- len = frag->size;
- frag_addr = (void *) page_address(frag->page) +
- frag->page_offset;
+ len = skb_frag_size(frag);
+ frag_addr = skb_frag_address(frag);
skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
PCI_DMA_TODEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
@@ -1149,13 +1148,9 @@ static void
typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
ering->rx_max_pending = RXENT_ENTRIES;
- ering->rx_mini_max_pending = 0;
- ering->rx_jumbo_max_pending = 0;
ering->tx_max_pending = TXLO_ENTRIES - 1;
ering->rx_pending = RXENT_ENTRIES;
- ering->rx_mini_pending = 0;
- ering->rx_jumbo_pending = 0;
ering->tx_pending = TXLO_ENTRIES - 1;
}
@@ -2266,7 +2261,7 @@ static const struct net_device_ops typhoon_netdev_ops = {
.ndo_open = typhoon_open,
.ndo_stop = typhoon_close,
.ndo_start_xmit = typhoon_start_tx,
- .ndo_set_multicast_list = typhoon_set_rx_mode,
+ .ndo_set_rx_mode = typhoon_set_rx_mode,
.ndo_tx_timeout = typhoon_tx_timeout,
.ndo_get_stats = typhoon_get_stats,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/typhoon.h b/drivers/net/ethernet/3com/typhoon.h
index 88187fc84aa..88187fc84aa 100644
--- a/drivers/net/typhoon.h
+++ b/drivers/net/ethernet/3com/typhoon.h
diff --git a/drivers/net/3c503.c b/drivers/net/ethernet/8390/3c503.c
index 84e68f1b9ad..fbab1367505 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/ethernet/8390/3c503.c
@@ -176,7 +176,7 @@ static const struct net_device_ops el2_netdev_ops = {
.ndo_start_xmit = eip_start_xmit,
.ndo_tx_timeout = eip_tx_timeout,
.ndo_get_stats = eip_get_stats,
- .ndo_set_multicast_list = eip_set_multicast_list,
+ .ndo_set_rx_mode = eip_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/3c503.h b/drivers/net/ethernet/8390/3c503.h
index e2367b82a2e..e2367b82a2e 100644
--- a/drivers/net/3c503.h
+++ b/drivers/net/ethernet/8390/3c503.h
diff --git a/drivers/net/8390.c b/drivers/net/ethernet/8390/8390.c
index 7c7518be175..5db1f55abef 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/ethernet/8390/8390.c
@@ -61,7 +61,7 @@ const struct net_device_ops ei_netdev_ops = {
.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
- .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/8390.h b/drivers/net/ethernet/8390/8390.h
index 58a12e4c78f..58a12e4c78f 100644
--- a/drivers/net/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
diff --git a/drivers/net/8390p.c b/drivers/net/ethernet/8390/8390p.c
index a2a64ea0b69..e8fc2e87e84 100644
--- a/drivers/net/8390p.c
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -66,7 +66,7 @@ const struct net_device_ops eip_netdev_ops = {
.ndo_start_xmit = eip_start_xmit,
.ndo_tx_timeout = eip_tx_timeout,
.ndo_get_stats = eip_get_stats,
- .ndo_set_multicast_list = eip_set_multicast_list,
+ .ndo_set_rx_mode = eip_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
new file mode 100644
index 00000000000..e04ade44424
--- /dev/null
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -0,0 +1,337 @@
+#
+# 8390 device configuration
+#
+
+config NET_VENDOR_8390
+ bool "National Semi-conductor 8390 devices"
+ default y
+ depends on NET_VENDOR_NATSEMI && (AMIGA_PCMCIA || PCI || SUPERH || \
+ ISA || MCA || EISA || MAC || M32R || MACH_TX49XX || \
+ MCA_LEGACY || H8300 || ARM || MIPS || ZORRO || PCMCIA || \
+ EXPERIMENTAL)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Western Digital cards. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_8390
+
+config EL2
+ tristate "3c503 \"EtherLink II\" support"
+ depends on ISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c503.
+
+config AC3200
+ tristate "Ansel Communications EISA 3200 support (EXPERIMENTAL)"
+ depends on PCI && (ISA || EISA) && EXPERIMENTAL
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ac3200.
+
+config PCMCIA_AXNET
+ tristate "Asix AX88190 PCMCIA support"
+ depends on PCMCIA
+ ---help---
+ Say Y here if you intend to attach an Asix AX88190-based PCMCIA
+ (PC-card) Fast Ethernet card to your computer. These cards are
+ nearly NE2000 compatible but need a separate driver due to a few
+ misfeatures.
+
+ To compile this driver as a module, choose M here: the module will be
+ called axnet_cs. If unsure, say N.
+
+config AX88796
+ tristate "ASIX AX88796 NE2000 clone support"
+ depends on (ARM || MIPS || SUPERH)
+ select PHYLIB
+ select MDIO_BITBANG
+ ---help---
+ AX88796 driver, using platform bus to provide
+ chip detection and resources
+
+config AX88796_93CX6
+ bool "ASIX AX88796 external 93CX6 eeprom support"
+ depends on AX88796
+ select EEPROM_93CX6
+ ---help---
+ Select this if your platform comes with an external 93CX6 eeprom.
+
+config E2100
+ tristate "Cabletron E21xx support"
+ depends on ISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called e2100.
+
+config ES3210
+ tristate "Racal-Interlan EISA ES3210 support (EXPERIMENTAL)"
+ depends on PCI && EISA && EXPERIMENTAL
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called es3210.
+
+config HPLAN_PLUS
+ tristate "HP PCLAN+ (27247B and 27252A) support"
+ depends on ISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called hp-plus.
+
+config HPLAN
+ tristate "HP PCLAN (27245 and other 27xxx series) support"
+ depends on ISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called hp.
+
+config HYDRA
+ tristate "Hydra support"
+ depends on ZORRO
+ select CRC32
+ ---help---
+ If you have a Hydra Ethernet adapter, say Y. Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hydra.
+
+config ARM_ETHERH
+ tristate "I-cubed EtherH/ANT EtherM support"
+ depends on ARM && ARCH_ACORN
+ select CRC32
+ ---help---
+ If you have an Acorn system with one of these network cards, you
+ should say Y to this option if you wish to use it with Linux.
+
+config LNE390
+ tristate "Mylex EISA LNE390A/B support (EXPERIMENTAL)"
+ depends on PCI && EISA && EXPERIMENTAL
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called lne390.
+
+config MAC8390
+ bool "Macintosh NS 8390 based ethernet cards"
+ depends on MAC
+ select CRC32
+ ---help---
+ If you want to include a driver to support Nubus or LC-PDS
+ Ethernet cards using an NS8390 chipset or its equivalent, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+config NE2000
+ tristate "NE2000/NE1000 support"
+ depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX)
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Many Ethernet cards
+ without a specific driver are compatible with NE2000.
+
+ If you have a PCI NE2000 card however, say N here and Y to "PCI
+ NE2000 and clone support" under "EISA, VLB, PCI and on board
+ controllers" below. If you have a NE2000 card and are running on
+ an MCA system (a bus system used on some IBM PS/2 computers and
+ laptops), say N here and Y to "NE/2 (ne2000 MCA version) support",
+ below.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ne.
+
+config NE2_MCA
+ tristate "NE/2 (ne2000 MCA version) support"
+ depends on MCA_LEGACY
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ne2.
+
+config NE2K_PCI
+ tristate "PCI NE2000 and clones support (see help)"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver is for NE2000 compatible PCI cards. It will not work
+ with ISA NE2000 cards (they have their own driver, "NE2000/NE1000
+ support" below). If you have a PCI NE2000 network (Ethernet) card,
+ say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ This driver also works for the following NE2000 clone cards:
+ RealTek RTL-8029 Winbond 89C940 Compex RL2000 KTI ET32P2
+ NetVin NV5000SC Via 86C926 SureCom NE34 Winbond
+ Holtek HT80232 Holtek HT80229
+
+ To compile this driver as a module, choose M here. The module
+ will be called ne2k-pci.
+
+config APNE
+ tristate "PCMCIA NE2000 support"
+ depends on AMIGA_PCMCIA
+ select CRC32
+ ---help---
+ If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise,
+ say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called apne.
+
+config NE3210
+ tristate "Novell/Eagle/Microdyne NE3210 EISA support (EXPERIMENTAL)"
+ depends on PCI && EISA && EXPERIMENTAL
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Note that this driver
+ will NOT WORK for NE3200 cards as they are completely different.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ne3210.
+
+config PCMCIA_PCNET
+ tristate "NE2000 compatible PCMCIA support"
+ depends on PCMCIA
+ select CRC32
+ ---help---
+ Say Y here if you intend to attach an NE2000 compatible PCMCIA
+ (PC-card) Ethernet or Fast Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcnet_cs. If unsure, say N.
+
+config NE_H8300
+ tristate "NE2000 compatible support for H8/300"
+ depends on H8300
+ ---help---
+ Say Y here if you want to use the NE2000 compatible
+ controller on the Renesas H8/300 processor.
+
+config STNIC
+ tristate "National DP83902AV support"
+ depends on SUPERH
+ select CRC32
+ ---help---
+ Support for cards based on the National Semiconductor DP83902AV
+ ST-NIC Serial Network Interface Controller for Twisted Pair. This
+ is a 10Mbit/sec Ethernet controller. Product overview and specs at
+ <http://www.national.com/pf/DP/DP83902A.html>.
+
+ If unsure, say N.
+
+config ULTRAMCA
+ tristate "SMC Ultra MCA support"
+ depends on MCA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type and are running
+ an MCA based system (PS/2), say Y and read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called smc-mca.
+
+config ULTRA
+ tristate "SMC Ultra support"
+ depends on ISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Important: There have been many reports that, with some motherboards
+ mixing an SMC Ultra and an Adaptec AHA154x SCSI card (or compatible,
+ such as some BusLogic models) causes corruption problems with many
+ operating systems. The Linux smc-ultra driver has a work-around for
+ this but keep it in mind if you have such a SCSI card and have
+ problems.
+
+ To compile this driver as a module, choose M here. The module
+ will be called smc-ultra.
+
+config ULTRA32
+ tristate "SMC Ultra32 EISA support"
+ depends on EISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called smc-ultra32.
+
+config WD80x3
+ tristate "WD80*3 support"
+ depends on ISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called wd.
+
+config ZORRO8390
+ tristate "Zorro NS8390-based Ethernet support"
+ depends on ZORRO
+ select CRC32
+ ---help---
+ This driver is for Zorro Ethernet cards using an NS8390-compatible
+ chipset, like the Village Tronic Ariadne II and the Individual
+ Computers X-Surf Ethernet cards. If you have such a card, say Y.
+ Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called zorro8390.
+
+endif # NET_VENDOR_8390
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
new file mode 100644
index 00000000000..3337d7fb434
--- /dev/null
+++ b/drivers/net/ethernet/8390/Makefile
@@ -0,0 +1,29 @@
+#
+# Makefile for the 8390 network device drivers.
+#
+
+obj-$(CONFIG_MAC8390) += mac8390.o
+obj-$(CONFIG_AC3200) += ac3200.o 8390.o
+obj-$(CONFIG_APNE) += apne.o 8390.o
+obj-$(CONFIG_ARM_ETHERH) += etherh.o
+obj-$(CONFIG_AX88796) += ax88796.o
+obj-$(CONFIG_E2100) += e2100.o 8390.o
+obj-$(CONFIG_EL2) += 3c503.o 8390p.o
+obj-$(CONFIG_ES3210) += es3210.o 8390.o
+obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
+obj-$(CONFIG_HPLAN) += hp.o 8390p.o
+obj-$(CONFIG_HYDRA) += hydra.o 8390.o
+obj-$(CONFIG_LNE390) += lne390.o 8390.o
+obj-$(CONFIG_NE2000) += ne.o 8390p.o
+obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
+obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
+obj-$(CONFIG_NE3210) += ne3210.o 8390.o
+obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
+obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o
+obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
+obj-$(CONFIG_STNIC) += stnic.o 8390.o
+obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
+obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
+obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
+obj-$(CONFIG_WD80x3) += wd.o 8390.o
+obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
diff --git a/drivers/net/ac3200.c b/drivers/net/ethernet/8390/ac3200.c
index f07b2e980fb..5337dd0a59b 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ethernet/8390/ac3200.c
@@ -151,7 +151,7 @@ static const struct net_device_ops ac_netdev_ops = {
.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
- .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/apne.c b/drivers/net/ethernet/8390/apne.c
index 547737340cb..547737340cb 100644
--- a/drivers/net/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
diff --git a/drivers/net/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index e7cb8c8b977..e9f8432f55b 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -543,7 +543,7 @@ static const struct net_device_ops ax_netdev_ops = {
.ndo_start_xmit = ax_ei_start_xmit,
.ndo_tx_timeout = ax_ei_tx_timeout,
.ndo_get_stats = ax_ei_get_stats,
- .ndo_set_multicast_list = ax_ei_set_multicast_list,
+ .ndo_set_rx_mode = ax_ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 9953db71196..bba51cdc74a 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -38,7 +38,7 @@
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/mii.h>
-#include "../8390.h"
+#include "8390.h"
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -134,7 +134,7 @@ static const struct net_device_ops axnet_netdev_ops = {
.ndo_start_xmit = axnet_start_xmit,
.ndo_tx_timeout = axnet_tx_timeout,
.ndo_get_stats = get_stats,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/e2100.c b/drivers/net/ethernet/8390/e2100.c
index d50a9998ae7..d16dc53c181 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/ethernet/8390/e2100.c
@@ -168,7 +168,7 @@ static const struct net_device_ops e21_netdev_ops = {
.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
- .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/es3210.c b/drivers/net/ethernet/8390/es3210.c
index 7a09575ecff..7a09575ecff 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/ethernet/8390/es3210.c
diff --git a/drivers/net/arm/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 03e217a868d..48c4948750d 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -65,7 +65,7 @@
static char version[] __initdata =
"EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
-#include "../lib8390.c"
+#include "lib8390.c"
static unsigned int net_debug = NET_DEBUG;
@@ -644,7 +644,7 @@ static const struct net_device_ops etherh_netdev_ops = {
.ndo_start_xmit = __ei_start_xmit,
.ndo_tx_timeout = __ei_tx_timeout,
.ndo_get_stats = __ei_get_stats,
- .ndo_set_multicast_list = __ei_set_multicast_list,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/hp-plus.c b/drivers/net/ethernet/8390/hp-plus.c
index 29917363ebf..eeac843dcd2 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/ethernet/8390/hp-plus.c
@@ -165,7 +165,7 @@ static const struct net_device_ops hpp_netdev_ops = {
.ndo_start_xmit = eip_start_xmit,
.ndo_tx_timeout = eip_tx_timeout,
.ndo_get_stats = eip_get_stats,
- .ndo_set_multicast_list = eip_set_multicast_list,
+ .ndo_set_rx_mode = eip_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/hp.c b/drivers/net/ethernet/8390/hp.c
index 18564d4a7c0..18564d4a7c0 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/ethernet/8390/hp.c
diff --git a/drivers/net/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 1cd481c0420..3dac937a67c 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -101,7 +101,7 @@ static const struct net_device_ops hydra_netdev_ops = {
.ndo_start_xmit = __ei_start_xmit,
.ndo_tx_timeout = __ei_tx_timeout,
.ndo_get_stats = __ei_get_stats,
- .ndo_set_multicast_list = __ei_set_multicast_list,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index 05ae21435bf..05ae21435bf 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
diff --git a/drivers/net/lne390.c b/drivers/net/ethernet/8390/lne390.c
index f9888d20177..f9888d20177 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/ethernet/8390/lne390.c
diff --git a/drivers/net/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index f84f5e6eded..af5d9822cad 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -494,7 +494,7 @@ static const struct net_device_ops mac8390_netdev_ops = {
.ndo_start_xmit = __ei_start_xmit,
.ndo_tx_timeout = __ei_tx_timeout,
.ndo_get_stats = __ei_get_stats,
- .ndo_set_multicast_list = __ei_set_multicast_list,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c
index 7298a34bc79..cd36a6a5f40 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ethernet/8390/ne-h8300.c
@@ -200,7 +200,7 @@ static const struct net_device_ops ne_netdev_ops = {
.ndo_start_xmit = __ei_start_xmit,
.ndo_tx_timeout = __ei_tx_timeout,
.ndo_get_stats = __ei_get_stats,
- .ndo_set_multicast_list = __ei_set_multicast_list,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/ne.c b/drivers/net/ethernet/8390/ne.c
index 1063093b3af..1063093b3af 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
diff --git a/drivers/net/ne2.c b/drivers/net/ethernet/8390/ne2.c
index 70cdc699634..70cdc699634 100644
--- a/drivers/net/ne2.c
+++ b/drivers/net/ethernet/8390/ne2.c
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 3c333cb5d34..39923425ba2 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -207,7 +207,7 @@ static const struct net_device_ops ne2k_netdev_ops = {
.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
- .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
index 243ed2aee88..243ed2aee88 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ethernet/8390/ne3210.c
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index b4fd7c3ed07..053b2551a72 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -41,7 +41,7 @@
#include <linux/log2.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
-#include "../8390.h"
+#include "8390.h"
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -227,7 +227,7 @@ static const struct net_device_ops pcnet_netdev_ops = {
.ndo_start_xmit = ei_start_xmit,
.ndo_get_stats = ei_get_stats,
.ndo_do_ioctl = ei_ioctl,
- .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_set_rx_mode = ei_set_multicast_list,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/smc-mca.c b/drivers/net/ethernet/8390/smc-mca.c
index 34934fb23b9..77efec44fea 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/ethernet/8390/smc-mca.c
@@ -191,7 +191,7 @@ static const struct net_device_ops ultramca_netdev_ops = {
.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
- .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index ba44ede2919..1cc306a83ff 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -192,7 +192,7 @@ static const struct net_device_ops ultra_netdev_ops = {
.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
- .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/ethernet/8390/smc-ultra32.c
index e459c3b2510..bb87053eb3d 100644
--- a/drivers/net/smc-ultra32.c
+++ b/drivers/net/ethernet/8390/smc-ultra32.c
@@ -160,7 +160,7 @@ static const struct net_device_ops ultra32_netdev_ops = {
.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
- .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/stnic.c b/drivers/net/ethernet/8390/stnic.c
index d85f0a84bc7..d85f0a84bc7 100644
--- a/drivers/net/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
diff --git a/drivers/net/wd.c b/drivers/net/ethernet/8390/wd.c
index 8831a3393ec..c175fadb597 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -153,7 +153,7 @@ static const struct net_device_ops wd_netdev_ops = {
.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
- .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 15e7751a273..3aa9fe9999b 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -278,7 +278,7 @@ static const struct net_device_ops zorro8390_netdev_ops = {
.ndo_start_xmit = __ei_start_xmit,
.ndo_tx_timeout = __ei_tx_timeout,
.ndo_get_stats = __ei_get_stats,
- .ndo_set_multicast_list = __ei_set_multicast_list,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
new file mode 100644
index 00000000000..6dff5a0e733
--- /dev/null
+++ b/drivers/net/ethernet/Kconfig
@@ -0,0 +1,177 @@
+#
+# Ethernet LAN device configuration
+#
+
+menuconfig ETHERNET
+ bool "Ethernet driver support"
+ depends on NET
+ default y
+ ---help---
+ This section contains all the Ethernet device drivers.
+
+if ETHERNET
+
+config MDIO
+ tristate
+
+config SUNGEM_PHY
+ tristate
+
+source "drivers/net/ethernet/3com/Kconfig"
+source "drivers/net/ethernet/adaptec/Kconfig"
+source "drivers/net/ethernet/aeroflex/Kconfig"
+source "drivers/net/ethernet/alteon/Kconfig"
+source "drivers/net/ethernet/amd/Kconfig"
+source "drivers/net/ethernet/apple/Kconfig"
+source "drivers/net/ethernet/atheros/Kconfig"
+source "drivers/net/ethernet/cadence/Kconfig"
+source "drivers/net/ethernet/adi/Kconfig"
+source "drivers/net/ethernet/broadcom/Kconfig"
+source "drivers/net/ethernet/brocade/Kconfig"
+source "drivers/net/ethernet/chelsio/Kconfig"
+source "drivers/net/ethernet/cirrus/Kconfig"
+source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/davicom/Kconfig"
+
+config DNET
+ tristate "Dave ethernet support (DNET)"
+ depends on HAS_IOMEM
+ select PHYLIB
+ ---help---
+ The Dave ethernet interface (DNET) is found on Qong Board FPGA.
+ Say Y to include support for the DNET chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called dnet.
+
+source "drivers/net/ethernet/dec/Kconfig"
+source "drivers/net/ethernet/dlink/Kconfig"
+source "drivers/net/ethernet/emulex/Kconfig"
+source "drivers/net/ethernet/neterion/Kconfig"
+source "drivers/net/ethernet/faraday/Kconfig"
+source "drivers/net/ethernet/freescale/Kconfig"
+source "drivers/net/ethernet/fujitsu/Kconfig"
+source "drivers/net/ethernet/hp/Kconfig"
+source "drivers/net/ethernet/ibm/Kconfig"
+source "drivers/net/ethernet/intel/Kconfig"
+source "drivers/net/ethernet/i825xx/Kconfig"
+source "drivers/net/ethernet/xscale/Kconfig"
+source "drivers/net/ethernet/icplus/Kconfig"
+
+config JME
+ tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This driver supports the PCI-Express gigabit ethernet adapters
+ based on JMicron JMC250 chipset.
+
+ To compile this driver as a module, choose M here. The module
+ will be called jme.
+
+config KORINA
+ tristate "Korina (IDT RC32434) Ethernet support"
+ depends on MIKROTIK_RB532
+ ---help---
+ If you have a Mikrotik RouterBoard 500 or IDT RC32434
+ based system say Y. Otherwise say N.
+
+config LANTIQ_ETOP
+ tristate "Lantiq SoC ETOP driver"
+ depends on SOC_TYPE_XWAY
+ ---help---
+ Support for the MII0 inside the Lantiq SoC
+
+source "drivers/net/ethernet/marvell/Kconfig"
+source "drivers/net/ethernet/mellanox/Kconfig"
+source "drivers/net/ethernet/micrel/Kconfig"
+source "drivers/net/ethernet/microchip/Kconfig"
+
+config MIPS_SIM_NET
+ tristate "MIPS simulator Network device"
+ depends on MIPS_SIM
+ ---help---
+ The MIPSNET device is a simple Ethernet network device which is
+ emulated by the MIPS Simulator.
+ If you are not using a MIPSsim or are unsure, say N.
+
+source "drivers/net/ethernet/myricom/Kconfig"
+
+config FEALNX
+ tristate "Myson MTD-8xx PCI Ethernet support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
+ cards. <http://www.myson.com.tw/>
+
+source "drivers/net/ethernet/natsemi/Kconfig"
+source "drivers/net/ethernet/8390/Kconfig"
+
+config NET_NETX
+ tristate "NetX Ethernet support"
+ select NET_CORE
+ select MII
+ depends on ARCH_NETX
+ ---help---
+ This is support for the Hilscher netX builtin Ethernet ports
+
+ To compile this driver as a module, choose M here. The module
+ will be called netx-eth.
+
+source "drivers/net/ethernet/nuvoton/Kconfig"
+source "drivers/net/ethernet/nvidia/Kconfig"
+source "drivers/net/ethernet/octeon/Kconfig"
+source "drivers/net/ethernet/oki-semi/Kconfig"
+
+config ETHOC
+ tristate "OpenCores 10/100 Mbps Ethernet MAC support"
+ depends on HAS_IOMEM && HAS_DMA
+ select NET_CORE
+ select MII
+ select PHYLIB
+ select CRC32
+ select BITREVERSE
+ ---help---
+ Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
+
+source "drivers/net/ethernet/packetengines/Kconfig"
+source "drivers/net/ethernet/pasemi/Kconfig"
+source "drivers/net/ethernet/qlogic/Kconfig"
+source "drivers/net/ethernet/racal/Kconfig"
+source "drivers/net/ethernet/realtek/Kconfig"
+source "drivers/net/ethernet/renesas/Kconfig"
+source "drivers/net/ethernet/rdc/Kconfig"
+
+config S6GMAC
+ tristate "S6105 GMAC ethernet support"
+ depends on XTENSA_VARIANT_S6000
+ select PHYLIB
+ ---help---
+ This driver supports the on chip ethernet device on the
+ S6105 xtensa processor.
+
+ To compile this driver as a module, choose M here. The module
+ will be called s6gmac.
+
+source "drivers/net/ethernet/seeq/Kconfig"
+source "drivers/net/ethernet/sis/Kconfig"
+source "drivers/net/ethernet/sfc/Kconfig"
+source "drivers/net/ethernet/sgi/Kconfig"
+source "drivers/net/ethernet/smsc/Kconfig"
+source "drivers/net/ethernet/stmicro/Kconfig"
+source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/tehuti/Kconfig"
+source "drivers/net/ethernet/ti/Kconfig"
+source "drivers/net/ethernet/tile/Kconfig"
+source "drivers/net/ethernet/toshiba/Kconfig"
+source "drivers/net/ethernet/tundra/Kconfig"
+source "drivers/net/ethernet/via/Kconfig"
+source "drivers/net/ethernet/xilinx/Kconfig"
+source "drivers/net/ethernet/xircom/Kconfig"
+
+endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
new file mode 100644
index 00000000000..c53ad3afc99
--- /dev/null
+++ b/drivers/net/ethernet/Makefile
@@ -0,0 +1,74 @@
+#
+# Makefile for the Linux network Ethernet device drivers.
+#
+
+obj-$(CONFIG_NET_VENDOR_3COM) += 3com/
+obj-$(CONFIG_NET_VENDOR_8390) += 8390/
+obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
+obj-$(CONFIG_GRETH) += aeroflex/
+obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
+obj-$(CONFIG_NET_VENDOR_AMD) += amd/
+obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
+obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
+obj-$(CONFIG_NET_ATMEL) += cadence/
+obj-$(CONFIG_NET_BFIN) += adi/
+obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
+obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
+obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
+obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
+obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_DM9000) += davicom/
+obj-$(CONFIG_DNET) += dnet.o
+obj-$(CONFIG_NET_VENDOR_DEC) += dec/
+obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
+obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
+obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
+obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
+obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
+obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+obj-$(CONFIG_NET_VENDOR_HP) += hp/
+obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
+obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
+obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
+obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
+obj-$(CONFIG_IP1000) += icplus/
+obj-$(CONFIG_JME) += jme.o
+obj-$(CONFIG_KORINA) += korina.o
+obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
+obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
+obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
+obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
+obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
+obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
+obj-$(CONFIG_FEALNX) += fealnx.o
+obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
+obj-$(CONFIG_NET_NETX) += netx-eth.o
+obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/
+obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
+obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
+obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
+obj-$(CONFIG_ETHOC) += ethoc.o
+obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/
+obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
+obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
+obj-$(CONFIG_NET_VENDOR_RACAL) += racal/
+obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
+obj-$(CONFIG_SH_ETH) += renesas/
+obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
+obj-$(CONFIG_S6GMAC) += s6gmac.o
+obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
+obj-$(CONFIG_NET_VENDOR_SIS) += sis/
+obj-$(CONFIG_SFC) += sfc/
+obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
+obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
+obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
+obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
+obj-$(CONFIG_NET_VENDOR_TI) += ti/
+obj-$(CONFIG_TILE_NET) += tile/
+obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
+obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
+obj-$(CONFIG_NET_VENDOR_VIA) += via/
+obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
+obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
diff --git a/drivers/net/ethernet/adaptec/Kconfig b/drivers/net/ethernet/adaptec/Kconfig
new file mode 100644
index 00000000000..0bff571b1bb
--- /dev/null
+++ b/drivers/net/ethernet/adaptec/Kconfig
@@ -0,0 +1,36 @@
+#
+# Adaptec network device configuration
+#
+
+config NET_VENDOR_ADAPTEC
+ bool "Adaptec devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Adaptec cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_ADAPTEC
+
+config ADAPTEC_STARFIRE
+ tristate "Adaptec Starfire/DuraLAN support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ Say Y here if you have an Adaptec Starfire (or DuraLAN) PCI network
+ adapter. The DuraLAN chip is used on the 64 bit PCI boards from
+ Adaptec e.g. the ANA-6922A. The older 32 bit boards use the tulip
+ driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called starfire. This is recommended.
+
+endif # NET_VENDOR_ADAPTEC
diff --git a/drivers/net/ethernet/adaptec/Makefile b/drivers/net/ethernet/adaptec/Makefile
new file mode 100644
index 00000000000..6c07b758ac0
--- /dev/null
+++ b/drivers/net/ethernet/adaptec/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Adaptec network device drivers.
+#
+
+obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
diff --git a/drivers/net/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 7ae1f990a98..6d9f6911000 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -639,7 +639,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = start_tx,
.ndo_tx_timeout = tx_timeout,
.ndo_get_stats = get_stats,
- .ndo_set_multicast_list = &set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_do_ioctl = netdev_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
@@ -1256,10 +1256,13 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
np->tx_info[entry].mapping =
pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
} else {
- skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
- status |= this_frag->size;
+ const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
+ status |= skb_frag_size(this_frag);
np->tx_info[entry].mapping =
- pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
+ pci_map_single(np->pci_dev,
+ skb_frag_address(this_frag),
+ skb_frag_size(this_frag),
+ PCI_DMA_TODEVICE);
}
np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
@@ -1375,7 +1378,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
pci_unmap_single(np->pci_dev,
np->tx_info[entry].mapping,
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
np->dirty_tx++;
entry++;
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
new file mode 100644
index 00000000000..49a30d37ae4
--- /dev/null
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -0,0 +1,69 @@
+#
+# Blackfin device configuration
+#
+
+config NET_BFIN
+ bool "Blackfin devices"
+ depends on BF516 || BF518 || BF526 || BF527 || BF536 || BF537
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y.
+ Make sure you know the name of your card. Read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ If unsure, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the remaining Blackfin card questions. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_BFIN
+
+config BFIN_MAC
+ tristate "Blackfin on-chip MAC support"
+ depends on (BF516 || BF518 || BF526 || BF527 || BF536 || BF537)
+ select CRC32
+ select NET_CORE
+ select MII
+ select PHYLIB
+ select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
+ ---help---
+ This is the driver for Blackfin on-chip mac device. Say Y if you want
+ it compiled into the kernel. This driver is also available as a
+ module ( = code which can be inserted in and removed from the running
+ kernel whenever you want). The module will be called bfin_mac.
+
+config BFIN_MAC_USE_L1
+ bool "Use L1 memory for rx/tx packets"
+ depends on BFIN_MAC && (BF527 || BF537)
+ default y
+ ---help---
+ To get maximum network performance, you should use L1 memory as rx/tx
+ buffers. Say N here if you want to reserve L1 memory for other uses.
+
+config BFIN_TX_DESC_NUM
+ int "Number of transmit buffer packets"
+ depends on BFIN_MAC
+ range 6 10 if BFIN_MAC_USE_L1
+ range 10 100
+ default "10"
+ ---help---
+ Set the number of buffer packets used in driver.
+
+config BFIN_RX_DESC_NUM
+ int "Number of receive buffer packets"
+ depends on BFIN_MAC
+ range 20 100 if BFIN_MAC_USE_L1
+ range 20 800
+ default "20"
+ ---help---
+ Set the number of buffer packets used in driver.
+
+config BFIN_MAC_USE_HWSTAMP
+ bool "Use IEEE 1588 hwstamp"
+ depends on BFIN_MAC && BF518
+ default y
+ ---help---
+ To support the IEEE 1588 Precision Time Protocol (PTP), select y here
+
+endif # NET_BFIN
diff --git a/drivers/net/ethernet/adi/Makefile b/drivers/net/ethernet/adi/Makefile
new file mode 100644
index 00000000000..b1fbe195d0e
--- /dev/null
+++ b/drivers/net/ethernet/adi/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Blackfin device drivers.
+#
+
+obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
diff --git a/drivers/net/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 6c019e14854..b6d69c91db9 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1449,7 +1449,7 @@ static const struct net_device_ops bfin_mac_netdev_ops = {
.ndo_start_xmit = bfin_mac_hard_start_xmit,
.ndo_set_mac_address = bfin_mac_set_mac_address,
.ndo_tx_timeout = bfin_mac_timeout,
- .ndo_set_multicast_list = bfin_mac_set_multicast_list,
+ .ndo_set_rx_mode = bfin_mac_set_multicast_list,
.ndo_do_ioctl = bfin_mac_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index f8559ac9a40..f8559ac9a40 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
diff --git a/drivers/net/ethernet/aeroflex/Kconfig b/drivers/net/ethernet/aeroflex/Kconfig
new file mode 100644
index 00000000000..4f4a8d78fd5
--- /dev/null
+++ b/drivers/net/ethernet/aeroflex/Kconfig
@@ -0,0 +1,11 @@
+#
+# Aeroflex Gaisler network device configuration
+#
+
+config GRETH
+ tristate "Aeroflex Gaisler GRETH Ethernet MAC support"
+ depends on SPARC
+ select PHYLIB
+ select CRC32
+ ---help---
+ Say Y here if you want to use the Aeroflex Gaisler GRETH Ethernet MAC.
diff --git a/drivers/net/ethernet/aeroflex/Makefile b/drivers/net/ethernet/aeroflex/Makefile
new file mode 100644
index 00000000000..6e62a679282
--- /dev/null
+++ b/drivers/net/ethernet/aeroflex/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Aeroflex Gaisler network device drivers.
+#
+
+obj-$(CONFIG_GRETH) += greth.o
diff --git a/drivers/net/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 52a39000c42..442fefa4f2c 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -113,9 +113,8 @@ static void greth_print_tx_packet(struct sk_buff *skb)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
- phys_to_virt(page_to_phys(skb_shinfo(skb)->frags[i].page)) +
- skb_shinfo(skb)->frags[i].page_offset,
- length, true);
+ skb_frag_address(&skb_shinfo(skb)->frags[i]),
+ skb_shinfo(skb)->frags[i].size, true);
}
}
@@ -199,7 +198,7 @@ static void greth_clean_rings(struct greth_private *greth)
dma_unmap_page(greth->dev,
greth_read_bd(&tx_bdp->addr),
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
greth->tx_last = NEXT_TX(greth->tx_last);
@@ -518,7 +517,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
status = GRETH_BD_EN;
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= GRETH_TXBD_CSALL;
- status |= frag->size & GRETH_BD_LEN;
+ status |= skb_frag_size(frag) & GRETH_BD_LEN;
/* Wrap around descriptor ring */
if (curr_tx == GRETH_TXBD_NUM_MASK)
@@ -532,11 +531,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
greth_write_bd(&bdp->stat, status);
- dma_addr = dma_map_page(greth->dev,
- frag->page,
- frag->page_offset,
- frag->size,
- DMA_TO_DEVICE);
+ dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
goto frag_map_error;
@@ -717,7 +713,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)
dma_unmap_page(greth->dev,
greth_read_bd(&bdp->addr),
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
greth->tx_last = NEXT_TX(greth->tx_last);
@@ -1547,7 +1543,7 @@ static int __devinit greth_of_probe(struct platform_device *ofdev)
}
if (greth->multicast) {
- greth_netdev_ops.ndo_set_multicast_list = greth_set_multicast_list;
+ greth_netdev_ops.ndo_set_rx_mode = greth_set_multicast_list;
dev->flags |= IFF_MULTICAST;
} else {
dev->flags &= ~IFF_MULTICAST;
diff --git a/drivers/net/greth.h b/drivers/net/ethernet/aeroflex/greth.h
index 232a622a85b..232a622a85b 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/ethernet/aeroflex/greth.h
diff --git a/drivers/net/ethernet/alteon/Kconfig b/drivers/net/ethernet/alteon/Kconfig
new file mode 100644
index 00000000000..799a8528207
--- /dev/null
+++ b/drivers/net/ethernet/alteon/Kconfig
@@ -0,0 +1,48 @@
+#
+# Alteon network device configuration
+#
+
+config NET_VENDOR_ALTEON
+ bool "Alteon devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Alteon cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_ALTEON
+
+config ACENIC
+ tristate "Alteon AceNIC/3Com 3C985/NetGear GA620 Gigabit support"
+ depends on PCI
+ ---help---
+ Say Y here if you have an Alteon AceNIC, 3Com 3C985(B), NetGear
+ GA620, SGI Gigabit or Farallon PN9000-SX PCI Gigabit Ethernet
+ adapter. The driver allows for using the Jumbo Frame option (9000
+ bytes/frame) however it requires that your switches can handle this
+ as well. To enable Jumbo Frames, add `mtu 9000' to your ifconfig
+ line.
+
+ To compile this driver as a module, choose M here: the
+ module will be called acenic.
+
+config ACENIC_OMIT_TIGON_I
+ bool "Omit support for old Tigon I based AceNICs"
+ depends on ACENIC
+ ---help---
+ Say Y here if you only have Tigon II based AceNICs and want to leave
+ out support for the older Tigon I based cards which are no longer
+ being sold (ie. the original Alteon AceNIC and 3Com 3C985 (non B
+ version)). This will reduce the size of the driver object by
+ app. 100KB. If you are not sure whether your card is a Tigon I or a
+ Tigon II, say N here.
+
+ The safe and default value for this is N.
+
+endif # NET_VENDOR_ALTEON
diff --git a/drivers/net/ethernet/alteon/Makefile b/drivers/net/ethernet/alteon/Makefile
new file mode 100644
index 00000000000..a2ca173f2a5
--- /dev/null
+++ b/drivers/net/ethernet/alteon/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Alteon network device drivers.
+#
+
+obj-$(CONFIG_ACENIC) += acenic.o
diff --git a/drivers/net/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 31798f5f5d0..f872748ab4e 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -449,7 +449,7 @@ static const struct net_device_ops ace_netdev_ops = {
.ndo_tx_timeout = ace_watchdog,
.ndo_get_stats = ace_get_stats,
.ndo_start_xmit = ace_start_xmit,
- .ndo_set_multicast_list = ace_set_multicast_list,
+ .ndo_set_rx_mode = ace_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ace_set_mac_addr,
.ndo_change_mtu = ace_change_mtu,
@@ -2478,18 +2478,18 @@ restart:
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct tx_ring_info *info;
- len += frag->size;
+ len += skb_frag_size(frag);
info = ap->skb->tx_skbuff + idx;
desc = ap->tx_ring + idx;
- mapping = pci_map_page(ap->pdev, frag->page,
- frag->page_offset, frag->size,
- PCI_DMA_TODEVICE);
+ mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
- flagsize = (frag->size << 16);
+ flagsize = skb_frag_size(frag) << 16;
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
@@ -2508,7 +2508,7 @@ restart:
info->skb = NULL;
}
dma_unmap_addr_set(info, mapping, mapping);
- dma_unmap_len_set(info, maplen, frag->size);
+ dma_unmap_len_set(info, maplen, skb_frag_size(frag));
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
}
}
diff --git a/drivers/net/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index 51c486cfbb8..51c486cfbb8 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
diff --git a/drivers/net/7990.c b/drivers/net/ethernet/amd/7990.c
index 60b35fb5f52..60b35fb5f52 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
diff --git a/drivers/net/7990.h b/drivers/net/ethernet/amd/7990.h
index 0a5837b9642..0a5837b9642 100644
--- a/drivers/net/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
new file mode 100644
index 00000000000..238b537b68f
--- /dev/null
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -0,0 +1,195 @@
+#
+# AMD network device configuration
+#
+
+config NET_VENDOR_AMD
+ bool "AMD devices"
+ default y
+ depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
+ SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
+ (ARM && ARCH_EBSA110) || ISA || EISA || MCA || PCMCIA
+ ---help---
+ If you have a network (Ethernet) chipset belonging to this class,
+ say Y.
+
+ Note that the answer to this question does not directly affect
+ the kernel: saying N will just case the configurator to skip all
+ the questions regarding AMD chipsets. If you say Y, you will be asked
+ for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_AMD
+
+config A2065
+ tristate "A2065 support"
+ depends on ZORRO
+ select CRC32
+ ---help---
+ If you have a Commodore A2065 Ethernet adapter, say Y. Otherwise,
+ say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called a2065.
+
+config AMD8111_ETH
+ tristate "AMD 8111 (new PCI LANCE) support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ If you have an AMD 8111-based PCI LANCE ethernet card,
+ answer Y here and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called amd8111e.
+
+config LANCE
+ tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
+ depends on ISA && ISA_DMA_API
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Some LinkSys cards are
+ of this type.
+
+ To compile this driver as a module, choose M here: the module
+ will be called lance. This is recommended.
+
+config PCNET32
+ tristate "AMD PCnet32 PCI support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ If you have a PCnet32 or PCnetPCI based network (Ethernet) card,
+ answer Y here and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pcnet32.
+
+config ARIADNE
+ tristate "Ariadne support"
+ depends on ZORRO
+ ---help---
+ If you have a Village Tronic Ariadne Ethernet adapter, say Y.
+ Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ariadne.
+
+config ARM_AM79C961A
+ bool "ARM EBSA110 AM79C961A support"
+ depends on ARM && ARCH_EBSA110
+ select CRC32
+ ---help---
+ If you wish to compile a kernel for the EBSA-110, then you should
+ always answer Y to this.
+
+config ATARILANCE
+ tristate "Atari LANCE support"
+ depends on ATARI
+ ---help---
+ Say Y to include support for several Atari Ethernet adapters based
+ on the AMD LANCE chipset: RieblCard (with or without battery), or
+ PAMCard VME (also the version by Rhotron, with different addresses).
+
+config DECLANCE
+ tristate "DEC LANCE ethernet controller support"
+ depends on MACH_DECSTATION
+ select CRC32
+ ---help---
+ This driver is for the series of Ethernet controllers produced by
+ DEC (now Compaq) based on the AMD LANCE chipset, including the
+ DEPCA series. (This chipset is better known via the NE2100 cards.)
+
+config DEPCA
+ tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support"
+ depends on (ISA || EISA || MCA)
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto> as well as
+ <file:drivers/net/depca.c>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called depca.
+
+config HPLANCE
+ bool "HP on-board LANCE support"
+ depends on DIO
+ select CRC32
+ ---help---
+ If you want to use the builtin "LANCE" Ethernet controller on an
+ HP300 machine, say Y here.
+
+config MIPS_AU1X00_ENET
+ tristate "MIPS AU1000 Ethernet support"
+ depends on MIPS_ALCHEMY
+ select PHYLIB
+ select CRC32
+ ---help---
+ If you have an Alchemy Semi AU1X00 based system
+ say Y. Otherwise, say N.
+
+config MVME147_NET
+ tristate "MVME147 (LANCE) Ethernet support"
+ depends on MVME147
+ select CRC32
+ ---help---
+ Support for the on-board Ethernet interface on the Motorola MVME147
+ single-board computer. Say Y here to include the
+ driver for this chip in your kernel.
+ To compile this driver as a module, choose M here.
+
+config PCMCIA_NMCLAN
+ tristate "New Media PCMCIA support"
+ depends on PCMCIA
+ help
+ Say Y here if you intend to attach a New Media Ethernet or LiveWire
+ PCMCIA (PC-card) Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called nmclan_cs. If unsure, say N.
+
+config NI65
+ tristate "NI6510 support"
+ depends on ISA && ISA_DMA_API
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ni65.
+
+config SUN3LANCE
+ tristate "Sun3/Sun3x on-board LANCE support"
+ depends on (SUN3 || SUN3X)
+ ---help---
+ Most Sun3 and Sun3x motherboards (including the 3/50, 3/60 and 3/80)
+ featured an AMD LANCE 10Mbit Ethernet controller on board; say Y
+ here to compile in the Linux driver for this and enable Ethernet.
+ General Linux information on the Sun 3 and 3x series (now
+ discontinued) is at
+ <http://www.angelfire.com/ca2/tech68k/sun3.html>.
+
+ If you're not building a kernel for a Sun 3, say N.
+
+config SUNLANCE
+ tristate "Sun LANCE support"
+ depends on SBUS
+ select CRC32
+ ---help---
+ This driver supports the "le" interface present on all 32-bit Sparc
+ systems, on some older Ultra systems and as an Sbus option. These
+ cards are based on the AMD LANCE chipset, which is better known
+ via the NE2100 cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunlance.
+
+endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
new file mode 100644
index 00000000000..175caa5328c
--- /dev/null
+++ b/drivers/net/ethernet/amd/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the AMD network device drivers.
+#
+
+obj-$(CONFIG_A2065) += a2065.o
+obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
+obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
+obj-$(CONFIG_ARIADNE) += ariadne.o
+obj-$(CONFIG_ATARILANCE) += atarilance.o
+obj-$(CONFIG_DECLANCE) += declance.o
+obj-$(CONFIG_DEPCA) += depca.o
+obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
+obj-$(CONFIG_LANCE) += lance.o
+obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
+obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
+obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o
+obj-$(CONFIG_NI65) += ni65.o
+obj-$(CONFIG_PCNET32) += pcnet32.o
+obj-$(CONFIG_SUN3LANCE) += sun3lance.o
+obj-$(CONFIG_SUNLANCE) += sunlance.o
diff --git a/drivers/net/a2065.c b/drivers/net/ethernet/amd/a2065.c
index e1e1b07d9b8..825e5d4ef4c 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -664,7 +664,7 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_stop = lance_close,
.ndo_start_xmit = lance_start_xmit,
.ndo_tx_timeout = lance_tx_timeout,
- .ndo_set_multicast_list = lance_set_multicast,
+ .ndo_set_rx_mode = lance_set_multicast,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/a2065.h b/drivers/net/ethernet/amd/a2065.h
index 5117759d4e9..5117759d4e9 100644
--- a/drivers/net/a2065.h
+++ b/drivers/net/ethernet/amd/a2065.h
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 3b1416e3d21..7d5ded80d2d 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -662,7 +662,7 @@ static const struct net_device_ops am79c961_netdev_ops = {
.ndo_open = am79c961_open,
.ndo_stop = am79c961_close,
.ndo_start_xmit = am79c961_sendpacket,
- .ndo_set_multicast_list = am79c961_setmulticastlist,
+ .ndo_set_rx_mode = am79c961_setmulticastlist,
.ndo_tx_timeout = am79c961_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/arm/am79c961a.h b/drivers/net/ethernet/amd/am79c961a.h
index fd634d32756..fd634d32756 100644
--- a/drivers/net/arm/am79c961a.h
+++ b/drivers/net/ethernet/amd/am79c961a.h
diff --git a/drivers/net/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 78002ef9c0e..a9745f4ddbf 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1798,7 +1798,7 @@ static const struct net_device_ops amd8111e_netdev_ops = {
.ndo_start_xmit = amd8111e_start_xmit,
.ndo_tx_timeout = amd8111e_tx_timeout,
.ndo_get_stats = amd8111e_get_stats,
- .ndo_set_multicast_list = amd8111e_set_multicast_list,
+ .ndo_set_rx_mode = amd8111e_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = amd8111e_set_mac_address,
.ndo_do_ioctl = amd8111e_ioctl,
diff --git a/drivers/net/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 2ff2e7a12dd..2ff2e7a12dd 100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
diff --git a/drivers/net/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 7ed78f40204..eb18e1fe65c 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -704,7 +704,7 @@ static const struct net_device_ops ariadne_netdev_ops = {
.ndo_start_xmit = ariadne_start_xmit,
.ndo_tx_timeout = ariadne_tx_timeout,
.ndo_get_stats = ariadne_get_stats,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ariadne.h b/drivers/net/ethernet/amd/ariadne.h
index 727be5cdd1e..727be5cdd1e 100644
--- a/drivers/net/ariadne.h
+++ b/drivers/net/ethernet/amd/ariadne.h
diff --git a/drivers/net/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 1264d781b55..15bfa28d6c5 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -456,7 +456,7 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_open = lance_open,
.ndo_stop = lance_close,
.ndo_start_xmit = lance_start_xmit,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_set_mac_address = lance_set_mac_address,
.ndo_tx_timeout = lance_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index b9debcfb61a..82386677bb8 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1010,7 +1010,7 @@ static const struct net_device_ops au1000_netdev_ops = {
.ndo_open = au1000_open,
.ndo_stop = au1000_close,
.ndo_start_xmit = au1000_tx,
- .ndo_set_multicast_list = au1000_multicast_list,
+ .ndo_set_rx_mode = au1000_multicast_list,
.ndo_do_ioctl = au1000_ioctl,
.ndo_tx_timeout = au1000_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index 6229c774552..6229c774552 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
diff --git a/drivers/net/declance.c b/drivers/net/ethernet/amd/declance.c
index d5598f6584a..73f8d4fa682 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1015,7 +1015,7 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_stop = lance_close,
.ndo_start_xmit = lance_start_xmit,
.ndo_tx_timeout = lance_tx_timeout,
- .ndo_set_multicast_list = lance_set_multicast,
+ .ndo_set_rx_mode = lance_set_multicast,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/depca.c b/drivers/net/ethernet/amd/depca.c
index f2015a85197..681970c07f2 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/ethernet/amd/depca.c
@@ -572,7 +572,7 @@ static const struct net_device_ops depca_netdev_ops = {
.ndo_open = depca_open,
.ndo_start_xmit = depca_start_xmit,
.ndo_stop = depca_close,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_do_ioctl = depca_ioctl,
.ndo_tx_timeout = depca_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/depca.h b/drivers/net/ethernet/amd/depca.h
index ee42648dbde..cdcfe4252c1 100644
--- a/drivers/net/depca.h
+++ b/drivers/net/ethernet/amd/depca.h
@@ -157,8 +157,6 @@
*/
#include <linux/sockios.h>
-#define DEPCAIOCTL SIOCDEVPRIVATE
-
struct depca_ioctl {
unsigned short cmd; /* Command to run */
unsigned short len; /* Length of the data buffer */
diff --git a/drivers/net/hplance.c b/drivers/net/ethernet/amd/hplance.c
index a900d5bf294..86aa0d546a5 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -74,7 +74,7 @@ static const struct net_device_ops hplance_netdev_ops = {
.ndo_open = hplance_open,
.ndo_stop = hplance_close,
.ndo_start_xmit = lance_start_xmit,
- .ndo_set_multicast_list = lance_set_multicast,
+ .ndo_set_rx_mode = lance_set_multicast,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/hplance.h b/drivers/net/ethernet/amd/hplance.h
index 04aee9e0376..04aee9e0376 100644
--- a/drivers/net/hplance.h
+++ b/drivers/net/ethernet/amd/hplance.h
diff --git a/drivers/net/lance.c b/drivers/net/ethernet/amd/lance.c
index 02336edce74..a6e2e840884 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -459,7 +459,7 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_start_xmit = lance_start_xmit,
.ndo_stop = lance_close,
.ndo_get_stats = lance_get_stats,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_tx_timeout = lance_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 3a7ad840d5b..56bc47a9418 100644
--- a/drivers/net/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -61,7 +61,7 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_open = m147lance_open,
.ndo_stop = m147lance_close,
.ndo_start_xmit = lance_start_xmit,
- .ndo_set_multicast_list = lance_set_multicast,
+ .ndo_set_rx_mode = lance_set_multicast,
.ndo_tx_timeout = lance_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ni65.c b/drivers/net/ethernet/amd/ni65.c
index c75ae85eb91..6e6aa7213aa 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -406,7 +406,7 @@ static const struct net_device_ops ni65_netdev_ops = {
.ndo_stop = ni65_close,
.ndo_start_xmit = ni65_send_packet,
.ndo_tx_timeout = ni65_timeout,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ni65.h b/drivers/net/ethernet/amd/ni65.h
index e6217e35edf..e6217e35edf 100644
--- a/drivers/net/ni65.h
+++ b/drivers/net/ethernet/amd/ni65.h
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 9d70b659522..3accd5d21b0 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -430,7 +430,7 @@ static const struct net_device_ops mace_netdev_ops = {
.ndo_tx_timeout = mace_tx_timeout,
.ndo_set_config = mace_config,
.ndo_get_stats = mace_get_stats,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 80b6f36a807..f92bc6e3482 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -270,7 +270,7 @@ struct pcnet32_private {
struct sk_buff **rx_skbuff;
dma_addr_t *tx_dma_addr;
dma_addr_t *rx_dma_addr;
- struct pcnet32_access a;
+ const struct pcnet32_access *a;
spinlock_t lock; /* Guard lock */
unsigned int cur_rx, cur_tx; /* The next free ring entry */
unsigned int rx_ring_size; /* current rx ring size */
@@ -379,7 +379,7 @@ static int pcnet32_wio_check(unsigned long addr)
return inw(addr + PCNET32_WIO_RAP) == 88;
}
-static struct pcnet32_access pcnet32_wio = {
+static const struct pcnet32_access pcnet32_wio = {
.read_csr = pcnet32_wio_read_csr,
.write_csr = pcnet32_wio_write_csr,
.read_bcr = pcnet32_wio_read_bcr,
@@ -434,7 +434,7 @@ static int pcnet32_dwio_check(unsigned long addr)
return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
}
-static struct pcnet32_access pcnet32_dwio = {
+static const struct pcnet32_access pcnet32_dwio = {
.read_csr = pcnet32_dwio_read_csr,
.write_csr = pcnet32_dwio_write_csr,
.read_bcr = pcnet32_dwio_read_bcr,
@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct net_device *dev)
u16 val;
netif_wake_queue(dev);
- val = lp->a.read_csr(ioaddr, CSR3);
+ val = lp->a->read_csr(ioaddr, CSR3);
val &= 0x00ff;
- lp->a.write_csr(ioaddr, CSR3, val);
+ lp->a->write_csr(ioaddr, CSR3, val);
napi_enable(&lp->napi);
}
@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
r = mii_link_ok(&lp->mii_if);
} else if (lp->chip_version >= PCNET32_79C970A) {
ulong ioaddr = dev->base_addr; /* card base I/O address */
- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
} else { /* can not detect link on really old chips */
r = 1;
}
@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
pcnet32_netif_stop(dev);
spin_lock_irqsave(&lp->lock, flags);
- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
{
struct pcnet32_private *lp = netdev_priv(dev);
- struct pcnet32_access *a = &lp->a; /* access to registers */
+ const struct pcnet32_access *a = lp->a; /* access to registers */
ulong ioaddr = dev->base_addr; /* card base I/O address */
struct sk_buff *skb; /* sk buff */
int x, i; /* counters */
@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
pcnet32_netif_stop(dev);
spin_lock_irqsave(&lp->lock, flags);
- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
/* Reset the PCNET32 */
- lp->a.reset(ioaddr);
- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+ lp->a->reset(ioaddr);
+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
/* switch pcnet32 to 32bit mode */
- lp->a.write_bcr(ioaddr, 20, 2);
+ lp->a->write_bcr(ioaddr, 20, 2);
/* purge & init rings but don't actually restart */
pcnet32_restart(dev, 0x0000);
- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
/* Initialize Transmit buffers. */
size = data_len + 15;
@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
/* set int loopback in CSR15 */
x = a->read_csr(ioaddr, CSR15) & 0xfffc;
- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
teststatus = cpu_to_le16(0x8000);
- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
/* Check status of descriptors */
for (x = 0; x < numbuffs; x++) {
@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
}
}
- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
wmb();
if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
@@ -1015,7 +1015,7 @@ clean_up:
pcnet32_restart(dev, CSR0_NORMAL);
} else {
pcnet32_purge_rx_ring(dev);
- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
}
spin_unlock_irqrestore(&lp->lock, flags);
@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
struct pcnet32_private *lp = netdev_priv(dev);
- struct pcnet32_access *a = &lp->a;
+ const struct pcnet32_access *a = lp->a;
ulong ioaddr = dev->base_addr;
unsigned long flags;
int i;
@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
{
int csr5;
struct pcnet32_private *lp = netdev_priv(dev);
- struct pcnet32_access *a = &lp->a;
+ const struct pcnet32_access *a = lp->a;
ulong ioaddr = dev->base_addr;
int ticks;
@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
spin_lock_irqsave(&lp->lock, flags);
if (pcnet32_tx(dev)) {
/* reset the chip to clear the error condition, then restart */
- lp->a.reset(ioaddr);
- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+ lp->a->reset(ioaddr);
+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
pcnet32_restart(dev, CSR0_START);
netif_wake_queue(dev);
}
@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
__napi_complete(napi);
/* clear interrupt masks */
- val = lp->a.read_csr(ioaddr, CSR3);
+ val = lp->a->read_csr(ioaddr, CSR3);
val &= 0x00ff;
- lp->a.write_csr(ioaddr, CSR3, val);
+ lp->a->write_csr(ioaddr, CSR3, val);
/* Set interrupt enable. */
- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
spin_unlock_irqrestore(&lp->lock, flags);
}
@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
int i, csr0;
u16 *buff = ptr;
struct pcnet32_private *lp = netdev_priv(dev);
- struct pcnet32_access *a = &lp->a;
+ const struct pcnet32_access *a = lp->a;
ulong ioaddr = dev->base_addr;
unsigned long flags;
@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
for (j = 0; j < PCNET32_MAX_PHYS; j++) {
if (lp->phymask & (1 << j)) {
for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
- lp->a.write_bcr(ioaddr, 33,
+ lp->a->write_bcr(ioaddr, 33,
(j << 5) | i);
- *buff++ = lp->a.read_bcr(ioaddr, 34);
+ *buff++ = lp->a->read_bcr(ioaddr, 34);
}
}
}
@@ -1505,7 +1505,7 @@ static const struct net_device_ops pcnet32_netdev_ops = {
.ndo_start_xmit = pcnet32_start_xmit,
.ndo_tx_timeout = pcnet32_tx_timeout,
.ndo_get_stats = pcnet32_get_stats,
- .ndo_set_multicast_list = pcnet32_set_multicast_list,
+ .ndo_set_rx_mode = pcnet32_set_multicast_list,
.ndo_do_ioctl = pcnet32_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
@@ -1528,7 +1528,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
int chip_version;
char *chipname;
struct net_device *dev;
- struct pcnet32_access *a = NULL;
+ const struct pcnet32_access *a = NULL;
u8 promaddr[6];
int ret = -ENODEV;
@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
lp->options |= PCNET32_PORT_FD;
- lp->a = *a;
+ lp->a = a;
/* prior to register_netdev, dev->name is not yet correct */
if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
if (lp->mii) {
/* lp->phycount and lp->phymask are set to 0 by memset above */
- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
/* scan for PHYs */
for (i = 0; i < PCNET32_MAX_PHYS; i++) {
unsigned short id1, id2;
@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
pr_info("Found PHY %04x:%04x at address %d\n",
id1, id2, i);
}
- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
if (lp->phycount > 1)
lp->options |= PCNET32_PORT_MII;
}
@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_device *dev)
}
/* Reset the PCNET32 */
- lp->a.reset(ioaddr);
+ lp->a->reset(ioaddr);
/* switch pcnet32 to 32bit mode */
- lp->a.write_bcr(ioaddr, 20, 2);
+ lp->a->write_bcr(ioaddr, 20, 2);
netif_printk(lp, ifup, KERN_DEBUG, dev,
"%s() irq %d tx/rx rings %#x/%#x init %#x\n",
@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_device *dev)
(u32) (lp->init_dma_addr));
/* set/reset autoselect bit */
- val = lp->a.read_bcr(ioaddr, 2) & ~2;
+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
if (lp->options & PCNET32_PORT_ASEL)
val |= 2;
- lp->a.write_bcr(ioaddr, 2, val);
+ lp->a->write_bcr(ioaddr, 2, val);
/* handle full duplex setting */
if (lp->mii_if.full_duplex) {
- val = lp->a.read_bcr(ioaddr, 9) & ~3;
+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
if (lp->options & PCNET32_PORT_FD) {
val |= 1;
if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_device *dev)
if (lp->chip_version == 0x2627)
val |= 3;
}
- lp->a.write_bcr(ioaddr, 9, val);
+ lp->a->write_bcr(ioaddr, 9, val);
}
/* set/reset GPSI bit in test register */
- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
val |= 0x10;
- lp->a.write_csr(ioaddr, 124, val);
+ lp->a->write_csr(ioaddr, 124, val);
/* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_device *dev)
* duplex, and/or enable auto negotiation, and clear DANAS
*/
if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
- lp->a.write_bcr(ioaddr, 32,
- lp->a.read_bcr(ioaddr, 32) | 0x0080);
+ lp->a->write_bcr(ioaddr, 32,
+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
/* disable Auto Negotiation, set 10Mpbs, HD */
- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
if (lp->options & PCNET32_PORT_FD)
val |= 0x10;
if (lp->options & PCNET32_PORT_100)
val |= 0x08;
- lp->a.write_bcr(ioaddr, 32, val);
+ lp->a->write_bcr(ioaddr, 32, val);
} else {
if (lp->options & PCNET32_PORT_ASEL) {
- lp->a.write_bcr(ioaddr, 32,
- lp->a.read_bcr(ioaddr,
+ lp->a->write_bcr(ioaddr, 32,
+ lp->a->read_bcr(ioaddr,
32) | 0x0080);
/* enable auto negotiate, setup, disable fd */
- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
val |= 0x20;
- lp->a.write_bcr(ioaddr, 32, val);
+ lp->a->write_bcr(ioaddr, 32, val);
}
}
} else {
@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_device *dev)
* There is really no good other way to handle multiple PHYs
* other than turning off all automatics
*/
- val = lp->a.read_bcr(ioaddr, 2);
- lp->a.write_bcr(ioaddr, 2, val & ~2);
- val = lp->a.read_bcr(ioaddr, 32);
- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
+ val = lp->a->read_bcr(ioaddr, 2);
+ lp->a->write_bcr(ioaddr, 2, val & ~2);
+ val = lp->a->read_bcr(ioaddr, 32);
+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
if (!(lp->options & PCNET32_PORT_ASEL)) {
/* setup ecmd */
@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_device *dev)
ethtool_cmd_speed_set(&ecmd,
(lp->options & PCNET32_PORT_100) ?
SPEED_100 : SPEED_10);
- bcr9 = lp->a.read_bcr(ioaddr, 9);
+ bcr9 = lp->a->read_bcr(ioaddr, 9);
if (lp->options & PCNET32_PORT_FD) {
ecmd.duplex = DUPLEX_FULL;
@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_device *dev)
ecmd.duplex = DUPLEX_HALF;
bcr9 |= ~(1 << 0);
}
- lp->a.write_bcr(ioaddr, 9, bcr9);
+ lp->a->write_bcr(ioaddr, 9, bcr9);
}
for (i = 0; i < PCNET32_MAX_PHYS; i++) {
@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_device *dev)
#ifdef DO_DXSUFLO
if (lp->dxsuflo) { /* Disable transmit stop on underflow */
- val = lp->a.read_csr(ioaddr, CSR3);
+ val = lp->a->read_csr(ioaddr, CSR3);
val |= 0x40;
- lp->a.write_csr(ioaddr, CSR3, val);
+ lp->a->write_csr(ioaddr, CSR3, val);
}
#endif
@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_device *dev)
napi_enable(&lp->napi);
/* Re-initialize the PCNET32, and start it when done. */
- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
netif_start_queue(dev);
@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_device *dev)
i = 0;
while (i++ < 100)
- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
break;
/*
* We used to clear the InitDone bit, 0x0100, here but Mark Stockton
* reports that doing so triggers a bug in the '974.
*/
- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
netif_printk(lp, ifup, KERN_DEBUG, dev,
"pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
i,
(u32) (lp->init_dma_addr),
- lp->a.read_csr(ioaddr, CSR0));
+ lp->a->read_csr(ioaddr, CSR0));
spin_unlock_irqrestore(&lp->lock, flags);
@@ -2218,7 +2218,7 @@ err_free_ring:
* Switch back to 16bit mode to avoid problems with dumb
* DOS packet driver after a warm reboot
*/
- lp->a.write_bcr(ioaddr, 20, 4);
+ lp->a->write_bcr(ioaddr, 20, 4);
err_free_irq:
spin_unlock_irqrestore(&lp->lock, flags);
@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
/* wait for stop */
for (i = 0; i < 100; i++)
- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
break;
if (i >= 100)
@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
return;
/* ReInit Ring */
- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
i = 0;
while (i++ < 1000)
- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
break;
- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
}
static void pcnet32_tx_timeout(struct net_device *dev)
@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
/* Transmitter timeout, serious problems. */
if (pcnet32_debug & NETIF_MSG_DRV)
pr_err("%s: transmit timed out, status %4.4x, resetting\n",
- dev->name, lp->a.read_csr(ioaddr, CSR0));
- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
+ dev->name, lp->a->read_csr(ioaddr, CSR0));
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
dev->stats.tx_errors++;
if (netif_msg_tx_err(lp)) {
int i;
@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
netif_printk(lp, tx_queued, KERN_DEBUG, dev,
"%s() called, csr0 %4.4x\n",
- __func__, lp->a.read_csr(ioaddr, CSR0));
+ __func__, lp->a->read_csr(ioaddr, CSR0));
/* Default status -- will not enable Successful-TxDone
* interrupt when that option is available to us.
@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
dev->stats.tx_bytes += skb->len;
/* Trigger an immediate send poll. */
- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
lp->tx_full = 1;
@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
spin_lock(&lp->lock);
- csr0 = lp->a.read_csr(ioaddr, CSR0);
+ csr0 = lp->a->read_csr(ioaddr, CSR0);
while ((csr0 & 0x8f00) && --boguscnt >= 0) {
if (csr0 == 0xffff)
break; /* PCMCIA remove happened */
/* Acknowledge all of the current interrupt sources ASAP. */
- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
netif_printk(lp, intr, KERN_DEBUG, dev,
"interrupt csr0=%#2.2x new csr=%#2.2x\n",
- csr0, lp->a.read_csr(ioaddr, CSR0));
+ csr0, lp->a->read_csr(ioaddr, CSR0));
/* Log misc errors. */
if (csr0 & 0x4000)
@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
if (napi_schedule_prep(&lp->napi)) {
u16 val;
/* set interrupt masks */
- val = lp->a.read_csr(ioaddr, CSR3);
+ val = lp->a->read_csr(ioaddr, CSR3);
val |= 0x5f00;
- lp->a.write_csr(ioaddr, CSR3, val);
+ lp->a->write_csr(ioaddr, CSR3, val);
__napi_schedule(&lp->napi);
break;
}
- csr0 = lp->a.read_csr(ioaddr, CSR0);
+ csr0 = lp->a->read_csr(ioaddr, CSR0);
}
netif_printk(lp, intr, KERN_DEBUG, dev,
"exiting interrupt, csr0=%#4.4x\n",
- lp->a.read_csr(ioaddr, CSR0));
+ lp->a->read_csr(ioaddr, CSR0));
spin_unlock(&lp->lock);
@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
netif_printk(lp, ifdown, KERN_DEBUG, dev,
"Shutting down ethercard, status was %2.2x\n",
- lp->a.read_csr(ioaddr, CSR0));
+ lp->a->read_csr(ioaddr, CSR0));
/* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
/*
* Switch back to 16bit mode to avoid problems with dumb
* DOS packet driver after a warm reboot
*/
- lp->a.write_bcr(ioaddr, 20, 4);
+ lp->a->write_bcr(ioaddr, 20, 4);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
unsigned long flags;
spin_lock_irqsave(&lp->lock, flags);
- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
spin_unlock_irqrestore(&lp->lock, flags);
return &dev->stats;
@@ -2577,10 +2577,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
ib->filter[0] = cpu_to_le32(~0U);
ib->filter[1] = cpu_to_le32(~0U);
- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
return;
}
/* clear the multicast filter */
@@ -2594,7 +2594,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
}
for (i = 0; i < 4; i++)
- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
le16_to_cpu(mcast_table[i]));
}
@@ -2609,28 +2609,28 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
suspended = pcnet32_suspend(dev, &flags, 0);
- csr15 = lp->a.read_csr(ioaddr, CSR15);
+ csr15 = lp->a->read_csr(ioaddr, CSR15);
if (dev->flags & IFF_PROMISC) {
/* Log any net taps. */
netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
lp->init_block->mode =
cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
7);
- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
} else {
lp->init_block->mode =
cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
pcnet32_load_multicast(dev);
}
if (suspended) {
int csr5;
/* clear SUSPEND (SPND) - CSR5 bit 0 */
- csr5 = lp->a.read_csr(ioaddr, CSR5);
- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
+ csr5 = lp->a->read_csr(ioaddr, CSR5);
+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
} else {
- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
pcnet32_restart(dev, CSR0_NORMAL);
netif_wake_queue(dev);
}
@@ -2648,8 +2648,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
if (!lp->mii)
return 0;
- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
- val_out = lp->a.read_bcr(ioaddr, 34);
+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ val_out = lp->a->read_bcr(ioaddr, 34);
return val_out;
}
@@ -2663,8 +2663,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
if (!lp->mii)
return;
- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
- lp->a.write_bcr(ioaddr, 34, val);
+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ lp->a->write_bcr(ioaddr, 34, val);
}
static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2741,7 +2741,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
curr_link = mii_link_ok(&lp->mii_if);
} else {
ulong ioaddr = dev->base_addr; /* card base I/O address */
- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
}
if (!curr_link) {
if (prev_link || verbose) {
@@ -2764,13 +2764,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
(ecmd.duplex == DUPLEX_FULL)
? "full" : "half");
}
- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
if (lp->mii_if.full_duplex)
bcr9 |= (1 << 0);
else
bcr9 &= ~(1 << 0);
- lp->a.write_bcr(dev->base_addr, 9, bcr9);
+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
}
} else {
netif_info(lp, link, dev, "link up\n");
diff --git a/drivers/net/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 7d9ec23aabf..080b71fcc68 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -297,7 +297,7 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_open = lance_open,
.ndo_stop = lance_close,
.ndo_start_xmit = lance_start_xmit,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_set_mac_address = NULL,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 06f2d4382dc..8fda457f94c 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1298,7 +1298,7 @@ static const struct net_device_ops sparc_lance_ops = {
.ndo_open = lance_open,
.ndo_stop = lance_close,
.ndo_start_xmit = lance_start_xmit,
- .ndo_set_multicast_list = lance_set_multicast,
+ .ndo_set_rx_mode = lance_set_multicast,
.ndo_tx_timeout = lance_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
new file mode 100644
index 00000000000..a759d5483ab
--- /dev/null
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -0,0 +1,77 @@
+#
+# Apple device configuration
+#
+
+config NET_VENDOR_APPLE
+ bool "Apple devices"
+ default y
+ depends on (PPC_PMAC && PPC32) || MAC
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about IBM devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_APPLE
+
+config MACE
+ tristate "MACE (Power Mac ethernet) support"
+ depends on PPC_PMAC && PPC32
+ select CRC32
+ ---help---
+ Power Macintoshes and clones with Ethernet built-in on the
+ motherboard will usually use a MACE (Medium Access Control for
+ Ethernet) interface. Say Y to include support for the MACE chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mace.
+
+config MACE_AAUI_PORT
+ bool "Use AAUI port instead of TP by default"
+ depends on MACE
+ ---help---
+ Some Apple machines (notably the Apple Network Server) which use the
+ MACE ethernet chip have an Apple AUI port (small 15-pin connector),
+ instead of an 8-pin RJ45 connector for twisted-pair ethernet. Say
+ Y here if you have such a machine. If unsure, say N.
+ The driver will default to AAUI on ANS anyway, and if you use it as
+ a module, you can provide the port_aaui=0|1 to force the driver.
+
+config BMAC
+ tristate "BMAC (G3 ethernet) support"
+ depends on PPC_PMAC && PPC32
+ select CRC32
+ ---help---
+ Say Y for support of BMAC Ethernet interfaces. These are used on G3
+ computers.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bmac.
+
+config MAC89x0
+ tristate "Macintosh CS89x0 based ethernet cards"
+ depends on MAC
+ ---help---
+ Support for CS89x0 chipset based Ethernet cards. If you have a
+ Nubus or LC-PDS network (Ethernet) card of this type, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. This module will
+ be called mac89x0.
+
+config MACMACE
+ bool "Macintosh (AV) onboard MACE ethernet"
+ depends on MAC
+ select CRC32
+ ---help---
+ Support for the onboard AMD 79C940 MACE Ethernet controller used in
+ the 660AV and 840AV Macintosh. If you have one of these Macintoshes
+ say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+endif # NET_VENDOR_APPLE
diff --git a/drivers/net/ethernet/apple/Makefile b/drivers/net/ethernet/apple/Makefile
new file mode 100644
index 00000000000..0d3a5919c95
--- /dev/null
+++ b/drivers/net/ethernet/apple/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Apple network device drivers.
+#
+
+obj-$(CONFIG_MACE) += mace.o
+obj-$(CONFIG_BMAC) += bmac.o
+obj-$(CONFIG_MAC89x0) += mac89x0.o
+obj-$(CONFIG_MACMACE) += macmace.o
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
new file mode 100644
index 00000000000..d070b229dbf
--- /dev/null
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -0,0 +1,1685 @@
+/*
+ * Network device driver for the BMAC ethernet controller on
+ * Apple Powermacs. Assumes it's under a DBDMA controller.
+ *
+ * Copyright (C) 1998 Randy Gobbel.
+ *
+ * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
+ * dynamic procfs inode.
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/crc32.h>
+#include <linux/bitrev.h>
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+#include <asm/prom.h>
+#include <asm/dbdma.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/macio.h>
+#include <asm/irq.h>
+
+#include "bmac.h"
+
+#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
+#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
+
+/*
+ * CRC polynomial - used in working out multicast filter bits.
+ */
+#define ENET_CRCPOLY 0x04c11db7
+
+/* switch to use multicast code lifted from sunhme driver */
+#define SUNHME_MULTICAST
+
+#define N_RX_RING 64
+#define N_TX_RING 32
+#define MAX_TX_ACTIVE 1
+#define ETHERCRC 4
+#define ETHERMINPACKET 64
+#define ETHERMTU 1500
+#define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2)
+#define TX_TIMEOUT HZ /* 1 second */
+
+/* Bits in transmit DMA status */
+#define TX_DMA_ERR 0x80
+
+#define XXDEBUG(args)
+
+struct bmac_data {
+ /* volatile struct bmac *bmac; */
+ struct sk_buff_head *queue;
+ volatile struct dbdma_regs __iomem *tx_dma;
+ int tx_dma_intr;
+ volatile struct dbdma_regs __iomem *rx_dma;
+ int rx_dma_intr;
+ volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
+ volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
+ struct macio_dev *mdev;
+ int is_bmac_plus;
+ struct sk_buff *rx_bufs[N_RX_RING];
+ int rx_fill;
+ int rx_empty;
+ struct sk_buff *tx_bufs[N_TX_RING];
+ int tx_fill;
+ int tx_empty;
+ unsigned char tx_fullup;
+ struct timer_list tx_timeout;
+ int timeout_active;
+ int sleeping;
+ int opened;
+ unsigned short hash_use_count[64];
+ unsigned short hash_table_mask[4];
+ spinlock_t lock;
+};
+
+#if 0 /* Move that to ethtool */
+
+typedef struct bmac_reg_entry {
+ char *name;
+ unsigned short reg_offset;
+} bmac_reg_entry_t;
+
+#define N_REG_ENTRIES 31
+
+static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
+ {"MEMADD", MEMADD},
+ {"MEMDATAHI", MEMDATAHI},
+ {"MEMDATALO", MEMDATALO},
+ {"TXPNTR", TXPNTR},
+ {"RXPNTR", RXPNTR},
+ {"IPG1", IPG1},
+ {"IPG2", IPG2},
+ {"ALIMIT", ALIMIT},
+ {"SLOT", SLOT},
+ {"PALEN", PALEN},
+ {"PAPAT", PAPAT},
+ {"TXSFD", TXSFD},
+ {"JAM", JAM},
+ {"TXCFG", TXCFG},
+ {"TXMAX", TXMAX},
+ {"TXMIN", TXMIN},
+ {"PAREG", PAREG},
+ {"DCNT", DCNT},
+ {"NCCNT", NCCNT},
+ {"NTCNT", NTCNT},
+ {"EXCNT", EXCNT},
+ {"LTCNT", LTCNT},
+ {"TXSM", TXSM},
+ {"RXCFG", RXCFG},
+ {"RXMAX", RXMAX},
+ {"RXMIN", RXMIN},
+ {"FRCNT", FRCNT},
+ {"AECNT", AECNT},
+ {"FECNT", FECNT},
+ {"RXSM", RXSM},
+ {"RXCV", RXCV}
+};
+
+#endif
+
+static unsigned char *bmac_emergency_rxbuf;
+
+/*
+ * Number of bytes of private data per BMAC: allow enough for
+ * the rx and tx dma commands plus a branch dma command each,
+ * and another 16 bytes to allow us to align the dma command
+ * buffers on a 16 byte boundary.
+ */
+#define PRIV_BYTES (sizeof(struct bmac_data) \
+ + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
+ + sizeof(struct sk_buff_head))
+
+static int bmac_open(struct net_device *dev);
+static int bmac_close(struct net_device *dev);
+static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
+static void bmac_set_multicast(struct net_device *dev);
+static void bmac_reset_and_enable(struct net_device *dev);
+static void bmac_start_chip(struct net_device *dev);
+static void bmac_init_chip(struct net_device *dev);
+static void bmac_init_registers(struct net_device *dev);
+static void bmac_enable_and_reset_chip(struct net_device *dev);
+static int bmac_set_address(struct net_device *dev, void *addr);
+static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
+static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
+static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
+static void bmac_set_timeout(struct net_device *dev);
+static void bmac_tx_timeout(unsigned long data);
+static int bmac_output(struct sk_buff *skb, struct net_device *dev);
+static void bmac_start(struct net_device *dev);
+
+#define DBDMA_SET(x) ( ((x) | (x) << 16) )
+#define DBDMA_CLEAR(x) ( (x) << 16)
+
+static inline void
+dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
+{
+ __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
+}
+
+static inline unsigned long
+dbdma_ld32(volatile __u32 __iomem *a)
+{
+ __u32 swap;
+ __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));
+ return swap;
+}
+
+static void
+dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
+{
+ dbdma_st32(&dmap->control,
+ DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
+ eieio();
+}
+
+static void
+dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
+{
+ dbdma_st32(&dmap->control,
+ DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
+ eieio();
+ while (dbdma_ld32(&dmap->status) & RUN)
+ eieio();
+}
+
+static void
+dbdma_setcmd(volatile struct dbdma_cmd *cp,
+ unsigned short cmd, unsigned count, unsigned long addr,
+ unsigned long cmd_dep)
+{
+ out_le16(&cp->command, cmd);
+ out_le16(&cp->req_count, count);
+ out_le32(&cp->phy_addr, addr);
+ out_le32(&cp->cmd_dep, cmd_dep);
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->res_count, 0);
+}
+
+static inline
+void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
+{
+ out_le16((void __iomem *)dev->base_addr + reg_offset, data);
+}
+
+
+static inline
+unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
+{
+ return in_le16((void __iomem *)dev->base_addr + reg_offset);
+}
+
+static void
+bmac_enable_and_reset_chip(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+
+ if (rd)
+ dbdma_reset(rd);
+ if (td)
+ dbdma_reset(td);
+
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
+}
+
+#define MIFDELAY udelay(10)
+
+static unsigned int
+bmac_mif_readbits(struct net_device *dev, int nb)
+{
+ unsigned int val = 0;
+
+ while (--nb >= 0) {
+ bmwrite(dev, MIFCSR, 0);
+ MIFDELAY;
+ if (bmread(dev, MIFCSR) & 8)
+ val |= 1 << nb;
+ bmwrite(dev, MIFCSR, 1);
+ MIFDELAY;
+ }
+ bmwrite(dev, MIFCSR, 0);
+ MIFDELAY;
+ bmwrite(dev, MIFCSR, 1);
+ MIFDELAY;
+ return val;
+}
+
+static void
+bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
+{
+ int b;
+
+ while (--nb >= 0) {
+ b = (val & (1 << nb))? 6: 4;
+ bmwrite(dev, MIFCSR, b);
+ MIFDELAY;
+ bmwrite(dev, MIFCSR, b|1);
+ MIFDELAY;
+ }
+}
+
+static unsigned int
+bmac_mif_read(struct net_device *dev, unsigned int addr)
+{
+ unsigned int val;
+
+ bmwrite(dev, MIFCSR, 4);
+ MIFDELAY;
+ bmac_mif_writebits(dev, ~0U, 32);
+ bmac_mif_writebits(dev, 6, 4);
+ bmac_mif_writebits(dev, addr, 10);
+ bmwrite(dev, MIFCSR, 2);
+ MIFDELAY;
+ bmwrite(dev, MIFCSR, 1);
+ MIFDELAY;
+ val = bmac_mif_readbits(dev, 17);
+ bmwrite(dev, MIFCSR, 4);
+ MIFDELAY;
+ return val;
+}
+
+static void
+bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
+{
+ bmwrite(dev, MIFCSR, 4);
+ MIFDELAY;
+ bmac_mif_writebits(dev, ~0U, 32);
+ bmac_mif_writebits(dev, 5, 4);
+ bmac_mif_writebits(dev, addr, 10);
+ bmac_mif_writebits(dev, 2, 2);
+ bmac_mif_writebits(dev, val, 16);
+ bmac_mif_writebits(dev, 3, 2);
+}
+
+static void
+bmac_init_registers(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile unsigned short regValue;
+ unsigned short *pWord16;
+ int i;
+
+ /* XXDEBUG(("bmac: enter init_registers\n")); */
+
+ bmwrite(dev, RXRST, RxResetValue);
+ bmwrite(dev, TXRST, TxResetBit);
+
+ i = 100;
+ do {
+ --i;
+ udelay(10000);
+ regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
+ } while ((regValue & TxResetBit) && i > 0);
+
+ if (!bp->is_bmac_plus) {
+ regValue = bmread(dev, XCVRIF);
+ regValue |= ClkBit | SerialMode | COLActiveLow;
+ bmwrite(dev, XCVRIF, regValue);
+ udelay(10000);
+ }
+
+ bmwrite(dev, RSEED, (unsigned short)0x1968);
+
+ regValue = bmread(dev, XIFC);
+ regValue |= TxOutputEnable;
+ bmwrite(dev, XIFC, regValue);
+
+ bmread(dev, PAREG);
+
+ /* set collision counters to 0 */
+ bmwrite(dev, NCCNT, 0);
+ bmwrite(dev, NTCNT, 0);
+ bmwrite(dev, EXCNT, 0);
+ bmwrite(dev, LTCNT, 0);
+
+ /* set rx counters to 0 */
+ bmwrite(dev, FRCNT, 0);
+ bmwrite(dev, LECNT, 0);
+ bmwrite(dev, AECNT, 0);
+ bmwrite(dev, FECNT, 0);
+ bmwrite(dev, RXCV, 0);
+
+ /* set tx fifo information */
+ bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */
+
+ bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */
+ bmwrite(dev, TXFIFOCSR, TxFIFOEnable );
+
+ /* set rx fifo information */
+ bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
+ bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
+
+ //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */
+ bmread(dev, STATUS); /* read it just to clear it */
+
+ /* zero out the chip Hash Filter registers */
+ for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
+ bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
+ bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
+ bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
+ bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
+
+ pWord16 = (unsigned short *)dev->dev_addr;
+ bmwrite(dev, MADD0, *pWord16++);
+ bmwrite(dev, MADD1, *pWord16++);
+ bmwrite(dev, MADD2, *pWord16);
+
+ bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
+
+ bmwrite(dev, INTDISABLE, EnableNormal);
+}
+
+#if 0
+static void
+bmac_disable_interrupts(struct net_device *dev)
+{
+ bmwrite(dev, INTDISABLE, DisableAll);
+}
+
+static void
+bmac_enable_interrupts(struct net_device *dev)
+{
+ bmwrite(dev, INTDISABLE, EnableNormal);
+}
+#endif
+
+
+static void
+bmac_start_chip(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ unsigned short oldConfig;
+
+ /* enable rx dma channel */
+ dbdma_continue(rd);
+
+ oldConfig = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
+
+ /* turn on rx plus any other bits already on (promiscuous possibly) */
+ oldConfig = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
+ udelay(20000);
+}
+
+static void
+bmac_init_phy(struct net_device *dev)
+{
+ unsigned int addr;
+ struct bmac_data *bp = netdev_priv(dev);
+
+ printk(KERN_DEBUG "phy registers:");
+ for (addr = 0; addr < 32; ++addr) {
+ if ((addr & 7) == 0)
+ printk(KERN_DEBUG);
+ printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
+ }
+ printk(KERN_CONT "\n");
+
+ if (bp->is_bmac_plus) {
+ unsigned int capable, ctrl;
+
+ ctrl = bmac_mif_read(dev, 0);
+ capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
+ if (bmac_mif_read(dev, 4) != capable ||
+ (ctrl & 0x1000) == 0) {
+ bmac_mif_write(dev, 4, capable);
+ bmac_mif_write(dev, 0, 0x1200);
+ } else
+ bmac_mif_write(dev, 0, 0x1000);
+ }
+}
+
+static void bmac_init_chip(struct net_device *dev)
+{
+ bmac_init_phy(dev);
+ bmac_init_registers(dev);
+}
+
+#ifdef CONFIG_PM
+static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
+{
+ struct net_device* dev = macio_get_drvdata(mdev);
+ struct bmac_data *bp = netdev_priv(dev);
+ unsigned long flags;
+ unsigned short config;
+ int i;
+
+ netif_device_detach(dev);
+ /* prolly should wait for dma to finish & turn off the chip */
+ spin_lock_irqsave(&bp->lock, flags);
+ if (bp->timeout_active) {
+ del_timer(&bp->tx_timeout);
+ bp->timeout_active = 0;
+ }
+ disable_irq(dev->irq);
+ disable_irq(bp->tx_dma_intr);
+ disable_irq(bp->rx_dma_intr);
+ bp->sleeping = 1;
+ spin_unlock_irqrestore(&bp->lock, flags);
+ if (bp->opened) {
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+
+ config = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, (config & ~RxMACEnable));
+ config = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, (config & ~TxMACEnable));
+ bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
+ /* disable rx and tx dma */
+ st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+ st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+ /* free some skb's */
+ for (i=0; i<N_RX_RING; i++) {
+ if (bp->rx_bufs[i] != NULL) {
+ dev_kfree_skb(bp->rx_bufs[i]);
+ bp->rx_bufs[i] = NULL;
+ }
+ }
+ for (i = 0; i<N_TX_RING; i++) {
+ if (bp->tx_bufs[i] != NULL) {
+ dev_kfree_skb(bp->tx_bufs[i]);
+ bp->tx_bufs[i] = NULL;
+ }
+ }
+ }
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
+ return 0;
+}
+
+static int bmac_resume(struct macio_dev *mdev)
+{
+ struct net_device* dev = macio_get_drvdata(mdev);
+ struct bmac_data *bp = netdev_priv(dev);
+
+ /* see if this is enough */
+ if (bp->opened)
+ bmac_reset_and_enable(dev);
+
+ enable_irq(dev->irq);
+ enable_irq(bp->tx_dma_intr);
+ enable_irq(bp->rx_dma_intr);
+ netif_device_attach(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static int bmac_set_address(struct net_device *dev, void *addr)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ unsigned char *p = addr;
+ unsigned short *pWord16;
+ unsigned long flags;
+ int i;
+
+ XXDEBUG(("bmac: enter set_address\n"));
+ spin_lock_irqsave(&bp->lock, flags);
+
+ for (i = 0; i < 6; ++i) {
+ dev->dev_addr[i] = p[i];
+ }
+ /* load up the hardware address */
+ pWord16 = (unsigned short *)dev->dev_addr;
+ bmwrite(dev, MADD0, *pWord16++);
+ bmwrite(dev, MADD1, *pWord16++);
+ bmwrite(dev, MADD2, *pWord16);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+ XXDEBUG(("bmac: exit set_address\n"));
+ return 0;
+}
+
+static inline void bmac_set_timeout(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ if (bp->timeout_active)
+ del_timer(&bp->tx_timeout);
+ bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
+ bp->tx_timeout.function = bmac_tx_timeout;
+ bp->tx_timeout.data = (unsigned long) dev;
+ add_timer(&bp->tx_timeout);
+ bp->timeout_active = 1;
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static void
+bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
+{
+ void *vaddr;
+ unsigned long baddr;
+ unsigned long len;
+
+ len = skb->len;
+ vaddr = skb->data;
+ baddr = virt_to_bus(vaddr);
+
+ dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
+}
+
+static void
+bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
+{
+ unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
+
+ dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
+ virt_to_bus(addr), 0);
+}
+
+static void
+bmac_init_tx_ring(struct bmac_data *bp)
+{
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+
+ memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));
+
+ bp->tx_empty = 0;
+ bp->tx_fill = 0;
+ bp->tx_fullup = 0;
+
+ /* put a branch at the end of the tx command list */
+ dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
+ (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));
+
+ /* reset tx dma */
+ dbdma_reset(td);
+ out_le32(&td->wait_sel, 0x00200020);
+ out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
+}
+
+static int
+bmac_init_rx_ring(struct bmac_data *bp)
+{
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ int i;
+ struct sk_buff *skb;
+
+ /* initialize list of sk_buffs for receiving and set up recv dma */
+ memset((char *)bp->rx_cmds, 0,
+ (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
+ for (i = 0; i < N_RX_RING; i++) {
+ if ((skb = bp->rx_bufs[i]) == NULL) {
+ bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
+ if (skb != NULL)
+ skb_reserve(skb, 2);
+ }
+ bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
+ }
+
+ bp->rx_empty = 0;
+ bp->rx_fill = i;
+
+ /* Put a branch back to the beginning of the receive command list */
+ dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
+ (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));
+
+ /* start rx dma */
+ dbdma_reset(rd);
+ out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));
+
+ return 1;
+}
+
+
+static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+ int i;
+
+ /* see if there's a free slot in the tx ring */
+ /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
+ /* bp->tx_empty, bp->tx_fill)); */
+ i = bp->tx_fill + 1;
+ if (i >= N_TX_RING)
+ i = 0;
+ if (i == bp->tx_empty) {
+ netif_stop_queue(dev);
+ bp->tx_fullup = 1;
+ XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
+ return -1; /* can't take it at the moment */
+ }
+
+ dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);
+
+ bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);
+
+ bp->tx_bufs[bp->tx_fill] = skb;
+ bp->tx_fill = i;
+
+ dev->stats.tx_bytes += skb->len;
+
+ dbdma_continue(td);
+
+ return 0;
+}
+
+static int rxintcount;
+
+static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_cmd *cp;
+ int i, nb, stat;
+ struct sk_buff *skb;
+ unsigned int residual;
+ int last;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ if (++rxintcount < 10) {
+ XXDEBUG(("bmac_rxdma_intr\n"));
+ }
+
+ last = -1;
+ i = bp->rx_empty;
+
+ while (1) {
+ cp = &bp->rx_cmds[i];
+ stat = ld_le16(&cp->xfer_status);
+ residual = ld_le16(&cp->res_count);
+ if ((stat & ACTIVE) == 0)
+ break;
+ nb = RX_BUFLEN - residual - 2;
+ if (nb < (ETHERMINPACKET - ETHERCRC)) {
+ skb = NULL;
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ } else {
+ skb = bp->rx_bufs[i];
+ bp->rx_bufs[i] = NULL;
+ }
+ if (skb != NULL) {
+ nb -= ETHERCRC;
+ skb_put(skb, nb);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ ++dev->stats.rx_packets;
+ dev->stats.rx_bytes += nb;
+ } else {
+ ++dev->stats.rx_dropped;
+ }
+ if ((skb = bp->rx_bufs[i]) == NULL) {
+ bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
+ if (skb != NULL)
+ skb_reserve(bp->rx_bufs[i], 2);
+ }
+ bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
+ st_le16(&cp->res_count, 0);
+ st_le16(&cp->xfer_status, 0);
+ last = i;
+ if (++i >= N_RX_RING) i = 0;
+ }
+
+ if (last != -1) {
+ bp->rx_fill = last;
+ bp->rx_empty = i;
+ }
+
+ dbdma_continue(rd);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ if (rxintcount < 10) {
+ XXDEBUG(("bmac_rxdma_intr done\n"));
+ }
+ return IRQ_HANDLED;
+}
+
+static int txintcount;
+
+static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_cmd *cp;
+ int stat;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ if (txintcount++ < 10) {
+ XXDEBUG(("bmac_txdma_intr\n"));
+ }
+
+ /* del_timer(&bp->tx_timeout); */
+ /* bp->timeout_active = 0; */
+
+ while (1) {
+ cp = &bp->tx_cmds[bp->tx_empty];
+ stat = ld_le16(&cp->xfer_status);
+ if (txintcount < 10) {
+ XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
+ }
+ if (!(stat & ACTIVE)) {
+ /*
+ * status field might not have been filled by DBDMA
+ */
+ if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
+ break;
+ }
+
+ if (bp->tx_bufs[bp->tx_empty]) {
+ ++dev->stats.tx_packets;
+ dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
+ }
+ bp->tx_bufs[bp->tx_empty] = NULL;
+ bp->tx_fullup = 0;
+ netif_wake_queue(dev);
+ if (++bp->tx_empty >= N_TX_RING)
+ bp->tx_empty = 0;
+ if (bp->tx_empty == bp->tx_fill)
+ break;
+ }
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ if (txintcount < 10) {
+ XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
+ }
+
+ bmac_start(dev);
+ return IRQ_HANDLED;
+}
+
+#ifndef SUNHME_MULTICAST
+/* Real fast bit-reversal algorithm, 6-bit values */
+static int reverse6[64] = {
+ 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
+ 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
+ 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
+ 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
+ 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
+ 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
+ 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
+ 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
+};
+
+static unsigned int
+crc416(unsigned int curval, unsigned short nxtval)
+{
+ register unsigned int counter, cur = curval, next = nxtval;
+ register int high_crc_set, low_data_set;
+
+ /* Swap bytes */
+ next = ((next & 0x00FF) << 8) | (next >> 8);
+
+ /* Compute bit-by-bit */
+ for (counter = 0; counter < 16; ++counter) {
+ /* is high CRC bit set? */
+ if ((cur & 0x80000000) == 0) high_crc_set = 0;
+ else high_crc_set = 1;
+
+ cur = cur << 1;
+
+ if ((next & 0x0001) == 0) low_data_set = 0;
+ else low_data_set = 1;
+
+ next = next >> 1;
+
+ /* do the XOR */
+ if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
+ }
+ return cur;
+}
+
+static unsigned int
+bmac_crc(unsigned short *address)
+{
+ unsigned int newcrc;
+
+ XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
+ newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
+ newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
+ newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
+
+ return(newcrc);
+}
+
+/*
+ * Add requested mcast addr to BMac's hash table filter.
+ *
+ */
+
+static void
+bmac_addhash(struct bmac_data *bp, unsigned char *addr)
+{
+ unsigned int crc;
+ unsigned short mask;
+
+ if (!(*addr)) return;
+ crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
+ crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ if (bp->hash_use_count[crc]++) return; /* This bit is already set */
+ mask = crc % 16;
+ mask = (unsigned char)1 << mask;
+ bp->hash_use_count[crc/16] |= mask;
+}
+
+static void
+bmac_removehash(struct bmac_data *bp, unsigned char *addr)
+{
+ unsigned int crc;
+ unsigned char mask;
+
+ /* Now, delete the address from the filter copy, as indicated */
+ crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
+ crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
+ if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
+ mask = crc % 16;
+ mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
+ bp->hash_table_mask[crc/16] &= mask;
+}
+
+/*
+ * Sync the adapter with the software copy of the multicast mask
+ * (logical address filter).
+ */
+
+static void
+bmac_rx_off(struct net_device *dev)
+{
+ unsigned short rx_cfg;
+
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg &= ~RxMACEnable;
+ bmwrite(dev, RXCFG, rx_cfg);
+ do {
+ rx_cfg = bmread(dev, RXCFG);
+ } while (rx_cfg & RxMACEnable);
+}
+
+unsigned short
+bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
+{
+ unsigned short rx_cfg;
+
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg |= RxMACEnable;
+ if (hash_enable) rx_cfg |= RxHashFilterEnable;
+ else rx_cfg &= ~RxHashFilterEnable;
+ if (promisc_enable) rx_cfg |= RxPromiscEnable;
+ else rx_cfg &= ~RxPromiscEnable;
+ bmwrite(dev, RXRST, RxResetValue);
+ bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
+ bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
+ bmwrite(dev, RXCFG, rx_cfg );
+ return rx_cfg;
+}
+
+static void
+bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
+{
+ bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
+ bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
+ bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
+ bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
+}
+
+#if 0
+static void
+bmac_add_multi(struct net_device *dev,
+ struct bmac_data *bp, unsigned char *addr)
+{
+ /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
+ bmac_addhash(bp, addr);
+ bmac_rx_off(dev);
+ bmac_update_hash_table_mask(dev, bp);
+ bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
+ /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
+}
+
+static void
+bmac_remove_multi(struct net_device *dev,
+ struct bmac_data *bp, unsigned char *addr)
+{
+ bmac_removehash(bp, addr);
+ bmac_rx_off(dev);
+ bmac_update_hash_table_mask(dev, bp);
+ bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
+}
+#endif
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+static void bmac_set_multicast(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ struct bmac_data *bp = netdev_priv(dev);
+ int num_addrs = netdev_mc_count(dev);
+ unsigned short rx_cfg;
+ int i;
+
+ if (bp->sleeping)
+ return;
+
+ XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
+
+ if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
+ for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
+ bmac_update_hash_table_mask(dev, bp);
+ rx_cfg = bmac_rx_on(dev, 1, 0);
+ XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n"));
+ } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg |= RxPromiscEnable;
+ bmwrite(dev, RXCFG, rx_cfg);
+ rx_cfg = bmac_rx_on(dev, 0, 1);
+ XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
+ } else {
+ for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
+ for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
+ if (num_addrs == 0) {
+ rx_cfg = bmac_rx_on(dev, 0, 0);
+ XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
+ } else {
+ netdev_for_each_mc_addr(ha, dev)
+ bmac_addhash(bp, ha->addr);
+ bmac_update_hash_table_mask(dev, bp);
+ rx_cfg = bmac_rx_on(dev, 1, 0);
+ XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
+ }
+ }
+ /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
+}
+#else /* ifdef SUNHME_MULTICAST */
+
+/* The version of set_multicast below was lifted from sunhme.c */
+
+static void bmac_set_multicast(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ int i;
+ unsigned short rx_cfg;
+ u32 crc;
+
+ if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
+ bmwrite(dev, BHASH0, 0xffff);
+ bmwrite(dev, BHASH1, 0xffff);
+ bmwrite(dev, BHASH2, 0xffff);
+ bmwrite(dev, BHASH3, 0xffff);
+ } else if(dev->flags & IFF_PROMISC) {
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg |= RxPromiscEnable;
+ bmwrite(dev, RXCFG, rx_cfg);
+ } else {
+ u16 hash_table[4];
+
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg &= ~RxPromiscEnable;
+ bmwrite(dev, RXCFG, rx_cfg);
+
+ for(i = 0; i < 4; i++) hash_table[i] = 0;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc >>= 26;
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+ bmwrite(dev, BHASH0, hash_table[0]);
+ bmwrite(dev, BHASH1, hash_table[1]);
+ bmwrite(dev, BHASH2, hash_table[2]);
+ bmwrite(dev, BHASH3, hash_table[3]);
+ }
+}
+#endif /* SUNHME_MULTICAST */
+
+static int miscintcount;
+
+static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ unsigned int status = bmread(dev, STATUS);
+ if (miscintcount++ < 10) {
+ XXDEBUG(("bmac_misc_intr\n"));
+ }
+ /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
+ /* bmac_txdma_intr_inner(irq, dev_id); */
+ /* if (status & FrameReceived) dev->stats.rx_dropped++; */
+ if (status & RxErrorMask) dev->stats.rx_errors++;
+ if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
+ if (status & RxLenCntExp) dev->stats.rx_length_errors++;
+ if (status & RxOverFlow) dev->stats.rx_over_errors++;
+ if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
+
+ /* if (status & FrameSent) dev->stats.tx_dropped++; */
+ if (status & TxErrorMask) dev->stats.tx_errors++;
+ if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
+ if (status & TxNormalCollExp) dev->stats.collisions++;
+ return IRQ_HANDLED;
+}
+
+/*
+ * Procedure for reading EEPROM
+ */
+#define SROMAddressLength 5
+#define DataInOn 0x0008
+#define DataInOff 0x0000
+#define Clk 0x0002
+#define ChipSelect 0x0001
+#define SDIShiftCount 3
+#define SD0ShiftCount 2
+#define DelayValue 1000 /* number of microseconds */
+#define SROMStartOffset 10 /* this is in words */
+#define SROMReadCount 3 /* number of words to read from SROM */
+#define SROMAddressBits 6
+#define EnetAddressOffset 20
+
+static unsigned char
+bmac_clock_out_bit(struct net_device *dev)
+{
+ unsigned short data;
+ unsigned short val;
+
+ bmwrite(dev, SROMCSR, ChipSelect | Clk);
+ udelay(DelayValue);
+
+ data = bmread(dev, SROMCSR);
+ udelay(DelayValue);
+ val = (data >> SD0ShiftCount) & 1;
+
+ bmwrite(dev, SROMCSR, ChipSelect);
+ udelay(DelayValue);
+
+ return val;
+}
+
+static void
+bmac_clock_in_bit(struct net_device *dev, unsigned int val)
+{
+ unsigned short data;
+
+ if (val != 0 && val != 1) return;
+
+ data = (val << SDIShiftCount);
+ bmwrite(dev, SROMCSR, data | ChipSelect );
+ udelay(DelayValue);
+
+ bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
+ udelay(DelayValue);
+
+ bmwrite(dev, SROMCSR, data | ChipSelect);
+ udelay(DelayValue);
+}
+
+static void
+reset_and_select_srom(struct net_device *dev)
+{
+ /* first reset */
+ bmwrite(dev, SROMCSR, 0);
+ udelay(DelayValue);
+
+ /* send it the read command (110) */
+ bmac_clock_in_bit(dev, 1);
+ bmac_clock_in_bit(dev, 1);
+ bmac_clock_in_bit(dev, 0);
+}
+
+static unsigned short
+read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
+{
+ unsigned short data, val;
+ int i;
+
+ /* send out the address we want to read from */
+ for (i = 0; i < addr_len; i++) {
+ val = addr >> (addr_len-i-1);
+ bmac_clock_in_bit(dev, val & 1);
+ }
+
+ /* Now read in the 16-bit data */
+ data = 0;
+ for (i = 0; i < 16; i++) {
+ val = bmac_clock_out_bit(dev);
+ data <<= 1;
+ data |= val;
+ }
+ bmwrite(dev, SROMCSR, 0);
+
+ return data;
+}
+
+/*
+ * It looks like Cogent and SMC use different methods for calculating
+ * checksums. What a pain..
+ */
+
+static int
+bmac_verify_checksum(struct net_device *dev)
+{
+ unsigned short data, storedCS;
+
+ reset_and_select_srom(dev);
+ data = read_srom(dev, 3, SROMAddressBits);
+ storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);
+
+ return 0;
+}
+
+
+static void
+bmac_get_station_address(struct net_device *dev, unsigned char *ea)
+{
+ int i;
+ unsigned short data;
+
+ for (i = 0; i < 6; i++)
+ {
+ reset_and_select_srom(dev);
+ data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
+ ea[2*i] = bitrev8(data & 0x0ff);
+ ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
+ }
+}
+
+static void bmac_reset_and_enable(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ unsigned long flags;
+ struct sk_buff *skb;
+ unsigned char *data;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ bmac_enable_and_reset_chip(dev);
+ bmac_init_tx_ring(bp);
+ bmac_init_rx_ring(bp);
+ bmac_init_chip(dev);
+ bmac_start_chip(dev);
+ bmwrite(dev, INTDISABLE, EnableNormal);
+ bp->sleeping = 0;
+
+ /*
+ * It seems that the bmac can't receive until it's transmitted
+ * a packet. So we give it a dummy packet to transmit.
+ */
+ skb = dev_alloc_skb(ETHERMINPACKET);
+ if (skb != NULL) {
+ data = skb_put(skb, ETHERMINPACKET);
+ memset(data, 0, ETHERMINPACKET);
+ memcpy(data, dev->dev_addr, 6);
+ memcpy(data+6, dev->dev_addr, 6);
+ bmac_transmit_packet(skb, dev);
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static const struct ethtool_ops bmac_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops bmac_netdev_ops = {
+ .ndo_open = bmac_open,
+ .ndo_stop = bmac_close,
+ .ndo_start_xmit = bmac_output,
+ .ndo_set_rx_mode = bmac_set_multicast,
+ .ndo_set_mac_address = bmac_set_address,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
+{
+ int j, rev, ret;
+ struct bmac_data *bp;
+ const unsigned char *prop_addr;
+ unsigned char addr[6];
+ struct net_device *dev;
+ int is_bmac_plus = ((int)match->data) != 0;
+
+ if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
+ printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
+ return -ENODEV;
+ }
+ prop_addr = of_get_property(macio_get_of_node(mdev),
+ "mac-address", NULL);
+ if (prop_addr == NULL) {
+ prop_addr = of_get_property(macio_get_of_node(mdev),
+ "local-mac-address", NULL);
+ if (prop_addr == NULL) {
+ printk(KERN_ERR "BMAC: Can't get mac-address\n");
+ return -ENODEV;
+ }
+ }
+ memcpy(addr, prop_addr, sizeof(addr));
+
+ dev = alloc_etherdev(PRIV_BYTES);
+ if (!dev) {
+ printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
+ return -ENOMEM;
+ }
+
+ bp = netdev_priv(dev);
+ SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
+ macio_set_drvdata(mdev, dev);
+
+ bp->mdev = mdev;
+ spin_lock_init(&bp->lock);
+
+ if (macio_request_resources(mdev, "bmac")) {
+ printk(KERN_ERR "BMAC: can't request IO resource !\n");
+ goto out_free;
+ }
+
+ dev->base_addr = (unsigned long)
+ ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
+ if (dev->base_addr == 0)
+ goto out_release;
+
+ dev->irq = macio_irq(mdev, 0);
+
+ bmac_enable_and_reset_chip(dev);
+ bmwrite(dev, INTDISABLE, DisableAll);
+
+ rev = addr[0] == 0 && addr[1] == 0xA0;
+ for (j = 0; j < 6; ++j)
+ dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+
+ /* Enable chip without interrupts for now */
+ bmac_enable_and_reset_chip(dev);
+ bmwrite(dev, INTDISABLE, DisableAll);
+
+ dev->netdev_ops = &bmac_netdev_ops;
+ dev->ethtool_ops = &bmac_ethtool_ops;
+
+ bmac_get_station_address(dev, addr);
+ if (bmac_verify_checksum(dev) != 0)
+ goto err_out_iounmap;
+
+ bp->is_bmac_plus = is_bmac_plus;
+ bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
+ if (!bp->tx_dma)
+ goto err_out_iounmap;
+ bp->tx_dma_intr = macio_irq(mdev, 1);
+ bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
+ if (!bp->rx_dma)
+ goto err_out_iounmap_tx;
+ bp->rx_dma_intr = macio_irq(mdev, 2);
+
+ bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
+ bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
+
+ bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
+ skb_queue_head_init(bp->queue);
+
+ init_timer(&bp->tx_timeout);
+
+ ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
+ if (ret) {
+ printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
+ goto err_out_iounmap_rx;
+ }
+ ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
+ if (ret) {
+ printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
+ goto err_out_irq0;
+ }
+ ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
+ if (ret) {
+ printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
+ goto err_out_irq1;
+ }
+
+ /* Mask chip interrupts and disable chip, will be
+ * re-enabled on open()
+ */
+ disable_irq(dev->irq);
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_ERR "BMAC: Ethernet registration failed\n");
+ goto err_out_irq2;
+ }
+
+ printk(KERN_INFO "%s: BMAC%s at %pM",
+ dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
+ XXDEBUG((", base_addr=%#0lx", dev->base_addr));
+ printk("\n");
+
+ return 0;
+
+err_out_irq2:
+ free_irq(bp->rx_dma_intr, dev);
+err_out_irq1:
+ free_irq(bp->tx_dma_intr, dev);
+err_out_irq0:
+ free_irq(dev->irq, dev);
+err_out_iounmap_rx:
+ iounmap(bp->rx_dma);
+err_out_iounmap_tx:
+ iounmap(bp->tx_dma);
+err_out_iounmap:
+ iounmap((void __iomem *)dev->base_addr);
+out_release:
+ macio_release_resources(mdev);
+out_free:
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
+ free_netdev(dev);
+
+ return -ENODEV;
+}
+
+static int bmac_open(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ /* XXDEBUG(("bmac: enter open\n")); */
+ /* reset the chip */
+ bp->opened = 1;
+ bmac_reset_and_enable(dev);
+ enable_irq(dev->irq);
+ return 0;
+}
+
+static int bmac_close(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+ unsigned short config;
+ int i;
+
+ bp->sleeping = 1;
+
+ /* disable rx and tx */
+ config = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, (config & ~RxMACEnable));
+
+ config = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, (config & ~TxMACEnable));
+
+ bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
+
+ /* disable rx and tx dma */
+ st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+ st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+
+ /* free some skb's */
+ XXDEBUG(("bmac: free rx bufs\n"));
+ for (i=0; i<N_RX_RING; i++) {
+ if (bp->rx_bufs[i] != NULL) {
+ dev_kfree_skb(bp->rx_bufs[i]);
+ bp->rx_bufs[i] = NULL;
+ }
+ }
+ XXDEBUG(("bmac: free tx bufs\n"));
+ for (i = 0; i<N_TX_RING; i++) {
+ if (bp->tx_bufs[i] != NULL) {
+ dev_kfree_skb(bp->tx_bufs[i]);
+ bp->tx_bufs[i] = NULL;
+ }
+ }
+ XXDEBUG(("bmac: all bufs freed\n"));
+
+ bp->opened = 0;
+ disable_irq(dev->irq);
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
+
+ return 0;
+}
+
+static void
+bmac_start(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ int i;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ if (bp->sleeping)
+ return;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ while (1) {
+ i = bp->tx_fill + 1;
+ if (i >= N_TX_RING)
+ i = 0;
+ if (i == bp->tx_empty)
+ break;
+ skb = skb_dequeue(bp->queue);
+ if (skb == NULL)
+ break;
+ bmac_transmit_packet(skb, dev);
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static int
+bmac_output(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ skb_queue_tail(bp->queue, skb);
+ bmac_start(dev);
+ return NETDEV_TX_OK;
+}
+
+static void bmac_tx_timeout(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_cmd *cp;
+ unsigned long flags;
+ unsigned short config, oldConfig;
+ int i;
+
+ XXDEBUG(("bmac: tx_timeout called\n"));
+ spin_lock_irqsave(&bp->lock, flags);
+ bp->timeout_active = 0;
+
+ /* update various counters */
+/* bmac_handle_misc_intrs(bp, 0); */
+
+ cp = &bp->tx_cmds[bp->tx_empty];
+/* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
+/* ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
+/* mb->pr, mb->xmtfs, mb->fifofc)); */
+
+ /* turn off both tx and rx and reset the chip */
+ config = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, (config & ~RxMACEnable));
+ config = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, (config & ~TxMACEnable));
+ out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
+ printk(KERN_ERR "bmac: transmit timeout - resetting\n");
+ bmac_enable_and_reset_chip(dev);
+
+ /* restart rx dma */
+ cp = bus_to_virt(ld_le32(&rd->cmdptr));
+ out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
+ out_le16(&cp->xfer_status, 0);
+ out_le32(&rd->cmdptr, virt_to_bus(cp));
+ out_le32(&rd->control, DBDMA_SET(RUN|WAKE));
+
+ /* fix up the transmit side */
+ XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
+ bp->tx_empty, bp->tx_fill, bp->tx_fullup));
+ i = bp->tx_empty;
+ ++dev->stats.tx_errors;
+ if (i != bp->tx_fill) {
+ dev_kfree_skb(bp->tx_bufs[i]);
+ bp->tx_bufs[i] = NULL;
+ if (++i >= N_TX_RING) i = 0;
+ bp->tx_empty = i;
+ }
+ bp->tx_fullup = 0;
+ netif_wake_queue(dev);
+ if (i != bp->tx_fill) {
+ cp = &bp->tx_cmds[i];
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->command, OUTPUT_LAST);
+ out_le32(&td->cmdptr, virt_to_bus(cp));
+ out_le32(&td->control, DBDMA_SET(RUN));
+ /* bmac_set_timeout(dev); */
+ XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
+ }
+
+ /* turn it back on */
+ oldConfig = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
+ oldConfig = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+#if 0
+static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
+{
+ int i,*ip;
+
+ for (i=0;i< count;i++) {
+ ip = (int*)(cp+i);
+
+ printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
+ ld_le32(ip+0),
+ ld_le32(ip+1),
+ ld_le32(ip+2),
+ ld_le32(ip+3));
+ }
+
+}
+#endif
+
+#if 0
+static int
+bmac_proc_info(char *buffer, char **start, off_t offset, int length)
+{
+ int len = 0;
+ off_t pos = 0;
+ off_t begin = 0;
+ int i;
+
+ if (bmac_devs == NULL)
+ return -ENOSYS;
+
+ len += sprintf(buffer, "BMAC counters & registers\n");
+
+ for (i = 0; i<N_REG_ENTRIES; i++) {
+ len += sprintf(buffer + len, "%s: %#08x\n",
+ reg_entries[i].name,
+ bmread(bmac_devs, reg_entries[i].reg_offset));
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+
+ if (pos > offset+length) break;
+ }
+
+ *start = buffer + (offset - begin);
+ len -= (offset - begin);
+
+ if (len > length) len = length;
+
+ return len;
+}
+#endif
+
+static int __devexit bmac_remove(struct macio_dev *mdev)
+{
+ struct net_device *dev = macio_get_drvdata(mdev);
+ struct bmac_data *bp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ free_irq(dev->irq, dev);
+ free_irq(bp->tx_dma_intr, dev);
+ free_irq(bp->rx_dma_intr, dev);
+
+ iounmap((void __iomem *)dev->base_addr);
+ iounmap(bp->tx_dma);
+ iounmap(bp->rx_dma);
+
+ macio_release_resources(mdev);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct of_device_id bmac_match[] =
+{
+ {
+ .name = "bmac",
+ .data = (void *)0,
+ },
+ {
+ .type = "network",
+ .compatible = "bmac+",
+ .data = (void *)1,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE (of, bmac_match);
+
+static struct macio_driver bmac_driver =
+{
+ .driver = {
+ .name = "bmac",
+ .owner = THIS_MODULE,
+ .of_match_table = bmac_match,
+ },
+ .probe = bmac_probe,
+ .remove = bmac_remove,
+#ifdef CONFIG_PM
+ .suspend = bmac_suspend,
+ .resume = bmac_resume,
+#endif
+};
+
+
+static int __init bmac_init(void)
+{
+ if (bmac_emergency_rxbuf == NULL) {
+ bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
+ if (bmac_emergency_rxbuf == NULL) {
+ printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
+ return -ENOMEM;
+ }
+ }
+
+ return macio_register_driver(&bmac_driver);
+}
+
+static void __exit bmac_exit(void)
+{
+ macio_unregister_driver(&bmac_driver);
+
+ kfree(bmac_emergency_rxbuf);
+ bmac_emergency_rxbuf = NULL;
+}
+
+MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
+MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
+MODULE_LICENSE("GPL");
+
+module_init(bmac_init);
+module_exit(bmac_exit);
diff --git a/drivers/net/bmac.h b/drivers/net/ethernet/apple/bmac.h
index a1d19d867ba..a1d19d867ba 100644
--- a/drivers/net/bmac.h
+++ b/drivers/net/ethernet/apple/bmac.h
diff --git a/drivers/net/mac89x0.c b/drivers/net/ethernet/apple/mac89x0.c
index 669b317974a..83781f316d1 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/ethernet/apple/mac89x0.c
@@ -170,7 +170,7 @@ static const struct net_device_ops mac89x0_netdev_ops = {
.ndo_stop = net_close,
.ndo_start_xmit = net_send_packet,
.ndo_get_stats = net_get_stats,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_set_mac_address = set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/mace.c b/drivers/net/ethernet/apple/mace.c
index 2074e9724ba..bec87bd9195 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -100,7 +100,7 @@ static const struct net_device_ops mace_netdev_ops = {
.ndo_open = mace_open,
.ndo_stop = mace_close,
.ndo_start_xmit = mace_xmit_start,
- .ndo_set_multicast_list = mace_set_multicast,
+ .ndo_set_rx_mode = mace_set_multicast,
.ndo_set_mac_address = mace_set_address,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/mace.h b/drivers/net/ethernet/apple/mace.h
index 30b7ec0cedb..30b7ec0cedb 100644
--- a/drivers/net/mace.h
+++ b/drivers/net/ethernet/apple/mace.h
diff --git a/drivers/net/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 4286e67f963..7cf81bbffe0 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -31,9 +31,8 @@
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
+#include <linux/interrupt.h>
#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
@@ -185,7 +184,7 @@ static const struct net_device_ops mace_netdev_ops = {
.ndo_stop = mace_close,
.ndo_start_xmit = mace_xmit_start,
.ndo_tx_timeout = mace_tx_timeout,
- .ndo_set_multicast_list = mace_set_multicast,
+ .ndo_set_rx_mode = mace_set_multicast,
.ndo_set_mac_address = mace_set_address,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -203,14 +202,8 @@ static int __devinit mace_probe(struct platform_device *pdev)
unsigned char *addr;
struct net_device *dev;
unsigned char checksum = 0;
- static int found = 0;
int err;
- if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
- return -ENODEV;
-
- found = 1; /* prevent 'finding' one on every device probe */
-
dev = alloc_etherdev(PRIV_BYTES);
if (!dev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
new file mode 100644
index 00000000000..1ed886d421f
--- /dev/null
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -0,0 +1,70 @@
+#
+# Atheros device configuration
+#
+
+config NET_VENDOR_ATHEROS
+ bool "Atheros devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Atheros devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ATHEROS
+
+config ATL2
+ tristate "Atheros L2 Fast Ethernet support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This driver supports the Atheros L2 fast ethernet adapter.
+
+ To compile this driver as a module, choose M here. The module
+ will be called atl2.
+
+config ATL1
+ tristate "Atheros/Attansic L1 Gigabit Ethernet support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This driver supports the Atheros/Attansic L1 gigabit ethernet
+ adapter.
+
+ To compile this driver as a module, choose M here. The module
+ will be called atl1.
+
+config ATL1E
+ tristate "Atheros L1E Gigabit Ethernet support (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This driver supports the Atheros L1E gigabit ethernet adapter.
+
+ To compile this driver as a module, choose M here. The module
+ will be called atl1e.
+
+config ATL1C
+ tristate "Atheros L1C Gigabit Ethernet support (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This driver supports the Atheros L1C gigabit ethernet adapter.
+
+ To compile this driver as a module, choose M here. The module
+ will be called atl1c.
+
+endif # NET_VENDOR_ATHEROS
diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile
new file mode 100644
index 00000000000..e7e76fb576f
--- /dev/null
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Atheros network device drivers.
+#
+
+obj-$(CONFIG_ATL1) += atlx/
+obj-$(CONFIG_ATL2) += atlx/
+obj-$(CONFIG_ATL1E) += atl1e/
+obj-$(CONFIG_ATL1C) += atl1c/
diff --git a/drivers/net/atl1c/Makefile b/drivers/net/ethernet/atheros/atl1c/Makefile
index c37d966952e..c37d966952e 100644
--- a/drivers/net/atl1c/Makefile
+++ b/drivers/net/ethernet/atheros/atl1c/Makefile
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index ca70e16b6e2..ca70e16b6e2 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 7be884d0aaf..7be884d0aaf 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 23f2ab0f2fa..23f2ab0f2fa 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
index 655fc6c4a8a..655fc6c4a8a 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 97224421840..02c7ed8d9ec 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2179,12 +2179,11 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
- buffer_info->length = frag->size;
- buffer_info->dma =
- pci_map_page(adapter->pdev, frag->page,
- frag->page_offset,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ buffer_info->length = skb_frag_size(frag);
+ buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
+ frag, 0,
+ buffer_info->length,
+ DMA_TO_DEVICE);
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
ATL1C_PCIMAP_TODEVICE);
@@ -2600,7 +2599,7 @@ static const struct net_device_ops atl1c_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_start_xmit = atl1c_xmit_frame,
.ndo_set_mac_address = atl1c_set_mac_addr,
- .ndo_set_multicast_list = atl1c_set_multi,
+ .ndo_set_rx_mode = atl1c_set_multi,
.ndo_change_mtu = atl1c_change_mtu,
.ndo_fix_features = atl1c_fix_features,
.ndo_set_features = atl1c_set_features,
diff --git a/drivers/net/atl1e/Makefile b/drivers/net/ethernet/atheros/atl1e/Makefile
index bc11be824e7..bc11be824e7 100644
--- a/drivers/net/atl1e/Makefile
+++ b/drivers/net/ethernet/atheros/atl1e/Makefile
diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index 829b5ad71d0..829b5ad71d0 100644
--- a/drivers/net/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 6269438d365..6269438d365 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.c
index 923063d2e5b..923063d2e5b 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.c
diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
index 74df16aef79..74df16aef79 100644
--- a/drivers/net/atl1e/atl1e_hw.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index d8d411998fa..95483bcac1d 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1593,7 +1593,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
u16 proto_hdr_len = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- fg_size = skb_shinfo(skb)->frags[i].size;
+ fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT);
}
@@ -1744,12 +1744,12 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
u16 i;
u16 seg_num;
frag = &skb_shinfo(skb)->frags[f];
- buf_len = frag->size;
+ buf_len = skb_frag_size(frag);
seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
for (i = 0; i < seg_num; i++) {
@@ -1765,12 +1765,11 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
MAX_TX_BUF_LEN : buf_len;
buf_len -= tx_buffer->length;
- tx_buffer->dma =
- pci_map_page(adapter->pdev, frag->page,
- frag->page_offset +
- (i * MAX_TX_BUF_LEN),
- tx_buffer->length,
- PCI_DMA_TODEVICE);
+ tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev,
+ frag,
+ (i * MAX_TX_BUF_LEN),
+ tx_buffer->length,
+ DMA_TO_DEVICE);
ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
@@ -2212,7 +2211,7 @@ static const struct net_device_ops atl1e_netdev_ops = {
.ndo_stop = atl1e_close,
.ndo_start_xmit = atl1e_xmit_frame,
.ndo_get_stats = atl1e_get_stats,
- .ndo_set_multicast_list = atl1e_set_multi,
+ .ndo_set_rx_mode = atl1e_set_multi,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = atl1e_set_mac_addr,
.ndo_fix_features = atl1e_fix_features,
diff --git a/drivers/net/atl1e/atl1e_param.c b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
index 0ce60b6e7ef..0ce60b6e7ef 100644
--- a/drivers/net/atl1e/atl1e_param.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
diff --git a/drivers/net/atlx/Makefile b/drivers/net/ethernet/atheros/atlx/Makefile
index e4f6022ca55..e4f6022ca55 100644
--- a/drivers/net/atlx/Makefile
+++ b/drivers/net/ethernet/atheros/atlx/Makefile
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 97e6954304e..33a4e35f5ee 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -858,7 +858,7 @@ static s32 atl1_init_hw(struct atl1_hw *hw)
atl1_init_flash_opcode(hw);
if (!hw->phy_configured) {
- /* enable GPHY LinkChange Interrrupt */
+ /* enable GPHY LinkChange Interrupt */
ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
if (ret_val)
return ret_val;
@@ -2267,11 +2267,11 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
u16 i, nseg;
frag = &skb_shinfo(skb)->frags[f];
- buf_len = frag->size;
+ buf_len = skb_frag_size(frag);
nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
ATL1_MAX_TX_BUF_LEN;
@@ -2283,10 +2283,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
ATL1_MAX_TX_BUF_LEN : buf_len;
buf_len -= buffer_info->length;
- buffer_info->dma = pci_map_page(adapter->pdev,
- frag->page,
- frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
- buffer_info->length, PCI_DMA_TODEVICE);
+ buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
+ frag, i * ATL1_MAX_TX_BUF_LEN,
+ buffer_info->length, DMA_TO_DEVICE);
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2357,7 +2356,6 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
int count = 1;
int ret_val;
struct tx_packet_desc *ptpd;
- u16 frag_size;
u16 vlan_tag;
unsigned int nr_frags = 0;
unsigned int mss = 0;
@@ -2373,10 +2371,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) {
- frag_size = skb_shinfo(skb)->frags[f].size;
- if (frag_size)
- count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
- ATL1_MAX_TX_BUF_LEN;
+ unsigned int f_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
+ count += (f_size + ATL1_MAX_TX_BUF_LEN - 1) /
+ ATL1_MAX_TX_BUF_LEN;
}
mss = skb_shinfo(skb)->gso_size;
@@ -2869,7 +2866,7 @@ static const struct net_device_ops atl1_netdev_ops = {
.ndo_open = atl1_open,
.ndo_stop = atl1_close,
.ndo_start_xmit = atl1_xmit_frame,
- .ndo_set_multicast_list = atlx_set_multi,
+ .ndo_set_rx_mode = atlx_set_multi,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = atl1_set_mac,
.ndo_change_mtu = atl1_change_mtu,
@@ -3474,12 +3471,8 @@ static void atl1_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = ATL1_MAX_RFD;
ring->tx_max_pending = ATL1_MAX_TPD;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
ring->rx_pending = rxdr->count;
ring->tx_pending = txdr->count;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
}
static int atl1_set_ringparam(struct net_device *netdev,
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index 109d6da8be9..109d6da8be9 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index d4f7dda3972..1feae5928a4 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1325,7 +1325,7 @@ static const struct net_device_ops atl2_netdev_ops = {
.ndo_open = atl2_open,
.ndo_stop = atl2_close,
.ndo_start_xmit = atl2_xmit_frame,
- .ndo_set_multicast_list = atl2_set_multi,
+ .ndo_set_rx_mode = atl2_set_multi,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = atl2_set_mac,
.ndo_change_mtu = atl2_change_mtu,
diff --git a/drivers/net/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h
index bf9016ebdd9..bf9016ebdd9 100644
--- a/drivers/net/atlx/atl2.h
+++ b/drivers/net/ethernet/atheros/atlx/atl2.h
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index aabcf4b5745..aabcf4b5745 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
diff --git a/drivers/net/atlx/atlx.h b/drivers/net/ethernet/atheros/atlx/atlx.h
index 14054b75aa6..14054b75aa6 100644
--- a/drivers/net/atlx/atlx.h
+++ b/drivers/net/ethernet/atheros/atlx/atlx.h
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
new file mode 100644
index 00000000000..f15e72e81ac
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -0,0 +1,122 @@
+#
+# Broadcom device configuration
+#
+
+config NET_VENDOR_BROADCOM
+ bool "Broadcom devices"
+ default y
+ depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
+ SIBYTE_SB1xxx_SOC
+ ---help---
+ If you have a network (Ethernet) chipset belonging to this class,
+ say Y.
+
+ Note that the answer to this question does not directly affect
+ the kernel: saying N will just case the configurator to skip all
+ the questions regarding AMD chipsets. If you say Y, you will be asked
+ for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_BROADCOM
+
+config B44
+ tristate "Broadcom 440x/47xx ethernet support"
+ depends on SSB_POSSIBLE && HAS_DMA
+ select SSB
+ select NET_CORE
+ select MII
+ ---help---
+ If you have a network (Ethernet) controller of this type, say Y
+ or M and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called b44.
+
+# Auto-select SSB PCI-HOST support, if possible
+config B44_PCI_AUTOSELECT
+ bool
+ depends on B44 && SSB_PCIHOST_POSSIBLE
+ select SSB_PCIHOST
+ default y
+
+# Auto-select SSB PCICORE driver, if possible
+config B44_PCICORE_AUTOSELECT
+ bool
+ depends on B44 && SSB_DRIVER_PCICORE_POSSIBLE
+ select SSB_DRIVER_PCICORE
+ default y
+
+config B44_PCI
+ bool
+ depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
+ default y
+
+config BCM63XX_ENET
+ tristate "Broadcom 63xx internal mac support"
+ depends on BCM63XX
+ select NET_CORE
+ select MII
+ select PHYLIB
+ help
+ This driver supports the ethernet MACs in the Broadcom 63xx
+ MIPS chipset family (BCM63XX).
+
+config BNX2
+ tristate "Broadcom NetXtremeII support"
+ depends on PCI
+ select CRC32
+ select FW_LOADER
+ ---help---
+ This driver supports Broadcom NetXtremeII gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bnx2. This is recommended.
+
+config CNIC
+ tristate "Broadcom CNIC support"
+ depends on PCI
+ select BNX2
+ select UIO
+ ---help---
+ This driver supports offload features of Broadcom NetXtremeII
+ gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cnic. This is recommended.
+
+config SB1250_MAC
+ tristate "SB1250 Gigabit Ethernet support"
+ depends on SIBYTE_SB1xxx_SOC
+ select PHYLIB
+ ---help---
+ This driver supports Gigabit Ethernet interfaces based on the
+ Broadcom SiByte family of System-On-a-Chip parts. They include
+ the BCM1120, BCM1125, BCM1125H, BCM1250, BCM1255, BCM1280, BCM1455
+ and BCM1480 chips.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sb1250-mac.
+
+config TIGON3
+ tristate "Broadcom Tigon3 support"
+ depends on PCI
+ select PHYLIB
+ ---help---
+ This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tg3. This is recommended.
+
+config BNX2X
+ tristate "Broadcom NetXtremeII 10Gb support"
+ depends on PCI
+ select FW_LOADER
+ select ZLIB_INFLATE
+ select LIBCRC32C
+ select MDIO
+ ---help---
+ This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards.
+ To compile this driver as a module, choose M here: the module
+ will be called bnx2x. This is recommended.
+
+endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
new file mode 100644
index 00000000000..b7896051d54
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the Broadcom network device drivers.
+#
+
+obj-$(CONFIG_B44) += b44.o
+obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_BNX2) += bnx2.o
+obj-$(CONFIG_CNIC) += cnic.o
+obj-$(CONFIG_BNX2X) += bnx2x/
+obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
+obj-$(CONFIG_TIGON3) += tg3.o
diff --git a/drivers/net/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 41ea84e3f69..4cf835dbc12 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2114,7 +2114,7 @@ static const struct net_device_ops b44_netdev_ops = {
.ndo_stop = b44_close,
.ndo_start_xmit = b44_start_xmit,
.ndo_get_stats = b44_get_stats,
- .ndo_set_multicast_list = b44_set_rx_mode,
+ .ndo_set_rx_mode = b44_set_rx_mode,
.ndo_set_mac_address = b44_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = b44_ioctl,
diff --git a/drivers/net/b44.h b/drivers/net/ethernet/broadcom/b44.h
index e1905a49279..e1905a49279 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 1d9b9858067..a11a8ad9422 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1398,8 +1398,6 @@ static void bcm_enet_get_ringparam(struct net_device *dev,
/* rx/tx ring is actually only limited by memory */
ering->rx_max_pending = 8192;
ering->tx_max_pending = 8192;
- ering->rx_mini_max_pending = 0;
- ering->rx_jumbo_max_pending = 0;
ering->rx_pending = priv->rx_ring_size;
ering->tx_pending = priv->tx_ring_size;
}
@@ -1603,7 +1601,7 @@ static const struct net_device_ops bcm_enet_ops = {
.ndo_stop = bcm_enet_stop,
.ndo_start_xmit = bcm_enet_start_xmit,
.ndo_set_mac_address = bcm_enet_set_mac_address,
- .ndo_set_multicast_list = bcm_enet_set_multicast_list,
+ .ndo_set_rx_mode = bcm_enet_set_multicast_list,
.ndo_do_ioctl = bcm_enet_ioctl,
.ndo_change_mtu = bcm_enet_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 0e3048b788c..0e3048b788c 100644
--- a/drivers/net/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
diff --git a/drivers/net/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 4b2b57018a0..965c7235804 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -36,6 +36,7 @@
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
+#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
@@ -2870,7 +2871,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
dma_unmap_addr(
&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
mapping),
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
}
@@ -2929,8 +2930,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
shinfo = skb_shinfo(skb);
shinfo->nr_frags--;
- page = shinfo->frags[shinfo->nr_frags].page;
- shinfo->frags[shinfo->nr_frags].page = NULL;
+ page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
+ __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
cons_rx_pg->page = page;
dev_kfree_skb(skb);
@@ -3048,9 +3049,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
} else {
skb_frag_t *frag =
&skb_shinfo(skb)->frags[i - 1];
- frag->size -= tail;
+ skb_frag_size_sub(frag, tail);
skb->data_len -= tail;
- skb->truesize -= tail;
}
return 0;
}
@@ -3082,7 +3082,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
frag_size -= frag_len;
skb->data_len += frag_len;
- skb->truesize += frag_len;
+ skb->truesize += PAGE_SIZE;
skb->len += frag_len;
pg_prod = NEXT_RX_BD(pg_prod);
@@ -3622,7 +3622,7 @@ bnx2_set_rx_mode(struct net_device *dev)
spin_unlock_bh(&bp->phy_lock);
}
-static int __devinit
+static int
check_fw_section(const struct firmware *fw,
const struct bnx2_fw_file_section *section,
u32 alignment, bool non_empty)
@@ -3638,7 +3638,7 @@ check_fw_section(const struct firmware *fw,
return 0;
}
-static int __devinit
+static int
check_mips_fw_entry(const struct firmware *fw,
const struct bnx2_mips_fw_file_entry *entry)
{
@@ -3649,8 +3649,16 @@ check_mips_fw_entry(const struct firmware *fw,
return 0;
}
-static int __devinit
-bnx2_request_firmware(struct bnx2 *bp)
+static void bnx2_release_firmware(struct bnx2 *bp)
+{
+ if (bp->rv2p_firmware) {
+ release_firmware(bp->mips_firmware);
+ release_firmware(bp->rv2p_firmware);
+ bp->rv2p_firmware = NULL;
+ }
+}
+
+static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
const char *mips_fw_file, *rv2p_fw_file;
const struct bnx2_mips_fw_file *mips_fw;
@@ -3672,13 +3680,13 @@ bnx2_request_firmware(struct bnx2 *bp)
rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
if (rc) {
pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
- return rc;
+ goto out;
}
rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
if (rc) {
pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
- return rc;
+ goto err_release_mips_firmware;
}
mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
@@ -3689,16 +3697,30 @@ bnx2_request_firmware(struct bnx2 *bp)
check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
- return -EINVAL;
+ rc = -EINVAL;
+ goto err_release_firmware;
}
if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
- return -EINVAL;
+ rc = -EINVAL;
+ goto err_release_firmware;
}
+out:
+ return rc;
- return 0;
+err_release_firmware:
+ release_firmware(bp->rv2p_firmware);
+ bp->rv2p_firmware = NULL;
+err_release_mips_firmware:
+ release_firmware(bp->mips_firmware);
+ goto out;
+}
+
+static int bnx2_request_firmware(struct bnx2 *bp)
+{
+ return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
}
static u32
@@ -5373,7 +5395,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
- skb_shinfo(skb)->frags[k].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[k]),
PCI_DMA_TODEVICE);
}
dev_kfree_skb(skb);
@@ -6266,6 +6288,10 @@ bnx2_open(struct net_device *dev)
struct bnx2 *bp = netdev_priv(dev);
int rc;
+ rc = bnx2_request_firmware(bp);
+ if (rc < 0)
+ goto out;
+
netif_carrier_off(dev);
bnx2_set_power_state(bp, PCI_D0);
@@ -6326,8 +6352,8 @@ bnx2_open(struct net_device *dev)
netdev_info(dev, "using MSIX\n");
netif_tx_start_all_queues(dev);
-
- return 0;
+out:
+ return rc;
open_err:
bnx2_napi_disable(bp);
@@ -6335,7 +6361,8 @@ open_err:
bnx2_free_irq(bp);
bnx2_free_mem(bp);
bnx2_del_napi(bp);
- return rc;
+ bnx2_release_firmware(bp);
+ goto out;
}
static void
@@ -6503,15 +6530,15 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_buf->is_gso = skb_is_gso(skb);
for (i = 0; i < last_frag; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX_BD(prod);
ring_prod = TX_RING_IDX(prod);
txbd = &txr->tx_desc_ring[ring_prod];
- len = frag->size;
- mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
- len, PCI_DMA_TODEVICE);
+ len = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping))
goto dma_error;
dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
@@ -6567,7 +6594,7 @@ dma_error:
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
}
@@ -7127,11 +7154,9 @@ bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
struct bnx2 *bp = netdev_priv(dev);
ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
- ering->rx_mini_max_pending = 0;
ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
ering->rx_pending = bp->rx_ring_size;
- ering->rx_mini_pending = 0;
ering->rx_jumbo_pending = bp->rx_pg_ring_size;
ering->tx_max_pending = MAX_TX_DESC_CNT;
@@ -8353,10 +8378,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- rc = bnx2_request_firmware(bp);
- if (rc)
- goto error;
-
memcpy(dev->dev_addr, bp->mac_addr, 6);
memcpy(dev->perm_addr, bp->mac_addr, 6);
@@ -8370,6 +8391,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->vlan_features = dev->hw_features;
dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->features |= dev->hw_features;
+ dev->priv_flags |= IFF_UNICAST_FLT;
if ((rc = register_netdev(dev))) {
dev_err(&pdev->dev, "Cannot register net device\n");
@@ -8387,11 +8409,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
error:
- if (bp->mips_firmware)
- release_firmware(bp->mips_firmware);
- if (bp->rv2p_firmware)
- release_firmware(bp->rv2p_firmware);
-
if (bp->regview)
iounmap(bp->regview);
pci_release_regions(pdev);
@@ -8412,11 +8429,6 @@ bnx2_remove_one(struct pci_dev *pdev)
del_timer_sync(&bp->timer);
cancel_work_sync(&bp->reset_task);
- if (bp->mips_firmware)
- release_firmware(bp->mips_firmware);
- if (bp->rv2p_firmware)
- release_firmware(bp->rv2p_firmware);
-
if (bp->regview)
iounmap(bp->regview);
@@ -8427,6 +8439,8 @@ bnx2_remove_one(struct pci_dev *pdev)
bp->flags &= ~BNX2_FLAG_AER_ENABLED;
}
+ bnx2_release_firmware(bp);
+
free_netdev(dev);
pci_release_regions(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index fc50d4267df..99d31a7d6aa 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -5617,7 +5617,7 @@ struct l2_fhdr {
#define BNX2_TXP_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
#define BNX2_TXP_CPU_STATE_SOFT_HALTED (1L<<10)
#define BNX2_TXP_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
-#define BNX2_TXP_CPU_STATE_INTERRRUPT (1L<<12)
+#define BNX2_TXP_CPU_STATE_INTERRUPT (1L<<12)
#define BNX2_TXP_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
#define BNX2_TXP_CPU_STATE_INST_FETCH_STALL (1L<<15)
#define BNX2_TXP_CPU_STATE_BLOCKED_READ (1L<<31)
@@ -5712,7 +5712,7 @@ struct l2_fhdr {
#define BNX2_TPAT_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
#define BNX2_TPAT_CPU_STATE_SOFT_HALTED (1L<<10)
#define BNX2_TPAT_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
-#define BNX2_TPAT_CPU_STATE_INTERRRUPT (1L<<12)
+#define BNX2_TPAT_CPU_STATE_INTERRUPT (1L<<12)
#define BNX2_TPAT_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
#define BNX2_TPAT_CPU_STATE_INST_FETCH_STALL (1L<<15)
#define BNX2_TPAT_CPU_STATE_BLOCKED_READ (1L<<31)
@@ -5807,7 +5807,7 @@ struct l2_fhdr {
#define BNX2_RXP_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
#define BNX2_RXP_CPU_STATE_SOFT_HALTED (1L<<10)
#define BNX2_RXP_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
-#define BNX2_RXP_CPU_STATE_INTERRRUPT (1L<<12)
+#define BNX2_RXP_CPU_STATE_INTERRUPT (1L<<12)
#define BNX2_RXP_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
#define BNX2_RXP_CPU_STATE_INST_FETCH_STALL (1L<<15)
#define BNX2_RXP_CPU_STATE_BLOCKED_READ (1L<<31)
@@ -5953,7 +5953,7 @@ struct l2_fhdr {
#define BNX2_COM_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
#define BNX2_COM_CPU_STATE_SOFT_HALTED (1L<<10)
#define BNX2_COM_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
-#define BNX2_COM_CPU_STATE_INTERRRUPT (1L<<12)
+#define BNX2_COM_CPU_STATE_INTERRUPT (1L<<12)
#define BNX2_COM_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
#define BNX2_COM_CPU_STATE_INST_FETCH_STALL (1L<<15)
#define BNX2_COM_CPU_STATE_BLOCKED_READ (1L<<31)
@@ -6119,7 +6119,7 @@ struct l2_fhdr {
#define BNX2_CP_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
#define BNX2_CP_CPU_STATE_SOFT_HALTED (1L<<10)
#define BNX2_CP_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
-#define BNX2_CP_CPU_STATE_INTERRRUPT (1L<<12)
+#define BNX2_CP_CPU_STATE_INTERRUPT (1L<<12)
#define BNX2_CP_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
#define BNX2_CP_CPU_STATE_INST_FETCH_STALL (1L<<15)
#define BNX2_CP_CPU_STATE_BLOCKED_READ (1L<<31)
@@ -6291,7 +6291,7 @@ struct l2_fhdr {
#define BNX2_MCP_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
#define BNX2_MCP_CPU_STATE_SOFT_HALTED (1L<<10)
#define BNX2_MCP_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
-#define BNX2_MCP_CPU_STATE_INTERRRUPT (1L<<12)
+#define BNX2_MCP_CPU_STATE_INTERRUPT (1L<<12)
#define BNX2_MCP_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
#define BNX2_MCP_CPU_STATE_INST_FETCH_STALL (1L<<15)
#define BNX2_MCP_CPU_STATE_BLOCKED_READ (1L<<31)
diff --git a/drivers/net/bnx2_fw.h b/drivers/net/ethernet/broadcom/bnx2_fw.h
index 940eb91f209..940eb91f209 100644
--- a/drivers/net/bnx2_fw.h
+++ b/drivers/net/ethernet/broadcom/bnx2_fw.h
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile
index 48fbdd48f88..48fbdd48f88 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e46df5331c5..627a5807836 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -66,73 +66,68 @@
#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */
#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */
-#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */
-
/* regular debug print */
-#define DP(__mask, __fmt, __args...) \
+#define DP(__mask, fmt, ...) \
do { \
if (bp->msg_enable & (__mask)) \
- printk(DP_LEVEL "[%s:%d(%s)]" __fmt, \
- __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", \
- ##__args); \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__); \
} while (0)
-#define DP_CONT(__mask, __fmt, __args...) \
+#define DP_CONT(__mask, fmt, ...) \
do { \
if (bp->msg_enable & (__mask)) \
- pr_cont(__fmt, ##__args); \
+ pr_cont(fmt, ##__VA_ARGS__); \
} while (0)
/* errors debug print */
-#define BNX2X_DBG_ERR(__fmt, __args...) \
+#define BNX2X_DBG_ERR(fmt, ...) \
do { \
if (netif_msg_probe(bp)) \
- pr_err("[%s:%d(%s)]" __fmt, \
+ pr_err("[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
bp->dev ? (bp->dev->name) : "?", \
- ##__args); \
+ ##__VA_ARGS__); \
} while (0)
/* for errors (never masked) */
-#define BNX2X_ERR(__fmt, __args...) \
+#define BNX2X_ERR(fmt, ...) \
do { \
- pr_err("[%s:%d(%s)]" __fmt, \
+ pr_err("[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
bp->dev ? (bp->dev->name) : "?", \
- ##__args); \
- } while (0)
+ ##__VA_ARGS__); \
+} while (0)
-#define BNX2X_ERROR(__fmt, __args...) do { \
- pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \
- } while (0)
+#define BNX2X_ERROR(fmt, ...) \
+ pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
/* before we have a dev->name use dev_info() */
-#define BNX2X_DEV_INFO(__fmt, __args...) \
+#define BNX2X_DEV_INFO(fmt, ...) \
do { \
if (netif_msg_probe(bp)) \
- dev_info(&bp->pdev->dev, __fmt, ##__args); \
+ dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \
} while (0)
-#define BNX2X_MAC_FMT "%pM"
-#define BNX2X_MAC_PRN_LIST(mac) (mac)
-
-
#ifdef BNX2X_STOP_ON_ERROR
void bnx2x_int_disable(struct bnx2x *bp);
-#define bnx2x_panic() do { \
- bp->panic = 1; \
- BNX2X_ERR("driver assert\n"); \
- bnx2x_int_disable(bp); \
- bnx2x_panic_dump(bp); \
- } while (0)
+#define bnx2x_panic() \
+do { \
+ bp->panic = 1; \
+ BNX2X_ERR("driver assert\n"); \
+ bnx2x_int_disable(bp); \
+ bnx2x_panic_dump(bp); \
+} while (0)
#else
-#define bnx2x_panic() do { \
- bp->panic = 1; \
- BNX2X_ERR("driver assert\n"); \
- bnx2x_panic_dump(bp); \
- } while (0)
+#define bnx2x_panic() \
+do { \
+ bp->panic = 1; \
+ BNX2X_ERR("driver assert\n"); \
+ bnx2x_panic_dump(bp); \
+} while (0)
#endif
#define bnx2x_mc_addr(ha) ((ha)->addr)
@@ -239,13 +234,19 @@ void bnx2x_int_disable(struct bnx2x *bp);
* FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
*
*/
-/* iSCSI L2 */
-#define BNX2X_ISCSI_ETH_CL_ID_IDX 1
-#define BNX2X_ISCSI_ETH_CID 49
+enum {
+ BNX2X_ISCSI_ETH_CL_ID_IDX,
+ BNX2X_FCOE_ETH_CL_ID_IDX,
+ BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
+};
-/* FCoE L2 */
-#define BNX2X_FCOE_ETH_CL_ID_IDX 2
-#define BNX2X_FCOE_ETH_CID 50
+#define BNX2X_CNIC_START_ETH_CID 48
+enum {
+ /* iSCSI L2 */
+ BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID,
+ /* FCoE L2 */
+ BNX2X_FCOE_ETH_CID,
+};
/** Additional rings budgeting */
#ifdef BCM_CNIC
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index c4cbf973641..580b44edb06 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -15,6 +15,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
@@ -452,7 +454,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
skb->data_len += frag_len;
- skb->truesize += frag_len;
+ skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
skb->len += frag_len;
frag_size -= frag_len;
@@ -954,15 +956,16 @@ void __bnx2x_link_report(struct bnx2x *bp)
netdev_err(bp->dev, "NIC Link is Down\n");
return;
} else {
+ const char *duplex;
+ const char *flow;
+
netif_carrier_on(bp->dev);
- netdev_info(bp->dev, "NIC Link is Up, ");
- pr_cont("%d Mbps ", cur_data.line_speed);
if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
&cur_data.link_report_flags))
- pr_cont("full duplex");
+ duplex = "full";
else
- pr_cont("half duplex");
+ duplex = "half";
/* Handle the FC at the end so that only these flags would be
* possibly set. This way we may easily check if there is no FC
@@ -971,16 +974,19 @@ void __bnx2x_link_report(struct bnx2x *bp)
if (cur_data.link_report_flags) {
if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
&cur_data.link_report_flags)) {
- pr_cont(", receive ");
if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
&cur_data.link_report_flags))
- pr_cont("& transmit ");
+ flow = "ON - receive & transmit";
+ else
+ flow = "ON - receive";
} else {
- pr_cont(", transmit ");
+ flow = "ON - transmit";
}
- pr_cont("flow control ON");
+ } else {
+ flow = "none";
}
- pr_cont("\n");
+ netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+ cur_data.line_speed, duplex, flow);
}
}
@@ -2357,7 +2363,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
/* Calculate the first sum - it's special */
for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
wnd_sum +=
- skb_shinfo(skb)->frags[frag_idx].size;
+ skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
/* If there was data on linear skb data - check it */
if (first_bd_sz > 0) {
@@ -2373,14 +2379,14 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
check all windows */
for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
wnd_sum +=
- skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
+ skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
if (unlikely(wnd_sum < lso_mss)) {
to_copy = 1;
break;
}
wnd_sum -=
- skb_shinfo(skb)->frags[wnd_idx].size;
+ skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
}
} else {
/* in non-LSO too fragmented packet should always
@@ -2601,7 +2607,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
/* enable this debug print to view the transmission queue being used
- DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d",
+ DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
txq_index, fp_index, txdata_index); */
/* locate the fastpath and the txdata */
@@ -2610,7 +2616,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* enable this debug print to view the tranmission details
DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
- " tx_data ptr %p fp pointer %p",
+ " tx_data ptr %p fp pointer %p\n",
txdata->cid, fp_index, txdata_index, txdata, fp); */
if (unlikely(bnx2x_tx_avail(bp, txdata) <
@@ -2790,9 +2796,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- mapping = dma_map_page(&bp->pdev->dev, frag->page,
- frag->page_offset, frag->size,
- DMA_TO_DEVICE);
+ mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
@@ -2816,8 +2821,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
- tx_data_bd->nbytes = cpu_to_le16(frag->size);
- le16_add_cpu(&pkt_size, frag->size);
+ tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
+ le16_add_cpu(&pkt_size, skb_frag_size(frag));
nbd++;
DP(NETIF_MSG_TX_QUEUED,
@@ -2927,14 +2932,14 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
/* requested to support too many traffic classes */
if (num_tc > bp->max_cos) {
DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
- " requested: %d. max supported is %d",
+ " requested: %d. max supported is %d\n",
num_tc, bp->max_cos);
return -EINVAL;
}
/* declare amount of supported traffic classes */
if (netdev_set_num_tc(dev, num_tc)) {
- DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes",
+ DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
num_tc);
return -EINVAL;
}
@@ -2942,7 +2947,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
/* configure priority to traffic class mapping */
for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
- DP(BNX2X_MSG_SP, "mapping priority %d to tc %d",
+ DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
prio, bp->prio_to_cos[prio]);
}
@@ -2951,10 +2956,10 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
This can be used for ets or pfc, and save the effort of setting
up a multio class queue disc or negotiating DCBX with a switch
netdev_set_prio_tc_map(dev, 0, 0);
- DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0);
+ DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
for (prio = 1; prio < 16; prio++) {
netdev_set_prio_tc_map(dev, prio, 1);
- DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1);
+ DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
} */
/* configure traffic class to transmission queue mapping */
@@ -2962,7 +2967,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
count = BNX2X_NUM_ETH_QUEUES(bp);
offset = cos * MAX_TXQS_PER_COS;
netdev_set_tc_queue(dev, cos, count, offset);
- DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d",
+ DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
cos, offset, count);
}
@@ -3050,7 +3055,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
DP(BNX2X_MSG_SP,
- "freeing tx memory of fp %d cos %d cid %d",
+ "freeing tx memory of fp %d cos %d cid %d\n",
fp_index, cos, txdata->cid);
BNX2X_FREE(txdata->tx_buf_ring);
@@ -3137,7 +3142,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
DP(BNX2X_MSG_SP, "allocating tx memory of "
- "fp %d cos %d",
+ "fp %d cos %d\n",
index, cos);
BNX2X_ALLOC(txdata->tx_buf_ring,
@@ -3387,7 +3392,7 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
struct bnx2x *bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ pr_err("Handling parity error recovery. Try again later\n");
return -EAGAIN;
}
@@ -3513,7 +3518,7 @@ int bnx2x_resume(struct pci_dev *pdev)
bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ pr_err("Handling parity error recovery. Try again later\n");
return -EAGAIN;
}
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 223bfeebc59..283d663da18 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -522,7 +522,7 @@ void bnx2x_free_mem_bp(struct bnx2x *bp);
*/
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
-#if defined(BCM_CNIC) && (defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE))
+#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
/**
* bnx2x_fcoe_get_wwn - return the requested WWN value for this port
*
@@ -1289,7 +1289,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
txdata->txq_index = txq_index;
txdata->tx_cons_sb = tx_cons_sb;
- DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d",
+ DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n",
txdata->cid, txdata->txq_index);
}
@@ -1297,7 +1297,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
return bp->cnic_base_cl_id + cl_idx +
- (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE;
+ (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}
static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
@@ -1333,7 +1333,7 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
- DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)", fp->index);
+ DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index);
/* qZone id equals to FW (per path) client id */
bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
@@ -1481,8 +1481,8 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
FUNC_MF_CFG_MAX_BW_SHIFT;
if (!max_cfg) {
- BNX2X_ERR("Illegal configuration detected for Max BW - "
- "using 100 instead\n");
+ DP(NETIF_MSG_LINK,
+ "Max BW configured to 0 - using 100 instead\n");
max_cfg = 100;
}
return max_cfg;
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 0b4acf67e0c..51bd7485ab1 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -16,6 +16,9 @@
* Written by: Dmitry Kravkov
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -350,7 +353,7 @@ static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
if (cos_params[i].pri_bitmask & nw_prio) {
/* extend the bitmask with unmapped */
DP(NETIF_MSG_LINK,
- "cos %d extended with 0x%08x", i, unmapped);
+ "cos %d extended with 0x%08x\n", i, unmapped);
cos_params[i].pri_bitmask |= unmapped;
break;
}
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 2c6a3bca6f2..2c6a3bca6f2 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index b983825d0ee..b983825d0ee 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index cf3e47914dd..1a6e37ce730 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -14,6 +14,9 @@
* Statistics and Link management by Yitchak Gertner
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/types.h>
@@ -239,9 +242,9 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->maxrxpkt = 0;
DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
- DP_LEVEL " supported 0x%x advertising 0x%x speed %u\n"
- DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
- DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
+ " supported 0x%x advertising 0x%x speed %u\n"
+ " duplex %d port %d phy_address %d transceiver %d\n"
+ " autoneg %d maxtxpkt %d maxrxpkt %d\n",
cmd->cmd, cmd->supported, cmd->advertising,
ethtool_cmd_speed(cmd),
cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
@@ -519,7 +522,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
DP(NETIF_MSG_LINK, "req_line_speed %d\n"
- DP_LEVEL " req_duplex %d advertising 0x%x\n",
+ " req_duplex %d advertising 0x%x\n",
bp->link_params.req_line_speed[cfg_idx],
bp->link_params.req_duplex[cfg_idx],
bp->port.advertising[cfg_idx]);
@@ -1065,7 +1068,7 @@ static int bnx2x_get_eeprom(struct net_device *dev,
return -EAGAIN;
DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
- DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
+ " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
eeprom->len, eeprom->len);
@@ -1236,7 +1239,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
return -EAGAIN;
DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
- DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
+ " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
eeprom->len, eeprom->len);
@@ -1341,17 +1344,12 @@ static void bnx2x_get_ringparam(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
ering->rx_max_pending = MAX_RX_AVAIL;
- ering->rx_mini_max_pending = 0;
- ering->rx_jumbo_max_pending = 0;
if (bp->rx_ring_size)
ering->rx_pending = bp->rx_ring_size;
else
ering->rx_pending = MAX_RX_AVAIL;
- ering->rx_mini_pending = 0;
- ering->rx_jumbo_pending = 0;
-
ering->tx_max_pending = MAX_TX_AVAIL;
ering->tx_pending = bp->tx_ring_size;
}
@@ -1362,7 +1360,7 @@ static int bnx2x_set_ringparam(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ pr_err("Handling parity error recovery. Try again later\n");
return -EAGAIN;
}
@@ -1393,7 +1391,7 @@ static void bnx2x_get_pauseparam(struct net_device *dev,
BNX2X_FLOW_CTRL_TX);
DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
- DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
+ " autoneg %d rx_pause %d tx_pause %d\n",
epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}
@@ -1406,7 +1404,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
return 0;
DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
- DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
+ " autoneg %d rx_pause %d tx_pause %d\n",
epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
@@ -2004,7 +2002,7 @@ static void bnx2x_self_test(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
u8 is_serdes;
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ pr_err("Handling parity error recovery. Try again later\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
@@ -2272,7 +2270,7 @@ static int bnx2x_set_phys_id(struct net_device *dev,
}
static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- void *rules __always_unused)
+ u32 *rules __always_unused)
{
struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 998652a1b85..998652a1b85 100644
--- a/drivers/net/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
diff --git a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index f4a07fbaed0..f4a07fbaed0 100644
--- a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index dc24de40e33..e44b858ff12 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -704,6 +704,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
@@ -759,6 +760,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 4d748e77d1a..4d748e77d1a 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 7ec1724753a..7ec1724753a 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index ba15bdc5a1a..818723c9e67 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -694,8 +694,8 @@ static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
struct bnx2x *bp = params->bp;
if (!CHIP_IS_E3B0(bp)) {
- DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_disabled the chip isn't E3B0"
- "\n");
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
return -EINVAL;
}
@@ -860,12 +860,12 @@ static int bnx2x_ets_e3b0_get_total_bw(
/* Check total BW is valid */
if ((100 != *total_bw) || (0 == *total_bw)) {
if (0 == *total_bw) {
- DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW"
- "shouldn't be 0\n");
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config toatl BW shouldn't be 0\n");
return -EINVAL;
}
- DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW should be"
- "100\n");
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config toatl BW should be 100\n");
/**
* We can handle a case whre the BW isn't 100 this can happen
* if the TC are joined.
@@ -902,13 +902,13 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
- "parameter There can't be two COS's with"
+ "parameter There can't be two COS's with "
"the same strict pri\n");
return -EINVAL;
}
if (pri > max_num_of_cos) {
- DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid"
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
"parameter Illegal strict priority\n");
return -EINVAL;
}
@@ -1084,8 +1084,8 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
u8 cos_entry = 0;
if (!CHIP_IS_E3B0(bp)) {
- DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_disabled the chip isn't E3B0"
- "\n");
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
return -EINVAL;
}
@@ -1102,8 +1102,8 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
&total_bw);
if (0 != bnx2x_status) {
- DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config get_total_bw failed "
- "\n");
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config get_total_bw failed\n");
return -EINVAL;
}
@@ -1138,13 +1138,13 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
cos_entry);
} else {
- DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config cos state not"
- " valid\n");
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_config cos state not valid\n");
return -EINVAL;
}
if (0 != bnx2x_status) {
- DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config set cos bw "
- "failed\n");
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_config set cos bw failed\n");
return bnx2x_status;
}
}
@@ -1154,8 +1154,8 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
sp_pri_to_cos);
if (0 != bnx2x_status) {
- DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config set_pri_cli_reg "
- "failed\n");
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
return bnx2x_status;
}
@@ -1612,8 +1612,8 @@ static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed)
if (is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC)) {
- DP(NETIF_MSG_LINK, "XMAC already out of reset"
- " in 4-port mode\n");
+ DP(NETIF_MSG_LINK,
+ "XMAC already out of reset in 4-port mode\n");
return;
}
@@ -1636,13 +1636,13 @@ static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed)
/* Set the number of ports on the system side to 1 */
REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
if (max_speed == SPEED_10000) {
- DP(NETIF_MSG_LINK, "Init XMAC to 10G x 1"
- " port per path\n");
+ DP(NETIF_MSG_LINK,
+ "Init XMAC to 10G x 1 port per path\n");
/* Set the number of ports on the Warp Core to 10G */
REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
} else {
- DP(NETIF_MSG_LINK, "Init XMAC to 20G x 2 ports"
- " per path\n");
+ DP(NETIF_MSG_LINK,
+ "Init XMAC to 20G x 2 ports per path\n");
/* Set the number of ports on the Warp Core to 20G */
REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1);
}
@@ -3959,8 +3959,8 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
val16 |= 0x0040;
break;
default:
- DP(NETIF_MSG_LINK, "Speed not supported: 0x%x"
- "\n", phy->req_line_speed);
+ DP(NETIF_MSG_LINK,
+ "Speed not supported: 0x%x\n", phy->req_line_speed);
return;
}
@@ -4092,9 +4092,9 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
*/
if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
(cfg_pin > PIN_CFG_GPIO3_P1)) {
- DP(NETIF_MSG_LINK, "ERROR: Invalid cfg pin %x for "
- "module detect indication\n",
- cfg_pin);
+ DP(NETIF_MSG_LINK,
+ "ERROR: Invalid cfg pin %x for module detect indication\n",
+ cfg_pin);
return -EINVAL;
}
@@ -4222,8 +4222,9 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
break;
default:
- DP(NETIF_MSG_LINK, "Unsupported Serdes Net Interface "
- "0x%x\n", serdes_net_if);
+ DP(NETIF_MSG_LINK,
+ "Unsupported Serdes Net Interface 0x%x\n",
+ serdes_net_if);
return;
}
}
@@ -6131,8 +6132,8 @@ static int bnx2x_link_initialize(struct link_params *params,
if (phy_index == EXT_PHY2 &&
(bnx2x_phy_selection(params) ==
PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
- DP(NETIF_MSG_LINK, "Not initializing"
- " second phy\n");
+ DP(NETIF_MSG_LINK,
+ "Not initializing second phy\n");
continue;
}
params->phy[phy_index].config_init(
@@ -6451,8 +6452,8 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
*/
if (active_external_phy == EXT_PHY1) {
if (params->phy[EXT_PHY2].phy_specific_func) {
- DP(NETIF_MSG_LINK, "Disabling TX on"
- " EXT_PHY2\n");
+ DP(NETIF_MSG_LINK,
+ "Disabling TX on EXT_PHY2\n");
params->phy[EXT_PHY2].phy_specific_func(
&params->phy[EXT_PHY2],
params, DISABLE_TX);
@@ -7345,8 +7346,8 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
u16 val = 0;
u16 i;
if (byte_cnt > 16) {
- DP(NETIF_MSG_LINK, "Reading from eeprom is"
- " is limited to 0xf\n");
+ DP(NETIF_MSG_LINK,
+ "Reading from eeprom is limited to 0xf\n");
return -EINVAL;
}
/* Set the read command byte count */
@@ -7417,8 +7418,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
" addr %d, cnt %d\n",
addr, byte_cnt);*/
if (byte_cnt > 16) {
- DP(NETIF_MSG_LINK, "Reading from eeprom is"
- " is limited to 16 bytes\n");
+ DP(NETIF_MSG_LINK,
+ "Reading from eeprom is limited to 16 bytes\n");
return -EINVAL;
}
@@ -7447,8 +7448,8 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
u16 val, i;
if (byte_cnt > 16) {
- DP(NETIF_MSG_LINK, "Reading from eeprom is"
- " is limited to 0xf\n");
+ DP(NETIF_MSG_LINK,
+ "Reading from eeprom is limited to 0xf\n");
return -EINVAL;
}
@@ -7595,13 +7596,14 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
check_limiting_mode = 1;
} else if (copper_module_type &
SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
- DP(NETIF_MSG_LINK, "Passive Copper"
- " cable detected\n");
+ DP(NETIF_MSG_LINK,
+ "Passive Copper cable detected\n");
*edc_mode =
EDC_MODE_PASSIVE_DAC;
} else {
- DP(NETIF_MSG_LINK, "Unknown copper-cable-"
- "type 0x%x !!!\n", copper_module_type);
+ DP(NETIF_MSG_LINK,
+ "Unknown copper-cable-type 0x%x !!!\n",
+ copper_module_type);
return -EINVAL;
}
break;
@@ -7639,8 +7641,8 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
SFP_EEPROM_OPTIONS_ADDR,
SFP_EEPROM_OPTIONS_SIZE,
options) != 0) {
- DP(NETIF_MSG_LINK, "Failed to read Option"
- " field from module EEPROM\n");
+ DP(NETIF_MSG_LINK,
+ "Failed to read Option field from module EEPROM\n");
return -EINVAL;
}
if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK))
@@ -7681,15 +7683,15 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
/* Use first phy request only in case of non-dual media*/
if (DUAL_MEDIA(params)) {
- DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
- "verification\n");
+ DP(NETIF_MSG_LINK,
+ "FW does not support OPT MDL verification\n");
return -EINVAL;
}
cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
} else {
/* No support in OPT MDL detection */
- DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
- "verification\n");
+ DP(NETIF_MSG_LINK,
+ "FW does not support OPT MDL verification\n");
return -EINVAL;
}
@@ -7740,8 +7742,9 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
for (timeout = 0; timeout < 60; timeout++) {
if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
== 0) {
- DP(NETIF_MSG_LINK, "SFP+ module initialization "
- "took %d ms\n", timeout * 5);
+ DP(NETIF_MSG_LINK,
+ "SFP+ module initialization took %d ms\n",
+ timeout * 5);
return 0;
}
msleep(5);
@@ -8510,8 +8513,8 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
/* Set TX PreEmphasis if needed */
if ((params->feature_config_flags &
FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
- DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
- "TX_CTRL2 0x%x\n",
+ DP(NETIF_MSG_LINK,
+ "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
phy->tx_preemphasis[0],
phy->tx_preemphasis[1]);
bnx2x_cl45_write(bp, phy,
@@ -8792,8 +8795,8 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
if (mod_abs & (1<<8)) {
/* Module is absent */
- DP(NETIF_MSG_LINK, "MOD_ABS indication "
- "show module is absent\n");
+ DP(NETIF_MSG_LINK,
+ "MOD_ABS indication show module is absent\n");
phy->media_type = ETH_PHY_NOT_PRESENT;
/*
* 1. Set mod_abs to detect next module
@@ -8820,8 +8823,8 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
} else {
/* Module is present */
- DP(NETIF_MSG_LINK, "MOD_ABS indication "
- "show module is present\n");
+ DP(NETIF_MSG_LINK,
+ "MOD_ABS indication show module is present\n");
/*
* First disable transmitter, and if the module is ok, the
* module_detection will enable it
@@ -8912,8 +8915,9 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
if ((val1 & (1<<8)) == 0) {
if (!CHIP_IS_E1x(bp))
oc_port = BP_PATH(bp) + (params->port << 1);
- DP(NETIF_MSG_LINK, "8727 Power fault has been detected"
- " on port %d\n", oc_port);
+ DP(NETIF_MSG_LINK,
+ "8727 Power fault has been detected on port %d\n",
+ oc_port);
netdev_err(bp->dev, "Error: Power fault on Port %d has"
" been detected and the power to "
"that SFP+ module has been removed"
@@ -9694,8 +9698,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
&legacy_status);
- DP(NETIF_MSG_LINK, "Legacy speed status"
- " = 0x%x\n", legacy_status);
+ DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
+ legacy_status);
link_up = ((legacy_status & (1<<11)) == (1<<11));
if (link_up) {
legacy_speed = (legacy_status & (3<<9));
@@ -9713,9 +9717,10 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
else
vars->duplex = DUPLEX_HALF;
- DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
- " is_duplex_full= %d\n", vars->line_speed,
- (vars->duplex == DUPLEX_FULL));
+ DP(NETIF_MSG_LINK,
+ "Link is up in %dMbps, is_duplex_full= %d\n",
+ vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
/* Check legacy speed AN resolution */
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD,
@@ -10290,9 +10295,10 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
} else /* Should not happen */
vars->line_speed = 0;
- DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
- " is_duplex_full= %d\n", vars->line_speed,
- (vars->duplex == DUPLEX_FULL));
+ DP(NETIF_MSG_LINK,
+ "Link is up in %dMbps, is_duplex_full= %d\n",
+ vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
/* Check legacy speed AN resolution */
bnx2x_cl22_read(bp, phy,
@@ -11236,6 +11242,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
*phy = phy_84833;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
*phy = phy_54618se;
break;
@@ -11336,8 +11343,9 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
dev_info.
port_hw_config[params->port].speed_capability_mask));
}
- DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
- " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
+ DP(NETIF_MSG_LINK,
+ "Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x\n",
+ phy_index, link_config, phy->speed_cap_mask);
phy->req_duplex = DUPLEX_FULL;
switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index c12db6da213..c12db6da213 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 15f800085bb..6486ab8c8fc 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15,6 +15,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -37,6 +39,7 @@
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
+#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -350,17 +353,15 @@ static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
default:
if (src_type == DMAE_CMD_SRC_PCI)
DP(msglvl, "DMAE: opcode 0x%08x\n"
- DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
- "dst_addr [none]\n"
- DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
dmae->comp_val);
else
DP(msglvl, "DMAE: opcode 0x%08x\n"
- DP_LEVEL "src_addr [%08x] len [%d * 4] "
- "dst_addr [none]\n"
- DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
dmae->opcode, dmae->src_addr_lo >> 2,
dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
dmae->comp_val);
@@ -789,18 +790,15 @@ void bnx2x_panic_dump(struct bnx2x *bp)
BNX2X_ERR(" def (");
for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
pr_cont("0x%x%s",
- bp->def_status_blk->sp_sb.index_values[i],
- (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+ bp->def_status_blk->sp_sb.index_values[i],
+ (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
i*sizeof(u32));
- pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) "
- "pf_id(0x%x) vnic_id(0x%x) "
- "vf_id(0x%x) vf_valid (0x%x) "
- "state(0x%x)\n",
+ pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
sp_sb_data.igu_sb_id,
sp_sb_data.igu_seg_id,
sp_sb_data.p_func.pf_id,
@@ -3741,9 +3739,7 @@ static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
static inline void _print_next_block(int idx, const char *blk)
{
- if (idx)
- pr_cont(", ");
- pr_cont("%s", blk);
+ pr_cont("%s%s", idx ? ", " : "", blk);
}
static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
@@ -4357,8 +4353,6 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
vlan_mac_obj = &bp->fp[cid].mac_obj;
break;
- vlan_mac_obj = &bp->fp[cid].mac_obj;
-
case BNX2X_FILTER_MCAST_PENDING:
/* This is only relevant for 57710 where multicast MACs are
* configured as unicast MACs using the same ramrod.
@@ -4408,7 +4402,7 @@ static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
struct bnx2x *bp, u32 cid)
{
- DP(BNX2X_MSG_SP, "retrieving fp from cid %d", cid);
+ DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
#ifdef BCM_CNIC
if (cid == BNX2X_FCOE_ETH_CID)
return &bnx2x_fcoe(bp, q_obj);
@@ -7243,7 +7237,7 @@ static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
/* set maximum number of COSs supported by this queue */
init_params->max_cos = fp->max_cos;
- DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d",
+ DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d\n",
fp->index, init_params->max_cos);
/* set the context pointers queue object */
@@ -7276,7 +7270,7 @@ int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:"
"cos %d, primary cid %d, cid %d, "
- "client id %d, sp-client id %d, flags %lx",
+ "client id %d, sp-client id %d, flags %lx\n",
tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
tx_only_params->gen_params.spcl_id, tx_only_params->flags);
@@ -7308,7 +7302,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
int rc;
u8 tx_index;
- DP(BNX2X_MSG_SP, "setting up queue %d", fp->index);
+ DP(BNX2X_MSG_SP, "setting up queue %d\n", fp->index);
/* reset IGU state skip FCoE L2 queue */
if (!IS_FCOE_FP(fp))
@@ -7332,7 +7326,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return rc;
}
- DP(BNX2X_MSG_SP, "init complete");
+ DP(BNX2X_MSG_SP, "init complete\n");
/* Now move the Queue to the SETUP state... */
@@ -7386,7 +7380,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
struct bnx2x_queue_state_params q_params = {0};
int rc, tx_index;
- DP(BNX2X_MSG_SP, "stopping queue %d cid %d", index, fp->cid);
+ DP(BNX2X_MSG_SP, "stopping queue %d cid %d\n", index, fp->cid);
q_params.q_obj = &fp->q_obj;
/* We want to wait for completion in this context */
@@ -7401,7 +7395,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
/* ascertain this is a normal queue*/
txdata = &fp->txdata[tx_index];
- DP(BNX2X_MSG_SP, "stopping tx-only queue %d",
+ DP(BNX2X_MSG_SP, "stopping tx-only queue %d\n",
txdata->txq_index);
/* send halt terminate on tx-only connection */
@@ -9391,9 +9385,8 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
val = MF_CFG_RD(bp, func_ext_config[func].
iscsi_mac_addr_lower);
bnx2x_set_mac_buf(iscsi_mac, val, val2);
- BNX2X_DEV_INFO("Read iSCSI MAC: "
- BNX2X_MAC_FMT"\n",
- BNX2X_MAC_PRN_LIST(iscsi_mac));
+ BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+ iscsi_mac);
} else
bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
@@ -9403,9 +9396,8 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
val = MF_CFG_RD(bp, func_ext_config[func].
fcoe_mac_addr_lower);
bnx2x_set_mac_buf(fip_mac, val, val2);
- BNX2X_DEV_INFO("Read FCoE L2 MAC to "
- BNX2X_MAC_FMT"\n",
- BNX2X_MAC_PRN_LIST(fip_mac));
+ BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
+ fip_mac);
} else
bp->flags |= NO_FCOE_FLAG;
@@ -9460,9 +9452,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
if (!is_valid_ether_addr(bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
"bad Ethernet MAC address configuration: "
- BNX2X_MAC_FMT", change it manually before bringing up "
+ "%pM, change it manually before bringing up "
"the appropriate network interface\n",
- BNX2X_MAC_PRN_LIST(bp->dev->dev_addr));
+ bp->dev->dev_addr);
}
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
@@ -10366,9 +10358,11 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
dev->netdev_ops = &bnx2x_netdev_ops;
bnx2x_set_ethtool_ops(dev);
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
- NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_LRO |
+ NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
@@ -10799,7 +10793,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
return rc;
}
- DP(NETIF_MSG_DRV, "max_non_def_sbs %d", max_non_def_sbs);
+ DP(NETIF_MSG_DRV, "max_non_def_sbs %d\n", max_non_def_sbs);
rc = bnx2x_init_bp(bp);
if (rc)
@@ -10854,15 +10848,14 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
- netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
- " IRQ %d, ", board_info[ent->driver_data].name,
- (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
- pcie_width,
- ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
- (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
- "5GHz (Gen2)" : "2.5GHz",
- dev->base_addr, bp->pdev->irq);
- pr_cont("node addr %pM\n", dev->dev_addr);
+ netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ pcie_width,
+ ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
+ (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
+ "5GHz (Gen2)" : "2.5GHz",
+ dev->base_addr, bp->pdev->irq, dev->dev_addr);
return 0;
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index fc7bd0f23c0..fc7bd0f23c0 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
diff --git a/drivers/net/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index df52f110c6c..0440425c83d 100644
--- a/drivers/net/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -16,6 +16,9 @@
* Written by: Vladislav Zolotarov
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
@@ -707,9 +710,8 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
&rule_entry->mac.header);
- DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for "
- "Queue %d\n", (add ? "add" : "delete"),
- BNX2X_MAC_PRN_LIST(mac), raw->cl_id);
+ DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
+ add ? "add" : "delete", mac, raw->cl_id);
/* Set a MAC itself */
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
@@ -801,9 +803,9 @@ static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
cfg_entry);
- DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n",
- (add ? "setting" : "clearing"),
- BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset);
+ DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
+ add ? "setting" : "clearing",
+ mac, raw->cl_id, cam_offset);
}
/**
@@ -2579,9 +2581,8 @@ static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
cnt++;
- DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
- " mcast MAC\n",
- BNX2X_MAC_PRN_LIST(pmac_pos->mac));
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ pmac_pos->mac);
list_del(&pmac_pos->link);
@@ -2702,9 +2703,8 @@ static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
cnt++;
- DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
- " mcast MAC\n",
- BNX2X_MAC_PRN_LIST(mlist_pos->mac));
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ mlist_pos->mac);
}
*line_idx = cnt;
@@ -2998,9 +2998,8 @@ static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
- DP(BNX2X_MSG_SP, "About to configure "
- BNX2X_MAC_FMT" mcast MAC, bin %d\n",
- BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit);
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
+ mlist_pos->mac, bit);
/* bookkeeping... */
BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
@@ -3049,8 +3048,8 @@ static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
break;
case BNX2X_MCAST_CMD_DEL:
- DP(BNX2X_MSG_SP, "Invalidating multicast "
- "MACs configuration\n");
+ DP(BNX2X_MSG_SP,
+ "Invalidating multicast MACs configuration\n");
/* clear the registry */
memset(o->registry.aprox_match.vec, 0,
@@ -3233,9 +3232,8 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
i++;
- DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
- " mcast MAC\n",
- BNX2X_MAC_PRN_LIST(cfg_data.mac));
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ cfg_data.mac);
}
*rdata_idx = i;
@@ -3270,9 +3268,8 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
cnt++;
- DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
- " mcast MAC\n",
- BNX2X_MAC_PRN_LIST(pmac_pos->mac));
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ pmac_pos->mac);
}
break;
@@ -3357,9 +3354,8 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
&data->config_table[i].middle_mac_addr,
&data->config_table[i].lsb_mac_addr,
elem->mac);
- DP(BNX2X_MSG_SP, "Adding registry entry for ["
- BNX2X_MAC_FMT"]\n",
- BNX2X_MAC_PRN_LIST(elem->mac));
+ DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
+ elem->mac);
list_add_tail(&elem->link,
&o->registry.exact_match.macs);
}
@@ -4246,7 +4242,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
if (o->next_tx_only) /* print num tx-only if any exist */
- DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d",
+ DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
o->state = o->next_state;
@@ -4308,7 +4304,7 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
test_bit(BNX2X_Q_FLG_FCOE, flags) ?
LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
- DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d",
+ DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
}
@@ -4461,7 +4457,7 @@ static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
&data->tx,
&cmd_params->params.tx_only.flags);
- DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",cmd_params->q_obj->cids[0],
+ DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",cmd_params->q_obj->cids[0],
data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
}
@@ -4508,9 +4504,9 @@ static inline int bnx2x_q_init(struct bnx2x *bp,
/* Set CDU context validation values */
for (cos = 0; cos < o->max_cos; cos++) {
- DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d",
+ DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
o->cids[cos], cos);
- DP(BNX2X_MSG_SP, "context pointer %p", init->cxts[cos]);
+ DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
}
@@ -4599,7 +4595,7 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
return -EINVAL;
}
- DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d",
+ DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
tx_only_params->gen_params.cos,
tx_only_params->gen_params.spcl_id);
@@ -4610,7 +4606,7 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
bnx2x_q_fill_setup_tx_only(bp, params, rdata);
DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d,"
- "sp-client id %d, cos %d",
+ "sp-client id %d, cos %d\n",
o->cids[cid_index],
rdata->general.client_id,
rdata->general.sp_client_id, rdata->general.cos);
@@ -5167,8 +5163,9 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
return -EINVAL;
}
- DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
- "%d\n", cmd, BP_FUNC(bp), o->next_state);
+ DP(BNX2X_MSG_SP,
+ "Completing command %d for func %d, setting state to %d\n",
+ cmd, BP_FUNC(bp), o->next_state);
o->state = o->next_state;
o->next_state = BNX2X_F_STATE_MAX;
diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 9a517c2e9f1..9a517c2e9f1 100644
--- a/drivers/net/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 9908f2bbcf7..02ac6a771bf 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -14,6 +14,9 @@
* Statistics and Link management by Yitchak Gertner
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"
@@ -1195,14 +1198,13 @@ static void bnx2x_stats_update(struct bnx2x *bp)
struct bnx2x_fastpath *fp = &bp->fp[i];
struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
- " rx pkt(%lu) rx calls(%lu %lu)\n",
- fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
- fp->rx_comp_cons),
- le16_to_cpu(*fp->rx_cons_sb),
- bnx2x_hilo(&qstats->
- total_unicast_packets_received_hi),
- fp->rx_calls, fp->rx_pkt);
+ pr_debug("%s: rx usage(%4u) *rx_cons_sb(%u) rx pkt(%lu) rx calls(%lu %lu)\n",
+ fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
+ fp->rx_comp_cons),
+ le16_to_cpu(*fp->rx_cons_sb),
+ bnx2x_hilo(&qstats->
+ total_unicast_packets_received_hi),
+ fp->rx_calls, fp->rx_pkt);
}
for_each_eth_queue(bp, i) {
@@ -1211,27 +1213,25 @@ static void bnx2x_stats_update(struct bnx2x *bp)
struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
struct netdev_queue *txq;
- printk(KERN_DEBUG "%s: tx pkt(%lu) (Xoff events %u)",
- fp->name, bnx2x_hilo(
- &qstats->total_unicast_packets_transmitted_hi),
- qstats->driver_xoff);
+ pr_debug("%s: tx pkt(%lu) (Xoff events %u)",
+ fp->name,
+ bnx2x_hilo(
+ &qstats->total_unicast_packets_transmitted_hi),
+ qstats->driver_xoff);
for_each_cos_in_tx_queue(fp, cos) {
txdata = &fp->txdata[cos];
txq = netdev_get_tx_queue(bp->dev,
FP_COS_TO_TXQ(fp, cos));
- printk(KERN_DEBUG "%d: tx avail(%4u)"
- " *tx_cons_sb(%u)"
- " tx calls (%lu)"
- " %s\n",
- cos,
- bnx2x_tx_avail(bp, txdata),
- le16_to_cpu(*txdata->tx_cons_sb),
- txdata->tx_pkt,
- (netif_tx_queue_stopped(txq) ?
- "Xoff" : "Xon")
- );
+ pr_debug("%d: tx avail(%4u) *tx_cons_sb(%u) tx calls (%lu) %s\n",
+ cos,
+ bnx2x_tx_avail(bp, txdata),
+ le16_to_cpu(*txdata->tx_cons_sb),
+ txdata->tx_pkt,
+ (netif_tx_queue_stopped(txq) ?
+ "Xoff" : "Xon")
+ );
}
}
}
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 5d8ce2f6afe..5d8ce2f6afe 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
diff --git a/drivers/net/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 94a2e541006..6f10c693983 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -45,8 +45,8 @@
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
-#include "../scsi/bnx2i/57xx_iscsi_constants.h"
-#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
+#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
+#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"
@@ -1177,7 +1177,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
- cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
+ cp->max_cid_space += dev->max_fcoe_conn;
cp->fcoe_init_cid = ethdev->fcoe_init_cid;
if (!cp->fcoe_init_cid)
cp->fcoe_init_cid = 0x10;
@@ -1875,12 +1875,12 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
hw_cid, NONE_CONNECTION_TYPE, &l5_data);
if (ret == 0) {
- wait_event(ctx->waitq, ctx->wait_cond);
+ wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
return -EBUSY;
}
- return ret;
+ return 0;
}
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
@@ -2280,7 +2280,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
*work = 4;
l5_cid = req1->fcoe_conn_id;
- if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+ if (l5_cid >= dev->max_fcoe_conn)
goto err_reply;
l5_cid += BNX2X_FCOE_L5_CID_BASE;
@@ -2384,7 +2384,7 @@ static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
cid = req->context_id;
l5_cid = req->conn_id;
- if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+ if (l5_cid >= dev->max_fcoe_conn)
return -EINVAL;
l5_cid += BNX2X_FCOE_L5_CID_BASE;
@@ -2418,7 +2418,7 @@ static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
req = (struct fcoe_kwqe_conn_destroy *) kwqe;
cid = req->context_id;
l5_cid = req->conn_id;
- if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+ if (l5_cid >= dev->max_fcoe_conn)
return -EINVAL;
l5_cid += BNX2X_FCOE_L5_CID_BASE;
@@ -2428,17 +2428,20 @@ static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
init_waitqueue_head(&ctx->waitq);
ctx->wait_cond = 0;
+ memset(&kcqe, 0, sizeof(kcqe));
+ kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
FCOE_CONNECTION_TYPE, &l5_data);
if (ret == 0) {
- wait_event(ctx->waitq, ctx->wait_cond);
- set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
- queue_delayed_work(cnic_wq, &cp->delete_task,
- msecs_to_jiffies(2000));
+ wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
+ if (ctx->wait_cond)
+ kcqe.completion_status = 0;
}
- memset(&kcqe, 0, sizeof(kcqe));
+ set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
+ queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
+
kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
kcqe.fcoe_conn_id = req->conn_id;
kcqe.fcoe_conn_context_id = cid;
@@ -4850,8 +4853,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
return -ENOMEM;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
- ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
- BNX2X_FCOE_NUM_CONNECTIONS,
+ ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
cp->fcoe_start_cid, 0);
if (ret)
@@ -5292,6 +5294,9 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
!(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+ if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
+ cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
+
memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
cp->cnic_ops = &cnic_bnx2x_ops;
diff --git a/drivers/net/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 7a2928f82d4..30328097f51 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -373,7 +373,7 @@ struct bnx2x_bd_chain_next {
#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff
#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff
-#define BNX2X_FCOE_NUM_CONNECTIONS 128
+#define BNX2X_FCOE_NUM_CONNECTIONS 1024
#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ
@@ -474,5 +474,7 @@ struct bnx2x_bd_chain_next {
MAX_STAT_COUNTER_ID_E1))
#endif
+#define CNIC_RAMROD_TMO (HZ / 4)
+
#endif
diff --git a/drivers/net/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index e47d2107676..239de898f07 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -67,6 +67,7 @@
#define FCOE_KWQE_OPCODE_DESTROY (10)
#define FCOE_KWQE_OPCODE_STAT (11)
+#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1)
#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
/* KCQ (kernel completion queue) response op codes */
diff --git a/drivers/net/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 79443e0dbf9..79443e0dbf9 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index ea65f7ec360..0a1d7f279fc 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2176,7 +2176,7 @@ static const struct net_device_ops sbmac_netdev_ops = {
.ndo_open = sbmac_open,
.ndo_stop = sbmac_close,
.ndo_start_xmit = sbmac_start_tx,
- .ndo_set_multicast_list = sbmac_set_rx_mode,
+ .ndo_set_rx_mode = sbmac_set_rx_mode,
.ndo_tx_timeout = sbmac_tx_timeout,
.ndo_do_ioctl = sbmac_mii_ioctl,
.ndo_change_mtu = sb1250_change_mtu,
diff --git a/drivers/net/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 4a1374df608..161cbbb4814 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -89,12 +89,15 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 119
+#define TG3_MIN_NUM 120
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "May 18, 2011"
+#define DRV_MODULE_RELDATE "August 18, 2011"
+
+#define RESET_KIND_SHUTDOWN 0
+#define RESET_KIND_INIT 1
+#define RESET_KIND_SUSPEND 2
-#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
#define TG3_DEF_TX_MODE 0
#define TG3_DEF_MSG_ENABLE \
@@ -188,6 +191,12 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
#endif
+#if (NET_IP_ALIGN != 0)
+#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
+#else
+#define TG3_RX_OFFSET(tp) 0
+#endif
+
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX 4096
@@ -391,12 +400,14 @@ static const struct {
static const struct {
const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
- { "nvram test (online) " },
- { "link test (online) " },
- { "register test (offline)" },
- { "memory test (offline)" },
- { "loopback test (offline)" },
- { "interrupt test (offline)" },
+ { "nvram test (online) " },
+ { "link test (online) " },
+ { "register test (offline)" },
+ { "memory test (offline)" },
+ { "mac loopback test (offline)" },
+ { "phy loopback test (offline)" },
+ { "ext loopback test (offline)" },
+ { "interrupt test (offline)" },
};
#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
@@ -717,6 +728,103 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
+static void tg3_ape_send_event(struct tg3 *tp, u32 event)
+{
+ int i;
+ u32 apedata;
+
+ /* NCSI does not support APE events */
+ if (tg3_flag(tp, APE_HAS_NCSI))
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+ if (apedata != APE_SEG_SIG_MAGIC)
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (!(apedata & APE_FW_STATUS_READY))
+ return;
+
+ /* Wait for up to 1 millisecond for APE to service previous event. */
+ for (i = 0; i < 10; i++) {
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
+ event | APE_EVENT_STATUS_EVENT_PENDING);
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ break;
+
+ udelay(100);
+ }
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+}
+
+static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
+{
+ u32 event;
+ u32 apedata;
+
+ if (!tg3_flag(tp, ENABLE_APE))
+ return;
+
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
+ APE_HOST_SEG_SIG_MAGIC);
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
+ APE_HOST_SEG_LEN_MAGIC);
+ apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
+ tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
+ tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
+ APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
+ tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
+ APE_HOST_BEHAV_NO_PHYLOCK);
+ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
+ TG3_APE_HOST_DRVR_STATE_START);
+
+ event = APE_EVENT_STATUS_STATE_START;
+ break;
+ case RESET_KIND_SHUTDOWN:
+ /* With the interface we are currently using,
+ * APE does not track driver state. Wiping
+ * out the HOST SEGMENT SIGNATURE forces
+ * the APE to assume OS absent status.
+ */
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+
+ if (device_may_wakeup(&tp->pdev->dev) &&
+ tg3_flag(tp, WOL_ENABLE)) {
+ tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
+ TG3_APE_HOST_WOL_SPEED_AUTO);
+ apedata = TG3_APE_HOST_DRVR_STATE_WOL;
+ } else
+ apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
+
+ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
+
+ event = APE_EVENT_STATUS_STATE_UNLOAD;
+ break;
+ case RESET_KIND_SUSPEND:
+ event = APE_EVENT_STATUS_STATE_SUSPEND;
+ break;
+ default:
+ return;
+ }
+
+ event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
+
+ tg3_ape_send_event(tp, event);
+}
+
static void tg3_disable_ints(struct tg3 *tp)
{
int i;
@@ -1389,6 +1497,149 @@ static void tg3_ump_link_report(struct tg3 *tp)
tg3_generate_fw_event(tp);
}
+/* tp->lock is held. */
+static void tg3_stop_fw(struct tg3 *tp)
+{
+ if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
+ /* Wait for RX cpu to ACK the previous event. */
+ tg3_wait_for_event_ack(tp);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
+
+ tg3_generate_fw_event(tp);
+
+ /* Wait for RX cpu to ACK this event. */
+ tg3_wait_for_event_ack(tp);
+ }
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
+{
+ tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
+ NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
+
+ if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_START);
+ break;
+
+ case RESET_KIND_SHUTDOWN:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_UNLOAD);
+ break;
+
+ case RESET_KIND_SUSPEND:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_SUSPEND);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (kind == RESET_KIND_INIT ||
+ kind == RESET_KIND_SUSPEND)
+ tg3_ape_driver_state_change(tp, kind);
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
+{
+ if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_START_DONE);
+ break;
+
+ case RESET_KIND_SHUTDOWN:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_UNLOAD_DONE);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (kind == RESET_KIND_SHUTDOWN)
+ tg3_ape_driver_state_change(tp, kind);
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
+{
+ if (tg3_flag(tp, ENABLE_ASF)) {
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_START);
+ break;
+
+ case RESET_KIND_SHUTDOWN:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_UNLOAD);
+ break;
+
+ case RESET_KIND_SUSPEND:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_SUSPEND);
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+static int tg3_poll_fw(struct tg3 *tp)
+{
+ int i;
+ u32 val;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ /* Wait up to 20ms for init done. */
+ for (i = 0; i < 200; i++) {
+ if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
+ return 0;
+ udelay(100);
+ }
+ return -ENODEV;
+ }
+
+ /* Wait for firmware initialization to complete. */
+ for (i = 0; i < 100000; i++) {
+ tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+ if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+ break;
+ udelay(10);
+ }
+
+ /* Chip might not be fitted with firmware. Some Sun onboard
+ * parts are configured like that. So don't signal the timeout
+ * of the above loop as an error, but do report the lack of
+ * running firmware once.
+ */
+ if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
+ tg3_flag_set(tp, NO_FWARE_REPORTED);
+
+ netdev_info(tp->dev, "No firmware running\n");
+ }
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ /* The 57765 A0 needs a little more
+ * time to do some important work.
+ */
+ mdelay(10);
+ }
+
+ return 0;
+}
+
static void tg3_link_report(struct tg3 *tp)
{
if (!netif_carrier_ok(tp->dev)) {
@@ -1680,6 +1931,36 @@ static void tg3_phy_fini(struct tg3 *tp)
}
}
+static int tg3_phy_set_extloopbk(struct tg3 *tp)
+{
+ int err;
+ u32 val;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET)
+ return 0;
+
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+ /* Cannot do read-modify-write on 5401 */
+ err = tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+ MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
+ 0x4c20);
+ goto done;
+ }
+
+ err = tg3_phy_auxctl_read(tp,
+ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+ if (err)
+ return err;
+
+ val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
+ err = tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
+
+done:
+ return err;
+}
+
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
u32 phytest;
@@ -2450,12 +2731,6 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
}
static int tg3_setup_phy(struct tg3 *, int);
-
-#define RESET_KIND_SHUTDOWN 0
-#define RESET_KIND_INIT 1
-#define RESET_KIND_SUSPEND 2
-
-static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
@@ -2724,6 +2999,228 @@ static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
return res;
}
+#define RX_CPU_SCRATCH_BASE 0x30000
+#define RX_CPU_SCRATCH_SIZE 0x04000
+#define TX_CPU_SCRATCH_BASE 0x34000
+#define TX_CPU_SCRATCH_SIZE 0x04000
+
+/* tp->lock is held. */
+static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
+{
+ int i;
+
+ BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ u32 val = tr32(GRC_VCPU_EXT_CTRL);
+
+ tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
+ return 0;
+ }
+ if (offset == RX_CPU_BASE) {
+ for (i = 0; i < 10000; i++) {
+ tw32(offset + CPU_STATE, 0xffffffff);
+ tw32(offset + CPU_MODE, CPU_MODE_HALT);
+ if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
+ break;
+ }
+
+ tw32(offset + CPU_STATE, 0xffffffff);
+ tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
+ udelay(10);
+ } else {
+ for (i = 0; i < 10000; i++) {
+ tw32(offset + CPU_STATE, 0xffffffff);
+ tw32(offset + CPU_MODE, CPU_MODE_HALT);
+ if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
+ break;
+ }
+ }
+
+ if (i >= 10000) {
+ netdev_err(tp->dev, "%s timed out, %s CPU\n",
+ __func__, offset == RX_CPU_BASE ? "RX" : "TX");
+ return -ENODEV;
+ }
+
+ /* Clear firmware's nvram arbitration. */
+ if (tg3_flag(tp, NVRAM))
+ tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
+ return 0;
+}
+
+struct fw_info {
+ unsigned int fw_base;
+ unsigned int fw_len;
+ const __be32 *fw_data;
+};
+
+/* tp->lock is held. */
+static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
+ u32 cpu_scratch_base, int cpu_scratch_size,
+ struct fw_info *info)
+{
+ int err, lock_err, i;
+ void (*write_op)(struct tg3 *, u32, u32);
+
+ if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
+ netdev_err(tp->dev,
+ "%s: Trying to load TX cpu firmware which is 5705\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (tg3_flag(tp, 5705_PLUS))
+ write_op = tg3_write_mem;
+ else
+ write_op = tg3_write_indirect_reg32;
+
+ /* It is possible that bootcode is still loading at this point.
+ * Get the nvram lock first before halting the cpu.
+ */
+ lock_err = tg3_nvram_lock(tp);
+ err = tg3_halt_cpu(tp, cpu_base);
+ if (!lock_err)
+ tg3_nvram_unlock(tp);
+ if (err)
+ goto out;
+
+ for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
+ write_op(tp, cpu_scratch_base + i, 0);
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
+ for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
+ write_op(tp, (cpu_scratch_base +
+ (info->fw_base & 0xffff) +
+ (i * sizeof(u32))),
+ be32_to_cpu(info->fw_data[i]));
+
+ err = 0;
+
+out:
+ return err;
+}
+
+/* tp->lock is held. */
+static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
+{
+ struct fw_info info;
+ const __be32 *fw_data;
+ int err, i;
+
+ fw_data = (void *)tp->fw->data;
+
+ /* Firmware blob starts with version numbers, followed by
+ start address and length. We are setting complete length.
+ length = end_address_of_bss - start_address_of_text.
+ Remainder is the blob to be loaded contiguously
+ from start address. */
+
+ info.fw_base = be32_to_cpu(fw_data[1]);
+ info.fw_len = tp->fw->size - 12;
+ info.fw_data = &fw_data[3];
+
+ err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
+ RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
+ &info);
+ if (err)
+ return err;
+
+ err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
+ TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
+ &info);
+ if (err)
+ return err;
+
+ /* Now startup only the RX cpu. */
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
+
+ for (i = 0; i < 5; i++) {
+ if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
+ break;
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
+ tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
+ udelay(1000);
+ }
+ if (i >= 5) {
+ netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
+ "should be %08x\n", __func__,
+ tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
+ return -ENODEV;
+ }
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
+
+ return 0;
+}
+
+/* tp->lock is held. */
+static int tg3_load_tso_firmware(struct tg3 *tp)
+{
+ struct fw_info info;
+ const __be32 *fw_data;
+ unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
+ int err, i;
+
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3))
+ return 0;
+
+ fw_data = (void *)tp->fw->data;
+
+ /* Firmware blob starts with version numbers, followed by
+ start address and length. We are setting complete length.
+ length = end_address_of_bss - start_address_of_text.
+ Remainder is the blob to be loaded contiguously
+ from start address. */
+
+ info.fw_base = be32_to_cpu(fw_data[1]);
+ cpu_scratch_size = tp->fw_len;
+ info.fw_len = tp->fw->size - 12;
+ info.fw_data = &fw_data[3];
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ cpu_base = RX_CPU_BASE;
+ cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
+ } else {
+ cpu_base = TX_CPU_BASE;
+ cpu_scratch_base = TX_CPU_SCRATCH_BASE;
+ cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
+ }
+
+ err = tg3_load_firmware_cpu(tp, cpu_base,
+ cpu_scratch_base, cpu_scratch_size,
+ &info);
+ if (err)
+ return err;
+
+ /* Now startup the cpu. */
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_PC, info.fw_base);
+
+ for (i = 0; i < 5; i++) {
+ if (tr32(cpu_base + CPU_PC) == info.fw_base)
+ break;
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ tw32_f(cpu_base + CPU_PC, info.fw_base);
+ udelay(1000);
+ }
+ if (i >= 5) {
+ netdev_err(tp->dev,
+ "%s fails to set CPU PC, is %08x should be %08x\n",
+ __func__, tr32(cpu_base + CPU_PC), info.fw_base);
+ return -ENODEV;
+ }
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_MODE, 0x00000000);
+ return 0;
+}
+
+
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
@@ -3303,8 +3800,9 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
return 0;
- if ((adv_reg & all_mask) != all_mask)
+ if ((adv_reg & ADVERTISE_ALL) != all_mask)
return 0;
+
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
u32 tg3_ctrl;
@@ -3317,9 +3815,11 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
return 0;
- if ((tg3_ctrl & all_mask) != all_mask)
+ tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+ if (tg3_ctrl != all_mask)
return 0;
}
+
return 1;
}
@@ -3620,8 +4120,8 @@ relink:
newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
if (newlnkctl != oldlnkctl)
pci_write_config_word(tp->pdev,
- pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
- newlnkctl);
+ pci_pcie_cap(tp->pdev) +
+ PCI_EXP_LNKCTL, newlnkctl);
}
if (current_link_up != netif_carrier_ok(tp->dev)) {
@@ -4856,7 +5356,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
pci_unmap_page(tp->pdev,
dma_unmap_addr(ri, mapping),
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
while (ri->fragmented) {
@@ -4951,11 +5451,11 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
* Callers depend upon this behavior and assume that
* we leave everything unchanged if we fail.
*/
- skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
+ skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
if (skb == NULL)
return -ENOMEM;
- skb_reserve(skb, tp->rx_offset);
+ skb_reserve(skb, TG3_RX_OFFSET(tp));
mapping = pci_map_single(tp->pdev, skb->data, skb_size,
PCI_DMA_FROMDEVICE);
@@ -5673,7 +6173,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id)
* NIC to stop sending us irqs, engaging "in-intr-handler"
* event coalescing.
*/
- tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+ tw32_mailbox(tnapi->int_mbox, 0x00000001);
if (likely(!tg3_irq_sync(tp)))
napi_schedule(&tnapi->napi);
@@ -6010,14 +6510,14 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
}
for (i = 0; i < last; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
entry = NEXT_TX(entry);
txb = &tnapi->tx_buffers[entry];
pci_unmap_page(tnapi->tp->pdev,
dma_unmap_addr(txb, mapping),
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
while (txb->fragmented) {
txb->fragmented = false;
@@ -6029,12 +6529,12 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
- struct sk_buff *skb,
+ struct sk_buff **pskb,
u32 *entry, u32 *budget,
u32 base_flags, u32 mss, u32 vlan)
{
struct tg3 *tp = tnapi->tp;
- struct sk_buff *new_skb;
+ struct sk_buff *new_skb, *skb = *pskb;
dma_addr_t new_addr = 0;
int ret = 0;
@@ -6076,7 +6576,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
}
dev_kfree_skb(skb);
-
+ *pskb = new_skb;
return ret;
}
@@ -6171,10 +6671,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 tcp_opt_len, hdr_len;
if (skb_header_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
- dev_kfree_skb(skb);
- goto out_unlock;
- }
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ goto drop;
iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
@@ -6234,22 +6732,21 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
+ !mss && skb->len > VLAN_ETH_FRAME_LEN)
+ base_flags |= TXD_FLAG_JMB_PKT;
+
if (vlan_tx_tag_present(skb)) {
base_flags |= TXD_FLAG_VLAN;
vlan = vlan_tx_tag_get(skb);
}
- if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
- !mss && skb->len > VLAN_ETH_FRAME_LEN)
- base_flags |= TXD_FLAG_JMB_PKT;
-
len = skb_headlen(skb);
mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(tp->pdev, mapping)) {
- dev_kfree_skb(skb);
- goto out_unlock;
- }
+ if (pci_dma_mapping_error(tp->pdev, mapping))
+ goto drop;
+
tnapi->tx_buffers[entry].skb = skb;
dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
@@ -6277,16 +6774,14 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i <= last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
- mapping = pci_map_page(tp->pdev,
- frag->page,
- frag->page_offset,
- len, PCI_DMA_TODEVICE);
+ len = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
+ len, DMA_TO_DEVICE);
tnapi->tx_buffers[entry].skb = NULL;
dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
mapping);
- if (pci_dma_mapping_error(tp->pdev, mapping))
+ if (dma_mapping_error(&tp->pdev->dev, mapping))
goto dma_error;
if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
@@ -6305,9 +6800,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
entry = tnapi->tx_prod;
budget = tg3_tx_avail(tnapi);
- if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
+ if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
base_flags, mss, vlan))
- goto out_unlock;
+ goto drop_nofree;
}
skb_tx_timestamp(skb);
@@ -6329,18 +6824,140 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_wake_queue(txq);
}
-out_unlock:
mmiowb();
-
return NETDEV_TX_OK;
dma_error:
tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
- dev_kfree_skb(skb);
tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
+drop:
+ dev_kfree_skb(skb);
+drop_nofree:
+ tp->tx_dropped++;
return NETDEV_TX_OK;
}
+static void tg3_mac_loopback(struct tg3 *tp, bool enable)
+{
+ if (enable) {
+ tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
+ MAC_MODE_PORT_MODE_MASK);
+
+ tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
+
+ if (!tg3_flag(tp, 5705_PLUS))
+ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+
+ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+ else
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ } else {
+ tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
+
+ if (tg3_flag(tp, 5705_PLUS) ||
+ (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ }
+
+ tw32(MAC_MODE, tp->mac_mode);
+ udelay(40);
+}
+
+static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
+{
+ u32 val, bmcr, mac_mode, ptest = 0;
+
+ tg3_phy_toggle_apd(tp, false);
+ tg3_phy_toggle_automdix(tp, 0);
+
+ if (extlpbk && tg3_phy_set_extloopbk(tp))
+ return -EIO;
+
+ bmcr = BMCR_FULLDPLX;
+ switch (speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ bmcr |= BMCR_SPEED100;
+ break;
+ case SPEED_1000:
+ default:
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ speed = SPEED_100;
+ bmcr |= BMCR_SPEED100;
+ } else {
+ speed = SPEED_1000;
+ bmcr |= BMCR_SPEED1000;
+ }
+ }
+
+ if (extlpbk) {
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
+ tg3_readphy(tp, MII_CTRL1000, &val);
+ val |= CTL1000_AS_MASTER |
+ CTL1000_ENABLE_MASTER;
+ tg3_writephy(tp, MII_CTRL1000, val);
+ } else {
+ ptest = MII_TG3_FET_PTEST_TRIM_SEL |
+ MII_TG3_FET_PTEST_TRIM_2;
+ tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
+ }
+ } else
+ bmcr |= BMCR_LOOPBACK;
+
+ tg3_writephy(tp, MII_BMCR, bmcr);
+
+ /* The write needs to be flushed for the FETs */
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET)
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+
+ udelay(40);
+
+ if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
+ MII_TG3_FET_PTEST_FRC_TX_LINK |
+ MII_TG3_FET_PTEST_FRC_TX_LOCK);
+
+ /* The write needs to be flushed for the AC131 */
+ tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
+ }
+
+ /* Reset to prevent losing 1st rx packet intermittently */
+ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+ tg3_flag(tp, 5780_CLASS)) {
+ tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+ udelay(10);
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ }
+
+ mac_mode = tp->mac_mode &
+ ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+ if (speed == SPEED_1000)
+ mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ else
+ mac_mode |= MAC_MODE_PORT_MODE_MII;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
+
+ if (masked_phy_id == TG3_PHY_ID_BCM5401)
+ mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ else if (masked_phy_id == TG3_PHY_ID_BCM5411)
+ mac_mode |= MAC_MODE_LINK_POLARITY;
+
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ MII_TG3_EXT_CTRL_LNK3_LED_MODE);
+ }
+
+ tw32(MAC_MODE, mac_mode);
+ udelay(40);
+
+ return 0;
+}
+
static void tg3_set_loopback(struct net_device *dev, u32 features)
{
struct tg3 *tp = netdev_priv(dev);
@@ -6349,16 +6966,8 @@ static void tg3_set_loopback(struct net_device *dev, u32 features)
if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
return;
- /*
- * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
- * loopback mode if Half-Duplex mode was negotiated earlier.
- */
- tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
-
- /* Enable internal MAC loopback mode */
- tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
spin_lock_bh(&tp->lock);
- tw32(MAC_MODE, tp->mac_mode);
+ tg3_mac_loopback(tp, true);
netif_carrier_on(tp->dev);
spin_unlock_bh(&tp->lock);
netdev_info(dev, "Internal MAC loopback mode enabled.\n");
@@ -6366,10 +6975,8 @@ static void tg3_set_loopback(struct net_device *dev, u32 features)
if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
return;
- /* Disable internal MAC loopback mode */
- tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
spin_lock_bh(&tp->lock);
- tw32(MAC_MODE, tp->mac_mode);
+ tg3_mac_loopback(tp, false);
/* Force link status check */
tg3_setup_phy(tp, 1);
spin_unlock_bh(&tp->lock);
@@ -6979,230 +7586,6 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
return err;
}
-static void tg3_ape_send_event(struct tg3 *tp, u32 event)
-{
- int i;
- u32 apedata;
-
- /* NCSI does not support APE events */
- if (tg3_flag(tp, APE_HAS_NCSI))
- return;
-
- apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
- if (apedata != APE_SEG_SIG_MAGIC)
- return;
-
- apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
- if (!(apedata & APE_FW_STATUS_READY))
- return;
-
- /* Wait for up to 1 millisecond for APE to service previous event. */
- for (i = 0; i < 10; i++) {
- if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
- return;
-
- apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
-
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
- event | APE_EVENT_STATUS_EVENT_PENDING);
-
- tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
-
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- break;
-
- udelay(100);
- }
-
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
-}
-
-static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
-{
- u32 event;
- u32 apedata;
-
- if (!tg3_flag(tp, ENABLE_APE))
- return;
-
- switch (kind) {
- case RESET_KIND_INIT:
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
- APE_HOST_SEG_SIG_MAGIC);
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
- APE_HOST_SEG_LEN_MAGIC);
- apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
- tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
- tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
- APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
- tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
- APE_HOST_BEHAV_NO_PHYLOCK);
- tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
- TG3_APE_HOST_DRVR_STATE_START);
-
- event = APE_EVENT_STATUS_STATE_START;
- break;
- case RESET_KIND_SHUTDOWN:
- /* With the interface we are currently using,
- * APE does not track driver state. Wiping
- * out the HOST SEGMENT SIGNATURE forces
- * the APE to assume OS absent status.
- */
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
-
- if (device_may_wakeup(&tp->pdev->dev) &&
- tg3_flag(tp, WOL_ENABLE)) {
- tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
- TG3_APE_HOST_WOL_SPEED_AUTO);
- apedata = TG3_APE_HOST_DRVR_STATE_WOL;
- } else
- apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
-
- tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
-
- event = APE_EVENT_STATUS_STATE_UNLOAD;
- break;
- case RESET_KIND_SUSPEND:
- event = APE_EVENT_STATUS_STATE_SUSPEND;
- break;
- default:
- return;
- }
-
- event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
-
- tg3_ape_send_event(tp, event);
-}
-
-/* tp->lock is held. */
-static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
-{
- tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
- NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
-
- if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
- switch (kind) {
- case RESET_KIND_INIT:
- tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
- DRV_STATE_START);
- break;
-
- case RESET_KIND_SHUTDOWN:
- tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
- DRV_STATE_UNLOAD);
- break;
-
- case RESET_KIND_SUSPEND:
- tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
- DRV_STATE_SUSPEND);
- break;
-
- default:
- break;
- }
- }
-
- if (kind == RESET_KIND_INIT ||
- kind == RESET_KIND_SUSPEND)
- tg3_ape_driver_state_change(tp, kind);
-}
-
-/* tp->lock is held. */
-static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
-{
- if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
- switch (kind) {
- case RESET_KIND_INIT:
- tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
- DRV_STATE_START_DONE);
- break;
-
- case RESET_KIND_SHUTDOWN:
- tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
- DRV_STATE_UNLOAD_DONE);
- break;
-
- default:
- break;
- }
- }
-
- if (kind == RESET_KIND_SHUTDOWN)
- tg3_ape_driver_state_change(tp, kind);
-}
-
-/* tp->lock is held. */
-static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
-{
- if (tg3_flag(tp, ENABLE_ASF)) {
- switch (kind) {
- case RESET_KIND_INIT:
- tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
- DRV_STATE_START);
- break;
-
- case RESET_KIND_SHUTDOWN:
- tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
- DRV_STATE_UNLOAD);
- break;
-
- case RESET_KIND_SUSPEND:
- tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
- DRV_STATE_SUSPEND);
- break;
-
- default:
- break;
- }
- }
-}
-
-static int tg3_poll_fw(struct tg3 *tp)
-{
- int i;
- u32 val;
-
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- /* Wait up to 20ms for init done. */
- for (i = 0; i < 200; i++) {
- if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
- return 0;
- udelay(100);
- }
- return -ENODEV;
- }
-
- /* Wait for firmware initialization to complete. */
- for (i = 0; i < 100000; i++) {
- tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
- if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
- break;
- udelay(10);
- }
-
- /* Chip might not be fitted with firmware. Some Sun onboard
- * parts are configured like that. So don't signal the timeout
- * of the above loop as an error, but do report the lack of
- * running firmware once.
- */
- if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
- tg3_flag_set(tp, NO_FWARE_REPORTED);
-
- netdev_info(tp->dev, "No firmware running\n");
- }
-
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
- /* The 57765 A0 needs a little more
- * time to do some important work.
- */
- mdelay(10);
- }
-
- return 0;
-}
-
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
@@ -7274,8 +7657,6 @@ static void tg3_restore_pci_state(struct tg3 *tp)
}
}
-static void tg3_stop_fw(struct tg3 *);
-
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
@@ -7523,22 +7904,6 @@ static int tg3_chip_reset(struct tg3 *tp)
}
/* tp->lock is held. */
-static void tg3_stop_fw(struct tg3 *tp)
-{
- if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
- /* Wait for RX cpu to ACK the previous event. */
- tg3_wait_for_event_ack(tp);
-
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
-
- tg3_generate_fw_event(tp);
-
- /* Wait for RX cpu to ACK this event. */
- tg3_wait_for_event_ack(tp);
- }
-}
-
-/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
int err;
@@ -7561,227 +7926,6 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
return 0;
}
-#define RX_CPU_SCRATCH_BASE 0x30000
-#define RX_CPU_SCRATCH_SIZE 0x04000
-#define TX_CPU_SCRATCH_BASE 0x34000
-#define TX_CPU_SCRATCH_SIZE 0x04000
-
-/* tp->lock is held. */
-static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
-{
- int i;
-
- BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
-
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- u32 val = tr32(GRC_VCPU_EXT_CTRL);
-
- tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
- return 0;
- }
- if (offset == RX_CPU_BASE) {
- for (i = 0; i < 10000; i++) {
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32(offset + CPU_MODE, CPU_MODE_HALT);
- if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
- break;
- }
-
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
- udelay(10);
- } else {
- for (i = 0; i < 10000; i++) {
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32(offset + CPU_MODE, CPU_MODE_HALT);
- if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
- break;
- }
- }
-
- if (i >= 10000) {
- netdev_err(tp->dev, "%s timed out, %s CPU\n",
- __func__, offset == RX_CPU_BASE ? "RX" : "TX");
- return -ENODEV;
- }
-
- /* Clear firmware's nvram arbitration. */
- if (tg3_flag(tp, NVRAM))
- tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
- return 0;
-}
-
-struct fw_info {
- unsigned int fw_base;
- unsigned int fw_len;
- const __be32 *fw_data;
-};
-
-/* tp->lock is held. */
-static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
- int cpu_scratch_size, struct fw_info *info)
-{
- int err, lock_err, i;
- void (*write_op)(struct tg3 *, u32, u32);
-
- if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
- netdev_err(tp->dev,
- "%s: Trying to load TX cpu firmware which is 5705\n",
- __func__);
- return -EINVAL;
- }
-
- if (tg3_flag(tp, 5705_PLUS))
- write_op = tg3_write_mem;
- else
- write_op = tg3_write_indirect_reg32;
-
- /* It is possible that bootcode is still loading at this point.
- * Get the nvram lock first before halting the cpu.
- */
- lock_err = tg3_nvram_lock(tp);
- err = tg3_halt_cpu(tp, cpu_base);
- if (!lock_err)
- tg3_nvram_unlock(tp);
- if (err)
- goto out;
-
- for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
- write_op(tp, cpu_scratch_base + i, 0);
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
- for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
- write_op(tp, (cpu_scratch_base +
- (info->fw_base & 0xffff) +
- (i * sizeof(u32))),
- be32_to_cpu(info->fw_data[i]));
-
- err = 0;
-
-out:
- return err;
-}
-
-/* tp->lock is held. */
-static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
-{
- struct fw_info info;
- const __be32 *fw_data;
- int err, i;
-
- fw_data = (void *)tp->fw->data;
-
- /* Firmware blob starts with version numbers, followed by
- start address and length. We are setting complete length.
- length = end_address_of_bss - start_address_of_text.
- Remainder is the blob to be loaded contiguously
- from start address. */
-
- info.fw_base = be32_to_cpu(fw_data[1]);
- info.fw_len = tp->fw->size - 12;
- info.fw_data = &fw_data[3];
-
- err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
- RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
- &info);
- if (err)
- return err;
-
- err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
- TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
- &info);
- if (err)
- return err;
-
- /* Now startup only the RX cpu. */
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
-
- for (i = 0; i < 5; i++) {
- if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
- break;
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
- tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
- udelay(1000);
- }
- if (i >= 5) {
- netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
- "should be %08x\n", __func__,
- tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
- return -ENODEV;
- }
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
-
- return 0;
-}
-
-/* tp->lock is held. */
-static int tg3_load_tso_firmware(struct tg3 *tp)
-{
- struct fw_info info;
- const __be32 *fw_data;
- unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
- int err, i;
-
- if (tg3_flag(tp, HW_TSO_1) ||
- tg3_flag(tp, HW_TSO_2) ||
- tg3_flag(tp, HW_TSO_3))
- return 0;
-
- fw_data = (void *)tp->fw->data;
-
- /* Firmware blob starts with version numbers, followed by
- start address and length. We are setting complete length.
- length = end_address_of_bss - start_address_of_text.
- Remainder is the blob to be loaded contiguously
- from start address. */
-
- info.fw_base = be32_to_cpu(fw_data[1]);
- cpu_scratch_size = tp->fw_len;
- info.fw_len = tp->fw->size - 12;
- info.fw_data = &fw_data[3];
-
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
- cpu_base = RX_CPU_BASE;
- cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
- } else {
- cpu_base = TX_CPU_BASE;
- cpu_scratch_base = TX_CPU_SCRATCH_BASE;
- cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
- }
-
- err = tg3_load_firmware_cpu(tp, cpu_base,
- cpu_scratch_base, cpu_scratch_size,
- &info);
- if (err)
- return err;
-
- /* Now startup the cpu. */
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32_f(cpu_base + CPU_PC, info.fw_base);
-
- for (i = 0; i < 5; i++) {
- if (tr32(cpu_base + CPU_PC) == info.fw_base)
- break;
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
- tw32_f(cpu_base + CPU_PC, info.fw_base);
- udelay(1000);
- }
- if (i >= 5) {
- netdev_err(tp->dev,
- "%s fails to set CPU PC, is %08x should be %08x\n",
- __func__, tr32(cpu_base + CPU_PC), info.fw_base);
- return -ENODEV;
- }
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32_f(cpu_base + CPU_MODE, 0x00000000);
- return 0;
-}
-
-
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
struct tg3 *tp = netdev_priv(dev);
@@ -7961,7 +8105,7 @@ static void tg3_rings_reset(struct tg3 *tp)
tw32_mailbox(tp->napi[i].prodmbox, 0);
tw32_rx_mbox(tp->napi[i].consmbox, 0);
tw32_mailbox_f(tp->napi[i].int_mbox, 1);
- tp->napi[0].chk_msi_cnt = 0;
+ tp->napi[i].chk_msi_cnt = 0;
tp->napi[i].last_rx_cons = 0;
tp->napi[i].last_tx_cons = 0;
}
@@ -8657,6 +8801,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
val = tr32(MSGINT_MODE);
val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+ if (!tg3_flag(tp, 1SHOT_MSI))
+ val |= MSGINT_MODE_ONE_SHOT_DISABLE;
tw32(MSGINT_MODE, val);
}
@@ -9041,8 +9187,7 @@ static void tg3_chk_missed_msi(struct tg3 *tp)
tnapi->chk_msi_cnt++;
return;
}
- tw32_mailbox(tnapi->int_mbox,
- tnapi->last_tag << 24);
+ tg3_msi(0, tnapi);
}
}
tnapi->chk_msi_cnt = 0;
@@ -9273,7 +9418,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
if (intr_ok) {
/* Reenable MSI one shot mode. */
- if (tg3_flag(tp, 57765_PLUS)) {
+ if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
tw32(MSGINT_MODE, val);
}
@@ -9451,6 +9596,8 @@ static void tg3_ints_init(struct tg3 *tp)
u32 msi_mode = tr32(MSGINT_MODE);
if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
msi_mode |= MSGINT_MODE_MULTIVEC_EN;
+ if (!tg3_flag(tp, 1SHOT_MSI))
+ msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
}
defcfg:
@@ -9860,6 +10007,7 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
get_stat64(&hw_stats->rx_discards);
stats->rx_dropped = tp->rx_dropped;
+ stats->tx_dropped = tp->tx_dropped;
return stats;
}
@@ -10365,7 +10513,6 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
struct tg3 *tp = netdev_priv(dev);
ering->rx_max_pending = tp->rx_std_ring_mask;
- ering->rx_mini_max_pending = 0;
if (tg3_flag(tp, JUMBO_RING_ENABLE))
ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
else
@@ -10374,7 +10521,6 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
ering->rx_pending = tp->rx_pending;
- ering->rx_mini_pending = 0;
if (tg3_flag(tp, JUMBO_RING_ENABLE))
ering->rx_jumbo_pending = tp->rx_jumbo_pending;
else
@@ -11217,10 +11363,6 @@ static int tg3_test_memory(struct tg3 *tp)
return err;
}
-#define TG3_MAC_LOOPBACK 0
-#define TG3_PHY_LOOPBACK 1
-#define TG3_TSO_LOOPBACK 2
-
#define TG3_TSO_MSS 500
#define TG3_TSO_IP_HDR_LEN 20
@@ -11244,9 +11386,9 @@ static const u8 tg3_tso_header[] = {
0x11, 0x11, 0x11, 0x11,
};
-static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
+static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
- u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
+ u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
u32 budget;
struct sk_buff *skb, *rx_skb;
@@ -11267,76 +11409,6 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
}
coal_now = tnapi->coal_now | rnapi->coal_now;
- if (loopback_mode == TG3_MAC_LOOPBACK) {
- /* HW errata - mac loopback fails in some cases on 5780.
- * Normal traffic and PHY loopback are not affected by
- * errata. Also, the MAC loopback test is deprecated for
- * all newer ASIC revisions.
- */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
- tg3_flag(tp, CPMU_PRESENT))
- return 0;
-
- mac_mode = tp->mac_mode &
- ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
- mac_mode |= MAC_MODE_PORT_INT_LPBACK;
- if (!tg3_flag(tp, 5705_PLUS))
- mac_mode |= MAC_MODE_LINK_POLARITY;
- if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
- mac_mode |= MAC_MODE_PORT_MODE_MII;
- else
- mac_mode |= MAC_MODE_PORT_MODE_GMII;
- tw32(MAC_MODE, mac_mode);
- } else {
- if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
- tg3_phy_fet_toggle_apd(tp, false);
- val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
- } else
- val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
-
- tg3_phy_toggle_automdix(tp, 0);
-
- tg3_writephy(tp, MII_BMCR, val);
- udelay(40);
-
- mac_mode = tp->mac_mode &
- ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
- if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
- tg3_writephy(tp, MII_TG3_FET_PTEST,
- MII_TG3_FET_PTEST_FRC_TX_LINK |
- MII_TG3_FET_PTEST_FRC_TX_LOCK);
- /* The write needs to be flushed for the AC131 */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
- tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
- mac_mode |= MAC_MODE_PORT_MODE_MII;
- } else
- mac_mode |= MAC_MODE_PORT_MODE_GMII;
-
- /* reset to prevent losing 1st rx packet intermittently */
- if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
- tw32_f(MAC_RX_MODE, RX_MODE_RESET);
- udelay(10);
- tw32_f(MAC_RX_MODE, tp->rx_mode);
- }
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
- u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
- if (masked_phy_id == TG3_PHY_ID_BCM5401)
- mac_mode &= ~MAC_MODE_LINK_POLARITY;
- else if (masked_phy_id == TG3_PHY_ID_BCM5411)
- mac_mode |= MAC_MODE_LINK_POLARITY;
- tg3_writephy(tp, MII_TG3_EXT_CTRL,
- MII_TG3_EXT_CTRL_LNK3_LED_MODE);
- }
- tw32(MAC_MODE, mac_mode);
-
- /* Wait for link */
- for (i = 0; i < 100; i++) {
- if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
- break;
- mdelay(1);
- }
- }
-
err = -EIO;
tx_len = pktsz;
@@ -11350,7 +11422,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
- if (loopback_mode == TG3_TSO_LOOPBACK) {
+ if (tso_loopback) {
struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
@@ -11470,7 +11542,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
- ETH_FCS_LEN;
- if (loopback_mode != TG3_TSO_LOOPBACK) {
+ if (!tso_loopback) {
if (rx_len != tx_len)
goto out;
@@ -11517,25 +11589,33 @@ out:
#define TG3_STD_LOOPBACK_FAILED 1
#define TG3_JMB_LOOPBACK_FAILED 2
#define TG3_TSO_LOOPBACK_FAILED 4
+#define TG3_LOOPBACK_FAILED \
+ (TG3_STD_LOOPBACK_FAILED | \
+ TG3_JMB_LOOPBACK_FAILED | \
+ TG3_TSO_LOOPBACK_FAILED)
-#define TG3_MAC_LOOPBACK_SHIFT 0
-#define TG3_PHY_LOOPBACK_SHIFT 4
-#define TG3_LOOPBACK_FAILED 0x00000077
-
-static int tg3_test_loopback(struct tg3 *tp)
+static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
- int err = 0;
- u32 eee_cap, cpmuctrl = 0;
-
- if (!netif_running(tp->dev))
- return TG3_LOOPBACK_FAILED;
+ int err = -EIO;
+ u32 eee_cap;
eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+ if (!netif_running(tp->dev)) {
+ data[0] = TG3_LOOPBACK_FAILED;
+ data[1] = TG3_LOOPBACK_FAILED;
+ if (do_extlpbk)
+ data[2] = TG3_LOOPBACK_FAILED;
+ goto done;
+ }
+
err = tg3_reset_hw(tp, 1);
if (err) {
- err = TG3_LOOPBACK_FAILED;
+ data[0] = TG3_LOOPBACK_FAILED;
+ data[1] = TG3_LOOPBACK_FAILED;
+ if (do_extlpbk)
+ data[2] = TG3_LOOPBACK_FAILED;
goto done;
}
@@ -11548,68 +11628,72 @@ static int tg3_test_loopback(struct tg3 *tp)
tw32(i, 0x0);
}
- /* Turn off gphy autopowerdown. */
- if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
- tg3_phy_toggle_apd(tp, false);
+ /* HW errata - mac loopback fails in some cases on 5780.
+ * Normal traffic and PHY loopback are not affected by
+ * errata. Also, the MAC loopback test is deprecated for
+ * all newer ASIC revisions.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ !tg3_flag(tp, CPMU_PRESENT)) {
+ tg3_mac_loopback(tp, true);
+
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+ data[0] |= TG3_STD_LOOPBACK_FAILED;
+
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
+ data[0] |= TG3_JMB_LOOPBACK_FAILED;
- if (tg3_flag(tp, CPMU_PRESENT)) {
+ tg3_mac_loopback(tp, false);
+ }
+
+ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+ !tg3_flag(tp, USE_PHYLIB)) {
int i;
- u32 status;
- tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
+ tg3_phy_lpbk_set(tp, 0, false);
- /* Wait for up to 40 microseconds to acquire lock. */
- for (i = 0; i < 4; i++) {
- status = tr32(TG3_CPMU_MUTEX_GNT);
- if (status == CPMU_MUTEX_GNT_DRIVER)
+ /* Wait for link */
+ for (i = 0; i < 100; i++) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
break;
- udelay(10);
- }
-
- if (status != CPMU_MUTEX_GNT_DRIVER) {
- err = TG3_LOOPBACK_FAILED;
- goto done;
+ mdelay(1);
}
- /* Turn off link-based power management. */
- cpmuctrl = tr32(TG3_CPMU_CTRL);
- tw32(TG3_CPMU_CTRL,
- cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
- CPMU_CTRL_LINK_AWARE_MODE));
- }
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+ data[1] |= TG3_STD_LOOPBACK_FAILED;
+ if (tg3_flag(tp, TSO_CAPABLE) &&
+ tg3_run_loopback(tp, ETH_FRAME_LEN, true))
+ data[1] |= TG3_TSO_LOOPBACK_FAILED;
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
+ data[1] |= TG3_JMB_LOOPBACK_FAILED;
- if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
- err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
+ if (do_extlpbk) {
+ tg3_phy_lpbk_set(tp, 0, true);
- if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
- tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
- err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
+ /* All link indications report up, but the hardware
+ * isn't really ready for about 20 msec. Double it
+ * to be sure.
+ */
+ mdelay(40);
- if (tg3_flag(tp, CPMU_PRESENT)) {
- tw32(TG3_CPMU_CTRL, cpmuctrl);
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+ data[2] |= TG3_STD_LOOPBACK_FAILED;
+ if (tg3_flag(tp, TSO_CAPABLE) &&
+ tg3_run_loopback(tp, ETH_FRAME_LEN, true))
+ data[2] |= TG3_TSO_LOOPBACK_FAILED;
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
+ data[2] |= TG3_JMB_LOOPBACK_FAILED;
+ }
- /* Release the mutex */
- tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
+ /* Re-enable gphy autopowerdown. */
+ if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
+ tg3_phy_toggle_apd(tp, true);
}
- if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
- !tg3_flag(tp, USE_PHYLIB)) {
- if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
- err |= TG3_STD_LOOPBACK_FAILED <<
- TG3_PHY_LOOPBACK_SHIFT;
- if (tg3_flag(tp, TSO_CAPABLE) &&
- tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
- err |= TG3_TSO_LOOPBACK_FAILED <<
- TG3_PHY_LOOPBACK_SHIFT;
- if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
- tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
- err |= TG3_JMB_LOOPBACK_FAILED <<
- TG3_PHY_LOOPBACK_SHIFT;
- }
-
- /* Re-enable gphy autopowerdown. */
- if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
- tg3_phy_toggle_apd(tp, true);
+ err = (data[0] | data[1] | data[2]) ? -EIO : 0;
done:
tp->phy_flags |= eee_cap;
@@ -11621,6 +11705,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
u64 *data)
{
struct tg3 *tp = netdev_priv(dev);
+ bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
tg3_power_up(tp)) {
@@ -11635,7 +11720,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
etest->flags |= ETH_TEST_FL_FAILED;
data[0] = 1;
}
- if (tg3_test_link(tp) != 0) {
+ if (!doextlpbk && tg3_test_link(tp)) {
etest->flags |= ETH_TEST_FL_FAILED;
data[1] = 1;
}
@@ -11665,18 +11750,23 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
etest->flags |= ETH_TEST_FL_FAILED;
data[2] = 1;
}
+
if (tg3_test_memory(tp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
data[3] = 1;
}
- if ((data[4] = tg3_test_loopback(tp)) != 0)
+
+ if (doextlpbk)
+ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+
+ if (tg3_test_loopback(tp, &data[4], doextlpbk))
etest->flags |= ETH_TEST_FL_FAILED;
tg3_full_unlock(tp);
if (tg3_test_interrupt(tp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
- data[5] = 1;
+ data[7] = 1;
}
tg3_full_lock(tp, 0);
@@ -13985,7 +14075,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* BCM5785 devices are effectively PCIe devices, and should
* follow PCIe codepaths, but do not have a PCIe capabilities
* section.
- */
+ */
tg3_flag_set(tp, PCI_EXPRESS);
} else if (!tg3_flag(tp, 5705_PLUS) ||
tg3_flag(tp, 5780_CLASS)) {
@@ -14366,7 +14456,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, ENABLE_APE))
tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
else
- tp->mac_mode = TG3_DEF_MAC_MODE;
+ tp->mac_mode = 0;
/* these are limited to 10/100 only */
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
@@ -15175,7 +15265,7 @@ static const struct net_device_ops tg3_netdev_ops = {
.ndo_start_xmit = tg3_start_xmit,
.ndo_get_stats64 = tg3_get_stats64,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = tg3_set_rx_mode,
+ .ndo_set_rx_mode = tg3_set_rx_mode,
.ndo_set_mac_address = tg3_set_mac_addr,
.ndo_do_ioctl = tg3_ioctl,
.ndo_tx_timeout = tg3_tx_timeout,
@@ -15445,7 +15535,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
tnapi->int_mbox = intmbx;
- if (i < 4)
+ if (i <= 4)
intmbx += 0x8;
else
intmbx += 0x4;
@@ -15577,7 +15667,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->reset_task);
- if (!tg3_flag(tp, USE_PHYLIB)) {
+ if (tg3_flag(tp, USE_PHYLIB)) {
tg3_phy_fini(tp);
tg3_mdio_fini(tp);
}
diff --git a/drivers/net/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 2ea456dd588..f32f288134c 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2197,6 +2197,7 @@
#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400
#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800
#define MII_TG3_AUXCTL_ACTL_EXTPKTLEN 0x4000
+#define MII_TG3_AUXCTL_ACTL_EXTLOOPBK 0x8000
#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002
#define MII_TG3_AUXCTL_PCTL_WOL_EN 0x0008
@@ -2262,6 +2263,8 @@
/* Fast Ethernet Tranceiver definitions */
#define MII_TG3_FET_PTEST 0x17
+#define MII_TG3_FET_PTEST_TRIM_SEL 0x0010
+#define MII_TG3_FET_PTEST_TRIM_2 0x0002
#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
@@ -2987,6 +2990,7 @@ struct tg3 {
/* begin "everything else" cacheline(s) section */
unsigned long rx_dropped;
+ unsigned long tx_dropped;
struct rtnl_link_stats64 net_stats_prev;
struct tg3_ethtool_stats estats;
struct tg3_ethtool_stats estats_prev;
diff --git a/drivers/net/ethernet/brocade/Kconfig b/drivers/net/ethernet/brocade/Kconfig
new file mode 100644
index 00000000000..26415577885
--- /dev/null
+++ b/drivers/net/ethernet/brocade/Kconfig
@@ -0,0 +1,23 @@
+#
+# Brocade device configuration
+#
+
+config NET_VENDOR_BROCADE
+ bool "Brocade devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Brocade cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_BROCADE
+
+source "drivers/net/ethernet/brocade/bna/Kconfig"
+
+endif # NET_VENDOR_BROCADE
diff --git a/drivers/net/ethernet/brocade/Makefile b/drivers/net/ethernet/brocade/Makefile
new file mode 100644
index 00000000000..b58238d2df6
--- /dev/null
+++ b/drivers/net/ethernet/brocade/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Brocade device drivers.
+#
+
+obj-$(CONFIG_BNA) += bna/
diff --git a/drivers/net/ethernet/brocade/bna/Kconfig b/drivers/net/ethernet/brocade/bna/Kconfig
new file mode 100644
index 00000000000..dc2eb526fbf
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/Kconfig
@@ -0,0 +1,17 @@
+#
+# Brocade network device configuration
+#
+
+config BNA
+ tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
+ depends on PCI
+ ---help---
+ This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
+ cards.
+ To compile this driver as a module, choose M here: the module
+ will be called bna.
+
+ For general information and support, go to the Brocade support
+ website at:
+
+ <http://support.brocade.com>
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
new file mode 100644
index 00000000000..74d3abca196
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -0,0 +1,12 @@
+#
+# Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+# All rights reserved.
+#
+
+obj-$(CONFIG_BNA) += bna.o
+
+bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o
+bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
+bna-objs += cna_fwimg.o
+
+EXTRA_CFLAGS := -Idrivers/net/bna
diff --git a/drivers/net/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 39e5ab9fde5..8e627186507 100644
--- a/drivers/net/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -16,15 +16,10 @@
* www.brocade.com
*/
-#include "bfa_defs_cna.h"
-#include "cna.h"
#include "bfa_cee.h"
#include "bfi_cna.h"
#include "bfa_ioc.h"
-#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
-#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
-
static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg);
static void bfa_cee_format_cee_cfg(void *buffer);
diff --git a/drivers/net/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h
index 58d54e98d59..58d54e98d59 100644
--- a/drivers/net/bna/bfa_cee.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h
diff --git a/drivers/net/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index 3da1a946ccd..3da1a946ccd 100644
--- a/drivers/net/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
diff --git a/drivers/net/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index b080b3698f4..2f12d68021d 100644
--- a/drivers/net/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -124,6 +124,7 @@ enum bfa_ioc_state {
BFA_IOC_DISABLED = 10, /*!< IOC is disabled */
BFA_IOC_FWMISMATCH = 11, /*!< IOC f/w different from drivers */
BFA_IOC_ENABLING = 12, /*!< IOC is being enabled */
+ BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */
};
/**
@@ -179,8 +180,20 @@ struct bfa_ioc_attr {
struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */
struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */
struct bfa_ioc_pci_attr pci_attr;
- u8 port_id; /*!< port number */
- u8 rsvd[7]; /*!< 64bit align */
+ u8 port_id; /*!< port number */
+ u8 port_mode; /*!< enum bfa_mode */
+ u8 cap_bm; /*!< capability */
+ u8 port_mode_cfg; /*!< enum bfa_mode */
+ u8 rsvd[4]; /*!< 64bit align */
+};
+
+/**
+ * Adapter capability mask definition
+ */
+enum {
+ BFA_CM_HBA = 0x01,
+ BFA_CM_CNA = 0x02,
+ BFA_CM_NIC = 0x04,
};
/**
@@ -228,8 +241,18 @@ struct bfa_mfg_block {
mac_t mfg_mac; /*!< mac address */
u8 num_mac; /*!< number of mac addresses */
u8 rsv2;
- u32 mfg_type; /*!< card type */
- u8 rsv3[108];
+ u32 card_type; /*!< card type */
+ char cap_nic; /*!< capability nic */
+ char cap_cna; /*!< capability cna */
+ char cap_hba; /*!< capability hba */
+ char cap_fc16g; /*!< capability fc 16g */
+ char cap_sriov; /*!< capability sriov */
+ char cap_mezz; /*!< capability mezz */
+ u8 rsv3;
+ u8 mfg_nports; /*!< number of ports */
+ char media[8]; /*!< xfi/xaui */
+ char initial_mode[8];/*!< initial mode: hba/cna/nic */
+ u8 rsv4[84];
u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
};
@@ -239,8 +262,35 @@ struct bfa_mfg_block {
* ---------------------- pci definitions ------------
*/
-#define bfa_asic_id_ct(devid) \
- ((devid) == PCI_DEVICE_ID_BROCADE_CT || \
- (devid) == PCI_DEVICE_ID_BROCADE_CT_FC)
+/*
+ * PCI device ID information
+ */
+enum {
+ BFA_PCI_DEVICE_ID_CT2 = 0x22,
+};
+
+#define bfa_asic_id_ct(device) \
+ ((device) == PCI_DEVICE_ID_BROCADE_CT || \
+ (device) == PCI_DEVICE_ID_BROCADE_CT_FC)
+#define bfa_asic_id_ct2(device) \
+ ((device) == BFA_PCI_DEVICE_ID_CT2)
+#define bfa_asic_id_ctc(device) \
+ (bfa_asic_id_ct(device) || bfa_asic_id_ct2(device))
+
+/**
+ * PCI sub-system device and vendor ID information
+ */
+enum {
+ BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
+ BFA_PCI_CT2_SSID_FCoE = 0x22,
+ BFA_PCI_CT2_SSID_ETH = 0x23,
+ BFA_PCI_CT2_SSID_FC = 0x24,
+};
+
+enum bfa_mode {
+ BFA_MODE_HBA = 1,
+ BFA_MODE_CNA = 2,
+ BFA_MODE_NIC = 3
+};
#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/net/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
index 7e0a9187bdd..8ab33ee2c2b 100644
--- a/drivers/net/bna/bfa_defs_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -15,7 +15,6 @@
* All rights reserved
* www.brocade.com
*/
-
#ifndef __BFA_DEFS_CNA_H__
#define __BFA_DEFS_CNA_H__
@@ -55,6 +54,9 @@ struct bfa_port_fc_stats {
u64 bad_os_count; /*!< Invalid ordered sets */
u64 err_enc_out; /*!< Encoding err nonframe_8b10b */
u64 err_enc; /*!< Encoding err frame_8b10b */
+ u64 bbsc_frames_lost; /*!< Credit Recovery-Frames Lost */
+ u64 bbsc_credits_lost; /*!< Credit Recovery-Credits Lost */
+ u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */
};
/**
@@ -100,6 +102,10 @@ struct bfa_port_eth_stats {
u64 rx_fcoe_zero_pause; /*!< Rx FCoE zero pause */
u64 tx_fcoe_pause; /*!< Tx FCoE pause */
u64 tx_fcoe_zero_pause; /*!< Tx FCoE zero pause */
+ u64 rx_iscsi_pause; /*!< Rx iSCSI pause */
+ u64 rx_iscsi_zero_pause; /*!< Rx iSCSI zero pause */
+ u64 tx_iscsi_pause; /*!< Tx iSCSI pause */
+ u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */
};
/**
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
new file mode 100644
index 00000000000..6681fe87c1e
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -0,0 +1,171 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFA_DEFS_MFG_COMM_H__
+#define __BFA_DEFS_MFG_COMM_H__
+
+#include "bfa_defs.h"
+
+/**
+ * Manufacturing block version
+ */
+#define BFA_MFG_VERSION 3
+#define BFA_MFG_VERSION_UNINIT 0xFF
+
+/**
+ * Manufacturing block encrypted version
+ */
+#define BFA_MFG_ENC_VER 2
+
+/**
+ * Manufacturing block version 1 length
+ */
+#define BFA_MFG_VER1_LEN 128
+
+/**
+ * Manufacturing block header length
+ */
+#define BFA_MFG_HDR_LEN 4
+
+#define BFA_MFG_SERIALNUM_SIZE 11
+#define STRSZ(_n) (((_n) + 4) & ~3)
+
+/**
+ * Manufacturing card type
+ */
+enum {
+ BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
+ BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
+ BFA_MFG_TYPE_FC8P1 = 815, /*!< 8G 1port FC card */
+ BFA_MFG_TYPE_FC4P2 = 425, /*!< 4G 2port FC card */
+ BFA_MFG_TYPE_FC4P1 = 415, /*!< 4G 1port FC card */
+ BFA_MFG_TYPE_CNA10P2 = 1020, /*!< 10G 2port CNA card */
+ BFA_MFG_TYPE_CNA10P1 = 1010, /*!< 10G 1port CNA card */
+ BFA_MFG_TYPE_JAYHAWK = 804, /*!< Jayhawk mezz card */
+ BFA_MFG_TYPE_WANCHESE = 1007, /*!< Wanchese mezz card */
+ BFA_MFG_TYPE_ASTRA = 807, /*!< Astra mezz card */
+ BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old */
+ BFA_MFG_TYPE_LIGHTNING = 1741, /*!< Lightning mezz card */
+ BFA_MFG_TYPE_PROWLER_F = 1560, /*!< Prowler FC only cards */
+ BFA_MFG_TYPE_PROWLER_N = 1410, /*!< Prowler NIC only cards */
+ BFA_MFG_TYPE_PROWLER_C = 1710, /*!< Prowler CNA only cards */
+ BFA_MFG_TYPE_PROWLER_D = 1860, /*!< Prowler Dual cards */
+ BFA_MFG_TYPE_CHINOOK = 1867, /*!< Chinook cards */
+ BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
+};
+
+#pragma pack(1)
+
+/**
+ * Check if Mezz card
+ */
+#define bfa_mfg_is_mezz(type) (( \
+ (type) == BFA_MFG_TYPE_JAYHAWK || \
+ (type) == BFA_MFG_TYPE_WANCHESE || \
+ (type) == BFA_MFG_TYPE_ASTRA || \
+ (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
+ (type) == BFA_MFG_TYPE_LIGHTNING || \
+ (type) == BFA_MFG_TYPE_CHINOOK))
+
+enum {
+ CB_GPIO_TTV = (1), /*!< TTV debug capable cards */
+ CB_GPIO_FC8P2 = (2), /*!< 8G 2port FC card */
+ CB_GPIO_FC8P1 = (3), /*!< 8G 1port FC card */
+ CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
+ CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
+ CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
+ CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
+};
+
+#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \
+do { \
+ if ((gpio) & CB_GPIO_PROTO) { \
+ (prop) |= BFI_ADAPTER_PROTO; \
+ (gpio) &= ~CB_GPIO_PROTO; \
+ } \
+ switch ((gpio)) { \
+ case CB_GPIO_TTV: \
+ (prop) |= BFI_ADAPTER_TTV; \
+ case CB_GPIO_DFLY: \
+ case CB_GPIO_FC8P2: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
+ (card_type) = BFA_MFG_TYPE_FC8P2; \
+ break; \
+ case CB_GPIO_FC8P1: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
+ (card_type) = BFA_MFG_TYPE_FC8P1; \
+ break; \
+ case CB_GPIO_FC4P2: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
+ (card_type) = BFA_MFG_TYPE_FC4P2; \
+ break; \
+ case CB_GPIO_FC4P1: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
+ (card_type) = BFA_MFG_TYPE_FC4P1; \
+ break; \
+ default: \
+ (prop) |= BFI_ADAPTER_UNSUPP; \
+ (card_type) = BFA_MFG_TYPE_INVALID; \
+ } \
+} while (0)
+
+/**
+ * VPD data length
+ */
+#define BFA_MFG_VPD_LEN 512
+#define BFA_MFG_VPD_LEN_INVALID 0
+
+#define BFA_MFG_VPD_PCI_HDR_OFF 137
+#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
+#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
+
+/**
+ * VPD vendor tag
+ */
+enum {
+ BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
+ BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
+ BFA_MFG_VPD_HP = 2, /*!< vendor HP */
+ BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
+ BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
+ BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
+ BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
+ BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
+};
+
+/**
+ * @brief BFA adapter flash vpd data definition.
+ *
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_vpd {
+ u8 version; /*!< vpd data version */
+ u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */
+ u8 chksum; /*!< u8 checksum */
+ u8 vendor; /*!< vendor */
+ u8 len; /*!< vpd data length excluding header */
+ u8 rsv;
+ u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
+};
+
+#pragma pack()
+
+#endif /* __BFA_DEFS_MFG_H__ */
diff --git a/drivers/net/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
index 7c5fe6c2e80..7c5fe6c2e80 100644
--- a/drivers/net/bna/bfa_defs_status.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 126b0aac9f9..b0307a00a10 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -17,9 +17,7 @@
*/
#include "bfa_ioc.h"
-#include "cna.h"
-#include "bfi.h"
-#include "bfi_ctreg.h"
+#include "bfi_reg.h"
#include "bfa_defs.h"
/**
@@ -62,6 +60,7 @@ static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
+static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
@@ -78,8 +77,8 @@ static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
-static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
u32 boot_param);
@@ -108,11 +107,11 @@ enum ioc_event {
IOC_E_ENABLED = 5, /*!< f/w enabled */
IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
IOC_E_DISABLED = 7, /*!< f/w disabled */
- IOC_E_INITFAILED = 8, /*!< failure notice by iocpf sm */
- IOC_E_PFFAILED = 9, /*!< failure notice by iocpf sm */
- IOC_E_HBFAIL = 10, /*!< heartbeat failure */
- IOC_E_HWERROR = 11, /*!< hardware error interrupt */
- IOC_E_TIMEOUT = 12, /*!< timeout */
+ IOC_E_PFFAILED = 8, /*!< failure notice by iocpf sm */
+ IOC_E_HBFAIL = 9, /*!< heartbeat failure */
+ IOC_E_HWERROR = 10, /*!< hardware error interrupt */
+ IOC_E_TIMEOUT = 11, /*!< timeout */
+ IOC_E_HWFAILED = 12, /*!< PCI mapping failure notice */
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
@@ -124,6 +123,7 @@ bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
static struct bfa_sm_table ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
@@ -135,12 +135,9 @@ static struct bfa_sm_table ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+ {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
-/**
- * IOCPF state machine definitions/declarations
- */
-
/*
* Forward declareations for iocpf state machine
*/
@@ -166,6 +163,7 @@ enum iocpf_event {
IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */
IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */
+ IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */
};
/**
@@ -300,11 +298,16 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
/* !!! fall through !!! */
case IOC_E_HWERROR:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_iocpf_initfail(ioc);
break;
+ case IOC_E_HWFAILED:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+ break;
+
case IOC_E_DISABLE:
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
@@ -343,6 +346,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
case IOC_E_FWRSP_GETATTR:
del_timer(&ioc->ioc_timer);
bfa_ioc_check_attr_wwns(ioc);
+ bfa_ioc_hb_monitor(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
break;
@@ -352,7 +356,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
/* fall through */
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_iocpf_getattrfail(ioc);
break;
@@ -374,7 +378,7 @@ static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
- bfa_ioc_hb_monitor(ioc);
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
}
static void
@@ -394,12 +398,13 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
bfa_ioc_hb_stop(ioc);
/* !!! fall through !!! */
case IOC_E_HBFAIL:
- bfa_ioc_fail_notify(ioc);
if (ioc->iocpf.auto_recover)
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
else
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ bfa_ioc_fail_notify(ioc);
+
if (event != IOC_E_PFFAILED)
bfa_iocpf_fail(ioc);
break;
@@ -416,7 +421,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
}
/**
- * IOC is being desabled
+ * IOC is being disabled
*/
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
@@ -435,13 +440,18 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
bfa_iocpf_fail(ioc);
break;
+ case IOC_E_HWFAILED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+ bfa_ioc_disable_comp(ioc);
+ break;
+
default:
bfa_sm_fault(event);
}
}
/**
- * IOC desable completion entry.
+ * IOC disable completion entry.
*/
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
@@ -493,12 +503,14 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
* Initialization retry failed.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_iocpf_initfail(ioc);
break;
- case IOC_E_INITFAILED:
- bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ case IOC_E_HWFAILED:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
break;
case IOC_E_ENABLE:
@@ -552,6 +564,36 @@ bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
}
}
+static void
+bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
+{
+}
+
+/**
+ * IOC failure.
+ */
+static void
+bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+
+ case IOC_E_ENABLE:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ break;
+
+ case IOC_E_DISABLE:
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
/**
* IOCPF State Machine
*/
@@ -562,7 +604,7 @@ bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
- iocpf->retry_count = 0;
+ iocpf->fw_mismatch_notified = false;
iocpf->auto_recover = bfa_nw_auto_recover;
}
@@ -607,7 +649,6 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
if (bfa_ioc_sync_start(ioc)) {
- iocpf->retry_count = 0;
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
} else {
@@ -622,6 +663,11 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
break;
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_ioc_pf_hwfailed(ioc);
+ break;
+
case IOCPF_E_DISABLE:
bfa_ioc_hw_sem_get_cancel(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
@@ -645,10 +691,10 @@ static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
/* Call only the first time sm enters fwmismatch state. */
- if (iocpf->retry_count == 0)
+ if (iocpf->fw_mismatch_notified == false)
bfa_ioc_pf_fwmismatch(iocpf->ioc);
- iocpf->retry_count++;
+ iocpf->fw_mismatch_notified = true;
mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
msecs_to_jiffies(BFA_IOC_TOV));
}
@@ -711,6 +757,11 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
break;
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_ioc_pf_hwfailed(ioc);
+ break;
+
case IOCPF_E_DISABLE:
bfa_ioc_hw_sem_get_cancel(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
@@ -724,9 +775,8 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
- mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
- msecs_to_jiffies(BFA_IOC_TOV));
- bfa_ioc_reset(iocpf->ioc, 0);
+ iocpf->poll_time = 0;
+ bfa_ioc_reset(iocpf->ioc, false);
}
/**
@@ -740,19 +790,11 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_FWREADY:
- del_timer(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
break;
- case IOCPF_E_INITFAIL:
- del_timer(&ioc->iocpf_timer);
- /*
- * !!! fall through !!!
- */
-
case IOCPF_E_TIMEOUT:
bfa_nw_ioc_hw_sem_release(ioc);
- if (event == IOCPF_E_TIMEOUT)
bfa_ioc_pf_failed(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
break;
@@ -774,6 +816,10 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
msecs_to_jiffies(BFA_IOC_TOV));
+ /**
+ * Enable Interrupts before sending fw IOC ENABLE cmd.
+ */
+ iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
bfa_ioc_send_enable(iocpf->ioc);
}
@@ -811,21 +857,11 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
break;
- case IOCPF_E_FWREADY:
- bfa_ioc_send_enable(ioc);
- break;
-
default:
bfa_sm_fault(event);
}
}
-static bool
-bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
-{
- return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
-}
-
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
@@ -835,8 +871,6 @@ bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
- struct bfa_ioc *ioc = iocpf->ioc;
-
switch (event) {
case IOCPF_E_DISABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
@@ -850,14 +884,6 @@ bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
break;
- case IOCPF_E_FWREADY:
- bfa_ioc_pf_failed(ioc);
- if (bfa_nw_ioc_is_operational(ioc))
- bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
- else
- bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
- break;
-
default:
bfa_sm_fault(event);
}
@@ -881,7 +907,6 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_FWRSP_DISABLE:
- case IOCPF_E_FWREADY:
del_timer(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
@@ -926,6 +951,11 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_ioc_pf_hwfailed(ioc);
+ break;
+
case IOCPF_E_FAIL:
break;
@@ -951,7 +981,6 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_ENABLE:
- iocpf->retry_count = 0;
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
break;
@@ -982,20 +1011,15 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_SEMLOCKED:
bfa_ioc_notify_fail(ioc);
- bfa_ioc_sync_ack(ioc);
- iocpf->retry_count++;
- if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
- bfa_ioc_sync_leave(ioc);
- bfa_nw_ioc_hw_sem_release(ioc);
- bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
- } else {
- if (bfa_ioc_sync_complete(ioc))
- bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
- else {
- bfa_nw_ioc_hw_sem_release(ioc);
- bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
- }
- }
+ bfa_ioc_sync_leave(ioc);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+ break;
+
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_ioc_pf_hwfailed(ioc);
break;
case IOCPF_E_DISABLE:
@@ -1020,7 +1044,6 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
- bfa_ioc_pf_initfailed(iocpf->ioc);
}
/**
@@ -1071,11 +1094,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_SEMLOCKED:
- iocpf->retry_count = 0;
bfa_ioc_sync_ack(ioc);
bfa_ioc_notify_fail(ioc);
if (!iocpf->auto_recover) {
bfa_ioc_sync_leave(ioc);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
} else {
@@ -1088,6 +1111,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
break;
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_ioc_pf_hwfailed(ioc);
+ break;
+
case IOCPF_E_DISABLE:
bfa_ioc_hw_sem_get_cancel(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
@@ -1158,22 +1186,22 @@ bfa_nw_ioc_sem_get(void __iomem *sem_reg)
r32 = readl(sem_reg);
- while (r32 && (cnt < BFA_SEM_SPINCNT)) {
+ while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
cnt++;
udelay(2);
r32 = readl(sem_reg);
}
- if (r32 == 0)
+ if (!(r32 & 1))
return true;
- BUG_ON(!(cnt < BFA_SEM_SPINCNT));
return false;
}
void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
+ readl(sem_reg);
writel(1, sem_reg);
}
@@ -1210,7 +1238,11 @@ bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
* will return 1. Semaphore is released by writing 1 to the register
*/
r32 = readl(ioc->ioc_regs.ioc_sem_reg);
- if (r32 == 0) {
+ if (r32 == ~0) {
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
+ return;
+ }
+ if (!(r32 & 1)) {
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
return;
}
@@ -1331,7 +1363,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
int i;
drv_fwhdr = (struct bfi_ioc_image_hdr *)
- bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
@@ -1352,12 +1384,12 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
bfa_nw_ioc_fwver_get(ioc, &fwhdr);
drv_fwhdr = (struct bfi_ioc_image_hdr *)
- bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
if (fwhdr.signature != drv_fwhdr->signature)
return false;
- if (swab32(fwhdr.param) != boot_env)
+ if (swab32(fwhdr.bootenv) != boot_env)
return false;
return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
@@ -1388,11 +1420,11 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
- boot_env = BFI_BOOT_LOADER_OS;
-
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
+ boot_env = BFI_FWBOOT_ENV_OS;
+
/**
* check if firmware is valid
*/
@@ -1400,7 +1432,8 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
false : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
- bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
+ bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
+ bfa_ioc_poll_fwinit(ioc);
return;
}
@@ -1409,7 +1442,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
* just wait for an initialization completion interrupt.
*/
if (ioc_fwstate == BFI_IOC_INITING) {
- ioc->cbfn->reset_cbfn(ioc->bfa);
+ bfa_ioc_poll_fwinit(ioc);
return;
}
@@ -1423,7 +1456,6 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
* be flushed. Otherwise MSI-X interrupts are not delivered.
*/
bfa_ioc_msgflush(ioc);
- ioc->cbfn->reset_cbfn(ioc->bfa);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
return;
}
@@ -1431,7 +1463,8 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
/**
* Initialize the h/w for any other states.
*/
- bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
+ bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
+ bfa_ioc_poll_fwinit(ioc);
}
void
@@ -1475,7 +1508,7 @@ bfa_ioc_send_enable(struct bfa_ioc *ioc)
bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
- enable_req.ioc_class = ioc->ioc_mc;
+ enable_req.clscode = htons(ioc->clscode);
do_gettimeofday(&tv);
enable_req.tv_sec = ntohl(tv.tv_sec);
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
@@ -1548,22 +1581,23 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 loff = 0;
u32 chunkno = 0;
u32 i;
+ u32 asicmode;
/**
* Initialize LMEM first before code download
*/
bfa_ioc_lmem_init(ioc);
- fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
+ fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
- for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
+ for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
- fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
+ fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
}
@@ -1590,12 +1624,16 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
ioc->ioc_regs.host_page_num_fn);
/*
- * Set boot type and boot param at the end.
+ * Set boot type, env and device mode at the end.
*/
+ asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
+ ioc->port0_mode, ioc->port1_mode);
+ writel(asicmode, ((ioc->ioc_regs.smem_page_start)
+ + BFI_FWBOOT_DEVMODE_OFF));
writel(boot_type, ((ioc->ioc_regs.smem_page_start)
- + (BFI_BOOT_TYPE_OFF)));
+ + (BFI_FWBOOT_TYPE_OFF)));
writel(boot_env, ((ioc->ioc_regs.smem_page_start)
- + (BFI_BOOT_LOADER_OFF)));
+ + (BFI_FWBOOT_ENV_OFF)));
}
static void
@@ -1605,6 +1643,20 @@ bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
}
/**
+ * BFA ioc enable reply by firmware
+ */
+static void
+bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
+ u8 cap_bm)
+{
+ struct bfa_iocpf *iocpf = &ioc->iocpf;
+
+ ioc->port_mode = ioc->port_mode_cfg = port_mode;
+ ioc->ad_cap_bm = cap_bm;
+ bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
+}
+
+/**
* @brief
* Update BFA configuration from firmware configuration.
*/
@@ -1644,7 +1696,9 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd *cmd;
- u32 stat;
+ bfa_mbox_cmd_cbfn_t cbfn;
+ void *cbarg;
+ u32 stat;
/**
* If no command pending, do nothing
@@ -1664,6 +1718,16 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
*/
bfa_q_deq(&mod->cmd_q, &cmd);
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+
+ /**
+ * Give a callback to the client, indicating that the command is sent
+ */
+ if (cmd->cbfn) {
+ cbfn = cmd->cbfn;
+ cbarg = cmd->cbarg;
+ cmd->cbfn = NULL;
+ cbfn(cbarg);
+ }
}
/**
@@ -1689,6 +1753,9 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc)
bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
}
+/**
+ * IOCPF to IOC interface
+ */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
@@ -1702,15 +1769,15 @@ bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
}
static void
-bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
+bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
- bfa_fsm_send_event(ioc, IOC_E_INITFAILED);
+ bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}
static void
-bfa_ioc_pf_failed(struct bfa_ioc *ioc)
+bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
- bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+ bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}
static void
@@ -1749,10 +1816,9 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
* as the entry vector.
*/
static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
+bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
+ u32 boot_env)
{
- void __iomem *rb;
-
bfa_ioc_stats(ioc, ioc_boots);
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
@@ -1761,22 +1827,16 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
/**
* Initialize IOC state of all functions on a chip reset.
*/
- rb = ioc->pcidev.pci_bar_kva;
- if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
- writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
- writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
+ if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
+ writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
} else {
- writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
- writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
+ writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
}
bfa_ioc_msgflush(ioc);
bfa_ioc_download_fw(ioc, boot_type, boot_env);
-
- /**
- * Enable interrupts just before starting LPU
- */
- ioc->cbfn->reset_cbfn(ioc->bfa);
bfa_ioc_lpu_start(ioc);
}
@@ -1789,13 +1849,17 @@ bfa_nw_ioc_auto_recover(bool auto_recover)
bfa_nw_auto_recover = auto_recover;
}
-static void
+static bool
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
u32 *msgp = mbmsg;
u32 r32;
int i;
+ r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+ if ((r32 & 1) == 0)
+ return false;
+
/**
* read the MBOX msg
*/
@@ -1811,6 +1875,8 @@ bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
*/
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
readl(ioc->ioc_regs.lpu_mbox_cmd);
+
+ return true;
}
static void
@@ -1827,12 +1893,10 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
case BFI_IOC_I2H_HBEAT:
break;
- case BFI_IOC_I2H_READY_EVENT:
- bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
- break;
-
case BFI_IOC_I2H_ENABLE_REPLY:
- bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
+ bfa_ioc_enable_reply(ioc,
+ (enum bfa_mode)msg->fw_event.port_mode,
+ msg->fw_event.cap_bm);
break;
case BFI_IOC_I2H_DISABLE_REPLY:
@@ -1878,6 +1942,9 @@ void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
bfa_fsm_send_event(ioc, IOC_E_DETACH);
+
+ /* Done with detach, empty the notify_q. */
+ INIT_LIST_HEAD(&ioc->notify_q);
}
/**
@@ -1887,14 +1954,63 @@ bfa_nw_ioc_detach(struct bfa_ioc *ioc)
*/
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
- enum bfi_mclass mc)
+ enum bfi_pcifn_class clscode)
{
- ioc->ioc_mc = mc;
+ ioc->clscode = clscode;
ioc->pcidev = *pcidev;
- ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
- ioc->cna = ioc->ctdev && !ioc->fcmode;
- bfa_nw_ioc_set_ct_hwif(ioc);
+ /**
+ * Initialize IOC and device personality
+ */
+ ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
+ ioc->asic_mode = BFI_ASIC_MODE_FC;
+
+ switch (pcidev->device_id) {
+ case PCI_DEVICE_ID_BROCADE_CT:
+ ioc->asic_gen = BFI_ASIC_GEN_CT;
+ ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+ ioc->asic_mode = BFI_ASIC_MODE_ETH;
+ ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
+ ioc->ad_cap_bm = BFA_CM_CNA;
+ break;
+
+ case BFA_PCI_DEVICE_ID_CT2:
+ ioc->asic_gen = BFI_ASIC_GEN_CT2;
+ if (clscode == BFI_PCIFN_CLASS_FC &&
+ pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
+ ioc->asic_mode = BFI_ASIC_MODE_FC16;
+ ioc->fcmode = true;
+ ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+ ioc->ad_cap_bm = BFA_CM_HBA;
+ } else {
+ ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+ ioc->asic_mode = BFI_ASIC_MODE_ETH;
+ if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
+ ioc->port_mode =
+ ioc->port_mode_cfg = BFA_MODE_CNA;
+ ioc->ad_cap_bm = BFA_CM_CNA;
+ } else {
+ ioc->port_mode =
+ ioc->port_mode_cfg = BFA_MODE_NIC;
+ ioc->ad_cap_bm = BFA_CM_NIC;
+ }
+ }
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+
+ /**
+ * Set asic specific interfaces.
+ */
+ if (ioc->asic_gen == BFI_ASIC_GEN_CT)
+ bfa_nw_ioc_set_ct_hwif(ioc);
+ else {
+ WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
+ bfa_nw_ioc_set_ct2_hwif(ioc);
+ bfa_nw_ioc_ct2_poweron(ioc);
+ }
bfa_ioc_map_port(ioc);
bfa_ioc_reg_init(ioc);
@@ -1968,18 +2084,22 @@ bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
* @param[in] ioc IOC instance
* @param[i] cmd Mailbox command
*/
-void
-bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
+bool
+bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
+ bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
{
struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
u32 stat;
+ cmd->cbfn = cbfn;
+ cmd->cbarg = cbarg;
+
/**
* If a previous command is pending, queue new command
*/
if (!list_empty(&mod->cmd_q)) {
list_add_tail(&cmd->qe, &mod->cmd_q);
- return;
+ return true;
}
/**
@@ -1988,7 +2108,7 @@ bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
if (stat) {
list_add_tail(&cmd->qe, &mod->cmd_q);
- return;
+ return true;
}
/**
@@ -1996,7 +2116,7 @@ bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
*/
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
- return;
+ return false;
}
/**
@@ -2009,21 +2129,28 @@ bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
struct bfi_mbmsg m;
int mc;
- bfa_ioc_msgget(ioc, &m);
+ if (bfa_ioc_msgget(ioc, &m)) {
+ /**
+ * Treat IOC message class as special.
+ */
+ mc = m.mh.msg_class;
+ if (mc == BFI_MC_IOC) {
+ bfa_ioc_isr(ioc, &m);
+ return;
+ }
- /**
- * Treat IOC message class as special.
- */
- mc = m.mh.msg_class;
- if (mc == BFI_MC_IOC) {
- bfa_ioc_isr(ioc, &m);
- return;
+ if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+ return;
+
+ mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}
- if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
- return;
+ bfa_ioc_lpu_read_stat(ioc);
- mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+ /**
+ * Try to send pending mailbox commands
+ */
+ bfa_ioc_mbox_poll(ioc);
}
void
@@ -2095,24 +2222,18 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
ad_attr->asic_rev = ioc_attr->asic_rev;
bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
-
- ad_attr->cna_capable = ioc->cna;
- ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
}
static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
- if (!ioc->ctdev || ioc->fcmode)
- return BFA_IOC_TYPE_FC;
- else if (ioc->ioc_mc == BFI_MC_IOCFC)
- return BFA_IOC_TYPE_FCoE;
- else if (ioc->ioc_mc == BFI_MC_LL)
- return BFA_IOC_TYPE_LL;
- else {
- BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
+ if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
return BFA_IOC_TYPE_LL;
- }
+
+ BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));
+
+ return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
+ ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}
static void
@@ -2171,9 +2292,6 @@ bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
ioc_attr = ioc->attr;
- /**
- * model name
- */
snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
BFA_MFG_NAME, ioc_attr->card_type);
}
@@ -2224,6 +2342,10 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
ioc_attr->state = bfa_ioc_get_state(ioc);
ioc_attr->port_id = ioc->port_id;
+ ioc_attr->port_mode = ioc->port_mode;
+
+ ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
+ ioc_attr->cap_bm = ioc->ad_cap_bm;
ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
@@ -2313,8 +2435,14 @@ void
bfa_nw_iocpf_timeout(void *ioc_arg)
{
struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+ enum bfa_iocpf_state iocpf_st;
+
+ iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
- bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
+ if (iocpf_st == BFA_IOCPF_HWINIT)
+ bfa_ioc_poll_fwinit(ioc);
+ else
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}
void
@@ -2324,3 +2452,22 @@ bfa_nw_iocpf_sem_timeout(void *ioc_arg)
bfa_ioc_hw_sem_get(ioc);
}
+
+static void
+bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
+{
+ u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+ if (fwstate == BFI_IOC_DISABLED) {
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+ return;
+ }
+
+ if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
+ bfa_nw_iocpf_timeout(ioc);
+ } else {
+ ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
+ mod_timer(&ioc->iocpf_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_POLL_TOV));
+ }
+}
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index bda866ba6e9..ca158d1eaef 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -26,7 +26,7 @@
#define BFA_IOC_TOV 3000 /* msecs */
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
-#define BFA_IOC_HWINIT_MAX 5
+#define BFA_IOC_POLL_TOV 200 /* msecs */
/**
* PCI device information required by IOC
@@ -35,6 +35,7 @@ struct bfa_pcidev {
int pci_slot;
u8 pci_func;
u16 device_id;
+ u16 ssid;
void __iomem *pci_bar_kva;
};
@@ -72,6 +73,7 @@ struct bfa_ioc_regs {
void __iomem *hfn_mbox;
void __iomem *lpu_mbox_cmd;
void __iomem *lpu_mbox;
+ void __iomem *lpu_read_stat;
void __iomem *pss_ctl_reg;
void __iomem *pss_err_status_reg;
void __iomem *app_pll_fast_ctl_reg;
@@ -150,16 +152,7 @@ struct bfa_ioc_notify {
};
/**
- * Heartbeat failure notification queue element.
- */
-struct bfa_ioc_hbfail_notify {
- struct list_head qe;
- bfa_ioc_hbfail_cbfn_t cbfn;
- void *cbarg;
-};
-
-/**
- * Initialize a heartbeat failure notification structure
+ * Initialize a IOC event notification structure
*/
#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
(__notify)->cbfn = (__cbfn); \
@@ -169,8 +162,9 @@ struct bfa_ioc_hbfail_notify {
struct bfa_iocpf {
bfa_fsm_t fsm;
struct bfa_ioc *ioc;
- u32 retry_count;
+ bool fw_mismatch_notified;
bool auto_recover;
+ u32 poll_time;
};
struct bfa_ioc {
@@ -186,12 +180,10 @@ struct bfa_ioc {
void *dbg_fwsave;
int dbg_fwsave_len;
bool dbg_fwsave_once;
- enum bfi_mclass ioc_mc;
+ enum bfi_pcifn_class clscode;
struct bfa_ioc_regs ioc_regs;
struct bfa_ioc_drv_stats stats;
bool fcmode;
- bool ctdev;
- bool cna;
bool pllinit;
bool stats_busy; /*!< outstanding stats */
u8 port_id;
@@ -200,12 +192,20 @@ struct bfa_ioc {
struct bfi_ioc_attr *attr;
struct bfa_ioc_cbfn *cbfn;
struct bfa_ioc_mbox_mod mbox_mod;
- struct bfa_ioc_hwif *ioc_hwif;
+ const struct bfa_ioc_hwif *ioc_hwif;
struct bfa_iocpf iocpf;
+ enum bfi_asic_gen asic_gen;
+ enum bfi_asic_mode asic_mode;
+ enum bfi_port_mode port0_mode;
+ enum bfi_port_mode port1_mode;
+ enum bfa_mode port_mode;
+ u8 ad_cap_bm; /*!< adapter cap bit mask */
+ u8 port_mode_cfg; /*!< config port mode */
};
struct bfa_ioc_hwif {
- enum bfa_status (*ioc_pll_init) (void __iomem *rb, bool fcmode);
+ enum bfa_status (*ioc_pll_init) (void __iomem *rb,
+ enum bfi_asic_mode m);
bool (*ioc_firmware_lock) (struct bfa_ioc *ioc);
void (*ioc_firmware_unlock) (struct bfa_ioc *ioc);
void (*ioc_reg_init) (struct bfa_ioc *ioc);
@@ -219,12 +219,14 @@ struct bfa_ioc_hwif {
void (*ioc_sync_leave) (struct bfa_ioc *ioc);
void (*ioc_sync_ack) (struct bfa_ioc *ioc);
bool (*ioc_sync_complete) (struct bfa_ioc *ioc);
+ bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc);
};
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
+#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen)
#define bfa_ioc_fetch_stats(__ioc, __stats) \
(((__stats)->drv_stats) = (__ioc)->stats)
#define bfa_ioc_clr_stats(__ioc) \
@@ -240,12 +242,9 @@ struct bfa_ioc_hwif {
#define bfa_ioc_stats_hb_count(_ioc, _hb_count) \
((_ioc)->stats.hb_count = (_hb_count))
#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
-#define BFA_IOC_FWIMG_TYPE(__ioc) \
- (((__ioc)->ctdev) ? \
- (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
- BFI_IMAGE_CB_FC)
#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
- (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
+ ((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB) \
+ ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
@@ -253,7 +252,9 @@ struct bfa_ioc_hwif {
/**
* IOC mailbox interface
*/
-void bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
+bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc,
+ struct bfa_mbox_cmd *cmd,
+ bfa_mbox_cmd_cbfn_t cbfn, void *cbarg);
void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
@@ -264,21 +265,30 @@ void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
#define bfa_ioc_pll_init_asic(__ioc) \
((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
- (__ioc)->fcmode))
+ (__ioc)->asic_mode))
-#define bfa_ioc_isr_mode_set(__ioc, __msix) \
- ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
+#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \
+ if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \
+ ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \
+} while (0)
#define bfa_ioc_ownership_reset(__ioc) \
((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+#define bfa_ioc_lpu_read_stat(__ioc) do { \
+ if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \
+ ((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \
+} while (0)
+
void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
+void bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc);
+void bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc);
void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
struct bfa_ioc_cbfn *cbfn);
void bfa_nw_ioc_auto_recover(bool auto_recover);
void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
- enum bfi_mclass mc);
+ enum bfi_pcifn_class clscode);
u32 bfa_nw_ioc_meminfo(void);
void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
@@ -309,7 +319,7 @@ void bfa_nw_iocpf_sem_timeout(void *ioc);
/*
* F/W Image Size & Chunk
*/
-u32 *bfa_cb_image_get_chunk(int type, u32 off);
-u32 bfa_cb_image_get_size(int type);
+u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
+u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);
#endif /* __BFA_IOC_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
new file mode 100644
index 00000000000..348479bbfa3
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -0,0 +1,878 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "bfa_ioc.h"
+#include "cna.h"
+#include "bfi.h"
+#include "bfi_reg.h"
+#include "bfa_defs.h"
+
+#define bfa_ioc_ct_sync_pos(__ioc) \
+ ((u32) (1 << bfa_ioc_pcifn(__ioc)))
+#define BFA_IOC_SYNC_REQD_SH 16
+#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
+#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
+#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
+#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
+ (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
+
+/*
+ * forward declarations
+ */
+static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
+static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
+static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
+static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
+static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
+ enum bfi_asic_mode asic_mode);
+static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
+ enum bfi_asic_mode asic_mode);
+static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);
+
+static const struct bfa_ioc_hwif nw_hwif_ct = {
+ .ioc_pll_init = bfa_ioc_ct_pll_init,
+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
+ .ioc_reg_init = bfa_ioc_ct_reg_init,
+ .ioc_map_port = bfa_ioc_ct_map_port,
+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
+ .ioc_sync_start = bfa_ioc_ct_sync_start,
+ .ioc_sync_join = bfa_ioc_ct_sync_join,
+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
+ .ioc_sync_complete = bfa_ioc_ct_sync_complete,
+};
+
+static const struct bfa_ioc_hwif nw_hwif_ct2 = {
+ .ioc_pll_init = bfa_ioc_ct2_pll_init,
+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
+ .ioc_reg_init = bfa_ioc_ct2_reg_init,
+ .ioc_map_port = bfa_ioc_ct2_map_port,
+ .ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat,
+ .ioc_isr_mode_set = NULL,
+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
+ .ioc_sync_start = bfa_ioc_ct_sync_start,
+ .ioc_sync_join = bfa_ioc_ct_sync_join,
+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
+ .ioc_sync_complete = bfa_ioc_ct_sync_complete,
+};
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
+{
+ ioc->ioc_hwif = &nw_hwif_ct;
+}
+
+void
+bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
+{
+ ioc->ioc_hwif = &nw_hwif_ct2;
+}
+
+/**
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bool
+bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ u32 usecnt;
+ struct bfi_ioc_image_hdr fwhdr;
+
+ /**
+ * If bios boot (flash based) -- do not increment usage count
+ */
+ if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
+ BFA_IOC_FWIMG_MINSZ)
+ return true;
+
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+
+ /**
+ * If usage count is 0, always return TRUE.
+ */
+ if (usecnt == 0) {
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ return true;
+ }
+
+ ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+ /**
+ * Use count cannot be non-zero and chip in uninitialized state.
+ */
+ BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));
+
+ /**
+ * Check if another driver with a different firmware is active
+ */
+ bfa_nw_ioc_fwver_get(ioc, &fwhdr);
+ if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ return false;
+ }
+
+ /**
+ * Same firmware version. Increment the reference count.
+ */
+ usecnt++;
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ return true;
+}
+
+static void
+bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
+{
+ u32 usecnt;
+
+ /**
+ * If bios boot (flash based) -- do not decrement usage count
+ */
+ if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
+ BFA_IOC_FWIMG_MINSZ)
+ return;
+
+ /**
+ * decrement usage count
+ */
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+ BUG_ON(!(usecnt > 0));
+
+ usecnt--;
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+}
+
+/**
+ * Notify other functions on HB failure.
+ */
+static void
+bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
+{
+ writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+ writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
+ /* Wait for halt to take effect */
+ readl(ioc->ioc_regs.ll_halt);
+ readl(ioc->ioc_regs.alt_ll_halt);
+}
+
+/**
+ * Host to LPU mailbox message addresses
+ */
+static const struct {
+ u32 hfn_mbox;
+ u32 lpu_mbox;
+ u32 hfn_pgn;
+} ct_fnreg[] = {
+ { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+ { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
+ { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
+ { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 0
+ */
+static const struct {
+ u32 hfn;
+ u32 lpu;
+} ct_p0reg[] = {
+ { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
+ { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
+ { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
+ { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 1
+ */
+static const struct {
+ u32 hfn;
+ u32 lpu;
+} ct_p1reg[] = {
+ { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
+ { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
+ { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
+ { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
+};
+
+static const struct {
+ u32 hfn_mbox;
+ u32 lpu_mbox;
+ u32 hfn_pgn;
+ u32 hfn;
+ u32 lpu;
+ u32 lpu_read;
+} ct2_reg[] = {
+ { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+ CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
+ CT2_HOSTFN_LPU0_READ_STAT},
+ { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+ CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
+ CT2_HOSTFN_LPU1_READ_STAT},
+};
+
+static void
+bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
+{
+ void __iomem *rb;
+ int pcifn = bfa_ioc_pcifn(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
+
+ if (ioc->port_id == 0) {
+ ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
+ } else {
+ ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
+ ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
+ }
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
+ ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
+ ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
+ ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
+ ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
+ ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
+ ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
+ ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;
+
+ /**
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+ /*
+ * err set reg : for notification of hb failure in fcmode
+ */
+ ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+static void
+bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
+{
+ void __iomem *rb;
+ int port = bfa_ioc_portid(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
+ ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
+
+ if (port == 0) {
+ ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
+ } else {
+ ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
+ ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
+ }
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
+ ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
+ ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
+ ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
+ ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
+ ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
+ ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
+ ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;
+
+ /**
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+ /*
+ * err set reg : for notification of hb failure in fcmode
+ */
+ ioc->ioc_regs.err_set = rb + ERR_SET_REG;
+}
+
+/**
+ * Initialize IOC to port mapping.
+ */
+
+#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
+static void
+bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ /**
+ * For catapult, base port id on personality register and IOC type
+ */
+ r32 = readl(rb + FNC_PERS_REG);
+ r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
+ ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
+
+}
+
+static void
+bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
+ ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
+}
+
+/**
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+static void
+bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32, mode;
+
+ r32 = readl(rb + FNC_PERS_REG);
+
+ mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
+ __F0_INTX_STATUS;
+
+ /**
+ * If already in desired mode, do not change anything
+ */
+ if ((!msix && mode) || (msix && !mode))
+ return;
+
+ if (msix)
+ mode = __F0_INTX_STATUS_MSIX;
+ else
+ mode = __F0_INTX_STATUS_INTA;
+
+ r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+ r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+
+ writel(r32, rb + FNC_PERS_REG);
+}
+
+static bool
+bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
+{
+ u32 r32;
+
+ r32 = readl(ioc->ioc_regs.lpu_read_stat);
+ if (r32) {
+ writel(1, ioc->ioc_regs.lpu_read_stat);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * MSI-X resource allocation for 1860 with no asic block
+ */
+#define HOSTFN_MSIX_DEFAULT 64
+#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
+#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
+#define __MSIX_VT_NUMVT__MK 0x003ff800
+#define __MSIX_VT_NUMVT__SH 11
+#define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH)
+#define __MSIX_VT_OFST_ 0x000007ff
+void
+bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+ if (r32 & __MSIX_VT_NUMVT__MK) {
+ writel(r32 & __MSIX_VT_OFST_,
+ rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
+ return;
+ }
+
+ writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
+ HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+ rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+ writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+ rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
+}
+
+/**
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
+{
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(0, ioc->ioc_regs.ioc_usage_reg);
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+
+ /*
+ * Read the hw sem reg to make sure that it is locked
+ * before we clear it. If it is not locked, writing 1
+ * will lock it instead of clearing it.
+ */
+ readl(ioc->ioc_regs.ioc_sem_reg);
+ bfa_nw_ioc_hw_sem_release(ioc);
+}
+
+/**
+ * Synchronized IOC failure processing routines
+ */
+static bool
+bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+ /*
+ * Driver load time. If the sync required bit for this PCI fn
+ * is set, it is due to an unclean exit by the driver for this
+ * PCI fn in the previous incarnation. Whoever comes here first
+ * should clean it up, no matter which PCI fn.
+ */
+
+ if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ return true;
+ }
+
+ return bfa_ioc_ct_sync_complete(ioc);
+}
+/**
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
+
+ writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
+ bfa_ioc_ct_sync_pos(ioc);
+
+ writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+
+ writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static bool
+bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+ u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
+ u32 tmp_ackd;
+
+ if (sync_ackd == 0)
+ return true;
+
+ /**
+ * The check below is to see whether any other PCI fn
+ * has reinitialized the ASIC (reset sync_ackd bits)
+ * and failed again while this IOC was waiting for hw
+ * semaphore (in bfa_iocpf_sm_semwait()).
+ */
+ tmp_ackd = sync_ackd;
+ if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
+ !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
+ sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
+
+ if (sync_reqd == sync_ackd) {
+ writel(bfa_ioc_ct_clear_sync_ackd(r32),
+ ioc->ioc_regs.ioc_fail_sync);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
+ return true;
+ }
+
+ /**
+ * If another PCI fn reinitialized and failed again while
+ * this IOC was waiting for hw sem, the sync_ackd bit for
+ * this IOC need to be set again to allow reinitialization.
+ */
+ if (tmp_ackd != sync_ackd)
+ writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
+
+ return false;
+}
+
+static enum bfa_status
+bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
+{
+ u32 pll_sclk, pll_fclk, r32;
+ bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);
+
+ pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
+ __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
+ __APP_PLL_SCLK_JITLMT0_1(3U) |
+ __APP_PLL_SCLK_CNTLMT0_1(1U);
+ pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
+ __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
+ __APP_PLL_LCLK_JITLMT0_1(3U) |
+ __APP_PLL_LCLK_CNTLMT0_1(1U);
+
+ if (fcmode) {
+ writel(0, (rb + OP_MODE));
+ writel(__APP_EMS_CMLCKSEL |
+ __APP_EMS_REFCKBUFEN2 |
+ __APP_EMS_CHANNEL_SEL,
+ (rb + ETH_MAC_SER_REG));
+ } else {
+ writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
+ writel(__APP_EMS_REFCKBUFEN1,
+ (rb + ETH_MAC_SER_REG));
+ }
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(pll_sclk |
+ __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_LCLK_CTL_REG);
+ writel(pll_sclk |
+ __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
+ rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
+ rb + APP_PLL_LCLK_CTL_REG);
+ readl(rb + HOSTFN0_INT_MSK);
+ udelay(2000);
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(pll_sclk |
+ __APP_PLL_SCLK_ENABLE,
+ rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_LCLK_ENABLE,
+ rb + APP_PLL_LCLK_CTL_REG);
+
+ if (!fcmode) {
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
+ }
+ r32 = readl((rb + PSS_CTL_REG));
+ r32 &= ~__PSS_LMEM_RESET;
+ writel(r32, (rb + PSS_CTL_REG));
+ udelay(1000);
+ if (!fcmode) {
+ writel(0, (rb + PMM_1T_RESET_REG_P0));
+ writel(0, (rb + PMM_1T_RESET_REG_P1));
+ }
+
+ writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
+ udelay(1000);
+ r32 = readl((rb + MBIST_STAT_REG));
+ writel(0, (rb + MBIST_CTL_REG));
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_ioc_ct2_sclk_init(void __iomem *rb)
+{
+ u32 r32;
+
+ /*
+ * put s_clk PLL and PLL FSM in reset
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
+ r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
+ __APP_PLL_SCLK_LOGIC_SOFT_RESET);
+ writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * Ignore mode and program for the max clock (which is FC16)
+ * Firmware/NFC will do the PLL init appropiately
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
+ writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * while doing PLL init dont clock gate ethernet subsystem
+ */
+ r32 = readl((rb + CT2_CHIP_MISC_PRG));
+ writel((r32 | __ETH_CLK_ENABLE_PORT0),
+ (rb + CT2_CHIP_MISC_PRG));
+
+ r32 = readl((rb + CT2_PCIE_MISC_REG));
+ writel((r32 | __ETH_CLK_ENABLE_PORT1),
+ (rb + CT2_PCIE_MISC_REG));
+
+ /*
+ * set sclk value
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
+ __APP_PLL_SCLK_CLK_DIV2);
+ writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * poll for s_clk lock or delay 1ms
+ */
+ udelay(1000);
+
+ /*
+ * Dont do clock gating for ethernet subsystem, firmware/NFC will
+ * do this appropriately
+ */
+}
+
+static void
+bfa_ioc_ct2_lclk_init(void __iomem *rb)
+{
+ u32 r32;
+
+ /*
+ * put l_clk PLL and PLL FSM in reset
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
+ r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
+ __APP_PLL_LCLK_LOGIC_SOFT_RESET);
+ writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+ * set LPU speed (set for FC16 which will work for other modes)
+ */
+ r32 = readl((rb + CT2_CHIP_MISC_PRG));
+ writel(r32, (rb + CT2_CHIP_MISC_PRG));
+
+ /*
+ * set LPU half speed (set for FC16 which will work for other modes)
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+ * set lclk for mode (set for FC16)
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
+ r32 |= 0x20c1731b;
+ writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+ * poll for s_clk lock or delay 1ms
+ */
+ udelay(1000);
+}
+
+static void
+bfa_ioc_ct2_mem_init(void __iomem *rb)
+{
+ u32 r32;
+
+ r32 = readl((rb + PSS_CTL_REG));
+ r32 &= ~__PSS_LMEM_RESET;
+ writel(r32, (rb + PSS_CTL_REG));
+ udelay(1000);
+
+ writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
+ udelay(1000);
+ writel(0, (rb + CT2_MBIST_CTL_REG));
+}
+
+static void
+bfa_ioc_ct2_mac_reset(void __iomem *rb)
+{
+ volatile u32 r32;
+
+ bfa_ioc_ct2_sclk_init(rb);
+ bfa_ioc_ct2_lclk_init(rb);
+
+ /*
+ * release soft reset on s_clk & l_clk
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
+ (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * release soft reset on s_clk & l_clk
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
+ (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /* put port0, port1 MAC & AHB in reset */
+ writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+ (rb + CT2_CSI_MAC_CONTROL_REG(0)));
+ writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+ (rb + CT2_CSI_MAC_CONTROL_REG(1)));
+}
+
+#define CT2_NFC_MAX_DELAY 1000
+static enum bfa_status
+bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
+{
+ volatile u32 wgn, r32;
+ int i;
+
+ /*
+ * Initialize PLL if not already done by NFC
+ */
+ wgn = readl(rb + CT2_WGN_STATUS);
+ if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
+ writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
+ for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+ r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+ if (r32 & __NFC_CONTROLLER_HALTED)
+ break;
+ udelay(1000);
+ }
+ }
+
+ /*
+ * Mask the interrupts and clear any
+ * pending interrupts left by BIOS/EFI
+ */
+
+ writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
+ writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
+
+ r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ if (r32 == 1) {
+ writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ }
+ r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ if (r32 == 1) {
+ writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ }
+
+ bfa_ioc_ct2_mac_reset(rb);
+ bfa_ioc_ct2_sclk_init(rb);
+ bfa_ioc_ct2_lclk_init(rb);
+
+ /*
+ * release soft reset on s_clk & l_clk
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
+ (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * release soft reset on s_clk & l_clk
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+ * Announce flash device presence, if flash was corrupted.
+ */
+ if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
+ r32 = readl((rb + PSS_GPIO_OUT_REG));
+ writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG));
+ r32 = readl((rb + PSS_GPIO_OE_REG));
+ writel((r32 | 1), (rb + PSS_GPIO_OE_REG));
+ }
+
+ bfa_ioc_ct2_mem_init(rb);
+
+ writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+ return BFA_STATUS_OK;
+}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
new file mode 100644
index 00000000000..dd36427f475
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -0,0 +1,669 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * @file bfa_msgq.c MSGQ module source file.
+ */
+
+#include "bfi.h"
+#include "bfa_msgq.h"
+#include "bfa_ioc.h"
+
+#define call_cmdq_ent_cbfn(_cmdq_ent, _status) \
+{ \
+ bfa_msgq_cmdcbfn_t cbfn; \
+ void *cbarg; \
+ cbfn = (_cmdq_ent)->cbfn; \
+ cbarg = (_cmdq_ent)->cbarg; \
+ (_cmdq_ent)->cbfn = NULL; \
+ (_cmdq_ent)->cbarg = NULL; \
+ if (cbfn) { \
+ cbfn(cbarg, (_status)); \
+ } \
+}
+
+static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
+static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
+
+enum cmdq_event {
+ CMDQ_E_START = 1,
+ CMDQ_E_STOP = 2,
+ CMDQ_E_FAIL = 3,
+ CMDQ_E_POST = 4,
+ CMDQ_E_INIT_RESP = 5,
+ CMDQ_E_DB_READY = 6,
+};
+
+bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
+bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
+bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
+bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
+ enum cmdq_event);
+
+static void
+cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
+{
+ struct bfa_msgq_cmd_entry *cmdq_ent;
+
+ cmdq->producer_index = 0;
+ cmdq->consumer_index = 0;
+ cmdq->flags = 0;
+ cmdq->token = 0;
+ cmdq->offset = 0;
+ cmdq->bytes_to_copy = 0;
+ while (!list_empty(&cmdq->pending_q)) {
+ bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
+ bfa_q_qe_init(&cmdq_ent->qe);
+ call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
+ }
+}
+
+static void
+cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
+{
+ switch (event) {
+ case CMDQ_E_START:
+ bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
+ break;
+
+ case CMDQ_E_STOP:
+ case CMDQ_E_FAIL:
+ /* No-op */
+ break;
+
+ case CMDQ_E_POST:
+ cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
+{
+ bfa_wc_down(&cmdq->msgq->init_wc);
+}
+
+static void
+cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
+{
+ switch (event) {
+ case CMDQ_E_STOP:
+ case CMDQ_E_FAIL:
+ bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
+ break;
+
+ case CMDQ_E_POST:
+ cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
+ break;
+
+ case CMDQ_E_INIT_RESP:
+ if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
+ cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
+ bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
+ } else
+ bfa_fsm_set_state(cmdq, cmdq_sm_ready);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
+{
+}
+
+static void
+cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
+{
+ switch (event) {
+ case CMDQ_E_STOP:
+ case CMDQ_E_FAIL:
+ bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
+ break;
+
+ case CMDQ_E_POST:
+ bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
+{
+ bfa_msgq_cmdq_dbell(cmdq);
+}
+
+static void
+cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
+{
+ switch (event) {
+ case CMDQ_E_STOP:
+ case CMDQ_E_FAIL:
+ bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
+ break;
+
+ case CMDQ_E_POST:
+ cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
+ break;
+
+ case CMDQ_E_DB_READY:
+ if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
+ cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
+ bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
+ } else
+ bfa_fsm_set_state(cmdq, cmdq_sm_ready);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_msgq_cmdq_dbell_ready(void *arg)
+{
+ struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
+ bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
+}
+
+static void
+bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
+{
+ struct bfi_msgq_h2i_db *dbell =
+ (struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);
+
+ memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
+ bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
+ dbell->mh.mtag.i2htok = 0;
+ dbell->idx.cmdq_pi = htons(cmdq->producer_index);
+
+ if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
+ bfa_msgq_cmdq_dbell_ready, cmdq)) {
+ bfa_msgq_cmdq_dbell_ready(cmdq);
+ }
+}
+
+static void
+__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
+{
+ size_t len = cmd->msg_size;
+ int num_entries = 0;
+ size_t to_copy;
+ u8 *src, *dst;
+
+ src = (u8 *)cmd->msg_hdr;
+ dst = (u8 *)cmdq->addr.kva;
+ dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
+
+ while (len) {
+ to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
+ len : BFI_MSGQ_CMD_ENTRY_SIZE;
+ memcpy(dst, src, to_copy);
+ len -= to_copy;
+ src += BFI_MSGQ_CMD_ENTRY_SIZE;
+ BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
+ dst = (u8 *)cmdq->addr.kva;
+ dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
+ num_entries++;
+ }
+
+}
+
+static void
+bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
+{
+ struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
+ struct bfa_msgq_cmd_entry *cmd;
+ int posted = 0;
+
+ cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);
+
+ /* Walk through pending list to see if the command can be posted */
+ while (!list_empty(&cmdq->pending_q)) {
+ cmd =
+ (struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
+ if (ntohs(cmd->msg_hdr->num_entries) <=
+ BFA_MSGQ_FREE_CNT(cmdq)) {
+ list_del(&cmd->qe);
+ __cmd_copy(cmdq, cmd);
+ posted = 1;
+ call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
+ } else {
+ break;
+ }
+ }
+
+ if (posted)
+ bfa_fsm_send_event(cmdq, CMDQ_E_POST);
+}
+
+static void
+bfa_msgq_cmdq_copy_next(void *arg)
+{
+ struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
+
+ if (cmdq->bytes_to_copy)
+ bfa_msgq_cmdq_copy_rsp(cmdq);
+}
+
+static void
+bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
+{
+ struct bfi_msgq_i2h_cmdq_copy_req *req =
+ (struct bfi_msgq_i2h_cmdq_copy_req *)mb;
+
+ cmdq->token = 0;
+ cmdq->offset = ntohs(req->offset);
+ cmdq->bytes_to_copy = ntohs(req->len);
+ bfa_msgq_cmdq_copy_rsp(cmdq);
+}
+
+static void
+bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
+{
+ struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
+ (struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
+ int copied;
+ u8 *addr = (u8 *)cmdq->addr.kva;
+
+ memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
+ bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
+ rsp->mh.mtag.i2htok = htons(cmdq->token);
+ copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
+ cmdq->bytes_to_copy;
+ addr += cmdq->offset;
+ memcpy(rsp->data, addr, copied);
+
+ cmdq->token++;
+ cmdq->offset += copied;
+ cmdq->bytes_to_copy -= copied;
+
+ if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
+ bfa_msgq_cmdq_copy_next, cmdq)) {
+ bfa_msgq_cmdq_copy_next(cmdq);
+ }
+}
+
+static void
+bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
+{
+ cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
+ INIT_LIST_HEAD(&cmdq->pending_q);
+ cmdq->msgq = msgq;
+ bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
+}
+
+static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
+
+enum rspq_event {
+ RSPQ_E_START = 1,
+ RSPQ_E_STOP = 2,
+ RSPQ_E_FAIL = 3,
+ RSPQ_E_RESP = 4,
+ RSPQ_E_INIT_RESP = 5,
+ RSPQ_E_DB_READY = 6,
+};
+
+bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
+bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
+ enum rspq_event);
+bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
+bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
+ enum rspq_event);
+
+static void
+rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
+{
+ rspq->producer_index = 0;
+ rspq->consumer_index = 0;
+ rspq->flags = 0;
+}
+
+static void
+rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
+{
+ switch (event) {
+ case RSPQ_E_START:
+ bfa_fsm_set_state(rspq, rspq_sm_init_wait);
+ break;
+
+ case RSPQ_E_STOP:
+ case RSPQ_E_FAIL:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
+{
+ bfa_wc_down(&rspq->msgq->init_wc);
+}
+
+static void
+rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
+{
+ switch (event) {
+ case RSPQ_E_FAIL:
+ case RSPQ_E_STOP:
+ bfa_fsm_set_state(rspq, rspq_sm_stopped);
+ break;
+
+ case RSPQ_E_INIT_RESP:
+ bfa_fsm_set_state(rspq, rspq_sm_ready);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
+{
+}
+
+static void
+rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
+{
+ switch (event) {
+ case RSPQ_E_STOP:
+ case RSPQ_E_FAIL:
+ bfa_fsm_set_state(rspq, rspq_sm_stopped);
+ break;
+
+ case RSPQ_E_RESP:
+ bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
+{
+ if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
+ bfa_msgq_rspq_dbell(rspq);
+}
+
+static void
+rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
+{
+ switch (event) {
+ case RSPQ_E_STOP:
+ case RSPQ_E_FAIL:
+ bfa_fsm_set_state(rspq, rspq_sm_stopped);
+ break;
+
+ case RSPQ_E_RESP:
+ rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
+ break;
+
+ case RSPQ_E_DB_READY:
+ if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
+ rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
+ bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
+ } else
+ bfa_fsm_set_state(rspq, rspq_sm_ready);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_msgq_rspq_dbell_ready(void *arg)
+{
+ struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;
+ bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
+}
+
+static void
+bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
+{
+ struct bfi_msgq_h2i_db *dbell =
+ (struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);
+
+ memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
+ bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
+ dbell->mh.mtag.i2htok = 0;
+ dbell->idx.rspq_ci = htons(rspq->consumer_index);
+
+ if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
+ bfa_msgq_rspq_dbell_ready, rspq)) {
+ bfa_msgq_rspq_dbell_ready(rspq);
+ }
+}
+
+static void
+bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
+{
+ struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
+ struct bfi_msgq_mhdr *msghdr;
+ int num_entries;
+ int mc;
+ u8 *rspq_qe;
+
+ rspq->producer_index = ntohs(dbell->idx.rspq_pi);
+
+ while (rspq->consumer_index != rspq->producer_index) {
+ rspq_qe = (u8 *)rspq->addr.kva;
+ rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
+ msghdr = (struct bfi_msgq_mhdr *)rspq_qe;
+
+ mc = msghdr->msg_class;
+ num_entries = ntohs(msghdr->num_entries);
+
+ if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
+ break;
+
+ (rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);
+
+ BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
+ rspq->depth);
+ }
+
+ bfa_fsm_send_event(rspq, RSPQ_E_RESP);
+}
+
+static void
+bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
+{
+ rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
+ rspq->msgq = msgq;
+ bfa_fsm_set_state(rspq, rspq_sm_stopped);
+}
+
+static void
+bfa_msgq_init_rsp(struct bfa_msgq *msgq,
+ struct bfi_mbmsg *mb)
+{
+ bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
+ bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
+}
+
+static void
+bfa_msgq_init(void *arg)
+{
+ struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
+ struct bfi_msgq_cfg_req *msgq_cfg =
+ (struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];
+
+ memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
+ bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
+ msgq_cfg->mh.mtag.i2htok = 0;
+
+ bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
+ msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
+ bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
+ msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);
+
+ bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
+}
+
+static void
+bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
+{
+ struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;
+
+ switch (msg->mh.msg_id) {
+ case BFI_MSGQ_I2H_INIT_RSP:
+ bfa_msgq_init_rsp(msgq, msg);
+ break;
+
+ case BFI_MSGQ_I2H_DOORBELL_PI:
+ bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
+ break;
+
+ case BFI_MSGQ_I2H_DOORBELL_CI:
+ bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
+ break;
+
+ case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
+ bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+}
+
+static void
+bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
+{
+ struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;
+
+ switch (event) {
+ case BFA_IOC_E_ENABLED:
+ bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
+ bfa_wc_up(&msgq->init_wc);
+ bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
+ bfa_wc_up(&msgq->init_wc);
+ bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
+ bfa_wc_wait(&msgq->init_wc);
+ break;
+
+ case BFA_IOC_E_DISABLED:
+ bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
+ bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
+ break;
+
+ case BFA_IOC_E_FAILED:
+ bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
+ bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
+ break;
+
+ default:
+ break;
+ }
+}
+
+u32
+bfa_msgq_meminfo(void)
+{
+ return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
+ roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
+}
+
+void
+bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
+{
+ msgq->cmdq.addr.kva = kva;
+ msgq->cmdq.addr.pa = pa;
+
+ kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
+ pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
+
+ msgq->rspq.addr.kva = kva;
+ msgq->rspq.addr.pa = pa;
+}
+
+void
+bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
+{
+ msgq->ioc = ioc;
+
+ bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
+ bfa_msgq_rspq_attach(&msgq->rspq, msgq);
+
+ bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
+ bfa_q_qe_init(&msgq->ioc_notify);
+ bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
+ bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
+}
+
+void
+bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
+ bfa_msgq_mcfunc_t cbfn, void *cbarg)
+{
+ msgq->rspq.rsphdlr[mc].cbfn = cbfn;
+ msgq->rspq.rsphdlr[mc].cbarg = cbarg;
+}
+
+void
+bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd)
+{
+ if (ntohs(cmd->msg_hdr->num_entries) <=
+ BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
+ __cmd_copy(&msgq->cmdq, cmd);
+ call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
+ bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
+ } else {
+ list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
+ }
+}
+
+void
+bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
+{
+ struct bfa_msgq_rspq *rspq = &msgq->rspq;
+ size_t len = buf_len;
+ size_t to_copy;
+ int ci;
+ u8 *src, *dst;
+
+ ci = rspq->consumer_index;
+ src = (u8 *)rspq->addr.kva;
+ src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
+ dst = buf;
+
+ while (len) {
+ to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
+ len : BFI_MSGQ_RSP_ENTRY_SIZE;
+ memcpy(dst, src, to_copy);
+ len -= to_copy;
+ dst += BFI_MSGQ_RSP_ENTRY_SIZE;
+ BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
+ src = (u8 *)rspq->addr.kva;
+ src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
+ }
+}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
new file mode 100644
index 00000000000..a6a565a366d
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
@@ -0,0 +1,130 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_MSGQ_H__
+#define __BFA_MSGQ_H__
+
+#include "bfa_defs.h"
+#include "bfi.h"
+#include "bfa_ioc.h"
+#include "bfa_cs.h"
+
+#define BFA_MSGQ_FREE_CNT(_q) \
+ (((_q)->consumer_index - (_q)->producer_index - 1) & ((_q)->depth - 1))
+
+#define BFA_MSGQ_INDX_ADD(_q_indx, _qe_num, _q_depth) \
+ ((_q_indx) = (((_q_indx) + (_qe_num)) & ((_q_depth) - 1)))
+
+#define BFA_MSGQ_CMDQ_NUM_ENTRY 128
+#define BFA_MSGQ_CMDQ_SIZE \
+ (BFI_MSGQ_CMD_ENTRY_SIZE * BFA_MSGQ_CMDQ_NUM_ENTRY)
+
+#define BFA_MSGQ_RSPQ_NUM_ENTRY 128
+#define BFA_MSGQ_RSPQ_SIZE \
+ (BFI_MSGQ_RSP_ENTRY_SIZE * BFA_MSGQ_RSPQ_NUM_ENTRY)
+
+#define bfa_msgq_cmd_set(_cmd, _cbfn, _cbarg, _msg_size, _msg_hdr) \
+do { \
+ (_cmd)->cbfn = (_cbfn); \
+ (_cmd)->cbarg = (_cbarg); \
+ (_cmd)->msg_size = (_msg_size); \
+ (_cmd)->msg_hdr = (_msg_hdr); \
+} while (0)
+
+struct bfa_msgq;
+
+typedef void (*bfa_msgq_cmdcbfn_t)(void *cbarg, enum bfa_status status);
+
+struct bfa_msgq_cmd_entry {
+ struct list_head qe;
+ bfa_msgq_cmdcbfn_t cbfn;
+ void *cbarg;
+ size_t msg_size;
+ struct bfi_msgq_mhdr *msg_hdr;
+};
+
+enum bfa_msgq_cmdq_flags {
+ BFA_MSGQ_CMDQ_F_DB_UPDATE = 1,
+};
+
+struct bfa_msgq_cmdq {
+ bfa_fsm_t fsm;
+ enum bfa_msgq_cmdq_flags flags;
+
+ u16 producer_index;
+ u16 consumer_index;
+ u16 depth; /* FW Q depth is 16 bits */
+ struct bfa_dma addr;
+ struct bfa_mbox_cmd dbell_mb;
+
+ u16 token;
+ int offset;
+ int bytes_to_copy;
+ struct bfa_mbox_cmd copy_mb;
+
+ struct list_head pending_q; /* pending command queue */
+
+ struct bfa_msgq *msgq;
+};
+
+enum bfa_msgq_rspq_flags {
+ BFA_MSGQ_RSPQ_F_DB_UPDATE = 1,
+};
+
+typedef void (*bfa_msgq_mcfunc_t)(void *cbarg, struct bfi_msgq_mhdr *mhdr);
+
+struct bfa_msgq_rspq {
+ bfa_fsm_t fsm;
+ enum bfa_msgq_rspq_flags flags;
+
+ u16 producer_index;
+ u16 consumer_index;
+ u16 depth; /* FW Q depth is 16 bits */
+ struct bfa_dma addr;
+ struct bfa_mbox_cmd dbell_mb;
+
+ int nmclass;
+ struct {
+ bfa_msgq_mcfunc_t cbfn;
+ void *cbarg;
+ } rsphdlr[BFI_MC_MAX];
+
+ struct bfa_msgq *msgq;
+};
+
+struct bfa_msgq {
+ struct bfa_msgq_cmdq cmdq;
+ struct bfa_msgq_rspq rspq;
+
+ struct bfa_wc init_wc;
+ struct bfa_mbox_cmd init_mb;
+
+ struct bfa_ioc_notify ioc_notify;
+ struct bfa_ioc *ioc;
+};
+
+u32 bfa_msgq_meminfo(void);
+void bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa);
+void bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc);
+void bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
+ bfa_msgq_mcfunc_t cbfn, void *cbarg);
+void bfa_msgq_cmd_post(struct bfa_msgq *msgq,
+ struct bfa_msgq_cmd_entry *cmd);
+void bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len);
+
+#endif
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
new file mode 100644
index 00000000000..7a1393aabd4
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -0,0 +1,481 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFI_H__
+#define __BFI_H__
+
+#include "bfa_defs.h"
+
+#pragma pack(1)
+
+/**
+ * BFI FW image type
+ */
+#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
+#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
+
+/**
+ * Msg header common to all msgs
+ */
+struct bfi_mhdr {
+ u8 msg_class; /*!< @ref enum bfi_mclass */
+ u8 msg_id; /*!< msg opcode with in the class */
+ union {
+ struct {
+ u8 qid;
+ u8 fn_lpu; /*!< msg destination */
+ } h2i;
+ u16 i2htok; /*!< token in msgs to host */
+ } mtag;
+};
+
+#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu))
+#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1)
+#define bfi_mhdr_2_qid(_mh) ((_mh)->mtag.h2i.qid)
+
+#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_op); \
+ (_mh).mtag.h2i.fn_lpu = (_fn_lpu); \
+} while (0)
+
+#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_op); \
+ (_mh).mtag.i2htok = (_i2htok); \
+} while (0)
+
+/*
+ * Message opcodes: 0-127 to firmware, 128-255 to host
+ */
+#define BFI_I2H_OPCODE_BASE 128
+#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
+
+/**
+ ****************************************************************************
+ *
+ * Scatter Gather Element and Page definition
+ *
+ ****************************************************************************
+ */
+
+/**
+ * DMA addresses
+ */
+union bfi_addr_u {
+ struct {
+ u32 addr_lo;
+ u32 addr_hi;
+ } a32;
+};
+
+/*
+ * Large Message structure - 128 Bytes size Msgs
+ */
+#define BFI_LMSG_SZ 128
+#define BFI_LMSG_PL_WSZ \
+ ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
+
+/**
+ * Mailbox message structure
+ */
+#define BFI_MBMSG_SZ 7
+struct bfi_mbmsg {
+ struct bfi_mhdr mh;
+ u32 pl[BFI_MBMSG_SZ];
+};
+
+/**
+ * Supported PCI function class codes (personality)
+ */
+enum bfi_pcifn_class {
+ BFI_PCIFN_CLASS_FC = 0x0c04,
+ BFI_PCIFN_CLASS_ETH = 0x0200,
+};
+
+/**
+ * Message Classes
+ */
+enum bfi_mclass {
+ BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
+ BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
+ BFI_MC_FLASH = 3, /*!< Flash message class */
+ BFI_MC_CEE = 4, /*!< CEE */
+ BFI_MC_FCPORT = 5, /*!< FC port */
+ BFI_MC_IOCFC = 6, /*!< FC - IO Controller (IOC) */
+ BFI_MC_LL = 7, /*!< Link Layer */
+ BFI_MC_UF = 8, /*!< Unsolicited frame receive */
+ BFI_MC_FCXP = 9, /*!< FC Transport */
+ BFI_MC_LPS = 10, /*!< lport fc login services */
+ BFI_MC_RPORT = 11, /*!< Remote port */
+ BFI_MC_ITNIM = 12, /*!< I-T nexus (Initiator mode) */
+ BFI_MC_IOIM_READ = 13, /*!< read IO (Initiator mode) */
+ BFI_MC_IOIM_WRITE = 14, /*!< write IO (Initiator mode) */
+ BFI_MC_IOIM_IO = 15, /*!< IO (Initiator mode) */
+ BFI_MC_IOIM = 16, /*!< IO (Initiator mode) */
+ BFI_MC_IOIM_IOCOM = 17, /*!< good IO completion */
+ BFI_MC_TSKIM = 18, /*!< Initiator Task management */
+ BFI_MC_SBOOT = 19, /*!< SAN boot services */
+ BFI_MC_IPFC = 20, /*!< IP over FC Msgs */
+ BFI_MC_PORT = 21, /*!< Physical port */
+ BFI_MC_SFP = 22, /*!< SFP module */
+ BFI_MC_MSGQ = 23, /*!< MSGQ */
+ BFI_MC_ENET = 24, /*!< ENET commands/responses */
+ BFI_MC_PHY = 25, /*!< External PHY message class */
+ BFI_MC_NBOOT = 26, /*!< Network Boot */
+ BFI_MC_TIO_READ = 27, /*!< read IO (Target mode) */
+ BFI_MC_TIO_WRITE = 28, /*!< write IO (Target mode) */
+ BFI_MC_TIO_DATA_XFERED = 29, /*!< ds transferred (target mode) */
+ BFI_MC_TIO_IO = 30, /*!< IO (Target mode) */
+ BFI_MC_TIO = 31, /*!< IO (target mode) */
+ BFI_MC_MFG = 32, /*!< MFG/ASIC block commands */
+ BFI_MC_EDMA = 33, /*!< EDMA copy commands */
+ BFI_MC_MAX = 34
+};
+
+#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
+
+#define BFI_FWBOOT_ENV_OS 0
+
+/**
+ *----------------------------------------------------------------------
+ * IOC
+ *----------------------------------------------------------------------
+ */
+
+/**
+ * Different asic generations
+ */
+enum bfi_asic_gen {
+ BFI_ASIC_GEN_CB = 1,
+ BFI_ASIC_GEN_CT = 2,
+ BFI_ASIC_GEN_CT2 = 3,
+};
+
+enum bfi_asic_mode {
+ BFI_ASIC_MODE_FC = 1, /* FC upto 8G speed */
+ BFI_ASIC_MODE_FC16 = 2, /* FC upto 16G speed */
+ BFI_ASIC_MODE_ETH = 3, /* Ethernet ports */
+ BFI_ASIC_MODE_COMBO = 4, /* FC 16G and Ethernet 10G port */
+};
+
+enum bfi_ioc_h2i_msgs {
+ BFI_IOC_H2I_ENABLE_REQ = 1,
+ BFI_IOC_H2I_DISABLE_REQ = 2,
+ BFI_IOC_H2I_GETATTR_REQ = 3,
+ BFI_IOC_H2I_DBG_SYNC = 4,
+ BFI_IOC_H2I_DBG_DUMP = 5,
+};
+
+enum bfi_ioc_i2h_msgs {
+ BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
+ BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
+ BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
+ BFI_IOC_I2H_HBEAT = BFA_I2HM(4),
+};
+
+/**
+ * BFI_IOC_H2I_GETATTR_REQ message
+ */
+struct bfi_ioc_getattr_req {
+ struct bfi_mhdr mh;
+ union bfi_addr_u attr_addr;
+};
+
+struct bfi_ioc_attr {
+ u64 mfg_pwwn; /*!< Mfg port wwn */
+ u64 mfg_nwwn; /*!< Mfg node wwn */
+ mac_t mfg_mac; /*!< Mfg mac */
+ u8 port_mode; /* enum bfi_port_mode */
+ u8 rsvd_a;
+ u64 pwwn;
+ u64 nwwn;
+ mac_t mac; /*!< PBC or Mfg mac */
+ u16 rsvd_b;
+ mac_t fcoe_mac;
+ u16 rsvd_c;
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ u8 pcie_gen;
+ u8 pcie_lanes_orig;
+ u8 pcie_lanes;
+ u8 rx_bbcredit; /*!< receive buffer credits */
+ u32 adapter_prop; /*!< adapter properties */
+ u16 maxfrsize; /*!< max receive frame size */
+ char asic_rev;
+ u8 rsvd_d;
+ char fw_version[BFA_VERSION_LEN];
+ char optrom_version[BFA_VERSION_LEN];
+ struct bfa_mfg_vpd vpd;
+ u32 card_type; /*!< card type */
+};
+
+/**
+ * BFI_IOC_I2H_GETATTR_REPLY message
+ */
+struct bfi_ioc_getattr_reply {
+ struct bfi_mhdr mh; /*!< Common msg header */
+ u8 status; /*!< cfg reply status */
+ u8 rsvd[3];
+};
+
+/**
+ * Firmware memory page offsets
+ */
+#define BFI_IOC_SMEM_PG0_CB (0x40)
+#define BFI_IOC_SMEM_PG0_CT (0x180)
+
+/**
+ * Firmware statistic offset
+ */
+#define BFI_IOC_FWSTATS_OFF (0x6B40)
+#define BFI_IOC_FWSTATS_SZ (4096)
+
+/**
+ * Firmware trace offset
+ */
+#define BFI_IOC_TRC_OFF (0x4b00)
+#define BFI_IOC_TRC_ENTS 256
+
+#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
+#define BFI_IOC_MD5SUM_SZ 4
+struct bfi_ioc_image_hdr {
+ u32 signature; /*!< constant signature */
+ u8 asic_gen; /*!< asic generation */
+ u8 asic_mode;
+ u8 port0_mode; /*!< device mode for port 0 */
+ u8 port1_mode; /*!< device mode for port 1 */
+ u32 exec; /*!< exec vector */
+ u32 bootenv; /*!< firmware boot env */
+ u32 rsvd_b[4];
+ u32 md5sum[BFI_IOC_MD5SUM_SZ];
+};
+
+#define BFI_FWBOOT_DEVMODE_OFF 4
+#define BFI_FWBOOT_TYPE_OFF 8
+#define BFI_FWBOOT_ENV_OFF 12
+#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \
+ (((u32)(__asic_gen)) << 24 | \
+ ((u32)(__asic_mode)) << 16 | \
+ ((u32)(__p0_mode)) << 8 | \
+ ((u32)(__p1_mode)))
+
+enum bfi_fwboot_type {
+ BFI_FWBOOT_TYPE_NORMAL = 0,
+ BFI_FWBOOT_TYPE_FLASH = 1,
+ BFI_FWBOOT_TYPE_MEMTEST = 2,
+};
+
+enum bfi_port_mode {
+ BFI_PORT_MODE_FC = 1,
+ BFI_PORT_MODE_ETH = 2,
+};
+
+struct bfi_ioc_hbeat {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u32 hb_count; /*!< current heart beat count */
+};
+
+/**
+ * IOC hardware/firmware state
+ */
+enum bfi_ioc_state {
+ BFI_IOC_UNINIT = 0, /*!< not initialized */
+ BFI_IOC_INITING = 1, /*!< h/w is being initialized */
+ BFI_IOC_HWINIT = 2, /*!< h/w is initialized */
+ BFI_IOC_CFG = 3, /*!< IOC configuration in progress */
+ BFI_IOC_OP = 4, /*!< IOC is operational */
+ BFI_IOC_DISABLING = 5, /*!< IOC is being disabled */
+ BFI_IOC_DISABLED = 6, /*!< IOC is disabled */
+ BFI_IOC_CFG_DISABLED = 7, /*!< IOC is being disabled;transient */
+ BFI_IOC_FAIL = 8, /*!< IOC heart-beat failure */
+ BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */
+};
+
+#define BFI_IOC_ENDIAN_SIG 0x12345678
+
+enum {
+ BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */
+ BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */
+ BFI_ADAPTER_TYPE_SH = 16, /*!< adapter type shift */
+ BFI_ADAPTER_NPORTS_MK = 0xff00, /*!< number of ports mask */
+ BFI_ADAPTER_NPORTS_SH = 8, /*!< number of ports shift */
+ BFI_ADAPTER_SPEED_MK = 0xff, /*!< adapter speed mask */
+ BFI_ADAPTER_SPEED_SH = 0, /*!< adapter speed shift */
+ BFI_ADAPTER_PROTO = 0x100000, /*!< prototype adapaters */
+ BFI_ADAPTER_TTV = 0x200000, /*!< TTV debug capable */
+ BFI_ADAPTER_UNSUPP = 0x400000, /*!< unknown adapter type */
+};
+
+#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
+ (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
+ BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_SETP(__prop, __val) \
+ ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_IS_PROTO(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_PROTO)
+#define BFI_ADAPTER_IS_TTV(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_TTV)
+#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_UNSUPP)
+#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
+ ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
+ BFI_ADAPTER_UNSUPP))
+
+/**
+ * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
+ */
+struct bfi_ioc_ctrl_req {
+ struct bfi_mhdr mh;
+ u16 clscode;
+ u16 rsvd;
+ u32 tv_sec;
+};
+
+/**
+ * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
+ */
+struct bfi_ioc_ctrl_reply {
+ struct bfi_mhdr mh; /*!< Common msg header */
+ u8 status; /*!< enable/disable status */
+ u8 port_mode; /*!< enum bfa_mode */
+ u8 cap_bm; /*!< capability bit mask */
+ u8 rsvd;
+};
+
+#define BFI_IOC_MSGSZ 8
+/**
+ * H2I Messages
+ */
+union bfi_ioc_h2i_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_ioc_ctrl_req enable_req;
+ struct bfi_ioc_ctrl_req disable_req;
+ struct bfi_ioc_getattr_req getattr_req;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+/**
+ * I2H Messages
+ */
+union bfi_ioc_i2h_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_ioc_ctrl_reply fw_event;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+/**
+ *----------------------------------------------------------------------
+ * MSGQ
+ *----------------------------------------------------------------------
+ */
+
+enum bfi_msgq_h2i_msgs {
+ BFI_MSGQ_H2I_INIT_REQ = 1,
+ BFI_MSGQ_H2I_DOORBELL_PI = 2,
+ BFI_MSGQ_H2I_DOORBELL_CI = 3,
+ BFI_MSGQ_H2I_CMDQ_COPY_RSP = 4,
+};
+
+enum bfi_msgq_i2h_msgs {
+ BFI_MSGQ_I2H_INIT_RSP = BFA_I2HM(BFI_MSGQ_H2I_INIT_REQ),
+ BFI_MSGQ_I2H_DOORBELL_PI = BFA_I2HM(BFI_MSGQ_H2I_DOORBELL_PI),
+ BFI_MSGQ_I2H_DOORBELL_CI = BFA_I2HM(BFI_MSGQ_H2I_DOORBELL_CI),
+ BFI_MSGQ_I2H_CMDQ_COPY_REQ = BFA_I2HM(BFI_MSGQ_H2I_CMDQ_COPY_RSP),
+};
+
+/* Messages(commands/responsed/AENS will have the following header */
+struct bfi_msgq_mhdr {
+ u8 msg_class;
+ u8 msg_id;
+ u16 msg_token;
+ u16 num_entries;
+ u8 enet_id;
+ u8 rsvd[1];
+};
+
+#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_mid); \
+ (_mh).msg_token = (_tok); \
+ (_mh).enet_id = (_enet_id); \
+} while (0)
+
+/*
+ * Mailbox for messaging interface
+ */
+#define BFI_MSGQ_CMD_ENTRY_SIZE (64) /* TBD */
+#define BFI_MSGQ_RSP_ENTRY_SIZE (64) /* TBD */
+
+#define bfi_msgq_num_cmd_entries(_size) \
+ (((_size) + BFI_MSGQ_CMD_ENTRY_SIZE - 1) / BFI_MSGQ_CMD_ENTRY_SIZE)
+
+struct bfi_msgq {
+ union bfi_addr_u addr;
+ u16 q_depth; /* Total num of entries in the queue */
+ u8 rsvd[2];
+};
+
+/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */
+struct bfi_msgq_cfg_req {
+ struct bfi_mhdr mh;
+ struct bfi_msgq cmdq;
+ struct bfi_msgq rspq;
+};
+
+/* BFI_ENET_MSGQ_CFG_RSP */
+struct bfi_msgq_cfg_rsp {
+ struct bfi_mhdr mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/* BFI_MSGQ_H2I_DOORBELL */
+struct bfi_msgq_h2i_db {
+ struct bfi_mhdr mh;
+ union {
+ u16 cmdq_pi;
+ u16 rspq_ci;
+ } idx;
+};
+
+/* BFI_MSGQ_I2H_DOORBELL */
+struct bfi_msgq_i2h_db {
+ struct bfi_mhdr mh;
+ union {
+ u16 rspq_pi;
+ u16 cmdq_ci;
+ } idx;
+};
+
+#define BFI_CMD_COPY_SZ 28
+
+/* BFI_MSGQ_H2I_CMD_COPY_RSP */
+struct bfi_msgq_h2i_cmdq_copy_rsp {
+ struct bfi_mhdr mh;
+ u8 data[BFI_CMD_COPY_SZ];
+};
+
+/* BFI_MSGQ_I2H_CMD_COPY_REQ */
+struct bfi_msgq_i2h_cmdq_copy_req {
+ struct bfi_mhdr mh;
+ u16 offset;
+ u16 len;
+};
+
+#pragma pack()
+
+#endif /* __BFI_H__ */
diff --git a/drivers/net/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
index 4eecabea397..4eecabea397 100644
--- a/drivers/net/bna/bfi_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
new file mode 100644
index 00000000000..a90f1cf46b4
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -0,0 +1,901 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * @file bfi_enet.h BNA Hardware and Firmware Interface
+ */
+
+/**
+ * Skipping statistics collection to avoid clutter.
+ * Command is no longer needed:
+ * MTU
+ * TxQ Stop
+ * RxQ Stop
+ * RxF Enable/Disable
+ *
+ * HDS-off request is dynamic
+ * keep structures as multiple of 32-bit fields for alignment.
+ * All values must be written in big-endian.
+ */
+#ifndef __BFI_ENET_H__
+#define __BFI_ENET_H__
+
+#include "bfa_defs.h"
+#include "bfi.h"
+
+#pragma pack(1)
+
+#define BFI_ENET_CFG_MAX 32 /* Max resources per PF */
+
+#define BFI_ENET_TXQ_PRIO_MAX 8
+#define BFI_ENET_RX_QSET_MAX 16
+#define BFI_ENET_TXQ_WI_VECT_MAX 4
+
+#define BFI_ENET_VLAN_ID_MAX 4096
+#define BFI_ENET_VLAN_BLOCK_SIZE 512 /* in bits */
+#define BFI_ENET_VLAN_BLOCKS_MAX \
+ (BFI_ENET_VLAN_ID_MAX / BFI_ENET_VLAN_BLOCK_SIZE)
+#define BFI_ENET_VLAN_WORD_SIZE 32 /* in bits */
+#define BFI_ENET_VLAN_WORDS_MAX \
+ (BFI_ENET_VLAN_BLOCK_SIZE / BFI_ENET_VLAN_WORD_SIZE)
+
+#define BFI_ENET_RSS_RIT_MAX 64 /* entries */
+#define BFI_ENET_RSS_KEY_LEN 10 /* 32-bit words */
+
+union bfi_addr_be_u {
+ struct {
+ u32 addr_hi; /* Most Significant 32-bits */
+ u32 addr_lo; /* Least Significant 32-Bits */
+ } a32;
+};
+
+/**
+ * T X Q U E U E D E F I N E S
+ */
+/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
+/* TxQ Entry Opcodes */
+#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
+#define BFI_ENET_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
+#define BFI_ENET_TXQ_WI_EXTENSION (0x104) /* Extension WI */
+
+/* TxQ Entry Control Flags */
+#define BFI_ENET_TXQ_WI_CF_FCOE_CRC (1 << 8)
+#define BFI_ENET_TXQ_WI_CF_IPID_MODE (1 << 5)
+#define BFI_ENET_TXQ_WI_CF_INS_PRIO (1 << 4)
+#define BFI_ENET_TXQ_WI_CF_INS_VLAN (1 << 3)
+#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM (1 << 2)
+#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM (1 << 1)
+#define BFI_ENET_TXQ_WI_CF_IP_CKSUM (1 << 0)
+
+struct bfi_enet_txq_wi_base {
+ u8 reserved;
+ u8 num_vectors; /* number of vectors present */
+ u16 opcode;
+ /* BFI_ENET_TXQ_WI_SEND or BFI_ENET_TXQ_WI_SEND_LSO */
+ u16 flags; /* OR of all the flags */
+ u16 l4_hdr_size_n_offset;
+ u16 vlan_tag;
+ u16 lso_mss; /* Only 14 LSB are valid */
+ u32 frame_length; /* Only 24 LSB are valid */
+};
+
+struct bfi_enet_txq_wi_ext {
+ u16 reserved;
+ u16 opcode; /* BFI_ENET_TXQ_WI_EXTENSION */
+ u32 reserved2[3];
+};
+
+struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */
+ u16 reserved;
+ u16 length; /* Only 14 LSB are valid */
+ union bfi_addr_be_u addr;
+};
+
+/**
+ * TxQ Entry Structure
+ *
+ */
+struct bfi_enet_txq_entry {
+ union {
+ struct bfi_enet_txq_wi_base base;
+ struct bfi_enet_txq_wi_ext ext;
+ } wi;
+ struct bfi_enet_txq_wi_vector vector[BFI_ENET_TXQ_WI_VECT_MAX];
+};
+
+#define wi_hdr wi.base
+#define wi_ext_hdr wi.ext
+
+#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
+ (((_hdr_size) << 10) | ((_offset) & 0x3FF))
+
+/**
+ * R X Q U E U E D E F I N E S
+ */
+struct bfi_enet_rxq_entry {
+ union bfi_addr_be_u rx_buffer;
+};
+
+/**
+ * R X C O M P L E T I O N Q U E U E D E F I N E S
+ */
+/* CQ Entry Flags */
+#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0)
+#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1)
+#define BFI_ENET_CQ_EF_TOO_LONG (1 << 2)
+#define BFI_ENET_CQ_EF_FC_CRC_OK (1 << 3)
+
+#define BFI_ENET_CQ_EF_RSVD1 (1 << 4)
+#define BFI_ENET_CQ_EF_L4_CKSUM_OK (1 << 5)
+#define BFI_ENET_CQ_EF_L3_CKSUM_OK (1 << 6)
+#define BFI_ENET_CQ_EF_HDS_HEADER (1 << 7)
+
+#define BFI_ENET_CQ_EF_UDP (1 << 8)
+#define BFI_ENET_CQ_EF_TCP (1 << 9)
+#define BFI_ENET_CQ_EF_IP_OPTIONS (1 << 10)
+#define BFI_ENET_CQ_EF_IPV6 (1 << 11)
+
+#define BFI_ENET_CQ_EF_IPV4 (1 << 12)
+#define BFI_ENET_CQ_EF_VLAN (1 << 13)
+#define BFI_ENET_CQ_EF_RSS (1 << 14)
+#define BFI_ENET_CQ_EF_RSVD2 (1 << 15)
+
+#define BFI_ENET_CQ_EF_MCAST_MATCH (1 << 16)
+#define BFI_ENET_CQ_EF_MCAST (1 << 17)
+#define BFI_ENET_CQ_EF_BCAST (1 << 18)
+#define BFI_ENET_CQ_EF_REMOTE (1 << 19)
+
+#define BFI_ENET_CQ_EF_LOCAL (1 << 20)
+
+/* CQ Entry Structure */
+struct bfi_enet_cq_entry {
+ u32 flags;
+ u16 vlan_tag;
+ u16 length;
+ u32 rss_hash;
+ u8 valid;
+ u8 reserved1;
+ u8 reserved2;
+ u8 rxq_id;
+};
+
+/**
+ * E N E T C O N T R O L P A T H C O M M A N D S
+ */
+struct bfi_enet_q {
+ union bfi_addr_u pg_tbl;
+ union bfi_addr_u first_entry;
+ u16 pages; /* # of pages */
+ u16 page_sz;
+};
+
+struct bfi_enet_txq {
+ struct bfi_enet_q q;
+ u8 priority;
+ u8 rsvd[3];
+};
+
+struct bfi_enet_rxq {
+ struct bfi_enet_q q;
+ u16 rx_buffer_size;
+ u16 rsvd;
+};
+
+struct bfi_enet_cq {
+ struct bfi_enet_q q;
+};
+
+struct bfi_enet_ib_cfg {
+ u8 int_pkt_dma;
+ u8 int_enabled;
+ u8 int_pkt_enabled;
+ u8 continuous_coalescing;
+ u8 msix;
+ u8 rsvd[3];
+ u32 coalescing_timeout;
+ u32 inter_pkt_timeout;
+ u8 inter_pkt_count;
+ u8 rsvd1[3];
+};
+
+struct bfi_enet_ib {
+ union bfi_addr_u index_addr;
+ union {
+ u16 msix_index;
+ u16 intx_bitmask;
+ } intr;
+ u16 rsvd;
+};
+
+/**
+ * ENET command messages
+ */
+enum bfi_enet_h2i_msgs {
+ /* Rx Commands */
+ BFI_ENET_H2I_RX_CFG_SET_REQ = 1,
+ BFI_ENET_H2I_RX_CFG_CLR_REQ = 2,
+
+ BFI_ENET_H2I_RIT_CFG_REQ = 3,
+ BFI_ENET_H2I_RSS_CFG_REQ = 4,
+ BFI_ENET_H2I_RSS_ENABLE_REQ = 5,
+ BFI_ENET_H2I_RX_PROMISCUOUS_REQ = 6,
+ BFI_ENET_H2I_RX_DEFAULT_REQ = 7,
+
+ BFI_ENET_H2I_MAC_UCAST_SET_REQ = 8,
+ BFI_ENET_H2I_MAC_UCAST_CLR_REQ = 9,
+ BFI_ENET_H2I_MAC_UCAST_ADD_REQ = 10,
+ BFI_ENET_H2I_MAC_UCAST_DEL_REQ = 11,
+
+ BFI_ENET_H2I_MAC_MCAST_ADD_REQ = 12,
+ BFI_ENET_H2I_MAC_MCAST_DEL_REQ = 13,
+ BFI_ENET_H2I_MAC_MCAST_FILTER_REQ = 14,
+
+ BFI_ENET_H2I_RX_VLAN_SET_REQ = 15,
+ BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ = 16,
+
+ /* Tx Commands */
+ BFI_ENET_H2I_TX_CFG_SET_REQ = 17,
+ BFI_ENET_H2I_TX_CFG_CLR_REQ = 18,
+
+ /* Port Commands */
+ BFI_ENET_H2I_PORT_ADMIN_UP_REQ = 19,
+ BFI_ENET_H2I_SET_PAUSE_REQ = 20,
+ BFI_ENET_H2I_DIAG_LOOPBACK_REQ = 21,
+
+ /* Get Attributes Command */
+ BFI_ENET_H2I_GET_ATTR_REQ = 22,
+
+ /* Statistics Commands */
+ BFI_ENET_H2I_STATS_GET_REQ = 23,
+ BFI_ENET_H2I_STATS_CLR_REQ = 24,
+
+ BFI_ENET_H2I_WOL_MAGIC_REQ = 25,
+ BFI_ENET_H2I_WOL_FRAME_REQ = 26,
+
+ BFI_ENET_H2I_MAX = 27,
+};
+
+enum bfi_enet_i2h_msgs {
+ /* Rx Responses */
+ BFI_ENET_I2H_RX_CFG_SET_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RX_CFG_SET_REQ),
+ BFI_ENET_I2H_RX_CFG_CLR_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RX_CFG_CLR_REQ),
+
+ BFI_ENET_I2H_RIT_CFG_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RIT_CFG_REQ),
+ BFI_ENET_I2H_RSS_CFG_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RSS_CFG_REQ),
+ BFI_ENET_I2H_RSS_ENABLE_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RSS_ENABLE_REQ),
+ BFI_ENET_I2H_RX_PROMISCUOUS_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RX_PROMISCUOUS_REQ),
+ BFI_ENET_I2H_RX_DEFAULT_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RX_DEFAULT_REQ),
+
+ BFI_ENET_I2H_MAC_UCAST_SET_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_SET_REQ),
+ BFI_ENET_I2H_MAC_UCAST_CLR_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_CLR_REQ),
+ BFI_ENET_I2H_MAC_UCAST_ADD_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_ADD_REQ),
+ BFI_ENET_I2H_MAC_UCAST_DEL_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_DEL_REQ),
+
+ BFI_ENET_I2H_MAC_MCAST_ADD_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_ADD_REQ),
+ BFI_ENET_I2H_MAC_MCAST_DEL_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_DEL_REQ),
+ BFI_ENET_I2H_MAC_MCAST_FILTER_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_FILTER_REQ),
+
+ BFI_ENET_I2H_RX_VLAN_SET_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RX_VLAN_SET_REQ),
+
+ BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ),
+
+ /* Tx Responses */
+ BFI_ENET_I2H_TX_CFG_SET_RSP =
+ BFA_I2HM(BFI_ENET_H2I_TX_CFG_SET_REQ),
+ BFI_ENET_I2H_TX_CFG_CLR_RSP =
+ BFA_I2HM(BFI_ENET_H2I_TX_CFG_CLR_REQ),
+
+ /* Port Responses */
+ BFI_ENET_I2H_PORT_ADMIN_RSP =
+ BFA_I2HM(BFI_ENET_H2I_PORT_ADMIN_UP_REQ),
+
+ BFI_ENET_I2H_SET_PAUSE_RSP =
+ BFA_I2HM(BFI_ENET_H2I_SET_PAUSE_REQ),
+ BFI_ENET_I2H_DIAG_LOOPBACK_RSP =
+ BFA_I2HM(BFI_ENET_H2I_DIAG_LOOPBACK_REQ),
+
+ /* Attributes Response */
+ BFI_ENET_I2H_GET_ATTR_RSP =
+ BFA_I2HM(BFI_ENET_H2I_GET_ATTR_REQ),
+
+ /* Statistics Responses */
+ BFI_ENET_I2H_STATS_GET_RSP =
+ BFA_I2HM(BFI_ENET_H2I_STATS_GET_REQ),
+ BFI_ENET_I2H_STATS_CLR_RSP =
+ BFA_I2HM(BFI_ENET_H2I_STATS_CLR_REQ),
+
+ BFI_ENET_I2H_WOL_MAGIC_RSP =
+ BFA_I2HM(BFI_ENET_H2I_WOL_MAGIC_REQ),
+ BFI_ENET_I2H_WOL_FRAME_RSP =
+ BFA_I2HM(BFI_ENET_H2I_WOL_FRAME_REQ),
+
+ /* AENs */
+ BFI_ENET_I2H_LINK_DOWN_AEN = BFA_I2HM(BFI_ENET_H2I_MAX),
+ BFI_ENET_I2H_LINK_UP_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 1),
+
+ BFI_ENET_I2H_PORT_ENABLE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 2),
+ BFI_ENET_I2H_PORT_DISABLE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 3),
+
+ BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4),
+};
+
+/**
+ * The following error codes can be returned by the enet commands
+ */
+enum bfi_enet_err {
+ BFI_ENET_CMD_OK = 0,
+ BFI_ENET_CMD_FAIL = 1,
+ BFI_ENET_CMD_DUP_ENTRY = 2, /* !< Duplicate entry in CAM */
+ BFI_ENET_CMD_CAM_FULL = 3, /* !< CAM is full */
+ BFI_ENET_CMD_NOT_OWNER = 4, /* !< Not permitted, b'cos not owner */
+ BFI_ENET_CMD_NOT_EXEC = 5, /* !< Was not sent to f/w at all */
+ BFI_ENET_CMD_WAITING = 6, /* !< Waiting for completion */
+ BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */
+};
+
+/**
+ * Generic Request
+ *
+ * bfi_enet_req is used by:
+ * BFI_ENET_H2I_RX_CFG_CLR_REQ
+ * BFI_ENET_H2I_TX_CFG_CLR_REQ
+ */
+struct bfi_enet_req {
+ struct bfi_msgq_mhdr mh;
+};
+
+/**
+ * Enable/Disable Request
+ *
+ * bfi_enet_enable_req is used by:
+ * BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero)
+ * BFI_ENET_H2I_RX_PROMISCUOUS_REQ (enet_id must be zero)
+ * BFI_ENET_H2I_RX_DEFAULT_REQ (enet_id must be zero)
+ * BFI_ENET_H2I_RX_MAC_MCAST_FILTER_REQ
+ * BFI_ENET_H2I_PORT_ADMIN_UP_REQ (enet_id must be zero)
+ */
+struct bfi_enet_enable_req {
+ struct bfi_msgq_mhdr mh;
+ u8 enable; /* 1 = enable; 0 = disable */
+ u8 rsvd[3];
+};
+
+/**
+ * Generic Response
+ */
+struct bfi_enet_rsp {
+ struct bfi_msgq_mhdr mh;
+ u8 error; /*!< if error see cmd_offset */
+ u8 rsvd;
+ u16 cmd_offset; /*!< offset to invalid parameter */
+};
+
+/**
+ * GLOBAL CONFIGURATION
+ */
+
+/**
+ * bfi_enet_attr_req is used by:
+ * BFI_ENET_H2I_GET_ATTR_REQ
+ */
+struct bfi_enet_attr_req {
+ struct bfi_msgq_mhdr mh;
+};
+
+/**
+ * bfi_enet_attr_rsp is used by:
+ * BFI_ENET_I2H_GET_ATTR_RSP
+ */
+struct bfi_enet_attr_rsp {
+ struct bfi_msgq_mhdr mh;
+ u8 error; /*!< if error see cmd_offset */
+ u8 rsvd;
+ u16 cmd_offset; /*!< offset to invalid parameter */
+ u32 max_cfg;
+ u32 max_ucmac;
+ u32 rit_size;
+};
+
+/**
+ * Tx Configuration
+ *
+ * bfi_enet_tx_cfg is used by:
+ * BFI_ENET_H2I_TX_CFG_SET_REQ
+ */
+enum bfi_enet_tx_vlan_mode {
+ BFI_ENET_TX_VLAN_NOP = 0,
+ BFI_ENET_TX_VLAN_INS = 1,
+ BFI_ENET_TX_VLAN_WI = 2,
+};
+
+struct bfi_enet_tx_cfg {
+ u8 vlan_mode; /*!< processing mode */
+ u8 rsvd;
+ u16 vlan_id;
+ u8 admit_tagged_frame;
+ u8 apply_vlan_filter;
+ u8 add_to_vswitch;
+ u8 rsvd1[1];
+};
+
+struct bfi_enet_tx_cfg_req {
+ struct bfi_msgq_mhdr mh;
+ u8 num_queues; /* # of Tx Queues */
+ u8 rsvd[3];
+
+ struct {
+ struct bfi_enet_txq q;
+ struct bfi_enet_ib ib;
+ } q_cfg[BFI_ENET_TXQ_PRIO_MAX];
+
+ struct bfi_enet_ib_cfg ib_cfg;
+
+ struct bfi_enet_tx_cfg tx_cfg;
+};
+
+struct bfi_enet_tx_cfg_rsp {
+ struct bfi_msgq_mhdr mh;
+ u8 error;
+ u8 hw_id; /* For debugging */
+ u8 rsvd[2];
+ struct {
+ u32 q_dbell; /* PCI base address offset */
+ u32 i_dbell; /* PCI base address offset */
+ u8 hw_qid; /* For debugging */
+ u8 rsvd[3];
+ } q_handles[BFI_ENET_TXQ_PRIO_MAX];
+};
+
+/**
+ * Rx Configuration
+ *
+ * bfi_enet_rx_cfg is used by:
+ * BFI_ENET_H2I_RX_CFG_SET_REQ
+ */
+enum bfi_enet_rxq_type {
+ BFI_ENET_RXQ_SINGLE = 1,
+ BFI_ENET_RXQ_LARGE_SMALL = 2,
+ BFI_ENET_RXQ_HDS = 3,
+ BFI_ENET_RXQ_HDS_OPT_BASED = 4,
+};
+
+enum bfi_enet_hds_type {
+ BFI_ENET_HDS_FORCED = 0x01,
+ BFI_ENET_HDS_IPV6_UDP = 0x02,
+ BFI_ENET_HDS_IPV6_TCP = 0x04,
+ BFI_ENET_HDS_IPV4_TCP = 0x08,
+ BFI_ENET_HDS_IPV4_UDP = 0x10,
+};
+
+struct bfi_enet_rx_cfg {
+ u8 rxq_type;
+ u8 rsvd[3];
+
+ struct {
+ u8 max_header_size;
+ u8 force_offset;
+ u8 type;
+ u8 rsvd1;
+ } hds;
+
+ u8 multi_buffer;
+ u8 strip_vlan;
+ u8 drop_untagged;
+ u8 rsvd2;
+};
+
+/*
+ * Multicast frames are received on the ql of q-set index zero.
+ * On the completion queue. RxQ ID = even is for large/data buffer queues
+ * and RxQ ID = odd is for small/header buffer queues.
+ */
+struct bfi_enet_rx_cfg_req {
+ struct bfi_msgq_mhdr mh;
+ u8 num_queue_sets; /* # of Rx Queue Sets */
+ u8 rsvd[3];
+
+ struct {
+ struct bfi_enet_rxq ql; /* large/data/single buffers */
+ struct bfi_enet_rxq qs; /* small/header buffers */
+ struct bfi_enet_cq cq;
+ struct bfi_enet_ib ib;
+ } q_cfg[BFI_ENET_RX_QSET_MAX];
+
+ struct bfi_enet_ib_cfg ib_cfg;
+
+ struct bfi_enet_rx_cfg rx_cfg;
+};
+
+struct bfi_enet_rx_cfg_rsp {
+ struct bfi_msgq_mhdr mh;
+ u8 error;
+ u8 hw_id; /* For debugging */
+ u8 rsvd[2];
+ struct {
+ u32 ql_dbell; /* PCI base address offset */
+ u32 qs_dbell; /* PCI base address offset */
+ u32 i_dbell; /* PCI base address offset */
+ u8 hw_lqid; /* For debugging */
+ u8 hw_sqid; /* For debugging */
+ u8 hw_cqid; /* For debugging */
+ u8 rsvd;
+ } q_handles[BFI_ENET_RX_QSET_MAX];
+};
+
+/**
+ * RIT
+ *
+ * bfi_enet_rit_req is used by:
+ * BFI_ENET_H2I_RIT_CFG_REQ
+ */
+struct bfi_enet_rit_req {
+ struct bfi_msgq_mhdr mh;
+ u16 size; /* number of table-entries used */
+ u8 rsvd[2];
+ u8 table[BFI_ENET_RSS_RIT_MAX];
+};
+
+/**
+ * RSS
+ *
+ * bfi_enet_rss_cfg_req is used by:
+ * BFI_ENET_H2I_RSS_CFG_REQ
+ */
+enum bfi_enet_rss_type {
+ BFI_ENET_RSS_IPV6 = 0x01,
+ BFI_ENET_RSS_IPV6_TCP = 0x02,
+ BFI_ENET_RSS_IPV4 = 0x04,
+ BFI_ENET_RSS_IPV4_TCP = 0x08
+};
+
+struct bfi_enet_rss_cfg {
+ u8 type;
+ u8 mask;
+ u8 rsvd[2];
+ u32 key[BFI_ENET_RSS_KEY_LEN];
+};
+
+struct bfi_enet_rss_cfg_req {
+ struct bfi_msgq_mhdr mh;
+ struct bfi_enet_rss_cfg cfg;
+};
+
+/**
+ * MAC Unicast
+ *
+ * bfi_enet_rx_vlan_req is used by:
+ * BFI_ENET_H2I_MAC_UCAST_SET_REQ
+ * BFI_ENET_H2I_MAC_UCAST_CLR_REQ
+ * BFI_ENET_H2I_MAC_UCAST_ADD_REQ
+ * BFI_ENET_H2I_MAC_UCAST_DEL_REQ
+ */
+struct bfi_enet_ucast_req {
+ struct bfi_msgq_mhdr mh;
+ mac_t mac_addr;
+ u8 rsvd[2];
+};
+
+/**
+ * MAC Unicast + VLAN
+ */
+struct bfi_enet_mac_n_vlan_req {
+ struct bfi_msgq_mhdr mh;
+ u16 vlan_id;
+ mac_t mac_addr;
+};
+
+/**
+ * MAC Multicast
+ *
+ * bfi_enet_mac_mfilter_add_req is used by:
+ * BFI_ENET_H2I_MAC_MCAST_ADD_REQ
+ */
+struct bfi_enet_mcast_add_req {
+ struct bfi_msgq_mhdr mh;
+ mac_t mac_addr;
+ u8 rsvd[2];
+};
+
+/**
+ * bfi_enet_mac_mfilter_add_rsp is used by:
+ * BFI_ENET_I2H_MAC_MCAST_ADD_RSP
+ */
+struct bfi_enet_mcast_add_rsp {
+ struct bfi_msgq_mhdr mh;
+ u8 error;
+ u8 rsvd;
+ u16 cmd_offset;
+ u16 handle;
+ u8 rsvd1[2];
+};
+
+/**
+ * bfi_enet_mac_mfilter_del_req is used by:
+ * BFI_ENET_H2I_MAC_MCAST_DEL_REQ
+ */
+struct bfi_enet_mcast_del_req {
+ struct bfi_msgq_mhdr mh;
+ u16 handle;
+ u8 rsvd[2];
+};
+
+/**
+ * VLAN
+ *
+ * bfi_enet_rx_vlan_req is used by:
+ * BFI_ENET_H2I_RX_VLAN_SET_REQ
+ */
+struct bfi_enet_rx_vlan_req {
+ struct bfi_msgq_mhdr mh;
+ u8 block_idx;
+ u8 rsvd[3];
+ u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
+};
+
+/**
+ * PAUSE
+ *
+ * bfi_enet_set_pause_req is used by:
+ * BFI_ENET_H2I_SET_PAUSE_REQ
+ */
+struct bfi_enet_set_pause_req {
+ struct bfi_msgq_mhdr mh;
+ u8 rsvd[2];
+ u8 tx_pause; /* 1 = enable; 0 = disable */
+ u8 rx_pause; /* 1 = enable; 0 = disable */
+};
+
+/**
+ * DIAGNOSTICS
+ *
+ * bfi_enet_diag_lb_req is used by:
+ * BFI_ENET_H2I_DIAG_LOOPBACK
+ */
+struct bfi_enet_diag_lb_req {
+ struct bfi_msgq_mhdr mh;
+ u8 rsvd[2];
+ u8 mode; /* cable or Serdes */
+ u8 enable; /* 1 = enable; 0 = disable */
+};
+
+/**
+ * enum for Loopback opmodes
+ */
+enum {
+ BFI_ENET_DIAG_LB_OPMODE_EXT = 0,
+ BFI_ENET_DIAG_LB_OPMODE_CBL = 1,
+};
+
+/**
+ * STATISTICS
+ *
+ * bfi_enet_stats_req is used by:
+ * BFI_ENET_H2I_STATS_GET_REQ
+ * BFI_ENET_I2H_STATS_CLR_REQ
+ */
+struct bfi_enet_stats_req {
+ struct bfi_msgq_mhdr mh;
+ u16 stats_mask;
+ u8 rsvd[2];
+ u32 rx_enet_mask;
+ u32 tx_enet_mask;
+ union bfi_addr_u host_buffer;
+};
+
+/**
+ * defines for "stats_mask" above.
+ */
+#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */
+#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
+#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
+#define BFI_ENET_STATS_RX_FC (1 << 3) /* !< Rx FC Stats from RxA */
+#define BFI_ENET_STATS_TX_FC (1 << 4) /* !< Tx FC Stats from TxA */
+
+#define BFI_ENET_STATS_ALL 0x1f
+
+/* TxF Frame Statistics */
+struct bfi_enet_stats_txf {
+ u64 ucast_octets;
+ u64 ucast;
+ u64 ucast_vlan;
+
+ u64 mcast_octets;
+ u64 mcast;
+ u64 mcast_vlan;
+
+ u64 bcast_octets;
+ u64 bcast;
+ u64 bcast_vlan;
+
+ u64 errors;
+ u64 filter_vlan; /* frames filtered due to VLAN */
+ u64 filter_mac_sa; /* frames filtered due to SA check */
+};
+
+/* RxF Frame Statistics */
+struct bfi_enet_stats_rxf {
+ u64 ucast_octets;
+ u64 ucast;
+ u64 ucast_vlan;
+
+ u64 mcast_octets;
+ u64 mcast;
+ u64 mcast_vlan;
+
+ u64 bcast_octets;
+ u64 bcast;
+ u64 bcast_vlan;
+ u64 frame_drops;
+};
+
+/* FC Tx Frame Statistics */
+struct bfi_enet_stats_fc_tx {
+ u64 txf_ucast_octets;
+ u64 txf_ucast;
+ u64 txf_ucast_vlan;
+
+ u64 txf_mcast_octets;
+ u64 txf_mcast;
+ u64 txf_mcast_vlan;
+
+ u64 txf_bcast_octets;
+ u64 txf_bcast;
+ u64 txf_bcast_vlan;
+
+ u64 txf_parity_errors;
+ u64 txf_timeout;
+ u64 txf_fid_parity_errors;
+};
+
+/* FC Rx Frame Statistics */
+struct bfi_enet_stats_fc_rx {
+ u64 rxf_ucast_octets;
+ u64 rxf_ucast;
+ u64 rxf_ucast_vlan;
+
+ u64 rxf_mcast_octets;
+ u64 rxf_mcast;
+ u64 rxf_mcast_vlan;
+
+ u64 rxf_bcast_octets;
+ u64 rxf_bcast;
+ u64 rxf_bcast_vlan;
+};
+
+/* RAD Frame Statistics */
+struct bfi_enet_stats_rad {
+ u64 rx_frames;
+ u64 rx_octets;
+ u64 rx_vlan_frames;
+
+ u64 rx_ucast;
+ u64 rx_ucast_octets;
+ u64 rx_ucast_vlan;
+
+ u64 rx_mcast;
+ u64 rx_mcast_octets;
+ u64 rx_mcast_vlan;
+
+ u64 rx_bcast;
+ u64 rx_bcast_octets;
+ u64 rx_bcast_vlan;
+
+ u64 rx_drops;
+};
+
+/* BPC Tx Registers */
+struct bfi_enet_stats_bpc {
+ /* transmit stats */
+ u64 tx_pause[8];
+ u64 tx_zero_pause[8]; /*!< Pause cancellation */
+ /*!<Pause initiation rather than retention */
+ u64 tx_first_pause[8];
+
+ /* receive stats */
+ u64 rx_pause[8];
+ u64 rx_zero_pause[8]; /*!< Pause cancellation */
+ /*!<Pause initiation rather than retention */
+ u64 rx_first_pause[8];
+};
+
+/* MAC Rx Statistics */
+struct bfi_enet_stats_mac {
+ u64 frame_64; /* both rx and tx counter */
+ u64 frame_65_127; /* both rx and tx counter */
+ u64 frame_128_255; /* both rx and tx counter */
+ u64 frame_256_511; /* both rx and tx counter */
+ u64 frame_512_1023; /* both rx and tx counter */
+ u64 frame_1024_1518; /* both rx and tx counter */
+ u64 frame_1519_1522; /* both rx and tx counter */
+
+ /* receive stats */
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_fcs_error;
+ u64 rx_multicast;
+ u64 rx_broadcast;
+ u64 rx_control_frames;
+ u64 rx_pause;
+ u64 rx_unknown_opcode;
+ u64 rx_alignment_error;
+ u64 rx_frame_length_error;
+ u64 rx_code_error;
+ u64 rx_carrier_sense_error;
+ u64 rx_undersize;
+ u64 rx_oversize;
+ u64 rx_fragments;
+ u64 rx_jabber;
+ u64 rx_drop;
+
+ /* transmit stats */
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_multicast;
+ u64 tx_broadcast;
+ u64 tx_pause;
+ u64 tx_deferral;
+ u64 tx_excessive_deferral;
+ u64 tx_single_collision;
+ u64 tx_muliple_collision;
+ u64 tx_late_collision;
+ u64 tx_excessive_collision;
+ u64 tx_total_collision;
+ u64 tx_pause_honored;
+ u64 tx_drop;
+ u64 tx_jabber;
+ u64 tx_fcs_error;
+ u64 tx_control_frame;
+ u64 tx_oversize;
+ u64 tx_undersize;
+ u64 tx_fragments;
+};
+
+/**
+ * Complete statistics, DMAed from fw to host followed by
+ * BFI_ENET_I2H_STATS_GET_RSP
+ */
+struct bfi_enet_stats {
+ struct bfi_enet_stats_mac mac_stats;
+ struct bfi_enet_stats_bpc bpc_stats;
+ struct bfi_enet_stats_rad rad_stats;
+ struct bfi_enet_stats_rad rlb_stats;
+ struct bfi_enet_stats_fc_rx fc_rx_stats;
+ struct bfi_enet_stats_fc_tx fc_tx_stats;
+ struct bfi_enet_stats_rxf rxf_stats[BFI_ENET_CFG_MAX];
+ struct bfi_enet_stats_txf txf_stats[BFI_ENET_CFG_MAX];
+};
+
+#pragma pack()
+
+#endif /* __BFI_ENET_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
new file mode 100644
index 00000000000..efacff3ab51
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -0,0 +1,452 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/*
+ * bfi_reg.h ASIC register defines for all Brocade adapter ASICs
+ */
+
+#ifndef __BFI_REG_H__
+#define __BFI_REG_H__
+
+#define HOSTFN0_INT_STATUS 0x00014000 /* cb/ct */
+#define HOSTFN1_INT_STATUS 0x00014100 /* cb/ct */
+#define HOSTFN2_INT_STATUS 0x00014300 /* ct */
+#define HOSTFN3_INT_STATUS 0x00014400 /* ct */
+#define HOSTFN0_INT_MSK 0x00014004 /* cb/ct */
+#define HOSTFN1_INT_MSK 0x00014104 /* cb/ct */
+#define HOSTFN2_INT_MSK 0x00014304 /* ct */
+#define HOSTFN3_INT_MSK 0x00014404 /* ct */
+
+#define HOST_PAGE_NUM_FN0 0x00014008 /* cb/ct */
+#define HOST_PAGE_NUM_FN1 0x00014108 /* cb/ct */
+#define HOST_PAGE_NUM_FN2 0x00014308 /* ct */
+#define HOST_PAGE_NUM_FN3 0x00014408 /* ct */
+
+#define APP_PLL_LCLK_CTL_REG 0x00014204 /* cb/ct */
+#define __P_LCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000
+#define __APP_PLL_LCLK_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_LCLK_RESET_TIMER_SH 17
+#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH)
+#define __APP_PLL_LCLK_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_LCLK_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_LCLK_CNTLMT0_1_SH 14
+#define __APP_PLL_LCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH)
+#define __APP_PLL_LCLK_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_LCLK_JITLMT0_1_SH 12
+#define __APP_PLL_LCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH)
+#define __APP_PLL_LCLK_HREF 0x00000800
+#define __APP_PLL_LCLK_HDIV 0x00000400
+#define __APP_PLL_LCLK_P0_1_MK 0x00000300
+#define __APP_PLL_LCLK_P0_1_SH 8
+#define __APP_PLL_LCLK_P0_1(_v) ((_v) << __APP_PLL_LCLK_P0_1_SH)
+#define __APP_PLL_LCLK_Z0_2_MK 0x000000e0
+#define __APP_PLL_LCLK_Z0_2_SH 5
+#define __APP_PLL_LCLK_Z0_2(_v) ((_v) << __APP_PLL_LCLK_Z0_2_SH)
+#define __APP_PLL_LCLK_RSEL200500 0x00000010
+#define __APP_PLL_LCLK_ENARST 0x00000008
+#define __APP_PLL_LCLK_BYPASS 0x00000004
+#define __APP_PLL_LCLK_LRESETN 0x00000002
+#define __APP_PLL_LCLK_ENABLE 0x00000001
+#define APP_PLL_SCLK_CTL_REG 0x00014208 /* cb/ct */
+#define __P_SCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_SCLK_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_SCLK_RESET_TIMER_SH 17
+#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH)
+#define __APP_PLL_SCLK_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_SCLK_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_SCLK_CNTLMT0_1_SH 14
+#define __APP_PLL_SCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH)
+#define __APP_PLL_SCLK_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_SCLK_JITLMT0_1_SH 12
+#define __APP_PLL_SCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH)
+#define __APP_PLL_SCLK_HREF 0x00000800
+#define __APP_PLL_SCLK_HDIV 0x00000400
+#define __APP_PLL_SCLK_P0_1_MK 0x00000300
+#define __APP_PLL_SCLK_P0_1_SH 8
+#define __APP_PLL_SCLK_P0_1(_v) ((_v) << __APP_PLL_SCLK_P0_1_SH)
+#define __APP_PLL_SCLK_Z0_2_MK 0x000000e0
+#define __APP_PLL_SCLK_Z0_2_SH 5
+#define __APP_PLL_SCLK_Z0_2(_v) ((_v) << __APP_PLL_SCLK_Z0_2_SH)
+#define __APP_PLL_SCLK_RSEL200500 0x00000010
+#define __APP_PLL_SCLK_ENARST 0x00000008
+#define __APP_PLL_SCLK_BYPASS 0x00000004
+#define __APP_PLL_SCLK_LRESETN 0x00000002
+#define __APP_PLL_SCLK_ENABLE 0x00000001
+#define __ENABLE_MAC_AHB_1 0x00800000 /* ct */
+#define __ENABLE_MAC_AHB_0 0x00400000 /* ct */
+#define __ENABLE_MAC_1 0x00200000 /* ct */
+#define __ENABLE_MAC_0 0x00100000 /* ct */
+
+#define HOST_SEM0_REG 0x00014230 /* cb/ct */
+#define HOST_SEM1_REG 0x00014234 /* cb/ct */
+#define HOST_SEM2_REG 0x00014238 /* cb/ct */
+#define HOST_SEM3_REG 0x0001423c /* cb/ct */
+#define HOST_SEM4_REG 0x00014610 /* cb/ct */
+#define HOST_SEM5_REG 0x00014614 /* cb/ct */
+#define HOST_SEM6_REG 0x00014618 /* cb/ct */
+#define HOST_SEM7_REG 0x0001461c /* cb/ct */
+#define HOST_SEM0_INFO_REG 0x00014240 /* cb/ct */
+#define HOST_SEM1_INFO_REG 0x00014244 /* cb/ct */
+#define HOST_SEM2_INFO_REG 0x00014248 /* cb/ct */
+#define HOST_SEM3_INFO_REG 0x0001424c /* cb/ct */
+#define HOST_SEM4_INFO_REG 0x00014620 /* cb/ct */
+#define HOST_SEM5_INFO_REG 0x00014624 /* cb/ct */
+#define HOST_SEM6_INFO_REG 0x00014628 /* cb/ct */
+#define HOST_SEM7_INFO_REG 0x0001462c /* cb/ct */
+
+#define HOSTFN0_LPU0_CMD_STAT 0x00019000 /* cb/ct */
+#define HOSTFN0_LPU1_CMD_STAT 0x00019004 /* cb/ct */
+#define HOSTFN1_LPU0_CMD_STAT 0x00019010 /* cb/ct */
+#define HOSTFN1_LPU1_CMD_STAT 0x00019014 /* cb/ct */
+#define HOSTFN2_LPU0_CMD_STAT 0x00019150 /* ct */
+#define HOSTFN2_LPU1_CMD_STAT 0x00019154 /* ct */
+#define HOSTFN3_LPU0_CMD_STAT 0x00019160 /* ct */
+#define HOSTFN3_LPU1_CMD_STAT 0x00019164 /* ct */
+#define LPU0_HOSTFN0_CMD_STAT 0x00019008 /* cb/ct */
+#define LPU1_HOSTFN0_CMD_STAT 0x0001900c /* cb/ct */
+#define LPU0_HOSTFN1_CMD_STAT 0x00019018 /* cb/ct */
+#define LPU1_HOSTFN1_CMD_STAT 0x0001901c /* cb/ct */
+#define LPU0_HOSTFN2_CMD_STAT 0x00019158 /* ct */
+#define LPU1_HOSTFN2_CMD_STAT 0x0001915c /* ct */
+#define LPU0_HOSTFN3_CMD_STAT 0x00019168 /* ct */
+#define LPU1_HOSTFN3_CMD_STAT 0x0001916c /* ct */
+
+#define PSS_CTL_REG 0x00018800 /* cb/ct */
+#define __PSS_I2C_CLK_DIV_MK 0x007f0000
+#define __PSS_I2C_CLK_DIV_SH 16
+#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
+#define __PSS_LMEM_INIT_DONE 0x00001000
+#define __PSS_LMEM_RESET 0x00000200
+#define __PSS_LMEM_INIT_EN 0x00000100
+#define __PSS_LPU1_RESET 0x00000002
+#define __PSS_LPU0_RESET 0x00000001
+#define PSS_ERR_STATUS_REG 0x00018810 /* cb/ct */
+#define ERR_SET_REG 0x00018818 /* cb/ct */
+#define PSS_GPIO_OUT_REG 0x000188c0 /* cb/ct */
+#define __PSS_GPIO_OUT_REG 0x00000fff
+#define PSS_GPIO_OE_REG 0x000188c8 /* cb/ct */
+#define __PSS_GPIO_OE_REG 0x000000ff
+
+#define HOSTFN0_LPU_MBOX0_0 0x00019200 /* cb/ct */
+#define HOSTFN1_LPU_MBOX0_8 0x00019260 /* cb/ct */
+#define LPU_HOSTFN0_MBOX0_0 0x00019280 /* cb/ct */
+#define LPU_HOSTFN1_MBOX0_8 0x000192e0 /* cb/ct */
+#define HOSTFN2_LPU_MBOX0_0 0x00019400 /* ct */
+#define HOSTFN3_LPU_MBOX0_8 0x00019460 /* ct */
+#define LPU_HOSTFN2_MBOX0_0 0x00019480 /* ct */
+#define LPU_HOSTFN3_MBOX0_8 0x000194e0 /* ct */
+
+#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c /* ct */
+
+#define MBIST_CTL_REG 0x00014220 /* ct */
+#define __EDRAM_BISTR_START 0x00000004
+#define MBIST_STAT_REG 0x00014224 /* ct */
+#define ETH_MAC_SER_REG 0x00014288 /* ct */
+#define __APP_EMS_CKBUFAMPIN 0x00000020
+#define __APP_EMS_REFCLKSEL 0x00000010
+#define __APP_EMS_CMLCKSEL 0x00000008
+#define __APP_EMS_REFCKBUFEN2 0x00000004
+#define __APP_EMS_REFCKBUFEN1 0x00000002
+#define __APP_EMS_CHANNEL_SEL 0x00000001
+#define FNC_PERS_REG 0x00014604 /* ct */
+#define __F3_FUNCTION_ACTIVE 0x80000000
+#define __F3_FUNCTION_MODE 0x40000000
+#define __F3_PORT_MAP_MK 0x30000000
+#define __F3_PORT_MAP_SH 28
+#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
+#define __F3_VM_MODE 0x08000000
+#define __F3_INTX_STATUS_MK 0x07000000
+#define __F3_INTX_STATUS_SH 24
+#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
+#define __F2_FUNCTION_ACTIVE 0x00800000
+#define __F2_FUNCTION_MODE 0x00400000
+#define __F2_PORT_MAP_MK 0x00300000
+#define __F2_PORT_MAP_SH 20
+#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
+#define __F2_VM_MODE 0x00080000
+#define __F2_INTX_STATUS_MK 0x00070000
+#define __F2_INTX_STATUS_SH 16
+#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
+#define __F1_FUNCTION_ACTIVE 0x00008000
+#define __F1_FUNCTION_MODE 0x00004000
+#define __F1_PORT_MAP_MK 0x00003000
+#define __F1_PORT_MAP_SH 12
+#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
+#define __F1_VM_MODE 0x00000800
+#define __F1_INTX_STATUS_MK 0x00000700
+#define __F1_INTX_STATUS_SH 8
+#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
+#define __F0_FUNCTION_ACTIVE 0x00000080
+#define __F0_FUNCTION_MODE 0x00000040
+#define __F0_PORT_MAP_MK 0x00000030
+#define __F0_PORT_MAP_SH 4
+#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
+#define __F0_VM_MODE 0x00000008
+#define __F0_INTX_STATUS 0x00000007
+enum {
+ __F0_INTX_STATUS_MSIX = 0x0,
+ __F0_INTX_STATUS_INTA = 0x1,
+ __F0_INTX_STATUS_INTB = 0x2,
+ __F0_INTX_STATUS_INTC = 0x3,
+ __F0_INTX_STATUS_INTD = 0x4,
+};
+
+#define OP_MODE 0x0001460c
+#define __APP_ETH_CLK_LOWSPEED 0x00000004
+#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
+#define __GLOBAL_FCOE_MODE 0x00000001
+#define FW_INIT_HALT_P0 0x000191ac
+#define __FW_INIT_HALT_P 0x00000001
+#define FW_INIT_HALT_P1 0x000191bc
+#define PMM_1T_RESET_REG_P0 0x0002381c
+#define __PMM_1T_RESET_P 0x00000001
+#define PMM_1T_RESET_REG_P1 0x00023c1c
+
+/**
+ * Brocade 1860 Adapter specific defines
+ */
+#define CT2_PCI_CPQ_BASE 0x00030000
+#define CT2_PCI_APP_BASE 0x00030100
+#define CT2_PCI_ETH_BASE 0x00030400
+
+/*
+ * APP block registers
+ */
+#define CT2_HOSTFN_INT_STATUS (CT2_PCI_APP_BASE + 0x00)
+#define CT2_HOSTFN_INTR_MASK (CT2_PCI_APP_BASE + 0x04)
+#define CT2_HOSTFN_PERSONALITY0 (CT2_PCI_APP_BASE + 0x08)
+#define __PME_STATUS_ 0x00200000
+#define __PF_VF_BAR_SIZE_MODE__MK 0x00180000
+#define __PF_VF_BAR_SIZE_MODE__SH 19
+#define __PF_VF_BAR_SIZE_MODE_(_v) ((_v) << __PF_VF_BAR_SIZE_MODE__SH)
+#define __FC_LL_PORT_MAP__MK 0x00060000
+#define __FC_LL_PORT_MAP__SH 17
+#define __FC_LL_PORT_MAP_(_v) ((_v) << __FC_LL_PORT_MAP__SH)
+#define __PF_VF_ACTIVE_ 0x00010000
+#define __PF_VF_CFG_RDY_ 0x00008000
+#define __PF_VF_ENABLE_ 0x00004000
+#define __PF_DRIVER_ACTIVE_ 0x00002000
+#define __PF_PME_SEND_ENABLE_ 0x00001000
+#define __PF_EXROM_OFFSET__MK 0x00000ff0
+#define __PF_EXROM_OFFSET__SH 4
+#define __PF_EXROM_OFFSET_(_v) ((_v) << __PF_EXROM_OFFSET__SH)
+#define __FC_LL_MODE_ 0x00000008
+#define __PF_INTX_PIN_ 0x00000007
+#define CT2_HOSTFN_PERSONALITY1 (CT2_PCI_APP_BASE + 0x0C)
+#define __PF_NUM_QUEUES1__MK 0xff000000
+#define __PF_NUM_QUEUES1__SH 24
+#define __PF_NUM_QUEUES1_(_v) ((_v) << __PF_NUM_QUEUES1__SH)
+#define __PF_VF_QUE_OFFSET1__MK 0x00ff0000
+#define __PF_VF_QUE_OFFSET1__SH 16
+#define __PF_VF_QUE_OFFSET1_(_v) ((_v) << __PF_VF_QUE_OFFSET1__SH)
+#define __PF_VF_NUM_QUEUES__MK 0x0000ff00
+#define __PF_VF_NUM_QUEUES__SH 8
+#define __PF_VF_NUM_QUEUES_(_v) ((_v) << __PF_VF_NUM_QUEUES__SH)
+#define __PF_VF_QUE_OFFSET_ 0x000000ff
+#define CT2_HOSTFN_PAGE_NUM (CT2_PCI_APP_BASE + 0x18)
+#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38)
+
+/*
+ * Brocade 1860 adapter CPQ block registers
+ */
+#define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00)
+#define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20)
+#define CT2_LPU0_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x40)
+#define CT2_LPU1_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x60)
+#define CT2_HOSTFN_LPU0_CMD_STAT (CT2_PCI_CPQ_BASE + 0x80)
+#define CT2_HOSTFN_LPU1_CMD_STAT (CT2_PCI_CPQ_BASE + 0x84)
+#define CT2_LPU0_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x88)
+#define CT2_LPU1_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x8c)
+#define CT2_HOSTFN_LPU0_READ_STAT (CT2_PCI_CPQ_BASE + 0x90)
+#define CT2_HOSTFN_LPU1_READ_STAT (CT2_PCI_CPQ_BASE + 0x94)
+#define CT2_LPU0_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x98)
+#define CT2_LPU1_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x9C)
+#define CT2_HOST_SEM0_REG 0x000148f0
+#define CT2_HOST_SEM1_REG 0x000148f4
+#define CT2_HOST_SEM2_REG 0x000148f8
+#define CT2_HOST_SEM3_REG 0x000148fc
+#define CT2_HOST_SEM4_REG 0x00014900
+#define CT2_HOST_SEM5_REG 0x00014904
+#define CT2_HOST_SEM6_REG 0x00014908
+#define CT2_HOST_SEM7_REG 0x0001490c
+#define CT2_HOST_SEM0_INFO_REG 0x000148b0
+#define CT2_HOST_SEM1_INFO_REG 0x000148b4
+#define CT2_HOST_SEM2_INFO_REG 0x000148b8
+#define CT2_HOST_SEM3_INFO_REG 0x000148bc
+#define CT2_HOST_SEM4_INFO_REG 0x000148c0
+#define CT2_HOST_SEM5_INFO_REG 0x000148c4
+#define CT2_HOST_SEM6_INFO_REG 0x000148c8
+#define CT2_HOST_SEM7_INFO_REG 0x000148cc
+
+#define CT2_APP_PLL_LCLK_CTL_REG 0x00014808
+#define __APP_LPUCLK_HALFSPEED 0x40000000
+#define __APP_PLL_LCLK_LOAD 0x20000000
+#define __APP_PLL_LCLK_FBCNT_MK 0x1fe00000
+#define __APP_PLL_LCLK_FBCNT_SH 21
+#define __APP_PLL_LCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH)
+enum {
+ __APP_PLL_LCLK_FBCNT_425_MHZ = 6,
+ __APP_PLL_LCLK_FBCNT_468_MHZ = 4,
+};
+#define __APP_PLL_LCLK_EXTFB 0x00000800
+#define __APP_PLL_LCLK_ENOUTS 0x00000400
+#define __APP_PLL_LCLK_RATE 0x00000010
+#define CT2_APP_PLL_SCLK_CTL_REG 0x0001480c
+#define __P_SCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_SCLK_REFCLK_SEL 0x40000000
+#define __APP_PLL_SCLK_CLK_DIV2 0x20000000
+#define __APP_PLL_SCLK_LOAD 0x10000000
+#define __APP_PLL_SCLK_FBCNT_MK 0x0ff00000
+#define __APP_PLL_SCLK_FBCNT_SH 20
+#define __APP_PLL_SCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH)
+enum {
+ __APP_PLL_SCLK_FBCNT_NORM = 6,
+ __APP_PLL_SCLK_FBCNT_10G_FC = 10,
+};
+#define __APP_PLL_SCLK_EXTFB 0x00000800
+#define __APP_PLL_SCLK_ENOUTS 0x00000400
+#define __APP_PLL_SCLK_RATE 0x00000010
+#define CT2_PCIE_MISC_REG 0x00014804
+#define __ETH_CLK_ENABLE_PORT1 0x00000010
+#define CT2_CHIP_MISC_PRG 0x000148a4
+#define __ETH_CLK_ENABLE_PORT0 0x00004000
+#define __APP_LPU_SPEED 0x00000002
+#define CT2_MBIST_STAT_REG 0x00014818
+#define CT2_MBIST_CTL_REG 0x0001481c
+#define CT2_PMM_1T_CONTROL_REG_P0 0x0002381c
+#define __PMM_1T_PNDB_P 0x00000002
+#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c
+#define CT2_WGN_STATUS 0x00014990
+#define __A2T_AHB_LOAD 0x00000800
+#define __WGN_READY 0x00000400
+#define __GLBL_PF_VF_CFG_RDY 0x00000200
+#define CT2_NFC_CSR_SET_REG 0x00027424
+#define __HALT_NFC_CONTROLLER 0x00000002
+#define __NFC_CONTROLLER_HALTED 0x00001000
+
+#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
+#define __CSI_MAC_RESET 0x00000010
+#define __CSI_MAC_AHB_RESET 0x00000008
+#define CT2_CSI_MAC1_CONTROL_REG 0x000270d4
+#define CT2_CSI_MAC_CONTROL_REG(__n) \
+ (CT2_CSI_MAC0_CONTROL_REG + \
+ (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
+
+/*
+ * Name semaphore registers based on usage
+ */
+#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
+#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
+#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
+#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
+#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
+
+/*
+ * CT2 semaphore register locations changed
+ */
+#define CT2_BFA_IOC0_HBEAT_REG CT2_HOST_SEM0_INFO_REG
+#define CT2_BFA_IOC0_STATE_REG CT2_HOST_SEM1_INFO_REG
+#define CT2_BFA_IOC1_HBEAT_REG CT2_HOST_SEM2_INFO_REG
+#define CT2_BFA_IOC1_STATE_REG CT2_HOST_SEM3_INFO_REG
+#define CT2_BFA_FW_USE_COUNT CT2_HOST_SEM4_INFO_REG
+#define CT2_BFA_IOC_FAIL_SYNC CT2_HOST_SEM5_INFO_REG
+
+#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+
+/*
+ * And corresponding host interrupt status bit field defines
+ */
+#define __HFN_INT_CPE_Q0 0x00000001U
+#define __HFN_INT_CPE_Q1 0x00000002U
+#define __HFN_INT_CPE_Q2 0x00000004U
+#define __HFN_INT_CPE_Q3 0x00000008U
+#define __HFN_INT_CPE_Q4 0x00000010U
+#define __HFN_INT_CPE_Q5 0x00000020U
+#define __HFN_INT_CPE_Q6 0x00000040U
+#define __HFN_INT_CPE_Q7 0x00000080U
+#define __HFN_INT_RME_Q0 0x00000100U
+#define __HFN_INT_RME_Q1 0x00000200U
+#define __HFN_INT_RME_Q2 0x00000400U
+#define __HFN_INT_RME_Q3 0x00000800U
+#define __HFN_INT_RME_Q4 0x00001000U
+#define __HFN_INT_RME_Q5 0x00002000U
+#define __HFN_INT_RME_Q6 0x00004000U
+#define __HFN_INT_RME_Q7 0x00008000U
+#define __HFN_INT_ERR_EMC 0x00010000U
+#define __HFN_INT_ERR_LPU0 0x00020000U
+#define __HFN_INT_ERR_LPU1 0x00040000U
+#define __HFN_INT_ERR_PSS 0x00080000U
+#define __HFN_INT_MBOX_LPU0 0x00100000U
+#define __HFN_INT_MBOX_LPU1 0x00200000U
+#define __HFN_INT_MBOX1_LPU0 0x00400000U
+#define __HFN_INT_MBOX1_LPU1 0x00800000U
+#define __HFN_INT_LL_HALT 0x01000000U
+#define __HFN_INT_CPE_MASK 0x000000ffU
+#define __HFN_INT_RME_MASK 0x0000ff00U
+#define __HFN_INT_ERR_MASK \
+ (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \
+ __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT)
+#define __HFN_INT_FN0_MASK \
+ (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+ __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+ __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0)
+#define __HFN_INT_FN1_MASK \
+ (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+ __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+ __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1)
+
+/*
+ * Host interrupt status defines for 1860
+ */
+#define __HFN_INT_MBOX_LPU0_CT2 0x00010000U
+#define __HFN_INT_MBOX_LPU1_CT2 0x00020000U
+#define __HFN_INT_ERR_PSS_CT2 0x00040000U
+#define __HFN_INT_ERR_LPU0_CT2 0x00080000U
+#define __HFN_INT_ERR_LPU1_CT2 0x00100000U
+#define __HFN_INT_CPQ_HALT_CT2 0x00200000U
+#define __HFN_INT_ERR_WGN_CT2 0x00400000U
+#define __HFN_INT_ERR_LEHRX_CT2 0x00800000U
+#define __HFN_INT_ERR_LEHTX_CT2 0x01000000U
+#define __HFN_INT_ERR_MASK_CT2 \
+ (__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \
+ __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \
+ __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \
+ __HFN_INT_ERR_LEHTX_CT2)
+#define __HFN_INT_FN0_MASK_CT2 \
+ (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+ __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+ __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2)
+#define __HFN_INT_FN1_MASK_CT2 \
+ (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+ __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+ __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2)
+
+/*
+ * asic memory map.
+ */
+#define PSS_SMEM_PAGE_START 0x8000
+#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
+#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
+
+#endif /* __BFI_REG_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
new file mode 100644
index 00000000000..4d7a5de08e1
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -0,0 +1,575 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BNA_H__
+#define __BNA_H__
+
+#include "bfa_defs.h"
+#include "bfa_ioc.h"
+#include "bfi_enet.h"
+#include "bna_types.h"
+
+extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
+
+/**
+ *
+ * Macros and constants
+ *
+ */
+
+#define BNA_IOC_TIMER_FREQ 200
+
+/* Log string size */
+#define BNA_MESSAGE_SIZE 256
+
+#define bna_is_small_rxq(_id) ((_id) & 0x1)
+
+#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
+ (!memcmp((_mac1), (_mac2), sizeof(mac_t)))
+
+#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
+
+#define BNA_TO_POWER_OF_2(x) \
+do { \
+ int _shift = 0; \
+ while ((x) && (x) != 1) { \
+ (x) >>= 1; \
+ _shift++; \
+ } \
+ (x) <<= _shift; \
+} while (0)
+
+#define BNA_TO_POWER_OF_2_HIGH(x) \
+do { \
+ int n = 1; \
+ while (n < (x)) \
+ n <<= 1; \
+ (x) = n; \
+} while (0)
+
+/*
+ * input : _addr-> os dma addr in host endian format,
+ * output : _bna_dma_addr-> pointer to hw dma addr
+ */
+#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \
+do { \
+ u64 tmp_addr = \
+ cpu_to_be64((u64)(_addr)); \
+ (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
+ (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
+} while (0)
+
+/*
+ * input : _bna_dma_addr-> pointer to hw dma addr
+ * output : _addr-> os dma addr in host endian format
+ */
+#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \
+do { \
+ (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \
+ | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
+} while (0)
+
+#define containing_rec(addr, type, field) \
+ ((type *)((unsigned char *)(addr) - \
+ (unsigned char *)(&((type *)0)->field)))
+
+#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
+
+/* TxQ element is 64 bytes */
+#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
+#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
+
+#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
+ (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
+}
+
+/* RxQ element is 8 bytes */
+#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
+#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
+
+#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> \
+ BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
+ (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
+}
+
+/* CQ element is 16 bytes */
+#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
+#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
+
+#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ \
+ page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> \
+ BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
+ (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
+}
+
+#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
+ (&((_cast *)(_q_base))[(_qe_idx)])
+
+#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
+
+#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
+ ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
+
+#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
+ (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
+ (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
+ ((_q_depth) - 1))
+
+#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
+ ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
+ (_q_depth - 1))
+
+#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
+
+#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
+
+#define BNA_Q_PI_ADD(_q_ptr, _num) \
+ (_q_ptr)->q.producer_index = \
+ (((_q_ptr)->q.producer_index + (_num)) & \
+ ((_q_ptr)->q.q_depth - 1))
+
+#define BNA_Q_CI_ADD(_q_ptr, _num) \
+ (_q_ptr)->q.consumer_index = \
+ (((_q_ptr)->q.consumer_index + (_num)) \
+ & ((_q_ptr)->q.q_depth - 1))
+
+#define BNA_Q_FREE_COUNT(_q_ptr) \
+ (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
+
+#define BNA_Q_IN_USE_COUNT(_q_ptr) \
+ (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
+
+#define BNA_LARGE_PKT_SIZE 1000
+
+#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
+do { \
+ if ((_len) > BNA_LARGE_PKT_SIZE) { \
+ (_pkt)->large_pkt_cnt++; \
+ } else { \
+ (_pkt)->small_pkt_cnt++; \
+ } \
+} while (0)
+
+#define call_rxf_stop_cbfn(rxf) \
+do { \
+ if ((rxf)->stop_cbfn) { \
+ void (*cbfn)(struct bna_rx *); \
+ struct bna_rx *cbarg; \
+ cbfn = (rxf)->stop_cbfn; \
+ cbarg = (rxf)->stop_cbarg; \
+ (rxf)->stop_cbfn = NULL; \
+ (rxf)->stop_cbarg = NULL; \
+ cbfn(cbarg); \
+ } \
+} while (0)
+
+#define call_rxf_start_cbfn(rxf) \
+do { \
+ if ((rxf)->start_cbfn) { \
+ void (*cbfn)(struct bna_rx *); \
+ struct bna_rx *cbarg; \
+ cbfn = (rxf)->start_cbfn; \
+ cbarg = (rxf)->start_cbarg; \
+ (rxf)->start_cbfn = NULL; \
+ (rxf)->start_cbarg = NULL; \
+ cbfn(cbarg); \
+ } \
+} while (0)
+
+#define call_rxf_cam_fltr_cbfn(rxf) \
+do { \
+ if ((rxf)->cam_fltr_cbfn) { \
+ void (*cbfn)(struct bnad *, struct bna_rx *); \
+ struct bnad *cbarg; \
+ cbfn = (rxf)->cam_fltr_cbfn; \
+ cbarg = (rxf)->cam_fltr_cbarg; \
+ (rxf)->cam_fltr_cbfn = NULL; \
+ (rxf)->cam_fltr_cbarg = NULL; \
+ cbfn(cbarg, rxf->rx); \
+ } \
+} while (0)
+
+#define call_rxf_pause_cbfn(rxf) \
+do { \
+ if ((rxf)->oper_state_cbfn) { \
+ void (*cbfn)(struct bnad *, struct bna_rx *); \
+ struct bnad *cbarg; \
+ cbfn = (rxf)->oper_state_cbfn; \
+ cbarg = (rxf)->oper_state_cbarg; \
+ (rxf)->oper_state_cbfn = NULL; \
+ (rxf)->oper_state_cbarg = NULL; \
+ cbfn(cbarg, rxf->rx); \
+ } \
+} while (0)
+
+#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)
+
+#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))
+
+#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))
+
+#define xxx_enable(mode, bitmask, xxx) \
+do { \
+ bitmask |= xxx; \
+ mode |= xxx; \
+} while (0)
+
+#define xxx_disable(mode, bitmask, xxx) \
+do { \
+ bitmask |= xxx; \
+ mode &= ~xxx; \
+} while (0)
+
+#define xxx_inactive(mode, bitmask, xxx) \
+do { \
+ bitmask &= ~xxx; \
+ mode &= ~xxx; \
+} while (0)
+
+#define is_promisc_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define is_promisc_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define is_default_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define is_default_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define is_allmulti_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define is_allmulti_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define GET_RXQS(rxp, q0, q1) do { \
+ switch ((rxp)->type) { \
+ case BNA_RXP_SINGLE: \
+ (q0) = rxp->rxq.single.only; \
+ (q1) = NULL; \
+ break; \
+ case BNA_RXP_SLR: \
+ (q0) = rxp->rxq.slr.large; \
+ (q1) = rxp->rxq.slr.small; \
+ break; \
+ case BNA_RXP_HDS: \
+ (q0) = rxp->rxq.hds.data; \
+ (q1) = rxp->rxq.hds.hdr; \
+ break; \
+ } \
+} while (0)
+
+#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask)
+
+#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)
+
+#define bna_tx_from_rid(_bna, _rid, _tx) \
+do { \
+ struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
+ struct bna_tx *__tx; \
+ struct list_head *qe; \
+ _tx = NULL; \
+ list_for_each(qe, &__tx_mod->tx_active_q) { \
+ __tx = (struct bna_tx *)qe; \
+ if (__tx->rid == (_rid)) { \
+ (_tx) = __tx; \
+ break; \
+ } \
+ } \
+} while (0)
+
+#define bna_rx_from_rid(_bna, _rid, _rx) \
+do { \
+ struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod; \
+ struct bna_rx *__rx; \
+ struct list_head *qe; \
+ _rx = NULL; \
+ list_for_each(qe, &__rx_mod->rx_active_q) { \
+ __rx = (struct bna_rx *)qe; \
+ if (__rx->rid == (_rid)) { \
+ (_rx) = __rx; \
+ break; \
+ } \
+ } \
+} while (0)
+
+/**
+ *
+ * Inline functions
+ *
+ */
+
+static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+ list_for_each(qe, q) {
+ if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) {
+ mac = (struct bna_mac *)qe;
+ break;
+ }
+ }
+ return mac;
+}
+
+#define bna_attr(_bna) (&(_bna)->ioceth.attr)
+
+/**
+ *
+ * Function prototypes
+ *
+ */
+
+/**
+ * BNA
+ */
+
+/* FW response handlers */
+void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);
+
+/* APIs for BNAD */
+void bna_res_req(struct bna_res_info *res_info);
+void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info);
+void bna_init(struct bna *bna, struct bnad *bnad,
+ struct bfa_pcidev *pcidev,
+ struct bna_res_info *res_info);
+void bna_mod_init(struct bna *bna, struct bna_res_info *res_info);
+void bna_uninit(struct bna *bna);
+int bna_num_txq_set(struct bna *bna, int num_txq);
+int bna_num_rxp_set(struct bna *bna, int num_rxp);
+void bna_hw_stats_get(struct bna *bna);
+
+/* APIs for RxF */
+struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
+void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
+ struct bna_mac *mac);
+struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
+void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
+ struct bna_mac *mac);
+struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
+void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
+ struct bna_mcam_handle *handle);
+
+/**
+ * MBOX
+ */
+
+/* API for BNAD */
+void bna_mbox_handler(struct bna *bna, u32 intr_status);
+
+/**
+ * ETHPORT
+ */
+
+/* Callbacks for RX */
+void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
+void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);
+
+/**
+ * TX MODULE AND TX
+ */
+/* FW response handelrs */
+void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
+ struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx,
+ struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod);
+
+/* APIs for BNA */
+void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
+
+/* APIs for ENET */
+void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
+void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
+void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
+
+/* APIs for BNAD */
+void bna_tx_res_req(int num_txq, int txq_depth,
+ struct bna_res_info *res_info);
+struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_tx_config *tx_cfg,
+ const struct bna_tx_event_cbfn *tx_cbfn,
+ struct bna_res_info *res_info, void *priv);
+void bna_tx_destroy(struct bna_tx *tx);
+void bna_tx_enable(struct bna_tx *tx);
+void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_tx *));
+void bna_tx_cleanup_complete(struct bna_tx *tx);
+void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
+
+/**
+ * RX MODULE, RX, RXF
+ */
+
+/* FW response handlers */
+void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
+ struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
+ struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
+ struct bfi_msgq_mhdr *msghdr);
+
+/* APIs for BNA */
+void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
+
+/* APIs for ENET */
+void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
+void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
+void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);
+
+/* APIs for BNAD */
+void bna_rx_res_req(struct bna_rx_config *rx_config,
+ struct bna_res_info *res_info);
+struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_rx_config *rx_cfg,
+ const struct bna_rx_event_cbfn *rx_cbfn,
+ struct bna_res_info *res_info, void *priv);
+void bna_rx_destroy(struct bna_rx *rx);
+void bna_rx_enable(struct bna_rx *rx);
+void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_rx *));
+void bna_rx_cleanup_complete(struct bna_rx *rx);
+void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
+void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
+void bna_rx_dim_update(struct bna_ccb *ccb);
+enum bna_cb_status
+bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
+ enum bna_rxmode bitmask,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
+void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
+void bna_rx_vlanfilter_enable(struct bna_rx *rx);
+/**
+ * ENET
+ */
+
+/* API for RX */
+int bna_enet_mtu_get(struct bna_enet *enet);
+
+/* Callbacks for TX, RX */
+void bna_enet_cb_tx_stopped(struct bna_enet *enet);
+void bna_enet_cb_rx_stopped(struct bna_enet *enet);
+
+/* API for BNAD */
+void bna_enet_enable(struct bna_enet *enet);
+void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
+ void (*cbfn)(void *));
+void bna_enet_pause_config(struct bna_enet *enet,
+ struct bna_pause_config *pause_config,
+ void (*cbfn)(struct bnad *));
+void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
+ void (*cbfn)(struct bnad *));
+void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
+
+/**
+ * IOCETH
+ */
+
+/* APIs for BNAD */
+void bna_ioceth_enable(struct bna_ioceth *ioceth);
+void bna_ioceth_disable(struct bna_ioceth *ioceth,
+ enum bna_cleanup_type type);
+
+/**
+ * BNAD
+ */
+
+/* Callbacks for ENET */
+void bnad_cb_ethport_link_status(struct bnad *bnad,
+ enum bna_link_status status);
+
+/* Callbacks for IOCETH */
+void bnad_cb_ioceth_ready(struct bnad *bnad);
+void bnad_cb_ioceth_failed(struct bnad *bnad);
+void bnad_cb_ioceth_disabled(struct bnad *bnad);
+void bnad_cb_mbox_intr_enable(struct bnad *bnad);
+void bnad_cb_mbox_intr_disable(struct bnad *bnad);
+
+/* Callbacks for BNA */
+void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
+ struct bna_stats *stats);
+
+#endif /* __BNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
new file mode 100644
index 00000000000..26f5c5abfd1
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -0,0 +1,2144 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include "bna.h"
+
+static inline int
+ethport_can_be_up(struct bna_ethport *ethport)
+{
+ int ready = 0;
+ if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
+ ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
+ (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
+ (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
+ else
+ ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
+ (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
+ !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
+ return ready;
+}
+
+#define ethport_is_up ethport_can_be_up
+
+enum bna_ethport_event {
+ ETHPORT_E_START = 1,
+ ETHPORT_E_STOP = 2,
+ ETHPORT_E_FAIL = 3,
+ ETHPORT_E_UP = 4,
+ ETHPORT_E_DOWN = 5,
+ ETHPORT_E_FWRESP_UP_OK = 6,
+ ETHPORT_E_FWRESP_DOWN = 7,
+ ETHPORT_E_FWRESP_UP_FAIL = 8,
+};
+
+enum bna_enet_event {
+ ENET_E_START = 1,
+ ENET_E_STOP = 2,
+ ENET_E_FAIL = 3,
+ ENET_E_PAUSE_CFG = 4,
+ ENET_E_MTU_CFG = 5,
+ ENET_E_FWRESP_PAUSE = 6,
+ ENET_E_CHLD_STOPPED = 7,
+};
+
+enum bna_ioceth_event {
+ IOCETH_E_ENABLE = 1,
+ IOCETH_E_DISABLE = 2,
+ IOCETH_E_IOC_RESET = 3,
+ IOCETH_E_IOC_FAILED = 4,
+ IOCETH_E_IOC_READY = 5,
+ IOCETH_E_ENET_ATTR_RESP = 6,
+ IOCETH_E_ENET_STOPPED = 7,
+ IOCETH_E_IOC_DISABLED = 8,
+};
+
+#define bna_stats_copy(_name, _type) \
+do { \
+ count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64); \
+ stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats; \
+ stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats; \
+ for (i = 0; i < count; i++) \
+ stats_dst[i] = be64_to_cpu(stats_src[i]); \
+} while (0) \
+
+/*
+ * FW response handlers
+ */
+
+static void
+bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
+
+ if (ethport_can_be_up(ethport))
+ bfa_fsm_send_event(ethport, ETHPORT_E_UP);
+}
+
+static void
+bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ int ethport_up = ethport_is_up(ethport);
+
+ ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
+
+ if (ethport_up)
+ bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
+}
+
+static void
+bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_enable_req *admin_req =
+ &ethport->bfi_enet_cmd.admin_req;
+ struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
+
+ switch (admin_req->enable) {
+ case BNA_STATUS_T_ENABLED:
+ if (rsp->error == BFI_ENET_CMD_OK)
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
+ else {
+ ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
+ }
+ break;
+
+ case BNA_STATUS_T_DISABLED:
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
+ ethport->link_status = BNA_LINK_DOWN;
+ ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
+ break;
+ }
+}
+
+static void
+bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_diag_lb_req *diag_lb_req =
+ &ethport->bfi_enet_cmd.lpbk_req;
+ struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
+
+ switch (diag_lb_req->enable) {
+ case BNA_STATUS_T_ENABLED:
+ if (rsp->error == BFI_ENET_CMD_OK)
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
+ else {
+ ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
+ }
+ break;
+
+ case BNA_STATUS_T_DISABLED:
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
+ break;
+ }
+}
+
+static void
+bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
+{
+ bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
+}
+
+static void
+bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;
+
+ /**
+ * Store only if not set earlier, since BNAD can override the HW
+ * attributes
+ */
+ if (!ioceth->attr.fw_query_complete) {
+ ioceth->attr.num_txq = ntohl(rsp->max_cfg);
+ ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
+ ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
+ ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
+ ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
+ ioceth->attr.fw_query_complete = true;
+ }
+
+ bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
+}
+
+static void
+bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
+ u64 *stats_src;
+ u64 *stats_dst;
+ u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
+ u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
+ int count;
+ int i;
+
+ bna_stats_copy(mac, mac);
+ bna_stats_copy(bpc, bpc);
+ bna_stats_copy(rad, rad);
+ bna_stats_copy(rlb, rad);
+ bna_stats_copy(fc_rx, fc_rx);
+ bna_stats_copy(fc_tx, fc_tx);
+
+ stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);
+
+ /* Copy Rxf stats to SW area, scatter them while copying */
+ for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
+ stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
+ memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
+ if (rx_enet_mask & ((u32)(1 << i))) {
+ int k;
+ count = sizeof(struct bfi_enet_stats_rxf) /
+ sizeof(u64);
+ for (k = 0; k < count; k++) {
+ stats_dst[k] = be64_to_cpu(*stats_src);
+ stats_src++;
+ }
+ }
+ }
+
+ /* Copy Txf stats to SW area, scatter them while copying */
+ for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
+ stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
+ memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
+ if (tx_enet_mask & ((u32)(1 << i))) {
+ int k;
+ count = sizeof(struct bfi_enet_stats_txf) /
+ sizeof(u64);
+ for (k = 0; k < count; k++) {
+ stats_dst[k] = be64_to_cpu(*stats_src);
+ stats_src++;
+ }
+ }
+ }
+
+ bna->stats_mod.stats_get_busy = false;
+ bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
+}
+
+static void
+bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ ethport->link_status = BNA_LINK_UP;
+
+ /* Dispatch events */
+ ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
+}
+
+static void
+bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ ethport->link_status = BNA_LINK_DOWN;
+
+ /* Dispatch events */
+ ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
+}
+
+static void
+bna_err_handler(struct bna *bna, u32 intr_status)
+{
+ if (BNA_IS_HALT_INTR(bna, intr_status))
+ bna_halt_clear(bna);
+
+ bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
+}
+
+void
+bna_mbox_handler(struct bna *bna, u32 intr_status)
+{
+ if (BNA_IS_ERR_INTR(bna, intr_status)) {
+ bna_err_handler(bna, intr_status);
+ return;
+ }
+ if (BNA_IS_MBOX_INTR(bna, intr_status))
+ bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
+}
+
+static void
+bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
+{
+ struct bna *bna = (struct bna *)arg;
+ struct bna_tx *tx;
+ struct bna_rx *rx;
+
+ switch (msghdr->msg_id) {
+ case BFI_ENET_I2H_RX_CFG_SET_RSP:
+ bna_rx_from_rid(bna, msghdr->enet_id, rx);
+ if (rx)
+ bna_bfi_rx_enet_start_rsp(rx, msghdr);
+ break;
+
+ case BFI_ENET_I2H_RX_CFG_CLR_RSP:
+ bna_rx_from_rid(bna, msghdr->enet_id, rx);
+ if (rx)
+ bna_bfi_rx_enet_stop_rsp(rx, msghdr);
+ break;
+
+ case BFI_ENET_I2H_RIT_CFG_RSP:
+ case BFI_ENET_I2H_RSS_CFG_RSP:
+ case BFI_ENET_I2H_RSS_ENABLE_RSP:
+ case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
+ case BFI_ENET_I2H_RX_DEFAULT_RSP:
+ case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
+ case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
+ case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
+ case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
+ case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
+ case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
+ case BFI_ENET_I2H_RX_VLAN_SET_RSP:
+ case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
+ bna_rx_from_rid(bna, msghdr->enet_id, rx);
+ if (rx)
+ bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
+ break;
+
+ case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
+ bna_rx_from_rid(bna, msghdr->enet_id, rx);
+ if (rx)
+ bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
+ break;
+
+ case BFI_ENET_I2H_TX_CFG_SET_RSP:
+ bna_tx_from_rid(bna, msghdr->enet_id, tx);
+ if (tx)
+ bna_bfi_tx_enet_start_rsp(tx, msghdr);
+ break;
+
+ case BFI_ENET_I2H_TX_CFG_CLR_RSP:
+ bna_tx_from_rid(bna, msghdr->enet_id, tx);
+ if (tx)
+ bna_bfi_tx_enet_stop_rsp(tx, msghdr);
+ break;
+
+ case BFI_ENET_I2H_PORT_ADMIN_RSP:
+ bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
+ bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_SET_PAUSE_RSP:
+ bna_bfi_pause_set_rsp(&bna->enet, msghdr);
+ break;
+
+ case BFI_ENET_I2H_GET_ATTR_RSP:
+ bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
+ break;
+
+ case BFI_ENET_I2H_STATS_GET_RSP:
+ bna_bfi_stats_get_rsp(bna, msghdr);
+ break;
+
+ case BFI_ENET_I2H_STATS_CLR_RSP:
+ /* No-op */
+ break;
+
+ case BFI_ENET_I2H_LINK_UP_AEN:
+ bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_LINK_DOWN_AEN:
+ bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_PORT_ENABLE_AEN:
+ bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_PORT_DISABLE_AEN:
+ bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_BW_UPDATE_AEN:
+ bna_bfi_bw_update_aen(&bna->tx_mod);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ETHPORT
+ */
+#define call_ethport_stop_cbfn(_ethport) \
+do { \
+ if ((_ethport)->stop_cbfn) { \
+ void (*cbfn)(struct bna_enet *); \
+ cbfn = (_ethport)->stop_cbfn; \
+ (_ethport)->stop_cbfn = NULL; \
+ cbfn(&(_ethport)->bna->enet); \
+ } \
+} while (0)
+
+#define call_ethport_adminup_cbfn(ethport, status) \
+do { \
+ if ((ethport)->adminup_cbfn) { \
+ void (*cbfn)(struct bnad *, enum bna_cb_status); \
+ cbfn = (ethport)->adminup_cbfn; \
+ (ethport)->adminup_cbfn = NULL; \
+ cbfn((ethport)->bna->bnad, status); \
+ } \
+} while (0)
+
+static void
+bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
+{
+ struct bfi_enet_enable_req *admin_up_req =
+ &ethport->bfi_enet_cmd.admin_req;
+
+ bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
+ admin_up_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ admin_up_req->enable = BNA_STATUS_T_ENABLED;
+
+ bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
+ bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
+}
+
+static void
+bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
+{
+ struct bfi_enet_enable_req *admin_down_req =
+ &ethport->bfi_enet_cmd.admin_req;
+
+ bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
+ admin_down_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ admin_down_req->enable = BNA_STATUS_T_DISABLED;
+
+ bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
+ bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
+}
+
+static void
+bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
+{
+ struct bfi_enet_diag_lb_req *lpbk_up_req =
+ &ethport->bfi_enet_cmd.lpbk_req;
+
+ bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
+ lpbk_up_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
+ lpbk_up_req->mode = (ethport->bna->enet.type ==
+ BNA_ENET_T_LOOPBACK_INTERNAL) ?
+ BFI_ENET_DIAG_LB_OPMODE_EXT :
+ BFI_ENET_DIAG_LB_OPMODE_CBL;
+ lpbk_up_req->enable = BNA_STATUS_T_ENABLED;
+
+ bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
+ bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
+}
+
+static void
+bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
+{
+ struct bfi_enet_diag_lb_req *lpbk_down_req =
+ &ethport->bfi_enet_cmd.lpbk_req;
+
+ bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
+ lpbk_down_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
+ lpbk_down_req->enable = BNA_STATUS_T_DISABLED;
+
+ bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
+ bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
+}
+
+static void
+bna_bfi_ethport_up(struct bna_ethport *ethport)
+{
+ if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
+ bna_bfi_ethport_admin_up(ethport);
+ else
+ bna_bfi_ethport_lpbk_up(ethport);
+}
+
+static void
+bna_bfi_ethport_down(struct bna_ethport *ethport)
+{
+ if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
+ bna_bfi_ethport_admin_down(ethport);
+ else
+ bna_bfi_ethport_lpbk_down(ethport);
+}
+
+bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
+ enum bna_ethport_event);
+
+static void
+bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
+{
+ call_ethport_stop_cbfn(ethport);
+}
+
+static void
+bna_ethport_sm_stopped(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_START:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down);
+ break;
+
+ case ETHPORT_E_STOP:
+ call_ethport_stop_cbfn(ethport);
+ break;
+
+ case ETHPORT_E_FAIL:
+ /* No-op */
+ break;
+
+ case ETHPORT_E_DOWN:
+ /* This event is received due to Rx objects failing */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_down_entry(struct bna_ethport *ethport)
+{
+}
+
+static void
+bna_ethport_sm_down(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_STOP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_FAIL:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_UP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
+ bna_bfi_ethport_up(ethport);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
+{
+}
+
+static void
+bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_STOP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
+ break;
+
+ case ETHPORT_E_FAIL:
+ call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_DOWN:
+ call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_OK:
+ call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(ethport, bna_ethport_sm_up);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_FAIL:
+ call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down);
+ break;
+
+ case ETHPORT_E_FWRESP_DOWN:
+ /* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
+ bna_bfi_ethport_up(ethport);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
+{
+ /**
+ * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
+ * mbox due to up_resp_wait -> down_resp_wait transition on event
+ * ETHPORT_E_DOWN
+ */
+}
+
+static void
+bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_STOP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
+ break;
+
+ case ETHPORT_E_FAIL:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_UP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_OK:
+ /* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
+ bna_bfi_ethport_down(ethport);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_FAIL:
+ case ETHPORT_E_FWRESP_DOWN:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_up_entry(struct bna_ethport *ethport)
+{
+}
+
+static void
+bna_ethport_sm_up(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_STOP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
+ bna_bfi_ethport_down(ethport);
+ break;
+
+ case ETHPORT_E_FAIL:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_DOWN:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
+ bna_bfi_ethport_down(ethport);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
+{
+}
+
+static void
+bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_FAIL:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_DOWN:
+ /**
+ * This event is received due to Rx objects stopping in
+ * parallel to ethport
+ */
+ /* No-op */
+ break;
+
+ case ETHPORT_E_FWRESP_UP_OK:
+ /* up_resp_wait->last_resp_wait transition on ETHPORT_T_STOP */
+ bna_bfi_ethport_down(ethport);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_FAIL:
+ case ETHPORT_E_FWRESP_DOWN:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
+{
+ ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
+ ethport->bna = bna;
+
+ ethport->link_status = BNA_LINK_DOWN;
+ ethport->link_cbfn = bnad_cb_ethport_link_status;
+
+ ethport->rx_started_count = 0;
+
+ ethport->stop_cbfn = NULL;
+ ethport->adminup_cbfn = NULL;
+
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+}
+
+static void
+bna_ethport_uninit(struct bna_ethport *ethport)
+{
+ ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
+ ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
+
+ ethport->bna = NULL;
+}
+
+static void
+bna_ethport_start(struct bna_ethport *ethport)
+{
+ bfa_fsm_send_event(ethport, ETHPORT_E_START);
+}
+
+static void
+bna_enet_cb_ethport_stopped(struct bna_enet *enet)
+{
+ bfa_wc_down(&enet->chld_stop_wc);
+}
+
+static void
+bna_ethport_stop(struct bna_ethport *ethport)
+{
+ ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
+ bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
+}
+
+static void
+bna_ethport_fail(struct bna_ethport *ethport)
+{
+ /* Reset the physical port status to enabled */
+ ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
+
+ if (ethport->link_status != BNA_LINK_DOWN) {
+ ethport->link_status = BNA_LINK_DOWN;
+ ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
+ }
+ bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
+}
+
+/* Should be called only when ethport is disabled */
+void
+bna_ethport_cb_rx_started(struct bna_ethport *ethport)
+{
+ ethport->rx_started_count++;
+
+ if (ethport->rx_started_count == 1) {
+ ethport->flags |= BNA_ETHPORT_F_RX_STARTED;
+
+ if (ethport_can_be_up(ethport))
+ bfa_fsm_send_event(ethport, ETHPORT_E_UP);
+ }
+}
+
+void
+bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
+{
+ int ethport_up = ethport_is_up(ethport);
+
+ ethport->rx_started_count--;
+
+ if (ethport->rx_started_count == 0) {
+ ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;
+
+ if (ethport_up)
+ bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
+ }
+}
+
+/**
+ * ENET
+ */
+#define bna_enet_chld_start(enet) \
+do { \
+ enum bna_tx_type tx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
+ enum bna_rx_type rx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
+ bna_ethport_start(&(enet)->bna->ethport); \
+ bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type); \
+ bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
+} while (0)
+
+#define bna_enet_chld_stop(enet) \
+do { \
+ enum bna_tx_type tx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
+ enum bna_rx_type rx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
+ bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
+ bfa_wc_up(&(enet)->chld_stop_wc); \
+ bna_ethport_stop(&(enet)->bna->ethport); \
+ bfa_wc_up(&(enet)->chld_stop_wc); \
+ bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type); \
+ bfa_wc_up(&(enet)->chld_stop_wc); \
+ bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
+ bfa_wc_wait(&(enet)->chld_stop_wc); \
+} while (0)
+
+#define bna_enet_chld_fail(enet) \
+do { \
+ bna_ethport_fail(&(enet)->bna->ethport); \
+ bna_tx_mod_fail(&(enet)->bna->tx_mod); \
+ bna_rx_mod_fail(&(enet)->bna->rx_mod); \
+} while (0)
+
+#define bna_enet_rx_start(enet) \
+do { \
+ enum bna_rx_type rx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
+ bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
+} while (0)
+
+#define bna_enet_rx_stop(enet) \
+do { \
+ enum bna_rx_type rx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
+ bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
+ bfa_wc_up(&(enet)->chld_stop_wc); \
+ bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
+ bfa_wc_wait(&(enet)->chld_stop_wc); \
+} while (0)
+
+#define call_enet_stop_cbfn(enet) \
+do { \
+ if ((enet)->stop_cbfn) { \
+ void (*cbfn)(void *); \
+ void *cbarg; \
+ cbfn = (enet)->stop_cbfn; \
+ cbarg = (enet)->stop_cbarg; \
+ (enet)->stop_cbfn = NULL; \
+ (enet)->stop_cbarg = NULL; \
+ cbfn(cbarg); \
+ } \
+} while (0)
+
+#define call_enet_pause_cbfn(enet) \
+do { \
+ if ((enet)->pause_cbfn) { \
+ void (*cbfn)(struct bnad *); \
+ cbfn = (enet)->pause_cbfn; \
+ (enet)->pause_cbfn = NULL; \
+ cbfn((enet)->bna->bnad); \
+ } \
+} while (0)
+
+#define call_enet_mtu_cbfn(enet) \
+do { \
+ if ((enet)->mtu_cbfn) { \
+ void (*cbfn)(struct bnad *); \
+ cbfn = (enet)->mtu_cbfn; \
+ (enet)->mtu_cbfn = NULL; \
+ cbfn((enet)->bna->bnad); \
+ } \
+} while (0)
+
+static void bna_enet_cb_chld_stopped(void *arg);
+static void bna_bfi_pause_set(struct bna_enet *enet);
+
+bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
+ enum bna_enet_event);
+
+static void
+bna_enet_sm_stopped_entry(struct bna_enet *enet)
+{
+ call_enet_pause_cbfn(enet);
+ call_enet_mtu_cbfn(enet);
+ call_enet_stop_cbfn(enet);
+}
+
+static void
+bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_START:
+ bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
+ break;
+
+ case ENET_E_STOP:
+ call_enet_stop_cbfn(enet);
+ break;
+
+ case ENET_E_FAIL:
+ /* No-op */
+ break;
+
+ case ENET_E_PAUSE_CFG:
+ call_enet_pause_cbfn(enet);
+ break;
+
+ case ENET_E_MTU_CFG:
+ call_enet_mtu_cbfn(enet);
+ break;
+
+ case ENET_E_CHLD_STOPPED:
+ /**
+ * This event is received due to Ethport, Tx and Rx objects
+ * failing
+ */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
+{
+ bna_bfi_pause_set(enet);
+}
+
+static void
+bna_enet_sm_pause_init_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_STOP:
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
+ break;
+
+ case ENET_E_FAIL:
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ break;
+
+ case ENET_E_PAUSE_CFG:
+ enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
+ break;
+
+ case ENET_E_MTU_CFG:
+ /* No-op */
+ break;
+
+ case ENET_E_FWRESP_PAUSE:
+ if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ bna_bfi_pause_set(enet);
+ } else {
+ bfa_fsm_set_state(enet, bna_enet_sm_started);
+ bna_enet_chld_start(enet);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
+{
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+}
+
+static void
+bna_enet_sm_last_resp_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_FAIL:
+ case ENET_E_FWRESP_PAUSE:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_started_entry(struct bna_enet *enet)
+{
+ /**
+ * NOTE: Do not call bna_enet_chld_start() here, since it will be
+ * inadvertently called during cfg_wait->started transition as well
+ */
+ call_enet_pause_cbfn(enet);
+ call_enet_mtu_cbfn(enet);
+}
+
+static void
+bna_enet_sm_started(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_STOP:
+ bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
+ break;
+
+ case ENET_E_FAIL:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ bna_enet_chld_fail(enet);
+ break;
+
+ case ENET_E_PAUSE_CFG:
+ bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
+ bna_bfi_pause_set(enet);
+ break;
+
+ case ENET_E_MTU_CFG:
+ bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
+ bna_enet_rx_stop(enet);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
+{
+}
+
+static void
+bna_enet_sm_cfg_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_STOP:
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
+ bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
+ break;
+
+ case ENET_E_FAIL:
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ bna_enet_chld_fail(enet);
+ break;
+
+ case ENET_E_PAUSE_CFG:
+ enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
+ break;
+
+ case ENET_E_MTU_CFG:
+ enet->flags |= BNA_ENET_F_MTU_CHANGED;
+ break;
+
+ case ENET_E_CHLD_STOPPED:
+ bna_enet_rx_start(enet);
+ /* Fall through */
+ case ENET_E_FWRESP_PAUSE:
+ if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ bna_bfi_pause_set(enet);
+ } else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
+ enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
+ bna_enet_rx_stop(enet);
+ } else {
+ bfa_fsm_set_state(enet, bna_enet_sm_started);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
+{
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
+}
+
+static void
+bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_FAIL:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ bna_enet_chld_fail(enet);
+ break;
+
+ case ENET_E_FWRESP_PAUSE:
+ case ENET_E_CHLD_STOPPED:
+ bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
+{
+ bna_enet_chld_stop(enet);
+}
+
+static void
+bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_FAIL:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ bna_enet_chld_fail(enet);
+ break;
+
+ case ENET_E_CHLD_STOPPED:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_bfi_pause_set(struct bna_enet *enet)
+{
+ struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;
+
+ bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
+ pause_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
+ pause_req->tx_pause = enet->pause_config.tx_pause;
+ pause_req->rx_pause = enet->pause_config.rx_pause;
+
+ bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
+ bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
+}
+
+static void
+bna_enet_cb_chld_stopped(void *arg)
+{
+ struct bna_enet *enet = (struct bna_enet *)arg;
+
+ bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
+}
+
+static void
+bna_enet_init(struct bna_enet *enet, struct bna *bna)
+{
+ enet->bna = bna;
+ enet->flags = 0;
+ enet->mtu = 0;
+ enet->type = BNA_ENET_T_REGULAR;
+
+ enet->stop_cbfn = NULL;
+ enet->stop_cbarg = NULL;
+
+ enet->pause_cbfn = NULL;
+
+ enet->mtu_cbfn = NULL;
+
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+}
+
+static void
+bna_enet_uninit(struct bna_enet *enet)
+{
+ enet->flags = 0;
+
+ enet->bna = NULL;
+}
+
+static void
+bna_enet_start(struct bna_enet *enet)
+{
+ enet->flags |= BNA_ENET_F_IOCETH_READY;
+ if (enet->flags & BNA_ENET_F_ENABLED)
+ bfa_fsm_send_event(enet, ENET_E_START);
+}
+
+static void
+bna_ioceth_cb_enet_stopped(void *arg)
+{
+ struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
+
+ bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
+}
+
+static void
+bna_enet_stop(struct bna_enet *enet)
+{
+ enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
+ enet->stop_cbarg = &enet->bna->ioceth;
+
+ enet->flags &= ~BNA_ENET_F_IOCETH_READY;
+ bfa_fsm_send_event(enet, ENET_E_STOP);
+}
+
+static void
+bna_enet_fail(struct bna_enet *enet)
+{
+ enet->flags &= ~BNA_ENET_F_IOCETH_READY;
+ bfa_fsm_send_event(enet, ENET_E_FAIL);
+}
+
+void
+bna_enet_cb_tx_stopped(struct bna_enet *enet)
+{
+ bfa_wc_down(&enet->chld_stop_wc);
+}
+
+void
+bna_enet_cb_rx_stopped(struct bna_enet *enet)
+{
+ bfa_wc_down(&enet->chld_stop_wc);
+}
+
+int
+bna_enet_mtu_get(struct bna_enet *enet)
+{
+ return enet->mtu;
+}
+
+void
+bna_enet_enable(struct bna_enet *enet)
+{
+ if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
+ return;
+
+ enet->flags |= BNA_ENET_F_ENABLED;
+
+ if (enet->flags & BNA_ENET_F_IOCETH_READY)
+ bfa_fsm_send_event(enet, ENET_E_START);
+}
+
+void
+bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
+ void (*cbfn)(void *))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ (*cbfn)(enet->bna->bnad);
+ return;
+ }
+
+ enet->stop_cbfn = cbfn;
+ enet->stop_cbarg = enet->bna->bnad;
+
+ enet->flags &= ~BNA_ENET_F_ENABLED;
+
+ bfa_fsm_send_event(enet, ENET_E_STOP);
+}
+
+void
+bna_enet_pause_config(struct bna_enet *enet,
+ struct bna_pause_config *pause_config,
+ void (*cbfn)(struct bnad *))
+{
+ enet->pause_config = *pause_config;
+
+ enet->pause_cbfn = cbfn;
+
+ bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
+}
+
+void
+bna_enet_mtu_set(struct bna_enet *enet, int mtu,
+ void (*cbfn)(struct bnad *))
+{
+ enet->mtu = mtu;
+
+ enet->mtu_cbfn = cbfn;
+
+ bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
+}
+
+void
+bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
+{
+ *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
+}
+
+/**
+ * IOCETH
+ */
+#define enable_mbox_intr(_ioceth) \
+do { \
+ u32 intr_status; \
+ bna_intr_status_get((_ioceth)->bna, intr_status); \
+ bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad); \
+ bna_mbox_intr_enable((_ioceth)->bna); \
+} while (0)
+
+#define disable_mbox_intr(_ioceth) \
+do { \
+ bna_mbox_intr_disable((_ioceth)->bna); \
+ bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad); \
+} while (0)
+
+#define call_ioceth_stop_cbfn(_ioceth) \
+do { \
+ if ((_ioceth)->stop_cbfn) { \
+ void (*cbfn)(struct bnad *); \
+ struct bnad *cbarg; \
+ cbfn = (_ioceth)->stop_cbfn; \
+ cbarg = (_ioceth)->stop_cbarg; \
+ (_ioceth)->stop_cbfn = NULL; \
+ (_ioceth)->stop_cbarg = NULL; \
+ cbfn(cbarg); \
+ } \
+} while (0)
+
+#define bna_stats_mod_uninit(_stats_mod) \
+do { \
+} while (0)
+
+#define bna_stats_mod_start(_stats_mod) \
+do { \
+ (_stats_mod)->ioc_ready = true; \
+} while (0)
+
+#define bna_stats_mod_stop(_stats_mod) \
+do { \
+ (_stats_mod)->ioc_ready = false; \
+} while (0)
+
+#define bna_stats_mod_fail(_stats_mod) \
+do { \
+ (_stats_mod)->ioc_ready = false; \
+ (_stats_mod)->stats_get_busy = false; \
+ (_stats_mod)->stats_clr_busy = false; \
+} while (0)
+
+static void bna_bfi_attr_get(struct bna_ioceth *ioceth);
+
+bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
+ enum bna_ioceth_event);
+
+static void
+bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
+{
+ call_ioceth_stop_cbfn(ioceth);
+}
+
+static void
+bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_ENABLE:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
+ bfa_nw_ioc_enable(&ioceth->ioc);
+ break;
+
+ case IOCETH_E_DISABLE:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
+ break;
+
+ case IOCETH_E_IOC_RESET:
+ enable_mbox_intr(ioceth);
+ break;
+
+ case IOCETH_E_IOC_FAILED:
+ disable_mbox_intr(ioceth);
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
+{
+ /**
+ * Do not call bfa_nw_ioc_enable() here. It must be called in the
+ * previous state due to failed -> ioc_ready_wait transition.
+ */
+}
+
+static void
+bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_DISABLE:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
+ bfa_nw_ioc_disable(&ioceth->ioc);
+ break;
+
+ case IOCETH_E_IOC_RESET:
+ enable_mbox_intr(ioceth);
+ break;
+
+ case IOCETH_E_IOC_FAILED:
+ disable_mbox_intr(ioceth);
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
+ break;
+
+ case IOCETH_E_IOC_READY:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
+{
+ bna_bfi_attr_get(ioceth);
+}
+
+static void
+bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_DISABLE:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
+ break;
+
+ case IOCETH_E_IOC_FAILED:
+ disable_mbox_intr(ioceth);
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
+ break;
+
+ case IOCETH_E_ENET_ATTR_RESP:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
+{
+ bna_enet_start(&ioceth->bna->enet);
+ bna_stats_mod_start(&ioceth->bna->stats_mod);
+ bnad_cb_ioceth_ready(ioceth->bna->bnad);
+}
+
+static void
+bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_DISABLE:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
+ break;
+
+ case IOCETH_E_IOC_FAILED:
+ disable_mbox_intr(ioceth);
+ bna_enet_fail(&ioceth->bna->enet);
+ bna_stats_mod_fail(&ioceth->bna->stats_mod);
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
+{
+}
+
+static void
+bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_IOC_FAILED:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
+ disable_mbox_intr(ioceth);
+ bfa_nw_ioc_disable(&ioceth->ioc);
+ break;
+
+ case IOCETH_E_ENET_ATTR_RESP:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
+ bfa_nw_ioc_disable(&ioceth->ioc);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
+{
+ bna_stats_mod_stop(&ioceth->bna->stats_mod);
+ bna_enet_stop(&ioceth->bna->enet);
+}
+
+static void
+bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_IOC_FAILED:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
+ disable_mbox_intr(ioceth);
+ bna_enet_fail(&ioceth->bna->enet);
+ bna_stats_mod_fail(&ioceth->bna->stats_mod);
+ bfa_nw_ioc_disable(&ioceth->ioc);
+ break;
+
+ case IOCETH_E_ENET_STOPPED:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
+ bfa_nw_ioc_disable(&ioceth->ioc);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
+{
+}
+
+static void
+bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_IOC_DISABLED:
+ disable_mbox_intr(ioceth);
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
+ break;
+
+ case IOCETH_E_ENET_STOPPED:
+ /* This event is received due to enet failing */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
+{
+ bnad_cb_ioceth_failed(ioceth->bna->bnad);
+}
+
+static void
+bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_DISABLE:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
+ bfa_nw_ioc_disable(&ioceth->ioc);
+ break;
+
+ case IOCETH_E_IOC_RESET:
+ enable_mbox_intr(ioceth);
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
+ break;
+
+ case IOCETH_E_IOC_FAILED:
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_bfi_attr_get(struct bna_ioceth *ioceth)
+{
+ struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;
+
+ bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
+ attr_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
+ bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_attr_req), &attr_req->mh);
+ bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
+}
+
+/* IOC callback functions */
+
+static void
+bna_cb_ioceth_enable(void *arg, enum bfa_status error)
+{
+ struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
+
+ if (error)
+ bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
+ else
+ bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
+}
+
+static void
+bna_cb_ioceth_disable(void *arg)
+{
+ struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
+
+ bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
+}
+
+static void
+bna_cb_ioceth_hbfail(void *arg)
+{
+ struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
+
+ bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
+}
+
+static void
+bna_cb_ioceth_reset(void *arg)
+{
+ struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
+
+ bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
+}
+
+static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
+ bna_cb_ioceth_enable,
+ bna_cb_ioceth_disable,
+ bna_cb_ioceth_hbfail,
+ bna_cb_ioceth_reset
+};
+
+static void bna_attr_init(struct bna_ioceth *ioceth)
+{
+ ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
+ ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
+ ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
+ ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
+ ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
+ ioceth->attr.fw_query_complete = false;
+}
+
+static void
+bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ u64 dma;
+ u8 *kva;
+
+ ioceth->bna = bna;
+
+ /**
+ * Attach IOC and claim:
+ * 1. DMA memory for IOC attributes
+ * 2. Kernel memory for FW trace
+ */
+ bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
+ bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);
+
+ BNA_GET_DMA_ADDR(
+ &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
+ kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
+ bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);
+
+ kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+
+ /**
+ * Attach common modules (Diag, SFP, CEE, Port) and claim respective
+ * DMA memory.
+ */
+ BNA_GET_DMA_ADDR(
+ &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
+ kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
+ bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
+ bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
+ kva += bfa_nw_cee_meminfo();
+ dma += bfa_nw_cee_meminfo();
+
+ bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
+ bfa_msgq_memclaim(&bna->msgq, kva, dma);
+ bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
+ kva += bfa_msgq_meminfo();
+ dma += bfa_msgq_meminfo();
+
+ ioceth->stop_cbfn = NULL;
+ ioceth->stop_cbarg = NULL;
+
+ bna_attr_init(ioceth);
+
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
+}
+
+static void
+bna_ioceth_uninit(struct bna_ioceth *ioceth)
+{
+ bfa_nw_ioc_detach(&ioceth->ioc);
+
+ ioceth->bna = NULL;
+}
+
+void
+bna_ioceth_enable(struct bna_ioceth *ioceth)
+{
+ if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
+ bnad_cb_ioceth_ready(ioceth->bna->bnad);
+ return;
+ }
+
+ if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
+ bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
+}
+
+void
+bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ bnad_cb_ioceth_disabled(ioceth->bna->bnad);
+ return;
+ }
+
+ ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
+ ioceth->stop_cbarg = ioceth->bna->bnad;
+
+ bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
+}
+
+static void
+bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ ucam_mod->ucmac = (struct bna_mac *)
+ res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&ucam_mod->free_q);
+ for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
+ bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
+ }
+
+ ucam_mod->bna = bna;
+}
+
+static void
+bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
+{
+ struct list_head *qe;
+ int i = 0;
+
+ list_for_each(qe, &ucam_mod->free_q)
+ i++;
+
+ ucam_mod->bna = NULL;
+}
+
+static void
+bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ mcam_mod->mcmac = (struct bna_mac *)
+ res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&mcam_mod->free_q);
+ for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
+ bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
+ }
+
+ mcam_mod->mchandle = (struct bna_mcam_handle *)
+ res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&mcam_mod->free_handle_q);
+ for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
+ bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
+ list_add_tail(&mcam_mod->mchandle[i].qe,
+ &mcam_mod->free_handle_q);
+ }
+
+ mcam_mod->bna = bna;
+}
+
+static void
+bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
+{
+ struct list_head *qe;
+ int i;
+
+ i = 0;
+ list_for_each(qe, &mcam_mod->free_q) i++;
+
+ i = 0;
+ list_for_each(qe, &mcam_mod->free_handle_q) i++;
+
+ mcam_mod->bna = NULL;
+}
+
+static void
+bna_bfi_stats_get(struct bna *bna)
+{
+ struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
+
+ bna->stats_mod.stats_get_busy = true;
+
+ bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
+ stats_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
+ stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
+ stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
+ stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
+ stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
+ stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;
+
+ bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_stats_req), &stats_req->mh);
+ bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
+}
+
+void
+bna_res_req(struct bna_res_info *res_info)
+{
+ /* DMA memory for COMMON_MODULE */
+ res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
+ (bfa_nw_cee_meminfo() +
+ bfa_msgq_meminfo()), PAGE_SIZE);
+
+ /* DMA memory for retrieving IOC attributes */
+ res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
+ ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
+
+ /* Virtual memory for retreiving fw_trc */
+ res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
+
+ /* DMA memory for retreiving stats */
+ res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
+ ALIGN(sizeof(struct bfi_enet_stats),
+ PAGE_SIZE);
+}
+
+void
+bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
+{
+ struct bna_attr *attr = &bna->ioceth.attr;
+
+ /* Virtual memory for Tx objects - stored by Tx module */
+ res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
+ attr->num_txq * sizeof(struct bna_tx);
+
+ /* Virtual memory for TxQ - stored by Tx module */
+ res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
+ attr->num_txq * sizeof(struct bna_txq);
+
+ /* Virtual memory for Rx objects - stored by Rx module */
+ res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
+ attr->num_rxp * sizeof(struct bna_rx);
+
+ /* Virtual memory for RxPath - stored by Rx module */
+ res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
+ attr->num_rxp * sizeof(struct bna_rxp);
+
+ /* Virtual memory for RxQ - stored by Rx module */
+ res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
+ (attr->num_rxp * 2) * sizeof(struct bna_rxq);
+
+ /* Virtual memory for Unicast MAC address - stored by ucam module */
+ res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
+ attr->num_ucmac * sizeof(struct bna_mac);
+
+ /* Virtual memory for Multicast MAC address - stored by mcam module */
+ res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
+ attr->num_mcmac * sizeof(struct bna_mac);
+
+ /* Virtual memory for Multicast handle - stored by mcam module */
+ res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
+ attr->num_mcmac * sizeof(struct bna_mcam_handle);
+}
+
+void
+bna_init(struct bna *bna, struct bnad *bnad,
+ struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
+{
+ bna->bnad = bnad;
+ bna->pcidev = *pcidev;
+
+ bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
+ bna->stats.hw_stats_dma.msb =
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
+ bna->stats.hw_stats_dma.lsb =
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
+
+ bna_reg_addr_init(bna, &bna->pcidev);
+
+ /* Also initializes diag, cee, sfp, phy_port, msgq */
+ bna_ioceth_init(&bna->ioceth, bna, res_info);
+
+ bna_enet_init(&bna->enet, bna);
+ bna_ethport_init(&bna->ethport, bna);
+}
+
+void
+bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
+{
+ bna_tx_mod_init(&bna->tx_mod, bna, res_info);
+
+ bna_rx_mod_init(&bna->rx_mod, bna, res_info);
+
+ bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
+
+ bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
+
+ bna->default_mode_rid = BFI_INVALID_RID;
+ bna->promisc_rid = BFI_INVALID_RID;
+
+ bna->mod_flags |= BNA_MOD_F_INIT_DONE;
+}
+
+void
+bna_uninit(struct bna *bna)
+{
+ if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
+ bna_mcam_mod_uninit(&bna->mcam_mod);
+ bna_ucam_mod_uninit(&bna->ucam_mod);
+ bna_rx_mod_uninit(&bna->rx_mod);
+ bna_tx_mod_uninit(&bna->tx_mod);
+ bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
+ }
+
+ bna_stats_mod_uninit(&bna->stats_mod);
+ bna_ethport_uninit(&bna->ethport);
+ bna_enet_uninit(&bna->enet);
+
+ bna_ioceth_uninit(&bna->ioceth);
+
+ bna->bnad = NULL;
+}
+
+int
+bna_num_txq_set(struct bna *bna, int num_txq)
+{
+ if (bna->ioceth.attr.fw_query_complete &&
+ (num_txq <= bna->ioceth.attr.num_txq)) {
+ bna->ioceth.attr.num_txq = num_txq;
+ return BNA_CB_SUCCESS;
+ }
+
+ return BNA_CB_FAIL;
+}
+
+int
+bna_num_rxp_set(struct bna *bna, int num_rxp)
+{
+ if (bna->ioceth.attr.fw_query_complete &&
+ (num_rxp <= bna->ioceth.attr.num_rxp)) {
+ bna->ioceth.attr.num_rxp = num_rxp;
+ return BNA_CB_SUCCESS;
+ }
+
+ return BNA_CB_FAIL;
+}
+
+struct bna_mac *
+bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
+{
+ struct list_head *qe;
+
+ if (list_empty(&ucam_mod->free_q))
+ return NULL;
+
+ bfa_q_deq(&ucam_mod->free_q, &qe);
+
+ return (struct bna_mac *)qe;
+}
+
+void
+bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
+{
+ list_add_tail(&mac->qe, &ucam_mod->free_q);
+}
+
+struct bna_mac *
+bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
+{
+ struct list_head *qe;
+
+ if (list_empty(&mcam_mod->free_q))
+ return NULL;
+
+ bfa_q_deq(&mcam_mod->free_q, &qe);
+
+ return (struct bna_mac *)qe;
+}
+
+void
+bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
+{
+ list_add_tail(&mac->qe, &mcam_mod->free_q);
+}
+
+struct bna_mcam_handle *
+bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
+{
+ struct list_head *qe;
+
+ if (list_empty(&mcam_mod->free_handle_q))
+ return NULL;
+
+ bfa_q_deq(&mcam_mod->free_handle_q, &qe);
+
+ return (struct bna_mcam_handle *)qe;
+}
+
+void
+bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
+ struct bna_mcam_handle *handle)
+{
+ list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
+}
+
+void
+bna_hw_stats_get(struct bna *bna)
+{
+ if (!bna->stats_mod.ioc_ready) {
+ bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
+ return;
+ }
+ if (bna->stats_mod.stats_get_busy) {
+ bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
+ return;
+ }
+
+ bna_bfi_stats_get(bna);
+}
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
new file mode 100644
index 00000000000..4c6aab2a953
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -0,0 +1,422 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * File for interrupt macros and functions
+ */
+
+#ifndef __BNA_HW_DEFS_H__
+#define __BNA_HW_DEFS_H__
+
+#include "bfi_reg.h"
+
+/**
+ *
+ * SW imposed limits
+ *
+ */
+#define BFI_ENET_DEF_TXQ 1
+#define BFI_ENET_DEF_RXP 1
+#define BFI_ENET_DEF_UCAM 1
+#define BFI_ENET_DEF_RITSZ 1
+
+#define BFI_ENET_MAX_MCAM 256
+
+#define BFI_INVALID_RID -1
+
+#define BFI_IBIDX_SIZE 4
+
+#define BFI_VLAN_WORD_SHIFT 5 /* 32 bits */
+#define BFI_VLAN_WORD_MASK 0x1F
+#define BFI_VLAN_BLOCK_SHIFT 9 /* 512 bits */
+#define BFI_VLAN_BMASK_ALL 0xFF
+
+#define BFI_COALESCING_TIMER_UNIT 5 /* 5us */
+#define BFI_MAX_COALESCING_TIMEO 0xFF /* in 5us units */
+#define BFI_MAX_INTERPKT_COUNT 0xFF
+#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */
+#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */
+#define BFI_TX_INTERPKT_COUNT 32
+#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */
+#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */
+#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */
+
+#define BFI_TXQ_WI_SIZE 64 /* bytes */
+#define BFI_RXQ_WI_SIZE 8 /* bytes */
+#define BFI_CQ_WI_SIZE 16 /* bytes */
+#define BFI_TX_MAX_WRR_QUOTA 0xFFF
+
+#define BFI_TX_MAX_VECTORS_PER_WI 4
+#define BFI_TX_MAX_VECTORS_PER_PKT 0xFF
+#define BFI_TX_MAX_DATA_PER_VECTOR 0xFFFF
+#define BFI_TX_MAX_DATA_PER_PKT 0xFFFFFF
+
+/* Small Q buffer size */
+#define BFI_SMALL_RXBUF_SIZE 128
+
+#define BFI_TX_MAX_PRIO 8
+#define BFI_TX_PRIO_MAP_ALL 0xFF
+
+/*
+ *
+ * Register definitions and macros
+ *
+ */
+
+#define BNA_PCI_REG_CT_ADDRSZ (0x40000)
+
+#define ct_reg_addr_init(_bna, _pcidev) \
+{ \
+ struct bna_reg_offset reg_offset[] = \
+ {{HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK}, \
+ {HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, \
+ {HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, \
+ {HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK} }; \
+ \
+ (_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva + \
+ reg_offset[(_pcidev)->pci_func].fn_int_status;\
+ (_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva + \
+ reg_offset[(_pcidev)->pci_func].fn_int_mask;\
+}
+
+#define ct_bit_defn_init(_bna, _pcidev) \
+{ \
+ (_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0 | \
+ __HFN_INT_MBOX_LPU1); \
+ (_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0 | \
+ __HFN_INT_MBOX_LPU1); \
+ (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK); \
+ (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK); \
+ (_bna)->bits.halt_status_bits = __HFN_INT_LL_HALT; \
+ (_bna)->bits.halt_mask_bits = __HFN_INT_LL_HALT; \
+}
+
+#define ct2_reg_addr_init(_bna, _pcidev) \
+{ \
+ (_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva + \
+ CT2_HOSTFN_INT_STATUS; \
+ (_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva + \
+ CT2_HOSTFN_INTR_MASK; \
+}
+
+#define ct2_bit_defn_init(_bna, _pcidev) \
+{ \
+ (_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0_CT2 | \
+ __HFN_INT_MBOX_LPU1_CT2); \
+ (_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0_CT2 | \
+ __HFN_INT_MBOX_LPU1_CT2); \
+ (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK_CT2); \
+ (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK_CT2); \
+ (_bna)->bits.halt_status_bits = __HFN_INT_CPQ_HALT_CT2; \
+ (_bna)->bits.halt_mask_bits = __HFN_INT_CPQ_HALT_CT2; \
+}
+
+#define bna_reg_addr_init(_bna, _pcidev) \
+{ \
+ switch ((_pcidev)->device_id) { \
+ case PCI_DEVICE_ID_BROCADE_CT: \
+ ct_reg_addr_init((_bna), (_pcidev)); \
+ ct_bit_defn_init((_bna), (_pcidev)); \
+ break; \
+ case BFA_PCI_DEVICE_ID_CT2: \
+ ct2_reg_addr_init((_bna), (_pcidev)); \
+ ct2_bit_defn_init((_bna), (_pcidev)); \
+ break; \
+ } \
+}
+
+#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id)
+/**
+ *
+ * Interrupt related bits, flags and macros
+ *
+ */
+
+#define IB_STATUS_BITS 0x0000ffff
+
+#define BNA_IS_MBOX_INTR(_bna, _intr_status) \
+ ((_intr_status) & (_bna)->bits.mbox_status_bits)
+
+#define BNA_IS_HALT_INTR(_bna, _intr_status) \
+ ((_intr_status) & (_bna)->bits.halt_status_bits)
+
+#define BNA_IS_ERR_INTR(_bna, _intr_status) \
+ ((_intr_status) & (_bna)->bits.error_status_bits)
+
+#define BNA_IS_MBOX_ERR_INTR(_bna, _intr_status) \
+ (BNA_IS_MBOX_INTR(_bna, _intr_status) | \
+ BNA_IS_ERR_INTR(_bna, _intr_status))
+
+#define BNA_IS_INTX_DATA_INTR(_intr_status) \
+ ((_intr_status) & IB_STATUS_BITS)
+
+#define bna_halt_clear(_bna) \
+do { \
+ u32 init_halt; \
+ init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt); \
+ init_halt &= ~__FW_INIT_HALT_P; \
+ writel(init_halt, (_bna)->ioceth.ioc.ioc_regs.ll_halt); \
+ init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt); \
+} while (0)
+
+#define bna_intx_disable(_bna, _cur_mask) \
+{ \
+ (_cur_mask) = readl((_bna)->regs.fn_int_mask); \
+ writel(0xffffffff, (_bna)->regs.fn_int_mask); \
+}
+
+#define bna_intx_enable(bna, new_mask) \
+ writel((new_mask), (bna)->regs.fn_int_mask)
+#define bna_mbox_intr_disable(bna) \
+do { \
+ u32 mask; \
+ mask = readl((bna)->regs.fn_int_mask); \
+ writel((mask | (bna)->bits.mbox_mask_bits | \
+ (bna)->bits.error_mask_bits), (bna)->regs.fn_int_mask); \
+ mask = readl((bna)->regs.fn_int_mask); \
+} while (0)
+
+#define bna_mbox_intr_enable(bna) \
+do { \
+ u32 mask; \
+ mask = readl((bna)->regs.fn_int_mask); \
+ writel((mask & ~((bna)->bits.mbox_mask_bits | \
+ (bna)->bits.error_mask_bits)), (bna)->regs.fn_int_mask);\
+ mask = readl((bna)->regs.fn_int_mask); \
+} while (0)
+
+#define bna_intr_status_get(_bna, _status) \
+{ \
+ (_status) = readl((_bna)->regs.fn_int_status); \
+ if (_status) { \
+ writel(((_status) & ~(_bna)->bits.mbox_status_bits), \
+ (_bna)->regs.fn_int_status); \
+ } \
+}
+
+/*
+ * MAX ACK EVENTS : No. of acks that can be accumulated in driver,
+ * before acking to h/w. The no. of bits is 16 in the doorbell register,
+ * however we keep this limited to 15 bits.
+ * This is because around the edge of 64K boundary (16 bits), one
+ * single poll can make the accumulated ACK counter cross the 64K boundary,
+ * causing problems, when we try to ack with a value greater than 64K.
+ * 15 bits (32K) should be large enough to accumulate, anyways, and the max.
+ * acked events to h/w can be (32K + max poll weight) (currently 64).
+ */
+#define BNA_IB_MAX_ACK_EVENTS (1 << 15)
+
+/* These macros build the data portion of the TxQ/RxQ doorbell */
+#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
+#define BNA_DOORBELL_Q_STOP (0x40000000)
+
+/* These macros build the data portion of the IB doorbell */
+#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
+ (0x80000000 | ((_timeout) << 16) | (_events))
+#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
+
+/* Set the coalescing timer for the given ib */
+#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \
+ ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0));
+
+/* Acks 'events' # of events for a given ib while disabling interrupts */
+#define bna_ib_ack_disable_irq(_i_dbell, _events) \
+ (writel(BNA_DOORBELL_IB_INT_ACK(0, (_events)), \
+ (_i_dbell)->doorbell_addr));
+
+/* Acks 'events' # of events for a given ib */
+#define bna_ib_ack(_i_dbell, _events) \
+ (writel(((_i_dbell)->doorbell_ack | (_events)), \
+ (_i_dbell)->doorbell_addr));
+
+#define bna_ib_start(_bna, _ib, _is_regular) \
+{ \
+ u32 intx_mask; \
+ struct bna_ib *ib = _ib; \
+ if ((ib->intr_type == BNA_INTR_T_INTX)) { \
+ bna_intx_disable((_bna), intx_mask); \
+ intx_mask &= ~(ib->intr_vector); \
+ bna_intx_enable((_bna), intx_mask); \
+ } \
+ bna_ib_coalescing_timer_set(&ib->door_bell, \
+ ib->coalescing_timeo); \
+ if (_is_regular) \
+ bna_ib_ack(&ib->door_bell, 0); \
+}
+
+#define bna_ib_stop(_bna, _ib) \
+{ \
+ u32 intx_mask; \
+ struct bna_ib *ib = _ib; \
+ writel(BNA_DOORBELL_IB_INT_DISABLE, \
+ ib->door_bell.doorbell_addr); \
+ if (ib->intr_type == BNA_INTR_T_INTX) { \
+ bna_intx_disable((_bna), intx_mask); \
+ intx_mask |= ib->intr_vector; \
+ bna_intx_enable((_bna), intx_mask); \
+ } \
+}
+
+#define bna_txq_prod_indx_doorbell(_tcb) \
+ (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \
+ (_tcb)->q_dbell));
+
+#define bna_rxq_prod_indx_doorbell(_rcb) \
+ (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
+ (_rcb)->q_dbell));
+
+/**
+ *
+ * TxQ, RxQ, CQ related bits, offsets, macros
+ *
+ */
+
+/* TxQ Entry Opcodes */
+#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
+#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
+#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
+
+/* TxQ Entry Control Flags */
+#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
+#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
+#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
+#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
+#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
+#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
+#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
+
+#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
+ (((_hdr_size) << 10) | ((_offset) & 0x3FF))
+
+/*
+ * Completion Q defines
+ */
+/* CQ Entry Flags */
+#define BNA_CQ_EF_MAC_ERROR (1 << 0)
+#define BNA_CQ_EF_FCS_ERROR (1 << 1)
+#define BNA_CQ_EF_TOO_LONG (1 << 2)
+#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
+
+#define BNA_CQ_EF_RSVD1 (1 << 4)
+#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
+#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
+#define BNA_CQ_EF_HDS_HEADER (1 << 7)
+
+#define BNA_CQ_EF_UDP (1 << 8)
+#define BNA_CQ_EF_TCP (1 << 9)
+#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
+#define BNA_CQ_EF_IPV6 (1 << 11)
+
+#define BNA_CQ_EF_IPV4 (1 << 12)
+#define BNA_CQ_EF_VLAN (1 << 13)
+#define BNA_CQ_EF_RSS (1 << 14)
+#define BNA_CQ_EF_RSVD2 (1 << 15)
+
+#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
+#define BNA_CQ_EF_MCAST (1 << 17)
+#define BNA_CQ_EF_BCAST (1 << 18)
+#define BNA_CQ_EF_REMOTE (1 << 19)
+
+#define BNA_CQ_EF_LOCAL (1 << 20)
+
+/**
+ *
+ * Data structures
+ *
+ */
+
+struct bna_reg_offset {
+ u32 fn_int_status;
+ u32 fn_int_mask;
+};
+
+struct bna_bit_defn {
+ u32 mbox_status_bits;
+ u32 mbox_mask_bits;
+ u32 error_status_bits;
+ u32 error_mask_bits;
+ u32 halt_status_bits;
+ u32 halt_mask_bits;
+};
+
+struct bna_reg {
+ void __iomem *fn_int_status;
+ void __iomem *fn_int_mask;
+};
+
+/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
+struct bna_dma_addr {
+ u32 msb;
+ u32 lsb;
+};
+
+struct bna_txq_wi_vector {
+ u16 reserved;
+ u16 length; /* Only 14 LSB are valid */
+ struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
+};
+
+/**
+ * TxQ Entry Structure
+ *
+ * BEWARE: Load values into this structure with correct endianess.
+ */
+struct bna_txq_entry {
+ union {
+ struct {
+ u8 reserved;
+ u8 num_vectors; /* number of vectors present */
+ u16 opcode; /* Either */
+ /* BNA_TXQ_WI_SEND or */
+ /* BNA_TXQ_WI_SEND_LSO */
+ u16 flags; /* OR of all the flags */
+ u16 l4_hdr_size_n_offset;
+ u16 vlan_tag;
+ u16 lso_mss; /* Only 14 LSB are valid */
+ u32 frame_length; /* Only 24 LSB are valid */
+ } wi;
+
+ struct {
+ u16 reserved;
+ u16 opcode; /* Must be */
+ /* BNA_TXQ_WI_EXTENSION */
+ u32 reserved2[3]; /* Place holder for */
+ /* removed vector (12 bytes) */
+ } wi_ext;
+ } hdr;
+ struct bna_txq_wi_vector vector[4];
+};
+
+/* RxQ Entry Structure */
+struct bna_rxq_entry { /* Rx-Buffer */
+ struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */
+};
+
+/* CQ Entry Structure */
+struct bna_cq_entry {
+ u32 flags;
+ u16 vlan_tag;
+ u16 length;
+ u32 rss_hash;
+ u8 valid;
+ u8 reserved1;
+ u8 reserved2;
+ u8 rxq_id;
+};
+
+#endif /* __BNA_HW_DEFS_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
new file mode 100644
index 00000000000..276fcb589f4
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -0,0 +1,3798 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include "bna.h"
+#include "bfi.h"
+
+/**
+ * IB
+ */
+static void
+bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
+{
+ ib->coalescing_timeo = coalescing_timeo;
+ ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
+ (u32)ib->coalescing_timeo, 0);
+}
+
+/**
+ * RXF
+ */
+
+#define bna_rxf_vlan_cfg_soft_reset(rxf) \
+do { \
+ (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \
+ (rxf)->vlan_strip_pending = true; \
+} while (0)
+
+#define bna_rxf_rss_cfg_soft_reset(rxf) \
+do { \
+ if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \
+ (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \
+ BNA_RSS_F_CFG_PENDING | \
+ BNA_RSS_F_STATUS_PENDING); \
+} while (0)
+
+static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
+static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
+static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
+static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
+static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
+static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
+static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
+static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
+ enum bna_cleanup_type cleanup);
+static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
+ enum bna_cleanup_type cleanup);
+static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
+ enum bna_cleanup_type cleanup);
+
+bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
+ enum bna_rxf_event);
+
+static void
+bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
+{
+ call_rxf_stop_cbfn(rxf);
+}
+
+static void
+bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_START:
+ if (rxf->flags & BNA_RXF_F_PAUSED) {
+ bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
+ call_rxf_start_cbfn(rxf);
+ } else
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
+ break;
+
+ case RXF_E_STOP:
+ call_rxf_stop_cbfn(rxf);
+ break;
+
+ case RXF_E_FAIL:
+ /* No-op */
+ break;
+
+ case RXF_E_CONFIG:
+ call_rxf_cam_fltr_cbfn(rxf);
+ break;
+
+ case RXF_E_PAUSE:
+ rxf->flags |= BNA_RXF_F_PAUSED;
+ call_rxf_pause_cbfn(rxf);
+ break;
+
+ case RXF_E_RESUME:
+ rxf->flags &= ~BNA_RXF_F_PAUSED;
+ call_rxf_resume_cbfn(rxf);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
+{
+ call_rxf_pause_cbfn(rxf);
+}
+
+static void
+bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ case RXF_E_FAIL:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CONFIG:
+ call_rxf_cam_fltr_cbfn(rxf);
+ break;
+
+ case RXF_E_RESUME:
+ rxf->flags &= ~BNA_RXF_F_PAUSED;
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
+{
+ if (!bna_rxf_cfg_apply(rxf)) {
+ /* No more pending config updates */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ }
+}
+
+static void
+bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
+ break;
+
+ case RXF_E_FAIL:
+ bna_rxf_cfg_reset(rxf);
+ call_rxf_start_cbfn(rxf);
+ call_rxf_cam_fltr_cbfn(rxf);
+ call_rxf_resume_cbfn(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CONFIG:
+ /* No-op */
+ break;
+
+ case RXF_E_PAUSE:
+ rxf->flags |= BNA_RXF_F_PAUSED;
+ call_rxf_start_cbfn(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
+ break;
+
+ case RXF_E_FW_RESP:
+ if (!bna_rxf_cfg_apply(rxf)) {
+ /* No more pending config updates */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_rxf_sm_started_entry(struct bna_rxf *rxf)
+{
+ call_rxf_start_cbfn(rxf);
+ call_rxf_cam_fltr_cbfn(rxf);
+ call_rxf_resume_cbfn(rxf);
+}
+
+static void
+bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ case RXF_E_FAIL:
+ bna_rxf_cfg_reset(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CONFIG:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
+ break;
+
+ case RXF_E_PAUSE:
+ rxf->flags |= BNA_RXF_F_PAUSED;
+ if (!bna_rxf_fltr_clear(rxf))
+ bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
+ else
+ bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
+{
+}
+
+static void
+bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ bna_rxf_cfg_reset(rxf);
+ call_rxf_pause_cbfn(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_FW_RESP:
+ if (!bna_rxf_fltr_clear(rxf)) {
+ /* No more pending CAM entries to clear */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
+{
+}
+
+static void
+bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ case RXF_E_FW_RESP:
+ bna_rxf_cfg_reset(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
+ enum bfi_enet_h2i_msgs req_type)
+{
+ struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
+ memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_ucast_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
+{
+ struct bfi_enet_mcast_add_req *req =
+ &rxf->bfi_enet_cmd.mcast_add_req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
+ 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
+ memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_mcast_add_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
+{
+ struct bfi_enet_mcast_del_req *req =
+ &rxf->bfi_enet_cmd.mcast_del_req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
+ 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
+ req->handle = htons(handle);
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_mcast_del_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
+{
+ struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ req->enable = status;
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
+{
+ struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ req->enable = status;
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
+{
+ struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
+ int i;
+ int j;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
+ req->block_idx = block_idx;
+ for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
+ j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
+ req->bit_mask[i] =
+ htonl(rxf->vlan_filter_table[j]);
+ else
+ req->bit_mask[i] = 0xFFFFFFFF;
+ }
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
+{
+ struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ req->enable = rxf->vlan_strip_status;
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_rit_cfg(struct bna_rxf *rxf)
+{
+ struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
+ req->size = htons(rxf->rit_size);
+ memcpy(&req->table[0], rxf->rit, rxf->rit_size);
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_rit_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_rss_cfg(struct bna_rxf *rxf)
+{
+ struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
+ int i;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
+ req->cfg.type = rxf->rss_cfg.hash_type;
+ req->cfg.mask = rxf->rss_cfg.hash_mask;
+ for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
+ req->cfg.key[i] =
+ htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_rss_enable(struct bna_rxf *rxf)
+{
+ struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ req->enable = rxf->rss_status;
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+/* This function gets the multicast MAC that has already been added to CAM */
+static struct bna_mac *
+bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
+{
+ struct bna_mac *mac;
+ struct list_head *qe;
+
+ list_for_each(qe, &rxf->mcast_active_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
+ return mac;
+ }
+
+ list_for_each(qe, &rxf->mcast_pending_del_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
+ return mac;
+ }
+
+ return NULL;
+}
+
+static struct bna_mcam_handle *
+bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
+{
+ struct bna_mcam_handle *mchandle;
+ struct list_head *qe;
+
+ list_for_each(qe, &rxf->mcast_handle_q) {
+ mchandle = (struct bna_mcam_handle *)qe;
+ if (mchandle->handle == handle)
+ return mchandle;
+ }
+
+ return NULL;
+}
+
+static void
+bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
+{
+ struct bna_mac *mcmac;
+ struct bna_mcam_handle *mchandle;
+
+ mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
+ mchandle = bna_rxf_mchandle_get(rxf, handle);
+ if (mchandle == NULL) {
+ mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
+ mchandle->handle = handle;
+ mchandle->refcnt = 0;
+ list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
+ }
+ mchandle->refcnt++;
+ mcmac->handle = mchandle;
+}
+
+static int
+bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
+ enum bna_cleanup_type cleanup)
+{
+ struct bna_mcam_handle *mchandle;
+ int ret = 0;
+
+ mchandle = mac->handle;
+ if (mchandle == NULL)
+ return ret;
+
+ mchandle->refcnt--;
+ if (mchandle->refcnt == 0) {
+ if (cleanup == BNA_HARD_CLEANUP) {
+ bna_bfi_mcast_del_req(rxf, mchandle->handle);
+ ret = 1;
+ }
+ list_del(&mchandle->qe);
+ bfa_q_qe_init(&mchandle->qe);
+ bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
+ }
+ mac->handle = NULL;
+
+ return ret;
+}
+
+static int
+bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+ int ret;
+
+ /* Delete multicast entries previousely added */
+ while (!list_empty(&rxf->mcast_pending_del_q)) {
+ bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ if (ret)
+ return ret;
+ }
+
+ /* Add multicast entries */
+ if (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ list_add_tail(&mac->qe, &rxf->mcast_active_q);
+ bna_bfi_mcast_add_req(rxf, mac);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
+{
+ u8 vlan_pending_bitmask;
+ int block_idx = 0;
+
+ if (rxf->vlan_pending_bitmask) {
+ vlan_pending_bitmask = rxf->vlan_pending_bitmask;
+ while (!(vlan_pending_bitmask & 0x1)) {
+ block_idx++;
+ vlan_pending_bitmask >>= 1;
+ }
+ rxf->vlan_pending_bitmask &= ~(1 << block_idx);
+ bna_bfi_rx_vlan_filter_set(rxf, block_idx);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
+{
+ struct list_head *qe;
+ struct bna_mac *mac;
+ int ret;
+
+ /* Throw away delete pending mcast entries */
+ while (!list_empty(&rxf->mcast_pending_del_q)) {
+ bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ ret = bna_rxf_mcast_del(rxf, mac, cleanup);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ if (ret)
+ return ret;
+ }
+
+ /* Move active mcast entries to pending_add_q */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->mcast_pending_add_q);
+ mac = (struct bna_mac *)qe;
+ if (bna_rxf_mcast_del(rxf, mac, cleanup))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
+{
+ if (rxf->rss_pending) {
+ if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
+ rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
+ bna_bfi_rit_cfg(rxf);
+ return 1;
+ }
+
+ if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
+ rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
+ bna_bfi_rss_cfg(rxf);
+ return 1;
+ }
+
+ if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
+ rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
+ bna_bfi_rss_enable(rxf);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_cfg_apply(struct bna_rxf *rxf)
+{
+ if (bna_rxf_ucast_cfg_apply(rxf))
+ return 1;
+
+ if (bna_rxf_mcast_cfg_apply(rxf))
+ return 1;
+
+ if (bna_rxf_promisc_cfg_apply(rxf))
+ return 1;
+
+ if (bna_rxf_allmulti_cfg_apply(rxf))
+ return 1;
+
+ if (bna_rxf_vlan_cfg_apply(rxf))
+ return 1;
+
+ if (bna_rxf_vlan_strip_cfg_apply(rxf))
+ return 1;
+
+ if (bna_rxf_rss_cfg_apply(rxf))
+ return 1;
+
+ return 0;
+}
+
+/* Only software reset */
+static int
+bna_rxf_fltr_clear(struct bna_rxf *rxf)
+{
+ if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
+ return 1;
+
+ if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
+ return 1;
+
+ if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
+ return 1;
+
+ if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
+ return 1;
+
+ return 0;
+}
+
+static void
+bna_rxf_cfg_reset(struct bna_rxf *rxf)
+{
+ bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
+ bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
+ bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
+ bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
+ bna_rxf_vlan_cfg_soft_reset(rxf);
+ bna_rxf_rss_cfg_soft_reset(rxf);
+}
+
+static void
+bna_rit_init(struct bna_rxf *rxf, int rit_size)
+{
+ struct bna_rx *rx = rxf->rx;
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+ int offset = 0;
+
+ rxf->rit_size = rit_size;
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ rxf->rit[offset] = rxp->cq.ccb->id;
+ offset++;
+ }
+
+}
+
+void
+bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
+{
+ bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
+}
+
+void
+bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_mcast_add_req *req =
+ &rxf->bfi_enet_cmd.mcast_add_req;
+ struct bfi_enet_mcast_add_rsp *rsp =
+ (struct bfi_enet_mcast_add_rsp *)msghdr;
+
+ bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
+ ntohs(rsp->handle));
+ bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
+}
+
+static void
+bna_rxf_init(struct bna_rxf *rxf,
+ struct bna_rx *rx,
+ struct bna_rx_config *q_config,
+ struct bna_res_info *res_info)
+{
+ rxf->rx = rx;
+
+ INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
+ INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
+ rxf->ucast_pending_set = 0;
+ rxf->ucast_active_set = 0;
+ INIT_LIST_HEAD(&rxf->ucast_active_q);
+ rxf->ucast_pending_mac = NULL;
+
+ INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
+ INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
+ INIT_LIST_HEAD(&rxf->mcast_active_q);
+ INIT_LIST_HEAD(&rxf->mcast_handle_q);
+
+ if (q_config->paused)
+ rxf->flags |= BNA_RXF_F_PAUSED;
+
+ rxf->rit = (u8 *)
+ res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
+ bna_rit_init(rxf, q_config->num_paths);
+
+ rxf->rss_status = q_config->rss_status;
+ if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
+ rxf->rss_cfg = q_config->rss_config;
+ rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
+ rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
+ rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
+ }
+
+ rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
+ memset(rxf->vlan_filter_table, 0,
+ (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
+ rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
+ rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
+
+ rxf->vlan_strip_status = q_config->vlan_strip_status;
+
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+}
+
+static void
+bna_rxf_uninit(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac;
+
+ rxf->ucast_pending_set = 0;
+ rxf->ucast_active_set = 0;
+
+ while (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
+ bfa_q_qe_init(&mac->qe);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ }
+
+ if (rxf->ucast_pending_mac) {
+ bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
+ rxf->ucast_pending_mac);
+ rxf->ucast_pending_mac = NULL;
+ }
+
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
+ bfa_q_qe_init(&mac->qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ rxf->rxmode_pending = 0;
+ rxf->rxmode_pending_bitmask = 0;
+ if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
+ rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
+ if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
+ rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
+
+ rxf->rss_pending = 0;
+ rxf->vlan_strip_pending = false;
+
+ rxf->flags = 0;
+
+ rxf->rx = NULL;
+}
+
+static void
+bna_rx_cb_rxf_started(struct bna_rx *rx)
+{
+ bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
+}
+
+static void
+bna_rxf_start(struct bna_rxf *rxf)
+{
+ rxf->start_cbfn = bna_rx_cb_rxf_started;
+ rxf->start_cbarg = rxf->rx;
+ bfa_fsm_send_event(rxf, RXF_E_START);
+}
+
+static void
+bna_rx_cb_rxf_stopped(struct bna_rx *rx)
+{
+ bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
+}
+
+static void
+bna_rxf_stop(struct bna_rxf *rxf)
+{
+ rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
+ rxf->stop_cbarg = rxf->rx;
+ bfa_fsm_send_event(rxf, RXF_E_STOP);
+}
+
+static void
+bna_rxf_fail(struct bna_rxf *rxf)
+{
+ bfa_fsm_send_event(rxf, RXF_E_FAIL);
+}
+
+enum bna_cb_status
+bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->ucast_pending_mac == NULL) {
+ rxf->ucast_pending_mac =
+ bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+ if (rxf->ucast_pending_mac == NULL)
+ return BNA_CB_UCAST_CAM_FULL;
+ bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
+ }
+
+ memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
+ rxf->ucast_pending_set = 1;
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+ return BNA_CB_SUCCESS;
+}
+
+enum bna_cb_status
+bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct bna_mac *mac;
+
+ /* Check if already added or pending addition */
+ if (bna_mac_find(&rxf->mcast_active_q, addr) ||
+ bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
+ if (cbfn)
+ cbfn(rx->bna->bnad, rx);
+ return BNA_CB_SUCCESS;
+ }
+
+ mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ if (mac == NULL)
+ return BNA_CB_MCAST_LIST_FULL;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, addr, ETH_ALEN);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+ return BNA_CB_SUCCESS;
+}
+
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head list_head;
+ struct list_head *qe;
+ u8 *mcaddr;
+ struct bna_mac *mac;
+ int i;
+
+ /* Allocate nodes */
+ INIT_LIST_HEAD(&list_head);
+ for (i = 0, mcaddr = mclist; i < count; i++) {
+ mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ if (mac == NULL)
+ goto err_return;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, mcaddr, ETH_ALEN);
+ list_add_tail(&mac->qe, &list_head);
+
+ mcaddr += ETH_ALEN;
+ }
+
+ /* Purge the pending_add_q */
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ /* Schedule active_q entries for deletion */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+ }
+
+ /* Add the new entries */
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ }
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ return BNA_CB_MCAST_LIST_FULL;
+}
+
+void
+bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
+ int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
+ int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
+
+ rxf->vlan_filter_table[index] |= bit;
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
+ rxf->vlan_pending_bitmask |= (1 << group_id);
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
+void
+bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
+ int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
+ int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
+
+ rxf->vlan_filter_table[index] &= ~bit;
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
+ rxf->vlan_pending_bitmask |= (1 << group_id);
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
+static int
+bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* Delete MAC addresses previousely added */
+ if (!list_empty(&rxf->ucast_pending_del_q)) {
+ bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ return 1;
+ }
+
+ /* Set default unicast MAC */
+ if (rxf->ucast_pending_set) {
+ rxf->ucast_pending_set = 0;
+ memcpy(rxf->ucast_active_mac.addr,
+ rxf->ucast_pending_mac->addr, ETH_ALEN);
+ rxf->ucast_active_set = 1;
+ bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
+ BFI_ENET_H2I_MAC_UCAST_SET_REQ);
+ return 1;
+ }
+
+ /* Add additional MAC entries */
+ if (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ list_add_tail(&mac->qe, &rxf->ucast_active_q);
+ bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
+{
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ /* Throw away delete pending ucast entries */
+ while (!list_empty(&rxf->ucast_pending_del_q)) {
+ bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ if (cleanup == BNA_SOFT_CLEANUP)
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ else {
+ bna_bfi_ucast_req(rxf, mac,
+ BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ return 1;
+ }
+ }
+
+ /* Move active ucast entries to pending_add_q */
+ while (!list_empty(&rxf->ucast_active_q)) {
+ bfa_q_deq(&rxf->ucast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->ucast_pending_add_q);
+ if (cleanup == BNA_HARD_CLEANUP) {
+ mac = (struct bna_mac *)qe;
+ bna_bfi_ucast_req(rxf, mac,
+ BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
+ return 1;
+ }
+ }
+
+ if (rxf->ucast_active_set) {
+ rxf->ucast_pending_set = 1;
+ rxf->ucast_active_set = 0;
+ if (cleanup == BNA_HARD_CLEANUP) {
+ bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
+ BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* Enable/disable promiscuous mode */
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move promisc configuration from pending -> active */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active |= BNA_RXMODE_PROMISC;
+ bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
+ return 1;
+ } else if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move promisc configuration from pending -> active */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ bna->promisc_rid = BFI_INVALID_RID;
+ bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* Clear pending promisc mode disable */
+ if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ bna->promisc_rid = BFI_INVALID_RID;
+ if (cleanup == BNA_HARD_CLEANUP) {
+ bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+ }
+
+ /* Move promisc mode config from active -> pending */
+ if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
+ promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ if (cleanup == BNA_HARD_CLEANUP) {
+ bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
+{
+ /* Enable/disable allmulti mode */
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move allmulti configuration from pending -> active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
+ bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
+ return 1;
+ } else if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move allmulti configuration from pending -> active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
+{
+ /* Clear pending allmulti mode disable */
+ if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ if (cleanup == BNA_HARD_CLEANUP) {
+ bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
+ return 1;
+ }
+ }
+
+ /* Move allmulti mode config from active -> pending */
+ if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
+ allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ if (cleanup == BNA_HARD_CLEANUP) {
+ bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_promisc_enable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
+ /* Do nothing if pending enable or already enabled */
+ } else if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Turn off pending disable command */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ } else {
+ /* Schedule enable */
+ promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ bna->promisc_rid = rxf->rx->rid;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static int
+bna_rxf_promisc_disable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+ if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
+ /* Do nothing if pending disable or already disabled */
+ } else if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Turn off pending enable command */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ bna->promisc_rid = BFI_INVALID_RID;
+ } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
+ /* Schedule disable */
+ promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static int
+bna_rxf_allmulti_enable(struct bna_rxf *rxf)
+{
+ int ret = 0;
+
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
+ /* Do nothing if pending enable or already enabled */
+ } else if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Turn off pending disable command */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ } else {
+ /* Schedule enable */
+ allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static int
+bna_rxf_allmulti_disable(struct bna_rxf *rxf)
+{
+ int ret = 0;
+
+ if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
+ /* Do nothing if pending disable or already disabled */
+ } else if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Turn off pending enable command */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
+ /* Schedule disable */
+ allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static int
+bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
+{
+ if (rxf->vlan_strip_pending) {
+ rxf->vlan_strip_pending = false;
+ bna_bfi_vlan_strip_enable(rxf);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * RX
+ */
+
+#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
+ (qcfg)->num_paths : ((qcfg)->num_paths * 2))
+
+#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
+ (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
+
+#define call_rx_stop_cbfn(rx) \
+do { \
+ if ((rx)->stop_cbfn) { \
+ void (*cbfn)(void *, struct bna_rx *); \
+ void *cbarg; \
+ cbfn = (rx)->stop_cbfn; \
+ cbarg = (rx)->stop_cbarg; \
+ (rx)->stop_cbfn = NULL; \
+ (rx)->stop_cbarg = NULL; \
+ cbfn(cbarg, rx); \
+ } \
+} while (0)
+
+#define call_rx_stall_cbfn(rx) \
+do { \
+ if ((rx)->rx_stall_cbfn) \
+ (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
+} while (0)
+
+#define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
+do { \
+ struct bna_dma_addr cur_q_addr = \
+ *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
+ (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
+ (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
+ (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
+ (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
+ (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
+ (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
+} while (0)
+
+static void bna_bfi_rx_enet_start(struct bna_rx *rx);
+static void bna_rx_enet_stop(struct bna_rx *rx);
+static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
+
+bfa_fsm_state_decl(bna_rx, stopped,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, start_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, rxf_start_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, started,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, stop_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, cleanup_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, failed,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, quiesce_wait,
+ struct bna_rx, enum bna_rx_event);
+
+static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
+{
+ call_rx_stop_cbfn(rx);
+}
+
+static void bna_rx_sm_stopped(struct bna_rx *rx,
+ enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_START:
+ bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
+ break;
+
+ case RX_E_STOP:
+ call_rx_stop_cbfn(rx);
+ break;
+
+ case RX_E_FAIL:
+ /* no-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
+{
+ bna_bfi_rx_enet_start(rx);
+}
+
+void
+bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
+{
+}
+
+static void
+bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_FAIL:
+ case RX_E_STOPPED:
+ bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
+ rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
+ break;
+
+ case RX_E_STARTED:
+ bna_rx_enet_stop(rx);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void bna_rx_sm_start_wait(struct bna_rx *rx,
+ enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_STOP:
+ bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
+ break;
+
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ break;
+
+ case RX_E_STARTED:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
+{
+ rx->rx_post_cbfn(rx->bna->bnad, rx);
+ bna_rxf_start(&rx->rxf);
+}
+
+void
+bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
+{
+}
+
+static void
+bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
+ bna_rxf_fail(&rx->rxf);
+ call_rx_stall_cbfn(rx);
+ rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
+ break;
+
+ case RX_E_RXF_STARTED:
+ bna_rxf_stop(&rx->rxf);
+ break;
+
+ case RX_E_RXF_STOPPED:
+ bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
+ call_rx_stall_cbfn(rx);
+ bna_rx_enet_stop(rx);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+
+}
+
+void
+bna_rx_sm_started_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe_rxp;
+ int is_regular = (rx->type == BNA_RX_T_REGULAR);
+
+ /* Start IB */
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
+ }
+
+ bna_ethport_cb_rx_started(&rx->bna->ethport);
+}
+
+static void
+bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_STOP:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
+ bna_ethport_cb_rx_stopped(&rx->bna->ethport);
+ bna_rxf_stop(&rx->rxf);
+ break;
+
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_failed);
+ bna_ethport_cb_rx_stopped(&rx->bna->ethport);
+ bna_rxf_fail(&rx->rxf);
+ call_rx_stall_cbfn(rx);
+ rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
+ enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_STOP:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
+ break;
+
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_failed);
+ bna_rxf_fail(&rx->rxf);
+ call_rx_stall_cbfn(rx);
+ rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
+ break;
+
+ case RX_E_RXF_STARTED:
+ bfa_fsm_set_state(rx, bna_rx_sm_started);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+void
+bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
+{
+}
+
+void
+bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_FAIL:
+ case RX_E_RXF_STOPPED:
+ /* No-op */
+ break;
+
+ case RX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void
+bna_rx_sm_failed_entry(struct bna_rx *rx)
+{
+}
+
+static void
+bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_START:
+ bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
+ break;
+
+ case RX_E_STOP:
+ bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
+ break;
+
+ case RX_E_FAIL:
+ case RX_E_RXF_STARTED:
+ case RX_E_RXF_STOPPED:
+ /* No-op */
+ break;
+
+ case RX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+} }
+
+static void
+bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
+{
+}
+
+static void
+bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_STOP:
+ bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
+ break;
+
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_failed);
+ break;
+
+ case RX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void
+bna_bfi_rx_enet_start(struct bna_rx *rx)
+{
+ struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
+ struct bna_rxp *rxp = NULL;
+ struct bna_rxq *q0 = NULL, *q1 = NULL;
+ struct list_head *rxp_qe;
+ int i;
+
+ bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
+ cfg_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
+
+ cfg_req->num_queue_sets = rx->num_paths;
+ for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
+ i < rx->num_paths;
+ i++, rxp_qe = bfa_q_next(rxp_qe)) {
+ rxp = (struct bna_rxp *)rxp_qe;
+
+ GET_RXQS(rxp, q0, q1);
+ switch (rxp->type) {
+ case BNA_RXP_SLR:
+ case BNA_RXP_HDS:
+ /* Small RxQ */
+ bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
+ &q1->qpt);
+ cfg_req->q_cfg[i].qs.rx_buffer_size =
+ htons((u16)q1->buffer_size);
+ /* Fall through */
+
+ case BNA_RXP_SINGLE:
+ /* Large/Single RxQ */
+ bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
+ &q0->qpt);
+ q0->buffer_size =
+ bna_enet_mtu_get(&rx->bna->enet);
+ cfg_req->q_cfg[i].ql.rx_buffer_size =
+ htons((u16)q0->buffer_size);
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+
+ bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
+ &rxp->cq.qpt);
+
+ cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
+ rxp->cq.ib.ib_seg_host_addr.lsb;
+ cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
+ rxp->cq.ib.ib_seg_host_addr.msb;
+ cfg_req->q_cfg[i].ib.intr.msix_index =
+ htons((u16)rxp->cq.ib.intr_vector);
+ }
+
+ cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
+ cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
+ cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
+ cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
+ cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
+ ? BNA_STATUS_T_ENABLED :
+ BNA_STATUS_T_DISABLED;
+ cfg_req->ib_cfg.coalescing_timeout =
+ htonl((u32)rxp->cq.ib.coalescing_timeo);
+ cfg_req->ib_cfg.inter_pkt_timeout =
+ htonl((u32)rxp->cq.ib.interpkt_timeo);
+ cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
+
+ switch (rxp->type) {
+ case BNA_RXP_SLR:
+ cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
+ break;
+
+ case BNA_RXP_HDS:
+ cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
+ cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
+ cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
+ cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
+ break;
+
+ case BNA_RXP_SINGLE:
+ cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+ cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
+
+ bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
+ bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
+}
+
+static void
+bna_bfi_rx_enet_stop(struct bna_rx *rx)
+{
+ struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
+ bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
+ &req->mh);
+ bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
+}
+
+static void
+bna_rx_enet_stop(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe_rxp;
+
+ /* Stop IB */
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ bna_ib_stop(rx->bna, &rxp->cq.ib);
+ }
+
+ bna_bfi_rx_enet_stop(rx);
+}
+
+static int
+bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
+{
+ if ((rx_mod->rx_free_count == 0) ||
+ (rx_mod->rxp_free_count == 0) ||
+ (rx_mod->rxq_free_count == 0))
+ return 0;
+
+ if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
+ if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
+ (rx_mod->rxq_free_count < rx_cfg->num_paths))
+ return 0;
+ } else {
+ if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
+ (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct bna_rxq *
+bna_rxq_get(struct bna_rx_mod *rx_mod)
+{
+ struct bna_rxq *rxq = NULL;
+ struct list_head *qe = NULL;
+
+ bfa_q_deq(&rx_mod->rxq_free_q, &qe);
+ rx_mod->rxq_free_count--;
+ rxq = (struct bna_rxq *)qe;
+ bfa_q_qe_init(&rxq->qe);
+
+ return rxq;
+}
+
+static void
+bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
+{
+ bfa_q_qe_init(&rxq->qe);
+ list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
+ rx_mod->rxq_free_count++;
+}
+
+static struct bna_rxp *
+bna_rxp_get(struct bna_rx_mod *rx_mod)
+{
+ struct list_head *qe = NULL;
+ struct bna_rxp *rxp = NULL;
+
+ bfa_q_deq(&rx_mod->rxp_free_q, &qe);
+ rx_mod->rxp_free_count--;
+ rxp = (struct bna_rxp *)qe;
+ bfa_q_qe_init(&rxp->qe);
+
+ return rxp;
+}
+
+static void
+bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
+{
+ bfa_q_qe_init(&rxp->qe);
+ list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
+ rx_mod->rxp_free_count++;
+}
+
+static struct bna_rx *
+bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
+{
+ struct list_head *qe = NULL;
+ struct bna_rx *rx = NULL;
+
+ if (type == BNA_RX_T_REGULAR) {
+ bfa_q_deq(&rx_mod->rx_free_q, &qe);
+ } else
+ bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
+
+ rx_mod->rx_free_count--;
+ rx = (struct bna_rx *)qe;
+ bfa_q_qe_init(&rx->qe);
+ list_add_tail(&rx->qe, &rx_mod->rx_active_q);
+ rx->type = type;
+
+ return rx;
+}
+
+static void
+bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
+{
+ struct list_head *prev_qe = NULL;
+ struct list_head *qe;
+
+ bfa_q_qe_init(&rx->qe);
+
+ list_for_each(qe, &rx_mod->rx_free_q) {
+ if (((struct bna_rx *)qe)->rid < rx->rid)
+ prev_qe = qe;
+ else
+ break;
+ }
+
+ if (prev_qe == NULL) {
+ /* This is the first entry */
+ bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
+ } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
+ /* This is the last entry */
+ list_add_tail(&rx->qe, &rx_mod->rx_free_q);
+ } else {
+ /* Somewhere in the middle */
+ bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
+ bfa_q_prev(&rx->qe) = prev_qe;
+ bfa_q_next(prev_qe) = &rx->qe;
+ bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
+ }
+
+ rx_mod->rx_free_count++;
+}
+
+static void
+bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
+ struct bna_rxq *q1)
+{
+ switch (rxp->type) {
+ case BNA_RXP_SINGLE:
+ rxp->rxq.single.only = q0;
+ rxp->rxq.single.reserved = NULL;
+ break;
+ case BNA_RXP_SLR:
+ rxp->rxq.slr.large = q0;
+ rxp->rxq.slr.small = q1;
+ break;
+ case BNA_RXP_HDS:
+ rxp->rxq.hds.data = q0;
+ rxp->rxq.hds.hdr = q1;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+bna_rxq_qpt_setup(struct bna_rxq *rxq,
+ struct bna_rxp *rxp,
+ u32 page_count,
+ u32 page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
+ rxq->qpt.page_count = page_count;
+ rxq->qpt.page_size = page_size;
+
+ rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < rxq->qpt.page_count; i++) {
+ rxq->rcb->sw_qpt[i] = page_mem[i].kva;
+ ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+ }
+}
+
+static void
+bna_rxp_cqpt_setup(struct bna_rxp *rxp,
+ u32 page_count,
+ u32 page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
+ rxp->cq.qpt.page_count = page_count;
+ rxp->cq.qpt.page_size = page_size;
+
+ rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < rxp->cq.qpt.page_count; i++) {
+ rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
+
+ ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+ }
+}
+
+static void
+bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
+{
+ struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
+
+ bfa_wc_down(&rx_mod->rx_stop_wc);
+}
+
+static void
+bna_rx_mod_cb_rx_stopped_all(void *arg)
+{
+ struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
+
+ if (rx_mod->stop_cbfn)
+ rx_mod->stop_cbfn(&rx_mod->bna->enet);
+ rx_mod->stop_cbfn = NULL;
+}
+
+static void
+bna_rx_start(struct bna_rx *rx)
+{
+ rx->rx_flags |= BNA_RX_F_ENET_STARTED;
+ if (rx->rx_flags & BNA_RX_F_ENABLED)
+ bfa_fsm_send_event(rx, RX_E_START);
+}
+
+static void
+bna_rx_stop(struct bna_rx *rx)
+{
+ rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
+ if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
+ bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
+ else {
+ rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
+ rx->stop_cbarg = &rx->bna->rx_mod;
+ bfa_fsm_send_event(rx, RX_E_STOP);
+ }
+}
+
+static void
+bna_rx_fail(struct bna_rx *rx)
+{
+ /* Indicate Enet is not enabled, and failed */
+ rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
+ bfa_fsm_send_event(rx, RX_E_FAIL);
+}
+
+void
+bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
+ if (type == BNA_RX_T_LOOPBACK)
+ rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ if (rx->type == type)
+ bna_rx_start(rx);
+ }
+}
+
+void
+bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
+ rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
+
+ rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
+
+ bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ if (rx->type == type) {
+ bfa_wc_up(&rx_mod->rx_stop_wc);
+ bna_rx_stop(rx);
+ }
+ }
+
+ bfa_wc_wait(&rx_mod->rx_stop_wc);
+}
+
+void
+bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
+ rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ bna_rx_fail(rx);
+ }
+}
+
+void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int index;
+ struct bna_rx *rx_ptr;
+ struct bna_rxp *rxp_ptr;
+ struct bna_rxq *rxq_ptr;
+
+ rx_mod->bna = bna;
+ rx_mod->flags = 0;
+
+ rx_mod->rx = (struct bna_rx *)
+ res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
+ rx_mod->rxp = (struct bna_rxp *)
+ res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
+ rx_mod->rxq = (struct bna_rxq *)
+ res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ /* Initialize the queues */
+ INIT_LIST_HEAD(&rx_mod->rx_free_q);
+ rx_mod->rx_free_count = 0;
+ INIT_LIST_HEAD(&rx_mod->rxq_free_q);
+ rx_mod->rxq_free_count = 0;
+ INIT_LIST_HEAD(&rx_mod->rxp_free_q);
+ rx_mod->rxp_free_count = 0;
+ INIT_LIST_HEAD(&rx_mod->rx_active_q);
+
+ /* Build RX queues */
+ for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
+ rx_ptr = &rx_mod->rx[index];
+
+ bfa_q_qe_init(&rx_ptr->qe);
+ INIT_LIST_HEAD(&rx_ptr->rxp_q);
+ rx_ptr->bna = NULL;
+ rx_ptr->rid = index;
+ rx_ptr->stop_cbfn = NULL;
+ rx_ptr->stop_cbarg = NULL;
+
+ list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
+ rx_mod->rx_free_count++;
+ }
+
+ /* build RX-path queue */
+ for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
+ rxp_ptr = &rx_mod->rxp[index];
+ bfa_q_qe_init(&rxp_ptr->qe);
+ list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
+ rx_mod->rxp_free_count++;
+ }
+
+ /* build RXQ queue */
+ for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
+ rxq_ptr = &rx_mod->rxq[index];
+ bfa_q_qe_init(&rxq_ptr->qe);
+ list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
+ rx_mod->rxq_free_count++;
+ }
+}
+
+void
+bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
+{
+ struct list_head *qe;
+ int i;
+
+ i = 0;
+ list_for_each(qe, &rx_mod->rx_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &rx_mod->rxp_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &rx_mod->rxq_free_q)
+ i++;
+
+ rx_mod->bna = NULL;
+}
+
+void
+bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
+ struct bna_rxp *rxp = NULL;
+ struct bna_rxq *q0 = NULL, *q1 = NULL;
+ struct list_head *rxp_qe;
+ int i;
+
+ bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
+ sizeof(struct bfi_enet_rx_cfg_rsp));
+
+ rx->hw_id = cfg_rsp->hw_id;
+
+ for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
+ i < rx->num_paths;
+ i++, rxp_qe = bfa_q_next(rxp_qe)) {
+ rxp = (struct bna_rxp *)rxp_qe;
+ GET_RXQS(rxp, q0, q1);
+
+ /* Setup doorbells */
+ rxp->cq.ccb->i_dbell->doorbell_addr =
+ rx->bna->pcidev.pci_bar_kva
+ + ntohl(cfg_rsp->q_handles[i].i_dbell);
+ rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
+ q0->rcb->q_dbell =
+ rx->bna->pcidev.pci_bar_kva
+ + ntohl(cfg_rsp->q_handles[i].ql_dbell);
+ q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
+ if (q1) {
+ q1->rcb->q_dbell =
+ rx->bna->pcidev.pci_bar_kva
+ + ntohl(cfg_rsp->q_handles[i].qs_dbell);
+ q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
+ }
+
+ /* Initialize producer/consumer indexes */
+ (*rxp->cq.ccb->hw_producer_index) = 0;
+ rxp->cq.ccb->producer_index = 0;
+ q0->rcb->producer_index = q0->rcb->consumer_index = 0;
+ if (q1)
+ q1->rcb->producer_index = q1->rcb->consumer_index = 0;
+ }
+
+ bfa_fsm_send_event(rx, RX_E_STARTED);
+}
+
+void
+bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
+{
+ bfa_fsm_send_event(rx, RX_E_STOPPED);
+}
+
+void
+bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
+{
+ u32 cq_size, hq_size, dq_size;
+ u32 cpage_count, hpage_count, dpage_count;
+ struct bna_mem_info *mem_info;
+ u32 cq_depth;
+ u32 hq_depth;
+ u32 dq_depth;
+
+ dq_depth = q_cfg->q_depth;
+ hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+ cq_depth = dq_depth + hq_depth;
+
+ BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ cq_size = cq_depth * BFI_CQ_WI_SIZE;
+ cq_size = ALIGN(cq_size, PAGE_SIZE);
+ cpage_count = SIZE_TO_PAGES(cq_size);
+
+ BNA_TO_POWER_OF_2_HIGH(dq_depth);
+ dq_size = dq_depth * BFI_RXQ_WI_SIZE;
+ dq_size = ALIGN(dq_size, PAGE_SIZE);
+ dpage_count = SIZE_TO_PAGES(dq_size);
+
+ if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
+ BNA_TO_POWER_OF_2_HIGH(hq_depth);
+ hq_size = hq_depth * BFI_RXQ_WI_SIZE;
+ hq_size = ALIGN(hq_size, PAGE_SIZE);
+ hpage_count = SIZE_TO_PAGES(hq_size);
+ } else
+ hpage_count = 0;
+
+ res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_ccb);
+ mem_info->num = q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_rcb);
+ mem_info->num = BNA_GET_RXQS(q_cfg);
+
+ res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = cpage_count * sizeof(void *);
+ mem_info->num = q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = cpage_count * q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = dpage_count * sizeof(void *);
+ mem_info->num = q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = dpage_count * q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
+
+ res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = hpage_count * sizeof(void *);
+ mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
+
+ res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = (hpage_count ? PAGE_SIZE : 0);
+ mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
+
+ res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = BFI_IBIDX_SIZE;
+ mem_info->num = q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = BFI_ENET_RSS_RIT_MAX;
+ mem_info->num = 1;
+
+ res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
+ res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
+ res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
+}
+
+struct bna_rx *
+bna_rx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_rx_config *rx_cfg,
+ const struct bna_rx_event_cbfn *rx_cbfn,
+ struct bna_res_info *res_info,
+ void *priv)
+{
+ struct bna_rx_mod *rx_mod = &bna->rx_mod;
+ struct bna_rx *rx;
+ struct bna_rxp *rxp;
+ struct bna_rxq *q0;
+ struct bna_rxq *q1;
+ struct bna_intr_info *intr_info;
+ u32 page_count;
+ struct bna_mem_descr *ccb_mem;
+ struct bna_mem_descr *rcb_mem;
+ struct bna_mem_descr *unmapq_mem;
+ struct bna_mem_descr *cqpt_mem;
+ struct bna_mem_descr *cswqpt_mem;
+ struct bna_mem_descr *cpage_mem;
+ struct bna_mem_descr *hqpt_mem;
+ struct bna_mem_descr *dqpt_mem;
+ struct bna_mem_descr *hsqpt_mem;
+ struct bna_mem_descr *dsqpt_mem;
+ struct bna_mem_descr *hpage_mem;
+ struct bna_mem_descr *dpage_mem;
+ int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
+ int dpage_count, hpage_count, rcb_idx;
+
+ if (!bna_rx_res_check(rx_mod, rx_cfg))
+ return NULL;
+
+ intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
+ ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
+ rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
+ unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+ cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
+ cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
+ cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
+ hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
+ dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
+ hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
+ dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
+ hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
+ dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
+
+ page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+
+ dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+
+ hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+
+ rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
+ rx->bna = bna;
+ rx->rx_flags = 0;
+ INIT_LIST_HEAD(&rx->rxp_q);
+ rx->stop_cbfn = NULL;
+ rx->stop_cbarg = NULL;
+ rx->priv = priv;
+
+ rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
+ rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
+ rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
+ rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
+ rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
+ /* Following callbacks are mandatory */
+ rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
+ rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
+
+ if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
+ switch (rx->type) {
+ case BNA_RX_T_REGULAR:
+ if (!(rx->bna->rx_mod.flags &
+ BNA_RX_MOD_F_ENET_LOOPBACK))
+ rx->rx_flags |= BNA_RX_F_ENET_STARTED;
+ break;
+ case BNA_RX_T_LOOPBACK:
+ if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
+ rx->rx_flags |= BNA_RX_F_ENET_STARTED;
+ break;
+ }
+ }
+
+ rx->num_paths = rx_cfg->num_paths;
+ for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
+ rxp = bna_rxp_get(rx_mod);
+ list_add_tail(&rxp->qe, &rx->rxp_q);
+ rxp->type = rx_cfg->rxp_type;
+ rxp->rx = rx;
+ rxp->cq.rx = rx;
+
+ q0 = bna_rxq_get(rx_mod);
+ if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
+ q1 = NULL;
+ else
+ q1 = bna_rxq_get(rx_mod);
+
+ if (1 == intr_info->num)
+ rxp->vector = intr_info->idl[0].vector;
+ else
+ rxp->vector = intr_info->idl[i].vector;
+
+ /* Setup IB */
+
+ rxp->cq.ib.ib_seg_host_addr.lsb =
+ res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
+ rxp->cq.ib.ib_seg_host_addr.msb =
+ res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
+ rxp->cq.ib.ib_seg_host_addr_kva =
+ res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
+ rxp->cq.ib.intr_type = intr_info->intr_type;
+ if (intr_info->intr_type == BNA_INTR_T_MSIX)
+ rxp->cq.ib.intr_vector = rxp->vector;
+ else
+ rxp->cq.ib.intr_vector = (1 << rxp->vector);
+ rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
+ rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
+ rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
+
+ bna_rxp_add_rxqs(rxp, q0, q1);
+
+ /* Setup large Q */
+
+ q0->rx = rx;
+ q0->rxp = rxp;
+
+ q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
+ q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
+ rcb_idx++;
+ q0->rcb->q_depth = rx_cfg->q_depth;
+ q0->rcb->rxq = q0;
+ q0->rcb->bnad = bna->bnad;
+ q0->rcb->id = 0;
+ q0->rx_packets = q0->rx_bytes = 0;
+ q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
+
+ bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
+ &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
+ q0->rcb->page_idx = dpage_idx;
+ q0->rcb->page_count = dpage_count;
+ dpage_idx += dpage_count;
+
+ if (rx->rcb_setup_cbfn)
+ rx->rcb_setup_cbfn(bnad, q0->rcb);
+
+ /* Setup small Q */
+
+ if (q1) {
+ q1->rx = rx;
+ q1->rxp = rxp;
+
+ q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
+ q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
+ rcb_idx++;
+ q1->rcb->q_depth = rx_cfg->q_depth;
+ q1->rcb->rxq = q1;
+ q1->rcb->bnad = bna->bnad;
+ q1->rcb->id = 1;
+ q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
+ rx_cfg->hds_config.forced_offset
+ : rx_cfg->small_buff_size;
+ q1->rx_packets = q1->rx_bytes = 0;
+ q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
+
+ bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
+ &hqpt_mem[i], &hsqpt_mem[i],
+ &hpage_mem[hpage_idx]);
+ q1->rcb->page_idx = hpage_idx;
+ q1->rcb->page_count = hpage_count;
+ hpage_idx += hpage_count;
+
+ if (rx->rcb_setup_cbfn)
+ rx->rcb_setup_cbfn(bnad, q1->rcb);
+ }
+
+ /* Setup CQ */
+
+ rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
+ rxp->cq.ccb->q_depth = rx_cfg->q_depth +
+ ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+ 0 : rx_cfg->q_depth);
+ rxp->cq.ccb->cq = &rxp->cq;
+ rxp->cq.ccb->rcb[0] = q0->rcb;
+ q0->rcb->ccb = rxp->cq.ccb;
+ if (q1) {
+ rxp->cq.ccb->rcb[1] = q1->rcb;
+ q1->rcb->ccb = rxp->cq.ccb;
+ }
+ rxp->cq.ccb->hw_producer_index =
+ (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
+ rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
+ rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
+ rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
+ rxp->cq.ccb->rx_coalescing_timeo =
+ rxp->cq.ib.coalescing_timeo;
+ rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
+ rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
+ rxp->cq.ccb->bnad = bna->bnad;
+ rxp->cq.ccb->id = i;
+
+ bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
+ &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
+ rxp->cq.ccb->page_idx = cpage_idx;
+ rxp->cq.ccb->page_count = page_count;
+ cpage_idx += page_count;
+
+ if (rx->ccb_setup_cbfn)
+ rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
+ }
+
+ rx->hds_cfg = rx_cfg->hds_config;
+
+ bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
+
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+
+ rx_mod->rid_mask |= (1 << rx->rid);
+
+ return rx;
+}
+
+void
+bna_rx_destroy(struct bna_rx *rx)
+{
+ struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
+ struct bna_rxq *q0 = NULL;
+ struct bna_rxq *q1 = NULL;
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+
+ bna_rxf_uninit(&rx->rxf);
+
+ while (!list_empty(&rx->rxp_q)) {
+ bfa_q_deq(&rx->rxp_q, &rxp);
+ GET_RXQS(rxp, q0, q1);
+ if (rx->rcb_destroy_cbfn)
+ rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
+ q0->rcb = NULL;
+ q0->rxp = NULL;
+ q0->rx = NULL;
+ bna_rxq_put(rx_mod, q0);
+
+ if (q1) {
+ if (rx->rcb_destroy_cbfn)
+ rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
+ q1->rcb = NULL;
+ q1->rxp = NULL;
+ q1->rx = NULL;
+ bna_rxq_put(rx_mod, q1);
+ }
+ rxp->rxq.slr.large = NULL;
+ rxp->rxq.slr.small = NULL;
+
+ if (rx->ccb_destroy_cbfn)
+ rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
+ rxp->cq.ccb = NULL;
+ rxp->rx = NULL;
+ bna_rxp_put(rx_mod, rxp);
+ }
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ if (qe == &rx->qe) {
+ list_del(&rx->qe);
+ bfa_q_qe_init(&rx->qe);
+ break;
+ }
+ }
+
+ rx_mod->rid_mask &= ~(1 << rx->rid);
+
+ rx->bna = NULL;
+ rx->priv = NULL;
+ bna_rx_put(rx_mod, rx);
+}
+
+void
+bna_rx_enable(struct bna_rx *rx)
+{
+ if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
+ return;
+
+ rx->rx_flags |= BNA_RX_F_ENABLED;
+ if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
+ bfa_fsm_send_event(rx, RX_E_START);
+}
+
+void
+bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_rx *))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ /* h/w should not be accessed. Treat we're stopped */
+ (*cbfn)(rx->bna->bnad, rx);
+ } else {
+ rx->stop_cbfn = cbfn;
+ rx->stop_cbarg = rx->bna->bnad;
+
+ rx->rx_flags &= ~BNA_RX_F_ENABLED;
+
+ bfa_fsm_send_event(rx, RX_E_STOP);
+ }
+}
+
+void
+bna_rx_cleanup_complete(struct bna_rx *rx)
+{
+ bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
+}
+
+enum bna_cb_status
+bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
+ enum bna_rxmode bitmask,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int need_hw_config = 0;
+
+ /* Error checks */
+
+ if (is_promisc_enable(new_mode, bitmask)) {
+ /* If promisc mode is already enabled elsewhere in the system */
+ if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
+ (rx->bna->promisc_rid != rxf->rx->rid))
+ goto err_return;
+
+ /* If default mode is already enabled in the system */
+ if (rx->bna->default_mode_rid != BFI_INVALID_RID)
+ goto err_return;
+
+ /* Trying to enable promiscuous and default mode together */
+ if (is_default_enable(new_mode, bitmask))
+ goto err_return;
+ }
+
+ if (is_default_enable(new_mode, bitmask)) {
+ /* If default mode is already enabled elsewhere in the system */
+ if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
+ (rx->bna->default_mode_rid != rxf->rx->rid)) {
+ goto err_return;
+ }
+
+ /* If promiscuous mode is already enabled in the system */
+ if (rx->bna->promisc_rid != BFI_INVALID_RID)
+ goto err_return;
+ }
+
+ /* Process the commands */
+
+ if (is_promisc_enable(new_mode, bitmask)) {
+ if (bna_rxf_promisc_enable(rxf))
+ need_hw_config = 1;
+ } else if (is_promisc_disable(new_mode, bitmask)) {
+ if (bna_rxf_promisc_disable(rxf))
+ need_hw_config = 1;
+ }
+
+ if (is_allmulti_enable(new_mode, bitmask)) {
+ if (bna_rxf_allmulti_enable(rxf))
+ need_hw_config = 1;
+ } else if (is_allmulti_disable(new_mode, bitmask)) {
+ if (bna_rxf_allmulti_disable(rxf))
+ need_hw_config = 1;
+ }
+
+ /* Trigger h/w if needed */
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ } else if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ return BNA_CB_FAIL;
+}
+
+void
+bna_rx_vlanfilter_enable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
+ rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
+ rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
+void
+bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
+ bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
+ }
+}
+
+void
+bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
+{
+ int i, j;
+
+ for (i = 0; i < BNA_LOAD_T_MAX; i++)
+ for (j = 0; j < BNA_BIAS_T_MAX; j++)
+ bna->rx_mod.dim_vector[i][j] = vector[i][j];
+}
+
+void
+bna_rx_dim_update(struct bna_ccb *ccb)
+{
+ struct bna *bna = ccb->cq->rx->bna;
+ u32 load, bias;
+ u32 pkt_rt, small_rt, large_rt;
+ u8 coalescing_timeo;
+
+ if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
+ (ccb->pkt_rate.large_pkt_cnt == 0))
+ return;
+
+ /* Arrive at preconfigured coalescing timeo value based on pkt rate */
+
+ small_rt = ccb->pkt_rate.small_pkt_cnt;
+ large_rt = ccb->pkt_rate.large_pkt_cnt;
+
+ pkt_rt = small_rt + large_rt;
+
+ if (pkt_rt < BNA_PKT_RATE_10K)
+ load = BNA_LOAD_T_LOW_4;
+ else if (pkt_rt < BNA_PKT_RATE_20K)
+ load = BNA_LOAD_T_LOW_3;
+ else if (pkt_rt < BNA_PKT_RATE_30K)
+ load = BNA_LOAD_T_LOW_2;
+ else if (pkt_rt < BNA_PKT_RATE_40K)
+ load = BNA_LOAD_T_LOW_1;
+ else if (pkt_rt < BNA_PKT_RATE_50K)
+ load = BNA_LOAD_T_HIGH_1;
+ else if (pkt_rt < BNA_PKT_RATE_60K)
+ load = BNA_LOAD_T_HIGH_2;
+ else if (pkt_rt < BNA_PKT_RATE_80K)
+ load = BNA_LOAD_T_HIGH_3;
+ else
+ load = BNA_LOAD_T_HIGH_4;
+
+ if (small_rt > (large_rt << 1))
+ bias = 0;
+ else
+ bias = 1;
+
+ ccb->pkt_rate.small_pkt_cnt = 0;
+ ccb->pkt_rate.large_pkt_cnt = 0;
+
+ coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
+ ccb->rx_coalescing_timeo = coalescing_timeo;
+
+ /* Set it to IB */
+ bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
+}
+
+const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+ {12, 12},
+ {6, 10},
+ {5, 10},
+ {4, 8},
+ {3, 6},
+ {3, 6},
+ {2, 4},
+ {1, 2},
+};
+
+/**
+ * TX
+ */
+#define call_tx_stop_cbfn(tx) \
+do { \
+ if ((tx)->stop_cbfn) { \
+ void (*cbfn)(void *, struct bna_tx *); \
+ void *cbarg; \
+ cbfn = (tx)->stop_cbfn; \
+ cbarg = (tx)->stop_cbarg; \
+ (tx)->stop_cbfn = NULL; \
+ (tx)->stop_cbarg = NULL; \
+ cbfn(cbarg, (tx)); \
+ } \
+} while (0)
+
+#define call_tx_prio_change_cbfn(tx) \
+do { \
+ if ((tx)->prio_change_cbfn) { \
+ void (*cbfn)(struct bnad *, struct bna_tx *); \
+ cbfn = (tx)->prio_change_cbfn; \
+ (tx)->prio_change_cbfn = NULL; \
+ cbfn((tx)->bna->bnad, (tx)); \
+ } \
+} while (0)
+
+static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
+static void bna_bfi_tx_enet_start(struct bna_tx *tx);
+static void bna_tx_enet_stop(struct bna_tx *tx);
+
+enum bna_tx_event {
+ TX_E_START = 1,
+ TX_E_STOP = 2,
+ TX_E_FAIL = 3,
+ TX_E_STARTED = 4,
+ TX_E_STOPPED = 5,
+ TX_E_PRIO_CHANGE = 6,
+ TX_E_CLEANUP_DONE = 7,
+ TX_E_BW_UPDATE = 8,
+};
+
+bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
+ enum bna_tx_event);
+
+static void
+bna_tx_sm_stopped_entry(struct bna_tx *tx)
+{
+ call_tx_stop_cbfn(tx);
+}
+
+static void
+bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_START:
+ bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
+ break;
+
+ case TX_E_STOP:
+ call_tx_stop_cbfn(tx);
+ break;
+
+ case TX_E_FAIL:
+ /* No-op */
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ call_tx_prio_change_cbfn(tx);
+ break;
+
+ case TX_E_BW_UPDATE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_start_wait_entry(struct bna_tx *tx)
+{
+ bna_bfi_tx_enet_start(tx);
+}
+
+static void
+bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_STOP:
+ tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
+ bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
+ break;
+
+ case TX_E_FAIL:
+ tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_STARTED:
+ if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
+ tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
+ BNA_TX_F_BW_UPDATED);
+ bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
+ } else
+ bfa_fsm_set_state(tx, bna_tx_sm_started);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ tx->flags |= BNA_TX_F_PRIO_CHANGED;
+ break;
+
+ case TX_E_BW_UPDATE:
+ tx->flags |= BNA_TX_F_BW_UPDATED;
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_started_entry(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+ int is_regular = (tx->type == BNA_TX_T_REGULAR);
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ txq->tcb->priority = txq->priority;
+ /* Start IB */
+ bna_ib_start(tx->bna, &txq->ib, is_regular);
+ }
+ tx->tx_resume_cbfn(tx->bna->bnad, tx);
+}
+
+static void
+bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
+ tx->tx_stall_cbfn(tx->bna->bnad, tx);
+ bna_tx_enet_stop(tx);
+ break;
+
+ case TX_E_FAIL:
+ bfa_fsm_set_state(tx, bna_tx_sm_failed);
+ tx->tx_stall_cbfn(tx->bna->bnad, tx);
+ tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ case TX_E_BW_UPDATE:
+ bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
+{
+}
+
+static void
+bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_FAIL:
+ case TX_E_STOPPED:
+ bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
+ tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
+ break;
+
+ case TX_E_STARTED:
+ /**
+ * We are here due to start_wait -> stop_wait transition on
+ * TX_E_STOP event
+ */
+ bna_tx_enet_stop(tx);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ case TX_E_BW_UPDATE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
+{
+}
+
+static void
+bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_FAIL:
+ case TX_E_PRIO_CHANGE:
+ case TX_E_BW_UPDATE:
+ /* No-op */
+ break;
+
+ case TX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
+{
+ tx->tx_stall_cbfn(tx->bna->bnad, tx);
+ bna_tx_enet_stop(tx);
+}
+
+static void
+bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
+ break;
+
+ case TX_E_FAIL:
+ bfa_fsm_set_state(tx, bna_tx_sm_failed);
+ call_tx_prio_change_cbfn(tx);
+ tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
+ break;
+
+ case TX_E_STOPPED:
+ bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ case TX_E_BW_UPDATE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
+{
+ call_tx_prio_change_cbfn(tx);
+ tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
+}
+
+static void
+bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
+ break;
+
+ case TX_E_FAIL:
+ bfa_fsm_set_state(tx, bna_tx_sm_failed);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ case TX_E_BW_UPDATE:
+ /* No-op */
+ break;
+
+ case TX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_failed_entry(struct bna_tx *tx)
+{
+}
+
+static void
+bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_START:
+ bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
+ break;
+
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
+ break;
+
+ case TX_E_FAIL:
+ /* No-op */
+ break;
+
+ case TX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
+{
+}
+
+static void
+bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
+ break;
+
+ case TX_E_FAIL:
+ bfa_fsm_set_state(tx, bna_tx_sm_failed);
+ break;
+
+ case TX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
+ break;
+
+ case TX_E_BW_UPDATE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_bfi_tx_enet_start(struct bna_tx *tx)
+{
+ struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
+ struct bna_txq *txq = NULL;
+ struct list_head *qe;
+ int i;
+
+ bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
+ cfg_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
+
+ cfg_req->num_queues = tx->num_txq;
+ for (i = 0, qe = bfa_q_first(&tx->txq_q);
+ i < tx->num_txq;
+ i++, qe = bfa_q_next(qe)) {
+ txq = (struct bna_txq *)qe;
+
+ bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
+ cfg_req->q_cfg[i].q.priority = txq->priority;
+
+ cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
+ txq->ib.ib_seg_host_addr.lsb;
+ cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
+ txq->ib.ib_seg_host_addr.msb;
+ cfg_req->q_cfg[i].ib.intr.msix_index =
+ htons((u16)txq->ib.intr_vector);
+ }
+
+ cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
+ cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
+ cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
+ cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
+ cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
+ ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
+ cfg_req->ib_cfg.coalescing_timeout =
+ htonl((u32)txq->ib.coalescing_timeo);
+ cfg_req->ib_cfg.inter_pkt_timeout =
+ htonl((u32)txq->ib.interpkt_timeo);
+ cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
+
+ cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
+ cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
+ cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
+ cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
+
+ bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
+ bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
+}
+
+static void
+bna_bfi_tx_enet_stop(struct bna_tx *tx)
+{
+ struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
+ bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
+ &req->mh);
+ bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
+}
+
+static void
+bna_tx_enet_stop(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ /* Stop IB */
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_stop(tx->bna, &txq->ib);
+ }
+
+ bna_bfi_tx_enet_stop(tx);
+}
+
+static void
+bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ txq->qpt.kv_qpt_ptr = qpt_mem->kva;
+ txq->qpt.page_count = page_count;
+ txq->qpt.page_size = page_size;
+
+ txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < page_count; i++) {
+ txq->tcb->sw_qpt[i] = page_mem[i].kva;
+
+ ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+ }
+}
+
+static struct bna_tx *
+bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
+{
+ struct list_head *qe = NULL;
+ struct bna_tx *tx = NULL;
+
+ if (list_empty(&tx_mod->tx_free_q))
+ return NULL;
+ if (type == BNA_TX_T_REGULAR) {
+ bfa_q_deq(&tx_mod->tx_free_q, &qe);
+ } else {
+ bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
+ }
+ tx = (struct bna_tx *)qe;
+ bfa_q_qe_init(&tx->qe);
+ tx->type = type;
+
+ return tx;
+}
+
+static void
+bna_tx_free(struct bna_tx *tx)
+{
+ struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
+ struct bna_txq *txq;
+ struct list_head *prev_qe;
+ struct list_head *qe;
+
+ while (!list_empty(&tx->txq_q)) {
+ bfa_q_deq(&tx->txq_q, &txq);
+ bfa_q_qe_init(&txq->qe);
+ txq->tcb = NULL;
+ txq->tx = NULL;
+ list_add_tail(&txq->qe, &tx_mod->txq_free_q);
+ }
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ if (qe == &tx->qe) {
+ list_del(&tx->qe);
+ bfa_q_qe_init(&tx->qe);
+ break;
+ }
+ }
+
+ tx->bna = NULL;
+ tx->priv = NULL;
+
+ prev_qe = NULL;
+ list_for_each(qe, &tx_mod->tx_free_q) {
+ if (((struct bna_tx *)qe)->rid < tx->rid)
+ prev_qe = qe;
+ else {
+ break;
+ }
+ }
+
+ if (prev_qe == NULL) {
+ /* This is the first entry */
+ bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
+ } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
+ /* This is the last entry */
+ list_add_tail(&tx->qe, &tx_mod->tx_free_q);
+ } else {
+ /* Somewhere in the middle */
+ bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
+ bfa_q_prev(&tx->qe) = prev_qe;
+ bfa_q_next(prev_qe) = &tx->qe;
+ bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
+ }
+}
+
+static void
+bna_tx_start(struct bna_tx *tx)
+{
+ tx->flags |= BNA_TX_F_ENET_STARTED;
+ if (tx->flags & BNA_TX_F_ENABLED)
+ bfa_fsm_send_event(tx, TX_E_START);
+}
+
+static void
+bna_tx_stop(struct bna_tx *tx)
+{
+ tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
+ tx->stop_cbarg = &tx->bna->tx_mod;
+
+ tx->flags &= ~BNA_TX_F_ENET_STARTED;
+ bfa_fsm_send_event(tx, TX_E_STOP);
+}
+
+static void
+bna_tx_fail(struct bna_tx *tx)
+{
+ tx->flags &= ~BNA_TX_F_ENET_STARTED;
+ bfa_fsm_send_event(tx, TX_E_FAIL);
+}
+
+void
+bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
+ struct bna_txq *txq = NULL;
+ struct list_head *qe;
+ int i;
+
+ bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
+ sizeof(struct bfi_enet_tx_cfg_rsp));
+
+ tx->hw_id = cfg_rsp->hw_id;
+
+ for (i = 0, qe = bfa_q_first(&tx->txq_q);
+ i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
+ txq = (struct bna_txq *)qe;
+
+ /* Setup doorbells */
+ txq->tcb->i_dbell->doorbell_addr =
+ tx->bna->pcidev.pci_bar_kva
+ + ntohl(cfg_rsp->q_handles[i].i_dbell);
+ txq->tcb->q_dbell =
+ tx->bna->pcidev.pci_bar_kva
+ + ntohl(cfg_rsp->q_handles[i].q_dbell);
+ txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
+
+ /* Initialize producer/consumer indexes */
+ (*txq->tcb->hw_consumer_index) = 0;
+ txq->tcb->producer_index = txq->tcb->consumer_index = 0;
+ }
+
+ bfa_fsm_send_event(tx, TX_E_STARTED);
+}
+
+void
+bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
+{
+ bfa_fsm_send_event(tx, TX_E_STOPPED);
+}
+
+void
+bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
+ }
+}
+
+void
+bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
+{
+ u32 q_size;
+ u32 page_count;
+ struct bna_mem_info *mem_info;
+
+ res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_tcb);
+ mem_info->num = num_txq;
+
+ q_size = txq_depth * BFI_TXQ_WI_SIZE;
+ q_size = ALIGN(q_size, PAGE_SIZE);
+ page_count = q_size >> PAGE_SHIFT;
+
+ res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = page_count * sizeof(struct bna_dma_addr);
+ mem_info->num = num_txq;
+
+ res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = page_count * sizeof(void *);
+ mem_info->num = num_txq;
+
+ res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = num_txq * page_count;
+
+ res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = BFI_IBIDX_SIZE;
+ mem_info->num = num_txq;
+
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
+ BNA_INTR_T_MSIX;
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
+}
+
+struct bna_tx *
+bna_tx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_tx_config *tx_cfg,
+ const struct bna_tx_event_cbfn *tx_cbfn,
+ struct bna_res_info *res_info, void *priv)
+{
+ struct bna_intr_info *intr_info;
+ struct bna_tx_mod *tx_mod = &bna->tx_mod;
+ struct bna_tx *tx;
+ struct bna_txq *txq;
+ struct list_head *qe;
+ int page_count;
+ int page_size;
+ int page_idx;
+ int i;
+
+ intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
+ page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
+ tx_cfg->num_txq;
+ page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
+
+ /**
+ * Get resources
+ */
+
+ if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
+ return NULL;
+
+ /* Tx */
+
+ tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
+ if (!tx)
+ return NULL;
+ tx->bna = bna;
+ tx->priv = priv;
+
+ /* TxQs */
+
+ INIT_LIST_HEAD(&tx->txq_q);
+ for (i = 0; i < tx_cfg->num_txq; i++) {
+ if (list_empty(&tx_mod->txq_free_q))
+ goto err_return;
+
+ bfa_q_deq(&tx_mod->txq_free_q, &txq);
+ bfa_q_qe_init(&txq->qe);
+ list_add_tail(&txq->qe, &tx->txq_q);
+ txq->tx = tx;
+ }
+
+ /*
+ * Initialize
+ */
+
+ /* Tx */
+
+ tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
+ tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
+ /* Following callbacks are mandatory */
+ tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
+ tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
+ tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
+
+ list_add_tail(&tx->qe, &tx_mod->tx_active_q);
+
+ tx->num_txq = tx_cfg->num_txq;
+
+ tx->flags = 0;
+ if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
+ switch (tx->type) {
+ case BNA_TX_T_REGULAR:
+ if (!(tx->bna->tx_mod.flags &
+ BNA_TX_MOD_F_ENET_LOOPBACK))
+ tx->flags |= BNA_TX_F_ENET_STARTED;
+ break;
+ case BNA_TX_T_LOOPBACK:
+ if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
+ tx->flags |= BNA_TX_F_ENET_STARTED;
+ break;
+ }
+ }
+
+ /* TxQ */
+
+ i = 0;
+ page_idx = 0;
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ txq->tcb = (struct bna_tcb *)
+ res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
+ txq->tx_packets = 0;
+ txq->tx_bytes = 0;
+
+ /* IB */
+ txq->ib.ib_seg_host_addr.lsb =
+ res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
+ txq->ib.ib_seg_host_addr.msb =
+ res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
+ txq->ib.ib_seg_host_addr_kva =
+ res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
+ txq->ib.intr_type = intr_info->intr_type;
+ txq->ib.intr_vector = (intr_info->num == 1) ?
+ intr_info->idl[0].vector :
+ intr_info->idl[i].vector;
+ if (intr_info->intr_type == BNA_INTR_T_INTX)
+ txq->ib.intr_vector = (1 << txq->ib.intr_vector);
+ txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
+ txq->ib.interpkt_timeo = 0; /* Not used */
+ txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
+
+ /* TCB */
+
+ txq->tcb->q_depth = tx_cfg->txq_depth;
+ txq->tcb->unmap_q = (void *)
+ res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
+ txq->tcb->hw_consumer_index =
+ (u32 *)txq->ib.ib_seg_host_addr_kva;
+ txq->tcb->i_dbell = &txq->ib.door_bell;
+ txq->tcb->intr_type = txq->ib.intr_type;
+ txq->tcb->intr_vector = txq->ib.intr_vector;
+ txq->tcb->txq = txq;
+ txq->tcb->bnad = bnad;
+ txq->tcb->id = i;
+
+ /* QPT, SWQPT, Pages */
+ bna_txq_qpt_setup(txq, page_count, page_size,
+ &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
+ &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
+ &res_info[BNA_TX_RES_MEM_T_PAGE].
+ res_u.mem_info.mdl[page_idx]);
+ txq->tcb->page_idx = page_idx;
+ txq->tcb->page_count = page_count;
+ page_idx += page_count;
+
+ /* Callback to bnad for setting up TCB */
+ if (tx->tcb_setup_cbfn)
+ (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
+
+ if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
+ txq->priority = txq->tcb->id;
+ else
+ txq->priority = tx_mod->default_prio;
+
+ i++;
+ }
+
+ tx->txf_vlan_id = 0;
+
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+
+ tx_mod->rid_mask |= (1 << tx->rid);
+
+ return tx;
+
+err_return:
+ bna_tx_free(tx);
+ return NULL;
+}
+
+void
+bna_tx_destroy(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ if (tx->tcb_destroy_cbfn)
+ (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+
+ tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
+ bna_tx_free(tx);
+}
+
+void
+bna_tx_enable(struct bna_tx *tx)
+{
+ if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
+ return;
+
+ tx->flags |= BNA_TX_F_ENABLED;
+
+ if (tx->flags & BNA_TX_F_ENET_STARTED)
+ bfa_fsm_send_event(tx, TX_E_START);
+}
+
+void
+bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_tx *))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ (*cbfn)(tx->bna->bnad, tx);
+ return;
+ }
+
+ tx->stop_cbfn = cbfn;
+ tx->stop_cbarg = tx->bna->bnad;
+
+ tx->flags &= ~BNA_TX_F_ENABLED;
+
+ bfa_fsm_send_event(tx, TX_E_STOP);
+}
+
+void
+bna_tx_cleanup_complete(struct bna_tx *tx)
+{
+ bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
+}
+
+static void
+bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
+{
+ struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
+
+ bfa_wc_down(&tx_mod->tx_stop_wc);
+}
+
+static void
+bna_tx_mod_cb_tx_stopped_all(void *arg)
+{
+ struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
+
+ if (tx_mod->stop_cbfn)
+ tx_mod->stop_cbfn(&tx_mod->bna->enet);
+ tx_mod->stop_cbfn = NULL;
+}
+
+void
+bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ tx_mod->bna = bna;
+ tx_mod->flags = 0;
+
+ tx_mod->tx = (struct bna_tx *)
+ res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
+ tx_mod->txq = (struct bna_txq *)
+ res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&tx_mod->tx_free_q);
+ INIT_LIST_HEAD(&tx_mod->tx_active_q);
+
+ INIT_LIST_HEAD(&tx_mod->txq_free_q);
+
+ for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
+ tx_mod->tx[i].rid = i;
+ bfa_q_qe_init(&tx_mod->tx[i].qe);
+ list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
+ bfa_q_qe_init(&tx_mod->txq[i].qe);
+ list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
+ }
+
+ tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
+ tx_mod->default_prio = 0;
+ tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
+ tx_mod->iscsi_prio = -1;
+}
+
+void
+bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
+{
+ struct list_head *qe;
+ int i;
+
+ i = 0;
+ list_for_each(qe, &tx_mod->tx_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &tx_mod->txq_free_q)
+ i++;
+
+ tx_mod->bna = NULL;
+}
+
+void
+bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
+ if (type == BNA_TX_T_LOOPBACK)
+ tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ if (tx->type == type)
+ bna_tx_start(tx);
+ }
+}
+
+void
+bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
+ tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
+
+ tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
+
+ bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ if (tx->type == type) {
+ bfa_wc_up(&tx_mod->tx_stop_wc);
+ bna_tx_stop(tx);
+ }
+ }
+
+ bfa_wc_wait(&tx_mod->tx_stop_wc);
+}
+
+void
+bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
+ tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ bna_tx_fail(tx);
+ }
+}
+
+void
+bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
+ }
+}
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
new file mode 100644
index 00000000000..d090fbfb12f
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -0,0 +1,987 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BNA_TYPES_H__
+#define __BNA_TYPES_H__
+
+#include "cna.h"
+#include "bna_hw_defs.h"
+#include "bfa_cee.h"
+#include "bfa_msgq.h"
+
+/**
+ *
+ * Forward declarations
+ *
+ */
+
+struct bna_mcam_handle;
+struct bna_txq;
+struct bna_tx;
+struct bna_rxq;
+struct bna_cq;
+struct bna_rx;
+struct bna_rxf;
+struct bna_enet;
+struct bna;
+struct bnad;
+
+/**
+ *
+ * Enums, primitive data types
+ *
+ */
+
+enum bna_status {
+ BNA_STATUS_T_DISABLED = 0,
+ BNA_STATUS_T_ENABLED = 1
+};
+
+enum bna_cleanup_type {
+ BNA_HARD_CLEANUP = 0,
+ BNA_SOFT_CLEANUP = 1
+};
+
+enum bna_cb_status {
+ BNA_CB_SUCCESS = 0,
+ BNA_CB_FAIL = 1,
+ BNA_CB_INTERRUPT = 2,
+ BNA_CB_BUSY = 3,
+ BNA_CB_INVALID_MAC = 4,
+ BNA_CB_MCAST_LIST_FULL = 5,
+ BNA_CB_UCAST_CAM_FULL = 6,
+ BNA_CB_WAITING = 7,
+ BNA_CB_NOT_EXEC = 8
+};
+
+enum bna_res_type {
+ BNA_RES_T_MEM = 1,
+ BNA_RES_T_INTR = 2
+};
+
+enum bna_mem_type {
+ BNA_MEM_T_KVA = 1,
+ BNA_MEM_T_DMA = 2
+};
+
+enum bna_intr_type {
+ BNA_INTR_T_INTX = 1,
+ BNA_INTR_T_MSIX = 2
+};
+
+enum bna_res_req_type {
+ BNA_RES_MEM_T_COM = 0,
+ BNA_RES_MEM_T_ATTR = 1,
+ BNA_RES_MEM_T_FWTRC = 2,
+ BNA_RES_MEM_T_STATS = 3,
+ BNA_RES_T_MAX
+};
+
+enum bna_mod_res_req_type {
+ BNA_MOD_RES_MEM_T_TX_ARRAY = 0,
+ BNA_MOD_RES_MEM_T_TXQ_ARRAY = 1,
+ BNA_MOD_RES_MEM_T_RX_ARRAY = 2,
+ BNA_MOD_RES_MEM_T_RXP_ARRAY = 3,
+ BNA_MOD_RES_MEM_T_RXQ_ARRAY = 4,
+ BNA_MOD_RES_MEM_T_UCMAC_ARRAY = 5,
+ BNA_MOD_RES_MEM_T_MCMAC_ARRAY = 6,
+ BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY = 7,
+ BNA_MOD_RES_T_MAX
+};
+
+enum bna_tx_res_req_type {
+ BNA_TX_RES_MEM_T_TCB = 0,
+ BNA_TX_RES_MEM_T_UNMAPQ = 1,
+ BNA_TX_RES_MEM_T_QPT = 2,
+ BNA_TX_RES_MEM_T_SWQPT = 3,
+ BNA_TX_RES_MEM_T_PAGE = 4,
+ BNA_TX_RES_MEM_T_IBIDX = 5,
+ BNA_TX_RES_INTR_T_TXCMPL = 6,
+ BNA_TX_RES_T_MAX,
+};
+
+enum bna_rx_mem_type {
+ BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */
+ BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */
+ BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
+ BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
+ BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
+ BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
+ BNA_RX_RES_MEM_T_HQPT = 6, /* RX QPT */
+ BNA_RX_RES_MEM_T_DQPT = 7, /* RX QPT */
+ BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX s/w QPT */
+ BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX s/w QPT */
+ BNA_RX_RES_MEM_T_DPAGE = 10, /* RX s/w QPT */
+ BNA_RX_RES_MEM_T_HPAGE = 11, /* RX s/w QPT */
+ BNA_RX_RES_MEM_T_IBIDX = 12,
+ BNA_RX_RES_MEM_T_RIT = 13,
+ BNA_RX_RES_T_INTR = 14, /* Rx interrupts */
+ BNA_RX_RES_T_MAX = 15
+};
+
+enum bna_tx_type {
+ BNA_TX_T_REGULAR = 0,
+ BNA_TX_T_LOOPBACK = 1,
+};
+
+enum bna_tx_flags {
+ BNA_TX_F_ENET_STARTED = 1,
+ BNA_TX_F_ENABLED = 2,
+ BNA_TX_F_PRIO_CHANGED = 4,
+ BNA_TX_F_BW_UPDATED = 8,
+};
+
+enum bna_tx_mod_flags {
+ BNA_TX_MOD_F_ENET_STARTED = 1,
+ BNA_TX_MOD_F_ENET_LOOPBACK = 2,
+};
+
+enum bna_rx_type {
+ BNA_RX_T_REGULAR = 0,
+ BNA_RX_T_LOOPBACK = 1,
+};
+
+enum bna_rxp_type {
+ BNA_RXP_SINGLE = 1,
+ BNA_RXP_SLR = 2,
+ BNA_RXP_HDS = 3
+};
+
+enum bna_rxmode {
+ BNA_RXMODE_PROMISC = 1,
+ BNA_RXMODE_DEFAULT = 2,
+ BNA_RXMODE_ALLMULTI = 4
+};
+
+enum bna_rx_event {
+ RX_E_START = 1,
+ RX_E_STOP = 2,
+ RX_E_FAIL = 3,
+ RX_E_STARTED = 4,
+ RX_E_STOPPED = 5,
+ RX_E_RXF_STARTED = 6,
+ RX_E_RXF_STOPPED = 7,
+ RX_E_CLEANUP_DONE = 8,
+};
+
+enum bna_rx_flags {
+ BNA_RX_F_ENET_STARTED = 1,
+ BNA_RX_F_ENABLED = 2,
+};
+
+enum bna_rx_mod_flags {
+ BNA_RX_MOD_F_ENET_STARTED = 1,
+ BNA_RX_MOD_F_ENET_LOOPBACK = 2,
+};
+
+enum bna_rxf_flags {
+ BNA_RXF_F_PAUSED = 1,
+};
+
+enum bna_rxf_event {
+ RXF_E_START = 1,
+ RXF_E_STOP = 2,
+ RXF_E_FAIL = 3,
+ RXF_E_CONFIG = 4,
+ RXF_E_PAUSE = 5,
+ RXF_E_RESUME = 6,
+ RXF_E_FW_RESP = 7,
+};
+
+enum bna_enet_type {
+ BNA_ENET_T_REGULAR = 0,
+ BNA_ENET_T_LOOPBACK_INTERNAL = 1,
+ BNA_ENET_T_LOOPBACK_EXTERNAL = 2,
+};
+
+enum bna_link_status {
+ BNA_LINK_DOWN = 0,
+ BNA_LINK_UP = 1,
+ BNA_CEE_UP = 2
+};
+
+enum bna_ethport_flags {
+ BNA_ETHPORT_F_ADMIN_UP = 1,
+ BNA_ETHPORT_F_PORT_ENABLED = 2,
+ BNA_ETHPORT_F_RX_STARTED = 4,
+};
+
+enum bna_enet_flags {
+ BNA_ENET_F_IOCETH_READY = 1,
+ BNA_ENET_F_ENABLED = 2,
+ BNA_ENET_F_PAUSE_CHANGED = 4,
+ BNA_ENET_F_MTU_CHANGED = 8
+};
+
+enum bna_rss_flags {
+ BNA_RSS_F_RIT_PENDING = 1,
+ BNA_RSS_F_CFG_PENDING = 2,
+ BNA_RSS_F_STATUS_PENDING = 4,
+};
+
+enum bna_mod_flags {
+ BNA_MOD_F_INIT_DONE = 1,
+};
+
+enum bna_pkt_rates {
+ BNA_PKT_RATE_10K = 10000,
+ BNA_PKT_RATE_20K = 20000,
+ BNA_PKT_RATE_30K = 30000,
+ BNA_PKT_RATE_40K = 40000,
+ BNA_PKT_RATE_50K = 50000,
+ BNA_PKT_RATE_60K = 60000,
+ BNA_PKT_RATE_70K = 70000,
+ BNA_PKT_RATE_80K = 80000,
+};
+
+enum bna_dim_load_types {
+ BNA_LOAD_T_HIGH_4 = 0, /* 80K <= r */
+ BNA_LOAD_T_HIGH_3 = 1, /* 60K <= r < 80K */
+ BNA_LOAD_T_HIGH_2 = 2, /* 50K <= r < 60K */
+ BNA_LOAD_T_HIGH_1 = 3, /* 40K <= r < 50K */
+ BNA_LOAD_T_LOW_1 = 4, /* 30K <= r < 40K */
+ BNA_LOAD_T_LOW_2 = 5, /* 20K <= r < 30K */
+ BNA_LOAD_T_LOW_3 = 6, /* 10K <= r < 20K */
+ BNA_LOAD_T_LOW_4 = 7, /* r < 10K */
+ BNA_LOAD_T_MAX = 8
+};
+
+enum bna_dim_bias_types {
+ BNA_BIAS_T_SMALL = 0, /* small pkts > (large pkts * 2) */
+ BNA_BIAS_T_LARGE = 1, /* Not BNA_BIAS_T_SMALL */
+ BNA_BIAS_T_MAX = 2
+};
+
+#define BNA_MAX_NAME_SIZE 64
+struct bna_ident {
+ int id;
+ char name[BNA_MAX_NAME_SIZE];
+};
+
+struct bna_mac {
+ /* This should be the first one */
+ struct list_head qe;
+ u8 addr[ETH_ALEN];
+ struct bna_mcam_handle *handle;
+};
+
+struct bna_mem_descr {
+ u32 len;
+ void *kva;
+ struct bna_dma_addr dma;
+};
+
+struct bna_mem_info {
+ enum bna_mem_type mem_type;
+ u32 len;
+ u32 num;
+ u32 align_sz; /* 0/1 = no alignment */
+ struct bna_mem_descr *mdl;
+ void *cookie; /* For bnad to unmap dma later */
+};
+
+struct bna_intr_descr {
+ int vector;
+};
+
+struct bna_intr_info {
+ enum bna_intr_type intr_type;
+ int num;
+ struct bna_intr_descr *idl;
+};
+
+union bna_res_u {
+ struct bna_mem_info mem_info;
+ struct bna_intr_info intr_info;
+};
+
+struct bna_res_info {
+ enum bna_res_type res_type;
+ union bna_res_u res_u;
+};
+
+/* HW QPT */
+struct bna_qpt {
+ struct bna_dma_addr hw_qpt_ptr;
+ void *kv_qpt_ptr;
+ u32 page_count;
+ u32 page_size;
+};
+
+struct bna_attr {
+ bool fw_query_complete;
+ int num_txq;
+ int num_rxp;
+ int num_ucmac;
+ int num_mcmac;
+ int max_rit_size;
+};
+
+/**
+ *
+ * IOCEth
+ *
+ */
+
+struct bna_ioceth {
+ bfa_fsm_t fsm;
+ struct bfa_ioc ioc;
+
+ struct bna_attr attr;
+ struct bfa_msgq_cmd_entry msgq_cmd;
+ struct bfi_enet_attr_req attr_req;
+
+ void (*stop_cbfn)(struct bnad *bnad);
+ struct bnad *stop_cbarg;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Enet
+ *
+ */
+
+/* Pause configuration */
+struct bna_pause_config {
+ enum bna_status tx_pause;
+ enum bna_status rx_pause;
+};
+
+struct bna_enet {
+ bfa_fsm_t fsm;
+ enum bna_enet_flags flags;
+
+ enum bna_enet_type type;
+
+ struct bna_pause_config pause_config;
+ int mtu;
+
+ /* Callback for bna_enet_disable(), enet_stop() */
+ void (*stop_cbfn)(void *);
+ void *stop_cbarg;
+
+ /* Callback for bna_enet_pause_config() */
+ void (*pause_cbfn)(struct bnad *);
+
+ /* Callback for bna_enet_mtu_set() */
+ void (*mtu_cbfn)(struct bnad *);
+
+ struct bfa_wc chld_stop_wc;
+
+ struct bfa_msgq_cmd_entry msgq_cmd;
+ struct bfi_enet_set_pause_req pause_req;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Ethport
+ *
+ */
+
+struct bna_ethport {
+ bfa_fsm_t fsm;
+ enum bna_ethport_flags flags;
+
+ enum bna_link_status link_status;
+
+ int rx_started_count;
+
+ void (*stop_cbfn)(struct bna_enet *);
+
+ void (*adminup_cbfn)(struct bnad *, enum bna_cb_status);
+
+ void (*link_cbfn)(struct bnad *, enum bna_link_status);
+
+ struct bfa_msgq_cmd_entry msgq_cmd;
+ union {
+ struct bfi_enet_enable_req admin_req;
+ struct bfi_enet_diag_lb_req lpbk_req;
+ } bfi_enet_cmd;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Interrupt Block
+ *
+ */
+
+/* Doorbell structure */
+struct bna_ib_dbell {
+ void *__iomem doorbell_addr;
+ u32 doorbell_ack;
+};
+
+/* IB structure */
+struct bna_ib {
+ struct bna_dma_addr ib_seg_host_addr;
+ void *ib_seg_host_addr_kva;
+
+ struct bna_ib_dbell door_bell;
+
+ enum bna_intr_type intr_type;
+ int intr_vector;
+
+ u8 coalescing_timeo; /* Unit is 5usec. */
+
+ int interpkt_count;
+ int interpkt_timeo;
+};
+
+/**
+ *
+ * Tx object
+ *
+ */
+
+/* Tx datapath control structure */
+#define BNA_Q_NAME_SIZE 16
+struct bna_tcb {
+ /* Fast path */
+ void **sw_qpt;
+ void *unmap_q;
+ u32 producer_index;
+ u32 consumer_index;
+ volatile u32 *hw_consumer_index;
+ u32 q_depth;
+ void *__iomem q_dbell;
+ struct bna_ib_dbell *i_dbell;
+ int page_idx;
+ int page_count;
+ /* Control path */
+ struct bna_txq *txq;
+ struct bnad *bnad;
+ void *priv; /* BNAD's cookie */
+ enum bna_intr_type intr_type;
+ int intr_vector;
+ u8 priority; /* Current priority */
+ unsigned long flags; /* Used by bnad as required */
+ int id;
+ char name[BNA_Q_NAME_SIZE];
+};
+
+/* TxQ QPT and configuration */
+struct bna_txq {
+ /* This should be the first one */
+ struct list_head qe;
+
+ u8 priority;
+
+ struct bna_qpt qpt;
+ struct bna_tcb *tcb;
+ struct bna_ib ib;
+
+ struct bna_tx *tx;
+
+ int hw_id;
+
+ u64 tx_packets;
+ u64 tx_bytes;
+};
+
+/* Tx object */
+struct bna_tx {
+ /* This should be the first one */
+ struct list_head qe;
+ int rid;
+ int hw_id;
+
+ bfa_fsm_t fsm;
+ enum bna_tx_flags flags;
+
+ enum bna_tx_type type;
+ int num_txq;
+
+ struct list_head txq_q;
+ u16 txf_vlan_id;
+
+ /* Tx event handlers */
+ void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
+ void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
+ void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);
+
+ /* callback for bna_tx_disable(), bna_tx_stop() */
+ void (*stop_cbfn)(void *arg, struct bna_tx *tx);
+ void *stop_cbarg;
+
+ /* callback for bna_tx_prio_set() */
+ void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx);
+
+ struct bfa_msgq_cmd_entry msgq_cmd;
+ union {
+ struct bfi_enet_tx_cfg_req cfg_req;
+ struct bfi_enet_req req;
+ struct bfi_enet_tx_cfg_rsp cfg_rsp;
+ } bfi_enet_cmd;
+
+ struct bna *bna;
+ void *priv; /* bnad's cookie */
+};
+
+/* Tx object configuration used during creation */
+struct bna_tx_config {
+ int num_txq;
+ int txq_depth;
+ int coalescing_timeo;
+ enum bna_tx_type tx_type;
+};
+
+struct bna_tx_event_cbfn {
+ /* Optional */
+ void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
+ /* Mandatory */
+ void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
+ void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
+ void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);
+};
+
+/* Tx module - keeps track of free, active tx objects */
+struct bna_tx_mod {
+ struct bna_tx *tx; /* BFI_MAX_TXQ entries */
+ struct bna_txq *txq; /* BFI_MAX_TXQ entries */
+
+ struct list_head tx_free_q;
+ struct list_head tx_active_q;
+
+ struct list_head txq_free_q;
+
+ /* callback for bna_tx_mod_stop() */
+ void (*stop_cbfn)(struct bna_enet *enet);
+
+ struct bfa_wc tx_stop_wc;
+
+ enum bna_tx_mod_flags flags;
+
+ u8 prio_map;
+ int default_prio;
+ int iscsi_over_cee;
+ int iscsi_prio;
+ int prio_reconfigured;
+
+ u32 rid_mask;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Rx object
+ *
+ */
+
+/* Rx datapath control structure */
+struct bna_rcb {
+ /* Fast path */
+ void **sw_qpt;
+ void *unmap_q;
+ u32 producer_index;
+ u32 consumer_index;
+ u32 q_depth;
+ void *__iomem q_dbell;
+ int page_idx;
+ int page_count;
+ /* Control path */
+ struct bna_rxq *rxq;
+ struct bna_ccb *ccb;
+ struct bnad *bnad;
+ void *priv; /* BNAD's cookie */
+ unsigned long flags;
+ int id;
+};
+
+/* RxQ structure - QPT, configuration */
+struct bna_rxq {
+ struct list_head qe;
+
+ int buffer_size;
+ int q_depth;
+
+ struct bna_qpt qpt;
+ struct bna_rcb *rcb;
+
+ struct bna_rxp *rxp;
+ struct bna_rx *rx;
+
+ int hw_id;
+
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_packets_with_error;
+ u64 rxbuf_alloc_failed;
+};
+
+/* RxQ pair */
+union bna_rxq_u {
+ struct {
+ struct bna_rxq *hdr;
+ struct bna_rxq *data;
+ } hds;
+ struct {
+ struct bna_rxq *small;
+ struct bna_rxq *large;
+ } slr;
+ struct {
+ struct bna_rxq *only;
+ struct bna_rxq *reserved;
+ } single;
+};
+
+/* Packet rate for Dynamic Interrupt Moderation */
+struct bna_pkt_rate {
+ u32 small_pkt_cnt;
+ u32 large_pkt_cnt;
+};
+
+/* Completion control structure */
+struct bna_ccb {
+ /* Fast path */
+ void **sw_qpt;
+ u32 producer_index;
+ volatile u32 *hw_producer_index;
+ u32 q_depth;
+ struct bna_ib_dbell *i_dbell;
+ struct bna_rcb *rcb[2];
+ void *ctrl; /* For bnad */
+ struct bna_pkt_rate pkt_rate;
+ int page_idx;
+ int page_count;
+
+ /* Control path */
+ struct bna_cq *cq;
+ struct bnad *bnad;
+ void *priv; /* BNAD's cookie */
+ enum bna_intr_type intr_type;
+ int intr_vector;
+ u8 rx_coalescing_timeo; /* For NAPI */
+ int id;
+ char name[BNA_Q_NAME_SIZE];
+};
+
+/* CQ QPT, configuration */
+struct bna_cq {
+ struct bna_qpt qpt;
+ struct bna_ccb *ccb;
+
+ struct bna_ib ib;
+
+ struct bna_rx *rx;
+};
+
+struct bna_rss_config {
+ enum bfi_enet_rss_type hash_type;
+ u8 hash_mask;
+ u32 toeplitz_hash_key[BFI_ENET_RSS_KEY_LEN];
+};
+
+struct bna_hds_config {
+ enum bfi_enet_hds_type hdr_type;
+ int forced_offset;
+};
+
+/* Rx object configuration used during creation */
+struct bna_rx_config {
+ enum bna_rx_type rx_type;
+ int num_paths;
+ enum bna_rxp_type rxp_type;
+ int paused;
+ int q_depth;
+ int coalescing_timeo;
+ /*
+ * Small/Large (or Header/Data) buffer size to be configured
+ * for SLR and HDS queue type. Large buffer size comes from
+ * enet->mtu.
+ */
+ int small_buff_size;
+
+ enum bna_status rss_status;
+ struct bna_rss_config rss_config;
+
+ struct bna_hds_config hds_config;
+
+ enum bna_status vlan_strip_status;
+};
+
+/* Rx Path structure - one per MSIX vector/CPU */
+struct bna_rxp {
+ /* This should be the first one */
+ struct list_head qe;
+
+ enum bna_rxp_type type;
+ union bna_rxq_u rxq;
+ struct bna_cq cq;
+
+ struct bna_rx *rx;
+
+ /* MSI-x vector number for configuring RSS */
+ int vector;
+ int hw_id;
+};
+
+/* RxF structure (hardware Rx Function) */
+struct bna_rxf {
+ bfa_fsm_t fsm;
+ enum bna_rxf_flags flags;
+
+ struct bfa_msgq_cmd_entry msgq_cmd;
+ union {
+ struct bfi_enet_enable_req req;
+ struct bfi_enet_rss_cfg_req rss_req;
+ struct bfi_enet_rit_req rit_req;
+ struct bfi_enet_rx_vlan_req vlan_req;
+ struct bfi_enet_mcast_add_req mcast_add_req;
+ struct bfi_enet_mcast_del_req mcast_del_req;
+ struct bfi_enet_ucast_req ucast_req;
+ } bfi_enet_cmd;
+
+ /* callback for bna_rxf_start() */
+ void (*start_cbfn) (struct bna_rx *rx);
+ struct bna_rx *start_cbarg;
+
+ /* callback for bna_rxf_stop() */
+ void (*stop_cbfn) (struct bna_rx *rx);
+ struct bna_rx *stop_cbarg;
+
+ /* callback for bna_rx_receive_pause() / bna_rx_receive_resume() */
+ void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx);
+ struct bnad *oper_state_cbarg;
+
+ /**
+ * callback for:
+ * bna_rxf_ucast_set()
+ * bna_rxf_{ucast/mcast}_add(),
+ * bna_rxf_{ucast/mcast}_del(),
+ * bna_rxf_mode_set()
+ */
+ void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx);
+ struct bnad *cam_fltr_cbarg;
+
+ /* List of unicast addresses yet to be applied to h/w */
+ struct list_head ucast_pending_add_q;
+ struct list_head ucast_pending_del_q;
+ struct bna_mac *ucast_pending_mac;
+ int ucast_pending_set;
+ /* ucast addresses applied to the h/w */
+ struct list_head ucast_active_q;
+ struct bna_mac ucast_active_mac;
+ int ucast_active_set;
+
+ /* List of multicast addresses yet to be applied to h/w */
+ struct list_head mcast_pending_add_q;
+ struct list_head mcast_pending_del_q;
+ /* multicast addresses applied to the h/w */
+ struct list_head mcast_active_q;
+ struct list_head mcast_handle_q;
+
+ /* Rx modes yet to be applied to h/w */
+ enum bna_rxmode rxmode_pending;
+ enum bna_rxmode rxmode_pending_bitmask;
+ /* Rx modes applied to h/w */
+ enum bna_rxmode rxmode_active;
+
+ u8 vlan_pending_bitmask;
+ enum bna_status vlan_filter_status;
+ u32 vlan_filter_table[(BFI_ENET_VLAN_ID_MAX) / 32];
+ bool vlan_strip_pending;
+ enum bna_status vlan_strip_status;
+
+ enum bna_rss_flags rss_pending;
+ enum bna_status rss_status;
+ struct bna_rss_config rss_cfg;
+ u8 *rit;
+ int rit_size;
+
+ struct bna_rx *rx;
+};
+
+/* Rx object */
+struct bna_rx {
+ /* This should be the first one */
+ struct list_head qe;
+ int rid;
+ int hw_id;
+
+ bfa_fsm_t fsm;
+
+ enum bna_rx_type type;
+
+ int num_paths;
+ struct list_head rxp_q;
+
+ struct bna_hds_config hds_cfg;
+
+ struct bna_rxf rxf;
+
+ enum bna_rx_flags rx_flags;
+
+ struct bfa_msgq_cmd_entry msgq_cmd;
+ union {
+ struct bfi_enet_rx_cfg_req cfg_req;
+ struct bfi_enet_req req;
+ struct bfi_enet_rx_cfg_rsp cfg_rsp;
+ } bfi_enet_cmd;
+
+ /* Rx event handlers */
+ void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
+ void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
+ void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
+
+ /* callback for bna_rx_disable(), bna_rx_stop() */
+ void (*stop_cbfn)(void *arg, struct bna_rx *rx);
+ void *stop_cbarg;
+
+ struct bna *bna;
+ void *priv; /* bnad's cookie */
+};
+
+struct bna_rx_event_cbfn {
+ /* Optional */
+ void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
+ /* Mandatory */
+ void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
+ void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
+};
+
+/* Rx module - keeps track of free, active rx objects */
+struct bna_rx_mod {
+ struct bna *bna; /* back pointer to parent */
+ struct bna_rx *rx; /* BFI_MAX_RXQ entries */
+ struct bna_rxp *rxp; /* BFI_MAX_RXQ entries */
+ struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */
+
+ struct list_head rx_free_q;
+ struct list_head rx_active_q;
+ int rx_free_count;
+
+ struct list_head rxp_free_q;
+ int rxp_free_count;
+
+ struct list_head rxq_free_q;
+ int rxq_free_count;
+
+ enum bna_rx_mod_flags flags;
+
+ /* callback for bna_rx_mod_stop() */
+ void (*stop_cbfn)(struct bna_enet *enet);
+
+ struct bfa_wc rx_stop_wc;
+ u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
+ u32 rid_mask;
+};
+
+/**
+ *
+ * CAM
+ *
+ */
+
+struct bna_ucam_mod {
+ struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
+ struct list_head free_q;
+
+ struct bna *bna;
+};
+
+struct bna_mcam_handle {
+ /* This should be the first one */
+ struct list_head qe;
+ int handle;
+ int refcnt;
+};
+
+struct bna_mcam_mod {
+ struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
+ struct bna_mcam_handle *mchandle; /* BFI_MAX_MCMAC entries */
+ struct list_head free_q;
+ struct list_head free_handle_q;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Statistics
+ *
+ */
+
+struct bna_stats {
+ struct bna_dma_addr hw_stats_dma;
+ struct bfi_enet_stats *hw_stats_kva;
+ struct bfi_enet_stats hw_stats;
+};
+
+struct bna_stats_mod {
+ bool ioc_ready;
+ bool stats_get_busy;
+ bool stats_clr_busy;
+ struct bfa_msgq_cmd_entry stats_get_cmd;
+ struct bfa_msgq_cmd_entry stats_clr_cmd;
+ struct bfi_enet_stats_req stats_get;
+ struct bfi_enet_stats_req stats_clr;
+};
+
+/**
+ *
+ * BNA
+ *
+ */
+
+struct bna {
+ struct bna_ident ident;
+ struct bfa_pcidev pcidev;
+
+ struct bna_reg regs;
+ struct bna_bit_defn bits;
+
+ struct bna_stats stats;
+
+ struct bna_ioceth ioceth;
+ struct bfa_cee cee;
+ struct bfa_msgq msgq;
+
+ struct bna_ethport ethport;
+ struct bna_enet enet;
+ struct bna_stats_mod stats_mod;
+
+ struct bna_tx_mod tx_mod;
+ struct bna_rx_mod rx_mod;
+ struct bna_ucam_mod ucam_mod;
+ struct bna_mcam_mod mcam_mod;
+
+ enum bna_mod_flags mod_flags;
+
+ int default_mode_rid;
+ int promisc_rid;
+
+ struct bnad *bnad;
+};
+#endif /* __BNA_TYPES_H__ */
diff --git a/drivers/net/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 8e35b2596f9..5d7872ecff5 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -102,6 +102,28 @@ bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
}
}
+static u32
+bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
+ u32 index, u32 depth, struct sk_buff *skb, u32 frag)
+{
+ int j;
+ array[index].skb = NULL;
+
+ dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
+ skb_headlen(skb), DMA_TO_DEVICE);
+ dma_unmap_addr_set(&array[index], dma_addr, 0);
+ BNA_QE_INDX_ADD(index, 1, depth);
+
+ for (j = 0; j < frag; j++) {
+ dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
+ skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE);
+ dma_unmap_addr_set(&array[index], dma_addr, 0);
+ BNA_QE_INDX_ADD(index, 1, depth);
+ }
+
+ return index;
+}
+
/*
* Frees all pending Tx Bufs
* At this point no activity is expected on the Q,
@@ -115,39 +137,20 @@ bnad_free_all_txbufs(struct bnad *bnad,
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb = NULL;
- int i;
+ int q;
unmap_array = unmap_q->unmap_array;
- unmap_cons = 0;
- while (unmap_cons < unmap_q->q_depth) {
- skb = unmap_array[unmap_cons].skb;
- if (!skb) {
- unmap_cons++;
+ for (q = 0; q < unmap_q->q_depth; q++) {
+ skb = unmap_array[q].skb;
+ if (!skb)
continue;
- }
- unmap_array[unmap_cons].skb = NULL;
- dma_unmap_single(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr), skb_headlen(skb),
- DMA_TO_DEVICE);
-
- dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
- if (++unmap_cons >= unmap_q->q_depth)
- break;
+ unmap_cons = q;
+ unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
+ unmap_cons, unmap_q->q_depth, skb,
+ skb_shinfo(skb)->nr_frags);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- dma_unmap_page(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr),
- skb_shinfo(skb)->frags[i].size,
- DMA_TO_DEVICE);
- dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
- 0);
- if (++unmap_cons >= unmap_q->q_depth)
- break;
- }
dev_kfree_skb_any(skb);
}
}
@@ -164,12 +167,11 @@ static u32
bnad_free_txbufs(struct bnad *bnad,
struct bna_tcb *tcb)
{
- u32 sent_packets = 0, sent_bytes = 0;
- u16 wis, unmap_cons, updated_hw_cons;
+ u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
+ u16 wis, updated_hw_cons;
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
- int i;
/*
* Just return if TX is stopped. This check is useful
@@ -195,32 +197,14 @@ bnad_free_txbufs(struct bnad *bnad,
while (wis) {
skb = unmap_array[unmap_cons].skb;
- unmap_array[unmap_cons].skb = NULL;
-
sent_packets++;
sent_bytes += skb->len;
wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
- dma_unmap_single(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr), skb_headlen(skb),
- DMA_TO_DEVICE);
- dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
- BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
-
- prefetch(&unmap_array[unmap_cons + 1]);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- prefetch(&unmap_array[unmap_cons + 1]);
-
- dma_unmap_page(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr),
- skb_shinfo(skb)->frags[i].size,
- DMA_TO_DEVICE);
- dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
- 0);
- BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
- }
+ unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
+ unmap_cons, unmap_q->q_depth, skb,
+ skb_shinfo(skb)->nr_frags);
+
dev_kfree_skb_any(skb);
}
@@ -383,14 +367,14 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
while (to_alloc--) {
- if (!wi_range) {
+ if (!wi_range)
BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
wi_range);
- }
skb = netdev_alloc_skb_ip_align(bnad->netdev,
rcb->rxq->buffer_size);
if (unlikely(!skb)) {
BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
+ rcb->rxq->rxbuf_alloc_failed++;
goto finishing;
}
unmap_array[unmap_prod].skb = skb;
@@ -412,7 +396,7 @@ finishing:
unmap_q->producer_index = unmap_prod;
rcb->producer_index = unmap_prod;
smp_mb();
- if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
+ if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
bna_rxq_prod_indx_doorbell(rcb);
}
}
@@ -441,11 +425,15 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
u32 flags, unmap_cons;
- u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
+ struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
+
+ set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
- if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
+ if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
+ clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
return 0;
+ }
prefetch(bnad->netdev);
BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
@@ -455,10 +443,10 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
packets++;
BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
- if (qid0 == cmpl->rxq_id)
- rcb = ccb->rcb[0];
- else
+ if (bna_is_small_rxq(cmpl->rxq_id))
rcb = ccb->rcb[1];
+ else
+ rcb = ccb->rcb[0];
unmap_q = rcb->unmap_q;
unmap_array = unmap_q->unmap_array;
@@ -518,12 +506,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
if (flags & BNA_CQ_EF_VLAN)
__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- struct bnad_rx_ctrl *rx_ctrl;
-
- rx_ctrl = (struct bnad_rx_ctrl *) ccb->ctrl;
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ctrl->napi, skb);
- } else {
+ else {
netif_receive_skb(skb);
}
@@ -534,39 +519,16 @@ next:
BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
- if (likely(ccb)) {
- if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
- bna_ib_ack(ccb->i_dbell, packets);
- bnad_refill_rxq(bnad, ccb->rcb[0]);
- if (ccb->rcb[1])
- bnad_refill_rxq(bnad, ccb->rcb[1]);
- } else {
- if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
- bna_ib_ack(ccb->i_dbell, 0);
- }
-
- return packets;
-}
+ if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+ bna_ib_ack_disable_irq(ccb->i_dbell, packets);
-static void
-bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
-{
- if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
- return;
+ bnad_refill_rxq(bnad, ccb->rcb[0]);
+ if (ccb->rcb[1])
+ bnad_refill_rxq(bnad, ccb->rcb[1]);
- bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
- bna_ib_ack(ccb->i_dbell, 0);
-}
+ clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
-static void
-bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
-{
- unsigned long flags;
-
- /* Because of polling context */
- spin_lock_irqsave(&bnad->bna_lock, flags);
- bnad_enable_rx_irq_unsafe(ccb);
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return packets;
}
static void
@@ -576,10 +538,9 @@ bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
struct napi_struct *napi = &rx_ctrl->napi;
if (likely(napi_schedule_prep(napi))) {
- bnad_disable_rx_irq(bnad, ccb);
__napi_schedule(napi);
+ rx_ctrl->rx_schedule++;
}
- BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
}
/* MSIX Rx Path Handler */
@@ -587,9 +548,11 @@ static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
struct bna_ccb *ccb = (struct bna_ccb *)data;
- struct bnad *bnad = ccb->bnad;
- bnad_netif_rx_schedule_poll(bnad, ccb);
+ if (ccb) {
+ ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
+ bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
+ }
return IRQ_HANDLED;
}
@@ -604,14 +567,15 @@ bnad_msix_mbox_handler(int irq, void *data)
unsigned long flags;
struct bnad *bnad = (struct bnad *)data;
- if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
- return IRQ_HANDLED;
-
spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return IRQ_HANDLED;
+ }
bna_intr_status_get(&bnad->bna, intr_status);
- if (BNA_IS_MBOX_ERR_INTR(intr_status))
+ if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
bna_mbox_handler(&bnad->bna, intr_status);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -628,18 +592,22 @@ bnad_isr(int irq, void *data)
struct bnad *bnad = (struct bnad *)data;
struct bnad_rx_info *rx_info;
struct bnad_rx_ctrl *rx_ctrl;
+ struct bna_tcb *tcb = NULL;
- if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
return IRQ_NONE;
+ }
bna_intr_status_get(&bnad->bna, intr_status);
- if (unlikely(!intr_status))
+ if (unlikely(!intr_status)) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
return IRQ_NONE;
+ }
- spin_lock_irqsave(&bnad->bna_lock, flags);
-
- if (BNA_IS_MBOX_ERR_INTR(intr_status))
+ if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
bna_mbox_handler(&bnad->bna, intr_status);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -650,8 +618,11 @@ bnad_isr(int irq, void *data)
/* Process data interrupts */
/* Tx processing */
for (i = 0; i < bnad->num_tx; i++) {
- for (j = 0; j < bnad->num_txq_per_tx; j++)
- bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ tcb = bnad->tx_info[i].tcb[j];
+ if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
+ bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+ }
}
/* Rx processing */
for (i = 0; i < bnad->num_rx; i++) {
@@ -706,43 +677,49 @@ bnad_set_netdev_perm_addr(struct bnad *bnad)
/* Callbacks */
void
-bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
+bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
bnad_enable_mbox_irq(bnad);
}
void
-bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
+bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
bnad_disable_mbox_irq(bnad);
}
void
-bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
+bnad_cb_ioceth_ready(struct bnad *bnad)
{
+ bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
complete(&bnad->bnad_completions.ioc_comp);
- bnad->bnad_completions.ioc_comp_status = status;
}
void
-bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
+bnad_cb_ioceth_failed(struct bnad *bnad)
{
+ bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
+ complete(&bnad->bnad_completions.ioc_comp);
+}
+
+void
+bnad_cb_ioceth_disabled(struct bnad *bnad)
+{
+ bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
complete(&bnad->bnad_completions.ioc_comp);
- bnad->bnad_completions.ioc_comp_status = status;
}
static void
-bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
+bnad_cb_enet_disabled(void *arg)
{
struct bnad *bnad = (struct bnad *)arg;
- complete(&bnad->bnad_completions.port_comp);
-
netif_carrier_off(bnad->netdev);
+ complete(&bnad->bnad_completions.enet_comp);
}
void
-bnad_cb_port_link_status(struct bnad *bnad,
+bnad_cb_ethport_link_status(struct bnad *bnad,
enum bna_link_status link_status)
{
bool link_up = 0;
@@ -750,34 +727,60 @@ bnad_cb_port_link_status(struct bnad *bnad,
link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
if (link_status == BNA_CEE_UP) {
+ if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
+ BNAD_UPDATE_CTR(bnad, cee_toggle);
set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
- BNAD_UPDATE_CTR(bnad, cee_up);
- } else
+ } else {
+ if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
+ BNAD_UPDATE_CTR(bnad, cee_toggle);
clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
+ }
if (link_up) {
if (!netif_carrier_ok(bnad->netdev)) {
- struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
- if (!tcb)
- return;
- pr_warn("bna: %s link up\n",
+ uint tx_id, tcb_id;
+ printk(KERN_WARNING "bna: %s link up\n",
bnad->netdev->name);
netif_carrier_on(bnad->netdev);
BNAD_UPDATE_CTR(bnad, link_toggle);
- if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
- /* Force an immediate Transmit Schedule */
- pr_info("bna: %s TX_STARTED\n",
- bnad->netdev->name);
- netif_wake_queue(bnad->netdev);
- BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
- } else {
- netif_stop_queue(bnad->netdev);
- BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
+ for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
+ tcb_id++) {
+ struct bna_tcb *tcb =
+ bnad->tx_info[tx_id].tcb[tcb_id];
+ u32 txq_id;
+ if (!tcb)
+ continue;
+
+ txq_id = tcb->id;
+
+ if (test_bit(BNAD_TXQ_TX_STARTED,
+ &tcb->flags)) {
+ /*
+ * Force an immediate
+ * Transmit Schedule */
+ printk(KERN_INFO "bna: %s %d "
+ "TXQ_STARTED\n",
+ bnad->netdev->name,
+ txq_id);
+ netif_wake_subqueue(
+ bnad->netdev,
+ txq_id);
+ BNAD_UPDATE_CTR(bnad,
+ netif_queue_wakeup);
+ } else {
+ netif_stop_subqueue(
+ bnad->netdev,
+ txq_id);
+ BNAD_UPDATE_CTR(bnad,
+ netif_queue_stop);
+ }
+ }
}
}
} else {
if (netif_carrier_ok(bnad->netdev)) {
- pr_warn("bna: %s link down\n",
+ printk(KERN_WARNING "bna: %s link down\n",
bnad->netdev->name);
netif_carrier_off(bnad->netdev);
BNAD_UPDATE_CTR(bnad, link_toggle);
@@ -786,8 +789,7 @@ bnad_cb_port_link_status(struct bnad *bnad,
}
static void
-bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
- enum bna_cb_status status)
+bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
struct bnad *bnad = (struct bnad *)arg;
@@ -864,108 +866,188 @@ bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
}
static void
-bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
+bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
struct bnad_tx_info *tx_info =
- (struct bnad_tx_info *)tcb->txq->tx->priv;
-
- if (tx_info != &bnad->tx_info[0])
- return;
+ (struct bnad_tx_info *)tx->priv;
+ struct bna_tcb *tcb;
+ u32 txq_id;
+ int i;
- clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
- netif_stop_queue(bnad->netdev);
- pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
+ for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+ tcb = tx_info->tcb[i];
+ if (!tcb)
+ continue;
+ txq_id = tcb->id;
+ clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+ netif_stop_subqueue(bnad->netdev, txq_id);
+ printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
+ bnad->netdev->name, txq_id);
+ }
}
static void
-bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
+bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
- struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
+ struct bna_tcb *tcb;
+ struct bnad_unmap_q *unmap_q;
+ u32 txq_id;
+ int i;
- if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
- return;
+ for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+ tcb = tx_info->tcb[i];
+ if (!tcb)
+ continue;
+ txq_id = tcb->id;
- clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
+ unmap_q = tcb->unmap_q;
- while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
- cpu_relax();
+ if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
+ continue;
- bnad_free_all_txbufs(bnad, tcb);
+ while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+ cpu_relax();
- unmap_q->producer_index = 0;
- unmap_q->consumer_index = 0;
+ bnad_free_all_txbufs(bnad, tcb);
- smp_mb__before_clear_bit();
- clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+ set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+
+ if (netif_carrier_ok(bnad->netdev)) {
+ printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
+ bnad->netdev->name, txq_id);
+ netif_wake_subqueue(bnad->netdev, txq_id);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+ }
/*
- * Workaround for first device enable failure & we
+ * Workaround for first ioceth enable failure & we
* get a 0 MAC address. We try to get the MAC address
* again here.
*/
if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
- bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
+ bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
bnad_set_netdev_perm_addr(bnad);
}
+}
- set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+static void
+bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
+{
+ struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
+ struct bna_tcb *tcb;
+ int i;
- if (netif_carrier_ok(bnad->netdev)) {
- pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
- netif_wake_queue(bnad->netdev);
- BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+ tcb = tx_info->tcb[i];
+ if (!tcb)
+ continue;
}
+
+ mdelay(BNAD_TXRX_SYNC_MDELAY);
+ bna_tx_cleanup_complete(tx);
}
static void
-bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
- /* Delay only once for the whole Tx Path Shutdown */
- if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
- mdelay(BNAD_TXRX_SYNC_MDELAY);
+ struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bna_ccb *ccb;
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i;
+
+ for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
+ rx_ctrl = &rx_info->rx_ctrl[i];
+ ccb = rx_ctrl->ccb;
+ if (!ccb)
+ continue;
+
+ clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
+
+ if (ccb->rcb[1])
+ clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
+ }
}
static void
-bnad_cb_rx_cleanup(struct bnad *bnad,
- struct bna_ccb *ccb)
+bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
- clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
+ struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bna_ccb *ccb;
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i;
- if (ccb->rcb[1])
- clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
+ mdelay(BNAD_TXRX_SYNC_MDELAY);
+
+ for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
+ rx_ctrl = &rx_info->rx_ctrl[i];
+ ccb = rx_ctrl->ccb;
+ if (!ccb)
+ continue;
+
+ clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
+
+ if (ccb->rcb[1])
+ clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
- if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
- mdelay(BNAD_TXRX_SYNC_MDELAY);
+ while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
+ cpu_relax();
+ }
+
+ bna_rx_cleanup_complete(rx);
}
static void
-bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
- struct bnad_unmap_q *unmap_q = rcb->unmap_q;
-
- clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
-
- if (rcb == rcb->cq->ccb->rcb[0])
- bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
+ struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bna_ccb *ccb;
+ struct bna_rcb *rcb;
+ struct bnad_rx_ctrl *rx_ctrl;
+ struct bnad_unmap_q *unmap_q;
+ int i;
+ int j;
- bnad_free_all_rxbufs(bnad, rcb);
+ for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
+ rx_ctrl = &rx_info->rx_ctrl[i];
+ ccb = rx_ctrl->ccb;
+ if (!ccb)
+ continue;
- set_bit(BNAD_RXQ_STARTED, &rcb->flags);
+ bnad_cq_cmpl_init(bnad, ccb);
- /* Now allocate & post buffers for this RCB */
- /* !!Allocation in callback context */
- if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
- if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
- >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
- bnad_alloc_n_post_rxbufs(bnad, rcb);
- smp_mb__before_clear_bit();
- clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+ for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
+ rcb = ccb->rcb[j];
+ if (!rcb)
+ continue;
+ bnad_free_all_rxbufs(bnad, rcb);
+
+ set_bit(BNAD_RXQ_STARTED, &rcb->flags);
+ set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
+ unmap_q = rcb->unmap_q;
+
+ /* Now allocate & post buffers for this RCB */
+ /* !!Allocation in callback context */
+ if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
+ if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
+ >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+ bnad_alloc_n_post_rxbufs(bnad, rcb);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+ }
+ }
}
}
static void
-bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
- enum bna_cb_status status)
+bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
struct bnad *bnad = (struct bnad *)arg;
@@ -973,10 +1055,9 @@ bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
}
static void
-bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
- enum bna_cb_status status)
+bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
- bnad->bnad_completions.mcast_comp_status = status;
+ bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
complete(&bnad->bnad_completions.mcast_comp);
}
@@ -995,6 +1076,13 @@ bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}
+static void
+bnad_cb_enet_mtu_set(struct bnad *bnad)
+{
+ bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
+ complete(&bnad->bnad_completions.mtu_comp);
+}
+
/* Resource allocation, free functions */
static void
@@ -1073,23 +1161,17 @@ err_return:
/* Free IRQ for Mailbox */
static void
-bnad_mbox_irq_free(struct bnad *bnad,
- struct bna_intr_info *intr_info)
+bnad_mbox_irq_free(struct bnad *bnad)
{
int irq;
unsigned long flags;
- if (intr_info->idl == NULL)
- return;
-
spin_lock_irqsave(&bnad->bna_lock, flags);
bnad_disable_mbox_irq(bnad);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
irq = BNAD_GET_MBOX_IRQ(bnad);
free_irq(irq, bnad);
-
- kfree(intr_info->idl);
}
/*
@@ -1098,32 +1180,22 @@ bnad_mbox_irq_free(struct bnad *bnad,
* from bna
*/
static int
-bnad_mbox_irq_alloc(struct bnad *bnad,
- struct bna_intr_info *intr_info)
+bnad_mbox_irq_alloc(struct bnad *bnad)
{
int err = 0;
unsigned long irq_flags, flags;
u32 irq;
irq_handler_t irq_handler;
- /* Mbox should use only 1 vector */
-
- intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
- if (!intr_info->idl)
- return -ENOMEM;
-
spin_lock_irqsave(&bnad->bna_lock, flags);
if (bnad->cfg_flags & BNAD_CF_MSIX) {
irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
irq_flags = 0;
- intr_info->intr_type = BNA_INTR_T_MSIX;
- intr_info->idl[0].vector = BNAD_MAILBOX_MSIX_INDEX;
} else {
irq_handler = (irq_handler_t)bnad_isr;
irq = bnad->pcidev->irq;
irq_flags = IRQF_SHARED;
- intr_info->intr_type = BNA_INTR_T_INTX;
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -1140,11 +1212,6 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
err = request_irq(irq, irq_handler, irq_flags,
bnad->mbox_irq_name, bnad);
- if (err) {
- kfree(intr_info->idl);
- intr_info->idl = NULL;
- }
-
return err;
}
@@ -1158,7 +1225,7 @@ bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
- uint txrx_id, struct bna_intr_info *intr_info)
+ u32 txrx_id, struct bna_intr_info *intr_info)
{
int i, vector_start = 0;
u32 cfg_flags;
@@ -1241,7 +1308,7 @@ bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
*/
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
- uint tx_id, int num_txqs)
+ u32 tx_id, int num_txqs)
{
int i;
int err;
@@ -1294,7 +1361,7 @@ bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
*/
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
- uint rx_id, int num_rxps)
+ u32 rx_id, int num_rxps)
{
int i;
int err;
@@ -1338,7 +1405,7 @@ bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
- uint tx_id)
+ u32 tx_id)
{
int i, err = 0;
@@ -1407,7 +1474,7 @@ bnad_ioc_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
+ bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1418,7 +1485,7 @@ bnad_ioc_hb_check(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
+ bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1429,7 +1496,7 @@ bnad_iocpf_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
+ bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1440,7 +1507,7 @@ bnad_iocpf_sem_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
+ bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1499,7 +1566,7 @@ bnad_stats_timeout(unsigned long data)
return;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_stats_get(&bnad->bna);
+ bna_hw_stats_get(&bnad->bna);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1577,32 +1644,32 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
struct bnad_rx_ctrl *rx_ctrl =
container_of(napi, struct bnad_rx_ctrl, napi);
- struct bna_ccb *ccb;
- struct bnad *bnad;
+ struct bnad *bnad = rx_ctrl->bnad;
int rcvd = 0;
- ccb = rx_ctrl->ccb;
-
- bnad = ccb->bnad;
+ rx_ctrl->rx_poll_ctr++;
if (!netif_carrier_ok(bnad->netdev))
goto poll_exit;
- rcvd = bnad_poll_cq(bnad, ccb, budget);
- if (rcvd == budget)
+ rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
+ if (rcvd >= budget)
return rcvd;
poll_exit:
- napi_complete((napi));
+ napi_complete(napi);
- BNAD_UPDATE_CTR(bnad, netif_rx_complete);
+ rx_ctrl->rx_complete++;
+
+ if (rx_ctrl->ccb)
+ bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
- bnad_enable_rx_irq(bnad, ccb);
return rcvd;
}
+#define BNAD_NAPI_POLL_QUOTA 64
static void
-bnad_napi_enable(struct bnad *bnad, u32 rx_id)
+bnad_napi_init(struct bnad *bnad, u32 rx_id)
{
struct bnad_rx_ctrl *rx_ctrl;
int i;
@@ -1610,9 +1677,20 @@ bnad_napi_enable(struct bnad *bnad, u32 rx_id)
/* Initialize & enable NAPI */
for (i = 0; i < bnad->num_rxp_per_rx; i++) {
rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
-
netif_napi_add(bnad->netdev, &rx_ctrl->napi,
- bnad_napi_poll_rx, 64);
+ bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
+ }
+}
+
+static void
+bnad_napi_enable(struct bnad *bnad, u32 rx_id)
+{
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i;
+
+ /* Initialize & enable NAPI */
+ for (i = 0; i < bnad->num_rxp_per_rx; i++) {
+ rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
napi_enable(&rx_ctrl->napi);
}
@@ -1632,7 +1710,7 @@ bnad_napi_disable(struct bnad *bnad, u32 rx_id)
/* Should be held with conf_lock held */
void
-bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
+bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
{
struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
@@ -1651,21 +1729,22 @@ bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
bnad_tx_msix_unregister(bnad, tx_info,
bnad->num_txq_per_tx);
+ if (0 == tx_id)
+ tasklet_kill(&bnad->tx_free_tasklet);
+
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_tx_destroy(tx_info->tx);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
tx_info->tx = NULL;
-
- if (0 == tx_id)
- tasklet_kill(&bnad->tx_free_tasklet);
+ tx_info->tx_id = 0;
bnad_tx_res_free(bnad, res_info);
}
/* Should be held with conf_lock held */
int
-bnad_setup_tx(struct bnad *bnad, uint tx_id)
+bnad_setup_tx(struct bnad *bnad, u32 tx_id)
{
int err;
struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
@@ -1673,21 +1752,24 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id)
struct bna_intr_info *intr_info =
&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
- struct bna_tx_event_cbfn tx_cbfn;
+ static const struct bna_tx_event_cbfn tx_cbfn = {
+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
+ .tx_stall_cbfn = bnad_cb_tx_stall,
+ .tx_resume_cbfn = bnad_cb_tx_resume,
+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
+ };
+
struct bna_tx *tx;
unsigned long flags;
+ tx_info->tx_id = tx_id;
+
/* Initialize the Tx object configuration */
tx_config->num_txq = bnad->num_txq_per_tx;
tx_config->txq_depth = bnad->txq_depth;
tx_config->tx_type = BNA_TX_T_REGULAR;
-
- /* Initialize the tx event handlers */
- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
+ tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
/* Get BNA's resource requirement for one tx object */
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1741,14 +1823,15 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
rx_config->rx_type = BNA_RX_T_REGULAR;
rx_config->num_paths = bnad->num_rxp_per_rx;
+ rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
if (bnad->num_rxp_per_rx > 1) {
rx_config->rss_status = BNA_STATUS_T_ENABLED;
rx_config->rss_config.hash_type =
- (BFI_RSS_T_V4_TCP |
- BFI_RSS_T_V6_TCP |
- BFI_RSS_T_V4_IP |
- BFI_RSS_T_V6_IP);
+ (BFI_ENET_RSS_IPV6 |
+ BFI_ENET_RSS_IPV6_TCP |
+ BFI_ENET_RSS_IPV4 |
+ BFI_ENET_RSS_IPV4_TCP);
rx_config->rss_config.hash_mask =
bnad->num_rxp_per_rx - 1;
get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
@@ -1766,31 +1849,41 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}
+static void
+bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
+{
+ struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
+ int i;
+
+ for (i = 0; i < bnad->num_rxp_per_rx; i++)
+ rx_info->rx_ctrl[i].bnad = bnad;
+}
+
/* Called with mutex_lock(&bnad->conf_mutex) held */
void
-bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
+bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
{
struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
unsigned long flags;
- int dim_timer_del = 0;
+ int to_del = 0;
if (!rx_info->rx)
return;
if (0 == rx_id) {
spin_lock_irqsave(&bnad->bna_lock, flags);
- dim_timer_del = bnad_dim_timer_running(bnad);
- if (dim_timer_del)
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
+ test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
+ to_del = 1;
+ }
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- if (dim_timer_del)
+ if (to_del)
del_timer_sync(&bnad->dim_timer);
}
- bnad_napi_disable(bnad, rx_id);
-
init_completion(&bnad->bnad_completions.rx_comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
@@ -1800,18 +1893,21 @@ bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
+ bnad_napi_disable(bnad, rx_id);
+
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_destroy(rx_info->rx);
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
rx_info->rx = NULL;
+ rx_info->rx_id = 0;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
bnad_rx_res_free(bnad, res_info);
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
int
-bnad_setup_rx(struct bnad *bnad, uint rx_id)
+bnad_setup_rx(struct bnad *bnad, u32 rx_id)
{
int err;
struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
@@ -1819,21 +1915,23 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
struct bna_intr_info *intr_info =
&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
- struct bna_rx_event_cbfn rx_cbfn;
+ static const struct bna_rx_event_cbfn rx_cbfn = {
+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
+ .rx_stall_cbfn = bnad_cb_rx_stall,
+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
+ .rx_post_cbfn = bnad_cb_rx_post,
+ };
struct bna_rx *rx;
unsigned long flags;
+ rx_info->rx_id = rx_id;
+
/* Initialize the Rx object configuration */
bnad_init_rx_config(bnad, rx_config);
- /* Initialize the Rx event handlers */
- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
-
/* Get BNA's resource requirement for one Rx object */
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_res_req(rx_config, res_info);
@@ -1851,14 +1949,25 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
if (err)
return err;
+ bnad_rx_ctrl_init(bnad, rx_id);
+
/* Ask BNA to create one Rx object, supplying required resources */
spin_lock_irqsave(&bnad->bna_lock, flags);
rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
rx_info);
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
- if (!rx)
+ if (!rx) {
+ err = -ENOMEM;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
goto err_return;
+ }
rx_info->rx = rx;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /*
+ * Init NAPI, so that state is set to NAPI_STATE_SCHED,
+ * so that IRQ handler cannot schedule NAPI at this point.
+ */
+ bnad_napi_init(bnad, rx_id);
/* Register ISR for the Rx object */
if (intr_info->intr_type == BNA_INTR_T_MSIX) {
@@ -1868,9 +1977,6 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
goto err_return;
}
- /* Enable NAPI */
- bnad_napi_enable(bnad, rx_id);
-
spin_lock_irqsave(&bnad->bna_lock, flags);
if (0 == rx_id) {
/* Set up Dynamic Interrupt Moderation Vector */
@@ -1887,6 +1993,9 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
bna_rx_enable(rx);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ /* Enable scheduling of NAPI */
+ bnad_napi_enable(bnad, rx_id);
+
return 0;
err_return:
@@ -1926,7 +2035,7 @@ bnad_rx_coalescing_timeo_set(struct bnad *bnad)
/*
* Called with bnad->bna_lock held
*/
-static int
+int
bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
{
int ret;
@@ -1946,7 +2055,7 @@ bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
}
/* Should be called with conf_lock held */
-static int
+int
bnad_enable_default_bcast(struct bnad *bnad)
{
struct bnad_rx_info *rx_info = &bnad->rx_info[0];
@@ -1971,15 +2080,13 @@ bnad_enable_default_bcast(struct bnad *bnad)
return 0;
}
-/* Called with bnad_conf_lock() held */
-static void
+/* Called with mutex_lock(&bnad->conf_mutex) held */
+void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
u16 vid;
unsigned long flags;
- BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
-
for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
@@ -2031,11 +2138,11 @@ bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
void
bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
- struct bfi_ll_stats_mac *mac_stats;
- u64 bmap;
+ struct bfi_enet_stats_mac *mac_stats;
+ u32 bmap;
int i;
- mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
+ mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
stats->rx_errors =
mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
@@ -2054,13 +2161,12 @@ bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
stats->rx_crc_errors = mac_stats->rx_fcs_error;
stats->rx_frame_errors = mac_stats->rx_alignment_error;
/* recv'r fifo overrun */
- bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
- ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
- for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ bmap = bna_rx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
if (bmap & 1) {
stats->rx_fifo_errors +=
bnad->stats.bna_stats->
- hw_stats->rxf_stats[i].frame_drops;
+ hw_stats.rxf_stats[i].frame_drops;
break;
}
bmap >>= 1;
@@ -2089,9 +2195,6 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
int err;
- /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */
- BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
- skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err) {
@@ -2118,7 +2221,6 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
} else {
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
- BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
ipv6h->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
@@ -2140,7 +2242,7 @@ bnad_q_num_init(struct bnad *bnad)
int rxps;
rxps = min((uint)num_online_cpus(),
- (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
+ (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
if (!(bnad->cfg_flags & BNAD_CF_MSIX))
rxps = 1; /* INTx */
@@ -2158,7 +2260,7 @@ bnad_q_num_init(struct bnad *bnad)
* Called with bnad->bna_lock held b'cos of cfg_flags access
*/
static void
-bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
+bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
{
bnad->num_txq_per_tx = 1;
if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
@@ -2171,76 +2273,72 @@ bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
bnad->num_rxp_per_rx = 1;
}
-/* Enable / disable device */
-static void
-bnad_device_disable(struct bnad *bnad)
+/* Enable / disable ioceth */
+static int
+bnad_ioceth_disable(struct bnad *bnad)
{
unsigned long flags;
-
- init_completion(&bnad->bnad_completions.ioc_comp);
+ int err = 0;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
+ init_completion(&bnad->bnad_completions.ioc_comp);
+ bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- wait_for_completion(&bnad->bnad_completions.ioc_comp);
+ wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
+ msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
+
+ err = bnad->bnad_completions.ioc_comp_status;
+ return err;
}
static int
-bnad_device_enable(struct bnad *bnad)
+bnad_ioceth_enable(struct bnad *bnad)
{
int err = 0;
unsigned long flags;
- init_completion(&bnad->bnad_completions.ioc_comp);
-
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_device_enable(&bnad->bna.device);
+ init_completion(&bnad->bnad_completions.ioc_comp);
+ bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
+ bna_ioceth_enable(&bnad->bna.ioceth);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- wait_for_completion(&bnad->bnad_completions.ioc_comp);
+ wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
+ msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
- if (bnad->bnad_completions.ioc_comp_status)
- err = bnad->bnad_completions.ioc_comp_status;
+ err = bnad->bnad_completions.ioc_comp_status;
return err;
}
/* Free BNA resources */
static void
-bnad_res_free(struct bnad *bnad)
+bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
+ u32 res_val_max)
{
int i;
- struct bna_res_info *res_info = &bnad->res_info[0];
- for (i = 0; i < BNA_RES_T_MAX; i++) {
- if (res_info[i].res_type == BNA_RES_T_MEM)
- bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
- else
- bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
- }
+ for (i = 0; i < res_val_max; i++)
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
}
/* Allocates memory and interrupt resources for BNA */
static int
-bnad_res_alloc(struct bnad *bnad)
+bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
+ u32 res_val_max)
{
int i, err;
- struct bna_res_info *res_info = &bnad->res_info[0];
- for (i = 0; i < BNA_RES_T_MAX; i++) {
- if (res_info[i].res_type == BNA_RES_T_MEM)
- err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
- else
- err = bnad_mbox_irq_alloc(bnad,
- &res_info[i].res_u.intr_info);
+ for (i = 0; i < res_val_max; i++) {
+ err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
if (err)
goto err_return;
}
return 0;
err_return:
- bnad_res_free(bnad);
+ bnad_res_free(bnad, res_info, res_val_max);
return err;
}
@@ -2273,17 +2371,21 @@ bnad_enable_msix(struct bnad *bnad)
ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
if (ret > 0) {
/* Not enough MSI-X vectors. */
+ pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
+ ret, bnad->msix_num);
spin_lock_irqsave(&bnad->bna_lock, flags);
/* ret = #of vectors that we got */
- bnad_q_num_adjust(bnad, ret);
+ bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
+ (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
- + (bnad->num_rx
- * bnad->num_rxp_per_rx) +
+ bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
BNAD_MAILBOX_MSIX_VECTORS;
+ if (bnad->msix_num > ret)
+ goto intx_mode;
+
/* Try once more with adjusted numbers */
/* If this fails, fall back to INTx */
ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
@@ -2293,9 +2395,13 @@ bnad_enable_msix(struct bnad *bnad)
} else if (ret < 0)
goto intx_mode;
+
+ pci_intx(bnad->pcidev, 0);
+
return;
intx_mode:
+ pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
kfree(bnad->msix_table);
bnad->msix_table = NULL;
@@ -2351,12 +2457,12 @@ bnad_open(struct net_device *netdev)
pause_config.tx_pause = 0;
pause_config.rx_pause = 0;
- mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
+ mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
- bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
- bna_port_enable(&bnad->bna.port);
+ bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
+ bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+ bna_enet_enable(&bnad->bna.enet);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* Enable broadcast */
@@ -2396,14 +2502,14 @@ bnad_stop(struct net_device *netdev)
/* Stop the stats timer */
bnad_stats_timer_stop(bnad);
- init_completion(&bnad->bnad_completions.port_comp);
+ init_completion(&bnad->bnad_completions.enet_comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
- bnad_cb_port_disabled);
+ bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
+ bnad_cb_enet_disabled);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- wait_for_completion(&bnad->bnad_completions.port_comp);
+ wait_for_completion(&bnad->bnad_completions.enet_comp);
bnad_cleanup_tx(bnad, 0);
bnad_cleanup_rx(bnad, 0);
@@ -2425,51 +2531,57 @@ static netdev_tx_t
bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct bnad *bnad = netdev_priv(netdev);
+ u32 txq_id = 0;
+ struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
u16 txq_prod, vlan_tag = 0;
u32 unmap_prod, wis, wis_used, wi_range;
u32 vectors, vect_id, i, acked;
- u32 tx_id;
int err;
+ unsigned int len;
+ u32 gso_size;
- struct bnad_tx_info *tx_info;
- struct bna_tcb *tcb;
- struct bnad_unmap_q *unmap_q;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
dma_addr_t dma_addr;
struct bna_txq_entry *txqent;
- bna_txq_wi_ctrl_flag_t flags;
+ u16 flags;
- if (unlikely
- (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
+ if (unlikely(skb->len <= ETH_HLEN)) {
dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
+ return NETDEV_TX_OK;
+ }
+ if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
+ return NETDEV_TX_OK;
+ }
+ if (unlikely(skb_headlen(skb) == 0)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
return NETDEV_TX_OK;
}
-
- tx_id = 0;
-
- tx_info = &bnad->tx_info[tx_id];
- tcb = tx_info->tcb[tx_id];
- unmap_q = tcb->unmap_q;
/*
* Takes care of the Tx that is scheduled between clearing the flag
- * and the netif_stop_queue() call.
+ * and the netif_tx_stop_all_queues() call.
*/
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
return NETDEV_TX_OK;
}
vectors = 1 + skb_shinfo(skb)->nr_frags;
- if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
+ if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
return NETDEV_TX_OK;
}
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
acked = 0;
- if (unlikely
- (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
- vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+ if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
+ vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
if ((u16) (*tcb->hw_consumer_index) !=
tcb->consumer_index &&
!test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
@@ -2501,18 +2613,12 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
}
unmap_prod = unmap_q->producer_index;
- wis_used = 1;
- vect_id = 0;
flags = 0;
txq_prod = tcb->producer_index;
BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
- BUG_ON(!(wi_range <= tcb->q_depth));
txqent->hdr.wi.reserved = 0;
txqent->hdr.wi.num_vectors = vectors;
- txqent->hdr.wi.opcode =
- htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
- BNA_TXQ_WI_SEND));
if (vlan_tx_tag_present(skb)) {
vlan_tag = (u16) vlan_tx_tag_get(skb);
@@ -2527,62 +2633,93 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
txqent->hdr.wi.vlan_tag = htons(vlan_tag);
if (skb_is_gso(skb)) {
+ gso_size = skb_shinfo(skb)->gso_size;
+
+ if (unlikely(gso_size > netdev->mtu)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
+ return NETDEV_TX_OK;
+ }
+ if (unlikely((gso_size + skb_transport_offset(skb) +
+ tcp_hdrlen(skb)) >= skb->len)) {
+ txqent->hdr.wi.opcode =
+ __constant_htons(BNA_TXQ_WI_SEND);
+ txqent->hdr.wi.lso_mss = 0;
+ BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
+ } else {
+ txqent->hdr.wi.opcode =
+ __constant_htons(BNA_TXQ_WI_SEND_LSO);
+ txqent->hdr.wi.lso_mss = htons(gso_size);
+ }
+
err = bnad_tso_prepare(bnad, skb);
- if (err) {
+ if (unlikely(err)) {
dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
return NETDEV_TX_OK;
}
- txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
txqent->hdr.wi.l4_hdr_size_n_offset =
htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
(tcp_hdrlen(skb) >> 2,
skb_transport_offset(skb)));
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 proto = 0;
-
+ } else {
+ txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
- if (skb->protocol == htons(ETH_P_IP))
- proto = ip_hdr(skb)->protocol;
- else if (skb->protocol == htons(ETH_P_IPV6)) {
- /* nexthdr may not be TCP immediately. */
- proto = ipv6_hdr(skb)->nexthdr;
+ if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
+ return NETDEV_TX_OK;
}
- if (proto == IPPROTO_TCP) {
- flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
- txqent->hdr.wi.l4_hdr_size_n_offset =
- htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
- (0, skb_transport_offset(skb)));
- BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 proto = 0;
- BUG_ON(!(skb_headlen(skb) >=
- skb_transport_offset(skb) + tcp_hdrlen(skb)));
-
- } else if (proto == IPPROTO_UDP) {
- flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
- txqent->hdr.wi.l4_hdr_size_n_offset =
- htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
- (0, skb_transport_offset(skb)));
-
- BNAD_UPDATE_CTR(bnad, udpcsum_offload);
+ if (skb->protocol == __constant_htons(ETH_P_IP))
+ proto = ip_hdr(skb)->protocol;
+ else if (skb->protocol ==
+ __constant_htons(ETH_P_IPV6)) {
+ /* nexthdr may not be TCP immediately. */
+ proto = ipv6_hdr(skb)->nexthdr;
+ }
+ if (proto == IPPROTO_TCP) {
+ flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
+
+ if (unlikely(skb_headlen(skb) <
+ skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
+ return NETDEV_TX_OK;
+ }
- BUG_ON(!(skb_headlen(skb) >=
- skb_transport_offset(skb) +
- sizeof(struct udphdr)));
- } else {
- err = skb_checksum_help(skb);
- BNAD_UPDATE_CTR(bnad, csum_help);
- if (err) {
+ } else if (proto == IPPROTO_UDP) {
+ flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, udpcsum_offload);
+ if (unlikely(skb_headlen(skb) <
+ skb_transport_offset(skb) +
+ sizeof(struct udphdr))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
+ return NETDEV_TX_OK;
+ }
+ } else {
dev_kfree_skb(skb);
- BNAD_UPDATE_CTR(bnad, csum_help_err);
+ BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
return NETDEV_TX_OK;
}
+ } else {
+ txqent->hdr.wi.l4_hdr_size_n_offset = 0;
}
- } else {
- txqent->hdr.wi.lso_mss = 0;
- txqent->hdr.wi.l4_hdr_size_n_offset = 0;
}
txqent->hdr.wi.flags = htons(flags);
@@ -2590,19 +2727,36 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
txqent->hdr.wi.frame_length = htonl(skb->len);
unmap_q->unmap_array[unmap_prod].skb = skb;
- BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
- txqent->vector[vect_id].length = htons(skb_headlen(skb));
+ len = skb_headlen(skb);
+ txqent->vector[0].length = htons(len);
dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
- BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+ BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+ vect_id = 0;
+ wis_used = 1;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- u32 size = frag->size;
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ u16 size = skb_frag_size(frag);
+
+ if (unlikely(size == 0)) {
+ unmap_prod = unmap_q->producer_index;
+
+ unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
+ unmap_q->unmap_array,
+ unmap_prod, unmap_q->q_depth, skb,
+ i);
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
+ return NETDEV_TX_OK;
+ }
+
+ len += size;
if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
vect_id = 0;
@@ -2614,22 +2768,34 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
wis_used = 0;
BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
txqent, wi_range);
- BUG_ON(!(wi_range <= tcb->q_depth));
}
wis_used++;
- txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+ txqent->hdr.wi_ext.opcode =
+ __constant_htons(BNA_TXQ_WI_EXTENSION);
}
BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
txqent->vector[vect_id].length = htons(size);
- dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
- frag->page_offset, size, DMA_TO_DEVICE);
+ dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
+ 0, size, DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
}
+ if (unlikely(len != skb->len)) {
+ unmap_prod = unmap_q->producer_index;
+
+ unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
+ unmap_q->unmap_array, unmap_prod,
+ unmap_q->q_depth, skb,
+ skb_shinfo(skb)->nr_frags);
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
+ return NETDEV_TX_OK;
+ }
+
unmap_q->producer_index = unmap_prod;
BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
tcb->producer_index = txq_prod;
@@ -2640,6 +2806,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
bna_txq_prod_indx_doorbell(tcb);
+ smp_mb();
if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
tasklet_schedule(&bnad->tx_free_tasklet);
@@ -2667,7 +2834,7 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
return stats;
}
-static void
+void
bnad_set_rx_mode(struct net_device *netdev)
{
struct bnad *bnad = netdev_priv(netdev);
@@ -2706,6 +2873,9 @@ bnad_set_rx_mode(struct net_device *netdev)
}
}
+ if (bnad->rx_info[0].rx == NULL)
+ goto unlock;
+
bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
if (!netdev_mc_empty(netdev)) {
@@ -2760,11 +2930,25 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
}
static int
-bnad_change_mtu(struct net_device *netdev, int new_mtu)
+bnad_mtu_set(struct bnad *bnad, int mtu)
{
- int mtu, err = 0;
unsigned long flags;
+ init_completion(&bnad->bnad_completions.mtu_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion(&bnad->bnad_completions.mtu_comp);
+
+ return bnad->bnad_completions.mtu_comp_status;
+}
+
+static int
+bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int err, mtu = netdev->mtu;
struct bnad *bnad = netdev_priv(netdev);
if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
@@ -2774,11 +2958,10 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
- mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
-
- spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
+ err = bnad_mtu_set(bnad, mtu);
+ if (err)
+ err = -EBUSY;
mutex_unlock(&bnad->conf_mutex);
return err;
@@ -2839,18 +3022,21 @@ bnad_netpoll(struct net_device *netdev)
bnad_isr(bnad->pcidev->irq, netdev);
bna_intx_enable(&bnad->bna, curr_mask);
} else {
+ /*
+ * Tx processing may happen in sending context, so no need
+ * to explicitly process completions here
+ */
+
+ /* Rx processing */
for (i = 0; i < bnad->num_rx; i++) {
rx_info = &bnad->rx_info[i];
if (!rx_info->rx)
continue;
for (j = 0; j < bnad->num_rxp_per_rx; j++) {
rx_ctrl = &rx_info->rx_ctrl[j];
- if (rx_ctrl->ccb) {
- bnad_disable_rx_irq(bnad,
- rx_ctrl->ccb);
+ if (rx_ctrl->ccb)
bnad_netif_rx_schedule_poll(bnad,
rx_ctrl->ccb);
- }
}
}
}
@@ -2863,7 +3049,6 @@ static const struct net_device_ops bnad_netdev_ops = {
.ndo_start_xmit = bnad_start_xmit,
.ndo_get_stats64 = bnad_get_stats64,
.ndo_set_rx_mode = bnad_set_rx_mode,
- .ndo_set_multicast_list = bnad_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = bnad_set_mac_address,
.ndo_change_mtu = bnad_change_mtu,
@@ -2968,7 +3153,7 @@ bnad_uninit(struct bnad *bnad)
/*
* Initialize locks
- a) Per device mutes used for serializing configuration
+ a) Per ioceth mutes used for serializing configuration
changes from OS interface
b) spin lock used to protect bna state machine
*/
@@ -3033,7 +3218,7 @@ static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pcidev_id)
{
- bool using_dac = false;
+ bool using_dac;
int err;
struct bnad *bnad;
struct bna *bna;
@@ -3058,12 +3243,15 @@ bnad_pci_probe(struct pci_dev *pdev,
*/
netdev = alloc_etherdev(sizeof(struct bnad));
if (!netdev) {
- dev_err(&pdev->dev, "alloc_etherdev failed\n");
+ dev_err(&pdev->dev, "netdev allocation failed\n");
err = -ENOMEM;
return err;
}
bnad = netdev_priv(netdev);
+ bnad_lock_init(bnad);
+
+ mutex_lock(&bnad->conf_mutex);
/*
* PCI initialization
* Output : using_dac = 1 for 64 bit DMA
@@ -3071,9 +3259,8 @@ bnad_pci_probe(struct pci_dev *pdev,
*/
err = bnad_pci_init(bnad, pdev, &using_dac);
if (err)
- goto free_netdev;
+ goto unlock_mutex;
- bnad_lock_init(bnad);
/*
* Initialize bnad structure
* Setup relation between pci_dev & netdev
@@ -3082,21 +3269,22 @@ bnad_pci_probe(struct pci_dev *pdev,
err = bnad_init(bnad, pdev, netdev);
if (err)
goto pci_uninit;
+
/* Initialize netdev structure, set up ethtool ops */
bnad_netdev_init(bnad, using_dac);
/* Set link to down state */
netif_carrier_off(netdev);
- bnad_enable_msix(bnad);
-
/* Get resource requirement form bna */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
bna_res_req(&bnad->res_info[0]);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* Allocate resources from bna */
- err = bnad_res_alloc(bnad);
+ err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
if (err)
- goto free_netdev;
+ goto drv_uninit;
bna = &bnad->bna;
@@ -3106,38 +3294,74 @@ bnad_pci_probe(struct pci_dev *pdev,
pcidev_info.device_id = bnad->pcidev->device;
pcidev_info.pci_bar_kva = bnad->bar0;
- mutex_lock(&bnad->conf_mutex);
-
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
bnad->stats.bna_stats = &bna->stats;
+ bnad_enable_msix(bnad);
+ err = bnad_mbox_irq_alloc(bnad);
+ if (err)
+ goto res_free;
+
+
/* Set up timers */
- setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
+ setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
((unsigned long)bnad));
- setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
+ setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
((unsigned long)bnad));
- setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
+ setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
((unsigned long)bnad));
- setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
+ setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
((unsigned long)bnad));
/* Now start the timer before calling IOC */
- mod_timer(&bnad->bna.device.ioc.iocpf_timer,
+ mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
/*
* Start the chip
- * Don't care even if err != 0, bna state machine will
- * deal with it
+ * If the call back comes with error, we bail out.
+ * This is a catastrophic error.
*/
- err = bnad_device_enable(bnad);
+ err = bnad_ioceth_enable(bnad);
+ if (err) {
+ pr_err("BNA: Initialization failed err=%d\n",
+ err);
+ goto probe_success;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
+ bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
+ bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
+ bna_attr(bna)->num_rxp - 1);
+ if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
+ bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
+ err = -EIO;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (err)
+ goto disable_ioceth;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
+ if (err) {
+ err = -EIO;
+ goto disable_ioceth;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* Get the burnt-in mac */
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_port_mac_get(&bna->port, &bnad->perm_addr);
+ bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
bnad_set_netdev_perm_addr(bnad);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3147,29 +3371,38 @@ bnad_pci_probe(struct pci_dev *pdev,
err = register_netdev(netdev);
if (err) {
pr_err("BNA : Registering with netdev failed\n");
- goto disable_device;
+ goto probe_uninit;
}
+ set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
return 0;
-disable_device:
+probe_success:
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+
+probe_uninit:
mutex_lock(&bnad->conf_mutex);
- bnad_device_disable(bnad);
- del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
- del_timer_sync(&bnad->bna.device.ioc.sem_timer);
- del_timer_sync(&bnad->bna.device.ioc.hb_timer);
+ bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
+disable_ioceth:
+ bnad_ioceth_disable(bnad);
+ del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+ del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
+ del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_uninit(bna);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- mutex_unlock(&bnad->conf_mutex);
-
- bnad_res_free(bnad);
+ bnad_mbox_irq_free(bnad);
bnad_disable_msix(bnad);
+res_free:
+ bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
+drv_uninit:
+ bnad_uninit(bnad);
pci_uninit:
bnad_pci_uninit(pdev);
+unlock_mutex:
+ mutex_unlock(&bnad->conf_mutex);
bnad_lock_uninit(bnad);
- bnad_uninit(bnad);
-free_netdev:
free_netdev(netdev);
return err;
}
@@ -3189,21 +3422,24 @@ bnad_pci_remove(struct pci_dev *pdev)
bnad = netdev_priv(netdev);
bna = &bnad->bna;
- unregister_netdev(netdev);
+ if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
+ unregister_netdev(netdev);
mutex_lock(&bnad->conf_mutex);
- bnad_device_disable(bnad);
- del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
- del_timer_sync(&bnad->bna.device.ioc.sem_timer);
- del_timer_sync(&bnad->bna.device.ioc.hb_timer);
+ bnad_ioceth_disable(bnad);
+ del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+ del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
+ del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_uninit(bna);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- mutex_unlock(&bnad->conf_mutex);
- bnad_res_free(bnad);
+ bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
+ bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
+ bnad_mbox_irq_free(bnad);
bnad_disable_msix(bnad);
bnad_pci_uninit(pdev);
+ mutex_unlock(&bnad->conf_mutex);
bnad_lock_uninit(bnad);
bnad_uninit(bnad);
free_netdev(netdev);
@@ -3215,7 +3451,14 @@ static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
PCI_DEVICE_ID_BROCADE_CT),
.class = PCI_CLASS_NETWORK_ETHERNET << 8,
.class_mask = 0xffff00
- }, {0, }
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
+ BFA_PCI_DEVICE_ID_CT2),
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+ .class_mask = 0xffff00
+ },
+ {0, },
};
MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
@@ -3264,3 +3507,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
+MODULE_FIRMWARE(CNA_FW_FILE_CT2);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
new file mode 100644
index 00000000000..5487ca42d01
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -0,0 +1,380 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BNAD_H__
+#define __BNAD_H__
+
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
+#include <linux/ipv6.h>
+#include <linux/etherdevice.h>
+#include <linux/mutex.h>
+#include <linux/firmware.h>
+#include <linux/if_vlan.h>
+
+/* Fix for IA64 */
+#include <asm/checksum.h>
+#include <net/ip6_checksum.h>
+
+#include <net/ip.h>
+#include <net/tcp.h>
+
+#include "bna.h"
+
+#define BNAD_TXQ_DEPTH 2048
+#define BNAD_RXQ_DEPTH 2048
+
+#define BNAD_MAX_TX 1
+#define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */
+#define BNAD_TXQ_NUM 1
+
+#define BNAD_MAX_RX 1
+#define BNAD_MAX_RXP_PER_RX 16
+#define BNAD_MAX_RXQ_PER_RXP 2
+
+/*
+ * Control structure pointed to ccb->ctrl, which
+ * determines the NAPI / LRO behavior CCB
+ * There is 1:1 corres. between ccb & ctrl
+ */
+struct bnad_rx_ctrl {
+ struct bna_ccb *ccb;
+ struct bnad *bnad;
+ unsigned long flags;
+ struct napi_struct napi;
+ u64 rx_intr_ctr;
+ u64 rx_poll_ctr;
+ u64 rx_schedule;
+ u64 rx_keep_poll;
+ u64 rx_complete;
+};
+
+#define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC
+
+/*
+ * GLOBAL #defines (CONSTANTS)
+ */
+#define BNAD_NAME "bna"
+#define BNAD_NAME_LEN 64
+
+#define BNAD_VERSION "3.0.2.2"
+
+#define BNAD_MAILBOX_MSIX_INDEX 0
+#define BNAD_MAILBOX_MSIX_VECTORS 1
+#define BNAD_INTX_TX_IB_BITMASK 0x1
+#define BNAD_INTX_RX_IB_BITMASK 0x2
+
+#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
+#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
+
+#define BNAD_IOCETH_TIMEOUT 10000
+
+#define BNAD_MAX_Q_DEPTH 0x10000
+#define BNAD_MIN_Q_DEPTH 0x200
+
+#define BNAD_MAX_RXQ_DEPTH (BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq)
+/* keeping MAX TX and RX Q depth equal */
+#define BNAD_MAX_TXQ_DEPTH BNAD_MAX_RXQ_DEPTH
+
+#define BNAD_JUMBO_MTU 9000
+
+#define BNAD_NETIF_WAKE_THRESHOLD 8
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT 3
+
+/* Bit positions for tcb->flags */
+#define BNAD_TXQ_FREE_SENT 0
+#define BNAD_TXQ_TX_STARTED 1
+
+/* Bit positions for rcb->flags */
+#define BNAD_RXQ_REFILL 0
+#define BNAD_RXQ_STARTED 1
+#define BNAD_RXQ_POST_OK 2
+
+/* Resource limits */
+#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx)
+#define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx)
+
+/*
+ * DATA STRUCTURES
+ */
+
+/* enums */
+enum bnad_intr_source {
+ BNAD_INTR_TX = 1,
+ BNAD_INTR_RX = 2
+};
+
+enum bnad_link_state {
+ BNAD_LS_DOWN = 0,
+ BNAD_LS_UP = 1
+};
+
+struct bnad_completion {
+ struct completion ioc_comp;
+ struct completion ucast_comp;
+ struct completion mcast_comp;
+ struct completion tx_comp;
+ struct completion rx_comp;
+ struct completion stats_comp;
+ struct completion enet_comp;
+ struct completion mtu_comp;
+
+ u8 ioc_comp_status;
+ u8 ucast_comp_status;
+ u8 mcast_comp_status;
+ u8 tx_comp_status;
+ u8 rx_comp_status;
+ u8 stats_comp_status;
+ u8 port_comp_status;
+ u8 mtu_comp_status;
+};
+
+/* Tx Rx Control Stats */
+struct bnad_drv_stats {
+ u64 netif_queue_stop;
+ u64 netif_queue_wakeup;
+ u64 netif_queue_stopped;
+ u64 tso4;
+ u64 tso6;
+ u64 tso_err;
+ u64 tcpcsum_offload;
+ u64 udpcsum_offload;
+ u64 csum_help;
+ u64 tx_skb_too_short;
+ u64 tx_skb_stopping;
+ u64 tx_skb_max_vectors;
+ u64 tx_skb_mss_too_long;
+ u64 tx_skb_tso_too_short;
+ u64 tx_skb_tso_prepare;
+ u64 tx_skb_non_tso_too_long;
+ u64 tx_skb_tcp_hdr;
+ u64 tx_skb_udp_hdr;
+ u64 tx_skb_csum_err;
+ u64 tx_skb_headlen_too_long;
+ u64 tx_skb_headlen_zero;
+ u64 tx_skb_frag_zero;
+ u64 tx_skb_len_mismatch;
+
+ u64 hw_stats_updates;
+ u64 netif_rx_dropped;
+
+ u64 link_toggle;
+ u64 cee_toggle;
+
+ u64 rxp_info_alloc_failed;
+ u64 mbox_intr_disabled;
+ u64 mbox_intr_enabled;
+ u64 tx_unmap_q_alloc_failed;
+ u64 rx_unmap_q_alloc_failed;
+
+ u64 rxbuf_alloc_failed;
+};
+
+/* Complete driver stats */
+struct bnad_stats {
+ struct bnad_drv_stats drv_stats;
+ struct bna_stats *bna_stats;
+};
+
+/* Tx / Rx Resources */
+struct bnad_tx_res_info {
+ struct bna_res_info res_info[BNA_TX_RES_T_MAX];
+};
+
+struct bnad_rx_res_info {
+ struct bna_res_info res_info[BNA_RX_RES_T_MAX];
+};
+
+struct bnad_tx_info {
+ struct bna_tx *tx; /* 1:1 between tx_info & tx */
+ struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
+ u32 tx_id;
+} ____cacheline_aligned;
+
+struct bnad_rx_info {
+ struct bna_rx *rx; /* 1:1 between rx_info & rx */
+
+ struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
+ u32 rx_id;
+} ____cacheline_aligned;
+
+/* Unmap queues for Tx / Rx cleanup */
+struct bnad_skb_unmap {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+};
+
+struct bnad_unmap_q {
+ u32 producer_index;
+ u32 consumer_index;
+ u32 q_depth;
+ /* This should be the last one */
+ struct bnad_skb_unmap unmap_array[1];
+};
+
+/* Bit mask values for bnad->cfg_flags */
+#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
+#define BNAD_CF_PROMISC 0x02
+#define BNAD_CF_ALLMULTI 0x04
+#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
+
+/* Defines for run_flags bit-mask */
+/* Set, tested & cleared using xxx_bit() functions */
+/* Values indicated bit positions */
+#define BNAD_RF_CEE_RUNNING 0
+#define BNAD_RF_MTU_SET 1
+#define BNAD_RF_MBOX_IRQ_DISABLED 2
+#define BNAD_RF_NETDEV_REGISTERED 3
+#define BNAD_RF_DIM_TIMER_RUNNING 4
+#define BNAD_RF_STATS_TIMER_RUNNING 5
+#define BNAD_RF_TX_PRIO_SET 6
+
+
+/* Define for Fast Path flags */
+/* Defined as bit positions */
+#define BNAD_FP_IN_RX_PATH 0
+
+struct bnad {
+ struct net_device *netdev;
+
+ /* Data path */
+ struct bnad_tx_info tx_info[BNAD_MAX_TX];
+ struct bnad_rx_info rx_info[BNAD_MAX_RX];
+
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ /*
+ * These q numbers are global only because
+ * they are used to calculate MSIx vectors.
+ * Actually the exact # of queues are per Tx/Rx
+ * object.
+ */
+ u32 num_tx;
+ u32 num_rx;
+ u32 num_txq_per_tx;
+ u32 num_rxp_per_rx;
+
+ u32 txq_depth;
+ u32 rxq_depth;
+
+ u8 tx_coalescing_timeo;
+ u8 rx_coalescing_timeo;
+
+ struct bna_rx_config rx_config[BNAD_MAX_RX];
+ struct bna_tx_config tx_config[BNAD_MAX_TX];
+
+ void __iomem *bar0; /* BAR0 address */
+
+ struct bna bna;
+
+ u32 cfg_flags;
+ unsigned long run_flags;
+
+ struct pci_dev *pcidev;
+ u64 mmio_start;
+ u64 mmio_len;
+
+ u32 msix_num;
+ struct msix_entry *msix_table;
+
+ struct mutex conf_mutex;
+ spinlock_t bna_lock ____cacheline_aligned;
+
+ /* Timers */
+ struct timer_list ioc_timer;
+ struct timer_list dim_timer;
+ struct timer_list stats_timer;
+
+ /* Control path resources, memory & irq */
+ struct bna_res_info res_info[BNA_RES_T_MAX];
+ struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX];
+ struct bnad_tx_res_info tx_res_info[BNAD_MAX_TX];
+ struct bnad_rx_res_info rx_res_info[BNAD_MAX_RX];
+
+ struct bnad_completion bnad_completions;
+
+ /* Burnt in MAC address */
+ mac_t perm_addr;
+
+ struct tasklet_struct tx_free_tasklet;
+
+ /* Statistics */
+ struct bnad_stats stats;
+
+ struct bnad_diag *diag;
+
+ char adapter_name[BNAD_NAME_LEN];
+ char port_name[BNAD_NAME_LEN];
+ char mbox_irq_name[BNAD_NAME_LEN];
+};
+
+/*
+ * EXTERN VARIABLES
+ */
+extern struct firmware *bfi_fw;
+extern u32 bnad_rxqs_per_cq;
+
+/*
+ * EXTERN PROTOTYPES
+ */
+extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
+/* Netdev entry point prototypes */
+extern void bnad_set_rx_mode(struct net_device *netdev);
+extern struct net_device_stats *bnad_get_netdev_stats(
+ struct net_device *netdev);
+extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
+extern int bnad_enable_default_bcast(struct bnad *bnad);
+extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
+extern void bnad_set_ethtool_ops(struct net_device *netdev);
+
+/* Configuration & setup */
+extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
+extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
+
+extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
+extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
+extern void bnad_cleanup_tx(struct bnad *bnad, u32 tx_id);
+extern void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id);
+
+/* Timer start/stop protos */
+extern void bnad_dim_timer_start(struct bnad *bnad);
+
+/* Statistics */
+extern void bnad_netdev_qstats_fill(struct bnad *bnad,
+ struct rtnl_link_stats64 *stats);
+extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
+ struct rtnl_link_stats64 *stats);
+
+/**
+ * MACROS
+ */
+/* To set & get the stats counters */
+#define BNAD_UPDATE_CTR(_bnad, _ctr) \
+ (((_bnad)->stats.drv_stats._ctr)++)
+
+#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr)
+
+#define bnad_enable_rx_irq_unsafe(_ccb) \
+{ \
+ if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\
+ bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
+ (_ccb)->rx_coalescing_timeo); \
+ bna_ib_ack((_ccb)->i_dbell, 0); \
+ } \
+}
+
+#endif /* __BNAD_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
new file mode 100644
index 00000000000..fd3dcc1e914
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -0,0 +1,958 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "cna.h"
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+
+#include "bna.h"
+
+#include "bnad.h"
+
+#define BNAD_NUM_TXF_COUNTERS 12
+#define BNAD_NUM_RXF_COUNTERS 10
+#define BNAD_NUM_CQ_COUNTERS (3 + 5)
+#define BNAD_NUM_RXQ_COUNTERS 6
+#define BNAD_NUM_TXQ_COUNTERS 5
+
+#define BNAD_ETHTOOL_STATS_NUM \
+ (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
+ sizeof(struct bnad_drv_stats) / sizeof(u64) + \
+ offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
+
+static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_errors",
+ "tx_errors",
+ "rx_dropped",
+ "tx_dropped",
+ "multicast",
+ "collisions",
+
+ "rx_length_errors",
+ "rx_over_errors",
+ "rx_crc_errors",
+ "rx_frame_errors",
+ "rx_fifo_errors",
+ "rx_missed_errors",
+
+ "tx_aborted_errors",
+ "tx_carrier_errors",
+ "tx_fifo_errors",
+ "tx_heartbeat_errors",
+ "tx_window_errors",
+
+ "rx_compressed",
+ "tx_compressed",
+
+ "netif_queue_stop",
+ "netif_queue_wakeup",
+ "netif_queue_stopped",
+ "tso4",
+ "tso6",
+ "tso_err",
+ "tcpcsum_offload",
+ "udpcsum_offload",
+ "csum_help",
+ "tx_skb_too_short",
+ "tx_skb_stopping",
+ "tx_skb_max_vectors",
+ "tx_skb_mss_too_long",
+ "tx_skb_tso_too_short",
+ "tx_skb_tso_prepare",
+ "tx_skb_non_tso_too_long",
+ "tx_skb_tcp_hdr",
+ "tx_skb_udp_hdr",
+ "tx_skb_csum_err",
+ "tx_skb_headlen_too_long",
+ "tx_skb_headlen_zero",
+ "tx_skb_frag_zero",
+ "tx_skb_len_mismatch",
+ "hw_stats_updates",
+ "netif_rx_dropped",
+
+ "link_toggle",
+ "cee_toggle",
+
+ "rxp_info_alloc_failed",
+ "mbox_intr_disabled",
+ "mbox_intr_enabled",
+ "tx_unmap_q_alloc_failed",
+ "rx_unmap_q_alloc_failed",
+ "rxbuf_alloc_failed",
+
+ "mac_frame_64",
+ "mac_frame_65_127",
+ "mac_frame_128_255",
+ "mac_frame_256_511",
+ "mac_frame_512_1023",
+ "mac_frame_1024_1518",
+ "mac_frame_1518_1522",
+ "mac_rx_bytes",
+ "mac_rx_packets",
+ "mac_rx_fcs_error",
+ "mac_rx_multicast",
+ "mac_rx_broadcast",
+ "mac_rx_control_frames",
+ "mac_rx_pause",
+ "mac_rx_unknown_opcode",
+ "mac_rx_alignment_error",
+ "mac_rx_frame_length_error",
+ "mac_rx_code_error",
+ "mac_rx_carrier_sense_error",
+ "mac_rx_undersize",
+ "mac_rx_oversize",
+ "mac_rx_fragments",
+ "mac_rx_jabber",
+ "mac_rx_drop",
+
+ "mac_tx_bytes",
+ "mac_tx_packets",
+ "mac_tx_multicast",
+ "mac_tx_broadcast",
+ "mac_tx_pause",
+ "mac_tx_deferral",
+ "mac_tx_excessive_deferral",
+ "mac_tx_single_collision",
+ "mac_tx_muliple_collision",
+ "mac_tx_late_collision",
+ "mac_tx_excessive_collision",
+ "mac_tx_total_collision",
+ "mac_tx_pause_honored",
+ "mac_tx_drop",
+ "mac_tx_jabber",
+ "mac_tx_fcs_error",
+ "mac_tx_control_frame",
+ "mac_tx_oversize",
+ "mac_tx_undersize",
+ "mac_tx_fragments",
+
+ "bpc_tx_pause_0",
+ "bpc_tx_pause_1",
+ "bpc_tx_pause_2",
+ "bpc_tx_pause_3",
+ "bpc_tx_pause_4",
+ "bpc_tx_pause_5",
+ "bpc_tx_pause_6",
+ "bpc_tx_pause_7",
+ "bpc_tx_zero_pause_0",
+ "bpc_tx_zero_pause_1",
+ "bpc_tx_zero_pause_2",
+ "bpc_tx_zero_pause_3",
+ "bpc_tx_zero_pause_4",
+ "bpc_tx_zero_pause_5",
+ "bpc_tx_zero_pause_6",
+ "bpc_tx_zero_pause_7",
+ "bpc_tx_first_pause_0",
+ "bpc_tx_first_pause_1",
+ "bpc_tx_first_pause_2",
+ "bpc_tx_first_pause_3",
+ "bpc_tx_first_pause_4",
+ "bpc_tx_first_pause_5",
+ "bpc_tx_first_pause_6",
+ "bpc_tx_first_pause_7",
+
+ "bpc_rx_pause_0",
+ "bpc_rx_pause_1",
+ "bpc_rx_pause_2",
+ "bpc_rx_pause_3",
+ "bpc_rx_pause_4",
+ "bpc_rx_pause_5",
+ "bpc_rx_pause_6",
+ "bpc_rx_pause_7",
+ "bpc_rx_zero_pause_0",
+ "bpc_rx_zero_pause_1",
+ "bpc_rx_zero_pause_2",
+ "bpc_rx_zero_pause_3",
+ "bpc_rx_zero_pause_4",
+ "bpc_rx_zero_pause_5",
+ "bpc_rx_zero_pause_6",
+ "bpc_rx_zero_pause_7",
+ "bpc_rx_first_pause_0",
+ "bpc_rx_first_pause_1",
+ "bpc_rx_first_pause_2",
+ "bpc_rx_first_pause_3",
+ "bpc_rx_first_pause_4",
+ "bpc_rx_first_pause_5",
+ "bpc_rx_first_pause_6",
+ "bpc_rx_first_pause_7",
+
+ "rad_rx_frames",
+ "rad_rx_octets",
+ "rad_rx_vlan_frames",
+ "rad_rx_ucast",
+ "rad_rx_ucast_octets",
+ "rad_rx_ucast_vlan",
+ "rad_rx_mcast",
+ "rad_rx_mcast_octets",
+ "rad_rx_mcast_vlan",
+ "rad_rx_bcast",
+ "rad_rx_bcast_octets",
+ "rad_rx_bcast_vlan",
+ "rad_rx_drops",
+
+ "rlb_rad_rx_frames",
+ "rlb_rad_rx_octets",
+ "rlb_rad_rx_vlan_frames",
+ "rlb_rad_rx_ucast",
+ "rlb_rad_rx_ucast_octets",
+ "rlb_rad_rx_ucast_vlan",
+ "rlb_rad_rx_mcast",
+ "rlb_rad_rx_mcast_octets",
+ "rlb_rad_rx_mcast_vlan",
+ "rlb_rad_rx_bcast",
+ "rlb_rad_rx_bcast_octets",
+ "rlb_rad_rx_bcast_vlan",
+ "rlb_rad_rx_drops",
+
+ "fc_rx_ucast_octets",
+ "fc_rx_ucast",
+ "fc_rx_ucast_vlan",
+ "fc_rx_mcast_octets",
+ "fc_rx_mcast",
+ "fc_rx_mcast_vlan",
+ "fc_rx_bcast_octets",
+ "fc_rx_bcast",
+ "fc_rx_bcast_vlan",
+
+ "fc_tx_ucast_octets",
+ "fc_tx_ucast",
+ "fc_tx_ucast_vlan",
+ "fc_tx_mcast_octets",
+ "fc_tx_mcast",
+ "fc_tx_mcast_vlan",
+ "fc_tx_bcast_octets",
+ "fc_tx_bcast",
+ "fc_tx_bcast_vlan",
+ "fc_tx_parity_errors",
+ "fc_tx_timeout",
+ "fc_tx_fid_parity_errors",
+};
+
+static int
+bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = SUPPORTED_10000baseT_Full;
+ cmd->advertising = ADVERTISED_10000baseT_Full;
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ cmd->phy_address = 0;
+
+ if (netif_carrier_ok(netdev)) {
+ ethtool_cmd_speed_set(cmd, SPEED_10000);
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(cmd, -1);
+ cmd->duplex = -1;
+ }
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+
+ return 0;
+}
+
+static int
+bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ /* 10G full duplex setting supported only */
+ if (cmd->autoneg == AUTONEG_ENABLE)
+ return -EOPNOTSUPP; else {
+ if ((ethtool_cmd_speed(cmd) == SPEED_10000)
+ && (cmd->duplex == DUPLEX_FULL))
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static void
+bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bfa_ioc_attr *ioc_attr;
+ unsigned long flags;
+
+ strcpy(drvinfo->driver, BNAD_NAME);
+ strcpy(drvinfo->version, BNAD_VERSION);
+
+ ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
+ if (ioc_attr) {
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ sizeof(drvinfo->fw_version) - 1);
+ kfree(ioc_attr);
+ }
+
+ strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
+}
+
+static void
+bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
+{
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+}
+
+static int
+bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ /* Lock rqd. to access bnad->bna_lock */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ coalesce->use_adaptive_rx_coalesce =
+ (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
+ BFI_COALESCING_TIMER_UNIT;
+ coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
+ BFI_COALESCING_TIMER_UNIT;
+ coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;
+
+ return 0;
+}
+
+static int
+bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+ int to_del = 0;
+
+ if (coalesce->rx_coalesce_usecs == 0 ||
+ coalesce->rx_coalesce_usecs >
+ BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
+ return -EINVAL;
+
+ if (coalesce->tx_coalesce_usecs == 0 ||
+ coalesce->tx_coalesce_usecs >
+ BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+ /*
+ * Do not need to store rx_coalesce_usecs here
+ * Every time DIM is disabled, we can get it from the
+ * stack.
+ */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (coalesce->use_adaptive_rx_coalesce) {
+ if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
+ bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
+ bnad_dim_timer_start(bnad);
+ }
+ } else {
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
+ bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
+ test_bit(BNAD_RF_DIM_TIMER_RUNNING,
+ &bnad->run_flags)) {
+ clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
+ &bnad->run_flags);
+ to_del = 1;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (to_del)
+ del_timer_sync(&bnad->dim_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_rx_coalescing_timeo_set(bnad);
+ }
+ }
+ if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT) {
+ bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT;
+ bnad_tx_coalescing_timeo_set(bnad);
+ }
+
+ if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT) {
+ bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT;
+
+ if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
+ bnad_rx_coalescing_timeo_set(bnad);
+
+ }
+
+ /* Add Tx Inter-pkt DMA count? */
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static void
+bnad_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;
+ ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;
+
+ ringparam->rx_pending = bnad->rxq_depth;
+ ringparam->tx_pending = bnad->txq_depth;
+}
+
+static int
+bnad_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ int i, current_err, err = 0;
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ mutex_lock(&bnad->conf_mutex);
+ if (ringparam->rx_pending == bnad->rxq_depth &&
+ ringparam->tx_pending == bnad->txq_depth) {
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+ }
+
+ if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
+ ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
+ !BNA_POWER_OF_2(ringparam->rx_pending)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return -EINVAL;
+ }
+ if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
+ ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
+ !BNA_POWER_OF_2(ringparam->tx_pending)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return -EINVAL;
+ }
+
+ if (ringparam->rx_pending != bnad->rxq_depth) {
+ bnad->rxq_depth = ringparam->rx_pending;
+ if (!netif_running(netdev)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+ }
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ bnad_cleanup_rx(bnad, i);
+ current_err = bnad_setup_rx(bnad, i);
+ if (current_err && !err)
+ err = current_err;
+ }
+
+ if (!err && bnad->rx_info[0].rx) {
+ /* restore rx configuration */
+ bnad_restore_vlans(bnad, 0);
+ bnad_enable_default_bcast(bnad);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI |
+ BNAD_CF_PROMISC);
+ bnad_set_rx_mode(netdev);
+ }
+ }
+ if (ringparam->tx_pending != bnad->txq_depth) {
+ bnad->txq_depth = ringparam->tx_pending;
+ if (!netif_running(netdev)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+ }
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ bnad_cleanup_tx(bnad, i);
+ current_err = bnad_setup_tx(bnad, i);
+ if (current_err && !err)
+ err = current_err;
+ }
+ }
+
+ mutex_unlock(&bnad->conf_mutex);
+ return err;
+}
+
+static void
+bnad_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ pauseparam->autoneg = 0;
+ pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
+ pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
+}
+
+static int
+bnad_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bna_pause_config pause_config;
+ unsigned long flags;
+
+ if (pauseparam->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+ if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
+ pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
+ pause_config.rx_pause = pauseparam->rx_pause;
+ pause_config.tx_pause = pauseparam->tx_pause;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static void
+bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, q_num;
+ u32 bmap;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
+ BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
+ ETH_GSTRING_LEN));
+ memcpy(string, bnad_net_stats_strings[i],
+ ETH_GSTRING_LEN);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap = bna_tx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1) {
+ sprintf(string, "txf%d_ucast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_ucast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_ucast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_errors", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_filter_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_filter_mac_sa", i);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap >>= 1;
+ }
+
+ bmap = bna_rx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1) {
+ sprintf(string, "rxf%d_ucast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_ucast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_ucast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_frame_drops", i);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap >>= 1;
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ sprintf(string, "cq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_hw_producer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_intr", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_poll", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_schedule", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_keep_poll", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_complete", q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ sprintf(string, "rxq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_packets_with_error",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_allocbuf_failed", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1]->rxq) {
+ sprintf(string, "rxq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string,
+ "rxq%d_packets_with_error", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_allocbuf_failed",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_producer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_consumer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ sprintf(string, "txq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_hw_consumer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static int
+bnad_get_stats_count_locked(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0;
+ u32 bmap;
+
+ bmap = bna_tx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1)
+ txf_active_num++;
+ bmap >>= 1;
+ }
+ bmap = bna_rx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1)
+ rxf_active_num++;
+ bmap >>= 1;
+ }
+ count = BNAD_ETHTOOL_STATS_NUM +
+ txf_active_num * BNAD_NUM_TXF_COUNTERS +
+ rxf_active_num * BNAD_NUM_RXF_COUNTERS;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
+ count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
+ count += BNAD_NUM_RXQ_COUNTERS;
+ }
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
+ }
+ return count;
+}
+
+static int
+bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
+{
+ int i, j;
+ struct bna_rcb *rcb = NULL;
+ struct bna_tcb *tcb = NULL;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
+ buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
+ ccb->producer_index;
+ buf[bi++] = 0; /* ccb->consumer_index */
+ buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
+ ccb->hw_producer_index);
+
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_intr_ctr;
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_poll_ctr;
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_schedule;
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_keep_poll;
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_complete;
+ }
+ }
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb) {
+ if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[0]->rxq) {
+ rcb = bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[0];
+ buf[bi++] = rcb->rxq->rx_packets;
+ buf[bi++] = rcb->rxq->rx_bytes;
+ buf[bi++] = rcb->rxq->
+ rx_packets_with_error;
+ buf[bi++] = rcb->rxq->
+ rxbuf_alloc_failed;
+ buf[bi++] = rcb->producer_index;
+ buf[bi++] = rcb->consumer_index;
+ }
+ if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1]->rxq) {
+ rcb = bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[1];
+ buf[bi++] = rcb->rxq->rx_packets;
+ buf[bi++] = rcb->rxq->rx_bytes;
+ buf[bi++] = rcb->rxq->
+ rx_packets_with_error;
+ buf[bi++] = rcb->rxq->
+ rxbuf_alloc_failed;
+ buf[bi++] = rcb->producer_index;
+ buf[bi++] = rcb->consumer_index;
+ }
+ }
+ }
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ for (j = 0; j < bnad->num_txq_per_tx; j++)
+ if (bnad->tx_info[i].tcb[j] &&
+ bnad->tx_info[i].tcb[j]->txq) {
+ tcb = bnad->tx_info[i].tcb[j];
+ buf[bi++] = tcb->txq->tx_packets;
+ buf[bi++] = tcb->txq->tx_bytes;
+ buf[bi++] = tcb->producer_index;
+ buf[bi++] = tcb->consumer_index;
+ buf[bi++] = *(tcb->hw_consumer_index);
+ }
+ }
+
+ return bi;
+}
+
+static void
+bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
+ u64 *buf)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, bi;
+ unsigned long flags;
+ struct rtnl_link_stats64 *net_stats64;
+ u64 *stats64;
+ u32 bmap;
+
+ mutex_lock(&bnad->conf_mutex);
+ if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
+ mutex_unlock(&bnad->conf_mutex);
+ return;
+ }
+
+ /*
+ * Used bna_lock to sync reads from bna_stats, which is written
+ * under the same lock
+ */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bi = 0;
+ memset(buf, 0, stats->n_stats * sizeof(u64));
+
+ net_stats64 = (struct rtnl_link_stats64 *)buf;
+ bnad_netdev_qstats_fill(bnad, net_stats64);
+ bnad_netdev_hwstats_fill(bnad, net_stats64);
+
+ bi = sizeof(*net_stats64) / sizeof(u64);
+
+ /* Get netif_queue_stopped from stack */
+ bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
+
+ /* Fill driver stats into ethtool buffers */
+ stats64 = (u64 *)&bnad->stats.drv_stats;
+ for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
+ buf[bi++] = stats64[i];
+
+ /* Fill hardware stats excluding the rxf/txf into ethtool bufs */
+ stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats;
+ for (i = 0;
+ i < offsetof(struct bfi_enet_stats, rxf_stats[0]) /
+ sizeof(u64);
+ i++)
+ buf[bi++] = stats64[i];
+
+ /* Fill txf stats into ethtool buffers */
+ bmap = bna_tx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1) {
+ stats64 = (u64 *)&bnad->stats.bna_stats->
+ hw_stats.txf_stats[i];
+ for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
+ sizeof(u64); j++)
+ buf[bi++] = stats64[j];
+ }
+ bmap >>= 1;
+ }
+
+ /* Fill rxf stats into ethtool buffers */
+ bmap = bna_rx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1) {
+ stats64 = (u64 *)&bnad->stats.bna_stats->
+ hw_stats.rxf_stats[i];
+ for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
+ sizeof(u64); j++)
+ buf[bi++] = stats64[j];
+ }
+ bmap >>= 1;
+ }
+
+ /* Fill per Q stats into ethtool buffers */
+ bi = bnad_per_q_stats_fill(bnad, buf, bi);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static int
+bnad_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return bnad_get_stats_count_locked(netdev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct ethtool_ops bnad_ethtool_ops = {
+ .get_settings = bnad_get_settings,
+ .set_settings = bnad_set_settings,
+ .get_drvinfo = bnad_get_drvinfo,
+ .get_wol = bnad_get_wol,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = bnad_get_coalesce,
+ .set_coalesce = bnad_set_coalesce,
+ .get_ringparam = bnad_get_ringparam,
+ .set_ringparam = bnad_set_ringparam,
+ .get_pauseparam = bnad_get_pauseparam,
+ .set_pauseparam = bnad_set_pauseparam,
+ .get_strings = bnad_get_strings,
+ .get_ethtool_stats = bnad_get_ethtool_stats,
+ .get_sset_count = bnad_get_sset_count
+};
+
+void
+bnad_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
new file mode 100644
index 00000000000..1b3e90dfbd9
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -0,0 +1,107 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __CNA_H__
+#define __CNA_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+
+#define bfa_sm_fault(__event) do { \
+ pr_err("SM Assertion failure: %s: %d: event = %d\n", \
+ __FILE__, __LINE__, __event); \
+} while (0)
+
+extern char bfa_version[];
+
+#define CNA_FW_FILE_CT "ctfw.bin"
+#define CNA_FW_FILE_CT2 "ct2fw.bin"
+#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
+
+#pragma pack(1)
+
+#define MAC_ADDRLEN (6)
+typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
+
+#pragma pack()
+
+#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
+#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
+#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
+
+/*
+ * bfa_q_qe_init - to initialize a queue element
+ */
+#define bfa_q_qe_init(_qe) { \
+ bfa_q_next(_qe) = (struct list_head *) NULL; \
+ bfa_q_prev(_qe) = (struct list_head *) NULL; \
+}
+
+/*
+ * bfa_q_deq - dequeue an element from head of the queue
+ */
+#define bfa_q_deq(_q, _qe) { \
+ if (!list_empty(_q)) { \
+ (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
+ bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
+ (struct list_head *) (_q); \
+ bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
+ bfa_q_qe_init(*((struct list_head **) _qe)); \
+ } else { \
+ *((struct list_head **)(_qe)) = NULL; \
+ } \
+}
+
+/*
+ * bfa_q_deq_tail - dequeue an element from tail of the queue
+ */
+#define bfa_q_deq_tail(_q, _qe) { \
+ if (!list_empty(_q)) { \
+ *((struct list_head **) (_qe)) = bfa_q_prev(_q); \
+ bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
+ (struct list_head *) (_q); \
+ bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
+ bfa_q_qe_init(*((struct list_head **) _qe)); \
+ } else { \
+ *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
+ } \
+}
+
+/*
+ * bfa_add_tail_head - enqueue an element at the head of queue
+ */
+#define bfa_q_enq_head(_q, _qe) { \
+ if (!(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL)) \
+ pr_err("Assertion failure: %s:%d: %d", \
+ __FILE__, __LINE__, \
+ (bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL));\
+ bfa_q_next(_qe) = bfa_q_next(_q); \
+ bfa_q_prev(_qe) = (struct list_head *) (_q); \
+ bfa_q_prev(bfa_q_next(_q)) = (struct list_head *) (_qe); \
+ bfa_q_next(_q) = (struct list_head *) (_qe); \
+}
+
+#endif /* __CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
new file mode 100644
index 00000000000..725b9fff337
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -0,0 +1,92 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include <linux/firmware.h>
+#include "bfi.h"
+#include "cna.h"
+
+const struct firmware *bfi_fw;
+static u32 *bfi_image_ct_cna, *bfi_image_ct2_cna;
+static u32 bfi_image_ct_cna_size, bfi_image_ct2_cna_size;
+
+static u32 *
+cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+ u32 *bfi_image_size, char *fw_name)
+{
+ const struct firmware *fw;
+
+ if (request_firmware(&fw, fw_name, &pdev->dev)) {
+ pr_alert("Can't locate firmware %s\n", fw_name);
+ goto error;
+ }
+
+ *bfi_image = (u32 *)fw->data;
+ *bfi_image_size = fw->size/sizeof(u32);
+ bfi_fw = fw;
+
+ return *bfi_image;
+error:
+ return NULL;
+}
+
+u32 *
+cna_get_firmware_buf(struct pci_dev *pdev)
+{
+ if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
+ if (bfi_image_ct2_cna_size == 0)
+ cna_read_firmware(pdev, &bfi_image_ct2_cna,
+ &bfi_image_ct2_cna_size, CNA_FW_FILE_CT2);
+ return bfi_image_ct2_cna;
+ } else if (bfa_asic_id_ct(pdev->device)) {
+ if (bfi_image_ct_cna_size == 0)
+ cna_read_firmware(pdev, &bfi_image_ct_cna,
+ &bfi_image_ct_cna_size, CNA_FW_FILE_CT);
+ return bfi_image_ct_cna;
+ }
+
+ return NULL;
+}
+
+u32 *
+bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
+{
+ switch (asic_gen) {
+ case BFI_ASIC_GEN_CT:
+ return (u32 *)(bfi_image_ct_cna + off);
+ break;
+ case BFI_ASIC_GEN_CT2:
+ return (u32 *)(bfi_image_ct2_cna + off);
+ break;
+ default:
+ return NULL;
+ }
+}
+
+u32
+bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
+{
+ switch (asic_gen) {
+ case BFI_ASIC_GEN_CT:
+ return bfi_image_ct_cna_size;
+ break;
+ case BFI_ASIC_GEN_CT2:
+ return bfi_image_ct2_cna_size;
+ break;
+ default:
+ return 0;
+ }
+}
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
new file mode 100644
index 00000000000..98849a1fc74
--- /dev/null
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -0,0 +1,45 @@
+#
+# Atmel device configuration
+#
+
+config HAVE_NET_MACB
+ bool
+
+config NET_ATMEL
+ bool "Atmel devices"
+ depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y.
+ Make sure you know the name of your card. Read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ If unsure, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the remaining Atmel network card questions. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_ATMEL
+
+config ARM_AT91_ETHER
+ tristate "AT91RM9200 Ethernet support"
+ depends on ARM && ARCH_AT91RM9200
+ select NET_CORE
+ select MII
+ ---help---
+ If you wish to compile a kernel for the AT91RM9200 and enable
+ ethernet support, then you should always answer Y to this.
+
+config MACB
+ tristate "Atmel MACB support"
+ depends on HAVE_NET_MACB
+ select PHYLIB
+ ---help---
+ The Atmel MACB ethernet interface is found on many AT32 and AT91
+ parts. Say Y to include support for the MACB chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called macb.
+
+endif # NET_ATMEL
diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile
new file mode 100644
index 00000000000..9068b8331ed
--- /dev/null
+++ b/drivers/net/ethernet/cadence/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Atmel network device drivers.
+#
+
+obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
+obj-$(CONFIG_MACB) += macb.o
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 29dc43523ce..1b0ba8c819f 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -968,7 +968,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_stop = at91ether_close,
.ndo_start_xmit = at91ether_start_xmit,
.ndo_get_stats = at91ether_stats,
- .ndo_set_multicast_list = at91ether_set_multicast_list,
+ .ndo_set_rx_mode = at91ether_set_multicast_list,
.ndo_set_mac_address = set_mac_address,
.ndo_do_ioctl = at91ether_ioctl,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/arm/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
index 353f4dab62b..353f4dab62b 100644
--- a/drivers/net/arm/at91_ether.h
+++ b/drivers/net/ethernet/cadence/at91_ether.h
diff --git a/drivers/net/macb.c b/drivers/net/ethernet/cadence/macb.c
index dc4e305a108..a437b46e549 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1106,7 +1106,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_open = macb_open,
.ndo_stop = macb_close,
.ndo_start_xmit = macb_start_xmit,
- .ndo_set_multicast_list = macb_set_rx_mode,
+ .ndo_set_rx_mode = macb_set_rx_mode,
.ndo_get_stats = macb_get_stats,
.ndo_do_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/macb.h b/drivers/net/ethernet/cadence/macb.h
index d3212f6db70..d3212f6db70 100644
--- a/drivers/net/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
new file mode 100644
index 00000000000..2de50f95798
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -0,0 +1,107 @@
+#
+# Chelsio device configuration
+#
+
+config NET_VENDOR_CHELSIO
+ bool "Chelsio devices"
+ default y
+ depends on PCI || INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Chelsio devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_CHELSIO
+
+config CHELSIO_T1
+ tristate "Chelsio 10Gb Ethernet support"
+ depends on PCI
+ select CRC32
+ select MDIO
+ ---help---
+ This driver supports Chelsio gigabit and 10-gigabit
+ Ethernet cards. More information about adapter features and
+ performance tuning is in <file:Documentation/networking/cxgb.txt>.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cxgb.
+
+config CHELSIO_T1_1G
+ bool "Chelsio gigabit Ethernet support"
+ depends on CHELSIO_T1
+ ---help---
+ Enables support for Chelsio's gigabit Ethernet PCI cards. If you
+ are using only 10G cards say 'N' here.
+
+config CHELSIO_T3
+ tristate "Chelsio Communications T3 10Gb Ethernet support"
+ depends on PCI && INET
+ select FW_LOADER
+ select MDIO
+ ---help---
+ This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
+ adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cxgb3.
+
+config CHELSIO_T4
+ tristate "Chelsio Communications T4 Ethernet support"
+ depends on PCI
+ select FW_LOADER
+ select MDIO
+ ---help---
+ This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
+ adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module choose M here; the module
+ will be called cxgb4.
+
+config CHELSIO_T4VF
+ tristate "Chelsio Communications T4 Virtual Function Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
+ adapters with PCI-E SR-IOV Virtual Functions.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module choose M here; the module
+ will be called cxgb4vf.
+
+endif # NET_VENDOR_CHELSIO
diff --git a/drivers/net/ethernet/chelsio/Makefile b/drivers/net/ethernet/chelsio/Makefile
new file mode 100644
index 00000000000..390510b5e90
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Chelsio network device drivers.
+#
+
+obj-$(CONFIG_CHELSIO_T1) += cxgb/
+obj-$(CONFIG_CHELSIO_T3) += cxgb3/
+obj-$(CONFIG_CHELSIO_T4) += cxgb4/
+obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/ethernet/chelsio/cxgb/Makefile
index 57a4b262fd3..57a4b262fd3 100644
--- a/drivers/net/chelsio/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb/Makefile
diff --git a/drivers/net/chelsio/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 5ccbed1784d..5ccbed1784d 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h
index 1f095a9fc73..1f095a9fc73 100644
--- a/drivers/net/chelsio/cphy.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
index e36d45b78cc..e36d45b78cc 100644
--- a/drivers/net/chelsio/cpl5_cmd.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 3edbbc4c511..ca26d97171b 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -712,12 +712,10 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
e->rx_max_pending = MAX_RX_BUFFERS;
- e->rx_mini_max_pending = 0;
e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
e->tx_max_pending = MAX_CMDQ_ENTRIES;
e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
- e->rx_mini_pending = 0;
e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
e->tx_pending = adapter->params.sge.cmdQ_size[0];
}
@@ -964,7 +962,7 @@ static const struct net_device_ops cxgb_netdev_ops = {
.ndo_start_xmit = t1_start_xmit,
.ndo_get_stats = t1_get_stats,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = t1_set_rxmode,
+ .ndo_set_rx_mode = t1_set_rxmode,
.ndo_do_ioctl = t1_ioctl,
.ndo_change_mtu = t1_change_mtu,
.ndo_set_mac_address = t1_set_mac_addr,
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
index eef655c827d..eef655c827d 100644
--- a/drivers/net/chelsio/elmer0.h
+++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c
index 639ff195573..639ff195573 100644
--- a/drivers/net/chelsio/espi.c
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.c
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h
index 5694aad4fbc..5694aad4fbc 100644
--- a/drivers/net/chelsio/espi.h
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.h
diff --git a/drivers/net/chelsio/fpga_defs.h b/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h
index ccdb2bc9ae9..ccdb2bc9ae9 100644
--- a/drivers/net/chelsio/fpga_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index d42337457cf..d42337457cf 100644
--- a/drivers/net/chelsio/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
diff --git a/drivers/net/chelsio/mv88e1xxx.c b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c
index 71018a4fdf1..71018a4fdf1 100644
--- a/drivers/net/chelsio/mv88e1xxx.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c
diff --git a/drivers/net/chelsio/mv88e1xxx.h b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h
index 967cc428635..967cc428635 100644
--- a/drivers/net/chelsio/mv88e1xxx.h
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
index f7136b2fd1e..f7136b2fd1e 100644
--- a/drivers/net/chelsio/mv88x201x.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/ethernet/chelsio/cxgb/my3126.c
index a683fd3bb62..a683fd3bb62 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/ethernet/chelsio/cxgb/my3126.c
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index 40c7b93abab..40c7b93abab 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h
index c80bf4d6d0a..c80bf4d6d0a 100644
--- a/drivers/net/chelsio/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/regs.h
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
new file mode 100644
index 00000000000..f9b60230004
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -0,0 +1,2139 @@
+/*****************************************************************************
+ * *
+ * File: sge.c *
+ * $Revision: 1.26 $ *
+ * $Date: 2005/06/21 18:29:48 $ *
+ * Description: *
+ * DMA engine. *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#include "common.h"
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/ktime.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+
+#include "cpl5_cmd.h"
+#include "sge.h"
+#include "regs.h"
+#include "espi.h"
+
+/* This belongs in if_ether.h */
+#define ETH_P_CPL5 0xf
+
+#define SGE_CMDQ_N 2
+#define SGE_FREELQ_N 2
+#define SGE_CMDQ0_E_N 1024
+#define SGE_CMDQ1_E_N 128
+#define SGE_FREEL_SIZE 4096
+#define SGE_JUMBO_FREEL_SIZE 512
+#define SGE_FREEL_REFILL_THRESH 16
+#define SGE_RESPQ_E_N 1024
+#define SGE_INTRTIMER_NRES 1000
+#define SGE_RX_SM_BUF_SIZE 1536
+#define SGE_TX_DESC_MAX_PLEN 16384
+
+#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
+
+/*
+ * Period of the TX buffer reclaim timer. This timer does not need to run
+ * frequently as TX buffers are usually reclaimed by new TX packets.
+ */
+#define TX_RECLAIM_PERIOD (HZ / 4)
+
+#define M_CMD_LEN 0x7fffffff
+#define V_CMD_LEN(v) (v)
+#define G_CMD_LEN(v) ((v) & M_CMD_LEN)
+#define V_CMD_GEN1(v) ((v) << 31)
+#define V_CMD_GEN2(v) (v)
+#define F_CMD_DATAVALID (1 << 1)
+#define F_CMD_SOP (1 << 2)
+#define V_CMD_EOP(v) ((v) << 3)
+
+/*
+ * Command queue, receive buffer list, and response queue descriptors.
+ */
+#if defined(__BIG_ENDIAN_BITFIELD)
+struct cmdQ_e {
+ u32 addr_lo;
+ u32 len_gen;
+ u32 flags;
+ u32 addr_hi;
+};
+
+struct freelQ_e {
+ u32 addr_lo;
+ u32 len_gen;
+ u32 gen2;
+ u32 addr_hi;
+};
+
+struct respQ_e {
+ u32 Qsleeping : 4;
+ u32 Cmdq1CreditReturn : 5;
+ u32 Cmdq1DmaComplete : 5;
+ u32 Cmdq0CreditReturn : 5;
+ u32 Cmdq0DmaComplete : 5;
+ u32 FreelistQid : 2;
+ u32 CreditValid : 1;
+ u32 DataValid : 1;
+ u32 Offload : 1;
+ u32 Eop : 1;
+ u32 Sop : 1;
+ u32 GenerationBit : 1;
+ u32 BufferLength;
+};
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+struct cmdQ_e {
+ u32 len_gen;
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 flags;
+};
+
+struct freelQ_e {
+ u32 len_gen;
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 gen2;
+};
+
+struct respQ_e {
+ u32 BufferLength;
+ u32 GenerationBit : 1;
+ u32 Sop : 1;
+ u32 Eop : 1;
+ u32 Offload : 1;
+ u32 DataValid : 1;
+ u32 CreditValid : 1;
+ u32 FreelistQid : 2;
+ u32 Cmdq0DmaComplete : 5;
+ u32 Cmdq0CreditReturn : 5;
+ u32 Cmdq1DmaComplete : 5;
+ u32 Cmdq1CreditReturn : 5;
+ u32 Qsleeping : 4;
+} ;
+#endif
+
+/*
+ * SW Context Command and Freelist Queue Descriptors
+ */
+struct cmdQ_ce {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+struct freelQ_ce {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/*
+ * SW command, freelist and response rings
+ */
+struct cmdQ {
+ unsigned long status; /* HW DMA fetch status */
+ unsigned int in_use; /* # of in-use command descriptors */
+ unsigned int size; /* # of descriptors */
+ unsigned int processed; /* total # of descs HW has processed */
+ unsigned int cleaned; /* total # of descs SW has reclaimed */
+ unsigned int stop_thres; /* SW TX queue suspend threshold */
+ u16 pidx; /* producer index (SW) */
+ u16 cidx; /* consumer index (HW) */
+ u8 genbit; /* current generation (=valid) bit */
+ u8 sop; /* is next entry start of packet? */
+ struct cmdQ_e *entries; /* HW command descriptor Q */
+ struct cmdQ_ce *centries; /* SW command context descriptor Q */
+ dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
+ spinlock_t lock; /* Lock to protect cmdQ enqueuing */
+};
+
+struct freelQ {
+ unsigned int credits; /* # of available RX buffers */
+ unsigned int size; /* free list capacity */
+ u16 pidx; /* producer index (SW) */
+ u16 cidx; /* consumer index (HW) */
+ u16 rx_buffer_size; /* Buffer size on this free list */
+ u16 dma_offset; /* DMA offset to align IP headers */
+ u16 recycleq_idx; /* skb recycle q to use */
+ u8 genbit; /* current generation (=valid) bit */
+ struct freelQ_e *entries; /* HW freelist descriptor Q */
+ struct freelQ_ce *centries; /* SW freelist context descriptor Q */
+ dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
+};
+
+struct respQ {
+ unsigned int credits; /* credits to be returned to SGE */
+ unsigned int size; /* # of response Q descriptors */
+ u16 cidx; /* consumer index (SW) */
+ u8 genbit; /* current generation(=valid) bit */
+ struct respQ_e *entries; /* HW response descriptor Q */
+ dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
+};
+
+/* Bit flags for cmdQ.status */
+enum {
+ CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
+ CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
+};
+
+/* T204 TX SW scheduler */
+
+/* Per T204 TX port */
+struct sched_port {
+ unsigned int avail; /* available bits - quota */
+ unsigned int drain_bits_per_1024ns; /* drain rate */
+ unsigned int speed; /* drain rate, mbps */
+ unsigned int mtu; /* mtu size */
+ struct sk_buff_head skbq; /* pending skbs */
+};
+
+/* Per T204 device */
+struct sched {
+ ktime_t last_updated; /* last time quotas were computed */
+ unsigned int max_avail; /* max bits to be sent to any port */
+ unsigned int port; /* port index (round robin ports) */
+ unsigned int num; /* num skbs in per port queues */
+ struct sched_port p[MAX_NPORTS];
+ struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
+};
+static void restart_sched(unsigned long);
+
+
+/*
+ * Main SGE data structure
+ *
+ * Interrupts are handled by a single CPU and it is likely that on a MP system
+ * the application is migrated to another CPU. In that scenario, we try to
+ * separate the RX(in irq context) and TX state in order to decrease memory
+ * contention.
+ */
+struct sge {
+ struct adapter *adapter; /* adapter backpointer */
+ struct net_device *netdev; /* netdevice backpointer */
+ struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
+ struct respQ respQ; /* response Q */
+ unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
+ unsigned int rx_pkt_pad; /* RX padding for L2 packets */
+ unsigned int jumbo_fl; /* jumbo freelist Q index */
+ unsigned int intrtimer_nres; /* no-resource interrupt timer */
+ unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
+ struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
+ struct timer_list espibug_timer;
+ unsigned long espibug_timeout;
+ struct sk_buff *espibug_skb[MAX_NPORTS];
+ u32 sge_control; /* shadow value of sge control reg */
+ struct sge_intr_counts stats;
+ struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
+ struct sched *tx_sched;
+ struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
+};
+
+static const u8 ch_mac_addr[ETH_ALEN] = {
+ 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
+};
+
+/*
+ * stop tasklet and free all pending skb's
+ */
+static void tx_sched_stop(struct sge *sge)
+{
+ struct sched *s = sge->tx_sched;
+ int i;
+
+ tasklet_kill(&s->sched_tsk);
+
+ for (i = 0; i < MAX_NPORTS; i++)
+ __skb_queue_purge(&s->p[s->port].skbq);
+}
+
+/*
+ * t1_sched_update_parms() is called when the MTU or link speed changes. It
+ * re-computes scheduler parameters to scope with the change.
+ */
+unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
+ unsigned int mtu, unsigned int speed)
+{
+ struct sched *s = sge->tx_sched;
+ struct sched_port *p = &s->p[port];
+ unsigned int max_avail_segs;
+
+ pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
+ if (speed)
+ p->speed = speed;
+ if (mtu)
+ p->mtu = mtu;
+
+ if (speed || mtu) {
+ unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
+ do_div(drain, (p->mtu + 50) * 1000);
+ p->drain_bits_per_1024ns = (unsigned int) drain;
+
+ if (p->speed < 1000)
+ p->drain_bits_per_1024ns =
+ 90 * p->drain_bits_per_1024ns / 100;
+ }
+
+ if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
+ p->drain_bits_per_1024ns -= 16;
+ s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
+ max_avail_segs = max(1U, 4096 / (p->mtu - 40));
+ } else {
+ s->max_avail = 16384;
+ max_avail_segs = max(1U, 9000 / (p->mtu - 40));
+ }
+
+ pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
+ "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
+ p->speed, s->max_avail, max_avail_segs,
+ p->drain_bits_per_1024ns);
+
+ return max_avail_segs * (p->mtu - 40);
+}
+
+#if 0
+
+/*
+ * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
+ * data that can be pushed per port.
+ */
+void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
+{
+ struct sched *s = sge->tx_sched;
+ unsigned int i;
+
+ s->max_avail = val;
+ for (i = 0; i < MAX_NPORTS; i++)
+ t1_sched_update_parms(sge, i, 0, 0);
+}
+
+/*
+ * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
+ * is draining.
+ */
+void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
+ unsigned int val)
+{
+ struct sched *s = sge->tx_sched;
+ struct sched_port *p = &s->p[port];
+ p->drain_bits_per_1024ns = val * 1024 / 1000;
+ t1_sched_update_parms(sge, port, 0, 0);
+}
+
+#endif /* 0 */
+
+
+/*
+ * get_clock() implements a ns clock (see ktime_get)
+ */
+static inline ktime_t get_clock(void)
+{
+ struct timespec ts;
+
+ ktime_get_ts(&ts);
+ return timespec_to_ktime(ts);
+}
+
+/*
+ * tx_sched_init() allocates resources and does basic initialization.
+ */
+static int tx_sched_init(struct sge *sge)
+{
+ struct sched *s;
+ int i;
+
+ s = kzalloc(sizeof (struct sched), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ pr_debug("tx_sched_init\n");
+ tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
+ sge->tx_sched = s;
+
+ for (i = 0; i < MAX_NPORTS; i++) {
+ skb_queue_head_init(&s->p[i].skbq);
+ t1_sched_update_parms(sge, i, 1500, 1000);
+ }
+
+ return 0;
+}
+
+/*
+ * sched_update_avail() computes the delta since the last time it was called
+ * and updates the per port quota (number of bits that can be sent to the any
+ * port).
+ */
+static inline int sched_update_avail(struct sge *sge)
+{
+ struct sched *s = sge->tx_sched;
+ ktime_t now = get_clock();
+ unsigned int i;
+ long long delta_time_ns;
+
+ delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
+
+ pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
+ if (delta_time_ns < 15000)
+ return 0;
+
+ for (i = 0; i < MAX_NPORTS; i++) {
+ struct sched_port *p = &s->p[i];
+ unsigned int delta_avail;
+
+ delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
+ p->avail = min(p->avail + delta_avail, s->max_avail);
+ }
+
+ s->last_updated = now;
+
+ return 1;
+}
+
+/*
+ * sched_skb() is called from two different places. In the tx path, any
+ * packet generating load on an output port will call sched_skb()
+ * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
+ * context (skb == NULL).
+ * The scheduler only returns a skb (which will then be sent) if the
+ * length of the skb is <= the current quota of the output port.
+ */
+static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
+ unsigned int credits)
+{
+ struct sched *s = sge->tx_sched;
+ struct sk_buff_head *skbq;
+ unsigned int i, len, update = 1;
+
+ pr_debug("sched_skb %p\n", skb);
+ if (!skb) {
+ if (!s->num)
+ return NULL;
+ } else {
+ skbq = &s->p[skb->dev->if_port].skbq;
+ __skb_queue_tail(skbq, skb);
+ s->num++;
+ skb = NULL;
+ }
+
+ if (credits < MAX_SKB_FRAGS + 1)
+ goto out;
+
+again:
+ for (i = 0; i < MAX_NPORTS; i++) {
+ s->port = (s->port + 1) & (MAX_NPORTS - 1);
+ skbq = &s->p[s->port].skbq;
+
+ skb = skb_peek(skbq);
+
+ if (!skb)
+ continue;
+
+ len = skb->len;
+ if (len <= s->p[s->port].avail) {
+ s->p[s->port].avail -= len;
+ s->num--;
+ __skb_unlink(skb, skbq);
+ goto out;
+ }
+ skb = NULL;
+ }
+
+ if (update-- && sched_update_avail(sge))
+ goto again;
+
+out:
+ /* If there are more pending skbs, we use the hardware to schedule us
+ * again.
+ */
+ if (s->num && !skb) {
+ struct cmdQ *q = &sge->cmdQ[0];
+ clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+ set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+ }
+ }
+ pr_debug("sched_skb ret %p\n", skb);
+
+ return skb;
+}
+
+/*
+ * PIO to indicate that memory mapped Q contains valid descriptor(s).
+ */
+static inline void doorbell_pio(struct adapter *adapter, u32 val)
+{
+ wmb();
+ writel(val, adapter->regs + A_SG_DOORBELL);
+}
+
+/*
+ * Frees all RX buffers on the freelist Q. The caller must make sure that
+ * the SGE is turned off before calling this function.
+ */
+static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
+{
+ unsigned int cidx = q->cidx;
+
+ while (q->credits--) {
+ struct freelQ_ce *ce = &q->centries[cidx];
+
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(ce->skb);
+ ce->skb = NULL;
+ if (++cidx == q->size)
+ cidx = 0;
+ }
+}
+
+/*
+ * Free RX free list and response queue resources.
+ */
+static void free_rx_resources(struct sge *sge)
+{
+ struct pci_dev *pdev = sge->adapter->pdev;
+ unsigned int size, i;
+
+ if (sge->respQ.entries) {
+ size = sizeof(struct respQ_e) * sge->respQ.size;
+ pci_free_consistent(pdev, size, sge->respQ.entries,
+ sge->respQ.dma_addr);
+ }
+
+ for (i = 0; i < SGE_FREELQ_N; i++) {
+ struct freelQ *q = &sge->freelQ[i];
+
+ if (q->centries) {
+ free_freelQ_buffers(pdev, q);
+ kfree(q->centries);
+ }
+ if (q->entries) {
+ size = sizeof(struct freelQ_e) * q->size;
+ pci_free_consistent(pdev, size, q->entries,
+ q->dma_addr);
+ }
+ }
+}
+
+/*
+ * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
+ * response queue.
+ */
+static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
+{
+ struct pci_dev *pdev = sge->adapter->pdev;
+ unsigned int size, i;
+
+ for (i = 0; i < SGE_FREELQ_N; i++) {
+ struct freelQ *q = &sge->freelQ[i];
+
+ q->genbit = 1;
+ q->size = p->freelQ_size[i];
+ q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
+ size = sizeof(struct freelQ_e) * q->size;
+ q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+ if (!q->entries)
+ goto err_no_mem;
+
+ size = sizeof(struct freelQ_ce) * q->size;
+ q->centries = kzalloc(size, GFP_KERNEL);
+ if (!q->centries)
+ goto err_no_mem;
+ }
+
+ /*
+ * Calculate the buffer sizes for the two free lists. FL0 accommodates
+ * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
+ * including all the sk_buff overhead.
+ *
+ * Note: For T2 FL0 and FL1 are reversed.
+ */
+ sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
+ sizeof(struct cpl_rx_data) +
+ sge->freelQ[!sge->jumbo_fl].dma_offset;
+
+ size = (16 * 1024) -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
+
+ /*
+ * Setup which skb recycle Q should be used when recycling buffers from
+ * each free list.
+ */
+ sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
+ sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
+
+ sge->respQ.genbit = 1;
+ sge->respQ.size = SGE_RESPQ_E_N;
+ sge->respQ.credits = 0;
+ size = sizeof(struct respQ_e) * sge->respQ.size;
+ sge->respQ.entries =
+ pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
+ if (!sge->respQ.entries)
+ goto err_no_mem;
+ return 0;
+
+err_no_mem:
+ free_rx_resources(sge);
+ return -ENOMEM;
+}
+
+/*
+ * Reclaims n TX descriptors and frees the buffers associated with them.
+ */
+static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
+{
+ struct cmdQ_ce *ce;
+ struct pci_dev *pdev = sge->adapter->pdev;
+ unsigned int cidx = q->cidx;
+
+ q->in_use -= n;
+ ce = &q->centries[cidx];
+ while (n--) {
+ if (likely(dma_unmap_len(ce, dma_len))) {
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ PCI_DMA_TODEVICE);
+ if (q->sop)
+ q->sop = 0;
+ }
+ if (ce->skb) {
+ dev_kfree_skb_any(ce->skb);
+ q->sop = 1;
+ }
+ ce++;
+ if (++cidx == q->size) {
+ cidx = 0;
+ ce = q->centries;
+ }
+ }
+ q->cidx = cidx;
+}
+
+/*
+ * Free TX resources.
+ *
+ * Assumes that SGE is stopped and all interrupts are disabled.
+ */
+static void free_tx_resources(struct sge *sge)
+{
+ struct pci_dev *pdev = sge->adapter->pdev;
+ unsigned int size, i;
+
+ for (i = 0; i < SGE_CMDQ_N; i++) {
+ struct cmdQ *q = &sge->cmdQ[i];
+
+ if (q->centries) {
+ if (q->in_use)
+ free_cmdQ_buffers(sge, q, q->in_use);
+ kfree(q->centries);
+ }
+ if (q->entries) {
+ size = sizeof(struct cmdQ_e) * q->size;
+ pci_free_consistent(pdev, size, q->entries,
+ q->dma_addr);
+ }
+ }
+}
+
+/*
+ * Allocates basic TX resources, consisting of memory mapped command Qs.
+ */
+static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
+{
+ struct pci_dev *pdev = sge->adapter->pdev;
+ unsigned int size, i;
+
+ for (i = 0; i < SGE_CMDQ_N; i++) {
+ struct cmdQ *q = &sge->cmdQ[i];
+
+ q->genbit = 1;
+ q->sop = 1;
+ q->size = p->cmdQ_size[i];
+ q->in_use = 0;
+ q->status = 0;
+ q->processed = q->cleaned = 0;
+ q->stop_thres = 0;
+ spin_lock_init(&q->lock);
+ size = sizeof(struct cmdQ_e) * q->size;
+ q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+ if (!q->entries)
+ goto err_no_mem;
+
+ size = sizeof(struct cmdQ_ce) * q->size;
+ q->centries = kzalloc(size, GFP_KERNEL);
+ if (!q->centries)
+ goto err_no_mem;
+ }
+
+ /*
+ * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
+ * only. For queue 0 set the stop threshold so we can handle one more
+ * packet from each port, plus reserve an additional 24 entries for
+ * Ethernet packets only. Queue 1 never suspends nor do we reserve
+ * space for Ethernet packets.
+ */
+ sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
+ (MAX_SKB_FRAGS + 1);
+ return 0;
+
+err_no_mem:
+ free_tx_resources(sge);
+ return -ENOMEM;
+}
+
+static inline void setup_ring_params(struct adapter *adapter, u64 addr,
+ u32 size, int base_reg_lo,
+ int base_reg_hi, int size_reg)
+{
+ writel((u32)addr, adapter->regs + base_reg_lo);
+ writel(addr >> 32, adapter->regs + base_reg_hi);
+ writel(size, adapter->regs + size_reg);
+}
+
+/*
+ * Enable/disable VLAN acceleration.
+ */
+void t1_vlan_mode(struct adapter *adapter, u32 features)
+{
+ struct sge *sge = adapter->sge;
+
+ if (features & NETIF_F_HW_VLAN_RX)
+ sge->sge_control |= F_VLAN_XTRACT;
+ else
+ sge->sge_control &= ~F_VLAN_XTRACT;
+ if (adapter->open_device_map) {
+ writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
+ readl(adapter->regs + A_SG_CONTROL); /* flush */
+ }
+}
+
+/*
+ * Programs the various SGE registers. However, the engine is not yet enabled,
+ * but sge->sge_control is setup and ready to go.
+ */
+static void configure_sge(struct sge *sge, struct sge_params *p)
+{
+ struct adapter *ap = sge->adapter;
+
+ writel(0, ap->regs + A_SG_CONTROL);
+ setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
+ A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
+ setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
+ A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
+ setup_ring_params(ap, sge->freelQ[0].dma_addr,
+ sge->freelQ[0].size, A_SG_FL0BASELWR,
+ A_SG_FL0BASEUPR, A_SG_FL0SIZE);
+ setup_ring_params(ap, sge->freelQ[1].dma_addr,
+ sge->freelQ[1].size, A_SG_FL1BASELWR,
+ A_SG_FL1BASEUPR, A_SG_FL1SIZE);
+
+ /* The threshold comparison uses <. */
+ writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
+
+ setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
+ A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
+ writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
+
+ sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
+ F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
+ V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
+ V_RX_PKT_OFFSET(sge->rx_pkt_pad);
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ sge->sge_control |= F_ENABLE_BIG_ENDIAN;
+#endif
+
+ /* Initialize no-resource timer */
+ sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
+
+ t1_sge_set_coalesce_params(sge, p);
+}
+
+/*
+ * Return the payload capacity of the jumbo free-list buffers.
+ */
+static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
+{
+ return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
+ sge->freelQ[sge->jumbo_fl].dma_offset -
+ sizeof(struct cpl_rx_data);
+}
+
+/*
+ * Frees all SGE related resources and the sge structure itself
+ */
+void t1_sge_destroy(struct sge *sge)
+{
+ int i;
+
+ for_each_port(sge->adapter, i)
+ free_percpu(sge->port_stats[i]);
+
+ kfree(sge->tx_sched);
+ free_tx_resources(sge);
+ free_rx_resources(sge);
+ kfree(sge);
+}
+
+/*
+ * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
+ * context Q) until the Q is full or alloc_skb fails.
+ *
+ * It is possible that the generation bits already match, indicating that the
+ * buffer is already valid and nothing needs to be done. This happens when we
+ * copied a received buffer into a new sk_buff during the interrupt processing.
+ *
+ * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
+ * we specify a RX_OFFSET in order to make sure that the IP header is 4B
+ * aligned.
+ */
+static void refill_free_list(struct sge *sge, struct freelQ *q)
+{
+ struct pci_dev *pdev = sge->adapter->pdev;
+ struct freelQ_ce *ce = &q->centries[q->pidx];
+ struct freelQ_e *e = &q->entries[q->pidx];
+ unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
+
+ while (q->credits < q->size) {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
+ if (!skb)
+ break;
+
+ skb_reserve(skb, q->dma_offset);
+ mapping = pci_map_single(pdev, skb->data, dma_len,
+ PCI_DMA_FROMDEVICE);
+ skb_reserve(skb, sge->rx_pkt_pad);
+
+ ce->skb = skb;
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, dma_len);
+ e->addr_lo = (u32)mapping;
+ e->addr_hi = (u64)mapping >> 32;
+ e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
+ wmb();
+ e->gen2 = V_CMD_GEN2(q->genbit);
+
+ e++;
+ ce++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ q->genbit ^= 1;
+ ce = q->centries;
+ e = q->entries;
+ }
+ q->credits++;
+ }
+}
+
+/*
+ * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
+ * of both rings, we go into 'few interrupt mode' in order to give the system
+ * time to free up resources.
+ */
+static void freelQs_empty(struct sge *sge)
+{
+ struct adapter *adapter = sge->adapter;
+ u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
+ u32 irqholdoff_reg;
+
+ refill_free_list(sge, &sge->freelQ[0]);
+ refill_free_list(sge, &sge->freelQ[1]);
+
+ if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
+ sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
+ irq_reg |= F_FL_EXHAUSTED;
+ irqholdoff_reg = sge->fixed_intrtimer;
+ } else {
+ /* Clear the F_FL_EXHAUSTED interrupts for now */
+ irq_reg &= ~F_FL_EXHAUSTED;
+ irqholdoff_reg = sge->intrtimer_nres;
+ }
+ writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
+ writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
+
+ /* We reenable the Qs to force a freelist GTS interrupt later */
+ doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+}
+
+#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
+#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
+#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
+ F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
+
+/*
+ * Disable SGE Interrupts
+ */
+void t1_sge_intr_disable(struct sge *sge)
+{
+ u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
+
+ writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
+ writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
+}
+
+/*
+ * Enable SGE interrupts.
+ */
+void t1_sge_intr_enable(struct sge *sge)
+{
+ u32 en = SGE_INT_ENABLE;
+ u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
+
+ if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
+ en &= ~F_PACKET_TOO_BIG;
+ writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
+ writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
+}
+
+/*
+ * Clear SGE interrupts.
+ */
+void t1_sge_intr_clear(struct sge *sge)
+{
+ writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
+ writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
+}
+
+/*
+ * SGE 'Error' interrupt handler
+ */
+int t1_sge_intr_error_handler(struct sge *sge)
+{
+ struct adapter *adapter = sge->adapter;
+ u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
+
+ if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
+ cause &= ~F_PACKET_TOO_BIG;
+ if (cause & F_RESPQ_EXHAUSTED)
+ sge->stats.respQ_empty++;
+ if (cause & F_RESPQ_OVERFLOW) {
+ sge->stats.respQ_overflow++;
+ pr_alert("%s: SGE response queue overflow\n",
+ adapter->name);
+ }
+ if (cause & F_FL_EXHAUSTED) {
+ sge->stats.freelistQ_empty++;
+ freelQs_empty(sge);
+ }
+ if (cause & F_PACKET_TOO_BIG) {
+ sge->stats.pkt_too_big++;
+ pr_alert("%s: SGE max packet size exceeded\n",
+ adapter->name);
+ }
+ if (cause & F_PACKET_MISMATCH) {
+ sge->stats.pkt_mismatch++;
+ pr_alert("%s: SGE packet mismatch\n", adapter->name);
+ }
+ if (cause & SGE_INT_FATAL)
+ t1_fatal_err(adapter);
+
+ writel(cause, adapter->regs + A_SG_INT_CAUSE);
+ return 0;
+}
+
+const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
+{
+ return &sge->stats;
+}
+
+void t1_sge_get_port_stats(const struct sge *sge, int port,
+ struct sge_port_stats *ss)
+{
+ int cpu;
+
+ memset(ss, 0, sizeof(*ss));
+ for_each_possible_cpu(cpu) {
+ struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
+
+ ss->rx_cso_good += st->rx_cso_good;
+ ss->tx_cso += st->tx_cso;
+ ss->tx_tso += st->tx_tso;
+ ss->tx_need_hdrroom += st->tx_need_hdrroom;
+ ss->vlan_xtract += st->vlan_xtract;
+ ss->vlan_insert += st->vlan_insert;
+ }
+}
+
+/**
+ * recycle_fl_buf - recycle a free list buffer
+ * @fl: the free list
+ * @idx: index of buffer to recycle
+ *
+ * Recycles the specified buffer on the given free list by adding it at
+ * the next available slot on the list.
+ */
+static void recycle_fl_buf(struct freelQ *fl, int idx)
+{
+ struct freelQ_e *from = &fl->entries[idx];
+ struct freelQ_e *to = &fl->entries[fl->pidx];
+
+ fl->centries[fl->pidx] = fl->centries[idx];
+ to->addr_lo = from->addr_lo;
+ to->addr_hi = from->addr_hi;
+ to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
+ wmb();
+ to->gen2 = V_CMD_GEN2(fl->genbit);
+ fl->credits++;
+
+ if (++fl->pidx == fl->size) {
+ fl->pidx = 0;
+ fl->genbit ^= 1;
+ }
+}
+
+static int copybreak __read_mostly = 256;
+module_param(copybreak, int, 0);
+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+
+/**
+ * get_packet - return the next ingress packet buffer
+ * @pdev: the PCI device that received the packet
+ * @fl: the SGE free list holding the packet
+ * @len: the actual packet length, excluding any SGE padding
+ *
+ * Get the next packet from a free list and complete setup of the
+ * sk_buff. If the packet is small we make a copy and recycle the
+ * original buffer, otherwise we use the original buffer itself. If a
+ * positive drop threshold is supplied packets are dropped and their
+ * buffers recycled if (a) the number of remaining buffers is under the
+ * threshold and the packet is too big to copy, or (b) the packet should
+ * be copied but there is no memory for the copy.
+ */
+static inline struct sk_buff *get_packet(struct pci_dev *pdev,
+ struct freelQ *fl, unsigned int len)
+{
+ struct sk_buff *skb;
+ const struct freelQ_ce *ce = &fl->centries[fl->cidx];
+
+ if (len < copybreak) {
+ skb = alloc_skb(len + 2, GFP_ATOMIC);
+ if (!skb)
+ goto use_orig_buf;
+
+ skb_reserve(skb, 2); /* align IP header */
+ skb_put(skb, len);
+ pci_dma_sync_single_for_cpu(pdev,
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ PCI_DMA_FROMDEVICE);
+ skb_copy_from_linear_data(ce->skb, skb->data, len);
+ pci_dma_sync_single_for_device(pdev,
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ PCI_DMA_FROMDEVICE);
+ recycle_fl_buf(fl, fl->cidx);
+ return skb;
+ }
+
+use_orig_buf:
+ if (fl->credits < 2) {
+ recycle_fl_buf(fl, fl->cidx);
+ return NULL;
+ }
+
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ skb = ce->skb;
+ prefetch(skb->data);
+
+ skb_put(skb, len);
+ return skb;
+}
+
+/**
+ * unexpected_offload - handle an unexpected offload packet
+ * @adapter: the adapter
+ * @fl: the free list that received the packet
+ *
+ * Called when we receive an unexpected offload packet (e.g., the TOE
+ * function is disabled or the card is a NIC). Prints a message and
+ * recycles the buffer.
+ */
+static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
+{
+ struct freelQ_ce *ce = &fl->centries[fl->cidx];
+ struct sk_buff *skb = ce->skb;
+
+ pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ pr_err("%s: unexpected offload packet, cmd %u\n",
+ adapter->name, *skb->data);
+ recycle_fl_buf(fl, fl->cidx);
+}
+
+/*
+ * T1/T2 SGE limits the maximum DMA size per TX descriptor to
+ * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
+ * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
+ * Note that the *_large_page_tx_descs stuff will be optimized out when
+ * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
+ *
+ * compute_large_page_descs() computes how many additional descriptors are
+ * required to break down the stack's request.
+ */
+static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
+{
+ unsigned int count = 0;
+
+ if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
+ unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int i, len = skb_headlen(skb);
+ while (len > SGE_TX_DESC_MAX_PLEN) {
+ count++;
+ len -= SGE_TX_DESC_MAX_PLEN;
+ }
+ for (i = 0; nfrags--; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ len = skb_frag_size(frag);
+ while (len > SGE_TX_DESC_MAX_PLEN) {
+ count++;
+ len -= SGE_TX_DESC_MAX_PLEN;
+ }
+ }
+ }
+ return count;
+}
+
+/*
+ * Write a cmdQ entry.
+ *
+ * Since this function writes the 'flags' field, it must not be used to
+ * write the first cmdQ entry.
+ */
+static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
+ unsigned int len, unsigned int gen,
+ unsigned int eop)
+{
+ BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
+
+ e->addr_lo = (u32)mapping;
+ e->addr_hi = (u64)mapping >> 32;
+ e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
+ e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
+}
+
+/*
+ * See comment for previous function.
+ *
+ * write_tx_descs_large_page() writes additional SGE tx descriptors if
+ * *desc_len exceeds HW's capability.
+ */
+static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
+ struct cmdQ_e **e,
+ struct cmdQ_ce **ce,
+ unsigned int *gen,
+ dma_addr_t *desc_mapping,
+ unsigned int *desc_len,
+ unsigned int nfrags,
+ struct cmdQ *q)
+{
+ if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
+ struct cmdQ_e *e1 = *e;
+ struct cmdQ_ce *ce1 = *ce;
+
+ while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
+ *desc_len -= SGE_TX_DESC_MAX_PLEN;
+ write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
+ *gen, nfrags == 0 && *desc_len == 0);
+ ce1->skb = NULL;
+ dma_unmap_len_set(ce1, dma_len, 0);
+ *desc_mapping += SGE_TX_DESC_MAX_PLEN;
+ if (*desc_len) {
+ ce1++;
+ e1++;
+ if (++pidx == q->size) {
+ pidx = 0;
+ *gen ^= 1;
+ ce1 = q->centries;
+ e1 = q->entries;
+ }
+ }
+ }
+ *e = e1;
+ *ce = ce1;
+ }
+ return pidx;
+}
+
+/*
+ * Write the command descriptors to transmit the given skb starting at
+ * descriptor pidx with the given generation.
+ */
+static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
+ unsigned int pidx, unsigned int gen,
+ struct cmdQ *q)
+{
+ dma_addr_t mapping, desc_mapping;
+ struct cmdQ_e *e, *e1;
+ struct cmdQ_ce *ce;
+ unsigned int i, flags, first_desc_len, desc_len,
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ e = e1 = &q->entries[pidx];
+ ce = &q->centries[pidx];
+
+ mapping = pci_map_single(adapter->pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+
+ desc_mapping = mapping;
+ desc_len = skb_headlen(skb);
+
+ flags = F_CMD_DATAVALID | F_CMD_SOP |
+ V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
+ V_CMD_GEN2(gen);
+ first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
+ desc_len : SGE_TX_DESC_MAX_PLEN;
+ e->addr_lo = (u32)desc_mapping;
+ e->addr_hi = (u64)desc_mapping >> 32;
+ e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
+ ce->skb = NULL;
+ dma_unmap_len_set(ce, dma_len, 0);
+
+ if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
+ desc_len > SGE_TX_DESC_MAX_PLEN) {
+ desc_mapping += first_desc_len;
+ desc_len -= first_desc_len;
+ e1++;
+ ce++;
+ if (++pidx == q->size) {
+ pidx = 0;
+ gen ^= 1;
+ e1 = q->entries;
+ ce = q->centries;
+ }
+ pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
+ &desc_mapping, &desc_len,
+ nfrags, q);
+
+ if (likely(desc_len))
+ write_tx_desc(e1, desc_mapping, desc_len, gen,
+ nfrags == 0);
+ }
+
+ ce->skb = NULL;
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
+
+ for (i = 0; nfrags--; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ e1++;
+ ce++;
+ if (++pidx == q->size) {
+ pidx = 0;
+ gen ^= 1;
+ e1 = q->entries;
+ ce = q->centries;
+ }
+
+ mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ desc_mapping = mapping;
+ desc_len = skb_frag_size(frag);
+
+ pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
+ &desc_mapping, &desc_len,
+ nfrags, q);
+ if (likely(desc_len))
+ write_tx_desc(e1, desc_mapping, desc_len, gen,
+ nfrags == 0);
+ ce->skb = NULL;
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
+ }
+ ce->skb = skb;
+ wmb();
+ e->flags = flags;
+}
+
+/*
+ * Clean up completed Tx buffers.
+ */
+static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
+{
+ unsigned int reclaim = q->processed - q->cleaned;
+
+ if (reclaim) {
+ pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
+ q->processed, q->cleaned);
+ free_cmdQ_buffers(sge, q, reclaim);
+ q->cleaned += reclaim;
+ }
+}
+
+/*
+ * Called from tasklet. Checks the scheduler for any
+ * pending skbs that can be sent.
+ */
+static void restart_sched(unsigned long arg)
+{
+ struct sge *sge = (struct sge *) arg;
+ struct adapter *adapter = sge->adapter;
+ struct cmdQ *q = &sge->cmdQ[0];
+ struct sk_buff *skb;
+ unsigned int credits, queued_skb = 0;
+
+ spin_lock(&q->lock);
+ reclaim_completed_tx(sge, q);
+
+ credits = q->size - q->in_use;
+ pr_debug("restart_sched credits=%d\n", credits);
+ while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
+ unsigned int genbit, pidx, count;
+ count = 1 + skb_shinfo(skb)->nr_frags;
+ count += compute_large_page_tx_descs(skb);
+ q->in_use += count;
+ genbit = q->genbit;
+ pidx = q->pidx;
+ q->pidx += count;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->genbit ^= 1;
+ }
+ write_tx_descs(adapter, skb, pidx, genbit, q);
+ credits = q->size - q->in_use;
+ queued_skb = 1;
+ }
+
+ if (queued_skb) {
+ clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+ set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+ }
+ }
+ spin_unlock(&q->lock);
+}
+
+/**
+ * sge_rx - process an ingress ethernet packet
+ * @sge: the sge structure
+ * @fl: the free list that contains the packet buffer
+ * @len: the packet length
+ *
+ * Process an ingress ethernet pakcet and deliver it to the stack.
+ */
+static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
+{
+ struct sk_buff *skb;
+ const struct cpl_rx_pkt *p;
+ struct adapter *adapter = sge->adapter;
+ struct sge_port_stats *st;
+ struct net_device *dev;
+
+ skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
+ if (unlikely(!skb)) {
+ sge->stats.rx_drops++;
+ return;
+ }
+
+ p = (const struct cpl_rx_pkt *) skb->data;
+ if (p->iff >= adapter->params.nports) {
+ kfree_skb(skb);
+ return;
+ }
+ __skb_pull(skb, sizeof(*p));
+
+ st = this_cpu_ptr(sge->port_stats[p->iff]);
+ dev = adapter->port[p->iff].dev;
+
+ skb->protocol = eth_type_trans(skb, dev);
+ if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
+ skb->protocol == htons(ETH_P_IP) &&
+ (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
+ ++st->rx_cso_good;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ skb_checksum_none_assert(skb);
+
+ if (p->vlan_valid) {
+ st->vlan_xtract++;
+ __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
+ }
+ netif_receive_skb(skb);
+}
+
+/*
+ * Returns true if a command queue has enough available descriptors that
+ * we can resume Tx operation after temporarily disabling its packet queue.
+ */
+static inline int enough_free_Tx_descs(const struct cmdQ *q)
+{
+ unsigned int r = q->processed - q->cleaned;
+
+ return q->in_use - r < (q->size >> 1);
+}
+
+/*
+ * Called when sufficient space has become available in the SGE command queues
+ * after the Tx packet schedulers have been suspended to restart the Tx path.
+ */
+static void restart_tx_queues(struct sge *sge)
+{
+ struct adapter *adap = sge->adapter;
+ int i;
+
+ if (!enough_free_Tx_descs(&sge->cmdQ[0]))
+ return;
+
+ for_each_port(adap, i) {
+ struct net_device *nd = adap->port[i].dev;
+
+ if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
+ netif_running(nd)) {
+ sge->stats.cmdQ_restarted[2]++;
+ netif_wake_queue(nd);
+ }
+ }
+}
+
+/*
+ * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
+ * information.
+ */
+static unsigned int update_tx_info(struct adapter *adapter,
+ unsigned int flags,
+ unsigned int pr0)
+{
+ struct sge *sge = adapter->sge;
+ struct cmdQ *cmdq = &sge->cmdQ[0];
+
+ cmdq->processed += pr0;
+ if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
+ freelQs_empty(sge);
+ flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
+ }
+ if (flags & F_CMDQ0_ENABLE) {
+ clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+
+ if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
+ !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
+ set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+ writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+ }
+ if (sge->tx_sched)
+ tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
+
+ flags &= ~F_CMDQ0_ENABLE;
+ }
+
+ if (unlikely(sge->stopped_tx_queues != 0))
+ restart_tx_queues(sge);
+
+ return flags;
+}
+
+/*
+ * Process SGE responses, up to the supplied budget. Returns the number of
+ * responses processed. A negative budget is effectively unlimited.
+ */
+static int process_responses(struct adapter *adapter, int budget)
+{
+ struct sge *sge = adapter->sge;
+ struct respQ *q = &sge->respQ;
+ struct respQ_e *e = &q->entries[q->cidx];
+ int done = 0;
+ unsigned int flags = 0;
+ unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+
+ while (done < budget && e->GenerationBit == q->genbit) {
+ flags |= e->Qsleeping;
+
+ cmdq_processed[0] += e->Cmdq0CreditReturn;
+ cmdq_processed[1] += e->Cmdq1CreditReturn;
+
+ /* We batch updates to the TX side to avoid cacheline
+ * ping-pong of TX state information on MP where the sender
+ * might run on a different CPU than this function...
+ */
+ if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
+ flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+ cmdq_processed[0] = 0;
+ }
+
+ if (unlikely(cmdq_processed[1] > 16)) {
+ sge->cmdQ[1].processed += cmdq_processed[1];
+ cmdq_processed[1] = 0;
+ }
+
+ if (likely(e->DataValid)) {
+ struct freelQ *fl = &sge->freelQ[e->FreelistQid];
+
+ BUG_ON(!e->Sop || !e->Eop);
+ if (unlikely(e->Offload))
+ unexpected_offload(adapter, fl);
+ else
+ sge_rx(sge, fl, e->BufferLength);
+
+ ++done;
+
+ /*
+ * Note: this depends on each packet consuming a
+ * single free-list buffer; cf. the BUG above.
+ */
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+ prefetch(fl->centries[fl->cidx].skb);
+
+ if (unlikely(--fl->credits <
+ fl->size - SGE_FREEL_REFILL_THRESH))
+ refill_free_list(sge, fl);
+ } else
+ sge->stats.pure_rsps++;
+
+ e++;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->genbit ^= 1;
+ e = q->entries;
+ }
+ prefetch(e);
+
+ if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+ writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+ q->credits = 0;
+ }
+ }
+
+ flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+ sge->cmdQ[1].processed += cmdq_processed[1];
+
+ return done;
+}
+
+static inline int responses_pending(const struct adapter *adapter)
+{
+ const struct respQ *Q = &adapter->sge->respQ;
+ const struct respQ_e *e = &Q->entries[Q->cidx];
+
+ return e->GenerationBit == Q->genbit;
+}
+
+/*
+ * A simpler version of process_responses() that handles only pure (i.e.,
+ * non data-carrying) responses. Such respones are too light-weight to justify
+ * calling a softirq when using NAPI, so we handle them specially in hard
+ * interrupt context. The function is called with a pointer to a response,
+ * which the caller must ensure is a valid pure response. Returns 1 if it
+ * encounters a valid data-carrying response, 0 otherwise.
+ */
+static int process_pure_responses(struct adapter *adapter)
+{
+ struct sge *sge = adapter->sge;
+ struct respQ *q = &sge->respQ;
+ struct respQ_e *e = &q->entries[q->cidx];
+ const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
+ unsigned int flags = 0;
+ unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+
+ prefetch(fl->centries[fl->cidx].skb);
+ if (e->DataValid)
+ return 1;
+
+ do {
+ flags |= e->Qsleeping;
+
+ cmdq_processed[0] += e->Cmdq0CreditReturn;
+ cmdq_processed[1] += e->Cmdq1CreditReturn;
+
+ e++;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->genbit ^= 1;
+ e = q->entries;
+ }
+ prefetch(e);
+
+ if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+ writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+ q->credits = 0;
+ }
+ sge->stats.pure_rsps++;
+ } while (e->GenerationBit == q->genbit && !e->DataValid);
+
+ flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+ sge->cmdQ[1].processed += cmdq_processed[1];
+
+ return e->GenerationBit == q->genbit;
+}
+
+/*
+ * Handler for new data events when using NAPI. This does not need any locking
+ * or protection from interrupts as data interrupts are off at this point and
+ * other adapter interrupts do not interfere.
+ */
+int t1_poll(struct napi_struct *napi, int budget)
+{
+ struct adapter *adapter = container_of(napi, struct adapter, napi);
+ int work_done = process_responses(adapter, budget);
+
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
+ writel(adapter->sge->respQ.cidx,
+ adapter->regs + A_SG_SLEEPING);
+ }
+ return work_done;
+}
+
+irqreturn_t t1_interrupt(int irq, void *data)
+{
+ struct adapter *adapter = data;
+ struct sge *sge = adapter->sge;
+ int handled;
+
+ if (likely(responses_pending(adapter))) {
+ writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ if (process_pure_responses(adapter))
+ __napi_schedule(&adapter->napi);
+ else {
+ /* no data, no NAPI needed */
+ writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
+ /* undo schedule_prep */
+ napi_enable(&adapter->napi);
+ }
+ }
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(&adapter->async_lock);
+ handled = t1_slow_intr_handler(adapter);
+ spin_unlock(&adapter->async_lock);
+
+ if (!handled)
+ sge->stats.unhandled_irqs++;
+
+ return IRQ_RETVAL(handled != 0);
+}
+
+/*
+ * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
+ *
+ * The code figures out how many entries the sk_buff will require in the
+ * cmdQ and updates the cmdQ data structure with the state once the enqueue
+ * has complete. Then, it doesn't access the global structure anymore, but
+ * uses the corresponding fields on the stack. In conjunction with a spinlock
+ * around that code, we can make the function reentrant without holding the
+ * lock when we actually enqueue (which might be expensive, especially on
+ * architectures with IO MMUs).
+ *
+ * This runs with softirqs disabled.
+ */
+static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
+ unsigned int qid, struct net_device *dev)
+{
+ struct sge *sge = adapter->sge;
+ struct cmdQ *q = &sge->cmdQ[qid];
+ unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
+
+ if (!spin_trylock(&q->lock))
+ return NETDEV_TX_LOCKED;
+
+ reclaim_completed_tx(sge, q);
+
+ pidx = q->pidx;
+ credits = q->size - q->in_use;
+ count = 1 + skb_shinfo(skb)->nr_frags;
+ count += compute_large_page_tx_descs(skb);
+
+ /* Ethernet packet */
+ if (unlikely(credits < count)) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ set_bit(dev->if_port, &sge->stopped_tx_queues);
+ sge->stats.cmdQ_full[2]++;
+ pr_err("%s: Tx ring full while queue awake!\n",
+ adapter->name);
+ }
+ spin_unlock(&q->lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(credits - count < q->stop_thres)) {
+ netif_stop_queue(dev);
+ set_bit(dev->if_port, &sge->stopped_tx_queues);
+ sge->stats.cmdQ_full[2]++;
+ }
+
+ /* T204 cmdQ0 skbs that are destined for a certain port have to go
+ * through the scheduler.
+ */
+ if (sge->tx_sched && !qid && skb->dev) {
+use_sched:
+ use_sched_skb = 1;
+ /* Note that the scheduler might return a different skb than
+ * the one passed in.
+ */
+ skb = sched_skb(sge, skb, credits);
+ if (!skb) {
+ spin_unlock(&q->lock);
+ return NETDEV_TX_OK;
+ }
+ pidx = q->pidx;
+ count = 1 + skb_shinfo(skb)->nr_frags;
+ count += compute_large_page_tx_descs(skb);
+ }
+
+ q->in_use += count;
+ genbit = q->genbit;
+ pidx = q->pidx;
+ q->pidx += count;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->genbit ^= 1;
+ }
+ spin_unlock(&q->lock);
+
+ write_tx_descs(adapter, skb, pidx, genbit, q);
+
+ /*
+ * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
+ * the doorbell if the Q is asleep. There is a natural race, where
+ * the hardware is going to sleep just after we checked, however,
+ * then the interrupt handler will detect the outstanding TX packet
+ * and ring the doorbell for us.
+ */
+ if (qid)
+ doorbell_pio(adapter, F_CMDQ1_ENABLE);
+ else {
+ clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+ set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+ }
+ }
+
+ if (use_sched_skb) {
+ if (spin_trylock(&q->lock)) {
+ credits = q->size - q->in_use;
+ skb = NULL;
+ goto use_sched;
+ }
+ }
+ return NETDEV_TX_OK;
+}
+
+#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
+
+/*
+ * eth_hdr_len - return the length of an Ethernet header
+ * @data: pointer to the start of the Ethernet header
+ *
+ * Returns the length of an Ethernet header, including optional VLAN tag.
+ */
+static inline int eth_hdr_len(const void *data)
+{
+ const struct ethhdr *e = data;
+
+ return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
+}
+
+/*
+ * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
+ */
+netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct adapter *adapter = dev->ml_priv;
+ struct sge *sge = adapter->sge;
+ struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
+ struct cpl_tx_pkt *cpl;
+ struct sk_buff *orig_skb = skb;
+ int ret;
+
+ if (skb->protocol == htons(ETH_P_CPL5))
+ goto send;
+
+ /*
+ * We are using a non-standard hard_header_len.
+ * Allocate more header room in the rare cases it is not big enough.
+ */
+ if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
+ skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
+ ++st->tx_need_hdrroom;
+ dev_kfree_skb_any(orig_skb);
+ if (!skb)
+ return NETDEV_TX_OK;
+ }
+
+ if (skb_shinfo(skb)->gso_size) {
+ int eth_type;
+ struct cpl_tx_pkt_lso *hdr;
+
+ ++st->tx_tso;
+
+ eth_type = skb_network_offset(skb) == ETH_HLEN ?
+ CPL_ETH_II : CPL_ETH_II_VLAN;
+
+ hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
+ hdr->opcode = CPL_TX_PKT_LSO;
+ hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
+ hdr->ip_hdr_words = ip_hdr(skb)->ihl;
+ hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
+ hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
+ skb_shinfo(skb)->gso_size));
+ hdr->len = htonl(skb->len - sizeof(*hdr));
+ cpl = (struct cpl_tx_pkt *)hdr;
+ } else {
+ /*
+ * Packets shorter than ETH_HLEN can break the MAC, drop them
+ * early. Also, we may get oversized packets because some
+ * parts of the kernel don't handle our unusual hard_header_len
+ * right, drop those too.
+ */
+ if (unlikely(skb->len < ETH_HLEN ||
+ skb->len > dev->mtu + eth_hdr_len(skb->data))) {
+ pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
+ skb->len, eth_hdr_len(skb->data), dev->mtu);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ ip_hdr(skb)->protocol == IPPROTO_UDP) {
+ if (unlikely(skb_checksum_help(skb))) {
+ pr_debug("%s: unable to do udp checksum\n", dev->name);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ /* Hmmm, assuming to catch the gratious arp... and we'll use
+ * it to flush out stuck espi packets...
+ */
+ if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
+ if (skb->protocol == htons(ETH_P_ARP) &&
+ arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
+ adapter->sge->espibug_skb[dev->if_port] = skb;
+ /* We want to re-use this skb later. We
+ * simply bump the reference count and it
+ * will not be freed...
+ */
+ skb = skb_get(skb);
+ }
+ }
+
+ cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
+ cpl->opcode = CPL_TX_PKT;
+ cpl->ip_csum_dis = 1; /* SW calculates IP csum */
+ cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
+ /* the length field isn't used so don't bother setting it */
+
+ st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
+ }
+ cpl->iff = dev->if_port;
+
+ if (vlan_tx_tag_present(skb)) {
+ cpl->vlan_valid = 1;
+ cpl->vlan = htons(vlan_tx_tag_get(skb));
+ st->vlan_insert++;
+ } else
+ cpl->vlan_valid = 0;
+
+send:
+ ret = t1_sge_tx(skb, adapter, 0, dev);
+
+ /* If transmit busy, and we reallocated skb's due to headroom limit,
+ * then silently discard to avoid leak.
+ */
+ if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
+ dev_kfree_skb_any(skb);
+ ret = NETDEV_TX_OK;
+ }
+ return ret;
+}
+
+/*
+ * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
+ */
+static void sge_tx_reclaim_cb(unsigned long data)
+{
+ int i;
+ struct sge *sge = (struct sge *)data;
+
+ for (i = 0; i < SGE_CMDQ_N; ++i) {
+ struct cmdQ *q = &sge->cmdQ[i];
+
+ if (!spin_trylock(&q->lock))
+ continue;
+
+ reclaim_completed_tx(sge, q);
+ if (i == 0 && q->in_use) { /* flush pending credits */
+ writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+ }
+ spin_unlock(&q->lock);
+ }
+ mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+}
+
+/*
+ * Propagate changes of the SGE coalescing parameters to the HW.
+ */
+int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
+{
+ sge->fixed_intrtimer = p->rx_coalesce_usecs *
+ core_ticks_per_usec(sge->adapter);
+ writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
+ return 0;
+}
+
+/*
+ * Allocates both RX and TX resources and configures the SGE. However,
+ * the hardware is not enabled yet.
+ */
+int t1_sge_configure(struct sge *sge, struct sge_params *p)
+{
+ if (alloc_rx_resources(sge, p))
+ return -ENOMEM;
+ if (alloc_tx_resources(sge, p)) {
+ free_rx_resources(sge);
+ return -ENOMEM;
+ }
+ configure_sge(sge, p);
+
+ /*
+ * Now that we have sized the free lists calculate the payload
+ * capacity of the large buffers. Other parts of the driver use
+ * this to set the max offload coalescing size so that RX packets
+ * do not overflow our large buffers.
+ */
+ p->large_buf_capacity = jumbo_payload_capacity(sge);
+ return 0;
+}
+
+/*
+ * Disables the DMA engine.
+ */
+void t1_sge_stop(struct sge *sge)
+{
+ int i;
+ writel(0, sge->adapter->regs + A_SG_CONTROL);
+ readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
+ if (is_T2(sge->adapter))
+ del_timer_sync(&sge->espibug_timer);
+
+ del_timer_sync(&sge->tx_reclaim_timer);
+ if (sge->tx_sched)
+ tx_sched_stop(sge);
+
+ for (i = 0; i < MAX_NPORTS; i++)
+ kfree_skb(sge->espibug_skb[i]);
+}
+
+/*
+ * Enables the DMA engine.
+ */
+void t1_sge_start(struct sge *sge)
+{
+ refill_free_list(sge, &sge->freelQ[0]);
+ refill_free_list(sge, &sge->freelQ[1]);
+
+ writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
+ doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+ readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
+ mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+ if (is_T2(sge->adapter))
+ mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+/*
+ * Callback for the T2 ESPI 'stuck packet feature' workaorund
+ */
+static void espibug_workaround_t204(unsigned long data)
+{
+ struct adapter *adapter = (struct adapter *)data;
+ struct sge *sge = adapter->sge;
+ unsigned int nports = adapter->params.nports;
+ u32 seop[MAX_NPORTS];
+
+ if (adapter->open_device_map & PORT_MASK) {
+ int i;
+
+ if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
+ return;
+
+ for (i = 0; i < nports; i++) {
+ struct sk_buff *skb = sge->espibug_skb[i];
+
+ if (!netif_running(adapter->port[i].dev) ||
+ netif_queue_stopped(adapter->port[i].dev) ||
+ !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
+ continue;
+
+ if (!skb->cb[0]) {
+ skb_copy_to_linear_data_offset(skb,
+ sizeof(struct cpl_tx_pkt),
+ ch_mac_addr,
+ ETH_ALEN);
+ skb_copy_to_linear_data_offset(skb,
+ skb->len - 10,
+ ch_mac_addr,
+ ETH_ALEN);
+ skb->cb[0] = 0xff;
+ }
+
+ /* bump the reference count to avoid freeing of
+ * the skb once the DMA has completed.
+ */
+ skb = skb_get(skb);
+ t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
+ }
+ }
+ mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+static void espibug_workaround(unsigned long data)
+{
+ struct adapter *adapter = (struct adapter *)data;
+ struct sge *sge = adapter->sge;
+
+ if (netif_running(adapter->port[0].dev)) {
+ struct sk_buff *skb = sge->espibug_skb[0];
+ u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
+
+ if ((seop & 0xfff0fff) == 0xfff && skb) {
+ if (!skb->cb[0]) {
+ skb_copy_to_linear_data_offset(skb,
+ sizeof(struct cpl_tx_pkt),
+ ch_mac_addr,
+ ETH_ALEN);
+ skb_copy_to_linear_data_offset(skb,
+ skb->len - 10,
+ ch_mac_addr,
+ ETH_ALEN);
+ skb->cb[0] = 0xff;
+ }
+
+ /* bump the reference count to avoid freeing of the
+ * skb once the DMA has completed.
+ */
+ skb = skb_get(skb);
+ t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
+ }
+ }
+ mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+/*
+ * Creates a t1_sge structure and returns suggested resource parameters.
+ */
+struct sge * __devinit t1_sge_create(struct adapter *adapter,
+ struct sge_params *p)
+{
+ struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
+ int i;
+
+ if (!sge)
+ return NULL;
+
+ sge->adapter = adapter;
+ sge->netdev = adapter->port[0].dev;
+ sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
+ sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+ for_each_port(adapter, i) {
+ sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
+ if (!sge->port_stats[i])
+ goto nomem_port;
+ }
+
+ init_timer(&sge->tx_reclaim_timer);
+ sge->tx_reclaim_timer.data = (unsigned long)sge;
+ sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
+
+ if (is_T2(sge->adapter)) {
+ init_timer(&sge->espibug_timer);
+
+ if (adapter->params.nports > 1) {
+ tx_sched_init(sge);
+ sge->espibug_timer.function = espibug_workaround_t204;
+ } else
+ sge->espibug_timer.function = espibug_workaround;
+ sge->espibug_timer.data = (unsigned long)sge->adapter;
+
+ sge->espibug_timeout = 1;
+ /* for T204, every 10ms */
+ if (adapter->params.nports > 1)
+ sge->espibug_timeout = HZ/100;
+ }
+
+
+ p->cmdQ_size[0] = SGE_CMDQ0_E_N;
+ p->cmdQ_size[1] = SGE_CMDQ1_E_N;
+ p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
+ p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
+ if (sge->tx_sched) {
+ if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
+ p->rx_coalesce_usecs = 15;
+ else
+ p->rx_coalesce_usecs = 50;
+ } else
+ p->rx_coalesce_usecs = 50;
+
+ p->coalesce_enable = 0;
+ p->sample_interval_usecs = 0;
+
+ return sge;
+nomem_port:
+ while (i >= 0) {
+ free_percpu(sge->port_stats[i]);
+ --i;
+ }
+ kfree(sge);
+ return NULL;
+
+}
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index e03980bcdd6..e03980bcdd6 100644
--- a/drivers/net/chelsio/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index 8a43c7e1970..8a43c7e1970 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
index d0f87d82566..d0f87d82566 100644
--- a/drivers/net/chelsio/suni1x10gexp_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
diff --git a/drivers/net/chelsio/tp.c b/drivers/net/ethernet/chelsio/cxgb/tp.c
index 8bed4a59e65..8bed4a59e65 100644
--- a/drivers/net/chelsio/tp.c
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.c
diff --git a/drivers/net/chelsio/tp.h b/drivers/net/ethernet/chelsio/cxgb/tp.h
index dfd8ce25106..dfd8ce25106 100644
--- a/drivers/net/chelsio/tp.h
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.h
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
index b0cb388f5e1..b0cb388f5e1 100644
--- a/drivers/net/chelsio/vsc7326.c
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
diff --git a/drivers/net/chelsio/vsc7326_reg.h b/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h
index 479edbcabe6..479edbcabe6 100644
--- a/drivers/net/chelsio/vsc7326_reg.h
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h
diff --git a/drivers/net/cxgb3/Makefile b/drivers/net/ethernet/chelsio/cxgb3/Makefile
index 29aff78c782..29aff78c782 100644
--- a/drivers/net/cxgb3/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb3/Makefile
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
index 8b395b53733..8b395b53733 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
index 2028da95afa..2028da95afa 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
diff --git a/drivers/net/cxgb3/aq100x.c b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c
index 341b7ef1508..341b7ef1508 100644
--- a/drivers/net/cxgb3/aq100x.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index df01b634324..df01b634324 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h
index 369fe711fd7..369fe711fd7 100644
--- a/drivers/net/cxgb3/cxgb3_ctl_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
index 920d918ed19..920d918ed19 100644
--- a/drivers/net/cxgb3/cxgb3_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
index b19e4376ba7..b19e4376ba7 100644
--- a/drivers/net/cxgb3/cxgb3_ioctl.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 93b41a7ac17..4d15c8f99c3 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1898,7 +1898,6 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
e->rx_max_pending = MAX_RX_BUFFERS;
- e->rx_mini_max_pending = 0;
e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
e->tx_max_pending = MAX_TXQ_ENTRIES;
@@ -3153,7 +3152,7 @@ static const struct net_device_ops cxgb_netdev_ops = {
.ndo_start_xmit = t3_eth_xmit,
.ndo_get_stats = cxgb_get_stats,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = cxgb_set_rxmode,
+ .ndo_set_rx_mode = cxgb_set_rxmode,
.ndo_do_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
.ndo_set_mac_address = cxgb_set_mac_addr,
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index da5a5d9b8af..da5a5d9b8af 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
index 929c298115c..929c298115c 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
diff --git a/drivers/net/cxgb3/firmware_exports.h b/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h
index 0d9b0e6dccf..0d9b0e6dccf 100644
--- a/drivers/net/cxgb3/firmware_exports.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 41540978a17..41540978a17 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c5f54796e2c..c5f54796e2c 100644
--- a/drivers/net/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
index e13b7fe9d08..e13b7fe9d08 100644
--- a/drivers/net/cxgb3/mc5.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/ethernet/chelsio/cxgb3/regs.h
index 6990f6c6522..6990f6c6522 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/regs.h
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
new file mode 100644
index 00000000000..cfb60e1f51d
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -0,0 +1,3303 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <net/arp.h>
+#include "common.h"
+#include "regs.h"
+#include "sge_defs.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+#include "cxgb3_offload.h"
+
+#define USE_GTS 0
+
+#define SGE_RX_SM_BUF_SIZE 1536
+
+#define SGE_RX_COPY_THRES 256
+#define SGE_RX_PULL_LEN 128
+
+#define SGE_PG_RSVD SMP_CACHE_BYTES
+/*
+ * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
+ * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
+ * directly.
+ */
+#define FL0_PG_CHUNK_SIZE 2048
+#define FL0_PG_ORDER 0
+#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
+#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
+#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
+#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
+
+#define SGE_RX_DROP_THRES 16
+#define RX_RECLAIM_PERIOD (HZ/4)
+
+/*
+ * Max number of Rx buffers we replenish at a time.
+ */
+#define MAX_RX_REFILL 16U
+/*
+ * Period of the Tx buffer reclaim timer. This timer does not need to run
+ * frequently as Tx buffers are usually reclaimed by new Tx packets.
+ */
+#define TX_RECLAIM_PERIOD (HZ / 4)
+#define TX_RECLAIM_TIMER_CHUNK 64U
+#define TX_RECLAIM_CHUNK 16U
+
+/* WR size in bytes */
+#define WR_LEN (WR_FLITS * 8)
+
+/*
+ * Types of Tx queues in each queue set. Order here matters, do not change.
+ */
+enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
+
+/* Values for sge_txq.flags */
+enum {
+ TXQ_RUNNING = 1 << 0, /* fetch engine is running */
+ TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
+};
+
+struct tx_desc {
+ __be64 flit[TX_DESC_FLITS];
+};
+
+struct rx_desc {
+ __be32 addr_lo;
+ __be32 len_gen;
+ __be32 gen2;
+ __be32 addr_hi;
+};
+
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct sk_buff *skb;
+ u8 eop; /* set if last descriptor for packet */
+ u8 addr_idx; /* buffer index of first SGL entry in descriptor */
+ u8 fragidx; /* first page fragment associated with descriptor */
+ s8 sflit; /* start flit of first SGL entry in descriptor */
+};
+
+struct rx_sw_desc { /* SW state per Rx descriptor */
+ union {
+ struct sk_buff *skb;
+ struct fl_pg_chunk pg_chunk;
+ };
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+};
+
+struct rsp_desc { /* response queue descriptor */
+ struct rss_header rss_hdr;
+ __be32 flags;
+ __be32 len_cq;
+ u8 imm_data[47];
+ u8 intr_gen;
+};
+
+/*
+ * Holds unmapping information for Tx packets that need deferred unmapping.
+ * This structure lives at skb->head and must be allocated by callers.
+ */
+struct deferred_unmap_info {
+ struct pci_dev *pdev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+/*
+ * Maps a number of flits to the number of Tx descriptors that can hold them.
+ * The formula is
+ *
+ * desc = 1 + (flits - 2) / (WR_FLITS - 1).
+ *
+ * HW allows up to 4 descriptors to be combined into a WR.
+ */
+static u8 flit_desc_map[] = {
+ 0,
+#if SGE_NUM_GENBITS == 1
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
+#elif SGE_NUM_GENBITS == 2
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+#else
+# error "SGE_NUM_GENBITS must be 1 or 2"
+#endif
+};
+
+static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
+{
+ return container_of(q, struct sge_qset, fl[qidx]);
+}
+
+static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
+{
+ return container_of(q, struct sge_qset, rspq);
+}
+
+static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
+{
+ return container_of(q, struct sge_qset, txq[qidx]);
+}
+
+/**
+ * refill_rspq - replenish an SGE response queue
+ * @adapter: the adapter
+ * @q: the response queue to replenish
+ * @credits: how many new responses to make available
+ *
+ * Replenishes a response queue by making the supplied number of responses
+ * available to HW.
+ */
+static inline void refill_rspq(struct adapter *adapter,
+ const struct sge_rspq *q, unsigned int credits)
+{
+ rmb();
+ t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
+ V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
+}
+
+/**
+ * need_skb_unmap - does the platform need unmapping of sk_buffs?
+ *
+ * Returns true if the platform needs sk_buff unmapping. The compiler
+ * optimizes away unnecessary code if this returns true.
+ */
+static inline int need_skb_unmap(void)
+{
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * unmap_skb - unmap a packet main body and its page fragments
+ * @skb: the packet
+ * @q: the Tx queue containing Tx descriptors for the packet
+ * @cidx: index of Tx descriptor
+ * @pdev: the PCI device
+ *
+ * Unmap the main body of an sk_buff and its page fragments, if any.
+ * Because of the fairly complicated structure of our SGLs and the desire
+ * to conserve space for metadata, the information necessary to unmap an
+ * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
+ * descriptors (the physical addresses of the various data buffers), and
+ * the SW descriptor state (assorted indices). The send functions
+ * initialize the indices for the first packet descriptor so we can unmap
+ * the buffers held in the first Tx descriptor here, and we have enough
+ * information at this point to set the state for the next Tx descriptor.
+ *
+ * Note that it is possible to clean up the first descriptor of a packet
+ * before the send routines have written the next descriptors, but this
+ * race does not cause any problem. We just end up writing the unmapping
+ * info for the descriptor first.
+ */
+static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
+ unsigned int cidx, struct pci_dev *pdev)
+{
+ const struct sg_ent *sgp;
+ struct tx_sw_desc *d = &q->sdesc[cidx];
+ int nfrags, frag_idx, curflit, j = d->addr_idx;
+
+ sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
+ frag_idx = d->fragidx;
+
+ if (frag_idx == 0 && skb_headlen(skb)) {
+ pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ j = 1;
+ }
+
+ curflit = d->sflit + 1 + j;
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ while (frag_idx < nfrags && curflit < WR_FLITS) {
+ pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
+ skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
+ PCI_DMA_TODEVICE);
+ j ^= 1;
+ if (j == 0) {
+ sgp++;
+ curflit++;
+ }
+ curflit++;
+ frag_idx++;
+ }
+
+ if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
+ d = cidx + 1 == q->size ? q->sdesc : d + 1;
+ d->fragidx = frag_idx;
+ d->addr_idx = j;
+ d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
+ }
+}
+
+/**
+ * free_tx_desc - reclaims Tx descriptors and their buffers
+ * @adapter: the adapter
+ * @q: the Tx queue to reclaim descriptors from
+ * @n: the number of descriptors to reclaim
+ *
+ * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ * Tx buffers. Called with the Tx queue lock held.
+ */
+static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
+ unsigned int n)
+{
+ struct tx_sw_desc *d;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int cidx = q->cidx;
+
+ const int need_unmap = need_skb_unmap() &&
+ q->cntxt_id >= FW_TUNNEL_SGEEC_START;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->skb) { /* an SGL is present */
+ if (need_unmap)
+ unmap_skb(d->skb, q, cidx, pdev);
+ if (d->eop) {
+ kfree_skb(d->skb);
+ d->skb = NULL;
+ }
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ }
+ q->cidx = cidx;
+}
+
+/**
+ * reclaim_completed_tx - reclaims completed Tx descriptors
+ * @adapter: the adapter
+ * @q: the Tx queue to reclaim completed descriptors from
+ * @chunk: maximum number of descriptors to reclaim
+ *
+ * Reclaims Tx descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. Called with the Tx
+ * queue's lock held.
+ */
+static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
+ struct sge_txq *q,
+ unsigned int chunk)
+{
+ unsigned int reclaim = q->processed - q->cleaned;
+
+ reclaim = min(chunk, reclaim);
+ if (reclaim) {
+ free_tx_desc(adapter, q, reclaim);
+ q->cleaned += reclaim;
+ q->in_use -= reclaim;
+ }
+ return q->processed - q->cleaned;
+}
+
+/**
+ * should_restart_tx - are there enough resources to restart a Tx queue?
+ * @q: the Tx queue
+ *
+ * Checks if there are enough descriptors to restart a suspended Tx queue.
+ */
+static inline int should_restart_tx(const struct sge_txq *q)
+{
+ unsigned int r = q->processed - q->cleaned;
+
+ return q->in_use - r < (q->size >> 1);
+}
+
+static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
+ struct rx_sw_desc *d)
+{
+ if (q->use_pages && d->pg_chunk.page) {
+ (*d->pg_chunk.p_cnt)--;
+ if (!*d->pg_chunk.p_cnt)
+ pci_unmap_page(pdev,
+ d->pg_chunk.mapping,
+ q->alloc_size, PCI_DMA_FROMDEVICE);
+
+ put_page(d->pg_chunk.page);
+ d->pg_chunk.page = NULL;
+ } else {
+ pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
+ q->buf_size, PCI_DMA_FROMDEVICE);
+ kfree_skb(d->skb);
+ d->skb = NULL;
+ }
+}
+
+/**
+ * free_rx_bufs - free the Rx buffers on an SGE free list
+ * @pdev: the PCI device associated with the adapter
+ * @rxq: the SGE free list to clean up
+ *
+ * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
+ * this queue should be stopped before calling this function.
+ */
+static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
+{
+ unsigned int cidx = q->cidx;
+
+ while (q->credits--) {
+ struct rx_sw_desc *d = &q->sdesc[cidx];
+
+
+ clear_rx_desc(pdev, q, d);
+ if (++cidx == q->size)
+ cidx = 0;
+ }
+
+ if (q->pg_chunk.page) {
+ __free_pages(q->pg_chunk.page, q->order);
+ q->pg_chunk.page = NULL;
+ }
+}
+
+/**
+ * add_one_rx_buf - add a packet buffer to a free-buffer list
+ * @va: buffer start VA
+ * @len: the buffer length
+ * @d: the HW Rx descriptor to write
+ * @sd: the SW Rx descriptor to write
+ * @gen: the generation bit value
+ * @pdev: the PCI device associated with the adapter
+ *
+ * Add a buffer of the given length to the supplied HW and SW Rx
+ * descriptors.
+ */
+static inline int add_one_rx_buf(void *va, unsigned int len,
+ struct rx_desc *d, struct rx_sw_desc *sd,
+ unsigned int gen, struct pci_dev *pdev)
+{
+ dma_addr_t mapping;
+
+ mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(pdev, mapping)))
+ return -ENOMEM;
+
+ dma_unmap_addr_set(sd, dma_addr, mapping);
+
+ d->addr_lo = cpu_to_be32(mapping);
+ d->addr_hi = cpu_to_be32((u64) mapping >> 32);
+ wmb();
+ d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
+ d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+ return 0;
+}
+
+static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
+ unsigned int gen)
+{
+ d->addr_lo = cpu_to_be32(mapping);
+ d->addr_hi = cpu_to_be32((u64) mapping >> 32);
+ wmb();
+ d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
+ d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+ return 0;
+}
+
+static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
+ struct rx_sw_desc *sd, gfp_t gfp,
+ unsigned int order)
+{
+ if (!q->pg_chunk.page) {
+ dma_addr_t mapping;
+
+ q->pg_chunk.page = alloc_pages(gfp, order);
+ if (unlikely(!q->pg_chunk.page))
+ return -ENOMEM;
+ q->pg_chunk.va = page_address(q->pg_chunk.page);
+ q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
+ SGE_PG_RSVD;
+ q->pg_chunk.offset = 0;
+ mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
+ 0, q->alloc_size, PCI_DMA_FROMDEVICE);
+ q->pg_chunk.mapping = mapping;
+ }
+ sd->pg_chunk = q->pg_chunk;
+
+ prefetch(sd->pg_chunk.p_cnt);
+
+ q->pg_chunk.offset += q->buf_size;
+ if (q->pg_chunk.offset == (PAGE_SIZE << order))
+ q->pg_chunk.page = NULL;
+ else {
+ q->pg_chunk.va += q->buf_size;
+ get_page(q->pg_chunk.page);
+ }
+
+ if (sd->pg_chunk.offset == 0)
+ *sd->pg_chunk.p_cnt = 1;
+ else
+ *sd->pg_chunk.p_cnt += 1;
+
+ return 0;
+}
+
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+ if (q->pend_cred >= q->credits / 4) {
+ q->pend_cred = 0;
+ wmb();
+ t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+ }
+}
+
+/**
+ * refill_fl - refill an SGE free-buffer list
+ * @adapter: the adapter
+ * @q: the free-list to refill
+ * @n: the number of new buffers to allocate
+ * @gfp: the gfp flags for allocating new buffers
+ *
+ * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
+ * allocated with the supplied gfp flags. The caller must assure that
+ * @n does not exceed the queue's capacity.
+ */
+static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
+{
+ struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+ struct rx_desc *d = &q->desc[q->pidx];
+ unsigned int count = 0;
+
+ while (n--) {
+ dma_addr_t mapping;
+ int err;
+
+ if (q->use_pages) {
+ if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
+ q->order))) {
+nomem: q->alloc_failed++;
+ break;
+ }
+ mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
+ dma_unmap_addr_set(sd, dma_addr, mapping);
+
+ add_one_rx_chunk(mapping, d, q->gen);
+ pci_dma_sync_single_for_device(adap->pdev, mapping,
+ q->buf_size - SGE_PG_RSVD,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ void *buf_start;
+
+ struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+ if (!skb)
+ goto nomem;
+
+ sd->skb = skb;
+ buf_start = skb->data;
+ err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
+ q->gen, adap->pdev);
+ if (unlikely(err)) {
+ clear_rx_desc(adap->pdev, q, sd);
+ break;
+ }
+ }
+
+ d++;
+ sd++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ sd = q->sdesc;
+ d = q->desc;
+ }
+ count++;
+ }
+
+ q->credits += count;
+ q->pend_cred += count;
+ ring_fl_db(adap, q);
+
+ return count;
+}
+
+static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+{
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
+ GFP_ATOMIC | __GFP_COMP);
+}
+
+/**
+ * recycle_rx_buf - recycle a receive buffer
+ * @adapter: the adapter
+ * @q: the SGE free list
+ * @idx: index of buffer to recycle
+ *
+ * Recycles the specified buffer on the given free list by adding it at
+ * the next available slot on the list.
+ */
+static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
+ unsigned int idx)
+{
+ struct rx_desc *from = &q->desc[idx];
+ struct rx_desc *to = &q->desc[q->pidx];
+
+ q->sdesc[q->pidx] = q->sdesc[idx];
+ to->addr_lo = from->addr_lo; /* already big endian */
+ to->addr_hi = from->addr_hi; /* likewise */
+ wmb();
+ to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
+ to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
+
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ }
+
+ q->credits++;
+ q->pend_cred++;
+ ring_fl_db(adap, q);
+}
+
+/**
+ * alloc_ring - allocate resources for an SGE descriptor ring
+ * @pdev: the PCI device
+ * @nelem: the number of descriptors
+ * @elem_size: the size of each descriptor
+ * @sw_size: the size of the SW state associated with each ring element
+ * @phys: the physical address of the allocated ring
+ * @metadata: address of the array holding the SW state for the ring
+ *
+ * Allocates resources for an SGE descriptor ring, such as Tx queues,
+ * free buffer lists, or response queues. Each SGE ring requires
+ * space for its HW descriptors plus, optionally, space for the SW state
+ * associated with each HW entry (the metadata). The function returns
+ * three values: the virtual address for the HW ring (the return value
+ * of the function), the physical address of the HW ring, and the address
+ * of the SW ring.
+ */
+static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
+ size_t sw_size, dma_addr_t * phys, void *metadata)
+{
+ size_t len = nelem * elem_size;
+ void *s = NULL;
+ void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
+
+ if (!p)
+ return NULL;
+ if (sw_size && metadata) {
+ s = kcalloc(nelem, sw_size, GFP_KERNEL);
+
+ if (!s) {
+ dma_free_coherent(&pdev->dev, len, p, *phys);
+ return NULL;
+ }
+ *(void **)metadata = s;
+ }
+ memset(p, 0, len);
+ return p;
+}
+
+/**
+ * t3_reset_qset - reset a sge qset
+ * @q: the queue set
+ *
+ * Reset the qset structure.
+ * the NAPI structure is preserved in the event of
+ * the qset's reincarnation, for example during EEH recovery.
+ */
+static void t3_reset_qset(struct sge_qset *q)
+{
+ if (q->adap &&
+ !(q->adap->flags & NAPI_INIT)) {
+ memset(q, 0, sizeof(*q));
+ return;
+ }
+
+ q->adap = NULL;
+ memset(&q->rspq, 0, sizeof(q->rspq));
+ memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
+ memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
+ q->txq_stopped = 0;
+ q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
+ q->rx_reclaim_timer.function = NULL;
+ q->nomem = 0;
+ napi_free_frags(&q->napi);
+}
+
+
+/**
+ * free_qset - free the resources of an SGE queue set
+ * @adapter: the adapter owning the queue set
+ * @q: the queue set
+ *
+ * Release the HW and SW resources associated with an SGE queue set, such
+ * as HW contexts, packet buffers, and descriptor rings. Traffic to the
+ * queue set must be quiesced prior to calling this.
+ */
+static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
+{
+ int i;
+ struct pci_dev *pdev = adapter->pdev;
+
+ for (i = 0; i < SGE_RXQ_PER_SET; ++i)
+ if (q->fl[i].desc) {
+ spin_lock_irq(&adapter->sge.reg_lock);
+ t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ free_rx_bufs(pdev, &q->fl[i]);
+ kfree(q->fl[i].sdesc);
+ dma_free_coherent(&pdev->dev,
+ q->fl[i].size *
+ sizeof(struct rx_desc), q->fl[i].desc,
+ q->fl[i].phys_addr);
+ }
+
+ for (i = 0; i < SGE_TXQ_PER_SET; ++i)
+ if (q->txq[i].desc) {
+ spin_lock_irq(&adapter->sge.reg_lock);
+ t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ if (q->txq[i].sdesc) {
+ free_tx_desc(adapter, &q->txq[i],
+ q->txq[i].in_use);
+ kfree(q->txq[i].sdesc);
+ }
+ dma_free_coherent(&pdev->dev,
+ q->txq[i].size *
+ sizeof(struct tx_desc),
+ q->txq[i].desc, q->txq[i].phys_addr);
+ __skb_queue_purge(&q->txq[i].sendq);
+ }
+
+ if (q->rspq.desc) {
+ spin_lock_irq(&adapter->sge.reg_lock);
+ t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ dma_free_coherent(&pdev->dev,
+ q->rspq.size * sizeof(struct rsp_desc),
+ q->rspq.desc, q->rspq.phys_addr);
+ }
+
+ t3_reset_qset(q);
+}
+
+/**
+ * init_qset_cntxt - initialize an SGE queue set context info
+ * @qs: the queue set
+ * @id: the queue set id
+ *
+ * Initializes the TIDs and context ids for the queues of a queue set.
+ */
+static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
+{
+ qs->rspq.cntxt_id = id;
+ qs->fl[0].cntxt_id = 2 * id;
+ qs->fl[1].cntxt_id = 2 * id + 1;
+ qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
+ qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
+ qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
+ qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
+ qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
+}
+
+/**
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ *
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
+ return (3 * n) / 2 + (n & 1);
+}
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Calculates the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
+ return flit_desc_map[n];
+}
+
+/**
+ * get_packet - return the next ingress packet buffer from a free list
+ * @adap: the adapter that received the packet
+ * @fl: the SGE free list holding the packet
+ * @len: the packet length including any SGE padding
+ * @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ * Get the next packet from a free list and complete setup of the
+ * sk_buff. If the packet is small we make a copy and recycle the
+ * original buffer, otherwise we use the original buffer itself. If a
+ * positive drop threshold is supplied packets are dropped and their
+ * buffers recycled if (a) the number of remaining buffers is under the
+ * threshold and the packet is too big to copy, or (b) the packet should
+ * be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+ unsigned int len, unsigned int drop_thres)
+{
+ struct sk_buff *skb = NULL;
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+ prefetch(sd->skb->data);
+ fl->credits--;
+
+ if (len <= SGE_RX_COPY_THRES) {
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (likely(skb != NULL)) {
+ __skb_put(skb, len);
+ pci_dma_sync_single_for_cpu(adap->pdev,
+ dma_unmap_addr(sd, dma_addr), len,
+ PCI_DMA_FROMDEVICE);
+ memcpy(skb->data, sd->skb->data, len);
+ pci_dma_sync_single_for_device(adap->pdev,
+ dma_unmap_addr(sd, dma_addr), len,
+ PCI_DMA_FROMDEVICE);
+ } else if (!drop_thres)
+ goto use_orig_buf;
+recycle:
+ recycle_rx_buf(adap, fl, fl->cidx);
+ return skb;
+ }
+
+ if (unlikely(fl->credits < drop_thres) &&
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
+ GFP_ATOMIC | __GFP_COMP) == 0)
+ goto recycle;
+
+use_orig_buf:
+ pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
+ fl->buf_size, PCI_DMA_FROMDEVICE);
+ skb = sd->skb;
+ skb_put(skb, len);
+ __refill_fl(adap, fl);
+ return skb;
+}
+
+/**
+ * get_packet_pg - return the next ingress packet buffer from a free list
+ * @adap: the adapter that received the packet
+ * @fl: the SGE free list holding the packet
+ * @len: the packet length including any SGE padding
+ * @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ * Get the next packet from a free list populated with page chunks.
+ * If the packet is small we make a copy and recycle the original buffer,
+ * otherwise we attach the original buffer as a page fragment to a fresh
+ * sk_buff. If a positive drop threshold is supplied packets are dropped
+ * and their buffers recycled if (a) the number of remaining buffers is
+ * under the threshold and the packet is too big to copy, or (b) there's
+ * no system memory.
+ *
+ * Note: this function is similar to @get_packet but deals with Rx buffers
+ * that are page chunks rather than sk_buffs.
+ */
+static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
+ struct sge_rspq *q, unsigned int len,
+ unsigned int drop_thres)
+{
+ struct sk_buff *newskb, *skb;
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+ dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
+
+ newskb = skb = q->pg_skb;
+ if (!skb && (len <= SGE_RX_COPY_THRES)) {
+ newskb = alloc_skb(len, GFP_ATOMIC);
+ if (likely(newskb != NULL)) {
+ __skb_put(newskb, len);
+ pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
+ PCI_DMA_FROMDEVICE);
+ memcpy(newskb->data, sd->pg_chunk.va, len);
+ pci_dma_sync_single_for_device(adap->pdev, dma_addr,
+ len,
+ PCI_DMA_FROMDEVICE);
+ } else if (!drop_thres)
+ return NULL;
+recycle:
+ fl->credits--;
+ recycle_rx_buf(adap, fl, fl->cidx);
+ q->rx_recycle_buf++;
+ return newskb;
+ }
+
+ if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
+ goto recycle;
+
+ prefetch(sd->pg_chunk.p_cnt);
+
+ if (!skb)
+ newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+
+ if (unlikely(!newskb)) {
+ if (!drop_thres)
+ return NULL;
+ goto recycle;
+ }
+
+ pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
+ PCI_DMA_FROMDEVICE);
+ (*sd->pg_chunk.p_cnt)--;
+ if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
+ pci_unmap_page(adap->pdev,
+ sd->pg_chunk.mapping,
+ fl->alloc_size,
+ PCI_DMA_FROMDEVICE);
+ if (!skb) {
+ __skb_put(newskb, SGE_RX_PULL_LEN);
+ memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
+ skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
+ sd->pg_chunk.offset + SGE_RX_PULL_LEN,
+ len - SGE_RX_PULL_LEN);
+ newskb->len = len;
+ newskb->data_len = len - SGE_RX_PULL_LEN;
+ newskb->truesize += newskb->data_len;
+ } else {
+ skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
+ sd->pg_chunk.page,
+ sd->pg_chunk.offset, len);
+ newskb->len += len;
+ newskb->data_len += len;
+ newskb->truesize += len;
+ }
+
+ fl->credits--;
+ /*
+ * We do not refill FLs here, we let the caller do it to overlap a
+ * prefetch.
+ */
+ return newskb;
+}
+
+/**
+ * get_imm_packet - return the next ingress packet buffer from a response
+ * @resp: the response descriptor containing the packet data
+ *
+ * Return a packet containing the immediate data of the given response.
+ */
+static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
+{
+ struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
+
+ if (skb) {
+ __skb_put(skb, IMMED_PKT_SIZE);
+ skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
+ }
+ return skb;
+}
+
+/**
+ * calc_tx_descs - calculate the number of Tx descriptors for a packet
+ * @skb: the packet
+ *
+ * Returns the number of Tx descriptors needed for the given Ethernet
+ * packet. Ethernet packets require addition of WR and CPL headers.
+ */
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+{
+ unsigned int flits;
+
+ if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
+ return 1;
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
+ if (skb_shinfo(skb)->gso_size)
+ flits++;
+ return flits_to_desc(flits);
+}
+
+/**
+ * make_sgl - populate a scatter/gather list for a packet
+ * @skb: the packet
+ * @sgp: the SGL to populate
+ * @start: start address of skb main body data to include in the SGL
+ * @len: length of skb main body data to include in the SGL
+ * @pdev: the PCI device
+ *
+ * Generates a scatter/gather list for the buffers that make up a packet
+ * and returns the SGL size in 8-byte words. The caller must size the SGL
+ * appropriately.
+ */
+static inline unsigned int make_sgl(const struct sk_buff *skb,
+ struct sg_ent *sgp, unsigned char *start,
+ unsigned int len, struct pci_dev *pdev)
+{
+ dma_addr_t mapping;
+ unsigned int i, j = 0, nfrags;
+
+ if (len) {
+ mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
+ sgp->len[0] = cpu_to_be32(len);
+ sgp->addr[0] = cpu_to_be64(mapping);
+ j = 1;
+ }
+
+ nfrags = skb_shinfo(skb)->nr_frags;
+ for (i = 0; i < nfrags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
+ sgp->addr[j] = cpu_to_be64(mapping);
+ j ^= 1;
+ if (j == 0)
+ ++sgp;
+ }
+ if (j)
+ sgp->len[j] = 0;
+ return ((nfrags + (len != 0)) * 3) / 2 + j;
+}
+
+/**
+ * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
+ * @adap: the adapter
+ * @q: the Tx queue
+ *
+ * Ring the doorbel if a Tx queue is asleep. There is a natural race,
+ * where the HW is going to sleep just after we checked, however,
+ * then the interrupt handler will detect the outstanding TX packet
+ * and ring the doorbell for us.
+ *
+ * When GTS is disabled we unconditionally ring the doorbell.
+ */
+static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
+{
+#if USE_GTS
+ clear_bit(TXQ_LAST_PKT_DB, &q->flags);
+ if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
+ set_bit(TXQ_LAST_PKT_DB, &q->flags);
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ }
+#else
+ wmb(); /* write descriptors before telling HW */
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+#endif
+}
+
+static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
+{
+#if SGE_NUM_GENBITS == 2
+ d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
+#endif
+}
+
+/**
+ * write_wr_hdr_sgl - write a WR header and, optionally, SGL
+ * @ndesc: number of Tx descriptors spanned by the SGL
+ * @skb: the packet corresponding to the WR
+ * @d: first Tx descriptor to be written
+ * @pidx: index of above descriptors
+ * @q: the SGE Tx queue
+ * @sgl: the SGL
+ * @flits: number of flits to the start of the SGL in the first descriptor
+ * @sgl_flits: the SGL size in flits
+ * @gen: the Tx descriptor generation
+ * @wr_hi: top 32 bits of WR header based on WR type (big endian)
+ * @wr_lo: low 32 bits of WR header based on WR type (big endian)
+ *
+ * Write a work request header and an associated SGL. If the SGL is
+ * small enough to fit into one Tx descriptor it has already been written
+ * and we just need to write the WR header. Otherwise we distribute the
+ * SGL across the number of descriptors it spans.
+ */
+static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
+ struct tx_desc *d, unsigned int pidx,
+ const struct sge_txq *q,
+ const struct sg_ent *sgl,
+ unsigned int flits, unsigned int sgl_flits,
+ unsigned int gen, __be32 wr_hi,
+ __be32 wr_lo)
+{
+ struct work_request_hdr *wrp = (struct work_request_hdr *)d;
+ struct tx_sw_desc *sd = &q->sdesc[pidx];
+
+ sd->skb = skb;
+ if (need_skb_unmap()) {
+ sd->fragidx = 0;
+ sd->addr_idx = 0;
+ sd->sflit = flits;
+ }
+
+ if (likely(ndesc == 1)) {
+ sd->eop = 1;
+ wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
+ V_WR_SGLSFLT(flits)) | wr_hi;
+ wmb();
+ wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
+ V_WR_GEN(gen)) | wr_lo;
+ wr_gen2(d, gen);
+ } else {
+ unsigned int ogen = gen;
+ const u64 *fp = (const u64 *)sgl;
+ struct work_request_hdr *wp = wrp;
+
+ wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
+ V_WR_SGLSFLT(flits)) | wr_hi;
+
+ while (sgl_flits) {
+ unsigned int avail = WR_FLITS - flits;
+
+ if (avail > sgl_flits)
+ avail = sgl_flits;
+ memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
+ sgl_flits -= avail;
+ ndesc--;
+ if (!sgl_flits)
+ break;
+
+ fp += avail;
+ d++;
+ sd->eop = 0;
+ sd++;
+ if (++pidx == q->size) {
+ pidx = 0;
+ gen ^= 1;
+ d = q->desc;
+ sd = q->sdesc;
+ }
+
+ sd->skb = skb;
+ wrp = (struct work_request_hdr *)d;
+ wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
+ V_WR_SGLSFLT(1)) | wr_hi;
+ wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
+ sgl_flits + 1)) |
+ V_WR_GEN(gen)) | wr_lo;
+ wr_gen2(d, gen);
+ flits = 1;
+ }
+ sd->eop = 1;
+ wrp->wr_hi |= htonl(F_WR_EOP);
+ wmb();
+ wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
+ wr_gen2((struct tx_desc *)wp, ogen);
+ WARN_ON(ndesc != 0);
+ }
+}
+
+/**
+ * write_tx_pkt_wr - write a TX_PKT work request
+ * @adap: the adapter
+ * @skb: the packet to send
+ * @pi: the egress interface
+ * @pidx: index of the first Tx descriptor to write
+ * @gen: the generation value to use
+ * @q: the Tx queue
+ * @ndesc: number of descriptors the packet will occupy
+ * @compl: the value of the COMPL bit to use
+ *
+ * Generate a TX_PKT work request to send the supplied packet.
+ */
+static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
+ const struct port_info *pi,
+ unsigned int pidx, unsigned int gen,
+ struct sge_txq *q, unsigned int ndesc,
+ unsigned int compl)
+{
+ unsigned int flits, sgl_flits, cntrl, tso_info;
+ struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
+ struct tx_desc *d = &q->desc[pidx];
+ struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
+
+ cpl->len = htonl(skb->len);
+ cntrl = V_TXPKT_INTF(pi->port_id);
+
+ if (vlan_tx_tag_present(skb))
+ cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
+
+ tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
+ if (tso_info) {
+ int eth_type;
+ struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
+
+ d->flit[2] = 0;
+ cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
+ hdr->cntrl = htonl(cntrl);
+ eth_type = skb_network_offset(skb) == ETH_HLEN ?
+ CPL_ETH_II : CPL_ETH_II_VLAN;
+ tso_info |= V_LSO_ETH_TYPE(eth_type) |
+ V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
+ V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
+ hdr->lso_info = htonl(tso_info);
+ flits = 3;
+ } else {
+ cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
+ cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
+ cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
+ cpl->cntrl = htonl(cntrl);
+
+ if (skb->len <= WR_LEN - sizeof(*cpl)) {
+ q->sdesc[pidx].skb = NULL;
+ if (!skb->data_len)
+ skb_copy_from_linear_data(skb, &d->flit[2],
+ skb->len);
+ else
+ skb_copy_bits(skb, 0, &d->flit[2], skb->len);
+
+ flits = (skb->len + 7) / 8 + 2;
+ cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
+ V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
+ | F_WR_SOP | F_WR_EOP | compl);
+ wmb();
+ cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
+ V_WR_TID(q->token));
+ wr_gen2(d, gen);
+ kfree_skb(skb);
+ return;
+ }
+
+ flits = 2;
+ }
+
+ sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
+ sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
+
+ write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
+ htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
+ htonl(V_WR_TID(q->token)));
+}
+
+static inline void t3_stop_tx_queue(struct netdev_queue *txq,
+ struct sge_qset *qs, struct sge_txq *q)
+{
+ netif_tx_stop_queue(txq);
+ set_bit(TXQ_ETH, &qs->txq_stopped);
+ q->stops++;
+}
+
+/**
+ * eth_xmit - add a packet to the Ethernet Tx queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
+ */
+netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int qidx;
+ unsigned int ndesc, pidx, credits, gen, compl;
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct netdev_queue *txq;
+ struct sge_qset *qs;
+ struct sge_txq *q;
+
+ /*
+ * The chip min packet length is 9 octets but play safe and reject
+ * anything shorter than an Ethernet header.
+ */
+ if (unlikely(skb->len < ETH_HLEN)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ qidx = skb_get_queue_mapping(skb);
+ qs = &pi->qs[qidx];
+ q = &qs->txq[TXQ_ETH];
+ txq = netdev_get_tx_queue(dev, qidx);
+
+ reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+ credits = q->size - q->in_use;
+ ndesc = calc_tx_descs(skb);
+
+ if (unlikely(credits < ndesc)) {
+ t3_stop_tx_queue(txq, qs, q);
+ dev_err(&adap->pdev->dev,
+ "%s: Tx ring %u full while queue awake!\n",
+ dev->name, q->cntxt_id & 7);
+ return NETDEV_TX_BUSY;
+ }
+
+ q->in_use += ndesc;
+ if (unlikely(credits - ndesc < q->stop_thres)) {
+ t3_stop_tx_queue(txq, qs, q);
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
+ q->restarts++;
+ netif_tx_start_queue(txq);
+ }
+ }
+
+ gen = q->gen;
+ q->unacked += ndesc;
+ compl = (q->unacked & 8) << (S_WR_COMPL - 3);
+ q->unacked &= 7;
+ pidx = q->pidx;
+ q->pidx += ndesc;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->gen ^= 1;
+ }
+
+ /* update port statistics */
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ qs->port_stats[SGE_PSTAT_TX_CSUM]++;
+ if (skb_shinfo(skb)->gso_size)
+ qs->port_stats[SGE_PSTAT_TSO]++;
+ if (vlan_tx_tag_present(skb))
+ qs->port_stats[SGE_PSTAT_VLANINS]++;
+
+ /*
+ * We do not use Tx completion interrupts to free DMAd Tx packets.
+ * This is good for performance but means that we rely on new Tx
+ * packets arriving to run the destructors of completed packets,
+ * which open up space in their sockets' send queues. Sometimes
+ * we do not get such new packets causing Tx to stall. A single
+ * UDP transmitter is a good example of this situation. We have
+ * a clean up timer that periodically reclaims completed packets
+ * but it doesn't run often enough (nor do we want it to) to prevent
+ * lengthy stalls. A solution to this problem is to run the
+ * destructor early, after the packet is queued but before it's DMAd.
+ * A cons is that we lie to socket memory accounting, but the amount
+ * of extra memory is reasonable (limited by the number of Tx
+ * descriptors), the packets do actually get freed quickly by new
+ * packets almost always, and for protocols like TCP that wait for
+ * acks to really free up the data the extra memory is even less.
+ * On the positive side we run the destructors on the sending CPU
+ * rather than on a potentially different completing CPU, usually a
+ * good thing. We also run them without holding our Tx queue lock,
+ * unlike what reclaim_completed_tx() would otherwise do.
+ *
+ * Run the destructor before telling the DMA engine about the packet
+ * to make sure it doesn't complete and get freed prematurely.
+ */
+ if (likely(!skb_shared(skb)))
+ skb_orphan(skb);
+
+ write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
+ check_ring_tx_db(adap, q);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * write_imm - write a packet into a Tx descriptor as immediate data
+ * @d: the Tx descriptor to write
+ * @skb: the packet
+ * @len: the length of packet data to write as immediate data
+ * @gen: the generation bit value to write
+ *
+ * Writes a packet as immediate data into a Tx descriptor. The packet
+ * contains a work request at its beginning. We must write the packet
+ * carefully so the SGE doesn't read it accidentally before it's written
+ * in its entirety.
+ */
+static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
+ unsigned int len, unsigned int gen)
+{
+ struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
+ struct work_request_hdr *to = (struct work_request_hdr *)d;
+
+ if (likely(!skb->data_len))
+ memcpy(&to[1], &from[1], len - sizeof(*from));
+ else
+ skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
+
+ to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
+ V_WR_BCNTLFLT(len & 7));
+ wmb();
+ to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
+ V_WR_LEN((len + 7) / 8));
+ wr_gen2(d, gen);
+ kfree_skb(skb);
+}
+
+/**
+ * check_desc_avail - check descriptor availability on a send queue
+ * @adap: the adapter
+ * @q: the send queue
+ * @skb: the packet needing the descriptors
+ * @ndesc: the number of Tx descriptors needed
+ * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
+ *
+ * Checks if the requested number of Tx descriptors is available on an
+ * SGE send queue. If the queue is already suspended or not enough
+ * descriptors are available the packet is queued for later transmission.
+ * Must be called with the Tx queue locked.
+ *
+ * Returns 0 if enough descriptors are available, 1 if there aren't
+ * enough descriptors and the packet has been queued, and 2 if the caller
+ * needs to retry because there weren't enough descriptors at the
+ * beginning of the call but some freed up in the mean time.
+ */
+static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
+ struct sk_buff *skb, unsigned int ndesc,
+ unsigned int qid)
+{
+ if (unlikely(!skb_queue_empty(&q->sendq))) {
+ addq_exit:__skb_queue_tail(&q->sendq, skb);
+ return 1;
+ }
+ if (unlikely(q->size - q->in_use < ndesc)) {
+ struct sge_qset *qs = txq_to_qset(q, qid);
+
+ set_bit(qid, &qs->txq_stopped);
+ smp_mb__after_clear_bit();
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(qid, &qs->txq_stopped))
+ return 2;
+
+ q->stops++;
+ goto addq_exit;
+ }
+ return 0;
+}
+
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ unsigned int reclaim = q->processed - q->cleaned;
+
+ q->in_use -= reclaim;
+ q->cleaned += reclaim;
+}
+
+static inline int immediate(const struct sk_buff *skb)
+{
+ return skb->len <= WR_LEN;
+}
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @adap: the adapter
+ * @q: the control queue
+ * @skb: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data in a single Tx
+ * descriptor and have no page fragments.
+ */
+static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
+ struct sk_buff *skb)
+{
+ int ret;
+ struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
+
+ if (unlikely(!immediate(skb))) {
+ WARN_ON(1);
+ dev_kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+ }
+
+ wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
+ wrp->wr_lo = htonl(V_WR_TID(q->token));
+
+ spin_lock(&q->lock);
+ again:reclaim_completed_tx_imm(q);
+
+ ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
+ if (unlikely(ret)) {
+ if (ret == 1) {
+ spin_unlock(&q->lock);
+ return NET_XMIT_CN;
+ }
+ goto again;
+ }
+
+ write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
+
+ q->in_use++;
+ if (++q->pidx >= q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ }
+ spin_unlock(&q->lock);
+ wmb();
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_ctrlq - restart a suspended control queue
+ * @qs: the queue set cotaining the control queue
+ *
+ * Resumes transmission on a suspended Tx control queue.
+ */
+static void restart_ctrlq(unsigned long data)
+{
+ struct sk_buff *skb;
+ struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_txq *q = &qs->txq[TXQ_CTRL];
+
+ spin_lock(&q->lock);
+ again:reclaim_completed_tx_imm(q);
+
+ while (q->in_use < q->size &&
+ (skb = __skb_dequeue(&q->sendq)) != NULL) {
+
+ write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
+
+ if (++q->pidx >= q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ }
+ q->in_use++;
+ }
+
+ if (!skb_queue_empty(&q->sendq)) {
+ set_bit(TXQ_CTRL, &qs->txq_stopped);
+ smp_mb__after_clear_bit();
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
+ goto again;
+ q->stops++;
+ }
+
+ spin_unlock(&q->lock);
+ wmb();
+ t3_write_reg(qs->adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+}
+
+/*
+ * Send a management message through control queue 0
+ */
+int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
+{
+ int ret;
+ local_bh_disable();
+ ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
+ local_bh_enable();
+
+ return ret;
+}
+
+/**
+ * deferred_unmap_destructor - unmap a packet when it is freed
+ * @skb: the packet
+ *
+ * This is the packet destructor used for Tx packets that need to remain
+ * mapped until they are freed rather than until their Tx descriptors are
+ * freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+ int i;
+ const dma_addr_t *p;
+ const struct skb_shared_info *si;
+ const struct deferred_unmap_info *dui;
+
+ dui = (struct deferred_unmap_info *)skb->head;
+ p = dui->addr;
+
+ if (skb->tail - skb->transport_header)
+ pci_unmap_single(dui->pdev, *p++,
+ skb->tail - skb->transport_header,
+ PCI_DMA_TODEVICE);
+
+ si = skb_shinfo(skb);
+ for (i = 0; i < si->nr_frags; i++)
+ pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
+ PCI_DMA_TODEVICE);
+}
+
+static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
+ const struct sg_ent *sgl, int sgl_flits)
+{
+ dma_addr_t *p;
+ struct deferred_unmap_info *dui;
+
+ dui = (struct deferred_unmap_info *)skb->head;
+ dui->pdev = pdev;
+ for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
+ *p++ = be64_to_cpu(sgl->addr[0]);
+ *p++ = be64_to_cpu(sgl->addr[1]);
+ }
+ if (sgl_flits)
+ *p = be64_to_cpu(sgl->addr[0]);
+}
+
+/**
+ * write_ofld_wr - write an offload work request
+ * @adap: the adapter
+ * @skb: the packet to send
+ * @q: the Tx queue
+ * @pidx: index of the first Tx descriptor to write
+ * @gen: the generation value to use
+ * @ndesc: number of descriptors the packet will occupy
+ *
+ * Write an offload work request to send the supplied packet. The packet
+ * data already carry the work request with most fields populated.
+ */
+static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
+ struct sge_txq *q, unsigned int pidx,
+ unsigned int gen, unsigned int ndesc)
+{
+ unsigned int sgl_flits, flits;
+ struct work_request_hdr *from;
+ struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
+ struct tx_desc *d = &q->desc[pidx];
+
+ if (immediate(skb)) {
+ q->sdesc[pidx].skb = NULL;
+ write_imm(d, skb, skb->len, gen);
+ return;
+ }
+
+ /* Only TX_DATA builds SGLs */
+
+ from = (struct work_request_hdr *)skb->data;
+ memcpy(&d->flit[1], &from[1],
+ skb_transport_offset(skb) - sizeof(*from));
+
+ flits = skb_transport_offset(skb) / 8;
+ sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
+ sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+ skb->tail - skb->transport_header,
+ adap->pdev);
+ if (need_skb_unmap()) {
+ setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+ skb->destructor = deferred_unmap_destructor;
+ }
+
+ write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
+ gen, from->wr_hi, from->wr_lo);
+}
+
+/**
+ * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of Tx descriptors needed for the given offload
+ * packet. These packets are already fully constructed.
+ */
+static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
+{
+ unsigned int flits, cnt;
+
+ if (skb->len <= WR_LEN)
+ return 1; /* packet fits as immediate data */
+
+ flits = skb_transport_offset(skb) / 8; /* headers */
+ cnt = skb_shinfo(skb)->nr_frags;
+ if (skb->tail != skb->transport_header)
+ cnt++;
+ return flits_to_desc(flits + sgl_len(cnt));
+}
+
+/**
+ * ofld_xmit - send a packet through an offload queue
+ * @adap: the adapter
+ * @q: the Tx offload queue
+ * @skb: the packet
+ *
+ * Send an offload packet through an SGE offload queue.
+ */
+static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
+ struct sk_buff *skb)
+{
+ int ret;
+ unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
+
+ spin_lock(&q->lock);
+again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+ ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
+ if (unlikely(ret)) {
+ if (ret == 1) {
+ skb->priority = ndesc; /* save for restart */
+ spin_unlock(&q->lock);
+ return NET_XMIT_CN;
+ }
+ goto again;
+ }
+
+ gen = q->gen;
+ q->in_use += ndesc;
+ pidx = q->pidx;
+ q->pidx += ndesc;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->gen ^= 1;
+ }
+ spin_unlock(&q->lock);
+
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+ check_ring_tx_db(adap, q);
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_offloadq - restart a suspended offload queue
+ * @qs: the queue set cotaining the offload queue
+ *
+ * Resumes transmission on a suspended Tx offload queue.
+ */
+static void restart_offloadq(unsigned long data)
+{
+ struct sk_buff *skb;
+ struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_txq *q = &qs->txq[TXQ_OFLD];
+ const struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+
+ spin_lock(&q->lock);
+again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+ while ((skb = skb_peek(&q->sendq)) != NULL) {
+ unsigned int gen, pidx;
+ unsigned int ndesc = skb->priority;
+
+ if (unlikely(q->size - q->in_use < ndesc)) {
+ set_bit(TXQ_OFLD, &qs->txq_stopped);
+ smp_mb__after_clear_bit();
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
+ goto again;
+ q->stops++;
+ break;
+ }
+
+ gen = q->gen;
+ q->in_use += ndesc;
+ pidx = q->pidx;
+ q->pidx += ndesc;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->gen ^= 1;
+ }
+ __skb_unlink(skb, &q->sendq);
+ spin_unlock(&q->lock);
+
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+ spin_lock(&q->lock);
+ }
+ spin_unlock(&q->lock);
+
+#if USE_GTS
+ set_bit(TXQ_RUNNING, &q->flags);
+ set_bit(TXQ_LAST_PKT_DB, &q->flags);
+#endif
+ wmb();
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+}
+
+/**
+ * queue_set - return the queue set a packet should use
+ * @skb: the packet
+ *
+ * Maps a packet to the SGE queue set it should use. The desired queue
+ * set is carried in bits 1-3 in the packet's priority.
+ */
+static inline int queue_set(const struct sk_buff *skb)
+{
+ return skb->priority >> 1;
+}
+
+/**
+ * is_ctrl_pkt - return whether an offload packet is a control packet
+ * @skb: the packet
+ *
+ * Determines whether an offload packet should use an OFLD or a CTRL
+ * Tx queue. This is indicated by bit 0 in the packet's priority.
+ */
+static inline int is_ctrl_pkt(const struct sk_buff *skb)
+{
+ return skb->priority & 1;
+}
+
+/**
+ * t3_offload_tx - send an offload packet
+ * @tdev: the offload device to send to
+ * @skb: the packet
+ *
+ * Sends an offload packet. We use the packet priority to select the
+ * appropriate Tx queue as follows: bit 0 indicates whether the packet
+ * should be sent as regular or control, bits 1-3 select the queue set.
+ */
+int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
+{
+ struct adapter *adap = tdev2adap(tdev);
+ struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
+
+ if (unlikely(is_ctrl_pkt(skb)))
+ return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
+
+ return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
+}
+
+/**
+ * offload_enqueue - add an offload packet to an SGE offload receive queue
+ * @q: the SGE response queue
+ * @skb: the packet
+ *
+ * Add a new offload packet to an SGE response queue's offload packet
+ * queue. If the packet is the first on the queue it schedules the RX
+ * softirq to process the queue.
+ */
+static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
+{
+ int was_empty = skb_queue_empty(&q->rx_queue);
+
+ __skb_queue_tail(&q->rx_queue, skb);
+
+ if (was_empty) {
+ struct sge_qset *qs = rspq_to_qset(q);
+
+ napi_schedule(&qs->napi);
+ }
+}
+
+/**
+ * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
+ * @tdev: the offload device that will be receiving the packets
+ * @q: the SGE response queue that assembled the bundle
+ * @skbs: the partial bundle
+ * @n: the number of packets in the bundle
+ *
+ * Delivers a (partial) bundle of Rx offload packets to an offload device.
+ */
+static inline void deliver_partial_bundle(struct t3cdev *tdev,
+ struct sge_rspq *q,
+ struct sk_buff *skbs[], int n)
+{
+ if (n) {
+ q->offload_bundles++;
+ tdev->recv(tdev, skbs, n);
+ }
+}
+
+/**
+ * ofld_poll - NAPI handler for offload packets in interrupt mode
+ * @dev: the network device doing the polling
+ * @budget: polling budget
+ *
+ * The NAPI handler for offload packets when a response queue is serviced
+ * by the hard interrupt handler, i.e., when it's operating in non-polling
+ * mode. Creates small packet batches and sends them through the offload
+ * receive handler. Batches need to be of modest size as we do prefetches
+ * on the packets in each.
+ */
+static int ofld_poll(struct napi_struct *napi, int budget)
+{
+ struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
+ struct sge_rspq *q = &qs->rspq;
+ struct adapter *adapter = qs->adap;
+ int work_done = 0;
+
+ while (work_done < budget) {
+ struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
+ struct sk_buff_head queue;
+ int ngathered;
+
+ spin_lock_irq(&q->lock);
+ __skb_queue_head_init(&queue);
+ skb_queue_splice_init(&q->rx_queue, &queue);
+ if (skb_queue_empty(&queue)) {
+ napi_complete(napi);
+ spin_unlock_irq(&q->lock);
+ return work_done;
+ }
+ spin_unlock_irq(&q->lock);
+
+ ngathered = 0;
+ skb_queue_walk_safe(&queue, skb, tmp) {
+ if (work_done >= budget)
+ break;
+ work_done++;
+
+ __skb_unlink(skb, &queue);
+ prefetch(skb->data);
+ skbs[ngathered] = skb;
+ if (++ngathered == RX_BUNDLE_SIZE) {
+ q->offload_bundles++;
+ adapter->tdev.recv(&adapter->tdev, skbs,
+ ngathered);
+ ngathered = 0;
+ }
+ }
+ if (!skb_queue_empty(&queue)) {
+ /* splice remaining packets back onto Rx queue */
+ spin_lock_irq(&q->lock);
+ skb_queue_splice(&queue, &q->rx_queue);
+ spin_unlock_irq(&q->lock);
+ }
+ deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
+ }
+
+ return work_done;
+}
+
+/**
+ * rx_offload - process a received offload packet
+ * @tdev: the offload device receiving the packet
+ * @rq: the response queue that received the packet
+ * @skb: the packet
+ * @rx_gather: a gather list of packets if we are building a bundle
+ * @gather_idx: index of the next available slot in the bundle
+ *
+ * Process an ingress offload pakcet and add it to the offload ingress
+ * queue. Returns the index of the next available slot in the bundle.
+ */
+static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
+ struct sk_buff *skb, struct sk_buff *rx_gather[],
+ unsigned int gather_idx)
+{
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ if (rq->polling) {
+ rx_gather[gather_idx++] = skb;
+ if (gather_idx == RX_BUNDLE_SIZE) {
+ tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
+ gather_idx = 0;
+ rq->offload_bundles++;
+ }
+ } else
+ offload_enqueue(rq, skb);
+
+ return gather_idx;
+}
+
+/**
+ * restart_tx - check whether to restart suspended Tx queues
+ * @qs: the queue set to resume
+ *
+ * Restarts suspended Tx queues of an SGE queue set if they have enough
+ * free resources to resume operation.
+ */
+static void restart_tx(struct sge_qset *qs)
+{
+ if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
+ should_restart_tx(&qs->txq[TXQ_ETH]) &&
+ test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
+ qs->txq[TXQ_ETH].restarts++;
+ if (netif_running(qs->netdev))
+ netif_tx_wake_queue(qs->tx_q);
+ }
+
+ if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
+ should_restart_tx(&qs->txq[TXQ_OFLD]) &&
+ test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
+ qs->txq[TXQ_OFLD].restarts++;
+ tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
+ }
+ if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
+ should_restart_tx(&qs->txq[TXQ_CTRL]) &&
+ test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
+ qs->txq[TXQ_CTRL].restarts++;
+ tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
+ }
+}
+
+/**
+ * cxgb3_arp_process - process an ARP request probing a private IP address
+ * @adapter: the adapter
+ * @skb: the skbuff containing the ARP request
+ *
+ * Check if the ARP request is probing the private IP address
+ * dedicated to iSCSI, generate an ARP reply if so.
+ */
+static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct arphdr *arp;
+ unsigned char *arp_ptr;
+ unsigned char *sha;
+ __be32 sip, tip;
+
+ if (!dev)
+ return;
+
+ skb_reset_network_header(skb);
+ arp = arp_hdr(skb);
+
+ if (arp->ar_op != htons(ARPOP_REQUEST))
+ return;
+
+ arp_ptr = (unsigned char *)(arp + 1);
+ sha = arp_ptr;
+ arp_ptr += dev->addr_len;
+ memcpy(&sip, arp_ptr, sizeof(sip));
+ arp_ptr += sizeof(sip);
+ arp_ptr += dev->addr_len;
+ memcpy(&tip, arp_ptr, sizeof(tip));
+
+ if (tip != pi->iscsi_ipv4addr)
+ return;
+
+ arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
+ pi->iscsic.mac_addr, sha);
+
+}
+
+static inline int is_arp(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_ARP);
+}
+
+static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
+ struct sk_buff *skb)
+{
+ if (is_arp(skb)) {
+ cxgb3_arp_process(pi, skb);
+ return;
+ }
+
+ if (pi->iscsic.recv)
+ pi->iscsic.recv(pi, skb);
+
+}
+
+/**
+ * rx_eth - process an ingress ethernet packet
+ * @adap: the adapter
+ * @rq: the response queue that received the packet
+ * @skb: the packet
+ * @pad: amount of padding at the start of the buffer
+ *
+ * Process an ingress ethernet pakcet and deliver it to the stack.
+ * The padding is 2 if the packet was delivered in an Rx buffer and 0
+ * if it was immediate data in a response.
+ */
+static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
+ struct sk_buff *skb, int pad, int lro)
+{
+ struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
+ struct sge_qset *qs = rspq_to_qset(rq);
+ struct port_info *pi;
+
+ skb_pull(skb, sizeof(*p) + pad);
+ skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
+ pi = netdev_priv(skb->dev);
+ if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
+ p->csum == htons(0xffff) && !p->fragment) {
+ qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ skb_checksum_none_assert(skb);
+ skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
+
+ if (p->vlan_valid) {
+ qs->port_stats[SGE_PSTAT_VLANEX]++;
+ __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
+ }
+ if (rq->polling) {
+ if (lro)
+ napi_gro_receive(&qs->napi, skb);
+ else {
+ if (unlikely(pi->iscsic.flags))
+ cxgb3_process_iscsi_prov_pack(pi, skb);
+ netif_receive_skb(skb);
+ }
+ } else
+ netif_rx(skb);
+}
+
+static inline int is_eth_tcp(u32 rss)
+{
+ return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
+}
+
+/**
+ * lro_add_page - add a page chunk to an LRO session
+ * @adap: the adapter
+ * @qs: the associated queue set
+ * @fl: the free list containing the page chunk to add
+ * @len: packet length
+ * @complete: Indicates the last fragment of a frame
+ *
+ * Add a received packet contained in a page chunk to an existing LRO
+ * session.
+ */
+static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
+ struct sge_fl *fl, int len, int complete)
+{
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ struct port_info *pi = netdev_priv(qs->netdev);
+ struct sk_buff *skb = NULL;
+ struct cpl_rx_pkt *cpl;
+ struct skb_frag_struct *rx_frag;
+ int nr_frags;
+ int offset = 0;
+
+ if (!qs->nomem) {
+ skb = napi_get_frags(&qs->napi);
+ qs->nomem = !skb;
+ }
+
+ fl->credits--;
+
+ pci_dma_sync_single_for_cpu(adap->pdev,
+ dma_unmap_addr(sd, dma_addr),
+ fl->buf_size - SGE_PG_RSVD,
+ PCI_DMA_FROMDEVICE);
+
+ (*sd->pg_chunk.p_cnt)--;
+ if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
+ pci_unmap_page(adap->pdev,
+ sd->pg_chunk.mapping,
+ fl->alloc_size,
+ PCI_DMA_FROMDEVICE);
+
+ if (!skb) {
+ put_page(sd->pg_chunk.page);
+ if (complete)
+ qs->nomem = 0;
+ return;
+ }
+
+ rx_frag = skb_shinfo(skb)->frags;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (!nr_frags) {
+ offset = 2 + sizeof(struct cpl_rx_pkt);
+ cpl = qs->lro_va = sd->pg_chunk.va + 2;
+
+ if ((qs->netdev->features & NETIF_F_RXCSUM) &&
+ cpl->csum_valid && cpl->csum == htons(0xffff)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+ } else
+ cpl = qs->lro_va;
+
+ len -= offset;
+
+ rx_frag += nr_frags;
+ __skb_frag_set_page(rx_frag, sd->pg_chunk.page);
+ rx_frag->page_offset = sd->pg_chunk.offset + offset;
+ skb_frag_size_set(rx_frag, len);
+
+ skb->len += len;
+ skb->data_len += len;
+ skb->truesize += len;
+ skb_shinfo(skb)->nr_frags++;
+
+ if (!complete)
+ return;
+
+ skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
+
+ if (cpl->vlan_valid)
+ __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
+ napi_gro_frags(&qs->napi);
+}
+
+/**
+ * handle_rsp_cntrl_info - handles control information in a response
+ * @qs: the queue set corresponding to the response
+ * @flags: the response control flags
+ *
+ * Handles the control information of an SGE response, such as GTS
+ * indications and completion credits for the queue set's Tx queues.
+ * HW coalesces credits, we don't do any extra SW coalescing.
+ */
+static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
+{
+ unsigned int credits;
+
+#if USE_GTS
+ if (flags & F_RSPD_TXQ0_GTS)
+ clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
+#endif
+
+ credits = G_RSPD_TXQ0_CR(flags);
+ if (credits)
+ qs->txq[TXQ_ETH].processed += credits;
+
+ credits = G_RSPD_TXQ2_CR(flags);
+ if (credits)
+ qs->txq[TXQ_CTRL].processed += credits;
+
+# if USE_GTS
+ if (flags & F_RSPD_TXQ1_GTS)
+ clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
+# endif
+ credits = G_RSPD_TXQ1_CR(flags);
+ if (credits)
+ qs->txq[TXQ_OFLD].processed += credits;
+}
+
+/**
+ * check_ring_db - check if we need to ring any doorbells
+ * @adapter: the adapter
+ * @qs: the queue set whose Tx queues are to be examined
+ * @sleeping: indicates which Tx queue sent GTS
+ *
+ * Checks if some of a queue set's Tx queues need to ring their doorbells
+ * to resume transmission after idling while they still have unprocessed
+ * descriptors.
+ */
+static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
+ unsigned int sleeping)
+{
+ if (sleeping & F_RSPD_TXQ0_GTS) {
+ struct sge_txq *txq = &qs->txq[TXQ_ETH];
+
+ if (txq->cleaned + txq->in_use != txq->processed &&
+ !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
+ set_bit(TXQ_RUNNING, &txq->flags);
+ t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
+ V_EGRCNTX(txq->cntxt_id));
+ }
+ }
+
+ if (sleeping & F_RSPD_TXQ1_GTS) {
+ struct sge_txq *txq = &qs->txq[TXQ_OFLD];
+
+ if (txq->cleaned + txq->in_use != txq->processed &&
+ !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
+ set_bit(TXQ_RUNNING, &txq->flags);
+ t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
+ V_EGRCNTX(txq->cntxt_id));
+ }
+ }
+}
+
+/**
+ * is_new_response - check if a response is newly written
+ * @r: the response descriptor
+ * @q: the response queue
+ *
+ * Returns true if a response descriptor contains a yet unprocessed
+ * response.
+ */
+static inline int is_new_response(const struct rsp_desc *r,
+ const struct sge_rspq *q)
+{
+ return (r->intr_gen & F_RSPD_GEN2) == q->gen;
+}
+
+static inline void clear_rspq_bufstate(struct sge_rspq * const q)
+{
+ q->pg_skb = NULL;
+ q->rx_recycle_buf = 0;
+}
+
+#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
+#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
+ V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
+ V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
+ V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
+
+/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
+#define NOMEM_INTR_DELAY 2500
+
+/**
+ * process_responses - process responses from an SGE response queue
+ * @adap: the adapter
+ * @qs: the queue set to which the response queue belongs
+ * @budget: how many responses can be processed in this round
+ *
+ * Process responses from an SGE response queue up to the supplied budget.
+ * Responses include received packets as well as credits and other events
+ * for the queues that belong to the response queue's queue set.
+ * A negative budget is effectively unlimited.
+ *
+ * Additionally choose the interrupt holdoff time for the next interrupt
+ * on this queue. If the system is under memory shortage use a fairly
+ * long delay to help recovery.
+ */
+static int process_responses(struct adapter *adap, struct sge_qset *qs,
+ int budget)
+{
+ struct sge_rspq *q = &qs->rspq;
+ struct rsp_desc *r = &q->desc[q->cidx];
+ int budget_left = budget;
+ unsigned int sleeping = 0;
+ struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
+ int ngathered = 0;
+
+ q->next_holdoff = q->holdoff_tmr;
+
+ while (likely(budget_left && is_new_response(r, q))) {
+ int packet_complete, eth, ethpad = 2;
+ int lro = !!(qs->netdev->features & NETIF_F_GRO);
+ struct sk_buff *skb = NULL;
+ u32 len, flags;
+ __be32 rss_hi, rss_lo;
+
+ rmb();
+ eth = r->rss_hdr.opcode == CPL_RX_PKT;
+ rss_hi = *(const __be32 *)r;
+ rss_lo = r->rss_hdr.rss_hash_val;
+ flags = ntohl(r->flags);
+
+ if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
+ skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ goto no_mem;
+
+ memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
+ skb->data[0] = CPL_ASYNC_NOTIF;
+ rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
+ q->async_notif++;
+ } else if (flags & F_RSPD_IMM_DATA_VALID) {
+ skb = get_imm_packet(r);
+ if (unlikely(!skb)) {
+no_mem:
+ q->next_holdoff = NOMEM_INTR_DELAY;
+ q->nomem++;
+ /* consume one credit since we tried */
+ budget_left--;
+ break;
+ }
+ q->imm_data++;
+ ethpad = 0;
+ } else if ((len = ntohl(r->len_cq)) != 0) {
+ struct sge_fl *fl;
+
+ lro &= eth && is_eth_tcp(rss_hi);
+
+ fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+ if (fl->use_pages) {
+ void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
+
+ prefetch(addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(addr + L1_CACHE_BYTES);
+#endif
+ __refill_fl(adap, fl);
+ if (lro > 0) {
+ lro_add_page(adap, qs, fl,
+ G_RSPD_LEN(len),
+ flags & F_RSPD_EOP);
+ goto next_fl;
+ }
+
+ skb = get_packet_pg(adap, fl, q,
+ G_RSPD_LEN(len),
+ eth ?
+ SGE_RX_DROP_THRES : 0);
+ q->pg_skb = skb;
+ } else
+ skb = get_packet(adap, fl, G_RSPD_LEN(len),
+ eth ? SGE_RX_DROP_THRES : 0);
+ if (unlikely(!skb)) {
+ if (!eth)
+ goto no_mem;
+ q->rx_drops++;
+ } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
+ __skb_pull(skb, 2);
+next_fl:
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+ } else
+ q->pure_rsps++;
+
+ if (flags & RSPD_CTRL_MASK) {
+ sleeping |= flags & RSPD_GTS_MASK;
+ handle_rsp_cntrl_info(qs, flags);
+ }
+
+ r++;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->gen ^= 1;
+ r = q->desc;
+ }
+ prefetch(r);
+
+ if (++q->credits >= (q->size / 4)) {
+ refill_rspq(adap, q, q->credits);
+ q->credits = 0;
+ }
+
+ packet_complete = flags &
+ (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
+ F_RSPD_ASYNC_NOTIF);
+
+ if (skb != NULL && packet_complete) {
+ if (eth)
+ rx_eth(adap, q, skb, ethpad, lro);
+ else {
+ q->offload_pkts++;
+ /* Preserve the RSS info in csum & priority */
+ skb->csum = rss_hi;
+ skb->priority = rss_lo;
+ ngathered = rx_offload(&adap->tdev, q, skb,
+ offload_skbs,
+ ngathered);
+ }
+
+ if (flags & F_RSPD_EOP)
+ clear_rspq_bufstate(q);
+ }
+ --budget_left;
+ }
+
+ deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
+
+ if (sleeping)
+ check_ring_db(adap, qs, sleeping);
+
+ smp_mb(); /* commit Tx queue .processed updates */
+ if (unlikely(qs->txq_stopped != 0))
+ restart_tx(qs);
+
+ budget -= budget_left;
+ return budget;
+}
+
+static inline int is_pure_response(const struct rsp_desc *r)
+{
+ __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
+
+ return (n | r->len_cq) == 0;
+}
+
+/**
+ * napi_rx_handler - the NAPI handler for Rx processing
+ * @napi: the napi instance
+ * @budget: how many packets we can process in this round
+ *
+ * Handler for new data events when using NAPI.
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+ struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
+ struct adapter *adap = qs->adap;
+ int work_done = process_responses(adap, qs, budget);
+
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
+
+ /*
+ * Because we don't atomically flush the following
+ * write it is possible that in very rare cases it can
+ * reach the device in a way that races with a new
+ * response being written plus an error interrupt
+ * causing the NAPI interrupt handler below to return
+ * unhandled status to the OS. To protect against
+ * this would require flushing the write and doing
+ * both the write and the flush with interrupts off.
+ * Way too expensive and unjustifiable given the
+ * rarity of the race.
+ *
+ * The race cannot happen at all with MSI-X.
+ */
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
+ V_NEWTIMER(qs->rspq.next_holdoff) |
+ V_NEWINDEX(qs->rspq.cidx));
+ }
+ return work_done;
+}
+
+/*
+ * Returns true if the device is already scheduled for polling.
+ */
+static inline int napi_is_scheduled(struct napi_struct *napi)
+{
+ return test_bit(NAPI_STATE_SCHED, &napi->state);
+}
+
+/**
+ * process_pure_responses - process pure responses from a response queue
+ * @adap: the adapter
+ * @qs: the queue set owning the response queue
+ * @r: the first pure response to process
+ *
+ * A simpler version of process_responses() that handles only pure (i.e.,
+ * non data-carrying) responses. Such respones are too light-weight to
+ * justify calling a softirq under NAPI, so we handle them specially in
+ * the interrupt handler. The function is called with a pointer to a
+ * response, which the caller must ensure is a valid pure response.
+ *
+ * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
+ */
+static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
+ struct rsp_desc *r)
+{
+ struct sge_rspq *q = &qs->rspq;
+ unsigned int sleeping = 0;
+
+ do {
+ u32 flags = ntohl(r->flags);
+
+ r++;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->gen ^= 1;
+ r = q->desc;
+ }
+ prefetch(r);
+
+ if (flags & RSPD_CTRL_MASK) {
+ sleeping |= flags & RSPD_GTS_MASK;
+ handle_rsp_cntrl_info(qs, flags);
+ }
+
+ q->pure_rsps++;
+ if (++q->credits >= (q->size / 4)) {
+ refill_rspq(adap, q, q->credits);
+ q->credits = 0;
+ }
+ if (!is_new_response(r, q))
+ break;
+ rmb();
+ } while (is_pure_response(r));
+
+ if (sleeping)
+ check_ring_db(adap, qs, sleeping);
+
+ smp_mb(); /* commit Tx queue .processed updates */
+ if (unlikely(qs->txq_stopped != 0))
+ restart_tx(qs);
+
+ return is_new_response(r, q);
+}
+
+/**
+ * handle_responses - decide what to do with new responses in NAPI mode
+ * @adap: the adapter
+ * @q: the response queue
+ *
+ * This is used by the NAPI interrupt handlers to decide what to do with
+ * new SGE responses. If there are no new responses it returns -1. If
+ * there are new responses and they are pure (i.e., non-data carrying)
+ * it handles them straight in hard interrupt context as they are very
+ * cheap and don't deliver any packets. Finally, if there are any data
+ * signaling responses it schedules the NAPI handler. Returns 1 if it
+ * schedules NAPI, 0 if all new responses were pure.
+ *
+ * The caller must ascertain NAPI is not already running.
+ */
+static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
+{
+ struct sge_qset *qs = rspq_to_qset(q);
+ struct rsp_desc *r = &q->desc[q->cidx];
+
+ if (!is_new_response(r, q))
+ return -1;
+ rmb();
+ if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+ V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
+ return 0;
+ }
+ napi_schedule(&qs->napi);
+ return 1;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
+ * (i.e., response queue serviced in hard interrupt).
+ */
+static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
+{
+ struct sge_qset *qs = cookie;
+ struct adapter *adap = qs->adap;
+ struct sge_rspq *q = &qs->rspq;
+
+ spin_lock(&q->lock);
+ if (process_responses(adap, qs, -1) == 0)
+ q->unhandled_irqs++;
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+ V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
+ spin_unlock(&q->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the NAPI case
+ * (i.e., response queue serviced by NAPI polling).
+ */
+static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
+{
+ struct sge_qset *qs = cookie;
+ struct sge_rspq *q = &qs->rspq;
+
+ spin_lock(&q->lock);
+
+ if (handle_responses(qs->adap, q) < 0)
+ q->unhandled_irqs++;
+ spin_unlock(&q->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * The non-NAPI MSI interrupt handler. This needs to handle data events from
+ * SGE response queues as well as error and other async events as they all use
+ * the same MSI vector. We use one SGE response queue per port in this mode
+ * and protect all response queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr_msi(int irq, void *cookie)
+{
+ int new_packets = 0;
+ struct adapter *adap = cookie;
+ struct sge_rspq *q = &adap->sge.qs[0].rspq;
+
+ spin_lock(&q->lock);
+
+ if (process_responses(adap, &adap->sge.qs[0], -1)) {
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+ V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
+ new_packets = 1;
+ }
+
+ if (adap->params.nports == 2 &&
+ process_responses(adap, &adap->sge.qs[1], -1)) {
+ struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
+
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
+ V_NEWTIMER(q1->next_holdoff) |
+ V_NEWINDEX(q1->cidx));
+ new_packets = 1;
+ }
+
+ if (!new_packets && t3_slow_intr_handler(adap) == 0)
+ q->unhandled_irqs++;
+
+ spin_unlock(&q->lock);
+ return IRQ_HANDLED;
+}
+
+static int rspq_check_napi(struct sge_qset *qs)
+{
+ struct sge_rspq *q = &qs->rspq;
+
+ if (!napi_is_scheduled(&qs->napi) &&
+ is_new_response(&q->desc[q->cidx], q)) {
+ napi_schedule(&qs->napi);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
+ * by NAPI polling). Handles data events from SGE response queues as well as
+ * error and other async events as they all use the same MSI vector. We use
+ * one SGE response queue per port in this mode and protect all response
+ * queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
+{
+ int new_packets;
+ struct adapter *adap = cookie;
+ struct sge_rspq *q = &adap->sge.qs[0].rspq;
+
+ spin_lock(&q->lock);
+
+ new_packets = rspq_check_napi(&adap->sge.qs[0]);
+ if (adap->params.nports == 2)
+ new_packets += rspq_check_napi(&adap->sge.qs[1]);
+ if (!new_packets && t3_slow_intr_handler(adap) == 0)
+ q->unhandled_irqs++;
+
+ spin_unlock(&q->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * A helper function that processes responses and issues GTS.
+ */
+static inline int process_responses_gts(struct adapter *adap,
+ struct sge_rspq *rq)
+{
+ int work;
+
+ work = process_responses(adap, rspq_to_qset(rq), -1);
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
+ V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
+ return work;
+}
+
+/*
+ * The legacy INTx interrupt handler. This needs to handle data events from
+ * SGE response queues as well as error and other async events as they all use
+ * the same interrupt pin. We use one SGE response queue per port in this mode
+ * and protect all response queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr(int irq, void *cookie)
+{
+ int work_done, w0, w1;
+ struct adapter *adap = cookie;
+ struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+ struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
+
+ spin_lock(&q0->lock);
+
+ w0 = is_new_response(&q0->desc[q0->cidx], q0);
+ w1 = adap->params.nports == 2 &&
+ is_new_response(&q1->desc[q1->cidx], q1);
+
+ if (likely(w0 | w1)) {
+ t3_write_reg(adap, A_PL_CLI, 0);
+ t3_read_reg(adap, A_PL_CLI); /* flush */
+
+ if (likely(w0))
+ process_responses_gts(adap, q0);
+
+ if (w1)
+ process_responses_gts(adap, q1);
+
+ work_done = w0 | w1;
+ } else
+ work_done = t3_slow_intr_handler(adap);
+
+ spin_unlock(&q0->lock);
+ return IRQ_RETVAL(work_done != 0);
+}
+
+/*
+ * Interrupt handler for legacy INTx interrupts for T3B-based cards.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt pin. We use one SGE
+ * response queue per port in this mode and protect all response queues with
+ * queue 0's lock.
+ */
+static irqreturn_t t3b_intr(int irq, void *cookie)
+{
+ u32 map;
+ struct adapter *adap = cookie;
+ struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+
+ t3_write_reg(adap, A_PL_CLI, 0);
+ map = t3_read_reg(adap, A_SG_DATA_INTR);
+
+ if (unlikely(!map)) /* shared interrupt, most likely */
+ return IRQ_NONE;
+
+ spin_lock(&q0->lock);
+
+ if (unlikely(map & F_ERRINTR))
+ t3_slow_intr_handler(adap);
+
+ if (likely(map & 1))
+ process_responses_gts(adap, q0);
+
+ if (map & 2)
+ process_responses_gts(adap, &adap->sge.qs[1].rspq);
+
+ spin_unlock(&q0->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt pin. We use one SGE
+ * response queue per port in this mode and protect all response queues with
+ * queue 0's lock.
+ */
+static irqreturn_t t3b_intr_napi(int irq, void *cookie)
+{
+ u32 map;
+ struct adapter *adap = cookie;
+ struct sge_qset *qs0 = &adap->sge.qs[0];
+ struct sge_rspq *q0 = &qs0->rspq;
+
+ t3_write_reg(adap, A_PL_CLI, 0);
+ map = t3_read_reg(adap, A_SG_DATA_INTR);
+
+ if (unlikely(!map)) /* shared interrupt, most likely */
+ return IRQ_NONE;
+
+ spin_lock(&q0->lock);
+
+ if (unlikely(map & F_ERRINTR))
+ t3_slow_intr_handler(adap);
+
+ if (likely(map & 1))
+ napi_schedule(&qs0->napi);
+
+ if (map & 2)
+ napi_schedule(&adap->sge.qs[1].napi);
+
+ spin_unlock(&q0->lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * t3_intr_handler - select the top-level interrupt handler
+ * @adap: the adapter
+ * @polling: whether using NAPI to service response queues
+ *
+ * Selects the top-level interrupt handler based on the type of interrupts
+ * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
+ * response queues.
+ */
+irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
+{
+ if (adap->flags & USING_MSIX)
+ return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
+ if (adap->flags & USING_MSI)
+ return polling ? t3_intr_msi_napi : t3_intr_msi;
+ if (adap->params.rev > 0)
+ return polling ? t3b_intr_napi : t3b_intr;
+ return t3_intr;
+}
+
+#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
+ F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
+ V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
+ F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
+ F_HIRCQPARITYERROR)
+#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
+#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
+ F_RSPQDISABLED)
+
+/**
+ * t3_sge_err_intr_handler - SGE async event interrupt handler
+ * @adapter: the adapter
+ *
+ * Interrupt handler for SGE asynchronous (non-data) events.
+ */
+void t3_sge_err_intr_handler(struct adapter *adapter)
+{
+ unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
+ ~F_FLEMPTY;
+
+ if (status & SGE_PARERR)
+ CH_ALERT(adapter, "SGE parity error (0x%x)\n",
+ status & SGE_PARERR);
+ if (status & SGE_FRAMINGERR)
+ CH_ALERT(adapter, "SGE framing error (0x%x)\n",
+ status & SGE_FRAMINGERR);
+
+ if (status & F_RSPQCREDITOVERFOW)
+ CH_ALERT(adapter, "SGE response queue credit overflow\n");
+
+ if (status & F_RSPQDISABLED) {
+ v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
+
+ CH_ALERT(adapter,
+ "packet delivered to disabled response queue "
+ "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
+ }
+
+ if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
+ queue_work(cxgb3_wq, &adapter->db_drop_task);
+
+ if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
+ queue_work(cxgb3_wq, &adapter->db_full_task);
+
+ if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
+ queue_work(cxgb3_wq, &adapter->db_empty_task);
+
+ t3_write_reg(adapter, A_SG_INT_CAUSE, status);
+ if (status & SGE_FATALERR)
+ t3_fatal_err(adapter);
+}
+
+/**
+ * sge_timer_tx - perform periodic maintenance of an SGE qset
+ * @data: the SGE queue set to maintain
+ *
+ * Runs periodically from a timer to perform maintenance of an SGE queue
+ * set. It performs two tasks:
+ *
+ * Cleans up any completed Tx descriptors that may still be pending.
+ * Normal descriptor cleanup happens when new packets are added to a Tx
+ * queue so this timer is relatively infrequent and does any cleanup only
+ * if the Tx queue has not seen any new packets in a while. We make a
+ * best effort attempt to reclaim descriptors, in that we don't wait
+ * around if we cannot get a queue's lock (which most likely is because
+ * someone else is queueing new packets and so will also handle the clean
+ * up). Since control queues use immediate data exclusively we don't
+ * bother cleaning them up here.
+ *
+ */
+static void sge_timer_tx(unsigned long data)
+{
+ struct sge_qset *qs = (struct sge_qset *)data;
+ struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+ unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
+ unsigned long next_period;
+
+ if (__netif_tx_trylock(qs->tx_q)) {
+ tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
+ TX_RECLAIM_TIMER_CHUNK);
+ __netif_tx_unlock(qs->tx_q);
+ }
+
+ if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
+ tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
+ TX_RECLAIM_TIMER_CHUNK);
+ spin_unlock(&qs->txq[TXQ_OFLD].lock);
+ }
+
+ next_period = TX_RECLAIM_PERIOD >>
+ (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
+ TX_RECLAIM_TIMER_CHUNK);
+ mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
+}
+
+/*
+ * sge_timer_rx - perform periodic maintenance of an SGE qset
+ * @data: the SGE queue set to maintain
+ *
+ * a) Replenishes Rx queues that have run out due to memory shortage.
+ * Normally new Rx buffers are added when existing ones are consumed but
+ * when out of memory a queue can become empty. We try to add only a few
+ * buffers here, the queue will be replenished fully as these new buffers
+ * are used up if memory shortage has subsided.
+ *
+ * b) Return coalesced response queue credits in case a response queue is
+ * starved.
+ *
+ */
+static void sge_timer_rx(unsigned long data)
+{
+ spinlock_t *lock;
+ struct sge_qset *qs = (struct sge_qset *)data;
+ struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+ u32 status;
+
+ lock = adap->params.rev > 0 ?
+ &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
+
+ if (!spin_trylock_irq(lock))
+ goto out;
+
+ if (napi_is_scheduled(&qs->napi))
+ goto unlock;
+
+ if (adap->params.rev < 4) {
+ status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+
+ if (status & (1 << qs->rspq.cntxt_id)) {
+ qs->rspq.starved++;
+ if (qs->rspq.credits) {
+ qs->rspq.credits--;
+ refill_rspq(adap, &qs->rspq, 1);
+ qs->rspq.restarted++;
+ t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+ 1 << qs->rspq.cntxt_id);
+ }
+ }
+ }
+
+ if (qs->fl[0].credits < qs->fl[0].size)
+ __refill_fl(adap, &qs->fl[0]);
+ if (qs->fl[1].credits < qs->fl[1].size)
+ __refill_fl(adap, &qs->fl[1]);
+
+unlock:
+ spin_unlock_irq(lock);
+out:
+ mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
+}
+
+/**
+ * t3_update_qset_coalesce - update coalescing settings for a queue set
+ * @qs: the SGE queue set
+ * @p: new queue set parameters
+ *
+ * Update the coalescing settings for an SGE queue set. Nothing is done
+ * if the queue set is not initialized yet.
+ */
+void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
+{
+ qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
+ qs->rspq.polling = p->polling;
+ qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
+}
+
+/**
+ * t3_sge_alloc_qset - initialize an SGE queue set
+ * @adapter: the adapter
+ * @id: the queue set id
+ * @nports: how many Ethernet ports will be using this queue set
+ * @irq_vec_idx: the IRQ vector index for response queue interrupts
+ * @p: configuration parameters for this queue set
+ * @ntxq: number of Tx queues for the queue set
+ * @netdev: net device associated with this queue set
+ * @netdevq: net device TX queue associated with this queue set
+ *
+ * Allocate resources and initialize an SGE queue set. A queue set
+ * comprises a response queue, two Rx free-buffer queues, and up to 3
+ * Tx queues. The Tx queues are assigned roles in the order Ethernet
+ * queue, offload queue, and control queue.
+ */
+int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+ int irq_vec_idx, const struct qset_params *p,
+ int ntxq, struct net_device *dev,
+ struct netdev_queue *netdevq)
+{
+ int i, avail, ret = -ENOMEM;
+ struct sge_qset *q = &adapter->sge.qs[id];
+
+ init_qset_cntxt(q, id);
+ setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
+ setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
+
+ q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
+ sizeof(struct rx_desc),
+ sizeof(struct rx_sw_desc),
+ &q->fl[0].phys_addr, &q->fl[0].sdesc);
+ if (!q->fl[0].desc)
+ goto err;
+
+ q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
+ sizeof(struct rx_desc),
+ sizeof(struct rx_sw_desc),
+ &q->fl[1].phys_addr, &q->fl[1].sdesc);
+ if (!q->fl[1].desc)
+ goto err;
+
+ q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
+ sizeof(struct rsp_desc), 0,
+ &q->rspq.phys_addr, NULL);
+ if (!q->rspq.desc)
+ goto err;
+
+ for (i = 0; i < ntxq; ++i) {
+ /*
+ * The control queue always uses immediate data so does not
+ * need to keep track of any sk_buffs.
+ */
+ size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
+
+ q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
+ sizeof(struct tx_desc), sz,
+ &q->txq[i].phys_addr,
+ &q->txq[i].sdesc);
+ if (!q->txq[i].desc)
+ goto err;
+
+ q->txq[i].gen = 1;
+ q->txq[i].size = p->txq_size[i];
+ spin_lock_init(&q->txq[i].lock);
+ skb_queue_head_init(&q->txq[i].sendq);
+ }
+
+ tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
+ (unsigned long)q);
+ tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
+ (unsigned long)q);
+
+ q->fl[0].gen = q->fl[1].gen = 1;
+ q->fl[0].size = p->fl_size;
+ q->fl[1].size = p->jumbo_size;
+
+ q->rspq.gen = 1;
+ q->rspq.size = p->rspq_size;
+ spin_lock_init(&q->rspq.lock);
+ skb_queue_head_init(&q->rspq.rx_queue);
+
+ q->txq[TXQ_ETH].stop_thres = nports *
+ flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
+
+#if FL0_PG_CHUNK_SIZE > 0
+ q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
+#else
+ q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
+#endif
+#if FL1_PG_CHUNK_SIZE > 0
+ q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
+#else
+ q->fl[1].buf_size = is_offload(adapter) ?
+ (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
+#endif
+
+ q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
+ q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
+ q->fl[0].order = FL0_PG_ORDER;
+ q->fl[1].order = FL1_PG_ORDER;
+ q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
+ q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
+
+ spin_lock_irq(&adapter->sge.reg_lock);
+
+ /* FL threshold comparison uses < */
+ ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
+ q->rspq.phys_addr, q->rspq.size,
+ q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
+ if (ret)
+ goto err_unlock;
+
+ for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
+ ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
+ q->fl[i].phys_addr, q->fl[i].size,
+ q->fl[i].buf_size - SGE_PG_RSVD,
+ p->cong_thres, 1, 0);
+ if (ret)
+ goto err_unlock;
+ }
+
+ ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
+ SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
+ q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
+ 1, 0);
+ if (ret)
+ goto err_unlock;
+
+ if (ntxq > 1) {
+ ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
+ USE_GTS, SGE_CNTXT_OFLD, id,
+ q->txq[TXQ_OFLD].phys_addr,
+ q->txq[TXQ_OFLD].size, 0, 1, 0);
+ if (ret)
+ goto err_unlock;
+ }
+
+ if (ntxq > 2) {
+ ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
+ SGE_CNTXT_CTRL, id,
+ q->txq[TXQ_CTRL].phys_addr,
+ q->txq[TXQ_CTRL].size,
+ q->txq[TXQ_CTRL].token, 1, 0);
+ if (ret)
+ goto err_unlock;
+ }
+
+ spin_unlock_irq(&adapter->sge.reg_lock);
+
+ q->adap = adapter;
+ q->netdev = dev;
+ q->tx_q = netdevq;
+ t3_update_qset_coalesce(q, p);
+
+ avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
+ GFP_KERNEL | __GFP_COMP);
+ if (!avail) {
+ CH_ALERT(adapter, "free list queue 0 initialization failed\n");
+ goto err;
+ }
+ if (avail < q->fl[0].size)
+ CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
+ avail);
+
+ avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
+ GFP_KERNEL | __GFP_COMP);
+ if (avail < q->fl[1].size)
+ CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
+ avail);
+ refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
+
+ t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
+ V_NEWTIMER(q->rspq.holdoff_tmr));
+
+ return 0;
+
+err_unlock:
+ spin_unlock_irq(&adapter->sge.reg_lock);
+err:
+ t3_free_qset(adapter, q);
+ return ret;
+}
+
+/**
+ * t3_start_sge_timers - start SGE timer call backs
+ * @adap: the adapter
+ *
+ * Starts each SGE queue set's timer call back
+ */
+void t3_start_sge_timers(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *q = &adap->sge.qs[i];
+
+ if (q->tx_reclaim_timer.function)
+ mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+ if (q->rx_reclaim_timer.function)
+ mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
+ }
+}
+
+/**
+ * t3_stop_sge_timers - stop SGE timer call backs
+ * @adap: the adapter
+ *
+ * Stops each SGE queue set's timer call back
+ */
+void t3_stop_sge_timers(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *q = &adap->sge.qs[i];
+
+ if (q->tx_reclaim_timer.function)
+ del_timer_sync(&q->tx_reclaim_timer);
+ if (q->rx_reclaim_timer.function)
+ del_timer_sync(&q->rx_reclaim_timer);
+ }
+}
+
+/**
+ * t3_free_sge_resources - free SGE resources
+ * @adap: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void t3_free_sge_resources(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i)
+ t3_free_qset(adap, &adap->sge.qs[i]);
+}
+
+/**
+ * t3_sge_start - enable SGE
+ * @adap: the adapter
+ *
+ * Enables the SGE for DMAs. This is the last step in starting packet
+ * transfers.
+ */
+void t3_sge_start(struct adapter *adap)
+{
+ t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
+}
+
+/**
+ * t3_sge_stop - disable SGE operation
+ * @adap: the adapter
+ *
+ * Disables the DMA engine. This can be called in emeregencies (e.g.,
+ * from error interrupts) or from normal process context. In the latter
+ * case it also disables any pending queue restart tasklets. Note that
+ * if it is called in interrupt context it cannot disable the restart
+ * tasklets as it cannot wait, however the tasklets will have no effect
+ * since the doorbells are disabled and the driver will call this again
+ * later from process context, at which time the tasklets will be stopped
+ * if they are still running.
+ */
+void t3_sge_stop(struct adapter *adap)
+{
+ t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
+ if (!in_interrupt()) {
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
+ tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
+ }
+ }
+}
+
+/**
+ * t3_sge_init - initialize SGE
+ * @adap: the adapter
+ * @p: the SGE parameters
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queue sets here, instead the driver
+ * top-level must request those individually. We also do not enable DMA
+ * here, that should be done after the queues have been set up.
+ */
+void t3_sge_init(struct adapter *adap, struct sge_params *p)
+{
+ unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
+
+ ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
+ F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
+ V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
+ V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
+#if SGE_NUM_GENBITS == 1
+ ctrl |= F_EGRGENCTRL;
+#endif
+ if (adap->params.rev > 0) {
+ if (!(adap->flags & (USING_MSIX | USING_MSI)))
+ ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
+ }
+ t3_write_reg(adap, A_SG_CONTROL, ctrl);
+ t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
+ V_LORCQDRBTHRSH(512));
+ t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
+ t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
+ V_TIMEOUT(200 * core_ticks_per_usec(adap)));
+ t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
+ adap->params.rev < T3_REV_C ? 1000 : 500);
+ t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
+ t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
+ t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
+ t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
+ t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
+}
+
+/**
+ * t3_sge_prep - one-time SGE initialization
+ * @adap: the associated adapter
+ * @p: SGE parameters
+ *
+ * Performs one-time initialization of SGE SW state. Includes determining
+ * defaults for the assorted SGE parameters, which admins can change until
+ * they are used to initialize the SGE.
+ */
+void t3_sge_prep(struct adapter *adap, struct sge_params *p)
+{
+ int i;
+
+ p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct qset_params *q = p->qset + i;
+
+ q->polling = adap->params.rev > 0;
+ q->coalesce_usecs = 5;
+ q->rspq_size = 1024;
+ q->fl_size = 1024;
+ q->jumbo_size = 512;
+ q->txq_size[TXQ_ETH] = 1024;
+ q->txq_size[TXQ_OFLD] = 1024;
+ q->txq_size[TXQ_CTRL] = 256;
+ q->cong_thres = 0;
+ }
+
+ spin_lock_init(&adap->sge.reg_lock);
+}
diff --git a/drivers/net/cxgb3/sge_defs.h b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h
index 29b6c800b23..29b6c800b23 100644
--- a/drivers/net/cxgb3/sge_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h
diff --git a/drivers/net/cxgb3/t3_cpl.h b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
index 852c399a8b0..852c399a8b0 100644
--- a/drivers/net/cxgb3/t3_cpl.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 44ac2f40b64..44ac2f40b64 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
index 705713b5663..705713b5663 100644
--- a/drivers/net/cxgb3/t3cdev.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/ethernet/chelsio/cxgb3/version.h
index 8bda06e366c..8bda06e366c 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/version.h
diff --git a/drivers/net/cxgb3/vsc8211.c b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c
index 4f9a1c2724f..4f9a1c2724f 100644
--- a/drivers/net/cxgb3/vsc8211.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
index 3af19a55037..3af19a55037 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
diff --git a/drivers/net/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 498667487f5..498667487f5 100644
--- a/drivers/net/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 223a7f72343..0fe18850c83 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -326,7 +326,7 @@ struct sge_fl { /* SGE free-buffer queue state */
/* A packet gather list */
struct pkt_gl {
- skb_frag_t frags[MAX_SKB_FRAGS];
+ struct page_frag frags[MAX_SKB_FRAGS];
void *va; /* virtual address of first byte */
unsigned int nfrags; /* # of fragments */
unsigned int tot_len; /* total length of fragments */
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index b4efa292fd6..4c8f42afa3c 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -41,6 +41,7 @@
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
+#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
@@ -1901,7 +1902,7 @@ static int set_rss_table(struct net_device *dev,
}
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- void *rules)
+ u32 *rules)
{
const struct port_info *pi = netdev_priv(dev);
@@ -3639,6 +3640,8 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->features |= netdev->hw_features | highdma;
netdev->vlan_features = netdev->features & VLAN_FEAT;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
netdev->netdev_ops = &cxgb4_netdev_ops;
SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
}
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index b1d39b8d141..b1d39b8d141 100644
--- a/drivers/net/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index a2d323c473f..a2d323c473f 100644
--- a/drivers/net/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 02b31d0c641..02b31d0c641 100644
--- a/drivers/net/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
new file mode 100644
index 00000000000..ddc16985d0f
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -0,0 +1,2443 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <linux/prefetch.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+
+/*
+ * Rx buffer size. We use largish buffers if possible but settle for single
+ * pages under memory shortage.
+ */
+#if PAGE_SHIFT >= 16
+# define FL_PG_ORDER 0
+#else
+# define FL_PG_ORDER (16 - PAGE_SHIFT)
+#endif
+
+/* RX_PULL_LEN should be <= RX_COPY_THRES */
+#define RX_COPY_THRES 256
+#define RX_PULL_LEN 128
+
+/*
+ * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
+ * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
+ */
+#define RX_PKT_SKB_LEN 512
+
+/* Ethernet header padding prepended to RX_PKTs */
+#define RX_PKT_PAD 2
+
+/*
+ * Max number of Tx descriptors we clean up at a time. Should be modest as
+ * freeing skbs isn't cheap and it happens while holding locks. We just need
+ * to free packets faster than they arrive, we eventually catch up and keep
+ * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
+ */
+#define MAX_TX_RECLAIM 16
+
+/*
+ * Max number of Rx buffers we replenish at a time. Again keep this modest,
+ * allocating buffers isn't cheap either.
+ */
+#define MAX_RX_REFILL 16U
+
+/*
+ * Period of the Rx queue check timer. This timer is infrequent as it has
+ * something to do only when the system experiences severe memory shortage.
+ */
+#define RX_QCHECK_PERIOD (HZ / 2)
+
+/*
+ * Period of the Tx queue check timer.
+ */
+#define TX_QCHECK_PERIOD (HZ / 2)
+
+/*
+ * Max number of Tx descriptors to be reclaimed by the Tx timer.
+ */
+#define MAX_TIMER_TX_RECLAIM 100
+
+/*
+ * Timer index used when backing off due to memory shortage.
+ */
+#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
+
+/*
+ * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
+ * attempt to refill it.
+ */
+#define FL_STARVE_THRES 4
+
+/*
+ * Suspend an Ethernet Tx queue with fewer available descriptors than this.
+ * This is the same as calc_tx_descs() for a TSO packet with
+ * nr_frags == MAX_SKB_FRAGS.
+ */
+#define ETHTXQ_STOP_THRES \
+ (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
+
+/*
+ * Suspension threshold for non-Ethernet Tx queues. We require enough room
+ * for a full sized WR.
+ */
+#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 128
+
+/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
+enum {
+ /* packet alignment in FL buffers */
+ FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
+ /* egress status entry size */
+ STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
+};
+
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct sk_buff *skb;
+ struct ulptx_sgl *sgl;
+};
+
+struct rx_sw_desc { /* SW state per Rx descriptor */
+ struct page *page;
+ dma_addr_t dma_addr;
+};
+
+/*
+ * The low bits of rx_sw_desc.dma_addr have special meaning.
+ */
+enum {
+ RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */
+ RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
+};
+
+static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
+{
+ return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
+}
+
+static inline bool is_buf_mapped(const struct rx_sw_desc *d)
+{
+ return !(d->dma_addr & RX_UNMAPPED_BUF);
+}
+
+/**
+ * txq_avail - return the number of available slots in a Tx queue
+ * @q: the Tx queue
+ *
+ * Returns the number of descriptors in a Tx queue available to write new
+ * packets.
+ */
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+/**
+ * fl_cap - return the capacity of a free-buffer list
+ * @fl: the FL
+ *
+ * Returns the capacity of a free-buffer list. The capacity is less than
+ * the size because one descriptor needs to be left unpopulated, otherwise
+ * HW will think the FL is empty.
+ */
+static inline unsigned int fl_cap(const struct sge_fl *fl)
+{
+ return fl->size - 8; /* 1 descriptor = 8 buffers */
+}
+
+static inline bool fl_starving(const struct sge_fl *fl)
+{
+ return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+}
+
+static int map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr)
+{
+ const skb_frag_t *fp, *end;
+ const struct skb_shared_info *si;
+
+ *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto out_err;
+
+ si = skb_shinfo(skb);
+ end = &si->frags[si->nr_frags];
+
+ for (fp = si->frags; fp < end; fp++) {
+ *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ while (fp-- > si->frags)
+ dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
+
+ dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+out_err:
+ return -ENOMEM;
+}
+
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+static void unmap_skb(struct device *dev, const struct sk_buff *skb,
+ const dma_addr_t *addr)
+{
+ const skb_frag_t *fp, *end;
+ const struct skb_shared_info *si;
+
+ dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
+
+ si = skb_shinfo(skb);
+ end = &si->frags[si->nr_frags];
+ for (fp = si->frags; fp < end; fp++)
+ dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
+}
+
+/**
+ * deferred_unmap_destructor - unmap a packet when it is freed
+ * @skb: the packet
+ *
+ * This is the packet destructor used for Tx packets that need to remain
+ * mapped until they are freed rather than until their Tx descriptors are
+ * freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+ unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
+}
+#endif
+
+static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
+ const struct ulptx_sgl *sgl, const struct sge_txq *q)
+{
+ const struct ulptx_sge_pair *p;
+ unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+
+ if (likely(skb_headlen(skb)))
+ dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
+ DMA_TO_DEVICE);
+ else {
+ dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
+ DMA_TO_DEVICE);
+ nfrags--;
+ }
+
+ /*
+ * the complexity below is because of the possibility of a wrap-around
+ * in the middle of an SGL
+ */
+ for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
+ if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
+unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+ ntohl(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
+ ntohl(p->len[1]), DMA_TO_DEVICE);
+ p++;
+ } else if ((u8 *)p == (u8 *)q->stat) {
+ p = (const struct ulptx_sge_pair *)q->desc;
+ goto unmap;
+ } else if ((u8 *)p + 8 == (u8 *)q->stat) {
+ const __be64 *addr = (const __be64 *)q->desc;
+
+ dma_unmap_page(dev, be64_to_cpu(addr[0]),
+ ntohl(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(addr[1]),
+ ntohl(p->len[1]), DMA_TO_DEVICE);
+ p = (const struct ulptx_sge_pair *)&addr[2];
+ } else {
+ const __be64 *addr = (const __be64 *)q->desc;
+
+ dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+ ntohl(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(addr[0]),
+ ntohl(p->len[1]), DMA_TO_DEVICE);
+ p = (const struct ulptx_sge_pair *)&addr[1];
+ }
+ }
+ if (nfrags) {
+ __be64 addr;
+
+ if ((u8 *)p == (u8 *)q->stat)
+ p = (const struct ulptx_sge_pair *)q->desc;
+ addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
+ *(const __be64 *)q->desc;
+ dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
+ DMA_TO_DEVICE);
+ }
+}
+
+/**
+ * free_tx_desc - reclaims Tx descriptors and their buffers
+ * @adapter: the adapter
+ * @q: the Tx queue to reclaim descriptors from
+ * @n: the number of descriptors to reclaim
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ * Tx buffers. Called with the Tx queue lock held.
+ */
+static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
+ unsigned int n, bool unmap)
+{
+ struct tx_sw_desc *d;
+ unsigned int cidx = q->cidx;
+ struct device *dev = adap->pdev_dev;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->skb) { /* an SGL is present */
+ if (unmap)
+ unmap_sgl(dev, d->skb, d->sgl, q);
+ kfree_skb(d->skb);
+ d->skb = NULL;
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ }
+ q->cidx = cidx;
+}
+
+/*
+ * Return the number of reclaimable descriptors in a Tx queue.
+ */
+static inline int reclaimable(const struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+ hw_cidx -= q->cidx;
+ return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
+}
+
+/**
+ * reclaim_completed_tx - reclaims completed Tx descriptors
+ * @adap: the adapter
+ * @q: the Tx queue to reclaim completed descriptors from
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims Tx descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. Called with the Tx
+ * queue locked.
+ */
+static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+ bool unmap)
+{
+ int avail = reclaimable(q);
+
+ if (avail) {
+ /*
+ * Limit the amount of clean up work we do at a time to keep
+ * the Tx lock hold time O(1).
+ */
+ if (avail > MAX_TX_RECLAIM)
+ avail = MAX_TX_RECLAIM;
+
+ free_tx_desc(adap, q, avail, unmap);
+ q->in_use -= avail;
+ }
+}
+
+static inline int get_buf_size(const struct rx_sw_desc *d)
+{
+#if FL_PG_ORDER > 0
+ return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
+ PAGE_SIZE;
+#else
+ return PAGE_SIZE;
+#endif
+}
+
+/**
+ * free_rx_bufs - free the Rx buffers on an SGE free list
+ * @adap: the adapter
+ * @q: the SGE free list to free buffers from
+ * @n: how many buffers to free
+ *
+ * Release the next @n buffers on an SGE free-buffer Rx queue. The
+ * buffers must be made inaccessible to HW before calling this function.
+ */
+static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
+{
+ while (n--) {
+ struct rx_sw_desc *d = &q->sdesc[q->cidx];
+
+ if (is_buf_mapped(d))
+ dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
+ get_buf_size(d), PCI_DMA_FROMDEVICE);
+ put_page(d->page);
+ d->page = NULL;
+ if (++q->cidx == q->size)
+ q->cidx = 0;
+ q->avail--;
+ }
+}
+
+/**
+ * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
+ * @adap: the adapter
+ * @q: the SGE free list
+ *
+ * Unmap the current buffer on an SGE free-buffer Rx queue. The
+ * buffer must be made inaccessible to HW before calling this function.
+ *
+ * This is similar to @free_rx_bufs above but does not free the buffer.
+ * Do note that the FL still loses any further access to the buffer.
+ */
+static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
+{
+ struct rx_sw_desc *d = &q->sdesc[q->cidx];
+
+ if (is_buf_mapped(d))
+ dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
+ get_buf_size(d), PCI_DMA_FROMDEVICE);
+ d->page = NULL;
+ if (++q->cidx == q->size)
+ q->cidx = 0;
+ q->avail--;
+}
+
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+ if (q->pend_cred >= 8) {
+ wmb();
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
+ QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
+ q->pend_cred &= 7;
+ }
+}
+
+static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
+ dma_addr_t mapping)
+{
+ sd->page = pg;
+ sd->dma_addr = mapping; /* includes size low bits */
+}
+
+/**
+ * refill_fl - refill an SGE Rx buffer ring
+ * @adap: the adapter
+ * @q: the ring to refill
+ * @n: the number of new buffers to allocate
+ * @gfp: the gfp flags for the allocations
+ *
+ * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
+ * allocated with the supplied gfp flags. The caller must assure that
+ * @n does not exceed the queue's capacity. If afterwards the queue is
+ * found critically low mark it as starving in the bitmap of starving FLs.
+ *
+ * Returns the number of buffers allocated.
+ */
+static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
+ gfp_t gfp)
+{
+ struct page *pg;
+ dma_addr_t mapping;
+ unsigned int cred = q->avail;
+ __be64 *d = &q->desc[q->pidx];
+ struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+
+ gfp |= __GFP_NOWARN; /* failures are expected */
+
+#if FL_PG_ORDER > 0
+ /*
+ * Prefer large buffers
+ */
+ while (n) {
+ pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
+ if (unlikely(!pg)) {
+ q->large_alloc_failed++;
+ break; /* fall back to single pages */
+ }
+
+ mapping = dma_map_page(adap->pdev_dev, pg, 0,
+ PAGE_SIZE << FL_PG_ORDER,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
+ __free_pages(pg, FL_PG_ORDER);
+ goto out; /* do not try small pages for this error */
+ }
+ mapping |= RX_LARGE_BUF;
+ *d++ = cpu_to_be64(mapping);
+
+ set_rx_sw_desc(sd, pg, mapping);
+ sd++;
+
+ q->avail++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ sd = q->sdesc;
+ d = q->desc;
+ }
+ n--;
+ }
+#endif
+
+ while (n--) {
+ pg = __netdev_alloc_page(adap->port[0], gfp);
+ if (unlikely(!pg)) {
+ q->alloc_failed++;
+ break;
+ }
+
+ mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
+ netdev_free_page(adap->port[0], pg);
+ goto out;
+ }
+ *d++ = cpu_to_be64(mapping);
+
+ set_rx_sw_desc(sd, pg, mapping);
+ sd++;
+
+ q->avail++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ sd = q->sdesc;
+ d = q->desc;
+ }
+ }
+
+out: cred = q->avail - cred;
+ q->pend_cred += cred;
+ ring_fl_db(adap, q);
+
+ if (unlikely(fl_starving(q))) {
+ smp_wmb();
+ set_bit(q->cntxt_id - adap->sge.egr_start,
+ adap->sge.starving_fl);
+ }
+
+ return cred;
+}
+
+static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+{
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
+ GFP_ATOMIC);
+}
+
+/**
+ * alloc_ring - allocate resources for an SGE descriptor ring
+ * @dev: the PCI device's core device
+ * @nelem: the number of descriptors
+ * @elem_size: the size of each descriptor
+ * @sw_size: the size of the SW state associated with each ring element
+ * @phys: the physical address of the allocated ring
+ * @metadata: address of the array holding the SW state for the ring
+ * @stat_size: extra space in HW ring for status information
+ * @node: preferred node for memory allocations
+ *
+ * Allocates resources for an SGE descriptor ring, such as Tx queues,
+ * free buffer lists, or response queues. Each SGE ring requires
+ * space for its HW descriptors plus, optionally, space for the SW state
+ * associated with each HW entry (the metadata). The function returns
+ * three values: the virtual address for the HW ring (the return value
+ * of the function), the bus address of the HW ring, and the address
+ * of the SW ring.
+ */
+static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
+ size_t sw_size, dma_addr_t *phys, void *metadata,
+ size_t stat_size, int node)
+{
+ size_t len = nelem * elem_size + stat_size;
+ void *s = NULL;
+ void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
+
+ if (!p)
+ return NULL;
+ if (sw_size) {
+ s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
+
+ if (!s) {
+ dma_free_coherent(dev, len, p, *phys);
+ return NULL;
+ }
+ }
+ if (metadata)
+ *(void **)metadata = s;
+ memset(p, 0, len);
+ return p;
+}
+
+/**
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ *
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Returns the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ BUG_ON(n > SGE_MAX_WR_LEN / 8);
+ return DIV_ROUND_UP(n, 8);
+}
+
+/**
+ * is_eth_imm - can an Ethernet packet be sent as immediate data?
+ * @skb: the packet
+ *
+ * Returns whether an Ethernet packet is small enough to fit as
+ * immediate data.
+ */
+static inline int is_eth_imm(const struct sk_buff *skb)
+{
+ return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
+}
+
+/**
+ * calc_tx_flits - calculate the number of flits for a packet Tx WR
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for a Tx WR for the given Ethernet
+ * packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+{
+ unsigned int flits;
+
+ if (is_eth_imm(skb))
+ return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
+ if (skb_shinfo(skb)->gso_size)
+ flits += 2;
+ return flits;
+}
+
+/**
+ * calc_tx_descs - calculate the number of Tx descriptors for a packet
+ * @skb: the packet
+ *
+ * Returns the number of Tx descriptors needed for the given Ethernet
+ * packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+{
+ return flits_to_desc(calc_tx_flits(skb));
+}
+
+/**
+ * write_sgl - populate a scatter/gather list for a packet
+ * @skb: the packet
+ * @q: the Tx queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @start: start offset into skb main-body data to include in the SGL
+ * @addr: the list of bus addresses for the SGL elements
+ *
+ * Generates a gather list for the buffers that make up a packet.
+ * The caller must provide adequate space for the SGL that will be written.
+ * The SGL includes all of the packet's page fragments and the data in its
+ * main body except for the first @start bytes. @sgl must be 16-byte
+ * aligned and within a Tx descriptor with available space. @end points
+ * right after the end of the SGL but does not account for any potential
+ * wrap around, i.e., @end > @sgl.
+ */
+static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
+{
+ unsigned int i, len;
+ struct ulptx_sge_pair *to;
+ const struct skb_shared_info *si = skb_shinfo(skb);
+ unsigned int nfrags = si->nr_frags;
+ struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
+
+ len = skb_headlen(skb) - start;
+ if (likely(len)) {
+ sgl->len0 = htonl(len);
+ sgl->addr0 = cpu_to_be64(addr[0] + start);
+ nfrags++;
+ } else {
+ sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
+ sgl->addr0 = cpu_to_be64(addr[1]);
+ }
+
+ sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
+ if (likely(--nfrags == 0))
+ return;
+ /*
+ * Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+ for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
+ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
+ to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
+ to->addr[0] = cpu_to_be64(addr[i]);
+ to->addr[1] = cpu_to_be64(addr[++i]);
+ }
+ if (nfrags) {
+ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
+ to->len[1] = cpu_to_be32(0);
+ to->addr[0] = cpu_to_be64(addr[i + 1]);
+ }
+ if (unlikely((u8 *)end > (u8 *)q->stat)) {
+ unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = (u8 *)end - (u8 *)q->stat;
+ memcpy(q->desc, (u8 *)buf + part0, part1);
+ end = (void *)q->desc + part1;
+ }
+ if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
+ *(u64 *)end = 0;
+}
+
+/**
+ * ring_tx_db - check and potentially ring a Tx queue's doorbell
+ * @adap: the adapter
+ * @q: the Tx queue
+ * @n: number of new descriptors to give to HW
+ *
+ * Ring the doorbel for a Tx queue.
+ */
+static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
+{
+ wmb(); /* write descriptors before telling HW */
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ QID(q->cntxt_id) | PIDX(n));
+}
+
+/**
+ * inline_tx_skb - inline a packet's data into Tx descriptors
+ * @skb: the packet
+ * @q: the Tx queue where the packet will be inlined
+ * @pos: starting position in the Tx queue where to inline the packet
+ *
+ * Inline a packet's contents directly into Tx descriptors, starting at
+ * the given position within the Tx DMA ring.
+ * Most of the complexity of this operation is dealing with wrap arounds
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
+ void *pos)
+{
+ u64 *p;
+ int left = (void *)q->stat - pos;
+
+ if (likely(skb->len <= left)) {
+ if (likely(!skb->data_len))
+ skb_copy_from_linear_data(skb, pos, skb->len);
+ else
+ skb_copy_bits(skb, 0, pos, skb->len);
+ pos += skb->len;
+ } else {
+ skb_copy_bits(skb, 0, pos, left);
+ skb_copy_bits(skb, left, q->desc, skb->len - left);
+ pos = (void *)q->desc + (skb->len - left);
+ }
+
+ /* 0-pad to multiple of 16 */
+ p = PTR_ALIGN(pos, 8);
+ if ((uintptr_t)p & 8)
+ *p = 0;
+}
+
+/*
+ * Figure out what HW csum a packet wants and return the appropriate control
+ * bits.
+ */
+static u64 hwcsum(const struct sk_buff *skb)
+{
+ int csum_type;
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (iph->version == 4) {
+ if (iph->protocol == IPPROTO_TCP)
+ csum_type = TX_CSUM_TCPIP;
+ else if (iph->protocol == IPPROTO_UDP)
+ csum_type = TX_CSUM_UDPIP;
+ else {
+nocsum: /*
+ * unknown protocol, disable HW csum
+ * and hope a bad packet is detected
+ */
+ return TXPKT_L4CSUM_DIS;
+ }
+ } else {
+ /*
+ * this doesn't work with extension headers
+ */
+ const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
+
+ if (ip6h->nexthdr == IPPROTO_TCP)
+ csum_type = TX_CSUM_TCPIP6;
+ else if (ip6h->nexthdr == IPPROTO_UDP)
+ csum_type = TX_CSUM_UDPIP6;
+ else
+ goto nocsum;
+ }
+
+ if (likely(csum_type >= TX_CSUM_TCPIP))
+ return TXPKT_CSUM_TYPE(csum_type) |
+ TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
+ TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
+ else {
+ int start = skb_transport_offset(skb);
+
+ return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
+ TXPKT_CSUM_LOC(start + skb->csum_offset);
+ }
+}
+
+static void eth_txq_stop(struct sge_eth_txq *q)
+{
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+/**
+ * t4_eth_xmit - add a packet to an Ethernet Tx queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
+ */
+netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u32 wr_mid;
+ u64 cntrl, *end;
+ int qidx, credits;
+ unsigned int flits, ndesc;
+ struct adapter *adap;
+ struct sge_eth_txq *q;
+ const struct port_info *pi;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ const struct skb_shared_info *ssi;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+
+ /*
+ * The chip min packet length is 10 octets but play safe and reject
+ * anything shorter than an Ethernet header.
+ */
+ if (unlikely(skb->len < ETH_HLEN)) {
+out_free: dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb_get_queue_mapping(skb);
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ reclaim_completed_tx(adap, &q->q, true);
+
+ flits = calc_tx_flits(skb);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ eth_txq_stop(q);
+ dev_err(adap->pdev_dev,
+ "%s: Tx ring %u full while queue awake!\n",
+ dev->name, qidx);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!is_eth_imm(skb) &&
+ unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ q->mapping_err++;
+ goto out_free;
+ }
+
+ wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
+ }
+
+ wr = (void *)&q->q.desc[q->q.pidx];
+ wr->equiq_to_len16 = htonl(wr_mid);
+ wr->r3 = cpu_to_be64(0);
+ end = (u64 *)wr + flits;
+
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso *lso = (void *)wr;
+ bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+ wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN(sizeof(*lso)));
+ lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE | LSO_LAST_SLICE |
+ LSO_IPV6(v6) |
+ LSO_ETHHDR_LEN(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ lso->c.ipid_ofst = htons(0);
+ lso->c.mss = htons(ssi->gso_size);
+ lso->c.seqno_offset = htonl(0);
+ lso->c.len = htonl(skb->len);
+ cpl = (void *)(lso + 1);
+ cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN(l3hdr_len) |
+ TXPKT_ETHHDR_LEN(eth_xtra_len);
+ q->tso++;
+ q->tx_cso += ssi->gso_segs;
+ } else {
+ int len;
+
+ len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+ wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN(len));
+ cpl = (void *)(wr + 1);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ q->tx_cso++;
+ } else
+ cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ }
+
+ if (vlan_tx_tag_present(skb)) {
+ q->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+ }
+
+ cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
+ TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
+ cpl->pack = htons(0);
+ cpl->len = htons(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ if (is_eth_imm(skb)) {
+ inline_tx_skb(skb, &q->q, cpl + 1);
+ dev_kfree_skb(skb);
+ } else {
+ int last_desc;
+
+ write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
+ addr);
+ skb_orphan(skb);
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ q->q.sdesc[last_desc].skb = skb;
+ q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
+ }
+
+ txq_advance(&q->q, ndesc);
+
+ ring_tx_db(adap, &q->q, ndesc);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+ int reclaim = hw_cidx - q->cidx;
+
+ if (reclaim < 0)
+ reclaim += q->size;
+
+ q->in_use -= reclaim;
+ q->cidx = hw_cidx;
+}
+
+/**
+ * is_imm - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as a WR with immediate data.
+ */
+static inline int is_imm(const struct sk_buff *skb)
+{
+ return skb->len <= MAX_CTRL_WR_LEN;
+}
+
+/**
+ * ctrlq_check_stop - check if a control queue is full and should stop
+ * @q: the queue
+ * @wr: most recent WR written to the queue
+ *
+ * Check if a control queue has become full and should be stopped.
+ * We clean up control queue descriptors very lazily, only when we are out.
+ * If the queue is still full after reclaiming any completed descriptors
+ * we suspend it and have the last WR wake it up.
+ */
+static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
+{
+ reclaim_completed_tx_imm(&q->q);
+ if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
+ wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
+ q->q.stops++;
+ q->full = 1;
+ }
+}
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @q: the control queue
+ * @skb: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data.
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
+{
+ unsigned int ndesc;
+ struct fw_wr_hdr *wr;
+
+ if (unlikely(!is_imm(skb))) {
+ WARN_ON(1);
+ dev_kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
+ spin_lock(&q->sendq.lock);
+
+ if (unlikely(q->full)) {
+ skb->priority = ndesc; /* save for restart */
+ __skb_queue_tail(&q->sendq, skb);
+ spin_unlock(&q->sendq.lock);
+ return NET_XMIT_CN;
+ }
+
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ inline_tx_skb(skb, &q->q, wr);
+
+ txq_advance(&q->q, ndesc);
+ if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
+ ctrlq_check_stop(q, wr);
+
+ ring_tx_db(q->adap, &q->q, ndesc);
+ spin_unlock(&q->sendq.lock);
+
+ kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_ctrlq - restart a suspended control queue
+ * @data: the control queue to restart
+ *
+ * Resumes transmission on a suspended Tx control queue.
+ */
+static void restart_ctrlq(unsigned long data)
+{
+ struct sk_buff *skb;
+ unsigned int written = 0;
+ struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
+
+ spin_lock(&q->sendq.lock);
+ reclaim_completed_tx_imm(&q->q);
+ BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
+
+ while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
+ struct fw_wr_hdr *wr;
+ unsigned int ndesc = skb->priority; /* previously saved */
+
+ /*
+ * Write descriptors and free skbs outside the lock to limit
+ * wait times. q->full is still set so new skbs will be queued.
+ */
+ spin_unlock(&q->sendq.lock);
+
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ inline_tx_skb(skb, &q->q, wr);
+ kfree_skb(skb);
+
+ written += ndesc;
+ txq_advance(&q->q, ndesc);
+ if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
+ unsigned long old = q->q.stops;
+
+ ctrlq_check_stop(q, wr);
+ if (q->q.stops != old) { /* suspended anew */
+ spin_lock(&q->sendq.lock);
+ goto ringdb;
+ }
+ }
+ if (written > 16) {
+ ring_tx_db(q->adap, &q->q, written);
+ written = 0;
+ }
+ spin_lock(&q->sendq.lock);
+ }
+ q->full = 0;
+ringdb: if (written)
+ ring_tx_db(q->adap, &q->q, written);
+ spin_unlock(&q->sendq.lock);
+}
+
+/**
+ * t4_mgmt_tx - send a management message
+ * @adap: the adapter
+ * @skb: the packet containing the management message
+ *
+ * Send a management message through control queue 0.
+ */
+int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
+ local_bh_enable();
+ return ret;
+}
+
+/**
+ * is_ofld_imm - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as an offload WR with immediate
+ * data. We currently use the same limit as for Ethernet packets.
+ */
+static inline int is_ofld_imm(const struct sk_buff *skb)
+{
+ return skb->len <= MAX_IMM_TX_PKT_LEN;
+}
+
+/**
+ * calc_tx_flits_ofld - calculate # of flits for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for the given offload packet.
+ * These packets are already fully constructed and no additional headers
+ * will be added.
+ */
+static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
+{
+ unsigned int flits, cnt;
+
+ if (is_ofld_imm(skb))
+ return DIV_ROUND_UP(skb->len, 8);
+
+ flits = skb_transport_offset(skb) / 8U; /* headers */
+ cnt = skb_shinfo(skb)->nr_frags;
+ if (skb->tail != skb->transport_header)
+ cnt++;
+ return flits + sgl_len(cnt);
+}
+
+/**
+ * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
+ * @adap: the adapter
+ * @q: the queue to stop
+ *
+ * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
+ * inability to map packets. A periodic timer attempts to restart
+ * queues so marked.
+ */
+static void txq_stop_maperr(struct sge_ofld_txq *q)
+{
+ q->mapping_err++;
+ q->q.stops++;
+ set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
+ q->adap->sge.txq_maperr);
+}
+
+/**
+ * ofldtxq_stop - stop an offload Tx queue that has become full
+ * @q: the queue to stop
+ * @skb: the packet causing the queue to become full
+ *
+ * Stops an offload Tx queue that has become full and modifies the packet
+ * being written to request a wakeup.
+ */
+static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
+{
+ struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
+
+ wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
+ q->q.stops++;
+ q->full = 1;
+}
+
+/**
+ * service_ofldq - restart a suspended offload queue
+ * @q: the offload queue
+ *
+ * Services an offload Tx queue by moving packets from its packet queue
+ * to the HW Tx ring. The function starts and ends with the queue locked.
+ */
+static void service_ofldq(struct sge_ofld_txq *q)
+{
+ u64 *pos;
+ int credits;
+ struct sk_buff *skb;
+ unsigned int written = 0;
+ unsigned int flits, ndesc;
+
+ while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
+ /*
+ * We drop the lock but leave skb on sendq, thus retaining
+ * exclusive access to the state of the queue.
+ */
+ spin_unlock(&q->sendq.lock);
+
+ reclaim_completed_tx(q->adap, &q->q, false);
+
+ flits = skb->priority; /* previously saved */
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+ BUG_ON(credits < 0);
+ if (unlikely(credits < TXQ_STOP_THRES))
+ ofldtxq_stop(q, skb);
+
+ pos = (u64 *)&q->q.desc[q->q.pidx];
+ if (is_ofld_imm(skb))
+ inline_tx_skb(skb, &q->q, pos);
+ else if (map_skb(q->adap->pdev_dev, skb,
+ (dma_addr_t *)skb->head)) {
+ txq_stop_maperr(q);
+ spin_lock(&q->sendq.lock);
+ break;
+ } else {
+ int last_desc, hdr_len = skb_transport_offset(skb);
+
+ memcpy(pos, skb->data, hdr_len);
+ write_sgl(skb, &q->q, (void *)pos + hdr_len,
+ pos + flits, hdr_len,
+ (dma_addr_t *)skb->head);
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+ skb->dev = q->adap->port[0];
+ skb->destructor = deferred_unmap_destructor;
+#endif
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ q->q.sdesc[last_desc].skb = skb;
+ }
+
+ txq_advance(&q->q, ndesc);
+ written += ndesc;
+ if (unlikely(written > 32)) {
+ ring_tx_db(q->adap, &q->q, written);
+ written = 0;
+ }
+
+ spin_lock(&q->sendq.lock);
+ __skb_unlink(skb, &q->sendq);
+ if (is_ofld_imm(skb))
+ kfree_skb(skb);
+ }
+ if (likely(written))
+ ring_tx_db(q->adap, &q->q, written);
+}
+
+/**
+ * ofld_xmit - send a packet through an offload queue
+ * @q: the Tx offload queue
+ * @skb: the packet
+ *
+ * Send an offload packet through an SGE offload queue.
+ */
+static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
+{
+ skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
+ spin_lock(&q->sendq.lock);
+ __skb_queue_tail(&q->sendq, skb);
+ if (q->sendq.qlen == 1)
+ service_ofldq(q);
+ spin_unlock(&q->sendq.lock);
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_ofldq - restart a suspended offload queue
+ * @data: the offload queue to restart
+ *
+ * Resumes transmission on a suspended Tx offload queue.
+ */
+static void restart_ofldq(unsigned long data)
+{
+ struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
+
+ spin_lock(&q->sendq.lock);
+ q->full = 0; /* the queue actually is completely empty now */
+ service_ofldq(q);
+ spin_unlock(&q->sendq.lock);
+}
+
+/**
+ * skb_txq - return the Tx queue an offload packet should use
+ * @skb: the packet
+ *
+ * Returns the Tx queue an offload packet should use as indicated by bits
+ * 1-15 in the packet's queue_mapping.
+ */
+static inline unsigned int skb_txq(const struct sk_buff *skb)
+{
+ return skb->queue_mapping >> 1;
+}
+
+/**
+ * is_ctrl_pkt - return whether an offload packet is a control packet
+ * @skb: the packet
+ *
+ * Returns whether an offload packet should use an OFLD or a CTRL
+ * Tx queue as indicated by bit 0 in the packet's queue_mapping.
+ */
+static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
+{
+ return skb->queue_mapping & 1;
+}
+
+static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
+{
+ unsigned int idx = skb_txq(skb);
+
+ if (unlikely(is_ctrl_pkt(skb)))
+ return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
+ return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
+}
+
+/**
+ * t4_ofld_send - send an offload packet
+ * @adap: the adapter
+ * @skb: the packet
+ *
+ * Sends an offload packet. We use the packet queue_mapping to select the
+ * appropriate Tx queue as follows: bit 0 indicates whether the packet
+ * should be sent as regular or control, bits 1-15 select the queue.
+ */
+int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = ofld_send(adap, skb);
+ local_bh_enable();
+ return ret;
+}
+
+/**
+ * cxgb4_ofld_send - send an offload packet
+ * @dev: the net device
+ * @skb: the packet
+ *
+ * Sends an offload packet. This is an exported version of @t4_ofld_send,
+ * intended for ULDs.
+ */
+int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
+{
+ return t4_ofld_send(netdev2adap(dev), skb);
+}
+EXPORT_SYMBOL(cxgb4_ofld_send);
+
+static inline void copy_frags(struct sk_buff *skb,
+ const struct pkt_gl *gl, unsigned int offset)
+{
+ int i;
+
+ /* usually there's just one frag */
+ __skb_fill_page_desc(skb, 0, gl->frags[0].page,
+ gl->frags[0].offset + offset,
+ gl->frags[0].size - offset);
+ skb_shinfo(skb)->nr_frags = gl->nfrags;
+ for (i = 1; i < gl->nfrags; i++)
+ __skb_fill_page_desc(skb, i, gl->frags[i].page,
+ gl->frags[i].offset,
+ gl->frags[i].size);
+
+ /* get a reference to the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+}
+
+/**
+ * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
+ * @gl: the gather list
+ * @skb_len: size of sk_buff main body if it carries fragments
+ * @pull_len: amount of data to move to the sk_buff's main body
+ *
+ * Builds an sk_buff from the given packet gather list. Returns the
+ * sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len, unsigned int pull_len)
+{
+ struct sk_buff *skb;
+
+ /*
+ * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
+ * size, which is expected since buffers are at least PAGE_SIZEd.
+ * In this case packets up to RX_COPY_THRES have only one fragment.
+ */
+ if (gl->tot_len <= RX_COPY_THRES) {
+ skb = dev_alloc_skb(gl->tot_len);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, gl->tot_len);
+ skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+ } else {
+ skb = dev_alloc_skb(skb_len);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+ copy_frags(skb, gl, pull_len);
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+ }
+out: return skb;
+}
+EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
+
+/**
+ * t4_pktgl_free - free a packet gather list
+ * @gl: the gather list
+ *
+ * Releases the pages of a packet gather list. We do not own the last
+ * page on the list and do not free it.
+ */
+static void t4_pktgl_free(const struct pkt_gl *gl)
+{
+ int n;
+ const struct page_frag *p;
+
+ for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
+ put_page(p->page);
+}
+
+/*
+ * Process an MPS trace packet. Give it an unused protocol number so it won't
+ * be delivered to anyone and send it to the stack for capture.
+ */
+static noinline int handle_trace_pkt(struct adapter *adap,
+ const struct pkt_gl *gl)
+{
+ struct sk_buff *skb;
+ struct cpl_trace_pkt *p;
+
+ skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
+ if (unlikely(!skb)) {
+ t4_pktgl_free(gl);
+ return 0;
+ }
+
+ p = (struct cpl_trace_pkt *)skb->data;
+ __skb_pull(skb, sizeof(*p));
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(0xffff);
+ skb->dev = adap->port[0];
+ netif_receive_skb(skb);
+ return 0;
+}
+
+static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
+ const struct cpl_rx_pkt *pkt)
+{
+ int ret;
+ struct sk_buff *skb;
+
+ skb = napi_get_frags(&rxq->rspq.napi);
+ if (unlikely(!skb)) {
+ t4_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return;
+ }
+
+ copy_frags(skb, gl, RX_PKT_PAD);
+ skb->len = gl->tot_len - RX_PKT_PAD;
+ skb->data_len = skb->len;
+ skb->truesize += skb->data_len;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rxq->rspq.idx);
+ if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+
+ if (unlikely(pkt->vlan_ex)) {
+ __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
+ rxq->stats.vlan_ex++;
+ }
+ ret = napi_gro_frags(&rxq->rspq.napi);
+ if (ret == GRO_HELD)
+ rxq->stats.lro_pkts++;
+ else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
+ rxq->stats.lro_merged++;
+ rxq->stats.pkts++;
+ rxq->stats.rx_cso++;
+}
+
+/**
+ * t4_ethrx_handler - process an ingress ethernet packet
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the RX_PKT message
+ * @si: the gather list of packet fragments
+ *
+ * Process an ingress ethernet packet and deliver it to the stack.
+ */
+int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *si)
+{
+ bool csum_ok;
+ struct sk_buff *skb;
+ const struct cpl_rx_pkt *pkt;
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+
+ if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
+ return handle_trace_pkt(q->adap, si);
+
+ pkt = (const struct cpl_rx_pkt *)rsp;
+ csum_ok = pkt->csum_calc && !pkt->err_vec;
+ if ((pkt->l2info & htonl(RXF_TCP)) &&
+ (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
+ do_gro(rxq, si, pkt);
+ return 0;
+ }
+
+ skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
+ if (unlikely(!skb)) {
+ t4_pktgl_free(si);
+ rxq->stats.rx_drops++;
+ return 0;
+ }
+
+ __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */
+ skb->protocol = eth_type_trans(skb, q->netdev);
+ skb_record_rx_queue(skb, q->idx);
+ if (skb->dev->features & NETIF_F_RXHASH)
+ skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+
+ rxq->stats.pkts++;
+
+ if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
+ (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+ if (!pkt->ip_frag) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ rxq->stats.rx_cso++;
+ } else if (pkt->l2info & htonl(RXF_IP)) {
+ __sum16 c = (__force __sum16)pkt->csum;
+ skb->csum = csum_unfold(c);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ rxq->stats.rx_cso++;
+ }
+ } else
+ skb_checksum_none_assert(skb);
+
+ if (unlikely(pkt->vlan_ex)) {
+ __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
+ rxq->stats.vlan_ex++;
+ }
+ netif_receive_skb(skb);
+ return 0;
+}
+
+/**
+ * restore_rx_bufs - put back a packet's Rx buffers
+ * @si: the packet gather list
+ * @q: the SGE free list
+ * @frags: number of FL buffers to restore
+ *
+ * Puts back on an FL the Rx buffers associated with @si. The buffers
+ * have already been unmapped and are left unmapped, we mark them so to
+ * prevent further unmapping attempts.
+ *
+ * This function undoes a series of @unmap_rx_buf calls when we find out
+ * that the current packet can't be processed right away afterall and we
+ * need to come back to it later. This is a very rare event and there's
+ * no effort to make this particularly efficient.
+ */
+static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
+ int frags)
+{
+ struct rx_sw_desc *d;
+
+ while (frags--) {
+ if (q->cidx == 0)
+ q->cidx = q->size - 1;
+ else
+ q->cidx--;
+ d = &q->sdesc[q->cidx];
+ d->page = si->frags[frags].page;
+ d->dma_addr |= RX_UNMAPPED_BUF;
+ q->avail++;
+ }
+}
+
+/**
+ * is_new_response - check if a response is newly written
+ * @r: the response descriptor
+ * @q: the response queue
+ *
+ * Returns true if a response descriptor contains a yet unprocessed
+ * response.
+ */
+static inline bool is_new_response(const struct rsp_ctrl *r,
+ const struct sge_rspq *q)
+{
+ return RSPD_GEN(r->type_gen) == q->gen;
+}
+
+/**
+ * rspq_next - advance to the next entry in a response queue
+ * @q: the queue
+ *
+ * Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *q)
+{
+ q->cur_desc = (void *)q->cur_desc + q->iqe_len;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->gen ^= 1;
+ q->cur_desc = q->desc;
+ }
+}
+
+/**
+ * process_responses - process responses from an SGE response queue
+ * @q: the ingress queue to process
+ * @budget: how many responses can be processed in this round
+ *
+ * Process responses from an SGE response queue up to the supplied budget.
+ * Responses include received packets as well as control messages from FW
+ * or HW.
+ *
+ * Additionally choose the interrupt holdoff time for the next interrupt
+ * on this queue. If the system is under memory shortage use a fairly
+ * long delay to help recovery.
+ */
+static int process_responses(struct sge_rspq *q, int budget)
+{
+ int ret, rsp_type;
+ int budget_left = budget;
+ const struct rsp_ctrl *rc;
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+
+ while (likely(budget_left)) {
+ rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
+ if (!is_new_response(rc, q))
+ break;
+
+ rmb();
+ rsp_type = RSPD_TYPE(rc->type_gen);
+ if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ struct page_frag *fp;
+ struct pkt_gl si;
+ const struct rx_sw_desc *rsd;
+ u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
+
+ if (len & RSPD_NEWBUF) {
+ if (likely(q->offset > 0)) {
+ free_rx_bufs(q->adap, &rxq->fl, 1);
+ q->offset = 0;
+ }
+ len = RSPD_LEN(len);
+ }
+ si.tot_len = len;
+
+ /* gather packet fragments */
+ for (frags = 0, fp = si.frags; ; frags++, fp++) {
+ rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+ bufsz = get_buf_size(rsd);
+ fp->page = rsd->page;
+ fp->offset = q->offset;
+ fp->size = min(bufsz, len);
+ len -= fp->size;
+ if (!len)
+ break;
+ unmap_rx_buf(q->adap, &rxq->fl);
+ }
+
+ /*
+ * Last buffer remains mapped so explicitly make it
+ * coherent for CPU access.
+ */
+ dma_sync_single_for_cpu(q->adap->pdev_dev,
+ get_buf_addr(rsd),
+ fp->size, DMA_FROM_DEVICE);
+
+ si.va = page_address(si.frags[0].page) +
+ si.frags[0].offset;
+ prefetch(si.va);
+
+ si.nfrags = frags + 1;
+ ret = q->handler(q, q->cur_desc, &si);
+ if (likely(ret == 0))
+ q->offset += ALIGN(fp->size, FL_ALIGN);
+ else
+ restore_rx_bufs(&si, &rxq->fl, frags);
+ } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ ret = q->handler(q, q->cur_desc, NULL);
+ } else {
+ ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
+ }
+
+ if (unlikely(ret)) {
+ /* couldn't process descriptor, back off for recovery */
+ q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+ break;
+ }
+
+ rspq_next(q);
+ budget_left--;
+ }
+
+ if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
+ __refill_fl(q->adap, &rxq->fl);
+ return budget - budget_left;
+}
+
+/**
+ * napi_rx_handler - the NAPI handler for Rx processing
+ * @napi: the napi instance
+ * @budget: how many packets we can process in this round
+ *
+ * Handler for new data events when using NAPI. This does not need any
+ * locking or protection from interrupts as data interrupts are off at
+ * this point and other adapter interrupts do not interfere (the latter
+ * in not a concern at all with MSI-X as non-data interrupts then have
+ * a separate handler).
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+ unsigned int params;
+ struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
+ int work_done = process_responses(q, budget);
+
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
+ params = q->next_intr_params;
+ q->next_intr_params = q->intr_params;
+ } else
+ params = QINTR_TIMER_IDX(7);
+
+ t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
+ INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
+ return work_done;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue.
+ */
+irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
+{
+ struct sge_rspq *q = cookie;
+
+ napi_schedule(&q->napi);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Process the indirect interrupt entries in the interrupt queue and kick off
+ * NAPI for each queue that has generated an entry.
+ */
+static unsigned int process_intrq(struct adapter *adap)
+{
+ unsigned int credits;
+ const struct rsp_ctrl *rc;
+ struct sge_rspq *q = &adap->sge.intrq;
+
+ spin_lock(&adap->sge.intrq_lock);
+ for (credits = 0; ; credits++) {
+ rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
+ if (!is_new_response(rc, q))
+ break;
+
+ rmb();
+ if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
+ unsigned int qid = ntohl(rc->pldbuflen_qid);
+
+ qid -= adap->sge.ingr_start;
+ napi_schedule(&adap->sge.ingr_map[qid]->napi);
+ }
+
+ rspq_next(q);
+ }
+
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
+ INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
+ spin_unlock(&adap->sge.intrq_lock);
+ return credits;
+}
+
+/*
+ * The MSI interrupt handler, which handles data events from SGE response queues
+ * as well as error and other async events as they all use the same MSI vector.
+ */
+static irqreturn_t t4_intr_msi(int irq, void *cookie)
+{
+ struct adapter *adap = cookie;
+
+ t4_slow_intr_handler(adap);
+ process_intrq(adap);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt handler for legacy INTx interrupts.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt line.
+ */
+static irqreturn_t t4_intr_intx(int irq, void *cookie)
+{
+ struct adapter *adap = cookie;
+
+ t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
+ if (t4_slow_intr_handler(adap) | process_intrq(adap))
+ return IRQ_HANDLED;
+ return IRQ_NONE; /* probably shared interrupt */
+}
+
+/**
+ * t4_intr_handler - select the top-level interrupt handler
+ * @adap: the adapter
+ *
+ * Selects the top-level interrupt handler based on the type of interrupts
+ * (MSI-X, MSI, or INTx).
+ */
+irq_handler_t t4_intr_handler(struct adapter *adap)
+{
+ if (adap->flags & USING_MSIX)
+ return t4_sge_intr_msix;
+ if (adap->flags & USING_MSI)
+ return t4_intr_msi;
+ return t4_intr_intx;
+}
+
+static void sge_rx_timer_cb(unsigned long data)
+{
+ unsigned long m;
+ unsigned int i, cnt[2];
+ struct adapter *adap = (struct adapter *)data;
+ struct sge *s = &adap->sge;
+
+ for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
+ for (m = s->starving_fl[i]; m; m &= m - 1) {
+ struct sge_eth_rxq *rxq;
+ unsigned int id = __ffs(m) + i * BITS_PER_LONG;
+ struct sge_fl *fl = s->egr_map[id];
+
+ clear_bit(id, s->starving_fl);
+ smp_mb__after_clear_bit();
+
+ if (fl_starving(fl)) {
+ rxq = container_of(fl, struct sge_eth_rxq, fl);
+ if (napi_reschedule(&rxq->rspq.napi))
+ fl->starving++;
+ else
+ set_bit(id, s->starving_fl);
+ }
+ }
+
+ t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
+ cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
+ cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+
+ for (i = 0; i < 2; i++)
+ if (cnt[i] >= s->starve_thres) {
+ if (s->idma_state[i] || cnt[i] == 0xffffffff)
+ continue;
+ s->idma_state[i] = 1;
+ t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
+ m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
+ dev_warn(adap->pdev_dev,
+ "SGE idma%u starvation detected for "
+ "queue %lu\n", i, m & 0xffff);
+ } else if (s->idma_state[i])
+ s->idma_state[i] = 0;
+
+ mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
+}
+
+static void sge_tx_timer_cb(unsigned long data)
+{
+ unsigned long m;
+ unsigned int i, budget;
+ struct adapter *adap = (struct adapter *)data;
+ struct sge *s = &adap->sge;
+
+ for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
+ for (m = s->txq_maperr[i]; m; m &= m - 1) {
+ unsigned long id = __ffs(m) + i * BITS_PER_LONG;
+ struct sge_ofld_txq *txq = s->egr_map[id];
+
+ clear_bit(id, s->txq_maperr);
+ tasklet_schedule(&txq->qresume_tsk);
+ }
+
+ budget = MAX_TIMER_TX_RECLAIM;
+ i = s->ethtxq_rover;
+ do {
+ struct sge_eth_txq *q = &s->ethtxq[i];
+
+ if (q->q.in_use &&
+ time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
+ __netif_tx_trylock(q->txq)) {
+ int avail = reclaimable(&q->q);
+
+ if (avail) {
+ if (avail > budget)
+ avail = budget;
+
+ free_tx_desc(adap, &q->q, avail, true);
+ q->q.in_use -= avail;
+ budget -= avail;
+ }
+ __netif_tx_unlock(q->txq);
+ }
+
+ if (++i >= s->ethqsets)
+ i = 0;
+ } while (budget && i != s->ethtxq_rover);
+ s->ethtxq_rover = i;
+ mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
+}
+
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+ struct net_device *dev, int intr_idx,
+ struct sge_fl *fl, rspq_handler_t hnd)
+{
+ int ret, flsz = 0;
+ struct fw_iq_cmd c;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* Size needs to be multiple of 16, including status entry. */
+ iq->size = roundup(iq->size, 16);
+
+ iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
+ &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
+ if (!iq->desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
+ FW_LEN16(c));
+ c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
+ FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
+ FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
+ FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
+ -intr_idx - 1));
+ c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ FW_IQ_CMD_IQGTSMODE |
+ FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
+ FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
+ c.iqsize = htons(iq->size);
+ c.iqaddr = cpu_to_be64(iq->phys_addr);
+
+ if (fl) {
+ fl->size = roundup(fl->size, 8);
+ fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
+ sizeof(struct rx_sw_desc), &fl->addr,
+ &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
+ if (!fl->desc)
+ goto fl_nomem;
+
+ flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
+ FW_IQ_CMD_FL0FETCHRO(1) |
+ FW_IQ_CMD_FL0DATARO(1) |
+ FW_IQ_CMD_FL0PADEN);
+ c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
+ FW_IQ_CMD_FL0FBMAX(3));
+ c.fl0size = htons(flsz);
+ c.fl0addr = cpu_to_be64(fl->addr);
+ }
+
+ ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ if (ret)
+ goto err;
+
+ netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+ iq->cur_desc = iq->desc;
+ iq->cidx = 0;
+ iq->gen = 1;
+ iq->next_intr_params = iq->intr_params;
+ iq->cntxt_id = ntohs(c.iqid);
+ iq->abs_id = ntohs(c.physiqid);
+ iq->size--; /* subtract status entry */
+ iq->adap = adap;
+ iq->netdev = dev;
+ iq->handler = hnd;
+
+ /* set offset to -1 to distinguish ingress queues without FL */
+ iq->offset = fl ? 0 : -1;
+
+ adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
+
+ if (fl) {
+ fl->cntxt_id = ntohs(c.fl0id);
+ fl->avail = fl->pend_cred = 0;
+ fl->pidx = fl->cidx = 0;
+ fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
+ adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
+ refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
+ }
+ return 0;
+
+fl_nomem:
+ ret = -ENOMEM;
+err:
+ if (iq->desc) {
+ dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
+ iq->desc, iq->phys_addr);
+ iq->desc = NULL;
+ }
+ if (fl && fl->desc) {
+ kfree(fl->sdesc);
+ fl->sdesc = NULL;
+ dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
+ fl->desc, fl->addr);
+ fl->desc = NULL;
+ }
+ return ret;
+}
+
+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
+{
+ q->in_use = 0;
+ q->cidx = q->pidx = 0;
+ q->stops = q->restarts = 0;
+ q->stat = (void *)&q->desc[q->size];
+ q->cntxt_id = id;
+ adap->sge.egr_map[id - adap->sge.egr_start] = q;
+}
+
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ struct net_device *dev, struct netdev_queue *netdevq,
+ unsigned int iqid)
+{
+ int ret, nentries;
+ struct fw_eq_eth_cmd c;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* Add status entries */
+ nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+ txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
+ sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
+ &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+ netdev_queue_numa_node_read(netdevq));
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
+ FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
+ c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
+ c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
+ FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_ETH_CMD_FETCHRO(1) |
+ FW_EQ_ETH_CMD_IQID(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
+ FW_EQ_ETH_CMD_FBMAX(3) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
+ FW_EQ_ETH_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ if (ret) {
+ kfree(txq->q.sdesc);
+ txq->q.sdesc = NULL;
+ dma_free_coherent(adap->pdev_dev,
+ nentries * sizeof(struct tx_desc),
+ txq->q.desc, txq->q.phys_addr);
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
+ txq->txq = netdevq;
+ txq->tso = txq->tx_cso = txq->vlan_ins = 0;
+ txq->mapping_err = 0;
+ return 0;
+}
+
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int cmplqid)
+{
+ int ret, nentries;
+ struct fw_eq_ctrl_cmd c;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* Add status entries */
+ nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+ txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
+ sizeof(struct tx_desc), 0, &txq->q.phys_addr,
+ NULL, 0, NUMA_NO_NODE);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_EQ_CTRL_CMD_PFN(adap->fn) |
+ FW_EQ_CTRL_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
+ FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
+ c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
+ c.physeqid_pkd = htonl(0);
+ c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
+ FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_CTRL_CMD_FETCHRO |
+ FW_EQ_CTRL_CMD_IQID(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
+ FW_EQ_CTRL_CMD_FBMAX(3) |
+ FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
+ FW_EQ_CTRL_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ if (ret) {
+ dma_free_coherent(adap->pdev_dev,
+ nentries * sizeof(struct tx_desc),
+ txq->q.desc, txq->q.phys_addr);
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
+ txq->adap = adap;
+ skb_queue_head_init(&txq->sendq);
+ tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
+ txq->full = 0;
+ return 0;
+}
+
+int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
+ struct net_device *dev, unsigned int iqid)
+{
+ int ret, nentries;
+ struct fw_eq_ofld_cmd c;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* Add status entries */
+ nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+ txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
+ sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
+ &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+ NUMA_NO_NODE);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_EQ_OFLD_CMD_PFN(adap->fn) |
+ FW_EQ_OFLD_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
+ FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
+ c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
+ FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_OFLD_CMD_FETCHRO(1) |
+ FW_EQ_OFLD_CMD_IQID(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
+ FW_EQ_OFLD_CMD_FBMAX(3) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
+ FW_EQ_OFLD_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ if (ret) {
+ kfree(txq->q.sdesc);
+ txq->q.sdesc = NULL;
+ dma_free_coherent(adap->pdev_dev,
+ nentries * sizeof(struct tx_desc),
+ txq->q.desc, txq->q.phys_addr);
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
+ txq->adap = adap;
+ skb_queue_head_init(&txq->sendq);
+ tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
+ txq->full = 0;
+ txq->mapping_err = 0;
+ return 0;
+}
+
+static void free_txq(struct adapter *adap, struct sge_txq *q)
+{
+ dma_free_coherent(adap->pdev_dev,
+ q->size * sizeof(struct tx_desc) + STAT_LEN,
+ q->desc, q->phys_addr);
+ q->cntxt_id = 0;
+ q->sdesc = NULL;
+ q->desc = NULL;
+}
+
+static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
+ struct sge_fl *fl)
+{
+ unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
+
+ adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
+ t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
+ rq->cntxt_id, fl_id, 0xffff);
+ dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
+ rq->desc, rq->phys_addr);
+ netif_napi_del(&rq->napi);
+ rq->netdev = NULL;
+ rq->cntxt_id = rq->abs_id = 0;
+ rq->desc = NULL;
+
+ if (fl) {
+ free_rx_bufs(adap, fl, fl->avail);
+ dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
+ fl->desc, fl->addr);
+ kfree(fl->sdesc);
+ fl->sdesc = NULL;
+ fl->cntxt_id = 0;
+ fl->desc = NULL;
+ }
+}
+
+/**
+ * t4_free_sge_resources - free SGE resources
+ * @adap: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void t4_free_sge_resources(struct adapter *adap)
+{
+ int i;
+ struct sge_eth_rxq *eq = adap->sge.ethrxq;
+ struct sge_eth_txq *etq = adap->sge.ethtxq;
+ struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
+
+ /* clean up Ethernet Tx/Rx queues */
+ for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+ if (eq->rspq.desc)
+ free_rspq_fl(adap, &eq->rspq, &eq->fl);
+ if (etq->q.desc) {
+ t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
+ etq->q.cntxt_id);
+ free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+ kfree(etq->q.sdesc);
+ free_txq(adap, &etq->q);
+ }
+ }
+
+ /* clean up RDMA and iSCSI Rx queues */
+ for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
+ if (oq->rspq.desc)
+ free_rspq_fl(adap, &oq->rspq, &oq->fl);
+ }
+ for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
+ if (oq->rspq.desc)
+ free_rspq_fl(adap, &oq->rspq, &oq->fl);
+ }
+
+ /* clean up offload Tx queues */
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
+ struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
+
+ if (q->q.desc) {
+ tasklet_kill(&q->qresume_tsk);
+ t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
+ q->q.cntxt_id);
+ free_tx_desc(adap, &q->q, q->q.in_use, false);
+ kfree(q->q.sdesc);
+ __skb_queue_purge(&q->sendq);
+ free_txq(adap, &q->q);
+ }
+ }
+
+ /* clean up control Tx queues */
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+ struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+ if (cq->q.desc) {
+ tasklet_kill(&cq->qresume_tsk);
+ t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
+ cq->q.cntxt_id);
+ __skb_queue_purge(&cq->sendq);
+ free_txq(adap, &cq->q);
+ }
+ }
+
+ if (adap->sge.fw_evtq.desc)
+ free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+
+ if (adap->sge.intrq.desc)
+ free_rspq_fl(adap, &adap->sge.intrq, NULL);
+
+ /* clear the reverse egress queue map */
+ memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
+}
+
+void t4_sge_start(struct adapter *adap)
+{
+ adap->sge.ethtxq_rover = 0;
+ mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
+ mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
+}
+
+/**
+ * t4_sge_stop - disable SGE operation
+ * @adap: the adapter
+ *
+ * Stop tasklets and timers associated with the DMA engine. Note that
+ * this is effective only if measures have been taken to disable any HW
+ * events that may restart them.
+ */
+void t4_sge_stop(struct adapter *adap)
+{
+ int i;
+ struct sge *s = &adap->sge;
+
+ if (in_interrupt()) /* actions below require waiting */
+ return;
+
+ if (s->rx_timer.function)
+ del_timer_sync(&s->rx_timer);
+ if (s->tx_timer.function)
+ del_timer_sync(&s->tx_timer);
+
+ for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
+ struct sge_ofld_txq *q = &s->ofldtxq[i];
+
+ if (q->q.desc)
+ tasklet_kill(&q->qresume_tsk);
+ }
+ for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
+ struct sge_ctrl_txq *cq = &s->ctrlq[i];
+
+ if (cq->q.desc)
+ tasklet_kill(&cq->qresume_tsk);
+ }
+}
+
+/**
+ * t4_sge_init - initialize SGE
+ * @adap: the adapter
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queues here, instead the driver
+ * top-level must request them individually.
+ */
+void t4_sge_init(struct adapter *adap)
+{
+ unsigned int i, v;
+ struct sge *s = &adap->sge;
+ unsigned int fl_align_log = ilog2(FL_ALIGN);
+
+ t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
+ INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
+ INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
+ RXPKTCPLMODE |
+ (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
+
+ for (i = v = 0; i < 32; i += 4)
+ v |= (PAGE_SHIFT - 10) << i;
+ t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
+#if FL_PG_ORDER > 0
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
+#endif
+ t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
+ THRESHOLD_0(s->counter_val[0]) |
+ THRESHOLD_1(s->counter_val[1]) |
+ THRESHOLD_2(s->counter_val[2]) |
+ THRESHOLD_3(s->counter_val[3]));
+ t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
+ TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
+ TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
+ t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
+ TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
+ TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
+ t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
+ TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
+ TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
+ setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
+ setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
+ s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
+ s->idma_state[0] = s->idma_state[1] = 0;
+ spin_lock_init(&s->intrq_lock);
+}
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index d1ec111aebd..d1ec111aebd 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index c26b455f37d..c26b455f37d 100644
--- a/drivers/net/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index eb71b8250b9..eb71b8250b9 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 0adc5bcec7c..0adc5bcec7c 100644
--- a/drivers/net/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index edcfd7ec780..edcfd7ec780 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
diff --git a/drivers/net/cxgb4vf/Makefile b/drivers/net/ethernet/chelsio/cxgb4vf/Makefile
index d72ee26cb4c..d72ee26cb4c 100644
--- a/drivers/net/cxgb4vf/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/Makefile
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 594334d5c71..611396c4b38 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -144,7 +144,7 @@ struct sge_fl {
* An ingress packet gather list.
*/
struct pkt_gl {
- skb_frag_t frags[MAX_SKB_FRAGS];
+ struct page_frag frags[MAX_SKB_FRAGS];
void *va; /* virtual address of first byte */
unsigned int nfrags; /* # of fragments */
unsigned int tot_len; /* total length of fragments */
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index ec799139dfe..da9072bfca8 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2625,6 +2625,8 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
netdev->netdev_ops = &cxgb4vf_netdev_ops;
SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
new file mode 100644
index 00000000000..8d5d55ad102
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -0,0 +1,2453 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4fw_api.h"
+#include "../cxgb4/t4_msg.h"
+
+/*
+ * Decoded Adapter Parameters.
+ */
+static u32 FL_PG_ORDER; /* large page allocation size */
+static u32 STAT_LEN; /* length of status page at ring end */
+static u32 PKTSHIFT; /* padding between CPL and packet data */
+static u32 FL_ALIGN; /* response queue message alignment */
+
+/*
+ * Constants ...
+ */
+enum {
+ /*
+ * Egress Queue sizes, producer and consumer indices are all in units
+ * of Egress Context Units bytes. Note that as far as the hardware is
+ * concerned, the free list is an Egress Queue (the host produces free
+ * buffers which the hardware consumes) and free list entries are
+ * 64-bit PCI DMA addresses.
+ */
+ EQ_UNIT = SGE_EQ_IDXSIZE,
+ FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+ TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+
+ /*
+ * Max number of TX descriptors we clean up at a time. Should be
+ * modest as freeing skbs isn't cheap and it happens while holding
+ * locks. We just need to free packets faster than they arrive, we
+ * eventually catch up and keep the amortized cost reasonable.
+ */
+ MAX_TX_RECLAIM = 16,
+
+ /*
+ * Max number of Rx buffers we replenish at a time. Again keep this
+ * modest, allocating buffers isn't cheap either.
+ */
+ MAX_RX_REFILL = 16,
+
+ /*
+ * Period of the Rx queue check timer. This timer is infrequent as it
+ * has something to do only when the system experiences severe memory
+ * shortage.
+ */
+ RX_QCHECK_PERIOD = (HZ / 2),
+
+ /*
+ * Period of the TX queue check timer and the maximum number of TX
+ * descriptors to be reclaimed by the TX timer.
+ */
+ TX_QCHECK_PERIOD = (HZ / 2),
+ MAX_TIMER_TX_RECLAIM = 100,
+
+ /*
+ * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
+ * timer will attempt to refill it.
+ */
+ FL_STARVE_THRES = 4,
+
+ /*
+ * Suspend an Ethernet TX queue with fewer available descriptors than
+ * this. We always want to have room for a maximum sized packet:
+ * inline immediate data + MAX_SKB_FRAGS. This is the same as
+ * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
+ * (see that function and its helpers for a description of the
+ * calculation).
+ */
+ ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
+ ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
+ ((ETHTXQ_MAX_FRAGS-1) & 1) +
+ 2),
+ ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
+ ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
+
+ ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
+
+ /*
+ * Max TX descriptor space we allow for an Ethernet packet to be
+ * inlined into a WR. This is limited by the maximum value which
+ * we can specify for immediate data in the firmware Ethernet TX
+ * Work Request.
+ */
+ MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK,
+
+ /*
+ * Max size of a WR sent through a control TX queue.
+ */
+ MAX_CTRL_WR_LEN = 256,
+
+ /*
+ * Maximum amount of data which we'll ever need to inline into a
+ * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
+ */
+ MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
+ ? MAX_IMM_TX_PKT_LEN
+ : MAX_CTRL_WR_LEN),
+
+ /*
+ * For incoming packets less than RX_COPY_THRES, we copy the data into
+ * an skb rather than referencing the data. We allocate enough
+ * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
+ * of the data (header).
+ */
+ RX_COPY_THRES = 256,
+ RX_PULL_LEN = 128,
+
+ /*
+ * Main body length for sk_buffs used for RX Ethernet packets with
+ * fragments. Should be >= RX_PULL_LEN but possibly bigger to give
+ * pskb_may_pull() some room.
+ */
+ RX_SKB_LEN = 512,
+};
+
+/*
+ * Software state per TX descriptor.
+ */
+struct tx_sw_desc {
+ struct sk_buff *skb; /* socket buffer of TX data source */
+ struct ulptx_sgl *sgl; /* scatter/gather list in TX Queue */
+};
+
+/*
+ * Software state per RX Free List descriptor. We keep track of the allocated
+ * FL page, its size, and its PCI DMA address (if the page is mapped). The FL
+ * page size and its PCI DMA mapped state are stored in the low bits of the
+ * PCI DMA address as per below.
+ */
+struct rx_sw_desc {
+ struct page *page; /* Free List page buffer */
+ dma_addr_t dma_addr; /* PCI DMA address (if mapped) */
+ /* and flags (see below) */
+};
+
+/*
+ * The low bits of rx_sw_desc.dma_addr have special meaning. Note that the
+ * SGE also uses the low 4 bits to determine the size of the buffer. It uses
+ * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
+ * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
+ * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
+ * to the SGE. Thus, our software state of "is the buffer mapped for DMA" is
+ * maintained in an inverse sense so the hardware never sees that bit high.
+ */
+enum {
+ RX_LARGE_BUF = 1 << 0, /* buffer is SGE_FL_BUFFER_SIZE[1] */
+ RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
+};
+
+/**
+ * get_buf_addr - return DMA buffer address of software descriptor
+ * @sdesc: pointer to the software buffer descriptor
+ *
+ * Return the DMA buffer address of a software descriptor (stripping out
+ * our low-order flag bits).
+ */
+static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
+{
+ return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
+}
+
+/**
+ * is_buf_mapped - is buffer mapped for DMA?
+ * @sdesc: pointer to the software buffer descriptor
+ *
+ * Determine whether the buffer associated with a software descriptor in
+ * mapped for DMA or not.
+ */
+static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
+{
+ return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
+}
+
+/**
+ * need_skb_unmap - does the platform need unmapping of sk_buffs?
+ *
+ * Returns true if the platform needs sk_buff unmapping. The compiler
+ * optimizes away unnecessary code if this returns true.
+ */
+static inline int need_skb_unmap(void)
+{
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * txq_avail - return the number of available slots in a TX queue
+ * @tq: the TX queue
+ *
+ * Returns the number of available descriptors in a TX queue.
+ */
+static inline unsigned int txq_avail(const struct sge_txq *tq)
+{
+ return tq->size - 1 - tq->in_use;
+}
+
+/**
+ * fl_cap - return the capacity of a Free List
+ * @fl: the Free List
+ *
+ * Returns the capacity of a Free List. The capacity is less than the
+ * size because an Egress Queue Index Unit worth of descriptors needs to
+ * be left unpopulated, otherwise the Producer and Consumer indices PIDX
+ * and CIDX will match and the hardware will think the FL is empty.
+ */
+static inline unsigned int fl_cap(const struct sge_fl *fl)
+{
+ return fl->size - FL_PER_EQ_UNIT;
+}
+
+/**
+ * fl_starving - return whether a Free List is starving.
+ * @fl: the Free List
+ *
+ * Tests specified Free List to see whether the number of buffers
+ * available to the hardware has falled below our "starvation"
+ * threshold.
+ */
+static inline bool fl_starving(const struct sge_fl *fl)
+{
+ return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+}
+
+/**
+ * map_skb - map an skb for DMA to the device
+ * @dev: the egress net device
+ * @skb: the packet to map
+ * @addr: a pointer to the base of the DMA mapping array
+ *
+ * Map an skb for DMA to the device and return an array of DMA addresses.
+ */
+static int map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr)
+{
+ const skb_frag_t *fp, *end;
+ const struct skb_shared_info *si;
+
+ *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto out_err;
+
+ si = skb_shinfo(skb);
+ end = &si->frags[si->nr_frags];
+ for (fp = si->frags; fp < end; fp++) {
+ *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ while (fp-- > si->frags)
+ dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
+ dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+
+out_err:
+ return -ENOMEM;
+}
+
+static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
+ const struct ulptx_sgl *sgl, const struct sge_txq *tq)
+{
+ const struct ulptx_sge_pair *p;
+ unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+
+ if (likely(skb_headlen(skb)))
+ dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
+ be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
+ else {
+ dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
+ be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
+ nfrags--;
+ }
+
+ /*
+ * the complexity below is because of the possibility of a wrap-around
+ * in the middle of an SGL
+ */
+ for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
+ if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
+unmap:
+ dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+ be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
+ be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+ p++;
+ } else if ((u8 *)p == (u8 *)tq->stat) {
+ p = (const struct ulptx_sge_pair *)tq->desc;
+ goto unmap;
+ } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
+ const __be64 *addr = (const __be64 *)tq->desc;
+
+ dma_unmap_page(dev, be64_to_cpu(addr[0]),
+ be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(addr[1]),
+ be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+ p = (const struct ulptx_sge_pair *)&addr[2];
+ } else {
+ const __be64 *addr = (const __be64 *)tq->desc;
+
+ dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+ be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(addr[0]),
+ be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+ p = (const struct ulptx_sge_pair *)&addr[1];
+ }
+ }
+ if (nfrags) {
+ __be64 addr;
+
+ if ((u8 *)p == (u8 *)tq->stat)
+ p = (const struct ulptx_sge_pair *)tq->desc;
+ addr = ((u8 *)p + 16 <= (u8 *)tq->stat
+ ? p->addr[0]
+ : *(const __be64 *)tq->desc);
+ dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
+ DMA_TO_DEVICE);
+ }
+}
+
+/**
+ * free_tx_desc - reclaims TX descriptors and their buffers
+ * @adapter: the adapter
+ * @tq: the TX queue to reclaim descriptors from
+ * @n: the number of descriptors to reclaim
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims TX descriptors from an SGE TX queue and frees the associated
+ * TX buffers. Called with the TX queue lock held.
+ */
+static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
+ unsigned int n, bool unmap)
+{
+ struct tx_sw_desc *sdesc;
+ unsigned int cidx = tq->cidx;
+ struct device *dev = adapter->pdev_dev;
+
+ const int need_unmap = need_skb_unmap() && unmap;
+
+ sdesc = &tq->sdesc[cidx];
+ while (n--) {
+ /*
+ * If we kept a reference to the original TX skb, we need to
+ * unmap it from PCI DMA space (if required) and free it.
+ */
+ if (sdesc->skb) {
+ if (need_unmap)
+ unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
+ kfree_skb(sdesc->skb);
+ sdesc->skb = NULL;
+ }
+
+ sdesc++;
+ if (++cidx == tq->size) {
+ cidx = 0;
+ sdesc = tq->sdesc;
+ }
+ }
+ tq->cidx = cidx;
+}
+
+/*
+ * Return the number of reclaimable descriptors in a TX queue.
+ */
+static inline int reclaimable(const struct sge_txq *tq)
+{
+ int hw_cidx = be16_to_cpu(tq->stat->cidx);
+ int reclaimable = hw_cidx - tq->cidx;
+ if (reclaimable < 0)
+ reclaimable += tq->size;
+ return reclaimable;
+}
+
+/**
+ * reclaim_completed_tx - reclaims completed TX descriptors
+ * @adapter: the adapter
+ * @tq: the TX queue to reclaim completed descriptors from
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims TX descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. Called with the TX
+ * queue locked.
+ */
+static inline void reclaim_completed_tx(struct adapter *adapter,
+ struct sge_txq *tq,
+ bool unmap)
+{
+ int avail = reclaimable(tq);
+
+ if (avail) {
+ /*
+ * Limit the amount of clean up work we do at a time to keep
+ * the TX lock hold time O(1).
+ */
+ if (avail > MAX_TX_RECLAIM)
+ avail = MAX_TX_RECLAIM;
+
+ free_tx_desc(adapter, tq, avail, unmap);
+ tq->in_use -= avail;
+ }
+}
+
+/**
+ * get_buf_size - return the size of an RX Free List buffer.
+ * @sdesc: pointer to the software buffer descriptor
+ */
+static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+{
+ return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+ ? (PAGE_SIZE << FL_PG_ORDER)
+ : PAGE_SIZE;
+}
+
+/**
+ * free_rx_bufs - free RX buffers on an SGE Free List
+ * @adapter: the adapter
+ * @fl: the SGE Free List to free buffers from
+ * @n: how many buffers to free
+ *
+ * Release the next @n buffers on an SGE Free List RX queue. The
+ * buffers must be made inaccessible to hardware before calling this
+ * function.
+ */
+static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
+{
+ while (n--) {
+ struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
+
+ if (is_buf_mapped(sdesc))
+ dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
+ get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+ put_page(sdesc->page);
+ sdesc->page = NULL;
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+ fl->avail--;
+ }
+}
+
+/**
+ * unmap_rx_buf - unmap the current RX buffer on an SGE Free List
+ * @adapter: the adapter
+ * @fl: the SGE Free List
+ *
+ * Unmap the current buffer on an SGE Free List RX queue. The
+ * buffer must be made inaccessible to HW before calling this function.
+ *
+ * This is similar to @free_rx_bufs above but does not free the buffer.
+ * Do note that the FL still loses any further access to the buffer.
+ * This is used predominantly to "transfer ownership" of an FL buffer
+ * to another entity (typically an skb's fragment list).
+ */
+static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
+{
+ struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
+
+ if (is_buf_mapped(sdesc))
+ dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
+ get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+ sdesc->page = NULL;
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+ fl->avail--;
+}
+
+/**
+ * ring_fl_db - righ doorbell on free list
+ * @adapter: the adapter
+ * @fl: the Free List whose doorbell should be rung ...
+ *
+ * Tell the Scatter Gather Engine that there are new free list entries
+ * available.
+ */
+static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
+{
+ /*
+ * The SGE keeps track of its Producer and Consumer Indices in terms
+ * of Egress Queue Units so we can only tell it about integral numbers
+ * of multiples of Free List Entries per Egress Queue Units ...
+ */
+ if (fl->pend_cred >= FL_PER_EQ_UNIT) {
+ wmb();
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+ DBPRIO |
+ QID(fl->cntxt_id) |
+ PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
+ fl->pend_cred %= FL_PER_EQ_UNIT;
+ }
+}
+
+/**
+ * set_rx_sw_desc - initialize software RX buffer descriptor
+ * @sdesc: pointer to the softwore RX buffer descriptor
+ * @page: pointer to the page data structure backing the RX buffer
+ * @dma_addr: PCI DMA address (possibly with low-bit flags)
+ */
+static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
+ dma_addr_t dma_addr)
+{
+ sdesc->page = page;
+ sdesc->dma_addr = dma_addr;
+}
+
+/*
+ * Support for poisoning RX buffers ...
+ */
+#define POISON_BUF_VAL -1
+
+static inline void poison_buf(struct page *page, size_t sz)
+{
+#if POISON_BUF_VAL >= 0
+ memset(page_address(page), POISON_BUF_VAL, sz);
+#endif
+}
+
+/**
+ * refill_fl - refill an SGE RX buffer ring
+ * @adapter: the adapter
+ * @fl: the Free List ring to refill
+ * @n: the number of new buffers to allocate
+ * @gfp: the gfp flags for the allocations
+ *
+ * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
+ * allocated with the supplied gfp flags. The caller must assure that
+ * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
+ * EGRESS QUEUE UNITS_ indicates an empty Free List! Returns the number
+ * of buffers allocated. If afterwards the queue is found critically low,
+ * mark it as starving in the bitmap of starving FLs.
+ */
+static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
+ int n, gfp_t gfp)
+{
+ struct page *page;
+ dma_addr_t dma_addr;
+ unsigned int cred = fl->avail;
+ __be64 *d = &fl->desc[fl->pidx];
+ struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
+
+ /*
+ * Sanity: ensure that the result of adding n Free List buffers
+ * won't result in wrapping the SGE's Producer Index around to
+ * it's Consumer Index thereby indicating an empty Free List ...
+ */
+ BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
+
+ /*
+ * If we support large pages, prefer large buffers and fail over to
+ * small pages if we can't allocate large pages to satisfy the refill.
+ * If we don't support large pages, drop directly into the small page
+ * allocation code.
+ */
+ if (FL_PG_ORDER == 0)
+ goto alloc_small_pages;
+
+ while (n) {
+ page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+ FL_PG_ORDER);
+ if (unlikely(!page)) {
+ /*
+ * We've failed inour attempt to allocate a "large
+ * page". Fail over to the "small page" allocation
+ * below.
+ */
+ fl->large_alloc_failed++;
+ break;
+ }
+ poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+
+ dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
+ PAGE_SIZE << FL_PG_ORDER,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
+ /*
+ * We've run out of DMA mapping space. Free up the
+ * buffer and return with what we've managed to put
+ * into the free list. We don't want to fail over to
+ * the small page allocation below in this case
+ * because DMA mapping resources are typically
+ * critical resources once they become scarse.
+ */
+ __free_pages(page, FL_PG_ORDER);
+ goto out;
+ }
+ dma_addr |= RX_LARGE_BUF;
+ *d++ = cpu_to_be64(dma_addr);
+
+ set_rx_sw_desc(sdesc, page, dma_addr);
+ sdesc++;
+
+ fl->avail++;
+ if (++fl->pidx == fl->size) {
+ fl->pidx = 0;
+ sdesc = fl->sdesc;
+ d = fl->desc;
+ }
+ n--;
+ }
+
+alloc_small_pages:
+ while (n--) {
+ page = __netdev_alloc_page(adapter->port[0],
+ gfp | __GFP_NOWARN);
+ if (unlikely(!page)) {
+ fl->alloc_failed++;
+ break;
+ }
+ poison_buf(page, PAGE_SIZE);
+
+ dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
+ netdev_free_page(adapter->port[0], page);
+ break;
+ }
+ *d++ = cpu_to_be64(dma_addr);
+
+ set_rx_sw_desc(sdesc, page, dma_addr);
+ sdesc++;
+
+ fl->avail++;
+ if (++fl->pidx == fl->size) {
+ fl->pidx = 0;
+ sdesc = fl->sdesc;
+ d = fl->desc;
+ }
+ }
+
+out:
+ /*
+ * Update our accounting state to incorporate the new Free List
+ * buffers, tell the hardware about them and return the number of
+ * bufers which we were able to allocate.
+ */
+ cred = fl->avail - cred;
+ fl->pend_cred += cred;
+ ring_fl_db(adapter, fl);
+
+ if (unlikely(fl_starving(fl))) {
+ smp_wmb();
+ set_bit(fl->cntxt_id, adapter->sge.starving_fl);
+ }
+
+ return cred;
+}
+
+/*
+ * Refill a Free List to its capacity or the Maximum Refill Increment,
+ * whichever is smaller ...
+ */
+static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
+{
+ refill_fl(adapter, fl,
+ min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
+ GFP_ATOMIC);
+}
+
+/**
+ * alloc_ring - allocate resources for an SGE descriptor ring
+ * @dev: the PCI device's core device
+ * @nelem: the number of descriptors
+ * @hwsize: the size of each hardware descriptor
+ * @swsize: the size of each software descriptor
+ * @busaddrp: the physical PCI bus address of the allocated ring
+ * @swringp: return address pointer for software ring
+ * @stat_size: extra space in hardware ring for status information
+ *
+ * Allocates resources for an SGE descriptor ring, such as TX queues,
+ * free buffer lists, response queues, etc. Each SGE ring requires
+ * space for its hardware descriptors plus, optionally, space for software
+ * state associated with each hardware entry (the metadata). The function
+ * returns three values: the virtual address for the hardware ring (the
+ * return value of the function), the PCI bus address of the hardware
+ * ring (in *busaddrp), and the address of the software ring (in swringp).
+ * Both the hardware and software rings are returned zeroed out.
+ */
+static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
+ size_t swsize, dma_addr_t *busaddrp, void *swringp,
+ size_t stat_size)
+{
+ /*
+ * Allocate the hardware ring and PCI DMA bus address space for said.
+ */
+ size_t hwlen = nelem * hwsize + stat_size;
+ void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
+
+ if (!hwring)
+ return NULL;
+
+ /*
+ * If the caller wants a software ring, allocate it and return a
+ * pointer to it in *swringp.
+ */
+ BUG_ON((swsize != 0) != (swringp != NULL));
+ if (swsize) {
+ void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
+
+ if (!swring) {
+ dma_free_coherent(dev, hwlen, hwring, *busaddrp);
+ return NULL;
+ }
+ *(void **)swringp = swring;
+ }
+
+ /*
+ * Zero out the hardware ring and return its address as our function
+ * value.
+ */
+ memset(hwring, 0, hwlen);
+ return hwring;
+}
+
+/**
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ *
+ * Calculates the number of flits (8-byte units) needed for a Direct
+ * Scatter/Gather List that can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ /*
+ * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
+ * addresses. The DSGL Work Request starts off with a 32-bit DSGL
+ * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
+ * repeated sequences of { Length[i], Length[i+1], Address[i],
+ * Address[i+1] } (this ensures that all addresses are on 64-bit
+ * boundaries). If N is even, then Length[N+1] should be set to 0 and
+ * Address[N+1] is omitted.
+ *
+ * The following calculation incorporates all of the above. It's
+ * somewhat hard to follow but, briefly: the "+2" accounts for the
+ * first two flits which include the DSGL header, Length0 and
+ * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
+ * flits for every pair of the remaining N) +1 if (n-1) is odd; and
+ * finally the "+((n-1)&1)" adds the one remaining flit needed if
+ * (n-1) is odd ...
+ */
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
+/**
+ * flits_to_desc - returns the num of TX descriptors for the given flits
+ * @flits: the number of flits
+ *
+ * Returns the number of TX descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int flits)
+{
+ BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
+ return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
+}
+
+/**
+ * is_eth_imm - can an Ethernet packet be sent as immediate data?
+ * @skb: the packet
+ *
+ * Returns whether an Ethernet packet is small enough to fit completely as
+ * immediate data.
+ */
+static inline int is_eth_imm(const struct sk_buff *skb)
+{
+ /*
+ * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
+ * which does not accommodate immediate data. We could dike out all
+ * of the support code for immediate data but that would tie our hands
+ * too much if we ever want to enhace the firmware. It would also
+ * create more differences between the PF and VF Drivers.
+ */
+ return false;
+}
+
+/**
+ * calc_tx_flits - calculate the number of flits for a packet TX WR
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for a TX Work Request for the
+ * given Ethernet packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+{
+ unsigned int flits;
+
+ /*
+ * If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the skb data in the Work Request.
+ */
+ if (is_eth_imm(skb))
+ return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
+ sizeof(__be64));
+
+ /*
+ * Otherwise, we're going to have to construct a Scatter gather list
+ * of the skb body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embeded TX Packet Write CPL message.
+ */
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ if (skb_shinfo(skb)->gso_size)
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ else
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
+
+/**
+ * write_sgl - populate a Scatter/Gather List for a packet
+ * @skb: the packet
+ * @tq: the TX queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @start: start offset into skb main-body data to include in the SGL
+ * @addr: the list of DMA bus addresses for the SGL elements
+ *
+ * Generates a Scatter/Gather List for the buffers that make up a packet.
+ * The caller must provide adequate space for the SGL that will be written.
+ * The SGL includes all of the packet's page fragments and the data in its
+ * main body except for the first @start bytes. @pos must be 16-byte
+ * aligned and within a TX descriptor with available space. @end points
+ * write after the end of the SGL but does not account for any potential
+ * wrap around, i.e., @end > @tq->stat.
+ */
+static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
+{
+ unsigned int i, len;
+ struct ulptx_sge_pair *to;
+ const struct skb_shared_info *si = skb_shinfo(skb);
+ unsigned int nfrags = si->nr_frags;
+ struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
+
+ len = skb_headlen(skb) - start;
+ if (likely(len)) {
+ sgl->len0 = htonl(len);
+ sgl->addr0 = cpu_to_be64(addr[0] + start);
+ nfrags++;
+ } else {
+ sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
+ sgl->addr0 = cpu_to_be64(addr[1]);
+ }
+
+ sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE(nfrags));
+ if (likely(--nfrags == 0))
+ return;
+ /*
+ * Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
+
+ for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
+ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
+ to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
+ to->addr[0] = cpu_to_be64(addr[i]);
+ to->addr[1] = cpu_to_be64(addr[++i]);
+ }
+ if (nfrags) {
+ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
+ to->len[1] = cpu_to_be32(0);
+ to->addr[0] = cpu_to_be64(addr[i + 1]);
+ }
+ if (unlikely((u8 *)end > (u8 *)tq->stat)) {
+ unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = (u8 *)end - (u8 *)tq->stat;
+ memcpy(tq->desc, (u8 *)buf + part0, part1);
+ end = (void *)tq->desc + part1;
+ }
+ if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
+ *(u64 *)end = 0;
+}
+
+/**
+ * check_ring_tx_db - check and potentially ring a TX queue's doorbell
+ * @adapter: the adapter
+ * @tq: the TX queue
+ * @n: number of new descriptors to give to HW
+ *
+ * Ring the doorbel for a TX queue.
+ */
+static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
+ int n)
+{
+ /*
+ * Warn if we write doorbells with the wrong priority and write
+ * descriptors before telling HW.
+ */
+ WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO);
+ wmb();
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+ QID(tq->cntxt_id) | PIDX(n));
+}
+
+/**
+ * inline_tx_skb - inline a packet's data into TX descriptors
+ * @skb: the packet
+ * @tq: the TX queue where the packet will be inlined
+ * @pos: starting position in the TX queue to inline the packet
+ *
+ * Inline a packet's contents directly into TX descriptors, starting at
+ * the given position within the TX DMA ring.
+ * Most of the complexity of this operation is dealing with wrap arounds
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
+ void *pos)
+{
+ u64 *p;
+ int left = (void *)tq->stat - pos;
+
+ if (likely(skb->len <= left)) {
+ if (likely(!skb->data_len))
+ skb_copy_from_linear_data(skb, pos, skb->len);
+ else
+ skb_copy_bits(skb, 0, pos, skb->len);
+ pos += skb->len;
+ } else {
+ skb_copy_bits(skb, 0, pos, left);
+ skb_copy_bits(skb, left, tq->desc, skb->len - left);
+ pos = (void *)tq->desc + (skb->len - left);
+ }
+
+ /* 0-pad to multiple of 16 */
+ p = PTR_ALIGN(pos, 8);
+ if ((uintptr_t)p & 8)
+ *p = 0;
+}
+
+/*
+ * Figure out what HW csum a packet wants and return the appropriate control
+ * bits.
+ */
+static u64 hwcsum(const struct sk_buff *skb)
+{
+ int csum_type;
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (iph->version == 4) {
+ if (iph->protocol == IPPROTO_TCP)
+ csum_type = TX_CSUM_TCPIP;
+ else if (iph->protocol == IPPROTO_UDP)
+ csum_type = TX_CSUM_UDPIP;
+ else {
+nocsum:
+ /*
+ * unknown protocol, disable HW csum
+ * and hope a bad packet is detected
+ */
+ return TXPKT_L4CSUM_DIS;
+ }
+ } else {
+ /*
+ * this doesn't work with extension headers
+ */
+ const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
+
+ if (ip6h->nexthdr == IPPROTO_TCP)
+ csum_type = TX_CSUM_TCPIP6;
+ else if (ip6h->nexthdr == IPPROTO_UDP)
+ csum_type = TX_CSUM_UDPIP6;
+ else
+ goto nocsum;
+ }
+
+ if (likely(csum_type >= TX_CSUM_TCPIP))
+ return TXPKT_CSUM_TYPE(csum_type) |
+ TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
+ TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
+ else {
+ int start = skb_transport_offset(skb);
+
+ return TXPKT_CSUM_TYPE(csum_type) |
+ TXPKT_CSUM_START(start) |
+ TXPKT_CSUM_LOC(start + skb->csum_offset);
+ }
+}
+
+/*
+ * Stop an Ethernet TX queue and record that state change.
+ */
+static void txq_stop(struct sge_eth_txq *txq)
+{
+ netif_tx_stop_queue(txq->txq);
+ txq->q.stops++;
+}
+
+/*
+ * Advance our software state for a TX queue by adding n in use descriptors.
+ */
+static inline void txq_advance(struct sge_txq *tq, unsigned int n)
+{
+ tq->in_use += n;
+ tq->pidx += n;
+ if (tq->pidx >= tq->size)
+ tq->pidx -= tq->size;
+}
+
+/**
+ * t4vf_eth_xmit - add a packet to an Ethernet TX queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
+ */
+int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u32 wr_mid;
+ u64 cntrl, *end;
+ int qidx, credits;
+ unsigned int flits, ndesc;
+ struct adapter *adapter;
+ struct sge_eth_txq *txq;
+ const struct port_info *pi;
+ struct fw_eth_tx_pkt_vm_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ const struct skb_shared_info *ssi;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
+ sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) +
+ sizeof(wr->vlantci));
+
+ /*
+ * The chip minimum packet length is 10 octets but the firmware
+ * command that we are using requires that we copy the Ethernet header
+ * (including the VLAN tag) into the header so we reject anything
+ * smaller than that ...
+ */
+ if (unlikely(skb->len < fw_hdr_copy_len))
+ goto out_free;
+
+ /*
+ * Figure out which TX Queue we're going to use.
+ */
+ pi = netdev_priv(dev);
+ adapter = pi->adapter;
+ qidx = skb_get_queue_mapping(skb);
+ BUG_ON(qidx >= pi->nqsets);
+ txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
+
+ /*
+ * Take this opportunity to reclaim any TX Descriptors whose DMA
+ * transfers have completed.
+ */
+ reclaim_completed_tx(adapter, &txq->q, true);
+
+ /*
+ * Calculate the number of flits and TX Descriptors we're going to
+ * need along with how many TX Descriptors will be left over after
+ * we inject our Work Request.
+ */
+ flits = calc_tx_flits(skb);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&txq->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ /*
+ * Not enough room for this packet's Work Request. Stop the
+ * TX Queue and return a "busy" condition. The queue will get
+ * started later on when the firmware informs us that space
+ * has opened up.
+ */
+ txq_stop(txq);
+ dev_err(adapter->pdev_dev,
+ "%s: TX ring %u full while queue awake!\n",
+ dev->name, qidx);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!is_eth_imm(skb) &&
+ unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+ /*
+ * We need to map the skb into PCI DMA space (because it can't
+ * be in-lined directly into the Work Request) and the mapping
+ * operation failed. Record the error and drop the packet.
+ */
+ txq->mapping_err++;
+ goto out_free;
+ }
+
+ wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ /*
+ * After we're done injecting the Work Request for this
+ * packet, we'll be below our "stop threshold" so stop the TX
+ * Queue now and schedule a request for an SGE Egress Queue
+ * Update message. The queue will get started later on when
+ * the firmware processes this Work Request and sends us an
+ * Egress Queue Status Update message indicating that space
+ * has opened up.
+ */
+ txq_stop(txq);
+ wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
+ }
+
+ /*
+ * Start filling in our Work Request. Note that we do _not_ handle
+ * the WR Header wrapping around the TX Descriptor Ring. If our
+ * maximum header size ever exceeds one TX Descriptor, we'll need to
+ * do something else here.
+ */
+ BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
+ wr = (void *)&txq->q.desc[txq->q.pidx];
+ wr->equiq_to_len16 = cpu_to_be32(wr_mid);
+ wr->r3[0] = cpu_to_be64(0);
+ wr->r3[1] = cpu_to_be64(0);
+ skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ end = (u64 *)wr + flits;
+
+ /*
+ * If this is a Large Send Offload packet we'll put in an LSO CPL
+ * message with an encapsulated TX Packet CPL message. Otherwise we
+ * just use a TX Packet CPL message.
+ */
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+ bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN(sizeof(*lso) +
+ sizeof(*cpl)));
+ /*
+ * Fill in the LSO CPL message.
+ */
+ lso->lso_ctrl =
+ cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE |
+ LSO_LAST_SLICE |
+ LSO_IPV6(v6) |
+ LSO_ETHHDR_LEN(eth_xtra_len/4) |
+ LSO_IPHDR_LEN(l3hdr_len/4) |
+ LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = cpu_to_be16(0);
+ lso->mss = cpu_to_be16(ssi->gso_size);
+ lso->seqno_offset = cpu_to_be32(0);
+ lso->len = cpu_to_be32(skb->len);
+
+ /*
+ * Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(lso + 1);
+ cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN(l3hdr_len) |
+ TXPKT_ETHHDR_LEN(eth_xtra_len));
+ txq->tso++;
+ txq->tx_cso += ssi->gso_segs;
+ } else {
+ int len;
+
+ len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN(len));
+
+ /*
+ * Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(wr + 1);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ txq->tx_cso++;
+ } else
+ cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ }
+
+ /*
+ * If there's a VLAN tag present, add that to the list of things to
+ * do in this Work Request.
+ */
+ if (vlan_tx_tag_present(skb)) {
+ txq->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+ }
+
+ /*
+ * Fill in the TX Packet CPL message header.
+ */
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
+ TXPKT_INTF(pi->port_id) |
+ TXPKT_PF(0));
+ cpl->pack = cpu_to_be16(0);
+ cpl->len = cpu_to_be16(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+#ifdef T4_TRACE
+ T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
+ "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
+ ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
+#endif
+
+ /*
+ * Fill in the body of the TX Packet CPL message with either in-lined
+ * data or a Scatter/Gather List.
+ */
+ if (is_eth_imm(skb)) {
+ /*
+ * In-line the packet's data and free the skb since we don't
+ * need it any longer.
+ */
+ inline_tx_skb(skb, &txq->q, cpl + 1);
+ dev_kfree_skb(skb);
+ } else {
+ /*
+ * Write the skb's Scatter/Gather list into the TX Packet CPL
+ * message and retain a pointer to the skb so we can free it
+ * later when its DMA completes. (We store the skb pointer
+ * in the Software Descriptor corresponding to the last TX
+ * Descriptor used by the Work Request.)
+ *
+ * The retained skb will be freed when the corresponding TX
+ * Descriptors are reclaimed after their DMAs complete.
+ * However, this could take quite a while since, in general,
+ * the hardware is set up to be lazy about sending DMA
+ * completion notifications to us and we mostly perform TX
+ * reclaims in the transmit routine.
+ *
+ * This is good for performamce but means that we rely on new
+ * TX packets arriving to run the destructors of completed
+ * packets, which open up space in their sockets' send queues.
+ * Sometimes we do not get such new packets causing TX to
+ * stall. A single UDP transmitter is a good example of this
+ * situation. We have a clean up timer that periodically
+ * reclaims completed packets but it doesn't run often enough
+ * (nor do we want it to) to prevent lengthy stalls. A
+ * solution to this problem is to run the destructor early,
+ * after the packet is queued but before it's DMAd. A con is
+ * that we lie to socket memory accounting, but the amount of
+ * extra memory is reasonable (limited by the number of TX
+ * descriptors), the packets do actually get freed quickly by
+ * new packets almost always, and for protocols like TCP that
+ * wait for acks to really free up the data the extra memory
+ * is even less. On the positive side we run the destructors
+ * on the sending CPU rather than on a potentially different
+ * completing CPU, usually a good thing.
+ *
+ * Run the destructor before telling the DMA engine about the
+ * packet to make sure it doesn't complete and get freed
+ * prematurely.
+ */
+ struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
+ struct sge_txq *tq = &txq->q;
+ int last_desc;
+
+ /*
+ * If the Work Request header was an exact multiple of our TX
+ * Descriptor length, then it's possible that the starting SGL
+ * pointer lines up exactly with the end of our TX Descriptor
+ * ring. If that's the case, wrap around to the beginning
+ * here ...
+ */
+ if (unlikely((void *)sgl == (void *)tq->stat)) {
+ sgl = (void *)tq->desc;
+ end = (void *)((void *)tq->desc +
+ ((void *)end - (void *)tq->stat));
+ }
+
+ write_sgl(skb, tq, sgl, end, 0, addr);
+ skb_orphan(skb);
+
+ last_desc = tq->pidx + ndesc - 1;
+ if (last_desc >= tq->size)
+ last_desc -= tq->size;
+ tq->sdesc[last_desc].skb = skb;
+ tq->sdesc[last_desc].sgl = sgl;
+ }
+
+ /*
+ * Advance our internal TX Queue state, tell the hardware about
+ * the new TX descriptors and return success.
+ */
+ txq_advance(&txq->q, ndesc);
+ dev->trans_start = jiffies;
+ ring_tx_db(adapter, &txq->q, ndesc);
+ return NETDEV_TX_OK;
+
+out_free:
+ /*
+ * An error of some sort happened. Free the TX skb and tell the
+ * OS that we've "dealt" with the packet ...
+ */
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * copy_frags - copy fragments from gather list into skb_shared_info
+ * @skb: destination skb
+ * @gl: source internal packet gather list
+ * @offset: packet start offset in first page
+ *
+ * Copy an internal packet gather list into a Linux skb_shared_info
+ * structure.
+ */
+static inline void copy_frags(struct sk_buff *skb,
+ const struct pkt_gl *gl,
+ unsigned int offset)
+{
+ int i;
+
+ /* usually there's just one frag */
+ __skb_fill_page_desc(skb, 0, gl->frags[0].page,
+ gl->frags[0].offset + offset,
+ gl->frags[0].size - offset);
+ skb_shinfo(skb)->nr_frags = gl->nfrags;
+ for (i = 1; i < gl->nfrags; i++)
+ __skb_fill_page_desc(skb, i, gl->frags[i].page,
+ gl->frags[i].offset,
+ gl->frags[i].size);
+
+ /* get a reference to the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+}
+
+/**
+ * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ * @gl: the gather list
+ * @skb_len: size of sk_buff main body if it carries fragments
+ * @pull_len: amount of data to move to the sk_buff's main body
+ *
+ * Builds an sk_buff from the given packet gather list. Returns the
+ * sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len, unsigned int pull_len)
+{
+ struct sk_buff *skb;
+
+ /*
+ * If the ingress packet is small enough, allocate an skb large enough
+ * for all of the data and copy it inline. Otherwise, allocate an skb
+ * with enough room to pull in the header and reference the rest of
+ * the data via the skb fragment list.
+ *
+ * Below we rely on RX_COPY_THRES being less than the smallest Rx
+ * buff! size, which is expected since buffers are at least
+ * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one
+ * fragment.
+ */
+ if (gl->tot_len <= RX_COPY_THRES) {
+ /* small packets have only one fragment */
+ skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, gl->tot_len);
+ skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+ } else {
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+ copy_frags(skb, gl, pull_len);
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+ }
+
+out:
+ return skb;
+}
+
+/**
+ * t4vf_pktgl_free - free a packet gather list
+ * @gl: the gather list
+ *
+ * Releases the pages of a packet gather list. We do not own the last
+ * page on the list and do not free it.
+ */
+void t4vf_pktgl_free(const struct pkt_gl *gl)
+{
+ int frag;
+
+ frag = gl->nfrags - 1;
+ while (frag--)
+ put_page(gl->frags[frag].page);
+}
+
+/**
+ * do_gro - perform Generic Receive Offload ingress packet processing
+ * @rxq: ingress RX Ethernet Queue
+ * @gl: gather list for ingress packet
+ * @pkt: CPL header for last packet fragment
+ *
+ * Perform Generic Receive Offload (GRO) ingress packet processing.
+ * We use the standard Linux GRO interfaces for this.
+ */
+static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
+ const struct cpl_rx_pkt *pkt)
+{
+ int ret;
+ struct sk_buff *skb;
+
+ skb = napi_get_frags(&rxq->rspq.napi);
+ if (unlikely(!skb)) {
+ t4vf_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return;
+ }
+
+ copy_frags(skb, gl, PKTSHIFT);
+ skb->len = gl->tot_len - PKTSHIFT;
+ skb->data_len = skb->len;
+ skb->truesize += skb->data_len;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rxq->rspq.idx);
+
+ if (pkt->vlan_ex)
+ __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+ ret = napi_gro_frags(&rxq->rspq.napi);
+
+ if (ret == GRO_HELD)
+ rxq->stats.lro_pkts++;
+ else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
+ rxq->stats.lro_merged++;
+ rxq->stats.pkts++;
+ rxq->stats.rx_cso++;
+}
+
+/**
+ * t4vf_ethrx_handler - process an ingress ethernet packet
+ * @rspq: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the RX_PKT message
+ * @gl: the gather list of packet fragments
+ *
+ * Process an ingress ethernet packet and deliver it to the stack.
+ */
+int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ struct sk_buff *skb;
+ const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
+ bool csum_ok = pkt->csum_calc && !pkt->err_vec;
+ struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+
+ /*
+ * If this is a good TCP packet and we have Generic Receive Offload
+ * enabled, handle the packet in the GRO path.
+ */
+ if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
+ (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
+ !pkt->ip_frag) {
+ do_gro(rxq, gl, pkt);
+ return 0;
+ }
+
+ /*
+ * Convert the Packet Gather List into an skb.
+ */
+ skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+ if (unlikely(!skb)) {
+ t4vf_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return 0;
+ }
+ __skb_pull(skb, PKTSHIFT);
+ skb->protocol = eth_type_trans(skb, rspq->netdev);
+ skb_record_rx_queue(skb, rspq->idx);
+ rxq->stats.pkts++;
+
+ if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
+ !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ if (!pkt->ip_frag)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else {
+ __sum16 c = (__force __sum16)pkt->csum;
+ skb->csum = csum_unfold(c);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+ rxq->stats.rx_cso++;
+ } else
+ skb_checksum_none_assert(skb);
+
+ if (pkt->vlan_ex) {
+ rxq->stats.vlan_ex++;
+ __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+ }
+
+ netif_receive_skb(skb);
+
+ return 0;
+}
+
+/**
+ * is_new_response - check if a response is newly written
+ * @rc: the response control descriptor
+ * @rspq: the response queue
+ *
+ * Returns true if a response descriptor contains a yet unprocessed
+ * response.
+ */
+static inline bool is_new_response(const struct rsp_ctrl *rc,
+ const struct sge_rspq *rspq)
+{
+ return RSPD_GEN(rc->type_gen) == rspq->gen;
+}
+
+/**
+ * restore_rx_bufs - put back a packet's RX buffers
+ * @gl: the packet gather list
+ * @fl: the SGE Free List
+ * @nfrags: how many fragments in @si
+ *
+ * Called when we find out that the current packet, @si, can't be
+ * processed right away for some reason. This is a very rare event and
+ * there's no effort to make this suspension/resumption process
+ * particularly efficient.
+ *
+ * We implement the suspension by putting all of the RX buffers associated
+ * with the current packet back on the original Free List. The buffers
+ * have already been unmapped and are left unmapped, we mark them as
+ * unmapped in order to prevent further unmapping attempts. (Effectively
+ * this function undoes the series of @unmap_rx_buf calls which were done
+ * to create the current packet's gather list.) This leaves us ready to
+ * restart processing of the packet the next time we start processing the
+ * RX Queue ...
+ */
+static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
+ int frags)
+{
+ struct rx_sw_desc *sdesc;
+
+ while (frags--) {
+ if (fl->cidx == 0)
+ fl->cidx = fl->size - 1;
+ else
+ fl->cidx--;
+ sdesc = &fl->sdesc[fl->cidx];
+ sdesc->page = gl->frags[frags].page;
+ sdesc->dma_addr |= RX_UNMAPPED_BUF;
+ fl->avail++;
+ }
+}
+
+/**
+ * rspq_next - advance to the next entry in a response queue
+ * @rspq: the queue
+ *
+ * Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *rspq)
+{
+ rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
+ if (unlikely(++rspq->cidx == rspq->size)) {
+ rspq->cidx = 0;
+ rspq->gen ^= 1;
+ rspq->cur_desc = rspq->desc;
+ }
+}
+
+/**
+ * process_responses - process responses from an SGE response queue
+ * @rspq: the ingress response queue to process
+ * @budget: how many responses can be processed in this round
+ *
+ * Process responses from a Scatter Gather Engine response queue up to
+ * the supplied budget. Responses include received packets as well as
+ * control messages from firmware or hardware.
+ *
+ * Additionally choose the interrupt holdoff time for the next interrupt
+ * on this queue. If the system is under memory shortage use a fairly
+ * long delay to help recovery.
+ */
+int process_responses(struct sge_rspq *rspq, int budget)
+{
+ struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+ int budget_left = budget;
+
+ while (likely(budget_left)) {
+ int ret, rsp_type;
+ const struct rsp_ctrl *rc;
+
+ rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
+ if (!is_new_response(rc, rspq))
+ break;
+
+ /*
+ * Figure out what kind of response we've received from the
+ * SGE.
+ */
+ rmb();
+ rsp_type = RSPD_TYPE(rc->type_gen);
+ if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ struct page_frag *fp;
+ struct pkt_gl gl;
+ const struct rx_sw_desc *sdesc;
+ u32 bufsz, frag;
+ u32 len = be32_to_cpu(rc->pldbuflen_qid);
+
+ /*
+ * If we get a "new buffer" message from the SGE we
+ * need to move on to the next Free List buffer.
+ */
+ if (len & RSPD_NEWBUF) {
+ /*
+ * We get one "new buffer" message when we
+ * first start up a queue so we need to ignore
+ * it when our offset into the buffer is 0.
+ */
+ if (likely(rspq->offset > 0)) {
+ free_rx_bufs(rspq->adapter, &rxq->fl,
+ 1);
+ rspq->offset = 0;
+ }
+ len = RSPD_LEN(len);
+ }
+ gl.tot_len = len;
+
+ /*
+ * Gather packet fragments.
+ */
+ for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
+ BUG_ON(frag >= MAX_SKB_FRAGS);
+ BUG_ON(rxq->fl.avail == 0);
+ sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
+ bufsz = get_buf_size(sdesc);
+ fp->page = sdesc->page;
+ fp->offset = rspq->offset;
+ fp->size = min(bufsz, len);
+ len -= fp->size;
+ if (!len)
+ break;
+ unmap_rx_buf(rspq->adapter, &rxq->fl);
+ }
+ gl.nfrags = frag+1;
+
+ /*
+ * Last buffer remains mapped so explicitly make it
+ * coherent for CPU access and start preloading first
+ * cache line ...
+ */
+ dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
+ get_buf_addr(sdesc),
+ fp->size, DMA_FROM_DEVICE);
+ gl.va = (page_address(gl.frags[0].page) +
+ gl.frags[0].offset);
+ prefetch(gl.va);
+
+ /*
+ * Hand the new ingress packet to the handler for
+ * this Response Queue.
+ */
+ ret = rspq->handler(rspq, rspq->cur_desc, &gl);
+ if (likely(ret == 0))
+ rspq->offset += ALIGN(fp->size, FL_ALIGN);
+ else
+ restore_rx_bufs(&gl, &rxq->fl, frag);
+ } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ ret = rspq->handler(rspq, rspq->cur_desc, NULL);
+ } else {
+ WARN_ON(rsp_type > RSP_TYPE_CPL);
+ ret = 0;
+ }
+
+ if (unlikely(ret)) {
+ /*
+ * Couldn't process descriptor, back off for recovery.
+ * We use the SGE's last timer which has the longest
+ * interrupt coalescing value ...
+ */
+ const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
+ rspq->next_intr_params =
+ QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
+ break;
+ }
+
+ rspq_next(rspq);
+ budget_left--;
+ }
+
+ /*
+ * If this is a Response Queue with an associated Free List and
+ * at least two Egress Queue units available in the Free List
+ * for new buffer pointers, refill the Free List.
+ */
+ if (rspq->offset >= 0 &&
+ rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
+ __refill_fl(rspq->adapter, &rxq->fl);
+ return budget - budget_left;
+}
+
+/**
+ * napi_rx_handler - the NAPI handler for RX processing
+ * @napi: the napi instance
+ * @budget: how many packets we can process in this round
+ *
+ * Handler for new data events when using NAPI. This does not need any
+ * locking or protection from interrupts as data interrupts are off at
+ * this point and other adapter interrupts do not interfere (the latter
+ * in not a concern at all with MSI-X as non-data interrupts then have
+ * a separate handler).
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+ unsigned int intr_params;
+ struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
+ int work_done = process_responses(rspq, budget);
+
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
+ intr_params = rspq->next_intr_params;
+ rspq->next_intr_params = rspq->intr_params;
+ } else
+ intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
+
+ if (unlikely(work_done == 0))
+ rspq->unhandled_irqs++;
+
+ t4_write_reg(rspq->adapter,
+ T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ CIDXINC(work_done) |
+ INGRESSQID((u32)rspq->cntxt_id) |
+ SEINTARM(intr_params));
+ return work_done;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the NAPI case
+ * (i.e., response queue serviced by NAPI polling).
+ */
+irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
+{
+ struct sge_rspq *rspq = cookie;
+
+ napi_schedule(&rspq->napi);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Process the indirect interrupt entries in the interrupt queue and kick off
+ * NAPI for each queue that has generated an entry.
+ */
+static unsigned int process_intrq(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ struct sge_rspq *intrq = &s->intrq;
+ unsigned int work_done;
+
+ spin_lock(&adapter->sge.intrq_lock);
+ for (work_done = 0; ; work_done++) {
+ const struct rsp_ctrl *rc;
+ unsigned int qid, iq_idx;
+ struct sge_rspq *rspq;
+
+ /*
+ * Grab the next response from the interrupt queue and bail
+ * out if it's not a new response.
+ */
+ rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
+ if (!is_new_response(rc, intrq))
+ break;
+
+ /*
+ * If the response isn't a forwarded interrupt message issue a
+ * error and go on to the next response message. This should
+ * never happen ...
+ */
+ rmb();
+ if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
+ dev_err(adapter->pdev_dev,
+ "Unexpected INTRQ response type %d\n",
+ RSPD_TYPE(rc->type_gen));
+ continue;
+ }
+
+ /*
+ * Extract the Queue ID from the interrupt message and perform
+ * sanity checking to make sure it really refers to one of our
+ * Ingress Queues which is active and matches the queue's ID.
+ * None of these error conditions should ever happen so we may
+ * want to either make them fatal and/or conditionalized under
+ * DEBUG.
+ */
+ qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
+ iq_idx = IQ_IDX(s, qid);
+ if (unlikely(iq_idx >= MAX_INGQ)) {
+ dev_err(adapter->pdev_dev,
+ "Ingress QID %d out of range\n", qid);
+ continue;
+ }
+ rspq = s->ingr_map[iq_idx];
+ if (unlikely(rspq == NULL)) {
+ dev_err(adapter->pdev_dev,
+ "Ingress QID %d RSPQ=NULL\n", qid);
+ continue;
+ }
+ if (unlikely(rspq->abs_id != qid)) {
+ dev_err(adapter->pdev_dev,
+ "Ingress QID %d refers to RSPQ %d\n",
+ qid, rspq->abs_id);
+ continue;
+ }
+
+ /*
+ * Schedule NAPI processing on the indicated Response Queue
+ * and move on to the next entry in the Forwarded Interrupt
+ * Queue.
+ */
+ napi_schedule(&rspq->napi);
+ rspq_next(intrq);
+ }
+
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ CIDXINC(work_done) |
+ INGRESSQID(intrq->cntxt_id) |
+ SEINTARM(intrq->intr_params));
+
+ spin_unlock(&adapter->sge.intrq_lock);
+
+ return work_done;
+}
+
+/*
+ * The MSI interrupt handler handles data events from SGE response queues as
+ * well as error and other async events as they all use the same MSI vector.
+ */
+irqreturn_t t4vf_intr_msi(int irq, void *cookie)
+{
+ struct adapter *adapter = cookie;
+
+ process_intrq(adapter);
+ return IRQ_HANDLED;
+}
+
+/**
+ * t4vf_intr_handler - select the top-level interrupt handler
+ * @adapter: the adapter
+ *
+ * Selects the top-level interrupt handler based on the type of interrupts
+ * (MSI-X or MSI).
+ */
+irq_handler_t t4vf_intr_handler(struct adapter *adapter)
+{
+ BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+ if (adapter->flags & USING_MSIX)
+ return t4vf_sge_intr_msix;
+ else
+ return t4vf_intr_msi;
+}
+
+/**
+ * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
+ * @data: the adapter
+ *
+ * Runs periodically from a timer to perform maintenance of SGE RX queues.
+ *
+ * a) Replenishes RX queues that have run out due to memory shortage.
+ * Normally new RX buffers are added when existing ones are consumed but
+ * when out of memory a queue can become empty. We schedule NAPI to do
+ * the actual refill.
+ */
+static void sge_rx_timer_cb(unsigned long data)
+{
+ struct adapter *adapter = (struct adapter *)data;
+ struct sge *s = &adapter->sge;
+ unsigned int i;
+
+ /*
+ * Scan the "Starving Free Lists" flag array looking for any Free
+ * Lists in need of more free buffers. If we find one and it's not
+ * being actively polled, then bump its "starving" counter and attempt
+ * to refill it. If we're successful in adding enough buffers to push
+ * the Free List over the starving threshold, then we can clear its
+ * "starving" status.
+ */
+ for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
+ unsigned long m;
+
+ for (m = s->starving_fl[i]; m; m &= m - 1) {
+ unsigned int id = __ffs(m) + i * BITS_PER_LONG;
+ struct sge_fl *fl = s->egr_map[id];
+
+ clear_bit(id, s->starving_fl);
+ smp_mb__after_clear_bit();
+
+ /*
+ * Since we are accessing fl without a lock there's a
+ * small probability of a false positive where we
+ * schedule napi but the FL is no longer starving.
+ * No biggie.
+ */
+ if (fl_starving(fl)) {
+ struct sge_eth_rxq *rxq;
+
+ rxq = container_of(fl, struct sge_eth_rxq, fl);
+ if (napi_reschedule(&rxq->rspq.napi))
+ fl->starving++;
+ else
+ set_bit(id, s->starving_fl);
+ }
+ }
+ }
+
+ /*
+ * Reschedule the next scan for starving Free Lists ...
+ */
+ mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
+}
+
+/**
+ * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
+ * @data: the adapter
+ *
+ * Runs periodically from a timer to perform maintenance of SGE TX queues.
+ *
+ * b) Reclaims completed Tx packets for the Ethernet queues. Normally
+ * packets are cleaned up by new Tx packets, this timer cleans up packets
+ * when no new packets are being submitted. This is essential for pktgen,
+ * at least.
+ */
+static void sge_tx_timer_cb(unsigned long data)
+{
+ struct adapter *adapter = (struct adapter *)data;
+ struct sge *s = &adapter->sge;
+ unsigned int i, budget;
+
+ budget = MAX_TIMER_TX_RECLAIM;
+ i = s->ethtxq_rover;
+ do {
+ struct sge_eth_txq *txq = &s->ethtxq[i];
+
+ if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
+ int avail = reclaimable(&txq->q);
+
+ if (avail > budget)
+ avail = budget;
+
+ free_tx_desc(adapter, &txq->q, avail, true);
+ txq->q.in_use -= avail;
+ __netif_tx_unlock(txq->txq);
+
+ budget -= avail;
+ if (!budget)
+ break;
+ }
+
+ i++;
+ if (i >= s->ethqsets)
+ i = 0;
+ } while (i != s->ethtxq_rover);
+ s->ethtxq_rover = i;
+
+ /*
+ * If we found too many reclaimable packets schedule a timer in the
+ * near future to continue where we left off. Otherwise the next timer
+ * will be at its normal interval.
+ */
+ mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
+}
+
+/**
+ * t4vf_sge_alloc_rxq - allocate an SGE RX Queue
+ * @adapter: the adapter
+ * @rspq: pointer to to the new rxq's Response Queue to be filled in
+ * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
+ * @dev: the network device associated with the new rspq
+ * @intr_dest: MSI-X vector index (overriden in MSI mode)
+ * @fl: pointer to the new rxq's Free List to be filled in
+ * @hnd: the interrupt handler to invoke for the rspq
+ */
+int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
+ bool iqasynch, struct net_device *dev,
+ int intr_dest,
+ struct sge_fl *fl, rspq_handler_t hnd)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct fw_iq_cmd cmd, rpl;
+ int ret, iqandst, flsz = 0;
+
+ /*
+ * If we're using MSI interrupts and we're not initializing the
+ * Forwarded Interrupt Queue itself, then set up this queue for
+ * indirect interrupts to the Forwarded Interrupt Queue. Obviously
+ * the Forwarded Interrupt Queue must be set up before any other
+ * ingress queue ...
+ */
+ if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
+ iqandst = SGE_INTRDST_IQ;
+ intr_dest = adapter->sge.intrq.abs_id;
+ } else
+ iqandst = SGE_INTRDST_PCI;
+
+ /*
+ * Allocate the hardware ring for the Response Queue. The size needs
+ * to be a multiple of 16 which includes the mandatory status entry
+ * (regardless of whether the Status Page capabilities are enabled or
+ * not).
+ */
+ rspq->size = roundup(rspq->size, 16);
+ rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
+ 0, &rspq->phys_addr, NULL, 0);
+ if (!rspq->desc)
+ return -ENOMEM;
+
+ /*
+ * Fill in the Ingress Queue Command. Note: Ideally this code would
+ * be in t4vf_hw.c but there are so many parameters and dependencies
+ * on our Linux SGE state that we would end up having to pass tons of
+ * parameters. We'll have to think about how this might be migrated
+ * into OS-independent common code ...
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC |
+ FW_IQ_CMD_IQSTART(1) |
+ FW_LEN16(cmd));
+ cmd.type_to_iqandstindex =
+ cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
+ FW_IQ_CMD_IQASYNCH(iqasynch) |
+ FW_IQ_CMD_VIID(pi->viid) |
+ FW_IQ_CMD_IQANDST(iqandst) |
+ FW_IQ_CMD_IQANUS(1) |
+ FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) |
+ FW_IQ_CMD_IQANDSTINDEX(intr_dest));
+ cmd.iqdroprss_to_iqesize =
+ cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) |
+ FW_IQ_CMD_IQGTSMODE |
+ FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) |
+ FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4));
+ cmd.iqsize = cpu_to_be16(rspq->size);
+ cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
+
+ if (fl) {
+ /*
+ * Allocate the ring for the hardware free list (with space
+ * for its status page) along with the associated software
+ * descriptor ring. The free list size needs to be a multiple
+ * of the Egress Queue Unit.
+ */
+ fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
+ fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
+ sizeof(__be64), sizeof(struct rx_sw_desc),
+ &fl->addr, &fl->sdesc, STAT_LEN);
+ if (!fl->desc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Calculate the size of the hardware free list ring plus
+ * Status Page (which the SGE will place after the end of the
+ * free list ring) in Egress Queue Units.
+ */
+ flsz = (fl->size / FL_PER_EQ_UNIT +
+ STAT_LEN / EQ_UNIT);
+
+ /*
+ * Fill in all the relevant firmware Ingress Queue Command
+ * fields for the free list.
+ */
+ cmd.iqns_to_fl0congen =
+ cpu_to_be32(
+ FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) |
+ FW_IQ_CMD_FL0PACKEN |
+ FW_IQ_CMD_FL0PADEN);
+ cmd.fl0dcaen_to_fl0cidxfthresh =
+ cpu_to_be16(
+ FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) |
+ FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B));
+ cmd.fl0size = cpu_to_be16(flsz);
+ cmd.fl0addr = cpu_to_be64(fl->addr);
+ }
+
+ /*
+ * Issue the firmware Ingress Queue Command and extract the results if
+ * it completes successfully.
+ */
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret)
+ goto err;
+
+ netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
+ rspq->cur_desc = rspq->desc;
+ rspq->cidx = 0;
+ rspq->gen = 1;
+ rspq->next_intr_params = rspq->intr_params;
+ rspq->cntxt_id = be16_to_cpu(rpl.iqid);
+ rspq->abs_id = be16_to_cpu(rpl.physiqid);
+ rspq->size--; /* subtract status entry */
+ rspq->adapter = adapter;
+ rspq->netdev = dev;
+ rspq->handler = hnd;
+
+ /* set offset to -1 to distinguish ingress queues without FL */
+ rspq->offset = fl ? 0 : -1;
+
+ if (fl) {
+ fl->cntxt_id = be16_to_cpu(rpl.fl0id);
+ fl->avail = 0;
+ fl->pend_cred = 0;
+ fl->pidx = 0;
+ fl->cidx = 0;
+ fl->alloc_failed = 0;
+ fl->large_alloc_failed = 0;
+ fl->starving = 0;
+ refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
+ }
+
+ return 0;
+
+err:
+ /*
+ * An error occurred. Clean up our partial allocation state and
+ * return the error.
+ */
+ if (rspq->desc) {
+ dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
+ rspq->desc, rspq->phys_addr);
+ rspq->desc = NULL;
+ }
+ if (fl && fl->desc) {
+ kfree(fl->sdesc);
+ fl->sdesc = NULL;
+ dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
+ fl->desc, fl->addr);
+ fl->desc = NULL;
+ }
+ return ret;
+}
+
+/**
+ * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
+ * @adapter: the adapter
+ * @txq: pointer to the new txq to be filled in
+ * @devq: the network TX queue associated with the new txq
+ * @iqid: the relative ingress queue ID to which events relating to
+ * the new txq should be directed
+ */
+int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
+ struct net_device *dev, struct netdev_queue *devq,
+ unsigned int iqid)
+{
+ int ret, nentries;
+ struct fw_eq_eth_cmd cmd, rpl;
+ struct port_info *pi = netdev_priv(dev);
+
+ /*
+ * Calculate the size of the hardware TX Queue (including the Status
+ * Page on the end of the TX Queue) in units of TX Descriptors.
+ */
+ nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+ /*
+ * Allocate the hardware ring for the TX ring (with space for its
+ * status page) along with the associated software descriptor ring.
+ */
+ txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
+ sizeof(struct tx_desc),
+ sizeof(struct tx_sw_desc),
+ &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ /*
+ * Fill in the Egress Queue Command. Note: As with the direct use of
+ * the firmware Ingress Queue COmmand above in our RXQ allocation
+ * routine, ideally, this code would be in t4vf_hw.c. Again, we'll
+ * have to see if there's some reasonable way to parameterize it
+ * into the common code ...
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
+ FW_EQ_ETH_CMD_EQSTART |
+ FW_LEN16(cmd));
+ cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid));
+ cmd.fetchszm_to_iqid =
+ cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
+ FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
+ FW_EQ_ETH_CMD_IQID(iqid));
+ cmd.dcaen_to_eqsize =
+ cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) |
+ FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) |
+ FW_EQ_ETH_CMD_EQSIZE(nentries));
+ cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ /*
+ * Issue the firmware Egress Queue Command and extract the results if
+ * it completes successfully.
+ */
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret) {
+ /*
+ * The girmware Ingress Queue Command failed for some reason.
+ * Free up our partial allocation state and return the error.
+ */
+ kfree(txq->q.sdesc);
+ txq->q.sdesc = NULL;
+ dma_free_coherent(adapter->pdev_dev,
+ nentries * sizeof(struct tx_desc),
+ txq->q.desc, txq->q.phys_addr);
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ txq->q.in_use = 0;
+ txq->q.cidx = 0;
+ txq->q.pidx = 0;
+ txq->q.stat = (void *)&txq->q.desc[txq->q.size];
+ txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd));
+ txq->q.abs_id =
+ FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd));
+ txq->txq = devq;
+ txq->tso = 0;
+ txq->tx_cso = 0;
+ txq->vlan_ins = 0;
+ txq->q.stops = 0;
+ txq->q.restarts = 0;
+ txq->mapping_err = 0;
+ return 0;
+}
+
+/*
+ * Free the DMA map resources associated with a TX queue.
+ */
+static void free_txq(struct adapter *adapter, struct sge_txq *tq)
+{
+ dma_free_coherent(adapter->pdev_dev,
+ tq->size * sizeof(*tq->desc) + STAT_LEN,
+ tq->desc, tq->phys_addr);
+ tq->cntxt_id = 0;
+ tq->sdesc = NULL;
+ tq->desc = NULL;
+}
+
+/*
+ * Free the resources associated with a response queue (possibly including a
+ * free list).
+ */
+static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
+ struct sge_fl *fl)
+{
+ unsigned int flid = fl ? fl->cntxt_id : 0xffff;
+
+ t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
+ rspq->cntxt_id, flid, 0xffff);
+ dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
+ rspq->desc, rspq->phys_addr);
+ netif_napi_del(&rspq->napi);
+ rspq->netdev = NULL;
+ rspq->cntxt_id = 0;
+ rspq->abs_id = 0;
+ rspq->desc = NULL;
+
+ if (fl) {
+ free_rx_bufs(adapter, fl, fl->avail);
+ dma_free_coherent(adapter->pdev_dev,
+ fl->size * sizeof(*fl->desc) + STAT_LEN,
+ fl->desc, fl->addr);
+ kfree(fl->sdesc);
+ fl->sdesc = NULL;
+ fl->cntxt_id = 0;
+ fl->desc = NULL;
+ }
+}
+
+/**
+ * t4vf_free_sge_resources - free SGE resources
+ * @adapter: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void t4vf_free_sge_resources(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ struct sge_eth_rxq *rxq = s->ethrxq;
+ struct sge_eth_txq *txq = s->ethtxq;
+ struct sge_rspq *evtq = &s->fw_evtq;
+ struct sge_rspq *intrq = &s->intrq;
+ int qs;
+
+ for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
+ if (rxq->rspq.desc)
+ free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
+ if (txq->q.desc) {
+ t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
+ free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
+ kfree(txq->q.sdesc);
+ free_txq(adapter, &txq->q);
+ }
+ }
+ if (evtq->desc)
+ free_rspq_fl(adapter, evtq, NULL);
+ if (intrq->desc)
+ free_rspq_fl(adapter, intrq, NULL);
+}
+
+/**
+ * t4vf_sge_start - enable SGE operation
+ * @adapter: the adapter
+ *
+ * Start tasklets and timers associated with the DMA engine.
+ */
+void t4vf_sge_start(struct adapter *adapter)
+{
+ adapter->sge.ethtxq_rover = 0;
+ mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
+ mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
+}
+
+/**
+ * t4vf_sge_stop - disable SGE operation
+ * @adapter: the adapter
+ *
+ * Stop tasklets and timers associated with the DMA engine. Note that
+ * this is effective only if measures have been taken to disable any HW
+ * events that may restart them.
+ */
+void t4vf_sge_stop(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+
+ if (s->rx_timer.function)
+ del_timer_sync(&s->rx_timer);
+ if (s->tx_timer.function)
+ del_timer_sync(&s->tx_timer);
+}
+
+/**
+ * t4vf_sge_init - initialize SGE
+ * @adapter: the adapter
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queue sets here, instead the driver
+ * top-level must request those individually. We also do not enable DMA
+ * here, that should be done after the queues have been set up.
+ */
+int t4vf_sge_init(struct adapter *adapter)
+{
+ struct sge_params *sge_params = &adapter->params.sge;
+ u32 fl0 = sge_params->sge_fl_buffer_size[0];
+ u32 fl1 = sge_params->sge_fl_buffer_size[1];
+ struct sge *s = &adapter->sge;
+
+ /*
+ * Start by vetting the basic SGE parameters which have been set up by
+ * the Physical Function Driver. Ideally we should be able to deal
+ * with _any_ configuration. Practice is different ...
+ */
+ if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+ dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
+ fl0, fl1);
+ return -EINVAL;
+ }
+ if ((sge_params->sge_control & RXPKTCPLMODE) == 0) {
+ dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Now translate the adapter parameters into our internal forms.
+ */
+ if (fl1)
+ FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
+ STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64);
+ PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
+ FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+ SGE_INGPADBOUNDARY_SHIFT);
+
+ /*
+ * Set up tasklet timers.
+ */
+ setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
+ setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
+
+ /*
+ * Initialize Forwarded Interrupt Queue lock.
+ */
+ spin_lock_init(&s->intrq_lock);
+
+ return 0;
+}
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index a65c80aed1f..a65c80aed1f 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
diff --git a/drivers/net/cxgb4vf/t4vf_defs.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
index c7b127d9376..c7b127d9376 100644
--- a/drivers/net/cxgb4vf/t4vf_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index fe3fd3dad6f..fe3fd3dad6f 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
new file mode 100644
index 00000000000..6cbb81ccc02
--- /dev/null
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -0,0 +1,50 @@
+#
+# Cirrus network device configuration
+#
+
+config NET_VENDOR_CIRRUS
+ bool "Cirrus devices"
+ default y
+ depends on ISA || EISA || MACH_IXDP2351 || ARCH_IXDP2X01 \
+ || MACH_MX31ADS || MACH_QQ2440 || (ARM && ARCH_EP93XX)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Cirrus cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_CIRRUS
+
+config CS89x0
+ tristate "CS89x0 support"
+ depends on (ISA || EISA || MACH_IXDP2351 \
+ || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440)
+ ---help---
+ Support for CS89x0 chipset based Ethernet cards. If you have a
+ network (Ethernet) card of this type, say Y and read the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto> as well as
+ <file:Documentation/networking/cs89x0.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called cs89x0.
+
+config CS89x0_NONISA_IRQ
+ def_bool y
+ depends on CS89x0 != n
+ depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
+
+config EP93XX_ETH
+ tristate "EP93xx Ethernet support"
+ depends on ARM && ARCH_EP93XX
+ select NET_CORE
+ select MII
+ help
+ This is a driver for the ethernet hardware included in EP93xx CPUs.
+ Say Y if you are building a kernel for EP93xx based devices.
+
+endif # NET_VENDOR_CIRRUS
diff --git a/drivers/net/ethernet/cirrus/Makefile b/drivers/net/ethernet/cirrus/Makefile
new file mode 100644
index 00000000000..14bd77e0cb5
--- /dev/null
+++ b/drivers/net/ethernet/cirrus/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Cirrus network device drivers.
+#
+
+obj-$(CONFIG_CS89x0) += cs89x0.o
+obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o
diff --git a/drivers/net/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 537a4b2e202..f328da24c8f 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -488,7 +488,7 @@ static const struct net_device_ops net_ops = {
.ndo_tx_timeout = net_timeout,
.ndo_start_xmit = net_send_packet,
.ndo_get_stats = net_get_stats,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_set_mac_address = set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = net_poll_controller,
diff --git a/drivers/net/cs89x0.h b/drivers/net/ethernet/cirrus/cs89x0.h
index 91423b70bb4..91423b70bb4 100644
--- a/drivers/net/cs89x0.h
+++ b/drivers/net/ethernet/cirrus/cs89x0.h
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 4317af8d2f0..4317af8d2f0 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
diff --git a/drivers/net/ethernet/cisco/Kconfig b/drivers/net/ethernet/cisco/Kconfig
new file mode 100644
index 00000000000..94606f7ee13
--- /dev/null
+++ b/drivers/net/ethernet/cisco/Kconfig
@@ -0,0 +1,23 @@
+#
+# Cisco device configuration
+#
+
+config NET_VENDOR_CISCO
+ bool "Cisco devices"
+ default y
+ depends on PCI && INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Cisco cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_CISCO
+
+source "drivers/net/ethernet/cisco/enic/Kconfig"
+
+endif # NET_VENDOR_CISCO
diff --git a/drivers/net/ethernet/cisco/Makefile b/drivers/net/ethernet/cisco/Makefile
new file mode 100644
index 00000000000..6c7437bc4a9
--- /dev/null
+++ b/drivers/net/ethernet/cisco/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cisco device drivers.
+#
+
+obj-$(CONFIG_ENIC) += enic/
diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig
new file mode 100644
index 00000000000..9cc706a6cff
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/Kconfig
@@ -0,0 +1,9 @@
+#
+# Cisco device configuration
+#
+
+config ENIC
+ tristate "Cisco VIC Ethernet NIC Support"
+ depends on PCI && INET
+ ---help---
+ This enables the support for the Cisco VIC Ethernet card.
diff --git a/drivers/net/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index 9d4974bba24..9d4974bba24 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
diff --git a/drivers/net/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h
index d6dd1b4edf6..d6dd1b4edf6 100644
--- a/drivers/net/enic/cq_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_desc.h
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
index c2c0680a114..c2c0680a114 100644
--- a/drivers/net/enic/cq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
diff --git a/drivers/net/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index ce76d9a8ca6..fe0c29acdbe 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -32,7 +32,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.24"
+#define DRV_VERSION "2.1.1.28"
#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -49,6 +49,10 @@ struct enic_msix_entry {
void *devid;
};
+/* priv_flags */
+#define ENIC_SRIOV_ENABLED (1 << 0)
+
+/* enic port profile set flags */
#define ENIC_PORT_REQUEST_APPLIED (1 << 0)
#define ENIC_SET_REQUEST (1 << 1)
#define ENIC_SET_NAME (1 << 2)
@@ -83,12 +87,16 @@ struct enic {
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int flags;
+ unsigned int priv_flags;
unsigned int mc_count;
unsigned int uc_count;
u32 port_mtu;
u32 rx_coalesce_usecs;
u32 tx_coalesce_usecs;
- struct enic_port_profile pp;
+#ifdef CONFIG_PCI_IOV
+ u32 num_vfs;
+#endif
+ struct enic_port_profile *pp;
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
@@ -120,5 +128,8 @@ static inline struct device *enic_get_dev(struct enic *enic)
}
void enic_reset_addr_lists(struct enic *enic);
+int enic_sriov_enabled(struct enic *enic);
+int enic_is_valid_vf(struct enic *enic, int vf);
+int enic_is_dynamic(struct enic *enic);
#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index fd6247b3c0e..fd6247b3c0e 100644
--- a/drivers/net/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index ff8e87fdfc1..1f83a4747ba 100644
--- a/drivers/net/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -19,6 +19,25 @@
#ifndef _ENIC_DEV_H_
#define _ENIC_DEV_H_
+#include "vnic_dev.h"
+
+/*
+ * Calls the devcmd function given by argument vnicdevcmdfn.
+ * If vf argument is valid, it proxies the devcmd
+ */
+#define ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnicdevcmdfn, ...) \
+ do { \
+ spin_lock(&enic->devcmd_lock); \
+ if (enic_is_valid_vf(enic, vf)) { \
+ vnic_dev_cmd_proxy_by_index_start(enic->vdev, vf); \
+ err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \
+ vnic_dev_cmd_proxy_end(enic->vdev); \
+ } else { \
+ err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \
+ } \
+ spin_unlock(&enic->devcmd_lock); \
+ } while (0)
+
int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats);
int enic_dev_add_station_addr(struct enic *enic);
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 67a27cd304d..c3786fda11d 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -28,6 +28,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
@@ -121,11 +122,25 @@ static const struct enic_stat enic_rx_stats[] = {
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
-static int enic_is_dynamic(struct enic *enic)
+int enic_is_dynamic(struct enic *enic)
{
return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}
+int enic_sriov_enabled(struct enic *enic)
+{
+ return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
+}
+
+int enic_is_valid_vf(struct enic *enic, int vf)
+{
+#ifdef CONFIG_PCI_IOV
+ return vf >= 0 && vf < enic->num_vfs;
+#else
+ return 0;
+#endif
+}
+
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
return rq;
@@ -584,16 +599,16 @@ static inline void enic_queue_wq_skb_cont(struct enic *enic,
struct vnic_wq *wq, struct sk_buff *skb,
unsigned int len_left, int loopback)
{
- skb_frag_t *frag;
+ const skb_frag_t *frag;
/* Queue additional data fragments */
for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
- len_left -= frag->size;
+ len_left -= skb_frag_size(frag);
enic_queue_wq_desc_cont(wq, skb,
- pci_map_page(enic->pdev, frag->page,
- frag->page_offset, frag->size,
- PCI_DMA_TODEVICE),
- frag->size,
+ skb_frag_dma_map(&enic->pdev->dev,
+ frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE),
+ skb_frag_size(frag),
(len_left == 0), /* EOP? */
loopback);
}
@@ -702,16 +717,16 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
* for additional data fragments
*/
for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
- len_left -= frag->size;
- frag_len_left = frag->size;
- offset = frag->page_offset;
+ len_left -= skb_frag_size(frag);
+ frag_len_left = skb_frag_size(frag);
+ offset = 0;
while (frag_len_left) {
len = min(frag_len_left,
(unsigned int)WQ_ENET_MAX_DESC_LEN);
- dma_addr = pci_map_page(enic->pdev, frag->page,
- offset, len,
- PCI_DMA_TODEVICE);
+ dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
+ offset, len,
+ DMA_TO_DEVICE);
enic_queue_wq_desc_cont(wq, skb,
dma_addr,
len,
@@ -1039,15 +1054,15 @@ static void enic_tx_timeout(struct net_device *netdev)
static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct enic *enic = netdev_priv(netdev);
+ struct enic_port_profile *pp;
+ int err;
- if (vf != PORT_SELF_VF)
- return -EOPNOTSUPP;
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
- /* Ignore the vf argument for now. We can assume the request
- * is coming on a vf.
- */
if (is_valid_ether_addr(mac)) {
- memcpy(enic->pp.vf_mac, mac, ETH_ALEN);
+ memcpy(pp->vf_mac, mac, ETH_ALEN);
return 0;
} else
return -EINVAL;
@@ -1058,71 +1073,74 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
{
struct enic *enic = netdev_priv(netdev);
struct enic_port_profile prev_pp;
+ struct enic_port_profile *pp;
int err = 0, restore_pp = 1;
- /* don't support VFs, yet */
- if (vf != PORT_SELF_VF)
- return -EOPNOTSUPP;
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
if (!port[IFLA_PORT_REQUEST])
return -EOPNOTSUPP;
- memcpy(&prev_pp, &enic->pp, sizeof(enic->pp));
- memset(&enic->pp, 0, sizeof(enic->pp));
+ memcpy(&prev_pp, pp, sizeof(*enic->pp));
+ memset(pp, 0, sizeof(*enic->pp));
- enic->pp.set |= ENIC_SET_REQUEST;
- enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+ pp->set |= ENIC_SET_REQUEST;
+ pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
if (port[IFLA_PORT_PROFILE]) {
- enic->pp.set |= ENIC_SET_NAME;
- memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
+ pp->set |= ENIC_SET_NAME;
+ memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
PORT_PROFILE_MAX);
}
if (port[IFLA_PORT_INSTANCE_UUID]) {
- enic->pp.set |= ENIC_SET_INSTANCE;
- memcpy(enic->pp.instance_uuid,
+ pp->set |= ENIC_SET_INSTANCE;
+ memcpy(pp->instance_uuid,
nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
}
if (port[IFLA_PORT_HOST_UUID]) {
- enic->pp.set |= ENIC_SET_HOST;
- memcpy(enic->pp.host_uuid,
+ pp->set |= ENIC_SET_HOST;
+ memcpy(pp->host_uuid,
nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
}
/* Special case handling: mac came from IFLA_VF_MAC */
if (!is_zero_ether_addr(prev_pp.vf_mac))
- memcpy(enic->pp.mac_addr, prev_pp.vf_mac, ETH_ALEN);
+ memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);
- if (is_zero_ether_addr(netdev->dev_addr))
- random_ether_addr(netdev->dev_addr);
+ if (vf == PORT_SELF_VF && is_zero_ether_addr(netdev->dev_addr))
+ random_ether_addr(netdev->dev_addr);
- err = enic_process_set_pp_request(enic, &prev_pp, &restore_pp);
+ err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
if (err) {
if (restore_pp) {
/* Things are still the way they were: Implicit
* DISASSOCIATE failed
*/
- memcpy(&enic->pp, &prev_pp, sizeof(enic->pp));
+ memcpy(pp, &prev_pp, sizeof(*pp));
} else {
- memset(&enic->pp, 0, sizeof(enic->pp));
- memset(netdev->dev_addr, 0, ETH_ALEN);
+ memset(pp, 0, sizeof(*pp));
+ if (vf == PORT_SELF_VF)
+ memset(netdev->dev_addr, 0, ETH_ALEN);
}
} else {
/* Set flag to indicate that the port assoc/disassoc
* request has been sent out to fw
*/
- enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
+ pp->set |= ENIC_PORT_REQUEST_APPLIED;
/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
- if (enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
- memset(enic->pp.mac_addr, 0, ETH_ALEN);
- memset(netdev->dev_addr, 0, ETH_ALEN);
+ if (pp->request == PORT_REQUEST_DISASSOCIATE) {
+ memset(pp->mac_addr, 0, ETH_ALEN);
+ if (vf == PORT_SELF_VF)
+ memset(netdev->dev_addr, 0, ETH_ALEN);
}
}
- memset(enic->pp.vf_mac, 0, ETH_ALEN);
+ memset(pp->vf_mac, 0, ETH_ALEN);
return err;
}
@@ -1132,26 +1150,31 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
{
struct enic *enic = netdev_priv(netdev);
u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
+ struct enic_port_profile *pp;
int err;
- if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
return -ENODATA;
- err = enic_process_get_pp_request(enic, enic->pp.request, &response);
+ err = enic_process_get_pp_request(enic, vf, pp->request, &response);
if (err)
return err;
- NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
+ NLA_PUT_U16(skb, IFLA_PORT_REQUEST, pp->request);
NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
- if (enic->pp.set & ENIC_SET_NAME)
+ if (pp->set & ENIC_SET_NAME)
NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
- enic->pp.name);
- if (enic->pp.set & ENIC_SET_INSTANCE)
+ pp->name);
+ if (pp->set & ENIC_SET_INSTANCE)
NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
- enic->pp.instance_uuid);
- if (enic->pp.set & ENIC_SET_HOST)
+ pp->instance_uuid);
+ if (pp->set & ENIC_SET_HOST)
NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
- enic->pp.host_uuid);
+ pp->host_uuid);
return 0;
@@ -1585,10 +1608,9 @@ static int enic_open(struct net_device *netdev)
for (i = 0; i < enic->rq_count; i++)
vnic_rq_enable(&enic->rq[i]);
- if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
- enic_dev_add_addr(enic, enic->pp.mac_addr);
- else
+ if (!enic_is_dynamic(enic))
enic_dev_add_station_addr(enic);
+
enic_set_rx_mode(netdev);
netif_wake_queue(netdev);
@@ -1636,9 +1658,8 @@ static int enic_stop(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
- enic_dev_del_addr(enic, enic->pp.mac_addr);
- else
+
+ if (!enic_is_dynamic(enic))
enic_dev_del_station_addr(enic);
for (i = 0; i < enic->wq_count; i++) {
@@ -2103,7 +2124,6 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
.ndo_get_stats64 = enic_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = enic_set_rx_mode,
- .ndo_set_multicast_list = enic_set_rx_mode,
.ndo_set_mac_address = enic_set_mac_address_dynamic,
.ndo_change_mtu = enic_change_mtu,
.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
@@ -2125,11 +2145,13 @@ static const struct net_device_ops enic_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = enic_set_mac_address,
.ndo_set_rx_mode = enic_set_rx_mode,
- .ndo_set_multicast_list = enic_set_rx_mode,
.ndo_change_mtu = enic_change_mtu,
.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
.ndo_tx_timeout = enic_tx_timeout,
+ .ndo_set_vf_port = enic_set_vf_port,
+ .ndo_get_vf_port = enic_get_vf_port,
+ .ndo_set_vf_mac = enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = enic_poll_controller,
#endif
@@ -2241,6 +2263,10 @@ static int __devinit enic_probe(struct pci_dev *pdev,
int using_dac = 0;
unsigned int i;
int err;
+ int num_pps = 1;
+#ifdef CONFIG_PCI_IOV
+ int pos = 0;
+#endif
/* Allocate net device structure and initialize. Private
* instance data is initialized to zero.
@@ -2332,13 +2358,41 @@ static int __devinit enic_probe(struct pci_dev *pdev,
goto err_out_iounmap;
}
+#ifdef CONFIG_PCI_IOV
+ /* Get number of subvnics */
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
+ (u16 *)&enic->num_vfs);
+ if (enic->num_vfs) {
+ err = pci_enable_sriov(pdev, enic->num_vfs);
+ if (err) {
+ dev_err(dev, "SRIOV enable failed, aborting."
+ " pci_enable_sriov() returned %d\n",
+ err);
+ goto err_out_vnic_unregister;
+ }
+ enic->priv_flags |= ENIC_SRIOV_ENABLED;
+ num_pps = enic->num_vfs;
+ }
+ }
+
+#endif
+ /* Allocate structure for port profiles */
+ enic->pp = kzalloc(num_pps * sizeof(*enic->pp), GFP_KERNEL);
+ if (!enic->pp) {
+ pr_err("port profile alloc failed, aborting\n");
+ err = -ENOMEM;
+ goto err_out_disable_sriov;
+ }
+
/* Issue device open to get device in known state
*/
err = enic_dev_open(enic);
if (err) {
dev_err(dev, "vNIC dev open failed, aborting\n");
- goto err_out_vnic_unregister;
+ goto err_out_free_pp;
}
/* Setup devcmd lock
@@ -2405,6 +2459,12 @@ static int __devinit enic_probe(struct pci_dev *pdev,
enic->port_mtu = enic->config.mtu;
(void)enic_change_mtu(netdev, enic->port_mtu);
+#ifdef CONFIG_PCI_IOV
+ if (enic_is_dynamic(enic) && pdev->is_virtfn &&
+ is_zero_ether_addr(enic->mac_addr))
+ random_ether_addr(enic->mac_addr);
+#endif
+
err = enic_set_mac_addr(netdev, enic->mac_addr);
if (err) {
dev_err(dev, "Invalid MAC address, aborting\n");
@@ -2442,6 +2502,8 @@ static int __devinit enic_probe(struct pci_dev *pdev,
if (using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Cannot register net device, aborting\n");
@@ -2454,8 +2516,17 @@ err_out_dev_deinit:
enic_dev_deinit(enic);
err_out_dev_close:
vnic_dev_close(enic->vdev);
+err_out_free_pp:
+ kfree(enic->pp);
+err_out_disable_sriov:
+#ifdef CONFIG_PCI_IOV
+ if (enic_sriov_enabled(enic)) {
+ pci_disable_sriov(pdev);
+ enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
+ }
err_out_vnic_unregister:
vnic_dev_unregister(enic->vdev);
+#endif
err_out_iounmap:
enic_iounmap(enic);
err_out_release_regions:
@@ -2481,6 +2552,13 @@ static void __devexit enic_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
enic_dev_deinit(enic);
vnic_dev_close(enic->vdev);
+#ifdef CONFIG_PCI_IOV
+ if (enic_sriov_enabled(enic)) {
+ pci_disable_sriov(pdev);
+ enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
+ }
+#endif
+ kfree(enic->pp);
vnic_dev_unregister(enic->vdev);
enic_iounmap(enic);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
new file mode 100644
index 00000000000..22bf03a1829
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/ip.h>
+
+#include "vnic_vic.h"
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+#include "enic_pp.h"
+
+/*
+ * Checks validity of vf index that came in
+ * port profile request
+ */
+int enic_is_valid_pp_vf(struct enic *enic, int vf, int *err)
+{
+ if (vf != PORT_SELF_VF) {
+#ifdef CONFIG_PCI_IOV
+ if (enic_sriov_enabled(enic)) {
+ if (vf < 0 || vf >= enic->num_vfs) {
+ *err = -EINVAL;
+ goto err_out;
+ }
+ } else {
+ *err = -EOPNOTSUPP;
+ goto err_out;
+ }
+#else
+ *err = -EOPNOTSUPP;
+ goto err_out;
+#endif
+ }
+
+ if (vf == PORT_SELF_VF && !enic_is_dynamic(enic)) {
+ *err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ *err = 0;
+ return 1;
+
+err_out:
+ return 0;
+}
+
+static int enic_set_port_profile(struct enic *enic, int vf)
+{
+ struct net_device *netdev = enic->netdev;
+ struct enic_port_profile *pp;
+ struct vic_provinfo *vp;
+ const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
+ const u16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
+ char uuid_str[38];
+ char client_mac_str[18];
+ u8 *client_mac;
+ int err;
+
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ if (!(pp->set & ENIC_SET_NAME) || !strlen(pp->name))
+ return -EINVAL;
+
+ vp = vic_provinfo_alloc(GFP_KERNEL, oui,
+ VIC_PROVINFO_GENERIC_TYPE);
+ if (!vp)
+ return -ENOMEM;
+
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
+ strlen(pp->name) + 1, pp->name);
+
+ if (!is_zero_ether_addr(pp->mac_addr)) {
+ client_mac = pp->mac_addr;
+ } else if (vf == PORT_SELF_VF) {
+ client_mac = netdev->dev_addr;
+ } else {
+ netdev_err(netdev, "Cannot find pp mac address "
+ "for VF %d\n", vf);
+ err = -EINVAL;
+ goto add_tlv_failure;
+ }
+
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
+ ETH_ALEN, client_mac);
+
+ snprintf(client_mac_str, sizeof(client_mac_str), "%pM", client_mac);
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
+ sizeof(client_mac_str), client_mac_str);
+
+ if (pp->set & ENIC_SET_INSTANCE) {
+ sprintf(uuid_str, "%pUB", pp->instance_uuid);
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
+ sizeof(uuid_str), uuid_str);
+ }
+
+ if (pp->set & ENIC_SET_HOST) {
+ sprintf(uuid_str, "%pUB", pp->host_uuid);
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
+ sizeof(uuid_str), uuid_str);
+ }
+
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_OS_TYPE,
+ sizeof(os_type), &os_type);
+
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_init_prov2, (u8 *)vp,
+ vic_provinfo_size(vp));
+ err = enic_dev_status_to_errno(err);
+
+add_tlv_failure:
+ vic_provinfo_free(vp);
+
+ return err;
+}
+
+static int enic_unset_port_profile(struct enic *enic, int vf)
+{
+ int err;
+
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_deinit);
+ if (err)
+ return enic_dev_status_to_errno(err);
+
+ if (vf == PORT_SELF_VF)
+ enic_reset_addr_lists(enic);
+
+ return 0;
+}
+
+static int enic_are_pp_different(struct enic_port_profile *pp1,
+ struct enic_port_profile *pp2)
+{
+ return strcmp(pp1->name, pp2->name) | !!memcmp(pp1->instance_uuid,
+ pp2->instance_uuid, PORT_UUID_MAX) |
+ !!memcmp(pp1->host_uuid, pp2->host_uuid, PORT_UUID_MAX) |
+ !!memcmp(pp1->mac_addr, pp2->mac_addr, ETH_ALEN);
+}
+
+static int enic_pp_preassociate(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+static int enic_pp_disassociate(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+static int enic_pp_preassociate_rr(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+static int enic_pp_associate(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+
+static int (*enic_pp_handlers[])(struct enic *enic, int vf,
+ struct enic_port_profile *prev_state,
+ int *restore_pp) = {
+ [PORT_REQUEST_PREASSOCIATE] = enic_pp_preassociate,
+ [PORT_REQUEST_PREASSOCIATE_RR] = enic_pp_preassociate_rr,
+ [PORT_REQUEST_ASSOCIATE] = enic_pp_associate,
+ [PORT_REQUEST_DISASSOCIATE] = enic_pp_disassociate,
+};
+
+static const int enic_pp_handlers_count =
+ sizeof(enic_pp_handlers)/sizeof(*enic_pp_handlers);
+
+static int enic_pp_preassociate(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ return -EOPNOTSUPP;
+}
+
+static int enic_pp_disassociate(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ struct net_device *netdev = enic->netdev;
+ struct enic_port_profile *pp;
+ int err;
+
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ /* Deregister mac addresses */
+ if (!is_zero_ether_addr(pp->mac_addr))
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_del_addr,
+ pp->mac_addr);
+ else if (!is_zero_ether_addr(netdev->dev_addr))
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_del_addr,
+ netdev->dev_addr);
+
+ return enic_unset_port_profile(enic, vf);
+}
+
+static int enic_pp_preassociate_rr(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ struct enic_port_profile *pp;
+ int err;
+ int active = 0;
+
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ if (pp->request != PORT_REQUEST_ASSOCIATE) {
+ /* If pre-associate is not part of an associate.
+ We always disassociate first */
+ err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](enic, vf,
+ prev_pp, restore_pp);
+ if (err)
+ return err;
+
+ *restore_pp = 0;
+ }
+
+ *restore_pp = 0;
+
+ err = enic_set_port_profile(enic, vf);
+ if (err)
+ return err;
+
+ /* If pre-associate is not part of an associate. */
+ if (pp->request != PORT_REQUEST_ASSOCIATE) {
+ /* Enable device as standby */
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_enable2,
+ active);
+ err = enic_dev_status_to_errno(err);
+ }
+
+ return err;
+}
+
+static int enic_pp_associate(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ struct net_device *netdev = enic->netdev;
+ struct enic_port_profile *pp;
+ int err;
+ int active = 1;
+
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ /* Check if a pre-associate was called before */
+ if (prev_pp->request != PORT_REQUEST_PREASSOCIATE_RR ||
+ (prev_pp->request == PORT_REQUEST_PREASSOCIATE_RR &&
+ enic_are_pp_different(prev_pp, pp))) {
+ err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](
+ enic, vf, prev_pp, restore_pp);
+ if (err)
+ return err;
+
+ *restore_pp = 0;
+ }
+
+ err = enic_pp_handlers[PORT_REQUEST_PREASSOCIATE_RR](
+ enic, vf, prev_pp, restore_pp);
+ if (err)
+ return err;
+
+ *restore_pp = 0;
+
+ /* Enable device as active */
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_enable2, active);
+ err = enic_dev_status_to_errno(err);
+ if (err)
+ return err;
+
+ /* Register mac address */
+ if (!is_zero_ether_addr(pp->mac_addr))
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_add_addr,
+ pp->mac_addr);
+ else if (!is_zero_ether_addr(netdev->dev_addr))
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_add_addr,
+ netdev->dev_addr);
+
+ return 0;
+}
+
+int enic_process_set_pp_request(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ struct enic_port_profile *pp;
+ int err;
+
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ if (pp->request >= enic_pp_handlers_count
+ || !enic_pp_handlers[pp->request])
+ return -EOPNOTSUPP;
+
+ return enic_pp_handlers[pp->request](enic, vf, prev_pp, restore_pp);
+}
+
+int enic_process_get_pp_request(struct enic *enic, int vf,
+ int request, u16 *response)
+{
+ int err, status = ERR_SUCCESS;
+
+ switch (request) {
+
+ case PORT_REQUEST_PREASSOCIATE_RR:
+ case PORT_REQUEST_ASSOCIATE:
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
+ vnic_dev_enable2_done, &status);
+ break;
+
+ case PORT_REQUEST_DISASSOCIATE:
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
+ vnic_dev_deinit_done, &status);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (err)
+ status = err;
+
+ switch (status) {
+ case ERR_SUCCESS:
+ *response = PORT_PROFILE_RESPONSE_SUCCESS;
+ break;
+ case ERR_EINVAL:
+ *response = PORT_PROFILE_RESPONSE_INVALID;
+ break;
+ case ERR_EBADSTATE:
+ *response = PORT_PROFILE_RESPONSE_BADSTATE;
+ break;
+ case ERR_ENOMEM:
+ *response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
+ break;
+ case ERR_EINPROGRESS:
+ *response = PORT_PROFILE_RESPONSE_INPROGRESS;
+ break;
+ default:
+ *response = PORT_PROFILE_RESPONSE_ERROR;
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.h b/drivers/net/ethernet/cisco/enic/enic_pp.h
new file mode 100644
index 00000000000..a09ff392c1c
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_PP_H_
+#define _ENIC_PP_H_
+
+#define ENIC_PP_BY_INDEX(enic, vf, pp, err) \
+ do { \
+ if (enic_is_valid_pp_vf(enic, vf, err)) \
+ pp = (vf == PORT_SELF_VF) ? enic->pp : enic->pp + vf; \
+ else \
+ pp = NULL; \
+ } while (0)
+
+int enic_process_set_pp_request(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+int enic_process_get_pp_request(struct enic *enic, int vf,
+ int request, u16 *response);
+int enic_is_valid_pp_vf(struct enic *enic, int vf, int *err);
+
+#endif /* _ENIC_PP_H_ */
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 4a35367de79..4a35367de79 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index 25be2734c3f..25be2734c3f 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
diff --git a/drivers/net/enic/rq_enet_desc.h b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
index e6dd30988d6..e6dd30988d6 100644
--- a/drivers/net/enic/rq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
diff --git a/drivers/net/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c
index 0daa1c7073c..0daa1c7073c 100644
--- a/drivers/net/enic/vnic_cq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c
diff --git a/drivers/net/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index 579315cbe80..579315cbe80 100644
--- a/drivers/net/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 8c4c8cf486f..31e7f9bc206 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -32,6 +32,7 @@
enum vnic_proxy_type {
PROXY_NONE,
PROXY_BY_BDF,
+ PROXY_BY_INDEX,
};
struct vnic_res {
@@ -328,20 +329,21 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
return -ETIMEDOUT;
}
-static int vnic_dev_cmd_proxy_by_bdf(struct vnic_dev *vdev,
- enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
+ enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait)
{
u32 status;
int err;
memset(vdev->args, 0, sizeof(vdev->args));
- vdev->args[0] = vdev->proxy_index; /* bdf */
+ vdev->args[0] = vdev->proxy_index;
vdev->args[1] = cmd;
vdev->args[2] = *a0;
vdev->args[3] = *a1;
- err = _vnic_dev_cmd(vdev, CMD_PROXY_BY_BDF, wait);
+ err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
if (err)
return err;
@@ -376,14 +378,30 @@ static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
return err;
}
+void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
+{
+ vdev->proxy = PROXY_BY_INDEX;
+ vdev->proxy_index = index;
+}
+
+void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
+{
+ vdev->proxy = PROXY_NONE;
+ vdev->proxy_index = 0;
+}
+
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait)
{
memset(vdev->args, 0, sizeof(vdev->args));
switch (vdev->proxy) {
+ case PROXY_BY_INDEX:
+ return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
+ a0, a1, wait);
case PROXY_BY_BDF:
- return vnic_dev_cmd_proxy_by_bdf(vdev, cmd, a0, a1, wait);
+ return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
+ a0, a1, wait);
case PROXY_NONE:
default:
return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 852b698fbe7..6a138b625d1 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -85,6 +85,8 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
struct vnic_dev_ring *ring);
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait);
+void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index);
+void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 8025e8808d6..8025e8808d6 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
index 609542848e0..609542848e0 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c
index 0ca107f7bc8..0ca107f7bc8 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/ethernet/cisco/enic/vnic_intr.h
index 2b163639229..2b163639229 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.h
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/ethernet/cisco/enic/vnic_nic.h
index 995a50dd4c9..995a50dd4c9 100644
--- a/drivers/net/enic/vnic_nic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_nic.h
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/ethernet/cisco/enic/vnic_resource.h
index e0a73f1ca6f..e0a73f1ca6f 100644
--- a/drivers/net/enic/vnic_resource.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_resource.h
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index 34105e0951a..34105e0951a 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 2056586f4d4..2056586f4d4 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/ethernet/cisco/enic/vnic_rss.h
index fa421baf45b..fa421baf45b 100644
--- a/drivers/net/enic/vnic_rss.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rss.h
diff --git a/drivers/net/enic/vnic_stats.h b/drivers/net/ethernet/cisco/enic/vnic_stats.h
index 77750ec9395..77750ec9395 100644
--- a/drivers/net/enic/vnic_stats.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_stats.h
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c
index 24ef8cd4054..24ef8cd4054 100644
--- a/drivers/net/enic/vnic_vic.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/ethernet/cisco/enic/vnic_vic.h
index 9ef81f14835..9ef81f14835 100644
--- a/drivers/net/enic/vnic_vic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.h
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index df61bd932ea..df61bd932ea 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 7dd937ac11c..7dd937ac11c 100644
--- a/drivers/net/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
diff --git a/drivers/net/enic/wq_enet_desc.h b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
index c7021e3a631..c7021e3a631 100644
--- a/drivers/net/enic/wq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
new file mode 100644
index 00000000000..972b62b3183
--- /dev/null
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -0,0 +1,24 @@
+#
+# Davicom device configuration
+#
+
+config DM9000
+ tristate "DM9000 support"
+ depends on ARM || BLACKFIN || MIPS
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ Support for DM9000 chipset.
+
+ To compile this driver as a module, choose M here. The module
+ will be called dm9000.
+
+config DM9000_FORCE_SIMPLE_PHY_POLL
+ bool "Force simple NSR based PHY polling"
+ depends on DM9000
+ ---help---
+ This configuration forces the DM9000 to use the NSR's LinkStatus
+ bit to determine if the link is up or down instead of the more
+ costly MII PHY reads. Note, this will not work if the chip is
+ operating with an external PHY.
diff --git a/drivers/net/ethernet/davicom/Makefile b/drivers/net/ethernet/davicom/Makefile
new file mode 100644
index 00000000000..74b31f0ebe1
--- /dev/null
+++ b/drivers/net/ethernet/davicom/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Davicom device drivers.
+#
+
+obj-$(CONFIG_DM9000) += dm9000.o
diff --git a/drivers/net/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8ef31dc4704..438f4580bf6 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -56,6 +56,13 @@ static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+/*
+ * Debug messages level
+ */
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)");
+
/* DM9000 register address locking.
*
* The DM9000 uses an address register to control where data written
@@ -103,7 +110,6 @@ typedef struct board_info {
unsigned int flags;
unsigned int in_suspend :1;
unsigned int wake_supported :1;
- int debug_level;
enum dm9000_type type;
@@ -138,8 +144,7 @@ typedef struct board_info {
/* debug code */
#define dm9000_dbg(db, lev, msg...) do { \
- if ((lev) < CONFIG_DM9000_DEBUGLEVEL && \
- (lev) < db->debug_level) { \
+ if ((lev) < debug) { \
dev_dbg(db->dev, msg); \
} \
} while (0)
@@ -1339,7 +1344,7 @@ static const struct net_device_ops dm9000_netdev_ops = {
.ndo_stop = dm9000_stop,
.ndo_start_xmit = dm9000_start_xmit,
.ndo_tx_timeout = dm9000_timeout,
- .ndo_set_multicast_list = dm9000_hash_table,
+ .ndo_set_rx_mode = dm9000_hash_table,
.ndo_do_ioctl = dm9000_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_features = dm9000_set_features,
diff --git a/drivers/net/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h
index 55688bd1a3e..55688bd1a3e 100644
--- a/drivers/net/dm9000.h
+++ b/drivers/net/ethernet/davicom/dm9000.h
diff --git a/drivers/net/ethernet/dec/Kconfig b/drivers/net/ethernet/dec/Kconfig
new file mode 100644
index 00000000000..37940279ded
--- /dev/null
+++ b/drivers/net/ethernet/dec/Kconfig
@@ -0,0 +1,37 @@
+#
+# Digital Equipment Inc network device configuration
+#
+
+config NET_VENDOR_DEC
+ bool "Digital Equipment devices"
+ default y
+ depends on PCI || EISA || CARDBUS
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about DEC cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_DEC
+
+config EWRK3
+ tristate "EtherWORKS 3 (DE203, DE204, DE205) support"
+ depends on ISA
+ select CRC32
+ ---help---
+ This driver supports the DE203, DE204 and DE205 network (Ethernet)
+ cards. If this is for you, say Y and read
+ <file:Documentation/networking/ewrk3.txt> in the kernel source as
+ well as the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ewrk3.
+
+source "drivers/net/ethernet/dec/tulip/Kconfig"
+
+endif # NET_VENDOR_DEC
diff --git a/drivers/net/ethernet/dec/Makefile b/drivers/net/ethernet/dec/Makefile
new file mode 100644
index 00000000000..1b01ed8d42c
--- /dev/null
+++ b/drivers/net/ethernet/dec/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Digital Equipment Inc. network device drivers.
+#
+
+obj-$(CONFIG_EWRK3) += ewrk3.o
+obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c
index 05a5f71451a..f9df5e4d034 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ethernet/dec/ewrk3.c
@@ -393,7 +393,7 @@ static const struct net_device_ops ewrk3_netdev_ops = {
.ndo_open = ewrk3_open,
.ndo_start_xmit = ewrk3_queue_pkt,
.ndo_stop = ewrk3_close,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_do_ioctl = ewrk3_ioctl,
.ndo_tx_timeout = ewrk3_timeout,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/ewrk3.h b/drivers/net/ethernet/dec/ewrk3.h
index 8e0ee906567..8e0ee906567 100644
--- a/drivers/net/ewrk3.h
+++ b/drivers/net/ethernet/dec/ewrk3.h
diff --git a/drivers/net/tulip/21142.c b/drivers/net/ethernet/dec/tulip/21142.c
index 092c3faa882..25b8deedbef 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/ethernet/dec/tulip/21142.c
@@ -7,9 +7,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller
Hardware Reference Manual" is currently available at :
http://developer.intel.com/design/network/manuals/278074.htm
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
new file mode 100644
index 00000000000..1203be0436e
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -0,0 +1,172 @@
+#
+# Tulip family network device configuration
+#
+
+config NET_TULIP
+ bool "DEC - Tulip devices"
+ depends on (PCI || EISA || CARDBUS)
+ ---help---
+ This selects the "Tulip" family of EISA/PCI network cards.
+
+if NET_TULIP
+
+config DE2104X
+ tristate "Early DECchip Tulip (dc2104x) PCI support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver is developed for the SMC EtherPower series Ethernet
+ cards and also works with cards based on the DECchip
+ 21040 (Tulip series) chips. Some LinkSys PCI cards are
+ of this type. (If your card is NOT SMC EtherPower 10/100 PCI
+ (smc9332dst), you can also try the driver for "Generic DECchip"
+ cards, below. However, most people with a network card of this type
+ will say Y here.) Do read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module will
+ be called de2104x.
+
+config DE2104X_DSL
+ int "Descriptor Skip Length in 32 bit longwords"
+ depends on DE2104X
+ range 0 31
+ default 0
+ ---help---
+ Setting this value allows to align ring buffer descriptors into their
+ own cache lines. Value of 4 corresponds to the typical 32 byte line
+ (the descriptor is 16 bytes). This is necessary on systems that lack
+ cache coherence, an example is PowerMac 5500. Otherwise 0 is safe.
+ Default is 0, and range is 0 to 31.
+
+config TULIP
+ tristate "DECchip Tulip (dc2114x) PCI support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver is developed for the SMC EtherPower series Ethernet
+ cards and also works with cards based on the DECchip
+ 21140 (Tulip series) chips. Some LinkSys PCI cards are
+ of this type. (If your card is NOT SMC EtherPower 10/100 PCI
+ (smc9332dst), you can also try the driver for "Generic DECchip"
+ cards, above. However, most people with a network card of this type
+ will say Y here.) Do read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module will
+ be called tulip.
+
+config TULIP_MWI
+ bool "New bus configuration (EXPERIMENTAL)"
+ depends on TULIP && EXPERIMENTAL
+ ---help---
+ This configures your Tulip card specifically for the card and
+ system cache line size type you are using.
+
+ This is experimental code, not yet tested on many boards.
+
+ If unsure, say N.
+
+config TULIP_MMIO
+ bool "Use PCI shared mem for NIC registers"
+ depends on TULIP
+ ---help---
+ Use PCI shared memory for the NIC registers, rather than going through
+ the Tulip's PIO (programmed I/O ports). Faster, but could produce
+ obscure bugs if your mainboard has memory controller timing issues.
+ If in doubt, say N.
+
+config TULIP_NAPI
+ bool "Use RX polling (NAPI)"
+ depends on TULIP
+ ---help---
+ NAPI is a new driver API designed to reduce CPU and interrupt load
+ when the driver is receiving lots of packets from the card. It is
+ still somewhat experimental and thus not yet enabled by default.
+
+ If your estimated Rx load is 10kpps or more, or if the card will be
+ deployed on potentially unfriendly networks (e.g. in a firewall),
+ then say Y here.
+
+ If in doubt, say N.
+
+config TULIP_NAPI_HW_MITIGATION
+ bool "Use Interrupt Mitigation"
+ depends on TULIP_NAPI
+ ---help---
+ Use HW to reduce RX interrupts. Not strictly necessary since NAPI
+ reduces RX interrupts by itself. Interrupt mitigation reduces RX
+ interrupts even at low levels of traffic at the cost of a small
+ latency.
+
+ If in doubt, say Y.
+
+config TULIP_DM910X
+ def_bool y
+ depends on TULIP && SPARC
+
+config DE4X5
+ tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
+ depends on (PCI || EISA)
+ select CRC32
+ ---help---
+ This is support for the DIGITAL series of PCI/EISA Ethernet cards.
+ These include the DE425, DE434, DE435, DE450 and DE500 models. If
+ you have a network card of this type, say Y and read the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. More specific
+ information is contained in
+ <file:Documentation/networking/de4x5.txt>.
+
+ To compile this driver as a module, choose M here. The module will
+ be called de4x5.
+
+config WINBOND_840
+ tristate "Winbond W89c840 Ethernet support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This driver is for the Winbond W89c840 chip. It also works with
+ the TX9882 chip on the Compex RL100-ATX board.
+ More specific information and updates are available from
+ <http://www.scyld.com/network/drivers.html>.
+
+config DM9102
+ tristate "Davicom DM910x/DM980x support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from
+ Davicom (<http://www.davicom.com.tw/>). If you have such a network
+ (Ethernet) card, say Y. Some information is contained in the file
+ <file:Documentation/networking/dmfe.txt>.
+
+ To compile this driver as a module, choose M here. The module will
+ be called dmfe.
+
+config ULI526X
+ tristate "ULi M526x controller support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver is for ULi M5261/M5263 10/100M Ethernet Controller
+ (<http://www.nvidia.com/page/uli_drivers.html>).
+
+ To compile this driver as a module, choose M here. The module will
+ be called uli526x.
+
+config PCMCIA_XIRCOM
+ tristate "Xircom CardBus support"
+ depends on CARDBUS
+ ---help---
+ This driver is for the Digital "Tulip" Ethernet CardBus adapters.
+ It should work with most DEC 21*4*-based chips/ethercards, as well
+ as with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and
+ ASIX.
+
+ To compile this driver as a module, choose M here. The module will
+ be called xircom_cb. If unsure, say N.
+
+endif # NET_TULIP
diff --git a/drivers/net/tulip/Makefile b/drivers/net/ethernet/dec/tulip/Makefile
index 5e8be38b45b..5e8be38b45b 100644
--- a/drivers/net/tulip/Makefile
+++ b/drivers/net/ethernet/dec/tulip/Makefile
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index ce90efc6ba3..1427739d9a5 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1956,7 +1956,7 @@ bad_srom:
static const struct net_device_ops de_netdev_ops = {
.ndo_open = de_open,
.ndo_stop = de_close,
- .ndo_set_multicast_list = de_set_rx_mode,
+ .ndo_set_rx_mode = de_set_rx_mode,
.ndo_start_xmit = de_start_xmit,
.ndo_get_stats = de_get_stats,
.ndo_tx_timeout = de_tx_timeout,
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 959b41021a6..871bcaa7068 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1084,7 +1084,7 @@ static const struct net_device_ops de4x5_netdev_ops = {
.ndo_stop = de4x5_close,
.ndo_start_xmit = de4x5_queue_pkt,
.ndo_get_stats = de4x5_get_stats,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_do_ioctl = de4x5_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address= eth_mac_addr,
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h
index 9f2877438fb..ec756eba397 100644
--- a/drivers/net/tulip/de4x5.h
+++ b/drivers/net/ethernet/dec/tulip/de4x5.h
@@ -991,8 +991,6 @@
*/
#include <linux/sockios.h>
-#define DE4X5IOCTL SIOCDEVPRIVATE
-
struct de4x5_ioctl {
unsigned short cmd; /* Command to run */
unsigned short len; /* Length of the data buffer */
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 9a21ca3873f..17b11ee1745 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -356,7 +356,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_open = dmfe_open,
.ndo_stop = dmfe_stop,
.ndo_start_xmit = dmfe_start_xmit,
- .ndo_set_multicast_list = dmfe_set_filter_mode,
+ .ndo_set_rx_mode = dmfe_set_filter_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index fa5eee925f2..14d5b611783 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -7,8 +7,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
Please submit bug reports to http://bugzilla.kernel.org/.
*/
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 5350d753e0f..4fb8c8c0a42 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -7,10 +7,7 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
Please submit bugs to http://bugzilla.kernel.org/ .
-
*/
#include <linux/pci.h>
diff --git a/drivers/net/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index 4bd13922875..beeb17b52ad 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -7,9 +7,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
Please submit bugs to http://bugzilla.kernel.org/ .
*/
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 52d898bdbeb..9c16e4ad02a 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -7,9 +7,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
Please submit bugs to http://bugzilla.kernel.org/ .
*/
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c
index 93358ee4d83..04a7e477eaf 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/ethernet/dec/tulip/pnic2.c
@@ -8,9 +8,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
Please submit bugs to http://bugzilla.kernel.org/ .
*/
diff --git a/drivers/net/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
index 2017faf2d0e..19078d28ffb 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -7,9 +7,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
Please submit bugs to http://bugzilla.kernel.org/ .
*/
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index 9db528967da..fb3887c18dc 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -7,9 +7,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
Please submit bugs to http://bugzilla.kernel.org/ .
*/
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 1246998a677..9656dd0647d 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -6,9 +6,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
Please submit bugs to http://bugzilla.kernel.org/ .
*/
@@ -1291,7 +1288,7 @@ static const struct net_device_ops tulip_netdev_ops = {
.ndo_stop = tulip_close,
.ndo_get_stats = tulip_get_stats,
.ndo_do_ioctl = private_ioctl,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 9e63f406f72..7a44a7a6adc 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -259,7 +259,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_open = uli526x_open,
.ndo_stop = uli526x_stop,
.ndo_start_xmit = uli526x_start_xmit,
- .ndo_set_multicast_list = uli526x_set_filter_mode,
+ .ndo_set_rx_mode = uli526x_set_filter_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 862eadf0719..4d01219ba22 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -350,7 +350,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_stop = netdev_close,
.ndo_start_xmit = start_tx,
.ndo_get_stats = get_stats,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = tx_timeout,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 988b8eb24d3..988b8eb24d3 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
new file mode 100644
index 00000000000..b5afe218c31
--- /dev/null
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -0,0 +1,86 @@
+#
+# D-Link device configuration
+#
+
+config NET_VENDOR_DLINK
+ bool "D-Link devices"
+ default y
+ depends on PCI || PARPORT
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about D-Link devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_DLINK
+
+config DE600
+ tristate "D-Link DE600 pocket adapter support"
+ depends on PARPORT
+ ---help---
+ This is a network (Ethernet) device which attaches to your parallel
+ port. Read <file:Documentation/networking/DLINK.txt> as well as the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, if you want to use
+ this. It is possible to have several devices share a single parallel
+ port and it is safe to compile the corresponding drivers into the
+ kernel.
+
+ To compile this driver as a module, choose M here: the module
+ will be called de600.
+
+config DE620
+ tristate "D-Link DE620 pocket adapter support"
+ depends on PARPORT
+ ---help---
+ This is a network (Ethernet) device which attaches to your parallel
+ port. Read <file:Documentation/networking/DLINK.txt> as well as the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, if you want to use
+ this. It is possible to have several devices share a single parallel
+ port and it is safe to compile the corresponding drivers into the
+ kernel.
+
+ To compile this driver as a module, choose M here: the module
+ will be called de620.
+
+config DL2K
+ tristate "DL2000/TC902x-based Gigabit Ethernet support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver supports DL2000/TC902x-based Gigabit ethernet cards,
+ which includes
+ D-Link DGE-550T Gigabit Ethernet Adapter.
+ D-Link DL2000-based Gigabit Ethernet Adapter.
+ Sundance/Tamarack TC902x Gigabit Ethernet Adapter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dl2k.
+
+config SUNDANCE
+ tristate "Sundance Alta support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This driver is for the Sundance "Alta" chip.
+ More specific information and updates are available from
+ <http://www.scyld.com/network/sundance.html>.
+
+config SUNDANCE_MMIO
+ bool "Use MMIO instead of PIO"
+ depends on SUNDANCE
+ ---help---
+ Enable memory-mapped I/O for interaction with Sundance NIC registers.
+ Do NOT enable this by default, PIO (enabled when MMIO is disabled)
+ is known to solve bugs on certain chips.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_DLINK
diff --git a/drivers/net/ethernet/dlink/Makefile b/drivers/net/ethernet/dlink/Makefile
new file mode 100644
index 00000000000..c705eaa4f5b
--- /dev/null
+++ b/drivers/net/ethernet/dlink/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the D-Link network device drivers.
+#
+
+obj-$(CONFIG_DE600) += de600.o
+obj-$(CONFIG_DE620) += de620.o
+obj-$(CONFIG_DL2K) += dl2k.o
+obj-$(CONFIG_SUNDANCE) += sundance.o
diff --git a/drivers/net/de600.c b/drivers/net/ethernet/dlink/de600.c
index 23a65398d01..23a65398d01 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/ethernet/dlink/de600.c
diff --git a/drivers/net/de600.h b/drivers/net/ethernet/dlink/de600.h
index e80ecbabcf4..e80ecbabcf4 100644
--- a/drivers/net/de600.h
+++ b/drivers/net/ethernet/dlink/de600.h
diff --git a/drivers/net/de620.c b/drivers/net/ethernet/dlink/de620.c
index 1c51a757611..3b934ab784d 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/ethernet/dlink/de620.c
@@ -767,7 +767,7 @@ static const struct net_device_ops de620_netdev_ops = {
.ndo_stop = de620_close,
.ndo_start_xmit = de620_start_xmit,
.ndo_tx_timeout = de620_timeout,
- .ndo_set_multicast_list = de620_set_multicast_list,
+ .ndo_set_rx_mode = de620_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/de620.h b/drivers/net/ethernet/dlink/de620.h
index e8d9a88f4cb..e8d9a88f4cb 100644
--- a/drivers/net/de620.h
+++ b/drivers/net/ethernet/dlink/de620.h
diff --git a/drivers/net/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index ed73e4a9350..b2dc2c81a14 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -92,7 +92,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_get_stats = get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_set_multicast_list = set_multicast,
+ .ndo_set_rx_mode = set_multicast,
.ndo_do_ioctl = rio_ioctl,
.ndo_tx_timeout = rio_tx_timeout,
.ndo_change_mtu = change_mtu,
@@ -1428,7 +1428,7 @@ mii_wait_link (struct net_device *dev, int wait)
do {
bmsr = mii_read (dev, phy_addr, MII_BMSR);
- if (bmsr & MII_BMSR_LINK_STATUS)
+ if (bmsr & BMSR_LSTATUS)
return 0;
mdelay (1);
} while (--wait > 0);
@@ -1449,60 +1449,60 @@ mii_get_media (struct net_device *dev)
bmsr = mii_read (dev, phy_addr, MII_BMSR);
if (np->an_enable) {
- if (!(bmsr & MII_BMSR_AN_COMPLETE)) {
+ if (!(bmsr & BMSR_ANEGCOMPLETE)) {
/* Auto-Negotiation not completed */
return -1;
}
- negotiate = mii_read (dev, phy_addr, MII_ANAR) &
- mii_read (dev, phy_addr, MII_ANLPAR);
- mscr = mii_read (dev, phy_addr, MII_MSCR);
- mssr = mii_read (dev, phy_addr, MII_MSSR);
- if (mscr & MII_MSCR_1000BT_FD && mssr & MII_MSSR_LP_1000BT_FD) {
+ negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) &
+ mii_read (dev, phy_addr, MII_LPA);
+ mscr = mii_read (dev, phy_addr, MII_CTRL1000);
+ mssr = mii_read (dev, phy_addr, MII_STAT1000);
+ if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) {
np->speed = 1000;
np->full_duplex = 1;
printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
- } else if (mscr & MII_MSCR_1000BT_HD && mssr & MII_MSSR_LP_1000BT_HD) {
+ } else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) {
np->speed = 1000;
np->full_duplex = 0;
printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
- } else if (negotiate & MII_ANAR_100BX_FD) {
+ } else if (negotiate & ADVERTISE_100FULL) {
np->speed = 100;
np->full_duplex = 1;
printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
- } else if (negotiate & MII_ANAR_100BX_HD) {
+ } else if (negotiate & ADVERTISE_100HALF) {
np->speed = 100;
np->full_duplex = 0;
printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
- } else if (negotiate & MII_ANAR_10BT_FD) {
+ } else if (negotiate & ADVERTISE_10FULL) {
np->speed = 10;
np->full_duplex = 1;
printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
- } else if (negotiate & MII_ANAR_10BT_HD) {
+ } else if (negotiate & ADVERTISE_10HALF) {
np->speed = 10;
np->full_duplex = 0;
printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
}
- if (negotiate & MII_ANAR_PAUSE) {
+ if (negotiate & ADVERTISE_PAUSE_CAP) {
np->tx_flow &= 1;
np->rx_flow &= 1;
- } else if (negotiate & MII_ANAR_ASYMMETRIC) {
+ } else if (negotiate & ADVERTISE_PAUSE_ASYM) {
np->tx_flow = 0;
np->rx_flow &= 1;
}
/* else tx_flow, rx_flow = user select */
} else {
__u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
- switch (bmcr & (MII_BMCR_SPEED_100 | MII_BMCR_SPEED_1000)) {
- case MII_BMCR_SPEED_1000:
+ switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) {
+ case BMCR_SPEED1000:
printk (KERN_INFO "Operating at 1000 Mbps, ");
break;
- case MII_BMCR_SPEED_100:
+ case BMCR_SPEED100:
printk (KERN_INFO "Operating at 100 Mbps, ");
break;
case 0:
printk (KERN_INFO "Operating at 10 Mbps, ");
}
- if (bmcr & MII_BMCR_DUPLEX_MODE) {
+ if (bmcr & BMCR_FULLDPLX) {
printk (KERN_CONT "Full duplex\n");
} else {
printk (KERN_CONT "Half duplex\n");
@@ -1536,24 +1536,22 @@ mii_set_media (struct net_device *dev)
if (np->an_enable) {
/* Advertise capabilities */
bmsr = mii_read (dev, phy_addr, MII_BMSR);
- anar = mii_read (dev, phy_addr, MII_ANAR) &
- ~MII_ANAR_100BX_FD &
- ~MII_ANAR_100BX_HD &
- ~MII_ANAR_100BT4 &
- ~MII_ANAR_10BT_FD &
- ~MII_ANAR_10BT_HD;
- if (bmsr & MII_BMSR_100BX_FD)
- anar |= MII_ANAR_100BX_FD;
- if (bmsr & MII_BMSR_100BX_HD)
- anar |= MII_ANAR_100BX_HD;
- if (bmsr & MII_BMSR_100BT4)
- anar |= MII_ANAR_100BT4;
- if (bmsr & MII_BMSR_10BT_FD)
- anar |= MII_ANAR_10BT_FD;
- if (bmsr & MII_BMSR_10BT_HD)
- anar |= MII_ANAR_10BT_HD;
- anar |= MII_ANAR_PAUSE | MII_ANAR_ASYMMETRIC;
- mii_write (dev, phy_addr, MII_ANAR, anar);
+ anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
+ ~(ADVERTISE_100FULL | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_10HALF |
+ ADVERTISE_100BASE4);
+ if (bmsr & BMSR_100FULL)
+ anar |= ADVERTISE_100FULL;
+ if (bmsr & BMSR_100HALF)
+ anar |= ADVERTISE_100HALF;
+ if (bmsr & BMSR_100BASE4)
+ anar |= ADVERTISE_100BASE4;
+ if (bmsr & BMSR_10FULL)
+ anar |= ADVERTISE_10FULL;
+ if (bmsr & BMSR_10HALF)
+ anar |= ADVERTISE_10HALF;
+ anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ mii_write (dev, phy_addr, MII_ADVERTISE, anar);
/* Enable Auto crossover */
pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
@@ -1561,8 +1559,8 @@ mii_set_media (struct net_device *dev)
mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
/* Soft reset PHY */
- mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
- bmcr = MII_BMCR_AN_ENABLE | MII_BMCR_RESTART_AN | MII_BMCR_RESET;
+ mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
+ bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
mii_write (dev, phy_addr, MII_BMCR, bmcr);
mdelay(1);
} else {
@@ -1574,7 +1572,7 @@ mii_set_media (struct net_device *dev)
/* 2) PHY Reset */
bmcr = mii_read (dev, phy_addr, MII_BMCR);
- bmcr |= MII_BMCR_RESET;
+ bmcr |= BMCR_RESET;
mii_write (dev, phy_addr, MII_BMCR, bmcr);
/* 3) Power Down */
@@ -1583,25 +1581,25 @@ mii_set_media (struct net_device *dev)
mdelay (100); /* wait a certain time */
/* 4) Advertise nothing */
- mii_write (dev, phy_addr, MII_ANAR, 0);
+ mii_write (dev, phy_addr, MII_ADVERTISE, 0);
/* 5) Set media and Power Up */
- bmcr = MII_BMCR_POWER_DOWN;
+ bmcr = BMCR_PDOWN;
if (np->speed == 100) {
- bmcr |= MII_BMCR_SPEED_100;
+ bmcr |= BMCR_SPEED100;
printk (KERN_INFO "Manual 100 Mbps, ");
} else if (np->speed == 10) {
printk (KERN_INFO "Manual 10 Mbps, ");
}
if (np->full_duplex) {
- bmcr |= MII_BMCR_DUPLEX_MODE;
+ bmcr |= BMCR_FULLDPLX;
printk (KERN_CONT "Full duplex\n");
} else {
printk (KERN_CONT "Half duplex\n");
}
#if 0
/* Set 1000BaseT Master/Slave setting */
- mscr = mii_read (dev, phy_addr, MII_MSCR);
+ mscr = mii_read (dev, phy_addr, MII_CTRL1000);
mscr |= MII_MSCR_CFG_ENABLE;
mscr &= ~MII_MSCR_CFG_VALUE = 0;
#endif
@@ -1624,7 +1622,7 @@ mii_get_media_pcs (struct net_device *dev)
bmsr = mii_read (dev, phy_addr, PCS_BMSR);
if (np->an_enable) {
- if (!(bmsr & MII_BMSR_AN_COMPLETE)) {
+ if (!(bmsr & BMSR_ANEGCOMPLETE)) {
/* Auto-Negotiation not completed */
return -1;
}
@@ -1649,7 +1647,7 @@ mii_get_media_pcs (struct net_device *dev)
} else {
__u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
printk (KERN_INFO "Operating at 1000 Mbps, ");
- if (bmcr & MII_BMCR_DUPLEX_MODE) {
+ if (bmcr & BMCR_FULLDPLX) {
printk (KERN_CONT "Full duplex\n");
} else {
printk (KERN_CONT "Half duplex\n");
@@ -1682,7 +1680,7 @@ mii_set_media_pcs (struct net_device *dev)
if (np->an_enable) {
/* Advertise capabilities */
esr = mii_read (dev, phy_addr, PCS_ESR);
- anar = mii_read (dev, phy_addr, MII_ANAR) &
+ anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
~PCS_ANAR_HALF_DUPLEX &
~PCS_ANAR_FULL_DUPLEX;
if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
@@ -1690,22 +1688,21 @@ mii_set_media_pcs (struct net_device *dev)
if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
anar |= PCS_ANAR_FULL_DUPLEX;
anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
- mii_write (dev, phy_addr, MII_ANAR, anar);
+ mii_write (dev, phy_addr, MII_ADVERTISE, anar);
/* Soft reset PHY */
- mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
- bmcr = MII_BMCR_AN_ENABLE | MII_BMCR_RESTART_AN |
- MII_BMCR_RESET;
+ mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
+ bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
mii_write (dev, phy_addr, MII_BMCR, bmcr);
mdelay(1);
} else {
/* Force speed setting */
/* PHY Reset */
- bmcr = MII_BMCR_RESET;
+ bmcr = BMCR_RESET;
mii_write (dev, phy_addr, MII_BMCR, bmcr);
mdelay(10);
if (np->full_duplex) {
- bmcr = MII_BMCR_DUPLEX_MODE;
+ bmcr = BMCR_FULLDPLX;
printk (KERN_INFO "Manual full duplex\n");
} else {
bmcr = 0;
@@ -1715,7 +1712,7 @@ mii_set_media_pcs (struct net_device *dev)
mdelay(10);
/* Advertise nothing */
- mii_write (dev, phy_addr, MII_ANAR, 0);
+ mii_write (dev, phy_addr, MII_ADVERTISE, 0);
}
return 0;
}
diff --git a/drivers/net/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 7caab3d26a9..ba0adcafa55 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
+#include <linux/mii.h>
#include <linux/bitops.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
@@ -271,20 +272,9 @@ enum RFS_bits {
#define MII_RESET_TIME_OUT 10000
/* MII register */
enum _mii_reg {
- MII_BMCR = 0,
- MII_BMSR = 1,
- MII_PHY_ID1 = 2,
- MII_PHY_ID2 = 3,
- MII_ANAR = 4,
- MII_ANLPAR = 5,
- MII_ANER = 6,
- MII_ANNPT = 7,
- MII_ANLPRNP = 8,
- MII_MSCR = 9,
- MII_MSSR = 10,
- MII_ESR = 15,
MII_PHY_SCR = 16,
};
+
/* PCS register */
enum _pcs_reg {
PCS_BMCR = 0,
@@ -297,102 +287,6 @@ enum _pcs_reg {
PCS_ESR = 15,
};
-/* Basic Mode Control Register */
-enum _mii_bmcr {
- MII_BMCR_RESET = 0x8000,
- MII_BMCR_LOOP_BACK = 0x4000,
- MII_BMCR_SPEED_LSB = 0x2000,
- MII_BMCR_AN_ENABLE = 0x1000,
- MII_BMCR_POWER_DOWN = 0x0800,
- MII_BMCR_ISOLATE = 0x0400,
- MII_BMCR_RESTART_AN = 0x0200,
- MII_BMCR_DUPLEX_MODE = 0x0100,
- MII_BMCR_COL_TEST = 0x0080,
- MII_BMCR_SPEED_MSB = 0x0040,
- MII_BMCR_SPEED_RESERVED = 0x003f,
- MII_BMCR_SPEED_10 = 0,
- MII_BMCR_SPEED_100 = MII_BMCR_SPEED_LSB,
- MII_BMCR_SPEED_1000 = MII_BMCR_SPEED_MSB,
-};
-
-/* Basic Mode Status Register */
-enum _mii_bmsr {
- MII_BMSR_100BT4 = 0x8000,
- MII_BMSR_100BX_FD = 0x4000,
- MII_BMSR_100BX_HD = 0x2000,
- MII_BMSR_10BT_FD = 0x1000,
- MII_BMSR_10BT_HD = 0x0800,
- MII_BMSR_100BT2_FD = 0x0400,
- MII_BMSR_100BT2_HD = 0x0200,
- MII_BMSR_EXT_STATUS = 0x0100,
- MII_BMSR_PREAMBLE_SUPP = 0x0040,
- MII_BMSR_AN_COMPLETE = 0x0020,
- MII_BMSR_REMOTE_FAULT = 0x0010,
- MII_BMSR_AN_ABILITY = 0x0008,
- MII_BMSR_LINK_STATUS = 0x0004,
- MII_BMSR_JABBER_DETECT = 0x0002,
- MII_BMSR_EXT_CAP = 0x0001,
-};
-
-/* ANAR */
-enum _mii_anar {
- MII_ANAR_NEXT_PAGE = 0x8000,
- MII_ANAR_REMOTE_FAULT = 0x4000,
- MII_ANAR_ASYMMETRIC = 0x0800,
- MII_ANAR_PAUSE = 0x0400,
- MII_ANAR_100BT4 = 0x0200,
- MII_ANAR_100BX_FD = 0x0100,
- MII_ANAR_100BX_HD = 0x0080,
- MII_ANAR_10BT_FD = 0x0020,
- MII_ANAR_10BT_HD = 0x0010,
- MII_ANAR_SELECTOR = 0x001f,
- MII_IEEE8023_CSMACD = 0x0001,
-};
-
-/* ANLPAR */
-enum _mii_anlpar {
- MII_ANLPAR_NEXT_PAGE = MII_ANAR_NEXT_PAGE,
- MII_ANLPAR_REMOTE_FAULT = MII_ANAR_REMOTE_FAULT,
- MII_ANLPAR_ASYMMETRIC = MII_ANAR_ASYMMETRIC,
- MII_ANLPAR_PAUSE = MII_ANAR_PAUSE,
- MII_ANLPAR_100BT4 = MII_ANAR_100BT4,
- MII_ANLPAR_100BX_FD = MII_ANAR_100BX_FD,
- MII_ANLPAR_100BX_HD = MII_ANAR_100BX_HD,
- MII_ANLPAR_10BT_FD = MII_ANAR_10BT_FD,
- MII_ANLPAR_10BT_HD = MII_ANAR_10BT_HD,
- MII_ANLPAR_SELECTOR = MII_ANAR_SELECTOR,
-};
-
-/* Auto-Negotiation Expansion Register */
-enum _mii_aner {
- MII_ANER_PAR_DETECT_FAULT = 0x0010,
- MII_ANER_LP_NEXTPAGABLE = 0x0008,
- MII_ANER_NETXTPAGABLE = 0x0004,
- MII_ANER_PAGE_RECEIVED = 0x0002,
- MII_ANER_LP_NEGOTIABLE = 0x0001,
-};
-
-/* MASTER-SLAVE Control Register */
-enum _mii_mscr {
- MII_MSCR_TEST_MODE = 0xe000,
- MII_MSCR_CFG_ENABLE = 0x1000,
- MII_MSCR_CFG_VALUE = 0x0800,
- MII_MSCR_PORT_VALUE = 0x0400,
- MII_MSCR_1000BT_FD = 0x0200,
- MII_MSCR_1000BT_HD = 0X0100,
-};
-
-/* MASTER-SLAVE Status Register */
-enum _mii_mssr {
- MII_MSSR_CFG_FAULT = 0x8000,
- MII_MSSR_CFG_RES = 0x4000,
- MII_MSSR_LOCAL_RCV_STATUS = 0x2000,
- MII_MSSR_REMOTE_RCVR = 0x1000,
- MII_MSSR_LP_1000BT_FD = 0x0800,
- MII_MSSR_LP_1000BT_HD = 0x0400,
- MII_MSSR_IDLE_ERR_COUNT = 0x00ff,
-};
-
/* IEEE Extened Status Register */
enum _mii_esr {
MII_ESR_1000BX_FD = 0x8000,
diff --git a/drivers/net/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 4793df843c2..dcd7f7a71ad 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -464,7 +464,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_stop = netdev_close,
.ndo_start_xmit = start_tx,
.ndo_get_stats = get_stats,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = tx_timeout,
.ndo_change_mtu = change_mtu,
diff --git a/drivers/net/dnet.c b/drivers/net/ethernet/dnet.c
index c1063d1540c..c1063d1540c 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/ethernet/dnet.c
diff --git a/drivers/net/dnet.h b/drivers/net/ethernet/dnet.h
index 37f5b30fa78..37f5b30fa78 100644
--- a/drivers/net/dnet.h
+++ b/drivers/net/ethernet/dnet.h
diff --git a/drivers/net/ethernet/emulex/Kconfig b/drivers/net/ethernet/emulex/Kconfig
new file mode 100644
index 00000000000..7a28a643394
--- /dev/null
+++ b/drivers/net/ethernet/emulex/Kconfig
@@ -0,0 +1,23 @@
+#
+# Emulex driver configuration
+#
+
+config NET_VENDOR_EMULEX
+ bool "Emulex devices"
+ default y
+ depends on PCI && INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Emulex cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_EMULEX
+
+source "drivers/net/ethernet/emulex/benet/Kconfig"
+
+endif # NET_VENDOR_EMULEX
diff --git a/drivers/net/ethernet/emulex/Makefile b/drivers/net/ethernet/emulex/Makefile
new file mode 100644
index 00000000000..ea8ec574d45
--- /dev/null
+++ b/drivers/net/ethernet/emulex/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Emulex device drivers.
+#
+
+obj-$(CONFIG_BE2NET) += benet/
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
new file mode 100644
index 00000000000..804db04a2bd
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -0,0 +1,6 @@
+config BE2NET
+ tristate "ServerEngines' 10Gbps NIC - BladeEngine"
+ depends on PCI && INET
+ ---help---
+ This driver implements the NIC functionality for ServerEngines'
+ 10Gbps network adapter - BladeEngine.
diff --git a/drivers/net/benet/Makefile b/drivers/net/ethernet/emulex/benet/Makefile
index a60cd805113..a60cd805113 100644
--- a/drivers/net/benet/Makefile
+++ b/drivers/net/ethernet/emulex/benet/Makefile
diff --git a/drivers/net/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c85768cd1b1..644e8fed836 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -29,6 +29,7 @@
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/u64_stats_sync.h>
#include "be_hw.h"
@@ -136,6 +137,11 @@ static inline void *queue_tail_node(struct be_queue_info *q)
return q->dma_mem.va + q->tail * q->entry_size;
}
+static inline void *queue_index_node(struct be_queue_info *q, u16 index)
+{
+ return q->dma_mem.va + index * q->entry_size;
+}
+
static inline void queue_head_inc(struct be_queue_info *q)
{
index_inc(&q->head, q->len);
@@ -167,15 +173,15 @@ struct be_mcc_obj {
};
struct be_tx_stats {
- u32 be_tx_reqs; /* number of TX requests initiated */
- u32 be_tx_stops; /* number of times TX Q was stopped */
- u32 be_tx_wrbs; /* number of tx WRBs used */
- u32 be_tx_compl; /* number of tx completion entries processed */
- ulong be_tx_jiffies;
- u64 be_tx_bytes;
- u64 be_tx_bytes_prev;
- u64 be_tx_pkts;
- u32 be_tx_rate;
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 tx_reqs;
+ u64 tx_wrbs;
+ u64 tx_compl;
+ ulong tx_jiffies;
+ u32 tx_stops;
+ struct u64_stats_sync sync;
+ struct u64_stats_sync sync_compl;
};
struct be_tx_obj {
@@ -195,22 +201,20 @@ struct be_rx_page_info {
};
struct be_rx_stats {
- u32 rx_post_fail;/* number of ethrx buffer alloc failures */
- u32 rx_polls; /* number of times NAPI called poll function */
- u32 rx_events; /* number of ucast rx completion events */
- u32 rx_compl; /* number of rx completion entries processed */
- ulong rx_dropped; /* number of skb allocation errors */
- ulong rx_jiffies;
u64 rx_bytes;
- u64 rx_bytes_prev;
u64 rx_pkts;
- u32 rx_rate;
+ u64 rx_pkts_prev;
+ ulong rx_jiffies;
+ u32 rx_drops_no_skbs; /* skb allocation errors */
+ u32 rx_drops_no_frags; /* HW has no fetched frags */
+ u32 rx_post_fail; /* page post alloc failures */
+ u32 rx_polls; /* NAPI calls */
+ u32 rx_events;
+ u32 rx_compl;
u32 rx_mcast_pkts;
- u32 rxcp_err; /* Num rx completion entries w/ err set. */
- ulong rx_fps_jiffies; /* jiffies at last FPS calc */
- u32 rx_frags;
- u32 prev_rx_frags;
- u32 rx_fps; /* Rx frags per second */
+ u32 rx_compl_err; /* completions with err set */
+ u32 rx_pps; /* pkts per second */
+ struct u64_stats_sync sync;
};
struct be_rx_compl_info {
@@ -218,7 +222,7 @@ struct be_rx_compl_info {
u16 vlan_tag;
u16 pkt_size;
u16 rxq_idx;
- u16 mac_id;
+ u16 port;
u8 vlanf;
u8 num_rcvd;
u8 err;
@@ -246,44 +250,41 @@ struct be_rx_obj {
};
struct be_drv_stats {
- u8 be_on_die_temperature;
- u64 be_tx_events;
- u64 eth_red_drops;
- u64 rx_drops_no_pbuf;
- u64 rx_drops_no_txpb;
- u64 rx_drops_no_erx_descr;
- u64 rx_drops_no_tpre_descr;
- u64 rx_drops_too_many_frags;
- u64 rx_drops_invalid_ring;
- u64 forwarded_packets;
- u64 rx_drops_mtu;
- u64 rx_crc_errors;
- u64 rx_alignment_symbol_errors;
- u64 rx_pause_frames;
- u64 rx_priority_pause_frames;
- u64 rx_control_frames;
- u64 rx_in_range_errors;
- u64 rx_out_range_errors;
- u64 rx_frame_too_long;
- u64 rx_address_match_errors;
- u64 rx_dropped_too_small;
- u64 rx_dropped_too_short;
- u64 rx_dropped_header_too_small;
- u64 rx_dropped_tcp_length;
- u64 rx_dropped_runt;
- u64 rx_ip_checksum_errs;
- u64 rx_tcp_checksum_errs;
- u64 rx_udp_checksum_errs;
- u64 rx_switched_unicast_packets;
- u64 rx_switched_multicast_packets;
- u64 rx_switched_broadcast_packets;
- u64 tx_pauseframes;
- u64 tx_priority_pauseframes;
- u64 tx_controlframes;
- u64 rxpp_fifo_overflow_drop;
- u64 rx_input_fifo_overflow_drop;
- u64 pmem_fifo_overflow_drop;
- u64 jabber_events;
+ u32 be_on_die_temperature;
+ u32 tx_events;
+ u32 eth_red_drops;
+ u32 rx_drops_no_pbuf;
+ u32 rx_drops_no_txpb;
+ u32 rx_drops_no_erx_descr;
+ u32 rx_drops_no_tpre_descr;
+ u32 rx_drops_too_many_frags;
+ u32 rx_drops_invalid_ring;
+ u32 forwarded_packets;
+ u32 rx_drops_mtu;
+ u32 rx_crc_errors;
+ u32 rx_alignment_symbol_errors;
+ u32 rx_pause_frames;
+ u32 rx_priority_pause_frames;
+ u32 rx_control_frames;
+ u32 rx_in_range_errors;
+ u32 rx_out_range_errors;
+ u32 rx_frame_too_long;
+ u32 rx_address_match_errors;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rx_ip_checksum_errs;
+ u32 rx_tcp_checksum_errs;
+ u32 rx_udp_checksum_errs;
+ u32 tx_pauseframes;
+ u32 tx_priority_pauseframes;
+ u32 tx_controlframes;
+ u32 rxpp_fifo_overflow_drop;
+ u32 rx_input_fifo_overflow_drop;
+ u32 pmem_fifo_overflow_drop;
+ u32 jabber_events;
};
struct be_vf_cfg {
@@ -302,7 +303,6 @@ struct be_adapter {
u8 __iomem *csr;
u8 __iomem *db; /* Door Bell */
- u8 __iomem *pcicfg; /* PCI config space */
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
@@ -338,7 +338,7 @@ struct be_adapter {
u8 vlan_tag[VLAN_N_VID];
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio; /* Recommended Priority */
- struct be_dma_mem mc_cmd_mem;
+ struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
struct be_dma_mem stats_cmd;
/* Work queue used to perform periodic tasks like getting statistics */
@@ -352,7 +352,6 @@ struct be_adapter {
u32 beacon_state; /* for set_phys_id */
bool eeh_err;
- bool link_up;
u32 port_num;
bool promiscuous;
bool wol;
@@ -385,6 +384,8 @@ struct be_adapter {
#define BE_GEN2 2
#define BE_GEN3 3
+#define ON 1
+#define OFF 0
#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
(adapter->pdev->device == OC_DEVICE_ID4))
@@ -525,8 +526,7 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter)
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
-extern void netdev_stats_update(struct be_adapter *adapter);
+extern void be_link_status_update(struct be_adapter *adapter, u32 link_status);
extern void be_parse_stats(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
#endif /* BE_H */
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 054fa67bc4e..e0ff96193c4 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,7 +19,12 @@
#include "be_cmds.h"
/* Must be a power of 2 or else MODULO will BUG_ON */
-static int be_get_temp_freq = 32;
+static int be_get_temp_freq = 64;
+
+static inline void *embedded_payload(struct be_mcc_wrb *wrb)
+{
+ return wrb->payload.embedded_payload;
+}
static void be_mcc_notify(struct be_adapter *adapter)
{
@@ -82,31 +87,23 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
(compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
(compl->tag1 == CMD_SUBSYSTEM_ETH)) {
- if (adapter->generation == BE_GEN3) {
- if (lancer_chip(adapter)) {
- struct lancer_cmd_resp_pport_stats
- *resp = adapter->stats_cmd.va;
- be_dws_le_to_cpu(&resp->pport_stats,
- sizeof(resp->pport_stats));
- } else {
- struct be_cmd_resp_get_stats_v1 *resp =
- adapter->stats_cmd.va;
-
- be_dws_le_to_cpu(&resp->hw_stats,
- sizeof(resp->hw_stats));
- }
- } else {
- struct be_cmd_resp_get_stats_v0 *resp =
- adapter->stats_cmd.va;
-
- be_dws_le_to_cpu(&resp->hw_stats,
- sizeof(resp->hw_stats));
- }
be_parse_stats(adapter);
- netdev_stats_update(adapter);
adapter->stats_cmd_sent = false;
}
+ if (compl->tag0 ==
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
+ struct be_mcc_wrb *mcc_wrb =
+ queue_index_node(&adapter->mcc_obj.q,
+ compl->tag1);
+ struct be_cmd_resp_get_cntl_addnl_attribs *resp =
+ embedded_payload(mcc_wrb);
+ adapter->drv_stats.be_on_die_temperature =
+ resp->on_die_temperature;
+ }
} else {
+ if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
+ be_get_temp_freq = 0;
+
if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
compl_status == MCC_STATUS_ILLEGAL_REQUEST)
goto done;
@@ -131,8 +128,7 @@ done:
static void be_async_link_state_process(struct be_adapter *adapter,
struct be_async_event_link_state *evt)
{
- be_link_status_update(adapter,
- evt->port_link_status == ASYNC_EVENT_LINK_UP);
+ be_link_status_update(adapter, evt->port_link_status);
}
/* Grp5 CoS Priority evt */
@@ -162,7 +158,7 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
struct be_async_event_grp5_pvid_state *evt)
{
if (evt->enabled)
- adapter->pvid = le16_to_cpu(evt->tag);
+ adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
else
adapter->pvid = 0;
}
@@ -420,16 +416,12 @@ int be_cmd_POST(struct be_adapter *adapter)
} else {
return 0;
}
- } while (timeout < 40);
+ } while (timeout < 60);
dev_err(dev, "POST timeout; stage=0x%x\n", stage);
return -1;
}
-static inline void *embedded_payload(struct be_mcc_wrb *wrb)
-{
- return wrb->payload.embedded_payload;
-}
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
@@ -623,7 +615,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
return status;
}
-/* Uses mbox */
+/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
u8 type, bool permanent, u32 if_handle)
{
@@ -631,10 +623,13 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
- wrb = wrb_from_mbox(adapter);
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
@@ -651,13 +646,14 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
req->permanent = 0;
}
- status = be_mbox_notify_wait(adapter);
+ status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
}
- mutex_unlock(&adapter->mbox_lock);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1119,20 +1115,22 @@ err:
}
/* Create an rx filtering policy configuration on an i/f
- * Uses mbox
+ * Uses MCCQ
*/
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
- u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
- u32 domain)
+ u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_create *req;
int status;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
- wrb = wrb_from_mbox(adapter);
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
@@ -1144,23 +1142,25 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
- req->pmac_invalid = pmac_invalid;
- if (!pmac_invalid)
+ if (mac)
memcpy(req->mac_addr, mac, ETH_ALEN);
+ else
+ req->pmac_invalid = true;
- status = be_mbox_notify_wait(adapter);
+ status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
*if_handle = le32_to_cpu(resp->interface_id);
- if (!pmac_invalid)
+ if (mac)
*pmac_id = le32_to_cpu(resp->pmac_id);
}
- mutex_unlock(&adapter->mbox_lock);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
-/* Uses mbox */
+/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
{
struct be_mcc_wrb *wrb;
@@ -1170,10 +1170,16 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
if (adapter->eeh_err)
return -EIO;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ if (!interface_id)
+ return 0;
- wrb = wrb_from_mbox(adapter);
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
@@ -1185,10 +1191,9 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
req->hdr.domain = domain;
req->interface_id = cpu_to_le32(interface_id);
- status = be_mbox_notify_wait(adapter);
-
- mutex_unlock(&adapter->mbox_lock);
-
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1282,8 +1287,8 @@ err:
}
/* Uses synchronous mcc */
-int be_cmd_link_status_query(struct be_adapter *adapter,
- bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom)
+int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
+ u16 *link_speed, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_link_status *req;
@@ -1298,8 +1303,6 @@ int be_cmd_link_status_query(struct be_adapter *adapter,
}
req = embedded_payload(wrb);
- *link_up = false;
-
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
@@ -1310,9 +1313,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter,
if (!status) {
struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
- *link_up = true;
*link_speed = le16_to_cpu(resp->link_speed);
- *mac_speed = resp->mac_speed;
+ if (mac_speed)
+ *mac_speed = resp->mac_speed;
}
}
@@ -1326,10 +1329,13 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_cntl_addnl_attribs *req;
+ u16 mccq_index;
int status;
spin_lock_bh(&adapter->mcc_lock);
+ mccq_index = adapter->mcc_obj.q.head;
+
wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
@@ -1343,16 +1349,9 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_get_cntl_addnl_attribs *resp =
- embedded_payload(wrb);
- adapter->drv_stats.be_on_die_temperature =
- resp->on_die_temperature;
- }
- /* If IOCTL fails once, do not bother issuing it again */
- else
- be_get_temp_freq = 0;
+ wrb->tag1 = mccq_index;
+
+ be_mcc_notify(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
@@ -1454,7 +1453,7 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
memcpy(buf + offset,
resp->data_buffer,
- resp->read_log_length);
+ le32_to_cpu(resp->read_log_length));
} else {
dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
goto err;
@@ -1469,32 +1468,37 @@ err:
spin_unlock_bh(&adapter->mcc_lock);
}
-/* Uses Mbox */
-int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
+/* Uses synchronous mcc */
+int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
+ char *fw_on_flash)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_fw_version *req;
int status;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_GET_FW_VERSION);
-
+ OPCODE_COMMON_GET_FW_VERSION);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
+ OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
- status = be_mbox_notify_wait(adapter);
+ status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
- strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
+ strcpy(fw_ver, resp->firmware_version_string);
+ if (fw_on_flash)
+ strcpy(fw_on_flash, resp->fw_on_flash_version_string);
}
-
- mutex_unlock(&adapter->mbox_lock);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1573,27 +1577,14 @@ err:
return status;
}
-/* Uses MCC for this command as it may be called in BH context
- * Uses synchronous mcc
- */
-int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en)
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
struct be_mcc_wrb *wrb;
- struct be_cmd_req_rx_filter *req;
- struct be_dma_mem promiscous_cmd;
+ struct be_dma_mem *mem = &adapter->rx_filter;
+ struct be_cmd_req_rx_filter *req = mem->va;
struct be_sge *sge;
int status;
- memset(&promiscous_cmd, 0, sizeof(struct be_dma_mem));
- promiscous_cmd.size = sizeof(struct be_cmd_req_rx_filter);
- promiscous_cmd.va = pci_alloc_consistent(adapter->pdev,
- promiscous_cmd.size, &promiscous_cmd.dma);
- if (!promiscous_cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure\n");
- return -ENOMEM;
- }
-
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -1601,80 +1592,39 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en)
status = -EBUSY;
goto err;
}
-
- req = promiscous_cmd.va;
sge = nonembedded_sgl(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
- OPCODE_COMMON_NTWK_RX_FILTER);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
-
- req->if_id = cpu_to_le32(adapter->if_handle);
- req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);
- if (en)
- req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);
-
- sge->pa_hi = cpu_to_le32(upper_32_bits(promiscous_cmd.dma));
- sge->pa_lo = cpu_to_le32(promiscous_cmd.dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(promiscous_cmd.size);
-
- status = be_mcc_notify_wait(adapter);
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- pci_free_consistent(adapter->pdev, promiscous_cmd.size,
- promiscous_cmd.va, promiscous_cmd.dma);
- return status;
-}
-
-/*
- * Uses MCC for this command as it may be called in BH context
- * (mc == NULL) => multicast promiscuous
- */
-int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
- struct net_device *netdev, struct be_dma_mem *mem)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_mcast_mac_config *req = mem->va;
- struct be_sge *sge;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- sge = nonembedded_sgl(wrb);
- memset(req, 0, sizeof(*req));
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
- OPCODE_COMMON_NTWK_MULTICAST_SET);
sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(mem->size);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_NTWK_RX_FILTER);
+ memset(req, 0, sizeof(*req));
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
- req->interface_id = if_id;
- if (netdev) {
- int i;
+ req->if_id = cpu_to_le32(adapter->if_handle);
+ if (flags & IFF_PROMISC) {
+ req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
+ if (value == ON)
+ req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
+ } else if (flags & IFF_ALLMULTI) {
+ req->if_flags_mask = req->if_flags =
+ cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ } else {
struct netdev_hw_addr *ha;
+ int i = 0;
- req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
-
- i = 0;
- netdev_for_each_mc_addr(ha, netdev)
- memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
- } else {
- req->promiscuous = 1;
+ req->if_flags_mask = req->if_flags =
+ cpu_to_le32(BE_IF_FLAGS_MULTICAST);
+ req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev));
+ netdev_for_each_mc_addr(ha, adapter->netdev)
+ memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
}
status = be_mcc_notify_wait(adapter);
-
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -2020,7 +1970,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->flash_compl,
- msecs_to_jiffies(12000)))
+ msecs_to_jiffies(40000)))
status = -1;
else
status = adapter->flash_status;
@@ -2268,11 +2218,13 @@ err:
return status;
}
-int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
+int be_cmd_get_phy_info(struct be_adapter *adapter,
+ struct be_phy_info *phy_info)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_phy_info *req;
struct be_sge *sge;
+ struct be_dma_mem cmd;
int status;
spin_lock_bh(&adapter->mcc_lock);
@@ -2282,8 +2234,16 @@ int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
status = -EBUSY;
goto err;
}
+ cmd.size = sizeof(struct be_cmd_req_get_phy_info);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
+ &cmd.dma);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ status = -ENOMEM;
+ goto err;
+ }
- req = cmd->va;
+ req = cmd.va;
sge = nonembedded_sgl(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
@@ -2293,11 +2253,20 @@ int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
OPCODE_COMMON_GET_PHY_DETAILS,
sizeof(*req));
- sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
- sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(cmd->size);
+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma));
+ sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(cmd.size);
status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_phy_info *resp_phy_info =
+ cmd.va + sizeof(struct be_cmd_req_hdr);
+ phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
+ phy_info->interface_type =
+ le16_to_cpu(resp_phy_info->interface_type);
+ }
+ pci_free_consistent(adapter->pdev, cmd.size,
+ cmd.va, cmd.dma);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 8e4d48824fe..a35cd03fac4 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -89,9 +89,10 @@ struct be_async_event_trailer {
};
enum {
- ASYNC_EVENT_LINK_DOWN = 0x0,
- ASYNC_EVENT_LINK_UP = 0x1
+ LINK_DOWN = 0x0,
+ LINK_UP = 0x1
};
+#define LINK_STATUS_MASK 0x1
/* When the event code of an async trailer is link-state, the mcc_compl
* must be interpreted as follows
@@ -693,8 +694,7 @@ struct be_cmd_resp_get_stats_v0 {
struct be_hw_stats_v0 hw_stats;
};
-#define make_64bit_val(hi_32, lo_32) (((u64)hi_32<<32) | lo_32)
-struct lancer_cmd_pport_stats {
+struct lancer_pport_stats {
u32 tx_packets_lo;
u32 tx_packets_hi;
u32 tx_unicast_packets_lo;
@@ -871,16 +871,16 @@ struct lancer_cmd_req_pport_stats {
struct be_cmd_req_hdr hdr;
union {
struct pport_stats_params params;
- u8 rsvd[sizeof(struct lancer_cmd_pport_stats)];
+ u8 rsvd[sizeof(struct lancer_pport_stats)];
} cmd_params;
};
struct lancer_cmd_resp_pport_stats {
struct be_cmd_resp_hdr hdr;
- struct lancer_cmd_pport_stats pport_stats;
+ struct lancer_pport_stats pport_stats;
};
-static inline struct lancer_cmd_pport_stats*
+static inline struct lancer_pport_stats*
pport_stats_from_cmd(struct be_adapter *adapter)
{
struct lancer_cmd_resp_pport_stats *cmd = adapter->stats_cmd.va;
@@ -910,21 +910,12 @@ struct be_cmd_req_vlan_config {
u16 normal_vlan[64];
} __packed;
-/******************** Multicast MAC Config *******************/
+/******************* RX FILTER ******************************/
#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
struct macaddr {
u8 byte[ETH_ALEN];
};
-struct be_cmd_req_mcast_mac_config {
- struct be_cmd_req_hdr hdr;
- u16 num_mac;
- u8 promiscuous;
- u8 interface_id;
- struct macaddr mac[BE_MAX_MC];
-} __packed;
-
-/******************* RX FILTER ******************************/
struct be_cmd_req_rx_filter {
struct be_cmd_req_hdr hdr;
u32 global_flags_mask;
@@ -932,11 +923,10 @@ struct be_cmd_req_rx_filter {
u32 if_flags_mask;
u32 if_flags;
u32 if_id;
- u32 multicast_num;
- struct macaddr mac[BE_MAX_MC];
+ u32 mcast_num;
+ struct macaddr mcast_mac[BE_MAX_MC];
};
-
/******************** Link Status Query *******************/
struct be_cmd_req_link_status {
struct be_cmd_req_hdr hdr;
@@ -1056,6 +1046,12 @@ struct be_cmd_resp_modify_eq_delay {
/******************** Get FW Config *******************/
#define BE_FUNCTION_CAPS_RSS 0x2
+/* The HW can come up in either of the following multi-channel modes
+ * based on the skew/IPL.
+ */
+#define FLEX10_MODE 0x400
+#define VNIC_MODE 0x20000
+#define UMC_ENABLED 0x1000000
struct be_cmd_req_query_fw_cfg {
struct be_cmd_req_hdr hdr;
u32 rsvd[31];
@@ -1254,14 +1250,19 @@ struct be_cmd_req_get_phy_info {
struct be_cmd_req_hdr hdr;
u8 rsvd0[24];
};
-struct be_cmd_resp_get_phy_info {
- struct be_cmd_req_hdr hdr;
+
+struct be_phy_info {
u16 phy_type;
u16 interface_type;
u32 misc_params;
u32 future_use[4];
};
+struct be_cmd_resp_get_phy_info {
+ struct be_cmd_req_hdr hdr;
+ struct be_phy_info phy_info;
+};
+
/*********************** Set QOS ***********************/
#define BE_QOS_BITS_NIC 1
@@ -1383,8 +1384,7 @@ struct be_cmd_resp_get_stats_v1 {
struct be_hw_stats_v1 hw_stats;
};
-static inline void *
-hw_stats_from_cmd(struct be_adapter *adapter)
+static inline void *hw_stats_from_cmd(struct be_adapter *adapter)
{
if (adapter->generation == BE_GEN3) {
struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
@@ -1397,34 +1397,6 @@ hw_stats_from_cmd(struct be_adapter *adapter)
}
}
-static inline void *be_port_rxf_stats_from_cmd(struct be_adapter *adapter)
-{
- if (adapter->generation == BE_GEN3) {
- struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
- struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
-
- return &rxf_stats->port[adapter->port_num];
- } else {
- struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
- struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
-
- return &rxf_stats->port[adapter->port_num];
- }
-}
-
-static inline void *be_rxf_stats_from_cmd(struct be_adapter *adapter)
-{
- if (adapter->generation == BE_GEN3) {
- struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
-
- return &hw_stats->rxf;
- } else {
- struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
-
- return &hw_stats->rxf;
- }
-}
-
static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
if (adapter->generation == BE_GEN3) {
@@ -1438,19 +1410,6 @@ static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
}
}
-static inline void *be_pmem_stats_from_cmd(struct be_adapter *adapter)
-{
- if (adapter->generation == BE_GEN3) {
- struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
-
- return &hw_stats->pmem;
- } else {
- struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
-
- return &hw_stats->pmem;
- }
-}
-
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -1460,8 +1419,8 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
u32 pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
- u32 en_flags, u8 *mac, bool pmac_invalid,
- u32 *if_handle, u32 *pmac_id, u32 domain);
+ u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id,
+ u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
@@ -1485,21 +1444,20 @@ extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
struct be_queue_info *q);
extern int be_cmd_link_status_query(struct be_adapter *adapter,
- bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom);
+ u8 *mac_speed, u16 *link_speed, u32 dom);
extern int be_cmd_reset(struct be_adapter *adapter);
extern int be_cmd_get_stats(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd);
extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver);
+extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
+ char *fw_on_flash);
extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
u16 *vtag_array, u32 num, bool untagged,
bool promiscuous);
-extern int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en);
-extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
- struct net_device *netdev, struct be_dma_mem *mem);
+extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
extern int be_cmd_set_flow_control(struct be_adapter *adapter,
u32 tx_fc, u32 rx_fc);
extern int be_cmd_get_flow_control(struct be_adapter *adapter,
@@ -1540,7 +1498,7 @@ extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
extern int be_cmd_get_phy_info(struct be_adapter *adapter,
- struct be_dma_mem *cmd);
+ struct be_phy_info *phy_info);
extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
extern void be_detect_dump_ue(struct be_adapter *adapter);
extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 7fd8130d86e..bf8153ea4ed 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -26,33 +26,18 @@ struct be_ethtool_stat {
int offset;
};
-enum {NETSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
- DRVSTAT};
+enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
offsetof(_struct, field)
-#define NETSTAT_INFO(field) #field, NETSTAT,\
- FIELDINFO(struct net_device_stats,\
- field)
#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
FIELDINFO(struct be_tx_stats, field)
#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
FIELDINFO(struct be_rx_stats, field)
-#define ERXSTAT_INFO(field) #field, ERXSTAT,\
- FIELDINFO(struct be_erx_stats_v1, field)
#define DRVSTAT_INFO(field) #field, DRVSTAT,\
- FIELDINFO(struct be_drv_stats, \
- field)
+ FIELDINFO(struct be_drv_stats, field)
static const struct be_ethtool_stat et_stats[] = {
- {NETSTAT_INFO(rx_packets)},
- {NETSTAT_INFO(tx_packets)},
- {NETSTAT_INFO(rx_bytes)},
- {NETSTAT_INFO(tx_bytes)},
- {NETSTAT_INFO(rx_errors)},
- {NETSTAT_INFO(tx_errors)},
- {NETSTAT_INFO(rx_dropped)},
- {NETSTAT_INFO(tx_dropped)},
- {DRVSTAT_INFO(be_tx_events)},
+ {DRVSTAT_INFO(tx_events)},
{DRVSTAT_INFO(rx_crc_errors)},
{DRVSTAT_INFO(rx_alignment_symbol_errors)},
{DRVSTAT_INFO(rx_pause_frames)},
@@ -71,9 +56,6 @@ static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(rx_ip_checksum_errs)},
{DRVSTAT_INFO(rx_tcp_checksum_errs)},
{DRVSTAT_INFO(rx_udp_checksum_errs)},
- {DRVSTAT_INFO(rx_switched_unicast_packets)},
- {DRVSTAT_INFO(rx_switched_multicast_packets)},
- {DRVSTAT_INFO(rx_switched_broadcast_packets)},
{DRVSTAT_INFO(tx_pauseframes)},
{DRVSTAT_INFO(tx_controlframes)},
{DRVSTAT_INFO(rx_priority_pause_frames)},
@@ -92,28 +74,33 @@ static const struct be_ethtool_stat et_stats[] = {
};
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
-/* Stats related to multi RX queues */
+/* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
+ * are first and second members respectively.
+ */
static const struct be_ethtool_stat et_rx_stats[] = {
- {DRVSTAT_RX_INFO(rx_bytes)},
- {DRVSTAT_RX_INFO(rx_pkts)},
- {DRVSTAT_RX_INFO(rx_rate)},
+ {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
+ {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_polls)},
{DRVSTAT_RX_INFO(rx_events)},
{DRVSTAT_RX_INFO(rx_compl)},
{DRVSTAT_RX_INFO(rx_mcast_pkts)},
{DRVSTAT_RX_INFO(rx_post_fail)},
- {DRVSTAT_RX_INFO(rx_dropped)},
- {ERXSTAT_INFO(rx_drops_no_fragments)}
+ {DRVSTAT_RX_INFO(rx_drops_no_skbs)},
+ {DRVSTAT_RX_INFO(rx_drops_no_frags)}
};
#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
-/* Stats related to multi TX queues */
+/* Stats related to multi TX queues: get_stats routine assumes compl is the
+ * first member
+ */
static const struct be_ethtool_stat et_tx_stats[] = {
- {DRVSTAT_TX_INFO(be_tx_rate)},
- {DRVSTAT_TX_INFO(be_tx_reqs)},
- {DRVSTAT_TX_INFO(be_tx_wrbs)},
- {DRVSTAT_TX_INFO(be_tx_stops)},
- {DRVSTAT_TX_INFO(be_tx_compl)}
+ {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
+ {DRVSTAT_TX_INFO(tx_bytes)},
+ {DRVSTAT_TX_INFO(tx_pkts)},
+ {DRVSTAT_TX_INFO(tx_reqs)},
+ {DRVSTAT_TX_INFO(tx_wrbs)},
+ {DRVSTAT_TX_INFO(tx_compl)},
+ {DRVSTAT_TX_INFO(tx_stops)}
};
#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
@@ -131,14 +118,24 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
#define BE_ONE_PORT_EXT_LOOPBACK 0x2
#define BE_NO_LOOPBACK 0xff
-static void
-be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+static void be_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ char fw_on_flash[FW_VER_LEN];
+
+ memset(fw_on_flash, 0 , sizeof(fw_on_flash));
+ be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
strcpy(drvinfo->driver, DRV_NAME);
strcpy(drvinfo->version, DRV_VER);
strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
+ if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
+ strcat(drvinfo->fw_version, " [");
+ strcat(drvinfo->fw_version, fw_on_flash);
+ strcat(drvinfo->fw_version, "]");
+ }
+
strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = 0;
@@ -260,50 +257,49 @@ be_get_ethtool_stats(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
struct be_rx_obj *rxo;
struct be_tx_obj *txo;
- void *p = NULL;
- int i, j, base;
+ void *p;
+ unsigned int i, j, base = 0, start;
for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
- switch (et_stats[i].type) {
- case NETSTAT:
- p = &netdev->stats;
- break;
- case DRVSTAT:
- p = &adapter->drv_stats;
- break;
- }
-
- p = (u8 *)p + et_stats[i].offset;
- data[i] = (et_stats[i].size == sizeof(u64)) ?
- *(u64 *)p: *(u32 *)p;
+ p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
+ data[i] = *(u32 *)p;
}
+ base += ETHTOOL_STATS_NUM;
- base = ETHTOOL_STATS_NUM;
for_all_rx_queues(adapter, rxo, j) {
- for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) {
- switch (et_rx_stats[i].type) {
- case DRVSTAT_RX:
- p = (u8 *)&rxo->stats + et_rx_stats[i].offset;
- break;
- case ERXSTAT:
- p = (u32 *)be_erx_stats_from_cmd(adapter) +
- rxo->q.id;
- break;
- }
- data[base + j * ETHTOOL_RXSTATS_NUM + i] =
- (et_rx_stats[i].size == sizeof(u64)) ?
- *(u64 *)p: *(u32 *)p;
+ struct be_rx_stats *stats = rx_stats(rxo);
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->sync);
+ data[base] = stats->rx_bytes;
+ data[base + 1] = stats->rx_pkts;
+ } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+
+ for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
+ p = (u8 *)stats + et_rx_stats[i].offset;
+ data[base + i] = *(u32 *)p;
}
+ base += ETHTOOL_RXSTATS_NUM;
}
- base = ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
for_all_tx_queues(adapter, txo, j) {
- for (i = 0; i < ETHTOOL_TXSTATS_NUM; i++) {
- p = (u8 *)&txo->stats + et_tx_stats[i].offset;
- data[base + j * ETHTOOL_TXSTATS_NUM + i] =
- (et_tx_stats[i].size == sizeof(u64)) ?
- *(u64 *)p: *(u32 *)p;
- }
+ struct be_tx_stats *stats = tx_stats(txo);
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->sync_compl);
+ data[base] = stats->tx_compl;
+ } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->sync);
+ for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
+ p = (u8 *)stats + et_tx_stats[i].offset;
+ data[base + i] =
+ (et_tx_stats[i].size == sizeof(u64)) ?
+ *(u64 *)p : *(u32 *)p;
+ }
+ } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+ base += ETHTOOL_TXSTATS_NUM;
}
}
@@ -363,19 +359,15 @@ static int be_get_sset_count(struct net_device *netdev, int stringset)
static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_dma_mem phy_cmd;
- struct be_cmd_resp_get_phy_info *resp;
+ struct be_phy_info phy_info;
u8 mac_speed = 0;
u16 link_speed = 0;
- bool link_up = false;
int status;
- u16 intf_type;
if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
- status = be_cmd_link_status_query(adapter, &link_up,
- &mac_speed, &link_speed, 0);
+ status = be_cmd_link_status_query(adapter, &mac_speed,
+ &link_speed, 0);
- be_link_status_update(adapter, link_up);
/* link_speed is in units of 10 Mbps */
if (link_speed) {
ethtool_cmd_speed_set(ecmd, link_speed*10);
@@ -399,20 +391,9 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
}
- phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
- phy_cmd.size, &phy_cmd.dma,
- GFP_KERNEL);
- if (!phy_cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
- return -ENOMEM;
- }
- status = be_cmd_get_phy_info(adapter, &phy_cmd);
+ status = be_cmd_get_phy_info(adapter, &phy_info);
if (!status) {
- resp = phy_cmd.va;
- intf_type = le16_to_cpu(resp->interface_type);
-
- switch (intf_type) {
+ switch (phy_info.interface_type) {
case PHY_TYPE_XFP_10GB:
case PHY_TYPE_SFP_1GB:
case PHY_TYPE_SFP_PLUS_10GB:
@@ -423,7 +404,7 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
break;
}
- switch (intf_type) {
+ switch (phy_info.interface_type) {
case PHY_TYPE_KR_10GB:
case PHY_TYPE_KX4_10GB:
ecmd->autoneg = AUTONEG_ENABLE;
@@ -441,8 +422,6 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
adapter->port_type = ecmd->port;
adapter->transceiver = ecmd->transceiver;
adapter->autoneg = ecmd->autoneg;
- dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
- phy_cmd.dma);
} else {
ethtool_cmd_speed_set(ecmd, adapter->link_speed);
ecmd->port = adapter->port_type;
@@ -631,7 +610,6 @@ static void
be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
- bool link_up;
u8 mac_speed = 0;
u16 qos_link_speed = 0;
@@ -657,7 +635,7 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
test->flags |= ETH_TEST_FL_FAILED;
}
- if (be_cmd_link_status_query(adapter, &link_up, &mac_speed,
+ if (be_cmd_link_status_query(adapter, &mac_speed,
&qos_link_speed, 0) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = -1;
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 53d658afea2..fbc8a915519 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -175,18 +175,24 @@
#define IMG_TYPE_FCOE_FW_ACTIVE 10
#define IMG_TYPE_FCOE_FW_BACKUP 11
#define IMG_TYPE_NCSI_FW 13
+#define IMG_TYPE_PHY_FW 99
+#define TN_8022 13
+#define ILLEGAL_IOCTL_REQ 2
+#define FLASHROM_OPER_PHY_FLASH 9
+#define FLASHROM_OPER_PHY_SAVE 10
#define FLASHROM_OPER_FLASH 1
#define FLASHROM_OPER_SAVE 2
#define FLASHROM_OPER_REPORT 4
-#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image sz */
-#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM img sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
-#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max fw image size */
-#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM img sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
-#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144) /* Max NSCI image sz */
+#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image size */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM image sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
+#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max firmware image size */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM image sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
+#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144)
+#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144
#define FLASH_NCSI_MAGIC (0x16032009)
#define FLASH_NCSI_DISABLED (0)
@@ -213,6 +219,7 @@
#define FLASH_PXE_BIOS_START_g3 (13107200)
#define FLASH_FCoE_BIOS_START_g3 (13631488)
#define FLASH_REDBOOT_START_g3 (262144)
+#define FLASH_PHY_FW_START_g3 1310720
/************* Rx Packet Type Encoding **************/
#define BE_UNICAST_PACKET 0
diff --git a/drivers/net/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c411bb1845f..d6a232a300a 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -114,6 +114,13 @@ static const char * const ue_status_hi_desc[] = {
"Unknown"
};
+/* Is BE in a multi-channel mode */
+static inline bool be_is_mc(struct be_adapter *adapter) {
+ return (adapter->function_mode & FLEX10_MODE ||
+ adapter->function_mode & VNIC_MODE ||
+ adapter->function_mode & UMC_ENABLED);
+}
+
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
@@ -141,13 +148,15 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
- u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
- u32 reg = ioread32(addr);
- u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ u32 reg, enabled;
if (adapter->eeh_err)
return;
+ pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
+ &reg);
+ enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+
if (!enabled && enable)
reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
else if (enabled && !enable)
@@ -155,7 +164,8 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
else
return;
- iowrite32(reg, addr);
+ pci_write_config_dword(adapter->pdev,
+ PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
@@ -245,14 +255,14 @@ netdev_addr:
static void populate_be2_stats(struct be_adapter *adapter)
{
-
- struct be_drv_stats *drvs = &adapter->drv_stats;
- struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
+ struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+ struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
struct be_port_rxf_stats_v0 *port_stats =
- be_port_rxf_stats_from_cmd(adapter);
- struct be_rxf_stats_v0 *rxf_stats =
- be_rxf_stats_from_cmd(adapter);
+ &rxf_stats->port[adapter->port_num];
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+ be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
drvs->rx_pause_frames = port_stats->rx_pause_frames;
drvs->rx_crc_errors = port_stats->rx_crc_errors;
drvs->rx_control_frames = port_stats->rx_control_frames;
@@ -267,12 +277,10 @@ static void populate_be2_stats(struct be_adapter *adapter)
drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
- drvs->rx_input_fifo_overflow_drop =
- port_stats->rx_input_fifo_overflow;
+ drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
drvs->rx_dropped_header_too_small =
port_stats->rx_dropped_header_too_small;
- drvs->rx_address_match_errors =
- port_stats->rx_address_match_errors;
+ drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
drvs->rx_alignment_symbol_errors =
port_stats->rx_alignment_symbol_errors;
@@ -280,36 +288,30 @@ static void populate_be2_stats(struct be_adapter *adapter)
drvs->tx_controlframes = port_stats->tx_controlframes;
if (adapter->port_num)
- drvs->jabber_events =
- rxf_stats->port1_jabber_events;
+ drvs->jabber_events = rxf_stats->port1_jabber_events;
else
- drvs->jabber_events =
- rxf_stats->port0_jabber_events;
+ drvs->jabber_events = rxf_stats->port0_jabber_events;
drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
drvs->forwarded_packets = rxf_stats->forwarded_packets;
drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
- drvs->rx_drops_no_tpre_descr =
- rxf_stats->rx_drops_no_tpre_descr;
- drvs->rx_drops_too_many_frags =
- rxf_stats->rx_drops_too_many_frags;
+ drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+ drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
static void populate_be3_stats(struct be_adapter *adapter)
{
- struct be_drv_stats *drvs = &adapter->drv_stats;
- struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
-
- struct be_rxf_stats_v1 *rxf_stats =
- be_rxf_stats_from_cmd(adapter);
+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
+ struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+ struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
struct be_port_rxf_stats_v1 *port_stats =
- be_port_rxf_stats_from_cmd(adapter);
+ &rxf_stats->port[adapter->port_num];
+ struct be_drv_stats *drvs = &adapter->drv_stats;
- drvs->rx_priority_pause_frames = 0;
- drvs->pmem_fifo_overflow_drop = 0;
+ be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
drvs->rx_pause_frames = port_stats->rx_pause_frames;
drvs->rx_crc_errors = port_stats->rx_crc_errors;
drvs->rx_control_frames = port_stats->rx_control_frames;
@@ -327,12 +329,10 @@ static void populate_be3_stats(struct be_adapter *adapter)
port_stats->rx_dropped_header_too_small;
drvs->rx_input_fifo_overflow_drop =
port_stats->rx_input_fifo_overflow_drop;
- drvs->rx_address_match_errors =
- port_stats->rx_address_match_errors;
+ drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
drvs->rx_alignment_symbol_errors =
port_stats->rx_alignment_symbol_errors;
- drvs->rxpp_fifo_overflow_drop =
- port_stats->rxpp_fifo_overflow_drop;
+ drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
drvs->tx_pauseframes = port_stats->tx_pauseframes;
drvs->tx_controlframes = port_stats->tx_controlframes;
drvs->jabber_events = port_stats->jabber_events;
@@ -342,10 +342,8 @@ static void populate_be3_stats(struct be_adapter *adapter)
drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
drvs->forwarded_packets = rxf_stats->forwarded_packets;
drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
- drvs->rx_drops_no_tpre_descr =
- rxf_stats->rx_drops_no_tpre_descr;
- drvs->rx_drops_too_many_frags =
- rxf_stats->rx_drops_too_many_frags;
+ drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+ drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
@@ -353,22 +351,15 @@ static void populate_lancer_stats(struct be_adapter *adapter)
{
struct be_drv_stats *drvs = &adapter->drv_stats;
- struct lancer_cmd_pport_stats *pport_stats = pport_stats_from_cmd
- (adapter);
- drvs->rx_priority_pause_frames = 0;
- drvs->pmem_fifo_overflow_drop = 0;
- drvs->rx_pause_frames =
- make_64bit_val(pport_stats->rx_pause_frames_hi,
- pport_stats->rx_pause_frames_lo);
- drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
- pport_stats->rx_crc_errors_lo);
- drvs->rx_control_frames =
- make_64bit_val(pport_stats->rx_control_frames_hi,
- pport_stats->rx_control_frames_lo);
+ struct lancer_pport_stats *pport_stats =
+ pport_stats_from_cmd(adapter);
+
+ be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
+ drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
+ drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
+ drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
- drvs->rx_frame_too_long =
- make_64bit_val(pport_stats->rx_internal_mac_errors_hi,
- pport_stats->rx_frames_too_long_lo);
+ drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
@@ -382,32 +373,36 @@ static void populate_lancer_stats(struct be_adapter *adapter)
pport_stats->rx_dropped_header_too_small;
drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
- drvs->rx_alignment_symbol_errors =
- make_64bit_val(pport_stats->rx_symbol_errors_hi,
- pport_stats->rx_symbol_errors_lo);
+ drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
- drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
- pport_stats->tx_pause_frames_lo);
- drvs->tx_controlframes =
- make_64bit_val(pport_stats->tx_control_frames_hi,
- pport_stats->tx_control_frames_lo);
+ drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
+ drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
drvs->jabber_events = pport_stats->rx_jabbers;
- drvs->rx_drops_no_pbuf = 0;
- drvs->rx_drops_no_txpb = 0;
- drvs->rx_drops_no_erx_descr = 0;
drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
- drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
- pport_stats->num_forwards_lo);
- drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
- pport_stats->rx_drops_mtu_lo);
- drvs->rx_drops_no_tpre_descr = 0;
+ drvs->forwarded_packets = pport_stats->num_forwards_lo;
+ drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
drvs->rx_drops_too_many_frags =
- make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
- pport_stats->rx_drops_too_many_frags_lo);
+ pport_stats->rx_drops_too_many_frags_lo;
+}
+
+static void accumulate_16bit_val(u32 *acc, u16 val)
+{
+#define lo(x) (x & 0xFFFF)
+#define hi(x) (x & 0xFFFF0000)
+ bool wrapped = val < lo(*acc);
+ u32 newacc = hi(*acc) + val;
+
+ if (wrapped)
+ newacc += 65536;
+ ACCESS_ONCE(*acc) = newacc;
}
void be_parse_stats(struct be_adapter *adapter)
{
+ struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
+ struct be_rx_obj *rxo;
+ int i;
+
if (adapter->generation == BE_GEN3) {
if (lancer_chip(adapter))
populate_lancer_stats(adapter);
@@ -416,50 +411,55 @@ void be_parse_stats(struct be_adapter *adapter)
} else {
populate_be2_stats(adapter);
}
+
+ /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
+ for_all_rx_queues(adapter, rxo, i) {
+ /* below erx HW counter can actually wrap around after
+ * 65535. Driver accumulates a 32-bit value
+ */
+ accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
+ (u16)erx->rx_drops_no_fragments[rxo->q.id]);
+ }
}
-void netdev_stats_update(struct be_adapter *adapter)
+static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
+ struct be_adapter *adapter = netdev_priv(netdev);
struct be_drv_stats *drvs = &adapter->drv_stats;
- struct net_device_stats *dev_stats = &adapter->netdev->stats;
struct be_rx_obj *rxo;
struct be_tx_obj *txo;
- unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
+ u64 pkts, bytes;
+ unsigned int start;
int i;
for_all_rx_queues(adapter, rxo, i) {
- pkts += rx_stats(rxo)->rx_pkts;
- bytes += rx_stats(rxo)->rx_bytes;
- mcast += rx_stats(rxo)->rx_mcast_pkts;
- drops += rx_stats(rxo)->rx_dropped;
- /* no space in linux buffers: best possible approximation */
- if (adapter->generation == BE_GEN3) {
- if (!(lancer_chip(adapter))) {
- struct be_erx_stats_v1 *erx =
- be_erx_stats_from_cmd(adapter);
- drops += erx->rx_drops_no_fragments[rxo->q.id];
- }
- } else {
- struct be_erx_stats_v0 *erx =
- be_erx_stats_from_cmd(adapter);
- drops += erx->rx_drops_no_fragments[rxo->q.id];
- }
+ const struct be_rx_stats *rx_stats = rx_stats(rxo);
+ do {
+ start = u64_stats_fetch_begin_bh(&rx_stats->sync);
+ pkts = rx_stats(rxo)->rx_pkts;
+ bytes = rx_stats(rxo)->rx_bytes;
+ } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
+ stats->rx_packets += pkts;
+ stats->rx_bytes += bytes;
+ stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
+ stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
+ rx_stats(rxo)->rx_drops_no_frags;
}
- dev_stats->rx_packets = pkts;
- dev_stats->rx_bytes = bytes;
- dev_stats->multicast = mcast;
- dev_stats->rx_dropped = drops;
- pkts = bytes = 0;
for_all_tx_queues(adapter, txo, i) {
- pkts += tx_stats(txo)->be_tx_pkts;
- bytes += tx_stats(txo)->be_tx_bytes;
+ const struct be_tx_stats *tx_stats = tx_stats(txo);
+ do {
+ start = u64_stats_fetch_begin_bh(&tx_stats->sync);
+ pkts = tx_stats(txo)->tx_pkts;
+ bytes = tx_stats(txo)->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
+ stats->tx_packets += pkts;
+ stats->tx_bytes += bytes;
}
- dev_stats->tx_packets = pkts;
- dev_stats->tx_bytes = bytes;
/* bad pkts received */
- dev_stats->rx_errors = drvs->rx_crc_errors +
+ stats->rx_errors = drvs->rx_crc_errors +
drvs->rx_alignment_symbol_errors +
drvs->rx_in_range_errors +
drvs->rx_out_range_errors +
@@ -468,115 +468,38 @@ void netdev_stats_update(struct be_adapter *adapter)
drvs->rx_dropped_too_short +
drvs->rx_dropped_header_too_small +
drvs->rx_dropped_tcp_length +
- drvs->rx_dropped_runt +
- drvs->rx_tcp_checksum_errs +
- drvs->rx_ip_checksum_errs +
- drvs->rx_udp_checksum_errs;
+ drvs->rx_dropped_runt;
/* detailed rx errors */
- dev_stats->rx_length_errors = drvs->rx_in_range_errors +
+ stats->rx_length_errors = drvs->rx_in_range_errors +
drvs->rx_out_range_errors +
drvs->rx_frame_too_long;
- dev_stats->rx_crc_errors = drvs->rx_crc_errors;
+ stats->rx_crc_errors = drvs->rx_crc_errors;
/* frame alignment errors */
- dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
+ stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
/* receiver fifo overrun */
/* drops_no_pbuf is no per i/f, it's per BE card */
- dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
+ stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
drvs->rx_input_fifo_overflow_drop +
drvs->rx_drops_no_pbuf;
+ return stats;
}
-void be_link_status_update(struct be_adapter *adapter, bool link_up)
+void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
struct net_device *netdev = adapter->netdev;
- /* If link came up or went down */
- if (adapter->link_up != link_up) {
- adapter->link_speed = -1;
- if (link_up) {
- netif_carrier_on(netdev);
- printk(KERN_INFO "%s: Link up\n", netdev->name);
- } else {
- netif_carrier_off(netdev);
- printk(KERN_INFO "%s: Link down\n", netdev->name);
- }
- adapter->link_up = link_up;
- }
-}
-
-/* Update the EQ delay n BE based on the RX frags consumed / sec */
-static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
-{
- struct be_eq_obj *rx_eq = &rxo->rx_eq;
- struct be_rx_stats *stats = &rxo->stats;
- ulong now = jiffies;
- u32 eqd;
-
- if (!rx_eq->enable_aic)
- return;
-
- /* Wrapped around */
- if (time_before(now, stats->rx_fps_jiffies)) {
- stats->rx_fps_jiffies = now;
- return;
- }
-
- /* Update once a second */
- if ((now - stats->rx_fps_jiffies) < HZ)
- return;
-
- stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
- ((now - stats->rx_fps_jiffies) / HZ);
-
- stats->rx_fps_jiffies = now;
- stats->prev_rx_frags = stats->rx_frags;
- eqd = stats->rx_fps / 110000;
- eqd = eqd << 3;
- if (eqd > rx_eq->max_eqd)
- eqd = rx_eq->max_eqd;
- if (eqd < rx_eq->min_eqd)
- eqd = rx_eq->min_eqd;
- if (eqd < 10)
- eqd = 0;
- if (eqd != rx_eq->cur_eqd)
- be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
-
- rx_eq->cur_eqd = eqd;
-}
-
-static u32 be_calc_rate(u64 bytes, unsigned long ticks)
-{
- u64 rate = bytes;
-
- do_div(rate, ticks / HZ);
- rate <<= 3; /* bytes/sec -> bits/sec */
- do_div(rate, 1000000ul); /* MB/Sec */
-
- return rate;
-}
-
-static void be_tx_rate_update(struct be_tx_obj *txo)
-{
- struct be_tx_stats *stats = tx_stats(txo);
- ulong now = jiffies;
-
- /* Wrapped around? */
- if (time_before(now, stats->be_tx_jiffies)) {
- stats->be_tx_jiffies = now;
- return;
- }
-
- /* Update tx rate once in two seconds */
- if ((now - stats->be_tx_jiffies) > 2 * HZ) {
- stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
- - stats->be_tx_bytes_prev,
- now - stats->be_tx_jiffies);
- stats->be_tx_jiffies = now;
- stats->be_tx_bytes_prev = stats->be_tx_bytes;
+ /* when link status changes, link speed must be re-queried from card */
+ adapter->link_speed = -1;
+ if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
+ netif_carrier_on(netdev);
+ dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
+ } else {
+ netif_carrier_off(netdev);
+ dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
}
}
@@ -585,12 +508,14 @@ static void be_tx_stats_update(struct be_tx_obj *txo,
{
struct be_tx_stats *stats = tx_stats(txo);
- stats->be_tx_reqs++;
- stats->be_tx_wrbs += wrb_cnt;
- stats->be_tx_bytes += copied;
- stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
+ u64_stats_update_begin(&stats->sync);
+ stats->tx_reqs++;
+ stats->tx_wrbs += wrb_cnt;
+ stats->tx_bytes += copied;
+ stats->tx_pkts += (gso_segs ? gso_segs : 1);
if (stopped)
- stats->be_tx_stops++;
+ stats->tx_stops++;
+ u64_stats_update_end(&stats->sync);
}
/* Determine number of WRB entries needed to xmit data in an skb */
@@ -718,17 +643,17 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag =
+ const struct skb_frag_struct *frag =
&skb_shinfo(skb)->frags[i];
- busaddr = dma_map_page(dev, frag->page, frag->page_offset,
- frag->size, DMA_TO_DEVICE);
+ busaddr = skb_frag_dma_map(dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr))
goto dma_err;
wrb = queue_head_node(txq);
- wrb_fill(wrb, busaddr, frag->size);
+ wrb_fill(wrb, busaddr, skb_frag_size(frag));
be_dws_cpu_to_le(wrb, sizeof(*wrb));
queue_head_inc(txq);
- copied += frag->size;
+ copied += skb_frag_size(frag);
}
if (dummy_wrb) {
@@ -829,6 +754,10 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
}
+ /* No need to further configure vids if in promiscuous mode */
+ if (adapter->promiscuous)
+ return 0;
+
if (adapter->vlans_added <= adapter->max_vlans) {
/* Construct VLAN Table to give to HW */
for (i = 0; i < VLAN_N_VID; i++) {
@@ -874,12 +803,12 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
be_vid_config(adapter, false, 0);
}
-static void be_set_multicast_list(struct net_device *netdev)
+static void be_set_rx_mode(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (netdev->flags & IFF_PROMISC) {
- be_cmd_promiscuous_config(adapter, true);
+ be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
adapter->promiscuous = true;
goto done;
}
@@ -887,19 +816,20 @@ static void be_set_multicast_list(struct net_device *netdev)
/* BE was previously in promiscuous mode; disable it */
if (adapter->promiscuous) {
adapter->promiscuous = false;
- be_cmd_promiscuous_config(adapter, false);
+ be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
+
+ if (adapter->vlans_added)
+ be_vid_config(adapter, false, 0);
}
/* Enable multicast promisc if num configured exceeds what we support */
if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > BE_MAX_MC) {
- be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
- &adapter->mc_cmd_mem);
+ netdev_mc_count(netdev) > BE_MAX_MC) {
+ be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
goto done;
}
- be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
- &adapter->mc_cmd_mem);
+ be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
return;
}
@@ -1005,10 +935,17 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
return status;
}
-static void be_rx_rate_update(struct be_rx_obj *rxo)
+static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
- struct be_rx_stats *stats = &rxo->stats;
+ struct be_eq_obj *rx_eq = &rxo->rx_eq;
+ struct be_rx_stats *stats = rx_stats(rxo);
ulong now = jiffies;
+ ulong delta = now - stats->rx_jiffies;
+ u64 pkts;
+ unsigned int start, eqd;
+
+ if (!rx_eq->enable_aic)
+ return;
/* Wrapped around */
if (time_before(now, stats->rx_jiffies)) {
@@ -1016,29 +953,46 @@ static void be_rx_rate_update(struct be_rx_obj *rxo)
return;
}
- /* Update the rate once in two seconds */
- if ((now - stats->rx_jiffies) < 2 * HZ)
+ /* Update once a second */
+ if (delta < HZ)
return;
- stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
- now - stats->rx_jiffies);
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->sync);
+ pkts = stats->rx_pkts;
+ } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+
+ stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
+ stats->rx_pkts_prev = pkts;
stats->rx_jiffies = now;
- stats->rx_bytes_prev = stats->rx_bytes;
+ eqd = stats->rx_pps / 110000;
+ eqd = eqd << 3;
+ if (eqd > rx_eq->max_eqd)
+ eqd = rx_eq->max_eqd;
+ if (eqd < rx_eq->min_eqd)
+ eqd = rx_eq->min_eqd;
+ if (eqd < 10)
+ eqd = 0;
+ if (eqd != rx_eq->cur_eqd) {
+ be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
+ rx_eq->cur_eqd = eqd;
+ }
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
struct be_rx_compl_info *rxcp)
{
- struct be_rx_stats *stats = &rxo->stats;
+ struct be_rx_stats *stats = rx_stats(rxo);
+ u64_stats_update_begin(&stats->sync);
stats->rx_compl++;
- stats->rx_frags += rxcp->num_rcvd;
stats->rx_bytes += rxcp->pkt_size;
stats->rx_pkts++;
if (rxcp->pkt_type == BE_MULTICAST_PACKET)
stats->rx_mcast_pkts++;
if (rxcp->err)
- stats->rxcp_err++;
+ stats->rx_compl_err++;
+ u64_stats_update_end(&stats->sync);
}
static inline bool csum_passed(struct be_rx_compl_info *rxcp)
@@ -1119,11 +1073,12 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
skb->tail += curr_frag_len;
} else {
skb_shinfo(skb)->nr_frags = 1;
- skb_shinfo(skb)->frags[0].page = page_info->page;
+ skb_frag_set_page(skb, 0, page_info->page);
skb_shinfo(skb)->frags[0].page_offset =
page_info->page_offset + hdr_len;
- skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
+ skb->truesize += rx_frag_size;
skb->tail += hdr_len;
}
page_info->page = NULL;
@@ -1144,19 +1099,19 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
if (page_info->page_offset == 0) {
/* Fresh page */
j++;
- skb_shinfo(skb)->frags[j].page = page_info->page;
+ skb_frag_set_page(skb, j, page_info->page);
skb_shinfo(skb)->frags[j].page_offset =
page_info->page_offset;
- skb_shinfo(skb)->frags[j].size = 0;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
skb_shinfo(skb)->nr_frags++;
} else {
put_page(page_info->page);
}
- skb_shinfo(skb)->frags[j].size += curr_frag_len;
+ skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
-
+ skb->truesize += rx_frag_size;
remaining -= curr_frag_len;
index_inc(&rxcp->rxq_idx, rxq->len);
page_info->page = NULL;
@@ -1174,7 +1129,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
if (unlikely(!skb)) {
- rxo->stats.rx_dropped++;
+ rx_stats(rxo)->rx_drops_no_skbs++;
be_rx_compl_discard(adapter, rxo, rxcp);
return;
}
@@ -1186,13 +1141,12 @@ static void be_rx_compl_process(struct be_adapter *adapter,
else
skb_checksum_none_assert(skb);
- skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, netdev);
if (adapter->netdev->features & NETIF_F_RXHASH)
skb->rxhash = rxcp->rss_hash;
- if (unlikely(rxcp->vlanf))
+ if (rxcp->vlanf)
__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
netif_receive_skb(skb);
@@ -1226,15 +1180,15 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
if (i == 0 || page_info->page_offset == 0) {
/* First frag or Fresh page */
j++;
- skb_shinfo(skb)->frags[j].page = page_info->page;
+ skb_frag_set_page(skb, j, page_info->page);
skb_shinfo(skb)->frags[j].page_offset =
page_info->page_offset;
- skb_shinfo(skb)->frags[j].size = 0;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
} else {
put_page(page_info->page);
}
- skb_shinfo(skb)->frags[j].size += curr_frag_len;
-
+ skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
+ skb->truesize += rx_frag_size;
remaining -= curr_frag_len;
index_inc(&rxcp->rxq_idx, rxq->len);
memset(page_info, 0, sizeof(*page_info));
@@ -1244,12 +1198,11 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
skb_shinfo(skb)->nr_frags = j + 1;
skb->len = rxcp->pkt_size;
skb->data_len = rxcp->pkt_size;
- skb->truesize += rxcp->pkt_size;
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (adapter->netdev->features & NETIF_F_RXHASH)
skb->rxhash = rxcp->rss_hash;
- if (unlikely(rxcp->vlanf))
+ if (rxcp->vlanf)
__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
napi_gro_frags(&eq_obj->napi);
@@ -1285,6 +1238,7 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter,
rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
compl);
}
+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
static void be_parse_rx_compl_v0(struct be_adapter *adapter,
@@ -1317,6 +1271,7 @@ static void be_parse_rx_compl_v0(struct be_adapter *adapter,
rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
compl);
}
+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
@@ -1341,14 +1296,13 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
if (rxcp->vlanf) {
/* vlanf could be wrongly set in some cards.
* ignore if vtm is not set */
- if ((adapter->function_mode & 0x400) && !rxcp->vtm)
+ if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
rxcp->vlanf = 0;
if (!lancer_chip(adapter))
rxcp->vlan_tag = swab16(rxcp->vlan_tag);
- if (((adapter->pvid & VLAN_VID_MASK) ==
- (rxcp->vlan_tag & VLAN_VID_MASK)) &&
+ if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
!adapter->vlan_tag[rxcp->vlan_tag])
rxcp->vlanf = 0;
}
@@ -1389,7 +1343,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
if (!pagep) {
pagep = be_alloc_pages(adapter->big_page_size, gfp);
if (unlikely(!pagep)) {
- rxo->stats.rx_post_fail++;
+ rx_stats(rxo)->rx_post_fail++;
break;
}
page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
@@ -1686,6 +1640,17 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
be_queue_free(adapter, q);
}
+static int be_num_txqs_want(struct be_adapter *adapter)
+{
+ if ((num_vfs && adapter->sriov_enabled) ||
+ be_is_mc(adapter) ||
+ lancer_chip(adapter) || !be_physfn(adapter) ||
+ adapter->generation == BE_GEN2)
+ return 1;
+ else
+ return MAX_TX_QS;
+}
+
/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
@@ -1693,6 +1658,11 @@ static int be_tx_queues_create(struct be_adapter *adapter)
struct be_tx_obj *txo;
u8 i;
+ adapter->num_tx_qs = be_num_txqs_want(adapter);
+ if (adapter->num_tx_qs != MAX_TX_QS)
+ netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_qs);
+
adapter->tx_eq.max_eqd = 0;
adapter->tx_eq.min_eqd = 0;
adapter->tx_eq.cur_eqd = 96;
@@ -1755,7 +1725,8 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
+ !adapter->sriov_enabled && be_physfn(adapter) &&
+ !be_is_mc(adapter)) {
return 1 + MAX_RSS_QS; /* one default non-RSS queue */
} else {
dev_warn(&adapter->pdev->dev,
@@ -1899,27 +1870,41 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
struct be_rx_compl_info *rxcp;
u32 work_done;
- rxo->stats.rx_polls++;
+ rx_stats(rxo)->rx_polls++;
for (work_done = 0; work_done < budget; work_done++) {
rxcp = be_rx_compl_get(rxo);
if (!rxcp)
break;
- /* Ignore flush completions */
- if (rxcp->num_rcvd && rxcp->pkt_size) {
- if (do_gro(rxcp))
- be_rx_compl_process_gro(adapter, rxo, rxcp);
- else
- be_rx_compl_process(adapter, rxo, rxcp);
- } else if (rxcp->pkt_size == 0) {
+ /* Is it a flush compl that has no data */
+ if (unlikely(rxcp->num_rcvd == 0))
+ goto loop_continue;
+
+ /* Discard compl with partial DMA Lancer B0 */
+ if (unlikely(!rxcp->pkt_size)) {
be_rx_compl_discard(adapter, rxo, rxcp);
+ goto loop_continue;
}
+ /* On BE drop pkts that arrive due to imperfect filtering in
+ * promiscuous mode on some skews
+ */
+ if (unlikely(rxcp->port != adapter->port_num &&
+ !lancer_chip(adapter))) {
+ be_rx_compl_discard(adapter, rxo, rxcp);
+ goto loop_continue;
+ }
+
+ if (do_gro(rxcp))
+ be_rx_compl_process_gro(adapter, rxo, rxcp);
+ else
+ be_rx_compl_process(adapter, rxo, rxcp);
+loop_continue:
be_rx_stats_update(rxo, rxcp);
}
/* Refill the queue */
- if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
+ if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
be_post_rx_frags(rxo, GFP_ATOMIC);
/* All consumed */
@@ -1968,8 +1953,9 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
netif_wake_subqueue(adapter->netdev, i);
}
- adapter->drv_stats.be_tx_events++;
- txo->stats.be_tx_compl += tx_compl;
+ u64_stats_update_begin(&tx_stats(txo)->sync_compl);
+ tx_stats(txo)->tx_compl += tx_compl;
+ u64_stats_update_end(&tx_stats(txo)->sync_compl);
}
}
@@ -1983,6 +1969,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
napi_complete(napi);
be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
+ adapter->drv_stats.tx_events++;
return 1;
}
@@ -2031,7 +2018,6 @@ static void be_worker(struct work_struct *work)
struct be_adapter *adapter =
container_of(work, struct be_adapter, work.work);
struct be_rx_obj *rxo;
- struct be_tx_obj *txo;
int i;
if (!adapter->ue_detected && !lancer_chip(adapter))
@@ -2060,11 +2046,7 @@ static void be_worker(struct work_struct *work)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
}
- for_all_tx_queues(adapter, txo, i)
- be_tx_rate_update(txo);
-
for_all_rx_queues(adapter, rxo, i) {
- be_rx_rate_update(rxo);
be_rx_eqd_update(adapter, rxo);
if (rxo->rx_post_starved) {
@@ -2111,7 +2093,7 @@ done:
return;
}
-static void be_sriov_enable(struct be_adapter *adapter)
+static int be_sriov_enable(struct be_adapter *adapter)
{
be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
@@ -2133,8 +2115,17 @@ static void be_sriov_enable(struct be_adapter *adapter)
status = pci_enable_sriov(adapter->pdev, num_vfs);
adapter->sriov_enabled = status ? false : true;
+
+ if (adapter->sriov_enabled) {
+ adapter->vf_cfg = kcalloc(num_vfs,
+ sizeof(struct be_vf_cfg),
+ GFP_KERNEL);
+ if (!adapter->vf_cfg)
+ return -ENOMEM;
+ }
}
#endif
+ return 0;
}
static void be_sriov_disable(struct be_adapter *adapter)
@@ -2142,6 +2133,7 @@ static void be_sriov_disable(struct be_adapter *adapter)
#ifdef CONFIG_PCI_IOV
if (adapter->sriov_enabled) {
pci_disable_sriov(adapter->pdev);
+ kfree(adapter->vf_cfg);
adapter->sriov_enabled = false;
}
#endif
@@ -2294,9 +2286,6 @@ static int be_close(struct net_device *netdev)
be_async_mcc_disable(adapter);
- netif_carrier_off(netdev);
- adapter->link_up = false;
-
if (!lancer_chip(adapter))
be_intr_set(adapter, false);
@@ -2374,10 +2363,7 @@ static int be_open(struct net_device *netdev)
struct be_adapter *adapter = netdev_priv(netdev);
struct be_eq_obj *tx_eq = &adapter->tx_eq;
struct be_rx_obj *rxo;
- bool link_up;
int status, i;
- u8 mac_speed;
- u16 link_speed;
status = be_rx_queues_setup(adapter);
if (status)
@@ -2400,23 +2386,6 @@ static int be_open(struct net_device *netdev)
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
- status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
- &link_speed, 0);
- if (status)
- goto err;
- be_link_status_update(adapter, link_up);
-
- if (be_physfn(adapter)) {
- status = be_vid_config(adapter, false, 0);
- if (status)
- goto err;
-
- status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
- if (status)
- goto err;
- }
-
return 0;
err:
be_close(adapter->netdev);
@@ -2470,7 +2439,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
*/
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
- u32 vf = 0;
+ u32 vf;
int status = 0;
u8 mac[ETH_ALEN];
@@ -2492,7 +2461,7 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
return status;
}
-static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
+static void be_vf_clear(struct be_adapter *adapter)
{
u32 vf;
@@ -2502,132 +2471,159 @@ static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
adapter->vf_cfg[vf].vf_if_handle,
adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
}
+
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_cfg[vf].vf_if_handle)
+ be_cmd_if_destroy(adapter,
+ adapter->vf_cfg[vf].vf_if_handle, vf + 1);
}
-static int be_setup(struct be_adapter *adapter)
+static int be_clear(struct be_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- u32 cap_flags, en_flags, vf = 0;
- int status;
- u8 mac[ETH_ALEN];
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ be_vf_clear(adapter);
- be_cmd_req_native_mode(adapter);
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST;
+ be_mcc_queues_destroy(adapter);
+ be_rx_queues_destroy(adapter);
+ be_tx_queues_destroy(adapter);
+ adapter->eq_next_idx = 0;
- if (be_physfn(adapter)) {
- cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_PASS_L3L4_ERRORS;
- en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
-
- if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
- cap_flags |= BE_IF_FLAGS_RSS;
- en_flags |= BE_IF_FLAGS_RSS;
- }
- }
+ adapter->be3_native = false;
+ adapter->promiscuous = false;
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- netdev->dev_addr, false/* pmac_invalid */,
- &adapter->if_handle, &adapter->pmac_id, 0);
- if (status != 0)
- goto do_none;
+ /* tell fw we're done with firing cmds */
+ be_cmd_fw_clean(adapter);
+ return 0;
+}
- if (be_physfn(adapter)) {
- if (adapter->sriov_enabled) {
- while (vf < num_vfs) {
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST;
- status = be_cmd_if_create(adapter, cap_flags,
- en_flags, mac, true,
+static int be_vf_setup(struct be_adapter *adapter)
+{
+ u32 cap_flags, en_flags, vf;
+ u16 lnk_speed;
+ int status;
+
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
+ for (vf = 0; vf < num_vfs; vf++) {
+ status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
&adapter->vf_cfg[vf].vf_if_handle,
NULL, vf+1);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "Interface Create failed for VF %d\n",
- vf);
- goto if_destroy;
- }
- adapter->vf_cfg[vf].vf_pmac_id =
- BE_INVALID_PMAC_ID;
- vf++;
- }
- }
- } else {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
- if (!status) {
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- }
+ if (status)
+ goto err;
+ adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
}
+ if (!lancer_chip(adapter)) {
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto err;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
+ vf + 1);
+ if (status)
+ goto err;
+ adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+ }
+ return 0;
+err:
+ return status;
+}
+
+static int be_setup(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 cap_flags, en_flags;
+ u32 tx_fc, rx_fc;
+ int status;
+ u8 mac[ETH_ALEN];
+
+ /* Allow all priorities by default. A GRP5 evt may modify this */
+ adapter->vlan_prio_bmap = 0xff;
+ adapter->link_speed = -1;
+
+ be_cmd_req_native_mode(adapter);
+
status = be_tx_queues_create(adapter);
if (status != 0)
- goto if_destroy;
+ goto err;
status = be_rx_queues_create(adapter);
if (status != 0)
- goto tx_qs_destroy;
-
- /* Allow all priorities by default. A GRP5 evt may modify this */
- adapter->vlan_prio_bmap = 0xff;
+ goto err;
status = be_mcc_queues_create(adapter);
if (status != 0)
- goto rx_qs_destroy;
+ goto err;
- adapter->link_speed = -1;
+ memset(mac, 0, ETH_ALEN);
+ status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
+ true /*permanent */, 0);
+ if (status)
+ return status;
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- return 0;
+ en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
+ BE_IF_FLAGS_PROMISCUOUS;
+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
+ cap_flags |= BE_IF_FLAGS_RSS;
+ en_flags |= BE_IF_FLAGS_RSS;
+ }
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ netdev->dev_addr, &adapter->if_handle,
+ &adapter->pmac_id, 0);
+ if (status != 0)
+ goto err;
-rx_qs_destroy:
- be_rx_queues_destroy(adapter);
-tx_qs_destroy:
- be_tx_queues_destroy(adapter);
-if_destroy:
- if (be_physfn(adapter) && adapter->sriov_enabled)
- for (vf = 0; vf < num_vfs; vf++)
- if (adapter->vf_cfg[vf].vf_if_handle)
- be_cmd_if_destroy(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- vf + 1);
- be_cmd_if_destroy(adapter, adapter->if_handle, 0);
-do_none:
- return status;
-}
+ /* For BEx, the VF's permanent mac queried from card is incorrect.
+ * Query the mac configued by the PF using if_handle
+ */
+ if (!be_physfn(adapter) && !lancer_chip(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ if (!status) {
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
+ }
-static int be_clear(struct be_adapter *adapter)
-{
- int vf;
+ be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
- if (be_physfn(adapter) && adapter->sriov_enabled)
- be_vf_eth_addr_rem(adapter);
+ status = be_vid_config(adapter, false, 0);
+ if (status)
+ goto err;
- be_mcc_queues_destroy(adapter);
- be_rx_queues_destroy(adapter);
- be_tx_queues_destroy(adapter);
- adapter->eq_next_idx = 0;
+ be_set_rx_mode(adapter->netdev);
- if (be_physfn(adapter) && adapter->sriov_enabled)
- for (vf = 0; vf < num_vfs; vf++)
- if (adapter->vf_cfg[vf].vf_if_handle)
- be_cmd_if_destroy(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- vf + 1);
+ status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
+ if (status)
+ goto err;
+ if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
+ status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
+ adapter->rx_fc);
+ if (status)
+ goto err;
+ }
- be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+ pcie_set_readrq(adapter->pdev, 4096);
- adapter->be3_native = 0;
+ if (be_physfn(adapter) && adapter->sriov_enabled) {
+ status = be_vf_setup(adapter);
+ if (status)
+ goto err;
+ }
- /* tell fw we're done with firing cmds */
- be_cmd_fw_clean(adapter);
return 0;
+err:
+ be_clear(adapter);
+ return status;
}
-
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
@@ -2656,6 +2652,21 @@ static bool be_flash_redboot(struct be_adapter *adapter,
return true;
}
+static bool phy_flashing_required(struct be_adapter *adapter)
+{
+ int status = 0;
+ struct be_phy_info phy_info;
+
+ status = be_cmd_get_phy_info(adapter, &phy_info);
+ if (status)
+ return false;
+ if ((phy_info.phy_type == TN_8022) &&
+ (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
+ return true;
+ }
+ return false;
+}
+
static int be_flash_data(struct be_adapter *adapter,
const struct firmware *fw,
struct be_dma_mem *flash_cmd, int num_of_images)
@@ -2669,7 +2680,7 @@ static int be_flash_data(struct be_adapter *adapter,
const struct flash_comp *pflashcomp;
int num_comp;
- static const struct flash_comp gen3_flash_types[9] = {
+ static const struct flash_comp gen3_flash_types[10] = {
{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
FLASH_IMAGE_MAX_SIZE_g3},
{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
@@ -2687,7 +2698,9 @@ static int be_flash_data(struct be_adapter *adapter,
{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
FLASH_IMAGE_MAX_SIZE_g3},
{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
- FLASH_NCSI_IMAGE_MAX_SIZE_g3}
+ FLASH_NCSI_IMAGE_MAX_SIZE_g3},
+ { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
+ FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
};
static const struct flash_comp gen2_flash_types[8] = {
{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
@@ -2721,6 +2734,10 @@ static int be_flash_data(struct be_adapter *adapter,
if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
continue;
+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
+ if (!phy_flashing_required(adapter))
+ continue;
+ }
if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
(!be_flash_redboot(adapter, fw->data,
pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
@@ -2729,25 +2746,35 @@ static int be_flash_data(struct be_adapter *adapter,
p = fw->data;
p += filehdr_size + pflashcomp[i].offset
+ (num_of_images * sizeof(struct image_hdr));
- if (p + pflashcomp[i].size > fw->data + fw->size)
- return -1;
- total_bytes = pflashcomp[i].size;
+ if (p + pflashcomp[i].size > fw->data + fw->size)
+ return -1;
+ total_bytes = pflashcomp[i].size;
while (total_bytes) {
if (total_bytes > 32*1024)
num_bytes = 32*1024;
else
num_bytes = total_bytes;
total_bytes -= num_bytes;
-
- if (!total_bytes)
- flash_op = FLASHROM_OPER_FLASH;
- else
- flash_op = FLASHROM_OPER_SAVE;
+ if (!total_bytes) {
+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_FLASH;
+ else
+ flash_op = FLASHROM_OPER_FLASH;
+ } else {
+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_SAVE;
+ else
+ flash_op = FLASHROM_OPER_SAVE;
+ }
memcpy(req->params.data_buf, p, num_bytes);
p += num_bytes;
status = be_cmd_write_flashrom(adapter, flash_cmd,
pflashcomp[i].optype, flash_op, num_bytes);
if (status) {
+ if ((status == ILLEGAL_IOCTL_REQ) &&
+ (pflashcomp[i].optype ==
+ IMG_TYPE_PHY_FW))
+ break;
dev_err(&adapter->pdev->dev,
"cmd to write to flash rom failed.\n");
return -1;
@@ -2935,9 +2962,10 @@ static struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
.ndo_start_xmit = be_xmit,
- .ndo_set_rx_mode = be_set_multicast_list,
+ .ndo_set_rx_mode = be_set_rx_mode,
.ndo_set_mac_address = be_mac_addr_set,
.ndo_change_mtu = be_change_mtu,
+ .ndo_get_stats64 = be_get_stats64,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = be_vlan_add_vid,
.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
@@ -2967,10 +2995,6 @@ static void be_netdev_init(struct net_device *netdev)
netdev->flags |= IFF_MULTICAST;
- /* Default settings for Rx and Tx flow control */
- adapter->rx_fc = true;
- adapter->tx_fc = true;
-
netif_set_gso_max_size(netdev, 65535);
BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
@@ -2991,14 +3015,12 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
iounmap(adapter->csr);
if (adapter->db)
iounmap(adapter->db);
- if (adapter->pcicfg && be_physfn(adapter))
- iounmap(adapter->pcicfg);
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
- int pcicfg_reg, db_reg;
+ int db_reg;
if (lancer_chip(adapter)) {
addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
@@ -3018,10 +3040,8 @@ static int be_map_pci_bars(struct be_adapter *adapter)
}
if (adapter->generation == BE_GEN2) {
- pcicfg_reg = 1;
db_reg = 4;
} else {
- pcicfg_reg = 0;
if (be_physfn(adapter))
db_reg = 4;
else
@@ -3033,16 +3053,6 @@ static int be_map_pci_bars(struct be_adapter *adapter)
goto pci_map_err;
adapter->db = addr;
- if (be_physfn(adapter)) {
- addr = ioremap_nocache(
- pci_resource_start(adapter->pdev, pcicfg_reg),
- pci_resource_len(adapter->pdev, pcicfg_reg));
- if (addr == NULL)
- goto pci_map_err;
- adapter->pcicfg = addr;
- } else
- adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
-
return 0;
pci_map_err:
be_unmap_pci_bars(adapter);
@@ -3060,7 +3070,7 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
mem->dma);
- mem = &adapter->mc_cmd_mem;
+ mem = &adapter->rx_filter;
if (mem->va)
dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
mem->dma);
@@ -3070,7 +3080,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
{
struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
- struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
+ struct be_dma_mem *rx_filter = &adapter->rx_filter;
int status;
status = be_map_pci_bars(adapter);
@@ -3086,21 +3096,19 @@ static int be_ctrl_init(struct be_adapter *adapter)
status = -ENOMEM;
goto unmap_pci_bars;
}
-
mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
- mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
- mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
- mc_cmd_mem->size, &mc_cmd_mem->dma,
- GFP_KERNEL);
- if (mc_cmd_mem->va == NULL) {
+ rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
+ rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
+ &rx_filter->dma, GFP_KERNEL);
+ if (rx_filter->va == NULL) {
status = -ENOMEM;
goto free_mbox;
}
- memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
+ memset(rx_filter->va, 0, rx_filter->size);
mutex_init(&adapter->mbox_lock);
spin_lock_init(&adapter->mcc_lock);
@@ -3167,7 +3175,6 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_ctrl_cleanup(adapter);
- kfree(adapter->vf_cfg);
be_sriov_disable(adapter);
be_msix_disable(adapter);
@@ -3182,35 +3189,13 @@ static void __devexit be_remove(struct pci_dev *pdev)
static int be_get_config(struct be_adapter *adapter)
{
int status;
- u8 mac[ETH_ALEN];
-
- status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
- if (status)
- return status;
status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
&adapter->function_mode, &adapter->function_caps);
if (status)
return status;
- memset(mac, 0, ETH_ALEN);
-
- /* A default permanent address is given to each VF for Lancer*/
- if (be_physfn(adapter) || lancer_chip(adapter)) {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
-
- if (status)
- return status;
-
- if (!is_valid_ether_addr(mac))
- return -EADDRNOTAVAIL;
-
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- }
-
- if (adapter->function_mode & 0x400)
+ if (adapter->function_mode & FLEX10_MODE)
adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
else
adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
@@ -3219,16 +3204,6 @@ static int be_get_config(struct be_adapter *adapter)
if (status)
return status;
- if ((num_vfs && adapter->sriov_enabled) ||
- (adapter->function_mode & 0x400) ||
- lancer_chip(adapter) || !be_physfn(adapter)) {
- adapter->num_tx_qs = 1;
- netif_set_real_num_tx_queues(adapter->netdev,
- adapter->num_tx_qs);
- } else {
- adapter->num_tx_qs = MAX_TX_QS;
- }
-
return 0;
}
@@ -3358,18 +3333,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
}
}
- be_sriov_enable(adapter);
- if (adapter->sriov_enabled) {
- adapter->vf_cfg = kcalloc(num_vfs,
- sizeof(struct be_vf_cfg), GFP_KERNEL);
-
- if (!adapter->vf_cfg)
- goto free_netdev;
- }
+ status = be_sriov_enable(adapter);
+ if (status)
+ goto free_netdev;
status = be_ctrl_init(adapter);
if (status)
- goto free_vf_cfg;
+ goto disable_sriov;
if (lancer_chip(adapter)) {
status = lancer_test_and_set_rdy_state(adapter);
@@ -3412,6 +3382,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
be_msix_enable(adapter);
INIT_DELAYED_WORK(&adapter->work, be_worker);
+ adapter->rx_fc = adapter->tx_fc = true;
status = be_setup(adapter);
if (status)
@@ -3421,36 +3392,12 @@ static int __devinit be_probe(struct pci_dev *pdev,
status = register_netdev(netdev);
if (status != 0)
goto unsetup;
- netif_carrier_off(netdev);
-
- if (be_physfn(adapter) && adapter->sriov_enabled) {
- u8 mac_speed;
- bool link_up;
- u16 vf, lnk_speed;
-
- if (!lancer_chip(adapter)) {
- status = be_vf_eth_addr_config(adapter);
- if (status)
- goto unreg_netdev;
- }
-
- for (vf = 0; vf < num_vfs; vf++) {
- status = be_cmd_link_status_query(adapter, &link_up,
- &mac_speed, &lnk_speed, vf + 1);
- if (!status)
- adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
- else
- goto unreg_netdev;
- }
- }
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
-unreg_netdev:
- unregister_netdev(netdev);
unsetup:
be_clear(adapter);
msix_disable:
@@ -3459,10 +3406,9 @@ stats_clean:
be_stats_cleanup(adapter);
ctrl_clean:
be_ctrl_cleanup(adapter);
-free_vf_cfg:
- kfree(adapter->vf_cfg);
-free_netdev:
+disable_sriov:
be_sriov_disable(adapter);
+free_netdev:
free_netdev(netdev);
pci_set_drvdata(pdev, NULL);
rel_reg:
@@ -3489,7 +3435,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
be_close(netdev);
rtnl_unlock();
}
- be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
be_clear(adapter);
be_msix_disable(adapter);
diff --git a/drivers/net/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8abbe1d8282..bdb348a5ccf 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -888,7 +888,7 @@ static const struct net_device_ops ethoc_netdev_ops = {
.ndo_do_ioctl = ethoc_ioctl,
.ndo_set_config = ethoc_config,
.ndo_set_mac_address = ethoc_set_mac_address,
- .ndo_set_multicast_list = ethoc_set_multicast_list,
+ .ndo_set_rx_mode = ethoc_set_multicast_list,
.ndo_change_mtu = ethoc_change_mtu,
.ndo_tx_timeout = ethoc_tx_timeout,
.ndo_start_xmit = ethoc_start_xmit,
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
new file mode 100644
index 00000000000..b8974b9e3b4
--- /dev/null
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -0,0 +1,40 @@
+#
+# Faraday device configuration
+#
+
+config NET_VENDOR_FARADAY
+ bool "Faraday devices"
+ default y
+ depends on ARM
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Faraday cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_FARADAY
+
+config FTMAC100
+ tristate "Faraday FTMAC100 10/100 Ethernet support"
+ depends on ARM
+ select NET_CORE
+ select MII
+ ---help---
+ This driver supports the FTMAC100 10/100 Ethernet controller
+ from Faraday. It is used on Faraday A320, Andes AG101 and some
+ other ARM/NDS32 SoC's.
+
+config FTGMAC100
+ tristate "Faraday FTGMAC100 Gigabit Ethernet support"
+ depends on ARM
+ select PHYLIB
+ ---help---
+ This driver supports the FTGMAC100 Gigabit Ethernet controller
+ from Faraday. It is used on Faraday A369, Andes AG102 and some
+ other ARM/NDS32 SoC's.
+
+endif # NET_VENDOR_FARADAY
diff --git a/drivers/net/ethernet/faraday/Makefile b/drivers/net/ethernet/faraday/Makefile
new file mode 100644
index 00000000000..408b53980d5
--- /dev/null
+++ b/drivers/net/ethernet/faraday/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Faraday device drivers.
+#
+
+obj-$(CONFIG_FTGMAC100) += ftgmac100.o
+obj-$(CONFIG_FTMAC100) += ftmac100.o
diff --git a/drivers/net/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 54709af917e..fb5579a3b19 100644
--- a/drivers/net/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -467,7 +467,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
skb->len += size;
skb->data_len += size;
- skb->truesize += size;
+ skb->truesize += PAGE_SIZE;
if (ftgmac100_rxdes_last_segment(rxdes))
done = true;
@@ -478,6 +478,8 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
rxdes = ftgmac100_current_rxdes(priv);
} while (!done);
+ if (skb->len <= 64)
+ skb->truesize -= PAGE_SIZE;
__pskb_pull_tail(skb, min(skb->len, 64U));
skb->protocol = eth_type_trans(skb, netdev);
diff --git a/drivers/net/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index 13408d448b0..13408d448b0 100644
--- a/drivers/net/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
diff --git a/drivers/net/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 9bd7746cbfc..a127cb2476c 100644
--- a/drivers/net/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -439,7 +439,10 @@ static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
skb_fill_page_desc(skb, 0, page, 0, length);
skb->len += length;
skb->data_len += length;
- skb->truesize += length;
+
+ /* page might be freed in __pskb_pull_tail() */
+ if (length > 64)
+ skb->truesize += PAGE_SIZE;
__pskb_pull_tail(skb, min(length, 64));
ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
diff --git a/drivers/net/ftmac100.h b/drivers/net/ethernet/faraday/ftmac100.h
index 46a0c47b1ee..46a0c47b1ee 100644
--- a/drivers/net/ftmac100.h
+++ b/drivers/net/ethernet/faraday/ftmac100.h
diff --git a/drivers/net/fealnx.c b/drivers/net/ethernet/fealnx.c
index fa8677c3238..61d2bddec1f 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -469,7 +469,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_stop = netdev_close,
.ndo_start_xmit = start_tx,
.ndo_get_stats = get_stats,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_do_ioctl = mii_ioctl,
.ndo_tx_timeout = fealnx_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
new file mode 100644
index 00000000000..1cf671643d1
--- /dev/null
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -0,0 +1,88 @@
+#
+# Freescale device configuration
+#
+
+config NET_VENDOR_FREESCALE
+ bool "Freescale devices"
+ default y
+ depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
+ M523x || M527x || M5272 || M528x || M520x || M532x || \
+ ARCH_MXC || ARCH_MXS || \
+ (PPC_MPC52xx && PPC_BESTCOMM)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Freescale devices. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_FREESCALE
+
+config FEC
+ bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+ depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
+ ARCH_MXC || ARCH_MXS)
+ select PHYLIB
+ ---help---
+ Say Y here if you want to use the built-in 10/100 Fast ethernet
+ controller on some Motorola ColdFire and Freescale i.MX processors.
+
+config FEC_MPC52xx
+ tristate "FEC MPC52xx driver"
+ depends on PPC_MPC52xx && PPC_BESTCOMM
+ select CRC32
+ select PHYLIB
+ select PPC_BESTCOMM_FEC
+ ---help---
+ This option enables support for the MPC5200's on-chip
+ Fast Ethernet Controller
+ If compiled as module, it will be called fec_mpc52xx.
+
+config FEC_MPC52xx_MDIO
+ bool "FEC MPC52xx MDIO bus driver"
+ depends on FEC_MPC52xx
+ default y
+ ---help---
+ The MPC5200's FEC can connect to the Ethernet either with
+ an external MII PHY chip or 10 Mbps 7-wire interface
+ (Motorola? industry standard).
+ If your board uses an external PHY connected to FEC, enable this.
+ If not sure, enable.
+ If compiled as module, it will be called fec_mpc52xx_phy.
+
+source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
+
+config FSL_PQ_MDIO
+ tristate "Freescale PQ MDIO"
+ depends on FSL_SOC
+ select PHYLIB
+ ---help---
+ This driver supports the MDIO bus used by the gianfar and UCC drivers.
+
+config UCC_GETH
+ tristate "Freescale QE Gigabit Ethernet"
+ depends on QUICC_ENGINE
+ select FSL_PQ_MDIO
+ select PHYLIB
+ ---help---
+ This driver supports the Gigabit Ethernet mode of the QUICC Engine,
+ which is available on some Freescale SOCs.
+
+config UGETH_TX_ON_DEMAND
+ bool "Transmit on Demand support"
+ depends on UCC_GETH
+
+config GIANFAR
+ tristate "Gianfar Ethernet"
+ depends on FSL_SOC
+ select FSL_PQ_MDIO
+ select PHYLIB
+ select CRC32
+ ---help---
+ This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
+ and MPC86xx family of chips, and the FEC on the 8540.
+
+endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
new file mode 100644
index 00000000000..1752488c9ee
--- /dev/null
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the Freescale network device drivers.
+#
+
+obj-$(CONFIG_FEC) += fec.o
+obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
+ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
+ obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
+endif
+obj-$(CONFIG_FS_ENET) += fs_enet/
+obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
+obj-$(CONFIG_GIANFAR) += gianfar_driver.o
+obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
+gianfar_driver-objs := gianfar.o \
+ gianfar_ethtool.o \
+ gianfar_sysfs.o
+obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
+ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
diff --git a/drivers/net/fec.c b/drivers/net/ethernet/freescale/fec.c
index e8266ccf818..1124ce0a159 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -18,7 +18,7 @@
* Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
* Copyright (c) 2004-2006 Macq Electronique SA.
*
- * Copyright (C) 2010 Freescale Semiconductor, Inc.
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
*/
#include <linux/module.h>
@@ -72,6 +72,8 @@
#define FEC_QUIRK_SWAP_FRAME (1 << 1)
/* Controller uses gasket */
#define FEC_QUIRK_USE_GASKET (1 << 2)
+/* Controller has GBIT support */
+#define FEC_QUIRK_HAS_GBIT (1 << 3)
static struct platform_device_id fec_devtype[] = {
{
@@ -88,6 +90,9 @@ static struct platform_device_id fec_devtype[] = {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
}, {
+ .name = "imx6q-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT,
+ }, {
/* sentinel */
}
};
@@ -97,12 +102,14 @@ enum imx_fec_type {
IMX25_FEC = 1, /* runs on i.mx25/50/53 */
IMX27_FEC, /* runs on i.mx27/35/51 */
IMX28_FEC,
+ IMX6Q_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
+ { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -170,6 +177,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define PKT_MINBUF_SIZE 64
#define PKT_MAXBLR_SIZE 1520
+/* This device has up to three irqs on some platforms */
+#define FEC_IRQ_NUM 3
/*
* The 5270/5271/5280/5282/532x RX control register also contains maximum frame
@@ -233,6 +242,7 @@ struct fec_enet_private {
int link;
int full_duplex;
struct completion mdio_done;
+ int irq[FEC_IRQ_NUM];
};
/* FEC MII MMFR bits definition */
@@ -373,6 +383,7 @@ fec_restart(struct net_device *ndev, int duplex)
int i;
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
+ u32 ecntl = 0x2; /* ETHEREN */
/* Whack a reset. We should wait for this. */
writel(1, fep->hwp + FEC_ECNTRL);
@@ -442,18 +453,23 @@ fec_restart(struct net_device *ndev, int duplex)
/* Enable flow control and length check */
rcntl |= 0x40000000 | 0x00000020;
- /* MII or RMII */
- if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+ /* RGMII, RMII or MII */
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ rcntl |= (1 << 6);
+ else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
rcntl |= (1 << 8);
else
rcntl &= ~(1 << 8);
- /* 10M or 100M */
- if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
- rcntl &= ~(1 << 9);
- else
- rcntl |= (1 << 9);
-
+ /* 1G, 100M or 10M */
+ if (fep->phy_dev) {
+ if (fep->phy_dev->speed == SPEED_1000)
+ ecntl |= (1 << 5);
+ else if (fep->phy_dev->speed == SPEED_100)
+ rcntl &= ~(1 << 9);
+ else
+ rcntl |= (1 << 9);
+ }
} else {
#ifdef FEC_MIIGSK_ENR
if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
@@ -478,8 +494,15 @@ fec_restart(struct net_device *ndev, int duplex)
}
writel(rcntl, fep->hwp + FEC_R_CNTRL);
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ /* enable ENET endian swap */
+ ecntl |= (1 << 8);
+ /* enable ENET store and forward mode */
+ writel(1 << 8, fep->hwp + FEC_X_WMRK);
+ }
+
/* And last, enable the transmit and receive processing */
- writel(2, fep->hwp + FEC_ECNTRL);
+ writel(ecntl, fep->hwp + FEC_ECNTRL);
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
/* Enable interrupts we wish to service */
@@ -490,6 +513,8 @@ static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
/* We cannot expect a graceful transmit stop without link !!! */
if (fep->link) {
@@ -504,6 +529,10 @@ fec_stop(struct net_device *ndev)
udelay(10);
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+
+ /* We have to keep ENET enabled to have MII interrupt stay working */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ writel(2, fep->hwp + FEC_ECNTRL);
}
@@ -918,6 +947,8 @@ static int fec_enet_mdio_reset(struct mii_bus *bus)
static int fec_enet_mii_probe(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
struct phy_device *phy_dev = NULL;
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
@@ -949,14 +980,18 @@ static int fec_enet_mii_probe(struct net_device *ndev)
snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
- PHY_INTERFACE_MODE_MII);
+ fep->phy_interface);
if (IS_ERR(phy_dev)) {
printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
return PTR_ERR(phy_dev);
}
/* mask with MAC supported features */
- phy_dev->supported &= PHY_BASIC_FEATURES;
+ if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT)
+ phy_dev->supported &= PHY_GBIT_FEATURES;
+ else
+ phy_dev->supported &= PHY_BASIC_FEATURES;
+
phy_dev->advertising = phy_dev->supported;
fep->phy_dev = phy_dev;
@@ -996,7 +1031,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
+ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
/* fec1 uses fec0 mii_bus */
fep->mii_bus = fec0_mii_bus;
return 0;
@@ -1006,8 +1041,16 @@ static int fec_enet_mii_init(struct platform_device *pdev)
/*
* Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+ *
+ * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
+ * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
+ * Reference Manual has an error on this, and gets fixed on i.MX6Q
+ * document.
*/
- fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1;
+ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000);
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ fep->phy_speed--;
+ fep->phy_speed <<= 1;
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
fep->mii_bus = mdiobus_alloc();
@@ -1321,16 +1364,42 @@ fec_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * fec_poll_controller: FEC Poll controller function
+ * @dev: The FEC network adapter
+ *
+ * Polled functionality used by netconsole and others in non interrupt mode
+ *
+ */
+void fec_poll_controller(struct net_device *dev)
+{
+ int i;
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ if (fep->irq[i] > 0) {
+ disable_irq(fep->irq[i]);
+ fec_enet_interrupt(fep->irq[i], dev);
+ enable_irq(fep->irq[i]);
+ }
+ }
+}
+#endif
+
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
.ndo_start_xmit = fec_enet_start_xmit,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
.ndo_do_ioctl = fec_enet_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = fec_poll_controller,
+#endif
};
/*
@@ -1411,24 +1480,22 @@ static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
return -ENODEV;
}
-static int __devinit fec_reset_phy(struct platform_device *pdev)
+static void __devinit fec_reset_phy(struct platform_device *pdev)
{
int err, phy_reset;
struct device_node *np = pdev->dev.of_node;
if (!np)
- return -ENODEV;
+ return;
phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset");
if (err) {
- pr_warn("FEC: failed to get gpio phy-reset: %d\n", err);
- return err;
+ pr_debug("FEC: failed to get gpio phy-reset: %d\n", err);
+ return;
}
msleep(1);
gpio_set_value(phy_reset, 1);
-
- return 0;
}
#else /* CONFIG_OF */
static inline int fec_get_phy_mode_dt(struct platform_device *pdev)
@@ -1436,13 +1503,12 @@ static inline int fec_get_phy_mode_dt(struct platform_device *pdev)
return -ENODEV;
}
-static inline int fec_reset_phy(struct platform_device *pdev)
+static inline void fec_reset_phy(struct platform_device *pdev)
{
/*
* In case of platform probe, the reset has been done
* by machine code.
*/
- return 0;
}
#endif /* CONFIG_OF */
@@ -1503,8 +1569,7 @@ fec_probe(struct platform_device *pdev)
fec_reset_phy(pdev);
- /* This device has up to three irqs on some platforms */
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
irq = platform_get_irq(pdev, i);
if (i && irq < 0)
break;
@@ -1549,7 +1614,7 @@ failed_init:
clk_disable(fep->clk);
clk_put(fep->clk);
failed_clk:
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
irq = platform_get_irq(pdev, i);
if (irq > 0)
free_irq(irq, ndev);
diff --git a/drivers/net/fec.h b/drivers/net/ethernet/freescale/fec.h
index 8b2c6d797e6..8b2c6d797e6 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index cb4416e591f..30745b56fe5 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -828,7 +828,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
.ndo_open = mpc52xx_fec_open,
.ndo_stop = mpc52xx_fec_close,
.ndo_start_xmit = mpc52xx_fec_start_xmit,
- .ndo_set_multicast_list = mpc52xx_fec_set_multicast_list,
+ .ndo_set_rx_mode = mpc52xx_fec_set_multicast_list,
.ndo_set_mac_address = mpc52xx_fec_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mpc52xx_fec_ioctl,
diff --git a/drivers/net/fec_mpc52xx.h b/drivers/net/ethernet/freescale/fec_mpc52xx.h
index 41d2dffde55..41d2dffde55 100644
--- a/drivers/net/fec_mpc52xx.h
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.h
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index 360a578c2bb..360a578c2bb 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig
new file mode 100644
index 00000000000..268414d9f2c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig
@@ -0,0 +1,35 @@
+config FS_ENET
+ tristate "Freescale Ethernet Driver"
+ depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
+ select NET_CORE
+ select MII
+ select PHYLIB
+
+config FS_ENET_MPC5121_FEC
+ def_bool y if (FS_ENET && PPC_MPC512x)
+ select FS_ENET_HAS_FEC
+
+config FS_ENET_HAS_SCC
+ bool "Chip has an SCC usable for ethernet"
+ depends on FS_ENET && (CPM1 || CPM2)
+ default y
+
+config FS_ENET_HAS_FCC
+ bool "Chip has an FCC usable for ethernet"
+ depends on FS_ENET && CPM2
+ default y
+
+config FS_ENET_HAS_FEC
+ bool "Chip has an FEC usable for ethernet"
+ depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
+ select FS_ENET_MDIO_FEC
+ default y
+
+config FS_ENET_MDIO_FEC
+ tristate "MDIO driver for FEC"
+ depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
+
+config FS_ENET_MDIO_FCC
+ tristate "MDIO driver for FCC"
+ depends on FS_ENET && CPM2
+ select MDIO_BITBANG
diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/ethernet/freescale/fs_enet/Makefile
index d4a305ee345..d4a305ee345 100644
--- a/drivers/net/fs_enet/Makefile
+++ b/drivers/net/ethernet/freescale/fs_enet/Makefile
diff --git a/drivers/net/fs_enet/fec.h b/drivers/net/ethernet/freescale/fs_enet/fec.h
index e980527e2b9..e980527e2b9 100644
--- a/drivers/net/fs_enet/fec.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fec.h
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 329ef231a09..5bf5471f06f 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -988,7 +988,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
.ndo_get_stats = fs_enet_get_stats,
.ndo_start_xmit = fs_enet_start_xmit,
.ndo_tx_timeout = fs_timeout,
- .ndo_set_multicast_list = fs_set_multicast_list,
+ .ndo_set_rx_mode = fs_set_multicast_list,
.ndo_do_ioctl = fs_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 1ece4b1a689..1ece4b1a689 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 7583a9572bc..7583a9572bc 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index b9fbc83d64a..b9fbc83d64a 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 22a02a76706..22a02a76706 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index b09270b5d0a..b09270b5d0a 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index e0e9d6c35d8..e0e9d6c35d8 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 52f4e8ad48e..52f4e8ad48e 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
diff --git a/drivers/net/fsl_pq_mdio.h b/drivers/net/ethernet/freescale/fsl_pq_mdio.h
index bd17a2a0139..bd17a2a0139 100644
--- a/drivers/net/fsl_pq_mdio.h
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.h
diff --git a/drivers/net/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 31d5c574e5a..83199fd0d62 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -458,7 +458,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_stop = gfar_close,
.ndo_change_mtu = gfar_change_mtu,
.ndo_set_features = gfar_set_features,
- .ndo_set_multicast_list = gfar_set_multi,
+ .ndo_set_rx_mode = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
.ndo_get_stats = gfar_get_stats,
@@ -2140,11 +2140,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (i == nr_frags - 1)
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
- bufaddr = dma_map_page(&priv->ofdev->dev,
- skb_shinfo(skb)->frags[i].page,
- skb_shinfo(skb)->frags[i].page_offset,
- length,
- DMA_TO_DEVICE);
+ bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
+ &skb_shinfo(skb)->frags[i],
+ 0,
+ length,
+ DMA_TO_DEVICE);
/* set the TxBD length and buffer pointer */
txbdp->bufPtr = bufaddr;
diff --git a/drivers/net/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 9aa43773e8e..9aa43773e8e 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 0caf3c323ec..212736bab6b 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1676,6 +1676,7 @@ static int gfar_get_cls_all(struct gfar_private *priv,
}
cmd->data = MAX_FILER_IDX;
+ cmd->rule_cnt = i;
return 0;
}
@@ -1712,7 +1713,7 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
}
static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- void *rule_locs)
+ u32 *rule_locs)
{
struct gfar_private *priv = netdev_priv(dev);
int ret = 0;
@@ -1728,7 +1729,7 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
ret = gfar_get_cls(priv, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
- ret = gfar_get_cls_all(priv, cmd, (u32 *) rule_locs);
+ ret = gfar_get_cls_all(priv, cmd, rule_locs);
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index f67b8aebc89..f67b8aebc89 100644
--- a/drivers/net/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
index 64f4094ac7f..64f4094ac7f 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index d3465ab50e5..46d690a92c0 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1761,10 +1761,12 @@ static int init_phy(struct net_device *dev)
if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
uec_configure_serdes(dev);
- phydev->supported &= (ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full);
+ phydev->supported &= (SUPPORTED_MII |
+ SUPPORTED_Autoneg |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full);
if (priv->max_speed == SPEED_1000)
phydev->supported |= ADVERTISED_1000baseT_Full;
@@ -3729,7 +3731,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ucc_geth_set_mac_addr,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_multicast_list = ucc_geth_set_multi,
+ .ndo_set_rx_mode = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
.ndo_do_ioctl = ucc_geth_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index d12fcad145e..d12fcad145e 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index a97257f91a3..a97257f91a3 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
new file mode 100644
index 00000000000..dffee9d44fd
--- /dev/null
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -0,0 +1,54 @@
+#
+# Fujitsu Network device configuration
+#
+
+config NET_VENDOR_FUJITSU
+ bool "Fujitsu devices"
+ default y
+ depends on ISA || PCMCIA || ((ISA || MCA_LEGACY) && EXPERIMENTAL)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ the questions about Fujitsu cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_FUJITSU
+
+config AT1700
+ tristate "AT1700/1720 support (EXPERIMENTAL)"
+ depends on (ISA || MCA_LEGACY) && EXPERIMENTAL
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called at1700.
+
+config PCMCIA_FMVJ18X
+ tristate "Fujitsu FMV-J18x PCMCIA support"
+ depends on PCMCIA
+ select CRC32
+ ---help---
+ Say Y here if you intend to attach a Fujitsu FMV-J18x or compatible
+ PCMCIA (PC-card) Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called fmvj18x_cs. If unsure, say N.
+
+config ETH16I
+ tristate "ICL EtherTeam 16i/32 support"
+ depends on ISA
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called eth16i.
+
+endif # NET_VENDOR_FUJITSU
diff --git a/drivers/net/ethernet/fujitsu/Makefile b/drivers/net/ethernet/fujitsu/Makefile
new file mode 100644
index 00000000000..2730ae67d3a
--- /dev/null
+++ b/drivers/net/ethernet/fujitsu/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Fujitsu network device drivers.
+#
+
+obj-$(CONFIG_AT1700) += at1700.o
+obj-$(CONFIG_ETH16I) += eth16i.o
+obj-$(CONFIG_PCMCIA_FMVJ18X) += fmvj18x_cs.o
diff --git a/drivers/net/at1700.c b/drivers/net/ethernet/fujitsu/at1700.c
index 65a78f965dd..7c6c908bdf0 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/ethernet/fujitsu/at1700.c
@@ -253,7 +253,7 @@ static const struct net_device_ops at1700_netdev_ops = {
.ndo_open = net_open,
.ndo_stop = net_close,
.ndo_start_xmit = net_send_packet,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_tx_timeout = net_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/eth16i.c b/drivers/net/ethernet/fujitsu/eth16i.c
index 12d28e9d0cb..b0e2313af3d 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/ethernet/fujitsu/eth16i.c
@@ -478,7 +478,7 @@ static const struct net_device_ops eth16i_netdev_ops = {
.ndo_open = eth16i_open,
.ndo_stop = eth16i_close,
.ndo_start_xmit = eth16i_tx,
- .ndo_set_multicast_list = eth16i_multicast,
+ .ndo_set_rx_mode = eth16i_multicast,
.ndo_tx_timeout = eth16i_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 723815e7a99..15416752c13 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -226,7 +226,7 @@ static const struct net_device_ops fjn_netdev_ops = {
.ndo_start_xmit = fjn_start_xmit,
.ndo_tx_timeout = fjn_tx_timeout,
.ndo_set_config = fjn_config,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/hp/Kconfig b/drivers/net/ethernet/hp/Kconfig
new file mode 100644
index 00000000000..a0b8ece1e3b
--- /dev/null
+++ b/drivers/net/ethernet/hp/Kconfig
@@ -0,0 +1,32 @@
+#
+# HP network device configuration
+#
+
+config NET_VENDOR_HP
+ bool "HP devices"
+ default y
+ depends on ISA || EISA || PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about HP cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_HP
+
+config HP100
+ tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support"
+ depends on (ISA || EISA || PCI)
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called hp100.
+
+endif # NET_VENDOR_HP
diff --git a/drivers/net/ethernet/hp/Makefile b/drivers/net/ethernet/hp/Makefile
new file mode 100644
index 00000000000..20b6918b52b
--- /dev/null
+++ b/drivers/net/ethernet/hp/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the HP network device drivers.
+#
+
+obj-$(CONFIG_HP100) += hp100.o
diff --git a/drivers/net/hp100.c b/drivers/net/ethernet/hp/hp100.c
index b6519c1ba7e..6a5ee0776b2 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -430,7 +430,7 @@ static const struct net_device_ops hp100_bm_netdev_ops = {
.ndo_stop = hp100_close,
.ndo_start_xmit = hp100_start_xmit_bm,
.ndo_get_stats = hp100_get_stats,
- .ndo_set_multicast_list = hp100_set_multicast_list,
+ .ndo_set_rx_mode = hp100_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -441,7 +441,7 @@ static const struct net_device_ops hp100_netdev_ops = {
.ndo_stop = hp100_close,
.ndo_start_xmit = hp100_start_xmit,
.ndo_get_stats = hp100_get_stats,
- .ndo_set_multicast_list = hp100_set_multicast_list,
+ .ndo_set_rx_mode = hp100_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/hp100.h b/drivers/net/ethernet/hp/hp100.h
index b60e96fe38b..b60e96fe38b 100644
--- a/drivers/net/hp100.h
+++ b/drivers/net/ethernet/hp/hp100.h
diff --git a/drivers/net/3c505.c b/drivers/net/ethernet/i825xx/3c505.c
index 88d766ee0e1..40e1a175fce 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/ethernet/i825xx/3c505.c
@@ -1363,7 +1363,7 @@ static const struct net_device_ops elp_netdev_ops = {
.ndo_get_stats = elp_get_stats,
.ndo_start_xmit = elp_start_xmit,
.ndo_tx_timeout = elp_timeout,
- .ndo_set_multicast_list = elp_set_mc_list,
+ .ndo_set_rx_mode = elp_set_mc_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/3c505.h b/drivers/net/ethernet/i825xx/3c505.h
index 04df2a9002b..04df2a9002b 100644
--- a/drivers/net/3c505.h
+++ b/drivers/net/ethernet/i825xx/3c505.h
diff --git a/drivers/net/3c507.c b/drivers/net/ethernet/i825xx/3c507.c
index 1e945551c14..1e945551c14 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/ethernet/i825xx/3c507.c
diff --git a/drivers/net/3c523.c b/drivers/net/ethernet/i825xx/3c523.c
index bc0d1a1c2e2..d70d3df4c98 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/ethernet/i825xx/3c523.c
@@ -409,7 +409,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = elmc_send_packet,
.ndo_tx_timeout = elmc_timeout,
#ifdef ELMC_MULTICAST
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
#endif
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/3c523.h b/drivers/net/ethernet/i825xx/3c523.h
index 6956441687b..6956441687b 100644
--- a/drivers/net/3c523.h
+++ b/drivers/net/ethernet/i825xx/3c523.h
diff --git a/drivers/net/3c527.c b/drivers/net/ethernet/i825xx/3c527.c
index d9d056d207f..474b5e71a53 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/ethernet/i825xx/3c527.c
@@ -292,7 +292,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_stop = mc32_close,
.ndo_start_xmit = mc32_send_packet,
.ndo_get_stats = mc32_get_stats,
- .ndo_set_multicast_list = mc32_set_multicast_list,
+ .ndo_set_rx_mode = mc32_set_multicast_list,
.ndo_tx_timeout = mc32_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/3c527.h b/drivers/net/ethernet/i825xx/3c527.h
index d693b8d15cd..d693b8d15cd 100644
--- a/drivers/net/3c527.h
+++ b/drivers/net/ethernet/i825xx/3c527.h
diff --git a/drivers/net/82596.c b/drivers/net/ethernet/i825xx/82596.c
index be1f1970c84..f2408a4d5d9 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1145,7 +1145,7 @@ static const struct net_device_ops i596_netdev_ops = {
.ndo_open = i596_open,
.ndo_stop = i596_close,
.ndo_start_xmit = i596_start_xmit,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_tx_timeout = i596_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig
new file mode 100644
index 00000000000..2be46986cbe
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/Kconfig
@@ -0,0 +1,183 @@
+#
+# Intel 82596/82593/82596 network device configuration
+#
+
+config NET_VENDOR_I825XX
+ bool "Intel (82586/82593/82596) devices"
+ default y
+ depends on NET_VENDOR_INTEL && (ISA || ISA_DMA_API || ARM || \
+ ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
+ GSC || BVME6000 || MVME16x || EXPERIMENTAL)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question does not directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about these devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_I825XX
+
+config ELPLUS
+ tristate "3c505 \"EtherLink Plus\" support"
+ depends on ISA && ISA_DMA_API
+ ---help---
+ Information about this network (Ethernet) card can be found in
+ <file:Documentation/networking/3c505.txt>. If you have a card of
+ this type, say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c505.
+
+config EL16
+ tristate "3c507 \"EtherLink 16\" support (EXPERIMENTAL)"
+ depends on ISA && EXPERIMENTAL
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c507.
+
+config ELMC
+ tristate "3c523 \"EtherLink/MC\" support"
+ depends on MCA_LEGACY
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c523.
+
+config ELMC_II
+ tristate "3c527 \"EtherLink/MC 32\" support (EXPERIMENTAL)"
+ depends on MCA && MCA_LEGACY
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c527.
+
+config ARM_ETHER1
+ tristate "Acorn Ether1 support"
+ depends on ARM && ARCH_ACORN
+ ---help---
+ If you have an Acorn system with one of these (AKA25) network cards,
+ you should say Y to this option if you wish to use it with Linux.
+
+config APRICOT
+ tristate "Apricot Xen-II on board Ethernet"
+ depends on ISA
+ ---help---
+ If you have a network (Ethernet) controller of this type, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called apricot.
+
+config BVME6000_NET
+ tristate "BVME6000 Ethernet support"
+ depends on BVME6000MVME16x
+ ---help---
+ This is the driver for the Ethernet interface on BVME4000 and
+ BVME6000 VME boards. Say Y here to include the driver for this chip
+ in your kernel.
+ To compile this driver as a module, choose M here.
+
+config EEXPRESS
+ tristate "EtherExpress 16 support"
+ depends on ISA
+ ---help---
+ If you have an EtherExpress16 network (Ethernet) card, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Note that the Intel
+ EtherExpress16 card used to be regarded as a very poor choice
+ because the driver was very unreliable. We now have a new driver
+ that should do better.
+
+ To compile this driver as a module, choose M here. The module
+ will be called eexpress.
+
+config EEXPRESS_PRO
+ tristate "EtherExpressPro support/EtherExpress 10 (i82595) support"
+ depends on ISA
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y. This
+ driver supports Intel i82595{FX,TX} based boards. Note however
+ that the EtherExpress PRO/100 Ethernet card has its own separate
+ driver. Please read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called eepro.
+
+config LASI_82596
+ tristate "Lasi ethernet"
+ depends on GSC
+ ---help---
+ Say Y here to support the builtin Intel 82596 ethernet controller
+ found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet.
+
+config LP486E
+ tristate "LP486E on board Ethernet"
+ depends on ISA
+ ---help---
+ Say Y here to support the 82596-based on-board Ethernet controller
+ for the Panther motherboard, which is one of the two shipped in the
+ Intel Professional Workstation.
+
+config MVME16x_NET
+ tristate "MVME16x Ethernet support"
+ depends on MVME16x
+ ---help---
+ This is the driver for the Ethernet interface on the Motorola
+ MVME162, 166, 167, 172 and 177 boards. Say Y here to include the
+ driver for this chip in your kernel.
+ To compile this driver as a module, choose M here.
+
+config NI52
+ tristate "NI5210 support"
+ depends on ISA
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ni52.
+
+config SNI_82596
+ tristate "SNI RM ethernet"
+ depends on SNI_RM
+ ---help---
+ Say Y here to support the on-board Intel 82596 ethernet controller
+ built into SNI RM machines.
+
+config SUN3_82586
+ bool "Sun3 on-board Intel 82586 support"
+ depends on SUN3
+ ---help---
+ This driver enables support for the on-board Intel 82586 based
+ Ethernet adapter found on Sun 3/1xx and 3/2xx motherboards. Note
+ that this driver does not support 82586-based adapters on additional
+ VME boards.
+
+config ZNET
+ tristate "Zenith Z-Note support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && ISA_DMA_API
+ ---help---
+ The Zenith Z-Note notebook computer has a built-in network
+ (Ethernet) card, and this is the Linux driver for it. Note that the
+ IBM Thinkpad 300 is compatible with the Z-Note and is also supported
+ by this driver. Read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+endif # NET_VENDOR_I825XX
diff --git a/drivers/net/ethernet/i825xx/Makefile b/drivers/net/ethernet/i825xx/Makefile
new file mode 100644
index 00000000000..f68a3694968
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the Intel 82586/82593/82596 chipset device drivers.
+#
+
+obj-$(CONFIG_ARM_ETHER1) += ether1.o
+obj-$(CONFIG_EEXPRESS) += eexpress.o
+obj-$(CONFIG_EEXPRESS_PRO) += eepro.o
+obj-$(CONFIG_ELPLUS) += 3c505.o
+obj-$(CONFIG_EL16) += 3c507.o
+obj-$(CONFIG_ELMC) += 3c523.o
+obj-$(CONFIG_ELMC_II) += 3c527.o
+obj-$(CONFIG_LP486E) += lp486e.o
+obj-$(CONFIG_NI52) += ni52.o
+obj-$(CONFIG_SUN3_82586) += sun3_82586.o
+obj-$(CONFIG_ZNET) += znet.o
+obj-$(CONFIG_APRICOT) += 82596.o
+obj-$(CONFIG_LASI_82596) += lasi_82596.o
+obj-$(CONFIG_SNI_82596) += sni_82596.o
+obj-$(CONFIG_MVME16x_NET) += 82596.o
+obj-$(CONFIG_BVME6000_NET) += 82596.o
diff --git a/drivers/net/eepro.c b/drivers/net/ethernet/i825xx/eepro.c
index dfeb006035d..067c46069a1 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/ethernet/i825xx/eepro.c
@@ -743,7 +743,7 @@ static const struct net_device_ops eepro_netdev_ops = {
.ndo_open = eepro_open,
.ndo_stop = eepro_close,
.ndo_start_xmit = eepro_send_packet,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_tx_timeout = eepro_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/eexpress.c b/drivers/net/ethernet/i825xx/eexpress.c
index a19228563ef..3a9580f3d4d 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/ethernet/i825xx/eexpress.c
@@ -1047,7 +1047,7 @@ static const struct net_device_ops eexp_netdev_ops = {
.ndo_open = eexp_open,
.ndo_stop = eexp_close,
.ndo_start_xmit = eexp_xmit,
- .ndo_set_multicast_list = eexp_set_multicast,
+ .ndo_set_rx_mode = eexp_set_multicast,
.ndo_tx_timeout = eexp_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/eexpress.h b/drivers/net/ethernet/i825xx/eexpress.h
index dc9c6ea289e..dc9c6ea289e 100644
--- a/drivers/net/eexpress.h
+++ b/drivers/net/ethernet/i825xx/eexpress.h
diff --git a/drivers/net/arm/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index b00781c02d5..42e90a97c7a 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -985,7 +985,7 @@ static const struct net_device_ops ether1_netdev_ops = {
.ndo_open = ether1_open,
.ndo_stop = ether1_close,
.ndo_start_xmit = ether1_sendpacket,
- .ndo_set_multicast_list = ether1_setmulticastlist,
+ .ndo_set_rx_mode = ether1_setmulticastlist,
.ndo_tx_timeout = ether1_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/arm/ether1.h b/drivers/net/ethernet/i825xx/ether1.h
index 3a5830ab3dc..3a5830ab3dc 100644
--- a/drivers/net/arm/ether1.h
+++ b/drivers/net/ethernet/i825xx/ether1.h
diff --git a/drivers/net/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index 6eba352c52e..6eba352c52e 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
diff --git a/drivers/net/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 9e042894479..3efbd8dbb63 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -1038,7 +1038,7 @@ static const struct net_device_ops i596_netdev_ops = {
.ndo_open = i596_open,
.ndo_stop = i596_close,
.ndo_start_xmit = i596_start_xmit,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_tx_timeout = i596_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c
index 385a95311cd..414044b3cb1 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/ethernet/i825xx/lp486e.c
@@ -954,7 +954,7 @@ static const struct net_device_ops i596_netdev_ops = {
.ndo_open = i596_open,
.ndo_stop = i596_close,
.ndo_start_xmit = i596_start_xmit,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_tx_timeout = i596_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ni52.c b/drivers/net/ethernet/i825xx/ni52.c
index d973fc6c6b8..c0893715ef4 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ethernet/i825xx/ni52.c
@@ -445,7 +445,7 @@ static const struct net_device_ops ni52_netdev_ops = {
.ndo_get_stats = ni52_get_stats,
.ndo_tx_timeout = ni52_timeout,
.ndo_start_xmit = ni52_send_packet,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ni52.h b/drivers/net/ethernet/i825xx/ni52.h
index 0a03b288332..0a03b288332 100644
--- a/drivers/net/ni52.h
+++ b/drivers/net/ethernet/i825xx/ni52.h
diff --git a/drivers/net/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 6b2a8881747..6b2a8881747 100644
--- a/drivers/net/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
diff --git a/drivers/net/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index b6ae53bada7..6ef5e11d1c8 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -333,7 +333,7 @@ static const struct net_device_ops sun3_82586_netdev_ops = {
.ndo_open = sun3_82586_open,
.ndo_stop = sun3_82586_close,
.ndo_start_xmit = sun3_82586_send_packet,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_tx_timeout = sun3_82586_timeout,
.ndo_get_stats = sun3_82586_get_stats,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h
index 93346f00486..93346f00486 100644
--- a/drivers/net/sun3_82586.h
+++ b/drivers/net/ethernet/i825xx/sun3_82586.h
diff --git a/drivers/net/znet.c b/drivers/net/ethernet/i825xx/znet.c
index 8b8881718f5..962b4c421f3 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/ethernet/i825xx/znet.c
@@ -356,7 +356,7 @@ static const struct net_device_ops znet_netdev_ops = {
.ndo_open = znet_open,
.ndo_stop = znet_close,
.ndo_start_xmit = znet_send_packet,
- .ndo_set_multicast_list = znet_set_multicast_list,
+ .ndo_set_rx_mode = znet_set_multicast_list,
.ndo_tx_timeout = znet_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
new file mode 100644
index 00000000000..9e16f3fa97b
--- /dev/null
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -0,0 +1,48 @@
+#
+# IBM device configuration.
+#
+
+config NET_VENDOR_IBM
+ bool "IBM devices"
+ default y
+ depends on MCA || PPC_PSERIES || PPC_PSERIES || PPC_DCR || \
+ (IBMEBUS && INET && SPARSEMEM)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about IBM devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_IBM
+
+config IBMVETH
+ tristate "IBM LAN Virtual Ethernet support"
+ depends on PPC_PSERIES
+ ---help---
+ This driver supports virtual ethernet adapters on newer IBM iSeries
+ and pSeries systems.
+
+ To compile this driver as a module, choose M here. The module will
+ be called ibmveth.
+
+config ISERIES_VETH
+ tristate "iSeries Virtual Ethernet driver support"
+ depends on PPC_ISERIES
+
+source "drivers/net/ethernet/ibm/emac/Kconfig"
+
+config EHEA
+ tristate "eHEA Ethernet support"
+ depends on IBMEBUS && INET && SPARSEMEM
+ select INET_LRO
+ ---help---
+ This driver supports the IBM pSeries eHEA ethernet adapter.
+
+ To compile the driver as a module, choose M here. The module
+ will be called ehea.
+
+endif # NET_VENDOR_IBM
diff --git a/drivers/net/ethernet/ibm/Makefile b/drivers/net/ethernet/ibm/Makefile
new file mode 100644
index 00000000000..5a7d4e9ac80
--- /dev/null
+++ b/drivers/net/ethernet/ibm/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for th IBM network device drivers.
+#
+
+obj-$(CONFIG_IBMVETH) += ibmveth.o
+obj-$(CONFIG_ISERIES_VETH) += iseries_veth.o
+obj-$(CONFIG_IBM_EMAC) += emac/
+obj-$(CONFIG_EHEA) += ehea/
diff --git a/drivers/net/ehea/Makefile b/drivers/net/ethernet/ibm/ehea/Makefile
index 775d9969b5c..775d9969b5c 100644
--- a/drivers/net/ehea/Makefile
+++ b/drivers/net/ethernet/ibm/ehea/Makefile
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 7dd5e6a0d99..410d6a1984e 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -33,7 +33,6 @@
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
-#include <linux/inet_lro.h>
#include <asm/ibmebus.h>
#include <asm/abs_addr.h>
@@ -58,8 +57,6 @@
#define EHEA_MIN_ENTRIES_QP 127
#define EHEA_SMALL_QUEUES
-#define EHEA_NUM_TX_QP 1
-#define EHEA_LRO_MAX_AGGR 64
#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT 1023
@@ -83,18 +80,16 @@
#define EHEA_SG_RQ3 0
#define EHEA_MAX_PACKET_SIZE 9022 /* for jumbo frames */
-#define EHEA_RQ2_PKT_SIZE 1522
+#define EHEA_RQ2_PKT_SIZE 2048
#define EHEA_L_PKT_SIZE 256 /* low latency */
-#define MAX_LRO_DESCRIPTORS 8
-
/* Send completion signaling */
/* Protection Domain Identifier */
#define EHEA_PD_ID 0xaabcdeff
#define EHEA_RQ2_THRESHOLD 1
-#define EHEA_RQ3_THRESHOLD 9 /* use RQ3 threshold of 1522 bytes */
+#define EHEA_RQ3_THRESHOLD 4 /* use RQ3 threshold of 2048 bytes */
#define EHEA_SPEED_10G 10000
#define EHEA_SPEED_1G 1000
@@ -363,7 +358,6 @@ struct ehea_port_res {
struct port_stats p_stats;
struct ehea_mr send_mr; /* send memory region */
struct ehea_mr recv_mr; /* receive memory region */
- spinlock_t xmit_lock;
struct ehea_port *port;
char int_recv_name[EHEA_IRQ_NAME_SIZE];
char int_send_name[EHEA_IRQ_NAME_SIZE];
@@ -376,8 +370,6 @@ struct ehea_port_res {
struct ehea_q_skb_arr rq3_skba;
struct ehea_q_skb_arr sq_skba;
int sq_skba_size;
- spinlock_t netif_queue;
- int queue_stopped;
int swqe_refill_th;
atomic_t swqe_avail;
int swqe_ll_count;
@@ -386,9 +378,6 @@ struct ehea_port_res {
u64 tx_bytes;
u64 rx_packets;
u64 rx_bytes;
- u32 poll_counter;
- struct net_lro_mgr lro_mgr;
- struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
int sq_restart_flag;
};
@@ -453,18 +442,17 @@ struct ehea_bcmc_reg_array {
struct ehea_port {
struct ehea_adapter *adapter; /* adapter that owns this port */
struct net_device *netdev;
- struct net_device_stats stats;
+ struct rtnl_link_stats64 stats;
struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
struct platform_device ofdev; /* Open Firmware Device */
struct ehea_mc_list *mc_list; /* Multicast MAC addresses */
struct ehea_eq *qp_eq;
struct work_struct reset_task;
+ struct delayed_work stats_work;
struct mutex port_lock;
char int_aff_name[EHEA_IRQ_NAME_SIZE];
int allmulti; /* Indicates IFF_ALLMULTI state */
int promisc; /* Indicates IFF_PROMISC state */
- int num_tx_qps;
- int num_add_tx_qps;
int num_mcs;
int resets;
unsigned long flags;
@@ -474,7 +462,6 @@ struct ehea_port {
u32 msg_enable;
u32 sig_comp_iv;
u32 state;
- u32 lro_max_aggr;
u8 phy_link;
u8 full_duplex;
u8 autoneg;
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 7f642aef5e8..05b7359bde8 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -180,7 +180,7 @@ static void ehea_set_msglevel(struct net_device *dev, u32 value)
port->msg_enable = value;
}
-static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
+static const char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"sig_comp_iv"},
{"swqe_refill_th"},
{"port resets"},
@@ -189,7 +189,6 @@ static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"IP cksum errors"},
{"Frame cksum errors"},
{"num SQ stopped"},
- {"SQ stopped"},
{"PR0 free_swqes"},
{"PR1 free_swqes"},
{"PR2 free_swqes"},
@@ -198,9 +197,14 @@ static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"PR5 free_swqes"},
{"PR6 free_swqes"},
{"PR7 free_swqes"},
- {"LRO aggregated"},
- {"LRO flushed"},
- {"LRO no_desc"},
+ {"PR8 free_swqes"},
+ {"PR9 free_swqes"},
+ {"PR10 free_swqes"},
+ {"PR11 free_swqes"},
+ {"PR12 free_swqes"},
+ {"PR13 free_swqes"},
+ {"PR14 free_swqes"},
+ {"PR15 free_swqes"},
};
static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -255,25 +259,8 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
tmp += port->port_res[k].p_stats.queue_stopped;
data[i++] = tmp;
- for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
- tmp |= port->port_res[k].queue_stopped;
- data[i++] = tmp;
-
- for (k = 0; k < 8; k++)
+ for (k = 0; k < 16; k++)
data[i++] = atomic_read(&port->port_res[k].swqe_avail);
-
- for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
- tmp |= port->port_res[k].lro_mgr.stats.aggregated;
- data[i++] = tmp;
-
- for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
- tmp |= port->port_res[k].lro_mgr.stats.flushed;
- data[i++] = tmp;
-
- for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
- tmp |= port->port_res[k].lro_mgr.stats.no_desc;
- data[i++] = tmp;
-
}
const struct ethtool_ops ehea_ethtool_ops = {
diff --git a/drivers/net/ehea/ehea_hw.h b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
index 567981b4b2c..1a2fe4dc3eb 100644
--- a/drivers/net/ehea/ehea_hw.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
@@ -210,36 +210,11 @@ static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
__raw_writeq(value, (void __iomem *)(epa.addr + offset));
}
-#define epa_store_eq(epa, offset, value)\
- epa_store(epa, EQTEMM_OFFSET(offset), value)
-#define epa_load_eq(epa, offset)\
- epa_load(epa, EQTEMM_OFFSET(offset))
-
#define epa_store_cq(epa, offset, value)\
epa_store(epa, CQTEMM_OFFSET(offset), value)
#define epa_load_cq(epa, offset)\
epa_load(epa, CQTEMM_OFFSET(offset))
-#define epa_store_qp(epa, offset, value)\
- epa_store(epa, QPTEMM_OFFSET(offset), value)
-#define epa_load_qp(epa, offset)\
- epa_load(epa, QPTEMM_OFFSET(offset))
-
-#define epa_store_qped(epa, offset, value)\
- epa_store(epa, QPEDMM_OFFSET(offset), value)
-#define epa_load_qped(epa, offset)\
- epa_load(epa, QPEDMM_OFFSET(offset))
-
-#define epa_store_mrmw(epa, offset, value)\
- epa_store(epa, MRMWMM_OFFSET(offset), value)
-#define epa_load_mrmw(epa, offset)\
- epa_load(epa, MRMWMM_OFFSET(offset))
-
-#define epa_store_base(epa, offset, value)\
- epa_store(epa, HCAGR_OFFSET(offset), value)
-#define epa_load_base(epa, offset)\
- epa_load(epa, HCAGR_OFFSET(offset))
-
static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
{
struct h_epa epa = qp->epas.kernel;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index be2cb4ab8b4..37b70f7052b 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -61,10 +61,7 @@ static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
-static int use_mcs;
-static int use_lro;
-static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
-static int num_tx_qps = EHEA_NUM_TX_QP;
+static int use_mcs = 1;
static int prop_carrier_state;
module_param(msg_level, int, 0);
@@ -74,11 +71,7 @@ module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
-module_param(use_lro, int, 0);
-module_param(lro_max_aggr, int, 0);
-module_param(num_tx_qps, int, 0);
-MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
"port to stack. 1:yes, 0:no. Default = 0 ");
@@ -94,12 +87,8 @@ MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
"[2^x - 1], x = [6..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
-MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
-
-MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
- __MODULE_STRING(EHEA_LRO_MAX_AGGR));
-MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
- "Default = 0");
+MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
+ "Default = 1");
static int port_name_cnt;
static LIST_HEAD(adapter_list);
@@ -173,7 +162,7 @@ static void ehea_update_firmware_handles(void)
continue;
num_ports++;
- num_portres += port->num_def_qps + port->num_add_tx_qps;
+ num_portres += port->num_def_qps;
}
}
@@ -199,9 +188,7 @@ static void ehea_update_firmware_handles(void)
(num_ports == 0))
continue;
- for (l = 0;
- l < port->num_def_qps + port->num_add_tx_qps;
- l++) {
+ for (l = 0; l < port->num_def_qps; l++) {
struct ehea_port_res *pr = &port->port_res[l];
arr[i].adh = adapter->handle;
@@ -327,20 +314,44 @@ out:
spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
-static struct net_device_stats *ehea_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct ehea_port *port = netdev_priv(dev);
- struct net_device_stats *stats = &port->stats;
- struct hcp_ehea_port_cb2 *cb2;
- u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
+ u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
int i;
- memset(stats, 0, sizeof(*stats));
+ for (i = 0; i < port->num_def_qps; i++) {
+ rx_packets += port->port_res[i].rx_packets;
+ rx_bytes += port->port_res[i].rx_bytes;
+ }
+
+ for (i = 0; i < port->num_def_qps; i++) {
+ tx_packets += port->port_res[i].tx_packets;
+ tx_bytes += port->port_res[i].tx_bytes;
+ }
+
+ stats->tx_packets = tx_packets;
+ stats->rx_bytes = rx_bytes;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+
+ return &port->stats;
+}
+
+static void ehea_update_stats(struct work_struct *work)
+{
+ struct ehea_port *port =
+ container_of(work, struct ehea_port, stats_work.work);
+ struct net_device *dev = port->netdev;
+ struct rtnl_link_stats64 *stats = &port->stats;
+ struct hcp_ehea_port_cb2 *cb2;
+ u64 hret;
cb2 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb2) {
- netdev_err(dev, "no mem for cb2\n");
- goto out;
+ netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
+ goto resched;
}
hret = ehea_h_query_ehea_port(port->adapter->handle,
@@ -354,29 +365,13 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
if (netif_msg_hw(port))
ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
- rx_packets = 0;
- for (i = 0; i < port->num_def_qps; i++) {
- rx_packets += port->port_res[i].rx_packets;
- rx_bytes += port->port_res[i].rx_bytes;
- }
-
- tx_packets = 0;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
- tx_packets += port->port_res[i].tx_packets;
- tx_bytes += port->port_res[i].tx_bytes;
- }
-
- stats->tx_packets = tx_packets;
stats->multicast = cb2->rxmcp;
stats->rx_errors = cb2->rxuerr;
- stats->rx_bytes = rx_bytes;
- stats->tx_bytes = tx_bytes;
- stats->rx_packets = rx_packets;
out_herr:
free_page((unsigned long)cb2);
-out:
- return stats;
+resched:
+ schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
}
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -543,7 +538,8 @@ static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
}
static inline void ehea_fill_skb(struct net_device *dev,
- struct sk_buff *skb, struct ehea_cqe *cqe)
+ struct sk_buff *skb, struct ehea_cqe *cqe,
+ struct ehea_port_res *pr)
{
int length = cqe->num_bytes_transfered - 4; /*remove CRC */
@@ -557,6 +553,8 @@ static inline void ehea_fill_skb(struct net_device *dev,
skb->csum = csum_unfold(~cqe->inet_checksum_value);
} else
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
@@ -649,49 +647,6 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
return 0;
}
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
- void **tcph, u64 *hdr_flags, void *priv)
-{
- struct ehea_cqe *cqe = priv;
- unsigned int ip_len;
- struct iphdr *iph;
-
- /* non tcp/udp packets */
- if (!cqe->header_length)
- return -1;
-
- /* non tcp packet */
- skb_reset_network_header(skb);
- iph = ip_hdr(skb);
- if (iph->protocol != IPPROTO_TCP)
- return -1;
-
- ip_len = ip_hdrlen(skb);
- skb_set_transport_header(skb, ip_len);
- *tcph = tcp_hdr(skb);
-
- /* check if ip header and tcp header are complete */
- if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
- return -1;
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- *iphdr = iph;
-
- return 0;
-}
-
-static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
- struct sk_buff *skb)
-{
- if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
- __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);
-
- if (skb->dev->features & NETIF_F_LRO)
- lro_receive_skb(&pr->lro_mgr, skb, cqe);
- else
- netif_receive_skb(skb);
-}
-
static int ehea_proc_rwqes(struct net_device *dev,
struct ehea_port_res *pr,
int budget)
@@ -742,7 +697,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
}
skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
- ehea_fill_skb(dev, skb, cqe);
+ ehea_fill_skb(dev, skb, cqe, pr);
} else if (rq == 2) {
/* RQ2 */
skb = get_skb_by_index(skb_arr_rq2,
@@ -752,7 +707,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
"rq2: skb=NULL\n");
break;
}
- ehea_fill_skb(dev, skb, cqe);
+ ehea_fill_skb(dev, skb, cqe, pr);
processed_rq2++;
} else {
/* RQ3 */
@@ -763,12 +718,16 @@ static int ehea_proc_rwqes(struct net_device *dev,
"rq3: skb=NULL\n");
break;
}
- ehea_fill_skb(dev, skb, cqe);
+ ehea_fill_skb(dev, skb, cqe, pr);
processed_rq3++;
}
processed_bytes += skb->len;
- ehea_proc_skb(pr, cqe, skb);
+
+ if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
+ __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);
+
+ napi_gro_receive(&pr->napi, skb);
} else {
pr->p_stats.poll_receive_errors++;
port_reset = ehea_treat_poll_error(pr, rq, cqe,
@@ -779,8 +738,6 @@ static int ehea_proc_rwqes(struct net_device *dev,
}
cqe = ehea_poll_rq1(qp, &wqe_index);
}
- if (dev->features & NETIF_F_LRO)
- lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
pr->rx_bytes += processed_bytes;
@@ -798,7 +755,7 @@ static void reset_sq_restart_flag(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
pr->sq_restart_flag = 0;
}
@@ -811,7 +768,7 @@ static void check_sqs(struct ehea_port *port)
int swqe_index;
int i, k;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
int ret;
k = 0;
@@ -849,7 +806,8 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
int cqe_counter = 0;
int swqe_av = 0;
int index;
- unsigned long flags;
+ struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
+ pr - &pr->port->port_res[0]);
cqe = ehea_poll_cq(send_cq);
while (cqe && (quota > 0)) {
@@ -899,20 +857,20 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
ehea_update_feca(send_cq, cqe_counter);
atomic_add(swqe_av, &pr->swqe_avail);
- spin_lock_irqsave(&pr->netif_queue, flags);
-
- if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
- >= pr->swqe_refill_th)) {
- netif_wake_queue(pr->port->netdev);
- pr->queue_stopped = 0;
+ if (unlikely(netif_tx_queue_stopped(txq) &&
+ (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
}
- spin_unlock_irqrestore(&pr->netif_queue, flags);
+
wake_up(&pr->port->swqe_avail_wq);
return cqe;
}
-#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535
static int ehea_poll(struct napi_struct *napi, int budget)
@@ -922,18 +880,13 @@ static int ehea_poll(struct napi_struct *napi, int budget)
struct net_device *dev = pr->port->netdev;
struct ehea_cqe *cqe;
struct ehea_cqe *cqe_skb = NULL;
- int force_irq, wqe_index;
+ int wqe_index;
int rx = 0;
- force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
+ rx += ehea_proc_rwqes(dev, pr, budget - rx);
- if (!force_irq)
- rx += ehea_proc_rwqes(dev, pr, budget - rx);
-
- while ((rx != budget) || force_irq) {
- pr->poll_counter = 0;
- force_irq = 0;
+ while (rx != budget) {
napi_complete(napi);
ehea_reset_cq_ep(pr->recv_cq);
ehea_reset_cq_ep(pr->send_cq);
@@ -953,7 +906,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
rx += ehea_proc_rwqes(dev, pr, budget - rx);
}
- pr->poll_counter++;
return rx;
}
@@ -1105,13 +1057,6 @@ int ehea_sense_port_attr(struct ehea_port *port)
goto out_free;
}
- port->num_tx_qps = num_tx_qps;
-
- if (port->num_def_qps >= port->num_tx_qps)
- port->num_add_tx_qps = 0;
- else
- port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;
-
ret = 0;
out_free:
if (ret || netif_msg_probe(port))
@@ -1243,7 +1188,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
netif_info(port, link, dev,
"Logical port down\n");
netif_carrier_off(dev);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
}
if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
@@ -1274,7 +1219,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
case EHEA_EC_PORT_MALFUNC:
netdev_info(dev, "Port malfunction\n");
netif_carrier_off(dev);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
break;
default:
netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
@@ -1352,7 +1297,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
port->qp_eq->attr.ist1);
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
pr = &port->port_res[i];
snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
"%s-queue%d", dev->name, i);
@@ -1395,7 +1340,7 @@ static void ehea_free_interrupts(struct net_device *dev)
/* send */
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
pr = &port->port_res[i];
ibmebus_free_irq(pr->eq->attr.ist1, pr);
netif_info(port, intr, dev,
@@ -1526,8 +1471,6 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
pr->rx_packets = rx_packets;
pr->port = port;
- spin_lock_init(&pr->xmit_lock);
- spin_lock_init(&pr->netif_queue);
pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
if (!pr->eq) {
@@ -1618,15 +1561,6 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
- pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
- pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
- pr->lro_mgr.lro_arr = pr->lro_desc;
- pr->lro_mgr.get_skb_header = get_skb_hdr;
- pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
- pr->lro_mgr.dev = port->netdev;
- pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
- pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-
ret = 0;
goto out;
@@ -1683,96 +1617,35 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
return ret;
}
-/*
- * The write_* functions store information in swqe which is used by
- * the hardware to calculate the ip/tcp/udp checksum
- */
-
-static inline void write_ip_start_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->ip_start = skb_network_offset(skb);
- swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
-}
-
-static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->tcp_offset =
- (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
-
- swqe->tcp_end = (u16)skb->len - 1;
-}
-
-static inline void write_udp_offset_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->tcp_offset =
- (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
-
- swqe->tcp_end = (u16)skb->len - 1;
-}
-
-
-static void write_swqe2_TSO(struct sk_buff *skb,
- struct ehea_swqe *swqe, u32 lkey)
-{
- struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
- u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
- int skb_data_size = skb_headlen(skb);
- int headersize;
-
- /* Packet is TCP with TSO enabled */
- swqe->tx_control |= EHEA_SWQE_TSO;
- swqe->mss = skb_shinfo(skb)->gso_size;
- /* copy only eth/ip/tcp headers to immediate data and
- * the rest of skb->data to sg1entry
- */
- headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
-
- skb_data_size = skb_headlen(skb);
-
- if (skb_data_size >= headersize) {
- /* copy immediate data */
- skb_copy_from_linear_data(skb, imm_data, headersize);
- swqe->immediate_data_length = headersize;
-
- if (skb_data_size > headersize) {
- /* set sg1entry data */
- sg1entry->l_key = lkey;
- sg1entry->len = skb_data_size - headersize;
- sg1entry->vaddr =
- ehea_map_vaddr(skb->data + headersize);
- swqe->descriptors++;
- }
- } else
- pr_err("cannot handle fragmented headers\n");
-}
-
-static void write_swqe2_nonTSO(struct sk_buff *skb,
- struct ehea_swqe *swqe, u32 lkey)
+static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
+ u32 lkey)
{
int skb_data_size = skb_headlen(skb);
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
+ unsigned int immediate_len = SWQE2_MAX_IMM;
- /* Packet is any nonTSO type
- *
- * Copy as much as possible skb->data to immediate data and
- * the rest to sg1entry
- */
- if (skb_data_size >= SWQE2_MAX_IMM) {
- /* copy immediate data */
- skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
+ swqe->descriptors = 0;
- swqe->immediate_data_length = SWQE2_MAX_IMM;
+ if (skb_is_gso(skb)) {
+ swqe->tx_control |= EHEA_SWQE_TSO;
+ swqe->mss = skb_shinfo(skb)->gso_size;
+ /*
+ * For TSO packets we only copy the headers into the
+ * immediate area.
+ */
+ immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ }
- if (skb_data_size > SWQE2_MAX_IMM) {
- /* copy sg1entry data */
+ if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
+ skb_copy_from_linear_data(skb, imm_data, immediate_len);
+ swqe->immediate_data_length = immediate_len;
+
+ if (skb_data_size > immediate_len) {
sg1entry->l_key = lkey;
- sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
+ sg1entry->len = skb_data_size - immediate_len;
sg1entry->vaddr =
- ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
+ ehea_map_vaddr(skb->data + immediate_len);
swqe->descriptors++;
}
} else {
@@ -1791,13 +1664,9 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
nfrags = skb_shinfo(skb)->nr_frags;
sg1entry = &swqe->u.immdata_desc.sg_entry;
sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
- swqe->descriptors = 0;
sg1entry_contains_frag_data = 0;
- if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
- write_swqe2_TSO(skb, swqe, lkey);
- else
- write_swqe2_nonTSO(skb, swqe, lkey);
+ write_swqe2_immediate(skb, swqe, lkey);
/* write descriptors */
if (nfrags > 0) {
@@ -1807,10 +1676,9 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
/* copy sg1entry data */
sg1entry->l_key = lkey;
- sg1entry->len = frag->size;
+ sg1entry->len = skb_frag_size(frag);
sg1entry->vaddr =
- ehea_map_vaddr(page_address(frag->page)
- + frag->page_offset);
+ ehea_map_vaddr(skb_frag_address(frag));
swqe->descriptors++;
sg1entry_contains_frag_data = 1;
}
@@ -1821,10 +1689,8 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
sgentry = &sg_list[i - sg1entry_contains_frag_data];
sgentry->l_key = lkey;
- sgentry->len = frag->size;
- sgentry->vaddr =
- ehea_map_vaddr(page_address(frag->page)
- + frag->page_offset);
+ sgentry->len = skb_frag_size(frag);
+ sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
swqe->descriptors++;
}
}
@@ -2115,41 +1981,44 @@ static int ehea_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
- struct ehea_swqe *swqe, u32 lkey)
+static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
- if (skb->protocol == htons(ETH_P_IP)) {
- const struct iphdr *iph = ip_hdr(skb);
+ swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
- /* IPv4 */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT
- | EHEA_SWQE_DESCRIPTORS_PRESENT;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
- write_ip_start_end(swqe, skb);
+ swqe->ip_start = skb_network_offset(skb);
+ swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
- if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF) ||
- (iph->frag_off & IP_OFFSET))
- /* IP fragment, so don't change cs */
- swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
- else
- write_udp_offset_end(swqe, skb);
- } else if (iph->protocol == IPPROTO_TCP) {
- write_tcp_offset_end(swqe, skb);
- }
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
+
+ swqe->tcp_offset = swqe->ip_end + 1 +
+ offsetof(struct udphdr, check);
+ break;
- /* icmp (big data) and ip segmentation packets (all other ip
- packets) do not require any special handling */
+ case IPPROTO_TCP:
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
- } else {
- /* Other Ethernet Protocol */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IMM_DATA_PRESENT
- | EHEA_SWQE_DESCRIPTORS_PRESENT;
+ swqe->tcp_offset = swqe->ip_end + 1 +
+ offsetof(struct tcphdr, check);
+ break;
}
+}
+
+static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
+ struct ehea_swqe *swqe, u32 lkey)
+{
+ swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
+
+ xmit_common(skb, swqe);
write_swqe2_data(skb, dev, swqe, lkey);
}
@@ -2157,107 +2026,30 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
struct ehea_swqe *swqe)
{
- int nfrags = skb_shinfo(skb)->nr_frags;
u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
- skb_frag_t *frag;
- int i;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- const struct iphdr *iph = ip_hdr(skb);
-
- /* IPv4 */
- write_ip_start_end(swqe, skb);
-
- if (iph->protocol == IPPROTO_TCP) {
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
-
- write_tcp_offset_end(swqe, skb);
- } else if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF) ||
- (iph->frag_off & IP_OFFSET))
- /* IP fragment, so don't change cs */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IMM_DATA_PRESENT;
- else {
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
+ xmit_common(skb, swqe);
- write_udp_offset_end(swqe, skb);
- }
- } else {
- /* icmp (big data) and
- ip segmentation packets (all other ip packets) */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
- }
- } else {
- /* Other Ethernet Protocol */
- swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
- }
- /* copy (immediate) data */
- if (nfrags == 0) {
- /* data is in a single piece */
+ if (!skb->data_len)
skb_copy_from_linear_data(skb, imm_data, skb->len);
- } else {
- /* first copy data from the skb->data buffer ... */
- skb_copy_from_linear_data(skb, imm_data,
- skb_headlen(skb));
- imm_data += skb_headlen(skb);
+ else
+ skb_copy_bits(skb, 0, imm_data, skb->len);
- /* ... then copy data from the fragments */
- for (i = 0; i < nfrags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- memcpy(imm_data,
- page_address(frag->page) + frag->page_offset,
- frag->size);
- imm_data += frag->size;
- }
- }
swqe->immediate_data_length = skb->len;
dev_kfree_skb(skb);
}
-static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
-{
- struct tcphdr *tcp;
- u32 tmp;
-
- if ((skb->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
- tcp = (struct tcphdr *)(skb_network_header(skb) +
- (ip_hdr(skb)->ihl * 4));
- tmp = (tcp->source + (tcp->dest << 16)) % 31;
- tmp += ip_hdr(skb)->daddr % 31;
- return tmp % num_qps;
- } else
- return 0;
-}
-
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_swqe *swqe;
- unsigned long flags;
u32 lkey;
int swqe_index;
struct ehea_port_res *pr;
+ struct netdev_queue *txq;
- pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
-
- if (!spin_trylock(&pr->xmit_lock))
- return NETDEV_TX_BUSY;
-
- if (pr->queue_stopped) {
- spin_unlock(&pr->xmit_lock);
- return NETDEV_TX_BUSY;
- }
+ pr = &port->port_res[skb_get_queue_mapping(skb)];
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
swqe = ehea_get_swqe(pr->qp, &swqe_index);
memset(swqe, 0, SWQE_HEADER_SIZE);
@@ -2307,23 +2099,16 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
ehea_dump(swqe, 512, "swqe");
if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
- netif_stop_queue(dev);
+ netif_tx_stop_queue(txq);
swqe->tx_control |= EHEA_SWQE_PURGE;
}
ehea_post_swqe(pr->qp, swqe);
if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
- spin_lock_irqsave(&pr->netif_queue, flags);
- if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
- pr->p_stats.queue_stopped++;
- netif_stop_queue(dev);
- pr->queue_stopped = 1;
- }
- spin_unlock_irqrestore(&pr->netif_queue, flags);
+ pr->p_stats.queue_stopped++;
+ netif_tx_stop_queue(txq);
}
- dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
- spin_unlock(&pr->xmit_lock);
return NETDEV_TX_OK;
}
@@ -2468,8 +2253,7 @@ out:
return ret;
}
-static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
- int add_tx_qps)
+static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
{
int ret, i;
struct port_res_cfg pr_cfg, pr_cfg_small_rx;
@@ -2502,7 +2286,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
if (ret)
goto out_clean_pr;
}
- for (i = def_qps; i < def_qps + add_tx_qps; i++) {
+ for (i = def_qps; i < def_qps; i++) {
ret = ehea_init_port_res(port, &port->port_res[i],
&pr_cfg_small_rx, i);
if (ret)
@@ -2525,7 +2309,7 @@ static int ehea_clean_all_portres(struct ehea_port *port)
int ret = 0;
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++)
ret |= ehea_clean_portres(port, &port->port_res[i]);
ret |= ehea_destroy_eq(port->qp_eq);
@@ -2557,8 +2341,7 @@ static int ehea_up(struct net_device *dev)
if (port->state == EHEA_PORT_UP)
return 0;
- ret = ehea_port_res_setup(port, port->num_def_qps,
- port->num_add_tx_qps);
+ ret = ehea_port_res_setup(port, port->num_def_qps);
if (ret) {
netdev_err(dev, "port_res_failed\n");
goto out;
@@ -2577,7 +2360,7 @@ static int ehea_up(struct net_device *dev)
goto out_clean_pr;
}
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
if (ret) {
netdev_err(dev, "activate_qp failed\n");
@@ -2623,7 +2406,7 @@ static void port_napi_disable(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++)
napi_disable(&port->port_res[i].napi);
}
@@ -2631,7 +2414,7 @@ static void port_napi_enable(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++)
napi_enable(&port->port_res[i].napi);
}
@@ -2647,10 +2430,11 @@ static int ehea_open(struct net_device *dev)
ret = ehea_up(dev);
if (!ret) {
port_napi_enable(port);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
}
mutex_unlock(&port->port_lock);
+ schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
return ret;
}
@@ -2690,8 +2474,9 @@ static int ehea_stop(struct net_device *dev)
set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
cancel_work_sync(&port->reset_task);
+ cancel_delayed_work_sync(&port->stats_work);
mutex_lock(&port->port_lock);
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
port_napi_disable(port);
ret = ehea_down(dev);
mutex_unlock(&port->port_lock);
@@ -2717,7 +2502,7 @@ static void ehea_flush_sq(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
int ret;
@@ -2751,7 +2536,7 @@ int ehea_stop_qps(struct net_device *dev)
goto out;
}
- for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+ for (i = 0; i < (port->num_def_qps); i++) {
struct ehea_port_res *pr = &port->port_res[i];
struct ehea_qp *qp = pr->qp;
@@ -2853,7 +2638,7 @@ int ehea_restart_qps(struct net_device *dev)
goto out;
}
- for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+ for (i = 0; i < (port->num_def_qps); i++) {
struct ehea_port_res *pr = &port->port_res[i];
struct ehea_qp *qp = pr->qp;
@@ -2915,7 +2700,7 @@ static void ehea_reset_port(struct work_struct *work)
mutex_lock(&dlpar_mem_lock);
port->resets++;
mutex_lock(&port->port_lock);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
port_napi_disable(port);
@@ -2931,7 +2716,7 @@ static void ehea_reset_port(struct work_struct *work)
port_napi_enable(port);
- netif_wake_queue(dev);
+ netif_tx_wake_all_queues(dev);
out:
mutex_unlock(&port->port_lock);
mutex_unlock(&dlpar_mem_lock);
@@ -2958,7 +2743,7 @@ static void ehea_rereg_mrs(void)
if (dev->flags & IFF_UP) {
mutex_lock(&port->port_lock);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
ehea_flush_sq(port);
ret = ehea_stop_qps(dev);
if (ret) {
@@ -3003,7 +2788,7 @@ static void ehea_rereg_mrs(void)
if (!ret) {
check_sqs(port);
port_napi_enable(port);
- netif_wake_queue(dev);
+ netif_tx_wake_all_queues(dev);
} else {
netdev_err(dev, "Unable to restart QPS\n");
}
@@ -3158,10 +2943,10 @@ static const struct net_device_ops ehea_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ehea_netpoll,
#endif
- .ndo_get_stats = ehea_get_stats,
+ .ndo_get_stats64 = ehea_get_stats64,
.ndo_set_mac_address = ehea_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = ehea_set_multicast_list,
+ .ndo_set_rx_mode = ehea_set_multicast_list,
.ndo_change_mtu = ehea_change_mtu,
.ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
@@ -3179,7 +2964,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
int jumbo;
/* allocate memory for the port structures */
- dev = alloc_etherdev(sizeof(struct ehea_port));
+ dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
if (!dev) {
pr_err("no mem for net_device\n");
@@ -3211,6 +2996,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
if (ret)
goto out_free_mc_list;
+ netif_set_real_num_rx_queues(dev, port->num_def_qps);
+ netif_set_real_num_tx_queues(dev, port->num_def_qps);
+
port_dev = ehea_register_port(port, dn);
if (!port_dev)
goto out_free_mc_list;
@@ -3223,30 +3011,29 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
dev->netdev_ops = &ehea_netdev_ops;
ehea_set_ethtool_ops(dev);
- dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
+ dev->hw_features = NETIF_F_SG | NETIF_F_TSO
| NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
| NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
- | NETIF_F_LLTX | NETIF_F_RXCSUM;
+ | NETIF_F_RXCSUM;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
- if (use_lro)
- dev->features |= NETIF_F_LRO;
-
INIT_WORK(&port->reset_task, ehea_reset_port);
+ INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
init_waitqueue_head(&port->swqe_avail_wq);
init_waitqueue_head(&port->restart_wq);
+ memset(&port->stats, 0, sizeof(struct net_device_stats));
ret = register_netdev(dev);
if (ret) {
pr_err("register_netdev failed. ret=%d\n", ret);
goto out_unreg_port;
}
- port->lro_max_aggr = lro_max_aggr;
-
ret = ehea_get_jumboframe_status(port, &jumbo);
if (ret)
netdev_err(dev, "failed determining jumbo frame status\n");
@@ -3278,6 +3065,7 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
struct ehea_adapter *adapter = port->adapter;
cancel_work_sync(&port->reset_task);
+ cancel_delayed_work_sync(&port->stats_work);
unregister_netdev(port->netdev);
ehea_unregister_port(port);
kfree(port->mc_list);
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
index 0506967b904..0506967b904 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
index 2f8174c248b..2f8174c248b 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 95b9f4fa811..95b9f4fa811 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
index fddff8ec8cf..337a47ecf4a 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
@@ -107,7 +107,7 @@ struct ehea_swqe {
u8 immediate_data_length;
u8 tcp_offset;
u8 reserved2;
- u16 tcp_end;
+ u16 reserved2b;
u8 wrap_tag;
u8 descriptors; /* number of valid descriptors in WQE */
u16 reserved3;
diff --git a/drivers/net/ethernet/ibm/emac/Kconfig b/drivers/net/ethernet/ibm/emac/Kconfig
new file mode 100644
index 00000000000..3f44a30e061
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/Kconfig
@@ -0,0 +1,76 @@
+config IBM_EMAC
+ tristate "IBM EMAC Ethernet support"
+ depends on PPC_DCR
+ select CRC32
+ help
+ This driver supports the IBM EMAC family of Ethernet controllers
+ typically found on 4xx embedded PowerPC chips, but also on the
+ Axon southbridge for Cell.
+
+config IBM_EMAC_RXB
+ int "Number of receive buffers"
+ depends on IBM_EMAC
+ default "128"
+
+config IBM_EMAC_TXB
+ int "Number of transmit buffers"
+ depends on IBM_EMAC
+ default "64"
+
+config IBM_EMAC_POLL_WEIGHT
+ int "MAL NAPI polling weight"
+ depends on IBM_EMAC
+ default "32"
+
+config IBM_EMAC_RX_COPY_THRESHOLD
+ int "RX skb copy threshold (bytes)"
+ depends on IBM_EMAC
+ default "256"
+
+config IBM_EMAC_RX_SKB_HEADROOM
+ int "Additional RX skb headroom (bytes)"
+ depends on IBM_EMAC
+ default "0"
+ help
+ Additional receive skb headroom. Note, that driver
+ will always reserve at least 2 bytes to make IP header
+ aligned, so usually there is no need to add any additional
+ headroom.
+
+ If unsure, set to 0.
+
+config IBM_EMAC_DEBUG
+ bool "Debugging"
+ depends on IBM_EMAC
+ default n
+
+# The options below has to be select'ed by the respective
+# processor types or platforms
+
+config IBM_EMAC_ZMII
+ bool
+ default n
+
+config IBM_EMAC_RGMII
+ bool
+ default n
+
+config IBM_EMAC_TAH
+ bool
+ default n
+
+config IBM_EMAC_EMAC4
+ bool
+ default n
+
+config IBM_EMAC_NO_FLOW_CTRL
+ bool
+ default n
+
+config IBM_EMAC_MAL_CLR_ICINTSTAT
+ bool
+ default n
+
+config IBM_EMAC_MAL_COMMON_ERR
+ bool
+ default n
diff --git a/drivers/net/ethernet/ibm/emac/Makefile b/drivers/net/ethernet/ibm/emac/Makefile
new file mode 100644
index 00000000000..eba21835d90
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the PowerPC 4xx on-chip ethernet driver
+#
+
+obj-$(CONFIG_IBM_EMAC) += ibm_emac.o
+
+ibm_emac-y := mal.o core.o phy.o
+ibm_emac-$(CONFIG_IBM_EMAC_ZMII) += zmii.o
+ibm_emac-$(CONFIG_IBM_EMAC_RGMII) += rgmii.o
+ibm_emac-$(CONFIG_IBM_EMAC_TAH) += tah.o
+ibm_emac-$(CONFIG_IBM_EMAC_DEBUG) += debug.o
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
new file mode 100644
index 00000000000..ed79b2d3ad3
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -0,0 +1,3074 @@
+/*
+ * drivers/net/ibm_newemac/core.c
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller.
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies.
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * Based on original work by
+ * Matt Porter <mporter@kernel.crashing.org>
+ * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * Armin Kuster <akuster@mvista.com>
+ * Johnnie Peters <jpeters@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/bitops.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/slab.h>
+
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+
+#include "core.h"
+
+/*
+ * Lack of dma_unmap_???? calls is intentional.
+ *
+ * API-correct usage requires additional support state information to be
+ * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
+ * EMAC design (e.g. TX buffer passed from network stack can be split into
+ * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
+ * maintaining such information will add additional overhead.
+ * Current DMA API implementation for 4xx processors only ensures cache coherency
+ * and dma_unmap_???? routines are empty and are likely to stay this way.
+ * I decided to omit dma_unmap_??? calls because I don't want to add additional
+ * complexity just for the sake of following some abstract API, when it doesn't
+ * add any real benefit to the driver. I understand that this decision maybe
+ * controversial, but I really tried to make code API-correct and efficient
+ * at the same time and didn't come up with code I liked :(. --ebs
+ */
+
+#define DRV_NAME "emac"
+#define DRV_VERSION "3.54"
+#define DRV_DESC "PPC 4xx OCP EMAC driver"
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR
+ ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
+MODULE_LICENSE("GPL");
+
+/*
+ * PPC64 doesn't (yet) have a cacheable_memcpy
+ */
+#ifdef CONFIG_PPC64
+#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
+#endif
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
+
+/* If packet size is less than this number, we allocate small skb and copy packet
+ * contents into it instead of just sending original big skb up
+ */
+#define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
+
+/* Since multiple EMACs share MDIO lines in various ways, we need
+ * to avoid re-using the same PHY ID in cases where the arch didn't
+ * setup precise phy_map entries
+ *
+ * XXX This is something that needs to be reworked as we can have multiple
+ * EMAC "sets" (multiple ASICs containing several EMACs) though we can
+ * probably require in that case to have explicit PHY IDs in the device-tree
+ */
+static u32 busy_phy_map;
+static DEFINE_MUTEX(emac_phy_map_lock);
+
+/* This is the wait queue used to wait on any event related to probe, that
+ * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
+ */
+static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
+
+/* Having stable interface names is a doomed idea. However, it would be nice
+ * if we didn't have completely random interface names at boot too :-) It's
+ * just a matter of making everybody's life easier. Since we are doing
+ * threaded probing, it's a bit harder though. The base idea here is that
+ * we make up a list of all emacs in the device-tree before we register the
+ * driver. Every emac will then wait for the previous one in the list to
+ * initialize before itself. We should also keep that list ordered by
+ * cell_index.
+ * That list is only 4 entries long, meaning that additional EMACs don't
+ * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
+ */
+
+#define EMAC_BOOT_LIST_SIZE 4
+static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
+
+/* How long should I wait for dependent devices ? */
+#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
+
+/* I don't want to litter system log with timeout errors
+ * when we have brain-damaged PHY.
+ */
+static inline void emac_report_timeout_error(struct emac_instance *dev,
+ const char *error)
+{
+ if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
+ EMAC_FTR_460EX_PHY_CLK_FIX |
+ EMAC_FTR_440EP_PHY_CLK_FIX))
+ DBG(dev, "%s" NL, error);
+ else if (net_ratelimit())
+ printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
+ error);
+}
+
+/* EMAC PHY clock workaround:
+ * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
+ * which allows controlling each EMAC clock
+ */
+static inline void emac_rx_clk_tx(struct emac_instance *dev)
+{
+#ifdef CONFIG_PPC_DCR_NATIVE
+ if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_MFR,
+ 0, SDR0_MFR_ECS >> dev->cell_index);
+#endif
+}
+
+static inline void emac_rx_clk_default(struct emac_instance *dev)
+{
+#ifdef CONFIG_PPC_DCR_NATIVE
+ if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_MFR,
+ SDR0_MFR_ECS >> dev->cell_index, 0);
+#endif
+}
+
+/* PHY polling intervals */
+#define PHY_POLL_LINK_ON HZ
+#define PHY_POLL_LINK_OFF (HZ / 5)
+
+/* Graceful stop timeouts in us.
+ * We should allow up to 1 frame time (full-duplex, ignoring collisions)
+ */
+#define STOP_TIMEOUT_10 1230
+#define STOP_TIMEOUT_100 124
+#define STOP_TIMEOUT_1000 13
+#define STOP_TIMEOUT_1000_JUMBO 73
+
+static unsigned char default_mcast_addr[] = {
+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
+};
+
+/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
+static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
+ "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
+ "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
+ "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
+ "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
+ "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
+ "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
+ "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
+ "rx_bad_packet", "rx_runt_packet", "rx_short_event",
+ "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
+ "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
+ "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
+ "tx_bd_excessive_collisions", "tx_bd_late_collision",
+ "tx_bd_multple_collisions", "tx_bd_single_collision",
+ "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
+ "tx_errors"
+};
+
+static irqreturn_t emac_irq(int irq, void *dev_instance);
+static void emac_clean_tx_ring(struct emac_instance *dev);
+static void __emac_set_multicast_list(struct emac_instance *dev);
+
+static inline int emac_phy_supports_gige(int phy_mode)
+{
+ return phy_mode == PHY_MODE_GMII ||
+ phy_mode == PHY_MODE_RGMII ||
+ phy_mode == PHY_MODE_SGMII ||
+ phy_mode == PHY_MODE_TBI ||
+ phy_mode == PHY_MODE_RTBI;
+}
+
+static inline int emac_phy_gpcs(int phy_mode)
+{
+ return phy_mode == PHY_MODE_SGMII ||
+ phy_mode == PHY_MODE_TBI ||
+ phy_mode == PHY_MODE_RTBI;
+}
+
+static inline void emac_tx_enable(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r;
+
+ DBG(dev, "tx_enable" NL);
+
+ r = in_be32(&p->mr0);
+ if (!(r & EMAC_MR0_TXE))
+ out_be32(&p->mr0, r | EMAC_MR0_TXE);
+}
+
+static void emac_tx_disable(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r;
+
+ DBG(dev, "tx_disable" NL);
+
+ r = in_be32(&p->mr0);
+ if (r & EMAC_MR0_TXE) {
+ int n = dev->stop_timeout;
+ out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
+ while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
+ udelay(1);
+ --n;
+ }
+ if (unlikely(!n))
+ emac_report_timeout_error(dev, "TX disable timeout");
+ }
+}
+
+static void emac_rx_enable(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r;
+
+ if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
+ goto out;
+
+ DBG(dev, "rx_enable" NL);
+
+ r = in_be32(&p->mr0);
+ if (!(r & EMAC_MR0_RXE)) {
+ if (unlikely(!(r & EMAC_MR0_RXI))) {
+ /* Wait if previous async disable is still in progress */
+ int n = dev->stop_timeout;
+ while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
+ udelay(1);
+ --n;
+ }
+ if (unlikely(!n))
+ emac_report_timeout_error(dev,
+ "RX disable timeout");
+ }
+ out_be32(&p->mr0, r | EMAC_MR0_RXE);
+ }
+ out:
+ ;
+}
+
+static void emac_rx_disable(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r;
+
+ DBG(dev, "rx_disable" NL);
+
+ r = in_be32(&p->mr0);
+ if (r & EMAC_MR0_RXE) {
+ int n = dev->stop_timeout;
+ out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
+ while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
+ udelay(1);
+ --n;
+ }
+ if (unlikely(!n))
+ emac_report_timeout_error(dev, "RX disable timeout");
+ }
+}
+
+static inline void emac_netif_stop(struct emac_instance *dev)
+{
+ netif_tx_lock_bh(dev->ndev);
+ netif_addr_lock(dev->ndev);
+ dev->no_mcast = 1;
+ netif_addr_unlock(dev->ndev);
+ netif_tx_unlock_bh(dev->ndev);
+ dev->ndev->trans_start = jiffies; /* prevent tx timeout */
+ mal_poll_disable(dev->mal, &dev->commac);
+ netif_tx_disable(dev->ndev);
+}
+
+static inline void emac_netif_start(struct emac_instance *dev)
+{
+ netif_tx_lock_bh(dev->ndev);
+ netif_addr_lock(dev->ndev);
+ dev->no_mcast = 0;
+ if (dev->mcast_pending && netif_running(dev->ndev))
+ __emac_set_multicast_list(dev);
+ netif_addr_unlock(dev->ndev);
+ netif_tx_unlock_bh(dev->ndev);
+
+ netif_wake_queue(dev->ndev);
+
+ /* NOTE: unconditional netif_wake_queue is only appropriate
+ * so long as all callers are assured to have free tx slots
+ * (taken from tg3... though the case where that is wrong is
+ * not terribly harmful)
+ */
+ mal_poll_enable(dev->mal, &dev->commac);
+}
+
+static inline void emac_rx_disable_async(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r;
+
+ DBG(dev, "rx_disable_async" NL);
+
+ r = in_be32(&p->mr0);
+ if (r & EMAC_MR0_RXE)
+ out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
+}
+
+static int emac_reset(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ int n = 20;
+
+ DBG(dev, "reset" NL);
+
+ if (!dev->reset_failed) {
+ /* 40x erratum suggests stopping RX channel before reset,
+ * we stop TX as well
+ */
+ emac_rx_disable(dev);
+ emac_tx_disable(dev);
+ }
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+ /* Enable internal clock source */
+ if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0, SDR0_ETH_CFG_ECS << dev->cell_index);
+#endif
+
+ out_be32(&p->mr0, EMAC_MR0_SRST);
+ while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
+ --n;
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+ /* Enable external clock source */
+ if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+#endif
+
+ if (n) {
+ dev->reset_failed = 0;
+ return 0;
+ } else {
+ emac_report_timeout_error(dev, "reset timeout");
+ dev->reset_failed = 1;
+ return -ETIMEDOUT;
+ }
+}
+
+static void emac_hash_mc(struct emac_instance *dev)
+{
+ const int regs = EMAC_XAHT_REGS(dev);
+ u32 *gaht_base = emac_gaht_base(dev);
+ u32 gaht_temp[regs];
+ struct netdev_hw_addr *ha;
+ int i;
+
+ DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
+
+ memset(gaht_temp, 0, sizeof (gaht_temp));
+
+ netdev_for_each_mc_addr(ha, dev->ndev) {
+ int slot, reg, mask;
+ DBG2(dev, "mc %pM" NL, ha->addr);
+
+ slot = EMAC_XAHT_CRC_TO_SLOT(dev,
+ ether_crc(ETH_ALEN, ha->addr));
+ reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
+ mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
+
+ gaht_temp[reg] |= mask;
+ }
+
+ for (i = 0; i < regs; i++)
+ out_be32(gaht_base + i, gaht_temp[i]);
+}
+
+static inline u32 emac_iff2rmr(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ u32 r;
+
+ r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
+
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ r |= EMAC4_RMR_BASE;
+ else
+ r |= EMAC_RMR_BASE;
+
+ if (ndev->flags & IFF_PROMISC)
+ r |= EMAC_RMR_PME;
+ else if (ndev->flags & IFF_ALLMULTI ||
+ (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
+ r |= EMAC_RMR_PMME;
+ else if (!netdev_mc_empty(ndev))
+ r |= EMAC_RMR_MAE;
+
+ return r;
+}
+
+static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
+{
+ u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
+
+ DBG2(dev, "__emac_calc_base_mr1" NL);
+
+ switch(tx_size) {
+ case 2048:
+ ret |= EMAC_MR1_TFS_2K;
+ break;
+ default:
+ printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
+ dev->ndev->name, tx_size);
+ }
+
+ switch(rx_size) {
+ case 16384:
+ ret |= EMAC_MR1_RFS_16K;
+ break;
+ case 4096:
+ ret |= EMAC_MR1_RFS_4K;
+ break;
+ default:
+ printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
+ dev->ndev->name, rx_size);
+ }
+
+ return ret;
+}
+
+static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
+{
+ u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
+ EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
+
+ DBG2(dev, "__emac4_calc_base_mr1" NL);
+
+ switch(tx_size) {
+ case 16384:
+ ret |= EMAC4_MR1_TFS_16K;
+ break;
+ case 4096:
+ ret |= EMAC4_MR1_TFS_4K;
+ break;
+ case 2048:
+ ret |= EMAC4_MR1_TFS_2K;
+ break;
+ default:
+ printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
+ dev->ndev->name, tx_size);
+ }
+
+ switch(rx_size) {
+ case 16384:
+ ret |= EMAC4_MR1_RFS_16K;
+ break;
+ case 4096:
+ ret |= EMAC4_MR1_RFS_4K;
+ break;
+ case 2048:
+ ret |= EMAC4_MR1_RFS_2K;
+ break;
+ default:
+ printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
+ dev->ndev->name, rx_size);
+ }
+
+ return ret;
+}
+
+static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
+{
+ return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
+ __emac4_calc_base_mr1(dev, tx_size, rx_size) :
+ __emac_calc_base_mr1(dev, tx_size, rx_size);
+}
+
+static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
+{
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
+ else
+ return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
+}
+
+static inline u32 emac_calc_rwmr(struct emac_instance *dev,
+ unsigned int low, unsigned int high)
+{
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ return (low << 22) | ( (high & 0x3ff) << 6);
+ else
+ return (low << 23) | ( (high & 0x1ff) << 7);
+}
+
+static int emac_configure(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ struct net_device *ndev = dev->ndev;
+ int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
+ u32 r, mr1 = 0;
+
+ DBG(dev, "configure" NL);
+
+ if (!link) {
+ out_be32(&p->mr1, in_be32(&p->mr1)
+ | EMAC_MR1_FDE | EMAC_MR1_ILE);
+ udelay(100);
+ } else if (emac_reset(dev) < 0)
+ return -ETIMEDOUT;
+
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
+ tah_reset(dev->tah_dev);
+
+ DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
+ link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
+
+ /* Default fifo sizes */
+ tx_size = dev->tx_fifo_size;
+ rx_size = dev->rx_fifo_size;
+
+ /* No link, force loopback */
+ if (!link)
+ mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
+
+ /* Check for full duplex */
+ else if (dev->phy.duplex == DUPLEX_FULL)
+ mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
+
+ /* Adjust fifo sizes, mr1 and timeouts based on link speed */
+ dev->stop_timeout = STOP_TIMEOUT_10;
+ switch (dev->phy.speed) {
+ case SPEED_1000:
+ if (emac_phy_gpcs(dev->phy.mode)) {
+ mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
+ (dev->phy.gpcs_address != 0xffffffff) ?
+ dev->phy.gpcs_address : dev->phy.address);
+
+ /* Put some arbitrary OUI, Manuf & Rev IDs so we can
+ * identify this GPCS PHY later.
+ */
+ out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
+ } else
+ mr1 |= EMAC_MR1_MF_1000;
+
+ /* Extended fifo sizes */
+ tx_size = dev->tx_fifo_size_gige;
+ rx_size = dev->rx_fifo_size_gige;
+
+ if (dev->ndev->mtu > ETH_DATA_LEN) {
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ mr1 |= EMAC4_MR1_JPSM;
+ else
+ mr1 |= EMAC_MR1_JPSM;
+ dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
+ } else
+ dev->stop_timeout = STOP_TIMEOUT_1000;
+ break;
+ case SPEED_100:
+ mr1 |= EMAC_MR1_MF_100;
+ dev->stop_timeout = STOP_TIMEOUT_100;
+ break;
+ default: /* make gcc happy */
+ break;
+ }
+
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
+ dev->phy.speed);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
+
+ /* on 40x erratum forces us to NOT use integrated flow control,
+ * let's hope it works on 44x ;)
+ */
+ if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
+ dev->phy.duplex == DUPLEX_FULL) {
+ if (dev->phy.pause)
+ mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
+ else if (dev->phy.asym_pause)
+ mr1 |= EMAC_MR1_APP;
+ }
+
+ /* Add base settings & fifo sizes & program MR1 */
+ mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
+ out_be32(&p->mr1, mr1);
+
+ /* Set individual MAC address */
+ out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
+ out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
+ (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
+ ndev->dev_addr[5]);
+
+ /* VLAN Tag Protocol ID */
+ out_be32(&p->vtpid, 0x8100);
+
+ /* Receive mode register */
+ r = emac_iff2rmr(ndev);
+ if (r & EMAC_RMR_MAE)
+ emac_hash_mc(dev);
+ out_be32(&p->rmr, r);
+
+ /* FIFOs thresholds */
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
+ tx_size / 2 / dev->fifo_entry_size);
+ else
+ r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
+ tx_size / 2 / dev->fifo_entry_size);
+ out_be32(&p->tmr1, r);
+ out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
+
+ /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
+ there should be still enough space in FIFO to allow the our link
+ partner time to process this frame and also time to send PAUSE
+ frame itself.
+
+ Here is the worst case scenario for the RX FIFO "headroom"
+ (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
+
+ 1) One maximum-length frame on TX 1522 bytes
+ 2) One PAUSE frame time 64 bytes
+ 3) PAUSE frame decode time allowance 64 bytes
+ 4) One maximum-length frame on RX 1522 bytes
+ 5) Round-trip propagation delay of the link (100Mb) 15 bytes
+ ----------
+ 3187 bytes
+
+ I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
+ low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
+ */
+ r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
+ rx_size / 4 / dev->fifo_entry_size);
+ out_be32(&p->rwmr, r);
+
+ /* Set PAUSE timer to the maximum */
+ out_be32(&p->ptr, 0xffff);
+
+ /* IRQ sources */
+ r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
+ EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
+ EMAC_ISR_IRE | EMAC_ISR_TE;
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
+ EMAC4_ISR_RXOE | */;
+ out_be32(&p->iser, r);
+
+ /* We need to take GPCS PHY out of isolate mode after EMAC reset */
+ if (emac_phy_gpcs(dev->phy.mode)) {
+ if (dev->phy.gpcs_address != 0xffffffff)
+ emac_mii_reset_gpcs(&dev->phy);
+ else
+ emac_mii_reset_phy(&dev->phy);
+ }
+
+ return 0;
+}
+
+static void emac_reinitialize(struct emac_instance *dev)
+{
+ DBG(dev, "reinitialize" NL);
+
+ emac_netif_stop(dev);
+ if (!emac_configure(dev)) {
+ emac_tx_enable(dev);
+ emac_rx_enable(dev);
+ }
+ emac_netif_start(dev);
+}
+
+static void emac_full_tx_reset(struct emac_instance *dev)
+{
+ DBG(dev, "full_tx_reset" NL);
+
+ emac_tx_disable(dev);
+ mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
+ emac_clean_tx_ring(dev);
+ dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
+
+ emac_configure(dev);
+
+ mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
+ emac_tx_enable(dev);
+ emac_rx_enable(dev);
+}
+
+static void emac_reset_work(struct work_struct *work)
+{
+ struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
+
+ DBG(dev, "reset_work" NL);
+
+ mutex_lock(&dev->link_lock);
+ if (dev->opened) {
+ emac_netif_stop(dev);
+ emac_full_tx_reset(dev);
+ emac_netif_start(dev);
+ }
+ mutex_unlock(&dev->link_lock);
+}
+
+static void emac_tx_timeout(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ DBG(dev, "tx_timeout" NL);
+
+ schedule_work(&dev->reset_work);
+}
+
+
+static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
+{
+ int done = !!(stacr & EMAC_STACR_OC);
+
+ if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
+ done = !done;
+
+ return done;
+};
+
+static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r = 0;
+ int n, err = -ETIMEDOUT;
+
+ mutex_lock(&dev->mdio_lock);
+
+ DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
+
+ /* Enable proper MDIO port */
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
+
+ /* Wait for management interface to become idle */
+ n = 20;
+ while (!emac_phy_done(dev, in_be32(&p->stacr))) {
+ udelay(1);
+ if (!--n) {
+ DBG2(dev, " -> timeout wait idle\n");
+ goto bail;
+ }
+ }
+
+ /* Issue read command */
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ r = EMAC4_STACR_BASE(dev->opb_bus_freq);
+ else
+ r = EMAC_STACR_BASE(dev->opb_bus_freq);
+ if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
+ r |= EMAC_STACR_OC;
+ if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
+ r |= EMACX_STACR_STAC_READ;
+ else
+ r |= EMAC_STACR_STAC_READ;
+ r |= (reg & EMAC_STACR_PRA_MASK)
+ | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
+ out_be32(&p->stacr, r);
+
+ /* Wait for read to complete */
+ n = 200;
+ while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
+ udelay(1);
+ if (!--n) {
+ DBG2(dev, " -> timeout wait complete\n");
+ goto bail;
+ }
+ }
+
+ if (unlikely(r & EMAC_STACR_PHYE)) {
+ DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
+ err = -EREMOTEIO;
+ goto bail;
+ }
+
+ r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
+
+ DBG2(dev, "mdio_read -> %04x" NL, r);
+ err = 0;
+ bail:
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
+ mutex_unlock(&dev->mdio_lock);
+
+ return err == 0 ? r : err;
+}
+
+static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
+ u16 val)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r = 0;
+ int n, err = -ETIMEDOUT;
+
+ mutex_lock(&dev->mdio_lock);
+
+ DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
+
+ /* Enable proper MDIO port */
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
+
+ /* Wait for management interface to be idle */
+ n = 20;
+ while (!emac_phy_done(dev, in_be32(&p->stacr))) {
+ udelay(1);
+ if (!--n) {
+ DBG2(dev, " -> timeout wait idle\n");
+ goto bail;
+ }
+ }
+
+ /* Issue write command */
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ r = EMAC4_STACR_BASE(dev->opb_bus_freq);
+ else
+ r = EMAC_STACR_BASE(dev->opb_bus_freq);
+ if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
+ r |= EMAC_STACR_OC;
+ if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
+ r |= EMACX_STACR_STAC_WRITE;
+ else
+ r |= EMAC_STACR_STAC_WRITE;
+ r |= (reg & EMAC_STACR_PRA_MASK) |
+ ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
+ (val << EMAC_STACR_PHYD_SHIFT);
+ out_be32(&p->stacr, r);
+
+ /* Wait for write to complete */
+ n = 200;
+ while (!emac_phy_done(dev, in_be32(&p->stacr))) {
+ udelay(1);
+ if (!--n) {
+ DBG2(dev, " -> timeout wait complete\n");
+ goto bail;
+ }
+ }
+ err = 0;
+ bail:
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
+ mutex_unlock(&dev->mdio_lock);
+}
+
+static int emac_mdio_read(struct net_device *ndev, int id, int reg)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ int res;
+
+ res = __emac_mdio_read((dev->mdio_instance &&
+ dev->phy.gpcs_address != id) ?
+ dev->mdio_instance : dev,
+ (u8) id, (u8) reg);
+ return res;
+}
+
+static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ __emac_mdio_write((dev->mdio_instance &&
+ dev->phy.gpcs_address != id) ?
+ dev->mdio_instance : dev,
+ (u8) id, (u8) reg, (u16) val);
+}
+
+/* Tx lock BH */
+static void __emac_set_multicast_list(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 rmr = emac_iff2rmr(dev->ndev);
+
+ DBG(dev, "__multicast %08x" NL, rmr);
+
+ /* I decided to relax register access rules here to avoid
+ * full EMAC reset.
+ *
+ * There is a real problem with EMAC4 core if we use MWSW_001 bit
+ * in MR1 register and do a full EMAC reset.
+ * One TX BD status update is delayed and, after EMAC reset, it
+ * never happens, resulting in TX hung (it'll be recovered by TX
+ * timeout handler eventually, but this is just gross).
+ * So we either have to do full TX reset or try to cheat here :)
+ *
+ * The only required change is to RX mode register, so I *think* all
+ * we need is just to stop RX channel. This seems to work on all
+ * tested SoCs. --ebs
+ *
+ * If we need the full reset, we might just trigger the workqueue
+ * and do it async... a bit nasty but should work --BenH
+ */
+ dev->mcast_pending = 0;
+ emac_rx_disable(dev);
+ if (rmr & EMAC_RMR_MAE)
+ emac_hash_mc(dev);
+ out_be32(&p->rmr, rmr);
+ emac_rx_enable(dev);
+}
+
+/* Tx lock BH */
+static void emac_set_multicast_list(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ DBG(dev, "multicast" NL);
+
+ BUG_ON(!netif_running(dev->ndev));
+
+ if (dev->no_mcast) {
+ dev->mcast_pending = 1;
+ return;
+ }
+ __emac_set_multicast_list(dev);
+}
+
+static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
+{
+ int rx_sync_size = emac_rx_sync_size(new_mtu);
+ int rx_skb_size = emac_rx_skb_size(new_mtu);
+ int i, ret = 0;
+
+ mutex_lock(&dev->link_lock);
+ emac_netif_stop(dev);
+ emac_rx_disable(dev);
+ mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
+
+ if (dev->rx_sg_skb) {
+ ++dev->estats.rx_dropped_resize;
+ dev_kfree_skb(dev->rx_sg_skb);
+ dev->rx_sg_skb = NULL;
+ }
+
+ /* Make a first pass over RX ring and mark BDs ready, dropping
+ * non-processed packets on the way. We need this as a separate pass
+ * to simplify error recovery in the case of allocation failure later.
+ */
+ for (i = 0; i < NUM_RX_BUFF; ++i) {
+ if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
+ ++dev->estats.rx_dropped_resize;
+
+ dev->rx_desc[i].data_len = 0;
+ dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
+ (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+ }
+
+ /* Reallocate RX ring only if bigger skb buffers are required */
+ if (rx_skb_size <= dev->rx_skb_size)
+ goto skip;
+
+ /* Second pass, allocate new skbs */
+ for (i = 0; i < NUM_RX_BUFF; ++i) {
+ struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto oom;
+ }
+
+ BUG_ON(!dev->rx_skb[i]);
+ dev_kfree_skb(dev->rx_skb[i]);
+
+ skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
+ dev->rx_desc[i].data_ptr =
+ dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
+ DMA_FROM_DEVICE) + 2;
+ dev->rx_skb[i] = skb;
+ }
+ skip:
+ /* Check if we need to change "Jumbo" bit in MR1 */
+ if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
+ /* This is to prevent starting RX channel in emac_rx_enable() */
+ set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
+
+ dev->ndev->mtu = new_mtu;
+ emac_full_tx_reset(dev);
+ }
+
+ mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
+ oom:
+ /* Restart RX */
+ clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
+ dev->rx_slot = 0;
+ mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
+ emac_rx_enable(dev);
+ emac_netif_start(dev);
+ mutex_unlock(&dev->link_lock);
+
+ return ret;
+}
+
+/* Process ctx, rtnl_lock semaphore */
+static int emac_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ int ret = 0;
+
+ if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
+ return -EINVAL;
+
+ DBG(dev, "change_mtu(%d)" NL, new_mtu);
+
+ if (netif_running(ndev)) {
+ /* Check if we really need to reinitialize RX ring */
+ if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
+ ret = emac_resize_rx_ring(dev, new_mtu);
+ }
+
+ if (!ret) {
+ ndev->mtu = new_mtu;
+ dev->rx_skb_size = emac_rx_skb_size(new_mtu);
+ dev->rx_sync_size = emac_rx_sync_size(new_mtu);
+ }
+
+ return ret;
+}
+
+static void emac_clean_tx_ring(struct emac_instance *dev)
+{
+ int i;
+
+ for (i = 0; i < NUM_TX_BUFF; ++i) {
+ if (dev->tx_skb[i]) {
+ dev_kfree_skb(dev->tx_skb[i]);
+ dev->tx_skb[i] = NULL;
+ if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
+ ++dev->estats.tx_dropped;
+ }
+ dev->tx_desc[i].ctrl = 0;
+ dev->tx_desc[i].data_ptr = 0;
+ }
+}
+
+static void emac_clean_rx_ring(struct emac_instance *dev)
+{
+ int i;
+
+ for (i = 0; i < NUM_RX_BUFF; ++i)
+ if (dev->rx_skb[i]) {
+ dev->rx_desc[i].ctrl = 0;
+ dev_kfree_skb(dev->rx_skb[i]);
+ dev->rx_skb[i] = NULL;
+ dev->rx_desc[i].data_ptr = 0;
+ }
+
+ if (dev->rx_sg_skb) {
+ dev_kfree_skb(dev->rx_sg_skb);
+ dev->rx_sg_skb = NULL;
+ }
+}
+
+static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
+ gfp_t flags)
+{
+ struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ dev->rx_skb[slot] = skb;
+ dev->rx_desc[slot].data_len = 0;
+
+ skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
+ dev->rx_desc[slot].data_ptr =
+ dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
+ DMA_FROM_DEVICE) + 2;
+ wmb();
+ dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
+ (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+
+ return 0;
+}
+
+static void emac_print_link_status(struct emac_instance *dev)
+{
+ if (netif_carrier_ok(dev->ndev))
+ printk(KERN_INFO "%s: link is up, %d %s%s\n",
+ dev->ndev->name, dev->phy.speed,
+ dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
+ dev->phy.pause ? ", pause enabled" :
+ dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
+ else
+ printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
+}
+
+/* Process ctx, rtnl_lock semaphore */
+static int emac_open(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ int err, i;
+
+ DBG(dev, "open" NL);
+
+ /* Setup error IRQ handler */
+ err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
+ if (err) {
+ printk(KERN_ERR "%s: failed to request IRQ %d\n",
+ ndev->name, dev->emac_irq);
+ return err;
+ }
+
+ /* Allocate RX ring */
+ for (i = 0; i < NUM_RX_BUFF; ++i)
+ if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
+ printk(KERN_ERR "%s: failed to allocate RX ring\n",
+ ndev->name);
+ goto oom;
+ }
+
+ dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
+ clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
+ dev->rx_sg_skb = NULL;
+
+ mutex_lock(&dev->link_lock);
+ dev->opened = 1;
+
+ /* Start PHY polling now.
+ */
+ if (dev->phy.address >= 0) {
+ int link_poll_interval;
+ if (dev->phy.def->ops->poll_link(&dev->phy)) {
+ dev->phy.def->ops->read_link(&dev->phy);
+ emac_rx_clk_default(dev);
+ netif_carrier_on(dev->ndev);
+ link_poll_interval = PHY_POLL_LINK_ON;
+ } else {
+ emac_rx_clk_tx(dev);
+ netif_carrier_off(dev->ndev);
+ link_poll_interval = PHY_POLL_LINK_OFF;
+ }
+ dev->link_polling = 1;
+ wmb();
+ schedule_delayed_work(&dev->link_work, link_poll_interval);
+ emac_print_link_status(dev);
+ } else
+ netif_carrier_on(dev->ndev);
+
+ /* Required for Pause packet support in EMAC */
+ dev_mc_add_global(ndev, default_mcast_addr);
+
+ emac_configure(dev);
+ mal_poll_add(dev->mal, &dev->commac);
+ mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
+ mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
+ mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
+ emac_tx_enable(dev);
+ emac_rx_enable(dev);
+ emac_netif_start(dev);
+
+ mutex_unlock(&dev->link_lock);
+
+ return 0;
+ oom:
+ emac_clean_rx_ring(dev);
+ free_irq(dev->emac_irq, dev);
+
+ return -ENOMEM;
+}
+
+/* BHs disabled */
+#if 0
+static int emac_link_differs(struct emac_instance *dev)
+{
+ u32 r = in_be32(&dev->emacp->mr1);
+
+ int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
+ int speed, pause, asym_pause;
+
+ if (r & EMAC_MR1_MF_1000)
+ speed = SPEED_1000;
+ else if (r & EMAC_MR1_MF_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
+ case (EMAC_MR1_EIFC | EMAC_MR1_APP):
+ pause = 1;
+ asym_pause = 0;
+ break;
+ case EMAC_MR1_APP:
+ pause = 0;
+ asym_pause = 1;
+ break;
+ default:
+ pause = asym_pause = 0;
+ }
+ return speed != dev->phy.speed || duplex != dev->phy.duplex ||
+ pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
+}
+#endif
+
+static void emac_link_timer(struct work_struct *work)
+{
+ struct emac_instance *dev =
+ container_of(to_delayed_work(work),
+ struct emac_instance, link_work);
+ int link_poll_interval;
+
+ mutex_lock(&dev->link_lock);
+ DBG2(dev, "link timer" NL);
+
+ if (!dev->opened)
+ goto bail;
+
+ if (dev->phy.def->ops->poll_link(&dev->phy)) {
+ if (!netif_carrier_ok(dev->ndev)) {
+ emac_rx_clk_default(dev);
+ /* Get new link parameters */
+ dev->phy.def->ops->read_link(&dev->phy);
+
+ netif_carrier_on(dev->ndev);
+ emac_netif_stop(dev);
+ emac_full_tx_reset(dev);
+ emac_netif_start(dev);
+ emac_print_link_status(dev);
+ }
+ link_poll_interval = PHY_POLL_LINK_ON;
+ } else {
+ if (netif_carrier_ok(dev->ndev)) {
+ emac_rx_clk_tx(dev);
+ netif_carrier_off(dev->ndev);
+ netif_tx_disable(dev->ndev);
+ emac_reinitialize(dev);
+ emac_print_link_status(dev);
+ }
+ link_poll_interval = PHY_POLL_LINK_OFF;
+ }
+ schedule_delayed_work(&dev->link_work, link_poll_interval);
+ bail:
+ mutex_unlock(&dev->link_lock);
+}
+
+static void emac_force_link_update(struct emac_instance *dev)
+{
+ netif_carrier_off(dev->ndev);
+ smp_rmb();
+ if (dev->link_polling) {
+ cancel_delayed_work_sync(&dev->link_work);
+ if (dev->link_polling)
+ schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
+ }
+}
+
+/* Process ctx, rtnl_lock semaphore */
+static int emac_close(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ DBG(dev, "close" NL);
+
+ if (dev->phy.address >= 0) {
+ dev->link_polling = 0;
+ cancel_delayed_work_sync(&dev->link_work);
+ }
+ mutex_lock(&dev->link_lock);
+ emac_netif_stop(dev);
+ dev->opened = 0;
+ mutex_unlock(&dev->link_lock);
+
+ emac_rx_disable(dev);
+ emac_tx_disable(dev);
+ mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
+ mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
+ mal_poll_del(dev->mal, &dev->commac);
+
+ emac_clean_tx_ring(dev);
+ emac_clean_rx_ring(dev);
+
+ free_irq(dev->emac_irq, dev);
+
+ netif_carrier_off(ndev);
+
+ return 0;
+}
+
+static inline u16 emac_tx_csum(struct emac_instance *dev,
+ struct sk_buff *skb)
+{
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL)) {
+ ++dev->stats.tx_packets_csum;
+ return EMAC_TX_CTRL_TAH_CSUM;
+ }
+ return 0;
+}
+
+static inline int emac_xmit_finish(struct emac_instance *dev, int len)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ struct net_device *ndev = dev->ndev;
+
+ /* Send the packet out. If the if makes a significant perf
+ * difference, then we can store the TMR0 value in "dev"
+ * instead
+ */
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
+ else
+ out_be32(&p->tmr0, EMAC_TMR0_XMIT);
+
+ if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
+ netif_stop_queue(ndev);
+ DBG2(dev, "stopped TX queue" NL);
+ }
+
+ ndev->trans_start = jiffies;
+ ++dev->stats.tx_packets;
+ dev->stats.tx_bytes += len;
+
+ return NETDEV_TX_OK;
+}
+
+/* Tx lock BH */
+static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ unsigned int len = skb->len;
+ int slot;
+
+ u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
+ MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
+
+ slot = dev->tx_slot++;
+ if (dev->tx_slot == NUM_TX_BUFF) {
+ dev->tx_slot = 0;
+ ctrl |= MAL_TX_CTRL_WRAP;
+ }
+
+ DBG2(dev, "xmit(%u) %d" NL, len, slot);
+
+ dev->tx_skb[slot] = skb;
+ dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
+ skb->data, len,
+ DMA_TO_DEVICE);
+ dev->tx_desc[slot].data_len = (u16) len;
+ wmb();
+ dev->tx_desc[slot].ctrl = ctrl;
+
+ return emac_xmit_finish(dev, len);
+}
+
+static inline int emac_xmit_split(struct emac_instance *dev, int slot,
+ u32 pd, int len, int last, u16 base_ctrl)
+{
+ while (1) {
+ u16 ctrl = base_ctrl;
+ int chunk = min(len, MAL_MAX_TX_SIZE);
+ len -= chunk;
+
+ slot = (slot + 1) % NUM_TX_BUFF;
+
+ if (last && !len)
+ ctrl |= MAL_TX_CTRL_LAST;
+ if (slot == NUM_TX_BUFF - 1)
+ ctrl |= MAL_TX_CTRL_WRAP;
+
+ dev->tx_skb[slot] = NULL;
+ dev->tx_desc[slot].data_ptr = pd;
+ dev->tx_desc[slot].data_len = (u16) chunk;
+ dev->tx_desc[slot].ctrl = ctrl;
+ ++dev->tx_cnt;
+
+ if (!len)
+ break;
+
+ pd += chunk;
+ }
+ return slot;
+}
+
+/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
+static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int len = skb->len, chunk;
+ int slot, i;
+ u16 ctrl;
+ u32 pd;
+
+ /* This is common "fast" path */
+ if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
+ return emac_start_xmit(skb, ndev);
+
+ len -= skb->data_len;
+
+ /* Note, this is only an *estimation*, we can still run out of empty
+ * slots because of the additional fragmentation into
+ * MAL_MAX_TX_SIZE-sized chunks
+ */
+ if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
+ goto stop_queue;
+
+ ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
+ emac_tx_csum(dev, skb);
+ slot = dev->tx_slot;
+
+ /* skb data */
+ dev->tx_skb[slot] = NULL;
+ chunk = min(len, MAL_MAX_TX_SIZE);
+ dev->tx_desc[slot].data_ptr = pd =
+ dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
+ dev->tx_desc[slot].data_len = (u16) chunk;
+ len -= chunk;
+ if (unlikely(len))
+ slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
+ ctrl);
+ /* skb fragments */
+ for (i = 0; i < nr_frags; ++i) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ len = skb_frag_size(frag);
+
+ if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
+ goto undo_frame;
+
+ pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+
+ slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
+ ctrl);
+ }
+
+ DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
+
+ /* Attach skb to the last slot so we don't release it too early */
+ dev->tx_skb[slot] = skb;
+
+ /* Send the packet out */
+ if (dev->tx_slot == NUM_TX_BUFF - 1)
+ ctrl |= MAL_TX_CTRL_WRAP;
+ wmb();
+ dev->tx_desc[dev->tx_slot].ctrl = ctrl;
+ dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
+
+ return emac_xmit_finish(dev, skb->len);
+
+ undo_frame:
+ /* Well, too bad. Our previous estimation was overly optimistic.
+ * Undo everything.
+ */
+ while (slot != dev->tx_slot) {
+ dev->tx_desc[slot].ctrl = 0;
+ --dev->tx_cnt;
+ if (--slot < 0)
+ slot = NUM_TX_BUFF - 1;
+ }
+ ++dev->estats.tx_undo;
+
+ stop_queue:
+ netif_stop_queue(ndev);
+ DBG2(dev, "stopped TX queue" NL);
+ return NETDEV_TX_BUSY;
+}
+
+/* Tx lock BHs */
+static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
+{
+ struct emac_error_stats *st = &dev->estats;
+
+ DBG(dev, "BD TX error %04x" NL, ctrl);
+
+ ++st->tx_bd_errors;
+ if (ctrl & EMAC_TX_ST_BFCS)
+ ++st->tx_bd_bad_fcs;
+ if (ctrl & EMAC_TX_ST_LCS)
+ ++st->tx_bd_carrier_loss;
+ if (ctrl & EMAC_TX_ST_ED)
+ ++st->tx_bd_excessive_deferral;
+ if (ctrl & EMAC_TX_ST_EC)
+ ++st->tx_bd_excessive_collisions;
+ if (ctrl & EMAC_TX_ST_LC)
+ ++st->tx_bd_late_collision;
+ if (ctrl & EMAC_TX_ST_MC)
+ ++st->tx_bd_multple_collisions;
+ if (ctrl & EMAC_TX_ST_SC)
+ ++st->tx_bd_single_collision;
+ if (ctrl & EMAC_TX_ST_UR)
+ ++st->tx_bd_underrun;
+ if (ctrl & EMAC_TX_ST_SQE)
+ ++st->tx_bd_sqe;
+}
+
+static void emac_poll_tx(void *param)
+{
+ struct emac_instance *dev = param;
+ u32 bad_mask;
+
+ DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
+
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
+ bad_mask = EMAC_IS_BAD_TX_TAH;
+ else
+ bad_mask = EMAC_IS_BAD_TX;
+
+ netif_tx_lock_bh(dev->ndev);
+ if (dev->tx_cnt) {
+ u16 ctrl;
+ int slot = dev->ack_slot, n = 0;
+ again:
+ ctrl = dev->tx_desc[slot].ctrl;
+ if (!(ctrl & MAL_TX_CTRL_READY)) {
+ struct sk_buff *skb = dev->tx_skb[slot];
+ ++n;
+
+ if (skb) {
+ dev_kfree_skb(skb);
+ dev->tx_skb[slot] = NULL;
+ }
+ slot = (slot + 1) % NUM_TX_BUFF;
+
+ if (unlikely(ctrl & bad_mask))
+ emac_parse_tx_error(dev, ctrl);
+
+ if (--dev->tx_cnt)
+ goto again;
+ }
+ if (n) {
+ dev->ack_slot = slot;
+ if (netif_queue_stopped(dev->ndev) &&
+ dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
+ netif_wake_queue(dev->ndev);
+
+ DBG2(dev, "tx %d pkts" NL, n);
+ }
+ }
+ netif_tx_unlock_bh(dev->ndev);
+}
+
+static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
+ int len)
+{
+ struct sk_buff *skb = dev->rx_skb[slot];
+
+ DBG2(dev, "recycle %d %d" NL, slot, len);
+
+ if (len)
+ dma_map_single(&dev->ofdev->dev, skb->data - 2,
+ EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
+
+ dev->rx_desc[slot].data_len = 0;
+ wmb();
+ dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
+ (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+}
+
+static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
+{
+ struct emac_error_stats *st = &dev->estats;
+
+ DBG(dev, "BD RX error %04x" NL, ctrl);
+
+ ++st->rx_bd_errors;
+ if (ctrl & EMAC_RX_ST_OE)
+ ++st->rx_bd_overrun;
+ if (ctrl & EMAC_RX_ST_BP)
+ ++st->rx_bd_bad_packet;
+ if (ctrl & EMAC_RX_ST_RP)
+ ++st->rx_bd_runt_packet;
+ if (ctrl & EMAC_RX_ST_SE)
+ ++st->rx_bd_short_event;
+ if (ctrl & EMAC_RX_ST_AE)
+ ++st->rx_bd_alignment_error;
+ if (ctrl & EMAC_RX_ST_BFCS)
+ ++st->rx_bd_bad_fcs;
+ if (ctrl & EMAC_RX_ST_PTL)
+ ++st->rx_bd_packet_too_long;
+ if (ctrl & EMAC_RX_ST_ORE)
+ ++st->rx_bd_out_of_range;
+ if (ctrl & EMAC_RX_ST_IRE)
+ ++st->rx_bd_in_range;
+}
+
+static inline void emac_rx_csum(struct emac_instance *dev,
+ struct sk_buff *skb, u16 ctrl)
+{
+#ifdef CONFIG_IBM_EMAC_TAH
+ if (!ctrl && dev->tah_dev) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ ++dev->stats.rx_packets_csum;
+ }
+#endif
+}
+
+static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
+{
+ if (likely(dev->rx_sg_skb != NULL)) {
+ int len = dev->rx_desc[slot].data_len;
+ int tot_len = dev->rx_sg_skb->len + len;
+
+ if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
+ ++dev->estats.rx_dropped_mtu;
+ dev_kfree_skb(dev->rx_sg_skb);
+ dev->rx_sg_skb = NULL;
+ } else {
+ cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
+ dev->rx_skb[slot]->data, len);
+ skb_put(dev->rx_sg_skb, len);
+ emac_recycle_rx_skb(dev, slot, len);
+ return 0;
+ }
+ }
+ emac_recycle_rx_skb(dev, slot, 0);
+ return -1;
+}
+
+/* NAPI poll context */
+static int emac_poll_rx(void *param, int budget)
+{
+ struct emac_instance *dev = param;
+ int slot = dev->rx_slot, received = 0;
+
+ DBG2(dev, "poll_rx(%d)" NL, budget);
+
+ again:
+ while (budget > 0) {
+ int len;
+ struct sk_buff *skb;
+ u16 ctrl = dev->rx_desc[slot].ctrl;
+
+ if (ctrl & MAL_RX_CTRL_EMPTY)
+ break;
+
+ skb = dev->rx_skb[slot];
+ mb();
+ len = dev->rx_desc[slot].data_len;
+
+ if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
+ goto sg;
+
+ ctrl &= EMAC_BAD_RX_MASK;
+ if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
+ emac_parse_rx_error(dev, ctrl);
+ ++dev->estats.rx_dropped_error;
+ emac_recycle_rx_skb(dev, slot, 0);
+ len = 0;
+ goto next;
+ }
+
+ if (len < ETH_HLEN) {
+ ++dev->estats.rx_dropped_stack;
+ emac_recycle_rx_skb(dev, slot, len);
+ goto next;
+ }
+
+ if (len && len < EMAC_RX_COPY_THRESH) {
+ struct sk_buff *copy_skb =
+ alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
+ if (unlikely(!copy_skb))
+ goto oom;
+
+ skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
+ cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
+ len + 2);
+ emac_recycle_rx_skb(dev, slot, len);
+ skb = copy_skb;
+ } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
+ goto oom;
+
+ skb_put(skb, len);
+ push_packet:
+ skb->protocol = eth_type_trans(skb, dev->ndev);
+ emac_rx_csum(dev, skb, ctrl);
+
+ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+ ++dev->estats.rx_dropped_stack;
+ next:
+ ++dev->stats.rx_packets;
+ skip:
+ dev->stats.rx_bytes += len;
+ slot = (slot + 1) % NUM_RX_BUFF;
+ --budget;
+ ++received;
+ continue;
+ sg:
+ if (ctrl & MAL_RX_CTRL_FIRST) {
+ BUG_ON(dev->rx_sg_skb);
+ if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
+ DBG(dev, "rx OOM %d" NL, slot);
+ ++dev->estats.rx_dropped_oom;
+ emac_recycle_rx_skb(dev, slot, 0);
+ } else {
+ dev->rx_sg_skb = skb;
+ skb_put(skb, len);
+ }
+ } else if (!emac_rx_sg_append(dev, slot) &&
+ (ctrl & MAL_RX_CTRL_LAST)) {
+
+ skb = dev->rx_sg_skb;
+ dev->rx_sg_skb = NULL;
+
+ ctrl &= EMAC_BAD_RX_MASK;
+ if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
+ emac_parse_rx_error(dev, ctrl);
+ ++dev->estats.rx_dropped_error;
+ dev_kfree_skb(skb);
+ len = 0;
+ } else
+ goto push_packet;
+ }
+ goto skip;
+ oom:
+ DBG(dev, "rx OOM %d" NL, slot);
+ /* Drop the packet and recycle skb */
+ ++dev->estats.rx_dropped_oom;
+ emac_recycle_rx_skb(dev, slot, 0);
+ goto next;
+ }
+
+ if (received) {
+ DBG2(dev, "rx %d BDs" NL, received);
+ dev->rx_slot = slot;
+ }
+
+ if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
+ mb();
+ if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
+ DBG2(dev, "rx restart" NL);
+ received = 0;
+ goto again;
+ }
+
+ if (dev->rx_sg_skb) {
+ DBG2(dev, "dropping partial rx packet" NL);
+ ++dev->estats.rx_dropped_error;
+ dev_kfree_skb(dev->rx_sg_skb);
+ dev->rx_sg_skb = NULL;
+ }
+
+ clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
+ mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
+ emac_rx_enable(dev);
+ dev->rx_slot = 0;
+ }
+ return received;
+}
+
+/* NAPI poll context */
+static int emac_peek_rx(void *param)
+{
+ struct emac_instance *dev = param;
+
+ return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
+}
+
+/* NAPI poll context */
+static int emac_peek_rx_sg(void *param)
+{
+ struct emac_instance *dev = param;
+
+ int slot = dev->rx_slot;
+ while (1) {
+ u16 ctrl = dev->rx_desc[slot].ctrl;
+ if (ctrl & MAL_RX_CTRL_EMPTY)
+ return 0;
+ else if (ctrl & MAL_RX_CTRL_LAST)
+ return 1;
+
+ slot = (slot + 1) % NUM_RX_BUFF;
+
+ /* I'm just being paranoid here :) */
+ if (unlikely(slot == dev->rx_slot))
+ return 0;
+ }
+}
+
+/* Hard IRQ */
+static void emac_rxde(void *param)
+{
+ struct emac_instance *dev = param;
+
+ ++dev->estats.rx_stopped;
+ emac_rx_disable_async(dev);
+}
+
+/* Hard IRQ */
+static irqreturn_t emac_irq(int irq, void *dev_instance)
+{
+ struct emac_instance *dev = dev_instance;
+ struct emac_regs __iomem *p = dev->emacp;
+ struct emac_error_stats *st = &dev->estats;
+ u32 isr;
+
+ spin_lock(&dev->lock);
+
+ isr = in_be32(&p->isr);
+ out_be32(&p->isr, isr);
+
+ DBG(dev, "isr = %08x" NL, isr);
+
+ if (isr & EMAC4_ISR_TXPE)
+ ++st->tx_parity;
+ if (isr & EMAC4_ISR_RXPE)
+ ++st->rx_parity;
+ if (isr & EMAC4_ISR_TXUE)
+ ++st->tx_underrun;
+ if (isr & EMAC4_ISR_RXOE)
+ ++st->rx_fifo_overrun;
+ if (isr & EMAC_ISR_OVR)
+ ++st->rx_overrun;
+ if (isr & EMAC_ISR_BP)
+ ++st->rx_bad_packet;
+ if (isr & EMAC_ISR_RP)
+ ++st->rx_runt_packet;
+ if (isr & EMAC_ISR_SE)
+ ++st->rx_short_event;
+ if (isr & EMAC_ISR_ALE)
+ ++st->rx_alignment_error;
+ if (isr & EMAC_ISR_BFCS)
+ ++st->rx_bad_fcs;
+ if (isr & EMAC_ISR_PTLE)
+ ++st->rx_packet_too_long;
+ if (isr & EMAC_ISR_ORE)
+ ++st->rx_out_of_range;
+ if (isr & EMAC_ISR_IRE)
+ ++st->rx_in_range;
+ if (isr & EMAC_ISR_SQE)
+ ++st->tx_sqe;
+ if (isr & EMAC_ISR_TE)
+ ++st->tx_errors;
+
+ spin_unlock(&dev->lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct net_device_stats *emac_stats(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ struct emac_stats *st = &dev->stats;
+ struct emac_error_stats *est = &dev->estats;
+ struct net_device_stats *nst = &dev->nstats;
+ unsigned long flags;
+
+ DBG2(dev, "stats" NL);
+
+ /* Compute "legacy" statistics */
+ spin_lock_irqsave(&dev->lock, flags);
+ nst->rx_packets = (unsigned long)st->rx_packets;
+ nst->rx_bytes = (unsigned long)st->rx_bytes;
+ nst->tx_packets = (unsigned long)st->tx_packets;
+ nst->tx_bytes = (unsigned long)st->tx_bytes;
+ nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
+ est->rx_dropped_error +
+ est->rx_dropped_resize +
+ est->rx_dropped_mtu);
+ nst->tx_dropped = (unsigned long)est->tx_dropped;
+
+ nst->rx_errors = (unsigned long)est->rx_bd_errors;
+ nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
+ est->rx_fifo_overrun +
+ est->rx_overrun);
+ nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
+ est->rx_alignment_error);
+ nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
+ est->rx_bad_fcs);
+ nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
+ est->rx_bd_short_event +
+ est->rx_bd_packet_too_long +
+ est->rx_bd_out_of_range +
+ est->rx_bd_in_range +
+ est->rx_runt_packet +
+ est->rx_short_event +
+ est->rx_packet_too_long +
+ est->rx_out_of_range +
+ est->rx_in_range);
+
+ nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
+ nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
+ est->tx_underrun);
+ nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
+ nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
+ est->tx_bd_excessive_collisions +
+ est->tx_bd_late_collision +
+ est->tx_bd_multple_collisions);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return nst;
+}
+
+static struct mal_commac_ops emac_commac_ops = {
+ .poll_tx = &emac_poll_tx,
+ .poll_rx = &emac_poll_rx,
+ .peek_rx = &emac_peek_rx,
+ .rxde = &emac_rxde,
+};
+
+static struct mal_commac_ops emac_commac_sg_ops = {
+ .poll_tx = &emac_poll_tx,
+ .poll_rx = &emac_poll_rx,
+ .peek_rx = &emac_peek_rx_sg,
+ .rxde = &emac_rxde,
+};
+
+/* Ethtool support */
+static int emac_ethtool_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ cmd->supported = dev->phy.features;
+ cmd->port = PORT_MII;
+ cmd->phy_address = dev->phy.address;
+ cmd->transceiver =
+ dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
+
+ mutex_lock(&dev->link_lock);
+ cmd->advertising = dev->phy.advertising;
+ cmd->autoneg = dev->phy.autoneg;
+ cmd->speed = dev->phy.speed;
+ cmd->duplex = dev->phy.duplex;
+ mutex_unlock(&dev->link_lock);
+
+ return 0;
+}
+
+static int emac_ethtool_set_settings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ u32 f = dev->phy.features;
+
+ DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
+ cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
+
+ /* Basic sanity checks */
+ if (dev->phy.address < 0)
+ return -EOPNOTSUPP;
+ if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+ if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
+ return -EINVAL;
+ if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_DISABLE) {
+ switch (cmd->speed) {
+ case SPEED_10:
+ if (cmd->duplex == DUPLEX_HALF &&
+ !(f & SUPPORTED_10baseT_Half))
+ return -EINVAL;
+ if (cmd->duplex == DUPLEX_FULL &&
+ !(f & SUPPORTED_10baseT_Full))
+ return -EINVAL;
+ break;
+ case SPEED_100:
+ if (cmd->duplex == DUPLEX_HALF &&
+ !(f & SUPPORTED_100baseT_Half))
+ return -EINVAL;
+ if (cmd->duplex == DUPLEX_FULL &&
+ !(f & SUPPORTED_100baseT_Full))
+ return -EINVAL;
+ break;
+ case SPEED_1000:
+ if (cmd->duplex == DUPLEX_HALF &&
+ !(f & SUPPORTED_1000baseT_Half))
+ return -EINVAL;
+ if (cmd->duplex == DUPLEX_FULL &&
+ !(f & SUPPORTED_1000baseT_Full))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->link_lock);
+ dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
+ cmd->duplex);
+ mutex_unlock(&dev->link_lock);
+
+ } else {
+ if (!(f & SUPPORTED_Autoneg))
+ return -EINVAL;
+
+ mutex_lock(&dev->link_lock);
+ dev->phy.def->ops->setup_aneg(&dev->phy,
+ (cmd->advertising & f) |
+ (dev->phy.advertising &
+ (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause)));
+ mutex_unlock(&dev->link_lock);
+ }
+ emac_force_link_update(dev);
+
+ return 0;
+}
+
+static void emac_ethtool_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *rp)
+{
+ rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
+ rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
+}
+
+static void emac_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pp)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ mutex_lock(&dev->link_lock);
+ if ((dev->phy.features & SUPPORTED_Autoneg) &&
+ (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
+ pp->autoneg = 1;
+
+ if (dev->phy.duplex == DUPLEX_FULL) {
+ if (dev->phy.pause)
+ pp->rx_pause = pp->tx_pause = 1;
+ else if (dev->phy.asym_pause)
+ pp->tx_pause = 1;
+ }
+ mutex_unlock(&dev->link_lock);
+}
+
+static int emac_get_regs_len(struct emac_instance *dev)
+{
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ return sizeof(struct emac_ethtool_regs_subhdr) +
+ EMAC4_ETHTOOL_REGS_SIZE(dev);
+ else
+ return sizeof(struct emac_ethtool_regs_subhdr) +
+ EMAC_ETHTOOL_REGS_SIZE(dev);
+}
+
+static int emac_ethtool_get_regs_len(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ int size;
+
+ size = sizeof(struct emac_ethtool_regs_hdr) +
+ emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ size += zmii_get_regs_len(dev->zmii_dev);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ size += rgmii_get_regs_len(dev->rgmii_dev);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
+ size += tah_get_regs_len(dev->tah_dev);
+
+ return size;
+}
+
+static void *emac_dump_regs(struct emac_instance *dev, void *buf)
+{
+ struct emac_ethtool_regs_subhdr *hdr = buf;
+
+ hdr->index = dev->cell_index;
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
+ hdr->version = EMAC4_ETHTOOL_REGS_VER;
+ memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
+ return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
+ } else {
+ hdr->version = EMAC_ETHTOOL_REGS_VER;
+ memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
+ return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
+ }
+}
+
+static void emac_ethtool_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *buf)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ struct emac_ethtool_regs_hdr *hdr = buf;
+
+ hdr->components = 0;
+ buf = hdr + 1;
+
+ buf = mal_dump_regs(dev->mal, buf);
+ buf = emac_dump_regs(dev, buf);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
+ hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
+ buf = zmii_dump_regs(dev->zmii_dev, buf);
+ }
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
+ hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
+ buf = rgmii_dump_regs(dev->rgmii_dev, buf);
+ }
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
+ hdr->components |= EMAC_ETHTOOL_REGS_TAH;
+ buf = tah_dump_regs(dev->tah_dev, buf);
+ }
+}
+
+static int emac_ethtool_nway_reset(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ int res = 0;
+
+ DBG(dev, "nway_reset" NL);
+
+ if (dev->phy.address < 0)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&dev->link_lock);
+ if (!dev->phy.autoneg) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
+ out:
+ mutex_unlock(&dev->link_lock);
+ emac_force_link_update(dev);
+ return res;
+}
+
+static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return EMAC_ETHTOOL_STATS_COUNT;
+ else
+ return -EINVAL;
+}
+
+static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
+ u8 * buf)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
+}
+
+static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *estats,
+ u64 * tmp_stats)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
+ tmp_stats += sizeof(dev->stats) / sizeof(u64);
+ memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
+}
+
+static void emac_ethtool_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ strcpy(info->driver, "ibm_emac");
+ strcpy(info->version, DRV_VERSION);
+ info->fw_version[0] = '\0';
+ sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
+ dev->cell_index, dev->ofdev->dev.of_node->full_name);
+ info->regdump_len = emac_ethtool_get_regs_len(ndev);
+}
+
+static const struct ethtool_ops emac_ethtool_ops = {
+ .get_settings = emac_ethtool_get_settings,
+ .set_settings = emac_ethtool_set_settings,
+ .get_drvinfo = emac_ethtool_get_drvinfo,
+
+ .get_regs_len = emac_ethtool_get_regs_len,
+ .get_regs = emac_ethtool_get_regs,
+
+ .nway_reset = emac_ethtool_nway_reset,
+
+ .get_ringparam = emac_ethtool_get_ringparam,
+ .get_pauseparam = emac_ethtool_get_pauseparam,
+
+ .get_strings = emac_ethtool_get_strings,
+ .get_sset_count = emac_ethtool_get_sset_count,
+ .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
+
+ .get_link = ethtool_op_get_link,
+};
+
+static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ struct mii_ioctl_data *data = if_mii(rq);
+
+ DBG(dev, "ioctl %08x" NL, cmd);
+
+ if (dev->phy.address < 0)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = dev->phy.address;
+ /* Fall through */
+ case SIOCGMIIREG:
+ data->val_out = emac_mdio_read(ndev, dev->phy.address,
+ data->reg_num);
+ return 0;
+
+ case SIOCSMIIREG:
+ emac_mdio_write(ndev, dev->phy.address, data->reg_num,
+ data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+struct emac_depentry {
+ u32 phandle;
+ struct device_node *node;
+ struct platform_device *ofdev;
+ void *drvdata;
+};
+
+#define EMAC_DEP_MAL_IDX 0
+#define EMAC_DEP_ZMII_IDX 1
+#define EMAC_DEP_RGMII_IDX 2
+#define EMAC_DEP_TAH_IDX 3
+#define EMAC_DEP_MDIO_IDX 4
+#define EMAC_DEP_PREV_IDX 5
+#define EMAC_DEP_COUNT 6
+
+static int __devinit emac_check_deps(struct emac_instance *dev,
+ struct emac_depentry *deps)
+{
+ int i, there = 0;
+ struct device_node *np;
+
+ for (i = 0; i < EMAC_DEP_COUNT; i++) {
+ /* no dependency on that item, allright */
+ if (deps[i].phandle == 0) {
+ there++;
+ continue;
+ }
+ /* special case for blist as the dependency might go away */
+ if (i == EMAC_DEP_PREV_IDX) {
+ np = *(dev->blist - 1);
+ if (np == NULL) {
+ deps[i].phandle = 0;
+ there++;
+ continue;
+ }
+ if (deps[i].node == NULL)
+ deps[i].node = of_node_get(np);
+ }
+ if (deps[i].node == NULL)
+ deps[i].node = of_find_node_by_phandle(deps[i].phandle);
+ if (deps[i].node == NULL)
+ continue;
+ if (deps[i].ofdev == NULL)
+ deps[i].ofdev = of_find_device_by_node(deps[i].node);
+ if (deps[i].ofdev == NULL)
+ continue;
+ if (deps[i].drvdata == NULL)
+ deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
+ if (deps[i].drvdata != NULL)
+ there++;
+ }
+ return there == EMAC_DEP_COUNT;
+}
+
+static void emac_put_deps(struct emac_instance *dev)
+{
+ if (dev->mal_dev)
+ of_dev_put(dev->mal_dev);
+ if (dev->zmii_dev)
+ of_dev_put(dev->zmii_dev);
+ if (dev->rgmii_dev)
+ of_dev_put(dev->rgmii_dev);
+ if (dev->mdio_dev)
+ of_dev_put(dev->mdio_dev);
+ if (dev->tah_dev)
+ of_dev_put(dev->tah_dev);
+}
+
+static int __devinit emac_of_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ /* We are only intereted in device addition */
+ if (action == BUS_NOTIFY_BOUND_DRIVER)
+ wake_up_all(&emac_probe_wait);
+ return 0;
+}
+
+static struct notifier_block emac_of_bus_notifier __devinitdata = {
+ .notifier_call = emac_of_bus_notify
+};
+
+static int __devinit emac_wait_deps(struct emac_instance *dev)
+{
+ struct emac_depentry deps[EMAC_DEP_COUNT];
+ int i, err;
+
+ memset(&deps, 0, sizeof(deps));
+
+ deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
+ deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
+ deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
+ if (dev->tah_ph)
+ deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
+ if (dev->mdio_ph)
+ deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
+ if (dev->blist && dev->blist > emac_boot_list)
+ deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
+ bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
+ wait_event_timeout(emac_probe_wait,
+ emac_check_deps(dev, deps),
+ EMAC_PROBE_DEP_TIMEOUT);
+ bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
+ err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
+ for (i = 0; i < EMAC_DEP_COUNT; i++) {
+ if (deps[i].node)
+ of_node_put(deps[i].node);
+ if (err && deps[i].ofdev)
+ of_dev_put(deps[i].ofdev);
+ }
+ if (err == 0) {
+ dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
+ dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
+ dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
+ dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
+ dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
+ }
+ if (deps[EMAC_DEP_PREV_IDX].ofdev)
+ of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
+ return err;
+}
+
+static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
+ u32 *val, int fatal)
+{
+ int len;
+ const u32 *prop = of_get_property(np, name, &len);
+ if (prop == NULL || len < sizeof(u32)) {
+ if (fatal)
+ printk(KERN_ERR "%s: missing %s property\n",
+ np->full_name, name);
+ return -ENODEV;
+ }
+ *val = *prop;
+ return 0;
+}
+
+static int __devinit emac_init_phy(struct emac_instance *dev)
+{
+ struct device_node *np = dev->ofdev->dev.of_node;
+ struct net_device *ndev = dev->ndev;
+ u32 phy_map, adv;
+ int i;
+
+ dev->phy.dev = ndev;
+ dev->phy.mode = dev->phy_mode;
+
+ /* PHY-less configuration.
+ * XXX I probably should move these settings to the dev tree
+ */
+ if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
+ emac_reset(dev);
+
+ /* PHY-less configuration.
+ * XXX I probably should move these settings to the dev tree
+ */
+ dev->phy.address = -1;
+ dev->phy.features = SUPPORTED_MII;
+ if (emac_phy_supports_gige(dev->phy_mode))
+ dev->phy.features |= SUPPORTED_1000baseT_Full;
+ else
+ dev->phy.features |= SUPPORTED_100baseT_Full;
+ dev->phy.pause = 1;
+
+ return 0;
+ }
+
+ mutex_lock(&emac_phy_map_lock);
+ phy_map = dev->phy_map | busy_phy_map;
+
+ DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
+
+ dev->phy.mdio_read = emac_mdio_read;
+ dev->phy.mdio_write = emac_mdio_write;
+
+ /* Enable internal clock source */
+#ifdef CONFIG_PPC_DCR_NATIVE
+ if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
+#endif
+ /* PHY clock workaround */
+ emac_rx_clk_tx(dev);
+
+ /* Enable internal clock source on 440GX*/
+#ifdef CONFIG_PPC_DCR_NATIVE
+ if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
+#endif
+ /* Configure EMAC with defaults so we can at least use MDIO
+ * This is needed mostly for 440GX
+ */
+ if (emac_phy_gpcs(dev->phy.mode)) {
+ /* XXX
+ * Make GPCS PHY address equal to EMAC index.
+ * We probably should take into account busy_phy_map
+ * and/or phy_map here.
+ *
+ * Note that the busy_phy_map is currently global
+ * while it should probably be per-ASIC...
+ */
+ dev->phy.gpcs_address = dev->gpcs_address;
+ if (dev->phy.gpcs_address == 0xffffffff)
+ dev->phy.address = dev->cell_index;
+ }
+
+ emac_configure(dev);
+
+ if (dev->phy_address != 0xffffffff)
+ phy_map = ~(1 << dev->phy_address);
+
+ for (i = 0; i < 0x20; phy_map >>= 1, ++i)
+ if (!(phy_map & 1)) {
+ int r;
+ busy_phy_map |= 1 << i;
+
+ /* Quick check if there is a PHY at the address */
+ r = emac_mdio_read(dev->ndev, i, MII_BMCR);
+ if (r == 0xffff || r < 0)
+ continue;
+ if (!emac_mii_phy_probe(&dev->phy, i))
+ break;
+ }
+
+ /* Enable external clock source */
+#ifdef CONFIG_PPC_DCR_NATIVE
+ if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
+#endif
+ mutex_unlock(&emac_phy_map_lock);
+ if (i == 0x20) {
+ printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
+ return -ENXIO;
+ }
+
+ /* Init PHY */
+ if (dev->phy.def->ops->init)
+ dev->phy.def->ops->init(&dev->phy);
+
+ /* Disable any PHY features not supported by the platform */
+ dev->phy.def->features &= ~dev->phy_feat_exc;
+
+ /* Setup initial link parameters */
+ if (dev->phy.features & SUPPORTED_Autoneg) {
+ adv = dev->phy.features;
+ if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
+ adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ /* Restart autonegotiation */
+ dev->phy.def->ops->setup_aneg(&dev->phy, adv);
+ } else {
+ u32 f = dev->phy.def->features;
+ int speed = SPEED_10, fd = DUPLEX_HALF;
+
+ /* Select highest supported speed/duplex */
+ if (f & SUPPORTED_1000baseT_Full) {
+ speed = SPEED_1000;
+ fd = DUPLEX_FULL;
+ } else if (f & SUPPORTED_1000baseT_Half)
+ speed = SPEED_1000;
+ else if (f & SUPPORTED_100baseT_Full) {
+ speed = SPEED_100;
+ fd = DUPLEX_FULL;
+ } else if (f & SUPPORTED_100baseT_Half)
+ speed = SPEED_100;
+ else if (f & SUPPORTED_10baseT_Full)
+ fd = DUPLEX_FULL;
+
+ /* Force link parameters */
+ dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
+ }
+ return 0;
+}
+
+static int __devinit emac_init_config(struct emac_instance *dev)
+{
+ struct device_node *np = dev->ofdev->dev.of_node;
+ const void *p;
+
+ /* Read config from device-tree */
+ if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
+ return -ENXIO;
+ if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
+ return -ENXIO;
+ if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
+ return -ENXIO;
+ if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
+ return -ENXIO;
+ if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
+ dev->max_mtu = 1500;
+ if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
+ dev->rx_fifo_size = 2048;
+ if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
+ dev->tx_fifo_size = 2048;
+ if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
+ dev->rx_fifo_size_gige = dev->rx_fifo_size;
+ if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
+ dev->tx_fifo_size_gige = dev->tx_fifo_size;
+ if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
+ dev->phy_address = 0xffffffff;
+ if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
+ dev->phy_map = 0xffffffff;
+ if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
+ dev->gpcs_address = 0xffffffff;
+ if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
+ return -ENXIO;
+ if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
+ dev->tah_ph = 0;
+ if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
+ dev->tah_port = 0;
+ if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
+ dev->mdio_ph = 0;
+ if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
+ dev->zmii_ph = 0;
+ if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
+ dev->zmii_port = 0xffffffff;
+ if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
+ dev->rgmii_ph = 0;
+ if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
+ dev->rgmii_port = 0xffffffff;
+ if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
+ dev->fifo_entry_size = 16;
+ if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
+ dev->mal_burst_size = 256;
+
+ /* PHY mode needs some decoding */
+ dev->phy_mode = of_get_phy_mode(np);
+ if (dev->phy_mode < 0)
+ dev->phy_mode = PHY_MODE_NA;
+
+ /* Check EMAC version */
+ if (of_device_is_compatible(np, "ibm,emac4sync")) {
+ dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
+ if (of_device_is_compatible(np, "ibm,emac-460ex") ||
+ of_device_is_compatible(np, "ibm,emac-460gt"))
+ dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
+ if (of_device_is_compatible(np, "ibm,emac-405ex") ||
+ of_device_is_compatible(np, "ibm,emac-405exr"))
+ dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
+ } else if (of_device_is_compatible(np, "ibm,emac4")) {
+ dev->features |= EMAC_FTR_EMAC4;
+ if (of_device_is_compatible(np, "ibm,emac-440gx"))
+ dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
+ } else {
+ if (of_device_is_compatible(np, "ibm,emac-440ep") ||
+ of_device_is_compatible(np, "ibm,emac-440gr"))
+ dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
+ if (of_device_is_compatible(np, "ibm,emac-405ez")) {
+#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
+ dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
+#else
+ printk(KERN_ERR "%s: Flow control not disabled!\n",
+ np->full_name);
+ return -ENXIO;
+#endif
+ }
+
+ }
+
+ /* Fixup some feature bits based on the device tree */
+ if (of_get_property(np, "has-inverted-stacr-oc", NULL))
+ dev->features |= EMAC_FTR_STACR_OC_INVERT;
+ if (of_get_property(np, "has-new-stacr-staopc", NULL))
+ dev->features |= EMAC_FTR_HAS_NEW_STACR;
+
+ /* CAB lacks the appropriate properties */
+ if (of_device_is_compatible(np, "ibm,emac-axon"))
+ dev->features |= EMAC_FTR_HAS_NEW_STACR |
+ EMAC_FTR_STACR_OC_INVERT;
+
+ /* Enable TAH/ZMII/RGMII features as found */
+ if (dev->tah_ph != 0) {
+#ifdef CONFIG_IBM_EMAC_TAH
+ dev->features |= EMAC_FTR_HAS_TAH;
+#else
+ printk(KERN_ERR "%s: TAH support not enabled !\n",
+ np->full_name);
+ return -ENXIO;
+#endif
+ }
+
+ if (dev->zmii_ph != 0) {
+#ifdef CONFIG_IBM_EMAC_ZMII
+ dev->features |= EMAC_FTR_HAS_ZMII;
+#else
+ printk(KERN_ERR "%s: ZMII support not enabled !\n",
+ np->full_name);
+ return -ENXIO;
+#endif
+ }
+
+ if (dev->rgmii_ph != 0) {
+#ifdef CONFIG_IBM_EMAC_RGMII
+ dev->features |= EMAC_FTR_HAS_RGMII;
+#else
+ printk(KERN_ERR "%s: RGMII support not enabled !\n",
+ np->full_name);
+ return -ENXIO;
+#endif
+ }
+
+ /* Read MAC-address */
+ p = of_get_property(np, "local-mac-address", NULL);
+ if (p == NULL) {
+ printk(KERN_ERR "%s: Can't find local-mac-address property\n",
+ np->full_name);
+ return -ENXIO;
+ }
+ memcpy(dev->ndev->dev_addr, p, 6);
+
+ /* IAHT and GAHT filter parameterization */
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
+ dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
+ dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
+ } else {
+ dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
+ dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
+ }
+
+ DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
+ DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
+ DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
+ DBG(dev, "max_mtu : %d\n", dev->max_mtu);
+ DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
+
+ return 0;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_open,
+ .ndo_stop = emac_close,
+ .ndo_get_stats = emac_stats,
+ .ndo_set_rx_mode = emac_set_multicast_list,
+ .ndo_do_ioctl = emac_ioctl,
+ .ndo_tx_timeout = emac_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_start_xmit = emac_start_xmit,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static const struct net_device_ops emac_gige_netdev_ops = {
+ .ndo_open = emac_open,
+ .ndo_stop = emac_close,
+ .ndo_get_stats = emac_stats,
+ .ndo_set_rx_mode = emac_set_multicast_list,
+ .ndo_do_ioctl = emac_ioctl,
+ .ndo_tx_timeout = emac_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_start_xmit = emac_start_xmit_sg,
+ .ndo_change_mtu = emac_change_mtu,
+};
+
+static int __devinit emac_probe(struct platform_device *ofdev)
+{
+ struct net_device *ndev;
+ struct emac_instance *dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct device_node **blist = NULL;
+ int err, i;
+
+ /* Skip unused/unwired EMACS. We leave the check for an unused
+ * property here for now, but new flat device trees should set a
+ * status property to "disabled" instead.
+ */
+ if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
+ return -ENODEV;
+
+ /* Find ourselves in the bootlist if we are there */
+ for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
+ if (emac_boot_list[i] == np)
+ blist = &emac_boot_list[i];
+
+ /* Allocate our net_device structure */
+ err = -ENOMEM;
+ ndev = alloc_etherdev(sizeof(struct emac_instance));
+ if (!ndev) {
+ printk(KERN_ERR "%s: could not allocate ethernet device!\n",
+ np->full_name);
+ goto err_gone;
+ }
+ dev = netdev_priv(ndev);
+ dev->ndev = ndev;
+ dev->ofdev = ofdev;
+ dev->blist = blist;
+ SET_NETDEV_DEV(ndev, &ofdev->dev);
+
+ /* Initialize some embedded data structures */
+ mutex_init(&dev->mdio_lock);
+ mutex_init(&dev->link_lock);
+ spin_lock_init(&dev->lock);
+ INIT_WORK(&dev->reset_work, emac_reset_work);
+
+ /* Init various config data based on device-tree */
+ err = emac_init_config(dev);
+ if (err != 0)
+ goto err_free;
+
+ /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
+ dev->emac_irq = irq_of_parse_and_map(np, 0);
+ dev->wol_irq = irq_of_parse_and_map(np, 1);
+ if (dev->emac_irq == NO_IRQ) {
+ printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
+ goto err_free;
+ }
+ ndev->irq = dev->emac_irq;
+
+ /* Map EMAC regs */
+ if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
+ printk(KERN_ERR "%s: Can't get registers address\n",
+ np->full_name);
+ goto err_irq_unmap;
+ }
+ // TODO : request_mem_region
+ dev->emacp = ioremap(dev->rsrc_regs.start,
+ resource_size(&dev->rsrc_regs));
+ if (dev->emacp == NULL) {
+ printk(KERN_ERR "%s: Can't map device registers!\n",
+ np->full_name);
+ err = -ENOMEM;
+ goto err_irq_unmap;
+ }
+
+ /* Wait for dependent devices */
+ err = emac_wait_deps(dev);
+ if (err) {
+ printk(KERN_ERR
+ "%s: Timeout waiting for dependent devices\n",
+ np->full_name);
+ /* display more info about what's missing ? */
+ goto err_reg_unmap;
+ }
+ dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
+ if (dev->mdio_dev != NULL)
+ dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
+
+ /* Register with MAL */
+ dev->commac.ops = &emac_commac_ops;
+ dev->commac.dev = dev;
+ dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
+ dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
+ err = mal_register_commac(dev->mal, &dev->commac);
+ if (err) {
+ printk(KERN_ERR "%s: failed to register with mal %s!\n",
+ np->full_name, dev->mal_dev->dev.of_node->full_name);
+ goto err_rel_deps;
+ }
+ dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
+ dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
+
+ /* Get pointers to BD rings */
+ dev->tx_desc =
+ dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
+ dev->rx_desc =
+ dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
+
+ DBG(dev, "tx_desc %p" NL, dev->tx_desc);
+ DBG(dev, "rx_desc %p" NL, dev->rx_desc);
+
+ /* Clean rings */
+ memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
+ memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
+ memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
+ memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
+
+ /* Attach to ZMII, if needed */
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
+ (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
+ goto err_unreg_commac;
+
+ /* Attach to RGMII, if needed */
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
+ (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
+ goto err_detach_zmii;
+
+ /* Attach to TAH, if needed */
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
+ (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
+ goto err_detach_rgmii;
+
+ /* Set some link defaults before we can find out real parameters */
+ dev->phy.speed = SPEED_100;
+ dev->phy.duplex = DUPLEX_FULL;
+ dev->phy.autoneg = AUTONEG_DISABLE;
+ dev->phy.pause = dev->phy.asym_pause = 0;
+ dev->stop_timeout = STOP_TIMEOUT_100;
+ INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
+
+ /* Find PHY if any */
+ err = emac_init_phy(dev);
+ if (err != 0)
+ goto err_detach_tah;
+
+ if (dev->tah_dev) {
+ ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
+ ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
+ }
+ ndev->watchdog_timeo = 5 * HZ;
+ if (emac_phy_supports_gige(dev->phy_mode)) {
+ ndev->netdev_ops = &emac_gige_netdev_ops;
+ dev->commac.ops = &emac_commac_sg_ops;
+ } else
+ ndev->netdev_ops = &emac_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
+
+ netif_carrier_off(ndev);
+
+ err = register_netdev(ndev);
+ if (err) {
+ printk(KERN_ERR "%s: failed to register net device (%d)!\n",
+ np->full_name, err);
+ goto err_detach_tah;
+ }
+
+ /* Set our drvdata last as we don't want them visible until we are
+ * fully initialized
+ */
+ wmb();
+ dev_set_drvdata(&ofdev->dev, dev);
+
+ /* There's a new kid in town ! Let's tell everybody */
+ wake_up_all(&emac_probe_wait);
+
+
+ printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
+ ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
+
+ if (dev->phy_mode == PHY_MODE_SGMII)
+ printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
+
+ if (dev->phy.address >= 0)
+ printk("%s: found %s PHY (0x%02x)\n", ndev->name,
+ dev->phy.def->name, dev->phy.address);
+
+ emac_dbg_register(dev);
+
+ /* Life is good */
+ return 0;
+
+ /* I have a bad feeling about this ... */
+
+ err_detach_tah:
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
+ tah_detach(dev->tah_dev, dev->tah_port);
+ err_detach_rgmii:
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
+ err_detach_zmii:
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_detach(dev->zmii_dev, dev->zmii_port);
+ err_unreg_commac:
+ mal_unregister_commac(dev->mal, &dev->commac);
+ err_rel_deps:
+ emac_put_deps(dev);
+ err_reg_unmap:
+ iounmap(dev->emacp);
+ err_irq_unmap:
+ if (dev->wol_irq != NO_IRQ)
+ irq_dispose_mapping(dev->wol_irq);
+ if (dev->emac_irq != NO_IRQ)
+ irq_dispose_mapping(dev->emac_irq);
+ err_free:
+ free_netdev(ndev);
+ err_gone:
+ /* if we were on the bootlist, remove us as we won't show up and
+ * wake up all waiters to notify them in case they were waiting
+ * on us
+ */
+ if (blist) {
+ *blist = NULL;
+ wake_up_all(&emac_probe_wait);
+ }
+ return err;
+}
+
+static int __devexit emac_remove(struct platform_device *ofdev)
+{
+ struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
+
+ DBG(dev, "remove" NL);
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+
+ unregister_netdev(dev->ndev);
+
+ cancel_work_sync(&dev->reset_work);
+
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
+ tah_detach(dev->tah_dev, dev->tah_port);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_detach(dev->zmii_dev, dev->zmii_port);
+
+ mal_unregister_commac(dev->mal, &dev->commac);
+ emac_put_deps(dev);
+
+ emac_dbg_unregister(dev);
+ iounmap(dev->emacp);
+
+ if (dev->wol_irq != NO_IRQ)
+ irq_dispose_mapping(dev->wol_irq);
+ if (dev->emac_irq != NO_IRQ)
+ irq_dispose_mapping(dev->emac_irq);
+
+ free_netdev(dev->ndev);
+
+ return 0;
+}
+
+/* XXX Features in here should be replaced by properties... */
+static struct of_device_id emac_match[] =
+{
+ {
+ .type = "network",
+ .compatible = "ibm,emac",
+ },
+ {
+ .type = "network",
+ .compatible = "ibm,emac4",
+ },
+ {
+ .type = "network",
+ .compatible = "ibm,emac4sync",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, emac_match);
+
+static struct platform_driver emac_driver = {
+ .driver = {
+ .name = "emac",
+ .owner = THIS_MODULE,
+ .of_match_table = emac_match,
+ },
+ .probe = emac_probe,
+ .remove = emac_remove,
+};
+
+static void __init emac_make_bootlist(void)
+{
+ struct device_node *np = NULL;
+ int j, max, i = 0, k;
+ int cell_indices[EMAC_BOOT_LIST_SIZE];
+
+ /* Collect EMACs */
+ while((np = of_find_all_nodes(np)) != NULL) {
+ const u32 *idx;
+
+ if (of_match_node(emac_match, np) == NULL)
+ continue;
+ if (of_get_property(np, "unused", NULL))
+ continue;
+ idx = of_get_property(np, "cell-index", NULL);
+ if (idx == NULL)
+ continue;
+ cell_indices[i] = *idx;
+ emac_boot_list[i++] = of_node_get(np);
+ if (i >= EMAC_BOOT_LIST_SIZE) {
+ of_node_put(np);
+ break;
+ }
+ }
+ max = i;
+
+ /* Bubble sort them (doh, what a creative algorithm :-) */
+ for (i = 0; max > 1 && (i < (max - 1)); i++)
+ for (j = i; j < max; j++) {
+ if (cell_indices[i] > cell_indices[j]) {
+ np = emac_boot_list[i];
+ emac_boot_list[i] = emac_boot_list[j];
+ emac_boot_list[j] = np;
+ k = cell_indices[i];
+ cell_indices[i] = cell_indices[j];
+ cell_indices[j] = k;
+ }
+ }
+}
+
+static int __init emac_init(void)
+{
+ int rc;
+
+ printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
+
+ /* Init debug stuff */
+ emac_init_debug();
+
+ /* Build EMAC boot list */
+ emac_make_bootlist();
+
+ /* Init submodules */
+ rc = mal_init();
+ if (rc)
+ goto err;
+ rc = zmii_init();
+ if (rc)
+ goto err_mal;
+ rc = rgmii_init();
+ if (rc)
+ goto err_zmii;
+ rc = tah_init();
+ if (rc)
+ goto err_rgmii;
+ rc = platform_driver_register(&emac_driver);
+ if (rc)
+ goto err_tah;
+
+ return 0;
+
+ err_tah:
+ tah_exit();
+ err_rgmii:
+ rgmii_exit();
+ err_zmii:
+ zmii_exit();
+ err_mal:
+ mal_exit();
+ err:
+ return rc;
+}
+
+static void __exit emac_exit(void)
+{
+ int i;
+
+ platform_driver_unregister(&emac_driver);
+
+ tah_exit();
+ rgmii_exit();
+ zmii_exit();
+ mal_exit();
+ emac_fini_debug();
+
+ /* Destroy EMAC boot list */
+ for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
+ if (emac_boot_list[i])
+ of_node_put(emac_boot_list[i]);
+}
+
+module_init(emac_init);
+module_exit(emac_exit);
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
new file mode 100644
index 00000000000..fa3ec57935f
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -0,0 +1,462 @@
+/*
+ * drivers/net/ibm_newemac/core.h
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller.
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies.
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * Based on original work by
+ * Armin Kuster <akuster@mvista.com>
+ * Johnnie Peters <jpeters@mvista.com>
+ * Copyright 2000, 2001 MontaVista Softare Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __IBM_NEWEMAC_CORE_H
+#define __IBM_NEWEMAC_CORE_H
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/dcr.h>
+
+#include "emac.h"
+#include "phy.h"
+#include "zmii.h"
+#include "rgmii.h"
+#include "mal.h"
+#include "tah.h"
+#include "debug.h"
+
+#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB
+#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB
+
+/* Simple sanity check */
+#if NUM_TX_BUFF > 256 || NUM_RX_BUFF > 256
+#error Invalid number of buffer descriptors (greater than 256)
+#endif
+
+#define EMAC_MIN_MTU 46
+
+/* Maximum L2 header length (VLAN tagged, no FCS) */
+#define EMAC_MTU_OVERHEAD (6 * 2 + 2 + 4)
+
+/* RX BD size for the given MTU */
+static inline int emac_rx_size(int mtu)
+{
+ if (mtu > ETH_DATA_LEN)
+ return MAL_MAX_RX_SIZE;
+ else
+ return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD);
+}
+
+#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment())
+
+#define EMAC_RX_SKB_HEADROOM \
+ EMAC_DMA_ALIGN(CONFIG_IBM_EMAC_RX_SKB_HEADROOM)
+
+/* Size of RX skb for the given MTU */
+static inline int emac_rx_skb_size(int mtu)
+{
+ int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu));
+ return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM;
+}
+
+/* RX DMA sync size */
+static inline int emac_rx_sync_size(int mtu)
+{
+ return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2);
+}
+
+/* Driver statistcs is split into two parts to make it more cache friendly:
+ * - normal statistics (packet count, etc)
+ * - error statistics
+ *
+ * When statistics is requested by ethtool, these parts are concatenated,
+ * normal one goes first.
+ *
+ * Please, keep these structures in sync with emac_stats_keys.
+ */
+
+/* Normal TX/RX Statistics */
+struct emac_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_packets_csum;
+ u64 tx_packets_csum;
+};
+
+/* Error statistics */
+struct emac_error_stats {
+ u64 tx_undo;
+
+ /* Software RX Errors */
+ u64 rx_dropped_stack;
+ u64 rx_dropped_oom;
+ u64 rx_dropped_error;
+ u64 rx_dropped_resize;
+ u64 rx_dropped_mtu;
+ u64 rx_stopped;
+ /* BD reported RX errors */
+ u64 rx_bd_errors;
+ u64 rx_bd_overrun;
+ u64 rx_bd_bad_packet;
+ u64 rx_bd_runt_packet;
+ u64 rx_bd_short_event;
+ u64 rx_bd_alignment_error;
+ u64 rx_bd_bad_fcs;
+ u64 rx_bd_packet_too_long;
+ u64 rx_bd_out_of_range;
+ u64 rx_bd_in_range;
+ /* EMAC IRQ reported RX errors */
+ u64 rx_parity;
+ u64 rx_fifo_overrun;
+ u64 rx_overrun;
+ u64 rx_bad_packet;
+ u64 rx_runt_packet;
+ u64 rx_short_event;
+ u64 rx_alignment_error;
+ u64 rx_bad_fcs;
+ u64 rx_packet_too_long;
+ u64 rx_out_of_range;
+ u64 rx_in_range;
+
+ /* Software TX Errors */
+ u64 tx_dropped;
+ /* BD reported TX errors */
+ u64 tx_bd_errors;
+ u64 tx_bd_bad_fcs;
+ u64 tx_bd_carrier_loss;
+ u64 tx_bd_excessive_deferral;
+ u64 tx_bd_excessive_collisions;
+ u64 tx_bd_late_collision;
+ u64 tx_bd_multple_collisions;
+ u64 tx_bd_single_collision;
+ u64 tx_bd_underrun;
+ u64 tx_bd_sqe;
+ /* EMAC IRQ reported TX errors */
+ u64 tx_parity;
+ u64 tx_underrun;
+ u64 tx_sqe;
+ u64 tx_errors;
+};
+
+#define EMAC_ETHTOOL_STATS_COUNT ((sizeof(struct emac_stats) + \
+ sizeof(struct emac_error_stats)) \
+ / sizeof(u64))
+
+struct emac_instance {
+ struct net_device *ndev;
+ struct resource rsrc_regs;
+ struct emac_regs __iomem *emacp;
+ struct platform_device *ofdev;
+ struct device_node **blist; /* bootlist entry */
+
+ /* MAL linkage */
+ u32 mal_ph;
+ struct platform_device *mal_dev;
+ u32 mal_rx_chan;
+ u32 mal_tx_chan;
+ struct mal_instance *mal;
+ struct mal_commac commac;
+
+ /* PHY infos */
+ u32 phy_mode;
+ u32 phy_map;
+ u32 phy_address;
+ u32 phy_feat_exc;
+ struct mii_phy phy;
+ struct mutex link_lock;
+ struct delayed_work link_work;
+ int link_polling;
+
+ /* GPCS PHY infos */
+ u32 gpcs_address;
+
+ /* Shared MDIO if any */
+ u32 mdio_ph;
+ struct platform_device *mdio_dev;
+ struct emac_instance *mdio_instance;
+ struct mutex mdio_lock;
+
+ /* ZMII infos if any */
+ u32 zmii_ph;
+ u32 zmii_port;
+ struct platform_device *zmii_dev;
+
+ /* RGMII infos if any */
+ u32 rgmii_ph;
+ u32 rgmii_port;
+ struct platform_device *rgmii_dev;
+
+ /* TAH infos if any */
+ u32 tah_ph;
+ u32 tah_port;
+ struct platform_device *tah_dev;
+
+ /* IRQs */
+ int wol_irq;
+ int emac_irq;
+
+ /* OPB bus frequency in Mhz */
+ u32 opb_bus_freq;
+
+ /* Cell index within an ASIC (for clk mgmnt) */
+ u32 cell_index;
+
+ /* Max supported MTU */
+ u32 max_mtu;
+
+ /* Feature bits (from probe table) */
+ unsigned int features;
+
+ /* Tx and Rx fifo sizes & other infos in bytes */
+ u32 tx_fifo_size;
+ u32 tx_fifo_size_gige;
+ u32 rx_fifo_size;
+ u32 rx_fifo_size_gige;
+ u32 fifo_entry_size;
+ u32 mal_burst_size; /* move to MAL ? */
+
+ /* IAHT and GAHT filter parameterization */
+ u32 xaht_slots_shift;
+ u32 xaht_width_shift;
+
+ /* Descriptor management
+ */
+ struct mal_descriptor *tx_desc;
+ int tx_cnt;
+ int tx_slot;
+ int ack_slot;
+
+ struct mal_descriptor *rx_desc;
+ int rx_slot;
+ struct sk_buff *rx_sg_skb; /* 1 */
+ int rx_skb_size;
+ int rx_sync_size;
+
+ struct sk_buff *tx_skb[NUM_TX_BUFF];
+ struct sk_buff *rx_skb[NUM_RX_BUFF];
+
+ /* Stats
+ */
+ struct emac_error_stats estats;
+ struct net_device_stats nstats;
+ struct emac_stats stats;
+
+ /* Misc
+ */
+ int reset_failed;
+ int stop_timeout; /* in us */
+ int no_mcast;
+ int mcast_pending;
+ int opened;
+ struct work_struct reset_work;
+ spinlock_t lock;
+};
+
+/*
+ * Features of various EMAC implementations
+ */
+
+/*
+ * No flow control on 40x according to the original driver
+ */
+#define EMAC_FTR_NO_FLOW_CONTROL_40x 0x00000001
+/*
+ * Cell is an EMAC4
+ */
+#define EMAC_FTR_EMAC4 0x00000002
+/*
+ * For the 440SPe, AMCC inexplicably changed the polarity of
+ * the "operation complete" bit in the MII control register.
+ */
+#define EMAC_FTR_STACR_OC_INVERT 0x00000004
+/*
+ * Set if we have a TAH.
+ */
+#define EMAC_FTR_HAS_TAH 0x00000008
+/*
+ * Set if we have a ZMII.
+ */
+#define EMAC_FTR_HAS_ZMII 0x00000010
+/*
+ * Set if we have a RGMII.
+ */
+#define EMAC_FTR_HAS_RGMII 0x00000020
+/*
+ * Set if we have new type STACR with STAOPC
+ */
+#define EMAC_FTR_HAS_NEW_STACR 0x00000040
+/*
+ * Set if we need phy clock workaround for 440gx
+ */
+#define EMAC_FTR_440GX_PHY_CLK_FIX 0x00000080
+/*
+ * Set if we need phy clock workaround for 440ep or 440gr
+ */
+#define EMAC_FTR_440EP_PHY_CLK_FIX 0x00000100
+/*
+ * The 405EX and 460EX contain the EMAC4SYNC core
+ */
+#define EMAC_FTR_EMAC4SYNC 0x00000200
+/*
+ * Set if we need phy clock workaround for 460ex or 460gt
+ */
+#define EMAC_FTR_460EX_PHY_CLK_FIX 0x00000400
+
+
+/* Right now, we don't quite handle the always/possible masks on the
+ * most optimal way as we don't have a way to say something like
+ * always EMAC4. Patches welcome.
+ */
+enum {
+ EMAC_FTRS_ALWAYS = 0,
+
+ EMAC_FTRS_POSSIBLE =
+#ifdef CONFIG_IBM_EMAC_EMAC4
+ EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC |
+ EMAC_FTR_HAS_NEW_STACR |
+ EMAC_FTR_STACR_OC_INVERT | EMAC_FTR_440GX_PHY_CLK_FIX |
+#endif
+#ifdef CONFIG_IBM_EMAC_TAH
+ EMAC_FTR_HAS_TAH |
+#endif
+#ifdef CONFIG_IBM_EMAC_ZMII
+ EMAC_FTR_HAS_ZMII |
+#endif
+#ifdef CONFIG_IBM_EMAC_RGMII
+ EMAC_FTR_HAS_RGMII |
+#endif
+#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
+ EMAC_FTR_NO_FLOW_CONTROL_40x |
+#endif
+ EMAC_FTR_460EX_PHY_CLK_FIX |
+ EMAC_FTR_440EP_PHY_CLK_FIX,
+};
+
+static inline int emac_has_feature(struct emac_instance *dev,
+ unsigned long feature)
+{
+ return (EMAC_FTRS_ALWAYS & feature) ||
+ (EMAC_FTRS_POSSIBLE & dev->features & feature);
+}
+
+/*
+ * Various instances of the EMAC core have varying 1) number of
+ * address match slots, 2) width of the registers for handling address
+ * match slots, 3) number of registers for handling address match
+ * slots and 4) base offset for those registers.
+ *
+ * These macros and inlines handle these differences based on
+ * parameters supplied by the device structure which are, in turn,
+ * initialized based on the "compatible" entry in the device tree.
+ */
+
+#define EMAC4_XAHT_SLOTS_SHIFT 6
+#define EMAC4_XAHT_WIDTH_SHIFT 4
+
+#define EMAC4SYNC_XAHT_SLOTS_SHIFT 8
+#define EMAC4SYNC_XAHT_WIDTH_SHIFT 5
+
+#define EMAC_XAHT_SLOTS(dev) (1 << (dev)->xaht_slots_shift)
+#define EMAC_XAHT_WIDTH(dev) (1 << (dev)->xaht_width_shift)
+#define EMAC_XAHT_REGS(dev) (1 << ((dev)->xaht_slots_shift - \
+ (dev)->xaht_width_shift))
+
+#define EMAC_XAHT_CRC_TO_SLOT(dev, crc) \
+ ((EMAC_XAHT_SLOTS(dev) - 1) - \
+ ((crc) >> ((sizeof (u32) * BITS_PER_BYTE) - \
+ (dev)->xaht_slots_shift)))
+
+#define EMAC_XAHT_SLOT_TO_REG(dev, slot) \
+ ((slot) >> (dev)->xaht_width_shift)
+
+#define EMAC_XAHT_SLOT_TO_MASK(dev, slot) \
+ ((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >> \
+ ((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1)))
+
+static inline u32 *emac_xaht_base(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ int offset;
+
+ /* The first IAHT entry always is the base of the block of
+ * IAHT and GAHT registers.
+ */
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC))
+ offset = offsetof(struct emac_regs, u1.emac4sync.iaht1);
+ else
+ offset = offsetof(struct emac_regs, u0.emac4.iaht1);
+
+ return (u32 *)((ptrdiff_t)p + offset);
+}
+
+static inline u32 *emac_gaht_base(struct emac_instance *dev)
+{
+ /* GAHT registers always come after an identical number of
+ * IAHT registers.
+ */
+ return emac_xaht_base(dev) + EMAC_XAHT_REGS(dev);
+}
+
+static inline u32 *emac_iaht_base(struct emac_instance *dev)
+{
+ /* IAHT registers always come before an identical number of
+ * GAHT registers.
+ */
+ return emac_xaht_base(dev);
+}
+
+/* Ethtool get_regs complex data.
+ * We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH
+ * when available.
+ *
+ * Returned BLOB consists of the ibm_emac_ethtool_regs_hdr,
+ * MAL registers, EMAC registers and optional ZMII, RGMII, TAH registers.
+ * Each register component is preceded with emac_ethtool_regs_subhdr.
+ * Order of the optional headers follows their relative bit posititions
+ * in emac_ethtool_regs_hdr.components
+ */
+#define EMAC_ETHTOOL_REGS_ZMII 0x00000001
+#define EMAC_ETHTOOL_REGS_RGMII 0x00000002
+#define EMAC_ETHTOOL_REGS_TAH 0x00000004
+
+struct emac_ethtool_regs_hdr {
+ u32 components;
+};
+
+struct emac_ethtool_regs_subhdr {
+ u32 version;
+ u32 index;
+};
+
+#define EMAC_ETHTOOL_REGS_VER 0
+#define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
+ (dev)->rsrc_regs.start + 1)
+#define EMAC4_ETHTOOL_REGS_VER 1
+#define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
+ (dev)->rsrc_regs.start + 1)
+
+#endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ethernet/ibm/emac/debug.c
index 8c6c1e2a875..8c6c1e2a875 100644
--- a/drivers/net/ibm_newemac/debug.c
+++ b/drivers/net/ethernet/ibm/emac/debug.c
diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h
new file mode 100644
index 00000000000..90477fe69d0
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/debug.h
@@ -0,0 +1,83 @@
+/*
+ * drivers/net/ibm_newemac/debug.h
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __IBM_NEWEMAC_DEBUG_H
+#define __IBM_NEWEMAC_DEBUG_H
+
+#include <linux/init.h>
+
+#include "core.h"
+
+#if defined(CONFIG_IBM_EMAC_DEBUG)
+
+struct emac_instance;
+struct mal_instance;
+
+extern void emac_dbg_register(struct emac_instance *dev);
+extern void emac_dbg_unregister(struct emac_instance *dev);
+extern void mal_dbg_register(struct mal_instance *mal);
+extern void mal_dbg_unregister(struct mal_instance *mal);
+extern int emac_init_debug(void) __init;
+extern void emac_fini_debug(void) __exit;
+extern void emac_dbg_dump_all(void);
+
+# define DBG_LEVEL 1
+
+#else
+
+# define emac_dbg_register(x) do { } while(0)
+# define emac_dbg_unregister(x) do { } while(0)
+# define mal_dbg_register(x) do { } while(0)
+# define mal_dbg_unregister(x) do { } while(0)
+# define emac_init_debug() do { } while(0)
+# define emac_fini_debug() do { } while(0)
+# define emac_dbg_dump_all() do { } while(0)
+
+# define DBG_LEVEL 0
+
+#endif
+
+#define EMAC_DBG(d, name, fmt, arg...) \
+ printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg)
+
+#if DBG_LEVEL > 0
+# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x)
+# define MAL_DBG(d,f,x...) EMAC_DBG(d, mal, f, ##x)
+# define ZMII_DBG(d,f,x...) EMAC_DBG(d, zmii, f, ##x)
+# define RGMII_DBG(d,f,x...) EMAC_DBG(d, rgmii, f, ##x)
+# define NL "\n"
+#else
+# define DBG(f,x...) ((void)0)
+# define MAL_DBG(d,f,x...) ((void)0)
+# define ZMII_DBG(d,f,x...) ((void)0)
+# define RGMII_DBG(d,f,x...) ((void)0)
+#endif
+#if DBG_LEVEL > 1
+# define DBG2(d,f,x...) DBG(d,f, ##x)
+# define MAL_DBG2(d,f,x...) MAL_DBG(d,f, ##x)
+# define ZMII_DBG2(d,f,x...) ZMII_DBG(d,f, ##x)
+# define RGMII_DBG2(d,f,x...) RGMII_DBG(d,f, ##x)
+#else
+# define DBG2(f,x...) ((void)0)
+# define MAL_DBG2(d,f,x...) ((void)0)
+# define ZMII_DBG2(d,f,x...) ((void)0)
+# define RGMII_DBG2(d,f,x...) ((void)0)
+#endif
+
+#endif /* __IBM_NEWEMAC_DEBUG_H */
diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index 1568278d759..1568278d759 100644
--- a/drivers/net/ibm_newemac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index d268f404b7b..f3c50b97ec6 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -577,8 +577,8 @@ static int __devinit mal_probe(struct platform_device *ofdev)
}
if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
-#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
- defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
+#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
+ defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
MAL_FTR_COMMON_ERR_INT);
#else
@@ -616,7 +616,7 @@ static int __devinit mal_probe(struct platform_device *ofdev)
init_dummy_netdev(&mal->dummy_dev);
netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
- CONFIG_IBM_NEW_EMAC_POLL_WEIGHT);
+ CONFIG_IBM_EMAC_POLL_WEIGHT);
/* Load power-on reset defaults */
mal_reset(mal);
diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index 66084214bf4..d06f985bda3 100644
--- a/drivers/net/ibm_newemac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -245,10 +245,10 @@ enum {
MAL_FTRS_ALWAYS = 0,
MAL_FTRS_POSSIBLE =
-#ifdef CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT
+#ifdef CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT
MAL_FTR_CLEAR_ICINTSTAT |
#endif
-#ifdef CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR
+#ifdef CONFIG_IBM_EMAC_MAL_COMMON_ERR
MAL_FTR_COMMON_ERR_INT |
#endif
0,
diff --git a/drivers/net/ibm_newemac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index ab4e5969fe6..ab4e5969fe6 100644
--- a/drivers/net/ibm_newemac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
diff --git a/drivers/net/ibm_newemac/phy.h b/drivers/net/ethernet/ibm/emac/phy.h
index 5d2bf4cbe50..5d2bf4cbe50 100644
--- a/drivers/net/ibm_newemac/phy.h
+++ b/drivers/net/ethernet/ibm/emac/phy.h
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index 4fa53f3def6..4fa53f3def6 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
diff --git a/drivers/net/ibm_newemac/rgmii.h b/drivers/net/ethernet/ibm/emac/rgmii.h
index d6979904986..9296b6c5f92 100644
--- a/drivers/net/ibm_newemac/rgmii.h
+++ b/drivers/net/ethernet/ibm/emac/rgmii.h
@@ -54,7 +54,7 @@ struct rgmii_instance {
struct platform_device *ofdev;
};
-#ifdef CONFIG_IBM_NEW_EMAC_RGMII
+#ifdef CONFIG_IBM_EMAC_RGMII
extern int rgmii_init(void);
extern void rgmii_exit(void);
@@ -77,6 +77,6 @@ extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
# define rgmii_set_speed(x,y,z) do { } while(0)
# define rgmii_get_regs_len(x) 0
# define rgmii_dump_regs(x,buf) (buf)
-#endif /* !CONFIG_IBM_NEW_EMAC_RGMII */
+#endif /* !CONFIG_IBM_EMAC_RGMII */
#endif /* __IBM_NEWEMAC_RGMII_H */
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index 5f51bf7c9dc..5f51bf7c9dc 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
diff --git a/drivers/net/ibm_newemac/tah.h b/drivers/net/ethernet/ibm/emac/tah.h
index 61dbeca006d..3437ab4964c 100644
--- a/drivers/net/ibm_newemac/tah.h
+++ b/drivers/net/ethernet/ibm/emac/tah.h
@@ -70,7 +70,7 @@ struct tah_instance {
#define TAH_MR_DTFP 0x00100000
#define TAH_MR_DIG 0x00080000
-#ifdef CONFIG_IBM_NEW_EMAC_TAH
+#ifdef CONFIG_IBM_EMAC_TAH
extern int tah_init(void);
extern void tah_exit(void);
@@ -90,6 +90,6 @@ extern void *tah_dump_regs(struct platform_device *ofdev, void *buf);
# define tah_get_regs_len(x) 0
# define tah_dump_regs(x,buf) (buf)
-#endif /* !CONFIG_IBM_NEW_EMAC_TAH */
+#endif /* !CONFIG_IBM_EMAC_TAH */
#endif /* __IBM_NEWEMAC_TAH_H */
diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 97449e786d6..97449e786d6 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
diff --git a/drivers/net/ibm_newemac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
index 1333fa2b278..ceaed823a83 100644
--- a/drivers/net/ibm_newemac/zmii.h
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -51,7 +51,7 @@ struct zmii_instance {
struct platform_device *ofdev;
};
-#ifdef CONFIG_IBM_NEW_EMAC_ZMII
+#ifdef CONFIG_IBM_EMAC_ZMII
extern int zmii_init(void);
extern void zmii_exit(void);
@@ -73,6 +73,6 @@ extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
# define zmii_set_speed(x,y,z) do { } while(0)
# define zmii_get_regs_len(x) 0
# define zmii_dump_regs(x,buf) (buf)
-#endif /* !CONFIG_IBM_NEW_EMAC_ZMII */
+#endif /* !CONFIG_IBM_EMAC_ZMII */
#endif /* __IBM_NEWEMAC_ZMII_H */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index d393f1e764e..b1cd41b9c61 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1014,16 +1014,15 @@ retry_bounce:
/* Map the frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
- frag->page_offset, frag->size,
- DMA_TO_DEVICE);
+ dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
goto map_failed_frags;
- descs[i+1].fields.flags_len = desc_flags | frag->size;
+ descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
descs[i+1].fields.address = dma_addr;
}
@@ -1319,7 +1318,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
.ndo_open = ibmveth_open,
.ndo_stop = ibmveth_close,
.ndo_start_xmit = ibmveth_start_xmit,
- .ndo_set_multicast_list = ibmveth_set_multicast_list,
+ .ndo_set_rx_mode = ibmveth_set_multicast_list,
.ndo_do_ioctl = ibmveth_ioctl,
.ndo_change_mtu = ibmveth_change_mtu,
.ndo_fix_features = ibmveth_fix_features,
diff --git a/drivers/net/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 43a794fab9f..43a794fab9f 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
diff --git a/drivers/net/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c
index 53dd39e9130..4326681df38 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/ethernet/ibm/iseries_veth.c
@@ -1009,7 +1009,7 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_stop = veth_close,
.ndo_start_xmit = veth_start_xmit,
.ndo_change_mtu = veth_change_mtu,
- .ndo_set_multicast_list = veth_set_multicast_list,
+ .ndo_set_rx_mode = veth_set_multicast_list,
.ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig
new file mode 100644
index 00000000000..3aff81d7989
--- /dev/null
+++ b/drivers/net/ethernet/icplus/Kconfig
@@ -0,0 +1,14 @@
+#
+# IC Plus device configuration
+#
+
+config IP1000
+ tristate "IP1000 Gigabit Ethernet support"
+ depends on PCI && EXPERIMENTAL
+ select NET_CORE
+ select MII
+ ---help---
+ This driver supports IP1000 gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ipg. This is recommended.
diff --git a/drivers/net/ethernet/icplus/Makefile b/drivers/net/ethernet/icplus/Makefile
new file mode 100644
index 00000000000..5bc87c1f36a
--- /dev/null
+++ b/drivers/net/ethernet/icplus/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the IC Plus device drivers
+#
+
+obj-$(CONFIG_IP1000) += ipg.o
diff --git a/drivers/net/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index d4aa40adf1e..8fd80a00b89 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -20,6 +20,9 @@
* http://www.icplus.com.tw
* jesse@icplus.com.tw
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
@@ -67,7 +70,7 @@ MODULE_LICENSE("GPL");
* Variable record -- index by leading revision/length
* Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
*/
-static unsigned short DefaultPhyParam[] = {
+static const unsigned short DefaultPhyParam[] = {
/* 11/12/03 IP1000A v1-3 rev=0x40 */
/*--------------------------------------------------------------------------
(0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
@@ -85,7 +88,7 @@ static unsigned short DefaultPhyParam[] = {
0x0000
};
-static const char *ipg_brand_name[] = {
+static const char * const ipg_brand_name[] = {
"IC PLUS IP1000 1000/100/10 based NIC",
"Sundance Technology ST2021 based NIC",
"Tamarack Microelectronics TC9020/9021 based NIC",
@@ -118,23 +121,23 @@ static void ipg_dump_rfdlist(struct net_device *dev)
IPG_DEBUG_MSG("_dump_rfdlist\n");
- printk(KERN_INFO "rx_current = %2.2x\n", sp->rx_current);
- printk(KERN_INFO "rx_dirty = %2.2x\n", sp->rx_dirty);
- printk(KERN_INFO "RFDList start address = %16.16lx\n",
- (unsigned long) sp->rxd_map);
- printk(KERN_INFO "RFDListPtr register = %8.8x%8.8x\n",
- ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));
+ netdev_info(dev, "rx_current = %02x\n", sp->rx_current);
+ netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty);
+ netdev_info(dev, "RFDList start address = %016lx\n",
+ (unsigned long)sp->rxd_map);
+ netdev_info(dev, "RFDListPtr register = %08x%08x\n",
+ ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));
for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
- printk(KERN_INFO "%2.2x %4.4x RFDNextPtr = %16.16lx\n", i,
- offset, (unsigned long) sp->rxd[i].next_desc);
+ netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n",
+ i, offset, (unsigned long)sp->rxd[i].next_desc);
offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
- printk(KERN_INFO "%2.2x %4.4x RFS = %16.16lx\n", i,
- offset, (unsigned long) sp->rxd[i].rfs);
+ netdev_info(dev, "%02x %04x RFS = %016lx\n",
+ i, offset, (unsigned long)sp->rxd[i].rfs);
offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
- printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i,
- offset, (unsigned long) sp->rxd[i].frag_info);
+ netdev_info(dev, "%02x %04x frag_info = %016lx\n",
+ i, offset, (unsigned long)sp->rxd[i].frag_info);
}
}
@@ -147,24 +150,24 @@ static void ipg_dump_tfdlist(struct net_device *dev)
IPG_DEBUG_MSG("_dump_tfdlist\n");
- printk(KERN_INFO "tx_current = %2.2x\n", sp->tx_current);
- printk(KERN_INFO "tx_dirty = %2.2x\n", sp->tx_dirty);
- printk(KERN_INFO "TFDList start address = %16.16lx\n",
- (unsigned long) sp->txd_map);
- printk(KERN_INFO "TFDListPtr register = %8.8x%8.8x\n",
- ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));
+ netdev_info(dev, "tx_current = %02x\n", sp->tx_current);
+ netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty);
+ netdev_info(dev, "TFDList start address = %016lx\n",
+ (unsigned long) sp->txd_map);
+ netdev_info(dev, "TFDListPtr register = %08x%08x\n",
+ ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));
for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
- printk(KERN_INFO "%2.2x %4.4x TFDNextPtr = %16.16lx\n", i,
- offset, (unsigned long) sp->txd[i].next_desc);
+ netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n",
+ i, offset, (unsigned long)sp->txd[i].next_desc);
offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
- printk(KERN_INFO "%2.2x %4.4x TFC = %16.16lx\n", i,
- offset, (unsigned long) sp->txd[i].tfc);
+ netdev_info(dev, "%02x %04x TFC = %016lx\n",
+ i, offset, (unsigned long) sp->txd[i].tfc);
offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
- printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i,
- offset, (unsigned long) sp->txd[i].frag_info);
+ netdev_info(dev, "%02x %04x frag_info = %016lx\n",
+ i, offset, (unsigned long) sp->txd[i].frag_info);
}
}
#endif
@@ -480,6 +483,10 @@ static int ipg_config_autoneg(struct net_device *dev)
u32 mac_ctrl_val;
u32 asicctrl;
u8 phyctrl;
+ const char *speed;
+ const char *duplex;
+ const char *tx_desc;
+ const char *rx_desc;
IPG_DEBUG_MSG("_config_autoneg\n");
@@ -499,27 +506,27 @@ static int ipg_config_autoneg(struct net_device *dev)
*/
sp->tenmbpsmode = 0;
- printk(KERN_INFO "%s: Link speed = ", dev->name);
-
/* Determine actual speed of operation. */
switch (phyctrl & IPG_PC_LINK_SPEED) {
case IPG_PC_LINK_SPEED_10MBPS:
- printk("10Mbps.\n");
- printk(KERN_INFO "%s: 10Mbps operational mode enabled.\n",
- dev->name);
+ speed = "10Mbps";
sp->tenmbpsmode = 1;
break;
case IPG_PC_LINK_SPEED_100MBPS:
- printk("100Mbps.\n");
+ speed = "100Mbps";
break;
case IPG_PC_LINK_SPEED_1000MBPS:
- printk("1000Mbps.\n");
+ speed = "1000Mbps";
break;
default:
- printk("undefined!\n");
+ speed = "undefined!";
return 0;
}
+ netdev_info(dev, "Link speed = %s\n", speed);
+ if (sp->tenmbpsmode == 1)
+ netdev_info(dev, "10Mbps operational mode enabled\n");
+
if (phyctrl & IPG_PC_DUPLEX_STATUS) {
fullduplex = 1;
txflowcontrol = 1;
@@ -528,38 +535,41 @@ static int ipg_config_autoneg(struct net_device *dev)
/* Configure full duplex, and flow control. */
if (fullduplex == 1) {
+
/* Configure IPG for full duplex operation. */
- printk(KERN_INFO "%s: setting full duplex, ", dev->name);
+
+ duplex = "full";
mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;
if (txflowcontrol == 1) {
- printk("TX flow control");
+ tx_desc = "";
mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
} else {
- printk("no TX flow control");
+ tx_desc = "no ";
mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
}
if (rxflowcontrol == 1) {
- printk(", RX flow control.");
+ rx_desc = "";
mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
} else {
- printk(", no RX flow control.");
+ rx_desc = "no ";
mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
}
-
- printk("\n");
} else {
- /* Configure IPG for half duplex operation. */
- printk(KERN_INFO "%s: setting half duplex, "
- "no TX flow control, no RX flow control.\n", dev->name);
-
- mac_ctrl_val &= ~IPG_MC_DUPLEX_SELECT_FD &
- ~IPG_MC_TX_FLOW_CONTROL_ENABLE &
- ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
+ duplex = "half";
+ tx_desc = "no ";
+ rx_desc = "no ";
+ mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD &
+ ~IPG_MC_TX_FLOW_CONTROL_ENABLE &
+ ~IPG_MC_RX_FLOW_CONTROL_ENABLE);
}
+
+ netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n",
+ duplex, tx_desc, rx_desc);
ipg_w32(mac_ctrl_val, MAC_CTRL);
+
return 0;
}
@@ -784,14 +794,13 @@ static int init_rfdlist(struct net_device *dev)
* A receive buffer was not ready, break the
* RFD list here.
*/
- IPG_DEBUG_MSG("Cannot allocate Rx buffer.\n");
+ IPG_DEBUG_MSG("Cannot allocate Rx buffer\n");
/* Just in case we cannot allocate a single RFD.
* Should not occur.
*/
if (i == 0) {
- printk(KERN_ERR "%s: No memory available"
- " for RFD list.\n", dev->name);
+ netdev_err(dev, "No memory available for RFD list\n");
return -ENOMEM;
}
}
@@ -838,7 +847,7 @@ static void init_tfdlist(struct net_device *dev)
sp->tx_dirty = 0;
/* Write the location of the TFDList to the IPG. */
- IPG_DDEBUG_MSG("Starting TFDListPtr = %8.8x\n",
+ IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n",
(u32) sp->txd_map);
ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
ipg_w32(0x00000000, TFD_LIST_PTR_1);
@@ -864,7 +873,7 @@ static void ipg_nic_txfree(struct net_device *dev)
struct sk_buff *skb = sp->tx_buff[dirty];
struct ipg_tx *txfd = sp->txd + dirty;
- IPG_DEBUG_MSG("TFC = %16.16lx\n", (unsigned long) txfd->tfc);
+ IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc);
/* Look at each TFD's TFC field beginning
* at the last freed TFD up to the current TFD.
@@ -906,10 +915,8 @@ static void ipg_tx_timeout(struct net_device *dev)
spin_lock_irq(&sp->lock);
/* Re-configure after DMA reset. */
- if (ipg_io_config(dev) < 0) {
- printk(KERN_INFO "%s: Error during re-configuration.\n",
- dev->name);
- }
+ if (ipg_io_config(dev) < 0)
+ netdev_info(dev, "Error during re-configuration\n");
init_tfdlist(dev);
@@ -938,7 +945,7 @@ static void ipg_nic_txcleanup(struct net_device *dev)
*/
u32 txstatusdword = ipg_r32(TX_STATUS);
- IPG_DEBUG_MSG("TxStatus = %8.8x\n", txstatusdword);
+ IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword);
/* Check for Transmit errors. Error bits only valid if
* TX_COMPLETE bit in the TXSTATUS register is a 1.
@@ -953,20 +960,20 @@ static void ipg_nic_txcleanup(struct net_device *dev)
/* Transmit error, increment stat counters. */
if (txstatusdword & IPG_TS_TX_ERROR) {
- IPG_DEBUG_MSG("Transmit error.\n");
+ IPG_DEBUG_MSG("Transmit error\n");
sp->stats.tx_errors++;
}
/* Late collision, re-enable transmitter. */
if (txstatusdword & IPG_TS_LATE_COLLISION) {
- IPG_DEBUG_MSG("Late collision on transmit.\n");
+ IPG_DEBUG_MSG("Late collision on transmit\n");
ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
IPG_MC_RSVD_MASK, MAC_CTRL);
}
/* Maximum collisions, re-enable transmitter. */
if (txstatusdword & IPG_TS_TX_MAX_COLL) {
- IPG_DEBUG_MSG("Maximum collisions on transmit.\n");
+ IPG_DEBUG_MSG("Maximum collisions on transmit\n");
ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
IPG_MC_RSVD_MASK, MAC_CTRL);
}
@@ -975,16 +982,14 @@ static void ipg_nic_txcleanup(struct net_device *dev)
* transmitter.
*/
if (txstatusdword & IPG_TS_TX_UNDERRUN) {
- IPG_DEBUG_MSG("Transmitter underrun.\n");
+ IPG_DEBUG_MSG("Transmitter underrun\n");
sp->stats.tx_fifo_errors++;
ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
IPG_AC_NETWORK | IPG_AC_FIFO);
/* Re-configure after DMA reset. */
if (ipg_io_config(dev) < 0) {
- printk(KERN_INFO
- "%s: Error during re-configuration.\n",
- dev->name);
+ netdev_info(dev, "Error during re-configuration\n");
}
init_tfdlist(dev);
@@ -1063,7 +1068,7 @@ static int ipg_nic_rxrestore(struct net_device *dev)
* Linux system).
*/
if (ipg_get_rxbuff(dev, entry) < 0) {
- IPG_DEBUG_MSG("Cannot allocate new Rx buffer.\n");
+ IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n");
break;
}
@@ -1134,7 +1139,7 @@ static int ipg_nic_rx_check_error(struct net_device *dev)
(IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
- IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
+ IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
(unsigned long) rxfd->rfs);
/* Increment general receive error statistic. */
@@ -1142,13 +1147,13 @@ static int ipg_nic_rx_check_error(struct net_device *dev)
/* Increment detailed receive error statistics. */
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
- IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
+ IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
sp->stats.rx_fifo_errors++;
}
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
- IPG_DEBUG_MSG("RX runt occurred.\n");
+ IPG_DEBUG_MSG("RX runt occurred\n");
sp->stats.rx_length_errors++;
}
@@ -1157,7 +1162,7 @@ static int ipg_nic_rx_check_error(struct net_device *dev)
*/
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
- IPG_DEBUG_MSG("RX alignment error occurred.\n");
+ IPG_DEBUG_MSG("RX alignment error occurred\n");
sp->stats.rx_frame_errors++;
}
@@ -1404,7 +1409,7 @@ static int ipg_nic_rx(struct net_device *dev)
*/
if (framelen > sp->rxfrag_size) {
IPG_DEBUG_MSG
- ("RFS FrameLen > allocated fragment size.\n");
+ ("RFS FrameLen > allocated fragment size\n");
framelen = sp->rxfrag_size;
}
@@ -1414,7 +1419,7 @@ static int ipg_nic_rx(struct net_device *dev)
IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {
- IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
+ IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
(unsigned long int) rxfd->rfs);
/* Increment general receive error statistic. */
@@ -1422,12 +1427,12 @@ static int ipg_nic_rx(struct net_device *dev)
/* Increment detailed receive error statistics. */
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
- IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
+ IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
sp->stats.rx_fifo_errors++;
}
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
- IPG_DEBUG_MSG("RX runt occurred.\n");
+ IPG_DEBUG_MSG("RX runt occurred\n");
sp->stats.rx_length_errors++;
}
@@ -1437,7 +1442,7 @@ static int ipg_nic_rx(struct net_device *dev)
*/
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
- IPG_DEBUG_MSG("RX alignment error occurred.\n");
+ IPG_DEBUG_MSG("RX alignment error occurred\n");
sp->stats.rx_frame_errors++;
}
@@ -1508,7 +1513,7 @@ static int ipg_nic_rx(struct net_device *dev)
rxfd = sp->rxd + entry;
- IPG_DEBUG_MSG("Frame requires multiple RFDs.\n");
+ IPG_DEBUG_MSG("Frame requires multiple RFDs\n");
/* An unexpected event, additional code needed to handle
* properly. So for the time being, just disregard the
@@ -1557,8 +1562,7 @@ static void ipg_reset_after_host_error(struct work_struct *work)
init_tfdlist(dev);
if (ipg_io_config(dev) < 0) {
- printk(KERN_INFO "%s: Cannot recover from PCI error.\n",
- dev->name);
+ netdev_info(dev, "Cannot recover from PCI error\n");
schedule_delayed_work(&sp->task, HZ);
}
}
@@ -1586,7 +1590,7 @@ static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
*/
status = ipg_r16(INT_STATUS_ACK);
- IPG_DEBUG_MSG("IntStatusAck = %4.4x\n", status);
+ IPG_DEBUG_MSG("IntStatusAck = %04x\n", status);
/* Shared IRQ of remove event. */
if (!(status & IPG_IS_RSVD_MASK))
@@ -1599,7 +1603,7 @@ static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
/* If RFDListEnd interrupt, restore all used RFDs. */
if (status & IPG_IS_RFD_LIST_END) {
- IPG_DEBUG_MSG("RFDListEnd Interrupt.\n");
+ IPG_DEBUG_MSG("RFDListEnd Interrupt\n");
/* The RFD list end indicates an RFD was encountered
* with a 0 NextPtr, or with an RFDDone bit set to 1
@@ -1661,21 +1665,20 @@ static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
/* If LinkEvent interrupt, resolve autonegotiation. */
if (status & IPG_IS_LINK_EVENT) {
if (ipg_config_autoneg(dev) < 0)
- printk(KERN_INFO "%s: Auto-negotiation error.\n",
- dev->name);
+ netdev_info(dev, "Auto-negotiation error\n");
}
/* If MACCtrlFrame interrupt, do nothing. */
if (status & IPG_IS_MAC_CTRL_FRAME)
- IPG_DEBUG_MSG("MACCtrlFrame interrupt.\n");
+ IPG_DEBUG_MSG("MACCtrlFrame interrupt\n");
/* If RxComplete interrupt, do nothing. */
if (status & IPG_IS_RX_COMPLETE)
- IPG_DEBUG_MSG("RxComplete interrupt.\n");
+ IPG_DEBUG_MSG("RxComplete interrupt\n");
/* If RxEarly interrupt, do nothing. */
if (status & IPG_IS_RX_EARLY)
- IPG_DEBUG_MSG("RxEarly interrupt.\n");
+ IPG_DEBUG_MSG("RxEarly interrupt\n");
out_enable:
/* Re-enable IPG interrupts. */
@@ -1749,8 +1752,7 @@ static int ipg_nic_open(struct net_device *dev)
rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
dev->name, dev);
if (rc < 0) {
- printk(KERN_INFO "%s: Error when requesting interrupt.\n",
- dev->name);
+ netdev_info(dev, "Error when requesting interrupt\n");
goto out;
}
@@ -1770,8 +1772,7 @@ static int ipg_nic_open(struct net_device *dev)
rc = init_rfdlist(dev);
if (rc < 0) {
- printk(KERN_INFO "%s: Error during configuration.\n",
- dev->name);
+ netdev_info(dev, "Error during configuration\n");
goto err_free_tx_2;
}
@@ -1779,14 +1780,13 @@ static int ipg_nic_open(struct net_device *dev)
rc = ipg_io_config(dev);
if (rc < 0) {
- printk(KERN_INFO "%s: Error during configuration.\n",
- dev->name);
+ netdev_info(dev, "Error during configuration\n");
goto err_release_tfdlist_3;
}
/* Resolve autonegotiation. */
if (ipg_config_autoneg(dev) < 0)
- printk(KERN_INFO "%s: Auto-negotiation error.\n", dev->name);
+ netdev_info(dev, "Auto-negotiation error\n");
/* initialize JUMBO Frame control variable */
sp->jumbo.found_start = 0;
@@ -1961,7 +1961,7 @@ static void ipg_set_phy_default_param(unsigned char rev,
{
unsigned short length;
unsigned char revision;
- unsigned short *phy_param;
+ const unsigned short *phy_param;
unsigned short address, value;
phy_param = &DefaultPhyParam[0];
@@ -2201,7 +2201,7 @@ static const struct net_device_ops ipg_netdev_ops = {
.ndo_stop = ipg_nic_stop,
.ndo_start_xmit = ipg_nic_hard_start_xmit,
.ndo_get_stats = ipg_nic_get_stats,
- .ndo_set_multicast_list = ipg_nic_set_multicast_list,
+ .ndo_set_rx_mode = ipg_nic_set_multicast_list,
.ndo_do_ioctl = ipg_ioctl,
.ndo_tx_timeout = ipg_tx_timeout,
.ndo_change_mtu = ipg_nic_change_mtu,
@@ -2222,7 +2222,7 @@ static int __devinit ipg_probe(struct pci_dev *pdev,
if (rc < 0)
goto out;
- printk(KERN_INFO "%s: %s\n", pci_name(pdev), ipg_brand_name[i]);
+ pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]);
pci_set_master(pdev);
@@ -2230,8 +2230,7 @@ static int __devinit ipg_probe(struct pci_dev *pdev,
if (rc < 0) {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
- printk(KERN_ERR "%s: DMA config failed.\n",
- pci_name(pdev));
+ pr_err("%s: DMA config failed\n", pci_name(pdev));
goto err_disable_0;
}
}
@@ -2241,7 +2240,7 @@ static int __devinit ipg_probe(struct pci_dev *pdev,
*/
dev = alloc_etherdev(sizeof(struct ipg_nic_private));
if (!dev) {
- printk(KERN_ERR "%s: alloc_etherdev failed\n", pci_name(pdev));
+ pr_err("%s: alloc_etherdev failed\n", pci_name(pdev));
rc = -ENOMEM;
goto err_disable_0;
}
@@ -2267,7 +2266,7 @@ static int __devinit ipg_probe(struct pci_dev *pdev,
ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
if (!ioaddr) {
- printk(KERN_ERR "%s cannot map MMIO\n", pci_name(pdev));
+ pr_err("%s: cannot map MMIO\n", pci_name(pdev));
rc = -EIO;
goto err_release_regions_2;
}
@@ -2289,7 +2288,7 @@ static int __devinit ipg_probe(struct pci_dev *pdev,
if (rc < 0)
goto err_unmap_3;
- printk(KERN_INFO "Ethernet device registered as: %s\n", dev->name);
+ netdev_info(dev, "Ethernet device registered\n");
out:
return rc;
diff --git a/drivers/net/ipg.h b/drivers/net/ethernet/icplus/ipg.h
index 6ce027355fc..6ce027355fc 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ethernet/icplus/ipg.h
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
new file mode 100644
index 00000000000..61029dc7fa6
--- /dev/null
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -0,0 +1,222 @@
+#
+# Intel network device configuration
+#
+
+config NET_VENDOR_INTEL
+ bool "Intel devices"
+ default y
+ depends on PCI || PCI_MSI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Intel cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_INTEL
+
+config E100
+ tristate "Intel(R) PRO/100+ support"
+ depends on PCI
+ select NET_CORE
+ select MII
+ ---help---
+ This driver supports Intel(R) PRO/100 family of adapters.
+ To verify that your adapter is supported, find the board ID number
+ on the adapter. Look for a label that has a barcode and a number
+ in the format 123456-001 (six digits hyphen three digits).
+
+ Use the above information and the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ to identify the adapter.
+
+ For the latest Intel PRO/100 network driver for Linux, see:
+
+ <http://www.intel.com/p/en_US/support/highlights/network/pro100plus>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e100.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called e100.
+
+config E1000
+ tristate "Intel(R) PRO/1000 Gigabit Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) PRO/1000 gigabit ethernet family of
+ adapters. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called e1000.
+
+config E1000E
+ tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
+ depends on PCI && (!SPARC32 || BROKEN)
+ select CRC32
+ ---help---
+ This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
+ ethernet family of adapters. For PCI or PCI-X e1000 adapters,
+ use the regular e1000 driver For more information on how to
+ identify your adapter, go to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called e1000e.
+
+config IGB
+ tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) 82575/82576 gigabit ethernet family of
+ adapters. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called igb.
+
+config IGB_DCA
+ bool "Direct Cache Access (DCA) Support"
+ default y
+ depends on IGB && DCA && !(IGB=y && DCA=m)
+ ---help---
+ Say Y here if you want to use Direct Cache Access (DCA) in the
+ driver. DCA is a method for warming the CPU cache before data
+ is used, with the intent of lessening the impact of cache misses.
+
+config IGBVF
+ tristate "Intel(R) 82576 Virtual Function Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) 82576 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called igbvf.
+
+config IXGB
+ tristate "Intel(R) PRO/10GbE support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) PRO/10GbE family of adapters for
+ PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
+ instead. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgb.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ixgb.
+
+config IXGBE
+ tristate "Intel(R) 10GbE PCI Express adapters support"
+ depends on PCI && INET
+ select MDIO
+ ---help---
+ This driver supports Intel(R) 10GbE PCI Express family of
+ adapters. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called ixgbe.
+
+config IXGBE_DCA
+ bool "Direct Cache Access (DCA) Support"
+ default y
+ depends on IXGBE && DCA && !(IXGBE=y && DCA=m)
+ ---help---
+ Say Y here if you want to use Direct Cache Access (DCA) in the
+ driver. DCA is a method for warming the CPU cache before data
+ is used, with the intent of lessening the impact of cache misses.
+
+config IXGBE_DCB
+ bool "Data Center Bridging (DCB) Support"
+ default n
+ depends on IXGBE && DCB
+ ---help---
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver.
+
+ If unsure, say N.
+
+config IXGBEVF
+ tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) 82599 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
+
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgbevf.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ixgbevf. MSI-X interrupt support is required
+ for this driver to work correctly.
+
+endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
new file mode 100644
index 00000000000..c8210e68866
--- /dev/null
+++ b/drivers/net/ethernet/intel/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the Intel network device drivers.
+#
+
+obj-$(CONFIG_E100) += e100.o
+obj-$(CONFIG_E1000) += e1000/
+obj-$(CONFIG_E1000E) += e1000e/
+obj-$(CONFIG_IGB) += igb/
+obj-$(CONFIG_IGBVF) += igbvf/
+obj-$(CONFIG_IXGBE) += ixgbe/
+obj-$(CONFIG_IXGBEVF) += ixgbevf/
+obj-$(CONFIG_IXGB) += ixgb/
diff --git a/drivers/net/e100.c b/drivers/net/ethernet/intel/e100.c
index c1352c60c29..ae17cd1a907 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2502,12 +2502,8 @@ static void e100_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = rfds->max;
ring->tx_max_pending = cbs->max;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
ring->rx_pending = rfds->count;
ring->tx_pending = cbs->count;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
}
static int e100_set_ringparam(struct net_device *netdev,
@@ -2738,7 +2734,7 @@ static const struct net_device_ops e100_netdev_ops = {
.ndo_stop = e100_close,
.ndo_start_xmit = e100_xmit_frame,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = e100_set_multicast_list,
+ .ndo_set_rx_mode = e100_set_multicast_list,
.ndo_set_mac_address = e100_set_mac_address,
.ndo_change_mtu = e100_change_mtu,
.ndo_do_ioctl = e100_do_ioctl,
diff --git a/drivers/net/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile
index 4a6ab152245..4a6ab152245 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/ethernet/intel/e1000/Makefile
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
new file mode 100644
index 00000000000..1e1596990b5
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -0,0 +1,363 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/capability.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#include <net/checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#define BAR_0 0
+#define BAR_1 1
+#define BAR_5 5
+
+#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+struct e1000_adapter;
+
+#include "e1000_hw.h"
+
+#define E1000_MAX_INTR 10
+
+/* TX/RX descriptor defines */
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 256
+#define E1000_MIN_TXD 48
+#define E1000_MAX_82544_TXD 4096
+
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 256
+#define E1000_MIN_RXD 48
+#define E1000_MAX_82544_RXD 4096
+
+#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
+#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_128 128 /* Used for packet split */
+#define E1000_RXBUFFER_256 256 /* Used for packet split */
+#define E1000_RXBUFFER_512 512
+#define E1000_RXBUFFER_1024 1024
+#define E1000_RXBUFFER_2048 2048
+#define E1000_RXBUFFER_4096 4096
+#define E1000_RXBUFFER_8192 8192
+#define E1000_RXBUFFER_16384 16384
+
+/* SmartSpeed delimiters */
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX 15
+
+/* Packet Buffer allocations */
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
+
+/* Flow Control Watermarks */
+#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
+
+#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+#define E1000_TX_QUEUE_WAKE 16
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_APME 0x0400
+
+#ifndef E1000_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define E1000_MASTER_SLAVE e1000_ms_hw_default
+#endif
+
+#define E1000_MNG_VLAN_NONE (-1)
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct e1000_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+ unsigned int segs;
+ unsigned int bytecount;
+ u16 mapped_as_page;
+};
+
+struct e1000_tx_ring {
+ /* pointer to the descriptor ring memory */
+ void *desc;
+ /* physical address of the descriptor ring */
+ dma_addr_t dma;
+ /* length of descriptor ring in bytes */
+ unsigned int size;
+ /* number of descriptors in the ring */
+ unsigned int count;
+ /* next descriptor to associate a buffer with */
+ unsigned int next_to_use;
+ /* next descriptor to check for DD status bit */
+ unsigned int next_to_clean;
+ /* array of buffer information structs */
+ struct e1000_buffer *buffer_info;
+
+ u16 tdh;
+ u16 tdt;
+ bool last_tx_tso;
+};
+
+struct e1000_rx_ring {
+ /* pointer to the descriptor ring memory */
+ void *desc;
+ /* physical address of the descriptor ring */
+ dma_addr_t dma;
+ /* length of descriptor ring in bytes */
+ unsigned int size;
+ /* number of descriptors in the ring */
+ unsigned int count;
+ /* next descriptor to associate a buffer with */
+ unsigned int next_to_use;
+ /* next descriptor to check for DD status bit */
+ unsigned int next_to_clean;
+ /* array of buffer information structs */
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *rx_skb_top;
+
+ /* cpu for rx queue */
+ int cpu;
+
+ u16 rdh;
+ u16 rdt;
+};
+
+#define E1000_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) \
+ ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
+
+#define E1000_RX_DESC_EXT(R, i) \
+ (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
+#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
+
+/* board specific private data structure */
+
+struct e1000_adapter {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 mng_vlan_id;
+ u32 bd_number;
+ u32 rx_buffer_len;
+ u32 wol;
+ u32 smartspeed;
+ u32 en_mng_pt;
+ u16 link_speed;
+ u16 link_duplex;
+ spinlock_t stats_lock;
+ unsigned int total_tx_bytes;
+ unsigned int total_tx_packets;
+ unsigned int total_rx_bytes;
+ unsigned int total_rx_packets;
+ /* Interrupt Throttle Rate */
+ u32 itr;
+ u32 itr_setting;
+ u16 tx_itr;
+ u16 rx_itr;
+
+ u8 fc_autoneg;
+
+ /* TX */
+ struct e1000_tx_ring *tx_ring; /* One per active queue */
+ unsigned int restart_queue;
+ u32 txd_cmd;
+ u32 tx_int_delay;
+ u32 tx_abs_int_delay;
+ u32 gotcl;
+ u64 gotcl_old;
+ u64 tpt_old;
+ u64 colc_old;
+ u32 tx_timeout_count;
+ u32 tx_fifo_head;
+ u32 tx_head_addr;
+ u32 tx_fifo_size;
+ u8 tx_timeout_factor;
+ atomic_t tx_fifo_stall;
+ bool pcix_82544;
+ bool detect_tx_hung;
+
+ /* RX */
+ bool (*clean_rx)(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
+ void (*alloc_rx_buf)(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
+ struct e1000_rx_ring *rx_ring; /* One per active queue */
+ struct napi_struct napi;
+
+ int num_tx_queues;
+ int num_rx_queues;
+
+ u64 hw_csum_err;
+ u64 hw_csum_good;
+ u32 alloc_rx_buff_failed;
+ u32 rx_int_delay;
+ u32 rx_abs_int_delay;
+ bool rx_csum;
+ u32 gorcl;
+ u64 gorcl_old;
+
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ /* structs defined in e1000_hw.h */
+ struct e1000_hw hw;
+ struct e1000_hw_stats stats;
+ struct e1000_phy_info phy_info;
+ struct e1000_phy_stats phy_stats;
+
+ u32 test_icr;
+ struct e1000_tx_ring test_tx_ring;
+ struct e1000_rx_ring test_rx_ring;
+
+ int msg_enable;
+
+ /* to not mess up cache alignment, always add to the bottom */
+ bool tso_force;
+ bool smart_power_down; /* phy smart power down */
+ bool quad_port_a;
+ unsigned long flags;
+ u32 eeprom_wol;
+
+ /* for ioport free */
+ int bars;
+ int need_ioport;
+
+ bool discarding;
+
+ struct work_struct reset_task;
+ struct delayed_work watchdog_task;
+ struct delayed_work fifo_stall_task;
+ struct delayed_work phy_info_task;
+
+ struct mutex mutex;
+};
+
+enum e1000_state_t {
+ __E1000_TESTING,
+ __E1000_RESETTING,
+ __E1000_DOWN
+};
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+#define e_dbg(format, arg...) \
+ netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
+#define e_err(msglvl, format, arg...) \
+ netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_info(msglvl, format, arg...) \
+ netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_warn(msglvl, format, arg...) \
+ netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_notice(msglvl, format, arg...) \
+ netif_notice(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_dev_info(format, arg...) \
+ dev_info(&adapter->pdev->dev, format, ## arg)
+#define e_dev_warn(format, arg...) \
+ dev_warn(&adapter->pdev->dev, format, ## arg)
+#define e_dev_err(format, arg...) \
+ dev_err(&adapter->pdev->dev, format, ## arg)
+
+extern char e1000_driver_name[];
+extern const char e1000_driver_version[];
+
+extern int e1000_up(struct e1000_adapter *adapter);
+extern void e1000_down(struct e1000_adapter *adapter);
+extern void e1000_reinit_locked(struct e1000_adapter *adapter);
+extern void e1000_reset(struct e1000_adapter *adapter);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
+extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_update_stats(struct e1000_adapter *adapter);
+extern bool e1000_has_link(struct e1000_adapter *adapter);
+extern void e1000_power_up_phy(struct e1000_adapter *);
+extern void e1000_set_ethtool_ops(struct net_device *netdev);
+extern void e1000_check_options(struct e1000_adapter *adapter);
+extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
+
+#endif /* _E1000_H_ */
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 5548d464261..2b223ac99c4 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -540,12 +540,8 @@ static void e1000_get_ringparam(struct net_device *netdev,
E1000_MAX_82544_RXD;
ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
E1000_MAX_82544_TXD;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
ring->rx_pending = rxdr->count;
ring->tx_pending = txdr->count;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
}
static int e1000_set_ringparam(struct net_device *netdev,
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index a5a89ecb6f3..36ee76bf4cb 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -5385,7 +5385,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
if (ret_val)
return ret_val;
- mdelay(20);
+ msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
IGP01E1000_IEEE_FORCE_GIGA);
@@ -5413,7 +5413,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
if (ret_val)
return ret_val;
- mdelay(20);
+ msleep(20);
/* Now enable the transmitter */
ret_val =
@@ -5440,7 +5440,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
if (ret_val)
return ret_val;
- mdelay(20);
+ msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
IGP01E1000_IEEE_FORCE_GIGA);
@@ -5457,7 +5457,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
if (ret_val)
return ret_val;
- mdelay(20);
+ msleep(20);
/* Now enable the transmitter */
ret_val =
@@ -5750,26 +5750,26 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0)
break;
- mdelay(100);
+ msleep(100);
}
/* Recommended delay time after link has been lost */
- mdelay(1000);
+ msleep(1000);
/* Now we will re-enable th transmitter on the PHY */
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
if (ret_val)
return ret_val;
- mdelay(50);
+ msleep(50);
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
if (ret_val)
return ret_val;
- mdelay(50);
+ msleep(50);
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
if (ret_val)
return ret_val;
- mdelay(50);
+ msleep(50);
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
if (ret_val)
return ret_val;
@@ -5794,7 +5794,7 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
if (mii_status_reg & MII_SR_LINK_STATUS)
break;
- mdelay(100);
+ msleep(100);
}
return E1000_SUCCESS;
}
@@ -5825,6 +5825,6 @@ static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
e_dbg("e1000_get_phy_cfg_done");
- mdelay(10);
+ msleep(10);
return E1000_SUCCESS;
}
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
index 5c9a8403668..5c9a8403668 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index f97afda941d..cf480b55462 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -131,10 +131,8 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
-static void e1000_update_phy_info(unsigned long data);
static void e1000_update_phy_info_task(struct work_struct *work);
-static void e1000_watchdog(unsigned long data);
-static void e1000_82547_tx_fifo_stall(unsigned long data);
+static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
struct net_device *netdev);
@@ -487,12 +485,21 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
- mdelay(1);
+ msleep(1);
}
out:
return;
}
+static void e1000_down_and_stop(struct e1000_adapter *adapter)
+{
+ set_bit(__E1000_DOWN, &adapter->flags);
+ cancel_work_sync(&adapter->reset_task);
+ cancel_delayed_work_sync(&adapter->watchdog_task);
+ cancel_delayed_work_sync(&adapter->phy_info_task);
+ cancel_delayed_work_sync(&adapter->fifo_stall_task);
+}
+
void e1000_down(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -522,13 +529,9 @@ void e1000_down(struct e1000_adapter *adapter)
/*
* Setting DOWN must be after irq_disable to prevent
* a screaming interrupt. Setting DOWN also prevents
- * timers and tasks from rescheduling.
+ * tasks from rescheduling.
*/
- set_bit(__E1000_DOWN, &adapter->flags);
-
- del_timer_sync(&adapter->tx_fifo_stall_timer);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ e1000_down_and_stop(adapter);
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -543,10 +546,10 @@ static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
- rtnl_lock();
+ mutex_lock(&adapter->mutex);
e1000_down(adapter);
e1000_up(adapter);
- rtnl_unlock();
+ mutex_unlock(&adapter->mutex);
clear_bit(__E1000_RESETTING, &adapter->flags);
}
@@ -1080,6 +1083,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_HW_CSUM;
netdev->vlan_features |= NETIF_F_SG;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
/* initialize eeprom parameters */
@@ -1118,21 +1123,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (!is_valid_ether_addr(netdev->perm_addr))
e_err(probe, "Invalid MAC Address\n");
- init_timer(&adapter->tx_fifo_stall_timer);
- adapter->tx_fifo_stall_timer.function = e1000_82547_tx_fifo_stall;
- adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
-
- init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = e1000_watchdog;
- adapter->watchdog_timer.data = (unsigned long) adapter;
- init_timer(&adapter->phy_info_timer);
- adapter->phy_info_timer.function = e1000_update_phy_info;
- adapter->phy_info_timer.data = (unsigned long)adapter;
-
- INIT_WORK(&adapter->fifo_stall_task, e1000_82547_tx_fifo_stall_task);
+ INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
+ INIT_DELAYED_WORK(&adapter->fifo_stall_task,
+ e1000_82547_tx_fifo_stall_task);
+ INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
INIT_WORK(&adapter->reset_task, e1000_reset_task);
- INIT_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
e1000_check_options(adapter);
@@ -1277,13 +1273,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- set_bit(__E1000_DOWN, &adapter->flags);
- del_timer_sync(&adapter->tx_fifo_stall_timer);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
-
- cancel_work_sync(&adapter->reset_task);
-
+ e1000_down_and_stop(adapter);
e1000_release_manageability(adapter);
unregister_netdev(netdev);
@@ -1327,6 +1317,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
spin_lock_init(&adapter->stats_lock);
+ mutex_init(&adapter->mutex);
set_bit(__E1000_DOWN, &adapter->flags);
@@ -1367,7 +1358,7 @@ static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
* The open entry point is called when a network interface is made
* active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
+ * handler is registered with the OS, the watchdog task is started,
* and the stack is notified that the interface is ready.
**/
@@ -1812,8 +1803,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
- rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
+ E1000_RCTL_RDMTS_HALF |
(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
if (hw->tbi_compatibility_on == 1)
@@ -1915,7 +1906,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
}
/* Enable Receives */
- ew32(RCTL, rctl);
+ ew32(RCTL, rctl | E1000_RCTL_EN);
}
/**
@@ -2329,35 +2320,23 @@ static void e1000_set_rx_mode(struct net_device *netdev)
kfree(mcarray);
}
-/* Need to wait a few seconds after link up to get diagnostic information from
- * the phy */
-
-static void e1000_update_phy_info(unsigned long data)
-{
- struct e1000_adapter *adapter = (struct e1000_adapter *)data;
- schedule_work(&adapter->phy_info_task);
-}
-
+/**
+ * e1000_update_phy_info_task - get phy info
+ * @work: work struct contained inside adapter struct
+ *
+ * Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
static void e1000_update_phy_info_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter,
- phy_info_task);
- struct e1000_hw *hw = &adapter->hw;
-
- rtnl_lock();
- e1000_phy_get_info(hw, &adapter->phy_info);
- rtnl_unlock();
-}
-
-/**
- * e1000_82547_tx_fifo_stall - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
- **/
-static void e1000_82547_tx_fifo_stall(unsigned long data)
-{
- struct e1000_adapter *adapter = (struct e1000_adapter *)data;
- schedule_work(&adapter->fifo_stall_task);
+ struct e1000_adapter,
+ phy_info_task.work);
+ if (test_bit(__E1000_DOWN, &adapter->flags))
+ return;
+ mutex_lock(&adapter->mutex);
+ e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+ mutex_unlock(&adapter->mutex);
}
/**
@@ -2367,13 +2346,15 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter,
- fifo_stall_task);
+ struct e1000_adapter,
+ fifo_stall_task.work);
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
u32 tctl;
- rtnl_lock();
+ if (test_bit(__E1000_DOWN, &adapter->flags))
+ return;
+ mutex_lock(&adapter->mutex);
if (atomic_read(&adapter->tx_fifo_stall)) {
if ((er32(TDT) == er32(TDH)) &&
(er32(TDFT) == er32(TDFH)) &&
@@ -2391,10 +2372,10 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
atomic_set(&adapter->tx_fifo_stall, 0);
netif_wake_queue(netdev);
} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
- mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+ schedule_delayed_work(&adapter->fifo_stall_task, 1);
}
}
- rtnl_unlock();
+ mutex_unlock(&adapter->mutex);
}
bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2435,17 +2416,23 @@ bool e1000_has_link(struct e1000_adapter *adapter)
}
/**
- * e1000_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * e1000_watchdog - work function
+ * @work: work struct contained inside adapter struct
**/
-static void e1000_watchdog(unsigned long data)
+static void e1000_watchdog(struct work_struct *work)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ watchdog_task.work);
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct e1000_tx_ring *txdr = adapter->tx_ring;
u32 link, tctl;
+ if (test_bit(__E1000_DOWN, &adapter->flags))
+ return;
+
+ mutex_lock(&adapter->mutex);
link = e1000_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link)
goto link_up;
@@ -2491,8 +2478,8 @@ static void e1000_watchdog(unsigned long data)
netif_carrier_on(netdev);
if (!test_bit(__E1000_DOWN, &adapter->flags))
- mod_timer(&adapter->phy_info_timer,
- round_jiffies(jiffies + 2 * HZ));
+ schedule_delayed_work(&adapter->phy_info_task,
+ 2 * HZ);
adapter->smartspeed = 0;
}
} else {
@@ -2504,8 +2491,8 @@ static void e1000_watchdog(unsigned long data)
netif_carrier_off(netdev);
if (!test_bit(__E1000_DOWN, &adapter->flags))
- mod_timer(&adapter->phy_info_timer,
- round_jiffies(jiffies + 2 * HZ));
+ schedule_delayed_work(&adapter->phy_info_task,
+ 2 * HZ);
}
e1000_smartspeed(adapter);
@@ -2534,8 +2521,8 @@ link_up:
* (Do the reset outside of interrupt context). */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
- /* return immediately since reset is imminent */
- return;
+ /* exit immediately since reset is imminent */
+ goto unlock;
}
}
@@ -2561,10 +2548,12 @@ link_up:
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = true;
- /* Reset the timer */
+ /* Reschedule the task */
if (!test_bit(__E1000_DOWN, &adapter->flags))
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 2 * HZ));
+ schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
+
+unlock:
+ mutex_unlock(&adapter->mutex);
}
enum latency_range {
@@ -2846,7 +2835,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info;
unsigned int len = skb_headlen(skb);
unsigned int offset = 0, size, count = 0, i;
- unsigned int f;
+ unsigned int f, bytecount, segs;
i = tx_ring->next_to_use;
@@ -2905,13 +2894,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
- offset = frag->page_offset;
+ len = skb_frag_size(frag);
+ offset = 0;
while (len) {
+ unsigned long bufend;
i++;
if (unlikely(i == tx_ring->count))
i = 0;
@@ -2925,18 +2915,19 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
/* Workaround for potential 82544 hang in PCI-X.
* Avoid terminating buffers within evenly-aligned
* dwords. */
+ bufend = (unsigned long)
+ page_to_phys(skb_frag_page(frag));
+ bufend += offset + size - 1;
if (unlikely(adapter->pcix_82544 &&
- !((unsigned long)(page_to_phys(frag->page) + offset
- + size - 1) & 4) &&
- size > 4))
+ !(bufend & 4) &&
+ size > 4))
size -= 4;
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
- buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
- offset, size,
- DMA_TO_DEVICE);
+ buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
+ offset, size, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
@@ -2947,7 +2938,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
}
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].segs = segs;
+ tx_ring->buffer_info[i].bytecount = bytecount;
tx_ring->buffer_info[first].next_to_watch = i;
return count;
@@ -3186,7 +3183,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+ count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
max_txd_pwr);
if (adapter->pcix_82544)
count += nr_frags;
@@ -3196,14 +3193,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
return NETDEV_TX_BUSY;
- if (unlikely(hw->mac_type == e1000_82547)) {
- if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
- netif_stop_queue(netdev);
- if (!test_bit(__E1000_DOWN, &adapter->flags))
- mod_timer(&adapter->tx_fifo_stall_timer,
- jiffies + 1);
- return NETDEV_TX_BUSY;
- }
+ if (unlikely((hw->mac_type == e1000_82547) &&
+ (e1000_82547_fifo_workaround(adapter, skb)))) {
+ netif_stop_queue(netdev);
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ schedule_delayed_work(&adapter->fifo_stall_task, 1);
+ return NETDEV_TX_BUSY;
}
if (vlan_tx_tag_present(skb)) {
@@ -3265,6 +3260,8 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter =
container_of(work, struct e1000_adapter, reset_task);
+ if (test_bit(__E1000_DOWN, &adapter->flags))
+ return;
e1000_reinit_safe(adapter);
}
@@ -3273,7 +3270,7 @@ static void e1000_reset_task(struct work_struct *work)
* @netdev: network interface device structure
*
* Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
+ * The statistics are actually updated from the watchdog.
**/
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
@@ -3541,7 +3538,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
hw->get_link_status = 1;
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->flags))
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ schedule_delayed_work(&adapter->watchdog_task, 1);
}
/* disable interrupts, without the synchronize_irq bit */
@@ -3621,14 +3618,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
cleaned = (i == eop);
if (cleaned) {
- struct sk_buff *skb = buffer_info->skb;
- unsigned int segs, bytecount;
- segs = skb_shinfo(skb)->gso_segs ?: 1;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_tx_packets += segs;
- total_tx_bytes += bytecount;
+ total_tx_packets += buffer_info->segs;
+ total_tx_bytes += buffer_info->bytecount;
}
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
tx_desc->upper.data = 0;
@@ -3746,7 +3737,7 @@ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
bi->page = NULL;
skb->len += length;
skb->data_len += length;
- skb->truesize += length;
+ skb->truesize += PAGE_SIZE;
}
/**
@@ -4725,6 +4716,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
+ mutex_lock(&adapter->mutex);
+
if (netif_running(netdev)) {
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
@@ -4732,8 +4725,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
- if (retval)
+ if (retval) {
+ mutex_unlock(&adapter->mutex);
return retval;
+ }
#endif
status = er32(STATUS);
@@ -4788,6 +4783,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (netif_running(netdev))
e1000_free_irq(adapter);
+ mutex_unlock(&adapter->mutex);
+
pci_disable_device(pdev);
return 0;
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/ethernet/intel/e1000/e1000_osdep.h
index 33e7c45a4fe..33e7c45a4fe 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_osdep.h
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 1301eba8b57..1301eba8b57 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
new file mode 100644
index 00000000000..e1159e54334
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -0,0 +1,1515 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * 80003ES2LAN Gigabit Ethernet Controller (Copper)
+ * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
+ */
+
+#include "e1000.h"
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
+
+#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000
+ /* 1=Reverse Auto-Negotiation */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26) */
+#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M
+ 1 = 50-80M
+ 2 = 80-110M
+ 3 = 110-140M
+ 4 = >140M */
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY 0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
+ /* 1=Enable SERDES Electrical Idle */
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
+
+/*
+ * A table for the GG82563 cable length where the range is defined
+ * with a lower bound at "index" and the upper bound at
+ * "index + 5".
+ */
+static const u16 e1000_gg82563_cable_length_table[] = {
+ 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+#define GG82563_CABLE_LENGTH_TABLE_SIZE \
+ ARRAY_SIZE(e1000_gg82563_cable_length_table)
+
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
+static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 data);
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
+
+/**
+ * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ return 0;
+ } else {
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
+ }
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+ phy->type = e1000_phy_gg82563;
+
+ /* This can only be done after all function pointers are setup. */
+ ret_val = e1000e_get_phy_id(hw);
+
+ /* Verify phy id */
+ if (phy->id != GG82563_E_PHY_ID)
+ return -E1000_ERR_PHY;
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+ u16 size;
+
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+ break;
+ }
+
+ nvm->type = e1000_nvm_eeprom_spi;
+
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+
+ /*
+ * Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* EEPROM access above 16k is unsupported */
+ if (size > 14)
+ size = 14;
+ nvm->word_size = 1 << size;
+
+ return 0;
+}
+
+/**
+ * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_mac_operations *func = &mac->ops;
+
+ /* Set media type */
+ switch (adapter->pdev->device) {
+ case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ break;
+ default:
+ hw->phy.media_type = e1000_media_type_copper;
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ (er32(FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
+ /* Adaptive IFS not supported */
+ mac->adaptive_ifs = false;
+
+ /* check for link */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
+ func->check_for_link = e1000e_check_for_copper_link;
+ break;
+ case e1000_media_type_fiber:
+ func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
+ func->check_for_link = e1000e_check_for_fiber_link;
+ break;
+ case e1000_media_type_internal_serdes:
+ func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
+ func->check_for_link = e1000e_check_for_serdes_link;
+ break;
+ default:
+ return -E1000_ERR_CONFIG;
+ break;
+ }
+
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
+ return 0;
+}
+
+static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ s32 rc;
+
+ rc = e1000_init_mac_params_80003es2lan(adapter);
+ if (rc)
+ return rc;
+
+ rc = e1000_init_nvm_params_80003es2lan(hw);
+ if (rc)
+ return rc;
+
+ rc = e1000_init_phy_params_80003es2lan(hw);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to acquire access rights to the correct PHY.
+ **/
+static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+ return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_release_phy_80003es2lan - Release rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to release access rights to the correct PHY.
+ **/
+static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+ e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the semaphore to access the Kumeran interface.
+ *
+ **/
+static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ mask = E1000_SWFW_CSR_SM;
+
+ return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register
+ * @hw: pointer to the HW structure
+ *
+ * Release the semaphore used to access the Kumeran interface
+ **/
+static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ mask = E1000_SWFW_CSR_SM;
+
+ e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the semaphore to access the EEPROM.
+ **/
+static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000e_acquire_nvm(hw);
+
+ if (ret_val)
+ e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
+ * @hw: pointer to the HW structure
+ *
+ * Release the semaphore used to access the EEPROM.
+ **/
+static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
+{
+ e1000e_release_nvm(hw);
+ e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 i = 0;
+ s32 timeout = 50;
+
+ while (i < timeout) {
+ if (e1000e_get_hw_semaphore(hw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ swfw_sync = er32(SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /*
+ * Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000e_put_hw_semaphore(hw);
+ mdelay(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ return -E1000_ERR_SWFW_SYNC;
+ }
+
+ swfw_sync |= swmask;
+ ew32(SW_FW_SYNC, swfw_sync);
+
+ e1000e_put_hw_semaphore(hw);
+
+ return 0;
+}
+
+/**
+ * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ while (e1000e_get_hw_semaphore(hw) != 0)
+ ; /* Empty */
+
+ swfw_sync = er32(SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ ew32(SW_FW_SYNC, swfw_sync);
+
+ e1000e_put_hw_semaphore(hw);
+}
+
+/**
+ * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the register to read
+ * @data: pointer to the data returned from the operation
+ *
+ * Read the GG82563 PHY register.
+ **/
+static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u32 page_select;
+ u16 temp;
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Select Configuration Page */
+ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ page_select = GG82563_PHY_PAGE_SELECT;
+ } else {
+ /*
+ * Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ page_select = GG82563_PHY_PAGE_SELECT_ALT;
+ }
+
+ temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
+ if (ret_val) {
+ e1000_release_phy_80003es2lan(hw);
+ return ret_val;
+ }
+
+ if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) {
+ /*
+ * The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ udelay(200);
+
+ /* ...and verify the command was successful. */
+ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
+
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ ret_val = -E1000_ERR_PHY;
+ e1000_release_phy_80003es2lan(hw);
+ return ret_val;
+ }
+
+ udelay(200);
+
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ udelay(200);
+ } else {
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ }
+
+ e1000_release_phy_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the register to read
+ * @data: value to write to the register
+ *
+ * Write to the GG82563 PHY register.
+ **/
+static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset, u16 data)
+{
+ s32 ret_val;
+ u32 page_select;
+ u16 temp;
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Select Configuration Page */
+ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ page_select = GG82563_PHY_PAGE_SELECT;
+ } else {
+ /*
+ * Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ page_select = GG82563_PHY_PAGE_SELECT_ALT;
+ }
+
+ temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
+ if (ret_val) {
+ e1000_release_phy_80003es2lan(hw);
+ return ret_val;
+ }
+
+ if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) {
+ /*
+ * The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ udelay(200);
+
+ /* ...and verify the command was successful. */
+ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
+
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ e1000_release_phy_80003es2lan(hw);
+ return -E1000_ERR_PHY;
+ }
+
+ udelay(200);
+
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ udelay(200);
+ } else {
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ }
+
+ e1000_release_phy_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_80003es2lan - Write to ESB2 NVM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the register to read
+ * @words: number of words to write
+ * @data: buffer of data to write to the NVM
+ *
+ * Write "words" of data to the ESB2 NVM.
+ **/
+static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ return e1000e_write_nvm_spi(hw, offset, words, data);
+}
+
+/**
+ * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
+ * @hw: pointer to the HW structure
+ *
+ * Wait a specific amount of time for manageability processes to complete.
+ * This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+ if (hw->bus.func == 1)
+ mask = E1000_NVM_CFG_DONE_PORT_1;
+
+ while (timeout) {
+ if (er32(EEMNGCTL) & mask)
+ break;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ if (!timeout) {
+ e_dbg("MNG configuration cycle has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
+ * @hw: pointer to the HW structure
+ *
+ * Force the speed and duplex settings onto the PHY. This is a
+ * function pointer entry point called by the phy module.
+ **/
+static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ /*
+ * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
+ ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("GG82563 PSCR: %X\n", phy_data);
+
+ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ /* Reset the phy to commit changes. */
+ phy_data |= MII_CR_RESET;
+
+ ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+
+ if (hw->phy.autoneg_wait_to_complete) {
+ e_dbg("Waiting for forced speed/duplex link "
+ "on GG82563 phy.\n");
+
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ /*
+ * We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+ ret_val = e1000e_phy_reset_dsp(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Try once more */
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Resetting the phy means we need to verify the TX_CLK corresponds
+ * to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
+ */
+ phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+ if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
+ phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
+ else
+ phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
+
+ /*
+ * In addition, we must re-enable CRS on Tx for both half and full
+ * duplex.
+ */
+ phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+ ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_80003es2lan - Set approximate cable length
+ * @hw: pointer to the HW structure
+ *
+ * Find the approximate cable length as measured by the GG82563 PHY.
+ * This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 phy_data, index;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
+ if (ret_val)
+ goto out;
+
+ index = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+ if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ phy->min_cable_length = e1000_gg82563_cable_length_table[index];
+ phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_link_up_info_80003es2lan - Report speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to speed buffer
+ * @duplex: pointer to duplex buffer
+ *
+ * Retrieve the current speed and duplex configuration.
+ **/
+static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ ret_val = e1000e_get_speed_and_duplex_copper(hw,
+ speed,
+ duplex);
+ hw->phy.ops.cfg_on_link_up(hw);
+ } else {
+ ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
+ speed,
+ duplex);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_80003es2lan - Reset the ESB2 controller
+ * @hw: pointer to the HW structure
+ *
+ * Perform a global reset to the ESB2 controller.
+ **/
+static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000e_disable_pcie_master(hw);
+ if (ret_val)
+ e_dbg("PCI-E Master disable polling has failed.\n");
+
+ e_dbg("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ e1e_flush();
+
+ usleep_range(10000, 20000);
+
+ ctrl = er32(CTRL);
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ e_dbg("Issuing a global reset to MAC\n");
+ ew32(CTRL, ctrl | E1000_CTRL_RST);
+ e1000_release_phy_80003es2lan(hw);
+
+ ret_val = e1000e_get_auto_rd_done(hw);
+ if (ret_val)
+ /* We don't want to continue accessing MAC registers. */
+ return ret_val;
+
+ /* Clear any pending interrupt events. */
+ ew32(IMC, 0xffffffff);
+ er32(ICR);
+
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_80003es2lan - Initialize the ESB2 controller
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
+ **/
+static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 reg_data;
+ s32 ret_val;
+ u16 kum_reg_data;
+ u16 i;
+
+ e1000_initialize_hw_bits_80003es2lan(hw);
+
+ /* Initialize identification LED */
+ ret_val = e1000e_id_led_init(hw);
+ if (ret_val)
+ e_dbg("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+
+ /* Disabling VLAN filtering */
+ e_dbg("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address. */
+ e1000e_init_rx_addrs(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ e_dbg("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = e1000e_setup_link(hw);
+
+ /* Disable IBIST slave mode (far-end loopback) */
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+
+ /* Set the transmit descriptor write-back policy */
+ reg_data = er32(TXDCTL(0));
+ reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+ ew32(TXDCTL(0), reg_data);
+
+ /* ...for both queues. */
+ reg_data = er32(TXDCTL(1));
+ reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+ ew32(TXDCTL(1), reg_data);
+
+ /* Enable retransmit on late collisions */
+ reg_data = er32(TCTL);
+ reg_data |= E1000_TCTL_RTLC;
+ ew32(TCTL, reg_data);
+
+ /* Configure Gigabit Carry Extend Padding */
+ reg_data = er32(TCTL_EXT);
+ reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+ reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
+ ew32(TCTL_EXT, reg_data);
+
+ /* Configure Transmit Inter-Packet Gap */
+ reg_data = er32(TIPG);
+ reg_data &= ~E1000_TIPG_IPGT_MASK;
+ reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+ ew32(TIPG, reg_data);
+
+ reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
+ reg_data &= ~0x00100000;
+ E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
+
+ /* default to true to enable the MDIC W/A */
+ hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
+
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET >>
+ E1000_KMRNCTRLSTA_OFFSET_SHIFT,
+ &i);
+ if (!ret_val) {
+ if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
+ E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
+ hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
+ }
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Initializes required hardware-dependent bits needed for normal operation.
+ **/
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ /* Transmit Descriptor Control 0 */
+ reg = er32(TXDCTL(0));
+ reg |= (1 << 22);
+ ew32(TXDCTL(0), reg);
+
+ /* Transmit Descriptor Control 1 */
+ reg = er32(TXDCTL(1));
+ reg |= (1 << 22);
+ ew32(TXDCTL(1), reg);
+
+ /* Transmit Arbitration Control 0 */
+ reg = er32(TARC(0));
+ reg &= ~(0xF << 27); /* 30:27 */
+ if (hw->phy.media_type != e1000_media_type_copper)
+ reg &= ~(1 << 20);
+ ew32(TARC(0), reg);
+
+ /* Transmit Arbitration Control 1 */
+ reg = er32(TARC(1));
+ if (er32(TCTL) & E1000_TCTL_MULR)
+ reg &= ~(1 << 28);
+ else
+ reg |= (1 << 28);
+ ew32(TARC(1), reg);
+}
+
+/**
+ * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
+ * @hw: pointer to the HW structure
+ *
+ * Setup some GG82563 PHY registers for obtaining link
+ **/
+static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 ctrl_ext;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+ /* Use 25MHz for both link down and 1000Base-T for Tx clock. */
+ data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
+
+ ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+ switch (phy->mdix) {
+ case 1:
+ data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+ break;
+ case 2:
+ data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+ break;
+ case 0:
+ default:
+ data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+ break;
+ }
+
+ /*
+ * Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+ if (phy->disable_polarity_correction)
+ data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+
+ ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = e1000e_commit_phy(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ /* Bypass Rx and Tx FIFO's */
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
+ E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+ E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+ &data);
+ if (ret_val)
+ return ret_val;
+ data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+ ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data);
+ if (ret_val)
+ return ret_val;
+
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
+ ew32(CTRL_EXT, ctrl_ext);
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Do not init these registers when the HW is in IAMT mode, since the
+ * firmware will have already initialized them. We only initialize
+ * them if the HW is not in IAMT mode.
+ */
+ if (!e1000e_check_mng_mode(hw)) {
+ /* Enable Electrical Idle on the PHY */
+ data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+ ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /*
+ * Workaround: Disable padding in Kumeran interface in the MAC
+ * and in the PHY to avoid CRC errors.
+ */
+ ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= GG82563_ICR_DIS_PADDING;
+ ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ return 0;
+}
+
+/**
+ * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Essentially a wrapper for setting up all things "copper" related.
+ * This is a function pointer entry point called by the mac module.
+ **/
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 reg_data;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+
+ /*
+ * Set the mac to wait the maximum time between each
+ * iteration and increase the max iterations when
+ * polling the phy; this fixes erroneous timeouts at 10Mbps.
+ */
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
+ 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+ reg_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000e_setup_copper_link(hw);
+
+ return 0;
+}
+
+/**
+ * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
+ * @hw: pointer to the HW structure
+ * @duplex: current duplex setting
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
+ **/
+static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 speed;
+ u16 duplex;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
+ &duplex);
+ if (ret_val)
+ return ret_val;
+
+ if (speed == SPEED_1000)
+ ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
+ else
+ ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ * @hw: pointer to the HW structure
+ * @duplex: current duplex setting
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
+ **/
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+ s32 ret_val;
+ u32 tipg;
+ u32 i = 0;
+ u16 reg_data, reg_data2;
+
+ reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = er32(TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+ ew32(TIPG, tipg);
+
+ do {
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+ if (ret_val)
+ return ret_val;
+ i++;
+ } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+ if (duplex == HALF_DUPLEX)
+ reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+ else
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+ return 0;
+}
+
+/**
+ * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * gigabit operation.
+ **/
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 reg_data, reg_data2;
+ u32 tipg;
+ u32 i = 0;
+
+ reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = er32(TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+ ew32(TIPG, tipg);
+
+ do {
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+ if (ret_val)
+ return ret_val;
+ i++;
+ } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_kmrn_reg_80003es2lan - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquire semaphore, then read the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release the semaphore before exiting.
+ **/
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ u32 kmrnctrlsta;
+ s32 ret_val = 0;
+
+ ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
+
+ udelay(2);
+
+ kmrnctrlsta = er32(KMRNCTRLSTA);
+ *data = (u16)kmrnctrlsta;
+
+ e1000_release_mac_csr_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_kmrn_reg_80003es2lan - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquire semaphore, then write the data to PHY register
+ * at the offset using the kumeran interface. Release semaphore
+ * before exiting.
+ **/
+static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 data)
+{
+ u32 kmrnctrlsta;
+ s32 ret_val = 0;
+
+ ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | data;
+ ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
+
+ udelay(2);
+
+ e1000_release_mac_csr_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_mac_addr_80003es2lan - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(hw->mac.ops.check_mng_mode(hw) ||
+ hw->phy.ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+}
+
+/**
+ * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
+{
+ e1000e_clear_hw_cntrs_base(hw);
+
+ er32(PRC64);
+ er32(PRC127);
+ er32(PRC255);
+ er32(PRC511);
+ er32(PRC1023);
+ er32(PRC1522);
+ er32(PTC64);
+ er32(PTC127);
+ er32(PTC255);
+ er32(PTC511);
+ er32(PTC1023);
+ er32(PTC1522);
+
+ er32(ALGNERRC);
+ er32(RXERRC);
+ er32(TNCRS);
+ er32(CEXTERR);
+ er32(TSCTC);
+ er32(TSCTFC);
+
+ er32(MGTPRC);
+ er32(MGTPDC);
+ er32(MGTPTC);
+
+ er32(IAC);
+ er32(ICRXOC);
+
+ er32(ICRXPTC);
+ er32(ICRXATC);
+ er32(ICTXPTC);
+ er32(ICTXATC);
+ er32(ICTXQEC);
+ er32(ICTXQMTC);
+ er32(ICRXDMTC);
+}
+
+static const struct e1000_mac_operations es2_mac_ops = {
+ .read_mac_addr = e1000_read_mac_addr_80003es2lan,
+ .id_led_init = e1000e_id_led_init,
+ .blink_led = e1000e_blink_led_generic,
+ .check_mng_mode = e1000e_check_mng_mode_generic,
+ /* check_for_link dependent on media type */
+ .cleanup_led = e1000e_cleanup_led_generic,
+ .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
+ .get_bus_info = e1000e_get_bus_info_pcie,
+ .set_lan_id = e1000_set_lan_id_multi_port_pcie,
+ .get_link_up_info = e1000_get_link_up_info_80003es2lan,
+ .led_on = e1000e_led_on_generic,
+ .led_off = e1000e_led_off_generic,
+ .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
+ .write_vfta = e1000_write_vfta_generic,
+ .clear_vfta = e1000_clear_vfta_generic,
+ .reset_hw = e1000_reset_hw_80003es2lan,
+ .init_hw = e1000_init_hw_80003es2lan,
+ .setup_link = e1000e_setup_link,
+ /* setup_physical_interface dependent on media type */
+ .setup_led = e1000e_setup_led_generic,
+};
+
+static const struct e1000_phy_operations es2_phy_ops = {
+ .acquire = e1000_acquire_phy_80003es2lan,
+ .check_polarity = e1000_check_polarity_m88,
+ .check_reset_block = e1000e_check_reset_block_generic,
+ .commit = e1000e_phy_sw_reset,
+ .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
+ .get_cfg_done = e1000_get_cfg_done_80003es2lan,
+ .get_cable_length = e1000_get_cable_length_80003es2lan,
+ .get_info = e1000e_get_phy_info_m88,
+ .read_reg = e1000_read_phy_reg_gg82563_80003es2lan,
+ .release = e1000_release_phy_80003es2lan,
+ .reset = e1000e_phy_hw_reset_generic,
+ .set_d0_lplu_state = NULL,
+ .set_d3_lplu_state = e1000e_set_d3_lplu_state,
+ .write_reg = e1000_write_phy_reg_gg82563_80003es2lan,
+ .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
+};
+
+static const struct e1000_nvm_operations es2_nvm_ops = {
+ .acquire = e1000_acquire_nvm_80003es2lan,
+ .read = e1000e_read_nvm_eerd,
+ .release = e1000_release_nvm_80003es2lan,
+ .update = e1000e_update_nvm_checksum_generic,
+ .valid_led_default = e1000e_valid_led_default,
+ .validate = e1000e_validate_nvm_checksum_generic,
+ .write = e1000_write_nvm_80003es2lan,
+};
+
+const struct e1000_info e1000_es2_info = {
+ .mac = e1000_80003es2lan,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_HAS_WOL
+ | FLAG_APME_IN_CTRL3
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_RX_NEEDS_RESTART /* errata */
+ | FLAG_TARC_SET_BIT_ZERO /* errata */
+ | FLAG_APME_CHECK_PORT_B
+ | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
+ | FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
+ .flags2 = FLAG2_DMA_BURST,
+ .pba = 38,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_80003es2lan,
+ .mac_ops = &es2_mac_ops,
+ .phy_ops = &es2_phy_ops,
+ .nvm_ops = &es2_nvm_ops,
+};
+
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 536b3a55c45..a3e65fd26e0 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1927,7 +1927,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
er32(ICRXDMTC);
}
-static struct e1000_mac_operations e82571_mac_ops = {
+static const struct e1000_mac_operations e82571_mac_ops = {
/* .check_mng_mode: mac type dependent */
/* .check_for_link: media type dependent */
.id_led_init = e1000e_id_led_init,
@@ -1949,7 +1949,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
.read_mac_addr = e1000_read_mac_addr_82571,
};
-static struct e1000_phy_operations e82_phy_ops_igp = {
+static const struct e1000_phy_operations e82_phy_ops_igp = {
.acquire = e1000_get_hw_semaphore_82571,
.check_polarity = e1000_check_polarity_igp,
.check_reset_block = e1000e_check_reset_block_generic,
@@ -1967,7 +1967,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
.cfg_on_link_up = NULL,
};
-static struct e1000_phy_operations e82_phy_ops_m88 = {
+static const struct e1000_phy_operations e82_phy_ops_m88 = {
.acquire = e1000_get_hw_semaphore_82571,
.check_polarity = e1000_check_polarity_m88,
.check_reset_block = e1000e_check_reset_block_generic,
@@ -1985,7 +1985,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
.cfg_on_link_up = NULL,
};
-static struct e1000_phy_operations e82_phy_ops_bm = {
+static const struct e1000_phy_operations e82_phy_ops_bm = {
.acquire = e1000_get_hw_semaphore_82571,
.check_polarity = e1000_check_polarity_m88,
.check_reset_block = e1000e_check_reset_block_generic,
@@ -2003,7 +2003,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
.cfg_on_link_up = NULL,
};
-static struct e1000_nvm_operations e82571_nvm_ops = {
+static const struct e1000_nvm_operations e82571_nvm_ops = {
.acquire = e1000_acquire_nvm_82571,
.read = e1000e_read_nvm_eerd,
.release = e1000_release_nvm_82571,
@@ -2013,13 +2013,12 @@ static struct e1000_nvm_operations e82571_nvm_ops = {
.write = e1000_write_nvm_82571,
};
-struct e1000_info e1000_82571_info = {
+const struct e1000_info e1000_82571_info = {
.mac = e1000_82571,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
- | FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_RESET_OVERWRITES_LAA /* errata */
@@ -2035,13 +2034,12 @@ struct e1000_info e1000_82571_info = {
.nvm_ops = &e82571_nvm_ops,
};
-struct e1000_info e1000_82572_info = {
+const struct e1000_info e1000_82572_info = {
.mac = e1000_82572,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
- | FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_TARC_SPEED_MODE_BIT, /* errata */
.flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */
@@ -2054,12 +2052,11 @@ struct e1000_info e1000_82572_info = {
.nvm_ops = &e82571_nvm_ops,
};
-struct e1000_info e1000_82573_info = {
+const struct e1000_info e1000_82573_info = {
.mac = e1000_82573,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
- | FLAG_RX_CSUM_ENABLED
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_SWSM_ON_LOAD,
@@ -2073,14 +2070,13 @@ struct e1000_info e1000_82573_info = {
.nvm_ops = &e82571_nvm_ops,
};
-struct e1000_info e1000_82574_info = {
+const struct e1000_info e1000_82574_info = {
.mac = e1000_82574,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_MSIX
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
- | FLAG_RX_CSUM_ENABLED
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
@@ -2095,12 +2091,11 @@ struct e1000_info e1000_82574_info = {
.nvm_ops = &e82571_nvm_ops,
};
-struct e1000_info e1000_82583_info = {
+const struct e1000_info e1000_82583_info = {
.mac = e1000_82583,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
- | FLAG_RX_CSUM_ENABLED
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_JUMBO_FRAMES
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
new file mode 100644
index 00000000000..948c05db5d6
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -0,0 +1,37 @@
+################################################################################
+#
+# Intel PRO/1000 Linux driver
+# Copyright(c) 1999 - 2011 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) PRO/1000 ethernet driver
+#
+
+obj-$(CONFIG_E1000E) += e1000e.o
+
+e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
+ lib.o phy.o param.o ethtool.o netdev.o
+
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index c516a7440be..c516a7440be 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
new file mode 100644
index 00000000000..9fe18d1d53d
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -0,0 +1,742 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+
+#include "hw.h"
+
+struct e1000_info;
+
+#define e_dbg(format, arg...) \
+ netdev_dbg(hw->adapter->netdev, format, ## arg)
+#define e_err(format, arg...) \
+ netdev_err(adapter->netdev, format, ## arg)
+#define e_info(format, arg...) \
+ netdev_info(adapter->netdev, format, ## arg)
+#define e_warn(format, arg...) \
+ netdev_warn(adapter->netdev, format, ## arg)
+#define e_notice(format, arg...) \
+ netdev_notice(adapter->netdev, format, ## arg)
+
+
+/* Interrupt modes, as used by the IntMode parameter */
+#define E1000E_INT_MODE_LEGACY 0
+#define E1000E_INT_MODE_MSI 1
+#define E1000E_INT_MODE_MSIX 2
+
+/* Tx/Rx descriptor defines */
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 4096
+#define E1000_MIN_TXD 64
+
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 4096
+#define E1000_MIN_RXD 64
+
+#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
+#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
+
+/* Early Receive defines */
+#define E1000_ERT_2048 0x100
+
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_APME 0x0400
+
+#define E1000_MNG_VLAN_NONE (-1)
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+#define DEFAULT_JUMBO 9234
+
+/* BM/HV Specific Registers */
+#define BM_PORT_CTRL_PAGE 769
+
+#define PHY_UPPER_SHIFT 21
+#define BM_PHY_REG(page, reg) \
+ (((reg) & MAX_PHY_REG_ADDRESS) |\
+ (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
+ (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
+
+/* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
+#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
+#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
+#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
+#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
+#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
+#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
+#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+
+#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
+#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
+#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
+#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
+#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
+#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
+#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
+
+#define HV_STATS_PAGE 778
+#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
+#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
+#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
+#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
+#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
+#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
+#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
+
+#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS 17
+#define BM_CS_STATUS_LINK_UP 0x0400
+#define BM_CS_STATUS_RESOLVED 0x0800
+#define BM_CS_STATUS_SPEED_MASK 0xC000
+#define BM_CS_STATUS_SPEED_1000 0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS 26
+#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
+#define HV_M_STATUS_SPEED_MASK 0x0300
+#define HV_M_STATUS_SPEED_1000 0x0200
+#define HV_M_STATUS_LINK_UP 0x0040
+
+#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
+
+/* Time to wait before putting the device into D3 if there's no link (in ms). */
+#define LINK_TIMEOUT 100
+
+#define DEFAULT_RDTR 0
+#define DEFAULT_RADV 8
+#define BURST_RDTR 0x20
+#define BURST_RADV 0x20
+
+/*
+ * in the case of WTHRESH, it appears at least the 82571/2 hardware
+ * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
+ * WTHRESH=4, and since we want 64 bytes at a time written back, set
+ * it to 5
+ */
+#define E1000_TXDCTL_DMA_BURST_ENABLE \
+ (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
+ E1000_TXDCTL_COUNT_DESC | \
+ (5 << 16) | /* wthresh must be +1 more than desired */\
+ (1 << 8) | /* hthresh */ \
+ 0x1f) /* pthresh */
+
+#define E1000_RXDCTL_DMA_BURST_ENABLE \
+ (0x01000000 | /* set descriptor granularity */ \
+ (4 << 16) | /* set writeback threshold */ \
+ (4 << 8) | /* set prefetch threshold */ \
+ 0x20) /* set hthresh */
+
+#define E1000_TIDV_FPD (1 << 31)
+#define E1000_RDTR_FPD (1 << 31)
+
+enum e1000_boards {
+ board_82571,
+ board_82572,
+ board_82573,
+ board_82574,
+ board_82583,
+ board_80003es2lan,
+ board_ich8lan,
+ board_ich9lan,
+ board_ich10lan,
+ board_pchlan,
+ board_pch2lan,
+};
+
+struct e1000_ps_page {
+ struct page *page;
+ u64 dma; /* must be u64 - written to hw */
+};
+
+/*
+ * wrappers around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct e1000_buffer {
+ dma_addr_t dma;
+ struct sk_buff *skb;
+ union {
+ /* Tx */
+ struct {
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+ unsigned int segs;
+ unsigned int bytecount;
+ u16 mapped_as_page;
+ };
+ /* Rx */
+ struct {
+ /* arrays of page information for packet split */
+ struct e1000_ps_page *ps_pages;
+ struct page *page;
+ };
+ };
+};
+
+struct e1000_ring {
+ void *desc; /* pointer to ring memory */
+ dma_addr_t dma; /* phys address of ring */
+ unsigned int size; /* length of ring in bytes */
+ unsigned int count; /* number of desc. in ring */
+
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u16 head;
+ u16 tail;
+
+ /* array of buffer information structs */
+ struct e1000_buffer *buffer_info;
+
+ char name[IFNAMSIZ + 5];
+ u32 ims_val;
+ u32 itr_val;
+ u16 itr_register;
+ int set_itr;
+
+ struct sk_buff *rx_skb_top;
+};
+
+/* PHY register snapshot values */
+struct e1000_phy_regs {
+ u16 bmcr; /* basic mode control register */
+ u16 bmsr; /* basic mode status register */
+ u16 advertise; /* auto-negotiation advertisement */
+ u16 lpa; /* link partner ability register */
+ u16 expansion; /* auto-negotiation expansion reg */
+ u16 ctrl1000; /* 1000BASE-T control register */
+ u16 stat1000; /* 1000BASE-T status register */
+ u16 estatus; /* extended status register */
+};
+
+/* board specific private data structure */
+struct e1000_adapter {
+ struct timer_list watchdog_timer;
+ struct timer_list phy_info_timer;
+ struct timer_list blink_timer;
+
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
+
+ const struct e1000_info *ei;
+
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u32 bd_number;
+ u32 rx_buffer_len;
+ u16 mng_vlan_id;
+ u16 link_speed;
+ u16 link_duplex;
+ u16 eeprom_vers;
+
+ /* track device up/down/testing state */
+ unsigned long state;
+
+ /* Interrupt Throttle Rate */
+ u32 itr;
+ u32 itr_setting;
+ u16 tx_itr;
+ u16 rx_itr;
+
+ /*
+ * Tx
+ */
+ struct e1000_ring *tx_ring /* One per active queue */
+ ____cacheline_aligned_in_smp;
+
+ struct napi_struct napi;
+
+ unsigned int restart_queue;
+ u32 txd_cmd;
+
+ bool detect_tx_hung;
+ u8 tx_timeout_factor;
+
+ u32 tx_int_delay;
+ u32 tx_abs_int_delay;
+
+ unsigned int total_tx_bytes;
+ unsigned int total_tx_packets;
+ unsigned int total_rx_bytes;
+ unsigned int total_rx_packets;
+
+ /* Tx stats */
+ u64 tpt_old;
+ u64 colc_old;
+ u32 gotc;
+ u64 gotc_old;
+ u32 tx_timeout_count;
+ u32 tx_fifo_head;
+ u32 tx_head_addr;
+ u32 tx_fifo_size;
+ u32 tx_dma_failed;
+
+ /*
+ * Rx
+ */
+ bool (*clean_rx) (struct e1000_adapter *adapter,
+ int *work_done, int work_to_do)
+ ____cacheline_aligned_in_smp;
+ void (*alloc_rx_buf) (struct e1000_adapter *adapter,
+ int cleaned_count, gfp_t gfp);
+ struct e1000_ring *rx_ring;
+
+ u32 rx_int_delay;
+ u32 rx_abs_int_delay;
+
+ /* Rx stats */
+ u64 hw_csum_err;
+ u64 hw_csum_good;
+ u64 rx_hdr_split;
+ u32 gorc;
+ u64 gorc_old;
+ u32 alloc_rx_buff_failed;
+ u32 rx_dma_failed;
+
+ unsigned int rx_ps_pages;
+ u16 rx_ps_bsize0;
+ u32 max_frame_size;
+ u32 min_frame_size;
+
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ /* structs defined in e1000_hw.h */
+ struct e1000_hw hw;
+
+ spinlock_t stats64_lock;
+ struct e1000_hw_stats stats;
+ struct e1000_phy_info phy_info;
+ struct e1000_phy_stats phy_stats;
+
+ /* Snapshot of PHY registers */
+ struct e1000_phy_regs phy_regs;
+
+ struct e1000_ring test_tx_ring;
+ struct e1000_ring test_rx_ring;
+ u32 test_icr;
+
+ u32 msg_enable;
+ unsigned int num_vectors;
+ struct msix_entry *msix_entries;
+ int int_mode;
+ u32 eiac_mask;
+
+ u32 eeprom_wol;
+ u32 wol;
+ u32 pba;
+ u32 max_hw_frame_size;
+
+ bool fc_autoneg;
+
+ unsigned int flags;
+ unsigned int flags2;
+ struct work_struct downshift_task;
+ struct work_struct update_phy_task;
+ struct work_struct print_hang_task;
+
+ bool idle_check;
+ int phy_hang_count;
+};
+
+struct e1000_info {
+ enum e1000_mac_type mac;
+ unsigned int flags;
+ unsigned int flags2;
+ u32 pba;
+ u32 max_hw_frame_size;
+ s32 (*get_variants)(struct e1000_adapter *);
+ const struct e1000_mac_operations *mac_ops;
+ const struct e1000_phy_operations *phy_ops;
+ const struct e1000_nvm_operations *nvm_ops;
+};
+
+/* hardware capability, feature, and workaround flags */
+#define FLAG_HAS_AMT (1 << 0)
+#define FLAG_HAS_FLASH (1 << 1)
+#define FLAG_HAS_HW_VLAN_FILTER (1 << 2)
+#define FLAG_HAS_WOL (1 << 3)
+#define FLAG_HAS_ERT (1 << 4)
+#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
+#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
+#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
+#define FLAG_READ_ONLY_NVM (1 << 8)
+#define FLAG_IS_ICH (1 << 9)
+#define FLAG_HAS_MSIX (1 << 10)
+#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
+#define FLAG_IS_QUAD_PORT_A (1 << 12)
+#define FLAG_IS_QUAD_PORT (1 << 13)
+#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14)
+#define FLAG_APME_IN_WUC (1 << 15)
+#define FLAG_APME_IN_CTRL3 (1 << 16)
+#define FLAG_APME_CHECK_PORT_B (1 << 17)
+#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18)
+#define FLAG_NO_WAKE_UCAST (1 << 19)
+#define FLAG_MNG_PT_ENABLED (1 << 20)
+#define FLAG_RESET_OVERWRITES_LAA (1 << 21)
+#define FLAG_TARC_SPEED_MODE_BIT (1 << 22)
+#define FLAG_TARC_SET_BIT_ZERO (1 << 23)
+#define FLAG_RX_NEEDS_RESTART (1 << 24)
+#define FLAG_LSC_GIG_SPEED_DROP (1 << 25)
+#define FLAG_SMART_POWER_DOWN (1 << 26)
+#define FLAG_MSI_ENABLED (1 << 27)
+/* reserved (1 << 28) */
+#define FLAG_TSO_FORCE (1 << 29)
+#define FLAG_RX_RESTART_NOW (1 << 30)
+#define FLAG_MSI_TEST_FAILED (1 << 31)
+
+#define FLAG2_CRC_STRIPPING (1 << 0)
+#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
+#define FLAG2_IS_DISCARDING (1 << 2)
+#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
+#define FLAG2_HAS_PHY_STATS (1 << 4)
+#define FLAG2_HAS_EEE (1 << 5)
+#define FLAG2_DMA_BURST (1 << 6)
+#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
+#define FLAG2_DISABLE_AIM (1 << 8)
+#define FLAG2_CHECK_PHY_HANG (1 << 9)
+#define FLAG2_NO_DISABLE_RX (1 << 10)
+#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
+
+#define E1000_RX_DESC_PS(R, i) \
+ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i) \
+ (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
+
+enum e1000_state_t {
+ __E1000_TESTING,
+ __E1000_RESETTING,
+ __E1000_ACCESS_SHARED_RESOURCE,
+ __E1000_DOWN
+};
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+extern char e1000e_driver_name[];
+extern const char e1000e_driver_version[];
+
+extern void e1000e_check_options(struct e1000_adapter *adapter);
+extern void e1000e_set_ethtool_ops(struct net_device *netdev);
+
+extern int e1000e_up(struct e1000_adapter *adapter);
+extern void e1000e_down(struct e1000_adapter *adapter);
+extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
+extern void e1000e_reset(struct e1000_adapter *adapter);
+extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
+extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
+extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
+extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
+extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
+extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64
+ *stats);
+extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
+
+extern unsigned int copybreak;
+
+extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw);
+
+extern const struct e1000_info e1000_82571_info;
+extern const struct e1000_info e1000_82572_info;
+extern const struct e1000_info e1000_82573_info;
+extern const struct e1000_info e1000_82574_info;
+extern const struct e1000_info e1000_82583_info;
+extern const struct e1000_info e1000_ich8_info;
+extern const struct e1000_info e1000_ich9_info;
+extern const struct e1000_info e1000_ich10_info;
+extern const struct e1000_info e1000_pch_info;
+extern const struct e1000_info e1000_pch2_info;
+extern const struct e1000_info e1000_es2_info;
+
+extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+
+extern s32 e1000e_commit_phy(struct e1000_hw *hw);
+
+extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
+
+extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
+extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
+
+extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
+extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state);
+extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
+extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
+
+extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
+extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
+extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
+extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
+extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
+extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
+extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
+extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
+extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
+extern s32 e1000e_id_led_init(struct e1000_hw *hw);
+extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
+extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
+extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
+extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
+extern s32 e1000e_setup_link(struct e1000_hw *hw);
+extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
+extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list,
+ u32 mc_addr_count);
+extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
+extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
+extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
+extern void e1000e_config_collision_dist(struct e1000_hw *hw);
+extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
+extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
+extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
+extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+extern void e1000e_reset_adaptive(struct e1000_hw *hw);
+extern void e1000e_update_adaptive(struct e1000_hw *hw);
+
+extern s32 e1000e_setup_copper_link(struct e1000_hw *hw);
+extern s32 e1000e_get_phy_id(struct e1000_hw *hw);
+extern void e1000e_put_hw_semaphore(struct e1000_hw *hw);
+extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
+extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
+extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
+extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
+extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+ u16 data);
+extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
+extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
+extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
+extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
+extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
+extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
+extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
+extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
+ u16 *phy_reg);
+extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
+ u16 *phy_reg);
+extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+ u16 data);
+extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
+extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
+extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_check_downshift(struct e1000_hw *hw);
+extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+ u16 data);
+extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
+ u16 data);
+extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+
+extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+extern bool e1000_check_phy_82574(struct e1000_hw *hw);
+
+static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ return hw->phy.ops.reset(hw);
+}
+
+static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+ return hw->phy.ops.check_reset_block(hw);
+}
+
+static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return hw->phy.ops.read_reg(hw, offset, data);
+}
+
+static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return hw->phy.ops.write_reg(hw, offset, data);
+}
+
+static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
+{
+ return hw->phy.ops.get_cable_length(hw);
+}
+
+extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
+extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
+extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
+extern void e1000e_release_nvm(struct e1000_hw *hw);
+extern void e1000e_reload_nvm(struct e1000_hw *hw);
+extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+
+static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.read_mac_addr)
+ return hw->mac.ops.read_mac_addr(hw);
+
+ return e1000_read_mac_addr_generic(hw);
+}
+
+static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+ return hw->nvm.ops.validate(hw);
+}
+
+static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
+{
+ return hw->nvm.ops.update(hw);
+}
+
+static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ return hw->nvm.ops.read(hw, offset, words, data);
+}
+
+static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ return hw->nvm.ops.write(hw, offset, words, data);
+}
+
+static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+ return hw->phy.ops.get_info(hw);
+}
+
+static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
+{
+ return hw->mac.ops.check_mng_mode(hw);
+}
+
+extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
+extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
+extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+
+static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
+{
+ return readl(hw->hw_addr + reg);
+}
+
+static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+ writel(val, hw->hw_addr + reg);
+}
+
+#endif /* _E1000_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
new file mode 100644
index 00000000000..69c9d219914
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -0,0 +1,1991 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for e1000 */
+
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include "e1000.h"
+
+enum {NETDEV_STATS, E1000_STATS};
+
+struct e1000_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int type;
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define E1000_STAT(str, m) { \
+ .stat_string = str, \
+ .type = E1000_STATS, \
+ .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
+ .stat_offset = offsetof(struct e1000_adapter, m) }
+#define E1000_NETDEV_STAT(str, m) { \
+ .stat_string = str, \
+ .type = NETDEV_STATS, \
+ .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m) }
+
+static const struct e1000_stats e1000_gstrings_stats[] = {
+ E1000_STAT("rx_packets", stats.gprc),
+ E1000_STAT("tx_packets", stats.gptc),
+ E1000_STAT("rx_bytes", stats.gorc),
+ E1000_STAT("tx_bytes", stats.gotc),
+ E1000_STAT("rx_broadcast", stats.bprc),
+ E1000_STAT("tx_broadcast", stats.bptc),
+ E1000_STAT("rx_multicast", stats.mprc),
+ E1000_STAT("tx_multicast", stats.mptc),
+ E1000_NETDEV_STAT("rx_errors", rx_errors),
+ E1000_NETDEV_STAT("tx_errors", tx_errors),
+ E1000_NETDEV_STAT("tx_dropped", tx_dropped),
+ E1000_STAT("multicast", stats.mprc),
+ E1000_STAT("collisions", stats.colc),
+ E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
+ E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
+ E1000_STAT("rx_crc_errors", stats.crcerrs),
+ E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
+ E1000_STAT("rx_no_buffer_count", stats.rnbc),
+ E1000_STAT("rx_missed_errors", stats.mpc),
+ E1000_STAT("tx_aborted_errors", stats.ecol),
+ E1000_STAT("tx_carrier_errors", stats.tncrs),
+ E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
+ E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
+ E1000_STAT("tx_window_errors", stats.latecol),
+ E1000_STAT("tx_abort_late_coll", stats.latecol),
+ E1000_STAT("tx_deferred_ok", stats.dc),
+ E1000_STAT("tx_single_coll_ok", stats.scc),
+ E1000_STAT("tx_multi_coll_ok", stats.mcc),
+ E1000_STAT("tx_timeout_count", tx_timeout_count),
+ E1000_STAT("tx_restart_queue", restart_queue),
+ E1000_STAT("rx_long_length_errors", stats.roc),
+ E1000_STAT("rx_short_length_errors", stats.ruc),
+ E1000_STAT("rx_align_errors", stats.algnerrc),
+ E1000_STAT("tx_tcp_seg_good", stats.tsctc),
+ E1000_STAT("tx_tcp_seg_failed", stats.tsctfc),
+ E1000_STAT("rx_flow_control_xon", stats.xonrxc),
+ E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
+ E1000_STAT("tx_flow_control_xon", stats.xontxc),
+ E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
+ E1000_STAT("rx_long_byte_count", stats.gorc),
+ E1000_STAT("rx_csum_offload_good", hw_csum_good),
+ E1000_STAT("rx_csum_offload_errors", hw_csum_err),
+ E1000_STAT("rx_header_split", rx_hdr_split),
+ E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
+ E1000_STAT("tx_smbus", stats.mgptc),
+ E1000_STAT("rx_smbus", stats.mgprc),
+ E1000_STAT("dropped_smbus", stats.mgpdc),
+ E1000_STAT("rx_dma_failed", rx_dma_failed),
+ E1000_STAT("tx_dma_failed", tx_dma_failed),
+};
+
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
+
+static int e1000_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 speed;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ if (hw->phy.type == e1000_phy_ife)
+ ecmd->supported &= ~SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_TP;
+
+ if (hw->mac.autoneg == 1) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ /* the e1000 autoneg seems to match ethtool nicely */
+ ecmd->advertising |= hw->phy.autoneg_advertised;
+ }
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = hw->phy.addr;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ } else {
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
+
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
+
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ }
+
+ speed = -1;
+ ecmd->duplex = -1;
+
+ if (netif_running(netdev)) {
+ if (netif_carrier_ok(netdev)) {
+ speed = adapter->link_speed;
+ ecmd->duplex = adapter->link_duplex - 1;
+ }
+ } else {
+ u32 status = er32(STATUS);
+ if (status & E1000_STATUS_LU) {
+ if (status & E1000_STATUS_SPEED_1000)
+ speed = SPEED_1000;
+ else if (status & E1000_STATUS_SPEED_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ if (status & E1000_STATUS_FD)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ }
+ }
+
+ ethtool_cmd_speed_set(ecmd, speed);
+ ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
+ hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ /* MDI-X => 2; MDI =>1; Invalid =>0 */
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ netif_carrier_ok(netdev))
+ ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+ ETH_TP_MDI;
+ else
+ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+ return 0;
+}
+
+static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
+{
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+
+ mac->autoneg = 0;
+
+ /* Make sure dplx is at most 1 bit and lsb of speed is not set
+ * for the switch() below to work */
+ if ((spd & 1) || (dplx & ~1))
+ goto err_inval;
+
+ /* Fiber NICs only allow 1000 gbps Full duplex */
+ if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
+ spd != SPEED_1000 &&
+ dplx != DUPLEX_FULL) {
+ goto err_inval;
+ }
+
+ switch (spd + dplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_10_HALF;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_10_FULL;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_100_HALF;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_100_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_FULL:
+ mac->autoneg = 1;
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_HALF: /* not supported */
+ default:
+ goto err_inval;
+ }
+ return 0;
+
+err_inval:
+ e_err("Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+}
+
+static int e1000_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+ * When SoL/IDER sessions are active, autoneg/speed/duplex
+ * cannot be changed
+ */
+ if (e1000_check_reset_block(hw)) {
+ e_err("Cannot change link characteristics when SoL/IDER is "
+ "active.\n");
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ hw->mac.autoneg = 1;
+ if (hw->phy.media_type == e1000_media_type_fiber)
+ hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+ else
+ hw->phy.autoneg_advertised = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ ecmd->advertising = hw->phy.autoneg_advertised;
+ if (adapter->fc_autoneg)
+ hw->fc.requested_mode = e1000_fc_default;
+ } else {
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ return -EINVAL;
+ }
+ }
+
+ /* reset the link */
+
+ if (netif_running(adapter->netdev)) {
+ e1000e_down(adapter);
+ e1000e_up(adapter);
+ } else {
+ e1000e_reset(adapter);
+ }
+
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ return 0;
+}
+
+static void e1000_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ pause->autoneg =
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (hw->fc.current_mode == e1000_fc_rx_pause) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == e1000_fc_tx_pause) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == e1000_fc_full) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int e1000_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int retval = 0;
+
+ adapter->fc_autoneg = pause->autoneg;
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+ hw->fc.requested_mode = e1000_fc_default;
+ if (netif_running(adapter->netdev)) {
+ e1000e_down(adapter);
+ e1000e_up(adapter);
+ } else {
+ e1000e_reset(adapter);
+ }
+ } else {
+ if (pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_full;
+ else if (pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_rx_pause;
+ else if (!pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_none;
+
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ retval = hw->mac.ops.setup_link(hw);
+ /* implicit goto out */
+ } else {
+ retval = e1000e_force_mac_fc(hw);
+ if (retval)
+ goto out;
+ e1000e_set_fc_watermarks(hw);
+ }
+ }
+
+out:
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ return retval;
+}
+
+static u32 e1000_get_msglevel(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void e1000_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+static int e1000_get_regs_len(struct net_device *netdev)
+{
+#define E1000_REGS_LEN 32 /* overestimate */
+ return E1000_REGS_LEN * sizeof(u32);
+}
+
+static void e1000_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u16 phy_data;
+
+ memset(p, 0, E1000_REGS_LEN * sizeof(u32));
+
+ regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
+
+ regs_buff[0] = er32(CTRL);
+ regs_buff[1] = er32(STATUS);
+
+ regs_buff[2] = er32(RCTL);
+ regs_buff[3] = er32(RDLEN);
+ regs_buff[4] = er32(RDH);
+ regs_buff[5] = er32(RDT);
+ regs_buff[6] = er32(RDTR);
+
+ regs_buff[7] = er32(TCTL);
+ regs_buff[8] = er32(TDLEN);
+ regs_buff[9] = er32(TDH);
+ regs_buff[10] = er32(TDT);
+ regs_buff[11] = er32(TIDV);
+
+ regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
+
+ /* ethtool doesn't use anything past this point, so all this
+ * code is likely legacy junk for apps that may or may not
+ * exist */
+ if (hw->phy.type == e1000_phy_m88) {
+ e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ regs_buff[13] = (u32)phy_data; /* cable length */
+ regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
+ regs_buff[18] = regs_buff[13]; /* cable polarity */
+ regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[20] = regs_buff[17]; /* polarity correction */
+ /* phy receive errors */
+ regs_buff[22] = adapter->phy_stats.receive_errors;
+ regs_buff[23] = regs_buff[13]; /* mdix mode */
+ }
+ regs_buff[21] = 0; /* was idle_errors */
+ e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
+ regs_buff[24] = (u32)phy_data; /* phy local receiver status */
+ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+}
+
+static int e1000_get_eeprom_len(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ return adapter->hw.nvm.word_size * 2;
+}
+
+static int e1000_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+ ret_val = e1000_read_nvm(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
+ } else {
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ret_val = e1000_read_nvm(hw, first_word + i, 1,
+ &eeprom_buff[i]);
+ if (ret_val)
+ break;
+ }
+ }
+
+ if (ret_val) {
+ /* a read error occurred, throw away the result */
+ memset(eeprom_buff, 0xff, sizeof(u16) *
+ (last_word - first_word + 1));
+ } else {
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+ }
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int e1000_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16)))
+ return -EFAULT;
+
+ if (adapter->flags & FLAG_READ_ONLY_NVM)
+ return -EINVAL;
+
+ max_len = hw->nvm.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]);
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0))
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ ret_val = e1000_read_nvm(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
+
+ if (ret_val)
+ goto out;
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ret_val = e1000_write_nvm(hw, first_word,
+ last_word - first_word + 1, eeprom_buff);
+
+ if (ret_val)
+ goto out;
+
+ /*
+ * Update the checksum over the first part of the EEPROM if needed
+ * and flush shadow RAM for applicable controllers
+ */
+ if ((first_word <= NVM_CHECKSUM_REG) ||
+ (hw->mac.type == e1000_82583) ||
+ (hw->mac.type == e1000_82574) ||
+ (hw->mac.type == e1000_82573))
+ ret_val = e1000e_update_nvm_checksum(hw);
+
+out:
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static void e1000_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ char firmware_version[32];
+
+ strncpy(drvinfo->driver, e1000e_driver_name,
+ sizeof(drvinfo->driver) - 1);
+ strncpy(drvinfo->version, e1000e_driver_version,
+ sizeof(drvinfo->version) - 1);
+
+ /*
+ * EEPROM image version # is reported as firmware version # for
+ * PCI-E controllers
+ */
+ snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
+ (adapter->eeprom_vers & 0xF000) >> 12,
+ (adapter->eeprom_vers & 0x0FF0) >> 4,
+ (adapter->eeprom_vers & 0x000F));
+
+ strncpy(drvinfo->fw_version, firmware_version,
+ sizeof(drvinfo->fw_version) - 1);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info) - 1);
+ drvinfo->regdump_len = e1000_get_regs_len(netdev);
+ drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
+}
+
+static void e1000_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+
+ ring->rx_max_pending = E1000_MAX_RXD;
+ ring->tx_max_pending = E1000_MAX_TXD;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+}
+
+static int e1000_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_ring *tx_ring, *tx_old;
+ struct e1000_ring *rx_ring, *rx_old;
+ int err;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (netif_running(adapter->netdev))
+ e1000e_down(adapter);
+
+ tx_old = adapter->tx_ring;
+ rx_old = adapter->rx_ring;
+
+ err = -ENOMEM;
+ tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
+ if (!tx_ring)
+ goto err_alloc_tx;
+
+ rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
+ if (!rx_ring)
+ goto err_alloc_rx;
+
+ adapter->tx_ring = tx_ring;
+ adapter->rx_ring = rx_ring;
+
+ rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
+ rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD));
+ rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
+ tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD));
+ tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if (netif_running(adapter->netdev)) {
+ /* Try to get new resources before deleting old */
+ err = e1000e_setup_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+ err = e1000e_setup_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /*
+ * restore the old in order to free it,
+ * then add in the new
+ */
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ e1000e_free_rx_resources(adapter);
+ e1000e_free_tx_resources(adapter);
+ kfree(tx_old);
+ kfree(rx_old);
+ adapter->rx_ring = rx_ring;
+ adapter->tx_ring = tx_ring;
+ err = e1000e_up(adapter);
+ if (err)
+ goto err_setup;
+ }
+
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ return 0;
+err_setup_tx:
+ e1000e_free_rx_resources(adapter);
+err_setup_rx:
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ kfree(rx_ring);
+err_alloc_rx:
+ kfree(tx_ring);
+err_alloc_tx:
+ e1000e_up(adapter);
+err_setup:
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ return err;
+}
+
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
+ int reg, int offset, u32 mask, u32 write)
+{
+ u32 pat, val;
+ static const u32 test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
+ E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
+ (test[pat] & write));
+ val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
+ if (val != (test[pat] & write & mask)) {
+ e_err("pattern test reg %04X failed: got 0x%08X "
+ "expected 0x%08X\n", reg + offset, val,
+ (test[pat] & write & mask));
+ *data = reg;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+{
+ u32 val;
+ __ew32(&adapter->hw, reg, write & mask);
+ val = __er32(&adapter->hw, reg);
+ if ((write & mask) != (val & mask)) {
+ e_err("set/check reg %04X test failed: got 0x%08X "
+ "expected 0x%08X\n", reg, (val & mask), (write & mask));
+ *data = reg;
+ return 1;
+ }
+ return 0;
+}
+#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
+ do { \
+ if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
+ return 1; \
+ } while (0)
+#define REG_PATTERN_TEST(reg, mask, write) \
+ REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)
+
+#define REG_SET_AND_CHECK(reg, mask, write) \
+ do { \
+ if (reg_set_and_check(adapter, data, reg, mask, write)) \
+ return 1; \
+ } while (0)
+
+static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+ u32 value;
+ u32 before;
+ u32 after;
+ u32 i;
+ u32 toggle;
+ u32 mask;
+
+ /*
+ * The status register is Read Only, so a write should fail.
+ * Some bits that get toggled are ignored.
+ */
+ switch (mac->type) {
+ /* there are several bits on newer hardware that are r/w */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ toggle = 0x7FFFF3FF;
+ break;
+ default:
+ toggle = 0x7FFFF033;
+ break;
+ }
+
+ before = er32(STATUS);
+ value = (er32(STATUS) & toggle);
+ ew32(STATUS, toggle);
+ after = er32(STATUS) & toggle;
+ if (value != after) {
+ e_err("failed STATUS register test got: 0x%08X expected: "
+ "0x%08X\n", after, value);
+ *data = 1;
+ return 1;
+ }
+ /* restore previous status */
+ ew32(STATUS, before);
+
+ if (!(adapter->flags & FLAG_IS_ICH)) {
+ REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
+ }
+
+ REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF);
+ REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
+ REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
+ REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF);
+
+ REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
+
+ before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
+ REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
+ REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
+
+ REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+ if (!(adapter->flags & FLAG_IS_ICH))
+ REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
+ mask = 0x8003FFFF;
+ switch (mac->type) {
+ case e1000_ich10lan:
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ mask |= (1 << 18);
+ break;
+ default:
+ break;
+ }
+ for (i = 0; i < mac->rar_entry_count; i++)
+ REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
+ mask, 0xFFFFFFFF);
+
+ for (i = 0; i < mac->mta_reg_count; i++)
+ REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
+
+ *data = 0;
+ return 0;
+}
+
+static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
+{
+ u16 temp;
+ u16 checksum = 0;
+ u16 i;
+
+ *data = 0;
+ /* Read and add up the contents of the EEPROM */
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
+ *data = 1;
+ return *data;
+ }
+ checksum += temp;
+ }
+
+ /* If Checksum is not Correct return error else test passed */
+ if ((checksum != (u16) NVM_SUM) && !(*data))
+ *data = 2;
+
+ return *data;
+}
+
+static irqreturn_t e1000_test_intr(int irq, void *data)
+{
+ struct net_device *netdev = (struct net_device *) data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->test_icr |= er32(ICR);
+
+ return IRQ_HANDLED;
+}
+
+static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mask;
+ u32 shared_int = 1;
+ u32 irq = adapter->pdev->irq;
+ int i;
+ int ret_val = 0;
+ int int_mode = E1000E_INT_MODE_LEGACY;
+
+ *data = 0;
+
+ /* NOTE: we don't test MSI/MSI-X interrupts here, yet */
+ if (adapter->int_mode == E1000E_INT_MODE_MSIX) {
+ int_mode = adapter->int_mode;
+ e1000e_reset_interrupt_capability(adapter);
+ adapter->int_mode = E1000E_INT_MODE_LEGACY;
+ e1000e_set_interrupt_capability(adapter);
+ }
+ /* Hook up test interrupt handler just for this test */
+ if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+ netdev)) {
+ shared_int = 0;
+ } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
+ netdev->name, netdev)) {
+ *data = 1;
+ ret_val = -1;
+ goto out;
+ }
+ e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
+
+ /* Disable all the interrupts */
+ ew32(IMC, 0xFFFFFFFF);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ /* Test each interrupt */
+ for (i = 0; i < 10; i++) {
+ /* Interrupt to test */
+ mask = 1 << i;
+
+ if (adapter->flags & FLAG_IS_ICH) {
+ switch (mask) {
+ case E1000_ICR_RXSEQ:
+ continue;
+ case 0x00000100:
+ if (adapter->hw.mac.type == e1000_ich8lan ||
+ adapter->hw.mac.type == e1000_ich9lan)
+ continue;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!shared_int) {
+ /*
+ * Disable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMC, mask);
+ ew32(ICS, mask);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
+ }
+
+ /*
+ * Enable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was not posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMS, mask);
+ ew32(ICS, mask);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ if (!(adapter->test_icr & mask)) {
+ *data = 4;
+ break;
+ }
+
+ if (!shared_int) {
+ /*
+ * Disable the other interrupts to be reported in
+ * the cause register and then force the other
+ * interrupts and see if any get posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMC, ~mask & 0x00007FFF);
+ ew32(ICS, ~mask & 0x00007FFF);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ if (adapter->test_icr) {
+ *data = 5;
+ break;
+ }
+ }
+ }
+
+ /* Disable all the interrupts */
+ ew32(IMC, 0xFFFFFFFF);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ /* Unhook test interrupt handler */
+ free_irq(irq, netdev);
+
+out:
+ if (int_mode == E1000E_INT_MODE_MSIX) {
+ e1000e_reset_interrupt_capability(adapter);
+ adapter->int_mode = int_mode;
+ e1000e_set_interrupt_capability(adapter);
+ }
+
+ return ret_val;
+}
+
+static void e1000_free_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *tx_ring = &adapter->test_tx_ring;
+ struct e1000_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int i;
+
+ if (tx_ring->desc && tx_ring->buffer_info) {
+ for (i = 0; i < tx_ring->count; i++) {
+ if (tx_ring->buffer_info[i].dma)
+ dma_unmap_single(&pdev->dev,
+ tx_ring->buffer_info[i].dma,
+ tx_ring->buffer_info[i].length,
+ DMA_TO_DEVICE);
+ if (tx_ring->buffer_info[i].skb)
+ dev_kfree_skb(tx_ring->buffer_info[i].skb);
+ }
+ }
+
+ if (rx_ring->desc && rx_ring->buffer_info) {
+ for (i = 0; i < rx_ring->count; i++) {
+ if (rx_ring->buffer_info[i].dma)
+ dma_unmap_single(&pdev->dev,
+ rx_ring->buffer_info[i].dma,
+ 2048, DMA_FROM_DEVICE);
+ if (rx_ring->buffer_info[i].skb)
+ dev_kfree_skb(rx_ring->buffer_info[i].skb);
+ }
+ }
+
+ if (tx_ring->desc) {
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+ if (rx_ring->desc) {
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+
+ kfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+ kfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+}
+
+static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *tx_ring = &adapter->test_tx_ring;
+ struct e1000_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+ int i;
+ int ret_val;
+
+ /* Setup Tx descriptor ring and Tx buffers */
+
+ if (!tx_ring->count)
+ tx_ring->count = E1000_DEFAULT_TXD;
+
+ tx_ring->buffer_info = kcalloc(tx_ring->count,
+ sizeof(struct e1000_buffer),
+ GFP_KERNEL);
+ if (!(tx_ring->buffer_info)) {
+ ret_val = 1;
+ goto err_nomem;
+ }
+
+ tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ ret_val = 2;
+ goto err_nomem;
+ }
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
+ ew32(TDBAH, ((u64) tx_ring->dma >> 32));
+ ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc));
+ ew32(TDH, 0);
+ ew32(TDT, 0);
+ ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
+ E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+ E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+
+ for (i = 0; i < tx_ring->count; i++) {
+ struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
+ struct sk_buff *skb;
+ unsigned int skb_size = 1024;
+
+ skb = alloc_skb(skb_size, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 3;
+ goto err_nomem;
+ }
+ skb_put(skb, skb_size);
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].length = skb->len;
+ tx_ring->buffer_info[i].dma =
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ tx_ring->buffer_info[i].dma)) {
+ ret_val = 4;
+ goto err_nomem;
+ }
+ tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
+ tx_desc->lower.data = cpu_to_le32(skb->len);
+ tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
+ E1000_TXD_CMD_IFCS |
+ E1000_TXD_CMD_RS);
+ tx_desc->upper.data = 0;
+ }
+
+ /* Setup Rx descriptor ring and Rx buffers */
+
+ if (!rx_ring->count)
+ rx_ring->count = E1000_DEFAULT_RXD;
+
+ rx_ring->buffer_info = kcalloc(rx_ring->count,
+ sizeof(struct e1000_buffer),
+ GFP_KERNEL);
+ if (!(rx_ring->buffer_info)) {
+ ret_val = 5;
+ goto err_nomem;
+ }
+
+ rx_ring->size = rx_ring->count * sizeof(union e1000_rx_desc_extended);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!rx_ring->desc) {
+ ret_val = 6;
+ goto err_nomem;
+ }
+ rx_ring->next_to_use = 0;
+ rx_ring->next_to_clean = 0;
+
+ rctl = er32(RCTL);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
+ ew32(RDBAH, ((u64) rx_ring->dma >> 32));
+ ew32(RDLEN, rx_ring->size);
+ ew32(RDH, 0);
+ ew32(RDT, 0);
+ rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
+ E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
+ E1000_RCTL_SBP | E1000_RCTL_SECRC |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ ew32(RCTL, rctl);
+
+ for (i = 0; i < rx_ring->count; i++) {
+ union e1000_rx_desc_extended *rx_desc;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 7;
+ goto err_nomem;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ rx_ring->buffer_info[i].skb = skb;
+ rx_ring->buffer_info[i].dma =
+ dma_map_single(&pdev->dev, skb->data, 2048,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ rx_ring->buffer_info[i].dma)) {
+ ret_val = 8;
+ goto err_nomem;
+ }
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ rx_desc->read.buffer_addr =
+ cpu_to_le64(rx_ring->buffer_info[i].dma);
+ memset(skb->data, 0x00, skb->len);
+ }
+
+ return 0;
+
+err_nomem:
+ e1000_free_desc_rings(adapter);
+ return ret_val;
+}
+
+static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
+{
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+ e1e_wphy(&adapter->hw, 29, 0x001F);
+ e1e_wphy(&adapter->hw, 30, 0x8FFC);
+ e1e_wphy(&adapter->hw, 29, 0x001A);
+ e1e_wphy(&adapter->hw, 30, 0x8FF0);
+}
+
+static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_reg = 0;
+ u16 phy_reg = 0;
+ s32 ret_val = 0;
+
+ hw->mac.autoneg = 0;
+
+ if (hw->phy.type == e1000_phy_ife) {
+ /* force 100, set loopback */
+ e1e_wphy(hw, PHY_CONTROL, 0x6100);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ ew32(CTRL, ctrl_reg);
+ e1e_flush();
+ udelay(500);
+
+ return 0;
+ }
+
+ /* Specific PHY configuration for loopback */
+ switch (hw->phy.type) {
+ case e1000_phy_m88:
+ /* Auto-MDI/MDIX Off */
+ e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+ e1e_wphy(hw, PHY_CONTROL, 0x9140);
+ /* autoneg off */
+ e1e_wphy(hw, PHY_CONTROL, 0x8140);
+ break;
+ case e1000_phy_gg82563:
+ e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
+ break;
+ case e1000_phy_bm:
+ /* Set Default MAC Interface speed to 1GB */
+ e1e_rphy(hw, PHY_REG(2, 21), &phy_reg);
+ phy_reg &= ~0x0007;
+ phy_reg |= 0x006;
+ e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
+ /* Assert SW reset for above settings to take effect */
+ e1000e_commit_phy(hw);
+ mdelay(1);
+ /* Force Full Duplex */
+ e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
+ e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
+ /* Set Link Up (in force link) */
+ e1e_rphy(hw, PHY_REG(776, 16), &phy_reg);
+ e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040);
+ /* Force Link */
+ e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
+ e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040);
+ /* Set Early Link Enable */
+ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
+ e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
+ break;
+ case e1000_phy_82577:
+ case e1000_phy_82578:
+ /* Workaround: K1 must be disabled for stable 1Gbps operation */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ e_err("Cannot setup 1Gbps loopback.\n");
+ return ret_val;
+ }
+ e1000_configure_k1_ich8lan(hw, false);
+ hw->phy.ops.release(hw);
+ break;
+ case e1000_phy_82579:
+ /* Disable PHY energy detect power down */
+ e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
+ e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
+ /* Disable full chip energy detect */
+ e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
+ e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
+ /* Enable loopback on the PHY */
+#define I82577_PHY_LBK_CTRL 19
+ e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
+ break;
+ default:
+ break;
+ }
+
+ /* force 1000, set loopback */
+ e1e_wphy(hw, PHY_CONTROL, 0x4140);
+ mdelay(250);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ if (adapter->flags & FLAG_IS_ICH)
+ ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
+
+ if (hw->phy.media_type == e1000_media_type_copper &&
+ hw->phy.type == e1000_phy_m88) {
+ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+ } else {
+ /*
+ * Set the ILOS bit on the fiber Nic if half duplex link is
+ * detected.
+ */
+ if ((er32(STATUS) & E1000_STATUS_FD) == 0)
+ ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
+ }
+
+ ew32(CTRL, ctrl_reg);
+
+ /*
+ * Disable the receiver on the PHY so when a cable is plugged in, the
+ * PHY does not begin to autoneg when a cable is reconnected to the NIC.
+ */
+ if (hw->phy.type == e1000_phy_m88)
+ e1000_phy_disable_receiver(adapter);
+
+ udelay(500);
+
+ return 0;
+}
+
+static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl = er32(CTRL);
+ int link = 0;
+
+ /* special requirements for 82571/82572 fiber adapters */
+
+ /*
+ * jump through hoops to make sure link is up because serdes
+ * link is hardwired up
+ */
+ ctrl |= E1000_CTRL_SLU;
+ ew32(CTRL, ctrl);
+
+ /* disable autoneg */
+ ctrl = er32(TXCW);
+ ctrl &= ~(1 << 31);
+ ew32(TXCW, ctrl);
+
+ link = (er32(STATUS) & E1000_STATUS_LU);
+
+ if (!link) {
+ /* set invert loss of signal */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_ILOS;
+ ew32(CTRL, ctrl);
+ }
+
+ /*
+ * special write to serdes control register to enable SerDes analog
+ * loopback
+ */
+#define E1000_SERDES_LB_ON 0x410
+ ew32(SCTL, E1000_SERDES_LB_ON);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+/* only call this for fiber/serdes connections to es2lan */
+static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrlext = er32(CTRL_EXT);
+ u32 ctrl = er32(CTRL);
+
+ /*
+ * save CTRL_EXT to restore later, reuse an empty variable (unused
+ * on mac_type 80003es2lan)
+ */
+ adapter->tx_fifo_head = ctrlext;
+
+ /* clear the serdes mode bits, putting the device into mac loopback */
+ ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ ew32(CTRL_EXT, ctrlext);
+
+ /* force speed to 1000/FD, link up */
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
+ E1000_CTRL_SPD_1000 | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* set mac loopback */
+ ctrl = er32(RCTL);
+ ctrl |= E1000_RCTL_LBM_MAC;
+ ew32(RCTL, ctrl);
+
+ /* set testing mode parameters (no need to reset later) */
+#define KMRNCTRLSTA_OPMODE (0x1F << 16)
+#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
+ ew32(KMRNCTRLSTA,
+ (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
+
+ return 0;
+}
+
+static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes) {
+ switch (hw->mac.type) {
+ case e1000_80003es2lan:
+ return e1000_set_es2lan_mac_loopback(adapter);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ return e1000_set_82571_fiber_loopback(adapter);
+ break;
+ default:
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_LBM_TCVR;
+ ew32(RCTL, rctl);
+ return 0;
+ }
+ } else if (hw->phy.media_type == e1000_media_type_copper) {
+ return e1000_integrated_phy_loopback(adapter);
+ }
+
+ return 7;
+}
+
+static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+ u16 phy_reg;
+
+ rctl = er32(RCTL);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+ ew32(RCTL, rctl);
+
+ switch (hw->mac.type) {
+ case e1000_80003es2lan:
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes) {
+ /* restore CTRL_EXT, stealing space from tx_fifo_head */
+ ew32(CTRL_EXT, adapter->tx_fifo_head);
+ adapter->tx_fifo_head = 0;
+ }
+ /* fall through */
+ case e1000_82571:
+ case e1000_82572:
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes) {
+#define E1000_SERDES_LB_OFF 0x400
+ ew32(SCTL, E1000_SERDES_LB_OFF);
+ e1e_flush();
+ usleep_range(10000, 20000);
+ break;
+ }
+ /* Fall Through */
+ default:
+ hw->mac.autoneg = 1;
+ if (hw->phy.type == e1000_phy_gg82563)
+ e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180);
+ e1e_rphy(hw, PHY_CONTROL, &phy_reg);
+ if (phy_reg & MII_CR_LOOPBACK) {
+ phy_reg &= ~MII_CR_LOOPBACK;
+ e1e_wphy(hw, PHY_CONTROL, phy_reg);
+ e1000e_commit_phy(hw);
+ }
+ break;
+ }
+}
+
+static void e1000_create_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ memset(skb->data, 0xFF, frame_size);
+ frame_size &= ~1;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+static int e1000_check_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ frame_size &= ~1;
+ if (*(skb->data + 3) == 0xFF)
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF))
+ return 0;
+ return 13;
+}
+
+static int e1000_run_loopback_test(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *tx_ring = &adapter->test_tx_ring;
+ struct e1000_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ int i, j, k, l;
+ int lc;
+ int good_cnt;
+ int ret_val = 0;
+ unsigned long time;
+
+ ew32(RDT, rx_ring->count - 1);
+
+ /*
+ * Calculate the loop count based on the largest descriptor ring
+ * The idea is to wrap the largest ring a number of times using 64
+ * send/receive pairs during each loop
+ */
+
+ if (rx_ring->count <= tx_ring->count)
+ lc = ((tx_ring->count / 64) * 2) + 1;
+ else
+ lc = ((rx_ring->count / 64) * 2) + 1;
+
+ k = 0;
+ l = 0;
+ for (j = 0; j <= lc; j++) { /* loop count loop */
+ for (i = 0; i < 64; i++) { /* send the packets */
+ e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
+ 1024);
+ dma_sync_single_for_device(&pdev->dev,
+ tx_ring->buffer_info[k].dma,
+ tx_ring->buffer_info[k].length,
+ DMA_TO_DEVICE);
+ k++;
+ if (k == tx_ring->count)
+ k = 0;
+ }
+ ew32(TDT, k);
+ e1e_flush();
+ msleep(200);
+ time = jiffies; /* set the start time for the receive */
+ good_cnt = 0;
+ do { /* receive the sent packets */
+ dma_sync_single_for_cpu(&pdev->dev,
+ rx_ring->buffer_info[l].dma, 2048,
+ DMA_FROM_DEVICE);
+
+ ret_val = e1000_check_lbtest_frame(
+ rx_ring->buffer_info[l].skb, 1024);
+ if (!ret_val)
+ good_cnt++;
+ l++;
+ if (l == rx_ring->count)
+ l = 0;
+ /*
+ * time + 20 msecs (200 msecs on 2.4) is more than
+ * enough time to complete the receives, if it's
+ * exceeded, break and error off
+ */
+ } while ((good_cnt < 64) && !time_after(jiffies, time + 20));
+ if (good_cnt != 64) {
+ ret_val = 13; /* ret_val is the same as mis-compare */
+ break;
+ }
+ if (jiffies >= (time + 20)) {
+ ret_val = 14; /* error code for time out error */
+ break;
+ }
+ } /* end loop count loop */
+ return ret_val;
+}
+
+static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
+{
+ /*
+ * PHY loopback cannot be performed if SoL/IDER
+ * sessions are active
+ */
+ if (e1000_check_reset_block(&adapter->hw)) {
+ e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
+ *data = 0;
+ goto out;
+ }
+
+ *data = e1000_setup_desc_rings(adapter);
+ if (*data)
+ goto out;
+
+ *data = e1000_setup_loopback_test(adapter);
+ if (*data)
+ goto err_loopback;
+
+ *data = e1000_run_loopback_test(adapter);
+ e1000_loopback_cleanup(adapter);
+
+err_loopback:
+ e1000_free_desc_rings(adapter);
+out:
+ return *data;
+}
+
+static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ *data = 0;
+ if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+ int i = 0;
+ hw->mac.serdes_has_link = false;
+
+ /*
+ * On some blade server designs, link establishment
+ * could take as long as 2-3 minutes
+ */
+ do {
+ hw->mac.ops.check_for_link(hw);
+ if (hw->mac.serdes_has_link)
+ return *data;
+ msleep(20);
+ } while (i++ < 3750);
+
+ *data = 1;
+ } else {
+ hw->mac.ops.check_for_link(hw);
+ if (hw->mac.autoneg)
+ /*
+ * On some Phy/switch combinations, link establishment
+ * can take a few seconds more than expected.
+ */
+ msleep(5000);
+
+ if (!(er32(STATUS) & E1000_STATUS_LU))
+ *data = 1;
+ }
+ return *data;
+}
+
+static int e1000e_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return E1000_TEST_LEN;
+ case ETH_SS_STATS:
+ return E1000_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void e1000_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u16 autoneg_advertised;
+ u8 forced_speed_duplex;
+ u8 autoneg;
+ bool if_running = netif_running(netdev);
+
+ set_bit(__E1000_TESTING, &adapter->state);
+
+ if (!if_running) {
+ /* Get control of and reset hardware */
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_get_hw_control(adapter);
+
+ e1000e_power_up_phy(adapter);
+
+ adapter->hw.phy.autoneg_wait_to_complete = 1;
+ e1000e_reset(adapter);
+ adapter->hw.phy.autoneg_wait_to_complete = 0;
+ }
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ /* save speed, duplex, autoneg settings */
+ autoneg_advertised = adapter->hw.phy.autoneg_advertised;
+ forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
+ autoneg = adapter->hw.mac.autoneg;
+
+ e_info("offline testing starting\n");
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+
+ if (e1000_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000e_reset(adapter);
+ if (e1000_eeprom_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000e_reset(adapter);
+ if (e1000_intr_test(adapter, &data[2]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000e_reset(adapter);
+ if (e1000_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* force this routine to wait until autoneg complete/timeout */
+ adapter->hw.phy.autoneg_wait_to_complete = 1;
+ e1000e_reset(adapter);
+ adapter->hw.phy.autoneg_wait_to_complete = 0;
+
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* restore speed, duplex, autoneg settings */
+ adapter->hw.phy.autoneg_advertised = autoneg_advertised;
+ adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
+ adapter->hw.mac.autoneg = autoneg;
+ e1000e_reset(adapter);
+
+ clear_bit(__E1000_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ /* Online tests */
+
+ e_info("online testing starting\n");
+
+ /* register, eeprom, intr and loopback tests not run online */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ clear_bit(__E1000_TESTING, &adapter->state);
+ }
+
+ if (!if_running) {
+ e1000e_reset(adapter);
+
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_release_hw_control(adapter);
+ }
+
+ msleep_interruptible(4 * 1000);
+}
+
+static void e1000_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (!(adapter->flags & FLAG_HAS_WOL) ||
+ !device_can_wakeup(&adapter->pdev->dev))
+ return;
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC | WAKE_PHY;
+
+ /* apply any specific unsupported masks here */
+ if (adapter->flags & FLAG_NO_WAKE_UCAST) {
+ wol->supported &= ~WAKE_UCAST;
+
+ if (adapter->wol & E1000_WUFC_EX)
+ e_err("Interface does not support directed (unicast) "
+ "frame wake-up packets\n");
+ }
+
+ if (adapter->wol & E1000_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & E1000_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & E1000_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & E1000_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & E1000_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!(adapter->flags & FLAG_HAS_WOL) ||
+ !device_can_wakeup(&adapter->pdev->dev) ||
+ (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
+ WAKE_MAGIC | WAKE_PHY)))
+ return -EOPNOTSUPP;
+
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= E1000_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= E1000_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= E1000_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= E1000_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= E1000_WUFC_LNKC;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+static int e1000_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (!hw->mac.ops.blink_led)
+ return 2; /* cycle on/off twice per second */
+
+ hw->mac.ops.blink_led(hw);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ if (hw->phy.type == e1000_phy_ife)
+ e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+ hw->mac.ops.led_off(hw);
+ hw->mac.ops.cleanup_led(hw);
+ break;
+
+ case ETHTOOL_ID_ON:
+ adapter->hw.mac.ops.led_on(&adapter->hw);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ adapter->hw.mac.ops.led_off(&adapter->hw);
+ break;
+ }
+ return 0;
+}
+
+static int e1000_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->itr_setting <= 4)
+ ec->rx_coalesce_usecs = adapter->itr_setting;
+ else
+ ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
+
+ return 0;
+}
+
+static int e1000_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
+ ((ec->rx_coalesce_usecs > 4) &&
+ (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
+ (ec->rx_coalesce_usecs == 2))
+ return -EINVAL;
+
+ if (ec->rx_coalesce_usecs == 4) {
+ adapter->itr = adapter->itr_setting = 4;
+ } else if (ec->rx_coalesce_usecs <= 3) {
+ adapter->itr = 20000;
+ adapter->itr_setting = ec->rx_coalesce_usecs;
+ } else {
+ adapter->itr = (1000000 / ec->rx_coalesce_usecs);
+ adapter->itr_setting = adapter->itr & ~3;
+ }
+
+ if (adapter->itr_setting != 0)
+ ew32(ITR, 1000000000 / (adapter->itr * 256));
+ else
+ ew32(ITR, 0);
+
+ return 0;
+}
+
+static int e1000_nway_reset(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (!adapter->hw.mac.autoneg)
+ return -EINVAL;
+
+ e1000e_reinit_locked(adapter);
+
+ return 0;
+}
+
+static void e1000_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct rtnl_link_stats64 net_stats;
+ int i;
+ char *p = NULL;
+
+ e1000e_get_stats64(netdev, &net_stats);
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ switch (e1000_gstrings_stats[i].type) {
+ case NETDEV_STATS:
+ p = (char *) &net_stats +
+ e1000_gstrings_stats[i].stat_offset;
+ break;
+ case E1000_STATS:
+ p = (char *) adapter +
+ e1000_gstrings_stats[i].stat_offset;
+ break;
+ default:
+ data[i] = 0;
+ continue;
+ }
+
+ data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+}
+
+static void e1000_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, e1000_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static const struct ethtool_ops e1000_ethtool_ops = {
+ .get_settings = e1000_get_settings,
+ .set_settings = e1000_set_settings,
+ .get_drvinfo = e1000_get_drvinfo,
+ .get_regs_len = e1000_get_regs_len,
+ .get_regs = e1000_get_regs,
+ .get_wol = e1000_get_wol,
+ .set_wol = e1000_set_wol,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
+ .nway_reset = e1000_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = e1000_get_eeprom_len,
+ .get_eeprom = e1000_get_eeprom,
+ .set_eeprom = e1000_set_eeprom,
+ .get_ringparam = e1000_get_ringparam,
+ .set_ringparam = e1000_set_ringparam,
+ .get_pauseparam = e1000_get_pauseparam,
+ .set_pauseparam = e1000_set_pauseparam,
+ .self_test = e1000_diag_test,
+ .get_strings = e1000_get_strings,
+ .set_phys_id = e1000_set_phys_id,
+ .get_ethtool_stats = e1000_get_ethtool_stats,
+ .get_sset_count = e1000e_get_sset_count,
+ .get_coalesce = e1000_get_coalesce,
+ .set_coalesce = e1000_set_coalesce,
+};
+
+void e1000e_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+}
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 29670397079..29670397079 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 54add27c8f7..6a17c62cb86 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -811,7 +811,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
}
if ((adapter->hw.mac.type == e1000_ich8lan) &&
- (adapter->hw.phy.type == e1000_phy_igp_3))
+ (adapter->hw.phy.type != e1000_phy_ife))
adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
/* Enable workaround for 82579 w/ ME enabled */
@@ -852,8 +852,6 @@ static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
mutex_unlock(&nvm_mutex);
}
-static DEFINE_MUTEX(swflag_mutex);
-
/**
* e1000_acquire_swflag_ich8lan - Acquire software control flag
* @hw: pointer to the HW structure
@@ -866,7 +864,12 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
s32 ret_val = 0;
- mutex_lock(&swflag_mutex);
+ if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
+ &hw->adapter->state)) {
+ WARN(1, "e1000e: %s: contention for Phy access\n",
+ hw->adapter->netdev->name);
+ return -E1000_ERR_PHY;
+ }
while (timeout) {
extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -878,7 +881,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
}
if (!timeout) {
- e_dbg("SW/FW/HW has locked the resource for too long.\n");
+ e_dbg("SW has already locked the resource.\n");
ret_val = -E1000_ERR_CONFIG;
goto out;
}
@@ -898,7 +901,9 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
}
if (!timeout) {
- e_dbg("Failed to acquire the semaphore.\n");
+ e_dbg("Failed to acquire the semaphore, FW or HW has it: "
+ "FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
+ er32(FWSM), extcnf_ctrl);
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
ret_val = -E1000_ERR_CONFIG;
@@ -907,7 +912,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
out:
if (ret_val)
- mutex_unlock(&swflag_mutex);
+ clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
return ret_val;
}
@@ -932,7 +937,7 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
}
- mutex_unlock(&swflag_mutex);
+ clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
}
/**
@@ -1319,16 +1324,20 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
oem_reg |= HV_OEM_BITS_LPLU;
+
+ /* Set Restart auto-neg to activate the bits */
+ if (!e1000_check_reset_block(hw))
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
} else {
- if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
+ if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
oem_reg |= HV_OEM_BITS_GBE_DIS;
- if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
+ if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
+ E1000_PHY_CTRL_NOND0A_LPLU))
oem_reg |= HV_OEM_BITS_LPLU;
}
- /* Restart auto-neg to activate the bits */
- if (!e1000_check_reset_block(hw))
- oem_reg |= HV_OEM_BITS_RESTART_AN;
+
ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
out:
@@ -1574,7 +1583,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
if (ret_val)
goto out;
- ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xFE00);
+ ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
if (ret_val)
goto out;
e1e_rphy(hw, HV_PM_CTRL, &data);
@@ -3135,7 +3144,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
msleep(20);
if (!ret_val)
- mutex_unlock(&swflag_mutex);
+ clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
if (ctrl & E1000_CTRL_PHY_RST) {
ret_val = hw->phy.ops.get_cfg_done(hw);
@@ -3638,15 +3647,14 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
* LPLU, Gig disable, MDIC PHY reset):
* 1) Set Kumeran Near-end loopback
* 2) Clear Kumeran Near-end loopback
- * Should only be called for ICH8[m] devices with IGP_3 Phy.
+ * Should only be called for ICH8[m] devices with any 1G Phy.
**/
void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
{
s32 ret_val;
u16 reg_data;
- if ((hw->mac.type != e1000_ich8lan) ||
- (hw->phy.type != e1000_phy_igp_3))
+ if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
return;
ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
@@ -3682,8 +3690,12 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
ew32(PHY_CTRL, phy_ctrl);
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_gig_downshift_workaround_ich8lan(hw);
+
if (hw->mac.type >= e1000_pchlan) {
e1000_oem_bits_config_ich8lan(hw, false);
+ e1000_phy_hw_reset_ich8lan(hw);
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return;
@@ -4011,7 +4023,7 @@ release:
}
}
-static struct e1000_mac_operations ich8_mac_ops = {
+static const struct e1000_mac_operations ich8_mac_ops = {
.id_led_init = e1000e_id_led_init,
/* check_mng_mode dependent on mac type */
.check_for_link = e1000_check_for_copper_link_ich8lan,
@@ -4030,7 +4042,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
/* id_led_init dependent on mac type */
};
-static struct e1000_phy_operations ich8_phy_ops = {
+static const struct e1000_phy_operations ich8_phy_ops = {
.acquire = e1000_acquire_swflag_ich8lan,
.check_reset_block = e1000_check_reset_block_ich8lan,
.commit = NULL,
@@ -4044,7 +4056,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
.write_reg = e1000e_write_phy_reg_igp,
};
-static struct e1000_nvm_operations ich8_nvm_ops = {
+static const struct e1000_nvm_operations ich8_nvm_ops = {
.acquire = e1000_acquire_nvm_ich8lan,
.read = e1000_read_nvm_ich8lan,
.release = e1000_release_nvm_ich8lan,
@@ -4054,11 +4066,10 @@ static struct e1000_nvm_operations ich8_nvm_ops = {
.write = e1000_write_nvm_ich8lan,
};
-struct e1000_info e1000_ich8_info = {
+const struct e1000_info e1000_ich8_info = {
.mac = e1000_ich8lan,
.flags = FLAG_HAS_WOL
| FLAG_IS_ICH
- | FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
| FLAG_HAS_FLASH
@@ -4071,12 +4082,11 @@ struct e1000_info e1000_ich8_info = {
.nvm_ops = &ich8_nvm_ops,
};
-struct e1000_info e1000_ich9_info = {
+const struct e1000_info e1000_ich9_info = {
.mac = e1000_ich9lan,
.flags = FLAG_HAS_JUMBO_FRAMES
| FLAG_IS_ICH
| FLAG_HAS_WOL
- | FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
| FLAG_HAS_ERT
@@ -4090,12 +4100,11 @@ struct e1000_info e1000_ich9_info = {
.nvm_ops = &ich8_nvm_ops,
};
-struct e1000_info e1000_ich10_info = {
+const struct e1000_info e1000_ich10_info = {
.mac = e1000_ich10lan,
.flags = FLAG_HAS_JUMBO_FRAMES
| FLAG_IS_ICH
| FLAG_HAS_WOL
- | FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
| FLAG_HAS_ERT
@@ -4109,11 +4118,10 @@ struct e1000_info e1000_ich10_info = {
.nvm_ops = &ich8_nvm_ops,
};
-struct e1000_info e1000_pch_info = {
+const struct e1000_info e1000_pch_info = {
.mac = e1000_pchlan,
.flags = FLAG_IS_ICH
| FLAG_HAS_WOL
- | FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
| FLAG_HAS_FLASH
@@ -4129,11 +4137,10 @@ struct e1000_info e1000_pch_info = {
.nvm_ops = &ich8_nvm_ops,
};
-struct e1000_info e1000_pch2_info = {
+const struct e1000_info e1000_pch2_info = {
.mac = e1000_pch2lan,
.flags = FLAG_IS_ICH
| FLAG_HAS_WOL
- | FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
| FLAG_HAS_FLASH
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/ethernet/intel/e1000e/lib.c
index 0893ab107ad..0893ab107ad 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/ethernet/intel/e1000e/lib.c
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
new file mode 100644
index 00000000000..a855db1ad24
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -0,0 +1,6440 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/aer.h>
+#include <linux/prefetch.h>
+
+#include "e1000.h"
+
+#define DRV_EXTRAVERSION "-k"
+
+#define DRV_VERSION "1.5.1" DRV_EXTRAVERSION
+char e1000e_driver_name[] = "e1000e";
+const char e1000e_driver_version[] = DRV_VERSION;
+
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
+
+static const struct e1000_info *e1000_info_tbl[] = {
+ [board_82571] = &e1000_82571_info,
+ [board_82572] = &e1000_82572_info,
+ [board_82573] = &e1000_82573_info,
+ [board_82574] = &e1000_82574_info,
+ [board_82583] = &e1000_82583_info,
+ [board_80003es2lan] = &e1000_es2_info,
+ [board_ich8lan] = &e1000_ich8_info,
+ [board_ich9lan] = &e1000_ich9_info,
+ [board_ich10lan] = &e1000_ich10_info,
+ [board_pchlan] = &e1000_pch_info,
+ [board_pch2lan] = &e1000_pch2_info,
+};
+
+struct e1000_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+
+static const struct e1000_reg_info e1000_reg_info_tbl[] = {
+
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* Rx Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN, "RDLEN"},
+ {E1000_RDH, "RDH"},
+ {E1000_RDT, "RDT"},
+ {E1000_RDTR, "RDTR"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_ERT, "ERT"},
+ {E1000_RDBAL, "RDBAL"},
+ {E1000_RDBAH, "RDBAH"},
+ {E1000_RDFH, "RDFH"},
+ {E1000_RDFT, "RDFT"},
+ {E1000_RDFHS, "RDFHS"},
+ {E1000_RDFTS, "RDFTS"},
+ {E1000_RDFPC, "RDFPC"},
+
+ /* Tx Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL, "TDBAL"},
+ {E1000_TDBAH, "TDBAH"},
+ {E1000_TDLEN, "TDLEN"},
+ {E1000_TDH, "TDH"},
+ {E1000_TDT, "TDT"},
+ {E1000_TIDV, "TIDV"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TADV, "TADV"},
+ {E1000_TARC(0), "TARC"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFTS, "TDFTS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/*
+ * e1000_regdump - register printout routine
+ */
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_RXDCTL(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TXDCTL(n));
+ break;
+ case E1000_TARC(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TARC(n));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n",
+ reginfo->name, __er32(hw, reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+ printk(KERN_INFO "%-15s ", rname);
+ for (n = 0; n < 2; n++)
+ printk(KERN_CONT "%08x ", regs[n]);
+ printk(KERN_CONT "\n");
+}
+
+/*
+ * e1000e_dump - Print registers, Tx-ring and Rx-ring
+ */
+static void e1000e_dump(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_reg_info *reginfo;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc;
+ struct my_u0 {
+ u64 a;
+ u64 b;
+ } *u0;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_packet_split *rx_desc_ps;
+ union e1000_rx_desc_extended *rx_desc;
+ struct my_u1 {
+ u64 a;
+ u64 b;
+ u64 c;
+ u64 d;
+ } *u1;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name, netdev->state, netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ e1000_regdump(hw, reginfo);
+ }
+
+ /* Print Tx Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
+ " leng ntw timestamp\n");
+ buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp);
+
+ /* Print Tx Ring */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
+
+ /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+ *
+ * Legacy Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] (Reserved on Write Back) |
+ * +--------------------------------------------------------------+
+ * 8 | Special | CSS | Status | CMD | CSO | Length |
+ * +--------------------------------------------------------------+
+ * 63 48 47 36 35 32 31 24 23 16 15 0
+ *
+ * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+ * 63 48 47 40 39 32 31 16 15 8 7 0
+ * +----------------------------------------------------------------+
+ * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
+ * +----------------------------------------------------------------+
+ * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ *
+ * Extended Data Descriptor (DTYP=0x1)
+ * +----------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +----------------------------------------------------------------+
+ * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ */
+ printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Legacy format\n");
+ printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Context format\n");
+ printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Data format\n");
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
+ "%04X %3X %016llX %p",
+ (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
+ (unsigned long long)le64_to_cpu(u0->a),
+ (unsigned long long)le64_to_cpu(u0->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length, buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp,
+ buffer_info->skb);
+ if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
+ }
+
+ /* Print Rx Ring Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ printk(KERN_INFO " %5d %5X %5X\n", 0,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+
+ /* Print Rx Ring */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
+ switch (adapter->rx_ps_pages) {
+ case 1:
+ case 2:
+ case 3:
+ /* [Extended] Packet Split Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * 0 | Buffer Address 0 [63:0] |
+ * +-----------------------------------------------------+
+ * 8 | Buffer Address 1 [63:0] |
+ * +-----------------------------------------------------+
+ * 16 | Buffer Address 2 [63:0] |
+ * +-----------------------------------------------------+
+ * 24 | Buffer Address 3 [63:0] |
+ * +-----------------------------------------------------+
+ */
+ printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
+ "[buffer 1 63:0 ] "
+ "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
+ "[bi->skb] <-- Ext Pkt Split format\n");
+ /* [Extended] Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 13 12 8 7 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
+ * | Checksum | Ident | | Queue | | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
+ "[vl l0 ee es] "
+ "[ l3 l2 l1 hs] [reserved ] ---------------- "
+ "[bi->skb] <-- Ext Rx Write-Back format\n");
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
+ u1 = (struct my_u1 *)rx_desc_ps;
+ staterr =
+ le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX %016llX %016llX "
+ "---------------- %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %016llX %016llX %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(buffer_info->dma),
+ adapter->rx_ps_bsize0, true);
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+ }
+ break;
+ default:
+ case 0:
+ /* Extended Receive Descriptor (Read) Format
+ *
+ * +-----------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +-----------------------------------------------------+
+ * 8 | Reserved |
+ * +-----------------------------------------------------+
+ */
+ printk(KERN_INFO "R [desc] [buf addr 63:0 ] "
+ "[reserved 63:0 ] [bi->dma ] "
+ "[bi->skb] <-- Ext (Read) format\n");
+ /* Extended Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 24 23 4 3 0
+ * +------------------------------------------------------+
+ * | RSS Hash | | | |
+ * 0 +-------------------+ Rsvd | Reserved | MRQ RSS |
+ * | Packet | IP | | | Type |
+ * | Checksum | Ident | | | |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ printk(KERN_INFO "RWB[desc] [cs ipid mrq] "
+ "[vt ln xe xs] "
+ "[bi->skb] <-- Ext (Write-Back) format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ u1 = (struct my_u1 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX ---------------- %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16,
+ 1,
+ phys_to_virt
+ (buffer_info->dma),
+ adapter->rx_buffer_len,
+ true);
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+ }
+ }
+
+exit:
+ return;
+}
+
+/**
+ * e1000_desc_unused - calculate if we have unused descriptors
+ **/
+static int e1000_desc_unused(struct e1000_ring *ring)
+{
+ if (ring->next_to_clean > ring->next_to_use)
+ return ring->next_to_clean - ring->next_to_use - 1;
+
+ return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
+
+/**
+ * e1000_receive_skb - helper function to handle Rx indications
+ * @adapter: board private structure
+ * @status: descriptor status field as written by hardware
+ * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ * @skb: pointer to sk_buff to be indicated to stack
+ **/
+static void e1000_receive_skb(struct e1000_adapter *adapter,
+ struct net_device *netdev, struct sk_buff *skb,
+ u8 status, __le16 vlan)
+{
+ u16 tag = le16_to_cpu(vlan);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (status & E1000_RXD_STAT_VP)
+ __vlan_hwaccel_put_tag(skb, tag);
+
+ napi_gro_receive(&adapter->napi, skb);
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload
+ * @adapter: board private structure
+ * @status_err: receive descriptor status and error fields
+ * @csum: receive descriptor csum field
+ * @sk_buff: socket buffer with received data
+ **/
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+ u32 csum, struct sk_buff *skb)
+{
+ u16 status = (u16)status_err;
+ u8 errors = (u8)(status_err >> 24);
+
+ skb_checksum_none_assert(skb);
+
+ /* Ignore Checksum bit is set */
+ if (status & E1000_RXD_STAT_IXSM)
+ return;
+ /* TCP/UDP checksum error bit is set */
+ if (errors & E1000_RXD_ERR_TCPE) {
+ /* let the stack verify checksum errors */
+ adapter->hw_csum_err++;
+ return;
+ }
+
+ /* TCP/UDP Checksum has not been calculated */
+ if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+ return;
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (status & E1000_RXD_STAT_TCPCS) {
+ /* TCP checksum is good */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ /*
+ * IP fragment with UDP payload
+ * Hardware complements the payload checksum, so we undo it
+ * and then put the value in host order for further stack use.
+ */
+ __sum16 sum = (__force __sum16)htons(csum);
+ skb->csum = csum_unfold(~sum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+ adapter->hw_csum_good++;
+}
+
+/**
+ * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
+ * @hw: pointer to the HW structure
+ * @tail: address of tail descriptor register
+ * @i: value to write to tail descriptor register
+ *
+ * When updating the tail register, the ME could be accessing Host CSR
+ * registers at the same time. Normally, this is handled in h/w by an
+ * arbiter but on some parts there is a bug that acknowledges Host accesses
+ * later than it should which could result in the descriptor register to
+ * have an incorrect value. Workaround this by checking the FWSM register
+ * which has bit 24 set while ME is accessing Host CSR registers, wait
+ * if it is set and try again a number of times.
+ **/
+static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
+ unsigned int i)
+{
+ unsigned int j = 0;
+
+ while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
+ (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
+ udelay(50);
+
+ writel(i, tail);
+
+ if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
+ return E1000_ERR_SWFW_SYNC;
+
+ return 0;
+}
+
+static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
+{
+ u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (e1000e_update_tail_wa(hw, tail, i)) {
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ e_err("ME firmware caused invalid RDT - resetting\n");
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
+{
+ u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (e1000e_update_tail_wa(hw, tail, i)) {
+ u32 tctl = er32(TCTL);
+ ew32(TCTL, tctl & ~E1000_TCTL_EN);
+ e_err("ME firmware caused invalid TDT - resetting\n");
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers
+ * @adapter: address of board private structure
+ **/
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ int cleaned_count, gfp_t gfp)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_extended *rx_desc;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = adapter->rx_buffer_len;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ goto map_skb;
+ }
+
+ skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
+ if (!skb) {
+ /* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ buffer_info->skb = skb;
+map_skb:
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ dev_err(&pdev->dev, "Rx DMA map failed\n");
+ adapter->rx_dma_failed++;
+ break;
+ }
+
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(adapter, i);
+ else
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ }
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+ rx_ring->next_to_use = i;
+}
+
+/**
+ * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ **/
+static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+ int cleaned_count, gfp_t gfp)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union e1000_rx_desc_packet_split *rx_desc;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ps_page *ps_page;
+ struct sk_buff *skb;
+ unsigned int i, j;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+
+ for (j = 0; j < PS_PAGE_BUFFERS; j++) {
+ ps_page = &buffer_info->ps_pages[j];
+ if (j >= adapter->rx_ps_pages) {
+ /* all unused desc entries get hw null ptr */
+ rx_desc->read.buffer_addr[j + 1] =
+ ~cpu_to_le64(0);
+ continue;
+ }
+ if (!ps_page->page) {
+ ps_page->page = alloc_page(gfp);
+ if (!ps_page->page) {
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+ ps_page->dma = dma_map_page(&pdev->dev,
+ ps_page->page,
+ 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ ps_page->dma)) {
+ dev_err(&adapter->pdev->dev,
+ "Rx DMA page map failed\n");
+ adapter->rx_dma_failed++;
+ goto no_buffers;
+ }
+ }
+ /*
+ * Refresh the desc even if buffer_addrs
+ * didn't change because each write-back
+ * erases this info.
+ */
+ rx_desc->read.buffer_addr[j + 1] =
+ cpu_to_le64(ps_page->dma);
+ }
+
+ skb = __netdev_alloc_skb_ip_align(netdev,
+ adapter->rx_ps_bsize0,
+ gfp);
+
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ buffer_info->skb = skb;
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
+ adapter->rx_ps_bsize0,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ dev_err(&pdev->dev, "Rx DMA map failed\n");
+ adapter->rx_dma_failed++;
+ /* cleanup skb */
+ dev_kfree_skb_any(skb);
+ buffer_info->skb = NULL;
+ break;
+ }
+
+ rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(adapter, i << 1);
+ else
+ writel(i << 1,
+ adapter->hw.hw_addr + rx_ring->tail);
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+no_buffers:
+ rx_ring->next_to_use = i;
+}
+
+/**
+ * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
+ * @adapter: address of board private structure
+ * @cleaned_count: number of buffers to allocate this pass
+ **/
+
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+ int cleaned_count, gfp_t gfp)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union e1000_rx_desc_extended *rx_desc;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = 256 - 16 /* for skb_reserve */;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ goto check_page;
+ }
+
+ skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ buffer_info->skb = skb;
+check_page:
+ /* allocate a new page if necessary */
+ if (!buffer_info->page) {
+ buffer_info->page = alloc_page(gfp);
+ if (unlikely(!buffer_info->page)) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+ }
+
+ if (!buffer_info->dma)
+ buffer_info->dma = dma_map_page(&pdev->dev,
+ buffer_info->page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(adapter, i);
+ else
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ }
+}
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done, there
+ * is no guarantee that everything was cleaned
+ **/
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ int *work_done, int work_to_do)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_extended *rx_desc, *next_rxd;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ u32 length, staterr;
+ unsigned int i;
+ int cleaned_count = 0;
+ bool cleaned = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ struct sk_buff *skb;
+
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = 1;
+ cleaned_count++;
+ dma_unmap_single(&pdev->dev,
+ buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ length = le16_to_cpu(rx_desc->wb.upper.length);
+
+ /*
+ * !EOP means multiple descriptors were used to store a single
+ * packet, if that's the case we need to toss it. In fact, we
+ * need to toss every packet with the EOP bit clear and the
+ * next frame that _does_ have the EOP bit set, as it is by
+ * definition only a frame fragment
+ */
+ if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
+ /* All receives must fit into a single buffer */
+ e_dbg("Receive packet consumed multiple buffers\n");
+ /* recycle */
+ buffer_info->skb = skb;
+ if (staterr & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
+ goto next_desc;
+ }
+
+ if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+ /* recycle */
+ buffer_info->skb = skb;
+ goto next_desc;
+ }
+
+ /* adjust length to remove Ethernet CRC */
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
+ length -= 4;
+
+ total_rx_bytes += length;
+ total_rx_packets++;
+
+ /*
+ * code added for copybreak, this should improve
+ * performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+ if (length < copybreak) {
+ struct sk_buff *new_skb =
+ netdev_alloc_skb_ip_align(netdev, length);
+ if (new_skb) {
+ skb_copy_to_linear_data_offset(new_skb,
+ -NET_IP_ALIGN,
+ (skb->data -
+ NET_IP_ALIGN),
+ (length +
+ NET_IP_ALIGN));
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = skb;
+ skb = new_skb;
+ }
+ /* else just continue with the old one */
+ }
+ /* end copybreak code */
+ skb_put(skb, length);
+
+ /* Receive Checksum Offload */
+ e1000_rx_checksum(adapter, staterr,
+ le16_to_cpu(rx_desc->wb.lower.hi_dword.
+ csum_ip.csum), skb);
+
+ e1000_receive_skb(adapter, netdev, skb, staterr,
+ rx_desc->wb.upper.vlan);
+
+next_desc:
+ rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
+ adapter->alloc_rx_buf(adapter, cleaned_count,
+ GFP_ATOMIC);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = e1000_desc_unused(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
+
+ adapter->total_rx_bytes += total_rx_bytes;
+ adapter->total_rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+static void e1000_put_txbuf(struct e1000_adapter *adapter,
+ struct e1000_buffer *buffer_info)
+{
+ if (buffer_info->dma) {
+ if (buffer_info->mapped_as_page)
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ buffer_info->dma = 0;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ buffer_info->time_stamp = 0;
+}
+
+static void e1000_print_hw_hang(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ print_hang_task);
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ unsigned int i = tx_ring->next_to_clean;
+ unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
+ struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_status, phy_1000t_status, phy_ext_status;
+ u16 pci_status;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ e1e_rphy(hw, PHY_STATUS, &phy_status);
+ e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
+ e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
+
+ pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
+
+ /* detected Hardware unit hang */
+ e_err("Detected Hardware Unit Hang:\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]:\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n"
+ "MAC Status <%x>\n"
+ "PHY Status <%x>\n"
+ "PHY 1000BASE-T Status <%x>\n"
+ "PHY Extended Status <%x>\n"
+ "PCI Status <%x>\n",
+ readl(adapter->hw.hw_addr + tx_ring->head),
+ readl(adapter->hw.hw_addr + tx_ring->tail),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->buffer_info[eop].time_stamp,
+ eop,
+ jiffies,
+ eop_desc->upper.fields.status,
+ er32(STATUS),
+ phy_status,
+ phy_1000t_status,
+ phy_ext_status,
+ pci_status);
+}
+
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done, there
+ * is no guarantee that everything was cleaned
+ **/
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc, *eop_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i, eop;
+ unsigned int count = 0;
+ unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+ while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ (count < tx_ring->count)) {
+ bool cleaned = false;
+ rmb(); /* read buffer_info after eop_desc */
+ for (; !cleaned; count++) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ cleaned = (i == eop);
+
+ if (cleaned) {
+ total_tx_packets += buffer_info->segs;
+ total_tx_bytes += buffer_info->bytecount;
+ }
+
+ e1000_put_txbuf(adapter, buffer_info);
+ tx_desc->upper.data = 0;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ if (i == tx_ring->next_to_use)
+ break;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
+ if (count && netif_carrier_ok(netdev) &&
+ e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) &&
+ !(test_bit(__E1000_DOWN, &adapter->state))) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+ }
+
+ if (adapter->detect_tx_hung) {
+ /*
+ * Detect a transmit hang in hardware, this serializes the
+ * check with the clearing of time_stamp and movement of i
+ */
+ adapter->detect_tx_hung = 0;
+ if (tx_ring->buffer_info[i].time_stamp &&
+ time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+ + (adapter->tx_timeout_factor * HZ)) &&
+ !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+ schedule_work(&adapter->print_hang_task);
+ netif_stop_queue(netdev);
+ }
+ }
+ adapter->total_tx_bytes += total_tx_bytes;
+ adapter->total_tx_packets += total_tx_packets;
+ return count < tx_ring->count;
+}
+
+/**
+ * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done, there
+ * is no guarantee that everything was cleaned
+ **/
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+ int *work_done, int work_to_do)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ struct e1000_ps_page *ps_page;
+ struct sk_buff *skb;
+ unsigned int i, j;
+ u32 length, staterr;
+ int cleaned_count = 0;
+ bool cleaned = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ skb = buffer_info->skb;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ /* in the packet split case this is header only */
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = 1;
+ cleaned_count++;
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ /* see !EOP comment in other Rx routine */
+ if (!(staterr & E1000_RXD_STAT_EOP))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
+ e_dbg("Packet Split buffers didn't pick up the full "
+ "packet\n");
+ dev_kfree_skb_irq(skb);
+ if (staterr & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
+ goto next_desc;
+ }
+
+ if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ length = le16_to_cpu(rx_desc->wb.middle.length0);
+
+ if (!length) {
+ e_dbg("Last part of the packet spanning multiple "
+ "descriptors\n");
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ /* Good Receive */
+ skb_put(skb, length);
+
+ {
+ /*
+ * this looks ugly, but it seems compiler issues make it
+ * more efficient than reusing j
+ */
+ int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
+
+ /*
+ * page alloc/put takes too long and effects small packet
+ * throughput, so unsplit small packets and save the alloc/put
+ * only valid in softirq (napi) context to call kmap_*
+ */
+ if (l1 && (l1 <= copybreak) &&
+ ((length + l1) <= adapter->rx_ps_bsize0)) {
+ u8 *vaddr;
+
+ ps_page = &buffer_info->ps_pages[0];
+
+ /*
+ * there is no documentation about how to call
+ * kmap_atomic, so we can't hold the mapping
+ * very long
+ */
+ dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
+ memcpy(skb_tail_pointer(skb), vaddr, l1);
+ kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+ dma_sync_single_for_device(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* remove the CRC */
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
+ l1 -= 4;
+
+ skb_put(skb, l1);
+ goto copydone;
+ } /* if */
+ }
+
+ for (j = 0; j < PS_PAGE_BUFFERS; j++) {
+ length = le16_to_cpu(rx_desc->wb.upper.length[j]);
+ if (!length)
+ break;
+
+ ps_page = &buffer_info->ps_pages[j];
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ ps_page->dma = 0;
+ skb_fill_page_desc(skb, j, ps_page->page, 0, length);
+ ps_page->page = NULL;
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += PAGE_SIZE;
+ }
+
+ /* strip the ethernet crc, problem is we're using pages now so
+ * this whole operation can get a little cpu intensive
+ */
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
+ pskb_trim(skb, skb->len - 4);
+
+copydone:
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ e1000_rx_checksum(adapter, staterr, le16_to_cpu(
+ rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
+
+ if (rx_desc->wb.upper.header_status &
+ cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
+ adapter->rx_hdr_split++;
+
+ e1000_receive_skb(adapter, netdev, skb,
+ staterr, rx_desc->wb.middle.vlan);
+
+next_desc:
+ rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
+ buffer_info->skb = NULL;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
+ adapter->alloc_rx_buf(adapter, cleaned_count,
+ GFP_ATOMIC);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+
+ staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = e1000_desc_unused(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
+
+ adapter->total_rx_bytes += total_rx_bytes;
+ adapter->total_rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+/**
+ * e1000_consume_page - helper function
+ **/
+static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
+ u16 length)
+{
+ bi->page = NULL;
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += PAGE_SIZE;
+}
+
+/**
+ * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done, there
+ * is no guarantee that everything was cleaned
+ **/
+
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ int *work_done, int work_to_do)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_extended *rx_desc, *next_rxd;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ u32 length, staterr;
+ unsigned int i;
+ int cleaned_count = 0;
+ bool cleaned = false;
+ unsigned int total_rx_bytes=0, total_rx_packets=0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ struct sk_buff *skb;
+
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ ++i;
+ if (i == rx_ring->count)
+ i = 0;
+ next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+ dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ length = le16_to_cpu(rx_desc->wb.upper.length);
+
+ /* errors is only valid for DD + EOP descriptors */
+ if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
+ (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) {
+ /* recycle both page and skb */
+ buffer_info->skb = skb;
+ /* an error means any chain goes out the window too */
+ if (rx_ring->rx_skb_top)
+ dev_kfree_skb_irq(rx_ring->rx_skb_top);
+ rx_ring->rx_skb_top = NULL;
+ goto next_desc;
+ }
+
+#define rxtop (rx_ring->rx_skb_top)
+ if (!(staterr & E1000_RXD_STAT_EOP)) {
+ /* this descriptor is only the beginning (or middle) */
+ if (!rxtop) {
+ /* this is the beginning of a chain */
+ rxtop = skb;
+ skb_fill_page_desc(rxtop, 0, buffer_info->page,
+ 0, length);
+ } else {
+ /* this is the middle of a chain */
+ skb_fill_page_desc(rxtop,
+ skb_shinfo(rxtop)->nr_frags,
+ buffer_info->page, 0, length);
+ /* re-use the skb, only consumed the page */
+ buffer_info->skb = skb;
+ }
+ e1000_consume_page(buffer_info, rxtop, length);
+ goto next_desc;
+ } else {
+ if (rxtop) {
+ /* end of the chain */
+ skb_fill_page_desc(rxtop,
+ skb_shinfo(rxtop)->nr_frags,
+ buffer_info->page, 0, length);
+ /* re-use the current skb, we only consumed the
+ * page */
+ buffer_info->skb = skb;
+ skb = rxtop;
+ rxtop = NULL;
+ e1000_consume_page(buffer_info, skb, length);
+ } else {
+ /* no chain, got EOP, this buf is the packet
+ * copybreak to save the put_page/alloc_page */
+ if (length <= copybreak &&
+ skb_tailroom(skb) >= length) {
+ u8 *vaddr;
+ vaddr = kmap_atomic(buffer_info->page,
+ KM_SKB_DATA_SOFTIRQ);
+ memcpy(skb_tail_pointer(skb), vaddr,
+ length);
+ kunmap_atomic(vaddr,
+ KM_SKB_DATA_SOFTIRQ);
+ /* re-use the page, so don't erase
+ * buffer_info->page */
+ skb_put(skb, length);
+ } else {
+ skb_fill_page_desc(skb, 0,
+ buffer_info->page, 0,
+ length);
+ e1000_consume_page(buffer_info, skb,
+ length);
+ }
+ }
+ }
+
+ /* Receive Checksum Offload XXX recompute due to CRC strip? */
+ e1000_rx_checksum(adapter, staterr,
+ le16_to_cpu(rx_desc->wb.lower.hi_dword.
+ csum_ip.csum), skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ /* eth type trans needs skb->data to point to something */
+ if (!pskb_may_pull(skb, ETH_HLEN)) {
+ e_err("pskb_may_pull failed.\n");
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ e1000_receive_skb(adapter, netdev, skb, staterr,
+ rx_desc->wb.upper.vlan);
+
+next_desc:
+ rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+ adapter->alloc_rx_buf(adapter, cleaned_count,
+ GFP_ATOMIC);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = e1000_desc_unused(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
+
+ adapter->total_rx_bytes += total_rx_bytes;
+ adapter->total_rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ **/
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ps_page *ps_page;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int i, j;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ if (buffer_info->dma) {
+ if (adapter->clean_rx == e1000_clean_rx_irq)
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_ps_bsize0,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+ }
+
+ if (buffer_info->page) {
+ put_page(buffer_info->page);
+ buffer_info->page = NULL;
+ }
+
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+
+ for (j = 0; j < PS_PAGE_BUFFERS; j++) {
+ ps_page = &buffer_info->ps_pages[j];
+ if (!ps_page->page)
+ break;
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ ps_page->dma = 0;
+ put_page(ps_page->page);
+ ps_page->page = NULL;
+ }
+ }
+
+ /* there also may be some cached data from a chained receive */
+ if (rx_ring->rx_skb_top) {
+ dev_kfree_skb(rx_ring->rx_skb_top);
+ rx_ring->rx_skb_top = NULL;
+ }
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
+
+ writel(0, adapter->hw.hw_addr + rx_ring->head);
+ writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+static void e1000e_downshift_workaround(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter, downshift_task);
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
+}
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr_msi(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = er32(ICR);
+
+ /*
+ * read ICR disables interrupts using IAM
+ */
+
+ if (icr & E1000_ICR_LSC) {
+ hw->mac.get_link_status = 1;
+ /*
+ * ICH8 workaround-- Call gig speed drop workaround on cable
+ * disconnect (LSC) before accessing any PHY registers
+ */
+ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
+ (!(er32(STATUS) & E1000_STATUS_LU)))
+ schedule_work(&adapter->downshift_task);
+
+ /*
+ * 80003ES2LAN workaround-- For packet buffer work-around on
+ * link down event; disable receives here in the ISR and reset
+ * adapter in watchdog
+ */
+ if (netif_carrier_ok(netdev) &&
+ adapter->flags & FLAG_RX_NEEDS_RESTART) {
+ /* disable receives */
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ adapter->flags |= FLAG_RX_RESTART_NOW;
+ }
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl, icr = er32(ICR);
+
+ if (!icr || test_bit(__E1000_DOWN, &adapter->state))
+ return IRQ_NONE; /* Not our interrupt */
+
+ /*
+ * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ * not set, then the adapter didn't send an interrupt
+ */
+ if (!(icr & E1000_ICR_INT_ASSERTED))
+ return IRQ_NONE;
+
+ /*
+ * Interrupt Auto-Mask...upon reading ICR,
+ * interrupts are masked. No need for the
+ * IMC write
+ */
+
+ if (icr & E1000_ICR_LSC) {
+ hw->mac.get_link_status = 1;
+ /*
+ * ICH8 workaround-- Call gig speed drop workaround on cable
+ * disconnect (LSC) before accessing any PHY registers
+ */
+ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
+ (!(er32(STATUS) & E1000_STATUS_LU)))
+ schedule_work(&adapter->downshift_task);
+
+ /*
+ * 80003ES2LAN workaround--
+ * For packet buffer work-around on link down event;
+ * disable receives here in the ISR and
+ * reset adapter in watchdog
+ */
+ if (netif_carrier_ok(netdev) &&
+ (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
+ /* disable receives */
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ adapter->flags |= FLAG_RX_RESTART_NOW;
+ }
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t e1000_msix_other(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = er32(ICR);
+
+ if (!(icr & E1000_ICR_INT_ASSERTED)) {
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ ew32(IMS, E1000_IMS_OTHER);
+ return IRQ_NONE;
+ }
+
+ if (icr & adapter->eiac_mask)
+ ew32(ICS, (icr & adapter->eiac_mask));
+
+ if (icr & E1000_ICR_OTHER) {
+ if (!(icr & E1000_ICR_LSC))
+ goto no_link_interrupt;
+ hw->mac.get_link_status = 1;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+no_link_interrupt:
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
+
+ return IRQ_HANDLED;
+}
+
+
+static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+
+
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+
+ if (!e1000_clean_tx_irq(adapter))
+ /* Ring was not completely cleaned, so fire another interrupt */
+ ew32(ICS, tx_ring->ims_val);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /* Write the ITR value calculated at the end of the
+ * previous interrupt.
+ */
+ if (adapter->rx_ring->set_itr) {
+ writel(1000000000 / (adapter->rx_ring->itr_val * 256),
+ adapter->hw.hw_addr + adapter->rx_ring->itr_register);
+ adapter->rx_ring->set_itr = 0;
+ }
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->napi);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_configure_msix - Configure MSI-X hardware
+ *
+ * e1000_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ **/
+static void e1000_configure_msix(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ int vector = 0;
+ u32 ctrl_ext, ivar = 0;
+
+ adapter->eiac_mask = 0;
+
+ /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
+ if (hw->mac.type == e1000_82574) {
+ u32 rfctl = er32(RFCTL);
+ rfctl |= E1000_RFCTL_ACK_DIS;
+ ew32(RFCTL, rfctl);
+ }
+
+#define E1000_IVAR_INT_ALLOC_VALID 0x8
+ /* Configure Rx vector */
+ rx_ring->ims_val = E1000_IMS_RXQ0;
+ adapter->eiac_mask |= rx_ring->ims_val;
+ if (rx_ring->itr_val)
+ writel(1000000000 / (rx_ring->itr_val * 256),
+ hw->hw_addr + rx_ring->itr_register);
+ else
+ writel(1, hw->hw_addr + rx_ring->itr_register);
+ ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
+
+ /* Configure Tx vector */
+ tx_ring->ims_val = E1000_IMS_TXQ0;
+ vector++;
+ if (tx_ring->itr_val)
+ writel(1000000000 / (tx_ring->itr_val * 256),
+ hw->hw_addr + tx_ring->itr_register);
+ else
+ writel(1, hw->hw_addr + tx_ring->itr_register);
+ adapter->eiac_mask |= tx_ring->ims_val;
+ ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
+
+ /* set vector for Other Causes, e.g. link changes */
+ vector++;
+ ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
+ if (rx_ring->itr_val)
+ writel(1000000000 / (rx_ring->itr_val * 256),
+ hw->hw_addr + E1000_EITR_82574(vector));
+ else
+ writel(1, hw->hw_addr + E1000_EITR_82574(vector));
+
+ /* Cause Tx interrupts on every write back */
+ ivar |= (1 << 31);
+
+ ew32(IVAR, ivar);
+
+ /* enable MSI-X PBA support */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
+
+ /* Auto-Mask Other interrupts upon ICR read */
+#define E1000_EIAC_MASK_82574 0x01F00000
+ ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
+ ctrl_ext |= E1000_CTRL_EXT_EIAME;
+ ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
+}
+
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
+{
+ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & FLAG_MSI_ENABLED) {
+ pci_disable_msi(adapter->pdev);
+ adapter->flags &= ~FLAG_MSI_ENABLED;
+ }
+}
+
+/**
+ * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
+{
+ int err;
+ int i;
+
+ switch (adapter->int_mode) {
+ case E1000E_INT_MODE_MSIX:
+ if (adapter->flags & FLAG_HAS_MSIX) {
+ adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
+ adapter->msix_entries = kcalloc(adapter->num_vectors,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (adapter->msix_entries) {
+ for (i = 0; i < adapter->num_vectors; i++)
+ adapter->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(adapter->pdev,
+ adapter->msix_entries,
+ adapter->num_vectors);
+ if (err == 0)
+ return;
+ }
+ /* MSI-X failed, so fall through and try MSI */
+ e_err("Failed to initialize MSI-X interrupts. "
+ "Falling back to MSI interrupts.\n");
+ e1000e_reset_interrupt_capability(adapter);
+ }
+ adapter->int_mode = E1000E_INT_MODE_MSI;
+ /* Fall through */
+ case E1000E_INT_MODE_MSI:
+ if (!pci_enable_msi(adapter->pdev)) {
+ adapter->flags |= FLAG_MSI_ENABLED;
+ } else {
+ adapter->int_mode = E1000E_INT_MODE_LEGACY;
+ e_err("Failed to initialize MSI interrupts. Falling "
+ "back to legacy interrupts.\n");
+ }
+ /* Fall through */
+ case E1000E_INT_MODE_LEGACY:
+ /* Don't do anything; this is the system default */
+ break;
+ }
+
+ /* store the number of vectors being used */
+ adapter->num_vectors = 1;
+}
+
+/**
+ * e1000_request_msix - Initialize MSI-X interrupts
+ *
+ * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
+ **/
+static int e1000_request_msix(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0, vector = 0;
+
+ if (strlen(netdev->name) < (IFNAMSIZ - 5))
+ snprintf(adapter->rx_ring->name,
+ sizeof(adapter->rx_ring->name) - 1,
+ "%s-rx-0", netdev->name);
+ else
+ memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
+ err = request_irq(adapter->msix_entries[vector].vector,
+ e1000_intr_msix_rx, 0, adapter->rx_ring->name,
+ netdev);
+ if (err)
+ goto out;
+ adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
+ adapter->rx_ring->itr_val = adapter->itr;
+ vector++;
+
+ if (strlen(netdev->name) < (IFNAMSIZ - 5))
+ snprintf(adapter->tx_ring->name,
+ sizeof(adapter->tx_ring->name) - 1,
+ "%s-tx-0", netdev->name);
+ else
+ memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
+ err = request_irq(adapter->msix_entries[vector].vector,
+ e1000_intr_msix_tx, 0, adapter->tx_ring->name,
+ netdev);
+ if (err)
+ goto out;
+ adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
+ adapter->tx_ring->itr_val = adapter->itr;
+ vector++;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ e1000_msix_other, 0, netdev->name, netdev);
+ if (err)
+ goto out;
+
+ e1000_configure_msix(adapter);
+ return 0;
+out:
+ return err;
+}
+
+/**
+ * e1000_request_irq - initialize interrupts
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (adapter->msix_entries) {
+ err = e1000_request_msix(adapter);
+ if (!err)
+ return err;
+ /* fall back to MSI */
+ e1000e_reset_interrupt_capability(adapter);
+ adapter->int_mode = E1000E_INT_MODE_MSI;
+ e1000e_set_interrupt_capability(adapter);
+ }
+ if (adapter->flags & FLAG_MSI_ENABLED) {
+ err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
+ netdev->name, netdev);
+ if (!err)
+ return err;
+
+ /* fall back to legacy interrupt */
+ e1000e_reset_interrupt_capability(adapter);
+ adapter->int_mode = E1000E_INT_MODE_LEGACY;
+ }
+
+ err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
+ netdev->name, netdev);
+ if (err)
+ e_err("Unable to allocate interrupt, Error: %d\n", err);
+
+ return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->msix_entries) {
+ int vector = 0;
+
+ free_irq(adapter->msix_entries[vector].vector, netdev);
+ vector++;
+
+ free_irq(adapter->msix_entries[vector].vector, netdev);
+ vector++;
+
+ /* Other Causes interrupt vector */
+ free_irq(adapter->msix_entries[vector].vector, netdev);
+ return;
+ }
+
+ free_irq(adapter->pdev->irq, netdev);
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ **/
+static void e1000_irq_disable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ ew32(IMC, ~0);
+ if (adapter->msix_entries)
+ ew32(EIAC_82574, 0);
+ e1e_flush();
+
+ if (adapter->msix_entries) {
+ int i;
+ for (i = 0; i < adapter->num_vectors; i++)
+ synchronize_irq(adapter->msix_entries[i].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ **/
+static void e1000_irq_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
+ ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+ } else {
+ ew32(IMS, IMS_ENABLE_MASK);
+ }
+ e1e_flush();
+}
+
+/**
+ * e1000e_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ **/
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+ u32 swsm;
+
+ /* Let firmware know the driver has taken over */
+ if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
+ } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
+ ctrl_ext = er32(CTRL_EXT);
+ ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ }
+}
+
+/**
+ * e1000e_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573) i
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+ u32 swsm;
+
+ /* Let firmware taken over control of h/w */
+ if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
+ } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
+ ctrl_ext = er32(CTRL_EXT);
+ ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ }
+}
+
+/**
+ * @e1000_alloc_ring - allocate memory for a ring structure
+ **/
+static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
+ struct e1000_ring *ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
+ GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ int err = -ENOMEM, size;
+
+ size = sizeof(struct e1000_buffer) * tx_ring->count;
+ tx_ring->buffer_info = vzalloc(size);
+ if (!tx_ring->buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ err = e1000_alloc_ring_dma(adapter, tx_ring);
+ if (err)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ return 0;
+err:
+ vfree(tx_ring->buffer_info);
+ e_err("Unable to allocate memory for the transmit descriptor ring\n");
+ return err;
+}
+
+/**
+ * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_buffer *buffer_info;
+ int i, size, desc_len, err = -ENOMEM;
+
+ size = sizeof(struct e1000_buffer) * rx_ring->count;
+ rx_ring->buffer_info = vzalloc(size);
+ if (!rx_ring->buffer_info)
+ goto err;
+
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
+ sizeof(struct e1000_ps_page),
+ GFP_KERNEL);
+ if (!buffer_info->ps_pages)
+ goto err_pages;
+ }
+
+ desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * desc_len;
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ err = e1000_alloc_ring_dma(adapter, rx_ring);
+ if (err)
+ goto err_pages;
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ rx_ring->rx_skb_top = NULL;
+
+ return 0;
+
+err_pages:
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ kfree(buffer_info->ps_pages);
+ }
+err:
+ vfree(rx_ring->buffer_info);
+ e_err("Unable to allocate memory for the receive descriptor ring\n");
+ return err;
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ **/
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ e1000_put_txbuf(adapter, buffer_info);
+ }
+
+ size = sizeof(struct e1000_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ writel(0, adapter->hw.hw_addr + tx_ring->head);
+ writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * e1000e_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+void e1000e_free_tx_resources(struct e1000_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+
+ e1000_clean_tx_ring(adapter);
+
+ vfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
+ tx_ring->desc = NULL;
+}
+
+/**
+ * e1000e_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void e1000e_free_rx_resources(struct e1000_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ int i;
+
+ e1000_clean_rx_ring(adapter);
+
+ for (i = 0; i < rx_ring->count; i++)
+ kfree(rx_ring->buffer_info[i].ps_pages);
+
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+ rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput. This functionality is controlled
+ * by the InterruptThrottleRate module parameter.
+ **/
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+ u16 itr_setting, int packets,
+ int bytes)
+{
+ unsigned int retval = itr_setting;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+ switch (itr_setting) {
+ case lowest_latency:
+ /* handle TSO and jumbo frames */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ retval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* this if handles the TSO accounting */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 10) || ((bytes/packets) > 1200))
+ retval = bulk_latency;
+ else if ((packets > 35))
+ retval = lowest_latency;
+ } else if (bytes/packets > 2000) {
+ retval = bulk_latency;
+ } else if (packets <= 2 && bytes < 512) {
+ retval = lowest_latency;
+ }
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ retval = low_latency;
+ } else if (bytes < 6000) {
+ retval = low_latency;
+ }
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 current_itr;
+ u32 new_itr = adapter->itr;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ if (adapter->link_speed != SPEED_1000) {
+ current_itr = 0;
+ new_itr = 4000;
+ goto set_itr_now;
+ }
+
+ if (adapter->flags2 & FLAG2_DISABLE_AIM) {
+ new_itr = 0;
+ goto set_itr_now;
+ }
+
+ adapter->tx_itr = e1000_update_itr(adapter,
+ adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+ adapter->tx_itr = low_latency;
+
+ adapter->rx_itr = e1000_update_itr(adapter,
+ adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+ adapter->rx_itr = low_latency;
+
+ current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 70000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ new_itr = 4000;
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != adapter->itr) {
+ /*
+ * this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing
+ */
+ new_itr = new_itr > adapter->itr ?
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
+ adapter->itr = new_itr;
+ adapter->rx_ring->itr_val = new_itr;
+ if (adapter->msix_entries)
+ adapter->rx_ring->set_itr = 1;
+ else
+ if (new_itr)
+ ew32(ITR, 1000000000 / (new_itr * 256));
+ else
+ ew32(ITR, 0);
+ }
+}
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ **/
+static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+ adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ goto err;
+
+ adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ if (!adapter->rx_ring)
+ goto err;
+
+ return 0;
+err:
+ e_err("Unable to allocate memory for queues\n");
+ kfree(adapter->rx_ring);
+ kfree(adapter->tx_ring);
+ return -ENOMEM;
+}
+
+/**
+ * e1000_clean - NAPI Rx polling callback
+ * @napi: struct associated with this polling callback
+ * @budget: amount of packets driver is allowed to process this poll
+ **/
+static int e1000_clean(struct napi_struct *napi, int budget)
+{
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *poll_dev = adapter->netdev;
+ int tx_cleaned = 1, work_done = 0;
+
+ adapter = netdev_priv(poll_dev);
+
+ if (adapter->msix_entries &&
+ !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
+ goto clean_rx;
+
+ tx_cleaned = e1000_clean_tx_irq(adapter);
+
+clean_rx:
+ adapter->clean_rx(adapter, &work_done, budget);
+
+ if (!tx_cleaned)
+ work_done = budget;
+
+ /* If budget not fully consumed, exit the polling mode */
+ if (work_done < budget) {
+ if (adapter->itr_setting & 3)
+ e1000_set_itr(adapter);
+ napi_complete(napi);
+ if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ if (adapter->msix_entries)
+ ew32(IMS, adapter->rx_ring->ims_val);
+ else
+ e1000_irq_enable(adapter);
+ }
+ }
+
+ return work_done;
+}
+
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta, index;
+
+ /* don't update vlan cookie if already programmed */
+ if ((adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+ (vid == adapter->mng_vlan_id))
+ return;
+
+ /* add VID to filter table */
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+ vfta |= (1 << (vid & 0x1F));
+ hw->mac.ops.write_vfta(hw, index, vfta);
+ }
+
+ set_bit(vid, adapter->active_vlans);
+}
+
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta, index;
+
+ if ((adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+ (vid == adapter->mng_vlan_id)) {
+ /* release control to f/w */
+ e1000e_release_hw_control(adapter);
+ return;
+ }
+
+ /* remove VID from filter table */
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+ vfta &= ~(1 << (vid & 0x1F));
+ hw->mac.ops.write_vfta(hw, index, vfta);
+ }
+
+ clear_bit(vid, adapter->active_vlans);
+}
+
+/**
+ * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ /* disable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
+ ew32(RCTL, rctl);
+
+ if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
+ e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ }
+ }
+}
+
+/**
+ * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ /* enable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_VFE;
+ rctl &= ~E1000_RCTL_CFIEN;
+ ew32(RCTL, rctl);
+ }
+}
+
+/**
+ * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ /* disable VLAN tag insert/strip */
+ ctrl = er32(CTRL);
+ ctrl &= ~E1000_CTRL_VME;
+ ew32(CTRL, ctrl);
+}
+
+/**
+ * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ /* enable VLAN tag insert/strip */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_VME;
+ ew32(CTRL, ctrl);
+}
+
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u16 vid = adapter->hw.mng_cookie.vlan_id;
+ u16 old_vid = adapter->mng_vlan_id;
+
+ if (adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+ e1000_vlan_rx_add_vid(netdev, vid);
+ adapter->mng_vlan_id = vid;
+ }
+
+ if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
+ e1000_vlan_rx_kill_vid(netdev, old_vid);
+}
+
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+ u16 vid;
+
+ e1000_vlan_rx_add_vid(adapter->netdev, 0);
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ e1000_vlan_rx_add_vid(adapter->netdev, vid);
+}
+
+static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 manc, manc2h, mdef, i, j;
+
+ if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
+ return;
+
+ manc = er32(MANC);
+
+ /*
+ * enable receiving management packets to the host. this will probably
+ * generate destination unreachable messages from the host OS, but
+ * the packets will be handled on SMBUS
+ */
+ manc |= E1000_MANC_EN_MNG2HOST;
+ manc2h = er32(MANC2H);
+
+ switch (hw->mac.type) {
+ default:
+ manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ /*
+ * Check if IPMI pass-through decision filter already exists;
+ * if so, enable it.
+ */
+ for (i = 0, j = 0; i < 8; i++) {
+ mdef = er32(MDEF(i));
+
+ /* Ignore filters with anything other than IPMI ports */
+ if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ continue;
+
+ /* Enable this decision filter in MANC2H */
+ if (mdef)
+ manc2h |= (1 << i);
+
+ j |= mdef;
+ }
+
+ if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ break;
+
+ /* Create new decision filter in an empty filter */
+ for (i = 0, j = 0; i < 8; i++)
+ if (er32(MDEF(i)) == 0) {
+ ew32(MDEF(i), (E1000_MDEF_PORT_623 |
+ E1000_MDEF_PORT_664));
+ manc2h |= (1 << 1);
+ j++;
+ break;
+ }
+
+ if (!j)
+ e_warn("Unable to create IPMI pass-through filter\n");
+ break;
+ }
+
+ ew32(MANC2H, manc2h);
+ ew32(MANC, manc);
+}
+
+/**
+ * e1000_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void e1000_configure_tx(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ u64 tdba;
+ u32 tdlen, tctl, tipg, tarc;
+ u32 ipgr1, ipgr2;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ tdba = tx_ring->dma;
+ tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
+ ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
+ ew32(TDBAH, (tdba >> 32));
+ ew32(TDLEN, tdlen);
+ ew32(TDH, 0);
+ ew32(TDT, 0);
+ tx_ring->head = E1000_TDH;
+ tx_ring->tail = E1000_TDT;
+
+ /* Set the default values for the Tx Inter Packet Gap timer */
+ tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
+ ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
+ ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
+
+ if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
+ ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
+
+ tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+ tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
+ ew32(TIPG, tipg);
+
+ /* Set the Tx Interrupt Delay register */
+ ew32(TIDV, adapter->tx_int_delay);
+ /* Tx irq moderation */
+ ew32(TADV, adapter->tx_abs_int_delay);
+
+ if (adapter->flags2 & FLAG2_DMA_BURST) {
+ u32 txdctl = er32(TXDCTL(0));
+ txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
+ E1000_TXDCTL_WTHRESH);
+ /*
+ * set up some performance related parameters to encourage the
+ * hardware to use the bus more efficiently in bursts, depends
+ * on the tx_int_delay to be enabled,
+ * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
+ * hthresh = 1 ==> prefetch when one or more available
+ * pthresh = 0x1f ==> prefetch if internal cache 31 or less
+ * BEWARE: this seems to work but should be considered first if
+ * there are Tx hangs or other Tx related bugs
+ */
+ txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
+ ew32(TXDCTL(0), txdctl);
+ /* erratum work around: set txdctl the same for both queues */
+ ew32(TXDCTL(1), txdctl);
+ }
+
+ /* Program the Transmit Control Register */
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+ if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
+ tarc = er32(TARC(0));
+ /*
+ * set the speed mode bit, we'll clear it if we're not at
+ * gigabit link later
+ */
+#define SPEED_MODE_BIT (1 << 21)
+ tarc |= SPEED_MODE_BIT;
+ ew32(TARC(0), tarc);
+ }
+
+ /* errata: program both queues to unweighted RR */
+ if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
+ tarc = er32(TARC(0));
+ tarc |= 1;
+ ew32(TARC(0), tarc);
+ tarc = er32(TARC(1));
+ tarc |= 1;
+ ew32(TARC(1), tarc);
+ }
+
+ /* Setup Transmit Descriptor Settings for eop descriptor */
+ adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+ /* only set IDE if we are delaying interrupts using the timers */
+ if (adapter->tx_int_delay)
+ adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
+ /* enable Report Status bit */
+ adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+ ew32(TCTL, tctl);
+
+ e1000e_config_collision_dist(hw);
+}
+
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+ (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+static void e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl, rfctl;
+ u32 pages = 0;
+
+ /* Workaround Si errata on 82579 - configure jumbo frame flow */
+ if (hw->mac.type == e1000_pch2lan) {
+ s32 ret_val;
+
+ if (adapter->netdev->mtu > ETH_DATA_LEN)
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+ else
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+ if (ret_val)
+ e_dbg("failed to enable jumbo frame workaround mode\n");
+ }
+
+ /* Program MC offset vector base */
+ rctl = er32(RCTL);
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ /* Do not Store bad packets */
+ rctl &= ~E1000_RCTL_SBP;
+
+ /* Enable Long Packet receive */
+ if (adapter->netdev->mtu <= ETH_DATA_LEN)
+ rctl &= ~E1000_RCTL_LPE;
+ else
+ rctl |= E1000_RCTL_LPE;
+
+ /* Some systems expect that the CRC is included in SMBUS traffic. The
+ * hardware strips the CRC before sending to both SMBUS (BMC) and to
+ * host memory when this is enabled
+ */
+ if (adapter->flags2 & FLAG2_CRC_STRIPPING)
+ rctl |= E1000_RCTL_SECRC;
+
+ /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
+ if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
+ u16 phy_data;
+
+ e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
+ phy_data &= 0xfff8;
+ phy_data |= (1 << 2);
+ e1e_wphy(hw, PHY_REG(770, 26), phy_data);
+
+ e1e_rphy(hw, 22, &phy_data);
+ phy_data &= 0x0fff;
+ phy_data |= (1 << 14);
+ e1e_wphy(hw, 0x10, 0x2823);
+ e1e_wphy(hw, 0x11, 0x0003);
+ e1e_wphy(hw, 22, phy_data);
+ }
+
+ /* Setup buffer sizes */
+ rctl &= ~E1000_RCTL_SZ_4096;
+ rctl |= E1000_RCTL_BSEX;
+ switch (adapter->rx_buffer_len) {
+ case 2048:
+ default:
+ rctl |= E1000_RCTL_SZ_2048;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case 4096:
+ rctl |= E1000_RCTL_SZ_4096;
+ break;
+ case 8192:
+ rctl |= E1000_RCTL_SZ_8192;
+ break;
+ case 16384:
+ rctl |= E1000_RCTL_SZ_16384;
+ break;
+ }
+
+ /* Enable Extended Status in all Receive Descriptors */
+ rfctl = er32(RFCTL);
+ rfctl |= E1000_RFCTL_EXTEN;
+
+ /*
+ * 82571 and greater support packet-split where the protocol
+ * header is placed in skb->data and the packet data is
+ * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
+ * In the case of a non-split, skb->data is linearly filled,
+ * followed by the page buffers. Therefore, skb->data is
+ * sized to hold the largest protocol header.
+ *
+ * allocations using alloc_page take too long for regular MTU
+ * so only enable packet split for jumbo frames
+ *
+ * Using pages when the page size is greater than 16k wastes
+ * a lot of memory, since we allocate 3 pages at all times
+ * per packet.
+ */
+ pages = PAGE_USE_COUNT(adapter->netdev->mtu);
+ if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
+ (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
+ adapter->rx_ps_pages = pages;
+ else
+ adapter->rx_ps_pages = 0;
+
+ if (adapter->rx_ps_pages) {
+ u32 psrctl = 0;
+
+ /*
+ * disable packet split support for IPv6 extension headers,
+ * because some malformed IPv6 headers can hang the Rx
+ */
+ rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
+ E1000_RFCTL_NEW_IPV6_EXT_DIS);
+
+ /* Enable Packet split descriptors */
+ rctl |= E1000_RCTL_DTYP_PS;
+
+ psrctl |= adapter->rx_ps_bsize0 >>
+ E1000_PSRCTL_BSIZE0_SHIFT;
+
+ switch (adapter->rx_ps_pages) {
+ case 3:
+ psrctl |= PAGE_SIZE <<
+ E1000_PSRCTL_BSIZE3_SHIFT;
+ case 2:
+ psrctl |= PAGE_SIZE <<
+ E1000_PSRCTL_BSIZE2_SHIFT;
+ case 1:
+ psrctl |= PAGE_SIZE >>
+ E1000_PSRCTL_BSIZE1_SHIFT;
+ break;
+ }
+
+ ew32(PSRCTL, psrctl);
+ }
+
+ ew32(RFCTL, rfctl);
+ ew32(RCTL, rctl);
+ /* just started the receive unit, no need to restart */
+ adapter->flags &= ~FLAG_RX_RESTART_NOW;
+}
+
+/**
+ * e1000_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void e1000_configure_rx(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ u64 rdba;
+ u32 rdlen, rctl, rxcsum, ctrl_ext;
+
+ if (adapter->rx_ps_pages) {
+ /* this is a 32 byte descriptor */
+ rdlen = rx_ring->count *
+ sizeof(union e1000_rx_desc_packet_split);
+ adapter->clean_rx = e1000_clean_rx_irq_ps;
+ adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
+ } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
+ rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
+ adapter->clean_rx = e1000_clean_jumbo_rx_irq;
+ adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
+ } else {
+ rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
+ adapter->clean_rx = e1000_clean_rx_irq;
+ adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+ }
+
+ /* disable receives while setting up the descriptors */
+ rctl = er32(RCTL);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ if (adapter->flags2 & FLAG2_DMA_BURST) {
+ /*
+ * set the writeback threshold (only takes effect if the RDTR
+ * is set). set GRAN=1 and write back up to 0x4 worth, and
+ * enable prefetching of 0x20 Rx descriptors
+ * granularity = 01
+ * wthresh = 04,
+ * hthresh = 04,
+ * pthresh = 0x20
+ */
+ ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
+ ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
+
+ /*
+ * override the delay timers for enabling bursting, only if
+ * the value was not set by the user via module options
+ */
+ if (adapter->rx_int_delay == DEFAULT_RDTR)
+ adapter->rx_int_delay = BURST_RDTR;
+ if (adapter->rx_abs_int_delay == DEFAULT_RADV)
+ adapter->rx_abs_int_delay = BURST_RADV;
+ }
+
+ /* set the Receive Delay Timer Register */
+ ew32(RDTR, adapter->rx_int_delay);
+
+ /* irq moderation */
+ ew32(RADV, adapter->rx_abs_int_delay);
+ if ((adapter->itr_setting != 0) && (adapter->itr != 0))
+ ew32(ITR, 1000000000 / (adapter->itr * 256));
+
+ ctrl_ext = er32(CTRL_EXT);
+ /* Auto-Mask interrupts upon ICR access */
+ ctrl_ext |= E1000_CTRL_EXT_IAME;
+ ew32(IAM, 0xffffffff);
+ ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
+
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ rdba = rx_ring->dma;
+ ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
+ ew32(RDBAH, (rdba >> 32));
+ ew32(RDLEN, rdlen);
+ ew32(RDH, 0);
+ ew32(RDT, 0);
+ rx_ring->head = E1000_RDH;
+ rx_ring->tail = E1000_RDT;
+
+ /* Enable Receive Checksum Offload for TCP and UDP */
+ rxcsum = er32(RXCSUM);
+ if (adapter->netdev->features & NETIF_F_RXCSUM) {
+ rxcsum |= E1000_RXCSUM_TUOFL;
+
+ /*
+ * IPv4 payload checksum for UDP fragments must be
+ * used in conjunction with packet-split.
+ */
+ if (adapter->rx_ps_pages)
+ rxcsum |= E1000_RXCSUM_IPPCSE;
+ } else {
+ rxcsum &= ~E1000_RXCSUM_TUOFL;
+ /* no need to clear IPPCSE as it defaults to 0 */
+ }
+ ew32(RXCSUM, rxcsum);
+
+ /*
+ * Enable early receives on supported devices, only takes effect when
+ * packet size is equal or larger than the specified value (in 8 byte
+ * units), e.g. using jumbo frames when setting to E1000_ERT_2048
+ */
+ if ((adapter->flags & FLAG_HAS_ERT) ||
+ (adapter->hw.mac.type == e1000_pch2lan)) {
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ u32 rxdctl = er32(RXDCTL(0));
+ ew32(RXDCTL(0), rxdctl | 0x3);
+ if (adapter->flags & FLAG_HAS_ERT)
+ ew32(ERT, E1000_ERT_2048 | (1 << 13));
+ /*
+ * With jumbo frames and early-receive enabled,
+ * excessive C-state transition latencies result in
+ * dropped transactions.
+ */
+ pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
+ } else {
+ pm_qos_update_request(&adapter->netdev->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
+ }
+ }
+
+ /* Enable Receives */
+ ew32(RCTL, rctl);
+}
+
+/**
+ * e1000_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates the Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count)
+{
+ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
+}
+
+/**
+ * e1000_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void e1000_set_multi(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ u32 rctl;
+
+ /* Check for Promiscuous and All Multicast modes */
+
+ rctl = er32(RCTL);
+
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ rctl &= ~E1000_RCTL_VFE;
+ /* Do not hardware filter VLANs in promisc mode */
+ e1000e_vlan_filter_disable(adapter);
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= E1000_RCTL_MPE;
+ rctl &= ~E1000_RCTL_UPE;
+ } else {
+ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+ }
+ e1000e_vlan_filter_enable(adapter);
+ }
+
+ ew32(RCTL, rctl);
+
+ if (!netdev_mc_empty(netdev)) {
+ int i = 0;
+
+ mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
+ if (!mta_list)
+ return;
+
+ /* prepare a packed array of only addresses. */
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+ e1000_update_mc_addr_list(hw, mta_list, i);
+ kfree(mta_list);
+ } else {
+ /*
+ * if we're called from probe, we might not have
+ * anything to do here, so clear out the list
+ */
+ e1000_update_mc_addr_list(hw, NULL, 0);
+ }
+
+ if (netdev->features & NETIF_F_HW_VLAN_RX)
+ e1000e_vlan_strip_enable(adapter);
+ else
+ e1000e_vlan_strip_disable(adapter);
+}
+
+/**
+ * e1000_configure - configure the hardware for Rx and Tx
+ * @adapter: private board structure
+ **/
+static void e1000_configure(struct e1000_adapter *adapter)
+{
+ e1000_set_multi(adapter->netdev);
+
+ e1000_restore_vlan(adapter);
+ e1000_init_manageability_pt(adapter);
+
+ e1000_configure_tx(adapter);
+ e1000_setup_rctl(adapter);
+ e1000_configure_rx(adapter);
+ adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
+ GFP_KERNEL);
+}
+
+/**
+ * e1000e_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000e_reset ***
+ **/
+void e1000e_power_up_phy(struct e1000_adapter *adapter)
+{
+ if (adapter->hw.phy.ops.power_up)
+ adapter->hw.phy.ops.power_up(&adapter->hw);
+
+ adapter->hw.mac.ops.setup_link(&adapter->hw);
+}
+
+/**
+ * e1000_power_down_phy - Power down the PHY
+ *
+ * Power down the PHY so no link is implied when interface is down.
+ * The PHY cannot be powered down if management or WoL is active.
+ */
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+ /* WoL is enabled */
+ if (adapter->wol)
+ return;
+
+ if (adapter->hw.phy.ops.power_down)
+ adapter->hw.phy.ops.power_down(&adapter->hw);
+}
+
+/**
+ * e1000e_reset - bring the hardware into a known good state
+ *
+ * This function boots the hardware and enables some settings that
+ * require a configuration cycle of the hardware - those cannot be
+ * set/changed during runtime. After reset the device needs to be
+ * properly configured for Rx, Tx etc.
+ */
+void e1000e_reset(struct e1000_adapter *adapter)
+{
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+ struct e1000_fc_info *fc = &adapter->hw.fc;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tx_space, min_tx_space, min_rx_space;
+ u32 pba = adapter->pba;
+ u16 hwm;
+
+ /* reset Packet Buffer Allocation to default */
+ ew32(PBA, pba);
+
+ if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+ /*
+ * To maintain wire speed transmits, the Tx FIFO should be
+ * large enough to accommodate two full transmit packets,
+ * rounded up to the next 1KB and expressed in KB. Likewise,
+ * the Rx FIFO should be large enough to accommodate at least
+ * one full receive packet and is similarly rounded up and
+ * expressed in KB.
+ */
+ pba = er32(PBA);
+ /* upper 16 bits has Tx packet buffer allocation size in KB */
+ tx_space = pba >> 16;
+ /* lower 16 bits has Rx packet buffer allocation size in KB */
+ pba &= 0xffff;
+ /*
+ * the Tx fifo also stores 16 bytes of information about the Tx
+ * but don't include ethernet FCS because hardware appends it
+ */
+ min_tx_space = (adapter->max_frame_size +
+ sizeof(struct e1000_tx_desc) -
+ ETH_FCS_LEN) * 2;
+ min_tx_space = ALIGN(min_tx_space, 1024);
+ min_tx_space >>= 10;
+ /* software strips receive CRC, so leave room for it */
+ min_rx_space = adapter->max_frame_size;
+ min_rx_space = ALIGN(min_rx_space, 1024);
+ min_rx_space >>= 10;
+
+ /*
+ * If current Tx allocation is less than the min Tx FIFO size,
+ * and the min Tx FIFO size is less than the current Rx FIFO
+ * allocation, take space away from current Rx allocation
+ */
+ if ((tx_space < min_tx_space) &&
+ ((min_tx_space - tx_space) < pba)) {
+ pba -= min_tx_space - tx_space;
+
+ /*
+ * if short on Rx space, Rx wins and must trump Tx
+ * adjustment or use Early Receive if available
+ */
+ if ((pba < min_rx_space) &&
+ (!(adapter->flags & FLAG_HAS_ERT)))
+ /* ERT enabled in e1000_configure_rx */
+ pba = min_rx_space;
+ }
+
+ ew32(PBA, pba);
+ }
+
+ /*
+ * flow control settings
+ *
+ * The high water mark must be low enough to fit one full frame
+ * (or the size used for early receive) above it in the Rx FIFO.
+ * Set it to the lower of:
+ * - 90% of the Rx FIFO size, and
+ * - the full Rx FIFO size minus the early receive size (for parts
+ * with ERT support assuming ERT set to E1000_ERT_2048), or
+ * - the full Rx FIFO size minus one full frame
+ */
+ if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
+ fc->pause_time = 0xFFFF;
+ else
+ fc->pause_time = E1000_FC_PAUSE_TIME;
+ fc->send_xon = 1;
+ fc->current_mode = fc->requested_mode;
+
+ switch (hw->mac.type) {
+ default:
+ if ((adapter->flags & FLAG_HAS_ERT) &&
+ (adapter->netdev->mtu > ETH_DATA_LEN))
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - (E1000_ERT_2048 << 3)));
+ else
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - adapter->max_frame_size));
+
+ fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+ fc->low_water = fc->high_water - 8;
+ break;
+ case e1000_pchlan:
+ /*
+ * Workaround PCH LOM adapter hangs with certain network
+ * loads. If hangs persist, try disabling Tx flow control.
+ */
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ fc->high_water = 0x3500;
+ fc->low_water = 0x1500;
+ } else {
+ fc->high_water = 0x5000;
+ fc->low_water = 0x3000;
+ }
+ fc->refresh_time = 0x1000;
+ break;
+ case e1000_pch2lan:
+ fc->high_water = 0x05C20;
+ fc->low_water = 0x05048;
+ fc->pause_time = 0x0650;
+ fc->refresh_time = 0x0400;
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ pba = 14;
+ ew32(PBA, pba);
+ }
+ break;
+ }
+
+ /*
+ * Disable Adaptive Interrupt Moderation if 2 full packets cannot
+ * fit in receive buffer and early-receive not supported.
+ */
+ if (adapter->itr_setting & 0x3) {
+ if (((adapter->max_frame_size * 2) > (pba << 10)) &&
+ !(adapter->flags & FLAG_HAS_ERT)) {
+ if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
+ dev_info(&adapter->pdev->dev,
+ "Interrupt Throttle Rate turned off\n");
+ adapter->flags2 |= FLAG2_DISABLE_AIM;
+ ew32(ITR, 0);
+ }
+ } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
+ dev_info(&adapter->pdev->dev,
+ "Interrupt Throttle Rate turned on\n");
+ adapter->flags2 &= ~FLAG2_DISABLE_AIM;
+ adapter->itr = 20000;
+ ew32(ITR, 1000000000 / (adapter->itr * 256));
+ }
+ }
+
+ /* Allow time for pending master requests to run */
+ mac->ops.reset_hw(hw);
+
+ /*
+ * For parts with AMT enabled, let the firmware know
+ * that the network interface is in control
+ */
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_get_hw_control(adapter);
+
+ ew32(WUC, 0);
+
+ if (mac->ops.init_hw(hw))
+ e_err("Hardware Error\n");
+
+ e1000_update_mng_vlan(adapter);
+
+ /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+ ew32(VET, ETH_P_8021Q);
+
+ e1000e_reset_adaptive(hw);
+
+ if (!netif_running(adapter->netdev) &&
+ !test_bit(__E1000_TESTING, &adapter->state)) {
+ e1000_power_down_phy(adapter);
+ return;
+ }
+
+ e1000_get_phy_info(hw);
+
+ if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
+ !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
+ u16 phy_data = 0;
+ /*
+ * speed up time to link by disabling smart power down, ignore
+ * the return value of this function because there is nothing
+ * different we would do if it failed
+ */
+ e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+ phy_data &= ~IGP02E1000_PM_SPD;
+ e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+ }
+}
+
+int e1000e_up(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* hardware has been reset, we need to reload some things */
+ e1000_configure(adapter);
+
+ clear_bit(__E1000_DOWN, &adapter->state);
+
+ napi_enable(&adapter->napi);
+ if (adapter->msix_entries)
+ e1000_configure_msix(adapter);
+ e1000_irq_enable(adapter);
+
+ netif_start_queue(adapter->netdev);
+
+ /* fire a link change interrupt to start the watchdog */
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
+
+ return 0;
+}
+
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!(adapter->flags2 & FLAG2_DMA_BURST))
+ return;
+
+ /* flush pending descriptor writebacks to memory */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+ /* execute the writes immediately */
+ e1e_flush();
+}
+
+static void e1000e_update_stats(struct e1000_adapter *adapter);
+
+void e1000e_down(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tctl, rctl;
+
+ /*
+ * signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer
+ */
+ set_bit(__E1000_DOWN, &adapter->state);
+
+ /* disable receives in the hardware */
+ rctl = er32(RCTL);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ /* flush and sleep below */
+
+ netif_stop_queue(netdev);
+
+ /* disable transmits in the hardware */
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ ew32(TCTL, tctl);
+
+ /* flush both disables and wait for them to finish */
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ napi_disable(&adapter->napi);
+ e1000_irq_disable(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ netif_carrier_off(netdev);
+
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
+
+ e1000e_flush_descriptors(adapter);
+ e1000_clean_tx_ring(adapter);
+ e1000_clean_rx_ring(adapter);
+
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+ if (!pci_channel_offline(adapter->pdev))
+ e1000e_reset(adapter);
+
+ /*
+ * TODO: for power management, we could drop the link and
+ * pci_disable_device here.
+ */
+}
+
+void e1000e_reinit_locked(struct e1000_adapter *adapter)
+{
+ might_sleep();
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ e1000e_down(adapter);
+ e1000e_up(adapter);
+ clear_bit(__E1000_RESETTING, &adapter->state);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+ adapter->rx_ps_bsize0 = 128;
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ spin_lock_init(&adapter->stats64_lock);
+
+ e1000e_set_interrupt_capability(adapter);
+
+ if (e1000_alloc_queues(adapter))
+ return -ENOMEM;
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ e1000_irq_disable(adapter);
+
+ set_bit(__E1000_DOWN, &adapter->state);
+ return 0;
+}
+
+/**
+ * e1000_intr_msi_test - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr_msi_test(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = er32(ICR);
+
+ e_dbg("icr is %08X\n", icr);
+ if (icr & E1000_ICR_RXSEQ) {
+ adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+ wmb();
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_test_msi_interrupt - Returns 0 for successful test
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c
+ **/
+static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* poll_enable hasn't been called yet, so don't need disable */
+ /* clear any pending events */
+ er32(ICR);
+
+ /* free the real vector and request a test handler */
+ e1000_free_irq(adapter);
+ e1000e_reset_interrupt_capability(adapter);
+
+ /* Assume that the test fails, if it succeeds then the test
+ * MSI irq handler will unset this flag */
+ adapter->flags |= FLAG_MSI_TEST_FAILED;
+
+ err = pci_enable_msi(adapter->pdev);
+ if (err)
+ goto msi_test_failed;
+
+ err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
+ netdev->name, netdev);
+ if (err) {
+ pci_disable_msi(adapter->pdev);
+ goto msi_test_failed;
+ }
+
+ wmb();
+
+ e1000_irq_enable(adapter);
+
+ /* fire an unusual interrupt on the test handler */
+ ew32(ICS, E1000_ICS_RXSEQ);
+ e1e_flush();
+ msleep(50);
+
+ e1000_irq_disable(adapter);
+
+ rmb();
+
+ if (adapter->flags & FLAG_MSI_TEST_FAILED) {
+ adapter->int_mode = E1000E_INT_MODE_LEGACY;
+ e_info("MSI interrupt test failed, using legacy interrupt.\n");
+ } else
+ e_dbg("MSI interrupt test succeeded!\n");
+
+ free_irq(adapter->pdev->irq, netdev);
+ pci_disable_msi(adapter->pdev);
+
+msi_test_failed:
+ e1000e_set_interrupt_capability(adapter);
+ return e1000_request_irq(adapter);
+}
+
+/**
+ * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c, called with e1000 interrupts disabled.
+ **/
+static int e1000_test_msi(struct e1000_adapter *adapter)
+{
+ int err;
+ u16 pci_cmd;
+
+ if (!(adapter->flags & FLAG_MSI_ENABLED))
+ return 0;
+
+ /* disable SERR in case the MSI write causes a master abort */
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_SERR)
+ pci_write_config_word(adapter->pdev, PCI_COMMAND,
+ pci_cmd & ~PCI_COMMAND_SERR);
+
+ err = e1000_test_msi_interrupt(adapter);
+
+ /* re-enable SERR */
+ if (pci_cmd & PCI_COMMAND_SERR) {
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd |= PCI_COMMAND_SERR;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+ }
+
+ return err;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int e1000_open(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__E1000_TESTING, &adapter->state))
+ return -EBUSY;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ netif_carrier_off(netdev);
+
+ /* allocate transmit descriptors */
+ err = e1000e_setup_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = e1000e_setup_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ /*
+ * If AMT is enabled, let the firmware know that the network
+ * interface is now open and reset the part to a known state.
+ */
+ if (adapter->flags & FLAG_HAS_AMT) {
+ e1000e_get_hw_control(adapter);
+ e1000e_reset(adapter);
+ }
+
+ e1000e_power_up_phy(adapter);
+
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ if ((adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
+ e1000_update_mng_vlan(adapter);
+
+ /* DMA latency requirement to workaround early-receive/jumbo issue */
+ if ((adapter->flags & FLAG_HAS_ERT) ||
+ (adapter->hw.mac.type == e1000_pch2lan))
+ pm_qos_add_request(&adapter->netdev->pm_qos_req,
+ PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ /*
+ * before we allocate an interrupt, we must be ready to handle it.
+ * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+ * as soon as we call pci_request_irq, so we have to setup our
+ * clean_rx handler before we do so.
+ */
+ e1000_configure(adapter);
+
+ err = e1000_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ /*
+ * Work around PCIe errata with MSI interrupts causing some chipsets to
+ * ignore e1000e MSI messages, which means we need to test our MSI
+ * interrupt now
+ */
+ if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
+ err = e1000_test_msi(adapter);
+ if (err) {
+ e_err("Interrupt allocation failed\n");
+ goto err_req_irq;
+ }
+ }
+
+ /* From here on the code is the same as e1000e_up() */
+ clear_bit(__E1000_DOWN, &adapter->state);
+
+ napi_enable(&adapter->napi);
+
+ e1000_irq_enable(adapter);
+
+ netif_start_queue(netdev);
+
+ adapter->idle_check = true;
+ pm_runtime_put(&pdev->dev);
+
+ /* fire a link status change interrupt to start the watchdog */
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
+
+ return 0;
+
+err_req_irq:
+ e1000e_release_hw_control(adapter);
+ e1000_power_down_phy(adapter);
+ e1000e_free_rx_resources(adapter);
+err_setup_rx:
+ e1000e_free_tx_resources(adapter);
+err_setup_tx:
+ e1000e_reset(adapter);
+ pm_runtime_put_sync(&pdev->dev);
+
+ return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int e1000_close(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ e1000e_down(adapter);
+ e1000_free_irq(adapter);
+ }
+ e1000_power_down_phy(adapter);
+
+ e1000e_free_tx_resources(adapter);
+ e1000e_free_rx_resources(adapter);
+
+ /*
+ * kill manageability vlan ID if supported, but not if a vlan with
+ * the same ID is registered on the host OS (let 8021q kill it)
+ */
+ if (adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
+ e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+
+ /*
+ * If AMT is enabled, let the firmware know that the network
+ * interface is now closed
+ */
+ if ((adapter->flags & FLAG_HAS_AMT) &&
+ !test_bit(__E1000_TESTING, &adapter->state))
+ e1000e_release_hw_control(adapter);
+
+ if ((adapter->flags & FLAG_HAS_ERT) ||
+ (adapter->hw.mac.type == e1000_pch2lan))
+ pm_qos_remove_request(&adapter->netdev->pm_qos_req);
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+}
+/**
+ * e1000_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int e1000_set_mac(struct net_device *netdev, void *p)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
+
+ e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+
+ if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
+ /* activate the work around */
+ e1000e_set_laa_state_82571(&adapter->hw, 1);
+
+ /*
+ * Hold a copy of the LAA in RAR[14] This is done so that
+ * between the time RAR[0] gets clobbered and the time it
+ * gets fixed (in e1000_watchdog), the actual LAA is in one
+ * of the RARs and no incoming packets directed to this port
+ * are dropped. Eventually the LAA will be in RAR[0] and
+ * RAR[14]
+ */
+ e1000e_rar_set(&adapter->hw,
+ adapter->hw.mac.addr,
+ adapter->hw.mac.rar_entry_count - 1);
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_update_phy_task - work thread to update phy
+ * @work: pointer to our work struct
+ *
+ * this worker thread exists because we must acquire a
+ * semaphore to read the phy, which we could msleep while
+ * waiting for it, and we can't msleep in a timer.
+ **/
+static void e1000e_update_phy_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter, update_phy_task);
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ e1000_get_phy_info(&adapter->hw);
+}
+
+/*
+ * Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
+static void e1000_update_phy_info(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ schedule_work(&adapter->update_phy_task);
+}
+
+/**
+ * e1000e_update_phy_stats - Update the PHY statistics counters
+ * @adapter: board private structure
+ *
+ * Read/clear the upper 16-bit PHY registers and read/accumulate lower
+ **/
+static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ s32 ret_val;
+ u16 phy_data;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+
+ /*
+ * A page set is expensive so check if already on desired page.
+ * If not, set to the page with the PHY status registers.
+ */
+ hw->phy.addr = 1;
+ ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+ &phy_data);
+ if (ret_val)
+ goto release;
+ if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
+ ret_val = hw->phy.ops.set_page(hw,
+ HV_STATS_PAGE << IGP_PAGE_SHIFT);
+ if (ret_val)
+ goto release;
+ }
+
+ /* Single Collision Count */
+ hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.scc += phy_data;
+
+ /* Excessive Collision Count */
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.ecol += phy_data;
+
+ /* Multiple Collision Count */
+ hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.mcc += phy_data;
+
+ /* Late Collision Count */
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.latecol += phy_data;
+
+ /* Collision Count - also used for adaptive IFS */
+ hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
+ if (!ret_val)
+ hw->mac.collision_delta = phy_data;
+
+ /* Defer Count */
+ hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.dc += phy_data;
+
+ /* Transmit with no CRS */
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.tncrs += phy_data;
+
+release:
+ hw->phy.ops.release(hw);
+}
+
+/**
+ * e1000e_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+static void e1000e_update_stats(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+ if (pci_channel_offline(pdev))
+ return;
+
+ adapter->stats.crcerrs += er32(CRCERRS);
+ adapter->stats.gprc += er32(GPRC);
+ adapter->stats.gorc += er32(GORCL);
+ er32(GORCH); /* Clear gorc */
+ adapter->stats.bprc += er32(BPRC);
+ adapter->stats.mprc += er32(MPRC);
+ adapter->stats.roc += er32(ROC);
+
+ adapter->stats.mpc += er32(MPC);
+
+ /* Half-duplex statistics */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
+ e1000e_update_phy_stats(adapter);
+ } else {
+ adapter->stats.scc += er32(SCC);
+ adapter->stats.ecol += er32(ECOL);
+ adapter->stats.mcc += er32(MCC);
+ adapter->stats.latecol += er32(LATECOL);
+ adapter->stats.dc += er32(DC);
+
+ hw->mac.collision_delta = er32(COLC);
+
+ if ((hw->mac.type != e1000_82574) &&
+ (hw->mac.type != e1000_82583))
+ adapter->stats.tncrs += er32(TNCRS);
+ }
+ adapter->stats.colc += hw->mac.collision_delta;
+ }
+
+ adapter->stats.xonrxc += er32(XONRXC);
+ adapter->stats.xontxc += er32(XONTXC);
+ adapter->stats.xoffrxc += er32(XOFFRXC);
+ adapter->stats.xofftxc += er32(XOFFTXC);
+ adapter->stats.gptc += er32(GPTC);
+ adapter->stats.gotc += er32(GOTCL);
+ er32(GOTCH); /* Clear gotc */
+ adapter->stats.rnbc += er32(RNBC);
+ adapter->stats.ruc += er32(RUC);
+
+ adapter->stats.mptc += er32(MPTC);
+ adapter->stats.bptc += er32(BPTC);
+
+ /* used for adaptive IFS */
+
+ hw->mac.tx_packet_delta = er32(TPT);
+ adapter->stats.tpt += hw->mac.tx_packet_delta;
+
+ adapter->stats.algnerrc += er32(ALGNERRC);
+ adapter->stats.rxerrc += er32(RXERRC);
+ adapter->stats.cexterr += er32(CEXTERR);
+ adapter->stats.tsctc += er32(TSCTC);
+ adapter->stats.tsctfc += er32(TSCTFC);
+
+ /* Fill out the OS statistics structure */
+ netdev->stats.multicast = adapter->stats.mprc;
+ netdev->stats.collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /*
+ * RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC
+ */
+ netdev->stats.rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ netdev->stats.rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+ netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
+ netdev->stats.rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ netdev->stats.tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ netdev->stats.tx_aborted_errors = adapter->stats.ecol;
+ netdev->stats.tx_window_errors = adapter->stats.latecol;
+ netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ /* Management Stats */
+ adapter->stats.mgptc += er32(MGTPTC);
+ adapter->stats.mgprc += er32(MGTPRC);
+ adapter->stats.mgpdc += er32(MGTPDC);
+}
+
+/**
+ * e1000_phy_read_status - Update the PHY register status snapshot
+ * @adapter: board private structure
+ **/
+static void e1000_phy_read_status(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_phy_regs *phy = &adapter->phy_regs;
+
+ if ((er32(STATUS) & E1000_STATUS_LU) &&
+ (adapter->hw.phy.media_type == e1000_media_type_copper)) {
+ int ret_val;
+
+ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
+ ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
+ ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
+ ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
+ ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
+ ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
+ ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
+ ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
+ if (ret_val)
+ e_warn("Error reading PHY register\n");
+ } else {
+ /*
+ * Do not read PHY registers if link is not up
+ * Set values to typical power-on defaults
+ */
+ phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
+ phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
+ BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
+ BMSR_ERCAP);
+ phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
+ ADVERTISE_ALL | ADVERTISE_CSMA);
+ phy->lpa = 0;
+ phy->expansion = EXPANSION_ENABLENPAGE;
+ phy->ctrl1000 = ADVERTISE_1000FULL;
+ phy->stat1000 = 0;
+ phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
+ }
+}
+
+static void e1000_print_link_info(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl = er32(CTRL);
+
+ /* Link status message must follow this format for user tools */
+ printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
+ "Flow Control: %s\n",
+ adapter->netdev->name,
+ adapter->link_speed,
+ (adapter->link_duplex == FULL_DUPLEX) ?
+ "Full Duplex" : "Half Duplex",
+ ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
+ "Rx/Tx" :
+ ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
+}
+
+static bool e1000e_has_link(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ bool link_active = 0;
+ s32 ret_val = 0;
+
+ /*
+ * get_link_status is set on LSC (link status) interrupt or
+ * Rx sequence error interrupt. get_link_status will stay
+ * false until the check_for_link establishes link
+ * for copper adapters ONLY
+ */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ if (hw->mac.get_link_status) {
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
+ } else {
+ link_active = 1;
+ }
+ break;
+ case e1000_media_type_fiber:
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = !!(er32(STATUS) & E1000_STATUS_LU);
+ break;
+ case e1000_media_type_internal_serdes:
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = adapter->hw.mac.serdes_has_link;
+ break;
+ default:
+ case e1000_media_type_unknown:
+ break;
+ }
+
+ if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+ (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+ /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
+ e_info("Gigabit has been disabled, downgrading speed\n");
+ }
+
+ return link_active;
+}
+
+static void e1000e_enable_receives(struct e1000_adapter *adapter)
+{
+ /* make sure the receive unit is started */
+ if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+ (adapter->flags & FLAG_RX_RESTART_NOW)) {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl | E1000_RCTL_EN);
+ adapter->flags &= ~FLAG_RX_RESTART_NOW;
+ }
+}
+
+static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+ * With 82574 controllers, PHY needs to be checked periodically
+ * for hung state and reset, if two calls return true
+ */
+ if (e1000_check_phy_82574(hw))
+ adapter->phy_hang_count++;
+ else
+ adapter->phy_hang_count = 0;
+
+ if (adapter->phy_hang_count > 1) {
+ adapter->phy_hang_count = 0;
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+/**
+ * e1000_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void e1000_watchdog(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+
+ /* TODO: make this use queue_delayed_work() */
+}
+
+static void e1000_watchdog_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter, watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+ struct e1000_phy_info *phy = &adapter->hw.phy;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 link, tctl;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ link = e1000e_has_link(adapter);
+ if ((netif_carrier_ok(netdev)) && link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
+ e1000e_enable_receives(adapter);
+ goto link_up;
+ }
+
+ if ((e1000e_enable_tx_pkt_filtering(hw)) &&
+ (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
+ e1000_update_mng_vlan(adapter);
+
+ if (link) {
+ if (!netif_carrier_ok(netdev)) {
+ bool txb2b = 1;
+
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
+ /* update snapshot of PHY registers on LSC */
+ e1000_phy_read_status(adapter);
+ mac->ops.get_link_up_info(&adapter->hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+ e1000_print_link_info(adapter);
+ /*
+ * On supported PHYs, check for duplex mismatch only
+ * if link has autonegotiated at 10/100 half
+ */
+ if ((hw->phy.type == e1000_phy_igp_3 ||
+ hw->phy.type == e1000_phy_bm) &&
+ (hw->mac.autoneg == true) &&
+ (adapter->link_speed == SPEED_10 ||
+ adapter->link_speed == SPEED_100) &&
+ (adapter->link_duplex == HALF_DUPLEX)) {
+ u16 autoneg_exp;
+
+ e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
+
+ if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
+ e_info("Autonegotiated half duplex but"
+ " link partner cannot autoneg. "
+ " Try forcing full duplex if "
+ "link gets many collisions.\n");
+ }
+
+ /* adjust timeout factor according to speed/duplex */
+ adapter->tx_timeout_factor = 1;
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ txb2b = 0;
+ adapter->tx_timeout_factor = 16;
+ break;
+ case SPEED_100:
+ txb2b = 0;
+ adapter->tx_timeout_factor = 10;
+ break;
+ }
+
+ /*
+ * workaround: re-program speed mode bit after
+ * link-up event
+ */
+ if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
+ !txb2b) {
+ u32 tarc0;
+ tarc0 = er32(TARC(0));
+ tarc0 &= ~SPEED_MODE_BIT;
+ ew32(TARC(0), tarc0);
+ }
+
+ /*
+ * disable TSO for pcie and 10/100 speeds, to avoid
+ * some hardware issues
+ */
+ if (!(adapter->flags & FLAG_TSO_FORCE)) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ e_info("10/100 speed: disabling TSO\n");
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ break;
+ case SPEED_1000:
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ break;
+ default:
+ /* oops */
+ break;
+ }
+ }
+
+ /*
+ * enable transmits in the hardware, need to do this
+ * after setting TARC(0)
+ */
+ tctl = er32(TCTL);
+ tctl |= E1000_TCTL_EN;
+ ew32(TCTL, tctl);
+
+ /*
+ * Perform any post-link-up configuration before
+ * reporting link up.
+ */
+ if (phy->ops.cfg_on_link_up)
+ phy->ops.cfg_on_link_up(hw);
+
+ netif_carrier_on(netdev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
+ } else {
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ /* Link status message must follow this format */
+ printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
+ adapter->netdev->name);
+ netif_carrier_off(netdev);
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+
+ if (adapter->flags & FLAG_RX_NEEDS_RESTART)
+ schedule_work(&adapter->reset_task);
+ else
+ pm_schedule_suspend(netdev->dev.parent,
+ LINK_TIMEOUT);
+ }
+ }
+
+link_up:
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+
+ mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+ adapter->tpt_old = adapter->stats.tpt;
+ mac->collision_delta = adapter->stats.colc - adapter->colc_old;
+ adapter->colc_old = adapter->stats.colc;
+
+ adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
+ adapter->gorc_old = adapter->stats.gorc;
+ adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
+ adapter->gotc_old = adapter->stats.gotc;
+ spin_unlock(&adapter->stats64_lock);
+
+ e1000e_update_adaptive(&adapter->hw);
+
+ if (!netif_carrier_ok(netdev) &&
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
+ /*
+ * We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
+ }
+
+ /* Simple mode for Interrupt Throttle Rate (ITR) */
+ if (adapter->itr_setting == 4) {
+ /*
+ * Symmetric Tx/Rx gets a reduced ITR=2000;
+ * Total asymmetrical Tx or Rx gets ITR=8000;
+ * everyone else is between 2000-8000.
+ */
+ u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+ u32 dif = (adapter->gotc > adapter->gorc ?
+ adapter->gotc - adapter->gorc :
+ adapter->gorc - adapter->gotc) / 10000;
+ u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+ ew32(ITR, 1000000000 / (itr * 256));
+ }
+
+ /* Cause software interrupt to ensure Rx ring is cleaned */
+ if (adapter->msix_entries)
+ ew32(ICS, adapter->rx_ring->ims_val);
+ else
+ ew32(ICS, E1000_ICS_RXDMT0);
+
+ /* flush pending descriptors to memory before detecting Tx hang */
+ e1000e_flush_descriptors(adapter);
+
+ /* Force detection of hung controller every watchdog period */
+ adapter->detect_tx_hung = 1;
+
+ /*
+ * With 82571 controllers, LAA may be overwritten due to controller
+ * reset from the other port. Set the appropriate LAA in RAR[0]
+ */
+ if (e1000e_get_laa_state_82571(hw))
+ e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
+
+ if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
+ e1000e_check_82574_phy_workaround(adapter);
+
+ /* Reset the timer */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
+}
+
+#define E1000_TX_FLAGS_CSUM 0x00000001
+#define E1000_TX_FLAGS_VLAN 0x00000002
+#define E1000_TX_FLAGS_TSO 0x00000004
+#define E1000_TX_FLAGS_IPV4 0x00000008
+#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT 16
+
+static int e1000_tso(struct e1000_adapter *adapter,
+ struct sk_buff *skb)
+{
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i;
+ u32 cmd_length = 0;
+ u16 ipcse = 0, tucse, mss;
+ u8 ipcss, ipcso, tucss, tucso, hdr_len;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+
+ if (err)
+ return err;
+ }
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ cmd_length = E1000_TXD_CMD_IP;
+ ipcse = skb_transport_offset(skb) - 1;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ ipcse = 0;
+ }
+ ipcss = skb_network_offset(skb);
+ ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+ tucss = skb_transport_offset(skb);
+ tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+ tucse = 0;
+
+ cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+ E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+
+ i = tx_ring->next_to_use;
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+
+ context_desc->lower_setup.ip_fields.ipcss = ipcss;
+ context_desc->lower_setup.ip_fields.ipcso = ipcso;
+ context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
+ context_desc->upper_setup.tcp_fields.tucss = tucss;
+ context_desc->upper_setup.tcp_fields.tucso = tucso;
+ context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+ context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
+ context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return 1;
+}
+
+static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
+{
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i;
+ u8 css;
+ u32 cmd_len = E1000_TXD_CMD_DEXT;
+ __be16 protocol;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
+ protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+ else
+ protocol = skb->protocol;
+
+ switch (protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ /* XXX not handling all IPV6 headers */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ default:
+ if (unlikely(net_ratelimit()))
+ e_warn("checksum_partial proto=%x!\n",
+ be16_to_cpu(protocol));
+ break;
+ }
+
+ css = skb_checksum_start_offset(skb);
+
+ i = tx_ring->next_to_use;
+ buffer_info = &tx_ring->buffer_info[i];
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+
+ context_desc->lower_setup.ip_config = 0;
+ context_desc->upper_setup.tcp_fields.tucss = css;
+ context_desc->upper_setup.tcp_fields.tucso =
+ css + skb->csum_offset;
+ context_desc->upper_setup.tcp_fields.tucse = 0;
+ context_desc->tcp_seg_setup.data = 0;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return 1;
+}
+
+#define E1000_MAX_PER_TXD 8192
+#define E1000_MAX_TXD_PWR 12
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+ struct sk_buff *skb, unsigned int first,
+ unsigned int max_per_txd, unsigned int nr_frags,
+ unsigned int mss)
+{
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_buffer *buffer_info;
+ unsigned int len = skb_headlen(skb);
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int f, bytecount, segs;
+
+ i = tx_ring->next_to_use;
+
+ while (len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+
+ buffer_info->length = size;
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ buffer_info->mapped_as_page = false;
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+
+ len -= size;
+ offset += size;
+ count++;
+
+ if (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ const struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = skb_frag_size(frag);
+ offset = 0;
+
+ while (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+
+ buffer_info->length = size;
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
+ offset, size, DMA_TO_DEVICE);
+ buffer_info->mapped_as_page = true;
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+
+ len -= size;
+ offset += size;
+ count++;
+ }
+ }
+
+ segs = skb_shinfo(skb)->gso_segs ? : 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].segs = segs;
+ tx_ring->buffer_info[i].bytecount = bytecount;
+ tx_ring->buffer_info[first].next_to_watch = i;
+
+ return count;
+
+dma_error:
+ dev_err(&pdev->dev, "Tx DMA map failed\n");
+ buffer_info->dma = 0;
+ if (count)
+ count--;
+
+ while (count--) {
+ if (i == 0)
+ i += tx_ring->count;
+ i--;
+ buffer_info = &tx_ring->buffer_info[i];
+ e1000_put_txbuf(adapter, buffer_info);
+ }
+
+ return 0;
+}
+
+static void e1000_tx_queue(struct e1000_adapter *adapter,
+ int tx_flags, int count)
+{
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc = NULL;
+ struct e1000_buffer *buffer_info;
+ u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+ unsigned int i;
+
+ if (tx_flags & E1000_TX_FLAGS_TSO) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+ E1000_TXD_CMD_TSE;
+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+
+ if (tx_flags & E1000_TX_FLAGS_IPV4)
+ txd_upper |= E1000_TXD_POPTS_IXSM << 8;
+ }
+
+ if (tx_flags & E1000_TX_FLAGS_CSUM) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+ }
+
+ if (tx_flags & E1000_TX_FLAGS_VLAN) {
+ txd_lower |= E1000_TXD_CMD_VLE;
+ txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+ }
+
+ i = tx_ring->next_to_use;
+
+ do {
+ buffer_info = &tx_ring->buffer_info[i];
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ tx_desc->lower.data =
+ cpu_to_le32(txd_lower | buffer_info->length);
+ tx_desc->upper.data = cpu_to_le32(txd_upper);
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ } while (--count > 0);
+
+ tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ tx_ring->next_to_use = i;
+
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_tdt_wa(adapter, i);
+ else
+ writel(i, adapter->hw.hw_addr + tx_ring->tail);
+
+ /*
+ * we need this if more than one processor can write to our tail
+ * at a time, it synchronizes IO on IA64/Altix systems
+ */
+ mmiowb();
+}
+
+#define MINIMUM_DHCP_PACKET_SIZE 282
+static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
+ struct sk_buff *skb)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 length, offset;
+
+ if (vlan_tx_tag_present(skb)) {
+ if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+ (adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
+ return 0;
+ }
+
+ if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
+ return 0;
+
+ if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
+ return 0;
+
+ {
+ const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
+ struct udphdr *udp;
+
+ if (ip->protocol != IPPROTO_UDP)
+ return 0;
+
+ udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
+ if (ntohs(udp->dest) != 67)
+ return 0;
+
+ offset = (u8 *)udp + 8 - skb->data;
+ length = skb->len - offset;
+ return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
+ }
+
+ return 0;
+}
+
+static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+ /*
+ * Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it.
+ */
+ smp_mb();
+
+ /*
+ * We need to check again in a case another CPU has just
+ * made room available.
+ */
+ if (e1000_desc_unused(adapter->tx_ring) < size)
+ return -EBUSY;
+
+ /* A reprieve! */
+ netif_start_queue(netdev);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000_desc_unused(adapter->tx_ring) >= size)
+ return 0;
+ return __e1000_maybe_stop_tx(netdev, size);
+}
+
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
+static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ unsigned int first;
+ unsigned int max_per_txd = E1000_MAX_PER_TXD;
+ unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+ unsigned int tx_flags = 0;
+ unsigned int len = skb_headlen(skb);
+ unsigned int nr_frags;
+ unsigned int mss;
+ int count = 0;
+ int tso;
+ unsigned int f;
+
+ if (test_bit(__E1000_DOWN, &adapter->state)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ mss = skb_shinfo(skb)->gso_size;
+ /*
+ * The controller does a simple calculation to
+ * make sure there is enough room in the FIFO before
+ * initiating the DMA for each buffer. The calc is:
+ * 4 = ceil(buffer len/mss). To make sure we don't
+ * overrun the FIFO, adjust the max buffer len if mss
+ * drops.
+ */
+ if (mss) {
+ u8 hdr_len;
+ max_per_txd = min(mss << 2, max_per_txd);
+ max_txd_pwr = fls(max_per_txd) - 1;
+
+ /*
+ * TSO Workaround for 82571/2/3 Controllers -- if skb->data
+ * points to just header, pull a few bytes of payload from
+ * frags into skb->data
+ */
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ /*
+ * we do this workaround for ES2LAN, but it is un-necessary,
+ * avoiding it could save a lot of cycles
+ */
+ if (skb->data_len && (hdr_len == len)) {
+ unsigned int pull_size;
+
+ pull_size = min((unsigned int)4, skb->data_len);
+ if (!__pskb_pull_tail(skb, pull_size)) {
+ e_err("__pskb_pull_tail failed.\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ len = skb_headlen(skb);
+ }
+ }
+
+ /* reserve a descriptor for the offload context */
+ if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
+ count++;
+ count++;
+
+ count += TXD_USE_COUNT(len, max_txd_pwr);
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (f = 0; f < nr_frags; f++)
+ count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+ max_txd_pwr);
+
+ if (adapter->hw.mac.tx_pkt_filtering)
+ e1000_transfer_dhcp_info(adapter, skb);
+
+ /*
+ * need: count + 2 desc gap to keep tail from touching
+ * head, otherwise try next time
+ */
+ if (e1000_maybe_stop_tx(netdev, count + 2))
+ return NETDEV_TX_BUSY;
+
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= E1000_TX_FLAGS_VLAN;
+ tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+ }
+
+ first = tx_ring->next_to_use;
+
+ tso = e1000_tso(adapter, skb);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (tso)
+ tx_flags |= E1000_TX_FLAGS_TSO;
+ else if (e1000_tx_csum(adapter, skb))
+ tx_flags |= E1000_TX_FLAGS_CSUM;
+
+ /*
+ * Old method was to assume IPv4 packet by default if TSO was enabled.
+ * 82571 hardware supports TSO capabilities for IPv6 as well...
+ * no longer assume, we must.
+ */
+ if (skb->protocol == htons(ETH_P_IP))
+ tx_flags |= E1000_TX_FLAGS_IPV4;
+
+ /* if count is 0 then mapping error has occurred */
+ count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
+ if (count) {
+ e1000_tx_queue(adapter, tx_flags, count);
+ /* Make sure there is space in the ring for the next send. */
+ e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
+
+ } else {
+ dev_kfree_skb_any(skb);
+ tx_ring->buffer_info[first].time_stamp = 0;
+ tx_ring->next_to_use = first;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * e1000_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void e1000_tx_timeout(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+}
+
+static void e1000_reset_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter;
+ adapter = container_of(work, struct e1000_adapter, reset_task);
+
+ /* don't run the task if already down */
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+ (adapter->flags & FLAG_RX_RESTART_NOW))) {
+ e1000e_dump(adapter);
+ e_err("Reset adapter\n");
+ }
+ e1000e_reinit_locked(adapter);
+}
+
+/**
+ * e1000_get_stats64 - Get System Network Statistics
+ * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
+ *
+ * Returns the address of the device statistics structure.
+ **/
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ /* Fill out the OS statistics structure */
+ stats->rx_bytes = adapter->stats.gorc;
+ stats->rx_packets = adapter->stats.gprc;
+ stats->tx_bytes = adapter->stats.gotc;
+ stats->tx_packets = adapter->stats.gptc;
+ stats->multicast = adapter->stats.mprc;
+ stats->collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /*
+ * RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC
+ */
+ stats->rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ stats->rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ stats->rx_crc_errors = adapter->stats.crcerrs;
+ stats->rx_frame_errors = adapter->stats.algnerrc;
+ stats->rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ stats->tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ stats->tx_aborted_errors = adapter->stats.ecol;
+ stats->tx_window_errors = adapter->stats.latecol;
+ stats->tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ spin_unlock(&adapter->stats64_lock);
+ return stats;
+}
+
+/**
+ * e1000_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* Jumbo frame support */
+ if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+ e_err("Jumbo Frames not supported.\n");
+ return -EINVAL;
+ }
+
+ /* Supported frame sizes */
+ if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
+ (max_frame > adapter->max_hw_frame_size)) {
+ e_err("Unsupported MTU setting\n");
+ return -EINVAL;
+ }
+
+ /* Jumbo frame workaround on 82579 requires CRC be stripped */
+ if ((adapter->hw.mac.type == e1000_pch2lan) &&
+ !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
+ (new_mtu > ETH_DATA_LEN)) {
+ e_err("Jumbo Frames not supported on 82579 when CRC "
+ "stripping is disabled.\n");
+ return -EINVAL;
+ }
+
+ /* 82573 Errata 17 */
+ if (((adapter->hw.mac.type == e1000_82573) ||
+ (adapter->hw.mac.type == e1000_82574)) &&
+ (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
+ adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
+ e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
+ }
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
+ adapter->max_frame_size = max_frame;
+ e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+ if (netif_running(netdev))
+ e1000e_down(adapter);
+
+ /*
+ * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more, this pushes us to allocate from the next
+ * larger slab size.
+ * i.e. RXBUFFER_2048 --> size-4096 slab
+ * However with the new *_jumbo_rx* routines, jumbo receives will use
+ * fragmented skbs
+ */
+
+ if (max_frame <= 2048)
+ adapter->rx_buffer_len = 2048;
+ else
+ adapter->rx_buffer_len = 4096;
+
+ /* adjust allocation if LPE protects us, and we aren't using SBP */
+ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
+ (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
+ adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
+ + ETH_FCS_LEN;
+
+ if (netif_running(netdev))
+ e1000e_up(adapter);
+ else
+ e1000e_reset(adapter);
+
+ clear_bit(__E1000_RESETTING, &adapter->state);
+
+ return 0;
+}
+
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (adapter->hw.phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = adapter->hw.phy.addr;
+ break;
+ case SIOCGMIIREG:
+ e1000_phy_read_status(adapter);
+
+ switch (data->reg_num & 0x1F) {
+ case MII_BMCR:
+ data->val_out = adapter->phy_regs.bmcr;
+ break;
+ case MII_BMSR:
+ data->val_out = adapter->phy_regs.bmsr;
+ break;
+ case MII_PHYSID1:
+ data->val_out = (adapter->hw.phy.id >> 16);
+ break;
+ case MII_PHYSID2:
+ data->val_out = (adapter->hw.phy.id & 0xFFFF);
+ break;
+ case MII_ADVERTISE:
+ data->val_out = adapter->phy_regs.advertise;
+ break;
+ case MII_LPA:
+ data->val_out = adapter->phy_regs.lpa;
+ break;
+ case MII_EXPANSION:
+ data->val_out = adapter->phy_regs.expansion;
+ break;
+ case MII_CTRL1000:
+ data->val_out = adapter->phy_regs.ctrl1000;
+ break;
+ case MII_STAT1000:
+ data->val_out = adapter->phy_regs.stat1000;
+ break;
+ case MII_ESTATUS:
+ data->val_out = adapter->phy_regs.estatus;
+ break;
+ default:
+ return -EIO;
+ }
+ break;
+ case SIOCSMIIREG:
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return e1000_mii_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 i, mac_reg;
+ u16 phy_reg, wuc_enable;
+ int retval = 0;
+
+ /* copy MAC RARs to PHY RARs */
+ e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+ retval = hw->phy.ops.acquire(hw);
+ if (retval) {
+ e_err("Could not acquire PHY\n");
+ return retval;
+ }
+
+ /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
+ retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+ if (retval)
+ goto out;
+
+ /* copy MAC MTA to PHY MTA - only needed for pchlan */
+ for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
+ mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
+ hw->phy.ops.write_reg_page(hw, BM_MTA(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
+ (u16)((mac_reg >> 16) & 0xFFFF));
+ }
+
+ /* configure PHY Rx Control register */
+ hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
+ mac_reg = er32(RCTL);
+ if (mac_reg & E1000_RCTL_UPE)
+ phy_reg |= BM_RCTL_UPE;
+ if (mac_reg & E1000_RCTL_MPE)
+ phy_reg |= BM_RCTL_MPE;
+ phy_reg &= ~(BM_RCTL_MO_MASK);
+ if (mac_reg & E1000_RCTL_MO_3)
+ phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
+ << BM_RCTL_MO_SHIFT);
+ if (mac_reg & E1000_RCTL_BAM)
+ phy_reg |= BM_RCTL_BAM;
+ if (mac_reg & E1000_RCTL_PMCF)
+ phy_reg |= BM_RCTL_PMCF;
+ mac_reg = er32(CTRL);
+ if (mac_reg & E1000_CTRL_RFCE)
+ phy_reg |= BM_RCTL_RFCE;
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
+
+ /* enable PHY wakeup in MAC register */
+ ew32(WUFC, wufc);
+ ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
+
+ /* configure and enable PHY wakeup in PHY registers */
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+
+ /* activate PHY wakeup */
+ wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
+ retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+ if (retval)
+ e_err("Could not set PHY Host Wakeup bit\n");
+out:
+ hw->phy.ops.release(hw);
+
+ return retval;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, ctrl_ext, rctl, status;
+ /* Runtime suspend should only enable wakeup for link changes */
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+ int retval = 0;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+ e1000e_down(adapter);
+ e1000_free_irq(adapter);
+ }
+ e1000e_reset_interrupt_capability(adapter);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU)
+ wufc &= ~E1000_WUFC_LNKC;
+
+ if (wufc) {
+ e1000_setup_rctl(adapter);
+ e1000_set_multi(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & E1000_WUFC_MC) {
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_MPE;
+ ew32(RCTL, rctl);
+ }
+
+ ctrl = er32(CTRL);
+ /* advertise wake from D3Cold */
+ #define E1000_CTRL_ADVD3WUC 0x00100000
+ /* phy power management enable */
+ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+ ctrl |= E1000_CTRL_ADVD3WUC;
+ if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
+ ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
+ ew32(CTRL, ctrl);
+
+ if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
+ adapter->hw.phy.media_type ==
+ e1000_media_type_internal_serdes) {
+ /* keep the laser running in D3 */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ if (adapter->flags & FLAG_IS_ICH)
+ e1000_suspend_workarounds_ich8lan(&adapter->hw);
+
+ /* Allow time for pending master requests to run */
+ e1000e_disable_pcie_master(&adapter->hw);
+
+ if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
+ /* enable wakeup by the PHY */
+ retval = e1000_init_phy_wakeup(adapter, wufc);
+ if (retval)
+ return retval;
+ } else {
+ /* enable wakeup by the MAC */
+ ew32(WUFC, wufc);
+ ew32(WUC, E1000_WUC_PME_EN);
+ }
+ } else {
+ ew32(WUC, 0);
+ ew32(WUFC, 0);
+ }
+
+ *enable_wake = !!wufc;
+
+ /* make sure adapter isn't asleep if manageability is enabled */
+ if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
+ (hw->mac.ops.check_mng_mode(hw)))
+ *enable_wake = true;
+
+ if (adapter->hw.phy.type == e1000_phy_igp_3)
+ e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
+
+ /*
+ * Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant.
+ */
+ e1000e_release_hw_control(adapter);
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
+{
+ if (sleep && wake) {
+ pci_prepare_to_sleep(pdev);
+ return;
+ }
+
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
+ bool wake)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /*
+ * The pci-e switch on some quad port adapters will report a
+ * correctable error when the MAC transitions from D0 to D3. To
+ * prevent this we need to mask off the correctable errors on the
+ * downstream port of the pci-e switch.
+ */
+ if (adapter->flags & FLAG_IS_QUAD_PORT) {
+ struct pci_dev *us_dev = pdev->bus->self;
+ int pos = pci_pcie_cap(us_dev);
+ u16 devctl;
+
+ pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
+ pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
+ (devctl & ~PCI_EXP_DEVCTL_CERE));
+
+ e1000_power_off(pdev, sleep, wake);
+
+ pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
+ } else {
+ e1000_power_off(pdev, sleep, wake);
+ }
+}
+
+#ifdef CONFIG_PCIEASPM
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+ pci_disable_link_state_locked(pdev, state);
+}
+#else
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+ int pos;
+ u16 reg16;
+
+ /*
+ * Both device and parent should have the same ASPM setting.
+ * Disable ASPM in downstream component first and then upstream.
+ */
+ pos = pci_pcie_cap(pdev);
+ pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+ reg16 &= ~state;
+ pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+
+ if (!pdev->bus->self)
+ return;
+
+ pos = pci_pcie_cap(pdev->bus->self);
+ pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
+ reg16 &= ~state;
+ pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
+}
+#endif
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+ dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+ (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
+ (state & PCIE_LINK_STATE_L1) ? "L1" : "");
+
+ __e1000e_disable_aspm(pdev, state);
+}
+
+#ifdef CONFIG_PM
+static bool e1000e_pm_ready(struct e1000_adapter *adapter)
+{
+ return !!adapter->tx_ring->buffer_info;
+}
+
+static int __e1000_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 aspm_disable_flag = 0;
+ u32 err;
+
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
+ aspm_disable_flag = PCIE_LINK_STATE_L0S;
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+ aspm_disable_flag |= PCIE_LINK_STATE_L1;
+ if (aspm_disable_flag)
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ e1000e_set_interrupt_capability(adapter);
+ if (netif_running(netdev)) {
+ err = e1000_request_irq(adapter);
+ if (err)
+ return err;
+ }
+
+ if (hw->mac.type == e1000_pch2lan)
+ e1000_resume_workarounds_pchlan(&adapter->hw);
+
+ e1000e_power_up_phy(adapter);
+
+ /* report the system wakeup cause from S3/S4 */
+ if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
+ u16 phy_data;
+
+ e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
+ if (phy_data) {
+ e_info("PHY Wakeup cause - %s\n",
+ phy_data & E1000_WUS_EX ? "Unicast Packet" :
+ phy_data & E1000_WUS_MC ? "Multicast Packet" :
+ phy_data & E1000_WUS_BC ? "Broadcast Packet" :
+ phy_data & E1000_WUS_MAG ? "Magic Packet" :
+ phy_data & E1000_WUS_LNKC ? "Link Status "
+ " Change" : "other");
+ }
+ e1e_wphy(&adapter->hw, BM_WUS, ~0);
+ } else {
+ u32 wus = er32(WUS);
+ if (wus) {
+ e_info("MAC Wakeup cause - %s\n",
+ wus & E1000_WUS_EX ? "Unicast Packet" :
+ wus & E1000_WUS_MC ? "Multicast Packet" :
+ wus & E1000_WUS_BC ? "Broadcast Packet" :
+ wus & E1000_WUS_MAG ? "Magic Packet" :
+ wus & E1000_WUS_LNKC ? "Link Status Change" :
+ "other");
+ }
+ ew32(WUS, ~0);
+ }
+
+ e1000e_reset(adapter);
+
+ e1000_init_manageability_pt(adapter);
+
+ if (netif_running(netdev))
+ e1000e_up(adapter);
+
+ netif_device_attach(netdev);
+
+ /*
+ * If the controller has AMT, do not set DRV_LOAD until the interface
+ * is up. For all other cases, let the f/w know that the h/w is now
+ * under the control of the driver.
+ */
+ if (!(adapter->flags & FLAG_HAS_AMT))
+ e1000e_get_hw_control(adapter);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int e1000_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int retval;
+ bool wake;
+
+ retval = __e1000_shutdown(pdev, &wake, false);
+ if (!retval)
+ e1000_complete_shutdown(pdev, true, wake);
+
+ return retval;
+}
+
+static int e1000_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter))
+ adapter->idle_check = true;
+
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int e1000_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter)) {
+ bool wake;
+
+ __e1000_shutdown(pdev, &wake, true);
+ }
+
+ return 0;
+}
+
+static int e1000_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
+ if (adapter->idle_check) {
+ adapter->idle_check = false;
+ if (!e1000e_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC);
+ }
+
+ return -EBUSY;
+}
+
+static int e1000_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
+ adapter->idle_check = !dev->power.runtime_auto;
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
+
+static void e1000_shutdown(struct pci_dev *pdev)
+{
+ bool wake = false;
+
+ __e1000_shutdown(pdev, &wake, false);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ e1000_complete_shutdown(pdev, false, wake);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+
+static irqreturn_t e1000_intr_msix(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->msix_entries) {
+ int vector, msix_irq;
+
+ vector = 0;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_intr_msix_rx(msix_irq, netdev);
+ enable_irq(msix_irq);
+
+ vector++;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_intr_msix_tx(msix_irq, netdev);
+ enable_irq(msix_irq);
+
+ vector++;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_msix_other(msix_irq, netdev);
+ enable_irq(msix_irq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void e1000_netpoll(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ switch (adapter->int_mode) {
+ case E1000E_INT_MODE_MSIX:
+ e1000_intr_msix(adapter->pdev->irq, netdev);
+ break;
+ case E1000E_INT_MODE_MSI:
+ disable_irq(adapter->pdev->irq);
+ e1000_intr_msi(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+ break;
+ default: /* E1000E_INT_MODE_LEGACY */
+ disable_irq(adapter->pdev->irq);
+ e1000_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+ break;
+ }
+}
+#endif
+
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ e1000e_down(adapter);
+ pci_disable_device(pdev);
+
+ /* Request a slot slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 aspm_disable_flag = 0;
+ int err;
+ pci_ers_result_t result;
+
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
+ aspm_disable_flag = PCIE_LINK_STATE_L0S;
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+ aspm_disable_flag |= PCIE_LINK_STATE_L1;
+ if (aspm_disable_flag)
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pdev->state_saved = true;
+ pci_restore_state(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ e1000e_reset(adapter);
+ ew32(WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ return result;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * its OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ e1000_init_manageability_pt(adapter);
+
+ if (netif_running(netdev)) {
+ if (e1000e_up(adapter)) {
+ dev_err(&pdev->dev,
+ "can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+ /*
+ * If the controller has AMT, do not set DRV_LOAD until the interface
+ * is up. For all other cases, let the f/w know that the h/w is now
+ * under the control of the driver.
+ */
+ if (!(adapter->flags & FLAG_HAS_AMT))
+ e1000e_get_hw_control(adapter);
+
+}
+
+static void e1000_print_device_info(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 ret_val;
+ u8 pba_str[E1000_PBANUM_LENGTH];
+
+ /* print bus type/speed/width info */
+ e_info("(PCI Express:2.5GT/s:%s) %pM\n",
+ /* bus width */
+ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+ "Width x1"),
+ /* MAC address */
+ netdev->dev_addr);
+ e_info("Intel(R) PRO/%s Network Connection\n",
+ (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
+ ret_val = e1000_read_pba_string_generic(hw, pba_str,
+ E1000_PBANUM_LENGTH);
+ if (ret_val)
+ strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
+ e_info("MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, pba_str);
+}
+
+static void e1000_eeprom_checks(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int ret_val;
+ u16 buf = 0;
+
+ if (hw->mac.type != e1000_82573)
+ return;
+
+ ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
+ if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
+ /* Deep Smart Power Down (DSPD) */
+ dev_warn(&adapter->pdev->dev,
+ "Warning: detected DSPD enabled in EEPROM\n");
+ }
+}
+
+static int e1000_set_features(struct net_device *netdev, u32 features)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u32 changed = features ^ netdev->features;
+
+ if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
+ adapter->flags |= FLAG_TSO_FORCE;
+
+ if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
+ NETIF_F_RXCSUM)))
+ return 0;
+
+ if (netif_running(netdev))
+ e1000e_reinit_locked(adapter);
+ else
+ e1000e_reset(adapter);
+
+ return 0;
+}
+
+static const struct net_device_ops e1000e_netdev_ops = {
+ .ndo_open = e1000_open,
+ .ndo_stop = e1000_close,
+ .ndo_start_xmit = e1000_xmit_frame,
+ .ndo_get_stats64 = e1000e_get_stats64,
+ .ndo_set_rx_mode = e1000_set_multi,
+ .ndo_set_mac_address = e1000_set_mac,
+ .ndo_change_mtu = e1000_change_mtu,
+ .ndo_do_ioctl = e1000_ioctl,
+ .ndo_tx_timeout = e1000_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+
+ .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = e1000_netpoll,
+#endif
+ .ndo_set_features = e1000_set_features,
+};
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit e1000_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct e1000_adapter *adapter;
+ struct e1000_hw *hw;
+ const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
+ resource_size_t mmio_start, mmio_len;
+ resource_size_t flash_start, flash_len;
+
+ static int cards_found;
+ u16 aspm_disable_flag = 0;
+ int i, err, pci_using_dac;
+ u16 eeprom_data = 0;
+ u16 eeprom_apme_mask = E1000_EEPROM_APME;
+
+ if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
+ aspm_disable_flag = PCIE_LINK_STATE_L0S;
+ if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
+ aspm_disable_flag |= PCIE_LINK_STATE_L1;
+ if (aspm_disable_flag)
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ pci_using_dac = 0;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ }
+
+ err = pci_request_selected_regions_exclusive(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ e1000e_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ /* AER (Advanced Error Reporting) hooks */
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+ /* PCI config space info */
+ err = pci_save_state(pdev);
+ if (err)
+ goto err_alloc_etherdev;
+
+ err = -ENOMEM;
+ netdev = alloc_etherdev(sizeof(struct e1000_adapter));
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ netdev->irq = pdev->irq;
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ hw = &adapter->hw;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ei = ei;
+ adapter->pba = ei->pba;
+ adapter->flags = ei->flags;
+ adapter->flags2 = ei->flags2;
+ adapter->hw.adapter = adapter;
+ adapter->hw.mac.type = ei->mac;
+ adapter->max_hw_frame_size = ei->max_hw_frame_size;
+ adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
+
+ mmio_start = pci_resource_start(pdev, 0);
+ mmio_len = pci_resource_len(pdev, 0);
+
+ err = -EIO;
+ adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+ if (!adapter->hw.hw_addr)
+ goto err_ioremap;
+
+ if ((adapter->flags & FLAG_HAS_FLASH) &&
+ (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ flash_start = pci_resource_start(pdev, 1);
+ flash_len = pci_resource_len(pdev, 1);
+ adapter->hw.flash_address = ioremap(flash_start, flash_len);
+ if (!adapter->hw.flash_address)
+ goto err_flashmap;
+ }
+
+ /* construct the net_device struct */
+ netdev->netdev_ops = &e1000e_netdev_ops;
+ e1000e_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ netdev->mem_start = mmio_start;
+ netdev->mem_end = mmio_start + mmio_len;
+
+ adapter->bd_number = cards_found++;
+
+ e1000e_check_options(adapter);
+
+ /* setup adapter struct */
+ err = e1000_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+ memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
+ memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+
+ err = ei->get_variants(adapter);
+ if (err)
+ goto err_hw_init;
+
+ if ((adapter->flags & FLAG_IS_ICH) &&
+ (adapter->flags & FLAG_READ_ONLY_NVM))
+ e1000e_write_protect_nvm_ich8lan(&adapter->hw);
+
+ hw->mac.ops.get_bus_info(&adapter->hw);
+
+ adapter->hw.phy.autoneg_wait_to_complete = 0;
+
+ /* Copper options */
+ if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+ adapter->hw.phy.disable_polarity_correction = 0;
+ adapter->hw.phy.ms_type = e1000_ms_hw_default;
+ }
+
+ if (e1000_check_reset_block(&adapter->hw))
+ e_info("PHY reset is blocked due to SOL/IDER session.\n");
+
+ /* Set initial default active device features */
+ netdev->features = (NETIF_F_SG |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM);
+
+ /* Set user-changeable features (subset of all device features) */
+ netdev->hw_features = netdev->features;
+
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
+ netdev->features |= NETIF_F_HW_VLAN_FILTER;
+
+ netdev->vlan_features |= (NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_HW_CSUM);
+
+ if (pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (e1000e_enable_mng_pass_thru(&adapter->hw))
+ adapter->flags |= FLAG_MNG_PT_ENABLED;
+
+ /*
+ * before reading the NVM, reset the controller to
+ * put the device in a known good starting state
+ */
+ adapter->hw.mac.ops.reset_hw(&adapter->hw);
+
+ /*
+ * systems with ASPM and others may see the checksum fail on the first
+ * attempt. Let's give it a few tries
+ */
+ for (i = 0;; i++) {
+ if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
+ break;
+ if (i == 2) {
+ e_err("The NVM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+ }
+
+ e1000_eeprom_checks(adapter);
+
+ /* copy the MAC address */
+ if (e1000e_read_mac_addr(&adapter->hw))
+ e_err("NVM Read Error while reading MAC address\n");
+
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+ e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = e1000_watchdog;
+ adapter->watchdog_timer.data = (unsigned long) adapter;
+
+ init_timer(&adapter->phy_info_timer);
+ adapter->phy_info_timer.function = e1000_update_phy_info;
+ adapter->phy_info_timer.data = (unsigned long) adapter;
+
+ INIT_WORK(&adapter->reset_task, e1000_reset_task);
+ INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
+ INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
+ INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
+ INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
+
+ /* Initialize link parameters. User can change them with ethtool */
+ adapter->hw.mac.autoneg = 1;
+ adapter->fc_autoneg = 1;
+ adapter->hw.fc.requested_mode = e1000_fc_default;
+ adapter->hw.fc.current_mode = e1000_fc_default;
+ adapter->hw.phy.autoneg_advertised = 0x2f;
+
+ /* ring size defaults */
+ adapter->rx_ring->count = 256;
+ adapter->tx_ring->count = 256;
+
+ /*
+ * Initial Wake on LAN setting - If APM wake is enabled in
+ * the EEPROM, enable the ACPI Magic Packet filter
+ */
+ if (adapter->flags & FLAG_APME_IN_WUC) {
+ /* APME bit in EEPROM is mapped to WUC.APME */
+ eeprom_data = er32(WUC);
+ eeprom_apme_mask = E1000_WUC_APME;
+ if ((hw->mac.type > e1000_ich10lan) &&
+ (eeprom_data & E1000_WUC_PHY_WAKE))
+ adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
+ } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
+ if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
+ (adapter->hw.bus.func == 1))
+ e1000_read_nvm(&adapter->hw,
+ NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+ else
+ e1000_read_nvm(&adapter->hw,
+ NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ }
+
+ /* fetch WoL from EEPROM */
+ if (eeprom_data & eeprom_apme_mask)
+ adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+ /*
+ * now that we have the eeprom settings, apply the special cases
+ * where the eeprom may be wrong or the board simply won't support
+ * wake on lan on a particular port
+ */
+ if (!(adapter->flags & FLAG_HAS_WOL))
+ adapter->eeprom_wol = 0;
+
+ /* initialize the wol settings based on the eeprom settings */
+ adapter->wol = adapter->eeprom_wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ /* save off EEPROM version number */
+ e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+
+ /* reset the hardware with the new settings */
+ e1000e_reset(adapter);
+
+ /*
+ * If the controller has AMT, do not set DRV_LOAD until the interface
+ * is up. For all other cases, let the f/w know that the h/w is now
+ * under the control of the driver.
+ */
+ if (!(adapter->flags & FLAG_HAS_AMT))
+ e1000e_get_hw_control(adapter);
+
+ strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ e1000_print_device_info(adapter);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+
+err_register:
+ if (!(adapter->flags & FLAG_HAS_AMT))
+ e1000e_release_hw_control(adapter);
+err_eeprom:
+ if (!e1000_check_reset_block(&adapter->hw))
+ e1000_phy_hw_reset(&adapter->hw);
+err_hw_init:
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+err_sw_init:
+ if (adapter->hw.flash_address)
+ iounmap(adapter->hw.flash_address);
+ e1000e_reset_interrupt_capability(adapter);
+err_flashmap:
+ iounmap(adapter->hw.hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. The could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit e1000_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+ /*
+ * The timers may be rescheduled, so explicitly disable them
+ * from being rescheduled.
+ */
+ if (!down)
+ set_bit(__E1000_DOWN, &adapter->state);
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
+ cancel_work_sync(&adapter->downshift_task);
+ cancel_work_sync(&adapter->update_phy_task);
+ cancel_work_sync(&adapter->print_hang_task);
+
+ if (!(netdev->flags & IFF_UP))
+ e1000_power_down_phy(adapter);
+
+ /* Don't lie to e1000_close() down the road. */
+ if (!down)
+ clear_bit(__E1000_DOWN, &adapter->state);
+ unregister_netdev(netdev);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ /*
+ * Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant.
+ */
+ e1000e_release_hw_control(adapter);
+
+ e1000e_reset_interrupt_capability(adapter);
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ iounmap(adapter->hw.hw_addr);
+ if (adapter->hw.flash_address)
+ iounmap(adapter->hw.flash_address);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ free_netdev(netdev);
+
+ /* AER disable */
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static struct pci_error_handlers e1000_err_handler = {
+ .error_detected = e1000_io_error_detected,
+ .slot_reset = e1000_io_slot_reset,
+ .resume = e1000_io_resume,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
+ board_80003es2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
+ board_80003es2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
+ board_80003es2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
+ board_80003es2lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
+
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops e1000_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
+ SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
+ e1000_runtime_resume, e1000_idle)
+};
+#endif
+
+/* PCI Device API Driver */
+static struct pci_driver e1000_driver = {
+ .name = e1000e_driver_name,
+ .id_table = e1000_pci_tbl,
+ .probe = e1000_probe,
+ .remove = __devexit_p(e1000_remove),
+#ifdef CONFIG_PM
+ .driver.pm = &e1000_pm_ops,
+#endif
+ .shutdown = e1000_shutdown,
+ .err_handler = &e1000_err_handler
+};
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init e1000_init_module(void)
+{
+ int ret;
+ pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+ e1000e_driver_version);
+ pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
+ ret = pci_register_driver(&e1000_driver);
+
+ return ret;
+}
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit e1000_exit_module(void)
+{
+ pci_unregister_driver(&e1000_driver);
+}
+module_exit(e1000_exit_module);
+
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/* e1000_main.c */
diff --git a/drivers/net/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 4dd9b63273f..4dd9b63273f 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 8666476cb9b..8666476cb9b 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
diff --git a/drivers/net/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index c6e4621b626..c6e4621b626 100644
--- a/drivers/net/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index c0857bdfb03..7881fb95a25 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -66,10 +66,6 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
-static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw,
- u16 offset);
-static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
- u16 offset);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
static const u16 e1000_82580_rxpbs_table[] =
@@ -1055,7 +1051,10 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
/* Disabling VLAN filtering */
hw_dbg("Initializing the IEEE VLAN\n");
- igb_clear_vfta(hw);
+ if (hw->mac.type == e1000_i350)
+ igb_clear_vfta_i350(hw);
+ else
+ igb_clear_vfta(hw);
/* Setup the receive address */
igb_init_rx_addrs(hw, rar_count);
@@ -1584,14 +1583,31 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
**/
void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
{
- u32 dtxswc = rd32(E1000_DTXSWC);
+ u32 dtxswc;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ dtxswc = rd32(E1000_DTXSWC);
+ if (enable)
+ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ wr32(E1000_DTXSWC, dtxswc);
+ break;
+ case e1000_i350:
+ dtxswc = rd32(E1000_TXSWC);
+ if (enable)
+ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ wr32(E1000_TXSWC, dtxswc);
+ break;
+ default:
+ /* Currently no other hardware supports loopback */
+ break;
+ }
- if (enable)
- dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
- else
- dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
- wr32(E1000_DTXSWC, dtxswc);
}
/**
@@ -1820,7 +1836,8 @@ u16 igb_rxpbs_adjust_82580(u32 data)
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
-s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+ u16 offset)
{
s32 ret_val = 0;
u16 checksum = 0;
@@ -1855,7 +1872,7 @@ out:
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM.
**/
-s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
{
s32 ret_val;
u16 checksum = 0;
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 786e110011a..08a757eb660 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -130,7 +130,9 @@ union e1000_adv_tx_desc {
#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 7b8ddd830f1..f5fc5725ea9 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -85,6 +85,7 @@
#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */
+#define E1000_RXDEXT_STATERR_LB 0x00040000
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
#define E1000_RXDEXT_STATERR_SEQ 0x04000000
@@ -409,6 +410,9 @@
#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */
/* Extended Interrupt Cause Set */
+/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
+#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
+
/* Transmit Descriptor Control */
/* Enable the counting of descriptors still to be processed. */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 4519a136717..4519a136717 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 2b5ef761d2a..73aac082c44 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -117,6 +117,50 @@ static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
wrfl();
}
+/* Due to a hw errata, if the host tries to configure the VFTA register
+ * while performing queries from the BMC or DMA, then the VFTA in some
+ * cases won't be written.
+ */
+
+/**
+ * igb_clear_vfta_i350 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void igb_clear_vfta_i350(struct e1000_hw *hw)
+{
+ u32 offset;
+ int i;
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ for (i = 0; i < 10; i++)
+ array_wr32(E1000_VFTA, offset, 0);
+
+ wrfl();
+ }
+}
+
+/**
+ * igb_write_vfta_i350 - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ int i;
+
+ for (i = 0; i < 10; i++)
+ array_wr32(E1000_VFTA, offset, value);
+
+ wrfl();
+}
+
/**
* igb_init_rx_addrs - Initialize receive address's
* @hw: pointer to the HW structure
@@ -155,9 +199,12 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
{
u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
- u32 vfta = array_rd32(E1000_VFTA, index);
+ u32 vfta;
+ struct igb_adapter *adapter = hw->back;
s32 ret_val = 0;
+ vfta = adapter->shadow_vfta[index];
+
/* bit was set/cleared before we started */
if ((!!(vfta & mask)) == add) {
ret_val = -E1000_ERR_CONFIG;
@@ -167,8 +214,11 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
else
vfta &= ~mask;
}
-
- igb_write_vfta(hw, index, vfta);
+ if (hw->mac.type == e1000_i350)
+ igb_write_vfta_i350(hw, index, vfta);
+ else
+ igb_write_vfta(hw, index, vfta);
+ adapter->shadow_vfta[index] = vfta;
return ret_val;
}
@@ -191,6 +241,13 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
u16 offset, nvm_alt_mac_addr_offset, nvm_data;
u8 alt_mac_addr[ETH_ALEN];
+ /*
+ * Alternate MAC address is handled by the option ROM for 82580
+ * and newer. SW support not required.
+ */
+ if (hw->mac.type >= e1000_82580)
+ goto out;
+
ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
&nvm_alt_mac_addr_offset);
if (ret_val) {
@@ -198,13 +255,18 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
goto out;
}
- if (nvm_alt_mac_addr_offset == 0xFFFF) {
+ if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+ (nvm_alt_mac_addr_offset == 0x0000))
/* There is no Alternate MAC Address */
goto out;
- }
if (hw->bus.func == E1000_FUNC_1)
nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+ if (hw->bus.func == E1000_FUNC_2)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
+
+ if (hw->bus.func == E1000_FUNC_3)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
for (i = 0; i < ETH_ALEN; i += 2) {
offset = nvm_alt_mac_addr_offset + (i >> 1);
ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 4927f61fbbc..e45996b4ea3 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -60,6 +60,7 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
void igb_clear_vfta(struct e1000_hw *hw);
+void igb_clear_vfta_i350(struct e1000_hw *hw);
s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add);
void igb_config_collision_dist(struct e1000_hw *hw);
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 74f2f11ac29..469d95eaa15 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -34,7 +34,7 @@
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
- * returns SUCCESS if it successfuly read message from buffer
+ * returns SUCCESS if it successfully read message from buffer
**/
s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index eddb0f83dce..eddb0f83dce 100644
--- a/drivers/net/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 40407124e72..40407124e72 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index a2a7ca9fa73..a2a7ca9fa73 100644
--- a/drivers/net/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index e662554c62d..7edf31efe75 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -306,6 +306,12 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
u32 i, i2ccmd = 0;
u16 phy_data_swapped;
+ /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/
+ if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) {
+ hw_dbg("PHY I2C Address %d is out of range.\n",
+ hw->phy.addr);
+ return -E1000_ERR_CONFIG;
+ }
/* Swap the data bytes for the I2C interface */
phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 8510797b9d8..8510797b9d8 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 0990f6d860c..0a860bc1198 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -318,6 +318,7 @@
#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
+#define E1000_TXSWC 0x05ACC /* Tx Switch Control */
/* These act per VF so an array friendly macro is used */
#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
new file mode 100644
index 00000000000..c69feebf265
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -0,0 +1,450 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _IGB_H_
+#define _IGB_H_
+
+#include "e1000_mac.h"
+#include "e1000_82575.h"
+
+#include <linux/clocksource.h>
+#include <linux/timecompare.h>
+#include <linux/net_tstamp.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+struct igb_adapter;
+
+/* Interrupt defines */
+#define IGB_START_ITR 648 /* ~6000 ints/sec */
+#define IGB_4K_ITR 980
+#define IGB_20K_ITR 196
+#define IGB_70K_ITR 56
+
+/* TX/RX descriptor defines */
+#define IGB_DEFAULT_TXD 256
+#define IGB_DEFAULT_TX_WORK 128
+#define IGB_MIN_TXD 80
+#define IGB_MAX_TXD 4096
+
+#define IGB_DEFAULT_RXD 256
+#define IGB_MIN_RXD 80
+#define IGB_MAX_RXD 4096
+
+#define IGB_DEFAULT_ITR 3 /* dynamic */
+#define IGB_MAX_ITR_USECS 10000
+#define IGB_MIN_ITR_USECS 10
+#define NON_Q_VECTORS 1
+#define MAX_Q_VECTORS 8
+
+/* Transmit and receive queues */
+#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
+ (hw->mac.type > e1000_82575 ? 8 : 4))
+#define IGB_MAX_TX_QUEUES 16
+
+#define IGB_MAX_VF_MC_ENTRIES 30
+#define IGB_MAX_VF_FUNCTIONS 8
+#define IGB_MAX_VFTA_ENTRIES 128
+#define IGB_82576_VF_DEV_ID 0x10CA
+#define IGB_I350_VF_DEV_ID 0x1520
+
+struct vf_data_storage {
+ unsigned char vf_mac_addresses[ETH_ALEN];
+ u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 vlans_enabled;
+ u32 flags;
+ unsigned long last_nack;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
+ u16 tx_rate;
+ struct pci_dev *vfdev;
+};
+
+#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
+#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
+#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
+#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
+
+/* RX descriptor control thresholds.
+ * PTHRESH - MAC will consider prefetch if it has fewer than this number of
+ * descriptors available in its onboard memory.
+ * Setting this to 0 disables RX descriptor prefetch.
+ * HTHRESH - MAC will only prefetch if there are at least this many descriptors
+ * available in host memory.
+ * If PTHRESH is 0, this should also be 0.
+ * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
+ * descriptors until either it has this many to write back, or the
+ * ITR timer expires.
+ */
+#define IGB_RX_PTHRESH 8
+#define IGB_RX_HTHRESH 8
+#define IGB_TX_PTHRESH 8
+#define IGB_TX_HTHRESH 1
+#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
+ adapter->msix_entries) ? 1 : 4)
+#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
+ adapter->msix_entries) ? 1 : 16)
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define IGB_RXBUFFER_512 512
+#define IGB_RXBUFFER_16384 16384
+#define IGB_RX_HDR_LEN IGB_RXBUFFER_512
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+#define IGB_TX_QUEUE_WAKE 16
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES 0
+#define IGB_EEPROM_APME 0x0400
+
+#ifndef IGB_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define IGB_MASTER_SLAVE e1000_ms_hw_default
+#endif
+
+#define IGB_MNG_VLAN_NONE -1
+
+#define IGB_TX_FLAGS_CSUM 0x00000001
+#define IGB_TX_FLAGS_VLAN 0x00000002
+#define IGB_TX_FLAGS_TSO 0x00000004
+#define IGB_TX_FLAGS_IPV4 0x00000008
+#define IGB_TX_FLAGS_TSTAMP 0x00000010
+#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT 16
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct igb_tx_buffer {
+ union e1000_adv_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ u16 gso_segs;
+ __be16 protocol;
+ dma_addr_t dma;
+ u32 length;
+ u32 tx_flags;
+};
+
+struct igb_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ u32 page_offset;
+};
+
+struct igb_tx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 restart_queue;
+ u64 restart_queue2;
+};
+
+struct igb_rx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 drops;
+ u64 csum_err;
+ u64 alloc_failed;
+};
+
+struct igb_ring_container {
+ struct igb_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 work_limit; /* total work allowed per interrupt */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+struct igb_q_vector {
+ struct igb_adapter *adapter; /* backlink */
+ int cpu; /* CPU for DCA */
+ u32 eims_value; /* EIMS mask value */
+
+ struct igb_ring_container rx, tx;
+
+ struct napi_struct napi;
+ int numa_node;
+
+ u16 itr_val;
+ u8 set_itr;
+ void __iomem *itr_register;
+
+ char name[IFNAMSIZ + 9];
+};
+
+struct igb_ring {
+ struct igb_q_vector *q_vector; /* backlink to q_vector */
+ struct net_device *netdev; /* back pointer to net_device */
+ struct device *dev; /* device pointer for dma mapping */
+ union { /* array of buffer info structs */
+ struct igb_tx_buffer *tx_buffer_info;
+ struct igb_rx_buffer *rx_buffer_info;
+ };
+ void *desc; /* descriptor ring memory */
+ unsigned long flags; /* ring specific flags */
+ void __iomem *tail; /* pointer to ring tail register */
+
+ u16 count; /* number of desc. in the ring */
+ u8 queue_index; /* logical index of the ring*/
+ u8 reg_idx; /* physical index of the ring */
+ u32 size; /* length of desc. ring in bytes */
+
+ /* everything past this point are written often */
+ u16 next_to_clean ____cacheline_aligned_in_smp;
+ u16 next_to_use;
+
+ union {
+ /* TX */
+ struct {
+ struct igb_tx_queue_stats tx_stats;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync tx_syncp2;
+ };
+ /* RX */
+ struct {
+ struct igb_rx_queue_stats rx_stats;
+ struct u64_stats_sync rx_syncp;
+ };
+ };
+ /* Items past this point are only used during ring alloc / free */
+ dma_addr_t dma; /* phys address of the ring */
+ int numa_node; /* node to alloc ring memory on */
+};
+
+enum e1000_ring_flags_t {
+ IGB_RING_FLAG_RX_SCTP_CSUM,
+ IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
+ IGB_RING_FLAG_TX_CTX_IDX,
+ IGB_RING_FLAG_TX_DETECT_HANG
+};
+
+#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
+
+#define IGB_RX_DESC(R, i) \
+ (&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
+#define IGB_TX_DESC(R, i) \
+ (&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
+#define IGB_TX_CTXTDESC(R, i) \
+ (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
+
+/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
+static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
+ const u32 stat_err_bits)
+{
+ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
+/* igb_desc_unused - calculate if we have unused descriptors */
+static inline int igb_desc_unused(struct igb_ring *ring)
+{
+ if (ring->next_to_clean > ring->next_to_use)
+ return ring->next_to_clean - ring->next_to_use - 1;
+
+ return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
+
+/* board specific private data structure */
+struct igb_adapter {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ struct net_device *netdev;
+
+ unsigned long state;
+ unsigned int flags;
+
+ unsigned int num_q_vectors;
+ struct msix_entry *msix_entries;
+
+ /* Interrupt Throttle Rate */
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
+ u16 tx_itr;
+ u16 rx_itr;
+
+ /* TX */
+ u16 tx_work_limit;
+ u32 tx_timeout_count;
+ int num_tx_queues;
+ struct igb_ring *tx_ring[16];
+
+ /* RX */
+ int num_rx_queues;
+ struct igb_ring *rx_ring[16];
+
+ u32 max_frame_size;
+ u32 min_frame_size;
+
+ struct timer_list watchdog_timer;
+ struct timer_list phy_info_timer;
+
+ u16 mng_vlan_id;
+ u32 bd_number;
+ u32 wol;
+ u32 en_mng_pt;
+ u16 link_speed;
+ u16 link_duplex;
+
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
+ bool fc_autoneg;
+ u8 tx_timeout_factor;
+ struct timer_list blink_timer;
+ unsigned long led_status;
+
+ /* OS defined structs */
+ struct pci_dev *pdev;
+ struct cyclecounter cycles;
+ struct timecounter clock;
+ struct timecompare compare;
+ struct hwtstamp_config hwtstamp_config;
+
+ spinlock_t stats64_lock;
+ struct rtnl_link_stats64 stats64;
+
+ /* structs defined in e1000_hw.h */
+ struct e1000_hw hw;
+ struct e1000_hw_stats stats;
+ struct e1000_phy_info phy_info;
+ struct e1000_phy_stats phy_stats;
+
+ u32 test_icr;
+ struct igb_ring test_tx_ring;
+ struct igb_ring test_rx_ring;
+
+ int msg_enable;
+
+ struct igb_q_vector *q_vector[MAX_Q_VECTORS];
+ u32 eims_enable_mask;
+ u32 eims_other;
+
+ /* to not mess up cache alignment, always add to the bottom */
+ u32 eeprom_wol;
+
+ u16 tx_ring_count;
+ u16 rx_ring_count;
+ unsigned int vfs_allocated_count;
+ struct vf_data_storage *vf_data;
+ int vf_rate_link_speed;
+ u32 rss_queues;
+ u32 wvbr;
+ int node;
+ u32 *shadow_vfta;
+};
+
+#define IGB_FLAG_HAS_MSI (1 << 0)
+#define IGB_FLAG_DCA_ENABLED (1 << 1)
+#define IGB_FLAG_QUAD_PORT_A (1 << 2)
+#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
+#define IGB_FLAG_DMAC (1 << 4)
+
+/* DMA Coalescing defines */
+#define IGB_MIN_TXPBSIZE 20408
+#define IGB_TX_BUF_4096 4096
+#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
+
+#define IGB_82576_TSYNC_SHIFT 19
+#define IGB_82580_TSYNC_SHIFT 24
+#define IGB_TS_HDR_LEN 16
+enum e1000_state_t {
+ __IGB_TESTING,
+ __IGB_RESETTING,
+ __IGB_DOWN
+};
+
+enum igb_boards {
+ board_82575,
+};
+
+extern char igb_driver_name[];
+extern char igb_driver_version[];
+
+extern int igb_up(struct igb_adapter *);
+extern void igb_down(struct igb_adapter *);
+extern void igb_reinit_locked(struct igb_adapter *);
+extern void igb_reset(struct igb_adapter *);
+extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+extern int igb_setup_tx_resources(struct igb_ring *);
+extern int igb_setup_rx_resources(struct igb_ring *);
+extern void igb_free_tx_resources(struct igb_ring *);
+extern void igb_free_rx_resources(struct igb_ring *);
+extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_setup_tctl(struct igb_adapter *);
+extern void igb_setup_rctl(struct igb_adapter *);
+extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
+ struct igb_tx_buffer *);
+extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
+extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+extern bool igb_has_link(struct igb_adapter *adapter);
+extern void igb_set_ethtool_ops(struct net_device *);
+extern void igb_power_up_link(struct igb_adapter *);
+
+static inline s32 igb_reset_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.reset)
+ return hw->phy.ops.reset(hw);
+
+ return 0;
+}
+
+static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ if (hw->phy.ops.read_reg)
+ return hw->phy.ops.read_reg(hw, offset, data);
+
+ return 0;
+}
+
+static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ if (hw->phy.ops.write_reg)
+ return hw->phy.ops.write_reg(hw, offset, data);
+
+ return 0;
+}
+
+static inline s32 igb_get_phy_info(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.get_phy_info)
+ return hw->phy.ops.get_phy_info(hw);
+
+ return 0;
+}
+
+#endif /* _IGB_H_ */
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 414b0225be8..43873eba2f6 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -705,12 +705,8 @@ static void igb_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = IGB_MAX_RXD;
ring->tx_max_pending = IGB_MAX_TXD;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
ring->rx_pending = adapter->rx_ring_count;
ring->tx_pending = adapter->tx_ring_count;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
}
static int igb_set_ringparam(struct net_device *netdev,
@@ -1368,7 +1364,6 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
rx_ring->count = IGB_DEFAULT_RXD;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->netdev = adapter->netdev;
- rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
rx_ring->reg_idx = adapter->vfs_allocated_count;
if (igb_setup_rx_resources(rx_ring)) {
@@ -1383,7 +1378,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
igb_setup_rctl(adapter);
igb_configure_rx_ring(adapter, rx_ring);
- igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
+ igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
return 0;
@@ -1580,34 +1575,33 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
unsigned int size)
{
union e1000_adv_rx_desc *rx_desc;
- struct igb_buffer *buffer_info;
- int rx_ntc, tx_ntc, count = 0;
- u32 staterr;
+ struct igb_rx_buffer *rx_buffer_info;
+ struct igb_tx_buffer *tx_buffer_info;
+ u16 rx_ntc, tx_ntc, count = 0;
/* initialize next to clean and descriptor values */
rx_ntc = rx_ring->next_to_clean;
tx_ntc = tx_ring->next_to_clean;
- rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
- while (staterr & E1000_RXD_STAT_DD) {
+ while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
/* check rx buffer */
- buffer_info = &rx_ring->buffer_info[rx_ntc];
+ rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
/* unmap rx buffer, will be remapped by alloc_rx_buffers */
dma_unmap_single(rx_ring->dev,
- buffer_info->dma,
- rx_ring->rx_buffer_len,
+ rx_buffer_info->dma,
+ IGB_RX_HDR_LEN,
DMA_FROM_DEVICE);
- buffer_info->dma = 0;
+ rx_buffer_info->dma = 0;
/* verify contents of skb */
- if (!igb_check_lbtest_frame(buffer_info->skb, size))
+ if (!igb_check_lbtest_frame(rx_buffer_info->skb, size))
count++;
/* unmap buffer on tx side */
- buffer_info = &tx_ring->buffer_info[tx_ntc];
- igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+ igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment rx/tx next to clean counters */
rx_ntc++;
@@ -1618,12 +1612,11 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
tx_ntc = 0;
/* fetch next descriptor */
- rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
}
/* re-map buffers to ring, store next to clean values */
- igb_alloc_rx_buffers_adv(rx_ring, count);
+ igb_alloc_rx_buffers(rx_ring, count);
rx_ring->next_to_clean = rx_ntc;
tx_ring->next_to_clean = tx_ntc;
@@ -1634,8 +1627,9 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
{
struct igb_ring *tx_ring = &adapter->test_tx_ring;
struct igb_ring *rx_ring = &adapter->test_rx_ring;
- int i, j, lc, good_cnt, ret_val = 0;
- unsigned int size = 1024;
+ u16 i, j, lc, good_cnt;
+ int ret_val = 0;
+ unsigned int size = IGB_RX_HDR_LEN;
netdev_tx_t tx_ret_val;
struct sk_buff *skb;
@@ -1666,7 +1660,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
/* place 64 packets on the transmit queue*/
for (i = 0; i < 64; i++) {
skb_get(skb);
- tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
+ tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
if (tx_ret_val == NETDEV_TX_OK)
good_cnt++;
}
@@ -2012,7 +2006,8 @@ static int igb_set_coalesce(struct net_device *netdev,
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
- if (q_vector->rx_ring)
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+ if (q_vector->rx.ring)
q_vector->itr_val = adapter->rx_itr_setting;
else
q_vector->itr_val = adapter->tx_itr_setting;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 40d4c405fd7..ced544499f1 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -39,11 +39,15 @@
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
@@ -53,8 +57,8 @@
#include "igb.h"
#define MAJ 3
-#define MIN 0
-#define BUILD 6
+#define MIN 2
+#define BUILD 10
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -121,7 +125,7 @@ static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
-static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
+static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
@@ -135,9 +139,9 @@ static irqreturn_t igb_msix_ring(int irq, void *);
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
-static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
+static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
@@ -159,6 +163,12 @@ static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
+#ifdef CONFIG_PCI_IOV
+static int igb_vf_configure(struct igb_adapter *adapter, int vf);
+static int igb_find_enabled_vfs(struct igb_adapter *adapter);
+static int igb_check_vf_assignment(struct igb_adapter *adapter);
+#endif
+
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
@@ -194,6 +204,7 @@ static struct pci_error_handlers igb_err_handler = {
.resume = igb_io_resume,
};
+static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
static struct pci_driver igb_driver = {
.name = igb_driver_name,
@@ -334,15 +345,13 @@ static void igb_dump(struct igb_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
struct igb_reg_info *reginfo;
- int n = 0;
struct igb_ring *tx_ring;
union e1000_adv_tx_desc *tx_desc;
struct my_u0 { u64 a; u64 b; } *u0;
- struct igb_buffer *buffer_info;
struct igb_ring *rx_ring;
union e1000_adv_rx_desc *rx_desc;
u32 staterr;
- int i = 0;
+ u16 i, n;
if (!netif_msg_hw(adapter))
return;
@@ -375,9 +384,10 @@ static void igb_dump(struct igb_adapter *adapter)
printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
" leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
+ struct igb_tx_buffer *buffer_info;
tx_ring = adapter->tx_ring[n];
- buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
- printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
n, tx_ring->next_to_use, tx_ring->next_to_clean,
(u64)buffer_info->dma,
buffer_info->length,
@@ -412,11 +422,12 @@ static void igb_dump(struct igb_adapter *adapter)
"leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
- tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
+ struct igb_tx_buffer *buffer_info;
+ tx_desc = IGB_TX_DESC(tx_ring, i);
+ buffer_info = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
- " %04X %3X %016llX %p", i,
+ " %04X %p %016llX %p", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
@@ -492,8 +503,9 @@ rx_ring_summary:
"<-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
- buffer_info = &rx_ring->buffer_info[i];
- rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+ struct igb_rx_buffer *buffer_info;
+ buffer_info = &rx_ring->rx_buffer_info[i];
+ rx_desc = IGB_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
if (staterr & E1000_RXD_STAT_DD) {
@@ -516,16 +528,14 @@ rx_ring_summary:
DUMP_PREFIX_ADDRESS,
16, 1,
phys_to_virt(buffer_info->dma),
- rx_ring->rx_buffer_len, true);
- if (rx_ring->rx_buffer_len
- < IGB_RXBUFFER_1024)
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1,
- phys_to_virt(
- buffer_info->page_dma +
- buffer_info->page_offset),
- PAGE_SIZE/2, true);
+ IGB_RX_HDR_LEN, true);
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(
+ buffer_info->page_dma +
+ buffer_info->page_offset),
+ PAGE_SIZE/2, true);
}
}
@@ -560,7 +570,7 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
* the lowest register is SYSTIMR instead of SYSTIML. However we never
* adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
*/
- if (hw->mac.type == e1000_82580) {
+ if (hw->mac.type >= e1000_82580) {
stamp = rd32(E1000_SYSTIMR) >> 8;
shift = IGB_82580_TSYNC_SHIFT;
}
@@ -683,61 +693,116 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
{
struct igb_ring *ring;
int i;
+ int orig_node = adapter->node;
for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
if (!ring)
goto err;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
+ ring->numa_node = adapter->node;
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
- ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
+ set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
adapter->tx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
for (i = 0; i < adapter->num_rx_queues; i++) {
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
if (!ring)
goto err;
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
- ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
+ ring->numa_node = adapter->node;
/* set flag indicating ring supports SCTP checksum offload */
if (adapter->hw.mac.type >= e1000_82576)
- ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
+ set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
+
+ /* On i350, loopback VLAN packets have the tag byte-swapped. */
+ if (adapter->hw.mac.type == e1000_i350)
+ set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
+
adapter->rx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
igb_cache_ring_register(adapter);
return 0;
err:
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
igb_free_queues(adapter);
return -ENOMEM;
}
+/**
+ * igb_write_ivar - configure ivar for given MSI-X vector
+ * @hw: pointer to the HW structure
+ * @msix_vector: vector number we are allocating to a given ring
+ * @index: row index of IVAR register to write within IVAR table
+ * @offset: column offset of in IVAR, should be multiple of 8
+ *
+ * This function is intended to handle the writing of the IVAR register
+ * for adapters 82576 and newer. The IVAR table consists of 2 columns,
+ * each containing an cause allocation for an Rx and Tx ring, and a
+ * variable number of rows depending on the number of queues supported.
+ **/
+static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
+ int index, int offset)
+{
+ u32 ivar = array_rd32(E1000_IVAR0, index);
+
+ /* clear any bits that are currently set */
+ ivar &= ~((u32)0xFF << offset);
+
+ /* write vector and valid bit */
+ ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
+
+ array_wr32(E1000_IVAR0, index, ivar);
+}
+
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
- u32 msixbm = 0;
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
- u32 ivar, index;
int rx_queue = IGB_N0_QUEUE;
int tx_queue = IGB_N0_QUEUE;
+ u32 msixbm = 0;
- if (q_vector->rx_ring)
- rx_queue = q_vector->rx_ring->reg_idx;
- if (q_vector->tx_ring)
- tx_queue = q_vector->tx_ring->reg_idx;
+ if (q_vector->rx.ring)
+ rx_queue = q_vector->rx.ring->reg_idx;
+ if (q_vector->tx.ring)
+ tx_queue = q_vector->tx.ring->reg_idx;
switch (hw->mac.type) {
case e1000_82575:
@@ -755,72 +820,39 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
q_vector->eims_value = msixbm;
break;
case e1000_82576:
- /* 82576 uses a table-based method for assigning vectors.
- Each queue has a single entry in the table to which we write
- a vector number along with a "valid" bit. Sadly, the layout
- of the table is somewhat counterintuitive. */
- if (rx_queue > IGB_N0_QUEUE) {
- index = (rx_queue & 0x7);
- ivar = array_rd32(E1000_IVAR0, index);
- if (rx_queue < 8) {
- /* vector goes into low byte of register */
- ivar = ivar & 0xFFFFFF00;
- ivar |= msix_vector | E1000_IVAR_VALID;
- } else {
- /* vector goes into third byte of register */
- ivar = ivar & 0xFF00FFFF;
- ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
- }
- array_wr32(E1000_IVAR0, index, ivar);
- }
- if (tx_queue > IGB_N0_QUEUE) {
- index = (tx_queue & 0x7);
- ivar = array_rd32(E1000_IVAR0, index);
- if (tx_queue < 8) {
- /* vector goes into second byte of register */
- ivar = ivar & 0xFFFF00FF;
- ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
- } else {
- /* vector goes into high byte of register */
- ivar = ivar & 0x00FFFFFF;
- ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
- }
- array_wr32(E1000_IVAR0, index, ivar);
- }
+ /*
+ * 82576 uses a table that essentially consists of 2 columns
+ * with 8 rows. The ordering is column-major so we use the
+ * lower 3 bits as the row index, and the 4th bit as the
+ * column offset.
+ */
+ if (rx_queue > IGB_N0_QUEUE)
+ igb_write_ivar(hw, msix_vector,
+ rx_queue & 0x7,
+ (rx_queue & 0x8) << 1);
+ if (tx_queue > IGB_N0_QUEUE)
+ igb_write_ivar(hw, msix_vector,
+ tx_queue & 0x7,
+ ((tx_queue & 0x8) << 1) + 8);
q_vector->eims_value = 1 << msix_vector;
break;
case e1000_82580:
case e1000_i350:
- /* 82580 uses the same table-based approach as 82576 but has fewer
- entries as a result we carry over for queues greater than 4. */
- if (rx_queue > IGB_N0_QUEUE) {
- index = (rx_queue >> 1);
- ivar = array_rd32(E1000_IVAR0, index);
- if (rx_queue & 0x1) {
- /* vector goes into third byte of register */
- ivar = ivar & 0xFF00FFFF;
- ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
- } else {
- /* vector goes into low byte of register */
- ivar = ivar & 0xFFFFFF00;
- ivar |= msix_vector | E1000_IVAR_VALID;
- }
- array_wr32(E1000_IVAR0, index, ivar);
- }
- if (tx_queue > IGB_N0_QUEUE) {
- index = (tx_queue >> 1);
- ivar = array_rd32(E1000_IVAR0, index);
- if (tx_queue & 0x1) {
- /* vector goes into high byte of register */
- ivar = ivar & 0x00FFFFFF;
- ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
- } else {
- /* vector goes into second byte of register */
- ivar = ivar & 0xFFFF00FF;
- ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
- }
- array_wr32(E1000_IVAR0, index, ivar);
- }
+ /*
+ * On 82580 and newer adapters the scheme is similar to 82576
+ * however instead of ordering column-major we have things
+ * ordered row-major. So we traverse the table by using
+ * bit 0 as the column offset, and the remaining bits as the
+ * row index.
+ */
+ if (rx_queue > IGB_N0_QUEUE)
+ igb_write_ivar(hw, msix_vector,
+ rx_queue >> 1,
+ (rx_queue & 0x1) << 4);
+ if (tx_queue > IGB_N0_QUEUE)
+ igb_write_ivar(hw, msix_vector,
+ tx_queue >> 1,
+ ((tx_queue & 0x1) << 4) + 8);
q_vector->eims_value = 1 << msix_vector;
break;
default:
@@ -920,15 +952,15 @@ static int igb_request_msix(struct igb_adapter *adapter)
q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
- if (q_vector->rx_ring && q_vector->tx_ring)
+ if (q_vector->rx.ring && q_vector->tx.ring)
sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
- q_vector->rx_ring->queue_index);
- else if (q_vector->tx_ring)
+ q_vector->rx.ring->queue_index);
+ else if (q_vector->tx.ring)
sprintf(q_vector->name, "%s-tx-%u", netdev->name,
- q_vector->tx_ring->queue_index);
- else if (q_vector->rx_ring)
+ q_vector->tx.ring->queue_index);
+ else if (q_vector->rx.ring)
sprintf(q_vector->name, "%s-rx-%u", netdev->name,
- q_vector->rx_ring->queue_index);
+ q_vector->rx.ring->queue_index);
else
sprintf(q_vector->name, "%s-unused", netdev->name);
@@ -1084,9 +1116,24 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
struct igb_q_vector *q_vector;
struct e1000_hw *hw = &adapter->hw;
int v_idx;
+ int orig_node = adapter->node;
for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
- q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
+ if ((adapter->num_q_vectors == (adapter->num_rx_queues +
+ adapter->num_tx_queues)) &&
+ (adapter->num_rx_queues == v_idx))
+ adapter->node = orig_node;
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
+ adapter->node);
+ if (!q_vector)
+ q_vector = kzalloc(sizeof(struct igb_q_vector),
+ GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
@@ -1095,9 +1142,14 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
adapter->q_vector[v_idx] = q_vector;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
+
return 0;
err_out:
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
igb_free_q_vectors(adapter);
return -ENOMEM;
}
@@ -1107,8 +1159,9 @@ static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
{
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
- q_vector->rx_ring = adapter->rx_ring[ring_idx];
- q_vector->rx_ring->q_vector = q_vector;
+ q_vector->rx.ring = adapter->rx_ring[ring_idx];
+ q_vector->rx.ring->q_vector = q_vector;
+ q_vector->rx.count++;
q_vector->itr_val = adapter->rx_itr_setting;
if (q_vector->itr_val && q_vector->itr_val <= 3)
q_vector->itr_val = IGB_START_ITR;
@@ -1119,9 +1172,11 @@ static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
{
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
- q_vector->tx_ring = adapter->tx_ring[ring_idx];
- q_vector->tx_ring->q_vector = q_vector;
+ q_vector->tx.ring = adapter->tx_ring[ring_idx];
+ q_vector->tx.ring->q_vector = q_vector;
+ q_vector->tx.count++;
q_vector->itr_val = adapter->tx_itr_setting;
+ q_vector->tx.work_limit = adapter->tx_work_limit;
if (q_vector->itr_val && q_vector->itr_val <= 3)
q_vector->itr_val = IGB_START_ITR;
}
@@ -1219,7 +1274,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
goto request_done;
/* fall back to MSI */
igb_clear_interrupt_scheme(adapter);
- if (!pci_enable_msi(adapter->pdev))
+ if (!pci_enable_msi(pdev))
adapter->flags |= IGB_FLAG_HAS_MSI;
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
@@ -1241,12 +1296,12 @@ static int igb_request_irq(struct igb_adapter *adapter)
}
igb_setup_all_tx_resources(adapter);
igb_setup_all_rx_resources(adapter);
- } else {
- igb_assign_vector(adapter->q_vector[0], 0);
}
+ igb_assign_vector(adapter->q_vector[0], 0);
+
if (adapter->flags & IGB_FLAG_HAS_MSI) {
- err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
+ err = request_irq(pdev->irq, igb_intr_msi, 0,
netdev->name, adapter);
if (!err)
goto request_done;
@@ -1256,11 +1311,11 @@ static int igb_request_irq(struct igb_adapter *adapter)
adapter->flags &= ~IGB_FLAG_HAS_MSI;
}
- err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
+ err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
netdev->name, adapter);
if (err)
- dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
+ dev_err(&pdev->dev, "Error %d getting interrupt\n",
err);
request_done:
@@ -1274,11 +1329,9 @@ static void igb_free_irq(struct igb_adapter *adapter)
free_irq(adapter->msix_entries[vector++].vector, adapter);
- for (i = 0; i < adapter->num_q_vectors; i++) {
- struct igb_q_vector *q_vector = adapter->q_vector[i];
+ for (i = 0; i < adapter->num_q_vectors; i++)
free_irq(adapter->msix_entries[vector++].vector,
- q_vector);
- }
+ adapter->q_vector[i]);
} else {
free_irq(adapter->pdev->irq, adapter);
}
@@ -1326,7 +1379,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
if (adapter->msix_entries) {
- u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
+ u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
u32 regval = rd32(E1000_EIAC);
wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
regval = rd32(E1000_EIAM);
@@ -1336,9 +1389,6 @@ static void igb_irq_enable(struct igb_adapter *adapter)
wr32(E1000_MBVFIMR, 0xFF);
ims |= E1000_IMS_VMMB;
}
- if (adapter->hw.mac.type == e1000_82580)
- ims |= E1000_IMS_DRSTA;
-
wr32(E1000_IMS, ims);
} else {
wr32(E1000_IMS, IMS_ENABLE_MASK |
@@ -1438,7 +1488,7 @@ static void igb_configure(struct igb_adapter *adapter)
* next_to_use != next_to_clean */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
- igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
+ igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
}
}
@@ -1480,10 +1530,9 @@ int igb_up(struct igb_adapter *adapter)
clear_bit(__IGB_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++) {
- struct igb_q_vector *q_vector = adapter->q_vector[i];
- napi_enable(&q_vector->napi);
- }
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ napi_enable(&(adapter->q_vector[i]->napi));
+
if (adapter->msix_entries)
igb_configure_msix(adapter);
else
@@ -1535,10 +1584,8 @@ void igb_down(struct igb_adapter *adapter)
wrfl();
msleep(10);
- for (i = 0; i < adapter->num_q_vectors; i++) {
- struct igb_q_vector *q_vector = adapter->q_vector[i];
- napi_disable(&q_vector->napi);
- }
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ napi_disable(&(adapter->q_vector[i]->napi));
igb_irq_disable(adapter);
@@ -1682,63 +1729,8 @@ void igb_reset(struct igb_adapter *adapter)
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
- if (hw->mac.type > e1000_82580) {
- if (adapter->flags & IGB_FLAG_DMAC) {
- u32 reg;
-
- /*
- * DMA Coalescing high water mark needs to be higher
- * than * the * Rx threshold. The Rx threshold is
- * currently * pba - 6, so we * should use a high water
- * mark of pba * - 4. */
- hwm = (pba - 4) << 10;
- reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
- & E1000_DMACR_DMACTHR_MASK);
-
- /* transition to L0x or L1 if available..*/
- reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
-
- /* watchdog timer= +-1000 usec in 32usec intervals */
- reg |= (1000 >> 5);
- wr32(E1000_DMACR, reg);
-
- /* no lower threshold to disable coalescing(smart fifb)
- * -UTRESH=0*/
- wr32(E1000_DMCRTRH, 0);
-
- /* set hwm to PBA - 2 * max frame size */
- wr32(E1000_FCRTC, hwm);
-
- /*
- * This sets the time to wait before requesting tran-
- * sition to * low power state to number of usecs needed
- * to receive 1 512 * byte frame at gigabit line rate
- */
- reg = rd32(E1000_DMCTLX);
- reg |= IGB_DMCTLX_DCFLUSH_DIS;
-
- /* Delay 255 usec before entering Lx state. */
- reg |= 0xFF;
- wr32(E1000_DMCTLX, reg);
-
- /* free space in Tx packet buffer to wake from DMAC */
- wr32(E1000_DMCTXTH,
- (IGB_MIN_TXPBSIZE -
- (IGB_TX_BUF_4096 + adapter->max_frame_size))
- >> 6);
-
- /* make low power state decision controlled by DMAC */
- reg = rd32(E1000_PCIEMISC);
- reg |= E1000_PCIEMISC_LX_DECISION;
- wr32(E1000_PCIEMISC, reg);
- } /* end if IGB_FLAG_DMAC set */
- }
- if (hw->mac.type == e1000_82580) {
- u32 reg = rd32(E1000_PCIEMISC);
- wr32(E1000_PCIEMISC,
- reg & ~E1000_PCIEMISC_LX_DECISION);
- }
+ igb_init_dmac(adapter, pba);
if (!netif_running(adapter->netdev))
igb_power_down_link(adapter);
@@ -1766,17 +1758,8 @@ static u32 igb_fix_features(struct net_device *netdev, u32 features)
static int igb_set_features(struct net_device *netdev, u32 features)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
- int i;
u32 changed = netdev->features ^ features;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- if (features & NETIF_F_RXCSUM)
- adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
- else
- adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
- }
-
if (changed & NETIF_F_HW_VLAN_RX)
igb_vlan_mode(netdev, features);
@@ -1786,10 +1769,9 @@ static int igb_set_features(struct net_device *netdev, u32 features)
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
- .ndo_start_xmit = igb_xmit_frame_adv,
+ .ndo_start_xmit = igb_xmit_frame,
.ndo_get_stats64 = igb_get_stats64,
.ndo_set_rx_mode = igb_set_rx_mode,
- .ndo_set_multicast_list = igb_set_rx_mode,
.ndo_set_mac_address = igb_set_mac,
.ndo_change_mtu = igb_change_mtu,
.ndo_do_ioctl = igb_ioctl,
@@ -1878,7 +1860,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
err = -ENOMEM;
netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
- IGB_ABS_MAX_TX_QUEUES);
+ IGB_MAX_TX_QUEUES);
if (!netdev)
goto err_alloc_etherdev;
@@ -1945,23 +1927,32 @@ static int __devinit igb_probe(struct pci_dev *pdev,
dev_info(&pdev->dev,
"PHY reset is blocked due to SOL/IDER session.\n");
- netdev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_RX;
-
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_FILTER;
-
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_TSO6;
- netdev->vlan_features |= NETIF_F_IP_CSUM;
- netdev->vlan_features |= NETIF_F_IPV6_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
+ /*
+ * features is initialized to 0 in allocation, it might have bits
+ * set by igb_sw_init so we should use an or instead of an
+ * assignment.
+ */
+ netdev->features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_TX;
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features;
+
+ /* set this bit last since it cannot be part of hw_features */
+ netdev->features |= NETIF_F_HW_VLAN_FILTER;
+
+ netdev->vlan_features |= NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG;
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
@@ -1973,6 +1964,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_SCTP_CSUM;
}
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
/* before reading the NVM, reset the controller to put the device in a
@@ -2077,8 +2070,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (err)
goto err_register;
- igb_vlan_mode(netdev, netdev->features);
-
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
@@ -2193,8 +2184,12 @@ static void __devexit igb_remove(struct pci_dev *pdev)
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
/* disable iov and allow time for transactions to clear */
- pci_disable_sriov(pdev);
- msleep(500);
+ if (!igb_check_vf_assignment(adapter)) {
+ pci_disable_sriov(pdev);
+ msleep(500);
+ } else {
+ dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
+ }
kfree(adapter->vf_data);
adapter->vf_data = NULL;
@@ -2211,6 +2206,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
+ kfree(adapter->shadow_vfta);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
@@ -2231,42 +2227,49 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
{
#ifdef CONFIG_PCI_IOV
struct pci_dev *pdev = adapter->pdev;
+ int old_vfs = igb_find_enabled_vfs(adapter);
+ int i;
- if (adapter->vfs_allocated_count) {
- adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
- sizeof(struct vf_data_storage),
- GFP_KERNEL);
- /* if allocation failed then we do not support SR-IOV */
- if (!adapter->vf_data) {
- adapter->vfs_allocated_count = 0;
- dev_err(&pdev->dev, "Unable to allocate memory for VF "
- "Data Storage\n");
- }
+ if (old_vfs) {
+ dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
+ "max_vfs setting of %d\n", old_vfs, max_vfs);
+ adapter->vfs_allocated_count = old_vfs;
}
- if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
- kfree(adapter->vf_data);
- adapter->vf_data = NULL;
-#endif /* CONFIG_PCI_IOV */
+ if (!adapter->vfs_allocated_count)
+ return;
+
+ adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+ /* if allocation failed then we do not support SR-IOV */
+ if (!adapter->vf_data) {
adapter->vfs_allocated_count = 0;
-#ifdef CONFIG_PCI_IOV
- } else {
- unsigned char mac_addr[ETH_ALEN];
- int i;
- dev_info(&pdev->dev, "%d vfs allocated\n",
- adapter->vfs_allocated_count);
- for (i = 0; i < adapter->vfs_allocated_count; i++) {
- random_ether_addr(mac_addr);
- igb_set_vf_mac(adapter, i, mac_addr);
- }
- /* DMA Coalescing is not supported in IOV mode. */
- if (adapter->flags & IGB_FLAG_DMAC)
- adapter->flags &= ~IGB_FLAG_DMAC;
+ dev_err(&pdev->dev, "Unable to allocate memory for VF "
+ "Data Storage\n");
+ goto out;
+ }
+
+ if (!old_vfs) {
+ if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
+ goto err_out;
}
+ dev_info(&pdev->dev, "%d VFs allocated\n",
+ adapter->vfs_allocated_count);
+ for (i = 0; i < adapter->vfs_allocated_count; i++)
+ igb_vf_configure(adapter, i);
+
+ /* DMA Coalescing is not supported in IOV mode. */
+ adapter->flags &= ~IGB_FLAG_DMAC;
+ goto out;
+err_out:
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ adapter->vfs_allocated_count = 0;
+out:
+ return;
#endif /* CONFIG_PCI_IOV */
}
-
/**
* igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
* @adapter: board private structure to initialize
@@ -2389,14 +2392,23 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+ /* set default ring sizes */
adapter->tx_ring_count = IGB_DEFAULT_TXD;
adapter->rx_ring_count = IGB_DEFAULT_RXD;
+
+ /* set default ITR values */
adapter->rx_itr_setting = IGB_DEFAULT_ITR;
adapter->tx_itr_setting = IGB_DEFAULT_ITR;
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ /* set default work limits */
+ adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
+
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ adapter->node = -1;
+
spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
@@ -2427,6 +2439,11 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ /* Setup and initialize a copy of the hw vlan table array */
+ adapter->shadow_vfta = kzalloc(sizeof(u32) *
+ E1000_VLAN_FILTER_TBL_SIZE,
+ GFP_ATOMIC);
+
/* This call may decrease the number of queues */
if (igb_init_interrupt_scheme(adapter)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
@@ -2495,10 +2512,8 @@ static int igb_open(struct net_device *netdev)
/* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++) {
- struct igb_q_vector *q_vector = adapter->q_vector[i];
- napi_enable(&q_vector->napi);
- }
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ napi_enable(&(adapter->q_vector[i]->napi));
/* Clear any pending interrupts. */
rd32(E1000_ICR);
@@ -2567,31 +2582,42 @@ static int igb_close(struct net_device *netdev)
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
+ int orig_node = dev_to_node(dev);
int size;
- size = sizeof(struct igb_buffer) * tx_ring->count;
- tx_ring->buffer_info = vzalloc(size);
- if (!tx_ring->buffer_info)
+ size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
+ if (!tx_ring->tx_buffer_info)
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
goto err;
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
+ set_dev_node(dev, tx_ring->numa_node);
tx_ring->desc = dma_alloc_coherent(dev,
tx_ring->size,
&tx_ring->dma,
GFP_KERNEL);
+ set_dev_node(dev, orig_node);
+ if (!tx_ring->desc)
+ tx_ring->desc = dma_alloc_coherent(dev,
+ tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL);
if (!tx_ring->desc)
goto err;
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+
return 0;
err:
- vfree(tx_ring->buffer_info);
+ vfree(tx_ring->tx_buffer_info);
dev_err(dev,
"Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
@@ -2620,10 +2646,6 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
}
}
- for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
- int r_idx = i % adapter->num_tx_queues;
- adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
- }
return err;
}
@@ -2664,14 +2686,12 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
struct igb_ring *ring)
{
struct e1000_hw *hw = &adapter->hw;
- u32 txdctl;
+ u32 txdctl = 0;
u64 tdba = ring->dma;
int reg_idx = ring->reg_idx;
/* disable the queue */
- txdctl = rd32(E1000_TXDCTL(reg_idx));
- wr32(E1000_TXDCTL(reg_idx),
- txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+ wr32(E1000_TXDCTL(reg_idx), 0);
wrfl();
mdelay(10);
@@ -2681,9 +2701,8 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
tdba & 0x00000000ffffffffULL);
wr32(E1000_TDBAH(reg_idx), tdba >> 32);
- ring->head = hw->hw_addr + E1000_TDH(reg_idx);
ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
- writel(0, ring->head);
+ wr32(E1000_TDH(reg_idx), 0);
writel(0, ring->tail);
txdctl |= IGB_TX_PTHRESH;
@@ -2717,11 +2736,14 @@ static void igb_configure_tx(struct igb_adapter *adapter)
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ int orig_node = dev_to_node(dev);
int size, desc_len;
- size = sizeof(struct igb_buffer) * rx_ring->count;
- rx_ring->buffer_info = vzalloc(size);
- if (!rx_ring->buffer_info)
+ size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
+ if (!rx_ring->rx_buffer_info)
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
goto err;
desc_len = sizeof(union e1000_adv_rx_desc);
@@ -2730,10 +2752,17 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
+ set_dev_node(dev, rx_ring->numa_node);
rx_ring->desc = dma_alloc_coherent(dev,
rx_ring->size,
&rx_ring->dma,
GFP_KERNEL);
+ set_dev_node(dev, orig_node);
+ if (!rx_ring->desc)
+ rx_ring->desc = dma_alloc_coherent(dev,
+ rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL);
if (!rx_ring->desc)
goto err;
@@ -2744,8 +2773,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
return 0;
err:
- vfree(rx_ring->buffer_info);
- rx_ring->buffer_info = NULL;
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
dev_err(dev, "Unable to allocate memory for the receive descriptor"
" ring\n");
return -ENOMEM;
@@ -2962,16 +2991,19 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
**/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
- u32 max_frame_size;
+ u32 max_frame_size = adapter->max_frame_size;
struct e1000_hw *hw = &adapter->hw;
u16 pf_id = adapter->vfs_allocated_count;
- max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
-
- /* if vfs are enabled we set RLPML to the largest possible request
- * size and set the VMOLR RLPML to the size we need */
if (pf_id) {
igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
+ /*
+ * If we're in VMDQ or SR-IOV mode, then set global RLPML
+ * to our max jumbo frame size, in case we need to enable
+ * jumbo frames on one of the rings later.
+ * This will not pass over-length frames into the default
+ * queue because it's gated by the VMOLR.RLPML.
+ */
max_frame_size = MAX_JUMBO_FRAME_SIZE;
}
@@ -3026,12 +3058,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
struct e1000_hw *hw = &adapter->hw;
u64 rdba = ring->dma;
int reg_idx = ring->reg_idx;
- u32 srrctl, rxdctl;
+ u32 srrctl = 0, rxdctl = 0;
/* disable the queue */
- rxdctl = rd32(E1000_RXDCTL(reg_idx));
- wr32(E1000_RXDCTL(reg_idx),
- rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+ wr32(E1000_RXDCTL(reg_idx), 0);
/* Set DMA base address registers */
wr32(E1000_RDBAL(reg_idx),
@@ -3041,29 +3071,19 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
ring->count * sizeof(union e1000_adv_rx_desc));
/* initialize head and tail */
- ring->head = hw->hw_addr + E1000_RDH(reg_idx);
ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
- writel(0, ring->head);
+ wr32(E1000_RDH(reg_idx), 0);
writel(0, ring->tail);
/* set descriptor configuration */
- if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
- srrctl = ALIGN(ring->rx_buffer_len, 64) <<
- E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
- srrctl |= IGB_RXBUFFER_16384 >>
- E1000_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
- srrctl |= (PAGE_SIZE / 2) >>
- E1000_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else {
- srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
- E1000_SRRCTL_BSIZEPKT_SHIFT;
- srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
- }
- if (hw->mac.type == e1000_82580)
+ srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
/* Only set Drop Enable if we are supporting multiple queues */
if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
@@ -3074,13 +3094,12 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
/* set filtering for VMDQ pools */
igb_set_vmolr(adapter, reg_idx & 0x7, true);
- /* enable receive descriptor fetching */
- rxdctl = rd32(E1000_RXDCTL(reg_idx));
- rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
- rxdctl &= 0xFFF00000;
rxdctl |= IGB_RX_PTHRESH;
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
+
+ /* enable receive descriptor fetching */
+ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
@@ -3117,8 +3136,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
{
igb_clean_tx_ring(tx_ring);
- vfree(tx_ring->buffer_info);
- tx_ring->buffer_info = NULL;
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
/* if not set, then don't free */
if (!tx_ring->desc)
@@ -3144,30 +3163,26 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
igb_free_tx_resources(adapter->tx_ring[i]);
}
-void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
- struct igb_buffer *buffer_info)
+void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
+ struct igb_tx_buffer *tx_buffer)
{
- if (buffer_info->dma) {
- if (buffer_info->mapped_as_page)
- dma_unmap_page(tx_ring->dev,
- buffer_info->dma,
- buffer_info->length,
- DMA_TO_DEVICE);
- else
- dma_unmap_single(tx_ring->dev,
- buffer_info->dma,
- buffer_info->length,
- DMA_TO_DEVICE);
- buffer_info->dma = 0;
- }
- if (buffer_info->skb) {
- dev_kfree_skb_any(buffer_info->skb);
- buffer_info->skb = NULL;
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (tx_buffer->dma)
+ dma_unmap_single(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ } else if (tx_buffer->dma) {
+ dma_unmap_page(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
}
- buffer_info->time_stamp = 0;
- buffer_info->length = 0;
- buffer_info->next_to_watch = 0;
- buffer_info->mapped_as_page = false;
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ tx_buffer->dma = 0;
+ /* buffer_info must be completely set up in the transmit path */
}
/**
@@ -3176,21 +3191,21 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
**/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
- struct igb_buffer *buffer_info;
+ struct igb_tx_buffer *buffer_info;
unsigned long size;
- unsigned int i;
+ u16 i;
- if (!tx_ring->buffer_info)
+ if (!tx_ring->tx_buffer_info)
return;
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
- buffer_info = &tx_ring->buffer_info[i];
+ buffer_info = &tx_ring->tx_buffer_info[i];
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
}
- size = sizeof(struct igb_buffer) * tx_ring->count;
- memset(tx_ring->buffer_info, 0, size);
+ size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
/* Zero out the descriptor ring */
memset(tx_ring->desc, 0, tx_ring->size);
@@ -3221,8 +3236,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
{
igb_clean_rx_ring(rx_ring);
- vfree(rx_ring->buffer_info);
- rx_ring->buffer_info = NULL;
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
/* if not set, then don't free */
if (!rx_ring->desc)
@@ -3254,20 +3269,19 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
**/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
- struct igb_buffer *buffer_info;
unsigned long size;
- unsigned int i;
+ u16 i;
- if (!rx_ring->buffer_info)
+ if (!rx_ring->rx_buffer_info)
return;
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- buffer_info = &rx_ring->buffer_info[i];
+ struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
if (buffer_info->dma) {
dma_unmap_single(rx_ring->dev,
buffer_info->dma,
- rx_ring->rx_buffer_len,
+ IGB_RX_HDR_LEN,
DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
@@ -3290,8 +3304,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
}
}
- size = sizeof(struct igb_buffer) * rx_ring->count;
- memset(rx_ring->buffer_info, 0, size);
+ size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
@@ -3713,16 +3727,14 @@ static void igb_watchdog_task(struct work_struct *work)
}
/* Force detection of hung controller every watchdog period */
- tx_ring->detect_tx_hung = true;
+ set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
}
/* Cause software interrupt to ensure rx ring is cleaned */
if (adapter->msix_entries) {
u32 eics = 0;
- for (i = 0; i < adapter->num_q_vectors; i++) {
- struct igb_q_vector *q_vector = adapter->q_vector[i];
- eics |= q_vector->eims_value;
- }
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ eics |= adapter->q_vector[i]->eims_value;
wr32(E1000_EICS, eics);
} else {
wr32(E1000_ICS, E1000_ICS_RXDMT0);
@@ -3764,33 +3776,24 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
int new_val = q_vector->itr_val;
int avg_wire_size = 0;
struct igb_adapter *adapter = q_vector->adapter;
- struct igb_ring *ring;
unsigned int packets;
/* For non-gigabit speeds, just fix the interrupt rate at 4000
* ints/sec - ITR timer value of 120 ticks.
*/
if (adapter->link_speed != SPEED_1000) {
- new_val = 976;
+ new_val = IGB_4K_ITR;
goto set_itr_val;
}
- ring = q_vector->rx_ring;
- if (ring) {
- packets = ACCESS_ONCE(ring->total_packets);
-
- if (packets)
- avg_wire_size = ring->total_bytes / packets;
- }
-
- ring = q_vector->tx_ring;
- if (ring) {
- packets = ACCESS_ONCE(ring->total_packets);
+ packets = q_vector->rx.total_packets;
+ if (packets)
+ avg_wire_size = q_vector->rx.total_bytes / packets;
- if (packets)
- avg_wire_size = max_t(u32, avg_wire_size,
- ring->total_bytes / packets);
- }
+ packets = q_vector->tx.total_packets;
+ if (packets)
+ avg_wire_size = max_t(u32, avg_wire_size,
+ q_vector->tx.total_bytes / packets);
/* if avg_wire_size isn't set no work was done */
if (!avg_wire_size)
@@ -3808,9 +3811,11 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
else
new_val = avg_wire_size / 2;
- /* when in itr mode 3 do not exceed 20K ints/sec */
- if (adapter->rx_itr_setting == 3 && new_val < 196)
- new_val = 196;
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (new_val < IGB_20K_ITR &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+ new_val = IGB_20K_ITR;
set_itr_val:
if (new_val != q_vector->itr_val) {
@@ -3818,14 +3823,10 @@ set_itr_val:
q_vector->set_itr = 1;
}
clear_counts:
- if (q_vector->rx_ring) {
- q_vector->rx_ring->total_bytes = 0;
- q_vector->rx_ring->total_packets = 0;
- }
- if (q_vector->tx_ring) {
- q_vector->tx_ring->total_bytes = 0;
- q_vector->tx_ring->total_packets = 0;
- }
+ q_vector->rx.total_bytes = 0;
+ q_vector->rx.total_packets = 0;
+ q_vector->tx.total_bytes = 0;
+ q_vector->tx.total_packets = 0;
}
/**
@@ -3841,106 +3842,102 @@ clear_counts:
* parameter (see igb_param.c)
* NOTE: These calculations are only valid when operating in a single-
* queue environment.
- * @adapter: pointer to adapter
- * @itr_setting: current q_vector->itr_val
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
+ * @q_vector: pointer to q_vector
+ * @ring_container: ring info to update the itr for
**/
-static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
- int packets, int bytes)
+static void igb_update_itr(struct igb_q_vector *q_vector,
+ struct igb_ring_container *ring_container)
{
- unsigned int retval = itr_setting;
+ unsigned int packets = ring_container->total_packets;
+ unsigned int bytes = ring_container->total_bytes;
+ u8 itrval = ring_container->itr;
+ /* no packets, exit with status unchanged */
if (packets == 0)
- goto update_itr_done;
+ return;
- switch (itr_setting) {
+ switch (itrval) {
case lowest_latency:
/* handle TSO and jumbo frames */
if (bytes/packets > 8000)
- retval = bulk_latency;
+ itrval = bulk_latency;
else if ((packets < 5) && (bytes > 512))
- retval = low_latency;
+ itrval = low_latency;
break;
case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) {
/* this if handles the TSO accounting */
if (bytes/packets > 8000) {
- retval = bulk_latency;
+ itrval = bulk_latency;
} else if ((packets < 10) || ((bytes/packets) > 1200)) {
- retval = bulk_latency;
+ itrval = bulk_latency;
} else if ((packets > 35)) {
- retval = lowest_latency;
+ itrval = lowest_latency;
}
} else if (bytes/packets > 2000) {
- retval = bulk_latency;
+ itrval = bulk_latency;
} else if (packets <= 2 && bytes < 512) {
- retval = lowest_latency;
+ itrval = lowest_latency;
}
break;
case bulk_latency: /* 250 usec aka 4000 ints/s */
if (bytes > 25000) {
if (packets > 35)
- retval = low_latency;
+ itrval = low_latency;
} else if (bytes < 1500) {
- retval = low_latency;
+ itrval = low_latency;
}
break;
}
-update_itr_done:
- return retval;
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itrval;
}
-static void igb_set_itr(struct igb_adapter *adapter)
+static void igb_set_itr(struct igb_q_vector *q_vector)
{
- struct igb_q_vector *q_vector = adapter->q_vector[0];
- u16 current_itr;
+ struct igb_adapter *adapter = q_vector->adapter;
u32 new_itr = q_vector->itr_val;
+ u8 current_itr = 0;
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
if (adapter->link_speed != SPEED_1000) {
current_itr = 0;
- new_itr = 4000;
+ new_itr = IGB_4K_ITR;
goto set_itr_now;
}
- adapter->rx_itr = igb_update_itr(adapter,
- adapter->rx_itr,
- q_vector->rx_ring->total_packets,
- q_vector->rx_ring->total_bytes);
+ igb_update_itr(q_vector, &q_vector->tx);
+ igb_update_itr(q_vector, &q_vector->rx);
- adapter->tx_itr = igb_update_itr(adapter,
- adapter->tx_itr,
- q_vector->tx_ring->total_packets,
- q_vector->tx_ring->total_bytes);
- current_itr = max(adapter->rx_itr, adapter->tx_itr);
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
+ if (current_itr == lowest_latency &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
current_itr = low_latency;
switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */
case lowest_latency:
- new_itr = 56; /* aka 70,000 ints/sec */
+ new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
break;
case low_latency:
- new_itr = 196; /* aka 20,000 ints/sec */
+ new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
break;
case bulk_latency:
- new_itr = 980; /* aka 4,000 ints/sec */
+ new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
break;
default:
break;
}
set_itr_now:
- q_vector->rx_ring->total_bytes = 0;
- q_vector->rx_ring->total_packets = 0;
- q_vector->tx_ring->total_bytes = 0;
- q_vector->tx_ring->total_packets = 0;
-
if (new_itr != q_vector->itr_val) {
/* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
@@ -3948,7 +3945,7 @@ set_itr_now:
new_itr = new_itr > q_vector->itr_val ?
max((new_itr * q_vector->itr_val) /
(new_itr + (q_vector->itr_val >> 2)),
- new_itr) :
+ new_itr) :
new_itr;
/* Don't write the value here; it resets the adapter's
* internal timer, and causes us to delay far longer than
@@ -3961,35 +3958,51 @@ set_itr_now:
}
}
-#define IGB_TX_FLAGS_CSUM 0x00000001
-#define IGB_TX_FLAGS_VLAN 0x00000002
-#define IGB_TX_FLAGS_TSO 0x00000004
-#define IGB_TX_FLAGS_IPV4 0x00000008
-#define IGB_TX_FLAGS_TSTAMP 0x00000010
-#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
-#define IGB_TX_FLAGS_VLAN_SHIFT 16
-
-static inline int igb_tso_adv(struct igb_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
+ u32 type_tucmd, u32 mss_l4len_idx)
{
struct e1000_adv_tx_context_desc *context_desc;
- unsigned int i;
- int err;
- struct igb_buffer *buffer_info;
- u32 info = 0, tu_cmd = 0;
- u32 mss_l4len_idx;
- u8 l4len;
+ u16 i = tx_ring->next_to_use;
+
+ context_desc = IGB_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
+
+ /* For 82575, context index must be unique per ring. */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ mss_l4len_idx |= tx_ring->reg_idx << 4;
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
+
+static int igb_tso(struct igb_ring *tx_ring,
+ struct igb_tx_buffer *first,
+ u8 *hdr_len)
+{
+ struct sk_buff *skb = first->skb;
+ u32 vlan_macip_lens, type_tucmd;
+ u32 mss_l4len_idx, l4len;
+
+ if (!skb_is_gso(skb))
+ return 0;
if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
return err;
}
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (first->protocol == __constant_htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -3997,290 +4010,289 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring,
iph->daddr, 0,
IPPROTO_TCP,
0);
+ type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+ first->tx_flags |= IGB_TX_FLAGS_TSO |
+ IGB_TX_FLAGS_CSUM |
+ IGB_TX_FLAGS_IPV4;
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
+ first->tx_flags |= IGB_TX_FLAGS_TSO |
+ IGB_TX_FLAGS_CSUM;
}
- i = tx_ring->next_to_use;
-
- buffer_info = &tx_ring->buffer_info[i];
- context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
- /* VLAN MACLEN IPLEN */
- if (tx_flags & IGB_TX_FLAGS_VLAN)
- info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
- info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
- *hdr_len += skb_network_offset(skb);
- info |= skb_network_header_len(skb);
- *hdr_len += skb_network_header_len(skb);
- context_desc->vlan_macip_lens = cpu_to_le32(info);
-
- /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
-
- if (skb->protocol == htons(ETH_P_IP))
- tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ /* compute header lengths */
+ l4len = tcp_hdrlen(skb);
+ *hdr_len = skb_transport_offset(skb) + l4len;
- context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+ /* update gso size and bytecount with header size */
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* MSS L4LEN IDX */
- mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
+ mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
- /* For 82575, context index must be unique per ring. */
- if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
- mss_l4len_idx |= tx_ring->reg_idx << 4;
-
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
- context_desc->seqnum_seed = 0;
-
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
- buffer_info->dma = 0;
- i++;
- if (i == tx_ring->count)
- i = 0;
+ /* VLAN MACLEN IPLEN */
+ vlan_macip_lens = skb_network_header_len(skb);
+ vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
- tx_ring->next_to_use = i;
+ igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
- return true;
+ return 1;
}
-static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
- struct e1000_adv_tx_context_desc *context_desc;
- struct device *dev = tx_ring->dev;
- struct igb_buffer *buffer_info;
- u32 info = 0, tu_cmd = 0;
- unsigned int i;
+ struct sk_buff *skb = first->skb;
+ u32 vlan_macip_lens = 0;
+ u32 mss_l4len_idx = 0;
+ u32 type_tucmd = 0;
- if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
- (tx_flags & IGB_TX_FLAGS_VLAN)) {
- i = tx_ring->next_to_use;
- buffer_info = &tx_ring->buffer_info[i];
- context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
+ return;
+ } else {
+ u8 l4_hdr = 0;
+ switch (first->protocol) {
+ case __constant_htons(ETH_P_IP):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but proto=%x!\n",
+ first->protocol);
+ }
+ break;
+ }
- if (tx_flags & IGB_TX_FLAGS_VLAN)
- info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ mss_l4len_idx = tcp_hdrlen(skb) <<
+ E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
+ mss_l4len_idx = sizeof(struct sctphdr) <<
+ E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ mss_l4len_idx = sizeof(struct udphdr) <<
+ E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but l4 proto=%x!\n",
+ l4_hdr);
+ }
+ break;
+ }
- info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- info |= skb_network_header_len(skb);
+ /* update TX checksum flag */
+ first->tx_flags |= IGB_TX_FLAGS_CSUM;
+ }
- context_desc->vlan_macip_lens = cpu_to_le32(info);
+ vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
- tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+ igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
+}
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- __be16 protocol;
+static __le32 igb_tx_cmd_type(u32 tx_flags)
+{
+ /* set type for advanced descriptor with frame checksum insertion */
+ __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
+ E1000_ADVTXD_DCMD_IFCS |
+ E1000_ADVTXD_DCMD_DEXT);
- if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
- const struct vlan_ethhdr *vhdr =
- (const struct vlan_ethhdr*)skb->data;
+ /* set HW vlan bit if vlan is present */
+ if (tx_flags & IGB_TX_FLAGS_VLAN)
+ cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
- protocol = vhdr->h_vlan_encapsulated_proto;
- } else {
- protocol = skb->protocol;
- }
+ /* set timestamp bit if present */
+ if (tx_flags & IGB_TX_FLAGS_TSTAMP)
+ cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
- switch (protocol) {
- case cpu_to_be16(ETH_P_IP):
- tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
- else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
- break;
- case cpu_to_be16(ETH_P_IPV6):
- /* XXX what about other V6 headers?? */
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
- else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
- break;
- default:
- if (unlikely(net_ratelimit()))
- dev_warn(dev,
- "partial checksum but proto=%x!\n",
- skb->protocol);
- break;
- }
- }
+ /* set segmentation bits for TSO */
+ if (tx_flags & IGB_TX_FLAGS_TSO)
+ cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
- context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
- context_desc->seqnum_seed = 0;
- if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
- context_desc->mss_l4len_idx =
- cpu_to_le32(tx_ring->reg_idx << 4);
+ return cmd_type;
+}
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
- buffer_info->dma = 0;
+static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
+ union e1000_adv_tx_desc *tx_desc,
+ u32 tx_flags, unsigned int paylen)
+{
+ u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
+ /* 82575 requires a unique index per ring if any offload is enabled */
+ if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
+ test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ olinfo_status |= tx_ring->reg_idx << 4;
- return true;
+ /* insert L4 checksum */
+ if (tx_flags & IGB_TX_FLAGS_CSUM) {
+ olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+
+ /* insert IPv4 checksum */
+ if (tx_flags & IGB_TX_FLAGS_IPV4)
+ olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
}
- return false;
+
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
-#define IGB_MAX_TXD_PWR 16
+/*
+ * The largest size we can write to the descriptor is 65535. In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR 15
#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
-static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
- unsigned int first)
+static void igb_tx_map(struct igb_ring *tx_ring,
+ struct igb_tx_buffer *first,
+ const u8 hdr_len)
{
- struct igb_buffer *buffer_info;
- struct device *dev = tx_ring->dev;
- unsigned int hlen = skb_headlen(skb);
- unsigned int count = 0, i;
- unsigned int f;
- u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
-
- i = tx_ring->next_to_use;
-
- buffer_info = &tx_ring->buffer_info[i];
- BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
- buffer_info->length = hlen;
- /* set time_stamp *before* dma to help avoid a possible race */
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
- buffer_info->dma = dma_map_single(dev, skb->data, hlen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, buffer_info->dma))
+ struct sk_buff *skb = first->skb;
+ struct igb_tx_buffer *tx_buffer_info;
+ union e1000_adv_tx_desc *tx_desc;
+ dma_addr_t dma;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ unsigned int paylen = skb->len - hdr_len;
+ __le32 cmd_type;
+ u32 tx_flags = first->tx_flags;
+ u16 i = tx_ring->next_to_use;
+
+ tx_desc = IGB_TX_DESC(tx_ring, i);
+
+ igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
+ cmd_type = igb_tx_cmd_type(tx_flags);
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
- unsigned int len = frag->size;
+ /* record length, and DMA address */
+ first->length = size;
+ first->dma = dma;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
+ for (;;) {
+ while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
+ tx_desc->read.cmd_type_len =
+ cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
- buffer_info = &tx_ring->buffer_info[i];
- BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
- buffer_info->length = len;
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
- buffer_info->mapped_as_page = true;
- buffer_info->dma = dma_map_page(dev,
- frag->page,
- frag->page_offset,
- len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, buffer_info->dma))
- goto dma_error;
-
- }
-
- tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
- /* multiply data chunks by size of headers */
- tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
- tx_ring->buffer_info[i].gso_segs = gso_segs;
- tx_ring->buffer_info[first].next_to_watch = i;
-
- return ++count;
-
-dma_error:
- dev_err(dev, "TX DMA map failed\n");
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
- /* clear timestamp and dma mappings for failed buffer_info mapping */
- buffer_info->dma = 0;
- buffer_info->time_stamp = 0;
- buffer_info->length = 0;
- buffer_info->next_to_watch = 0;
- buffer_info->mapped_as_page = false;
+ dma += IGB_MAX_DATA_PER_TXD;
+ size -= IGB_MAX_DATA_PER_TXD;
- /* clear timestamp and dma mappings for remaining portion of packet */
- while (count--) {
- if (i == 0)
- i = tx_ring->count;
- i--;
- buffer_info = &tx_ring->buffer_info[i];
- igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
- }
-
- return 0;
-}
+ tx_desc->read.olinfo_status = 0;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ }
-static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
- u32 tx_flags, int count, u32 paylen,
- u8 hdr_len)
-{
- union e1000_adv_tx_desc *tx_desc;
- struct igb_buffer *buffer_info;
- u32 olinfo_status = 0, cmd_type_len;
- unsigned int i = tx_ring->next_to_use;
+ if (likely(!data_len))
+ break;
- cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
- E1000_ADVTXD_DCMD_DEXT);
+ tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
- if (tx_flags & IGB_TX_FLAGS_VLAN)
- cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
- if (tx_flags & IGB_TX_FLAGS_TSTAMP)
- cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
+ size = skb_frag_size(frag);
+ data_len -= size;
- if (tx_flags & IGB_TX_FLAGS_TSO) {
- cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
- /* insert tcp checksum */
- olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ tx_buffer_info->length = size;
+ tx_buffer_info->dma = dma;
- /* insert ip checksum */
- if (tx_flags & IGB_TX_FLAGS_IPV4)
- olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
+ tx_desc->read.olinfo_status = 0;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
- olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+ frag++;
}
- if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
- (tx_flags & (IGB_TX_FLAGS_CSUM |
- IGB_TX_FLAGS_TSO |
- IGB_TX_FLAGS_VLAN)))
- olinfo_status |= tx_ring->reg_idx << 4;
+ /* write last descriptor with RS and EOP bits */
+ cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
+ tx_desc->read.cmd_type_len = cmd_type;
- olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
+ /* set the timestamp */
+ first->time_stamp = jiffies;
- do {
- buffer_info = &tx_ring->buffer_info[i];
- tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
- tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
- tx_desc->read.cmd_type_len =
- cpu_to_le32(cmd_type_len | buffer_info->length);
- tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- count--;
- i++;
- if (i == tx_ring->count)
- i = 0;
- } while (count > 0);
-
- tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ /*
+ * Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
wmb();
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
tx_ring->next_to_use = i;
+
writel(i, tx_ring->tail);
+
/* we need this if more than one processor can write to our tail
* at a time, it syncronizes IO on IA64/Altix systems */
mmiowb();
+
+ return;
+
+dma_error:
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ for (;;) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+ if (tx_buffer_info == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ tx_ring->next_to_use = i;
}
-static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
struct net_device *netdev = tx_ring->netdev;
@@ -4306,19 +4318,20 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
return 0;
}
-static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
+static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
if (igb_desc_unused(tx_ring) >= size)
return 0;
return __igb_maybe_stop_tx(tx_ring, size);
}
-netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
- struct igb_ring *tx_ring)
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
+ struct igb_ring *tx_ring)
{
- int tso = 0, count;
+ struct igb_tx_buffer *first;
+ int tso;
u32 tx_flags = 0;
- u16 first;
+ __be16 protocol = vlan_get_protocol(skb);
u8 hdr_len = 0;
/* need: 1 descriptor per page,
@@ -4331,6 +4344,12 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGB_TX_FLAGS_TSTAMP;
@@ -4341,51 +4360,44 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
}
- if (skb->protocol == htons(ETH_P_IP))
- tx_flags |= IGB_TX_FLAGS_IPV4;
-
- first = tx_ring->next_to_use;
- if (skb_is_gso(skb)) {
- tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
-
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
- }
-
- if (tso)
- tx_flags |= IGB_TX_FLAGS_TSO;
- else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
- (skb->ip_summed == CHECKSUM_PARTIAL))
- tx_flags |= IGB_TX_FLAGS_CSUM;
+ /* record initial flags and protocol */
+ first->tx_flags = tx_flags;
+ first->protocol = protocol;
- /*
- * count reflects descriptors mapped, if 0 or less then mapping error
- * has occurred and we need to rewind the descriptor queue
- */
- count = igb_tx_map_adv(tx_ring, skb, first);
- if (!count) {
- dev_kfree_skb_any(skb);
- tx_ring->buffer_info[first].time_stamp = 0;
- tx_ring->next_to_use = first;
- return NETDEV_TX_OK;
- }
+ tso = igb_tso(tx_ring, first, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (!tso)
+ igb_tx_csum(tx_ring, first);
- igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
+ igb_tx_map(tx_ring, first, hdr_len);
/* Make sure there is space in the ring for the next send. */
igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
return NETDEV_TX_OK;
+
+out_drop:
+ igb_unmap_and_free_tx_resource(tx_ring, first);
+
+ return NETDEV_TX_OK;
}
-static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
- struct net_device *netdev)
+static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
+ struct sk_buff *skb)
+{
+ unsigned int r_idx = skb->queue_mapping;
+
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
+static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct igb_ring *tx_ring;
- int r_idx = 0;
if (test_bit(__IGB_DOWN, &adapter->state)) {
dev_kfree_skb_any(skb);
@@ -4397,14 +4409,17 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
- tx_ring = adapter->multi_tx_table[r_idx];
+ /*
+ * The minimum packet size with TCTL.PSP set is 17 so pad the skb
+ * in order to meet this minimum size requirement.
+ */
+ if (skb->len < 17) {
+ if (skb_padto(skb, 17))
+ return NETDEV_TX_OK;
+ skb->len = 17;
+ }
- /* This goes back to the question of how to logically map a tx queue
- * to a flow. Right now, performance is impacted slightly negatively
- * if using multiple tx queues. If the stack breaks away from a
- * single qdisc implementation, we can look at this again. */
- return igb_xmit_frame_ring_adv(skb, tx_ring);
+ return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}
/**
@@ -4419,7 +4434,7 @@ static void igb_tx_timeout(struct net_device *netdev)
/* Do the reset outside of interrupt context */
adapter->tx_timeout_count++;
- if (hw->mac.type == e1000_82580)
+ if (hw->mac.type >= e1000_82580)
hw->dev_spec._82575.global_device_reset = true;
schedule_work(&adapter->reset_task);
@@ -4467,14 +4482,14 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- u32 rx_buffer_len, i;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
dev_err(&pdev->dev, "Invalid MTU setting\n");
return -EINVAL;
}
+#define MAX_STD_JUMBO_FRAME_SIZE 9238
if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
return -EINVAL;
@@ -4486,30 +4501,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
/* igb_down has a dependency on max_frame_size */
adapter->max_frame_size = max_frame;
- /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
- * means we reserve 2 more, this pushes us to allocate from the next
- * larger slab size.
- * i.e. RXBUFFER_2048 --> size-4096 slab
- */
-
- if (adapter->hw.mac.type == e1000_82580)
- max_frame += IGB_TS_HDR_LEN;
-
- if (max_frame <= IGB_RXBUFFER_1024)
- rx_buffer_len = IGB_RXBUFFER_1024;
- else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
- rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- else
- rx_buffer_len = IGB_RXBUFFER_128;
-
- if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
- (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
- rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
-
- if ((adapter->hw.mac.type == e1000_82580) &&
- (rx_buffer_len == IGB_RXBUFFER_128))
- rx_buffer_len += IGB_RXBUFFER_64;
-
if (netif_running(netdev))
igb_down(adapter);
@@ -4517,9 +4508,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
-
if (netif_running(netdev))
igb_up(adapter);
else
@@ -4752,12 +4740,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (adapter->vfs_allocated_count)
- wr32(E1000_IMS, E1000_IMS_LSC |
- E1000_IMS_VMMB |
- E1000_IMS_DOUTSYNC);
- else
- wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
wr32(E1000_EIMS, adapter->eims_other);
return IRQ_HANDLED;
@@ -4777,7 +4759,7 @@ static void igb_write_itr(struct igb_q_vector *q_vector)
if (adapter->hw.mac.type == e1000_82575)
itr_val |= itr_val << 16;
else
- itr_val |= 0x8000000;
+ itr_val |= E1000_EITR_CNT_IGNR;
writel(itr_val, q_vector->itr_register);
q_vector->set_itr = 0;
@@ -4805,8 +4787,8 @@ static void igb_update_dca(struct igb_q_vector *q_vector)
if (q_vector->cpu == cpu)
goto out_no_update;
- if (q_vector->tx_ring) {
- int q = q_vector->tx_ring->reg_idx;
+ if (q_vector->tx.ring) {
+ int q = q_vector->tx.ring->reg_idx;
u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
if (hw->mac.type == e1000_82575) {
dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
@@ -4819,8 +4801,8 @@ static void igb_update_dca(struct igb_q_vector *q_vector)
dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
}
- if (q_vector->rx_ring) {
- int q = q_vector->rx_ring->reg_idx;
+ if (q_vector->rx.ring) {
+ int q = q_vector->rx.ring->reg_idx;
u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
if (hw->mac.type == e1000_82575) {
dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
@@ -4904,6 +4886,109 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
}
#endif /* CONFIG_IGB_DCA */
+#ifdef CONFIG_PCI_IOV
+static int igb_vf_configure(struct igb_adapter *adapter, int vf)
+{
+ unsigned char mac_addr[ETH_ALEN];
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pvfdev;
+ unsigned int device_id;
+ u16 thisvf_devfn;
+
+ random_ether_addr(mac_addr);
+ igb_set_vf_mac(adapter, vf, mac_addr);
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ device_id = IGB_82576_VF_DEV_ID;
+ /* VF Stride for 82576 is 2 */
+ thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
+ (pdev->devfn & 1);
+ break;
+ case e1000_i350:
+ device_id = IGB_I350_VF_DEV_ID;
+ /* VF Stride for I350 is 4 */
+ thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
+ (pdev->devfn & 3);
+ break;
+ default:
+ device_id = 0;
+ thisvf_devfn = 0;
+ break;
+ }
+
+ pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
+ while (pvfdev) {
+ if (pvfdev->devfn == thisvf_devfn)
+ break;
+ pvfdev = pci_get_device(hw->vendor_id,
+ device_id, pvfdev);
+ }
+
+ if (pvfdev)
+ adapter->vf_data[vf].vfdev = pvfdev;
+ else
+ dev_err(&pdev->dev,
+ "Couldn't find pci dev ptr for VF %4.4x\n",
+ thisvf_devfn);
+ return pvfdev != NULL;
+}
+
+static int igb_find_enabled_vfs(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *pvfdev;
+ u16 vf_devfn = 0;
+ u16 vf_stride;
+ unsigned int device_id;
+ int vfs_found = 0;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ device_id = IGB_82576_VF_DEV_ID;
+ /* VF Stride for 82576 is 2 */
+ vf_stride = 2;
+ break;
+ case e1000_i350:
+ device_id = IGB_I350_VF_DEV_ID;
+ /* VF Stride for I350 is 4 */
+ vf_stride = 4;
+ break;
+ default:
+ device_id = 0;
+ vf_stride = 0;
+ break;
+ }
+
+ vf_devfn = pdev->devfn + 0x80;
+ pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
+ while (pvfdev) {
+ if (pvfdev->devfn == vf_devfn)
+ vfs_found++;
+ vf_devfn += vf_stride;
+ pvfdev = pci_get_device(hw->vendor_id,
+ device_id, pvfdev);
+ }
+
+ return vfs_found;
+}
+
+static int igb_check_vf_assignment(struct igb_adapter *adapter)
+{
+ int i;
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+ if (adapter->vf_data[i].vfdev) {
+ if (adapter->vf_data[i].vfdev->dev_flags &
+ PCI_DEV_FLAGS_ASSIGNED)
+ return true;
+ }
+ }
+ return false;
+}
+
+#endif
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -5101,7 +5186,6 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
}
adapter->vf_data[vf].vlans_enabled++;
- return 0;
}
} else {
if (i < E1000_VLVF_ARRAY_SIZE) {
@@ -5464,16 +5548,14 @@ static irqreturn_t igb_intr(int irq, void *data)
/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
* need for the IMC write */
u32 icr = rd32(E1000_ICR);
- if (!icr)
- return IRQ_NONE; /* Not our interrupt */
-
- igb_write_itr(q_vector);
/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
* not set, then the adapter didn't send an interrupt */
if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE;
+ igb_write_itr(q_vector);
+
if (icr & E1000_ICR_DRSTA)
schedule_work(&adapter->reset_task);
@@ -5494,15 +5576,15 @@ static irqreturn_t igb_intr(int irq, void *data)
return IRQ_HANDLED;
}
-static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
+void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
- if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
- (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
- if (!adapter->msix_entries)
- igb_set_itr(adapter);
+ if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
+ (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
+ if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
+ igb_set_itr(q_vector);
else
igb_update_ring_itr(q_vector);
}
@@ -5525,28 +5607,27 @@ static int igb_poll(struct napi_struct *napi, int budget)
struct igb_q_vector *q_vector = container_of(napi,
struct igb_q_vector,
napi);
- int tx_clean_complete = 1, work_done = 0;
+ bool clean_complete = true;
#ifdef CONFIG_IGB_DCA
if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_dca(q_vector);
#endif
- if (q_vector->tx_ring)
- tx_clean_complete = igb_clean_tx_irq(q_vector);
+ if (q_vector->tx.ring)
+ clean_complete = igb_clean_tx_irq(q_vector);
- if (q_vector->rx_ring)
- igb_clean_rx_irq_adv(q_vector, &work_done, budget);
+ if (q_vector->rx.ring)
+ clean_complete &= igb_clean_rx_irq(q_vector, budget);
- if (!tx_clean_complete)
- work_done = budget;
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
/* If not enough Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
- igb_ring_irq_enable(q_vector);
- }
+ napi_complete(napi);
+ igb_ring_irq_enable(q_vector);
- return work_done;
+ return 0;
}
/**
@@ -5568,7 +5649,7 @@ static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
* The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
* 24 to match clock shift we setup earlier.
*/
- if (adapter->hw.mac.type == e1000_82580)
+ if (adapter->hw.mac.type >= e1000_82580)
regval <<= IGB_82580_TSYNC_SHIFT;
ns = timecounter_cyc2time(&adapter->clock, regval);
@@ -5581,13 +5662,14 @@ static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
/**
* igb_tx_hwtstamp - utility function which checks for TX time stamp
* @q_vector: pointer to q_vector containing needed info
- * @buffer: pointer to igb_buffer structure
+ * @buffer: pointer to igb_tx_buffer structure
*
* If we were asked to do hardware stamping and such a time stamp is
* available, then it must have been for this skb here because we only
* allow only one such packet into the queue.
*/
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
+ struct igb_tx_buffer *buffer_info)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
@@ -5595,7 +5677,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *bu
u64 regval;
/* if skb does not support hw timestamp or TX stamp not valid exit */
- if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
+ if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
!(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
return;
@@ -5614,70 +5696,109 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *bu
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = q_vector->adapter;
- struct igb_ring *tx_ring = q_vector->tx_ring;
- struct net_device *netdev = tx_ring->netdev;
- struct e1000_hw *hw = &adapter->hw;
- struct igb_buffer *buffer_info;
+ struct igb_ring *tx_ring = q_vector->tx.ring;
+ struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0;
- unsigned int i, eop, count = 0;
- bool cleaned = false;
-
- i = tx_ring->next_to_clean;
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
-
- while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
- (count < tx_ring->count)) {
- rmb(); /* read buffer_info after eop_desc status */
- for (cleaned = false; !cleaned; count++) {
- tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
- cleaned = (i == eop);
-
- if (buffer_info->skb) {
- total_bytes += buffer_info->bytecount;
- /* gso_segs is currently only valid for tcp */
- total_packets += buffer_info->gso_segs;
- igb_tx_hwtstamp(q_vector, buffer_info);
- }
+ unsigned int budget = q_vector->tx.work_limit;
+ unsigned int i = tx_ring->next_to_clean;
+
+ if (test_bit(__IGB_DOWN, &adapter->state))
+ return true;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = IGB_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
- igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
- tx_desc->wb.status = 0;
+ for (; budget; budget--) {
+ eop_desc = tx_buffer->next_to_watch;
+ /* prevent any other reads prior to eop_desc */
+ rmb();
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+
+ /* retrieve hardware timestamp */
+ igb_tx_hwtstamp(q_vector, tx_buffer);
+
+ /* free the skb */
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+
+ /* clear last DMA location and unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer->dma = 0;
+
+ tx_buffer++;
+ tx_desc++;
i++;
- if (i == tx_ring->count)
- i = 0;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (tx_buffer->dma) {
+ dma_unmap_page(tx_ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ }
+ }
+
+ /* clear last DMA location */
+ tx_buffer->dma = 0;
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
}
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
}
+ i += tx_ring->count;
tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->tx_syncp);
+ tx_ring->tx_stats.bytes += total_bytes;
+ tx_ring->tx_stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->tx_syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
- if (unlikely(count &&
- netif_carrier_ok(netdev) &&
- igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
- /* Make sure that anybody stopping the queue after this
- * sees the new next_to_clean.
- */
- smp_mb();
- if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
- !(test_bit(__IGB_DOWN, &adapter->state))) {
- netif_wake_subqueue(netdev, tx_ring->queue_index);
+ if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
+ struct e1000_hw *hw = &adapter->hw;
- u64_stats_update_begin(&tx_ring->tx_syncp);
- tx_ring->tx_stats.restart_queue++;
- u64_stats_update_end(&tx_ring->tx_syncp);
- }
- }
+ eop_desc = tx_buffer->next_to_watch;
- if (tx_ring->detect_tx_hung) {
/* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
- tx_ring->detect_tx_hung = false;
- if (tx_ring->buffer_info[i].time_stamp &&
- time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
+ clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+ if (eop_desc &&
+ time_after(jiffies, tx_buffer->time_stamp +
(adapter->tx_timeout_factor * HZ)) &&
!(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
@@ -5691,50 +5812,73 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
" next_to_clean <%x>\n"
"buffer_info[next_to_clean]\n"
" time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
+ " next_to_watch <%p>\n"
" jiffies <%lx>\n"
" desc.status <%x>\n",
tx_ring->queue_index,
- readl(tx_ring->head),
+ rd32(E1000_TDH(tx_ring->reg_idx)),
readl(tx_ring->tail),
tx_ring->next_to_use,
tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
- eop,
+ tx_buffer->time_stamp,
+ eop_desc,
jiffies,
eop_desc->wb.status);
- netif_stop_subqueue(netdev, tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+
+ /* we are about to reset, no point in enabling stuff */
+ return true;
}
}
- tx_ring->total_bytes += total_bytes;
- tx_ring->total_packets += total_packets;
- u64_stats_update_begin(&tx_ring->tx_syncp);
- tx_ring->tx_stats.bytes += total_bytes;
- tx_ring->tx_stats.packets += total_packets;
- u64_stats_update_end(&tx_ring->tx_syncp);
- return count < tx_ring->count;
+
+ if (unlikely(total_packets &&
+ netif_carrier_ok(tx_ring->netdev) &&
+ igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ !(test_bit(__IGB_DOWN, &adapter->state))) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+
+ u64_stats_update_begin(&tx_ring->tx_syncp);
+ tx_ring->tx_stats.restart_queue++;
+ u64_stats_update_end(&tx_ring->tx_syncp);
+ }
+ }
+
+ return !!budget;
}
-static inline void igb_rx_checksum_adv(struct igb_ring *ring,
- u32 status_err, struct sk_buff *skb)
+static inline void igb_rx_checksum(struct igb_ring *ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
- /* Ignore Checksum bit is set or checksum is disabled through ethtool */
- if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
- (status_err & E1000_RXD_STAT_IXSM))
+ /* Ignore Checksum bit is set */
+ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
+ return;
+
+ /* Rx checksum disabled via ethtool */
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
return;
/* TCP/UDP checksum error bit is set */
- if (status_err &
- (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
+ if (igb_test_staterr(rx_desc,
+ E1000_RXDEXT_STATERR_TCPE |
+ E1000_RXDEXT_STATERR_IPE)) {
/*
* work around errata with sctp packets where the TCPE aka
* L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
* packets, (aka let the stack check the crc32c)
*/
- if ((skb->len == 60) &&
- (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
+ if (!((skb->len == 60) &&
+ test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
u64_stats_update_begin(&ring->rx_syncp);
ring->rx_stats.csum_err++;
u64_stats_update_end(&ring->rx_syncp);
@@ -5743,19 +5887,34 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
return;
}
/* It must be a TCP or UDP packet with a valid checksum */
- if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
+ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
+ E1000_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
+ dev_dbg(ring->dev, "cksum success: bits %08X\n",
+ le32_to_cpu(rx_desc->wb.upper.status_error));
+}
+
+static inline void igb_rx_hash(struct igb_ring *ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (ring->netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}
-static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
- struct sk_buff *skb)
+static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
u64 regval;
+ if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
+ E1000_RXDADV_STAT_TS))
+ return;
+
/*
* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
@@ -5767,7 +5926,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
* If nothing went wrong, then it should have a shared tx_flags that we
* can turn into a skb_shared_hwtstamps.
*/
- if (staterr & E1000_RXDADV_STAT_TSIP) {
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
u32 *stamp = (u32 *)skb->data;
regval = le32_to_cpu(*(stamp + 2));
regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
@@ -5782,8 +5941,24 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
-static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc)
+
+static void igb_rx_vlan(struct igb_ring *ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+ u16 vid;
+ if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
+ test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
+ vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+ else
+ vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
+}
+
+static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
{
/* HW will not DMA in data larger than the given buffer, even if it
* parses the (NFS, of course) header to be larger. In that case, it
@@ -5791,247 +5966,244 @@ static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
*/
u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > rx_ring->rx_buffer_len)
- hlen = rx_ring->rx_buffer_len;
+ if (hlen > IGB_RX_HDR_LEN)
+ hlen = IGB_RX_HDR_LEN;
return hlen;
}
-static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
- int *work_done, int budget)
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
{
- struct igb_ring *rx_ring = q_vector->rx_ring;
- struct net_device *netdev = rx_ring->netdev;
- struct device *dev = rx_ring->dev;
- union e1000_adv_rx_desc *rx_desc , *next_rxd;
- struct igb_buffer *buffer_info , *next_buffer;
- struct sk_buff *skb;
- bool cleaned = false;
- int cleaned_count = 0;
- int current_node = numa_node_id();
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ union e1000_adv_rx_desc *rx_desc;
+ const int current_node = numa_node_id();
unsigned int total_bytes = 0, total_packets = 0;
- unsigned int i;
- u32 staterr;
- u16 length;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
+ u16 i = rx_ring->next_to_clean;
- i = rx_ring->next_to_clean;
- buffer_info = &rx_ring->buffer_info[i];
- rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_desc = IGB_RX_DESC(rx_ring, i);
- while (staterr & E1000_RXD_STAT_DD) {
- if (*work_done >= budget)
- break;
- (*work_done)++;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
+ while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
+ struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
+ struct sk_buff *skb = buffer_info->skb;
+ union e1000_adv_rx_desc *next_rxd;
- skb = buffer_info->skb;
- prefetch(skb->data - NET_IP_ALIGN);
buffer_info->skb = NULL;
+ prefetch(skb->data);
i++;
if (i == rx_ring->count)
i = 0;
- next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
+ next_rxd = IGB_RX_DESC(rx_ring, i);
prefetch(next_rxd);
- next_buffer = &rx_ring->buffer_info[i];
- length = le16_to_cpu(rx_desc->wb.upper.length);
- cleaned = true;
- cleaned_count++;
+ /*
+ * This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
- if (buffer_info->dma) {
- dma_unmap_single(dev, buffer_info->dma,
- rx_ring->rx_buffer_len,
+ if (!skb_is_nonlinear(skb)) {
+ __skb_put(skb, igb_get_hlen(rx_desc));
+ dma_unmap_single(rx_ring->dev, buffer_info->dma,
+ IGB_RX_HDR_LEN,
DMA_FROM_DEVICE);
buffer_info->dma = 0;
- if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
- skb_put(skb, length);
- goto send_up;
- }
- skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
}
- if (length) {
- dma_unmap_page(dev, buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- buffer_info->page_dma = 0;
+ if (rx_desc->wb.upper.length) {
+ u16 length = le16_to_cpu(rx_desc->wb.upper.length);
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
buffer_info->page,
buffer_info->page_offset,
length);
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += PAGE_SIZE / 2;
+
if ((page_count(buffer_info->page) != 1) ||
(page_to_nid(buffer_info->page) != current_node))
buffer_info->page = NULL;
else
get_page(buffer_info->page);
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
+ dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
+ buffer_info->page_dma = 0;
}
- if (!(staterr & E1000_RXD_STAT_EOP)) {
+ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
+ struct igb_rx_buffer *next_buffer;
+ next_buffer = &rx_ring->rx_buffer_info[i];
buffer_info->skb = next_buffer->skb;
buffer_info->dma = next_buffer->dma;
next_buffer->skb = skb;
next_buffer->dma = 0;
goto next_desc;
}
-send_up:
- if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
- dev_kfree_skb_irq(skb);
+
+ if (igb_test_staterr(rx_desc,
+ E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+ dev_kfree_skb_any(skb);
goto next_desc;
}
- if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
- igb_rx_hwtstamp(q_vector, staterr, skb);
+ igb_rx_hwtstamp(q_vector, rx_desc, skb);
+ igb_rx_hash(rx_ring, rx_desc, skb);
+ igb_rx_checksum(rx_ring, rx_desc, skb);
+ igb_rx_vlan(rx_ring, rx_desc, skb);
+
total_bytes += skb->len;
total_packets++;
- igb_rx_checksum_adv(rx_ring, staterr, skb);
-
- skb->protocol = eth_type_trans(skb, netdev);
- skb_record_rx_queue(skb, rx_ring->queue_index);
-
- if (staterr & E1000_RXD_STAT_VP) {
- u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- __vlan_hwaccel_put_tag(skb, vid);
- }
napi_gro_receive(&q_vector->napi, skb);
+ budget--;
next_desc:
- rx_desc->wb.upper.status_error = 0;
+ if (!budget)
+ break;
+ cleaned_count++;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
- igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
/* use prefetched values */
rx_desc = next_rxd;
- buffer_info = next_buffer;
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
}
rx_ring->next_to_clean = i;
- cleaned_count = igb_desc_unused(rx_ring);
-
- if (cleaned_count)
- igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
-
- rx_ring->total_packets += total_packets;
- rx_ring->total_bytes += total_bytes;
u64_stats_update_begin(&rx_ring->rx_syncp);
rx_ring->rx_stats.packets += total_packets;
rx_ring->rx_stats.bytes += total_bytes;
u64_stats_update_end(&rx_ring->rx_syncp);
- return cleaned;
+ q_vector->rx.total_packets += total_packets;
+ q_vector->rx.total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return !!budget;
+}
+
+static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *bi)
+{
+ struct sk_buff *skb = bi->skb;
+ dma_addr_t dma = bi->dma;
+
+ if (dma)
+ return true;
+
+ if (likely(!skb)) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ IGB_RX_HDR_LEN);
+ bi->skb = skb;
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ /* initialize skb for ring */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ }
+
+ dma = dma_map_single(rx_ring->dev, skb->data,
+ IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ return true;
+}
+
+static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t page_dma = bi->page_dma;
+ unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+
+ if (page_dma)
+ return true;
+
+ if (!page) {
+ page = netdev_alloc_page(rx_ring->netdev);
+ bi->page = page;
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+ }
+
+ page_dma = dma_map_page(rx_ring->dev, page,
+ page_offset, PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(rx_ring->dev, page_dma)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ bi->page_dma = page_dma;
+ bi->page_offset = page_offset;
+ return true;
}
/**
- * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
+ * igb_alloc_rx_buffers - Replace used receive buffers; packet split
* @adapter: address of board private structure
**/
-void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
+void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
- struct net_device *netdev = rx_ring->netdev;
union e1000_adv_rx_desc *rx_desc;
- struct igb_buffer *buffer_info;
- struct sk_buff *skb;
- unsigned int i;
- int bufsz;
+ struct igb_rx_buffer *bi;
+ u16 i = rx_ring->next_to_use;
- i = rx_ring->next_to_use;
- buffer_info = &rx_ring->buffer_info[i];
-
- bufsz = rx_ring->rx_buffer_len;
+ rx_desc = IGB_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
while (cleaned_count--) {
- rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
-
- if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
- if (!buffer_info->page) {
- buffer_info->page = netdev_alloc_page(netdev);
- if (unlikely(!buffer_info->page)) {
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.alloc_failed++;
- u64_stats_update_end(&rx_ring->rx_syncp);
- goto no_buffers;
- }
- buffer_info->page_offset = 0;
- } else {
- buffer_info->page_offset ^= PAGE_SIZE / 2;
- }
- buffer_info->page_dma =
- dma_map_page(rx_ring->dev, buffer_info->page,
- buffer_info->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev,
- buffer_info->page_dma)) {
- buffer_info->page_dma = 0;
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.alloc_failed++;
- u64_stats_update_end(&rx_ring->rx_syncp);
- goto no_buffers;
- }
- }
+ if (!igb_alloc_mapped_skb(rx_ring, bi))
+ break;
- skb = buffer_info->skb;
- if (!skb) {
- skb = netdev_alloc_skb_ip_align(netdev, bufsz);
- if (unlikely(!skb)) {
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.alloc_failed++;
- u64_stats_update_end(&rx_ring->rx_syncp);
- goto no_buffers;
- }
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info. */
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- buffer_info->skb = skb;
- }
- if (!buffer_info->dma) {
- buffer_info->dma = dma_map_single(rx_ring->dev,
- skb->data,
- bufsz,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev,
- buffer_info->dma)) {
- buffer_info->dma = 0;
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.alloc_failed++;
- u64_stats_update_end(&rx_ring->rx_syncp);
- goto no_buffers;
- }
- }
- /* Refresh the desc even if buffer_addrs didn't change because
- * each write-back erases this info. */
- if (bufsz < IGB_RXBUFFER_1024) {
- rx_desc->read.pkt_addr =
- cpu_to_le64(buffer_info->page_dma);
- rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
- } else {
- rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
- rx_desc->read.hdr_addr = 0;
- }
+ if (!igb_alloc_mapped_page(rx_ring, bi))
+ break;
+
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc++;
+ bi++;
i++;
- if (i == rx_ring->count)
- i = 0;
- buffer_info = &rx_ring->buffer_info[i];
+ if (unlikely(!i)) {
+ rx_desc = IGB_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
+ }
+
+ /* clear the hdr_addr for the next_to_use descriptor */
+ rx_desc->read.hdr_addr = 0;
}
-no_buffers:
+ i += rx_ring->count;
+
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
- if (i == 0)
- i = (rx_ring->count - 1);
- else
- i--;
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -6168,6 +6340,7 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
+ is_l4 = true;
break;
default:
return -ERANGE;
@@ -6184,7 +6357,7 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
* timestamped, so enable timestamping in all packets as
* long as one rx filter was configured.
*/
- if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
+ if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
}
@@ -6299,10 +6472,9 @@ static void igb_vlan_mode(struct net_device *netdev, u32 features)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl;
+ bool enable = !!(features & NETIF_F_HW_VLAN_RX);
- igb_irq_disable(adapter);
-
- if (features & NETIF_F_HW_VLAN_RX) {
+ if (enable) {
/* enable VLAN tag insert/strip */
ctrl = rd32(E1000_CTRL);
ctrl |= E1000_CTRL_VME;
@@ -6320,9 +6492,6 @@ static void igb_vlan_mode(struct net_device *netdev, u32 features)
}
igb_rlpml_set(adapter);
-
- if (!test_bit(__IGB_DOWN, &adapter->state))
- igb_irq_enable(adapter);
}
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -6347,11 +6516,6 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
int pf_id = adapter->vfs_allocated_count;
s32 err;
- igb_irq_disable(adapter);
-
- if (!test_bit(__IGB_DOWN, &adapter->state))
- igb_irq_enable(adapter);
-
/* remove vlan from VLVF table array */
err = igb_vlvf_set(adapter, vid, false, pf_id);
@@ -6366,6 +6530,8 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
{
u16 vid;
+ igb_vlan_mode(adapter->netdev, adapter->netdev->features);
+
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
igb_vlan_rx_add_vid(adapter->netdev, vid);
}
@@ -6577,18 +6743,15 @@ static void igb_netpoll(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ struct igb_q_vector *q_vector;
int i;
- if (!adapter->msix_entries) {
- struct igb_q_vector *q_vector = adapter->q_vector[0];
- igb_irq_disable(adapter);
- napi_schedule(&q_vector->napi);
- return;
- }
-
for (i = 0; i < adapter->num_q_vectors; i++) {
- struct igb_q_vector *q_vector = adapter->q_vector[i];
- wr32(E1000_EIMC, q_vector->eims_value);
+ q_vector = adapter->q_vector[i];
+ if (adapter->msix_entries)
+ wr32(E1000_EIMC, q_vector->eims_value);
+ else
+ igb_irq_disable(adapter);
napi_schedule(&q_vector->napi);
}
}
@@ -6887,4 +7050,70 @@ static void igb_vmm_control(struct igb_adapter *adapter)
}
}
+static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 dmac_thr;
+ u16 hwm;
+
+ if (hw->mac.type > e1000_82580) {
+ if (adapter->flags & IGB_FLAG_DMAC) {
+ u32 reg;
+
+ /* force threshold to 0. */
+ wr32(E1000_DMCTXTH, 0);
+
+ /*
+ * DMA Coalescing high water mark needs to be higher
+ * than the RX threshold. set hwm to PBA - 2 * max
+ * frame size
+ */
+ hwm = pba - (2 * adapter->max_frame_size);
+ reg = rd32(E1000_DMACR);
+ reg &= ~E1000_DMACR_DMACTHR_MASK;
+ dmac_thr = pba - 4;
+
+ reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
+ & E1000_DMACR_DMACTHR_MASK);
+
+ /* transition to L0x or L1 if available..*/
+ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+ /* watchdog timer= +-1000 usec in 32usec intervals */
+ reg |= (1000 >> 5);
+ wr32(E1000_DMACR, reg);
+
+ /*
+ * no lower threshold to disable
+ * coalescing(smart fifb)-UTRESH=0
+ */
+ wr32(E1000_DMCRTRH, 0);
+ wr32(E1000_FCRTC, hwm);
+
+ reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
+
+ wr32(E1000_DMCTLX, reg);
+
+ /*
+ * free space in tx packet buffer to wake from
+ * DMA coal
+ */
+ wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
+ (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
+
+ /*
+ * make low power state decision controlled
+ * by DMA coal
+ */
+ reg = rd32(E1000_PCIEMISC);
+ reg &= ~E1000_PCIEMISC_LX_DECISION;
+ wr32(E1000_PCIEMISC, reg);
+ } /* endif adapter->dmac is not disabled */
+ } else if (hw->mac.type == e1000_82580) {
+ u32 reg = rd32(E1000_PCIEMISC);
+ wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
+ wr32(E1000_DMACR, 0);
+ }
+}
+
/* igb_main.c */
diff --git a/drivers/net/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile
index 0fa3db3dd8b..0fa3db3dd8b 100644
--- a/drivers/net/igbvf/Makefile
+++ b/drivers/net/ethernet/intel/igbvf/Makefile
diff --git a/drivers/net/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index 79f2604673f..79f2604673f 100644
--- a/drivers/net/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
new file mode 100644
index 00000000000..2c25858cc0f
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -0,0 +1,473 @@
+/*******************************************************************************
+
+ Intel(R) 82576 Virtual Function Linux driver
+ Copyright(c) 2009 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for igbvf */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+
+#include "igbvf.h"
+#include <linux/if_vlan.h>
+
+
+struct igbvf_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+ int base_stat_offset;
+};
+
+#define IGBVF_STAT(current, base) \
+ sizeof(((struct igbvf_adapter *)0)->current), \
+ offsetof(struct igbvf_adapter, current), \
+ offsetof(struct igbvf_adapter, base)
+
+static const struct igbvf_stats igbvf_gstrings_stats[] = {
+ { "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) },
+ { "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) },
+ { "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) },
+ { "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) },
+ { "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) },
+ { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
+ { "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) },
+ { "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) },
+ { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) },
+ { "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) },
+ { "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) },
+ { "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) },
+ { "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) },
+};
+
+#define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats)
+
+static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Link test (on/offline)"
+};
+
+#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
+
+static int igbvf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 status;
+
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+
+ ecmd->port = -1;
+ ecmd->transceiver = XCVR_DUMMY1;
+
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU) {
+ if (status & E1000_STATUS_SPEED_1000)
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ else if (status & E1000_STATUS_SPEED_100)
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ else
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+
+ if (status & E1000_STATUS_FD)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int igbvf_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ return -EOPNOTSUPP;
+}
+
+static void igbvf_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ return;
+}
+
+static int igbvf_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ return -EOPNOTSUPP;
+}
+
+static u32 igbvf_get_msglevel(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void igbvf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+static int igbvf_get_regs_len(struct net_device *netdev)
+{
+#define IGBVF_REGS_LEN 8
+ return IGBVF_REGS_LEN * sizeof(u32);
+}
+
+static void igbvf_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+
+ memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
+
+ regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
+
+ regs_buff[0] = er32(CTRL);
+ regs_buff[1] = er32(STATUS);
+
+ regs_buff[2] = er32(RDLEN(0));
+ regs_buff[3] = er32(RDH(0));
+ regs_buff[4] = er32(RDT(0));
+
+ regs_buff[5] = er32(TDLEN(0));
+ regs_buff[6] = er32(TDH(0));
+ regs_buff[7] = er32(TDT(0));
+}
+
+static int igbvf_get_eeprom_len(struct net_device *netdev)
+{
+ return 0;
+}
+
+static int igbvf_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ return -EOPNOTSUPP;
+}
+
+static int igbvf_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ return -EOPNOTSUPP;
+}
+
+static void igbvf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ char firmware_version[32] = "N/A";
+
+ strncpy(drvinfo->driver, igbvf_driver_name, 32);
+ strncpy(drvinfo->version, igbvf_driver_version, 32);
+ strncpy(drvinfo->fw_version, firmware_version, 32);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->regdump_len = igbvf_get_regs_len(netdev);
+ drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
+}
+
+static void igbvf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct igbvf_ring *tx_ring = adapter->tx_ring;
+ struct igbvf_ring *rx_ring = adapter->rx_ring;
+
+ ring->rx_max_pending = IGBVF_MAX_RXD;
+ ring->tx_max_pending = IGBVF_MAX_TXD;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+}
+
+static int igbvf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct igbvf_ring *temp_ring;
+ int err = 0;
+ u32 new_rx_count, new_tx_count;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD);
+ new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD);
+ new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring->count) &&
+ (new_rx_count == adapter->rx_ring->count)) {
+ /* nothing to do */
+ return 0;
+ }
+
+ while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (!netif_running(adapter->netdev)) {
+ adapter->tx_ring->count = new_tx_count;
+ adapter->rx_ring->count = new_rx_count;
+ goto clear_reset;
+ }
+
+ temp_ring = vmalloc(sizeof(struct igbvf_ring));
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ igbvf_down(adapter);
+
+ /*
+ * We can't just free everything and then setup again,
+ * because the ISRs in MSI-X mode get passed pointers
+ * to the tx and rx ring structs.
+ */
+ if (new_tx_count != adapter->tx_ring->count) {
+ memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));
+
+ temp_ring->count = new_tx_count;
+ err = igbvf_setup_tx_resources(adapter, temp_ring);
+ if (err)
+ goto err_setup;
+
+ igbvf_free_tx_resources(adapter->tx_ring);
+
+ memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring));
+ }
+
+ if (new_rx_count != adapter->rx_ring->count) {
+ memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring));
+
+ temp_ring->count = new_rx_count;
+ err = igbvf_setup_rx_resources(adapter, temp_ring);
+ if (err)
+ goto err_setup;
+
+ igbvf_free_rx_resources(adapter->rx_ring);
+
+ memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
+ }
+err_setup:
+ igbvf_up(adapter);
+ vfree(temp_ring);
+clear_reset:
+ clear_bit(__IGBVF_RESETTING, &adapter->state);
+ return err;
+}
+
+static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ *data = 0;
+
+ hw->mac.ops.check_for_link(hw);
+
+ if (!(er32(STATUS) & E1000_STATUS_LU))
+ *data = 1;
+
+ return *data;
+}
+
+static void igbvf_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ set_bit(__IGBVF_TESTING, &adapter->state);
+
+ /*
+ * Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result
+ */
+ if (igbvf_link_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ clear_bit(__IGBVF_TESTING, &adapter->state);
+ msleep_interruptible(4 * 1000);
+}
+
+static void igbvf_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+}
+
+static int igbvf_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ return -EOPNOTSUPP;
+}
+
+static int igbvf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->itr_setting <= 3)
+ ec->rx_coalesce_usecs = adapter->itr_setting;
+ else
+ ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
+
+ return 0;
+}
+
+static int igbvf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
+ ((ec->rx_coalesce_usecs > 3) &&
+ (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
+ (ec->rx_coalesce_usecs == 2))
+ return -EINVAL;
+
+ /* convert to rate of irq's per second */
+ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+ adapter->itr = IGBVF_START_ITR;
+ adapter->itr_setting = ec->rx_coalesce_usecs;
+ } else {
+ adapter->itr = ec->rx_coalesce_usecs << 2;
+ adapter->itr_setting = adapter->itr;
+ }
+
+ writel(adapter->itr,
+ hw->hw_addr + adapter->rx_ring[0].itr_register);
+
+ return 0;
+}
+
+static int igbvf_nway_reset(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ if (netif_running(netdev))
+ igbvf_reinit_locked(adapter);
+ return 0;
+}
+
+
+static void igbvf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ igbvf_update_stats(adapter);
+ for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
+ char *p = (char *)adapter +
+ igbvf_gstrings_stats[i].stat_offset;
+ char *b = (char *)adapter +
+ igbvf_gstrings_stats[i].base_stat_offset;
+ data[i] = ((igbvf_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
+ (*(u32 *)p - *(u32 *)b));
+ }
+
+}
+
+static int igbvf_get_sset_count(struct net_device *dev, int stringset)
+{
+ switch(stringset) {
+ case ETH_SS_TEST:
+ return IGBVF_TEST_LEN;
+ case ETH_SS_STATS:
+ return IGBVF_GLOBAL_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test));
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, igbvf_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static const struct ethtool_ops igbvf_ethtool_ops = {
+ .get_settings = igbvf_get_settings,
+ .set_settings = igbvf_set_settings,
+ .get_drvinfo = igbvf_get_drvinfo,
+ .get_regs_len = igbvf_get_regs_len,
+ .get_regs = igbvf_get_regs,
+ .get_wol = igbvf_get_wol,
+ .set_wol = igbvf_set_wol,
+ .get_msglevel = igbvf_get_msglevel,
+ .set_msglevel = igbvf_set_msglevel,
+ .nway_reset = igbvf_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = igbvf_get_eeprom_len,
+ .get_eeprom = igbvf_get_eeprom,
+ .set_eeprom = igbvf_set_eeprom,
+ .get_ringparam = igbvf_get_ringparam,
+ .set_ringparam = igbvf_set_ringparam,
+ .get_pauseparam = igbvf_get_pauseparam,
+ .set_pauseparam = igbvf_set_pauseparam,
+ .self_test = igbvf_diag_test,
+ .get_sset_count = igbvf_get_sset_count,
+ .get_strings = igbvf_get_strings,
+ .get_ethtool_stats = igbvf_get_ethtool_stats,
+ .get_coalesce = igbvf_get_coalesce,
+ .set_coalesce = igbvf_set_coalesce,
+};
+
+void igbvf_set_ethtool_ops(struct net_device *netdev)
+{
+ /* have to "undeclare" const on this struct to remove warnings */
+ SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops);
+}
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index fd4a7b780fd..fd4a7b780fd 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
new file mode 100644
index 00000000000..048aae248d0
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -0,0 +1,350 @@
+/*******************************************************************************
+
+ Intel(R) 82576 Virtual Function Linux driver
+ Copyright(c) 2009 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "mbx.h"
+
+/**
+ * e1000_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static s32 e1000_poll_for_msg(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw)) {
+ countdown--;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ * e1000_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 e1000_poll_for_ack(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw)) {
+ countdown--;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ * e1000_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+static s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = e1000_poll_for_msg(hw);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+static s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ /* exit if we either can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg*/
+ ret_val = mbx->ops.write(hw, msg, size);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = e1000_poll_for_ack(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw)
+{
+ u32 v2p_mailbox = er32(V2PMAILBOX(0));
+
+ v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox;
+ hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * e1000_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = e1000_read_v2p_mailbox(hw);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = E1000_SUCCESS;
+
+ hw->dev_spec.vf.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_msg_vf(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ *
+ * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_ack_vf(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ *
+ * returns true if the PF has set the reset done bit or else false
+ **/
+static s32 e1000_check_for_rst_vf(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
+ E1000_V2PMAILBOX_RSTI))) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ /* Take ownership of the buffer */
+ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
+ ret_val = E1000_SUCCESS;
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
+{
+ s32 err;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ err = e1000_obtain_mbx_lock_vf(hw);
+ if (err)
+ goto out_no_write;
+
+ /* flush any ack or msg as we are going to overwrite mailbox */
+ e1000_check_for_ack_vf(hw);
+ e1000_check_for_msg_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ array_ew32(VMBMEM(0), i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_REQ);
+
+out_no_write:
+ return err;
+}
+
+/**
+ * e1000_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
+{
+ s32 err;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ err = e1000_obtain_mbx_lock_vf(hw);
+ if (err)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = array_er32(VMBMEM(0), i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return err;
+}
+
+/**
+ * e1000_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to being communications */
+ mbx->timeout = 0;
+ mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
+
+ mbx->size = E1000_VFMAILBOX_SIZE;
+
+ mbx->ops.read = e1000_read_mbx_vf;
+ mbx->ops.write = e1000_write_mbx_vf;
+ mbx->ops.read_posted = e1000_read_posted_mbx;
+ mbx->ops.write_posted = e1000_write_posted_mbx;
+ mbx->ops.check_for_msg = e1000_check_for_msg_vf;
+ mbx->ops.check_for_ack = e1000_check_for_ack_vf;
+ mbx->ops.check_for_rst = e1000_check_for_rst_vf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ return E1000_SUCCESS;
+}
+
diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index c2883c45d47..c2883c45d47 100644
--- a/drivers/net/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
new file mode 100644
index 00000000000..cca78124be3
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -0,0 +1,2873 @@
+/*******************************************************************************
+
+ Intel(R) 82576 Virtual Function Linux driver
+ Copyright(c) 2009 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+
+#include "igbvf.h"
+
+#define DRV_VERSION "2.0.1-k"
+char igbvf_driver_name[] = "igbvf";
+const char igbvf_driver_version[] = DRV_VERSION;
+static const char igbvf_driver_string[] =
+ "Intel(R) Gigabit Virtual Function Network Driver";
+static const char igbvf_copyright[] =
+ "Copyright (c) 2009 - 2011 Intel Corporation.";
+
+static int igbvf_poll(struct napi_struct *napi, int budget);
+static void igbvf_reset(struct igbvf_adapter *);
+static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
+static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
+
+static struct igbvf_info igbvf_vf_info = {
+ .mac = e1000_vfadapt,
+ .flags = 0,
+ .pba = 10,
+ .init_ops = e1000_init_function_pointers_vf,
+};
+
+static struct igbvf_info igbvf_i350_vf_info = {
+ .mac = e1000_vfadapt_i350,
+ .flags = 0,
+ .pba = 10,
+ .init_ops = e1000_init_function_pointers_vf,
+};
+
+static const struct igbvf_info *igbvf_info_tbl[] = {
+ [board_vf] = &igbvf_vf_info,
+ [board_i350_vf] = &igbvf_i350_vf_info,
+};
+
+/**
+ * igbvf_desc_unused - calculate if we have unused descriptors
+ **/
+static int igbvf_desc_unused(struct igbvf_ring *ring)
+{
+ if (ring->next_to_clean > ring->next_to_use)
+ return ring->next_to_clean - ring->next_to_use - 1;
+
+ return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
+
+/**
+ * igbvf_receive_skb - helper function to handle Rx indications
+ * @adapter: board private structure
+ * @status: descriptor status field as written by hardware
+ * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ * @skb: pointer to sk_buff to be indicated to stack
+ **/
+static void igbvf_receive_skb(struct igbvf_adapter *adapter,
+ struct net_device *netdev,
+ struct sk_buff *skb,
+ u32 status, u16 vlan)
+{
+ if (status & E1000_RXD_STAT_VP) {
+ u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+ if (test_bit(vid, adapter->active_vlans))
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
+ netif_receive_skb(skb);
+}
+
+static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
+ u32 status_err, struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ /* Ignore Checksum bit is set or checksum is disabled through ethtool */
+ if ((status_err & E1000_RXD_STAT_IXSM) ||
+ (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
+ return;
+
+ /* TCP/UDP checksum error bit is set */
+ if (status_err &
+ (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
+ /* let the stack verify checksum errors */
+ adapter->hw_csum_err++;
+ return;
+ }
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ adapter->hw_csum_good++;
+}
+
+/**
+ * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: address of ring structure to repopulate
+ * @cleaned_count: number of buffers to repopulate
+ **/
+static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
+ int cleaned_count)
+{
+ struct igbvf_adapter *adapter = rx_ring->adapter;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union e1000_adv_rx_desc *rx_desc;
+ struct igbvf_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ int bufsz;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ if (adapter->rx_ps_hdr_size)
+ bufsz = adapter->rx_ps_hdr_size;
+ else
+ bufsz = adapter->rx_buffer_len;
+
+ while (cleaned_count--) {
+ rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
+
+ if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
+ if (!buffer_info->page) {
+ buffer_info->page = alloc_page(GFP_ATOMIC);
+ if (!buffer_info->page) {
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+ buffer_info->page_offset = 0;
+ } else {
+ buffer_info->page_offset ^= PAGE_SIZE / 2;
+ }
+ buffer_info->page_dma =
+ dma_map_page(&pdev->dev, buffer_info->page,
+ buffer_info->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ }
+
+ if (!buffer_info->skb) {
+ skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+
+ buffer_info->skb = skb;
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
+ bufsz,
+ DMA_FROM_DEVICE);
+ }
+ /* Refresh the desc even if buffer_addrs didn't change because
+ * each write-back erases this info. */
+ if (adapter->rx_ps_hdr_size) {
+ rx_desc->read.pkt_addr =
+ cpu_to_le64(buffer_info->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
+ } else {
+ rx_desc->read.pkt_addr =
+ cpu_to_le64(buffer_info->dma);
+ rx_desc->read.hdr_addr = 0;
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+ if (i == 0)
+ i = (rx_ring->count - 1);
+ else
+ i--;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ }
+}
+
+/**
+ * igbvf_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done, there
+ * is no guarantee that everything was cleaned
+ **/
+static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
+ int *work_done, int work_to_do)
+{
+ struct igbvf_ring *rx_ring = adapter->rx_ring;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union e1000_adv_rx_desc *rx_desc, *next_rxd;
+ struct igbvf_buffer *buffer_info, *next_buffer;
+ struct sk_buff *skb;
+ bool cleaned = false;
+ int cleaned_count = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int i;
+ u32 length, hlen, staterr;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ buffer_info = &rx_ring->buffer_info[i];
+
+ /* HW will not DMA in data larger than the given buffer, even
+ * if it parses the (NFS, of course) header to be larger. In
+ * that case, it fills the header buffer and spills the rest
+ * into the page.
+ */
+ hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
+ E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > adapter->rx_ps_hdr_size)
+ hlen = adapter->rx_ps_hdr_size;
+
+ length = le16_to_cpu(rx_desc->wb.upper.length);
+ cleaned = true;
+ cleaned_count++;
+
+ skb = buffer_info->skb;
+ prefetch(skb->data - NET_IP_ALIGN);
+ buffer_info->skb = NULL;
+ if (!adapter->rx_ps_hdr_size) {
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+ skb_put(skb, length);
+ goto send_up;
+ }
+
+ if (!skb_shinfo(skb)->nr_frags) {
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_ps_hdr_size,
+ DMA_FROM_DEVICE);
+ skb_put(skb, hlen);
+ }
+
+ if (length) {
+ dma_unmap_page(&pdev->dev, buffer_info->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ buffer_info->page_dma = 0;
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ buffer_info->page,
+ buffer_info->page_offset,
+ length);
+
+ if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
+ (page_count(buffer_info->page) != 1))
+ buffer_info->page = NULL;
+ else
+ get_page(buffer_info->page);
+
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += PAGE_SIZE / 2;
+ }
+send_up:
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
+ prefetch(next_rxd);
+ next_buffer = &rx_ring->buffer_info[i];
+
+ if (!(staterr & E1000_RXD_STAT_EOP)) {
+ buffer_info->skb = next_buffer->skb;
+ buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ goto next_desc;
+ }
+
+ if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ total_bytes += skb->len;
+ total_packets++;
+
+ igbvf_rx_checksum_adv(adapter, staterr, skb);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ igbvf_receive_skb(adapter, netdev, skb, staterr,
+ rx_desc->wb.upper.vlan);
+
+next_desc:
+ rx_desc->wb.upper.status_error = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
+ igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ rx_ring->next_to_clean = i;
+ cleaned_count = igbvf_desc_unused(rx_ring);
+
+ if (cleaned_count)
+ igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ adapter->total_rx_packets += total_packets;
+ adapter->total_rx_bytes += total_bytes;
+ adapter->net_stats.rx_bytes += total_bytes;
+ adapter->net_stats.rx_packets += total_packets;
+ return cleaned;
+}
+
+static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
+ struct igbvf_buffer *buffer_info)
+{
+ if (buffer_info->dma) {
+ if (buffer_info->mapped_as_page)
+ dma_unmap_page(&adapter->pdev->dev,
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&adapter->pdev->dev,
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ buffer_info->dma = 0;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ buffer_info->time_stamp = 0;
+}
+
+/**
+ * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
+ struct igbvf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct igbvf_buffer) * tx_ring->count;
+ tx_ring->buffer_info = vzalloc(size);
+ if (!tx_ring->buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->adapter = adapter;
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ return 0;
+err:
+ vfree(tx_ring->buffer_info);
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
+ struct igbvf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size, desc_len;
+
+ size = sizeof(struct igbvf_buffer) * rx_ring->count;
+ rx_ring->buffer_info = vzalloc(size);
+ if (!rx_ring->buffer_info)
+ goto err;
+
+ desc_len = sizeof(union e1000_adv_rx_desc);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * desc_len;
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc)
+ goto err;
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ rx_ring->adapter = adapter;
+
+ return 0;
+
+err:
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for the receive descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * igbvf_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ **/
+static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
+{
+ struct igbvf_adapter *adapter = tx_ring->adapter;
+ struct igbvf_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ if (!tx_ring->buffer_info)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ igbvf_put_txbuf(adapter, buffer_info);
+ }
+
+ size = sizeof(struct igbvf_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ writel(0, adapter->hw.hw_addr + tx_ring->head);
+ writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * igbvf_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: ring to free resources from
+ *
+ * Free all transmit software resources
+ **/
+void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
+{
+ struct pci_dev *pdev = tx_ring->adapter->pdev;
+
+ igbvf_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * igbvf_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ **/
+static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
+{
+ struct igbvf_adapter *adapter = rx_ring->adapter;
+ struct igbvf_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ if (!rx_ring->buffer_info)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ if (buffer_info->dma) {
+ if (adapter->rx_ps_hdr_size){
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_ps_hdr_size,
+ DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ }
+ buffer_info->dma = 0;
+ }
+
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+
+ if (buffer_info->page) {
+ if (buffer_info->page_dma)
+ dma_unmap_page(&pdev->dev,
+ buffer_info->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ put_page(buffer_info->page);
+ buffer_info->page = NULL;
+ buffer_info->page_dma = 0;
+ buffer_info->page_offset = 0;
+ }
+ }
+
+ size = sizeof(struct igbvf_buffer) * rx_ring->count;
+ memset(rx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ writel(0, adapter->hw.hw_addr + rx_ring->head);
+ writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+/**
+ * igbvf_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+
+void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
+{
+ struct pci_dev *pdev = rx_ring->adapter->pdev;
+
+ igbvf_clean_rx_ring(rx_ring);
+
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+ rx_ring->desc = NULL;
+}
+
+/**
+ * igbvf_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput. This functionality is controlled
+ * by the InterruptThrottleRate module parameter.
+ **/
+static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
+ u16 itr_setting, int packets,
+ int bytes)
+{
+ unsigned int retval = itr_setting;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+ switch (itr_setting) {
+ case lowest_latency:
+ /* handle TSO and jumbo frames */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ retval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* this if handles the TSO accounting */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 10) || ((bytes/packets) > 1200))
+ retval = bulk_latency;
+ else if ((packets > 35))
+ retval = lowest_latency;
+ } else if (bytes/packets > 2000) {
+ retval = bulk_latency;
+ } else if (packets <= 2 && bytes < 512) {
+ retval = lowest_latency;
+ }
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ retval = low_latency;
+ } else if (bytes < 6000) {
+ retval = low_latency;
+ }
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
+
+static void igbvf_set_itr(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 current_itr;
+ u32 new_itr = adapter->itr;
+
+ adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+ adapter->tx_itr = low_latency;
+
+ adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+ adapter->rx_itr = low_latency;
+
+ current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 70000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ new_itr = 4000;
+ break;
+ default:
+ break;
+ }
+
+ if (new_itr != adapter->itr) {
+ /*
+ * this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing
+ */
+ new_itr = new_itr > adapter->itr ?
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
+ adapter->itr = new_itr;
+ adapter->rx_ring->itr_val = 1952;
+
+ if (adapter->msix_entries)
+ adapter->rx_ring->set_itr = 1;
+ else
+ ew32(ITR, 1952);
+ }
+}
+
+/**
+ * igbvf_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ * returns true if ring is completely cleaned
+ **/
+static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
+{
+ struct igbvf_adapter *adapter = tx_ring->adapter;
+ struct net_device *netdev = adapter->netdev;
+ struct igbvf_buffer *buffer_info;
+ struct sk_buff *skb;
+ union e1000_adv_tx_desc *tx_desc, *eop_desc;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int i, eop, count = 0;
+ bool cleaned = false;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
+
+ while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ (count < tx_ring->count)) {
+ rmb(); /* read buffer_info after eop_desc status */
+ for (cleaned = false; !cleaned; count++) {
+ tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ cleaned = (i == eop);
+ skb = buffer_info->skb;
+
+ if (skb) {
+ unsigned int segs, bytecount;
+
+ /* gso_segs is currently only valid for tcp */
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) +
+ skb->len;
+ total_packets += segs;
+ total_bytes += bytecount;
+ }
+
+ igbvf_put_txbuf(adapter, buffer_info);
+ tx_desc->wb.status = 0;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+ if (unlikely(count &&
+ netif_carrier_ok(netdev) &&
+ igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (netif_queue_stopped(netdev) &&
+ !(test_bit(__IGBVF_DOWN, &adapter->state))) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+ }
+
+ adapter->net_stats.tx_bytes += total_bytes;
+ adapter->net_stats.tx_packets += total_packets;
+ return count < tx_ring->count;
+}
+
+static irqreturn_t igbvf_msix_other(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->int_counter1++;
+
+ netif_carrier_off(netdev);
+ hw->mac.get_link_status = 1;
+ if (!test_bit(__IGBVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+
+ ew32(EIMS, adapter->eims_other);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct igbvf_ring *tx_ring = adapter->tx_ring;
+
+
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+
+ /* auto mask will automatically reenable the interrupt when we write
+ * EICS */
+ if (!igbvf_clean_tx_irq(tx_ring))
+ /* Ring was not completely cleaned, so fire another interrupt */
+ ew32(EICS, tx_ring->eims_value);
+ else
+ ew32(EIMS, tx_ring->eims_value);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ adapter->int_counter0++;
+
+ /* Write the ITR value calculated at the end of the
+ * previous interrupt.
+ */
+ if (adapter->rx_ring->set_itr) {
+ writel(adapter->rx_ring->itr_val,
+ adapter->hw.hw_addr + adapter->rx_ring->itr_register);
+ adapter->rx_ring->set_itr = 0;
+ }
+
+ if (napi_schedule_prep(&adapter->rx_ring->napi)) {
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->rx_ring->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define IGBVF_NO_QUEUE -1
+
+static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
+ int tx_queue, int msix_vector)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ivar, index;
+
+ /* 82576 uses a table-based method for assigning vectors.
+ Each queue has a single entry in the table to which we write
+ a vector number along with a "valid" bit. Sadly, the layout
+ of the table is somewhat counterintuitive. */
+ if (rx_queue > IGBVF_NO_QUEUE) {
+ index = (rx_queue >> 1);
+ ivar = array_er32(IVAR0, index);
+ if (rx_queue & 0x1) {
+ /* vector goes into third byte of register */
+ ivar = ivar & 0xFF00FFFF;
+ ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
+ } else {
+ /* vector goes into low byte of register */
+ ivar = ivar & 0xFFFFFF00;
+ ivar |= msix_vector | E1000_IVAR_VALID;
+ }
+ adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
+ array_ew32(IVAR0, index, ivar);
+ }
+ if (tx_queue > IGBVF_NO_QUEUE) {
+ index = (tx_queue >> 1);
+ ivar = array_er32(IVAR0, index);
+ if (tx_queue & 0x1) {
+ /* vector goes into high byte of register */
+ ivar = ivar & 0x00FFFFFF;
+ ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
+ } else {
+ /* vector goes into second byte of register */
+ ivar = ivar & 0xFFFF00FF;
+ ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
+ }
+ adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
+ array_ew32(IVAR0, index, ivar);
+ }
+}
+
+/**
+ * igbvf_configure_msix - Configure MSI-X hardware
+ *
+ * igbvf_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ **/
+static void igbvf_configure_msix(struct igbvf_adapter *adapter)
+{
+ u32 tmp;
+ struct e1000_hw *hw = &adapter->hw;
+ struct igbvf_ring *tx_ring = adapter->tx_ring;
+ struct igbvf_ring *rx_ring = adapter->rx_ring;
+ int vector = 0;
+
+ adapter->eims_enable_mask = 0;
+
+ igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
+ adapter->eims_enable_mask |= tx_ring->eims_value;
+ if (tx_ring->itr_val)
+ writel(tx_ring->itr_val,
+ hw->hw_addr + tx_ring->itr_register);
+ else
+ writel(1952, hw->hw_addr + tx_ring->itr_register);
+
+ igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
+ adapter->eims_enable_mask |= rx_ring->eims_value;
+ if (rx_ring->itr_val)
+ writel(rx_ring->itr_val,
+ hw->hw_addr + rx_ring->itr_register);
+ else
+ writel(1952, hw->hw_addr + rx_ring->itr_register);
+
+ /* set vector for other causes, i.e. link changes */
+
+ tmp = (vector++ | E1000_IVAR_VALID);
+
+ ew32(IVAR_MISC, tmp);
+
+ adapter->eims_enable_mask = (1 << (vector)) - 1;
+ adapter->eims_other = 1 << (vector - 1);
+ e1e_flush();
+}
+
+static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
+{
+ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ }
+}
+
+/**
+ * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
+{
+ int err = -ENOMEM;
+ int i;
+
+ /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */
+ adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (adapter->msix_entries) {
+ for (i = 0; i < 3; i++)
+ adapter->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(adapter->pdev,
+ adapter->msix_entries, 3);
+ }
+
+ if (err) {
+ /* MSI-X failed */
+ dev_err(&adapter->pdev->dev,
+ "Failed to initialize MSI-X interrupts.\n");
+ igbvf_reset_interrupt_capability(adapter);
+ }
+}
+
+/**
+ * igbvf_request_msix - Initialize MSI-X interrupts
+ *
+ * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
+ **/
+static int igbvf_request_msix(struct igbvf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0, vector = 0;
+
+ if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
+ sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
+ sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
+ } else {
+ memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
+ memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
+ }
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
+ netdev);
+ if (err)
+ goto out;
+
+ adapter->tx_ring->itr_register = E1000_EITR(vector);
+ adapter->tx_ring->itr_val = 1952;
+ vector++;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
+ netdev);
+ if (err)
+ goto out;
+
+ adapter->rx_ring->itr_register = E1000_EITR(vector);
+ adapter->rx_ring->itr_val = 1952;
+ vector++;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igbvf_msix_other, 0, netdev->name, netdev);
+ if (err)
+ goto out;
+
+ igbvf_configure_msix(adapter);
+ return 0;
+out:
+ return err;
+}
+
+/**
+ * igbvf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ **/
+static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ return -ENOMEM;
+
+ adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
+ if (!adapter->rx_ring) {
+ kfree(adapter->tx_ring);
+ return -ENOMEM;
+ }
+
+ netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
+
+ return 0;
+}
+
+/**
+ * igbvf_request_irq - initialize interrupts
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int igbvf_request_irq(struct igbvf_adapter *adapter)
+{
+ int err = -1;
+
+ /* igbvf supports msi-x only */
+ if (adapter->msix_entries)
+ err = igbvf_request_msix(adapter);
+
+ if (!err)
+ return err;
+
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate interrupt, Error: %d\n", err);
+
+ return err;
+}
+
+static void igbvf_free_irq(struct igbvf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int vector;
+
+ if (adapter->msix_entries) {
+ for (vector = 0; vector < 3; vector++)
+ free_irq(adapter->msix_entries[vector].vector, netdev);
+ }
+}
+
+/**
+ * igbvf_irq_disable - Mask off interrupt generation on the NIC
+ **/
+static void igbvf_irq_disable(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ ew32(EIMC, ~0);
+
+ if (adapter->msix_entries)
+ ew32(EIAC, 0);
+}
+
+/**
+ * igbvf_irq_enable - Enable default interrupt generation settings
+ **/
+static void igbvf_irq_enable(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ ew32(EIAC, adapter->eims_enable_mask);
+ ew32(EIAM, adapter->eims_enable_mask);
+ ew32(EIMS, adapter->eims_enable_mask);
+}
+
+/**
+ * igbvf_poll - NAPI Rx polling callback
+ * @napi: struct associated with this polling callback
+ * @budget: amount of packets driver is allowed to process this poll
+ **/
+static int igbvf_poll(struct napi_struct *napi, int budget)
+{
+ struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
+ struct igbvf_adapter *adapter = rx_ring->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ int work_done = 0;
+
+ igbvf_clean_rx_irq(adapter, &work_done, budget);
+
+ /* If not enough Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ if (adapter->itr_setting & 3)
+ igbvf_set_itr(adapter);
+
+ if (!test_bit(__IGBVF_DOWN, &adapter->state))
+ ew32(EIMS, adapter->rx_ring->eims_value);
+ }
+
+ return work_done;
+}
+
+/**
+ * igbvf_set_rlpml - set receive large packet maximum length
+ * @adapter: board private structure
+ *
+ * Configure the maximum size of packets that will be received
+ */
+static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
+{
+ int max_frame_size;
+ struct e1000_hw *hw = &adapter->hw;
+
+ max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
+ e1000_rlpml_set_vf(hw, max_frame_size);
+}
+
+static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->mac.ops.set_vfta(hw, vid, true))
+ dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
+ else
+ set_bit(vid, adapter->active_vlans);
+}
+
+static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ igbvf_irq_disable(adapter);
+
+ if (!test_bit(__IGBVF_DOWN, &adapter->state))
+ igbvf_irq_enable(adapter);
+
+ if (hw->mac.ops.set_vfta(hw, vid, false))
+ dev_err(&adapter->pdev->dev,
+ "Failed to remove vlan id %d\n", vid);
+ else
+ clear_bit(vid, adapter->active_vlans);
+}
+
+static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
+{
+ u16 vid;
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ igbvf_vlan_rx_add_vid(adapter->netdev, vid);
+}
+
+/**
+ * igbvf_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void igbvf_configure_tx(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct igbvf_ring *tx_ring = adapter->tx_ring;
+ u64 tdba;
+ u32 txdctl, dca_txctrl;
+
+ /* disable transmits */
+ txdctl = er32(TXDCTL(0));
+ ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+ e1e_flush();
+ msleep(10);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
+ tdba = tx_ring->dma;
+ ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
+ ew32(TDBAH(0), (tdba >> 32));
+ ew32(TDH(0), 0);
+ ew32(TDT(0), 0);
+ tx_ring->head = E1000_TDH(0);
+ tx_ring->tail = E1000_TDT(0);
+
+ /* Turn off Relaxed Ordering on head write-backs. The writebacks
+ * MUST be delivered in order or it will completely screw up
+ * our bookeeping.
+ */
+ dca_txctrl = er32(DCA_TXCTRL(0));
+ dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
+ ew32(DCA_TXCTRL(0), dca_txctrl);
+
+ /* enable transmits */
+ txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+ ew32(TXDCTL(0), txdctl);
+
+ /* Setup Transmit Descriptor Settings for eop descriptor */
+ adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;
+
+ /* enable Report Status bit */
+ adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
+}
+
+/**
+ * igbvf_setup_srrctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 srrctl = 0;
+
+ srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
+ E1000_SRRCTL_BSIZEHDR_MASK |
+ E1000_SRRCTL_BSIZEPKT_MASK);
+
+ /* Enable queue drop to avoid head of line blocking */
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+ /* Setup buffer sizes */
+ srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
+ E1000_SRRCTL_BSIZEPKT_SHIFT;
+
+ if (adapter->rx_buffer_len < 2048) {
+ adapter->rx_ps_hdr_size = 0;
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ } else {
+ adapter->rx_ps_hdr_size = 128;
+ srrctl |= adapter->rx_ps_hdr_size <<
+ E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ }
+
+ ew32(SRRCTL(0), srrctl);
+}
+
+/**
+ * igbvf_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void igbvf_configure_rx(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct igbvf_ring *rx_ring = adapter->rx_ring;
+ u64 rdba;
+ u32 rdlen, rxdctl;
+
+ /* disable receives */
+ rxdctl = er32(RXDCTL(0));
+ ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+ e1e_flush();
+ msleep(10);
+
+ rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
+
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ rdba = rx_ring->dma;
+ ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
+ ew32(RDBAH(0), (rdba >> 32));
+ ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
+ rx_ring->head = E1000_RDH(0);
+ rx_ring->tail = E1000_RDT(0);
+ ew32(RDH(0), 0);
+ ew32(RDT(0), 0);
+
+ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+ rxdctl &= 0xFFF00000;
+ rxdctl |= IGBVF_RX_PTHRESH;
+ rxdctl |= IGBVF_RX_HTHRESH << 8;
+ rxdctl |= IGBVF_RX_WTHRESH << 16;
+
+ igbvf_set_rlpml(adapter);
+
+ /* enable receives */
+ ew32(RXDCTL(0), rxdctl);
+}
+
+/**
+ * igbvf_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void igbvf_set_multi(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list = NULL;
+ int i;
+
+ if (!netdev_mc_empty(netdev)) {
+ mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
+ if (!mta_list) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate multicast filter list\n");
+ return;
+ }
+ }
+
+ /* prepare a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+ hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
+ kfree(mta_list);
+}
+
+/**
+ * igbvf_configure - configure the hardware for Rx and Tx
+ * @adapter: private board structure
+ **/
+static void igbvf_configure(struct igbvf_adapter *adapter)
+{
+ igbvf_set_multi(adapter->netdev);
+
+ igbvf_restore_vlan(adapter);
+
+ igbvf_configure_tx(adapter);
+ igbvf_setup_srrctl(adapter);
+ igbvf_configure_rx(adapter);
+ igbvf_alloc_rx_buffers(adapter->rx_ring,
+ igbvf_desc_unused(adapter->rx_ring));
+}
+
+/* igbvf_reset - bring the hardware into a known good state
+ *
+ * This function boots the hardware and enables some settings that
+ * require a configuration cycle of the hardware - those cannot be
+ * set/changed during runtime. After reset the device needs to be
+ * properly configured for Rx, Tx etc.
+ */
+static void igbvf_reset(struct igbvf_adapter *adapter)
+{
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Allow time for pending master requests to run */
+ if (mac->ops.reset_hw(hw))
+ dev_err(&adapter->pdev->dev, "PF still resetting\n");
+
+ mac->ops.init_hw(hw);
+
+ if (is_valid_ether_addr(adapter->hw.mac.addr)) {
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ }
+
+ adapter->last_reset = jiffies;
+}
+
+int igbvf_up(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* hardware has been reset, we need to reload some things */
+ igbvf_configure(adapter);
+
+ clear_bit(__IGBVF_DOWN, &adapter->state);
+
+ napi_enable(&adapter->rx_ring->napi);
+ if (adapter->msix_entries)
+ igbvf_configure_msix(adapter);
+
+ /* Clear any pending interrupts. */
+ er32(EICR);
+ igbvf_irq_enable(adapter);
+
+ /* start the watchdog */
+ hw->mac.get_link_status = 1;
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+
+
+ return 0;
+}
+
+void igbvf_down(struct igbvf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rxdctl, txdctl;
+
+ /*
+ * signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer
+ */
+ set_bit(__IGBVF_DOWN, &adapter->state);
+
+ /* disable receives in the hardware */
+ rxdctl = er32(RXDCTL(0));
+ ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+
+ netif_stop_queue(netdev);
+
+ /* disable transmits in the hardware */
+ txdctl = er32(TXDCTL(0));
+ ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+
+ /* flush both disables and wait for them to finish */
+ e1e_flush();
+ msleep(10);
+
+ napi_disable(&adapter->rx_ring->napi);
+
+ igbvf_irq_disable(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+
+ netif_carrier_off(netdev);
+
+ /* record the stats before reset*/
+ igbvf_update_stats(adapter);
+
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+ igbvf_reset(adapter);
+ igbvf_clean_tx_ring(adapter->tx_ring);
+ igbvf_clean_rx_ring(adapter->rx_ring);
+}
+
+void igbvf_reinit_locked(struct igbvf_adapter *adapter)
+{
+ might_sleep();
+ while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
+ msleep(1);
+ igbvf_down(adapter);
+ igbvf_up(adapter);
+ clear_bit(__IGBVF_RESETTING, &adapter->state);
+}
+
+/**
+ * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igbvf_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ s32 rc;
+
+ adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+ adapter->rx_ps_hdr_size = 0;
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ adapter->tx_int_delay = 8;
+ adapter->tx_abs_int_delay = 32;
+ adapter->rx_int_delay = 0;
+ adapter->rx_abs_int_delay = 8;
+ adapter->itr_setting = 3;
+ adapter->itr = 20000;
+
+ /* Set various function pointers */
+ adapter->ei->init_ops(&adapter->hw);
+
+ rc = adapter->hw.mac.ops.init_params(&adapter->hw);
+ if (rc)
+ return rc;
+
+ rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
+ if (rc)
+ return rc;
+
+ igbvf_set_interrupt_capability(adapter);
+
+ if (igbvf_alloc_queues(adapter))
+ return -ENOMEM;
+
+ spin_lock_init(&adapter->tx_queue_lock);
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ igbvf_irq_disable(adapter);
+
+ spin_lock_init(&adapter->stats_lock);
+
+ set_bit(__IGBVF_DOWN, &adapter->state);
+ return 0;
+}
+
+static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->stats.last_gprc = er32(VFGPRC);
+ adapter->stats.last_gorc = er32(VFGORC);
+ adapter->stats.last_gptc = er32(VFGPTC);
+ adapter->stats.last_gotc = er32(VFGOTC);
+ adapter->stats.last_mprc = er32(VFMPRC);
+ adapter->stats.last_gotlbc = er32(VFGOTLBC);
+ adapter->stats.last_gptlbc = er32(VFGPTLBC);
+ adapter->stats.last_gorlbc = er32(VFGORLBC);
+ adapter->stats.last_gprlbc = er32(VFGPRLBC);
+
+ adapter->stats.base_gprc = er32(VFGPRC);
+ adapter->stats.base_gorc = er32(VFGORC);
+ adapter->stats.base_gptc = er32(VFGPTC);
+ adapter->stats.base_gotc = er32(VFGOTC);
+ adapter->stats.base_mprc = er32(VFMPRC);
+ adapter->stats.base_gotlbc = er32(VFGOTLBC);
+ adapter->stats.base_gptlbc = er32(VFGPTLBC);
+ adapter->stats.base_gorlbc = er32(VFGORLBC);
+ adapter->stats.base_gprlbc = er32(VFGPRLBC);
+}
+
+/**
+ * igbvf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int igbvf_open(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__IGBVF_TESTING, &adapter->state))
+ return -EBUSY;
+
+ /* allocate transmit descriptors */
+ err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
+ if (err)
+ goto err_setup_rx;
+
+ /*
+ * before we allocate an interrupt, we must be ready to handle it.
+ * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+ * as soon as we call pci_request_irq, so we have to setup our
+ * clean_rx handler before we do so.
+ */
+ igbvf_configure(adapter);
+
+ err = igbvf_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ /* From here on the code is the same as igbvf_up() */
+ clear_bit(__IGBVF_DOWN, &adapter->state);
+
+ napi_enable(&adapter->rx_ring->napi);
+
+ /* clear any pending interrupts */
+ er32(EICR);
+
+ igbvf_irq_enable(adapter);
+
+ /* start the watchdog */
+ hw->mac.get_link_status = 1;
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+
+ return 0;
+
+err_req_irq:
+ igbvf_free_rx_resources(adapter->rx_ring);
+err_setup_rx:
+ igbvf_free_tx_resources(adapter->tx_ring);
+err_setup_tx:
+ igbvf_reset(adapter);
+
+ return err;
+}
+
+/**
+ * igbvf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int igbvf_close(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
+ igbvf_down(adapter);
+
+ igbvf_free_irq(adapter);
+
+ igbvf_free_tx_resources(adapter->tx_ring);
+ igbvf_free_rx_resources(adapter->rx_ring);
+
+ return 0;
+}
+/**
+ * igbvf_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int igbvf_set_mac(struct net_device *netdev, void *p)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+ if (memcmp(addr->sa_data, hw->mac.addr, 6))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ return 0;
+}
+
+#define UPDATE_VF_COUNTER(reg, name) \
+ { \
+ u32 current_counter = er32(reg); \
+ if (current_counter < adapter->stats.last_##name) \
+ adapter->stats.name += 0x100000000LL; \
+ adapter->stats.last_##name = current_counter; \
+ adapter->stats.name &= 0xFFFFFFFF00000000LL; \
+ adapter->stats.name |= current_counter; \
+ }
+
+/**
+ * igbvf_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+**/
+void igbvf_update_stats(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /*
+ * Prevent stats update while adapter is being reset, link is down
+ * or if the pci connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+
+ if (test_bit(__IGBVF_RESETTING, &adapter->state))
+ return;
+
+ if (pci_channel_offline(pdev))
+ return;
+
+ UPDATE_VF_COUNTER(VFGPRC, gprc);
+ UPDATE_VF_COUNTER(VFGORC, gorc);
+ UPDATE_VF_COUNTER(VFGPTC, gptc);
+ UPDATE_VF_COUNTER(VFGOTC, gotc);
+ UPDATE_VF_COUNTER(VFMPRC, mprc);
+ UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
+ UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
+ UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
+ UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
+
+ /* Fill out the OS statistics structure */
+ adapter->net_stats.multicast = adapter->stats.mprc;
+}
+
+static void igbvf_print_link_info(struct igbvf_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
+ adapter->link_speed,
+ ((adapter->link_duplex == FULL_DUPLEX) ?
+ "Full Duplex" : "Half Duplex"));
+}
+
+static bool igbvf_has_link(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ s32 ret_val = E1000_SUCCESS;
+ bool link_active;
+
+ /* If interface is down, stay link down */
+ if (test_bit(__IGBVF_DOWN, &adapter->state))
+ return false;
+
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
+
+ /* if check for link returns error we will need to reset */
+ if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
+ schedule_work(&adapter->reset_task);
+
+ return link_active;
+}
+
+/**
+ * igbvf_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void igbvf_watchdog(unsigned long data)
+{
+ struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;
+
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+}
+
+static void igbvf_watchdog_task(struct work_struct *work)
+{
+ struct igbvf_adapter *adapter = container_of(work,
+ struct igbvf_adapter,
+ watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+ struct igbvf_ring *tx_ring = adapter->tx_ring;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 link;
+ int tx_pending = 0;
+
+ link = igbvf_has_link(adapter);
+
+ if (link) {
+ if (!netif_carrier_ok(netdev)) {
+ mac->ops.get_link_up_info(&adapter->hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+ igbvf_print_link_info(adapter);
+
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ } else {
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ dev_info(&adapter->pdev->dev, "Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ }
+
+ if (netif_carrier_ok(netdev)) {
+ igbvf_update_stats(adapter);
+ } else {
+ tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
+ tx_ring->count);
+ if (tx_pending) {
+ /*
+ * We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ }
+ }
+
+ /* Cause software interrupt to ensure Rx ring is cleaned */
+ ew32(EICS, adapter->rx_ring->eims_value);
+
+ /* Reset the timer */
+ if (!test_bit(__IGBVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + (2 * HZ)));
+}
+
+#define IGBVF_TX_FLAGS_CSUM 0x00000001
+#define IGBVF_TX_FLAGS_VLAN 0x00000002
+#define IGBVF_TX_FLAGS_TSO 0x00000004
+#define IGBVF_TX_FLAGS_IPV4 0x00000008
+#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IGBVF_TX_FLAGS_VLAN_SHIFT 16
+
+static int igbvf_tso(struct igbvf_adapter *adapter,
+ struct igbvf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ struct e1000_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ int err;
+ struct igbvf_buffer *buffer_info;
+ u32 info = 0, tu_cmd = 0;
+ u32 mss_l4len_idx, l4len;
+ *hdr_len = 0;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "igbvf_tso returning an error\n");
+ return err;
+ }
+ }
+
+ l4len = tcp_hdrlen(skb);
+ *hdr_len += l4len;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ i = tx_ring->next_to_use;
+
+ buffer_info = &tx_ring->buffer_info[i];
+ context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
+ /* VLAN MACLEN IPLEN */
+ if (tx_flags & IGBVF_TX_FLAGS_VLAN)
+ info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
+ info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
+ *hdr_len += skb_network_offset(skb);
+ info |= (skb_transport_header(skb) - skb_network_header(skb));
+ *hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
+ context_desc->vlan_macip_lens = cpu_to_le32(info);
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
+ tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+
+ context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
+
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+ context_desc->seqnum_seed = 0;
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->dma = 0;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ return true;
+}
+
+static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
+ struct igbvf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags)
+{
+ struct e1000_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ struct igbvf_buffer *buffer_info;
+ u32 info = 0, tu_cmd = 0;
+
+ if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
+ (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
+ i = tx_ring->next_to_use;
+ buffer_info = &tx_ring->buffer_info[i];
+ context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ if (tx_flags & IGBVF_TX_FLAGS_VLAN)
+ info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
+
+ info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ info |= (skb_transport_header(skb) -
+ skb_network_header(skb));
+
+
+ context_desc->vlan_macip_lens = cpu_to_le32(info);
+
+ tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ default:
+ break;
+ }
+ }
+
+ context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+ context_desc->seqnum_seed = 0;
+ context_desc->mss_l4len_idx = 0;
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->dma = 0;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
+static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ /* there is enough descriptors then we don't need to worry */
+ if (igbvf_desc_unused(adapter->tx_ring) >= size)
+ return 0;
+
+ netif_stop_queue(netdev);
+
+ smp_mb();
+
+ /* We need to check again just in case room has been made available */
+ if (igbvf_desc_unused(adapter->tx_ring) < size)
+ return -EBUSY;
+
+ netif_wake_queue(netdev);
+
+ ++adapter->restart_queue;
+ return 0;
+}
+
+#define IGBVF_MAX_TXD_PWR 16
+#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
+
+static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
+ struct igbvf_ring *tx_ring,
+ struct sk_buff *skb,
+ unsigned int first)
+{
+ struct igbvf_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int len = skb_headlen(skb);
+ unsigned int count = 0, i;
+ unsigned int f;
+
+ i = tx_ring->next_to_use;
+
+ buffer_info = &tx_ring->buffer_info[i];
+ BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
+ buffer_info->length = len;
+ /* set time_stamp *before* dma to help avoid a possible race */
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->mapped_as_page = false;
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+
+
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ const struct skb_frag_struct *frag;
+
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = skb_frag_size(frag);
+
+ buffer_info = &tx_ring->buffer_info[i];
+ BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
+ buffer_info->length = len;
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->mapped_as_page = true;
+ buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+ }
+
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[first].next_to_watch = i;
+
+ return ++count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+
+ /* clear timestamp and dma mappings for failed buffer_info mapping */
+ buffer_info->dma = 0;
+ buffer_info->time_stamp = 0;
+ buffer_info->length = 0;
+ buffer_info->next_to_watch = 0;
+ buffer_info->mapped_as_page = false;
+ if (count)
+ count--;
+
+ /* clear timestamp and dma mappings for remaining portion of packet */
+ while (count--) {
+ if (i==0)
+ i += tx_ring->count;
+ i--;
+ buffer_info = &tx_ring->buffer_info[i];
+ igbvf_put_txbuf(adapter, buffer_info);
+ }
+
+ return 0;
+}
+
+static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
+ struct igbvf_ring *tx_ring,
+ int tx_flags, int count, u32 paylen,
+ u8 hdr_len)
+{
+ union e1000_adv_tx_desc *tx_desc = NULL;
+ struct igbvf_buffer *buffer_info;
+ u32 olinfo_status = 0, cmd_type_len;
+ unsigned int i;
+
+ cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
+ E1000_ADVTXD_DCMD_DEXT);
+
+ if (tx_flags & IGBVF_TX_FLAGS_VLAN)
+ cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
+
+ if (tx_flags & IGBVF_TX_FLAGS_TSO) {
+ cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
+
+ /* insert tcp checksum */
+ olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+
+ /* insert ip checksum */
+ if (tx_flags & IGBVF_TX_FLAGS_IPV4)
+ olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
+
+ } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
+ olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+ }
+
+ olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
+
+ i = tx_ring->next_to_use;
+ while (count--) {
+ buffer_info = &tx_ring->buffer_info[i];
+ tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
+ tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type_len | buffer_info->length);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ writel(i, adapter->hw.hw_addr + tx_ring->tail);
+ /* we need this if more than one processor can write to our tail
+ * at a time, it syncronizes IO on IA64/Altix systems */
+ mmiowb();
+}
+
+static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct igbvf_ring *tx_ring)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ unsigned int first, tx_flags = 0;
+ u8 hdr_len = 0;
+ int count = 0;
+ int tso = 0;
+
+ if (test_bit(__IGBVF_DOWN, &adapter->state)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /*
+ * need: count + 4 desc gap to keep tail from touching
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for skb->data,
+ * + 1 desc for context descriptor,
+ * head, otherwise try next time
+ */
+ if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
+ /* this is a hard error */
+ return NETDEV_TX_BUSY;
+ }
+
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= IGBVF_TX_FLAGS_VLAN;
+ tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
+ }
+
+ if (skb->protocol == htons(ETH_P_IP))
+ tx_flags |= IGBVF_TX_FLAGS_IPV4;
+
+ first = tx_ring->next_to_use;
+
+ tso = skb_is_gso(skb) ?
+ igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
+ if (unlikely(tso < 0)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (tso)
+ tx_flags |= IGBVF_TX_FLAGS_TSO;
+ else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IGBVF_TX_FLAGS_CSUM;
+
+ /*
+ * count reflects descriptors mapped, if 0 then mapping error
+ * has occurred and we need to rewind the descriptor queue
+ */
+ count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
+
+ if (count) {
+ igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
+ skb->len, hdr_len);
+ /* Make sure there is space in the ring for the next send. */
+ igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
+ } else {
+ dev_kfree_skb_any(skb);
+ tx_ring->buffer_info[first].time_stamp = 0;
+ tx_ring->next_to_use = first;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct igbvf_ring *tx_ring;
+
+ if (test_bit(__IGBVF_DOWN, &adapter->state)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ tx_ring = &adapter->tx_ring[0];
+
+ return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
+}
+
+/**
+ * igbvf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void igbvf_tx_timeout(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+}
+
+static void igbvf_reset_task(struct work_struct *work)
+{
+ struct igbvf_adapter *adapter;
+ adapter = container_of(work, struct igbvf_adapter, reset_task);
+
+ igbvf_reinit_locked(adapter);
+}
+
+/**
+ * igbvf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * igbvf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+ if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+ dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
+ msleep(1);
+ /* igbvf_down has a dependency on max_frame_size */
+ adapter->max_frame_size = max_frame;
+ if (netif_running(netdev))
+ igbvf_down(adapter);
+
+ /*
+ * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more, this pushes us to allocate from the next
+ * larger slab size.
+ * i.e. RXBUFFER_2048 --> size-4096 slab
+ * However with the new *_jumbo_rx* routines, jumbo receives will use
+ * fragmented skbs
+ */
+
+ if (max_frame <= 1024)
+ adapter->rx_buffer_len = 1024;
+ else if (max_frame <= 2048)
+ adapter->rx_buffer_len = 2048;
+ else
+#if (PAGE_SIZE / 2) > 16384
+ adapter->rx_buffer_len = 16384;
+#else
+ adapter->rx_buffer_len = PAGE_SIZE / 2;
+#endif
+
+
+ /* adjust allocation if LPE protects us, and we aren't using SBP */
+ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
+ (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
+ adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
+ ETH_FCS_LEN;
+
+ dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ igbvf_up(adapter);
+ else
+ igbvf_reset(adapter);
+
+ clear_bit(__IGBVF_RESETTING, &adapter->state);
+
+ return 0;
+}
+
+static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PM
+ int retval = 0;
+#endif
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
+ igbvf_down(adapter);
+ igbvf_free_irq(adapter);
+ }
+
+#ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+#endif
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int igbvf_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ u32 err;
+
+ pci_restore_state(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ if (netif_running(netdev)) {
+ err = igbvf_request_irq(adapter);
+ if (err)
+ return err;
+ }
+
+ igbvf_reset(adapter);
+
+ if (netif_running(netdev))
+ igbvf_up(adapter);
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif
+
+static void igbvf_shutdown(struct pci_dev *pdev)
+{
+ igbvf_suspend(pdev, PMSG_SUSPEND);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void igbvf_netpoll(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ disable_irq(adapter->pdev->irq);
+
+ igbvf_clean_tx_irq(adapter->tx_ring);
+
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+/**
+ * igbvf_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ igbvf_down(adapter);
+ pci_disable_device(pdev);
+
+ /* Request a slot slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * igbvf_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the igbvf_resume routine.
+ */
+static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ igbvf_reset(adapter);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * igbvf_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * its OK to resume normal operation. Implementation resembles the
+ * second-half of the igbvf_resume routine.
+ */
+static void igbvf_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (igbvf_up(adapter)) {
+ dev_err(&pdev->dev,
+ "can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+}
+
+static void igbvf_print_device_info(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (hw->mac.type == e1000_vfadapt_i350)
+ dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
+ else
+ dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
+ dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
+}
+
+static int igbvf_set_features(struct net_device *netdev, u32 features)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ if (features & NETIF_F_RXCSUM)
+ adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
+ else
+ adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;
+
+ return 0;
+}
+
+static const struct net_device_ops igbvf_netdev_ops = {
+ .ndo_open = igbvf_open,
+ .ndo_stop = igbvf_close,
+ .ndo_start_xmit = igbvf_xmit_frame,
+ .ndo_get_stats = igbvf_get_stats,
+ .ndo_set_rx_mode = igbvf_set_multi,
+ .ndo_set_mac_address = igbvf_set_mac,
+ .ndo_change_mtu = igbvf_change_mtu,
+ .ndo_do_ioctl = igbvf_ioctl,
+ .ndo_tx_timeout = igbvf_tx_timeout,
+ .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = igbvf_netpoll,
+#endif
+ .ndo_set_features = igbvf_set_features,
+};
+
+/**
+ * igbvf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in igbvf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * igbvf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit igbvf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct igbvf_adapter *adapter;
+ struct e1000_hw *hw;
+ const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
+
+ static int cards_found;
+ int err, pci_using_dac;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ pci_using_dac = 0;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ }
+
+ err = pci_request_regions(pdev, igbvf_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ pci_set_master(pdev);
+
+ err = -ENOMEM;
+ netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ hw = &adapter->hw;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ei = ei;
+ adapter->pba = ei->pba;
+ adapter->flags = ei->flags;
+ adapter->hw.back = adapter;
+ adapter->hw.mac.type = ei->mac;
+ adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->revision_id = pdev->revision;
+
+ err = -EIO;
+ adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+
+ if (!adapter->hw.hw_addr)
+ goto err_ioremap;
+
+ if (ei->get_variants) {
+ err = ei->get_variants(adapter);
+ if (err)
+ goto err_ioremap;
+ }
+
+ /* setup adapter struct */
+ err = igbvf_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* construct the net_device struct */
+ netdev->netdev_ops = &igbvf_netdev_ops;
+
+ igbvf_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ adapter->bd_number = cards_found++;
+
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
+
+ netdev->features = netdev->hw_features |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_IPV6_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ /*reset the controller to put the device in a known good state */
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_info(&pdev->dev,
+ "PF still in reset state, assigning new address."
+ " Is the PF interface up?\n");
+ dev_hw_addr_random(adapter->netdev, hw->mac.addr);
+ } else {
+ err = hw->mac.ops.read_mac_addr(hw);
+ if (err) {
+ dev_err(&pdev->dev, "Error reading MAC address\n");
+ goto err_hw_init;
+ }
+ }
+
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+ dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
+ netdev->dev_addr);
+ err = -EIO;
+ goto err_hw_init;
+ }
+
+ setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
+ (unsigned long) adapter);
+
+ INIT_WORK(&adapter->reset_task, igbvf_reset_task);
+ INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
+
+ /* ring size defaults */
+ adapter->rx_ring->count = 1024;
+ adapter->tx_ring->count = 1024;
+
+ /* reset the hardware with the new settings */
+ igbvf_reset(adapter);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_hw_init;
+
+ /* tell the stack to leave us alone until igbvf_open() is called */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ igbvf_print_device_info(adapter);
+
+ igbvf_initialize_last_counter_stats(adapter);
+
+ return 0;
+
+err_hw_init:
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+err_sw_init:
+ igbvf_reset_interrupt_capability(adapter);
+ iounmap(adapter->hw.hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * igbvf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * igbvf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. The could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit igbvf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+ * The watchdog timer may be rescheduled, so explicitly
+ * disable it from being rescheduled.
+ */
+ set_bit(__IGBVF_DOWN, &adapter->state);
+ del_timer_sync(&adapter->watchdog_timer);
+
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
+
+ unregister_netdev(netdev);
+
+ igbvf_reset_interrupt_capability(adapter);
+
+ /*
+ * it is important to delete the napi struct prior to freeing the
+ * rx ring so that you do not end up with null pointer refs
+ */
+ netif_napi_del(&adapter->rx_ring->napi);
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ iounmap(hw->hw_addr);
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ pci_release_regions(pdev);
+
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static struct pci_error_handlers igbvf_err_handler = {
+ .error_detected = igbvf_io_error_detected,
+ .slot_reset = igbvf_io_slot_reset,
+ .resume = igbvf_io_resume,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
+
+/* PCI Device API Driver */
+static struct pci_driver igbvf_driver = {
+ .name = igbvf_driver_name,
+ .id_table = igbvf_pci_tbl,
+ .probe = igbvf_probe,
+ .remove = __devexit_p(igbvf_remove),
+#ifdef CONFIG_PM
+ /* Power Management Hooks */
+ .suspend = igbvf_suspend,
+ .resume = igbvf_resume,
+#endif
+ .shutdown = igbvf_shutdown,
+ .err_handler = &igbvf_err_handler
+};
+
+/**
+ * igbvf_init_module - Driver Registration Routine
+ *
+ * igbvf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init igbvf_init_module(void)
+{
+ int ret;
+ printk(KERN_INFO "%s - version %s\n",
+ igbvf_driver_string, igbvf_driver_version);
+ printk(KERN_INFO "%s\n", igbvf_copyright);
+
+ ret = pci_register_driver(&igbvf_driver);
+
+ return ret;
+}
+module_init(igbvf_init_module);
+
+/**
+ * igbvf_exit_module - Driver Exit Cleanup Routine
+ *
+ * igbvf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit igbvf_exit_module(void)
+{
+ pci_unregister_driver(&igbvf_driver);
+}
+module_exit(igbvf_exit_module);
+
+
+MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/* netdev.c */
diff --git a/drivers/net/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h
index 77e18d3d6b1..77e18d3d6b1 100644
--- a/drivers/net/igbvf/regs.h
+++ b/drivers/net/ethernet/intel/igbvf/regs.h
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index af3822f9ea9..af3822f9ea9 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
diff --git a/drivers/net/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
index d7ed58fcd9b..d7ed58fcd9b 100644
--- a/drivers/net/igbvf/vf.h
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
diff --git a/drivers/net/ixgb/Makefile b/drivers/net/ethernet/intel/ixgb/Makefile
index 0b20c5e62ff..0b20c5e62ff 100644
--- a/drivers/net/ixgb/Makefile
+++ b/drivers/net/ethernet/intel/ixgb/Makefile
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 49e8408f05f..cb23448fe2f 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -204,6 +204,8 @@ extern void ixgb_set_ethtool_ops(struct net_device *netdev);
extern char ixgb_driver_name[];
extern const char ixgb_driver_version[];
+extern void ixgb_set_speed_duplex(struct net_device *netdev);
+
extern int ixgb_up(struct ixgb_adapter *adapter);
extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
extern void ixgb_reset(struct ixgb_adapter *adapter);
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
index 38b362b6785..2ed925f3881 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
@@ -559,7 +559,7 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
ENTER();
if (ixgb_check_and_get_eeprom_data(hw) == true) {
- for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
+ for (i = 0; i < ETH_ALEN; i++) {
mac_addr[i] = ee_map->mac_addr[i];
}
pr_debug("eeprom mac address = %pM\n", mac_addr);
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
index 7ea12652f47..5680f64314b 100644
--- a/drivers/net/ixgb/ixgb_ee.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
@@ -31,8 +31,6 @@
#define IXGB_EEPROM_SIZE 64 /* Size in words */
-#define IXGB_ETH_LENGTH_OF_ADDRESS 6
-
/* EEPROM Commands */
#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */
#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */
@@ -75,7 +73,7 @@
/* EEPROM structure */
struct ixgb_ee_map_type {
- u8 mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];
+ u8 mac_addr[ETH_ALEN];
__le16 compatibility;
__le16 reserved1[4];
__le32 pba_number;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index 6da890b9534..9dfce7dff79 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -115,7 +115,7 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return 0;
}
-static void ixgb_set_speed_duplex(struct net_device *netdev)
+void ixgb_set_speed_duplex(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
/* be optimistic about our link, since we were up before */
@@ -195,57 +195,6 @@ ixgb_set_pauseparam(struct net_device *netdev,
}
static u32
-ixgb_get_rx_csum(struct net_device *netdev)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- return adapter->rx_csum;
-}
-
-static int
-ixgb_set_rx_csum(struct net_device *netdev, u32 data)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- adapter->rx_csum = data;
-
- if (netif_running(netdev)) {
- ixgb_down(adapter, true);
- ixgb_up(adapter);
- ixgb_set_speed_duplex(netdev);
- } else
- ixgb_reset(adapter);
- return 0;
-}
-
-static u32
-ixgb_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_HW_CSUM) != 0;
-}
-
-static int
-ixgb_set_tx_csum(struct net_device *netdev, u32 data)
-{
- if (data)
- netdev->features |= NETIF_F_HW_CSUM;
- else
- netdev->features &= ~NETIF_F_HW_CSUM;
-
- return 0;
-}
-
-static int
-ixgb_set_tso(struct net_device *netdev, u32 data)
-{
- if (data)
- netdev->features |= NETIF_F_TSO;
- else
- netdev->features &= ~NETIF_F_TSO;
- return 0;
-}
-
-static u32
ixgb_get_msglevel(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -543,12 +492,8 @@ ixgb_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = MAX_RXD;
ring->tx_max_pending = MAX_TXD;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
ring->rx_pending = rxdr->count;
ring->tx_pending = txdr->count;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
}
static int
@@ -685,43 +630,6 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
-static int ixgb_set_flags(struct net_device *netdev, u32 data)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- bool need_reset;
- int rc;
-
- /*
- * Tx VLAN insertion does not work per HW design when Rx stripping is
- * disabled. Disable txvlan when rxvlan is turned off, and enable
- * rxvlan when txvlan is turned on.
- */
- if (!(data & ETH_FLAG_RXVLAN) &&
- (netdev->features & NETIF_F_HW_VLAN_TX))
- data &= ~ETH_FLAG_TXVLAN;
- else if (data & ETH_FLAG_TXVLAN)
- data |= ETH_FLAG_RXVLAN;
-
- need_reset = (data & ETH_FLAG_RXVLAN) !=
- (netdev->features & NETIF_F_HW_VLAN_RX);
-
- rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN |
- ETH_FLAG_TXVLAN);
- if (rc)
- return rc;
-
- if (need_reset) {
- if (netif_running(netdev)) {
- ixgb_down(adapter, true);
- ixgb_up(adapter);
- ixgb_set_speed_duplex(netdev);
- } else
- ixgb_reset(adapter);
- }
-
- return 0;
-}
-
static const struct ethtool_ops ixgb_ethtool_ops = {
.get_settings = ixgb_get_settings,
.set_settings = ixgb_set_settings,
@@ -736,20 +644,12 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
.set_ringparam = ixgb_set_ringparam,
.get_pauseparam = ixgb_get_pauseparam,
.set_pauseparam = ixgb_set_pauseparam,
- .get_rx_csum = ixgb_get_rx_csum,
- .set_rx_csum = ixgb_set_rx_csum,
- .get_tx_csum = ixgb_get_tx_csum,
- .set_tx_csum = ixgb_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
.get_msglevel = ixgb_get_msglevel,
.set_msglevel = ixgb_set_msglevel,
- .set_tso = ixgb_set_tso,
.get_strings = ixgb_get_strings,
.set_phys_id = ixgb_set_phys_id,
.get_sset_count = ixgb_get_sset_count,
.get_ethtool_stats = ixgb_get_ethtool_stats,
- .get_flags = ethtool_op_get_flags,
- .set_flags = ixgb_set_flags,
};
void ixgb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index 3d61a9e4faf..99b69adb4a0 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -478,7 +478,7 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
ixgb_mta_set(hw, hash_value);
}
- mca += IXGB_ETH_LENGTH_OF_ADDRESS + pad;
+ mca += ETH_ALEN + pad;
}
pr_debug("MC Update Complete\n");
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 873d32b89fb..2a99a35c33a 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -97,8 +97,6 @@ typedef enum {
ixgb_bus_width_64
} ixgb_bus_width;
-#define IXGB_ETH_LENGTH_OF_ADDRESS 6
-
#define IXGB_EEPROM_SIZE 64 /* Size in words */
#define SPEED_10000 10000
@@ -674,7 +672,7 @@ struct ixgb_hw {
u32 max_frame_size; /* Maximum frame size supported */
u32 mc_filter_type; /* Multicast filter hash type */
u32 num_mc_addrs; /* Number of current Multicast addrs */
- u8 curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */
+ u8 curr_mac_addr[ETH_ALEN]; /* Individual address currently programmed in MAC */
u32 num_tx_desc; /* Number of Transmit descriptors */
u32 num_rx_desc; /* Number of Receive descriptors */
u32 rx_buffer_size; /* Size of Receive buffer */
diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
index 2a58847f46e..2a58847f46e 100644
--- a/drivers/net/ixgb/ixgb_ids.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 6a130eb51cf..e21148f8b16 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -325,12 +325,47 @@ ixgb_reset(struct ixgb_adapter *adapter)
}
}
+static u32
+ixgb_fix_features(struct net_device *netdev, u32 features)
+{
+ /*
+ * Tx VLAN insertion does not work per HW design when Rx stripping is
+ * disabled.
+ */
+ if (!(features & NETIF_F_HW_VLAN_RX))
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int
+ixgb_set_features(struct net_device *netdev, u32 features)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ u32 changed = features ^ netdev->features;
+
+ if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX)))
+ return 0;
+
+ adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
+
+ if (netif_running(netdev)) {
+ ixgb_down(adapter, true);
+ ixgb_up(adapter);
+ ixgb_set_speed_duplex(netdev);
+ } else
+ ixgb_reset(adapter);
+
+ return 0;
+}
+
+
static const struct net_device_ops ixgb_netdev_ops = {
.ndo_open = ixgb_open,
.ndo_stop = ixgb_close,
.ndo_start_xmit = ixgb_xmit_frame,
.ndo_get_stats = ixgb_get_stats,
- .ndo_set_multicast_list = ixgb_set_multi,
+ .ndo_set_rx_mode = ixgb_set_multi,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgb_set_mac,
.ndo_change_mtu = ixgb_change_mtu,
@@ -340,6 +375,8 @@ static const struct net_device_ops ixgb_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgb_netpoll,
#endif
+ .ndo_fix_features = ixgb_fix_features,
+ .ndo_set_features = ixgb_set_features,
};
/**
@@ -439,12 +476,14 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- netdev->features = NETIF_F_SG |
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_TSO |
NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_RX;
+ netdev->features = netdev->hw_features |
NETIF_F_HW_VLAN_FILTER;
- netdev->features |= NETIF_F_TSO;
+ netdev->hw_features |= NETIF_F_RXCSUM;
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
@@ -1068,7 +1107,6 @@ ixgb_set_multi(struct net_device *netdev)
struct ixgb_hw *hw = &adapter->hw;
struct netdev_hw_addr *ha;
u32 rctl;
- int i;
/* Check for Promiscuous and All Multicast modes */
@@ -1095,19 +1133,27 @@ ixgb_set_multi(struct net_device *netdev)
rctl |= IXGB_RCTL_MPE;
IXGB_WRITE_REG(hw, RCTL, rctl);
} else {
- u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES *
- IXGB_ETH_LENGTH_OF_ADDRESS];
+ u8 *mta = kmalloc(IXGB_MAX_NUM_MULTICAST_ADDRESSES *
+ ETH_ALEN, GFP_ATOMIC);
+ u8 *addr;
+ if (!mta) {
+ pr_err("allocation of multicast memory failed\n");
+ goto alloc_failed;
+ }
IXGB_WRITE_REG(hw, RCTL, rctl);
- i = 0;
- netdev_for_each_mc_addr(ha, netdev)
- memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
- ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS);
+ addr = mta;
+ netdev_for_each_mc_addr(ha, netdev) {
+ memcpy(addr, ha->addr, ETH_ALEN);
+ addr += ETH_ALEN;
+ }
ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
+ kfree(mta);
}
+alloc_failed:
if (netdev->features & NETIF_F_HW_VLAN_RX)
ixgb_vlan_strip_enable(adapter);
else
@@ -1337,11 +1383,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
- offset = frag->page_offset;
+ len = skb_frag_size(frag);
+ offset = 0;
while (len) {
i++;
@@ -1361,8 +1407,8 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
buffer_info->dma =
- dma_map_page(&pdev->dev, frag->page,
- offset, size, DMA_TO_DEVICE);
+ skb_frag_dma_map(&pdev->dev, frag, offset, size,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = 0;
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
index e361185920e..8fc90519223 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
@@ -38,6 +38,7 @@
#include <asm/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/if_ether.h>
#undef ASSERT
#define ASSERT(x) BUG_ON(!(x))
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
index dd7fbeb1f7d..07d83ab46e2 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
@@ -267,7 +267,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
}
{ /* Transmit Descriptor Count */
- const struct ixgb_option opt = {
+ static const struct ixgb_option opt = {
.type = range_option,
.name = "Transmit Descriptors",
.err = "using default of " __MODULE_STRING(DEFAULT_TXD),
@@ -286,7 +286,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
}
{ /* Receive Descriptor Count */
- const struct ixgb_option opt = {
+ static const struct ixgb_option opt = {
.type = range_option,
.name = "Receive Descriptors",
.err = "using default of " __MODULE_STRING(DEFAULT_RXD),
@@ -305,7 +305,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
}
{ /* Receive Checksum Offload Enable */
- const struct ixgb_option opt = {
+ static const struct ixgb_option opt = {
.type = enable_option,
.name = "Receive Checksum Offload",
.err = "defaulting to Enabled",
@@ -348,7 +348,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
}
}
{ /* Receive Flow Control High Threshold */
- const struct ixgb_option opt = {
+ static const struct ixgb_option opt = {
.type = range_option,
.name = "Rx Flow Control High Threshold",
.err = "using default of " __MODULE_STRING(DEFAULT_FCRTH),
@@ -367,7 +367,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
pr_info("Ignoring RxFCHighThresh when no RxFC\n");
}
{ /* Receive Flow Control Low Threshold */
- const struct ixgb_option opt = {
+ static const struct ixgb_option opt = {
.type = range_option,
.name = "Rx Flow Control Low Threshold",
.err = "using default of " __MODULE_STRING(DEFAULT_FCRTL),
@@ -386,7 +386,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
pr_info("Ignoring RxFCLowThresh when no RxFC\n");
}
{ /* Flow Control Pause Time Request*/
- const struct ixgb_option opt = {
+ static const struct ixgb_option opt = {
.type = range_option,
.name = "Flow Control Pause Time Request",
.err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE),
@@ -416,7 +416,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
}
}
{ /* Receive Interrupt Delay */
- const struct ixgb_option opt = {
+ static const struct ixgb_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
@@ -433,7 +433,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
}
}
{ /* Transmit Interrupt Delay */
- const struct ixgb_option opt = {
+ static const struct ixgb_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
@@ -451,7 +451,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
}
{ /* Transmit Interrupt Delay Enable */
- const struct ixgb_option opt = {
+ static const struct ixgb_option opt = {
.type = enable_option,
.name = "Tx Interrupt Delay Enable",
.err = "defaulting to Enabled",
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 7d7387fbdec..7d7387fbdec 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e04a8e49e6d..a8368d5cf68 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -53,6 +53,7 @@
/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD 512
+#define IXGBE_DEFAULT_TX_WORK 256
#define IXGBE_MAX_TXD 4096
#define IXGBE_MIN_TXD 64
@@ -71,10 +72,13 @@
/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
-#define IXGBE_RXBUFFER_2048 2048
-#define IXGBE_RXBUFFER_4096 4096
-#define IXGBE_RXBUFFER_8192 8192
-#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
+#define IXGBE_RXBUFFER_2K 2048
+#define IXGBE_RXBUFFER_3K 3072
+#define IXGBE_RXBUFFER_4K 4096
+#define IXGBE_RXBUFFER_7K 7168
+#define IXGBE_RXBUFFER_8K 8192
+#define IXGBE_RXBUFFER_15K 15360
+#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
/*
* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we
@@ -91,13 +95,17 @@
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define IXGBE_TX_FLAGS_CSUM (u32)(1)
-#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
-#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
-#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
-#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
+#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1)
+#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2)
+#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
+#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
+#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
+#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 8)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
-#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
#define IXGBE_MAX_RSC_INT_RATE 162760
@@ -108,6 +116,8 @@
#define MAX_EMULATION_MAC_ADDRS 16
#define IXGBE_MAX_PF_MACVLANS 15
#define VMDQ_P(p) ((p) + adapter->num_vfs)
+#define IXGBE_82599_VF_DEVICE_ID 0x10ED
+#define IXGBE_X540_VF_DEVICE_ID 0x1515
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
@@ -120,6 +130,9 @@ struct vf_data_storage {
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
+ u16 vlan_count;
+ u8 spoofchk_enabled;
+ struct pci_dev *vfdev;
};
struct vf_macvlans {
@@ -141,14 +154,14 @@ struct vf_macvlans {
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
+ union ixgbe_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
- u16 length;
- u16 next_to_watch;
- unsigned int bytecount;
+ dma_addr_t dma;
+ u32 length;
+ u32 tx_flags;
+ struct sk_buff *skb;
+ u32 bytecount;
u16 gso_segs;
- u8 mapped_as_page;
};
struct ixgbe_rx_buffer {
@@ -206,6 +219,7 @@ enum ixbge_ring_state_t {
#define clear_ring_rsc_enabled(ring) \
clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
struct ixgbe_ring {
+ struct ixgbe_ring *next; /* pointer to next ring in q_vector */
void *desc; /* descriptor ring memory */
struct device *dev; /* device for DMA mapping */
struct net_device *netdev; /* netdev ring belongs to */
@@ -274,11 +288,7 @@ struct ixgbe_ring_feature {
} ____cacheline_internodealigned_in_smp;
struct ixgbe_ring_container {
-#if MAX_RX_QUEUES > MAX_TX_QUEUES
- DECLARE_BITMAP(idx, MAX_RX_QUEUES);
-#else
- DECLARE_BITMAP(idx, MAX_TX_QUEUES);
-#endif
+ struct ixgbe_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 work_limit; /* total work allowed per interrupt */
@@ -295,26 +305,29 @@ struct ixgbe_ring_container {
*/
struct ixgbe_q_vector {
struct ixgbe_adapter *adapter;
- unsigned int v_idx; /* index of q_vector within array, also used for
- * finding the bit in EICR and friends that
- * represents the vector for this ring */
#ifdef CONFIG_IXGBE_DCA
int cpu; /* CPU for DCA */
#endif
- struct napi_struct napi;
+ u16 v_idx; /* index of q_vector within array, also used for
+ * finding the bit in EICR and friends that
+ * represents the vector for this ring */
+ u16 itr; /* Interrupt throttle rate written to EITR */
struct ixgbe_ring_container rx, tx;
- u32 eitr;
+
+ struct napi_struct napi;
cpumask_var_t affinity_mask;
char name[IFNAMSIZ + 9];
};
-/* Helper macros to switch between ints/sec and what the register uses.
- * And yes, it's the same math going both ways. The lowest value
- * supported by all of the ixgbe hardware is 8.
+/*
+ * microsecond values for various ITR rates shifted by 2 to fit itr register
+ * with the first 3 bits reserved 0
*/
-#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
- ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
-#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
+#define IXGBE_MIN_RSC_ITR 24
+#define IXGBE_100K_ITR 40
+#define IXGBE_20K_ITR 200
+#define IXGBE_10K_ITR 400
+#define IXGBE_8K_ITR 500
static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
@@ -484,12 +497,13 @@ struct ixgbe_adapter {
u64 rsc_total_count;
u64 rsc_total_flush;
u32 wol;
- u16 eeprom_version;
+ u16 eeprom_verh;
+ u16 eeprom_verl;
+ u16 eeprom_cap;
int node;
u32 led_reg;
u32 interrupt_event;
- char lsc_int_name[IFNAMSIZ + 9];
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -498,11 +512,12 @@ struct ixgbe_adapter {
int vf_rate_link_speed;
struct vf_macvlans vf_mvs;
struct vf_macvlans *mv_list;
- bool antispoofing_enabled;
struct hlist_head fdir_filter_list;
union ixgbe_atr_input fdir_mask;
int fdir_filter_count;
+ u32 timer_event_accumulator;
+ u32 vferr_refcount;
};
struct ixgbe_fdir_filter {
@@ -546,7 +561,7 @@ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
-extern int ixgbe_up(struct ixgbe_adapter *adapter);
+extern void ixgbe_up(struct ixgbe_adapter *adapter);
extern void ixgbe_down(struct ixgbe_adapter *adapter);
extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
extern void ixgbe_reset(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 0d4e3826449..ef2afefb0cd 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -307,7 +307,6 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
switch (hw->phy.type) {
case ixgbe_phy_cu_unknown:
case ixgbe_phy_tn:
- case ixgbe_phy_aq:
media_type = ixgbe_media_type_copper;
goto out;
default:
@@ -358,7 +357,6 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 fctrl_reg;
u32 rmcs_reg;
u32 reg;
- u32 rx_pba_size;
u32 link_speed = 0;
bool link_up;
@@ -461,16 +459,13 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
- rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
-
- reg = (rx_pba_size - hw->fc.low_water) << 6;
+ reg = hw->fc.low_water << 6;
if (hw->fc.send_xon)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
- reg = (rx_pba_size - hw->fc.high_water) << 6;
+ reg = hw->fc.high_water[packetbuf_num] << 6;
reg |= IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
@@ -654,11 +649,6 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
(ixgbe_validate_link_ready(hw) != 0))
*link_up = false;
- /* if link is down, zero out the current_mode */
- if (*link_up == false) {
- hw->fc.current_mode = ixgbe_fc_none;
- hw->fc.fc_was_autonegged = false;
- }
out:
return 0;
}
@@ -759,7 +749,9 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
u8 analog_val;
/* Call adapter stop to disable tx/rx and clear interrupts */
- hw->mac.ops.stop_adapter(hw);
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != 0)
+ goto reset_hw_out;
/*
* Power up the Atlas Tx lanes if they are currently powered down.
@@ -802,26 +794,19 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
phy_status = hw->phy.ops.init(hw);
if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
goto reset_hw_out;
- else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
- goto no_phy_reset;
+ if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto mac_reset_top;
hw->phy.ops.reset(hw);
}
-no_phy_reset:
- /*
- * Prevent the PCI-E bus from from hanging by disabling PCI-E master
- * access and verify no pending requests before reset
- */
- ixgbe_disable_pcie_master(hw);
-
mac_reset_top:
/*
* Issue global reset to the MAC. This needs to be a SW reset.
* If link reset is used, it might reset the MAC when mng is using it
*/
- ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
/* Poll for reset bit to self-clear indicating reset is complete */
@@ -836,21 +821,18 @@ mac_reset_top:
hw_dbg(hw, "Reset polling failed to complete.\n");
}
+ msleep(50);
+
/*
* Double resets are required for recovery from certain error
* conditions. Between resets, it is necessary to stall to allow time
- * for any pending HW events to complete. We use 1usec since that is
- * what is needed for ixgbe_disable_pcie_master(). The second reset
- * then clears out any effects of those events.
+ * for any pending HW events to complete.
*/
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
- udelay(1);
goto mac_reset_top;
}
- msleep(50);
-
gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
@@ -1129,7 +1111,6 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
* physical layer because 10GBase-T PHYs use LMS = KX4/KX */
switch (hw->phy.type) {
case ixgbe_phy_tn:
- case ixgbe_phy_aq:
case ixgbe_phy_cu_unknown:
hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
MDIO_MMD_PMAPMD, &ext_ability);
@@ -1324,6 +1305,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eerd_generic,
+ .write = &ixgbe_write_eeprom_generic,
+ .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
.read_buffer = &ixgbe_read_eerd_buffer_generic,
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 34f30ec79c2..4ae26a748da 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -217,10 +217,6 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
phy->ops.get_firmware_version =
&ixgbe_get_phy_firmware_version_tnx;
break;
- case ixgbe_phy_aq:
- phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_generic;
- break;
default:
break;
}
@@ -340,7 +336,6 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
switch (hw->phy.type) {
case ixgbe_phy_cu_unknown:
case ixgbe_phy_tn:
- case ixgbe_phy_aq:
media_type = ixgbe_media_type_copper;
goto out;
default:
@@ -361,6 +356,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599EN_SFP:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_82599_CX4:
@@ -904,14 +900,18 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
**/
static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
- s32 status = 0;
- u32 ctrl;
- u32 i;
- u32 autoc;
- u32 autoc2;
+ ixgbe_link_speed link_speed;
+ s32 status;
+ u32 ctrl, i, autoc, autoc2;
+ bool link_up = false;
/* Call adapter stop to disable tx/rx and clear interrupts */
- hw->mac.ops.stop_adapter(hw);
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != 0)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
/* PHY ops must be identified and initialized prior to reset */
@@ -934,48 +934,49 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
hw->phy.ops.reset(hw);
- /*
- * Prevent the PCI-E bus from from hanging by disabling PCI-E master
- * access and verify no pending requests before reset
- */
- ixgbe_disable_pcie_master(hw);
-
mac_reset_top:
/*
- * Issue global reset to the MAC. This needs to be a SW reset.
- * If link reset is used, it might reset the MAC when mng is using it
+ * Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
*/
- ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+ ctrl = IXGBE_CTRL_LNK_RST;
+ if (!hw->force_full_reset) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up)
+ ctrl = IXGBE_CTRL_RST;
+ }
+
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
/* Poll for reset bit to self-clear indicating reset is complete */
for (i = 0; i < 10; i++) {
udelay(1);
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- if (!(ctrl & IXGBE_CTRL_RST))
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
break;
}
- if (ctrl & IXGBE_CTRL_RST) {
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
status = IXGBE_ERR_RESET_FAILED;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
+ msleep(50);
+
/*
* Double resets are required for recovery from certain error
* conditions. Between resets, it is necessary to stall to allow time
- * for any pending HW events to complete. We use 1usec since that is
- * what is needed for ixgbe_disable_pcie_master(). The second reset
- * then clears out any effects of those events.
+ * for any pending HW events to complete.
*/
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
- udelay(1);
goto mac_reset_top;
}
- msleep(50);
-
/*
* Store the original AUTOC/AUTOC2 values if they have not been
* stored off yet. Otherwise restore the stored original
@@ -1108,79 +1109,6 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
}
/**
- * ixgbe_set_fdir_rxpba_82599 - Initialize Flow Director Rx packet buffer
- * @hw: pointer to hardware structure
- * @pballoc: which mode to allocate filters with
- **/
-static s32 ixgbe_set_fdir_rxpba_82599(struct ixgbe_hw *hw, const u32 pballoc)
-{
- u32 fdir_pbsize = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
- u32 current_rxpbsize = 0;
- int i;
-
- /* reserve space for Flow Director filters */
- switch (pballoc) {
- case IXGBE_FDIR_PBALLOC_256K:
- fdir_pbsize -= 256 << IXGBE_RXPBSIZE_SHIFT;
- break;
- case IXGBE_FDIR_PBALLOC_128K:
- fdir_pbsize -= 128 << IXGBE_RXPBSIZE_SHIFT;
- break;
- case IXGBE_FDIR_PBALLOC_64K:
- fdir_pbsize -= 64 << IXGBE_RXPBSIZE_SHIFT;
- break;
- case IXGBE_FDIR_PBALLOC_NONE:
- default:
- return IXGBE_ERR_PARAM;
- }
-
- /* determine current RX packet buffer size */
- for (i = 0; i < 8; i++)
- current_rxpbsize += IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
-
- /* if there is already room for the filters do nothing */
- if (current_rxpbsize <= fdir_pbsize)
- return 0;
-
- if (current_rxpbsize > hw->mac.rx_pb_size) {
- /*
- * if rxpbsize is greater than max then HW max the Rx buffer
- * sizes are unconfigured or misconfigured since HW default is
- * to give the full buffer to each traffic class resulting in
- * the total size being buffer size 8x actual size
- *
- * This assumes no DCB since the RXPBSIZE registers appear to
- * be unconfigured.
- */
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), fdir_pbsize);
- for (i = 1; i < 8; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
- } else {
- /*
- * Since the Rx packet buffer appears to have already been
- * configured we need to shrink each packet buffer by enough
- * to make room for the filters. As such we take each rxpbsize
- * value and multiply it by a fraction representing the size
- * needed over the size we currently have.
- *
- * We need to reduce fdir_pbsize and current_rxpbsize to
- * 1/1024 of their original values in order to avoid
- * overflowing the u32 being used to store rxpbsize.
- */
- fdir_pbsize >>= IXGBE_RXPBSIZE_SHIFT;
- current_rxpbsize >>= IXGBE_RXPBSIZE_SHIFT;
- for (i = 0; i < 8; i++) {
- u32 rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
- rxpbsize *= fdir_pbsize;
- rxpbsize /= current_rxpbsize;
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
- }
- }
-
- return 0;
-}
-
-/**
* ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
* @hw: pointer to hardware structure
* @fdirctrl: value to write to flow director control register
@@ -1227,13 +1155,6 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
**/
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
- s32 err;
-
- /* Before enabling Flow Director, verify the Rx Packet Buffer size */
- err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
- if (err)
- return err;
-
/*
* Continue setup of fdirctrl register bits:
* Move the flexible bytes to use the ethertype - shift 6 words
@@ -1258,13 +1179,6 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
**/
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
- s32 err;
-
- /* Before enabling Flow Director, verify the Rx Packet Buffer size */
- err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
- if (err)
- return err;
-
/*
* Continue setup of fdirctrl register bits:
* Turn perfect match filtering on
@@ -1886,7 +1800,6 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
switch (hw->phy.type) {
case ixgbe_phy_tn:
- case ixgbe_phy_aq:
case ixgbe_phy_cu_unknown:
hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
&ext_ability);
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index fc1375f26fe..834f044be4c 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -61,6 +61,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
+static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
/**
* ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -225,8 +226,9 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
IXGBE_READ_REG(hw, IXGBE_GORCH);
IXGBE_READ_REG(hw, IXGBE_GOTCL);
IXGBE_READ_REG(hw, IXGBE_GOTCH);
- for (i = 0; i < 8; i++)
- IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_RNBC(i));
IXGBE_READ_REG(hw, IXGBE_RUC);
IXGBE_READ_REG(hw, IXGBE_RFC);
IXGBE_READ_REG(hw, IXGBE_ROC);
@@ -495,7 +497,6 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
**/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
- u32 number_of_queues;
u32 reg_val;
u16 i;
@@ -506,35 +507,35 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
hw->adapter_stopped = true;
/* Disable the receive unit */
- reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- reg_val &= ~(IXGBE_RXCTRL_RXEN);
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
- IXGBE_WRITE_FLUSH(hw);
- usleep_range(2000, 4000);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
- /* Clear interrupt mask to stop from interrupts being generated */
+ /* Clear interrupt mask to stop interrupts from being generated */
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
- /* Clear any pending interrupts */
+ /* Clear any pending interrupts, flush previous writes */
IXGBE_READ_REG(hw, IXGBE_EICR);
/* Disable the transmit unit. Each queue must be disabled. */
- number_of_queues = hw->mac.max_tx_queues;
- for (i = 0; i < number_of_queues; i++) {
- reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
- if (reg_val & IXGBE_TXDCTL_ENABLE) {
- reg_val &= ~IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
- }
+ for (i = 0; i < hw->mac.max_tx_queues; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ reg_val |= IXGBE_RXDCTL_SWFLSH;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
}
+ /* flush all queues disables */
+ IXGBE_WRITE_FLUSH(hw);
+ usleep_range(1000, 2000);
+
/*
* Prevent the PCI-E bus from from hanging by disabling PCI-E master
* access and verify no pending requests
*/
- ixgbe_disable_pcie_master(hw);
-
- return 0;
+ return ixgbe_disable_pcie_master(hw);
}
/**
@@ -1931,7 +1932,6 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
s32 ret_val = 0;
u32 mflcn_reg, fccfg_reg;
u32 reg;
- u32 rx_pba_size;
u32 fcrtl, fcrth;
#ifdef CONFIG_DCB
@@ -2011,11 +2011,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
- rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
-
- fcrth = (rx_pba_size - hw->fc.high_water) << 10;
- fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
+ fcrth = hw->fc.high_water[packetbuf_num] << 10;
+ fcrtl = hw->fc.low_water << 10;
if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
fcrth |= IXGBE_FCRTH_FCEN;
@@ -2121,8 +2118,8 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
*/
linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
- if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
- ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+ if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+ (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
goto out;
}
@@ -2292,7 +2289,9 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
* Validate the water mark configuration. Zero water marks are invalid
* because it causes the controller to just blast out fc packets.
*/
- if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
+ if (!hw->fc.low_water ||
+ !hw->fc.high_water[packetbuf_num] ||
+ !hw->fc.pause_time) {
hw_dbg(hw, "Invalid water mark configuration\n");
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
@@ -2457,75 +2456,57 @@ out:
* bit hasn't caused the master requests to be disabled, else 0
* is returned signifying master requests disabled.
**/
-s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
struct ixgbe_adapter *adapter = hw->back;
- u32 i;
- u32 reg_val;
- u32 number_of_queues;
s32 status = 0;
- u16 dev_status = 0;
+ u32 i;
+ u16 value;
- /* Just jump out if bus mastering is already disabled */
+ /* Always set this bit to ensure any future transactions are blocked */
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
+
+ /* Exit if master requests are blocked */
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
goto out;
- /* Disable the receive unit by stopping each queue */
- number_of_queues = hw->mac.max_rx_queues;
- for (i = 0; i < number_of_queues; i++) {
- reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
- if (reg_val & IXGBE_RXDCTL_ENABLE) {
- reg_val &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
- }
- }
-
- reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
- reg_val |= IXGBE_CTRL_GIO_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
-
+ /* Poll for master request bit to clear */
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
- if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
- goto check_device_status;
udelay(100);
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ goto out;
}
+ /*
+ * Two consecutive resets are required via CTRL.RST per datasheet
+ * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new master requests from
+ * being issued by our device. We then must wait 1usec or more for any
+ * remaining completions from the PCIe bus to trickle in, and then reset
+ * again to clear out any effects they may have had on our device.
+ */
hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
- status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
/*
* Before proceeding, make sure that the PCIe block does not have
* transactions pending.
*/
-check_device_status:
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
- pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
- &dev_status);
- if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
- break;
udelay(100);
+ pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
+ &value);
+ if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+ goto out;
}
- if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
- hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
- else
- goto out;
-
- /*
- * Two consecutive resets are required via CTRL.RST per datasheet
- * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
- * of this need. The first reset prevents new master requests from
- * being issued by our device. We then must wait 1usec for any
- * remaining completions from the PCIe bus to trickle in, and then reset
- * again to clear out any effects they may have had on our device.
- */
- hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
+ status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
out:
return status;
}
-
/**
* ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
* @hw: pointer to hardware structure
@@ -3114,12 +3095,6 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
else
*speed = IXGBE_LINK_SPEED_UNKNOWN;
- /* if link is down, zero out the current_mode */
- if (*link_up == false) {
- hw->fc.current_mode = ixgbe_fc_none;
- hw->fc.fc_was_autonegged = false;
- }
-
return 0;
}
@@ -3366,7 +3341,7 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* Communicates with the manageability block. On success return 0
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
-static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
+static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length)
{
u32 hicr, i;
@@ -3399,7 +3374,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
*/
for (i = 0; i < dword_len; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- i, *((u32 *)buffer + i));
+ i, cpu_to_le32(buffer[i]));
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
@@ -3423,9 +3398,10 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
dword_len = hdr_size >> 2;
/* first pull in the header so we know the buffer length */
- for (i = 0; i < dword_len; i++)
- *((u32 *)buffer + i) =
- IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+ for (i = 0; i < dword_len; i++) {
+ buffer[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+ le32_to_cpus(&buffer[i]);
+ }
/* If there is any thing in data position pull it in */
buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
@@ -3443,8 +3419,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
/* Pull in the rest of the buffer (i is where we left off)*/
for (; i < buf_len; i++)
- *((u32 *)buffer + i) =
- IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+ buffer[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
out:
return ret_val;
@@ -3490,7 +3465,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
fw_cmd.pad2 = 0;
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
- ret_val = ixgbe_host_interface_command(hw, (u8 *)&fw_cmd,
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
sizeof(fw_cmd));
if (ret_val != 0)
continue;
@@ -3508,3 +3483,44 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
out:
return ret_val;
}
+
+/**
+ * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
+ * @hw: pointer to the hardware structure
+ *
+ * The 82599 and x540 MACs can experience issues if TX work is still pending
+ * when a reset occurs. This function prevents this by flushing the PCIe
+ * buffers on the system.
+ **/
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
+{
+ u32 gcr_ext, hlreg0;
+
+ /*
+ * If double reset is not requested then all transactions should
+ * already be clear and as such there is no work to do
+ */
+ if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
+ return;
+
+ /*
+ * Set loopback enable to prevent any transmits from being sent
+ * should the link come up. This assumes that the RXCTRL.RXEN bit
+ * has already been cleared.
+ */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+
+ /* initiate cleaning flow for buffers in the PCIe transaction layer */
+ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
+ gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
+
+ /* Flush all writes and allow 20usec for all transactions to clear */
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(20);
+
+ /* restore previous register values */
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f24fd64a4c4..863f9c1f145 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -81,7 +81,6 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
-s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
@@ -101,6 +100,7 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 9d88c31487b..318caf4bf62 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -40,7 +40,8 @@
* hardware. The IEEE 802.1Qaz specification do not use bandwidth
* groups so this is much simplified from the CEE case.
*/
-s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
+static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
+ __u16 *max, int max_frame)
{
int min_percent = 100;
int min_credit, multiplier;
@@ -183,7 +184,7 @@ void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
*pfc_en = 0;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
- *pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i;
+ *pfc_en |= !!(cfg->tc_config[i].dcb_pfc & 0xF) << i;
}
void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
@@ -230,6 +231,18 @@ void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
}
}
+void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
+{
+ int i, up;
+ unsigned long bitmap;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ bitmap = cfg->tc_config[i].path[direction].up_to_tc_bitmap;
+ for_each_set_bit(up, &bitmap, MAX_USER_PRIORITY)
+ map[up] = i;
+ }
+}
+
/**
* ixgbe_dcb_hw_config - Config and enable DCB
* @hw: pointer to hardware structure
@@ -244,10 +257,9 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
u8 pfc_en;
u8 ptype[MAX_TRAFFIC_CLASS];
u8 bwgid[MAX_TRAFFIC_CLASS];
+ u8 prio_tc[MAX_TRAFFIC_CLASS];
u16 refill[MAX_TRAFFIC_CLASS];
u16 max[MAX_TRAFFIC_CLASS];
- /* CEE does not define a priority to tc mapping so map 1:1 */
- u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
/* Unpack CEE standard containers */
ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
@@ -255,6 +267,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
ixgbe_dcb_unpack_max(dcb_config, max);
ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
+ ixgbe_dcb_unpack_map(dcb_config, DCB_TX_CONFIG, prio_tc);
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -273,7 +286,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
}
/* Helper routines to abstract HW specifics from DCB netlink ops */
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
int ret = -EINVAL;
@@ -283,7 +296,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en);
+ ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
break;
default:
break;
@@ -291,6 +304,39 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
return ret;
}
+s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
+{
+ __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
+ __u8 prio_type[IEEE_8021QAZ_MAX_TCS];
+ int i;
+
+ /* naively give each TC a bwg to map onto CEE hardware */
+ __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+ /* Map TSA onto CEE prio type */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ prio_type[i] = 2;
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ prio_type[i] = 0;
+ break;
+ default:
+ /* Hardware only supports priority strict or
+ * ETS transmission selection algorithms if
+ * we receive some other value from dcbnl
+ * throw an error
+ */
+ return -EINVAL;
+ }
+ }
+
+ ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
+ return ixgbe_dcb_hw_ets_config(hw, refill, max,
+ bwg_id, prio_type, ets->prio_tc);
+}
+
s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
u16 *refill, u16 *max, u8 *bwg_id,
u8 *prio_type, u8 *prio_tc)
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index e85826ae032..e162775064d 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -29,13 +29,13 @@
#ifndef _DCB_CONFIG_H_
#define _DCB_CONFIG_H_
+#include <linux/dcbnl.h>
#include "ixgbe_type.h"
/* DCB data structures */
#define IXGBE_MAX_PACKET_BUFFERS 8
#define MAX_USER_PRIORITY 8
-#define MAX_TRAFFIC_CLASS 8
#define MAX_BW_GROUP 8
#define BW_PERCENT 100
@@ -145,16 +145,17 @@ void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
/* DCB credits calculation */
-s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame);
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
+s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
/* DCB definitions for credit calculation */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 2288c3cac01..fcd0e479721 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -191,7 +191,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
*/
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
- u32 reg, rx_pba_size;
+ u32 reg;
u8 i;
if (pfc_en) {
@@ -222,9 +222,8 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
*/
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
int enabled = pfc_en & (1 << i);
- rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
- rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
- reg = (rx_pba_size - hw->fc.low_water) << 10;
+
+ reg = hw->fc.low_water << 10;
if (enabled == pfc_enabled_tx ||
enabled == pfc_enabled_full)
@@ -232,7 +231,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
- reg = (rx_pba_size - hw->fc.high_water) << 10;
+ reg = hw->fc.high_water[i] << 10;
if (enabled == pfc_enabled_tx ||
enabled == pfc_enabled_full)
reg |= IXGBE_FCRTH_FCEN;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index 2f318935561..2f318935561 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index ade98200288..32cd97bc794 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -59,9 +59,9 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
- /* Map all traffic classes to their UP, 1 to 1 */
+ /* Map all traffic classes to their UP */
reg = 0;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ for (i = 0; i < MAX_USER_PRIORITY; i++)
reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
@@ -169,9 +169,9 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
IXGBE_RTTPCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
- /* Map all traffic classes to their UP, 1 to 1 */
+ /* Map all traffic classes to their UP */
reg = 0;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ for (i = 0; i < MAX_USER_PRIORITY; i++)
reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
@@ -205,26 +205,44 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
* ixgbe_dcb_config_pfc_82599 - Configure priority flow control
* @hw: pointer to hardware structure
* @pfc_en: enabled pfc bitmask
+ * @prio_tc: priority to tc assignments indexed by priority
*
* Configure Priority Flow Control (PFC) for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
- u32 i, reg, rx_pba_size;
+ u32 i, j, reg;
+ u8 max_tc = 0;
+
+ for (i = 0; i < MAX_USER_PRIORITY; i++)
+ if (prio_tc[i] > max_tc)
+ max_tc = prio_tc[i];
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- int enabled = pfc_en & (1 << i);
- rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
- rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+ int enabled = 0;
+
+ if (i > max_tc) {
+ reg = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
+ continue;
+ }
+
+ for (j = 0; j < MAX_USER_PRIORITY; j++) {
+ if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
+ enabled = 1;
+ break;
+ }
+ }
- reg = (rx_pba_size - hw->fc.low_water) << 10;
+ reg = hw->fc.low_water << 10;
if (enabled)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
- reg = (rx_pba_size - hw->fc.high_water) << 10;
+ reg = hw->fc.high_water[i] << 10;
if (enabled)
reg |= IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
@@ -252,12 +270,24 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
reg &= ~IXGBE_MFLCN_RFCE;
reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
- if (hw->mac.type == ixgbe_mac_X540)
+ if (hw->mac.type == ixgbe_mac_X540) {
+ reg &= ~IXGBE_MFLCN_RPFCE_MASK;
reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
+ }
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
} else {
+ /* X540 devices have a RX bit that should be cleared
+ * if PFC is disabled on all TCs but PFC features is
+ * enabled.
+ */
+ if (hw->mac.type == ixgbe_mac_X540) {
+ reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ reg &= ~IXGBE_MFLCN_RPFCE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+ }
+
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
hw->mac.ops.fc_enable(hw, i);
}
@@ -338,7 +368,7 @@ s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
bwg_id, prio_type);
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
bwg_id, prio_type, prio_tc);
- ixgbe_dcb_config_pfc_82599(hw, pfc_en);
+ ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
ixgbe_dcb_config_tc_stats_82599(hw);
return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index 08d1749862a..a59d5dc59d0 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -93,7 +93,7 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en);
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
/* DCB hw initialization */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 0ace6ce1d0b..3631d639d86 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -114,53 +114,19 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
u8 err = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ /* Fail command if not in CEE mode */
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 1;
+
/* verify there is something to do, if not then exit */
if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
return err;
- if (state > 0) {
- /* Turn on DCB */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
- e_err(drv, "Enable failed, needs MSI-X\n");
- err = 1;
- goto out;
- }
-
- adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82598EB:
- adapter->last_lfc_mode = adapter->hw.fc.current_mode;
- adapter->hw.fc.requested_mode = ixgbe_fc_none;
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- break;
- default:
- break;
- }
-
- ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
- } else {
- /* Turn off DCB */
- adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
- adapter->temp_dcb_cfg.pfc_mode_enable = false;
- adapter->dcb_cfg.pfc_mode_enable = false;
- adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- break;
- default:
- break;
- }
- ixgbe_setup_tc(netdev, 0);
- }
+ if (state > 0)
+ err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
+ else
+ err = ixgbe_setup_tc(netdev, 0);
-out:
return err;
}
@@ -192,6 +158,10 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ /* Abort a bad configuration */
+ if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
+ return;
+
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -212,6 +182,10 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
(adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
adapter->dcb_set_bitmap |= BIT_PG_TX;
+
+ if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
+ adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
+ adapter->dcb_set_bitmap |= BIT_PFC;
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -232,6 +206,10 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ /* Abort bad configurations */
+ if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
+ return;
+
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -252,6 +230,10 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
(adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
adapter->dcb_set_bitmap |= BIT_PG_RX;
+
+ if (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
+ adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)
+ adapter->dcb_set_bitmap |= BIT_PFC;
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -330,22 +312,34 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int ret;
+ int ret, i;
#ifdef IXGBE_FCOE
struct dcb_app app = {
.selector = DCB_APP_IDTYPE_ETHTYPE,
.protocol = ETH_P_FCOE,
};
- u8 up = dcb_getapp(netdev, &app);
+ u8 up;
+
+ /* In IEEE mode, use the IEEE Ethertype selector value */
+ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
+ app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+ up = dcb_ieee_getapp_mask(netdev, &app);
+ } else {
+ up = dcb_getapp(netdev, &app);
+ }
#endif
+ /* Fail command if not in CEE mode */
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 1;
+
ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
MAX_TRAFFIC_CLASS);
if (ret)
return DCB_NO_HW_CHG;
#ifdef IXGBE_FCOE
- if (up && (up != (1 << adapter->fcoe.up)))
+ if (up && !(up & (1 << adapter->fcoe.up)))
adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
/*
@@ -400,21 +394,14 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
#endif
- if (adapter->dcb_set_bitmap & BIT_PFC) {
- u8 pfc_en;
- ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
- ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en);
- ret = DCB_HW_CHG;
- }
-
if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
/* Priority to TC mapping in CEE case default to 1:1 */
- u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
+ u8 prio_tc[MAX_USER_PRIORITY];
int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
-#ifdef CONFIG_FCOE
+#ifdef IXGBE_FCOE
if (adapter->netdev->features & NETIF_F_FCOE_MTU)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
@@ -431,9 +418,25 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
DCB_TX_CONFIG, bwg_id);
ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
DCB_TX_CONFIG, prio_type);
+ ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, prio_tc);
ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
bwg_id, prio_type, prio_tc);
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
+ }
+
+ if (adapter->dcb_set_bitmap & BIT_PFC) {
+ u8 pfc_en;
+ u8 prio_tc[MAX_USER_PRIORITY];
+
+ ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, prio_tc);
+ ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
+ ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
+ ret = DCB_HW_CHG;
}
if (adapter->dcb_cfg.pfc_mode_enable)
@@ -490,10 +493,10 @@ static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
switch (tcid) {
case DCB_NUMTCS_ATTR_PG:
- *num = MAX_TRAFFIC_CLASS;
+ *num = adapter->dcb_cfg.num_tcs.pg_tcs;
break;
case DCB_NUMTCS_ATTR_PFC:
- *num = MAX_TRAFFIC_CLASS;
+ *num = adapter->dcb_cfg.num_tcs.pfc_tcs;
break;
default:
rval = -EINVAL;
@@ -562,7 +565,7 @@ static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
if (!my_ets)
return -EINVAL;
- ets->ets_cap = MAX_TRAFFIC_CLASS;
+ ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
ets->cbs = my_ets->cbs;
memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
@@ -575,13 +578,9 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
struct ieee_ets *ets)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
- __u8 prio_type[IEEE_8021QAZ_MAX_TCS];
int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
- int i, err;
- __u64 *p = (__u64 *) ets->prio_tc;
- /* naively give each TC a bwg to map onto CEE hardware */
- __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+ int i;
+ __u8 max_tc = 0;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
@@ -595,34 +594,24 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
- /* Map TSA onto CEE prio type */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- switch (ets->tc_tsa[i]) {
- case IEEE_8021QAZ_TSA_STRICT:
- prio_type[i] = 2;
- break;
- case IEEE_8021QAZ_TSA_ETS:
- prio_type[i] = 0;
- break;
- default:
- /* Hardware only supports priority strict or
- * ETS transmission selection algorithms if
- * we receive some other value from dcbnl
- * throw an error
- */
- return -EINVAL;
- }
+ if (ets->prio_tc[i] > max_tc)
+ max_tc = ets->prio_tc[i];
}
- if (*p)
- ixgbe_dcbnl_set_state(dev, 1);
- else
- ixgbe_dcbnl_set_state(dev, 0);
+ if (max_tc)
+ max_tc++;
- ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
- err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
- bwg_id, prio_type, ets->prio_tc);
- return err;
+ if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
+ return -EINVAL;
+
+ if (max_tc != netdev_get_num_tc(dev))
+ ixgbe_setup_tc(dev, max_tc);
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]);
+
+ return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
}
static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
@@ -636,7 +625,7 @@ static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
if (!my_pfc)
return -EINVAL;
- pfc->pfc_cap = MAX_TRAFFIC_CLASS;
+ pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs;
pfc->pfc_en = my_pfc->pfc_en;
pfc->mbc = my_pfc->mbc;
pfc->delay = my_pfc->delay;
@@ -653,7 +642,7 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- int err;
+ u8 *prio_tc;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
@@ -665,9 +654,9 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
return -ENOMEM;
}
+ prio_tc = adapter->ixgbe_ieee_ets->prio_tc;
memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
- err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
- return err;
+ return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
}
#ifdef IXGBE_FCOE
@@ -778,7 +767,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
*/
ixgbe_dcbnl_ieee_setets(dev, &ets);
ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
- ixgbe_dcbnl_set_state(dev, 0);
+ ixgbe_setup_tc(dev, 0);
}
return 0;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 82d4244c6e1..70d58c3849b 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -113,6 +113,8 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
+ {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
+ {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
@@ -324,12 +326,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber)) {
- /* 10000/copper and 1000/copper must autoneg
- * this function does not support any duplex forcing, but can
- * limit the advertising of the adapter to only 10000 or 1000 */
+ /*
+ * this function does not support duplex forcing, but can
+ * limit the advertising of the adapter to the specified speed
+ */
if (ecmd->autoneg == AUTONEG_DISABLE)
return -EINVAL;
+ if (ecmd->advertising & ~ecmd->supported)
+ return -EINVAL;
+
old = hw->phy.autoneg_advertised;
advertised = 0;
if (ecmd->advertising & ADVERTISED_10000baseT_Full)
@@ -368,13 +374,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- /*
- * Flow Control Autoneg isn't on if
- * - we didn't ask for it OR
- * - it failed, we know this by tx & rx being off
- */
- if (hw->fc.disable_fc_autoneg ||
- (hw->fc.current_mode == ixgbe_fc_none))
+ if (hw->fc.disable_fc_autoneg)
pause->autoneg = 0;
else
pause->autoneg = 1;
@@ -456,7 +456,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
static int ixgbe_get_regs_len(struct net_device *netdev)
{
-#define IXGBE_REGS_LEN 1128
+#define IXGBE_REGS_LEN 1129
return IXGBE_REGS_LEN * sizeof(u32);
}
@@ -525,6 +525,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
break;
@@ -766,6 +767,9 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
+
+ /* 82599 X540 specific registers */
+ regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
@@ -810,26 +814,97 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
return ret_val;
}
+static int ixgbe_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len, first_word, last_word, ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EINVAL;
+
+ max_len = hw->eeprom.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /*
+ * need read/modify/write of first changed EEPROM word
+ * only the second byte of the word is being modified
+ */
+ ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
+ if (ret_val)
+ goto err;
+
+ ptr++;
+ }
+ if ((eeprom->offset + eeprom->len) & 1) {
+ /*
+ * need read/modify/write of last changed EEPROM word
+ * only the first byte of the word is being modified
+ */
+ ret_val = hw->eeprom.ops.read(hw, last_word,
+ &eeprom_buff[last_word - first_word]);
+ if (ret_val)
+ goto err;
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ cpu_to_le16s(&eeprom_buff[i]);
+
+ ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
+
+ /* Update the checksum */
+ if (ret_val == 0)
+ hw->eeprom.ops.update_checksum(hw);
+
+err:
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
char firmware_version[32];
+ u32 nvm_track_id;
strncpy(drvinfo->driver, ixgbe_driver_name,
sizeof(drvinfo->driver) - 1);
strncpy(drvinfo->version, ixgbe_driver_version,
sizeof(drvinfo->version) - 1);
- snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
- (adapter->eeprom_version & 0xF000) >> 12,
- (adapter->eeprom_version & 0x0FF0) >> 4,
- adapter->eeprom_version & 0x000F);
+ nvm_track_id = (adapter->eeprom_verh << 16) |
+ adapter->eeprom_verl;
+ snprintf(firmware_version, sizeof(firmware_version), "0x%08x",
+ nvm_track_id);
strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version));
+ sizeof(drvinfo->fw_version) - 1);
strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info));
+ sizeof(drvinfo->bus_info) - 1);
drvinfo->n_stats = IXGBE_STATS_LEN;
drvinfo->testinfo_len = IXGBE_TEST_LEN;
drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
@@ -844,12 +919,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = IXGBE_MAX_RXD;
ring->tx_max_pending = IXGBE_MAX_TXD;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
ring->rx_pending = rx_ring->count;
ring->tx_pending = tx_ring->count;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
}
static int ixgbe_set_ringparam(struct net_device *netdev,
@@ -1535,7 +1606,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
rx_ring->dev = &adapter->pdev->dev;
rx_ring->netdev = adapter->netdev;
rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
- rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
+ rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K;
rx_ring->numa_node = adapter->node;
err = ixgbe_setup_rx_resources(rx_ring);
@@ -1566,26 +1637,26 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
/* X540 needs to set the MACC.FLU bit to force link up */
if (adapter->hw.mac.type == ixgbe_mac_X540) {
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MACC);
+ reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
reg_data |= IXGBE_MACC_FLU;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_MACC, reg_data);
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
}
/* right now we only support MAC loopback in the driver */
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+ reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
/* Setup MAC loopback */
reg_data |= IXGBE_HLREG0_LPBK;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+ reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
+ reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
reg_data &= ~IXGBE_AUTOC_LMS_MASK;
reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
- IXGBE_WRITE_FLUSH(&adapter->hw);
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
+ IXGBE_WRITE_FLUSH(hw);
usleep_range(10000, 20000);
/* Disable Atlas Tx lanes; re-enabled in reset path */
@@ -1883,6 +1954,7 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
{
struct ixgbe_hw *hw = &adapter->hw;
int retval = 1;
+ u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
/* WOL not supported except for the following */
switch(hw->device_id) {
@@ -1906,6 +1978,18 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
case IXGBE_DEV_ID_82599_KX4:
retval = 0;
break;
+ case IXGBE_DEV_ID_X540T:
+ /* check eeprom to see if enabled wol */
+ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+ ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+ (hw->bus.func == 0))) {
+ retval = 0;
+ break;
+ }
+
+ /* All others not supported */
+ wol->supported = 0;
+ break;
default:
wol->supported = 0;
}
@@ -2008,39 +2092,20 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
/* only valid if in constant ITR mode */
- switch (adapter->rx_itr_setting) {
- case 0:
- /* throttling disabled */
- ec->rx_coalesce_usecs = 0;
- break;
- case 1:
- /* dynamic ITR mode */
- ec->rx_coalesce_usecs = 1;
- break;
- default:
- /* fixed interrupt rate mode */
- ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param;
- break;
- }
+ if (adapter->rx_itr_setting <= 1)
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+ else
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
/* if in mixed tx/rx queues per vector mode, report only rx settings */
if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
return 0;
/* only valid if in constant ITR mode */
- switch (adapter->tx_itr_setting) {
- case 0:
- /* throttling disabled */
- ec->tx_coalesce_usecs = 0;
- break;
- case 1:
- /* dynamic ITR mode */
- ec->tx_coalesce_usecs = 1;
- break;
- default:
- ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param;
- break;
- }
+ if (adapter->tx_itr_setting <= 1)
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
return 0;
}
@@ -2059,10 +2124,9 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
/* if interrupt rate is too high then disable RSC */
if (ec->rx_coalesce_usecs != 1 &&
- ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
+ ec->rx_coalesce_usecs <= (IXGBE_MIN_RSC_ITR >> 2)) {
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- e_info(probe, "rx-usecs set too low, "
- "disabling RSC\n");
+ e_info(probe, "rx-usecs set too low, disabling RSC\n");
adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
return true;
}
@@ -2070,8 +2134,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
/* check the feature flag value and enable RSC if necessary */
if ((netdev->features & NETIF_F_LRO) &&
!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
- e_info(probe, "rx-usecs set to %d, "
- "re-enabling RSC\n",
+ e_info(probe, "rx-usecs set to %d, re-enabling RSC\n",
ec->rx_coalesce_usecs);
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
return true;
@@ -2086,97 +2149,59 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
+ int num_vectors;
+ u16 tx_itr_param, rx_itr_param;
bool need_reset = false;
/* don't accept tx specific changes if we've got mixed RxTx vectors */
if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
- && ec->tx_coalesce_usecs)
+ && ec->tx_coalesce_usecs)
return -EINVAL;
if (ec->tx_max_coalesced_frames_irq)
adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
- if (ec->rx_coalesce_usecs > 1) {
- /* check the limits */
- if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
- (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
- return -EINVAL;
-
- /* check the old value and enable RSC if necessary */
- need_reset = ixgbe_update_rsc(adapter, ec);
-
- /* store the value in ints/second */
- adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
+ if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
+ (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
+ return -EINVAL;
- /* static value of interrupt rate */
- adapter->rx_itr_setting = adapter->rx_eitr_param;
- /* clear the lower bit as its used for dynamic state */
- adapter->rx_itr_setting &= ~1;
- } else if (ec->rx_coalesce_usecs == 1) {
- /* check the old value and enable RSC if necessary */
- need_reset = ixgbe_update_rsc(adapter, ec);
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_update_rsc(adapter, ec);
- /* 1 means dynamic mode */
- adapter->rx_eitr_param = 20000;
- adapter->rx_itr_setting = 1;
- } else {
- /* check the old value and enable RSC if necessary */
- need_reset = ixgbe_update_rsc(adapter, ec);
- /*
- * any other value means disable eitr, which is best
- * served by setting the interrupt rate very high
- */
- adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
- adapter->rx_itr_setting = 0;
- }
+ if (ec->rx_coalesce_usecs > 1)
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+ else
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs;
- if (ec->tx_coalesce_usecs > 1) {
- /*
- * don't have to worry about max_int as above because
- * tx vectors don't do hardware RSC (an rx function)
- */
- /* check the limits */
- if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
- (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
- return -EINVAL;
+ if (adapter->rx_itr_setting == 1)
+ rx_itr_param = IXGBE_20K_ITR;
+ else
+ rx_itr_param = adapter->rx_itr_setting;
- /* store the value in ints/second */
- adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs;
+ if (ec->tx_coalesce_usecs > 1)
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+ else
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs;
- /* static value of interrupt rate */
- adapter->tx_itr_setting = adapter->tx_eitr_param;
+ if (adapter->tx_itr_setting == 1)
+ tx_itr_param = IXGBE_10K_ITR;
+ else
+ tx_itr_param = adapter->tx_itr_setting;
- /* clear the lower bit as its used for dynamic state */
- adapter->tx_itr_setting &= ~1;
- } else if (ec->tx_coalesce_usecs == 1) {
- /* 1 means dynamic mode */
- adapter->tx_eitr_param = 10000;
- adapter->tx_itr_setting = 1;
- } else {
- adapter->tx_eitr_param = IXGBE_MAX_INT_RATE;
- adapter->tx_itr_setting = 0;
- }
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_vectors = 1;
- /* MSI/MSIx Interrupt Mode */
- if (adapter->flags &
- (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) {
- int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- for (i = 0; i < num_vectors; i++) {
- q_vector = adapter->q_vector[i];
- if (q_vector->tx.count && !q_vector->rx.count)
- /* tx only */
- q_vector->eitr = adapter->tx_eitr_param;
- else
- /* rx only or mixed */
- q_vector->eitr = adapter->rx_eitr_param;
- q_vector->tx.work_limit = adapter->tx_work_limit;
- ixgbe_write_eitr(q_vector);
- }
- /* Legacy Interrupt Mode */
- } else {
- q_vector = adapter->q_vector[0];
- q_vector->eitr = adapter->rx_eitr_param;
+ for (i = 0; i < num_vectors; i++) {
+ q_vector = adapter->q_vector[i];
q_vector->tx.work_limit = adapter->tx_work_limit;
+ if (q_vector->tx.count && !q_vector->rx.count)
+ /* tx only */
+ q_vector->itr = tx_itr_param;
+ else
+ /* rx only or mixed */
+ q_vector->itr = rx_itr_param;
ixgbe_write_eitr(q_vector);
}
@@ -2279,11 +2304,13 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
cnt++;
}
+ cmd->rule_cnt = cnt;
+
return 0;
}
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- void *rule_locs)
+ u32 *rule_locs)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int ret = -EOPNOTSUPP;
@@ -2301,8 +2328,7 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
- ret = ixgbe_get_ethtool_fdir_all(adapter, cmd,
- (u32 *)rule_locs);
+ ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
break;
default:
break;
@@ -2569,6 +2595,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_eeprom_len = ixgbe_get_eeprom_len,
.get_eeprom = ixgbe_get_eeprom,
+ .set_eeprom = ixgbe_set_eeprom,
.get_ringparam = ixgbe_get_ringparam,
.set_ringparam = ixgbe_set_ringparam,
.get_pauseparam = ixgbe_get_pauseparam,
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 824edae7786..df3b1be69d8 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -145,6 +145,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
dma_addr_t addr = 0;
struct pci_pool *pool;
+ unsigned int cpu;
if (!netdev || !sgl)
return 0;
@@ -182,7 +183,8 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
}
/* alloc the udl from per cpu ddp pool */
- pool = *per_cpu_ptr(fcoe->pool, get_cpu());
+ cpu = get_cpu();
+ pool = *per_cpu_ptr(fcoe->pool, cpu);
ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
@@ -199,9 +201,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
while (len) {
/* max number of buffers allowed in one DDP context */
if (j >= IXGBE_BUFFCNT_MAX) {
- e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
- "not enough descriptors\n",
- xid, i, j, dmacount, (u64)addr);
+ *per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
goto out_noddp_free;
}
@@ -241,10 +241,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
*/
if (lastsize == bufflen) {
if (j >= IXGBE_BUFFCNT_MAX) {
- e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
- "not enough user buffers. We need an extra "
- "buffer because lastsize is bufflen.\n",
- xid, i, j, dmacount, (u64)addr);
+ *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
goto out_noddp_free;
}
@@ -438,7 +435,6 @@ ddp_out:
/**
* ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
- * @adapter: ixgbe adapter
* @tx_ring: tx desc ring
* @skb: associated skb
* @tx_flags: tx flags
@@ -599,6 +595,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+ unsigned int cpu;
if (!fcoe->pool) {
spin_lock_init(&fcoe->lock);
@@ -626,6 +623,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
e_err(drv, "failed to map extra DDP buffer\n");
goto out_extra_ddp_buffer;
}
+
+ /* Alloc per cpu mem to count the ddp alloc failure number */
+ fcoe->pcpu_noddp = alloc_percpu(u64);
+ if (!fcoe->pcpu_noddp) {
+ e_err(drv, "failed to alloc noddp counter\n");
+ goto out_pcpu_noddp_alloc_fail;
+ }
+
+ fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
+ if (!fcoe->pcpu_noddp_ext_buff) {
+ e_err(drv, "failed to alloc noddp extra buff cnt\n");
+ goto out_pcpu_noddp_extra_buff_alloc_fail;
+ }
+
+ for_each_possible_cpu(cpu) {
+ *per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
+ *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
+ }
}
/* Enable L2 eth type filter for FCoE */
@@ -660,12 +675,16 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
IXGBE_ETQS_QUEUE_EN |
(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
- IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
- IXGBE_FCRXCTRL_FCOELLI |
- IXGBE_FCRXCTRL_FCCRCBO |
+ IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO |
(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
return;
-
+out_pcpu_noddp_extra_buff_alloc_fail:
+ free_percpu(fcoe->pcpu_noddp);
+out_pcpu_noddp_alloc_fail:
+ dma_unmap_single(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer_dma,
+ IXGBE_FCBUFF_MIN,
+ DMA_FROM_DEVICE);
out_extra_ddp_buffer:
kfree(fcoe->extra_ddp_buffer);
out_ddp_pools:
@@ -694,6 +713,8 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
fcoe->extra_ddp_buffer_dma,
IXGBE_FCBUFF_MIN,
DMA_FROM_DEVICE);
+ free_percpu(fcoe->pcpu_noddp);
+ free_percpu(fcoe->pcpu_noddp_ext_buff);
kfree(fcoe->extra_ddp_buffer);
ixgbe_fcoe_ddp_pools_free(fcoe);
}
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 99de145e290..261fd62dda1 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -73,6 +73,8 @@ struct ixgbe_fcoe {
unsigned char *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
unsigned long mode;
+ u64 __percpu *pcpu_noddp;
+ u64 __percpu *pcpu_noddp_ext_buff;
#ifdef CONFIG_IXGBE_DCB
u8 up;
#endif
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e1fcc958927..09b8e88b299 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -42,6 +42,7 @@
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
+#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
@@ -55,8 +56,8 @@ char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
#define MAJ 3
-#define MIN 4
-#define BUILD 8
+#define MIN 6
+#define BUILD 7
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
const char ixgbe_driver_version[] = DRV_VERSION;
@@ -78,59 +79,33 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
* Class, Class Mask, private data (not used) }
*/
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
- board_82599 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
- board_82599 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
- board_82599 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
- board_82599 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
- board_82599 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
- board_82599 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
- board_82599 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
- board_82599 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
- board_82599 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
- board_82599 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
- board_82599 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
- board_X540 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2),
- board_82599 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS),
- board_82599 },
-
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
/* required last entry */
{0, }
};
@@ -160,42 +135,6 @@ MODULE_VERSION(DRV_VERSION);
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
-static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 gcr;
- u32 gpie;
- u32 vmdctl;
-
-#ifdef CONFIG_PCI_IOV
- /* disable iov and allow time for transactions to clear */
- pci_disable_sriov(adapter->pdev);
-#endif
-
- /* turn off device IOV mode */
- gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
- gcr &= ~(IXGBE_GCR_EXT_SRIOV);
- IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
- gpie &= ~IXGBE_GPIE_VTMODE_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
- /* set default pool back to 0 */
- vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
- IXGBE_WRITE_FLUSH(hw);
-
- /* take a breather then clean up driver data */
- msleep(100);
-
- kfree(adapter->vfinfo);
- adapter->vfinfo = NULL;
-
- adapter->num_vfs = 0;
- adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
-}
-
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -384,7 +323,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
tx_ring = adapter->tx_ring[n];
tx_buffer_info =
&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
+ pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
n, tx_ring->next_to_use, tx_ring->next_to_clean,
(u64)tx_buffer_info->dma,
tx_buffer_info->length,
@@ -423,7 +362,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
tx_buffer_info = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
pr_info("T [0x%03X] %016llX %016llX %016llX"
- " %04X %3X %016llX %p", i,
+ " %04X %p %016llX %p", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)tx_buffer_info->dma,
@@ -525,7 +464,7 @@ rx_ring_summary:
rx_ring->rx_buf_len, true);
if (rx_ring->rx_buf_len
- < IXGBE_RXBUFFER_2048)
+ < IXGBE_RXBUFFER_2K)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, 16, 1,
phys_to_virt(
@@ -642,27 +581,31 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
}
}
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
- struct ixgbe_tx_buffer *tx_buffer_info)
+static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring,
+ struct ixgbe_tx_buffer *tx_buffer)
{
- if (tx_buffer_info->dma) {
- if (tx_buffer_info->mapped_as_page)
- dma_unmap_page(tx_ring->dev,
- tx_buffer_info->dma,
- tx_buffer_info->length,
- DMA_TO_DEVICE);
+ if (tx_buffer->dma) {
+ if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
+ dma_unmap_page(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
else
- dma_unmap_single(tx_ring->dev,
- tx_buffer_info->dma,
- tx_buffer_info->length,
- DMA_TO_DEVICE);
- tx_buffer_info->dma = 0;
+ dma_unmap_single(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
}
- if (tx_buffer_info->skb) {
+ tx_buffer->dma = 0;
+}
+
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *tx_buffer_info)
+{
+ ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
+ if (tx_buffer_info->skb)
dev_kfree_skb_any(tx_buffer_info->skb);
- tx_buffer_info->skb = NULL;
- }
- tx_buffer_info->time_stamp = 0;
+ tx_buffer_info->skb = NULL;
/* tx_buffer_info must be completely set up in the transmit path */
}
@@ -796,56 +739,72 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
- union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
- struct ixgbe_tx_buffer *tx_buffer_info;
+ struct ixgbe_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
- u16 i, eop, count = 0;
+ unsigned int budget = q_vector->tx.work_limit;
+ u16 i = tx_ring->next_to_clean;
- i = tx_ring->next_to_clean;
- eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
- while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
- (count < q_vector->tx.work_limit)) {
- bool cleaned = false;
- rmb(); /* read buffer_info after eop_desc */
- for ( ; !cleaned; count++) {
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ for (; budget; budget--) {
+ union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+ break;
+
+ /* count the packet as being completed */
+ tx_ring->tx_stats.completed++;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer->next_to_watch = NULL;
+ /* prevent any other reads prior to eop_desc being verified */
+ rmb();
+
+ do {
+ ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
tx_desc->wb.status = 0;
- cleaned = (i == eop);
+ if (likely(tx_desc == eop_desc)) {
+ eop_desc = NULL;
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+ }
+
+ tx_buffer++;
+ tx_desc++;
i++;
- if (i == tx_ring->count)
+ if (unlikely(i == tx_ring->count)) {
i = 0;
- if (cleaned && tx_buffer_info->skb) {
- total_bytes += tx_buffer_info->bytecount;
- total_packets += tx_buffer_info->gso_segs;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
}
- ixgbe_unmap_and_free_tx_resource(tx_ring,
- tx_buffer_info);
- }
-
- tx_ring->tx_stats.completed++;
- eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+ } while (eop_desc);
}
tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
tx_ring->stats.bytes += total_bytes;
tx_ring->stats.packets += total_packets;
- u64_stats_update_begin(&tx_ring->syncp);
+ u64_stats_update_end(&tx_ring->syncp);
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
struct ixgbe_hw *hw = &adapter->hw;
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
e_err(drv, "Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
" TDH, TDT <%x>, <%x>\n"
@@ -857,8 +816,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
tx_ring->queue_index,
IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
- tx_ring->next_to_use, eop,
- tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+ tx_ring->next_to_use, i,
+ tx_ring->tx_buffer_info[i].time_stamp, jiffies);
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
@@ -874,7 +833,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
}
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
@@ -887,7 +846,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
}
}
- return count < q_vector->tx.work_limit;
+ return !!budget;
}
#ifdef CONFIG_IXGBE_DCA
@@ -903,12 +862,12 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
- rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ rxctrl |= dca3_get_tag(rx_ring->dev, cpu);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
- rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+ rxctrl |= (dca3_get_tag(rx_ring->dev, cpu) <<
IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
break;
default:
@@ -932,7 +891,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
case ixgbe_mac_82598EB:
txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
- txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ txctrl |= dca3_get_tag(tx_ring->dev, cpu);
txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
break;
@@ -940,7 +899,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
case ixgbe_mac_X540:
txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
- txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+ txctrl |= (dca3_get_tag(tx_ring->dev, cpu) <<
IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
@@ -953,26 +912,17 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct ixgbe_ring *ring;
int cpu = get_cpu();
- long r_idx;
- int i;
if (q_vector->cpu == cpu)
goto out_no_update;
- r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->tx.count; i++) {
- ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
- r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
- r_idx + 1);
- }
+ for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ ixgbe_update_tx_dca(adapter, ring, cpu);
- r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rx.count; i++) {
- ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
- r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
- r_idx + 1);
- }
+ for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ ixgbe_update_rx_dca(adapter, ring, cpu);
q_vector->cpu = cpu;
out_no_update:
@@ -1285,9 +1235,9 @@ static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
IXGBE_RXDADV_RSCCNT_MASK);
}
-static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
- int *work_done, int work_to_do)
+ int budget)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
@@ -1376,7 +1326,7 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
skb->len += upper_len;
skb->data_len += upper_len;
- skb->truesize += upper_len;
+ skb->truesize += PAGE_SIZE / 2;
}
i++;
@@ -1467,11 +1417,11 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
#endif /* IXGBE_FCOE */
ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+ budget--;
next_desc:
rx_desc->wb.upper.status_error = 0;
- (*work_done)++;
- if (*work_done >= work_to_do)
+ if (!budget)
break;
/* return some buffers to hardware, one at a time is too slow */
@@ -1512,9 +1462,10 @@ next_desc:
u64_stats_update_end(&rx_ring->syncp);
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
+
+ return !!budget;
}
-static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
* ixgbe_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -1525,61 +1476,46 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector;
- int i, q_vectors, v_idx, r_idx;
+ int q_vectors, v_idx;
u32 mask;
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ /* Populate MSIX to EITR Select */
+ if (adapter->num_vfs > 32) {
+ u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+ }
+
/*
* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ struct ixgbe_ring *ring;
q_vector = adapter->q_vector[v_idx];
- /* XXX for_each_set_bit(...) */
- r_idx = find_first_bit(q_vector->rx.idx,
- adapter->num_rx_queues);
-
- for (i = 0; i < q_vector->rx.count; i++) {
- u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
- ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
- r_idx = find_next_bit(q_vector->rx.idx,
- adapter->num_rx_queues,
- r_idx + 1);
- }
- r_idx = find_first_bit(q_vector->tx.idx,
- adapter->num_tx_queues);
-
- for (i = 0; i < q_vector->tx.count; i++) {
- u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
- ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
- r_idx = find_next_bit(q_vector->tx.idx,
- adapter->num_tx_queues,
- r_idx + 1);
- }
- if (q_vector->tx.count && !q_vector->rx.count)
- /* tx only */
- q_vector->eitr = adapter->tx_eitr_param;
- else if (q_vector->rx.count)
- /* rx or mixed */
- q_vector->eitr = adapter->rx_eitr_param;
+ for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
- ixgbe_write_eitr(q_vector);
- /* If ATR is enabled, set interrupt affinity */
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- /*
- * Allocate the affinity_hint cpumask, assign the mask
- * for this vector, and set our affinity_hint for
- * this irq.
- */
- if (!alloc_cpumask_var(&q_vector->affinity_mask,
- GFP_KERNEL))
- return;
- cpumask_set_cpu(v_idx, q_vector->affinity_mask);
- irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
- q_vector->affinity_mask);
+ for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
+
+ if (q_vector->tx.ring && !q_vector->rx.ring) {
+ /* tx only vector */
+ if (adapter->tx_itr_setting == 1)
+ q_vector->itr = IXGBE_10K_ITR;
+ else
+ q_vector->itr = adapter->tx_itr_setting;
+ } else {
+ /* rx or rx/tx vector */
+ if (adapter->rx_itr_setting == 1)
+ q_vector->itr = IXGBE_20K_ITR;
+ else
+ q_vector->itr = adapter->rx_itr_setting;
}
+
+ ixgbe_write_eitr(q_vector);
}
switch (adapter->hw.mac.type) {
@@ -1591,7 +1527,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
-
default:
break;
}
@@ -1599,12 +1534,10 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
/* set up to autoclear timer, and the vectors */
mask = IXGBE_EIMS_ENABLE_MASK;
- if (adapter->num_vfs)
- mask &= ~(IXGBE_EIMS_OTHER |
- IXGBE_EIMS_MAILBOX |
- IXGBE_EIMS_LSC);
- else
- mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+ mask &= ~(IXGBE_EIMS_OTHER |
+ IXGBE_EIMS_MAILBOX |
+ IXGBE_EIMS_LSC);
+
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
@@ -1649,7 +1582,7 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
* 100-1249MB/s bulk (8000 ints/s)
*/
/* what was last interrupt timeslice? */
- timepassed_us = 1000000/q_vector->eitr;
+ timepassed_us = q_vector->itr >> 2;
bytes_perint = bytes / timepassed_us; /* bytes/usec */
switch (itr_setting) {
@@ -1690,7 +1623,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_hw *hw = &adapter->hw;
int v_idx = q_vector->v_idx;
- u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
+ u32 itr_reg = q_vector->itr;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
@@ -1700,15 +1633,6 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
/*
- * 82599 and X540 can support a value of zero, so allow it for
- * max interrupt rate, but there is an errata where it can
- * not be zero with RSC
- */
- if (itr_reg == 8 &&
- !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
- itr_reg = 0;
-
- /*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
*/
@@ -1722,7 +1646,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{
- u32 new_itr = q_vector->eitr;
+ u32 new_itr = q_vector->itr;
u8 current_itr;
ixgbe_update_itr(q_vector, &q_vector->tx);
@@ -1733,24 +1657,25 @@ static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */
case lowest_latency:
- new_itr = 100000;
+ new_itr = IXGBE_100K_ITR;
break;
case low_latency:
- new_itr = 20000; /* aka hwitr = ~200 */
+ new_itr = IXGBE_20K_ITR;
break;
case bulk_latency:
- new_itr = 8000;
+ new_itr = IXGBE_8K_ITR;
break;
default:
break;
}
- if (new_itr != q_vector->eitr) {
+ if (new_itr != q_vector->itr) {
/* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 9) + new_itr)/10;
+ new_itr = (10 * new_itr * q_vector->itr) /
+ ((9 * new_itr) + q_vector->itr);
/* save the algorithm value here */
- q_vector->eitr = new_itr;
+ q_vector->itr = new_itr & IXGBE_MAX_EITR;
ixgbe_write_eitr(q_vector);
}
@@ -1827,6 +1752,39 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
}
}
+static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
+{
+ if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
+ return;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ /*
+ * Need to check link state so complete overtemp check
+ * on service task
+ */
+ if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
+ (!test_bit(__IXGBE_DOWN, &adapter->state))) {
+ adapter->interrupt_event = eicr;
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ return;
+ }
+ return;
+ case ixgbe_mac_X540:
+ if (!(eicr & IXGBE_EICR_TS))
+ return;
+ break;
+ default:
+ return;
+ }
+
+ e_crit(drv,
+ "Network adapter has been stopped because it has over heated. "
+ "Restart the computer. If the problem persists, "
+ "power off the system and replace the adapter\n");
+}
+
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1864,72 +1822,6 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
}
}
-static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
-{
- struct ixgbe_adapter *adapter = data;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 eicr;
-
- /*
- * Workaround for Silicon errata. Use clear-by-write instead
- * of clear-by-read. Reading with EICS will return the
- * interrupt causes without clearing, which later be done
- * with the write to EICR.
- */
- eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
- IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
-
- if (eicr & IXGBE_EICR_LSC)
- ixgbe_check_lsc(adapter);
-
- if (eicr & IXGBE_EICR_MAILBOX)
- ixgbe_msg_task(adapter);
-
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- /* Handle Flow Director Full threshold interrupt */
- if (eicr & IXGBE_EICR_FLOW_DIR) {
- int reinit_count = 0;
- int i;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = adapter->tx_ring[i];
- if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
- &ring->state))
- reinit_count++;
- }
- if (reinit_count) {
- /* no more flow director interrupts until after init */
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
- eicr &= ~IXGBE_EICR_FLOW_DIR;
- adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
- ixgbe_service_event_schedule(adapter);
- }
- }
- ixgbe_check_sfp_event(adapter, eicr);
- if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
- ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
- if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
- adapter->interrupt_event = eicr;
- adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
- ixgbe_service_event_schedule(adapter);
- }
- }
- break;
- default:
- break;
- }
-
- ixgbe_check_fan_failure(adapter, eicr);
-
- /* re-enable the original interrupt state, no lsc, no queues */
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr &
- ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE));
-
- return IRQ_HANDLED;
-}
-
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
@@ -1982,232 +1874,124 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
/* skip the flush */
}
-static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
-{
- struct ixgbe_q_vector *q_vector = data;
- struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *tx_ring;
- int i, r_idx;
-
- if (!q_vector->tx.count)
- return IRQ_HANDLED;
-
- r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->tx.count; i++) {
- tx_ring = adapter->tx_ring[r_idx];
- r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
- r_idx + 1);
- }
-
- /* EIAM disabled interrupts (on this vector) for us */
- napi_schedule(&q_vector->napi);
-
- return IRQ_HANDLED;
-}
-
/**
- * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
- * @irq: unused
- * @data: pointer to our q_vector struct for this interrupt vector
+ * ixgbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
**/
-static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
-{
- struct ixgbe_q_vector *q_vector = data;
- struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *rx_ring;
- int r_idx;
- int i;
-
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_dca(q_vector);
-#endif
-
- r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rx.count; i++) {
- rx_ring = adapter->rx_ring[r_idx];
- r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
- r_idx + 1);
- }
-
- if (!q_vector->rx.count)
- return IRQ_HANDLED;
-
- /* EIAM disabled interrupts (on this vector) for us */
- napi_schedule(&q_vector->napi);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
+ bool flush)
{
- struct ixgbe_q_vector *q_vector = data;
- struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *ring;
- int r_idx;
- int i;
-
- if (!q_vector->tx.count && !q_vector->rx.count)
- return IRQ_HANDLED;
+ u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
- r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->tx.count; i++) {
- ring = adapter->tx_ring[r_idx];
- r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
- r_idx + 1);
- }
+ /* don't reenable LSC while waiting for link */
+ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
+ mask &= ~IXGBE_EIMS_LSC;
- r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rx.count; i++) {
- ring = adapter->rx_ring[r_idx];
- r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
- r_idx + 1);
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ mask |= IXGBE_EIMS_GPI_SDP0;
+ break;
+ case ixgbe_mac_X540:
+ mask |= IXGBE_EIMS_TS;
+ break;
+ default:
+ break;
+ }
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
+ mask |= IXGBE_EIMS_GPI_SDP1;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ mask |= IXGBE_EIMS_GPI_SDP1;
+ mask |= IXGBE_EIMS_GPI_SDP2;
+ case ixgbe_mac_X540:
+ mask |= IXGBE_EIMS_ECC;
+ mask |= IXGBE_EIMS_MAILBOX;
+ break;
+ default:
+ break;
}
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+ !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
+ mask |= IXGBE_EIMS_FLOW_DIR;
- /* EIAM disabled interrupts (on this vector) for us */
- napi_schedule(&q_vector->napi);
-
- return IRQ_HANDLED;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+ if (queues)
+ ixgbe_irq_enable_queues(adapter, ~0);
+ if (flush)
+ IXGBE_WRITE_FLUSH(&adapter->hw);
}
-/**
- * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
- **/
-static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
+static irqreturn_t ixgbe_msix_other(int irq, void *data)
{
- struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
- struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *rx_ring = NULL;
- int work_done = 0;
- long r_idx;
+ struct ixgbe_adapter *adapter = data;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr;
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_dca(q_vector);
-#endif
+ /*
+ * Workaround for Silicon errata. Use clear-by-write instead
+ * of clear-by-read. Reading with EICS will return the
+ * interrupt causes without clearing, which later be done
+ * with the write to EICR.
+ */
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
- r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
- rx_ring = adapter->rx_ring[r_idx];
+ if (eicr & IXGBE_EICR_LSC)
+ ixgbe_check_lsc(adapter);
- ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+ if (eicr & IXGBE_EICR_MAILBOX)
+ ixgbe_msg_task(adapter);
- /* If all Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
- if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr(q_vector);
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter,
- ((u64)1 << q_vector->v_idx));
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (eicr & IXGBE_EICR_ECC)
+ e_info(link, "Received unrecoverable ECC Err, please "
+ "reboot\n");
+ /* Handle Flow Director Full threshold interrupt */
+ if (eicr & IXGBE_EICR_FLOW_DIR) {
+ int reinit_count = 0;
+ int i;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &ring->state))
+ reinit_count++;
+ }
+ if (reinit_count) {
+ /* no more flow director interrupts until after init */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
+ adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
+ ixgbe_service_event_schedule(adapter);
+ }
+ }
+ ixgbe_check_sfp_event(adapter, eicr);
+ ixgbe_check_overtemp_event(adapter, eicr);
+ break;
+ default:
+ break;
}
- return work_done;
-}
-
-/**
- * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function will clean more than one rx queue associated with a
- * q_vector.
- **/
-static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
-{
- struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
- struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *ring = NULL;
- int work_done = 0, i;
- long r_idx;
- bool tx_clean_complete = true;
-
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_dca(q_vector);
-#endif
-
- r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->tx.count; i++) {
- ring = adapter->tx_ring[r_idx];
- tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
- r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
- r_idx + 1);
- }
+ ixgbe_check_fan_failure(adapter, eicr);
- /* attempt to distribute budget to each queue fairly, but don't allow
- * the budget to go below 1 because we'll exit polling */
- budget /= (q_vector->rx.count ?: 1);
- budget = max(budget, 1);
- r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rx.count; i++) {
- ring = adapter->rx_ring[r_idx];
- ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
- r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
- r_idx + 1);
- }
-
- r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
- ring = adapter->rx_ring[r_idx];
- /* If all Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
- if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr(q_vector);
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter,
- ((u64)1 << q_vector->v_idx));
- return 0;
- }
+ /* re-enable the original interrupt state, no lsc, no queues */
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter, false, false);
- return work_done;
+ return IRQ_HANDLED;
}
-/**
- * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
- **/
-static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
+static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
{
- struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
- struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *tx_ring = NULL;
- int work_done = 0;
- long r_idx;
-
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_dca(q_vector);
-#endif
-
- r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
- tx_ring = adapter->tx_ring[r_idx];
+ struct ixgbe_q_vector *q_vector = data;
- if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
- work_done = budget;
+ /* EIAM disabled interrupts (on this vector) for us */
- /* If all Tx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
- if (adapter->tx_itr_setting & 1)
- ixgbe_set_itr(q_vector);
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter,
- ((u64)1 << q_vector->v_idx));
- }
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_schedule(&q_vector->napi);
- return work_done;
+ return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
@@ -2216,9 +2000,10 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
- set_bit(r_idx, q_vector->rx.idx);
- q_vector->rx.count++;
rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ q_vector->rx.count++;
}
static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
@@ -2227,9 +2012,10 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
- set_bit(t_idx, q_vector->tx.idx);
- q_vector->tx.count++;
tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
+ q_vector->tx.count++;
q_vector->tx.work_limit = a->tx_work_limit;
}
@@ -2243,59 +2029,41 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
* group the rings as "efficiently" as possible. You would add new
* mapping configurations in here.
**/
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
+static void ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
{
- int q_vectors;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int rxr_remaining = adapter->num_rx_queues, rxr_idx = 0;
+ int txr_remaining = adapter->num_tx_queues, txr_idx = 0;
int v_start = 0;
- int rxr_idx = 0, txr_idx = 0;
- int rxr_remaining = adapter->num_rx_queues;
- int txr_remaining = adapter->num_tx_queues;
- int i, j;
- int rqpv, tqpv;
- int err = 0;
- /* No mapping required if MSI-X is disabled. */
+ /* only one q_vector if MSI-X is disabled. */
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- goto out;
-
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ q_vectors = 1;
/*
- * The ideal configuration...
- * We have enough vectors to map one per queue.
+ * If we don't have enough vectors for a 1-to-1 mapping, we'll have to
+ * group them so there are multiple queues per vector.
+ *
+ * Re-adjusting *qpv takes care of the remainder.
*/
- if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
- for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+ for (; v_start < q_vectors && rxr_remaining; v_start++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start);
+ for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--)
map_vector_to_rxq(adapter, v_start, rxr_idx);
-
- for (; txr_idx < txr_remaining; v_start++, txr_idx++)
- map_vector_to_txq(adapter, v_start, txr_idx);
-
- goto out;
}
/*
- * If we don't have enough vectors for a 1-to-1
- * mapping, we'll have to group them so there are
- * multiple queues per vector.
+ * If there are not enough q_vectors for each ring to have it's own
+ * vector then we must pair up Rx/Tx on a each vector
*/
- /* Re-adjusting *qpv takes care of the remainder. */
- for (i = v_start; i < q_vectors; i++) {
- rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
- for (j = 0; j < rqpv; j++) {
- map_vector_to_rxq(adapter, i, rxr_idx);
- rxr_idx++;
- rxr_remaining--;
- }
- tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
- for (j = 0; j < tqpv; j++) {
- map_vector_to_txq(adapter, i, txr_idx);
- txr_idx++;
- txr_remaining--;
- }
+ if ((v_start + txr_remaining) > q_vectors)
+ v_start = 0;
+
+ for (; v_start < q_vectors && txr_remaining; v_start++) {
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start);
+ for (; tqpv; tqpv--, txr_idx++, txr_remaining--)
+ map_vector_to_txq(adapter, v_start, txr_idx);
}
-out:
- return err;
}
/**
@@ -2308,53 +2076,45 @@ out:
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- irqreturn_t (*handler)(int, void *);
- int i, vector, q_vectors, err;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int vector, err;
int ri = 0, ti = 0;
- /* Decrement for Other and TCP Timer vectors */
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
- err = ixgbe_map_rings_to_vectors(adapter);
- if (err)
- return err;
-
-#define SET_HANDLER(_v) (((_v)->rx.count && (_v)->tx.count) \
- ? &ixgbe_msix_clean_many : \
- (_v)->rx.count ? &ixgbe_msix_clean_rx : \
- (_v)->tx.count ? &ixgbe_msix_clean_tx : \
- NULL)
for (vector = 0; vector < q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
- handler = SET_HANDLER(q_vector);
+ struct msix_entry *entry = &adapter->msix_entries[vector];
- if (handler == &ixgbe_msix_clean_rx) {
+ if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "rx", ri++);
- } else if (handler == &ixgbe_msix_clean_tx) {
+ "%s-%s-%d", netdev->name, "TxRx", ri++);
+ ti++;
+ } else if (q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "tx", ti++);
- } else if (handler == &ixgbe_msix_clean_many) {
+ "%s-%s-%d", netdev->name, "rx", ri++);
+ } else if (q_vector->tx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "TxRx", ri++);
- ti++;
+ "%s-%s-%d", netdev->name, "tx", ti++);
} else {
/* skip this unused q_vector */
continue;
}
- err = request_irq(adapter->msix_entries[vector].vector,
- handler, 0, q_vector->name,
- q_vector);
+ err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
+ q_vector->name, q_vector);
if (err) {
e_err(probe, "request_irq failed for MSIX interrupt "
"Error: %d\n", err);
goto free_queue_irqs;
}
+ /* If Flow Director is enabled, set interrupt affinity */
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(entry->vector,
+ q_vector->affinity_mask);
+ }
}
- sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter);
+ ixgbe_msix_other, 0, netdev->name, adapter);
if (err) {
e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
goto free_queue_irqs;
@@ -2363,9 +2123,13 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
return 0;
free_queue_irqs:
- for (i = vector - 1; i >= 0; i--)
- free_irq(adapter->msix_entries[--vector].vector,
- adapter->q_vector[i]);
+ while (vector) {
+ vector--;
+ irq_set_affinity_hint(adapter->msix_entries[vector].vector,
+ NULL);
+ free_irq(adapter->msix_entries[vector].vector,
+ adapter->q_vector[vector]);
+ }
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
@@ -2374,47 +2138,6 @@ free_queue_irqs:
}
/**
- * ixgbe_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- **/
-static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
- bool flush)
-{
- u32 mask;
-
- mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
- if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
- mask |= IXGBE_EIMS_GPI_SDP0;
- if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
- mask |= IXGBE_EIMS_GPI_SDP1;
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- mask |= IXGBE_EIMS_ECC;
- mask |= IXGBE_EIMS_GPI_SDP1;
- mask |= IXGBE_EIMS_GPI_SDP2;
- if (adapter->num_vfs)
- mask |= IXGBE_EIMS_MAILBOX;
- break;
- default:
- break;
- }
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- mask |= IXGBE_EIMS_FLOW_DIR;
-
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
- if (queues)
- ixgbe_irq_enable_queues(adapter, ~0);
- if (flush)
- IXGBE_WRITE_FLUSH(&adapter->hw);
-
- if (adapter->num_vfs > 32) {
- u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
- }
-}
-
-/**
* ixgbe_intr - legacy mode Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
@@ -2454,14 +2177,12 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
- if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
- ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
- if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
- adapter->interrupt_event = eicr;
- adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
- ixgbe_service_event_schedule(adapter);
- }
- }
+ /* Fall through */
+ case ixgbe_mac_X540:
+ if (eicr & IXGBE_EICR_ECC)
+ e_info(link, "Received unrecoverable ECC err, please "
+ "reboot\n");
+ ixgbe_check_overtemp_event(adapter, eicr);
break;
default:
break;
@@ -2487,14 +2208,26 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
- int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int i;
+
+ /* legacy and MSI only use one vector */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ q_vectors = 1;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->rx_ring[i]->q_vector = NULL;
+ adapter->rx_ring[i]->next = NULL;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ adapter->tx_ring[i]->q_vector = NULL;
+ adapter->tx_ring[i]->next = NULL;
+ }
for (i = 0; i < q_vectors; i++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- bitmap_zero(q_vector->rx.idx, MAX_RX_QUEUES);
- bitmap_zero(q_vector->tx.idx, MAX_TX_QUEUES);
- q_vector->rx.count = 0;
- q_vector->tx.count = 0;
+ memset(&q_vector->rx, 0, sizeof(struct ixgbe_ring_container));
+ memset(&q_vector->tx, 0, sizeof(struct ixgbe_ring_container));
}
}
@@ -2510,19 +2243,25 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int err;
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ /* map all of the rings to the q_vectors */
+ ixgbe_map_rings_to_vectors(adapter);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
err = ixgbe_request_msix_irqs(adapter);
- } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+ else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
netdev->name, adapter);
- } else {
+ else
err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
netdev->name, adapter);
- }
- if (err)
+ if (err) {
e_err(probe, "request_irq failed, Error %d\n", err);
+ /* place q_vectors and rings back into a known good state */
+ ixgbe_reset_q_vectors(adapter);
+ }
+
return err;
}
@@ -2532,25 +2271,29 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
int i, q_vectors;
q_vectors = adapter->num_msix_vectors;
-
i = q_vectors - 1;
free_irq(adapter->msix_entries[i].vector, adapter);
-
i--;
+
for (; i >= 0; i--) {
/* free only the irqs that were actually requested */
- if (!adapter->q_vector[i]->rx.count &&
- !adapter->q_vector[i]->tx.count)
+ if (!adapter->q_vector[i]->rx.ring &&
+ !adapter->q_vector[i]->tx.ring)
continue;
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(adapter->msix_entries[i].vector,
+ NULL);
+
free_irq(adapter->msix_entries[i].vector,
adapter->q_vector[i]);
}
-
- ixgbe_reset_q_vectors(adapter);
} else {
free_irq(adapter->pdev->irq, adapter);
}
+
+ /* clear q_vector state information */
+ ixgbe_reset_q_vectors(adapter);
}
/**
@@ -2568,8 +2311,6 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
- if (adapter->num_vfs > 32)
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
break;
default:
break;
@@ -2590,17 +2331,19 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
**/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
- IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
- EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
+ /* rx/tx vector */
+ if (adapter->rx_itr_setting == 1)
+ q_vector->itr = IXGBE_20K_ITR;
+ else
+ q_vector->itr = adapter->rx_itr_setting;
+
+ ixgbe_write_eitr(q_vector);
ixgbe_set_ivar(adapter, 0, 0, 0);
ixgbe_set_ivar(adapter, 1, 0, 0);
- map_vector_to_rxq(adapter, 0, 0);
- map_vector_to_txq(adapter, 0, 0);
-
e_info(hw, "Legacy interrupt IVAR setup done\n");
}
@@ -2617,13 +2360,11 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
u64 tdba = ring->dma;
int wait_loop = 10;
- u32 txdctl;
+ u32 txdctl = IXGBE_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
- txdctl & ~IXGBE_TXDCTL_ENABLE);
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
@@ -2635,18 +2376,22 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
- /* configure fetching thresholds */
- if (adapter->rx_itr_setting == 0) {
- /* cannot set wthresh when itr==0 */
- txdctl &= ~0x007F0000;
- } else {
- /* enable WTHRESH=8 descriptors, to encourage burst writeback */
- txdctl |= (8 << 16);
- }
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- /* PThresh workaround for Tx hang with DFP enabled. */
- txdctl |= 32;
- }
+ /*
+ * set WTHRESH to encourage burst writeback, it should not be set
+ * higher than 1 when ITR is 0 as it could cause false TX hangs
+ *
+ * In order to avoid issues WTHRESH + PTHRESH should always be equal
+ * to or less than the number of on chip descriptors, which is
+ * currently 40.
+ */
+ if (!adapter->tx_itr_setting || !adapter->rx_itr_setting)
+ txdctl |= (1 << 16); /* WTHRESH = 1 */
+ else
+ txdctl |= (8 << 16); /* WTHRESH = 8 */
+
+ /* PTHRESH=32 is needed to avoid a Tx hang with DFP enabled. */
+ txdctl |= (1 << 8) | /* HTHRESH = 1 */
+ 32; /* PTHRESH = 32 */
/* reinitialize flowdirector state */
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
@@ -2661,7 +2406,6 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
/* enable queue */
- txdctl |= IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
@@ -2899,9 +2643,9 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
#endif
} else {
- if (rx_buf_len < IXGBE_RXBUFFER_4096)
+ if (rx_buf_len < IXGBE_RXBUFFER_4K)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
- else if (rx_buf_len < IXGBE_RXBUFFER_8192)
+ else if (rx_buf_len < IXGBE_RXBUFFER_8K)
rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
else
rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
@@ -3072,6 +2816,7 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
u32 vt_reg_bits;
u32 reg_offset, vf_shift;
u32 vmdctl;
+ int i;
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return;
@@ -3107,9 +2852,13 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
/* Enable MAC Anti-Spoofing */
hw->mac.ops.set_mac_anti_spoofing(hw,
- (adapter->antispoofing_enabled =
- (adapter->num_vfs != 0)),
+ (adapter->num_vfs != 0),
adapter->num_vfs);
+ /* For VFs that have spoof checking turned off */
+ for (i = 0; i < adapter->num_vfs; i++) {
+ if (!adapter->vfinfo[i].spoofchk_enabled)
+ ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
+ }
}
static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
@@ -3134,17 +2883,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82599EB)
adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
- /* Set the RX buffer length according to the mode */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- rx_buf_len = IXGBE_RX_HDR_SIZE;
- } else {
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
- (netdev->mtu <= ETH_DATA_LEN))
- rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- else
- rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
- }
-
#ifdef IXGBE_FCOE
/* adjust max frame to be able to do baby jumbo for FCoE */
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
@@ -3160,6 +2898,30 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
}
+ /* MHADD will allow an extra 4 bytes past for vlan tagged frames */
+ max_frame += VLAN_HLEN;
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_buf_len = IXGBE_RX_HDR_SIZE;
+ } else {
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+ (netdev->mtu <= ETH_DATA_LEN))
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ /*
+ * Make best use of allocation by using all but 1K of a
+ * power of 2 allocation that will be used for skb->head.
+ */
+ else if (max_frame <= IXGBE_RXBUFFER_3K)
+ rx_buf_len = IXGBE_RXBUFFER_3K;
+ else if (max_frame <= IXGBE_RXBUFFER_7K)
+ rx_buf_len = IXGBE_RXBUFFER_7K;
+ else if (max_frame <= IXGBE_RXBUFFER_15K)
+ rx_buf_len = IXGBE_RXBUFFER_15K;
+ else
+ rx_buf_len = IXGBE_MAX_RXBUFFER;
+ }
+
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
@@ -3533,19 +3295,8 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
q_vectors = 1;
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- struct napi_struct *napi;
q_vector = adapter->q_vector[q_idx];
- napi = &q_vector->napi;
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- if (!q_vector->rx.count || !q_vector->tx.count) {
- if (q_vector->tx.count == 1)
- napi->poll = &ixgbe_clean_txonly;
- else if (q_vector->rx.count == 1)
- napi->poll = &ixgbe_clean_rxonly;
- }
- }
-
- napi_enable(napi);
+ napi_enable(&q_vector->napi);
}
}
@@ -3596,7 +3347,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
/* reconfigure the hardware */
if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
-#ifdef CONFIG_FCOE
+#ifdef IXGBE_FCOE
if (adapter->netdev->features & NETIF_F_FCOE_MTU)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
@@ -3608,12 +3359,20 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
} else {
struct net_device *dev = adapter->netdev;
- if (adapter->ixgbe_ieee_ets)
- dev->dcbnl_ops->ieee_setets(dev,
- adapter->ixgbe_ieee_ets);
- if (adapter->ixgbe_ieee_pfc)
- dev->dcbnl_ops->ieee_setpfc(dev,
- adapter->ixgbe_ieee_pfc);
+ if (adapter->ixgbe_ieee_ets) {
+ struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
+ int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
+ }
+
+ if (adapter->ixgbe_ieee_pfc) {
+ struct ieee_pfc *pfc = adapter->ixgbe_ieee_pfc;
+ u8 *prio_tc = adapter->ixgbe_ieee_ets->prio_tc;
+
+ ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en,
+ prio_tc);
+ }
}
/* Enable RSS Hash per TC */
@@ -3633,20 +3392,142 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
}
}
+#endif
+
+/* Additional bittime to account for IXGBE framing */
+#define IXGBE_ETH_FRAMING 20
+
+/*
+ * ixgbe_hpbthresh - calculate high water mark for flow control
+ *
+ * @adapter: board private structure to calculate for
+ * @pb - packet buffer to calculate
+ */
+static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *dev = adapter->netdev;
+ int link, tc, kb, marker;
+ u32 dv_id, rx_pba;
+
+ /* Calculate max LAN frame size */
+ tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
+
+#ifdef IXGBE_FCOE
+ /* FCoE traffic class uses FCOE jumbo frames */
+ if (dev->features & NETIF_F_FCOE_MTU) {
+ int fcoe_pb = 0;
+
+#ifdef CONFIG_IXGBE_DCB
+ fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
#endif
+ if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE)
+ tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ }
+#endif
-static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
+ /* Calculate delay value for device */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ dv_id = IXGBE_DV_X540(link, tc);
+ break;
+ default:
+ dv_id = IXGBE_DV(link, tc);
+ break;
+ }
+
+ /* Loopback switch introduces additional latency */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ dv_id += IXGBE_B2BT(tc);
+
+ /* Delay value is calculated in bit times convert to KB */
+ kb = IXGBE_BT2KB(dv_id);
+ rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
+
+ marker = rx_pba - kb;
+
+ /* It is possible that the packet buffer is not large enough
+ * to provide required headroom. In this case throw an error
+ * to user and a do the best we can.
+ */
+ if (marker < 0) {
+ e_warn(drv, "Packet Buffer(%i) can not provide enough"
+ "headroom to support flow control."
+ "Decrease MTU or number of traffic classes\n", pb);
+ marker = tc + 1;
+ }
+
+ return marker;
+}
+
+/*
+ * ixgbe_lpbthresh - calculate low water mark for for flow control
+ *
+ * @adapter: board private structure to calculate for
+ * @pb - packet buffer to calculate
+ */
+static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *dev = adapter->netdev;
+ int tc;
+ u32 dv_id;
+
+ /* Calculate max LAN frame size */
+ tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* Calculate delay value for device */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ dv_id = IXGBE_LOW_DV_X540(tc);
+ break;
+ default:
+ dv_id = IXGBE_LOW_DV(tc);
+ break;
+ }
+
+ /* Delay value is calculated in bit times convert to KB */
+ return IXGBE_BT2KB(dv_id);
+}
+
+/*
+ * ixgbe_pbthresh_setup - calculate and setup high low water marks
+ */
+static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
{
- int hdrm = 0;
+ struct ixgbe_hw *hw = &adapter->hw;
int num_tc = netdev_get_num_tc(adapter->netdev);
+ int i;
+
+ if (!num_tc)
+ num_tc = 1;
+
+ hw->fc.low_water = ixgbe_lpbthresh(adapter);
+
+ for (i = 0; i < num_tc; i++) {
+ hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
+
+ /* Low water marks must not be larger than high water marks */
+ if (hw->fc.low_water > hw->fc.high_water[i])
+ hw->fc.low_water = 0;
+ }
+}
+
+static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
+{
struct ixgbe_hw *hw = &adapter->hw;
+ int hdrm;
+ u8 tc = netdev_get_num_tc(adapter->netdev);
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
- hdrm = 64 << adapter->fdir_pballoc;
+ hdrm = 32 << adapter->fdir_pballoc;
+ else
+ hdrm = 0;
- hw->mac.ops.set_rxpba(&adapter->hw, num_tc, hdrm, PBA_STRATEGY_EQUAL);
+ hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
+ ixgbe_pbthresh_setup(adapter);
}
static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
@@ -3675,16 +3556,12 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- struct ixgbe_hw *hw = &adapter->hw;
- int i;
-
ixgbe_configure_pb(adapter);
#ifdef CONFIG_IXGBE_DCB
ixgbe_configure_dcb(adapter);
#endif
- ixgbe_set_rx_mode(netdev);
+ ixgbe_set_rx_mode(adapter->netdev);
ixgbe_restore_vlan(adapter);
#ifdef IXGBE_FCOE
@@ -3693,15 +3570,14 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->atr_sample_rate =
- adapter->atr_sample_rate;
- ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
+ ixgbe_init_fdir_signature_82599(&adapter->hw,
+ adapter->fdir_pballoc);
} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
ixgbe_init_fdir_perfect_82599(&adapter->hw,
adapter->fdir_pballoc);
ixgbe_fdir_filter_restore(adapter);
}
+
ixgbe_configure_virtualization(adapter);
ixgbe_configure_tx(adapter);
@@ -3720,6 +3596,9 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_sfp_active_unknown:
case ixgbe_phy_sfp_ftl_active:
return true;
+ case ixgbe_phy_nl:
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return true;
default:
return false;
}
@@ -3812,6 +3691,20 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
gpie |= IXGBE_GPIE_VTMODE_64;
}
+ /* Enable Thermal over heat sensor interrupt */
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ gpie |= IXGBE_SDP0_GPIEN;
+ break;
+ case ixgbe_mac_X540:
+ gpie |= IXGBE_EIMS_TS;
+ break;
+ default:
+ break;
+ }
+ }
+
/* Enable fan failure interrupt */
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
gpie |= IXGBE_SDP1_GPIEN;
@@ -3824,7 +3717,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
}
-static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
+static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int err;
@@ -3883,8 +3776,6 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
-
- return 0;
}
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
@@ -3908,12 +3799,12 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
clear_bit(__IXGBE_RESETTING, &adapter->state);
}
-int ixgbe_up(struct ixgbe_adapter *adapter)
+void ixgbe_up(struct ixgbe_adapter *adapter)
{
/* hardware has been reset, we need to reload some things */
ixgbe_configure(adapter);
- return ixgbe_up_complete(adapter);
+ ixgbe_up_complete(adapter);
}
void ixgbe_reset(struct ixgbe_adapter *adapter)
@@ -4100,7 +3991,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 rxctrl;
int i;
- int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
/* signal that we are down to the interrupt handler */
set_bit(__IXGBE_DOWN, &adapter->state);
@@ -4132,26 +4022,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
del_timer_sync(&adapter->service_timer);
- /* disable receive for all VFs and wait one second */
if (adapter->num_vfs) {
- /* ping all the active vfs to let them know we are going down */
- ixgbe_ping_all_vfs(adapter);
-
- /* Disable all VFTE/VFRE TX/RX */
- ixgbe_disable_tx_rx(adapter);
+ /* Clear EITR Select mapping */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
/* Mark all the VFs as inactive */
for (i = 0 ; i < adapter->num_vfs; i++)
adapter->vfinfo[i].clear_to_send = 0;
- }
- /* Cleanup the affinity_hint CPU mask memory and callback */
- for (i = 0; i < num_q_vectors; i++) {
- struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(adapter->msix_entries[i]. vector, NULL);
- /* release the CPU mask memory */
- free_cpumask_var(q_vector->affinity_mask);
+ /* ping all the active vfs to let them know we are going down */
+ ixgbe_ping_all_vfs(adapter);
+
+ /* Disable all VFTE/VFRE TX/RX */
+ ixgbe_disable_tx_rx(adapter);
}
/* disable transmits in the hardware now that interrupts are off */
@@ -4203,28 +4086,41 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
struct ixgbe_q_vector *q_vector =
container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
- int tx_clean_complete, work_done = 0;
+ struct ixgbe_ring *ring;
+ int per_ring_budget;
+ bool clean_complete = true;
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_dca(q_vector);
#endif
- tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
- ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
+ for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
- if (!tx_clean_complete)
- work_done = budget;
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ if (q_vector->rx.count > 1)
+ per_ring_budget = max(budget/q_vector->rx.count, 1);
+ else
+ per_ring_budget = budget;
- /* If budget not fully consumed, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
- if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr(q_vector);
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
- }
- return work_done;
+ for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
+ per_ring_budget);
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* all work done, exit the polling mode */
+ napi_complete(napi);
+ if (adapter->rx_itr_setting & 1)
+ ixgbe_set_itr(q_vector);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+
+ return 0;
}
/**
@@ -4352,7 +4248,6 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
q = min((int)num_online_cpus(), per_tc_q);
for (i = 0; i < tcs; i++) {
- netdev_set_prio_tc_map(dev, i, i);
netdev_set_tc_queue(dev, i, q, offset);
offset += q;
}
@@ -4536,7 +4431,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- if (num_tcs == 8) {
+ if (num_tcs > 4) {
if (tc < 3) {
*tx = tc << 5;
*rx = tc << 4;
@@ -4547,7 +4442,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
*tx = ((tc + 8) << 3);
*rx = tc << 4;
}
- } else if (num_tcs == 4) {
+ } else {
*rx = tc << 5;
switch (tc) {
case 0:
@@ -4865,19 +4760,15 @@ out:
**/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
- int q_idx, num_q_vectors;
+ int v_idx, num_q_vectors;
struct ixgbe_q_vector *q_vector;
- int (*poll)(struct napi_struct *, int);
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- poll = &ixgbe_clean_rxtx_many;
- } else {
+ else
num_q_vectors = 1;
- poll = &ixgbe_poll;
- }
- for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
GFP_KERNEL, adapter->node);
if (!q_vector)
@@ -4885,25 +4776,29 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
GFP_KERNEL);
if (!q_vector)
goto err_out;
+
q_vector->adapter = adapter;
- if (q_vector->tx.count && !q_vector->rx.count)
- q_vector->eitr = adapter->tx_eitr_param;
- else
- q_vector->eitr = adapter->rx_eitr_param;
- q_vector->v_idx = q_idx;
- netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
- adapter->q_vector[q_idx] = q_vector;
+ q_vector->v_idx = v_idx;
+
+ /* Allocate the affinity_hint cpumask, configure the mask */
+ if (!alloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
+ goto err_out;
+ cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ ixgbe_poll, 64);
+ adapter->q_vector[v_idx] = q_vector;
}
return 0;
err_out:
- while (q_idx) {
- q_idx--;
- q_vector = adapter->q_vector[q_idx];
+ while (v_idx) {
+ v_idx--;
+ q_vector = adapter->q_vector[v_idx];
netif_napi_del(&q_vector->napi);
+ free_cpumask_var(q_vector->affinity_mask);
kfree(q_vector);
- adapter->q_vector[q_idx] = NULL;
+ adapter->q_vector[v_idx] = NULL;
}
return -ENOMEM;
}
@@ -4918,17 +4813,18 @@ err_out:
**/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
- int q_idx, num_q_vectors;
+ int v_idx, num_q_vectors;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
else
num_q_vectors = 1;
- for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
- adapter->q_vector[q_idx] = NULL;
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
+ adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
+ free_cpumask_var(q_vector->affinity_mask);
kfree(q_vector);
}
}
@@ -5043,13 +4939,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- struct net_device *dev = adapter->netdev;
unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
int j;
struct tc_configuration *tc;
#endif
- int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
/* PCI config space info */
@@ -5069,8 +4963,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
break;
- case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+ case ixgbe_mac_82599EB:
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -5100,6 +4995,17 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
spin_lock_init(&adapter->fdir_perfect_lock);
#ifdef CONFIG_IXGBE_DCB
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
+ break;
+ default:
+ adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+ break;
+ }
+
/* Configure DCB traffic classes */
for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
tc = &adapter->dcb_cfg.tc_config[j];
@@ -5109,6 +5015,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
tc->dcb_pfc = pfc_disabled;
}
+
+ /* Initialize default user to priority mapping, UPx->TC0 */
+ tc = &adapter->dcb_cfg.tc_config[0];
+ tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+ tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+
adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
adapter->dcb_cfg.pfc_mode_enable = false;
@@ -5125,17 +5037,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_DCB
adapter->last_lfc_mode = hw->fc.current_mode;
#endif
- hw->fc.high_water = FC_HIGH_WATER(max_frame);
- hw->fc.low_water = FC_LOW_WATER(max_frame);
+ ixgbe_pbthresh_setup(adapter);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
hw->fc.disable_fc_autoneg = false;
/* enable itr by default in dynamic mode */
adapter->rx_itr_setting = 1;
- adapter->rx_eitr_param = 20000;
adapter->tx_itr_setting = 1;
- adapter->tx_eitr_param = 10000;
/* set defaults for eitr in MegaBytes */
adapter->eitr_low = 10;
@@ -5146,7 +5055,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
/* set default work limits */
- adapter->tx_work_limit = adapter->tx_ring_count;
+ adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
/* initialize eeprom parameters */
if (ixgbe_init_eeprom_params_generic(hw)) {
@@ -5395,9 +5304,6 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
- hw->fc.high_water = FC_HIGH_WATER(max_frame);
- hw->fc.low_water = FC_LOW_WATER(max_frame);
-
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -5443,17 +5349,10 @@ static int ixgbe_open(struct net_device *netdev)
if (err)
goto err_req_irq;
- err = ixgbe_up_complete(adapter);
- if (err)
- goto err_up;
-
- netif_tx_start_all_queues(netdev);
+ ixgbe_up_complete(adapter);
return 0;
-err_up:
- ixgbe_release_hw_control(adapter);
- ixgbe_free_irq(adapter);
err_req_irq:
err_setup_rx:
ixgbe_free_all_rx_resources(adapter);
@@ -5658,6 +5557,11 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
u64 bytes = 0, packets = 0;
+#ifdef IXGBE_FCOE
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ unsigned int cpu;
+ u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
+#endif /* IXGBE_FCOE */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5707,20 +5611,21 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
netdev->stats.tx_packets = packets;
hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+
+ /* 8 register reads */
for (i = 0; i < 8; i++) {
/* for packet buffers not used, the register should read 0 */
mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
missed_rx += mpc;
hwstats->mpc[i] += mpc;
total_mpc += hwstats->mpc[i];
- if (hw->mac.type == ixgbe_mac_82598EB)
- hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
- hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
- hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
- hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+ hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
+ hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
break;
@@ -5732,9 +5637,21 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
default:
break;
}
- hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
- hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
}
+
+ /*16 register reads */
+ for (i = 0; i < 16; i++) {
+ hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ if ((hw->mac.type == ixgbe_mac_82599EB) ||
+ (hw->mac.type == ixgbe_mac_X540)) {
+ hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
+ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
+ }
+ }
+
hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
/* work around hardware counting issue */
hwstats->gprc -= missed_rx;
@@ -5772,6 +5689,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+ /* Add up per cpu counters for total ddp aloc fail */
+ if (fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) {
+ for_each_possible_cpu(cpu) {
+ fcoe_noddp_counts_sum +=
+ *per_cpu_ptr(fcoe->pcpu_noddp, cpu);
+ fcoe_noddp_ext_buff_counts_sum +=
+ *per_cpu_ptr(fcoe->
+ pcpu_noddp_ext_buff, cpu);
+ }
+ }
+ hwstats->fcoe_noddp = fcoe_noddp_counts_sum;
+ hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum;
#endif /* IXGBE_FCOE */
break;
default:
@@ -5794,7 +5723,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->lxontxc += lxon;
lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
hwstats->lxofftxc += lxoff;
- hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
/*
@@ -5902,7 +5830,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/* get one bit for every active tx/rx interrupt vector */
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
struct ixgbe_q_vector *qv = adapter->q_vector[i];
- if (qv->rx.count || qv->tx.count)
+ if (qv->rx.ring || qv->tx.ring)
eics |= ((u64)1 << i);
}
}
@@ -6091,7 +6019,8 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
/* if interface is down do nothing */
- if (test_bit(__IXGBE_DOWN, &adapter->state))
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
return;
ixgbe_watchdog_update_link(adapter);
@@ -6205,6 +6134,51 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}
+#ifdef CONFIG_PCI_IOV
+static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
+{
+ int vf;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 gpc;
+ u32 ciaa, ciad;
+
+ gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
+ if (gpc) /* If incrementing then no need for the check below */
+ return;
+ /*
+ * Check to see if a bad DMA write target from an errant or
+ * malicious VF has caused a PCIe error. If so then we can
+ * issue a VFLR to the offending VF(s) and then resume without
+ * requesting a full slot reset.
+ */
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ ciaa = (vf << 16) | 0x80000000;
+ /* 32 bit read so align, we really want status at offset 6 */
+ ciaa |= PCI_COMMAND;
+ IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
+ ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599);
+ ciaa &= 0x7FFFFFFF;
+ /* disable debug mode asap after reading data */
+ IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
+ /* Get the upper 16 bits which will be the PCI status reg */
+ ciad >>= 16;
+ if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
+ netdev_err(netdev, "VF %d Hung DMA\n", vf);
+ /* Issue VFLR */
+ ciaa = (vf << 16) | 0x80000000;
+ ciaa |= 0xA8;
+ IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
+ ciad = 0x00008000; /* VFLR */
+ IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad);
+ ciaa &= 0x7FFFFFFF;
+ IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
+ }
+ }
+}
+
+#endif
/**
* ixgbe_service_timer - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
@@ -6213,17 +6187,49 @@ static void ixgbe_service_timer(unsigned long data)
{
struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
unsigned long next_event_offset;
+ bool ready = true;
+
+#ifdef CONFIG_PCI_IOV
+ ready = false;
+
+ /*
+ * don't bother with SR-IOV VF DMA hang check if there are
+ * no VFs or the link is down
+ */
+ if (!adapter->num_vfs ||
+ (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) {
+ ready = true;
+ goto normal_timer_service;
+ }
+
+ /* If we have VFs allocated then we must check for DMA hangs */
+ ixgbe_check_for_bad_vf(adapter);
+ next_event_offset = HZ / 50;
+ adapter->timer_event_accumulator++;
+
+ if (adapter->timer_event_accumulator >= 100) {
+ ready = true;
+ adapter->timer_event_accumulator = 0;
+ }
+ goto schedule_event;
+
+normal_timer_service:
+#endif
/* poll faster when waiting for link */
if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
next_event_offset = HZ / 10;
else
next_event_offset = HZ * 2;
+#ifdef CONFIG_PCI_IOV
+schedule_event:
+#endif
/* Reset the timer */
mod_timer(&adapter->service_timer, next_event_offset + jiffies);
- ixgbe_service_event_schedule(adapter);
+ if (ready)
+ ixgbe_service_event_schedule(adapter);
}
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
@@ -6350,7 +6356,8 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- if (!(tx_flags & IXGBE_TX_FLAGS_VLAN))
+ if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
+ !(tx_flags & IXGBE_TX_FLAGS_TXSW))
return false;
} else {
u8 l4_hdr = 0;
@@ -6407,185 +6414,185 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
return (skb->ip_summed == CHECKSUM_PARTIAL);
}
-static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags,
- unsigned int first, const u8 hdr_len)
+static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
{
- struct device *dev = tx_ring->dev;
- struct ixgbe_tx_buffer *tx_buffer_info;
- unsigned int len;
- unsigned int total = skb->len;
- unsigned int offset = 0, size, count = 0;
- unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int f;
- unsigned int bytecount = skb->len;
- u16 gso_segs = 1;
- u16 i;
+ /* set type for advanced descriptor with frame checksum insertion */
+ __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS |
+ IXGBE_ADVTXD_DCMD_DEXT);
- i = tx_ring->next_to_use;
+ /* set HW vlan bit if vlan is present */
+ if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
- if (tx_flags & IXGBE_TX_FLAGS_FCOE)
- /* excluding fcoe_crc_eof for FCoE */
- total -= sizeof(struct fcoe_crc_eof);
+ /* set segmentation enable bits for TSO/FSO */
+#ifdef IXGBE_FCOE
+ if ((tx_flags & IXGBE_TX_FLAGS_TSO) || (tx_flags & IXGBE_TX_FLAGS_FSO))
+#else
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+#endif
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
- len = min(skb_headlen(skb), total);
- while (len) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
-
- tx_buffer_info->length = size;
- tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(dev,
- skb->data + offset,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, tx_buffer_info->dma))
- goto dma_error;
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
+ return cmd_type;
+}
- len -= size;
- total -= size;
- offset += size;
- count++;
+static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
+{
+ __le32 olinfo_status =
+ cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
- if (len) {
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
+ if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM |
+ (1 << IXGBE_ADVTXD_IDX_SHIFT));
+ /* enble IPv4 checksum for TSO */
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
}
- for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
- len = min((unsigned int)frag->size, total);
- offset = frag->page_offset;
-
- while (len) {
- i++;
- if (i == tx_ring->count)
- i = 0;
-
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
-
- tx_buffer_info->length = size;
- tx_buffer_info->dma = dma_map_page(dev,
- frag->page,
- offset, size,
- DMA_TO_DEVICE);
- tx_buffer_info->mapped_as_page = true;
- if (dma_mapping_error(dev, tx_buffer_info->dma))
- goto dma_error;
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
-
- len -= size;
- total -= size;
- offset += size;
- count++;
- }
- if (total == 0)
- break;
- }
+ /* enable L4 checksum for TSO and TX checksum offload */
+ if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
- if (tx_flags & IXGBE_TX_FLAGS_TSO)
- gso_segs = skb_shinfo(skb)->gso_segs;
#ifdef IXGBE_FCOE
- /* adjust for FCoE Sequence Offload */
- else if (tx_flags & IXGBE_TX_FLAGS_FSO)
- gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
- skb_shinfo(skb)->gso_size);
-#endif /* IXGBE_FCOE */
- bytecount += (gso_segs - 1) * hdr_len;
+ /* use index 1 context for FCOE/FSO */
+ if (tx_flags & IXGBE_TX_FLAGS_FCOE)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC |
+ (1 << IXGBE_ADVTXD_IDX_SHIFT));
- /* multiply data chunks by size of headers */
- tx_ring->tx_buffer_info[i].bytecount = bytecount;
- tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
- tx_ring->tx_buffer_info[i].skb = skb;
- tx_ring->tx_buffer_info[first].next_to_watch = i;
+#endif
+ /*
+ * Check Context must be set if Tx switch is enabled, which it
+ * always is for case where virtual functions are running
+ */
+ if (tx_flags & IXGBE_TX_FLAGS_TXSW)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
- return count;
+ return olinfo_status;
+}
-dma_error:
- e_dev_err("TX DMA map failed\n");
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
+ IXGBE_TXD_CMD_RS)
- /* clear timestamp and dma mappings for failed tx_buffer_info map */
- tx_buffer_info->dma = 0;
- tx_buffer_info->time_stamp = 0;
- tx_buffer_info->next_to_watch = 0;
- if (count)
- count--;
+static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
+ struct sk_buff *skb,
+ struct ixgbe_tx_buffer *first,
+ u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct device *dev = tx_ring->dev;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ union ixgbe_adv_tx_desc *tx_desc;
+ dma_addr_t dma;
+ __le32 cmd_type, olinfo_status;
+ struct skb_frag_struct *frag;
+ unsigned int f = 0;
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ u32 offset = 0;
+ u32 paylen = skb->len - hdr_len;
+ u16 i = tx_ring->next_to_use;
+ u16 gso_segs;
- /* clear timestamp and dma mappings for remaining portion of packet */
- while (count--) {
- if (i == 0)
- i += tx_ring->count;
- i--;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+#ifdef IXGBE_FCOE
+ if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+ if (data_len >= sizeof(struct fcoe_crc_eof)) {
+ data_len -= sizeof(struct fcoe_crc_eof);
+ } else {
+ size -= sizeof(struct fcoe_crc_eof) - data_len;
+ data_len = 0;
+ }
}
- return 0;
-}
+#endif
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
-static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
- int tx_flags, int count, u32 paylen, u8 hdr_len)
-{
- union ixgbe_adv_tx_desc *tx_desc = NULL;
- struct ixgbe_tx_buffer *tx_buffer_info;
- u32 olinfo_status = 0, cmd_type_len = 0;
- unsigned int i;
- u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+ cmd_type = ixgbe_tx_cmd_type(tx_flags);
+ olinfo_status = ixgbe_tx_olinfo_status(tx_flags, paylen);
- cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
- cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+ for (;;) {
+ while (size > IXGBE_MAX_DATA_PER_TXD) {
+ tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+ tx_desc->read.cmd_type_len =
+ cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+ tx_desc->read.olinfo_status = olinfo_status;
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+ offset += IXGBE_MAX_DATA_PER_TXD;
+ size -= IXGBE_MAX_DATA_PER_TXD;
- if (tx_flags & IXGBE_TX_FLAGS_TSO) {
- cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+ i = 0;
+ }
+ }
- olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ tx_buffer_info->length = offset + size;
+ tx_buffer_info->tx_flags = tx_flags;
+ tx_buffer_info->dma = dma;
- /* use index 1 context for tso */
- olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
- if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+ tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+ tx_desc->read.olinfo_status = olinfo_status;
- } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
- olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ if (!data_len)
+ break;
- if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
- olinfo_status |= IXGBE_ADVTXD_CC;
- olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
- if (tx_flags & IXGBE_TX_FLAGS_FSO)
- cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
- }
+ frag = &skb_shinfo(skb)->frags[f];
+#ifdef IXGBE_FCOE
+ size = min_t(unsigned int, data_len, skb_frag_size(frag));
+#else
+ size = skb_frag_size(frag);
+#endif
+ data_len -= size;
+ f++;
- olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ offset = 0;
+ tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE;
- i = tx_ring->next_to_use;
- while (count--) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
- tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
- tx_desc->read.cmd_type_len =
- cpu_to_le32(cmd_type_len | tx_buffer_info->length);
- tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ tx_desc++;
i++;
- if (i == tx_ring->count)
+ if (i == tx_ring->count) {
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
i = 0;
+ }
}
- tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+ tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+ /* adjust for FCoE Sequence Offload */
+ else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+ gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+ skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len);
+ tx_buffer_info->gso_segs = gso_segs;
+ tx_buffer_info->skb = skb;
+
+ /* set the timestamp */
+ first->time_stamp = jiffies;
/*
* Force memory writes to complete before letting h/w
@@ -6595,8 +6602,30 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
*/
wmb();
- tx_ring->next_to_use = i;
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ /* notify HW of packet */
writel(i, tx_ring->tail);
+
+ return;
+dma_error:
+ dev_err(dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ for (;;) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
+ if (tx_buffer_info == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ tx_ring->next_to_use = i;
}
static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
@@ -6635,8 +6664,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
th = tcp_hdr(skb);
- /* skip this packet since the socket is closing */
- if (th->fin)
+ /* skip this packet since it is invalid or the socket is closing */
+ if (!th || th->fin)
return;
/* sample on all syn packets or once every atr sample count */
@@ -6661,7 +6690,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
* since src port and flex bytes occupy the same word XOR them together
* and write the value to source port portion of compressed dword
*/
- if (vlan_id)
+ if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
else
common.port.src ^= th->dest ^ protocol;
@@ -6743,14 +6772,14 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
+ struct ixgbe_tx_buffer *first;
int tso;
- u32 tx_flags = 0;
+ u32 tx_flags = 0;
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
unsigned short f;
#endif
- u16 first;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
- __be16 protocol;
+ __be16 protocol = skb->protocol;
u8 hdr_len = 0;
/*
@@ -6771,68 +6800,89 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- protocol = vlan_get_protocol(skb);
+#ifdef CONFIG_PCI_IOV
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ tx_flags |= IXGBE_TX_FLAGS_TXSW;
+#endif
+ /* if we have a HW VLAN tag being added default to the HW one */
if (vlan_tx_tag_present(skb)) {
- tx_flags |= vlan_tx_tag_get(skb);
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
- tx_flags |= tx_ring->dcb_tc << 13;
+ tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
+ /* else if it is a SW VLAN check the next protocol and store the tag */
+ } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+ if (!vhdr)
+ goto out_drop;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
+ }
+
+ /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
+ if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+ ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
+ (skb->priority != TC_PRIO_CONTROL))) {
+ tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
+ tx_flags |= (skb->priority & 0x7) <<
+ IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
+ if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
+ struct vlan_ethhdr *vhdr;
+ if (skb_header_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ goto out_drop;
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI = htons(tx_flags >>
+ IXGBE_TX_FLAGS_VLAN_SHIFT);
+ } else {
+ tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
}
- tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
- tx_flags |= IXGBE_TX_FLAGS_VLAN;
- } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
- skb->priority != TC_PRIO_CONTROL) {
- tx_flags |= tx_ring->dcb_tc << 13;
- tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
- tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
-#ifdef IXGBE_FCOE
- /* for FCoE with DCB, we force the priority to what
- * was specified by the switch */
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
- (protocol == htons(ETH_P_FCOE)))
- tx_flags |= IXGBE_TX_FLAGS_FCOE;
-
-#endif
/* record the location of the first descriptor for this packet */
- first = tx_ring->next_to_use;
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
- if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
#ifdef IXGBE_FCOE
- /* setup tx offload for FCoE */
+ /* setup tx offload for FCoE */
+ if ((protocol == __constant_htons(ETH_P_FCOE)) &&
+ (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
if (tso < 0)
goto out_drop;
else if (tso)
- tx_flags |= IXGBE_TX_FLAGS_FSO;
-#endif /* IXGBE_FCOE */
- } else {
- if (protocol == htons(ETH_P_IP))
- tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
- if (tso < 0)
- goto out_drop;
- else if (tso)
- tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
- tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ tx_flags |= IXGBE_TX_FLAGS_FSO |
+ IXGBE_TX_FLAGS_FCOE;
+ else
+ tx_flags |= IXGBE_TX_FLAGS_FCOE;
+
+ goto xmit_fcoe;
}
- count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
- if (count) {
- /* add the ATR filter if ATR is on */
- if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
- ixgbe_atr(tx_ring, skb, tx_flags, protocol);
- ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
- ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
+#endif /* IXGBE_FCOE */
+ /* setup IPv4/IPv6 offloads */
+ if (protocol == __constant_htons(ETH_P_IP))
+ tx_flags |= IXGBE_TX_FLAGS_IPV4;
- } else {
- tx_ring->tx_buffer_info[first].time_stamp = 0;
- tx_ring->next_to_use = first;
+ tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+ if (tso < 0)
goto out_drop;
- }
+ else if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_TSO;
+ else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
+ tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+ /* add the ATR filter if ATR is on */
+ if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+ ixgbe_atr(tx_ring, skb, tx_flags, protocol);
+
+#ifdef IXGBE_FCOE
+xmit_fcoe:
+#endif /* IXGBE_FCOE */
+ ixgbe_tx_map(tx_ring, skb, first, tx_flags, hdr_len);
+
+ ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
@@ -6971,7 +7021,7 @@ static void ixgbe_netpoll(struct net_device *netdev)
int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (i = 0; i < num_q_vectors; i++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- ixgbe_msix_clean_many(0, q_vector);
+ ixgbe_msix_clean_rings(0, q_vector);
}
} else {
ixgbe_intr(adapter->pdev->irq, netdev);
@@ -7076,14 +7126,14 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- /* If DCB is anabled do not remove traffic classes, multiple
- * traffic classes are required to implement DCB
- */
- if (!tc && (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- return 0;
+ /* Multiple traffic classes requires multiple queues */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ e_err(drv, "Enable failed, needs MSI-X\n");
+ return -EINVAL;
+ }
/* Hardware supports up to 8 traffic classes */
- if (tc > MAX_TRAFFIC_CLASS ||
+ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
(hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
return -EINVAL;
@@ -7095,11 +7145,27 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
ixgbe_close(dev);
ixgbe_clear_interrupt_scheme(adapter);
- if (tc)
+ if (tc) {
netdev_set_num_tc(dev, tc);
- else
+ adapter->last_lfc_mode = adapter->hw.fc.current_mode;
+
+ adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ adapter->hw.fc.requested_mode = ixgbe_fc_none;
+ } else {
netdev_reset_tc(dev);
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ adapter->temp_dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_cfg.pfc_mode_enable = false;
+ }
+
ixgbe_init_interrupt_scheme(adapter);
ixgbe_validate_rtr(adapter, tc);
if (netif_running(dev))
@@ -7206,7 +7272,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_start_xmit = ixgbe_xmit_frame,
.ndo_select_queue = ixgbe_select_queue,
.ndo_set_rx_mode = ixgbe_set_rx_mode,
- .ndo_set_multicast_list = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
.ndo_change_mtu = ixgbe_change_mtu,
@@ -7217,6 +7282,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
.ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
+ .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
.ndo_setup_tc = ixgbe_setup_tc,
@@ -7240,11 +7306,8 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
{
#ifdef CONFIG_PCI_IOV
struct ixgbe_hw *hw = &adapter->hw;
- int err;
- int num_vf_macvlans, i;
- struct vf_macvlans *mv_list;
- if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs)
+ if (hw->mac.type == ixgbe_mac_82598EB)
return;
/* The 82599 supports up to 64 VFs per physical function
@@ -7253,60 +7316,7 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
* physical function
*/
adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
- adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
- err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
- if (err) {
- e_err(probe, "Failed to enable PCI sriov: %d\n", err);
- goto err_novfs;
- }
-
- num_vf_macvlans = hw->mac.num_rar_entries -
- (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
-
- adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
- sizeof(struct vf_macvlans),
- GFP_KERNEL);
- if (mv_list) {
- /* Initialize list of VF macvlans */
- INIT_LIST_HEAD(&adapter->vf_mvs.l);
- for (i = 0; i < num_vf_macvlans; i++) {
- mv_list->vf = -1;
- mv_list->free = true;
- mv_list->rar_entry = hw->mac.num_rar_entries -
- (i + adapter->num_vfs + 1);
- list_add(&mv_list->l, &adapter->vf_mvs.l);
- mv_list++;
- }
- }
-
- /* If call to enable VFs succeeded then allocate memory
- * for per VF control structures.
- */
- adapter->vfinfo =
- kcalloc(adapter->num_vfs,
- sizeof(struct vf_data_storage), GFP_KERNEL);
- if (adapter->vfinfo) {
- /* Now that we're sure SR-IOV is enabled
- * and memory allocated set up the mailbox parameters
- */
- ixgbe_init_mbx_params_pf(hw);
- memcpy(&hw->mbx.ops, ii->mbx_ops,
- sizeof(hw->mbx.ops));
-
- /* Disable RSC when in SR-IOV mode */
- adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
- IXGBE_FLAG2_RSC_ENABLED);
- return;
- }
-
- /* Oh oh */
- e_err(probe, "Unable to allocate memory for VF Data Storage - "
- "SRIOV disabled\n");
- pci_disable_sriov(adapter->pdev);
-
-err_novfs:
- adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
- adapter->num_vfs = 0;
+ ixgbe_enable_sriov(adapter, ii);
#endif /* CONFIG_PCI_IOV */
}
@@ -7336,6 +7346,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
u16 device_caps;
#endif
u32 eec;
+ u16 wol_cap;
/* Catch broken hardware that put the wrong VF device ID in
* the PCIe SR-IOV capability.
@@ -7506,7 +7517,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
NETIF_F_HW_VLAN_FILTER |
NETIF_F_TSO |
NETIF_F_TSO6 |
- NETIF_F_GRO |
NETIF_F_RXHASH |
NETIF_F_RXCSUM;
@@ -7529,6 +7539,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_SG;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
IXGBE_FLAG_DCB_ENABLED);
@@ -7599,6 +7611,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->features &= ~NETIF_F_RXHASH;
}
+ /* WOL not supported for all but the following */
+ adapter->wol = 0;
switch (pdev->device) {
case IXGBE_DEV_ID_82599_SFP:
/* Only this subdevice supports WOL */
@@ -7613,12 +7627,23 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
case IXGBE_DEV_ID_82599_KX4:
adapter->wol = IXGBE_WUFC_MAG;
break;
- default:
- adapter->wol = 0;
+ case IXGBE_DEV_ID_X540T:
+ /* Check eeprom to see if it is enabled */
+ hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
+ wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
+
+ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+ ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+ (hw->bus.func == 0)))
+ adapter->wol = IXGBE_WUFC_MAG;
break;
}
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+ /* save off EEPROM version number */
+ hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
+ hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
+
/* pick up the PCI bus settings for reporting later */
hw->mac.ops.get_bus_info(hw);
@@ -7651,9 +7676,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
"is required.\n");
}
- /* save off EEPROM version number */
- hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
-
/* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw);
@@ -7686,10 +7708,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
ixgbe_vf_configuration(pdev, (i | 0x10000000));
}
- /* Inform firmware of driver version */
+ /* firmware requires driver version to be 0xFFFFFFFF
+ * since os does not support feature
+ */
if (hw->mac.ops.set_fw_drv_ver)
- hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD,
- FW_CEM_UNUSED_VER);
+ hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
+ 0xFF);
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
@@ -7755,8 +7779,13 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- ixgbe_disable_sriov(adapter);
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ if (!(ixgbe_check_vf_assignment(adapter)))
+ ixgbe_disable_sriov(adapter);
+ else
+ e_dev_warn("Unloading driver while VFs are assigned "
+ "- VFs will not be deallocated\n");
+ }
ixgbe_clear_interrupt_scheme(adapter);
@@ -7789,6 +7818,91 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *bdev, *vfdev;
+ u32 dw0, dw1, dw2, dw3;
+ int vf, pos;
+ u16 req_id, pf_func;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
+ adapter->num_vfs == 0)
+ goto skip_bad_vf_detection;
+
+ bdev = pdev->bus->self;
+ while (bdev && (bdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT))
+ bdev = bdev->bus->self;
+
+ if (!bdev)
+ goto skip_bad_vf_detection;
+
+ pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ goto skip_bad_vf_detection;
+
+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
+
+ req_id = dw1 >> 16;
+ /* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
+ if (!(req_id & 0x0080))
+ goto skip_bad_vf_detection;
+
+ pf_func = req_id & 0x01;
+ if ((pf_func & 1) == (pdev->devfn & 1)) {
+ unsigned int device_id;
+
+ vf = (req_id & 0x7F) >> 1;
+ e_dev_err("VF %d has caused a PCIe error\n", vf);
+ e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
+ "%8.8x\tdw3: %8.8x\n",
+ dw0, dw1, dw2, dw3);
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ device_id = IXGBE_82599_VF_DEVICE_ID;
+ break;
+ case ixgbe_mac_X540:
+ device_id = IXGBE_X540_VF_DEVICE_ID;
+ break;
+ default:
+ device_id = 0;
+ break;
+ }
+
+ /* Find the pci device of the offending VF */
+ vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
+ while (vfdev) {
+ if (vfdev->devfn == (req_id & 0xFF))
+ break;
+ vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
+ device_id, vfdev);
+ }
+ /*
+ * There's a slim chance the VF could have been hot plugged,
+ * so if it is no longer present we don't need to issue the
+ * VFLR. Just clean up the AER in that case.
+ */
+ if (vfdev) {
+ e_dev_err("Issuing VFLR to VF %d\n", vf);
+ pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ }
+
+ /*
+ * Even though the error may have occurred on the other port
+ * we still need to increment the vf error reference count for
+ * both ports because the I/O resume function will be called
+ * for both of them.
+ */
+ adapter->vferr_refcount++;
+
+ return PCI_ERS_RESULT_RECOVERED;
+
+skip_bad_vf_detection:
+#endif /* CONFIG_PCI_IOV */
netif_device_detach(netdev);
if (state == pci_channel_io_perm_failure)
@@ -7851,13 +7965,17 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- if (netif_running(netdev)) {
- if (ixgbe_up(adapter)) {
- e_info(probe, "ixgbe_up failed after reset\n");
- return;
- }
+#ifdef CONFIG_PCI_IOV
+ if (adapter->vferr_refcount) {
+ e_info(drv, "Resuming after VF err\n");
+ adapter->vferr_refcount--;
+ return;
}
+#endif
+ if (netif_running(netdev))
+ ixgbe_up(adapter);
+
netif_device_attach(netdev);
}
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 1ff0eefcfd0..3f725d48336 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -38,7 +38,7 @@
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
- * returns SUCCESS if it successfuly read message from buffer
+ * returns SUCCESS if it successfully read message from buffer
**/
s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index b239bdac38d..b239bdac38d 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index f7ca3511b9f..9a56fd74e67 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -39,7 +39,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
-static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
static bool ixgbe_get_i2c_data(u32 *i2cctl);
@@ -1205,7 +1205,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
- * a specified deivce address.
+ * a specified device address.
**/
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
@@ -1215,6 +1215,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u32 retry = 0;
u16 swfw_mask = 0;
bool nack = 1;
+ *data = 0;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
swfw_mask = IXGBE_GSSR_PHY1_SM;
@@ -1419,19 +1420,15 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
**/
static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
{
- s32 status = 0;
s32 i;
bool bit = 0;
for (i = 7; i >= 0; i--) {
- status = ixgbe_clock_in_i2c_bit(hw, &bit);
+ ixgbe_clock_in_i2c_bit(hw, &bit);
*data |= bit << i;
-
- if (status != 0)
- break;
}
- return status;
+ return 0;
}
/**
@@ -1472,16 +1469,14 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
**/
static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
- s32 status;
+ s32 status = 0;
u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
u32 timeout = 10;
bool ack = 1;
- status = ixgbe_raise_i2c_clk(hw, &i2cctl);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
- if (status != 0)
- goto out;
/* Minimum high period of clock is 4us */
udelay(IXGBE_I2C_T_HIGH);
@@ -1507,7 +1502,6 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
/* Minimum low period of clock is 4.7 us */
udelay(IXGBE_I2C_T_LOW);
-out:
return status;
}
@@ -1520,10 +1514,9 @@ out:
**/
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
- s32 status;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- status = ixgbe_raise_i2c_clk(hw, &i2cctl);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
udelay(IXGBE_I2C_T_HIGH);
@@ -1536,7 +1529,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
/* Minimum low period of clock is 4.7 us */
udelay(IXGBE_I2C_T_LOW);
- return status;
+ return 0;
}
/**
@@ -1553,7 +1546,7 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
status = ixgbe_set_i2c_data(hw, &i2cctl, data);
if (status == 0) {
- status = ixgbe_raise_i2c_clk(hw, &i2cctl);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
udelay(IXGBE_I2C_T_HIGH);
@@ -1578,10 +1571,8 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
*
* Raises the I2C clock line '0'->'1'
**/
-static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
- s32 status = 0;
-
*i2cctl |= IXGBE_I2C_CLK_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
@@ -1589,8 +1580,6 @@ static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
/* SCL rise time (1000ns) */
udelay(IXGBE_I2C_T_RISE);
-
- return status;
}
/**
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 197bdd13106..197bdd13106 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
new file mode 100644
index 00000000000..db95731863d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -0,0 +1,926 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+
+#include "ixgbe.h"
+#include "ixgbe_type.h"
+#include "ixgbe_sriov.h"
+
+#ifdef CONFIG_PCI_IOV
+static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *pvfdev;
+ u16 vf_devfn = 0;
+ int device_id;
+ int vfs_found = 0;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ device_id = IXGBE_DEV_ID_82599_VF;
+ break;
+ case ixgbe_mac_X540:
+ device_id = IXGBE_DEV_ID_X540_VF;
+ break;
+ default:
+ device_id = 0;
+ break;
+ }
+
+ vf_devfn = pdev->devfn + 0x80;
+ pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
+ while (pvfdev) {
+ if (pvfdev->devfn == vf_devfn)
+ vfs_found++;
+ vf_devfn += 2;
+ pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
+ device_id, pvfdev);
+ }
+
+ return vfs_found;
+}
+
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err = 0;
+ int num_vf_macvlans, i;
+ struct vf_macvlans *mv_list;
+ int pre_existing_vfs = 0;
+
+ pre_existing_vfs = ixgbe_find_enabled_vfs(adapter);
+ if (!pre_existing_vfs && !adapter->num_vfs)
+ return;
+
+ /* If there are pre-existing VFs then we have to force
+ * use of that many because they were not deleted the last
+ * time someone removed the PF driver. That would have
+ * been because they were allocated to guest VMs and can't
+ * be removed. Go ahead and just re-enable the old amount.
+ * If the user wants to change the number of VFs they can
+ * use ethtool while making sure no VFs are allocated to
+ * guest VMs... i.e. the right way.
+ */
+ if (pre_existing_vfs) {
+ adapter->num_vfs = pre_existing_vfs;
+ dev_warn(&adapter->pdev->dev, "Virtual Functions already "
+ "enabled for this device - Please reload all "
+ "VF drivers to avoid spoofed packet errors\n");
+ } else {
+ err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ }
+ if (err) {
+ e_err(probe, "Failed to enable PCI sriov: %d\n", err);
+ goto err_novfs;
+ }
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+
+ e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
+
+ num_vf_macvlans = hw->mac.num_rar_entries -
+ (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
+
+ adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
+ sizeof(struct vf_macvlans),
+ GFP_KERNEL);
+ if (mv_list) {
+ /* Initialize list of VF macvlans */
+ INIT_LIST_HEAD(&adapter->vf_mvs.l);
+ for (i = 0; i < num_vf_macvlans; i++) {
+ mv_list->vf = -1;
+ mv_list->free = true;
+ mv_list->rar_entry = hw->mac.num_rar_entries -
+ (i + adapter->num_vfs + 1);
+ list_add(&mv_list->l, &adapter->vf_mvs.l);
+ mv_list++;
+ }
+ }
+
+ /* If call to enable VFs succeeded then allocate memory
+ * for per VF control structures.
+ */
+ adapter->vfinfo =
+ kcalloc(adapter->num_vfs,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+ if (adapter->vfinfo) {
+ /* Now that we're sure SR-IOV is enabled
+ * and memory allocated set up the mailbox parameters
+ */
+ ixgbe_init_mbx_params_pf(hw);
+ memcpy(&hw->mbx.ops, ii->mbx_ops,
+ sizeof(hw->mbx.ops));
+
+ /* Disable RSC when in SR-IOV mode */
+ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+ IXGBE_FLAG2_RSC_ENABLED);
+ for (i = 0; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].spoofchk_enabled = true;
+ return;
+ }
+
+ /* Oh oh */
+ e_err(probe, "Unable to allocate memory for VF Data Storage - "
+ "SRIOV disabled\n");
+ pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->num_vfs = 0;
+}
+#endif /* #ifdef CONFIG_PCI_IOV */
+
+void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gcr;
+ u32 gpie;
+ u32 vmdctl;
+ int i;
+
+#ifdef CONFIG_PCI_IOV
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(adapter->pdev);
+#endif
+
+ /* turn off device IOV mode */
+ gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* set default pool back to 0 */
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* take a breather then clean up driver data */
+ msleep(100);
+
+ /* Release reference to VF devices */
+ for (i = 0; i < adapter->num_vfs; i++) {
+ if (adapter->vfinfo[i].vfdev)
+ pci_dev_put(adapter->vfinfo[i].vfdev);
+ }
+ kfree(adapter->vfinfo);
+ kfree(adapter->mv_list);
+ adapter->vfinfo = NULL;
+
+ adapter->num_vfs = 0;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
+static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf)
+{
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ u32 vector_bit;
+ u32 vector_reg;
+ u32 mta_reg;
+
+ /* only so many hash values supported */
+ entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
+
+ /*
+ * salt away the number of multi cast addresses assigned
+ * to this VF for later use to restore when the PF multi cast
+ * list changes
+ */
+ vfinfo->num_vf_mc_hashes = entries;
+
+ /*
+ * VFs are limited to using the MTA hash table for their multicast
+ * addresses
+ */
+ for (i = 0; i < entries; i++) {
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+ }
+
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
+ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+ mta_reg |= (1 << vector_bit);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ }
+
+ return 0;
+}
+
+static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct list_head *pos;
+ struct vf_macvlans *entry;
+
+ list_for_each(pos, &adapter->vf_mvs.l) {
+ entry = list_entry(pos, struct vf_macvlans, l);
+ if (entry->free == false)
+ hw->mac.ops.set_rar(hw, entry->rar_entry,
+ entry->vf_macvlan,
+ entry->vf, IXGBE_RAH_AV);
+ }
+}
+
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct vf_data_storage *vfinfo;
+ int i, j;
+ u32 vector_bit;
+ u32 vector_reg;
+ u32 mta_reg;
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ vfinfo = &adapter->vfinfo[i];
+ for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
+ hw->addr_ctrl.mta_in_use++;
+ vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
+ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+ mta_reg |= (1 << vector_bit);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ }
+ }
+
+ /* Restore any VF macvlans */
+ ixgbe_restore_vf_macvlans(adapter);
+}
+
+static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
+ u32 vf)
+{
+ return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
+}
+
+static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int new_mtu = msgbuf[1];
+ u32 max_frs;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* Only X540 supports jumbo frames in IOV mode */
+ if (adapter->hw.mac.type != ixgbe_mac_X540)
+ return;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
+ e_err(drv, "VF mtu %d out of range\n", new_mtu);
+ return;
+ }
+
+ max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
+ IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
+ if (max_frs < new_mtu) {
+ max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
+ }
+
+ e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+}
+
+static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
+{
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+ vmolr |= (IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_BAM);
+ if (aupe)
+ vmolr |= IXGBE_VMOLR_AUPE;
+ else
+ vmolr &= ~IXGBE_VMOLR_AUPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+}
+
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (vid)
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
+ (vid | IXGBE_VMVIR_VLANA_DEFAULT));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+}
+
+static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+ /* reset offloads to defaults */
+ if (adapter->vfinfo[vf].pf_vlan) {
+ ixgbe_set_vf_vlan(adapter, true,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_set_vmvir(adapter,
+ (adapter->vfinfo[vf].pf_vlan |
+ (adapter->vfinfo[vf].pf_qos <<
+ VLAN_PRIO_SHIFT)), vf);
+ ixgbe_set_vmolr(hw, vf, false);
+ } else {
+ ixgbe_set_vmvir(adapter, 0, vf);
+ ixgbe_set_vmolr(hw, vf, true);
+ }
+
+ /* reset multicast table array for vf */
+ adapter->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ hw->mac.ops.clear_rar(hw, rar_entry);
+}
+
+static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+ hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+
+ return 0;
+}
+
+static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
+ int vf, int index, unsigned char *mac_addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct list_head *pos;
+ struct vf_macvlans *entry;
+
+ if (index <= 1) {
+ list_for_each(pos, &adapter->vf_mvs.l) {
+ entry = list_entry(pos, struct vf_macvlans, l);
+ if (entry->vf == vf) {
+ entry->vf = -1;
+ entry->free = true;
+ entry->is_macvlan = false;
+ hw->mac.ops.clear_rar(hw, entry->rar_entry);
+ }
+ }
+ }
+
+ /*
+ * If index was zero then we were asked to clear the uc list
+ * for the VF. We're done.
+ */
+ if (!index)
+ return 0;
+
+ entry = NULL;
+
+ list_for_each(pos, &adapter->vf_mvs.l) {
+ entry = list_entry(pos, struct vf_macvlans, l);
+ if (entry->free)
+ break;
+ }
+
+ /*
+ * If we traversed the entire list and didn't find a free entry
+ * then we're out of space on the RAR table. Also entry may
+ * be NULL because the original memory allocation for the list
+ * failed, which is not fatal but does mean we can't support
+ * VF requests for MACVLAN because we couldn't allocate
+ * memory for the list management required.
+ */
+ if (!entry || !entry->free)
+ return -ENOSPC;
+
+ entry->free = false;
+ entry->is_macvlan = true;
+ entry->vf = vf;
+ memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
+
+ hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+
+ return 0;
+}
+
+int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
+{
+ int i;
+ for (i = 0; i < adapter->num_vfs; i++) {
+ if (adapter->vfinfo[i].vfdev->dev_flags &
+ PCI_DEV_FLAGS_ASSIGNED)
+ return true;
+ }
+ return false;
+}
+
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
+{
+ unsigned char vf_mac_addr[6];
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ unsigned int vfn = (event_mask & 0x3f);
+ struct pci_dev *pvfdev;
+ unsigned int device_id;
+ u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) |
+ (pdev->devfn & 1);
+
+ bool enable = ((event_mask & 0x10000000U) != 0);
+
+ if (enable) {
+ random_ether_addr(vf_mac_addr);
+ e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
+ vfn, vf_mac_addr);
+ /*
+ * Store away the VF "permananet" MAC address, it will ask
+ * for it later.
+ */
+ memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ device_id = IXGBE_DEV_ID_82599_VF;
+ break;
+ case ixgbe_mac_X540:
+ device_id = IXGBE_DEV_ID_X540_VF;
+ break;
+ default:
+ device_id = 0;
+ break;
+ }
+
+ pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
+ while (pvfdev) {
+ if (pvfdev->devfn == thisvf_devfn)
+ break;
+ pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
+ device_id, pvfdev);
+ }
+ if (pvfdev)
+ adapter->vfinfo[vfn].vfdev = pvfdev;
+ else
+ e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n",
+ thisvf_devfn);
+ }
+
+ return 0;
+}
+
+static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+ u32 reg_offset, vf_shift;
+
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ /* enable transmit and receive for vf */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+ reg |= (reg | (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+ reg |= (reg | (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+
+ /* Enable counting of spoofed packets in the SSVPC register */
+ reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
+ reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
+
+ ixgbe_vf_reset_event(adapter, vf);
+}
+
+static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+ struct ixgbe_hw *hw = &adapter->hw;
+ s32 retval;
+ int entries;
+ u16 *hash_list;
+ int add, vid, index;
+ u8 *new_mac;
+
+ retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+
+ if (retval)
+ pr_err("Error receiving message from VF\n");
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
+ return retval;
+
+ /*
+ * until the vf completes a virtual function reset it should not be
+ * allowed to start any configuration.
+ */
+
+ if (msgbuf[0] == IXGBE_VF_RESET) {
+ unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+ new_mac = (u8 *)(&msgbuf[1]);
+ e_info(probe, "VF Reset msg received from vf %d\n", vf);
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_vf_reset_msg(adapter, vf);
+ adapter->vfinfo[vf].clear_to_send = true;
+
+ if (is_valid_ether_addr(new_mac) &&
+ !adapter->vfinfo[vf].pf_set_mac)
+ ixgbe_set_vf_mac(adapter, vf, vf_mac);
+ else
+ ixgbe_set_vf_mac(adapter,
+ vf, adapter->vfinfo[vf].vf_mac_addresses);
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+ memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return retval;
+ }
+
+ if (!adapter->vfinfo[vf].clear_to_send) {
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+ return retval;
+ }
+
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case IXGBE_VF_SET_MAC_ADDR:
+ new_mac = ((u8 *)(&msgbuf[1]));
+ if (is_valid_ether_addr(new_mac) &&
+ !adapter->vfinfo[vf].pf_set_mac) {
+ ixgbe_set_vf_mac(adapter, vf, new_mac);
+ } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
+ new_mac, ETH_ALEN)) {
+ e_warn(drv, "VF %d attempted to override "
+ "administratively set MAC address\nReload "
+ "the VF driver to resume operations\n", vf);
+ retval = -1;
+ }
+ break;
+ case IXGBE_VF_SET_MULTICAST:
+ entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ hash_list = (u16 *)&msgbuf[1];
+ retval = ixgbe_set_vf_multicasts(adapter, entries,
+ hash_list, vf);
+ break;
+ case IXGBE_VF_SET_LPE:
+ ixgbe_set_vf_lpe(adapter, msgbuf);
+ break;
+ case IXGBE_VF_SET_VLAN:
+ add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+ if (adapter->vfinfo[vf].pf_vlan) {
+ e_warn(drv, "VF %d attempted to override "
+ "administratively set VLAN configuration\n"
+ "Reload the VF driver to resume operations\n",
+ vf);
+ retval = -1;
+ } else {
+ if (add)
+ adapter->vfinfo[vf].vlan_count++;
+ else if (adapter->vfinfo[vf].vlan_count)
+ adapter->vfinfo[vf].vlan_count--;
+ retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+ if (!retval && adapter->vfinfo[vf].spoofchk_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+ }
+ break;
+ case IXGBE_VF_SET_MACVLAN:
+ index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+ IXGBE_VT_MSGINFO_SHIFT;
+ /*
+ * If the VF is allowed to set MAC filters then turn off
+ * anti-spoofing to avoid false positives. An index
+ * greater than 0 will indicate the VF is setting a
+ * macvlan MAC filter.
+ */
+ if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled)
+ ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+ retval = ixgbe_set_vf_macvlan(adapter, vf, index,
+ (unsigned char *)(&msgbuf[1]));
+ break;
+ default:
+ e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
+ retval = IXGBE_ERR_MBX;
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
+
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+
+ return retval;
+}
+
+static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 msg = IXGBE_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!adapter->vfinfo[vf].clear_to_send)
+ ixgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void ixgbe_msg_task(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vf;
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ /* process any reset requests */
+ if (!ixgbe_check_for_rst(hw, vf))
+ ixgbe_vf_reset_event(adapter, vf);
+
+ /* process any messages pending */
+ if (!ixgbe_check_for_msg(hw, vf))
+ ixgbe_rcv_msg_from_vf(adapter, vf);
+
+ /* process any acks */
+ if (!ixgbe_check_for_ack(hw, vf))
+ ixgbe_rcv_ack_from_vf(adapter, vf);
+ }
+}
+
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* disable transmit and receive for all vfs */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+}
+
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ping;
+ int i;
+
+ for (i = 0 ; i < adapter->num_vfs; i++) {
+ ping = IXGBE_PF_CONTROL_MSG;
+ if (adapter->vfinfo[i].clear_to_send)
+ ping |= IXGBE_VT_MSGTYPE_CTS;
+ ixgbe_write_mbx(hw, &ping, 1, i);
+ }
+}
+
+int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
+ return -EINVAL;
+ adapter->vfinfo[vf].pf_set_mac = true;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+ " change effective.");
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ return ixgbe_set_vf_mac(adapter, vf, mac);
+}
+
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+{
+ int err = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
+ if (vlan || qos) {
+ err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
+ if (err)
+ goto out;
+ ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ ixgbe_set_vmolr(hw, vf, false);
+ if (adapter->vfinfo[vf].spoofchk_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+ adapter->vfinfo[vf].vlan_count++;
+ adapter->vfinfo[vf].pf_vlan = vlan;
+ adapter->vfinfo[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ } else {
+ err = ixgbe_set_vf_vlan(adapter, false,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_set_vmvir(adapter, vlan, vf);
+ ixgbe_set_vmolr(hw, vf, true);
+ hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
+ if (adapter->vfinfo[vf].vlan_count)
+ adapter->vfinfo[vf].vlan_count--;
+ adapter->vfinfo[vf].pf_vlan = 0;
+ adapter->vfinfo[vf].pf_qos = 0;
+ }
+out:
+ return err;
+}
+
+static int ixgbe_link_mbps(int internal_link_speed)
+{
+ switch (internal_link_speed) {
+ case IXGBE_LINK_SPEED_100_FULL:
+ return 100;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ return 1000;
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ return 10000;
+ default:
+ return 0;
+ }
+}
+
+static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
+ int link_speed)
+{
+ int rf_dec, rf_int;
+ u32 bcnrc_val;
+
+ if (tx_rate != 0) {
+ /* Calculate the rate factor values to set */
+ rf_int = link_speed / tx_rate;
+ rf_dec = (link_speed - (rf_int * tx_rate));
+ rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+
+ bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+ bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+ IXGBE_RTTBCNRC_RF_INT_MASK);
+ bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+ } else {
+ bcnrc_val = 0;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
+ * and 0x004 otherwise.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
+ break;
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
+ break;
+ default:
+ break;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+}
+
+void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
+{
+ int actual_link_speed, i;
+ bool reset_rate = false;
+
+ /* VF Tx rate limit was not set */
+ if (adapter->vf_rate_link_speed == 0)
+ return;
+
+ actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
+ if (actual_link_speed != adapter->vf_rate_link_speed) {
+ reset_rate = true;
+ adapter->vf_rate_link_speed = 0;
+ dev_info(&adapter->pdev->dev,
+ "Link speed has been changed. VF Transmit rate "
+ "is disabled\n");
+ }
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ if (reset_rate)
+ adapter->vfinfo[i].tx_rate = 0;
+
+ ixgbe_set_vf_rate_limit(&adapter->hw, i,
+ adapter->vfinfo[i].tx_rate,
+ actual_link_speed);
+ }
+}
+
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int actual_link_speed;
+
+ actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
+ if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
+ (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
+ ((tx_rate != 0) && (tx_rate <= 10)))
+ /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
+ return -EINVAL;
+
+ adapter->vf_rate_link_speed = actual_link_speed;
+ adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
+ ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+
+ return 0;
+}
+
+int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 regval;
+
+ adapter->vfinfo[vf].spoofchk_enabled = setting;
+
+ regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ regval &= ~(1 << vf_target_shift);
+ regval |= (setting << vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
+
+ if (adapter->vfinfo[vf].vlan_count) {
+ vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
+ regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ regval &= ~(1 << vf_target_shift);
+ regval |= (setting << vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
+ }
+
+ return 0;
+}
+
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
+ ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
+ ivi->vlan = adapter->vfinfo[vf].pf_vlan;
+ ivi->qos = adapter->vfinfo[vf].pf_qos;
+ ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
+ return 0;
+}
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 34175564bb7..5a7e1eb3359 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -38,9 +38,15 @@ int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
+void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii);
+int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
+
#endif /* _IXGBE_SRIOV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index e0d970ebab7..6c5cca808bd 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -59,12 +59,17 @@
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
+#define IXGBE_DEV_ID_82599EN_SFP 0x1557
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
#define IXGBE_DEV_ID_82599_LS 0x154F
#define IXGBE_DEV_ID_X540T 0x1528
+/* VF Device IDs */
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_X540_VF 0x1515
+
/* General Registers */
#define IXGBE_CTRL 0x00000
#define IXGBE_STATUS 0x00008
@@ -400,6 +405,8 @@
#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
/* DCB registers */
+#define MAX_TRAFFIC_CLASS 8
+#define X540_TRAFFIC_CLASS 4
#define IXGBE_RMCS 0x03D00
#define IXGBE_DPMCS 0x07F40
#define IXGBE_PDPMCS 0x0CD00
@@ -766,6 +773,7 @@
#define IXGBE_GCR_CAP_VER2 0x00040000
#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
+#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000
#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
@@ -982,6 +990,7 @@
#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
/* FACTPS */
#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
@@ -1273,6 +1282,7 @@ enum {
#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
@@ -1307,6 +1317,7 @@ enum {
#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */
#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
@@ -1748,6 +1759,10 @@ enum {
#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */
#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
+#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */
+#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */
+#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */
+
/* PCI Bus Info */
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
@@ -1817,6 +1832,7 @@ enum {
#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. write-back flushing */
#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
#define IXGBE_RXDCTL_RLPML_EN 0x00008000
#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
@@ -1834,6 +1850,7 @@ enum {
#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */
#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
+#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF0 /* Receive FC Mask */
#define IXGBE_MFLCN_RPFCE_SHIFT 4
@@ -2311,13 +2328,60 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
-/* Flow Control Macros */
-#define PAUSE_RTT 8
-#define PAUSE_MTU(MTU) ((MTU + 1024 - 1) / 1024)
+/* Flow Control Data Sheet defined values
+ * Calculation and defines taken from 802.1bb Annex O
+ */
+
+/* BitTimes (BT) conversion */
+#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024))
+#define IXGBE_B2BT(BT) (BT * 8)
+
+/* Calculate Delay to respond to PFC */
+#define IXGBE_PFC_D 672
+
+/* Calculate Cable Delay */
+#define IXGBE_CABLE_DC 5556 /* Delay Copper */
+#define IXGBE_CABLE_DO 5000 /* Delay Optical */
+
+/* Calculate Interface Delay X540 */
+#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */
+#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */
+#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */
+
+#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC)
+
+/* Calculate Interface Delay 82598, 82599 */
+#define IXGBE_PHY_D 12800
+#define IXGBE_MAC_D 4096
+#define IXGBE_XAUI_D (2 * 1024)
+
+#define IXGBE_ID (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D)
+
+/* Calculate Delay incurred from higher layer */
+#define IXGBE_HD 6144
+
+/* Calculate PCI Bus delay for low thresholds */
+#define IXGBE_PCI_DELAY 10000
+
+/* Calculate X540 delay value in bit times */
+#define IXGBE_FILL_RATE (36 / 25)
+
+#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \
+ (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID_X540) + \
+ IXGBE_HD + IXGBE_B2BT(TC)))
+
+/* Calculate 82599, 82598 delay value in bit times */
+#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \
+ (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \
+ IXGBE_HD + IXGBE_B2BT(TC)))
-#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\
- PAUSE_MTU(MTU))
-#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
+/* Calculate low threshold delay values */
+#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \
+ (IXGBE_FILL_RATE * IXGBE_PCI_DELAY))
+#define IXGBE_LOW_DV(TC) (2 * IXGBE_LOW_DV_X540(TC))
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
@@ -2536,7 +2600,7 @@ struct ixgbe_bus_info {
/* Flow control parameters */
struct ixgbe_fc_info {
- u32 high_water; /* Flow Control High-water */
+ u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
u32 low_water; /* Flow Control Low-water */
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
@@ -2618,6 +2682,8 @@ struct ixgbe_hw_stats {
u64 fcoeptc;
u64 fcoedwrc;
u64 fcoedwtc;
+ u64 fcoe_noddp;
+ u64 fcoe_noddp_ext_buff;
u64 b2ospc;
u64 b2ogprc;
u64 o2bgptc;
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 2696c78e9f4..e5101e91b6b 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -93,79 +93,47 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
**/
static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
- ixgbe_link_speed link_speed;
- s32 status = 0;
- u32 ctrl;
- u32 ctrl_ext;
- u32 reset_bit;
- u32 i;
- u32 autoc;
- u32 autoc2;
- bool link_up = false;
+ s32 status;
+ u32 ctrl, i;
/* Call adapter stop to disable tx/rx and clear interrupts */
- hw->mac.ops.stop_adapter(hw);
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != 0)
+ goto reset_hw_out;
- /*
- * Prevent the PCI-E bus from from hanging by disabling PCI-E master
- * access and verify no pending requests before reset
- */
- ixgbe_disable_pcie_master(hw);
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
mac_reset_top:
- /*
- * Issue global reset to the MAC. Needs to be SW reset if link is up.
- * If link reset is used when link is up, it might reset the PHY when
- * mng is using it. If link is down or the flag to force full link
- * reset is set, then perform link reset.
- */
- if (hw->force_full_reset) {
- reset_bit = IXGBE_CTRL_LNK_RST;
- } else {
- hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
- if (!link_up)
- reset_bit = IXGBE_CTRL_LNK_RST;
- else
- reset_bit = IXGBE_CTRL_RST;
- }
-
- ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
/* Poll for reset bit to self-clear indicating reset is complete */
for (i = 0; i < 10; i++) {
udelay(1);
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- if (!(ctrl & reset_bit))
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
break;
}
- if (ctrl & reset_bit) {
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
status = IXGBE_ERR_RESET_FAILED;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
+ msleep(100);
/*
* Double resets are required for recovery from certain error
* conditions. Between resets, it is necessary to stall to allow time
- * for any pending HW events to complete. We use 1usec since that is
- * what is needed for ixgbe_disable_pcie_master(). The second reset
- * then clears out any effects of those events.
+ * for any pending HW events to complete.
*/
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
- udelay(1);
goto mac_reset_top;
}
- /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
- ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
- ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
- IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
- IXGBE_WRITE_FLUSH(hw);
-
- msleep(50);
-
/* Set the Rx packet buffer size. */
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
@@ -173,31 +141,6 @@ mac_reset_top:
hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
/*
- * Store the original AUTOC/AUTOC2 values if they have not been
- * stored off yet. Otherwise restore the stored original
- * values since the reset operation sets back to defaults.
- */
- autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- if (hw->mac.orig_link_settings_stored == false) {
- hw->mac.orig_autoc = autoc;
- hw->mac.orig_autoc2 = autoc2;
- hw->mac.orig_link_settings_stored = true;
- } else {
- if (autoc != hw->mac.orig_autoc)
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
- IXGBE_AUTOC_AN_RESTART));
-
- if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
- (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
- autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
- autoc2 |= (hw->mac.orig_autoc2 &
- IXGBE_AUTOC2_UPPER_MASK);
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
- }
- }
-
- /*
* Store MAC address from RAR0, clear receive address registers, and
* clear the multicast table. Also reset num_rar_entries to 128,
* since we modify this value when programming the SAN MAC address.
@@ -205,9 +148,6 @@ mac_reset_top:
hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
hw->mac.ops.init_rx_addrs(hw);
- /* Store the permanent mac address */
- hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
/* Store the permanent SAN mac address */
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
@@ -224,6 +164,7 @@ mac_reset_top:
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
&hw->mac.wwpn_prefix);
+reset_hw_out:
return status;
}
@@ -929,6 +870,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
+ .get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
};
struct ixgbe_info ixgbe_X540_info = {
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index 1f35d229e71..1f35d229e71 100644
--- a/drivers/net/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 78abb6f1a86..78abb6f1a86 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
new file mode 100644
index 00000000000..e29ba4506b7
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -0,0 +1,692 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgbevf */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+
+#include "ixgbevf.h"
+
+#define IXGBE_ALL_RAR_ENTRIES 16
+
+#ifdef ETHTOOL_GSTATS
+struct ixgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+ int base_stat_offset;
+ int saved_reset_offset;
+};
+
+#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \
+ offsetof(struct ixgbevf_adapter, m), \
+ offsetof(struct ixgbevf_adapter, b), \
+ offsetof(struct ixgbevf_adapter, r)
+static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+ {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
+ stats.saved_reset_vfgprc)},
+ {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
+ stats.saved_reset_vfgptc)},
+ {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
+ stats.saved_reset_vfgorc)},
+ {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
+ stats.saved_reset_vfgotc)},
+ {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
+ {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
+ stats.saved_reset_vfmprc)},
+ {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
+ zero_base)},
+ {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
+ zero_base)},
+ {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
+ zero_base)},
+ {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
+};
+
+#define IXGBE_QUEUE_STATS_LEN 0
+#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+
+#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)",
+ "Link test (on/offline)"
+};
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
+
+static int ixgbevf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = 0;
+ bool link_up;
+
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
+ ecmd->port = -1;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+ if (link_up) {
+ ethtool_cmd_speed_set(
+ ecmd,
+ (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ SPEED_10000 : SPEED_1000);
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ return 0;
+}
+
+static u32 ixgbevf_get_msglevel(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
+
+static char *ixgbevf_reg_names[] = {
+ "IXGBE_VFCTRL",
+ "IXGBE_VFSTATUS",
+ "IXGBE_VFLINKS",
+ "IXGBE_VFRXMEMWRAP",
+ "IXGBE_VFFRTIMER",
+ "IXGBE_VTEICR",
+ "IXGBE_VTEICS",
+ "IXGBE_VTEIMS",
+ "IXGBE_VTEIMC",
+ "IXGBE_VTEIAC",
+ "IXGBE_VTEIAM",
+ "IXGBE_VTEITR",
+ "IXGBE_VTIVAR",
+ "IXGBE_VTIVAR_MISC",
+ "IXGBE_VFRDBAL0",
+ "IXGBE_VFRDBAL1",
+ "IXGBE_VFRDBAH0",
+ "IXGBE_VFRDBAH1",
+ "IXGBE_VFRDLEN0",
+ "IXGBE_VFRDLEN1",
+ "IXGBE_VFRDH0",
+ "IXGBE_VFRDH1",
+ "IXGBE_VFRDT0",
+ "IXGBE_VFRDT1",
+ "IXGBE_VFRXDCTL0",
+ "IXGBE_VFRXDCTL1",
+ "IXGBE_VFSRRCTL0",
+ "IXGBE_VFSRRCTL1",
+ "IXGBE_VFPSRTYPE",
+ "IXGBE_VFTDBAL0",
+ "IXGBE_VFTDBAL1",
+ "IXGBE_VFTDBAH0",
+ "IXGBE_VFTDBAH1",
+ "IXGBE_VFTDLEN0",
+ "IXGBE_VFTDLEN1",
+ "IXGBE_VFTDH0",
+ "IXGBE_VFTDH1",
+ "IXGBE_VFTDT0",
+ "IXGBE_VFTDT1",
+ "IXGBE_VFTXDCTL0",
+ "IXGBE_VFTXDCTL1",
+ "IXGBE_VFTDWBAL0",
+ "IXGBE_VFTDWBAL1",
+ "IXGBE_VFTDWBAH0",
+ "IXGBE_VFTDWBAH1"
+};
+
+
+static int ixgbevf_get_regs_len(struct net_device *netdev)
+{
+ return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+}
+
+static void ixgbevf_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs,
+ void *p)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u32 regs_len = ixgbevf_get_regs_len(netdev);
+ u8 i;
+
+ memset(p, 0, regs_len);
+
+ regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+
+ /* General Registers */
+ regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
+ regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
+ regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
+ regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);
+
+ /* Interrupt */
+ /* don't read EICR because it can clear interrupt causes, instead
+ * read EICS which is a shadow but doesn't clear EICR */
+ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+ regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
+ regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
+ regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
+ regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
+ regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
+ regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+
+ /* Receive DMA */
+ for (i = 0; i < 2; i++)
+ regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+
+ /* Receive */
+ regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
+
+ /* Transmit */
+ for (i = 0; i < 2; i++)
+ regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
+
+ for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
+ hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
+}
+
+static void ixgbevf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
+ strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
+
+ strlcpy(drvinfo->fw_version, "N/A", 4);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+static void ixgbevf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = adapter->tx_ring;
+ struct ixgbevf_ring *rx_ring = adapter->rx_ring;
+
+ ring->rx_max_pending = IXGBEVF_MAX_RXD;
+ ring->tx_max_pending = IXGBEVF_MAX_TXD;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+}
+
+static int ixgbevf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
+ int i, err = 0;
+ u32 new_rx_count, new_tx_count;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
+ new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
+ new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring->count) &&
+ (new_rx_count == adapter->rx_ring->count)) {
+ /* nothing to do */
+ return 0;
+ }
+
+ while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ /*
+ * If the adapter isn't up and running then just set the
+ * new parameters and scurry for the exits.
+ */
+ if (!netif_running(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i].count = new_tx_count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i].count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto clear_reset;
+ }
+
+ tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!tx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!rx_ring) {
+ err = -ENOMEM;
+ goto err_rx_setup;
+ }
+
+ ixgbevf_down(adapter);
+
+ memcpy(tx_ring, adapter->tx_ring,
+ adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring[i].count = new_tx_count;
+ err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_tx_resources(adapter,
+ &tx_ring[i]);
+ }
+ goto err_tx_ring_setup;
+ }
+ tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
+ }
+
+ memcpy(rx_ring, adapter->rx_ring,
+ adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_ring[i].count = new_rx_count;
+ err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(adapter,
+ &rx_ring[i]);
+ }
+ goto err_rx_ring_setup;
+ }
+ rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
+ }
+
+ /*
+ * Only switch to new rings if all the prior allocations
+ * and ring setups have succeeded.
+ */
+ kfree(adapter->tx_ring);
+ adapter->tx_ring = tx_ring;
+ adapter->tx_ring_count = new_tx_count;
+
+ kfree(adapter->rx_ring);
+ adapter->rx_ring = rx_ring;
+ adapter->rx_ring_count = new_rx_count;
+
+ /* success! */
+ ixgbevf_up(adapter);
+
+ goto clear_reset;
+
+err_rx_ring_setup:
+ for(i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+
+err_tx_ring_setup:
+ kfree(rx_ring);
+
+err_rx_setup:
+ kfree(tx_ring);
+
+clear_reset:
+ clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+ return err;
+}
+
+static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return IXGBE_TEST_LEN;
+ case ETH_SS_STATS:
+ return IXGBE_GLOBAL_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ ixgbevf_update_stats(adapter);
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ char *p = (char *)adapter +
+ ixgbe_gstrings_stats[i].stat_offset;
+ char *b = (char *)adapter +
+ ixgbe_gstrings_stats[i].base_stat_offset;
+ char *r = (char *)adapter +
+ ixgbe_gstrings_stats[i].saved_reset_offset;
+ data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
+ ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
+ ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
+ }
+}
+
+static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ char *p = (char *)data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *ixgbe_gstrings_test,
+ IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link_up;
+ u32 link_speed = 0;
+ *data = 0;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+ if (!link_up)
+ *data = 1;
+
+ return *data;
+}
+
+/* ethtool register test data */
+struct ixgbevf_reg_test {
+ u16 reg;
+ u8 array_len;
+ u8 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* default VF register test */
+static const struct ixgbevf_reg_test reg_test_vf[] = {
+ { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+ { 0, 0, 0, 0 }
+};
+
+static const u32 register_test_patterns[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+};
+
+#define REG_PATTERN_TEST(R, M, W) \
+{ \
+ u32 pat, val, before; \
+ for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((register_test_patterns[pat] & W), \
+ (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if (val != (register_test_patterns[pat] & W & M)) { \
+ hw_dbg(&adapter->hw, \
+ "pattern test reg %04X failed: got " \
+ "0x%08X expected 0x%08X\n", \
+ R, val, (register_test_patterns[pat] & W & M)); \
+ *data = R; \
+ writel(before, adapter->hw.hw_addr + R); \
+ return 1; \
+ } \
+ writel(before, adapter->hw.hw_addr + R); \
+ } \
+}
+
+#define REG_SET_AND_CHECK(R, M, W) \
+{ \
+ u32 val, before; \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((W & M), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if ((W & M) != (val & M)) { \
+ printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
+ "expected 0x%08X\n", R, (val & M), (W & M)); \
+ *data = R; \
+ writel(before, (adapter->hw.hw_addr + R)); \
+ return 1; \
+ } \
+ writel(before, (adapter->hw.hw_addr + R)); \
+}
+
+static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ const struct ixgbevf_reg_test *test;
+ u32 i;
+
+ test = reg_test_vf;
+
+ /*
+ * Perform the register test, looping through the test table
+ * until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ REG_SET_AND_CHECK(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+ (adapter->hw.hw_addr + test->reg)
+ + (i * 0x40));
+ break;
+ case TABLE32_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ REG_PATTERN_TEST(test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ }
+ }
+ test++;
+ }
+
+ *data = 0;
+ return *data;
+}
+
+static void ixgbevf_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+
+ set_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ hw_dbg(&adapter->hw, "offline testing starting\n");
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbevf_reset(adapter);
+
+ hw_dbg(&adapter->hw, "register testing starting\n");
+ if (ixgbevf_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbevf_reset(adapter);
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ hw_dbg(&adapter->hw, "online testing starting\n");
+ /* Online tests */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Online tests aren't run; pass by default */
+ data[0] = 0;
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ }
+ msleep_interruptible(4 * 1000);
+}
+
+static int ixgbevf_nway_reset(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (!adapter->dev_closed)
+ ixgbevf_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
+static struct ethtool_ops ixgbevf_ethtool_ops = {
+ .get_settings = ixgbevf_get_settings,
+ .get_drvinfo = ixgbevf_get_drvinfo,
+ .get_regs_len = ixgbevf_get_regs_len,
+ .get_regs = ixgbevf_get_regs,
+ .nway_reset = ixgbevf_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ixgbevf_get_ringparam,
+ .set_ringparam = ixgbevf_set_ringparam,
+ .get_msglevel = ixgbevf_get_msglevel,
+ .set_msglevel = ixgbevf_set_msglevel,
+ .self_test = ixgbevf_diag_test,
+ .get_sset_count = ixgbevf_get_sset_count,
+ .get_strings = ixgbevf_get_strings,
+ .get_ethtool_stats = ixgbevf_get_ethtool_stats,
+};
+
+void ixgbevf_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+}
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 8857df4dd3b..e6c9d1a927a 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -34,6 +34,7 @@
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
+#include <linux/u64_stats_sync.h>
#include "vf.h"
@@ -71,12 +72,13 @@ struct ixgbevf_ring {
struct ixgbevf_rx_buffer *rx_buffer_info;
};
+ u64 total_bytes;
+ u64 total_packets;
+ struct u64_stats_sync syncp;
+
u16 head;
u16 tail;
- unsigned int total_bytes;
- unsigned int total_packets;
-
u16 reg_idx; /* holds the special value that gets the hardware register
* offset associated with this ring, which is different
* for DCB and RSS modes */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 3b880a27f8d..5e92cc2079b 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -44,6 +44,7 @@
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
+#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
@@ -202,6 +203,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
(count < tx_ring->work_limit)) {
bool cleaned = false;
rmb(); /* read buffer_info after eop_desc */
+ /* eop could change between read and DD-check */
+ if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
+ goto cont_loop;
for ( ; !cleaned; count++) {
struct sk_buff *skb;
tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
@@ -231,6 +235,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
i = 0;
}
+cont_loop:
eop = tx_ring->tx_buffer_info[i].next_to_watch;
eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
}
@@ -265,11 +270,10 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
}
+ u64_stats_update_begin(&tx_ring->syncp);
tx_ring->total_bytes += total_bytes;
tx_ring->total_packets += total_packets;
-
- netdev->stats.tx_bytes += total_bytes;
- netdev->stats.tx_packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
return count < tx_ring->work_limit;
}
@@ -289,12 +293,10 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+ u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
- if (is_vlan) {
- u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
-
+ if (is_vlan && test_bit(tag, adapter->active_vlans))
__vlan_hwaccel_put_tag(skb, tag);
- }
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
napi_gro_receive(&q_vector->napi, skb);
@@ -592,10 +594,10 @@ next_desc:
if (cleaned_count)
ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ u64_stats_update_begin(&rx_ring->syncp);
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
- adapter->netdev->stats.rx_bytes += total_rx_bytes;
- adapter->netdev->stats.rx_packets += total_rx_packets;
+ u64_stats_update_end(&rx_ring->syncp);
return cleaned;
}
@@ -2255,10 +2257,6 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
adapter->stats.vfgotc);
UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
adapter->stats.vfmprc);
-
- /* Fill out the OS statistics structure */
- adapter->netdev->stats.multicast = adapter->stats.vfmprc -
- adapter->stats.base_vfmprc;
}
/**
@@ -2914,22 +2912,20 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
- len = min((unsigned int)frag->size, total);
- offset = frag->page_offset;
+ len = min((unsigned int)skb_frag_size(frag), total);
+ offset = 0;
while (len) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
- frag->page,
- offset,
- size,
- DMA_TO_DEVICE);
+ tx_buffer_info->dma =
+ skb_frag_dma_map(&adapter->pdev->dev, frag,
+ offset, size, DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
@@ -3100,7 +3096,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
count += TXD_USE_COUNT(skb_headlen(skb));
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));
if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
adapter->tx_busy++;
@@ -3215,18 +3211,69 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ unsigned int start;
+ u64 bytes, packets;
+ const struct ixgbevf_ring *ring;
+ int i;
+
+ ixgbevf_update_stats(adapter);
+
+ stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ring = &adapter->rx_ring[i];
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ bytes = ring->total_bytes;
+ packets = ring->total_packets;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->rx_bytes += bytes;
+ stats->rx_packets += packets;
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ring = &adapter->tx_ring[i];
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ bytes = ring->total_bytes;
+ packets = ring->total_packets;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->tx_bytes += bytes;
+ stats->tx_packets += packets;
+ }
+
+ return stats;
+}
+
+static int ixgbevf_set_features(struct net_device *netdev, u32 features)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (features & NETIF_F_RXCSUM)
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ return 0;
+}
+
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close,
.ndo_start_xmit = ixgbevf_xmit_frame,
.ndo_set_rx_mode = ixgbevf_set_rx_mode,
- .ndo_set_multicast_list = ixgbevf_set_rx_mode,
+ .ndo_get_stats64 = ixgbevf_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbevf_set_mac,
.ndo_change_mtu = ixgbevf_change_mtu,
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
+ .ndo_set_features = ixgbevf_set_features,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3339,16 +3386,18 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
/* setup the private structure */
err = ixgbevf_sw_init(adapter);
- netdev->features = NETIF_F_SG |
+ netdev->hw_features = NETIF_F_SG |
NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
+
+ netdev->features = netdev->hw_features |
NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
- netdev->features |= NETIF_F_IPV6_CSUM;
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- netdev->features |= NETIF_F_GRO;
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_IP_CSUM;
@@ -3358,6 +3407,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
/* The HW MAC address was set and/or determined in sw_init */
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
new file mode 100644
index 00000000000..930fa83f256
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -0,0 +1,341 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "mbx.h"
+
+/**
+ * ixgbevf_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message notification
+ **/
+static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_msg(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message acknowledgement
+ **/
+static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_ack(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ ret_val = ixgbevf_poll_for_msg(hw);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbevf_poll_for_ack(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+ u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+ v2p_mailbox |= hw->mbx.v2p_mailbox;
+ hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * ixgbevf_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = 0;
+
+ hw->mbx.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ *
+ * returns true if the PF has set the reset done bit or else false
+ **/
+static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+ IXGBE_VFMAILBOX_RSTI))) {
+ ret_val = 0;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return 0 if we obtained the mailbox lock
+ **/
+static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer
+ **/
+static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val;
+ u16 i;
+
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbevf_check_for_msg_vf(hw);
+ ixgbevf_check_for_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read message from buffer
+ **/
+static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val = 0;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ return 0;
+}
+
+struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .read = ixgbevf_read_mbx_vf,
+ .write = ixgbevf_write_mbx_vf,
+ .read_posted = ixgbevf_read_posted_mbx,
+ .write_posted = ixgbevf_write_posted_mbx,
+ .check_for_msg = ixgbevf_check_for_msg_vf,
+ .check_for_ack = ixgbevf_check_for_ack_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
+
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index ea393eb03f3..ea393eb03f3 100644
--- a/drivers/net/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 189200eeca2..189200eeca2 100644
--- a/drivers/net/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index aa3682e8c47..aa3682e8c47 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 10306b492ee..10306b492ee 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
diff --git a/drivers/net/jme.c b/drivers/net/ethernet/jme.c
index 3ac262f5563..7becff1f387 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1920,7 +1920,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
int i, nr_frags = skb_shinfo(skb)->nr_frags;
int mask = jme->tx_ring_mask;
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
u32 len;
for (i = 0 ; i < nr_frags ; ++i) {
@@ -1928,8 +1928,9 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
ctxdesc = txdesc + ((idx + i + 2) & (mask));
ctxbi = txbi + ((idx + i + 2) & (mask));
- jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
- frag->page_offset, frag->size, hidma);
+ jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+ skb_frag_page(frag),
+ frag->page_offset, skb_frag_size(frag), hidma);
}
len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
@@ -2839,7 +2840,7 @@ static const struct net_device_ops jme_netdev_ops = {
.ndo_do_ioctl = jme_ioctl,
.ndo_start_xmit = jme_start_xmit,
.ndo_set_mac_address = jme_set_macaddr,
- .ndo_set_multicast_list = jme_set_multi,
+ .ndo_set_rx_mode = jme_set_multi,
.ndo_change_mtu = jme_change_mtu,
.ndo_tx_timeout = jme_tx_timeout,
.ndo_fix_features = jme_fix_features,
@@ -3131,6 +3132,9 @@ jme_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct jme_adapter *jme = netdev_priv(netdev);
+ if (!netif_running(netdev))
+ return 0;
+
atomic_dec(&jme->link_changing);
netif_device_detach(netdev);
@@ -3171,6 +3175,9 @@ jme_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct jme_adapter *jme = netdev_priv(netdev);
+ if (!netif_running(netdev))
+ return 0;
+
jme_clear_pm(jme);
jme_phy_on(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
diff --git a/drivers/net/jme.h b/drivers/net/ethernet/jme.h
index c1f8b893e2e..02ea27c1dcb 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -102,7 +102,6 @@ enum jme_spi_op_bits {
};
#define HALF_US 500 /* 500 ns */
-#define JMESPIIOCTL SIOCDEVPRIVATE
#define PCI_PRIV_PE1 0xE4
diff --git a/drivers/net/korina.c b/drivers/net/ethernet/korina.c
index 763844c587f..d8430f487b8 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -58,7 +58,6 @@
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
-#include <asm/segment.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -1089,7 +1088,7 @@ static const struct net_device_ops korina_netdev_ops = {
.ndo_open = korina_open,
.ndo_stop = korina_close,
.ndo_start_xmit = korina_send_packet,
- .ndo_set_multicast_list = korina_multicast_list,
+ .ndo_set_rx_mode = korina_multicast_list,
.ndo_tx_timeout = korina_tx_timeout,
.ndo_do_ioctl = korina_ioctl,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 45f252b7da3..6bb2b9506ca 100644
--- a/drivers/net/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -687,7 +687,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
.ndo_do_ioctl = ltq_etop_ioctl,
.ndo_set_mac_address = ltq_etop_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = ltq_etop_set_multicast_list,
+ .ndo_set_rx_mode = ltq_etop_set_multicast_list,
.ndo_select_queue = ltq_etop_select_queue,
.ndo_init = ltq_etop_init,
.ndo_tx_timeout = ltq_etop_tx_timeout,
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
new file mode 100644
index 00000000000..0029934748b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -0,0 +1,111 @@
+#
+# Marvell device configuration
+#
+
+config NET_VENDOR_MARVELL
+ bool "Marvell devices"
+ default y
+ depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Marvell devices. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_MARVELL
+
+config MV643XX_ETH
+ tristate "Marvell Discovery (643XX) and Orion ethernet support"
+ depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
+ select INET_LRO
+ select PHYLIB
+ ---help---
+ This driver supports the gigabit ethernet MACs in the
+ Marvell Discovery PPC/MIPS chipset family (MV643XX) and
+ in the Marvell Orion ARM SoC family.
+
+ Some boards that use the Discovery chipset are the Momenco
+ Ocelot C and Jaguar ATX and Pegasos II.
+
+config PXA168_ETH
+ tristate "Marvell pxa168 ethernet support"
+ depends on CPU_PXA168
+ select PHYLIB
+ ---help---
+ This driver supports the pxa168 Ethernet ports.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pxa168_eth.
+
+config SKGE
+ tristate "Marvell Yukon Gigabit Ethernet support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
+ and related Gigabit Ethernet adapters. It is a new smaller driver
+ with better performance and more complete ethtool support.
+
+ It does not support the link failover and network management
+ features that "portable" vendor supplied sk98lin driver does.
+
+ This driver supports adapters based on the original Yukon chipset:
+ Marvell 88E8001, Belkin F5D5005, CNet GigaCard, DLink DGE-530T,
+ Linksys EG1032/EG1064, 3Com 3C940/3C940B, SysKonnect SK-9871/9872.
+
+ It does not support the newer Yukon2 chipset: a separate driver,
+ sky2, is provided for these adapters.
+
+ To compile this driver as a module, choose M here: the module
+ will be called skge. This is recommended.
+
+config SKGE_DEBUG
+ bool "Debugging interface"
+ depends on SKGE && DEBUG_FS
+ ---help---
+ This option adds the ability to dump driver state for debugging.
+ The file /sys/kernel/debug/skge/ethX displays the state of the internal
+ transmit and receive rings.
+
+ If unsure, say N.
+
+config SKGE_GENESIS
+ bool "Support for older SysKonnect Genesis boards"
+ depends on SKGE
+ ---help---
+ This enables support for the older and uncommon SysKonnect Genesis
+ chips, which support MII via an external transceiver, instead of
+ an internal one. Disabling this option will save some memory
+ by making code smaller. If unsure say Y.
+
+config SKY2
+ tristate "Marvell Yukon 2 support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver supports Gigabit Ethernet adapters based on the
+ Marvell Yukon 2 chipset:
+ Marvell 88E8021/88E8022/88E8035/88E8036/88E8038/88E8050/88E8052/
+ 88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21
+
+ There is companion driver for the older Marvell Yukon and
+ SysKonnect Genesis based adapters: skge.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sky2. This is recommended.
+
+config SKY2_DEBUG
+ bool "Debugging interface"
+ depends on SKY2 && DEBUG_FS
+ ---help---
+ This option adds the ability to dump driver state for debugging.
+ The file /sys/kernel/debug/sky2/ethX displays the state of the internal
+ transmit and receive rings.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
new file mode 100644
index 00000000000..57e3234a37b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Marvell device drivers.
+#
+
+obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
+obj-$(CONFIG_SKGE) += skge.o
+obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 259699983ca..194a0311380 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -713,8 +713,9 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
int frag;
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
- if (fragp->size <= 8 && fragp->page_offset & 7)
+ const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+
+ if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
return 1;
}
@@ -751,11 +752,11 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
}
desc->l4i_chk = 0;
- desc->byte_cnt = this_frag->size;
- desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
- this_frag->page,
- this_frag->page_offset,
- this_frag->size, DMA_TO_DEVICE);
+ desc->byte_cnt = skb_frag_size(this_frag);
+ desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+ this_frag, 0,
+ skb_frag_size(this_frag),
+ DMA_TO_DEVICE);
}
}
@@ -1547,13 +1548,9 @@ mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
er->rx_max_pending = 4096;
er->tx_max_pending = 4096;
- er->rx_mini_max_pending = 0;
- er->rx_jumbo_max_pending = 0;
er->rx_pending = mp->rx_ring_size;
er->tx_pending = mp->tx_ring_size;
- er->rx_mini_pending = 0;
- er->rx_jumbo_pending = 0;
}
static int
@@ -2923,6 +2920,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
SET_NETDEV_DEV(dev, &pdev->dev);
if (mp->shared->win_protect)
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index d17d0624c5e..d17d0624c5e 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
diff --git a/drivers/net/skge.c b/drivers/net/ethernet/marvell/skge.c
index 98ec614c569..c7b60839ac9 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -113,6 +113,7 @@ static void yukon_init(struct skge_hw *hw, int port);
static void genesis_mac_init(struct skge_hw *hw, int port);
static void genesis_link_up(struct skge_port *skge);
static void skge_set_multicast(struct net_device *dev);
+static irqreturn_t skge_intr(int irq, void *dev_id);
/* Avoid conditionals by using array */
static const int txqaddr[] = { Q_XA1, Q_XA2 };
@@ -496,13 +497,9 @@ static void skge_get_ring_param(struct net_device *dev,
p->rx_max_pending = MAX_RX_RING_SIZE;
p->tx_max_pending = MAX_TX_RING_SIZE;
- p->rx_mini_max_pending = 0;
- p->rx_jumbo_max_pending = 0;
p->rx_pending = skge->rx_ring.count;
p->tx_pending = skge->tx_ring.count;
- p->rx_mini_pending = 0;
- p->rx_jumbo_pending = 0;
}
static int skge_set_ring_param(struct net_device *dev,
@@ -2568,6 +2565,16 @@ static int skge_up(struct net_device *dev)
if (err)
goto free_rx_ring;
+ if (hw->ports == 1) {
+ err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED,
+ dev->name, hw);
+ if (err) {
+ netdev_err(dev, "Unable to allocate interrupt %d error: %d\n",
+ hw->pdev->irq, err);
+ goto free_tx_ring;
+ }
+ }
+
/* Initialize MAC */
spin_lock_bh(&hw->phy_lock);
if (is_genesis(hw))
@@ -2595,11 +2602,14 @@ static int skge_up(struct net_device *dev)
spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= portmask[port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
spin_unlock_irq(&hw->hw_lock);
napi_enable(&skge->napi);
return 0;
+ free_tx_ring:
+ kfree(skge->tx_ring.start);
free_rx_ring:
skge_rx_clean(skge);
kfree(skge->rx_ring.start);
@@ -2640,9 +2650,13 @@ static int skge_down(struct net_device *dev)
spin_lock_irq(&hw->hw_lock);
hw->intr_mask &= ~portmask[port];
- skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 0 : hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
spin_unlock_irq(&hw->hw_lock);
+ if (hw->ports == 1)
+ free_irq(hw->pdev->irq, hw);
+
skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
if (is_genesis(hw))
genesis_stop(skge);
@@ -2756,10 +2770,10 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
control |= BMU_STFWD;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- map = pci_map_page(hw->pdev, frag->page, frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
+ map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
e = e->next;
e->skb = skb;
@@ -2769,9 +2783,9 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
tf->dma_lo = map;
tf->dma_hi = (u64) map >> 32;
dma_unmap_addr_set(e, mapaddr, map);
- dma_unmap_len_set(e, maplen, frag->size);
+ dma_unmap_len_set(e, maplen, skb_frag_size(frag));
- tf->control = BMU_OWN | BMU_SW | control | frag->size;
+ tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag);
}
tf->control |= BMU_EOF | BMU_IRQ_EOF;
}
@@ -3603,7 +3617,8 @@ static int skge_reset(struct skge_hw *hw)
skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
skge_write32(hw, B2_IRQM_CTRL, TIM_START);
- skge_write32(hw, B0_IMSK, hw->intr_mask);
+ /* Leave irq disabled until first port is brought up. */
+ skge_write32(hw, B0_IMSK, 0);
for (i = 0; i < hw->ports; i++) {
if (is_genesis(hw))
@@ -3762,7 +3777,7 @@ static const struct net_device_ops skge_netdev_ops = {
.ndo_tx_timeout = skge_tx_timeout,
.ndo_change_mtu = skge_change_mtu,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = skge_set_multicast,
+ .ndo_set_rx_mode = skge_set_multicast,
.ndo_set_mac_address = skge_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = skge_netpoll,
@@ -3930,31 +3945,39 @@ static int __devinit skge_probe(struct pci_dev *pdev,
goto err_out_free_netdev;
}
- err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw);
- if (err) {
- dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
- dev->name, pdev->irq);
- goto err_out_unregister;
- }
skge_show_addr(dev);
if (hw->ports > 1) {
dev1 = skge_devinit(hw, 1, using_dac);
- if (dev1 && register_netdev(dev1) == 0)
- skge_show_addr(dev1);
- else {
- /* Failure to register second port need not be fatal */
- dev_warn(&pdev->dev, "register of second port failed\n");
- hw->dev[1] = NULL;
- hw->ports = 1;
- if (dev1)
- free_netdev(dev1);
+ if (!dev1) {
+ err = -ENOMEM;
+ goto err_out_unregister;
}
+
+ err = register_netdev(dev1);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register second net device\n");
+ goto err_out_free_dev1;
+ }
+
+ err = request_irq(pdev->irq, skge_intr, IRQF_SHARED,
+ hw->irq_name, hw);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign irq %d\n",
+ pdev->irq);
+ goto err_out_unregister_dev1;
+ }
+
+ skge_show_addr(dev1);
}
pci_set_drvdata(pdev, hw);
return 0;
+err_out_unregister_dev1:
+ unregister_netdev(dev1);
+err_out_free_dev1:
+ free_netdev(dev1);
err_out_unregister:
unregister_netdev(dev);
err_out_free_netdev:
@@ -3992,14 +4015,19 @@ static void __devexit skge_remove(struct pci_dev *pdev)
spin_lock_irq(&hw->hw_lock);
hw->intr_mask = 0;
- skge_write32(hw, B0_IMSK, 0);
- skge_read32(hw, B0_IMSK);
+
+ if (hw->ports > 1) {
+ skge_write32(hw, B0_IMSK, 0);
+ skge_read32(hw, B0_IMSK);
+ free_irq(pdev->irq, hw);
+ }
spin_unlock_irq(&hw->hw_lock);
skge_write16(hw, B0_LED, LED_STAT_OFF);
skge_write8(hw, B0_CTST, CS_RST_SET);
- free_irq(pdev->irq, hw);
+ if (hw->ports > 1)
+ free_irq(pdev->irq, hw);
pci_release_regions(pdev);
pci_disable_device(pdev);
if (dev1)
diff --git a/drivers/net/skge.h b/drivers/net/ethernet/marvell/skge.h
index a2eb3411584..a2eb3411584 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/ethernet/marvell/skge.h
diff --git a/drivers/net/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 57339da7632..cbd026f3bc5 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -148,6 +148,7 @@ static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
static void sky2_set_multicast(struct net_device *dev);
+static irqreturn_t sky2_intr(int irq, void *dev_id);
/* Access to PHY via serial interconnect */
static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
@@ -1224,14 +1225,13 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
dma_unmap_len_set(re, data_size, size);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- re->frag_addr[i] = pci_map_page(pdev, frag->page,
- frag->page_offset,
- frag->size,
- PCI_DMA_FROMDEVICE);
+ re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_FROM_DEVICE);
- if (pci_dma_mapping_error(pdev, re->frag_addr[i]))
+ if (dma_mapping_error(&pdev->dev, re->frag_addr[i]))
goto map_page_error;
}
return 0;
@@ -1239,7 +1239,7 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
map_page_error:
while (--i >= 0) {
pci_unmap_page(pdev, re->frag_addr[i],
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_FROMDEVICE);
}
@@ -1263,7 +1263,7 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
pci_unmap_page(pdev, re->frag_addr[i],
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_FROMDEVICE);
}
@@ -1716,6 +1716,27 @@ static void sky2_hw_up(struct sky2_port *sky2)
sky2_rx_start(sky2);
}
+/* Setup device IRQ and enable napi to process */
+static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
+{
+ struct pci_dev *pdev = hw->pdev;
+ int err;
+
+ err = request_irq(pdev->irq, sky2_intr,
+ (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
+ name, hw);
+ if (err)
+ dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
+ else {
+ napi_enable(&hw->napi);
+ sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
+ sky2_read32(hw, B0_IMSK);
+ }
+
+ return err;
+}
+
+
/* Bring up network interface. */
static int sky2_up(struct net_device *dev)
{
@@ -1731,6 +1752,10 @@ static int sky2_up(struct net_device *dev)
if (err)
goto err_out;
+ /* With single port, IRQ is setup when device is brought up */
+ if (hw->ports == 1 && (err = sky2_setup_irq(hw, dev->name)))
+ goto err_out;
+
sky2_hw_up(sky2);
/* Enable interrupts from phy/mac for port */
@@ -1910,10 +1935,10 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
+ mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
- if (pci_dma_mapping_error(hw->pdev, mapping))
+ if (dma_mapping_error(&hw->pdev->dev, mapping))
goto mapping_unwind;
upper = upper_32_bits(mapping);
@@ -1927,11 +1952,11 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
re = sky2->tx_ring + slot;
re->flags = TX_MAP_PAGE;
dma_unmap_addr_set(re, mapaddr, mapping);
- dma_unmap_len_set(re, maplen, frag->size);
+ dma_unmap_len_set(re, maplen, skb_frag_size(frag));
le = get_tx_le(sky2, &slot);
le->addr = cpu_to_le32(lower_32_bits(mapping));
- le->length = cpu_to_le16(frag->size);
+ le->length = cpu_to_le16(skb_frag_size(frag));
le->ctrl = ctrl;
le->opcode = OP_BUFFER | HW_OWNER;
}
@@ -2057,7 +2082,7 @@ static void sky2_hw_down(struct sky2_port *sky2)
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
- /* Force any delayed status interrrupt and NAPI */
+ /* Force any delayed status interrupt and NAPI */
sky2_write32(hw, STAT_LEV_TIMER_CNT, 0);
sky2_write32(hw, STAT_TX_TIMER_CNT, 0);
sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
@@ -2092,8 +2117,13 @@ static int sky2_down(struct net_device *dev)
sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
sky2_read32(hw, B0_IMSK);
- synchronize_irq(hw->pdev->irq);
- napi_synchronize(&hw->napi);
+ if (hw->ports == 1) {
+ napi_disable(&hw->napi);
+ free_irq(hw->pdev->irq, hw);
+ } else {
+ synchronize_irq(hw->pdev->irq);
+ napi_synchronize(&hw->napi);
+ }
sky2_hw_down(sky2);
@@ -2449,14 +2479,14 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
if (length == 0) {
/* don't need this page */
- __free_page(frag->page);
+ __skb_frag_unref(frag);
--skb_shinfo(skb)->nr_frags;
} else {
size = min(length, (unsigned) PAGE_SIZE);
- frag->size = size;
+ skb_frag_size_set(frag, size);
skb->data_len += size;
- skb->truesize += size;
+ skb->truesize += PAGE_SIZE;
skb->len += size;
length -= size;
}
@@ -4058,13 +4088,9 @@ static void sky2_get_ringparam(struct net_device *dev,
struct sky2_port *sky2 = netdev_priv(dev);
ering->rx_max_pending = RX_MAX_PENDING;
- ering->rx_mini_max_pending = 0;
- ering->rx_jumbo_max_pending = 0;
ering->tx_max_pending = TX_MAX_PENDING;
ering->rx_pending = sky2->rx_pending;
- ering->rx_mini_pending = 0;
- ering->rx_jumbo_pending = 0;
ering->tx_pending = sky2->tx_pending;
}
@@ -4612,7 +4638,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_do_ioctl = sky2_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = sky2_set_mac_address,
- .ndo_set_multicast_list = sky2_set_multicast,
+ .ndo_set_rx_mode = sky2_set_multicast,
.ndo_change_mtu = sky2_change_mtu,
.ndo_fix_features = sky2_fix_features,
.ndo_set_features = sky2_set_features,
@@ -4629,7 +4655,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_do_ioctl = sky2_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = sky2_set_mac_address,
- .ndo_set_multicast_list = sky2_set_multicast,
+ .ndo_set_rx_mode = sky2_set_multicast,
.ndo_change_mtu = sky2_change_mtu,
.ndo_fix_features = sky2_fix_features,
.ndo_set_features = sky2_set_features,
@@ -4799,7 +4825,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
static int __devinit sky2_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- struct net_device *dev;
+ struct net_device *dev, *dev1;
struct sky2_hw *hw;
int err, using_dac = 0, wol_default;
u32 reg;
@@ -4925,33 +4951,26 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
- err = request_irq(pdev->irq, sky2_intr,
- (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
- hw->irq_name, hw);
- if (err) {
- dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
- goto err_out_unregister;
- }
- sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
- napi_enable(&hw->napi);
-
sky2_show_addr(dev);
if (hw->ports > 1) {
- struct net_device *dev1;
-
- err = -ENOMEM;
dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
- if (dev1 && (err = register_netdev(dev1)) == 0)
- sky2_show_addr(dev1);
- else {
- dev_warn(&pdev->dev,
- "register of second port failed (%d)\n", err);
- hw->dev[1] = NULL;
- hw->ports = 1;
- if (dev1)
- free_netdev(dev1);
+ if (!dev1) {
+ err = -ENOMEM;
+ goto err_out_unregister;
}
+
+ err = register_netdev(dev1);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register second net device\n");
+ goto err_out_free_dev1;
+ }
+
+ err = sky2_setup_irq(hw, hw->irq_name);
+ if (err)
+ goto err_out_unregister_dev1;
+
+ sky2_show_addr(dev1);
}
setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
@@ -4962,6 +4981,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
return 0;
+err_out_unregister_dev1:
+ unregister_netdev(dev1);
+err_out_free_dev1:
+ free_netdev(dev1);
err_out_unregister:
if (hw->flags & SKY2_HW_USE_MSI)
pci_disable_msi(pdev);
@@ -5001,13 +5024,18 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
unregister_netdev(hw->dev[i]);
sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
sky2_power_aux(hw);
sky2_write8(hw, B0_CTST, CS_RST_SET);
sky2_read8(hw, B0_CTST);
- free_irq(pdev->irq, hw);
+ if (hw->ports > 1) {
+ napi_disable(&hw->napi);
+ free_irq(pdev->irq, hw);
+ }
+
if (hw->flags & SKY2_HW_USE_MSI)
pci_disable_msi(pdev);
pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
diff --git a/drivers/net/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 0af31b8b5f1..0af31b8b5f1 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
new file mode 100644
index 00000000000..d8099a7903d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -0,0 +1,23 @@
+#
+# Mellanox driver configuration
+#
+
+config NET_VENDOR_MELLANOX
+ bool "Mellanox devices"
+ default y
+ depends on PCI && INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Mellanox cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_MELLANOX
+
+source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
+
+endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
new file mode 100644
index 00000000000..37afb968337
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Mellanox device drivers.
+#
+
+obj-$(CONFIG_MLX4_CORE) += mlx4/
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
new file mode 100644
index 00000000000..1bb93531f1b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -0,0 +1,27 @@
+#
+# Mellanox driver configuration
+#
+
+config MLX4_EN
+ tristate "Mellanox Technologies 10Gbit Ethernet support"
+ depends on PCI && INET
+ select MLX4_CORE
+ select INET_LRO
+ ---help---
+ This driver supports Mellanox Technologies ConnectX Ethernet
+ devices.
+
+config MLX4_CORE
+ tristate
+ depends on PCI
+ default n
+
+config MLX4_DEBUG
+ bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
+ depends on MLX4_CORE
+ default y
+ ---help---
+ This option causes debugging code to be compiled into the
+ mlx4_core driver. The output can be turned on via the
+ debug_level module parameter (which can also be set after
+ the driver is loaded through sysfs).
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index d1aa45a1585..d1aa45a1585 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 116cae334da..116cae334da 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 32f947154c3..32f947154c3 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 23cee7b6af9..23cee7b6af9 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index bd8ef9f2fa7..bd8ef9f2fa7 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index ec4b6d047fe..227997d775e 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -74,7 +74,8 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
return err;
}
-int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+ int cq_idx)
{
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
@@ -90,13 +91,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
if (cq->is_tx == RX) {
if (mdev->dev->caps.comp_pool) {
if (!cq->vector) {
- sprintf(name , "%s-rx-%d", priv->dev->name, cq->ring);
+ sprintf(name, "%s-%d", priv->dev->name,
+ cq->ring);
+ /* Set IRQ for specific name (per ring) */
if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
- cq->vector = (cq->ring + 1 + priv->port) %
- mdev->dev->caps.num_comp_vectors;
+ cq->vector = (cq->ring + 1 + priv->port)
+ % mdev->dev->caps.num_comp_vectors;
mlx4_warn(mdev, "Failed Assigning an EQ to "
- "%s_rx-%d ,Falling back to legacy EQ's\n",
- priv->dev->name, cq->ring);
+ "%s ,Falling back to legacy EQ's\n",
+ name);
}
}
} else {
@@ -104,10 +107,13 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
mdev->dev->caps.num_comp_vectors;
}
} else {
- if (!cq->vector || !mdev->dev->caps.comp_pool) {
- /*Fallback to legacy pool in case of error*/
- cq->vector = 0;
- }
+ /* For TX we use the same irq per
+ ring we assigned for the RX */
+ struct mlx4_en_cq *rx_cq;
+
+ cq_idx = cq_idx % priv->rx_ring_num;
+ rx_cq = &priv->rx_cq[cq_idx];
+ cq->vector = rx_cq->vector;
}
if (!cq->is_tx)
@@ -133,14 +139,13 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
return 0;
}
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
- bool reserve_vectors)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
struct mlx4_en_dev *mdev = priv->mdev;
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
- if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors)
+ if (priv->mdev->dev->caps.comp_pool && cq->vector)
mlx4_release_eq(priv->mdev->dev, cq->vector);
cq->buf_size = 0;
cq->buf = NULL;
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index eb096253d78..74e2a2a8a02 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -342,13 +342,13 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
priv->sample_interval = coal->rate_sample_interval;
priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
- priv->last_moder_time = MLX4_EN_AUTO_CONF;
if (priv->adaptive_rx_coal)
return 0;
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_cq[i].moder_cnt = priv->rx_frames;
priv->rx_cq[i].moder_time = priv->rx_usecs;
+ priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
if (err)
return err;
@@ -394,6 +394,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
u32 rx_size, tx_size;
int port_up = 0;
int err = 0;
+ int i;
if (param->rx_jumbo_pending || param->rx_mini_pending)
return -EINVAL;
@@ -416,7 +417,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
mlx4_en_stop_port(dev);
}
- mlx4_en_free_resources(priv, true);
+ mlx4_en_free_resources(priv);
priv->prof->tx_ring_size = tx_size;
priv->prof->rx_ring_size = rx_size;
@@ -432,6 +433,15 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
en_err(priv, "Failed starting port\n");
}
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ priv->rx_cq[i].moder_cnt = priv->rx_frames;
+ priv->rx_cq[i].moder_time = priv->rx_usecs;
+ priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
+ err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+ if (err)
+ goto out;
+ }
+
out:
mutex_unlock(&mdev->state_lock);
return err;
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 6bfea233a9f..a06096fcc0b 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -176,6 +176,7 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
mlx4_mr_free(dev, &mdev->mr);
+ iounmap(mdev->uar_map);
mlx4_uar_free(dev, &mdev->priv_uar);
mlx4_pd_free(dev, mdev->priv_pdn);
kfree(mdev);
@@ -223,7 +224,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
0, 0, &mdev->mr)) {
mlx4_err(mdev, "Failed allocating memory region\n");
- goto err_uar;
+ goto err_map;
}
if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
mlx4_err(mdev, "Failed enabling memory region\n");
@@ -282,6 +283,9 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
err_mr:
mlx4_mr_free(dev, &mdev->mr);
+err_map:
+ if (!mdev->uar_map)
+ iounmap(mdev->uar_map);
err_uar:
mlx4_uar_free(dev, &mdev->priv_uar);
err_pd:
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4b0f32e568f..78d776bc355 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -415,6 +415,9 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
cq = &priv->rx_cq[i];
cq->moder_cnt = priv->rx_frames;
cq->moder_time = priv->rx_usecs;
+ priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
+ priv->last_moder_packets[i] = 0;
+ priv->last_moder_bytes[i] = 0;
}
for (i = 0; i < priv->tx_ring_num; i++) {
@@ -430,11 +433,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
priv->adaptive_rx_coal = 1;
- priv->last_moder_time = MLX4_EN_AUTO_CONF;
priv->last_moder_jiffies = 0;
- priv->last_moder_packets = 0;
priv->last_moder_tx_packets = 0;
- priv->last_moder_bytes = 0;
}
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
@@ -446,43 +446,30 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
unsigned long avg_pkt_size;
unsigned long rx_packets;
unsigned long rx_bytes;
- unsigned long tx_packets;
- unsigned long tx_pkt_diff;
unsigned long rx_pkt_diff;
int moder_time;
- int i, err;
+ int ring, err;
if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
return;
- spin_lock_bh(&priv->stats_lock);
- rx_packets = priv->stats.rx_packets;
- rx_bytes = priv->stats.rx_bytes;
- tx_packets = priv->stats.tx_packets;
- spin_unlock_bh(&priv->stats_lock);
-
- if (!priv->last_moder_jiffies || !period)
- goto out;
-
- tx_pkt_diff = ((unsigned long) (tx_packets -
- priv->last_moder_tx_packets));
- rx_pkt_diff = ((unsigned long) (rx_packets -
- priv->last_moder_packets));
- packets = max(tx_pkt_diff, rx_pkt_diff);
- rate = packets * HZ / period;
- avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
- priv->last_moder_bytes)) / packets : 0;
-
- /* Apply auto-moderation only when packet rate exceeds a rate that
- * it matters */
- if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
- /* If tx and rx packet rates are not balanced, assume that
- * traffic is mainly BW bound and apply maximum moderation.
- * Otherwise, moderate according to packet rate */
- if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
- 2 * rx_pkt_diff > 3 * tx_pkt_diff) {
- moder_time = priv->rx_usecs_high;
- } else {
+ for (ring = 0; ring < priv->rx_ring_num; ring++) {
+ spin_lock_bh(&priv->stats_lock);
+ rx_packets = priv->rx_ring[ring].packets;
+ rx_bytes = priv->rx_ring[ring].bytes;
+ spin_unlock_bh(&priv->stats_lock);
+
+ rx_pkt_diff = ((unsigned long) (rx_packets -
+ priv->last_moder_packets[ring]));
+ packets = rx_pkt_diff;
+ rate = packets * HZ / period;
+ avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
+ priv->last_moder_bytes[ring])) / packets : 0;
+
+ /* Apply auto-moderation only when packet rate
+ * exceeds a rate that it matters */
+ if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
+ avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
if (rate < priv->pkt_rate_low)
moder_time = priv->rx_usecs_low;
else if (rate > priv->pkt_rate_high)
@@ -492,36 +479,23 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
(priv->rx_usecs_high - priv->rx_usecs_low) /
(priv->pkt_rate_high - priv->pkt_rate_low) +
priv->rx_usecs_low;
+ } else {
+ moder_time = priv->rx_usecs_low;
}
- } else {
- moder_time = priv->rx_usecs_low;
- }
-
- en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
- tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
- en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
- "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
- priv->last_moder_time, moder_time, period, packets,
- avg_pkt_size, rate);
-
- if (moder_time != priv->last_moder_time) {
- priv->last_moder_time = moder_time;
- for (i = 0; i < priv->rx_ring_num; i++) {
- cq = &priv->rx_cq[i];
+ if (moder_time != priv->last_moder_time[ring]) {
+ priv->last_moder_time[ring] = moder_time;
+ cq = &priv->rx_cq[ring];
cq->moder_time = moder_time;
err = mlx4_en_set_cq_moder(priv, cq);
- if (err) {
- en_err(priv, "Failed modifying moderation for cq:%d\n", i);
- break;
- }
+ if (err)
+ en_err(priv, "Failed modifying moderation "
+ "for cq:%d\n", ring);
}
+ priv->last_moder_packets[ring] = rx_packets;
+ priv->last_moder_bytes[ring] = rx_bytes;
}
-out:
- priv->last_moder_packets = rx_packets;
- priv->last_moder_tx_packets = tx_packets;
- priv->last_moder_bytes = rx_bytes;
priv->last_moder_jiffies = jiffies;
}
@@ -587,7 +561,6 @@ int mlx4_en_start_port(struct net_device *dev)
int i;
int j;
u8 mc_list[16] = {0};
- char name[32];
if (priv->port_up) {
en_dbg(DRV, priv, "start port called while port already up\n");
@@ -608,7 +581,7 @@ int mlx4_en_start_port(struct net_device *dev)
for (i = 0; i < priv->rx_ring_num; i++) {
cq = &priv->rx_cq[i];
- err = mlx4_en_activate_cq(priv, cq);
+ err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed activating Rx CQ\n");
goto cq_err;
@@ -642,20 +615,11 @@ int mlx4_en_start_port(struct net_device *dev)
goto mac_err;
}
- if (mdev->dev->caps.comp_pool && !priv->tx_vector) {
- sprintf(name , "%s-tx", priv->dev->name);
- if (mlx4_assign_eq(mdev->dev , name, &priv->tx_vector)) {
- mlx4_warn(mdev, "Failed Assigning an EQ to "
- "%s_tx ,Falling back to legacy "
- "EQ's\n", priv->dev->name);
- }
- }
/* Configure tx cq's and rings */
for (i = 0; i < priv->tx_ring_num; i++) {
/* Configure cq */
cq = &priv->tx_cq[i];
- cq->vector = priv->tx_vector;
- err = mlx4_en_activate_cq(priv, cq);
+ err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed allocating Tx CQ\n");
goto tx_err;
@@ -886,7 +850,7 @@ static int mlx4_en_close(struct net_device *dev)
return 0;
}
-void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors)
+void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i;
@@ -894,14 +858,14 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors)
if (priv->tx_ring[i].tx_info)
mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
if (priv->tx_cq[i].buf)
- mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors);
+ mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
}
for (i = 0; i < priv->rx_ring_num; i++) {
if (priv->rx_ring[i].rx_info)
mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
if (priv->rx_cq[i].buf)
- mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors);
+ mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
}
@@ -971,7 +935,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mdev->pndev[priv->port] = NULL;
mutex_unlock(&mdev->state_lock);
- mlx4_en_free_resources(priv, false);
+ mlx4_en_free_resources(priv);
free_netdev(dev);
}
@@ -1016,7 +980,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
.ndo_get_stats = mlx4_en_get_stats,
- .ndo_set_multicast_list = mlx4_en_set_multicast,
+ .ndo_set_rx_mode = mlx4_en_set_multicast,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
@@ -1120,7 +1084,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->vlan_features = dev->hw_features;
- dev->hw_features |= NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
@@ -1133,6 +1097,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
en_err(priv, "Netdev registration failed for port %d\n", port);
goto out;
}
+ priv->registered = 1;
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
@@ -1154,7 +1119,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
en_err(priv, "Failed Initializing port\n");
goto out;
}
- priv->registered = 1;
mlx4_en_set_default_moderation(priv);
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
return 0;
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 5ada5b46911..03c84cd78cd 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -128,7 +128,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
memset(context, 0, sizeof *context);
context->base_qpn = cpu_to_be32(base_qpn);
- context->n_mac = 0x2;
+ context->n_mac = dev->caps.log_num_macs;
context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
base_qpn);
context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
@@ -167,11 +167,21 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
/* This command is always accessed from Ethtool context
* already synchronized, no need in locking */
state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
- if ((qport_context->link_speed & MLX4_EN_SPEED_MASK) ==
- MLX4_EN_1G_SPEED)
+ switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) {
+ case MLX4_EN_1G_SPEED:
state->link_speed = 1000;
- else
+ break;
+ case MLX4_EN_10G_SPEED_XAUI:
+ case MLX4_EN_10G_SPEED_XFI:
state->link_speed = 10000;
+ break;
+ case MLX4_EN_40G_SPEED:
+ state->link_speed = 40000;
+ break;
+ default:
+ state->link_speed = -1;
+ break;
+ }
state->transciver = qport_context->transceiver;
out:
@@ -204,15 +214,21 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->rx_packets = 0;
stats->rx_bytes = 0;
+ priv->port_stats.rx_chksum_good = 0;
+ priv->port_stats.rx_chksum_none = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
stats->rx_packets += priv->rx_ring[i].packets;
stats->rx_bytes += priv->rx_ring[i].bytes;
+ priv->port_stats.rx_chksum_good += priv->rx_ring[i].csum_ok;
+ priv->port_stats.rx_chksum_none += priv->rx_ring[i].csum_none;
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
+ priv->port_stats.tx_chksum_offload = 0;
for (i = 0; i < priv->tx_ring_num; i++) {
stats->tx_packets += priv->tx_ring[i].packets;
stats->tx_bytes += priv->tx_ring[i].bytes;
+ priv->port_stats.tx_chksum_offload += priv->tx_ring[i].tx_csum;
}
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index e3d73e41c56..19eb244f516 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -94,6 +94,14 @@ enum {
MLX4_MCAST_ENABLE = 2,
};
+enum {
+ MLX4_EN_1G_SPEED = 0x02,
+ MLX4_EN_10G_SPEED_XFI = 0x01,
+ MLX4_EN_10G_SPEED_XAUI = 0x00,
+ MLX4_EN_40G_SPEED = 0x40,
+ MLX4_EN_OTHER_SPEED = 0x0f,
+};
+
struct mlx4_en_query_port_context {
u8 link_up;
#define MLX4_EN_LINK_UP_MASK 0x80
@@ -101,8 +109,7 @@ struct mlx4_en_query_port_context {
__be16 mtu;
u8 reserved2;
u8 link_speed;
-#define MLX4_EN_SPEED_MASK 0x3
-#define MLX4_EN_1G_SPEED 0x2
+#define MLX4_EN_SPEED_MASK 0x43
u16 reserved3[5];
__be64 mac;
u8 transceiver;
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 0dfb4ec8a9d..0dfb4ec8a9d 100644
--- a/drivers/net/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 37cc9e5c56b..b89c36dbf5b 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -44,7 +44,7 @@
static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
- struct skb_frag_struct *skb_frags,
+ struct page_frag *skb_frags,
struct mlx4_en_rx_alloc *ring_alloc,
int i)
{
@@ -61,7 +61,7 @@ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
return -ENOMEM;
skb_frags[i].page = page_alloc->page;
- skb_frags[i].page_offset = page_alloc->offset;
+ skb_frags[i].offset = page_alloc->offset;
page_alloc->page = page;
page_alloc->offset = frag_info->frag_align;
} else {
@@ -69,11 +69,11 @@ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
get_page(page);
skb_frags[i].page = page;
- skb_frags[i].page_offset = page_alloc->offset;
+ skb_frags[i].offset = page_alloc->offset;
page_alloc->offset += frag_info->frag_stride;
}
dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
- skb_frags[i].page_offset, frag_info->frag_size,
+ skb_frags[i].offset, frag_info->frag_size,
PCI_DMA_FROMDEVICE);
rx_desc->data[i].addr = cpu_to_be64(dma);
return 0;
@@ -135,7 +135,7 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
/* Set size and memtype fields */
for (i = 0; i < priv->num_frags; i++) {
- skb_frags[i].size = priv->frag_info[i].frag_size;
+ skb_frag_size_set(&skb_frags[i], priv->frag_info[i].frag_size);
rx_desc->data[i].byte_count =
cpu_to_be32(priv->frag_info[i].frag_size);
rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
@@ -157,8 +157,8 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
- struct skb_frag_struct *skb_frags = ring->rx_info +
- (index << priv->log_rx_info);
+ struct page_frag *skb_frags = ring->rx_info +
+ (index << priv->log_rx_info);
int i;
for (i = 0; i < priv->num_frags; i++)
@@ -183,7 +183,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
int index)
{
struct mlx4_en_dev *mdev = priv->mdev;
- struct skb_frag_struct *skb_frags;
+ struct page_frag *skb_frags;
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
dma_addr_t dma;
int nr;
@@ -403,11 +403,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
/* Unmap a completed descriptor and free unused pages */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
- struct skb_frag_struct *skb_frags,
- struct skb_frag_struct *skb_frags_rx,
+ struct page_frag *skb_frags,
+ struct sk_buff *skb,
struct mlx4_en_rx_alloc *page_alloc,
int length)
{
+ struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_frag_info *frag_info;
int nr;
@@ -420,9 +421,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
break;
/* Save page reference in skb */
- skb_frags_rx[nr].page = skb_frags[nr].page;
- skb_frags_rx[nr].size = skb_frags[nr].size;
- skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
+ __skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
+ skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
+ skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
+ skb->truesize += frag_info->frag_stride;
dma = be64_to_cpu(rx_desc->data[nr].addr);
/* Allocate a replacement page */
@@ -430,13 +432,13 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
goto fail;
/* Unmap buffer */
- pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size,
+ pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags_rx[nr]),
PCI_DMA_FROMDEVICE);
}
/* Adjust size of last fragment to match actual length */
if (nr > 0)
- skb_frags_rx[nr - 1].size = length -
- priv->frag_info[nr - 1].frag_prefix_size;
+ skb_frag_size_set(&skb_frags_rx[nr - 1],
+ length - priv->frag_info[nr - 1].frag_prefix_size);
return nr;
fail:
@@ -444,7 +446,7 @@ fail:
* the descriptor) of this packet; remaining fragments are reused... */
while (nr > 0) {
nr--;
- put_page(skb_frags_rx[nr].page);
+ __skb_frag_unref(&skb_frags_rx[nr]);
}
return 0;
}
@@ -452,7 +454,7 @@ fail:
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
- struct skb_frag_struct *skb_frags,
+ struct page_frag *skb_frags,
struct mlx4_en_rx_alloc *page_alloc,
unsigned int length)
{
@@ -470,11 +472,10 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
skb->dev = priv->dev;
skb_reserve(skb, NET_IP_ALIGN);
skb->len = length;
- skb->truesize = length + sizeof(struct sk_buff);
/* Get pointer to first fragment so we could copy the headers into the
* (linear part of the) skb */
- va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
+ va = page_address(skb_frags[0].page) + skb_frags[0].offset;
if (length <= SMALL_PACKET_SIZE) {
/* We are copying all relevant data to the skb - temporarily
@@ -490,8 +491,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
/* Move relevant fragments to skb */
used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
- skb_shinfo(skb)->frags,
- page_alloc, length);
+ skb, page_alloc, length);
if (unlikely(!used_frags)) {
kfree_skb(skb);
return NULL;
@@ -506,7 +506,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
/* Adjust size of first fragment */
- skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE);
skb->data_len = length - HEADER_COPY_SIZE;
}
return skb;
@@ -533,7 +533,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- struct skb_frag_struct *skb_frags;
+ struct page_frag *skb_frags;
struct mlx4_en_rx_desc *rx_desc;
struct sk_buff *skb;
int index;
@@ -587,7 +587,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (likely(dev->features & NETIF_F_RXCSUM)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
(cqe->checksum == cpu_to_be16(0xffff))) {
- priv->port_stats.rx_chksum_good++;
+ ring->csum_ok++;
/* This packet is eligible for LRO if it is:
* - DIX Ethernet (type interpretation)
* - TCP/IP (v4)
@@ -600,7 +600,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
nr = mlx4_en_complete_rx_desc(
priv, rx_desc,
- skb_frags, skb_shinfo(gro_skb)->frags,
+ skb_frags, gro_skb,
ring->page_alloc, length);
if (!nr)
goto next;
@@ -608,7 +608,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb_shinfo(gro_skb)->nr_frags = nr;
gro_skb->len = length;
gro_skb->data_len = length;
- gro_skb->truesize += length;
gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
if (cqe->vlan_my_qpn &
@@ -618,6 +617,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
__vlan_hwaccel_put_tag(gro_skb, vid);
}
+ if (dev->features & NETIF_F_RXHASH)
+ gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+
+ skb_record_rx_queue(gro_skb, cq->ring);
napi_gro_frags(&cq->napi);
goto next;
@@ -627,11 +630,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
ip_summed = CHECKSUM_UNNECESSARY;
} else {
ip_summed = CHECKSUM_NONE;
- priv->port_stats.rx_chksum_none++;
+ ring->csum_none++;
}
} else {
ip_summed = CHECKSUM_NONE;
- priv->port_stats.rx_chksum_none++;
+ ring->csum_none++;
}
skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
@@ -650,6 +653,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
+ if (dev->features & NETIF_F_RXHASH)
+ skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+
if (be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_VLAN_PRESENT_MASK)
__vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid));
@@ -806,6 +812,10 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
qpn, ring->cqn, context);
context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
+ /* Cancel FCS removal if FW allows */
+ if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
+ context->param3 |= cpu_to_be32(1 << 29);
+
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
if (err) {
mlx4_qp_remove(mdev->dev, qp);
@@ -829,6 +839,9 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
int i, qpn;
int err = 0;
int good_qps = 0;
+ static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
+ 0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
+ 0x593D56D9, 0xF3253C06, 0x2ADC1FFC};
en_dbg(DRV, priv, "Configuring rss steering\n");
err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
@@ -866,6 +879,9 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
rss_context->flags = rss_mask;
+ rss_context->hash_fn = 1;
+ for (i = 0; i < 10; i++)
+ rss_context->rss_key[i] = rsskey[i];
if (priv->mdev->profile.udp_rss)
rss_context->base_qpn_udp = rss_context->default_qpn;
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 9fdbcecd499..9fdbcecd499 100644
--- a/drivers/net/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 6e03de034ac..90f2cd24faa 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -172,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
memset(ring->buf, 0, ring->buf_size);
ring->qp_state = MLX4_QP_STATE_RST;
- ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
+ ring->doorbell_qpn = ring->qp.qpn << 8;
mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
ring->cqn, &ring->context);
@@ -226,7 +226,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
frag = &skb_shinfo(skb)->frags[i];
pci_unmap_page(mdev->pdev,
(dma_addr_t) be64_to_cpu(data[i].addr),
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
}
}
/* Stamp the freed descriptor */
@@ -256,7 +256,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
frag = &skb_shinfo(skb)->frags[i];
pci_unmap_page(mdev->pdev,
(dma_addr_t) be64_to_cpu(data->addr),
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
++data;
}
}
@@ -460,26 +460,13 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
}
}
-static void *get_frag_ptr(struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- struct page *page = frag->page;
- void *ptr;
-
- ptr = page_address(page);
- if (unlikely(!ptr))
- return NULL;
-
- return ptr + frag->page_offset;
-}
-
static int is_inline(struct sk_buff *skb, void **pfrag)
{
void *ptr;
if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
if (skb_shinfo(skb)->nr_frags == 1) {
- ptr = get_frag_ptr(skb);
+ ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]);
if (unlikely(!ptr))
return 0;
@@ -550,7 +537,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
if (skb_shinfo(skb)->nr_frags)
memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
- skb_shinfo(skb)->frags[0].size);
+ skb_frag_size(&skb_shinfo(skb)->frags[0]));
} else {
inl->byte_count = cpu_to_be32(1 << 31 | spc);
@@ -570,7 +557,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
skb_headlen(skb) - spc);
if (skb_shinfo(skb)->nr_frags)
memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
- fragptr, skb_shinfo(skb)->frags[0].size);
+ fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0]));
}
wmb();
@@ -695,7 +682,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
MLX4_WQE_CTRL_TCP_UDP_CSUM);
- priv->port_stats.tx_chksum_offload++;
+ ring->tx_csum++;
}
if (unlikely(priv->validate_loopback)) {
@@ -756,12 +743,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Map fragments */
for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
frag = &skb_shinfo(skb)->frags[i];
- dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
+ dma = skb_frag_dma_map(&mdev->dev->pdev->dev, frag,
+ 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
data->addr = cpu_to_be64(dma);
data->lkey = cpu_to_be32(mdev->mr.key);
wmb();
- data->byte_count = cpu_to_be32(frag->size);
+ data->byte_count = cpu_to_be32(skb_frag_size(frag));
--data;
}
@@ -791,7 +779,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan(skb);
if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
- *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
+ *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
op_own |= htonl((bf_index & 0xffff) << 8);
/* Ensure new descirptor hits memory
* before setting ownership of this descriptor to HW */
@@ -812,7 +800,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
tx_desc->ctrl.owner_opcode = op_own;
wmb();
- writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
+ iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
}
/* Poll CQ here */
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 1ad1f6029af..1ad1f6029af 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 7eb8ba822e9..ed452ddfe34 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -101,6 +101,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[25] = "Router support",
[30] = "IBoE support",
[32] = "Unicast loopback support",
+ [34] = "FCS header control",
[38] = "Wake On LAN support",
[40] = "UDP RSS support",
[41] = "Unicast VEP steering support",
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 1e8ecc3708e..1e8ecc3708e 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 02393fdf44c..02393fdf44c 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index b10c07a1dc1..b10c07a1dc1 100644
--- a/drivers/net/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 73c94fcdfdd..73c94fcdfdd 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
diff --git a/drivers/net/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f0ee35df4dd..f0ee35df4dd 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index cd1784593a3..cd1784593a3 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index a2fcd8402d3..a2fcd8402d3 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index ed84811766e..fca66165110 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -51,8 +51,8 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "1.5.4.1"
-#define DRV_RELDATE "March 2011"
+#define DRV_VERSION "1.5.4.2"
+#define DRV_RELDATE "October 2011"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -249,6 +249,7 @@ struct mlx4_en_tx_ring {
struct mlx4_srq dummy;
unsigned long bytes;
unsigned long packets;
+ unsigned long tx_csum;
spinlock_t comp_lock;
struct mlx4_bf bf;
bool bf_enabled;
@@ -275,6 +276,8 @@ struct mlx4_en_rx_ring {
void *rx_info;
unsigned long bytes;
unsigned long packets;
+ unsigned long csum_ok;
+ unsigned long csum_none;
};
@@ -426,11 +429,11 @@ struct mlx4_en_priv {
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
- unsigned long last_moder_packets;
+ unsigned long last_moder_packets[MAX_RX_RINGS];
unsigned long last_moder_tx_packets;
- unsigned long last_moder_bytes;
+ unsigned long last_moder_bytes[MAX_RX_RINGS];
unsigned long last_moder_jiffies;
- int last_moder_time;
+ int last_moder_time[MAX_RX_RINGS];
u16 rx_usecs;
u16 rx_frames;
u16 tx_usecs;
@@ -470,7 +473,6 @@ struct mlx4_en_priv {
u16 log_rx_info;
struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
- int tx_vector;
struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
@@ -503,14 +505,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
int mlx4_en_start_port(struct net_device *dev);
void mlx4_en_stop_port(struct net_device *dev);
-void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors);
+void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int entries, int ring, enum cq_type mode);
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
- bool reserve_vectors);
-int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+ int cq_idx);
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 9c188bdd7f4..9c188bdd7f4 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 1286b886dce..1286b886dce 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
diff --git a/drivers/net/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 609e0ec14ce..163a314c148 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -65,7 +65,7 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->entries[i] = 0;
table->refs[i] = 0;
}
- table->max = 1 << dev->caps.log_num_vlans;
+ table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
table->total = 0;
}
@@ -354,6 +354,13 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
int free = -1;
mutex_lock(&table->mutex);
+
+ if (table->total == table->max) {
+ /* No free vlan entries */
+ err = -ENOSPC;
+ goto out;
+ }
+
for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
if (free < 0 && (table->refs[i] == 0)) {
free = i;
@@ -375,12 +382,6 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
goto out;
}
- if (table->total == table->max) {
- /* No free vlan entries */
- err = -ENOSPC;
- goto out;
- }
-
/* Register new MAC */
table->refs[free] = 1;
table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index b967647d0c7..b967647d0c7 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index ec9350e5f21..ec9350e5f21 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index 11e7c1cb99b..11e7c1cb99b 100644
--- a/drivers/net/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index e2337a7411d..e2337a7411d 100644
--- a/drivers/net/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 3b07b80a045..3b07b80a045 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
new file mode 100644
index 00000000000..d10c2e15f4e
--- /dev/null
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -0,0 +1,69 @@
+#
+# Micrel device configuration
+#
+
+config NET_VENDOR_MICREL
+ bool "Micrel devices"
+ default y
+ depends on (HAS_IOMEM && DMA_ENGINE) || SPI || PCI || HAS_IOMEM || \
+ (ARM && ARCH_KS8695)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Micrel devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_MICREL
+
+config ARM_KS8695_ETHER
+ tristate "KS8695 Ethernet support"
+ depends on ARM && ARCH_KS8695
+ select NET_CORE
+ select MII
+ ---help---
+ If you wish to compile a kernel for the KS8695 and want to
+ use the internal ethernet then you should answer Y to this.
+
+config KS8842
+ tristate "Micrel KSZ8841/42 with generic bus interface"
+ depends on HAS_IOMEM && DMA_ENGINE
+ ---help---
+ This platform driver is for KSZ8841(1-port) / KS8842(2-port)
+ ethernet switch chip (managed, VLAN, QoS) from Micrel or
+ Timberdale(FPGA).
+
+config KS8851
+ tristate "Micrel KS8851 SPI"
+ depends on SPI
+ select NET_CORE
+ select MII
+ select CRC32
+ ---help---
+ SPI driver for Micrel KS8851 SPI attached network chip.
+
+config KS8851_MLL
+ tristate "Micrel KS8851 MLL"
+ depends on HAS_IOMEM
+ select NET_CORE
+ select MII
+ ---help---
+ This platform driver is for Micrel KS8851 Address/data bus
+ multiplexed network chip.
+
+config KSZ884X_PCI
+ tristate "Micrel KSZ8841/2 PCI"
+ depends on PCI
+ select NET_CORE
+ select MII
+ select CRC32
+ ---help---
+ This PCI driver is for Micrel KSZ8841/KSZ8842 PCI Ethernet chip.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ksz884x.
+
+endif # NET_VENDOR_MICREL
diff --git a/drivers/net/ethernet/micrel/Makefile b/drivers/net/ethernet/micrel/Makefile
new file mode 100644
index 00000000000..c83e4bc50c7
--- /dev/null
+++ b/drivers/net/ethernet/micrel/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Micrel network device drivers.
+#
+
+obj-$(CONFIG_ARM_KS8695_ETHER) += ks8695net.o
+obj-$(CONFIG_KS8842) += ks8842.o
+obj-$(CONFIG_KS8851) += ks8851.o
+obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
+obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index c827a6097d0..ab81c0dc96e 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -414,7 +414,7 @@ ks8695_tx_irq(int irq, void *dev_id)
* Interrupt Status Register (Offset 0xF208)
* Bit29: WAN MAC Receive Status
* Bit16: LAN MAC Receive Status
- * So, this Rx interrrupt enable/status bit number is equal
+ * So, this Rx interrupt enable/status bit number is equal
* as Rx IRQ number.
*/
static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
@@ -1333,7 +1333,7 @@ static const struct net_device_ops ks8695_netdev_ops = {
.ndo_tx_timeout = ks8695_timeout,
.ndo_set_mac_address = ks8695_set_mac,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = ks8695_set_multicast,
+ .ndo_set_rx_mode = ks8695_set_multicast,
};
/**
diff --git a/drivers/net/arm/ks8695net.h b/drivers/net/ethernet/micrel/ks8695net.h
index 80eff6ea516..80eff6ea516 100644
--- a/drivers/net/arm/ks8695net.h
+++ b/drivers/net/ethernet/micrel/ks8695net.h
diff --git a/drivers/net/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 4a6ae057e3b..4a6ae057e3b 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
diff --git a/drivers/net/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index f56743a28fc..f56743a28fc 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
diff --git a/drivers/net/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 537fb06e593..537fb06e593 100644
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index d19c849059d..d19c849059d 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
diff --git a/drivers/net/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 27418d31a09..7ece990381c 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4700,12 +4700,11 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
++hw->tx_int_cnt;
dma_buf = DMA_BUFFER(desc);
- dma_buf->len = this_frag->size;
+ dma_buf->len = skb_frag_size(this_frag);
dma_buf->dma = pci_map_single(
hw_priv->pdev,
- page_address(this_frag->page) +
- this_frag->page_offset,
+ skb_frag_address(this_frag),
dma_buf->len,
PCI_DMA_TODEVICE);
set_tx_buf(desc, dma_buf->dma);
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
new file mode 100644
index 00000000000..8163fd0f453
--- /dev/null
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -0,0 +1,38 @@
+#
+# Microchip network device configuration
+#
+
+config NET_VENDOR_MICROCHIP
+ bool "Microchip devices"
+ default y
+ depends on SPI && EXPERIMENTAL
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Microchip cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_MICROCHIP
+
+config ENC28J60
+ tristate "ENC28J60 support"
+ depends on SPI && EXPERIMENTAL
+ select CRC32
+ ---help---
+ Support for the Microchip EN28J60 ethernet chip.
+
+ To compile this driver as a module, choose M here. The module will be
+ called enc28j60.
+
+config ENC28J60_WRITEVERIFY
+ bool "Enable write verify"
+ depends on ENC28J60
+ ---help---
+ Enable the verify after the buffer write useful for debugging purpose.
+ If unsure, say N.
+
+endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
new file mode 100644
index 00000000000..573d4292b9e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Microchip network device drivers.
+#
+
+obj-$(CONFIG_ENC28J60) += enc28j60.o
diff --git a/drivers/net/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 2837ce209cd..50055e0282e 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1534,7 +1534,7 @@ static const struct net_device_ops enc28j60_netdev_ops = {
.ndo_open = enc28j60_net_open,
.ndo_stop = enc28j60_net_close,
.ndo_start_xmit = enc28j60_send_packet,
- .ndo_set_multicast_list = enc28j60_set_multicast_list,
+ .ndo_set_rx_mode = enc28j60_set_multicast_list,
.ndo_set_mac_address = enc28j60_set_mac_address,
.ndo_tx_timeout = enc28j60_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/enc28j60_hw.h b/drivers/net/ethernet/microchip/enc28j60_hw.h
index 25b41de49f0..25b41de49f0 100644
--- a/drivers/net/enc28j60_hw.h
+++ b/drivers/net/ethernet/microchip/enc28j60_hw.h
diff --git a/drivers/net/mipsnet.c b/drivers/net/ethernet/mipsnet.c
index 004e64ab1f9..d05b0c9e1e9 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/ethernet/mipsnet.c
@@ -242,7 +242,7 @@ static const struct net_device_ops mipsnet_netdev_ops = {
.ndo_open = mipsnet_open,
.ndo_stop = mipsnet_close,
.ndo_start_xmit = mipsnet_xmit,
- .ndo_set_multicast_list = mipsnet_set_mclist,
+ .ndo_set_rx_mode = mipsnet_set_mclist,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/myricom/Kconfig b/drivers/net/ethernet/myricom/Kconfig
new file mode 100644
index 00000000000..540f0c6fc16
--- /dev/null
+++ b/drivers/net/ethernet/myricom/Kconfig
@@ -0,0 +1,47 @@
+#
+# Myricom device configuration
+#
+
+config NET_VENDOR_MYRI
+ bool "Myricom devices"
+ default y
+ depends on PCI && INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say
+ Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Myricom cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_MYRI
+
+config MYRI10GE
+ tristate "Myricom Myri-10G Ethernet support"
+ depends on PCI && INET
+ select FW_LOADER
+ select CRC32
+ select INET_LRO
+ ---help---
+ This driver supports Myricom Myri-10G Dual Protocol interface in
+ Ethernet mode. If the eeprom on your board is not recent enough,
+ you will need a newer firmware image.
+ You may get this image or more information, at:
+
+ <http://www.myri.com/scs/download-Myri10GE.html>
+
+ To compile this driver as a module, choose M here. The module
+ will be called myri10ge.
+
+config MYRI10GE_DCA
+ bool "Direct Cache Access (DCA) Support"
+ default y
+ depends on MYRI10GE && DCA && !(MYRI10GE=y && DCA=m)
+ ---help---
+ Say Y here if you want to use Direct Cache Access (DCA) in the
+ driver. DCA is a method for warming the CPU cache before data
+ is used, with the intent of lessening the impact of cache misses.
+
+endif # NET_VENDOR_MYRI
diff --git a/drivers/net/ethernet/myricom/Makefile b/drivers/net/ethernet/myricom/Makefile
new file mode 100644
index 00000000000..296c0a10056
--- /dev/null
+++ b/drivers/net/ethernet/myricom/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Myricom network device drivers.
+#
+
+obj-$(CONFIG_MYRI10GE) += myri10ge/
diff --git a/drivers/net/myri10ge/Makefile b/drivers/net/ethernet/myricom/myri10ge/Makefile
index 5df891647ae..5df891647ae 100644
--- a/drivers/net/myri10ge/Makefile
+++ b/drivers/net/ethernet/myricom/myri10ge/Makefile
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 1d2247554a3..0778edcf7b9 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1210,13 +1210,12 @@ myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
struct skb_frag_struct *skb_frags;
skb->len = skb->data_len = len;
- skb->truesize = len + sizeof(struct sk_buff);
/* attach the page(s) */
skb_frags = skb_shinfo(skb)->frags;
while (len > 0) {
memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
- len -= rx_frags->size;
+ len -= skb_frag_size(rx_frags);
skb_frags++;
rx_frags++;
skb_shinfo(skb)->nr_frags++;
@@ -1228,7 +1227,7 @@ myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
* manually */
skb_copy_to_linear_data(skb, va, hlen);
skb_shinfo(skb)->frags[0].page_offset += hlen;
- skb_shinfo(skb)->frags[0].size -= hlen;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hlen);
skb->data_len -= hlen;
skb->tail += hlen;
skb_pull(skb, MXGEFW_PAD);
@@ -1342,12 +1341,12 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
/* Fill skb_frag_struct(s) with data from our receive */
for (i = 0, remainder = len; remainder > 0; i++) {
myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
- rx_frags[i].page = rx->info[idx].page;
+ __skb_frag_set_page(&rx_frags[i], rx->info[idx].page);
rx_frags[i].page_offset = rx->info[idx].page_offset;
if (remainder < MYRI10GE_ALLOC_SIZE)
- rx_frags[i].size = remainder;
+ skb_frag_size_set(&rx_frags[i], remainder);
else
- rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
+ skb_frag_size_set(&rx_frags[i], MYRI10GE_ALLOC_SIZE);
rx->cnt++;
idx = rx->cnt & rx->mask;
remainder -= MYRI10GE_ALLOC_SIZE;
@@ -1355,7 +1354,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
if (lro_enabled) {
rx_frags[0].page_offset += MXGEFW_PAD;
- rx_frags[0].size -= MXGEFW_PAD;
+ skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD);
len -= MXGEFW_PAD;
lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
/* opaque, will come back in get_frag_header */
@@ -1375,16 +1374,18 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
ss->stats.rx_dropped++;
do {
i--;
- put_page(rx_frags[i].page);
+ __skb_frag_unref(&rx_frags[i]);
} while (i != 0);
return 0;
}
/* Attach the pages to the skb, and trim off any padding */
myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
- if (skb_shinfo(skb)->frags[0].size <= 0) {
- put_page(skb_shinfo(skb)->frags[0].page);
+ if (skb_frag_size(&skb_shinfo(skb)->frags[0]) <= 0) {
+ skb_frag_unref(skb, 0);
skb_shinfo(skb)->nr_frags = 0;
+ } else {
+ skb->truesize += bytes * skb_shinfo(skb)->nr_frags;
}
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, ss - &mgp->ss[0]);
@@ -2284,7 +2285,7 @@ myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
struct ethhdr *eh;
struct vlan_ethhdr *veh;
struct iphdr *iph;
- u8 *va = page_address(frag->page) + frag->page_offset;
+ u8 *va = skb_frag_address(frag);
unsigned long ll_hlen;
/* passed opaque through lro_receive_frags() */
__wsum csum = (__force __wsum) (unsigned long)priv;
@@ -2926,9 +2927,9 @@ again:
idx = (count + tx->req) & tx->mask;
frag = &skb_shinfo(skb)->frags[frag_idx];
frag_idx++;
- len = frag->size;
- bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
- len, PCI_DMA_TODEVICE);
+ len = skb_frag_size(frag);
+ bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
+ DMA_TO_DEVICE);
dma_unmap_addr_set(&tx->info[idx], bus, bus);
dma_unmap_len_set(&tx->info[idx], len, len);
}
@@ -3892,7 +3893,7 @@ static const struct net_device_ops myri10ge_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = myri10ge_change_mtu,
.ndo_fix_features = myri10ge_fix_features,
- .ndo_set_multicast_list = myri10ge_set_multicast_list,
+ .ndo_set_rx_mode = myri10ge_set_multicast_list,
.ndo_set_mac_address = myri10ge_set_mac_address,
};
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
index 11be150e4d6..11be150e4d6 100644
--- a/drivers/net/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h
index 7ec4b864a55..7ec4b864a55 100644
--- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
new file mode 100644
index 00000000000..4a6b9fd073b
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -0,0 +1,83 @@
+#
+# National Semi-conductor device configuration
+#
+
+config NET_VENDOR_NATSEMI
+ bool "National Semi-conductor devices"
+ default y
+ depends on MCA || MAC || MACH_JAZZ || PCI || XTENSA_PLATFORM_XT2000
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about National Semi-conductor devices. If you say Y,
+ you will be asked for your specific card in the following questions.
+
+if NET_VENDOR_NATSEMI
+
+config IBMLANA
+ tristate "IBM LAN Adapter/A support"
+ depends on MCA
+ ---help---
+ This is a Micro Channel Ethernet adapter. You need to set
+ CONFIG_MCA to use this driver. It is both available as an in-kernel
+ driver and as a module.
+
+ To compile this driver as a module, choose M here. The only
+ currently supported card is the IBM LAN Adapter/A for Ethernet. It
+ will both support 16K and 32K memory windows, however a 32K window
+ gives a better security against packet losses. Usage of multiple
+ boards with this driver should be possible, but has not been tested
+ up to now due to lack of hardware.
+
+config MACSONIC
+ tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)"
+ depends on MAC
+ ---help---
+ Support for NatSemi SONIC based Ethernet devices. This includes
+ the onboard Ethernet in many Quadras as well as some LC-PDS,
+ a few Nubus and all known Comm Slot Ethernet cards. If you have
+ one of these say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. This module will
+ be called macsonic.
+
+config MIPS_JAZZ_SONIC
+ tristate "MIPS JAZZ onboard SONIC Ethernet support"
+ depends on MACH_JAZZ
+ ---help---
+ This is the driver for the onboard card of MIPS Magnum 4000,
+ Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
+
+config NATSEMI
+ tristate "National Semiconductor DP8381x series PCI Ethernet support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver is for the National Semiconductor DP83810 series,
+ which is used in cards from PureData, NetGear, Linksys
+ and others, including the 83815 chip.
+ More specific information and updates are available from
+ <http://www.scyld.com/network/natsemi.html>.
+
+config NS83820
+ tristate "National Semiconductor DP83820 support"
+ depends on PCI
+ ---help---
+ This is a driver for the National Semiconductor DP83820 series
+ of gigabit ethernet MACs. Cards using this chipset include
+ the D-Link DGE-500T, PureData's PDP8023Z-TG, SMC's SMC9462TX,
+ SOHO-GA2000T, SOHO-GA2500T. The driver supports the use of
+ zero copy.
+
+config XTENSA_XT2000_SONIC
+ tristate "Xtensa XT2000 onboard SONIC Ethernet support"
+ depends on XTENSA_PLATFORM_XT2000
+ ---help---
+ This is the driver for the onboard card of the Xtensa XT2000 board.
+
+endif # NET_VENDOR_NATSEMI
diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile
new file mode 100644
index 00000000000..9aa5dea52b3
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the National Semi-conductor Sonic devices.
+#
+
+obj-$(CONFIG_IBMLANA) += ibmlana.o
+obj-$(CONFIG_MACSONIC) += macsonic.o
+obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
+obj-$(CONFIG_NATSEMI) += natsemi.o
+obj-$(CONFIG_NS83820) += ns83820.o
+obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
diff --git a/drivers/net/ibmlana.c b/drivers/net/ethernet/natsemi/ibmlana.c
index a7d6cad3295..999407f7ebd 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ethernet/natsemi/ibmlana.c
@@ -910,7 +910,7 @@ static const struct net_device_ops ibmlana_netdev_ops = {
.ndo_open = ibmlana_open,
.ndo_stop = ibmlana_close,
.ndo_start_xmit = ibmlana_tx,
- .ndo_set_multicast_list = ibmlana_set_multicast_list,
+ .ndo_set_rx_mode = ibmlana_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ibmlana.h b/drivers/net/ethernet/natsemi/ibmlana.h
index accd5efc9c8..accd5efc9c8 100644
--- a/drivers/net/ibmlana.h
+++ b/drivers/net/ethernet/natsemi/ibmlana.h
diff --git a/drivers/net/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 949c1f93364..fc7c6a932ad 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -111,7 +111,7 @@ static const struct net_device_ops sonic_netdev_ops = {
.ndo_stop = jazzsonic_close,
.ndo_start_xmit = sonic_send_packet,
.ndo_get_stats = sonic_get_stats,
- .ndo_set_multicast_list = sonic_multicast_list,
+ .ndo_set_rx_mode = sonic_multicast_list,
.ndo_tx_timeout = sonic_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index c93679ee699..a2eacbfb425 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -190,7 +190,7 @@ static const struct net_device_ops macsonic_netdev_ops = {
.ndo_open = macsonic_open,
.ndo_stop = macsonic_close,
.ndo_start_xmit = sonic_send_packet,
- .ndo_set_multicast_list = sonic_multicast_list,
+ .ndo_set_rx_mode = sonic_multicast_list,
.ndo_tx_timeout = sonic_tx_timeout,
.ndo_get_stats = sonic_get_stats,
.ndo_validate_addr = eth_validate_addr,
@@ -313,22 +313,13 @@ static void __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev)
static int __devinit mac_onboard_sonic_probe(struct net_device *dev)
{
- /* Bwahahaha */
- static int once_is_more_than_enough;
struct sonic_local* lp = netdev_priv(dev);
int sr;
int commslot = 0;
- if (once_is_more_than_enough)
- return -ENODEV;
- once_is_more_than_enough = 1;
-
if (!MACH_IS_MAC)
return -ENODEV;
- if (macintosh_config->ether_type != MAC_ETHER_SONIC)
- return -ENODEV;
-
printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. ");
/* Bogus probing, on the models which may or may not have
diff --git a/drivers/net/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 2962cc695ce..6ca047aab79 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -783,7 +783,7 @@ static const struct net_device_ops natsemi_netdev_ops = {
.ndo_stop = netdev_close,
.ndo_start_xmit = start_tx,
.ndo_get_stats = get_stats,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = natsemi_change_mtu,
.ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = ns_tx_timeout,
diff --git a/drivers/net/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index e736aec588f..2b8f64ddfb5 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1160,13 +1160,12 @@ again:
if (!nr_frags)
break;
- buf = pci_map_page(dev->pci_dev, frag->page,
- frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
+ buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n",
(long long)buf, (long) page_to_pfn(frag->page),
frag->page_offset);
- len = frag->size;
+ len = skb_frag_size(frag);
frag++;
nr_frags--;
}
@@ -1937,7 +1936,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = ns83820_hard_start_xmit,
.ndo_get_stats = ns83820_get_stats,
.ndo_change_mtu = ns83820_change_mtu,
- .ndo_set_multicast_list = ns83820_set_multicast,
+ .ndo_set_rx_mode = ns83820_set_multicast,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = ns83820_tx_timeout,
diff --git a/drivers/net/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 26e25d7f582..26e25d7f582 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
diff --git a/drivers/net/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index 07091dd27e5..07091dd27e5 100644
--- a/drivers/net/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
diff --git a/drivers/net/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 9f12026d98e..ccf61b9da8d 100644
--- a/drivers/net/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -122,7 +122,7 @@ static const struct net_device_ops xtsonic_netdev_ops = {
.ndo_stop = xtsonic_close,
.ndo_start_xmit = sonic_send_packet,
.ndo_get_stats = sonic_get_stats,
- .ndo_set_multicast_list = sonic_multicast_list,
+ .ndo_set_rx_mode = sonic_multicast_list,
.ndo_tx_timeout = sonic_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
new file mode 100644
index 00000000000..ff26b54bd3f
--- /dev/null
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -0,0 +1,55 @@
+#
+# Exar device configuration
+#
+
+config NET_VENDOR_EXAR
+ bool "Exar devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say
+ Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Exar cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_EXAR
+
+config S2IO
+ tristate "Exar Xframe 10Gb Ethernet Adapter"
+ depends on PCI
+ ---help---
+ This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/s2io.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called s2io.
+
+config VXGE
+ tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
+ depends on PCI && INET
+ ---help---
+ This driver supports Exar Corp's X3100 Series 10 GbE PCIe
+ I/O Virtualized Server Adapter.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/vxge.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called vxge.
+
+config VXGE_DEBUG_TRACE_ALL
+ bool "Enabling All Debug trace statements in driver"
+ default n
+ depends on VXGE
+ ---help---
+ Say Y here if you want to enabling all the debug trace statements in
+ the vxge driver. By default only few debug trace statements are
+ enabled.
+
+endif # NET_VENDOR_EXAR
diff --git a/drivers/net/ethernet/neterion/Makefile b/drivers/net/ethernet/neterion/Makefile
new file mode 100644
index 00000000000..70c8058a601
--- /dev/null
+++ b/drivers/net/ethernet/neterion/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Exar network device drivers.
+#
+
+obj-$(CONFIG_S2IO) += s2io.o
+obj-$(CONFIG_VXGE) += vxge/
diff --git a/drivers/net/s2io-regs.h b/drivers/net/ethernet/neterion/s2io-regs.h
index 3688325c11f..3688325c11f 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/ethernet/neterion/s2io-regs.h
diff --git a/drivers/net/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 277d48b0800..c27fb3dda9f 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2350,12 +2350,12 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
if (frg_cnt) {
txds++;
for (j = 0; j < frg_cnt; j++, txds++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
if (!txds->Buffer_Pointer)
break;
pci_unmap_page(nic->pdev,
(dma_addr_t)txds->Buffer_Pointer,
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
}
}
memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
@@ -4185,16 +4185,16 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
frg_cnt = skb_shinfo(skb)->nr_frags;
/* For fragmented SKB. */
for (i = 0; i < frg_cnt; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/* A '0' length fragment will be ignored */
- if (!frag->size)
+ if (!skb_frag_size(frag))
continue;
txdp++;
- txdp->Buffer_Pointer = (u64)pci_map_page(sp->pdev, frag->page,
- frag->page_offset,
- frag->size,
- PCI_DMA_TODEVICE);
- txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
+ txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
+ frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
if (offload_type == SKB_GSO_UDP)
txdp->Control_1 |= TXD_UFO_EN;
}
@@ -5522,14 +5522,12 @@ static void s2io_ethtool_gringparam(struct net_device *dev,
ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
}
- ering->rx_mini_max_pending = 0;
ering->tx_max_pending = MAX_TX_DESC;
for (i = 0; i < sp->config.rx_ring_num; i++)
rx_desc_count += sp->config.rx_cfg[i].num_rxd;
ering->rx_pending = rx_desc_count;
ering->rx_jumbo_pending = rx_desc_count;
- ering->rx_mini_pending = 0;
for (i = 0; i < sp->config.tx_fifo_num; i++)
tx_desc_count += sp->config.tx_cfg[i].fifo_len;
@@ -7682,7 +7680,7 @@ static const struct net_device_ops s2io_netdev_ops = {
.ndo_get_stats = s2io_get_stats,
.ndo_start_xmit = s2io_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = s2io_set_multicast,
+ .ndo_set_rx_mode = s2io_set_multicast,
.ndo_do_ioctl = s2io_ioctl,
.ndo_set_mac_address = s2io_set_mac_addr,
.ndo_change_mtu = s2io_change_mtu,
diff --git a/drivers/net/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index d5596926a1e..d5596926a1e 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
diff --git a/drivers/net/vxge/Makefile b/drivers/net/ethernet/neterion/vxge/Makefile
index b625e2c503f..b625e2c503f 100644
--- a/drivers/net/vxge/Makefile
+++ b/drivers/net/ethernet/neterion/vxge/Makefile
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 1520c574cb2..98e2c10ae08 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -1342,9 +1342,7 @@ vxge_hw_device_initialize(
hldev->bar0 = attr->bar0;
hldev->pdev = attr->pdev;
- hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
- hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
- hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;
+ hldev->uld_callbacks = attr->uld_callbacks;
__vxge_hw_device_pci_e_init(hldev);
@@ -2633,7 +2631,7 @@ __vxge_hw_mempool_create(struct __vxge_hw_device *devh,
u32 items_priv_size,
u32 items_initial,
u32 items_max,
- struct vxge_hw_mempool_cbs *mp_callback,
+ const struct vxge_hw_mempool_cbs *mp_callback,
void *userdata)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -2817,7 +2815,9 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
struct vxge_hw_ring_config *config;
struct __vxge_hw_device *hldev;
u32 vp_id;
- struct vxge_hw_mempool_cbs ring_mp_callback;
+ static const struct vxge_hw_mempool_cbs ring_mp_callback = {
+ .item_func_alloc = __vxge_hw_ring_mempool_item_alloc,
+ };
if ((vp == NULL) || (attr == NULL)) {
status = VXGE_HW_FAIL;
@@ -2872,7 +2872,6 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
/* calculate actual RxD block private size */
ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
- ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
ring->mempool = __vxge_hw_mempool_create(hldev,
VXGE_HW_BLOCK_SIZE,
VXGE_HW_BLOCK_SIZE,
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index dd362584f5c..5046a64f0fe 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -740,7 +740,7 @@ struct __vxge_hw_device {
struct vxge_hw_device_config config;
enum vxge_hw_device_link_state link_state;
- struct vxge_hw_uld_cbs uld_callbacks;
+ const struct vxge_hw_uld_cbs *uld_callbacks;
u32 host_type;
u32 func_id;
@@ -840,7 +840,7 @@ struct vxge_hw_device_hw_info {
struct vxge_hw_device_attr {
void __iomem *bar0;
struct pci_dev *pdev;
- struct vxge_hw_uld_cbs uld_callbacks;
+ const struct vxge_hw_uld_cbs *uld_callbacks;
};
#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index 92dd72d3f9d..92dd72d3f9d 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
diff --git a/drivers/net/vxge/vxge-ethtool.h b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
index 6cf3044d7f4..6cf3044d7f4 100644
--- a/drivers/net/vxge/vxge-ethtool.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 178348a258d..671e166b5af 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -585,7 +585,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
for (j = 0; j < frg_cnt; j++) {
pci_unmap_page(fifo->pdev,
txd_priv->dma_buffers[i++],
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
frag += 1;
}
@@ -920,14 +920,14 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
frag = &skb_shinfo(skb)->frags[0];
for (i = 0; i < frg_cnt; i++) {
/* ignore 0 length fragment */
- if (!frag->size)
+ if (!skb_frag_size(frag))
continue;
- dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
- frag->page_offset, frag->size,
- PCI_DMA_TODEVICE);
+ dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
+ 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
- if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
+ if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
goto _exit2;
vxge_debug_tx(VXGE_TRACE,
"%s: %s:%d frag = %d dma_pointer = 0x%llx",
@@ -936,7 +936,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
txdl_priv->dma_buffers[j] = dma_pointer;
vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
- frag->size);
+ skb_frag_size(frag));
frag += 1;
}
@@ -979,7 +979,7 @@ _exit1:
for (; j < i; j++) {
pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
frag += 1;
}
@@ -1050,7 +1050,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
for (j = 0; j < frg_cnt; j++) {
pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
frag += 1;
}
@@ -3354,7 +3354,7 @@ static const struct net_device_ops vxge_netdev_ops = {
.ndo_get_stats64 = vxge_get_stats64,
.ndo_start_xmit = vxge_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = vxge_set_multicast,
+ .ndo_set_rx_mode = vxge_set_multicast,
.ndo_do_ioctl = vxge_ioctl,
.ndo_set_mac_address = vxge_set_mac_addr,
.ndo_change_mtu = vxge_change_mtu,
@@ -4284,6 +4284,12 @@ static int __devinit is_sriov_initialized(struct pci_dev *pdev)
return 0;
}
+static const struct vxge_hw_uld_cbs vxge_callbacks = {
+ .link_up = vxge_callback_link_up,
+ .link_down = vxge_callback_link_down,
+ .crit_err = vxge_callback_crit_err,
+};
+
/**
* vxge_probe
* @pdev : structure containing the PCI related information of the device.
@@ -4494,9 +4500,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
}
/* Setting driver callbacks */
- attr.uld_callbacks.link_up = vxge_callback_link_up;
- attr.uld_callbacks.link_down = vxge_callback_link_down;
- attr.uld_callbacks.crit_err = vxge_callback_crit_err;
+ attr.uld_callbacks = &vxge_callbacks;
status = vxge_hw_device_initialize(&hldev, &attr, device_config);
if (status != VXGE_HW_OK) {
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index f52a42d1dbb..f52a42d1dbb 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/ethernet/neterion/vxge/vxge-reg.h
index 3e658b17594..3e658b17594 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-reg.h
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index ad64ce0afe3..5954fa264da 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -532,8 +532,8 @@ __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
}
/* notify driver */
- if (hldev->uld_callbacks.crit_err)
- hldev->uld_callbacks.crit_err(
+ if (hldev->uld_callbacks->crit_err)
+ hldev->uld_callbacks->crit_err(
(struct __vxge_hw_device *)hldev,
type, vp_id);
out:
@@ -560,8 +560,8 @@ __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
hldev->link_state = VXGE_HW_LINK_DOWN;
/* notify driver */
- if (hldev->uld_callbacks.link_down)
- hldev->uld_callbacks.link_down(hldev);
+ if (hldev->uld_callbacks->link_down)
+ hldev->uld_callbacks->link_down(hldev);
exit:
return VXGE_HW_OK;
}
@@ -585,8 +585,8 @@ __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
hldev->link_state = VXGE_HW_LINK_UP;
/* notify driver */
- if (hldev->uld_callbacks.link_up)
- hldev->uld_callbacks.link_up(hldev);
+ if (hldev->uld_callbacks->link_up)
+ hldev->uld_callbacks->link_up(hldev);
exit:
return VXGE_HW_OK;
}
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
index 4a518a3b131..4a518a3b131 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/ethernet/neterion/vxge/vxge-version.h
index b9efa28bab3..b9efa28bab3 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-version.h
diff --git a/drivers/net/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 2dfee892d20..8d288af16fc 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -306,7 +306,7 @@ static const struct net_device_ops netx_eth_netdev_ops = {
.ndo_stop = netx_eth_close,
.ndo_start_xmit = netx_eth_hard_start_xmit,
.ndo_tx_timeout = netx_eth_timeout,
- .ndo_set_multicast_list = netx_eth_set_multicast_list,
+ .ndo_set_rx_mode = netx_eth_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/nuvoton/Kconfig b/drivers/net/ethernet/nuvoton/Kconfig
new file mode 100644
index 00000000000..334c1718309
--- /dev/null
+++ b/drivers/net/ethernet/nuvoton/Kconfig
@@ -0,0 +1,31 @@
+#
+# Nuvoton network device configuration
+#
+
+config NET_VENDOR_NUVOTON
+ bool "Nuvoton devices"
+ default y
+ depends on ARM && ARCH_W90X900
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Nuvoton cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_NUVOTON
+
+config W90P910_ETH
+ tristate "Nuvoton w90p910 Ethernet support"
+ depends on ARM && ARCH_W90X900
+ select PHYLIB
+ select NET_CORE
+ select MII
+ ---help---
+ Say Y here if you want to use built-in Ethernet ports
+ on w90p910 processor.
+
+endif # NET_VENDOR_NUVOTON
diff --git a/drivers/net/ethernet/nuvoton/Makefile b/drivers/net/ethernet/nuvoton/Makefile
new file mode 100644
index 00000000000..171aa044bd3
--- /dev/null
+++ b/drivers/net/ethernet/nuvoton/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Nuvoton network device drivers.
+#
+
+obj-$(CONFIG_W90P910_ETH) += w90p910_ether.o
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index bfea499a351..f1bfb8f8fcf 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -919,7 +919,7 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
.ndo_stop = w90p910_ether_close,
.ndo_start_xmit = w90p910_ether_start_xmit,
.ndo_get_stats = w90p910_ether_stats,
- .ndo_set_multicast_list = w90p910_ether_set_multicast_list,
+ .ndo_set_rx_mode = w90p910_ether_set_multicast_list,
.ndo_set_mac_address = w90p910_set_mac_address,
.ndo_do_ioctl = w90p910_ether_ioctl,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/nvidia/Kconfig b/drivers/net/ethernet/nvidia/Kconfig
new file mode 100644
index 00000000000..ace19e7f6d1
--- /dev/null
+++ b/drivers/net/ethernet/nvidia/Kconfig
@@ -0,0 +1,32 @@
+#
+# NVIDIA network device configuration
+#
+
+config NET_VENDOR_NVIDIA
+ bool "NVIDIA devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about NVIDIA cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_NVIDIA
+
+config FORCEDETH
+ tristate "nForce Ethernet support"
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) controller of this type, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called forcedeth.
+
+endif # NET_VENDOR_NVIDIA
diff --git a/drivers/net/ethernet/nvidia/Makefile b/drivers/net/ethernet/nvidia/Makefile
new file mode 100644
index 00000000000..e079ae5771d
--- /dev/null
+++ b/drivers/net/ethernet/nvidia/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the NVIDIA network device drivers.
+#
+
+obj-$(CONFIG_FORCEDETH) += forcedeth.o
diff --git a/drivers/net/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 6d5fbd4d425..1e37eb98c4e 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2099,8 +2099,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
- entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
- ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
}
spin_lock_irqsave(&np->lock, flags);
@@ -2138,16 +2140,19 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* setup the fragments */
for (i = 0; i < fragments; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- u32 size = frag->size;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 size = skb_frag_size(frag);
offset = 0;
do {
prev_tx = put_tx;
prev_tx_ctx = np->put_tx_ctx;
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
- PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma = skb_frag_dma_map(
+ &np->pci_dev->dev,
+ frag, offset,
+ bcnt,
+ DMA_TO_DEVICE);
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 0;
put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2208,8 +2213,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
- entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
- ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
}
spin_lock_irqsave(&np->lock, flags);
@@ -2250,15 +2257,18 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* setup the fragments */
for (i = 0; i < fragments; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- u32 size = frag->size;
+ u32 size = skb_frag_size(frag);
offset = 0;
do {
prev_tx = put_tx;
prev_tx_ctx = np->put_tx_ctx;
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
- PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma = skb_frag_dma_map(
+ &np->pci_dev->dev,
+ frag, offset,
+ bcnt,
+ DMA_TO_DEVICE);
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 0;
put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
@@ -4274,13 +4284,9 @@ static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* r
struct fe_priv *np = netdev_priv(dev);
ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
ring->rx_pending = np->rx_ring_size;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
ring->tx_pending = np->tx_ring_size;
}
@@ -5208,7 +5214,7 @@ static const struct net_device_ops nv_netdev_ops = {
.ndo_set_features = nv_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = nv_set_mac_address,
- .ndo_set_multicast_list = nv_set_multicast,
+ .ndo_set_rx_mode = nv_set_multicast,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = nv_poll_controller,
#endif
@@ -5225,7 +5231,7 @@ static const struct net_device_ops nv_netdev_ops_optimized = {
.ndo_set_features = nv_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = nv_set_mac_address,
- .ndo_set_multicast_list = nv_set_multicast,
+ .ndo_set_rx_mode = nv_set_multicast,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = nv_poll_controller,
#endif
diff --git a/drivers/net/ethernet/octeon/Kconfig b/drivers/net/ethernet/octeon/Kconfig
new file mode 100644
index 00000000000..3de52ffd287
--- /dev/null
+++ b/drivers/net/ethernet/octeon/Kconfig
@@ -0,0 +1,14 @@
+#
+# Cavium network device configuration
+#
+
+config OCTEON_MGMT_ETHERNET
+ tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)"
+ depends on CPU_CAVIUM_OCTEON
+ select PHYLIB
+ select MDIO_OCTEON
+ default y
+ ---help---
+ This option enables the ethernet driver for the management
+ port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
+ CN54XX, CN52XX, and CN6XXX chips.
diff --git a/drivers/net/ethernet/octeon/Makefile b/drivers/net/ethernet/octeon/Makefile
new file mode 100644
index 00000000000..efa41c1d91c
--- /dev/null
+++ b/drivers/net/ethernet/octeon/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cavium network device drivers.
+#
+
+obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 429e08c84e9..bc1d946b797 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/phy.h>
@@ -1059,7 +1060,6 @@ static const struct net_device_ops octeon_mgmt_ops = {
.ndo_stop = octeon_mgmt_stop,
.ndo_start_xmit = octeon_mgmt_xmit,
.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
- .ndo_set_multicast_list = octeon_mgmt_set_rx_filtering,
.ndo_set_mac_address = octeon_mgmt_set_mac_address,
.ndo_do_ioctl = octeon_mgmt_ioctl,
.ndo_change_mtu = octeon_mgmt_change_mtu,
@@ -1102,6 +1102,8 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
tasklet_init(&p->tx_clean_tasklet,
octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
netdev->netdev_ops = &octeon_mgmt_ops;
netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
diff --git a/drivers/net/ethernet/oki-semi/Kconfig b/drivers/net/ethernet/oki-semi/Kconfig
new file mode 100644
index 00000000000..ecd45f9ea9d
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/Kconfig
@@ -0,0 +1,23 @@
+#
+# OKI Semiconductor device configuration
+#
+
+config NET_VENDOR_OKI
+ bool "OKI Semiconductor devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about OKI Semiconductor cards. If you say Y, you will
+ be asked for your specific card in the following questions.
+
+if NET_VENDOR_OKI
+
+source "drivers/net/ethernet/oki-semi/pch_gbe/Kconfig"
+
+endif # NET_VENDOR_OKI
diff --git a/drivers/net/ethernet/oki-semi/Makefile b/drivers/net/ethernet/oki-semi/Makefile
new file mode 100644
index 00000000000..b6780c877c1
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the OKI Semiconductor device drivers.
+#
+
+obj-$(CONFIG_PCH_GBE) += pch_gbe/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
new file mode 100644
index 00000000000..00bc4fc968c
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -0,0 +1,22 @@
+#
+# OKI Semiconductor device configuration
+#
+
+config PCH_GBE
+ tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
+ depends on PCI
+ select NET_CORE
+ select MII
+ ---help---
+ This is a gigabit ethernet driver for EG20T PCH.
+ EG20T PCH is the platform controller hub that is used in Intel's
+ general embedded platform. EG20T PCH has Gigabit Ethernet interface.
+ Using this interface, it is able to access system devices connected
+ to Gigabit Ethernet. This driver enables Gigabit Ethernet function.
+
+ This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
+ Output Hub), ML7223/ML7831.
+ ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
+ purpose use.
+ ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+ ML7223/ML7831 is completely compatible for Intel EG20T PCH.
diff --git a/drivers/net/pch_gbe/Makefile b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
index 31288d4ad24..31288d4ad24 100644
--- a/drivers/net/pch_gbe/Makefile
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index a09a07197eb..a09a07197eb 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
diff --git a/drivers/net/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index e48f084ad22..e48f084ad22 100644
--- a/drivers/net/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
diff --git a/drivers/net/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
index 94aaac5b057..94aaac5b057 100644
--- a/drivers/net/pch_gbe/pch_gbe_api.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
diff --git a/drivers/net/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index ea2d8e41887..8c8027176be 100644
--- a/drivers/net/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -273,12 +273,8 @@ static void pch_gbe_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = PCH_GBE_MAX_RXD;
ring->tx_max_pending = PCH_GBE_MAX_TXD;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
ring->rx_pending = rxdr->count;
ring->tx_pending = txdr->count;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
}
/**
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index b8b4ba27b0e..b89f3a684ae 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -698,7 +698,6 @@ static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
*/
static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
struct pch_gbe_hw *hw = &adapter->hw;
u32 rx_mode, tcpip;
@@ -2233,7 +2232,7 @@ static const struct net_device_ops pch_gbe_netdev_ops = {
.ndo_change_mtu = pch_gbe_change_mtu,
.ndo_set_features = pch_gbe_set_features,
.ndo_do_ioctl = pch_gbe_ioctl,
- .ndo_set_multicast_list = &pch_gbe_set_multi,
+ .ndo_set_rx_mode = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = pch_gbe_netpoll,
#endif
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 5b5d90a47e2..5b5d90a47e2 100644
--- a/drivers/net/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
diff --git a/drivers/net/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index 28bb9603d73..28bb9603d73 100644
--- a/drivers/net/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
diff --git a/drivers/net/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 03264dc7b5e..03264dc7b5e 100644
--- a/drivers/net/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
new file mode 100644
index 00000000000..b97132d9dff
--- /dev/null
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -0,0 +1,47 @@
+#
+# Packet engine device configuration
+#
+
+config NET_PACKET_ENGINE
+ bool "Packet Engine devices"
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about packet engine devices. If you say Y, you will
+ be asked for your specific card in the following questions.
+
+if NET_PACKET_ENGINE
+
+config HAMACHI
+ tristate "Packet Engines Hamachi GNIC-II support"
+ depends on PCI
+ select NET_CORE
+ select MII
+ ---help---
+ If you have a Gigabit Ethernet card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module will be
+ called hamachi.
+
+config YELLOWFIN
+ tristate "Packet Engines Yellowfin Gigabit-NIC support (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select CRC32
+ ---help---
+ Say Y here if you have a Packet Engines G-NIC PCI Gigabit Ethernet
+ adapter or the SYM53C885 Ethernet controller. The Gigabit adapter is
+ used by the Beowulf Linux cluster project. See
+ <http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html> for more
+ information about this driver in particular and Beowulf in general.
+
+ To compile this driver as a module, choose M here: the module
+ will be called yellowfin. This is recommended.
+
+endif # NET_PACKET_ENGINE
diff --git a/drivers/net/ethernet/packetengines/Makefile b/drivers/net/ethernet/packetengines/Makefile
new file mode 100644
index 00000000000..995ccd077d0
--- /dev/null
+++ b/drivers/net/ethernet/packetengines/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Packet Engine network device drivers.
+#
+
+obj-$(CONFIG_HAMACHI) += hamachi.o
+obj-$(CONFIG_YELLOWFIN) += yellowfin.o
diff --git a/drivers/net/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index c274b3d77eb..3458df3780b 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -567,7 +567,7 @@ static const struct net_device_ops hamachi_netdev_ops = {
.ndo_stop = hamachi_close,
.ndo_start_xmit = hamachi_start_xmit,
.ndo_get_stats = hamachi_get_stats,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 3e5ac60b89a..db44e9af03c 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -359,7 +359,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_open = yellowfin_open,
.ndo_stop = yellowfin_close,
.ndo_start_xmit = yellowfin_start_xmit,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/pasemi/Kconfig b/drivers/net/ethernet/pasemi/Kconfig
new file mode 100644
index 00000000000..01e6c329d78
--- /dev/null
+++ b/drivers/net/ethernet/pasemi/Kconfig
@@ -0,0 +1,30 @@
+#
+# PA Semi network device configuration
+#
+
+config NET_VENDOR_PASEMI
+ bool "PA Semi devices"
+ default y
+ depends on PPC_PASEMI && PCI && INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about PA Semi cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_PASEMI
+
+config PASEMI_MAC
+ tristate "PA Semi 1/10Gbit MAC"
+ depends on PPC_PASEMI && PCI && INET
+ select PHYLIB
+ select INET_LRO
+ ---help---
+ This driver supports the on-chip 1/10Gbit Ethernet controller on
+ PA Semi's PWRficient line of chips.
+
+endif # NET_VENDOR_PASEMI
diff --git a/drivers/net/ethernet/pasemi/Makefile b/drivers/net/ethernet/pasemi/Makefile
new file mode 100644
index 00000000000..05db5434baf
--- /dev/null
+++ b/drivers/net/ethernet/pasemi/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the A Semi network device drivers.
+#
+
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 9ec112ca62e..49b549ff2c7 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -300,9 +300,9 @@ static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE);
for (f = 0; f < nfrags; f++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE);
+ pci_unmap_page(pdev, dmas[f+1], skb_frag_size(frag), PCI_DMA_TODEVICE);
}
dev_kfree_skb_irq(skb);
@@ -1505,11 +1505,10 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- map[i+1] = pci_map_page(mac->dma_pdev, frag->page,
- frag->page_offset, frag->size,
- PCI_DMA_TODEVICE);
- map_size[i+1] = frag->size;
- if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
+ map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ map_size[i+1] = skb_frag_size(frag);
+ if (dma_mapping_error(&mac->dma_pdev->dev, map[i + 1])) {
nfrags = i;
goto out_err_nolock;
}
@@ -1719,7 +1718,7 @@ static const struct net_device_ops pasemi_netdev_ops = {
.ndo_open = pasemi_mac_open,
.ndo_stop = pasemi_mac_close,
.ndo_start_xmit = pasemi_mac_start_tx,
- .ndo_set_multicast_list = pasemi_mac_set_rx_mode,
+ .ndo_set_rx_mode = pasemi_mac_set_rx_mode,
.ndo_set_mac_address = pasemi_mac_set_mac_addr,
.ndo_change_mtu = pasemi_mac_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index e2f4efa8ad4..e2f4efa8ad4 100644
--- a/drivers/net/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
index 4825959a0ef..4825959a0ef 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
new file mode 100644
index 00000000000..a8669adecc9
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -0,0 +1,54 @@
+#
+# QLogic network device configuration
+#
+
+config NET_VENDOR_QLOGIC
+ bool "QLogic devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about QLogic cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_QLOGIC
+
+config QLA3XXX
+ tristate "QLogic QLA3XXX Network Driver Support"
+ depends on PCI
+ ---help---
+ This driver supports QLogic ISP3XXX gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called qla3xxx.
+
+config QLCNIC
+ tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
+ depends on PCI
+ select FW_LOADER
+ ---help---
+ This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
+ devices.
+
+config QLGE
+ tristate "QLogic QLGE 10Gb Ethernet Driver Support"
+ depends on PCI
+ ---help---
+ This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called qlge.
+
+config NETXEN_NIC
+ tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
+ depends on PCI
+ select FW_LOADER
+ ---help---
+ This enables the support for NetXen's Gigabit Ethernet card.
+
+endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile
new file mode 100644
index 00000000000..b2a283d9ae6
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the QLogic network device drivers.
+#
+
+obj-$(CONFIG_QLA3XXX) += qla3xxx.o
+obj-$(CONFIG_QLCNIC) += qlcnic/
+obj-$(CONFIG_QLGE) += qlge/
+obj-$(CONFIG_NETXEN_NIC) += netxen/
diff --git a/drivers/net/netxen/Makefile b/drivers/net/ethernet/qlogic/netxen/Makefile
index 861a0590b1f..861a0590b1f 100644
--- a/drivers/net/netxen/Makefile
+++ b/drivers/net/ethernet/qlogic/netxen/Makefile
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index f744d291218..a876dffd710 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 76
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.76"
+#define _NETXEN_NIC_LINUX_SUBVERSION 77
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.77"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -940,6 +940,11 @@ typedef struct nx_mac_list_s {
uint8_t mac_addr[ETH_ALEN+2];
} nx_mac_list_t;
+struct nx_vlan_ip_list {
+ struct list_head list;
+ u32 ip_addr;
+};
+
/*
* Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
* adjusted based on configured MTU.
@@ -1165,6 +1170,7 @@ struct netxen_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct list_head mac_list;
+ struct list_head vlan_ip_list;
spinlock_t tx_clean_lock;
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index a925392abd6..a925392abd6 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index b34fb74d07e..e09ea83b8c4 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -413,9 +413,6 @@ netxen_nic_get_ringparam(struct net_device *dev,
}
ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
-
- ring->rx_mini_max_pending = 0;
- ring->rx_mini_pending = 0;
}
static u32
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index dc1967c1f31..dc1967c1f31 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 3f89e57cae5..3f89e57cae5 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
index e2c5b6f2df0..e2c5b6f2df0 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index e8993a76a08..a8259cc19a6 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -26,6 +26,7 @@
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/if_vlan.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
@@ -1619,6 +1620,7 @@ netxen_process_lro(struct netxen_adapter *adapter,
int index;
u16 lro_length, length, data_offset;
u32 seq_number;
+ u8 vhdr_len = 0;
if (unlikely(ring > adapter->max_rds_rings))
return NULL;
@@ -1652,8 +1654,10 @@ netxen_process_lro(struct netxen_adapter *adapter,
skb_pull(skb, l2_hdr_offset);
skb->protocol = eth_type_trans(skb, netdev);
- iph = (struct iphdr *)skb->data;
- th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ if (skb->protocol == htons(ETH_P_8021Q))
+ vhdr_len = VLAN_HLEN;
+ iph = (struct iphdr *)(skb->data + vhdr_len);
+ th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
iph->tot_len = htons(length);
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index f574edff7fc..8cf3173ba48 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -91,7 +91,8 @@ static irqreturn_t netxen_intr(int irq, void *data);
static irqreturn_t netxen_msi_intr(int irq, void *data);
static irqreturn_t netxen_msix_intr(int irq, void *data);
-static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
+static void netxen_free_vlan_ip_list(struct netxen_adapter *);
+static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *stats);
static int netxen_nic_set_mac(struct net_device *netdev, void *p);
@@ -399,6 +400,63 @@ static void netxen_set_port_mode(struct netxen_adapter *adapter)
}
}
+#define PCI_CAP_ID_GEN 0x10
+
+static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
+{
+ u32 pdevfuncsave;
+ u32 c8c9value = 0;
+ u32 chicken = 0;
+ u32 control = 0;
+ int i, pos;
+ struct pci_dev *pdev;
+
+ pdev = adapter->pdev;
+
+ chicken = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3));
+ /* clear chicken3.25:24 */
+ chicken &= 0xFCFFFFFF;
+ /*
+ * if gen1 and B0, set F1020 - if gen 2, do nothing
+ * if gen2 set to F1000
+ */
+ pos = pci_find_capability(pdev, PCI_CAP_ID_GEN);
+ if (pos == 0xC0) {
+ pci_read_config_dword(pdev, pos + 0x10, &control);
+ if ((control & 0x000F0000) != 0x00020000) {
+ /* set chicken3.24 if gen1 */
+ chicken |= 0x01000000;
+ }
+ dev_info(&adapter->pdev->dev, "Gen2 strapping detected\n");
+ c8c9value = 0xF1000;
+ } else {
+ /* set chicken3.24 if gen1 */
+ chicken |= 0x01000000;
+ dev_info(&adapter->pdev->dev, "Gen1 strapping detected\n");
+ if (adapter->ahw.revision_id == NX_P3_B0)
+ c8c9value = 0xF1020;
+ else
+ c8c9value = 0;
+ }
+
+ NXWR32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3), chicken);
+
+ if (!c8c9value)
+ return;
+
+ pdevfuncsave = pdev->devfn;
+ if (pdevfuncsave & 0x07)
+ return;
+
+ for (i = 0; i < 8; i++) {
+ pci_read_config_dword(pdev, pos + 8, &control);
+ pci_read_config_dword(pdev, pos + 8, &control);
+ pci_write_config_dword(pdev, pos + 8, c8c9value);
+ pdev->devfn++;
+ }
+ pdev->devfn = pdevfuncsave;
+}
+
static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
{
u32 control;
@@ -523,7 +581,7 @@ static const struct net_device_ops netxen_netdev_ops = {
.ndo_start_xmit = netxen_nic_xmit_frame,
.ndo_get_stats64 = netxen_nic_get_stats,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = netxen_set_multicast_list,
+ .ndo_set_rx_mode = netxen_set_multicast_list,
.ndo_set_mac_address = netxen_nic_set_mac,
.ndo_change_mtu = netxen_nic_change_mtu,
.ndo_tx_timeout = netxen_tx_timeout,
@@ -866,7 +924,7 @@ netxen_start_firmware(struct netxen_adapter *adapter)
if (err < 0)
goto err_out;
if (err == 0)
- goto wait_init;
+ goto pcie_strap_init;
if (first_boot != 0x55555555) {
NXWR32(adapter, CRB_CMDPEG_STATE, 0);
@@ -909,6 +967,10 @@ netxen_start_firmware(struct netxen_adapter *adapter)
| (_NETXEN_NIC_LINUX_SUBVERSION);
NXWR32(adapter, CRB_DRIVER_VERSION, val);
+pcie_strap_init:
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_pcie_strap_init(adapter);
+
wait_init:
/* Handshake with the card before we register the devices. */
err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
@@ -1359,6 +1421,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
+ INIT_LIST_HEAD(&adapter->vlan_ip_list);
err = netxen_setup_pci_map(adapter);
if (err)
@@ -1481,6 +1544,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->tx_timeout_task);
+ netxen_free_vlan_ip_list(adapter);
netxen_nic_detach(adapter);
nx_decr_dev_ref_cnt(adapter);
@@ -1563,7 +1627,7 @@ static int netxen_nic_attach_func(struct pci_dev *pdev)
if (err)
goto err_out_detach;
- netxen_config_indev_addr(netdev, NETDEV_UP);
+ netxen_restore_indev_addr(netdev, NETDEV_UP);
}
netif_device_attach(netdev);
@@ -1841,13 +1905,13 @@ netxen_map_tx_skb(struct pci_dev *pdev,
frag = &skb_shinfo(skb)->frags[i];
nf = &pbuf->frag_array[i+1];
- map = pci_map_page(pdev, frag->page, frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, map))
+ map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
goto unwind;
nf->dma = map;
- nf->length = frag->size;
+ nf->length = skb_frag_size(frag);
}
return 0;
@@ -1898,7 +1962,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
frag = &skb_shinfo(skb)->frags[i];
- delta += frag->size;
+ delta += skb_frag_size(frag);
}
if (!__pskb_pull_tail(skb, delta))
@@ -2374,7 +2438,7 @@ netxen_attach_work(struct work_struct *work)
goto done;
}
- netxen_config_indev_addr(netdev, NETDEV_UP);
+ netxen_restore_indev_addr(netdev, NETDEV_UP);
}
netif_device_attach(netdev);
@@ -2848,10 +2912,70 @@ netxen_destip_supported(struct netxen_adapter *adapter)
}
static void
-netxen_config_indev_addr(struct net_device *dev, unsigned long event)
+netxen_free_vlan_ip_list(struct netxen_adapter *adapter)
+{
+ struct nx_vlan_ip_list *cur;
+ struct list_head *head = &adapter->vlan_ip_list;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct nx_vlan_ip_list, list);
+ netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+
+}
+static void
+netxen_list_config_vlan_ip(struct netxen_adapter *adapter,
+ struct in_ifaddr *ifa, unsigned long event)
+{
+ struct net_device *dev;
+ struct nx_vlan_ip_list *cur, *tmp_cur;
+ struct list_head *head;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+ if (dev == NULL)
+ return;
+
+ if (!is_vlan_dev(dev))
+ return;
+
+ switch (event) {
+ case NX_IP_UP:
+ list_for_each(head, &adapter->vlan_ip_list) {
+ cur = list_entry(head, struct nx_vlan_ip_list, list);
+
+ if (cur->ip_addr == ifa->ifa_address)
+ return;
+ }
+
+ cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC);
+ if (cur == NULL) {
+ printk(KERN_ERR "%s: failed to add vlan ip to list\n",
+ adapter->netdev->name);
+ return;
+ }
+
+ cur->ip_addr = ifa->ifa_address;
+ list_add_tail(&cur->list, &adapter->vlan_ip_list);
+ break;
+ case NX_IP_DOWN:
+ list_for_each_entry_safe(cur, tmp_cur,
+ &adapter->vlan_ip_list, list) {
+ if (cur->ip_addr == ifa->ifa_address) {
+ list_del(&cur->list);
+ kfree(cur);
+ break;
+ }
+ }
+ }
+}
+static void
+netxen_config_indev_addr(struct netxen_adapter *adapter,
+ struct net_device *dev, unsigned long event)
{
struct in_device *indev;
- struct netxen_adapter *adapter = netdev_priv(dev);
if (!netxen_destip_supported(adapter))
return;
@@ -2865,10 +2989,12 @@ netxen_config_indev_addr(struct net_device *dev, unsigned long event)
case NETDEV_UP:
netxen_config_ipaddr(adapter,
ifa->ifa_address, NX_IP_UP);
+ netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
break;
case NETDEV_DOWN:
netxen_config_ipaddr(adapter,
ifa->ifa_address, NX_IP_DOWN);
+ netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
break;
default:
break;
@@ -2878,11 +3004,28 @@ netxen_config_indev_addr(struct net_device *dev, unsigned long event)
in_dev_put(indev);
}
+static void
+netxen_restore_indev_addr(struct net_device *netdev, unsigned long event)
+
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct nx_vlan_ip_list *pos, *tmp_pos;
+ unsigned long ip_event;
+
+ ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
+ netxen_config_indev_addr(adapter, netdev, event);
+
+ list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) {
+ netxen_config_ipaddr(adapter, pos->ip_addr, ip_event);
+ }
+}
+
static int netxen_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netxen_adapter *adapter;
struct net_device *dev = (struct net_device *)ptr;
+ struct net_device *orig_dev = dev;
recheck:
if (dev == NULL)
@@ -2904,7 +3047,7 @@ recheck:
if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
goto done;
- netxen_config_indev_addr(dev, event);
+ netxen_config_indev_addr(adapter, orig_dev, event);
done:
return NOTIFY_DONE;
}
@@ -2921,7 +3064,7 @@ netxen_inetaddr_event(struct notifier_block *this,
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
recheck:
- if (dev == NULL || !netif_running(dev))
+ if (dev == NULL)
goto done;
if (dev->priv_flags & IFF_802_1Q_VLAN) {
@@ -2943,9 +3086,11 @@ recheck:
switch (event) {
case NETDEV_UP:
netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
+ netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
break;
case NETDEV_DOWN:
netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN);
+ netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
break;
default:
break;
@@ -2964,7 +3109,10 @@ static struct notifier_block netxen_inetaddr_cb = {
};
#else
static void
-netxen_config_indev_addr(struct net_device *dev, unsigned long event)
+netxen_restore_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+static void
+netxen_free_vlan_ip_list(struct netxen_adapter *adapter)
{ }
#endif
diff --git a/drivers/net/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 2f6914025ef..a4bdff438a5 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1650,7 +1650,7 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
SUPPORTED_1000baseT_Half | \
SUPPORTED_1000baseT_Full | \
SUPPORTED_Autoneg | \
- SUPPORTED_TP); \
+ SUPPORTED_TP) \
static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
@@ -2388,11 +2388,10 @@ static int ql_send_map(struct ql3_adapter *qdev,
seg++;
}
- map = pci_map_page(qdev->pdev, frag->page,
- frag->page_offset, frag->size,
- PCI_DMA_TODEVICE);
+ map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping frags failed with error: %d\n",
@@ -2402,9 +2401,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
- oal_entry->len = cpu_to_le32(frag->size);
+ oal_entry->len = cpu_to_le32(skb_frag_size(frag));
dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
- dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
}
/* Terminate the last segment. */
oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
@@ -3762,7 +3761,6 @@ static const struct net_device_ops ql3xxx_netdev_ops = {
.ndo_open = ql3xxx_open,
.ndo_start_xmit = ql3xxx_send,
.ndo_stop = ql3xxx_close,
- .ndo_set_multicast_list = NULL, /* not allowed on NIC side */
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ql3xxx_set_mac_address,
diff --git a/drivers/net/qla3xxx.h b/drivers/net/ethernet/qlogic/qla3xxx.h
index 73e234366a8..73e234366a8 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/ethernet/qlogic/qla3xxx.h
diff --git a/drivers/net/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index ddba83ef3f4..ddba83ef3f4 100644
--- a/drivers/net/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index baf646d98fa..2fd1ba8fee4 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 21
-#define QLCNIC_LINUX_VERSIONID "5.0.21"
+#define _QLCNIC_LINUX_SUBVERSION 24
+#define QLCNIC_LINUX_VERSIONID "5.0.24"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -73,6 +73,7 @@
(sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
#define QLCNIC_P3P_A0 0x50
+#define QLCNIC_P3P_C0 0x58
#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0)
@@ -291,7 +292,8 @@ struct uni_data_desc{
/* Flash Defines and Structures */
#define QLCNIC_FLT_LOCATION 0x3F1000
-#define QLCNIC_FW_IMAGE_REGION 0x74
+#define QLCNIC_B0_FW_IMAGE_REGION 0x74
+#define QLCNIC_C0_FW_IMAGE_REGION 0x97
#define QLCNIC_BOOTLD_REGION 0X72
struct qlcnic_flt_header {
u16 version;
@@ -455,6 +457,8 @@ struct qlcnic_hardware_context {
u16 port_type;
u16 board_type;
+ u8 beacon_state;
+
struct qlcnic_nic_intr_coalesce coal;
struct qlcnic_fw_dump fw_dump;
};
@@ -579,6 +583,7 @@ struct qlcnic_recv_context {
#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
+#define QLCNIC_CDRP_CMD_INTRPT_TEST 0x00000011
#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
@@ -911,6 +916,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_PROMISC_DISABLED 0x800
#define QLCNIC_NEED_FLR 0x1000
#define QLCNIC_FW_RESET_OWNER 0x2000
+#define QLCNIC_FW_HANG 0x4000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -928,6 +934,7 @@ struct qlcnic_ipaddr {
#define __QLCNIC_START_FW 4
#define __QLCNIC_AER 5
#define __QLCNIC_DIAG_RES_ALLOC 6
+#define __QLCNIC_LED_ENABLE 7
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -1344,6 +1351,7 @@ enum op_codes {
#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
+#define QLCNIC_FORCE_FW_RESET 0xdeaddead
struct qlcnic_dump_operations {
enum op_codes opcode;
@@ -1351,6 +1359,18 @@ struct qlcnic_dump_operations {
struct qlcnic_dump_entry *, u32 *);
};
+struct _cdrp_cmd {
+ u32 cmd;
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+};
+
+struct qlcnic_cmd_args {
+ struct _cdrp_cmd req;
+ struct _cdrp_cmd rsp;
+};
+
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
@@ -1393,6 +1413,9 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
#define crb_win_unlock(a) \
qlcnic_pcie_sem_unlock((a), 7)
+#define __QLCNIC_MAX_LED_RATE 0xf
+#define __QLCNIC_MAX_LED_STATE 0x2
+
int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
@@ -1460,8 +1483,7 @@ int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
-u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
- u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
+void qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *);
void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index b0d32ddd2cc..569a837d2ac 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -26,42 +26,54 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
return rsp;
}
-u32
-qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
- u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
+void
+qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
{
u32 rsp;
u32 signature;
- u32 rcode = QLCNIC_RCODE_SUCCESS;
struct pci_dev *pdev = adapter->pdev;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
- signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);
+ signature = QLCNIC_CDRP_SIGNATURE_MAKE(ahw->pci_func,
+ adapter->fw_hal_version);
/* Acquire semaphore before accessing CRB */
- if (qlcnic_api_lock(adapter))
- return QLCNIC_RCODE_TIMEOUT;
+ if (qlcnic_api_lock(adapter)) {
+ cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
+ return;
+ }
QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
- QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
- QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
- QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
- QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));
+ QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, cmd->req.arg1);
+ QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, cmd->req.arg2);
+ QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, cmd->req.arg3);
+ QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
+ QLCNIC_CDRP_FORM_CMD(cmd->req.cmd));
rsp = qlcnic_poll_rsp(adapter);
if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
dev_err(&pdev->dev, "card response timeout.\n");
- rcode = QLCNIC_RCODE_TIMEOUT;
+ cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
- rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
dev_err(&pdev->dev, "failed card response code:0x%x\n",
- rcode);
+ cmd->rsp.cmd);
+ } else if (rsp == QLCNIC_CDRP_RSP_OK) {
+ cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
+ if (cmd->rsp.arg2)
+ cmd->rsp.arg2 = QLCRD32(adapter,
+ QLCNIC_ARG2_CRB_OFFSET);
+ if (cmd->rsp.arg3)
+ cmd->rsp.arg3 = QLCRD32(adapter,
+ QLCNIC_ARG3_CRB_OFFSET);
}
+ if (cmd->rsp.arg1)
+ cmd->rsp.arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
/* Release semaphore */
qlcnic_api_unlock(adapter);
- return rcode;
}
static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size)
@@ -81,27 +93,24 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
u16 temp_size;
void *tmp_addr;
u32 version, csum, *template, *tmp_buf;
+ struct qlcnic_cmd_args cmd;
struct qlcnic_hardware_context *ahw;
struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
dma_addr_t tmp_addr_t = 0;
ahw = adapter->ahw;
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- 0,
- 0,
- 0,
- QLCNIC_CDRP_CMD_TEMP_SIZE);
- if (err != QLCNIC_RCODE_SUCCESS) {
- err = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_TEMP_SIZE;
+ memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
+ qlcnic_issue_cmd(adapter, &cmd);
+ if (cmd.rsp.cmd != QLCNIC_RCODE_SUCCESS) {
dev_info(&adapter->pdev->dev,
- "Can't get template size %d\n", err);
+ "Can't get template size %d\n", cmd.rsp.cmd);
err = -EIO;
return err;
}
- version = QLCRD32(adapter, QLCNIC_ARG3_CRB_OFFSET);
- temp_size = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
+ temp_size = cmd.rsp.arg2;
+ version = cmd.rsp.arg3;
if (!temp_size)
return -EIO;
@@ -112,14 +121,14 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
"Can't get memory for FW dump template\n");
return -ENOMEM;
}
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- LSD(tmp_addr_t),
- MSD(tmp_addr_t),
- temp_size,
- QLCNIC_CDRP_CMD_GET_TEMP_HDR);
-
+ memset(&cmd.rsp, 0, sizeof(struct _cdrp_cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_GET_TEMP_HDR;
+ cmd.req.arg1 = LSD(tmp_addr_t);
+ cmd.req.arg2 = MSD(tmp_addr_t);
+ cmd.req.arg3 = temp_size;
+ qlcnic_issue_cmd(adapter, &cmd);
+
+ err = cmd.rsp.cmd;
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
"Failed to get mini dump template header %d\n", err);
@@ -155,17 +164,17 @@ error:
int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
+ struct qlcnic_cmd_args cmd;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_SET_MTU;
+ cmd.req.arg1 = recv_ctx->context_id;
+ cmd.req.arg2 = mtu;
+ cmd.req.arg3 = 0;
if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
- if (qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- recv_ctx->context_id,
- mtu,
- 0,
- QLCNIC_CDRP_CMD_SET_MTU)) {
-
+ qlcnic_issue_cmd(adapter, &cmd);
+ if (cmd.rsp.cmd) {
dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
return -EIO;
}
@@ -186,6 +195,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
struct qlcnic_cardrsp_sds_ring *prsp_sds;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_cmd_args cmd;
dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
u64 phys_addr;
@@ -274,13 +284,13 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
}
phys_addr = hostrq_phys_addr;
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- (u32)(phys_addr >> 32),
- (u32)(phys_addr & 0xffffffff),
- rq_size,
- QLCNIC_CDRP_CMD_CREATE_RX_CTX);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = (u32) (phys_addr >> 32);
+ cmd.req.arg2 = (u32) (phys_addr & 0xffffffff);
+ cmd.req.arg3 = rq_size;
+ cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_RX_CTX;
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
if (err) {
dev_err(&adapter->pdev->dev,
"Failed to create rx ctx in firmware%d\n", err);
@@ -326,19 +336,18 @@ out_free_rq:
static void
qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_cmd_args cmd;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- if (qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- recv_ctx->context_id,
- QLCNIC_DESTROY_CTX_RESET,
- 0,
- QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {
-
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = recv_ctx->context_id;
+ cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_RX_CTX;
+ qlcnic_issue_cmd(adapter, &cmd);
+ if (cmd.rsp.cmd)
dev_err(&adapter->pdev->dev,
"Failed to destroy rx ctx in firmware\n");
- }
recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
}
@@ -352,6 +361,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
void *rq_addr, *rsp_addr;
size_t rq_size, rsp_size;
u32 temp;
+ struct qlcnic_cmd_args cmd;
int err;
u64 phys_addr;
dma_addr_t rq_phys_addr, rsp_phys_addr;
@@ -401,13 +411,13 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
phys_addr = rq_phys_addr;
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- (u32)(phys_addr >> 32),
- ((u32)phys_addr & 0xffffffff),
- rq_size,
- QLCNIC_CDRP_CMD_CREATE_TX_CTX);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = (u32)(phys_addr >> 32);
+ cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
+ cmd.req.arg3 = rq_size;
+ cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_TX_CTX;
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
if (err == QLCNIC_RCODE_SUCCESS) {
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
@@ -433,29 +443,30 @@ out_free_rq:
static void
qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
{
- if (qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- adapter->tx_context_id,
- QLCNIC_DESTROY_CTX_RESET,
- 0,
- QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {
-
+ struct qlcnic_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = adapter->tx_context_id;
+ cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_TX_CTX;
+ qlcnic_issue_cmd(adapter, &cmd);
+ if (cmd.rsp.cmd)
dev_err(&adapter->pdev->dev,
"Failed to destroy tx ctx in firmware\n");
- }
}
int
qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
- return qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- config,
- 0,
- 0,
- QLCNIC_CDRP_CMD_CONFIG_PORT);
+ struct qlcnic_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = config;
+ cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIG_PORT;
+ qlcnic_issue_cmd(adapter, &cmd);
+
+ return cmd.rsp.cmd;
}
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
@@ -620,20 +631,17 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
{
int err;
- u32 arg1;
+ struct qlcnic_cmd_args cmd;
- arg1 = adapter->ahw->pci_func | BIT_8;
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- arg1,
- 0,
- 0,
- QLCNIC_CDRP_CMD_MAC_ADDRESS);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = adapter->ahw->pci_func | BIT_8;
+ cmd.req.cmd = QLCNIC_CDRP_CMD_MAC_ADDRESS;
+ cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
if (err == QLCNIC_RCODE_SUCCESS)
- qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET,
- QLCNIC_ARG2_CRB_OFFSET, 0, mac);
+ qlcnic_fetch_mac(adapter, cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
else {
dev_err(&adapter->pdev->dev,
"Failed to get mac address%d\n", err);
@@ -651,6 +659,7 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
dma_addr_t nic_dma_t;
struct qlcnic_info *nic_info;
void *nic_info_addr;
+ struct qlcnic_cmd_args cmd;
size_t nic_size = sizeof(struct qlcnic_info);
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
@@ -660,13 +669,13 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr;
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- MSD(nic_dma_t),
- LSD(nic_dma_t),
- (func_id << 16 | nic_size),
- QLCNIC_CDRP_CMD_GET_NIC_INFO);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_GET_NIC_INFO;
+ cmd.req.arg1 = MSD(nic_dma_t);
+ cmd.req.arg2 = LSD(nic_dma_t);
+ cmd.req.arg3 = (func_id << 16 | nic_size);
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
if (err == QLCNIC_RCODE_SUCCESS) {
npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
@@ -705,6 +714,7 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
int err = -EIO;
dma_addr_t nic_dma_t;
void *nic_info_addr;
+ struct qlcnic_cmd_args cmd;
struct qlcnic_info *nic_info;
size_t nic_size = sizeof(struct qlcnic_info);
@@ -730,13 +740,13 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- MSD(nic_dma_t),
- LSD(nic_dma_t),
- ((nic->pci_func << 16) | nic_size),
- QLCNIC_CDRP_CMD_SET_NIC_INFO);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_SET_NIC_INFO;
+ cmd.req.arg1 = MSD(nic_dma_t);
+ cmd.req.arg2 = LSD(nic_dma_t);
+ cmd.req.arg3 = ((nic->pci_func << 16) | nic_size);
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
@@ -754,6 +764,7 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
struct qlcnic_pci_info *pci_info)
{
int err = 0, i;
+ struct qlcnic_cmd_args cmd;
dma_addr_t pci_info_dma_t;
struct qlcnic_pci_info *npar;
void *pci_info_addr;
@@ -767,13 +778,13 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
memset(pci_info_addr, 0, pci_size);
npar = pci_info_addr;
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- MSD(pci_info_dma_t),
- LSD(pci_info_dma_t),
- pci_size,
- QLCNIC_CDRP_CMD_GET_PCI_INFO);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_GET_PCI_INFO;
+ cmd.req.arg1 = MSD(pci_info_dma_t);
+ cmd.req.arg2 = LSD(pci_info_dma_t);
+ cmd.req.arg3 = pci_size;
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
if (err == QLCNIC_RCODE_SUCCESS) {
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
@@ -805,6 +816,7 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
{
int err = -EIO;
u32 arg1;
+ struct qlcnic_cmd_args cmd;
if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
@@ -813,13 +825,11 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
arg1 = id | (enable_mirroring ? BIT_4 : 0);
arg1 |= pci_func << 8;
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- arg1,
- 0,
- 0,
- QLCNIC_CDRP_CMD_SET_PORTMIRRORING);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_SET_PORTMIRRORING;
+ cmd.req.arg1 = arg1;
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
@@ -842,6 +852,7 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
dma_addr_t stats_dma_t;
void *stats_addr;
u32 arg1;
+ struct qlcnic_cmd_args cmd;
int err;
if (esw_stats == NULL)
@@ -865,13 +876,13 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
arg1 |= rx_tx << 15 | stats_size << 16;
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- arg1,
- MSD(stats_dma_t),
- LSD(stats_dma_t),
- QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
+ cmd.req.arg1 = arg1;
+ cmd.req.arg2 = MSD(stats_dma_t);
+ cmd.req.arg3 = LSD(stats_dma_t);
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
if (!err) {
stats = stats_addr;
@@ -952,6 +963,7 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
{
u32 arg1;
+ struct qlcnic_cmd_args cmd;
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
return -EIO;
@@ -972,13 +984,11 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
arg1 |= BIT_14 | rx_tx << 15;
- return qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- arg1,
- 0,
- 0,
- QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
+ cmd.req.arg1 = arg1;
+ qlcnic_issue_cmd(adapter, &cmd);
+ return cmd.rsp.cmd;
err_ret:
dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d"
@@ -991,19 +1001,19 @@ __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
u32 *arg1, u32 *arg2)
{
int err = -EIO;
+ struct qlcnic_cmd_args cmd;
u8 pci_func;
pci_func = (*arg1 >> 8);
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- *arg1,
- 0,
- 0,
- QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG);
+
+ cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG;
+ cmd.req.arg1 = *arg1;
+ cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
+ qlcnic_issue_cmd(adapter, &cmd);
+ *arg1 = cmd.rsp.arg1;
+ *arg2 = cmd.rsp.arg2;
+ err = cmd.rsp.cmd;
if (err == QLCNIC_RCODE_SUCCESS) {
- *arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
- *arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
dev_info(&adapter->pdev->dev,
"eSwitch port config for pci func %d\n", pci_func);
} else {
@@ -1025,6 +1035,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
{
int err = -EIO;
u32 arg1, arg2 = 0;
+ struct qlcnic_cmd_args cmd;
u8 pci_func;
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
@@ -1071,14 +1082,13 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
return err;
}
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw->pci_func,
- adapter->fw_hal_version,
- arg1,
- arg2,
- 0,
- QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH;
+ cmd.req.arg1 = arg1;
+ cmd.req.arg2 = arg2;
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
"Failed to configure eswitch pci func %d\n", pci_func);
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 72a723d5c98..5d8bec28326 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -418,9 +418,6 @@ qlcnic_get_ringparam(struct net_device *dev,
ring->rx_max_pending = adapter->max_rxd;
ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
-
- ring->rx_mini_max_pending = 0;
- ring->rx_mini_pending = 0;
}
static u32
@@ -659,6 +656,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int max_sds_rings = adapter->max_sds_rings;
int ret;
+ struct qlcnic_cmd_args cmd;
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
return -EIO;
@@ -668,9 +666,12 @@ static int qlcnic_irq_test(struct net_device *netdev)
goto clear_it;
adapter->diag_cnt = 0;
- ret = qlcnic_issue_cmd(adapter, adapter->ahw->pci_func,
- adapter->fw_hal_version, adapter->ahw->pci_func,
- 0, 0, 0x00000011);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_INTRPT_TEST;
+ cmd.req.arg1 = adapter->ahw->pci_func;
+ qlcnic_issue_cmd(adapter, &cmd);
+ ret = cmd.rsp.cmd;
+
if (ret)
goto done;
@@ -710,7 +711,7 @@ int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
}
-static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter)
+static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
@@ -736,13 +737,18 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter)
dev_kfree_skb_any(skb);
if (!adapter->diag_cnt)
- dev_warn(&adapter->pdev->dev, "LB Test: %dth packet"
- " not recevied\n", i + 1);
+ QLCDB(adapter, DRV,
+ "LB Test: packet #%d was not received\n", i + 1);
else
cnt++;
}
if (cnt != i) {
dev_warn(&adapter->pdev->dev, "LB Test failed\n");
+ if (mode != QLCNIC_ILB_MODE) {
+ dev_warn(&adapter->pdev->dev,
+ "WARNING: Please make sure external"
+ "loopback connector is plugged in\n");
+ }
return -1;
}
return 0;
@@ -761,7 +767,7 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
return -EOPNOTSUPP;
}
- netdev_info(netdev, "%s loopback test in progress\n",
+ QLCDB(adapter, DRV, "%s loopback test in progress\n",
mode == QLCNIC_ILB_MODE ? "internal" : "external");
if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(netdev, "Loopback test not supported for non "
@@ -797,7 +803,7 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
}
} while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state));
- ret = qlcnic_do_lb_test(adapter);
+ ret = qlcnic_do_lb_test(adapter, mode);
qlcnic_clear_lb_mode(adapter);
@@ -832,7 +838,6 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
if (data[3])
eth_test->flags |= ETH_TEST_FL_FAILED;
-
if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
if (data[4])
@@ -933,6 +938,9 @@ static int qlcnic_set_led(struct net_device *dev,
switch (state) {
case ETHTOOL_ID_ACTIVE:
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+ return -EBUSY;
+
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
return -EIO;
@@ -967,6 +975,8 @@ static int qlcnic_set_led(struct net_device *dev,
clear_bit(__QLCNIC_RESETTING, &adapter->state);
}
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
return -EIO;
}
@@ -1105,7 +1115,10 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
- dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
+ if (fw_dump->clr)
+ dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
+ else
+ dump->len = 0;
dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
dump->version = adapter->fw_version;
return 0;
@@ -1152,7 +1165,8 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
- if (val->flag == QLCNIC_FORCE_FW_DUMP_KEY) {
+ switch (val->flag) {
+ case QLCNIC_FORCE_FW_DUMP_KEY:
if (!fw_dump->enable) {
netdev_info(netdev, "FW dump not enabled\n");
return ret;
@@ -1164,17 +1178,25 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
}
netdev_info(netdev, "Forcing a FW dump\n");
qlcnic_dev_request_reset(adapter);
- } else if (val->flag == QLCNIC_DISABLE_FW_DUMP) {
+ break;
+ case QLCNIC_DISABLE_FW_DUMP:
if (fw_dump->enable) {
netdev_info(netdev, "Disabling FW dump\n");
fw_dump->enable = 0;
}
- } else if (val->flag == QLCNIC_ENABLE_FW_DUMP) {
+ break;
+ case QLCNIC_ENABLE_FW_DUMP:
if (!fw_dump->enable && fw_dump->tmpl_hdr) {
netdev_info(netdev, "Enabling FW dump\n");
fw_dump->enable = 1;
}
- } else {
+ break;
+ case QLCNIC_FORCE_FW_RESET:
+ netdev_info(netdev, "Forcing a FW reset\n");
+ qlcnic_dev_request_reset(adapter);
+ adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
+ break;
+ default:
if (val->flag > QLCNIC_DUMP_MASK_MAX ||
val->flag < QLCNIC_DUMP_MASK_MIN) {
netdev_info(netdev,
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index d14506f764e..92bc8ce9b28 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -609,6 +609,7 @@ enum {
QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */
};
+
/* Lock IDs for PHY lock */
#define PHY_LOCK_DRIVER 0x44524956
@@ -723,7 +724,8 @@ enum {
#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30
#define QLCNIC_RCODE_FATAL_ERROR BIT_31
#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
-#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
+#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0x1fffff)
+#define QLCNIC_FWERROR_FAN_FAILURE 0x16
#define FW_POLL_DELAY (1 * HZ)
#define FW_FAIL_THRESH 2
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 4055c218ef2..74e9d7b9496 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -1773,8 +1773,8 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
goto error;
} else {
fw_dump->clr = 1;
- snprintf(mesg, sizeof(mesg), "FW dump for device: %d\n",
- adapter->pdev->devfn);
+ snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
+ adapter->netdev->name);
dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
fw_dump->size);
/* Send a udev event to notify availability of FW dump */
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index ee8a3982395..312c1c37889 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -686,7 +686,13 @@ qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
u32 ver = -1, min_ver;
int ret;
- ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry);
+ if (adapter->ahw->revision_id == QLCNIC_P3P_C0)
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_C0_FW_IMAGE_REGION,
+ &fw_entry);
+ else
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_B0_FW_IMAGE_REGION,
+ &fw_entry);
+
if (!ret)
/* 0-4:-signature, 4-8:-fw version */
qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
@@ -1056,7 +1062,8 @@ qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter)
int
qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
{
- if (qlcnic_check_fw_hearbeat(adapter)) {
+ if ((adapter->flags & QLCNIC_FW_HANG) ||
+ qlcnic_check_fw_hearbeat(adapter)) {
qlcnic_rom_lock_recovery(adapter);
return 1;
}
@@ -1778,14 +1785,14 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
spin_unlock(&rds_ring->lock);
}
-static void dump_skb(struct sk_buff *skb)
+static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
int i;
unsigned char *data = skb->data;
printk(KERN_INFO "\n");
for (i = 0; i < skb->len; i++) {
- printk(KERN_INFO "%02x ", data[i]);
+ QLCDB(adapter, DRV, "%02x ", data[i]);
if ((i & 0x0f) == 8)
printk(KERN_INFO "\n");
}
@@ -1828,7 +1835,7 @@ void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
adapter->diag_cnt++;
else
- dump_skb(skb);
+ dump_skb(skb, adapter);
dev_kfree_skb_any(skb);
adapter->stats.rx_pkts++;
@@ -1882,8 +1889,8 @@ qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
u32 mac_low, mac_high;
int i;
- mac_low = QLCRD32(adapter, off1);
- mac_high = QLCRD32(adapter, off2);
+ mac_low = off1;
+ mac_high = off2;
if (alt_mac) {
mac_low |= (mac_low >> 16) | (mac_high << 16);
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 5ca1b562443..106503f118f 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -325,7 +325,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_start_xmit = qlcnic_xmit_frame,
.ndo_get_stats = qlcnic_get_stats,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = qlcnic_set_multi,
+ .ndo_set_rx_mode = qlcnic_set_multi,
.ndo_set_mac_address = qlcnic_set_mac,
.ndo_change_mtu = qlcnic_change_mtu,
.ndo_fix_features = qlcnic_fix_features,
@@ -643,8 +643,11 @@ static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
- u32 fw_major, fw_minor, fw_build;
+ u32 fw_major, fw_minor, fw_build, prev_fw_version;
struct pci_dev *pdev = adapter->pdev;
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+ prev_fw_version = adapter->fw_version;
fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
@@ -652,6 +655,17 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+ if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC) {
+ if (fw_dump->tmpl_hdr == NULL ||
+ adapter->fw_version > prev_fw_version) {
+ if (fw_dump->tmpl_hdr)
+ vfree(fw_dump->tmpl_hdr);
+ if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
+ dev_info(&pdev->dev,
+ "Supports FW dump capability\n");
+ }
+ }
+
dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
fw_major, fw_minor, fw_build);
if (adapter->ahw->port_type == QLCNIC_XGBE) {
@@ -1610,12 +1624,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_decr_ref;
}
- /* Get FW dump template and store it */
- if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
- if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
- dev_info(&pdev->dev,
- "Supports FW dump capability\n");
-
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
@@ -2127,13 +2135,13 @@ qlcnic_map_tx_skb(struct pci_dev *pdev,
frag = &skb_shinfo(skb)->frags[i];
nf = &pbuf->frag_array[i+1];
- map = pci_map_page(pdev, frag->page, frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, map))
+ map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
goto unwind;
nf->dma = map;
- nf->length = frag->size;
+ nf->length = skb_frag_size(frag);
}
return 0;
@@ -2213,7 +2221,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
- delta += skb_shinfo(skb)->frags[i].size;
+ delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (!__pskb_pull_tail(skb, delta))
goto drop_packet;
@@ -2682,6 +2690,7 @@ qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
qlcnic_api_unlock(adapter);
err:
adapter->fw_fail_cnt = 0;
+ adapter->flags &= ~QLCNIC_FW_HANG;
clear_bit(__QLCNIC_START_FW, &adapter->state);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
}
@@ -2859,6 +2868,7 @@ skip_ack_check:
(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
QLCDB(adapter, DRV, "Take FW dump\n");
qlcnic_dump_fw(adapter);
+ adapter->flags |= QLCNIC_FW_HANG;
}
rtnl_unlock();
@@ -2918,15 +2928,36 @@ qlcnic_detach_work(struct work_struct *work)
status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
- if (status & QLCNIC_RCODE_FATAL_ERROR)
+ if (status & QLCNIC_RCODE_FATAL_ERROR) {
+ dev_err(&adapter->pdev->dev,
+ "Detaching the device: peg halt status1=0x%x\n",
+ status);
+
+ if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
+ dev_err(&adapter->pdev->dev,
+ "On board active cooling fan failed. "
+ "Device has been halted.\n");
+ dev_err(&adapter->pdev->dev,
+ "Replace the adapter.\n");
+ }
+
goto err_ret;
+ }
- if (adapter->temp == QLCNIC_TEMP_PANIC)
+ if (adapter->temp == QLCNIC_TEMP_PANIC) {
+ dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n",
+ adapter->temp);
goto err_ret;
+ }
+
/* Dont ack if this instance is the reset owner */
if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
- if (qlcnic_set_drv_state(adapter, adapter->dev_state))
+ if (qlcnic_set_drv_state(adapter, adapter->dev_state)) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to set driver state,"
+ "detaching the device.\n");
goto err_ret;
+ }
}
adapter->fw_wait_cnt = 0;
@@ -2936,8 +2967,6 @@ qlcnic_detach_work(struct work_struct *work)
return;
err_ret:
- dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
- status, adapter->temp);
netif_device_attach(netdev);
qlcnic_clr_all_drv_state(adapter, 1);
}
@@ -3046,6 +3075,7 @@ attach:
done:
netif_device_attach(netdev);
adapter->fw_fail_cnt = 0;
+ adapter->flags &= ~QLCNIC_FW_HANG;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
if (!qlcnic_clr_drv_state(adapter))
@@ -3057,7 +3087,7 @@ static int
qlcnic_check_health(struct qlcnic_adapter *adapter)
{
u32 state = 0, heartbeat;
- struct net_device *netdev = adapter->netdev;
+ u32 peg_status;
if (qlcnic_check_temp(adapter))
goto detach;
@@ -3090,13 +3120,31 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
return 0;
+ adapter->flags |= QLCNIC_FW_HANG;
+
qlcnic_dev_request_reset(adapter);
if (auto_fw_reset)
clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
- dev_info(&netdev->dev, "firmware hang detected\n");
-
+ dev_err(&adapter->pdev->dev, "firmware hang detected\n");
+ dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
+ "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ "PEG_NET_4_PC: 0x%x\n",
+ QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1),
+ QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
+ peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+ if (LSW(MSB(peg_status)) == 0x67)
+ dev_err(&adapter->pdev->dev,
+ "Firmware aborted with error code 0x00006700. "
+ "Device is being reset.\n");
detach:
adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
QLCNIC_DEV_NEED_RESET;
@@ -3423,6 +3471,98 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
}
static int
+qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, u8 *state,
+ u8 *rate)
+{
+ *rate = LSB(beacon);
+ *state = MSB(beacon);
+
+ QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
+
+ if (!*state) {
+ *rate = __QLCNIC_MAX_LED_RATE;
+ return 0;
+ } else if (*state > __QLCNIC_MAX_LED_STATE)
+ return -EINVAL;
+
+ if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t
+qlcnic_store_beacon(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int max_sds_rings = adapter->max_sds_rings;
+ int dev_down = 0;
+ u16 beacon;
+ u8 b_state, b_rate;
+ int err;
+
+ if (len != sizeof(u16))
+ return QL_STATUS_INVALID_PARAM;
+
+ memcpy(&beacon, buf, sizeof(u16));
+ err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
+ if (err)
+ return err;
+
+ if (adapter->ahw->beacon_state == b_state)
+ return len;
+
+ if (!adapter->ahw->beacon_state)
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+ return -EBUSY;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+ err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
+ if (err) {
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+ return err;
+ }
+ dev_down = 1;
+ }
+
+ err = qlcnic_config_led(adapter, b_state, b_rate);
+
+ if (!err) {
+ adapter->ahw->beacon_state = b_state;
+ err = len;
+ }
+
+ if (dev_down) {
+ qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ }
+
+ if (!b_state)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ return err;
+}
+
+static ssize_t
+qlcnic_show_beacon(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
+}
+
+static struct device_attribute dev_attr_beacon = {
+ .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_beacon,
+ .store = qlcnic_store_beacon,
+};
+
+static int
qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
loff_t offset, size_t size)
{
@@ -4119,6 +4259,8 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
return;
if (device_create_file(dev, &dev_attr_diag_mode))
dev_info(dev, "failed to create diag_mode sysfs entry\n");
+ if (device_create_file(dev, &dev_attr_beacon))
+ dev_info(dev, "failed to create beacon sysfs entry");
if (device_create_bin_file(dev, &bin_attr_crb))
dev_info(dev, "failed to create crb sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_mem))
@@ -4149,6 +4291,7 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
return;
device_remove_file(dev, &dev_attr_diag_mode);
+ device_remove_file(dev, &dev_attr_beacon);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
device_remove_bin_file(dev, &bin_attr_pci_config);
diff --git a/drivers/net/qlge/Makefile b/drivers/net/ethernet/qlogic/qlge/Makefile
index 8a197658d76..8a197658d76 100644
--- a/drivers/net/qlge/Makefile
+++ b/drivers/net/ethernet/qlogic/qlge/Makefile
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 8731f79c9ef..8731f79c9ef 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index fca804f36d6..fca804f36d6 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 9b67bfea035..9b67bfea035 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index f07e96ec884..c92afcd912e 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1431,12 +1431,10 @@ static int ql_map_send(struct ql_adapter *qdev,
map_idx++;
}
- map =
- pci_map_page(qdev->pdev, frag->page,
- frag->page_offset, frag->size,
- PCI_DMA_TODEVICE);
+ map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netif_err(qdev, tx_queued, qdev->ndev,
"PCI mapping frags failed with error: %d.\n",
@@ -1445,10 +1443,10 @@ static int ql_map_send(struct ql_adapter *qdev,
}
tbd->addr = cpu_to_le64(map);
- tbd->len = cpu_to_le32(frag->size);
+ tbd->len = cpu_to_le32(skb_frag_size(frag));
dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
- frag->size);
+ skb_frag_size(frag));
}
/* Save the number of segments we've mapped. */
@@ -1477,8 +1475,6 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
{
struct sk_buff *skb;
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- struct skb_frag_struct *rx_frag;
- int nr_frags;
struct napi_struct *napi = &rx_ring->napi;
napi->dev = qdev->ndev;
@@ -1492,12 +1488,10 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
return;
}
prefetch(lbq_desc->p.pg_chunk.va);
- rx_frag = skb_shinfo(skb)->frags;
- nr_frags = skb_shinfo(skb)->nr_frags;
- rx_frag += nr_frags;
- rx_frag->page = lbq_desc->p.pg_chunk.page;
- rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
- rx_frag->size = length;
+ __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ length);
skb->len += length;
skb->data_len += length;
@@ -4676,7 +4670,7 @@ static const struct net_device_ops qlge_netdev_ops = {
.ndo_start_xmit = qlge_send,
.ndo_change_mtu = qlge_change_mtu,
.ndo_get_stats = qlge_get_stats,
- .ndo_set_multicast_list = qlge_set_multicast_list,
+ .ndo_set_rx_mode = qlge_set_multicast_list,
.ndo_set_mac_address = qlge_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = qlge_tx_timeout,
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
index ff2bf8a4e24..ff2bf8a4e24 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
diff --git a/drivers/net/ethernet/racal/Kconfig b/drivers/net/ethernet/racal/Kconfig
new file mode 100644
index 00000000000..01969e0a9c6
--- /dev/null
+++ b/drivers/net/ethernet/racal/Kconfig
@@ -0,0 +1,33 @@
+#
+# Racal-Interlan device configuration
+#
+
+config NET_VENDOR_RACAL
+ bool "Racal-Interlan (Micom) NI devices"
+ default y
+ depends on ISA
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, such
+ as the NI5010, NI5210 or NI6210, say Y and read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about NI cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_RACAL
+
+config NI5010
+ tristate "NI5010 support (EXPERIMENTAL)"
+ depends on ISA && EXPERIMENTAL && BROKEN_ON_SMP
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Note that this is still
+ experimental code.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ni5010.
+
+endif # NET_VENDOR_RACAL
diff --git a/drivers/net/ethernet/racal/Makefile b/drivers/net/ethernet/racal/Makefile
new file mode 100644
index 00000000000..1e210ca1d78
--- /dev/null
+++ b/drivers/net/ethernet/racal/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Racal-Interlan network device drivers.
+#
+
+obj-$(CONFIG_NI5010) += ni5010.o
diff --git a/drivers/net/ni5010.c b/drivers/net/ethernet/racal/ni5010.c
index 4d3f2e2b28b..072810da9a3 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ethernet/racal/ni5010.c
@@ -192,7 +192,7 @@ static const struct net_device_ops ni5010_netdev_ops = {
.ndo_open = ni5010_open,
.ndo_stop = ni5010_close,
.ndo_start_xmit = ni5010_send_packet,
- .ndo_set_multicast_list = ni5010_set_multicast_list,
+ .ndo_set_rx_mode = ni5010_set_multicast_list,
.ndo_tx_timeout = ni5010_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ni5010.h b/drivers/net/ethernet/racal/ni5010.h
index e10e717fcd7..e10e717fcd7 100644
--- a/drivers/net/ni5010.h
+++ b/drivers/net/ethernet/racal/ni5010.h
diff --git a/drivers/net/ethernet/rdc/Kconfig b/drivers/net/ethernet/rdc/Kconfig
new file mode 100644
index 00000000000..c8ba4b3494c
--- /dev/null
+++ b/drivers/net/ethernet/rdc/Kconfig
@@ -0,0 +1,35 @@
+#
+# RDC network device configuration
+#
+
+config NET_VENDOR_RDC
+ bool "RDC devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about RDC cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_RDC
+
+config R6040
+ tristate "RDC R6040 Fast Ethernet Adapter support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ select PHYLIB
+ ---help---
+ This is a driver for the R6040 Fast Ethernet MACs found in the
+ the RDC R-321x System-on-chips.
+
+ To compile this driver as a module, choose M here: the module
+ will be called r6040. This is recommended.
+
+endif # NET_VENDOR_RDC
diff --git a/drivers/net/ethernet/rdc/Makefile b/drivers/net/ethernet/rdc/Makefile
new file mode 100644
index 00000000000..8d51fd2d07f
--- /dev/null
+++ b/drivers/net/ethernet/rdc/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the RDC network device drivers.
+#
+
+obj-$(CONFIG_R6040) += r6040.o
diff --git a/drivers/net/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index b64fcee483a..1fc01ca72b4 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -49,8 +49,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
-#define DRV_VERSION "0.27"
-#define DRV_RELDATE "23Feb2011"
+#define DRV_VERSION "0.28"
+#define DRV_RELDATE "07Oct2011"
/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
@@ -470,6 +470,8 @@ static void r6040_down(struct net_device *dev)
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
+
+ phy_stop(lp->phydev);
}
static int r6040_close(struct net_device *dev)
@@ -727,6 +729,8 @@ static int r6040_up(struct net_device *dev)
/* Initialize all MAC registers */
r6040_init_mac_regs(dev);
+ phy_start(lp->phydev);
+
return 0;
}
@@ -982,7 +986,7 @@ static const struct net_device_ops r6040_netdev_ops = {
.ndo_stop = r6040_close,
.ndo_start_xmit = r6040_start_xmit,
.ndo_get_stats = r6040_get_stats,
- .ndo_set_multicast_list = r6040_multicast_list,
+ .ndo_set_rx_mode = r6040_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index cc4c210a91f..ee5da9293ce 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -777,15 +777,14 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len;
u32 ctrl;
dma_addr_t mapping;
- len = this_frag->size;
+ len = skb_frag_size(this_frag);
mapping = dma_map_single(&cp->pdev->dev,
- ((void *) page_address(this_frag->page) +
- this_frag->page_offset),
+ skb_frag_address(this_frag),
len, PCI_DMA_TODEVICE);
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
@@ -1325,6 +1324,15 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info
strcpy (info->bus_info, pci_name(cp->pdev));
}
+static void cp_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ ring->rx_max_pending = CP_RX_RING_SIZE;
+ ring->tx_max_pending = CP_TX_RING_SIZE;
+ ring->rx_pending = CP_RX_RING_SIZE;
+ ring->tx_pending = CP_TX_RING_SIZE;
+}
+
static int cp_get_regs_len(struct net_device *dev)
{
return CP_REGS_SIZE;
@@ -1526,6 +1534,7 @@ static const struct ethtool_ops cp_ethtool_ops = {
.get_eeprom_len = cp_get_eeprom_len,
.get_eeprom = cp_get_eeprom,
.set_eeprom = cp_set_eeprom,
+ .get_ringparam = cp_get_ringparam,
};
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1785,7 +1794,7 @@ static const struct net_device_ops cp_netdev_ops = {
.ndo_stop = cp_close,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = cp_set_mac_address,
- .ndo_set_multicast_list = cp_set_rx_mode,
+ .ndo_set_rx_mode = cp_set_rx_mode,
.ndo_get_stats = cp_get_stats,
.ndo_do_ioctl = cp_ioctl,
.ndo_start_xmit = cp_start_xmit,
diff --git a/drivers/net/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index c2672c692d6..4d6b254fc6c 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -916,7 +916,7 @@ static const struct net_device_ops rtl8139_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = rtl8139_set_mac_address,
.ndo_start_xmit = rtl8139_start_xmit,
- .ndo_set_multicast_list = rtl8139_set_rx_mode,
+ .ndo_set_rx_mode = rtl8139_set_rx_mode,
.ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = rtl8139_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
new file mode 100644
index 00000000000..84083ec6e61
--- /dev/null
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -0,0 +1,130 @@
+#
+# Realtek device configuration
+#
+
+config NET_VENDOR_REALTEK
+ bool "Realtek devices"
+ default y
+ depends on PCI || (PARPORT && X86)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Realtek devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_REALTEK
+
+config ATP
+ tristate "AT-LAN-TEC/RealTek pocket adapter support"
+ depends on PARPORT && X86
+ select CRC32
+ ---help---
+ This is a network (Ethernet) device which attaches to your parallel
+ port. Read <file:drivers/net/atp.c> as well as the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>, if you
+ want to use this. If you intend to use this driver, you should have
+ said N to the "Parallel printer support", because the two drivers
+ don't like each other.
+
+ To compile this driver as a module, choose M here: the module
+ will be called atp.
+
+config 8139CP
+ tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This is a driver for the Fast Ethernet PCI network cards based on
+ the RTL8139C+ chips. If you have one of those, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8139cp. This is recommended.
+
+config 8139TOO
+ tristate "RealTek RTL-8129/8130/8139 PCI Fast Ethernet Adapter support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This is a driver for the Fast Ethernet PCI network cards based on
+ the RTL 8129/8130/8139 chips. If you have one of those, say Y and
+ read the Ethernet-HOWTO <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8139too. This is recommended.
+
+config 8139TOO_PIO
+ bool "Use PIO instead of MMIO"
+ default y
+ depends on 8139TOO
+ ---help---
+ This instructs the driver to use programmed I/O ports (PIO) instead
+ of PCI shared memory (MMIO). This can possibly solve some problems
+ in case your mainboard has memory consistency issues. If unsure,
+ say N.
+
+config 8139TOO_TUNE_TWISTER
+ bool "Support for uncommon RTL-8139 rev. K (automatic channel equalization)"
+ depends on 8139TOO
+ ---help---
+ This implements a function which might come in handy in case you
+ are using low quality on long cabling. It is required for RealTek
+ RTL-8139 revision K boards, and totally unused otherwise. It tries
+ to match the transceiver to the cable characteristics. This is
+ experimental since hardly documented by the manufacturer.
+ If unsure, say Y.
+
+config 8139TOO_8129
+ bool "Support for older RTL-8129/8130 boards"
+ depends on 8139TOO
+ ---help---
+ This enables support for the older and uncommon RTL-8129 and
+ RTL-8130 chips, which support MII via an external transceiver,
+ instead of an internal one. Disabling this option will save some
+ memory by making the code size smaller. If unsure, say Y.
+
+config 8139_OLD_RX_RESET
+ bool "Use older RX-reset method"
+ depends on 8139TOO
+ ---help---
+ The 8139too driver was recently updated to contain a more rapid
+ reset sequence, in the face of severe receive errors. This "new"
+ RX-reset method should be adequate for all boards. But if you
+ experience problems, you can enable this option to restore the
+ old RX-reset behavior. If unsure, say N.
+
+config R8169
+ tristate "Realtek 8169 gigabit ethernet support"
+ depends on PCI
+ select FW_LOADER
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
+
+ To compile this driver as a module, choose M here: the module
+ will be called r8169. This is recommended.
+
+config SC92031
+ tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select CRC32
+ ---help---
+ This is a driver for the Fast Ethernet PCI network cards based on
+ the Silan SC92031 chip (sometimes also called Rsltek 8139D). If you
+ have one of these, say Y here.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sc92031. This is recommended.
+
+endif # NET_VENDOR_REALTEK
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
new file mode 100644
index 00000000000..e48cfb6ac42
--- /dev/null
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Realtek network device drivers.
+#
+
+obj-$(CONFIG_8139CP) += 8139cp.o
+obj-$(CONFIG_8139TOO) += 8139too.o
+obj-$(CONFIG_ATP) += atp.o
+obj-$(CONFIG_R8169) += r8169.o
+obj-$(CONFIG_SC92031) += sc92031.o
diff --git a/drivers/net/atp.c b/drivers/net/ethernet/realtek/atp.c
index f3459798b0e..e3f57fdbf0e 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -245,7 +245,7 @@ static const struct net_device_ops atp_netdev_ops = {
.ndo_open = net_open,
.ndo_stop = net_close,
.ndo_start_xmit = atp_send_packet,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_tx_timeout = tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/atp.h b/drivers/net/ethernet/realtek/atp.h
index 0edc642c2c2..0edc642c2c2 100644
--- a/drivers/net/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
diff --git a/drivers/net/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c2366701792..92b45f08858 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -42,6 +42,8 @@
#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
+#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
+#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
#ifdef RTL8169_DEBUG
@@ -133,6 +135,8 @@ enum mac_version {
RTL_GIGA_MAC_VER_32,
RTL_GIGA_MAC_VER_33,
RTL_GIGA_MAC_VER_34,
+ RTL_GIGA_MAC_VER_35,
+ RTL_GIGA_MAC_VER_36,
RTL_GIGA_MAC_NONE = 0xff,
};
@@ -141,84 +145,110 @@ enum rtl_tx_desc_version {
RTL_TD_1 = 1,
};
-#define _R(NAME,TD,FW) \
- { .name = NAME, .txd_version = TD, .fw_name = FW }
+#define JUMBO_1K ETH_DATA_LEN
+#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
+#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
+#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
+#define JUMBO_9K (9*1024 - ETH_HLEN - 2)
+
+#define _R(NAME,TD,FW,SZ,B) { \
+ .name = NAME, \
+ .txd_version = TD, \
+ .fw_name = FW, \
+ .jumbo_max = SZ, \
+ .jumbo_tx_csum = B \
+}
static const struct {
const char *name;
enum rtl_tx_desc_version txd_version;
const char *fw_name;
+ u16 jumbo_max;
+ bool jumbo_tx_csum;
} rtl_chip_infos[] = {
/* PCI devices. */
[RTL_GIGA_MAC_VER_01] =
- _R("RTL8169", RTL_TD_0, NULL),
+ _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
[RTL_GIGA_MAC_VER_02] =
- _R("RTL8169s", RTL_TD_0, NULL),
+ _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
[RTL_GIGA_MAC_VER_03] =
- _R("RTL8110s", RTL_TD_0, NULL),
+ _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
[RTL_GIGA_MAC_VER_04] =
- _R("RTL8169sb/8110sb", RTL_TD_0, NULL),
+ _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
[RTL_GIGA_MAC_VER_05] =
- _R("RTL8169sc/8110sc", RTL_TD_0, NULL),
+ _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
[RTL_GIGA_MAC_VER_06] =
- _R("RTL8169sc/8110sc", RTL_TD_0, NULL),
+ _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
/* PCI-E devices. */
[RTL_GIGA_MAC_VER_07] =
- _R("RTL8102e", RTL_TD_1, NULL),
+ _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
[RTL_GIGA_MAC_VER_08] =
- _R("RTL8102e", RTL_TD_1, NULL),
+ _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
[RTL_GIGA_MAC_VER_09] =
- _R("RTL8102e", RTL_TD_1, NULL),
+ _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
[RTL_GIGA_MAC_VER_10] =
- _R("RTL8101e", RTL_TD_0, NULL),
+ _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
[RTL_GIGA_MAC_VER_11] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL),
+ _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
[RTL_GIGA_MAC_VER_12] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL),
+ _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
[RTL_GIGA_MAC_VER_13] =
- _R("RTL8101e", RTL_TD_0, NULL),
+ _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
[RTL_GIGA_MAC_VER_14] =
- _R("RTL8100e", RTL_TD_0, NULL),
+ _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
[RTL_GIGA_MAC_VER_15] =
- _R("RTL8100e", RTL_TD_0, NULL),
+ _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
[RTL_GIGA_MAC_VER_16] =
- _R("RTL8101e", RTL_TD_0, NULL),
+ _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
[RTL_GIGA_MAC_VER_17] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL),
+ _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
[RTL_GIGA_MAC_VER_18] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
[RTL_GIGA_MAC_VER_19] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL),
+ _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
[RTL_GIGA_MAC_VER_20] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL),
+ _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
[RTL_GIGA_MAC_VER_21] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL),
+ _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
[RTL_GIGA_MAC_VER_22] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL),
+ _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
[RTL_GIGA_MAC_VER_23] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
[RTL_GIGA_MAC_VER_24] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
[RTL_GIGA_MAC_VER_25] =
- _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1),
+ _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
+ JUMBO_9K, false),
[RTL_GIGA_MAC_VER_26] =
- _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2),
+ _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
+ JUMBO_9K, false),
[RTL_GIGA_MAC_VER_27] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
[RTL_GIGA_MAC_VER_28] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
[RTL_GIGA_MAC_VER_29] =
- _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1),
+ _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
+ JUMBO_1K, true),
[RTL_GIGA_MAC_VER_30] =
- _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1),
+ _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
+ JUMBO_1K, true),
[RTL_GIGA_MAC_VER_31] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
[RTL_GIGA_MAC_VER_32] =
- _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1),
+ _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
+ JUMBO_9K, false),
[RTL_GIGA_MAC_VER_33] =
- _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2),
+ _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
+ JUMBO_9K, false),
[RTL_GIGA_MAC_VER_34] =
- _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3)
+ _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_35] =
+ _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_36] =
+ _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
+ JUMBO_9K, false),
};
#undef _R
@@ -311,6 +341,7 @@ enum rtl_registers {
MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
#define TxPacketMax (8064 >> 7)
+#define EarlySize 0x27
FuncEvent = 0xf0,
FuncEventMask = 0xf4,
@@ -460,8 +491,12 @@ enum rtl_register_content {
/* Config3 register p.25 */
MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
+ Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
+ /* Config4 register */
+ Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
+
/* Config5 register p.27 */
BWF = (1 << 6), /* Accept Broadcast wakeup frame */
MWF = (1 << 5), /* Accept Multicast wakeup frame */
@@ -670,6 +705,11 @@ struct rtl8169_private {
void (*up)(struct rtl8169_private *);
} pll_power_ops;
+ struct jumbo_ops {
+ void (*enable)(struct rtl8169_private *);
+ void (*disable)(struct rtl8169_private *);
+ } jumbo_ops;
+
int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
int (*get_settings)(struct net_device *, struct ethtool_cmd *);
void (*phy_reset_enable)(struct rtl8169_private *tp);
@@ -714,6 +754,8 @@ MODULE_FIRMWARE(FIRMWARE_8168E_1);
MODULE_FIRMWARE(FIRMWARE_8168E_2);
MODULE_FIRMWARE(FIRMWARE_8168E_3);
MODULE_FIRMWARE(FIRMWARE_8105E_1);
+MODULE_FIRMWARE(FIRMWARE_8168F_1);
+MODULE_FIRMWARE(FIRMWARE_8168F_2);
static int rtl8169_open(struct net_device *dev);
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
@@ -732,6 +774,19 @@ static void rtl8169_down(struct net_device *dev);
static void rtl8169_rx_clear(struct rtl8169_private *tp);
static int rtl8169_poll(struct napi_struct *napi, int budget);
+static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
+{
+ int cap = pci_pcie_cap(pdev);
+
+ if (cap) {
+ u16 ctl;
+
+ pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
+ ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
+ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
+ }
+}
+
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -1202,6 +1257,19 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
ERIAR_EXGMAC);
rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
ERIAR_EXGMAC);
+ } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_36) {
+ if (RTL_R8(PHYstatus) & _1000bpsF) {
+ rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
+ 0x00000011, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
+ 0x00000005, ERIAR_EXGMAC);
+ } else {
+ rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
+ 0x0000001f, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
+ 0x0000003f, ERIAR_EXGMAC);
+ }
}
}
@@ -1487,9 +1555,15 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
if (dev->mtu > TD_MSS_MAX)
features &= ~NETIF_F_ALL_TSO;
+ if (dev->mtu > JUMBO_1K &&
+ !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
+ features &= ~NETIF_F_IP_CSUM;
+
return features;
}
@@ -1741,6 +1815,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
u32 val;
int mac_version;
} mac_info[] = {
+ /* 8168F family. */
+ { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
+ { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
+
/* 8168E family. */
{ 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
{ 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
@@ -2859,7 +2937,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0004);
rtl_writephy(tp, 0x1f, 0x0007);
rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+ rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
rtl_writephy(tp, 0x1f, 0x0002);
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, 0x0d, 0x0007);
@@ -2875,6 +2953,97 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
}
+static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ /* Channel estimation fine tune */
+ { 0x1f, 0x0003 },
+ { 0x09, 0xa20f },
+ { 0x1f, 0x0000 },
+
+ /* Modify green table for giga & fnet */
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8b55 },
+ { 0x06, 0x0000 },
+ { 0x05, 0x8b5e },
+ { 0x06, 0x0000 },
+ { 0x05, 0x8b67 },
+ { 0x06, 0x0000 },
+ { 0x05, 0x8b70 },
+ { 0x06, 0x0000 },
+ { 0x1f, 0x0000 },
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x0078 },
+ { 0x17, 0x0000 },
+ { 0x19, 0x00fb },
+ { 0x1f, 0x0000 },
+
+ /* Modify green table for 10M */
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8b79 },
+ { 0x06, 0xaa00 },
+ { 0x1f, 0x0000 },
+
+ /* Disable hiimpedance detection (RTCT) */
+ { 0x1f, 0x0003 },
+ { 0x01, 0x328a },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_apply_firmware(tp);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ /* For 4-corner performance improve */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b80);
+ rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* PHY auto speed down */
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x002d);
+ rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ /* Improve 10M EEE waveform */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b86);
+ rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* Improve 2-pair detection performance */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b85);
+ rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ rtl_apply_firmware(tp);
+
+ /* For 4-corner performance improve */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b80);
+ rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* PHY auto speed down */
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x002d);
+ rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ /* Improve 10M EEE waveform */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b86);
+ rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -2999,6 +3168,12 @@ static void rtl_hw_phy_config(struct net_device *dev)
case RTL_GIGA_MAC_VER_34:
rtl8168e_2_hw_phy_config(tp);
break;
+ case RTL_GIGA_MAC_VER_35:
+ rtl8168f_1_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_36:
+ rtl8168f_2_hw_phy_config(tp);
+ break;
default:
break;
@@ -3288,7 +3463,7 @@ static const struct net_device_ops rtl8169_netdev_ops = {
.ndo_set_features = rtl8169_set_features,
.ndo_set_mac_address = rtl_set_mac_address,
.ndo_do_ioctl = rtl8169_ioctl,
- .ndo_set_multicast_list = rtl_set_rx_mode,
+ .ndo_set_rx_mode = rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rtl8169_netpoll,
#endif
@@ -3316,6 +3491,37 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
}
}
+static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_29:
+ case RTL_GIGA_MAC_VER_30:
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_34:
+ RTL_W32(RxConfig, RTL_R32(RxConfig) |
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
+{
+ if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
+ return false;
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, 0x0000);
+
+ rtl_wol_suspend_quirk(tp);
+
+ return true;
+}
+
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
rtl_writephy(tp, 0x1f, 0x0000);
@@ -3330,18 +3536,8 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- if (__rtl8169_get_wol(tp) & WAKE_ANY) {
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, MII_BMCR, 0x0000);
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_29 ||
- tp->mac_version == RTL_GIGA_MAC_VER_30)
- RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
- AcceptMulticast | AcceptMyPhys);
+ if (rtl_wol_pll_power_down(tp))
return;
- }
r810x_phy_power_down(tp);
}
@@ -3430,17 +3626,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
tp->mac_version == RTL_GIGA_MAC_VER_33)
rtl_ephy_write(ioaddr, 0x19, 0xff64);
- if (__rtl8169_get_wol(tp) & WAKE_ANY) {
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, MII_BMCR, 0x0000);
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
- tp->mac_version == RTL_GIGA_MAC_VER_33 ||
- tp->mac_version == RTL_GIGA_MAC_VER_34)
- RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
- AcceptMulticast | AcceptMyPhys);
+ if (rtl_wol_pll_power_down(tp))
return;
- }
r8168_phy_power_down(tp);
@@ -3483,8 +3670,8 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
r8168_phy_power_up(tp);
}
-static void rtl_pll_power_op(struct rtl8169_private *tp,
- void (*op)(struct rtl8169_private *))
+static void rtl_generic_op(struct rtl8169_private *tp,
+ void (*op)(struct rtl8169_private *))
{
if (op)
op(tp);
@@ -3492,12 +3679,12 @@ static void rtl_pll_power_op(struct rtl8169_private *tp,
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
- rtl_pll_power_op(tp, tp->pll_power_ops.down);
+ rtl_generic_op(tp, tp->pll_power_ops.down);
}
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
- rtl_pll_power_op(tp, tp->pll_power_ops.up);
+ rtl_generic_op(tp, tp->pll_power_ops.up);
}
static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
@@ -3534,6 +3721,8 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_32:
case RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_36:
ops->down = r8168_pll_power_down;
ops->up = r8168_pll_power_up;
break;
@@ -3586,6 +3775,150 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}
+static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+ rtl_generic_op(tp, tp->jumbo_ops.enable);
+}
+
+static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ rtl_generic_op(tp, tp->jumbo_ops.disable);
+}
+
+static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
+ RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
+ rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
+}
+
+static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
+ RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
+ rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
+}
+
+static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
+}
+
+static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
+}
+
+static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ RTL_W8(MaxTxPacketSize, 0x3f);
+ RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
+ RTL_W8(Config4, RTL_R8(Config4) | 0x01);
+ pci_write_config_byte(pdev, 0x79, 0x20);
+}
+
+static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ RTL_W8(MaxTxPacketSize, 0x0c);
+ RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
+ RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
+ pci_write_config_byte(pdev, 0x79, 0x50);
+}
+
+static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+ rtl_tx_performance_tweak(tp->pci_dev,
+ (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+}
+
+static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ rtl_tx_performance_tweak(tp->pci_dev,
+ (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+}
+
+static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ r8168b_0_hw_jumbo_enable(tp);
+
+ RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
+}
+
+static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ r8168b_0_hw_jumbo_disable(tp);
+
+ RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
+}
+
+static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
+{
+ struct jumbo_ops *ops = &tp->jumbo_ops;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ ops->disable = r8168b_0_hw_jumbo_disable;
+ ops->enable = r8168b_0_hw_jumbo_enable;
+ break;
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ ops->disable = r8168b_1_hw_jumbo_disable;
+ ops->enable = r8168b_1_hw_jumbo_enable;
+ break;
+ case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ ops->disable = r8168c_hw_jumbo_disable;
+ ops->enable = r8168c_hw_jumbo_enable;
+ break;
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ ops->disable = r8168dp_hw_jumbo_disable;
+ ops->enable = r8168dp_hw_jumbo_enable;
+ break;
+ case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_34:
+ ops->disable = r8168e_hw_jumbo_disable;
+ ops->enable = r8168e_hw_jumbo_enable;
+ break;
+
+ /*
+ * No action needed for jumbo frames with 8169.
+ * No jumbo for 810x at all.
+ */
+ default:
+ ops->disable = NULL;
+ ops->enable = NULL;
+ break;
+ }
+}
+
static void rtl_hw_reset(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -3730,6 +4063,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_init_mdio_ops(tp);
rtl_init_pll_power_ops(tp);
+ rtl_init_jumbo_ops(tp);
rtl8169_print_mac_version(tp);
@@ -3813,6 +4147,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr,
(u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
+ if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
+ netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
+ "tx checksumming: %s]\n",
+ rtl_chip_infos[chipset].jumbo_max,
+ rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
+ }
if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
tp->mac_version == RTL_GIGA_MAC_VER_28 ||
@@ -4008,7 +4348,9 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
tp->mac_version == RTL_GIGA_MAC_VER_31) {
while (RTL_R8(TxPoll) & NPQ)
udelay(20);
- } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_35 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_36) {
RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
udelay(100);
@@ -4167,19 +4509,6 @@ static void rtl_hw_start_8169(struct net_device *dev)
RTL_W16(IntrMask, tp->intr_event);
}
-static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
-{
- int cap = pci_pcie_cap(pdev);
-
- if (cap) {
- u16 ctl;
-
- pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
- ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
- pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
- }
-}
-
static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
{
u32 csi;
@@ -4479,7 +4808,50 @@ static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
ERIAR_EXGMAC);
- RTL_W8(MaxTxPacketSize, 0x27);
+ RTL_W8(MaxTxPacketSize, EarlySize);
+
+ rtl_disable_clock_request(pdev);
+
+ RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+ RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+
+ /* Adjust EEE LED frequency */
+ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+
+ RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+ RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+}
+
+static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168f_1[] = {
+ { 0x06, 0x00c0, 0x0020 },
+ { 0x08, 0x0001, 0x0002 },
+ { 0x09, 0x0000, 0x0080 },
+ { 0x19, 0x0000, 0x0224 }
+ };
+
+ rtl_csi_access_enable_1(ioaddr);
+
+ rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
+ ERIAR_EXGMAC);
+
+ RTL_W8(MaxTxPacketSize, EarlySize);
rtl_disable_clock_request(pdev);
@@ -4588,6 +4960,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
rtl_hw_start_8168e_2(ioaddr, pdev);
break;
+ case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_36:
+ rtl_hw_start_8168f_1(ioaddr, pdev);
+ break;
+
default:
printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
dev->name, tp->mac_version);
@@ -4759,9 +5136,17 @@ static void rtl_hw_start_8101(struct net_device *dev)
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
- if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (new_mtu < ETH_ZLEN ||
+ new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
return -EINVAL;
+ if (new_mtu > ETH_DATA_LEN)
+ rtl_hw_jumbo_enable(tp);
+ else
+ rtl_hw_jumbo_disable(tp);
+
dev->mtu = new_mtu;
netdev_update_features(dev);
@@ -5040,7 +5425,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
entry = tp->cur_tx;
for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
- skb_frag_t *frag = info->frags + cur_frag;
+ const skb_frag_t *frag = info->frags + cur_frag;
dma_addr_t mapping;
u32 status, len;
void *addr;
@@ -5048,8 +5433,8 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
entry = (entry + 1) % NUM_TX_DESC;
txd = tp->TxDescArray + entry;
- len = frag->size;
- addr = ((void *) page_address(frag->page)) + frag->page_offset;
+ len = skb_frag_size(frag);
+ addr = skb_frag_address(frag);
mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
if (net_ratelimit())
@@ -5356,7 +5741,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
} else {
struct sk_buff *skb;
dma_addr_t addr = le64_to_cpu(desc->addr);
- int pkt_size = (status & 0x00001FFF) - 4;
+ int pkt_size = (status & 0x00003fff) - 4;
/*
* The driver does not support incoming fragmented
@@ -5788,11 +6173,30 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
#endif /* !CONFIG_PM */
+static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ /* WoL fails with 8168b when the receiver is disabled. */
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ pci_clear_master(tp->pci_dev);
+
+ RTL_W8(ChipCmd, CmdRxEnb);
+ /* PCI commit */
+ RTL_R8(ChipCmd);
+ break;
+ default:
+ break;
+ }
+}
+
static void rtl_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
rtl8169_net_suspend(dev);
@@ -5806,16 +6210,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
spin_unlock_irq(&tp->lock);
if (system_state == SYSTEM_POWER_OFF) {
- /* WoL fails with 8168b when the receiver is disabled. */
- if ((tp->mac_version == RTL_GIGA_MAC_VER_11 ||
- tp->mac_version == RTL_GIGA_MAC_VER_12 ||
- tp->mac_version == RTL_GIGA_MAC_VER_17) &&
- (tp->features & RTL_FEATURE_WOL)) {
- pci_clear_master(pdev);
-
- RTL_W8(ChipCmd, CmdRxEnb);
- /* PCI commit */
- RTL_R8(ChipCmd);
+ if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ rtl_wol_suspend_quirk(tp);
+ rtl_wol_shutdown_quirk(tp);
}
pci_wake_from_d3(pdev, true);
diff --git a/drivers/net/sc92031.c b/drivers/net/ethernet/realtek/sc92031.c
index 9da47337b7c..a284d644053 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/ethernet/realtek/sc92031.c
@@ -31,6 +31,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/mii.h>
#include <linux/crc32.h>
#include <asm/irq.h>
@@ -116,16 +117,9 @@ enum silan_registers {
TestD8 = 0xD8,
};
-#define MII_BMCR 0 // Basic mode control register
-#define MII_BMSR 1 // Basic mode status register
#define MII_JAB 16
#define MII_OutputStatus 24
-#define BMCR_FULLDPLX 0x0100 // Full duplex
-#define BMCR_ANRESTART 0x0200 // Auto negotiation restart
-#define BMCR_ANENABLE 0x1000 // Enable auto negotiation
-#define BMCR_SPEED100 0x2000 // Select 100Mbps
-#define BMSR_LSTATUS 0x0004 // Link status
#define PHY_16_JAB_ENB 0x1000
#define PHY_16_PORT_ENB 0x1
@@ -1390,7 +1384,7 @@ static const struct net_device_ops sc92031_netdev_ops = {
.ndo_start_xmit = sc92031_start_xmit,
.ndo_open = sc92031_open,
.ndo_stop = sc92031_stop,
- .ndo_set_multicast_list = sc92031_set_multicast_list,
+ .ndo_set_rx_mode = sc92031_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
new file mode 100644
index 00000000000..9755b49bbef
--- /dev/null
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -0,0 +1,19 @@
+#
+# Renesas device configuration
+#
+
+config SH_ETH
+ tristate "Renesas SuperH Ethernet support"
+ depends on SUPERH && \
+ (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
+ CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
+ CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7757)
+ select CRC32
+ select NET_CORE
+ select MII
+ select MDIO_BITBANG
+ select PHYLIB
+ ---help---
+ Renesas SuperH Ethernet device driver.
+ This driver supporting CPUs are:
+ - SH7710, SH7712, SH7763, SH7619, SH7724, and SH7757.
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
new file mode 100644
index 00000000000..1c278a8e066
--- /dev/null
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Renesas device drivers.
+#
+
+obj-$(CONFIG_SH_ETH) += sh_eth.o
diff --git a/drivers/net/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 1c1666e9910..9b230740c6a 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -21,6 +21,9 @@
*/
#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
@@ -35,6 +38,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
+#include <linux/sh_eth.h>
#include "sh_eth.h"
@@ -155,18 +159,18 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
/* save MAHR and MALR */
for (i = 0; i < 2; i++) {
- malr[i] = readl(GIGA_MALR(i));
- mahr[i] = readl(GIGA_MAHR(i));
+ malr[i] = ioread32((void *)GIGA_MALR(i));
+ mahr[i] = ioread32((void *)GIGA_MAHR(i));
}
/* reset device */
- writel(ARSTR_ARSTR, SH_GIGA_ETH_BASE + 0x1800);
+ iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
mdelay(1);
/* restore MAHR and MALR */
for (i = 0; i < 2; i++) {
- writel(malr[i], GIGA_MALR(i));
- writel(mahr[i], GIGA_MAHR(i));
+ iowrite32(malr[i], (void *)GIGA_MALR(i));
+ iowrite32(mahr[i], (void *)GIGA_MAHR(i));
}
}
@@ -515,9 +519,9 @@ static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
}
struct bb_info {
- void (*set_gate)(unsigned long addr);
+ void (*set_gate)(void *addr);
struct mdiobb_ctrl ctrl;
- u32 addr;
+ void *addr;
u32 mmd_msk;/* MMD */
u32 mdo_msk;
u32 mdi_msk;
@@ -525,21 +529,21 @@ struct bb_info {
};
/* PHY bit set */
-static void bb_set(u32 addr, u32 msk)
+static void bb_set(void *addr, u32 msk)
{
- writel(readl(addr) | msk, addr);
+ iowrite32(ioread32(addr) | msk, addr);
}
/* PHY bit clear */
-static void bb_clr(u32 addr, u32 msk)
+static void bb_clr(void *addr, u32 msk)
{
- writel((readl(addr) & ~msk), addr);
+ iowrite32((ioread32(addr) & ~msk), addr);
}
/* PHY bit read */
-static int bb_read(u32 addr, u32 msk)
+static int bb_read(void *addr, u32 msk)
{
- return (readl(addr) & msk) != 0;
+ return (ioread32(addr) & msk) != 0;
}
/* Data I/O pin control */
@@ -1680,7 +1684,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
}
/* bitbang init */
- bitbang->addr = ndev->base_addr + mdp->reg_offset[PIR];
+ bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
bitbang->set_gate = pd->set_mdio_gate;
bitbang->mdi_msk = 0x08;
bitbang->mdo_msk = 0x04;
@@ -1760,7 +1764,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_start_xmit = sh_eth_start_xmit,
.ndo_get_stats = sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
- .ndo_set_multicast_list = sh_eth_set_multicast_list,
+ .ndo_set_rx_mode = sh_eth_set_multicast_list,
#endif
.ndo_tx_timeout = sh_eth_tx_timeout,
.ndo_do_ioctl = sh_eth_do_ioctl,
@@ -1812,6 +1816,13 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ether_setup(ndev);
mdp = netdev_priv(ndev);
+ mdp->addr = ioremap(res->start, resource_size(res));
+ if (mdp->addr == NULL) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "ioremap failed.\n");
+ goto out_release;
+ }
+
spin_lock_init(&mdp->lock);
mdp->pdev = pdev;
pm_runtime_enable(&pdev->dev);
@@ -1892,6 +1903,8 @@ out_unregister:
out_release:
/* net_dev free */
+ if (mdp && mdp->addr)
+ iounmap(mdp->addr);
if (mdp && mdp->tsu_addr)
iounmap(mdp->tsu_addr);
if (ndev)
@@ -1910,6 +1923,7 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
sh_mdio_release(ndev);
unregister_netdev(ndev);
pm_runtime_disable(&pdev->dev);
+ iounmap(mdp->addr);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
new file mode 100644
index 00000000000..47877b13ffa
--- /dev/null
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -0,0 +1,830 @@
+/*
+ * SuperH Ethernet device driver
+ *
+ * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
+ * Copyright (C) 2008-2011 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#ifndef __SH_ETH_H__
+#define __SH_ETH_H__
+
+#define CARDNAME "sh-eth"
+#define TX_TIMEOUT (5*HZ)
+#define TX_RING_SIZE 64 /* Tx ring size */
+#define RX_RING_SIZE 64 /* Rx ring size */
+#define ETHERSMALL 60
+#define PKT_BUF_SZ 1538
+
+enum {
+ /* E-DMAC registers */
+ EDSR = 0,
+ EDMR,
+ EDTRR,
+ EDRRR,
+ EESR,
+ EESIPR,
+ TDLAR,
+ TDFAR,
+ TDFXR,
+ TDFFR,
+ RDLAR,
+ RDFAR,
+ RDFXR,
+ RDFFR,
+ TRSCER,
+ RMFCR,
+ TFTR,
+ FDR,
+ RMCR,
+ EDOCR,
+ TFUCR,
+ RFOCR,
+ FCFTR,
+ RPADIR,
+ TRIMD,
+ RBWAR,
+ TBRAR,
+
+ /* Ether registers */
+ ECMR,
+ ECSR,
+ ECSIPR,
+ PIR,
+ PSR,
+ RDMLR,
+ PIPR,
+ RFLR,
+ IPGR,
+ APR,
+ MPR,
+ PFTCR,
+ PFRCR,
+ RFCR,
+ RFCF,
+ TPAUSER,
+ TPAUSECR,
+ BCFR,
+ BCFRR,
+ GECMR,
+ BCULR,
+ MAHR,
+ MALR,
+ TROCR,
+ CDCR,
+ LCCR,
+ CNDCR,
+ CEFCR,
+ FRECR,
+ TSFRCR,
+ TLFRCR,
+ CERCR,
+ CEECR,
+ MAFCR,
+ RTRATE,
+
+ /* TSU Absolute address */
+ ARSTR,
+ TSU_CTRST,
+ TSU_FWEN0,
+ TSU_FWEN1,
+ TSU_FCM,
+ TSU_BSYSL0,
+ TSU_BSYSL1,
+ TSU_PRISL0,
+ TSU_PRISL1,
+ TSU_FWSL0,
+ TSU_FWSL1,
+ TSU_FWSLC,
+ TSU_QTAG0,
+ TSU_QTAG1,
+ TSU_QTAGM0,
+ TSU_QTAGM1,
+ TSU_FWSR,
+ TSU_FWINMK,
+ TSU_ADQT0,
+ TSU_ADQT1,
+ TSU_VTAG0,
+ TSU_VTAG1,
+ TSU_ADSBSY,
+ TSU_TEN,
+ TSU_POST1,
+ TSU_POST2,
+ TSU_POST3,
+ TSU_POST4,
+ TSU_ADRH0,
+ TSU_ADRL0,
+ TSU_ADRH31,
+ TSU_ADRL31,
+
+ TXNLCR0,
+ TXALCR0,
+ RXNLCR0,
+ RXALCR0,
+ FWNLCR0,
+ FWALCR0,
+ TXNLCR1,
+ TXALCR1,
+ RXNLCR1,
+ RXALCR1,
+ FWNLCR1,
+ FWALCR1,
+
+ /* This value must be written at last. */
+ SH_ETH_MAX_REGISTER_OFFSET,
+};
+
+static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [EDSR] = 0x0000,
+ [EDMR] = 0x0400,
+ [EDTRR] = 0x0408,
+ [EDRRR] = 0x0410,
+ [EESR] = 0x0428,
+ [EESIPR] = 0x0430,
+ [TDLAR] = 0x0010,
+ [TDFAR] = 0x0014,
+ [TDFXR] = 0x0018,
+ [TDFFR] = 0x001c,
+ [RDLAR] = 0x0030,
+ [RDFAR] = 0x0034,
+ [RDFXR] = 0x0038,
+ [RDFFR] = 0x003c,
+ [TRSCER] = 0x0438,
+ [RMFCR] = 0x0440,
+ [TFTR] = 0x0448,
+ [FDR] = 0x0450,
+ [RMCR] = 0x0458,
+ [RPADIR] = 0x0460,
+ [FCFTR] = 0x0468,
+
+ [ECMR] = 0x0500,
+ [ECSR] = 0x0510,
+ [ECSIPR] = 0x0518,
+ [PIR] = 0x0520,
+ [PSR] = 0x0528,
+ [PIPR] = 0x052c,
+ [RFLR] = 0x0508,
+ [APR] = 0x0554,
+ [MPR] = 0x0558,
+ [PFTCR] = 0x055c,
+ [PFRCR] = 0x0560,
+ [TPAUSER] = 0x0564,
+ [GECMR] = 0x05b0,
+ [BCULR] = 0x05b4,
+ [MAHR] = 0x05c0,
+ [MALR] = 0x05c8,
+ [TROCR] = 0x0700,
+ [CDCR] = 0x0708,
+ [LCCR] = 0x0710,
+ [CEFCR] = 0x0740,
+ [FRECR] = 0x0748,
+ [TSFRCR] = 0x0750,
+ [TLFRCR] = 0x0758,
+ [RFCR] = 0x0760,
+ [CERCR] = 0x0768,
+ [CEECR] = 0x0770,
+ [MAFCR] = 0x0778,
+
+ [ARSTR] = 0x0000,
+ [TSU_CTRST] = 0x0004,
+ [TSU_FWEN0] = 0x0010,
+ [TSU_FWEN1] = 0x0014,
+ [TSU_FCM] = 0x0018,
+ [TSU_BSYSL0] = 0x0020,
+ [TSU_BSYSL1] = 0x0024,
+ [TSU_PRISL0] = 0x0028,
+ [TSU_PRISL1] = 0x002c,
+ [TSU_FWSL0] = 0x0030,
+ [TSU_FWSL1] = 0x0034,
+ [TSU_FWSLC] = 0x0038,
+ [TSU_QTAG0] = 0x0040,
+ [TSU_QTAG1] = 0x0044,
+ [TSU_FWSR] = 0x0050,
+ [TSU_FWINMK] = 0x0054,
+ [TSU_ADQT0] = 0x0048,
+ [TSU_ADQT1] = 0x004c,
+ [TSU_VTAG0] = 0x0058,
+ [TSU_VTAG1] = 0x005c,
+ [TSU_ADSBSY] = 0x0060,
+ [TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
+ [TSU_ADRH0] = 0x0100,
+ [TSU_ADRL0] = 0x0104,
+ [TSU_ADRH31] = 0x01f8,
+ [TSU_ADRL31] = 0x01fc,
+
+ [TXNLCR0] = 0x0080,
+ [TXALCR0] = 0x0084,
+ [RXNLCR0] = 0x0088,
+ [RXALCR0] = 0x008c,
+ [FWNLCR0] = 0x0090,
+ [FWALCR0] = 0x0094,
+ [TXNLCR1] = 0x00a0,
+ [TXALCR1] = 0x00a0,
+ [RXNLCR1] = 0x00a8,
+ [RXALCR1] = 0x00ac,
+ [FWNLCR1] = 0x00b0,
+ [FWALCR1] = 0x00b4,
+};
+
+static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [ECMR] = 0x0100,
+ [RFLR] = 0x0108,
+ [ECSR] = 0x0110,
+ [ECSIPR] = 0x0118,
+ [PIR] = 0x0120,
+ [PSR] = 0x0128,
+ [RDMLR] = 0x0140,
+ [IPGR] = 0x0150,
+ [APR] = 0x0154,
+ [MPR] = 0x0158,
+ [TPAUSER] = 0x0164,
+ [RFCF] = 0x0160,
+ [TPAUSECR] = 0x0168,
+ [BCFRR] = 0x016c,
+ [MAHR] = 0x01c0,
+ [MALR] = 0x01c8,
+ [TROCR] = 0x01d0,
+ [CDCR] = 0x01d4,
+ [LCCR] = 0x01d8,
+ [CNDCR] = 0x01dc,
+ [CEFCR] = 0x01e4,
+ [FRECR] = 0x01e8,
+ [TSFRCR] = 0x01ec,
+ [TLFRCR] = 0x01f0,
+ [RFCR] = 0x01f4,
+ [MAFCR] = 0x01f8,
+ [RTRATE] = 0x01fc,
+
+ [EDMR] = 0x0000,
+ [EDTRR] = 0x0008,
+ [EDRRR] = 0x0010,
+ [TDLAR] = 0x0018,
+ [RDLAR] = 0x0020,
+ [EESR] = 0x0028,
+ [EESIPR] = 0x0030,
+ [TRSCER] = 0x0038,
+ [RMFCR] = 0x0040,
+ [TFTR] = 0x0048,
+ [FDR] = 0x0050,
+ [RMCR] = 0x0058,
+ [TFUCR] = 0x0064,
+ [RFOCR] = 0x0068,
+ [FCFTR] = 0x0070,
+ [RPADIR] = 0x0078,
+ [TRIMD] = 0x007c,
+ [RBWAR] = 0x00c8,
+ [RDFAR] = 0x00cc,
+ [TBRAR] = 0x00d4,
+ [TDFAR] = 0x00d8,
+};
+
+static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [ECMR] = 0x0160,
+ [ECSR] = 0x0164,
+ [ECSIPR] = 0x0168,
+ [PIR] = 0x016c,
+ [MAHR] = 0x0170,
+ [MALR] = 0x0174,
+ [RFLR] = 0x0178,
+ [PSR] = 0x017c,
+ [TROCR] = 0x0180,
+ [CDCR] = 0x0184,
+ [LCCR] = 0x0188,
+ [CNDCR] = 0x018c,
+ [CEFCR] = 0x0194,
+ [FRECR] = 0x0198,
+ [TSFRCR] = 0x019c,
+ [TLFRCR] = 0x01a0,
+ [RFCR] = 0x01a4,
+ [MAFCR] = 0x01a8,
+ [IPGR] = 0x01b4,
+ [APR] = 0x01b8,
+ [MPR] = 0x01bc,
+ [TPAUSER] = 0x01c4,
+ [BCFR] = 0x01cc,
+
+ [ARSTR] = 0x0000,
+ [TSU_CTRST] = 0x0004,
+ [TSU_FWEN0] = 0x0010,
+ [TSU_FWEN1] = 0x0014,
+ [TSU_FCM] = 0x0018,
+ [TSU_BSYSL0] = 0x0020,
+ [TSU_BSYSL1] = 0x0024,
+ [TSU_PRISL0] = 0x0028,
+ [TSU_PRISL1] = 0x002c,
+ [TSU_FWSL0] = 0x0030,
+ [TSU_FWSL1] = 0x0034,
+ [TSU_FWSLC] = 0x0038,
+ [TSU_QTAGM0] = 0x0040,
+ [TSU_QTAGM1] = 0x0044,
+ [TSU_ADQT0] = 0x0048,
+ [TSU_ADQT1] = 0x004c,
+ [TSU_FWSR] = 0x0050,
+ [TSU_FWINMK] = 0x0054,
+ [TSU_ADSBSY] = 0x0060,
+ [TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
+
+ [TXNLCR0] = 0x0080,
+ [TXALCR0] = 0x0084,
+ [RXNLCR0] = 0x0088,
+ [RXALCR0] = 0x008c,
+ [FWNLCR0] = 0x0090,
+ [FWALCR0] = 0x0094,
+ [TXNLCR1] = 0x00a0,
+ [TXALCR1] = 0x00a0,
+ [RXNLCR1] = 0x00a8,
+ [RXALCR1] = 0x00ac,
+ [FWNLCR1] = 0x00b0,
+ [FWALCR1] = 0x00b4,
+
+ [TSU_ADRH0] = 0x0100,
+ [TSU_ADRL0] = 0x0104,
+ [TSU_ADRL31] = 0x01fc,
+
+};
+
+/* Driver's parameters */
+#if defined(CONFIG_CPU_SH4)
+#define SH4_SKB_RX_ALIGN 32
+#else
+#define SH2_SH3_SKB_RX_ALIGN 2
+#endif
+
+/*
+ * Register's bits
+ */
+#ifdef CONFIG_CPU_SUBTYPE_SH7763
+/* EDSR */
+enum EDSR_BIT {
+ EDSR_ENT = 0x01, EDSR_ENR = 0x02,
+};
+#define EDSR_ENALL (EDSR_ENT|EDSR_ENR)
+
+/* GECMR */
+enum GECMR_BIT {
+ GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01,
+};
+#endif
+
+/* EDMR */
+enum DMAC_M_BIT {
+ EDMR_EL = 0x40, /* Litte endian */
+ EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
+ EDMR_SRST_GETHER = 0x03,
+ EDMR_SRST_ETHER = 0x01,
+};
+
+/* EDTRR */
+enum DMAC_T_BIT {
+ EDTRR_TRNS_GETHER = 0x03,
+ EDTRR_TRNS_ETHER = 0x01,
+};
+
+/* EDRRR*/
+enum EDRRR_R_BIT {
+ EDRRR_R = 0x01,
+};
+
+/* TPAUSER */
+enum TPAUSER_BIT {
+ TPAUSER_TPAUSE = 0x0000ffff,
+ TPAUSER_UNLIMITED = 0,
+};
+
+/* BCFR */
+enum BCFR_BIT {
+ BCFR_RPAUSE = 0x0000ffff,
+ BCFR_UNLIMITED = 0,
+};
+
+/* PIR */
+enum PIR_BIT {
+ PIR_MDI = 0x08, PIR_MDO = 0x04, PIR_MMD = 0x02, PIR_MDC = 0x01,
+};
+
+/* PSR */
+enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, };
+
+/* EESR */
+enum EESR_BIT {
+ EESR_TWB1 = 0x80000000,
+ EESR_TWB = 0x40000000, /* same as TWB0 */
+ EESR_TC1 = 0x20000000,
+ EESR_TUC = 0x10000000,
+ EESR_ROC = 0x08000000,
+ EESR_TABT = 0x04000000,
+ EESR_RABT = 0x02000000,
+ EESR_RFRMER = 0x01000000, /* same as RFCOF */
+ EESR_ADE = 0x00800000,
+ EESR_ECI = 0x00400000,
+ EESR_FTC = 0x00200000, /* same as TC or TC0 */
+ EESR_TDE = 0x00100000,
+ EESR_TFE = 0x00080000, /* same as TFUF */
+ EESR_FRC = 0x00040000, /* same as FR */
+ EESR_RDE = 0x00020000,
+ EESR_RFE = 0x00010000,
+ EESR_CND = 0x00000800,
+ EESR_DLC = 0x00000400,
+ EESR_CD = 0x00000200,
+ EESR_RTO = 0x00000100,
+ EESR_RMAF = 0x00000080,
+ EESR_CEEF = 0x00000040,
+ EESR_CELF = 0x00000020,
+ EESR_RRF = 0x00000010,
+ EESR_RTLF = 0x00000008,
+ EESR_RTSF = 0x00000004,
+ EESR_PRE = 0x00000002,
+ EESR_CERF = 0x00000001,
+};
+
+#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
+ EESR_RTO)
+#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \
+ EESR_RDE | EESR_RFRMER | EESR_ADE | \
+ EESR_TFE | EESR_TDE | EESR_ECI)
+#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
+ EESR_TFE)
+
+/* EESIPR */
+enum DMAC_IM_BIT {
+ DMAC_M_TWB = 0x40000000, DMAC_M_TABT = 0x04000000,
+ DMAC_M_RABT = 0x02000000,
+ DMAC_M_RFRMER = 0x01000000, DMAC_M_ADF = 0x00800000,
+ DMAC_M_ECI = 0x00400000, DMAC_M_FTC = 0x00200000,
+ DMAC_M_TDE = 0x00100000, DMAC_M_TFE = 0x00080000,
+ DMAC_M_FRC = 0x00040000, DMAC_M_RDE = 0x00020000,
+ DMAC_M_RFE = 0x00010000, DMAC_M_TINT4 = 0x00000800,
+ DMAC_M_TINT3 = 0x00000400, DMAC_M_TINT2 = 0x00000200,
+ DMAC_M_TINT1 = 0x00000100, DMAC_M_RINT8 = 0x00000080,
+ DMAC_M_RINT5 = 0x00000010, DMAC_M_RINT4 = 0x00000008,
+ DMAC_M_RINT3 = 0x00000004, DMAC_M_RINT2 = 0x00000002,
+ DMAC_M_RINT1 = 0x00000001,
+};
+
+/* Receive descriptor bit */
+enum RD_STS_BIT {
+ RD_RACT = 0x80000000, RD_RDEL = 0x40000000,
+ RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
+ RD_RFE = 0x08000000, RD_RFS10 = 0x00000200,
+ RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080,
+ RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020,
+ RD_RFS5 = 0x00000010, RD_RFS4 = 0x00000008,
+ RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002,
+ RD_RFS1 = 0x00000001,
+};
+#define RDF1ST RD_RFP1
+#define RDFEND RD_RFP0
+#define RD_RFP (RD_RFP1|RD_RFP0)
+
+/* FCFTR */
+enum FCFTR_BIT {
+ FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000,
+ FCFTR_RFF0 = 0x00010000, FCFTR_RFD2 = 0x00000004,
+ FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001,
+};
+#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
+#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)
+
+/* Transfer descriptor bit */
+enum TD_STS_BIT {
+ TD_TACT = 0x80000000,
+ TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000,
+ TD_TFP0 = 0x10000000,
+};
+#define TDF1ST TD_TFP1
+#define TDFEND TD_TFP0
+#define TD_TFP (TD_TFP1|TD_TFP0)
+
+/* RMCR */
+#define DEFAULT_RMCR_VALUE 0x00000000
+
+/* ECMR */
+enum FELIC_MODE_BIT {
+ ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000,
+ ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
+ ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
+ ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
+ ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
+ ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004,
+ ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001,
+};
+
+/* ECSR */
+enum ECSR_STATUS_BIT {
+ ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10,
+ ECSR_LCHNG = 0x04,
+ ECSR_MPD = 0x02, ECSR_ICD = 0x01,
+};
+
+#define DEFAULT_ECSR_INIT (ECSR_BRCRX | ECSR_PSRTO | ECSR_LCHNG | \
+ ECSR_ICD | ECSIPR_MPDIP)
+
+/* ECSIPR */
+enum ECSIPR_STATUS_MASK_BIT {
+ ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10,
+ ECSIPR_LCHNGIP = 0x04,
+ ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01,
+};
+
+#define DEFAULT_ECSIPR_INIT (ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | \
+ ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP)
+
+/* APR */
+enum APR_BIT {
+ APR_AP = 0x00000001,
+};
+
+/* MPR */
+enum MPR_BIT {
+ MPR_MP = 0x00000001,
+};
+
+/* TRSCER */
+enum DESC_I_BIT {
+ DESC_I_TINT4 = 0x0800, DESC_I_TINT3 = 0x0400, DESC_I_TINT2 = 0x0200,
+ DESC_I_TINT1 = 0x0100, DESC_I_RINT8 = 0x0080, DESC_I_RINT5 = 0x0010,
+ DESC_I_RINT4 = 0x0008, DESC_I_RINT3 = 0x0004, DESC_I_RINT2 = 0x0002,
+ DESC_I_RINT1 = 0x0001,
+};
+
+/* RPADIR */
+enum RPADIR_BIT {
+ RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
+ RPADIR_PADR = 0x0003f,
+};
+
+/* RFLR */
+#define RFLR_VALUE 0x1000
+
+/* FDR */
+#define DEFAULT_FDR_INIT 0x00000707
+
+enum phy_offsets {
+ PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
+ PHY_ANA = 4, PHY_ANL = 5, PHY_ANE = 6,
+ PHY_16 = 16,
+};
+
+/* PHY_CTRL */
+enum PHY_CTRL_BIT {
+ PHY_C_RESET = 0x8000, PHY_C_LOOPBK = 0x4000, PHY_C_SPEEDSL = 0x2000,
+ PHY_C_ANEGEN = 0x1000, PHY_C_PWRDN = 0x0800, PHY_C_ISO = 0x0400,
+ PHY_C_RANEG = 0x0200, PHY_C_DUPLEX = 0x0100, PHY_C_COLT = 0x0080,
+};
+#define DM9161_PHY_C_ANEGEN 0 /* auto nego special */
+
+/* PHY_STAT */
+enum PHY_STAT_BIT {
+ PHY_S_100T4 = 0x8000, PHY_S_100X_F = 0x4000, PHY_S_100X_H = 0x2000,
+ PHY_S_10T_F = 0x1000, PHY_S_10T_H = 0x0800, PHY_S_ANEGC = 0x0020,
+ PHY_S_RFAULT = 0x0010, PHY_S_ANEGA = 0x0008, PHY_S_LINK = 0x0004,
+ PHY_S_JAB = 0x0002, PHY_S_EXTD = 0x0001,
+};
+
+/* PHY_ANA */
+enum PHY_ANA_BIT {
+ PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
+ PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
+ PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
+ PHY_A_SEL = 0x001e,
+};
+/* PHY_ANL */
+enum PHY_ANL_BIT {
+ PHY_L_NP = 0x8000, PHY_L_ACK = 0x4000, PHY_L_RF = 0x2000,
+ PHY_L_FCS = 0x0400, PHY_L_T4 = 0x0200, PHY_L_FDX = 0x0100,
+ PHY_L_HDX = 0x0080, PHY_L_10FDX = 0x0040, PHY_L_10HDX = 0x0020,
+ PHY_L_SEL = 0x001f,
+};
+
+/* PHY_ANE */
+enum PHY_ANE_BIT {
+ PHY_E_PDF = 0x0010, PHY_E_LPNPA = 0x0008, PHY_E_NPA = 0x0004,
+ PHY_E_PRX = 0x0002, PHY_E_LPANEGA = 0x0001,
+};
+
+/* DM9161 */
+enum PHY_16_BIT {
+ PHY_16_BP4B45 = 0x8000, PHY_16_BPSCR = 0x4000, PHY_16_BPALIGN = 0x2000,
+ PHY_16_BP_ADPOK = 0x1000, PHY_16_Repeatmode = 0x0800,
+ PHY_16_TXselect = 0x0400,
+ PHY_16_Rsvd = 0x0200, PHY_16_RMIIEnable = 0x0100,
+ PHY_16_Force100LNK = 0x0080,
+ PHY_16_APDLED_CTL = 0x0040, PHY_16_COLLED_CTL = 0x0020,
+ PHY_16_RPDCTR_EN = 0x0010,
+ PHY_16_ResetStMch = 0x0008, PHY_16_PreamSupr = 0x0004,
+ PHY_16_Sleepmode = 0x0002,
+ PHY_16_RemoteLoopOut = 0x0001,
+};
+
+#define POST_RX 0x08
+#define POST_FW 0x04
+#define POST0_RX (POST_RX)
+#define POST0_FW (POST_FW)
+#define POST1_RX (POST_RX >> 2)
+#define POST1_FW (POST_FW >> 2)
+#define POST_ALL (POST0_RX | POST0_FW | POST1_RX | POST1_FW)
+
+/* ARSTR */
+enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
+
+/* TSU_FWEN0 */
+enum TSU_FWEN0_BIT {
+ TSU_FWEN0_0 = 0x00000001,
+};
+
+/* TSU_ADSBSY */
+enum TSU_ADSBSY_BIT {
+ TSU_ADSBSY_0 = 0x00000001,
+};
+
+/* TSU_TEN */
+enum TSU_TEN_BIT {
+ TSU_TEN_0 = 0x80000000,
+};
+
+/* TSU_FWSL0 */
+enum TSU_FWSL0_BIT {
+ TSU_FWSL0_FW50 = 0x1000, TSU_FWSL0_FW40 = 0x0800,
+ TSU_FWSL0_FW30 = 0x0400, TSU_FWSL0_FW20 = 0x0200,
+ TSU_FWSL0_FW10 = 0x0100, TSU_FWSL0_RMSA0 = 0x0010,
+};
+
+/* TSU_FWSLC */
+enum TSU_FWSLC_BIT {
+ TSU_FWSLC_POSTENU = 0x2000, TSU_FWSLC_POSTENL = 0x1000,
+ TSU_FWSLC_CAMSEL03 = 0x0080, TSU_FWSLC_CAMSEL02 = 0x0040,
+ TSU_FWSLC_CAMSEL01 = 0x0020, TSU_FWSLC_CAMSEL00 = 0x0010,
+ TSU_FWSLC_CAMSEL13 = 0x0008, TSU_FWSLC_CAMSEL12 = 0x0004,
+ TSU_FWSLC_CAMSEL11 = 0x0002, TSU_FWSLC_CAMSEL10 = 0x0001,
+};
+
+/*
+ * The sh ether Tx buffer descriptors.
+ * This structure should be 20 bytes.
+ */
+struct sh_eth_txdesc {
+ u32 status; /* TD0 */
+#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+ u16 pad0; /* TD1 */
+ u16 buffer_length; /* TD1 */
+#else
+ u16 buffer_length; /* TD1 */
+ u16 pad0; /* TD1 */
+#endif
+ u32 addr; /* TD2 */
+ u32 pad1; /* padding data */
+} __attribute__((aligned(2), packed));
+
+/*
+ * The sh ether Rx buffer descriptors.
+ * This structure should be 20 bytes.
+ */
+struct sh_eth_rxdesc {
+ u32 status; /* RD0 */
+#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+ u16 frame_length; /* RD1 */
+ u16 buffer_length; /* RD1 */
+#else
+ u16 buffer_length; /* RD1 */
+ u16 frame_length; /* RD1 */
+#endif
+ u32 addr; /* RD2 */
+ u32 pad0; /* padding data */
+} __attribute__((aligned(2), packed));
+
+/* This structure is used by each CPU dependency handling. */
+struct sh_eth_cpu_data {
+ /* optional functions */
+ void (*chip_reset)(struct net_device *ndev);
+ void (*set_duplex)(struct net_device *ndev);
+ void (*set_rate)(struct net_device *ndev);
+
+ /* mandatory initialize value */
+ unsigned long eesipr_value;
+
+ /* optional initialize value */
+ unsigned long ecsr_value;
+ unsigned long ecsipr_value;
+ unsigned long fdr_value;
+ unsigned long fcftr_value;
+ unsigned long rpadir_value;
+ unsigned long rmcr_value;
+
+ /* interrupt checking mask */
+ unsigned long tx_check;
+ unsigned long eesr_err_check;
+ unsigned long tx_error_check;
+
+ /* hardware features */
+ unsigned no_psr:1; /* EtherC DO NOT have PSR */
+ unsigned apr:1; /* EtherC have APR */
+ unsigned mpr:1; /* EtherC have MPR */
+ unsigned tpauser:1; /* EtherC have TPAUSER */
+ unsigned bculr:1; /* EtherC have BCULR */
+ unsigned tsu:1; /* EtherC have TSU */
+ unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
+ unsigned rpadir:1; /* E-DMAC have RPADIR */
+ unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
+ unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
+};
+
+struct sh_eth_private {
+ struct platform_device *pdev;
+ struct sh_eth_cpu_data *cd;
+ const u16 *reg_offset;
+ void __iomem *addr;
+ void __iomem *tsu_addr;
+ dma_addr_t rx_desc_dma;
+ dma_addr_t tx_desc_dma;
+ struct sh_eth_rxdesc *rx_ring;
+ struct sh_eth_txdesc *tx_ring;
+ struct sk_buff **rx_skbuff;
+ struct sk_buff **tx_skbuff;
+ struct net_device_stats stats;
+ struct timer_list timer;
+ spinlock_t lock;
+ u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ u32 cur_tx, dirty_tx;
+ u32 rx_buf_sz; /* Based on MTU+slack. */
+ int edmac_endian;
+ /* MII transceiver section. */
+ u32 phy_id; /* PHY ID */
+ struct mii_bus *mii_bus; /* MDIO bus control */
+ struct phy_device *phydev; /* PHY device control */
+ enum phy_state link;
+ phy_interface_t phy_interface;
+ int msg_enable;
+ int speed;
+ int duplex;
+ u32 rx_int_var, tx_int_var; /* interrupt control variables */
+ char post_rx; /* POST receive */
+ char post_fw; /* POST forward */
+ struct net_device_stats tsu_stats; /* TSU forward status */
+
+ unsigned no_ether_link:1;
+ unsigned ether_link_active_low:1;
+};
+
+static inline void sh_eth_soft_swap(char *src, int len)
+{
+#ifdef __LITTLE_ENDIAN__
+ u32 *p = (u32 *)src;
+ u32 *maxp;
+ maxp = p + ((len + sizeof(u32) - 1) / sizeof(u32));
+
+ for (; p < maxp; p++)
+ *p = swab32(*p);
+#endif
+}
+
+static inline void sh_eth_write(struct net_device *ndev, unsigned long data,
+ int enum_index)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]);
+}
+
+static inline unsigned long sh_eth_read(struct net_device *ndev,
+ int enum_index)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ return ioread32(mdp->addr + mdp->reg_offset[enum_index]);
+}
+
+static inline void sh_eth_tsu_write(struct sh_eth_private *mdp,
+ unsigned long data, int enum_index)
+{
+ iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
+static inline unsigned long sh_eth_tsu_read(struct sh_eth_private *mdp,
+ int enum_index)
+{
+ return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
+#endif /* #ifndef __SH_ETH_H__ */
diff --git a/drivers/net/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index a7ff8ea342b..a7ff8ea342b 100644
--- a/drivers/net/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
diff --git a/drivers/net/ethernet/seeq/Kconfig b/drivers/net/ethernet/seeq/Kconfig
new file mode 100644
index 00000000000..29f18533fdc
--- /dev/null
+++ b/drivers/net/ethernet/seeq/Kconfig
@@ -0,0 +1,47 @@
+#
+# SEEQ device configuration
+#
+
+config NET_VENDOR_SEEQ
+ bool "SEEQ devices"
+ default y
+ depends on HAS_IOMEM
+ depends on (ARM && ARCH_ACORN) || SGI_HAS_SEEQ || EXPERIMENTAL
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about SEEQ devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_SEEQ
+
+config ARM_ETHER3
+ tristate "Acorn/ANT Ether3 support"
+ depends on ARM && ARCH_ACORN
+ ---help---
+ If you have an Acorn system with one of these network cards, you
+ should say Y to this option if you wish to use it with Linux.
+
+config SEEQ8005
+ tristate "SEEQ8005 support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ ---help---
+ This is a driver for the SEEQ 8005 network (Ethernet) card. If this
+ is for you, read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called seeq8005.
+
+config SGISEEQ
+ tristate "SGI Seeq ethernet controller support"
+ depends on SGI_HAS_SEEQ
+ ---help---
+ Say Y here if you have an Seeq based Ethernet network card. This is
+ used in many Silicon Graphics machines.
+
+endif # NET_VENDOR_SEEQ
diff --git a/drivers/net/ethernet/seeq/Makefile b/drivers/net/ethernet/seeq/Makefile
new file mode 100644
index 00000000000..3e258a580c0
--- /dev/null
+++ b/drivers/net/ethernet/seeq/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the SEEQ network device drivers
+#
+
+obj-$(CONFIG_ARM_ETHER3) += ether3.o
+obj-$(CONFIG_SEEQ8005) += seeq8005.o
+obj-$(CONFIG_SGISEEQ) += sgiseeq.o
diff --git a/drivers/net/arm/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 44a8746f401..893c880dadf 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -761,7 +761,7 @@ static const struct net_device_ops ether3_netdev_ops = {
.ndo_open = ether3_open,
.ndo_stop = ether3_close,
.ndo_start_xmit = ether3_sendpacket,
- .ndo_set_multicast_list = ether3_setmulticastlist,
+ .ndo_set_rx_mode = ether3_setmulticastlist,
.ndo_tx_timeout = ether3_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/arm/ether3.h b/drivers/net/ethernet/seeq/ether3.h
index 2db63b08bdf..2db63b08bdf 100644
--- a/drivers/net/arm/ether3.h
+++ b/drivers/net/ethernet/seeq/ether3.h
diff --git a/drivers/net/seeq8005.c b/drivers/net/ethernet/seeq/seeq8005.c
index d2fce98f557..60561451789 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/ethernet/seeq/seeq8005.c
@@ -148,7 +148,7 @@ static const struct net_device_ops seeq8005_netdev_ops = {
.ndo_stop = seeq8005_close,
.ndo_start_xmit = seeq8005_send_packet,
.ndo_tx_timeout = seeq8005_timeout,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/seeq8005.h b/drivers/net/ethernet/seeq/seeq8005.h
index 5dfb0098c6c..5dfb0098c6c 100644
--- a/drivers/net/seeq8005.h
+++ b/drivers/net/ethernet/seeq/seeq8005.h
diff --git a/drivers/net/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 52fb7ed9f36..c3673f151a4 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -715,7 +715,7 @@ static const struct net_device_ops sgiseeq_netdev_ops = {
.ndo_stop = sgiseeq_close,
.ndo_start_xmit = sgiseeq_start_xmit,
.ndo_tx_timeout = timeout,
- .ndo_set_multicast_list = sgiseeq_set_multicast,
+ .ndo_set_rx_mode = sgiseeq_set_multicast,
.ndo_set_mac_address = sgiseeq_set_mac_address,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/sgiseeq.h b/drivers/net/ethernet/seeq/sgiseeq.h
index 2211e2987a8..2211e2987a8 100644
--- a/drivers/net/sgiseeq.h
+++ b/drivers/net/ethernet/seeq/sgiseeq.h
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
new file mode 100644
index 00000000000..5d18841f0f3
--- /dev/null
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -0,0 +1,21 @@
+config SFC
+ tristate "Solarflare SFC4000/SFC9000-family support"
+ depends on PCI && INET
+ select MDIO
+ select CRC32
+ select I2C
+ select I2C_ALGOBIT
+ ---help---
+ This driver supports 10-gigabit Ethernet cards based on
+ the Solarflare SFC4000 and SFC9000-family controllers.
+
+ To compile this driver as a module, choose M here. The module
+ will be called sfc.
+config SFC_MTD
+ bool "Solarflare SFC4000/SFC9000-family MTD support"
+ depends on SFC && MTD && !(SFC=y && MTD=m)
+ default y
+ ---help---
+ This exposes the on-board flash memory as MTD devices (e.g.
+ /dev/mtd1). This makes it possible to upload new firmware
+ to the NIC.
diff --git a/drivers/net/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index ab31c7124db..ab31c7124db 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 098ac2ad757..098ac2ad757 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
diff --git a/drivers/net/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b59abc706d9..de9afebe183 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1343,7 +1343,8 @@ static int efx_probe_nic(struct efx_nic *efx)
netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
/* Initialise the interrupt moderation settings */
- efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
+ efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
+ true);
return 0;
@@ -1542,18 +1543,19 @@ static void efx_remove_all(struct efx_nic *efx)
*
**************************************************************************/
-static unsigned irq_mod_ticks(int usecs, int resolution)
+static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int resolution)
{
- if (usecs <= 0)
- return 0; /* cannot receive interrupts ahead of time :-) */
+ if (usecs == 0)
+ return 0;
if (usecs < resolution)
return 1; /* never round down to 0 */
return usecs / resolution;
}
/* Set interrupt moderation parameters */
-void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
- bool rx_adaptive)
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx)
{
struct efx_channel *channel;
unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
@@ -1561,6 +1563,16 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
EFX_ASSERT_RESET_SERIALISED(efx);
+ if (tx_ticks > EFX_IRQ_MOD_MAX || rx_ticks > EFX_IRQ_MOD_MAX)
+ return -EINVAL;
+
+ if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
+ !rx_may_override_tx) {
+ netif_err(efx, drv, efx->net_dev, "Channels are shared. "
+ "RX and TX IRQ moderation must be equal\n");
+ return -EINVAL;
+ }
+
efx->irq_rx_adaptive = rx_adaptive;
efx->irq_rx_moderation = rx_ticks;
efx_for_each_channel(channel, efx) {
@@ -1569,6 +1581,26 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
else if (efx_channel_has_tx_queues(channel))
channel->irq_moderation = tx_ticks;
}
+
+ return 0;
+}
+
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive)
+{
+ *rx_adaptive = efx->irq_rx_adaptive;
+ *rx_usecs = efx->irq_rx_moderation * EFX_IRQ_MOD_RESOLUTION;
+
+ /* If channels are shared between RX and TX, so is IRQ
+ * moderation. Otherwise, IRQ moderation is the same for all
+ * TX channels and is not adaptive.
+ */
+ if (efx->tx_channel_offset == 0)
+ *tx_usecs = *rx_usecs;
+ else
+ *tx_usecs =
+ efx->channel[efx->tx_channel_offset]->irq_moderation *
+ EFX_IRQ_MOD_RESOLUTION;
}
/**************************************************************************
@@ -1889,7 +1921,7 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_do_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
- .ndo_set_multicast_list = efx_set_multicast_list,
+ .ndo_set_rx_mode = efx_set_multicast_list,
.ndo_set_features = efx_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
diff --git a/drivers/net/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index b0d1209ea18..442f4d0c247 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -111,8 +111,11 @@ extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
/* Global */
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
-extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
- int rx_usecs, bool rx_adaptive);
+extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx);
+extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive);
/* Dummy PHY ops for PHY drivers */
extern int efx_port_dummy_op_int(struct efx_nic *efx);
diff --git a/drivers/net/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index d725a8fbe1a..d725a8fbe1a 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
new file mode 100644
index 00000000000..f3cd96dfa39
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -0,0 +1,1025 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/in.h>
+#include "net_driver.h"
+#include "workarounds.h"
+#include "selftest.h"
+#include "efx.h"
+#include "filter.h"
+#include "nic.h"
+
+struct ethtool_string {
+ char name[ETH_GSTRING_LEN];
+};
+
+struct efx_ethtool_stat {
+ const char *name;
+ enum {
+ EFX_ETHTOOL_STAT_SOURCE_mac_stats,
+ EFX_ETHTOOL_STAT_SOURCE_nic,
+ EFX_ETHTOOL_STAT_SOURCE_channel,
+ EFX_ETHTOOL_STAT_SOURCE_tx_queue
+ } source;
+ unsigned offset;
+ u64(*get_stat) (void *field); /* Reader function */
+};
+
+/* Initialiser for a struct #efx_ethtool_stat with type-checking */
+#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
+ get_stat_function) { \
+ .name = #stat_name, \
+ .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
+ .offset = ((((field_type *) 0) == \
+ &((struct efx_##source_name *)0)->field) ? \
+ offsetof(struct efx_##source_name, field) : \
+ offsetof(struct efx_##source_name, field)), \
+ .get_stat = get_stat_function, \
+}
+
+static u64 efx_get_uint_stat(void *field)
+{
+ return *(unsigned int *)field;
+}
+
+static u64 efx_get_ulong_stat(void *field)
+{
+ return *(unsigned long *)field;
+}
+
+static u64 efx_get_u64_stat(void *field)
+{
+ return *(u64 *) field;
+}
+
+static u64 efx_get_atomic_stat(void *field)
+{
+ return atomic_read((atomic_t *) field);
+}
+
+#define EFX_ETHTOOL_ULONG_MAC_STAT(field) \
+ EFX_ETHTOOL_STAT(field, mac_stats, field, \
+ unsigned long, efx_get_ulong_stat)
+
+#define EFX_ETHTOOL_U64_MAC_STAT(field) \
+ EFX_ETHTOOL_STAT(field, mac_stats, field, \
+ u64, efx_get_u64_stat)
+
+#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
+ EFX_ETHTOOL_STAT(name, nic, n_##name, \
+ unsigned int, efx_get_uint_stat)
+
+#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
+ EFX_ETHTOOL_STAT(field, nic, field, \
+ atomic_t, efx_get_atomic_stat)
+
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
+ EFX_ETHTOOL_STAT(field, channel, n_##field, \
+ unsigned int, efx_get_uint_stat)
+
+#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
+ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
+ unsigned int, efx_get_uint_stat)
+
+static struct efx_ethtool_stat efx_ethtool_stats[] = {
+ EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_control),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_64),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_good),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_control),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_64),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error),
+ EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
+ EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+};
+
+/* Number of ethtool statistics */
+#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
+
+#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
+
+/**************************************************************************
+ *
+ * Ethtool operations
+ *
+ **************************************************************************
+ */
+
+/* Identify device by flashing LEDs */
+static int efx_ethtool_phys_id(struct net_device *net_dev,
+ enum ethtool_phys_id_state state)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ enum efx_led_mode mode = EFX_LED_DEFAULT;
+
+ switch (state) {
+ case ETHTOOL_ID_ON:
+ mode = EFX_LED_ON;
+ break;
+ case ETHTOOL_ID_OFF:
+ mode = EFX_LED_OFF;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ mode = EFX_LED_DEFAULT;
+ break;
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+ }
+
+ efx->type->set_id_led(efx, mode);
+ return 0;
+}
+
+/* This must be called with rtnl_lock held. */
+static int efx_ethtool_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_link_state *link_state = &efx->link_state;
+
+ mutex_lock(&efx->mac_lock);
+ efx->phy_op->get_settings(efx, ecmd);
+ mutex_unlock(&efx->mac_lock);
+
+ /* GMAC does not support 1000Mbps HD */
+ ecmd->supported &= ~SUPPORTED_1000baseT_Half;
+ /* Both MACs support pause frames (bidirectional and respond-only) */
+ ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+ if (LOOPBACK_INTERNAL(efx)) {
+ ethtool_cmd_speed_set(ecmd, link_state->speed);
+ ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+/* This must be called with rtnl_lock held. */
+static int efx_ethtool_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ /* GMAC does not support 1000Mbps HD */
+ if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
+ (ecmd->duplex != DUPLEX_FULL)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "rejecting unsupported 1000Mbps HD setting\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&efx->mac_lock);
+ rc = efx->phy_op->set_settings(efx, ecmd);
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ efx_mcdi_print_fwver(efx, info->fw_version,
+ sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+}
+
+static int efx_ethtool_get_regs_len(struct net_device *net_dev)
+{
+ return efx_nic_get_regs_len(netdev_priv(net_dev));
+}
+
+static void efx_ethtool_get_regs(struct net_device *net_dev,
+ struct ethtool_regs *regs, void *buf)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ regs->version = efx->type->revision;
+ efx_nic_get_regs(efx, buf);
+}
+
+static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ return efx->msg_enable;
+}
+
+static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ efx->msg_enable = msg_enable;
+}
+
+/**
+ * efx_fill_test - fill in an individual self-test entry
+ * @test_index: Index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ * @test: Pointer to test result (used only if data != %NULL)
+ * @unit_format: Unit name format (e.g. "chan\%d")
+ * @unit_id: Unit id (e.g. 0 for "chan0")
+ * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
+ * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
+ *
+ * Fill in an individual self-test entry.
+ */
+static void efx_fill_test(unsigned int test_index,
+ struct ethtool_string *strings, u64 *data,
+ int *test, const char *unit_format, int unit_id,
+ const char *test_format, const char *test_id)
+{
+ struct ethtool_string unit_str, test_str;
+
+ /* Fill data value, if applicable */
+ if (data)
+ data[test_index] = *test;
+
+ /* Fill string, if applicable */
+ if (strings) {
+ if (strchr(unit_format, '%'))
+ snprintf(unit_str.name, sizeof(unit_str.name),
+ unit_format, unit_id);
+ else
+ strcpy(unit_str.name, unit_format);
+ snprintf(test_str.name, sizeof(test_str.name),
+ test_format, test_id);
+ snprintf(strings[test_index].name,
+ sizeof(strings[test_index].name),
+ "%-6s %-24s", unit_str.name, test_str.name);
+ }
+}
+
+#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
+#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
+#define EFX_LOOPBACK_NAME(_mode, _counter) \
+ "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
+
+/**
+ * efx_fill_loopback_test - fill in a block of loopback self-test entries
+ * @efx: Efx NIC
+ * @lb_tests: Efx loopback self-test results structure
+ * @mode: Loopback test mode
+ * @test_index: Starting index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ */
+static int efx_fill_loopback_test(struct efx_nic *efx,
+ struct efx_loopback_self_tests *lb_tests,
+ enum efx_loopback_mode mode,
+ unsigned int test_index,
+ struct ethtool_string *strings, u64 *data)
+{
+ struct efx_channel *channel = efx_get_channel(efx, 0);
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_sent[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_sent"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_done[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_done"));
+ }
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_good,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_good"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_bad,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_bad"));
+
+ return test_index;
+}
+
+/**
+ * efx_ethtool_fill_self_tests - get self-test details
+ * @efx: Efx NIC
+ * @tests: Efx self-test results structure, or %NULL
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ */
+static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ struct ethtool_string *strings,
+ u64 *data)
+{
+ struct efx_channel *channel;
+ unsigned int n = 0, i;
+ enum efx_loopback_mode mode;
+
+ efx_fill_test(n++, strings, data, &tests->phy_alive,
+ "phy", 0, "alive", NULL);
+ efx_fill_test(n++, strings, data, &tests->nvram,
+ "core", 0, "nvram", NULL);
+ efx_fill_test(n++, strings, data, &tests->interrupt,
+ "core", 0, "interrupt", NULL);
+
+ /* Event queues */
+ efx_for_each_channel(channel, efx) {
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_dma[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.dma", NULL);
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_int[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.int", NULL);
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_poll[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.poll", NULL);
+ }
+
+ efx_fill_test(n++, strings, data, &tests->registers,
+ "core", 0, "registers", NULL);
+
+ if (efx->phy_op->run_tests != NULL) {
+ EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);
+
+ for (i = 0; true; ++i) {
+ const char *name;
+
+ EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
+ name = efx->phy_op->test_name(efx, i);
+ if (name == NULL)
+ break;
+
+ efx_fill_test(n++, strings, data, &tests->phy_ext[i],
+ "phy", 0, name, NULL);
+ }
+ }
+
+ /* Loopback tests */
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+ if (!(efx->loopback_modes & (1 << mode)))
+ continue;
+ n = efx_fill_loopback_test(efx,
+ &tests->loopback[mode], mode, n,
+ strings, data);
+ }
+
+ return n;
+}
+
+static int efx_ethtool_get_sset_count(struct net_device *net_dev,
+ int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return EFX_ETHTOOL_NUM_STATS;
+ case ETH_SS_TEST:
+ return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
+ NULL, NULL, NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void efx_ethtool_get_strings(struct net_device *net_dev,
+ u32 string_set, u8 *strings)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct ethtool_string *ethtool_strings =
+ (struct ethtool_string *)strings;
+ int i;
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
+ strncpy(ethtool_strings[i].name,
+ efx_ethtool_stats[i].name,
+ sizeof(ethtool_strings[i].name));
+ break;
+ case ETH_SS_TEST:
+ efx_ethtool_fill_self_tests(efx, NULL,
+ ethtool_strings, NULL);
+ break;
+ default:
+ /* No other string sets */
+ break;
+ }
+}
+
+static void efx_ethtool_get_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_mac_stats *mac_stats = &efx->mac_stats;
+ struct efx_ethtool_stat *stat;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct rtnl_link_stats64 temp;
+ int i;
+
+ EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
+
+ /* Update MAC and NIC statistics */
+ dev_get_stats(net_dev, &temp);
+
+ /* Fill detailed statistics buffer */
+ for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
+ stat = &efx_ethtool_stats[i];
+ switch (stat->source) {
+ case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
+ data[i] = stat->get_stat((void *)mac_stats +
+ stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_nic:
+ data[i] = stat->get_stat((void *)efx + stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_channel:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx)
+ data[i] += stat->get_stat((void *)channel +
+ stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ data[i] +=
+ stat->get_stat((void *)tx_queue
+ + stat->offset);
+ }
+ break;
+ }
+ }
+}
+
+static void efx_ethtool_self_test(struct net_device *net_dev,
+ struct ethtool_test *test, u64 *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_self_tests *efx_tests;
+ int already_up;
+ int rc = -ENOMEM;
+
+ efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+ if (!efx_tests)
+ goto fail;
+
+
+ ASSERT_RTNL();
+ if (efx->state != STATE_RUNNING) {
+ rc = -EIO;
+ goto fail1;
+ }
+
+ netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+ /* We need rx buffers and interrupts. */
+ already_up = (efx->net_dev->flags & IFF_UP);
+ if (!already_up) {
+ rc = dev_open(efx->net_dev);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed opening device.\n");
+ goto fail1;
+ }
+ }
+
+ rc = efx_selftest(efx, efx_tests, test->flags);
+
+ if (!already_up)
+ dev_close(efx->net_dev);
+
+ netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
+ rc == 0 ? "passed" : "failed",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+fail1:
+ /* Fill ethtool results structures */
+ efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+ kfree(efx_tests);
+fail:
+ if (rc)
+ test->flags |= ETH_TEST_FL_FAILED;
+}
+
+/* Restart autonegotiation */
+static int efx_ethtool_nway_reset(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return mdio45_nway_restart(&efx->mdio);
+}
+
+/*
+ * Each channel has a single IRQ and moderation timer, started by any
+ * completion (or other event). Unless the module parameter
+ * separate_tx_channels is set, IRQs and moderation are therefore
+ * shared between RX and TX completions. In this case, when RX IRQ
+ * moderation is explicitly changed then TX IRQ moderation is
+ * automatically changed too, but otherwise we fail if the two values
+ * are requested to be different.
+ *
+ * The hardware does not support a limit on the number of completions
+ * before an IRQ, so we do not use the max_frames fields. We should
+ * report and require that max_frames == (usecs != 0), but this would
+ * invalidate existing user documentation.
+ *
+ * The hardware does not have distinct settings for interrupt
+ * moderation while the previous IRQ is being handled, so we should
+ * not use the 'irq' fields. However, an earlier developer
+ * misunderstood the meaning of the 'irq' fields and the driver did
+ * not support the standard fields. To avoid invalidating existing
+ * user documentation, we report and accept changes through either the
+ * standard or 'irq' fields. If both are changed at the same time, we
+ * prefer the standard field.
+ *
+ * We implement adaptive IRQ moderation, but use a different algorithm
+ * from that assumed in the definition of struct ethtool_coalesce.
+ * Therefore we do not use any of the adaptive moderation parameters
+ * in it.
+ */
+
+static int efx_ethtool_get_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ unsigned int tx_usecs, rx_usecs;
+ bool rx_adaptive;
+
+ efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
+
+ coalesce->tx_coalesce_usecs = tx_usecs;
+ coalesce->tx_coalesce_usecs_irq = tx_usecs;
+ coalesce->rx_coalesce_usecs = rx_usecs;
+ coalesce->rx_coalesce_usecs_irq = rx_usecs;
+ coalesce->use_adaptive_rx_coalesce = rx_adaptive;
+
+ return 0;
+}
+
+static int efx_ethtool_set_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ unsigned int tx_usecs, rx_usecs;
+ bool adaptive, rx_may_override_tx;
+ int rc;
+
+ if (coalesce->use_adaptive_tx_coalesce)
+ return -EINVAL;
+
+ efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
+
+ if (coalesce->rx_coalesce_usecs != rx_usecs)
+ rx_usecs = coalesce->rx_coalesce_usecs;
+ else
+ rx_usecs = coalesce->rx_coalesce_usecs_irq;
+
+ adaptive = coalesce->use_adaptive_rx_coalesce;
+
+ /* If channels are shared, TX IRQ moderation can be quietly
+ * overridden unless it is changed from its old value.
+ */
+ rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
+ coalesce->tx_coalesce_usecs_irq == tx_usecs);
+ if (coalesce->tx_coalesce_usecs != tx_usecs)
+ tx_usecs = coalesce->tx_coalesce_usecs;
+ else
+ tx_usecs = coalesce->tx_coalesce_usecs_irq;
+
+ rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
+ rx_may_override_tx);
+ if (rc != 0)
+ return rc;
+
+ efx_for_each_channel(channel, efx)
+ efx->type->push_irq_moderation(channel);
+
+ return 0;
+}
+
+static void efx_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->rx_pending = efx->rxq_entries;
+ ring->tx_pending = efx->txq_entries;
+}
+
+static int efx_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
+ ring->tx_pending > EFX_MAX_DMAQ_SIZE)
+ return -EINVAL;
+
+ if (ring->rx_pending < EFX_MIN_RING_SIZE ||
+ ring->tx_pending < EFX_MIN_RING_SIZE) {
+ netif_err(efx, drv, efx->net_dev,
+ "TX and RX queues cannot be smaller than %ld\n",
+ EFX_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
+ return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
+}
+
+static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ u8 wanted_fc, old_fc;
+ u32 old_adv;
+ bool reset;
+ int rc = 0;
+
+ mutex_lock(&efx->mac_lock);
+
+ wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
+ (pause->tx_pause ? EFX_FC_TX : 0) |
+ (pause->autoneg ? EFX_FC_AUTO : 0));
+
+ if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Flow control unsupported: tx ON rx OFF\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Autonegotiation is disabled\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* TX flow control may automatically turn itself off if the
+ * link partner (intermittently) stops responding to pause
+ * frames. There isn't any indication that this has happened,
+ * so the best we do is leave it up to the user to spot this
+ * and fix it be cycling transmit flow control on this end. */
+ reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
+ if (EFX_WORKAROUND_11482(efx) && reset) {
+ if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
+ /* Recover by resetting the EM block */
+ falcon_stop_nic_stats(efx);
+ falcon_drain_tx_fifo(efx);
+ efx->mac_op->reconfigure(efx);
+ falcon_start_nic_stats(efx);
+ } else {
+ /* Schedule a reset to recover */
+ efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+ }
+ }
+
+ old_adv = efx->link_advertising;
+ old_fc = efx->wanted_fc;
+ efx_link_set_wanted_fc(efx, wanted_fc);
+ if (efx->link_advertising != old_adv ||
+ (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to advertise requested flow "
+ "control setting\n");
+ goto out;
+ }
+ }
+
+ /* Reconfigure the MAC. The PHY *may* generate a link state change event
+ * if the user just changed the advertised capabilities, but there's no
+ * harm doing this twice */
+ efx->mac_op->reconfigure(efx);
+
+out:
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
+ pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
+ pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
+}
+
+
+static void efx_ethtool_get_wol(struct net_device *net_dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ return efx->type->get_wol(efx, wol);
+}
+
+
+static int efx_ethtool_set_wol(struct net_device *net_dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ return efx->type->set_wol(efx, wol->wolopts);
+}
+
+static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ rc = efx->type->map_reset_flags(flags);
+ if (rc < 0)
+ return rc;
+
+ return efx_reset(efx, rc);
+}
+
+static int
+efx_ethtool_get_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info, u32 *rules __always_unused)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = efx->n_rx_channels;
+ return 0;
+
+ case ETHTOOL_GRXFH: {
+ unsigned min_revision = 0;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ min_revision = EFX_REV_FALCON_B0;
+ break;
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ min_revision = EFX_REV_SIENA_A0;
+ break;
+ default:
+ break;
+ }
+ if (efx_nic_rev(efx) < min_revision)
+ info->data = 0;
+ return 0;
+ }
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
+ struct ethtool_rx_ntuple *ntuple)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
+ struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
+ struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
+ struct efx_filter_spec filter;
+ int rc;
+
+ /* Range-check action */
+ if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
+ ntuple->fs.action >= (s32)efx->n_rx_channels)
+ return -EINVAL;
+
+ if (~ntuple->fs.data_mask)
+ return -EINVAL;
+
+ efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
+ (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
+ 0xfff : ntuple->fs.action);
+
+ switch (ntuple->fs.flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW: {
+ u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
+ IPPROTO_TCP : IPPROTO_UDP);
+
+ /* Must match all of destination, */
+ if (ip_mask->ip4dst | ip_mask->pdst)
+ return -EINVAL;
+ /* all or none of source, */
+ if ((ip_mask->ip4src | ip_mask->psrc) &&
+ ((__force u32)~ip_mask->ip4src |
+ (__force u16)~ip_mask->psrc))
+ return -EINVAL;
+ /* and nothing else */
+ if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
+ return -EINVAL;
+
+ if (!ip_mask->ip4src)
+ rc = efx_filter_set_ipv4_full(&filter, proto,
+ ip_entry->ip4dst,
+ ip_entry->pdst,
+ ip_entry->ip4src,
+ ip_entry->psrc);
+ else
+ rc = efx_filter_set_ipv4_local(&filter, proto,
+ ip_entry->ip4dst,
+ ip_entry->pdst);
+ if (rc)
+ return rc;
+ break;
+ }
+
+ case ETHER_FLOW:
+ /* Must match all of destination, */
+ if (!is_zero_ether_addr(mac_mask->h_dest))
+ return -EINVAL;
+ /* all or none of VID, */
+ if (ntuple->fs.vlan_tag_mask != 0xf000 &&
+ ntuple->fs.vlan_tag_mask != 0xffff)
+ return -EINVAL;
+ /* and nothing else */
+ if (!is_broadcast_ether_addr(mac_mask->h_source) ||
+ mac_mask->h_proto != htons(0xffff))
+ return -EINVAL;
+
+ rc = efx_filter_set_eth_local(
+ &filter,
+ (ntuple->fs.vlan_tag_mask == 0xf000) ?
+ ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
+ mac_entry->h_dest);
+ if (rc)
+ return rc;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
+ return efx_filter_remove_filter(efx, &filter);
+
+ rc = efx_filter_insert_filter(efx, &filter, true);
+ return rc < 0 ? rc : 0;
+}
+
+static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
+ struct ethtool_rxfh_indir *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ size_t copy_size =
+ min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return -EOPNOTSUPP;
+
+ indir->size = ARRAY_SIZE(efx->rx_indir_table);
+ memcpy(indir->ring_index, efx->rx_indir_table,
+ copy_size * sizeof(indir->ring_index[0]));
+ return 0;
+}
+
+static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
+ const struct ethtool_rxfh_indir *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ size_t i;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return -EOPNOTSUPP;
+
+ /* Validate size and indices */
+ if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ if (indir->ring_index[i] >= efx->n_rx_channels)
+ return -EINVAL;
+
+ memcpy(efx->rx_indir_table, indir->ring_index,
+ sizeof(efx->rx_indir_table));
+ efx_nic_push_rx_indir_table(efx);
+ return 0;
+}
+
+const struct ethtool_ops efx_ethtool_ops = {
+ .get_settings = efx_ethtool_get_settings,
+ .set_settings = efx_ethtool_set_settings,
+ .get_drvinfo = efx_ethtool_get_drvinfo,
+ .get_regs_len = efx_ethtool_get_regs_len,
+ .get_regs = efx_ethtool_get_regs,
+ .get_msglevel = efx_ethtool_get_msglevel,
+ .set_msglevel = efx_ethtool_set_msglevel,
+ .nway_reset = efx_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = efx_ethtool_get_coalesce,
+ .set_coalesce = efx_ethtool_set_coalesce,
+ .get_ringparam = efx_ethtool_get_ringparam,
+ .set_ringparam = efx_ethtool_set_ringparam,
+ .get_pauseparam = efx_ethtool_get_pauseparam,
+ .set_pauseparam = efx_ethtool_set_pauseparam,
+ .get_sset_count = efx_ethtool_get_sset_count,
+ .self_test = efx_ethtool_self_test,
+ .get_strings = efx_ethtool_get_strings,
+ .set_phys_id = efx_ethtool_phys_id,
+ .get_ethtool_stats = efx_ethtool_get_stats,
+ .get_wol = efx_ethtool_get_wol,
+ .set_wol = efx_ethtool_set_wol,
+ .reset = efx_ethtool_reset,
+ .get_rxnfc = efx_ethtool_get_rxnfc,
+ .set_rx_ntuple = efx_ethtool_set_rx_ntuple,
+ .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
+ .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
+};
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 94bf4aaf984..4dd1748a19c 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -104,6 +104,8 @@ static void falcon_push_irq_moderation(struct efx_channel *channel)
efx_dword_t timer_cmd;
struct efx_nic *efx = channel->efx;
+ BUILD_BUG_ON(EFX_IRQ_MOD_MAX > (1 << FRF_AB_TC_TIMER_VAL_WIDTH));
+
/* Set timer register */
if (channel->irq_moderation) {
EFX_POPULATE_DWORD_2(timer_cmd,
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index b9cc846811d..b9cc846811d 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
index 9516452c079..9516452c079 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/ethernet/sfc/falcon_xmac.c
diff --git a/drivers/net/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 2b9636f96e0..2b9636f96e0 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
diff --git a/drivers/net/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 872f2132a49..872f2132a49 100644
--- a/drivers/net/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
diff --git a/drivers/net/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 751d1ec112c..751d1ec112c 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
diff --git a/drivers/net/sfc/mac.h b/drivers/net/ethernet/sfc/mac.h
index d6a255d0856..d6a255d0856 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/ethernet/sfc/mac.h
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 81a42539746..81a42539746 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index aced2a7856f..aced2a7856f 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
index 50c20777a56..50c20777a56 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/ethernet/sfc/mcdi_mac.c
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 41fe06fa060..41fe06fa060 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_phy.c
index 6c63ab0710a..6c63ab0710a 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/ethernet/sfc/mcdi_phy.c
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c
index 7ab385c8136..7ab385c8136 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/ethernet/sfc/mdio_10g.c
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index a97dbbd2de9..a97dbbd2de9 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index b6304486f24..b6304486f24 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b8e251a1ee4..b8e251a1ee4 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
diff --git a/drivers/net/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 3edfbaf5f02..3edfbaf5f02 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
diff --git a/drivers/net/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 7443f99c977..5fb24d3aa3c 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -202,7 +202,8 @@ extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
extern void falcon_irq_ack_a1(struct efx_nic *efx);
-#define EFX_IRQ_MOD_RESOLUTION 5
+#define EFX_IRQ_MOD_RESOLUTION 5
+#define EFX_IRQ_MOD_MAX 0x1000
/* Global Resources */
extern int efx_nic_flush_queues(struct efx_nic *efx);
diff --git a/drivers/net/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 11d148cd844..11d148cd844 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
index 7ad97e39740..7ad97e39740 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
diff --git a/drivers/net/sfc/regs.h b/drivers/net/ethernet/sfc/regs.h
index cc2c86b76a7..cc2c86b76a7 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/ethernet/sfc/regs.h
diff --git a/drivers/net/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 62e43649466..adbda182f15 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -478,10 +478,10 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
if (efx->net_dev->features & NETIF_F_RXHASH)
skb->rxhash = efx_rx_buf_hash(eh);
- skb_shinfo(skb)->frags[0].page = page;
+ skb_frag_set_page(skb, 0, page);
skb_shinfo(skb)->frags[0].page_offset =
efx_rx_buf_offset(efx, rx_buf);
- skb_shinfo(skb)->frags[0].size = rx_buf->len;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len);
skb_shinfo(skb)->nr_frags = 1;
skb->len = rx_buf->len;
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 822f6c2a6a7..822f6c2a6a7 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index dba5456e70f..dba5456e70f 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
diff --git a/drivers/net/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 2c3bd93fab5..cc2549cb707 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -36,6 +36,8 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
{
efx_dword_t timer_cmd;
+ BUILD_BUG_ON(EFX_IRQ_MOD_MAX > (1 << FRF_CZ_TC_TIMER_VAL_WIDTH));
+
if (channel->irq_moderation)
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_CZ_TC_TIMER_MODE,
diff --git a/drivers/net/sfc/spi.h b/drivers/net/ethernet/sfc/spi.h
index 71f2e3ebe1c..71f2e3ebe1c 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/ethernet/sfc/spi.h
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c
index 7b0fd89e7b8..7b0fd89e7b8 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/ethernet/sfc/tenxpress.c
diff --git a/drivers/net/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 84eb99e0f8d..df88c5430f9 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -137,8 +137,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
struct pci_dev *pci_dev = efx->pci_dev;
struct efx_tx_buffer *buffer;
skb_frag_t *fragment;
- struct page *page;
- int page_offset;
unsigned int len, unmap_len = 0, fill_level, insert_ptr;
dma_addr_t dma_addr, unmap_addr = 0;
unsigned int dma_len;
@@ -240,14 +238,12 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (i >= skb_shinfo(skb)->nr_frags)
break;
fragment = &skb_shinfo(skb)->frags[i];
- len = fragment->size;
- page = fragment->page;
- page_offset = fragment->page_offset;
+ len = skb_frag_size(fragment);
i++;
/* Map for DMA */
unmap_single = false;
- dma_addr = pci_map_page(pci_dev, page, page_offset, len,
- PCI_DMA_TODEVICE);
+ dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
+ DMA_TO_DEVICE);
}
/* Transfer ownership of the skb to the final buffer */
@@ -929,13 +925,12 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
skb_frag_t *frag)
{
- st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
- frag->page_offset, frag->size,
- PCI_DMA_TODEVICE);
- if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+ st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
st->unmap_single = false;
- st->unmap_len = frag->size;
- st->in_len = frag->size;
+ st->unmap_len = skb_frag_size(frag);
+ st->in_len = skb_frag_size(frag);
st->dma_addr = st->unmap_addr;
return 0;
}
diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 7c21b334a75..7c21b334a75 100644
--- a/drivers/net/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index e4dd3a7f304..e4dd3a7f304 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig
new file mode 100644
index 00000000000..c1c4bb868a3
--- /dev/null
+++ b/drivers/net/ethernet/sgi/Kconfig
@@ -0,0 +1,36 @@
+#
+# SGI device configuration
+#
+
+config NET_VENDOR_SGI
+ bool "SGI devices"
+ default y
+ depends on (PCI && SGI_IP27) || SGI_IP32
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about SGI devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_SGI
+
+config SGI_IOC3_ETH
+ bool "SGI IOC3 Ethernet"
+ depends on PCI && SGI_IP27
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+config SGI_O2MACE_ETH
+ tristate "SGI O2 MACE Fast Ethernet support"
+ depends on SGI_IP32=y
+
+endif # NET_VENDOR_SGI
diff --git a/drivers/net/ethernet/sgi/Makefile b/drivers/net/ethernet/sgi/Makefile
new file mode 100644
index 00000000000..e5bedd271e2
--- /dev/null
+++ b/drivers/net/ethernet/sgi/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the SGI device drivers.
+#
+
+obj-$(CONFIG_SGI_O2MACE_ETH) += meth.o
+obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index a234e450452..ac149d99f78 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1220,7 +1220,7 @@ static const struct net_device_ops ioc3_netdev_ops = {
.ndo_start_xmit = ioc3_start_xmit,
.ndo_tx_timeout = ioc3_timeout,
.ndo_get_stats = ioc3_get_stats,
- .ndo_set_multicast_list = ioc3_set_multicast_list,
+ .ndo_set_rx_mode = ioc3_set_multicast_list,
.ndo_do_ioctl = ioc3_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ioc3_set_mac_address,
diff --git a/drivers/net/meth.c b/drivers/net/ethernet/sgi/meth.c
index 60135aa5580..60135aa5580 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
diff --git a/drivers/net/meth.h b/drivers/net/ethernet/sgi/meth.h
index 5b145c6bad6..5b145c6bad6 100644
--- a/drivers/net/meth.h
+++ b/drivers/net/ethernet/sgi/meth.h
diff --git a/drivers/net/ethernet/sis/Kconfig b/drivers/net/ethernet/sis/Kconfig
new file mode 100644
index 00000000000..f1135cc1bd4
--- /dev/null
+++ b/drivers/net/ethernet/sis/Kconfig
@@ -0,0 +1,53 @@
+#
+# Silicon Integrated Systems (SiS) device configuration
+#
+
+config NET_VENDOR_SIS
+ bool "Silicon Integrated Systems (SiS) devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about SiS devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_SIS
+
+config SIS900
+ tristate "SiS 900/7016 PCI Fast Ethernet Adapter support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This is a driver for the Fast Ethernet PCI network cards based on
+ the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in
+ SiS 630 and SiS 540 chipsets.
+
+ This driver also supports AMD 79C901 HomePNA so that you can use
+ your phone line as a network cable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sis900. This is recommended.
+
+config SIS190
+ tristate "SiS190/SiS191 gigabit ethernet support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ Say Y here if you have a SiS 190 PCI Fast Ethernet adapter or
+ a SiS 191 PCI Gigabit Ethernet adapter. Both are expected to
+ appear in lan on motherboard designs which are based on SiS 965
+ and SiS 966 south bridge.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sis190. This is recommended.
+
+endif # NET_VENDOR_SIS
diff --git a/drivers/net/ethernet/sis/Makefile b/drivers/net/ethernet/sis/Makefile
new file mode 100644
index 00000000000..58d3ac1985d
--- /dev/null
+++ b/drivers/net/ethernet/sis/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Silicon Integrated Systems (SiS) network device drivers.
+#
+
+obj-$(CONFIG_SIS190) += sis190.o
+obj-$(CONFIG_SIS900) += sis900.o
diff --git a/drivers/net/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 3c0f1312b39..1b4658c9939 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1841,7 +1841,7 @@ static const struct net_device_ops sis190_netdev_ops = {
.ndo_do_ioctl = sis190_ioctl,
.ndo_start_xmit = sis190_start_xmit,
.ndo_tx_timeout = sis190_tx_timeout,
- .ndo_set_multicast_list = sis190_set_rx_mode,
+ .ndo_set_rx_mode = sis190_set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = sis190_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 658a1928fe7..a184abc5ef1 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -403,7 +403,7 @@ static const struct net_device_ops sis900_netdev_ops = {
.ndo_stop = sis900_close,
.ndo_start_xmit = sis900_start_xmit,
.ndo_set_config = sis900_set_config,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/sis900.h b/drivers/net/ethernet/sis/sis900.h
index 150511a922e..150511a922e 100644
--- a/drivers/net/sis900.h
+++ b/drivers/net/ethernet/sis/sis900.h
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
new file mode 100644
index 00000000000..5a689af516e
--- /dev/null
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -0,0 +1,136 @@
+#
+# Western Digital/SMC network device configuration
+#
+
+config NET_VENDOR_SMSC
+ bool "SMC (SMSC)/Western Digital devices"
+ default y
+ depends on ARM || ISA || MAC || ARM || MIPS || M32R || SUPERH || \
+ BLACKFIN || MN10300 || COLDFIRE || PCI || PCMCIA
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about SMC/Western Digital cards. If you say Y, you will
+ be asked for your specific card in the following questions.
+
+if NET_VENDOR_SMSC
+
+config SMC9194
+ tristate "SMC 9194 support"
+ depends on (ISA || MAC && BROKEN)
+ select CRC32
+ ---help---
+ This is support for the SMC9xxx based Ethernet cards. Choose this
+ option if you have a DELL laptop with the docking station, or
+ another SMC9192/9194 based chipset. Say Y if you want it compiled
+ into the kernel, and read the file
+ <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called smc9194.
+
+config SMC91X
+ tristate "SMC 91C9x/91C1xxx support"
+ select CRC32
+ select NET_CORE
+ select MII
+ depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
+ MN10300 || COLDFIRE)
+ ---help---
+ This is a driver for SMC's 91x series of Ethernet chipsets,
+ including the SMC91C94 and the SMC91C111. Say Y if you want it
+ compiled into the kernel, and read the file
+ <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ This driver is also available as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called smc91x. If you want to compile it as a
+ module, say M here and read <file:Documentation/kbuild/modules.txt>.
+
+config PCMCIA_SMC91C92
+ tristate "SMC 91Cxx PCMCIA support"
+ depends on PCMCIA
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ Say Y here if you intend to attach an SMC 91Cxx compatible PCMCIA
+ (PC-card) Ethernet or Fast Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called smc91c92_cs. If unsure, say N.
+
+config EPIC100
+ tristate "SMC EtherPower II"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC,
+ which is based on the SMC83c17x (EPIC/100).
+ More specific information and updates are available from
+ <http://www.scyld.com/network/epic100.html>.
+
+config SMC911X
+ tristate "SMSC LAN911[5678] support"
+ select CRC32
+ select NET_CORE
+ select MII
+ depends on (ARM || SUPERH || MN10300)
+ ---help---
+ This is a driver for SMSC's LAN911x series of Ethernet chipsets
+ including the new LAN9115, LAN9116, LAN9117, and LAN9118.
+ Say Y if you want it compiled into the kernel,
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ This driver is also available as a module. The module will be
+ called smc911x. If you want to compile it as a module, say M
+ here and read <file:Documentation/kbuild/modules.txt>
+
+config SMSC911X
+ tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
+ depends on (ARM || SUPERH || BLACKFIN || MIPS || MN10300)
+ select CRC32
+ select NET_CORE
+ select MII
+ select PHYLIB
+ ---help---
+ Say Y here if you want support for SMSC LAN911x and LAN921x families
+ of ethernet controllers.
+
+ To compile this driver as a module, choose M here. The module
+ will be called smsc911x.
+
+config SMSC911X_ARCH_HOOKS
+ def_bool n
+ depends on SMSC911X
+ ---help---
+ If the arch enables this, it allows the arch to implement various
+ hooks for more comprehensive interrupt control and also to override
+ the source of the MAC address.
+
+config SMSC9420
+ tristate "SMSC LAN9420 PCI ethernet adapter support"
+ depends on PCI
+ select CRC32
+ select PHYLIB
+ select SMSC_PHY
+ ---help---
+ This is a driver for SMSC's LAN9420 PCI ethernet adapter.
+ Say Y if you want it compiled into the kernel,
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ This driver is also available as a module. The module will be
+ called smsc9420. If you want to compile it as a module, say M
+ here and read <file:Documentation/kbuild/modules.txt>
+
+endif # NET_VENDOR_SMSC
diff --git a/drivers/net/ethernet/smsc/Makefile b/drivers/net/ethernet/smsc/Makefile
new file mode 100644
index 00000000000..f3438dec9d9
--- /dev/null
+++ b/drivers/net/ethernet/smsc/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the SMSC network device drivers.
+#
+
+obj-$(CONFIG_SMC9194) += smc9194.o
+obj-$(CONFIG_SMC91X) += smc91x.o
+obj-$(CONFIG_PCMCIA_SMC91C92) += smc91c92_cs.o
+obj-$(CONFIG_EPIC100) += epic100.o
+obj-$(CONFIG_SMSC9420) += smsc9420.o
+obj-$(CONFIG_SMC911X) += smc911x.o
+obj-$(CONFIG_SMSC911X) += smsc911x.o
diff --git a/drivers/net/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 814c187d5f9..0a5dfb81415 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -314,7 +314,7 @@ static const struct net_device_ops epic_netdev_ops = {
.ndo_start_xmit = epic_start_xmit,
.ndo_tx_timeout = epic_tx_timeout,
.ndo_get_stats = epic_get_stats,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_do_ioctl = netdev_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index a91fe172302..8f61fe9db1d 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1768,7 +1768,7 @@ static const struct net_device_ops smc911x_netdev_ops = {
.ndo_stop = smc911x_close,
.ndo_start_xmit = smc911x_hard_start_xmit,
.ndo_tx_timeout = smc911x_timeout,
- .ndo_set_multicast_list = smc911x_set_multicast_list,
+ .ndo_set_rx_mode = smc911x_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
index 3269292efec..3269292efec 100644
--- a/drivers/net/smc911x.h
+++ b/drivers/net/ethernet/smsc/smc911x.h
diff --git a/drivers/net/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 5b65ac4b3ce..4e45094efb1 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -827,7 +827,7 @@ static const struct net_device_ops smc_netdev_ops = {
.ndo_stop = smc_close,
.ndo_start_xmit = smc_wait_to_send_packet,
.ndo_tx_timeout = smc_timeout,
- .ndo_set_multicast_list = smc_set_multicast_list,
+ .ndo_set_rx_mode = smc_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/smc9194.h b/drivers/net/ethernet/smsc/smc9194.h
index cf69d0a5a1c..cf69d0a5a1c 100644
--- a/drivers/net/smc9194.h
+++ b/drivers/net/ethernet/smsc/smc9194.h
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index cffbc0373fa..cbfa9818713 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -294,7 +294,7 @@ static const struct net_device_ops smc_netdev_ops = {
.ndo_start_xmit = smc_start_xmit,
.ndo_tx_timeout = smc_tx_timeout,
.ndo_set_config = s9k_config,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_rx_mode = set_rx_mode,
.ndo_do_ioctl = smc_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 2b1d254d59a..f47f81e2532 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1768,7 +1768,7 @@ static const struct net_device_ops smc_netdev_ops = {
.ndo_stop = smc_close,
.ndo_start_xmit = smc_hard_start_xmit,
.ndo_tx_timeout = smc_timeout,
- .ndo_set_multicast_list = smc_set_multicast_list,
+ .ndo_set_rx_mode = smc_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 5f53fbbf67b..5f53fbbf67b 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
diff --git a/drivers/net/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index b9016a30cdc..d2be42aafbe 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -26,6 +26,7 @@
* LAN9215, LAN9216, LAN9217, LAN9218
* LAN9210, LAN9211
* LAN9220, LAN9221
+ * LAN89218
*
*/
@@ -53,6 +54,10 @@
#include <linux/phy.h>
#include <linux/smsc911x.h>
#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
#include "smsc911x.h"
#define SMSC_CHIPNAME "smsc911x"
@@ -1902,7 +1907,7 @@ static const struct net_device_ops smsc911x_netdev_ops = {
.ndo_stop = smsc911x_stop,
.ndo_start_xmit = smsc911x_hard_start_xmit,
.ndo_get_stats = smsc911x_get_stats,
- .ndo_set_multicast_list = smsc911x_set_multicast_list,
+ .ndo_set_rx_mode = smsc911x_set_multicast_list,
.ndo_do_ioctl = smsc911x_do_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -1983,6 +1988,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
case 0x01170000:
case 0x01160000:
case 0x01150000:
+ case 0x218A0000:
/* LAN911[5678] family */
pdata->generation = pdata->idrev & 0x0000FFFF;
break;
@@ -2095,8 +2101,60 @@ static const struct smsc911x_ops shifted_smsc911x_ops = {
.tx_writefifo = smsc911x_tx_writefifo_shift,
};
+#ifdef CONFIG_OF
+static int __devinit smsc911x_probe_config_dt(
+ struct smsc911x_platform_config *config,
+ struct device_node *np)
+{
+ const char *mac;
+ u32 width = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ config->phy_interface = of_get_phy_mode(np);
+
+ mac = of_get_mac_address(np);
+ if (mac)
+ memcpy(config->mac, mac, ETH_ALEN);
+
+ of_property_read_u32(np, "reg-shift", &config->shift);
+
+ of_property_read_u32(np, "reg-io-width", &width);
+ if (width == 4)
+ config->flags |= SMSC911X_USE_32BIT;
+ else
+ config->flags |= SMSC911X_USE_16BIT;
+
+ if (of_get_property(np, "smsc,irq-active-high", NULL))
+ config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH;
+
+ if (of_get_property(np, "smsc,irq-push-pull", NULL))
+ config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL;
+
+ if (of_get_property(np, "smsc,force-internal-phy", NULL))
+ config->flags |= SMSC911X_FORCE_INTERNAL_PHY;
+
+ if (of_get_property(np, "smsc,force-external-phy", NULL))
+ config->flags |= SMSC911X_FORCE_EXTERNAL_PHY;
+
+ if (of_get_property(np, "smsc,save-mac-address", NULL))
+ config->flags |= SMSC911X_SAVE_MAC_ADDRESS;
+
+ return 0;
+}
+#else
+static inline int smsc911x_probe_config_dt(
+ struct smsc911x_platform_config *config,
+ struct device_node *np)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_OF */
+
static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct net_device *dev;
struct smsc911x_data *pdata;
struct smsc911x_platform_config *config = pdev->dev.platform_data;
@@ -2107,13 +2165,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
pr_info("Driver version %s\n", SMSC_DRV_VERSION);
- /* platform data specifies irq & dynamic bus configuration */
- if (!pdev->dev.platform_data) {
- pr_warn("platform_data not provided\n");
- retval = -ENODEV;
- goto out_0;
- }
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"smsc911x-memory");
if (!res)
@@ -2152,9 +2203,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
irq_flags = irq_res->flags & IRQF_TRIGGER_MASK;
pdata->ioaddr = ioremap_nocache(res->start, res_size);
- /* copy config parameters across to pdata */
- memcpy(&pdata->config, config, sizeof(pdata->config));
-
pdata->dev = dev;
pdata->msg_enable = ((1 << debug) - 1);
@@ -2164,10 +2212,22 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
goto out_free_netdev_2;
}
+ retval = smsc911x_probe_config_dt(&pdata->config, np);
+ if (retval && config) {
+ /* copy config parameters across to pdata */
+ memcpy(&pdata->config, config, sizeof(pdata->config));
+ retval = 0;
+ }
+
+ if (retval) {
+ SMSC_WARN(pdata, probe, "Error smsc911x config not found");
+ goto out_unmap_io_3;
+ }
+
/* assume standard, non-shifted, access to HW registers */
pdata->ops = &standard_smsc911x_ops;
/* apply the right access if shifting is needed */
- if (config->shift)
+ if (pdata->config.shift)
pdata->ops = &shifted_smsc911x_ops;
retval = smsc911x_init(dev);
@@ -2314,6 +2374,12 @@ static const struct dev_pm_ops smsc911x_pm_ops = {
#define SMSC911X_PM_OPS NULL
#endif
+static const struct of_device_id smsc911x_dt_ids[] = {
+ { .compatible = "smsc,lan9115", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, smsc911x_dt_ids);
+
static struct platform_driver smsc911x_driver = {
.probe = smsc911x_drv_probe,
.remove = __devexit_p(smsc911x_drv_remove),
@@ -2321,6 +2387,7 @@ static struct platform_driver smsc911x_driver = {
.name = SMSC_CHIPNAME,
.owner = THIS_MODULE,
.pm = SMSC911X_PM_OPS,
+ .of_match_table = smsc911x_dt_ids,
},
};
diff --git a/drivers/net/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 8d67aacf886..8d67aacf886 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
diff --git a/drivers/net/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 459726f5475..4f15680849f 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1566,7 +1566,7 @@ static const struct net_device_ops smsc9420_netdev_ops = {
.ndo_stop = smsc9420_stop,
.ndo_start_xmit = smsc9420_hard_start_xmit,
.ndo_get_stats = smsc9420_get_stats,
- .ndo_set_multicast_list = smsc9420_set_multicast_list,
+ .ndo_set_rx_mode = smsc9420_set_multicast_list,
.ndo_do_ioctl = smsc9420_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/smsc9420.h b/drivers/net/ethernet/smsc/smsc9420.h
index e441402f77a..e441402f77a 100644
--- a/drivers/net/smsc9420.h
+++ b/drivers/net/ethernet/smsc/smsc9420.h
diff --git a/drivers/net/ethernet/stmicro/Kconfig b/drivers/net/ethernet/stmicro/Kconfig
new file mode 100644
index 00000000000..f4a80da0065
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/Kconfig
@@ -0,0 +1,23 @@
+#
+# STMicroelectronics device configuration
+#
+
+config NET_VENDOR_STMICRO
+ bool "STMicroelectronics devices"
+ default y
+ depends on HAS_IOMEM
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about STMicroelectronics cards. If you say Y, you will
+ be asked for your specific card in the following questions.
+
+if NET_VENDOR_STMICRO
+
+source "drivers/net/ethernet/stmicro/stmmac/Kconfig"
+
+endif # NET_VENDOR_STMICRO
diff --git a/drivers/net/ethernet/stmicro/Makefile b/drivers/net/ethernet/stmicro/Makefile
new file mode 100644
index 00000000000..9b3bfddda7d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the STMicroelectronics device drivers.
+#
+
+obj-$(CONFIG_STMMAC_ETH) += stmmac/
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
new file mode 100644
index 00000000000..ac6f190743d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -0,0 +1,84 @@
+config STMMAC_ETH
+ tristate "STMicroelectronics 10/100/1000 Ethernet driver"
+ depends on HAS_IOMEM
+ select NET_CORE
+ select MII
+ select PHYLIB
+ select CRC32
+ ---help---
+ This is the driver for the Ethernet IPs are built around a
+ Synopsys IP Core and only tested on the STMicroelectronics
+ platforms.
+
+if STMMAC_ETH
+
+config STMMAC_DEBUG_FS
+ bool "Enable monitoring via sysFS "
+ default n
+ depends on STMMAC_ETH && DEBUG_FS
+ -- help
+ The stmmac entry in /sys reports DMA TX/RX rings
+ or (if supported) the HW cap register.
+
+config STMMAC_DA
+ bool "STMMAC DMA arbitration scheme"
+ default n
+ ---help---
+ Selecting this option, rx has priority over Tx (only for Giga
+ Ethernet device).
+ By default, the DMA arbitration scheme is based on Round-robin
+ (rx:tx priority is 1:1).
+
+config STMMAC_DUAL_MAC
+ bool "STMMAC: dual mac support (EXPERIMENTAL)"
+ default n
+ depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
+ ---help---
+ Some ST SoCs (for example the stx7141 and stx7200c2) have two
+ Ethernet Controllers. This option turns on the second Ethernet
+ device on this kind of platforms.
+
+config STMMAC_TIMER
+ bool "STMMAC Timer optimisation"
+ default n
+ depends on RTC_HCTOSYS_DEVICE
+ ---help---
+ Use an external timer for mitigating the number of network
+ interrupts. Currently, for SH architectures, it is possible
+ to use the TMU channel 2 and the SH-RTC device.
+
+choice
+ prompt "Select Timer device"
+ depends on STMMAC_TIMER
+
+config STMMAC_TMU_TIMER
+ bool "TMU channel 2"
+ depends on CPU_SH4
+ ---help---
+
+config STMMAC_RTC_TIMER
+ bool "Real time clock"
+ depends on RTC_CLASS
+ ---help---
+
+endchoice
+
+choice
+ prompt "Select the DMA TX/RX descriptor operating modes"
+ depends on STMMAC_ETH
+ ---help---
+ This driver supports DMA descriptor to operate both in dual buffer
+ (RING) and linked-list(CHAINED) mode. In RING mode each descriptor
+ points to two data buffer pointers whereas in CHAINED mode they
+ points to only one data buffer pointer.
+
+config STMMAC_RING
+ bool "Enable Descriptor Ring Mode"
+
+config STMMAC_CHAINED
+ bool "Enable Descriptor Chained Mode"
+
+endchoice
+
+
+endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
new file mode 100644
index 00000000000..d7c45164ea7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_STMMAC_ETH) += stmmac.o
+stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
+stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
+stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
+ dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
+ mmc_core.o $(stmmac-y)
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
new file mode 100644
index 00000000000..0668659803e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -0,0 +1,137 @@
+/*******************************************************************************
+ Specialised functions for managing Chained mode
+
+ Copyright(C) 2011 STMicroelectronics Ltd
+
+ It defines all the functions used to handle the normal/enhanced
+ descriptors in case of the DMA is configured to work in chained or
+ in ring mode.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "stmmac.h"
+
+unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *) p;
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int entry = priv->cur_tx % txsize;
+ struct dma_desc *desc = priv->dma_tx + entry;
+ unsigned int nopaged_len = skb_headlen(skb);
+ unsigned int bmax;
+ unsigned int i = 1, len;
+
+ if (priv->plat->enh_desc)
+ bmax = BUF_SIZE_8KiB;
+ else
+ bmax = BUF_SIZE_2KiB;
+
+ len = nopaged_len - bmax;
+
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ bmax, DMA_TO_DEVICE);
+ priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum);
+
+ while (len != 0) {
+ entry = (++priv->cur_tx) % txsize;
+ desc = priv->dma_tx + entry;
+
+ if (len > bmax) {
+ desc->des2 = dma_map_single(priv->device,
+ (skb->data + bmax * i),
+ bmax, DMA_TO_DEVICE);
+ priv->hw->desc->prepare_tx_desc(desc, 0, bmax,
+ csum);
+ priv->hw->desc->set_tx_owner(desc);
+ priv->tx_skbuff[entry] = NULL;
+ len -= bmax;
+ i++;
+ } else {
+ desc->des2 = dma_map_single(priv->device,
+ (skb->data + bmax * i), len,
+ DMA_TO_DEVICE);
+ priv->hw->desc->prepare_tx_desc(desc, 0, len,
+ csum);
+ priv->hw->desc->set_tx_owner(desc);
+ priv->tx_skbuff[entry] = NULL;
+ len = 0;
+ }
+ }
+ return entry;
+}
+
+static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
+{
+ unsigned int ret = 0;
+
+ if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
+ (!enh_desc && (len > BUF_SIZE_2KiB))) {
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static void stmmac_refill_desc3(int bfsize, struct dma_desc *p)
+{
+}
+
+static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
+{
+}
+
+static void stmmac_clean_desc3(struct dma_desc *p)
+{
+}
+
+static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
+ unsigned int size)
+{
+ /*
+ * In chained mode the des3 points to the next element in the ring.
+ * The latest element has to point to the head.
+ */
+ int i;
+ struct dma_desc *p = des;
+ dma_addr_t dma_phy = phy_addr;
+
+ for (i = 0; i < (size - 1); i++) {
+ dma_phy += sizeof(struct dma_desc);
+ p->des3 = (unsigned int)dma_phy;
+ p++;
+ }
+ p->des3 = (unsigned int)phy_addr;
+}
+
+static int stmmac_set_16kib_bfsize(int mtu)
+{
+ /* Not supported */
+ return 0;
+}
+
+const struct stmmac_ring_mode_ops ring_mode_ops = {
+ .is_jumbo_frm = stmmac_is_jumbo_frm,
+ .jumbo_frm = stmmac_jumbo_frm,
+ .refill_desc3 = stmmac_refill_desc3,
+ .init_desc3 = stmmac_init_desc3,
+ .init_dma_chain = stmmac_init_dma_chain,
+ .clean_desc3 = stmmac_clean_desc3,
+ .set_16kib_bfsize = stmmac_set_16kib_bfsize,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
new file mode 100644
index 00000000000..9100c100d29
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -0,0 +1,319 @@
+/*******************************************************************************
+ STMMAC Common Header File
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define STMMAC_VLAN_TAG_USED
+#include <linux/if_vlan.h>
+#endif
+
+#include "descs.h"
+#include "mmc.h"
+
+#undef CHIP_DEBUG_PRINT
+/* Turn-on extra printk debug for MAC core, dma and descriptors */
+/* #define CHIP_DEBUG_PRINT */
+
+#ifdef CHIP_DEBUG_PRINT
+#define CHIP_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define CHIP_DBG(fmt, args...) do { } while (0)
+#endif
+
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
+
+struct stmmac_extra_stats {
+ /* Transmit errors */
+ unsigned long tx_underflow ____cacheline_aligned;
+ unsigned long tx_carrier;
+ unsigned long tx_losscarrier;
+ unsigned long tx_heartbeat;
+ unsigned long tx_deferred;
+ unsigned long tx_vlan;
+ unsigned long tx_jabber;
+ unsigned long tx_frame_flushed;
+ unsigned long tx_payload_error;
+ unsigned long tx_ip_header_error;
+ /* Receive errors */
+ unsigned long rx_desc;
+ unsigned long rx_partial;
+ unsigned long rx_runt;
+ unsigned long rx_toolong;
+ unsigned long rx_collision;
+ unsigned long rx_crc;
+ unsigned long rx_length;
+ unsigned long rx_mii;
+ unsigned long rx_multicast;
+ unsigned long rx_gmac_overflow;
+ unsigned long rx_watchdog;
+ unsigned long da_rx_filter_fail;
+ unsigned long sa_rx_filter_fail;
+ unsigned long rx_missed_cntr;
+ unsigned long rx_overflow_cntr;
+ unsigned long rx_vlan;
+ /* Tx/Rx IRQ errors */
+ unsigned long tx_undeflow_irq;
+ unsigned long tx_process_stopped_irq;
+ unsigned long tx_jabber_irq;
+ unsigned long rx_overflow_irq;
+ unsigned long rx_buf_unav_irq;
+ unsigned long rx_process_stopped_irq;
+ unsigned long rx_watchdog_irq;
+ unsigned long tx_early_irq;
+ unsigned long fatal_bus_error_irq;
+ /* Extra info */
+ unsigned long threshold;
+ unsigned long tx_pkt_n;
+ unsigned long rx_pkt_n;
+ unsigned long poll_n;
+ unsigned long sched_timer_n;
+ unsigned long normal_irq_n;
+};
+
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0x200
+
+/* Flow Control defines */
+#define FLOW_OFF 0
+#define FLOW_RX 1
+#define FLOW_TX 2
+#define FLOW_AUTO (FLOW_TX | FLOW_RX)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+
+/* DAM HW feature register fields */
+#define DMA_HW_FEAT_MIISEL 0x00000001 /* 10/100 Mbps Support */
+#define DMA_HW_FEAT_GMIISEL 0x00000002 /* 1000 Mbps Support */
+#define DMA_HW_FEAT_HDSEL 0x00000004 /* Half-Duplex Support */
+#define DMA_HW_FEAT_EXTHASHEN 0x00000008 /* Expanded DA Hash Filter */
+#define DMA_HW_FEAT_HASHSEL 0x00000010 /* HASH Filter */
+#define DMA_HW_FEAT_ADDMACADRSEL 0x00000020 /* Multiple MAC Addr Reg */
+#define DMA_HW_FEAT_PCSSEL 0x00000040 /* PCS registers */
+#define DMA_HW_FEAT_L3L4FLTREN 0x00000080 /* Layer 3 & Layer 4 Feature */
+#define DMA_HW_FEAT_SMASEL 0x00000100 /* SMA(MDIO) Interface */
+#define DMA_HW_FEAT_RWKSEL 0x00000200 /* PMT Remote Wakeup */
+#define DMA_HW_FEAT_MGKSEL 0x00000400 /* PMT Magic Packet */
+#define DMA_HW_FEAT_MMCSEL 0x00000800 /* RMON Module */
+#define DMA_HW_FEAT_TSVER1SEL 0x00001000 /* Only IEEE 1588-2002 Timestamp */
+#define DMA_HW_FEAT_TSVER2SEL 0x00002000 /* IEEE 1588-2008 Adv Timestamp */
+#define DMA_HW_FEAT_EEESEL 0x00004000 /* Energy Efficient Ethernet */
+#define DMA_HW_FEAT_AVSEL 0x00008000 /* AV Feature */
+#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* Checksum Offload in Tx */
+#define DMA_HW_FEAT_RXTYP1COE 0x00020000 /* IP csum Offload(Type 1) in Rx */
+#define DMA_HW_FEAT_RXTYP2COE 0x00040000 /* IP csum Offload(Type 2) in Rx */
+#define DMA_HW_FEAT_RXFIFOSIZE 0x00080000 /* Rx FIFO > 2048 Bytes */
+#define DMA_HW_FEAT_RXCHCNT 0x00300000 /* No. of additional Rx Channels */
+#define DMA_HW_FEAT_TXCHCNT 0x00c00000 /* No. of additional Tx Channels */
+#define DMA_HW_FEAT_ENHDESSEL 0x01000000 /* Alternate (Enhanced Descriptor) */
+#define DMA_HW_FEAT_INTTSEN 0x02000000 /* Timestamping with Internal
+ System Time */
+#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */
+#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN Insertion */
+#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */
+
+enum rx_frame_status { /* IPC status */
+ good_frame = 0,
+ discard_frame = 1,
+ csum_none = 2,
+ llc_snap = 4,
+};
+
+enum tx_dma_irq_status {
+ tx_hard_error = 1,
+ tx_hard_error_bump_tc = 2,
+ handle_tx_rx = 3,
+};
+
+/* DMA HW capabilities */
+struct dma_features {
+ unsigned int mbps_10_100;
+ unsigned int mbps_1000;
+ unsigned int half_duplex;
+ unsigned int hash_filter;
+ unsigned int multi_addr;
+ unsigned int pcs;
+ unsigned int sma_mdio;
+ unsigned int pmt_remote_wake_up;
+ unsigned int pmt_magic_frame;
+ unsigned int rmon;
+ /* IEEE 1588-2002*/
+ unsigned int time_stamp;
+ /* IEEE 1588-2008*/
+ unsigned int atime_stamp;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ unsigned int eee;
+ unsigned int av;
+ /* TX and RX csum */
+ unsigned int tx_coe;
+ unsigned int rx_coe_type1;
+ unsigned int rx_coe_type2;
+ unsigned int rxfifo_over_2048;
+ /* TX and RX number of channels */
+ unsigned int number_rx_channel;
+ unsigned int number_tx_channel;
+ /* Alternate (enhanced) DESC mode*/
+ unsigned int enh_desc;
+};
+
+/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
+
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
+
+/* Common MAC defines */
+#define MAC_CTRL_REG 0x00000000 /* MAC Control */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
+
+struct stmmac_desc_ops {
+ /* DMA RX descriptor ring initialization */
+ void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic);
+ /* DMA TX descriptor ring initialization */
+ void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
+
+ /* Invoked by the xmit function to prepare the tx descriptor */
+ void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
+ int csum_flag);
+ /* Set/get the owner of the descriptor */
+ void (*set_tx_owner) (struct dma_desc *p);
+ int (*get_tx_owner) (struct dma_desc *p);
+ /* Invoked by the xmit function to close the tx descriptor */
+ void (*close_tx_desc) (struct dma_desc *p);
+ /* Clean the tx descriptor as soon as the tx irq is received */
+ void (*release_tx_desc) (struct dma_desc *p);
+ /* Clear interrupt on tx frame completion. When this bit is
+ * set an interrupt happens as soon as the frame is transmitted */
+ void (*clear_tx_ic) (struct dma_desc *p);
+ /* Last tx segment reports the transmit status */
+ int (*get_tx_ls) (struct dma_desc *p);
+ /* Return the transmit status looking at the TDES1 */
+ int (*tx_status) (void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr);
+ /* Get the buffer size from the descriptor */
+ int (*get_tx_len) (struct dma_desc *p);
+ /* Handle extra events on specific interrupts hw dependent */
+ int (*get_rx_owner) (struct dma_desc *p);
+ void (*set_rx_owner) (struct dma_desc *p);
+ /* Get the receive frame size */
+ int (*get_rx_frame_len) (struct dma_desc *p);
+ /* Return the reception status looking at the RDES1 */
+ int (*rx_status) (void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p);
+};
+
+struct stmmac_dma_ops {
+ /* DMA core initialization */
+ int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+ /* Dump DMA registers */
+ void (*dump_regs) (void __iomem *ioaddr);
+ /* Set tx/rx threshold in the csr6 register
+ * An invalid value enables the store-and-forward mode */
+ void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode);
+ /* To track extra statistic (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ void __iomem *ioaddr);
+ void (*enable_dma_transmission) (void __iomem *ioaddr);
+ void (*enable_dma_irq) (void __iomem *ioaddr);
+ void (*disable_dma_irq) (void __iomem *ioaddr);
+ void (*start_tx) (void __iomem *ioaddr);
+ void (*stop_tx) (void __iomem *ioaddr);
+ void (*start_rx) (void __iomem *ioaddr);
+ void (*stop_rx) (void __iomem *ioaddr);
+ int (*dma_interrupt) (void __iomem *ioaddr,
+ struct stmmac_extra_stats *x);
+ /* If supported then get the optional core features */
+ unsigned int (*get_hw_feature) (void __iomem *ioaddr);
+};
+
+struct stmmac_ops {
+ /* MAC core initialization */
+ void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
+ /* Support checksum offload engine */
+ int (*rx_coe) (void __iomem *ioaddr);
+ /* Dump MAC registers */
+ void (*dump_regs) (void __iomem *ioaddr);
+ /* Handle extra events on specific interrupts hw dependent */
+ void (*host_irq_status) (void __iomem *ioaddr);
+ /* Multicast filter setting */
+ void (*set_filter) (struct net_device *dev);
+ /* Flow control setting */
+ void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time);
+ /* Set power management mode (e.g. magic frame) */
+ void (*pmt) (void __iomem *ioaddr, unsigned long mode);
+ /* Set/Get Unicast MAC addresses */
+ void (*set_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+ void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+};
+
+struct mac_link {
+ int port;
+ int duplex;
+ int speed;
+};
+
+struct mii_regs {
+ unsigned int addr; /* MII Address */
+ unsigned int data; /* MII Data */
+};
+
+struct stmmac_ring_mode_ops {
+ unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
+ unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+ void (*refill_desc3) (int bfsize, struct dma_desc *p);
+ void (*init_desc3) (int des3_as_data_buf, struct dma_desc *p);
+ void (*init_dma_chain) (struct dma_desc *des, dma_addr_t phy_addr,
+ unsigned int size);
+ void (*clean_desc3) (struct dma_desc *p);
+ int (*set_16kib_bfsize) (int mtu);
+};
+
+struct mac_device_info {
+ const struct stmmac_ops *mac;
+ const struct stmmac_desc_ops *desc;
+ const struct stmmac_dma_ops *dma;
+ const struct stmmac_ring_mode_ops *ring;
+ struct mii_regs mii; /* MII register Addresses */
+ struct mac_link link;
+ unsigned int synopsys_uid;
+};
+
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
+
+extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
+extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+extern const struct stmmac_ring_mode_ops ring_mode_ops;
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 63a03e26469..63a03e26469 100644
--- a/drivers/net/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
new file mode 100644
index 00000000000..dd8d6e19dff
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -0,0 +1,126 @@
+/*******************************************************************************
+ Header File to describe Normal/enhanced descriptor functions used for RING
+ and CHAINED modes.
+
+ Copyright(C) 2011 STMicroelectronics Ltd
+
+ It defines all the functions used to handle the normal/enhanced
+ descriptors in case of the DMA is configured to work in chained or
+ in ring mode.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#if defined(CONFIG_STMMAC_RING)
+static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
+ if (end)
+ p->des01.erx.end_ring = 1;
+}
+
+static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ if (end)
+ p->des01.etx.end_ring = 1;
+}
+
+static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter)
+{
+ p->des01.etx.end_ring = ter;
+}
+
+static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
+{
+ if (unlikely(len > BUF_SIZE_4KiB)) {
+ p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
+ p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+ } else
+ p->des01.etx.buffer1_size = len;
+}
+
+static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1;
+ if (end)
+ p->des01.rx.end_ring = 1;
+}
+
+static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ if (end)
+ p->des01.tx.end_ring = 1;
+}
+
+static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter)
+{
+ p->des01.tx.end_ring = ter;
+}
+
+static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
+{
+ if (unlikely(len > BUF_SIZE_2KiB)) {
+ p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1;
+ p->des01.etx.buffer2_size = len - p->des01.etx.buffer1_size;
+ } else
+ p->des01.tx.buffer1_size = len;
+}
+
+#else
+
+static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ p->des01.erx.second_address_chained = 1;
+}
+
+static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ p->des01.etx.second_address_chained = 1;
+}
+
+static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter)
+{
+ p->des01.etx.second_address_chained = 1;
+}
+
+static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
+{
+ p->des01.etx.buffer1_size = len;
+}
+
+static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ p->des01.rx.second_address_chained = 1;
+}
+
+static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int ring_size)
+{
+ p->des01.tx.second_address_chained = 1;
+}
+
+static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter)
+{
+ p->des01.tx.second_address_chained = 1;
+}
+
+static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
+{
+ p->des01.tx.buffer1_size = len;
+}
+#endif
diff --git a/drivers/net/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 7c6d857a9cc..7c6d857a9cc 100644
--- a/drivers/net/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index cfcef0ea0fa..cfcef0ea0fa 100644
--- a/drivers/net/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 0f63b3c83c1..b1c48b97594 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -37,11 +37,6 @@ static void dwmac1000_core_init(void __iomem *ioaddr)
value |= GMAC_CORE_INIT;
writel(value, ioaddr + GMAC_CONTROL);
- /* STBus Bridge Configuration */
- /*writel(0xc5608, ioaddr + 0x00007000);*/
-
- /* Freeze MMC counters */
- writel(0x8, ioaddr + GMAC_MMC_CTRL);
/* Mask GMAC interrupts */
writel(0x207, ioaddr + GMAC_INT_MASK);
@@ -229,10 +224,7 @@ static const struct stmmac_ops dwmac1000_ops = {
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
{
struct mac_device_info *mac;
- u32 uid = readl(ioaddr + GMAC_VERSION);
-
- pr_info("\tDWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n",
- ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
+ u32 hwid = readl(ioaddr + GMAC_VERSION);
mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
if (!mac)
@@ -246,6 +238,7 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
mac->link.speed = GMAC_CONTROL_FES;
mac->mii.addr = GMAC_MII_ADDR;
mac->mii.data = GMAC_MII_DATA;
+ mac->synopsys_uid = hwid;
return mac;
}
diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 3dbeea61908..da66ac511c4 100644
--- a/drivers/net/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -118,13 +118,6 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
writel(csr6, ioaddr + DMA_CONTROL);
}
-/* Not yet implemented --- no RMON module */
-static void dwmac1000_dma_diagnostic_fr(void *data,
- struct stmmac_extra_stats *x, void __iomem *ioaddr)
-{
- return;
-}
-
static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
{
int i;
@@ -139,11 +132,15 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
}
}
+static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr)
+{
+ return readl(ioaddr + DMA_HW_FEATURE);
+}
+
const struct stmmac_dma_ops dwmac1000_dma_ops = {
.init = dwmac1000_dma_init,
.dump_regs = dwmac1000_dump_dma_regs,
.dma_mode = dwmac1000_dma_operation_mode,
- .dma_diagnostic_fr = dwmac1000_dma_diagnostic_fr,
.enable_dma_transmission = dwmac_enable_dma_transmission,
.enable_dma_irq = dwmac_enable_dma_irq,
.disable_dma_irq = dwmac_disable_dma_irq,
@@ -152,4 +149,5 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = {
.start_rx = dwmac_dma_start_rx,
.stop_rx = dwmac_dma_stop_rx,
.dma_interrupt = dwmac_dma_interrupt,
+ .get_hw_feature = dwmac1000_get_hw_feature,
};
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 743a5801763..138fb8dd1e8 100644
--- a/drivers/net/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -70,17 +70,6 @@ static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
readl(ioaddr + MAC_VLAN1));
pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
readl(ioaddr + MAC_VLAN2));
- pr_info("\n\tMAC management counter registers\n");
- pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
- MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
- pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
- pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
- pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
- pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
}
static void dwmac100_irq_status(void __iomem *ioaddr)
@@ -199,6 +188,7 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
mac->link.speed = 0;
mac->mii.addr = MAC_MII_ADDR;
mac->mii.data = MAC_MII_DATA;
+ mac->synopsys_uid = 0;
return mac;
}
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 627f656b0f3..627f656b0f3 100644
--- a/drivers/net/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index da3f5ccf83d..437edacd602 100644
--- a/drivers/net/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -34,6 +34,7 @@
#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
+#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */
/* DMA Control register defines */
#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index e25093510b0..e25093510b0 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index e5dfb6a3018..d87976364ec 100644
--- a/drivers/net/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -23,6 +23,7 @@
*******************************************************************************/
#include "common.h"
+#include "descs_com.h"
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
@@ -233,10 +234,9 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
for (i = 0; i < ring_size; i++) {
p->des01.erx.own = 1;
p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
- /* To support jumbo frames */
- p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
- if (i == ring_size - 1)
- p->des01.erx.end_ring = 1;
+
+ ehn_desc_rx_set_on_ring_chain(p, (i == ring_size - 1));
+
if (disable_rx_ic)
p->des01.erx.disable_ic = 1;
p++;
@@ -249,8 +249,7 @@ static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
for (i = 0; i < ring_size; i++) {
p->des01.etx.own = 0;
- if (i == ring_size - 1)
- p->des01.etx.end_ring = 1;
+ ehn_desc_tx_set_on_ring_chain(p, (i == ring_size - 1));
p++;
}
}
@@ -285,19 +284,16 @@ static void enh_desc_release_tx_desc(struct dma_desc *p)
int ter = p->des01.etx.end_ring;
memset(p, 0, offsetof(struct dma_desc, des2));
- p->des01.etx.end_ring = ter;
+ enh_desc_end_tx_desc(p, ter);
}
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.etx.first_segment = is_fs;
- if (unlikely(len > BUF_SIZE_4KiB)) {
- p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
- p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
- } else {
- p->des01.etx.buffer1_size = len;
- }
+
+ enh_set_tx_desc_len(p, len);
+
if (likely(csum_flag))
p->des01.etx.checksum_insertion = cic_full;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
new file mode 100644
index 00000000000..a38352024cb
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -0,0 +1,131 @@
+/*******************************************************************************
+ MMC Header file
+
+ Copyright (C) 2011 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+/* MMC control register */
+/* When set, all counter are reset */
+#define MMC_CNTRL_COUNTER_RESET 0x1
+/* When set, do not roll over zero
+ * after reaching the max value*/
+#define MMC_CNTRL_COUNTER_STOP_ROLLOVER 0x2
+#define MMC_CNTRL_RESET_ON_READ 0x4 /* Reset after reading */
+#define MMC_CNTRL_COUNTER_FREEZER 0x8 /* Freeze counter values to the
+ * current value.*/
+#define MMC_CNTRL_PRESET 0x10
+#define MMC_CNTRL_FULL_HALF_PRESET 0x20
+struct stmmac_counters {
+ unsigned int mmc_tx_octetcount_gb;
+ unsigned int mmc_tx_framecount_gb;
+ unsigned int mmc_tx_broadcastframe_g;
+ unsigned int mmc_tx_multicastframe_g;
+ unsigned int mmc_tx_64_octets_gb;
+ unsigned int mmc_tx_65_to_127_octets_gb;
+ unsigned int mmc_tx_128_to_255_octets_gb;
+ unsigned int mmc_tx_256_to_511_octets_gb;
+ unsigned int mmc_tx_512_to_1023_octets_gb;
+ unsigned int mmc_tx_1024_to_max_octets_gb;
+ unsigned int mmc_tx_unicast_gb;
+ unsigned int mmc_tx_multicast_gb;
+ unsigned int mmc_tx_broadcast_gb;
+ unsigned int mmc_tx_underflow_error;
+ unsigned int mmc_tx_singlecol_g;
+ unsigned int mmc_tx_multicol_g;
+ unsigned int mmc_tx_deferred;
+ unsigned int mmc_tx_latecol;
+ unsigned int mmc_tx_exesscol;
+ unsigned int mmc_tx_carrier_error;
+ unsigned int mmc_tx_octetcount_g;
+ unsigned int mmc_tx_framecount_g;
+ unsigned int mmc_tx_excessdef;
+ unsigned int mmc_tx_pause_frame;
+ unsigned int mmc_tx_vlan_frame_g;
+
+ /* MMC RX counter registers */
+ unsigned int mmc_rx_framecount_gb;
+ unsigned int mmc_rx_octetcount_gb;
+ unsigned int mmc_rx_octetcount_g;
+ unsigned int mmc_rx_broadcastframe_g;
+ unsigned int mmc_rx_multicastframe_g;
+ unsigned int mmc_rx_crc_errror;
+ unsigned int mmc_rx_align_error;
+ unsigned int mmc_rx_run_error;
+ unsigned int mmc_rx_jabber_error;
+ unsigned int mmc_rx_undersize_g;
+ unsigned int mmc_rx_oversize_g;
+ unsigned int mmc_rx_64_octets_gb;
+ unsigned int mmc_rx_65_to_127_octets_gb;
+ unsigned int mmc_rx_128_to_255_octets_gb;
+ unsigned int mmc_rx_256_to_511_octets_gb;
+ unsigned int mmc_rx_512_to_1023_octets_gb;
+ unsigned int mmc_rx_1024_to_max_octets_gb;
+ unsigned int mmc_rx_unicast_g;
+ unsigned int mmc_rx_length_error;
+ unsigned int mmc_rx_autofrangetype;
+ unsigned int mmc_rx_pause_frames;
+ unsigned int mmc_rx_fifo_overflow;
+ unsigned int mmc_rx_vlan_frames_gb;
+ unsigned int mmc_rx_watchdog_error;
+ /* IPC */
+ unsigned int mmc_rx_ipc_intr_mask;
+ unsigned int mmc_rx_ipc_intr;
+ /* IPv4 */
+ unsigned int mmc_rx_ipv4_gd;
+ unsigned int mmc_rx_ipv4_hderr;
+ unsigned int mmc_rx_ipv4_nopay;
+ unsigned int mmc_rx_ipv4_frag;
+ unsigned int mmc_rx_ipv4_udsbl;
+
+ unsigned int mmc_rx_ipv4_gd_octets;
+ unsigned int mmc_rx_ipv4_hderr_octets;
+ unsigned int mmc_rx_ipv4_nopay_octets;
+ unsigned int mmc_rx_ipv4_frag_octets;
+ unsigned int mmc_rx_ipv4_udsbl_octets;
+
+ /* IPV6 */
+ unsigned int mmc_rx_ipv6_gd_octets;
+ unsigned int mmc_rx_ipv6_hderr_octets;
+ unsigned int mmc_rx_ipv6_nopay_octets;
+
+ unsigned int mmc_rx_ipv6_gd;
+ unsigned int mmc_rx_ipv6_hderr;
+ unsigned int mmc_rx_ipv6_nopay;
+
+ /* Protocols */
+ unsigned int mmc_rx_udp_gd;
+ unsigned int mmc_rx_udp_err;
+ unsigned int mmc_rx_tcp_gd;
+ unsigned int mmc_rx_tcp_err;
+ unsigned int mmc_rx_icmp_gd;
+ unsigned int mmc_rx_icmp_err;
+
+ unsigned int mmc_rx_udp_gd_octets;
+ unsigned int mmc_rx_udp_err_octets;
+ unsigned int mmc_rx_tcp_gd_octets;
+ unsigned int mmc_rx_tcp_err_octets;
+ unsigned int mmc_rx_icmp_gd_octets;
+ unsigned int mmc_rx_icmp_err_octets;
+};
+
+extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
+extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
+extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
new file mode 100644
index 00000000000..41e6b33e1b0
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -0,0 +1,265 @@
+/*******************************************************************************
+ DWMAC Management Counters
+
+ Copyright (C) 2011 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include "mmc.h"
+
+/* MAC Management Counters register offset */
+
+#define MMC_CNTRL 0x00000100 /* MMC Control */
+#define MMC_RX_INTR 0x00000104 /* MMC RX Interrupt */
+#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
+#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
+#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
+#define MMC_DEFAUL_MASK 0xffffffff
+
+/* MMC TX counter registers */
+
+/* Note:
+ * _GB register stands for good and bad frames
+ * _G is for good only.
+ */
+#define MMC_TX_OCTETCOUNT_GB 0x00000114
+#define MMC_TX_FRAMECOUNT_GB 0x00000118
+#define MMC_TX_BROADCASTFRAME_G 0x0000011c
+#define MMC_TX_MULTICASTFRAME_G 0x00000120
+#define MMC_TX_64_OCTETS_GB 0x00000124
+#define MMC_TX_65_TO_127_OCTETS_GB 0x00000128
+#define MMC_TX_128_TO_255_OCTETS_GB 0x0000012c
+#define MMC_TX_256_TO_511_OCTETS_GB 0x00000130
+#define MMC_TX_512_TO_1023_OCTETS_GB 0x00000134
+#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x00000138
+#define MMC_TX_UNICAST_GB 0x0000013c
+#define MMC_TX_MULTICAST_GB 0x00000140
+#define MMC_TX_BROADCAST_GB 0x00000144
+#define MMC_TX_UNDERFLOW_ERROR 0x00000148
+#define MMC_TX_SINGLECOL_G 0x0000014c
+#define MMC_TX_MULTICOL_G 0x00000150
+#define MMC_TX_DEFERRED 0x00000154
+#define MMC_TX_LATECOL 0x00000158
+#define MMC_TX_EXESSCOL 0x0000015c
+#define MMC_TX_CARRIER_ERROR 0x00000160
+#define MMC_TX_OCTETCOUNT_G 0x00000164
+#define MMC_TX_FRAMECOUNT_G 0x00000168
+#define MMC_TX_EXCESSDEF 0x0000016c
+#define MMC_TX_PAUSE_FRAME 0x00000170
+#define MMC_TX_VLAN_FRAME_G 0x00000174
+
+/* MMC RX counter registers */
+#define MMC_RX_FRAMECOUNT_GB 0x00000180
+#define MMC_RX_OCTETCOUNT_GB 0x00000184
+#define MMC_RX_OCTETCOUNT_G 0x00000188
+#define MMC_RX_BROADCASTFRAME_G 0x0000018c
+#define MMC_RX_MULTICASTFRAME_G 0x00000190
+#define MMC_RX_CRC_ERRROR 0x00000194
+#define MMC_RX_ALIGN_ERROR 0x00000198
+#define MMC_RX_RUN_ERROR 0x0000019C
+#define MMC_RX_JABBER_ERROR 0x000001A0
+#define MMC_RX_UNDERSIZE_G 0x000001A4
+#define MMC_RX_OVERSIZE_G 0x000001A8
+#define MMC_RX_64_OCTETS_GB 0x000001AC
+#define MMC_RX_65_TO_127_OCTETS_GB 0x000001b0
+#define MMC_RX_128_TO_255_OCTETS_GB 0x000001b4
+#define MMC_RX_256_TO_511_OCTETS_GB 0x000001b8
+#define MMC_RX_512_TO_1023_OCTETS_GB 0x000001bc
+#define MMC_RX_1024_TO_MAX_OCTETS_GB 0x000001c0
+#define MMC_RX_UNICAST_G 0x000001c4
+#define MMC_RX_LENGTH_ERROR 0x000001c8
+#define MMC_RX_AUTOFRANGETYPE 0x000001cc
+#define MMC_RX_PAUSE_FRAMES 0x000001d0
+#define MMC_RX_FIFO_OVERFLOW 0x000001d4
+#define MMC_RX_VLAN_FRAMES_GB 0x000001d8
+#define MMC_RX_WATCHDOG_ERROR 0x000001dc
+/* IPC*/
+#define MMC_RX_IPC_INTR_MASK 0x00000200
+#define MMC_RX_IPC_INTR 0x00000208
+/* IPv4*/
+#define MMC_RX_IPV4_GD 0x00000210
+#define MMC_RX_IPV4_HDERR 0x00000214
+#define MMC_RX_IPV4_NOPAY 0x00000218
+#define MMC_RX_IPV4_FRAG 0x0000021C
+#define MMC_RX_IPV4_UDSBL 0x00000220
+
+#define MMC_RX_IPV4_GD_OCTETS 0x00000250
+#define MMC_RX_IPV4_HDERR_OCTETS 0x00000254
+#define MMC_RX_IPV4_NOPAY_OCTETS 0x00000258
+#define MMC_RX_IPV4_FRAG_OCTETS 0x0000025c
+#define MMC_RX_IPV4_UDSBL_OCTETS 0x00000260
+
+/* IPV6*/
+#define MMC_RX_IPV6_GD_OCTETS 0x00000264
+#define MMC_RX_IPV6_HDERR_OCTETS 0x00000268
+#define MMC_RX_IPV6_NOPAY_OCTETS 0x0000026c
+
+#define MMC_RX_IPV6_GD 0x00000224
+#define MMC_RX_IPV6_HDERR 0x00000228
+#define MMC_RX_IPV6_NOPAY 0x0000022c
+
+/* Protocols*/
+#define MMC_RX_UDP_GD 0x00000230
+#define MMC_RX_UDP_ERR 0x00000234
+#define MMC_RX_TCP_GD 0x00000238
+#define MMC_RX_TCP_ERR 0x0000023c
+#define MMC_RX_ICMP_GD 0x00000240
+#define MMC_RX_ICMP_ERR 0x00000244
+
+#define MMC_RX_UDP_GD_OCTETS 0x00000270
+#define MMC_RX_UDP_ERR_OCTETS 0x00000274
+#define MMC_RX_TCP_GD_OCTETS 0x00000278
+#define MMC_RX_TCP_ERR_OCTETS 0x0000027c
+#define MMC_RX_ICMP_GD_OCTETS 0x00000280
+#define MMC_RX_ICMP_ERR_OCTETS 0x00000284
+
+void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
+{
+ u32 value = readl(ioaddr + MMC_CNTRL);
+
+ value |= (mode & 0x3F);
+
+ writel(value, ioaddr + MMC_CNTRL);
+
+ pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
+ MMC_CNTRL, value);
+}
+
+/* To mask all all interrupts.*/
+void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
+{
+ writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK);
+ writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK);
+}
+
+/* This reads the MAC core counters (if actaully supported).
+ * by default the MMC core is programmed to reset each
+ * counter after a read. So all the field of the mmc struct
+ * have to be incremented.
+ */
+void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
+{
+ mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB);
+ mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB);
+ mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G);
+ mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G);
+ mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB);
+ mmc->mmc_tx_65_to_127_octets_gb +=
+ readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB);
+ mmc->mmc_tx_128_to_255_octets_gb +=
+ readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB);
+ mmc->mmc_tx_256_to_511_octets_gb +=
+ readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB);
+ mmc->mmc_tx_512_to_1023_octets_gb +=
+ readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB);
+ mmc->mmc_tx_1024_to_max_octets_gb +=
+ readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
+ mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB);
+ mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB);
+ mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB);
+ mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR);
+ mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G);
+ mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G);
+ mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED);
+ mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL);
+ mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL);
+ mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR);
+ mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G);
+ mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G);
+ mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF);
+ mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME);
+ mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G);
+
+ /* MMC RX counter registers */
+ mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB);
+ mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB);
+ mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
+ mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
+ mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
+ mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR);
+ mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
+ mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
+ mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
+ mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G);
+ mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G);
+ mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB);
+ mmc->mmc_rx_65_to_127_octets_gb +=
+ readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB);
+ mmc->mmc_rx_128_to_255_octets_gb +=
+ readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB);
+ mmc->mmc_rx_256_to_511_octets_gb +=
+ readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB);
+ mmc->mmc_rx_512_to_1023_octets_gb +=
+ readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB);
+ mmc->mmc_rx_1024_to_max_octets_gb +=
+ readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
+ mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G);
+ mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR);
+ mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE);
+ mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES);
+ mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW);
+ mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB);
+ mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR);
+ /* IPC */
+ mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK);
+ mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR);
+ /* IPv4 */
+ mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD);
+ mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR);
+ mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY);
+ mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG);
+ mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL);
+
+ mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS);
+ mmc->mmc_rx_ipv4_hderr_octets +=
+ readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS);
+ mmc->mmc_rx_ipv4_nopay_octets +=
+ readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS);
+ mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS);
+ mmc->mmc_rx_ipv4_udsbl_octets +=
+ readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS);
+
+ /* IPV6 */
+ mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS);
+ mmc->mmc_rx_ipv6_hderr_octets +=
+ readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS);
+ mmc->mmc_rx_ipv6_nopay_octets +=
+ readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS);
+
+ mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD);
+ mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR);
+ mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY);
+
+ /* Protocols */
+ mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD);
+ mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR);
+ mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD);
+ mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR);
+ mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD);
+ mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR);
+
+ mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS);
+ mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS);
+ mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS);
+ mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS);
+ mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS);
+ mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS);
+}
diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 029c2a2cf52..f7e8ba7f501 100644
--- a/drivers/net/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -23,6 +23,7 @@
*******************************************************************************/
#include "common.h"
+#include "descs_com.h"
static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
@@ -126,8 +127,9 @@ static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
for (i = 0; i < ring_size; i++) {
p->des01.rx.own = 1;
p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
- if (i == ring_size - 1)
- p->des01.rx.end_ring = 1;
+
+ ndesc_rx_set_on_ring_chain(p, (i == ring_size - 1));
+
if (disable_rx_ic)
p->des01.rx.disable_ic = 1;
p++;
@@ -139,8 +141,7 @@ static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
int i;
for (i = 0; i < ring_size; i++) {
p->des01.tx.own = 0;
- if (i == ring_size - 1)
- p->des01.tx.end_ring = 1;
+ ndesc_tx_set_on_ring_chain(p, (i == (ring_size - 1)));
p++;
}
}
@@ -175,15 +176,14 @@ static void ndesc_release_tx_desc(struct dma_desc *p)
int ter = p->des01.tx.end_ring;
memset(p, 0, offsetof(struct dma_desc, des2));
- /* set termination field */
- p->des01.tx.end_ring = ter;
+ ndesc_end_tx_desc(p, ter);
}
static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.tx.first_segment = is_fs;
- p->des01.tx.buffer1_size = len;
+ norm_set_tx_desc_len(p, len);
}
static void ndesc_clear_tx_ic(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
new file mode 100644
index 00000000000..fb8377da168
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -0,0 +1,126 @@
+/*******************************************************************************
+ Specialised functions for managing Ring mode
+
+ Copyright(C) 2011 STMicroelectronics Ltd
+
+ It defines all the functions used to handle the normal/enhanced
+ descriptors in case of the DMA is configured to work in chained or
+ in ring mode.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "stmmac.h"
+
+static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *) p;
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int entry = priv->cur_tx % txsize;
+ struct dma_desc *desc = priv->dma_tx + entry;
+ unsigned int nopaged_len = skb_headlen(skb);
+ unsigned int bmax, len;
+
+ if (priv->plat->enh_desc)
+ bmax = BUF_SIZE_8KiB;
+ else
+ bmax = BUF_SIZE_2KiB;
+
+ len = nopaged_len - bmax;
+
+ if (nopaged_len > BUF_SIZE_8KiB) {
+
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ bmax, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
+ csum);
+
+ entry = (++priv->cur_tx) % txsize;
+ desc = priv->dma_tx + entry;
+
+ desc->des2 = dma_map_single(priv->device, skb->data + bmax,
+ len, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+ priv->hw->desc->set_tx_owner(desc);
+ priv->tx_skbuff[entry] = NULL;
+ } else {
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum);
+ }
+
+ return entry;
+}
+
+static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
+{
+ unsigned int ret = 0;
+
+ if (len >= BUF_SIZE_4KiB)
+ ret = 1;
+
+ return ret;
+}
+
+static void stmmac_refill_desc3(int bfsize, struct dma_desc *p)
+{
+ /* Fill DES3 in case of RING mode */
+ if (bfsize >= BUF_SIZE_8KiB)
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
+}
+
+/* In ring mode we need to fill the desc3 because it is used
+ * as buffer */
+static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
+{
+ if (unlikely(des3_as_data_buf))
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
+}
+
+static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
+ unsigned int size)
+{
+}
+
+static void stmmac_clean_desc3(struct dma_desc *p)
+{
+ if (unlikely(p->des3))
+ p->des3 = 0;
+}
+
+static int stmmac_set_16kib_bfsize(int mtu)
+{
+ int ret = 0;
+ if (unlikely(mtu >= BUF_SIZE_8KiB))
+ ret = BUF_SIZE_16KiB;
+ return ret;
+}
+
+const struct stmmac_ring_mode_ops ring_mode_ops = {
+ .is_jumbo_frm = stmmac_is_jumbo_frm,
+ .jumbo_frm = stmmac_jumbo_frm,
+ .refill_desc3 = stmmac_refill_desc3,
+ .init_desc3 = stmmac_init_desc3,
+ .init_dma_chain = stmmac_init_dma_chain,
+ .clean_desc3 = stmmac_clean_desc3,
+ .set_16kib_bfsize = stmmac_set_16kib_bfsize,
+};
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index de1929b2641..9bafa6cf9e8 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -20,9 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "July_2011"
+#define DRV_MODULE_VERSION "Oct_2011"
#include <linux/stmmac.h>
-
+#include <linux/phy.h>
#include "common.h"
#ifdef CONFIG_STMMAC_TIMER
#include "stmmac_timer.h"
@@ -70,12 +70,16 @@ struct stmmac_priv {
u32 msg_enable;
spinlock_t lock;
+ spinlock_t tx_lock;
int wolopts;
int wolenabled;
+ int wol_irq;
#ifdef CONFIG_STMMAC_TIMER
struct stmmac_timer *tm;
#endif
struct plat_stmmacenet_data *plat;
+ struct stmmac_counters mmc;
+ struct dma_features dma_cap;
};
extern int stmmac_mdio_unregister(struct net_device *ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
new file mode 100644
index 00000000000..406404f6e32
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -0,0 +1,477 @@
+/*******************************************************************************
+ STMMAC Ethtool support
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <asm/io.h>
+
+#include "stmmac.h"
+#include "dwmac_dma.h"
+
+#define REG_SPACE_SIZE 0x1054
+#define MAC100_ETHTOOL_NAME "st_mac100"
+#define GMAC_ETHTOOL_NAME "st_gmac"
+
+struct stmmac_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define STMMAC_STAT(m) \
+ { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \
+ offsetof(struct stmmac_priv, xstats.m)}
+
+static const struct stmmac_stats stmmac_gstrings_stats[] = {
+ STMMAC_STAT(tx_underflow),
+ STMMAC_STAT(tx_carrier),
+ STMMAC_STAT(tx_losscarrier),
+ STMMAC_STAT(tx_heartbeat),
+ STMMAC_STAT(tx_deferred),
+ STMMAC_STAT(tx_vlan),
+ STMMAC_STAT(rx_vlan),
+ STMMAC_STAT(tx_jabber),
+ STMMAC_STAT(tx_frame_flushed),
+ STMMAC_STAT(tx_payload_error),
+ STMMAC_STAT(tx_ip_header_error),
+ STMMAC_STAT(rx_desc),
+ STMMAC_STAT(rx_partial),
+ STMMAC_STAT(rx_runt),
+ STMMAC_STAT(rx_toolong),
+ STMMAC_STAT(rx_collision),
+ STMMAC_STAT(rx_crc),
+ STMMAC_STAT(rx_length),
+ STMMAC_STAT(rx_mii),
+ STMMAC_STAT(rx_multicast),
+ STMMAC_STAT(rx_gmac_overflow),
+ STMMAC_STAT(rx_watchdog),
+ STMMAC_STAT(da_rx_filter_fail),
+ STMMAC_STAT(sa_rx_filter_fail),
+ STMMAC_STAT(rx_missed_cntr),
+ STMMAC_STAT(rx_overflow_cntr),
+ STMMAC_STAT(tx_undeflow_irq),
+ STMMAC_STAT(tx_process_stopped_irq),
+ STMMAC_STAT(tx_jabber_irq),
+ STMMAC_STAT(rx_overflow_irq),
+ STMMAC_STAT(rx_buf_unav_irq),
+ STMMAC_STAT(rx_process_stopped_irq),
+ STMMAC_STAT(rx_watchdog_irq),
+ STMMAC_STAT(tx_early_irq),
+ STMMAC_STAT(fatal_bus_error_irq),
+ STMMAC_STAT(threshold),
+ STMMAC_STAT(tx_pkt_n),
+ STMMAC_STAT(rx_pkt_n),
+ STMMAC_STAT(poll_n),
+ STMMAC_STAT(sched_timer_n),
+ STMMAC_STAT(normal_irq_n),
+};
+#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
+
+/* HW MAC Management counters (if supported) */
+#define STMMAC_MMC_STAT(m) \
+ { #m, FIELD_SIZEOF(struct stmmac_counters, m), \
+ offsetof(struct stmmac_priv, mmc.m)}
+
+static const struct stmmac_stats stmmac_mmc[] = {
+ STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
+ STMMAC_MMC_STAT(mmc_tx_framecount_gb),
+ STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
+ STMMAC_MMC_STAT(mmc_tx_multicastframe_g),
+ STMMAC_MMC_STAT(mmc_tx_64_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_unicast_gb),
+ STMMAC_MMC_STAT(mmc_tx_multicast_gb),
+ STMMAC_MMC_STAT(mmc_tx_broadcast_gb),
+ STMMAC_MMC_STAT(mmc_tx_underflow_error),
+ STMMAC_MMC_STAT(mmc_tx_singlecol_g),
+ STMMAC_MMC_STAT(mmc_tx_multicol_g),
+ STMMAC_MMC_STAT(mmc_tx_deferred),
+ STMMAC_MMC_STAT(mmc_tx_latecol),
+ STMMAC_MMC_STAT(mmc_tx_exesscol),
+ STMMAC_MMC_STAT(mmc_tx_carrier_error),
+ STMMAC_MMC_STAT(mmc_tx_octetcount_g),
+ STMMAC_MMC_STAT(mmc_tx_framecount_g),
+ STMMAC_MMC_STAT(mmc_tx_excessdef),
+ STMMAC_MMC_STAT(mmc_tx_pause_frame),
+ STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
+ STMMAC_MMC_STAT(mmc_rx_framecount_gb),
+ STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
+ STMMAC_MMC_STAT(mmc_rx_octetcount_g),
+ STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
+ STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
+ STMMAC_MMC_STAT(mmc_rx_crc_errror),
+ STMMAC_MMC_STAT(mmc_rx_align_error),
+ STMMAC_MMC_STAT(mmc_rx_run_error),
+ STMMAC_MMC_STAT(mmc_rx_jabber_error),
+ STMMAC_MMC_STAT(mmc_rx_undersize_g),
+ STMMAC_MMC_STAT(mmc_rx_oversize_g),
+ STMMAC_MMC_STAT(mmc_rx_64_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_unicast_g),
+ STMMAC_MMC_STAT(mmc_rx_length_error),
+ STMMAC_MMC_STAT(mmc_rx_autofrangetype),
+ STMMAC_MMC_STAT(mmc_rx_pause_frames),
+ STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
+ STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
+ STMMAC_MMC_STAT(mmc_rx_watchdog_error),
+ STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
+ STMMAC_MMC_STAT(mmc_rx_ipc_intr),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_frag),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_gd),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_hderr),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_nopay),
+ STMMAC_MMC_STAT(mmc_rx_udp_gd),
+ STMMAC_MMC_STAT(mmc_rx_udp_err),
+ STMMAC_MMC_STAT(mmc_rx_tcp_gd),
+ STMMAC_MMC_STAT(mmc_rx_tcp_err),
+ STMMAC_MMC_STAT(mmc_rx_icmp_gd),
+ STMMAC_MMC_STAT(mmc_rx_icmp_err),
+ STMMAC_MMC_STAT(mmc_rx_udp_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_udp_err_octets),
+ STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
+ STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
+};
+#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
+
+static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->plat->has_gmac)
+ strcpy(info->driver, GMAC_ETHTOOL_NAME);
+ else
+ strcpy(info->driver, MAC100_ETHTOOL_NAME);
+
+ strcpy(info->version, DRV_MODULE_VERSION);
+ info->fw_version[0] = '\0';
+}
+
+static int stmmac_ethtool_getsettings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = priv->phydev;
+ int rc;
+ if (phy == NULL) {
+ pr_err("%s: %s: PHY is not registered\n",
+ __func__, dev->name);
+ return -ENODEV;
+ }
+ if (!netif_running(dev)) {
+ pr_err("%s: interface is disabled: we cannot track "
+ "link speed / duplex setting\n", dev->name);
+ return -EBUSY;
+ }
+ cmd->transceiver = XCVR_INTERNAL;
+ spin_lock_irq(&priv->lock);
+ rc = phy_ethtool_gset(phy, cmd);
+ spin_unlock_irq(&priv->lock);
+ return rc;
+}
+
+static int stmmac_ethtool_setsettings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = priv->phydev;
+ int rc;
+
+ spin_lock(&priv->lock);
+ rc = phy_ethtool_sset(phy, cmd);
+ spin_unlock(&priv->lock);
+
+ return rc;
+}
+
+static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ priv->msg_enable = level;
+
+}
+
+static int stmmac_check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EBUSY;
+ return 0;
+}
+
+static int stmmac_ethtool_get_regs_len(struct net_device *dev)
+{
+ return REG_SPACE_SIZE;
+}
+
+static void stmmac_ethtool_gregs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ int i;
+ u32 *reg_space = (u32 *) space;
+
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ memset(reg_space, 0x0, REG_SPACE_SIZE);
+
+ if (!priv->plat->has_gmac) {
+ /* MAC registers */
+ for (i = 0; i < 12; i++)
+ reg_space[i] = readl(priv->ioaddr + (i * 4));
+ /* DMA registers */
+ for (i = 0; i < 9; i++)
+ reg_space[i + 12] =
+ readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+ reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
+ reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
+ } else {
+ /* MAC registers */
+ for (i = 0; i < 55; i++)
+ reg_space[i] = readl(priv->ioaddr + (i * 4));
+ /* DMA registers */
+ for (i = 0; i < 22; i++)
+ reg_space[i + 55] =
+ readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+ }
+}
+
+static void
+stmmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ spin_lock(&priv->lock);
+
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ pause->autoneg = priv->phydev->autoneg;
+
+ if (priv->flow_ctrl & FLOW_RX)
+ pause->rx_pause = 1;
+ if (priv->flow_ctrl & FLOW_TX)
+ pause->tx_pause = 1;
+
+ spin_unlock(&priv->lock);
+}
+
+static int
+stmmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+ struct phy_device *phy = priv->phydev;
+ int new_pause = FLOW_OFF;
+ int ret = 0;
+
+ spin_lock(&priv->lock);
+
+ if (pause->rx_pause)
+ new_pause |= FLOW_RX;
+ if (pause->tx_pause)
+ new_pause |= FLOW_TX;
+
+ priv->flow_ctrl = new_pause;
+ phy->autoneg = pause->autoneg;
+
+ if (phy->autoneg) {
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phy);
+ } else
+ priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
+ priv->flow_ctrl, priv->pause);
+ spin_unlock(&priv->lock);
+ return ret;
+}
+
+static void stmmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *dummy, u64 *data)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i, j = 0;
+
+ /* Update the DMA HW counters for dwmac10/100 */
+ if (!priv->plat->has_gmac)
+ priv->hw->dma->dma_diagnostic_fr(&dev->stats,
+ (void *) &priv->xstats,
+ priv->ioaddr);
+ else {
+ /* If supported, for new GMAC chips expose the MMC counters */
+ if (priv->dma_cap.rmon) {
+ dwmac_mmc_read(priv->ioaddr, &priv->mmc);
+
+ for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
+ char *p;
+ p = (char *)priv + stmmac_mmc[i].stat_offset;
+
+ data[j++] = (stmmac_mmc[i].sizeof_stat ==
+ sizeof(u64)) ? (*(u64 *)p) :
+ (*(u32 *)p);
+ }
+ }
+ }
+ for (i = 0; i < STMMAC_STATS_LEN; i++) {
+ char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
+ data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+ }
+}
+
+static int stmmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+ int len;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ len = STMMAC_STATS_LEN;
+
+ if (priv->dma_cap.rmon)
+ len += STMMAC_MMC_STATS_LEN;
+
+ return len;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+ u8 *p = data;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ if (priv->dma_cap.rmon)
+ for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
+ memcpy(p, stmmac_mmc[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < STMMAC_STATS_LEN; i++) {
+ memcpy(p, stmmac_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+/* Currently only support WOL through Magic packet. */
+static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ spin_lock_irq(&priv->lock);
+ if (device_can_wakeup(priv->device)) {
+ wol->supported = WAKE_MAGIC | WAKE_UCAST;
+ wol->wolopts = priv->wolopts;
+ }
+ spin_unlock_irq(&priv->lock);
+}
+
+static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+ if (!device_can_wakeup(priv->device))
+ return -EINVAL;
+
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ if (wol->wolopts) {
+ pr_info("stmmac: wakeup enable\n");
+ device_set_wakeup_enable(priv->device, 1);
+ enable_irq_wake(priv->wol_irq);
+ } else {
+ device_set_wakeup_enable(priv->device, 0);
+ disable_irq_wake(priv->wol_irq);
+ }
+
+ spin_lock_irq(&priv->lock);
+ priv->wolopts = wol->wolopts;
+ spin_unlock_irq(&priv->lock);
+
+ return 0;
+}
+
+static struct ethtool_ops stmmac_ethtool_ops = {
+ .begin = stmmac_check_if_running,
+ .get_drvinfo = stmmac_ethtool_getdrvinfo,
+ .get_settings = stmmac_ethtool_getsettings,
+ .set_settings = stmmac_ethtool_setsettings,
+ .get_msglevel = stmmac_ethtool_getmsglevel,
+ .set_msglevel = stmmac_ethtool_setmsglevel,
+ .get_regs = stmmac_ethtool_gregs,
+ .get_regs_len = stmmac_ethtool_get_regs_len,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = stmmac_get_pauseparam,
+ .set_pauseparam = stmmac_set_pauseparam,
+ .get_ethtool_stats = stmmac_get_ethtool_stats,
+ .get_strings = stmmac_get_strings,
+ .get_wol = stmmac_get_wol,
+ .set_wol = stmmac_set_wol,
+ .get_sset_count = stmmac_get_sset_count,
+};
+
+void stmmac_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
+}
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c6e567e04ef..aeaa15b451d 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2,7 +2,7 @@
This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
ST Ethernet IPs are built around a Synopsys IP Core.
- Copyright (C) 2007-2009 STMicroelectronics Ltd
+ Copyright(C) 2007-2011 STMicroelectronics Ltd
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -41,11 +41,15 @@
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
-#include <linux/phy.h>
+#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
+#ifdef CONFIG_STMMAC_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif
#include "stmmac.h"
#define STMMAC_RESOURCE_NAME "stmmaceth"
@@ -299,7 +303,7 @@ static int stmmac_init_phy(struct net_device *dev)
struct phy_device *phydev;
char phy_id[MII_BUS_ID_SIZE + 3];
char bus_id[MII_BUS_ID_SIZE];
-
+ int interface = priv->plat->interface;
priv->oldlink = 0;
priv->speed = 0;
priv->oldduplex = -1;
@@ -309,14 +313,21 @@ static int stmmac_init_phy(struct net_device *dev)
priv->plat->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
- phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
- priv->plat->interface);
+ phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, interface);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
return PTR_ERR(phydev);
}
+ /* Stop Advertising 1000BASE Capability if interface is not GMII */
+ if ((interface) && ((interface == PHY_INTERFACE_MODE_MII) ||
+ (interface == PHY_INTERFACE_MODE_RMII))) {
+ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ priv->phydev->advertising = priv->phydev->supported;
+ }
+
/*
* Broken HW is sometimes missing the pull-up resistor on the
* MDIO line, which results in reads to non-existent devices returning
@@ -376,11 +387,28 @@ static void display_ring(struct dma_desc *p, int size)
}
}
+static int stmmac_set_bfsize(int mtu, int bufsize)
+{
+ int ret = bufsize;
+
+ if (mtu >= BUF_SIZE_4KiB)
+ ret = BUF_SIZE_8KiB;
+ else if (mtu >= BUF_SIZE_2KiB)
+ ret = BUF_SIZE_4KiB;
+ else if (mtu >= DMA_BUFFER_SIZE)
+ ret = BUF_SIZE_2KiB;
+ else
+ ret = DMA_BUFFER_SIZE;
+
+ return ret;
+}
+
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
* Description: this function initializes the DMA RX/TX descriptors
- * and allocates the socket buffers.
+ * and allocates the socket buffers. It suppors the chained and ring
+ * modes.
*/
static void init_dma_desc_rings(struct net_device *dev)
{
@@ -389,31 +417,24 @@ static void init_dma_desc_rings(struct net_device *dev)
struct sk_buff *skb;
unsigned int txsize = priv->dma_tx_size;
unsigned int rxsize = priv->dma_rx_size;
- unsigned int bfsize = priv->dma_buf_sz;
- int buff2_needed = 0, dis_ic = 0;
+ unsigned int bfsize;
+ int dis_ic = 0;
+ int des3_as_data_buf = 0;
- /* Set the Buffer size according to the MTU;
- * indeed, in case of jumbo we need to bump-up the buffer sizes.
- */
- if (unlikely(dev->mtu >= BUF_SIZE_8KiB))
- bfsize = BUF_SIZE_16KiB;
- else if (unlikely(dev->mtu >= BUF_SIZE_4KiB))
- bfsize = BUF_SIZE_8KiB;
- else if (unlikely(dev->mtu >= BUF_SIZE_2KiB))
- bfsize = BUF_SIZE_4KiB;
- else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE))
- bfsize = BUF_SIZE_2KiB;
+ /* Set the max buffer size according to the DESC mode
+ * and the MTU. Note that RING mode allows 16KiB bsize. */
+ bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+
+ if (bfsize == BUF_SIZE_16KiB)
+ des3_as_data_buf = 1;
else
- bfsize = DMA_BUFFER_SIZE;
+ bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
#ifdef CONFIG_STMMAC_TIMER
/* Disable interrupts on completion for the reception if timer is on */
if (likely(priv->tm->enable))
dis_ic = 1;
#endif
- /* If the MTU exceeds 8k so use the second buffer in the chain */
- if (bfsize >= BUF_SIZE_8KiB)
- buff2_needed = 1;
DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
txsize, rxsize, bfsize);
@@ -441,7 +462,7 @@ static void init_dma_desc_rings(struct net_device *dev)
return;
}
- DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, "
+ DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, "
"Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
dev->name, priv->dma_rx, priv->dma_tx,
(unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
@@ -453,18 +474,21 @@ static void init_dma_desc_rings(struct net_device *dev)
for (i = 0; i < rxsize; i++) {
struct dma_desc *p = priv->dma_rx + i;
- skb = netdev_alloc_skb_ip_align(dev, bfsize);
+ skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN,
+ GFP_KERNEL);
if (unlikely(skb == NULL)) {
pr_err("%s: Rx init fails; skb is NULL\n", __func__);
break;
}
+ skb_reserve(skb, NET_IP_ALIGN);
priv->rx_skbuff[i] = skb;
priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
bfsize, DMA_FROM_DEVICE);
p->des2 = priv->rx_skbuff_dma[i];
- if (unlikely(buff2_needed))
- p->des3 = p->des2 + BUF_SIZE_8KiB;
+
+ priv->hw->ring->init_desc3(des3_as_data_buf, p);
+
DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
}
@@ -478,6 +502,12 @@ static void init_dma_desc_rings(struct net_device *dev)
priv->tx_skbuff[i] = NULL;
priv->dma_tx[i].des2 = 0;
}
+
+ /* In case of Chained mode this sets the des3 to the next
+ * element in the chain */
+ priv->hw->ring->init_dma_chain(priv->dma_rx, priv->dma_rx_phy, rxsize);
+ priv->hw->ring->init_dma_chain(priv->dma_tx, priv->dma_tx_phy, txsize);
+
priv->dirty_tx = 0;
priv->cur_tx = 0;
@@ -576,6 +606,8 @@ static void stmmac_tx(struct stmmac_priv *priv)
{
unsigned int txsize = priv->dma_tx_size;
+ spin_lock(&priv->tx_lock);
+
while (priv->dirty_tx != priv->cur_tx) {
int last;
unsigned int entry = priv->dirty_tx % txsize;
@@ -606,8 +638,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
dma_unmap_single(priv->device, p->des2,
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
- if (unlikely(p->des3))
- p->des3 = 0;
+ priv->hw->ring->clean_desc3(p);
if (likely(skb != NULL)) {
/*
@@ -639,6 +670,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
}
netif_tx_unlock(priv->dev);
}
+ spin_unlock(&priv->tx_lock);
}
static inline void stmmac_enable_irq(struct stmmac_priv *priv)
@@ -713,7 +745,6 @@ static void stmmac_no_timer_stopped(void)
*/
static void stmmac_tx_err(struct stmmac_priv *priv)
{
-
netif_stop_queue(priv->dev);
priv->hw->dma->stop_tx(priv->ioaddr);
@@ -747,6 +778,89 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
stmmac_tx_err(priv);
}
+static void stmmac_mmc_setup(struct stmmac_priv *priv)
+{
+ unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
+ MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+
+ /* Do not manage MMC IRQ (FIXME) */
+ dwmac_mmc_intr_all_mask(priv->ioaddr);
+ dwmac_mmc_ctrl(priv->ioaddr, mode);
+ memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+}
+
+static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
+{
+ u32 hwid = priv->hw->synopsys_uid;
+
+ /* Only check valid Synopsys Id because old MAC chips
+ * have no HW registers where get the ID */
+ if (likely(hwid)) {
+ u32 uid = ((hwid & 0x0000ff00) >> 8);
+ u32 synid = (hwid & 0x000000ff);
+
+ pr_info("STMMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ uid, synid);
+
+ return synid;
+ }
+ return 0;
+}
+
+/* New GMAC chips support a new register to indicate the
+ * presence of the optional feature/functions.
+ */
+static int stmmac_get_hw_features(struct stmmac_priv *priv)
+{
+ u32 hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
+
+ if (likely(hw_cap)) {
+ priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
+ priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
+ priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
+ priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
+ priv->dma_cap.multi_addr =
+ (hw_cap & DMA_HW_FEAT_ADDMACADRSEL) >> 5;
+ priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
+ priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
+ priv->dma_cap.pmt_remote_wake_up =
+ (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
+ priv->dma_cap.pmt_magic_frame =
+ (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
+ /*MMC*/
+ priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
+ /* IEEE 1588-2002*/
+ priv->dma_cap.time_stamp =
+ (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
+ /* IEEE 1588-2008*/
+ priv->dma_cap.atime_stamp =
+ (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
+ priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
+ /* TX and RX csum */
+ priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
+ priv->dma_cap.rx_coe_type1 =
+ (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
+ priv->dma_cap.rx_coe_type2 =
+ (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
+ priv->dma_cap.rxfifo_over_2048 =
+ (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
+ /* TX and RX number of channels */
+ priv->dma_cap.number_rx_channel =
+ (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
+ priv->dma_cap.number_tx_channel =
+ (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
+ /* Alternate (enhanced) DESC mode*/
+ priv->dma_cap.enh_desc =
+ (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
+
+ } else
+ pr_debug("\tNo HW DMA feature register supported");
+
+ return hw_cap;
+}
+
/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
@@ -819,17 +933,16 @@ static int stmmac_open(struct net_device *dev)
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->ioaddr);
- priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
+ stmmac_get_synopsys_id(priv);
+
+ stmmac_get_hw_features(priv);
+
if (priv->rx_coe)
pr_info("stmmac: Rx Checksum Offload Engine supported\n");
if (priv->plat->tx_coe)
pr_info("\tTX Checksum insertion supported\n");
netdev_update_features(dev);
- /* Initialise the MMC (if present) to disable all interrupts. */
- writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
- writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
-
/* Request the IRQ lines */
ret = request_irq(dev->irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
@@ -849,6 +962,9 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
+ if (priv->dma_cap.rmon)
+ stmmac_mmc_setup(priv);
+
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
priv->hw->dma->start_tx(priv->ioaddr);
@@ -928,47 +1044,6 @@ static int stmmac_release(struct net_device *dev)
return 0;
}
-static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
- struct net_device *dev,
- int csum_insertion)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- unsigned int nopaged_len = skb_headlen(skb);
- unsigned int txsize = priv->dma_tx_size;
- unsigned int entry = priv->cur_tx % txsize;
- struct dma_desc *desc = priv->dma_tx + entry;
-
- if (nopaged_len > BUF_SIZE_8KiB) {
-
- int buf2_size = nopaged_len - BUF_SIZE_8KiB;
-
- desc->des2 = dma_map_single(priv->device, skb->data,
- BUF_SIZE_8KiB, DMA_TO_DEVICE);
- desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
- csum_insertion);
-
- entry = (++priv->cur_tx) % txsize;
- desc = priv->dma_tx + entry;
-
- desc->des2 = dma_map_single(priv->device,
- skb->data + BUF_SIZE_8KiB,
- buf2_size, DMA_TO_DEVICE);
- desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
- csum_insertion);
- priv->hw->desc->set_tx_owner(desc);
- priv->tx_skbuff[entry] = NULL;
- } else {
- desc->des2 = dma_map_single(priv->device, skb->data,
- nopaged_len, DMA_TO_DEVICE);
- desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
- }
- return entry;
-}
-
/**
* stmmac_xmit:
* @skb : the socket buffer
@@ -983,6 +1058,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int i, csum_insertion = 0;
int nfrags = skb_shinfo(skb)->nr_frags;
struct dma_desc *desc, *first;
+ unsigned int nopaged_len = skb_headlen(skb);
if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
if (!netif_queue_stopped(dev)) {
@@ -994,6 +1070,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
+ spin_lock(&priv->tx_lock);
+
entry = priv->cur_tx % txsize;
#ifdef STMMAC_XMIT_DEBUG
@@ -1001,7 +1079,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
pr_info("stmmac xmit:\n"
"\tskb addr %p - len: %d - nopaged_len: %d\n"
"\tn_frags: %d - ip_summed: %d - %s gso\n",
- skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
+ skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
!skb_is_gso(skb) ? "isn't" : "is");
#endif
@@ -1014,14 +1092,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
"\t\tn_frags: %d, ip_summed: %d\n",
- skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
+ skb->len, nopaged_len, nfrags, skb->ip_summed);
#endif
priv->tx_skbuff[entry] = skb;
- if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
- entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
+
+ if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) {
+ entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion);
desc = priv->dma_tx + entry;
} else {
- unsigned int nopaged_len = skb_headlen(skb);
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
@@ -1029,16 +1107,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
}
for (i = 0; i < nfrags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- int len = frag->size;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = skb_frag_size(frag);
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
- desc->des2 = dma_map_page(priv->device, frag->page,
- frag->page_offset,
- len, DMA_TO_DEVICE);
+ desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
priv->tx_skbuff[entry] = NULL;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
wmb();
@@ -1083,6 +1160,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+ spin_unlock(&priv->tx_lock);
+
return NETDEV_TX_OK;
}
@@ -1111,11 +1190,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
DMA_FROM_DEVICE);
(p + entry)->des2 = priv->rx_skbuff_dma[entry];
- if (unlikely(priv->plat->has_gmac)) {
- if (bfsize >= BUF_SIZE_8KiB)
- (p + entry)->des3 =
- (p + entry)->des2 + BUF_SIZE_8KiB;
- }
+
+ if (unlikely(priv->plat->has_gmac))
+ priv->hw->ring->refill_desc3(bfsize, p + entry);
+
RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
}
wmb();
@@ -1284,7 +1362,7 @@ static int stmmac_config(struct net_device *dev, struct ifmap *map)
}
/**
- * stmmac_multicast_list - entry point for multicast addressing
+ * stmmac_set_rx_mode - entry point for multicast addressing
* @dev : pointer to the device structure
* Description:
* This function is a driver entry point which gets called by the kernel
@@ -1292,7 +1370,7 @@ static int stmmac_config(struct net_device *dev, struct ifmap *map)
* Return value:
* void.
*/
-static void stmmac_multicast_list(struct net_device *dev)
+static void stmmac_set_rx_mode(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -1322,10 +1400,10 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return -EBUSY;
}
- if (priv->plat->has_gmac)
+ if (priv->plat->enh_desc)
max_mtu = JUMBO_LEN;
else
- max_mtu = ETH_DATA_LEN;
+ max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
if ((new_mtu < 46) || (new_mtu > max_mtu)) {
pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
@@ -1415,13 +1493,189 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return ret;
}
+#ifdef CONFIG_STMMAC_DEBUG_FS
+static struct dentry *stmmac_fs_dir;
+static struct dentry *stmmac_rings_status;
+static struct dentry *stmmac_dma_cap;
+
+static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
+{
+ struct tmp_s {
+ u64 a;
+ unsigned int b;
+ unsigned int c;
+ };
+ int i;
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ seq_printf(seq, "=======================\n");
+ seq_printf(seq, " RX descriptor ring\n");
+ seq_printf(seq, "=======================\n");
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ struct tmp_s *x = (struct tmp_s *)(priv->dma_rx + i);
+ seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
+ i, (unsigned int)(x->a),
+ (unsigned int)((x->a) >> 32), x->b, x->c);
+ seq_printf(seq, "\n");
+ }
+
+ seq_printf(seq, "\n");
+ seq_printf(seq, "=======================\n");
+ seq_printf(seq, " TX descriptor ring\n");
+ seq_printf(seq, "=======================\n");
+
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ struct tmp_s *x = (struct tmp_s *)(priv->dma_tx + i);
+ seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
+ i, (unsigned int)(x->a),
+ (unsigned int)((x->a) >> 32), x->b, x->c);
+ seq_printf(seq, "\n");
+ }
+
+ return 0;
+}
+
+static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
+}
+
+static const struct file_operations stmmac_rings_status_fops = {
+ .owner = THIS_MODULE,
+ .open = stmmac_sysfs_ring_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!stmmac_get_hw_features(priv)) {
+ seq_printf(seq, "DMA HW features not supported\n");
+ return 0;
+ }
+
+ seq_printf(seq, "==============================\n");
+ seq_printf(seq, "\tDMA HW features\n");
+ seq_printf(seq, "==============================\n");
+
+ seq_printf(seq, "\t10/100 Mbps %s\n",
+ (priv->dma_cap.mbps_10_100) ? "Y" : "N");
+ seq_printf(seq, "\t1000 Mbps %s\n",
+ (priv->dma_cap.mbps_1000) ? "Y" : "N");
+ seq_printf(seq, "\tHalf duple %s\n",
+ (priv->dma_cap.half_duplex) ? "Y" : "N");
+ seq_printf(seq, "\tHash Filter: %s\n",
+ (priv->dma_cap.hash_filter) ? "Y" : "N");
+ seq_printf(seq, "\tMultiple MAC address registers: %s\n",
+ (priv->dma_cap.multi_addr) ? "Y" : "N");
+ seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n",
+ (priv->dma_cap.pcs) ? "Y" : "N");
+ seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
+ (priv->dma_cap.sma_mdio) ? "Y" : "N");
+ seq_printf(seq, "\tPMT Remote wake up: %s\n",
+ (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
+ seq_printf(seq, "\tPMT Magic Frame: %s\n",
+ (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
+ seq_printf(seq, "\tRMON module: %s\n",
+ (priv->dma_cap.rmon) ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
+ (priv->dma_cap.time_stamp) ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n",
+ (priv->dma_cap.atime_stamp) ? "Y" : "N");
+ seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
+ (priv->dma_cap.eee) ? "Y" : "N");
+ seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
+ seq_printf(seq, "\tChecksum Offload in TX: %s\n",
+ (priv->dma_cap.tx_coe) ? "Y" : "N");
+ seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
+ seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
+ (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
+ seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
+ priv->dma_cap.number_rx_channel);
+ seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
+ priv->dma_cap.number_tx_channel);
+ seq_printf(seq, "\tEnhanced descriptors: %s\n",
+ (priv->dma_cap.enh_desc) ? "Y" : "N");
+
+ return 0;
+}
+
+static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
+}
+
+static const struct file_operations stmmac_dma_cap_fops = {
+ .owner = THIS_MODULE,
+ .open = stmmac_sysfs_dma_cap_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int stmmac_init_fs(struct net_device *dev)
+{
+ /* Create debugfs entries */
+ stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+
+ if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
+ pr_err("ERROR %s, debugfs create directory failed\n",
+ STMMAC_RESOURCE_NAME);
+
+ return -ENOMEM;
+ }
+
+ /* Entry to report DMA RX/TX rings */
+ stmmac_rings_status = debugfs_create_file("descriptors_status",
+ S_IRUGO, stmmac_fs_dir, dev,
+ &stmmac_rings_status_fops);
+
+ if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
+ pr_info("ERROR creating stmmac ring debugfs file\n");
+ debugfs_remove(stmmac_fs_dir);
+
+ return -ENOMEM;
+ }
+
+ /* Entry to report the DMA HW features */
+ stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
+ dev, &stmmac_dma_cap_fops);
+
+ if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
+ pr_info("ERROR creating stmmac MMC debugfs file\n");
+ debugfs_remove(stmmac_rings_status);
+ debugfs_remove(stmmac_fs_dir);
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void stmmac_exit_fs(void)
+{
+ debugfs_remove(stmmac_rings_status);
+ debugfs_remove(stmmac_dma_cap);
+ debugfs_remove(stmmac_fs_dir);
+}
+#endif /* CONFIG_STMMAC_DEBUG_FS */
+
static const struct net_device_ops stmmac_netdev_ops = {
.ndo_open = stmmac_open,
.ndo_start_xmit = stmmac_xmit,
.ndo_stop = stmmac_release,
.ndo_change_mtu = stmmac_change_mtu,
.ndo_fix_features = stmmac_fix_features,
- .ndo_set_multicast_list = stmmac_multicast_list,
+ .ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
.ndo_set_config = stmmac_config,
@@ -1472,6 +1726,7 @@ static int stmmac_probe(struct net_device *dev)
"please, use ifconfig or nwhwconfig!\n");
spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->tx_lock);
ret = register_netdev(dev);
if (ret) {
@@ -1498,10 +1753,12 @@ static int stmmac_mac_device_setup(struct net_device *dev)
struct mac_device_info *device;
- if (priv->plat->has_gmac)
+ if (priv->plat->has_gmac) {
+ dev->priv_flags |= IFF_UNICAST_FLT;
device = dwmac1000_setup(priv->ioaddr);
- else
+ } else {
device = dwmac100_setup(priv->ioaddr);
+ }
if (!device)
return -ENOMEM;
@@ -1513,10 +1770,11 @@ static int stmmac_mac_device_setup(struct net_device *dev)
device->desc = &ndesc_ops;
priv->hw = device;
+ priv->hw->ring = &ring_mode_ops;
if (device_can_wakeup(priv->device)) {
priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
- enable_irq_wake(dev->irq);
+ enable_irq_wake(priv->wol_irq);
}
return 0;
@@ -1589,6 +1847,18 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
pr_info("\tPMT module supported\n");
device_set_wakeup_capable(&pdev->dev, 1);
}
+ /*
+ * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
+ * The external wake up irq can be passed through the platform code
+ * named as "eth_wake_irq"
+ *
+ * In case the wake up interrupt is not passed from the platform
+ * so the driver will continue to use the mac irq (ndev->irq)
+ */
+ priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (priv->wol_irq == -ENXIO)
+ priv->wol_irq = ndev->irq;
+
platform_set_drvdata(pdev, ndev);
@@ -1627,6 +1897,13 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
if (ret < 0)
goto out_unregister;
pr_debug("registered!\n");
+
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ ret = stmmac_init_fs(ndev);
+ if (ret < 0)
+ pr_warning("\tFailed debugFS registration");
+#endif
+
return 0;
out_unregister:
@@ -1679,6 +1956,10 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ stmmac_exit_fs();
+#endif
+
free_netdev(ndev);
return 0;
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 9c3b9d5c341..9c3b9d5c341 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c
index 2a0e1abde7e..2a0e1abde7e 100644
--- a/drivers/net/stmmac/stmmac_timer.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c
diff --git a/drivers/net/stmmac/stmmac_timer.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
index 6863590d184..6863590d184 100644
--- a/drivers/net/stmmac/stmmac_timer.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
new file mode 100644
index 00000000000..57bfd859967
--- /dev/null
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -0,0 +1,88 @@
+#
+# Sun network device configuration
+#
+
+config NET_VENDOR_SUN
+ bool "Sun devices"
+ default y
+ depends on SUN3 || SBUS || PCI || SUN_LDOMS
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say
+ Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Sun network interfaces. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_SUN
+
+config HAPPYMEAL
+ tristate "Sun Happy Meal 10/100baseT support"
+ depends on (SBUS || PCI)
+ select CRC32
+ ---help---
+ This driver supports the "hme" interface present on most Ultra
+ systems and as an option on older Sbus systems. This driver supports
+ both PCI and Sbus devices. This driver also supports the "qfe" quad
+ 100baseT device available in both PCI and Sbus configurations.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunhme.
+
+config SUNBMAC
+ tristate "Sun BigMAC 10/100baseT support (EXPERIMENTAL)"
+ depends on SBUS && EXPERIMENTAL
+ select CRC32
+ ---help---
+ This driver supports the "be" interface available as an Sbus option.
+ This is Sun's older 100baseT Ethernet device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunbmac.
+
+config SUNQE
+ tristate "Sun QuadEthernet support"
+ depends on SBUS
+ select CRC32
+ ---help---
+ This driver supports the "qe" 10baseT Ethernet device, available as
+ an Sbus option. Note that this is not the same as Quad FastEthernet
+ "qfe" which is supported by the Happy Meal driver instead.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunqe.
+
+config SUNGEM
+ tristate "Sun GEM support"
+ depends on PCI
+ select CRC32
+ select SUNGEM_PHY
+ ---help---
+ Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also
+ <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>.
+
+config CASSINI
+ tristate "Sun Cassini support"
+ depends on PCI
+ select CRC32
+ ---help---
+ Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
+ <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
+
+config SUNVNET
+ tristate "Sun Virtual Network support"
+ depends on SUN_LDOMS
+ ---help---
+ Support for virtual network devices under Sun Logical Domains.
+
+config NIU
+ tristate "Sun Neptune 10Gbit Ethernet support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This enables support for cards based upon Sun's
+ Neptune chipset.
+
+endif # NET_VENDOR_SUN
diff --git a/drivers/net/ethernet/sun/Makefile b/drivers/net/ethernet/sun/Makefile
new file mode 100644
index 00000000000..1e620ff88eb
--- /dev/null
+++ b/drivers/net/ethernet/sun/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the Sun network device drivers.
+#
+
+obj-$(CONFIG_HAPPYMEAL) += sunhme.o
+obj-$(CONFIG_SUNQE) += sunqe.o
+obj-$(CONFIG_SUNBMAC) += sunbmac.o
+obj-$(CONFIG_SUNGEM) += sungem.o
+obj-$(CONFIG_CASSINI) += cassini.o
+obj-$(CONFIG_SUNVNET) += sunvnet.o
+obj-$(CONFIG_NIU) += niu.o
diff --git a/drivers/net/cassini.c b/drivers/net/ethernet/sun/cassini.c
index fdb7a175640..fd40988c19a 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -2048,10 +2048,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
skb->truesize += hlen - swivel;
skb->len += hlen - swivel;
- get_page(page->buffer);
- frag->page = page->buffer;
+ __skb_frag_set_page(frag, page->buffer);
+ __skb_frag_ref(frag);
frag->page_offset = off;
- frag->size = hlen - swivel;
+ skb_frag_size_set(frag, hlen - swivel);
/* any more data? */
if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
@@ -2072,10 +2072,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
skb->len += hlen;
frag++;
- get_page(page->buffer);
- frag->page = page->buffer;
+ __skb_frag_set_page(frag, page->buffer);
+ __skb_frag_ref(frag);
frag->page_offset = 0;
- frag->size = hlen;
+ skb_frag_size_set(frag, hlen);
RX_USED_ADD(page, hlen + cp->crc_size);
}
@@ -2826,12 +2826,11 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
entry = TX_DESC_NEXT(ring, entry);
for (frag = 0; frag < nr_frags; frag++) {
- skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+ const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
- len = fragp->size;
- mapping = pci_map_page(cp->pdev, fragp->page,
- fragp->page_offset, len,
- PCI_DMA_TODEVICE);
+ len = skb_frag_size(fragp);
+ mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
+ DMA_TO_DEVICE);
tabort = cas_calc_tabort(cp, fragp->page_offset, len);
if (unlikely(tabort)) {
@@ -2842,7 +2841,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
ctrl, 0);
entry = TX_DESC_NEXT(ring, entry);
- addr = cas_page_map(fragp->page);
+ addr = cas_page_map(skb_frag_page(fragp));
memcpy(tx_tiny_buf(cp, ring, entry),
addr + fragp->page_offset + len - tabort,
tabort);
@@ -4909,7 +4908,7 @@ static const struct net_device_ops cas_netdev_ops = {
.ndo_stop = cas_close,
.ndo_start_xmit = cas_start_xmit,
.ndo_get_stats = cas_get_stats,
- .ndo_set_multicast_list = cas_set_multicast,
+ .ndo_set_rx_mode = cas_set_multicast,
.ndo_do_ioctl = cas_ioctl,
.ndo_tx_timeout = cas_tx_timeout,
.ndo_change_mtu = cas_change_mtu,
diff --git a/drivers/net/cassini.h b/drivers/net/ethernet/sun/cassini.h
index b361424d5f5..b361424d5f5 100644
--- a/drivers/net/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
diff --git a/drivers/net/niu.c b/drivers/net/ethernet/sun/niu.c
index ed47585a686..73c708107a3 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
+#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
@@ -3286,20 +3287,13 @@ static u16 tcam_get_valid_entry_cnt(struct niu *np)
}
static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
- u32 offset, u32 size)
+ u32 offset, u32 size, u32 truesize)
{
- int i = skb_shinfo(skb)->nr_frags;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- frag->page = page;
- frag->page_offset = offset;
- frag->size = size;
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
skb->len += size;
skb->data_len += size;
- skb->truesize += size;
-
- skb_shinfo(skb)->nr_frags = i + 1;
+ skb->truesize += truesize;
}
static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
@@ -3482,7 +3476,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
} else if (!(val & RCR_ENTRY_MULTI))
append_size = len - skb->len;
- niu_rx_skb_append(skb, page, off, append_size);
+ niu_rx_skb_append(skb, page, off, append_size, rcr_size);
if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
*link = (struct page *) page->mapping;
np->ops->unmap_page(np->device, page->index,
@@ -3600,7 +3594,7 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
tb = &rp->tx_buffs[idx];
BUG_ON(tb->skb != NULL);
np->ops->unmap_page(np->device, tb->mapping,
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
DMA_TO_DEVICE);
idx = NEXT_TX(rp, idx);
}
@@ -6733,10 +6727,10 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
- mapping = np->ops->map_page(np->device, frag->page,
+ len = skb_frag_size(frag);
+ mapping = np->ops->map_page(np->device, skb_frag_page(frag),
frag->page_offset, len,
DMA_TO_DEVICE);
@@ -7301,11 +7295,13 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
}
niu_unlock_parent(np, flags);
+ nfc->rule_cnt = cnt;
+
return ret;
}
static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- void *rule_locs)
+ u32 *rule_locs)
{
struct niu *np = netdev_priv(dev);
int ret = 0;
@@ -7324,7 +7320,7 @@ static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
ret = niu_get_ethtool_tcam_entry(np, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
- ret = niu_get_ethtool_tcam_all(np, cmd, (u32 *)rule_locs);
+ ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
break;
default:
ret = -EINVAL;
@@ -9716,7 +9712,7 @@ static const struct net_device_ops niu_netdev_ops = {
.ndo_stop = niu_close,
.ndo_start_xmit = niu_start_xmit,
.ndo_get_stats64 = niu_get_stats,
- .ndo_set_multicast_list = niu_set_rx_mode,
+ .ndo_set_rx_mode = niu_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = niu_set_mac_addr,
.ndo_do_ioctl = niu_ioctl,
@@ -9852,6 +9848,8 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
niu_set_basic_features(dev);
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
np->regs = pci_ioremap_bar(pdev, 0);
if (!np->regs) {
dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
diff --git a/drivers/net/niu.h b/drivers/net/ethernet/sun/niu.h
index 51e177e1860..51e177e1860 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/ethernet/sun/niu.h
diff --git a/drivers/net/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 297a4242106..0d8cfd9ea05 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -17,6 +17,7 @@
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
+#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -500,13 +501,13 @@ static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
/* Reset the PHY. */
bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
- bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+ bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
bp->sw_bmcr = (BMCR_RESET);
- bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+ bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
timeout = 64;
while (--timeout) {
- bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
if ((bp->sw_bmcr & BMCR_RESET) == 0)
break;
udelay(20);
@@ -514,11 +515,11 @@ static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
if (timeout == 0)
printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
- bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
/* Now we try 10baseT. */
bp->sw_bmcr &= ~(BMCR_SPEED100);
- bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+ bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
return 0;
}
@@ -534,8 +535,8 @@ static void bigmac_timer(unsigned long data)
bp->timer_ticks++;
if (bp->timer_state == ltrywait) {
- bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR);
- bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+ bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR);
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
if (bp->sw_bmsr & BMSR_LSTATUS) {
printk(KERN_INFO "%s: Link is now up at %s.\n",
bp->dev->name,
@@ -588,18 +589,18 @@ static void bigmac_begin_auto_negotiation(struct bigmac *bp)
int timeout;
/* Grab new software copies of PHY registers. */
- bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR);
- bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+ bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR);
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
/* Reset the PHY. */
bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
- bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+ bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
bp->sw_bmcr = (BMCR_RESET);
- bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+ bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
timeout = 64;
while (--timeout) {
- bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
if ((bp->sw_bmcr & BMCR_RESET) == 0)
break;
udelay(20);
@@ -607,11 +608,11 @@ static void bigmac_begin_auto_negotiation(struct bigmac *bp)
if (timeout == 0)
printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
- bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
/* First we try 100baseT. */
bp->sw_bmcr |= BMCR_SPEED100;
- bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+ bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
bp->timer_state = ltrywait;
bp->timer_ticks = 0;
@@ -1054,7 +1055,7 @@ static u32 bigmac_get_link(struct net_device *dev)
struct bigmac *bp = netdev_priv(dev);
spin_lock_irq(&bp->lock);
- bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, BIGMAC_BMSR);
+ bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, MII_BMSR);
spin_unlock_irq(&bp->lock);
return (bp->sw_bmsr & BMSR_LSTATUS);
@@ -1070,7 +1071,7 @@ static const struct net_device_ops bigmac_ops = {
.ndo_stop = bigmac_close,
.ndo_start_xmit = bigmac_start_xmit,
.ndo_get_stats = bigmac_get_stats,
- .ndo_set_multicast_list = bigmac_set_multicast,
+ .ndo_set_rx_mode = bigmac_set_multicast,
.ndo_tx_timeout = bigmac_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h
index 4943e975a73..06dd2170735 100644
--- a/drivers/net/sunbmac.h
+++ b/drivers/net/ethernet/sun/sunbmac.h
@@ -223,23 +223,6 @@
#define BIGMAC_PHY_EXTERNAL 0 /* External transceiver */
#define BIGMAC_PHY_INTERNAL 1 /* Internal transceiver */
-/* PHY registers */
-#define BIGMAC_BMCR 0x00 /* Basic mode control register */
-#define BIGMAC_BMSR 0x01 /* Basic mode status register */
-
-/* BMCR bits */
-#define BMCR_ISOLATE 0x0400 /* Disconnect DP83840 from MII */
-#define BMCR_PDOWN 0x0800 /* Powerdown the DP83840 */
-#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
-#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
-#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */
-#define BMCR_RESET 0x8000 /* Reset the DP83840 */
-
-/* BMSR bits */
-#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
-#define BMSR_JCD 0x0002 /* Jabber detected */
-#define BMSR_LSTATUS 0x0004 /* Link status */
-
/* Ring descriptors and such, same as Quad Ethernet. */
struct be_rxd {
u32 rx_flags;
diff --git a/drivers/net/sungem.c b/drivers/net/ethernet/sun/sungem.c
index ade35dde5b5..ceab215bb4a 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -59,7 +59,7 @@
#include <asm/pmac_feature.h>
#endif
-#include "sungem_phy.h"
+#include <linux/sungem_phy.h>
#include "sungem.h"
/* Stripping FCS is causing problems, disabled for now */
@@ -1065,16 +1065,14 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len;
dma_addr_t mapping;
u64 this_ctrl;
- len = this_frag->size;
- mapping = pci_map_page(gp->pdev,
- this_frag->page,
- this_frag->page_offset,
- len, PCI_DMA_TODEVICE);
+ len = skb_frag_size(this_frag);
+ mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag,
+ 0, len, DMA_TO_DEVICE);
this_ctrl = ctrl;
if (frag == skb_shinfo(skb)->nr_frags - 1)
this_ctrl |= TXDCTRL_EOF;
@@ -1721,7 +1719,7 @@ static void gem_init_phy(struct gem *gp)
if (gp->phy_type == phy_mii_mdio0 ||
gp->phy_type == phy_mii_mdio1) {
/* Reset and detect MII PHY */
- mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
+ sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
/* Init PHY */
if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
@@ -2820,7 +2818,7 @@ static const struct net_device_ops gem_netdev_ops = {
.ndo_stop = gem_close,
.ndo_start_xmit = gem_start_xmit,
.ndo_get_stats = gem_get_stats,
- .ndo_set_multicast_list = gem_set_multicast,
+ .ndo_set_rx_mode = gem_set_multicast,
.ndo_do_ioctl = gem_ioctl,
.ndo_tx_timeout = gem_tx_timeout,
.ndo_change_mtu = gem_change_mtu,
diff --git a/drivers/net/sungem.h b/drivers/net/ethernet/sun/sungem.h
index 835ce1b3cb9..835ce1b3cb9 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/ethernet/sun/sungem.h
diff --git a/drivers/net/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 856e05b9fba..c517dac02ae 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2305,13 +2305,12 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len, mapping, this_txflags;
- len = this_frag->size;
- mapping = dma_map_page(hp->dma_dev, this_frag->page,
- this_frag->page_offset, len,
- DMA_TO_DEVICE);
+ len = skb_frag_size(this_frag);
+ mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
+ 0, len, DMA_TO_DEVICE);
this_txflags = tx_flags;
if (frag == skb_shinfo(skb)->nr_frags - 1)
this_txflags |= TXFLAG_EOP;
@@ -2619,7 +2618,7 @@ static const struct net_device_ops hme_netdev_ops = {
.ndo_start_xmit = happy_meal_start_xmit,
.ndo_tx_timeout = happy_meal_tx_timeout,
.ndo_get_stats = happy_meal_get_stats,
- .ndo_set_multicast_list = happy_meal_set_multicast,
+ .ndo_set_rx_mode = happy_meal_set_multicast,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index 64f278360d8..64f278360d8 100644
--- a/drivers/net/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
diff --git a/drivers/net/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 209c7f8df00..b28f74367eb 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -824,7 +824,7 @@ static const struct net_device_ops qec_ops = {
.ndo_open = qe_open,
.ndo_stop = qe_close,
.ndo_start_xmit = qe_start_xmit,
- .ndo_set_multicast_list = qe_set_multicast,
+ .ndo_set_rx_mode = qe_set_multicast,
.ndo_tx_timeout = qe_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/sunqe.h b/drivers/net/ethernet/sun/sunqe.h
index 581781b6b2f..581781b6b2f 100644
--- a/drivers/net/sunqe.h
+++ b/drivers/net/ethernet/sun/sunqe.h
diff --git a/drivers/net/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index bf3c762de62..8c6c059f348 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1012,7 +1012,7 @@ static DEFINE_MUTEX(vnet_list_mutex);
static const struct net_device_ops vnet_ops = {
.ndo_open = vnet_open,
.ndo_stop = vnet_close,
- .ndo_set_multicast_list = vnet_set_rx_mode,
+ .ndo_set_rx_mode = vnet_set_rx_mode,
.ndo_set_mac_address = vnet_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = vnet_tx_timeout,
diff --git a/drivers/net/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h
index d347a5bf24b..d347a5bf24b 100644
--- a/drivers/net/sunvnet.h
+++ b/drivers/net/ethernet/sun/sunvnet.h
diff --git a/drivers/net/ethernet/tehuti/Kconfig b/drivers/net/ethernet/tehuti/Kconfig
new file mode 100644
index 00000000000..1fc027eda33
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/Kconfig
@@ -0,0 +1,27 @@
+#
+# Tehuti network device configuration
+#
+
+config NET_VENDOR_TEHUTI
+ bool "Tehuti devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Tehuti cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_TEHUTI
+
+config TEHUTI
+ tristate "Tehuti Networks 10G Ethernet"
+ depends on PCI
+ ---help---
+ Tehuti Networks 10G Ethernet NIC
+
+endif # NET_VENDOR_TEHUTI
diff --git a/drivers/net/ethernet/tehuti/Makefile b/drivers/net/ethernet/tehuti/Makefile
new file mode 100644
index 00000000000..f995421ddbc
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Tehuti network device drivers.
+#
+
+obj-$(CONFIG_TEHUTI) += tehuti.o
diff --git a/drivers/net/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 749bbf18dc6..3a90af6d111 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -67,10 +67,10 @@
#include "tehuti.h"
static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = {
- {0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0}
+ { PCI_VDEVICE(TEHUTI, 0x3009), },
+ { PCI_VDEVICE(TEHUTI, 0x3010), },
+ { PCI_VDEVICE(TEHUTI, 0x3014), },
+ { 0 }
};
MODULE_DEVICE_TABLE(pci, bdx_pci_tbl);
@@ -1493,13 +1493,13 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
bdx_tx_db_inc_wptr(db);
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[i];
- db->wptr->len = frag->size;
- db->wptr->addr.dma =
- pci_map_page(priv->pdev, frag->page, frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
+ db->wptr->len = skb_frag_size(frag);
+ db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag,
+ 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
pbl++;
pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
@@ -1860,7 +1860,7 @@ static const struct net_device_ops bdx_netdev_ops = {
.ndo_start_xmit = bdx_tx_transmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = bdx_ioctl,
- .ndo_set_multicast_list = bdx_setmulti,
+ .ndo_set_rx_mode = bdx_setmulti,
.ndo_change_mtu = bdx_change_mtu,
.ndo_set_mac_address = bdx_set_mac,
.ndo_vlan_rx_add_vid = bdx_vlan_rx_add_vid,
diff --git a/drivers/net/tehuti.h b/drivers/net/ethernet/tehuti/tehuti.h
index 709ebd6e28b..709ebd6e28b 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/ethernet/tehuti/tehuti.h
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
new file mode 100644
index 00000000000..de76c70ec8f
--- /dev/null
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -0,0 +1,77 @@
+#
+# TI device configuration
+#
+
+config NET_VENDOR_TI
+ bool "Texas Instruments (TI) devices"
+ default y
+ depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3))
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about TI devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_TI
+
+config TI_DAVINCI_EMAC
+ tristate "TI DaVinci EMAC Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ select TI_DAVINCI_MDIO
+ select TI_DAVINCI_CPDMA
+ select PHYLIB
+ ---help---
+ This driver supports TI's DaVinci Ethernet .
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_emac_driver. This is recommended.
+
+config TI_DAVINCI_MDIO
+ tristate "TI DaVinci MDIO Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ select PHYLIB
+ ---help---
+ This driver supports TI's DaVinci MDIO module.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_mdio. This is recommended.
+
+config TI_DAVINCI_CPDMA
+ tristate "TI DaVinci CPDMA Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ ---help---
+ This driver supports TI's DaVinci CPDMA dma engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_cpdma. This is recommended.
+
+config TLAN
+ tristate "TI ThunderLAN support"
+ depends on (PCI || EISA)
+ ---help---
+ If you have a PCI Ethernet network card based on the ThunderLAN chip
+ which is supported by this driver, say Y and read the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Devices currently supported by this driver are Compaq Netelligent,
+ Compaq NetFlex and Olicom cards. Please read the file
+ <file:Documentation/networking/tlan.txt> for more details.
+
+ To compile this driver as a module, choose M here. The module
+ will be called tlan.
+
+ Please email feedback to <torben.mathiasen@compaq.com>.
+
+config CPMAC
+ tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && AR7
+ select PHYLIB
+ ---help---
+ TI AR7 CPMAC Ethernet support
+
+endif # NET_VENDOR_TI
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
new file mode 100644
index 00000000000..aedb3af74e5
--- /dev/null
+++ b/drivers/net/ethernet/ti/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the TI network device drivers.
+#
+
+obj-$(CONFIG_TLAN) += tlan.o
+obj-$(CONFIG_CPMAC) += cpmac.o
+obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
+obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
diff --git a/drivers/net/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index e0638cb4b07..aaac0c7ad11 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1100,7 +1100,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_stop = cpmac_stop,
.ndo_start_xmit = cpmac_start_xmit,
.ndo_tx_timeout = cpmac_tx_timeout,
- .ndo_set_multicast_list = cpmac_set_multicast_list,
+ .ndo_set_rx_mode = cpmac_set_multicast_list,
.ndo_do_ioctl = cpmac_ioctl,
.ndo_set_config = cpmac_config,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index dca9d3369cd..dca9d3369cd 100644
--- a/drivers/net/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
diff --git a/drivers/net/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index afa19a0c0d8..afa19a0c0d8 100644
--- a/drivers/net/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
diff --git a/drivers/net/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 3f451e4d836..815c7970261 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1741,7 +1741,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_dev_open,
.ndo_stop = emac_dev_stop,
.ndo_start_xmit = emac_dev_xmit,
- .ndo_set_multicast_list = emac_dev_mcast_set,
+ .ndo_set_rx_mode = emac_dev_mcast_set,
.ndo_set_mac_address = emac_dev_setmac_addr,
.ndo_do_ioctl = emac_devioctl,
.ndo_tx_timeout = emac_dev_tx_timeout,
diff --git a/drivers/net/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 7615040df75..7615040df75 100644
--- a/drivers/net/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
diff --git a/drivers/net/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 145871b3130..9c0dd6b8d6c 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -774,7 +774,7 @@ static const struct net_device_ops tlan_netdev_ops = {
.ndo_start_xmit = tlan_start_tx,
.ndo_tx_timeout = tlan_tx_timeout,
.ndo_get_stats = tlan_get_stats,
- .ndo_set_multicast_list = tlan_set_multicast_list,
+ .ndo_set_rx_mode = tlan_set_multicast_list,
.ndo_do_ioctl = tlan_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/tlan.h b/drivers/net/ethernet/ti/tlan.h
index 5fc98a8e488..5fc98a8e488 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/ethernet/ti/tlan.h
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
new file mode 100644
index 00000000000..2d9218f86bc
--- /dev/null
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -0,0 +1,15 @@
+#
+# Tilera network device configuration
+#
+
+config TILE_NET
+ tristate "Tilera GBE/XGBE network driver support"
+ depends on TILE
+ default y
+ select CRC32
+ ---help---
+ This is a standard Linux network device driver for the
+ on-chip Tilera Gigabit Ethernet and XAUI interfaces.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tile_net.
diff --git a/drivers/net/tile/Makefile b/drivers/net/ethernet/tile/Makefile
index f634f142cab..f634f142cab 100644
--- a/drivers/net/tile/Makefile
+++ b/drivers/net/ethernet/tile/Makefile
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 1e2af96fc29..10826d8a2a2 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -177,7 +177,7 @@ struct tile_net_cpu {
struct tile_net_stats_t stats;
/* True iff NAPI is enabled. */
bool napi_enabled;
- /* True if this tile has succcessfully registered with the IPP. */
+ /* True if this tile has successfully registered with the IPP. */
bool registered;
/* True if the link was down last time we tried to register. */
bool link_down;
@@ -1713,7 +1713,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
frags[n].cpa_lo = cpa;
frags[n].cpa_hi = cpa >> 32;
- frags[n].length = f->size;
+ frags[n].length = skb_frag_size(f);
frags[n].hash_for_home = hash_for_home;
n++;
}
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
new file mode 100644
index 00000000000..05176470455
--- /dev/null
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -0,0 +1,57 @@
+#
+# Toshiba network device configuration
+#
+
+config NET_VENDOR_TOSHIBA
+ bool "Toshiba devices"
+ default y
+ depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB) || PPC_PS3
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Toshiba cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_TOSHIBA
+
+config GELIC_NET
+ tristate "PS3 Gigabit Ethernet driver"
+ depends on PPC_PS3
+ select PS3_SYS_MANAGER
+ ---help---
+ This driver supports the network device on the PS3 game
+ console. This driver has built-in support for Ethernet.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ps3_gelic.
+
+config GELIC_WIRELESS
+ bool "PS3 Wireless support"
+ depends on GELIC_NET && WLAN
+ select WIRELESS_EXT
+ ---help---
+ This option adds the support for the wireless feature of PS3.
+ If you have the wireless-less model of PS3 or have no plan to
+ use wireless feature, disabling this option saves memory. As
+ the driver automatically distinguishes the models, you can
+ safely enable this option even if you have a wireless-less model.
+
+config SPIDER_NET
+ tristate "Spider Gigabit Ethernet driver"
+ depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
+ select FW_LOADER
+ select SUNGEM_PHY
+ ---help---
+ This driver supports the Gigabit Ethernet chips present on the
+ Cell Processor-Based Blades from IBM.
+
+config TC35815
+ tristate "TOSHIBA TC35815 Ethernet support"
+ depends on PCI && MIPS
+ select PHYLIB
+
+endif # NET_VENDOR_TOSHIBA
diff --git a/drivers/net/ethernet/toshiba/Makefile b/drivers/net/ethernet/toshiba/Makefile
new file mode 100644
index 00000000000..a5069008435
--- /dev/null
+++ b/drivers/net/ethernet/toshiba/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Toshiba network device drivers.
+#
+
+obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
+gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
+ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
+spidernet-y += spider_net.o spider_net_ethtool.o
+obj-$(CONFIG_SPIDER_NET) += spidernet.o
+obj-$(CONFIG_TC35815) += tc35815.o
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index d82a82d9870..ddb33cfd354 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1452,7 +1452,7 @@ static const struct net_device_ops gelic_netdevice_ops = {
.ndo_open = gelic_net_open,
.ndo_stop = gelic_net_stop,
.ndo_start_xmit = gelic_net_xmit,
- .ndo_set_multicast_list = gelic_net_set_multi,
+ .ndo_set_rx_mode = gelic_net_set_multi,
.ndo_change_mtu = gelic_net_change_mtu,
.ndo_tx_timeout = gelic_net_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index d3fadfbc3bc..d3fadfbc3bc 100644
--- a/drivers/net/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 2e62938c0f8..fd4ed7f8cfa 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -2568,7 +2568,7 @@ static const struct net_device_ops gelic_wl_netdevice_ops = {
.ndo_open = gelic_wl_open,
.ndo_stop = gelic_wl_stop,
.ndo_start_xmit = gelic_net_xmit,
- .ndo_set_multicast_list = gelic_net_set_multi,
+ .ndo_set_rx_mode = gelic_net_set_multi,
.ndo_change_mtu = gelic_net_change_mtu,
.ndo_tx_timeout = gelic_net_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ps3_gelic_wireless.h b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
index f7e51b7d704..f7e51b7d704 100644
--- a/drivers/net/ps3_gelic_wireless.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
diff --git a/drivers/net/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 1ff3491c824..6199f6b387b 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -196,7 +196,7 @@ spider_net_setup_aneg(struct spider_net_card *card)
if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
advertise |= SUPPORTED_1000baseT_Half;
- mii_phy_probe(phy, phy->mii_id);
+ sungem_phy_probe(phy, phy->mii_id);
phy->def->ops->setup_aneg(phy, advertise);
}
@@ -2120,7 +2120,7 @@ spider_net_setup_phy(struct spider_net_card *card)
unsigned short id;
id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
if (id != 0x0000 && id != 0xffff) {
- if (!mii_phy_probe(phy, phy->mii_id)) {
+ if (!sungem_phy_probe(phy, phy->mii_id)) {
pr_info("Found %s.\n", phy->def->name);
break;
}
@@ -2259,7 +2259,7 @@ static const struct net_device_ops spider_net_ops = {
.ndo_open = spider_net_open,
.ndo_stop = spider_net_stop,
.ndo_start_xmit = spider_net_xmit,
- .ndo_set_multicast_list = spider_net_set_multi,
+ .ndo_set_rx_mode = spider_net_set_multi,
.ndo_set_mac_address = spider_net_set_mac,
.ndo_change_mtu = spider_net_change_mtu,
.ndo_do_ioctl = spider_net_do_ioctl,
diff --git a/drivers/net/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
index 020f64a8fcf..4ba2135474d 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/ethernet/toshiba/spider_net.h
@@ -27,7 +27,7 @@
#define VERSION "2.0 B"
-#include "sungem_phy.h"
+#include <linux/sungem_phy.h>
extern int spider_net_stop(struct net_device *netdev);
extern int spider_net_open(struct net_device *netdev);
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index 9c288cd7d17..9c288cd7d17 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
diff --git a/drivers/net/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 4a55a162dfe..71b785cd756 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -774,7 +774,7 @@ static const struct net_device_ops tc35815_netdev_ops = {
.ndo_stop = tc35815_close,
.ndo_start_xmit = tc35815_send_packet,
.ndo_get_stats = tc35815_get_stats,
- .ndo_set_multicast_list = tc35815_set_multicast_list,
+ .ndo_set_rx_mode = tc35815_set_multicast_list,
.ndo_tx_timeout = tc35815_tx_timeout,
.ndo_do_ioctl = tc35815_ioctl,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/tundra/Kconfig b/drivers/net/ethernet/tundra/Kconfig
new file mode 100644
index 00000000000..cf7d69b62c4
--- /dev/null
+++ b/drivers/net/ethernet/tundra/Kconfig
@@ -0,0 +1,29 @@
+#
+# Tundra network device configuration
+#
+
+config NET_VENDOR_TUNDRA
+ bool "Tundra devices"
+ default y
+ depends on TSI108_BRIDGE
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Tundra cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_TUNDRA
+
+config TSI108_ETH
+ tristate "Tundra TSI108 gigabit Ethernet support"
+ depends on TSI108_BRIDGE
+ ---help---
+ This driver supports Tundra TSI108 gigabit Ethernet ports.
+ To compile this driver as a module, choose M here: the module
+ will be called tsi108_eth.
+
+endif # NET_VENDOR_TUNDRA
diff --git a/drivers/net/ethernet/tundra/Makefile b/drivers/net/ethernet/tundra/Makefile
new file mode 100644
index 00000000000..439f6930235
--- /dev/null
+++ b/drivers/net/ethernet/tundra/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Tundra network device drivers.
+#
+
+obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 64cb9ac19ed..a8df7eca095 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -709,12 +709,13 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
data->txring[tx].len = skb_headlen(skb);
misc |= TSI108_TX_SOF;
} else {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- data->txring[tx].buf0 =
- dma_map_page(NULL, frag->page, frag->page_offset,
- frag->size, DMA_TO_DEVICE);
- data->txring[tx].len = frag->size;
+ data->txring[tx].buf0 = skb_frag_dma_map(NULL, frag,
+ 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ data->txring[tx].len = skb_frag_size(frag);
}
if (i == frags - 1)
@@ -1554,7 +1555,7 @@ static const struct net_device_ops tsi108_netdev_ops = {
.ndo_open = tsi108_open,
.ndo_stop = tsi108_close,
.ndo_start_xmit = tsi108_send_packet,
- .ndo_set_multicast_list = tsi108_set_rx_mode,
+ .ndo_set_rx_mode = tsi108_set_rx_mode,
.ndo_get_stats = tsi108_get_stats,
.ndo_do_ioctl = tsi108_do_ioctl,
.ndo_set_mac_address = tsi108_set_mac,
diff --git a/drivers/net/tsi108_eth.h b/drivers/net/ethernet/tundra/tsi108_eth.h
index 5fee7d78dc6..5fee7d78dc6 100644
--- a/drivers/net/tsi108_eth.h
+++ b/drivers/net/ethernet/tundra/tsi108_eth.h
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
new file mode 100644
index 00000000000..68a9ba66feb
--- /dev/null
+++ b/drivers/net/ethernet/via/Kconfig
@@ -0,0 +1,59 @@
+#
+# VIA device configuration
+#
+
+config NET_VENDOR_VIA
+ bool "VIA devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about VIA devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_VIA
+
+config VIA_RHINE
+ tristate "VIA Rhine support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ If you have a VIA "Rhine" based network card (Rhine-I (VT86C100A),
+ Rhine-II (VT6102), or Rhine-III (VT6105)), say Y here. Rhine-type
+ Ethernet functions can also be found integrated on South Bridges
+ (e.g. VT8235).
+
+ To compile this driver as a module, choose M here. The module
+ will be called via-rhine.
+
+config VIA_RHINE_MMIO
+ bool "Use MMIO instead of PIO"
+ depends on VIA_RHINE
+ ---help---
+ This instructs the driver to use PCI shared memory (MMIO) instead of
+ programmed I/O ports (PIO). Enabling this gives an improvement in
+ processing time in parts of the driver.
+
+ If unsure, say Y.
+
+config VIA_VELOCITY
+ tristate "VIA Velocity support"
+ depends on PCI
+ select CRC32
+ select CRC_CCITT
+ select NET_CORE
+ select MII
+ ---help---
+ If you have a VIA "Velocity" based network card say Y here.
+
+ To compile this driver as a module, choose M here. The module
+ will be called via-velocity.
+
+endif # NET_VENDOR_VIA
diff --git a/drivers/net/ethernet/via/Makefile b/drivers/net/ethernet/via/Makefile
new file mode 100644
index 00000000000..46c5d4a3d8f
--- /dev/null
+++ b/drivers/net/ethernet/via/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the VIA device drivers.
+#
+
+obj-$(CONFIG_VIA_RHINE) += via-rhine.o
+obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
diff --git a/drivers/net/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 7f23ab913fd..f34dd99fe57 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -697,7 +697,7 @@ static const struct net_device_ops rhine_netdev_ops = {
.ndo_stop = rhine_close,
.ndo_start_xmit = rhine_start_tx,
.ndo_get_stats = rhine_get_stats,
- .ndo_set_multicast_list = rhine_set_rx_mode,
+ .ndo_set_rx_mode = rhine_set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 7c5336c5c37..4535d7cc848 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -42,7 +42,6 @@
*
*/
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
@@ -112,7 +111,6 @@ static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}
-
/**
* mac_set_cam_mask - Set a CAM mask
* @regs: register block for this velocity
@@ -696,7 +694,6 @@ static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
return 0;
}
-
/**
* mii_check_media_mode - check media state
* @regs: velocity registers
@@ -862,8 +859,6 @@ static u32 check_connection_type(struct mac_regs __iomem *regs)
return status;
}
-
-
/**
* velocity_set_media_mode - set media mode
* @mii_status: old MII link state
@@ -1258,6 +1253,7 @@ static void setup_queue_timers(struct velocity_info *vptr)
writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
}
}
+
/**
* setup_adaptive_interrupts - Setup interrupt suppression
*
@@ -1597,8 +1593,6 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
vptr->rx.info = NULL;
}
-
-
/**
* velocity_init_rd_ring - set up receive ring
* @vptr: velocity to configure
@@ -1672,7 +1666,6 @@ static void velocity_free_dma_rings(struct velocity_info *vptr)
pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
}
-
static int velocity_init_rings(struct velocity_info *vptr, int mtu)
{
int ret;
@@ -1735,7 +1728,6 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
tdinfo->skb = NULL;
}
-
/*
* FIXME: could we merge this with velocity_free_tx_buf ?
*/
@@ -1783,7 +1775,6 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
}
}
-
static void velocity_free_rings(struct velocity_info *vptr)
{
velocity_free_td_ring(vptr);
@@ -2021,7 +2012,6 @@ static inline void velocity_iph_realign(struct velocity_info *vptr,
}
}
-
/**
* velocity_receive_frame - received packet processor
* @vptr: velocity we are handling
@@ -2088,11 +2078,11 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
netif_rx(skb);
stats->rx_bytes += pkt_len;
+ stats->rx_packets++;
return 0;
}
-
/**
* velocity_rx_srv - service RX interrupt
* @vptr: velocity
@@ -2400,7 +2390,6 @@ static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd
return 0;
}
-
/**
* velocity_ioctl - ioctl entry point
* @dev: network device
@@ -2565,15 +2554,16 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
/* Handle fragments */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
- frag->page_offset, frag->size,
- PCI_DMA_TODEVICE);
+ tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
+ frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
td_ptr->td_buf[i + 1].pa_high = 0;
- td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+ td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
}
tdinfo->nskb_dma = i + 1;
@@ -2615,15 +2605,14 @@ out:
return NETDEV_TX_OK;
}
-
static const struct net_device_ops velocity_netdev_ops = {
.ndo_open = velocity_open,
.ndo_stop = velocity_close,
.ndo_start_xmit = velocity_xmit,
.ndo_get_stats = velocity_get_stats,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_set_multicast_list = velocity_set_multi,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_rx_mode = velocity_set_multi,
.ndo_change_mtu = velocity_change_mtu,
.ndo_do_ioctl = velocity_ioctl,
.ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
@@ -2713,7 +2702,6 @@ static u32 velocity_get_link(struct net_device *dev)
return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
}
-
/**
* velocity_found1 - set up discovered velocity card
* @pdev: PCI device
@@ -2860,7 +2848,6 @@ err_free_dev:
goto out;
}
-
#ifdef CONFIG_PM
/**
* wol_calc_crc - WOL CRC
@@ -3031,7 +3018,7 @@ static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
spin_lock_irqsave(&vptr->lock, flags);
pci_save_state(pdev);
-#ifdef ETHTOOL_GWOL
+
if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
velocity_get_ip(vptr);
velocity_save_context(vptr, &vptr->context);
@@ -3045,9 +3032,7 @@ static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
-#else
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-#endif
+
spin_unlock_irqrestore(&vptr->lock, flags);
return 0;
}
@@ -3128,13 +3113,13 @@ static int velocity_resume(struct pci_dev *pdev)
* uses this to handle all our card discover and plugging
*/
static struct pci_driver velocity_driver = {
- .name = VELOCITY_NAME,
- .id_table = velocity_id_table,
- .probe = velocity_found1,
- .remove = __devexit_p(velocity_remove1),
+ .name = VELOCITY_NAME,
+ .id_table = velocity_id_table,
+ .probe = velocity_found1,
+ .remove = __devexit_p(velocity_remove1),
#ifdef CONFIG_PM
- .suspend = velocity_suspend,
- .resume = velocity_resume,
+ .suspend = velocity_suspend,
+ .resume = velocity_resume,
#endif
};
@@ -3444,26 +3429,99 @@ static int velocity_set_coalesce(struct net_device *dev,
return 0;
}
+static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
+ "rx_all",
+ "rx_ok",
+ "tx_ok",
+ "rx_error",
+ "rx_runt_ok",
+ "rx_runt_err",
+ "rx_64",
+ "tx_64",
+ "rx_65_to_127",
+ "tx_65_to_127",
+ "rx_128_to_255",
+ "tx_128_to_255",
+ "rx_256_to_511",
+ "tx_256_to_511",
+ "rx_512_to_1023",
+ "tx_512_to_1023",
+ "rx_1024_to_1518",
+ "tx_1024_to_1518",
+ "tx_ether_collisions",
+ "rx_crc_errors",
+ "rx_jumbo",
+ "tx_jumbo",
+ "rx_mac_control_frames",
+ "tx_mac_control_frames",
+ "rx_frame_alignement_errors",
+ "rx_long_ok",
+ "rx_long_err",
+ "tx_sqe_errors",
+ "rx_no_buf",
+ "rx_symbol_errors",
+ "in_range_length_errors",
+ "late_collisions"
+};
+
+static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
+ break;
+ }
+}
+
+static int velocity_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(velocity_gstrings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void velocity_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ if (netif_running(dev)) {
+ struct velocity_info *vptr = netdev_priv(dev);
+ u32 *p = vptr->mib_counter;
+ int i;
+
+ spin_lock_irq(&vptr->lock);
+ velocity_update_hw_mibs(vptr);
+ spin_unlock_irq(&vptr->lock);
+
+ for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
+ *data++ = *p++;
+ }
+}
+
static const struct ethtool_ops velocity_ethtool_ops = {
- .get_settings = velocity_get_settings,
- .set_settings = velocity_set_settings,
- .get_drvinfo = velocity_get_drvinfo,
- .get_wol = velocity_ethtool_get_wol,
- .set_wol = velocity_ethtool_set_wol,
- .get_msglevel = velocity_get_msglevel,
- .set_msglevel = velocity_set_msglevel,
- .get_link = velocity_get_link,
- .get_coalesce = velocity_get_coalesce,
- .set_coalesce = velocity_set_coalesce,
- .begin = velocity_ethtool_up,
- .complete = velocity_ethtool_down
+ .get_settings = velocity_get_settings,
+ .set_settings = velocity_set_settings,
+ .get_drvinfo = velocity_get_drvinfo,
+ .get_wol = velocity_ethtool_get_wol,
+ .set_wol = velocity_ethtool_set_wol,
+ .get_msglevel = velocity_get_msglevel,
+ .set_msglevel = velocity_set_msglevel,
+ .get_link = velocity_get_link,
+ .get_strings = velocity_get_strings,
+ .get_sset_count = velocity_get_sset_count,
+ .get_ethtool_stats = velocity_get_ethtool_stats,
+ .get_coalesce = velocity_get_coalesce,
+ .set_coalesce = velocity_set_coalesce,
+ .begin = velocity_ethtool_up,
+ .complete = velocity_ethtool_down
};
-#ifdef CONFIG_PM
-#ifdef CONFIG_INET
+#if defined(CONFIG_PM) && defined(CONFIG_INET)
static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
{
- struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+ struct in_ifaddr *ifa = ptr;
struct net_device *dev = ifa->ifa_dev->dev;
if (dev_net(dev) == &init_net &&
@@ -3472,12 +3530,9 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notifi
return NOTIFY_DONE;
}
-#endif /* CONFIG_INET */
-#endif /* CONFIG_PM */
-#if defined(CONFIG_PM) && defined(CONFIG_INET)
static struct notifier_block velocity_inetaddr_notifier = {
- .notifier_call = velocity_netdev_event,
+ .notifier_call = velocity_netdev_event,
};
static void velocity_register_notifier(void)
diff --git a/drivers/net/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index 4cb9f13485e..4cb9f13485e 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
new file mode 100644
index 00000000000..d5a826063a8
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -0,0 +1,36 @@
+#
+# Xilink device configuration
+#
+
+config NET_VENDOR_XILINX
+ bool "Xilinx devices"
+ default y
+ depends on PPC || PPC32 || MICROBLAZE
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Xilinx devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_XILINX
+
+config XILINX_EMACLITE
+ tristate "Xilinx 10/100 Ethernet Lite support"
+ depends on (PPC32 || MICROBLAZE)
+ select PHYLIB
+ ---help---
+ This driver supports the 10/100 Ethernet Lite from Xilinx.
+
+config XILINX_LL_TEMAC
+ tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
+ depends on (PPC || MICROBLAZE)
+ select PHYLIB
+ ---help---
+ This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
+ core used in Xilinx Spartan and Virtex FPGAs
+
+endif # NET_VENDOR_XILINX
diff --git a/drivers/net/ethernet/xilinx/Makefile b/drivers/net/ethernet/xilinx/Makefile
new file mode 100644
index 00000000000..5feac734ea4
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Xilink network device drivers.
+#
+
+ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
+obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
+obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
diff --git a/drivers/net/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 522abe2ff25..522abe2ff25 100644
--- a/drivers/net/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 728fe414147..4d1658e78de 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -715,10 +715,9 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
cur_p->phys = dma_map_single(ndev->dev.parent,
- (void *)page_address(frag->page) +
- frag->page_offset,
- frag->size, DMA_TO_DEVICE);
- cur_p->len = frag->size;
+ skb_frag_address(frag),
+ frag_size(frag), DMA_TO_DEVICE);
+ cur_p->len = frag_size(frag);
cur_p->app0 = 0;
frag++;
}
@@ -922,7 +921,6 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_start_xmit = temac_start_xmit,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- //.ndo_set_multicast_list = temac_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = temac_poll_controller,
#endif
@@ -957,6 +955,32 @@ static const struct attribute_group temac_attr_group = {
.attrs = temac_device_attrs,
};
+/* ethtool support */
+static int temac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ return phy_ethtool_gset(lp->phy_dev, cmd);
+}
+
+static int temac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ return phy_ethtool_sset(lp->phy_dev, cmd);
+}
+
+static int temac_nway_reset(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ return phy_start_aneg(lp->phy_dev);
+}
+
+static const struct ethtool_ops temac_ethtool_ops = {
+ .get_settings = temac_get_settings,
+ .set_settings = temac_set_settings,
+ .nway_reset = temac_nway_reset,
+ .get_link = ethtool_op_get_link,
+};
+
static int __devinit temac_of_probe(struct platform_device *op)
{
struct device_node *np;
@@ -978,6 +1002,7 @@ static int __devinit temac_of_probe(struct platform_device *op)
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
ndev->netdev_ops = &temac_netdev_ops;
+ ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index 8cf9d4f56bb..8cf9d4f56bb 100644
--- a/drivers/net/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 8018d7d045b..8018d7d045b 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
diff --git a/drivers/net/ethernet/xircom/Kconfig b/drivers/net/ethernet/xircom/Kconfig
new file mode 100644
index 00000000000..69f56a6de82
--- /dev/null
+++ b/drivers/net/ethernet/xircom/Kconfig
@@ -0,0 +1,31 @@
+#
+# Xircom network device configuration
+#
+
+config NET_VENDOR_XIRCOM
+ bool "Xircom devices"
+ default y
+ depends on PCMCIA
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Xircom cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_XIRCOM
+
+config PCMCIA_XIRC2PS
+ tristate "Xircom 16-bit PCMCIA support"
+ depends on PCMCIA
+ ---help---
+ Say Y here if you intend to attach a Xircom 16-bit PCMCIA (PC-card)
+ Ethernet or Fast Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called xirc2ps_cs. If unsure, say N.
+
+endif # NET_VENDOR_XIRCOM
diff --git a/drivers/net/ethernet/xircom/Makefile b/drivers/net/ethernet/xircom/Makefile
new file mode 100644
index 00000000000..3b7aebd8b84
--- /dev/null
+++ b/drivers/net/ethernet/xircom/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Xircom network device drivers.
+#
+
+obj-$(CONFIG_PCMCIA_XIRC2PS) += xirc2ps_cs.o
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index e33b190d716..bbe8b7dbf3f 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -467,7 +467,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_tx_timeout = xirc_tx_timeout,
.ndo_set_config = do_config,
.ndo_do_ioctl = do_ioctl,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
new file mode 100644
index 00000000000..cf67352cea1
--- /dev/null
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -0,0 +1,32 @@
+#
+# Intel XScale IXP device configuration
+#
+
+config NET_VENDOR_XSCALE
+ bool "Intel XScale IXP devices"
+ default y
+ depends on NET_VENDOR_INTEL && ((ARM && ARCH_IXP4XX && \
+ IXP4XX_NPE && IXP4XX_QMGR) || ARCH_ENP2611)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question does not directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about XSacle IXP devices. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_XSCALE
+
+config IXP4XX_ETH
+ tristate "Intel IXP4xx Ethernet support"
+ depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+ select PHYLIB
+ ---help---
+ Say Y here if you want to use built-in Ethernet ports
+ on IXP4xx processor.
+
+source "drivers/net/ethernet/xscale/ixp2000/Kconfig"
+
+endif # NET_VENDOR_XSCALE
diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile
new file mode 100644
index 00000000000..b195b9d7fe8
--- /dev/null
+++ b/drivers/net/ethernet/xscale/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Intel XScale IXP device drivers.
+#
+
+obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
+obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/Kconfig b/drivers/net/ethernet/xscale/ixp2000/Kconfig
new file mode 100644
index 00000000000..58dbc5b876b
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/Kconfig
@@ -0,0 +1,6 @@
+config ENP2611_MSF_NET
+ tristate "Radisys ENP2611 MSF network interface support"
+ depends on ARCH_ENP2611
+ ---help---
+ This is a driver for the MSF network interface unit in
+ the IXP2400 on the Radisys ENP2611 platform.
diff --git a/drivers/net/ixp2000/Makefile b/drivers/net/ethernet/xscale/ixp2000/Makefile
index fd38351ceaa..fd38351ceaa 100644
--- a/drivers/net/ixp2000/Makefile
+++ b/drivers/net/ethernet/xscale/ixp2000/Makefile
diff --git a/drivers/net/ixp2000/caleb.c b/drivers/net/ethernet/xscale/ixp2000/caleb.c
index 7dea5b95012..7dea5b95012 100644
--- a/drivers/net/ixp2000/caleb.c
+++ b/drivers/net/ethernet/xscale/ixp2000/caleb.c
diff --git a/drivers/net/ixp2000/caleb.h b/drivers/net/ethernet/xscale/ixp2000/caleb.h
index e93a1ef5b8a..e93a1ef5b8a 100644
--- a/drivers/net/ixp2000/caleb.h
+++ b/drivers/net/ethernet/xscale/ixp2000/caleb.h
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ethernet/xscale/ixp2000/enp2611.c
index 34a6cfd1793..34a6cfd1793 100644
--- a/drivers/net/ixp2000/enp2611.c
+++ b/drivers/net/ethernet/xscale/ixp2000/enp2611.c
diff --git a/drivers/net/ixp2000/ixp2400-msf.c b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
index f5ffd7e05d2..f5ffd7e05d2 100644
--- a/drivers/net/ixp2000/ixp2400-msf.c
+++ b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
diff --git a/drivers/net/ixp2000/ixp2400-msf.h b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
index 3ac1af2771d..3ac1af2771d 100644
--- a/drivers/net/ixp2000/ixp2400-msf.h
+++ b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
diff --git a/drivers/net/ixp2000/ixp2400_rx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
index 42a73e357af..42a73e357af 100644
--- a/drivers/net/ixp2000/ixp2400_rx.uc
+++ b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
diff --git a/drivers/net/ixp2000/ixp2400_rx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
index e8aee2f81aa..e8aee2f81aa 100644
--- a/drivers/net/ixp2000/ixp2400_rx.ucode
+++ b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
diff --git a/drivers/net/ixp2000/ixp2400_tx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
index d090d1884fb..d090d1884fb 100644
--- a/drivers/net/ixp2000/ixp2400_tx.uc
+++ b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
diff --git a/drivers/net/ixp2000/ixp2400_tx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
index a433e24b0a5..a433e24b0a5 100644
--- a/drivers/net/ixp2000/ixp2400_tx.ucode
+++ b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
index e122493ab70..e122493ab70 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
diff --git a/drivers/net/ixp2000/ixpdev.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
index 391ece62324..391ece62324 100644
--- a/drivers/net/ixp2000/ixpdev.h
+++ b/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
diff --git a/drivers/net/ixp2000/ixpdev_priv.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
index 86aa08ea0c3..86aa08ea0c3 100644
--- a/drivers/net/ixp2000/ixpdev_priv.h
+++ b/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
diff --git a/drivers/net/ixp2000/pm3386.c b/drivers/net/ethernet/xscale/ixp2000/pm3386.c
index e08d3f9863b..e08d3f9863b 100644
--- a/drivers/net/ixp2000/pm3386.c
+++ b/drivers/net/ethernet/xscale/ixp2000/pm3386.c
diff --git a/drivers/net/ixp2000/pm3386.h b/drivers/net/ethernet/xscale/ixp2000/pm3386.h
index cc4183dca91..cc4183dca91 100644
--- a/drivers/net/ixp2000/pm3386.h
+++ b/drivers/net/ethernet/xscale/ixp2000/pm3386.h
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index de51e8453c1..ec96d910e9a 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1339,7 +1339,7 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_open = eth_open,
.ndo_stop = eth_close,
.ndo_start_xmit = eth_xmit,
- .ndo_set_multicast_list = eth_set_mcast_list,
+ .ndo_set_rx_mode = eth_set_mcast_list,
.ndo_do_ioctl = eth_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
new file mode 100644
index 00000000000..3a424c864f4
--- /dev/null
+++ b/drivers/net/fddi/Kconfig
@@ -0,0 +1,77 @@
+#
+# FDDI network device configuration
+#
+
+config FDDI
+ tristate "FDDI driver support"
+ depends on PCI || EISA || TC
+ ---help---
+ Fiber Distributed Data Interface is a high speed local area network
+ design; essentially a replacement for high speed Ethernet. FDDI can
+ run over copper or fiber. If you are connected to such a network and
+ want a driver for the FDDI card in your computer, say Y here (and
+ then also Y to the driver for your FDDI card, below). Most people
+ will say N.
+
+if FDDI
+
+config DEFXX
+ tristate "Digital DEFTA/DEFEA/DEFPA adapter support"
+ depends on FDDI && (PCI || EISA || TC)
+ ---help---
+ This is support for the DIGITAL series of TURBOchannel (DEFTA),
+ EISA (DEFEA) and PCI (DEFPA) controllers which can connect you
+ to a local FDDI network.
+
+ To compile this driver as a module, choose M here: the module
+ will be called defxx. If unsure, say N.
+
+config DEFXX_MMIO
+ bool
+ prompt "Use MMIO instead of PIO" if PCI || EISA
+ depends on DEFXX
+ default n if PCI || EISA
+ default y
+ ---help---
+ This instructs the driver to use EISA or PCI memory-mapped I/O
+ (MMIO) as appropriate instead of programmed I/O ports (PIO).
+ Enabling this gives an improvement in processing time in parts
+ of the driver, but it may cause problems with EISA (DEFEA)
+ adapters. TURBOchannel does not have the concept of I/O ports,
+ so MMIO is always used for these (DEFTA) adapters.
+
+ If unsure, say N.
+
+config SKFP
+ tristate "SysKonnect FDDI PCI support"
+ depends on FDDI && PCI
+ select BITREVERSE
+ ---help---
+ Say Y here if you have a SysKonnect FDDI PCI adapter.
+ The following adapters are supported by this driver:
+ - SK-5521 (SK-NET FDDI-UP)
+ - SK-5522 (SK-NET FDDI-UP DAS)
+ - SK-5541 (SK-NET FDDI-FP)
+ - SK-5543 (SK-NET FDDI-LP)
+ - SK-5544 (SK-NET FDDI-LP DAS)
+ - SK-5821 (SK-NET FDDI-UP64)
+ - SK-5822 (SK-NET FDDI-UP64 DAS)
+ - SK-5841 (SK-NET FDDI-FP64)
+ - SK-5843 (SK-NET FDDI-LP64)
+ - SK-5844 (SK-NET FDDI-LP64 DAS)
+ - Netelligent 100 FDDI DAS Fibre SC
+ - Netelligent 100 FDDI SAS Fibre SC
+ - Netelligent 100 FDDI DAS UTP
+ - Netelligent 100 FDDI SAS UTP
+ - Netelligent 100 FDDI SAS Fibre MIC
+
+ Read <file:Documentation/networking/skfp.txt> for information about
+ the driver.
+
+ Questions concerning this driver can be addressed to:
+ <linux@syskonnect.de>
+
+ To compile this driver as a module, choose M here: the module
+ will be called skfp. This is recommended.
+
+endif # FDDI
diff --git a/drivers/net/fddi/Makefile b/drivers/net/fddi/Makefile
new file mode 100644
index 00000000000..36da19c9a8a
--- /dev/null
+++ b/drivers/net/fddi/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Linux FDDI network device drivers.
+#
+
+obj-$(CONFIG_DEFXX) += defxx.o
+obj-$(CONFIG_SKFP) += skfp/
diff --git a/drivers/net/defxx.c b/drivers/net/fddi/defxx.c
index 417e1438562..4ad80f77109 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -483,7 +483,7 @@ static const struct net_device_ops dfx_netdev_ops = {
.ndo_stop = dfx_close,
.ndo_start_xmit = dfx_xmt_queue_pkt,
.ndo_get_stats = dfx_ctl_get_stats,
- .ndo_set_multicast_list = dfx_ctl_set_multicast_list,
+ .ndo_set_rx_mode = dfx_ctl_set_multicast_list,
.ndo_set_mac_address = dfx_ctl_set_mac_address,
};
diff --git a/drivers/net/defxx.h b/drivers/net/fddi/defxx.h
index 19a6f64df19..19a6f64df19 100644
--- a/drivers/net/defxx.h
+++ b/drivers/net/fddi/defxx.h
diff --git a/drivers/net/skfp/Makefile b/drivers/net/fddi/skfp/Makefile
index b0be0234abf..b0be0234abf 100644
--- a/drivers/net/skfp/Makefile
+++ b/drivers/net/fddi/skfp/Makefile
diff --git a/drivers/net/skfp/cfm.c b/drivers/net/fddi/skfp/cfm.c
index e395ace3120..e395ace3120 100644
--- a/drivers/net/skfp/cfm.c
+++ b/drivers/net/fddi/skfp/cfm.c
diff --git a/drivers/net/skfp/drvfbi.c b/drivers/net/fddi/skfp/drvfbi.c
index 07da97c303d..07da97c303d 100644
--- a/drivers/net/skfp/drvfbi.c
+++ b/drivers/net/fddi/skfp/drvfbi.c
diff --git a/drivers/net/skfp/ecm.c b/drivers/net/fddi/skfp/ecm.c
index 47d922cb3c0..47d922cb3c0 100644
--- a/drivers/net/skfp/ecm.c
+++ b/drivers/net/fddi/skfp/ecm.c
diff --git a/drivers/net/skfp/ess.c b/drivers/net/fddi/skfp/ess.c
index 2fc5987b41d..2fc5987b41d 100644
--- a/drivers/net/skfp/ess.c
+++ b/drivers/net/fddi/skfp/ess.c
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index a20ed1a9809..a20ed1a9809 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
diff --git a/drivers/net/skfp/h/cmtdef.h b/drivers/net/fddi/skfp/h/cmtdef.h
index 5a6c6122ccb..f5bc90ff2a2 100644
--- a/drivers/net/skfp/h/cmtdef.h
+++ b/drivers/net/fddi/skfp/h/cmtdef.h
@@ -477,8 +477,8 @@ struct s_plc {
/*
* function prototypes
*/
-#include "h/mbuf.h" /* Type definitions for MBUFs */
-#include "h/smtstate.h" /* struct smt_state */
+#include "mbuf.h" /* Type definitions for MBUFs */
+#include "smtstate.h" /* struct smt_state */
void hwt_restart(struct s_smc *smc); /* hwt.c */
SMbuf *smt_build_frame(struct s_smc *smc, int class, int type,
diff --git a/drivers/net/skfp/h/fddi.h b/drivers/net/fddi/skfp/h/fddi.h
index c9a28a8a383..c9a28a8a383 100644
--- a/drivers/net/skfp/h/fddi.h
+++ b/drivers/net/fddi/skfp/h/fddi.h
diff --git a/drivers/net/skfp/h/fddimib.h b/drivers/net/fddi/skfp/h/fddimib.h
index d1acdc77395..d1acdc77395 100644
--- a/drivers/net/skfp/h/fddimib.h
+++ b/drivers/net/fddi/skfp/h/fddimib.h
diff --git a/drivers/net/skfp/h/fplustm.h b/drivers/net/fddi/skfp/h/fplustm.h
index d43191ed938..d43191ed938 100644
--- a/drivers/net/skfp/h/fplustm.h
+++ b/drivers/net/fddi/skfp/h/fplustm.h
diff --git a/drivers/net/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h
index e1a7e5f683d..5924d4219e9 100644
--- a/drivers/net/skfp/h/hwmtm.h
+++ b/drivers/net/fddi/skfp/h/hwmtm.h
@@ -15,7 +15,7 @@
#ifndef _HWM_
#define _HWM_
-#include "h/mbuf.h"
+#include "mbuf.h"
/*
* MACRO for DMA synchronization:
diff --git a/drivers/net/skfp/h/mbuf.h b/drivers/net/fddi/skfp/h/mbuf.h
index f2aadcda9e7..f2aadcda9e7 100644
--- a/drivers/net/skfp/h/mbuf.h
+++ b/drivers/net/fddi/skfp/h/mbuf.h
diff --git a/drivers/net/skfp/h/osdef1st.h b/drivers/net/fddi/skfp/h/osdef1st.h
index 763ca18cbea..763ca18cbea 100644
--- a/drivers/net/skfp/h/osdef1st.h
+++ b/drivers/net/fddi/skfp/h/osdef1st.h
diff --git a/drivers/net/skfp/h/sba.h b/drivers/net/fddi/skfp/h/sba.h
index 638cf0283bc..35ddb44a112 100644
--- a/drivers/net/skfp/h/sba.h
+++ b/drivers/net/fddi/skfp/h/sba.h
@@ -19,8 +19,8 @@
#ifndef _SBA_
#define _SBA_
-#include "h/mbuf.h"
-#include "h/sba_def.h"
+#include "mbuf.h"
+#include "sba_def.h"
#ifdef SBA
diff --git a/drivers/net/skfp/h/sba_def.h b/drivers/net/fddi/skfp/h/sba_def.h
index 0459a095d0c..0459a095d0c 100644
--- a/drivers/net/skfp/h/sba_def.h
+++ b/drivers/net/fddi/skfp/h/sba_def.h
diff --git a/drivers/net/skfp/h/skfbi.h b/drivers/net/fddi/skfp/h/skfbi.h
index c1ba26c06d7..c1ba26c06d7 100644
--- a/drivers/net/skfp/h/skfbi.h
+++ b/drivers/net/fddi/skfp/h/skfbi.h
diff --git a/drivers/net/skfp/h/skfbiinc.h b/drivers/net/fddi/skfp/h/skfbiinc.h
index ac2d7192f1c..ce72557c354 100644
--- a/drivers/net/skfp/h/skfbiinc.h
+++ b/drivers/net/fddi/skfp/h/skfbiinc.h
@@ -15,7 +15,7 @@
#ifndef _SKFBIINC_
#define _SKFBIINC_
-#include "h/supern_2.h"
+#include "supern_2.h"
/*
* special defines for use into .asm files
diff --git a/drivers/net/skfp/h/smc.h b/drivers/net/fddi/skfp/h/smc.h
index c774a95902f..3ca308b2821 100644
--- a/drivers/net/skfp/h/smc.h
+++ b/drivers/net/fddi/skfp/h/smc.h
@@ -38,18 +38,18 @@
* fddi.h
*/
#ifdef OSDEF
-#include "h/osdef1st.h"
+#include "osdef1st.h"
#endif /* OSDEF */
#ifdef OEM_CONCEPT
#include "oemdef.h"
#endif /* OEM_CONCEPT */
-#include "h/smt.h"
-#include "h/cmtdef.h"
-#include "h/fddimib.h"
-#include "h/targethw.h" /* all target hw dependencies */
-#include "h/targetos.h" /* all target os dependencies */
+#include "smt.h"
+#include "cmtdef.h"
+#include "fddimib.h"
+#include "targethw.h" /* all target hw dependencies */
+#include "targetos.h" /* all target os dependencies */
#ifdef ESS
-#include "h/sba.h"
+#include "sba.h"
#endif
/*
diff --git a/drivers/net/skfp/h/smt.h b/drivers/net/fddi/skfp/h/smt.h
index 2030f9cbb24..2030f9cbb24 100644
--- a/drivers/net/skfp/h/smt.h
+++ b/drivers/net/fddi/skfp/h/smt.h
diff --git a/drivers/net/skfp/h/smt_p.h b/drivers/net/fddi/skfp/h/smt_p.h
index 99f9be9552b..99f9be9552b 100644
--- a/drivers/net/skfp/h/smt_p.h
+++ b/drivers/net/fddi/skfp/h/smt_p.h
diff --git a/drivers/net/skfp/h/smtstate.h b/drivers/net/fddi/skfp/h/smtstate.h
index 62fe695077a..62fe695077a 100644
--- a/drivers/net/skfp/h/smtstate.h
+++ b/drivers/net/fddi/skfp/h/smtstate.h
diff --git a/drivers/net/skfp/h/supern_2.h b/drivers/net/fddi/skfp/h/supern_2.h
index 0b73690280f..0b73690280f 100644
--- a/drivers/net/skfp/h/supern_2.h
+++ b/drivers/net/fddi/skfp/h/supern_2.h
diff --git a/drivers/net/skfp/h/targethw.h b/drivers/net/fddi/skfp/h/targethw.h
index 626dc726359..842a690446f 100644
--- a/drivers/net/skfp/h/targethw.h
+++ b/drivers/net/fddi/skfp/h/targethw.h
@@ -25,11 +25,11 @@
#define SK_ML_ID_2 0x30
#endif
-#include "h/skfbi.h"
+#include "skfbi.h"
#ifndef TAG_MODE
-#include "h/fplus.h"
+#include "fplus.h"
#else
-#include "h/fplustm.h"
+#include "fplustm.h"
#endif
#ifndef HW_PTR
diff --git a/drivers/net/skfp/h/targetos.h b/drivers/net/fddi/skfp/h/targetos.h
index 5d940e7b8ea..53bacc10716 100644
--- a/drivers/net/skfp/h/targetos.h
+++ b/drivers/net/fddi/skfp/h/targetos.h
@@ -58,7 +58,7 @@
#define ADDR(a) (((a)>>7) ? (outp(smc->hw.iop+B0_RAP,(a)>>7), (smc->hw.iop+( ((a)&0x7F) | ((a)>>7 ? 0x80:0)) )) : (smc->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0))))
#endif
-#include "h/hwmtm.h"
+#include "hwmtm.h"
#define TRUE 1
#define FALSE 0
diff --git a/drivers/net/skfp/h/types.h b/drivers/net/fddi/skfp/h/types.h
index 5a3bf8378f9..5a3bf8378f9 100644
--- a/drivers/net/skfp/h/types.h
+++ b/drivers/net/fddi/skfp/h/types.h
diff --git a/drivers/net/skfp/hwmtm.c b/drivers/net/fddi/skfp/hwmtm.c
index e26398b5a7d..e26398b5a7d 100644
--- a/drivers/net/skfp/hwmtm.c
+++ b/drivers/net/fddi/skfp/hwmtm.c
diff --git a/drivers/net/skfp/hwt.c b/drivers/net/fddi/skfp/hwt.c
index c0798fd2ca6..c0798fd2ca6 100644
--- a/drivers/net/skfp/hwt.c
+++ b/drivers/net/fddi/skfp/hwt.c
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c
index 88d02d0a42c..88d02d0a42c 100644
--- a/drivers/net/skfp/pcmplc.c
+++ b/drivers/net/fddi/skfp/pcmplc.c
diff --git a/drivers/net/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 9ac4665d741..9ac4665d741 100644
--- a/drivers/net/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
diff --git a/drivers/net/skfp/queue.c b/drivers/net/fddi/skfp/queue.c
index c1a0df455a5..c1a0df455a5 100644
--- a/drivers/net/skfp/queue.c
+++ b/drivers/net/fddi/skfp/queue.c
diff --git a/drivers/net/skfp/rmt.c b/drivers/net/fddi/skfp/rmt.c
index ef8d5672d9e..ef8d5672d9e 100644
--- a/drivers/net/skfp/rmt.c
+++ b/drivers/net/fddi/skfp/rmt.c
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 16c62659cdd..3d9a4596a42 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -167,7 +167,7 @@ static const struct net_device_ops skfp_netdev_ops = {
.ndo_start_xmit = skfp_send_pkt,
.ndo_get_stats = skfp_ctl_get_stats,
.ndo_change_mtu = fddi_change_mtu,
- .ndo_set_multicast_list = skfp_ctl_set_multicast_list,
+ .ndo_set_rx_mode = skfp_ctl_set_multicast_list,
.ndo_set_mac_address = skfp_ctl_set_mac_address,
.ndo_do_ioctl = skfp_ioctl,
};
diff --git a/drivers/net/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 08d94329c12..08d94329c12 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
diff --git a/drivers/net/skfp/smtdef.c b/drivers/net/fddi/skfp/smtdef.c
index 1acab0b368e..1acab0b368e 100644
--- a/drivers/net/skfp/smtdef.c
+++ b/drivers/net/fddi/skfp/smtdef.c
diff --git a/drivers/net/skfp/smtinit.c b/drivers/net/fddi/skfp/smtinit.c
index e3a0c0bc223..e3a0c0bc223 100644
--- a/drivers/net/skfp/smtinit.c
+++ b/drivers/net/fddi/skfp/smtinit.c
diff --git a/drivers/net/skfp/smttimer.c b/drivers/net/fddi/skfp/smttimer.c
index 531795e98c3..531795e98c3 100644
--- a/drivers/net/skfp/smttimer.c
+++ b/drivers/net/fddi/skfp/smttimer.c
diff --git a/drivers/net/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index f6f7baf9f27..f6f7baf9f27 100644
--- a/drivers/net/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
diff --git a/drivers/net/fs_enet/Kconfig b/drivers/net/fs_enet/Kconfig
deleted file mode 100644
index fc073b5a38c..00000000000
--- a/drivers/net/fs_enet/Kconfig
+++ /dev/null
@@ -1,34 +0,0 @@
-config FS_ENET
- tristate "Freescale Ethernet Driver"
- depends on CPM1 || CPM2 || PPC_MPC512x
- select MII
- select PHYLIB
-
-config FS_ENET_MPC5121_FEC
- def_bool y if (FS_ENET && PPC_MPC512x)
- select FS_ENET_HAS_FEC
-
-config FS_ENET_HAS_SCC
- bool "Chip has an SCC usable for ethernet"
- depends on FS_ENET && (CPM1 || CPM2)
- default y
-
-config FS_ENET_HAS_FCC
- bool "Chip has an FCC usable for ethernet"
- depends on FS_ENET && CPM2
- default y
-
-config FS_ENET_HAS_FEC
- bool "Chip has an FEC usable for ethernet"
- depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
- select FS_ENET_MDIO_FEC
- default y
-
-config FS_ENET_MDIO_FEC
- tristate "MDIO driver for FEC"
- depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
-
-config FS_ENET_MDIO_FCC
- tristate "MDIO driver for FCC"
- depends on FS_ENET && CPM2
- select MDIO_BITBANG
diff --git a/drivers/net/hippi/Kconfig b/drivers/net/hippi/Kconfig
new file mode 100644
index 00000000000..7393eb732ee
--- /dev/null
+++ b/drivers/net/hippi/Kconfig
@@ -0,0 +1,39 @@
+#
+# HIPPI network device configuration
+#
+
+config HIPPI
+ bool "HIPPI driver support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && INET && PCI
+ ---help---
+ HIgh Performance Parallel Interface (HIPPI) is a 800Mbit/sec and
+ 1600Mbit/sec dual-simplex switched or point-to-point network. HIPPI
+ can run over copper (25m) or fiber (300m on multi-mode or 10km on
+ single-mode). HIPPI networks are commonly used for clusters and to
+ connect to super computers. If you are connected to a HIPPI network
+ and have a HIPPI network card in your computer that you want to use
+ under Linux, say Y here (you must also remember to enable the driver
+ for your HIPPI card below). Most people will say N here.
+
+if HIPPI
+
+config ROADRUNNER
+ tristate "Essential RoadRunner HIPPI PCI adapter support (EXPERIMENTAL)"
+ depends on PCI
+ ---help---
+ Say Y here if this is your PCI HIPPI network card.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rrunner. If unsure, say N.
+
+config ROADRUNNER_LARGE_RINGS
+ bool "Use large TX/RX rings (EXPERIMENTAL)"
+ depends on ROADRUNNER
+ ---help---
+ If you say Y here, the RoadRunner driver will preallocate up to 2 MB
+ of additional memory to allow for fastest operation, both for
+ transmitting and receiving. This memory cannot be used by any other
+ kernel code or by user space programs. Say Y here only if you have
+ the memory.
+
+endif /* HIPPI */
diff --git a/drivers/net/hippi/Makefile b/drivers/net/hippi/Makefile
new file mode 100644
index 00000000000..b95d629baee
--- /dev/null
+++ b/drivers/net/hippi/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the HIPPI network device drivers.
+#
+
+obj-$(CONFIG_ROADRUNNER) += rrunner.o
diff --git a/drivers/net/rrunner.c b/drivers/net/hippi/rrunner.c
index e68c941926f..e68c941926f 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
diff --git a/drivers/net/rrunner.h b/drivers/net/hippi/rrunner.h
index 28169043ae4..28169043ae4 100644
--- a/drivers/net/rrunner.h
+++ b/drivers/net/hippi/rrunner.h
diff --git a/drivers/net/ibm_newemac/Kconfig b/drivers/net/ibm_newemac/Kconfig
deleted file mode 100644
index 78a1628c989..00000000000
--- a/drivers/net/ibm_newemac/Kconfig
+++ /dev/null
@@ -1,76 +0,0 @@
-config IBM_NEW_EMAC
- tristate "IBM EMAC Ethernet support"
- depends on PPC_DCR
- select CRC32
- help
- This driver supports the IBM EMAC family of Ethernet controllers
- typically found on 4xx embedded PowerPC chips, but also on the
- Axon southbridge for Cell.
-
-config IBM_NEW_EMAC_RXB
- int "Number of receive buffers"
- depends on IBM_NEW_EMAC
- default "128"
-
-config IBM_NEW_EMAC_TXB
- int "Number of transmit buffers"
- depends on IBM_NEW_EMAC
- default "64"
-
-config IBM_NEW_EMAC_POLL_WEIGHT
- int "MAL NAPI polling weight"
- depends on IBM_NEW_EMAC
- default "32"
-
-config IBM_NEW_EMAC_RX_COPY_THRESHOLD
- int "RX skb copy threshold (bytes)"
- depends on IBM_NEW_EMAC
- default "256"
-
-config IBM_NEW_EMAC_RX_SKB_HEADROOM
- int "Additional RX skb headroom (bytes)"
- depends on IBM_NEW_EMAC
- default "0"
- help
- Additional receive skb headroom. Note, that driver
- will always reserve at least 2 bytes to make IP header
- aligned, so usually there is no need to add any additional
- headroom.
-
- If unsure, set to 0.
-
-config IBM_NEW_EMAC_DEBUG
- bool "Debugging"
- depends on IBM_NEW_EMAC
- default n
-
-# The options below has to be select'ed by the respective
-# processor types or platforms
-
-config IBM_NEW_EMAC_ZMII
- bool
- default n
-
-config IBM_NEW_EMAC_RGMII
- bool
- default n
-
-config IBM_NEW_EMAC_TAH
- bool
- default n
-
-config IBM_NEW_EMAC_EMAC4
- bool
- default n
-
-config IBM_NEW_EMAC_NO_FLOW_CTRL
- bool
- default n
-
-config IBM_NEW_EMAC_MAL_CLR_ICINTSTAT
- bool
- default n
-
-config IBM_NEW_EMAC_MAL_COMMON_ERR
- bool
- default n
diff --git a/drivers/net/ibm_newemac/Makefile b/drivers/net/ibm_newemac/Makefile
deleted file mode 100644
index 0b5c9951276..00000000000
--- a/drivers/net/ibm_newemac/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the PowerPC 4xx on-chip ethernet driver
-#
-
-obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac.o
-
-ibm_newemac-y := mal.o core.o phy.o
-ibm_newemac-$(CONFIG_IBM_NEW_EMAC_ZMII) += zmii.o
-ibm_newemac-$(CONFIG_IBM_NEW_EMAC_RGMII) += rgmii.o
-ibm_newemac-$(CONFIG_IBM_NEW_EMAC_TAH) += tah.o
-ibm_newemac-$(CONFIG_IBM_NEW_EMAC_DEBUG) += debug.o
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
deleted file mode 100644
index 70cb7d8a3b5..00000000000
--- a/drivers/net/ibm_newemac/core.c
+++ /dev/null
@@ -1,3074 +0,0 @@
-/*
- * drivers/net/ibm_newemac/core.c
- *
- * Driver for PowerPC 4xx on-chip ethernet controller.
- *
- * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- *
- * Based on the arch/ppc version of the driver:
- *
- * Copyright (c) 2004, 2005 Zultys Technologies.
- * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
- *
- * Based on original work by
- * Matt Porter <mporter@kernel.crashing.org>
- * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
- * Armin Kuster <akuster@mvista.com>
- * Johnnie Peters <jpeters@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/crc32.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/bitops.h>
-#include <linux/workqueue.h>
-#include <linux/of.h>
-#include <linux/of_net.h>
-#include <linux/slab.h>
-
-#include <asm/processor.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/uaccess.h>
-#include <asm/dcr.h>
-#include <asm/dcr-regs.h>
-
-#include "core.h"
-
-/*
- * Lack of dma_unmap_???? calls is intentional.
- *
- * API-correct usage requires additional support state information to be
- * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
- * EMAC design (e.g. TX buffer passed from network stack can be split into
- * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
- * maintaining such information will add additional overhead.
- * Current DMA API implementation for 4xx processors only ensures cache coherency
- * and dma_unmap_???? routines are empty and are likely to stay this way.
- * I decided to omit dma_unmap_??? calls because I don't want to add additional
- * complexity just for the sake of following some abstract API, when it doesn't
- * add any real benefit to the driver. I understand that this decision maybe
- * controversial, but I really tried to make code API-correct and efficient
- * at the same time and didn't come up with code I liked :(. --ebs
- */
-
-#define DRV_NAME "emac"
-#define DRV_VERSION "3.54"
-#define DRV_DESC "PPC 4xx OCP EMAC driver"
-
-MODULE_DESCRIPTION(DRV_DESC);
-MODULE_AUTHOR
- ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
-MODULE_LICENSE("GPL");
-
-/*
- * PPC64 doesn't (yet) have a cacheable_memcpy
- */
-#ifdef CONFIG_PPC64
-#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
-#endif
-
-/* minimum number of free TX descriptors required to wake up TX process */
-#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
-
-/* If packet size is less than this number, we allocate small skb and copy packet
- * contents into it instead of just sending original big skb up
- */
-#define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
-
-/* Since multiple EMACs share MDIO lines in various ways, we need
- * to avoid re-using the same PHY ID in cases where the arch didn't
- * setup precise phy_map entries
- *
- * XXX This is something that needs to be reworked as we can have multiple
- * EMAC "sets" (multiple ASICs containing several EMACs) though we can
- * probably require in that case to have explicit PHY IDs in the device-tree
- */
-static u32 busy_phy_map;
-static DEFINE_MUTEX(emac_phy_map_lock);
-
-/* This is the wait queue used to wait on any event related to probe, that
- * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
- */
-static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
-
-/* Having stable interface names is a doomed idea. However, it would be nice
- * if we didn't have completely random interface names at boot too :-) It's
- * just a matter of making everybody's life easier. Since we are doing
- * threaded probing, it's a bit harder though. The base idea here is that
- * we make up a list of all emacs in the device-tree before we register the
- * driver. Every emac will then wait for the previous one in the list to
- * initialize before itself. We should also keep that list ordered by
- * cell_index.
- * That list is only 4 entries long, meaning that additional EMACs don't
- * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
- */
-
-#define EMAC_BOOT_LIST_SIZE 4
-static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
-
-/* How long should I wait for dependent devices ? */
-#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
-
-/* I don't want to litter system log with timeout errors
- * when we have brain-damaged PHY.
- */
-static inline void emac_report_timeout_error(struct emac_instance *dev,
- const char *error)
-{
- if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
- EMAC_FTR_460EX_PHY_CLK_FIX |
- EMAC_FTR_440EP_PHY_CLK_FIX))
- DBG(dev, "%s" NL, error);
- else if (net_ratelimit())
- printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
- error);
-}
-
-/* EMAC PHY clock workaround:
- * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
- * which allows controlling each EMAC clock
- */
-static inline void emac_rx_clk_tx(struct emac_instance *dev)
-{
-#ifdef CONFIG_PPC_DCR_NATIVE
- if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
- dcri_clrset(SDR0, SDR0_MFR,
- 0, SDR0_MFR_ECS >> dev->cell_index);
-#endif
-}
-
-static inline void emac_rx_clk_default(struct emac_instance *dev)
-{
-#ifdef CONFIG_PPC_DCR_NATIVE
- if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
- dcri_clrset(SDR0, SDR0_MFR,
- SDR0_MFR_ECS >> dev->cell_index, 0);
-#endif
-}
-
-/* PHY polling intervals */
-#define PHY_POLL_LINK_ON HZ
-#define PHY_POLL_LINK_OFF (HZ / 5)
-
-/* Graceful stop timeouts in us.
- * We should allow up to 1 frame time (full-duplex, ignoring collisions)
- */
-#define STOP_TIMEOUT_10 1230
-#define STOP_TIMEOUT_100 124
-#define STOP_TIMEOUT_1000 13
-#define STOP_TIMEOUT_1000_JUMBO 73
-
-static unsigned char default_mcast_addr[] = {
- 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
-};
-
-/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
-static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
- "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
- "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
- "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
- "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
- "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
- "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
- "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
- "rx_bad_packet", "rx_runt_packet", "rx_short_event",
- "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
- "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
- "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
- "tx_bd_excessive_collisions", "tx_bd_late_collision",
- "tx_bd_multple_collisions", "tx_bd_single_collision",
- "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
- "tx_errors"
-};
-
-static irqreturn_t emac_irq(int irq, void *dev_instance);
-static void emac_clean_tx_ring(struct emac_instance *dev);
-static void __emac_set_multicast_list(struct emac_instance *dev);
-
-static inline int emac_phy_supports_gige(int phy_mode)
-{
- return phy_mode == PHY_MODE_GMII ||
- phy_mode == PHY_MODE_RGMII ||
- phy_mode == PHY_MODE_SGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
-}
-
-static inline int emac_phy_gpcs(int phy_mode)
-{
- return phy_mode == PHY_MODE_SGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
-}
-
-static inline void emac_tx_enable(struct emac_instance *dev)
-{
- struct emac_regs __iomem *p = dev->emacp;
- u32 r;
-
- DBG(dev, "tx_enable" NL);
-
- r = in_be32(&p->mr0);
- if (!(r & EMAC_MR0_TXE))
- out_be32(&p->mr0, r | EMAC_MR0_TXE);
-}
-
-static void emac_tx_disable(struct emac_instance *dev)
-{
- struct emac_regs __iomem *p = dev->emacp;
- u32 r;
-
- DBG(dev, "tx_disable" NL);
-
- r = in_be32(&p->mr0);
- if (r & EMAC_MR0_TXE) {
- int n = dev->stop_timeout;
- out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
- while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
- udelay(1);
- --n;
- }
- if (unlikely(!n))
- emac_report_timeout_error(dev, "TX disable timeout");
- }
-}
-
-static void emac_rx_enable(struct emac_instance *dev)
-{
- struct emac_regs __iomem *p = dev->emacp;
- u32 r;
-
- if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
- goto out;
-
- DBG(dev, "rx_enable" NL);
-
- r = in_be32(&p->mr0);
- if (!(r & EMAC_MR0_RXE)) {
- if (unlikely(!(r & EMAC_MR0_RXI))) {
- /* Wait if previous async disable is still in progress */
- int n = dev->stop_timeout;
- while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
- udelay(1);
- --n;
- }
- if (unlikely(!n))
- emac_report_timeout_error(dev,
- "RX disable timeout");
- }
- out_be32(&p->mr0, r | EMAC_MR0_RXE);
- }
- out:
- ;
-}
-
-static void emac_rx_disable(struct emac_instance *dev)
-{
- struct emac_regs __iomem *p = dev->emacp;
- u32 r;
-
- DBG(dev, "rx_disable" NL);
-
- r = in_be32(&p->mr0);
- if (r & EMAC_MR0_RXE) {
- int n = dev->stop_timeout;
- out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
- while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
- udelay(1);
- --n;
- }
- if (unlikely(!n))
- emac_report_timeout_error(dev, "RX disable timeout");
- }
-}
-
-static inline void emac_netif_stop(struct emac_instance *dev)
-{
- netif_tx_lock_bh(dev->ndev);
- netif_addr_lock(dev->ndev);
- dev->no_mcast = 1;
- netif_addr_unlock(dev->ndev);
- netif_tx_unlock_bh(dev->ndev);
- dev->ndev->trans_start = jiffies; /* prevent tx timeout */
- mal_poll_disable(dev->mal, &dev->commac);
- netif_tx_disable(dev->ndev);
-}
-
-static inline void emac_netif_start(struct emac_instance *dev)
-{
- netif_tx_lock_bh(dev->ndev);
- netif_addr_lock(dev->ndev);
- dev->no_mcast = 0;
- if (dev->mcast_pending && netif_running(dev->ndev))
- __emac_set_multicast_list(dev);
- netif_addr_unlock(dev->ndev);
- netif_tx_unlock_bh(dev->ndev);
-
- netif_wake_queue(dev->ndev);
-
- /* NOTE: unconditional netif_wake_queue is only appropriate
- * so long as all callers are assured to have free tx slots
- * (taken from tg3... though the case where that is wrong is
- * not terribly harmful)
- */
- mal_poll_enable(dev->mal, &dev->commac);
-}
-
-static inline void emac_rx_disable_async(struct emac_instance *dev)
-{
- struct emac_regs __iomem *p = dev->emacp;
- u32 r;
-
- DBG(dev, "rx_disable_async" NL);
-
- r = in_be32(&p->mr0);
- if (r & EMAC_MR0_RXE)
- out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
-}
-
-static int emac_reset(struct emac_instance *dev)
-{
- struct emac_regs __iomem *p = dev->emacp;
- int n = 20;
-
- DBG(dev, "reset" NL);
-
- if (!dev->reset_failed) {
- /* 40x erratum suggests stopping RX channel before reset,
- * we stop TX as well
- */
- emac_rx_disable(dev);
- emac_tx_disable(dev);
- }
-
-#ifdef CONFIG_PPC_DCR_NATIVE
- /* Enable internal clock source */
- if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
- dcri_clrset(SDR0, SDR0_ETH_CFG,
- 0, SDR0_ETH_CFG_ECS << dev->cell_index);
-#endif
-
- out_be32(&p->mr0, EMAC_MR0_SRST);
- while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
- --n;
-
-#ifdef CONFIG_PPC_DCR_NATIVE
- /* Enable external clock source */
- if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
- dcri_clrset(SDR0, SDR0_ETH_CFG,
- SDR0_ETH_CFG_ECS << dev->cell_index, 0);
-#endif
-
- if (n) {
- dev->reset_failed = 0;
- return 0;
- } else {
- emac_report_timeout_error(dev, "reset timeout");
- dev->reset_failed = 1;
- return -ETIMEDOUT;
- }
-}
-
-static void emac_hash_mc(struct emac_instance *dev)
-{
- const int regs = EMAC_XAHT_REGS(dev);
- u32 *gaht_base = emac_gaht_base(dev);
- u32 gaht_temp[regs];
- struct netdev_hw_addr *ha;
- int i;
-
- DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
-
- memset(gaht_temp, 0, sizeof (gaht_temp));
-
- netdev_for_each_mc_addr(ha, dev->ndev) {
- int slot, reg, mask;
- DBG2(dev, "mc %pM" NL, ha->addr);
-
- slot = EMAC_XAHT_CRC_TO_SLOT(dev,
- ether_crc(ETH_ALEN, ha->addr));
- reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
- mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
-
- gaht_temp[reg] |= mask;
- }
-
- for (i = 0; i < regs; i++)
- out_be32(gaht_base + i, gaht_temp[i]);
-}
-
-static inline u32 emac_iff2rmr(struct net_device *ndev)
-{
- struct emac_instance *dev = netdev_priv(ndev);
- u32 r;
-
- r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
-
- if (emac_has_feature(dev, EMAC_FTR_EMAC4))
- r |= EMAC4_RMR_BASE;
- else
- r |= EMAC_RMR_BASE;
-
- if (ndev->flags & IFF_PROMISC)
- r |= EMAC_RMR_PME;
- else if (ndev->flags & IFF_ALLMULTI ||
- (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
- r |= EMAC_RMR_PMME;
- else if (!netdev_mc_empty(ndev))
- r |= EMAC_RMR_MAE;
-
- return r;
-}
-
-static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
-{
- u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
-
- DBG2(dev, "__emac_calc_base_mr1" NL);
-
- switch(tx_size) {
- case 2048:
- ret |= EMAC_MR1_TFS_2K;
- break;
- default:
- printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
- dev->ndev->name, tx_size);
- }
-
- switch(rx_size) {
- case 16384:
- ret |= EMAC_MR1_RFS_16K;
- break;
- case 4096:
- ret |= EMAC_MR1_RFS_4K;
- break;
- default:
- printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
- dev->ndev->name, rx_size);
- }
-
- return ret;
-}
-
-static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
-{
- u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
- EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
-
- DBG2(dev, "__emac4_calc_base_mr1" NL);
-
- switch(tx_size) {
- case 16384:
- ret |= EMAC4_MR1_TFS_16K;
- break;
- case 4096:
- ret |= EMAC4_MR1_TFS_4K;
- break;
- case 2048:
- ret |= EMAC4_MR1_TFS_2K;
- break;
- default:
- printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
- dev->ndev->name, tx_size);
- }
-
- switch(rx_size) {
- case 16384:
- ret |= EMAC4_MR1_RFS_16K;
- break;
- case 4096:
- ret |= EMAC4_MR1_RFS_4K;
- break;
- case 2048:
- ret |= EMAC4_MR1_RFS_2K;
- break;
- default:
- printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
- dev->ndev->name, rx_size);
- }
-
- return ret;
-}
-
-static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
-{
- return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
- __emac4_calc_base_mr1(dev, tx_size, rx_size) :
- __emac_calc_base_mr1(dev, tx_size, rx_size);
-}
-
-static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
-{
- if (emac_has_feature(dev, EMAC_FTR_EMAC4))
- return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
- else
- return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
-}
-
-static inline u32 emac_calc_rwmr(struct emac_instance *dev,
- unsigned int low, unsigned int high)
-{
- if (emac_has_feature(dev, EMAC_FTR_EMAC4))
- return (low << 22) | ( (high & 0x3ff) << 6);
- else
- return (low << 23) | ( (high & 0x1ff) << 7);
-}
-
-static int emac_configure(struct emac_instance *dev)
-{
- struct emac_regs __iomem *p = dev->emacp;
- struct net_device *ndev = dev->ndev;
- int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
- u32 r, mr1 = 0;
-
- DBG(dev, "configure" NL);
-
- if (!link) {
- out_be32(&p->mr1, in_be32(&p->mr1)
- | EMAC_MR1_FDE | EMAC_MR1_ILE);
- udelay(100);
- } else if (emac_reset(dev) < 0)
- return -ETIMEDOUT;
-
- if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
- tah_reset(dev->tah_dev);
-
- DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
- link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
-
- /* Default fifo sizes */
- tx_size = dev->tx_fifo_size;
- rx_size = dev->rx_fifo_size;
-
- /* No link, force loopback */
- if (!link)
- mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
-
- /* Check for full duplex */
- else if (dev->phy.duplex == DUPLEX_FULL)
- mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
-
- /* Adjust fifo sizes, mr1 and timeouts based on link speed */
- dev->stop_timeout = STOP_TIMEOUT_10;
- switch (dev->phy.speed) {
- case SPEED_1000:
- if (emac_phy_gpcs(dev->phy.mode)) {
- mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
- (dev->phy.gpcs_address != 0xffffffff) ?
- dev->phy.gpcs_address : dev->phy.address);
-
- /* Put some arbitrary OUI, Manuf & Rev IDs so we can
- * identify this GPCS PHY later.
- */
- out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
- } else
- mr1 |= EMAC_MR1_MF_1000;
-
- /* Extended fifo sizes */
- tx_size = dev->tx_fifo_size_gige;
- rx_size = dev->rx_fifo_size_gige;
-
- if (dev->ndev->mtu > ETH_DATA_LEN) {
- if (emac_has_feature(dev, EMAC_FTR_EMAC4))
- mr1 |= EMAC4_MR1_JPSM;
- else
- mr1 |= EMAC_MR1_JPSM;
- dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
- } else
- dev->stop_timeout = STOP_TIMEOUT_1000;
- break;
- case SPEED_100:
- mr1 |= EMAC_MR1_MF_100;
- dev->stop_timeout = STOP_TIMEOUT_100;
- break;
- default: /* make gcc happy */
- break;
- }
-
- if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
- rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
- dev->phy.speed);
- if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
- zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
-
- /* on 40x erratum forces us to NOT use integrated flow control,
- * let's hope it works on 44x ;)
- */
- if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
- dev->phy.duplex == DUPLEX_FULL) {
- if (dev->phy.pause)
- mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
- else if (dev->phy.asym_pause)
- mr1 |= EMAC_MR1_APP;
- }
-
- /* Add base settings & fifo sizes & program MR1 */
- mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
- out_be32(&p->mr1, mr1);
-
- /* Set individual MAC address */
- out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
- out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
- (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
- ndev->dev_addr[5]);
-
- /* VLAN Tag Protocol ID */
- out_be32(&p->vtpid, 0x8100);
-
- /* Receive mode register */
- r = emac_iff2rmr(ndev);
- if (r & EMAC_RMR_MAE)
- emac_hash_mc(dev);
- out_be32(&p->rmr, r);
-
- /* FIFOs thresholds */
- if (emac_has_feature(dev, EMAC_FTR_EMAC4))
- r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
- tx_size / 2 / dev->fifo_entry_size);
- else
- r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
- tx_size / 2 / dev->fifo_entry_size);
- out_be32(&p->tmr1, r);
- out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
-
- /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
- there should be still enough space in FIFO to allow the our link
- partner time to process this frame and also time to send PAUSE
- frame itself.
-
- Here is the worst case scenario for the RX FIFO "headroom"
- (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
-
- 1) One maximum-length frame on TX 1522 bytes
- 2) One PAUSE frame time 64 bytes
- 3) PAUSE frame decode time allowance 64 bytes
- 4) One maximum-length frame on RX 1522 bytes
- 5) Round-trip propagation delay of the link (100Mb) 15 bytes
- ----------
- 3187 bytes
-
- I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
- low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
- */
- r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
- rx_size / 4 / dev->fifo_entry_size);
- out_be32(&p->rwmr, r);
-
- /* Set PAUSE timer to the maximum */
- out_be32(&p->ptr, 0xffff);
-
- /* IRQ sources */
- r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
- EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
- EMAC_ISR_IRE | EMAC_ISR_TE;
- if (emac_has_feature(dev, EMAC_FTR_EMAC4))
- r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
- EMAC4_ISR_RXOE | */;
- out_be32(&p->iser, r);
-
- /* We need to take GPCS PHY out of isolate mode after EMAC reset */
- if (emac_phy_gpcs(dev->phy.mode)) {
- if (dev->phy.gpcs_address != 0xffffffff)
- emac_mii_reset_gpcs(&dev->phy);
- else
- emac_mii_reset_phy(&dev->phy);
- }
-
- return 0;
-}
-
-static void emac_reinitialize(struct emac_instance *dev)
-{
- DBG(dev, "reinitialize" NL);
-
- emac_netif_stop(dev);
- if (!emac_configure(dev)) {
- emac_tx_enable(dev);
- emac_rx_enable(dev);
- }
- emac_netif_start(dev);
-}
-
-static void emac_full_tx_reset(struct emac_instance *dev)
-{
- DBG(dev, "full_tx_reset" NL);
-
- emac_tx_disable(dev);
- mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
- emac_clean_tx_ring(dev);
- dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
-
- emac_configure(dev);
-
- mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
- emac_tx_enable(dev);
- emac_rx_enable(dev);
-}
-
-static void emac_reset_work(struct work_struct *work)
-{
- struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
-
- DBG(dev, "reset_work" NL);
-
- mutex_lock(&dev->link_lock);
- if (dev->opened) {
- emac_netif_stop(dev);
- emac_full_tx_reset(dev);
- emac_netif_start(dev);
- }
- mutex_unlock(&dev->link_lock);
-}
-
-static void emac_tx_timeout(struct net_device *ndev)
-{
- struct emac_instance *dev = netdev_priv(ndev);
-
- DBG(dev, "tx_timeout" NL);
-
- schedule_work(&dev->reset_work);
-}
-
-
-static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
-{
- int done = !!(stacr & EMAC_STACR_OC);
-
- if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
- done = !done;
-
- return done;
-};
-
-static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
-{
- struct emac_regs __iomem *p = dev->emacp;
- u32 r = 0;
- int n, err = -ETIMEDOUT;
-
- mutex_lock(&dev->mdio_lock);
-
- DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
-
- /* Enable proper MDIO port */
- if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
- zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
- if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
- rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
-
- /* Wait for management interface to become idle */
- n = 20;
- while (!emac_phy_done(dev, in_be32(&p->stacr))) {
- udelay(1);
- if (!--n) {
- DBG2(dev, " -> timeout wait idle\n");
- goto bail;
- }
- }
-
- /* Issue read command */
- if (emac_has_feature(dev, EMAC_FTR_EMAC4))
- r = EMAC4_STACR_BASE(dev->opb_bus_freq);
- else
- r = EMAC_STACR_BASE(dev->opb_bus_freq);
- if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
- r |= EMAC_STACR_OC;
- if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
- r |= EMACX_STACR_STAC_READ;
- else
- r |= EMAC_STACR_STAC_READ;
- r |= (reg & EMAC_STACR_PRA_MASK)
- | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
- out_be32(&p->stacr, r);
-
- /* Wait for read to complete */
- n = 200;
- while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
- udelay(1);
- if (!--n) {
- DBG2(dev, " -> timeout wait complete\n");
- goto bail;
- }
- }
-
- if (unlikely(r & EMAC_STACR_PHYE)) {
- DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
- err = -EREMOTEIO;
- goto bail;
- }
-
- r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
-
- DBG2(dev, "mdio_read -> %04x" NL, r);
- err = 0;
- bail:
- if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
- rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
- if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
- zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
- mutex_unlock(&dev->mdio_lock);
-
- return err == 0 ? r : err;
-}
-
-static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
- u16 val)
-{
- struct emac_regs __iomem *p = dev->emacp;
- u32 r = 0;
- int n, err = -ETIMEDOUT;
-
- mutex_lock(&dev->mdio_lock);
-
- DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
-
- /* Enable proper MDIO port */
- if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
- zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
- if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
- rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
-
- /* Wait for management interface to be idle */
- n = 20;
- while (!emac_phy_done(dev, in_be32(&p->stacr))) {
- udelay(1);
- if (!--n) {
- DBG2(dev, " -> timeout wait idle\n");
- goto bail;
- }
- }
-
- /* Issue write command */
- if (emac_has_feature(dev, EMAC_FTR_EMAC4))
- r = EMAC4_STACR_BASE(dev->opb_bus_freq);
- else
- r = EMAC_STACR_BASE(dev->opb_bus_freq);
- if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
- r |= EMAC_STACR_OC;
- if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
- r |= EMACX_STACR_STAC_WRITE;
- else
- r |= EMAC_STACR_STAC_WRITE;
- r |= (reg & EMAC_STACR_PRA_MASK) |
- ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
- (val << EMAC_STACR_PHYD_SHIFT);
- out_be32(&p->stacr, r);
-
- /* Wait for write to complete */
- n = 200;
- while (!emac_phy_done(dev, in_be32(&p->stacr))) {
- udelay(1);
- if (!--n) {
- DBG2(dev, " -> timeout wait complete\n");
- goto bail;
- }
- }
- err = 0;
- bail:
- if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
- rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
- if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
- zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
- mutex_unlock(&dev->mdio_lock);
-}
-
-static int emac_mdio_read(struct net_device *ndev, int id, int reg)
-{
- struct emac_instance *dev = netdev_priv(ndev);
- int res;
-
- res = __emac_mdio_read((dev->mdio_instance &&
- dev->phy.gpcs_address != id) ?
- dev->mdio_instance : dev,
- (u8) id, (u8) reg);
- return res;
-}
-
-static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
-{
- struct emac_instance *dev = netdev_priv(ndev);
-
- __emac_mdio_write((dev->mdio_instance &&
- dev->phy.gpcs_address != id) ?
- dev->mdio_instance : dev,
- (u8) id, (u8) reg, (u16) val);
-}
-
-/* Tx lock BH */
-static void __emac_set_multicast_list(struct emac_instance *dev)
-{
- struct emac_regs __iomem *p = dev->emacp;
- u32 rmr = emac_iff2rmr(dev->ndev);
-
- DBG(dev, "__multicast %08x" NL, rmr);
-
- /* I decided to relax register access rules here to avoid
- * full EMAC reset.
- *
- * There is a real problem with EMAC4 core if we use MWSW_001 bit
- * in MR1 register and do a full EMAC reset.
- * One TX BD status update is delayed and, after EMAC reset, it
- * never happens, resulting in TX hung (it'll be recovered by TX
- * timeout handler eventually, but this is just gross).
- * So we either have to do full TX reset or try to cheat here :)
- *
- * The only required change is to RX mode register, so I *think* all
- * we need is just to stop RX channel. This seems to work on all
- * tested SoCs. --ebs
- *
- * If we need the full reset, we might just trigger the workqueue
- * and do it async... a bit nasty but should work --BenH
- */
- dev->mcast_pending = 0;
- emac_rx_disable(dev);
- if (rmr & EMAC_RMR_MAE)
- emac_hash_mc(dev);
- out_be32(&p->rmr, rmr);
- emac_rx_enable(dev);
-}
-
-/* Tx lock BH */
-static void emac_set_multicast_list(struct net_device *ndev)
-{
- struct emac_instance *dev = netdev_priv(ndev);
-
- DBG(dev, "multicast" NL);
-
- BUG_ON(!netif_running(dev->ndev));
-
- if (dev->no_mcast) {
- dev->mcast_pending = 1;
- return;
- }
- __emac_set_multicast_list(dev);
-}
-
-static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
-{
- int rx_sync_size = emac_rx_sync_size(new_mtu);
- int rx_skb_size = emac_rx_skb_size(new_mtu);
- int i, ret = 0;
-
- mutex_lock(&dev->link_lock);
- emac_netif_stop(dev);
- emac_rx_disable(dev);
- mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
-
- if (dev->rx_sg_skb) {
- ++dev->estats.rx_dropped_resize;
- dev_kfree_skb(dev->rx_sg_skb);
- dev->rx_sg_skb = NULL;
- }
-
- /* Make a first pass over RX ring and mark BDs ready, dropping
- * non-processed packets on the way. We need this as a separate pass
- * to simplify error recovery in the case of allocation failure later.
- */
- for (i = 0; i < NUM_RX_BUFF; ++i) {
- if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
- ++dev->estats.rx_dropped_resize;
-
- dev->rx_desc[i].data_len = 0;
- dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
- (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
- }
-
- /* Reallocate RX ring only if bigger skb buffers are required */
- if (rx_skb_size <= dev->rx_skb_size)
- goto skip;
-
- /* Second pass, allocate new skbs */
- for (i = 0; i < NUM_RX_BUFF; ++i) {
- struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
- if (!skb) {
- ret = -ENOMEM;
- goto oom;
- }
-
- BUG_ON(!dev->rx_skb[i]);
- dev_kfree_skb(dev->rx_skb[i]);
-
- skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
- dev->rx_desc[i].data_ptr =
- dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
- DMA_FROM_DEVICE) + 2;
- dev->rx_skb[i] = skb;
- }
- skip:
- /* Check if we need to change "Jumbo" bit in MR1 */
- if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
- /* This is to prevent starting RX channel in emac_rx_enable() */
- set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
-
- dev->ndev->mtu = new_mtu;
- emac_full_tx_reset(dev);
- }
-
- mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
- oom:
- /* Restart RX */
- clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
- dev->rx_slot = 0;
- mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
- emac_rx_enable(dev);
- emac_netif_start(dev);
- mutex_unlock(&dev->link_lock);
-
- return ret;
-}
-
-/* Process ctx, rtnl_lock semaphore */
-static int emac_change_mtu(struct net_device *ndev, int new_mtu)
-{
- struct emac_instance *dev = netdev_priv(ndev);
- int ret = 0;
-
- if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
- return -EINVAL;
-
- DBG(dev, "change_mtu(%d)" NL, new_mtu);
-
- if (netif_running(ndev)) {
- /* Check if we really need to reinitialize RX ring */
- if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
- ret = emac_resize_rx_ring(dev, new_mtu);
- }
-
- if (!ret) {
- ndev->mtu = new_mtu;
- dev->rx_skb_size = emac_rx_skb_size(new_mtu);
- dev->rx_sync_size = emac_rx_sync_size(new_mtu);
- }
-
- return ret;
-}
-
-static void emac_clean_tx_ring(struct emac_instance *dev)
-{
- int i;
-
- for (i = 0; i < NUM_TX_BUFF; ++i) {
- if (dev->tx_skb[i]) {
- dev_kfree_skb(dev->tx_skb[i]);
- dev->tx_skb[i] = NULL;
- if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
- ++dev->estats.tx_dropped;
- }
- dev->tx_desc[i].ctrl = 0;
- dev->tx_desc[i].data_ptr = 0;
- }
-}
-
-static void emac_clean_rx_ring(struct emac_instance *dev)
-{
- int i;
-
- for (i = 0; i < NUM_RX_BUFF; ++i)
- if (dev->rx_skb[i]) {
- dev->rx_desc[i].ctrl = 0;
- dev_kfree_skb(dev->rx_skb[i]);
- dev->rx_skb[i] = NULL;
- dev->rx_desc[i].data_ptr = 0;
- }
-
- if (dev->rx_sg_skb) {
- dev_kfree_skb(dev->rx_sg_skb);
- dev->rx_sg_skb = NULL;
- }
-}
-
-static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
- gfp_t flags)
-{
- struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
- if (unlikely(!skb))
- return -ENOMEM;
-
- dev->rx_skb[slot] = skb;
- dev->rx_desc[slot].data_len = 0;
-
- skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
- dev->rx_desc[slot].data_ptr =
- dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
- DMA_FROM_DEVICE) + 2;
- wmb();
- dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
- (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
-
- return 0;
-}
-
-static void emac_print_link_status(struct emac_instance *dev)
-{
- if (netif_carrier_ok(dev->ndev))
- printk(KERN_INFO "%s: link is up, %d %s%s\n",
- dev->ndev->name, dev->phy.speed,
- dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
- dev->phy.pause ? ", pause enabled" :
- dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
- else
- printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
-}
-
-/* Process ctx, rtnl_lock semaphore */
-static int emac_open(struct net_device *ndev)
-{
- struct emac_instance *dev = netdev_priv(ndev);
- int err, i;
-
- DBG(dev, "open" NL);
-
- /* Setup error IRQ handler */
- err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
- if (err) {
- printk(KERN_ERR "%s: failed to request IRQ %d\n",
- ndev->name, dev->emac_irq);
- return err;
- }
-
- /* Allocate RX ring */
- for (i = 0; i < NUM_RX_BUFF; ++i)
- if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
- printk(KERN_ERR "%s: failed to allocate RX ring\n",
- ndev->name);
- goto oom;
- }
-
- dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
- clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
- dev->rx_sg_skb = NULL;
-
- mutex_lock(&dev->link_lock);
- dev->opened = 1;
-
- /* Start PHY polling now.
- */
- if (dev->phy.address >= 0) {
- int link_poll_interval;
- if (dev->phy.def->ops->poll_link(&dev->phy)) {
- dev->phy.def->ops->read_link(&dev->phy);
- emac_rx_clk_default(dev);
- netif_carrier_on(dev->ndev);
- link_poll_interval = PHY_POLL_LINK_ON;
- } else {
- emac_rx_clk_tx(dev);
- netif_carrier_off(dev->ndev);
- link_poll_interval = PHY_POLL_LINK_OFF;
- }
- dev->link_polling = 1;
- wmb();
- schedule_delayed_work(&dev->link_work, link_poll_interval);
- emac_print_link_status(dev);
- } else
- netif_carrier_on(dev->ndev);
-
- /* Required for Pause packet support in EMAC */
- dev_mc_add_global(ndev, default_mcast_addr);
-
- emac_configure(dev);
- mal_poll_add(dev->mal, &dev->commac);
- mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
- mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
- mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
- emac_tx_enable(dev);
- emac_rx_enable(dev);
- emac_netif_start(dev);
-
- mutex_unlock(&dev->link_lock);
-
- return 0;
- oom:
- emac_clean_rx_ring(dev);
- free_irq(dev->emac_irq, dev);
-
- return -ENOMEM;
-}
-
-/* BHs disabled */
-#if 0
-static int emac_link_differs(struct emac_instance *dev)
-{
- u32 r = in_be32(&dev->emacp->mr1);
-
- int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
- int speed, pause, asym_pause;
-
- if (r & EMAC_MR1_MF_1000)
- speed = SPEED_1000;
- else if (r & EMAC_MR1_MF_100)
- speed = SPEED_100;
- else
- speed = SPEED_10;
-
- switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
- case (EMAC_MR1_EIFC | EMAC_MR1_APP):
- pause = 1;
- asym_pause = 0;
- break;
- case EMAC_MR1_APP:
- pause = 0;
- asym_pause = 1;
- break;
- default:
- pause = asym_pause = 0;
- }
- return speed != dev->phy.speed || duplex != dev->phy.duplex ||
- pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
-}
-#endif
-
-static void emac_link_timer(struct work_struct *work)
-{
- struct emac_instance *dev =
- container_of(to_delayed_work(work),
- struct emac_instance, link_work);
- int link_poll_interval;
-
- mutex_lock(&dev->link_lock);
- DBG2(dev, "link timer" NL);
-
- if (!dev->opened)
- goto bail;
-
- if (dev->phy.def->ops->poll_link(&dev->phy)) {
- if (!netif_carrier_ok(dev->ndev)) {
- emac_rx_clk_default(dev);
- /* Get new link parameters */
- dev->phy.def->ops->read_link(&dev->phy);
-
- netif_carrier_on(dev->ndev);
- emac_netif_stop(dev);
- emac_full_tx_reset(dev);
- emac_netif_start(dev);
- emac_print_link_status(dev);
- }
- link_poll_interval = PHY_POLL_LINK_ON;
- } else {
- if (netif_carrier_ok(dev->ndev)) {
- emac_rx_clk_tx(dev);
- netif_carrier_off(dev->ndev);
- netif_tx_disable(dev->ndev);
- emac_reinitialize(dev);
- emac_print_link_status(dev);
- }
- link_poll_interval = PHY_POLL_LINK_OFF;
- }
- schedule_delayed_work(&dev->link_work, link_poll_interval);
- bail:
- mutex_unlock(&dev->link_lock);
-}
-
-static void emac_force_link_update(struct emac_instance *dev)
-{
- netif_carrier_off(dev->ndev);
- smp_rmb();
- if (dev->link_polling) {
- cancel_delayed_work_sync(&dev->link_work);
- if (dev->link_polling)
- schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
- }
-}
-
-/* Process ctx, rtnl_lock semaphore */
-static int emac_close(struct net_device *ndev)
-{
- struct emac_instance *dev = netdev_priv(ndev);
-
- DBG(dev, "close" NL);
-
- if (dev->phy.address >= 0) {
- dev->link_polling = 0;
- cancel_delayed_work_sync(&dev->link_work);
- }
- mutex_lock(&dev->link_lock);
- emac_netif_stop(dev);
- dev->opened = 0;
- mutex_unlock(&dev->link_lock);
-
- emac_rx_disable(dev);
- emac_tx_disable(dev);
- mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
- mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
- mal_poll_del(dev->mal, &dev->commac);
-
- emac_clean_tx_ring(dev);
- emac_clean_rx_ring(dev);
-
- free_irq(dev->emac_irq, dev);
-
- netif_carrier_off(ndev);
-
- return 0;
-}
-
-static inline u16 emac_tx_csum(struct emac_instance *dev,
- struct sk_buff *skb)
-{
- if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
- (skb->ip_summed == CHECKSUM_PARTIAL)) {
- ++dev->stats.tx_packets_csum;
- return EMAC_TX_CTRL_TAH_CSUM;
- }
- return 0;
-}
-
-static inline int emac_xmit_finish(struct emac_instance *dev, int len)
-{
- struct emac_regs __iomem *p = dev->emacp;
- struct net_device *ndev = dev->ndev;
-
- /* Send the packet out. If the if makes a significant perf
- * difference, then we can store the TMR0 value in "dev"
- * instead
- */
- if (emac_has_feature(dev, EMAC_FTR_EMAC4))
- out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
- else
- out_be32(&p->tmr0, EMAC_TMR0_XMIT);
-
- if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
- netif_stop_queue(ndev);
- DBG2(dev, "stopped TX queue" NL);
- }
-
- ndev->trans_start = jiffies;
- ++dev->stats.tx_packets;
- dev->stats.tx_bytes += len;
-
- return NETDEV_TX_OK;
-}
-
-/* Tx lock BH */
-static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
- struct emac_instance *dev = netdev_priv(ndev);
- unsigned int len = skb->len;
- int slot;
-
- u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
- MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
-
- slot = dev->tx_slot++;
- if (dev->tx_slot == NUM_TX_BUFF) {
- dev->tx_slot = 0;
- ctrl |= MAL_TX_CTRL_WRAP;
- }
-
- DBG2(dev, "xmit(%u) %d" NL, len, slot);
-
- dev->tx_skb[slot] = skb;
- dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
- skb->data, len,
- DMA_TO_DEVICE);
- dev->tx_desc[slot].data_len = (u16) len;
- wmb();
- dev->tx_desc[slot].ctrl = ctrl;
-
- return emac_xmit_finish(dev, len);
-}
-
-static inline int emac_xmit_split(struct emac_instance *dev, int slot,
- u32 pd, int len, int last, u16 base_ctrl)
-{
- while (1) {
- u16 ctrl = base_ctrl;
- int chunk = min(len, MAL_MAX_TX_SIZE);
- len -= chunk;
-
- slot = (slot + 1) % NUM_TX_BUFF;
-
- if (last && !len)
- ctrl |= MAL_TX_CTRL_LAST;
- if (slot == NUM_TX_BUFF - 1)
- ctrl |= MAL_TX_CTRL_WRAP;
-
- dev->tx_skb[slot] = NULL;
- dev->tx_desc[slot].data_ptr = pd;
- dev->tx_desc[slot].data_len = (u16) chunk;
- dev->tx_desc[slot].ctrl = ctrl;
- ++dev->tx_cnt;
-
- if (!len)
- break;
-
- pd += chunk;
- }
- return slot;
-}
-
-/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
-static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
-{
- struct emac_instance *dev = netdev_priv(ndev);
- int nr_frags = skb_shinfo(skb)->nr_frags;
- int len = skb->len, chunk;
- int slot, i;
- u16 ctrl;
- u32 pd;
-
- /* This is common "fast" path */
- if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
- return emac_start_xmit(skb, ndev);
-
- len -= skb->data_len;
-
- /* Note, this is only an *estimation*, we can still run out of empty
- * slots because of the additional fragmentation into
- * MAL_MAX_TX_SIZE-sized chunks
- */
- if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
- goto stop_queue;
-
- ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
- emac_tx_csum(dev, skb);
- slot = dev->tx_slot;
-
- /* skb data */
- dev->tx_skb[slot] = NULL;
- chunk = min(len, MAL_MAX_TX_SIZE);
- dev->tx_desc[slot].data_ptr = pd =
- dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
- dev->tx_desc[slot].data_len = (u16) chunk;
- len -= chunk;
- if (unlikely(len))
- slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
- ctrl);
- /* skb fragments */
- for (i = 0; i < nr_frags; ++i) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
-
- if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
- goto undo_frame;
-
- pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
- DMA_TO_DEVICE);
-
- slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
- ctrl);
- }
-
- DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
-
- /* Attach skb to the last slot so we don't release it too early */
- dev->tx_skb[slot] = skb;
-
- /* Send the packet out */
- if (dev->tx_slot == NUM_TX_BUFF - 1)
- ctrl |= MAL_TX_CTRL_WRAP;
- wmb();
- dev->tx_desc[dev->tx_slot].ctrl = ctrl;
- dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
-
- return emac_xmit_finish(dev, skb->len);
-
- undo_frame:
- /* Well, too bad. Our previous estimation was overly optimistic.
- * Undo everything.
- */
- while (slot != dev->tx_slot) {
- dev->tx_desc[slot].ctrl = 0;
- --dev->tx_cnt;
- if (--slot < 0)
- slot = NUM_TX_BUFF - 1;
- }
- ++dev->estats.tx_undo;
-
- stop_queue:
- netif_stop_queue(ndev);
- DBG2(dev, "stopped TX queue" NL);
- return NETDEV_TX_BUSY;
-}
-
-/* Tx lock BHs */
-static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
-{
- struct emac_error_stats *st = &dev->estats;
-
- DBG(dev, "BD TX error %04x" NL, ctrl);
-
- ++st->tx_bd_errors;
- if (ctrl & EMAC_TX_ST_BFCS)
- ++st->tx_bd_bad_fcs;
- if (ctrl & EMAC_TX_ST_LCS)
- ++st->tx_bd_carrier_loss;
- if (ctrl & EMAC_TX_ST_ED)
- ++st->tx_bd_excessive_deferral;
- if (ctrl & EMAC_TX_ST_EC)
- ++st->tx_bd_excessive_collisions;
- if (ctrl & EMAC_TX_ST_LC)
- ++st->tx_bd_late_collision;
- if (ctrl & EMAC_TX_ST_MC)
- ++st->tx_bd_multple_collisions;
- if (ctrl & EMAC_TX_ST_SC)
- ++st->tx_bd_single_collision;
- if (ctrl & EMAC_TX_ST_UR)
- ++st->tx_bd_underrun;
- if (ctrl & EMAC_TX_ST_SQE)
- ++st->tx_bd_sqe;
-}
-
-static void emac_poll_tx(void *param)
-{
- struct emac_instance *dev = param;
- u32 bad_mask;
-
- DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
-
- if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
- bad_mask = EMAC_IS_BAD_TX_TAH;
- else
- bad_mask = EMAC_IS_BAD_TX;
-
- netif_tx_lock_bh(dev->ndev);
- if (dev->tx_cnt) {
- u16 ctrl;
- int slot = dev->ack_slot, n = 0;
- again:
- ctrl = dev->tx_desc[slot].ctrl;
- if (!(ctrl & MAL_TX_CTRL_READY)) {
- struct sk_buff *skb = dev->tx_skb[slot];
- ++n;
-
- if (skb) {
- dev_kfree_skb(skb);
- dev->tx_skb[slot] = NULL;
- }
- slot = (slot + 1) % NUM_TX_BUFF;
-
- if (unlikely(ctrl & bad_mask))
- emac_parse_tx_error(dev, ctrl);
-
- if (--dev->tx_cnt)
- goto again;
- }
- if (n) {
- dev->ack_slot = slot;
- if (netif_queue_stopped(dev->ndev) &&
- dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
- netif_wake_queue(dev->ndev);
-
- DBG2(dev, "tx %d pkts" NL, n);
- }
- }
- netif_tx_unlock_bh(dev->ndev);
-}
-
-static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
- int len)
-{
- struct sk_buff *skb = dev->rx_skb[slot];
-
- DBG2(dev, "recycle %d %d" NL, slot, len);
-
- if (len)
- dma_map_single(&dev->ofdev->dev, skb->data - 2,
- EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
-
- dev->rx_desc[slot].data_len = 0;
- wmb();
- dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
- (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
-}
-
-static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
-{
- struct emac_error_stats *st = &dev->estats;
-
- DBG(dev, "BD RX error %04x" NL, ctrl);
-
- ++st->rx_bd_errors;
- if (ctrl & EMAC_RX_ST_OE)
- ++st->rx_bd_overrun;
- if (ctrl & EMAC_RX_ST_BP)
- ++st->rx_bd_bad_packet;
- if (ctrl & EMAC_RX_ST_RP)
- ++st->rx_bd_runt_packet;
- if (ctrl & EMAC_RX_ST_SE)
- ++st->rx_bd_short_event;
- if (ctrl & EMAC_RX_ST_AE)
- ++st->rx_bd_alignment_error;
- if (ctrl & EMAC_RX_ST_BFCS)
- ++st->rx_bd_bad_fcs;
- if (ctrl & EMAC_RX_ST_PTL)
- ++st->rx_bd_packet_too_long;
- if (ctrl & EMAC_RX_ST_ORE)
- ++st->rx_bd_out_of_range;
- if (ctrl & EMAC_RX_ST_IRE)
- ++st->rx_bd_in_range;
-}
-
-static inline void emac_rx_csum(struct emac_instance *dev,
- struct sk_buff *skb, u16 ctrl)
-{
-#ifdef CONFIG_IBM_NEW_EMAC_TAH
- if (!ctrl && dev->tah_dev) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- ++dev->stats.rx_packets_csum;
- }
-#endif
-}
-
-static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
-{
- if (likely(dev->rx_sg_skb != NULL)) {
- int len = dev->rx_desc[slot].data_len;
- int tot_len = dev->rx_sg_skb->len + len;
-
- if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
- ++dev->estats.rx_dropped_mtu;
- dev_kfree_skb(dev->rx_sg_skb);
- dev->rx_sg_skb = NULL;
- } else {
- cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
- dev->rx_skb[slot]->data, len);
- skb_put(dev->rx_sg_skb, len);
- emac_recycle_rx_skb(dev, slot, len);
- return 0;
- }
- }
- emac_recycle_rx_skb(dev, slot, 0);
- return -1;
-}
-
-/* NAPI poll context */
-static int emac_poll_rx(void *param, int budget)
-{
- struct emac_instance *dev = param;
- int slot = dev->rx_slot, received = 0;
-
- DBG2(dev, "poll_rx(%d)" NL, budget);
-
- again:
- while (budget > 0) {
- int len;
- struct sk_buff *skb;
- u16 ctrl = dev->rx_desc[slot].ctrl;
-
- if (ctrl & MAL_RX_CTRL_EMPTY)
- break;
-
- skb = dev->rx_skb[slot];
- mb();
- len = dev->rx_desc[slot].data_len;
-
- if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
- goto sg;
-
- ctrl &= EMAC_BAD_RX_MASK;
- if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
- emac_parse_rx_error(dev, ctrl);
- ++dev->estats.rx_dropped_error;
- emac_recycle_rx_skb(dev, slot, 0);
- len = 0;
- goto next;
- }
-
- if (len < ETH_HLEN) {
- ++dev->estats.rx_dropped_stack;
- emac_recycle_rx_skb(dev, slot, len);
- goto next;
- }
-
- if (len && len < EMAC_RX_COPY_THRESH) {
- struct sk_buff *copy_skb =
- alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
- if (unlikely(!copy_skb))
- goto oom;
-
- skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
- cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
- len + 2);
- emac_recycle_rx_skb(dev, slot, len);
- skb = copy_skb;
- } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
- goto oom;
-
- skb_put(skb, len);
- push_packet:
- skb->protocol = eth_type_trans(skb, dev->ndev);
- emac_rx_csum(dev, skb, ctrl);
-
- if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
- ++dev->estats.rx_dropped_stack;
- next:
- ++dev->stats.rx_packets;
- skip:
- dev->stats.rx_bytes += len;
- slot = (slot + 1) % NUM_RX_BUFF;
- --budget;
- ++received;
- continue;
- sg:
- if (ctrl & MAL_RX_CTRL_FIRST) {
- BUG_ON(dev->rx_sg_skb);
- if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
- DBG(dev, "rx OOM %d" NL, slot);
- ++dev->estats.rx_dropped_oom;
- emac_recycle_rx_skb(dev, slot, 0);
- } else {
- dev->rx_sg_skb = skb;
- skb_put(skb, len);
- }
- } else if (!emac_rx_sg_append(dev, slot) &&
- (ctrl & MAL_RX_CTRL_LAST)) {
-
- skb = dev->rx_sg_skb;
- dev->rx_sg_skb = NULL;
-
- ctrl &= EMAC_BAD_RX_MASK;
- if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
- emac_parse_rx_error(dev, ctrl);
- ++dev->estats.rx_dropped_error;
- dev_kfree_skb(skb);
- len = 0;
- } else
- goto push_packet;
- }
- goto skip;
- oom:
- DBG(dev, "rx OOM %d" NL, slot);
- /* Drop the packet and recycle skb */
- ++dev->estats.rx_dropped_oom;
- emac_recycle_rx_skb(dev, slot, 0);
- goto next;
- }
-
- if (received) {
- DBG2(dev, "rx %d BDs" NL, received);
- dev->rx_slot = slot;
- }
-
- if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
- mb();
- if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
- DBG2(dev, "rx restart" NL);
- received = 0;
- goto again;
- }
-
- if (dev->rx_sg_skb) {
- DBG2(dev, "dropping partial rx packet" NL);
- ++dev->estats.rx_dropped_error;
- dev_kfree_skb(dev->rx_sg_skb);
- dev->rx_sg_skb = NULL;
- }
-
- clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
- mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
- emac_rx_enable(dev);
- dev->rx_slot = 0;
- }
- return received;
-}
-
-/* NAPI poll context */
-static int emac_peek_rx(void *param)
-{
- struct emac_instance *dev = param;
-
- return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
-}
-
-/* NAPI poll context */
-static int emac_peek_rx_sg(void *param)
-{
- struct emac_instance *dev = param;
-
- int slot = dev->rx_slot;
- while (1) {
- u16 ctrl = dev->rx_desc[slot].ctrl;
- if (ctrl & MAL_RX_CTRL_EMPTY)
- return 0;
- else if (ctrl & MAL_RX_CTRL_LAST)
- return 1;
-
- slot = (slot + 1) % NUM_RX_BUFF;
-
- /* I'm just being paranoid here :) */
- if (unlikely(slot == dev->rx_slot))
- return 0;
- }
-}
-
-/* Hard IRQ */
-static void emac_rxde(void *param)
-{
- struct emac_instance *dev = param;
-
- ++dev->estats.rx_stopped;
- emac_rx_disable_async(dev);
-}
-
-/* Hard IRQ */
-static irqreturn_t emac_irq(int irq, void *dev_instance)
-{
- struct emac_instance *dev = dev_instance;
- struct emac_regs __iomem *p = dev->emacp;
- struct emac_error_stats *st = &dev->estats;
- u32 isr;
-
- spin_lock(&dev->lock);
-
- isr = in_be32(&p->isr);
- out_be32(&p->isr, isr);
-
- DBG(dev, "isr = %08x" NL, isr);
-
- if (isr & EMAC4_ISR_TXPE)
- ++st->tx_parity;
- if (isr & EMAC4_ISR_RXPE)
- ++st->rx_parity;
- if (isr & EMAC4_ISR_TXUE)
- ++st->tx_underrun;
- if (isr & EMAC4_ISR_RXOE)
- ++st->rx_fifo_overrun;
- if (isr & EMAC_ISR_OVR)
- ++st->rx_overrun;
- if (isr & EMAC_ISR_BP)
- ++st->rx_bad_packet;
- if (isr & EMAC_ISR_RP)
- ++st->rx_runt_packet;
- if (isr & EMAC_ISR_SE)
- ++st->rx_short_event;
- if (isr & EMAC_ISR_ALE)
- ++st->rx_alignment_error;
- if (isr & EMAC_ISR_BFCS)
- ++st->rx_bad_fcs;
- if (isr & EMAC_ISR_PTLE)
- ++st->rx_packet_too_long;
- if (isr & EMAC_ISR_ORE)
- ++st->rx_out_of_range;
- if (isr & EMAC_ISR_IRE)
- ++st->rx_in_range;
- if (isr & EMAC_ISR_SQE)
- ++st->tx_sqe;
- if (isr & EMAC_ISR_TE)
- ++st->tx_errors;
-
- spin_unlock(&dev->lock);
-
- return IRQ_HANDLED;
-}
-
-static struct net_device_stats *emac_stats(struct net_device *ndev)
-{
- struct emac_instance *dev = netdev_priv(ndev);
- struct emac_stats *st = &dev->stats;
- struct emac_error_stats *est = &dev->estats;
- struct net_device_stats *nst = &dev->nstats;
- unsigned long flags;
-
- DBG2(dev, "stats" NL);
-
- /* Compute "legacy" statistics */
- spin_lock_irqsave(&dev->lock, flags);
- nst->rx_packets = (unsigned long)st->rx_packets;
- nst->rx_bytes = (unsigned long)st->rx_bytes;
- nst->tx_packets = (unsigned long)st->tx_packets;
- nst->tx_bytes = (unsigned long)st->tx_bytes;
- nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
- est->rx_dropped_error +
- est->rx_dropped_resize +
- est->rx_dropped_mtu);
- nst->tx_dropped = (unsigned long)est->tx_dropped;
-
- nst->rx_errors = (unsigned long)est->rx_bd_errors;
- nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
- est->rx_fifo_overrun +
- est->rx_overrun);
- nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
- est->rx_alignment_error);
- nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
- est->rx_bad_fcs);
- nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
- est->rx_bd_short_event +
- est->rx_bd_packet_too_long +
- est->rx_bd_out_of_range +
- est->rx_bd_in_range +
- est->rx_runt_packet +
- est->rx_short_event +
- est->rx_packet_too_long +
- est->rx_out_of_range +
- est->rx_in_range);
-
- nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
- nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
- est->tx_underrun);
- nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
- nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
- est->tx_bd_excessive_collisions +
- est->tx_bd_late_collision +
- est->tx_bd_multple_collisions);
- spin_unlock_irqrestore(&dev->lock, flags);
- return nst;
-}
-
-static struct mal_commac_ops emac_commac_ops = {
- .poll_tx = &emac_poll_tx,
- .poll_rx = &emac_poll_rx,
- .peek_rx = &emac_peek_rx,
- .rxde = &emac_rxde,
-};
-
-static struct mal_commac_ops emac_commac_sg_ops = {
- .poll_tx = &emac_poll_tx,
- .poll_rx = &emac_poll_rx,
- .peek_rx = &emac_peek_rx_sg,
- .rxde = &emac_rxde,
-};
-
-/* Ethtool support */
-static int emac_ethtool_get_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct emac_instance *dev = netdev_priv(ndev);
-
- cmd->supported = dev->phy.features;
- cmd->port = PORT_MII;
- cmd->phy_address = dev->phy.address;
- cmd->transceiver =
- dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
-
- mutex_lock(&dev->link_lock);
- cmd->advertising = dev->phy.advertising;
- cmd->autoneg = dev->phy.autoneg;
- cmd->speed = dev->phy.speed;
- cmd->duplex = dev->phy.duplex;
- mutex_unlock(&dev->link_lock);
-
- return 0;
-}
-
-static int emac_ethtool_set_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct emac_instance *dev = netdev_priv(ndev);
- u32 f = dev->phy.features;
-
- DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
- cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
-
- /* Basic sanity checks */
- if (dev->phy.address < 0)
- return -EOPNOTSUPP;
- if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
- return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
- return -EINVAL;
- if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
- return -EINVAL;
-
- if (cmd->autoneg == AUTONEG_DISABLE) {
- switch (cmd->speed) {
- case SPEED_10:
- if (cmd->duplex == DUPLEX_HALF &&
- !(f & SUPPORTED_10baseT_Half))
- return -EINVAL;
- if (cmd->duplex == DUPLEX_FULL &&
- !(f & SUPPORTED_10baseT_Full))
- return -EINVAL;
- break;
- case SPEED_100:
- if (cmd->duplex == DUPLEX_HALF &&
- !(f & SUPPORTED_100baseT_Half))
- return -EINVAL;
- if (cmd->duplex == DUPLEX_FULL &&
- !(f & SUPPORTED_100baseT_Full))
- return -EINVAL;
- break;
- case SPEED_1000:
- if (cmd->duplex == DUPLEX_HALF &&
- !(f & SUPPORTED_1000baseT_Half))
- return -EINVAL;
- if (cmd->duplex == DUPLEX_FULL &&
- !(f & SUPPORTED_1000baseT_Full))
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- mutex_lock(&dev->link_lock);
- dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
- cmd->duplex);
- mutex_unlock(&dev->link_lock);
-
- } else {
- if (!(f & SUPPORTED_Autoneg))
- return -EINVAL;
-
- mutex_lock(&dev->link_lock);
- dev->phy.def->ops->setup_aneg(&dev->phy,
- (cmd->advertising & f) |
- (dev->phy.advertising &
- (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause)));
- mutex_unlock(&dev->link_lock);
- }
- emac_force_link_update(dev);
-
- return 0;
-}
-
-static void emac_ethtool_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *rp)
-{
- rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
- rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
-}
-
-static void emac_ethtool_get_pauseparam(struct net_device *ndev,
- struct ethtool_pauseparam *pp)
-{
- struct emac_instance *dev = netdev_priv(ndev);
-
- mutex_lock(&dev->link_lock);
- if ((dev->phy.features & SUPPORTED_Autoneg) &&
- (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
- pp->autoneg = 1;
-
- if (dev->phy.duplex == DUPLEX_FULL) {
- if (dev->phy.pause)
- pp->rx_pause = pp->tx_pause = 1;
- else if (dev->phy.asym_pause)
- pp->tx_pause = 1;
- }
- mutex_unlock(&dev->link_lock);
-}
-
-static int emac_get_regs_len(struct emac_instance *dev)
-{
- if (emac_has_feature(dev, EMAC_FTR_EMAC4))
- return sizeof(struct emac_ethtool_regs_subhdr) +
- EMAC4_ETHTOOL_REGS_SIZE(dev);
- else
- return sizeof(struct emac_ethtool_regs_subhdr) +
- EMAC_ETHTOOL_REGS_SIZE(dev);
-}
-
-static int emac_ethtool_get_regs_len(struct net_device *ndev)
-{
- struct emac_instance *dev = netdev_priv(ndev);
- int size;
-
- size = sizeof(struct emac_ethtool_regs_hdr) +
- emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
- if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
- size += zmii_get_regs_len(dev->zmii_dev);
- if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
- size += rgmii_get_regs_len(dev->rgmii_dev);
- if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
- size += tah_get_regs_len(dev->tah_dev);
-
- return size;
-}
-
-static void *emac_dump_regs(struct emac_instance *dev, void *buf)
-{
- struct emac_ethtool_regs_subhdr *hdr = buf;
-
- hdr->index = dev->cell_index;
- if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
- hdr->version = EMAC4_ETHTOOL_REGS_VER;
- memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
- return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
- } else {
- hdr->version = EMAC_ETHTOOL_REGS_VER;
- memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
- return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
- }
-}
-
-static void emac_ethtool_get_regs(struct net_device *ndev,
- struct ethtool_regs *regs, void *buf)
-{
- struct emac_instance *dev = netdev_priv(ndev);
- struct emac_ethtool_regs_hdr *hdr = buf;
-
- hdr->components = 0;
- buf = hdr + 1;
-
- buf = mal_dump_regs(dev->mal, buf);
- buf = emac_dump_regs(dev, buf);
- if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
- hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
- buf = zmii_dump_regs(dev->zmii_dev, buf);
- }
- if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
- hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
- buf = rgmii_dump_regs(dev->rgmii_dev, buf);
- }
- if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
- hdr->components |= EMAC_ETHTOOL_REGS_TAH;
- buf = tah_dump_regs(dev->tah_dev, buf);
- }
-}
-
-static int emac_ethtool_nway_reset(struct net_device *ndev)
-{
- struct emac_instance *dev = netdev_priv(ndev);
- int res = 0;
-
- DBG(dev, "nway_reset" NL);
-
- if (dev->phy.address < 0)
- return -EOPNOTSUPP;
-
- mutex_lock(&dev->link_lock);
- if (!dev->phy.autoneg) {
- res = -EINVAL;
- goto out;
- }
-
- dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
- out:
- mutex_unlock(&dev->link_lock);
- emac_force_link_update(dev);
- return res;
-}
-
-static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
-{
- if (stringset == ETH_SS_STATS)
- return EMAC_ETHTOOL_STATS_COUNT;
- else
- return -EINVAL;
-}
-
-static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
- u8 * buf)
-{
- if (stringset == ETH_SS_STATS)
- memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
-}
-
-static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *estats,
- u64 * tmp_stats)
-{
- struct emac_instance *dev = netdev_priv(ndev);
-
- memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
- tmp_stats += sizeof(dev->stats) / sizeof(u64);
- memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
-}
-
-static void emac_ethtool_get_drvinfo(struct net_device *ndev,
- struct ethtool_drvinfo *info)
-{
- struct emac_instance *dev = netdev_priv(ndev);
-
- strcpy(info->driver, "ibm_emac");
- strcpy(info->version, DRV_VERSION);
- info->fw_version[0] = '\0';
- sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
- dev->cell_index, dev->ofdev->dev.of_node->full_name);
- info->regdump_len = emac_ethtool_get_regs_len(ndev);
-}
-
-static const struct ethtool_ops emac_ethtool_ops = {
- .get_settings = emac_ethtool_get_settings,
- .set_settings = emac_ethtool_set_settings,
- .get_drvinfo = emac_ethtool_get_drvinfo,
-
- .get_regs_len = emac_ethtool_get_regs_len,
- .get_regs = emac_ethtool_get_regs,
-
- .nway_reset = emac_ethtool_nway_reset,
-
- .get_ringparam = emac_ethtool_get_ringparam,
- .get_pauseparam = emac_ethtool_get_pauseparam,
-
- .get_strings = emac_ethtool_get_strings,
- .get_sset_count = emac_ethtool_get_sset_count,
- .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
-
- .get_link = ethtool_op_get_link,
-};
-
-static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- struct emac_instance *dev = netdev_priv(ndev);
- struct mii_ioctl_data *data = if_mii(rq);
-
- DBG(dev, "ioctl %08x" NL, cmd);
-
- if (dev->phy.address < 0)
- return -EOPNOTSUPP;
-
- switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = dev->phy.address;
- /* Fall through */
- case SIOCGMIIREG:
- data->val_out = emac_mdio_read(ndev, dev->phy.address,
- data->reg_num);
- return 0;
-
- case SIOCSMIIREG:
- emac_mdio_write(ndev, dev->phy.address, data->reg_num,
- data->val_in);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-struct emac_depentry {
- u32 phandle;
- struct device_node *node;
- struct platform_device *ofdev;
- void *drvdata;
-};
-
-#define EMAC_DEP_MAL_IDX 0
-#define EMAC_DEP_ZMII_IDX 1
-#define EMAC_DEP_RGMII_IDX 2
-#define EMAC_DEP_TAH_IDX 3
-#define EMAC_DEP_MDIO_IDX 4
-#define EMAC_DEP_PREV_IDX 5
-#define EMAC_DEP_COUNT 6
-
-static int __devinit emac_check_deps(struct emac_instance *dev,
- struct emac_depentry *deps)
-{
- int i, there = 0;
- struct device_node *np;
-
- for (i = 0; i < EMAC_DEP_COUNT; i++) {
- /* no dependency on that item, allright */
- if (deps[i].phandle == 0) {
- there++;
- continue;
- }
- /* special case for blist as the dependency might go away */
- if (i == EMAC_DEP_PREV_IDX) {
- np = *(dev->blist - 1);
- if (np == NULL) {
- deps[i].phandle = 0;
- there++;
- continue;
- }
- if (deps[i].node == NULL)
- deps[i].node = of_node_get(np);
- }
- if (deps[i].node == NULL)
- deps[i].node = of_find_node_by_phandle(deps[i].phandle);
- if (deps[i].node == NULL)
- continue;
- if (deps[i].ofdev == NULL)
- deps[i].ofdev = of_find_device_by_node(deps[i].node);
- if (deps[i].ofdev == NULL)
- continue;
- if (deps[i].drvdata == NULL)
- deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
- if (deps[i].drvdata != NULL)
- there++;
- }
- return there == EMAC_DEP_COUNT;
-}
-
-static void emac_put_deps(struct emac_instance *dev)
-{
- if (dev->mal_dev)
- of_dev_put(dev->mal_dev);
- if (dev->zmii_dev)
- of_dev_put(dev->zmii_dev);
- if (dev->rgmii_dev)
- of_dev_put(dev->rgmii_dev);
- if (dev->mdio_dev)
- of_dev_put(dev->mdio_dev);
- if (dev->tah_dev)
- of_dev_put(dev->tah_dev);
-}
-
-static int __devinit emac_of_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- /* We are only intereted in device addition */
- if (action == BUS_NOTIFY_BOUND_DRIVER)
- wake_up_all(&emac_probe_wait);
- return 0;
-}
-
-static struct notifier_block emac_of_bus_notifier __devinitdata = {
- .notifier_call = emac_of_bus_notify
-};
-
-static int __devinit emac_wait_deps(struct emac_instance *dev)
-{
- struct emac_depentry deps[EMAC_DEP_COUNT];
- int i, err;
-
- memset(&deps, 0, sizeof(deps));
-
- deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
- deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
- deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
- if (dev->tah_ph)
- deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
- if (dev->mdio_ph)
- deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
- if (dev->blist && dev->blist > emac_boot_list)
- deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
- bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
- wait_event_timeout(emac_probe_wait,
- emac_check_deps(dev, deps),
- EMAC_PROBE_DEP_TIMEOUT);
- bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
- err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
- for (i = 0; i < EMAC_DEP_COUNT; i++) {
- if (deps[i].node)
- of_node_put(deps[i].node);
- if (err && deps[i].ofdev)
- of_dev_put(deps[i].ofdev);
- }
- if (err == 0) {
- dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
- dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
- dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
- dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
- dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
- }
- if (deps[EMAC_DEP_PREV_IDX].ofdev)
- of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
- return err;
-}
-
-static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
- u32 *val, int fatal)
-{
- int len;
- const u32 *prop = of_get_property(np, name, &len);
- if (prop == NULL || len < sizeof(u32)) {
- if (fatal)
- printk(KERN_ERR "%s: missing %s property\n",
- np->full_name, name);
- return -ENODEV;
- }
- *val = *prop;
- return 0;
-}
-
-static int __devinit emac_init_phy(struct emac_instance *dev)
-{
- struct device_node *np = dev->ofdev->dev.of_node;
- struct net_device *ndev = dev->ndev;
- u32 phy_map, adv;
- int i;
-
- dev->phy.dev = ndev;
- dev->phy.mode = dev->phy_mode;
-
- /* PHY-less configuration.
- * XXX I probably should move these settings to the dev tree
- */
- if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
- emac_reset(dev);
-
- /* PHY-less configuration.
- * XXX I probably should move these settings to the dev tree
- */
- dev->phy.address = -1;
- dev->phy.features = SUPPORTED_MII;
- if (emac_phy_supports_gige(dev->phy_mode))
- dev->phy.features |= SUPPORTED_1000baseT_Full;
- else
- dev->phy.features |= SUPPORTED_100baseT_Full;
- dev->phy.pause = 1;
-
- return 0;
- }
-
- mutex_lock(&emac_phy_map_lock);
- phy_map = dev->phy_map | busy_phy_map;
-
- DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
-
- dev->phy.mdio_read = emac_mdio_read;
- dev->phy.mdio_write = emac_mdio_write;
-
- /* Enable internal clock source */
-#ifdef CONFIG_PPC_DCR_NATIVE
- if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
- dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
-#endif
- /* PHY clock workaround */
- emac_rx_clk_tx(dev);
-
- /* Enable internal clock source on 440GX*/
-#ifdef CONFIG_PPC_DCR_NATIVE
- if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
- dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
-#endif
- /* Configure EMAC with defaults so we can at least use MDIO
- * This is needed mostly for 440GX
- */
- if (emac_phy_gpcs(dev->phy.mode)) {
- /* XXX
- * Make GPCS PHY address equal to EMAC index.
- * We probably should take into account busy_phy_map
- * and/or phy_map here.
- *
- * Note that the busy_phy_map is currently global
- * while it should probably be per-ASIC...
- */
- dev->phy.gpcs_address = dev->gpcs_address;
- if (dev->phy.gpcs_address == 0xffffffff)
- dev->phy.address = dev->cell_index;
- }
-
- emac_configure(dev);
-
- if (dev->phy_address != 0xffffffff)
- phy_map = ~(1 << dev->phy_address);
-
- for (i = 0; i < 0x20; phy_map >>= 1, ++i)
- if (!(phy_map & 1)) {
- int r;
- busy_phy_map |= 1 << i;
-
- /* Quick check if there is a PHY at the address */
- r = emac_mdio_read(dev->ndev, i, MII_BMCR);
- if (r == 0xffff || r < 0)
- continue;
- if (!emac_mii_phy_probe(&dev->phy, i))
- break;
- }
-
- /* Enable external clock source */
-#ifdef CONFIG_PPC_DCR_NATIVE
- if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
- dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
-#endif
- mutex_unlock(&emac_phy_map_lock);
- if (i == 0x20) {
- printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
- return -ENXIO;
- }
-
- /* Init PHY */
- if (dev->phy.def->ops->init)
- dev->phy.def->ops->init(&dev->phy);
-
- /* Disable any PHY features not supported by the platform */
- dev->phy.def->features &= ~dev->phy_feat_exc;
-
- /* Setup initial link parameters */
- if (dev->phy.features & SUPPORTED_Autoneg) {
- adv = dev->phy.features;
- if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
- adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
- /* Restart autonegotiation */
- dev->phy.def->ops->setup_aneg(&dev->phy, adv);
- } else {
- u32 f = dev->phy.def->features;
- int speed = SPEED_10, fd = DUPLEX_HALF;
-
- /* Select highest supported speed/duplex */
- if (f & SUPPORTED_1000baseT_Full) {
- speed = SPEED_1000;
- fd = DUPLEX_FULL;
- } else if (f & SUPPORTED_1000baseT_Half)
- speed = SPEED_1000;
- else if (f & SUPPORTED_100baseT_Full) {
- speed = SPEED_100;
- fd = DUPLEX_FULL;
- } else if (f & SUPPORTED_100baseT_Half)
- speed = SPEED_100;
- else if (f & SUPPORTED_10baseT_Full)
- fd = DUPLEX_FULL;
-
- /* Force link parameters */
- dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
- }
- return 0;
-}
-
-static int __devinit emac_init_config(struct emac_instance *dev)
-{
- struct device_node *np = dev->ofdev->dev.of_node;
- const void *p;
-
- /* Read config from device-tree */
- if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
- return -ENXIO;
- if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
- return -ENXIO;
- if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
- return -ENXIO;
- if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
- return -ENXIO;
- if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
- dev->max_mtu = 1500;
- if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
- dev->rx_fifo_size = 2048;
- if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
- dev->tx_fifo_size = 2048;
- if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
- dev->rx_fifo_size_gige = dev->rx_fifo_size;
- if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
- dev->tx_fifo_size_gige = dev->tx_fifo_size;
- if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
- dev->phy_address = 0xffffffff;
- if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
- dev->phy_map = 0xffffffff;
- if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
- dev->gpcs_address = 0xffffffff;
- if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
- return -ENXIO;
- if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
- dev->tah_ph = 0;
- if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
- dev->tah_port = 0;
- if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
- dev->mdio_ph = 0;
- if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
- dev->zmii_ph = 0;
- if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
- dev->zmii_port = 0xffffffff;
- if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
- dev->rgmii_ph = 0;
- if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
- dev->rgmii_port = 0xffffffff;
- if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
- dev->fifo_entry_size = 16;
- if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
- dev->mal_burst_size = 256;
-
- /* PHY mode needs some decoding */
- dev->phy_mode = of_get_phy_mode(np);
- if (dev->phy_mode < 0)
- dev->phy_mode = PHY_MODE_NA;
-
- /* Check EMAC version */
- if (of_device_is_compatible(np, "ibm,emac4sync")) {
- dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
- if (of_device_is_compatible(np, "ibm,emac-460ex") ||
- of_device_is_compatible(np, "ibm,emac-460gt"))
- dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
- if (of_device_is_compatible(np, "ibm,emac-405ex") ||
- of_device_is_compatible(np, "ibm,emac-405exr"))
- dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
- } else if (of_device_is_compatible(np, "ibm,emac4")) {
- dev->features |= EMAC_FTR_EMAC4;
- if (of_device_is_compatible(np, "ibm,emac-440gx"))
- dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
- } else {
- if (of_device_is_compatible(np, "ibm,emac-440ep") ||
- of_device_is_compatible(np, "ibm,emac-440gr"))
- dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
- if (of_device_is_compatible(np, "ibm,emac-405ez")) {
-#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL
- dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
-#else
- printk(KERN_ERR "%s: Flow control not disabled!\n",
- np->full_name);
- return -ENXIO;
-#endif
- }
-
- }
-
- /* Fixup some feature bits based on the device tree */
- if (of_get_property(np, "has-inverted-stacr-oc", NULL))
- dev->features |= EMAC_FTR_STACR_OC_INVERT;
- if (of_get_property(np, "has-new-stacr-staopc", NULL))
- dev->features |= EMAC_FTR_HAS_NEW_STACR;
-
- /* CAB lacks the appropriate properties */
- if (of_device_is_compatible(np, "ibm,emac-axon"))
- dev->features |= EMAC_FTR_HAS_NEW_STACR |
- EMAC_FTR_STACR_OC_INVERT;
-
- /* Enable TAH/ZMII/RGMII features as found */
- if (dev->tah_ph != 0) {
-#ifdef CONFIG_IBM_NEW_EMAC_TAH
- dev->features |= EMAC_FTR_HAS_TAH;
-#else
- printk(KERN_ERR "%s: TAH support not enabled !\n",
- np->full_name);
- return -ENXIO;
-#endif
- }
-
- if (dev->zmii_ph != 0) {
-#ifdef CONFIG_IBM_NEW_EMAC_ZMII
- dev->features |= EMAC_FTR_HAS_ZMII;
-#else
- printk(KERN_ERR "%s: ZMII support not enabled !\n",
- np->full_name);
- return -ENXIO;
-#endif
- }
-
- if (dev->rgmii_ph != 0) {
-#ifdef CONFIG_IBM_NEW_EMAC_RGMII
- dev->features |= EMAC_FTR_HAS_RGMII;
-#else
- printk(KERN_ERR "%s: RGMII support not enabled !\n",
- np->full_name);
- return -ENXIO;
-#endif
- }
-
- /* Read MAC-address */
- p = of_get_property(np, "local-mac-address", NULL);
- if (p == NULL) {
- printk(KERN_ERR "%s: Can't find local-mac-address property\n",
- np->full_name);
- return -ENXIO;
- }
- memcpy(dev->ndev->dev_addr, p, 6);
-
- /* IAHT and GAHT filter parameterization */
- if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
- dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
- dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
- } else {
- dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
- dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
- }
-
- DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
- DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
- DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
- DBG(dev, "max_mtu : %d\n", dev->max_mtu);
- DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
-
- return 0;
-}
-
-static const struct net_device_ops emac_netdev_ops = {
- .ndo_open = emac_open,
- .ndo_stop = emac_close,
- .ndo_get_stats = emac_stats,
- .ndo_set_multicast_list = emac_set_multicast_list,
- .ndo_do_ioctl = emac_ioctl,
- .ndo_tx_timeout = emac_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_start_xmit = emac_start_xmit,
- .ndo_change_mtu = eth_change_mtu,
-};
-
-static const struct net_device_ops emac_gige_netdev_ops = {
- .ndo_open = emac_open,
- .ndo_stop = emac_close,
- .ndo_get_stats = emac_stats,
- .ndo_set_multicast_list = emac_set_multicast_list,
- .ndo_do_ioctl = emac_ioctl,
- .ndo_tx_timeout = emac_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_start_xmit = emac_start_xmit_sg,
- .ndo_change_mtu = emac_change_mtu,
-};
-
-static int __devinit emac_probe(struct platform_device *ofdev)
-{
- struct net_device *ndev;
- struct emac_instance *dev;
- struct device_node *np = ofdev->dev.of_node;
- struct device_node **blist = NULL;
- int err, i;
-
- /* Skip unused/unwired EMACS. We leave the check for an unused
- * property here for now, but new flat device trees should set a
- * status property to "disabled" instead.
- */
- if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
- return -ENODEV;
-
- /* Find ourselves in the bootlist if we are there */
- for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
- if (emac_boot_list[i] == np)
- blist = &emac_boot_list[i];
-
- /* Allocate our net_device structure */
- err = -ENOMEM;
- ndev = alloc_etherdev(sizeof(struct emac_instance));
- if (!ndev) {
- printk(KERN_ERR "%s: could not allocate ethernet device!\n",
- np->full_name);
- goto err_gone;
- }
- dev = netdev_priv(ndev);
- dev->ndev = ndev;
- dev->ofdev = ofdev;
- dev->blist = blist;
- SET_NETDEV_DEV(ndev, &ofdev->dev);
-
- /* Initialize some embedded data structures */
- mutex_init(&dev->mdio_lock);
- mutex_init(&dev->link_lock);
- spin_lock_init(&dev->lock);
- INIT_WORK(&dev->reset_work, emac_reset_work);
-
- /* Init various config data based on device-tree */
- err = emac_init_config(dev);
- if (err != 0)
- goto err_free;
-
- /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
- dev->emac_irq = irq_of_parse_and_map(np, 0);
- dev->wol_irq = irq_of_parse_and_map(np, 1);
- if (dev->emac_irq == NO_IRQ) {
- printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
- goto err_free;
- }
- ndev->irq = dev->emac_irq;
-
- /* Map EMAC regs */
- if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
- printk(KERN_ERR "%s: Can't get registers address\n",
- np->full_name);
- goto err_irq_unmap;
- }
- // TODO : request_mem_region
- dev->emacp = ioremap(dev->rsrc_regs.start,
- resource_size(&dev->rsrc_regs));
- if (dev->emacp == NULL) {
- printk(KERN_ERR "%s: Can't map device registers!\n",
- np->full_name);
- err = -ENOMEM;
- goto err_irq_unmap;
- }
-
- /* Wait for dependent devices */
- err = emac_wait_deps(dev);
- if (err) {
- printk(KERN_ERR
- "%s: Timeout waiting for dependent devices\n",
- np->full_name);
- /* display more info about what's missing ? */
- goto err_reg_unmap;
- }
- dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
- if (dev->mdio_dev != NULL)
- dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
-
- /* Register with MAL */
- dev->commac.ops = &emac_commac_ops;
- dev->commac.dev = dev;
- dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
- dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
- err = mal_register_commac(dev->mal, &dev->commac);
- if (err) {
- printk(KERN_ERR "%s: failed to register with mal %s!\n",
- np->full_name, dev->mal_dev->dev.of_node->full_name);
- goto err_rel_deps;
- }
- dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
- dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
-
- /* Get pointers to BD rings */
- dev->tx_desc =
- dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
- dev->rx_desc =
- dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
-
- DBG(dev, "tx_desc %p" NL, dev->tx_desc);
- DBG(dev, "rx_desc %p" NL, dev->rx_desc);
-
- /* Clean rings */
- memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
- memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
- memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
- memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
-
- /* Attach to ZMII, if needed */
- if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
- (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
- goto err_unreg_commac;
-
- /* Attach to RGMII, if needed */
- if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
- (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
- goto err_detach_zmii;
-
- /* Attach to TAH, if needed */
- if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
- (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
- goto err_detach_rgmii;
-
- /* Set some link defaults before we can find out real parameters */
- dev->phy.speed = SPEED_100;
- dev->phy.duplex = DUPLEX_FULL;
- dev->phy.autoneg = AUTONEG_DISABLE;
- dev->phy.pause = dev->phy.asym_pause = 0;
- dev->stop_timeout = STOP_TIMEOUT_100;
- INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
-
- /* Find PHY if any */
- err = emac_init_phy(dev);
- if (err != 0)
- goto err_detach_tah;
-
- if (dev->tah_dev) {
- ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
- ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
- }
- ndev->watchdog_timeo = 5 * HZ;
- if (emac_phy_supports_gige(dev->phy_mode)) {
- ndev->netdev_ops = &emac_gige_netdev_ops;
- dev->commac.ops = &emac_commac_sg_ops;
- } else
- ndev->netdev_ops = &emac_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
-
- netif_carrier_off(ndev);
-
- err = register_netdev(ndev);
- if (err) {
- printk(KERN_ERR "%s: failed to register net device (%d)!\n",
- np->full_name, err);
- goto err_detach_tah;
- }
-
- /* Set our drvdata last as we don't want them visible until we are
- * fully initialized
- */
- wmb();
- dev_set_drvdata(&ofdev->dev, dev);
-
- /* There's a new kid in town ! Let's tell everybody */
- wake_up_all(&emac_probe_wait);
-
-
- printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
- ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
-
- if (dev->phy_mode == PHY_MODE_SGMII)
- printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
-
- if (dev->phy.address >= 0)
- printk("%s: found %s PHY (0x%02x)\n", ndev->name,
- dev->phy.def->name, dev->phy.address);
-
- emac_dbg_register(dev);
-
- /* Life is good */
- return 0;
-
- /* I have a bad feeling about this ... */
-
- err_detach_tah:
- if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
- tah_detach(dev->tah_dev, dev->tah_port);
- err_detach_rgmii:
- if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
- rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
- err_detach_zmii:
- if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
- zmii_detach(dev->zmii_dev, dev->zmii_port);
- err_unreg_commac:
- mal_unregister_commac(dev->mal, &dev->commac);
- err_rel_deps:
- emac_put_deps(dev);
- err_reg_unmap:
- iounmap(dev->emacp);
- err_irq_unmap:
- if (dev->wol_irq != NO_IRQ)
- irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq != NO_IRQ)
- irq_dispose_mapping(dev->emac_irq);
- err_free:
- free_netdev(ndev);
- err_gone:
- /* if we were on the bootlist, remove us as we won't show up and
- * wake up all waiters to notify them in case they were waiting
- * on us
- */
- if (blist) {
- *blist = NULL;
- wake_up_all(&emac_probe_wait);
- }
- return err;
-}
-
-static int __devexit emac_remove(struct platform_device *ofdev)
-{
- struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
-
- DBG(dev, "remove" NL);
-
- dev_set_drvdata(&ofdev->dev, NULL);
-
- unregister_netdev(dev->ndev);
-
- cancel_work_sync(&dev->reset_work);
-
- if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
- tah_detach(dev->tah_dev, dev->tah_port);
- if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
- rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
- if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
- zmii_detach(dev->zmii_dev, dev->zmii_port);
-
- mal_unregister_commac(dev->mal, &dev->commac);
- emac_put_deps(dev);
-
- emac_dbg_unregister(dev);
- iounmap(dev->emacp);
-
- if (dev->wol_irq != NO_IRQ)
- irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq != NO_IRQ)
- irq_dispose_mapping(dev->emac_irq);
-
- free_netdev(dev->ndev);
-
- return 0;
-}
-
-/* XXX Features in here should be replaced by properties... */
-static struct of_device_id emac_match[] =
-{
- {
- .type = "network",
- .compatible = "ibm,emac",
- },
- {
- .type = "network",
- .compatible = "ibm,emac4",
- },
- {
- .type = "network",
- .compatible = "ibm,emac4sync",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, emac_match);
-
-static struct platform_driver emac_driver = {
- .driver = {
- .name = "emac",
- .owner = THIS_MODULE,
- .of_match_table = emac_match,
- },
- .probe = emac_probe,
- .remove = emac_remove,
-};
-
-static void __init emac_make_bootlist(void)
-{
- struct device_node *np = NULL;
- int j, max, i = 0, k;
- int cell_indices[EMAC_BOOT_LIST_SIZE];
-
- /* Collect EMACs */
- while((np = of_find_all_nodes(np)) != NULL) {
- const u32 *idx;
-
- if (of_match_node(emac_match, np) == NULL)
- continue;
- if (of_get_property(np, "unused", NULL))
- continue;
- idx = of_get_property(np, "cell-index", NULL);
- if (idx == NULL)
- continue;
- cell_indices[i] = *idx;
- emac_boot_list[i++] = of_node_get(np);
- if (i >= EMAC_BOOT_LIST_SIZE) {
- of_node_put(np);
- break;
- }
- }
- max = i;
-
- /* Bubble sort them (doh, what a creative algorithm :-) */
- for (i = 0; max > 1 && (i < (max - 1)); i++)
- for (j = i; j < max; j++) {
- if (cell_indices[i] > cell_indices[j]) {
- np = emac_boot_list[i];
- emac_boot_list[i] = emac_boot_list[j];
- emac_boot_list[j] = np;
- k = cell_indices[i];
- cell_indices[i] = cell_indices[j];
- cell_indices[j] = k;
- }
- }
-}
-
-static int __init emac_init(void)
-{
- int rc;
-
- printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
-
- /* Init debug stuff */
- emac_init_debug();
-
- /* Build EMAC boot list */
- emac_make_bootlist();
-
- /* Init submodules */
- rc = mal_init();
- if (rc)
- goto err;
- rc = zmii_init();
- if (rc)
- goto err_mal;
- rc = rgmii_init();
- if (rc)
- goto err_zmii;
- rc = tah_init();
- if (rc)
- goto err_rgmii;
- rc = platform_driver_register(&emac_driver);
- if (rc)
- goto err_tah;
-
- return 0;
-
- err_tah:
- tah_exit();
- err_rgmii:
- rgmii_exit();
- err_zmii:
- zmii_exit();
- err_mal:
- mal_exit();
- err:
- return rc;
-}
-
-static void __exit emac_exit(void)
-{
- int i;
-
- platform_driver_unregister(&emac_driver);
-
- tah_exit();
- rgmii_exit();
- zmii_exit();
- mal_exit();
- emac_fini_debug();
-
- /* Destroy EMAC boot list */
- for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
- if (emac_boot_list[i])
- of_node_put(emac_boot_list[i]);
-}
-
-module_init(emac_init);
-module_exit(emac_exit);
diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h
deleted file mode 100644
index 4fec0844d59..00000000000
--- a/drivers/net/ibm_newemac/core.h
+++ /dev/null
@@ -1,462 +0,0 @@
-/*
- * drivers/net/ibm_newemac/core.h
- *
- * Driver for PowerPC 4xx on-chip ethernet controller.
- *
- * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- *
- * Based on the arch/ppc version of the driver:
- *
- * Copyright (c) 2004, 2005 Zultys Technologies.
- * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
- *
- * Based on original work by
- * Armin Kuster <akuster@mvista.com>
- * Johnnie Peters <jpeters@mvista.com>
- * Copyright 2000, 2001 MontaVista Softare Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-#ifndef __IBM_NEWEMAC_CORE_H
-#define __IBM_NEWEMAC_CORE_H
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/spinlock.h>
-#include <linux/of_platform.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/dcr.h>
-
-#include "emac.h"
-#include "phy.h"
-#include "zmii.h"
-#include "rgmii.h"
-#include "mal.h"
-#include "tah.h"
-#include "debug.h"
-
-#define NUM_TX_BUFF CONFIG_IBM_NEW_EMAC_TXB
-#define NUM_RX_BUFF CONFIG_IBM_NEW_EMAC_RXB
-
-/* Simple sanity check */
-#if NUM_TX_BUFF > 256 || NUM_RX_BUFF > 256
-#error Invalid number of buffer descriptors (greater than 256)
-#endif
-
-#define EMAC_MIN_MTU 46
-
-/* Maximum L2 header length (VLAN tagged, no FCS) */
-#define EMAC_MTU_OVERHEAD (6 * 2 + 2 + 4)
-
-/* RX BD size for the given MTU */
-static inline int emac_rx_size(int mtu)
-{
- if (mtu > ETH_DATA_LEN)
- return MAL_MAX_RX_SIZE;
- else
- return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD);
-}
-
-#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment())
-
-#define EMAC_RX_SKB_HEADROOM \
- EMAC_DMA_ALIGN(CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM)
-
-/* Size of RX skb for the given MTU */
-static inline int emac_rx_skb_size(int mtu)
-{
- int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu));
- return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM;
-}
-
-/* RX DMA sync size */
-static inline int emac_rx_sync_size(int mtu)
-{
- return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2);
-}
-
-/* Driver statistcs is split into two parts to make it more cache friendly:
- * - normal statistics (packet count, etc)
- * - error statistics
- *
- * When statistics is requested by ethtool, these parts are concatenated,
- * normal one goes first.
- *
- * Please, keep these structures in sync with emac_stats_keys.
- */
-
-/* Normal TX/RX Statistics */
-struct emac_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- u64 rx_packets_csum;
- u64 tx_packets_csum;
-};
-
-/* Error statistics */
-struct emac_error_stats {
- u64 tx_undo;
-
- /* Software RX Errors */
- u64 rx_dropped_stack;
- u64 rx_dropped_oom;
- u64 rx_dropped_error;
- u64 rx_dropped_resize;
- u64 rx_dropped_mtu;
- u64 rx_stopped;
- /* BD reported RX errors */
- u64 rx_bd_errors;
- u64 rx_bd_overrun;
- u64 rx_bd_bad_packet;
- u64 rx_bd_runt_packet;
- u64 rx_bd_short_event;
- u64 rx_bd_alignment_error;
- u64 rx_bd_bad_fcs;
- u64 rx_bd_packet_too_long;
- u64 rx_bd_out_of_range;
- u64 rx_bd_in_range;
- /* EMAC IRQ reported RX errors */
- u64 rx_parity;
- u64 rx_fifo_overrun;
- u64 rx_overrun;
- u64 rx_bad_packet;
- u64 rx_runt_packet;
- u64 rx_short_event;
- u64 rx_alignment_error;
- u64 rx_bad_fcs;
- u64 rx_packet_too_long;
- u64 rx_out_of_range;
- u64 rx_in_range;
-
- /* Software TX Errors */
- u64 tx_dropped;
- /* BD reported TX errors */
- u64 tx_bd_errors;
- u64 tx_bd_bad_fcs;
- u64 tx_bd_carrier_loss;
- u64 tx_bd_excessive_deferral;
- u64 tx_bd_excessive_collisions;
- u64 tx_bd_late_collision;
- u64 tx_bd_multple_collisions;
- u64 tx_bd_single_collision;
- u64 tx_bd_underrun;
- u64 tx_bd_sqe;
- /* EMAC IRQ reported TX errors */
- u64 tx_parity;
- u64 tx_underrun;
- u64 tx_sqe;
- u64 tx_errors;
-};
-
-#define EMAC_ETHTOOL_STATS_COUNT ((sizeof(struct emac_stats) + \
- sizeof(struct emac_error_stats)) \
- / sizeof(u64))
-
-struct emac_instance {
- struct net_device *ndev;
- struct resource rsrc_regs;
- struct emac_regs __iomem *emacp;
- struct platform_device *ofdev;
- struct device_node **blist; /* bootlist entry */
-
- /* MAL linkage */
- u32 mal_ph;
- struct platform_device *mal_dev;
- u32 mal_rx_chan;
- u32 mal_tx_chan;
- struct mal_instance *mal;
- struct mal_commac commac;
-
- /* PHY infos */
- u32 phy_mode;
- u32 phy_map;
- u32 phy_address;
- u32 phy_feat_exc;
- struct mii_phy phy;
- struct mutex link_lock;
- struct delayed_work link_work;
- int link_polling;
-
- /* GPCS PHY infos */
- u32 gpcs_address;
-
- /* Shared MDIO if any */
- u32 mdio_ph;
- struct platform_device *mdio_dev;
- struct emac_instance *mdio_instance;
- struct mutex mdio_lock;
-
- /* ZMII infos if any */
- u32 zmii_ph;
- u32 zmii_port;
- struct platform_device *zmii_dev;
-
- /* RGMII infos if any */
- u32 rgmii_ph;
- u32 rgmii_port;
- struct platform_device *rgmii_dev;
-
- /* TAH infos if any */
- u32 tah_ph;
- u32 tah_port;
- struct platform_device *tah_dev;
-
- /* IRQs */
- int wol_irq;
- int emac_irq;
-
- /* OPB bus frequency in Mhz */
- u32 opb_bus_freq;
-
- /* Cell index within an ASIC (for clk mgmnt) */
- u32 cell_index;
-
- /* Max supported MTU */
- u32 max_mtu;
-
- /* Feature bits (from probe table) */
- unsigned int features;
-
- /* Tx and Rx fifo sizes & other infos in bytes */
- u32 tx_fifo_size;
- u32 tx_fifo_size_gige;
- u32 rx_fifo_size;
- u32 rx_fifo_size_gige;
- u32 fifo_entry_size;
- u32 mal_burst_size; /* move to MAL ? */
-
- /* IAHT and GAHT filter parameterization */
- u32 xaht_slots_shift;
- u32 xaht_width_shift;
-
- /* Descriptor management
- */
- struct mal_descriptor *tx_desc;
- int tx_cnt;
- int tx_slot;
- int ack_slot;
-
- struct mal_descriptor *rx_desc;
- int rx_slot;
- struct sk_buff *rx_sg_skb; /* 1 */
- int rx_skb_size;
- int rx_sync_size;
-
- struct sk_buff *tx_skb[NUM_TX_BUFF];
- struct sk_buff *rx_skb[NUM_RX_BUFF];
-
- /* Stats
- */
- struct emac_error_stats estats;
- struct net_device_stats nstats;
- struct emac_stats stats;
-
- /* Misc
- */
- int reset_failed;
- int stop_timeout; /* in us */
- int no_mcast;
- int mcast_pending;
- int opened;
- struct work_struct reset_work;
- spinlock_t lock;
-};
-
-/*
- * Features of various EMAC implementations
- */
-
-/*
- * No flow control on 40x according to the original driver
- */
-#define EMAC_FTR_NO_FLOW_CONTROL_40x 0x00000001
-/*
- * Cell is an EMAC4
- */
-#define EMAC_FTR_EMAC4 0x00000002
-/*
- * For the 440SPe, AMCC inexplicably changed the polarity of
- * the "operation complete" bit in the MII control register.
- */
-#define EMAC_FTR_STACR_OC_INVERT 0x00000004
-/*
- * Set if we have a TAH.
- */
-#define EMAC_FTR_HAS_TAH 0x00000008
-/*
- * Set if we have a ZMII.
- */
-#define EMAC_FTR_HAS_ZMII 0x00000010
-/*
- * Set if we have a RGMII.
- */
-#define EMAC_FTR_HAS_RGMII 0x00000020
-/*
- * Set if we have new type STACR with STAOPC
- */
-#define EMAC_FTR_HAS_NEW_STACR 0x00000040
-/*
- * Set if we need phy clock workaround for 440gx
- */
-#define EMAC_FTR_440GX_PHY_CLK_FIX 0x00000080
-/*
- * Set if we need phy clock workaround for 440ep or 440gr
- */
-#define EMAC_FTR_440EP_PHY_CLK_FIX 0x00000100
-/*
- * The 405EX and 460EX contain the EMAC4SYNC core
- */
-#define EMAC_FTR_EMAC4SYNC 0x00000200
-/*
- * Set if we need phy clock workaround for 460ex or 460gt
- */
-#define EMAC_FTR_460EX_PHY_CLK_FIX 0x00000400
-
-
-/* Right now, we don't quite handle the always/possible masks on the
- * most optimal way as we don't have a way to say something like
- * always EMAC4. Patches welcome.
- */
-enum {
- EMAC_FTRS_ALWAYS = 0,
-
- EMAC_FTRS_POSSIBLE =
-#ifdef CONFIG_IBM_NEW_EMAC_EMAC4
- EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC |
- EMAC_FTR_HAS_NEW_STACR |
- EMAC_FTR_STACR_OC_INVERT | EMAC_FTR_440GX_PHY_CLK_FIX |
-#endif
-#ifdef CONFIG_IBM_NEW_EMAC_TAH
- EMAC_FTR_HAS_TAH |
-#endif
-#ifdef CONFIG_IBM_NEW_EMAC_ZMII
- EMAC_FTR_HAS_ZMII |
-#endif
-#ifdef CONFIG_IBM_NEW_EMAC_RGMII
- EMAC_FTR_HAS_RGMII |
-#endif
-#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL
- EMAC_FTR_NO_FLOW_CONTROL_40x |
-#endif
- EMAC_FTR_460EX_PHY_CLK_FIX |
- EMAC_FTR_440EP_PHY_CLK_FIX,
-};
-
-static inline int emac_has_feature(struct emac_instance *dev,
- unsigned long feature)
-{
- return (EMAC_FTRS_ALWAYS & feature) ||
- (EMAC_FTRS_POSSIBLE & dev->features & feature);
-}
-
-/*
- * Various instances of the EMAC core have varying 1) number of
- * address match slots, 2) width of the registers for handling address
- * match slots, 3) number of registers for handling address match
- * slots and 4) base offset for those registers.
- *
- * These macros and inlines handle these differences based on
- * parameters supplied by the device structure which are, in turn,
- * initialized based on the "compatible" entry in the device tree.
- */
-
-#define EMAC4_XAHT_SLOTS_SHIFT 6
-#define EMAC4_XAHT_WIDTH_SHIFT 4
-
-#define EMAC4SYNC_XAHT_SLOTS_SHIFT 8
-#define EMAC4SYNC_XAHT_WIDTH_SHIFT 5
-
-#define EMAC_XAHT_SLOTS(dev) (1 << (dev)->xaht_slots_shift)
-#define EMAC_XAHT_WIDTH(dev) (1 << (dev)->xaht_width_shift)
-#define EMAC_XAHT_REGS(dev) (1 << ((dev)->xaht_slots_shift - \
- (dev)->xaht_width_shift))
-
-#define EMAC_XAHT_CRC_TO_SLOT(dev, crc) \
- ((EMAC_XAHT_SLOTS(dev) - 1) - \
- ((crc) >> ((sizeof (u32) * BITS_PER_BYTE) - \
- (dev)->xaht_slots_shift)))
-
-#define EMAC_XAHT_SLOT_TO_REG(dev, slot) \
- ((slot) >> (dev)->xaht_width_shift)
-
-#define EMAC_XAHT_SLOT_TO_MASK(dev, slot) \
- ((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >> \
- ((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1)))
-
-static inline u32 *emac_xaht_base(struct emac_instance *dev)
-{
- struct emac_regs __iomem *p = dev->emacp;
- int offset;
-
- /* The first IAHT entry always is the base of the block of
- * IAHT and GAHT registers.
- */
- if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC))
- offset = offsetof(struct emac_regs, u1.emac4sync.iaht1);
- else
- offset = offsetof(struct emac_regs, u0.emac4.iaht1);
-
- return (u32 *)((ptrdiff_t)p + offset);
-}
-
-static inline u32 *emac_gaht_base(struct emac_instance *dev)
-{
- /* GAHT registers always come after an identical number of
- * IAHT registers.
- */
- return emac_xaht_base(dev) + EMAC_XAHT_REGS(dev);
-}
-
-static inline u32 *emac_iaht_base(struct emac_instance *dev)
-{
- /* IAHT registers always come before an identical number of
- * GAHT registers.
- */
- return emac_xaht_base(dev);
-}
-
-/* Ethtool get_regs complex data.
- * We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH
- * when available.
- *
- * Returned BLOB consists of the ibm_emac_ethtool_regs_hdr,
- * MAL registers, EMAC registers and optional ZMII, RGMII, TAH registers.
- * Each register component is preceded with emac_ethtool_regs_subhdr.
- * Order of the optional headers follows their relative bit posititions
- * in emac_ethtool_regs_hdr.components
- */
-#define EMAC_ETHTOOL_REGS_ZMII 0x00000001
-#define EMAC_ETHTOOL_REGS_RGMII 0x00000002
-#define EMAC_ETHTOOL_REGS_TAH 0x00000004
-
-struct emac_ethtool_regs_hdr {
- u32 components;
-};
-
-struct emac_ethtool_regs_subhdr {
- u32 version;
- u32 index;
-};
-
-#define EMAC_ETHTOOL_REGS_VER 0
-#define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
- (dev)->rsrc_regs.start + 1)
-#define EMAC4_ETHTOOL_REGS_VER 1
-#define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
- (dev)->rsrc_regs.start + 1)
-
-#endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ibm_newemac/debug.h b/drivers/net/ibm_newemac/debug.h
deleted file mode 100644
index e596c77ccdf..00000000000
--- a/drivers/net/ibm_newemac/debug.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * drivers/net/ibm_newemac/debug.h
- *
- * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
- *
- * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- *
- * Based on the arch/ppc version of the driver:
- *
- * Copyright (c) 2004, 2005 Zultys Technologies
- * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-#ifndef __IBM_NEWEMAC_DEBUG_H
-#define __IBM_NEWEMAC_DEBUG_H
-
-#include <linux/init.h>
-
-#include "core.h"
-
-#if defined(CONFIG_IBM_NEW_EMAC_DEBUG)
-
-struct emac_instance;
-struct mal_instance;
-
-extern void emac_dbg_register(struct emac_instance *dev);
-extern void emac_dbg_unregister(struct emac_instance *dev);
-extern void mal_dbg_register(struct mal_instance *mal);
-extern void mal_dbg_unregister(struct mal_instance *mal);
-extern int emac_init_debug(void) __init;
-extern void emac_fini_debug(void) __exit;
-extern void emac_dbg_dump_all(void);
-
-# define DBG_LEVEL 1
-
-#else
-
-# define emac_dbg_register(x) do { } while(0)
-# define emac_dbg_unregister(x) do { } while(0)
-# define mal_dbg_register(x) do { } while(0)
-# define mal_dbg_unregister(x) do { } while(0)
-# define emac_init_debug() do { } while(0)
-# define emac_fini_debug() do { } while(0)
-# define emac_dbg_dump_all() do { } while(0)
-
-# define DBG_LEVEL 0
-
-#endif
-
-#define EMAC_DBG(d, name, fmt, arg...) \
- printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg)
-
-#if DBG_LEVEL > 0
-# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x)
-# define MAL_DBG(d,f,x...) EMAC_DBG(d, mal, f, ##x)
-# define ZMII_DBG(d,f,x...) EMAC_DBG(d, zmii, f, ##x)
-# define RGMII_DBG(d,f,x...) EMAC_DBG(d, rgmii, f, ##x)
-# define NL "\n"
-#else
-# define DBG(f,x...) ((void)0)
-# define MAL_DBG(d,f,x...) ((void)0)
-# define ZMII_DBG(d,f,x...) ((void)0)
-# define RGMII_DBG(d,f,x...) ((void)0)
-#endif
-#if DBG_LEVEL > 1
-# define DBG2(d,f,x...) DBG(d,f, ##x)
-# define MAL_DBG2(d,f,x...) MAL_DBG(d,f, ##x)
-# define ZMII_DBG2(d,f,x...) ZMII_DBG(d,f, ##x)
-# define RGMII_DBG2(d,f,x...) RGMII_DBG(d,f, ##x)
-#else
-# define DBG2(f,x...) ((void)0)
-# define MAL_DBG2(d,f,x...) ((void)0)
-# define ZMII_DBG2(d,f,x...) ((void)0)
-# define RGMII_DBG2(d,f,x...) ((void)0)
-#endif
-
-#endif /* __IBM_NEWEMAC_DEBUG_H */
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
deleted file mode 100644
index 265e151b66c..00000000000
--- a/drivers/net/igb/igb.h
+++ /dev/null
@@ -1,415 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-
-/* Linux PRO/1000 Ethernet Driver main header file */
-
-#ifndef _IGB_H_
-#define _IGB_H_
-
-#include "e1000_mac.h"
-#include "e1000_82575.h"
-
-#include <linux/clocksource.h>
-#include <linux/timecompare.h>
-#include <linux/net_tstamp.h>
-#include <linux/bitops.h>
-#include <linux/if_vlan.h>
-
-struct igb_adapter;
-
-/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */
-#define IGB_START_ITR 648
-
-/* TX/RX descriptor defines */
-#define IGB_DEFAULT_TXD 256
-#define IGB_MIN_TXD 80
-#define IGB_MAX_TXD 4096
-
-#define IGB_DEFAULT_RXD 256
-#define IGB_MIN_RXD 80
-#define IGB_MAX_RXD 4096
-
-#define IGB_DEFAULT_ITR 3 /* dynamic */
-#define IGB_MAX_ITR_USECS 10000
-#define IGB_MIN_ITR_USECS 10
-#define NON_Q_VECTORS 1
-#define MAX_Q_VECTORS 8
-
-/* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
- (hw->mac.type > e1000_82575 ? 8 : 4))
-#define IGB_ABS_MAX_TX_QUEUES 8
-#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES
-
-#define IGB_MAX_VF_MC_ENTRIES 30
-#define IGB_MAX_VF_FUNCTIONS 8
-#define IGB_MAX_VFTA_ENTRIES 128
-
-struct vf_data_storage {
- unsigned char vf_mac_addresses[ETH_ALEN];
- u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
- u16 num_vf_mc_hashes;
- u16 vlans_enabled;
- u32 flags;
- unsigned long last_nack;
- u16 pf_vlan; /* When set, guest VLAN config not allowed. */
- u16 pf_qos;
- u16 tx_rate;
-};
-
-#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
-#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
-#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
-#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
-
-/* RX descriptor control thresholds.
- * PTHRESH - MAC will consider prefetch if it has fewer than this number of
- * descriptors available in its onboard memory.
- * Setting this to 0 disables RX descriptor prefetch.
- * HTHRESH - MAC will only prefetch if there are at least this many descriptors
- * available in host memory.
- * If PTHRESH is 0, this should also be 0.
- * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
- * descriptors until either it has this many to write back, or the
- * ITR timer expires.
- */
-#define IGB_RX_PTHRESH 8
-#define IGB_RX_HTHRESH 8
-#define IGB_RX_WTHRESH 1
-#define IGB_TX_PTHRESH 8
-#define IGB_TX_HTHRESH 1
-#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 16)
-
-/* this is the size past which hardware will drop packets when setting LPE=0 */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
-
-/* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_64 64 /* Used for packet split */
-#define IGB_RXBUFFER_128 128 /* Used for packet split */
-#define IGB_RXBUFFER_1024 1024
-#define IGB_RXBUFFER_2048 2048
-#define IGB_RXBUFFER_16384 16384
-
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
-
-/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGB_TX_QUEUE_WAKE 16
-/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-
-#define AUTO_ALL_MODES 0
-#define IGB_EEPROM_APME 0x0400
-
-#ifndef IGB_MASTER_SLAVE
-/* Switch to override PHY master/slave setting */
-#define IGB_MASTER_SLAVE e1000_ms_hw_default
-#endif
-
-#define IGB_MNG_VLAN_NONE -1
-
-/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
-struct igb_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
- union {
- /* TX */
- struct {
- unsigned long time_stamp;
- u16 length;
- u16 next_to_watch;
- unsigned int bytecount;
- u16 gso_segs;
- u8 tx_flags;
- u8 mapped_as_page;
- };
- /* RX */
- struct {
- struct page *page;
- dma_addr_t page_dma;
- u16 page_offset;
- };
- };
-};
-
-struct igb_tx_queue_stats {
- u64 packets;
- u64 bytes;
- u64 restart_queue;
- u64 restart_queue2;
-};
-
-struct igb_rx_queue_stats {
- u64 packets;
- u64 bytes;
- u64 drops;
- u64 csum_err;
- u64 alloc_failed;
-};
-
-struct igb_q_vector {
- struct igb_adapter *adapter; /* backlink */
- struct igb_ring *rx_ring;
- struct igb_ring *tx_ring;
- struct napi_struct napi;
-
- u32 eims_value;
- u16 cpu;
-
- u16 itr_val;
- u8 set_itr;
- void __iomem *itr_register;
-
- char name[IFNAMSIZ + 9];
-};
-
-struct igb_ring {
- struct igb_q_vector *q_vector; /* backlink to q_vector */
- struct net_device *netdev; /* back pointer to net_device */
- struct device *dev; /* device pointer for dma mapping */
- dma_addr_t dma; /* phys address of the ring */
- void *desc; /* descriptor ring memory */
- unsigned int size; /* length of desc. ring in bytes */
- u16 count; /* number of desc. in the ring */
- u16 next_to_use;
- u16 next_to_clean;
- u8 queue_index;
- u8 reg_idx;
- void __iomem *head;
- void __iomem *tail;
- struct igb_buffer *buffer_info; /* array of buffer info structs */
-
- unsigned int total_bytes;
- unsigned int total_packets;
-
- u32 flags;
-
- union {
- /* TX */
- struct {
- struct igb_tx_queue_stats tx_stats;
- struct u64_stats_sync tx_syncp;
- struct u64_stats_sync tx_syncp2;
- bool detect_tx_hung;
- };
- /* RX */
- struct {
- struct igb_rx_queue_stats rx_stats;
- struct u64_stats_sync rx_syncp;
- u32 rx_buffer_len;
- };
- };
-};
-
-#define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */
-#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */
-
-#define IGB_RING_FLAG_TX_CTX_IDX 0x00000001 /* HW requires context index */
-
-#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
-
-#define E1000_RX_DESC_ADV(R, i) \
- (&(((union e1000_adv_rx_desc *)((R).desc))[i]))
-#define E1000_TX_DESC_ADV(R, i) \
- (&(((union e1000_adv_tx_desc *)((R).desc))[i]))
-#define E1000_TX_CTXTDESC_ADV(R, i) \
- (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
-
-/* igb_desc_unused - calculate if we have unused descriptors */
-static inline int igb_desc_unused(struct igb_ring *ring)
-{
- if (ring->next_to_clean > ring->next_to_use)
- return ring->next_to_clean - ring->next_to_use - 1;
-
- return ring->count + ring->next_to_clean - ring->next_to_use - 1;
-}
-
-/* board specific private data structure */
-struct igb_adapter {
- struct timer_list watchdog_timer;
- struct timer_list phy_info_timer;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u16 mng_vlan_id;
- u32 bd_number;
- u32 wol;
- u32 en_mng_pt;
- u16 link_speed;
- u16 link_duplex;
-
- /* Interrupt Throttle Rate */
- u32 rx_itr_setting;
- u32 tx_itr_setting;
- u16 tx_itr;
- u16 rx_itr;
-
- struct work_struct reset_task;
- struct work_struct watchdog_task;
- bool fc_autoneg;
- u8 tx_timeout_factor;
- struct timer_list blink_timer;
- unsigned long led_status;
-
- /* TX */
- struct igb_ring *tx_ring[16];
- u32 tx_timeout_count;
-
- /* RX */
- struct igb_ring *rx_ring[16];
- int num_tx_queues;
- int num_rx_queues;
-
- u32 max_frame_size;
- u32 min_frame_size;
-
- /* OS defined structs */
- struct net_device *netdev;
- struct pci_dev *pdev;
- struct cyclecounter cycles;
- struct timecounter clock;
- struct timecompare compare;
- struct hwtstamp_config hwtstamp_config;
-
- spinlock_t stats64_lock;
- struct rtnl_link_stats64 stats64;
-
- /* structs defined in e1000_hw.h */
- struct e1000_hw hw;
- struct e1000_hw_stats stats;
- struct e1000_phy_info phy_info;
- struct e1000_phy_stats phy_stats;
-
- u32 test_icr;
- struct igb_ring test_tx_ring;
- struct igb_ring test_rx_ring;
-
- int msg_enable;
-
- unsigned int num_q_vectors;
- struct igb_q_vector *q_vector[MAX_Q_VECTORS];
- struct msix_entry *msix_entries;
- u32 eims_enable_mask;
- u32 eims_other;
-
- /* to not mess up cache alignment, always add to the bottom */
- unsigned long state;
- unsigned int flags;
- u32 eeprom_wol;
-
- struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
- u16 tx_ring_count;
- u16 rx_ring_count;
- unsigned int vfs_allocated_count;
- struct vf_data_storage *vf_data;
- int vf_rate_link_speed;
- u32 rss_queues;
- u32 wvbr;
-};
-
-#define IGB_FLAG_HAS_MSI (1 << 0)
-#define IGB_FLAG_DCA_ENABLED (1 << 1)
-#define IGB_FLAG_QUAD_PORT_A (1 << 2)
-#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
-#define IGB_FLAG_DMAC (1 << 4)
-
-/* DMA Coalescing defines */
-#define IGB_MIN_TXPBSIZE 20408
-#define IGB_TX_BUF_4096 4096
-#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
-
-#define IGB_82576_TSYNC_SHIFT 19
-#define IGB_82580_TSYNC_SHIFT 24
-#define IGB_TS_HDR_LEN 16
-enum e1000_state_t {
- __IGB_TESTING,
- __IGB_RESETTING,
- __IGB_DOWN
-};
-
-enum igb_boards {
- board_82575,
-};
-
-extern char igb_driver_name[];
-extern char igb_driver_version[];
-
-extern int igb_up(struct igb_adapter *);
-extern void igb_down(struct igb_adapter *);
-extern void igb_reinit_locked(struct igb_adapter *);
-extern void igb_reset(struct igb_adapter *);
-extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
-extern int igb_setup_tx_resources(struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_ring *);
-extern void igb_free_tx_resources(struct igb_ring *);
-extern void igb_free_rx_resources(struct igb_ring *);
-extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_setup_tctl(struct igb_adapter *);
-extern void igb_setup_rctl(struct igb_adapter *);
-extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
-extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
- struct igb_buffer *);
-extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
-extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
-extern bool igb_has_link(struct igb_adapter *adapter);
-extern void igb_set_ethtool_ops(struct net_device *);
-extern void igb_power_up_link(struct igb_adapter *);
-
-static inline s32 igb_reset_phy(struct e1000_hw *hw)
-{
- if (hw->phy.ops.reset)
- return hw->phy.ops.reset(hw);
-
- return 0;
-}
-
-static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- if (hw->phy.ops.read_reg)
- return hw->phy.ops.read_reg(hw, offset, data);
-
- return 0;
-}
-
-static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
-{
- if (hw->phy.ops.write_reg)
- return hw->phy.ops.write_reg(hw, offset, data);
-
- return 0;
-}
-
-static inline s32 igb_get_phy_info(struct e1000_hw *hw)
-{
- if (hw->phy.ops.get_phy_info)
- return hw->phy.ops.get_phy_info(hw);
-
- return 0;
-}
-
-#endif /* _IGB_H_ */
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
deleted file mode 100644
index b0b14d63dfb..00000000000
--- a/drivers/net/igbvf/ethtool.c
+++ /dev/null
@@ -1,534 +0,0 @@
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/* ethtool support for igbvf */
-
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-
-#include "igbvf.h"
-#include <linux/if_vlan.h>
-
-
-struct igbvf_stats {
- char stat_string[ETH_GSTRING_LEN];
- int sizeof_stat;
- int stat_offset;
- int base_stat_offset;
-};
-
-#define IGBVF_STAT(current, base) \
- sizeof(((struct igbvf_adapter *)0)->current), \
- offsetof(struct igbvf_adapter, current), \
- offsetof(struct igbvf_adapter, base)
-
-static const struct igbvf_stats igbvf_gstrings_stats[] = {
- { "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) },
- { "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) },
- { "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) },
- { "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) },
- { "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) },
- { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
- { "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) },
- { "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) },
- { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) },
- { "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) },
- { "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) },
- { "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) },
- { "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) },
-};
-
-#define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats)
-
-static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
- "Link test (on/offline)"
-};
-
-#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
-
-static int igbvf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 status;
-
- ecmd->supported = SUPPORTED_1000baseT_Full;
-
- ecmd->advertising = ADVERTISED_1000baseT_Full;
-
- ecmd->port = -1;
- ecmd->transceiver = XCVR_DUMMY1;
-
- status = er32(STATUS);
- if (status & E1000_STATUS_LU) {
- if (status & E1000_STATUS_SPEED_1000)
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
- else if (status & E1000_STATUS_SPEED_100)
- ethtool_cmd_speed_set(ecmd, SPEED_100);
- else
- ethtool_cmd_speed_set(ecmd, SPEED_10);
-
- if (status & E1000_STATUS_FD)
- ecmd->duplex = DUPLEX_FULL;
- else
- ecmd->duplex = DUPLEX_HALF;
- } else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
- }
-
- ecmd->autoneg = AUTONEG_DISABLE;
-
- return 0;
-}
-
-static int igbvf_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
-{
- return -EOPNOTSUPP;
-}
-
-static void igbvf_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- return;
-}
-
-static int igbvf_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- return -EOPNOTSUPP;
-}
-
-static u32 igbvf_get_rx_csum(struct net_device *netdev)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- return !(adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED);
-}
-
-static int igbvf_set_rx_csum(struct net_device *netdev, u32 data)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- if (data)
- adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
- else
- adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;
-
- return 0;
-}
-
-static u32 igbvf_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_IP_CSUM) != 0;
-}
-
-static int igbvf_set_tx_csum(struct net_device *netdev, u32 data)
-{
- if (data)
- netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- else
- netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- return 0;
-}
-
-static int igbvf_set_tso(struct net_device *netdev, u32 data)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- if (data) {
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- } else {
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- }
-
- dev_info(&adapter->pdev->dev, "TSO is %s\n",
- data ? "Enabled" : "Disabled");
- return 0;
-}
-
-static u32 igbvf_get_msglevel(struct net_device *netdev)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- return adapter->msg_enable;
-}
-
-static void igbvf_set_msglevel(struct net_device *netdev, u32 data)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- adapter->msg_enable = data;
-}
-
-static int igbvf_get_regs_len(struct net_device *netdev)
-{
-#define IGBVF_REGS_LEN 8
- return IGBVF_REGS_LEN * sizeof(u32);
-}
-
-static void igbvf_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 *regs_buff = p;
-
- memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
-
- regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
- adapter->pdev->device;
-
- regs_buff[0] = er32(CTRL);
- regs_buff[1] = er32(STATUS);
-
- regs_buff[2] = er32(RDLEN(0));
- regs_buff[3] = er32(RDH(0));
- regs_buff[4] = er32(RDT(0));
-
- regs_buff[5] = er32(TDLEN(0));
- regs_buff[6] = er32(TDH(0));
- regs_buff[7] = er32(TDT(0));
-}
-
-static int igbvf_get_eeprom_len(struct net_device *netdev)
-{
- return 0;
-}
-
-static int igbvf_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
-{
- return -EOPNOTSUPP;
-}
-
-static int igbvf_set_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
-{
- return -EOPNOTSUPP;
-}
-
-static void igbvf_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32] = "N/A";
-
- strncpy(drvinfo->driver, igbvf_driver_name, 32);
- strncpy(drvinfo->version, igbvf_driver_version, 32);
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- drvinfo->regdump_len = igbvf_get_regs_len(netdev);
- drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
-}
-
-static void igbvf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct igbvf_ring *tx_ring = adapter->tx_ring;
- struct igbvf_ring *rx_ring = adapter->rx_ring;
-
- ring->rx_max_pending = IGBVF_MAX_RXD;
- ring->tx_max_pending = IGBVF_MAX_TXD;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
-}
-
-static int igbvf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct igbvf_ring *temp_ring;
- int err = 0;
- u32 new_rx_count, new_tx_count;
-
- if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
- return -EINVAL;
-
- new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD);
- new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
-
- new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD);
- new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD);
- new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
-
- if ((new_tx_count == adapter->tx_ring->count) &&
- (new_rx_count == adapter->rx_ring->count)) {
- /* nothing to do */
- return 0;
- }
-
- while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
- msleep(1);
-
- if (!netif_running(adapter->netdev)) {
- adapter->tx_ring->count = new_tx_count;
- adapter->rx_ring->count = new_rx_count;
- goto clear_reset;
- }
-
- temp_ring = vmalloc(sizeof(struct igbvf_ring));
- if (!temp_ring) {
- err = -ENOMEM;
- goto clear_reset;
- }
-
- igbvf_down(adapter);
-
- /*
- * We can't just free everything and then setup again,
- * because the ISRs in MSI-X mode get passed pointers
- * to the tx and rx ring structs.
- */
- if (new_tx_count != adapter->tx_ring->count) {
- memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));
-
- temp_ring->count = new_tx_count;
- err = igbvf_setup_tx_resources(adapter, temp_ring);
- if (err)
- goto err_setup;
-
- igbvf_free_tx_resources(adapter->tx_ring);
-
- memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring));
- }
-
- if (new_rx_count != adapter->rx_ring->count) {
- memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring));
-
- temp_ring->count = new_rx_count;
- err = igbvf_setup_rx_resources(adapter, temp_ring);
- if (err)
- goto err_setup;
-
- igbvf_free_rx_resources(adapter->rx_ring);
-
- memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
- }
-err_setup:
- igbvf_up(adapter);
- vfree(temp_ring);
-clear_reset:
- clear_bit(__IGBVF_RESETTING, &adapter->state);
- return err;
-}
-
-static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
-{
- struct e1000_hw *hw = &adapter->hw;
- *data = 0;
-
- hw->mac.ops.check_for_link(hw);
-
- if (!(er32(STATUS) & E1000_STATUS_LU))
- *data = 1;
-
- return *data;
-}
-
-static void igbvf_diag_test(struct net_device *netdev,
- struct ethtool_test *eth_test, u64 *data)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- set_bit(__IGBVF_TESTING, &adapter->state);
-
- /*
- * Link test performed before hardware reset so autoneg doesn't
- * interfere with test result
- */
- if (igbvf_link_test(adapter, &data[0]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- clear_bit(__IGBVF_TESTING, &adapter->state);
- msleep_interruptible(4 * 1000);
-}
-
-static void igbvf_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wol)
-{
- wol->supported = 0;
- wol->wolopts = 0;
-}
-
-static int igbvf_set_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wol)
-{
- return -EOPNOTSUPP;
-}
-
-static int igbvf_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- if (adapter->itr_setting <= 3)
- ec->rx_coalesce_usecs = adapter->itr_setting;
- else
- ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
-
- return 0;
-}
-
-static int igbvf_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
- (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
- (ec->rx_coalesce_usecs == 2))
- return -EINVAL;
-
- /* convert to rate of irq's per second */
- if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
- adapter->itr = IGBVF_START_ITR;
- adapter->itr_setting = ec->rx_coalesce_usecs;
- } else {
- adapter->itr = ec->rx_coalesce_usecs << 2;
- adapter->itr_setting = adapter->itr;
- }
-
- writel(adapter->itr,
- hw->hw_addr + adapter->rx_ring[0].itr_register);
-
- return 0;
-}
-
-static int igbvf_nway_reset(struct net_device *netdev)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- if (netif_running(netdev))
- igbvf_reinit_locked(adapter);
- return 0;
-}
-
-
-static void igbvf_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats,
- u64 *data)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- int i;
-
- igbvf_update_stats(adapter);
- for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
- char *p = (char *)adapter +
- igbvf_gstrings_stats[i].stat_offset;
- char *b = (char *)adapter +
- igbvf_gstrings_stats[i].base_stat_offset;
- data[i] = ((igbvf_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
- (*(u32 *)p - *(u32 *)b));
- }
-
-}
-
-static int igbvf_get_sset_count(struct net_device *dev, int stringset)
-{
- switch(stringset) {
- case ETH_SS_TEST:
- return IGBVF_TEST_LEN;
- case ETH_SS_STATS:
- return IGBVF_GLOBAL_STATS_LEN;
- default:
- return -EINVAL;
- }
-}
-
-static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
-{
- u8 *p = data;
- int i;
-
- switch (stringset) {
- case ETH_SS_TEST:
- memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test));
- break;
- case ETH_SS_STATS:
- for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
- memcpy(p, igbvf_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static const struct ethtool_ops igbvf_ethtool_ops = {
- .get_settings = igbvf_get_settings,
- .set_settings = igbvf_set_settings,
- .get_drvinfo = igbvf_get_drvinfo,
- .get_regs_len = igbvf_get_regs_len,
- .get_regs = igbvf_get_regs,
- .get_wol = igbvf_get_wol,
- .set_wol = igbvf_set_wol,
- .get_msglevel = igbvf_get_msglevel,
- .set_msglevel = igbvf_set_msglevel,
- .nway_reset = igbvf_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_eeprom_len = igbvf_get_eeprom_len,
- .get_eeprom = igbvf_get_eeprom,
- .set_eeprom = igbvf_set_eeprom,
- .get_ringparam = igbvf_get_ringparam,
- .set_ringparam = igbvf_set_ringparam,
- .get_pauseparam = igbvf_get_pauseparam,
- .set_pauseparam = igbvf_set_pauseparam,
- .get_rx_csum = igbvf_get_rx_csum,
- .set_rx_csum = igbvf_set_rx_csum,
- .get_tx_csum = igbvf_get_tx_csum,
- .set_tx_csum = igbvf_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = igbvf_set_tso,
- .self_test = igbvf_diag_test,
- .get_sset_count = igbvf_get_sset_count,
- .get_strings = igbvf_get_strings,
- .get_ethtool_stats = igbvf_get_ethtool_stats,
- .get_coalesce = igbvf_get_coalesce,
- .set_coalesce = igbvf_set_coalesce,
-};
-
-void igbvf_set_ethtool_ops(struct net_device *netdev)
-{
- /* have to "undeclare" const on this struct to remove warnings */
- SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops);
-}
diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c
deleted file mode 100644
index 3d6f4cc3998..00000000000
--- a/drivers/net/igbvf/mbx.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "mbx.h"
-
-/**
- * e1000_poll_for_msg - Wait for message notification
- * @hw: pointer to the HW structure
- *
- * returns SUCCESS if it successfully received a message notification
- **/
-static s32 e1000_poll_for_msg(struct e1000_hw *hw)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- int countdown = mbx->timeout;
-
- if (!mbx->ops.check_for_msg)
- goto out;
-
- while (countdown && mbx->ops.check_for_msg(hw)) {
- countdown--;
- udelay(mbx->usec_delay);
- }
-
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-out:
- return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
-}
-
-/**
- * e1000_poll_for_ack - Wait for message acknowledgement
- * @hw: pointer to the HW structure
- *
- * returns SUCCESS if it successfully received a message acknowledgement
- **/
-static s32 e1000_poll_for_ack(struct e1000_hw *hw)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- int countdown = mbx->timeout;
-
- if (!mbx->ops.check_for_ack)
- goto out;
-
- while (countdown && mbx->ops.check_for_ack(hw)) {
- countdown--;
- udelay(mbx->usec_delay);
- }
-
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-out:
- return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
-}
-
-/**
- * e1000_read_posted_mbx - Wait for message notification and receive message
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- *
- * returns SUCCESS if it successfully received a message notification and
- * copied it into the receive buffer.
- **/
-static s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -E1000_ERR_MBX;
-
- if (!mbx->ops.read)
- goto out;
-
- ret_val = e1000_poll_for_msg(hw);
-
- /* if ack received read message, otherwise we timed out */
- if (!ret_val)
- ret_val = mbx->ops.read(hw, msg, size);
-out:
- return ret_val;
-}
-
-/**
- * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- *
- * returns SUCCESS if it successfully copied message into the buffer and
- * received an ack to that message within delay * timeout period
- **/
-static s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -E1000_ERR_MBX;
-
- /* exit if we either can't write or there isn't a defined timeout */
- if (!mbx->ops.write || !mbx->timeout)
- goto out;
-
- /* send msg*/
- ret_val = mbx->ops.write(hw, msg, size);
-
- /* if msg sent wait until we receive an ack */
- if (!ret_val)
- ret_val = e1000_poll_for_ack(hw);
-out:
- return ret_val;
-}
-
-/**
- * e1000_read_v2p_mailbox - read v2p mailbox
- * @hw: pointer to the HW structure
- *
- * This function is used to read the v2p mailbox without losing the read to
- * clear status bits.
- **/
-static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw)
-{
- u32 v2p_mailbox = er32(V2PMAILBOX(0));
-
- v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox;
- hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS;
-
- return v2p_mailbox;
-}
-
-/**
- * e1000_check_for_bit_vf - Determine if a status bit was set
- * @hw: pointer to the HW structure
- * @mask: bitmask for bits to be tested and cleared
- *
- * This function is used to check for the read to clear bits within
- * the V2P mailbox.
- **/
-static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask)
-{
- u32 v2p_mailbox = e1000_read_v2p_mailbox(hw);
- s32 ret_val = -E1000_ERR_MBX;
-
- if (v2p_mailbox & mask)
- ret_val = E1000_SUCCESS;
-
- hw->dev_spec.vf.v2p_mailbox &= ~mask;
-
- return ret_val;
-}
-
-/**
- * e1000_check_for_msg_vf - checks to see if the PF has sent mail
- * @hw: pointer to the HW structure
- *
- * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
- **/
-static s32 e1000_check_for_msg_vf(struct e1000_hw *hw)
-{
- s32 ret_val = -E1000_ERR_MBX;
-
- if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) {
- ret_val = E1000_SUCCESS;
- hw->mbx.stats.reqs++;
- }
-
- return ret_val;
-}
-
-/**
- * e1000_check_for_ack_vf - checks to see if the PF has ACK'd
- * @hw: pointer to the HW structure
- *
- * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
- **/
-static s32 e1000_check_for_ack_vf(struct e1000_hw *hw)
-{
- s32 ret_val = -E1000_ERR_MBX;
-
- if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) {
- ret_val = E1000_SUCCESS;
- hw->mbx.stats.acks++;
- }
-
- return ret_val;
-}
-
-/**
- * e1000_check_for_rst_vf - checks to see if the PF has reset
- * @hw: pointer to the HW structure
- *
- * returns true if the PF has set the reset done bit or else false
- **/
-static s32 e1000_check_for_rst_vf(struct e1000_hw *hw)
-{
- s32 ret_val = -E1000_ERR_MBX;
-
- if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
- E1000_V2PMAILBOX_RSTI))) {
- ret_val = E1000_SUCCESS;
- hw->mbx.stats.rsts++;
- }
-
- return ret_val;
-}
-
-/**
- * e1000_obtain_mbx_lock_vf - obtain mailbox lock
- * @hw: pointer to the HW structure
- *
- * return SUCCESS if we obtained the mailbox lock
- **/
-static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
-{
- s32 ret_val = -E1000_ERR_MBX;
-
- /* Take ownership of the buffer */
- ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
-
- /* reserve mailbox for vf use */
- if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
- ret_val = E1000_SUCCESS;
-
- return ret_val;
-}
-
-/**
- * e1000_write_mbx_vf - Write a message to the mailbox
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- *
- * returns SUCCESS if it successfully copied message into the buffer
- **/
-static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
-{
- s32 err;
- u16 i;
-
- /* lock the mailbox to prevent pf/vf race condition */
- err = e1000_obtain_mbx_lock_vf(hw);
- if (err)
- goto out_no_write;
-
- /* flush any ack or msg as we are going to overwrite mailbox */
- e1000_check_for_ack_vf(hw);
- e1000_check_for_msg_vf(hw);
-
- /* copy the caller specified message to the mailbox memory buffer */
- for (i = 0; i < size; i++)
- array_ew32(VMBMEM(0), i, msg[i]);
-
- /* update stats */
- hw->mbx.stats.msgs_tx++;
-
- /* Drop VFU and interrupt the PF to tell it a message has been sent */
- ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_REQ);
-
-out_no_write:
- return err;
-}
-
-/**
- * e1000_read_mbx_vf - Reads a message from the inbox intended for vf
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- *
- * returns SUCCESS if it successfuly read message from buffer
- **/
-static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
-{
- s32 err;
- u16 i;
-
- /* lock the mailbox to prevent pf/vf race condition */
- err = e1000_obtain_mbx_lock_vf(hw);
- if (err)
- goto out_no_read;
-
- /* copy the message from the mailbox memory buffer */
- for (i = 0; i < size; i++)
- msg[i] = array_er32(VMBMEM(0), i);
-
- /* Acknowledge receipt and release mailbox, then we're done */
- ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_ACK);
-
- /* update stats */
- hw->mbx.stats.msgs_rx++;
-
-out_no_read:
- return err;
-}
-
-/**
- * e1000_init_mbx_params_vf - set initial values for vf mailbox
- * @hw: pointer to the HW structure
- *
- * Initializes the hw->mbx struct to correct values for vf mailbox
- */
-s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
-
- /* start mailbox as timed out and let the reset_hw call set the timeout
- * value to being communications */
- mbx->timeout = 0;
- mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
-
- mbx->size = E1000_VFMAILBOX_SIZE;
-
- mbx->ops.read = e1000_read_mbx_vf;
- mbx->ops.write = e1000_write_mbx_vf;
- mbx->ops.read_posted = e1000_read_posted_mbx;
- mbx->ops.write_posted = e1000_write_posted_mbx;
- mbx->ops.check_for_msg = e1000_check_for_msg_vf;
- mbx->ops.check_for_ack = e1000_check_for_ack_vf;
- mbx->ops.check_for_rst = e1000_check_for_rst_vf;
-
- mbx->stats.msgs_tx = 0;
- mbx->stats.msgs_rx = 0;
- mbx->stats.reqs = 0;
- mbx->stats.acks = 0;
- mbx->stats.rsts = 0;
-
- return E1000_SUCCESS;
-}
-
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
deleted file mode 100644
index 40ed066e3ef..00000000000
--- a/drivers/net/igbvf/netdev.c
+++ /dev/null
@@ -1,2859 +0,0 @@
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/tcp.h>
-#include <linux/ipv6.h>
-#include <linux/slab.h>
-#include <net/checksum.h>
-#include <net/ip6_checksum.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-#include <linux/prefetch.h>
-
-#include "igbvf.h"
-
-#define DRV_VERSION "2.0.0-k"
-char igbvf_driver_name[] = "igbvf";
-const char igbvf_driver_version[] = DRV_VERSION;
-static const char igbvf_driver_string[] =
- "Intel(R) Virtual Function Network Driver";
-static const char igbvf_copyright[] =
- "Copyright (c) 2009 - 2010 Intel Corporation.";
-
-static int igbvf_poll(struct napi_struct *napi, int budget);
-static void igbvf_reset(struct igbvf_adapter *);
-static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
-static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
-
-static struct igbvf_info igbvf_vf_info = {
- .mac = e1000_vfadapt,
- .flags = 0,
- .pba = 10,
- .init_ops = e1000_init_function_pointers_vf,
-};
-
-static struct igbvf_info igbvf_i350_vf_info = {
- .mac = e1000_vfadapt_i350,
- .flags = 0,
- .pba = 10,
- .init_ops = e1000_init_function_pointers_vf,
-};
-
-static const struct igbvf_info *igbvf_info_tbl[] = {
- [board_vf] = &igbvf_vf_info,
- [board_i350_vf] = &igbvf_i350_vf_info,
-};
-
-/**
- * igbvf_desc_unused - calculate if we have unused descriptors
- **/
-static int igbvf_desc_unused(struct igbvf_ring *ring)
-{
- if (ring->next_to_clean > ring->next_to_use)
- return ring->next_to_clean - ring->next_to_use - 1;
-
- return ring->count + ring->next_to_clean - ring->next_to_use - 1;
-}
-
-/**
- * igbvf_receive_skb - helper function to handle Rx indications
- * @adapter: board private structure
- * @status: descriptor status field as written by hardware
- * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
- * @skb: pointer to sk_buff to be indicated to stack
- **/
-static void igbvf_receive_skb(struct igbvf_adapter *adapter,
- struct net_device *netdev,
- struct sk_buff *skb,
- u32 status, u16 vlan)
-{
- if (status & E1000_RXD_STAT_VP) {
- u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
-
- __vlan_hwaccel_put_tag(skb, vid);
- }
- netif_receive_skb(skb);
-}
-
-static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
- u32 status_err, struct sk_buff *skb)
-{
- skb_checksum_none_assert(skb);
-
- /* Ignore Checksum bit is set or checksum is disabled through ethtool */
- if ((status_err & E1000_RXD_STAT_IXSM) ||
- (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
- return;
-
- /* TCP/UDP checksum error bit is set */
- if (status_err &
- (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
- /* let the stack verify checksum errors */
- adapter->hw_csum_err++;
- return;
- }
-
- /* It must be a TCP or UDP packet with a valid checksum */
- if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- adapter->hw_csum_good++;
-}
-
-/**
- * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
- * @rx_ring: address of ring structure to repopulate
- * @cleaned_count: number of buffers to repopulate
- **/
-static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
- int cleaned_count)
-{
- struct igbvf_adapter *adapter = rx_ring->adapter;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- union e1000_adv_rx_desc *rx_desc;
- struct igbvf_buffer *buffer_info;
- struct sk_buff *skb;
- unsigned int i;
- int bufsz;
-
- i = rx_ring->next_to_use;
- buffer_info = &rx_ring->buffer_info[i];
-
- if (adapter->rx_ps_hdr_size)
- bufsz = adapter->rx_ps_hdr_size;
- else
- bufsz = adapter->rx_buffer_len;
-
- while (cleaned_count--) {
- rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
-
- if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
- if (!buffer_info->page) {
- buffer_info->page = alloc_page(GFP_ATOMIC);
- if (!buffer_info->page) {
- adapter->alloc_rx_buff_failed++;
- goto no_buffers;
- }
- buffer_info->page_offset = 0;
- } else {
- buffer_info->page_offset ^= PAGE_SIZE / 2;
- }
- buffer_info->page_dma =
- dma_map_page(&pdev->dev, buffer_info->page,
- buffer_info->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- }
-
- if (!buffer_info->skb) {
- skb = netdev_alloc_skb_ip_align(netdev, bufsz);
- if (!skb) {
- adapter->alloc_rx_buff_failed++;
- goto no_buffers;
- }
-
- buffer_info->skb = skb;
- buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
- bufsz,
- DMA_FROM_DEVICE);
- }
- /* Refresh the desc even if buffer_addrs didn't change because
- * each write-back erases this info. */
- if (adapter->rx_ps_hdr_size) {
- rx_desc->read.pkt_addr =
- cpu_to_le64(buffer_info->page_dma);
- rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
- } else {
- rx_desc->read.pkt_addr =
- cpu_to_le64(buffer_info->dma);
- rx_desc->read.hdr_addr = 0;
- }
-
- i++;
- if (i == rx_ring->count)
- i = 0;
- buffer_info = &rx_ring->buffer_info[i];
- }
-
-no_buffers:
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- if (i == 0)
- i = (rx_ring->count - 1);
- else
- i--;
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64). */
- wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
- }
-}
-
-/**
- * igbvf_clean_rx_irq - Send received data up the network stack; legacy
- * @adapter: board private structure
- *
- * the return value indicates whether actual cleaning was done, there
- * is no guarantee that everything was cleaned
- **/
-static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
- int *work_done, int work_to_do)
-{
- struct igbvf_ring *rx_ring = adapter->rx_ring;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- union e1000_adv_rx_desc *rx_desc, *next_rxd;
- struct igbvf_buffer *buffer_info, *next_buffer;
- struct sk_buff *skb;
- bool cleaned = false;
- int cleaned_count = 0;
- unsigned int total_bytes = 0, total_packets = 0;
- unsigned int i;
- u32 length, hlen, staterr;
-
- i = rx_ring->next_to_clean;
- rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-
- while (staterr & E1000_RXD_STAT_DD) {
- if (*work_done >= work_to_do)
- break;
- (*work_done)++;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
-
- buffer_info = &rx_ring->buffer_info[i];
-
- /* HW will not DMA in data larger than the given buffer, even
- * if it parses the (NFS, of course) header to be larger. In
- * that case, it fills the header buffer and spills the rest
- * into the page.
- */
- hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
- E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > adapter->rx_ps_hdr_size)
- hlen = adapter->rx_ps_hdr_size;
-
- length = le16_to_cpu(rx_desc->wb.upper.length);
- cleaned = true;
- cleaned_count++;
-
- skb = buffer_info->skb;
- prefetch(skb->data - NET_IP_ALIGN);
- buffer_info->skb = NULL;
- if (!adapter->rx_ps_hdr_size) {
- dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_buffer_len,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
- skb_put(skb, length);
- goto send_up;
- }
-
- if (!skb_shinfo(skb)->nr_frags) {
- dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_ps_hdr_size,
- DMA_FROM_DEVICE);
- skb_put(skb, hlen);
- }
-
- if (length) {
- dma_unmap_page(&pdev->dev, buffer_info->page_dma,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- buffer_info->page_dma = 0;
-
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- buffer_info->page,
- buffer_info->page_offset,
- length);
-
- if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
- (page_count(buffer_info->page) != 1))
- buffer_info->page = NULL;
- else
- get_page(buffer_info->page);
-
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
- }
-send_up:
- i++;
- if (i == rx_ring->count)
- i = 0;
- next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
- prefetch(next_rxd);
- next_buffer = &rx_ring->buffer_info[i];
-
- if (!(staterr & E1000_RXD_STAT_EOP)) {
- buffer_info->skb = next_buffer->skb;
- buffer_info->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- goto next_desc;
- }
-
- if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
- dev_kfree_skb_irq(skb);
- goto next_desc;
- }
-
- total_bytes += skb->len;
- total_packets++;
-
- igbvf_rx_checksum_adv(adapter, staterr, skb);
-
- skb->protocol = eth_type_trans(skb, netdev);
-
- igbvf_receive_skb(adapter, netdev, skb, staterr,
- rx_desc->wb.upper.vlan);
-
-next_desc:
- rx_desc->wb.upper.status_error = 0;
-
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
- igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
- cleaned_count = 0;
- }
-
- /* use prefetched values */
- rx_desc = next_rxd;
- buffer_info = next_buffer;
-
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- }
-
- rx_ring->next_to_clean = i;
- cleaned_count = igbvf_desc_unused(rx_ring);
-
- if (cleaned_count)
- igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
-
- adapter->total_rx_packets += total_packets;
- adapter->total_rx_bytes += total_bytes;
- adapter->net_stats.rx_bytes += total_bytes;
- adapter->net_stats.rx_packets += total_packets;
- return cleaned;
-}
-
-static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
- struct igbvf_buffer *buffer_info)
-{
- if (buffer_info->dma) {
- if (buffer_info->mapped_as_page)
- dma_unmap_page(&adapter->pdev->dev,
- buffer_info->dma,
- buffer_info->length,
- DMA_TO_DEVICE);
- else
- dma_unmap_single(&adapter->pdev->dev,
- buffer_info->dma,
- buffer_info->length,
- DMA_TO_DEVICE);
- buffer_info->dma = 0;
- }
- if (buffer_info->skb) {
- dev_kfree_skb_any(buffer_info->skb);
- buffer_info->skb = NULL;
- }
- buffer_info->time_stamp = 0;
-}
-
-/**
- * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
- *
- * Return 0 on success, negative on failure
- **/
-int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring)
-{
- struct pci_dev *pdev = adapter->pdev;
- int size;
-
- size = sizeof(struct igbvf_buffer) * tx_ring->count;
- tx_ring->buffer_info = vzalloc(size);
- if (!tx_ring->buffer_info)
- goto err;
-
- /* round up to nearest 4K */
- tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
- tx_ring->size = ALIGN(tx_ring->size, 4096);
-
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
-
- if (!tx_ring->desc)
- goto err;
-
- tx_ring->adapter = adapter;
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-
- return 0;
-err:
- vfree(tx_ring->buffer_info);
- dev_err(&adapter->pdev->dev,
- "Unable to allocate memory for the transmit descriptor ring\n");
- return -ENOMEM;
-}
-
-/**
- * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
- *
- * Returns 0 on success, negative on failure
- **/
-int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
- struct igbvf_ring *rx_ring)
-{
- struct pci_dev *pdev = adapter->pdev;
- int size, desc_len;
-
- size = sizeof(struct igbvf_buffer) * rx_ring->count;
- rx_ring->buffer_info = vzalloc(size);
- if (!rx_ring->buffer_info)
- goto err;
-
- desc_len = sizeof(union e1000_adv_rx_desc);
-
- /* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * desc_len;
- rx_ring->size = ALIGN(rx_ring->size, 4096);
-
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
-
- if (!rx_ring->desc)
- goto err;
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-
- rx_ring->adapter = adapter;
-
- return 0;
-
-err:
- vfree(rx_ring->buffer_info);
- rx_ring->buffer_info = NULL;
- dev_err(&adapter->pdev->dev,
- "Unable to allocate memory for the receive descriptor ring\n");
- return -ENOMEM;
-}
-
-/**
- * igbvf_clean_tx_ring - Free Tx Buffers
- * @tx_ring: ring to be cleaned
- **/
-static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
-{
- struct igbvf_adapter *adapter = tx_ring->adapter;
- struct igbvf_buffer *buffer_info;
- unsigned long size;
- unsigned int i;
-
- if (!tx_ring->buffer_info)
- return;
-
- /* Free all the Tx ring sk_buffs */
- for (i = 0; i < tx_ring->count; i++) {
- buffer_info = &tx_ring->buffer_info[i];
- igbvf_put_txbuf(adapter, buffer_info);
- }
-
- size = sizeof(struct igbvf_buffer) * tx_ring->count;
- memset(tx_ring->buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
- memset(tx_ring->desc, 0, tx_ring->size);
-
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-
- writel(0, adapter->hw.hw_addr + tx_ring->head);
- writel(0, adapter->hw.hw_addr + tx_ring->tail);
-}
-
-/**
- * igbvf_free_tx_resources - Free Tx Resources per Queue
- * @tx_ring: ring to free resources from
- *
- * Free all transmit software resources
- **/
-void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
-{
- struct pci_dev *pdev = tx_ring->adapter->pdev;
-
- igbvf_clean_tx_ring(tx_ring);
-
- vfree(tx_ring->buffer_info);
- tx_ring->buffer_info = NULL;
-
- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
-
- tx_ring->desc = NULL;
-}
-
-/**
- * igbvf_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
- **/
-static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
-{
- struct igbvf_adapter *adapter = rx_ring->adapter;
- struct igbvf_buffer *buffer_info;
- struct pci_dev *pdev = adapter->pdev;
- unsigned long size;
- unsigned int i;
-
- if (!rx_ring->buffer_info)
- return;
-
- /* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- buffer_info = &rx_ring->buffer_info[i];
- if (buffer_info->dma) {
- if (adapter->rx_ps_hdr_size){
- dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_ps_hdr_size,
- DMA_FROM_DEVICE);
- } else {
- dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_buffer_len,
- DMA_FROM_DEVICE);
- }
- buffer_info->dma = 0;
- }
-
- if (buffer_info->skb) {
- dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
- }
-
- if (buffer_info->page) {
- if (buffer_info->page_dma)
- dma_unmap_page(&pdev->dev,
- buffer_info->page_dma,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- put_page(buffer_info->page);
- buffer_info->page = NULL;
- buffer_info->page_dma = 0;
- buffer_info->page_offset = 0;
- }
- }
-
- size = sizeof(struct igbvf_buffer) * rx_ring->count;
- memset(rx_ring->buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-
- writel(0, adapter->hw.hw_addr + rx_ring->head);
- writel(0, adapter->hw.hw_addr + rx_ring->tail);
-}
-
-/**
- * igbvf_free_rx_resources - Free Rx Resources
- * @rx_ring: ring to clean the resources from
- *
- * Free all receive software resources
- **/
-
-void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
-{
- struct pci_dev *pdev = rx_ring->adapter->pdev;
-
- igbvf_clean_rx_ring(rx_ring);
-
- vfree(rx_ring->buffer_info);
- rx_ring->buffer_info = NULL;
-
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
- rx_ring->desc = NULL;
-}
-
-/**
- * igbvf_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
- * @itr_setting: current adapter->itr
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
- *
- * Stores a new ITR value based on packets and byte
- * counts during the last interrupt. The advantage of per interrupt
- * computation is faster updates and more accurate ITR for the current
- * traffic pattern. Constants in this function were computed
- * based on theoretical maximum wire speed and thresholds were set based
- * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput. This functionality is controlled
- * by the InterruptThrottleRate module parameter.
- **/
-static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
- u16 itr_setting, int packets,
- int bytes)
-{
- unsigned int retval = itr_setting;
-
- if (packets == 0)
- goto update_itr_done;
-
- switch (itr_setting) {
- case lowest_latency:
- /* handle TSO and jumbo frames */
- if (bytes/packets > 8000)
- retval = bulk_latency;
- else if ((packets < 5) && (bytes > 512))
- retval = low_latency;
- break;
- case low_latency: /* 50 usec aka 20000 ints/s */
- if (bytes > 10000) {
- /* this if handles the TSO accounting */
- if (bytes/packets > 8000)
- retval = bulk_latency;
- else if ((packets < 10) || ((bytes/packets) > 1200))
- retval = bulk_latency;
- else if ((packets > 35))
- retval = lowest_latency;
- } else if (bytes/packets > 2000) {
- retval = bulk_latency;
- } else if (packets <= 2 && bytes < 512) {
- retval = lowest_latency;
- }
- break;
- case bulk_latency: /* 250 usec aka 4000 ints/s */
- if (bytes > 25000) {
- if (packets > 35)
- retval = low_latency;
- } else if (bytes < 6000) {
- retval = low_latency;
- }
- break;
- }
-
-update_itr_done:
- return retval;
-}
-
-static void igbvf_set_itr(struct igbvf_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u16 current_itr;
- u32 new_itr = adapter->itr;
-
- adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr,
- adapter->total_tx_packets,
- adapter->total_tx_bytes);
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
- adapter->tx_itr = low_latency;
-
- adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr,
- adapter->total_rx_packets,
- adapter->total_rx_bytes);
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
- adapter->rx_itr = low_latency;
-
- current_itr = max(adapter->rx_itr, adapter->tx_itr);
-
- switch (current_itr) {
- /* counts and packets in update_itr are dependent on these numbers */
- case lowest_latency:
- new_itr = 70000;
- break;
- case low_latency:
- new_itr = 20000; /* aka hwitr = ~200 */
- break;
- case bulk_latency:
- new_itr = 4000;
- break;
- default:
- break;
- }
-
- if (new_itr != adapter->itr) {
- /*
- * this attempts to bias the interrupt rate towards Bulk
- * by adding intermediate steps when interrupt rate is
- * increasing
- */
- new_itr = new_itr > adapter->itr ?
- min(adapter->itr + (new_itr >> 2), new_itr) :
- new_itr;
- adapter->itr = new_itr;
- adapter->rx_ring->itr_val = 1952;
-
- if (adapter->msix_entries)
- adapter->rx_ring->set_itr = 1;
- else
- ew32(ITR, 1952);
- }
-}
-
-/**
- * igbvf_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
- * returns true if ring is completely cleaned
- **/
-static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
-{
- struct igbvf_adapter *adapter = tx_ring->adapter;
- struct net_device *netdev = adapter->netdev;
- struct igbvf_buffer *buffer_info;
- struct sk_buff *skb;
- union e1000_adv_tx_desc *tx_desc, *eop_desc;
- unsigned int total_bytes = 0, total_packets = 0;
- unsigned int i, eop, count = 0;
- bool cleaned = false;
-
- i = tx_ring->next_to_clean;
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
-
- while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
- (count < tx_ring->count)) {
- rmb(); /* read buffer_info after eop_desc status */
- for (cleaned = false; !cleaned; count++) {
- tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
- cleaned = (i == eop);
- skb = buffer_info->skb;
-
- if (skb) {
- unsigned int segs, bytecount;
-
- /* gso_segs is currently only valid for tcp */
- segs = skb_shinfo(skb)->gso_segs ?: 1;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_packets += segs;
- total_bytes += bytecount;
- }
-
- igbvf_put_txbuf(adapter, buffer_info);
- tx_desc->wb.status = 0;
-
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
- }
-
- tx_ring->next_to_clean = i;
-
- if (unlikely(count &&
- netif_carrier_ok(netdev) &&
- igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
- /* Make sure that anybody stopping the queue after this
- * sees the new next_to_clean.
- */
- smp_mb();
- if (netif_queue_stopped(netdev) &&
- !(test_bit(__IGBVF_DOWN, &adapter->state))) {
- netif_wake_queue(netdev);
- ++adapter->restart_queue;
- }
- }
-
- adapter->net_stats.tx_bytes += total_bytes;
- adapter->net_stats.tx_packets += total_packets;
- return count < tx_ring->count;
-}
-
-static irqreturn_t igbvf_msix_other(int irq, void *data)
-{
- struct net_device *netdev = data;
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- adapter->int_counter1++;
-
- netif_carrier_off(netdev);
- hw->mac.get_link_status = 1;
- if (!test_bit(__IGBVF_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
-
- ew32(EIMS, adapter->eims_other);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
-{
- struct net_device *netdev = data;
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct igbvf_ring *tx_ring = adapter->tx_ring;
-
-
- adapter->total_tx_bytes = 0;
- adapter->total_tx_packets = 0;
-
- /* auto mask will automatically reenable the interrupt when we write
- * EICS */
- if (!igbvf_clean_tx_irq(tx_ring))
- /* Ring was not completely cleaned, so fire another interrupt */
- ew32(EICS, tx_ring->eims_value);
- else
- ew32(EIMS, tx_ring->eims_value);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
-{
- struct net_device *netdev = data;
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- adapter->int_counter0++;
-
- /* Write the ITR value calculated at the end of the
- * previous interrupt.
- */
- if (adapter->rx_ring->set_itr) {
- writel(adapter->rx_ring->itr_val,
- adapter->hw.hw_addr + adapter->rx_ring->itr_register);
- adapter->rx_ring->set_itr = 0;
- }
-
- if (napi_schedule_prep(&adapter->rx_ring->napi)) {
- adapter->total_rx_bytes = 0;
- adapter->total_rx_packets = 0;
- __napi_schedule(&adapter->rx_ring->napi);
- }
-
- return IRQ_HANDLED;
-}
-
-#define IGBVF_NO_QUEUE -1
-
-static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
- int tx_queue, int msix_vector)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ivar, index;
-
- /* 82576 uses a table-based method for assigning vectors.
- Each queue has a single entry in the table to which we write
- a vector number along with a "valid" bit. Sadly, the layout
- of the table is somewhat counterintuitive. */
- if (rx_queue > IGBVF_NO_QUEUE) {
- index = (rx_queue >> 1);
- ivar = array_er32(IVAR0, index);
- if (rx_queue & 0x1) {
- /* vector goes into third byte of register */
- ivar = ivar & 0xFF00FFFF;
- ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
- } else {
- /* vector goes into low byte of register */
- ivar = ivar & 0xFFFFFF00;
- ivar |= msix_vector | E1000_IVAR_VALID;
- }
- adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
- array_ew32(IVAR0, index, ivar);
- }
- if (tx_queue > IGBVF_NO_QUEUE) {
- index = (tx_queue >> 1);
- ivar = array_er32(IVAR0, index);
- if (tx_queue & 0x1) {
- /* vector goes into high byte of register */
- ivar = ivar & 0x00FFFFFF;
- ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
- } else {
- /* vector goes into second byte of register */
- ivar = ivar & 0xFFFF00FF;
- ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
- }
- adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
- array_ew32(IVAR0, index, ivar);
- }
-}
-
-/**
- * igbvf_configure_msix - Configure MSI-X hardware
- *
- * igbvf_configure_msix sets up the hardware to properly
- * generate MSI-X interrupts.
- **/
-static void igbvf_configure_msix(struct igbvf_adapter *adapter)
-{
- u32 tmp;
- struct e1000_hw *hw = &adapter->hw;
- struct igbvf_ring *tx_ring = adapter->tx_ring;
- struct igbvf_ring *rx_ring = adapter->rx_ring;
- int vector = 0;
-
- adapter->eims_enable_mask = 0;
-
- igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
- adapter->eims_enable_mask |= tx_ring->eims_value;
- if (tx_ring->itr_val)
- writel(tx_ring->itr_val,
- hw->hw_addr + tx_ring->itr_register);
- else
- writel(1952, hw->hw_addr + tx_ring->itr_register);
-
- igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
- adapter->eims_enable_mask |= rx_ring->eims_value;
- if (rx_ring->itr_val)
- writel(rx_ring->itr_val,
- hw->hw_addr + rx_ring->itr_register);
- else
- writel(1952, hw->hw_addr + rx_ring->itr_register);
-
- /* set vector for other causes, i.e. link changes */
-
- tmp = (vector++ | E1000_IVAR_VALID);
-
- ew32(IVAR_MISC, tmp);
-
- adapter->eims_enable_mask = (1 << (vector)) - 1;
- adapter->eims_other = 1 << (vector - 1);
- e1e_flush();
-}
-
-static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
-{
- if (adapter->msix_entries) {
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- }
-}
-
-/**
- * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
- *
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
-{
- int err = -ENOMEM;
- int i;
-
- /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */
- adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (adapter->msix_entries) {
- for (i = 0; i < 3; i++)
- adapter->msix_entries[i].entry = i;
-
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries, 3);
- }
-
- if (err) {
- /* MSI-X failed */
- dev_err(&adapter->pdev->dev,
- "Failed to initialize MSI-X interrupts.\n");
- igbvf_reset_interrupt_capability(adapter);
- }
-}
-
-/**
- * igbvf_request_msix - Initialize MSI-X interrupts
- *
- * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
- * kernel.
- **/
-static int igbvf_request_msix(struct igbvf_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int err = 0, vector = 0;
-
- if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
- sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
- sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
- } else {
- memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
- memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
- }
-
- err = request_irq(adapter->msix_entries[vector].vector,
- igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
- netdev);
- if (err)
- goto out;
-
- adapter->tx_ring->itr_register = E1000_EITR(vector);
- adapter->tx_ring->itr_val = 1952;
- vector++;
-
- err = request_irq(adapter->msix_entries[vector].vector,
- igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
- netdev);
- if (err)
- goto out;
-
- adapter->rx_ring->itr_register = E1000_EITR(vector);
- adapter->rx_ring->itr_val = 1952;
- vector++;
-
- err = request_irq(adapter->msix_entries[vector].vector,
- igbvf_msix_other, 0, netdev->name, netdev);
- if (err)
- goto out;
-
- igbvf_configure_msix(adapter);
- return 0;
-out:
- return err;
-}
-
-/**
- * igbvf_alloc_queues - Allocate memory for all rings
- * @adapter: board private structure to initialize
- **/
-static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
-
- adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- return -ENOMEM;
-
- adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
- if (!adapter->rx_ring) {
- kfree(adapter->tx_ring);
- return -ENOMEM;
- }
-
- netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
-
- return 0;
-}
-
-/**
- * igbvf_request_irq - initialize interrupts
- *
- * Attempts to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-static int igbvf_request_irq(struct igbvf_adapter *adapter)
-{
- int err = -1;
-
- /* igbvf supports msi-x only */
- if (adapter->msix_entries)
- err = igbvf_request_msix(adapter);
-
- if (!err)
- return err;
-
- dev_err(&adapter->pdev->dev,
- "Unable to allocate interrupt, Error: %d\n", err);
-
- return err;
-}
-
-static void igbvf_free_irq(struct igbvf_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int vector;
-
- if (adapter->msix_entries) {
- for (vector = 0; vector < 3; vector++)
- free_irq(adapter->msix_entries[vector].vector, netdev);
- }
-}
-
-/**
- * igbvf_irq_disable - Mask off interrupt generation on the NIC
- **/
-static void igbvf_irq_disable(struct igbvf_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- ew32(EIMC, ~0);
-
- if (adapter->msix_entries)
- ew32(EIAC, 0);
-}
-
-/**
- * igbvf_irq_enable - Enable default interrupt generation settings
- **/
-static void igbvf_irq_enable(struct igbvf_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- ew32(EIAC, adapter->eims_enable_mask);
- ew32(EIAM, adapter->eims_enable_mask);
- ew32(EIMS, adapter->eims_enable_mask);
-}
-
-/**
- * igbvf_poll - NAPI Rx polling callback
- * @napi: struct associated with this polling callback
- * @budget: amount of packets driver is allowed to process this poll
- **/
-static int igbvf_poll(struct napi_struct *napi, int budget)
-{
- struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
- struct igbvf_adapter *adapter = rx_ring->adapter;
- struct e1000_hw *hw = &adapter->hw;
- int work_done = 0;
-
- igbvf_clean_rx_irq(adapter, &work_done, budget);
-
- /* If not enough Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
-
- if (adapter->itr_setting & 3)
- igbvf_set_itr(adapter);
-
- if (!test_bit(__IGBVF_DOWN, &adapter->state))
- ew32(EIMS, adapter->rx_ring->eims_value);
- }
-
- return work_done;
-}
-
-/**
- * igbvf_set_rlpml - set receive large packet maximum length
- * @adapter: board private structure
- *
- * Configure the maximum size of packets that will be received
- */
-static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
-{
- int max_frame_size;
- struct e1000_hw *hw = &adapter->hw;
-
- max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
- e1000_rlpml_set_vf(hw, max_frame_size);
-}
-
-static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- if (hw->mac.ops.set_vfta(hw, vid, true))
- dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
- else
- set_bit(vid, adapter->active_vlans);
-}
-
-static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- igbvf_irq_disable(adapter);
-
- if (!test_bit(__IGBVF_DOWN, &adapter->state))
- igbvf_irq_enable(adapter);
-
- if (hw->mac.ops.set_vfta(hw, vid, false))
- dev_err(&adapter->pdev->dev,
- "Failed to remove vlan id %d\n", vid);
- else
- clear_bit(vid, adapter->active_vlans);
-}
-
-static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
-{
- u16 vid;
-
- for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- igbvf_vlan_rx_add_vid(adapter->netdev, vid);
-}
-
-/**
- * igbvf_configure_tx - Configure Transmit Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Tx unit of the MAC after a reset.
- **/
-static void igbvf_configure_tx(struct igbvf_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct igbvf_ring *tx_ring = adapter->tx_ring;
- u64 tdba;
- u32 txdctl, dca_txctrl;
-
- /* disable transmits */
- txdctl = er32(TXDCTL(0));
- ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
- e1e_flush();
- msleep(10);
-
- /* Setup the HW Tx Head and Tail descriptor pointers */
- ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
- tdba = tx_ring->dma;
- ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
- ew32(TDBAH(0), (tdba >> 32));
- ew32(TDH(0), 0);
- ew32(TDT(0), 0);
- tx_ring->head = E1000_TDH(0);
- tx_ring->tail = E1000_TDT(0);
-
- /* Turn off Relaxed Ordering on head write-backs. The writebacks
- * MUST be delivered in order or it will completely screw up
- * our bookeeping.
- */
- dca_txctrl = er32(DCA_TXCTRL(0));
- dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
- ew32(DCA_TXCTRL(0), dca_txctrl);
-
- /* enable transmits */
- txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
- ew32(TXDCTL(0), txdctl);
-
- /* Setup Transmit Descriptor Settings for eop descriptor */
- adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;
-
- /* enable Report Status bit */
- adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
-}
-
-/**
- * igbvf_setup_srrctl - configure the receive control registers
- * @adapter: Board private structure
- **/
-static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 srrctl = 0;
-
- srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
- E1000_SRRCTL_BSIZEHDR_MASK |
- E1000_SRRCTL_BSIZEPKT_MASK);
-
- /* Enable queue drop to avoid head of line blocking */
- srrctl |= E1000_SRRCTL_DROP_EN;
-
- /* Setup buffer sizes */
- srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
- E1000_SRRCTL_BSIZEPKT_SHIFT;
-
- if (adapter->rx_buffer_len < 2048) {
- adapter->rx_ps_hdr_size = 0;
- srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
- } else {
- adapter->rx_ps_hdr_size = 128;
- srrctl |= adapter->rx_ps_hdr_size <<
- E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- }
-
- ew32(SRRCTL(0), srrctl);
-}
-
-/**
- * igbvf_configure_rx - Configure Receive Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Rx unit of the MAC after a reset.
- **/
-static void igbvf_configure_rx(struct igbvf_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct igbvf_ring *rx_ring = adapter->rx_ring;
- u64 rdba;
- u32 rdlen, rxdctl;
-
- /* disable receives */
- rxdctl = er32(RXDCTL(0));
- ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
- e1e_flush();
- msleep(10);
-
- rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
-
- /*
- * Setup the HW Rx Head and Tail Descriptor Pointers and
- * the Base and Length of the Rx Descriptor Ring
- */
- rdba = rx_ring->dma;
- ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
- ew32(RDBAH(0), (rdba >> 32));
- ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
- rx_ring->head = E1000_RDH(0);
- rx_ring->tail = E1000_RDT(0);
- ew32(RDH(0), 0);
- ew32(RDT(0), 0);
-
- rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
- rxdctl &= 0xFFF00000;
- rxdctl |= IGBVF_RX_PTHRESH;
- rxdctl |= IGBVF_RX_HTHRESH << 8;
- rxdctl |= IGBVF_RX_WTHRESH << 16;
-
- igbvf_set_rlpml(adapter);
-
- /* enable receives */
- ew32(RXDCTL(0), rxdctl);
-}
-
-/**
- * igbvf_set_multi - Multicast and Promiscuous mode set
- * @netdev: network interface device structure
- *
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast,
- * promiscuous mode, and all-multi behavior.
- **/
-static void igbvf_set_multi(struct net_device *netdev)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct netdev_hw_addr *ha;
- u8 *mta_list = NULL;
- int i;
-
- if (!netdev_mc_empty(netdev)) {
- mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
- if (!mta_list) {
- dev_err(&adapter->pdev->dev,
- "failed to allocate multicast filter list\n");
- return;
- }
- }
-
- /* prepare a packed array of only addresses. */
- i = 0;
- netdev_for_each_mc_addr(ha, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
-
- hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
- kfree(mta_list);
-}
-
-/**
- * igbvf_configure - configure the hardware for Rx and Tx
- * @adapter: private board structure
- **/
-static void igbvf_configure(struct igbvf_adapter *adapter)
-{
- igbvf_set_multi(adapter->netdev);
-
- igbvf_restore_vlan(adapter);
-
- igbvf_configure_tx(adapter);
- igbvf_setup_srrctl(adapter);
- igbvf_configure_rx(adapter);
- igbvf_alloc_rx_buffers(adapter->rx_ring,
- igbvf_desc_unused(adapter->rx_ring));
-}
-
-/* igbvf_reset - bring the hardware into a known good state
- *
- * This function boots the hardware and enables some settings that
- * require a configuration cycle of the hardware - those cannot be
- * set/changed during runtime. After reset the device needs to be
- * properly configured for Rx, Tx etc.
- */
-static void igbvf_reset(struct igbvf_adapter *adapter)
-{
- struct e1000_mac_info *mac = &adapter->hw.mac;
- struct net_device *netdev = adapter->netdev;
- struct e1000_hw *hw = &adapter->hw;
-
- /* Allow time for pending master requests to run */
- if (mac->ops.reset_hw(hw))
- dev_err(&adapter->pdev->dev, "PF still resetting\n");
-
- mac->ops.init_hw(hw);
-
- if (is_valid_ether_addr(adapter->hw.mac.addr)) {
- memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac.addr,
- netdev->addr_len);
- }
-
- adapter->last_reset = jiffies;
-}
-
-int igbvf_up(struct igbvf_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- /* hardware has been reset, we need to reload some things */
- igbvf_configure(adapter);
-
- clear_bit(__IGBVF_DOWN, &adapter->state);
-
- napi_enable(&adapter->rx_ring->napi);
- if (adapter->msix_entries)
- igbvf_configure_msix(adapter);
-
- /* Clear any pending interrupts. */
- er32(EICR);
- igbvf_irq_enable(adapter);
-
- /* start the watchdog */
- hw->mac.get_link_status = 1;
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
-
-
- return 0;
-}
-
-void igbvf_down(struct igbvf_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct e1000_hw *hw = &adapter->hw;
- u32 rxdctl, txdctl;
-
- /*
- * signal that we're down so the interrupt handler does not
- * reschedule our watchdog timer
- */
- set_bit(__IGBVF_DOWN, &adapter->state);
-
- /* disable receives in the hardware */
- rxdctl = er32(RXDCTL(0));
- ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
-
- netif_stop_queue(netdev);
-
- /* disable transmits in the hardware */
- txdctl = er32(TXDCTL(0));
- ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
-
- /* flush both disables and wait for them to finish */
- e1e_flush();
- msleep(10);
-
- napi_disable(&adapter->rx_ring->napi);
-
- igbvf_irq_disable(adapter);
-
- del_timer_sync(&adapter->watchdog_timer);
-
- netif_carrier_off(netdev);
-
- /* record the stats before reset*/
- igbvf_update_stats(adapter);
-
- adapter->link_speed = 0;
- adapter->link_duplex = 0;
-
- igbvf_reset(adapter);
- igbvf_clean_tx_ring(adapter->tx_ring);
- igbvf_clean_rx_ring(adapter->rx_ring);
-}
-
-void igbvf_reinit_locked(struct igbvf_adapter *adapter)
-{
- might_sleep();
- while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
- msleep(1);
- igbvf_down(adapter);
- igbvf_up(adapter);
- clear_bit(__IGBVF_RESETTING, &adapter->state);
-}
-
-/**
- * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
- * @adapter: board private structure to initialize
- *
- * igbvf_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
- **/
-static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- s32 rc;
-
- adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
- adapter->rx_ps_hdr_size = 0;
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
-
- adapter->tx_int_delay = 8;
- adapter->tx_abs_int_delay = 32;
- adapter->rx_int_delay = 0;
- adapter->rx_abs_int_delay = 8;
- adapter->itr_setting = 3;
- adapter->itr = 20000;
-
- /* Set various function pointers */
- adapter->ei->init_ops(&adapter->hw);
-
- rc = adapter->hw.mac.ops.init_params(&adapter->hw);
- if (rc)
- return rc;
-
- rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
- if (rc)
- return rc;
-
- igbvf_set_interrupt_capability(adapter);
-
- if (igbvf_alloc_queues(adapter))
- return -ENOMEM;
-
- spin_lock_init(&adapter->tx_queue_lock);
-
- /* Explicitly disable IRQ since the NIC can be in any state. */
- igbvf_irq_disable(adapter);
-
- spin_lock_init(&adapter->stats_lock);
-
- set_bit(__IGBVF_DOWN, &adapter->state);
- return 0;
-}
-
-static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- adapter->stats.last_gprc = er32(VFGPRC);
- adapter->stats.last_gorc = er32(VFGORC);
- adapter->stats.last_gptc = er32(VFGPTC);
- adapter->stats.last_gotc = er32(VFGOTC);
- adapter->stats.last_mprc = er32(VFMPRC);
- adapter->stats.last_gotlbc = er32(VFGOTLBC);
- adapter->stats.last_gptlbc = er32(VFGPTLBC);
- adapter->stats.last_gorlbc = er32(VFGORLBC);
- adapter->stats.last_gprlbc = er32(VFGPRLBC);
-
- adapter->stats.base_gprc = er32(VFGPRC);
- adapter->stats.base_gorc = er32(VFGORC);
- adapter->stats.base_gptc = er32(VFGPTC);
- adapter->stats.base_gotc = er32(VFGOTC);
- adapter->stats.base_mprc = er32(VFMPRC);
- adapter->stats.base_gotlbc = er32(VFGOTLBC);
- adapter->stats.base_gptlbc = er32(VFGPTLBC);
- adapter->stats.base_gorlbc = er32(VFGORLBC);
- adapter->stats.base_gprlbc = er32(VFGPRLBC);
-}
-
-/**
- * igbvf_open - Called when a network interface is made active
- * @netdev: network interface device structure
- *
- * Returns 0 on success, negative value on failure
- *
- * The open entry point is called when a network interface is made
- * active by the system (IFF_UP). At this point all resources needed
- * for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
- * and the stack is notified that the interface is ready.
- **/
-static int igbvf_open(struct net_device *netdev)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- int err;
-
- /* disallow open during test */
- if (test_bit(__IGBVF_TESTING, &adapter->state))
- return -EBUSY;
-
- /* allocate transmit descriptors */
- err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
- if (err)
- goto err_setup_tx;
-
- /* allocate receive descriptors */
- err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
- if (err)
- goto err_setup_rx;
-
- /*
- * before we allocate an interrupt, we must be ready to handle it.
- * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
- * as soon as we call pci_request_irq, so we have to setup our
- * clean_rx handler before we do so.
- */
- igbvf_configure(adapter);
-
- err = igbvf_request_irq(adapter);
- if (err)
- goto err_req_irq;
-
- /* From here on the code is the same as igbvf_up() */
- clear_bit(__IGBVF_DOWN, &adapter->state);
-
- napi_enable(&adapter->rx_ring->napi);
-
- /* clear any pending interrupts */
- er32(EICR);
-
- igbvf_irq_enable(adapter);
-
- /* start the watchdog */
- hw->mac.get_link_status = 1;
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
-
- return 0;
-
-err_req_irq:
- igbvf_free_rx_resources(adapter->rx_ring);
-err_setup_rx:
- igbvf_free_tx_resources(adapter->tx_ring);
-err_setup_tx:
- igbvf_reset(adapter);
-
- return err;
-}
-
-/**
- * igbvf_close - Disables a network interface
- * @netdev: network interface device structure
- *
- * Returns 0, this is not allowed to fail
- *
- * The close entry point is called when an interface is de-activated
- * by the OS. The hardware is still under the drivers control, but
- * needs to be disabled. A global MAC reset is issued to stop the
- * hardware, and all transmit and receive resources are freed.
- **/
-static int igbvf_close(struct net_device *netdev)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
- igbvf_down(adapter);
-
- igbvf_free_irq(adapter);
-
- igbvf_free_tx_resources(adapter->tx_ring);
- igbvf_free_rx_resources(adapter->rx_ring);
-
- return 0;
-}
-/**
- * igbvf_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int igbvf_set_mac(struct net_device *netdev, void *p)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
-
- hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
-
- if (memcmp(addr->sa_data, hw->mac.addr, 6))
- return -EADDRNOTAVAIL;
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- return 0;
-}
-
-#define UPDATE_VF_COUNTER(reg, name) \
- { \
- u32 current_counter = er32(reg); \
- if (current_counter < adapter->stats.last_##name) \
- adapter->stats.name += 0x100000000LL; \
- adapter->stats.last_##name = current_counter; \
- adapter->stats.name &= 0xFFFFFFFF00000000LL; \
- adapter->stats.name |= current_counter; \
- }
-
-/**
- * igbvf_update_stats - Update the board statistics counters
- * @adapter: board private structure
-**/
-void igbvf_update_stats(struct igbvf_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
-
- /*
- * Prevent stats update while adapter is being reset, link is down
- * or if the pci connection is down.
- */
- if (adapter->link_speed == 0)
- return;
-
- if (test_bit(__IGBVF_RESETTING, &adapter->state))
- return;
-
- if (pci_channel_offline(pdev))
- return;
-
- UPDATE_VF_COUNTER(VFGPRC, gprc);
- UPDATE_VF_COUNTER(VFGORC, gorc);
- UPDATE_VF_COUNTER(VFGPTC, gptc);
- UPDATE_VF_COUNTER(VFGOTC, gotc);
- UPDATE_VF_COUNTER(VFMPRC, mprc);
- UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
- UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
- UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
- UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
-
- /* Fill out the OS statistics structure */
- adapter->net_stats.multicast = adapter->stats.mprc;
-}
-
-static void igbvf_print_link_info(struct igbvf_adapter *adapter)
-{
- dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
- adapter->link_speed,
- ((adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex"));
-}
-
-static bool igbvf_has_link(struct igbvf_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- s32 ret_val = E1000_SUCCESS;
- bool link_active;
-
- /* If interface is down, stay link down */
- if (test_bit(__IGBVF_DOWN, &adapter->state))
- return false;
-
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
-
- /* if check for link returns error we will need to reset */
- if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
- schedule_work(&adapter->reset_task);
-
- return link_active;
-}
-
-/**
- * igbvf_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
- **/
-static void igbvf_watchdog(unsigned long data)
-{
- struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;
-
- /* Do the rest outside of interrupt context */
- schedule_work(&adapter->watchdog_task);
-}
-
-static void igbvf_watchdog_task(struct work_struct *work)
-{
- struct igbvf_adapter *adapter = container_of(work,
- struct igbvf_adapter,
- watchdog_task);
- struct net_device *netdev = adapter->netdev;
- struct e1000_mac_info *mac = &adapter->hw.mac;
- struct igbvf_ring *tx_ring = adapter->tx_ring;
- struct e1000_hw *hw = &adapter->hw;
- u32 link;
- int tx_pending = 0;
-
- link = igbvf_has_link(adapter);
-
- if (link) {
- if (!netif_carrier_ok(netdev)) {
- mac->ops.get_link_up_info(&adapter->hw,
- &adapter->link_speed,
- &adapter->link_duplex);
- igbvf_print_link_info(adapter);
-
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
- }
- } else {
- if (netif_carrier_ok(netdev)) {
- adapter->link_speed = 0;
- adapter->link_duplex = 0;
- dev_info(&adapter->pdev->dev, "Link is Down\n");
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- }
- }
-
- if (netif_carrier_ok(netdev)) {
- igbvf_update_stats(adapter);
- } else {
- tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
- tx_ring->count);
- if (tx_pending) {
- /*
- * We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context).
- */
- adapter->tx_timeout_count++;
- schedule_work(&adapter->reset_task);
- }
- }
-
- /* Cause software interrupt to ensure Rx ring is cleaned */
- ew32(EICS, adapter->rx_ring->eims_value);
-
- /* Reset the timer */
- if (!test_bit(__IGBVF_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + (2 * HZ)));
-}
-
-#define IGBVF_TX_FLAGS_CSUM 0x00000001
-#define IGBVF_TX_FLAGS_VLAN 0x00000002
-#define IGBVF_TX_FLAGS_TSO 0x00000004
-#define IGBVF_TX_FLAGS_IPV4 0x00000008
-#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000
-#define IGBVF_TX_FLAGS_VLAN_SHIFT 16
-
-static int igbvf_tso(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
-{
- struct e1000_adv_tx_context_desc *context_desc;
- unsigned int i;
- int err;
- struct igbvf_buffer *buffer_info;
- u32 info = 0, tu_cmd = 0;
- u32 mss_l4len_idx, l4len;
- *hdr_len = 0;
-
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "igbvf_tso returning an error\n");
- return err;
- }
- }
-
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- }
-
- i = tx_ring->next_to_use;
-
- buffer_info = &tx_ring->buffer_info[i];
- context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
- /* VLAN MACLEN IPLEN */
- if (tx_flags & IGBVF_TX_FLAGS_VLAN)
- info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
- info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
- *hdr_len += skb_network_offset(skb);
- info |= (skb_transport_header(skb) - skb_network_header(skb));
- *hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
- context_desc->vlan_macip_lens = cpu_to_le32(info);
-
- /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
-
- if (skb->protocol == htons(ETH_P_IP))
- tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
-
- context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
-
- /* MSS L4LEN IDX */
- mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
-
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
- context_desc->seqnum_seed = 0;
-
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
- buffer_info->dma = 0;
- i++;
- if (i == tx_ring->count)
- i = 0;
-
- tx_ring->next_to_use = i;
-
- return true;
-}
-
-static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
-{
- struct e1000_adv_tx_context_desc *context_desc;
- unsigned int i;
- struct igbvf_buffer *buffer_info;
- u32 info = 0, tu_cmd = 0;
-
- if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
- (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
- i = tx_ring->next_to_use;
- buffer_info = &tx_ring->buffer_info[i];
- context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
-
- if (tx_flags & IGBVF_TX_FLAGS_VLAN)
- info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
-
- info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- info |= (skb_transport_header(skb) -
- skb_network_header(skb));
-
-
- context_desc->vlan_macip_lens = cpu_to_le32(info);
-
- tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
- tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
- break;
- case __constant_htons(ETH_P_IPV6):
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
- break;
- default:
- break;
- }
- }
-
- context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
- context_desc->seqnum_seed = 0;
- context_desc->mss_l4len_idx = 0;
-
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
- buffer_info->dma = 0;
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
-
- return true;
- }
-
- return false;
-}
-
-static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- /* there is enough descriptors then we don't need to worry */
- if (igbvf_desc_unused(adapter->tx_ring) >= size)
- return 0;
-
- netif_stop_queue(netdev);
-
- smp_mb();
-
- /* We need to check again just in case room has been made available */
- if (igbvf_desc_unused(adapter->tx_ring) < size)
- return -EBUSY;
-
- netif_wake_queue(netdev);
-
- ++adapter->restart_queue;
- return 0;
-}
-
-#define IGBVF_MAX_TXD_PWR 16
-#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
-
-static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring,
- struct sk_buff *skb,
- unsigned int first)
-{
- struct igbvf_buffer *buffer_info;
- struct pci_dev *pdev = adapter->pdev;
- unsigned int len = skb_headlen(skb);
- unsigned int count = 0, i;
- unsigned int f;
-
- i = tx_ring->next_to_use;
-
- buffer_info = &tx_ring->buffer_info[i];
- BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
- buffer_info->length = len;
- /* set time_stamp *before* dma to help avoid a possible race */
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
- buffer_info->mapped_as_page = false;
- buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, buffer_info->dma))
- goto dma_error;
-
-
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- struct skb_frag_struct *frag;
-
- count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
-
- frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
-
- buffer_info = &tx_ring->buffer_info[i];
- BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
- buffer_info->length = len;
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
- buffer_info->mapped_as_page = true;
- buffer_info->dma = dma_map_page(&pdev->dev,
- frag->page,
- frag->page_offset,
- len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, buffer_info->dma))
- goto dma_error;
- }
-
- tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[first].next_to_watch = i;
-
- return ++count;
-
-dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
-
- /* clear timestamp and dma mappings for failed buffer_info mapping */
- buffer_info->dma = 0;
- buffer_info->time_stamp = 0;
- buffer_info->length = 0;
- buffer_info->next_to_watch = 0;
- buffer_info->mapped_as_page = false;
- if (count)
- count--;
-
- /* clear timestamp and dma mappings for remaining portion of packet */
- while (count--) {
- if (i==0)
- i += tx_ring->count;
- i--;
- buffer_info = &tx_ring->buffer_info[i];
- igbvf_put_txbuf(adapter, buffer_info);
- }
-
- return 0;
-}
-
-static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring,
- int tx_flags, int count, u32 paylen,
- u8 hdr_len)
-{
- union e1000_adv_tx_desc *tx_desc = NULL;
- struct igbvf_buffer *buffer_info;
- u32 olinfo_status = 0, cmd_type_len;
- unsigned int i;
-
- cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
- E1000_ADVTXD_DCMD_DEXT);
-
- if (tx_flags & IGBVF_TX_FLAGS_VLAN)
- cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
-
- if (tx_flags & IGBVF_TX_FLAGS_TSO) {
- cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
-
- /* insert tcp checksum */
- olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
-
- /* insert ip checksum */
- if (tx_flags & IGBVF_TX_FLAGS_IPV4)
- olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
-
- } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
- olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
- }
-
- olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
-
- i = tx_ring->next_to_use;
- while (count--) {
- buffer_info = &tx_ring->buffer_info[i];
- tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
- tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
- tx_desc->read.cmd_type_len =
- cpu_to_le32(cmd_type_len | buffer_info->length);
- tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
-
- tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64). */
- wmb();
-
- tx_ring->next_to_use = i;
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
- /* we need this if more than one processor can write to our tail
- * at a time, it syncronizes IO on IA64/Altix systems */
- mmiowb();
-}
-
-static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
- struct net_device *netdev,
- struct igbvf_ring *tx_ring)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- unsigned int first, tx_flags = 0;
- u8 hdr_len = 0;
- int count = 0;
- int tso = 0;
-
- if (test_bit(__IGBVF_DOWN, &adapter->state)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (skb->len <= 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- /*
- * need: count + 4 desc gap to keep tail from touching
- * + 2 desc gap to keep tail from touching head,
- * + 1 desc for skb->data,
- * + 1 desc for context descriptor,
- * head, otherwise try next time
- */
- if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
- /* this is a hard error */
- return NETDEV_TX_BUSY;
- }
-
- if (vlan_tx_tag_present(skb)) {
- tx_flags |= IGBVF_TX_FLAGS_VLAN;
- tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
- }
-
- if (skb->protocol == htons(ETH_P_IP))
- tx_flags |= IGBVF_TX_FLAGS_IPV4;
-
- first = tx_ring->next_to_use;
-
- tso = skb_is_gso(skb) ?
- igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
- if (unlikely(tso < 0)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (tso)
- tx_flags |= IGBVF_TX_FLAGS_TSO;
- else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
- (skb->ip_summed == CHECKSUM_PARTIAL))
- tx_flags |= IGBVF_TX_FLAGS_CSUM;
-
- /*
- * count reflects descriptors mapped, if 0 then mapping error
- * has occurred and we need to rewind the descriptor queue
- */
- count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
-
- if (count) {
- igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
- skb->len, hdr_len);
- /* Make sure there is space in the ring for the next send. */
- igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
- } else {
- dev_kfree_skb_any(skb);
- tx_ring->buffer_info[first].time_stamp = 0;
- tx_ring->next_to_use = first;
- }
-
- return NETDEV_TX_OK;
-}
-
-static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct igbvf_ring *tx_ring;
-
- if (test_bit(__IGBVF_DOWN, &adapter->state)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- tx_ring = &adapter->tx_ring[0];
-
- return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
-}
-
-/**
- * igbvf_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
- **/
-static void igbvf_tx_timeout(struct net_device *netdev)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- /* Do the reset outside of interrupt context */
- adapter->tx_timeout_count++;
- schedule_work(&adapter->reset_task);
-}
-
-static void igbvf_reset_task(struct work_struct *work)
-{
- struct igbvf_adapter *adapter;
- adapter = container_of(work, struct igbvf_adapter, reset_task);
-
- igbvf_reinit_locked(adapter);
-}
-
-/**
- * igbvf_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- /* only return the current stats */
- return &adapter->net_stats;
-}
-
-/**
- * igbvf_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
- * @new_mtu: new value for maximum frame size
- *
- * Returns 0 on success, negative on failure
- **/
-static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
-
- if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
- return -EINVAL;
- }
-
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
- if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
- dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
- return -EINVAL;
- }
-
- while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
- msleep(1);
- /* igbvf_down has a dependency on max_frame_size */
- adapter->max_frame_size = max_frame;
- if (netif_running(netdev))
- igbvf_down(adapter);
-
- /*
- * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
- * means we reserve 2 more, this pushes us to allocate from the next
- * larger slab size.
- * i.e. RXBUFFER_2048 --> size-4096 slab
- * However with the new *_jumbo_rx* routines, jumbo receives will use
- * fragmented skbs
- */
-
- if (max_frame <= 1024)
- adapter->rx_buffer_len = 1024;
- else if (max_frame <= 2048)
- adapter->rx_buffer_len = 2048;
- else
-#if (PAGE_SIZE / 2) > 16384
- adapter->rx_buffer_len = 16384;
-#else
- adapter->rx_buffer_len = PAGE_SIZE / 2;
-#endif
-
-
- /* adjust allocation if LPE protects us, and we aren't using SBP */
- if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
- (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
- adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
- ETH_FCS_LEN;
-
- dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
-
- if (netif_running(netdev))
- igbvf_up(adapter);
- else
- igbvf_reset(adapter);
-
- clear_bit(__IGBVF_RESETTING, &adapter->state);
-
- return 0;
-}
-
-static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
-
- netif_device_detach(netdev);
-
- if (netif_running(netdev)) {
- WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
- igbvf_down(adapter);
- igbvf_free_irq(adapter);
- }
-
-#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-#endif
-
- pci_disable_device(pdev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int igbvf_resume(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- u32 err;
-
- pci_restore_state(pdev);
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
- return err;
- }
-
- pci_set_master(pdev);
-
- if (netif_running(netdev)) {
- err = igbvf_request_irq(adapter);
- if (err)
- return err;
- }
-
- igbvf_reset(adapter);
-
- if (netif_running(netdev))
- igbvf_up(adapter);
-
- netif_device_attach(netdev);
-
- return 0;
-}
-#endif
-
-static void igbvf_shutdown(struct pci_dev *pdev)
-{
- igbvf_suspend(pdev, PMSG_SUSPEND);
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void igbvf_netpoll(struct net_device *netdev)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- disable_irq(adapter->pdev->irq);
-
- igbvf_clean_tx_irq(adapter->tx_ring);
-
- enable_irq(adapter->pdev->irq);
-}
-#endif
-
-/**
- * igbvf_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
- *
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
-static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- if (netif_running(netdev))
- igbvf_down(adapter);
- pci_disable_device(pdev);
-
- /* Request a slot slot reset. */
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * igbvf_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igbvf_resume routine.
- */
-static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- if (pci_enable_device_mem(pdev)) {
- dev_err(&pdev->dev,
- "Cannot re-enable PCI device after reset.\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
- pci_set_master(pdev);
-
- igbvf_reset(adapter);
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * igbvf_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells us that
- * its OK to resume normal operation. Implementation resembles the
- * second-half of the igbvf_resume routine.
- */
-static void igbvf_io_resume(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- if (netif_running(netdev)) {
- if (igbvf_up(adapter)) {
- dev_err(&pdev->dev,
- "can't bring device back up after reset\n");
- return;
- }
- }
-
- netif_device_attach(netdev);
-}
-
-static void igbvf_print_device_info(struct igbvf_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
-
- dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
- dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
- dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
-}
-
-static const struct net_device_ops igbvf_netdev_ops = {
- .ndo_open = igbvf_open,
- .ndo_stop = igbvf_close,
- .ndo_start_xmit = igbvf_xmit_frame,
- .ndo_get_stats = igbvf_get_stats,
- .ndo_set_multicast_list = igbvf_set_multi,
- .ndo_set_mac_address = igbvf_set_mac,
- .ndo_change_mtu = igbvf_change_mtu,
- .ndo_do_ioctl = igbvf_ioctl,
- .ndo_tx_timeout = igbvf_tx_timeout,
- .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = igbvf_netpoll,
-#endif
-};
-
-/**
- * igbvf_probe - Device Initialization Routine
- * @pdev: PCI device information struct
- * @ent: entry in igbvf_pci_tbl
- *
- * Returns 0 on success, negative on failure
- *
- * igbvf_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
- * and a hardware reset occur.
- **/
-static int __devinit igbvf_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct net_device *netdev;
- struct igbvf_adapter *adapter;
- struct e1000_hw *hw;
- const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
-
- static int cards_found;
- int err, pci_using_dac;
-
- err = pci_enable_device_mem(pdev);
- if (err)
- return err;
-
- pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
- goto err_dma;
- }
- }
- }
-
- err = pci_request_regions(pdev, igbvf_driver_name);
- if (err)
- goto err_pci_reg;
-
- pci_set_master(pdev);
-
- err = -ENOMEM;
- netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
- if (!netdev)
- goto err_alloc_etherdev;
-
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- pci_set_drvdata(pdev, netdev);
- adapter = netdev_priv(netdev);
- hw = &adapter->hw;
- adapter->netdev = netdev;
- adapter->pdev = pdev;
- adapter->ei = ei;
- adapter->pba = ei->pba;
- adapter->flags = ei->flags;
- adapter->hw.back = adapter;
- adapter->hw.mac.type = ei->mac;
- adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
-
- /* PCI config space info */
-
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
- hw->revision_id = pdev->revision;
-
- err = -EIO;
- adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
-
- if (!adapter->hw.hw_addr)
- goto err_ioremap;
-
- if (ei->get_variants) {
- err = ei->get_variants(adapter);
- if (err)
- goto err_ioremap;
- }
-
- /* setup adapter struct */
- err = igbvf_sw_init(adapter);
- if (err)
- goto err_sw_init;
-
- /* construct the net_device struct */
- netdev->netdev_ops = &igbvf_netdev_ops;
-
- igbvf_set_ethtool_ops(netdev);
- netdev->watchdog_timeo = 5 * HZ;
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
-
- adapter->bd_number = cards_found++;
-
- netdev->features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
-
- netdev->features |= NETIF_F_IPV6_CSUM;
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
-
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
-
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_TSO6;
- netdev->vlan_features |= NETIF_F_IP_CSUM;
- netdev->vlan_features |= NETIF_F_IPV6_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
-
- /*reset the controller to put the device in a known good state */
- err = hw->mac.ops.reset_hw(hw);
- if (err) {
- dev_info(&pdev->dev,
- "PF still in reset state, assigning new address."
- " Is the PF interface up?\n");
- dev_hw_addr_random(adapter->netdev, hw->mac.addr);
- } else {
- err = hw->mac.ops.read_mac_addr(hw);
- if (err) {
- dev_err(&pdev->dev, "Error reading MAC address\n");
- goto err_hw_init;
- }
- }
-
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
-
- if (!is_valid_ether_addr(netdev->perm_addr)) {
- dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
- netdev->dev_addr);
- err = -EIO;
- goto err_hw_init;
- }
-
- setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
- (unsigned long) adapter);
-
- INIT_WORK(&adapter->reset_task, igbvf_reset_task);
- INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
-
- /* ring size defaults */
- adapter->rx_ring->count = 1024;
- adapter->tx_ring->count = 1024;
-
- /* reset the hardware with the new settings */
- igbvf_reset(adapter);
-
- strcpy(netdev->name, "eth%d");
- err = register_netdev(netdev);
- if (err)
- goto err_hw_init;
-
- /* tell the stack to leave us alone until igbvf_open() is called */
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
-
- igbvf_print_device_info(adapter);
-
- igbvf_initialize_last_counter_stats(adapter);
-
- return 0;
-
-err_hw_init:
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
-err_sw_init:
- igbvf_reset_interrupt_capability(adapter);
- iounmap(adapter->hw.hw_addr);
-err_ioremap:
- free_netdev(netdev);
-err_alloc_etherdev:
- pci_release_regions(pdev);
-err_pci_reg:
-err_dma:
- pci_disable_device(pdev);
- return err;
-}
-
-/**
- * igbvf_remove - Device Removal Routine
- * @pdev: PCI device information struct
- *
- * igbvf_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. The could be caused by a
- * Hot-Plug event, or because the driver is going to be removed from
- * memory.
- **/
-static void __devexit igbvf_remove(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igbvf_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- /*
- * The watchdog timer may be rescheduled, so explicitly
- * disable it from being rescheduled.
- */
- set_bit(__IGBVF_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
-
- cancel_work_sync(&adapter->reset_task);
- cancel_work_sync(&adapter->watchdog_task);
-
- unregister_netdev(netdev);
-
- igbvf_reset_interrupt_capability(adapter);
-
- /*
- * it is important to delete the napi struct prior to freeing the
- * rx ring so that you do not end up with null pointer refs
- */
- netif_napi_del(&adapter->rx_ring->napi);
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
-
- iounmap(hw->hw_addr);
- if (hw->flash_address)
- iounmap(hw->flash_address);
- pci_release_regions(pdev);
-
- free_netdev(netdev);
-
- pci_disable_device(pdev);
-}
-
-/* PCI Error Recovery (ERS) */
-static struct pci_error_handlers igbvf_err_handler = {
- .error_detected = igbvf_io_error_detected,
- .slot_reset = igbvf_io_slot_reset,
- .resume = igbvf_io_resume,
-};
-
-static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
- { } /* terminate list */
-};
-MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
-
-/* PCI Device API Driver */
-static struct pci_driver igbvf_driver = {
- .name = igbvf_driver_name,
- .id_table = igbvf_pci_tbl,
- .probe = igbvf_probe,
- .remove = __devexit_p(igbvf_remove),
-#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = igbvf_suspend,
- .resume = igbvf_resume,
-#endif
- .shutdown = igbvf_shutdown,
- .err_handler = &igbvf_err_handler
-};
-
-/**
- * igbvf_init_module - Driver Registration Routine
- *
- * igbvf_init_module is the first routine called when the driver is
- * loaded. All it does is register with the PCI subsystem.
- **/
-static int __init igbvf_init_module(void)
-{
- int ret;
- printk(KERN_INFO "%s - version %s\n",
- igbvf_driver_string, igbvf_driver_version);
- printk(KERN_INFO "%s\n", igbvf_copyright);
-
- ret = pci_register_driver(&igbvf_driver);
-
- return ret;
-}
-module_init(igbvf_init_module);
-
-/**
- * igbvf_exit_module - Driver Exit Cleanup Routine
- *
- * igbvf_exit_module is called just before the driver is removed
- * from memory.
- **/
-static void __exit igbvf_exit_module(void)
-{
- pci_unregister_driver(&igbvf_driver);
-}
-module_exit(igbvf_exit_module);
-
-
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
-MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-/* netdev.c */
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 25bb2a015e1..a40fab44b9a 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -183,7 +183,7 @@ config OLD_BELKIN_DONGLE
Say Y here if you want to build support for the Adaptec Airport 1000
and 2000 dongles. If you want to compile it as a module, choose
M here. Some information is contained in the comments
- at the top of <file:drivers/net/irda/old_belkin.c>.
+ at the top of <file:drivers/net/irda/old_belkin-sir.c>.
config ACT200L_DONGLE
tristate "ACTiSYS IR-200L dongle"
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 82660672dcd..d275e276e74 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -2,7 +2,7 @@
* SuperH IrDA Driver
*
* Copyright (C) 2010 Renesas Solutions Corp.
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
* Based on sh_sir.c
* Copyright (C) 2009 Renesas Solutions Corp.
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
@@ -144,8 +145,8 @@ struct sh_irda_xir_func {
struct sh_irda_self {
void __iomem *membase;
- unsigned int irq;
- struct clk *clk;
+ unsigned int irq;
+ struct platform_device *pdev;
struct net_device *ndev;
@@ -264,7 +265,7 @@ static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
return 0;
}
-static int xir_get_rcv_length(struct sh_irda_self *self)
+static int sh_irda_get_rcv_length(struct sh_irda_self *self)
{
return RFL_MASK & sh_irda_read(self, IRRFLR);
}
@@ -274,47 +275,47 @@ static int xir_get_rcv_length(struct sh_irda_self *self)
* NONE MODE
*
*=====================================*/
-static int xir_fre(struct sh_irda_self *self)
+static int sh_irda_xir_fre(struct sh_irda_self *self)
{
struct device *dev = &self->ndev->dev;
dev_err(dev, "none mode: frame recv\n");
return 0;
}
-static int xir_trov(struct sh_irda_self *self)
+static int sh_irda_xir_trov(struct sh_irda_self *self)
{
struct device *dev = &self->ndev->dev;
dev_err(dev, "none mode: buffer ram over\n");
return 0;
}
-static int xir_9(struct sh_irda_self *self)
+static int sh_irda_xir_9(struct sh_irda_self *self)
{
struct device *dev = &self->ndev->dev;
dev_err(dev, "none mode: time over\n");
return 0;
}
-static int xir_8(struct sh_irda_self *self)
+static int sh_irda_xir_8(struct sh_irda_self *self)
{
struct device *dev = &self->ndev->dev;
dev_err(dev, "none mode: framing error\n");
return 0;
}
-static int xir_fte(struct sh_irda_self *self)
+static int sh_irda_xir_fte(struct sh_irda_self *self)
{
struct device *dev = &self->ndev->dev;
dev_err(dev, "none mode: frame transmit end\n");
return 0;
}
-static struct sh_irda_xir_func xir_func = {
- .xir_fre = xir_fre,
- .xir_trov = xir_trov,
- .xir_9 = xir_9,
- .xir_8 = xir_8,
- .xir_fte = xir_fte,
+static struct sh_irda_xir_func sh_irda_xir_func = {
+ .xir_fre = sh_irda_xir_fre,
+ .xir_trov = sh_irda_xir_trov,
+ .xir_9 = sh_irda_xir_9,
+ .xir_8 = sh_irda_xir_8,
+ .xir_fte = sh_irda_xir_fte,
};
/*=====================================
@@ -323,12 +324,12 @@ static struct sh_irda_xir_func xir_func = {
*
* MIR/FIR are not supported now
*=====================================*/
-static struct sh_irda_xir_func mfir_func = {
- .xir_fre = xir_fre,
- .xir_trov = xir_trov,
- .xir_9 = xir_9,
- .xir_8 = xir_8,
- .xir_fte = xir_fte,
+static struct sh_irda_xir_func sh_irda_mfir_func = {
+ .xir_fre = sh_irda_xir_fre,
+ .xir_trov = sh_irda_xir_trov,
+ .xir_9 = sh_irda_xir_9,
+ .xir_8 = sh_irda_xir_8,
+ .xir_fte = sh_irda_xir_fte,
};
/*=====================================
@@ -336,12 +337,12 @@ static struct sh_irda_xir_func mfir_func = {
* SIR MODE
*
*=====================================*/
-static int sir_fre(struct sh_irda_self *self)
+static int sh_irda_sir_fre(struct sh_irda_self *self)
{
struct device *dev = &self->ndev->dev;
u16 data16;
u8 *data = (u8 *)&data16;
- int len = xir_get_rcv_length(self);
+ int len = sh_irda_get_rcv_length(self);
int i, j;
if (len > IRDARAM_LEN)
@@ -364,7 +365,7 @@ static int sir_fre(struct sh_irda_self *self)
return 0;
}
-static int sir_trov(struct sh_irda_self *self)
+static int sh_irda_sir_trov(struct sh_irda_self *self)
{
struct device *dev = &self->ndev->dev;
@@ -373,7 +374,7 @@ static int sir_trov(struct sh_irda_self *self)
return 0;
}
-static int sir_tot(struct sh_irda_self *self)
+static int sh_irda_sir_tot(struct sh_irda_self *self)
{
struct device *dev = &self->ndev->dev;
@@ -383,7 +384,7 @@ static int sir_tot(struct sh_irda_self *self)
return 0;
}
-static int sir_fer(struct sh_irda_self *self)
+static int sh_irda_sir_fer(struct sh_irda_self *self)
{
struct device *dev = &self->ndev->dev;
@@ -392,7 +393,7 @@ static int sir_fer(struct sh_irda_self *self)
return 0;
}
-static int sir_fte(struct sh_irda_self *self)
+static int sh_irda_sir_fte(struct sh_irda_self *self)
{
struct device *dev = &self->ndev->dev;
@@ -402,12 +403,12 @@ static int sir_fte(struct sh_irda_self *self)
return 0;
}
-static struct sh_irda_xir_func sir_func = {
- .xir_fre = sir_fre,
- .xir_trov = sir_trov,
- .xir_9 = sir_tot,
- .xir_8 = sir_fer,
- .xir_fte = sir_fte,
+static struct sh_irda_xir_func sh_irda_sir_func = {
+ .xir_fre = sh_irda_sir_fre,
+ .xir_trov = sh_irda_sir_trov,
+ .xir_9 = sh_irda_sir_tot,
+ .xir_8 = sh_irda_sir_fer,
+ .xir_fte = sh_irda_sir_fte,
};
static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
@@ -421,22 +422,22 @@ static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
case SH_IRDA_SIR:
name = "SIR";
data = TMD_SIR;
- func = &sir_func;
+ func = &sh_irda_sir_func;
break;
case SH_IRDA_MIR:
name = "MIR";
data = TMD_MIR;
- func = &mfir_func;
+ func = &sh_irda_mfir_func;
break;
case SH_IRDA_FIR:
name = "FIR";
data = TMD_FIR;
- func = &mfir_func;
+ func = &sh_irda_mfir_func;
break;
default:
- name = "NONE";
- data = 0;
- func = &xir_func;
+ name = "NONE";
+ data = 0;
+ func = &sh_irda_xir_func;
break;
}
@@ -694,7 +695,7 @@ static int sh_irda_open(struct net_device *ndev)
struct sh_irda_self *self = netdev_priv(ndev);
int err;
- clk_enable(self->clk);
+ pm_runtime_get_sync(&self->pdev->dev);
err = sh_irda_crc_init(self);
if (err)
goto open_err;
@@ -718,7 +719,7 @@ static int sh_irda_open(struct net_device *ndev)
return 0;
open_err:
- clk_disable(self->clk);
+ pm_runtime_put_sync(&self->pdev->dev);
return err;
}
@@ -734,6 +735,7 @@ static int sh_irda_stop(struct net_device *ndev)
}
netif_stop_queue(ndev);
+ pm_runtime_put_sync(&self->pdev->dev);
dev_info(&ndev->dev, "stoped\n");
@@ -786,11 +788,8 @@ static int __devinit sh_irda_probe(struct platform_device *pdev)
if (err)
goto err_mem_2;
- self->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(self->clk)) {
- dev_err(&pdev->dev, "cannot get irda clock\n");
- goto err_mem_3;
- }
+ self->pdev = pdev;
+ pm_runtime_enable(&pdev->dev);
irda_init_max_qos_capabilies(&self->qos);
@@ -820,8 +819,7 @@ static int __devinit sh_irda_probe(struct platform_device *pdev)
goto exit;
err_mem_4:
- clk_put(self->clk);
-err_mem_3:
+ pm_runtime_disable(&pdev->dev);
sh_irda_remove_iobuf(self);
err_mem_2:
iounmap(self->membase);
@@ -840,7 +838,7 @@ static int __devexit sh_irda_remove(struct platform_device *pdev)
return 0;
unregister_netdev(ndev);
- clk_put(self->clk);
+ pm_runtime_disable(&pdev->dev);
sh_irda_remove_iobuf(self);
iounmap(self->membase);
free_netdev(ndev);
@@ -849,11 +847,29 @@ static int __devexit sh_irda_remove(struct platform_device *pdev)
return 0;
}
+static int sh_irda_runtime_nop(struct device *dev)
+{
+ /* Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * This driver re-initializes all registers after
+ * pm_runtime_get_sync() anyway so there is no need
+ * to save and restore registers here.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops sh_irda_pm_ops = {
+ .runtime_suspend = sh_irda_runtime_nop,
+ .runtime_resume = sh_irda_runtime_nop,
+};
+
static struct platform_driver sh_irda_driver = {
- .probe = sh_irda_probe,
- .remove = __devexit_p(sh_irda_remove),
- .driver = {
- .name = DRIVER_NAME,
+ .probe = sh_irda_probe,
+ .remove = __devexit_p(sh_irda_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &sh_irda_pm_ops,
},
};
@@ -870,6 +886,6 @@ static void __exit sh_irda_exit(void)
module_init(sh_irda_init);
module_exit(sh_irda_exit);
-MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
deleted file mode 100644
index d99d01e2132..00000000000
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ /dev/null
@@ -1,687 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/string.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/ipv6.h>
-#ifdef NETIF_F_HW_VLAN_TX
-#include <linux/if_vlan.h>
-#endif
-
-#include "ixgbe.h"
-
-#include "ixgbe_sriov.h"
-
-static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
- int entries, u16 *hash_list, u32 vf)
-{
- struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
- struct ixgbe_hw *hw = &adapter->hw;
- int i;
- u32 vector_bit;
- u32 vector_reg;
- u32 mta_reg;
-
- /* only so many hash values supported */
- entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
-
- /*
- * salt away the number of multi cast addresses assigned
- * to this VF for later use to restore when the PF multi cast
- * list changes
- */
- vfinfo->num_vf_mc_hashes = entries;
-
- /*
- * VFs are limited to using the MTA hash table for their multicast
- * addresses
- */
- for (i = 0; i < entries; i++) {
- vfinfo->vf_mc_hashes[i] = hash_list[i];
- }
-
- for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
- vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
- vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
- mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
- mta_reg |= (1 << vector_bit);
- IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
- }
-
- return 0;
-}
-
-static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct list_head *pos;
- struct vf_macvlans *entry;
-
- list_for_each(pos, &adapter->vf_mvs.l) {
- entry = list_entry(pos, struct vf_macvlans, l);
- if (entry->free == false)
- hw->mac.ops.set_rar(hw, entry->rar_entry,
- entry->vf_macvlan,
- entry->vf, IXGBE_RAH_AV);
- }
-}
-
-void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct vf_data_storage *vfinfo;
- int i, j;
- u32 vector_bit;
- u32 vector_reg;
- u32 mta_reg;
-
- for (i = 0; i < adapter->num_vfs; i++) {
- vfinfo = &adapter->vfinfo[i];
- for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
- hw->addr_ctrl.mta_in_use++;
- vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
- vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
- mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
- mta_reg |= (1 << vector_bit);
- IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
- }
- }
-
- /* Restore any VF macvlans */
- ixgbe_restore_vf_macvlans(adapter);
-}
-
-static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
- u32 vf)
-{
- return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
-}
-
-static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int new_mtu = msgbuf[1];
- u32 max_frs;
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
-
- /* Only X540 supports jumbo frames in IOV mode */
- if (adapter->hw.mac.type != ixgbe_mac_X540)
- return;
-
- /* MTU < 68 is an error and causes problems on some kernels */
- if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
- e_err(drv, "VF mtu %d out of range\n", new_mtu);
- return;
- }
-
- max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
- IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
- if (max_frs < new_mtu) {
- max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
- IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
- }
-
- e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
-}
-
-static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
-{
- u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
- vmolr |= (IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_BAM);
- if (aupe)
- vmolr |= IXGBE_VMOLR_AUPE;
- else
- vmolr &= ~IXGBE_VMOLR_AUPE;
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
-}
-
-static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
-{
- struct ixgbe_hw *hw = &adapter->hw;
-
- if (vid)
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
- (vid | IXGBE_VMVIR_VLANA_DEFAULT));
- else
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
-}
-
-static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-
- /* reset offloads to defaults */
- if (adapter->vfinfo[vf].pf_vlan) {
- ixgbe_set_vf_vlan(adapter, true,
- adapter->vfinfo[vf].pf_vlan, vf);
- ixgbe_set_vmvir(adapter,
- (adapter->vfinfo[vf].pf_vlan |
- (adapter->vfinfo[vf].pf_qos <<
- VLAN_PRIO_SHIFT)), vf);
- ixgbe_set_vmolr(hw, vf, false);
- } else {
- ixgbe_set_vmvir(adapter, 0, vf);
- ixgbe_set_vmolr(hw, vf, true);
- }
-
- /* reset multicast table array for vf */
- adapter->vfinfo[vf].num_vf_mc_hashes = 0;
-
- /* Flush and reset the mta with the new values */
- ixgbe_set_rx_mode(adapter->netdev);
-
- hw->mac.ops.clear_rar(hw, rar_entry);
-}
-
-static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
- int vf, unsigned char *mac_addr)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-
- memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
- hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
-
- return 0;
-}
-
-static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
- int vf, int index, unsigned char *mac_addr)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct list_head *pos;
- struct vf_macvlans *entry;
-
- if (index <= 1) {
- list_for_each(pos, &adapter->vf_mvs.l) {
- entry = list_entry(pos, struct vf_macvlans, l);
- if (entry->vf == vf) {
- entry->vf = -1;
- entry->free = true;
- entry->is_macvlan = false;
- hw->mac.ops.clear_rar(hw, entry->rar_entry);
- }
- }
- }
-
- /*
- * If index was zero then we were asked to clear the uc list
- * for the VF. We're done.
- */
- if (!index)
- return 0;
-
- entry = NULL;
-
- list_for_each(pos, &adapter->vf_mvs.l) {
- entry = list_entry(pos, struct vf_macvlans, l);
- if (entry->free)
- break;
- }
-
- /*
- * If we traversed the entire list and didn't find a free entry
- * then we're out of space on the RAR table. Also entry may
- * be NULL because the original memory allocation for the list
- * failed, which is not fatal but does mean we can't support
- * VF requests for MACVLAN because we couldn't allocate
- * memory for the list management required.
- */
- if (!entry || !entry->free)
- return -ENOSPC;
-
- entry->free = false;
- entry->is_macvlan = true;
- entry->vf = vf;
- memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
-
- hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
-
- return 0;
-}
-
-int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
-{
- unsigned char vf_mac_addr[6];
- struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
- unsigned int vfn = (event_mask & 0x3f);
-
- bool enable = ((event_mask & 0x10000000U) != 0);
-
- if (enable) {
- random_ether_addr(vf_mac_addr);
- e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
- vfn, vf_mac_addr);
- /*
- * Store away the VF "permananet" MAC address, it will ask
- * for it later.
- */
- memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
- }
-
- return 0;
-}
-
-static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 reg;
- u32 reg_offset, vf_shift;
-
- vf_shift = vf % 32;
- reg_offset = vf / 32;
-
- /* enable transmit and receive for vf */
- reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
- reg |= (reg | (1 << vf_shift));
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
-
- reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
- reg |= (reg | (1 << vf_shift));
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
-
- /* Enable counting of spoofed packets in the SSVPC register */
- reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
- reg |= (1 << vf_shift);
- IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
-
- ixgbe_vf_reset_event(adapter, vf);
-}
-
-static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
-{
- u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
- u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
- struct ixgbe_hw *hw = &adapter->hw;
- s32 retval;
- int entries;
- u16 *hash_list;
- int add, vid, index;
- u8 *new_mac;
-
- retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
-
- if (retval)
- pr_err("Error receiving message from VF\n");
-
- /* this is a message we already processed, do nothing */
- if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
- return retval;
-
- /*
- * until the vf completes a virtual function reset it should not be
- * allowed to start any configuration.
- */
-
- if (msgbuf[0] == IXGBE_VF_RESET) {
- unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
- new_mac = (u8 *)(&msgbuf[1]);
- e_info(probe, "VF Reset msg received from vf %d\n", vf);
- adapter->vfinfo[vf].clear_to_send = false;
- ixgbe_vf_reset_msg(adapter, vf);
- adapter->vfinfo[vf].clear_to_send = true;
-
- if (is_valid_ether_addr(new_mac) &&
- !adapter->vfinfo[vf].pf_set_mac)
- ixgbe_set_vf_mac(adapter, vf, vf_mac);
- else
- ixgbe_set_vf_mac(adapter,
- vf, adapter->vfinfo[vf].vf_mac_addresses);
-
- /* reply to reset with ack and vf mac address */
- msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
- /*
- * Piggyback the multicast filter type so VF can compute the
- * correct vectors
- */
- msgbuf[3] = hw->mac.mc_filter_type;
- ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
-
- return retval;
- }
-
- if (!adapter->vfinfo[vf].clear_to_send) {
- msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
- ixgbe_write_mbx(hw, msgbuf, 1, vf);
- return retval;
- }
-
- switch ((msgbuf[0] & 0xFFFF)) {
- case IXGBE_VF_SET_MAC_ADDR:
- new_mac = ((u8 *)(&msgbuf[1]));
- if (is_valid_ether_addr(new_mac) &&
- !adapter->vfinfo[vf].pf_set_mac) {
- ixgbe_set_vf_mac(adapter, vf, new_mac);
- } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
- new_mac, ETH_ALEN)) {
- e_warn(drv, "VF %d attempted to override "
- "administratively set MAC address\nReload "
- "the VF driver to resume operations\n", vf);
- retval = -1;
- }
- break;
- case IXGBE_VF_SET_MULTICAST:
- entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
- >> IXGBE_VT_MSGINFO_SHIFT;
- hash_list = (u16 *)&msgbuf[1];
- retval = ixgbe_set_vf_multicasts(adapter, entries,
- hash_list, vf);
- break;
- case IXGBE_VF_SET_LPE:
- ixgbe_set_vf_lpe(adapter, msgbuf);
- break;
- case IXGBE_VF_SET_VLAN:
- add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
- >> IXGBE_VT_MSGINFO_SHIFT;
- vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
- if (adapter->vfinfo[vf].pf_vlan) {
- e_warn(drv, "VF %d attempted to override "
- "administratively set VLAN configuration\n"
- "Reload the VF driver to resume operations\n",
- vf);
- retval = -1;
- } else {
- retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
- }
- break;
- case IXGBE_VF_SET_MACVLAN:
- index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
- IXGBE_VT_MSGINFO_SHIFT;
- /*
- * If the VF is allowed to set MAC filters then turn off
- * anti-spoofing to avoid false positives. An index
- * greater than 0 will indicate the VF is setting a
- * macvlan MAC filter.
- */
- if (index > 0 && adapter->antispoofing_enabled) {
- hw->mac.ops.set_mac_anti_spoofing(hw, false,
- adapter->num_vfs);
- hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
- adapter->antispoofing_enabled = false;
- }
- retval = ixgbe_set_vf_macvlan(adapter, vf, index,
- (unsigned char *)(&msgbuf[1]));
- break;
- default:
- e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
- retval = IXGBE_ERR_MBX;
- break;
- }
-
- /* notify the VF of the results of what it sent us */
- if (retval)
- msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
- else
- msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
-
- msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
-
- ixgbe_write_mbx(hw, msgbuf, 1, vf);
-
- return retval;
-}
-
-static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 msg = IXGBE_VT_MSGTYPE_NACK;
-
- /* if device isn't clear to send it shouldn't be reading either */
- if (!adapter->vfinfo[vf].clear_to_send)
- ixgbe_write_mbx(hw, &msg, 1, vf);
-}
-
-void ixgbe_msg_task(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vf;
-
- for (vf = 0; vf < adapter->num_vfs; vf++) {
- /* process any reset requests */
- if (!ixgbe_check_for_rst(hw, vf))
- ixgbe_vf_reset_event(adapter, vf);
-
- /* process any messages pending */
- if (!ixgbe_check_for_msg(hw, vf))
- ixgbe_rcv_msg_from_vf(adapter, vf);
-
- /* process any acks */
- if (!ixgbe_check_for_ack(hw, vf))
- ixgbe_rcv_ack_from_vf(adapter, vf);
- }
-}
-
-void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
-
- /* disable transmit and receive for all vfs */
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
-
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
-}
-
-void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 ping;
- int i;
-
- for (i = 0 ; i < adapter->num_vfs; i++) {
- ping = IXGBE_PF_CONTROL_MSG;
- if (adapter->vfinfo[i].clear_to_send)
- ping |= IXGBE_VT_MSGTYPE_CTS;
- ixgbe_write_mbx(hw, &ping, 1, i);
- }
-}
-
-int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
- return -EINVAL;
- adapter->vfinfo[vf].pf_set_mac = true;
- dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
- dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
- " change effective.");
- if (test_bit(__IXGBE_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
- " but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
- " attempting to use the VF device.\n");
- }
- return ixgbe_set_vf_mac(adapter, vf, mac);
-}
-
-int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
-{
- int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
-
- if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
- return -EINVAL;
- if (vlan || qos) {
- err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
- if (err)
- goto out;
- ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
- ixgbe_set_vmolr(hw, vf, false);
- if (adapter->antispoofing_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
- adapter->vfinfo[vf].pf_vlan = vlan;
- adapter->vfinfo[vf].pf_qos = qos;
- dev_info(&adapter->pdev->dev,
- "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
- if (test_bit(__IXGBE_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev,
- "The VF VLAN has been set,"
- " but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev,
- "Bring the PF device up before"
- " attempting to use the VF device.\n");
- }
- } else {
- err = ixgbe_set_vf_vlan(adapter, false,
- adapter->vfinfo[vf].pf_vlan, vf);
- ixgbe_set_vmvir(adapter, vlan, vf);
- ixgbe_set_vmolr(hw, vf, true);
- hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
- adapter->vfinfo[vf].pf_vlan = 0;
- adapter->vfinfo[vf].pf_qos = 0;
- }
-out:
- return err;
-}
-
-static int ixgbe_link_mbps(int internal_link_speed)
-{
- switch (internal_link_speed) {
- case IXGBE_LINK_SPEED_100_FULL:
- return 100;
- case IXGBE_LINK_SPEED_1GB_FULL:
- return 1000;
- case IXGBE_LINK_SPEED_10GB_FULL:
- return 10000;
- default:
- return 0;
- }
-}
-
-static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
- int link_speed)
-{
- int rf_dec, rf_int;
- u32 bcnrc_val;
-
- if (tx_rate != 0) {
- /* Calculate the rate factor values to set */
- rf_int = link_speed / tx_rate;
- rf_dec = (link_speed - (rf_int * tx_rate));
- rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
-
- bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
- bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
- IXGBE_RTTBCNRC_RF_INT_MASK);
- bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
- } else {
- bcnrc_val = 0;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
- /*
- * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
- * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
- * and 0x004 otherwise.
- */
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
- break;
- case ixgbe_mac_X540:
- IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
- break;
- default:
- break;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
-}
-
-void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
-{
- int actual_link_speed, i;
- bool reset_rate = false;
-
- /* VF Tx rate limit was not set */
- if (adapter->vf_rate_link_speed == 0)
- return;
-
- actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
- if (actual_link_speed != adapter->vf_rate_link_speed) {
- reset_rate = true;
- adapter->vf_rate_link_speed = 0;
- dev_info(&adapter->pdev->dev,
- "Link speed has been changed. VF Transmit rate "
- "is disabled\n");
- }
-
- for (i = 0; i < adapter->num_vfs; i++) {
- if (reset_rate)
- adapter->vfinfo[i].tx_rate = 0;
-
- ixgbe_set_vf_rate_limit(&adapter->hw, i,
- adapter->vfinfo[i].tx_rate,
- actual_link_speed);
- }
-}
-
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- int actual_link_speed;
-
- actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
- if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
- (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
- ((tx_rate != 0) && (tx_rate <= 10)))
- /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
- return -EINVAL;
-
- adapter->vf_rate_link_speed = actual_link_speed;
- adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
- ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
-
- return 0;
-}
-
-int ixgbe_ndo_get_vf_config(struct net_device *netdev,
- int vf, struct ifla_vf_info *ivi)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (vf >= adapter->num_vfs)
- return -EINVAL;
- ivi->vf = vf;
- memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
- ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
- ivi->vlan = adapter->vfinfo[vf].pf_vlan;
- ivi->qos = adapter->vfinfo[vf].pf_qos;
- return 0;
-}
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
deleted file mode 100644
index deee3754b1f..00000000000
--- a/drivers/net/ixgbevf/ethtool.c
+++ /dev/null
@@ -1,742 +0,0 @@
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/* ethtool support for ixgbevf */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/vmalloc.h>
-#include <linux/if_vlan.h>
-#include <linux/uaccess.h>
-
-#include "ixgbevf.h"
-
-#define IXGBE_ALL_RAR_ENTRIES 16
-
-#ifdef ETHTOOL_GSTATS
-struct ixgbe_stats {
- char stat_string[ETH_GSTRING_LEN];
- int sizeof_stat;
- int stat_offset;
- int base_stat_offset;
- int saved_reset_offset;
-};
-
-#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \
- offsetof(struct ixgbevf_adapter, m), \
- offsetof(struct ixgbevf_adapter, b), \
- offsetof(struct ixgbevf_adapter, r)
-static struct ixgbe_stats ixgbe_gstrings_stats[] = {
- {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
- stats.saved_reset_vfgprc)},
- {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
- stats.saved_reset_vfgptc)},
- {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
- stats.saved_reset_vfgorc)},
- {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
- stats.saved_reset_vfgotc)},
- {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
- {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
- stats.saved_reset_vfmprc)},
- {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
- zero_base)},
- {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
- zero_base)},
- {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
- zero_base)},
- {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
-};
-
-#define IXGBE_QUEUE_STATS_LEN 0
-#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
-
-#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
-#endif /* ETHTOOL_GSTATS */
-#ifdef ETHTOOL_TEST
-static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
- "Register test (offline)",
- "Link test (on/offline)"
-};
-#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
-#endif /* ETHTOOL_TEST */
-
-static int ixgbevf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- u32 link_speed = 0;
- bool link_up;
-
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_DUMMY1;
- ecmd->port = -1;
-
- hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
-
- if (link_up) {
- ethtool_cmd_speed_set(
- ecmd,
- (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
- SPEED_10000 : SPEED_1000);
- ecmd->duplex = DUPLEX_FULL;
- } else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
- }
-
- return 0;
-}
-
-static u32 ixgbevf_get_rx_csum(struct net_device *netdev)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
-}
-
-static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- if (data)
- adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
- else
- adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
-
- if (netif_running(netdev)) {
- if (!adapter->dev_closed)
- ixgbevf_reinit_locked(adapter);
- } else {
- ixgbevf_reset(adapter);
- }
-
- return 0;
-}
-
-static int ixgbevf_set_tso(struct net_device *netdev, u32 data)
-{
- if (data) {
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- } else {
- netif_tx_stop_all_queues(netdev);
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- netif_tx_start_all_queues(netdev);
- }
- return 0;
-}
-
-static u32 ixgbevf_get_msglevel(struct net_device *netdev)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- return adapter->msg_enable;
-}
-
-static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- adapter->msg_enable = data;
-}
-
-#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
-
-static char *ixgbevf_reg_names[] = {
- "IXGBE_VFCTRL",
- "IXGBE_VFSTATUS",
- "IXGBE_VFLINKS",
- "IXGBE_VFRXMEMWRAP",
- "IXGBE_VFFRTIMER",
- "IXGBE_VTEICR",
- "IXGBE_VTEICS",
- "IXGBE_VTEIMS",
- "IXGBE_VTEIMC",
- "IXGBE_VTEIAC",
- "IXGBE_VTEIAM",
- "IXGBE_VTEITR",
- "IXGBE_VTIVAR",
- "IXGBE_VTIVAR_MISC",
- "IXGBE_VFRDBAL0",
- "IXGBE_VFRDBAL1",
- "IXGBE_VFRDBAH0",
- "IXGBE_VFRDBAH1",
- "IXGBE_VFRDLEN0",
- "IXGBE_VFRDLEN1",
- "IXGBE_VFRDH0",
- "IXGBE_VFRDH1",
- "IXGBE_VFRDT0",
- "IXGBE_VFRDT1",
- "IXGBE_VFRXDCTL0",
- "IXGBE_VFRXDCTL1",
- "IXGBE_VFSRRCTL0",
- "IXGBE_VFSRRCTL1",
- "IXGBE_VFPSRTYPE",
- "IXGBE_VFTDBAL0",
- "IXGBE_VFTDBAL1",
- "IXGBE_VFTDBAH0",
- "IXGBE_VFTDBAH1",
- "IXGBE_VFTDLEN0",
- "IXGBE_VFTDLEN1",
- "IXGBE_VFTDH0",
- "IXGBE_VFTDH1",
- "IXGBE_VFTDT0",
- "IXGBE_VFTDT1",
- "IXGBE_VFTXDCTL0",
- "IXGBE_VFTXDCTL1",
- "IXGBE_VFTDWBAL0",
- "IXGBE_VFTDWBAL1",
- "IXGBE_VFTDWBAH0",
- "IXGBE_VFTDWBAH1"
-};
-
-
-static int ixgbevf_get_regs_len(struct net_device *netdev)
-{
- return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
-}
-
-static void ixgbevf_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs,
- void *p)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- u32 *regs_buff = p;
- u32 regs_len = ixgbevf_get_regs_len(netdev);
- u8 i;
-
- memset(p, 0, regs_len);
-
- regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
-
- /* General Registers */
- regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
- regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
- regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
- regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
- regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);
-
- /* Interrupt */
- /* don't read EICR because it can clear interrupt causes, instead
- * read EICS which is a shadow but doesn't clear EICR */
- regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
- regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
- regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
- regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
- regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
- regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
- regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
- regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
- regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
-
- /* Receive DMA */
- for (i = 0; i < 2; i++)
- regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
- for (i = 0; i < 2; i++)
- regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
- for (i = 0; i < 2; i++)
- regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
- for (i = 0; i < 2; i++)
- regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
- for (i = 0; i < 2; i++)
- regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
- for (i = 0; i < 2; i++)
- regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
- for (i = 0; i < 2; i++)
- regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
-
- /* Receive */
- regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
-
- /* Transmit */
- for (i = 0; i < 2; i++)
- regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
- for (i = 0; i < 2; i++)
- regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
- for (i = 0; i < 2; i++)
- regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
- for (i = 0; i < 2; i++)
- regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
- for (i = 0; i < 2; i++)
- regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
- for (i = 0; i < 2; i++)
- regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
- for (i = 0; i < 2; i++)
- regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
- for (i = 0; i < 2; i++)
- regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
-
- for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
- hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
-}
-
-static void ixgbevf_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
- strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
- strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
-
- strlcpy(drvinfo->fw_version, "N/A", 4);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
-}
-
-static void ixgbevf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- struct ixgbevf_ring *tx_ring = adapter->tx_ring;
- struct ixgbevf_ring *rx_ring = adapter->rx_ring;
-
- ring->rx_max_pending = IXGBEVF_MAX_RXD;
- ring->tx_max_pending = IXGBEVF_MAX_TXD;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
-}
-
-static int ixgbevf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
- int i, err = 0;
- u32 new_rx_count, new_tx_count;
-
- if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
- return -EINVAL;
-
- new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
- new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
-
- new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
- new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
- new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
-
- if ((new_tx_count == adapter->tx_ring->count) &&
- (new_rx_count == adapter->rx_ring->count)) {
- /* nothing to do */
- return 0;
- }
-
- while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
- msleep(1);
-
- /*
- * If the adapter isn't up and running then just set the
- * new parameters and scurry for the exits.
- */
- if (!netif_running(adapter->netdev)) {
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].count = new_tx_count;
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].count = new_rx_count;
- adapter->tx_ring_count = new_tx_count;
- adapter->rx_ring_count = new_rx_count;
- goto clear_reset;
- }
-
- tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!tx_ring) {
- err = -ENOMEM;
- goto clear_reset;
- }
-
- rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring) {
- err = -ENOMEM;
- goto err_rx_setup;
- }
-
- ixgbevf_down(adapter);
-
- memcpy(tx_ring, adapter->tx_ring,
- adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
- for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring[i].count = new_tx_count;
- err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
- if (err) {
- while (i) {
- i--;
- ixgbevf_free_tx_resources(adapter,
- &tx_ring[i]);
- }
- goto err_tx_ring_setup;
- }
- tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
- }
-
- memcpy(rx_ring, adapter->rx_ring,
- adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring[i].count = new_rx_count;
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (err) {
- while (i) {
- i--;
- ixgbevf_free_rx_resources(adapter,
- &rx_ring[i]);
- }
- goto err_rx_ring_setup;
- }
- rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
- }
-
- /*
- * Only switch to new rings if all the prior allocations
- * and ring setups have succeeded.
- */
- kfree(adapter->tx_ring);
- adapter->tx_ring = tx_ring;
- adapter->tx_ring_count = new_tx_count;
-
- kfree(adapter->rx_ring);
- adapter->rx_ring = rx_ring;
- adapter->rx_ring_count = new_rx_count;
-
- /* success! */
- ixgbevf_up(adapter);
-
- goto clear_reset;
-
-err_rx_ring_setup:
- for(i = 0; i < adapter->num_tx_queues; i++)
- ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
-
-err_tx_ring_setup:
- kfree(rx_ring);
-
-err_rx_setup:
- kfree(tx_ring);
-
-clear_reset:
- clear_bit(__IXGBEVF_RESETTING, &adapter->state);
- return err;
-}
-
-static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
-{
- switch (stringset) {
- case ETH_SS_TEST:
- return IXGBE_TEST_LEN;
- case ETH_SS_STATS:
- return IXGBE_GLOBAL_STATS_LEN;
- default:
- return -EINVAL;
- }
-}
-
-static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- int i;
-
- ixgbevf_update_stats(adapter);
- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- char *p = (char *)adapter +
- ixgbe_gstrings_stats[i].stat_offset;
- char *b = (char *)adapter +
- ixgbe_gstrings_stats[i].base_stat_offset;
- char *r = (char *)adapter +
- ixgbe_gstrings_stats[i].saved_reset_offset;
- data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
- ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
- ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
- }
-}
-
-static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
-{
- char *p = (char *)data;
- int i;
-
- switch (stringset) {
- case ETH_SS_TEST:
- memcpy(data, *ixgbe_gstrings_test,
- IXGBE_TEST_LEN * ETH_GSTRING_LEN);
- break;
- case ETH_SS_STATS:
- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- memcpy(p, ixgbe_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- bool link_up;
- u32 link_speed = 0;
- *data = 0;
-
- hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
- if (!link_up)
- *data = 1;
-
- return *data;
-}
-
-/* ethtool register test data */
-struct ixgbevf_reg_test {
- u16 reg;
- u8 array_len;
- u8 test_type;
- u32 mask;
- u32 write;
-};
-
-/* In the hardware, registers are laid out either singly, in arrays
- * spaced 0x40 bytes apart, or in contiguous tables. We assume
- * most tests take place on arrays or single registers (handled
- * as a single-element array) and special-case the tables.
- * Table tests are always pattern tests.
- *
- * We also make provision for some required setup steps by specifying
- * registers to be written without any read-back testing.
- */
-
-#define PATTERN_TEST 1
-#define SET_READ_TEST 2
-#define WRITE_NO_TEST 3
-#define TABLE32_TEST 4
-#define TABLE64_TEST_LO 5
-#define TABLE64_TEST_HI 6
-
-/* default VF register test */
-static const struct ixgbevf_reg_test reg_test_vf[] = {
- { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
- { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
- { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
- { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
- { 0, 0, 0, 0 }
-};
-
-static const u32 register_test_patterns[] = {
- 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
-};
-
-#define REG_PATTERN_TEST(R, M, W) \
-{ \
- u32 pat, val, before; \
- for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
- before = readl(adapter->hw.hw_addr + R); \
- writel((register_test_patterns[pat] & W), \
- (adapter->hw.hw_addr + R)); \
- val = readl(adapter->hw.hw_addr + R); \
- if (val != (register_test_patterns[pat] & W & M)) { \
- hw_dbg(&adapter->hw, \
- "pattern test reg %04X failed: got " \
- "0x%08X expected 0x%08X\n", \
- R, val, (register_test_patterns[pat] & W & M)); \
- *data = R; \
- writel(before, adapter->hw.hw_addr + R); \
- return 1; \
- } \
- writel(before, adapter->hw.hw_addr + R); \
- } \
-}
-
-#define REG_SET_AND_CHECK(R, M, W) \
-{ \
- u32 val, before; \
- before = readl(adapter->hw.hw_addr + R); \
- writel((W & M), (adapter->hw.hw_addr + R)); \
- val = readl(adapter->hw.hw_addr + R); \
- if ((W & M) != (val & M)) { \
- printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
- "expected 0x%08X\n", R, (val & M), (W & M)); \
- *data = R; \
- writel(before, (adapter->hw.hw_addr + R)); \
- return 1; \
- } \
- writel(before, (adapter->hw.hw_addr + R)); \
-}
-
-static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
-{
- const struct ixgbevf_reg_test *test;
- u32 i;
-
- test = reg_test_vf;
-
- /*
- * Perform the register test, looping through the test table
- * until we either fail or reach the null entry.
- */
- while (test->reg) {
- for (i = 0; i < test->array_len; i++) {
- switch (test->test_type) {
- case PATTERN_TEST:
- REG_PATTERN_TEST(test->reg + (i * 0x40),
- test->mask,
- test->write);
- break;
- case SET_READ_TEST:
- REG_SET_AND_CHECK(test->reg + (i * 0x40),
- test->mask,
- test->write);
- break;
- case WRITE_NO_TEST:
- writel(test->write,
- (adapter->hw.hw_addr + test->reg)
- + (i * 0x40));
- break;
- case TABLE32_TEST:
- REG_PATTERN_TEST(test->reg + (i * 4),
- test->mask,
- test->write);
- break;
- case TABLE64_TEST_LO:
- REG_PATTERN_TEST(test->reg + (i * 8),
- test->mask,
- test->write);
- break;
- case TABLE64_TEST_HI:
- REG_PATTERN_TEST((test->reg + 4) + (i * 8),
- test->mask,
- test->write);
- break;
- }
- }
- test++;
- }
-
- *data = 0;
- return *data;
-}
-
-static void ixgbevf_diag_test(struct net_device *netdev,
- struct ethtool_test *eth_test, u64 *data)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- bool if_running = netif_running(netdev);
-
- set_bit(__IXGBEVF_TESTING, &adapter->state);
- if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
- /* Offline tests */
-
- hw_dbg(&adapter->hw, "offline testing starting\n");
-
- /* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
- if (ixgbevf_link_test(adapter, &data[1]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- if (if_running)
- /* indicate we're in test mode */
- dev_close(netdev);
- else
- ixgbevf_reset(adapter);
-
- hw_dbg(&adapter->hw, "register testing starting\n");
- if (ixgbevf_reg_test(adapter, &data[0]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- ixgbevf_reset(adapter);
-
- clear_bit(__IXGBEVF_TESTING, &adapter->state);
- if (if_running)
- dev_open(netdev);
- } else {
- hw_dbg(&adapter->hw, "online testing starting\n");
- /* Online tests */
- if (ixgbevf_link_test(adapter, &data[1]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- /* Online tests aren't run; pass by default */
- data[0] = 0;
-
- clear_bit(__IXGBEVF_TESTING, &adapter->state);
- }
- msleep_interruptible(4 * 1000);
-}
-
-static int ixgbevf_nway_reset(struct net_device *netdev)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
- if (netif_running(netdev)) {
- if (!adapter->dev_closed)
- ixgbevf_reinit_locked(adapter);
- }
-
- return 0;
-}
-
-static struct ethtool_ops ixgbevf_ethtool_ops = {
- .get_settings = ixgbevf_get_settings,
- .get_drvinfo = ixgbevf_get_drvinfo,
- .get_regs_len = ixgbevf_get_regs_len,
- .get_regs = ixgbevf_get_regs,
- .nway_reset = ixgbevf_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_ringparam = ixgbevf_get_ringparam,
- .set_ringparam = ixgbevf_set_ringparam,
- .get_rx_csum = ixgbevf_get_rx_csum,
- .set_rx_csum = ixgbevf_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_msglevel = ixgbevf_get_msglevel,
- .set_msglevel = ixgbevf_set_msglevel,
- .get_tso = ethtool_op_get_tso,
- .set_tso = ixgbevf_set_tso,
- .self_test = ixgbevf_diag_test,
- .get_sset_count = ixgbevf_get_sset_count,
- .get_strings = ixgbevf_get_strings,
- .get_ethtool_stats = ixgbevf_get_ethtool_stats,
-};
-
-void ixgbevf_set_ethtool_ops(struct net_device *netdev)
-{
- SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
-}
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
deleted file mode 100644
index 7a883312577..00000000000
--- a/drivers/net/ixgbevf/mbx.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "mbx.h"
-
-/**
- * ixgbevf_poll_for_msg - Wait for message notification
- * @hw: pointer to the HW structure
- *
- * returns 0 if it successfully received a message notification
- **/
-static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
-{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- int countdown = mbx->timeout;
-
- while (countdown && mbx->ops.check_for_msg(hw)) {
- countdown--;
- udelay(mbx->udelay);
- }
-
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-
- return countdown ? 0 : IXGBE_ERR_MBX;
-}
-
-/**
- * ixgbevf_poll_for_ack - Wait for message acknowledgement
- * @hw: pointer to the HW structure
- *
- * returns 0 if it successfully received a message acknowledgement
- **/
-static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
-{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- int countdown = mbx->timeout;
-
- while (countdown && mbx->ops.check_for_ack(hw)) {
- countdown--;
- udelay(mbx->udelay);
- }
-
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-
- return countdown ? 0 : IXGBE_ERR_MBX;
-}
-
-/**
- * ixgbevf_read_posted_mbx - Wait for message notification and receive message
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- *
- * returns 0 if it successfully received a message notification and
- * copied it into the receive buffer.
- **/
-static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
-{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
-
- ret_val = ixgbevf_poll_for_msg(hw);
-
- /* if ack received read message, otherwise we timed out */
- if (!ret_val)
- ret_val = mbx->ops.read(hw, msg, size);
-
- return ret_val;
-}
-
-/**
- * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- *
- * returns 0 if it successfully copied message into the buffer and
- * received an ack to that message within delay * timeout period
- **/
-static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
-{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val;
-
- /* send msg */
- ret_val = mbx->ops.write(hw, msg, size);
-
- /* if msg sent wait until we receive an ack */
- if (!ret_val)
- ret_val = ixgbevf_poll_for_ack(hw);
-
- return ret_val;
-}
-
-/**
- * ixgbevf_read_v2p_mailbox - read v2p mailbox
- * @hw: pointer to the HW structure
- *
- * This function is used to read the v2p mailbox without losing the read to
- * clear status bits.
- **/
-static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
-{
- u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
-
- v2p_mailbox |= hw->mbx.v2p_mailbox;
- hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
-
- return v2p_mailbox;
-}
-
-/**
- * ixgbevf_check_for_bit_vf - Determine if a status bit was set
- * @hw: pointer to the HW structure
- * @mask: bitmask for bits to be tested and cleared
- *
- * This function is used to check for the read to clear bits within
- * the V2P mailbox.
- **/
-static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
-{
- u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
- s32 ret_val = IXGBE_ERR_MBX;
-
- if (v2p_mailbox & mask)
- ret_val = 0;
-
- hw->mbx.v2p_mailbox &= ~mask;
-
- return ret_val;
-}
-
-/**
- * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
- * @hw: pointer to the HW structure
- *
- * returns 0 if the PF has set the Status bit or else ERR_MBX
- **/
-static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
-{
- s32 ret_val = IXGBE_ERR_MBX;
-
- if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
- ret_val = 0;
- hw->mbx.stats.reqs++;
- }
-
- return ret_val;
-}
-
-/**
- * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
- * @hw: pointer to the HW structure
- *
- * returns 0 if the PF has set the ACK bit or else ERR_MBX
- **/
-static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
-{
- s32 ret_val = IXGBE_ERR_MBX;
-
- if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
- ret_val = 0;
- hw->mbx.stats.acks++;
- }
-
- return ret_val;
-}
-
-/**
- * ixgbevf_check_for_rst_vf - checks to see if the PF has reset
- * @hw: pointer to the HW structure
- *
- * returns true if the PF has set the reset done bit or else false
- **/
-static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
-{
- s32 ret_val = IXGBE_ERR_MBX;
-
- if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
- IXGBE_VFMAILBOX_RSTI))) {
- ret_val = 0;
- hw->mbx.stats.rsts++;
- }
-
- return ret_val;
-}
-
-/**
- * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
- * @hw: pointer to the HW structure
- *
- * return 0 if we obtained the mailbox lock
- **/
-static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
-{
- s32 ret_val = IXGBE_ERR_MBX;
-
- /* Take ownership of the buffer */
- IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
-
- /* reserve mailbox for vf use */
- if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
- ret_val = 0;
-
- return ret_val;
-}
-
-/**
- * ixgbevf_write_mbx_vf - Write a message to the mailbox
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- *
- * returns 0 if it successfully copied message into the buffer
- **/
-static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
-{
- s32 ret_val;
- u16 i;
-
-
- /* lock the mailbox to prevent pf/vf race condition */
- ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
- if (ret_val)
- goto out_no_write;
-
- /* flush msg and acks as we are overwriting the message buffer */
- ixgbevf_check_for_msg_vf(hw);
- ixgbevf_check_for_ack_vf(hw);
-
- /* copy the caller specified message to the mailbox memory buffer */
- for (i = 0; i < size; i++)
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
-
- /* update stats */
- hw->mbx.stats.msgs_tx++;
-
- /* Drop VFU and interrupt the PF to tell it a message has been sent */
- IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
-
-out_no_write:
- return ret_val;
-}
-
-/**
- * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- *
- * returns 0 if it successfuly read message from buffer
- **/
-static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
-{
- s32 ret_val = 0;
- u16 i;
-
- /* lock the mailbox to prevent pf/vf race condition */
- ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
- if (ret_val)
- goto out_no_read;
-
- /* copy the message from the mailbox memory buffer */
- for (i = 0; i < size; i++)
- msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
-
- /* Acknowledge receipt and release mailbox, then we're done */
- IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
-
- /* update stats */
- hw->mbx.stats.msgs_rx++;
-
-out_no_read:
- return ret_val;
-}
-
-/**
- * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
- * @hw: pointer to the HW structure
- *
- * Initializes the hw->mbx struct to correct values for vf mailbox
- */
-static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
-{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
-
- /* start mailbox as timed out and let the reset_hw call set the timeout
- * value to begin communications */
- mbx->timeout = 0;
- mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
-
- mbx->size = IXGBE_VFMAILBOX_SIZE;
-
- mbx->stats.msgs_tx = 0;
- mbx->stats.msgs_rx = 0;
- mbx->stats.reqs = 0;
- mbx->stats.acks = 0;
- mbx->stats.rsts = 0;
-
- return 0;
-}
-
-struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
- .init_params = ixgbevf_init_mbx_params_vf,
- .read = ixgbevf_read_mbx_vf,
- .write = ixgbevf_write_mbx_vf,
- .read_posted = ixgbevf_read_posted_mbx,
- .write_posted = ixgbevf_write_posted_mbx,
- .check_for_msg = ixgbevf_check_for_msg_vf,
- .check_for_ack = ixgbevf_check_for_ack_vf,
- .check_for_rst = ixgbevf_check_for_rst_vf,
-};
-
diff --git a/drivers/net/ixp2000/Kconfig b/drivers/net/ixp2000/Kconfig
deleted file mode 100644
index 2fec2415651..00000000000
--- a/drivers/net/ixp2000/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config ENP2611_MSF_NET
- tristate "Radisys ENP2611 MSF network interface support"
- depends on ARCH_ENP2611
- help
- This is a driver for the MSF network interface unit in
- the IXP2400 on the Radisys ENP2611 platform.
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 05172c39a0c..a3ce3d4561e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -169,6 +169,9 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
port = macvlan_port_get_rcu(skb->dev);
if (is_multicast_ether_addr(eth->h_dest)) {
+ skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
+ if (!skb)
+ return RX_HANDLER_CONSUMED;
src = macvlan_hash_lookup(port, eth->h_source);
if (!src)
/* frame comes from an external address */
@@ -239,7 +242,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
dest = macvlan_hash_lookup(port, eth->h_dest);
if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
/* send to lowerdev first for its network taps */
- vlan->forward(vlan->lowerdev, skb);
+ dev_forward_skb(vlan->lowerdev, skb);
return NET_XMIT_SUCCESS;
}
@@ -543,7 +546,8 @@ static int macvlan_ethtool_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
const struct macvlan_dev *vlan = netdev_priv(dev);
- return dev_ethtool_get_settings(vlan->lowerdev, cmd);
+
+ return __ethtool_get_settings(vlan->lowerdev, cmd);
}
static const struct ethtool_ops macvlan_ethtool_ops = {
@@ -561,7 +565,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_change_mtu = macvlan_change_mtu,
.ndo_change_rx_flags = macvlan_change_rx_flags,
.ndo_set_mac_address = macvlan_set_mac_address,
- .ndo_set_multicast_list = macvlan_set_multicast_list,
+ .ndo_set_rx_mode = macvlan_set_multicast_list,
.ndo_get_stats64 = macvlan_dev_get_stats64,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = macvlan_vlan_rx_add_vid,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index ab96c319a24..1b7082d08f3 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -51,15 +51,13 @@ static struct proto macvtap_proto = {
};
/*
- * Minor number matches netdev->ifindex, so need a potentially
- * large value. This also makes it possible to split the
- * tap functionality out again in the future by offering it
- * from other drivers besides macvtap. As long as every device
- * only has one tap, the interface numbers assure that the
- * device nodes are unique.
+ * Variables for dealing with macvtaps device numbers.
*/
static dev_t macvtap_major;
-#define MACVTAP_NUM_DEVS 65536
+#define MACVTAP_NUM_DEVS (1U << MINORBITS)
+static DEFINE_MUTEX(minor_lock);
+static DEFINE_IDR(minor_idr);
+
#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;
@@ -231,6 +229,8 @@ static void macvtap_del_queues(struct net_device *dev)
}
}
BUG_ON(vlan->numvtaps != 0);
+ /* guarantee that any future macvtap_set_queue will fail */
+ vlan->numvtaps = MAX_MACVTAP_QUEUES;
spin_unlock(&macvtap_lock);
synchronize_rcu();
@@ -273,39 +273,73 @@ static int macvtap_receive(struct sk_buff *skb)
return macvtap_forward(skb->dev, skb);
}
-static int macvtap_newlink(struct net *src_net,
- struct net_device *dev,
- struct nlattr *tb[],
- struct nlattr *data[])
+static int macvtap_get_minor(struct macvlan_dev *vlan)
{
- struct device *classdev;
- dev_t devt;
- int err;
+ int retval = -ENOMEM;
+ int id;
+
+ mutex_lock(&minor_lock);
+ if (idr_pre_get(&minor_idr, GFP_KERNEL) == 0)
+ goto exit;
+
+ retval = idr_get_new_above(&minor_idr, vlan, 1, &id);
+ if (retval < 0) {
+ if (retval == -EAGAIN)
+ retval = -ENOMEM;
+ goto exit;
+ }
+ if (id < MACVTAP_NUM_DEVS) {
+ vlan->minor = id;
+ } else {
+ printk(KERN_ERR "too many macvtap devices\n");
+ retval = -EINVAL;
+ idr_remove(&minor_idr, id);
+ }
+exit:
+ mutex_unlock(&minor_lock);
+ return retval;
+}
- err = macvlan_common_newlink(src_net, dev, tb, data,
- macvtap_receive, macvtap_forward);
- if (err)
- goto out;
+static void macvtap_free_minor(struct macvlan_dev *vlan)
+{
+ mutex_lock(&minor_lock);
+ if (vlan->minor) {
+ idr_remove(&minor_idr, vlan->minor);
+ vlan->minor = 0;
+ }
+ mutex_unlock(&minor_lock);
+}
- devt = MKDEV(MAJOR(macvtap_major), dev->ifindex);
+static struct net_device *dev_get_by_macvtap_minor(int minor)
+{
+ struct net_device *dev = NULL;
+ struct macvlan_dev *vlan;
- classdev = device_create(macvtap_class, &dev->dev, devt,
- dev, "tap%d", dev->ifindex);
- if (IS_ERR(classdev)) {
- err = PTR_ERR(classdev);
- macvtap_del_queues(dev);
+ mutex_lock(&minor_lock);
+ vlan = idr_find(&minor_idr, minor);
+ if (vlan) {
+ dev = vlan->dev;
+ dev_hold(dev);
}
+ mutex_unlock(&minor_lock);
+ return dev;
+}
-out:
- return err;
+static int macvtap_newlink(struct net *src_net,
+ struct net_device *dev,
+ struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ /* Don't put anything that may fail after macvlan_common_newlink
+ * because we can't undo what it does.
+ */
+ return macvlan_common_newlink(src_net, dev, tb, data,
+ macvtap_receive, macvtap_forward);
}
static void macvtap_dellink(struct net_device *dev,
struct list_head *head)
{
- device_destroy(macvtap_class,
- MKDEV(MAJOR(macvtap_major), dev->ifindex));
-
macvtap_del_queues(dev);
macvlan_dellink(dev, head);
}
@@ -337,11 +371,15 @@ static void macvtap_sock_write_space(struct sock *sk)
wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
+static void macvtap_sock_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+}
+
static int macvtap_open(struct inode *inode, struct file *file)
{
struct net *net = current->nsproxy->net_ns;
- struct net_device *dev = dev_get_by_index(net, iminor(inode));
- struct macvlan_dev *vlan = netdev_priv(dev);
+ struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
struct macvtap_queue *q;
int err;
@@ -349,11 +387,6 @@ static int macvtap_open(struct inode *inode, struct file *file)
if (!dev)
goto out;
- /* check if this is a macvtap device */
- err = -EINVAL;
- if (dev->rtnl_link_ops != &macvtap_link_ops)
- goto out;
-
err = -ENOMEM;
q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
&macvtap_proto);
@@ -368,18 +401,19 @@ static int macvtap_open(struct inode *inode, struct file *file)
q->sock.ops = &macvtap_socket_ops;
sock_init_data(&q->sock, &q->sk);
q->sk.sk_write_space = macvtap_sock_write_space;
+ q->sk.sk_destruct = macvtap_sock_destruct;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
/*
* so far only KVM virtio_net uses macvtap, enable zero copy between
* guest kernel and host kernel when lower device supports zerocopy
+ *
+ * The macvlan supports zerocopy iff the lower device supports zero
+ * copy so we don't have to look at the lower device directly.
*/
- if (vlan) {
- if ((vlan->lowerdev->features & NETIF_F_HIGHDMA) &&
- (vlan->lowerdev->features & NETIF_F_SG))
- sock_set_flag(&q->sk, SOCK_ZEROCOPY);
- }
+ if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
+ sock_set_flag(&q->sk, SOCK_ZEROCOPY);
err = macvtap_set_queue(dev, file, q);
if (err)
@@ -453,7 +487,6 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
int copy = skb_headlen(skb);
int size, offset1 = 0;
int i = 0;
- skb_frag_t *f;
/* Skip over from offset */
while (count && (offset >= from->iov_len)) {
@@ -503,14 +536,13 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
skb->truesize += len;
atomic_add(len, &skb->sk->sk_wmem_alloc);
while (len) {
- f = &skb_shinfo(skb)->frags[i];
- f->page = page[i];
- f->page_offset = base & ~PAGE_MASK;
- f->size = min_t(int, len, PAGE_SIZE - f->page_offset);
+ int off = base & ~PAGE_MASK;
+ int size = min_t(int, len, PAGE_SIZE - off);
+ __skb_fill_page_desc(skb, i, page[i], off, size);
skb_shinfo(skb)->nr_frags++;
/* increase sk_wmem_alloc */
- base += f->size;
- len -= f->size;
+ base += size;
+ len -= size;
i++;
}
offset1 = 0;
@@ -970,6 +1002,52 @@ struct socket *macvtap_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);
+static int macvtap_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct macvlan_dev *vlan;
+ struct device *classdev;
+ dev_t devt;
+ int err;
+
+ if (dev->rtnl_link_ops != &macvtap_link_ops)
+ return NOTIFY_DONE;
+
+ vlan = netdev_priv(dev);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ /* Create the device node here after the network device has
+ * been registered but before register_netdevice has
+ * finished running.
+ */
+ err = macvtap_get_minor(vlan);
+ if (err)
+ return notifier_from_errno(err);
+
+ devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
+ classdev = device_create(macvtap_class, &dev->dev, devt,
+ dev, "tap%d", dev->ifindex);
+ if (IS_ERR(classdev)) {
+ macvtap_free_minor(vlan);
+ return notifier_from_errno(PTR_ERR(classdev));
+ }
+ break;
+ case NETDEV_UNREGISTER:
+ devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
+ device_destroy(macvtap_class, devt);
+ macvtap_free_minor(vlan);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block macvtap_notifier_block __read_mostly = {
+ .notifier_call = macvtap_device_event,
+};
+
static int macvtap_init(void)
{
int err;
@@ -990,12 +1068,18 @@ static int macvtap_init(void)
goto out3;
}
- err = macvlan_link_register(&macvtap_link_ops);
+ err = register_netdevice_notifier(&macvtap_notifier_block);
if (err)
goto out4;
+ err = macvlan_link_register(&macvtap_link_ops);
+ if (err)
+ goto out5;
+
return 0;
+out5:
+ unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
class_unregister(macvtap_class);
out3:
@@ -1010,6 +1094,7 @@ module_init(macvtap_init);
static void macvtap_exit(void)
{
rtnl_link_unregister(&macvtap_link_ops);
+ unregister_netdevice_notifier(&macvtap_notifier_block);
class_unregister(macvtap_class);
cdev_del(&macvtap_cdev);
unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index ed2a3977c6e..e8882023576 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -307,6 +307,11 @@ static ssize_t store_enabled(struct netconsole_target *nt,
return err;
if (enabled < 0 || enabled > 1)
return -EINVAL;
+ if (enabled == nt->enabled) {
+ printk(KERN_INFO "netconsole: network logging has already %s\n",
+ nt->enabled ? "started" : "stopped");
+ return -EINVAL;
+ }
if (enabled) { /* 1 */
diff --git a/drivers/net/octeon/Kconfig b/drivers/net/octeon/Kconfig
deleted file mode 100644
index 1e56bbf3f5c..00000000000
--- a/drivers/net/octeon/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config OCTEON_MGMT_ETHERNET
- tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)"
- depends on CPU_CAVIUM_OCTEON
- select PHYLIB
- select MDIO_OCTEON
- default y
- help
- This option enables the ethernet driver for the management
- port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
- CN54XX, CN52XX, and CN6XXX chips.
diff --git a/drivers/net/octeon/Makefile b/drivers/net/octeon/Makefile
deleted file mode 100644
index 906edecacfd..00000000000
--- a/drivers/net/octeon/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-
-obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
deleted file mode 100644
index c0f23376a46..00000000000
--- a/drivers/net/pci-skeleton.c
+++ /dev/null
@@ -1,1923 +0,0 @@
-/*
-
- drivers/net/pci-skeleton.c
-
- Maintained by Jeff Garzik <jgarzik@pobox.com>
-
- Original code came from 8139too.c, which in turns was based
- originally on Donald Becker's rtl8139.c driver, versions 1.11
- and older. This driver was originally based on rtl8139.c
- version 1.07. Header of rtl8139.c version 1.11:
-
- -----<snip>-----
-
- Written 1997-2000 by Donald Becker.
- This software may be used and distributed according to the
- terms of the GNU General Public License (GPL), incorporated
- herein by reference. Drivers based on or derived from this
- code fall under the GPL and must retain the authorship,
- copyright and license notice. This file is not a complete
- program and may only be used when the entire operating
- system is licensed under the GPL.
-
- This driver is for boards based on the RTL8129 and RTL8139
- PCI ethernet chips.
-
- The author may be reached as becker@scyld.com, or C/O Scyld
- Computing Corporation 410 Severn Ave., Suite 210 Annapolis
- MD 21403
-
- Support and updates available at
- http://www.scyld.com/network/rtl8139.html
-
- Twister-tuning table provided by Kinston
- <shangh@realtek.com.tw>.
-
- -----<snip>-----
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
-
------------------------------------------------------------------------------
-
- Theory of Operation
-
-I. Board Compatibility
-
-This device driver is designed for the RealTek RTL8139 series, the RealTek
-Fast Ethernet controllers for PCI and CardBus. This chip is used on many
-low-end boards, sometimes with its markings changed.
-
-
-II. Board-specific settings
-
-PCI bus devices are configured by the system at boot time, so no jumpers
-need to be set on the board. The system BIOS will assign the
-PCI INTA signal to a (preferably otherwise unused) system IRQ line.
-
-III. Driver operation
-
-IIIa. Rx Ring buffers
-
-The receive unit uses a single linear ring buffer rather than the more
-common (and more efficient) descriptor-based architecture. Incoming frames
-are sequentially stored into the Rx region, and the host copies them into
-skbuffs.
-
-Comment: While it is theoretically possible to process many frames in place,
-any delay in Rx processing would cause us to drop frames. More importantly,
-the Linux protocol stack is not designed to operate in this manner.
-
-IIIb. Tx operation
-
-The RTL8139 uses a fixed set of four Tx descriptors in register space.
-In a stunningly bad design choice, Tx frames must be 32 bit aligned. Linux
-aligns the IP header on word boundaries, and 14 byte ethernet header means
-that almost all frames will need to be copied to an alignment buffer.
-
-IVb. References
-
-http://www.realtek.com.tw/
-http://www.scyld.com/expert/NWay.html
-
-IVc. Errata
-
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/crc32.h>
-#include <linux/io.h>
-
-#define NETDRV_VERSION "1.0.1"
-#define MODNAME "netdrv"
-#define NETDRV_DRIVER_LOAD_MSG "MyVendor Fast Ethernet driver " NETDRV_VERSION " loaded"
-
-static char version[] __devinitdata =
- KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
- " Support available from http://foo.com/bar/baz.html\n";
-
-/* define to 1 to enable PIO instead of MMIO */
-#undef USE_IO_OPS
-
-/* define to 1 to enable copious debugging info */
-#undef NETDRV_DEBUG
-
-/* define to 1 to disable lightweight runtime debugging checks */
-#undef NETDRV_NDEBUG
-
-
-#ifdef NETDRV_DEBUG
-/* note: prints function name for you */
-#define DPRINTK(fmt, args...) \
- printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
-#else
-#define DPRINTK(fmt, args...) \
-do { \
- if (0) \
- printk(KERN_DEBUG fmt, ##args); \
-} while (0)
-#endif
-
-#ifdef NETDRV_NDEBUG
-#define assert(expr) do {} while (0)
-#else
-#define assert(expr) \
- if (!(expr)) { \
- printk("Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- }
-#endif
-
-
-/* A few user-configurable values. */
-/* media options */
-static int media[] = {-1, -1, -1, -1, -1, -1, -1, -1};
-
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-static int max_interrupt_work = 20;
-
-/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
- The RTL chips use a 64 element hash table based on the Ethernet CRC. */
-static int multicast_filter_limit = 32;
-
-/* Size of the in-memory receive ring. */
-#define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */
-#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
-#define RX_BUF_PAD 16
-#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
-#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
-
-/* Number of Tx descriptor registers. */
-#define NUM_TX_DESC 4
-
-/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
-#define MAX_ETH_FRAME_SIZE 1536
-
-/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
-#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
-#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
-
-/* PCI Tuning Parameters
- Threshold is bytes transferred to chip before transmission starts. */
-#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
-
-/* The following settings are log_2(bytes)-4:
- 0==16 bytes 1==32 2==64 3==128 4==256 5==512 6==1024 7==end of packet.
-*/
-#define RX_FIFO_THRESH 6 /* Rx buffer level before first PCI xfer. */
-#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
-#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
-
-
-/* Operational parameters that usually are not changed. */
-/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT (6 * HZ)
-
-enum {
- HAS_CHIP_XCVR = 0x020000,
- HAS_LNK_CHNG = 0x040000,
-};
-
-#define NETDRV_MIN_IO_SIZE 0x80
-#define RTL8139B_IO_SIZE 256
-
-#define NETDRV_CAPS (HAS_CHIP_XCVR | HAS_LNK_CHNG)
-
-typedef enum {
- RTL8139 = 0,
- NETDRV_CB,
- SMC1211TX,
- /*MPX5030,*/
- DELTA8139,
- ADDTRON8139,
-} board_t;
-
-
-/* indexed by board_t, above */
-static struct {
- const char *name;
-} board_info[] __devinitdata = {
- { "RealTek RTL8139 Fast Ethernet" },
- { "RealTek RTL8139B PCI/CardBus" },
- { "SMC1211TX EZCard 10/100 (RealTek RTL8139)" },
-/* { MPX5030, "Accton MPX5030 (RealTek RTL8139)" },*/
- { "Delta Electronics 8139 10/100BaseTX" },
- { "Addtron Technology 8139 10/100BaseTX" },
-};
-
-
-static DEFINE_PCI_DEVICE_TABLE(netdrv_pci_tbl) = {
- {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
- {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB },
- {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
-/* {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MPX5030 },*/
- {0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DELTA8139 },
- {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ADDTRON8139 },
- {0,}
-};
-MODULE_DEVICE_TABLE(pci, netdrv_pci_tbl);
-
-
-/* The rest of these values should never change. */
-
-/* Symbolic offsets to registers. */
-enum NETDRV_registers {
- MAC0 = 0, /* Ethernet hardware address. */
- MAR0 = 8, /* Multicast filter. */
- TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */
- TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */
- RxBuf = 0x30,
- RxEarlyCnt = 0x34,
- RxEarlyStatus = 0x36,
- ChipCmd = 0x37,
- RxBufPtr = 0x38,
- RxBufAddr = 0x3A,
- IntrMask = 0x3C,
- IntrStatus = 0x3E,
- TxConfig = 0x40,
- ChipVersion = 0x43,
- RxConfig = 0x44,
- Timer = 0x48, /* A general-purpose counter. */
- RxMissed = 0x4C, /* 24 bits valid, write clears. */
- Cfg9346 = 0x50,
- Config0 = 0x51,
- Config1 = 0x52,
- FlashReg = 0x54,
- MediaStatus = 0x58,
- Config3 = 0x59,
- Config4 = 0x5A, /* absent on RTL-8139A */
- HltClk = 0x5B,
- MultiIntr = 0x5C,
- TxSummary = 0x60,
- BasicModeCtrl = 0x62,
- BasicModeStatus = 0x64,
- NWayAdvert = 0x66,
- NWayLPAR = 0x68,
- NWayExpansion = 0x6A,
- /* Undocumented registers, but required for proper operation. */
- FIFOTMS = 0x70, /* FIFO Control and test. */
- CSCR = 0x74, /* Chip Status and Configuration Register. */
- PARA78 = 0x78,
- PARA7c = 0x7c, /* Magic transceiver parameter register. */
- Config5 = 0xD8, /* absent on RTL-8139A */
-};
-
-enum ClearBitMasks {
- MultiIntrClear = 0xF000,
- ChipCmdClear = 0xE2,
- Config1Clear = (1 << 7) | (1 << 6) | (1 << 3) | (1 << 2) | (1 << 1),
-};
-
-enum ChipCmdBits {
- CmdReset = 0x10,
- CmdRxEnb = 0x08,
- CmdTxEnb = 0x04,
- RxBufEmpty = 0x01,
-};
-
-/* Interrupt register bits, using my own meaningful names. */
-enum IntrStatusBits {
- PCIErr = 0x8000,
- PCSTimeout = 0x4000,
- RxFIFOOver = 0x40,
- RxUnderrun = 0x20,
- RxOverflow = 0x10,
- TxErr = 0x08,
- TxOK = 0x04,
- RxErr = 0x02,
- RxOK = 0x01,
-};
-enum TxStatusBits {
- TxHostOwns = 0x2000,
- TxUnderrun = 0x4000,
- TxStatOK = 0x8000,
- TxOutOfWindow = 0x20000000,
- TxAborted = 0x40000000,
- TxCarrierLost = 0x80000000,
-};
-enum RxStatusBits {
- RxMulticast = 0x8000,
- RxPhysical = 0x4000,
- RxBroadcast = 0x2000,
- RxBadSymbol = 0x0020,
- RxRunt = 0x0010,
- RxTooLong = 0x0008,
- RxCRCErr = 0x0004,
- RxBadAlign = 0x0002,
- RxStatusOK = 0x0001,
-};
-
-/* Bits in RxConfig. */
-enum rx_mode_bits {
- AcceptErr = 0x20,
- AcceptRunt = 0x10,
- AcceptBroadcast = 0x08,
- AcceptMulticast = 0x04,
- AcceptMyPhys = 0x02,
- AcceptAllPhys = 0x01,
-};
-
-/* Bits in TxConfig. */
-enum tx_config_bits {
- TxIFG1 = (1 << 25), /* Interframe Gap Time */
- TxIFG0 = (1 << 24), /* Enabling these bits violates IEEE 802.3 */
- TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
- TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */
- TxClearAbt = (1 << 0), /* Clear abort (WO) */
- TxDMAShift = 8, /* DMA burst value(0-7) is shift this many bits */
-
- TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
-};
-
-/* Bits in Config1 */
-enum Config1Bits {
- Cfg1_PM_Enable = 0x01,
- Cfg1_VPD_Enable = 0x02,
- Cfg1_PIO = 0x04,
- Cfg1_MMIO = 0x08,
- Cfg1_LWAKE = 0x10,
- Cfg1_Driver_Load = 0x20,
- Cfg1_LED0 = 0x40,
- Cfg1_LED1 = 0x80,
-};
-
-enum RxConfigBits {
- /* Early Rx threshold, none or X/16 */
- RxCfgEarlyRxNone = 0,
- RxCfgEarlyRxShift = 24,
-
- /* rx fifo threshold */
- RxCfgFIFOShift = 13,
- RxCfgFIFONone = (7 << RxCfgFIFOShift),
-
- /* Max DMA burst */
- RxCfgDMAShift = 8,
- RxCfgDMAUnlimited = (7 << RxCfgDMAShift),
-
- /* rx ring buffer length */
- RxCfgRcv8K = 0,
- RxCfgRcv16K = (1 << 11),
- RxCfgRcv32K = (1 << 12),
- RxCfgRcv64K = (1 << 11) | (1 << 12),
-
- /* Disable packet wrap at end of Rx buffer */
- RxNoWrap = (1 << 7),
-};
-
-
-/* Twister tuning parameters from RealTek.
- Completely undocumented, but required to tune bad links. */
-enum CSCRBits {
- CSCR_LinkOKBit = 0x0400,
- CSCR_LinkChangeBit = 0x0800,
- CSCR_LinkStatusBits = 0x0f000,
- CSCR_LinkDownOffCmd = 0x003c0,
- CSCR_LinkDownCmd = 0x0f3c0,
-};
-
-
-enum Cfg9346Bits {
- Cfg9346_Lock = 0x00,
- Cfg9346_Unlock = 0xC0,
-};
-
-
-#define PARA78_default 0x78fa8388
-#define PARA7c_default 0xcb38de43 /* param[0][3] */
-#define PARA7c_xxx 0xcb38de43
-static const unsigned long param[4][4] = {
- {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
- {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
- {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
- {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
-};
-
-struct ring_info {
- struct sk_buff *skb;
- dma_addr_t mapping;
-};
-
-
-typedef enum {
- CH_8139 = 0,
- CH_8139_K,
- CH_8139A,
- CH_8139B,
- CH_8130,
- CH_8139C,
-} chip_t;
-
-
-/* directly indexed by chip_t, above */
-static const struct {
- const char *name;
- u8 version; /* from RTL8139C docs */
- u32 RxConfigMask; /* should clear the bits supported by this chip */
-} rtl_chip_info[] = {
- { "RTL-8139",
- 0x40,
- 0xf0fe0040, /* XXX copied from RTL8139A, verify */
- },
-
- { "RTL-8139 rev K",
- 0x60,
- 0xf0fe0040,
- },
-
- { "RTL-8139A",
- 0x70,
- 0xf0fe0040,
- },
-
- { "RTL-8139B",
- 0x78,
- 0xf0fc0040
- },
-
- { "RTL-8130",
- 0x7C,
- 0xf0fe0040, /* XXX copied from RTL8139A, verify */
- },
-
- { "RTL-8139C",
- 0x74,
- 0xf0fc0040, /* XXX copied from RTL8139B, verify */
- },
-
-};
-
-
-struct netdrv_private {
- board_t board;
- void *mmio_addr;
- int drv_flags;
- struct pci_dev *pci_dev;
- struct timer_list timer; /* Media selection timer. */
- unsigned char *rx_ring;
- unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
- unsigned int tx_flag;
- atomic_t cur_tx;
- atomic_t dirty_tx;
- /* The saved address of a sent-in-place packet/buffer, for skfree(). */
- struct ring_info tx_info[NUM_TX_DESC];
- unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */
- unsigned char *tx_bufs; /* Tx bounce buffer region. */
- dma_addr_t rx_ring_dma;
- dma_addr_t tx_bufs_dma;
- char phys[4]; /* MII device addresses. */
- char twistie, twist_row, twist_col; /* Twister tune state. */
- unsigned int full_duplex:1; /* Full-duplex operation requested. */
- unsigned int duplex_lock:1;
- unsigned int default_port:4; /* Last dev->if_port value. */
- unsigned int media2:4; /* Secondary monitored media port. */
- unsigned int medialock:1; /* Don't sense media type. */
- unsigned int mediasense:1; /* Media sensing in progress. */
- spinlock_t lock;
- chip_t chipset;
-};
-
-MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
-MODULE_DESCRIPTION("Skeleton for a PCI Fast Ethernet driver");
-MODULE_LICENSE("GPL");
-module_param(multicast_filter_limit, int, 0);
-module_param(max_interrupt_work, int, 0);
-module_param_array(media, int, NULL, 0);
-MODULE_PARM_DESC(multicast_filter_limit,
- MODNAME " maximum number of filtered multicast addresses");
-MODULE_PARM_DESC(max_interrupt_work,
- MODNAME " maximum events handled per interrupt");
-MODULE_PARM_DESC(media,
- MODNAME " Bits 0-3: media type, bit 17: full duplex");
-
-static int read_eeprom(void *ioaddr, int location, int addr_len);
-static int netdrv_open(struct net_device *dev);
-static int mdio_read(struct net_device *dev, int phy_id, int location);
-static void mdio_write(struct net_device *dev, int phy_id, int location,
- int val);
-static void netdrv_timer(unsigned long data);
-static void netdrv_tx_timeout(struct net_device *dev);
-static void netdrv_init_ring(struct net_device *dev);
-static int netdrv_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t netdrv_interrupt(int irq, void *dev_instance);
-static int netdrv_close(struct net_device *dev);
-static int netdrv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static void netdrv_set_rx_mode(struct net_device *dev);
-static void netdrv_hw_start(struct net_device *dev);
-
-
-#ifdef USE_IO_OPS
-
-#define NETDRV_R8(reg) inb(((unsigned long)ioaddr) + (reg))
-#define NETDRV_R16(reg) inw(((unsigned long)ioaddr) + (reg))
-#define NETDRV_R32(reg) ((unsigned long)inl(((unsigned long)ioaddr) + (reg)))
-#define NETDRV_W8(reg, val8) outb((val8), ((unsigned long)ioaddr) + (reg))
-#define NETDRV_W16(reg, val16) outw((val16), ((unsigned long)ioaddr) + (reg))
-#define NETDRV_W32(reg, val32) outl((val32), ((unsigned long)ioaddr) + (reg))
-#define NETDRV_W8_F NETDRV_W8
-#define NETDRV_W16_F NETDRV_W16
-#define NETDRV_W32_F NETDRV_W32
-#undef readb
-#undef readw
-#undef readl
-#undef writeb
-#undef writew
-#undef writel
-#define readb(addr) inb((unsigned long)(addr))
-#define readw(addr) inw((unsigned long)(addr))
-#define readl(addr) inl((unsigned long)(addr))
-#define writeb(val, addr) outb((val), (unsigned long)(addr))
-#define writew(val, addr) outw((val), (unsigned long)(addr))
-#define writel(val, addr) outl((val), (unsigned long)(addr))
-
-#else
-
-/* write MMIO register, with flush */
-/* Flush avoids rtl8139 bug w/ posted MMIO writes */
-#define NETDRV_W8_F(reg, val8) \
-do { \
- writeb((val8), ioaddr + (reg)); \
- readb(ioaddr + (reg)); \
-} while (0)
-#define NETDRV_W16_F(reg, val16) \
-do { \
- writew((val16), ioaddr + (reg)); \
- readw(ioaddr + (reg)); \
-} while (0)
-#define NETDRV_W32_F(reg, val32) \
-do { \
- writel((val32), ioaddr + (reg)); \
- readl(ioaddr + (reg)); \
-} while (0)
-
-
-#ifdef MMIO_FLUSH_AUDIT_COMPLETE
-
-/* write MMIO register */
-#define NETDRV_W8(reg, val8) writeb((val8), ioaddr + (reg))
-#define NETDRV_W16(reg, val16) writew((val16), ioaddr + (reg))
-#define NETDRV_W32(reg, val32) writel((val32), ioaddr + (reg))
-
-#else
-
-/* write MMIO register, then flush */
-#define NETDRV_W8 NETDRV_W8_F
-#define NETDRV_W16 NETDRV_W16_F
-#define NETDRV_W32 NETDRV_W32_F
-
-#endif /* MMIO_FLUSH_AUDIT_COMPLETE */
-
-/* read MMIO register */
-#define NETDRV_R8(reg) readb(ioaddr + (reg))
-#define NETDRV_R16(reg) readw(ioaddr + (reg))
-#define NETDRV_R32(reg) ((unsigned long) readl(ioaddr + (reg)))
-
-#endif /* USE_IO_OPS */
-
-
-static const u16 netdrv_intr_mask =
- PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver |
- TxErr | TxOK | RxErr | RxOK;
-
-static const unsigned int netdrv_rx_config =
- RxCfgEarlyRxNone | RxCfgRcv32K | RxNoWrap |
- (RX_FIFO_THRESH << RxCfgFIFOShift) |
- (RX_DMA_BURST << RxCfgDMAShift);
-
-
-static int __devinit netdrv_init_board(struct pci_dev *pdev,
- struct net_device **dev_out,
- void **ioaddr_out)
-{
- void *ioaddr = NULL;
- struct net_device *dev;
- struct netdrv_private *tp;
- int rc, i;
- u32 pio_start, pio_end, pio_flags, pio_len;
- unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
- u32 tmp;
-
- DPRINTK("ENTER\n");
-
- assert(pdev != NULL);
- assert(ioaddr_out != NULL);
-
- *ioaddr_out = NULL;
- *dev_out = NULL;
-
- /* dev zeroed in alloc_etherdev */
- dev = alloc_etherdev(sizeof(*tp));
- if (dev == NULL) {
- dev_err(&pdev->dev, "unable to alloc new ethernet\n");
- DPRINTK("EXIT, returning -ENOMEM\n");
- return -ENOMEM;
- }
- SET_NETDEV_DEV(dev, &pdev->dev);
- tp = netdev_priv(dev);
-
- /* enable device(incl. PCI PM wakeup), and bus-mastering */
- rc = pci_enable_device(pdev);
- if (rc)
- goto err_out;
-
- pio_start = pci_resource_start(pdev, 0);
- pio_end = pci_resource_end(pdev, 0);
- pio_flags = pci_resource_flags(pdev, 0);
- pio_len = pci_resource_len(pdev, 0);
-
- mmio_start = pci_resource_start(pdev, 1);
- mmio_end = pci_resource_end(pdev, 1);
- mmio_flags = pci_resource_flags(pdev, 1);
- mmio_len = pci_resource_len(pdev, 1);
-
- /* set this immediately, we need to know before
- * we talk to the chip directly */
- DPRINTK("PIO region size == %#02X\n", pio_len);
- DPRINTK("MMIO region size == %#02lX\n", mmio_len);
-
- /* make sure PCI base addr 0 is PIO */
- if (!(pio_flags & IORESOURCE_IO)) {
- dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
- rc = -ENODEV;
- goto err_out;
- }
-
- /* make sure PCI base addr 1 is MMIO */
- if (!(mmio_flags & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
- rc = -ENODEV;
- goto err_out;
- }
-
- /* check for weird/broken PCI region reporting */
- if ((pio_len < NETDRV_MIN_IO_SIZE) ||
- (mmio_len < NETDRV_MIN_IO_SIZE)) {
- dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
- rc = -ENODEV;
- goto err_out;
- }
-
- rc = pci_request_regions(pdev, MODNAME);
- if (rc)
- goto err_out;
-
- pci_set_master(pdev);
-
-#ifdef USE_IO_OPS
- ioaddr = (void *)pio_start;
-#else
- /* ioremap MMIO region */
- ioaddr = ioremap(mmio_start, mmio_len);
- if (ioaddr == NULL) {
- dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
- rc = -EIO;
- goto err_out_free_res;
- }
-#endif /* USE_IO_OPS */
-
- /* Soft reset the chip. */
- NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) | CmdReset);
-
- /* Check that the chip has finished the reset. */
- for (i = 1000; i > 0; i--)
- if ((NETDRV_R8(ChipCmd) & CmdReset) == 0)
- break;
- else
- udelay(10);
-
- /* Bring the chip out of low-power mode. */
- /* <insert device-specific code here> */
-
-#ifndef USE_IO_OPS
- /* sanity checks -- ensure PIO and MMIO registers agree */
- assert(inb(pio_start+Config0) == readb(ioaddr+Config0));
- assert(inb(pio_start+Config1) == readb(ioaddr+Config1));
- assert(inb(pio_start+TxConfig) == readb(ioaddr+TxConfig));
- assert(inb(pio_start+RxConfig) == readb(ioaddr+RxConfig));
-#endif /* !USE_IO_OPS */
-
- /* identify chip attached to board */
- tmp = NETDRV_R8(ChipVersion);
- for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--)
- if (tmp == rtl_chip_info[i].version) {
- tp->chipset = i;
- goto match;
- }
-
- /* if unknown chip, assume array element #0, original RTL-8139 in this case */
- dev_printk(KERN_DEBUG, &pdev->dev,
- "unknown chip version, assuming RTL-8139\n");
- dev_printk(KERN_DEBUG, &pdev->dev, "TxConfig = %#lx\n",
- NETDRV_R32(TxConfig));
- tp->chipset = 0;
-
-match:
- DPRINTK("chipset id(%d) == index %d, '%s'\n",
- tmp, tp->chipset, rtl_chip_info[tp->chipset].name);
-
- rc = register_netdev(dev);
- if (rc)
- goto err_out_unmap;
-
- DPRINTK("EXIT, returning 0\n");
- *ioaddr_out = ioaddr;
- *dev_out = dev;
- return 0;
-
-err_out_unmap:
-#ifndef USE_IO_OPS
- iounmap(ioaddr);
-err_out_free_res:
-#endif
- pci_release_regions(pdev);
-err_out:
- free_netdev(dev);
- DPRINTK("EXIT, returning %d\n", rc);
- return rc;
-}
-
-static const struct net_device_ops netdrv_netdev_ops = {
- .ndo_open = netdrv_open,
- .ndo_stop = netdrv_close,
- .ndo_start_xmit = netdrv_start_xmit,
- .ndo_set_multicast_list = netdrv_set_rx_mode,
- .ndo_do_ioctl = netdrv_ioctl,
- .ndo_tx_timeout = netdrv_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
-};
-
-static int __devinit netdrv_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct net_device *dev = NULL;
- struct netdrv_private *tp;
- int i, addr_len, option;
- void *ioaddr = NULL;
- static int board_idx = -1;
-
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
-#endif
-
- DPRINTK("ENTER\n");
-
- assert(pdev != NULL);
- assert(ent != NULL);
-
- board_idx++;
-
- i = netdrv_init_board(pdev, &dev, &ioaddr);
- if (i < 0) {
- DPRINTK("EXIT, returning %d\n", i);
- return i;
- }
-
- tp = netdev_priv(dev);
-
- assert(ioaddr != NULL);
- assert(dev != NULL);
- assert(tp != NULL);
-
- addr_len = read_eeprom(ioaddr, 0, 8) == 0x8129 ? 8 : 6;
- for (i = 0; i < 3; i++)
- ((u16 *)(dev->dev_addr))[i] =
- le16_to_cpu(read_eeprom(ioaddr, i + 7, addr_len));
-
- dev->netdev_ops = &netdrv_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- dev->irq = pdev->irq;
- dev->base_addr = (unsigned long) ioaddr;
-
- /* netdev_priv()/tp zeroed and aligned in alloc_etherdev */
- tp = netdev_priv(dev);
-
- /* note: tp->chipset set in netdrv_init_board */
- tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
- PCI_COMMAND_MASTER | NETDRV_CAPS;
- tp->pci_dev = pdev;
- tp->board = ent->driver_data;
- tp->mmio_addr = ioaddr;
- spin_lock_init(&tp->lock);
-
- pci_set_drvdata(pdev, dev);
-
- tp->phys[0] = 32;
-
- netdev_info(dev, "%s at %#lx, %pM IRQ %d\n",
- board_info[ent->driver_data].name,
- dev->base_addr, dev->dev_addr, dev->irq);
-
- netdev_printk(KERN_DEBUG, dev, "Identified 8139 chip type '%s'\n",
- rtl_chip_info[tp->chipset].name);
-
- /* Put the chip into low-power mode. */
- NETDRV_W8_F(Cfg9346, Cfg9346_Unlock);
-
- /* The lower four bits are the media type. */
- option = (board_idx > 7) ? 0 : media[board_idx];
- if (option > 0) {
- tp->full_duplex = (option & 0x200) ? 1 : 0;
- tp->default_port = option & 15;
- if (tp->default_port)
- tp->medialock = 1;
- }
-
- if (tp->full_duplex) {
- netdev_info(dev, "Media type forced to Full Duplex\n");
- mdio_write(dev, tp->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
- tp->duplex_lock = 1;
- }
-
- DPRINTK("EXIT - returning 0\n");
- return 0;
-}
-
-
-static void __devexit netdrv_remove_one(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct netdrv_private *np;
-
- DPRINTK("ENTER\n");
-
- assert(dev != NULL);
-
- np = netdev_priv(dev);
- assert(np != NULL);
-
- unregister_netdev(dev);
-
-#ifndef USE_IO_OPS
- iounmap(np->mmio_addr);
-#endif /* !USE_IO_OPS */
-
- pci_release_regions(pdev);
-
- free_netdev(dev);
-
- pci_set_drvdata(pdev, NULL);
-
- pci_disable_device(pdev);
-
- DPRINTK("EXIT\n");
-}
-
-
-/* Serial EEPROM section. */
-
-/* EEPROM_Ctrl bits. */
-#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
-#define EE_CS 0x08 /* EEPROM chip select. */
-#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
-#define EE_WRITE_0 0x00
-#define EE_WRITE_1 0x02
-#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
-#define EE_ENB (0x80 | EE_CS)
-
-/* Delay between EEPROM clock transitions.
- No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
-*/
-
-#define eeprom_delay() readl(ee_addr)
-
-/* The EEPROM commands include the alway-set leading bit. */
-#define EE_WRITE_CMD (5)
-#define EE_READ_CMD (6)
-#define EE_ERASE_CMD (7)
-
-static int __devinit read_eeprom(void *ioaddr, int location, int addr_len)
-{
- int i;
- unsigned retval = 0;
- void *ee_addr = ioaddr + Cfg9346;
- int read_cmd = location | (EE_READ_CMD << addr_len);
-
- DPRINTK("ENTER\n");
-
- writeb(EE_ENB & ~EE_CS, ee_addr);
- writeb(EE_ENB, ee_addr);
- eeprom_delay();
-
- /* Shift the read command bits out. */
- for (i = 4 + addr_len; i >= 0; i--) {
- int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
- writeb(EE_ENB | dataval, ee_addr);
- eeprom_delay();
- writeb(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
- eeprom_delay();
- }
- writeb(EE_ENB, ee_addr);
- eeprom_delay();
-
- for (i = 16; i > 0; i--) {
- writeb(EE_ENB | EE_SHIFT_CLK, ee_addr);
- eeprom_delay();
- retval =
- (retval << 1) | ((readb(ee_addr) & EE_DATA_READ) ? 1 :
- 0);
- writeb(EE_ENB, ee_addr);
- eeprom_delay();
- }
-
- /* Terminate the EEPROM access. */
- writeb(~EE_CS, ee_addr);
- eeprom_delay();
-
- DPRINTK("EXIT - returning %d\n", retval);
- return retval;
-}
-
-/* MII serial management: mostly bogus for now. */
-/* Read and write the MII management registers using software-generated
- serial MDIO protocol.
- The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
- met by back-to-back PCI I/O cycles, but we insert a delay to avoid
- "overclocking" issues. */
-#define MDIO_DIR 0x80
-#define MDIO_DATA_OUT 0x04
-#define MDIO_DATA_IN 0x02
-#define MDIO_CLK 0x01
-#define MDIO_WRITE0 (MDIO_DIR)
-#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
-
-#define mdio_delay() readb(mdio_addr)
-
-
-static char mii_2_8139_map[8] = {
- BasicModeCtrl,
- BasicModeStatus,
- 0,
- 0,
- NWayAdvert,
- NWayLPAR,
- NWayExpansion,
- 0
-};
-
-
-/* Syncronize the MII management interface by shifting 32 one bits out. */
-static void mdio_sync(void *mdio_addr)
-{
- int i;
-
- DPRINTK("ENTER\n");
-
- for (i = 32; i >= 0; i--) {
- writeb(MDIO_WRITE1, mdio_addr);
- mdio_delay();
- writeb(MDIO_WRITE1 | MDIO_CLK, mdio_addr);
- mdio_delay();
- }
-
- DPRINTK("EXIT\n");
-}
-
-
-static int mdio_read(struct net_device *dev, int phy_id, int location)
-{
- struct netdrv_private *tp = netdev_priv(dev);
- void *mdio_addr = tp->mmio_addr + Config4;
- int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
- int retval = 0;
- int i;
-
- DPRINTK("ENTER\n");
-
- if (phy_id > 31) { /* Really a 8139. Use internal registers. */
- DPRINTK("EXIT after directly using 8139 internal regs\n");
- return location < 8 && mii_2_8139_map[location] ?
- readw(tp->mmio_addr + mii_2_8139_map[location]) : 0;
- }
- mdio_sync(mdio_addr);
- /* Shift the read command bits out. */
- for (i = 15; i >= 0; i--) {
- int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
-
- writeb(MDIO_DIR | dataval, mdio_addr);
- mdio_delay();
- writeb(MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
- mdio_delay();
- }
-
- /* Read the two transition, 16 data, and wire-idle bits. */
- for (i = 19; i > 0; i--) {
- writeb(0, mdio_addr);
- mdio_delay();
- retval = ((retval << 1) | ((readb(mdio_addr) & MDIO_DATA_IN))
- ? 1 : 0);
- writeb(MDIO_CLK, mdio_addr);
- mdio_delay();
- }
-
- DPRINTK("EXIT, returning %d\n", (retval >> 1) & 0xffff);
- return (retval >> 1) & 0xffff;
-}
-
-
-static void mdio_write(struct net_device *dev, int phy_id, int location,
- int value)
-{
- struct netdrv_private *tp = netdev_priv(dev);
- void *mdio_addr = tp->mmio_addr + Config4;
- int mii_cmd =
- (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
- int i;
-
- DPRINTK("ENTER\n");
-
- if (phy_id > 31) { /* Really a 8139. Use internal registers. */
- if (location < 8 && mii_2_8139_map[location]) {
- writew(value,
- tp->mmio_addr + mii_2_8139_map[location]);
- readw(tp->mmio_addr + mii_2_8139_map[location]);
- }
- DPRINTK("EXIT after directly using 8139 internal regs\n");
- return;
- }
- mdio_sync(mdio_addr);
-
- /* Shift the command bits out. */
- for (i = 31; i >= 0; i--) {
- int dataval =
- (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
- writeb(dataval, mdio_addr);
- mdio_delay();
- writeb(dataval | MDIO_CLK, mdio_addr);
- mdio_delay();
- }
-
- /* Clear out extra bits. */
- for (i = 2; i > 0; i--) {
- writeb(0, mdio_addr);
- mdio_delay();
- writeb(MDIO_CLK, mdio_addr);
- mdio_delay();
- }
-
- DPRINTK("EXIT\n");
-}
-
-
-static int netdrv_open(struct net_device *dev)
-{
- struct netdrv_private *tp = netdev_priv(dev);
- int retval;
- void *ioaddr = tp->mmio_addr;
-
- DPRINTK("ENTER\n");
-
- retval = request_irq(dev->irq, netdrv_interrupt, IRQF_SHARED, dev->name, dev);
- if (retval) {
- DPRINTK("EXIT, returning %d\n", retval);
- return retval;
- }
-
- tp->tx_bufs = pci_alloc_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
- &tp->tx_bufs_dma);
- tp->rx_ring = pci_alloc_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
- &tp->rx_ring_dma);
- if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
- free_irq(dev->irq, dev);
-
- if (tp->tx_bufs)
- pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
- tp->tx_bufs, tp->tx_bufs_dma);
- if (tp->rx_ring)
- pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
- tp->rx_ring, tp->rx_ring_dma);
-
- DPRINTK("EXIT, returning -ENOMEM\n");
- return -ENOMEM;
-
- }
-
- tp->full_duplex = tp->duplex_lock;
- tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
-
- netdrv_init_ring(dev);
- netdrv_hw_start(dev);
-
- netdev_dbg(dev, "ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
- (unsigned long long)pci_resource_start(tp->pci_dev, 1),
- dev->irq, NETDRV_R8(MediaStatus),
- tp->full_duplex ? "full" : "half");
-
- /* Set the timer to switch to check for link beat and perhaps switch
- to an alternate media type. */
- init_timer(&tp->timer);
- tp->timer.expires = jiffies + 3 * HZ;
- tp->timer.data = (unsigned long) dev;
- tp->timer.function = netdrv_timer;
- add_timer(&tp->timer);
-
- DPRINTK("EXIT, returning 0\n");
- return 0;
-}
-
-
-/* Start the hardware at open or resume. */
-static void netdrv_hw_start(struct net_device *dev)
-{
- struct netdrv_private *tp = netdev_priv(dev);
- void *ioaddr = tp->mmio_addr;
- u32 i;
-
- DPRINTK("ENTER\n");
-
- /* Soft reset the chip. */
- NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) | CmdReset);
- udelay(100);
-
- /* Check that the chip has finished the reset. */
- for (i = 1000; i > 0; i--)
- if ((NETDRV_R8(ChipCmd) & CmdReset) == 0)
- break;
-
- /* Restore our idea of the MAC address. */
- NETDRV_W32_F(MAC0 + 0, cpu_to_le32(*(u32 *)(dev->dev_addr + 0)));
- NETDRV_W32_F(MAC0 + 4, cpu_to_le32(*(u32 *)(dev->dev_addr + 4)));
-
- /* Must enable Tx/Rx before setting transfer thresholds! */
- NETDRV_W8_F(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) |
- CmdRxEnb | CmdTxEnb);
-
- i = netdrv_rx_config |
- (NETDRV_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
- NETDRV_W32_F(RxConfig, i);
-
- /* Check this value: the documentation for IFG contradicts ifself. */
- NETDRV_W32(TxConfig, (TX_DMA_BURST << TxDMAShift));
-
- /* unlock Config[01234] and BMCR register writes */
- NETDRV_W8_F(Cfg9346, Cfg9346_Unlock);
- udelay(10);
-
- tp->cur_rx = 0;
-
- /* Lock Config[01234] and BMCR register writes */
- NETDRV_W8_F(Cfg9346, Cfg9346_Lock);
- udelay(10);
-
- /* init Rx ring buffer DMA address */
- NETDRV_W32_F(RxBuf, tp->rx_ring_dma);
-
- /* init Tx buffer DMA addresses */
- for (i = 0; i < NUM_TX_DESC; i++)
- NETDRV_W32_F(TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
-
- NETDRV_W32_F(RxMissed, 0);
-
- netdrv_set_rx_mode(dev);
-
- /* no early-rx interrupts */
- NETDRV_W16(MultiIntr, NETDRV_R16(MultiIntr) & MultiIntrClear);
-
- /* make sure RxTx has started */
- NETDRV_W8_F(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) |
- CmdRxEnb | CmdTxEnb);
-
- /* Enable all known interrupts by setting the interrupt mask. */
- NETDRV_W16_F(IntrMask, netdrv_intr_mask);
-
- netif_start_queue(dev);
-
- DPRINTK("EXIT\n");
-}
-
-
-/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void netdrv_init_ring(struct net_device *dev)
-{
- struct netdrv_private *tp = netdev_priv(dev);
- int i;
-
- DPRINTK("ENTER\n");
-
- tp->cur_rx = 0;
- atomic_set(&tp->cur_tx, 0);
- atomic_set(&tp->dirty_tx, 0);
-
- for (i = 0; i < NUM_TX_DESC; i++) {
- tp->tx_info[i].skb = NULL;
- tp->tx_info[i].mapping = 0;
- tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
- }
-
- DPRINTK("EXIT\n");
-}
-
-
-static void netdrv_timer(unsigned long data)
-{
- struct net_device *dev = (struct net_device *) data;
- struct netdrv_private *tp = netdev_priv(dev);
- void *ioaddr = tp->mmio_addr;
- int next_tick = 60 * HZ;
- int mii_lpa;
-
- mii_lpa = mdio_read(dev, tp->phys[0], MII_LPA);
-
- if (!tp->duplex_lock && mii_lpa != 0xffff) {
- int duplex = ((mii_lpa & LPA_100FULL) ||
- (mii_lpa & 0x01C0) == 0x0040);
- if (tp->full_duplex != duplex) {
- tp->full_duplex = duplex;
- netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n",
- tp->full_duplex ? "full" : "half",
- tp->phys[0], mii_lpa);
- NETDRV_W8(Cfg9346, Cfg9346_Unlock);
- NETDRV_W8(Config1, tp->full_duplex ? 0x60 : 0x20);
- NETDRV_W8(Cfg9346, Cfg9346_Lock);
- }
- }
-
- netdev_dbg(dev, "Media selection tick, Link partner %04x\n",
- NETDRV_R16(NWayLPAR));
- netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x RxStatus %04lx\n",
- NETDRV_R16(IntrMask),
- NETDRV_R16(IntrStatus),
- NETDRV_R32(RxEarlyStatus));
- netdev_dbg(dev, "Chip config %02x %02x\n",
- NETDRV_R8(Config0), NETDRV_R8(Config1));
-
- tp->timer.expires = jiffies + next_tick;
- add_timer(&tp->timer);
-}
-
-
-static void netdrv_tx_clear(struct net_device *dev)
-{
- int i;
- struct netdrv_private *tp = netdev_priv(dev);
-
- atomic_set(&tp->cur_tx, 0);
- atomic_set(&tp->dirty_tx, 0);
-
- /* Dump the unsent Tx packets. */
- for (i = 0; i < NUM_TX_DESC; i++) {
- struct ring_info *rp = &tp->tx_info[i];
- if (rp->mapping != 0) {
- pci_unmap_single(tp->pci_dev, rp->mapping,
- rp->skb->len, PCI_DMA_TODEVICE);
- rp->mapping = 0;
- }
- if (rp->skb) {
- dev_kfree_skb(rp->skb);
- rp->skb = NULL;
- dev->stats.tx_dropped++;
- }
- }
-}
-
-
-static void netdrv_tx_timeout(struct net_device *dev)
-{
- struct netdrv_private *tp = netdev_priv(dev);
- void *ioaddr = tp->mmio_addr;
- int i;
- u8 tmp8;
- unsigned long flags;
-
- netdev_dbg(dev, "Transmit timeout, status %02x %04x media %02x\n",
- NETDRV_R8(ChipCmd),
- NETDRV_R16(IntrStatus),
- NETDRV_R8(MediaStatus));
-
- /* disable Tx ASAP, if not already */
- tmp8 = NETDRV_R8(ChipCmd);
- if (tmp8 & CmdTxEnb)
- NETDRV_W8(ChipCmd, tmp8 & ~CmdTxEnb);
-
- /* Disable interrupts by clearing the interrupt mask. */
- NETDRV_W16(IntrMask, 0x0000);
-
- /* Emit info to figure out what went wrong. */
- netdev_dbg(dev, "Tx queue start entry %d dirty entry %d\n",
- atomic_read(&tp->cur_tx),
- atomic_read(&tp->dirty_tx));
- for (i = 0; i < NUM_TX_DESC; i++)
- netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n",
- i, NETDRV_R32(TxStatus0 + (i * 4)),
- i == atomic_read(&tp->dirty_tx) % NUM_TX_DESC ?
- "(queue head)" : "");
-
- /* Stop a shared interrupt from scavenging while we are. */
- spin_lock_irqsave(&tp->lock, flags);
-
- netdrv_tx_clear(dev);
-
- spin_unlock_irqrestore(&tp->lock, flags);
-
- /* ...and finally, reset everything */
- netdrv_hw_start(dev);
-
- netif_wake_queue(dev);
-}
-
-
-
-static int netdrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct netdrv_private *tp = netdev_priv(dev);
- void *ioaddr = tp->mmio_addr;
- int entry;
-
- /* Calculate the next Tx descriptor entry. */
- entry = atomic_read(&tp->cur_tx) % NUM_TX_DESC;
-
- assert(tp->tx_info[entry].skb == NULL);
- assert(tp->tx_info[entry].mapping == 0);
-
- tp->tx_info[entry].skb = skb;
- /* tp->tx_info[entry].mapping = 0; */
- skb_copy_from_linear_data(skb, tp->tx_buf[entry], skb->len);
-
- /* Note: the chip doesn't have auto-pad! */
- NETDRV_W32(TxStatus0 + (entry * sizeof(u32)),
- tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
-
- atomic_inc(&tp->cur_tx);
- if ((atomic_read(&tp->cur_tx) - atomic_read(&tp->dirty_tx)) >= NUM_TX_DESC)
- netif_stop_queue(dev);
-
- netdev_dbg(dev, "Queued Tx packet at %p size %u to slot %d\n",
- skb->data, skb->len, entry);
-
- return NETDEV_TX_OK;
-}
-
-
-static void netdrv_tx_interrupt(struct net_device *dev,
- struct netdrv_private *tp,
- void *ioaddr)
-{
- int cur_tx, dirty_tx, tx_left;
-
- assert(dev != NULL);
- assert(tp != NULL);
- assert(ioaddr != NULL);
-
- dirty_tx = atomic_read(&tp->dirty_tx);
-
- cur_tx = atomic_read(&tp->cur_tx);
- tx_left = cur_tx - dirty_tx;
- while (tx_left > 0) {
- int entry = dirty_tx % NUM_TX_DESC;
- int txstatus;
-
- txstatus = NETDRV_R32(TxStatus0 + (entry * sizeof(u32)));
-
- if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
- break; /* It still hasn't been Txed */
-
- /* Note: TxCarrierLost is always asserted at 100mbps. */
- if (txstatus & (TxOutOfWindow | TxAborted)) {
- /* There was an major error, log it. */
- netdev_dbg(dev, "Transmit error, Tx status %#08x\n",
- txstatus);
- dev->stats.tx_errors++;
- if (txstatus & TxAborted) {
- dev->stats.tx_aborted_errors++;
- NETDRV_W32(TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift));
- }
- if (txstatus & TxCarrierLost)
- dev->stats.tx_carrier_errors++;
- if (txstatus & TxOutOfWindow)
- dev->stats.tx_window_errors++;
- } else {
- if (txstatus & TxUnderrun) {
- /* Add 64 to the Tx FIFO threshold. */
- if (tp->tx_flag < 0x00300000)
- tp->tx_flag += 0x00020000;
- dev->stats.tx_fifo_errors++;
- }
- dev->stats.collisions += (txstatus >> 24) & 15;
- dev->stats.tx_bytes += txstatus & 0x7ff;
- dev->stats.tx_packets++;
- }
-
- /* Free the original skb. */
- if (tp->tx_info[entry].mapping != 0) {
- pci_unmap_single(tp->pci_dev,
- tp->tx_info[entry].mapping,
- tp->tx_info[entry].skb->len,
- PCI_DMA_TODEVICE);
- tp->tx_info[entry].mapping = 0;
- }
- dev_kfree_skb_irq(tp->tx_info[entry].skb);
- tp->tx_info[entry].skb = NULL;
- dirty_tx++;
- if (dirty_tx < 0) { /* handle signed int overflow */
- atomic_sub(cur_tx, &tp->cur_tx); /* XXX racy? */
- dirty_tx = cur_tx - tx_left + 1;
- }
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
-
- cur_tx = atomic_read(&tp->cur_tx);
- tx_left = cur_tx - dirty_tx;
-
- }
-
-#ifndef NETDRV_NDEBUG
- if (atomic_read(&tp->cur_tx) - dirty_tx > NUM_TX_DESC) {
- netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d\n",
- dirty_tx, atomic_read(&tp->cur_tx));
- dirty_tx += NUM_TX_DESC;
- }
-#endif /* NETDRV_NDEBUG */
-
- atomic_set(&tp->dirty_tx, dirty_tx);
-}
-
-
-/* TODO: clean this up! Rx reset need not be this intensive */
-static void netdrv_rx_err(u32 rx_status, struct net_device *dev,
- struct netdrv_private *tp, void *ioaddr)
-{
- u8 tmp8;
- int tmp_work = 1000;
-
- netdev_dbg(dev, "Ethernet frame had errors, status %08x\n", rx_status);
- if (rx_status & RxTooLong)
- netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n",
- rx_status);
- /* A.C.: The chip hangs here. */
- dev->stats.rx_errors++;
- if (rx_status & (RxBadSymbol | RxBadAlign))
- dev->stats.rx_frame_errors++;
- if (rx_status & (RxRunt | RxTooLong))
- dev->stats.rx_length_errors++;
- if (rx_status & RxCRCErr)
- dev->stats.rx_crc_errors++;
- /* Reset the receiver, based on RealTek recommendation.(Bug?) */
- tp->cur_rx = 0;
-
- /* disable receive */
- tmp8 = NETDRV_R8(ChipCmd) & ChipCmdClear;
- NETDRV_W8_F(ChipCmd, tmp8 | CmdTxEnb);
-
- /* A.C.: Reset the multicast list. */
- netdrv_set_rx_mode(dev);
-
- /* XXX potentially temporary hack to
- * restart hung receiver */
- while (--tmp_work > 0) {
- tmp8 = NETDRV_R8(ChipCmd);
- if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
- break;
- NETDRV_W8_F(ChipCmd,
- (tmp8 & ChipCmdClear) | CmdRxEnb | CmdTxEnb);
- }
-
- /* G.S.: Re-enable receiver */
- /* XXX temporary hack to work around receiver hang */
- netdrv_set_rx_mode(dev);
-
- if (tmp_work <= 0)
- netdev_warn(dev, "tx/rx enable wait too long\n");
-}
-
-
-/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
- field alignments and semantics. */
-static void netdrv_rx_interrupt(struct net_device *dev,
- struct netdrv_private *tp, void *ioaddr)
-{
- unsigned char *rx_ring;
- u16 cur_rx;
-
- assert(dev != NULL);
- assert(tp != NULL);
- assert(ioaddr != NULL);
-
- rx_ring = tp->rx_ring;
- cur_rx = tp->cur_rx;
-
- netdev_dbg(dev, "In netdrv_rx(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
- cur_rx, NETDRV_R16(RxBufAddr),
- NETDRV_R16(RxBufPtr), NETDRV_R8(ChipCmd));
-
- while ((NETDRV_R8(ChipCmd) & RxBufEmpty) == 0) {
- int ring_offset = cur_rx % RX_BUF_LEN;
- u32 rx_status;
- unsigned int rx_size;
- unsigned int pkt_size;
- struct sk_buff *skb;
-
- /* read size+status of next frame from DMA ring buffer */
- rx_status = le32_to_cpu(*(u32 *)(rx_ring + ring_offset));
- rx_size = rx_status >> 16;
- pkt_size = rx_size - 4;
-
- netdev_dbg(dev, "netdrv_rx() status %04x, size %04x, cur %04x\n",
- rx_status, rx_size, cur_rx);
-#if defined(NETDRV_DEBUG) && (NETDRV_DEBUG > 2)
- print_hex_dump_bytes("Frame contents: ", HEX_DUMP_OFFSET,
- &rx_ring[ring_offset], 70);
-#endif
-
- /* If Rx err or invalid rx_size/rx_status received
- *(which happens if we get lost in the ring),
- * Rx process gets reset, so we abort any further
- * Rx processing.
- */
- if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
- (!(rx_status & RxStatusOK))) {
- netdrv_rx_err(rx_status, dev, tp, ioaddr);
- return;
- }
-
- /* Malloc up new buffer, compatible with net-2e. */
- /* Omit the four octet CRC from the length. */
-
- /* TODO: consider allocating skb's outside of
- * interrupt context, both to speed interrupt processing,
- * and also to reduce the chances of having to
- * drop packets here under memory pressure.
- */
-
- skb = dev_alloc_skb(pkt_size + 2);
- if (skb) {
- skb_reserve(skb, 2); /* 16 byte align the IP fields. */
-
- skb_copy_to_linear_data(skb, &rx_ring[ring_offset + 4], pkt_size);
- skb_put(skb, pkt_size);
-
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_bytes += pkt_size;
- dev->stats.rx_packets++;
- } else {
- netdev_warn(dev, "Memory squeeze, dropping packet\n");
- dev->stats.rx_dropped++;
- }
-
- cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
- NETDRV_W16_F(RxBufPtr, cur_rx - 16);
- }
-
- netdev_dbg(dev, "Done netdrv_rx(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
- cur_rx, NETDRV_R16(RxBufAddr),
- NETDRV_R16(RxBufPtr), NETDRV_R8(ChipCmd));
-
- tp->cur_rx = cur_rx;
-}
-
-
-static void netdrv_weird_interrupt(struct net_device *dev,
- struct netdrv_private *tp,
- void *ioaddr,
- int status, int link_changed)
-{
- netdev_printk(KERN_DEBUG, dev, "Abnormal interrupt, status %08x\n",
- status);
-
- assert(dev != NULL);
- assert(tp != NULL);
- assert(ioaddr != NULL);
-
- /* Update the error count. */
- dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
- NETDRV_W32(RxMissed, 0);
-
- if ((status & RxUnderrun) && link_changed &&
- (tp->drv_flags & HAS_LNK_CHNG)) {
- /* Really link-change on new chips. */
- int lpar = NETDRV_R16(NWayLPAR);
- int duplex = ((lpar & 0x0100) || (lpar & 0x01C0) == 0x0040 ||
- tp->duplex_lock);
- if (tp->full_duplex != duplex) {
- tp->full_duplex = duplex;
- NETDRV_W8(Cfg9346, Cfg9346_Unlock);
- NETDRV_W8(Config1, tp->full_duplex ? 0x60 : 0x20);
- NETDRV_W8(Cfg9346, Cfg9346_Lock);
- }
- status &= ~RxUnderrun;
- }
-
- /* XXX along with netdrv_rx_err, are we double-counting errors? */
- if (status & (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
- dev->stats.rx_errors++;
-
- if (status & (PCSTimeout))
- dev->stats.rx_length_errors++;
- if (status & (RxUnderrun | RxFIFOOver))
- dev->stats.rx_fifo_errors++;
- if (status & RxOverflow) {
- dev->stats.rx_over_errors++;
- tp->cur_rx = NETDRV_R16(RxBufAddr) % RX_BUF_LEN;
- NETDRV_W16_F(RxBufPtr, tp->cur_rx - 16);
- }
- if (status & PCIErr) {
- u16 pci_cmd_status;
- pci_read_config_word(tp->pci_dev, PCI_STATUS, &pci_cmd_status);
-
- netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status);
- }
-}
-
-
-/* The interrupt handler does all of the Rx thread work and cleans up
- after the Tx thread. */
-static irqreturn_t netdrv_interrupt(int irq, void *dev_instance)
-{
- struct net_device *dev = (struct net_device *) dev_instance;
- struct netdrv_private *tp = netdev_priv(dev);
- int boguscnt = max_interrupt_work;
- void *ioaddr = tp->mmio_addr;
- int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */
- int handled = 0;
-
- spin_lock(&tp->lock);
-
- do {
- status = NETDRV_R16(IntrStatus);
-
- /* h/w no longer present(hotplug?) or major error, bail */
- if (status == 0xFFFF)
- break;
-
- handled = 1;
- /* Acknowledge all of the current interrupt sources ASAP */
- NETDRV_W16_F(IntrStatus, status);
-
- netdev_dbg(dev, "interrupt status=%#04x new intstat=%#04x\n",
- status, NETDRV_R16(IntrStatus));
-
- if ((status &
- (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
- RxFIFOOver | TxErr | TxOK | RxErr | RxOK)) == 0)
- break;
-
- /* Check uncommon events with one test. */
- if (status & (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
- RxFIFOOver | TxErr | RxErr))
- netdrv_weird_interrupt(dev, tp, ioaddr,
- status, link_changed);
-
- if (status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver)) /* Rx interrupt */
- netdrv_rx_interrupt(dev, tp, ioaddr);
-
- if (status & (TxOK | TxErr))
- netdrv_tx_interrupt(dev, tp, ioaddr);
-
- boguscnt--;
- } while (boguscnt > 0);
-
- if (boguscnt <= 0) {
- netdev_warn(dev, "Too much work at interrupt, IntrStatus=%#04x\n",
- status);
-
- /* Clear all interrupt sources. */
- NETDRV_W16(IntrStatus, 0xffff);
- }
-
- spin_unlock(&tp->lock);
-
- netdev_dbg(dev, "exiting interrupt, intr_status=%#04x\n",
- NETDRV_R16(IntrStatus));
- return IRQ_RETVAL(handled);
-}
-
-
-static int netdrv_close(struct net_device *dev)
-{
- struct netdrv_private *tp = netdev_priv(dev);
- void *ioaddr = tp->mmio_addr;
- unsigned long flags;
-
- DPRINTK("ENTER\n");
-
- netif_stop_queue(dev);
-
- netdev_dbg(dev, "Shutting down ethercard, status was %#04x\n",
- NETDRV_R16(IntrStatus));
-
- del_timer_sync(&tp->timer);
-
- spin_lock_irqsave(&tp->lock, flags);
-
- /* Stop the chip's Tx and Rx DMA processes. */
- NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear));
-
- /* Disable interrupts by clearing the interrupt mask. */
- NETDRV_W16(IntrMask, 0x0000);
-
- /* Update the error counts. */
- dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
- NETDRV_W32(RxMissed, 0);
-
- spin_unlock_irqrestore(&tp->lock, flags);
-
- free_irq(dev->irq, dev);
-
- netdrv_tx_clear(dev);
-
- pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
- tp->rx_ring, tp->rx_ring_dma);
- pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
- tp->tx_bufs, tp->tx_bufs_dma);
- tp->rx_ring = NULL;
- tp->tx_bufs = NULL;
-
- /* Green! Put the chip in low-power mode. */
- NETDRV_W8(Cfg9346, Cfg9346_Unlock);
- NETDRV_W8(Config1, 0x03);
- NETDRV_W8(Cfg9346, Cfg9346_Lock);
-
- DPRINTK("EXIT\n");
- return 0;
-}
-
-
-static int netdrv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct netdrv_private *tp = netdev_priv(dev);
- struct mii_ioctl_data *data = if_mii(rq);
- unsigned long flags;
- int rc = 0;
-
- DPRINTK("ENTER\n");
-
- switch (cmd) {
- case SIOCGMIIPHY: /* Get address of MII PHY in use. */
- data->phy_id = tp->phys[0] & 0x3f;
- /* Fall Through */
-
- case SIOCGMIIREG: /* Read MII PHY register. */
- spin_lock_irqsave(&tp->lock, flags);
- data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
- spin_unlock_irqrestore(&tp->lock, flags);
- break;
-
- case SIOCSMIIREG: /* Write MII PHY register. */
- spin_lock_irqsave(&tp->lock, flags);
- mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
- spin_unlock_irqrestore(&tp->lock, flags);
- break;
-
- default:
- rc = -EOPNOTSUPP;
- break;
- }
-
- DPRINTK("EXIT, returning %d\n", rc);
- return rc;
-}
-
-/* Set or clear the multicast filter for this adaptor.
- This routine is not state sensitive and need not be SMP locked. */
-
-static void netdrv_set_rx_mode(struct net_device *dev)
-{
- struct netdrv_private *tp = netdev_priv(dev);
- void *ioaddr = tp->mmio_addr;
- u32 mc_filter[2]; /* Multicast hash filter */
- int rx_mode;
- u32 tmp;
-
- DPRINTK("ENTER\n");
-
- netdev_dbg(dev, "%s(%04x) done -- Rx config %08lx\n",
- __func__, dev->flags, NETDRV_R32(RxConfig));
-
- /* Note: do not reorder, GCC is clever about common statements. */
- if (dev->flags & IFF_PROMISC) {
- rx_mode =
- AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
- AcceptAllPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to filter perfectly -- accept all multicasts. */
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else {
- struct netdev_hw_addr *ha;
-
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(ha, dev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
-
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- }
- }
-
- /* if called from irq handler, lock already acquired */
- if (!in_irq())
- spin_lock_irq(&tp->lock);
-
- /* We can safely update without stopping the chip. */
- tmp = netdrv_rx_config | rx_mode |
- (NETDRV_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
- NETDRV_W32_F(RxConfig, tmp);
- NETDRV_W32_F(MAR0 + 0, mc_filter[0]);
- NETDRV_W32_F(MAR0 + 4, mc_filter[1]);
-
- if (!in_irq())
- spin_unlock_irq(&tp->lock);
-
- DPRINTK("EXIT\n");
-}
-
-
-#ifdef CONFIG_PM
-
-static int netdrv_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct netdrv_private *tp = netdev_priv(dev);
- void *ioaddr = tp->mmio_addr;
- unsigned long flags;
-
- if (!netif_running(dev))
- return 0;
- netif_device_detach(dev);
-
- spin_lock_irqsave(&tp->lock, flags);
-
- /* Disable interrupts, stop Tx and Rx. */
- NETDRV_W16(IntrMask, 0x0000);
- NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear));
-
- /* Update the error counts. */
- dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
- NETDRV_W32(RxMissed, 0);
-
- spin_unlock_irqrestore(&tp->lock, flags);
-
- pci_save_state(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
- return 0;
-}
-
-
-static int netdrv_resume(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- /*struct netdrv_private *tp = netdev_priv(dev);*/
-
- if (!netif_running(dev))
- return 0;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- netif_device_attach(dev);
- netdrv_hw_start(dev);
-
- return 0;
-}
-
-#endif /* CONFIG_PM */
-
-
-static struct pci_driver netdrv_pci_driver = {
- .name = MODNAME,
- .id_table = netdrv_pci_tbl,
- .probe = netdrv_init_one,
- .remove = __devexit_p(netdrv_remove_one),
-#ifdef CONFIG_PM
- .suspend = netdrv_suspend,
- .resume = netdrv_resume,
-#endif /* CONFIG_PM */
-};
-
-
-static int __init netdrv_init_module(void)
-{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
- printk(version);
-#endif
- return pci_register_driver(&netdrv_pci_driver);
-}
-
-
-static void __exit netdrv_cleanup_module(void)
-{
- pci_unregister_driver(&netdrv_pci_driver);
-}
-
-
-module_init(netdrv_init_module);
-module_exit(netdrv_cleanup_module);
diff --git a/drivers/net/pcmcia/Kconfig b/drivers/net/pcmcia/Kconfig
deleted file mode 100644
index 9b8f793b1cc..00000000000
--- a/drivers/net/pcmcia/Kconfig
+++ /dev/null
@@ -1,123 +0,0 @@
-#
-# PCMCIA Network device configuration
-#
-
-menuconfig NET_PCMCIA
- bool "PCMCIA network device support"
- depends on PCMCIA
- ---help---
- Say Y if you would like to include support for any PCMCIA or CardBus
- network adapters, then say Y to the driver for your particular card
- below. PCMCIA- or PC-cards are credit-card size devices often used
- with laptops computers; CardBus is the newer and faster version of
- PCMCIA.
-
- To use your PC-cards, you will need supporting software from David
- Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
- for location). You also want to check out the PCMCIA-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- If unsure, say N.
-
-if NET_PCMCIA && PCMCIA
-
-config PCMCIA_3C589
- tristate "3Com 3c589 PCMCIA support"
- help
- Say Y here if you intend to attach a 3Com 3c589 or compatible PCMCIA
- (PC-card) Ethernet card to your computer.
-
- To compile this driver as a module, choose M here: the module will be
- called 3c589_cs. If unsure, say N.
-
-config PCMCIA_3C574
- tristate "3Com 3c574 PCMCIA support"
- help
- Say Y here if you intend to attach a 3Com 3c574 or compatible PCMCIA
- (PC-card) Fast Ethernet card to your computer.
-
- To compile this driver as a module, choose M here: the module will be
- called 3c574_cs. If unsure, say N.
-
-config PCMCIA_FMVJ18X
- tristate "Fujitsu FMV-J18x PCMCIA support"
- select CRC32
- help
- Say Y here if you intend to attach a Fujitsu FMV-J18x or compatible
- PCMCIA (PC-card) Ethernet card to your computer.
-
- To compile this driver as a module, choose M here: the module will be
- called fmvj18x_cs. If unsure, say N.
-
-config PCMCIA_PCNET
- tristate "NE2000 compatible PCMCIA support"
- select CRC32
- help
- Say Y here if you intend to attach an NE2000 compatible PCMCIA
- (PC-card) Ethernet or Fast Ethernet card to your computer.
-
- To compile this driver as a module, choose M here: the module will be
- called pcnet_cs. If unsure, say N.
-
-config PCMCIA_NMCLAN
- tristate "New Media PCMCIA support"
- help
- Say Y here if you intend to attach a New Media Ethernet or LiveWire
- PCMCIA (PC-card) Ethernet card to your computer.
-
- To compile this driver as a module, choose M here: the module will be
- called nmclan_cs. If unsure, say N.
-
-config PCMCIA_SMC91C92
- tristate "SMC 91Cxx PCMCIA support"
- select CRC32
- select MII
- help
- Say Y here if you intend to attach an SMC 91Cxx compatible PCMCIA
- (PC-card) Ethernet or Fast Ethernet card to your computer.
-
- To compile this driver as a module, choose M here: the module will be
- called smc91c92_cs. If unsure, say N.
-
-config PCMCIA_XIRC2PS
- tristate "Xircom 16-bit PCMCIA support"
- help
- Say Y here if you intend to attach a Xircom 16-bit PCMCIA (PC-card)
- Ethernet or Fast Ethernet card to your computer.
-
- To compile this driver as a module, choose M here: the module will be
- called xirc2ps_cs. If unsure, say N.
-
-config PCMCIA_AXNET
- tristate "Asix AX88190 PCMCIA support"
- ---help---
- Say Y here if you intend to attach an Asix AX88190-based PCMCIA
- (PC-card) Fast Ethernet card to your computer. These cards are
- nearly NE2000 compatible but need a separate driver due to a few
- misfeatures.
-
- To compile this driver as a module, choose M here: the module will be
- called axnet_cs. If unsure, say N.
-
-config ARCNET_COM20020_CS
- tristate "COM20020 ARCnet PCMCIA support"
- depends on ARCNET_COM20020
- help
- Say Y here if you intend to attach this type of ARCnet PCMCIA card
- to your computer.
-
- To compile this driver as a module, choose M here: the module will be
- called com20020_cs. If unsure, say N.
-
-config PCMCIA_IBMTR
- tristate "IBM PCMCIA tokenring adapter support"
- depends on IBMTR!=y && TR
- help
- Say Y here if you intend to attach this type of Token Ring PCMCIA
- card to your computer. You then also need to say Y to "Token Ring
- driver support".
-
- To compile this driver as a module, choose M here: the module will be
- called ibmtr_cs.
-
-endif # NET_PCMCIA
diff --git a/drivers/net/pcmcia/Makefile b/drivers/net/pcmcia/Makefile
deleted file mode 100644
index 87d2d99f4c1..00000000000
--- a/drivers/net/pcmcia/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# Makefile for the Linux PCMCIA network device drivers.
-#
-
-# 16-bit client drivers
-obj-$(CONFIG_PCMCIA_3C589) += 3c589_cs.o
-obj-$(CONFIG_PCMCIA_3C574) += 3c574_cs.o
-obj-$(CONFIG_PCMCIA_FMVJ18X) += fmvj18x_cs.o
-obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o
-obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o
-obj-$(CONFIG_PCMCIA_SMC91C92) += smc91c92_cs.o
-obj-$(CONFIG_PCMCIA_XIRC2PS) += xirc2ps_cs.o
-obj-$(CONFIG_ARCNET_COM20020_CS)+= com20020_cs.o
-obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o
-
-obj-$(CONFIG_PCMCIA_IBMTR) += ibmtr_cs.o
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a70244306c9..bb88e12101c 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig PHYLIB
- tristate "PHY Device support and infrastructure"
+ bool "PHY Device support and infrastructure"
depends on !S390
depends on NETDEVICES
help
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index d84c4224dd1..e8be47d6d7d 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -553,7 +553,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
/*
* There is no BCM5481 specification available, so down
* here is everything we know about "register 0x18". This
- * at least helps BCM5481 to successfuly receive packets
+ * at least helps BCM5481 to successfully receive packets
* on MPC8360E-RDK board. Peter Barada <peterb@logicpd.com>
* says: "This sets delay between the RXD and RXC signals
* instead of using trace lengths to achieve timing".
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index edd7304773e..9663e0ba600 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -35,16 +35,15 @@
#define LAYER4 0x02
#define LAYER2 0x01
#define MAX_RXTS 64
-#define N_EXT_TS 1
+#define N_EXT_TS 6
#define PSF_PTPVER 2
#define PSF_EVNT 0x4000
#define PSF_RX 0x2000
#define PSF_TX 0x1000
#define EXT_EVENT 1
-#define EXT_GPIO 1
-#define CAL_EVENT 2
-#define CAL_GPIO 9
-#define CAL_TRIGGER 2
+#define CAL_EVENT 7
+#define CAL_TRIGGER 7
+#define PER_TRIGGER 6
/* phyter seems to miss the mark by 16 ns */
#define ADJTIME_FIX 16
@@ -131,16 +130,30 @@ struct dp83640_clock {
/* globals */
+enum {
+ CALIBRATE_GPIO,
+ PEROUT_GPIO,
+ EXTTS0_GPIO,
+ EXTTS1_GPIO,
+ EXTTS2_GPIO,
+ EXTTS3_GPIO,
+ EXTTS4_GPIO,
+ EXTTS5_GPIO,
+ GPIO_TABLE_SIZE
+};
+
static int chosen_phy = -1;
-static ushort cal_gpio = 4;
+static ushort gpio_tab[GPIO_TABLE_SIZE] = {
+ 1, 2, 3, 4, 8, 9, 10, 11
+};
module_param(chosen_phy, int, 0444);
-module_param(cal_gpio, ushort, 0444);
+module_param_array(gpio_tab, ushort, NULL, 0444);
MODULE_PARM_DESC(chosen_phy, \
"The address of the PHY to use for the ancillary clock features");
-MODULE_PARM_DESC(cal_gpio, \
- "Which GPIO line to use for synchronizing multiple PHYs");
+MODULE_PARM_DESC(gpio_tab, \
+ "Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");
/* a list of clocks and a mutex to protect it */
static LIST_HEAD(phyter_clocks);
@@ -235,6 +248,61 @@ static u64 phy2txts(struct phy_txts *p)
return ns;
}
+static void periodic_output(struct dp83640_clock *clock,
+ struct ptp_clock_request *clkreq, bool on)
+{
+ struct dp83640_private *dp83640 = clock->chosen;
+ struct phy_device *phydev = dp83640->phydev;
+ u32 sec, nsec, period;
+ u16 gpio, ptp_trig, trigger, val;
+
+ gpio = on ? gpio_tab[PEROUT_GPIO] : 0;
+ trigger = PER_TRIGGER;
+
+ ptp_trig = TRIG_WR |
+ (trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT |
+ (gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT |
+ TRIG_PER |
+ TRIG_PULSE;
+
+ val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
+
+ if (!on) {
+ val |= TRIG_DIS;
+ mutex_lock(&clock->extreg_lock);
+ ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
+ ext_write(0, phydev, PAGE4, PTP_CTL, val);
+ mutex_unlock(&clock->extreg_lock);
+ return;
+ }
+
+ sec = clkreq->perout.start.sec;
+ nsec = clkreq->perout.start.nsec;
+ period = clkreq->perout.period.sec * 1000000000UL;
+ period += clkreq->perout.period.nsec;
+
+ mutex_lock(&clock->extreg_lock);
+
+ ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
+
+ /*load trigger*/
+ val |= TRIG_LOAD;
+ ext_write(0, phydev, PAGE4, PTP_CTL, val);
+ ext_write(0, phydev, PAGE4, PTP_TDR, nsec & 0xffff); /* ns[15:0] */
+ ext_write(0, phydev, PAGE4, PTP_TDR, nsec >> 16); /* ns[31:16] */
+ ext_write(0, phydev, PAGE4, PTP_TDR, sec & 0xffff); /* sec[15:0] */
+ ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16); /* sec[31:16] */
+ ext_write(0, phydev, PAGE4, PTP_TDR, period & 0xffff); /* ns[15:0] */
+ ext_write(0, phydev, PAGE4, PTP_TDR, period >> 16); /* ns[31:16] */
+
+ /*enable trigger*/
+ val &= ~TRIG_LOAD;
+ val |= TRIG_EN;
+ ext_write(0, phydev, PAGE4, PTP_CTL, val);
+
+ mutex_unlock(&clock->extreg_lock);
+}
+
/* ptp clock methods */
static int ptp_dp83640_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
@@ -338,19 +406,30 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
struct dp83640_clock *clock =
container_of(ptp, struct dp83640_clock, caps);
struct phy_device *phydev = clock->chosen->phydev;
- u16 evnt;
+ int index;
+ u16 evnt, event_num, gpio_num;
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- if (rq->extts.index != 0)
+ index = rq->extts.index;
+ if (index < 0 || index >= N_EXT_TS)
return -EINVAL;
- evnt = EVNT_WR | (EXT_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
+ event_num = EXT_EVENT + index;
+ evnt = EVNT_WR | (event_num & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
if (on) {
- evnt |= (EXT_GPIO & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
+ gpio_num = gpio_tab[EXTTS0_GPIO + index];
+ evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
evnt |= EVNT_RISE;
}
ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
return 0;
+
+ case PTP_CLK_REQ_PEROUT:
+ if (rq->perout.index != 0)
+ return -EINVAL;
+ periodic_output(clock, rq, on);
+ return 0;
+
default:
break;
}
@@ -441,9 +520,10 @@ static void recalibrate(struct dp83640_clock *clock)
struct list_head *this;
struct dp83640_private *tmp;
struct phy_device *master = clock->chosen->phydev;
- u16 cfg0, evnt, ptp_trig, trigger, val;
+ u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val;
trigger = CAL_TRIGGER;
+ cal_gpio = gpio_tab[CALIBRATE_GPIO];
mutex_lock(&clock->extreg_lock);
@@ -542,11 +622,17 @@ static void recalibrate(struct dp83640_clock *clock)
/* time stamping methods */
+static inline u16 exts_chan_to_edata(int ch)
+{
+ return 1 << ((ch + EXT_EVENT) * 2);
+}
+
static int decode_evnt(struct dp83640_private *dp83640,
void *data, u16 ests)
{
struct phy_txts *phy_txts;
struct ptp_clock_event event;
+ int i, parsed;
int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
u16 ext_status = 0;
@@ -568,14 +654,25 @@ static int decode_evnt(struct dp83640_private *dp83640,
dp83640->edata.ns_lo = phy_txts->ns_lo;
}
+ if (ext_status) {
+ parsed = words + 2;
+ } else {
+ parsed = words + 1;
+ i = ((ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK) - EXT_EVENT;
+ ext_status = exts_chan_to_edata(i);
+ }
+
event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
event.timestamp = phy2txts(&dp83640->edata);
- ptp_clock_event(dp83640->clock->ptp_clock, &event);
+ for (i = 0; i < N_EXT_TS; i++) {
+ if (ext_status & exts_chan_to_edata(i)) {
+ event.index = i;
+ ptp_clock_event(dp83640->clock->ptp_clock, &event);
+ }
+ }
- words = ext_status ? words + 2 : words + 1;
- return words * sizeof(u16);
+ return parsed * sizeof(u16);
}
static void decode_rxts(struct dp83640_private *dp83640,
@@ -664,6 +761,41 @@ static void decode_status_frame(struct dp83640_private *dp83640,
}
}
+static int is_sync(struct sk_buff *skb, int type)
+{
+ u8 *data = skb->data, *msgtype;
+ unsigned int offset = 0;
+
+ switch (type) {
+ case PTP_CLASS_V1_IPV4:
+ case PTP_CLASS_V2_IPV4:
+ offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+ break;
+ case PTP_CLASS_V1_IPV6:
+ case PTP_CLASS_V2_IPV6:
+ offset = OFF_PTP6;
+ break;
+ case PTP_CLASS_V2_L2:
+ offset = ETH_HLEN;
+ break;
+ case PTP_CLASS_V2_VLAN:
+ offset = ETH_HLEN + VLAN_HLEN;
+ break;
+ default:
+ return 0;
+ }
+
+ if (type & PTP_CLASS_V1)
+ offset += OFF_PTP_CONTROL;
+
+ if (skb->len < offset + 1)
+ return 0;
+
+ msgtype = data + offset;
+
+ return (*msgtype & 0xf) == 0;
+}
+
static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
{
u16 *seqid;
@@ -740,7 +872,7 @@ static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
clock->caps.max_adj = 1953124;
clock->caps.n_alarm = 0;
clock->caps.n_ext_ts = N_EXT_TS;
- clock->caps.n_per_out = 0;
+ clock->caps.n_per_out = 1;
clock->caps.pps = 0;
clock->caps.adjfreq = ptp_dp83640_adjfreq;
clock->caps.adjtime = ptp_dp83640_adjtime;
@@ -875,6 +1007,7 @@ static void dp83640_remove(struct phy_device *phydev)
struct dp83640_clock *clock;
struct list_head *this, *next;
struct dp83640_private *tmp, *dp83640 = phydev->priv;
+ struct sk_buff *skb;
if (phydev->addr == BROADCAST_ADDR)
return;
@@ -882,6 +1015,12 @@ static void dp83640_remove(struct phy_device *phydev)
enable_status_frames(phydev, false);
cancel_work_sync(&dp83640->ts_work);
+ while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL)
+ kfree_skb(skb);
+
+ while ((skb = skb_dequeue(&dp83640->tx_queue)) != NULL)
+ skb_complete_tx_timestamp(skb, NULL);
+
clock = dp83640_clock_get(dp83640->clock);
if (dp83640 == clock->chosen) {
@@ -913,16 +1052,10 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
if (cfg.flags) /* reserved for future extensions */
return -EINVAL;
- switch (cfg.tx_type) {
- case HWTSTAMP_TX_OFF:
- dp83640->hwts_tx_en = 0;
- break;
- case HWTSTAMP_TX_ON:
- dp83640->hwts_tx_en = 1;
- break;
- default:
+ if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
return -ERANGE;
- }
+
+ dp83640->hwts_tx_en = cfg.tx_type;
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
@@ -977,6 +1110,9 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
if (dp83640->hwts_tx_en)
txcfg0 |= TX_TS_EN;
+ if (dp83640->hwts_tx_en == HWTSTAMP_TX_ONESTEP_SYNC)
+ txcfg0 |= SYNC_1STEP | CHK_1STEP;
+
if (dp83640->hwts_rx_en)
rxcfg0 |= RX_TS_EN;
@@ -1059,12 +1195,24 @@ static void dp83640_txtstamp(struct phy_device *phydev,
{
struct dp83640_private *dp83640 = phydev->priv;
- if (!dp83640->hwts_tx_en) {
- kfree_skb(skb);
- return;
+ switch (dp83640->hwts_tx_en) {
+
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (is_sync(skb, type)) {
+ skb_complete_tx_timestamp(skb, NULL);
+ return;
+ }
+ /* fall through */
+ case HWTSTAMP_TX_ON:
+ skb_queue_tail(&dp83640->tx_queue, skb);
+ schedule_work(&dp83640->ts_work);
+ break;
+
+ case HWTSTAMP_TX_OFF:
+ default:
+ skb_complete_tx_timestamp(skb, NULL);
+ break;
}
- skb_queue_tail(&dp83640->tx_queue, skb);
- schedule_work(&dp83640->ts_work);
}
static struct phy_driver dp83640_driver = {
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index d4cbc2922b2..c81f136ae67 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -30,10 +30,17 @@
#include <asm/irq.h>
#include <asm/uaccess.h>
-MODULE_DESCRIPTION("ICPlus IP175C/IC1001 PHY drivers");
+MODULE_DESCRIPTION("ICPlus IP175C/IP101A/IC1001 PHY drivers");
MODULE_AUTHOR("Michael Barkowski");
MODULE_LICENSE("GPL");
+/* IP101A/IP1001 */
+#define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */
+#define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */
+#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */
+#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
+#define IP101A_APS_ON 2 /* IP101A APS Mode bit */
+
static int ip175c_config_init(struct phy_device *phydev)
{
int err, i;
@@ -42,36 +49,36 @@ static int ip175c_config_init(struct phy_device *phydev)
if (full_reset_performed == 0) {
/* master reset */
- err = phydev->bus->write(phydev->bus, 30, 0, 0x175c);
+ err = mdiobus_write(phydev->bus, 30, 0, 0x175c);
if (err < 0)
return err;
/* ensure no bus delays overlap reset period */
- err = phydev->bus->read(phydev->bus, 30, 0);
+ err = mdiobus_read(phydev->bus, 30, 0);
/* data sheet specifies reset period is 2 msec */
mdelay(2);
/* enable IP175C mode */
- err = phydev->bus->write(phydev->bus, 29, 31, 0x175c);
+ err = mdiobus_write(phydev->bus, 29, 31, 0x175c);
if (err < 0)
return err;
/* Set MII0 speed and duplex (in PHY mode) */
- err = phydev->bus->write(phydev->bus, 29, 22, 0x420);
+ err = mdiobus_write(phydev->bus, 29, 22, 0x420);
if (err < 0)
return err;
/* reset switch ports */
for (i = 0; i < 5; i++) {
- err = phydev->bus->write(phydev->bus, i,
- MII_BMCR, BMCR_RESET);
+ err = mdiobus_write(phydev->bus, i,
+ MII_BMCR, BMCR_RESET);
if (err < 0)
return err;
}
for (i = 0; i < 5; i++)
- err = phydev->bus->read(phydev->bus, i, MII_BMCR);
+ err = mdiobus_read(phydev->bus, i, MII_BMCR);
mdelay(2);
@@ -89,27 +96,61 @@ static int ip175c_config_init(struct phy_device *phydev)
return 0;
}
-static int ip1001_config_init(struct phy_device *phydev)
+static int ip1xx_reset(struct phy_device *phydev)
{
- int err, value;
+ int err, bmcr;
/* Software Reset PHY */
- value = phy_read(phydev, MII_BMCR);
- value |= BMCR_RESET;
- err = phy_write(phydev, MII_BMCR, value);
+ bmcr = phy_read(phydev, MII_BMCR);
+ bmcr |= BMCR_RESET;
+ err = phy_write(phydev, MII_BMCR, bmcr);
if (err < 0)
return err;
do {
- value = phy_read(phydev, MII_BMCR);
- } while (value & BMCR_RESET);
+ bmcr = phy_read(phydev, MII_BMCR);
+ } while (bmcr & BMCR_RESET);
+
+ return err;
+}
+
+static int ip1001_config_init(struct phy_device *phydev)
+{
+ int c;
+
+ c = ip1xx_reset(phydev);
+ if (c < 0)
+ return c;
+
+ /* Enable Auto Power Saving mode */
+ c = phy_read(phydev, IP1001_SPEC_CTRL_STATUS_2);
+ c |= IP1001_APS_ON;
+ if (c < 0)
+ return c;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
+ /* Additional delay (2ns) used to adjust RX clock phase
+ * at RGMII interface */
+ c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
+ c |= IP1001_PHASE_SEL_MASK;
+ c = phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
+ }
+
+ return c;
+}
+
+static int ip101a_config_init(struct phy_device *phydev)
+{
+ int c;
- /* Additional delay (2ns) used to adjust RX clock phase
- * at GMII/ RGMII interface */
- value = phy_read(phydev, 16);
- value |= 0x3;
+ c = ip1xx_reset(phydev);
+ if (c < 0)
+ return c;
- return phy_write(phydev, 16, value);
+ /* Enable Auto Power Saving mode */
+ c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
+ c |= IP101A_APS_ON;
+ return c;
}
static int ip175c_read_status(struct phy_device *phydev)
@@ -158,6 +199,20 @@ static struct phy_driver ip1001_driver = {
.driver = { .owner = THIS_MODULE,},
};
+static struct phy_driver ip101a_driver = {
+ .phy_id = 0x02430c54,
+ .name = "ICPlus IP101A",
+ .phy_id_mask = 0x0ffffff0,
+ .features = PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause,
+ .config_init = &ip101a_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .driver = { .owner = THIS_MODULE,},
+};
+
static int __init icplus_init(void)
{
int ret = 0;
@@ -166,12 +221,17 @@ static int __init icplus_init(void)
if (ret < 0)
return -ENODEV;
+ ret = phy_driver_register(&ip101a_driver);
+ if (ret < 0)
+ return -ENODEV;
+
return phy_driver_register(&ip175c_driver);
}
static void __exit icplus_exit(void)
{
phy_driver_unregister(&ip1001_driver);
+ phy_driver_unregister(&ip101a_driver);
phy_driver_unregister(&ip175c_driver);
}
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 47c8339a035..2843c90f712 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -241,7 +241,7 @@ MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
static struct platform_driver mdio_ofgpio_driver = {
.driver = {
- .name = "mdio-gpio",
+ .name = "mdio-ofgpio",
.owner = THIS_MODULE,
.of_match_table = mdio_ofgpio_match,
},
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ff109fe5af6..83a5a5afec6 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -213,7 +213,7 @@ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
/* Grab the bits from PHYIR1, and put them
* in the upper half */
- phy_reg = bus->read(bus, addr, MII_PHYSID1);
+ phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
if (phy_reg < 0)
return -EIO;
@@ -221,7 +221,7 @@ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
*phy_id = (phy_reg & 0xffff) << 16;
/* Grab the bits from PHYIR2, and put them in the lower half */
- phy_reg = bus->read(bus, addr, MII_PHYSID2);
+ phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
if (phy_reg < 0)
return -EIO;
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 5d8f6e17bd5..0ec8e09cc2a 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -3,7 +3,7 @@
*
* Author: Kriston Carson
*
- * Copyright (c) 2005 Freescale Semiconductor, Inc.
+ * Copyright (c) 2005, 2009 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -61,32 +61,42 @@ MODULE_DESCRIPTION("Vitesse PHY driver");
MODULE_AUTHOR("Kriston Carson");
MODULE_LICENSE("GPL");
-static int vsc824x_config_init(struct phy_device *phydev)
+int vsc824x_add_skew(struct phy_device *phydev)
{
- int extcon;
int err;
-
- err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT,
- MII_VSC8244_AUXCONSTAT_INIT);
- if (err < 0)
- return err;
+ int extcon;
extcon = phy_read(phydev, MII_VSC8244_EXT_CON1);
if (extcon < 0)
- return err;
+ return extcon;
extcon &= ~(MII_VSC8244_EXTCON1_TX_SKEW_MASK |
MII_VSC8244_EXTCON1_RX_SKEW_MASK);
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
- extcon |= (MII_VSC8244_EXTCON1_TX_SKEW |
- MII_VSC8244_EXTCON1_RX_SKEW);
+ extcon |= (MII_VSC8244_EXTCON1_TX_SKEW |
+ MII_VSC8244_EXTCON1_RX_SKEW);
err = phy_write(phydev, MII_VSC8244_EXT_CON1, extcon);
return err;
}
+EXPORT_SYMBOL(vsc824x_add_skew);
+
+static int vsc824x_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT,
+ MII_VSC8244_AUXCONSTAT_INIT);
+ if (err < 0)
+ return err;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ err = vsc824x_add_skew(phydev);
+
+ return err;
+}
static int vsc824x_ack_interrupt(struct phy_device *phydev)
{
diff --git a/drivers/net/plip/Kconfig b/drivers/net/plip/Kconfig
new file mode 100644
index 00000000000..80c4a3373e5
--- /dev/null
+++ b/drivers/net/plip/Kconfig
@@ -0,0 +1,38 @@
+#
+# Parallel Line Internet Protocol (PLIP) network device configuration
+#
+
+config PLIP
+ tristate "PLIP (parallel port) support"
+ depends on PARPORT
+ ---help---
+ PLIP (Parallel Line Internet Protocol) is used to create a
+ reasonably fast mini network consisting of two (or, rarely, more)
+ local machines. A PLIP link from a Linux box is a popular means to
+ install a Linux distribution on a machine which doesn't have a
+ CD-ROM drive (a minimal system has to be transferred with floppies
+ first). The kernels on both machines need to have this PLIP option
+ enabled for this to work.
+
+ The PLIP driver has two modes, mode 0 and mode 1. The parallel
+ ports (the connectors at the computers with 25 holes) are connected
+ with "null printer" or "Turbo Laplink" cables which can transmit 4
+ bits at a time (mode 0) or with special PLIP cables, to be used on
+ bidirectional parallel ports only, which can transmit 8 bits at a
+ time (mode 1); you can find the wiring of these cables in
+ <file:Documentation/networking/PLIP.txt>. The cables can be up to
+ 15m long. Mode 0 works also if one of the machines runs DOS/Windows
+ and has some PLIP software installed, e.g. the Crynwr PLIP packet
+ driver (<http://oak.oakland.edu/simtel.net/msdos/pktdrvr-pre.html>)
+ and winsock or NCSA's telnet.
+
+ If you want to use PLIP, say Y and read the PLIP mini-HOWTO as well
+ as the NET-3-HOWTO, both available from
+ <http://www.tldp.org/docs.html#howto>. Note that the PLIP
+ protocol has been changed and this PLIP driver won't work together
+ with the PLIP support in Linux versions 1.0.x. This option enlarges
+ your kernel by about 8 KB.
+
+ To compile this driver as a module, choose M here. The module
+ will be called plip. If unsure, say Y or M, in case you buy
+ a laptop later.
diff --git a/drivers/net/plip/Makefile b/drivers/net/plip/Makefile
new file mode 100644
index 00000000000..ed958796dc6
--- /dev/null
+++ b/drivers/net/plip/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the PLIP network device drivers.
+#
+
+obj-$(CONFIG_PLIP) += plip.o
diff --git a/drivers/net/plip.c b/drivers/net/plip/plip.c
index a9e9ca8a86e..a9e9ca8a86e 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip/plip.c
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
new file mode 100644
index 00000000000..872df3ef07a
--- /dev/null
+++ b/drivers/net/ppp/Kconfig
@@ -0,0 +1,175 @@
+#
+# PPP network device configuration
+#
+
+config PPP
+ tristate "PPP (point-to-point protocol) support"
+ select SLHC
+ ---help---
+ PPP (Point to Point Protocol) is a newer and better SLIP. It serves
+ the same purpose: sending Internet traffic over telephone (and other
+ serial) lines. Ask your access provider if they support it, because
+ otherwise you can't use it; most Internet access providers these
+ days support PPP rather than SLIP.
+
+ To use PPP, you need an additional program called pppd as described
+ in the PPP-HOWTO, available at
+ <http://www.tldp.org/docs.html#howto>. Make sure that you have
+ the version of pppd recommended in <file:Documentation/Changes>.
+ The PPP option enlarges your kernel by about 16 KB.
+
+ There are actually two versions of PPP: the traditional PPP for
+ asynchronous lines, such as regular analog phone lines, and
+ synchronous PPP which can be used over digital ISDN lines for
+ example. If you want to use PPP over phone lines or other
+ asynchronous serial lines, you need to say Y (or M) here and also to
+ the next option, "PPP support for async serial ports". For PPP over
+ synchronous lines, you should say Y (or M) here and to "Support
+ synchronous PPP", below.
+
+ If you said Y to "Version information on all symbols" above, then
+ you cannot compile the PPP driver into the kernel; you can then only
+ compile it as a module. To compile this driver as a module, choose M
+ here. The module will be called ppp_generic.
+
+if PPP
+
+config PPP_BSDCOMP
+ tristate "PPP BSD-Compress compression"
+ depends on PPP
+ ---help---
+ Support for the BSD-Compress compression method for PPP, which uses
+ the LZW compression method to compress each PPP packet before it is
+ sent over the wire. The machine at the other end of the PPP link
+ (usually your ISP) has to support the BSD-Compress compression
+ method as well for this to be useful. Even if they don't support it,
+ it is safe to say Y here.
+
+ The PPP Deflate compression method ("PPP Deflate compression",
+ above) is preferable to BSD-Compress, because it compresses better
+ and is patent-free.
+
+ Note that the BSD compression code will always be compiled as a
+ module; it is called bsd_comp and will show up in the directory
+ modules once you have said "make modules". If unsure, say N.
+
+config PPP_DEFLATE
+ tristate "PPP Deflate compression"
+ depends on PPP
+ select ZLIB_INFLATE
+ select ZLIB_DEFLATE
+ ---help---
+ Support for the Deflate compression method for PPP, which uses the
+ Deflate algorithm (the same algorithm that gzip uses) to compress
+ each PPP packet before it is sent over the wire. The machine at the
+ other end of the PPP link (usually your ISP) has to support the
+ Deflate compression method as well for this to be useful. Even if
+ they don't support it, it is safe to say Y here.
+
+ To compile this driver as a module, choose M here.
+
+config PPP_FILTER
+ bool "PPP filtering"
+ depends on PPP
+ ---help---
+ Say Y here if you want to be able to filter the packets passing over
+ PPP interfaces. This allows you to control which packets count as
+ activity (i.e. which packets will reset the idle timer or bring up
+ a demand-dialed link) and which packets are to be dropped entirely.
+ You need to say Y here if you wish to use the pass-filter and
+ active-filter options to pppd.
+
+ If unsure, say N.
+
+config PPP_MPPE
+ tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
+ depends on PPP && EXPERIMENTAL
+ select CRYPTO
+ select CRYPTO_SHA1
+ select CRYPTO_ARC4
+ select CRYPTO_ECB
+ ---help---
+ Support for the MPPE Encryption protocol, as employed by the
+ Microsoft Point-to-Point Tunneling Protocol.
+
+ See http://pptpclient.sourceforge.net/ for information on
+ configuring PPTP clients and servers to utilize this method.
+
+config PPP_MULTILINK
+ bool "PPP multilink support (EXPERIMENTAL)"
+ depends on PPP && EXPERIMENTAL
+ ---help---
+ PPP multilink is a protocol (defined in RFC 1990) which allows you
+ to combine several (logical or physical) lines into one logical PPP
+ connection, so that you can utilize your full bandwidth.
+
+ This has to be supported at the other end as well and you need a
+ version of the pppd daemon which understands the multilink protocol.
+
+ If unsure, say N.
+
+config PPPOATM
+ tristate "PPP over ATM"
+ depends on ATM && PPP
+ ---help---
+ Support PPP (Point to Point Protocol) encapsulated in ATM frames.
+ This implementation does not yet comply with section 8 of RFC2364,
+ which can lead to bad results if the ATM peer loses state and
+ changes its encapsulation unilaterally.
+
+config PPPOE
+ tristate "PPP over Ethernet (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && PPP
+ ---help---
+ Support for PPP over Ethernet.
+
+ This driver requires the latest version of pppd from the CVS
+ repository at cvs.samba.org. Alternatively, see the
+ RoaringPenguin package (<http://www.roaringpenguin.com/pppoe>)
+ which contains instruction on how to use this driver (under
+ the heading "Kernel mode PPPoE").
+
+config PPTP
+ tristate "PPP over IPv4 (PPTP) (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && PPP && NET_IPGRE_DEMUX
+ ---help---
+ Support for PPP over IPv4.(Point-to-Point Tunneling Protocol)
+
+ This driver requires pppd plugin to work in client mode or
+ modified pptpd (poptop) to work in server mode.
+ See http://accel-pptp.sourceforge.net/ for information how to
+ utilize this module.
+
+config PPPOL2TP
+ tristate "PPP over L2TP (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && L2TP && PPP
+ ---help---
+ Support for PPP-over-L2TP socket family. L2TP is a protocol
+ used by ISPs and enterprises to tunnel PPP traffic over UDP
+ tunnels. L2TP is replacing PPTP for VPN uses.
+
+config PPP_ASYNC
+ tristate "PPP support for async serial ports"
+ depends on PPP
+ select CRC_CCITT
+ ---help---
+ Say Y (or M) here if you want to be able to use PPP over standard
+ asynchronous serial ports, such as COM1 or COM2 on a PC. If you use
+ a modem (not a synchronous or ISDN modem) to contact your ISP, you
+ need this option.
+
+ To compile this driver as a module, choose M here.
+
+ If unsure, say Y.
+
+config PPP_SYNC_TTY
+ tristate "PPP support for sync tty ports"
+ depends on PPP
+ ---help---
+ Say Y (or M) here if you want to be able to use PPP over synchronous
+ (HDLC) tty devices, such as the SyncLink adapter. These devices
+ are often used for high-speed leased lines like T1/E1.
+
+ To compile this driver as a module, choose M here.
+
+endif # PPP
diff --git a/drivers/net/ppp/Makefile b/drivers/net/ppp/Makefile
new file mode 100644
index 00000000000..a6b6297b006
--- /dev/null
+++ b/drivers/net/ppp/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the Linux PPP network device drivers.
+#
+
+obj-$(CONFIG_PPP) += ppp_generic.o
+obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
+obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
+obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
+obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
+obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
+obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
+obj-$(CONFIG_PPPOL2TP) += pppox.o
+obj-$(CONFIG_PPTP) += pppox.o pptp.o
diff --git a/drivers/net/bsd_comp.c b/drivers/net/ppp/bsd_comp.c
index a9b759add18..a9b759add18 100644
--- a/drivers/net/bsd_comp.c
+++ b/drivers/net/ppp/bsd_comp.c
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp/ppp_async.c
index c6ba6438082..c6ba6438082 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index 1dbdf82a6df..1dbdf82a6df 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index edfa15d2e79..edfa15d2e79 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 9a1849a83e2..9a1849a83e2 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
diff --git a/drivers/net/ppp_mppe.h b/drivers/net/ppp/ppp_mppe.h
index 7a14e058c66..7a14e058c66 100644
--- a/drivers/net/ppp_mppe.h
+++ b/drivers/net/ppp/ppp_mppe.h
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 736a39ee05b..736a39ee05b 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
diff --git a/drivers/net/pppoe.c b/drivers/net/ppp/pppoe.c
index bc9a4bb3198..bc9a4bb3198 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
diff --git a/drivers/net/pppox.c b/drivers/net/ppp/pppox.c
index 8c0d170dabc..8c0d170dabc 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/ppp/pppox.c
diff --git a/drivers/net/pptp.c b/drivers/net/ppp/pptp.c
index eae542a7e98..89f829f5f72 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -285,8 +285,10 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
ip_send_check(iph);
ip_local_out(skb);
+ return 1;
tx_error:
+ kfree_skb(skb);
return 1;
}
@@ -305,11 +307,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
}
header = (struct pptp_gre_header *)(skb->data);
+ headersize = sizeof(*header);
/* test if acknowledgement present */
if (PPTP_GRE_IS_A(header->ver)) {
- __u32 ack = (PPTP_GRE_IS_S(header->flags)) ?
- header->ack : header->seq; /* ack in different place if S = 0 */
+ __u32 ack;
+
+ if (!pskb_may_pull(skb, headersize))
+ goto drop;
+ header = (struct pptp_gre_header *)(skb->data);
+
+ /* ack in different place if S = 0 */
+ ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq;
ack = ntohl(ack);
@@ -318,21 +327,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
/* also handle sequence number wrap-around */
if (WRAPPED(ack, opt->ack_recv))
opt->ack_recv = ack;
+ } else {
+ headersize -= sizeof(header->ack);
}
-
/* test if payload present */
if (!PPTP_GRE_IS_S(header->flags))
goto drop;
- headersize = sizeof(*header);
payload_len = ntohs(header->payload_len);
seq = ntohl(header->seq);
- /* no ack present? */
- if (!PPTP_GRE_IS_A(header->ver))
- headersize -= sizeof(header->ack);
/* check for incomplete packet (length smaller than expected) */
- if (skb->len - headersize < payload_len)
+ if (!pskb_may_pull(skb, headersize + payload_len))
goto drop;
payload = skb->data + headersize;
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
deleted file mode 100644
index a3d5bb9e39d..00000000000
--- a/drivers/net/sfc/Kconfig
+++ /dev/null
@@ -1,21 +0,0 @@
-config SFC
- tristate "Solarflare SFC4000/SFC9000-family support"
- depends on PCI && INET
- select MDIO
- select CRC32
- select I2C
- select I2C_ALGOBIT
- help
- This driver supports 10-gigabit Ethernet cards based on
- the Solarflare SFC4000 and SFC9000-family controllers.
-
- To compile this driver as a module, choose M here. The module
- will be called sfc.
-config SFC_MTD
- bool "Solarflare SFC4000/SFC9000-family MTD support"
- depends on SFC && MTD && !(SFC=y && MTD=m)
- default y
- help
- This exposes the on-board flash memory as MTD devices (e.g.
- /dev/mtd1). This makes it possible to upload new firmware
- to the NIC.
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
deleted file mode 100644
index bc4643af6dd..00000000000
--- a/drivers/net/sfc/ethtool.c
+++ /dev/null
@@ -1,1012 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/rtnetlink.h>
-#include <linux/in.h>
-#include "net_driver.h"
-#include "workarounds.h"
-#include "selftest.h"
-#include "efx.h"
-#include "filter.h"
-#include "nic.h"
-
-struct ethtool_string {
- char name[ETH_GSTRING_LEN];
-};
-
-struct efx_ethtool_stat {
- const char *name;
- enum {
- EFX_ETHTOOL_STAT_SOURCE_mac_stats,
- EFX_ETHTOOL_STAT_SOURCE_nic,
- EFX_ETHTOOL_STAT_SOURCE_channel,
- EFX_ETHTOOL_STAT_SOURCE_tx_queue
- } source;
- unsigned offset;
- u64(*get_stat) (void *field); /* Reader function */
-};
-
-/* Initialiser for a struct #efx_ethtool_stat with type-checking */
-#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
- get_stat_function) { \
- .name = #stat_name, \
- .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
- .offset = ((((field_type *) 0) == \
- &((struct efx_##source_name *)0)->field) ? \
- offsetof(struct efx_##source_name, field) : \
- offsetof(struct efx_##source_name, field)), \
- .get_stat = get_stat_function, \
-}
-
-static u64 efx_get_uint_stat(void *field)
-{
- return *(unsigned int *)field;
-}
-
-static u64 efx_get_ulong_stat(void *field)
-{
- return *(unsigned long *)field;
-}
-
-static u64 efx_get_u64_stat(void *field)
-{
- return *(u64 *) field;
-}
-
-static u64 efx_get_atomic_stat(void *field)
-{
- return atomic_read((atomic_t *) field);
-}
-
-#define EFX_ETHTOOL_ULONG_MAC_STAT(field) \
- EFX_ETHTOOL_STAT(field, mac_stats, field, \
- unsigned long, efx_get_ulong_stat)
-
-#define EFX_ETHTOOL_U64_MAC_STAT(field) \
- EFX_ETHTOOL_STAT(field, mac_stats, field, \
- u64, efx_get_u64_stat)
-
-#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
- EFX_ETHTOOL_STAT(name, nic, n_##name, \
- unsigned int, efx_get_uint_stat)
-
-#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
- EFX_ETHTOOL_STAT(field, nic, field, \
- atomic_t, efx_get_atomic_stat)
-
-#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
- EFX_ETHTOOL_STAT(field, channel, n_##field, \
- unsigned int, efx_get_uint_stat)
-
-#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
- EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
- unsigned int, efx_get_uint_stat)
-
-static struct efx_ethtool_stat efx_ethtool_stats[] = {
- EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_control),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_64),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
- EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_good),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_control),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_64),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error),
- EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
- EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
-};
-
-/* Number of ethtool statistics */
-#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
-
-#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
-
-/**************************************************************************
- *
- * Ethtool operations
- *
- **************************************************************************
- */
-
-/* Identify device by flashing LEDs */
-static int efx_ethtool_phys_id(struct net_device *net_dev,
- enum ethtool_phys_id_state state)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- enum efx_led_mode mode = EFX_LED_DEFAULT;
-
- switch (state) {
- case ETHTOOL_ID_ON:
- mode = EFX_LED_ON;
- break;
- case ETHTOOL_ID_OFF:
- mode = EFX_LED_OFF;
- break;
- case ETHTOOL_ID_INACTIVE:
- mode = EFX_LED_DEFAULT;
- break;
- case ETHTOOL_ID_ACTIVE:
- return 1; /* cycle on/off once per second */
- }
-
- efx->type->set_id_led(efx, mode);
- return 0;
-}
-
-/* This must be called with rtnl_lock held. */
-static int efx_ethtool_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *ecmd)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_link_state *link_state = &efx->link_state;
-
- mutex_lock(&efx->mac_lock);
- efx->phy_op->get_settings(efx, ecmd);
- mutex_unlock(&efx->mac_lock);
-
- /* GMAC does not support 1000Mbps HD */
- ecmd->supported &= ~SUPPORTED_1000baseT_Half;
- /* Both MACs support pause frames (bidirectional and respond-only) */
- ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-
- if (LOOPBACK_INTERNAL(efx)) {
- ethtool_cmd_speed_set(ecmd, link_state->speed);
- ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
- }
-
- return 0;
-}
-
-/* This must be called with rtnl_lock held. */
-static int efx_ethtool_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *ecmd)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- int rc;
-
- /* GMAC does not support 1000Mbps HD */
- if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
- (ecmd->duplex != DUPLEX_FULL)) {
- netif_dbg(efx, drv, efx->net_dev,
- "rejecting unsupported 1000Mbps HD setting\n");
- return -EINVAL;
- }
-
- mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->set_settings(efx, ecmd);
- mutex_unlock(&efx->mac_lock);
- return rc;
-}
-
-static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
- struct ethtool_drvinfo *info)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- efx_mcdi_print_fwver(efx, info->fw_version,
- sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
-}
-
-static int efx_ethtool_get_regs_len(struct net_device *net_dev)
-{
- return efx_nic_get_regs_len(netdev_priv(net_dev));
-}
-
-static void efx_ethtool_get_regs(struct net_device *net_dev,
- struct ethtool_regs *regs, void *buf)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- regs->version = efx->type->revision;
- efx_nic_get_regs(efx, buf);
-}
-
-static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- return efx->msg_enable;
-}
-
-static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- efx->msg_enable = msg_enable;
-}
-
-/**
- * efx_fill_test - fill in an individual self-test entry
- * @test_index: Index of the test
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- * @test: Pointer to test result (used only if data != %NULL)
- * @unit_format: Unit name format (e.g. "chan\%d")
- * @unit_id: Unit id (e.g. 0 for "chan0")
- * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
- * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
- *
- * Fill in an individual self-test entry.
- */
-static void efx_fill_test(unsigned int test_index,
- struct ethtool_string *strings, u64 *data,
- int *test, const char *unit_format, int unit_id,
- const char *test_format, const char *test_id)
-{
- struct ethtool_string unit_str, test_str;
-
- /* Fill data value, if applicable */
- if (data)
- data[test_index] = *test;
-
- /* Fill string, if applicable */
- if (strings) {
- if (strchr(unit_format, '%'))
- snprintf(unit_str.name, sizeof(unit_str.name),
- unit_format, unit_id);
- else
- strcpy(unit_str.name, unit_format);
- snprintf(test_str.name, sizeof(test_str.name),
- test_format, test_id);
- snprintf(strings[test_index].name,
- sizeof(strings[test_index].name),
- "%-6s %-24s", unit_str.name, test_str.name);
- }
-}
-
-#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
-#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
-#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
-#define EFX_LOOPBACK_NAME(_mode, _counter) \
- "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
-
-/**
- * efx_fill_loopback_test - fill in a block of loopback self-test entries
- * @efx: Efx NIC
- * @lb_tests: Efx loopback self-test results structure
- * @mode: Loopback test mode
- * @test_index: Starting index of the test
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- */
-static int efx_fill_loopback_test(struct efx_nic *efx,
- struct efx_loopback_self_tests *lb_tests,
- enum efx_loopback_mode mode,
- unsigned int test_index,
- struct ethtool_string *strings, u64 *data)
-{
- struct efx_channel *channel = efx_get_channel(efx, 0);
- struct efx_tx_queue *tx_queue;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- efx_fill_test(test_index++, strings, data,
- &lb_tests->tx_sent[tx_queue->queue],
- EFX_TX_QUEUE_NAME(tx_queue),
- EFX_LOOPBACK_NAME(mode, "tx_sent"));
- efx_fill_test(test_index++, strings, data,
- &lb_tests->tx_done[tx_queue->queue],
- EFX_TX_QUEUE_NAME(tx_queue),
- EFX_LOOPBACK_NAME(mode, "tx_done"));
- }
- efx_fill_test(test_index++, strings, data,
- &lb_tests->rx_good,
- "rx", 0,
- EFX_LOOPBACK_NAME(mode, "rx_good"));
- efx_fill_test(test_index++, strings, data,
- &lb_tests->rx_bad,
- "rx", 0,
- EFX_LOOPBACK_NAME(mode, "rx_bad"));
-
- return test_index;
-}
-
-/**
- * efx_ethtool_fill_self_tests - get self-test details
- * @efx: Efx NIC
- * @tests: Efx self-test results structure, or %NULL
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- */
-static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
- struct efx_self_tests *tests,
- struct ethtool_string *strings,
- u64 *data)
-{
- struct efx_channel *channel;
- unsigned int n = 0, i;
- enum efx_loopback_mode mode;
-
- efx_fill_test(n++, strings, data, &tests->phy_alive,
- "phy", 0, "alive", NULL);
- efx_fill_test(n++, strings, data, &tests->nvram,
- "core", 0, "nvram", NULL);
- efx_fill_test(n++, strings, data, &tests->interrupt,
- "core", 0, "interrupt", NULL);
-
- /* Event queues */
- efx_for_each_channel(channel, efx) {
- efx_fill_test(n++, strings, data,
- &tests->eventq_dma[channel->channel],
- EFX_CHANNEL_NAME(channel),
- "eventq.dma", NULL);
- efx_fill_test(n++, strings, data,
- &tests->eventq_int[channel->channel],
- EFX_CHANNEL_NAME(channel),
- "eventq.int", NULL);
- efx_fill_test(n++, strings, data,
- &tests->eventq_poll[channel->channel],
- EFX_CHANNEL_NAME(channel),
- "eventq.poll", NULL);
- }
-
- efx_fill_test(n++, strings, data, &tests->registers,
- "core", 0, "registers", NULL);
-
- if (efx->phy_op->run_tests != NULL) {
- EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);
-
- for (i = 0; true; ++i) {
- const char *name;
-
- EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
- name = efx->phy_op->test_name(efx, i);
- if (name == NULL)
- break;
-
- efx_fill_test(n++, strings, data, &tests->phy_ext[i],
- "phy", 0, name, NULL);
- }
- }
-
- /* Loopback tests */
- for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
- if (!(efx->loopback_modes & (1 << mode)))
- continue;
- n = efx_fill_loopback_test(efx,
- &tests->loopback[mode], mode, n,
- strings, data);
- }
-
- return n;
-}
-
-static int efx_ethtool_get_sset_count(struct net_device *net_dev,
- int string_set)
-{
- switch (string_set) {
- case ETH_SS_STATS:
- return EFX_ETHTOOL_NUM_STATS;
- case ETH_SS_TEST:
- return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
- NULL, NULL, NULL);
- default:
- return -EINVAL;
- }
-}
-
-static void efx_ethtool_get_strings(struct net_device *net_dev,
- u32 string_set, u8 *strings)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct ethtool_string *ethtool_strings =
- (struct ethtool_string *)strings;
- int i;
-
- switch (string_set) {
- case ETH_SS_STATS:
- for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
- strncpy(ethtool_strings[i].name,
- efx_ethtool_stats[i].name,
- sizeof(ethtool_strings[i].name));
- break;
- case ETH_SS_TEST:
- efx_ethtool_fill_self_tests(efx, NULL,
- ethtool_strings, NULL);
- break;
- default:
- /* No other string sets */
- break;
- }
-}
-
-static void efx_ethtool_get_stats(struct net_device *net_dev,
- struct ethtool_stats *stats,
- u64 *data)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
- struct efx_ethtool_stat *stat;
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- struct rtnl_link_stats64 temp;
- int i;
-
- EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
-
- /* Update MAC and NIC statistics */
- dev_get_stats(net_dev, &temp);
-
- /* Fill detailed statistics buffer */
- for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
- stat = &efx_ethtool_stats[i];
- switch (stat->source) {
- case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
- data[i] = stat->get_stat((void *)mac_stats +
- stat->offset);
- break;
- case EFX_ETHTOOL_STAT_SOURCE_nic:
- data[i] = stat->get_stat((void *)efx + stat->offset);
- break;
- case EFX_ETHTOOL_STAT_SOURCE_channel:
- data[i] = 0;
- efx_for_each_channel(channel, efx)
- data[i] += stat->get_stat((void *)channel +
- stat->offset);
- break;
- case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
- data[i] = 0;
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- data[i] +=
- stat->get_stat((void *)tx_queue
- + stat->offset);
- }
- break;
- }
- }
-}
-
-static void efx_ethtool_self_test(struct net_device *net_dev,
- struct ethtool_test *test, u64 *data)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_self_tests *efx_tests;
- int already_up;
- int rc = -ENOMEM;
-
- efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
- if (!efx_tests)
- goto fail;
-
-
- ASSERT_RTNL();
- if (efx->state != STATE_RUNNING) {
- rc = -EIO;
- goto fail1;
- }
-
- netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
- (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
-
- /* We need rx buffers and interrupts. */
- already_up = (efx->net_dev->flags & IFF_UP);
- if (!already_up) {
- rc = dev_open(efx->net_dev);
- if (rc) {
- netif_err(efx, drv, efx->net_dev,
- "failed opening device.\n");
- goto fail1;
- }
- }
-
- rc = efx_selftest(efx, efx_tests, test->flags);
-
- if (!already_up)
- dev_close(efx->net_dev);
-
- netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
- rc == 0 ? "passed" : "failed",
- (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
-
-fail1:
- /* Fill ethtool results structures */
- efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
- kfree(efx_tests);
-fail:
- if (rc)
- test->flags |= ETH_TEST_FL_FAILED;
-}
-
-/* Restart autonegotiation */
-static int efx_ethtool_nway_reset(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- return mdio45_nway_restart(&efx->mdio);
-}
-
-static int efx_ethtool_get_coalesce(struct net_device *net_dev,
- struct ethtool_coalesce *coalesce)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_channel *channel;
-
- memset(coalesce, 0, sizeof(*coalesce));
-
- /* Find lowest IRQ moderation across all used TX queues */
- coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
- efx_for_each_channel(channel, efx) {
- if (!efx_channel_has_tx_queues(channel))
- continue;
- if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
- if (channel->channel < efx->n_rx_channels)
- coalesce->tx_coalesce_usecs_irq =
- channel->irq_moderation;
- else
- coalesce->tx_coalesce_usecs_irq = 0;
- }
- }
-
- coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive;
- coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation;
-
- coalesce->tx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION;
- coalesce->rx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION;
-
- return 0;
-}
-
-/* Set coalescing parameters
- * The difficulties occur for shared channels
- */
-static int efx_ethtool_set_coalesce(struct net_device *net_dev,
- struct ethtool_coalesce *coalesce)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_channel *channel;
- unsigned tx_usecs, rx_usecs, adaptive;
-
- if (coalesce->use_adaptive_tx_coalesce)
- return -EOPNOTSUPP;
-
- if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) {
- netif_err(efx, drv, efx->net_dev, "invalid coalescing setting. "
- "Only rx/tx_coalesce_usecs_irq are supported\n");
- return -EOPNOTSUPP;
- }
-
- rx_usecs = coalesce->rx_coalesce_usecs_irq;
- tx_usecs = coalesce->tx_coalesce_usecs_irq;
- adaptive = coalesce->use_adaptive_rx_coalesce;
-
- /* If the channel is shared only allow RX parameters to be set */
- efx_for_each_channel(channel, efx) {
- if (efx_channel_has_rx_queue(channel) &&
- efx_channel_has_tx_queues(channel) &&
- tx_usecs) {
- netif_err(efx, drv, efx->net_dev, "Channel is shared. "
- "Only RX coalescing may be set\n");
- return -EOPNOTSUPP;
- }
- }
-
- efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive);
- efx_for_each_channel(channel, efx)
- efx->type->push_irq_moderation(channel);
-
- return 0;
-}
-
-static void efx_ethtool_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
- ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = efx->rxq_entries;
- ring->tx_pending = efx->txq_entries;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
-}
-
-static int efx_ethtool_set_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
- ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
- ring->tx_pending > EFX_MAX_DMAQ_SIZE)
- return -EINVAL;
-
- if (ring->rx_pending < EFX_MIN_RING_SIZE ||
- ring->tx_pending < EFX_MIN_RING_SIZE) {
- netif_err(efx, drv, efx->net_dev,
- "TX and RX queues cannot be smaller than %ld\n",
- EFX_MIN_RING_SIZE);
- return -EINVAL;
- }
-
- return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
-}
-
-static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
- struct ethtool_pauseparam *pause)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- u8 wanted_fc, old_fc;
- u32 old_adv;
- bool reset;
- int rc = 0;
-
- mutex_lock(&efx->mac_lock);
-
- wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
- (pause->tx_pause ? EFX_FC_TX : 0) |
- (pause->autoneg ? EFX_FC_AUTO : 0));
-
- if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
- netif_dbg(efx, drv, efx->net_dev,
- "Flow control unsupported: tx ON rx OFF\n");
- rc = -EINVAL;
- goto out;
- }
-
- if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
- netif_dbg(efx, drv, efx->net_dev,
- "Autonegotiation is disabled\n");
- rc = -EINVAL;
- goto out;
- }
-
- /* TX flow control may automatically turn itself off if the
- * link partner (intermittently) stops responding to pause
- * frames. There isn't any indication that this has happened,
- * so the best we do is leave it up to the user to spot this
- * and fix it be cycling transmit flow control on this end. */
- reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
- if (EFX_WORKAROUND_11482(efx) && reset) {
- if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
- /* Recover by resetting the EM block */
- falcon_stop_nic_stats(efx);
- falcon_drain_tx_fifo(efx);
- efx->mac_op->reconfigure(efx);
- falcon_start_nic_stats(efx);
- } else {
- /* Schedule a reset to recover */
- efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
- }
- }
-
- old_adv = efx->link_advertising;
- old_fc = efx->wanted_fc;
- efx_link_set_wanted_fc(efx, wanted_fc);
- if (efx->link_advertising != old_adv ||
- (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
- rc = efx->phy_op->reconfigure(efx);
- if (rc) {
- netif_err(efx, drv, efx->net_dev,
- "Unable to advertise requested flow "
- "control setting\n");
- goto out;
- }
- }
-
- /* Reconfigure the MAC. The PHY *may* generate a link state change event
- * if the user just changed the advertised capabilities, but there's no
- * harm doing this twice */
- efx->mac_op->reconfigure(efx);
-
-out:
- mutex_unlock(&efx->mac_lock);
-
- return rc;
-}
-
-static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
- struct ethtool_pauseparam *pause)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
- pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
- pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
-}
-
-
-static void efx_ethtool_get_wol(struct net_device *net_dev,
- struct ethtool_wolinfo *wol)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- return efx->type->get_wol(efx, wol);
-}
-
-
-static int efx_ethtool_set_wol(struct net_device *net_dev,
- struct ethtool_wolinfo *wol)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- return efx->type->set_wol(efx, wol->wolopts);
-}
-
-static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- int rc;
-
- rc = efx->type->map_reset_flags(flags);
- if (rc < 0)
- return rc;
-
- return efx_reset(efx, rc);
-}
-
-static int
-efx_ethtool_get_rxnfc(struct net_device *net_dev,
- struct ethtool_rxnfc *info, void *rules __always_unused)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = efx->n_rx_channels;
- return 0;
-
- case ETHTOOL_GRXFH: {
- unsigned min_revision = 0;
-
- info->data = 0;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
- case UDP_V4_FLOW:
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- min_revision = EFX_REV_FALCON_B0;
- break;
- case TCP_V6_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
- case UDP_V6_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV6_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- min_revision = EFX_REV_SIENA_A0;
- break;
- default:
- break;
- }
- if (efx_nic_rev(efx) < min_revision)
- info->data = 0;
- return 0;
- }
-
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
- struct ethtool_rx_ntuple *ntuple)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
- struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
- struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
- struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
- struct efx_filter_spec filter;
- int rc;
-
- /* Range-check action */
- if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
- ntuple->fs.action >= (s32)efx->n_rx_channels)
- return -EINVAL;
-
- if (~ntuple->fs.data_mask)
- return -EINVAL;
-
- efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
- (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
- 0xfff : ntuple->fs.action);
-
- switch (ntuple->fs.flow_type) {
- case TCP_V4_FLOW:
- case UDP_V4_FLOW: {
- u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
- IPPROTO_TCP : IPPROTO_UDP);
-
- /* Must match all of destination, */
- if (ip_mask->ip4dst | ip_mask->pdst)
- return -EINVAL;
- /* all or none of source, */
- if ((ip_mask->ip4src | ip_mask->psrc) &&
- ((__force u32)~ip_mask->ip4src |
- (__force u16)~ip_mask->psrc))
- return -EINVAL;
- /* and nothing else */
- if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
- return -EINVAL;
-
- if (!ip_mask->ip4src)
- rc = efx_filter_set_ipv4_full(&filter, proto,
- ip_entry->ip4dst,
- ip_entry->pdst,
- ip_entry->ip4src,
- ip_entry->psrc);
- else
- rc = efx_filter_set_ipv4_local(&filter, proto,
- ip_entry->ip4dst,
- ip_entry->pdst);
- if (rc)
- return rc;
- break;
- }
-
- case ETHER_FLOW:
- /* Must match all of destination, */
- if (!is_zero_ether_addr(mac_mask->h_dest))
- return -EINVAL;
- /* all or none of VID, */
- if (ntuple->fs.vlan_tag_mask != 0xf000 &&
- ntuple->fs.vlan_tag_mask != 0xffff)
- return -EINVAL;
- /* and nothing else */
- if (!is_broadcast_ether_addr(mac_mask->h_source) ||
- mac_mask->h_proto != htons(0xffff))
- return -EINVAL;
-
- rc = efx_filter_set_eth_local(
- &filter,
- (ntuple->fs.vlan_tag_mask == 0xf000) ?
- ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
- mac_entry->h_dest);
- if (rc)
- return rc;
- break;
-
- default:
- return -EINVAL;
- }
-
- if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
- return efx_filter_remove_filter(efx, &filter);
-
- rc = efx_filter_insert_filter(efx, &filter, true);
- return rc < 0 ? rc : 0;
-}
-
-static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
- struct ethtool_rxfh_indir *indir)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- size_t copy_size =
- min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
-
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return -EOPNOTSUPP;
-
- indir->size = ARRAY_SIZE(efx->rx_indir_table);
- memcpy(indir->ring_index, efx->rx_indir_table,
- copy_size * sizeof(indir->ring_index[0]));
- return 0;
-}
-
-static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
- const struct ethtool_rxfh_indir *indir)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- size_t i;
-
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return -EOPNOTSUPP;
-
- /* Validate size and indices */
- if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
- return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- if (indir->ring_index[i] >= efx->n_rx_channels)
- return -EINVAL;
-
- memcpy(efx->rx_indir_table, indir->ring_index,
- sizeof(efx->rx_indir_table));
- efx_nic_push_rx_indir_table(efx);
- return 0;
-}
-
-const struct ethtool_ops efx_ethtool_ops = {
- .get_settings = efx_ethtool_get_settings,
- .set_settings = efx_ethtool_set_settings,
- .get_drvinfo = efx_ethtool_get_drvinfo,
- .get_regs_len = efx_ethtool_get_regs_len,
- .get_regs = efx_ethtool_get_regs,
- .get_msglevel = efx_ethtool_get_msglevel,
- .set_msglevel = efx_ethtool_set_msglevel,
- .nway_reset = efx_ethtool_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_coalesce = efx_ethtool_get_coalesce,
- .set_coalesce = efx_ethtool_set_coalesce,
- .get_ringparam = efx_ethtool_get_ringparam,
- .set_ringparam = efx_ethtool_set_ringparam,
- .get_pauseparam = efx_ethtool_get_pauseparam,
- .set_pauseparam = efx_ethtool_set_pauseparam,
- .get_sset_count = efx_ethtool_get_sset_count,
- .self_test = efx_ethtool_self_test,
- .get_strings = efx_ethtool_get_strings,
- .set_phys_id = efx_ethtool_phys_id,
- .get_ethtool_stats = efx_ethtool_get_stats,
- .get_wol = efx_ethtool_get_wol,
- .set_wol = efx_ethtool_set_wol,
- .reset = efx_ethtool_reset,
- .get_rxnfc = efx_ethtool_get_rxnfc,
- .set_rx_ntuple = efx_ethtool_set_rx_ntuple,
- .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
- .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
-};
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
deleted file mode 100644
index c3048a6ba67..00000000000
--- a/drivers/net/sh_eth.h
+++ /dev/null
@@ -1,837 +0,0 @@
-/*
- * SuperH Ethernet device driver
- *
- * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
- * Copyright (C) 2008-2011 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- */
-
-#ifndef __SH_ETH_H__
-#define __SH_ETH_H__
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-
-#include <asm/sh_eth.h>
-
-#define CARDNAME "sh-eth"
-#define TX_TIMEOUT (5*HZ)
-#define TX_RING_SIZE 64 /* Tx ring size */
-#define RX_RING_SIZE 64 /* Rx ring size */
-#define ETHERSMALL 60
-#define PKT_BUF_SZ 1538
-
-enum {
- /* E-DMAC registers */
- EDSR = 0,
- EDMR,
- EDTRR,
- EDRRR,
- EESR,
- EESIPR,
- TDLAR,
- TDFAR,
- TDFXR,
- TDFFR,
- RDLAR,
- RDFAR,
- RDFXR,
- RDFFR,
- TRSCER,
- RMFCR,
- TFTR,
- FDR,
- RMCR,
- EDOCR,
- TFUCR,
- RFOCR,
- FCFTR,
- RPADIR,
- TRIMD,
- RBWAR,
- TBRAR,
-
- /* Ether registers */
- ECMR,
- ECSR,
- ECSIPR,
- PIR,
- PSR,
- RDMLR,
- PIPR,
- RFLR,
- IPGR,
- APR,
- MPR,
- PFTCR,
- PFRCR,
- RFCR,
- RFCF,
- TPAUSER,
- TPAUSECR,
- BCFR,
- BCFRR,
- GECMR,
- BCULR,
- MAHR,
- MALR,
- TROCR,
- CDCR,
- LCCR,
- CNDCR,
- CEFCR,
- FRECR,
- TSFRCR,
- TLFRCR,
- CERCR,
- CEECR,
- MAFCR,
- RTRATE,
-
- /* TSU Absolute address */
- ARSTR,
- TSU_CTRST,
- TSU_FWEN0,
- TSU_FWEN1,
- TSU_FCM,
- TSU_BSYSL0,
- TSU_BSYSL1,
- TSU_PRISL0,
- TSU_PRISL1,
- TSU_FWSL0,
- TSU_FWSL1,
- TSU_FWSLC,
- TSU_QTAG0,
- TSU_QTAG1,
- TSU_QTAGM0,
- TSU_QTAGM1,
- TSU_FWSR,
- TSU_FWINMK,
- TSU_ADQT0,
- TSU_ADQT1,
- TSU_VTAG0,
- TSU_VTAG1,
- TSU_ADSBSY,
- TSU_TEN,
- TSU_POST1,
- TSU_POST2,
- TSU_POST3,
- TSU_POST4,
- TSU_ADRH0,
- TSU_ADRL0,
- TSU_ADRH31,
- TSU_ADRL31,
-
- TXNLCR0,
- TXALCR0,
- RXNLCR0,
- RXALCR0,
- FWNLCR0,
- FWALCR0,
- TXNLCR1,
- TXALCR1,
- RXNLCR1,
- RXALCR1,
- FWNLCR1,
- FWALCR1,
-
- /* This value must be written at last. */
- SH_ETH_MAX_REGISTER_OFFSET,
-};
-
-static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
- [EDSR] = 0x0000,
- [EDMR] = 0x0400,
- [EDTRR] = 0x0408,
- [EDRRR] = 0x0410,
- [EESR] = 0x0428,
- [EESIPR] = 0x0430,
- [TDLAR] = 0x0010,
- [TDFAR] = 0x0014,
- [TDFXR] = 0x0018,
- [TDFFR] = 0x001c,
- [RDLAR] = 0x0030,
- [RDFAR] = 0x0034,
- [RDFXR] = 0x0038,
- [RDFFR] = 0x003c,
- [TRSCER] = 0x0438,
- [RMFCR] = 0x0440,
- [TFTR] = 0x0448,
- [FDR] = 0x0450,
- [RMCR] = 0x0458,
- [RPADIR] = 0x0460,
- [FCFTR] = 0x0468,
-
- [ECMR] = 0x0500,
- [ECSR] = 0x0510,
- [ECSIPR] = 0x0518,
- [PIR] = 0x0520,
- [PSR] = 0x0528,
- [PIPR] = 0x052c,
- [RFLR] = 0x0508,
- [APR] = 0x0554,
- [MPR] = 0x0558,
- [PFTCR] = 0x055c,
- [PFRCR] = 0x0560,
- [TPAUSER] = 0x0564,
- [GECMR] = 0x05b0,
- [BCULR] = 0x05b4,
- [MAHR] = 0x05c0,
- [MALR] = 0x05c8,
- [TROCR] = 0x0700,
- [CDCR] = 0x0708,
- [LCCR] = 0x0710,
- [CEFCR] = 0x0740,
- [FRECR] = 0x0748,
- [TSFRCR] = 0x0750,
- [TLFRCR] = 0x0758,
- [RFCR] = 0x0760,
- [CERCR] = 0x0768,
- [CEECR] = 0x0770,
- [MAFCR] = 0x0778,
-
- [ARSTR] = 0x0000,
- [TSU_CTRST] = 0x0004,
- [TSU_FWEN0] = 0x0010,
- [TSU_FWEN1] = 0x0014,
- [TSU_FCM] = 0x0018,
- [TSU_BSYSL0] = 0x0020,
- [TSU_BSYSL1] = 0x0024,
- [TSU_PRISL0] = 0x0028,
- [TSU_PRISL1] = 0x002c,
- [TSU_FWSL0] = 0x0030,
- [TSU_FWSL1] = 0x0034,
- [TSU_FWSLC] = 0x0038,
- [TSU_QTAG0] = 0x0040,
- [TSU_QTAG1] = 0x0044,
- [TSU_FWSR] = 0x0050,
- [TSU_FWINMK] = 0x0054,
- [TSU_ADQT0] = 0x0048,
- [TSU_ADQT1] = 0x004c,
- [TSU_VTAG0] = 0x0058,
- [TSU_VTAG1] = 0x005c,
- [TSU_ADSBSY] = 0x0060,
- [TSU_TEN] = 0x0064,
- [TSU_POST1] = 0x0070,
- [TSU_POST2] = 0x0074,
- [TSU_POST3] = 0x0078,
- [TSU_POST4] = 0x007c,
- [TSU_ADRH0] = 0x0100,
- [TSU_ADRL0] = 0x0104,
- [TSU_ADRH31] = 0x01f8,
- [TSU_ADRL31] = 0x01fc,
-
- [TXNLCR0] = 0x0080,
- [TXALCR0] = 0x0084,
- [RXNLCR0] = 0x0088,
- [RXALCR0] = 0x008c,
- [FWNLCR0] = 0x0090,
- [FWALCR0] = 0x0094,
- [TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
- [RXNLCR1] = 0x00a8,
- [RXALCR1] = 0x00ac,
- [FWNLCR1] = 0x00b0,
- [FWALCR1] = 0x00b4,
-};
-
-static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
- [ECMR] = 0x0100,
- [RFLR] = 0x0108,
- [ECSR] = 0x0110,
- [ECSIPR] = 0x0118,
- [PIR] = 0x0120,
- [PSR] = 0x0128,
- [RDMLR] = 0x0140,
- [IPGR] = 0x0150,
- [APR] = 0x0154,
- [MPR] = 0x0158,
- [TPAUSER] = 0x0164,
- [RFCF] = 0x0160,
- [TPAUSECR] = 0x0168,
- [BCFRR] = 0x016c,
- [MAHR] = 0x01c0,
- [MALR] = 0x01c8,
- [TROCR] = 0x01d0,
- [CDCR] = 0x01d4,
- [LCCR] = 0x01d8,
- [CNDCR] = 0x01dc,
- [CEFCR] = 0x01e4,
- [FRECR] = 0x01e8,
- [TSFRCR] = 0x01ec,
- [TLFRCR] = 0x01f0,
- [RFCR] = 0x01f4,
- [MAFCR] = 0x01f8,
- [RTRATE] = 0x01fc,
-
- [EDMR] = 0x0000,
- [EDTRR] = 0x0008,
- [EDRRR] = 0x0010,
- [TDLAR] = 0x0018,
- [RDLAR] = 0x0020,
- [EESR] = 0x0028,
- [EESIPR] = 0x0030,
- [TRSCER] = 0x0038,
- [RMFCR] = 0x0040,
- [TFTR] = 0x0048,
- [FDR] = 0x0050,
- [RMCR] = 0x0058,
- [TFUCR] = 0x0064,
- [RFOCR] = 0x0068,
- [FCFTR] = 0x0070,
- [RPADIR] = 0x0078,
- [TRIMD] = 0x007c,
- [RBWAR] = 0x00c8,
- [RDFAR] = 0x00cc,
- [TBRAR] = 0x00d4,
- [TDFAR] = 0x00d8,
-};
-
-static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
- [ECMR] = 0x0160,
- [ECSR] = 0x0164,
- [ECSIPR] = 0x0168,
- [PIR] = 0x016c,
- [MAHR] = 0x0170,
- [MALR] = 0x0174,
- [RFLR] = 0x0178,
- [PSR] = 0x017c,
- [TROCR] = 0x0180,
- [CDCR] = 0x0184,
- [LCCR] = 0x0188,
- [CNDCR] = 0x018c,
- [CEFCR] = 0x0194,
- [FRECR] = 0x0198,
- [TSFRCR] = 0x019c,
- [TLFRCR] = 0x01a0,
- [RFCR] = 0x01a4,
- [MAFCR] = 0x01a8,
- [IPGR] = 0x01b4,
- [APR] = 0x01b8,
- [MPR] = 0x01bc,
- [TPAUSER] = 0x01c4,
- [BCFR] = 0x01cc,
-
- [ARSTR] = 0x0000,
- [TSU_CTRST] = 0x0004,
- [TSU_FWEN0] = 0x0010,
- [TSU_FWEN1] = 0x0014,
- [TSU_FCM] = 0x0018,
- [TSU_BSYSL0] = 0x0020,
- [TSU_BSYSL1] = 0x0024,
- [TSU_PRISL0] = 0x0028,
- [TSU_PRISL1] = 0x002c,
- [TSU_FWSL0] = 0x0030,
- [TSU_FWSL1] = 0x0034,
- [TSU_FWSLC] = 0x0038,
- [TSU_QTAGM0] = 0x0040,
- [TSU_QTAGM1] = 0x0044,
- [TSU_ADQT0] = 0x0048,
- [TSU_ADQT1] = 0x004c,
- [TSU_FWSR] = 0x0050,
- [TSU_FWINMK] = 0x0054,
- [TSU_ADSBSY] = 0x0060,
- [TSU_TEN] = 0x0064,
- [TSU_POST1] = 0x0070,
- [TSU_POST2] = 0x0074,
- [TSU_POST3] = 0x0078,
- [TSU_POST4] = 0x007c,
-
- [TXNLCR0] = 0x0080,
- [TXALCR0] = 0x0084,
- [RXNLCR0] = 0x0088,
- [RXALCR0] = 0x008c,
- [FWNLCR0] = 0x0090,
- [FWALCR0] = 0x0094,
- [TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
- [RXNLCR1] = 0x00a8,
- [RXALCR1] = 0x00ac,
- [FWNLCR1] = 0x00b0,
- [FWALCR1] = 0x00b4,
-
- [TSU_ADRH0] = 0x0100,
- [TSU_ADRL0] = 0x0104,
- [TSU_ADRL31] = 0x01fc,
-
-};
-
-/* Driver's parameters */
-#if defined(CONFIG_CPU_SH4)
-#define SH4_SKB_RX_ALIGN 32
-#else
-#define SH2_SH3_SKB_RX_ALIGN 2
-#endif
-
-/*
- * Register's bits
- */
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
-/* EDSR */
-enum EDSR_BIT {
- EDSR_ENT = 0x01, EDSR_ENR = 0x02,
-};
-#define EDSR_ENALL (EDSR_ENT|EDSR_ENR)
-
-/* GECMR */
-enum GECMR_BIT {
- GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01,
-};
-#endif
-
-/* EDMR */
-enum DMAC_M_BIT {
- EDMR_EL = 0x40, /* Litte endian */
- EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
- EDMR_SRST_GETHER = 0x03,
- EDMR_SRST_ETHER = 0x01,
-};
-
-/* EDTRR */
-enum DMAC_T_BIT {
- EDTRR_TRNS_GETHER = 0x03,
- EDTRR_TRNS_ETHER = 0x01,
-};
-
-/* EDRRR*/
-enum EDRRR_R_BIT {
- EDRRR_R = 0x01,
-};
-
-/* TPAUSER */
-enum TPAUSER_BIT {
- TPAUSER_TPAUSE = 0x0000ffff,
- TPAUSER_UNLIMITED = 0,
-};
-
-/* BCFR */
-enum BCFR_BIT {
- BCFR_RPAUSE = 0x0000ffff,
- BCFR_UNLIMITED = 0,
-};
-
-/* PIR */
-enum PIR_BIT {
- PIR_MDI = 0x08, PIR_MDO = 0x04, PIR_MMD = 0x02, PIR_MDC = 0x01,
-};
-
-/* PSR */
-enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, };
-
-/* EESR */
-enum EESR_BIT {
- EESR_TWB1 = 0x80000000,
- EESR_TWB = 0x40000000, /* same as TWB0 */
- EESR_TC1 = 0x20000000,
- EESR_TUC = 0x10000000,
- EESR_ROC = 0x08000000,
- EESR_TABT = 0x04000000,
- EESR_RABT = 0x02000000,
- EESR_RFRMER = 0x01000000, /* same as RFCOF */
- EESR_ADE = 0x00800000,
- EESR_ECI = 0x00400000,
- EESR_FTC = 0x00200000, /* same as TC or TC0 */
- EESR_TDE = 0x00100000,
- EESR_TFE = 0x00080000, /* same as TFUF */
- EESR_FRC = 0x00040000, /* same as FR */
- EESR_RDE = 0x00020000,
- EESR_RFE = 0x00010000,
- EESR_CND = 0x00000800,
- EESR_DLC = 0x00000400,
- EESR_CD = 0x00000200,
- EESR_RTO = 0x00000100,
- EESR_RMAF = 0x00000080,
- EESR_CEEF = 0x00000040,
- EESR_CELF = 0x00000020,
- EESR_RRF = 0x00000010,
- EESR_RTLF = 0x00000008,
- EESR_RTSF = 0x00000004,
- EESR_PRE = 0x00000002,
- EESR_CERF = 0x00000001,
-};
-
-#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
- EESR_RTO)
-#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \
- EESR_RDE | EESR_RFRMER | EESR_ADE | \
- EESR_TFE | EESR_TDE | EESR_ECI)
-#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
- EESR_TFE)
-
-/* EESIPR */
-enum DMAC_IM_BIT {
- DMAC_M_TWB = 0x40000000, DMAC_M_TABT = 0x04000000,
- DMAC_M_RABT = 0x02000000,
- DMAC_M_RFRMER = 0x01000000, DMAC_M_ADF = 0x00800000,
- DMAC_M_ECI = 0x00400000, DMAC_M_FTC = 0x00200000,
- DMAC_M_TDE = 0x00100000, DMAC_M_TFE = 0x00080000,
- DMAC_M_FRC = 0x00040000, DMAC_M_RDE = 0x00020000,
- DMAC_M_RFE = 0x00010000, DMAC_M_TINT4 = 0x00000800,
- DMAC_M_TINT3 = 0x00000400, DMAC_M_TINT2 = 0x00000200,
- DMAC_M_TINT1 = 0x00000100, DMAC_M_RINT8 = 0x00000080,
- DMAC_M_RINT5 = 0x00000010, DMAC_M_RINT4 = 0x00000008,
- DMAC_M_RINT3 = 0x00000004, DMAC_M_RINT2 = 0x00000002,
- DMAC_M_RINT1 = 0x00000001,
-};
-
-/* Receive descriptor bit */
-enum RD_STS_BIT {
- RD_RACT = 0x80000000, RD_RDEL = 0x40000000,
- RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
- RD_RFE = 0x08000000, RD_RFS10 = 0x00000200,
- RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080,
- RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020,
- RD_RFS5 = 0x00000010, RD_RFS4 = 0x00000008,
- RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002,
- RD_RFS1 = 0x00000001,
-};
-#define RDF1ST RD_RFP1
-#define RDFEND RD_RFP0
-#define RD_RFP (RD_RFP1|RD_RFP0)
-
-/* FCFTR */
-enum FCFTR_BIT {
- FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000,
- FCFTR_RFF0 = 0x00010000, FCFTR_RFD2 = 0x00000004,
- FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001,
-};
-#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
-#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)
-
-/* Transfer descriptor bit */
-enum TD_STS_BIT {
- TD_TACT = 0x80000000,
- TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000,
- TD_TFP0 = 0x10000000,
-};
-#define TDF1ST TD_TFP1
-#define TDFEND TD_TFP0
-#define TD_TFP (TD_TFP1|TD_TFP0)
-
-/* RMCR */
-#define DEFAULT_RMCR_VALUE 0x00000000
-
-/* ECMR */
-enum FELIC_MODE_BIT {
- ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000,
- ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
- ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
- ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
- ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
- ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004,
- ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001,
-};
-
-/* ECSR */
-enum ECSR_STATUS_BIT {
- ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10,
- ECSR_LCHNG = 0x04,
- ECSR_MPD = 0x02, ECSR_ICD = 0x01,
-};
-
-#define DEFAULT_ECSR_INIT (ECSR_BRCRX | ECSR_PSRTO | ECSR_LCHNG | \
- ECSR_ICD | ECSIPR_MPDIP)
-
-/* ECSIPR */
-enum ECSIPR_STATUS_MASK_BIT {
- ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10,
- ECSIPR_LCHNGIP = 0x04,
- ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01,
-};
-
-#define DEFAULT_ECSIPR_INIT (ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | \
- ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP)
-
-/* APR */
-enum APR_BIT {
- APR_AP = 0x00000001,
-};
-
-/* MPR */
-enum MPR_BIT {
- MPR_MP = 0x00000001,
-};
-
-/* TRSCER */
-enum DESC_I_BIT {
- DESC_I_TINT4 = 0x0800, DESC_I_TINT3 = 0x0400, DESC_I_TINT2 = 0x0200,
- DESC_I_TINT1 = 0x0100, DESC_I_RINT8 = 0x0080, DESC_I_RINT5 = 0x0010,
- DESC_I_RINT4 = 0x0008, DESC_I_RINT3 = 0x0004, DESC_I_RINT2 = 0x0002,
- DESC_I_RINT1 = 0x0001,
-};
-
-/* RPADIR */
-enum RPADIR_BIT {
- RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
- RPADIR_PADR = 0x0003f,
-};
-
-/* RFLR */
-#define RFLR_VALUE 0x1000
-
-/* FDR */
-#define DEFAULT_FDR_INIT 0x00000707
-
-enum phy_offsets {
- PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
- PHY_ANA = 4, PHY_ANL = 5, PHY_ANE = 6,
- PHY_16 = 16,
-};
-
-/* PHY_CTRL */
-enum PHY_CTRL_BIT {
- PHY_C_RESET = 0x8000, PHY_C_LOOPBK = 0x4000, PHY_C_SPEEDSL = 0x2000,
- PHY_C_ANEGEN = 0x1000, PHY_C_PWRDN = 0x0800, PHY_C_ISO = 0x0400,
- PHY_C_RANEG = 0x0200, PHY_C_DUPLEX = 0x0100, PHY_C_COLT = 0x0080,
-};
-#define DM9161_PHY_C_ANEGEN 0 /* auto nego special */
-
-/* PHY_STAT */
-enum PHY_STAT_BIT {
- PHY_S_100T4 = 0x8000, PHY_S_100X_F = 0x4000, PHY_S_100X_H = 0x2000,
- PHY_S_10T_F = 0x1000, PHY_S_10T_H = 0x0800, PHY_S_ANEGC = 0x0020,
- PHY_S_RFAULT = 0x0010, PHY_S_ANEGA = 0x0008, PHY_S_LINK = 0x0004,
- PHY_S_JAB = 0x0002, PHY_S_EXTD = 0x0001,
-};
-
-/* PHY_ANA */
-enum PHY_ANA_BIT {
- PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
- PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
- PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
- PHY_A_SEL = 0x001e,
-};
-/* PHY_ANL */
-enum PHY_ANL_BIT {
- PHY_L_NP = 0x8000, PHY_L_ACK = 0x4000, PHY_L_RF = 0x2000,
- PHY_L_FCS = 0x0400, PHY_L_T4 = 0x0200, PHY_L_FDX = 0x0100,
- PHY_L_HDX = 0x0080, PHY_L_10FDX = 0x0040, PHY_L_10HDX = 0x0020,
- PHY_L_SEL = 0x001f,
-};
-
-/* PHY_ANE */
-enum PHY_ANE_BIT {
- PHY_E_PDF = 0x0010, PHY_E_LPNPA = 0x0008, PHY_E_NPA = 0x0004,
- PHY_E_PRX = 0x0002, PHY_E_LPANEGA = 0x0001,
-};
-
-/* DM9161 */
-enum PHY_16_BIT {
- PHY_16_BP4B45 = 0x8000, PHY_16_BPSCR = 0x4000, PHY_16_BPALIGN = 0x2000,
- PHY_16_BP_ADPOK = 0x1000, PHY_16_Repeatmode = 0x0800,
- PHY_16_TXselect = 0x0400,
- PHY_16_Rsvd = 0x0200, PHY_16_RMIIEnable = 0x0100,
- PHY_16_Force100LNK = 0x0080,
- PHY_16_APDLED_CTL = 0x0040, PHY_16_COLLED_CTL = 0x0020,
- PHY_16_RPDCTR_EN = 0x0010,
- PHY_16_ResetStMch = 0x0008, PHY_16_PreamSupr = 0x0004,
- PHY_16_Sleepmode = 0x0002,
- PHY_16_RemoteLoopOut = 0x0001,
-};
-
-#define POST_RX 0x08
-#define POST_FW 0x04
-#define POST0_RX (POST_RX)
-#define POST0_FW (POST_FW)
-#define POST1_RX (POST_RX >> 2)
-#define POST1_FW (POST_FW >> 2)
-#define POST_ALL (POST0_RX | POST0_FW | POST1_RX | POST1_FW)
-
-/* ARSTR */
-enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
-
-/* TSU_FWEN0 */
-enum TSU_FWEN0_BIT {
- TSU_FWEN0_0 = 0x00000001,
-};
-
-/* TSU_ADSBSY */
-enum TSU_ADSBSY_BIT {
- TSU_ADSBSY_0 = 0x00000001,
-};
-
-/* TSU_TEN */
-enum TSU_TEN_BIT {
- TSU_TEN_0 = 0x80000000,
-};
-
-/* TSU_FWSL0 */
-enum TSU_FWSL0_BIT {
- TSU_FWSL0_FW50 = 0x1000, TSU_FWSL0_FW40 = 0x0800,
- TSU_FWSL0_FW30 = 0x0400, TSU_FWSL0_FW20 = 0x0200,
- TSU_FWSL0_FW10 = 0x0100, TSU_FWSL0_RMSA0 = 0x0010,
-};
-
-/* TSU_FWSLC */
-enum TSU_FWSLC_BIT {
- TSU_FWSLC_POSTENU = 0x2000, TSU_FWSLC_POSTENL = 0x1000,
- TSU_FWSLC_CAMSEL03 = 0x0080, TSU_FWSLC_CAMSEL02 = 0x0040,
- TSU_FWSLC_CAMSEL01 = 0x0020, TSU_FWSLC_CAMSEL00 = 0x0010,
- TSU_FWSLC_CAMSEL13 = 0x0008, TSU_FWSLC_CAMSEL12 = 0x0004,
- TSU_FWSLC_CAMSEL11 = 0x0002, TSU_FWSLC_CAMSEL10 = 0x0001,
-};
-
-/*
- * The sh ether Tx buffer descriptors.
- * This structure should be 20 bytes.
- */
-struct sh_eth_txdesc {
- u32 status; /* TD0 */
-#if defined(CONFIG_CPU_LITTLE_ENDIAN)
- u16 pad0; /* TD1 */
- u16 buffer_length; /* TD1 */
-#else
- u16 buffer_length; /* TD1 */
- u16 pad0; /* TD1 */
-#endif
- u32 addr; /* TD2 */
- u32 pad1; /* padding data */
-} __attribute__((aligned(2), packed));
-
-/*
- * The sh ether Rx buffer descriptors.
- * This structure should be 20 bytes.
- */
-struct sh_eth_rxdesc {
- u32 status; /* RD0 */
-#if defined(CONFIG_CPU_LITTLE_ENDIAN)
- u16 frame_length; /* RD1 */
- u16 buffer_length; /* RD1 */
-#else
- u16 buffer_length; /* RD1 */
- u16 frame_length; /* RD1 */
-#endif
- u32 addr; /* RD2 */
- u32 pad0; /* padding data */
-} __attribute__((aligned(2), packed));
-
-/* This structure is used by each CPU dependency handling. */
-struct sh_eth_cpu_data {
- /* optional functions */
- void (*chip_reset)(struct net_device *ndev);
- void (*set_duplex)(struct net_device *ndev);
- void (*set_rate)(struct net_device *ndev);
-
- /* mandatory initialize value */
- unsigned long eesipr_value;
-
- /* optional initialize value */
- unsigned long ecsr_value;
- unsigned long ecsipr_value;
- unsigned long fdr_value;
- unsigned long fcftr_value;
- unsigned long rpadir_value;
- unsigned long rmcr_value;
-
- /* interrupt checking mask */
- unsigned long tx_check;
- unsigned long eesr_err_check;
- unsigned long tx_error_check;
-
- /* hardware features */
- unsigned no_psr:1; /* EtherC DO NOT have PSR */
- unsigned apr:1; /* EtherC have APR */
- unsigned mpr:1; /* EtherC have MPR */
- unsigned tpauser:1; /* EtherC have TPAUSER */
- unsigned bculr:1; /* EtherC have BCULR */
- unsigned tsu:1; /* EtherC have TSU */
- unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
- unsigned rpadir:1; /* E-DMAC have RPADIR */
- unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
- unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
-};
-
-struct sh_eth_private {
- struct platform_device *pdev;
- struct sh_eth_cpu_data *cd;
- const u16 *reg_offset;
- void __iomem *tsu_addr;
- dma_addr_t rx_desc_dma;
- dma_addr_t tx_desc_dma;
- struct sh_eth_rxdesc *rx_ring;
- struct sh_eth_txdesc *tx_ring;
- struct sk_buff **rx_skbuff;
- struct sk_buff **tx_skbuff;
- struct net_device_stats stats;
- struct timer_list timer;
- spinlock_t lock;
- u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
- u32 cur_tx, dirty_tx;
- u32 rx_buf_sz; /* Based on MTU+slack. */
- int edmac_endian;
- /* MII transceiver section. */
- u32 phy_id; /* PHY ID */
- struct mii_bus *mii_bus; /* MDIO bus control */
- struct phy_device *phydev; /* PHY device control */
- enum phy_state link;
- phy_interface_t phy_interface;
- int msg_enable;
- int speed;
- int duplex;
- u32 rx_int_var, tx_int_var; /* interrupt control variables */
- char post_rx; /* POST receive */
- char post_fw; /* POST forward */
- struct net_device_stats tsu_stats; /* TSU forward status */
-
- unsigned no_ether_link:1;
- unsigned ether_link_active_low:1;
-};
-
-static inline void sh_eth_soft_swap(char *src, int len)
-{
-#ifdef __LITTLE_ENDIAN__
- u32 *p = (u32 *)src;
- u32 *maxp;
- maxp = p + ((len + sizeof(u32) - 1) / sizeof(u32));
-
- for (; p < maxp; p++)
- *p = swab32(*p);
-#endif
-}
-
-static inline void sh_eth_write(struct net_device *ndev, unsigned long data,
- int enum_index)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
-
- writel(data, ndev->base_addr + mdp->reg_offset[enum_index]);
-}
-
-static inline unsigned long sh_eth_read(struct net_device *ndev,
- int enum_index)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
-
- return readl(ndev->base_addr + mdp->reg_offset[enum_index]);
-}
-
-static inline void sh_eth_tsu_write(struct sh_eth_private *mdp,
- unsigned long data, int enum_index)
-{
- writel(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
-}
-
-static inline unsigned long sh_eth_tsu_read(struct sh_eth_private *mdp,
- int enum_index)
-{
- return readl(mdp->tsu_addr + mdp->reg_offset[enum_index]);
-}
-
-#endif /* #ifndef __SH_ETH_H__ */
diff --git a/drivers/net/slip/Kconfig b/drivers/net/slip/Kconfig
new file mode 100644
index 00000000000..211b160e4e9
--- /dev/null
+++ b/drivers/net/slip/Kconfig
@@ -0,0 +1,79 @@
+#
+# SLIP network device configuration
+#
+
+config SLIP
+ tristate "SLIP (serial line) support"
+ ---help---
+ Say Y if you intend to use SLIP or CSLIP (compressed SLIP) to
+ connect to your Internet service provider or to connect to some
+ other local Unix box or if you want to configure your Linux box as a
+ Slip/CSlip server for other people to dial in. SLIP (Serial Line
+ Internet Protocol) is a protocol used to send Internet traffic over
+ serial connections such as telephone lines or null modem cables;
+ nowadays, the protocol PPP is more commonly used for this same
+ purpose.
+
+ Normally, your access provider has to support SLIP in order for you
+ to be able to use it, but there is now a SLIP emulator called SLiRP
+ around (available from
+ <ftp://ibiblio.org/pub/Linux/system/network/serial/>) which
+ allows you to use SLIP over a regular dial up shell connection. If
+ you plan to use SLiRP, make sure to say Y to CSLIP, below. The
+ NET-3-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, explains how to
+ configure SLIP. Note that you don't need this option if you just
+ want to run term (term is a program which gives you almost full
+ Internet connectivity if you have a regular dial up shell account on
+ some Internet connected Unix computer. Read
+ <http://www.bart.nl/~patrickr/term-howto/Term-HOWTO.html>). SLIP
+ support will enlarge your kernel by about 4 KB. If unsure, say N.
+
+ To compile this driver as a module, choose M here. The module
+ will be called slip.
+
+config SLHC
+ tristate
+ ---help---
+ This option enables Van Jacobsen serial line header compression
+ routines.
+
+if SLIP
+
+config SLIP_COMPRESSED
+ bool "CSLIP compressed headers"
+ depends on SLIP
+ select SLHC
+ ---help---
+ This protocol is faster than SLIP because it uses compression on the
+ TCP/IP headers (not on the data itself), but it has to be supported
+ on both ends. Ask your access provider if you are not sure and
+ answer Y, just in case. You will still be able to use plain SLIP. If
+ you plan to use SLiRP, the SLIP emulator (available from
+ <ftp://ibiblio.org/pub/Linux/system/network/serial/>) which
+ allows you to use SLIP over a regular dial up shell connection, you
+ definitely want to say Y here. The NET-3-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, explains how to configure
+ CSLIP. This won't enlarge your kernel.
+
+config SLIP_SMART
+ bool "Keepalive and linefill"
+ depends on SLIP
+ ---help---
+ Adds additional capabilities to the SLIP driver to support the
+ RELCOM line fill and keepalive monitoring. Ideal on poor quality
+ analogue lines.
+
+config SLIP_MODE_SLIP6
+ bool "Six bit SLIP encapsulation"
+ depends on SLIP
+ ---help---
+ Just occasionally you may need to run IP over hostile serial
+ networks that don't pass all control characters or are only seven
+ bit. Saying Y here adds an extra mode you can use with SLIP:
+ "slip6". In this mode, SLIP will only send normal ASCII symbols over
+ the serial device. Naturally, this has to be supported at the other
+ end of the link as well. It's good enough, for example, to run IP
+ over the async ports of a Camtec JNT Pad. If unsure, say N.
+
+endif # SLIP
diff --git a/drivers/net/slip/Makefile b/drivers/net/slip/Makefile
new file mode 100644
index 00000000000..e3ebc59e6fb
--- /dev/null
+++ b/drivers/net/slip/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the SLIP network device drivers.
+#
+
+obj-$(CONFIG_SLIP) += slip.o
+obj-$(CONFIG_SLHC) += slhc.o
diff --git a/drivers/net/slhc.c b/drivers/net/slip/slhc.c
index 0a0a6643cf3..0a0a6643cf3 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slip/slhc.c
diff --git a/drivers/net/slip.c b/drivers/net/slip/slip.c
index 4c617534f93..ba08341fb92 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip/slip.c
@@ -562,34 +562,33 @@ static struct rtnl_link_stats64 *
sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct net_device_stats *devstats = &dev->stats;
- unsigned long c_rx_dropped = 0;
#ifdef SL_INCLUDE_CSLIP
- unsigned long c_rx_fifo_errors = 0;
- unsigned long c_tx_fifo_errors = 0;
- unsigned long c_collisions = 0;
struct slip *sl = netdev_priv(dev);
struct slcompress *comp = sl->slcomp;
-
- if (comp) {
- c_rx_fifo_errors = comp->sls_i_compressed;
- c_rx_dropped = comp->sls_i_tossed;
- c_tx_fifo_errors = comp->sls_o_compressed;
- c_collisions = comp->sls_o_misses;
- }
- stats->rx_fifo_errors = sl->rx_compressed + c_rx_fifo_errors;
- stats->tx_fifo_errors = sl->tx_compressed + c_tx_fifo_errors;
- stats->collisions = sl->tx_misses + c_collisions;
#endif
stats->rx_packets = devstats->rx_packets;
stats->tx_packets = devstats->tx_packets;
stats->rx_bytes = devstats->rx_bytes;
stats->tx_bytes = devstats->tx_bytes;
- stats->rx_dropped = devstats->rx_dropped + c_rx_dropped;
+ stats->rx_dropped = devstats->rx_dropped;
stats->tx_dropped = devstats->tx_dropped;
stats->tx_errors = devstats->tx_errors;
stats->rx_errors = devstats->rx_errors;
stats->rx_over_errors = devstats->rx_over_errors;
+#ifdef SL_INCLUDE_CSLIP
+ if (comp) {
+ /* Generic compressed statistics */
+ stats->rx_compressed = comp->sls_i_compressed;
+ stats->tx_compressed = comp->sls_o_compressed;
+
+ /* Are we really still needs this? */
+ stats->rx_fifo_errors += comp->sls_i_compressed;
+ stats->rx_dropped += comp->sls_i_tossed;
+ stats->tx_fifo_errors += comp->sls_o_compressed;
+ stats->collisions += comp->sls_o_misses;
+ }
+#endif
return stats;
}
diff --git a/drivers/net/slip.h b/drivers/net/slip/slip.h
index aa0764ce234..67673cf1266 100644
--- a/drivers/net/slip.h
+++ b/drivers/net/slip/slip.h
@@ -65,15 +65,6 @@ struct slip {
unsigned char *xbuff; /* transmitter buffer */
unsigned char *xhead; /* pointer to next byte to XMIT */
int xleft; /* bytes left in XMIT queue */
-
- /* SLIP interface statistics. */
-#ifdef SL_INCLUDE_CSLIP
- unsigned long tx_compressed;
- unsigned long rx_compressed;
- unsigned long tx_misses;
-#endif
- /* Detailed SLIP statistics. */
-
int mtu; /* Our mtu (to spot changes!) */
int buffsize; /* Max buffers sizes */
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
deleted file mode 100644
index 7df7df4e79c..00000000000
--- a/drivers/net/stmmac/Kconfig
+++ /dev/null
@@ -1,57 +0,0 @@
-config STMMAC_ETH
- tristate "STMicroelectronics 10/100/1000 Ethernet driver"
- select MII
- select PHYLIB
- select CRC32
- depends on NETDEVICES && HAS_IOMEM
- help
- This is the driver for the Ethernet IPs are built around a
- Synopsys IP Core and only tested on the STMicroelectronics
- platforms.
-
-if STMMAC_ETH
-
-config STMMAC_DA
- bool "STMMAC DMA arbitration scheme"
- default n
- help
- Selecting this option, rx has priority over Tx (only for Giga
- Ethernet device).
- By default, the DMA arbitration scheme is based on Round-robin
- (rx:tx priority is 1:1).
-
-config STMMAC_DUAL_MAC
- bool "STMMAC: dual mac support (EXPERIMENTAL)"
- default n
- depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
- help
- Some ST SoCs (for example the stx7141 and stx7200c2) have two
- Ethernet Controllers. This option turns on the second Ethernet
- device on this kind of platforms.
-
-config STMMAC_TIMER
- bool "STMMAC Timer optimisation"
- default n
- depends on RTC_HCTOSYS_DEVICE
- help
- Use an external timer for mitigating the number of network
- interrupts. Currently, for SH architectures, it is possible
- to use the TMU channel 2 and the SH-RTC device.
-
-choice
- prompt "Select Timer device"
- depends on STMMAC_TIMER
-
-config STMMAC_TMU_TIMER
- bool "TMU channel 2"
- depends on CPU_SH4
- help
-
-config STMMAC_RTC_TIMER
- bool "Real time clock"
- depends on RTC_CLASS
- help
-
-endchoice
-
-endif
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
deleted file mode 100644
index 9691733ddb8..00000000000
--- a/drivers/net/stmmac/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_STMMAC_ETH) += stmmac.o
-stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
-stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
- dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
- dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
deleted file mode 100644
index 375ea193e13..00000000000
--- a/drivers/net/stmmac/common.h
+++ /dev/null
@@ -1,252 +0,0 @@
-/*******************************************************************************
- STMMAC Common Header File
-
- Copyright (C) 2007-2009 STMicroelectronics Ltd
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
-*******************************************************************************/
-
-#include <linux/netdevice.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define STMMAC_VLAN_TAG_USED
-#include <linux/if_vlan.h>
-#endif
-
-#include "descs.h"
-
-#undef CHIP_DEBUG_PRINT
-/* Turn-on extra printk debug for MAC core, dma and descriptors */
-/* #define CHIP_DEBUG_PRINT */
-
-#ifdef CHIP_DEBUG_PRINT
-#define CHIP_DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define CHIP_DBG(fmt, args...) do { } while (0)
-#endif
-
-#undef FRAME_FILTER_DEBUG
-/* #define FRAME_FILTER_DEBUG */
-
-struct stmmac_extra_stats {
- /* Transmit errors */
- unsigned long tx_underflow ____cacheline_aligned;
- unsigned long tx_carrier;
- unsigned long tx_losscarrier;
- unsigned long tx_heartbeat;
- unsigned long tx_deferred;
- unsigned long tx_vlan;
- unsigned long tx_jabber;
- unsigned long tx_frame_flushed;
- unsigned long tx_payload_error;
- unsigned long tx_ip_header_error;
- /* Receive errors */
- unsigned long rx_desc;
- unsigned long rx_partial;
- unsigned long rx_runt;
- unsigned long rx_toolong;
- unsigned long rx_collision;
- unsigned long rx_crc;
- unsigned long rx_length;
- unsigned long rx_mii;
- unsigned long rx_multicast;
- unsigned long rx_gmac_overflow;
- unsigned long rx_watchdog;
- unsigned long da_rx_filter_fail;
- unsigned long sa_rx_filter_fail;
- unsigned long rx_missed_cntr;
- unsigned long rx_overflow_cntr;
- unsigned long rx_vlan;
- /* Tx/Rx IRQ errors */
- unsigned long tx_undeflow_irq;
- unsigned long tx_process_stopped_irq;
- unsigned long tx_jabber_irq;
- unsigned long rx_overflow_irq;
- unsigned long rx_buf_unav_irq;
- unsigned long rx_process_stopped_irq;
- unsigned long rx_watchdog_irq;
- unsigned long tx_early_irq;
- unsigned long fatal_bus_error_irq;
- /* Extra info */
- unsigned long threshold;
- unsigned long tx_pkt_n;
- unsigned long rx_pkt_n;
- unsigned long poll_n;
- unsigned long sched_timer_n;
- unsigned long normal_irq_n;
-};
-
-#define HASH_TABLE_SIZE 64
-#define PAUSE_TIME 0x200
-
-/* Flow Control defines */
-#define FLOW_OFF 0
-#define FLOW_RX 1
-#define FLOW_TX 2
-#define FLOW_AUTO (FLOW_TX | FLOW_RX)
-
-#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
-
-enum rx_frame_status { /* IPC status */
- good_frame = 0,
- discard_frame = 1,
- csum_none = 2,
- llc_snap = 4,
-};
-
-enum tx_dma_irq_status {
- tx_hard_error = 1,
- tx_hard_error_bump_tc = 2,
- handle_tx_rx = 3,
-};
-
-/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
-#define BUF_SIZE_16KiB 16384
-#define BUF_SIZE_8KiB 8192
-#define BUF_SIZE_4KiB 4096
-#define BUF_SIZE_2KiB 2048
-
-/* Power Down and WOL */
-#define PMT_NOT_SUPPORTED 0
-#define PMT_SUPPORTED 1
-
-/* Common MAC defines */
-#define MAC_CTRL_REG 0x00000000 /* MAC Control */
-#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
-#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
-
-/* MAC Management Counters register */
-#define MMC_CONTROL 0x00000100 /* MMC Control */
-#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
-#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
-#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
-#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
-
-#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
-#define MMC_CONTROL_MAX_FRM_SHIFT 3
-#define MMC_CONTROL_MAX_FRAME 0x7FF
-
-struct stmmac_desc_ops {
- /* DMA RX descriptor ring initialization */
- void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic);
- /* DMA TX descriptor ring initialization */
- void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
-
- /* Invoked by the xmit function to prepare the tx descriptor */
- void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
- int csum_flag);
- /* Set/get the owner of the descriptor */
- void (*set_tx_owner) (struct dma_desc *p);
- int (*get_tx_owner) (struct dma_desc *p);
- /* Invoked by the xmit function to close the tx descriptor */
- void (*close_tx_desc) (struct dma_desc *p);
- /* Clean the tx descriptor as soon as the tx irq is received */
- void (*release_tx_desc) (struct dma_desc *p);
- /* Clear interrupt on tx frame completion. When this bit is
- * set an interrupt happens as soon as the frame is transmitted */
- void (*clear_tx_ic) (struct dma_desc *p);
- /* Last tx segment reports the transmit status */
- int (*get_tx_ls) (struct dma_desc *p);
- /* Return the transmit status looking at the TDES1 */
- int (*tx_status) (void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, void __iomem *ioaddr);
- /* Get the buffer size from the descriptor */
- int (*get_tx_len) (struct dma_desc *p);
- /* Handle extra events on specific interrupts hw dependent */
- int (*get_rx_owner) (struct dma_desc *p);
- void (*set_rx_owner) (struct dma_desc *p);
- /* Get the receive frame size */
- int (*get_rx_frame_len) (struct dma_desc *p);
- /* Return the reception status looking at the RDES1 */
- int (*rx_status) (void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p);
-};
-
-struct stmmac_dma_ops {
- /* DMA core initialization */
- int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
- /* Dump DMA registers */
- void (*dump_regs) (void __iomem *ioaddr);
- /* Set tx/rx threshold in the csr6 register
- * An invalid value enables the store-and-forward mode */
- void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode);
- /* To track extra statistic (if supported) */
- void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
- void __iomem *ioaddr);
- void (*enable_dma_transmission) (void __iomem *ioaddr);
- void (*enable_dma_irq) (void __iomem *ioaddr);
- void (*disable_dma_irq) (void __iomem *ioaddr);
- void (*start_tx) (void __iomem *ioaddr);
- void (*stop_tx) (void __iomem *ioaddr);
- void (*start_rx) (void __iomem *ioaddr);
- void (*stop_rx) (void __iomem *ioaddr);
- int (*dma_interrupt) (void __iomem *ioaddr,
- struct stmmac_extra_stats *x);
-};
-
-struct stmmac_ops {
- /* MAC core initialization */
- void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
- /* Support checksum offload engine */
- int (*rx_coe) (void __iomem *ioaddr);
- /* Dump MAC registers */
- void (*dump_regs) (void __iomem *ioaddr);
- /* Handle extra events on specific interrupts hw dependent */
- void (*host_irq_status) (void __iomem *ioaddr);
- /* Multicast filter setting */
- void (*set_filter) (struct net_device *dev);
- /* Flow control setting */
- void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex,
- unsigned int fc, unsigned int pause_time);
- /* Set power management mode (e.g. magic frame) */
- void (*pmt) (void __iomem *ioaddr, unsigned long mode);
- /* Set/Get Unicast MAC addresses */
- void (*set_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
- unsigned int reg_n);
- void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
- unsigned int reg_n);
-};
-
-struct mac_link {
- int port;
- int duplex;
- int speed;
-};
-
-struct mii_regs {
- unsigned int addr; /* MII Address */
- unsigned int data; /* MII Data */
-};
-
-struct mac_device_info {
- const struct stmmac_ops *mac;
- const struct stmmac_desc_ops *desc;
- const struct stmmac_dma_ops *dma;
- struct mii_regs mii; /* MII register Addresses */
- struct mac_link link;
-};
-
-struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
-struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
-
-extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
- unsigned int high, unsigned int low);
-extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
- unsigned int high, unsigned int low);
-extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
deleted file mode 100644
index 7ed8fb6c211..00000000000
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*******************************************************************************
- STMMAC Ethtool support
-
- Copyright (C) 2007-2009 STMicroelectronics Ltd
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
-*******************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/interrupt.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-#include <asm/io.h>
-
-#include "stmmac.h"
-#include "dwmac_dma.h"
-
-#define REG_SPACE_SIZE 0x1054
-#define MAC100_ETHTOOL_NAME "st_mac100"
-#define GMAC_ETHTOOL_NAME "st_gmac"
-
-struct stmmac_stats {
- char stat_string[ETH_GSTRING_LEN];
- int sizeof_stat;
- int stat_offset;
-};
-
-#define STMMAC_STAT(m) \
- { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \
- offsetof(struct stmmac_priv, xstats.m)}
-
-static const struct stmmac_stats stmmac_gstrings_stats[] = {
- STMMAC_STAT(tx_underflow),
- STMMAC_STAT(tx_carrier),
- STMMAC_STAT(tx_losscarrier),
- STMMAC_STAT(tx_heartbeat),
- STMMAC_STAT(tx_deferred),
- STMMAC_STAT(tx_vlan),
- STMMAC_STAT(rx_vlan),
- STMMAC_STAT(tx_jabber),
- STMMAC_STAT(tx_frame_flushed),
- STMMAC_STAT(tx_payload_error),
- STMMAC_STAT(tx_ip_header_error),
- STMMAC_STAT(rx_desc),
- STMMAC_STAT(rx_partial),
- STMMAC_STAT(rx_runt),
- STMMAC_STAT(rx_toolong),
- STMMAC_STAT(rx_collision),
- STMMAC_STAT(rx_crc),
- STMMAC_STAT(rx_length),
- STMMAC_STAT(rx_mii),
- STMMAC_STAT(rx_multicast),
- STMMAC_STAT(rx_gmac_overflow),
- STMMAC_STAT(rx_watchdog),
- STMMAC_STAT(da_rx_filter_fail),
- STMMAC_STAT(sa_rx_filter_fail),
- STMMAC_STAT(rx_missed_cntr),
- STMMAC_STAT(rx_overflow_cntr),
- STMMAC_STAT(tx_undeflow_irq),
- STMMAC_STAT(tx_process_stopped_irq),
- STMMAC_STAT(tx_jabber_irq),
- STMMAC_STAT(rx_overflow_irq),
- STMMAC_STAT(rx_buf_unav_irq),
- STMMAC_STAT(rx_process_stopped_irq),
- STMMAC_STAT(rx_watchdog_irq),
- STMMAC_STAT(tx_early_irq),
- STMMAC_STAT(fatal_bus_error_irq),
- STMMAC_STAT(threshold),
- STMMAC_STAT(tx_pkt_n),
- STMMAC_STAT(rx_pkt_n),
- STMMAC_STAT(poll_n),
- STMMAC_STAT(sched_timer_n),
- STMMAC_STAT(normal_irq_n),
-};
-#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
-
-static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- if (!priv->plat->has_gmac)
- strcpy(info->driver, MAC100_ETHTOOL_NAME);
- else
- strcpy(info->driver, GMAC_ETHTOOL_NAME);
-
- strcpy(info->version, DRV_MODULE_VERSION);
- info->fw_version[0] = '\0';
- info->n_stats = STMMAC_STATS_LEN;
-}
-
-static int stmmac_ethtool_getsettings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- struct phy_device *phy = priv->phydev;
- int rc;
- if (phy == NULL) {
- pr_err("%s: %s: PHY is not registered\n",
- __func__, dev->name);
- return -ENODEV;
- }
- if (!netif_running(dev)) {
- pr_err("%s: interface is disabled: we cannot track "
- "link speed / duplex setting\n", dev->name);
- return -EBUSY;
- }
- cmd->transceiver = XCVR_INTERNAL;
- spin_lock_irq(&priv->lock);
- rc = phy_ethtool_gset(phy, cmd);
- spin_unlock_irq(&priv->lock);
- return rc;
-}
-
-static int stmmac_ethtool_setsettings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- struct phy_device *phy = priv->phydev;
- int rc;
-
- spin_lock(&priv->lock);
- rc = phy_ethtool_sset(phy, cmd);
- spin_unlock(&priv->lock);
-
- return rc;
-}
-
-static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- return priv->msg_enable;
-}
-
-static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- priv->msg_enable = level;
-
-}
-
-static int stmmac_check_if_running(struct net_device *dev)
-{
- if (!netif_running(dev))
- return -EBUSY;
- return 0;
-}
-
-static int stmmac_ethtool_get_regs_len(struct net_device *dev)
-{
- return REG_SPACE_SIZE;
-}
-
-static void stmmac_ethtool_gregs(struct net_device *dev,
- struct ethtool_regs *regs, void *space)
-{
- int i;
- u32 *reg_space = (u32 *) space;
-
- struct stmmac_priv *priv = netdev_priv(dev);
-
- memset(reg_space, 0x0, REG_SPACE_SIZE);
-
- if (!priv->plat->has_gmac) {
- /* MAC registers */
- for (i = 0; i < 12; i++)
- reg_space[i] = readl(priv->ioaddr + (i * 4));
- /* DMA registers */
- for (i = 0; i < 9; i++)
- reg_space[i + 12] =
- readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
- reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
- reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
- } else {
- /* MAC registers */
- for (i = 0; i < 55; i++)
- reg_space[i] = readl(priv->ioaddr + (i * 4));
- /* DMA registers */
- for (i = 0; i < 22; i++)
- reg_space[i + 55] =
- readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
- }
-}
-
-static void
-stmmac_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct stmmac_priv *priv = netdev_priv(netdev);
-
- spin_lock(&priv->lock);
-
- pause->rx_pause = 0;
- pause->tx_pause = 0;
- pause->autoneg = priv->phydev->autoneg;
-
- if (priv->flow_ctrl & FLOW_RX)
- pause->rx_pause = 1;
- if (priv->flow_ctrl & FLOW_TX)
- pause->tx_pause = 1;
-
- spin_unlock(&priv->lock);
-}
-
-static int
-stmmac_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct stmmac_priv *priv = netdev_priv(netdev);
- struct phy_device *phy = priv->phydev;
- int new_pause = FLOW_OFF;
- int ret = 0;
-
- spin_lock(&priv->lock);
-
- if (pause->rx_pause)
- new_pause |= FLOW_RX;
- if (pause->tx_pause)
- new_pause |= FLOW_TX;
-
- priv->flow_ctrl = new_pause;
- phy->autoneg = pause->autoneg;
-
- if (phy->autoneg) {
- if (netif_running(netdev))
- ret = phy_start_aneg(phy);
- } else
- priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
- priv->flow_ctrl, priv->pause);
- spin_unlock(&priv->lock);
- return ret;
-}
-
-static void stmmac_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *dummy, u64 *data)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int i;
-
- /* Update HW stats if supported */
- priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
- priv->ioaddr);
-
- for (i = 0; i < STMMAC_STATS_LEN; i++) {
- char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
- data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
- }
-}
-
-static int stmmac_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return STMMAC_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
- int i;
- u8 *p = data;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < STMMAC_STATS_LEN; i++) {
- memcpy(p, stmmac_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- break;
- default:
- WARN_ON(1);
- break;
- }
-}
-
-/* Currently only support WOL through Magic packet. */
-static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- spin_lock_irq(&priv->lock);
- if (device_can_wakeup(priv->device)) {
- wol->supported = WAKE_MAGIC | WAKE_UCAST;
- wol->wolopts = priv->wolopts;
- }
- spin_unlock_irq(&priv->lock);
-}
-
-static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- u32 support = WAKE_MAGIC | WAKE_UCAST;
-
- if (!device_can_wakeup(priv->device))
- return -EINVAL;
-
- if (wol->wolopts & ~support)
- return -EINVAL;
-
- if (wol->wolopts) {
- pr_info("stmmac: wakeup enable\n");
- device_set_wakeup_enable(priv->device, 1);
- enable_irq_wake(dev->irq);
- } else {
- device_set_wakeup_enable(priv->device, 0);
- disable_irq_wake(dev->irq);
- }
-
- spin_lock_irq(&priv->lock);
- priv->wolopts = wol->wolopts;
- spin_unlock_irq(&priv->lock);
-
- return 0;
-}
-
-static struct ethtool_ops stmmac_ethtool_ops = {
- .begin = stmmac_check_if_running,
- .get_drvinfo = stmmac_ethtool_getdrvinfo,
- .get_settings = stmmac_ethtool_getsettings,
- .set_settings = stmmac_ethtool_setsettings,
- .get_msglevel = stmmac_ethtool_getmsglevel,
- .set_msglevel = stmmac_ethtool_setmsglevel,
- .get_regs = stmmac_ethtool_gregs,
- .get_regs_len = stmmac_ethtool_get_regs_len,
- .get_link = ethtool_op_get_link,
- .get_pauseparam = stmmac_get_pauseparam,
- .set_pauseparam = stmmac_set_pauseparam,
- .get_ethtool_stats = stmmac_get_ethtool_stats,
- .get_strings = stmmac_get_strings,
- .get_wol = stmmac_get_wol,
- .set_wol = stmmac_set_wol,
- .get_sset_count = stmmac_get_sset_count,
-};
-
-void stmmac_set_ethtool_ops(struct net_device *netdev)
-{
- SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
-}
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index d16880d7099..58f13adaa54 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -33,7 +33,7 @@
#include <asm/prom.h>
#endif
-#include "sungem_phy.h"
+#include <linux/sungem_phy.h>
/* Link modes of the BCM5400 PHY */
static const int phy_BCM5400_link_table[8][3] = {
@@ -1156,7 +1156,7 @@ static struct mii_phy_def* mii_phy_table[] = {
NULL
};
-int mii_phy_probe(struct mii_phy *phy, int mii_id)
+int sungem_phy_probe(struct mii_phy *phy, int mii_id)
{
int rc;
u32 id;
@@ -1195,6 +1195,5 @@ fail:
return -ENODEV;
}
-EXPORT_SYMBOL(mii_phy_probe);
+EXPORT_SYMBOL(sungem_phy_probe);
MODULE_LICENSE("GPL");
-
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index b6162fe2348..ef9fdf3652f 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -282,7 +282,7 @@ static const struct net_device_ops xl_netdev_ops = {
.ndo_stop = xl_close,
.ndo_start_xmit = xl_xmit,
.ndo_change_mtu = xl_change_mtu,
- .ndo_set_multicast_list = xl_set_rx_mode,
+ .ndo_set_rx_mode = xl_set_rx_mode,
.ndo_set_mac_address = xl_set_mac_address,
};
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
index c4137b0f808..c7e0149d151 100644
--- a/drivers/net/tokenring/Kconfig
+++ b/drivers/net/tokenring/Kconfig
@@ -4,9 +4,9 @@
# So far, we only have PCI, ISA, and MCA token ring devices
menuconfig TR
- tristate "Token Ring driver support"
+ bool "Token Ring driver support"
depends on NETDEVICES && !UML
- depends on (PCI || ISA || MCA || CCW)
+ depends on (PCI || ISA || MCA || CCW || PCMCIA)
select LLC
help
Token Ring is IBM's way of communication on a local network; the
@@ -20,6 +20,17 @@ menuconfig TR
if TR
+config PCMCIA_IBMTR
+ tristate "IBM PCMCIA tokenring adapter support"
+ depends on IBMTR!=y && PCMCIA
+ ---help---
+ Say Y here if you intend to attach this type of Token Ring PCMCIA
+ card to your computer. You then also need to say Y to "Token Ring
+ driver support".
+
+ To compile this driver as a module, choose M here: the module will be
+ called ibmtr_cs.
+
config IBMTR
tristate "IBM Tropic chipset based adapter support"
depends on ISA || MCA
diff --git a/drivers/net/tokenring/Makefile b/drivers/net/tokenring/Makefile
index c88b0a5e538..f1be8d97b7a 100644
--- a/drivers/net/tokenring/Makefile
+++ b/drivers/net/tokenring/Makefile
@@ -2,14 +2,15 @@
# Makefile for drivers/net/tokenring
#
-obj-$(CONFIG_IBMTR) += ibmtr.o
-obj-$(CONFIG_IBMOL) += olympic.o
-obj-$(CONFIG_IBMLS) += lanstreamer.o
-obj-$(CONFIG_TMS380TR) += tms380tr.o
-obj-$(CONFIG_ABYSS) += abyss.o
-obj-$(CONFIG_MADGEMC) += madgemc.o
-obj-$(CONFIG_PROTEON) += proteon.o
-obj-$(CONFIG_TMSPCI) += tmspci.o
-obj-$(CONFIG_SKISA) += skisa.o
-obj-$(CONFIG_SMCTR) += smctr.o
+obj-$(CONFIG_PCMCIA_IBMTR) += ibmtr_cs.o
+obj-$(CONFIG_IBMTR) += ibmtr.o
+obj-$(CONFIG_IBMOL) += olympic.o
+obj-$(CONFIG_IBMLS) += lanstreamer.o
+obj-$(CONFIG_TMS380TR) += tms380tr.o
+obj-$(CONFIG_ABYSS) += abyss.o
+obj-$(CONFIG_MADGEMC) += madgemc.o
+obj-$(CONFIG_PROTEON) += proteon.o
+obj-$(CONFIG_TMSPCI) += tmspci.o
+obj-$(CONFIG_SKISA) += skisa.o
+obj-$(CONFIG_SMCTR) += smctr.o
obj-$(CONFIG_3C359) += 3c359.o
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index e257a00fe14..b5c8c18f504 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -823,7 +823,7 @@ static const struct net_device_ops trdev_netdev_ops = {
.ndo_open = tok_open,
.ndo_stop = tok_close,
.ndo_start_xmit = tok_send_packet,
- .ndo_set_multicast_list = tok_set_multicast_list,
+ .ndo_set_rx_mode = tok_set_multicast_list,
.ndo_change_mtu = ibmtr_change_mtu,
};
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/tokenring/ibmtr_cs.c
index 6006d5488fb..91b684630fc 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/tokenring/ibmtr_cs.c
@@ -66,7 +66,7 @@
#include <asm/system.h>
#define PCMCIA
-#include "../tokenring/ibmtr.c"
+#include "ibmtr.c"
/*====================================================================*/
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 9354ca9da57..8d71e0d2906 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -231,7 +231,7 @@ static const struct net_device_ops streamer_netdev_ops = {
#if STREAMER_IOCTL
.ndo_do_ioctl = streamer_ioctl,
#endif
- .ndo_set_multicast_list = streamer_set_rx_mode,
+ .ndo_set_rx_mode = streamer_set_rx_mode,
.ndo_set_mac_address = streamer_set_mac_address,
};
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index e3855aeb13d..fd8dce90c95 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -201,7 +201,7 @@ static const struct net_device_ops olympic_netdev_ops = {
.ndo_stop = olympic_close,
.ndo_start_xmit = olympic_xmit,
.ndo_change_mtu = olympic_change_mtu,
- .ndo_set_multicast_list = olympic_set_rx_mode,
+ .ndo_set_rx_mode = olympic_set_rx_mode,
.ndo_set_mac_address = olympic_set_mac_address,
};
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index d9044aba7af..029846a9863 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -3623,7 +3623,7 @@ static const struct net_device_ops smctr_netdev_ops = {
.ndo_start_xmit = smctr_send_packet,
.ndo_tx_timeout = smctr_timeout,
.ndo_get_stats = smctr_get_stats,
- .ndo_set_multicast_list = smctr_set_multicast_list,
+ .ndo_set_rx_mode = smctr_set_multicast_list,
};
static int __init smctr_probe1(struct net_device *dev, int ioaddr)
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 793020347e5..65e9cf3a71f 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -2289,7 +2289,7 @@ const struct net_device_ops tms380tr_netdev_ops = {
.ndo_start_xmit = tms380tr_send_packet,
.ndo_tx_timeout = tms380tr_timeout,
.ndo_get_stats = tms380tr_get_stats,
- .ndo_set_multicast_list = tms380tr_set_multicast_list,
+ .ndo_set_rx_mode = tms380tr_set_multicast_list,
.ndo_set_mac_address = tms380tr_set_mac_address,
};
EXPORT_SYMBOL(tms380tr_netdev_ops);
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
deleted file mode 100644
index 1f8d4a8d8ea..00000000000
--- a/drivers/net/tulip/Kconfig
+++ /dev/null
@@ -1,171 +0,0 @@
-#
-# Tulip family network device configuration
-#
-
-menuconfig NET_TULIP
- bool "\"Tulip\" family network device support"
- depends on PCI || EISA || CARDBUS
- help
- This selects the "Tulip" family of EISA/PCI network cards.
-
-if NET_TULIP
-
-config DE2104X
- tristate "Early DECchip Tulip (dc2104x) PCI support"
- depends on PCI
- select CRC32
- ---help---
- This driver is developed for the SMC EtherPower series Ethernet
- cards and also works with cards based on the DECchip
- 21040 (Tulip series) chips. Some LinkSys PCI cards are
- of this type. (If your card is NOT SMC EtherPower 10/100 PCI
- (smc9332dst), you can also try the driver for "Generic DECchip"
- cards, below. However, most people with a network card of this type
- will say Y here.) Do read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module will
- be called de2104x.
-
-config DE2104X_DSL
- int "Descriptor Skip Length in 32 bit longwords"
- depends on DE2104X
- range 0 31
- default 0
- help
- Setting this value allows to align ring buffer descriptors into their
- own cache lines. Value of 4 corresponds to the typical 32 byte line
- (the descriptor is 16 bytes). This is necessary on systems that lack
- cache coherence, an example is PowerMac 5500. Otherwise 0 is safe.
- Default is 0, and range is 0 to 31.
-
-config TULIP
- tristate "DECchip Tulip (dc2114x) PCI support"
- depends on PCI
- select CRC32
- ---help---
- This driver is developed for the SMC EtherPower series Ethernet
- cards and also works with cards based on the DECchip
- 21140 (Tulip series) chips. Some LinkSys PCI cards are
- of this type. (If your card is NOT SMC EtherPower 10/100 PCI
- (smc9332dst), you can also try the driver for "Generic DECchip"
- cards, above. However, most people with a network card of this type
- will say Y here.) Do read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module will
- be called tulip.
-
-config TULIP_MWI
- bool "New bus configuration (EXPERIMENTAL)"
- depends on TULIP && EXPERIMENTAL
- help
- This configures your Tulip card specifically for the card and
- system cache line size type you are using.
-
- This is experimental code, not yet tested on many boards.
-
- If unsure, say N.
-
-config TULIP_MMIO
- bool "Use PCI shared mem for NIC registers"
- depends on TULIP
- help
- Use PCI shared memory for the NIC registers, rather than going through
- the Tulip's PIO (programmed I/O ports). Faster, but could produce
- obscure bugs if your mainboard has memory controller timing issues.
- If in doubt, say N.
-
-config TULIP_NAPI
- bool "Use RX polling (NAPI)"
- depends on TULIP
- help
- NAPI is a new driver API designed to reduce CPU and interrupt load
- when the driver is receiving lots of packets from the card. It is
- still somewhat experimental and thus not yet enabled by default.
-
- If your estimated Rx load is 10kpps or more, or if the card will be
- deployed on potentially unfriendly networks (e.g. in a firewall),
- then say Y here.
-
- If in doubt, say N.
-
-config TULIP_NAPI_HW_MITIGATION
- bool "Use Interrupt Mitigation"
- depends on TULIP_NAPI
- ---help---
- Use HW to reduce RX interrupts. Not strictly necessary since NAPI
- reduces RX interrupts by itself. Interrupt mitigation reduces RX
- interrupts even at low levels of traffic at the cost of a small
- latency.
-
- If in doubt, say Y.
-
-config TULIP_DM910X
- def_bool y
- depends on TULIP && SPARC
-
-config DE4X5
- tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
- depends on PCI || EISA
- select CRC32
- ---help---
- This is support for the DIGITAL series of PCI/EISA Ethernet cards.
- These include the DE425, DE434, DE435, DE450 and DE500 models. If
- you have a network card of this type, say Y and read the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. More specific
- information is contained in
- <file:Documentation/networking/de4x5.txt>.
-
- To compile this driver as a module, choose M here. The module will
- be called de4x5.
-
-config WINBOND_840
- tristate "Winbond W89c840 Ethernet support"
- depends on PCI
- select CRC32
- select MII
- help
- This driver is for the Winbond W89c840 chip. It also works with
- the TX9882 chip on the Compex RL100-ATX board.
- More specific information and updates are available from
- <http://www.scyld.com/network/drivers.html>.
-
-config DM9102
- tristate "Davicom DM910x/DM980x support"
- depends on PCI
- select CRC32
- ---help---
- This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from
- Davicom (<http://www.davicom.com.tw/>). If you have such a network
- (Ethernet) card, say Y. Some information is contained in the file
- <file:Documentation/networking/dmfe.txt>.
-
- To compile this driver as a module, choose M here. The module will
- be called dmfe.
-
-config ULI526X
- tristate "ULi M526x controller support"
- depends on PCI
- select CRC32
- ---help---
- This driver is for ULi M5261/M5263 10/100M Ethernet Controller
- (<http://www.nvidia.com/page/uli_drivers.html>).
-
- To compile this driver as a module, choose M here. The module will
- be called uli526x.
-
-config PCMCIA_XIRCOM
- tristate "Xircom CardBus support"
- depends on CARDBUS
- ---help---
- This driver is for the Digital "Tulip" Ethernet CardBus adapters.
- It should work with most DEC 21*4*-based chips/ethercards, as well
- as with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and
- ASIX.
-
- To compile this driver as a module, choose M here. The module will
- be called xircom_cb. If unsure, say N.
-
-endif # NET_TULIP
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 71f3d1a35b7..7bea9c65119 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -496,7 +496,7 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_start_xmit = tun_net_xmit,
.ndo_change_mtu = tun_net_change_mtu,
.ndo_fix_features = tun_net_fix_features,
- .ndo_set_multicast_list = tun_net_mclist,
+ .ndo_set_rx_mode = tun_net_mclist,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 84d4608153c..23357612793 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -68,6 +68,7 @@ config USB_KAWETH
config USB_PEGASUS
tristate "USB Pegasus/Pegasus-II based ethernet device support"
+ select NET_CORE
select MII
---help---
Say Y here if you know you have Pegasus or Pegasus-II based adapter.
@@ -84,6 +85,7 @@ config USB_PEGASUS
config USB_RTL8150
tristate "USB RTL8150 based ethernet device support (EXPERIMENTAL)"
depends on EXPERIMENTAL
+ select NET_CORE
select MII
help
Say Y here if you have RTL8150 based usb-ethernet adapter.
@@ -95,6 +97,7 @@ config USB_RTL8150
config USB_USBNET
tristate "Multi-purpose USB Networking Framework"
+ select NET_CORE
select MII
---help---
This driver supports several kinds of network links over USB,
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index c5c4b4def7f..e81e22e3d1d 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -36,8 +36,8 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
-#define DRIVER_VERSION "14-Jun-2006"
-static const char driver_name [] = "asix";
+#define DRIVER_VERSION "26-Sep-2011"
+#define DRIVER_NAME "asix"
/* ASIX AX8817X based USB 2.0 Ethernet Devices */
@@ -115,28 +115,27 @@ static const char driver_name [] = "asix";
#define AX88178_MEDIUM_DEFAULT \
(AX_MEDIUM_PS | AX_MEDIUM_FD | AX_MEDIUM_AC | \
AX_MEDIUM_RFC | AX_MEDIUM_TFC | AX_MEDIUM_JFE | \
- AX_MEDIUM_RE )
+ AX_MEDIUM_RE)
#define AX88772_MEDIUM_DEFAULT \
(AX_MEDIUM_FD | AX_MEDIUM_RFC | \
AX_MEDIUM_TFC | AX_MEDIUM_PS | \
- AX_MEDIUM_AC | AX_MEDIUM_RE )
+ AX_MEDIUM_AC | AX_MEDIUM_RE)
/* AX88772 & AX88178 RX_CTL values */
-#define AX_RX_CTL_SO 0x0080
-#define AX_RX_CTL_AP 0x0020
-#define AX_RX_CTL_AM 0x0010
-#define AX_RX_CTL_AB 0x0008
-#define AX_RX_CTL_SEP 0x0004
-#define AX_RX_CTL_AMALL 0x0002
-#define AX_RX_CTL_PRO 0x0001
-#define AX_RX_CTL_MFB_2048 0x0000
-#define AX_RX_CTL_MFB_4096 0x0100
-#define AX_RX_CTL_MFB_8192 0x0200
-#define AX_RX_CTL_MFB_16384 0x0300
-
-#define AX_DEFAULT_RX_CTL \
- (AX_RX_CTL_SO | AX_RX_CTL_AB )
+#define AX_RX_CTL_SO 0x0080
+#define AX_RX_CTL_AP 0x0020
+#define AX_RX_CTL_AM 0x0010
+#define AX_RX_CTL_AB 0x0008
+#define AX_RX_CTL_SEP 0x0004
+#define AX_RX_CTL_AMALL 0x0002
+#define AX_RX_CTL_PRO 0x0001
+#define AX_RX_CTL_MFB_2048 0x0000
+#define AX_RX_CTL_MFB_4096 0x0100
+#define AX_RX_CTL_MFB_8192 0x0200
+#define AX_RX_CTL_MFB_16384 0x0300
+
+#define AX_DEFAULT_RX_CTL (AX_RX_CTL_SO | AX_RX_CTL_AB)
/* GPIO 0 .. 2 toggles */
#define AX_GPIO_GPO0EN 0x01 /* GPIO0 Output enable */
@@ -164,6 +163,8 @@ static const char driver_name [] = "asix";
#define MARVELL_CTRL_TXDELAY 0x0002
#define MARVELL_CTRL_RXDELAY 0x0080
+#define PHY_MODE_RTL8211CL 0x0004
+
/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
struct asix_data {
u8 multi_filter[AX_MCAST_FILTER_SIZE];
@@ -268,12 +269,15 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
cmd, value, index, size);
- if ((urb = usb_alloc_urb(0, GFP_ATOMIC)) == NULL) {
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
return;
}
- if ((req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC)) == NULL) {
+ req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
+ if (!req) {
netdev_err(dev->net, "Failed to allocate memory for control request\n");
usb_free_urb(urb);
return;
@@ -290,7 +294,8 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
(void *)req, data, size,
asix_async_cmd_callback, req);
- if((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status < 0) {
netdev_err(dev->net, "Error submitting the control message: status=%d\n",
status);
kfree(req);
@@ -531,11 +536,11 @@ static u16 asix_read_medium_status(struct usbnet *dev)
if (ret < 0) {
netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
ret);
- goto out;
+ return ret; /* TODO: callers not checking for error ret */
}
- ret = le16_to_cpu(v);
-out:
- return ret;
+
+ return le16_to_cpu(v);
+
}
static int asix_write_medium_mode(struct usbnet *dev, u16 mode)
@@ -676,12 +681,6 @@ asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
}
wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
wolinfo->wolopts = 0;
- if (opt & AX_MONITOR_MODE) {
- if (opt & AX_MONITOR_LINK)
- wolinfo->wolopts |= WAKE_PHY;
- if (opt & AX_MONITOR_MAGIC)
- wolinfo->wolopts |= WAKE_MAGIC;
- }
}
static int
@@ -694,8 +693,6 @@ asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
opt |= AX_MONITOR_LINK;
if (wolinfo->wolopts & WAKE_MAGIC)
opt |= AX_MONITOR_MAGIC;
- if (opt != 0)
- opt |= AX_MONITOR_MODE;
if (asix_write_cmd(dev, AX_CMD_WRITE_MONITOR_MODE,
opt, 0, 0, NULL) < 0)
@@ -744,7 +741,7 @@ static void asix_get_drvinfo (struct net_device *net,
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strncpy (info->driver, driver_name, sizeof info->driver);
+ strncpy (info->driver, DRIVER_NAME, sizeof info->driver);
strncpy (info->version, DRIVER_VERSION, sizeof info->version);
info->eedump_len = data->eeprom_len;
}
@@ -872,7 +869,7 @@ static const struct net_device_ops ax88172_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = asix_ioctl,
- .ndo_set_multicast_list = ax88172_set_multicast,
+ .ndo_set_rx_mode = ax88172_set_multicast,
};
static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -889,19 +886,20 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
/* Toggle the GPIOs in a manufacturer/model specific way */
for (i = 2; i >= 0; i--) {
- if ((ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS,
- (gpio_bits >> (i * 8)) & 0xff, 0, 0,
- NULL)) < 0)
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS,
+ (gpio_bits >> (i * 8)) & 0xff, 0, 0, NULL);
+ if (ret < 0)
goto out;
msleep(5);
}
- if ((ret = asix_write_rx_ctl(dev, 0x80)) < 0)
+ ret = asix_write_rx_ctl(dev, 0x80);
+ if (ret < 0)
goto out;
/* Get the MAC address */
- if ((ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID,
- 0, 0, ETH_ALEN, buf)) < 0) {
+ ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
+ if (ret < 0) {
dbg("read AX_CMD_READ_NODE_ID failed: %d", ret);
goto out;
}
@@ -966,117 +964,88 @@ static int ax88772_link_reset(struct usbnet *dev)
return 0;
}
-static const struct net_device_ops ax88772_netdev_ops = {
- .ndo_open = usbnet_open,
- .ndo_stop = usbnet_stop,
- .ndo_start_xmit = usbnet_start_xmit,
- .ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_change_mtu = usbnet_change_mtu,
- .ndo_set_mac_address = asix_set_mac_address,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = asix_ioctl,
- .ndo_set_multicast_list = asix_set_multicast,
-};
-
-static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
+static int ax88772_reset(struct usbnet *dev)
{
int ret, embd_phy;
u16 rx_ctl;
- struct asix_data *data = (struct asix_data *)&dev->data;
- u8 buf[ETH_ALEN];
- u32 phyid;
- data->eeprom_len = AX88772_EEPROM_LEN;
-
- usbnet_get_endpoints(dev,intf);
-
- if ((ret = asix_write_gpio(dev,
- AX_GPIO_RSE | AX_GPIO_GPO_2 | AX_GPIO_GPO2EN, 5)) < 0)
+ ret = asix_write_gpio(dev,
+ AX_GPIO_RSE | AX_GPIO_GPO_2 | AX_GPIO_GPO2EN, 5);
+ if (ret < 0)
goto out;
- /* 0x10 is the phy id of the embedded 10/100 ethernet phy */
embd_phy = ((asix_get_phy_addr(dev) & 0x1f) == 0x10 ? 1 : 0);
- if ((ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT,
- embd_phy, 0, 0, NULL)) < 0) {
+
+ ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+ if (ret < 0) {
dbg("Select PHY #1 failed: %d", ret);
goto out;
}
- if ((ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL)) < 0)
+ ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
+ if (ret < 0)
goto out;
msleep(150);
- if ((ret = asix_sw_reset(dev, AX_SWRESET_CLEAR)) < 0)
+
+ ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
+ if (ret < 0)
goto out;
msleep(150);
+
if (embd_phy) {
- if ((ret = asix_sw_reset(dev, AX_SWRESET_IPRL)) < 0)
+ ret = asix_sw_reset(dev, AX_SWRESET_IPRL);
+ if (ret < 0)
goto out;
- }
- else {
- if ((ret = asix_sw_reset(dev, AX_SWRESET_PRTE)) < 0)
+ } else {
+ ret = asix_sw_reset(dev, AX_SWRESET_PRTE);
+ if (ret < 0)
goto out;
}
msleep(150);
rx_ctl = asix_read_rx_ctl(dev);
dbg("RX_CTL is 0x%04x after software reset", rx_ctl);
- if ((ret = asix_write_rx_ctl(dev, 0x0000)) < 0)
+ ret = asix_write_rx_ctl(dev, 0x0000);
+ if (ret < 0)
goto out;
rx_ctl = asix_read_rx_ctl(dev);
dbg("RX_CTL is 0x%04x setting to 0x0000", rx_ctl);
- /* Get the MAC address */
- if ((ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
- 0, 0, ETH_ALEN, buf)) < 0) {
- dbg("Failed to read MAC address: %d", ret);
- goto out;
- }
- memcpy(dev->net->dev_addr, buf, ETH_ALEN);
-
- /* Initialize MII structure */
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = asix_mdio_read;
- dev->mii.mdio_write = asix_mdio_write;
- dev->mii.phy_id_mask = 0x1f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = asix_get_phy_addr(dev);
-
- phyid = asix_get_phyid(dev);
- dbg("PHYID=0x%08x", phyid);
-
- if ((ret = asix_sw_reset(dev, AX_SWRESET_PRL)) < 0)
+ ret = asix_sw_reset(dev, AX_SWRESET_PRL);
+ if (ret < 0)
goto out;
msleep(150);
- if ((ret = asix_sw_reset(dev, AX_SWRESET_IPRL | AX_SWRESET_PRL)) < 0)
+ ret = asix_sw_reset(dev, AX_SWRESET_IPRL | AX_SWRESET_PRL);
+ if (ret < 0)
goto out;
msleep(150);
- dev->net->netdev_ops = &ax88772_netdev_ops;
- dev->net->ethtool_ops = &ax88772_ethtool_ops;
-
asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_CSMA);
mii_nway_restart(&dev->mii);
- if ((ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT)) < 0)
+ ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT);
+ if (ret < 0)
goto out;
- if ((ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
- AX88772_IPG2_DEFAULT, 0, NULL)) < 0) {
+ AX88772_IPG2_DEFAULT, 0, NULL);
+ if (ret < 0) {
dbg("Write IPG,IPG1,IPG2 failed: %d", ret);
goto out;
}
/* Set RX_CTL to default values with 2k buffer, and enable cactus */
- if ((ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL)) < 0)
+ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
+ if (ret < 0)
goto out;
rx_ctl = asix_read_rx_ctl(dev);
@@ -1085,16 +1054,70 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
rx_ctl = asix_read_medium_status(dev);
dbg("Medium Status is 0x%04x after all initializations", rx_ctl);
+ return 0;
+
+out:
+ return ret;
+
+}
+
+static const struct net_device_ops ax88772_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_set_mac_address = asix_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = asix_ioctl,
+ .ndo_set_rx_mode = asix_set_multicast,
+};
+
+static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int ret;
+ struct asix_data *data = (struct asix_data *)&dev->data;
+ u8 buf[ETH_ALEN];
+ u32 phyid;
+
+ data->eeprom_len = AX88772_EEPROM_LEN;
+
+ usbnet_get_endpoints(dev,intf);
+
+ /* Get the MAC address */
+ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
+ if (ret < 0) {
+ dbg("Failed to read MAC address: %d", ret);
+ return ret;
+ }
+ memcpy(dev->net->dev_addr, buf, ETH_ALEN);
+
+ /* Initialize MII structure */
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = asix_mdio_read;
+ dev->mii.mdio_write = asix_mdio_write;
+ dev->mii.phy_id_mask = 0x1f;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.phy_id = asix_get_phy_addr(dev);
+
+ phyid = asix_get_phyid(dev);
+ dbg("PHYID=0x%08x", phyid);
+
+ dev->net->netdev_ops = &ax88772_netdev_ops;
+ dev->net->ethtool_ops = &ax88772_ethtool_ops;
+
+ ret = ax88772_reset(dev);
+ if (ret < 0)
+ return ret;
+
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
if (dev->driver_info->flags & FLAG_FRAMING_AX) {
/* hard_mtu is still the default - the device does not support
jumbo eth frames */
dev->rx_urb_size = 2048;
}
- return 0;
-out:
- return ret;
+ return 0;
}
static struct ethtool_ops ax88178_ethtool_ops = {
@@ -1143,6 +1166,27 @@ static int marvell_phy_init(struct usbnet *dev)
return 0;
}
+static int rtl8211cl_phy_init(struct usbnet *dev)
+{
+ struct asix_data *data = (struct asix_data *)&dev->data;
+
+ netdev_dbg(dev->net, "rtl8211cl_phy_init()\n");
+
+ asix_mdio_write (dev->net, dev->mii.phy_id, 0x1f, 0x0005);
+ asix_mdio_write (dev->net, dev->mii.phy_id, 0x0c, 0);
+ asix_mdio_write (dev->net, dev->mii.phy_id, 0x01,
+ asix_mdio_read (dev->net, dev->mii.phy_id, 0x01) | 0x0080);
+ asix_mdio_write (dev->net, dev->mii.phy_id, 0x1f, 0);
+
+ if (data->ledmode == 12) {
+ asix_mdio_write (dev->net, dev->mii.phy_id, 0x1f, 0x0002);
+ asix_mdio_write (dev->net, dev->mii.phy_id, 0x1a, 0x00cb);
+ asix_mdio_write (dev->net, dev->mii.phy_id, 0x1f, 0);
+ }
+
+ return 0;
+}
+
static int marvell_led_status(struct usbnet *dev, u16 speed)
{
u16 reg = asix_mdio_read(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL);
@@ -1169,6 +1213,79 @@ static int marvell_led_status(struct usbnet *dev, u16 speed)
return 0;
}
+static int ax88178_reset(struct usbnet *dev)
+{
+ struct asix_data *data = (struct asix_data *)&dev->data;
+ int ret;
+ __le16 eeprom;
+ u8 status;
+ int gpio0 = 0;
+
+ asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
+ dbg("GPIO Status: 0x%04x", status);
+
+ asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL);
+ asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom);
+ asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL);
+
+ dbg("EEPROM index 0x17 is 0x%04x", eeprom);
+
+ if (eeprom == cpu_to_le16(0xffff)) {
+ data->phymode = PHY_MODE_MARVELL;
+ data->ledmode = 0;
+ gpio0 = 1;
+ } else {
+ data->phymode = le16_to_cpu(eeprom) & 7;
+ data->ledmode = le16_to_cpu(eeprom) >> 8;
+ gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
+ }
+ dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode);
+
+ asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
+ if ((le16_to_cpu(eeprom) >> 8) != 1) {
+ asix_write_gpio(dev, 0x003c, 30);
+ asix_write_gpio(dev, 0x001c, 300);
+ asix_write_gpio(dev, 0x003c, 30);
+ } else {
+ dbg("gpio phymode == 1 path");
+ asix_write_gpio(dev, AX_GPIO_GPO1EN, 30);
+ asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
+ }
+
+ asix_sw_reset(dev, 0);
+ msleep(150);
+
+ asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
+ msleep(150);
+
+ asix_write_rx_ctl(dev, 0);
+
+ if (data->phymode == PHY_MODE_MARVELL) {
+ marvell_phy_init(dev);
+ msleep(60);
+ } else if (data->phymode == PHY_MODE_RTL8211CL)
+ rtl8211cl_phy_init(dev);
+
+ asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR,
+ BMCR_RESET | BMCR_ANENABLE);
+ asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
+ asix_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000,
+ ADVERTISE_1000FULL);
+
+ mii_nway_restart(&dev->mii);
+
+ ret = asix_write_medium_mode(dev, AX88178_MEDIUM_DEFAULT);
+ if (ret < 0)
+ return ret;
+
+ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int ax88178_link_reset(struct usbnet *dev)
{
u16 mode;
@@ -1270,67 +1387,27 @@ static const struct net_device_ops ax88178_netdev_ops = {
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = asix_set_multicast,
+ .ndo_set_rx_mode = asix_set_multicast,
.ndo_do_ioctl = asix_ioctl,
.ndo_change_mtu = ax88178_change_mtu,
};
static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
{
- struct asix_data *data = (struct asix_data *)&dev->data;
int ret;
u8 buf[ETH_ALEN];
- __le16 eeprom;
- u8 status;
- int gpio0 = 0;
u32 phyid;
+ struct asix_data *data = (struct asix_data *)&dev->data;
- usbnet_get_endpoints(dev,intf);
-
- asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
- dbg("GPIO Status: 0x%04x", status);
-
- asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL);
- asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom);
- asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL);
-
- dbg("EEPROM index 0x17 is 0x%04x", eeprom);
-
- if (eeprom == cpu_to_le16(0xffff)) {
- data->phymode = PHY_MODE_MARVELL;
- data->ledmode = 0;
- gpio0 = 1;
- } else {
- data->phymode = le16_to_cpu(eeprom) & 7;
- data->ledmode = le16_to_cpu(eeprom) >> 8;
- gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
- }
- dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode);
-
- asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
- if ((le16_to_cpu(eeprom) >> 8) != 1) {
- asix_write_gpio(dev, 0x003c, 30);
- asix_write_gpio(dev, 0x001c, 300);
- asix_write_gpio(dev, 0x003c, 30);
- } else {
- dbg("gpio phymode == 1 path");
- asix_write_gpio(dev, AX_GPIO_GPO1EN, 30);
- asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
- }
-
- asix_sw_reset(dev, 0);
- msleep(150);
-
- asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
- msleep(150);
+ data->eeprom_len = AX88772_EEPROM_LEN;
- asix_write_rx_ctl(dev, 0);
+ usbnet_get_endpoints(dev,intf);
/* Get the MAC address */
- if ((ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
- 0, 0, ETH_ALEN, buf)) < 0) {
+ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
+ if (ret < 0) {
dbg("Failed to read MAC address: %d", ret);
- goto out;
+ return ret;
}
memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -1349,25 +1426,9 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
phyid = asix_get_phyid(dev);
dbg("PHYID=0x%08x", phyid);
- if (data->phymode == PHY_MODE_MARVELL) {
- marvell_phy_init(dev);
- msleep(60);
- }
-
- asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR,
- BMCR_RESET | BMCR_ANENABLE);
- asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
- ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
- asix_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000,
- ADVERTISE_1000FULL);
-
- mii_nway_restart(&dev->mii);
-
- if ((ret = asix_write_medium_mode(dev, AX88178_MEDIUM_DEFAULT)) < 0)
- goto out;
-
- if ((ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL)) < 0)
- goto out;
+ ret = ax88178_reset(dev);
+ if (ret < 0)
+ return ret;
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
if (dev->driver_info->flags & FLAG_FRAMING_AX) {
@@ -1375,10 +1436,8 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
jumbo eth frames */
dev->rx_urb_size = 2048;
}
- return 0;
-out:
- return ret;
+ return 0;
}
static const struct driver_info ax8817x_info = {
@@ -1426,7 +1485,7 @@ static const struct driver_info ax88772_info = {
.bind = ax88772_bind,
.status = asix_status,
.link_reset = ax88772_link_reset,
- .reset = ax88772_link_reset,
+ .reset = ax88772_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
.rx_fixup = asix_rx_fixup,
.tx_fixup = asix_tx_fixup,
@@ -1437,7 +1496,7 @@ static const struct driver_info ax88178_info = {
.bind = ax88178_bind,
.status = asix_status,
.link_reset = ax88178_link_reset,
- .reset = ax88178_link_reset,
+ .reset = ax88178_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
.rx_fixup = asix_rx_fixup,
.tx_fixup = asix_tx_fixup,
@@ -1566,7 +1625,7 @@ static const struct usb_device_id products [] = {
MODULE_DEVICE_TABLE(usb, products);
static struct usb_driver asix_driver = {
- .name = "asix",
+ .name = DRIVER_NAME,
.id_table = products,
.probe = usbnet_probe,
.suspend = usbnet_suspend,
@@ -1588,6 +1647,7 @@ static void __exit asix_exit(void)
module_exit(asix_exit);
MODULE_AUTHOR("David Hollis");
+MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("ASIX AX8817X based USB 2.0 Ethernet Devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 8056f8a27c6..a68272c9338 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -749,7 +749,7 @@ static const struct net_device_ops catc_netdev_ops = {
.ndo_start_xmit = catc_start_xmit,
.ndo_tx_timeout = catc_tx_timeout,
- .ndo_set_multicast_list = catc_set_multicast_list,
+ .ndo_set_rx_mode = catc_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 1d93133e9b7..fbc0e4def76 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -428,7 +428,7 @@ static const struct net_device_ops dm9601_netdev_ops = {
.ndo_change_mtu = usbnet_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = dm9601_ioctl,
- .ndo_set_multicast_list = dm9601_set_multicast,
+ .ndo_set_rx_mode = dm9601_set_multicast,
.ndo_set_mac_address = dm9601_set_mac_address,
};
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index be02a25da71..131ac6c172f 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -193,7 +193,7 @@ static const struct net_device_ops int51x1_netdev_ops = {
.ndo_change_mtu = usbnet_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = int51x1_set_multicast,
+ .ndo_set_rx_mode = int51x1_set_multicast,
};
static int int51x1_bind(struct usbnet *dev, struct usb_interface *intf)
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index ad0298f9b5f..582ca2dfa5f 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -985,7 +985,7 @@ static const struct net_device_ops kaweth_netdev_ops = {
.ndo_stop = kaweth_close,
.ndo_start_xmit = kaweth_start_xmit,
.ndo_tx_timeout = kaweth_tx_timeout,
- .ndo_set_multicast_list = kaweth_set_rx_mode,
+ .ndo_set_rx_mode = kaweth_set_rx_mode,
.ndo_get_stats = kaweth_netdev_stats,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 1d83ccfd727..1e722195105 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -89,6 +89,8 @@ static int vl600_bind(struct usbnet *dev, struct usb_interface *intf)
* addresses have no meaning, the destination and the source of every
* packet depend only on whether it is on the IN or OUT endpoint. */
dev->net->flags |= IFF_NOARP;
+ /* IPv6 NDP relies on multicast. Enable it by default. */
+ dev->net->flags |= IFF_MULTICAST;
return ret;
}
@@ -200,6 +202,14 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
} else {
memset(ethhdr->h_source, 0, ETH_ALEN);
memcpy(ethhdr->h_dest, dev->net->dev_addr, ETH_ALEN);
+
+ /* Inbound IPv6 packets have an IPv4 ethertype (0x800)
+ * for some reason. Peek at the L3 header to check
+ * for IPv6 packets, and set the ethertype to IPv6
+ * (0x86dd) so Linux can understand it.
+ */
+ if ((buf->data[sizeof(*ethhdr)] & 0xf0) == 0x60)
+ ethhdr->h_proto = __constant_htons(ETH_P_IPV6);
}
if (count) {
@@ -297,6 +307,15 @@ encapsulate:
if (skb->len < full_len) /* Pad */
skb_put(skb, full_len - skb->len);
+ /* The VL600 wants IPv6 packets to have an IPv4 ethertype
+ * Check if this is an IPv6 packet, and set the ethertype
+ * to 0x800
+ */
+ if ((skb->data[sizeof(struct vl600_pkt_hdr *) + 0x22] & 0xf0) == 0x60) {
+ skb->data[sizeof(struct vl600_pkt_hdr *) + 0x20] = 0x08;
+ skb->data[sizeof(struct vl600_pkt_hdr *) + 0x21] = 0;
+ }
+
return skb;
}
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 2b791392e78..db2cb74bf85 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -553,7 +553,7 @@ static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_change_mtu = usbnet_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mcs7830_ioctl,
- .ndo_set_multicast_list = mcs7830_set_multicast,
+ .ndo_set_rx_mode = mcs7830_set_multicast,
.ndo_set_mac_address = mcs7830_set_mac_address,
};
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index ef3667690b1..769f5090bda 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1476,7 +1476,7 @@ static const struct net_device_ops pegasus_netdev_ops = {
.ndo_stop = pegasus_close,
.ndo_do_ioctl = pegasus_ioctl,
.ndo_start_xmit = pegasus_start_xmit,
- .ndo_set_multicast_list = pegasus_set_multicast,
+ .ndo_set_rx_mode = pegasus_set_multicast,
.ndo_get_stats = pegasus_netdev_stats,
.ndo_tx_timeout = pegasus_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index ef3b236b514..bf8c84d0adf 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -169,26 +169,8 @@ struct rtl8150 {
typedef struct rtl8150 rtl8150_t;
-static void fill_skb_pool(rtl8150_t *);
-static void free_skb_pool(rtl8150_t *);
-static inline struct sk_buff *pull_skb(rtl8150_t *);
-static void rtl8150_disconnect(struct usb_interface *intf);
-static int rtl8150_probe(struct usb_interface *intf,
- const struct usb_device_id *id);
-static int rtl8150_suspend(struct usb_interface *intf, pm_message_t message);
-static int rtl8150_resume(struct usb_interface *intf);
-
static const char driver_name [] = "rtl8150";
-static struct usb_driver rtl8150_driver = {
- .name = driver_name,
- .probe = rtl8150_probe,
- .disconnect = rtl8150_disconnect,
- .id_table = rtl8150_table,
- .suspend = rtl8150_suspend,
- .resume = rtl8150_resume
-};
-
/*
**
** device related part of the code
@@ -333,7 +315,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
/* Write the MAC address into eeprom. Eeprom writes must be word-sized,
so we need to split them up. */
for (i = 0; i * 2 < netdev->addr_len; i++) {
- set_registers(dev, IDR_EEPROM + (i * 2), 2,
+ set_registers(dev, IDR_EEPROM + (i * 2), 2,
netdev->dev_addr + (i * 2));
}
/* Clear the WEPROM bit (preventing accidental eeprom writes). */
@@ -490,44 +472,6 @@ resched:
tasklet_schedule(&dev->tl);
}
-static void rx_fixup(unsigned long data)
-{
- rtl8150_t *dev;
- struct sk_buff *skb;
- int status;
-
- dev = (rtl8150_t *)data;
-
- spin_lock_irq(&dev->rx_pool_lock);
- fill_skb_pool(dev);
- spin_unlock_irq(&dev->rx_pool_lock);
- if (test_bit(RX_URB_FAIL, &dev->flags))
- if (dev->rx_skb)
- goto try_again;
- spin_lock_irq(&dev->rx_pool_lock);
- skb = pull_skb(dev);
- spin_unlock_irq(&dev->rx_pool_lock);
- if (skb == NULL)
- goto tlsched;
- dev->rx_skb = skb;
- usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
- dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev);
-try_again:
- status = usb_submit_urb(dev->rx_urb, GFP_ATOMIC);
- if (status == -ENODEV) {
- netif_device_detach(dev->netdev);
- } else if (status) {
- set_bit(RX_URB_FAIL, &dev->flags);
- goto tlsched;
- } else {
- clear_bit(RX_URB_FAIL, &dev->flags);
- }
-
- return;
-tlsched:
- tasklet_schedule(&dev->tl);
-}
-
static void write_bulk_callback(struct urb *urb)
{
rtl8150_t *dev;
@@ -665,6 +609,42 @@ static void free_skb_pool(rtl8150_t *dev)
dev_kfree_skb(dev->rx_skb_pool[i]);
}
+static void rx_fixup(unsigned long data)
+{
+ struct rtl8150 *dev = (struct rtl8150 *)data;
+ struct sk_buff *skb;
+ int status;
+
+ spin_lock_irq(&dev->rx_pool_lock);
+ fill_skb_pool(dev);
+ spin_unlock_irq(&dev->rx_pool_lock);
+ if (test_bit(RX_URB_FAIL, &dev->flags))
+ if (dev->rx_skb)
+ goto try_again;
+ spin_lock_irq(&dev->rx_pool_lock);
+ skb = pull_skb(dev);
+ spin_unlock_irq(&dev->rx_pool_lock);
+ if (skb == NULL)
+ goto tlsched;
+ dev->rx_skb = skb;
+ usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
+ dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev);
+try_again:
+ status = usb_submit_urb(dev->rx_urb, GFP_ATOMIC);
+ if (status == -ENODEV) {
+ netif_device_detach(dev->netdev);
+ } else if (status) {
+ set_bit(RX_URB_FAIL, &dev->flags);
+ goto tlsched;
+ } else {
+ clear_bit(RX_URB_FAIL, &dev->flags);
+ }
+
+ return;
+tlsched:
+ tasklet_schedule(&dev->tl);
+}
+
static int enable_net_traffic(rtl8150_t * dev)
{
u8 cr, tcr, rcr, msr;
@@ -778,7 +758,7 @@ static int rtl8150_open(struct net_device *netdev)
return -ENOMEM;
set_registers(dev, IDR, 6, netdev->dev_addr);
-
+
usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev);
if ((res = usb_submit_urb(dev->rx_urb, GFP_KERNEL))) {
@@ -898,8 +878,8 @@ static const struct net_device_ops rtl8150_netdev_ops = {
.ndo_stop = rtl8150_close,
.ndo_do_ioctl = rtl8150_ioctl,
.ndo_start_xmit = rtl8150_start_xmit,
- .ndo_tx_timeout = rtl8150_tx_timeout,
- .ndo_set_multicast_list = rtl8150_set_multicast,
+ .ndo_tx_timeout = rtl8150_tx_timeout,
+ .ndo_set_rx_mode = rtl8150_set_multicast,
.ndo_set_mac_address = rtl8150_set_mac_address,
.ndo_change_mtu = eth_change_mtu,
@@ -929,7 +909,7 @@ static int rtl8150_probe(struct usb_interface *intf,
tasklet_init(&dev->tl, rx_fixup, (unsigned long)dev);
spin_lock_init(&dev->rx_pool_lock);
-
+
dev->udev = udev;
dev->netdev = netdev;
netdev->netdev_ops = &rtl8150_netdev_ops;
@@ -947,7 +927,7 @@ static int rtl8150_probe(struct usb_interface *intf,
}
fill_skb_pool(dev);
set_ethernet_addr(dev);
-
+
usb_set_intfdata(intf, dev);
SET_NETDEV_DEV(netdev, &intf->dev);
if (register_netdev(netdev) != 0) {
@@ -989,6 +969,15 @@ static void rtl8150_disconnect(struct usb_interface *intf)
}
}
+static struct usb_driver rtl8150_driver = {
+ .name = driver_name,
+ .probe = rtl8150_probe,
+ .disconnect = rtl8150_disconnect,
+ .id_table = rtl8150_table,
+ .suspend = rtl8150_suspend,
+ .resume = rtl8150_resume
+};
+
static int __init usb_rtl8150_init(void)
{
printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 15b3d6888ae..22a7cf951e7 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1000,7 +1000,7 @@ static const struct net_device_ops smsc75xx_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = smsc75xx_ioctl,
- .ndo_set_multicast_list = smsc75xx_set_multicast,
+ .ndo_set_rx_mode = smsc75xx_set_multicast,
.ndo_set_features = smsc75xx_set_features,
};
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index f74f3ce7152..eff67678c5a 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -972,7 +972,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = smsc95xx_ioctl,
- .ndo_set_multicast_list = smsc95xx_set_multicast,
+ .ndo_set_rx_mode = smsc95xx_set_multicast,
.ndo_set_features = smsc95xx_set_features,
};
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ce395fe5de2..7d6082160bc 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -238,6 +238,10 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
skb->len + sizeof (struct ethhdr), skb->protocol);
memset (skb->cb, 0, sizeof (struct skb_data));
+
+ if (skb_defer_rx_timestamp(skb))
+ return;
+
status = netif_rx (skb);
if (status != NET_RX_SUCCESS)
netif_dbg(dev, rx_err, dev->net,
@@ -1053,6 +1057,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
unsigned long flags;
int retval;
+ skb_tx_timestamp(skb);
+
// some devices want funky USB-level framing, for
// win32 driver (usually) and/or hardware quirks
if (info->tx_fixup) {
@@ -1470,7 +1476,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
if (!dev->suspend_count++) {
spin_lock_irq(&dev->txq.lock);
/* don't autosuspend while transmitting */
- if (dev->txq.qlen && (message.event & PM_EVENT_AUTO)) {
+ if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
spin_unlock_irq(&dev->txq.lock);
return -EBUSY;
} else {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0c7321c35ad..91039ab1672 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -143,18 +143,16 @@ static void skb_xmit_done(struct virtqueue *svq)
static void set_skb_frag(struct sk_buff *skb, struct page *page,
unsigned int offset, unsigned int *len)
{
+ int size = min((unsigned)PAGE_SIZE - offset, *len);
int i = skb_shinfo(skb)->nr_frags;
- skb_frag_t *f;
- f = &skb_shinfo(skb)->frags[i];
- f->size = min((unsigned)PAGE_SIZE - offset, *len);
- f->page_offset = offset;
- f->page = page;
+ __skb_fill_page_desc(skb, i, page, offset, size);
- skb->data_len += f->size;
- skb->len += f->size;
+ skb->data_len += size;
+ skb->len += size;
+ skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
- *len -= f->size;
+ *len -= size;
}
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
@@ -195,6 +193,19 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
len -= copy;
offset += copy;
+ /*
+ * Verify that we can indeed put this data into a skb.
+ * This is here to handle cases when the device erroneously
+ * tries to receive more than is possible. This is usually
+ * the case of a broken device.
+ */
+ if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
+ if (net_ratelimit())
+ pr_debug("%s: too much data\n", skb->dev->name);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
while (len) {
set_skb_frag(skb, page, offset, &len);
page = (struct page *)page->private;
@@ -277,7 +288,6 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
}
hdr = skb_vnet_hdr(skb);
- skb->truesize += skb->data_len;
u64_stats_update_begin(&stats->syncp);
stats->rx_bytes += skb->len;
@@ -867,8 +877,21 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}
+static void virtnet_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
+ ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
+ ring->rx_pending = ring->rx_max_pending;
+ ring->tx_pending = ring->tx_max_pending;
+
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
.get_link = ethtool_op_get_link,
+ .get_ringparam = virtnet_get_ringparam,
};
#define MIN_MTU 68
@@ -949,6 +972,7 @@ static int virtnet_probe(struct virtio_device *vdev)
return -ENOMEM;
/* Set up network device as normal. */
+ dev->priv_flags |= IFF_UNICAST_FLT;
dev->netdev_ops = &virtnet_netdev;
dev->features = NETIF_F_HIGHDMA;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0959583feb2..b771ebac0f0 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -654,10 +654,11 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
- frag->page = rbi->page;
+ __skb_frag_set_page(frag, rbi->page);
frag->page_offset = 0;
- frag->size = rcd->len;
- skb->data_len += frag->size;
+ skb_frag_size_set(frag, rcd->len);
+ skb->data_len += rcd->len;
+ skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
}
@@ -744,21 +745,21 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_PAGE;
- tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
- frag->page_offset, frag->size,
- PCI_DMA_TODEVICE);
+ tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
+ 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
- tbi->len = frag->size;
+ tbi->len = skb_frag_size(frag);
gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
- gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
+ gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
gdesc->dword[3] = 0;
dev_dbg(&adapter->netdev->dev,
@@ -1277,7 +1278,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skb = ctx->skb;
if (rcd->eop) {
skb->len += skb->data_len;
- skb->truesize += skb->data_len;
vmxnet3_rx_csum(adapter, skb,
(union Vmxnet3_GenericDesc *)rcd);
@@ -2876,7 +2876,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
.ndo_set_features = vmxnet3_set_features,
.ndo_get_stats64 = vmxnet3_get_stats64,
.ndo_tx_timeout = vmxnet3_tx_timeout,
- .ndo_set_multicast_list = vmxnet3_set_mc,
+ .ndo_set_rx_mode = vmxnet3_set_mc,
.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 27400edeef5..e662cbc8bfb 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -558,7 +558,7 @@ out:
static int
vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
- void *rules)
+ u32 *rules)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
switch (info->cmd) {
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 055a918067e..0d7645581f9 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -515,37 +515,37 @@ static int ppp_rx(struct sk_buff *skb)
switch (cp->code) {
case CP_CONF_REQ:
ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data);
- goto out;
+ break;
case CP_CONF_ACK:
if (cp->id == proto->cr_id)
ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL);
- goto out;
+ break;
case CP_CONF_REJ:
case CP_CONF_NAK:
if (cp->id == proto->cr_id)
ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL);
- goto out;
+ break;
case CP_TERM_REQ:
ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL);
- goto out;
+ break;
case CP_TERM_ACK:
ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL);
- goto out;
+ break;
case CP_CODE_REJ:
ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL);
- goto out;
+ break;
default:
len += sizeof(struct cp_header);
if (len > dev->mtu)
len = dev->mtu;
ppp_cp_event(dev, pid, RUC, 0, 0, len, cp);
- goto out;
+ break;
}
goto out;
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 56aeb011cb3..a49aec5efd2 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -134,15 +134,15 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
static int x25_open(struct net_device *dev)
{
- struct lapb_register_struct cb;
int result;
-
- cb.connect_confirmation = x25_connected;
- cb.connect_indication = x25_connected;
- cb.disconnect_confirmation = x25_disconnected;
- cb.disconnect_indication = x25_disconnected;
- cb.data_indication = x25_data_indication;
- cb.data_transmit = x25_data_transmit;
+ static const struct lapb_register_struct cb = {
+ .connect_confirmation = x25_connected,
+ .connect_indication = x25_connected,
+ .disconnect_confirmation = x25_disconnected,
+ .disconnect_indication = x25_disconnected,
+ .data_indication = x25_data_indication,
+ .data_transmit = x25_data_transmit,
+ };
result = lapb_register(dev, &cb);
if (result != LAPB_OK)
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index a817081737a..7beeb9b88a3 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -259,14 +259,13 @@ static int lapbeth_set_mac_address(struct net_device *dev, void *addr)
}
-static struct lapb_register_struct lapbeth_callbacks = {
+static const struct lapb_register_struct lapbeth_callbacks = {
.connect_confirmation = lapbeth_connected,
.connect_indication = lapbeth_connected,
.disconnect_confirmation = lapbeth_disconnected,
.disconnect_indication = lapbeth_disconnected,
.data_indication = lapbeth_data_indication,
.data_transmit = lapbeth_data_transmit,
-
};
/*
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 86127bcc9f7..783168cce07 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -212,7 +212,7 @@ static const struct net_device_ops sbni_netdev_ops = {
.ndo_open = sbni_open,
.ndo_stop = sbni_close,
.ndo_start_xmit = sbni_start_xmit,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_do_ioctl = sbni_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 46ceb3ae907..8a10bb730d5 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -434,14 +434,13 @@ static void x25_asy_disconnected(struct net_device *dev, int reason)
netif_rx(skb);
}
-static struct lapb_register_struct x25_asy_callbacks = {
+static const struct lapb_register_struct x25_asy_callbacks = {
.connect_confirmation = x25_asy_connected,
.connect_indication = x25_asy_connected,
.disconnect_confirmation = x25_asy_disconnected,
.disconnect_indication = x25_asy_disconnected,
.data_indication = x25_asy_data_indication,
.data_transmit = x25_asy_data_transmit,
-
};
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 298f2b0b631..9a644d052f1 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -599,7 +599,7 @@ void i2400mu_disconnect(struct usb_interface *iface)
*
* As well, the device might refuse going to sleep for whichever
* reason. In this case we just fail. For system suspend/hibernate,
- * we *can't* fail. We check PM_EVENT_AUTO to see if the
+ * we *can't* fail. We check PMSG_IS_AUTO to see if the
* suspend call comes from the USB stack or from the system and act
* in consequence.
*
@@ -615,7 +615,7 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
struct i2400m *i2400m = &i2400mu->i2400m;
#ifdef CONFIG_PM
- if (pm_msg.event & PM_EVENT_AUTO)
+ if (PMSG_IS_AUTO(pm_msg))
is_autosuspend = 1;
#endif
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index f354bd4e121..abd3b71cd4a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -271,6 +271,7 @@ config MWL8K
source "drivers/net/wireless/ath/Kconfig"
source "drivers/net/wireless/b43/Kconfig"
source "drivers/net/wireless/b43legacy/Kconfig"
+source "drivers/net/wireless/brcm80211/Kconfig"
source "drivers/net/wireless/hostap/Kconfig"
source "drivers/net/wireless/ipw2x00/Kconfig"
source "drivers/net/wireless/iwlwifi/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 7bba6a82b87..0a304b060b6 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -41,7 +41,7 @@ obj-$(CONFIG_ADM8211) += adm8211.o
obj-$(CONFIG_MWL8K) += mwl8k.o
-obj-$(CONFIG_IWLAGN) += iwlwifi/
+obj-$(CONFIG_IWLWIFI) += iwlwifi/
obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/
obj-$(CONFIG_RT2X00) += rt2x00/
@@ -58,3 +58,6 @@ obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/
obj-$(CONFIG_IWM) += iwmc3200wifi/
obj-$(CONFIG_MWIFIEX) += mwifiex/
+obj-$(CONFIG_BRCMFMAC) += brcm80211/
+obj-$(CONFIG_BRCMUMAC) += brcm80211/
+obj-$(CONFIG_BRCMSMAC) += brcm80211/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 43ebc44fc82..3b752d9fb3c 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1249,7 +1249,8 @@ static int adm8211_hw_reset(struct ieee80211_hw *dev)
return 0;
}
-static u64 adm8211_get_tsft(struct ieee80211_hw *dev)
+static u64 adm8211_get_tsft(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
u32 tsftl;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index e1b3e3c134f..ac1176a4f46 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2754,7 +2754,7 @@ static const struct net_device_ops airo_netdev_ops = {
.ndo_stop = airo_close,
.ndo_start_xmit = airo_start_xmit,
.ndo_get_stats = airo_get_stats,
- .ndo_set_multicast_list = airo_set_multicast_list,
+ .ndo_set_rx_mode = airo_set_multicast_list,
.ndo_set_mac_address = airo_set_mac_address,
.ndo_do_ioctl = airo_ioctl,
.ndo_change_mtu = airo_change_mtu,
@@ -2766,7 +2766,7 @@ static const struct net_device_ops mpi_netdev_ops = {
.ndo_stop = airo_close,
.ndo_start_xmit = mpi_start_xmit,
.ndo_get_stats = airo_get_stats,
- .ndo_set_multicast_list = airo_set_multicast_list,
+ .ndo_set_rx_mode = airo_set_multicast_list,
.ndo_set_mac_address = airo_set_mac_address,
.ndo_do_ioctl = airo_ioctl,
.ndo_change_mtu = airo_change_mtu,
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 298601436ee..39322d4121b 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -500,10 +500,9 @@ exit:
#define HEX2STR_BUFFERS 4
#define HEX2STR_MAX_LEN 64
-#define BIN2HEX(x) ((x) < 10 ? '0' + (x) : (x) + 'A' - 10)
/* Convert binary data into hex string */
-static char *hex2str(void *buf, int len)
+static char *hex2str(void *buf, size_t len)
{
static atomic_t a = ATOMIC_INIT(0);
static char bufs[HEX2STR_BUFFERS][3 * HEX2STR_MAX_LEN + 1];
@@ -514,18 +513,17 @@ static char *hex2str(void *buf, int len)
if (len > HEX2STR_MAX_LEN)
len = HEX2STR_MAX_LEN;
- if (len <= 0) {
- ret[0] = '\0';
- return ret;
- }
+ if (len == 0)
+ goto exit;
while (len--) {
- *obuf++ = BIN2HEX(*ibuf >> 4);
- *obuf++ = BIN2HEX(*ibuf & 0xf);
+ obuf = pack_hex_byte(obuf, *ibuf++);
*obuf++ = '-';
- ibuf++;
}
- *(--obuf) = '\0';
+ obuf--;
+
+exit:
+ *obuf = '\0';
return ret;
}
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index d1b23067619..07354883641 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -25,5 +25,6 @@ config ATH_DEBUG
source "drivers/net/wireless/ath/ath5k/Kconfig"
source "drivers/net/wireless/ath/ath9k/Kconfig"
source "drivers/net/wireless/ath/carl9170/Kconfig"
+source "drivers/net/wireless/ath/ath6kl/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 0e8f528c81c..d1214696a35 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_ATH5K) += ath5k/
obj-$(CONFIG_ATH9K_HW) += ath9k/
obj-$(CONFIG_CARL9170) += carl9170/
+obj-$(CONFIG_ATH6KL) += ath6kl/
obj-$(CONFIG_ATH_COMMON) += ath.o
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 17c4b56c387..908fdbc3e0e 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -71,9 +71,7 @@ struct ath_regulatory {
char alpha2[2];
u16 country_code;
u16 max_power_level;
- u32 tp_scale;
u16 current_rd;
- u16 current_rd_ext;
int16_t power_limit;
struct reg_dmn_pair_mapping *regpair;
};
@@ -140,9 +138,6 @@ struct ath_common {
u8 curbssid[ETH_ALEN];
u8 bssidmask[ETH_ALEN];
- u8 tx_chainmask;
- u8 rx_chainmask;
-
u32 rx_bufsize;
u32 keymax;
@@ -178,23 +173,29 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry);
void ath_hw_cycle_counters_update(struct ath_common *common);
int32_t ath_hw_get_listen_time(struct ath_common *common);
-extern __attribute__ ((format (printf, 3, 4))) int
-ath_printk(const char *level, struct ath_common *common, const char *fmt, ...);
+extern __attribute__((format (printf, 2, 3)))
+void ath_printk(const char *level, const char *fmt, ...);
+
+#define _ath_printk(level, common, fmt, ...) \
+do { \
+ __always_unused struct ath_common *unused = common; \
+ ath_printk(level, fmt, ##__VA_ARGS__); \
+} while (0)
#define ath_emerg(common, fmt, ...) \
- ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
#define ath_alert(common, fmt, ...) \
- ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
#define ath_crit(common, fmt, ...) \
- ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
#define ath_err(common, fmt, ...) \
- ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
#define ath_warn(common, fmt, ...) \
- ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
#define ath_notice(common, fmt, ...) \
- ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
#define ath_info(common, fmt, ...) \
- ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
+ _ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
/**
* enum ath_debug_level - atheros wireless debug level
@@ -246,27 +247,21 @@ enum ATH_DEBUG {
#ifdef CONFIG_ATH_DEBUG
-#define ath_dbg(common, dbg_mask, fmt, ...) \
-({ \
- int rtn; \
- if ((common)->debug_mask & dbg_mask) \
- rtn = ath_printk(KERN_DEBUG, common, fmt, \
- ##__VA_ARGS__); \
- else \
- rtn = 0; \
- \
- rtn; \
-})
+#define ath_dbg(common, dbg_mask, fmt, ...) \
+do { \
+ if ((common)->debug_mask & dbg_mask) \
+ _ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \
+} while (0)
+
#define ATH_DBG_WARN(foo, arg...) WARN(foo, arg)
#define ATH_DBG_WARN_ON_ONCE(foo) WARN_ON_ONCE(foo)
#else
-static inline __attribute__ ((format (printf, 3, 4))) int
-ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
- const char *fmt, ...)
+static inline __attribute__((format (printf, 3, 4)))
+void ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
+ const char *fmt, ...)
{
- return 0;
}
#define ATH_DBG_WARN(foo, arg...) do {} while (0)
#define ATH_DBG_WARN_ON_ONCE(foo) ({ \
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index a2a167363db..e5be7e70181 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -169,7 +169,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
__set_bit(ATH_STAT_2G_DISABLED, ah->status);
}
- ret = ath5k_init_softc(ah, &ath_ahb_bus_ops);
+ ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);
if (ret != 0) {
dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret);
ret = -ENODEV;
@@ -214,7 +214,7 @@ static int ath_ahb_remove(struct platform_device *pdev)
__raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE);
}
- ath5k_deinit_softc(ah);
+ ath5k_deinit_ah(ah);
platform_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 603ae15f139..bea90e6be70 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -15,7 +15,6 @@
*/
#include "ath5k.h"
-#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
index 03401539709..7358b6c83c6 100644
--- a/drivers/net/wireless/ath/ath5k/ani.h
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -16,6 +16,10 @@
#ifndef ANI_H
#define ANI_H
+#include "../ath.h"
+
+enum ath5k_phy_error_code;
+
/* these thresholds are relative to the ATH5K_ANI_LISTEN_PERIOD */
#define ATH5K_ANI_LISTEN_PERIOD 100
#define ATH5K_ANI_OFDM_TRIG_HIGH 500
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 277d5cbe006..fecbcd9a425 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -131,13 +131,6 @@
#define AR5K_REG_DISABLE_BITS(ah, _reg, _flags) \
ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) & ~(_flags), _reg)
-/* Access to PHY registers */
-#define AR5K_PHY_READ(ah, _reg) \
- ath5k_hw_reg_read(ah, (ah)->ah_phy + ((_reg) << 2))
-
-#define AR5K_PHY_WRITE(ah, _reg, _val) \
- ath5k_hw_reg_write(ah, _val, (ah)->ah_phy + ((_reg) << 2))
-
/* Access QCU registers per queue */
#define AR5K_REG_READ_Q(ah, _reg, _queue) \
(ath5k_hw_reg_read(ah, _reg) & (1 << _queue)) \
@@ -166,7 +159,6 @@
#define AR5K_TUNE_DMA_BEACON_RESP 2
#define AR5K_TUNE_SW_BEACON_RESP 10
#define AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF 0
-#define AR5K_TUNE_RADAR_ALERT false
#define AR5K_TUNE_MIN_TX_FIFO_THRES 1
#define AR5K_TUNE_MAX_TX_FIFO_THRES ((IEEE80211_MAX_FRAME_LEN / 64) + 1)
#define AR5K_TUNE_REGISTER_TIMEOUT 20000
@@ -295,17 +287,6 @@ enum ath5k_radio {
* Common silicon revision/version values
*/
-enum ath5k_srev_type {
- AR5K_VERSION_MAC,
- AR5K_VERSION_RAD,
-};
-
-struct ath5k_srev_name {
- const char *sr_name;
- enum ath5k_srev_type sr_type;
- u_int sr_val;
-};
-
#define AR5K_SREV_UNKNOWN 0xffff
#define AR5K_SREV_AR5210 0x00 /* Crete */
@@ -424,7 +405,6 @@ enum ath5k_driver_mode {
AR5K_MODE_11A = 0,
AR5K_MODE_11B = 1,
AR5K_MODE_11G = 2,
- AR5K_MODE_XR = 0,
AR5K_MODE_MAX = 3
};
@@ -694,33 +674,6 @@ struct ath5k_gain {
#define AR5K_SLOT_TIME_20 880
#define AR5K_SLOT_TIME_MAX 0xffff
-/* channel_flags */
-#define CHANNEL_CW_INT 0x0008 /* Contention Window interference detected */
-#define CHANNEL_CCK 0x0020 /* CCK channel */
-#define CHANNEL_OFDM 0x0040 /* OFDM channel */
-#define CHANNEL_2GHZ 0x0080 /* 2GHz channel. */
-#define CHANNEL_5GHZ 0x0100 /* 5GHz channel */
-#define CHANNEL_PASSIVE 0x0200 /* Only passive scan allowed */
-#define CHANNEL_DYN 0x0400 /* Dynamic CCK-OFDM channel (for g operation) */
-#define CHANNEL_XR 0x0800 /* XR channel */
-
-#define CHANNEL_A (CHANNEL_5GHZ | CHANNEL_OFDM)
-#define CHANNEL_B (CHANNEL_2GHZ | CHANNEL_CCK)
-#define CHANNEL_G (CHANNEL_2GHZ | CHANNEL_OFDM)
-#define CHANNEL_X (CHANNEL_5GHZ | CHANNEL_OFDM | CHANNEL_XR)
-
-#define CHANNEL_ALL (CHANNEL_OFDM | CHANNEL_CCK | \
- CHANNEL_2GHZ | CHANNEL_5GHZ)
-
-#define CHANNEL_MODES CHANNEL_ALL
-
-/*
- * Used internally for ath5k_hw_reset_tx_queue().
- * Also see struct struct ieee80211_channel.
- */
-#define IS_CHAN_XR(_c) ((_c->hw_value & CHANNEL_XR) != 0)
-#define IS_CHAN_B(_c) ((_c->hw_value & CHANNEL_B) != 0)
-
/*
* The following structure is used to map 2GHz channels to
* 5GHz Atheros channels.
@@ -977,7 +930,7 @@ enum ath5k_power_mode {
struct ath5k_capabilities {
/*
* Supported PHY modes
- * (ie. CHANNEL_A, CHANNEL_B, ...)
+ * (ie. AR5K_MODE_11A, AR5K_MODE_11B, ...)
*/
DECLARE_BITMAP(cap_mode, AR5K_MODE_MAX);
@@ -1013,16 +966,6 @@ struct ath5k_nfcal_hist {
s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */
};
-/**
- * struct avg_val - Helper structure for average calculation
- * @avg: contains the actual average value
- * @avg_weight: is used internally during calculation to prevent rounding errors
- */
-struct ath5k_avg_val {
- int avg;
- int avg_weight;
-};
-
#define ATH5K_LED_MAX_NAME_LEN 31
/*
@@ -1148,7 +1091,6 @@ struct ath5k_hw {
bool rx_pending; /* rx tasklet pending */
bool tx_pending; /* tx tasklet pending */
- u8 lladdr[ETH_ALEN];
u8 bssidmask[ETH_ALEN];
unsigned int led_pin, /* GPIO pin for driving LED */
@@ -1156,7 +1098,6 @@ struct ath5k_hw {
struct work_struct reset_work; /* deferred chip reset */
- unsigned int rxbufsize; /* rx size based on mtu */
struct list_head rxbuf; /* receive buffer */
spinlock_t rxbuflock;
u32 *rxlink; /* link ptr in last RX desc */
@@ -1208,10 +1149,8 @@ struct ath5k_hw {
enum ath5k_version ah_version;
enum ath5k_radio ah_radio;
- u32 ah_phy;
u32 ah_mac_srev;
u16 ah_mac_version;
- u16 ah_mac_revision;
u16 ah_phy_revision;
u16 ah_radio_5ghz_revision;
u16 ah_radio_2ghz_revision;
@@ -1279,12 +1218,6 @@ struct ath5k_hw {
bool txp_setup;
} ah_txpower;
- struct {
- bool r_enabled;
- int r_last_alert;
- struct ieee80211_channel r_last_channel;
- } ah_radar;
-
struct ath5k_nfcal_hist ah_nfcal_hist;
/* average beacon RSSI in our BSS (used by ANI) */
@@ -1327,36 +1260,13 @@ struct ath_bus_ops {
extern const struct ieee80211_ops ath5k_hw_ops;
/* Initialization and detach functions */
-int ath5k_init_softc(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops);
-void ath5k_deinit_softc(struct ath5k_hw *ah);
int ath5k_hw_init(struct ath5k_hw *ah);
void ath5k_hw_deinit(struct ath5k_hw *ah);
int ath5k_sysfs_register(struct ath5k_hw *ah);
void ath5k_sysfs_unregister(struct ath5k_hw *ah);
-/* base.c */
-struct ath5k_buf;
-struct ath5k_txq;
-
-void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable);
-bool ath5k_any_vif_assoc(struct ath5k_hw *ah);
-void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct ath5k_txq *txq);
-int ath5k_start(struct ieee80211_hw *hw);
-void ath5k_stop(struct ieee80211_hw *hw);
-void ath5k_mode_setup(struct ath5k_hw *ah, struct ieee80211_vif *vif);
-void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
- struct ieee80211_vif *vif);
-int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan);
-void ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf);
-int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-void ath5k_beacon_config(struct ath5k_hw *ah);
-void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
-void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
-
/*Chip id helper functions */
-const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
int ath5k_hw_read_srev(struct ath5k_hw *ah);
/* LED functions */
@@ -1367,7 +1277,7 @@ void ath5k_unregister_leds(struct ath5k_hw *ah);
/* Reset Functions */
-int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
+int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel);
int ath5k_hw_on_hold(struct ath5k_hw *ah);
int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
struct ieee80211_channel *channel, bool fast, bool skip_pcu);
@@ -1487,13 +1397,13 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
/* PHY functions */
/* Misc PHY functions */
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
+u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band);
int ath5k_hw_phy_disable(struct ath5k_hw *ah);
/* Gain_F optimization */
enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
/* PHY/RF channel functions */
-bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
+bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel);
/* PHY calibration */
void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index f8a6b380d96..91627dd2c26 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -25,7 +25,6 @@
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
-#include "base.h"
/**
* ath5k_hw_post - Power On Self Test helper function
@@ -95,7 +94,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
/**
* ath5k_hw_init - Check if hw is supported and init the needed structs
*
- * @ah: The &struct ath5k_hw we got from the driver's init_softc function
+ * @ah: The &struct ath5k_hw associated with the device
*
* Check if the device is supported, perform a POST and initialize the needed
* structs. Returns -ENOMEM if we don't have memory for the needed structs,
@@ -114,7 +113,6 @@ int ath5k_hw_init(struct ath5k_hw *ah)
/*
* HW information
*/
- ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
ah->ah_imr = 0;
@@ -137,9 +135,8 @@ int ath5k_hw_init(struct ath5k_hw *ah)
else
ah->ah_version = AR5K_AR5212;
- /* Get the MAC revision */
+ /* Get the MAC version */
ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
- ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
/* Fill the ath5k_hw struct with the needed functions */
ret = ath5k_hw_init_desc_functions(ah);
@@ -147,7 +144,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
goto err;
/* Bring device out of sleep and reset its units */
- ret = ath5k_hw_nic_wakeup(ah, 0, true);
+ ret = ath5k_hw_nic_wakeup(ah, NULL);
if (ret)
goto err;
@@ -155,8 +152,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
0xffffffff;
ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
- CHANNEL_5GHZ);
- ah->ah_phy = AR5K_PHY(0);
+ IEEE80211_BAND_5GHZ);
/* Try to identify radio chip based on its srev */
switch (ah->ah_radio_5ghz_revision & 0xf0) {
@@ -164,14 +160,14 @@ int ath5k_hw_init(struct ath5k_hw *ah)
ah->ah_radio = AR5K_RF5111;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
- CHANNEL_2GHZ);
+ IEEE80211_BAND_2GHZ);
break;
case AR5K_SREV_RAD_5112:
case AR5K_SREV_RAD_2112:
ah->ah_radio = AR5K_RF5112;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
- CHANNEL_2GHZ);
+ IEEE80211_BAND_2GHZ);
break;
case AR5K_SREV_RAD_2413:
ah->ah_radio = AR5K_RF2413;
@@ -208,7 +204,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
ah->ah_radio = AR5K_RF5111;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
- CHANNEL_2GHZ);
+ IEEE80211_BAND_2GHZ);
} else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) ||
ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) ||
ah->ah_phy_revision == AR5K_SREV_PHY_2425) {
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index c3119a6caac..b346d049200 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -52,6 +52,7 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
+#include <linux/nl80211.h>
#include <net/ieee80211_radiotap.h>
@@ -61,6 +62,8 @@
#include "reg.h"
#include "debug.h"
#include "ani.h"
+#include "ath5k.h"
+#include "../regd.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -272,20 +275,18 @@ static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
unsigned int mode, unsigned int max)
{
- unsigned int count, size, chfreq, freq, ch;
+ unsigned int count, size, freq, ch;
enum ieee80211_band band;
switch (mode) {
case AR5K_MODE_11A:
/* 1..220, but 2GHz frequencies are filtered by check_channel */
size = 220;
- chfreq = CHANNEL_5GHZ;
band = IEEE80211_BAND_5GHZ;
break;
case AR5K_MODE_11B:
case AR5K_MODE_11G:
size = 26;
- chfreq = CHANNEL_2GHZ;
band = IEEE80211_BAND_2GHZ;
break;
default:
@@ -300,26 +301,19 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
if (freq == 0) /* mapping failed - not a standard channel */
continue;
+ /* Write channel info, needed for ath5k_channel_ok() */
+ channels[count].center_freq = freq;
+ channels[count].band = band;
+ channels[count].hw_value = mode;
+
/* Check if channel is supported by the chipset */
- if (!ath5k_channel_ok(ah, freq, chfreq))
+ if (!ath5k_channel_ok(ah, &channels[count]))
continue;
if (!modparam_all_channels &&
!ath5k_is_standard_channel(ch, band))
continue;
- /* Write channel info and increment counter */
- channels[count].center_freq = freq;
- channels[count].band = band;
- switch (mode) {
- case AR5K_MODE_11A:
- case AR5K_MODE_11G:
- channels[count].hw_value = chfreq | CHANNEL_OFDM;
- break;
- case AR5K_MODE_11B:
- channels[count].hw_value = CHANNEL_B;
- }
-
count++;
}
@@ -927,12 +921,6 @@ ath5k_txq_setup(struct ath5k_hw *ah,
*/
return ERR_PTR(qnum);
}
- if (qnum >= ARRAY_SIZE(ah->txqs)) {
- ATH5K_ERR(ah, "hw qnum %u out of range, max %tu!\n",
- qnum, ARRAY_SIZE(ah->txqs));
- ath5k_hw_release_tx_queue(ah, qnum);
- return ERR_PTR(-EINVAL);
- }
txq = &ah->txqs[qnum];
if (!txq->setup) {
txq->qnum = qnum;
@@ -2349,7 +2337,7 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
\*************************/
int __devinit
-ath5k_init_softc(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
+ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
{
struct ieee80211_hw *hw = ah->hw;
struct ath_common *common;
@@ -2867,7 +2855,6 @@ ath5k_init(struct ieee80211_hw *hw)
}
SET_IEEE80211_PERM_ADDR(hw, mac);
- memcpy(&ah->lladdr, mac, ETH_ALEN);
/* All MAC address bits matter for ACKs */
ath5k_update_bssid_mask_and_opmode(ah, NULL);
@@ -2903,7 +2890,7 @@ err:
}
void
-ath5k_deinit_softc(struct ath5k_hw *ah)
+ath5k_deinit_ah(struct ath5k_hw *ah)
{
struct ieee80211_hw *hw = ah->hw;
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index a81f28d5bdd..6c94c7ff235 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -38,19 +38,27 @@
/*
* Definitions for the Atheros Wireless LAN controller driver.
*/
-#ifndef _DEV_ATH_ATHVAR_H
-#define _DEV_ATH_ATHVAR_H
+#ifndef _DEV_ATH5K_BASE_H
+#define _DEV_ATH5K_BASE_H
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/wireless.h>
-#include <linux/if_ether.h>
-#include <linux/rfkill.h>
-#include <linux/workqueue.h>
+struct ieee80211_vif;
+struct ieee80211_hw;
+struct ath5k_hw;
+struct ath5k_txq;
+struct ieee80211_channel;
+struct ath_bus_ops;
+enum nl80211_iftype;
-#include "ath5k.h"
-#include "../regd.h"
-#include "../ath.h"
+enum ath5k_srev_type {
+ AR5K_VERSION_MAC,
+ AR5K_VERSION_RAD,
+};
+
+struct ath5k_srev_name {
+ const char *sr_name;
+ enum ath5k_srev_type sr_type;
+ u_int sr_val;
+};
struct ath5k_buf {
struct list_head list;
@@ -65,7 +73,6 @@ struct ath5k_vif {
enum nl80211_iftype opmode;
int bslot;
struct ath5k_buf *bbuf; /* beacon buffer */
- u8 lladdr[ETH_ALEN];
};
struct ath5k_vif_iter_data {
@@ -78,8 +85,30 @@ struct ath5k_vif_iter_data {
enum nl80211_iftype opmode;
int n_stas;
};
+
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
+bool ath5k_any_vif_assoc(struct ath5k_hw *ah);
+
+int ath5k_start(struct ieee80211_hw *hw);
+void ath5k_stop(struct ieee80211_hw *hw);
+
+void ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf);
+int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void ath5k_beacon_config(struct ath5k_hw *ah);
+void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable);
+
+void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
+ struct ieee80211_vif *vif);
+int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan);
+void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
+void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
+void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath5k_txq *txq);
+
+const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
+int ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops);
+void ath5k_deinit_ah(struct ath5k_hw *ah);
/* Check whether BSSID mask is supported */
#define ath5k_hw_hasbssidmask(_ah) (ah->ah_version == AR5K_AR5212)
@@ -87,4 +116,4 @@ void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
/* Check whether virtual EOL is supported */
#define ath5k_hw_hasveol(_ah) (ah->ah_version != AR5K_AR5210)
-#endif
+#endif /* _DEV_ATH5K_BASE_H */
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index eefe670e28a..810fba96702 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -24,7 +24,7 @@
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
-#include "base.h"
+#include "../regd.h"
/*
* Fill the capabilities struct
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index ccca724de17..fce8c904eea 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -58,19 +58,18 @@
* THE POSSIBILITY OF SUCH DAMAGES.
*/
-#include "base.h"
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/list.h>
#include "debug.h"
+#include "ath5k.h"
+#include "reg.h"
+#include "base.h"
static unsigned int ath5k_debug;
module_param_named(debug, ath5k_debug, uint, 0);
-#ifdef CONFIG_ATH5K_DEBUG
-
-#include <linux/seq_file.h>
-#include "reg.h"
-#include "ani.h"
-
static int ath5k_debugfs_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
@@ -1031,5 +1030,3 @@ ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf)
td->tx_stat.tx_status_0, td->tx_stat.tx_status_1,
done ? ' ' : (ts.ts_status == 0) ? '*' : '!');
}
-
-#endif /* ifdef CONFIG_ATH5K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 846535f59ef..7e88dda8222 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -24,7 +24,6 @@
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
-#include "base.h"
/************************\
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 0d5d4033f12..2481f9c7f4b 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -35,7 +35,6 @@
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
-#include "base.h"
/*********\
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 9068b916526..cd708c15b77 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -26,7 +26,6 @@
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
-#include "base.h"
/******************\
@@ -1780,13 +1779,12 @@ ath5k_eeprom_detach(struct ath5k_hw *ah)
int
ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel)
{
- switch (channel->hw_value & CHANNEL_MODES) {
- case CHANNEL_A:
- case CHANNEL_XR:
+ switch (channel->hw_value) {
+ case AR5K_MODE_11A:
return AR5K_EEPROM_MODE_11A;
- case CHANNEL_G:
+ case AR5K_MODE_11G:
return AR5K_EEPROM_MODE_11G;
- case CHANNEL_B:
+ case AR5K_MODE_11B:
return AR5K_EEPROM_MODE_11B;
default:
return -1;
diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c
index bc90503f4b7..85929781191 100644
--- a/drivers/net/wireless/ath/ath5k/gpio.c
+++ b/drivers/net/wireless/ath/ath5k/gpio.c
@@ -23,7 +23,6 @@
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
-#include "base.h"
/*
* Set led state
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index 5ab607f40e0..1ffecc0fd3e 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -22,7 +22,6 @@
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
-#include "base.h"
/*
* Mode-independent initial register writes
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 8c17a00f7da..c1151c72371 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -41,7 +41,6 @@
#include <linux/pci.h>
#include "ath5k.h"
-#include "base.h"
#define ATH_SDEVICE(subv, subd) \
.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 2a715ca0c5e..6ed4c0717e3 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -41,8 +41,10 @@
*
*/
+#include <net/mac80211.h>
#include <asm/unaligned.h>
+#include "ath5k.h"
#include "base.h"
#include "reg.h"
@@ -137,11 +139,8 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
/* Any MAC address is fine, all others are included through the
* filter.
*/
- memcpy(&ah->lladdr, vif->addr, ETH_ALEN);
ath5k_hw_set_lladdr(ah, vif->addr);
- memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
-
ath5k_update_bssid_mask_and_opmode(ah, vif);
ret = 0;
end:
@@ -564,7 +563,7 @@ ath5k_get_stats(struct ieee80211_hw *hw,
static int
-ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ath5k_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct ath5k_hw *ah = hw->priv;
@@ -603,7 +602,7 @@ ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
static u64
-ath5k_get_tsf(struct ieee80211_hw *hw)
+ath5k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = hw->priv;
@@ -612,7 +611,7 @@ ath5k_get_tsf(struct ieee80211_hw *hw)
static void
-ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+ath5k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 tsf)
{
struct ath5k_hw *ah = hw->priv;
@@ -621,7 +620,7 @@ ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
static void
-ath5k_reset_tsf(struct ieee80211_hw *hw)
+ath5k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = hw->priv;
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index eaf79b49341..c1dff2ced04 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -261,7 +261,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
ah->iobase = mem; /* So we can unmap it on detach */
/* Initialize */
- ret = ath5k_init_softc(ah, &ath_pci_bus_ops);
+ ret = ath5k_init_ah(ah, &ath_pci_bus_ops);
if (ret)
goto err_free;
@@ -287,7 +287,7 @@ ath5k_pci_remove(struct pci_dev *pdev)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath5k_hw *ah = hw->priv;
- ath5k_deinit_softc(ah);
+ ath5k_deinit_ah(ah);
pci_iounmap(pdev, ah->iobase);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 06731384506..a7eafa3edc2 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -29,7 +29,6 @@
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
-#include "base.h"
/*
* AR5212+ can use higher rates for ack transmission
@@ -152,7 +151,7 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
case AR5K_BWMODE_DEFAULT:
default:
slot_time = AR5K_INIT_SLOT_TIME_DEFAULT;
- if ((channel->hw_value & CHANNEL_CCK) && !ah->ah_short_slot)
+ if ((channel->hw_value == AR5K_MODE_11B) && !ah->ah_short_slot)
slot_time = AR5K_INIT_SLOT_TIME_B;
break;
}
@@ -183,7 +182,7 @@ unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
case AR5K_BWMODE_DEFAULT:
sifs = AR5K_INIT_SIFS_DEFAULT_BG;
default:
- if (channel->hw_value & CHANNEL_5GHZ)
+ if (channel->band == IEEE80211_BAND_5GHZ)
sifs = AR5K_INIT_SIFS_DEFAULT_A;
break;
}
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 81e465e7017..01cb72de44c 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -26,9 +26,9 @@
#include "ath5k.h"
#include "reg.h"
-#include "base.h"
#include "rfbuffer.h"
#include "rfgain.h"
+#include "../regd.h"
/******************\
@@ -38,7 +38,7 @@
/*
* Get the PHY Chip revision
*/
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
+u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
{
unsigned int i;
u32 srev;
@@ -47,11 +47,11 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
/*
* Set the radio chip access register
*/
- switch (chan) {
- case CHANNEL_2GHZ:
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0));
break;
- case CHANNEL_5GHZ:
+ case IEEE80211_BAND_5GHZ:
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
break;
default:
@@ -84,14 +84,16 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
/*
* Check if a channel is supported
*/
-bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags)
+bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
{
+ u16 freq = channel->center_freq;
+
/* Check if the channel is in our supported range */
- if (flags & CHANNEL_2GHZ) {
+ if (channel->band == IEEE80211_BAND_2GHZ) {
if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) &&
(freq <= ah->ah_capabilities.cap_range.range_2ghz_max))
return true;
- } else if (flags & CHANNEL_5GHZ)
+ } else if (channel->band == IEEE80211_BAND_5GHZ)
if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) &&
(freq <= ah->ah_capabilities.cap_range.range_5ghz_max))
return true;
@@ -224,7 +226,7 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
ds_coef_exp, ds_coef_man, clock;
BUG_ON(!(ah->ah_version == AR5K_AR5212) ||
- !(channel->hw_value & CHANNEL_OFDM));
+ (channel->hw_value == AR5K_MODE_11B));
/* Get coefficient
* ALGO: coef = (5 * clock / carrier_freq) / 2
@@ -298,7 +300,7 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
u32 delay;
delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
AR5K_PHY_RX_DELAY_M;
- delay = (channel->hw_value & CHANNEL_CCK) ?
+ delay = (channel->hw_value == AR5K_MODE_11B) ?
((delay << 2) / 22) : (delay / 10);
if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
delay = delay << 1;
@@ -798,9 +800,9 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
}
/* Set Output and Driver bias current (OB/DB) */
- if (channel->hw_value & CHANNEL_2GHZ) {
+ if (channel->band == IEEE80211_BAND_2GHZ) {
- if (channel->hw_value & CHANNEL_CCK)
+ if (channel->hw_value == AR5K_MODE_11B)
ee_mode = AR5K_EEPROM_MODE_11B;
else
ee_mode = AR5K_EEPROM_MODE_11G;
@@ -825,7 +827,7 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
AR5K_RF_DB_2GHZ, true);
/* RF5111 always needs OB/DB for 5GHz, even if we use 2GHz */
- } else if ((channel->hw_value & CHANNEL_5GHZ) ||
+ } else if ((channel->band == IEEE80211_BAND_5GHZ) ||
(ah->ah_radio == AR5K_RF5111)) {
/* For 11a, Turbo and XR we need to choose
@@ -857,7 +859,7 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
if (ah->ah_radio == AR5K_RF5111) {
/* Set gain_F settings according to current step */
- if (channel->hw_value & CHANNEL_OFDM) {
+ if (channel->hw_value != AR5K_MODE_11B) {
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL,
AR5K_PHY_FRAME_CTL_TX_CLIP,
@@ -914,7 +916,7 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
if (ah->ah_radio == AR5K_RF5112) {
/* Set gain_F settings according to current step */
- if (channel->hw_value & CHANNEL_OFDM) {
+ if (channel->hw_value != AR5K_MODE_11B) {
ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[0],
AR5K_RF_MIXGAIN_OVR, true);
@@ -1026,7 +1028,7 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
}
if (ah->ah_radio == AR5K_RF5413 &&
- channel->hw_value & CHANNEL_2GHZ) {
+ channel->band == IEEE80211_BAND_2GHZ) {
ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_DERBY_CHAN_SEL_MODE,
true);
@@ -1138,7 +1140,7 @@ static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
*/
data0 = data1 = 0;
- if (channel->hw_value & CHANNEL_2GHZ) {
+ if (channel->band == IEEE80211_BAND_2GHZ) {
/* Map 2GHz channel to 5GHz Atheros channel ID */
ret = ath5k_hw_rf5111_chan2athchan(
ieee80211_frequency_to_channel(channel->center_freq),
@@ -1265,10 +1267,9 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
int ret;
/*
* Check bounds supported by the PHY (we don't care about regulatory
- * restrictions at this point). Note: hw_value already has the band
- * (CHANNEL_2GHZ, or CHANNEL_5GHZ) so we inform ath5k_channel_ok()
- * of the band by that */
- if (!ath5k_channel_ok(ah, channel->center_freq, channel->hw_value)) {
+ * restrictions at this point).
+ */
+ if (!ath5k_channel_ok(ah, channel)) {
ATH5K_ERR(ah,
"channel frequency (%u MHz) out of supported "
"band range\n",
@@ -1614,7 +1615,7 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
ret = ath5k_hw_rf511x_iq_calibrate(ah);
if ((ah->ah_radio == AR5K_RF5111 || ah->ah_radio == AR5K_RF5112) &&
- (channel->hw_value & CHANNEL_OFDM))
+ (channel->hw_value != AR5K_MODE_11B))
ath5k_hw_request_rfgain_probe(ah);
return ret;
@@ -1641,7 +1642,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
/* Convert current frequency to fbin value (the same way channels
* are stored on EEPROM, check out ath5k_eeprom_bin2freq) and scale
* up by 2 so we can compare it later */
- if (channel->hw_value & CHANNEL_2GHZ) {
+ if (channel->band == IEEE80211_BAND_2GHZ) {
chan_fbin = (channel->center_freq - 2300) * 10;
freq_band = AR5K_EEPROM_BAND_2GHZ;
} else {
@@ -1703,7 +1704,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
spur_freq_sigma_delta = (spur_delta_phase >> 10);
symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
default:
- if (channel->hw_value == CHANNEL_A) {
+ if (channel->band == IEEE80211_BAND_5GHZ) {
/* Both sample_freq and chip_freq are 40MHz */
spur_delta_phase = (spur_offset << 17) / 25;
spur_freq_sigma_delta =
@@ -2226,15 +2227,20 @@ ath5k_get_chan_pcal_surrounding_piers(struct ath5k_hw *ah,
idx_l = 0;
idx_r = 0;
- if (!(channel->hw_value & CHANNEL_OFDM)) {
+ switch (channel->hw_value) {
+ case AR5K_EEPROM_MODE_11A:
+ pcinfo = ee->ee_pwr_cal_a;
+ mode = AR5K_EEPROM_MODE_11A;
+ break;
+ case AR5K_EEPROM_MODE_11B:
pcinfo = ee->ee_pwr_cal_b;
mode = AR5K_EEPROM_MODE_11B;
- } else if (channel->hw_value & CHANNEL_2GHZ) {
+ break;
+ case AR5K_EEPROM_MODE_11G:
+ default:
pcinfo = ee->ee_pwr_cal_g;
mode = AR5K_EEPROM_MODE_11G;
- } else {
- pcinfo = ee->ee_pwr_cal_a;
- mode = AR5K_EEPROM_MODE_11A;
+ break;
}
max = ee->ee_n_piers[mode] - 1;
@@ -2303,15 +2309,20 @@ ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
idx_l = 0;
idx_r = 0;
- if (!(channel->hw_value & CHANNEL_OFDM)) {
+ switch (channel->hw_value) {
+ case AR5K_MODE_11A:
+ rpinfo = ee->ee_rate_tpwr_a;
+ mode = AR5K_EEPROM_MODE_11A;
+ break;
+ case AR5K_MODE_11B:
rpinfo = ee->ee_rate_tpwr_b;
mode = AR5K_EEPROM_MODE_11B;
- } else if (channel->hw_value & CHANNEL_2GHZ) {
+ break;
+ case AR5K_MODE_11G:
+ default:
rpinfo = ee->ee_rate_tpwr_g;
mode = AR5K_EEPROM_MODE_11G;
- } else {
- rpinfo = ee->ee_rate_tpwr_a;
- mode = AR5K_EEPROM_MODE_11A;
+ break;
}
max = ee->ee_rate_target_pwr_num[mode] - 1;
@@ -2392,24 +2403,22 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah,
ctl_mode = ath_regd_get_band_ctl(regulatory, channel->band);
- switch (channel->hw_value & CHANNEL_MODES) {
- case CHANNEL_A:
+ switch (channel->hw_value) {
+ case AR5K_MODE_11A:
if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
ctl_mode |= AR5K_CTL_TURBO;
else
ctl_mode |= AR5K_CTL_11A;
break;
- case CHANNEL_G:
+ case AR5K_MODE_11G:
if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
ctl_mode |= AR5K_CTL_TURBOG;
else
ctl_mode |= AR5K_CTL_11G;
break;
- case CHANNEL_B:
+ case AR5K_MODE_11B:
ctl_mode |= AR5K_CTL_11B;
break;
- case CHANNEL_XR:
- /* Fall through */
default:
return;
}
@@ -3292,7 +3301,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
/* Write OFDM timings on 5212*/
if (ah->ah_version == AR5K_AR5212 &&
- channel->hw_value & CHANNEL_OFDM) {
+ channel->hw_value != AR5K_MODE_11B) {
ret = ath5k_hw_write_ofdm_timings(ah, channel);
if (ret)
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 65f10398999..776654228ea 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -23,7 +23,6 @@ Queue Control Unit, DFS Control Unit Functions
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
-#include "base.h"
/******************\
@@ -185,13 +184,6 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
case AR5K_TX_QUEUE_CAB:
queue = AR5K_TX_QUEUE_ID_CAB;
break;
- case AR5K_TX_QUEUE_XR_DATA:
- if (ah->ah_version != AR5K_AR5212)
- ATH5K_ERR(ah,
- "XR data queues only supported in"
- " 5212!\n");
- queue = AR5K_TX_QUEUE_ID_XR_DATA;
- break;
default:
return -EINVAL;
}
@@ -544,7 +536,7 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
*
* Also we have different lowest rate for 802.11a
*/
- if (channel->hw_value & CHANNEL_5GHZ)
+ if (channel->band == IEEE80211_BAND_5GHZ)
rate = &ah->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
else
rate = &ah->sbands[IEEE80211_BAND_2GHZ].bitrates[0];
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 0686c5d8d56..2abac257b4b 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -30,7 +30,6 @@
#include <linux/platform_device.h>
#include "ath5k.h"
#include "reg.h"
-#include "base.h"
#include "debug.h"
@@ -102,12 +101,18 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
/*
* Set core clock frequency
*/
- if (channel->hw_value & CHANNEL_5GHZ)
- clock = 40; /* 802.11a */
- else if (channel->hw_value & CHANNEL_CCK)
- clock = 22; /* 802.11b */
- else
- clock = 44; /* 802.11g */
+ switch (channel->hw_value) {
+ case AR5K_MODE_11A:
+ clock = 40;
+ break;
+ case AR5K_MODE_11B:
+ clock = 22;
+ break;
+ case AR5K_MODE_11G:
+ default:
+ clock = 44;
+ break;
+ }
/* Use clock multiplier for non-default
* bwmode */
@@ -581,8 +586,9 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
/*
* Bring up MAC + PHY Chips and program PLL
+ * Channel is NULL for the initial wakeup.
*/
-int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
+int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
{
struct pci_dev *pdev = ah->pdev;
u32 turbo, mode, clock, bus_flags;
@@ -592,7 +598,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
mode = 0;
clock = 0;
- if ((ath5k_get_bus_type(ah) != ATH_AHB) || !initial) {
+ if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) {
/* Wakeup the device */
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
@@ -652,7 +658,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
/* On initialization skip PLL programming since we don't have
* a channel / mode set yet */
- if (initial)
+ if (!channel)
return 0;
if (ah->ah_version != AR5K_AR5210) {
@@ -668,13 +674,13 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
clock = AR5K_PHY_PLL_RF5111; /*Zero*/
}
- if (flags & CHANNEL_2GHZ) {
+ if (channel->band == IEEE80211_BAND_2GHZ) {
mode |= AR5K_PHY_MODE_FREQ_2GHZ;
clock |= AR5K_PHY_PLL_44MHZ;
- if (flags & CHANNEL_CCK) {
+ if (channel->hw_value == AR5K_MODE_11B) {
mode |= AR5K_PHY_MODE_MOD_CCK;
- } else if (flags & CHANNEL_OFDM) {
+ } else {
/* XXX Dynamic OFDM/CCK is not supported by the
* AR5211 so we set MOD_OFDM for plain g (no
* CCK headers) operation. We need to test
@@ -686,27 +692,16 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
mode |= AR5K_PHY_MODE_MOD_OFDM;
else
mode |= AR5K_PHY_MODE_MOD_DYN;
- } else {
- ATH5K_ERR(ah,
- "invalid radio modulation mode\n");
- return -EINVAL;
}
- } else if (flags & CHANNEL_5GHZ) {
- mode |= AR5K_PHY_MODE_FREQ_5GHZ;
+ } else if (channel->band == IEEE80211_BAND_5GHZ) {
+ mode |= (AR5K_PHY_MODE_FREQ_5GHZ |
+ AR5K_PHY_MODE_MOD_OFDM);
/* Different PLL setting for 5413 */
if (ah->ah_radio == AR5K_RF5413)
clock = AR5K_PHY_PLL_40MHZ_5413;
else
clock |= AR5K_PHY_PLL_40MHZ;
-
- if (flags & CHANNEL_OFDM)
- mode |= AR5K_PHY_MODE_MOD_OFDM;
- else {
- ATH5K_ERR(ah,
- "invalid radio modulation mode\n");
- return -EINVAL;
- }
} else {
ATH5K_ERR(ah, "invalid radio frequency mode\n");
return -EINVAL;
@@ -822,7 +817,7 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
u32 data;
ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
AR5K_PHY_CCKTXCTL);
- if (channel->hw_value & CHANNEL_5GHZ)
+ if (channel->band == IEEE80211_BAND_5GHZ)
data = 0xffb81020;
else
data = 0xffb80d20;
@@ -905,7 +900,7 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
/* Set CCK to OFDM power delta on tx power
* adjustment register */
if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
- if (channel->hw_value == CHANNEL_G)
+ if (channel->hw_value == AR5K_MODE_11G)
ath5k_hw_reg_write(ah,
AR5K_REG_SM((ee->ee_cck_ofdm_gain_delta * -1),
AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA) |
@@ -1084,37 +1079,23 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
ret = 0;
}
- switch (channel->hw_value & CHANNEL_MODES) {
- case CHANNEL_A:
- mode = AR5K_MODE_11A;
+ mode = channel->hw_value;
+ switch (mode) {
+ case AR5K_MODE_11A:
break;
- case CHANNEL_G:
-
+ case AR5K_MODE_11G:
if (ah->ah_version <= AR5K_AR5211) {
ATH5K_ERR(ah,
"G mode not available on 5210/5211");
return -EINVAL;
}
-
- mode = AR5K_MODE_11G;
break;
- case CHANNEL_B:
-
+ case AR5K_MODE_11B:
if (ah->ah_version < AR5K_AR5211) {
ATH5K_ERR(ah,
"B mode not available on 5210");
return -EINVAL;
}
-
- mode = AR5K_MODE_11B;
- break;
- case CHANNEL_XR:
- if (ah->ah_version == AR5K_AR5211) {
- ATH5K_ERR(ah,
- "XR mode not available on 5211");
- return -EINVAL;
- }
- mode = AR5K_MODE_XR;
break;
default:
ATH5K_ERR(ah,
@@ -1200,7 +1181,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
}
/* Wakeup the device */
- ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false);
+ ret = ath5k_hw_nic_wakeup(ah, channel);
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath/ath5k/rfkill.c b/drivers/net/wireless/ath/ath5k/rfkill.c
index 945fc9f21e7..270a319f3ae 100644
--- a/drivers/net/wireless/ath/ath5k/rfkill.c
+++ b/drivers/net/wireless/ath/ath5k/rfkill.c
@@ -33,7 +33,7 @@
* THE POSSIBILITY OF SUCH DAMAGES.
*/
-#include "base.h"
+#include "ath5k.h"
static inline void ath5k_rfkill_disable(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
index 0244a36ba95..9364da7bd13 100644
--- a/drivers/net/wireless/ath/ath5k/sysfs.c
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -1,7 +1,6 @@
#include <linux/device.h>
#include <linux/pci.h>
-#include "base.h"
#include "ath5k.h"
#include "reg.h"
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
index c741c871f4e..39f002ed4a8 100644
--- a/drivers/net/wireless/ath/ath5k/trace.h
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -2,7 +2,6 @@
#define __TRACE_ATH5K_H
#include <linux/tracepoint.h>
-#include "base.h"
#ifndef CONFIG_ATH5K_TRACER
#undef TRACE_EVENT
@@ -11,6 +10,8 @@ static inline void trace_ ## name(proto) {}
#endif
struct sk_buff;
+struct ath5k_txq;
+struct ath5k_tx_status;
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath5k
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
new file mode 100644
index 00000000000..3d5f8be20ea
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -0,0 +1,15 @@
+config ATH6KL
+ tristate "Atheros ath6kl support"
+ depends on MMC
+ depends on CFG80211
+ ---help---
+ This module adds support for wireless adapters based on
+ Atheros AR6003 chipset running over SDIO. If you choose to
+ build it as a module, it will be called ath6kl. Pls note
+ that AR6002 and AR6001 are not supported by this driver.
+
+config ATH6KL_DEBUG
+ bool "Atheros ath6kl debugging"
+ depends on ATH6KL
+ ---help---
+ Enables debug support
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
new file mode 100644
index 00000000000..8f7a0d1c290
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -0,0 +1,37 @@
+#------------------------------------------------------------------------------
+# Copyright (c) 2004-2010 Atheros Communications Inc.
+# All rights reserved.
+#
+#
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+#
+#
+# Author(s): ="Atheros"
+#------------------------------------------------------------------------------
+
+obj-$(CONFIG_ATH6KL) := ath6kl.o
+ath6kl-y += debug.o
+ath6kl-y += htc_hif.o
+ath6kl-y += htc.o
+ath6kl-y += bmi.o
+ath6kl-y += cfg80211.o
+ath6kl-y += init.o
+ath6kl-y += main.o
+ath6kl-y += txrx.o
+ath6kl-y += wmi.o
+ath6kl-y += sdio.o
+ath6kl-$(CONFIG_NL80211_TESTMODE) += testmode.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
new file mode 100644
index 00000000000..c5d11cc536e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -0,0 +1,689 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hif-ops.h"
+#include "target.h"
+#include "debug.h"
+
+static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar)
+{
+ u32 addr;
+ unsigned long timeout;
+ int ret;
+
+ ar->bmi.cmd_credits = 0;
+
+ /* Read the counter register to get the command credits */
+ addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
+
+ timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+ while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
+
+ /*
+ * Hit the credit counter with a 4-byte access, the first byte
+ * read will hit the counter and cause a decrement, while the
+ * remaining 3 bytes has no effect. The rationale behind this
+ * is to make all HIF accesses 4-byte aligned.
+ */
+ ret = hif_read_write_sync(ar, addr,
+ (u8 *)&ar->bmi.cmd_credits, 4,
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("Unable to decrement the command credit count register: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* The counter is only 8 bits.
+ * Ignore anything in the upper 3 bytes
+ */
+ ar->bmi.cmd_credits &= 0xFF;
+ }
+
+ if (!ar->bmi.cmd_credits) {
+ ath6kl_err("bmi communication timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
+{
+ unsigned long timeout;
+ u32 rx_word = 0;
+ int ret = 0;
+
+ timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+ while (time_before(jiffies, timeout) && !rx_word) {
+ ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS,
+ (u8 *)&rx_word, sizeof(rx_word),
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
+ return ret;
+ }
+
+ /* all we really want is one bit */
+ rx_word &= (1 << ENDPOINT1);
+ }
+
+ if (!rx_word) {
+ ath6kl_err("bmi_recv_buf FIFO empty\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ int ret;
+ u32 addr;
+
+ ret = ath6kl_get_bmi_cmd_credits(ar);
+ if (ret)
+ return ret;
+
+ addr = ar->mbox_info.htc_addr;
+
+ ret = hif_read_write_sync(ar, addr, buf, len,
+ HIF_WR_SYNC_BYTE_INC);
+ if (ret)
+ ath6kl_err("unable to send the bmi data to the device\n");
+
+ return ret;
+}
+
+static int ath6kl_bmi_recv_buf(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ int ret;
+ u32 addr;
+
+ /*
+ * During normal bootup, small reads may be required.
+ * Rather than issue an HIF Read and then wait as the Target
+ * adds successive bytes to the FIFO, we wait here until
+ * we know that response data is available.
+ *
+ * This allows us to cleanly timeout on an unexpected
+ * Target failure rather than risk problems at the HIF level.
+ * In particular, this avoids SDIO timeouts and possibly garbage
+ * data on some host controllers. And on an interconnect
+ * such as Compact Flash (as well as some SDIO masters) which
+ * does not provide any indication on data timeout, it avoids
+ * a potential hang or garbage response.
+ *
+ * Synchronization is more difficult for reads larger than the
+ * size of the MBOX FIFO (128B), because the Target is unable
+ * to push the 129th byte of data until AFTER the Host posts an
+ * HIF Read and removes some FIFO data. So for large reads the
+ * Host proceeds to post an HIF Read BEFORE all the data is
+ * actually available to read. Fortunately, large BMI reads do
+ * not occur in practice -- they're supported for debug/development.
+ *
+ * So Host/Target BMI synchronization is divided into these cases:
+ * CASE 1: length < 4
+ * Should not happen
+ *
+ * CASE 2: 4 <= length <= 128
+ * Wait for first 4 bytes to be in FIFO
+ * If CONSERVATIVE_BMI_READ is enabled, also wait for
+ * a BMI command credit, which indicates that the ENTIRE
+ * response is available in the the FIFO
+ *
+ * CASE 3: length > 128
+ * Wait for the first 4 bytes to be in FIFO
+ *
+ * For most uses, a small timeout should be sufficient and we will
+ * usually see a response quickly; but there may be some unusual
+ * (debug) cases of BMI_EXECUTE where we want an larger timeout.
+ * For now, we use an unbounded busy loop while waiting for
+ * BMI_EXECUTE.
+ *
+ * If BMI_EXECUTE ever needs to support longer-latency execution,
+ * especially in production, this code needs to be enhanced to sleep
+ * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
+ * a function of Host processor speed.
+ */
+ if (len >= 4) { /* NB: Currently, always true */
+ ret = ath6kl_bmi_get_rx_lkahd(ar);
+ if (ret)
+ return ret;
+ }
+
+ addr = ar->mbox_info.htc_addr;
+ ret = hif_read_write_sync(ar, addr, buf, len,
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("Unable to read the bmi data from the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_done(struct ath6kl *ar)
+{
+ int ret;
+ u32 cid = BMI_DONE;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n");
+ return 0;
+ }
+
+ ar->bmi.done_sent = true;
+
+ ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+ if (ret) {
+ ath6kl_err("Unable to send bmi done: %d\n", ret);
+ return ret;
+ }
+
+ ath6kl_bmi_cleanup(ar);
+
+ return 0;
+}
+
+int ath6kl_bmi_get_target_info(struct ath6kl *ar,
+ struct ath6kl_bmi_target_info *targ_info)
+{
+ int ret;
+ u32 cid = BMI_GET_TARGET_INFO;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+ if (ret) {
+ ath6kl_err("Unable to send get target info: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version,
+ sizeof(targ_info->version));
+ if (ret) {
+ ath6kl_err("Unable to recv target info: %d\n", ret);
+ return ret;
+ }
+
+ if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
+ /* Determine how many bytes are in the Target's targ_info */
+ ret = ath6kl_bmi_recv_buf(ar,
+ (u8 *)&targ_info->byte_count,
+ sizeof(targ_info->byte_count));
+ if (ret) {
+ ath6kl_err("unable to read target info byte count: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * The target's targ_info doesn't match the host's targ_info.
+ * We need to do some backwards compatibility to make this work.
+ */
+ if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* Read the remainder of the targ_info */
+ ret = ath6kl_bmi_recv_buf(ar,
+ ((u8 *)targ_info) +
+ sizeof(targ_info->byte_count),
+ sizeof(*targ_info) -
+ sizeof(targ_info->byte_count));
+
+ if (ret) {
+ ath6kl_err("Unable to read target info (%d bytes): %d\n",
+ targ_info->byte_count, ret);
+ return ret;
+ }
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
+ targ_info->version, targ_info->type);
+
+ return 0;
+}
+
+int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
+{
+ u32 cid = BMI_READ_MEMORY;
+ int ret;
+ u32 offset;
+ u32 len_remain, rx_len;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len);
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI,
+ "bmi read memory: device: addr: 0x%x, len: %d\n",
+ addr, len);
+
+ len_remain = len;
+
+ while (len_remain) {
+ rx_len = (len_remain < BMI_DATASZ_MAX) ?
+ len_remain : BMI_DATASZ_MAX;
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
+ offset += sizeof(len);
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n",
+ ret);
+ return ret;
+ }
+ ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len);
+ if (ret) {
+ ath6kl_err("Unable to read from the device: %d\n",
+ ret);
+ return ret;
+ }
+ memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len);
+ len_remain -= rx_len; addr += rx_len;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
+{
+ u32 cid = BMI_WRITE_MEMORY;
+ int ret;
+ u32 offset;
+ u32 len_remain, tx_len;
+ const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
+ u8 aligned_buf[BMI_DATASZ_MAX];
+ u8 *src;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI,
+ "bmi write memory: addr: 0x%x, len: %d\n", addr, len);
+
+ len_remain = len;
+ while (len_remain) {
+ src = &buf[len - len_remain];
+
+ if (len_remain < (BMI_DATASZ_MAX - header)) {
+ if (len_remain & 3) {
+ /* align it with 4 bytes */
+ len_remain = len_remain +
+ (4 - (len_remain & 3));
+ memcpy(aligned_buf, src, len_remain);
+ src = aligned_buf;
+ }
+ tx_len = len_remain;
+ } else {
+ tx_len = (BMI_DATASZ_MAX - header);
+ }
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
+ offset += sizeof(tx_len);
+ memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
+ offset += tx_len;
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n",
+ ret);
+ return ret;
+ }
+ len_remain -= tx_len; addr += tx_len;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
+{
+ u32 cid = BMI_EXECUTE;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr) + sizeof(param);
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d)\n",
+ addr, *param);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+ memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
+ offset += sizeof(*param);
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
+ if (ret) {
+ ath6kl_err("Unable to read from the device: %d\n", ret);
+ return ret;
+ }
+
+ memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
+
+ return 0;
+}
+
+int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
+{
+ u32 cid = BMI_SET_APP_START;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr);
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
+{
+ u32 cid = BMI_READ_SOC_REGISTER;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr);
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
+ if (ret) {
+ ath6kl_err("Unable to read from the device: %d\n", ret);
+ return ret;
+ }
+ memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
+
+ return 0;
+}
+
+int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
+{
+ u32 cid = BMI_WRITE_SOC_REGISTER;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr) + sizeof(param);
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI,
+ "bmi write SOC reg: addr: 0x%x, param: %d\n",
+ addr, param);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
+ offset += sizeof(param);
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ u32 cid = BMI_LZ_DATA;
+ int ret;
+ u32 offset;
+ u32 len_remain, tx_len;
+ const u32 header = sizeof(cid) + sizeof(len);
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = BMI_DATASZ_MAX + header;
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d)\n",
+ len);
+
+ len_remain = len;
+ while (len_remain) {
+ tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ?
+ len_remain : (BMI_DATASZ_MAX - header);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
+ offset += sizeof(tx_len);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
+ tx_len);
+ offset += tx_len;
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ len_remain -= tx_len;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
+{
+ u32 cid = BMI_LZ_STREAM_START;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr);
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI,
+ "bmi LZ stream start: addr: 0x%x)\n",
+ addr);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to start LZ stream to the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
+{
+ int ret;
+ u32 last_word = 0;
+ u32 last_word_offset = len & ~0x3;
+ u32 unaligned_bytes = len & 0x3;
+
+ ret = ath6kl_bmi_lz_stream_start(ar, addr);
+ if (ret)
+ return ret;
+
+ if (unaligned_bytes) {
+ /* copy the last word into a zero padded buffer */
+ memcpy(&last_word, &buf[last_word_offset], unaligned_bytes);
+ }
+
+ ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset);
+ if (ret)
+ return ret;
+
+ if (unaligned_bytes)
+ ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4);
+
+ if (!ret) {
+ /* Close compressed stream and open a new (fake) one.
+ * This serves mainly to flush Target caches. */
+ ret = ath6kl_bmi_lz_stream_start(ar, 0x00);
+ }
+ return ret;
+}
+
+int ath6kl_bmi_init(struct ath6kl *ar)
+{
+ ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);
+
+ if (!ar->bmi.cmd_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void ath6kl_bmi_cleanup(struct ath6kl *ar)
+{
+ kfree(ar->bmi.cmd_buf);
+ ar->bmi.cmd_buf = NULL;
+}
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h
new file mode 100644
index 00000000000..96851d5df24
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/bmi.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef BMI_H
+#define BMI_H
+
+/*
+ * Bootloader Messaging Interface (BMI)
+ *
+ * BMI is a very simple messaging interface used during initialization
+ * to read memory, write memory, execute code, and to define an
+ * application entry PC.
+ *
+ * It is used to download an application to ATH6KL, to provide
+ * patches to code that is already resident on ATH6KL, and generally
+ * to examine and modify state. The Host has an opportunity to use
+ * BMI only once during bootup. Once the Host issues a BMI_DONE
+ * command, this opportunity ends.
+ *
+ * The Host writes BMI requests to mailbox0, and reads BMI responses
+ * from mailbox0. BMI requests all begin with a command
+ * (see below for specific commands), and are followed by
+ * command-specific data.
+ *
+ * Flow control:
+ * The Host can only issue a command once the Target gives it a
+ * "BMI Command Credit", using ATH6KL Counter #4. As soon as the
+ * Target has completed a command, it issues another BMI Command
+ * Credit (so the Host can issue the next command).
+ *
+ * BMI handles all required Target-side cache flushing.
+ */
+
+#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \
+ (sizeof(u32) * 3 /* cmd + addr + len */))
+
+/* Maximum data size used for BMI transfers */
+#define BMI_DATASZ_MAX 256
+
+/* BMI Commands */
+
+#define BMI_NO_COMMAND 0
+
+#define BMI_DONE 1
+/*
+ * Semantics: Host is done using BMI
+ * Request format:
+ * u32 command (BMI_DONE)
+ * Response format: none
+ */
+
+#define BMI_READ_MEMORY 2
+/*
+ * Semantics: Host reads ATH6KL memory
+ * Request format:
+ * u32 command (BMI_READ_MEMORY)
+ * u32 address
+ * u32 length, at most BMI_DATASZ_MAX
+ * Response format:
+ * u8 data[length]
+ */
+
+#define BMI_WRITE_MEMORY 3
+/*
+ * Semantics: Host writes ATH6KL memory
+ * Request format:
+ * u32 command (BMI_WRITE_MEMORY)
+ * u32 address
+ * u32 length, at most BMI_DATASZ_MAX
+ * u8 data[length]
+ * Response format: none
+ */
+
+#define BMI_EXECUTE 4
+/*
+ * Semantics: Causes ATH6KL to execute code
+ * Request format:
+ * u32 command (BMI_EXECUTE)
+ * u32 address
+ * u32 parameter
+ * Response format:
+ * u32 return value
+ */
+
+#define BMI_SET_APP_START 5
+/*
+ * Semantics: Set Target application starting address
+ * Request format:
+ * u32 command (BMI_SET_APP_START)
+ * u32 address
+ * Response format: none
+ */
+
+#define BMI_READ_SOC_REGISTER 6
+/*
+ * Semantics: Read a 32-bit Target SOC register.
+ * Request format:
+ * u32 command (BMI_READ_REGISTER)
+ * u32 address
+ * Response format:
+ * u32 value
+ */
+
+#define BMI_WRITE_SOC_REGISTER 7
+/*
+ * Semantics: Write a 32-bit Target SOC register.
+ * Request format:
+ * u32 command (BMI_WRITE_REGISTER)
+ * u32 address
+ * u32 value
+ *
+ * Response format: none
+ */
+
+#define BMI_GET_TARGET_ID 8
+#define BMI_GET_TARGET_INFO 8
+/*
+ * Semantics: Fetch the 4-byte Target information
+ * Request format:
+ * u32 command (BMI_GET_TARGET_ID/INFO)
+ * Response format1 (old firmware):
+ * u32 TargetVersionID
+ * Response format2 (newer firmware):
+ * u32 TARGET_VERSION_SENTINAL
+ * struct bmi_target_info;
+ */
+
+#define TARGET_VERSION_SENTINAL 0xffffffff
+#define TARGET_TYPE_AR6003 3
+#define TARGET_TYPE_AR6004 5
+#define BMI_ROMPATCH_INSTALL 9
+/*
+ * Semantics: Install a ROM Patch.
+ * Request format:
+ * u32 command (BMI_ROMPATCH_INSTALL)
+ * u32 Target ROM Address
+ * u32 Target RAM Address or Value (depending on Target Type)
+ * u32 Size, in bytes
+ * u32 Activate? 1-->activate;
+ * 0-->install but do not activate
+ * Response format:
+ * u32 PatchID
+ */
+
+#define BMI_ROMPATCH_UNINSTALL 10
+/*
+ * Semantics: Uninstall a previously-installed ROM Patch,
+ * automatically deactivating, if necessary.
+ * Request format:
+ * u32 command (BMI_ROMPATCH_UNINSTALL)
+ * u32 PatchID
+ *
+ * Response format: none
+ */
+
+#define BMI_ROMPATCH_ACTIVATE 11
+/*
+ * Semantics: Activate a list of previously-installed ROM Patches.
+ * Request format:
+ * u32 command (BMI_ROMPATCH_ACTIVATE)
+ * u32 rompatch_count
+ * u32 PatchID[rompatch_count]
+ *
+ * Response format: none
+ */
+
+#define BMI_ROMPATCH_DEACTIVATE 12
+/*
+ * Semantics: Deactivate a list of active ROM Patches.
+ * Request format:
+ * u32 command (BMI_ROMPATCH_DEACTIVATE)
+ * u32 rompatch_count
+ * u32 PatchID[rompatch_count]
+ *
+ * Response format: none
+ */
+
+
+#define BMI_LZ_STREAM_START 13
+/*
+ * Semantics: Begin an LZ-compressed stream of input
+ * which is to be uncompressed by the Target to an
+ * output buffer at address. The output buffer must
+ * be sufficiently large to hold the uncompressed
+ * output from the compressed input stream. This BMI
+ * command should be followed by a series of 1 or more
+ * BMI_LZ_DATA commands.
+ * u32 command (BMI_LZ_STREAM_START)
+ * u32 address
+ * Note: Not supported on all versions of ROM firmware.
+ */
+
+#define BMI_LZ_DATA 14
+/*
+ * Semantics: Host writes ATH6KL memory with LZ-compressed
+ * data which is uncompressed by the Target. This command
+ * must be preceded by a BMI_LZ_STREAM_START command. A series
+ * of BMI_LZ_DATA commands are considered part of a single
+ * input stream until another BMI_LZ_STREAM_START is issued.
+ * Request format:
+ * u32 command (BMI_LZ_DATA)
+ * u32 length (of compressed data),
+ * at most BMI_DATASZ_MAX
+ * u8 CompressedData[length]
+ * Response format: none
+ * Note: Not supported on all versions of ROM firmware.
+ */
+
+#define BMI_COMMUNICATION_TIMEOUT 1000 /* in msec */
+
+struct ath6kl;
+struct ath6kl_bmi_target_info {
+ __le32 byte_count; /* size of this structure */
+ __le32 version; /* target version id */
+ __le32 type; /* target type */
+} __packed;
+
+int ath6kl_bmi_init(struct ath6kl *ar);
+void ath6kl_bmi_cleanup(struct ath6kl *ar);
+int ath6kl_bmi_done(struct ath6kl *ar);
+int ath6kl_bmi_get_target_info(struct ath6kl *ar,
+ struct ath6kl_bmi_target_info *targ_info);
+int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
+int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
+int ath6kl_bmi_execute(struct ath6kl *ar,
+ u32 addr, u32 *param);
+int ath6kl_bmi_set_app_start(struct ath6kl *ar,
+ u32 addr);
+int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param);
+int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param);
+int ath6kl_bmi_lz_data(struct ath6kl *ar,
+ u8 *buf, u32 len);
+int ath6kl_bmi_lz_stream_start(struct ath6kl *ar,
+ u32 addr);
+int ath6kl_bmi_fast_download(struct ath6kl *ar,
+ u32 addr, u8 *buf, u32 len);
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
new file mode 100644
index 00000000000..3aff36bad5d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -0,0 +1,1914 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "cfg80211.h"
+#include "debug.h"
+#include "hif-ops.h"
+#include "testmode.h"
+
+static unsigned int ath6kl_p2p;
+
+module_param(ath6kl_p2p, uint, 0644);
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+ .bitrate = (_rate), \
+ .flags = (_flags), \
+ .hw_value = (_rateid), \
+}
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static struct ieee80211_rate ath6kl_rates[] = {
+ RATETAB_ENT(10, 0x1, 0),
+ RATETAB_ENT(20, 0x2, 0),
+ RATETAB_ENT(55, 0x4, 0),
+ RATETAB_ENT(110, 0x8, 0),
+ RATETAB_ENT(60, 0x10, 0),
+ RATETAB_ENT(90, 0x20, 0),
+ RATETAB_ENT(120, 0x40, 0),
+ RATETAB_ENT(180, 0x80, 0),
+ RATETAB_ENT(240, 0x100, 0),
+ RATETAB_ENT(360, 0x200, 0),
+ RATETAB_ENT(480, 0x400, 0),
+ RATETAB_ENT(540, 0x800, 0),
+};
+
+#define ath6kl_a_rates (ath6kl_rates + 4)
+#define ath6kl_a_rates_size 8
+#define ath6kl_g_rates (ath6kl_rates + 0)
+#define ath6kl_g_rates_size 12
+
+static struct ieee80211_channel ath6kl_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static struct ieee80211_channel ath6kl_5ghz_a_channels[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(149, 0),
+ CHAN5G(153, 0), CHAN5G(157, 0),
+ CHAN5G(161, 0), CHAN5G(165, 0),
+ CHAN5G(184, 0), CHAN5G(188, 0),
+ CHAN5G(192, 0), CHAN5G(196, 0),
+ CHAN5G(200, 0), CHAN5G(204, 0),
+ CHAN5G(208, 0), CHAN5G(212, 0),
+ CHAN5G(216, 0),
+};
+
+static struct ieee80211_supported_band ath6kl_band_2ghz = {
+ .n_channels = ARRAY_SIZE(ath6kl_2ghz_channels),
+ .channels = ath6kl_2ghz_channels,
+ .n_bitrates = ath6kl_g_rates_size,
+ .bitrates = ath6kl_g_rates,
+};
+
+static struct ieee80211_supported_band ath6kl_band_5ghz = {
+ .n_channels = ARRAY_SIZE(ath6kl_5ghz_a_channels),
+ .channels = ath6kl_5ghz_a_channels,
+ .n_bitrates = ath6kl_a_rates_size,
+ .bitrates = ath6kl_a_rates,
+};
+
+static int ath6kl_set_wpa_version(struct ath6kl *ar,
+ enum nl80211_wpa_versions wpa_version)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);
+
+ if (!wpa_version) {
+ ar->auth_mode = NONE_AUTH;
+ } else if (wpa_version & NL80211_WPA_VERSION_2) {
+ ar->auth_mode = WPA2_AUTH;
+ } else if (wpa_version & NL80211_WPA_VERSION_1) {
+ ar->auth_mode = WPA_AUTH;
+ } else {
+ ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_auth_type(struct ath6kl *ar,
+ enum nl80211_auth_type auth_type)
+{
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
+
+ switch (auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ ar->dot11_auth_mode = OPEN_AUTH;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ ar->dot11_auth_mode = SHARED_AUTH;
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ ar->dot11_auth_mode = LEAP_AUTH;
+ break;
+
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ ar->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
+ break;
+
+ default:
+ ath6kl_err("%s: 0x%x not spported\n", __func__, auth_type);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
+{
+ u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto;
+ u8 *ar_cipher_len = ucast ? &ar->prwise_crypto_len :
+ &ar->grp_crypto_len;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
+ __func__, cipher, ucast);
+
+ switch (cipher) {
+ case 0:
+ /* our own hack to use value 0 as no crypto used */
+ *ar_cipher = NONE_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ *ar_cipher = WEP_CRYPT;
+ *ar_cipher_len = 5;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ *ar_cipher = WEP_CRYPT;
+ *ar_cipher_len = 13;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ *ar_cipher = TKIP_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ *ar_cipher = AES_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ default:
+ ath6kl_err("cipher 0x%x not supported\n", cipher);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);
+
+ if (key_mgmt == WLAN_AKM_SUITE_PSK) {
+ if (ar->auth_mode == WPA_AUTH)
+ ar->auth_mode = WPA_PSK_AUTH;
+ else if (ar->auth_mode == WPA2_AUTH)
+ ar->auth_mode = WPA2_PSK_AUTH;
+ } else if (key_mgmt != WLAN_AKM_SUITE_8021X) {
+ ar->auth_mode = NONE_AUTH;
+ }
+}
+
+static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
+{
+ if (!test_bit(WMI_READY, &ar->flag)) {
+ ath6kl_err("wmi is not ready\n");
+ return false;
+ }
+
+ if (!test_bit(WLAN_ENABLED, &ar->flag)) {
+ ath6kl_err("wlan disabled\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ int status;
+
+ ar->sme_state = SME_CONNECTING;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("destroy in progress\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(SKIP_SCAN, &ar->flag) &&
+ ((sme->channel && sme->channel->center_freq == 0) ||
+ (sme->bssid && is_zero_ether_addr(sme->bssid)))) {
+ ath6kl_err("SkipScan: channel or bssid invalid\n");
+ return -EINVAL;
+ }
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("busy, couldn't get access\n");
+ return -ERESTARTSYS;
+ }
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("busy, destroy in progress\n");
+ up(&ar->sem);
+ return -EBUSY;
+ }
+
+ if (ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)]) {
+ /*
+ * sleep until the command queue drains
+ */
+ wait_event_interruptible_timeout(ar->event_wq,
+ ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0,
+ WMI_TIMEOUT);
+ if (signal_pending(current)) {
+ ath6kl_err("cmd queue drain timeout\n");
+ up(&ar->sem);
+ return -EINTR;
+ }
+ }
+
+ if (test_bit(CONNECTED, &ar->flag) &&
+ ar->ssid_len == sme->ssid_len &&
+ !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
+ ar->reconnect_flag = true;
+ status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid,
+ ar->ch_hint);
+
+ up(&ar->sem);
+ if (status) {
+ ath6kl_err("wmi_reconnect_cmd failed\n");
+ return -EIO;
+ }
+ return 0;
+ } else if (ar->ssid_len == sme->ssid_len &&
+ !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
+ ath6kl_disconnect(ar);
+ }
+
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = sme->ssid_len;
+ memcpy(ar->ssid, sme->ssid, sme->ssid_len);
+
+ if (sme->channel)
+ ar->ch_hint = sme->channel->center_freq;
+
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
+ memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid));
+
+ ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions);
+
+ status = ath6kl_set_auth_type(ar, sme->auth_type);
+ if (status) {
+ up(&ar->sem);
+ return status;
+ }
+
+ if (sme->crypto.n_ciphers_pairwise)
+ ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
+ else
+ ath6kl_set_cipher(ar, 0, true);
+
+ ath6kl_set_cipher(ar, sme->crypto.cipher_group, false);
+
+ if (sme->crypto.n_akm_suites)
+ ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]);
+
+ if ((sme->key_len) &&
+ (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) {
+ struct ath6kl_key *key = NULL;
+
+ if (sme->key_idx < WMI_MIN_KEY_INDEX ||
+ sme->key_idx > WMI_MAX_KEY_INDEX) {
+ ath6kl_err("key index %d out of bounds\n",
+ sme->key_idx);
+ up(&ar->sem);
+ return -ENOENT;
+ }
+
+ key = &ar->keys[sme->key_idx];
+ key->key_len = sme->key_len;
+ memcpy(key->key, sme->key, key->key_len);
+ key->cipher = ar->prwise_crypto;
+ ar->def_txkey_index = sme->key_idx;
+
+ ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx,
+ ar->prwise_crypto,
+ GROUP_USAGE | TX_USAGE,
+ key->key_len,
+ NULL,
+ key->key, KEY_OP_INIT_VAL, NULL,
+ NO_SYNC_WMIFLAG);
+ }
+
+ if (!ar->usr_bss_filter) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+ if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) {
+ ath6kl_err("couldn't set bss filtering\n");
+ up(&ar->sem);
+ return -EIO;
+ }
+ }
+
+ ar->nw_type = ar->next_mode;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: connect called with authmode %d dot11 auth %d"
+ " PW crypto %d PW crypto len %d GRP crypto %d"
+ " GRP crypto len %d channel hint %u\n",
+ __func__,
+ ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
+ ar->prwise_crypto_len, ar->grp_crypto,
+ ar->grp_crypto_len, ar->ch_hint);
+
+ ar->reconnect_flag = 0;
+ status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
+ ar->dot11_auth_mode, ar->auth_mode,
+ ar->prwise_crypto,
+ ar->prwise_crypto_len,
+ ar->grp_crypto, ar->grp_crypto_len,
+ ar->ssid_len, ar->ssid,
+ ar->req_bssid, ar->ch_hint,
+ ar->connect_ctrl_flags);
+
+ up(&ar->sem);
+
+ if (status == -EINVAL) {
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = 0;
+ ath6kl_err("invalid request\n");
+ return -ENOENT;
+ } else if (status) {
+ ath6kl_err("ath6kl_wmi_connect_cmd failed\n");
+ return -EIO;
+ }
+
+ if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
+ ((ar->auth_mode == WPA_PSK_AUTH)
+ || (ar->auth_mode == WPA2_PSK_AUTH))) {
+ mod_timer(&ar->disconnect_timer,
+ jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
+ }
+
+ ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
+ set_bit(CONNECT_PEND, &ar->flag);
+
+ return 0;
+}
+
+static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
+ struct ieee80211_channel *chan,
+ const u8 *beacon_ie, size_t beacon_ie_len)
+{
+ struct cfg80211_bss *bss;
+ u8 *ie;
+
+ bss = cfg80211_get_bss(ar->wdev->wiphy, chan, bssid,
+ ar->ssid, ar->ssid_len, WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+ if (bss == NULL) {
+ /*
+ * Since cfg80211 may not yet know about the BSS,
+ * generate a partial entry until the first BSS info
+ * event becomes available.
+ *
+ * Prepend SSID element since it is not included in the Beacon
+ * IEs from the target.
+ */
+ ie = kmalloc(2 + ar->ssid_len + beacon_ie_len, GFP_KERNEL);
+ if (ie == NULL)
+ return -ENOMEM;
+ ie[0] = WLAN_EID_SSID;
+ ie[1] = ar->ssid_len;
+ memcpy(ie + 2, ar->ssid, ar->ssid_len);
+ memcpy(ie + 2 + ar->ssid_len, beacon_ie, beacon_ie_len);
+ bss = cfg80211_inform_bss(ar->wdev->wiphy, chan,
+ bssid, 0, WLAN_CAPABILITY_ESS, 100,
+ ie, 2 + ar->ssid_len + beacon_ie_len,
+ 0, GFP_KERNEL);
+ if (bss)
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added dummy bss for "
+ "%pM prior to indicating connect/roamed "
+ "event\n", bssid);
+ kfree(ie);
+ } else
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss "
+ "entry\n");
+
+ if (bss == NULL)
+ return -ENOMEM;
+
+ cfg80211_put_bss(bss);
+
+ return 0;
+}
+
+void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+ u8 *bssid, u16 listen_intvl,
+ u16 beacon_intvl,
+ enum network_type nw_type,
+ u8 beacon_ie_len, u8 assoc_req_len,
+ u8 assoc_resp_len, u8 *assoc_info)
+{
+ struct ieee80211_channel *chan;
+
+ /* capinfo + listen interval */
+ u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
+
+ /* capinfo + status code + associd */
+ u8 assoc_resp_ie_offset = sizeof(u16) + sizeof(u16) + sizeof(u16);
+
+ u8 *assoc_req_ie = assoc_info + beacon_ie_len + assoc_req_ie_offset;
+ u8 *assoc_resp_ie = assoc_info + beacon_ie_len + assoc_req_len +
+ assoc_resp_ie_offset;
+
+ assoc_req_len -= assoc_req_ie_offset;
+ assoc_resp_len -= assoc_resp_ie_offset;
+
+ /*
+ * Store Beacon interval here; DTIM period will be available only once
+ * a Beacon frame from the AP is seen.
+ */
+ ar->assoc_bss_beacon_int = beacon_intvl;
+ clear_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+
+ if (nw_type & ADHOC_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in ibss mode\n", __func__);
+ return;
+ }
+ }
+
+ if (nw_type & INFRA_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
+ ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in station mode\n", __func__);
+ return;
+ }
+ }
+
+ chan = ieee80211_get_channel(ar->wdev->wiphy, (int) channel);
+
+
+ if (nw_type & ADHOC_NETWORK) {
+ cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ return;
+ }
+
+ if (ath6kl_add_bss_if_needed(ar, bssid, chan, assoc_info,
+ beacon_ie_len) < 0) {
+ ath6kl_err("could not add cfg80211 bss entry for "
+ "connect/roamed notification\n");
+ return;
+ }
+
+ if (ar->sme_state == SME_CONNECTING) {
+ /* inform connect result to cfg80211 */
+ ar->sme_state = SME_CONNECTED;
+ cfg80211_connect_result(ar->net_dev, bssid,
+ assoc_req_ie, assoc_req_len,
+ assoc_resp_ie, assoc_resp_len,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL);
+ } else if (ar->sme_state == SME_CONNECTED) {
+ /* inform roam event to cfg80211 */
+ cfg80211_roamed(ar->net_dev, chan, bssid,
+ assoc_req_ie, assoc_req_len,
+ assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
+ }
+}
+
+static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
+ struct net_device *dev, u16 reason_code)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
+ reason_code);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("busy, destroy in progress\n");
+ return -EBUSY;
+ }
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("busy, couldn't get access\n");
+ return -ERESTARTSYS;
+ }
+
+ ar->reconnect_flag = 0;
+ ath6kl_disconnect(ar);
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = 0;
+
+ if (!test_bit(SKIP_SCAN, &ar->flag))
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+
+ up(&ar->sem);
+
+ ar->sme_state = SME_DISCONNECTED;
+
+ return 0;
+}
+
+void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+ u8 *bssid, u8 assoc_resp_len,
+ u8 *assoc_info, u16 proto_reason)
+{
+ if (ar->scan_req) {
+ cfg80211_scan_done(ar->scan_req, true);
+ ar->scan_req = NULL;
+ }
+
+ if (ar->nw_type & ADHOC_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in ibss mode\n", __func__);
+ return;
+ }
+ memset(bssid, 0, ETH_ALEN);
+ cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ return;
+ }
+
+ if (ar->nw_type & INFRA_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
+ ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in station mode\n", __func__);
+ return;
+ }
+ }
+
+ /*
+ * Send a disconnect command to target when a disconnect event is
+ * received with reason code other than 3 (DISCONNECT_CMD - disconnect
+ * request from host) to make the firmware stop trying to connect even
+ * after giving disconnect event. There will be one more disconnect
+ * event for this disconnect command with reason code DISCONNECT_CMD
+ * which will be notified to cfg80211.
+ */
+
+ if (reason != DISCONNECT_CMD) {
+ ath6kl_wmi_disconnect_cmd(ar->wmi);
+ return;
+ }
+
+ clear_bit(CONNECT_PEND, &ar->flag);
+
+ if (ar->sme_state == SME_CONNECTING) {
+ cfg80211_connect_result(ar->net_dev,
+ bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ } else if (ar->sme_state == SME_CONNECTED) {
+ cfg80211_disconnected(ar->net_dev, reason,
+ NULL, 0, GFP_KERNEL);
+ }
+
+ ar->sme_state = SME_DISCONNECTED;
+}
+
+static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ s8 n_channels = 0;
+ u16 *channels = NULL;
+ int ret = 0;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (!ar->usr_bss_filter) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+ ret = ath6kl_wmi_bssfilter_cmd(
+ ar->wmi,
+ (test_bit(CONNECTED, &ar->flag) ?
+ ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0);
+ if (ret) {
+ ath6kl_err("couldn't set bss filtering\n");
+ return ret;
+ }
+ }
+
+ if (request->n_ssids && request->ssids[0].ssid_len) {
+ u8 i;
+
+ if (request->n_ssids > (MAX_PROBED_SSID_INDEX - 1))
+ request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
+
+ for (i = 0; i < request->n_ssids; i++)
+ ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
+ SPECIFIC_SSID_FLAG,
+ request->ssids[i].ssid_len,
+ request->ssids[i].ssid);
+ }
+
+ if (request->ie) {
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_REQ,
+ request->ie, request->ie_len);
+ if (ret) {
+ ath6kl_err("failed to set Probe Request appie for "
+ "scan");
+ return ret;
+ }
+ }
+
+ /*
+ * Scan only the requested channels if the request specifies a set of
+ * channels. If the list is longer than the target supports, do not
+ * configure the list and instead, scan all available channels.
+ */
+ if (request->n_channels > 0 &&
+ request->n_channels <= WMI_MAX_CHANNELS) {
+ u8 i;
+
+ n_channels = request->n_channels;
+
+ channels = kzalloc(n_channels * sizeof(u16), GFP_KERNEL);
+ if (channels == NULL) {
+ ath6kl_warn("failed to set scan channels, "
+ "scan all channels");
+ n_channels = 0;
+ }
+
+ for (i = 0; i < n_channels; i++)
+ channels[i] = request->channels[i]->center_freq;
+ }
+
+ ret = ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, 0,
+ false, 0, 0, n_channels, channels);
+ if (ret)
+ ath6kl_err("wmi_startscan_cmd failed\n");
+ else
+ ar->scan_req = request;
+
+ kfree(channels);
+
+ return ret;
+}
+
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status)
+{
+ int i;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status);
+
+ if (!ar->scan_req)
+ return;
+
+ if ((status == -ECANCELED) || (status == -EBUSY)) {
+ cfg80211_scan_done(ar->scan_req, true);
+ goto out;
+ }
+
+ cfg80211_scan_done(ar->scan_req, false);
+
+ if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) {
+ for (i = 0; i < ar->scan_req->n_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
+ DISABLE_SSID_FLAG,
+ 0, NULL);
+ }
+ }
+
+out:
+ ar->scan_req = NULL;
+}
+
+static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ u8 key_usage;
+ u8 key_type;
+ int status = 0;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ key = &ar->keys[key_index];
+ memset(key, 0, sizeof(struct ath6kl_key));
+
+ if (pairwise)
+ key_usage = PAIRWISE_USAGE;
+ else
+ key_usage = GROUP_USAGE;
+
+ if (params) {
+ if (params->key_len > WLAN_MAX_KEY_LEN ||
+ params->seq_len > sizeof(key->seq))
+ return -EINVAL;
+
+ key->key_len = params->key_len;
+ memcpy(key->key, params->key, key->key_len);
+ key->seq_len = params->seq_len;
+ memcpy(key->seq, params->seq, key->seq_len);
+ key->cipher = params->cipher;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ key_type = WEP_CRYPT;
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_type = TKIP_CRYPT;
+ break;
+
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_type = AES_CRYPT;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (((ar->auth_mode == WPA_PSK_AUTH)
+ || (ar->auth_mode == WPA2_PSK_AUTH))
+ && (key_usage & GROUP_USAGE))
+ del_timer(&ar->disconnect_timer);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
+ __func__, key_index, key->key_len, key_type,
+ key_usage, key->seq_len);
+
+ ar->def_txkey_index = key_index;
+
+ if (ar->nw_type == AP_NETWORK && !pairwise &&
+ (key_type == TKIP_CRYPT || key_type == AES_CRYPT) && params) {
+ ar->ap_mode_bkey.valid = true;
+ ar->ap_mode_bkey.key_index = key_index;
+ ar->ap_mode_bkey.key_type = key_type;
+ ar->ap_mode_bkey.key_len = key->key_len;
+ memcpy(ar->ap_mode_bkey.key, key->key, key->key_len);
+ if (!test_bit(CONNECTED, &ar->flag)) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group "
+ "key configuration until AP mode has been "
+ "started\n");
+ /*
+ * The key will be set in ath6kl_connect_ap_mode() once
+ * the connected event is received from the target.
+ */
+ return 0;
+ }
+ }
+
+ if (ar->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
+ !test_bit(CONNECTED, &ar->flag)) {
+ /*
+ * Store the key locally so that it can be re-configured after
+ * the AP mode has properly started
+ * (ath6kl_install_statioc_wep_keys).
+ */
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration "
+ "until AP mode has been started\n");
+ ar->wep_key_list[key_index].key_len = key->key_len;
+ memcpy(ar->wep_key_list[key_index].key, key->key, key->key_len);
+ return 0;
+ }
+
+ status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
+ key_type, key_usage, key->key_len,
+ key->seq, key->key, KEY_OP_INIT_VAL,
+ (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
+
+ if (status)
+ return -EIO;
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ if (!ar->keys[key_index].key_len) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: index %d is empty\n", __func__, key_index);
+ return 0;
+ }
+
+ ar->keys[key_index].key_len = 0;
+
+ return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index);
+}
+
+static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr, void *cookie,
+ void (*callback) (void *cookie,
+ struct key_params *))
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ struct key_params params;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ key = &ar->keys[key_index];
+ memset(&params, 0, sizeof(params));
+ params.cipher = key->cipher;
+ params.key_len = key->key_len;
+ params.seq_len = key->seq_len;
+ params.seq = key->seq;
+ params.key = key->key;
+
+ callback(cookie, &params);
+
+ return key->key_len ? 0 : -ENOENT;
+}
+
+static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool unicast,
+ bool multicast)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ int status = 0;
+ u8 key_usage;
+ enum crypto_type key_type = NONE_CRYPT;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n",
+ __func__, key_index);
+ return -ENOENT;
+ }
+
+ if (!ar->keys[key_index].key_len) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
+ __func__, key_index);
+ return -EINVAL;
+ }
+
+ ar->def_txkey_index = key_index;
+ key = &ar->keys[ar->def_txkey_index];
+ key_usage = GROUP_USAGE;
+ if (ar->prwise_crypto == WEP_CRYPT)
+ key_usage |= TX_USAGE;
+ if (unicast)
+ key_type = ar->prwise_crypto;
+ if (multicast)
+ key_type = ar->grp_crypto;
+
+ if (ar->next_mode == AP_NETWORK && !test_bit(CONNECTED, &ar->flag))
+ return 0; /* Delay until AP mode has been started */
+
+ status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
+ key_type, key_usage,
+ key->key_len, key->seq, key->key,
+ KEY_OP_INIT_VAL, NULL,
+ SYNC_BOTH_WMIFLAG);
+ if (status)
+ return -EIO;
+
+ return 0;
+}
+
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+ bool ismcast)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);
+
+ cfg80211_michael_mic_failure(ar->net_dev, ar->bssid,
+ (ismcast ? NL80211_KEYTYPE_GROUP :
+ NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
+ GFP_KERNEL);
+}
+
+static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
+ changed);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
+ ret = ath6kl_wmi_set_rts_cmd(ar->wmi, wiphy->rts_threshold);
+ if (ret != 0) {
+ ath6kl_err("ath6kl_wmi_set_rts_cmd failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * The type nl80211_tx_power_setting replaces the following
+ * data type from 2.6.36 onwards
+*/
+static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type,
+ int dbm)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ u8 ath6kl_dbm;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
+ type, dbm);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ switch (type) {
+ case NL80211_TX_POWER_AUTOMATIC:
+ return 0;
+ case NL80211_TX_POWER_LIMITED:
+ ar->tx_pwr = ath6kl_dbm = dbm;
+ break;
+ default:
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x not supported\n",
+ __func__, type);
+ return -EOPNOTSUPP;
+ }
+
+ ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm);
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (test_bit(CONNECTED, &ar->flag)) {
+ ar->tx_pwr = 0;
+
+ if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) {
+ ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
+ return -EIO;
+ }
+
+ wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0,
+ 5 * HZ);
+
+ if (signal_pending(current)) {
+ ath6kl_err("target did not respond\n");
+ return -EINTR;
+ }
+ }
+
+ *dbm = ar->tx_pwr;
+ return 0;
+}
+
+static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *dev,
+ bool pmgmt, int timeout)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct wmi_power_mode_cmd mode;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
+ __func__, pmgmt, timeout);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (pmgmt) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
+ mode.pwr_mode = REC_POWER;
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
+ mode.pwr_mode = MAX_PERF_POWER;
+ }
+
+ if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) {
+ ath6kl_err("wmi_powermode_cmd failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct wireless_dev *wdev = ar->wdev;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ ar->next_mode = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ar->next_mode = ADHOC_NETWORK;
+ break;
+ case NL80211_IFTYPE_AP:
+ ar->next_mode = AP_NETWORK;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ ar->next_mode = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ ar->next_mode = AP_NETWORK;
+ break;
+ default:
+ ath6kl_err("invalid interface type %u\n", type);
+ return -EOPNOTSUPP;
+ }
+
+ wdev->iftype = type;
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ibss_params *ibss_param)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ int status;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ ar->ssid_len = ibss_param->ssid_len;
+ memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len);
+
+ if (ibss_param->channel)
+ ar->ch_hint = ibss_param->channel->center_freq;
+
+ if (ibss_param->channel_fixed) {
+ /*
+ * TODO: channel_fixed: The channel should be fixed, do not
+ * search for IBSSs to join on other channels. Target
+ * firmware does not support this feature, needs to be
+ * updated.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
+ memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid));
+
+ ath6kl_set_wpa_version(ar, 0);
+
+ status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM);
+ if (status)
+ return status;
+
+ if (ibss_param->privacy) {
+ ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true);
+ ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false);
+ } else {
+ ath6kl_set_cipher(ar, 0, true);
+ ath6kl_set_cipher(ar, 0, false);
+ }
+
+ ar->nw_type = ar->next_mode;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: connect called with authmode %d dot11 auth %d"
+ " PW crypto %d PW crypto len %d GRP crypto %d"
+ " GRP crypto len %d channel hint %u\n",
+ __func__,
+ ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
+ ar->prwise_crypto_len, ar->grp_crypto,
+ ar->grp_crypto_len, ar->ch_hint);
+
+ status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
+ ar->dot11_auth_mode, ar->auth_mode,
+ ar->prwise_crypto,
+ ar->prwise_crypto_len,
+ ar->grp_crypto, ar->grp_crypto_len,
+ ar->ssid_len, ar->ssid,
+ ar->req_bssid, ar->ch_hint,
+ ar->connect_ctrl_flags);
+ set_bit(CONNECT_PEND, &ar->flag);
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
+ struct net_device *dev)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ ath6kl_disconnect(ar);
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = 0;
+
+ return 0;
+}
+
+static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+};
+
+static bool is_rate_legacy(s32 rate)
+{
+ static const s32 legacy[] = { 1000, 2000, 5500, 11000,
+ 6000, 9000, 12000, 18000, 24000,
+ 36000, 48000, 54000
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(legacy); i++)
+ if (rate == legacy[i])
+ return true;
+
+ return false;
+}
+
+static bool is_rate_ht20(s32 rate, u8 *mcs, bool *sgi)
+{
+ static const s32 ht20[] = { 6500, 13000, 19500, 26000, 39000,
+ 52000, 58500, 65000, 72200
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ht20); i++) {
+ if (rate == ht20[i]) {
+ if (i == ARRAY_SIZE(ht20) - 1)
+ /* last rate uses sgi */
+ *sgi = true;
+ else
+ *sgi = false;
+
+ *mcs = i;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
+{
+ static const s32 ht40[] = { 13500, 27000, 40500, 54000,
+ 81000, 108000, 121500, 135000,
+ 150000
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ht40); i++) {
+ if (rate == ht40[i]) {
+ if (i == ARRAY_SIZE(ht40) - 1)
+ /* last rate uses sgi */
+ *sgi = true;
+ else
+ *sgi = false;
+
+ *mcs = i;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ long left;
+ bool sgi;
+ s32 rate;
+ int ret;
+ u8 mcs;
+
+ if (memcmp(mac, ar->bssid, ETH_ALEN) != 0)
+ return -ENOENT;
+
+ if (down_interruptible(&ar->sem))
+ return -EBUSY;
+
+ set_bit(STATS_UPDATE_PEND, &ar->flag);
+
+ ret = ath6kl_wmi_get_stats_cmd(ar->wmi);
+
+ if (ret != 0) {
+ up(&ar->sem);
+ return -EIO;
+ }
+
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ !test_bit(STATS_UPDATE_PEND,
+ &ar->flag),
+ WMI_TIMEOUT);
+
+ up(&ar->sem);
+
+ if (left == 0)
+ return -ETIMEDOUT;
+ else if (left < 0)
+ return left;
+
+ if (ar->target_stats.rx_byte) {
+ sinfo->rx_bytes = ar->target_stats.rx_byte;
+ sinfo->filled |= STATION_INFO_RX_BYTES;
+ sinfo->rx_packets = ar->target_stats.rx_pkt;
+ sinfo->filled |= STATION_INFO_RX_PACKETS;
+ }
+
+ if (ar->target_stats.tx_byte) {
+ sinfo->tx_bytes = ar->target_stats.tx_byte;
+ sinfo->filled |= STATION_INFO_TX_BYTES;
+ sinfo->tx_packets = ar->target_stats.tx_pkt;
+ sinfo->filled |= STATION_INFO_TX_PACKETS;
+ }
+
+ sinfo->signal = ar->target_stats.cs_rssi;
+ sinfo->filled |= STATION_INFO_SIGNAL;
+
+ rate = ar->target_stats.tx_ucast_rate;
+
+ if (is_rate_legacy(rate)) {
+ sinfo->txrate.legacy = rate / 100;
+ } else if (is_rate_ht20(rate, &mcs, &sgi)) {
+ if (sgi) {
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ sinfo->txrate.mcs = mcs - 1;
+ } else {
+ sinfo->txrate.mcs = mcs;
+ }
+
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ } else if (is_rate_ht40(rate, &mcs, &sgi)) {
+ if (sgi) {
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ sinfo->txrate.mcs = mcs - 1;
+ } else {
+ sinfo->txrate.mcs = mcs;
+ }
+
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "invalid rate from stats: %d\n", rate);
+ ath6kl_debug_war(ar, ATH6KL_WAR_INVALID_RATE);
+ return 0;
+ }
+
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+
+ if (test_bit(CONNECTED, &ar->flag) &&
+ test_bit(DTIM_PERIOD_AVAIL, &ar->flag) &&
+ ar->nw_type == INFRA_NETWORK) {
+ sinfo->filled |= STATION_INFO_BSS_PARAM;
+ sinfo->bss_param.flags = 0;
+ sinfo->bss_param.dtim_period = ar->assoc_bss_dtim_period;
+ sinfo->bss_param.beacon_interval = ar->assoc_bss_beacon_int;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ pmksa->pmkid, true);
+}
+
+static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ pmksa->pmkid, false);
+}
+
+static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ if (test_bit(CONNECTED, &ar->flag))
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ar6k_cfg80211_suspend(struct wiphy *wiphy,
+ struct cfg80211_wowlan *wow)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+
+ return ath6kl_hif_suspend(ar);
+}
+#endif
+
+static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
+ __func__, chan->center_freq, chan->hw_value);
+ ar->next_chan = chan->center_freq;
+
+ return 0;
+}
+
+static bool ath6kl_is_p2p_ie(const u8 *pos)
+{
+ return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 &&
+ pos[2] == 0x50 && pos[3] == 0x6f &&
+ pos[4] == 0x9a && pos[5] == 0x09;
+}
+
+static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies,
+ size_t ies_len)
+{
+ const u8 *pos;
+ u8 *buf = NULL;
+ size_t len = 0;
+ int ret;
+
+ /*
+ * Filter out P2P IE(s) since they will be included depending on
+ * the Probe Request frame in ath6kl_send_go_probe_resp().
+ */
+
+ if (ies && ies_len) {
+ buf = kmalloc(ies_len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ pos = ies;
+ while (pos + 1 < ies + ies_len) {
+ if (pos + 2 + pos[1] > ies + ies_len)
+ break;
+ if (!ath6kl_is_p2p_ie(pos)) {
+ memcpy(buf + len, pos, 2 + pos[1]);
+ len += 2 + pos[1];
+ }
+ pos += 2 + pos[1];
+ }
+ }
+
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_RESP,
+ buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info, bool add)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ieee80211_mgmt *mgmt;
+ u8 *ies;
+ int ies_len;
+ struct wmi_connect_cmd p;
+ int res;
+ int i;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (ar->next_mode != AP_NETWORK)
+ return -EOPNOTSUPP;
+
+ if (info->beacon_ies) {
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_BEACON,
+ info->beacon_ies,
+ info->beacon_ies_len);
+ if (res)
+ return res;
+ }
+ if (info->proberesp_ies) {
+ res = ath6kl_set_ap_probe_resp_ies(ar, info->proberesp_ies,
+ info->proberesp_ies_len);
+ if (res)
+ return res;
+ }
+ if (info->assocresp_ies) {
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_ASSOC_RESP,
+ info->assocresp_ies,
+ info->assocresp_ies_len);
+ if (res)
+ return res;
+ }
+
+ if (!add)
+ return 0;
+
+ ar->ap_mode_bkey.valid = false;
+
+ /* TODO:
+ * info->interval
+ * info->dtim_period
+ */
+
+ if (info->head == NULL)
+ return -EINVAL;
+ mgmt = (struct ieee80211_mgmt *) info->head;
+ ies = mgmt->u.beacon.variable;
+ if (ies > info->head + info->head_len)
+ return -EINVAL;
+ ies_len = info->head + info->head_len - ies;
+
+ if (info->ssid == NULL)
+ return -EINVAL;
+ memcpy(ar->ssid, info->ssid, info->ssid_len);
+ ar->ssid_len = info->ssid_len;
+ if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
+ return -EOPNOTSUPP; /* TODO */
+
+ ar->dot11_auth_mode = OPEN_AUTH;
+
+ memset(&p, 0, sizeof(p));
+
+ for (i = 0; i < info->crypto.n_akm_suites; i++) {
+ switch (info->crypto.akm_suites[i]) {
+ case WLAN_AKM_SUITE_8021X:
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ p.auth_mode |= WPA_AUTH;
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ p.auth_mode |= WPA2_AUTH;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ p.auth_mode |= WPA_PSK_AUTH;
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ p.auth_mode |= WPA2_PSK_AUTH;
+ break;
+ }
+ }
+ if (p.auth_mode == 0)
+ p.auth_mode = NONE_AUTH;
+ ar->auth_mode = p.auth_mode;
+
+ for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) {
+ switch (info->crypto.ciphers_pairwise[i]) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ p.prwise_crypto_type |= WEP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ p.prwise_crypto_type |= TKIP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ p.prwise_crypto_type |= AES_CRYPT;
+ break;
+ }
+ }
+ if (p.prwise_crypto_type == 0) {
+ p.prwise_crypto_type = NONE_CRYPT;
+ ath6kl_set_cipher(ar, 0, true);
+ } else if (info->crypto.n_ciphers_pairwise == 1)
+ ath6kl_set_cipher(ar, info->crypto.ciphers_pairwise[0], true);
+
+ switch (info->crypto.cipher_group) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ p.grp_crypto_type = WEP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ p.grp_crypto_type = TKIP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ p.grp_crypto_type = AES_CRYPT;
+ break;
+ default:
+ p.grp_crypto_type = NONE_CRYPT;
+ break;
+ }
+ ath6kl_set_cipher(ar, info->crypto.cipher_group, false);
+
+ p.nw_type = AP_NETWORK;
+ ar->nw_type = ar->next_mode;
+
+ p.ssid_len = ar->ssid_len;
+ memcpy(p.ssid, ar->ssid, ar->ssid_len);
+ p.dot11_auth_mode = ar->dot11_auth_mode;
+ p.ch = cpu_to_le16(ar->next_chan);
+
+ res = ath6kl_wmi_ap_profile_commit(ar->wmi, &p);
+ if (res < 0)
+ return res;
+
+ return 0;
+}
+
+static int ath6kl_add_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info)
+{
+ return ath6kl_ap_beacon(wiphy, dev, info, true);
+}
+
+static int ath6kl_set_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info)
+{
+ return ath6kl_ap_beacon(wiphy, dev, info, false);
+}
+
+static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ if (ar->nw_type != AP_NETWORK)
+ return -EOPNOTSUPP;
+ if (!test_bit(CONNECTED, &ar->flag))
+ return -ENOTCONN;
+
+ ath6kl_wmi_disconnect_cmd(ar->wmi);
+ clear_bit(CONNECTED, &ar->flag);
+
+ return 0;
+}
+
+static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_parameters *params)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ if (ar->nw_type != AP_NETWORK)
+ return -EOPNOTSUPP;
+
+ /* Use this only for authorizing/unauthorizing a station */
+ if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
+ return -EOPNOTSUPP;
+
+ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_AUTHORIZE,
+ mac, 0);
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_UNAUTHORIZE, mac,
+ 0);
+}
+
+static int ath6kl_remain_on_channel(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration,
+ u64 *cookie)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ /* TODO: if already pending or ongoing remain-on-channel,
+ * return -EBUSY */
+ *cookie = 1; /* only a single pending request is supported */
+
+ return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, chan->center_freq,
+ duration);
+}
+
+static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct net_device *dev,
+ u64 cookie)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ if (cookie != 1)
+ return -ENOENT;
+
+ return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi);
+}
+
+static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf,
+ size_t len, unsigned int freq)
+{
+ const u8 *pos;
+ u8 *p2p;
+ int p2p_len;
+ int ret;
+ const struct ieee80211_mgmt *mgmt;
+
+ mgmt = (const struct ieee80211_mgmt *) buf;
+
+ /* Include P2P IE(s) from the frame generated in user space. */
+
+ p2p = kmalloc(len, GFP_KERNEL);
+ if (p2p == NULL)
+ return -ENOMEM;
+ p2p_len = 0;
+
+ pos = mgmt->u.probe_resp.variable;
+ while (pos + 1 < buf + len) {
+ if (pos + 2 + pos[1] > buf + len)
+ break;
+ if (ath6kl_is_p2p_ie(pos)) {
+ memcpy(p2p + p2p_len, pos, 2 + pos[1]);
+ p2p_len += 2 + pos[1];
+ }
+ pos += 2 + pos[1];
+ }
+
+ ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, freq, mgmt->da,
+ p2p, p2p_len);
+ kfree(p2p);
+ return ret;
+}
+
+static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan, bool offchan,
+ enum nl80211_channel_type channel_type,
+ bool channel_type_valid, unsigned int wait,
+ const u8 *buf, size_t len, bool no_cck, u64 *cookie)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ u32 id;
+ const struct ieee80211_mgmt *mgmt;
+
+ mgmt = (const struct ieee80211_mgmt *) buf;
+ if (buf + len >= mgmt->u.probe_resp.variable &&
+ ar->nw_type == AP_NETWORK && test_bit(CONNECTED, &ar->flag) &&
+ ieee80211_is_probe_resp(mgmt->frame_control)) {
+ /*
+ * Send Probe Response frame in AP mode using a separate WMI
+ * command to allow the target to fill in the generic IEs.
+ */
+ *cookie = 0; /* TX status not supported */
+ return ath6kl_send_go_probe_resp(ar, buf, len,
+ chan->center_freq);
+ }
+
+ id = ar->send_action_id++;
+ if (id == 0) {
+ /*
+ * 0 is a reserved value in the WMI command and shall not be
+ * used for the command.
+ */
+ id = ar->send_action_id++;
+ }
+
+ *cookie = id;
+ return ath6kl_wmi_send_action_cmd(ar->wmi, id, chan->center_freq, wait,
+ buf, len);
+}
+
+static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
+ struct net_device *dev,
+ u16 frame_type, bool reg)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
+ __func__, frame_type, reg);
+ if (frame_type == IEEE80211_STYPE_PROBE_REQ) {
+ /*
+ * Note: This notification callback is not allowed to sleep, so
+ * we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we
+ * hardcode target to report Probe Request frames all the time.
+ */
+ ar->probe_req_report = reg;
+ }
+}
+
+static const struct ieee80211_txrx_stypes
+ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+};
+
+static struct cfg80211_ops ath6kl_cfg80211_ops = {
+ .change_virtual_intf = ath6kl_cfg80211_change_iface,
+ .scan = ath6kl_cfg80211_scan,
+ .connect = ath6kl_cfg80211_connect,
+ .disconnect = ath6kl_cfg80211_disconnect,
+ .add_key = ath6kl_cfg80211_add_key,
+ .get_key = ath6kl_cfg80211_get_key,
+ .del_key = ath6kl_cfg80211_del_key,
+ .set_default_key = ath6kl_cfg80211_set_default_key,
+ .set_wiphy_params = ath6kl_cfg80211_set_wiphy_params,
+ .set_tx_power = ath6kl_cfg80211_set_txpower,
+ .get_tx_power = ath6kl_cfg80211_get_txpower,
+ .set_power_mgmt = ath6kl_cfg80211_set_power_mgmt,
+ .join_ibss = ath6kl_cfg80211_join_ibss,
+ .leave_ibss = ath6kl_cfg80211_leave_ibss,
+ .get_station = ath6kl_get_station,
+ .set_pmksa = ath6kl_set_pmksa,
+ .del_pmksa = ath6kl_del_pmksa,
+ .flush_pmksa = ath6kl_flush_pmksa,
+ CFG80211_TESTMODE_CMD(ath6kl_tm_cmd)
+#ifdef CONFIG_PM
+ .suspend = ar6k_cfg80211_suspend,
+#endif
+ .set_channel = ath6kl_set_channel,
+ .add_beacon = ath6kl_add_beacon,
+ .set_beacon = ath6kl_set_beacon,
+ .del_beacon = ath6kl_del_beacon,
+ .change_station = ath6kl_change_station,
+ .remain_on_channel = ath6kl_remain_on_channel,
+ .cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
+ .mgmt_tx = ath6kl_mgmt_tx,
+ .mgmt_frame_register = ath6kl_mgmt_frame_register,
+};
+
+struct wireless_dev *ath6kl_cfg80211_init(struct device *dev)
+{
+ int ret = 0;
+ struct wireless_dev *wdev;
+ struct ath6kl *ar;
+
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev) {
+ ath6kl_err("couldn't allocate wireless device\n");
+ return NULL;
+ }
+
+ /* create a new wiphy for use with cfg80211 */
+ wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
+ if (!wdev->wiphy) {
+ ath6kl_err("couldn't allocate wiphy device\n");
+ kfree(wdev);
+ return NULL;
+ }
+
+ ar = wiphy_priv(wdev->wiphy);
+ ar->p2p = !!ath6kl_p2p;
+
+ wdev->wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+
+ wdev->wiphy->max_remain_on_channel_duration = 5000;
+
+ /* set device pointer for wiphy */
+ set_wiphy_dev(wdev->wiphy, dev);
+
+ wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
+ if (ar->p2p) {
+ wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT);
+ }
+ /* max num of ssids that can be probed during scanning */
+ wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
+ wdev->wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
+ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+ wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+ wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wdev->wiphy->cipher_suites = cipher_suites;
+ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ ret = wiphy_register(wdev->wiphy);
+ if (ret < 0) {
+ ath6kl_err("couldn't register wiphy device\n");
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+ return NULL;
+ }
+
+ return wdev;
+}
+
+void ath6kl_cfg80211_deinit(struct ath6kl *ar)
+{
+ struct wireless_dev *wdev = ar->wdev;
+
+ if (ar->scan_req) {
+ cfg80211_scan_done(ar->scan_req, true);
+ ar->scan_req = NULL;
+ }
+
+ if (!wdev)
+ return;
+
+ wiphy_unregister(wdev->wiphy);
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+}
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
new file mode 100644
index 00000000000..a84adc249c6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH6KL_CFG80211_H
+#define ATH6KL_CFG80211_H
+
+struct wireless_dev *ath6kl_cfg80211_init(struct device *dev);
+void ath6kl_cfg80211_deinit(struct ath6kl *ar);
+
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status);
+
+void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+ u8 *bssid, u16 listen_intvl,
+ u16 beacon_intvl,
+ enum network_type nw_type,
+ u8 beacon_ie_len, u8 assoc_req_len,
+ u8 assoc_resp_len, u8 *assoc_info);
+
+void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+ u8 *bssid, u8 assoc_resp_len,
+ u8 *assoc_info, u16 proto_reason);
+
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+ bool ismcast);
+
+#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
new file mode 100644
index 00000000000..b92f0e5d233
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef COMMON_H
+#define COMMON_H
+
+#include <linux/netdevice.h>
+
+#define ATH6KL_MAX_IE 256
+
+extern int ath6kl_printk(const char *level, const char *fmt, ...);
+
+#define A_CACHE_LINE_PAD 128
+
+/*
+ * Reflects the version of binary interface exposed by ATH6KL target
+ * firmware. Needs to be incremented by 1 for any change in the firmware
+ * that requires upgrade of the driver on the host side for the change to
+ * work correctly
+ */
+#define ATH6KL_ABI_VERSION 1
+
+#define SIGNAL_QUALITY_METRICS_NUM_MAX 2
+
+enum {
+ SIGNAL_QUALITY_METRICS_SNR = 0,
+ SIGNAL_QUALITY_METRICS_RSSI,
+ SIGNAL_QUALITY_METRICS_ALL,
+};
+
+/*
+ * Data Path
+ */
+
+#define WMI_MAX_TX_DATA_FRAME_LENGTH \
+ (1500 + sizeof(struct wmi_data_hdr) + \
+ sizeof(struct ethhdr) + \
+ sizeof(struct ath6kl_llc_snap_hdr))
+
+/* An AMSDU frame */ /* The MAX AMSDU length of AR6003 is 3839 */
+#define WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH \
+ (3840 + sizeof(struct wmi_data_hdr) + \
+ sizeof(struct ethhdr) + \
+ sizeof(struct ath6kl_llc_snap_hdr))
+
+#define EPPING_ALIGNMENT_PAD \
+ (((sizeof(struct htc_frame_hdr) + 3) & (~0x3)) \
+ - sizeof(struct htc_frame_hdr))
+
+struct ath6kl_llc_snap_hdr {
+ u8 dsap;
+ u8 ssap;
+ u8 cntl;
+ u8 org_code[3];
+ __be16 eth_type;
+} __packed;
+
+enum crypto_type {
+ NONE_CRYPT = 0x01,
+ WEP_CRYPT = 0x02,
+ TKIP_CRYPT = 0x04,
+ AES_CRYPT = 0x08,
+};
+
+struct htc_endpoint_credit_dist;
+struct ath6kl;
+enum htc_credit_dist_reason;
+struct htc_credit_state_info;
+
+int ath6k_setup_credit_dist(void *htc_handle,
+ struct htc_credit_state_info *cred_info);
+void ath6k_credit_distribute(struct htc_credit_state_info *cred_inf,
+ struct list_head *epdist_list,
+ enum htc_credit_dist_reason reason);
+void ath6k_credit_init(struct htc_credit_state_info *cred_inf,
+ struct list_head *ep_list,
+ int tot_credits);
+void ath6k_seek_credits(struct htc_credit_state_info *cred_inf,
+ struct htc_endpoint_credit_dist *ep_dist);
+struct ath6kl *ath6kl_core_alloc(struct device *sdev);
+int ath6kl_core_init(struct ath6kl *ar);
+int ath6kl_unavail_ev(struct ath6kl *ar);
+struct sk_buff *ath6kl_buf_alloc(int size);
+#endif /* COMMON_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
new file mode 100644
index 00000000000..6d8a4845baa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -0,0 +1,637 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef CORE_H
+#define CORE_H
+
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/circ_buf.h>
+#include <net/cfg80211.h>
+#include "htc.h"
+#include "wmi.h"
+#include "bmi.h"
+#include "target.h"
+
+#define MAX_ATH6KL 1
+#define ATH6KL_MAX_RX_BUFFERS 16
+#define ATH6KL_BUFFER_SIZE 1664
+#define ATH6KL_MAX_AMSDU_RX_BUFFERS 4
+#define ATH6KL_AMSDU_REFILL_THRESHOLD 3
+#define ATH6KL_AMSDU_BUFFER_SIZE (WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH + 128)
+#define MAX_MSDU_SUBFRAME_PAYLOAD_LEN 1508
+#define MIN_MSDU_SUBFRAME_PAYLOAD_LEN 46
+
+#define USER_SAVEDKEYS_STAT_INIT 0
+#define USER_SAVEDKEYS_STAT_RUN 1
+
+#define ATH6KL_TX_TIMEOUT 10
+#define ATH6KL_MAX_ENDPOINTS 4
+#define MAX_NODE_NUM 15
+
+/* Extra bytes for htc header alignment */
+#define ATH6KL_HTC_ALIGN_BYTES 3
+
+/* MAX_HI_COOKIE_NUM are reserved for high priority traffic */
+#define MAX_DEF_COOKIE_NUM 180
+#define MAX_HI_COOKIE_NUM 18 /* 10% of MAX_COOKIE_NUM */
+#define MAX_COOKIE_NUM (MAX_DEF_COOKIE_NUM + MAX_HI_COOKIE_NUM)
+
+#define MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC)
+
+#define DISCON_TIMER_INTVAL 10000 /* in msec */
+#define A_DEFAULT_LISTEN_INTERVAL 100
+#define A_MAX_WOW_LISTEN_INTERVAL 1000
+
+/* includes also the null byte */
+#define ATH6KL_FIRMWARE_MAGIC "QCA-ATH6KL"
+
+enum ath6kl_fw_ie_type {
+ ATH6KL_FW_IE_FW_VERSION = 0,
+ ATH6KL_FW_IE_TIMESTAMP = 1,
+ ATH6KL_FW_IE_OTP_IMAGE = 2,
+ ATH6KL_FW_IE_FW_IMAGE = 3,
+ ATH6KL_FW_IE_PATCH_IMAGE = 4,
+ ATH6KL_FW_IE_RESERVED_RAM_SIZE = 5,
+ ATH6KL_FW_IE_CAPABILITIES = 6,
+ ATH6KL_FW_IE_PATCH_ADDR = 7,
+};
+
+enum ath6kl_fw_capability {
+ ATH6KL_FW_CAPABILITY_HOST_P2P = 0,
+
+ /* this needs to be last */
+ ATH6KL_FW_CAPABILITY_MAX,
+};
+
+#define ATH6KL_CAPABILITY_LEN (ALIGN(ATH6KL_FW_CAPABILITY_MAX, 32) / 32)
+
+struct ath6kl_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[0];
+};
+
+/* AR6003 1.0 definitions */
+#define AR6003_REV1_VERSION 0x300002ba
+
+/* AR6003 2.0 definitions */
+#define AR6003_REV2_VERSION 0x30000384
+#define AR6003_REV2_PATCH_DOWNLOAD_ADDRESS 0x57e910
+#define AR6003_REV2_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
+#define AR6003_REV2_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
+#define AR6003_REV2_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin"
+#define AR6003_REV2_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
+#define AR6003_REV2_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin"
+#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
+#define AR6003_REV2_DEFAULT_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD31.bin"
+
+/* AR6003 3.0 definitions */
+#define AR6003_REV3_VERSION 0x30000582
+#define AR6003_REV3_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
+#define AR6003_REV3_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
+#define AR6003_REV3_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athtcmd_ram.bin"
+#define AR6003_REV3_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
+#define AR6003_REV3_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin"
+#define AR6003_REV3_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
+#define AR6003_REV3_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
+
+/* AR6004 1.0 definitions */
+#define AR6004_REV1_VERSION 0x30000623
+#define AR6004_REV1_FIRMWARE_FILE "ath6k/AR6004/hw6.1/fw.ram.bin"
+#define AR6004_REV1_FIRMWARE_2_FILE "ath6k/AR6004/hw6.1/fw-2.bin"
+#define AR6004_REV1_BOARD_DATA_FILE "ath6k/AR6004/hw6.1/bdata.bin"
+#define AR6004_REV1_DEFAULT_BOARD_DATA_FILE "ath6k/AR6004/hw6.1/bdata.DB132.bin"
+#define AR6004_REV1_EPPING_FIRMWARE_FILE "ath6k/AR6004/hw6.1/endpointping.bin"
+
+/* Per STA data, used in AP mode */
+#define STA_PS_AWAKE BIT(0)
+#define STA_PS_SLEEP BIT(1)
+#define STA_PS_POLLED BIT(2)
+
+/* HTC TX packet tagging definitions */
+#define ATH6KL_CONTROL_PKT_TAG HTC_TX_PACKET_TAG_USER_DEFINED
+#define ATH6KL_DATA_PKT_TAG (ATH6KL_CONTROL_PKT_TAG + 1)
+
+#define AR6003_CUST_DATA_SIZE 16
+
+#define AGGR_WIN_IDX(x, y) ((x) % (y))
+#define AGGR_INCR_IDX(x, y) AGGR_WIN_IDX(((x) + 1), (y))
+#define AGGR_DCRM_IDX(x, y) AGGR_WIN_IDX(((x) - 1), (y))
+#define ATH6KL_MAX_SEQ_NO 0xFFF
+#define ATH6KL_NEXT_SEQ_NO(x) (((x) + 1) & ATH6KL_MAX_SEQ_NO)
+
+#define NUM_OF_TIDS 8
+#define AGGR_SZ_DEFAULT 8
+
+#define AGGR_WIN_SZ_MIN 2
+#define AGGR_WIN_SZ_MAX 8
+
+#define TID_WINDOW_SZ(_x) ((_x) << 1)
+
+#define AGGR_NUM_OF_FREE_NETBUFS 16
+
+#define AGGR_RX_TIMEOUT 400 /* in ms */
+
+#define WMI_TIMEOUT (2 * HZ)
+
+#define MBOX_YIELD_LIMIT 99
+
+/* configuration lags */
+/*
+ * ATH6KL_CONF_IGNORE_ERP_BARKER: Ignore the barker premable in
+ * ERP IE of beacon to determine the short premable support when
+ * sending (Re)Assoc req.
+ * ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN: Don't send the power
+ * module state transition failure events which happen during
+ * scan, to the host.
+ */
+#define ATH6KL_CONF_IGNORE_ERP_BARKER BIT(0)
+#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1)
+#define ATH6KL_CONF_ENABLE_11N BIT(2)
+#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
+
+enum wlan_low_pwr_state {
+ WLAN_POWER_STATE_ON,
+ WLAN_POWER_STATE_CUT_PWR,
+ WLAN_POWER_STATE_DEEP_SLEEP,
+ WLAN_POWER_STATE_WOW
+};
+
+enum sme_state {
+ SME_DISCONNECTED,
+ SME_CONNECTING,
+ SME_CONNECTED
+};
+
+struct skb_hold_q {
+ struct sk_buff *skb;
+ bool is_amsdu;
+ u16 seq_no;
+};
+
+struct rxtid {
+ bool aggr;
+ bool progress;
+ bool timer_mon;
+ u16 win_sz;
+ u16 seq_next;
+ u32 hold_q_sz;
+ struct skb_hold_q *hold_q;
+ struct sk_buff_head q;
+ spinlock_t lock;
+};
+
+struct rxtid_stats {
+ u32 num_into_aggr;
+ u32 num_dups;
+ u32 num_oow;
+ u32 num_mpdu;
+ u32 num_amsdu;
+ u32 num_delivered;
+ u32 num_timeouts;
+ u32 num_hole;
+ u32 num_bar;
+};
+
+struct aggr_info {
+ u8 aggr_sz;
+ u8 timer_scheduled;
+ struct timer_list timer;
+ struct net_device *dev;
+ struct rxtid rx_tid[NUM_OF_TIDS];
+ struct sk_buff_head free_q;
+ struct rxtid_stats stat[NUM_OF_TIDS];
+};
+
+struct ath6kl_wep_key {
+ u8 key_index;
+ u8 key_len;
+ u8 key[64];
+};
+
+#define ATH6KL_KEY_SEQ_LEN 8
+
+struct ath6kl_key {
+ u8 key[WLAN_MAX_KEY_LEN];
+ u8 key_len;
+ u8 seq[ATH6KL_KEY_SEQ_LEN];
+ u8 seq_len;
+ u32 cipher;
+};
+
+struct ath6kl_node_mapping {
+ u8 mac_addr[ETH_ALEN];
+ u8 ep_id;
+ u8 tx_pend;
+};
+
+struct ath6kl_cookie {
+ struct sk_buff *skb;
+ u32 map_no;
+ struct htc_packet htc_pkt;
+ struct ath6kl_cookie *arc_list_next;
+};
+
+struct ath6kl_sta {
+ u16 sta_flags;
+ u8 mac[ETH_ALEN];
+ u8 aid;
+ u8 keymgmt;
+ u8 ucipher;
+ u8 auth;
+ u8 wpa_ie[ATH6KL_MAX_IE];
+ struct sk_buff_head psq;
+ spinlock_t psq_lock;
+};
+
+struct ath6kl_version {
+ u32 target_ver;
+ u32 wlan_ver;
+ u32 abi_ver;
+};
+
+struct ath6kl_bmi {
+ u32 cmd_credits;
+ bool done_sent;
+ u8 *cmd_buf;
+};
+
+struct target_stats {
+ u64 tx_pkt;
+ u64 tx_byte;
+ u64 tx_ucast_pkt;
+ u64 tx_ucast_byte;
+ u64 tx_mcast_pkt;
+ u64 tx_mcast_byte;
+ u64 tx_bcast_pkt;
+ u64 tx_bcast_byte;
+ u64 tx_rts_success_cnt;
+ u64 tx_pkt_per_ac[4];
+
+ u64 tx_err;
+ u64 tx_fail_cnt;
+ u64 tx_retry_cnt;
+ u64 tx_mult_retry_cnt;
+ u64 tx_rts_fail_cnt;
+
+ u64 rx_pkt;
+ u64 rx_byte;
+ u64 rx_ucast_pkt;
+ u64 rx_ucast_byte;
+ u64 rx_mcast_pkt;
+ u64 rx_mcast_byte;
+ u64 rx_bcast_pkt;
+ u64 rx_bcast_byte;
+ u64 rx_frgment_pkt;
+
+ u64 rx_err;
+ u64 rx_crc_err;
+ u64 rx_key_cache_miss;
+ u64 rx_decrypt_err;
+ u64 rx_dupl_frame;
+
+ u64 tkip_local_mic_fail;
+ u64 tkip_cnter_measures_invoked;
+ u64 tkip_replays;
+ u64 tkip_fmt_err;
+ u64 ccmp_fmt_err;
+ u64 ccmp_replays;
+
+ u64 pwr_save_fail_cnt;
+
+ u64 cs_bmiss_cnt;
+ u64 cs_low_rssi_cnt;
+ u64 cs_connect_cnt;
+ u64 cs_discon_cnt;
+
+ s32 tx_ucast_rate;
+ s32 rx_ucast_rate;
+
+ u32 lq_val;
+
+ u32 wow_pkt_dropped;
+ u16 wow_evt_discarded;
+
+ s16 noise_floor_calib;
+ s16 cs_rssi;
+ s16 cs_ave_beacon_rssi;
+ u8 cs_ave_beacon_snr;
+ u8 cs_last_roam_msec;
+ u8 cs_snr;
+
+ u8 wow_host_pkt_wakeups;
+ u8 wow_host_evt_wakeups;
+
+ u32 arp_received;
+ u32 arp_matched;
+ u32 arp_replied;
+};
+
+struct ath6kl_mbox_info {
+ u32 htc_addr;
+ u32 htc_ext_addr;
+ u32 htc_ext_sz;
+
+ u32 block_size;
+
+ u32 gmbox_addr;
+
+ u32 gmbox_sz;
+};
+
+/*
+ * 802.11i defines an extended IV for use with non-WEP ciphers.
+ * When the EXTIV bit is set in the key id byte an additional
+ * 4 bytes immediately follow the IV for TKIP. For CCMP the
+ * EXTIV bit is likewise set but the 8 bytes represent the
+ * CCMP header rather than IV+extended-IV.
+ */
+
+#define ATH6KL_KEYBUF_SIZE 16
+#define ATH6KL_MICBUF_SIZE (8+8) /* space for both tx and rx */
+
+#define ATH6KL_KEY_XMIT 0x01
+#define ATH6KL_KEY_RECV 0x02
+#define ATH6KL_KEY_DEFAULT 0x80 /* default xmit key */
+
+/* Initial group key for AP mode */
+struct ath6kl_req_key {
+ bool valid;
+ u8 key_index;
+ int key_type;
+ u8 key[WLAN_MAX_KEY_LEN];
+ u8 key_len;
+};
+
+/* Flag info */
+#define WMI_ENABLED 0
+#define WMI_READY 1
+#define CONNECTED 2
+#define STATS_UPDATE_PEND 3
+#define CONNECT_PEND 4
+#define WMM_ENABLED 5
+#define NETQ_STOPPED 6
+#define WMI_CTRL_EP_FULL 7
+#define DTIM_EXPIRED 8
+#define DESTROY_IN_PROGRESS 9
+#define NETDEV_REGISTERED 10
+#define SKIP_SCAN 11
+#define WLAN_ENABLED 12
+#define TESTMODE 13
+#define CLEAR_BSSFILTER_ON_BEACON 14
+#define DTIM_PERIOD_AVAIL 15
+
+struct ath6kl {
+ struct device *dev;
+ struct net_device *net_dev;
+ struct ath6kl_bmi bmi;
+ const struct ath6kl_hif_ops *hif_ops;
+ struct wmi *wmi;
+ int tx_pending[ENDPOINT_MAX];
+ int total_tx_data_pend;
+ struct htc_target *htc_target;
+ void *hif_priv;
+ spinlock_t lock;
+ struct semaphore sem;
+ int ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 next_mode;
+ u8 nw_type;
+ u8 dot11_auth_mode;
+ u8 auth_mode;
+ u8 prwise_crypto;
+ u8 prwise_crypto_len;
+ u8 grp_crypto;
+ u8 grp_crypto_len;
+ u8 def_txkey_index;
+ struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+ u8 bssid[ETH_ALEN];
+ u8 req_bssid[ETH_ALEN];
+ u16 ch_hint;
+ u16 bss_ch;
+ u16 listen_intvl_b;
+ u16 listen_intvl_t;
+ u8 lrssi_roam_threshold;
+ struct ath6kl_version version;
+ u32 target_type;
+ u8 tx_pwr;
+ struct net_device_stats net_stats;
+ struct target_stats target_stats;
+ struct ath6kl_node_mapping node_map[MAX_NODE_NUM];
+ u8 ibss_ps_enable;
+ u8 node_num;
+ u8 next_ep_id;
+ struct ath6kl_cookie *cookie_list;
+ u32 cookie_count;
+ enum htc_endpoint_id ac2ep_map[WMM_NUM_AC];
+ bool ac_stream_active[WMM_NUM_AC];
+ u8 ac_stream_pri_map[WMM_NUM_AC];
+ u8 hiac_stream_active_pri;
+ u8 ep2ac_map[ENDPOINT_MAX];
+ enum htc_endpoint_id ctrl_ep;
+ struct htc_credit_state_info credit_state_info;
+ u32 connect_ctrl_flags;
+ u32 user_key_ctrl;
+ u8 usr_bss_filter;
+ struct ath6kl_sta sta_list[AP_MAX_NUM_STA];
+ u8 sta_list_index;
+ struct ath6kl_req_key ap_mode_bkey;
+ struct sk_buff_head mcastpsq;
+ spinlock_t mcastpsq_lock;
+ u8 intra_bss;
+ struct aggr_info *aggr_cntxt;
+ struct wmi_ap_mode_stat ap_stats;
+ u8 ap_country_code[3];
+ struct list_head amsdu_rx_buffer_queue;
+ struct timer_list disconnect_timer;
+ u8 rx_meta_ver;
+ struct wireless_dev *wdev;
+ struct cfg80211_scan_request *scan_req;
+ struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
+ enum sme_state sme_state;
+ enum wlan_low_pwr_state wlan_pwr_state;
+ struct wmi_scan_params_cmd sc_params;
+#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4
+ struct {
+ void *rx_report;
+ size_t rx_report_len;
+ } tm;
+
+ struct {
+ u32 dataset_patch_addr;
+ u32 app_load_addr;
+ u32 app_start_override_addr;
+ u32 board_ext_data_addr;
+ u32 reserved_ram_size;
+ } hw;
+
+ u16 conf_flags;
+ wait_queue_head_t event_wq;
+ struct ath6kl_mbox_info mbox_info;
+
+ struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM];
+ int reconnect_flag;
+ unsigned long flag;
+
+ u8 *fw_board;
+ size_t fw_board_len;
+
+ u8 *fw_otp;
+ size_t fw_otp_len;
+
+ u8 *fw;
+ size_t fw_len;
+
+ u8 *fw_patch;
+ size_t fw_patch_len;
+
+ unsigned long fw_capabilities[ATH6KL_CAPABILITY_LEN];
+
+ struct workqueue_struct *ath6kl_wq;
+
+ struct dentry *debugfs_phy;
+
+ u32 send_action_id;
+ bool probe_req_report;
+ u16 next_chan;
+
+ bool p2p;
+ u16 assoc_bss_beacon_int;
+ u8 assoc_bss_dtim_period;
+
+#ifdef CONFIG_ATH6KL_DEBUG
+ struct {
+ struct circ_buf fwlog_buf;
+ spinlock_t fwlog_lock;
+ void *fwlog_tmp;
+ u32 fwlog_mask;
+ unsigned int dbgfs_diag_reg;
+ u32 diag_reg_addr_wr;
+ u32 diag_reg_val_wr;
+
+ struct {
+ unsigned int invalid_rate;
+ } war_stats;
+ } debug;
+#endif /* CONFIG_ATH6KL_DEBUG */
+};
+
+static inline void *ath6kl_priv(struct net_device *dev)
+{
+ return wdev_priv(dev->ieee80211_ptr);
+}
+
+static inline void ath6kl_deposit_credit_to_ep(struct htc_credit_state_info
+ *cred_info,
+ struct htc_endpoint_credit_dist
+ *ep_dist, int credits)
+{
+ ep_dist->credits += credits;
+ ep_dist->cred_assngd += credits;
+ cred_info->cur_free_credits -= credits;
+}
+
+static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
+ u32 item_offset)
+{
+ u32 addr = 0;
+
+ if (ar->target_type == TARGET_TYPE_AR6003)
+ addr = ATH6KL_AR6003_HI_START_ADDR + item_offset;
+ else if (ar->target_type == TARGET_TYPE_AR6004)
+ addr = ATH6KL_AR6004_HI_START_ADDR + item_offset;
+
+ return addr;
+}
+
+void ath6kl_destroy(struct net_device *dev, unsigned int unregister);
+int ath6kl_configure_target(struct ath6kl *ar);
+void ath6kl_detect_error(unsigned long ptr);
+void disconnect_timer_handler(unsigned long ptr);
+void init_netdev(struct net_device *dev);
+void ath6kl_cookie_init(struct ath6kl *ar);
+void ath6kl_cookie_cleanup(struct ath6kl *ar);
+void ath6kl_rx(struct htc_target *target, struct htc_packet *packet);
+void ath6kl_tx_complete(void *context, struct list_head *packet_queue);
+enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
+ struct htc_packet *packet);
+void ath6kl_stop_txrx(struct ath6kl *ar);
+void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar);
+int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value);
+int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length);
+int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value);
+int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length);
+int ath6kl_read_fwlogs(struct ath6kl *ar);
+void ath6kl_init_profile_info(struct ath6kl *ar);
+void ath6kl_tx_data_cleanup(struct ath6kl *ar);
+void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
+ bool get_dbglogs);
+
+struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
+void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
+int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev);
+
+struct aggr_info *aggr_init(struct net_device *dev);
+void ath6kl_rx_refill(struct htc_target *target,
+ enum htc_endpoint_id endpoint);
+void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count);
+struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
+ enum htc_endpoint_id endpoint,
+ int len);
+void aggr_module_destroy(struct aggr_info *aggr_info);
+void aggr_reset_state(struct aggr_info *aggr_info);
+
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 * node_addr);
+struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
+
+void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver);
+int ath6kl_control_tx(void *devt, struct sk_buff *skb,
+ enum htc_endpoint_id eid);
+void ath6kl_connect_event(struct ath6kl *ar, u16 channel,
+ u8 *bssid, u16 listen_int,
+ u16 beacon_int, enum network_type net_type,
+ u8 beacon_ie_len, u8 assoc_req_len,
+ u8 assoc_resp_len, u8 *assoc_info);
+void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel);
+void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
+ u8 keymgmt, u8 ucipher, u8 auth,
+ u8 assoc_req_len, u8 *assoc_info);
+void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason,
+ u8 *bssid, u8 assoc_resp_len,
+ u8 *assoc_info, u16 prot_reason_status);
+void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast);
+void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr);
+void ath6kl_scan_complete_evt(struct ath6kl *ar, int status);
+void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len);
+void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active);
+enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac);
+
+void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid);
+
+void ath6kl_dtimexpiry_event(struct ath6kl *ar);
+void ath6kl_disconnect(struct ath6kl *ar);
+void ath6kl_deep_sleep_enable(struct ath6kl *ar);
+void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid);
+void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no,
+ u8 win_sz);
+void ath6kl_wakeup_event(void *dev);
+void ath6kl_target_failure(struct ath6kl *ar);
+
+#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
new file mode 100644
index 00000000000..ba3f23d7115
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -0,0 +1,934 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+
+#include <linux/circ_buf.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+
+#include "debug.h"
+#include "target.h"
+
+struct ath6kl_fwlog_slot {
+ __le32 timestamp;
+ __le32 length;
+
+ /* max ATH6KL_FWLOG_PAYLOAD_SIZE bytes */
+ u8 payload[0];
+};
+
+#define ATH6KL_FWLOG_SIZE 32768
+#define ATH6KL_FWLOG_SLOT_SIZE (sizeof(struct ath6kl_fwlog_slot) + \
+ ATH6KL_FWLOG_PAYLOAD_SIZE)
+#define ATH6KL_FWLOG_VALID_MASK 0x1ffff
+
+int ath6kl_printk(const char *level, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int rtn;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ rtn = printk("%sath6kl: %pV", level, &vaf);
+
+ va_end(args);
+
+ return rtn;
+}
+
+#ifdef CONFIG_ATH6KL_DEBUG
+
+#define REG_OUTPUT_LEN_PER_LINE 25
+#define REGTYPE_STR_LEN 100
+
+struct ath6kl_diag_reg_info {
+ u32 reg_start;
+ u32 reg_end;
+ const char *reg_info;
+};
+
+static const struct ath6kl_diag_reg_info diag_reg[] = {
+ { 0x20000, 0x200fc, "General DMA and Rx registers" },
+ { 0x28000, 0x28900, "MAC PCU register & keycache" },
+ { 0x20800, 0x20a40, "QCU" },
+ { 0x21000, 0x212f0, "DCU" },
+ { 0x4000, 0x42e4, "RTC" },
+ { 0x540000, 0x540000 + (256 * 1024), "RAM" },
+ { 0x29800, 0x2B210, "Base Band" },
+ { 0x1C000, 0x1C748, "Analog" },
+};
+
+void ath6kl_dump_registers(struct ath6kl_device *dev,
+ struct ath6kl_irq_proc_registers *irq_proc_reg,
+ struct ath6kl_irq_enable_reg *irq_enable_reg)
+{
+
+ ath6kl_dbg(ATH6KL_DBG_ANY, ("<------- Register Table -------->\n"));
+
+ if (irq_proc_reg != NULL) {
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Host Int status: 0x%x\n",
+ irq_proc_reg->host_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "CPU Int status: 0x%x\n",
+ irq_proc_reg->cpu_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Error Int status: 0x%x\n",
+ irq_proc_reg->error_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Counter Int status: 0x%x\n",
+ irq_proc_reg->counter_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Mbox Frame: 0x%x\n",
+ irq_proc_reg->mbox_frame);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Rx Lookahead Valid: 0x%x\n",
+ irq_proc_reg->rx_lkahd_valid);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Rx Lookahead 0: 0x%x\n",
+ irq_proc_reg->rx_lkahd[0]);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Rx Lookahead 1: 0x%x\n",
+ irq_proc_reg->rx_lkahd[1]);
+
+ if (dev->ar->mbox_info.gmbox_addr != 0) {
+ /*
+ * If the target supports GMBOX hardware, dump some
+ * additional state.
+ */
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX Host Int status 2: 0x%x\n",
+ irq_proc_reg->host_int_status2);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX RX Avail: 0x%x\n",
+ irq_proc_reg->gmbox_rx_avail);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX lookahead alias 0: 0x%x\n",
+ irq_proc_reg->rx_gmbox_lkahd_alias[0]);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX lookahead alias 1: 0x%x\n",
+ irq_proc_reg->rx_gmbox_lkahd_alias[1]);
+ }
+
+ }
+
+ if (irq_enable_reg != NULL) {
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Int status Enable: 0x%x\n",
+ irq_enable_reg->int_status_en);
+ ath6kl_dbg(ATH6KL_DBG_ANY, "Counter Int status Enable: 0x%x\n",
+ irq_enable_reg->cntr_int_status_en);
+ }
+ ath6kl_dbg(ATH6KL_DBG_ANY, "<------------------------------->\n");
+}
+
+static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
+{
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "--- endpoint: %d svc_id: 0x%X ---\n",
+ ep_dist->endpoint, ep_dist->svc_id);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags : 0x%X\n",
+ ep_dist->dist_flags);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm : %d\n",
+ ep_dist->cred_norm);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min : %d\n",
+ ep_dist->cred_min);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " credits : %d\n",
+ ep_dist->credits);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd : %d\n",
+ ep_dist->cred_assngd);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred : %d\n",
+ ep_dist->seek_cred);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz : %d\n",
+ ep_dist->cred_sz);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg : %d\n",
+ ep_dist->cred_per_msg);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist : %d\n",
+ ep_dist->cred_to_dist);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth : %d\n",
+ get_queue_depth(&((struct htc_endpoint *)
+ ep_dist->htc_rsvd)->txq));
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "----------------------------------\n");
+}
+
+void dump_cred_dist_stats(struct htc_target *target)
+{
+ struct htc_endpoint_credit_dist *ep_list;
+
+ if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC))
+ return;
+
+ list_for_each_entry(ep_list, &target->cred_dist_list, list)
+ dump_cred_dist(ep_list);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n",
+ target->cred_dist_cntxt, NULL);
+ ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n",
+ target->cred_dist_cntxt->total_avail_credits,
+ target->cred_dist_cntxt->cur_free_credits);
+}
+
+static int ath6kl_debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
+{
+ switch (war) {
+ case ATH6KL_WAR_INVALID_RATE:
+ ar->debug.war_stats.invalid_rate++;
+ break;
+ }
+}
+
+static ssize_t read_file_war_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ ssize_t ret_cnt;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Workaround stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n\n",
+ "=================");
+ len += scnprintf(buf + len, buf_len - len, "%20s %10u\n",
+ "Invalid rates", ar->debug.war_stats.invalid_rate);
+
+ if (WARN_ON(len > buf_len))
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_war_stats = {
+ .read = read_file_war_stats,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath6kl_debug_fwlog_add(struct ath6kl *ar, const void *buf,
+ size_t buf_len)
+{
+ struct circ_buf *fwlog = &ar->debug.fwlog_buf;
+ size_t space;
+ int i;
+
+ /* entries must all be equal size */
+ if (WARN_ON(buf_len != ATH6KL_FWLOG_SLOT_SIZE))
+ return;
+
+ space = CIRC_SPACE(fwlog->head, fwlog->tail, ATH6KL_FWLOG_SIZE);
+ if (space < buf_len)
+ /* discard oldest slot */
+ fwlog->tail = (fwlog->tail + ATH6KL_FWLOG_SLOT_SIZE) &
+ (ATH6KL_FWLOG_SIZE - 1);
+
+ for (i = 0; i < buf_len; i += space) {
+ space = CIRC_SPACE_TO_END(fwlog->head, fwlog->tail,
+ ATH6KL_FWLOG_SIZE);
+
+ if ((size_t) space > buf_len - i)
+ space = buf_len - i;
+
+ memcpy(&fwlog->buf[fwlog->head], buf, space);
+ fwlog->head = (fwlog->head + space) & (ATH6KL_FWLOG_SIZE - 1);
+ }
+
+}
+
+void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len)
+{
+ struct ath6kl_fwlog_slot *slot = ar->debug.fwlog_tmp;
+ size_t slot_len;
+
+ if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE))
+ return;
+
+ spin_lock_bh(&ar->debug.fwlog_lock);
+
+ slot->timestamp = cpu_to_le32(jiffies);
+ slot->length = cpu_to_le32(len);
+ memcpy(slot->payload, buf, len);
+
+ slot_len = sizeof(*slot) + len;
+
+ if (slot_len < ATH6KL_FWLOG_SLOT_SIZE)
+ memset(slot->payload + len, 0,
+ ATH6KL_FWLOG_SLOT_SIZE - slot_len);
+
+ ath6kl_debug_fwlog_add(ar, slot, ATH6KL_FWLOG_SLOT_SIZE);
+
+ spin_unlock_bh(&ar->debug.fwlog_lock);
+}
+
+static bool ath6kl_debug_fwlog_empty(struct ath6kl *ar)
+{
+ return CIRC_CNT(ar->debug.fwlog_buf.head,
+ ar->debug.fwlog_buf.tail,
+ ATH6KL_FWLOG_SLOT_SIZE) == 0;
+}
+
+static ssize_t ath6kl_fwlog_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct circ_buf *fwlog = &ar->debug.fwlog_buf;
+ size_t len = 0, buf_len = count;
+ ssize_t ret_cnt;
+ char *buf;
+ int ccnt;
+
+ buf = vmalloc(buf_len);
+ if (!buf)
+ return -ENOMEM;
+
+ /* read undelivered logs from firmware */
+ ath6kl_read_fwlogs(ar);
+
+ spin_lock_bh(&ar->debug.fwlog_lock);
+
+ while (len < buf_len && !ath6kl_debug_fwlog_empty(ar)) {
+ ccnt = CIRC_CNT_TO_END(fwlog->head, fwlog->tail,
+ ATH6KL_FWLOG_SIZE);
+
+ if ((size_t) ccnt > buf_len - len)
+ ccnt = buf_len - len;
+
+ memcpy(buf + len, &fwlog->buf[fwlog->tail], ccnt);
+ len += ccnt;
+
+ fwlog->tail = (fwlog->tail + ccnt) &
+ (ATH6KL_FWLOG_SIZE - 1);
+ }
+
+ spin_unlock_bh(&ar->debug.fwlog_lock);
+
+ if (WARN_ON(len > buf_len))
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ vfree(buf);
+
+ return ret_cnt;
+}
+
+static const struct file_operations fops_fwlog = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_fwlog_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_fwlog_mask_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[16];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%x\n", ar->debug.fwlog_mask);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_fwlog_mask_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &ar->debug.fwlog_mask);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_config_debug_module_cmd(ar->wmi,
+ ATH6KL_FWLOG_VALID_MASK,
+ ar->debug.fwlog_mask);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_fwlog_mask = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_fwlog_mask_read,
+ .write = ath6kl_fwlog_mask_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct target_stats *tgt_stats = &ar->target_stats;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ int i;
+ long left;
+ ssize_t ret_cnt;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (down_interruptible(&ar->sem)) {
+ kfree(buf);
+ return -EBUSY;
+ }
+
+ set_bit(STATS_UPDATE_PEND, &ar->flag);
+
+ if (ath6kl_wmi_get_stats_cmd(ar->wmi)) {
+ up(&ar->sem);
+ kfree(buf);
+ return -EIO;
+ }
+
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ !test_bit(STATS_UPDATE_PEND,
+ &ar->flag), WMI_TIMEOUT);
+
+ up(&ar->sem);
+
+ if (left <= 0) {
+ kfree(buf);
+ return -ETIMEDOUT;
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Target Tx stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n\n",
+ "=================");
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast packets", tgt_stats->tx_ucast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast packets", tgt_stats->tx_bcast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast byte", tgt_stats->tx_ucast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast byte", tgt_stats->tx_bcast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Rts success cnt", tgt_stats->tx_rts_success_cnt);
+ for (i = 0; i < 4; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%18s %d %10llu\n", "PER on ac",
+ i, tgt_stats->tx_pkt_per_ac[i]);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Error", tgt_stats->tx_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Fail count", tgt_stats->tx_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Retry count", tgt_stats->tx_retry_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Multi retry cnt", tgt_stats->tx_mult_retry_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Rts fail cnt", tgt_stats->tx_rts_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n\n",
+ "TKIP counter measure used",
+ tgt_stats->tkip_cnter_measures_invoked);
+
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Target Rx stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast packets", tgt_stats->rx_ucast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+ "Ucast Rate", tgt_stats->rx_ucast_rate);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast packets", tgt_stats->rx_bcast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast byte", tgt_stats->rx_ucast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast byte", tgt_stats->rx_bcast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Fragmented pkt", tgt_stats->rx_frgment_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Error", tgt_stats->rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "CRC Err", tgt_stats->rx_crc_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Key chache miss", tgt_stats->rx_key_cache_miss);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Decrypt Err", tgt_stats->rx_decrypt_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Duplicate frame", tgt_stats->rx_dupl_frame);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Tkip Mic failure", tgt_stats->tkip_local_mic_fail);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "TKIP format err", tgt_stats->tkip_fmt_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "CCMP format Err", tgt_stats->ccmp_fmt_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n\n",
+ "CCMP Replay Err", tgt_stats->ccmp_replays);
+
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Misc Target stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "=================");
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Beacon Miss count", tgt_stats->cs_bmiss_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Num Connects", tgt_stats->cs_connect_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Num disconnects", tgt_stats->cs_discon_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+ "Beacon avg rssi", tgt_stats->cs_ave_beacon_rssi);
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_tgt_stats = {
+ .read = read_file_tgt_stats,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#define print_credit_info(fmt_str, ep_list_field) \
+ (len += scnprintf(buf + len, buf_len - len, fmt_str, \
+ ep_list->ep_list_field))
+#define CREDIT_INFO_DISPLAY_STRING_LEN 200
+#define CREDIT_INFO_LEN 128
+
+static ssize_t read_file_credit_dist_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct htc_target *target = ar->htc_target;
+ struct htc_endpoint_credit_dist *ep_list;
+ char *buf;
+ unsigned int buf_len, len = 0;
+ ssize_t ret_cnt;
+
+ buf_len = CREDIT_INFO_DISPLAY_STRING_LEN +
+ get_queue_depth(&target->cred_dist_list) * CREDIT_INFO_LEN;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
+ "Total Avail Credits: ",
+ target->cred_dist_cntxt->total_avail_credits);
+ len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
+ "Free credits :",
+ target->cred_dist_cntxt->cur_free_credits);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " Epid Flags Cred_norm Cred_min Credits Cred_assngd"
+ " Seek_cred Cred_sz Cred_per_msg Cred_to_dist"
+ " qdepth\n");
+
+ list_for_each_entry(ep_list, &target->cred_dist_list, list) {
+ print_credit_info(" %2d", endpoint);
+ print_credit_info("%10x", dist_flags);
+ print_credit_info("%8d", cred_norm);
+ print_credit_info("%9d", cred_min);
+ print_credit_info("%9d", credits);
+ print_credit_info("%10d", cred_assngd);
+ print_credit_info("%13d", seek_cred);
+ print_credit_info("%12d", cred_sz);
+ print_credit_info("%9d", cred_per_msg);
+ print_credit_info("%14d", cred_to_dist);
+ len += scnprintf(buf + len, buf_len - len, "%12d\n",
+ get_queue_depth(&((struct htc_endpoint *)
+ ep_list->htc_rsvd)->txq));
+ }
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_credit_dist_stats = {
+ .read = read_file_credit_dist_stats,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static unsigned long ath6kl_get_num_reg(void)
+{
+ int i;
+ unsigned long n_reg = 0;
+
+ for (i = 0; i < ARRAY_SIZE(diag_reg); i++)
+ n_reg = n_reg +
+ (diag_reg[i].reg_end - diag_reg[i].reg_start) / 4 + 1;
+
+ return n_reg;
+}
+
+static bool ath6kl_dbg_is_diag_reg_valid(u32 reg_addr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(diag_reg); i++) {
+ if (reg_addr >= diag_reg[i].reg_start &&
+ reg_addr <= diag_reg[i].reg_end)
+ return true;
+ }
+
+ return false;
+}
+
+static ssize_t ath6kl_regread_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[50];
+ unsigned int len = 0;
+
+ if (ar->debug.dbgfs_diag_reg)
+ len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n",
+ ar->debug.dbgfs_diag_reg);
+ else
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "All diag registers\n");
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_regread_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[50];
+ unsigned int len;
+ unsigned long reg_addr;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ if (strict_strtoul(buf, 0, &reg_addr))
+ return -EINVAL;
+
+ if ((reg_addr % 4) != 0)
+ return -EINVAL;
+
+ if (reg_addr && !ath6kl_dbg_is_diag_reg_valid(reg_addr))
+ return -EINVAL;
+
+ ar->debug.dbgfs_diag_reg = reg_addr;
+
+ return count;
+}
+
+static const struct file_operations fops_diag_reg_read = {
+ .read = ath6kl_regread_read,
+ .write = ath6kl_regread_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath6kl_regdump_open(struct inode *inode, struct file *file)
+{
+ struct ath6kl *ar = inode->i_private;
+ u8 *buf;
+ unsigned long int reg_len;
+ unsigned int len = 0, n_reg;
+ u32 addr;
+ __le32 reg_val;
+ int i, status;
+
+ /* Dump all the registers if no register is specified */
+ if (!ar->debug.dbgfs_diag_reg)
+ n_reg = ath6kl_get_num_reg();
+ else
+ n_reg = 1;
+
+ reg_len = n_reg * REG_OUTPUT_LEN_PER_LINE;
+ if (n_reg > 1)
+ reg_len += REGTYPE_STR_LEN;
+
+ buf = vmalloc(reg_len);
+ if (!buf)
+ return -ENOMEM;
+
+ if (n_reg == 1) {
+ addr = ar->debug.dbgfs_diag_reg;
+
+ status = ath6kl_diag_read32(ar,
+ TARG_VTOP(ar->target_type, addr),
+ (u32 *)&reg_val);
+ if (status)
+ goto fail_reg_read;
+
+ len += scnprintf(buf + len, reg_len - len,
+ "0x%06x 0x%08x\n", addr, le32_to_cpu(reg_val));
+ goto done;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(diag_reg); i++) {
+ len += scnprintf(buf + len, reg_len - len,
+ "%s\n", diag_reg[i].reg_info);
+ for (addr = diag_reg[i].reg_start;
+ addr <= diag_reg[i].reg_end; addr += 4) {
+ status = ath6kl_diag_read32(ar,
+ TARG_VTOP(ar->target_type, addr),
+ (u32 *)&reg_val);
+ if (status)
+ goto fail_reg_read;
+
+ len += scnprintf(buf + len, reg_len - len,
+ "0x%06x 0x%08x\n",
+ addr, le32_to_cpu(reg_val));
+ }
+ }
+
+done:
+ file->private_data = buf;
+ return 0;
+
+fail_reg_read:
+ ath6kl_warn("Unable to read memory:%u\n", addr);
+ vfree(buf);
+ return -EIO;
+}
+
+static ssize_t ath6kl_regdump_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ u8 *buf = file->private_data;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static int ath6kl_regdump_release(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations fops_reg_dump = {
+ .open = ath6kl_regdump_open,
+ .read = ath6kl_regdump_read,
+ .release = ath6kl_regdump_release,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_lrssi_roam_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ unsigned long lrssi_roam_threshold;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &lrssi_roam_threshold))
+ return -EINVAL;
+
+ ar->lrssi_roam_threshold = lrssi_roam_threshold;
+
+ ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+
+ return count;
+}
+
+static ssize_t ath6kl_lrssi_roam_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", ar->lrssi_roam_threshold);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_lrssi_roam_threshold = {
+ .read = ath6kl_lrssi_roam_read,
+ .write = ath6kl_lrssi_roam_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_regwrite_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[32];
+ unsigned int len = 0;
+
+ len = scnprintf(buf, sizeof(buf), "Addr: 0x%x Val: 0x%x\n",
+ ar->debug.diag_reg_addr_wr, ar->debug.diag_reg_val_wr);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_regwrite_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[32];
+ char *sptr, *token;
+ unsigned int len = 0;
+ u32 reg_addr, reg_val;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, "=");
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &reg_addr))
+ return -EINVAL;
+
+ if (!ath6kl_dbg_is_diag_reg_valid(reg_addr))
+ return -EINVAL;
+
+ if (kstrtou32(sptr, 0, &reg_val))
+ return -EINVAL;
+
+ ar->debug.diag_reg_addr_wr = reg_addr;
+ ar->debug.diag_reg_val_wr = reg_val;
+
+ if (ath6kl_diag_write32(ar, ar->debug.diag_reg_addr_wr,
+ cpu_to_le32(ar->debug.diag_reg_val_wr)))
+ return -EIO;
+
+ return count;
+}
+
+static const struct file_operations fops_diag_reg_write = {
+ .read = ath6kl_regwrite_read,
+ .write = ath6kl_regwrite_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath6kl_debug_init(struct ath6kl *ar)
+{
+ ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE);
+ if (ar->debug.fwlog_buf.buf == NULL)
+ return -ENOMEM;
+
+ ar->debug.fwlog_tmp = kmalloc(ATH6KL_FWLOG_SLOT_SIZE, GFP_KERNEL);
+ if (ar->debug.fwlog_tmp == NULL) {
+ vfree(ar->debug.fwlog_buf.buf);
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&ar->debug.fwlog_lock);
+
+ /*
+ * Actually we are lying here but don't know how to read the mask
+ * value from the firmware.
+ */
+ ar->debug.fwlog_mask = 0;
+
+ ar->debugfs_phy = debugfs_create_dir("ath6kl",
+ ar->wdev->wiphy->debugfsdir);
+ if (!ar->debugfs_phy) {
+ vfree(ar->debug.fwlog_buf.buf);
+ kfree(ar->debug.fwlog_tmp);
+ return -ENOMEM;
+ }
+
+ debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_tgt_stats);
+
+ debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_credit_dist_stats);
+
+ debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_fwlog);
+
+ debugfs_create_file("fwlog_mask", S_IRUSR | S_IWUSR, ar->debugfs_phy,
+ ar, &fops_fwlog_mask);
+
+ debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar,
+ &fops_diag_reg_read);
+
+ debugfs_create_file("reg_dump", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_reg_dump);
+
+ debugfs_create_file("lrssi_roam_threshold", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_lrssi_roam_threshold);
+
+ debugfs_create_file("reg_write", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_diag_reg_write);
+
+ debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_war_stats);
+
+ return 0;
+}
+
+void ath6kl_debug_cleanup(struct ath6kl *ar)
+{
+ vfree(ar->debug.fwlog_buf.buf);
+ kfree(ar->debug.fwlog_tmp);
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
new file mode 100644
index 00000000000..9288a3ce1e3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef DEBUG_H
+#define DEBUG_H
+
+#include "htc_hif.h"
+
+enum ATH6K_DEBUG_MASK {
+ ATH6KL_DBG_WLAN_CONNECT = BIT(0), /* wlan connect */
+ ATH6KL_DBG_WLAN_SCAN = BIT(1), /* wlan scan */
+ ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */
+ ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */
+ ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */
+ ATH6KL_DBG_HTC_SEND = BIT(5), /* htc send */
+ ATH6KL_DBG_HTC_RECV = BIT(6), /* htc recv */
+ ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */
+ ATH6KL_DBG_PM = BIT(8), /* power management */
+ ATH6KL_DBG_WLAN_NODE = BIT(9), /* general wlan node tracing */
+ ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */
+ ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */
+ ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */
+ ATH6KL_DBG_WLAN_CFG = BIT(13), /* cfg80211 i/f file tracing */
+ ATH6KL_DBG_RAW_BYTES = BIT(14), /* dump tx/rx frames */
+ ATH6KL_DBG_AGGR = BIT(15), /* aggregation */
+ ATH6KL_DBG_SDIO = BIT(16),
+ ATH6KL_DBG_SDIO_DUMP = BIT(17),
+ ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */
+ ATH6KL_DBG_WMI_DUMP = BIT(19),
+ ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
+};
+
+extern unsigned int debug_mask;
+extern int ath6kl_printk(const char *level, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+
+#define ath6kl_info(fmt, ...) \
+ ath6kl_printk(KERN_INFO, fmt, ##__VA_ARGS__)
+#define ath6kl_err(fmt, ...) \
+ ath6kl_printk(KERN_ERR, fmt, ##__VA_ARGS__)
+#define ath6kl_warn(fmt, ...) \
+ ath6kl_printk(KERN_WARNING, fmt, ##__VA_ARGS__)
+
+#define AR_DBG_LVL_CHECK(mask) (debug_mask & mask)
+
+enum ath6kl_war {
+ ATH6KL_WAR_INVALID_RATE,
+};
+
+#ifdef CONFIG_ATH6KL_DEBUG
+#define ath6kl_dbg(mask, fmt, ...) \
+ ({ \
+ int rtn; \
+ if (debug_mask & mask) \
+ rtn = ath6kl_printk(KERN_DEBUG, fmt, ##__VA_ARGS__); \
+ else \
+ rtn = 0; \
+ \
+ rtn; \
+ })
+
+static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ if (debug_mask & mask) {
+ if (msg)
+ ath6kl_dbg(mask, "%s\n", msg);
+
+ print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
+ }
+}
+
+void ath6kl_dump_registers(struct ath6kl_device *dev,
+ struct ath6kl_irq_proc_registers *irq_proc_reg,
+ struct ath6kl_irq_enable_reg *irq_en_reg);
+void dump_cred_dist_stats(struct htc_target *target);
+void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len);
+void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war);
+int ath6kl_debug_init(struct ath6kl *ar);
+void ath6kl_debug_cleanup(struct ath6kl *ar);
+
+#else
+static inline int ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
+ const char *fmt, ...)
+{
+ return 0;
+}
+
+static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+}
+
+static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
+ struct ath6kl_irq_proc_registers *irq_proc_reg,
+ struct ath6kl_irq_enable_reg *irq_en_reg)
+{
+
+}
+static inline void dump_cred_dist_stats(struct htc_target *target)
+{
+}
+
+static inline void ath6kl_debug_fwlog_event(struct ath6kl *ar,
+ const void *buf, size_t len)
+{
+}
+
+static inline void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
+{
+}
+
+static inline int ath6kl_debug_init(struct ath6kl *ar)
+{
+ return 0;
+}
+
+static inline void ath6kl_debug_cleanup(struct ath6kl *ar)
+{
+}
+
+#endif
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
new file mode 100644
index 00000000000..d6c898f3d0b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HIF_OPS_H
+#define HIF_OPS_H
+
+#include "hif.h"
+
+static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
+ u32 len, u32 request)
+{
+ return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
+}
+
+static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
+ u32 length, u32 request,
+ struct htc_packet *packet)
+{
+ return ar->hif_ops->write_async(ar, address, buffer, length,
+ request, packet);
+}
+static inline void ath6kl_hif_irq_enable(struct ath6kl *ar)
+{
+ return ar->hif_ops->irq_enable(ar);
+}
+
+static inline void ath6kl_hif_irq_disable(struct ath6kl *ar)
+{
+ return ar->hif_ops->irq_disable(ar);
+}
+
+static inline struct hif_scatter_req *hif_scatter_req_get(struct ath6kl *ar)
+{
+ return ar->hif_ops->scatter_req_get(ar);
+}
+
+static inline void hif_scatter_req_add(struct ath6kl *ar,
+ struct hif_scatter_req *s_req)
+{
+ return ar->hif_ops->scatter_req_add(ar, s_req);
+}
+
+static inline int ath6kl_hif_enable_scatter(struct ath6kl *ar)
+{
+ return ar->hif_ops->enable_scatter(ar);
+}
+
+static inline int ath6kl_hif_scat_req_rw(struct ath6kl *ar,
+ struct hif_scatter_req *scat_req)
+{
+ return ar->hif_ops->scat_req_rw(ar, scat_req);
+}
+
+static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar)
+{
+ return ar->hif_ops->cleanup_scatter(ar);
+}
+
+static inline int ath6kl_hif_suspend(struct ath6kl *ar)
+{
+ return ar->hif_ops->suspend(ar);
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
new file mode 100644
index 00000000000..797e2d1d9bf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HIF_H
+#define HIF_H
+
+#include "common.h"
+#include "core.h"
+
+#include <linux/scatterlist.h>
+
+#define BUS_REQUEST_MAX_NUM 64
+#define HIF_MBOX_BLOCK_SIZE 128
+#define HIF_MBOX0_BLOCK_SIZE 1
+
+#define HIF_DMA_BUFFER_SIZE (32 * 1024)
+#define CMD53_FIXED_ADDRESS 1
+#define CMD53_INCR_ADDRESS 2
+
+#define MAX_SCATTER_REQUESTS 4
+#define MAX_SCATTER_ENTRIES_PER_REQ 16
+#define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024)
+
+#define MANUFACTURER_ID_AR6003_BASE 0x300
+ /* SDIO manufacturer ID and Codes */
+#define MANUFACTURER_ID_ATH6KL_BASE_MASK 0xFF00
+#define MANUFACTURER_CODE 0x271 /* Atheros */
+
+/* Mailbox address in SDIO address space */
+#define HIF_MBOX_BASE_ADDR 0x800
+#define HIF_MBOX_WIDTH 0x800
+
+#define HIF_MBOX_END_ADDR (HTC_MAILBOX_NUM_MAX * HIF_MBOX_WIDTH - 1)
+
+/* version 1 of the chip has only a 12K extended mbox range */
+#define HIF_MBOX0_EXT_BASE_ADDR 0x4000
+#define HIF_MBOX0_EXT_WIDTH (12*1024)
+
+/* GMBOX addresses */
+#define HIF_GMBOX_BASE_ADDR 0x7000
+#define HIF_GMBOX_WIDTH 0x4000
+
+/* interrupt mode register */
+#define CCCR_SDIO_IRQ_MODE_REG 0xF0
+
+/* mode to enable special 4-bit interrupt assertion without clock */
+#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0)
+
+struct bus_request {
+ struct list_head list;
+
+ /* request data */
+ u32 address;
+
+ u8 *buffer;
+ u32 length;
+ u32 request;
+ struct htc_packet *packet;
+ int status;
+
+ /* this is a scatter request */
+ struct hif_scatter_req *scat_req;
+};
+
+/* direction of transfer (read/write) */
+#define HIF_READ 0x00000001
+#define HIF_WRITE 0x00000002
+#define HIF_DIR_MASK (HIF_READ | HIF_WRITE)
+
+/*
+ * emode - This indicates the whether the command is to be executed in a
+ * blocking or non-blocking fashion (HIF_SYNCHRONOUS/
+ * HIF_ASYNCHRONOUS). The read/write data paths in HTC have been
+ * implemented using the asynchronous mode allowing the the bus
+ * driver to indicate the completion of operation through the
+ * registered callback routine. The requirement primarily comes
+ * from the contexts these operations get called from (a driver's
+ * transmit context or the ISR context in case of receive).
+ * Support for both of these modes is essential.
+ */
+#define HIF_SYNCHRONOUS 0x00000010
+#define HIF_ASYNCHRONOUS 0x00000020
+#define HIF_EMODE_MASK (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS)
+
+/*
+ * dmode - An interface may support different kinds of commands based on
+ * the tradeoff between the amount of data it can carry and the
+ * setup time. Byte and Block modes are supported (HIF_BYTE_BASIS/
+ * HIF_BLOCK_BASIS). In case of latter, the data is rounded off
+ * to the nearest block size by padding. The size of the block is
+ * configurable at compile time using the HIF_BLOCK_SIZE and is
+ * negotiated with the target during initialization after the
+ * ATH6KL interrupts are enabled.
+ */
+#define HIF_BYTE_BASIS 0x00000040
+#define HIF_BLOCK_BASIS 0x00000080
+#define HIF_DMODE_MASK (HIF_BYTE_BASIS | HIF_BLOCK_BASIS)
+
+/*
+ * amode - This indicates if the address has to be incremented on ATH6KL
+ * after every read/write operation (HIF?FIXED_ADDRESS/
+ * HIF_INCREMENTAL_ADDRESS).
+ */
+#define HIF_FIXED_ADDRESS 0x00000100
+#define HIF_INCREMENTAL_ADDRESS 0x00000200
+#define HIF_AMODE_MASK (HIF_FIXED_ADDRESS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_ASYNC_BYTE_INC \
+ (HIF_WRITE | HIF_ASYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_ASYNC_BLOCK_INC \
+ (HIF_WRITE | HIF_ASYNCHRONOUS | \
+ HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_SYNC_BYTE_FIX \
+ (HIF_WRITE | HIF_SYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+
+#define HIF_WR_SYNC_BYTE_INC \
+ (HIF_WRITE | HIF_SYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_SYNC_BLOCK_INC \
+ (HIF_WRITE | HIF_SYNCHRONOUS | \
+ HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_RD_SYNC_BYTE_INC \
+ (HIF_READ | HIF_SYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_RD_SYNC_BYTE_FIX \
+ (HIF_READ | HIF_SYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+
+#define HIF_RD_ASYNC_BLOCK_FIX \
+ (HIF_READ | HIF_ASYNCHRONOUS | \
+ HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+
+#define HIF_RD_SYNC_BLOCK_FIX \
+ (HIF_READ | HIF_SYNCHRONOUS | \
+ HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+
+struct hif_scatter_item {
+ u8 *buf;
+ int len;
+ struct htc_packet *packet;
+};
+
+struct hif_scatter_req {
+ struct list_head list;
+ /* address for the read/write operation */
+ u32 addr;
+
+ /* request flags */
+ u32 req;
+
+ /* total length of entire transfer */
+ u32 len;
+
+ bool virt_scat;
+
+ void (*complete) (struct htc_target *, struct hif_scatter_req *);
+ int status;
+ int scat_entries;
+
+ struct bus_request *busrequest;
+ struct scatterlist *sgentries;
+
+ /* bounce buffer for upper layers to copy to/from */
+ u8 *virt_dma_buf;
+
+ struct hif_scatter_item scat_list[1];
+};
+
+struct ath6kl_hif_ops {
+ int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
+ u32 len, u32 request);
+ int (*write_async)(struct ath6kl *ar, u32 address, u8 *buffer,
+ u32 length, u32 request, struct htc_packet *packet);
+
+ void (*irq_enable)(struct ath6kl *ar);
+ void (*irq_disable)(struct ath6kl *ar);
+
+ struct hif_scatter_req *(*scatter_req_get)(struct ath6kl *ar);
+ void (*scatter_req_add)(struct ath6kl *ar,
+ struct hif_scatter_req *s_req);
+ int (*enable_scatter)(struct ath6kl *ar);
+ int (*scat_req_rw) (struct ath6kl *ar,
+ struct hif_scatter_req *scat_req);
+ void (*cleanup_scatter)(struct ath6kl *ar);
+ int (*suspend)(struct ath6kl *ar);
+};
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
new file mode 100644
index 00000000000..f88a7c9e414
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -0,0 +1,2478 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "htc_hif.h"
+#include "debug.h"
+#include "hif-ops.h"
+#include <asm/unaligned.h>
+
+#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
+
+static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
+{
+ u8 *align_addr;
+
+ if (!IS_ALIGNED((unsigned long) *buf, 4)) {
+ align_addr = PTR_ALIGN(*buf - 4, 4);
+ memmove(align_addr, *buf, len);
+ *buf = align_addr;
+ }
+}
+
+static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
+ int ctrl0, int ctrl1)
+{
+ struct htc_frame_hdr *hdr;
+
+ packet->buf -= HTC_HDR_LENGTH;
+ hdr = (struct htc_frame_hdr *)packet->buf;
+
+ /* Endianess? */
+ put_unaligned((u16)packet->act_len, &hdr->payld_len);
+ hdr->flags = flags;
+ hdr->eid = packet->endpoint;
+ hdr->ctrl[0] = ctrl0;
+ hdr->ctrl[1] = ctrl1;
+}
+
+static void htc_reclaim_txctrl_buf(struct htc_target *target,
+ struct htc_packet *pkt)
+{
+ spin_lock_bh(&target->htc_lock);
+ list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
+ spin_unlock_bh(&target->htc_lock);
+}
+
+static struct htc_packet *htc_get_control_buf(struct htc_target *target,
+ bool tx)
+{
+ struct htc_packet *packet = NULL;
+ struct list_head *buf_list;
+
+ buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
+
+ spin_lock_bh(&target->htc_lock);
+
+ if (list_empty(buf_list)) {
+ spin_unlock_bh(&target->htc_lock);
+ return NULL;
+ }
+
+ packet = list_first_entry(buf_list, struct htc_packet, list);
+ list_del(&packet->list);
+ spin_unlock_bh(&target->htc_lock);
+
+ if (tx)
+ packet->buf = packet->buf_start + HTC_HDR_LENGTH;
+
+ return packet;
+}
+
+static void htc_tx_comp_update(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct htc_packet *packet)
+{
+ packet->completion = NULL;
+ packet->buf += HTC_HDR_LENGTH;
+
+ if (!packet->status)
+ return;
+
+ ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
+ packet->status, packet->endpoint, packet->act_len,
+ packet->info.tx.cred_used);
+
+ /* on failure to submit, reclaim credits for this packet */
+ spin_lock_bh(&target->tx_lock);
+ endpoint->cred_dist.cred_to_dist +=
+ packet->info.tx.cred_used;
+ endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &target->cred_dist_list);
+
+ ath6k_credit_distribute(target->cred_dist_cntxt,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_SEND_COMPLETE);
+
+ spin_unlock_bh(&target->tx_lock);
+}
+
+static void htc_tx_complete(struct htc_endpoint *endpoint,
+ struct list_head *txq)
+{
+ if (list_empty(txq))
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "send complete ep %d, (%d pkts)\n",
+ endpoint->eid, get_queue_depth(txq));
+
+ ath6kl_tx_complete(endpoint->target->dev->ar, txq);
+}
+
+static void htc_tx_comp_handler(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
+ struct list_head container;
+
+ htc_tx_comp_update(target, endpoint, packet);
+ INIT_LIST_HEAD(&container);
+ list_add_tail(&packet->list, &container);
+ /* do completion */
+ htc_tx_complete(endpoint, &container);
+}
+
+static void htc_async_tx_scat_complete(struct htc_target *target,
+ struct hif_scatter_req *scat_req)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_packet *packet;
+ struct list_head tx_compq;
+ int i;
+
+ INIT_LIST_HEAD(&tx_compq);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "htc_async_tx_scat_complete total len: %d entries: %d\n",
+ scat_req->len, scat_req->scat_entries);
+
+ if (scat_req->status)
+ ath6kl_err("send scatter req failed: %d\n", scat_req->status);
+
+ packet = scat_req->scat_list[0].packet;
+ endpoint = &target->endpoint[packet->endpoint];
+
+ /* walk through the scatter list and process */
+ for (i = 0; i < scat_req->scat_entries; i++) {
+ packet = scat_req->scat_list[i].packet;
+ if (!packet) {
+ WARN_ON(1);
+ return;
+ }
+
+ packet->status = scat_req->status;
+ htc_tx_comp_update(target, endpoint, packet);
+ list_add_tail(&packet->list, &tx_compq);
+ }
+
+ /* free scatter request */
+ hif_scatter_req_add(target->dev->ar, scat_req);
+
+ /* complete all packets */
+ htc_tx_complete(endpoint, &tx_compq);
+}
+
+static int ath6kl_htc_tx_issue(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ int status;
+ bool sync = false;
+ u32 padded_len, send_len;
+
+ if (!packet->completion)
+ sync = true;
+
+ send_len = packet->act_len + HTC_HDR_LENGTH;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
+ __func__, send_len, sync ? "sync" : "async");
+
+ padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
+ padded_len,
+ target->dev->ar->mbox_info.htc_addr,
+ sync ? "sync" : "async");
+
+ if (sync) {
+ status = hif_read_write_sync(target->dev->ar,
+ target->dev->ar->mbox_info.htc_addr,
+ packet->buf, padded_len,
+ HIF_WR_SYNC_BLOCK_INC);
+
+ packet->status = status;
+ packet->buf += HTC_HDR_LENGTH;
+ } else
+ status = hif_write_async(target->dev->ar,
+ target->dev->ar->mbox_info.htc_addr,
+ packet->buf, padded_len,
+ HIF_WR_ASYNC_BLOCK_INC, packet);
+
+ return status;
+}
+
+static int htc_check_credits(struct htc_target *target,
+ struct htc_endpoint *ep, u8 *flags,
+ enum htc_endpoint_id eid, unsigned int len,
+ int *req_cred)
+{
+
+ *req_cred = (len > target->tgt_cred_sz) ?
+ DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
+ *req_cred, ep->cred_dist.credits);
+
+ if (ep->cred_dist.credits < *req_cred) {
+ if (eid == ENDPOINT_0)
+ return -EINVAL;
+
+ /* Seek more credits */
+ ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &ep->cred_dist);
+
+ ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+
+ ep->cred_dist.seek_cred = 0;
+
+ if (ep->cred_dist.credits < *req_cred) {
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "not enough credits for ep %d - leaving packet in queue\n",
+ eid);
+ return -EINVAL;
+ }
+ }
+
+ ep->cred_dist.credits -= *req_cred;
+ ep->ep_st.cred_cosumd += *req_cred;
+
+ /* When we are getting low on credits, ask for more */
+ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
+ ep->cred_dist.seek_cred =
+ ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &ep->cred_dist);
+
+ ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+
+ /* see if we were successful in getting more */
+ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
+ /* tell the target we need credits ASAP! */
+ *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
+ ep->ep_st.cred_low_indicate += 1;
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
+ }
+ }
+
+ return 0;
+}
+
+static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct list_head *queue)
+{
+ int req_cred;
+ u8 flags;
+ struct htc_packet *packet;
+ unsigned int len;
+
+ while (true) {
+
+ flags = 0;
+
+ if (list_empty(&endpoint->txq))
+ break;
+ packet = list_first_entry(&endpoint->txq, struct htc_packet,
+ list);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "got head pkt:0x%p , queue depth: %d\n",
+ packet, get_queue_depth(&endpoint->txq));
+
+ len = CALC_TXRX_PADDED_LEN(target,
+ packet->act_len + HTC_HDR_LENGTH);
+
+ if (htc_check_credits(target, endpoint, &flags,
+ packet->endpoint, len, &req_cred))
+ break;
+
+ /* now we can fully move onto caller's queue */
+ packet = list_first_entry(&endpoint->txq, struct htc_packet,
+ list);
+ list_move_tail(&packet->list, queue);
+
+ /* save the number of credits this packet consumed */
+ packet->info.tx.cred_used = req_cred;
+
+ /* all TX packets are handled asynchronously */
+ packet->completion = htc_tx_comp_handler;
+ packet->context = target;
+ endpoint->ep_st.tx_issued += 1;
+
+ /* save send flags */
+ packet->info.tx.flags = flags;
+ packet->info.tx.seqno = endpoint->seqno;
+ endpoint->seqno++;
+ }
+}
+
+/* See if the padded tx length falls on a credit boundary */
+static int htc_get_credit_padding(unsigned int cred_sz, int *len,
+ struct htc_endpoint *ep)
+{
+ int rem_cred, cred_pad;
+
+ rem_cred = *len % cred_sz;
+
+ /* No padding needed */
+ if (!rem_cred)
+ return 0;
+
+ if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
+ return -1;
+
+ /*
+ * The transfer consumes a "partial" credit, this
+ * packet cannot be bundled unless we add
+ * additional "dummy" padding (max 255 bytes) to
+ * consume the entire credit.
+ */
+ cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
+
+ if ((cred_pad > 0) && (cred_pad <= 255))
+ *len += cred_pad;
+ else
+ /* The amount of padding is too large, send as non-bundled */
+ return -1;
+
+ return cred_pad;
+}
+
+static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct hif_scatter_req *scat_req,
+ int n_scat,
+ struct list_head *queue)
+{
+ struct htc_packet *packet;
+ int i, len, rem_scat, cred_pad;
+ int status = 0;
+
+ rem_scat = target->max_tx_bndl_sz;
+
+ for (i = 0; i < n_scat; i++) {
+ scat_req->scat_list[i].packet = NULL;
+
+ if (list_empty(queue))
+ break;
+
+ packet = list_first_entry(queue, struct htc_packet, list);
+ len = CALC_TXRX_PADDED_LEN(target,
+ packet->act_len + HTC_HDR_LENGTH);
+
+ cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
+ &len, endpoint);
+ if (cred_pad < 0 || rem_scat < len) {
+ status = -ENOSPC;
+ break;
+ }
+
+ rem_scat -= len;
+ /* now remove it from the queue */
+ list_del(&packet->list);
+
+ scat_req->scat_list[i].packet = packet;
+ /* prepare packet and flag message as part of a send bundle */
+ ath6kl_htc_tx_prep_pkt(packet,
+ packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
+ cred_pad, packet->info.tx.seqno);
+ /* Make sure the buffer is 4-byte aligned */
+ ath6kl_htc_tx_buf_align(&packet->buf,
+ packet->act_len + HTC_HDR_LENGTH);
+ scat_req->scat_list[i].buf = packet->buf;
+ scat_req->scat_list[i].len = len;
+
+ scat_req->len += len;
+ scat_req->scat_entries++;
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
+ i, packet, len, rem_scat);
+ }
+
+ /* Roll back scatter setup in case of any failure */
+ if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
+ for (i = scat_req->scat_entries - 1; i >= 0; i--) {
+ packet = scat_req->scat_list[i].packet;
+ if (packet) {
+ packet->buf += HTC_HDR_LENGTH;
+ list_add(&packet->list, queue);
+ }
+ }
+ return -EAGAIN;
+ }
+
+ return status;
+}
+
+/*
+ * Drain a queue and send as bundles this function may return without fully
+ * draining the queue when
+ *
+ * 1. scatter resources are exhausted
+ * 2. a message that will consume a partial credit will stop the
+ * bundling process early
+ * 3. we drop below the minimum number of messages for a bundle
+ */
+static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
+ struct list_head *queue,
+ int *sent_bundle, int *n_bundle_pkts)
+{
+ struct htc_target *target = endpoint->target;
+ struct hif_scatter_req *scat_req = NULL;
+ int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
+ int status;
+
+ while (true) {
+ status = 0;
+ n_scat = get_queue_depth(queue);
+ n_scat = min(n_scat, target->msg_per_bndl_max);
+
+ if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
+ /* not enough to bundle */
+ break;
+
+ scat_req = hif_scatter_req_get(target->dev->ar);
+
+ if (!scat_req) {
+ /* no scatter resources */
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "no more scatter resources\n");
+ break;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
+ n_scat);
+
+ scat_req->len = 0;
+ scat_req->scat_entries = 0;
+
+ status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
+ scat_req, n_scat,
+ queue);
+ if (status == -EAGAIN) {
+ hif_scatter_req_add(target->dev->ar, scat_req);
+ break;
+ }
+
+ /* send path is always asynchronous */
+ scat_req->complete = htc_async_tx_scat_complete;
+ n_sent_bundle++;
+ tot_pkts_bundle += scat_req->scat_entries;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "send scatter total bytes: %d , entries: %d\n",
+ scat_req->len, scat_req->scat_entries);
+ ath6kldev_submit_scat_req(target->dev, scat_req, false);
+
+ if (status)
+ break;
+ }
+
+ *sent_bundle = n_sent_bundle;
+ *n_bundle_pkts = tot_pkts_bundle;
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s (sent:%d)\n",
+ __func__, n_sent_bundle);
+
+ return;
+}
+
+static void ath6kl_htc_tx_from_queue(struct htc_target *target,
+ struct htc_endpoint *endpoint)
+{
+ struct list_head txq;
+ struct htc_packet *packet;
+ int bundle_sent;
+ int n_pkts_bundle;
+
+ spin_lock_bh(&target->tx_lock);
+
+ endpoint->tx_proc_cnt++;
+ if (endpoint->tx_proc_cnt > 1) {
+ endpoint->tx_proc_cnt--;
+ spin_unlock_bh(&target->tx_lock);
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
+ return;
+ }
+
+ /*
+ * drain the endpoint TX queue for transmission as long
+ * as we have enough credits.
+ */
+ INIT_LIST_HEAD(&txq);
+
+ while (true) {
+
+ if (list_empty(&endpoint->txq))
+ break;
+
+ ath6kl_htc_tx_pkts_get(target, endpoint, &txq);
+
+ if (list_empty(&txq))
+ break;
+
+ spin_unlock_bh(&target->tx_lock);
+
+ bundle_sent = 0;
+ n_pkts_bundle = 0;
+
+ while (true) {
+ /* try to send a bundle on each pass */
+ if ((target->tx_bndl_enable) &&
+ (get_queue_depth(&txq) >=
+ HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
+ int temp1 = 0, temp2 = 0;
+
+ ath6kl_htc_tx_bundle(endpoint, &txq,
+ &temp1, &temp2);
+ bundle_sent += temp1;
+ n_pkts_bundle += temp2;
+ }
+
+ if (list_empty(&txq))
+ break;
+
+ packet = list_first_entry(&txq, struct htc_packet,
+ list);
+ list_del(&packet->list);
+
+ ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
+ 0, packet->info.tx.seqno);
+ ath6kl_htc_tx_issue(target, packet);
+ }
+
+ spin_lock_bh(&target->tx_lock);
+
+ endpoint->ep_st.tx_bundles += bundle_sent;
+ endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
+ }
+
+ endpoint->tx_proc_cnt = 0;
+ spin_unlock_bh(&target->tx_lock);
+}
+
+static bool ath6kl_htc_tx_try(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct htc_packet *tx_pkt)
+{
+ struct htc_ep_callbacks ep_cb;
+ int txq_depth;
+ bool overflow = false;
+
+ ep_cb = endpoint->ep_cb;
+
+ spin_lock_bh(&target->tx_lock);
+ txq_depth = get_queue_depth(&endpoint->txq);
+ spin_unlock_bh(&target->tx_lock);
+
+ if (txq_depth >= endpoint->max_txq_depth)
+ overflow = true;
+
+ if (overflow)
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
+ endpoint->eid, overflow, txq_depth,
+ endpoint->max_txq_depth);
+
+ if (overflow && ep_cb.tx_full) {
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "indicating overflowed tx packet: 0x%p\n", tx_pkt);
+
+ if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
+ HTC_SEND_FULL_DROP) {
+ endpoint->ep_st.tx_dropped += 1;
+ return false;
+ }
+ }
+
+ spin_lock_bh(&target->tx_lock);
+ list_add_tail(&tx_pkt->list, &endpoint->txq);
+ spin_unlock_bh(&target->tx_lock);
+
+ ath6kl_htc_tx_from_queue(target, endpoint);
+
+ return true;
+}
+
+static void htc_chk_ep_txq(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_endpoint_credit_dist *cred_dist;
+
+ /*
+ * Run through the credit distribution list to see if there are
+ * packets queued. NOTE: no locks need to be taken since the
+ * distribution list is not dynamic (cannot be re-ordered) and we
+ * are not modifying any state.
+ */
+ list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
+ endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;
+
+ spin_lock_bh(&target->tx_lock);
+ if (!list_empty(&endpoint->txq)) {
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "ep %d has %d credits and %d packets in tx queue\n",
+ cred_dist->endpoint,
+ endpoint->cred_dist.credits,
+ get_queue_depth(&endpoint->txq));
+ spin_unlock_bh(&target->tx_lock);
+ /*
+ * Try to start the stalled queue, this list is
+ * ordered by priority. If there are credits
+ * available the highest priority queue will get a
+ * chance to reclaim credits from lower priority
+ * ones.
+ */
+ ath6kl_htc_tx_from_queue(target, endpoint);
+ spin_lock_bh(&target->tx_lock);
+ }
+ spin_unlock_bh(&target->tx_lock);
+ }
+}
+
+static int htc_setup_tx_complete(struct htc_target *target)
+{
+ struct htc_packet *send_pkt = NULL;
+ int status;
+
+ send_pkt = htc_get_control_buf(target, true);
+
+ if (!send_pkt)
+ return -ENOMEM;
+
+ if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
+ struct htc_setup_comp_ext_msg *setup_comp_ext;
+ u32 flags = 0;
+
+ setup_comp_ext =
+ (struct htc_setup_comp_ext_msg *)send_pkt->buf;
+ memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
+ setup_comp_ext->msg_id =
+ cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ if (target->msg_per_bndl_max > 0) {
+ /* Indicate HTC bundling to the target */
+ flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
+ setup_comp_ext->msg_per_rxbndl =
+ target->msg_per_bndl_max;
+ }
+
+ memcpy(&setup_comp_ext->flags, &flags,
+ sizeof(setup_comp_ext->flags));
+ set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
+ sizeof(struct htc_setup_comp_ext_msg),
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+
+ } else {
+ struct htc_setup_comp_msg *setup_comp;
+ setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
+ memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
+ setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
+ set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
+ sizeof(struct htc_setup_comp_msg),
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+ }
+
+ /* we want synchronous operation */
+ send_pkt->completion = NULL;
+ ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
+ status = ath6kl_htc_tx_issue(target, send_pkt);
+
+ if (send_pkt != NULL)
+ htc_reclaim_txctrl_buf(target, send_pkt);
+
+ return status;
+}
+
+void ath6kl_htc_set_credit_dist(struct htc_target *target,
+ struct htc_credit_state_info *cred_dist_cntxt,
+ u16 srvc_pri_order[], int list_len)
+{
+ struct htc_endpoint *endpoint;
+ int i, ep;
+
+ target->cred_dist_cntxt = cred_dist_cntxt;
+
+ list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
+ &target->cred_dist_list);
+
+ for (i = 0; i < list_len; i++) {
+ for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
+ endpoint = &target->endpoint[ep];
+ if (endpoint->svc_id == srvc_pri_order[i]) {
+ list_add_tail(&endpoint->cred_dist.list,
+ &target->cred_dist_list);
+ break;
+ }
+ }
+ if (ep >= ENDPOINT_MAX) {
+ WARN_ON(1);
+ return;
+ }
+ }
+}
+
+int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
+{
+ struct htc_endpoint *endpoint;
+ struct list_head queue;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
+ packet->endpoint, packet->buf, packet->act_len);
+
+ if (packet->endpoint >= ENDPOINT_MAX) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ endpoint = &target->endpoint[packet->endpoint];
+
+ if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
+ packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
+ -ECANCELED : -ENOSPC;
+ INIT_LIST_HEAD(&queue);
+ list_add(&packet->list, &queue);
+ htc_tx_complete(endpoint, &queue);
+ }
+
+ return 0;
+}
+
+/* flush endpoint TX queue */
+void ath6kl_htc_flush_txep(struct htc_target *target,
+ enum htc_endpoint_id eid, u16 tag)
+{
+ struct htc_packet *packet, *tmp_pkt;
+ struct list_head discard_q, container;
+ struct htc_endpoint *endpoint = &target->endpoint[eid];
+
+ if (!endpoint->svc_id) {
+ WARN_ON(1);
+ return;
+ }
+
+ /* initialize the discard queue */
+ INIT_LIST_HEAD(&discard_q);
+
+ spin_lock_bh(&target->tx_lock);
+
+ list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
+ if ((tag == HTC_TX_PACKET_TAG_ALL) ||
+ (tag == packet->info.tx.tag))
+ list_move_tail(&packet->list, &discard_q);
+ }
+
+ spin_unlock_bh(&target->tx_lock);
+
+ list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
+ packet->status = -ECANCELED;
+ list_del(&packet->list);
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
+ packet, packet->act_len,
+ packet->endpoint, packet->info.tx.tag);
+
+ INIT_LIST_HEAD(&container);
+ list_add_tail(&packet->list, &container);
+ htc_tx_complete(endpoint, &container);
+ }
+
+}
+
+static void ath6kl_htc_flush_txep_all(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ int i;
+
+ dump_cred_dist_stats(target);
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ endpoint = &target->endpoint[i];
+ if (endpoint->svc_id == 0)
+ /* not in use.. */
+ continue;
+ ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
+ }
+}
+
+void ath6kl_htc_indicate_activity_change(struct htc_target *target,
+ enum htc_endpoint_id eid, bool active)
+{
+ struct htc_endpoint *endpoint = &target->endpoint[eid];
+ bool dist = false;
+
+ if (endpoint->svc_id == 0) {
+ WARN_ON(1);
+ return;
+ }
+
+ spin_lock_bh(&target->tx_lock);
+
+ if (active) {
+ if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
+ endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
+ dist = true;
+ }
+ } else {
+ if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
+ endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
+ dist = true;
+ }
+ }
+
+ if (dist) {
+ endpoint->cred_dist.txq_depth =
+ get_queue_depth(&endpoint->txq);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &target->cred_dist_list);
+
+ ath6k_credit_distribute(target->cred_dist_cntxt,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_ACTIVITY_CHANGE);
+ }
+
+ spin_unlock_bh(&target->tx_lock);
+
+ if (dist && !active)
+ htc_chk_ep_txq(target);
+}
+
+/* HTC Rx */
+
+static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
+ int n_look_ahds)
+{
+ endpoint->ep_st.rx_pkts++;
+ if (n_look_ahds == 1)
+ endpoint->ep_st.rx_lkahds++;
+ else if (n_look_ahds > 1)
+ endpoint->ep_st.rx_bundle_lkahd++;
+}
+
+static inline bool htc_valid_rx_frame_len(struct htc_target *target,
+ enum htc_endpoint_id eid, int len)
+{
+ return (eid == target->dev->ar->ctrl_ep) ?
+ len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
+}
+
+static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
+{
+ struct list_head queue;
+
+ INIT_LIST_HEAD(&queue);
+ list_add_tail(&packet->list, &queue);
+ return ath6kl_htc_add_rxbuf_multiple(target, &queue);
+}
+
+static void htc_reclaim_rxbuf(struct htc_target *target,
+ struct htc_packet *packet,
+ struct htc_endpoint *ep)
+{
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
+ htc_rxpkt_reset(packet);
+ packet->status = -ECANCELED;
+ ep->ep_cb.rx(ep->target, packet);
+ } else {
+ htc_rxpkt_reset(packet);
+ htc_add_rxbuf((void *)(target), packet);
+ }
+}
+
+static void reclaim_rx_ctrl_buf(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ spin_lock_bh(&target->htc_lock);
+ list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
+ spin_unlock_bh(&target->htc_lock);
+}
+
+static int ath6kl_htc_rx_packet(struct htc_target *target,
+ struct htc_packet *packet,
+ u32 rx_len)
+{
+ struct ath6kl_device *dev = target->dev;
+ u32 padded_len;
+ int status;
+
+ padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
+
+ if (padded_len > packet->buf_len) {
+ ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
+ padded_len, rx_len, packet->buf_len);
+ return -ENOMEM;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
+ packet, packet->info.rx.exp_hdr,
+ padded_len, dev->ar->mbox_info.htc_addr, "sync");
+
+ status = hif_read_write_sync(dev->ar,
+ dev->ar->mbox_info.htc_addr,
+ packet->buf, padded_len,
+ HIF_RD_SYNC_BLOCK_FIX);
+
+ packet->status = status;
+
+ return status;
+}
+
+/*
+ * optimization for recv packets, we can indicate a
+ * "hint" that there are more single-packets to fetch
+ * on this endpoint.
+ */
+static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
+ struct htc_endpoint *endpoint,
+ struct htc_packet *packet)
+{
+ struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
+
+ if (htc_hdr->eid == packet->endpoint) {
+ if (!list_empty(&endpoint->rx_bufq))
+ packet->info.rx.indicat_flags |=
+ HTC_RX_FLAGS_INDICATE_MORE_PKTS;
+ }
+}
+
+static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
+{
+ struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
+
+ if (ep_cb.rx_refill_thresh > 0) {
+ spin_lock_bh(&endpoint->target->rx_lock);
+ if (get_queue_depth(&endpoint->rx_bufq)
+ < ep_cb.rx_refill_thresh) {
+ spin_unlock_bh(&endpoint->target->rx_lock);
+ ep_cb.rx_refill(endpoint->target, endpoint->eid);
+ return;
+ }
+ spin_unlock_bh(&endpoint->target->rx_lock);
+ }
+}
+
+/* This function is called with rx_lock held */
+static int ath6kl_htc_rx_setup(struct htc_target *target,
+ struct htc_endpoint *ep,
+ u32 *lk_ahds, struct list_head *queue, int n_msg)
+{
+ struct htc_packet *packet;
+ /* FIXME: type of lk_ahds can't be right */
+ struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
+ struct htc_ep_callbacks ep_cb;
+ int status = 0, j, full_len;
+ bool no_recycle;
+
+ full_len = CALC_TXRX_PADDED_LEN(target,
+ le16_to_cpu(htc_hdr->payld_len) +
+ sizeof(*htc_hdr));
+
+ if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
+ ath6kl_warn("Rx buffer requested with invalid length\n");
+ return -EINVAL;
+ }
+
+ ep_cb = ep->ep_cb;
+ for (j = 0; j < n_msg; j++) {
+
+ /*
+ * Reset flag, any packets allocated using the
+ * rx_alloc() API cannot be recycled on
+ * cleanup,they must be explicitly returned.
+ */
+ no_recycle = false;
+
+ if (ep_cb.rx_allocthresh &&
+ (full_len > ep_cb.rx_alloc_thresh)) {
+ ep->ep_st.rx_alloc_thresh_hit += 1;
+ ep->ep_st.rxalloc_thresh_byte +=
+ le16_to_cpu(htc_hdr->payld_len);
+
+ spin_unlock_bh(&target->rx_lock);
+ no_recycle = true;
+
+ packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
+ full_len);
+ spin_lock_bh(&target->rx_lock);
+ } else {
+ /* refill handler is being used */
+ if (list_empty(&ep->rx_bufq)) {
+ if (ep_cb.rx_refill) {
+ spin_unlock_bh(&target->rx_lock);
+ ep_cb.rx_refill(ep->target, ep->eid);
+ spin_lock_bh(&target->rx_lock);
+ }
+ }
+
+ if (list_empty(&ep->rx_bufq))
+ packet = NULL;
+ else {
+ packet = list_first_entry(&ep->rx_bufq,
+ struct htc_packet, list);
+ list_del(&packet->list);
+ }
+ }
+
+ if (!packet) {
+ target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
+ target->ep_waiting = ep->eid;
+ return -ENOSPC;
+ }
+
+ /* clear flags */
+ packet->info.rx.rx_flags = 0;
+ packet->info.rx.indicat_flags = 0;
+ packet->status = 0;
+
+ if (no_recycle)
+ /*
+ * flag that these packets cannot be
+ * recycled, they have to be returned to
+ * the user
+ */
+ packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;
+
+ /* Caller needs to free this upon any failure */
+ list_add_tail(&packet->list, queue);
+
+ if (target->htc_flags & HTC_OP_STATE_STOPPING) {
+ status = -ECANCELED;
+ break;
+ }
+
+ if (j) {
+ packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
+ packet->info.rx.exp_hdr = 0xFFFFFFFF;
+ } else
+ /* set expected look ahead */
+ packet->info.rx.exp_hdr = *lk_ahds;
+
+ packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
+ HTC_HDR_LENGTH;
+ }
+
+ return status;
+}
+
+static int ath6kl_htc_rx_alloc(struct htc_target *target,
+ u32 lk_ahds[], int msg,
+ struct htc_endpoint *endpoint,
+ struct list_head *queue)
+{
+ int status = 0;
+ struct htc_packet *packet, *tmp_pkt;
+ struct htc_frame_hdr *htc_hdr;
+ int i, n_msg;
+
+ spin_lock_bh(&target->rx_lock);
+
+ for (i = 0; i < msg; i++) {
+
+ htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
+
+ if (htc_hdr->eid >= ENDPOINT_MAX) {
+ ath6kl_err("invalid ep in look-ahead: %d\n",
+ htc_hdr->eid);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (htc_hdr->eid != endpoint->eid) {
+ ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
+ htc_hdr->eid, endpoint->eid, i);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
+ ath6kl_err("payload len %d exceeds max htc : %d !\n",
+ htc_hdr->payld_len,
+ (u32) HTC_MAX_PAYLOAD_LENGTH);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (endpoint->svc_id == 0) {
+ ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
+ /*
+ * HTC header indicates that every packet to follow
+ * has the same padded length so that it can be
+ * optimally fetched as a full bundle.
+ */
+ n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
+ HTC_FLG_RX_BNDL_CNT_S;
+
+ /* the count doesn't include the starter frame */
+ n_msg++;
+ if (n_msg > target->msg_per_bndl_max) {
+ status = -ENOMEM;
+ break;
+ }
+
+ endpoint->ep_st.rx_bundle_from_hdr += 1;
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "htc hdr indicates :%d msg can be fetched as a bundle\n",
+ n_msg);
+ } else
+ /* HTC header only indicates 1 message to fetch */
+ n_msg = 1;
+
+ /* Setup packet buffers for each message */
+ status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
+ queue, n_msg);
+
+ /*
+ * This is due to unavailabilty of buffers to rx entire data.
+ * Return no error so that free buffers from queue can be used
+ * to receive partial data.
+ */
+ if (status == -ENOSPC) {
+ spin_unlock_bh(&target->rx_lock);
+ return 0;
+ }
+
+ if (status)
+ break;
+ }
+
+ spin_unlock_bh(&target->rx_lock);
+
+ if (status) {
+ list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
+ list_del(&packet->list);
+ htc_reclaim_rxbuf(target, packet,
+ &target->endpoint[packet->endpoint]);
+ }
+ }
+
+ return status;
+}
+
+static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
+{
+ if (packets->endpoint != ENDPOINT_0) {
+ WARN_ON(1);
+ return;
+ }
+
+ if (packets->status == -ECANCELED) {
+ reclaim_rx_ctrl_buf(context, packets);
+ return;
+ }
+
+ if (packets->act_len > 0) {
+ ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
+ packets->act_len + HTC_HDR_LENGTH);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
+ "Unexpected ENDPOINT 0 Message", "",
+ packets->buf - HTC_HDR_LENGTH,
+ packets->act_len + HTC_HDR_LENGTH);
+ }
+
+ htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
+}
+
+static void htc_proc_cred_rpt(struct htc_target *target,
+ struct htc_credit_report *rpt,
+ int n_entries,
+ enum htc_endpoint_id from_ep)
+{
+ struct htc_endpoint *endpoint;
+ int tot_credits = 0, i;
+ bool dist = false;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);
+
+ spin_lock_bh(&target->tx_lock);
+
+ for (i = 0; i < n_entries; i++, rpt++) {
+ if (rpt->eid >= ENDPOINT_MAX) {
+ WARN_ON(1);
+ spin_unlock_bh(&target->tx_lock);
+ return;
+ }
+
+ endpoint = &target->endpoint[rpt->eid];
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
+ rpt->eid, rpt->credits);
+
+ endpoint->ep_st.tx_cred_rpt += 1;
+ endpoint->ep_st.cred_retnd += rpt->credits;
+
+ if (from_ep == rpt->eid) {
+ /*
+ * This credit report arrived on the same endpoint
+ * indicating it arrived in an RX packet.
+ */
+ endpoint->ep_st.cred_from_rx += rpt->credits;
+ endpoint->ep_st.cred_rpt_from_rx += 1;
+ } else if (from_ep == ENDPOINT_0) {
+ /* credit arrived on endpoint 0 as a NULL message */
+ endpoint->ep_st.cred_from_ep0 += rpt->credits;
+ endpoint->ep_st.cred_rpt_ep0 += 1;
+ } else {
+ endpoint->ep_st.cred_from_other += rpt->credits;
+ endpoint->ep_st.cred_rpt_from_other += 1;
+ }
+
+ if (rpt->eid == ENDPOINT_0)
+ /* always give endpoint 0 credits back */
+ endpoint->cred_dist.credits += rpt->credits;
+ else {
+ endpoint->cred_dist.cred_to_dist += rpt->credits;
+ dist = true;
+ }
+
+ /*
+ * Refresh tx depth for distribution function that will
+ * recover these credits NOTE: this is only valid when
+ * there are credits to recover!
+ */
+ endpoint->cred_dist.txq_depth =
+ get_queue_depth(&endpoint->txq);
+
+ tot_credits += rpt->credits;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "report indicated %d credits to distribute\n",
+ tot_credits);
+
+ if (dist) {
+ /*
+ * This was a credit return based on a completed send
+ * operations note, this is done with the lock held
+ */
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &target->cred_dist_list);
+
+ ath6k_credit_distribute(target->cred_dist_cntxt,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_SEND_COMPLETE);
+ }
+
+ spin_unlock_bh(&target->tx_lock);
+
+ if (tot_credits)
+ htc_chk_ep_txq(target);
+}
+
+static int htc_parse_trailer(struct htc_target *target,
+ struct htc_record_hdr *record,
+ u8 *record_buf, u32 *next_lk_ahds,
+ enum htc_endpoint_id endpoint,
+ int *n_lk_ahds)
+{
+ struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
+ struct htc_lookahead_report *lk_ahd;
+ int len;
+
+ switch (record->rec_id) {
+ case HTC_RECORD_CREDITS:
+ len = record->len / sizeof(struct htc_credit_report);
+ if (!len) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ htc_proc_cred_rpt(target,
+ (struct htc_credit_report *) record_buf,
+ len, endpoint);
+ break;
+ case HTC_RECORD_LOOKAHEAD:
+ len = record->len / sizeof(*lk_ahd);
+ if (!len) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ lk_ahd = (struct htc_lookahead_report *) record_buf;
+ if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
+ && next_lk_ahds) {
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
+ lk_ahd->pre_valid, lk_ahd->post_valid);
+
+ /* look ahead bytes are valid, copy them over */
+ memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
+ "", next_lk_ahds, 4);
+
+ *n_lk_ahds = 1;
+ }
+ break;
+ case HTC_RECORD_LOOKAHEAD_BUNDLE:
+ len = record->len / sizeof(*bundle_lkahd_rpt);
+ if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (next_lk_ahds) {
+ int i;
+
+ bundle_lkahd_rpt =
+ (struct htc_bundle_lkahd_rpt *) record_buf;
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
+ "", record_buf, record->len);
+
+ for (i = 0; i < len; i++) {
+ memcpy((u8 *)&next_lk_ahds[i],
+ bundle_lkahd_rpt->lk_ahd, 4);
+ bundle_lkahd_rpt++;
+ }
+
+ *n_lk_ahds = i;
+ }
+ break;
+ default:
+ ath6kl_err("unhandled record: id:%d len:%d\n",
+ record->rec_id, record->len);
+ break;
+ }
+
+ return 0;
+
+}
+
+static int htc_proc_trailer(struct htc_target *target,
+ u8 *buf, int len, u32 *next_lk_ahds,
+ int *n_lk_ahds, enum htc_endpoint_id endpoint)
+{
+ struct htc_record_hdr *record;
+ int orig_len;
+ int status;
+ u8 *record_buf;
+ u8 *orig_buf;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", "",
+ buf, len);
+
+ orig_buf = buf;
+ orig_len = len;
+ status = 0;
+
+ while (len > 0) {
+
+ if (len < sizeof(struct htc_record_hdr)) {
+ status = -ENOMEM;
+ break;
+ }
+ /* these are byte aligned structs */
+ record = (struct htc_record_hdr *) buf;
+ len -= sizeof(struct htc_record_hdr);
+ buf += sizeof(struct htc_record_hdr);
+
+ if (record->len > len) {
+ ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
+ record->len, record->rec_id, len);
+ status = -ENOMEM;
+ break;
+ }
+ record_buf = buf;
+
+ status = htc_parse_trailer(target, record, record_buf,
+ next_lk_ahds, endpoint, n_lk_ahds);
+
+ if (status)
+ break;
+
+ /* advance buffer past this record for next time around */
+ buf += record->len;
+ len -= record->len;
+ }
+
+ if (status)
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
+ "", orig_buf, orig_len);
+
+ return status;
+}
+
+static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
+ struct htc_packet *packet,
+ u32 *next_lkahds, int *n_lkahds)
+{
+ int status = 0;
+ u16 payload_len;
+ u32 lk_ahd;
+ struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;
+
+ if (n_lkahds != NULL)
+ *n_lkahds = 0;
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", "htc ",
+ packet->buf, packet->act_len);
+
+ /*
+ * NOTE: we cannot assume the alignment of buf, so we use the safe
+ * macros to retrieve 16 bit fields.
+ */
+ payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
+
+ memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));
+
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
+ /*
+ * Refresh the expected header and the actual length as it
+ * was unknown when this packet was grabbed as part of the
+ * bundle.
+ */
+ packet->info.rx.exp_hdr = lk_ahd;
+ packet->act_len = payload_len + HTC_HDR_LENGTH;
+
+ /* validate the actual header that was refreshed */
+ if (packet->act_len > packet->buf_len) {
+ ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
+ payload_len, lk_ahd);
+ /*
+ * Limit this to max buffer just to print out some
+ * of the buffer.
+ */
+ packet->act_len = min(packet->act_len, packet->buf_len);
+ status = -ENOMEM;
+ goto fail_rx;
+ }
+
+ if (packet->endpoint != htc_hdr->eid) {
+ ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
+ htc_hdr->eid, packet->endpoint);
+ status = -ENOMEM;
+ goto fail_rx;
+ }
+ }
+
+ if (lk_ahd != packet->info.rx.exp_hdr) {
+ ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
+ __func__, packet, packet->info.rx.rx_flags);
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
+ "", &packet->info.rx.exp_hdr, 4);
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
+ "", (u8 *)&lk_ahd, sizeof(lk_ahd));
+ status = -ENOMEM;
+ goto fail_rx;
+ }
+
+ if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
+ if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
+ htc_hdr->ctrl[0] > payload_len) {
+ ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
+ __func__, payload_len, htc_hdr->ctrl[0]);
+ status = -ENOMEM;
+ goto fail_rx;
+ }
+
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
+ next_lkahds = NULL;
+ n_lkahds = NULL;
+ }
+
+ status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
+ + payload_len - htc_hdr->ctrl[0],
+ htc_hdr->ctrl[0], next_lkahds,
+ n_lkahds, packet->endpoint);
+
+ if (status)
+ goto fail_rx;
+
+ packet->act_len -= htc_hdr->ctrl[0];
+ }
+
+ packet->buf += HTC_HDR_LENGTH;
+ packet->act_len -= HTC_HDR_LENGTH;
+
+fail_rx:
+ if (status)
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
+ "", packet->buf,
+ packet->act_len < 256 ? packet->act_len : 256);
+ else {
+ if (packet->act_len > 0)
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
+ "HTC - Application Msg", "",
+ packet->buf, packet->act_len);
+ }
+
+ return status;
+}
+
+static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
+ struct htc_packet *packet)
+{
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "htc calling ep %d recv callback on packet 0x%p\n",
+ endpoint->eid, packet);
+ endpoint->ep_cb.rx(endpoint->target, packet);
+}
+
+static int ath6kl_htc_rx_bundle(struct htc_target *target,
+ struct list_head *rxq,
+ struct list_head *sync_compq,
+ int *n_pkt_fetched, bool part_bundle)
+{
+ struct hif_scatter_req *scat_req;
+ struct htc_packet *packet;
+ int rem_space = target->max_rx_bndl_sz;
+ int n_scat_pkt, status = 0, i, len;
+
+ n_scat_pkt = get_queue_depth(rxq);
+ n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);
+
+ if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
+ /*
+ * We were forced to split this bundle receive operation
+ * all packets in this partial bundle must have their
+ * lookaheads ignored.
+ */
+ part_bundle = true;
+
+ /*
+ * This would only happen if the target ignored our max
+ * bundle limit.
+ */
+ ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
+ __func__, get_queue_depth(rxq), n_scat_pkt);
+ }
+
+ len = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "%s(): (numpackets: %d , actual : %d)\n",
+ __func__, get_queue_depth(rxq), n_scat_pkt);
+
+ scat_req = hif_scatter_req_get(target->dev->ar);
+
+ if (scat_req == NULL)
+ goto fail_rx_pkt;
+
+ for (i = 0; i < n_scat_pkt; i++) {
+ int pad_len;
+
+ packet = list_first_entry(rxq, struct htc_packet, list);
+ list_del(&packet->list);
+
+ pad_len = CALC_TXRX_PADDED_LEN(target,
+ packet->act_len);
+
+ if ((rem_space - pad_len) < 0) {
+ list_add(&packet->list, rxq);
+ break;
+ }
+
+ rem_space -= pad_len;
+
+ if (part_bundle || (i < (n_scat_pkt - 1)))
+ /*
+ * Packet 0..n-1 cannot be checked for look-aheads
+ * since we are fetching a bundle the last packet
+ * however can have it's lookahead used
+ */
+ packet->info.rx.rx_flags |=
+ HTC_RX_PKT_IGNORE_LOOKAHEAD;
+
+ /* NOTE: 1 HTC packet per scatter entry */
+ scat_req->scat_list[i].buf = packet->buf;
+ scat_req->scat_list[i].len = pad_len;
+
+ packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;
+
+ list_add_tail(&packet->list, sync_compq);
+
+ WARN_ON(!scat_req->scat_list[i].len);
+ len += scat_req->scat_list[i].len;
+ }
+
+ scat_req->len = len;
+ scat_req->scat_entries = i;
+
+ status = ath6kldev_submit_scat_req(target->dev, scat_req, true);
+
+ if (!status)
+ *n_pkt_fetched = i;
+
+ /* free scatter request */
+ hif_scatter_req_add(target->dev->ar, scat_req);
+
+fail_rx_pkt:
+
+ return status;
+}
+
+static int ath6kl_htc_rx_process_packets(struct htc_target *target,
+ struct list_head *comp_pktq,
+ u32 lk_ahds[],
+ int *n_lk_ahd)
+{
+ struct htc_packet *packet, *tmp_pkt;
+ struct htc_endpoint *ep;
+ int status = 0;
+
+ list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
+ list_del(&packet->list);
+ ep = &target->endpoint[packet->endpoint];
+
+ /* process header for each of the recv packet */
+ status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
+ n_lk_ahd);
+ if (status)
+ return status;
+
+ if (list_empty(comp_pktq)) {
+ /*
+ * Last packet's more packet flag is set
+ * based on the lookahead.
+ */
+ if (*n_lk_ahd > 0)
+ ath6kl_htc_rx_set_indicate(lk_ahds[0],
+ ep, packet);
+ } else
+ /*
+ * Packets in a bundle automatically have
+ * this flag set.
+ */
+ packet->info.rx.indicat_flags |=
+ HTC_RX_FLAGS_INDICATE_MORE_PKTS;
+
+ ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);
+
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
+ ep->ep_st.rx_bundl += 1;
+
+ ath6kl_htc_rx_complete(ep, packet);
+ }
+
+ return status;
+}
+
+static int ath6kl_htc_rx_fetch(struct htc_target *target,
+ struct list_head *rx_pktq,
+ struct list_head *comp_pktq)
+{
+ int fetched_pkts;
+ bool part_bundle = false;
+ int status = 0;
+
+ /* now go fetch the list of HTC packets */
+ while (!list_empty(rx_pktq)) {
+ fetched_pkts = 0;
+
+ if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
+ /*
+ * There are enough packets to attempt a
+ * bundle transfer and recv bundling is
+ * allowed.
+ */
+ status = ath6kl_htc_rx_bundle(target, rx_pktq,
+ comp_pktq,
+ &fetched_pkts,
+ part_bundle);
+ if (status)
+ return status;
+
+ if (!list_empty(rx_pktq))
+ part_bundle = true;
+ }
+
+ if (!fetched_pkts) {
+ struct htc_packet *packet;
+
+ packet = list_first_entry(rx_pktq, struct htc_packet,
+ list);
+
+ list_del(&packet->list);
+
+ /* fully synchronous */
+ packet->completion = NULL;
+
+ if (!list_empty(rx_pktq))
+ /*
+ * look_aheads in all packet
+ * except the last one in the
+ * bundle must be ignored
+ */
+ packet->info.rx.rx_flags |=
+ HTC_RX_PKT_IGNORE_LOOKAHEAD;
+
+ /* go fetch the packet */
+ status = ath6kl_htc_rx_packet(target, packet,
+ packet->act_len);
+ if (status)
+ return status;
+
+ list_add_tail(&packet->list, comp_pktq);
+ }
+ }
+
+ return status;
+}
+
+int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
+ u32 msg_look_ahead[], int *num_pkts)
+{
+ struct htc_packet *packets, *tmp_pkt;
+ struct htc_endpoint *endpoint;
+ struct list_head rx_pktq, comp_pktq;
+ int status = 0;
+ u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
+ int num_look_ahead = 1;
+ enum htc_endpoint_id id;
+ int n_fetched = 0;
+
+ *num_pkts = 0;
+
+ /*
+ * On first entry copy the look_aheads into our temp array for
+ * processing
+ */
+ memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));
+
+ while (true) {
+
+ /*
+ * First lookahead sets the expected endpoint IDs for all
+ * packets in a bundle.
+ */
+ id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
+ endpoint = &target->endpoint[id];
+
+ if (id >= ENDPOINT_MAX) {
+ ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
+ id);
+ status = -ENOMEM;
+ break;
+ }
+
+ INIT_LIST_HEAD(&rx_pktq);
+ INIT_LIST_HEAD(&comp_pktq);
+
+ /*
+ * Try to allocate as many HTC RX packets indicated by the
+ * look_aheads.
+ */
+ status = ath6kl_htc_rx_alloc(target, look_aheads,
+ num_look_ahead, endpoint,
+ &rx_pktq);
+ if (status)
+ break;
+
+ if (get_queue_depth(&rx_pktq) >= 2)
+ /*
+ * A recv bundle was detected, force IRQ status
+ * re-check again
+ */
+ target->chk_irq_status_cnt = 1;
+
+ n_fetched += get_queue_depth(&rx_pktq);
+
+ num_look_ahead = 0;
+
+ status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);
+
+ if (!status)
+ ath6kl_htc_rx_chk_water_mark(endpoint);
+
+ /* Process fetched packets */
+ status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
+ look_aheads,
+ &num_look_ahead);
+
+ if (!num_look_ahead || status)
+ break;
+
+ /*
+ * For SYNCH processing, if we get here, we are running
+ * through the loop again due to a detected lookahead. Set
+ * flag that we should re-check IRQ status registers again
+ * before leaving IRQ processing, this can net better
+ * performance in high throughput situations.
+ */
+ target->chk_irq_status_cnt = 1;
+ }
+
+ if (status) {
+ ath6kl_err("failed to get pending recv messages: %d\n",
+ status);
+ /*
+ * Cleanup any packets we allocated but didn't use to
+ * actually fetch any packets.
+ */
+ list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
+ list_del(&packets->list);
+ htc_reclaim_rxbuf(target, packets,
+ &target->endpoint[packets->endpoint]);
+ }
+
+ /* cleanup any packets in sync completion queue */
+ list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
+ list_del(&packets->list);
+ htc_reclaim_rxbuf(target, packets,
+ &target->endpoint[packets->endpoint]);
+ }
+
+ if (target->htc_flags & HTC_OP_STATE_STOPPING) {
+ ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
+ ath6kldev_rx_control(target->dev, false);
+ }
+ }
+
+ /*
+ * Before leaving, check to see if host ran out of buffers and
+ * needs to stop the receiver.
+ */
+ if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
+ ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
+ ath6kldev_rx_control(target->dev, false);
+ }
+ *num_pkts = n_fetched;
+
+ return status;
+}
+
+/*
+ * Synchronously wait for a control message from the target,
+ * This function is used at initialization time ONLY. At init messages
+ * on ENDPOINT 0 are expected.
+ */
+static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
+{
+ struct htc_packet *packet = NULL;
+ struct htc_frame_hdr *htc_hdr;
+ u32 look_ahead;
+
+ if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
+ HTC_TARGET_RESPONSE_TIMEOUT))
+ return NULL;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);
+
+ htc_hdr = (struct htc_frame_hdr *)&look_ahead;
+
+ if (htc_hdr->eid != ENDPOINT_0)
+ return NULL;
+
+ packet = htc_get_control_buf(target, false);
+
+ if (!packet)
+ return NULL;
+
+ packet->info.rx.rx_flags = 0;
+ packet->info.rx.exp_hdr = look_ahead;
+ packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;
+
+ if (packet->act_len > packet->buf_len)
+ goto fail_ctrl_rx;
+
+ /* we want synchronous operation */
+ packet->completion = NULL;
+
+ /* get the message from the device, this will block */
+ if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
+ goto fail_ctrl_rx;
+
+ /* process receive header */
+ packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);
+
+ if (packet->status) {
+ ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
+ packet->status);
+ goto fail_ctrl_rx;
+ }
+
+ return packet;
+
+fail_ctrl_rx:
+ if (packet != NULL) {
+ htc_rxpkt_reset(packet);
+ reclaim_rx_ctrl_buf(target, packet);
+ }
+
+ return NULL;
+}
+
+int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
+ struct list_head *pkt_queue)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_packet *first_pkt;
+ bool rx_unblock = false;
+ int status = 0, depth;
+
+ if (list_empty(pkt_queue))
+ return -ENOMEM;
+
+ first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);
+
+ if (first_pkt->endpoint >= ENDPOINT_MAX)
+ return status;
+
+ depth = get_queue_depth(pkt_queue);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
+ first_pkt->endpoint, depth, first_pkt->buf_len);
+
+ endpoint = &target->endpoint[first_pkt->endpoint];
+
+ if (target->htc_flags & HTC_OP_STATE_STOPPING) {
+ struct htc_packet *packet, *tmp_pkt;
+
+ /* walk through queue and mark each one canceled */
+ list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
+ packet->status = -ECANCELED;
+ list_del(&packet->list);
+ ath6kl_htc_rx_complete(endpoint, packet);
+ }
+
+ return status;
+ }
+
+ spin_lock_bh(&target->rx_lock);
+
+ list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);
+
+ /* check if we are blocked waiting for a new buffer */
+ if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
+ if (target->ep_waiting == first_pkt->endpoint) {
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "receiver was blocked on ep:%d, unblocking.\n",
+ target->ep_waiting);
+ target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
+ target->ep_waiting = ENDPOINT_MAX;
+ rx_unblock = true;
+ }
+ }
+
+ spin_unlock_bh(&target->rx_lock);
+
+ if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
+ /* TODO : implement a buffer threshold count? */
+ ath6kldev_rx_control(target->dev, true);
+
+ return status;
+}
+
+void ath6kl_htc_flush_rx_buf(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_packet *packet, *tmp_pkt;
+ int i;
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ endpoint = &target->endpoint[i];
+ if (!endpoint->svc_id)
+ /* not in use.. */
+ continue;
+
+ spin_lock_bh(&target->rx_lock);
+ list_for_each_entry_safe(packet, tmp_pkt,
+ &endpoint->rx_bufq, list) {
+ list_del(&packet->list);
+ spin_unlock_bh(&target->rx_lock);
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "flushing rx pkt:0x%p, len:%d, ep:%d\n",
+ packet, packet->buf_len,
+ packet->endpoint);
+ dev_kfree_skb(packet->pkt_cntxt);
+ spin_lock_bh(&target->rx_lock);
+ }
+ spin_unlock_bh(&target->rx_lock);
+ }
+}
+
+int ath6kl_htc_conn_service(struct htc_target *target,
+ struct htc_service_connect_req *conn_req,
+ struct htc_service_connect_resp *conn_resp)
+{
+ struct htc_packet *rx_pkt = NULL;
+ struct htc_packet *tx_pkt = NULL;
+ struct htc_conn_service_resp *resp_msg;
+ struct htc_conn_service_msg *conn_msg;
+ struct htc_endpoint *endpoint;
+ enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
+ unsigned int max_msg_sz = 0;
+ int status = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "htc_conn_service, target:0x%p service id:0x%X\n",
+ target, conn_req->svc_id);
+
+ if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
+ /* special case for pseudo control service */
+ assigned_ep = ENDPOINT_0;
+ max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
+ } else {
+ /* allocate a packet to send to the target */
+ tx_pkt = htc_get_control_buf(target, true);
+
+ if (!tx_pkt)
+ return -ENOMEM;
+
+ conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
+ memset(conn_msg, 0, sizeof(*conn_msg));
+ conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
+ conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
+ conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);
+
+ set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
+ sizeof(*conn_msg) + conn_msg->svc_meta_len,
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+
+ /* we want synchronous operation */
+ tx_pkt->completion = NULL;
+ ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
+ status = ath6kl_htc_tx_issue(target, tx_pkt);
+
+ if (status)
+ goto fail_tx;
+
+ /* wait for response */
+ rx_pkt = htc_wait_for_ctrl_msg(target);
+
+ if (!rx_pkt) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
+
+ if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
+ || (rx_pkt->act_len < sizeof(*resp_msg))) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ conn_resp->resp_code = resp_msg->status;
+ /* check response status */
+ if (resp_msg->status != HTC_SERVICE_SUCCESS) {
+ ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
+ resp_msg->svc_id, resp_msg->status);
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
+ max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
+ }
+
+ if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ endpoint = &target->endpoint[assigned_ep];
+ endpoint->eid = assigned_ep;
+ if (endpoint->svc_id) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ /* return assigned endpoint to caller */
+ conn_resp->endpoint = assigned_ep;
+ conn_resp->len_max = max_msg_sz;
+
+ /* setup the endpoint */
+
+ /* this marks the endpoint in use */
+ endpoint->svc_id = conn_req->svc_id;
+
+ endpoint->max_txq_depth = conn_req->max_txq_depth;
+ endpoint->len_max = max_msg_sz;
+ endpoint->ep_cb = conn_req->ep_cb;
+ endpoint->cred_dist.svc_id = conn_req->svc_id;
+ endpoint->cred_dist.htc_rsvd = endpoint;
+ endpoint->cred_dist.endpoint = assigned_ep;
+ endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
+
+ if (conn_req->max_rxmsg_sz) {
+ /*
+ * Override cred_per_msg calculation, this optimizes
+ * the credit-low indications since the host will actually
+ * issue smaller messages in the Send path.
+ */
+ if (conn_req->max_rxmsg_sz > max_msg_sz) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+ endpoint->cred_dist.cred_per_msg =
+ conn_req->max_rxmsg_sz / target->tgt_cred_sz;
+ } else
+ endpoint->cred_dist.cred_per_msg =
+ max_msg_sz / target->tgt_cred_sz;
+
+ if (!endpoint->cred_dist.cred_per_msg)
+ endpoint->cred_dist.cred_per_msg = 1;
+
+ /* save local connection flags */
+ endpoint->conn_flags = conn_req->flags;
+
+fail_tx:
+ if (tx_pkt)
+ htc_reclaim_txctrl_buf(target, tx_pkt);
+
+ if (rx_pkt) {
+ htc_rxpkt_reset(rx_pkt);
+ reclaim_rx_ctrl_buf(target, rx_pkt);
+ }
+
+ return status;
+}
+
+static void reset_ep_state(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ int i;
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ endpoint = &target->endpoint[i];
+ memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
+ endpoint->svc_id = 0;
+ endpoint->len_max = 0;
+ endpoint->max_txq_depth = 0;
+ memset(&endpoint->ep_st, 0,
+ sizeof(endpoint->ep_st));
+ INIT_LIST_HEAD(&endpoint->rx_bufq);
+ INIT_LIST_HEAD(&endpoint->txq);
+ endpoint->target = target;
+ }
+
+ /* reset distribution list */
+ INIT_LIST_HEAD(&target->cred_dist_list);
+}
+
+int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
+ enum htc_endpoint_id endpoint)
+{
+ int num;
+
+ spin_lock_bh(&target->rx_lock);
+ num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
+ spin_unlock_bh(&target->rx_lock);
+ return num;
+}
+
+static void htc_setup_msg_bndl(struct htc_target *target)
+{
+ /* limit what HTC can handle */
+ target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
+ target->msg_per_bndl_max);
+
+ if (ath6kl_hif_enable_scatter(target->dev->ar)) {
+ target->msg_per_bndl_max = 0;
+ return;
+ }
+
+ /* limit bundle what the device layer can handle */
+ target->msg_per_bndl_max = min(target->max_scat_entries,
+ target->msg_per_bndl_max);
+
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "htc bundling allowed. max msg per htc bundle: %d\n",
+ target->msg_per_bndl_max);
+
+ /* Max rx bundle size is limited by the max tx bundle size */
+ target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
+ /* Max tx bundle size if limited by the extended mbox address range */
+ target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
+ target->max_xfer_szper_scatreq);
+
+ ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
+ target->max_rx_bndl_sz, target->max_tx_bndl_sz);
+
+ if (target->max_tx_bndl_sz)
+ target->tx_bndl_enable = true;
+
+ if (target->max_rx_bndl_sz)
+ target->rx_bndl_enable = true;
+
+ if ((target->tgt_cred_sz % target->block_sz) != 0) {
+ ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
+ target->tgt_cred_sz);
+
+ /*
+ * Disallow send bundling since the credit size is
+ * not aligned to a block size the I/O block
+ * padding will spill into the next credit buffer
+ * which is fatal.
+ */
+ target->tx_bndl_enable = false;
+ }
+}
+
+int ath6kl_htc_wait_target(struct htc_target *target)
+{
+ struct htc_packet *packet = NULL;
+ struct htc_ready_ext_msg *rdy_msg;
+ struct htc_service_connect_req connect;
+ struct htc_service_connect_resp resp;
+ int status;
+
+ /* we should be getting 1 control message that the target is ready */
+ packet = htc_wait_for_ctrl_msg(target);
+
+ if (!packet)
+ return -ENOMEM;
+
+ /* we controlled the buffer creation so it's properly aligned */
+ rdy_msg = (struct htc_ready_ext_msg *)packet->buf;
+
+ if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
+ (packet->act_len < sizeof(struct htc_ready_msg))) {
+ status = -ENOMEM;
+ goto fail_wait_target;
+ }
+
+ if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
+ status = -ENOMEM;
+ goto fail_wait_target;
+ }
+
+ target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
+ target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "target ready: credits: %d credit size: %d\n",
+ target->tgt_creds, target->tgt_cred_sz);
+
+ /* check if this is an extended ready message */
+ if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
+ /* this is an extended message */
+ target->htc_tgt_ver = rdy_msg->htc_ver;
+ target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
+ } else {
+ /* legacy */
+ target->htc_tgt_ver = HTC_VERSION_2P0;
+ target->msg_per_bndl_max = 0;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
+ (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
+ target->htc_tgt_ver);
+
+ if (target->msg_per_bndl_max > 0)
+ htc_setup_msg_bndl(target);
+
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&connect, 0, sizeof(connect));
+ memset(&resp, 0, sizeof(resp));
+ connect.ep_cb.rx = htc_ctrl_rx;
+ connect.ep_cb.rx_refill = NULL;
+ connect.ep_cb.tx_full = NULL;
+ connect.max_txq_depth = NUM_CONTROL_BUFFERS;
+ connect.svc_id = HTC_CTRL_RSVD_SVC;
+
+ /* connect fake service */
+ status = ath6kl_htc_conn_service((void *)target, &connect, &resp);
+
+ if (status)
+ ath6kl_hif_cleanup_scatter(target->dev->ar);
+
+fail_wait_target:
+ if (packet) {
+ htc_rxpkt_reset(packet);
+ reclaim_rx_ctrl_buf(target, packet);
+ }
+
+ return status;
+}
+
+/*
+ * Start HTC, enable interrupts and let the target know
+ * host has finished setup.
+ */
+int ath6kl_htc_start(struct htc_target *target)
+{
+ struct htc_packet *packet;
+ int status;
+
+ /* Disable interrupts at the chip level */
+ ath6kldev_disable_intrs(target->dev);
+
+ target->htc_flags = 0;
+ target->rx_st_flags = 0;
+
+ /* Push control receive buffers into htc control endpoint */
+ while ((packet = htc_get_control_buf(target, false)) != NULL) {
+ status = htc_add_rxbuf(target, packet);
+ if (status)
+ return status;
+ }
+
+ /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
+ ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
+ target->tgt_creds);
+
+ dump_cred_dist_stats(target);
+
+ /* Indicate to the target of the setup completion */
+ status = htc_setup_tx_complete(target);
+
+ if (status)
+ return status;
+
+ /* unmask interrupts */
+ status = ath6kldev_unmask_intrs(target->dev);
+
+ if (status)
+ ath6kl_htc_stop(target);
+
+ return status;
+}
+
+/* htc_stop: stop interrupt reception, and flush all queued buffers */
+void ath6kl_htc_stop(struct htc_target *target)
+{
+ spin_lock_bh(&target->htc_lock);
+ target->htc_flags |= HTC_OP_STATE_STOPPING;
+ spin_unlock_bh(&target->htc_lock);
+
+ /*
+ * Masking interrupts is a synchronous operation, when this
+ * function returns all pending HIF I/O has completed, we can
+ * safely flush the queues.
+ */
+ ath6kldev_mask_intrs(target->dev);
+
+ ath6kl_htc_flush_txep_all(target);
+
+ ath6kl_htc_flush_rx_buf(target);
+
+ reset_ep_state(target);
+}
+
+void *ath6kl_htc_create(struct ath6kl *ar)
+{
+ struct htc_target *target = NULL;
+ struct htc_packet *packet;
+ int status = 0, i = 0;
+ u32 block_size, ctrl_bufsz;
+
+ target = kzalloc(sizeof(*target), GFP_KERNEL);
+ if (!target) {
+ ath6kl_err("unable to allocate memory\n");
+ return NULL;
+ }
+
+ target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
+ if (!target->dev) {
+ ath6kl_err("unable to allocate memory\n");
+ status = -ENOMEM;
+ goto fail_create_htc;
+ }
+
+ spin_lock_init(&target->htc_lock);
+ spin_lock_init(&target->rx_lock);
+ spin_lock_init(&target->tx_lock);
+
+ INIT_LIST_HEAD(&target->free_ctrl_txbuf);
+ INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
+ INIT_LIST_HEAD(&target->cred_dist_list);
+
+ target->dev->ar = ar;
+ target->dev->htc_cnxt = target;
+ target->ep_waiting = ENDPOINT_MAX;
+
+ reset_ep_state(target);
+
+ status = ath6kldev_setup(target->dev);
+
+ if (status)
+ goto fail_create_htc;
+
+ block_size = ar->mbox_info.block_size;
+
+ ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
+ (block_size + HTC_HDR_LENGTH) :
+ (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
+
+ for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
+ packet = kzalloc(sizeof(*packet), GFP_KERNEL);
+ if (!packet)
+ break;
+
+ packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
+ if (!packet->buf_start) {
+ kfree(packet);
+ break;
+ }
+
+ packet->buf_len = ctrl_bufsz;
+ if (i < NUM_CONTROL_RX_BUFFERS) {
+ packet->act_len = 0;
+ packet->buf = packet->buf_start;
+ packet->endpoint = ENDPOINT_0;
+ list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
+ } else
+ list_add_tail(&packet->list, &target->free_ctrl_txbuf);
+ }
+
+fail_create_htc:
+ if (i != NUM_CONTROL_BUFFERS || status) {
+ if (target) {
+ ath6kl_htc_cleanup(target);
+ target = NULL;
+ }
+ }
+
+ return target;
+}
+
+/* cleanup the HTC instance */
+void ath6kl_htc_cleanup(struct htc_target *target)
+{
+ struct htc_packet *packet, *tmp_packet;
+
+ ath6kl_hif_cleanup_scatter(target->dev->ar);
+
+ list_for_each_entry_safe(packet, tmp_packet,
+ &target->free_ctrl_txbuf, list) {
+ list_del(&packet->list);
+ kfree(packet->buf_start);
+ kfree(packet);
+ }
+
+ list_for_each_entry_safe(packet, tmp_packet,
+ &target->free_ctrl_rxbuf, list) {
+ list_del(&packet->list);
+ kfree(packet->buf_start);
+ kfree(packet);
+ }
+
+ kfree(target->dev);
+ kfree(target);
+}
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
new file mode 100644
index 00000000000..8ce0c2c07de
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_H
+#define HTC_H
+
+#include "common.h"
+
+/* frame header flags */
+
+/* send direction */
+#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0)
+#define HTC_FLAGS_SEND_BUNDLE (1 << 1)
+
+/* receive direction */
+#define HTC_FLG_RX_UNUSED (1 << 0)
+#define HTC_FLG_RX_TRAILER (1 << 1)
+/* Bundle count maske and shift */
+#define HTC_FLG_RX_BNDL_CNT (0xF0)
+#define HTC_FLG_RX_BNDL_CNT_S 4
+
+#define HTC_HDR_LENGTH (sizeof(struct htc_frame_hdr))
+#define HTC_MAX_PAYLOAD_LENGTH (4096 - sizeof(struct htc_frame_hdr))
+
+/* HTC control message IDs */
+
+#define HTC_MSG_READY_ID 1
+#define HTC_MSG_CONN_SVC_ID 2
+#define HTC_MSG_CONN_SVC_RESP_ID 3
+#define HTC_MSG_SETUP_COMPLETE_ID 4
+#define HTC_MSG_SETUP_COMPLETE_EX_ID 5
+
+#define HTC_MAX_CTRL_MSG_LEN 256
+
+#define HTC_VERSION_2P0 0x00
+#define HTC_VERSION_2P1 0x01
+
+#define HTC_SERVICE_META_DATA_MAX_LENGTH 128
+
+#define HTC_CONN_FLGS_THRESH_LVL_QUAT 0x0
+#define HTC_CONN_FLGS_THRESH_LVL_HALF 0x1
+#define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT 0x2
+#define HTC_CONN_FLGS_REDUCE_CRED_DRIB 0x4
+#define HTC_CONN_FLGS_THRESH_MASK 0x3
+
+/* connect response status codes */
+#define HTC_SERVICE_SUCCESS 0
+#define HTC_SERVICE_NOT_FOUND 1
+#define HTC_SERVICE_FAILED 2
+
+/* no resources (i.e. no more endpoints) */
+#define HTC_SERVICE_NO_RESOURCES 3
+
+/* specific service is not allowing any more endpoints */
+#define HTC_SERVICE_NO_MORE_EP 4
+
+/* report record IDs */
+#define HTC_RECORD_NULL 0
+#define HTC_RECORD_CREDITS 1
+#define HTC_RECORD_LOOKAHEAD 2
+#define HTC_RECORD_LOOKAHEAD_BUNDLE 3
+
+#define HTC_SETUP_COMP_FLG_RX_BNDL_EN (1 << 0)
+
+#define MAKE_SERVICE_ID(group, index) \
+ (int)(((int)group << 8) | (int)(index))
+
+/* NOTE: service ID of 0x0000 is reserved and should never be used */
+#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1)
+#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0)
+#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1)
+#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2)
+#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3)
+#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
+#define WMI_MAX_SERVICES 5
+
+/* reserved and used to flush ALL packets */
+#define HTC_TX_PACKET_TAG_ALL 0
+#define HTC_SERVICE_TX_PACKET_TAG 1
+#define HTC_TX_PACKET_TAG_USER_DEFINED (HTC_SERVICE_TX_PACKET_TAG + 9)
+
+/* more packets on this endpoint are being fetched */
+#define HTC_RX_FLAGS_INDICATE_MORE_PKTS (1 << 0)
+
+/* TODO.. for BMI */
+#define ENDPOINT1 0
+/* TODO -remove me, but we have to fix BMI first */
+#define HTC_MAILBOX_NUM_MAX 4
+
+/* enable send bundle padding for this endpoint */
+#define HTC_FLGS_TX_BNDL_PAD_EN (1 << 0)
+#define HTC_EP_ACTIVE ((u32) (1u << 31))
+
+/* HTC operational parameters */
+#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */
+#define HTC_TARGET_DEBUG_INTR_MASK 0x01
+#define HTC_TARGET_CREDIT_INTR_MASK 0xF0
+
+#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
+#define HTC_MIN_HTC_MSGS_TO_BUNDLE 2
+
+/* packet flags */
+
+#define HTC_RX_PKT_IGNORE_LOOKAHEAD (1 << 0)
+#define HTC_RX_PKT_REFRESH_HDR (1 << 1)
+#define HTC_RX_PKT_PART_OF_BUNDLE (1 << 2)
+#define HTC_RX_PKT_NO_RECYCLE (1 << 3)
+
+#define NUM_CONTROL_BUFFERS 8
+#define NUM_CONTROL_TX_BUFFERS 2
+#define NUM_CONTROL_RX_BUFFERS (NUM_CONTROL_BUFFERS - NUM_CONTROL_TX_BUFFERS)
+
+#define HTC_RECV_WAIT_BUFFERS (1 << 0)
+#define HTC_OP_STATE_STOPPING (1 << 0)
+
+/*
+ * The frame header length and message formats defined herein were selected
+ * to accommodate optimal alignment for target processing. This reduces
+ * code size and improves performance. Any changes to the header length may
+ * alter the alignment and cause exceptions on the target. When adding to
+ * the messagestructures insure that fields are properly aligned.
+ */
+
+/* HTC frame header
+ *
+ * NOTE: do not remove or re-arrange the fields, these are minimally
+ * required to take advantage of 4-byte lookaheads in some hardware
+ * implementations.
+ */
+struct htc_frame_hdr {
+ u8 eid;
+ u8 flags;
+
+ /* length of data (including trailer) that follows the header */
+ __le16 payld_len;
+
+ /* end of 4-byte lookahead */
+
+ u8 ctrl[2];
+} __packed;
+
+/* HTC ready message */
+struct htc_ready_msg {
+ __le16 msg_id;
+ __le16 cred_cnt;
+ __le16 cred_sz;
+ u8 max_ep;
+ u8 pad;
+} __packed;
+
+/* extended HTC ready message */
+struct htc_ready_ext_msg {
+ struct htc_ready_msg ver2_0_info;
+ u8 htc_ver;
+ u8 msg_per_htc_bndl;
+} __packed;
+
+/* connect service */
+struct htc_conn_service_msg {
+ __le16 msg_id;
+ __le16 svc_id;
+ __le16 conn_flags;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+/* connect response */
+struct htc_conn_service_resp {
+ __le16 msg_id;
+ __le16 svc_id;
+ u8 status;
+ u8 eid;
+ __le16 max_msg_sz;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+struct htc_setup_comp_msg {
+ __le16 msg_id;
+} __packed;
+
+/* extended setup completion message */
+struct htc_setup_comp_ext_msg {
+ __le16 msg_id;
+ __le32 flags;
+ u8 msg_per_rxbndl;
+ u8 Rsvd[3];
+} __packed;
+
+struct htc_record_hdr {
+ u8 rec_id;
+ u8 len;
+} __packed;
+
+struct htc_credit_report {
+ u8 eid;
+ u8 credits;
+} __packed;
+
+/*
+ * NOTE: The lk_ahd array is guarded by a pre_valid
+ * and Post Valid guard bytes. The pre_valid bytes must
+ * equal the inverse of the post_valid byte.
+ */
+struct htc_lookahead_report {
+ u8 pre_valid;
+ u8 lk_ahd[4];
+ u8 post_valid;
+} __packed;
+
+struct htc_bundle_lkahd_rpt {
+ u8 lk_ahd[4];
+} __packed;
+
+/* Current service IDs */
+
+enum htc_service_grp_ids {
+ RSVD_SERVICE_GROUP = 0,
+ WMI_SERVICE_GROUP = 1,
+
+ HTC_TEST_GROUP = 254,
+ HTC_SERVICE_GROUP_LAST = 255
+};
+
+/* ------ endpoint IDS ------ */
+
+enum htc_endpoint_id {
+ ENDPOINT_UNUSED = -1,
+ ENDPOINT_0 = 0,
+ ENDPOINT_1 = 1,
+ ENDPOINT_2 = 2,
+ ENDPOINT_3,
+ ENDPOINT_4,
+ ENDPOINT_5,
+ ENDPOINT_6,
+ ENDPOINT_7,
+ ENDPOINT_8,
+ ENDPOINT_MAX,
+};
+
+struct htc_tx_packet_info {
+ u16 tag;
+ int cred_used;
+ u8 flags;
+ int seqno;
+};
+
+struct htc_rx_packet_info {
+ u32 exp_hdr;
+ u32 rx_flags;
+ u32 indicat_flags;
+};
+
+struct htc_target;
+
+/* wrapper around endpoint-specific packets */
+struct htc_packet {
+ struct list_head list;
+
+ /* caller's per packet specific context */
+ void *pkt_cntxt;
+
+ /*
+ * the true buffer start , the caller can store the real
+ * buffer start here. In receive callbacks, the HTC layer
+ * sets buf to the start of the payload past the header.
+ * This field allows the caller to reset buf when it recycles
+ * receive packets back to HTC.
+ */
+ u8 *buf_start;
+
+ /*
+ * Pointer to the start of the buffer. In the transmit
+ * direction this points to the start of the payload. In the
+ * receive direction, however, the buffer when queued up
+ * points to the start of the HTC header but when returned
+ * to the caller points to the start of the payload
+ */
+ u8 *buf;
+ u32 buf_len;
+
+ /* actual length of payload */
+ u32 act_len;
+
+ /* endpoint that this packet was sent/recv'd from */
+ enum htc_endpoint_id endpoint;
+
+ /* completion status */
+
+ int status;
+ union {
+ struct htc_tx_packet_info tx;
+ struct htc_rx_packet_info rx;
+ } info;
+
+ void (*completion) (struct htc_target *, struct htc_packet *);
+ struct htc_target *context;
+};
+
+enum htc_send_full_action {
+ HTC_SEND_FULL_KEEP = 0,
+ HTC_SEND_FULL_DROP = 1,
+};
+
+struct htc_ep_callbacks {
+ void (*rx) (struct htc_target *, struct htc_packet *);
+ void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint);
+ enum htc_send_full_action (*tx_full) (struct htc_target *,
+ struct htc_packet *);
+ struct htc_packet *(*rx_allocthresh) (struct htc_target *,
+ enum htc_endpoint_id, int);
+ int rx_alloc_thresh;
+ int rx_refill_thresh;
+};
+
+/* service connection information */
+struct htc_service_connect_req {
+ u16 svc_id;
+ u16 conn_flags;
+ struct htc_ep_callbacks ep_cb;
+ int max_txq_depth;
+ u32 flags;
+ unsigned int max_rxmsg_sz;
+};
+
+/* service connection response information */
+struct htc_service_connect_resp {
+ u8 buf_len;
+ u8 act_len;
+ enum htc_endpoint_id endpoint;
+ unsigned int len_max;
+ u8 resp_code;
+};
+
+/* endpoint distributionstructure */
+struct htc_endpoint_credit_dist {
+ struct list_head list;
+
+ /* Service ID (set by HTC) */
+ u16 svc_id;
+
+ /* endpoint for this distributionstruct (set by HTC) */
+ enum htc_endpoint_id endpoint;
+
+ u32 dist_flags;
+
+ /*
+ * credits for normal operation, anything above this
+ * indicates the endpoint is over-subscribed.
+ */
+ int cred_norm;
+
+ /* floor for credit distribution */
+ int cred_min;
+
+ int cred_assngd;
+
+ /* current credits available */
+ int credits;
+
+ /*
+ * pending credits to distribute on this endpoint, this
+ * is set by HTC when credit reports arrive. The credit
+ * distribution functions sets this to zero when it distributes
+ * the credits.
+ */
+ int cred_to_dist;
+
+ /*
+ * the number of credits that the current pending TX packet needs
+ * to transmit. This is set by HTC when endpoint needs credits in
+ * order to transmit.
+ */
+ int seek_cred;
+
+ /* size in bytes of each credit */
+ int cred_sz;
+
+ /* credits required for a maximum sized messages */
+ int cred_per_msg;
+
+ /* reserved for HTC use */
+ void *htc_rsvd;
+
+ /*
+ * current depth of TX queue , i.e. messages waiting for credits
+ * This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE
+ * or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint
+ * that has non-zero credits to recover.
+ */
+ int txq_depth;
+};
+
+/*
+ * credit distibution code that is passed into the distrbution function,
+ * there are mandatory and optional codes that must be handled
+ */
+enum htc_credit_dist_reason {
+ HTC_CREDIT_DIST_SEND_COMPLETE = 0,
+ HTC_CREDIT_DIST_ACTIVITY_CHANGE = 1,
+ HTC_CREDIT_DIST_SEEK_CREDITS,
+};
+
+struct htc_credit_state_info {
+ int total_avail_credits;
+ int cur_free_credits;
+ struct list_head lowestpri_ep_dist;
+};
+
+/* endpoint statistics */
+struct htc_endpoint_stats {
+ /*
+ * number of times the host set the credit-low flag in a send
+ * message on this endpoint
+ */
+ u32 cred_low_indicate;
+
+ u32 tx_issued;
+ u32 tx_pkt_bundled;
+ u32 tx_bundles;
+ u32 tx_dropped;
+
+ /* running count of total credit reports received for this endpoint */
+ u32 tx_cred_rpt;
+
+ /* credit reports received from this endpoint's RX packets */
+ u32 cred_rpt_from_rx;
+
+ /* credit reports received from RX packets of other endpoints */
+ u32 cred_rpt_from_other;
+
+ /* credit reports received from endpoint 0 RX packets */
+ u32 cred_rpt_ep0;
+
+ /* count of credits received via Rx packets on this endpoint */
+ u32 cred_from_rx;
+
+ /* count of credits received via another endpoint */
+ u32 cred_from_other;
+
+ /* count of credits received via another endpoint */
+ u32 cred_from_ep0;
+
+ /* count of consummed credits */
+ u32 cred_cosumd;
+
+ /* count of credits returned */
+ u32 cred_retnd;
+
+ u32 rx_pkts;
+
+ /* count of lookahead records found in Rx msg */
+ u32 rx_lkahds;
+
+ /* count of recv packets received in a bundle */
+ u32 rx_bundl;
+
+ /* count of number of bundled lookaheads */
+ u32 rx_bundle_lkahd;
+
+ /* count of the number of bundle indications from the HTC header */
+ u32 rx_bundle_from_hdr;
+
+ /* the number of times the recv allocation threshold was hit */
+ u32 rx_alloc_thresh_hit;
+
+ /* total number of bytes */
+ u32 rxalloc_thresh_byte;
+};
+
+struct htc_endpoint {
+ enum htc_endpoint_id eid;
+ u16 svc_id;
+ struct list_head txq;
+ struct list_head rx_bufq;
+ struct htc_endpoint_credit_dist cred_dist;
+ struct htc_ep_callbacks ep_cb;
+ int max_txq_depth;
+ int len_max;
+ int tx_proc_cnt;
+ int rx_proc_cnt;
+ struct htc_target *target;
+ u8 seqno;
+ u32 conn_flags;
+ struct htc_endpoint_stats ep_st;
+};
+
+struct htc_control_buffer {
+ struct htc_packet packet;
+ u8 *buf;
+};
+
+struct ath6kl_device;
+
+/* our HTC target state */
+struct htc_target {
+ struct htc_endpoint endpoint[ENDPOINT_MAX];
+ struct list_head cred_dist_list;
+ struct list_head free_ctrl_txbuf;
+ struct list_head free_ctrl_rxbuf;
+ struct htc_credit_state_info *cred_dist_cntxt;
+ int tgt_creds;
+ unsigned int tgt_cred_sz;
+ spinlock_t htc_lock;
+ spinlock_t rx_lock;
+ spinlock_t tx_lock;
+ struct ath6kl_device *dev;
+ u32 htc_flags;
+ u32 rx_st_flags;
+ enum htc_endpoint_id ep_waiting;
+ u8 htc_tgt_ver;
+
+ /* max messages per bundle for HTC */
+ int msg_per_bndl_max;
+
+ bool tx_bndl_enable;
+ int rx_bndl_enable;
+ int max_rx_bndl_sz;
+ int max_tx_bndl_sz;
+
+ u32 block_sz;
+ u32 block_mask;
+
+ int max_scat_entries;
+ int max_xfer_szper_scatreq;
+
+ int chk_irq_status_cnt;
+};
+
+void *ath6kl_htc_create(struct ath6kl *ar);
+void ath6kl_htc_set_credit_dist(struct htc_target *target,
+ struct htc_credit_state_info *cred_info,
+ u16 svc_pri_order[], int len);
+int ath6kl_htc_wait_target(struct htc_target *target);
+int ath6kl_htc_start(struct htc_target *target);
+int ath6kl_htc_conn_service(struct htc_target *target,
+ struct htc_service_connect_req *req,
+ struct htc_service_connect_resp *resp);
+int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet);
+void ath6kl_htc_stop(struct htc_target *target);
+void ath6kl_htc_cleanup(struct htc_target *target);
+void ath6kl_htc_flush_txep(struct htc_target *target,
+ enum htc_endpoint_id endpoint, u16 tag);
+void ath6kl_htc_flush_rx_buf(struct htc_target *target);
+void ath6kl_htc_indicate_activity_change(struct htc_target *target,
+ enum htc_endpoint_id endpoint,
+ bool active);
+int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
+ enum htc_endpoint_id endpoint);
+int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
+ struct list_head *pktq);
+int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
+ u32 msg_look_ahead[], int *n_pkts);
+
+static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
+ u8 *buf, unsigned int len,
+ enum htc_endpoint_id eid, u16 tag)
+{
+ packet->pkt_cntxt = context;
+ packet->buf = buf;
+ packet->act_len = len;
+ packet->endpoint = eid;
+ packet->info.tx.tag = tag;
+}
+
+static inline void htc_rxpkt_reset(struct htc_packet *packet)
+{
+ packet->buf = packet->buf_start;
+ packet->act_len = 0;
+}
+
+static inline void set_htc_rxpkt_info(struct htc_packet *packet, void *context,
+ u8 *buf, unsigned long len,
+ enum htc_endpoint_id eid)
+{
+ packet->pkt_cntxt = context;
+ packet->buf = buf;
+ packet->buf_start = buf;
+ packet->buf_len = len;
+ packet->endpoint = eid;
+}
+
+static inline int get_queue_depth(struct list_head *queue)
+{
+ struct list_head *tmp_list;
+ int depth = 0;
+
+ list_for_each(tmp_list, queue)
+ depth++;
+
+ return depth;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.c b/drivers/net/wireless/ath/ath6kl/htc_hif.c
new file mode 100644
index 00000000000..86b1cc7409c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc_hif.c
@@ -0,0 +1,641 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "target.h"
+#include "hif-ops.h"
+#include "htc_hif.h"
+#include "debug.h"
+
+#define MAILBOX_FOR_BLOCK_SIZE 1
+
+#define ATH6KL_TIME_QUANTUM 10 /* in ms */
+
+static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
+{
+ u8 *buf;
+ int i;
+
+ buf = req->virt_dma_buf;
+
+ for (i = 0; i < req->scat_entries; i++) {
+
+ if (from_dma)
+ memcpy(req->scat_list[i].buf, buf,
+ req->scat_list[i].len);
+ else
+ memcpy(buf, req->scat_list[i].buf,
+ req->scat_list[i].len);
+
+ buf += req->scat_list[i].len;
+ }
+
+ return 0;
+}
+
+int ath6kldev_rw_comp_handler(void *context, int status)
+{
+ struct htc_packet *packet = context;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n",
+ packet, status);
+
+ packet->status = status;
+ packet->completion(packet->context, packet);
+
+ return 0;
+}
+
+static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev)
+{
+ u32 dummy;
+ int status;
+
+ ath6kl_err("target debug interrupt\n");
+
+ ath6kl_target_failure(dev->ar);
+
+ /*
+ * read counter to clear the interrupt, the debug error interrupt is
+ * counter 0.
+ */
+ status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
+ (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
+ if (status)
+ WARN_ON(1);
+
+ return status;
+}
+
+/* mailbox recv message polling */
+int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
+ int timeout)
+{
+ struct ath6kl_irq_proc_registers *rg;
+ int status = 0, i;
+ u8 htc_mbox = 1 << HTC_MAILBOX;
+
+ for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
+ /* this is the standard HIF way, load the reg table */
+ status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
+ (u8 *) &dev->irq_proc_reg,
+ sizeof(dev->irq_proc_reg),
+ HIF_RD_SYNC_BYTE_INC);
+
+ if (status) {
+ ath6kl_err("failed to read reg table\n");
+ return status;
+ }
+
+ /* check for MBOX data and valid lookahead */
+ if (dev->irq_proc_reg.host_int_status & htc_mbox) {
+ if (dev->irq_proc_reg.rx_lkahd_valid &
+ htc_mbox) {
+ /*
+ * Mailbox has a message and the look ahead
+ * is valid.
+ */
+ rg = &dev->irq_proc_reg;
+ *lk_ahd =
+ le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
+ break;
+ }
+ }
+
+ /* delay a little */
+ mdelay(ATH6KL_TIME_QUANTUM);
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
+ }
+
+ if (i == 0) {
+ ath6kl_err("timeout waiting for recv message\n");
+ status = -ETIME;
+ /* check if the target asserted */
+ if (dev->irq_proc_reg.counter_int_status &
+ ATH6KL_TARGET_DEBUG_INTR_MASK)
+ /*
+ * Target failure handler will be called in case of
+ * an assert.
+ */
+ ath6kldev_proc_dbg_intr(dev);
+ }
+
+ return status;
+}
+
+/*
+ * Disable packet reception (used in case the host runs out of buffers)
+ * using the interrupt enable registers through the host I/F
+ */
+int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
+{
+ struct ath6kl_irq_enable_reg regs;
+ int status = 0;
+
+ /* take the lock to protect interrupt enable shadows */
+ spin_lock_bh(&dev->lock);
+
+ if (enable_rx)
+ dev->irq_en_reg.int_status_en |=
+ SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
+ else
+ dev->irq_en_reg.int_status_en &=
+ ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
+
+ memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
+
+ spin_unlock_bh(&dev->lock);
+
+ status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
+ &regs.int_status_en,
+ sizeof(struct ath6kl_irq_enable_reg),
+ HIF_WR_SYNC_BYTE_INC);
+
+ return status;
+}
+
+int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
+ struct hif_scatter_req *scat_req, bool read)
+{
+ int status = 0;
+
+ if (read) {
+ scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
+ scat_req->addr = dev->ar->mbox_info.htc_addr;
+ } else {
+ scat_req->req = HIF_WR_ASYNC_BLOCK_INC;
+
+ scat_req->addr =
+ (scat_req->len > HIF_MBOX_WIDTH) ?
+ dev->ar->mbox_info.htc_ext_addr :
+ dev->ar->mbox_info.htc_addr;
+ }
+
+ ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
+ "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
+ scat_req->scat_entries, scat_req->len,
+ scat_req->addr, !read ? "async" : "sync",
+ (read) ? "rd" : "wr");
+
+ if (!read && scat_req->virt_scat) {
+ status = ath6kldev_cp_scat_dma_buf(scat_req, false);
+ if (status) {
+ scat_req->status = status;
+ scat_req->complete(dev->ar->htc_target, scat_req);
+ return 0;
+ }
+ }
+
+ status = ath6kl_hif_scat_req_rw(dev->ar, scat_req);
+
+ if (read) {
+ /* in sync mode, we can touch the scatter request */
+ scat_req->status = status;
+ if (!status && scat_req->virt_scat)
+ scat_req->status =
+ ath6kldev_cp_scat_dma_buf(scat_req, true);
+ }
+
+ return status;
+}
+
+static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
+{
+ u8 counter_int_status;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n");
+
+ counter_int_status = dev->irq_proc_reg.counter_int_status &
+ dev->irq_en_reg.cntr_int_status_en;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
+ counter_int_status);
+
+ /*
+ * NOTE: other modules like GMBOX may use the counter interrupt for
+ * credit flow control on other counters, we only need to check for
+ * the debug assertion counter interrupt.
+ */
+ if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
+ return ath6kldev_proc_dbg_intr(dev);
+
+ return 0;
+}
+
+static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
+{
+ int status;
+ u8 error_int_status;
+ u8 reg_buf[4];
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n");
+
+ error_int_status = dev->irq_proc_reg.error_int_status & 0x0F;
+ if (!error_int_status) {
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
+ error_int_status);
+
+ if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status))
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n");
+
+ if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status))
+ ath6kl_err("rx underflow\n");
+
+ if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status))
+ ath6kl_err("tx overflow\n");
+
+ /* Clear the interrupt */
+ dev->irq_proc_reg.error_int_status &= ~error_int_status;
+
+ /* set W1C value to clear the interrupt, this hits the register first */
+ reg_buf[0] = error_int_status;
+ reg_buf[1] = 0;
+ reg_buf[2] = 0;
+ reg_buf[3] = 0;
+
+ status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
+ reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
+
+ if (status)
+ WARN_ON(1);
+
+ return status;
+}
+
+static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev)
+{
+ int status;
+ u8 cpu_int_status;
+ u8 reg_buf[4];
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n");
+
+ cpu_int_status = dev->irq_proc_reg.cpu_int_status &
+ dev->irq_en_reg.cpu_int_status_en;
+ if (!cpu_int_status) {
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
+ cpu_int_status);
+
+ /* Clear the interrupt */
+ dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status;
+
+ /*
+ * Set up the register transfer buffer to hit the register 4 times ,
+ * this is done to make the access 4-byte aligned to mitigate issues
+ * with host bus interconnects that restrict bus transfer lengths to
+ * be a multiple of 4-bytes.
+ */
+
+ /* set W1C value to clear the interrupt, this hits the register first */
+ reg_buf[0] = cpu_int_status;
+ /* the remaining are set to zero which have no-effect */
+ reg_buf[1] = 0;
+ reg_buf[2] = 0;
+ reg_buf[3] = 0;
+
+ status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
+ reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
+
+ if (status)
+ WARN_ON(1);
+
+ return status;
+}
+
+/* process pending interrupts synchronously */
+static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
+{
+ struct ath6kl_irq_proc_registers *rg;
+ int status = 0;
+ u8 host_int_status = 0;
+ u32 lk_ahd = 0;
+ u8 htc_mbox = 1 << HTC_MAILBOX;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);
+
+ /*
+ * NOTE: HIF implementation guarantees that the context of this
+ * call allows us to perform SYNCHRONOUS I/O, that is we can block,
+ * sleep or call any API that can block or switch thread/task
+ * contexts. This is a fully schedulable context.
+ */
+
+ /*
+ * Process pending intr only when int_status_en is clear, it may
+ * result in unnecessary bus transaction otherwise. Target may be
+ * unresponsive at the time.
+ */
+ if (dev->irq_en_reg.int_status_en) {
+ /*
+ * Read the first 28 bytes of the HTC register table. This
+ * will yield us the value of different int status
+ * registers and the lookahead registers.
+ *
+ * length = sizeof(int_status) + sizeof(cpu_int_status)
+ * + sizeof(error_int_status) +
+ * sizeof(counter_int_status) +
+ * sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
+ * + sizeof(hole) + sizeof(rx_lkahd) +
+ * sizeof(int_status_en) +
+ * sizeof(cpu_int_status_en) +
+ * sizeof(err_int_status_en) +
+ * sizeof(cntr_int_status_en);
+ */
+ status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
+ (u8 *) &dev->irq_proc_reg,
+ sizeof(dev->irq_proc_reg),
+ HIF_RD_SYNC_BYTE_INC);
+ if (status)
+ goto out;
+
+ if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
+ ath6kl_dump_registers(dev, &dev->irq_proc_reg,
+ &dev->irq_en_reg);
+
+ /* Update only those registers that are enabled */
+ host_int_status = dev->irq_proc_reg.host_int_status &
+ dev->irq_en_reg.int_status_en;
+
+ /* Look at mbox status */
+ if (host_int_status & htc_mbox) {
+ /*
+ * Mask out pending mbox value, we use "lookAhead as
+ * the real flag for mbox processing.
+ */
+ host_int_status &= ~htc_mbox;
+ if (dev->irq_proc_reg.rx_lkahd_valid &
+ htc_mbox) {
+ rg = &dev->irq_proc_reg;
+ lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
+ if (!lk_ahd)
+ ath6kl_err("lookAhead is zero!\n");
+ }
+ }
+ }
+
+ if (!host_int_status && !lk_ahd) {
+ *done = true;
+ goto out;
+ }
+
+ if (lk_ahd) {
+ int fetched = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
+ /*
+ * Mailbox Interrupt, the HTC layer may issue async
+ * requests to empty the mailbox. When emptying the recv
+ * mailbox we use the async handler above called from the
+ * completion routine of the callers read request. This can
+ * improve performance by reducing context switching when
+ * we rapidly pull packets.
+ */
+ status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt,
+ &lk_ahd, &fetched);
+ if (status)
+ goto out;
+
+ if (!fetched)
+ /*
+ * HTC could not pull any messages out due to lack
+ * of resources.
+ */
+ dev->htc_cnxt->chk_irq_status_cnt = 0;
+ }
+
+ /* now handle the rest of them */
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "valid interrupt source(s) for other interrupts: 0x%x\n",
+ host_int_status);
+
+ if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
+ /* CPU Interrupt */
+ status = ath6kldev_proc_cpu_intr(dev);
+ if (status)
+ goto out;
+ }
+
+ if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
+ /* Error Interrupt */
+ status = ath6kldev_proc_err_intr(dev);
+ if (status)
+ goto out;
+ }
+
+ if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
+ /* Counter Interrupt */
+ status = ath6kldev_proc_counter_intr(dev);
+
+out:
+ /*
+ * An optimization to bypass reading the IRQ status registers
+ * unecessarily which can re-wake the target, if upper layers
+ * determine that we are in a low-throughput mode, we can rely on
+ * taking another interrupt rather than re-checking the status
+ * registers which can re-wake the target.
+ *
+ * NOTE : for host interfaces that makes use of detecting pending
+ * mbox messages at hif can not use this optimization due to
+ * possible side effects, SPI requires the host to drain all
+ * messages from the mailbox before exiting the ISR routine.
+ */
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "bypassing irq status re-check, forcing done\n");
+
+ if (!dev->htc_cnxt->chk_irq_status_cnt)
+ *done = true;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "proc_pending_irqs: (done:%d, status=%d\n", *done, status);
+
+ return status;
+}
+
+/* interrupt handler, kicks off all interrupt processing */
+int ath6kldev_intr_bh_handler(struct ath6kl *ar)
+{
+ struct ath6kl_device *dev = ar->htc_target->dev;
+ int status = 0;
+ bool done = false;
+
+ /*
+ * Reset counter used to flag a re-scan of IRQ status registers on
+ * the target.
+ */
+ dev->htc_cnxt->chk_irq_status_cnt = 0;
+
+ /*
+ * IRQ processing is synchronous, interrupt status registers can be
+ * re-read.
+ */
+ while (!done) {
+ status = proc_pending_irqs(dev, &done);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+
+static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
+{
+ struct ath6kl_irq_enable_reg regs;
+ int status;
+
+ spin_lock_bh(&dev->lock);
+
+ /* Enable all but ATH6KL CPU interrupts */
+ dev->irq_en_reg.int_status_en =
+ SM(INT_STATUS_ENABLE_ERROR, 0x01) |
+ SM(INT_STATUS_ENABLE_CPU, 0x01) |
+ SM(INT_STATUS_ENABLE_COUNTER, 0x01);
+
+ /*
+ * NOTE: There are some cases where HIF can do detection of
+ * pending mbox messages which is disabled now.
+ */
+ dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
+
+ /* Set up the CPU Interrupt status Register */
+ dev->irq_en_reg.cpu_int_status_en = 0;
+
+ /* Set up the Error Interrupt status Register */
+ dev->irq_en_reg.err_int_status_en =
+ SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
+ SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1);
+
+ /*
+ * Enable Counter interrupt status register to get fatal errors for
+ * debugging.
+ */
+ dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
+ ATH6KL_TARGET_DEBUG_INTR_MASK);
+ memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
+
+ spin_unlock_bh(&dev->lock);
+
+ status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
+ &regs.int_status_en, sizeof(regs),
+ HIF_WR_SYNC_BYTE_INC);
+
+ if (status)
+ ath6kl_err("failed to update interrupt ctl reg err: %d\n",
+ status);
+
+ return status;
+}
+
+int ath6kldev_disable_intrs(struct ath6kl_device *dev)
+{
+ struct ath6kl_irq_enable_reg regs;
+
+ spin_lock_bh(&dev->lock);
+ /* Disable all interrupts */
+ dev->irq_en_reg.int_status_en = 0;
+ dev->irq_en_reg.cpu_int_status_en = 0;
+ dev->irq_en_reg.err_int_status_en = 0;
+ dev->irq_en_reg.cntr_int_status_en = 0;
+ memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
+ spin_unlock_bh(&dev->lock);
+
+ return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
+ &regs.int_status_en, sizeof(regs),
+ HIF_WR_SYNC_BYTE_INC);
+}
+
+/* enable device interrupts */
+int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
+{
+ int status = 0;
+
+ /*
+ * Make sure interrupt are disabled before unmasking at the HIF
+ * layer. The rationale here is that between device insertion
+ * (where we clear the interrupts the first time) and when HTC
+ * is finally ready to handle interrupts, other software can perform
+ * target "soft" resets. The ATH6KL interrupt enables reset back to an
+ * "enabled" state when this happens.
+ */
+ ath6kldev_disable_intrs(dev);
+
+ /* unmask the host controller interrupts */
+ ath6kl_hif_irq_enable(dev->ar);
+ status = ath6kldev_enable_intrs(dev);
+
+ return status;
+}
+
+/* disable all device interrupts */
+int ath6kldev_mask_intrs(struct ath6kl_device *dev)
+{
+ /*
+ * Mask the interrupt at the HIF layer to avoid any stray interrupt
+ * taken while we zero out our shadow registers in
+ * ath6kldev_disable_intrs().
+ */
+ ath6kl_hif_irq_disable(dev->ar);
+
+ return ath6kldev_disable_intrs(dev);
+}
+
+int ath6kldev_setup(struct ath6kl_device *dev)
+{
+ int status = 0;
+
+ spin_lock_init(&dev->lock);
+
+ /*
+ * NOTE: we actually get the block size of a mailbox other than 0,
+ * for SDIO the block size on mailbox 0 is artificially set to 1.
+ * So we use the block size that is set for the other 3 mailboxes.
+ */
+ dev->htc_cnxt->block_sz = dev->ar->mbox_info.block_size;
+
+ /* must be a power of 2 */
+ if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
+ WARN_ON(1);
+ goto fail_setup;
+ }
+
+ /* assemble mask, used for padding to a block */
+ dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n",
+ dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
+
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "hif interrupt processing is sync only\n");
+
+ status = ath6kldev_disable_intrs(dev);
+
+fail_setup:
+ return status;
+
+}
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.h b/drivers/net/wireless/ath/ath6kl/htc_hif.h
new file mode 100644
index 00000000000..171ad63d89b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc_hif.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_HIF_H
+#define HTC_HIF_H
+
+#include "htc.h"
+#include "hif.h"
+
+#define ATH6KL_MAILBOXES 4
+
+/* HTC runs over mailbox 0 */
+#define HTC_MAILBOX 0
+
+#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
+
+#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \
+ INT_STATUS_ENABLE_CPU_MASK | \
+ INT_STATUS_ENABLE_COUNTER_MASK)
+
+#define ATH6KL_REG_IO_BUFFER_SIZE 32
+#define ATH6KL_MAX_REG_IO_BUFFERS 8
+#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
+#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
+#define ATH6KL_SCATTER_REQS 4
+
+#ifndef A_CACHE_LINE_PAD
+#define A_CACHE_LINE_PAD 128
+#endif
+#define ATH6KL_MIN_SCATTER_ENTRIES_PER_REQ 2
+#define ATH6KL_MIN_TRANSFER_SIZE_PER_SCATTER (4 * 1024)
+
+struct ath6kl_irq_proc_registers {
+ u8 host_int_status;
+ u8 cpu_int_status;
+ u8 error_int_status;
+ u8 counter_int_status;
+ u8 mbox_frame;
+ u8 rx_lkahd_valid;
+ u8 host_int_status2;
+ u8 gmbox_rx_avail;
+ __le32 rx_lkahd[2];
+ __le32 rx_gmbox_lkahd_alias[2];
+} __packed;
+
+struct ath6kl_irq_enable_reg {
+ u8 int_status_en;
+ u8 cpu_int_status_en;
+ u8 err_int_status_en;
+ u8 cntr_int_status_en;
+} __packed;
+
+struct ath6kl_device {
+ spinlock_t lock;
+ u8 pad1[A_CACHE_LINE_PAD];
+ struct ath6kl_irq_proc_registers irq_proc_reg;
+ u8 pad2[A_CACHE_LINE_PAD];
+ struct ath6kl_irq_enable_reg irq_en_reg;
+ u8 pad3[A_CACHE_LINE_PAD];
+ struct htc_target *htc_cnxt;
+ struct ath6kl *ar;
+};
+
+int ath6kldev_setup(struct ath6kl_device *dev);
+int ath6kldev_unmask_intrs(struct ath6kl_device *dev);
+int ath6kldev_mask_intrs(struct ath6kl_device *dev);
+int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev,
+ u32 *lk_ahd, int timeout);
+int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx);
+int ath6kldev_disable_intrs(struct ath6kl_device *dev);
+
+int ath6kldev_rw_comp_handler(void *context, int status);
+int ath6kldev_intr_bh_handler(struct ath6kl *ar);
+
+/* Scatter Function and Definitions */
+int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
+ struct hif_scatter_req *scat_req, bool read);
+
+#endif /*ATH6KL_H_ */
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
new file mode 100644
index 00000000000..c1d2366704b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -0,0 +1,1727 @@
+
+/*
+ * Copyright (c) 2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/mmc/sdio_func.h>
+#include "core.h"
+#include "cfg80211.h"
+#include "target.h"
+#include "debug.h"
+#include "hif-ops.h"
+
+unsigned int debug_mask;
+static unsigned int testmode;
+
+module_param(debug_mask, uint, 0644);
+module_param(testmode, uint, 0644);
+
+/*
+ * Include definitions here that can be used to tune the WLAN module
+ * behavior. Different customers can tune the behavior as per their needs,
+ * here.
+ */
+
+/*
+ * This configuration item enable/disable keepalive support.
+ * Keepalive support: In the absence of any data traffic to AP, null
+ * frames will be sent to the AP at periodic interval, to keep the association
+ * active. This configuration item defines the periodic interval.
+ * Use value of zero to disable keepalive support
+ * Default: 60 seconds
+ */
+#define WLAN_CONFIG_KEEP_ALIVE_INTERVAL 60
+
+/*
+ * This configuration item sets the value of disconnect timeout
+ * Firmware delays sending the disconnec event to the host for this
+ * timeout after is gets disconnected from the current AP.
+ * If the firmware successly roams within the disconnect timeout
+ * it sends a new connect event
+ */
+#define WLAN_CONFIG_DISCONNECT_TIMEOUT 10
+
+#define CONFIG_AR600x_DEBUG_UART_TX_PIN 8
+
+#define ATH6KL_DATA_OFFSET 64
+struct sk_buff *ath6kl_buf_alloc(int size)
+{
+ struct sk_buff *skb;
+ u16 reserved;
+
+ /* Add chacheline space at front and back of buffer */
+ reserved = (2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
+ sizeof(struct htc_packet) + ATH6KL_HTC_ALIGN_BYTES;
+ skb = dev_alloc_skb(size + reserved);
+
+ if (skb)
+ skb_reserve(skb, reserved - L1_CACHE_BYTES);
+ return skb;
+}
+
+void ath6kl_init_profile_info(struct ath6kl *ar)
+{
+ ar->ssid_len = 0;
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+
+ ar->dot11_auth_mode = OPEN_AUTH;
+ ar->auth_mode = NONE_AUTH;
+ ar->prwise_crypto = NONE_CRYPT;
+ ar->prwise_crypto_len = 0;
+ ar->grp_crypto = NONE_CRYPT;
+ ar->grp_crypto_len = 0;
+ memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ memset(ar->bssid, 0, sizeof(ar->bssid));
+ ar->bss_ch = 0;
+ ar->nw_type = ar->next_mode = INFRA_NETWORK;
+}
+
+static u8 ath6kl_get_fw_iftype(struct ath6kl *ar)
+{
+ switch (ar->nw_type) {
+ case INFRA_NETWORK:
+ return HI_OPTION_FW_MODE_BSS_STA;
+ case ADHOC_NETWORK:
+ return HI_OPTION_FW_MODE_IBSS;
+ case AP_NETWORK:
+ return HI_OPTION_FW_MODE_AP;
+ default:
+ ath6kl_err("Unsupported interface type :%d\n", ar->nw_type);
+ return 0xff;
+ }
+}
+
+static int ath6kl_set_host_app_area(struct ath6kl *ar)
+{
+ u32 address, data;
+ struct host_app_area host_app_area;
+
+ /* Fetch the address of the host_app_area_s
+ * instance in the host interest area */
+ address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_app_host_interest));
+ address = TARG_VTOP(ar->target_type, address);
+
+ if (ath6kl_diag_read32(ar, address, &data))
+ return -EIO;
+
+ address = TARG_VTOP(ar->target_type, data);
+ host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION;
+ if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area,
+ sizeof(struct host_app_area)))
+ return -EIO;
+
+ return 0;
+}
+
+static inline void set_ac2_ep_map(struct ath6kl *ar,
+ u8 ac,
+ enum htc_endpoint_id ep)
+{
+ ar->ac2ep_map[ac] = ep;
+ ar->ep2ac_map[ep] = ac;
+}
+
+/* connect to a service */
+static int ath6kl_connectservice(struct ath6kl *ar,
+ struct htc_service_connect_req *con_req,
+ char *desc)
+{
+ int status;
+ struct htc_service_connect_resp response;
+
+ memset(&response, 0, sizeof(response));
+
+ status = ath6kl_htc_conn_service(ar->htc_target, con_req, &response);
+ if (status) {
+ ath6kl_err("failed to connect to %s service status:%d\n",
+ desc, status);
+ return status;
+ }
+
+ switch (con_req->svc_id) {
+ case WMI_CONTROL_SVC:
+ if (test_bit(WMI_ENABLED, &ar->flag))
+ ath6kl_wmi_set_control_ep(ar->wmi, response.endpoint);
+ ar->ctrl_ep = response.endpoint;
+ break;
+ case WMI_DATA_BE_SVC:
+ set_ac2_ep_map(ar, WMM_AC_BE, response.endpoint);
+ break;
+ case WMI_DATA_BK_SVC:
+ set_ac2_ep_map(ar, WMM_AC_BK, response.endpoint);
+ break;
+ case WMI_DATA_VI_SVC:
+ set_ac2_ep_map(ar, WMM_AC_VI, response.endpoint);
+ break;
+ case WMI_DATA_VO_SVC:
+ set_ac2_ep_map(ar, WMM_AC_VO, response.endpoint);
+ break;
+ default:
+ ath6kl_err("service id is not mapped %d\n", con_req->svc_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ath6kl_init_service_ep(struct ath6kl *ar)
+{
+ struct htc_service_connect_req connect;
+
+ memset(&connect, 0, sizeof(connect));
+
+ /* these fields are the same for all service endpoints */
+ connect.ep_cb.rx = ath6kl_rx;
+ connect.ep_cb.rx_refill = ath6kl_rx_refill;
+ connect.ep_cb.tx_full = ath6kl_tx_queue_full;
+
+ /*
+ * Set the max queue depth so that our ath6kl_tx_queue_full handler
+ * gets called.
+ */
+ connect.max_txq_depth = MAX_DEFAULT_SEND_QUEUE_DEPTH;
+ connect.ep_cb.rx_refill_thresh = ATH6KL_MAX_RX_BUFFERS / 4;
+ if (!connect.ep_cb.rx_refill_thresh)
+ connect.ep_cb.rx_refill_thresh++;
+
+ /* connect to control service */
+ connect.svc_id = WMI_CONTROL_SVC;
+ if (ath6kl_connectservice(ar, &connect, "WMI CONTROL"))
+ return -EIO;
+
+ connect.flags |= HTC_FLGS_TX_BNDL_PAD_EN;
+
+ /*
+ * Limit the HTC message size on the send path, although e can
+ * receive A-MSDU frames of 4K, we will only send ethernet-sized
+ * (802.3) frames on the send path.
+ */
+ connect.max_rxmsg_sz = WMI_MAX_TX_DATA_FRAME_LENGTH;
+
+ /*
+ * To reduce the amount of committed memory for larger A_MSDU
+ * frames, use the recv-alloc threshold mechanism for larger
+ * packets.
+ */
+ connect.ep_cb.rx_alloc_thresh = ATH6KL_BUFFER_SIZE;
+ connect.ep_cb.rx_allocthresh = ath6kl_alloc_amsdu_rxbuf;
+
+ /*
+ * For the remaining data services set the connection flag to
+ * reduce dribbling, if configured to do so.
+ */
+ connect.conn_flags |= HTC_CONN_FLGS_REDUCE_CRED_DRIB;
+ connect.conn_flags &= ~HTC_CONN_FLGS_THRESH_MASK;
+ connect.conn_flags |= HTC_CONN_FLGS_THRESH_LVL_HALF;
+
+ connect.svc_id = WMI_DATA_BE_SVC;
+
+ if (ath6kl_connectservice(ar, &connect, "WMI DATA BE"))
+ return -EIO;
+
+ /* connect to back-ground map this to WMI LOW_PRI */
+ connect.svc_id = WMI_DATA_BK_SVC;
+ if (ath6kl_connectservice(ar, &connect, "WMI DATA BK"))
+ return -EIO;
+
+ /* connect to Video service, map this to to HI PRI */
+ connect.svc_id = WMI_DATA_VI_SVC;
+ if (ath6kl_connectservice(ar, &connect, "WMI DATA VI"))
+ return -EIO;
+
+ /*
+ * Connect to VO service, this is currently not mapped to a WMI
+ * priority stream due to historical reasons. WMI originally
+ * defined 3 priorities over 3 mailboxes We can change this when
+ * WMI is reworked so that priorities are not dependent on
+ * mailboxes.
+ */
+ connect.svc_id = WMI_DATA_VO_SVC;
+ if (ath6kl_connectservice(ar, &connect, "WMI DATA VO"))
+ return -EIO;
+
+ return 0;
+}
+
+static void ath6kl_init_control_info(struct ath6kl *ar)
+{
+ u8 ctr;
+
+ clear_bit(WMI_ENABLED, &ar->flag);
+ ath6kl_init_profile_info(ar);
+ ar->def_txkey_index = 0;
+ memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
+ ar->ch_hint = 0;
+ ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
+ ar->listen_intvl_b = 0;
+ ar->tx_pwr = 0;
+ clear_bit(SKIP_SCAN, &ar->flag);
+ set_bit(WMM_ENABLED, &ar->flag);
+ ar->intra_bss = 1;
+ memset(&ar->sc_params, 0, sizeof(ar->sc_params));
+ ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT;
+ ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS;
+ ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
+
+ memset((u8 *)ar->sta_list, 0,
+ AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
+
+ spin_lock_init(&ar->mcastpsq_lock);
+
+ /* Init the PS queues */
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ spin_lock_init(&ar->sta_list[ctr].psq_lock);
+ skb_queue_head_init(&ar->sta_list[ctr].psq);
+ }
+
+ skb_queue_head_init(&ar->mcastpsq);
+
+ memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+}
+
+/*
+ * Set HTC/Mbox operational parameters, this can only be called when the
+ * target is in the BMI phase.
+ */
+static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val,
+ u8 htc_ctrl_buf)
+{
+ int status;
+ u32 blk_size;
+
+ blk_size = ar->mbox_info.block_size;
+
+ if (htc_ctrl_buf)
+ blk_size |= ((u32)htc_ctrl_buf) << 16;
+
+ /* set the host interest area for the block size */
+ status = ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_mbox_io_block_sz)),
+ (u8 *)&blk_size,
+ 4);
+ if (status) {
+ ath6kl_err("bmi_write_memory for IO block size failed\n");
+ goto out;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "block size set: %d (target addr:0x%X)\n",
+ blk_size,
+ ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_mbox_io_block_sz)));
+
+ if (mbox_isr_yield_val) {
+ /* set the host interest area for the mbox ISR yield limit */
+ status = ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_mbox_isr_yield_limit)),
+ (u8 *)&mbox_isr_yield_val,
+ 4);
+ if (status) {
+ ath6kl_err("bmi_write_memory for yield limit failed\n");
+ goto out;
+ }
+ }
+
+out:
+ return status;
+}
+
+#define REG_DUMP_COUNT_AR6003 60
+#define REGISTER_DUMP_LEN_MAX 60
+
+static void ath6kl_dump_target_assert_info(struct ath6kl *ar)
+{
+ u32 address;
+ u32 regdump_loc = 0;
+ int status;
+ u32 regdump_val[REGISTER_DUMP_LEN_MAX];
+ u32 i;
+
+ if (ar->target_type != TARGET_TYPE_AR6003)
+ return;
+
+ /* the reg dump pointer is copied to the host interest area */
+ address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
+ address = TARG_VTOP(ar->target_type, address);
+
+ /* read RAM location through diagnostic window */
+ status = ath6kl_diag_read32(ar, address, &regdump_loc);
+
+ if (status || !regdump_loc) {
+ ath6kl_err("failed to get ptr to register dump area\n");
+ return;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "location of register dump data: 0x%X\n",
+ regdump_loc);
+ regdump_loc = TARG_VTOP(ar->target_type, regdump_loc);
+
+ /* fetch register dump data */
+ status = ath6kl_diag_read(ar, regdump_loc, (u8 *)&regdump_val[0],
+ REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
+
+ if (status) {
+ ath6kl_err("failed to get register dump\n");
+ return;
+ }
+ ath6kl_dbg(ATH6KL_DBG_TRC, "Register Dump:\n");
+
+ for (i = 0; i < REG_DUMP_COUNT_AR6003; i++)
+ ath6kl_dbg(ATH6KL_DBG_TRC, " %d : 0x%8.8X\n",
+ i, regdump_val[i]);
+
+}
+
+void ath6kl_target_failure(struct ath6kl *ar)
+{
+ ath6kl_err("target asserted\n");
+
+ /* try dumping target assertion information (if any) */
+ ath6kl_dump_target_assert_info(ar);
+
+}
+
+static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
+{
+ int status = 0;
+ int ret;
+
+ /*
+ * Configure the device for rx dot11 header rules. "0,0" are the
+ * default values. Required if checksum offload is needed. Set
+ * RxMetaVersion to 2.
+ */
+ if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
+ ar->rx_meta_ver, 0, 0)) {
+ ath6kl_err("unable to set the rx frame format\n");
+ status = -EIO;
+ }
+
+ if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN)
+ if ((ath6kl_wmi_pmparams_cmd(ar->wmi, 0, 1, 0, 0, 1,
+ IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
+ ath6kl_err("unable to set power save fail event policy\n");
+ status = -EIO;
+ }
+
+ if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER))
+ if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, 0,
+ WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
+ ath6kl_err("unable to set barker preamble policy\n");
+ status = -EIO;
+ }
+
+ if (ath6kl_wmi_set_keepalive_cmd(ar->wmi,
+ WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) {
+ ath6kl_err("unable to set keep alive interval\n");
+ status = -EIO;
+ }
+
+ if (ath6kl_wmi_disctimeout_cmd(ar->wmi,
+ WLAN_CONFIG_DISCONNECT_TIMEOUT)) {
+ ath6kl_err("unable to set disconnect timeout\n");
+ status = -EIO;
+ }
+
+ if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST))
+ if (ath6kl_wmi_set_wmm_txop(ar->wmi, WMI_TXOP_DISABLED)) {
+ ath6kl_err("unable to set txop bursting\n");
+ status = -EIO;
+ }
+
+ if (ar->p2p) {
+ ret = ath6kl_wmi_info_req_cmd(ar->wmi,
+ P2P_FLAG_CAPABILITIES_REQ |
+ P2P_FLAG_MACADDR_REQ |
+ P2P_FLAG_HMODEL_REQ);
+ if (ret) {
+ ath6kl_dbg(ATH6KL_DBG_TRC, "failed to request P2P "
+ "capabilities (%d) - assuming P2P not "
+ "supported\n", ret);
+ ar->p2p = 0;
+ }
+ }
+
+ if (ar->p2p) {
+ /* Enable Probe Request reporting for P2P */
+ ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, true);
+ if (ret) {
+ ath6kl_dbg(ATH6KL_DBG_TRC, "failed to enable Probe "
+ "Request reporting (%d)\n", ret);
+ }
+ }
+
+ return status;
+}
+
+int ath6kl_configure_target(struct ath6kl *ar)
+{
+ u32 param, ram_reserved_size;
+ u8 fw_iftype;
+
+ fw_iftype = ath6kl_get_fw_iftype(ar);
+ if (fw_iftype == 0xff)
+ return -EINVAL;
+
+ /* Tell target which HTC version it is used*/
+ param = HTC_PROTOCOL_VERSION;
+ if (ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_app_host_interest)),
+ (u8 *)&param, 4) != 0) {
+ ath6kl_err("bmi_write_memory for htc version failed\n");
+ return -EIO;
+ }
+
+ /* set the firmware mode to STA/IBSS/AP */
+ param = 0;
+
+ if (ath6kl_bmi_read(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_option_flag)),
+ (u8 *)&param, 4) != 0) {
+ ath6kl_err("bmi_read_memory for setting fwmode failed\n");
+ return -EIO;
+ }
+
+ param |= (1 << HI_OPTION_NUM_DEV_SHIFT);
+ param |= (fw_iftype << HI_OPTION_FW_MODE_SHIFT);
+ if (ar->p2p && fw_iftype == HI_OPTION_FW_MODE_BSS_STA) {
+ param |= HI_OPTION_FW_SUBMODE_P2PDEV <<
+ HI_OPTION_FW_SUBMODE_SHIFT;
+ }
+ param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
+ param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
+
+ if (ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_option_flag)),
+ (u8 *)&param,
+ 4) != 0) {
+ ath6kl_err("bmi_write_memory for setting fwmode failed\n");
+ return -EIO;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "firmware mode set\n");
+
+ /*
+ * Hardcode the address use for the extended board data
+ * Ideally this should be pre-allocate by the OS at boot time
+ * But since it is a new feature and board data is loaded
+ * at init time, we have to workaround this from host.
+ * It is difficult to patch the firmware boot code,
+ * but possible in theory.
+ */
+
+ param = ar->hw.board_ext_data_addr;
+ ram_reserved_size = ar->hw.reserved_ram_size;
+
+ if (ath6kl_bmi_write(ar, ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_board_ext_data)),
+ (u8 *)&param, 4) != 0) {
+ ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
+ return -EIO;
+ }
+
+ if (ath6kl_bmi_write(ar, ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_end_ram_reserve_sz)),
+ (u8 *)&ram_reserved_size, 4) != 0) {
+ ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
+ return -EIO;
+ }
+
+ /* set the block size for the target */
+ if (ath6kl_set_htc_params(ar, MBOX_YIELD_LIMIT, 0))
+ /* use default number of control buffers */
+ return -EIO;
+
+ return 0;
+}
+
+struct ath6kl *ath6kl_core_alloc(struct device *sdev)
+{
+ struct net_device *dev;
+ struct ath6kl *ar;
+ struct wireless_dev *wdev;
+
+ wdev = ath6kl_cfg80211_init(sdev);
+ if (!wdev) {
+ ath6kl_err("ath6kl_cfg80211_init failed\n");
+ return NULL;
+ }
+
+ ar = wdev_priv(wdev);
+ ar->dev = sdev;
+ ar->wdev = wdev;
+ wdev->iftype = NL80211_IFTYPE_STATION;
+
+ if (ath6kl_debug_init(ar)) {
+ ath6kl_err("Failed to initialize debugfs\n");
+ ath6kl_cfg80211_deinit(ar);
+ return NULL;
+ }
+
+ dev = alloc_netdev(0, "wlan%d", ether_setup);
+ if (!dev) {
+ ath6kl_err("no memory for network device instance\n");
+ ath6kl_cfg80211_deinit(ar);
+ return NULL;
+ }
+
+ dev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = dev;
+ ar->sme_state = SME_DISCONNECTED;
+
+ init_netdev(dev);
+
+ ar->net_dev = dev;
+ set_bit(WLAN_ENABLED, &ar->flag);
+
+ ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
+
+ spin_lock_init(&ar->lock);
+
+ ath6kl_init_control_info(ar);
+ init_waitqueue_head(&ar->event_wq);
+ sema_init(&ar->sem, 1);
+ clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+ INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
+
+ setup_timer(&ar->disconnect_timer, disconnect_timer_handler,
+ (unsigned long) dev);
+
+ return ar;
+}
+
+int ath6kl_unavail_ev(struct ath6kl *ar)
+{
+ ath6kl_destroy(ar->net_dev, 1);
+
+ return 0;
+}
+
+/* firmware upload */
+static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
+ u8 **fw, size_t *fw_len)
+{
+ const struct firmware *fw_entry;
+ int ret;
+
+ ret = request_firmware(&fw_entry, filename, ar->dev);
+ if (ret)
+ return ret;
+
+ *fw_len = fw_entry->size;
+ *fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+
+ if (*fw == NULL)
+ ret = -ENOMEM;
+
+ release_firmware(fw_entry);
+
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static const char *get_target_ver_dir(const struct ath6kl *ar)
+{
+ switch (ar->version.target_ver) {
+ case AR6003_REV1_VERSION:
+ return "ath6k/AR6003/hw1.0";
+ case AR6003_REV2_VERSION:
+ return "ath6k/AR6003/hw2.0";
+ case AR6003_REV3_VERSION:
+ return "ath6k/AR6003/hw2.1.1";
+ }
+ ath6kl_warn("%s: unsupported target version 0x%x.\n", __func__,
+ ar->version.target_ver);
+ return NULL;
+}
+
+/*
+ * Check the device tree for a board-id and use it to construct
+ * the pathname to the firmware file. Used (for now) to find a
+ * fallback to the "bdata.bin" file--typically a symlink to the
+ * appropriate board-specific file.
+ */
+static bool check_device_tree(struct ath6kl *ar)
+{
+ static const char *board_id_prop = "atheros,board-id";
+ struct device_node *node;
+ char board_filename[64];
+ const char *board_id;
+ int ret;
+
+ for_each_compatible_node(node, NULL, "atheros,ath6kl") {
+ board_id = of_get_property(node, board_id_prop, NULL);
+ if (board_id == NULL) {
+ ath6kl_warn("No \"%s\" property on %s node.\n",
+ board_id_prop, node->name);
+ continue;
+ }
+ snprintf(board_filename, sizeof(board_filename),
+ "%s/bdata.%s.bin", get_target_ver_dir(ar), board_id);
+
+ ret = ath6kl_get_fw(ar, board_filename, &ar->fw_board,
+ &ar->fw_board_len);
+ if (ret) {
+ ath6kl_err("Failed to get DT board file %s: %d\n",
+ board_filename, ret);
+ continue;
+ }
+ return true;
+ }
+ return false;
+}
+#else
+static bool check_device_tree(struct ath6kl *ar)
+{
+ return false;
+}
+#endif /* CONFIG_OF */
+
+static int ath6kl_fetch_board_file(struct ath6kl *ar)
+{
+ const char *filename;
+ int ret;
+
+ if (ar->fw_board != NULL)
+ return 0;
+
+ switch (ar->version.target_ver) {
+ case AR6003_REV2_VERSION:
+ filename = AR6003_REV2_BOARD_DATA_FILE;
+ break;
+ case AR6004_REV1_VERSION:
+ filename = AR6004_REV1_BOARD_DATA_FILE;
+ break;
+ default:
+ filename = AR6003_REV3_BOARD_DATA_FILE;
+ break;
+ }
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
+ &ar->fw_board_len);
+ if (ret == 0) {
+ /* managed to get proper board file */
+ return 0;
+ }
+
+ if (check_device_tree(ar)) {
+ /* got board file from device tree */
+ return 0;
+ }
+
+ /* there was no proper board file, try to use default instead */
+ ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n",
+ filename, ret);
+
+ switch (ar->version.target_ver) {
+ case AR6003_REV2_VERSION:
+ filename = AR6003_REV2_DEFAULT_BOARD_DATA_FILE;
+ break;
+ case AR6004_REV1_VERSION:
+ filename = AR6004_REV1_DEFAULT_BOARD_DATA_FILE;
+ break;
+ default:
+ filename = AR6003_REV3_DEFAULT_BOARD_DATA_FILE;
+ break;
+ }
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
+ &ar->fw_board_len);
+ if (ret) {
+ ath6kl_err("Failed to get default board file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+
+ ath6kl_warn("WARNING! No proper board file was not found, instead using a default board file.\n");
+ ath6kl_warn("Most likely your hardware won't work as specified. Install correct board file!\n");
+
+ return 0;
+}
+
+static int ath6kl_fetch_otp_file(struct ath6kl *ar)
+{
+ const char *filename;
+ int ret;
+
+ if (ar->fw_otp != NULL)
+ return 0;
+
+ switch (ar->version.target_ver) {
+ case AR6003_REV2_VERSION:
+ filename = AR6003_REV2_OTP_FILE;
+ break;
+ case AR6004_REV1_VERSION:
+ ath6kl_dbg(ATH6KL_DBG_TRC, "AR6004 doesn't need OTP file\n");
+ return 0;
+ break;
+ default:
+ filename = AR6003_REV3_OTP_FILE;
+ break;
+ }
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_otp,
+ &ar->fw_otp_len);
+ if (ret) {
+ ath6kl_err("Failed to get OTP file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_fetch_fw_file(struct ath6kl *ar)
+{
+ const char *filename;
+ int ret;
+
+ if (ar->fw != NULL)
+ return 0;
+
+ if (testmode) {
+ switch (ar->version.target_ver) {
+ case AR6003_REV2_VERSION:
+ filename = AR6003_REV2_TCMD_FIRMWARE_FILE;
+ break;
+ case AR6003_REV3_VERSION:
+ filename = AR6003_REV3_TCMD_FIRMWARE_FILE;
+ break;
+ case AR6004_REV1_VERSION:
+ ath6kl_warn("testmode not supported with ar6004\n");
+ return -EOPNOTSUPP;
+ default:
+ ath6kl_warn("unknown target version: 0x%x\n",
+ ar->version.target_ver);
+ return -EINVAL;
+ }
+
+ set_bit(TESTMODE, &ar->flag);
+
+ goto get_fw;
+ }
+
+ switch (ar->version.target_ver) {
+ case AR6003_REV2_VERSION:
+ filename = AR6003_REV2_FIRMWARE_FILE;
+ break;
+ case AR6004_REV1_VERSION:
+ filename = AR6004_REV1_FIRMWARE_FILE;
+ break;
+ default:
+ filename = AR6003_REV3_FIRMWARE_FILE;
+ break;
+ }
+
+get_fw:
+ ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
+ if (ret) {
+ ath6kl_err("Failed to get firmware file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_fetch_patch_file(struct ath6kl *ar)
+{
+ const char *filename;
+ int ret;
+
+ switch (ar->version.target_ver) {
+ case AR6003_REV2_VERSION:
+ filename = AR6003_REV2_PATCH_FILE;
+ break;
+ case AR6004_REV1_VERSION:
+ /* FIXME: implement for AR6004 */
+ return 0;
+ break;
+ default:
+ filename = AR6003_REV3_PATCH_FILE;
+ break;
+ }
+
+ if (ar->fw_patch == NULL) {
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
+ &ar->fw_patch_len);
+ if (ret) {
+ ath6kl_err("Failed to get patch file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath6kl_fetch_fw_api1(struct ath6kl *ar)
+{
+ int ret;
+
+ ret = ath6kl_fetch_otp_file(ar);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_fetch_fw_file(ar);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_fetch_patch_file(ar);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
+{
+ size_t magic_len, len, ie_len;
+ const struct firmware *fw;
+ struct ath6kl_fw_ie *hdr;
+ const char *filename;
+ const u8 *data;
+ int ret, ie_id, i, index, bit;
+ __le32 *val;
+
+ switch (ar->version.target_ver) {
+ case AR6003_REV2_VERSION:
+ filename = AR6003_REV2_FIRMWARE_2_FILE;
+ break;
+ case AR6003_REV3_VERSION:
+ filename = AR6003_REV3_FIRMWARE_2_FILE;
+ break;
+ case AR6004_REV1_VERSION:
+ filename = AR6004_REV1_FIRMWARE_2_FILE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = request_firmware(&fw, filename, ar->dev);
+ if (ret)
+ return ret;
+
+ data = fw->data;
+ len = fw->size;
+
+ /* magic also includes the null byte, check that as well */
+ magic_len = strlen(ATH6KL_FIRMWARE_MAGIC) + 1;
+
+ if (len < magic_len) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (memcmp(data, ATH6KL_FIRMWARE_MAGIC, magic_len) != 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ len -= magic_len;
+ data += magic_len;
+
+ /* loop elements */
+ while (len > sizeof(struct ath6kl_fw_ie)) {
+ /* hdr is unaligned! */
+ hdr = (struct ath6kl_fw_ie *) data;
+
+ ie_id = le32_to_cpup(&hdr->id);
+ ie_len = le32_to_cpup(&hdr->len);
+
+ len -= sizeof(*hdr);
+ data += sizeof(*hdr);
+
+ if (len < ie_len) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (ie_id) {
+ case ATH6KL_FW_IE_OTP_IMAGE:
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "found otp image ie (%zd B)\n",
+ ie_len);
+
+ ar->fw_otp = kmemdup(data, ie_len, GFP_KERNEL);
+
+ if (ar->fw_otp == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ar->fw_otp_len = ie_len;
+ break;
+ case ATH6KL_FW_IE_FW_IMAGE:
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "found fw image ie (%zd B)\n",
+ ie_len);
+
+ ar->fw = kmemdup(data, ie_len, GFP_KERNEL);
+
+ if (ar->fw == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ar->fw_len = ie_len;
+ break;
+ case ATH6KL_FW_IE_PATCH_IMAGE:
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "found patch image ie (%zd B)\n",
+ ie_len);
+
+ ar->fw_patch = kmemdup(data, ie_len, GFP_KERNEL);
+
+ if (ar->fw_patch == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ar->fw_patch_len = ie_len;
+ break;
+ case ATH6KL_FW_IE_RESERVED_RAM_SIZE:
+ val = (__le32 *) data;
+ ar->hw.reserved_ram_size = le32_to_cpup(val);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found reserved ram size ie 0x%d\n",
+ ar->hw.reserved_ram_size);
+ break;
+ case ATH6KL_FW_IE_CAPABILITIES:
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found firmware capabilities ie (%zd B)\n",
+ ie_len);
+
+ for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) {
+ index = ALIGN(i, 8) / 8;
+ bit = i % 8;
+
+ if (data[index] & (1 << bit))
+ __set_bit(i, ar->fw_capabilities);
+ }
+
+ ath6kl_dbg_dump(ATH6KL_DBG_BOOT, "capabilities", "",
+ ar->fw_capabilities,
+ sizeof(ar->fw_capabilities));
+ break;
+ case ATH6KL_FW_IE_PATCH_ADDR:
+ if (ie_len != sizeof(*val))
+ break;
+
+ val = (__le32 *) data;
+ ar->hw.dataset_patch_addr = le32_to_cpup(val);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found patch address ie 0x%d\n",
+ ar->hw.dataset_patch_addr);
+ break;
+ default:
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "Unknown fw ie: %u\n",
+ le32_to_cpup(&hdr->id));
+ break;
+ }
+
+ len -= ie_len;
+ data += ie_len;
+ };
+
+ ret = 0;
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int ath6kl_fetch_firmwares(struct ath6kl *ar)
+{
+ int ret;
+
+ ret = ath6kl_fetch_board_file(ar);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_fetch_fw_api2(ar);
+ if (ret == 0) {
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "using fw api 2\n");
+ return 0;
+ }
+
+ ret = ath6kl_fetch_fw_api1(ar);
+ if (ret)
+ return ret;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "using fw api 1\n");
+
+ return 0;
+}
+
+static int ath6kl_upload_board_file(struct ath6kl *ar)
+{
+ u32 board_address, board_ext_address, param;
+ u32 board_data_size, board_ext_data_size;
+ int ret;
+
+ if (WARN_ON(ar->fw_board == NULL))
+ return -ENOENT;
+
+ /*
+ * Determine where in Target RAM to write Board Data.
+ * For AR6004, host determine Target RAM address for
+ * writing board data.
+ */
+ if (ar->target_type == TARGET_TYPE_AR6004) {
+ board_address = AR6004_REV1_BOARD_DATA_ADDRESS;
+ ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_board_data)),
+ (u8 *) &board_address, 4);
+ } else {
+ ath6kl_bmi_read(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_board_data)),
+ (u8 *) &board_address, 4);
+ }
+
+ /* determine where in target ram to write extended board data */
+ ath6kl_bmi_read(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_board_ext_data)),
+ (u8 *) &board_ext_address, 4);
+
+ if (board_ext_address == 0) {
+ ath6kl_err("Failed to get board file target address.\n");
+ return -EINVAL;
+ }
+
+ switch (ar->target_type) {
+ case TARGET_TYPE_AR6003:
+ board_data_size = AR6003_BOARD_DATA_SZ;
+ board_ext_data_size = AR6003_BOARD_EXT_DATA_SZ;
+ break;
+ case TARGET_TYPE_AR6004:
+ board_data_size = AR6004_BOARD_DATA_SZ;
+ board_ext_data_size = AR6004_BOARD_EXT_DATA_SZ;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ break;
+ }
+
+ if (ar->fw_board_len == (board_data_size +
+ board_ext_data_size)) {
+
+ /* write extended board data */
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "writing extended board data to 0x%x (%d B)\n",
+ board_ext_address, board_ext_data_size);
+
+ ret = ath6kl_bmi_write(ar, board_ext_address,
+ ar->fw_board + board_data_size,
+ board_ext_data_size);
+ if (ret) {
+ ath6kl_err("Failed to write extended board data: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* record that extended board data is initialized */
+ param = (board_ext_data_size << 16) | 1;
+
+ ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_board_ext_data_config)),
+ (unsigned char *) &param, 4);
+ }
+
+ if (ar->fw_board_len < board_data_size) {
+ ath6kl_err("Too small board file: %zu\n", ar->fw_board_len);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "writing board file to 0x%x (%d B)\n",
+ board_address, board_data_size);
+
+ ret = ath6kl_bmi_write(ar, board_address, ar->fw_board,
+ board_data_size);
+
+ if (ret) {
+ ath6kl_err("Board file bmi write failed: %d\n", ret);
+ return ret;
+ }
+
+ /* record the fact that Board Data IS initialized */
+ param = 1;
+ ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_board_data_initialized)),
+ (u8 *)&param, 4);
+
+ return ret;
+}
+
+static int ath6kl_upload_otp(struct ath6kl *ar)
+{
+ u32 address, param;
+ int ret;
+
+ if (WARN_ON(ar->fw_otp == NULL))
+ return -ENOENT;
+
+ address = ar->hw.app_load_addr;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "writing otp to 0x%x (%zd B)\n", address,
+ ar->fw_otp_len);
+
+ ret = ath6kl_bmi_fast_download(ar, address, ar->fw_otp,
+ ar->fw_otp_len);
+ if (ret) {
+ ath6kl_err("Failed to upload OTP file: %d\n", ret);
+ return ret;
+ }
+
+ /* read firmware start address */
+ ret = ath6kl_bmi_read(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_app_start)),
+ (u8 *) &address, sizeof(address));
+
+ if (ret) {
+ ath6kl_err("Failed to read hi_app_start: %d\n", ret);
+ return ret;
+ }
+
+ ar->hw.app_start_override_addr = address;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr 0x%x\n",
+ ar->hw.app_start_override_addr);
+
+ /* execute the OTP code */
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n", address);
+ param = 0;
+ ath6kl_bmi_execute(ar, address, &param);
+
+ return ret;
+}
+
+static int ath6kl_upload_firmware(struct ath6kl *ar)
+{
+ u32 address;
+ int ret;
+
+ if (WARN_ON(ar->fw == NULL))
+ return -ENOENT;
+
+ address = ar->hw.app_load_addr;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "writing firmware to 0x%x (%zd B)\n",
+ address, ar->fw_len);
+
+ ret = ath6kl_bmi_fast_download(ar, address, ar->fw, ar->fw_len);
+
+ if (ret) {
+ ath6kl_err("Failed to write firmware: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Set starting address for firmware
+ * Don't need to setup app_start override addr on AR6004
+ */
+ if (ar->target_type != TARGET_TYPE_AR6004) {
+ address = ar->hw.app_start_override_addr;
+ ath6kl_bmi_set_app_start(ar, address);
+ }
+ return ret;
+}
+
+static int ath6kl_upload_patch(struct ath6kl *ar)
+{
+ u32 address, param;
+ int ret;
+
+ if (WARN_ON(ar->fw_patch == NULL))
+ return -ENOENT;
+
+ address = ar->hw.dataset_patch_addr;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "writing patch to 0x%x (%zd B)\n",
+ address, ar->fw_patch_len);
+
+ ret = ath6kl_bmi_write(ar, address, ar->fw_patch, ar->fw_patch_len);
+ if (ret) {
+ ath6kl_err("Failed to write patch file: %d\n", ret);
+ return ret;
+ }
+
+ param = address;
+ ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_dset_list_head)),
+ (unsigned char *) &param, 4);
+
+ return 0;
+}
+
+static int ath6kl_init_upload(struct ath6kl *ar)
+{
+ u32 param, options, sleep, address;
+ int status = 0;
+
+ if (ar->target_type != TARGET_TYPE_AR6003 &&
+ ar->target_type != TARGET_TYPE_AR6004)
+ return -EINVAL;
+
+ /* temporarily disable system sleep */
+ address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
+ status = ath6kl_bmi_reg_read(ar, address, &param);
+ if (status)
+ return status;
+
+ options = param;
+
+ param |= ATH6KL_OPTION_SLEEP_DISABLE;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
+ status = ath6kl_bmi_reg_read(ar, address, &param);
+ if (status)
+ return status;
+
+ sleep = param;
+
+ param |= SM(SYSTEM_SLEEP_DISABLE, 1);
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "old options: %d, old sleep: %d\n",
+ options, sleep);
+
+ /* program analog PLL register */
+ /* no need to control 40/44MHz clock on AR6004 */
+ if (ar->target_type != TARGET_TYPE_AR6004) {
+ status = ath6kl_bmi_reg_write(ar, ATH6KL_ANALOG_PLL_REGISTER,
+ 0xF9104001);
+
+ if (status)
+ return status;
+
+ /* Run at 80/88MHz by default */
+ param = SM(CPU_CLOCK_STANDARD, 1);
+
+ address = RTC_BASE_ADDRESS + CPU_CLOCK_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+ }
+
+ param = 0;
+ address = RTC_BASE_ADDRESS + LPO_CAL_ADDRESS;
+ param = SM(LPO_CAL_ENABLE, 1);
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ /* WAR to avoid SDIO CRC err */
+ if (ar->version.target_ver == AR6003_REV2_VERSION) {
+ ath6kl_err("temporary war to avoid sdio crc error\n");
+
+ param = 0x20;
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN11_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN12_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN13_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+ }
+
+ /* write EEPROM data to Target RAM */
+ status = ath6kl_upload_board_file(ar);
+ if (status)
+ return status;
+
+ /* transfer One time Programmable data */
+ status = ath6kl_upload_otp(ar);
+ if (status)
+ return status;
+
+ /* Download Target firmware */
+ status = ath6kl_upload_firmware(ar);
+ if (status)
+ return status;
+
+ status = ath6kl_upload_patch(ar);
+ if (status)
+ return status;
+
+ /* Restore system sleep */
+ address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, sleep);
+ if (status)
+ return status;
+
+ address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
+ param = options | 0x20;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ /* Configure GPIO AR6003 UART */
+ param = CONFIG_AR600x_DEBUG_UART_TX_PIN;
+ status = ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_dbg_uart_txpin)),
+ (u8 *)&param, 4);
+
+ return status;
+}
+
+static int ath6kl_init_hw_params(struct ath6kl *ar)
+{
+ switch (ar->version.target_ver) {
+ case AR6003_REV2_VERSION:
+ ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS;
+ ar->hw.app_load_addr = AR6003_REV2_APP_LOAD_ADDRESS;
+ ar->hw.board_ext_data_addr = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
+ ar->hw.reserved_ram_size = AR6003_REV2_RAM_RESERVE_SIZE;
+ break;
+ case AR6003_REV3_VERSION:
+ ar->hw.dataset_patch_addr = AR6003_REV3_DATASET_PATCH_ADDRESS;
+ ar->hw.app_load_addr = 0x1234;
+ ar->hw.board_ext_data_addr = AR6003_REV3_BOARD_EXT_DATA_ADDRESS;
+ ar->hw.reserved_ram_size = AR6003_REV3_RAM_RESERVE_SIZE;
+ break;
+ case AR6004_REV1_VERSION:
+ ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS;
+ ar->hw.app_load_addr = AR6003_REV3_APP_LOAD_ADDRESS;
+ ar->hw.board_ext_data_addr = AR6004_REV1_BOARD_EXT_DATA_ADDRESS;
+ ar->hw.reserved_ram_size = AR6004_REV1_RAM_RESERVE_SIZE;
+ break;
+ default:
+ ath6kl_err("Unsupported hardware version: 0x%x\n",
+ ar->version.target_ver);
+ return -EINVAL;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "target_ver 0x%x target_type 0x%x dataset_patch 0x%x app_load_addr 0x%x\n",
+ ar->version.target_ver, ar->target_type,
+ ar->hw.dataset_patch_addr, ar->hw.app_load_addr);
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "app_start_override_addr 0x%x board_ext_data_addr 0x%x reserved_ram_size 0x%x",
+ ar->hw.app_start_override_addr, ar->hw.board_ext_data_addr,
+ ar->hw.reserved_ram_size);
+
+ return 0;
+}
+
+static int ath6kl_init(struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ int status = 0;
+ s32 timeleft;
+
+ if (!ar)
+ return -EIO;
+
+ /* Do we need to finish the BMI phase */
+ if (ath6kl_bmi_done(ar)) {
+ status = -EIO;
+ goto ath6kl_init_done;
+ }
+
+ /* Indicate that WMI is enabled (although not ready yet) */
+ set_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = ath6kl_wmi_init(ar);
+ if (!ar->wmi) {
+ ath6kl_err("failed to initialize wmi\n");
+ status = -EIO;
+ goto ath6kl_init_done;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
+
+ /*
+ * The reason we have to wait for the target here is that the
+ * driver layer has to init BMI in order to set the host block
+ * size.
+ */
+ if (ath6kl_htc_wait_target(ar->htc_target)) {
+ status = -EIO;
+ goto err_node_cleanup;
+ }
+
+ if (ath6kl_init_service_ep(ar)) {
+ status = -EIO;
+ goto err_cleanup_scatter;
+ }
+
+ /* setup access class priority mappings */
+ ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
+ ar->ac_stream_pri_map[WMM_AC_BE] = 1;
+ ar->ac_stream_pri_map[WMM_AC_VI] = 2;
+ ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
+
+ /* give our connected endpoints some buffers */
+ ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
+ ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
+
+ /* allocate some buffers that handle larger AMSDU frames */
+ ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
+
+ /* setup credit distribution */
+ ath6k_setup_credit_dist(ar->htc_target, &ar->credit_state_info);
+
+ ath6kl_cookie_init(ar);
+
+ /* start HTC */
+ status = ath6kl_htc_start(ar->htc_target);
+
+ if (status) {
+ ath6kl_cookie_cleanup(ar);
+ goto err_rxbuf_cleanup;
+ }
+
+ /* Wait for Wmi event to be ready */
+ timeleft = wait_event_interruptible_timeout(ar->event_wq,
+ test_bit(WMI_READY,
+ &ar->flag),
+ WMI_TIMEOUT);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n");
+
+ if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
+ ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
+ ATH6KL_ABI_VERSION, ar->version.abi_ver);
+ status = -EIO;
+ goto err_htc_stop;
+ }
+
+ if (!timeleft || signal_pending(current)) {
+ ath6kl_err("wmi is not ready or wait was interrupted\n");
+ status = -EIO;
+ goto err_htc_stop;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
+
+ /* communicate the wmi protocol verision to the target */
+ if ((ath6kl_set_host_app_area(ar)) != 0)
+ ath6kl_err("unable to set the host app area\n");
+
+ ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
+ ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
+
+ ar->wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+
+ status = ath6kl_target_config_wlan_params(ar);
+ if (!status)
+ goto ath6kl_init_done;
+
+err_htc_stop:
+ ath6kl_htc_stop(ar->htc_target);
+err_rxbuf_cleanup:
+ ath6kl_htc_flush_rx_buf(ar->htc_target);
+ ath6kl_cleanup_amsdu_rxbufs(ar);
+err_cleanup_scatter:
+ ath6kl_hif_cleanup_scatter(ar);
+err_node_cleanup:
+ ath6kl_wmi_shutdown(ar->wmi);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = NULL;
+
+ath6kl_init_done:
+ return status;
+}
+
+int ath6kl_core_init(struct ath6kl *ar)
+{
+ int ret = 0;
+ struct ath6kl_bmi_target_info targ_info;
+
+ ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
+ if (!ar->ath6kl_wq)
+ return -ENOMEM;
+
+ ret = ath6kl_bmi_init(ar);
+ if (ret)
+ goto err_wq;
+
+ ret = ath6kl_bmi_get_target_info(ar, &targ_info);
+ if (ret)
+ goto err_bmi_cleanup;
+
+ ar->version.target_ver = le32_to_cpu(targ_info.version);
+ ar->target_type = le32_to_cpu(targ_info.type);
+ ar->wdev->wiphy->hw_version = le32_to_cpu(targ_info.version);
+
+ ret = ath6kl_init_hw_params(ar);
+ if (ret)
+ goto err_bmi_cleanup;
+
+ ret = ath6kl_configure_target(ar);
+ if (ret)
+ goto err_bmi_cleanup;
+
+ ar->htc_target = ath6kl_htc_create(ar);
+
+ if (!ar->htc_target) {
+ ret = -ENOMEM;
+ goto err_bmi_cleanup;
+ }
+
+ ar->aggr_cntxt = aggr_init(ar->net_dev);
+ if (!ar->aggr_cntxt) {
+ ath6kl_err("failed to initialize aggr\n");
+ ret = -ENOMEM;
+ goto err_htc_cleanup;
+ }
+
+ ret = ath6kl_fetch_firmwares(ar);
+ if (ret)
+ goto err_htc_cleanup;
+
+ ret = ath6kl_init_upload(ar);
+ if (ret)
+ goto err_htc_cleanup;
+
+ ret = ath6kl_init(ar->net_dev);
+ if (ret)
+ goto err_htc_cleanup;
+
+ /* This runs the init function if registered */
+ ret = register_netdev(ar->net_dev);
+ if (ret) {
+ ath6kl_err("register_netdev failed\n");
+ ath6kl_destroy(ar->net_dev, 0);
+ return ret;
+ }
+
+ set_bit(NETDEV_REGISTERED, &ar->flag);
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
+ __func__, ar->net_dev->name, ar->net_dev, ar);
+
+ return ret;
+
+err_htc_cleanup:
+ ath6kl_htc_cleanup(ar->htc_target);
+err_bmi_cleanup:
+ ath6kl_bmi_cleanup(ar);
+err_wq:
+ destroy_workqueue(ar->ath6kl_wq);
+ return ret;
+}
+
+void ath6kl_stop_txrx(struct ath6kl *ar)
+{
+ struct net_device *ndev = ar->net_dev;
+
+ if (!ndev)
+ return;
+
+ set_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("down_interruptible failed\n");
+ return;
+ }
+
+ if (ar->wlan_pwr_state != WLAN_POWER_STATE_CUT_PWR)
+ ath6kl_stop_endpoint(ndev, false, true);
+
+ clear_bit(WLAN_ENABLED, &ar->flag);
+}
+
+/*
+ * We need to differentiate between the surprise and planned removal of the
+ * device because of the following consideration:
+ *
+ * - In case of surprise removal, the hcd already frees up the pending
+ * for the device and hence there is no need to unregister the function
+ * driver inorder to get these requests. For planned removal, the function
+ * driver has to explicitly unregister itself to have the hcd return all the
+ * pending requests before the data structures for the devices are freed up.
+ * Note that as per the current implementation, the function driver will
+ * end up releasing all the devices since there is no API to selectively
+ * release a particular device.
+ *
+ * - Certain commands issued to the target can be skipped for surprise
+ * removal since they will anyway not go through.
+ */
+void ath6kl_destroy(struct net_device *dev, unsigned int unregister)
+{
+ struct ath6kl *ar;
+
+ if (!dev || !ath6kl_priv(dev)) {
+ ath6kl_err("failed to get device structure\n");
+ return;
+ }
+
+ ar = ath6kl_priv(dev);
+
+ destroy_workqueue(ar->ath6kl_wq);
+
+ if (ar->htc_target)
+ ath6kl_htc_cleanup(ar->htc_target);
+
+ aggr_module_destroy(ar->aggr_cntxt);
+
+ ath6kl_cookie_cleanup(ar);
+
+ ath6kl_cleanup_amsdu_rxbufs(ar);
+
+ ath6kl_bmi_cleanup(ar);
+
+ ath6kl_debug_cleanup(ar);
+
+ if (unregister && test_bit(NETDEV_REGISTERED, &ar->flag)) {
+ unregister_netdev(dev);
+ clear_bit(NETDEV_REGISTERED, &ar->flag);
+ }
+
+ free_netdev(dev);
+
+ kfree(ar->fw_board);
+ kfree(ar->fw_otp);
+ kfree(ar->fw);
+ kfree(ar->fw_patch);
+
+ ath6kl_cfg80211_deinit(ar);
+}
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
new file mode 100644
index 00000000000..30b5a53db9e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -0,0 +1,1477 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hif-ops.h"
+#include "cfg80211.h"
+#include "target.h"
+#include "debug.h"
+
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr)
+{
+ struct ath6kl_sta *conn = NULL;
+ u8 i, max_conn;
+
+ max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
+
+ for (i = 0; i < max_conn; i++) {
+ if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
+ conn = &ar->sta_list[i];
+ break;
+ }
+ }
+
+ return conn;
+}
+
+struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid)
+{
+ struct ath6kl_sta *conn = NULL;
+ u8 ctr;
+
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ if (ar->sta_list[ctr].aid == aid) {
+ conn = &ar->sta_list[ctr];
+ break;
+ }
+ }
+ return conn;
+}
+
+static void ath6kl_add_new_sta(struct ath6kl *ar, u8 *mac, u16 aid, u8 *wpaie,
+ u8 ielen, u8 keymgmt, u8 ucipher, u8 auth)
+{
+ struct ath6kl_sta *sta;
+ u8 free_slot;
+
+ free_slot = aid - 1;
+
+ sta = &ar->sta_list[free_slot];
+ memcpy(sta->mac, mac, ETH_ALEN);
+ if (ielen <= ATH6KL_MAX_IE)
+ memcpy(sta->wpa_ie, wpaie, ielen);
+ sta->aid = aid;
+ sta->keymgmt = keymgmt;
+ sta->ucipher = ucipher;
+ sta->auth = auth;
+
+ ar->sta_list_index = ar->sta_list_index | (1 << free_slot);
+ ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid);
+}
+
+static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
+{
+ struct ath6kl_sta *sta = &ar->sta_list[i];
+
+ /* empty the queued pkts in the PS queue if any */
+ spin_lock_bh(&sta->psq_lock);
+ skb_queue_purge(&sta->psq);
+ spin_unlock_bh(&sta->psq_lock);
+
+ memset(&ar->ap_stats.sta[sta->aid - 1], 0,
+ sizeof(struct wmi_per_sta_stat));
+ memset(sta->mac, 0, ETH_ALEN);
+ memset(sta->wpa_ie, 0, ATH6KL_MAX_IE);
+ sta->aid = 0;
+ sta->sta_flags = 0;
+
+ ar->sta_list_index = ar->sta_list_index & ~(1 << i);
+
+}
+
+static u8 ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason)
+{
+ u8 i, removed = 0;
+
+ if (is_zero_ether_addr(mac))
+ return removed;
+
+ if (is_broadcast_ether_addr(mac)) {
+ ath6kl_dbg(ATH6KL_DBG_TRC, "deleting all station\n");
+
+ for (i = 0; i < AP_MAX_NUM_STA; i++) {
+ if (!is_zero_ether_addr(ar->sta_list[i].mac)) {
+ ath6kl_sta_cleanup(ar, i);
+ removed = 1;
+ }
+ }
+ } else {
+ for (i = 0; i < AP_MAX_NUM_STA; i++) {
+ if (memcmp(ar->sta_list[i].mac, mac, ETH_ALEN) == 0) {
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "deleting station %pM aid=%d reason=%d\n",
+ mac, ar->sta_list[i].aid, reason);
+ ath6kl_sta_cleanup(ar, i);
+ removed = 1;
+ break;
+ }
+ }
+ }
+
+ return removed;
+}
+
+enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac)
+{
+ struct ath6kl *ar = devt;
+ return ar->ac2ep_map[ac];
+}
+
+struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar)
+{
+ struct ath6kl_cookie *cookie;
+
+ cookie = ar->cookie_list;
+ if (cookie != NULL) {
+ ar->cookie_list = cookie->arc_list_next;
+ ar->cookie_count--;
+ }
+
+ return cookie;
+}
+
+void ath6kl_cookie_init(struct ath6kl *ar)
+{
+ u32 i;
+
+ ar->cookie_list = NULL;
+ ar->cookie_count = 0;
+
+ memset(ar->cookie_mem, 0, sizeof(ar->cookie_mem));
+
+ for (i = 0; i < MAX_COOKIE_NUM; i++)
+ ath6kl_free_cookie(ar, &ar->cookie_mem[i]);
+}
+
+void ath6kl_cookie_cleanup(struct ath6kl *ar)
+{
+ ar->cookie_list = NULL;
+ ar->cookie_count = 0;
+}
+
+void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie)
+{
+ /* Insert first */
+
+ if (!ar || !cookie)
+ return;
+
+ cookie->arc_list_next = ar->cookie_list;
+ ar->cookie_list = cookie;
+ ar->cookie_count++;
+}
+
+/* set the window address register (using 4-byte register access ). */
+static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
+{
+ int status;
+ s32 i;
+ __le32 addr_val;
+
+ /*
+ * Write bytes 1,2,3 of the register to set the upper address bytes,
+ * the LSB is written last to initiate the access cycle
+ */
+
+ for (i = 1; i <= 3; i++) {
+ /*
+ * Fill the buffer with the address byte value we want to
+ * hit 4 times. No need to worry about endianness as the
+ * same byte is copied to all four bytes of addr_val at
+ * any time.
+ */
+ memset((u8 *)&addr_val, ((u8 *)&addr)[i], 4);
+
+ /*
+ * Hit each byte of the register address with a 4-byte
+ * write operation to the same address, this is a harmless
+ * operation.
+ */
+ status = hif_read_write_sync(ar, reg_addr + i, (u8 *)&addr_val,
+ 4, HIF_WR_SYNC_BYTE_FIX);
+ if (status)
+ break;
+ }
+
+ if (status) {
+ ath6kl_err("failed to write initial bytes of 0x%x to window reg: 0x%X\n",
+ addr, reg_addr);
+ return status;
+ }
+
+ /*
+ * Write the address register again, this time write the whole
+ * 4-byte value. The effect here is that the LSB write causes the
+ * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
+ * effect since we are writing the same values again
+ */
+ addr_val = cpu_to_le32(addr);
+ status = hif_read_write_sync(ar, reg_addr,
+ (u8 *)&(addr_val),
+ 4, HIF_WR_SYNC_BYTE_INC);
+
+ if (status) {
+ ath6kl_err("failed to write 0x%x to window reg: 0x%X\n",
+ addr, reg_addr);
+ return status;
+ }
+
+ return 0;
+}
+
+/*
+ * Read from the hardware through its diagnostic window. No cooperation
+ * from the firmware is required for this.
+ */
+int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value)
+{
+ int ret;
+
+ /* set window register to start read cycle */
+ ret = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, address);
+ if (ret)
+ return ret;
+
+ /* read the data */
+ ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) value,
+ sizeof(*value), HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_warn("failed to read32 through diagnose window: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Write to the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value)
+{
+ int ret;
+
+ /* set write data */
+ ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) &value,
+ sizeof(value), HIF_WR_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("failed to write 0x%x during diagnose window to 0x%d\n",
+ address, value);
+ return ret;
+ }
+
+ /* set window register, which starts the write cycle */
+ return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
+ address);
+}
+
+int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length)
+{
+ u32 count, *buf = data;
+ int ret;
+
+ if (WARN_ON(length % 4))
+ return -EINVAL;
+
+ for (count = 0; count < length / 4; count++, address += 4) {
+ ret = ath6kl_diag_read32(ar, address, &buf[count]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length)
+{
+ u32 count;
+ __le32 *buf = data;
+ int ret;
+
+ if (WARN_ON(length % 4))
+ return -EINVAL;
+
+ for (count = 0; count < length / 4; count++, address += 4) {
+ ret = ath6kl_diag_write32(ar, address, buf[count]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_read_fwlogs(struct ath6kl *ar)
+{
+ struct ath6kl_dbglog_hdr debug_hdr;
+ struct ath6kl_dbglog_buf debug_buf;
+ u32 address, length, dropped, firstbuf, debug_hdr_addr;
+ int ret = 0, loop;
+ u8 *buf;
+
+ buf = kmalloc(ATH6KL_FWLOG_PAYLOAD_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ address = TARG_VTOP(ar->target_type,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_dbglog_hdr)));
+
+ ret = ath6kl_diag_read32(ar, address, &debug_hdr_addr);
+ if (ret)
+ goto out;
+
+ /* Get the contents of the ring buffer */
+ if (debug_hdr_addr == 0) {
+ ath6kl_warn("Invalid address for debug_hdr_addr\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ address = TARG_VTOP(ar->target_type, debug_hdr_addr);
+ ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr));
+
+ address = TARG_VTOP(ar->target_type,
+ le32_to_cpu(debug_hdr.dbuf_addr));
+ firstbuf = address;
+ dropped = le32_to_cpu(debug_hdr.dropped);
+ ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
+
+ loop = 100;
+
+ do {
+ address = TARG_VTOP(ar->target_type,
+ le32_to_cpu(debug_buf.buffer_addr));
+ length = le32_to_cpu(debug_buf.length);
+
+ if (length != 0 && (le32_to_cpu(debug_buf.length) <=
+ le32_to_cpu(debug_buf.bufsize))) {
+ length = ALIGN(length, 4);
+
+ ret = ath6kl_diag_read(ar, address,
+ buf, length);
+ if (ret)
+ goto out;
+
+ ath6kl_debug_fwlog_event(ar, buf, length);
+ }
+
+ address = TARG_VTOP(ar->target_type,
+ le32_to_cpu(debug_buf.next));
+ ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
+ if (ret)
+ goto out;
+
+ loop--;
+
+ if (WARN_ON(loop == 0)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ } while (address != firstbuf);
+
+out:
+ kfree(buf);
+
+ return ret;
+}
+
+/* FIXME: move to a better place, target.h? */
+#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
+#define AR6004_RESET_CONTROL_ADDRESS 0x00004000
+
+static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
+ bool wait_fot_compltn, bool cold_reset)
+{
+ int status = 0;
+ u32 address;
+ __le32 data;
+
+ if (target_type != TARGET_TYPE_AR6003 &&
+ target_type != TARGET_TYPE_AR6004)
+ return;
+
+ data = cold_reset ? cpu_to_le32(RESET_CONTROL_COLD_RST) :
+ cpu_to_le32(RESET_CONTROL_MBOX_RST);
+
+ switch (target_type) {
+ case TARGET_TYPE_AR6003:
+ address = AR6003_RESET_CONTROL_ADDRESS;
+ break;
+ case TARGET_TYPE_AR6004:
+ address = AR6004_RESET_CONTROL_ADDRESS;
+ break;
+ default:
+ address = AR6003_RESET_CONTROL_ADDRESS;
+ break;
+ }
+
+ status = ath6kl_diag_write32(ar, address, data);
+
+ if (status)
+ ath6kl_err("failed to reset target\n");
+}
+
+void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
+ bool get_dbglogs)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ bool discon_issued;
+
+ netif_stop_queue(dev);
+
+ /* disable the target and the interrupts associated with it */
+ if (test_bit(WMI_READY, &ar->flag)) {
+ discon_issued = (test_bit(CONNECTED, &ar->flag) ||
+ test_bit(CONNECT_PEND, &ar->flag));
+ ath6kl_disconnect(ar);
+ if (!keep_profile)
+ ath6kl_init_profile_info(ar);
+
+ del_timer(&ar->disconnect_timer);
+
+ clear_bit(WMI_READY, &ar->flag);
+ ath6kl_wmi_shutdown(ar->wmi);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = NULL;
+
+ /*
+ * After wmi_shudown all WMI events will be dropped. We
+ * need to cleanup the buffers allocated in AP mode and
+ * give disconnect notification to stack, which usually
+ * happens in the disconnect_event. Simulate the disconnect
+ * event by calling the function directly. Sometimes
+ * disconnect_event will be received when the debug logs
+ * are collected.
+ */
+ if (discon_issued)
+ ath6kl_disconnect_event(ar, DISCONNECT_CMD,
+ (ar->nw_type & AP_NETWORK) ?
+ bcast_mac : ar->bssid,
+ 0, NULL, 0);
+
+ ar->user_key_ctrl = 0;
+
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "%s: wmi is not ready 0x%p 0x%p\n",
+ __func__, ar, ar->wmi);
+
+ /* Shut down WMI if we have started it */
+ if (test_bit(WMI_ENABLED, &ar->flag)) {
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "%s: shut down wmi\n", __func__);
+ ath6kl_wmi_shutdown(ar->wmi);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = NULL;
+ }
+ }
+
+ if (ar->htc_target) {
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
+ ath6kl_htc_stop(ar->htc_target);
+ }
+
+ /*
+ * Try to reset the device if we can. The driver may have been
+ * configure NOT to reset the target during a debug session.
+ */
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "attempting to reset target on instance destroy\n");
+ ath6kl_reset_device(ar, ar->target_type, true, true);
+}
+
+static void ath6kl_install_static_wep_keys(struct ath6kl *ar)
+{
+ u8 index;
+ u8 keyusage;
+
+ for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
+ if (ar->wep_key_list[index].key_len) {
+ keyusage = GROUP_USAGE;
+ if (index == ar->def_txkey_index)
+ keyusage |= TX_USAGE;
+
+ ath6kl_wmi_addkey_cmd(ar->wmi,
+ index,
+ WEP_CRYPT,
+ keyusage,
+ ar->wep_key_list[index].key_len,
+ NULL,
+ ar->wep_key_list[index].key,
+ KEY_OP_INIT_VAL, NULL,
+ NO_SYNC_WMIFLAG);
+ }
+ }
+}
+
+void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
+{
+ struct ath6kl_req_key *ik;
+ int res;
+ u8 key_rsc[ATH6KL_KEY_SEQ_LEN];
+
+ ik = &ar->ap_mode_bkey;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel);
+
+ switch (ar->auth_mode) {
+ case NONE_AUTH:
+ if (ar->prwise_crypto == WEP_CRYPT)
+ ath6kl_install_static_wep_keys(ar);
+ break;
+ case WPA_PSK_AUTH:
+ case WPA2_PSK_AUTH:
+ case (WPA_PSK_AUTH | WPA2_PSK_AUTH):
+ if (!ik->valid)
+ break;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed addkey for "
+ "the initial group key for AP mode\n");
+ memset(key_rsc, 0, sizeof(key_rsc));
+ res = ath6kl_wmi_addkey_cmd(
+ ar->wmi, ik->key_index, ik->key_type,
+ GROUP_USAGE, ik->key_len, key_rsc, ik->key,
+ KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
+ if (res) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed "
+ "addkey failed: %d\n", res);
+ }
+ break;
+ }
+
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ set_bit(CONNECTED, &ar->flag);
+ netif_carrier_on(ar->net_dev);
+}
+
+void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
+ u8 keymgmt, u8 ucipher, u8 auth,
+ u8 assoc_req_len, u8 *assoc_info)
+{
+ u8 *ies = NULL, *wpa_ie = NULL, *pos;
+ size_t ies_len = 0;
+ struct station_info sinfo;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
+
+ if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
+ struct ieee80211_mgmt *mgmt =
+ (struct ieee80211_mgmt *) assoc_info;
+ if (ieee80211_is_assoc_req(mgmt->frame_control) &&
+ assoc_req_len >= sizeof(struct ieee80211_hdr_3addr) +
+ sizeof(mgmt->u.assoc_req)) {
+ ies = mgmt->u.assoc_req.variable;
+ ies_len = assoc_info + assoc_req_len - ies;
+ } else if (ieee80211_is_reassoc_req(mgmt->frame_control) &&
+ assoc_req_len >= sizeof(struct ieee80211_hdr_3addr)
+ + sizeof(mgmt->u.reassoc_req)) {
+ ies = mgmt->u.reassoc_req.variable;
+ ies_len = assoc_info + assoc_req_len - ies;
+ }
+ }
+
+ pos = ies;
+ while (pos && pos + 1 < ies + ies_len) {
+ if (pos + 2 + pos[1] > ies + ies_len)
+ break;
+ if (pos[0] == WLAN_EID_RSN)
+ wpa_ie = pos; /* RSN IE */
+ else if (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
+ pos[1] >= 4 &&
+ pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2) {
+ if (pos[5] == 0x01)
+ wpa_ie = pos; /* WPA IE */
+ else if (pos[5] == 0x04) {
+ wpa_ie = pos; /* WPS IE */
+ break; /* overrides WPA/RSN IE */
+ }
+ }
+ pos += 2 + pos[1];
+ }
+
+ ath6kl_add_new_sta(ar, mac_addr, aid, wpa_ie,
+ wpa_ie ? 2 + wpa_ie[1] : 0,
+ keymgmt, ucipher, auth);
+
+ /* send event to application */
+ memset(&sinfo, 0, sizeof(sinfo));
+
+ /* TODO: sinfo.generation */
+
+ sinfo.assoc_req_ies = ies;
+ sinfo.assoc_req_ies_len = ies_len;
+ sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
+
+ cfg80211_new_sta(ar->net_dev, mac_addr, &sinfo, GFP_KERNEL);
+
+ netif_wake_queue(ar->net_dev);
+}
+
+/* Functions for Tx credit handling */
+void ath6k_credit_init(struct htc_credit_state_info *cred_info,
+ struct list_head *ep_list,
+ int tot_credits)
+{
+ struct htc_endpoint_credit_dist *cur_ep_dist;
+ int count;
+
+ cred_info->cur_free_credits = tot_credits;
+ cred_info->total_avail_credits = tot_credits;
+
+ list_for_each_entry(cur_ep_dist, ep_list, list) {
+ if (cur_ep_dist->endpoint == ENDPOINT_0)
+ continue;
+
+ cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
+
+ if (tot_credits > 4)
+ if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
+ (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
+ ath6kl_deposit_credit_to_ep(cred_info,
+ cur_ep_dist,
+ cur_ep_dist->cred_min);
+ cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+ }
+
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
+ ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
+ cur_ep_dist->cred_min);
+ /*
+ * Control service is always marked active, it
+ * never goes inactive EVER.
+ */
+ cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+ } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
+ /* this is the lowest priority data endpoint */
+ cred_info->lowestpri_ep_dist = cur_ep_dist->list;
+
+ /*
+ * Streams have to be created (explicit | implicit) for all
+ * kinds of traffic. BE endpoints are also inactive in the
+ * beginning. When BE traffic starts it creates implicit
+ * streams that redistributes credits.
+ *
+ * Note: all other endpoints have minimums set but are
+ * initially given NO credits. credits will be distributed
+ * as traffic activity demands
+ */
+ }
+
+ WARN_ON(cred_info->cur_free_credits <= 0);
+
+ list_for_each_entry(cur_ep_dist, ep_list, list) {
+ if (cur_ep_dist->endpoint == ENDPOINT_0)
+ continue;
+
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
+ cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
+ else {
+ /*
+ * For the remaining data endpoints, we assume that
+ * each cred_per_msg are the same. We use a simple
+ * calculation here, we take the remaining credits
+ * and determine how many max messages this can
+ * cover and then set each endpoint's normal value
+ * equal to 3/4 this amount.
+ */
+ count = (cred_info->cur_free_credits /
+ cur_ep_dist->cred_per_msg)
+ * cur_ep_dist->cred_per_msg;
+ count = (count * 3) >> 2;
+ count = max(count, cur_ep_dist->cred_per_msg);
+ cur_ep_dist->cred_norm = count;
+
+ }
+ }
+}
+
+/* initialize and setup credit distribution */
+int ath6k_setup_credit_dist(void *htc_handle,
+ struct htc_credit_state_info *cred_info)
+{
+ u16 servicepriority[5];
+
+ memset(cred_info, 0, sizeof(struct htc_credit_state_info));
+
+ servicepriority[0] = WMI_CONTROL_SVC; /* highest */
+ servicepriority[1] = WMI_DATA_VO_SVC;
+ servicepriority[2] = WMI_DATA_VI_SVC;
+ servicepriority[3] = WMI_DATA_BE_SVC;
+ servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
+
+ /* set priority list */
+ ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
+
+ return 0;
+}
+
+/* reduce an ep's credits back to a set limit */
+static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist,
+ int limit)
+{
+ int credits;
+
+ ep_dist->cred_assngd = limit;
+
+ if (ep_dist->credits <= limit)
+ return;
+
+ credits = ep_dist->credits - limit;
+ ep_dist->credits -= credits;
+ cred_info->cur_free_credits += credits;
+}
+
+static void ath6k_credit_update(struct htc_credit_state_info *cred_info,
+ struct list_head *epdist_list)
+{
+ struct htc_endpoint_credit_dist *cur_dist_list;
+
+ list_for_each_entry(cur_dist_list, epdist_list, list) {
+ if (cur_dist_list->endpoint == ENDPOINT_0)
+ continue;
+
+ if (cur_dist_list->cred_to_dist > 0) {
+ cur_dist_list->credits +=
+ cur_dist_list->cred_to_dist;
+ cur_dist_list->cred_to_dist = 0;
+ if (cur_dist_list->credits >
+ cur_dist_list->cred_assngd)
+ ath6k_reduce_credits(cred_info,
+ cur_dist_list,
+ cur_dist_list->cred_assngd);
+
+ if (cur_dist_list->credits >
+ cur_dist_list->cred_norm)
+ ath6k_reduce_credits(cred_info, cur_dist_list,
+ cur_dist_list->cred_norm);
+
+ if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
+ if (cur_dist_list->txq_depth == 0)
+ ath6k_reduce_credits(cred_info,
+ cur_dist_list, 0);
+ }
+ }
+ }
+}
+
+/*
+ * HTC has an endpoint that needs credits, ep_dist is the endpoint in
+ * question.
+ */
+void ath6k_seek_credits(struct htc_credit_state_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist)
+{
+ struct htc_endpoint_credit_dist *curdist_list;
+ int credits = 0;
+ int need;
+
+ if (ep_dist->svc_id == WMI_CONTROL_SVC)
+ goto out;
+
+ if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
+ (ep_dist->svc_id == WMI_DATA_VO_SVC))
+ if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
+ goto out;
+
+ /*
+ * For all other services, we follow a simple algorithm of:
+ *
+ * 1. checking the free pool for credits
+ * 2. checking lower priority endpoints for credits to take
+ */
+
+ credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+ if (credits >= ep_dist->seek_cred)
+ goto out;
+
+ /*
+ * We don't have enough in the free pool, try taking away from
+ * lower priority services The rule for taking away credits:
+ *
+ * 1. Only take from lower priority endpoints
+ * 2. Only take what is allocated above the minimum (never
+ * starve an endpoint completely)
+ * 3. Only take what you need.
+ */
+
+ list_for_each_entry_reverse(curdist_list,
+ &cred_info->lowestpri_ep_dist,
+ list) {
+ if (curdist_list == ep_dist)
+ break;
+
+ need = ep_dist->seek_cred - cred_info->cur_free_credits;
+
+ if ((curdist_list->cred_assngd - need) >=
+ curdist_list->cred_min) {
+ /*
+ * The current one has been allocated more than
+ * it's minimum and it has enough credits assigned
+ * above it's minimum to fulfill our need try to
+ * take away just enough to fulfill our need.
+ */
+ ath6k_reduce_credits(cred_info, curdist_list,
+ curdist_list->cred_assngd - need);
+
+ if (cred_info->cur_free_credits >=
+ ep_dist->seek_cred)
+ break;
+ }
+
+ if (curdist_list->endpoint == ENDPOINT_0)
+ break;
+ }
+
+ credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+out:
+ /* did we find some credits? */
+ if (credits)
+ ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);
+
+ ep_dist->seek_cred = 0;
+}
+
+/* redistribute credits based on activity change */
+static void ath6k_redistribute_credits(struct htc_credit_state_info *info,
+ struct list_head *ep_dist_list)
+{
+ struct htc_endpoint_credit_dist *curdist_list;
+
+ list_for_each_entry(curdist_list, ep_dist_list, list) {
+ if (curdist_list->endpoint == ENDPOINT_0)
+ continue;
+
+ if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
+ (curdist_list->svc_id == WMI_DATA_BE_SVC))
+ curdist_list->dist_flags |= HTC_EP_ACTIVE;
+
+ if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
+ !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
+ if (curdist_list->txq_depth == 0)
+ ath6k_reduce_credits(info,
+ curdist_list, 0);
+ else
+ ath6k_reduce_credits(info,
+ curdist_list,
+ curdist_list->cred_min);
+ }
+ }
+}
+
+/*
+ *
+ * This function is invoked whenever endpoints require credit
+ * distributions. A lock is held while this function is invoked, this
+ * function shall NOT block. The ep_dist_list is a list of distribution
+ * structures in prioritized order as defined by the call to the
+ * htc_set_credit_dist() api.
+ */
+void ath6k_credit_distribute(struct htc_credit_state_info *cred_info,
+ struct list_head *ep_dist_list,
+ enum htc_credit_dist_reason reason)
+{
+ switch (reason) {
+ case HTC_CREDIT_DIST_SEND_COMPLETE:
+ ath6k_credit_update(cred_info, ep_dist_list);
+ break;
+ case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
+ ath6k_redistribute_credits(cred_info, ep_dist_list);
+ break;
+ default:
+ break;
+ }
+
+ WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
+ WARN_ON(cred_info->cur_free_credits < 0);
+}
+
+void disconnect_timer_handler(unsigned long ptr)
+{
+ struct net_device *dev = (struct net_device *)ptr;
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ ath6kl_init_profile_info(ar);
+ ath6kl_disconnect(ar);
+}
+
+void ath6kl_disconnect(struct ath6kl *ar)
+{
+ if (test_bit(CONNECTED, &ar->flag) ||
+ test_bit(CONNECT_PEND, &ar->flag)) {
+ ath6kl_wmi_disconnect_cmd(ar->wmi);
+ /*
+ * Disconnect command is issued, clear the connect pending
+ * flag. The connected flag will be cleared in
+ * disconnect event notification.
+ */
+ clear_bit(CONNECT_PEND, &ar->flag);
+ }
+}
+
+void ath6kl_deep_sleep_enable(struct ath6kl *ar)
+{
+ switch (ar->sme_state) {
+ case SME_CONNECTING:
+ cfg80211_connect_result(ar->net_dev, ar->bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ break;
+ case SME_CONNECTED:
+ default:
+ /*
+ * FIXME: oddly enough smeState is in DISCONNECTED during
+ * suspend, why? Need to send disconnected event in that
+ * state.
+ */
+ cfg80211_disconnected(ar->net_dev, 0, NULL, 0, GFP_KERNEL);
+ break;
+ }
+
+ if (test_bit(CONNECTED, &ar->flag) ||
+ test_bit(CONNECT_PEND, &ar->flag))
+ ath6kl_wmi_disconnect_cmd(ar->wmi);
+
+ ar->sme_state = SME_DISCONNECTED;
+
+ /* disable scanning */
+ if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0) != 0)
+ printk(KERN_WARNING "ath6kl: failed to disable scan "
+ "during suspend\n");
+
+ ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
+}
+
+/* WMI Event handlers */
+
+static const char *get_hw_id_string(u32 id)
+{
+ switch (id) {
+ case AR6003_REV1_VERSION:
+ return "1.0";
+ case AR6003_REV2_VERSION:
+ return "2.0";
+ case AR6003_REV3_VERSION:
+ return "2.1.1";
+ default:
+ return "unknown";
+ }
+}
+
+void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
+{
+ struct ath6kl *ar = devt;
+ struct net_device *dev = ar->net_dev;
+
+ memcpy(dev->dev_addr, datap, ETH_ALEN);
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
+ __func__, dev->dev_addr);
+
+ ar->version.wlan_ver = sw_ver;
+ ar->version.abi_ver = abi_ver;
+
+ snprintf(ar->wdev->wiphy->fw_version,
+ sizeof(ar->wdev->wiphy->fw_version),
+ "%u.%u.%u.%u",
+ (ar->version.wlan_ver & 0xf0000000) >> 28,
+ (ar->version.wlan_ver & 0x0f000000) >> 24,
+ (ar->version.wlan_ver & 0x00ff0000) >> 16,
+ (ar->version.wlan_ver & 0x0000ffff));
+
+ /* indicate to the waiting thread that the ready event was received */
+ set_bit(WMI_READY, &ar->flag);
+ wake_up(&ar->event_wq);
+
+ ath6kl_info("hw %s fw %s%s\n",
+ get_hw_id_string(ar->wdev->wiphy->hw_version),
+ ar->wdev->wiphy->fw_version,
+ test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
+}
+
+void ath6kl_scan_complete_evt(struct ath6kl *ar, int status)
+{
+ ath6kl_cfg80211_scan_complete_event(ar, status);
+
+ if (!ar->usr_bss_filter) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status);
+}
+
+void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid,
+ u16 listen_int, u16 beacon_int,
+ enum network_type net_type, u8 beacon_ie_len,
+ u8 assoc_req_len, u8 assoc_resp_len,
+ u8 *assoc_info)
+{
+ unsigned long flags;
+
+ ath6kl_cfg80211_connect_event(ar, channel, bssid,
+ listen_int, beacon_int,
+ net_type, beacon_ie_len,
+ assoc_req_len, assoc_resp_len,
+ assoc_info);
+
+ memcpy(ar->bssid, bssid, sizeof(ar->bssid));
+ ar->bss_ch = channel;
+
+ if ((ar->nw_type == INFRA_NETWORK))
+ ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t,
+ ar->listen_intvl_b);
+
+ netif_wake_queue(ar->net_dev);
+
+ /* Update connect & link status atomically */
+ spin_lock_irqsave(&ar->lock, flags);
+ set_bit(CONNECTED, &ar->flag);
+ clear_bit(CONNECT_PEND, &ar->flag);
+ netif_carrier_on(ar->net_dev);
+ spin_unlock_irqrestore(&ar->lock, flags);
+
+ aggr_reset_state(ar->aggr_cntxt);
+ ar->reconnect_flag = 0;
+
+ if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
+ memset(ar->node_map, 0, sizeof(ar->node_map));
+ ar->node_num = 0;
+ ar->next_ep_id = ENDPOINT_2;
+ }
+
+ if (!ar->usr_bss_filter) {
+ set_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, CURRENT_BSS_FILTER, 0);
+ }
+}
+
+void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
+{
+ struct ath6kl_sta *sta;
+ u8 tsc[6];
+ /*
+ * For AP case, keyid will have aid of STA which sent pkt with
+ * MIC error. Use this aid to get MAC & send it to hostapd.
+ */
+ if (ar->nw_type == AP_NETWORK) {
+ sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
+ if (!sta)
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "ap tkip mic error received from aid=%d\n", keyid);
+
+ memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
+ cfg80211_michael_mic_failure(ar->net_dev, sta->mac,
+ NL80211_KEYTYPE_PAIRWISE, keyid,
+ tsc, GFP_KERNEL);
+ } else
+ ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast);
+
+}
+
+static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
+{
+ struct wmi_target_stats *tgt_stats =
+ (struct wmi_target_stats *) ptr;
+ struct target_stats *stats = &ar->target_stats;
+ struct tkip_ccmp_stats *ccmp_stats;
+ u8 ac;
+
+ if (len < sizeof(*tgt_stats))
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "updating target stats\n");
+
+ stats->tx_pkt += le32_to_cpu(tgt_stats->stats.tx.pkt);
+ stats->tx_byte += le32_to_cpu(tgt_stats->stats.tx.byte);
+ stats->tx_ucast_pkt += le32_to_cpu(tgt_stats->stats.tx.ucast_pkt);
+ stats->tx_ucast_byte += le32_to_cpu(tgt_stats->stats.tx.ucast_byte);
+ stats->tx_mcast_pkt += le32_to_cpu(tgt_stats->stats.tx.mcast_pkt);
+ stats->tx_mcast_byte += le32_to_cpu(tgt_stats->stats.tx.mcast_byte);
+ stats->tx_bcast_pkt += le32_to_cpu(tgt_stats->stats.tx.bcast_pkt);
+ stats->tx_bcast_byte += le32_to_cpu(tgt_stats->stats.tx.bcast_byte);
+ stats->tx_rts_success_cnt +=
+ le32_to_cpu(tgt_stats->stats.tx.rts_success_cnt);
+
+ for (ac = 0; ac < WMM_NUM_AC; ac++)
+ stats->tx_pkt_per_ac[ac] +=
+ le32_to_cpu(tgt_stats->stats.tx.pkt_per_ac[ac]);
+
+ stats->tx_err += le32_to_cpu(tgt_stats->stats.tx.err);
+ stats->tx_fail_cnt += le32_to_cpu(tgt_stats->stats.tx.fail_cnt);
+ stats->tx_retry_cnt += le32_to_cpu(tgt_stats->stats.tx.retry_cnt);
+ stats->tx_mult_retry_cnt +=
+ le32_to_cpu(tgt_stats->stats.tx.mult_retry_cnt);
+ stats->tx_rts_fail_cnt +=
+ le32_to_cpu(tgt_stats->stats.tx.rts_fail_cnt);
+ stats->tx_ucast_rate =
+ ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.tx.ucast_rate));
+
+ stats->rx_pkt += le32_to_cpu(tgt_stats->stats.rx.pkt);
+ stats->rx_byte += le32_to_cpu(tgt_stats->stats.rx.byte);
+ stats->rx_ucast_pkt += le32_to_cpu(tgt_stats->stats.rx.ucast_pkt);
+ stats->rx_ucast_byte += le32_to_cpu(tgt_stats->stats.rx.ucast_byte);
+ stats->rx_mcast_pkt += le32_to_cpu(tgt_stats->stats.rx.mcast_pkt);
+ stats->rx_mcast_byte += le32_to_cpu(tgt_stats->stats.rx.mcast_byte);
+ stats->rx_bcast_pkt += le32_to_cpu(tgt_stats->stats.rx.bcast_pkt);
+ stats->rx_bcast_byte += le32_to_cpu(tgt_stats->stats.rx.bcast_byte);
+ stats->rx_frgment_pkt += le32_to_cpu(tgt_stats->stats.rx.frgment_pkt);
+ stats->rx_err += le32_to_cpu(tgt_stats->stats.rx.err);
+ stats->rx_crc_err += le32_to_cpu(tgt_stats->stats.rx.crc_err);
+ stats->rx_key_cache_miss +=
+ le32_to_cpu(tgt_stats->stats.rx.key_cache_miss);
+ stats->rx_decrypt_err += le32_to_cpu(tgt_stats->stats.rx.decrypt_err);
+ stats->rx_dupl_frame += le32_to_cpu(tgt_stats->stats.rx.dupl_frame);
+ stats->rx_ucast_rate =
+ ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.rx.ucast_rate));
+
+ ccmp_stats = &tgt_stats->stats.tkip_ccmp_stats;
+
+ stats->tkip_local_mic_fail +=
+ le32_to_cpu(ccmp_stats->tkip_local_mic_fail);
+ stats->tkip_cnter_measures_invoked +=
+ le32_to_cpu(ccmp_stats->tkip_cnter_measures_invoked);
+ stats->tkip_fmt_err += le32_to_cpu(ccmp_stats->tkip_fmt_err);
+
+ stats->ccmp_fmt_err += le32_to_cpu(ccmp_stats->ccmp_fmt_err);
+ stats->ccmp_replays += le32_to_cpu(ccmp_stats->ccmp_replays);
+
+ stats->pwr_save_fail_cnt +=
+ le32_to_cpu(tgt_stats->pm_stats.pwr_save_failure_cnt);
+ stats->noise_floor_calib =
+ a_sle32_to_cpu(tgt_stats->noise_floor_calib);
+
+ stats->cs_bmiss_cnt +=
+ le32_to_cpu(tgt_stats->cserv_stats.cs_bmiss_cnt);
+ stats->cs_low_rssi_cnt +=
+ le32_to_cpu(tgt_stats->cserv_stats.cs_low_rssi_cnt);
+ stats->cs_connect_cnt +=
+ le16_to_cpu(tgt_stats->cserv_stats.cs_connect_cnt);
+ stats->cs_discon_cnt +=
+ le16_to_cpu(tgt_stats->cserv_stats.cs_discon_cnt);
+
+ stats->cs_ave_beacon_rssi =
+ a_sle16_to_cpu(tgt_stats->cserv_stats.cs_ave_beacon_rssi);
+
+ stats->cs_last_roam_msec =
+ tgt_stats->cserv_stats.cs_last_roam_msec;
+ stats->cs_snr = tgt_stats->cserv_stats.cs_snr;
+ stats->cs_rssi = a_sle16_to_cpu(tgt_stats->cserv_stats.cs_rssi);
+
+ stats->lq_val = le32_to_cpu(tgt_stats->lq_val);
+
+ stats->wow_pkt_dropped +=
+ le32_to_cpu(tgt_stats->wow_stats.wow_pkt_dropped);
+ stats->wow_host_pkt_wakeups +=
+ tgt_stats->wow_stats.wow_host_pkt_wakeups;
+ stats->wow_host_evt_wakeups +=
+ tgt_stats->wow_stats.wow_host_evt_wakeups;
+ stats->wow_evt_discarded +=
+ le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
+
+ if (test_bit(STATS_UPDATE_PEND, &ar->flag)) {
+ clear_bit(STATS_UPDATE_PEND, &ar->flag);
+ wake_up(&ar->event_wq);
+ }
+}
+
+static void ath6kl_add_le32(__le32 *var, __le32 val)
+{
+ *var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
+}
+
+void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
+{
+ struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
+ struct wmi_ap_mode_stat *ap = &ar->ap_stats;
+ struct wmi_per_sta_stat *st_ap, *st_p;
+ u8 ac;
+
+ if (ar->nw_type == AP_NETWORK) {
+ if (len < sizeof(*p))
+ return;
+
+ for (ac = 0; ac < AP_MAX_NUM_STA; ac++) {
+ st_ap = &ap->sta[ac];
+ st_p = &p->sta[ac];
+
+ ath6kl_add_le32(&st_ap->tx_bytes, st_p->tx_bytes);
+ ath6kl_add_le32(&st_ap->tx_pkts, st_p->tx_pkts);
+ ath6kl_add_le32(&st_ap->tx_error, st_p->tx_error);
+ ath6kl_add_le32(&st_ap->tx_discard, st_p->tx_discard);
+ ath6kl_add_le32(&st_ap->rx_bytes, st_p->rx_bytes);
+ ath6kl_add_le32(&st_ap->rx_pkts, st_p->rx_pkts);
+ ath6kl_add_le32(&st_ap->rx_error, st_p->rx_error);
+ ath6kl_add_le32(&st_ap->rx_discard, st_p->rx_discard);
+ }
+
+ } else {
+ ath6kl_update_target_stats(ar, ptr, len);
+ }
+}
+
+void ath6kl_wakeup_event(void *dev)
+{
+ struct ath6kl *ar = (struct ath6kl *) dev;
+
+ wake_up(&ar->event_wq);
+}
+
+void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
+{
+ struct ath6kl *ar = (struct ath6kl *) devt;
+
+ ar->tx_pwr = tx_pwr;
+ wake_up(&ar->event_wq);
+}
+
+void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
+{
+ struct ath6kl_sta *conn;
+ struct sk_buff *skb;
+ bool psq_empty = false;
+
+ conn = ath6kl_find_sta_by_aid(ar, aid);
+
+ if (!conn)
+ return;
+ /*
+ * Send out a packet queued on ps queue. When the ps queue
+ * becomes empty update the PVB for this station.
+ */
+ spin_lock_bh(&conn->psq_lock);
+ psq_empty = skb_queue_empty(&conn->psq);
+ spin_unlock_bh(&conn->psq_lock);
+
+ if (psq_empty)
+ /* TODO: Send out a NULL data frame */
+ return;
+
+ spin_lock_bh(&conn->psq_lock);
+ skb = skb_dequeue(&conn->psq);
+ spin_unlock_bh(&conn->psq_lock);
+
+ conn->sta_flags |= STA_PS_POLLED;
+ ath6kl_data_tx(skb, ar->net_dev);
+ conn->sta_flags &= ~STA_PS_POLLED;
+
+ spin_lock_bh(&conn->psq_lock);
+ psq_empty = skb_queue_empty(&conn->psq);
+ spin_unlock_bh(&conn->psq_lock);
+
+ if (psq_empty)
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+}
+
+void ath6kl_dtimexpiry_event(struct ath6kl *ar)
+{
+ bool mcastq_empty = false;
+ struct sk_buff *skb;
+
+ /*
+ * If there are no associated STAs, ignore the DTIM expiry event.
+ * There can be potential race conditions where the last associated
+ * STA may disconnect & before the host could clear the 'Indicate
+ * DTIM' request to the firmware, the firmware would have just
+ * indicated a DTIM expiry event. The race is between 'clear DTIM
+ * expiry cmd' going from the host to the firmware & the DTIM
+ * expiry event happening from the firmware to the host.
+ */
+ if (!ar->sta_list_index)
+ return;
+
+ spin_lock_bh(&ar->mcastpsq_lock);
+ mcastq_empty = skb_queue_empty(&ar->mcastpsq);
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ if (mcastq_empty)
+ return;
+
+ /* set the STA flag to dtim_expired for the frame to go out */
+ set_bit(DTIM_EXPIRED, &ar->flag);
+
+ spin_lock_bh(&ar->mcastpsq_lock);
+ while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ ath6kl_data_tx(skb, ar->net_dev);
+
+ spin_lock_bh(&ar->mcastpsq_lock);
+ }
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ clear_bit(DTIM_EXPIRED, &ar->flag);
+
+ /* clear the LSB of the BitMapCtl field of the TIM IE */
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+}
+
+void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
+ u8 assoc_resp_len, u8 *assoc_info,
+ u16 prot_reason_status)
+{
+ unsigned long flags;
+
+ if (ar->nw_type == AP_NETWORK) {
+ if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
+ return;
+
+ /* if no more associated STAs, empty the mcast PS q */
+ if (ar->sta_list_index == 0) {
+ spin_lock_bh(&ar->mcastpsq_lock);
+ skb_queue_purge(&ar->mcastpsq);
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ /* clear the LSB of the TIM IE's BitMapCtl field */
+ if (test_bit(WMI_READY, &ar->flag))
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+ }
+
+ if (!is_broadcast_ether_addr(bssid)) {
+ /* send event to application */
+ cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL);
+ }
+
+ if (memcmp(ar->net_dev->dev_addr, bssid, ETH_ALEN) == 0) {
+ memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
+ clear_bit(CONNECTED, &ar->flag);
+ }
+ return;
+ }
+
+ ath6kl_cfg80211_disconnect_event(ar, reason, bssid,
+ assoc_resp_len, assoc_info,
+ prot_reason_status);
+
+ aggr_reset_state(ar->aggr_cntxt);
+
+ del_timer(&ar->disconnect_timer);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT,
+ "disconnect reason is %d\n", reason);
+
+ /*
+ * If the event is due to disconnect cmd from the host, only they
+ * the target would stop trying to connect. Under any other
+ * condition, target would keep trying to connect.
+ */
+ if (reason == DISCONNECT_CMD) {
+ if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ } else {
+ set_bit(CONNECT_PEND, &ar->flag);
+ if (((reason == ASSOC_FAILED) &&
+ (prot_reason_status == 0x11)) ||
+ ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
+ && (ar->reconnect_flag == 1))) {
+ set_bit(CONNECTED, &ar->flag);
+ return;
+ }
+ }
+
+ /* update connect & link status atomically */
+ spin_lock_irqsave(&ar->lock, flags);
+ clear_bit(CONNECTED, &ar->flag);
+ netif_carrier_off(ar->net_dev);
+ spin_unlock_irqrestore(&ar->lock, flags);
+
+ if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1))
+ ar->reconnect_flag = 0;
+
+ if (reason != CSERV_DISCONNECT)
+ ar->user_key_ctrl = 0;
+
+ netif_stop_queue(ar->net_dev);
+ memset(ar->bssid, 0, sizeof(ar->bssid));
+ ar->bss_ch = 0;
+
+ ath6kl_tx_data_cleanup(ar);
+}
+
+static int ath6kl_open(struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar->lock, flags);
+
+ set_bit(WLAN_ENABLED, &ar->flag);
+
+ if (test_bit(CONNECTED, &ar->flag)) {
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ } else
+ netif_carrier_off(dev);
+
+ spin_unlock_irqrestore(&ar->lock, flags);
+
+ return 0;
+}
+
+static int ath6kl_close(struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ netif_stop_queue(dev);
+
+ ath6kl_disconnect(ar);
+
+ if (test_bit(WMI_READY, &ar->flag)) {
+ if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0))
+ return -EIO;
+
+ clear_bit(WLAN_ENABLED, &ar->flag);
+ }
+
+ ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
+
+ return 0;
+}
+
+static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ return &ar->net_stats;
+}
+
+static struct net_device_ops ath6kl_netdev_ops = {
+ .ndo_open = ath6kl_open,
+ .ndo_stop = ath6kl_close,
+ .ndo_start_xmit = ath6kl_data_tx,
+ .ndo_get_stats = ath6kl_get_stats,
+};
+
+void init_netdev(struct net_device *dev)
+{
+ dev->netdev_ops = &ath6kl_netdev_ops;
+ dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
+
+ dev->needed_headroom = ETH_HLEN;
+ dev->needed_headroom += sizeof(struct ath6kl_llc_snap_hdr) +
+ sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
+ + WMI_MAX_TX_META_SZ + ATH6KL_HTC_ALIGN_BYTES;
+
+ return;
+}
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
new file mode 100644
index 00000000000..f1dc311ee0c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -0,0 +1,949 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include "htc_hif.h"
+#include "hif-ops.h"
+#include "target.h"
+#include "debug.h"
+#include "cfg80211.h"
+
+struct ath6kl_sdio {
+ struct sdio_func *func;
+
+ spinlock_t lock;
+
+ /* free list */
+ struct list_head bus_req_freeq;
+
+ /* available bus requests */
+ struct bus_request bus_req[BUS_REQUEST_MAX_NUM];
+
+ struct ath6kl *ar;
+ u8 *dma_buffer;
+
+ /* scatter request list head */
+ struct list_head scat_req;
+
+ spinlock_t scat_lock;
+ bool is_disabled;
+ atomic_t irq_handling;
+ const struct sdio_device_id *id;
+ struct work_struct wr_async_work;
+ struct list_head wr_asyncq;
+ spinlock_t wr_async_lock;
+};
+
+#define CMD53_ARG_READ 0
+#define CMD53_ARG_WRITE 1
+#define CMD53_ARG_BLOCK_BASIS 1
+#define CMD53_ARG_FIXED_ADDRESS 0
+#define CMD53_ARG_INCR_ADDRESS 1
+
+static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
+{
+ return ar->hif_priv;
+}
+
+/*
+ * Macro to check if DMA buffer is WORD-aligned and DMA-able.
+ * Most host controllers assume the buffer is DMA'able and will
+ * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid
+ * check fails on stack memory.
+ */
+static inline bool buf_needs_bounce(u8 *buf)
+{
+ return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
+}
+
+static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
+{
+ struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;
+
+ /* EP1 has an extended range */
+ mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
+ mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
+ mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
+ mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
+ mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
+ mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
+}
+
+static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
+ u8 mode, u8 opcode, u32 addr,
+ u16 blksz)
+{
+ *arg = (((rw & 1) << 31) |
+ ((func & 0x7) << 28) |
+ ((mode & 1) << 27) |
+ ((opcode & 1) << 26) |
+ ((addr & 0x1FFFF) << 9) |
+ (blksz & 0x1FF));
+}
+
+static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
+ unsigned int address,
+ unsigned char val)
+{
+ const u8 func = 0;
+
+ *arg = ((write & 1) << 31) |
+ ((func & 0x7) << 28) |
+ ((raw & 1) << 27) |
+ (1 << 26) |
+ ((address & 0x1FFFF) << 9) |
+ (1 << 8) |
+ (val & 0xFF);
+}
+
+static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
+ unsigned int address,
+ unsigned char byte)
+{
+ struct mmc_command io_cmd;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(card->host, &io_cmd, 0);
+}
+
+static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
+ u8 *buf, u32 len)
+{
+ int ret = 0;
+
+ if (request & HIF_WRITE) {
+ /* FIXME: looks like ugly workaround for something */
+ if (addr >= HIF_MBOX_BASE_ADDR &&
+ addr <= HIF_MBOX_END_ADDR)
+ addr += (HIF_MBOX_WIDTH - len);
+
+ /* FIXME: this also looks like ugly workaround */
+ if (addr == HIF_MBOX0_EXT_BASE_ADDR)
+ addr += HIF_MBOX0_EXT_WIDTH - len;
+
+ if (request & HIF_FIXED_ADDRESS)
+ ret = sdio_writesb(func, addr, buf, len);
+ else
+ ret = sdio_memcpy_toio(func, addr, buf, len);
+ } else {
+ if (request & HIF_FIXED_ADDRESS)
+ ret = sdio_readsb(func, buf, addr, len);
+ else
+ ret = sdio_memcpy_fromio(func, buf, addr, len);
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
+ request & HIF_WRITE ? "wr" : "rd", addr,
+ request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
+ ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);
+
+ return ret;
+}
+
+static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
+{
+ struct bus_request *bus_req;
+ unsigned long flag;
+
+ spin_lock_irqsave(&ar_sdio->lock, flag);
+
+ if (list_empty(&ar_sdio->bus_req_freeq)) {
+ spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ return NULL;
+ }
+
+ bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
+ struct bus_request, list);
+ list_del(&bus_req->list);
+
+ spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
+ __func__, bus_req);
+
+ return bus_req;
+}
+
+static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *bus_req)
+{
+ unsigned long flag;
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
+ __func__, bus_req);
+
+ spin_lock_irqsave(&ar_sdio->lock, flag);
+ list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
+ spin_unlock_irqrestore(&ar_sdio->lock, flag);
+}
+
+static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
+ struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ int i;
+
+ data->blksz = HIF_MBOX_BLOCK_SIZE;
+ data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
+ (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
+ data->blksz, data->blocks, scat_req->len,
+ scat_req->scat_entries);
+
+ data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
+ MMC_DATA_READ;
+
+ /* fill SG entries */
+ sg = scat_req->sgentries;
+ sg_init_table(sg, scat_req->scat_entries);
+
+ /* assemble SG list */
+ for (i = 0; i < scat_req->scat_entries; i++, sg++) {
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
+ i, scat_req->scat_list[i].buf,
+ scat_req->scat_list[i].len);
+
+ sg_set_buf(sg, scat_req->scat_list[i].buf,
+ scat_req->scat_list[i].len);
+ }
+
+ /* set scatter-gather table for request */
+ data->sg = scat_req->sgentries;
+ data->sg_len = scat_req->scat_entries;
+}
+
+static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *req)
+{
+ struct mmc_request mmc_req;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ struct hif_scatter_req *scat_req;
+ u8 opcode, rw;
+ int status, len;
+
+ scat_req = req->scat_req;
+
+ if (scat_req->virt_scat) {
+ len = scat_req->len;
+ if (scat_req->req & HIF_BLOCK_BASIS)
+ len = round_down(len, HIF_MBOX_BLOCK_SIZE);
+
+ status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
+ scat_req->addr, scat_req->virt_dma_buf,
+ len);
+ goto scat_complete;
+ }
+
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ ath6kl_sdio_setup_scat_data(scat_req, &data);
+
+ opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
+ CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;
+
+ rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;
+
+ /* Fixup the address so that the last byte will fall on MBOX EOM */
+ if (scat_req->req & HIF_WRITE) {
+ if (scat_req->addr == HIF_MBOX_BASE_ADDR)
+ scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
+ else
+ /* Uses extended address range */
+ scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
+ }
+
+ /* set command argument */
+ ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
+ CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
+ data.blocks);
+
+ cmd.opcode = SD_IO_RW_EXTENDED;
+ cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+
+ mmc_req.cmd = &cmd;
+ mmc_req.data = &data;
+
+ mmc_set_data_timeout(&data, ar_sdio->func->card);
+ /* synchronous call to process request */
+ mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
+
+ status = cmd.error ? cmd.error : data.error;
+
+scat_complete:
+ scat_req->status = status;
+
+ if (scat_req->status)
+ ath6kl_err("Scatter write request failed:%d\n",
+ scat_req->status);
+
+ if (scat_req->req & HIF_ASYNCHRONOUS)
+ scat_req->complete(ar_sdio->ar->htc_target, scat_req);
+
+ return status;
+}
+
+static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
+ int n_scat_entry, int n_scat_req,
+ bool virt_scat)
+{
+ struct hif_scatter_req *s_req;
+ struct bus_request *bus_req;
+ int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
+ u8 *virt_buf;
+
+ scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
+ scat_req_sz = sizeof(*s_req) + scat_list_sz;
+
+ if (!virt_scat)
+ sg_sz = sizeof(struct scatterlist) * n_scat_entry;
+ else
+ buf_sz = 2 * L1_CACHE_BYTES +
+ ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
+
+ for (i = 0; i < n_scat_req; i++) {
+ /* allocate the scatter request */
+ s_req = kzalloc(scat_req_sz, GFP_KERNEL);
+ if (!s_req)
+ return -ENOMEM;
+
+ if (virt_scat) {
+ virt_buf = kzalloc(buf_sz, GFP_KERNEL);
+ if (!virt_buf) {
+ kfree(s_req);
+ return -ENOMEM;
+ }
+
+ s_req->virt_dma_buf =
+ (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
+ } else {
+ /* allocate sglist */
+ s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);
+
+ if (!s_req->sgentries) {
+ kfree(s_req);
+ return -ENOMEM;
+ }
+ }
+
+ /* allocate a bus request for this scatter request */
+ bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
+ if (!bus_req) {
+ kfree(s_req->sgentries);
+ kfree(s_req->virt_dma_buf);
+ kfree(s_req);
+ return -ENOMEM;
+ }
+
+ /* assign the scatter request to this bus request */
+ bus_req->scat_req = s_req;
+ s_req->busrequest = bus_req;
+
+ s_req->virt_scat = virt_scat;
+
+ /* add it to the scatter pool */
+ hif_scatter_req_add(ar_sdio->ar, s_req);
+ }
+
+ return 0;
+}
+
+static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
+ u32 len, u32 request)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ u8 *tbuf = NULL;
+ int ret;
+ bool bounced = false;
+
+ if (request & HIF_BLOCK_BASIS)
+ len = round_down(len, HIF_MBOX_BLOCK_SIZE);
+
+ if (buf_needs_bounce(buf)) {
+ if (!ar_sdio->dma_buffer)
+ return -ENOMEM;
+ tbuf = ar_sdio->dma_buffer;
+ memcpy(tbuf, buf, len);
+ bounced = true;
+ } else
+ tbuf = buf;
+
+ sdio_claim_host(ar_sdio->func);
+ ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
+ if ((request & HIF_READ) && bounced)
+ memcpy(buf, tbuf, len);
+ sdio_release_host(ar_sdio->func);
+
+ return ret;
+}
+
+static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *req)
+{
+ if (req->scat_req)
+ ath6kl_sdio_scat_rw(ar_sdio, req);
+ else {
+ void *context;
+ int status;
+
+ status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
+ req->buffer, req->length,
+ req->request);
+ context = req->packet;
+ ath6kl_sdio_free_bus_req(ar_sdio, req);
+ ath6kldev_rw_comp_handler(context, status);
+ }
+}
+
+static void ath6kl_sdio_write_async_work(struct work_struct *work)
+{
+ struct ath6kl_sdio *ar_sdio;
+ unsigned long flags;
+ struct bus_request *req, *tmp_req;
+
+ ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
+ sdio_claim_host(ar_sdio->func);
+
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ list_del(&req->list);
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ __ath6kl_sdio_write_async(ar_sdio, req);
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ }
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static void ath6kl_sdio_irq_handler(struct sdio_func *func)
+{
+ int status;
+ struct ath6kl_sdio *ar_sdio;
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");
+
+ ar_sdio = sdio_get_drvdata(func);
+ atomic_set(&ar_sdio->irq_handling, 1);
+
+ /*
+ * Release the host during interrups so we can pick it back up when
+ * we process commands.
+ */
+ sdio_release_host(ar_sdio->func);
+
+ status = ath6kldev_intr_bh_handler(ar_sdio->ar);
+ sdio_claim_host(ar_sdio->func);
+ atomic_set(&ar_sdio->irq_handling, 0);
+ WARN_ON(status && status != -ECANCELED);
+}
+
+static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
+{
+ struct sdio_func *func = ar_sdio->func;
+ int ret = 0;
+
+ if (!ar_sdio->is_disabled)
+ return 0;
+
+ sdio_claim_host(func);
+
+ ret = sdio_enable_func(func);
+ if (ret) {
+ ath6kl_err("Unable to enable sdio func: %d)\n", ret);
+ sdio_release_host(func);
+ return ret;
+ }
+
+ sdio_release_host(func);
+
+ /*
+ * Wait for hardware to initialise. It should take a lot less than
+ * 10 ms but let's be conservative here.
+ */
+ msleep(10);
+
+ ar_sdio->is_disabled = false;
+
+ return ret;
+}
+
+static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
+{
+ int ret;
+
+ if (ar_sdio->is_disabled)
+ return 0;
+
+ /* Disable the card */
+ sdio_claim_host(ar_sdio->func);
+ ret = sdio_disable_func(ar_sdio->func);
+ sdio_release_host(ar_sdio->func);
+
+ if (ret)
+ return ret;
+
+ ar_sdio->is_disabled = true;
+
+ return ret;
+}
+
+static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
+ u32 length, u32 request,
+ struct htc_packet *packet)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct bus_request *bus_req;
+ unsigned long flags;
+
+ bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
+
+ if (!bus_req)
+ return -ENOMEM;
+
+ bus_req->address = address;
+ bus_req->buffer = buffer;
+ bus_req->length = length;
+ bus_req->request = request;
+ bus_req->packet = packet;
+
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
+
+ return 0;
+}
+
+static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ int ret;
+
+ sdio_claim_host(ar_sdio->func);
+
+ /* Register the isr */
+ ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
+ if (ret)
+ ath6kl_err("Failed to claim sdio irq: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ int ret;
+
+ sdio_claim_host(ar_sdio->func);
+
+ /* Mask our function IRQ */
+ while (atomic_read(&ar_sdio->irq_handling)) {
+ sdio_release_host(ar_sdio->func);
+ schedule_timeout(HZ / 10);
+ sdio_claim_host(ar_sdio->func);
+ }
+
+ ret = sdio_release_irq(ar_sdio->func);
+ if (ret)
+ ath6kl_err("Failed to release sdio irq: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct hif_scatter_req *node = NULL;
+ unsigned long flag;
+
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+
+ if (!list_empty(&ar_sdio->scat_req)) {
+ node = list_first_entry(&ar_sdio->scat_req,
+ struct hif_scatter_req, list);
+ list_del(&node->list);
+ }
+
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+
+ return node;
+}
+
+static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
+ struct hif_scatter_req *s_req)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ unsigned long flag;
+
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+
+ list_add_tail(&s_req->list, &ar_sdio->scat_req);
+
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+
+}
+
+/* scatter gather read write request */
+static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
+ struct hif_scatter_req *scat_req)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ u32 request = scat_req->req;
+ int status = 0;
+ unsigned long flags;
+
+ if (!scat_req->len)
+ return -EINVAL;
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "hif-scatter: total len: %d scatter entries: %d\n",
+ scat_req->len, scat_req->scat_entries);
+
+ if (request & HIF_SYNCHRONOUS) {
+ sdio_claim_host(ar_sdio->func);
+ status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
+ sdio_release_host(ar_sdio->func);
+ } else {
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
+ }
+
+ return status;
+}
+
+/* clean up scatter support */
+static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct hif_scatter_req *s_req, *tmp_req;
+ unsigned long flag;
+
+ /* empty the free list */
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
+ list_del(&s_req->list);
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+
+ if (s_req->busrequest)
+ ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
+ kfree(s_req->virt_dma_buf);
+ kfree(s_req->sgentries);
+ kfree(s_req);
+
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ }
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+}
+
+/* setup of HIF scatter resources */
+static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct htc_target *target = ar->htc_target;
+ int ret;
+ bool virt_scat = false;
+
+ /* check if host supports scatter and it meets our requirements */
+ if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
+ ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
+ ar_sdio->func->card->host->max_segs,
+ MAX_SCATTER_ENTRIES_PER_REQ);
+ virt_scat = true;
+ }
+
+ if (!virt_scat) {
+ ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
+ MAX_SCATTER_ENTRIES_PER_REQ,
+ MAX_SCATTER_REQUESTS, virt_scat);
+
+ if (!ret) {
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "hif-scatter enabled: max scatter req : %d entries: %d\n",
+ MAX_SCATTER_REQUESTS,
+ MAX_SCATTER_ENTRIES_PER_REQ);
+
+ target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
+ target->max_xfer_szper_scatreq =
+ MAX_SCATTER_REQ_TRANSFER_SIZE;
+ } else {
+ ath6kl_sdio_cleanup_scatter(ar);
+ ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
+ }
+ }
+
+ if (virt_scat || ret) {
+ ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
+ ATH6KL_SCATTER_ENTRIES_PER_REQ,
+ ATH6KL_SCATTER_REQS, virt_scat);
+
+ if (ret) {
+ ath6kl_err("failed to alloc virtual scatter resources !\n");
+ ath6kl_sdio_cleanup_scatter(ar);
+ return ret;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "Vitual scatter enabled, max_scat_req:%d, entries:%d\n",
+ ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
+
+ target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
+ target->max_xfer_szper_scatreq =
+ ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
+ }
+
+ return 0;
+}
+
+static int ath6kl_sdio_suspend(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ mmc_pm_flag_t flags;
+ int ret;
+
+ flags = sdio_get_host_pm_caps(func);
+
+ if (!(flags & MMC_PM_KEEP_POWER))
+ /* as host doesn't support keep power we need to bail out */
+ ath6kl_dbg(ATH6KL_DBG_SDIO,
+ "func %d doesn't support MMC_PM_KEEP_POWER\n",
+ func->num);
+ return -EINVAL;
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (ret) {
+ printk(KERN_ERR "ath6kl: set sdio pm flags failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath6kl_deep_sleep_enable(ar);
+
+ return 0;
+}
+
+static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
+ .read_write_sync = ath6kl_sdio_read_write_sync,
+ .write_async = ath6kl_sdio_write_async,
+ .irq_enable = ath6kl_sdio_irq_enable,
+ .irq_disable = ath6kl_sdio_irq_disable,
+ .scatter_req_get = ath6kl_sdio_scatter_req_get,
+ .scatter_req_add = ath6kl_sdio_scatter_req_add,
+ .enable_scatter = ath6kl_sdio_enable_scatter,
+ .scat_req_rw = ath6kl_sdio_async_rw_scatter,
+ .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
+ .suspend = ath6kl_sdio_suspend,
+};
+
+static int ath6kl_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret;
+ struct ath6kl_sdio *ar_sdio;
+ struct ath6kl *ar;
+ int count;
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO,
+ "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
+ func->num, func->vendor, func->device,
+ func->max_blksize, func->cur_blksize);
+
+ ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
+ if (!ar_sdio)
+ return -ENOMEM;
+
+ ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
+ if (!ar_sdio->dma_buffer) {
+ ret = -ENOMEM;
+ goto err_hif;
+ }
+
+ ar_sdio->func = func;
+ sdio_set_drvdata(func, ar_sdio);
+
+ ar_sdio->id = id;
+ ar_sdio->is_disabled = true;
+
+ spin_lock_init(&ar_sdio->lock);
+ spin_lock_init(&ar_sdio->scat_lock);
+ spin_lock_init(&ar_sdio->wr_async_lock);
+
+ INIT_LIST_HEAD(&ar_sdio->scat_req);
+ INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
+ INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
+
+ INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);
+
+ for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
+ ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);
+
+ ar = ath6kl_core_alloc(&ar_sdio->func->dev);
+ if (!ar) {
+ ath6kl_err("Failed to alloc ath6kl core\n");
+ ret = -ENOMEM;
+ goto err_dma;
+ }
+
+ ar_sdio->ar = ar;
+ ar->hif_priv = ar_sdio;
+ ar->hif_ops = &ath6kl_sdio_ops;
+
+ ath6kl_sdio_set_mbox_info(ar);
+
+ sdio_claim_host(func);
+
+ if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
+ MANUFACTURER_ID_AR6003_BASE) {
+ /* enable 4-bit ASYNC interrupt on AR6003 or later */
+ ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG,
+ SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
+ if (ret) {
+ ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
+ ret);
+ sdio_release_host(func);
+ goto err_cfg80211;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n");
+ }
+
+ /* give us some time to enable, in ms */
+ func->enable_timeout = 100;
+
+ sdio_release_host(func);
+
+ ret = ath6kl_sdio_power_on(ar_sdio);
+ if (ret)
+ goto err_cfg80211;
+
+ sdio_claim_host(func);
+
+ ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+ if (ret) {
+ ath6kl_err("Set sdio block size %d failed: %d)\n",
+ HIF_MBOX_BLOCK_SIZE, ret);
+ sdio_release_host(func);
+ goto err_off;
+ }
+
+ sdio_release_host(func);
+
+ ret = ath6kl_core_init(ar);
+ if (ret) {
+ ath6kl_err("Failed to init ath6kl core\n");
+ goto err_off;
+ }
+
+ return ret;
+
+err_off:
+ ath6kl_sdio_power_off(ar_sdio);
+err_cfg80211:
+ ath6kl_cfg80211_deinit(ar_sdio->ar);
+err_dma:
+ kfree(ar_sdio->dma_buffer);
+err_hif:
+ kfree(ar_sdio);
+
+ return ret;
+}
+
+static void ath6kl_sdio_remove(struct sdio_func *func)
+{
+ struct ath6kl_sdio *ar_sdio;
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO,
+ "removed func %d vendor 0x%x device 0x%x\n",
+ func->num, func->vendor, func->device);
+
+ ar_sdio = sdio_get_drvdata(func);
+
+ ath6kl_stop_txrx(ar_sdio->ar);
+ cancel_work_sync(&ar_sdio->wr_async_work);
+
+ ath6kl_unavail_ev(ar_sdio->ar);
+
+ ath6kl_sdio_power_off(ar_sdio);
+
+ kfree(ar_sdio->dma_buffer);
+ kfree(ar_sdio);
+}
+
+static const struct sdio_device_id ath6kl_sdio_devices[] = {
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
+ {},
+};
+
+MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
+
+static struct sdio_driver ath6kl_sdio_driver = {
+ .name = "ath6kl_sdio",
+ .id_table = ath6kl_sdio_devices,
+ .probe = ath6kl_sdio_probe,
+ .remove = ath6kl_sdio_remove,
+};
+
+static int __init ath6kl_sdio_init(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&ath6kl_sdio_driver);
+ if (ret)
+ ath6kl_err("sdio driver registration failed: %d\n", ret);
+
+ return ret;
+}
+
+static void __exit ath6kl_sdio_exit(void)
+{
+ sdio_unregister_driver(&ath6kl_sdio_driver);
+}
+
+module_init(ath6kl_sdio_init);
+module_exit(ath6kl_sdio_exit);
+
+MODULE_AUTHOR("Atheros Communications, Inc.");
+MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
+MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
+MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
new file mode 100644
index 00000000000..c9a76051f04
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2004-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef TARGET_H
+#define TARGET_H
+
+#define AR6003_BOARD_DATA_SZ 1024
+#define AR6003_BOARD_EXT_DATA_SZ 768
+
+#define AR6004_BOARD_DATA_SZ 7168
+#define AR6004_BOARD_EXT_DATA_SZ 0
+
+#define RESET_CONTROL_ADDRESS 0x00000000
+#define RESET_CONTROL_COLD_RST 0x00000100
+#define RESET_CONTROL_MBOX_RST 0x00000004
+
+#define CPU_CLOCK_STANDARD_S 0
+#define CPU_CLOCK_STANDARD 0x00000003
+#define CPU_CLOCK_ADDRESS 0x00000020
+
+#define CLOCK_CONTROL_ADDRESS 0x00000028
+#define CLOCK_CONTROL_LF_CLK32_S 2
+#define CLOCK_CONTROL_LF_CLK32 0x00000004
+
+#define SYSTEM_SLEEP_ADDRESS 0x000000c4
+#define SYSTEM_SLEEP_DISABLE_S 0
+#define SYSTEM_SLEEP_DISABLE 0x00000001
+
+#define LPO_CAL_ADDRESS 0x000000e0
+#define LPO_CAL_ENABLE_S 20
+#define LPO_CAL_ENABLE 0x00100000
+
+#define GPIO_PIN10_ADDRESS 0x00000050
+#define GPIO_PIN11_ADDRESS 0x00000054
+#define GPIO_PIN12_ADDRESS 0x00000058
+#define GPIO_PIN13_ADDRESS 0x0000005c
+
+#define HOST_INT_STATUS_ADDRESS 0x00000400
+#define HOST_INT_STATUS_ERROR_S 7
+#define HOST_INT_STATUS_ERROR 0x00000080
+
+#define HOST_INT_STATUS_CPU_S 6
+#define HOST_INT_STATUS_CPU 0x00000040
+
+#define HOST_INT_STATUS_COUNTER_S 4
+#define HOST_INT_STATUS_COUNTER 0x00000010
+
+#define CPU_INT_STATUS_ADDRESS 0x00000401
+
+#define ERROR_INT_STATUS_ADDRESS 0x00000402
+#define ERROR_INT_STATUS_WAKEUP_S 2
+#define ERROR_INT_STATUS_WAKEUP 0x00000004
+
+#define ERROR_INT_STATUS_RX_UNDERFLOW_S 1
+#define ERROR_INT_STATUS_RX_UNDERFLOW 0x00000002
+
+#define ERROR_INT_STATUS_TX_OVERFLOW_S 0
+#define ERROR_INT_STATUS_TX_OVERFLOW 0x00000001
+
+#define COUNTER_INT_STATUS_ADDRESS 0x00000403
+#define COUNTER_INT_STATUS_COUNTER_S 0
+#define COUNTER_INT_STATUS_COUNTER 0x000000ff
+
+#define RX_LOOKAHEAD_VALID_ADDRESS 0x00000405
+
+#define INT_STATUS_ENABLE_ADDRESS 0x00000418
+#define INT_STATUS_ENABLE_ERROR_S 7
+#define INT_STATUS_ENABLE_ERROR 0x00000080
+
+#define INT_STATUS_ENABLE_CPU_S 6
+#define INT_STATUS_ENABLE_CPU 0x00000040
+
+#define INT_STATUS_ENABLE_INT_S 5
+#define INT_STATUS_ENABLE_INT 0x00000020
+#define INT_STATUS_ENABLE_COUNTER_S 4
+#define INT_STATUS_ENABLE_COUNTER 0x00000010
+
+#define INT_STATUS_ENABLE_MBOX_DATA_S 0
+#define INT_STATUS_ENABLE_MBOX_DATA 0x0000000f
+
+#define CPU_INT_STATUS_ENABLE_ADDRESS 0x00000419
+#define CPU_INT_STATUS_ENABLE_BIT_S 0
+#define CPU_INT_STATUS_ENABLE_BIT 0x000000ff
+
+#define ERROR_STATUS_ENABLE_ADDRESS 0x0000041a
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_S 1
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW 0x00000002
+
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_S 0
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW 0x00000001
+
+#define COUNTER_INT_STATUS_ENABLE_ADDRESS 0x0000041b
+#define COUNTER_INT_STATUS_ENABLE_BIT_S 0
+#define COUNTER_INT_STATUS_ENABLE_BIT 0x000000ff
+
+#define COUNT_ADDRESS 0x00000420
+
+#define COUNT_DEC_ADDRESS 0x00000440
+
+#define WINDOW_DATA_ADDRESS 0x00000474
+#define WINDOW_WRITE_ADDR_ADDRESS 0x00000478
+#define WINDOW_READ_ADDR_ADDRESS 0x0000047c
+#define CPU_DBG_SEL_ADDRESS 0x00000483
+#define CPU_DBG_ADDRESS 0x00000484
+
+#define LOCAL_SCRATCH_ADDRESS 0x000000c0
+#define ATH6KL_OPTION_SLEEP_DISABLE 0x08
+
+#define RTC_BASE_ADDRESS 0x00004000
+#define GPIO_BASE_ADDRESS 0x00014000
+#define MBOX_BASE_ADDRESS 0x00018000
+#define ANALOG_INTF_BASE_ADDRESS 0x0001c000
+
+/* real name of the register is unknown */
+#define ATH6KL_ANALOG_PLL_REGISTER (ANALOG_INTF_BASE_ADDRESS + 0x284)
+
+#define SM(f, v) (((v) << f##_S) & f)
+#define MS(f, v) (((v) & f) >> f##_S)
+
+/*
+ * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
+ * host_interest structure.
+ *
+ * Host Interest is shared between Host and Target in order to coordinate
+ * between the two, and is intended to remain constant (with additions only
+ * at the end).
+ */
+#define ATH6KL_AR6003_HI_START_ADDR 0x00540600
+#define ATH6KL_AR6004_HI_START_ADDR 0x00400800
+
+/*
+ * These are items that the Host may need to access
+ * via BMI or via the Diagnostic Window. The position
+ * of items in this structure must remain constant.
+ * across firmware revisions!
+ *
+ * Types for each item must be fixed size across target and host platforms.
+ * The structure is used only to calculate offset for each register with
+ * HI_ITEM() macro, no values are stored to it.
+ *
+ * More items may be added at the end.
+ */
+struct host_interest {
+ /*
+ * Pointer to application-defined area, if any.
+ * Set by Target application during startup.
+ */
+ u32 hi_app_host_interest; /* 0x00 */
+
+ /* Pointer to register dump area, valid after Target crash. */
+ u32 hi_failure_state; /* 0x04 */
+
+ /* Pointer to debug logging header */
+ u32 hi_dbglog_hdr; /* 0x08 */
+
+ u32 hi_unused1; /* 0x0c */
+
+ /*
+ * General-purpose flag bits, similar to ATH6KL_OPTION_* flags.
+ * Can be used by application rather than by OS.
+ */
+ u32 hi_option_flag; /* 0x10 */
+
+ /*
+ * Boolean that determines whether or not to
+ * display messages on the serial port.
+ */
+ u32 hi_serial_enable; /* 0x14 */
+
+ /* Start address of DataSet index, if any */
+ u32 hi_dset_list_head; /* 0x18 */
+
+ /* Override Target application start address */
+ u32 hi_app_start; /* 0x1c */
+
+ /* Clock and voltage tuning */
+ u32 hi_skip_clock_init; /* 0x20 */
+ u32 hi_core_clock_setting; /* 0x24 */
+ u32 hi_cpu_clock_setting; /* 0x28 */
+ u32 hi_system_sleep_setting; /* 0x2c */
+ u32 hi_xtal_control_setting; /* 0x30 */
+ u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
+ u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
+ u32 hi_ref_voltage_trim_setting; /* 0x3c */
+ u32 hi_clock_info; /* 0x40 */
+
+ /*
+ * Flash configuration overrides, used only
+ * when firmware is not executing from flash.
+ * (When using flash, modify the global variables
+ * with equivalent names.)
+ */
+ u32 hi_bank0_addr_value; /* 0x44 */
+ u32 hi_bank0_read_value; /* 0x48 */
+ u32 hi_bank0_write_value; /* 0x4c */
+ u32 hi_bank0_config_value; /* 0x50 */
+
+ /* Pointer to Board Data */
+ u32 hi_board_data; /* 0x54 */
+ u32 hi_board_data_initialized; /* 0x58 */
+
+ u32 hi_dset_ram_index_tbl; /* 0x5c */
+
+ u32 hi_desired_baud_rate; /* 0x60 */
+ u32 hi_dbglog_config; /* 0x64 */
+ u32 hi_end_ram_reserve_sz; /* 0x68 */
+ u32 hi_mbox_io_block_sz; /* 0x6c */
+
+ u32 hi_num_bpatch_streams; /* 0x70 -- unused */
+ u32 hi_mbox_isr_yield_limit; /* 0x74 */
+
+ u32 hi_refclk_hz; /* 0x78 */
+ u32 hi_ext_clk_detected; /* 0x7c */
+ u32 hi_dbg_uart_txpin; /* 0x80 */
+ u32 hi_dbg_uart_rxpin; /* 0x84 */
+ u32 hi_hci_uart_baud; /* 0x88 */
+ u32 hi_hci_uart_pin_assignments; /* 0x8C */
+ /*
+ * NOTE: byte [0] = tx pin, [1] = rx pin, [2] = rts pin, [3] = cts
+ * pin
+ */
+ u32 hi_hci_uart_baud_scale_val; /* 0x90 */
+ u32 hi_hci_uart_baud_step_val; /* 0x94 */
+
+ u32 hi_allocram_start; /* 0x98 */
+ u32 hi_allocram_sz; /* 0x9c */
+ u32 hi_hci_bridge_flags; /* 0xa0 */
+ u32 hi_hci_uart_support_pins; /* 0xa4 */
+ /*
+ * NOTE: byte [0] = RESET pin (bit 7 is polarity),
+ * bytes[1]..bytes[3] are for future use
+ */
+ u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
+ /*
+ * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
+ * [31:16]: wakeup timeout in ms
+ */
+
+ /* Pointer to extended board data */
+ u32 hi_board_ext_data; /* 0xac */
+ u32 hi_board_ext_data_config; /* 0xb0 */
+
+ /*
+ * Bit [0] : valid
+ * Bit[31:16: size
+ */
+ /*
+ * hi_reset_flag is used to do some stuff when target reset.
+ * such as restore app_start after warm reset or
+ * preserve host Interest area, or preserve ROM data, literals etc.
+ */
+ u32 hi_reset_flag; /* 0xb4 */
+ /* indicate hi_reset_flag is valid */
+ u32 hi_reset_flag_valid; /* 0xb8 */
+ u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
+ /*
+ * 0xbc - [31:0]: idle timeout in ms
+ */
+ /* ACS flags */
+ u32 hi_acs_flags; /* 0xc0 */
+ u32 hi_console_flags; /* 0xc4 */
+ u32 hi_nvram_state; /* 0xc8 */
+ u32 hi_option_flag2; /* 0xcc */
+
+ /* If non-zero, override values sent to Host in WMI_READY event. */
+ u32 hi_sw_version_override; /* 0xd0 */
+ u32 hi_abi_version_override; /* 0xd4 */
+
+ /*
+ * Percentage of high priority RX traffic to total expected RX traffic -
+ * applicable only to ar6004
+ */
+ u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
+
+ /* test applications flags */
+ u32 hi_test_apps_related ; /* 0xdc */
+ /* location of test script */
+ u32 hi_ota_testscript; /* 0xe0 */
+ /* location of CAL data */
+ u32 hi_cal_data; /* 0xe4 */
+ /* Number of packet log buffers */
+ u32 hi_pktlog_num_buffers; /* 0xe8 */
+
+} __packed;
+
+#define HI_ITEM(item) offsetof(struct host_interest, item)
+
+#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
+
+#define HI_OPTION_FW_MODE_IBSS 0x0
+#define HI_OPTION_FW_MODE_BSS_STA 0x1
+#define HI_OPTION_FW_MODE_AP 0x2
+
+#define HI_OPTION_FW_SUBMODE_NONE 0x0
+#define HI_OPTION_FW_SUBMODE_P2PDEV 0x1
+#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2
+#define HI_OPTION_FW_SUBMODE_P2PGO 0x3
+
+#define HI_OPTION_NUM_DEV_SHIFT 0x9
+
+#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
+
+/* Fw Mode/SubMode Mask
+|------------------------------------------------------------------------------|
+| SUB | SUB | SUB | SUB | | | |
+| MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0|
+| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2)
+|------------------------------------------------------------------------------|
+*/
+#define HI_OPTION_FW_MODE_SHIFT 0xC
+#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
+
+/* Convert a Target virtual address into a Target physical address */
+#define AR6003_VTOP(vaddr) ((vaddr) & 0x001fffff)
+#define AR6004_VTOP(vaddr) (vaddr)
+
+#define TARG_VTOP(target_type, vaddr) \
+ (((target_type) == TARGET_TYPE_AR6003) ? AR6003_VTOP(vaddr) : \
+ (((target_type) == TARGET_TYPE_AR6004) ? AR6004_VTOP(vaddr) : 0))
+
+#define AR6003_REV2_APP_LOAD_ADDRESS 0x543180
+#define AR6003_REV2_BOARD_EXT_DATA_ADDRESS 0x57E500
+#define AR6003_REV2_DATASET_PATCH_ADDRESS 0x57e884
+#define AR6003_REV2_RAM_RESERVE_SIZE 6912
+
+#define AR6003_REV3_APP_LOAD_ADDRESS 0x545000
+#define AR6003_REV3_BOARD_EXT_DATA_ADDRESS 0x542330
+#define AR6003_REV3_DATASET_PATCH_ADDRESS 0x57FF74
+#define AR6003_REV3_RAM_RESERVE_SIZE 512
+
+#define AR6004_REV1_BOARD_DATA_ADDRESS 0x435400
+#define AR6004_REV1_BOARD_EXT_DATA_ADDRESS 0x437000
+#define AR6004_REV1_RAM_RESERVE_SIZE 11264
+
+#define ATH6KL_FWLOG_PAYLOAD_SIZE 1500
+
+struct ath6kl_dbglog_buf {
+ __le32 next;
+ __le32 buffer_addr;
+ __le32 bufsize;
+ __le32 length;
+ __le32 count;
+ __le32 free;
+} __packed;
+
+struct ath6kl_dbglog_hdr {
+ __le32 dbuf_addr;
+ __le32 dropped;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
new file mode 100644
index 00000000000..381eb66a605
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "testmode.h"
+
+#include <net/netlink.h>
+
+enum ath6kl_tm_attr {
+ __ATH6KL_TM_ATTR_INVALID = 0,
+ ATH6KL_TM_ATTR_CMD = 1,
+ ATH6KL_TM_ATTR_DATA = 2,
+
+ /* keep last */
+ __ATH6KL_TM_ATTR_AFTER_LAST,
+ ATH6KL_TM_ATTR_MAX = __ATH6KL_TM_ATTR_AFTER_LAST - 1,
+};
+
+enum ath6kl_tm_cmd {
+ ATH6KL_TM_CMD_TCMD = 0,
+ ATH6KL_TM_CMD_RX_REPORT = 1,
+};
+
+#define ATH6KL_TM_DATA_MAX_LEN 5000
+
+static const struct nla_policy ath6kl_tm_policy[ATH6KL_TM_ATTR_MAX + 1] = {
+ [ATH6KL_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH6KL_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH6KL_TM_DATA_MAX_LEN },
+};
+
+void ath6kl_tm_rx_report_event(struct ath6kl *ar, void *buf, size_t buf_len)
+{
+ if (down_interruptible(&ar->sem))
+ return;
+
+ kfree(ar->tm.rx_report);
+
+ ar->tm.rx_report = kmemdup(buf, buf_len, GFP_KERNEL);
+ ar->tm.rx_report_len = buf_len;
+
+ up(&ar->sem);
+
+ wake_up(&ar->event_wq);
+}
+
+static int ath6kl_tm_rx_report(struct ath6kl *ar, void *buf, size_t buf_len,
+ struct sk_buff *skb)
+{
+ int ret = 0;
+ long left;
+
+ if (down_interruptible(&ar->sem))
+ return -ERESTARTSYS;
+
+ if (!test_bit(WMI_READY, &ar->flag)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (ath6kl_wmi_test_cmd(ar->wmi, buf, buf_len) < 0) {
+ up(&ar->sem);
+ return -EIO;
+ }
+
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ ar->tm.rx_report != NULL,
+ WMI_TIMEOUT);
+
+ if (left == 0) {
+ ret = -ETIMEDOUT;
+ goto out;
+ } else if (left < 0) {
+ ret = left;
+ goto out;
+ }
+
+ if (ar->tm.rx_report == NULL || ar->tm.rx_report_len == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ NLA_PUT(skb, ATH6KL_TM_ATTR_DATA, ar->tm.rx_report_len,
+ ar->tm.rx_report);
+
+ kfree(ar->tm.rx_report);
+ ar->tm.rx_report = NULL;
+
+out:
+ up(&ar->sem);
+
+ return ret;
+
+nla_put_failure:
+ ret = -ENOBUFS;
+ goto out;
+}
+
+int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+ struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1];
+ int err, buf_len, reply_len;
+ struct sk_buff *skb;
+ void *buf;
+
+ err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len,
+ ath6kl_tm_policy);
+ if (err)
+ return err;
+
+ if (!tb[ATH6KL_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[ATH6KL_TM_ATTR_CMD])) {
+ case ATH6KL_TM_CMD_TCMD:
+ if (!tb[ATH6KL_TM_ATTR_DATA])
+ return -EINVAL;
+
+ buf = nla_data(tb[ATH6KL_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH6KL_TM_ATTR_DATA]);
+
+ ath6kl_wmi_test_cmd(ar->wmi, buf, buf_len);
+
+ return 0;
+
+ break;
+ case ATH6KL_TM_CMD_RX_REPORT:
+ if (!tb[ATH6KL_TM_ATTR_DATA])
+ return -EINVAL;
+
+ buf = nla_data(tb[ATH6KL_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH6KL_TM_ATTR_DATA]);
+
+ reply_len = nla_total_size(ATH6KL_TM_DATA_MAX_LEN);
+ skb = cfg80211_testmode_alloc_reply_skb(wiphy, reply_len);
+ if (!skb)
+ return -ENOMEM;
+
+ err = ath6kl_tm_rx_report(ar, buf, buf_len, skb);
+ if (err < 0) {
+ kfree_skb(skb);
+ return err;
+ }
+
+ return cfg80211_testmode_reply(skb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.h b/drivers/net/wireless/ath/ath6kl/testmode.h
new file mode 100644
index 00000000000..43dffcc11fb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/testmode.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+void ath6kl_tm_rx_report_event(struct ath6kl *ar, void *buf, size_t buf_len);
+int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len);
+
+#else
+
+static inline void ath6kl_tm_rx_report_event(struct ath6kl *ar, void *buf,
+ size_t buf_len)
+{
+}
+
+static inline int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
new file mode 100644
index 00000000000..a7117074f81
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -0,0 +1,1478 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "debug.h"
+
+static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
+ u32 *map_no)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ethhdr *eth_hdr;
+ u32 i, ep_map = -1;
+ u8 *datap;
+
+ *map_no = 0;
+ datap = skb->data;
+ eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));
+
+ if (is_multicast_ether_addr(eth_hdr->h_dest))
+ return ENDPOINT_2;
+
+ for (i = 0; i < ar->node_num; i++) {
+ if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
+ ETH_ALEN) == 0) {
+ *map_no = i + 1;
+ ar->node_map[i].tx_pend++;
+ return ar->node_map[i].ep_id;
+ }
+
+ if ((ep_map == -1) && !ar->node_map[i].tx_pend)
+ ep_map = i;
+ }
+
+ if (ep_map == -1) {
+ ep_map = ar->node_num;
+ ar->node_num++;
+ if (ar->node_num > MAX_NODE_NUM)
+ return ENDPOINT_UNUSED;
+ }
+
+ memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);
+
+ for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
+ if (!ar->tx_pending[i]) {
+ ar->node_map[ep_map].ep_id = i;
+ break;
+ }
+
+ /*
+ * No free endpoint is available, start redistribution on
+ * the inuse endpoints.
+ */
+ if (i == ENDPOINT_5) {
+ ar->node_map[ep_map].ep_id = ar->next_ep_id;
+ ar->next_ep_id++;
+ if (ar->next_ep_id > ENDPOINT_5)
+ ar->next_ep_id = ENDPOINT_2;
+ }
+ }
+
+ *map_no = ep_map + 1;
+ ar->node_map[ep_map].tx_pend++;
+
+ return ar->node_map[ep_map].ep_id;
+}
+
+static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
+ bool *more_data)
+{
+ struct ethhdr *datap = (struct ethhdr *) skb->data;
+ struct ath6kl_sta *conn = NULL;
+ bool ps_queued = false, is_psq_empty = false;
+
+ if (is_multicast_ether_addr(datap->h_dest)) {
+ u8 ctr = 0;
+ bool q_mcast = false;
+
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
+ q_mcast = true;
+ break;
+ }
+ }
+
+ if (q_mcast) {
+ /*
+ * If this transmit is not because of a Dtim Expiry
+ * q it.
+ */
+ if (!test_bit(DTIM_EXPIRED, &ar->flag)) {
+ bool is_mcastq_empty = false;
+
+ spin_lock_bh(&ar->mcastpsq_lock);
+ is_mcastq_empty =
+ skb_queue_empty(&ar->mcastpsq);
+ skb_queue_tail(&ar->mcastpsq, skb);
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ /*
+ * If this is the first Mcast pkt getting
+ * queued indicate to the target to set the
+ * BitmapControl LSB of the TIM IE.
+ */
+ if (is_mcastq_empty)
+ ath6kl_wmi_set_pvb_cmd(ar->wmi,
+ MCAST_AID, 1);
+
+ ps_queued = true;
+ } else {
+ /*
+ * This transmit is because of Dtim expiry.
+ * Determine if MoreData bit has to be set.
+ */
+ spin_lock_bh(&ar->mcastpsq_lock);
+ if (!skb_queue_empty(&ar->mcastpsq))
+ *more_data = true;
+ spin_unlock_bh(&ar->mcastpsq_lock);
+ }
+ }
+ } else {
+ conn = ath6kl_find_sta(ar, datap->h_dest);
+ if (!conn) {
+ dev_kfree_skb(skb);
+
+ /* Inform the caller that the skb is consumed */
+ return true;
+ }
+
+ if (conn->sta_flags & STA_PS_SLEEP) {
+ if (!(conn->sta_flags & STA_PS_POLLED)) {
+ /* Queue the frames if the STA is sleeping */
+ spin_lock_bh(&conn->psq_lock);
+ is_psq_empty = skb_queue_empty(&conn->psq);
+ skb_queue_tail(&conn->psq, skb);
+ spin_unlock_bh(&conn->psq_lock);
+
+ /*
+ * If this is the first pkt getting queued
+ * for this STA, update the PVB for this
+ * STA.
+ */
+ if (is_psq_empty)
+ ath6kl_wmi_set_pvb_cmd(ar->wmi,
+ conn->aid, 1);
+
+ ps_queued = true;
+ } else {
+ /*
+ * This tx is because of a PsPoll.
+ * Determine if MoreData bit has to be set.
+ */
+ spin_lock_bh(&conn->psq_lock);
+ if (!skb_queue_empty(&conn->psq))
+ *more_data = true;
+ spin_unlock_bh(&conn->psq_lock);
+ }
+ }
+ }
+
+ return ps_queued;
+}
+
+/* Tx functions */
+
+int ath6kl_control_tx(void *devt, struct sk_buff *skb,
+ enum htc_endpoint_id eid)
+{
+ struct ath6kl *ar = devt;
+ int status = 0;
+ struct ath6kl_cookie *cookie = NULL;
+
+ spin_lock_bh(&ar->lock);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
+ "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
+ skb, skb->len, eid);
+
+ if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
+ /*
+ * Control endpoint is full, don't allocate resources, we
+ * are just going to drop this packet.
+ */
+ cookie = NULL;
+ ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
+ skb, skb->len);
+ } else
+ cookie = ath6kl_alloc_cookie(ar);
+
+ if (cookie == NULL) {
+ spin_unlock_bh(&ar->lock);
+ status = -ENOMEM;
+ goto fail_ctrl_tx;
+ }
+
+ ar->tx_pending[eid]++;
+
+ if (eid != ar->ctrl_ep)
+ ar->total_tx_data_pend++;
+
+ spin_unlock_bh(&ar->lock);
+
+ cookie->skb = skb;
+ cookie->map_no = 0;
+ set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
+ eid, ATH6KL_CONTROL_PKT_TAG);
+
+ /*
+ * This interface is asynchronous, if there is an error, cleanup
+ * will happen in the TX completion callback.
+ */
+ ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
+
+ return 0;
+
+fail_ctrl_tx:
+ dev_kfree_skb(skb);
+ return status;
+}
+
+int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_cookie *cookie = NULL;
+ enum htc_endpoint_id eid = ENDPOINT_UNUSED;
+ u32 map_no = 0;
+ u16 htc_tag = ATH6KL_DATA_PKT_TAG;
+ u8 ac = 99 ; /* initialize to unmapped ac */
+ bool chk_adhoc_ps_mapping = false, more_data = false;
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
+ "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
+ skb, skb->data, skb->len);
+
+ /* If target is not associated */
+ if (!test_bit(CONNECTED, &ar->flag)) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (!test_bit(WMI_READY, &ar->flag))
+ goto fail_tx;
+
+ /* AP mode Power saving processing */
+ if (ar->nw_type == AP_NETWORK) {
+ if (ath6kl_powersave_ap(ar, skb, &more_data))
+ return 0;
+ }
+
+ if (test_bit(WMI_ENABLED, &ar->flag)) {
+ if (skb_headroom(skb) < dev->needed_headroom) {
+ WARN_ON(1);
+ goto fail_tx;
+ }
+
+ if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
+ ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
+ goto fail_tx;
+ }
+
+ if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
+ more_data, 0, 0, NULL)) {
+ ath6kl_err("wmi_data_hdr_add failed\n");
+ goto fail_tx;
+ }
+
+ if ((ar->nw_type == ADHOC_NETWORK) &&
+ ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag))
+ chk_adhoc_ps_mapping = true;
+ else {
+ /* get the stream mapping */
+ ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
+ 0, test_bit(WMM_ENABLED, &ar->flag), &ac);
+ if (ret)
+ goto fail_tx;
+ }
+ } else
+ goto fail_tx;
+
+ spin_lock_bh(&ar->lock);
+
+ if (chk_adhoc_ps_mapping)
+ eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
+ else
+ eid = ar->ac2ep_map[ac];
+
+ if (eid == 0 || eid == ENDPOINT_UNUSED) {
+ ath6kl_err("eid %d is not mapped!\n", eid);
+ spin_unlock_bh(&ar->lock);
+ goto fail_tx;
+ }
+
+ /* allocate resource for this packet */
+ cookie = ath6kl_alloc_cookie(ar);
+
+ if (!cookie) {
+ spin_unlock_bh(&ar->lock);
+ goto fail_tx;
+ }
+
+ /* update counts while the lock is held */
+ ar->tx_pending[eid]++;
+ ar->total_tx_data_pend++;
+
+ spin_unlock_bh(&ar->lock);
+
+ if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
+ skb_cloned(skb)) {
+ /*
+ * We will touch (move the buffer data to align it. Since the
+ * skb buffer is cloned and not only the header is changed, we
+ * have to copy it to allow the changes. Since we are copying
+ * the data here, we may as well align it by reserving suitable
+ * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
+ */
+ struct sk_buff *nskb;
+
+ nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
+ if (nskb == NULL)
+ goto fail_tx;
+ kfree_skb(skb);
+ skb = nskb;
+ }
+
+ cookie->skb = skb;
+ cookie->map_no = map_no;
+ set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
+ eid, htc_tag);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
+ skb->data, skb->len);
+
+ /*
+ * HTC interface is asynchronous, if this fails, cleanup will
+ * happen in the ath6kl_tx_complete callback.
+ */
+ ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
+
+ return 0;
+
+fail_tx:
+ dev_kfree_skb(skb);
+
+ ar->net_stats.tx_dropped++;
+ ar->net_stats.tx_aborted_errors++;
+
+ return 0;
+}
+
+/* indicate tx activity or inactivity on a WMI stream */
+void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
+{
+ struct ath6kl *ar = devt;
+ enum htc_endpoint_id eid;
+ int i;
+
+ eid = ar->ac2ep_map[traffic_class];
+
+ if (!test_bit(WMI_ENABLED, &ar->flag))
+ goto notify_htc;
+
+ spin_lock_bh(&ar->lock);
+
+ ar->ac_stream_active[traffic_class] = active;
+
+ if (active) {
+ /*
+ * Keep track of the active stream with the highest
+ * priority.
+ */
+ if (ar->ac_stream_pri_map[traffic_class] >
+ ar->hiac_stream_active_pri)
+ /* set the new highest active priority */
+ ar->hiac_stream_active_pri =
+ ar->ac_stream_pri_map[traffic_class];
+
+ } else {
+ /*
+ * We may have to search for the next active stream
+ * that is the highest priority.
+ */
+ if (ar->hiac_stream_active_pri ==
+ ar->ac_stream_pri_map[traffic_class]) {
+ /*
+ * The highest priority stream just went inactive
+ * reset and search for the "next" highest "active"
+ * priority stream.
+ */
+ ar->hiac_stream_active_pri = 0;
+
+ for (i = 0; i < WMM_NUM_AC; i++) {
+ if (ar->ac_stream_active[i] &&
+ (ar->ac_stream_pri_map[i] >
+ ar->hiac_stream_active_pri))
+ /*
+ * Set the new highest active
+ * priority.
+ */
+ ar->hiac_stream_active_pri =
+ ar->ac_stream_pri_map[i];
+ }
+ }
+ }
+
+ spin_unlock_bh(&ar->lock);
+
+notify_htc:
+ /* notify HTC, this may cause credit distribution changes */
+ ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
+}
+
+enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ struct ath6kl *ar = target->dev->ar;
+ enum htc_endpoint_id endpoint = packet->endpoint;
+
+ if (endpoint == ar->ctrl_ep) {
+ /*
+ * Under normal WMI if this is getting full, then something
+ * is running rampant the host should not be exhausting the
+ * WMI queue with too many commands the only exception to
+ * this is during testing using endpointping.
+ */
+ spin_lock_bh(&ar->lock);
+ set_bit(WMI_CTRL_EP_FULL, &ar->flag);
+ spin_unlock_bh(&ar->lock);
+ ath6kl_err("wmi ctrl ep is full\n");
+ return HTC_SEND_FULL_KEEP;
+ }
+
+ if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
+ return HTC_SEND_FULL_KEEP;
+
+ if (ar->nw_type == ADHOC_NETWORK)
+ /*
+ * In adhoc mode, we cannot differentiate traffic
+ * priorities so there is no need to continue, however we
+ * should stop the network.
+ */
+ goto stop_net_queues;
+
+ /*
+ * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
+ * the highest active stream.
+ */
+ if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
+ ar->hiac_stream_active_pri &&
+ ar->cookie_count <= MAX_HI_COOKIE_NUM)
+ /*
+ * Give preference to the highest priority stream by
+ * dropping the packets which overflowed.
+ */
+ return HTC_SEND_FULL_DROP;
+
+stop_net_queues:
+ spin_lock_bh(&ar->lock);
+ set_bit(NETQ_STOPPED, &ar->flag);
+ spin_unlock_bh(&ar->lock);
+ netif_stop_queue(ar->net_dev);
+
+ return HTC_SEND_FULL_KEEP;
+}
+
+/* TODO this needs to be looked at */
+static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
+ enum htc_endpoint_id eid, u32 map_no)
+{
+ u32 i;
+
+ if (ar->nw_type != ADHOC_NETWORK)
+ return;
+
+ if (!ar->ibss_ps_enable)
+ return;
+
+ if (eid == ar->ctrl_ep)
+ return;
+
+ if (map_no == 0)
+ return;
+
+ map_no--;
+ ar->node_map[map_no].tx_pend--;
+
+ if (ar->node_map[map_no].tx_pend)
+ return;
+
+ if (map_no != (ar->node_num - 1))
+ return;
+
+ for (i = ar->node_num; i > 0; i--) {
+ if (ar->node_map[i - 1].tx_pend)
+ break;
+
+ memset(&ar->node_map[i - 1], 0,
+ sizeof(struct ath6kl_node_mapping));
+ ar->node_num--;
+ }
+}
+
+void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
+{
+ struct ath6kl *ar = context;
+ struct sk_buff_head skb_queue;
+ struct htc_packet *packet;
+ struct sk_buff *skb;
+ struct ath6kl_cookie *ath6kl_cookie;
+ u32 map_no = 0;
+ int status;
+ enum htc_endpoint_id eid;
+ bool wake_event = false;
+ bool flushing = false;
+
+ skb_queue_head_init(&skb_queue);
+
+ /* lock the driver as we update internal state */
+ spin_lock_bh(&ar->lock);
+
+ /* reap completed packets */
+ while (!list_empty(packet_queue)) {
+
+ packet = list_first_entry(packet_queue, struct htc_packet,
+ list);
+ list_del(&packet->list);
+
+ ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
+ if (!ath6kl_cookie)
+ goto fatal;
+
+ status = packet->status;
+ skb = ath6kl_cookie->skb;
+ eid = packet->endpoint;
+ map_no = ath6kl_cookie->map_no;
+
+ if (!skb || !skb->data)
+ goto fatal;
+
+ packet->buf = skb->data;
+
+ __skb_queue_tail(&skb_queue, skb);
+
+ if (!status && (packet->act_len != skb->len))
+ goto fatal;
+
+ ar->tx_pending[eid]--;
+
+ if (eid != ar->ctrl_ep)
+ ar->total_tx_data_pend--;
+
+ if (eid == ar->ctrl_ep) {
+ if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
+ clear_bit(WMI_CTRL_EP_FULL, &ar->flag);
+
+ if (ar->tx_pending[eid] == 0)
+ wake_event = true;
+ }
+
+ if (status) {
+ if (status == -ECANCELED)
+ /* a packet was flushed */
+ flushing = true;
+
+ ar->net_stats.tx_errors++;
+
+ if (status != -ENOSPC)
+ ath6kl_err("tx error, status: 0x%x\n", status);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
+ "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
+ __func__, skb, packet->buf, packet->act_len,
+ eid, "error!");
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
+ "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
+ __func__, skb, packet->buf, packet->act_len,
+ eid, "OK");
+
+ flushing = false;
+ ar->net_stats.tx_packets++;
+ ar->net_stats.tx_bytes += skb->len;
+ }
+
+ ath6kl_tx_clear_node_map(ar, eid, map_no);
+
+ ath6kl_free_cookie(ar, ath6kl_cookie);
+
+ if (test_bit(NETQ_STOPPED, &ar->flag))
+ clear_bit(NETQ_STOPPED, &ar->flag);
+ }
+
+ spin_unlock_bh(&ar->lock);
+
+ __skb_queue_purge(&skb_queue);
+
+ if (test_bit(CONNECTED, &ar->flag)) {
+ if (!flushing)
+ netif_wake_queue(ar->net_dev);
+ }
+
+ if (wake_event)
+ wake_up(&ar->event_wq);
+
+ return;
+
+fatal:
+ WARN_ON(1);
+ spin_unlock_bh(&ar->lock);
+ return;
+}
+
+void ath6kl_tx_data_cleanup(struct ath6kl *ar)
+{
+ int i;
+
+ /* flush all the data (non-control) streams */
+ for (i = 0; i < WMM_NUM_AC; i++)
+ ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
+ ATH6KL_DATA_PKT_TAG);
+}
+
+/* Rx functions */
+
+static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ if (!skb)
+ return;
+
+ skb->dev = dev;
+
+ if (!(skb->dev->flags & IFF_UP)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ netif_rx_ni(skb);
+}
+
+static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
+{
+ struct sk_buff *skb;
+
+ while (num) {
+ skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
+ if (!skb) {
+ ath6kl_err("netbuf allocation failed\n");
+ return;
+ }
+ skb_queue_tail(q, skb);
+ num--;
+ }
+}
+
+static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
+{
+ struct sk_buff *skb = NULL;
+
+ if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
+ ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
+
+ skb = skb_dequeue(&p_aggr->free_q);
+
+ return skb;
+}
+
+void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
+{
+ struct ath6kl *ar = target->dev->ar;
+ struct sk_buff *skb;
+ int rx_buf;
+ int n_buf_refill;
+ struct htc_packet *packet;
+ struct list_head queue;
+
+ n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
+ ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);
+
+ if (n_buf_refill <= 0)
+ return;
+
+ INIT_LIST_HEAD(&queue);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
+ "%s: providing htc with %d buffers at eid=%d\n",
+ __func__, n_buf_refill, endpoint);
+
+ for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
+ skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
+ if (!skb)
+ break;
+
+ packet = (struct htc_packet *) skb->head;
+ if (!IS_ALIGNED((unsigned long) skb->data, 4))
+ skb->data = PTR_ALIGN(skb->data - 4, 4);
+ set_htc_rxpkt_info(packet, skb, skb->data,
+ ATH6KL_BUFFER_SIZE, endpoint);
+ list_add_tail(&packet->list, &queue);
+ }
+
+ if (!list_empty(&queue))
+ ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
+}
+
+void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
+{
+ struct htc_packet *packet;
+ struct sk_buff *skb;
+
+ while (count) {
+ skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
+ if (!skb)
+ return;
+
+ packet = (struct htc_packet *) skb->head;
+ if (!IS_ALIGNED((unsigned long) skb->data, 4))
+ skb->data = PTR_ALIGN(skb->data - 4, 4);
+ set_htc_rxpkt_info(packet, skb, skb->data,
+ ATH6KL_AMSDU_BUFFER_SIZE, 0);
+ spin_lock_bh(&ar->lock);
+ list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
+ spin_unlock_bh(&ar->lock);
+ count--;
+ }
+}
+
+/*
+ * Callback to allocate a receive buffer for a pending packet. We use a
+ * pre-allocated list of buffers of maximum AMSDU size (4K).
+ */
+struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
+ enum htc_endpoint_id endpoint,
+ int len)
+{
+ struct ath6kl *ar = target->dev->ar;
+ struct htc_packet *packet = NULL;
+ struct list_head *pkt_pos;
+ int refill_cnt = 0, depth = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
+ __func__, endpoint, len);
+
+ if ((len <= ATH6KL_BUFFER_SIZE) ||
+ (len > ATH6KL_AMSDU_BUFFER_SIZE))
+ return NULL;
+
+ spin_lock_bh(&ar->lock);
+
+ if (list_empty(&ar->amsdu_rx_buffer_queue)) {
+ spin_unlock_bh(&ar->lock);
+ refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
+ goto refill_buf;
+ }
+
+ packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
+ struct htc_packet, list);
+ list_del(&packet->list);
+ list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
+ depth++;
+
+ refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
+ spin_unlock_bh(&ar->lock);
+
+ /* set actual endpoint ID */
+ packet->endpoint = endpoint;
+
+refill_buf:
+ if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
+ ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);
+
+ return packet;
+}
+
+static void aggr_slice_amsdu(struct aggr_info *p_aggr,
+ struct rxtid *rxtid, struct sk_buff *skb)
+{
+ struct sk_buff *new_skb;
+ struct ethhdr *hdr;
+ u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
+ u8 *framep;
+
+ mac_hdr_len = sizeof(struct ethhdr);
+ framep = skb->data + mac_hdr_len;
+ amsdu_len = skb->len - mac_hdr_len;
+
+ while (amsdu_len > mac_hdr_len) {
+ hdr = (struct ethhdr *) framep;
+ payload_8023_len = ntohs(hdr->h_proto);
+
+ if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
+ payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
+ ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
+ payload_8023_len);
+ break;
+ }
+
+ frame_8023_len = payload_8023_len + mac_hdr_len;
+ new_skb = aggr_get_free_skb(p_aggr);
+ if (!new_skb) {
+ ath6kl_err("no buffer available\n");
+ break;
+ }
+
+ memcpy(new_skb->data, framep, frame_8023_len);
+ skb_put(new_skb, frame_8023_len);
+ if (ath6kl_wmi_dot3_2_dix(new_skb)) {
+ ath6kl_err("dot3_2_dix error\n");
+ dev_kfree_skb(new_skb);
+ break;
+ }
+
+ skb_queue_tail(&rxtid->q, new_skb);
+
+ /* Is this the last subframe within this aggregate ? */
+ if ((amsdu_len - frame_8023_len) == 0)
+ break;
+
+ /* Add the length of A-MSDU subframe padding bytes -
+ * Round to nearest word.
+ */
+ frame_8023_len = ALIGN(frame_8023_len, 4);
+
+ framep += frame_8023_len;
+ amsdu_len -= frame_8023_len;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
+ u16 seq_no, u8 order)
+{
+ struct sk_buff *skb;
+ struct rxtid *rxtid;
+ struct skb_hold_q *node;
+ u16 idx, idx_end, seq_end;
+ struct rxtid_stats *stats;
+
+ if (!p_aggr)
+ return;
+
+ rxtid = &p_aggr->rx_tid[tid];
+ stats = &p_aggr->stat[tid];
+
+ idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
+
+ /*
+ * idx_end is typically the last possible frame in the window,
+ * but changes to 'the' seq_no, when BAR comes. If seq_no
+ * is non-zero, we will go up to that and stop.
+ * Note: last seq no in current window will occupy the same
+ * index position as index that is just previous to start.
+ * An imp point : if win_sz is 7, for seq_no space of 4095,
+ * then, there would be holes when sequence wrap around occurs.
+ * Target should judiciously choose the win_sz, based on
+ * this condition. For 4095, (TID_WINDOW_SZ = 2 x win_sz
+ * 2, 4, 8, 16 win_sz works fine).
+ * We must deque from "idx" to "idx_end", including both.
+ */
+ seq_end = seq_no ? seq_no : rxtid->seq_next;
+ idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
+
+ spin_lock_bh(&rxtid->lock);
+
+ do {
+ node = &rxtid->hold_q[idx];
+ if ((order == 1) && (!node->skb))
+ break;
+
+ if (node->skb) {
+ if (node->is_amsdu)
+ aggr_slice_amsdu(p_aggr, rxtid, node->skb);
+ else
+ skb_queue_tail(&rxtid->q, node->skb);
+ node->skb = NULL;
+ } else
+ stats->num_hole++;
+
+ rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
+ idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
+ } while (idx != idx_end);
+
+ spin_unlock_bh(&rxtid->lock);
+
+ stats->num_delivered += skb_queue_len(&rxtid->q);
+
+ while ((skb = skb_dequeue(&rxtid->q)))
+ ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
+}
+
+static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
+ u16 seq_no,
+ bool is_amsdu, struct sk_buff *frame)
+{
+ struct rxtid *rxtid;
+ struct rxtid_stats *stats;
+ struct sk_buff *skb;
+ struct skb_hold_q *node;
+ u16 idx, st, cur, end;
+ bool is_queued = false;
+ u16 extended_end;
+
+ rxtid = &agg_info->rx_tid[tid];
+ stats = &agg_info->stat[tid];
+
+ stats->num_into_aggr++;
+
+ if (!rxtid->aggr) {
+ if (is_amsdu) {
+ aggr_slice_amsdu(agg_info, rxtid, frame);
+ is_queued = true;
+ stats->num_amsdu++;
+ while ((skb = skb_dequeue(&rxtid->q)))
+ ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
+ skb);
+ }
+ return is_queued;
+ }
+
+ /* Check the incoming sequence no, if it's in the window */
+ st = rxtid->seq_next;
+ cur = seq_no;
+ end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;
+
+ if (((st < end) && (cur < st || cur > end)) ||
+ ((st > end) && (cur > end) && (cur < st))) {
+ extended_end = (end + rxtid->hold_q_sz - 1) &
+ ATH6KL_MAX_SEQ_NO;
+
+ if (((end < extended_end) &&
+ (cur < end || cur > extended_end)) ||
+ ((end > extended_end) && (cur > extended_end) &&
+ (cur < end))) {
+ aggr_deque_frms(agg_info, tid, 0, 0);
+ if (cur >= rxtid->hold_q_sz - 1)
+ rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
+ else
+ rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
+ (rxtid->hold_q_sz - 2 - cur);
+ } else {
+ /*
+ * Dequeue only those frames that are outside the
+ * new shifted window.
+ */
+ if (cur >= rxtid->hold_q_sz - 1)
+ st = cur - (rxtid->hold_q_sz - 1);
+ else
+ st = ATH6KL_MAX_SEQ_NO -
+ (rxtid->hold_q_sz - 2 - cur);
+
+ aggr_deque_frms(agg_info, tid, st, 0);
+ }
+
+ stats->num_oow++;
+ }
+
+ idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
+
+ node = &rxtid->hold_q[idx];
+
+ spin_lock_bh(&rxtid->lock);
+
+ /*
+ * Is the cur frame duplicate or something beyond our window(hold_q
+ * -> which is 2x, already)?
+ *
+ * 1. Duplicate is easy - drop incoming frame.
+ * 2. Not falling in current sliding window.
+ * 2a. is the frame_seq_no preceding current tid_seq_no?
+ * -> drop the frame. perhaps sender did not get our ACK.
+ * this is taken care of above.
+ * 2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
+ * -> Taken care of it above, by moving window forward.
+ */
+ dev_kfree_skb(node->skb);
+ stats->num_dups++;
+
+ node->skb = frame;
+ is_queued = true;
+ node->is_amsdu = is_amsdu;
+ node->seq_no = seq_no;
+
+ if (node->is_amsdu)
+ stats->num_amsdu++;
+ else
+ stats->num_mpdu++;
+
+ spin_unlock_bh(&rxtid->lock);
+
+ aggr_deque_frms(agg_info, tid, 0, 1);
+
+ if (agg_info->timer_scheduled)
+ rxtid->progress = true;
+ else
+ for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
+ if (rxtid->hold_q[idx].skb) {
+ /*
+ * There is a frame in the queue and no
+ * timer so start a timer to ensure that
+ * the frame doesn't remain stuck
+ * forever.
+ */
+ agg_info->timer_scheduled = true;
+ mod_timer(&agg_info->timer,
+ (jiffies +
+ HZ * (AGGR_RX_TIMEOUT) / 1000));
+ rxtid->progress = false;
+ rxtid->timer_mon = true;
+ break;
+ }
+ }
+
+ return is_queued;
+}
+
+void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
+{
+ struct ath6kl *ar = target->dev->ar;
+ struct sk_buff *skb = packet->pkt_cntxt;
+ struct wmi_rx_meta_v2 *meta;
+ struct wmi_data_hdr *dhdr;
+ int min_hdr_len;
+ u8 meta_type, dot11_hdr = 0;
+ int status = packet->status;
+ enum htc_endpoint_id ept = packet->endpoint;
+ bool is_amsdu, prev_ps, ps_state = false;
+ struct ath6kl_sta *conn = NULL;
+ struct sk_buff *skb1 = NULL;
+ struct ethhdr *datap = NULL;
+ u16 seq_no, offset;
+ u8 tid;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
+ "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
+ __func__, ar, ept, skb, packet->buf,
+ packet->act_len, status);
+
+ if (status || !(skb->data + HTC_HDR_LENGTH)) {
+ ar->net_stats.rx_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /*
+ * Take lock to protect buffer counts and adaptive power throughput
+ * state.
+ */
+ spin_lock_bh(&ar->lock);
+
+ ar->net_stats.rx_packets++;
+ ar->net_stats.rx_bytes += packet->act_len;
+
+ spin_unlock_bh(&ar->lock);
+
+ skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
+ skb_pull(skb, HTC_HDR_LENGTH);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
+ skb->data, skb->len);
+
+ skb->dev = ar->net_dev;
+
+ if (!test_bit(WMI_ENABLED, &ar->flag)) {
+ if (EPPING_ALIGNMENT_PAD > 0)
+ skb_pull(skb, EPPING_ALIGNMENT_PAD);
+ ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+ return;
+ }
+
+ if (ept == ar->ctrl_ep) {
+ ath6kl_wmi_control_rx(ar->wmi, skb);
+ return;
+ }
+
+ min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
+ sizeof(struct ath6kl_llc_snap_hdr);
+
+ dhdr = (struct wmi_data_hdr *) skb->data;
+
+ /*
+ * In the case of AP mode we may receive NULL data frames
+ * that do not have LLC hdr. They are 16 bytes in size.
+ * Allow these frames in the AP mode.
+ */
+ if (ar->nw_type != AP_NETWORK &&
+ ((packet->act_len < min_hdr_len) ||
+ (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
+ ath6kl_info("frame len is too short or too long\n");
+ ar->net_stats.rx_errors++;
+ ar->net_stats.rx_length_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /* Get the Power save state of the STA */
+ if (ar->nw_type == AP_NETWORK) {
+ meta_type = wmi_data_hdr_get_meta(dhdr);
+
+ ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
+ WMI_DATA_HDR_PS_MASK);
+
+ offset = sizeof(struct wmi_data_hdr);
+
+ switch (meta_type) {
+ case 0:
+ break;
+ case WMI_META_VERSION_1:
+ offset += sizeof(struct wmi_rx_meta_v1);
+ break;
+ case WMI_META_VERSION_2:
+ offset += sizeof(struct wmi_rx_meta_v2);
+ break;
+ default:
+ break;
+ }
+
+ datap = (struct ethhdr *) (skb->data + offset);
+ conn = ath6kl_find_sta(ar, datap->h_source);
+
+ if (!conn) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /*
+ * If there is a change in PS state of the STA,
+ * take appropriate steps:
+ *
+ * 1. If Sleep-->Awake, flush the psq for the STA
+ * Clear the PVB for the STA.
+ * 2. If Awake-->Sleep, Starting queueing frames
+ * the STA.
+ */
+ prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
+
+ if (ps_state)
+ conn->sta_flags |= STA_PS_SLEEP;
+ else
+ conn->sta_flags &= ~STA_PS_SLEEP;
+
+ if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
+ if (!(conn->sta_flags & STA_PS_SLEEP)) {
+ struct sk_buff *skbuff = NULL;
+
+ spin_lock_bh(&conn->psq_lock);
+ while ((skbuff = skb_dequeue(&conn->psq))
+ != NULL) {
+ spin_unlock_bh(&conn->psq_lock);
+ ath6kl_data_tx(skbuff, ar->net_dev);
+ spin_lock_bh(&conn->psq_lock);
+ }
+ spin_unlock_bh(&conn->psq_lock);
+ /* Clear the PVB for this STA */
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+ }
+ }
+
+ /* drop NULL data frames here */
+ if ((packet->act_len < min_hdr_len) ||
+ (packet->act_len >
+ WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+ }
+
+ is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
+ tid = wmi_data_hdr_get_up(dhdr);
+ seq_no = wmi_data_hdr_get_seqno(dhdr);
+ meta_type = wmi_data_hdr_get_meta(dhdr);
+ dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
+ skb_pull(skb, sizeof(struct wmi_data_hdr));
+
+ switch (meta_type) {
+ case WMI_META_VERSION_1:
+ skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
+ break;
+ case WMI_META_VERSION_2:
+ meta = (struct wmi_rx_meta_v2 *) skb->data;
+ if (meta->csum_flags & 0x1) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = (__force __wsum) meta->csum;
+ }
+ skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
+ break;
+ default:
+ break;
+ }
+
+ if (dot11_hdr)
+ status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
+ else if (!is_amsdu)
+ status = ath6kl_wmi_dot3_2_dix(skb);
+
+ if (status) {
+ /*
+ * Drop frames that could not be processed (lack of
+ * memory, etc.)
+ */
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ if (!(ar->net_dev->flags & IFF_UP)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ if (ar->nw_type == AP_NETWORK) {
+ datap = (struct ethhdr *) skb->data;
+ if (is_multicast_ether_addr(datap->h_dest))
+ /*
+ * Bcast/Mcast frames should be sent to the
+ * OS stack as well as on the air.
+ */
+ skb1 = skb_copy(skb, GFP_ATOMIC);
+ else {
+ /*
+ * Search for a connected STA with dstMac
+ * as the Mac address. If found send the
+ * frame to it on the air else send the
+ * frame up the stack.
+ */
+ struct ath6kl_sta *conn = NULL;
+ conn = ath6kl_find_sta(ar, datap->h_dest);
+
+ if (conn && ar->intra_bss) {
+ skb1 = skb;
+ skb = NULL;
+ } else if (conn && !ar->intra_bss) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ }
+ }
+ if (skb1)
+ ath6kl_data_tx(skb1, ar->net_dev);
+ }
+
+ datap = (struct ethhdr *) skb->data;
+
+ if (is_unicast_ether_addr(datap->h_dest) &&
+ aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
+ is_amsdu, skb))
+ /* aggregation code will handle the skb */
+ return;
+
+ ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+}
+
+static void aggr_timeout(unsigned long arg)
+{
+ u8 i, j;
+ struct aggr_info *p_aggr = (struct aggr_info *) arg;
+ struct rxtid *rxtid;
+ struct rxtid_stats *stats;
+
+ for (i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = &p_aggr->rx_tid[i];
+ stats = &p_aggr->stat[i];
+
+ if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
+ continue;
+
+ stats->num_timeouts++;
+ ath6kl_dbg(ATH6KL_DBG_AGGR,
+ "aggr timeout (st %d end %d)\n",
+ rxtid->seq_next,
+ ((rxtid->seq_next + rxtid->hold_q_sz-1) &
+ ATH6KL_MAX_SEQ_NO));
+ aggr_deque_frms(p_aggr, i, 0, 0);
+ }
+
+ p_aggr->timer_scheduled = false;
+
+ for (i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = &p_aggr->rx_tid[i];
+
+ if (rxtid->aggr && rxtid->hold_q) {
+ for (j = 0; j < rxtid->hold_q_sz; j++) {
+ if (rxtid->hold_q[j].skb) {
+ p_aggr->timer_scheduled = true;
+ rxtid->timer_mon = true;
+ rxtid->progress = false;
+ break;
+ }
+ }
+
+ if (j >= rxtid->hold_q_sz)
+ rxtid->timer_mon = false;
+ }
+ }
+
+ if (p_aggr->timer_scheduled)
+ mod_timer(&p_aggr->timer,
+ jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
+}
+
+static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
+{
+ struct rxtid *rxtid;
+ struct rxtid_stats *stats;
+
+ if (!p_aggr || tid >= NUM_OF_TIDS)
+ return;
+
+ rxtid = &p_aggr->rx_tid[tid];
+ stats = &p_aggr->stat[tid];
+
+ if (rxtid->aggr)
+ aggr_deque_frms(p_aggr, tid, 0, 0);
+
+ rxtid->aggr = false;
+ rxtid->progress = false;
+ rxtid->timer_mon = false;
+ rxtid->win_sz = 0;
+ rxtid->seq_next = 0;
+ rxtid->hold_q_sz = 0;
+
+ kfree(rxtid->hold_q);
+ rxtid->hold_q = NULL;
+
+ memset(stats, 0, sizeof(struct rxtid_stats));
+}
+
+void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
+{
+ struct aggr_info *p_aggr = ar->aggr_cntxt;
+ struct rxtid *rxtid;
+ struct rxtid_stats *stats;
+ u16 hold_q_size;
+
+ if (!p_aggr)
+ return;
+
+ rxtid = &p_aggr->rx_tid[tid];
+ stats = &p_aggr->stat[tid];
+
+ if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
+ ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
+ __func__, win_sz, tid);
+
+ if (rxtid->aggr)
+ aggr_delete_tid_state(p_aggr, tid);
+
+ rxtid->seq_next = seq_no;
+ hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
+ rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
+ if (!rxtid->hold_q)
+ return;
+
+ rxtid->win_sz = win_sz;
+ rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
+ if (!skb_queue_empty(&rxtid->q))
+ return;
+
+ rxtid->aggr = true;
+}
+
+struct aggr_info *aggr_init(struct net_device *dev)
+{
+ struct aggr_info *p_aggr = NULL;
+ struct rxtid *rxtid;
+ u8 i;
+
+ p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
+ if (!p_aggr) {
+ ath6kl_err("failed to alloc memory for aggr_node\n");
+ return NULL;
+ }
+
+ p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
+ p_aggr->dev = dev;
+ init_timer(&p_aggr->timer);
+ p_aggr->timer.function = aggr_timeout;
+ p_aggr->timer.data = (unsigned long) p_aggr;
+
+ p_aggr->timer_scheduled = false;
+ skb_queue_head_init(&p_aggr->free_q);
+
+ ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
+
+ for (i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = &p_aggr->rx_tid[i];
+ rxtid->aggr = false;
+ rxtid->progress = false;
+ rxtid->timer_mon = false;
+ skb_queue_head_init(&rxtid->q);
+ spin_lock_init(&rxtid->lock);
+ }
+
+ return p_aggr;
+}
+
+void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
+{
+ struct aggr_info *p_aggr = ar->aggr_cntxt;
+ struct rxtid *rxtid;
+
+ if (!p_aggr)
+ return;
+
+ rxtid = &p_aggr->rx_tid[tid];
+
+ if (rxtid->aggr)
+ aggr_delete_tid_state(p_aggr, tid);
+}
+
+void aggr_reset_state(struct aggr_info *aggr_info)
+{
+ u8 tid;
+
+ for (tid = 0; tid < NUM_OF_TIDS; tid++)
+ aggr_delete_tid_state(aggr_info, tid);
+}
+
+/* clean up our amsdu buffer list */
+void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
+{
+ struct htc_packet *packet, *tmp_pkt;
+
+ spin_lock_bh(&ar->lock);
+ if (list_empty(&ar->amsdu_rx_buffer_queue)) {
+ spin_unlock_bh(&ar->lock);
+ return;
+ }
+
+ list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
+ list) {
+ list_del(&packet->list);
+ spin_unlock_bh(&ar->lock);
+ dev_kfree_skb(packet->pkt_cntxt);
+ spin_lock_bh(&ar->lock);
+ }
+
+ spin_unlock_bh(&ar->lock);
+}
+
+void aggr_module_destroy(struct aggr_info *aggr_info)
+{
+ struct rxtid *rxtid;
+ u8 i, k;
+
+ if (!aggr_info)
+ return;
+
+ if (aggr_info->timer_scheduled) {
+ del_timer(&aggr_info->timer);
+ aggr_info->timer_scheduled = false;
+ }
+
+ for (i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = &aggr_info->rx_tid[i];
+ if (rxtid->hold_q) {
+ for (k = 0; k < rxtid->hold_q_sz; k++)
+ dev_kfree_skb(rxtid->hold_q[k].skb);
+ kfree(rxtid->hold_q);
+ }
+
+ skb_queue_purge(&rxtid->q);
+ }
+
+ skb_queue_purge(&aggr_info->free_q);
+ kfree(aggr_info);
+}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
new file mode 100644
index 00000000000..a7de23cbd2c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -0,0 +1,3127 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include "core.h"
+#include "debug.h"
+#include "testmode.h"
+#include "../regd.h"
+#include "../regd_common.h"
+
+static int ath6kl_wmi_sync_point(struct wmi *wmi);
+
+static const s32 wmi_rate_tbl[][2] = {
+ /* {W/O SGI, with SGI} */
+ {1000, 1000},
+ {2000, 2000},
+ {5500, 5500},
+ {11000, 11000},
+ {6000, 6000},
+ {9000, 9000},
+ {12000, 12000},
+ {18000, 18000},
+ {24000, 24000},
+ {36000, 36000},
+ {48000, 48000},
+ {54000, 54000},
+ {6500, 7200},
+ {13000, 14400},
+ {19500, 21700},
+ {26000, 28900},
+ {39000, 43300},
+ {52000, 57800},
+ {58500, 65000},
+ {65000, 72200},
+ {13500, 15000},
+ {27000, 30000},
+ {40500, 45000},
+ {54000, 60000},
+ {81000, 90000},
+ {108000, 120000},
+ {121500, 135000},
+ {135000, 150000},
+ {0, 0}
+};
+
+/* 802.1d to AC mapping. Refer pg 57 of WMM-test-plan-v1.2 */
+static const u8 up_to_ac[] = {
+ WMM_AC_BE,
+ WMM_AC_BK,
+ WMM_AC_BK,
+ WMM_AC_BE,
+ WMM_AC_VI,
+ WMM_AC_VI,
+ WMM_AC_VO,
+ WMM_AC_VO,
+};
+
+void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id)
+{
+ if (WARN_ON(ep_id == ENDPOINT_UNUSED || ep_id >= ENDPOINT_MAX))
+ return;
+
+ wmi->ep_id = ep_id;
+}
+
+enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi)
+{
+ return wmi->ep_id;
+}
+
+/* Performs DIX to 802.3 encapsulation for transmit packets.
+ * Assumes the entire DIX header is contigous and that there is
+ * enough room in the buffer for a 802.3 mac header and LLC+SNAP headers.
+ */
+int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb)
+{
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+ struct ethhdr *eth_hdr;
+ size_t new_len;
+ __be16 type;
+ u8 *datap;
+ u16 size;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ size = sizeof(struct ath6kl_llc_snap_hdr) + sizeof(struct wmi_data_hdr);
+ if (skb_headroom(skb) < size)
+ return -ENOMEM;
+
+ eth_hdr = (struct ethhdr *) skb->data;
+ type = eth_hdr->h_proto;
+
+ if (!is_ethertype(be16_to_cpu(type))) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "%s: pkt is already in 802.3 format\n", __func__);
+ return 0;
+ }
+
+ new_len = skb->len - sizeof(*eth_hdr) + sizeof(*llc_hdr);
+
+ skb_push(skb, sizeof(struct ath6kl_llc_snap_hdr));
+ datap = skb->data;
+
+ eth_hdr->h_proto = cpu_to_be16(new_len);
+
+ memcpy(datap, eth_hdr, sizeof(*eth_hdr));
+
+ llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap + sizeof(*eth_hdr));
+ llc_hdr->dsap = 0xAA;
+ llc_hdr->ssap = 0xAA;
+ llc_hdr->cntl = 0x03;
+ llc_hdr->org_code[0] = 0x0;
+ llc_hdr->org_code[1] = 0x0;
+ llc_hdr->org_code[2] = 0x0;
+ llc_hdr->eth_type = type;
+
+ return 0;
+}
+
+static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
+ u8 *version, void *tx_meta_info)
+{
+ struct wmi_tx_meta_v1 *v1;
+ struct wmi_tx_meta_v2 *v2;
+
+ if (WARN_ON(skb == NULL || version == NULL))
+ return -EINVAL;
+
+ switch (*version) {
+ case WMI_META_VERSION_1:
+ skb_push(skb, WMI_MAX_TX_META_SZ);
+ v1 = (struct wmi_tx_meta_v1 *) skb->data;
+ v1->pkt_id = 0;
+ v1->rate_plcy_id = 0;
+ *version = WMI_META_VERSION_1;
+ break;
+ case WMI_META_VERSION_2:
+ skb_push(skb, WMI_MAX_TX_META_SZ);
+ v2 = (struct wmi_tx_meta_v2 *) skb->data;
+ memcpy(v2, (struct wmi_tx_meta_v2 *) tx_meta_info,
+ sizeof(struct wmi_tx_meta_v2));
+ break;
+ }
+
+ return 0;
+}
+
+int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
+ u8 msg_type, bool more_data,
+ enum wmi_data_hdr_data_type data_type,
+ u8 meta_ver, void *tx_meta_info)
+{
+ struct wmi_data_hdr *data_hdr;
+ int ret;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ if (tx_meta_info) {
+ ret = ath6kl_wmi_meta_add(wmi, skb, &meta_ver, tx_meta_info);
+ if (ret)
+ return ret;
+ }
+
+ skb_push(skb, sizeof(struct wmi_data_hdr));
+
+ data_hdr = (struct wmi_data_hdr *)skb->data;
+ memset(data_hdr, 0, sizeof(struct wmi_data_hdr));
+
+ data_hdr->info = msg_type << WMI_DATA_HDR_MSG_TYPE_SHIFT;
+ data_hdr->info |= data_type << WMI_DATA_HDR_DATA_TYPE_SHIFT;
+
+ if (more_data)
+ data_hdr->info |=
+ WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT;
+
+ data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
+ data_hdr->info3 = 0;
+
+ return 0;
+}
+
+static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
+{
+ struct iphdr *ip_hdr = (struct iphdr *) pkt;
+ u8 ip_pri;
+
+ /*
+ * Determine IPTOS priority
+ *
+ * IP-TOS - 8bits
+ * : DSCP(6-bits) ECN(2-bits)
+ * : DSCP - P2 P1 P0 X X X
+ * where (P2 P1 P0) form 802.1D
+ */
+ ip_pri = ip_hdr->tos >> 5;
+ ip_pri &= 0x7;
+
+ if ((layer2_pri & 0x7) > ip_pri)
+ return (u8) layer2_pri & 0x7;
+ else
+ return ip_pri;
+}
+
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
+ u32 layer2_priority, bool wmm_enabled,
+ u8 *ac)
+{
+ struct wmi_data_hdr *data_hdr;
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+ struct wmi_create_pstream_cmd cmd;
+ u32 meta_size, hdr_size;
+ u16 ip_type = IP_ETHERTYPE;
+ u8 stream_exist, usr_pri;
+ u8 traffic_class = WMM_AC_BE;
+ u8 *datap;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ datap = skb->data;
+ data_hdr = (struct wmi_data_hdr *) datap;
+
+ meta_size = ((le16_to_cpu(data_hdr->info2) >> WMI_DATA_HDR_META_SHIFT) &
+ WMI_DATA_HDR_META_MASK) ? WMI_MAX_TX_META_SZ : 0;
+
+ if (!wmm_enabled) {
+ /* If WMM is disabled all traffic goes as BE traffic */
+ usr_pri = 0;
+ } else {
+ hdr_size = sizeof(struct ethhdr);
+
+ llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap +
+ sizeof(struct
+ wmi_data_hdr) +
+ meta_size + hdr_size);
+
+ if (llc_hdr->eth_type == htons(ip_type)) {
+ /*
+ * Extract the endpoint info from the TOS field
+ * in the IP header.
+ */
+ usr_pri =
+ ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
+ sizeof(struct ath6kl_llc_snap_hdr),
+ layer2_priority);
+ } else
+ usr_pri = layer2_priority & 0x7;
+ }
+
+ /* workaround for WMM S5 */
+ if ((wmi->traffic_class == WMM_AC_VI) &&
+ ((usr_pri == 5) || (usr_pri == 4)))
+ usr_pri = 1;
+
+ /* Convert user priority to traffic class */
+ traffic_class = up_to_ac[usr_pri & 0x7];
+
+ wmi_data_hdr_set_up(data_hdr, usr_pri);
+
+ spin_lock_bh(&wmi->lock);
+ stream_exist = wmi->fat_pipe_exist;
+ spin_unlock_bh(&wmi->lock);
+
+ if (!(stream_exist & (1 << traffic_class))) {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.traffic_class = traffic_class;
+ cmd.user_pri = usr_pri;
+ cmd.inactivity_int =
+ cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT);
+ /* Implicit streams are created with TSID 0xFF */
+ cmd.tsid = WMI_IMPLICIT_PSTREAM;
+ ath6kl_wmi_create_pstream_cmd(wmi, &cmd);
+ }
+
+ *ac = traffic_class;
+
+ return 0;
+}
+
+int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
+{
+ struct ieee80211_hdr_3addr *pwh, wh;
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+ struct ethhdr eth_hdr;
+ u32 hdr_size;
+ u8 *datap;
+ __le16 sub_type;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ datap = skb->data;
+ pwh = (struct ieee80211_hdr_3addr *) datap;
+
+ sub_type = pwh->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
+
+ memcpy((u8 *) &wh, datap, sizeof(struct ieee80211_hdr_3addr));
+
+ /* Strip off the 802.11 header */
+ if (sub_type == cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+ hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
+ sizeof(u32));
+ skb_pull(skb, hdr_size);
+ } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA))
+ skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
+
+ datap = skb->data;
+ llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);
+
+ memset(&eth_hdr, 0, sizeof(eth_hdr));
+ eth_hdr.h_proto = llc_hdr->eth_type;
+
+ switch ((le16_to_cpu(wh.frame_control)) &
+ (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
+ case 0:
+ memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
+ memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
+ break;
+ case IEEE80211_FCTL_TODS:
+ memcpy(eth_hdr.h_dest, wh.addr3, ETH_ALEN);
+ memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
+ break;
+ case IEEE80211_FCTL_FROMDS:
+ memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
+ memcpy(eth_hdr.h_source, wh.addr3, ETH_ALEN);
+ break;
+ case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
+ break;
+ }
+
+ skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
+ skb_push(skb, sizeof(eth_hdr));
+
+ datap = skb->data;
+
+ memcpy(datap, &eth_hdr, sizeof(eth_hdr));
+
+ return 0;
+}
+
+/*
+ * Performs 802.3 to DIX encapsulation for received packets.
+ * Assumes the entire 802.3 header is contigous.
+ */
+int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb)
+{
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+ struct ethhdr eth_hdr;
+ u8 *datap;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ datap = skb->data;
+
+ memcpy(&eth_hdr, datap, sizeof(eth_hdr));
+
+ llc_hdr = (struct ath6kl_llc_snap_hdr *) (datap + sizeof(eth_hdr));
+ eth_hdr.h_proto = llc_hdr->eth_type;
+
+ skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
+ datap = skb->data;
+
+ memcpy(datap, &eth_hdr, sizeof(eth_hdr));
+
+ return 0;
+}
+
+static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len)
+{
+ struct tx_complete_msg_v1 *msg_v1;
+ struct wmi_tx_complete_event *evt;
+ int index;
+ u16 size;
+
+ evt = (struct wmi_tx_complete_event *) datap;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "comp: %d %d %d\n",
+ evt->num_msg, evt->msg_len, evt->msg_type);
+
+ if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_WMI))
+ return 0;
+
+ for (index = 0; index < evt->num_msg; index++) {
+ size = sizeof(struct wmi_tx_complete_event) +
+ (index * sizeof(struct tx_complete_msg_v1));
+ msg_v1 = (struct tx_complete_msg_v1 *)(datap + size);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "msg: %d %d %d %d\n",
+ msg_v1->status, msg_v1->pkt_id,
+ msg_v1->rate_idx, msg_v1->ack_failures);
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
+ int len)
+{
+ struct wmi_remain_on_chnl_event *ev;
+ u32 freq;
+ u32 dur;
+ struct ieee80211_channel *chan;
+ struct ath6kl *ar = wmi->parent_dev;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_remain_on_chnl_event *) datap;
+ freq = le32_to_cpu(ev->freq);
+ dur = le32_to_cpu(ev->duration);
+ ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: freq=%u dur=%u\n",
+ freq, dur);
+ chan = ieee80211_get_channel(ar->wdev->wiphy, freq);
+ if (!chan) {
+ ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: Unknown channel "
+ "(freq=%u)\n", freq);
+ return -EINVAL;
+ }
+ cfg80211_ready_on_channel(ar->net_dev, 1, chan, NL80211_CHAN_NO_HT,
+ dur, GFP_ATOMIC);
+
+ return 0;
+}
+
+static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
+ u8 *datap, int len)
+{
+ struct wmi_cancel_remain_on_chnl_event *ev;
+ u32 freq;
+ u32 dur;
+ struct ieee80211_channel *chan;
+ struct ath6kl *ar = wmi->parent_dev;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_cancel_remain_on_chnl_event *) datap;
+ freq = le32_to_cpu(ev->freq);
+ dur = le32_to_cpu(ev->duration);
+ ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: freq=%u dur=%u "
+ "status=%u\n", freq, dur, ev->status);
+ chan = ieee80211_get_channel(ar->wdev->wiphy, freq);
+ if (!chan) {
+ ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: Unknown "
+ "channel (freq=%u)\n", freq);
+ return -EINVAL;
+ }
+ cfg80211_remain_on_channel_expired(ar->net_dev, 1, chan,
+ NL80211_CHAN_NO_HT, GFP_ATOMIC);
+
+ return 0;
+}
+
+static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_tx_status_event *ev;
+ u32 id;
+ struct ath6kl *ar = wmi->parent_dev;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_tx_status_event *) datap;
+ id = le32_to_cpu(ev->id);
+ ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n",
+ id, ev->ack_status);
+ if (wmi->last_mgmt_tx_frame) {
+ cfg80211_mgmt_tx_status(ar->net_dev, id,
+ wmi->last_mgmt_tx_frame,
+ wmi->last_mgmt_tx_frame_len,
+ !!ev->ack_status, GFP_ATOMIC);
+ kfree(wmi->last_mgmt_tx_frame);
+ wmi->last_mgmt_tx_frame = NULL;
+ wmi->last_mgmt_tx_frame_len = 0;
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_p2p_rx_probe_req_event *ev;
+ u32 freq;
+ u16 dlen;
+ struct ath6kl *ar = wmi->parent_dev;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_p2p_rx_probe_req_event *) datap;
+ freq = le32_to_cpu(ev->freq);
+ dlen = le16_to_cpu(ev->len);
+ if (datap + len < ev->data + dlen) {
+ ath6kl_err("invalid wmi_p2p_rx_probe_req_event: "
+ "len=%d dlen=%u\n", len, dlen);
+ return -EINVAL;
+ }
+ ath6kl_dbg(ATH6KL_DBG_WMI, "rx_probe_req: len=%u freq=%u "
+ "probe_req_report=%d\n",
+ dlen, freq, ar->probe_req_report);
+
+ if (ar->probe_req_report || ar->nw_type == AP_NETWORK)
+ cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC);
+
+ return 0;
+}
+
+static int ath6kl_wmi_p2p_capabilities_event_rx(u8 *datap, int len)
+{
+ struct wmi_p2p_capabilities_event *ev;
+ u16 dlen;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_p2p_capabilities_event *) datap;
+ dlen = le16_to_cpu(ev->len);
+ ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_capab: len=%u\n", dlen);
+
+ return 0;
+}
+
+static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_rx_action_event *ev;
+ u32 freq;
+ u16 dlen;
+ struct ath6kl *ar = wmi->parent_dev;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_rx_action_event *) datap;
+ freq = le32_to_cpu(ev->freq);
+ dlen = le16_to_cpu(ev->len);
+ if (datap + len < ev->data + dlen) {
+ ath6kl_err("invalid wmi_rx_action_event: "
+ "len=%d dlen=%u\n", len, dlen);
+ return -EINVAL;
+ }
+ ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
+ cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC);
+
+ return 0;
+}
+
+static int ath6kl_wmi_p2p_info_event_rx(u8 *datap, int len)
+{
+ struct wmi_p2p_info_event *ev;
+ u32 flags;
+ u16 dlen;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_p2p_info_event *) datap;
+ flags = le32_to_cpu(ev->info_req_flags);
+ dlen = le16_to_cpu(ev->len);
+ ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: flags=%x len=%d\n", flags, dlen);
+
+ if (flags & P2P_FLAG_CAPABILITIES_REQ) {
+ struct wmi_p2p_capabilities *cap;
+ if (dlen < sizeof(*cap))
+ return -EINVAL;
+ cap = (struct wmi_p2p_capabilities *) ev->data;
+ ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: GO Power Save = %d\n",
+ cap->go_power_save);
+ }
+
+ if (flags & P2P_FLAG_MACADDR_REQ) {
+ struct wmi_p2p_macaddr *mac;
+ if (dlen < sizeof(*mac))
+ return -EINVAL;
+ mac = (struct wmi_p2p_macaddr *) ev->data;
+ ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: MAC Address = %pM\n",
+ mac->mac_addr);
+ }
+
+ if (flags & P2P_FLAG_HMODEL_REQ) {
+ struct wmi_p2p_hmodel *mod;
+ if (dlen < sizeof(*mod))
+ return -EINVAL;
+ mod = (struct wmi_p2p_hmodel *) ev->data;
+ ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: P2P Model = %d (%s)\n",
+ mod->p2p_model,
+ mod->p2p_model ? "host" : "firmware");
+ }
+ return 0;
+}
+
+static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size)
+{
+ struct sk_buff *skb;
+
+ skb = ath6kl_buf_alloc(size);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, size);
+ if (size)
+ memset(skb->data, 0, size);
+
+ return skb;
+}
+
+/* Send a "simple" wmi command -- one with no arguments */
+static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(0);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, cmd_id, NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_ready_event_2 *ev = (struct wmi_ready_event_2 *) datap;
+
+ if (len < sizeof(struct wmi_ready_event_2))
+ return -EINVAL;
+
+ wmi->ready = true;
+ ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
+ le32_to_cpu(ev->sw_version),
+ le32_to_cpu(ev->abi_version));
+
+ return 0;
+}
+
+/*
+ * Mechanism to modify the roaming behavior in the firmware. The lower rssi
+ * at which the station has to roam can be passed with
+ * WMI_SET_LRSSI_SCAN_PARAMS. Subtract 96 from RSSI to get the signal level
+ * in dBm.
+ */
+int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi)
+{
+ struct sk_buff *skb;
+ struct roam_ctrl_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct roam_ctrl_cmd *) skb->data;
+
+ cmd->info.params.lrssi_scan_period = cpu_to_le16(DEF_LRSSI_SCAN_PERIOD);
+ cmd->info.params.lrssi_scan_threshold = a_cpu_to_sle16(lrssi +
+ DEF_SCAN_FOR_ROAM_INTVL);
+ cmd->info.params.lrssi_roam_threshold = a_cpu_to_sle16(lrssi);
+ cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR;
+ cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS;
+
+ ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_ROAM_CTRL_CMDID, NO_SYNC_WMIFLAG);
+
+ return 0;
+}
+
+static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_connect_event *ev;
+ u8 *pie, *peie;
+ struct ath6kl *ar = wmi->parent_dev;
+
+ if (len < sizeof(struct wmi_connect_event))
+ return -EINVAL;
+
+ ev = (struct wmi_connect_event *) datap;
+
+ if (ar->nw_type == AP_NETWORK) {
+ /* AP mode start/STA connected event */
+ struct net_device *dev = ar->net_dev;
+ if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) {
+ ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM "
+ "(AP started)\n",
+ __func__, le16_to_cpu(ev->u.ap_bss.ch),
+ ev->u.ap_bss.bssid);
+ ath6kl_connect_ap_mode_bss(
+ ar, le16_to_cpu(ev->u.ap_bss.ch));
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WMI, "%s: aid %u mac_addr %pM "
+ "auth=%u keymgmt=%u cipher=%u apsd_info=%u "
+ "(STA connected)\n",
+ __func__, ev->u.ap_sta.aid,
+ ev->u.ap_sta.mac_addr,
+ ev->u.ap_sta.auth,
+ ev->u.ap_sta.keymgmt,
+ le16_to_cpu(ev->u.ap_sta.cipher),
+ ev->u.ap_sta.apsd_info);
+ ath6kl_connect_ap_mode_sta(
+ ar, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
+ ev->u.ap_sta.keymgmt,
+ le16_to_cpu(ev->u.ap_sta.cipher),
+ ev->u.ap_sta.auth, ev->assoc_req_len,
+ ev->assoc_info + ev->beacon_ie_len);
+ }
+ return 0;
+ }
+
+ /* STA/IBSS mode connection event */
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "wmi event connect freq %d bssid %pM listen_intvl %d beacon_intvl %d type %d\n",
+ le16_to_cpu(ev->u.sta.ch), ev->u.sta.bssid,
+ le16_to_cpu(ev->u.sta.listen_intvl),
+ le16_to_cpu(ev->u.sta.beacon_intvl),
+ le32_to_cpu(ev->u.sta.nw_type));
+
+ /* Start of assoc rsp IEs */
+ pie = ev->assoc_info + ev->beacon_ie_len +
+ ev->assoc_req_len + (sizeof(u16) * 3); /* capinfo, status, aid */
+
+ /* End of assoc rsp IEs */
+ peie = ev->assoc_info + ev->beacon_ie_len + ev->assoc_req_len +
+ ev->assoc_resp_len;
+
+ while (pie < peie) {
+ switch (*pie) {
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (pie[1] > 3 && pie[2] == 0x00 && pie[3] == 0x50 &&
+ pie[4] == 0xf2 && pie[5] == WMM_OUI_TYPE) {
+ /* WMM OUT (00:50:F2) */
+ if (pie[1] > 5
+ && pie[6] == WMM_PARAM_OUI_SUBTYPE)
+ wmi->is_wmm_enabled = true;
+ }
+ break;
+ }
+
+ if (wmi->is_wmm_enabled)
+ break;
+
+ pie += pie[1] + 2;
+ }
+
+ ath6kl_connect_event(wmi->parent_dev, le16_to_cpu(ev->u.sta.ch),
+ ev->u.sta.bssid,
+ le16_to_cpu(ev->u.sta.listen_intvl),
+ le16_to_cpu(ev->u.sta.beacon_intvl),
+ le32_to_cpu(ev->u.sta.nw_type),
+ ev->beacon_ie_len, ev->assoc_req_len,
+ ev->assoc_resp_len, ev->assoc_info);
+
+ return 0;
+}
+
+static struct country_code_to_enum_rd *
+ath6kl_regd_find_country(u16 countryCode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (allCountries[i].countryCode == countryCode)
+ return &allCountries[i];
+ }
+
+ return NULL;
+}
+
+static struct reg_dmn_pair_mapping *
+ath6kl_get_regpair(u16 regdmn)
+{
+ int i;
+
+ if (regdmn == NO_ENUMRD)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
+ if (regDomainPairs[i].regDmnEnum == regdmn)
+ return &regDomainPairs[i];
+ }
+
+ return NULL;
+}
+
+static struct country_code_to_enum_rd *
+ath6kl_regd_find_country_by_rd(u16 regdmn)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (allCountries[i].regDmnEnum == regdmn)
+ return &allCountries[i];
+ }
+
+ return NULL;
+}
+
+static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
+{
+
+ struct ath6kl_wmi_regdomain *ev;
+ struct country_code_to_enum_rd *country = NULL;
+ struct reg_dmn_pair_mapping *regpair = NULL;
+ char alpha2[2];
+ u32 reg_code;
+
+ ev = (struct ath6kl_wmi_regdomain *) datap;
+ reg_code = le32_to_cpu(ev->reg_code);
+
+ if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG)
+ country = ath6kl_regd_find_country((u16) reg_code);
+ else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
+
+ regpair = ath6kl_get_regpair((u16) reg_code);
+ country = ath6kl_regd_find_country_by_rd((u16) reg_code);
+ ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
+ regpair->regDmnEnum);
+ }
+
+ if (country) {
+ alpha2[0] = country->isoName[0];
+ alpha2[1] = country->isoName[1];
+
+ regulatory_hint(wmi->parent_dev->wdev->wiphy, alpha2);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n",
+ alpha2[0], alpha2[1]);
+ }
+}
+
+static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_disconnect_event *ev;
+ wmi->traffic_class = 100;
+
+ if (len < sizeof(struct wmi_disconnect_event))
+ return -EINVAL;
+
+ ev = (struct wmi_disconnect_event *) datap;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "wmi event disconnect proto_reason %d bssid %pM wmi_reason %d assoc_resp_len %d\n",
+ le16_to_cpu(ev->proto_reason_status), ev->bssid,
+ ev->disconn_reason, ev->assoc_resp_len);
+
+ wmi->is_wmm_enabled = false;
+ wmi->pair_crypto_type = NONE_CRYPT;
+ wmi->grp_crypto_type = NONE_CRYPT;
+
+ ath6kl_disconnect_event(wmi->parent_dev, ev->disconn_reason,
+ ev->bssid, ev->assoc_resp_len, ev->assoc_info,
+ le16_to_cpu(ev->proto_reason_status));
+
+ return 0;
+}
+
+static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_peer_node_event *ev;
+
+ if (len < sizeof(struct wmi_peer_node_event))
+ return -EINVAL;
+
+ ev = (struct wmi_peer_node_event *) datap;
+
+ if (ev->event_code == PEER_NODE_JOIN_EVENT)
+ ath6kl_dbg(ATH6KL_DBG_WMI, "joined node with mac addr: %pM\n",
+ ev->peer_mac_addr);
+ else if (ev->event_code == PEER_NODE_LEAVE_EVENT)
+ ath6kl_dbg(ATH6KL_DBG_WMI, "left node with mac addr: %pM\n",
+ ev->peer_mac_addr);
+
+ return 0;
+}
+
+static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_tkip_micerr_event *ev;
+
+ if (len < sizeof(struct wmi_tkip_micerr_event))
+ return -EINVAL;
+
+ ev = (struct wmi_tkip_micerr_event *) datap;
+
+ ath6kl_tkip_micerr_event(wmi->parent_dev, ev->key_id, ev->is_mcast);
+
+ return 0;
+}
+
+static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_bss_info_hdr2 *bih;
+ u8 *buf;
+ struct ieee80211_channel *channel;
+ struct ath6kl *ar = wmi->parent_dev;
+ struct ieee80211_mgmt *mgmt;
+ struct cfg80211_bss *bss;
+
+ if (len <= sizeof(struct wmi_bss_info_hdr2))
+ return -EINVAL;
+
+ bih = (struct wmi_bss_info_hdr2 *) datap;
+ buf = datap + sizeof(struct wmi_bss_info_hdr2);
+ len -= sizeof(struct wmi_bss_info_hdr2);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "bss info evt - ch %u, snr %d, rssi %d, bssid \"%pM\" "
+ "frame_type=%d\n",
+ bih->ch, bih->snr, bih->snr - 95, bih->bssid,
+ bih->frame_type);
+
+ if (bih->frame_type != BEACON_FTYPE &&
+ bih->frame_type != PROBERESP_FTYPE)
+ return 0; /* Only update BSS table for now */
+
+ if (bih->frame_type == BEACON_FTYPE &&
+ test_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag)) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ }
+
+ channel = ieee80211_get_channel(ar->wdev->wiphy, le16_to_cpu(bih->ch));
+ if (channel == NULL)
+ return -EINVAL;
+
+ if (len < 8 + 2 + 2)
+ return -EINVAL;
+
+ if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &ar->flag) &&
+ memcmp(bih->bssid, ar->bssid, ETH_ALEN) == 0) {
+ const u8 *tim;
+ tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2,
+ len - 8 - 2 - 2);
+ if (tim && tim[1] >= 2) {
+ ar->assoc_bss_dtim_period = tim[3];
+ set_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+ }
+ }
+
+ /*
+ * In theory, use of cfg80211_inform_bss() would be more natural here
+ * since we do not have the full frame. However, at least for now,
+ * cfg80211 can only distinguish Beacon and Probe Response frames from
+ * each other when using cfg80211_inform_bss_frame(), so let's build a
+ * fake IEEE 802.11 header to be able to take benefit of this.
+ */
+ mgmt = kmalloc(24 + len, GFP_ATOMIC);
+ if (mgmt == NULL)
+ return -EINVAL;
+
+ if (bih->frame_type == BEACON_FTYPE) {
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_BEACON);
+ memset(mgmt->da, 0xff, ETH_ALEN);
+ } else {
+ struct net_device *dev = ar->net_dev;
+
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_RESP);
+ memcpy(mgmt->da, dev->dev_addr, ETH_ALEN);
+ }
+ mgmt->duration = cpu_to_le16(0);
+ memcpy(mgmt->sa, bih->bssid, ETH_ALEN);
+ memcpy(mgmt->bssid, bih->bssid, ETH_ALEN);
+ mgmt->seq_ctrl = cpu_to_le16(0);
+
+ memcpy(&mgmt->u.beacon, buf, len);
+
+ bss = cfg80211_inform_bss_frame(ar->wdev->wiphy, channel, mgmt,
+ 24 + len, (bih->snr - 95) * 100,
+ GFP_ATOMIC);
+ kfree(mgmt);
+ if (bss == NULL)
+ return -ENOMEM;
+ cfg80211_put_bss(bss);
+
+ return 0;
+}
+
+/* Inactivity timeout of a fatpipe(pstream) at the target */
+static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
+ int len)
+{
+ struct wmi_pstream_timeout_event *ev;
+
+ if (len < sizeof(struct wmi_pstream_timeout_event))
+ return -EINVAL;
+
+ ev = (struct wmi_pstream_timeout_event *) datap;
+
+ /*
+ * When the pstream (fat pipe == AC) timesout, it means there were
+ * no thinStreams within this pstream & it got implicitly created
+ * due to data flow on this AC. We start the inactivity timer only
+ * for implicitly created pstream. Just reset the host state.
+ */
+ spin_lock_bh(&wmi->lock);
+ wmi->stream_exist_for_ac[ev->traffic_class] = 0;
+ wmi->fat_pipe_exist &= ~(1 << ev->traffic_class);
+ spin_unlock_bh(&wmi->lock);
+
+ /* Indicate inactivity to driver layer for this fatpipe (pstream) */
+ ath6kl_indicate_tx_activity(wmi->parent_dev, ev->traffic_class, false);
+
+ return 0;
+}
+
+static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_bit_rate_reply *reply;
+ s32 rate;
+ u32 sgi, index;
+
+ if (len < sizeof(struct wmi_bit_rate_reply))
+ return -EINVAL;
+
+ reply = (struct wmi_bit_rate_reply *) datap;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "rateindex %d\n", reply->rate_index);
+
+ if (reply->rate_index == (s8) RATE_AUTO) {
+ rate = RATE_AUTO;
+ } else {
+ index = reply->rate_index & 0x7f;
+ sgi = (reply->rate_index & 0x80) ? 1 : 0;
+ rate = wmi_rate_tbl[index][sgi];
+ }
+
+ ath6kl_wakeup_event(wmi->parent_dev);
+
+ return 0;
+}
+
+static int ath6kl_wmi_tcmd_test_report_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ ath6kl_tm_rx_report_event(wmi->parent_dev, datap, len);
+
+ return 0;
+}
+
+static int ath6kl_wmi_ratemask_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ if (len < sizeof(struct wmi_fix_rates_reply))
+ return -EINVAL;
+
+ ath6kl_wakeup_event(wmi->parent_dev);
+
+ return 0;
+}
+
+static int ath6kl_wmi_ch_list_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ if (len < sizeof(struct wmi_channel_list_reply))
+ return -EINVAL;
+
+ ath6kl_wakeup_event(wmi->parent_dev);
+
+ return 0;
+}
+
+static int ath6kl_wmi_tx_pwr_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_tx_pwr_reply *reply;
+
+ if (len < sizeof(struct wmi_tx_pwr_reply))
+ return -EINVAL;
+
+ reply = (struct wmi_tx_pwr_reply *) datap;
+ ath6kl_txpwr_rx_evt(wmi->parent_dev, reply->dbM);
+
+ return 0;
+}
+
+static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ if (len < sizeof(struct wmi_get_keepalive_cmd))
+ return -EINVAL;
+
+ ath6kl_wakeup_event(wmi->parent_dev);
+
+ return 0;
+}
+
+static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_scan_complete_event *ev;
+
+ ev = (struct wmi_scan_complete_event *) datap;
+
+ ath6kl_scan_complete_evt(wmi->parent_dev, a_sle32_to_cpu(ev->status));
+ wmi->is_probe_ssid = false;
+
+ return 0;
+}
+
+static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
+ int len)
+{
+ struct wmi_neighbor_report_event *ev;
+ u8 i;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+ ev = (struct wmi_neighbor_report_event *) datap;
+ if (sizeof(*ev) + ev->num_neighbors * sizeof(struct wmi_neighbor_info)
+ > len) {
+ ath6kl_dbg(ATH6KL_DBG_WMI, "truncated neighbor event "
+ "(num=%d len=%d)\n", ev->num_neighbors, len);
+ return -EINVAL;
+ }
+ for (i = 0; i < ev->num_neighbors; i++) {
+ ath6kl_dbg(ATH6KL_DBG_WMI, "neighbor %d/%d - %pM 0x%x\n",
+ i + 1, ev->num_neighbors, ev->neighbor[i].bssid,
+ ev->neighbor[i].bss_flags);
+ cfg80211_pmksa_candidate_notify(wmi->parent_dev->net_dev, i,
+ ev->neighbor[i].bssid,
+ !!(ev->neighbor[i].bss_flags &
+ WMI_PREAUTH_CAPABLE_BSS),
+ GFP_ATOMIC);
+ }
+
+ return 0;
+}
+
+/*
+ * Target is reporting a programming error. This is for
+ * developer aid only. Target only checks a few common violations
+ * and it is responsibility of host to do all error checking.
+ * Behavior of target after wmi error event is undefined.
+ * A reset is recommended.
+ */
+static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ const char *type = "unknown error";
+ struct wmi_cmd_error_event *ev;
+ ev = (struct wmi_cmd_error_event *) datap;
+
+ switch (ev->err_code) {
+ case INVALID_PARAM:
+ type = "invalid parameter";
+ break;
+ case ILLEGAL_STATE:
+ type = "invalid state";
+ break;
+ case INTERNAL_ERROR:
+ type = "internal error";
+ break;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "programming error, cmd=%d %s\n",
+ ev->cmd_id, type);
+
+ return 0;
+}
+
+static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ ath6kl_tgt_stats_event(wmi->parent_dev, datap, len);
+
+ return 0;
+}
+
+static u8 ath6kl_wmi_get_upper_threshold(s16 rssi,
+ struct sq_threshold_params *sq_thresh,
+ u32 size)
+{
+ u32 index;
+ u8 threshold = (u8) sq_thresh->upper_threshold[size - 1];
+
+ /* The list is already in sorted order. Get the next lower value */
+ for (index = 0; index < size; index++) {
+ if (rssi < sq_thresh->upper_threshold[index]) {
+ threshold = (u8) sq_thresh->upper_threshold[index];
+ break;
+ }
+ }
+
+ return threshold;
+}
+
+static u8 ath6kl_wmi_get_lower_threshold(s16 rssi,
+ struct sq_threshold_params *sq_thresh,
+ u32 size)
+{
+ u32 index;
+ u8 threshold = (u8) sq_thresh->lower_threshold[size - 1];
+
+ /* The list is already in sorted order. Get the next lower value */
+ for (index = 0; index < size; index++) {
+ if (rssi > sq_thresh->lower_threshold[index]) {
+ threshold = (u8) sq_thresh->lower_threshold[index];
+ break;
+ }
+ }
+
+ return threshold;
+}
+
+static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi,
+ struct wmi_rssi_threshold_params_cmd *rssi_cmd)
+{
+ struct sk_buff *skb;
+ struct wmi_rssi_threshold_params_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data;
+ memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd));
+
+ return ath6kl_wmi_cmd_send(wmi, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
+ int len)
+{
+ struct wmi_rssi_threshold_event *reply;
+ struct wmi_rssi_threshold_params_cmd cmd;
+ struct sq_threshold_params *sq_thresh;
+ enum wmi_rssi_threshold_val new_threshold;
+ u8 upper_rssi_threshold, lower_rssi_threshold;
+ s16 rssi;
+ int ret;
+
+ if (len < sizeof(struct wmi_rssi_threshold_event))
+ return -EINVAL;
+
+ reply = (struct wmi_rssi_threshold_event *) datap;
+ new_threshold = (enum wmi_rssi_threshold_val) reply->range;
+ rssi = a_sle16_to_cpu(reply->rssi);
+
+ sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_RSSI];
+
+ /*
+ * Identify the threshold breached and communicate that to the app.
+ * After that install a new set of thresholds based on the signal
+ * quality reported by the target
+ */
+ if (new_threshold) {
+ /* Upper threshold breached */
+ if (rssi < sq_thresh->upper_threshold[0]) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "spurious upper rssi threshold event: %d\n",
+ rssi);
+ } else if ((rssi < sq_thresh->upper_threshold[1]) &&
+ (rssi >= sq_thresh->upper_threshold[0])) {
+ new_threshold = WMI_RSSI_THRESHOLD1_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[2]) &&
+ (rssi >= sq_thresh->upper_threshold[1])) {
+ new_threshold = WMI_RSSI_THRESHOLD2_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[3]) &&
+ (rssi >= sq_thresh->upper_threshold[2])) {
+ new_threshold = WMI_RSSI_THRESHOLD3_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[4]) &&
+ (rssi >= sq_thresh->upper_threshold[3])) {
+ new_threshold = WMI_RSSI_THRESHOLD4_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[5]) &&
+ (rssi >= sq_thresh->upper_threshold[4])) {
+ new_threshold = WMI_RSSI_THRESHOLD5_ABOVE;
+ } else if (rssi >= sq_thresh->upper_threshold[5]) {
+ new_threshold = WMI_RSSI_THRESHOLD6_ABOVE;
+ }
+ } else {
+ /* Lower threshold breached */
+ if (rssi > sq_thresh->lower_threshold[0]) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "spurious lower rssi threshold event: %d %d\n",
+ rssi, sq_thresh->lower_threshold[0]);
+ } else if ((rssi > sq_thresh->lower_threshold[1]) &&
+ (rssi <= sq_thresh->lower_threshold[0])) {
+ new_threshold = WMI_RSSI_THRESHOLD6_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[2]) &&
+ (rssi <= sq_thresh->lower_threshold[1])) {
+ new_threshold = WMI_RSSI_THRESHOLD5_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[3]) &&
+ (rssi <= sq_thresh->lower_threshold[2])) {
+ new_threshold = WMI_RSSI_THRESHOLD4_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[4]) &&
+ (rssi <= sq_thresh->lower_threshold[3])) {
+ new_threshold = WMI_RSSI_THRESHOLD3_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[5]) &&
+ (rssi <= sq_thresh->lower_threshold[4])) {
+ new_threshold = WMI_RSSI_THRESHOLD2_BELOW;
+ } else if (rssi <= sq_thresh->lower_threshold[5]) {
+ new_threshold = WMI_RSSI_THRESHOLD1_BELOW;
+ }
+ }
+
+ /* Calculate and install the next set of thresholds */
+ lower_rssi_threshold = ath6kl_wmi_get_lower_threshold(rssi, sq_thresh,
+ sq_thresh->lower_threshold_valid_count);
+ upper_rssi_threshold = ath6kl_wmi_get_upper_threshold(rssi, sq_thresh,
+ sq_thresh->upper_threshold_valid_count);
+
+ /* Issue a wmi command to install the thresholds */
+ cmd.thresh_above1_val = a_cpu_to_sle16(upper_rssi_threshold);
+ cmd.thresh_below1_val = a_cpu_to_sle16(lower_rssi_threshold);
+ cmd.weight = sq_thresh->weight;
+ cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval);
+
+ ret = ath6kl_wmi_send_rssi_threshold_params(wmi, &cmd);
+ if (ret) {
+ ath6kl_err("unable to configure rssi thresholds\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_cac_event *reply;
+ struct ieee80211_tspec_ie *ts;
+ u16 active_tsids, tsinfo;
+ u8 tsid, index;
+ u8 ts_id;
+
+ if (len < sizeof(struct wmi_cac_event))
+ return -EINVAL;
+
+ reply = (struct wmi_cac_event *) datap;
+
+ if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
+ (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
+
+ ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
+ tsinfo = le16_to_cpu(ts->tsinfo);
+ tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
+ IEEE80211_WMM_IE_TSPEC_TID_MASK;
+
+ ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, tsid);
+ } else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
+ /*
+ * Following assumes that there is only one outstanding
+ * ADDTS request when this event is received
+ */
+ spin_lock_bh(&wmi->lock);
+ active_tsids = wmi->stream_exist_for_ac[reply->ac];
+ spin_unlock_bh(&wmi->lock);
+
+ for (index = 0; index < sizeof(active_tsids) * 8; index++) {
+ if ((active_tsids >> index) & 1)
+ break;
+ }
+ if (index < (sizeof(active_tsids) * 8))
+ ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, index);
+ }
+
+ /*
+ * Clear active tsids and Add missing handling
+ * for delete qos stream from AP
+ */
+ else if (reply->cac_indication == CAC_INDICATION_DELETE) {
+
+ ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
+ tsinfo = le16_to_cpu(ts->tsinfo);
+ ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
+ IEEE80211_WMM_IE_TSPEC_TID_MASK);
+
+ spin_lock_bh(&wmi->lock);
+ wmi->stream_exist_for_ac[reply->ac] &= ~(1 << ts_id);
+ active_tsids = wmi->stream_exist_for_ac[reply->ac];
+ spin_unlock_bh(&wmi->lock);
+
+ /* Indicate stream inactivity to driver layer only if all tsids
+ * within this AC are deleted.
+ */
+ if (!active_tsids) {
+ ath6kl_indicate_tx_activity(wmi->parent_dev, reply->ac,
+ false);
+ wmi->fat_pipe_exist &= ~(1 << reply->ac);
+ }
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
+ struct wmi_snr_threshold_params_cmd *snr_cmd)
+{
+ struct sk_buff *skb;
+ struct wmi_snr_threshold_params_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_snr_threshold_params_cmd *) skb->data;
+ memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd));
+
+ return ath6kl_wmi_cmd_send(wmi, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
+ int len)
+{
+ struct wmi_snr_threshold_event *reply;
+ struct sq_threshold_params *sq_thresh;
+ struct wmi_snr_threshold_params_cmd cmd;
+ enum wmi_snr_threshold_val new_threshold;
+ u8 upper_snr_threshold, lower_snr_threshold;
+ s16 snr;
+ int ret;
+
+ if (len < sizeof(struct wmi_snr_threshold_event))
+ return -EINVAL;
+
+ reply = (struct wmi_snr_threshold_event *) datap;
+
+ new_threshold = (enum wmi_snr_threshold_val) reply->range;
+ snr = reply->snr;
+
+ sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_SNR];
+
+ /*
+ * Identify the threshold breached and communicate that to the app.
+ * After that install a new set of thresholds based on the signal
+ * quality reported by the target.
+ */
+ if (new_threshold) {
+ /* Upper threshold breached */
+ if (snr < sq_thresh->upper_threshold[0]) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "spurious upper snr threshold event: %d\n",
+ snr);
+ } else if ((snr < sq_thresh->upper_threshold[1]) &&
+ (snr >= sq_thresh->upper_threshold[0])) {
+ new_threshold = WMI_SNR_THRESHOLD1_ABOVE;
+ } else if ((snr < sq_thresh->upper_threshold[2]) &&
+ (snr >= sq_thresh->upper_threshold[1])) {
+ new_threshold = WMI_SNR_THRESHOLD2_ABOVE;
+ } else if ((snr < sq_thresh->upper_threshold[3]) &&
+ (snr >= sq_thresh->upper_threshold[2])) {
+ new_threshold = WMI_SNR_THRESHOLD3_ABOVE;
+ } else if (snr >= sq_thresh->upper_threshold[3]) {
+ new_threshold = WMI_SNR_THRESHOLD4_ABOVE;
+ }
+ } else {
+ /* Lower threshold breached */
+ if (snr > sq_thresh->lower_threshold[0]) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "spurious lower snr threshold event: %d\n",
+ sq_thresh->lower_threshold[0]);
+ } else if ((snr > sq_thresh->lower_threshold[1]) &&
+ (snr <= sq_thresh->lower_threshold[0])) {
+ new_threshold = WMI_SNR_THRESHOLD4_BELOW;
+ } else if ((snr > sq_thresh->lower_threshold[2]) &&
+ (snr <= sq_thresh->lower_threshold[1])) {
+ new_threshold = WMI_SNR_THRESHOLD3_BELOW;
+ } else if ((snr > sq_thresh->lower_threshold[3]) &&
+ (snr <= sq_thresh->lower_threshold[2])) {
+ new_threshold = WMI_SNR_THRESHOLD2_BELOW;
+ } else if (snr <= sq_thresh->lower_threshold[3]) {
+ new_threshold = WMI_SNR_THRESHOLD1_BELOW;
+ }
+ }
+
+ /* Calculate and install the next set of thresholds */
+ lower_snr_threshold = ath6kl_wmi_get_lower_threshold(snr, sq_thresh,
+ sq_thresh->lower_threshold_valid_count);
+ upper_snr_threshold = ath6kl_wmi_get_upper_threshold(snr, sq_thresh,
+ sq_thresh->upper_threshold_valid_count);
+
+ /* Issue a wmi command to install the thresholds */
+ cmd.thresh_above1_val = upper_snr_threshold;
+ cmd.thresh_below1_val = lower_snr_threshold;
+ cmd.weight = sq_thresh->weight;
+ cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "snr: %d, threshold: %d, lower: %d, upper: %d\n",
+ snr, new_threshold,
+ lower_snr_threshold, upper_snr_threshold);
+
+ ret = ath6kl_wmi_send_snr_threshold_params(wmi, &cmd);
+ if (ret) {
+ ath6kl_err("unable to configure snr threshold\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ u16 ap_info_entry_size;
+ struct wmi_aplist_event *ev = (struct wmi_aplist_event *) datap;
+ struct wmi_ap_info_v1 *ap_info_v1;
+ u8 index;
+
+ if (len < sizeof(struct wmi_aplist_event) ||
+ ev->ap_list_ver != APLIST_VER1)
+ return -EINVAL;
+
+ ap_info_entry_size = sizeof(struct wmi_ap_info_v1);
+ ap_info_v1 = (struct wmi_ap_info_v1 *) ev->ap_list;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "number of APs in aplist event: %d\n", ev->num_ap);
+
+ if (len < (int) (sizeof(struct wmi_aplist_event) +
+ (ev->num_ap - 1) * ap_info_entry_size))
+ return -EINVAL;
+
+ /* AP list version 1 contents */
+ for (index = 0; index < ev->num_ap; index++) {
+ ath6kl_dbg(ATH6KL_DBG_WMI, "AP#%d BSSID %pM Channel %d\n",
+ index, ap_info_v1->bssid, ap_info_v1->channel);
+ ap_info_v1++;
+ }
+
+ return 0;
+}
+
+int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+ enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum htc_endpoint_id ep_id = wmi->ep_id;
+ int ret;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n",
+ cmd_id, skb->len, sync_flag);
+ ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi tx ",
+ skb->data, skb->len);
+
+ if (sync_flag >= END_WMIFLAG) {
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if ((sync_flag == SYNC_BEFORE_WMIFLAG) ||
+ (sync_flag == SYNC_BOTH_WMIFLAG)) {
+ /*
+ * Make sure all data currently queued is transmitted before
+ * the cmd execution. Establish a new sync point.
+ */
+ ath6kl_wmi_sync_point(wmi);
+ }
+
+ skb_push(skb, sizeof(struct wmi_cmd_hdr));
+
+ cmd_hdr = (struct wmi_cmd_hdr *) skb->data;
+ cmd_hdr->cmd_id = cpu_to_le16(cmd_id);
+ cmd_hdr->info1 = 0; /* added for virtual interface */
+
+ /* Only for OPT_TX_CMD, use BE endpoint. */
+ if (cmd_id == WMI_OPT_TX_FRAME_CMDID) {
+ ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
+ false, false, 0, NULL);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+ ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev, WMM_AC_BE);
+ }
+
+ ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
+
+ if ((sync_flag == SYNC_AFTER_WMIFLAG) ||
+ (sync_flag == SYNC_BOTH_WMIFLAG)) {
+ /*
+ * Make sure all new data queued waits for the command to
+ * execute. Establish a new sync point.
+ */
+ ath6kl_wmi_sync_point(wmi);
+ }
+
+ return 0;
+}
+
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+ enum dot11_auth_mode dot11_auth_mode,
+ enum auth_mode auth_mode,
+ enum crypto_type pairwise_crypto,
+ u8 pairwise_crypto_len,
+ enum crypto_type group_crypto,
+ u8 group_crypto_len, int ssid_len, u8 *ssid,
+ u8 *bssid, u16 channel, u32 ctrl_flags)
+{
+ struct sk_buff *skb;
+ struct wmi_connect_cmd *cc;
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "wmi connect bssid %pM freq %d flags 0x%x ssid_len %d "
+ "type %d dot11_auth %d auth %d pairwise %d group %d\n",
+ bssid, channel, ctrl_flags, ssid_len, nw_type,
+ dot11_auth_mode, auth_mode, pairwise_crypto, group_crypto);
+ ath6kl_dbg_dump(ATH6KL_DBG_WMI, NULL, "ssid ", ssid, ssid_len);
+
+ wmi->traffic_class = 100;
+
+ if ((pairwise_crypto == NONE_CRYPT) && (group_crypto != NONE_CRYPT))
+ return -EINVAL;
+
+ if ((pairwise_crypto != NONE_CRYPT) && (group_crypto == NONE_CRYPT))
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_connect_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cc = (struct wmi_connect_cmd *) skb->data;
+
+ if (ssid_len)
+ memcpy(cc->ssid, ssid, ssid_len);
+
+ cc->ssid_len = ssid_len;
+ cc->nw_type = nw_type;
+ cc->dot11_auth_mode = dot11_auth_mode;
+ cc->auth_mode = auth_mode;
+ cc->prwise_crypto_type = pairwise_crypto;
+ cc->prwise_crypto_len = pairwise_crypto_len;
+ cc->grp_crypto_type = group_crypto;
+ cc->grp_crypto_len = group_crypto_len;
+ cc->ch = cpu_to_le16(channel);
+ cc->ctrl_flags = cpu_to_le32(ctrl_flags);
+
+ if (bssid != NULL)
+ memcpy(cc->bssid, bssid, ETH_ALEN);
+
+ wmi->pair_crypto_type = pairwise_crypto;
+ wmi->grp_crypto_type = group_crypto;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
+{
+ struct sk_buff *skb;
+ struct wmi_reconnect_cmd *cc;
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "wmi reconnect bssid %pM freq %d\n",
+ bssid, channel);
+
+ wmi->traffic_class = 100;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_reconnect_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cc = (struct wmi_reconnect_cmd *) skb->data;
+ cc->channel = cpu_to_le16(channel);
+
+ if (bssid != NULL)
+ memcpy(cc->bssid, bssid, ETH_ALEN);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RECONNECT_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
+{
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "wmi disconnect\n");
+
+ wmi->traffic_class = 100;
+
+ /* Disconnect command does not need to do a SYNC before. */
+ ret = ath6kl_wmi_simple_cmd(wmi, WMI_DISCONNECT_CMDID);
+
+ return ret;
+}
+
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time, u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list)
+{
+ struct sk_buff *skb;
+ struct wmi_start_scan_cmd *sc;
+ s8 size;
+ int i, ret;
+
+ size = sizeof(struct wmi_start_scan_cmd);
+
+ if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
+ return -EINVAL;
+
+ if (num_chan > WMI_MAX_CHANNELS)
+ return -EINVAL;
+
+ if (num_chan)
+ size += sizeof(u16) * (num_chan - 1);
+
+ skb = ath6kl_wmi_get_new_buf(size);
+ if (!skb)
+ return -ENOMEM;
+
+ sc = (struct wmi_start_scan_cmd *) skb->data;
+ sc->scan_type = scan_type;
+ sc->force_fg_scan = cpu_to_le32(force_fgscan);
+ sc->is_legacy = cpu_to_le32(is_legacy);
+ sc->home_dwell_time = cpu_to_le32(home_dwell_time);
+ sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
+ sc->num_ch = num_chan;
+
+ for (i = 0; i < num_chan; i++)
+ sc->ch_list[i] = cpu_to_le16(ch_list[i]);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+ u16 fg_end_sec, u16 bg_sec,
+ u16 minact_chdw_msec, u16 maxact_chdw_msec,
+ u16 pas_chdw_msec, u8 short_scan_ratio,
+ u8 scan_ctrl_flag, u32 max_dfsch_act_time,
+ u16 maxact_scan_per_ssid)
+{
+ struct sk_buff *skb;
+ struct wmi_scan_params_cmd *sc;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*sc));
+ if (!skb)
+ return -ENOMEM;
+
+ sc = (struct wmi_scan_params_cmd *) skb->data;
+ sc->fg_start_period = cpu_to_le16(fg_start_sec);
+ sc->fg_end_period = cpu_to_le16(fg_end_sec);
+ sc->bg_period = cpu_to_le16(bg_sec);
+ sc->minact_chdwell_time = cpu_to_le16(minact_chdw_msec);
+ sc->maxact_chdwell_time = cpu_to_le16(maxact_chdw_msec);
+ sc->pas_chdwell_time = cpu_to_le16(pas_chdw_msec);
+ sc->short_scan_ratio = short_scan_ratio;
+ sc->scan_ctrl_flags = scan_ctrl_flag;
+ sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time);
+ sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_SCAN_PARAMS_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
+{
+ struct sk_buff *skb;
+ struct wmi_bss_filter_cmd *cmd;
+ int ret;
+
+ if (filter >= LAST_BSS_FILTER)
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bss_filter_cmd *) skb->data;
+ cmd->bss_filter = filter;
+ cmd->ie_mask = cpu_to_le32(ie_mask);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_BSS_FILTER_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+ u8 ssid_len, u8 *ssid)
+{
+ struct sk_buff *skb;
+ struct wmi_probed_ssid_cmd *cmd;
+ int ret;
+
+ if (index > MAX_PROBED_SSID_INDEX)
+ return -EINVAL;
+
+ if (ssid_len > sizeof(cmd->ssid))
+ return -EINVAL;
+
+ if ((flag & (DISABLE_SSID_FLAG | ANY_SSID_FLAG)) && (ssid_len > 0))
+ return -EINVAL;
+
+ if ((flag & SPECIFIC_SSID_FLAG) && !ssid_len)
+ return -EINVAL;
+
+ if (flag & SPECIFIC_SSID_FLAG)
+ wmi->is_probe_ssid = true;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_probed_ssid_cmd *) skb->data;
+ cmd->entry_index = index;
+ cmd->flag = flag;
+ cmd->ssid_len = ssid_len;
+ memcpy(cmd->ssid, ssid, ssid_len);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PROBED_SSID_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+ u16 listen_beacons)
+{
+ struct sk_buff *skb;
+ struct wmi_listen_int_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_listen_int_cmd *) skb->data;
+ cmd->listen_intvl = cpu_to_le16(listen_interval);
+ cmd->num_beacons = cpu_to_le16(listen_beacons);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LISTEN_INT_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
+{
+ struct sk_buff *skb;
+ struct wmi_power_mode_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_power_mode_cmd *) skb->data;
+ cmd->pwr_mode = pwr_mode;
+ wmi->pwr_mode = pwr_mode;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_MODE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+ u16 ps_poll_num, u16 dtim_policy,
+ u16 tx_wakeup_policy, u16 num_tx_to_wakeup,
+ u16 ps_fail_event_policy)
+{
+ struct sk_buff *skb;
+ struct wmi_power_params_cmd *pm;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*pm));
+ if (!skb)
+ return -ENOMEM;
+
+ pm = (struct wmi_power_params_cmd *)skb->data;
+ pm->idle_period = cpu_to_le16(idle_period);
+ pm->pspoll_number = cpu_to_le16(ps_poll_num);
+ pm->dtim_policy = cpu_to_le16(dtim_policy);
+ pm->tx_wakeup_policy = cpu_to_le16(tx_wakeup_policy);
+ pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup);
+ pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_PARAMS_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
+{
+ struct sk_buff *skb;
+ struct wmi_disc_timeout_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_disc_timeout_cmd *) skb->data;
+ cmd->discon_timeout = timeout;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_DISC_TIMEOUT_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+ enum crypto_type key_type,
+ u8 key_usage, u8 key_len,
+ u8 *key_rsc, u8 *key_material,
+ u8 key_op_ctrl, u8 *mac_addr,
+ enum wmi_sync_flag sync_flag)
+{
+ struct sk_buff *skb;
+ struct wmi_add_cipher_key_cmd *cmd;
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "addkey cmd: key_index=%u key_type=%d "
+ "key_usage=%d key_len=%d key_op_ctrl=%d\n",
+ key_index, key_type, key_usage, key_len, key_op_ctrl);
+
+ if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) ||
+ (key_material == NULL))
+ return -EINVAL;
+
+ if ((WEP_CRYPT != key_type) && (NULL == key_rsc))
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_add_cipher_key_cmd *) skb->data;
+ cmd->key_index = key_index;
+ cmd->key_type = key_type;
+ cmd->key_usage = key_usage;
+ cmd->key_len = key_len;
+ memcpy(cmd->key, key_material, key_len);
+
+ if (key_rsc != NULL)
+ memcpy(cmd->key_rsc, key_rsc, sizeof(cmd->key_rsc));
+
+ cmd->key_op_ctrl = key_op_ctrl;
+
+ if (mac_addr)
+ memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_CIPHER_KEY_CMDID,
+ sync_flag);
+
+ return ret;
+}
+
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
+{
+ struct sk_buff *skb;
+ struct wmi_add_krk_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_add_krk_cmd *) skb->data;
+ memcpy(cmd->krk, krk, WMI_KRK_LEN);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
+{
+ struct sk_buff *skb;
+ struct wmi_delete_cipher_key_cmd *cmd;
+ int ret;
+
+ if (key_index > WMI_MAX_KEY_INDEX)
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_delete_cipher_key_cmd *) skb->data;
+ cmd->key_index = key_index;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_CIPHER_KEY_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+ const u8 *pmkid, bool set)
+{
+ struct sk_buff *skb;
+ struct wmi_setpmkid_cmd *cmd;
+ int ret;
+
+ if (bssid == NULL)
+ return -EINVAL;
+
+ if (set && pmkid == NULL)
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_setpmkid_cmd *) skb->data;
+ memcpy(cmd->bssid, bssid, ETH_ALEN);
+ if (set) {
+ memcpy(cmd->pmkid, pmkid, sizeof(cmd->pmkid));
+ cmd->enable = PMKID_ENABLE;
+ } else {
+ memset(cmd->pmkid, 0, sizeof(cmd->pmkid));
+ cmd->enable = PMKID_DISABLE;
+ }
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PMKID_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id)
+{
+ struct wmi_data_hdr *data_hdr;
+ int ret;
+
+ if (WARN_ON(skb == NULL || ep_id == wmi->ep_id))
+ return -EINVAL;
+
+ skb_push(skb, sizeof(struct wmi_data_hdr));
+
+ data_hdr = (struct wmi_data_hdr *) skb->data;
+ data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT;
+ data_hdr->info3 = 0;
+
+ ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
+
+ return ret;
+}
+
+static int ath6kl_wmi_sync_point(struct wmi *wmi)
+{
+ struct sk_buff *skb;
+ struct wmi_sync_cmd *cmd;
+ struct wmi_data_sync_bufs data_sync_bufs[WMM_NUM_AC];
+ enum htc_endpoint_id ep_id;
+ u8 index, num_pri_streams = 0;
+ int ret = 0;
+
+ memset(data_sync_bufs, 0, sizeof(data_sync_bufs));
+
+ spin_lock_bh(&wmi->lock);
+
+ for (index = 0; index < WMM_NUM_AC; index++) {
+ if (wmi->fat_pipe_exist & (1 << index)) {
+ num_pri_streams++;
+ data_sync_bufs[num_pri_streams - 1].traffic_class =
+ index;
+ }
+ }
+
+ spin_unlock_bh(&wmi->lock);
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb) {
+ ret = -ENOMEM;
+ goto free_skb;
+ }
+
+ cmd = (struct wmi_sync_cmd *) skb->data;
+
+ /*
+ * In the SYNC cmd sent on the control Ep, send a bitmap
+ * of the data eps on which the Data Sync will be sent
+ */
+ cmd->data_sync_map = wmi->fat_pipe_exist;
+
+ for (index = 0; index < num_pri_streams; index++) {
+ data_sync_bufs[index].skb = ath6kl_buf_alloc(0);
+ if (data_sync_bufs[index].skb == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ /*
+ * If buffer allocation for any of the dataSync fails,
+ * then do not send the Synchronize cmd on the control ep
+ */
+ if (ret)
+ goto free_skb;
+
+ /*
+ * Send sync cmd followed by sync data messages on all
+ * endpoints being used
+ */
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SYNCHRONIZE_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ if (ret)
+ goto free_skb;
+
+ /* cmd buffer sent, we no longer own it */
+ skb = NULL;
+
+ for (index = 0; index < num_pri_streams; index++) {
+
+ if (WARN_ON(!data_sync_bufs[index].skb))
+ break;
+
+ ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
+ data_sync_bufs[index].
+ traffic_class);
+ ret =
+ ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
+ ep_id);
+
+ if (ret)
+ break;
+
+ data_sync_bufs[index].skb = NULL;
+ }
+
+free_skb:
+ /* free up any resources left over (possibly due to an error) */
+ if (skb)
+ dev_kfree_skb(skb);
+
+ for (index = 0; index < num_pri_streams; index++) {
+ if (data_sync_bufs[index].skb != NULL) {
+ dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].
+ skb);
+ }
+ }
+
+ return ret;
+}
+
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+ struct wmi_create_pstream_cmd *params)
+{
+ struct sk_buff *skb;
+ struct wmi_create_pstream_cmd *cmd;
+ u8 fatpipe_exist_for_ac = 0;
+ s32 min_phy = 0;
+ s32 nominal_phy = 0;
+ int ret;
+
+ if (!((params->user_pri < 8) &&
+ (params->user_pri <= 0x7) &&
+ (up_to_ac[params->user_pri & 0x7] == params->traffic_class) &&
+ (params->traffic_direc == UPLINK_TRAFFIC ||
+ params->traffic_direc == DNLINK_TRAFFIC ||
+ params->traffic_direc == BIDIR_TRAFFIC) &&
+ (params->traffic_type == TRAFFIC_TYPE_APERIODIC ||
+ params->traffic_type == TRAFFIC_TYPE_PERIODIC) &&
+ (params->voice_psc_cap == DISABLE_FOR_THIS_AC ||
+ params->voice_psc_cap == ENABLE_FOR_THIS_AC ||
+ params->voice_psc_cap == ENABLE_FOR_ALL_AC) &&
+ (params->tsid == WMI_IMPLICIT_PSTREAM ||
+ params->tsid <= WMI_MAX_THINSTREAM))) {
+ return -EINVAL;
+ }
+
+ /*
+ * Check nominal PHY rate is >= minimalPHY,
+ * so that DUT can allow TSRS IE
+ */
+
+ /* Get the physical rate (units of bps) */
+ min_phy = ((le32_to_cpu(params->min_phy_rate) / 1000) / 1000);
+
+ /* Check minimal phy < nominal phy rate */
+ if (params->nominal_phy >= min_phy) {
+ /* unit of 500 kbps */
+ nominal_phy = (params->nominal_phy * 1000) / 500;
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "TSRS IE enabled::MinPhy %x->NominalPhy ===> %x\n",
+ min_phy, nominal_phy);
+
+ params->nominal_phy = nominal_phy;
+ } else {
+ params->nominal_phy = 0;
+ }
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "sending create_pstream_cmd: ac=%d tsid:%d\n",
+ params->traffic_class, params->tsid);
+
+ cmd = (struct wmi_create_pstream_cmd *) skb->data;
+ memcpy(cmd, params, sizeof(*cmd));
+
+ /* This is an implicitly created Fat pipe */
+ if ((u32) params->tsid == (u32) WMI_IMPLICIT_PSTREAM) {
+ spin_lock_bh(&wmi->lock);
+ fatpipe_exist_for_ac = (wmi->fat_pipe_exist &
+ (1 << params->traffic_class));
+ wmi->fat_pipe_exist |= (1 << params->traffic_class);
+ spin_unlock_bh(&wmi->lock);
+ } else {
+ /* explicitly created thin stream within a fat pipe */
+ spin_lock_bh(&wmi->lock);
+ fatpipe_exist_for_ac = (wmi->fat_pipe_exist &
+ (1 << params->traffic_class));
+ wmi->stream_exist_for_ac[params->traffic_class] |=
+ (1 << params->tsid);
+ /*
+ * If a thinstream becomes active, the fat pipe automatically
+ * becomes active
+ */
+ wmi->fat_pipe_exist |= (1 << params->traffic_class);
+ spin_unlock_bh(&wmi->lock);
+ }
+
+ /*
+ * Indicate activty change to driver layer only if this is the
+ * first TSID to get created in this AC explicitly or an implicit
+ * fat pipe is getting created.
+ */
+ if (!fatpipe_exist_for_ac)
+ ath6kl_indicate_tx_activity(wmi->parent_dev,
+ params->traffic_class, true);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CREATE_PSTREAM_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
+{
+ struct sk_buff *skb;
+ struct wmi_delete_pstream_cmd *cmd;
+ u16 active_tsids = 0;
+ int ret;
+
+ if (traffic_class > 3) {
+ ath6kl_err("invalid traffic class: %d\n", traffic_class);
+ return -EINVAL;
+ }
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_delete_pstream_cmd *) skb->data;
+ cmd->traffic_class = traffic_class;
+ cmd->tsid = tsid;
+
+ spin_lock_bh(&wmi->lock);
+ active_tsids = wmi->stream_exist_for_ac[traffic_class];
+ spin_unlock_bh(&wmi->lock);
+
+ if (!(active_tsids & (1 << tsid))) {
+ dev_kfree_skb(skb);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "TSID %d doesn't exist for traffic class: %d\n",
+ tsid, traffic_class);
+ return -ENODATA;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "sending delete_pstream_cmd: traffic class: %d tsid=%d\n",
+ traffic_class, tsid);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_PSTREAM_CMDID,
+ SYNC_BEFORE_WMIFLAG);
+
+ spin_lock_bh(&wmi->lock);
+ wmi->stream_exist_for_ac[traffic_class] &= ~(1 << tsid);
+ active_tsids = wmi->stream_exist_for_ac[traffic_class];
+ spin_unlock_bh(&wmi->lock);
+
+ /*
+ * Indicate stream inactivity to driver layer only if all tsids
+ * within this AC are deleted.
+ */
+ if (!active_tsids) {
+ ath6kl_indicate_tx_activity(wmi->parent_dev,
+ traffic_class, false);
+ wmi->fat_pipe_exist &= ~(1 << traffic_class);
+ }
+
+ return ret;
+}
+
+int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd)
+{
+ struct sk_buff *skb;
+ struct wmi_set_ip_cmd *cmd;
+ int ret;
+
+ /* Multicast address are not valid */
+ if ((*((u8 *) &ip_cmd->ips[0]) >= 0xE0) ||
+ (*((u8 *) &ip_cmd->ips[1]) >= 0xE0))
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_ip_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_ip_cmd *) skb->data;
+ memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd));
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_IP_CMDID, NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+static int ath6kl_wmi_get_wow_list_event_rx(struct wmi *wmi, u8 * datap,
+ int len)
+{
+ if (len < sizeof(struct wmi_get_wow_list_reply))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
+ enum wmix_command_id cmd_id,
+ enum wmi_sync_flag sync_flag)
+{
+ struct wmix_cmd_hdr *cmd_hdr;
+ int ret;
+
+ skb_push(skb, sizeof(struct wmix_cmd_hdr));
+
+ cmd_hdr = (struct wmix_cmd_hdr *) skb->data;
+ cmd_hdr->cmd_id = cpu_to_le32(cmd_id);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_EXTENSION_CMDID, sync_flag);
+
+ return ret;
+}
+
+int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source)
+{
+ struct sk_buff *skb;
+ struct wmix_hb_challenge_resp_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmix_hb_challenge_resp_cmd *) skb->data;
+ cmd->cookie = cpu_to_le32(cookie);
+ cmd->source = cpu_to_le32(source);
+
+ ret = ath6kl_wmi_cmd_send_xtnd(wmi, skb, WMIX_HB_CHALLENGE_RESP_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config)
+{
+ struct ath6kl_wmix_dbglog_cfg_module_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath6kl_wmix_dbglog_cfg_module_cmd *) skb->data;
+ cmd->valid = cpu_to_le32(valid);
+ cmd->config = cpu_to_le32(config);
+
+ ret = ath6kl_wmi_cmd_send_xtnd(wmi, skb, WMIX_DBGLOG_CFG_MODULE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi)
+{
+ return ath6kl_wmi_simple_cmd(wmi, WMI_GET_STATISTICS_CMDID);
+}
+
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
+{
+ struct sk_buff *skb;
+ struct wmi_set_tx_pwr_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_tx_pwr_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_tx_pwr_cmd *) skb->data;
+ cmd->dbM = dbM;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_TX_PWR_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi)
+{
+ return ath6kl_wmi_simple_cmd(wmi, WMI_GET_TX_PWR_CMDID);
+}
+
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
+{
+ struct sk_buff *skb;
+ struct wmi_set_lpreamble_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_lpreamble_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_lpreamble_cmd *) skb->data;
+ cmd->status = status;
+ cmd->preamble_policy = preamble_policy;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LPREAMBLE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold)
+{
+ struct sk_buff *skb;
+ struct wmi_set_rts_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_rts_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_rts_cmd *) skb->data;
+ cmd->threshold = cpu_to_le16(threshold);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_RTS_CMDID, NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
+{
+ struct sk_buff *skb;
+ struct wmi_set_wmm_txop_cmd *cmd;
+ int ret;
+
+ if (!((cfg == WMI_TXOP_DISABLED) || (cfg == WMI_TXOP_ENABLED)))
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_wmm_txop_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_wmm_txop_cmd *) skb->data;
+ cmd->txop_enable = cfg;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_WMM_TXOP_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
+{
+ struct sk_buff *skb;
+ struct wmi_set_keepalive_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_keepalive_cmd *) skb->data;
+ cmd->keep_alive_intvl = keep_alive_intvl;
+ wmi->keep_alive_intvl = keep_alive_intvl;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_KEEPALIVE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(len);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb->data, buf, len);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+
+s32 ath6kl_wmi_get_rate(s8 rate_index)
+{
+ if (rate_index == RATE_AUTO)
+ return 0;
+
+ return wmi_rate_tbl[(u32) rate_index][0];
+}
+
+static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
+ u32 len)
+{
+ struct wmi_pmkid_list_reply *reply;
+ u32 expected_len;
+
+ if (len < sizeof(struct wmi_pmkid_list_reply))
+ return -EINVAL;
+
+ reply = (struct wmi_pmkid_list_reply *)datap;
+ expected_len = sizeof(reply->num_pmkid) +
+ le32_to_cpu(reply->num_pmkid) * WMI_PMKID_LEN;
+
+ if (len < expected_len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap;
+
+ aggr_recv_addba_req_evt(wmi->parent_dev, cmd->tid,
+ le16_to_cpu(cmd->st_seq_no), cmd->win_sz);
+
+ return 0;
+}
+
+static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap;
+
+ aggr_recv_delba_req_evt(wmi->parent_dev, cmd->tid);
+
+ return 0;
+}
+
+/* AP mode functions */
+
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
+{
+ struct sk_buff *skb;
+ struct wmi_connect_cmd *cm;
+ int res;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cm));
+ if (!skb)
+ return -ENOMEM;
+
+ cm = (struct wmi_connect_cmd *) skb->data;
+ memcpy(cm, p, sizeof(*cm));
+
+ res = ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_CONFIG_COMMIT_CMDID,
+ NO_SYNC_WMIFLAG);
+ ath6kl_dbg(ATH6KL_DBG_WMI, "%s: nw_type=%u auth_mode=%u ch=%u "
+ "ctrl_flags=0x%x-> res=%d\n",
+ __func__, p->nw_type, p->auth_mode, le16_to_cpu(p->ch),
+ le32_to_cpu(p->ctrl_flags), res);
+ return res;
+}
+
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason)
+{
+ struct sk_buff *skb;
+ struct wmi_ap_set_mlme_cmd *cm;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cm));
+ if (!skb)
+ return -ENOMEM;
+
+ cm = (struct wmi_ap_set_mlme_cmd *) skb->data;
+ memcpy(cm->mac, mac, ETH_ALEN);
+ cm->reason = cpu_to_le16(reason);
+ cm->cmd = cmd;
+
+ return ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_SET_MLME_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_pspoll_event *ev;
+
+ if (len < sizeof(struct wmi_pspoll_event))
+ return -EINVAL;
+
+ ev = (struct wmi_pspoll_event *) datap;
+
+ ath6kl_pspoll_event(wmi->parent_dev, le16_to_cpu(ev->aid));
+
+ return 0;
+}
+
+static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ ath6kl_dtimexpiry_event(wmi->parent_dev);
+
+ return 0;
+}
+
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
+{
+ struct sk_buff *skb;
+ struct wmi_ap_set_pvb_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_ap_set_pvb_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_set_pvb_cmd *) skb->data;
+ cmd->aid = cpu_to_le16(aid);
+ cmd->rsvd = cpu_to_le16(0);
+ cmd->flag = cpu_to_le32(flag);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_AP_SET_PVB_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return 0;
+}
+
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
+ bool rx_dot11_hdr, bool defrag_on_host)
+{
+ struct sk_buff *skb;
+ struct wmi_rx_frame_format_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_rx_frame_format_cmd *) skb->data;
+ cmd->dot11_hdr = rx_dot11_hdr ? 1 : 0;
+ cmd->defrag_on_host = defrag_on_host ? 1 : 0;
+ cmd->meta_ver = rx_meta_ver;
+
+ /* Delete the local aggr state, on host */
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RX_FRAME_FORMAT_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
+ u8 ie_len)
+{
+ struct sk_buff *skb;
+ struct wmi_set_appie_cmd *p;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "set_appie_cmd: mgmt_frm_type=%u "
+ "ie_len=%u\n", mgmt_frm_type, ie_len);
+ p = (struct wmi_set_appie_cmd *) skb->data;
+ p->mgmt_frm_type = mgmt_frm_type;
+ p->ie_len = ie_len;
+ memcpy(p->ie_info, ie, ie_len);
+ return ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_APPIE_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable)
+{
+ struct sk_buff *skb;
+ struct wmi_disable_11b_rates_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "disable_11b_rates_cmd: disable=%u\n",
+ disable);
+ cmd = (struct wmi_disable_11b_rates_cmd *) skb->data;
+ cmd->disable = disable ? 1 : 0;
+
+ return ath6kl_wmi_cmd_send(wmi, skb, WMI_DISABLE_11B_RATES_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur)
+{
+ struct sk_buff *skb;
+ struct wmi_remain_on_chnl_cmd *p;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*p));
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl_cmd: freq=%u dur=%u\n",
+ freq, dur);
+ p = (struct wmi_remain_on_chnl_cmd *) skb->data;
+ p->freq = cpu_to_le32(freq);
+ p->duration = cpu_to_le32(dur);
+ return ath6kl_wmi_cmd_send(wmi, skb, WMI_REMAIN_ON_CHNL_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
+ const u8 *data, u16 data_len)
+{
+ struct sk_buff *skb;
+ struct wmi_send_action_cmd *p;
+ u8 *buf;
+
+ if (wait)
+ return -EINVAL; /* Offload for wait not supported */
+
+ buf = kmalloc(data_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len);
+ if (!skb) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ kfree(wmi->last_mgmt_tx_frame);
+ wmi->last_mgmt_tx_frame = buf;
+ wmi->last_mgmt_tx_frame_len = data_len;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "send_action_cmd: id=%u freq=%u wait=%u "
+ "len=%u\n", id, freq, wait, data_len);
+ p = (struct wmi_send_action_cmd *) skb->data;
+ p->id = cpu_to_le32(id);
+ p->freq = cpu_to_le32(freq);
+ p->wait = cpu_to_le32(wait);
+ p->len = cpu_to_le16(data_len);
+ memcpy(p->data, data, data_len);
+ return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_ACTION_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
+ const u8 *dst,
+ const u8 *data, u16 data_len)
+{
+ struct sk_buff *skb;
+ struct wmi_p2p_probe_response_cmd *p;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len);
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "send_probe_response_cmd: freq=%u dst=%pM "
+ "len=%u\n", freq, dst, data_len);
+ p = (struct wmi_p2p_probe_response_cmd *) skb->data;
+ p->freq = cpu_to_le32(freq);
+ memcpy(p->destination_addr, dst, ETH_ALEN);
+ p->len = cpu_to_le16(data_len);
+ memcpy(p->data, data, data_len);
+ return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_PROBE_RESPONSE_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable)
+{
+ struct sk_buff *skb;
+ struct wmi_probe_req_report_cmd *p;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*p));
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "probe_report_req_cmd: enable=%u\n",
+ enable);
+ p = (struct wmi_probe_req_report_cmd *) skb->data;
+ p->enable = enable ? 1 : 0;
+ return ath6kl_wmi_cmd_send(wmi, skb, WMI_PROBE_REQ_REPORT_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags)
+{
+ struct sk_buff *skb;
+ struct wmi_get_p2p_info *p;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*p));
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "info_req_cmd: flags=%x\n",
+ info_req_flags);
+ p = (struct wmi_get_p2p_info *) skb->data;
+ p->info_req_flags = cpu_to_le32(info_req_flags);
+ return ath6kl_wmi_cmd_send(wmi, skb, WMI_GET_P2P_INFO_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi)
+{
+ ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl_cmd\n");
+ return ath6kl_wmi_simple_cmd(wmi, WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
+}
+
+static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
+{
+ struct wmix_cmd_hdr *cmd;
+ u32 len;
+ u16 id;
+ u8 *datap;
+ int ret = 0;
+
+ if (skb->len < sizeof(struct wmix_cmd_hdr)) {
+ ath6kl_err("bad packet 1\n");
+ wmi->stat.cmd_len_err++;
+ return -EINVAL;
+ }
+
+ cmd = (struct wmix_cmd_hdr *) skb->data;
+ id = le32_to_cpu(cmd->cmd_id);
+
+ skb_pull(skb, sizeof(struct wmix_cmd_hdr));
+
+ datap = skb->data;
+ len = skb->len;
+
+ switch (id) {
+ case WMIX_HB_CHALLENGE_RESP_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event hb challenge resp\n");
+ break;
+ case WMIX_DBGLOG_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event dbglog len %d\n", len);
+ ath6kl_debug_fwlog_event(wmi->parent_dev, datap, len);
+ break;
+ default:
+ ath6kl_warn("unknown cmd id 0x%x\n", id);
+ wmi->stat.cmd_id_err++;
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Control Path */
+int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd;
+ u32 len;
+ u16 id;
+ u8 *datap;
+ int ret = 0;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ if (skb->len < sizeof(struct wmi_cmd_hdr)) {
+ ath6kl_err("bad packet 1\n");
+ dev_kfree_skb(skb);
+ wmi->stat.cmd_len_err++;
+ return -EINVAL;
+ }
+
+ cmd = (struct wmi_cmd_hdr *) skb->data;
+ id = le16_to_cpu(cmd->cmd_id);
+
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+
+ datap = skb->data;
+ len = skb->len;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "wmi rx id %d len %d\n", id, len);
+ ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ",
+ datap, len);
+
+ switch (id) {
+ case WMI_GET_BITRATE_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n");
+ ret = ath6kl_wmi_bitrate_reply_rx(wmi, datap, len);
+ break;
+ case WMI_GET_CHANNEL_LIST_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_CHANNEL_LIST_CMDID\n");
+ ret = ath6kl_wmi_ch_list_reply_rx(wmi, datap, len);
+ break;
+ case WMI_GET_TX_PWR_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_TX_PWR_CMDID\n");
+ ret = ath6kl_wmi_tx_pwr_reply_rx(wmi, datap, len);
+ break;
+ case WMI_READY_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_READY_EVENTID\n");
+ ret = ath6kl_wmi_ready_event_rx(wmi, datap, len);
+ break;
+ case WMI_CONNECT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
+ ret = ath6kl_wmi_connect_event_rx(wmi, datap, len);
+ break;
+ case WMI_DISCONNECT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
+ ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len);
+ break;
+ case WMI_PEER_NODE_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n");
+ ret = ath6kl_wmi_peer_node_event_rx(wmi, datap, len);
+ break;
+ case WMI_TKIP_MICERR_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
+ ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len);
+ break;
+ case WMI_BSSINFO_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
+ ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len);
+ break;
+ case WMI_REGDOMAIN_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
+ ath6kl_wmi_regdomain_event(wmi, datap, len);
+ break;
+ case WMI_PSTREAM_TIMEOUT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSTREAM_TIMEOUT_EVENTID\n");
+ ret = ath6kl_wmi_pstream_timeout_event_rx(wmi, datap, len);
+ break;
+ case WMI_NEIGHBOR_REPORT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
+ ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len);
+ break;
+ case WMI_SCAN_COMPLETE_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
+ ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len);
+ break;
+ case WMI_CMDERROR_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n");
+ ret = ath6kl_wmi_error_event_rx(wmi, datap, len);
+ break;
+ case WMI_REPORT_STATISTICS_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
+ ret = ath6kl_wmi_stats_event_rx(wmi, datap, len);
+ break;
+ case WMI_RSSI_THRESHOLD_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n");
+ ret = ath6kl_wmi_rssi_threshold_event_rx(wmi, datap, len);
+ break;
+ case WMI_ERROR_REPORT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ERROR_REPORT_EVENTID\n");
+ break;
+ case WMI_OPT_RX_FRAME_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_OPT_RX_FRAME_EVENTID\n");
+ /* this event has been deprecated */
+ break;
+ case WMI_REPORT_ROAM_TBL_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n");
+ break;
+ case WMI_EXTENSION_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n");
+ ret = ath6kl_wmi_control_rx_xtnd(wmi, skb);
+ break;
+ case WMI_CAC_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
+ ret = ath6kl_wmi_cac_event_rx(wmi, datap, len);
+ break;
+ case WMI_CHANNEL_CHANGE_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n");
+ break;
+ case WMI_REPORT_ROAM_DATA_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_DATA_EVENTID\n");
+ break;
+ case WMI_TEST_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TEST_EVENTID\n");
+ ret = ath6kl_wmi_tcmd_test_report_rx(wmi, datap, len);
+ break;
+ case WMI_GET_FIXRATES_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_FIXRATES_CMDID\n");
+ ret = ath6kl_wmi_ratemask_reply_rx(wmi, datap, len);
+ break;
+ case WMI_TX_RETRY_ERR_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_RETRY_ERR_EVENTID\n");
+ break;
+ case WMI_SNR_THRESHOLD_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SNR_THRESHOLD_EVENTID\n");
+ ret = ath6kl_wmi_snr_threshold_event_rx(wmi, datap, len);
+ break;
+ case WMI_LQ_THRESHOLD_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_LQ_THRESHOLD_EVENTID\n");
+ break;
+ case WMI_APLIST_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_APLIST_EVENTID\n");
+ ret = ath6kl_wmi_aplist_event_rx(wmi, datap, len);
+ break;
+ case WMI_GET_KEEPALIVE_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_KEEPALIVE_CMDID\n");
+ ret = ath6kl_wmi_keepalive_reply_rx(wmi, datap, len);
+ break;
+ case WMI_GET_WOW_LIST_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n");
+ ret = ath6kl_wmi_get_wow_list_event_rx(wmi, datap, len);
+ break;
+ case WMI_GET_PMKID_LIST_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n");
+ ret = ath6kl_wmi_get_pmkid_list_event_rx(wmi, datap, len);
+ break;
+ case WMI_PSPOLL_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
+ ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len);
+ break;
+ case WMI_DTIMEXPIRY_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
+ ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len);
+ break;
+ case WMI_SET_PARAMS_REPLY_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n");
+ break;
+ case WMI_ADDBA_REQ_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
+ ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len);
+ break;
+ case WMI_ADDBA_RESP_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n");
+ break;
+ case WMI_DELBA_REQ_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
+ ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len);
+ break;
+ case WMI_REPORT_BTCOEX_CONFIG_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "WMI_REPORT_BTCOEX_CONFIG_EVENTID\n");
+ break;
+ case WMI_REPORT_BTCOEX_STATS_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "WMI_REPORT_BTCOEX_STATS_EVENTID\n");
+ break;
+ case WMI_TX_COMPLETE_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_COMPLETE_EVENTID\n");
+ ret = ath6kl_wmi_tx_complete_event_rx(datap, len);
+ break;
+ case WMI_REMAIN_ON_CHNL_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n");
+ ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len);
+ break;
+ case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n");
+ ret = ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap,
+ len);
+ break;
+ case WMI_TX_STATUS_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n");
+ ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len);
+ break;
+ case WMI_RX_PROBE_REQ_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n");
+ ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len);
+ break;
+ case WMI_P2P_CAPABILITIES_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n");
+ ret = ath6kl_wmi_p2p_capabilities_event_rx(datap, len);
+ break;
+ case WMI_RX_ACTION_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
+ ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len);
+ break;
+ case WMI_P2P_INFO_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n");
+ ret = ath6kl_wmi_p2p_info_event_rx(datap, len);
+ break;
+ default:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id);
+ wmi->stat.cmd_id_err++;
+ ret = -EINVAL;
+ break;
+ }
+
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+static void ath6kl_wmi_qos_state_init(struct wmi *wmi)
+{
+ if (!wmi)
+ return;
+
+ spin_lock_bh(&wmi->lock);
+
+ wmi->fat_pipe_exist = 0;
+ memset(wmi->stream_exist_for_ac, 0, sizeof(wmi->stream_exist_for_ac));
+
+ spin_unlock_bh(&wmi->lock);
+}
+
+void *ath6kl_wmi_init(struct ath6kl *dev)
+{
+ struct wmi *wmi;
+
+ wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
+ if (!wmi)
+ return NULL;
+
+ spin_lock_init(&wmi->lock);
+
+ wmi->parent_dev = dev;
+
+ ath6kl_wmi_qos_state_init(wmi);
+
+ wmi->pwr_mode = REC_POWER;
+ wmi->phy_mode = WMI_11G_MODE;
+
+ wmi->pair_crypto_type = NONE_CRYPT;
+ wmi->grp_crypto_type = NONE_CRYPT;
+
+ wmi->ht_allowed[A_BAND_24GHZ] = 1;
+ wmi->ht_allowed[A_BAND_5GHZ] = 1;
+
+ return wmi;
+}
+
+void ath6kl_wmi_shutdown(struct wmi *wmi)
+{
+ if (!wmi)
+ return;
+
+ kfree(wmi->last_mgmt_tx_frame);
+ kfree(wmi);
+}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
new file mode 100644
index 00000000000..f8e644d54aa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -0,0 +1,2282 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file contains the definitions of the WMI protocol specified in the
+ * Wireless Module Interface (WMI). It includes definitions of all the
+ * commands and events. Commands are messages from the host to the WM.
+ * Events and Replies are messages from the WM to the host.
+ */
+
+#ifndef WMI_H
+#define WMI_H
+
+#include <linux/ieee80211.h>
+
+#include "htc.h"
+
+#define HTC_PROTOCOL_VERSION 0x0002
+#define WMI_PROTOCOL_VERSION 0x0002
+#define WMI_CONTROL_MSG_MAX_LEN 256
+#define is_ethertype(type_or_len) ((type_or_len) >= 0x0600)
+
+#define IP_ETHERTYPE 0x0800
+
+#define WMI_IMPLICIT_PSTREAM 0xFF
+#define WMI_MAX_THINSTREAM 15
+
+#define SSID_IE_LEN_INDEX 13
+
+/* Host side link management data structures */
+#define SIG_QUALITY_THRESH_LVLS 6
+#define SIG_QUALITY_UPPER_THRESH_LVLS SIG_QUALITY_THRESH_LVLS
+#define SIG_QUALITY_LOWER_THRESH_LVLS SIG_QUALITY_THRESH_LVLS
+
+#define A_BAND_24GHZ 0
+#define A_BAND_5GHZ 1
+#define A_NUM_BANDS 2
+
+/* in ms */
+#define WMI_IMPLICIT_PSTREAM_INACTIVITY_INT 5000
+
+/*
+ * There are no signed versions of __le16 and __le32, so for a temporary
+ * solution come up with our own version. The idea is from fs/ntfs/types.h.
+ *
+ * Use a_ prefix so that it doesn't conflict if we get proper support to
+ * linux/types.h.
+ */
+typedef __s16 __bitwise a_sle16;
+typedef __s32 __bitwise a_sle32;
+
+static inline a_sle32 a_cpu_to_sle32(s32 val)
+{
+ return (__force a_sle32) cpu_to_le32(val);
+}
+
+static inline s32 a_sle32_to_cpu(a_sle32 val)
+{
+ return le32_to_cpu((__force __le32) val);
+}
+
+static inline a_sle16 a_cpu_to_sle16(s16 val)
+{
+ return (__force a_sle16) cpu_to_le16(val);
+}
+
+static inline s16 a_sle16_to_cpu(a_sle16 val)
+{
+ return le16_to_cpu((__force __le16) val);
+}
+
+struct sq_threshold_params {
+ s16 upper_threshold[SIG_QUALITY_UPPER_THRESH_LVLS];
+ s16 lower_threshold[SIG_QUALITY_LOWER_THRESH_LVLS];
+ u32 upper_threshold_valid_count;
+ u32 lower_threshold_valid_count;
+ u32 polling_interval;
+ u8 weight;
+ u8 last_rssi;
+ u8 last_rssi_poll_event;
+};
+
+struct wmi_stats {
+ u32 cmd_len_err;
+ u32 cmd_id_err;
+};
+
+struct wmi_data_sync_bufs {
+ u8 traffic_class;
+ struct sk_buff *skb;
+};
+
+/* WMM stream classes */
+#define WMM_NUM_AC 4
+#define WMM_AC_BE 0 /* best effort */
+#define WMM_AC_BK 1 /* background */
+#define WMM_AC_VI 2 /* video */
+#define WMM_AC_VO 3 /* voice */
+
+struct wmi {
+ bool ready;
+ u16 stream_exist_for_ac[WMM_NUM_AC];
+ u8 fat_pipe_exist;
+ struct ath6kl *parent_dev;
+ struct wmi_stats stat;
+ u8 pwr_mode;
+ u8 phy_mode;
+ u8 keep_alive_intvl;
+ spinlock_t lock;
+ enum htc_endpoint_id ep_id;
+ struct sq_threshold_params
+ sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX];
+ enum crypto_type pair_crypto_type;
+ enum crypto_type grp_crypto_type;
+ bool is_wmm_enabled;
+ u8 ht_allowed[A_NUM_BANDS];
+ u8 traffic_class;
+ bool is_probe_ssid;
+
+ u8 *last_mgmt_tx_frame;
+ size_t last_mgmt_tx_frame_len;
+};
+
+struct host_app_area {
+ u32 wmi_protocol_ver;
+};
+
+enum wmi_msg_type {
+ DATA_MSGTYPE = 0x0,
+ CNTL_MSGTYPE,
+ SYNC_MSGTYPE,
+ OPT_MSGTYPE,
+};
+
+/*
+ * Macros for operating on WMI_DATA_HDR (info) field
+ */
+
+#define WMI_DATA_HDR_MSG_TYPE_MASK 0x03
+#define WMI_DATA_HDR_MSG_TYPE_SHIFT 0
+#define WMI_DATA_HDR_UP_MASK 0x07
+#define WMI_DATA_HDR_UP_SHIFT 2
+
+/* In AP mode, the same bit (b5) is used to indicate Power save state in
+ * the Rx dir and More data bit state in the tx direction.
+ */
+#define WMI_DATA_HDR_PS_MASK 0x1
+#define WMI_DATA_HDR_PS_SHIFT 5
+
+#define WMI_DATA_HDR_MORE_MASK 0x1
+#define WMI_DATA_HDR_MORE_SHIFT 5
+
+enum wmi_data_hdr_data_type {
+ WMI_DATA_HDR_DATA_TYPE_802_3 = 0,
+ WMI_DATA_HDR_DATA_TYPE_802_11,
+
+ /* used to be used for the PAL */
+ WMI_DATA_HDR_DATA_TYPE_ACL,
+};
+
+#define WMI_DATA_HDR_DATA_TYPE_MASK 0x3
+#define WMI_DATA_HDR_DATA_TYPE_SHIFT 6
+
+/* Macros for operating on WMI_DATA_HDR (info2) field */
+#define WMI_DATA_HDR_SEQNO_MASK 0xFFF
+#define WMI_DATA_HDR_SEQNO_SHIFT 0
+
+#define WMI_DATA_HDR_AMSDU_MASK 0x1
+#define WMI_DATA_HDR_AMSDU_SHIFT 12
+
+#define WMI_DATA_HDR_META_MASK 0x7
+#define WMI_DATA_HDR_META_SHIFT 13
+
+struct wmi_data_hdr {
+ s8 rssi;
+
+ /*
+ * usage of 'info' field(8-bit):
+ *
+ * b1:b0 - WMI_MSG_TYPE
+ * b4:b3:b2 - UP(tid)
+ * b5 - Used in AP mode.
+ * More-data in tx dir, PS in rx.
+ * b7:b6 - Dot3 header(0),
+ * Dot11 Header(1),
+ * ACL data(2)
+ */
+ u8 info;
+
+ /*
+ * usage of 'info2' field(16-bit):
+ *
+ * b11:b0 - seq_no
+ * b12 - A-MSDU?
+ * b15:b13 - META_DATA_VERSION 0 - 7
+ */
+ __le16 info2;
+ __le16 info3;
+} __packed;
+
+static inline u8 wmi_data_hdr_get_up(struct wmi_data_hdr *dhdr)
+{
+ return (dhdr->info >> WMI_DATA_HDR_UP_SHIFT) & WMI_DATA_HDR_UP_MASK;
+}
+
+static inline void wmi_data_hdr_set_up(struct wmi_data_hdr *dhdr,
+ u8 usr_pri)
+{
+ dhdr->info &= ~(WMI_DATA_HDR_UP_MASK << WMI_DATA_HDR_UP_SHIFT);
+ dhdr->info |= usr_pri << WMI_DATA_HDR_UP_SHIFT;
+}
+
+static inline u8 wmi_data_hdr_get_dot11(struct wmi_data_hdr *dhdr)
+{
+ u8 data_type;
+
+ data_type = (dhdr->info >> WMI_DATA_HDR_DATA_TYPE_SHIFT) &
+ WMI_DATA_HDR_DATA_TYPE_MASK;
+ return (data_type == WMI_DATA_HDR_DATA_TYPE_802_11);
+}
+
+static inline u16 wmi_data_hdr_get_seqno(struct wmi_data_hdr *dhdr)
+{
+ return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_SEQNO_SHIFT) &
+ WMI_DATA_HDR_SEQNO_MASK;
+}
+
+static inline u8 wmi_data_hdr_is_amsdu(struct wmi_data_hdr *dhdr)
+{
+ return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_AMSDU_SHIFT) &
+ WMI_DATA_HDR_AMSDU_MASK;
+}
+
+static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr)
+{
+ return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_META_SHIFT) &
+ WMI_DATA_HDR_META_MASK;
+}
+
+/* Tx meta version definitions */
+#define WMI_MAX_TX_META_SZ 12
+#define WMI_META_VERSION_1 0x01
+#define WMI_META_VERSION_2 0x02
+
+struct wmi_tx_meta_v1 {
+ /* packet ID to identify the tx request */
+ u8 pkt_id;
+
+ /* rate policy to be used for the tx of this frame */
+ u8 rate_plcy_id;
+} __packed;
+
+struct wmi_tx_meta_v2 {
+ /*
+ * Offset from start of the WMI header for csum calculation to
+ * begin.
+ */
+ u8 csum_start;
+
+ /* offset from start of WMI header where final csum goes */
+ u8 csum_dest;
+
+ /* no of bytes over which csum is calculated */
+ u8 csum_flags;
+} __packed;
+
+struct wmi_rx_meta_v1 {
+ u8 status;
+
+ /* rate index mapped to rate at which this packet was received. */
+ u8 rix;
+
+ /* rssi of packet */
+ u8 rssi;
+
+ /* rf channel during packet reception */
+ u8 channel;
+
+ __le16 flags;
+} __packed;
+
+struct wmi_rx_meta_v2 {
+ __le16 csum;
+
+ /* bit 0 set -partial csum valid bit 1 set -test mode */
+ u8 csum_flags;
+} __packed;
+
+/* Control Path */
+struct wmi_cmd_hdr {
+ __le16 cmd_id;
+
+ /* info1 - 16 bits
+ * b03:b00 - id
+ * b15:b04 - unused */
+ __le16 info1;
+
+ /* for alignment */
+ __le16 reserved;
+} __packed;
+
+/* List of WMI commands */
+enum wmi_cmd_id {
+ WMI_CONNECT_CMDID = 0x0001,
+ WMI_RECONNECT_CMDID,
+ WMI_DISCONNECT_CMDID,
+ WMI_SYNCHRONIZE_CMDID,
+ WMI_CREATE_PSTREAM_CMDID,
+ WMI_DELETE_PSTREAM_CMDID,
+ WMI_START_SCAN_CMDID,
+ WMI_SET_SCAN_PARAMS_CMDID,
+ WMI_SET_BSS_FILTER_CMDID,
+ WMI_SET_PROBED_SSID_CMDID, /* 10 */
+ WMI_SET_LISTEN_INT_CMDID,
+ WMI_SET_BMISS_TIME_CMDID,
+ WMI_SET_DISC_TIMEOUT_CMDID,
+ WMI_GET_CHANNEL_LIST_CMDID,
+ WMI_SET_BEACON_INT_CMDID,
+ WMI_GET_STATISTICS_CMDID,
+ WMI_SET_CHANNEL_PARAMS_CMDID,
+ WMI_SET_POWER_MODE_CMDID,
+ WMI_SET_IBSS_PM_CAPS_CMDID,
+ WMI_SET_POWER_PARAMS_CMDID, /* 20 */
+ WMI_SET_POWERSAVE_TIMERS_POLICY_CMDID,
+ WMI_ADD_CIPHER_KEY_CMDID,
+ WMI_DELETE_CIPHER_KEY_CMDID,
+ WMI_ADD_KRK_CMDID,
+ WMI_DELETE_KRK_CMDID,
+ WMI_SET_PMKID_CMDID,
+ WMI_SET_TX_PWR_CMDID,
+ WMI_GET_TX_PWR_CMDID,
+ WMI_SET_ASSOC_INFO_CMDID,
+ WMI_ADD_BAD_AP_CMDID, /* 30 */
+ WMI_DELETE_BAD_AP_CMDID,
+ WMI_SET_TKIP_COUNTERMEASURES_CMDID,
+ WMI_RSSI_THRESHOLD_PARAMS_CMDID,
+ WMI_TARGET_ERROR_REPORT_BITMASK_CMDID,
+ WMI_SET_ACCESS_PARAMS_CMDID,
+ WMI_SET_RETRY_LIMITS_CMDID,
+ WMI_SET_OPT_MODE_CMDID,
+ WMI_OPT_TX_FRAME_CMDID,
+ WMI_SET_VOICE_PKT_SIZE_CMDID,
+ WMI_SET_MAX_SP_LEN_CMDID, /* 40 */
+ WMI_SET_ROAM_CTRL_CMDID,
+ WMI_GET_ROAM_TBL_CMDID,
+ WMI_GET_ROAM_DATA_CMDID,
+ WMI_ENABLE_RM_CMDID,
+ WMI_SET_MAX_OFFHOME_DURATION_CMDID,
+ WMI_EXTENSION_CMDID, /* Non-wireless extensions */
+ WMI_SNR_THRESHOLD_PARAMS_CMDID,
+ WMI_LQ_THRESHOLD_PARAMS_CMDID,
+ WMI_SET_LPREAMBLE_CMDID,
+ WMI_SET_RTS_CMDID, /* 50 */
+ WMI_CLR_RSSI_SNR_CMDID,
+ WMI_SET_FIXRATES_CMDID,
+ WMI_GET_FIXRATES_CMDID,
+ WMI_SET_AUTH_MODE_CMDID,
+ WMI_SET_REASSOC_MODE_CMDID,
+ WMI_SET_WMM_CMDID,
+ WMI_SET_WMM_TXOP_CMDID,
+ WMI_TEST_CMDID,
+
+ /* COEX AR6002 only */
+ WMI_SET_BT_STATUS_CMDID,
+ WMI_SET_BT_PARAMS_CMDID, /* 60 */
+
+ WMI_SET_KEEPALIVE_CMDID,
+ WMI_GET_KEEPALIVE_CMDID,
+ WMI_SET_APPIE_CMDID,
+ WMI_GET_APPIE_CMDID,
+ WMI_SET_WSC_STATUS_CMDID,
+
+ /* Wake on Wireless */
+ WMI_SET_HOST_SLEEP_MODE_CMDID,
+ WMI_SET_WOW_MODE_CMDID,
+ WMI_GET_WOW_LIST_CMDID,
+ WMI_ADD_WOW_PATTERN_CMDID,
+ WMI_DEL_WOW_PATTERN_CMDID, /* 70 */
+
+ WMI_SET_FRAMERATES_CMDID,
+ WMI_SET_AP_PS_CMDID,
+ WMI_SET_QOS_SUPP_CMDID,
+
+ /* WMI_THIN_RESERVED_... mark the start and end
+ * values for WMI_THIN_RESERVED command IDs. These
+ * command IDs can be found in wmi_thin.h */
+ WMI_THIN_RESERVED_START = 0x8000,
+ WMI_THIN_RESERVED_END = 0x8fff,
+
+ /* Developer commands starts at 0xF000 */
+ WMI_SET_BITRATE_CMDID = 0xF000,
+ WMI_GET_BITRATE_CMDID,
+ WMI_SET_WHALPARAM_CMDID,
+ WMI_SET_MAC_ADDRESS_CMDID,
+ WMI_SET_AKMP_PARAMS_CMDID,
+ WMI_SET_PMKID_LIST_CMDID,
+ WMI_GET_PMKID_LIST_CMDID,
+ WMI_ABORT_SCAN_CMDID,
+ WMI_SET_TARGET_EVENT_REPORT_CMDID,
+
+ /* Unused */
+ WMI_UNUSED1,
+ WMI_UNUSED2,
+
+ /* AP mode commands */
+ WMI_AP_HIDDEN_SSID_CMDID,
+ WMI_AP_SET_NUM_STA_CMDID,
+ WMI_AP_ACL_POLICY_CMDID,
+ WMI_AP_ACL_MAC_LIST_CMDID,
+ WMI_AP_CONFIG_COMMIT_CMDID,
+ WMI_AP_SET_MLME_CMDID,
+ WMI_AP_SET_PVB_CMDID,
+ WMI_AP_CONN_INACT_CMDID,
+ WMI_AP_PROT_SCAN_TIME_CMDID,
+ WMI_AP_SET_COUNTRY_CMDID,
+ WMI_AP_SET_DTIM_CMDID,
+ WMI_AP_MODE_STAT_CMDID,
+
+ WMI_SET_IP_CMDID,
+ WMI_SET_PARAMS_CMDID,
+ WMI_SET_MCAST_FILTER_CMDID,
+ WMI_DEL_MCAST_FILTER_CMDID,
+
+ WMI_ALLOW_AGGR_CMDID,
+ WMI_ADDBA_REQ_CMDID,
+ WMI_DELBA_REQ_CMDID,
+ WMI_SET_HT_CAP_CMDID,
+ WMI_SET_HT_OP_CMDID,
+ WMI_SET_TX_SELECT_RATES_CMDID,
+ WMI_SET_TX_SGI_PARAM_CMDID,
+ WMI_SET_RATE_POLICY_CMDID,
+
+ WMI_HCI_CMD_CMDID,
+ WMI_RX_FRAME_FORMAT_CMDID,
+ WMI_SET_THIN_MODE_CMDID,
+ WMI_SET_BT_WLAN_CONN_PRECEDENCE_CMDID,
+
+ WMI_AP_SET_11BG_RATESET_CMDID,
+ WMI_SET_PMK_CMDID,
+ WMI_MCAST_FILTER_CMDID,
+
+ /* COEX CMDID AR6003 */
+ WMI_SET_BTCOEX_FE_ANT_CMDID,
+ WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID,
+ WMI_SET_BTCOEX_SCO_CONFIG_CMDID,
+ WMI_SET_BTCOEX_A2DP_CONFIG_CMDID,
+ WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMDID,
+ WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMDID,
+ WMI_SET_BTCOEX_DEBUG_CMDID,
+ WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID,
+ WMI_GET_BTCOEX_STATS_CMDID,
+ WMI_GET_BTCOEX_CONFIG_CMDID,
+
+ WMI_SET_DFS_ENABLE_CMDID, /* F034 */
+ WMI_SET_DFS_MINRSSITHRESH_CMDID,
+ WMI_SET_DFS_MAXPULSEDUR_CMDID,
+ WMI_DFS_RADAR_DETECTED_CMDID,
+
+ /* P2P commands */
+ WMI_P2P_SET_CONFIG_CMDID, /* F038 */
+ WMI_WPS_SET_CONFIG_CMDID,
+ WMI_SET_REQ_DEV_ATTR_CMDID,
+ WMI_P2P_FIND_CMDID,
+ WMI_P2P_STOP_FIND_CMDID,
+ WMI_P2P_GO_NEG_START_CMDID,
+ WMI_P2P_LISTEN_CMDID,
+
+ WMI_CONFIG_TX_MAC_RULES_CMDID, /* F040 */
+ WMI_SET_PROMISCUOUS_MODE_CMDID,
+ WMI_RX_FRAME_FILTER_CMDID,
+ WMI_SET_CHANNEL_CMDID,
+
+ /* WAC commands */
+ WMI_ENABLE_WAC_CMDID,
+ WMI_WAC_SCAN_REPLY_CMDID,
+ WMI_WAC_CTRL_REQ_CMDID,
+ WMI_SET_DIV_PARAMS_CMDID,
+
+ WMI_GET_PMK_CMDID,
+ WMI_SET_PASSPHRASE_CMDID,
+ WMI_SEND_ASSOC_RES_CMDID,
+ WMI_SET_ASSOC_REQ_RELAY_CMDID,
+
+ /* ACS command, consists of sub-commands */
+ WMI_ACS_CTRL_CMDID,
+ WMI_SET_EXCESS_TX_RETRY_THRES_CMDID,
+ WMI_SET_TBD_TIME_CMDID, /*added for wmiconfig command for TBD */
+
+ /* Pktlog cmds */
+ WMI_PKTLOG_ENABLE_CMDID,
+ WMI_PKTLOG_DISABLE_CMDID,
+
+ /* More P2P Cmds */
+ WMI_P2P_GO_NEG_REQ_RSP_CMDID,
+ WMI_P2P_GRP_INIT_CMDID,
+ WMI_P2P_GRP_FORMATION_DONE_CMDID,
+ WMI_P2P_INVITE_CMDID,
+ WMI_P2P_INVITE_REQ_RSP_CMDID,
+ WMI_P2P_PROV_DISC_REQ_CMDID,
+ WMI_P2P_SET_CMDID,
+
+ WMI_GET_RFKILL_MODE_CMDID,
+ WMI_SET_RFKILL_MODE_CMDID,
+ WMI_AP_SET_APSD_CMDID,
+ WMI_AP_APSD_BUFFERED_TRAFFIC_CMDID,
+
+ WMI_P2P_SDPD_TX_CMDID, /* F05C */
+ WMI_P2P_STOP_SDPD_CMDID,
+ WMI_P2P_CANCEL_CMDID,
+ /* Ultra low power store / recall commands */
+ WMI_STORERECALL_CONFIGURE_CMDID,
+ WMI_STORERECALL_RECALL_CMDID,
+ WMI_STORERECALL_HOST_READY_CMDID,
+ WMI_FORCE_TARGET_ASSERT_CMDID,
+
+ WMI_SET_PROBED_SSID_EX_CMDID,
+ WMI_SET_NETWORK_LIST_OFFLOAD_CMDID,
+ WMI_SET_ARP_NS_OFFLOAD_CMDID,
+ WMI_ADD_WOW_EXT_PATTERN_CMDID,
+ WMI_GTK_OFFLOAD_OP_CMDID,
+ WMI_REMAIN_ON_CHNL_CMDID,
+ WMI_CANCEL_REMAIN_ON_CHNL_CMDID,
+ WMI_SEND_ACTION_CMDID,
+ WMI_PROBE_REQ_REPORT_CMDID,
+ WMI_DISABLE_11B_RATES_CMDID,
+ WMI_SEND_PROBE_RESPONSE_CMDID,
+ WMI_GET_P2P_INFO_CMDID,
+ WMI_AP_JOIN_BSS_CMDID,
+};
+
+enum wmi_mgmt_frame_type {
+ WMI_FRAME_BEACON = 0,
+ WMI_FRAME_PROBE_REQ,
+ WMI_FRAME_PROBE_RESP,
+ WMI_FRAME_ASSOC_REQ,
+ WMI_FRAME_ASSOC_RESP,
+ WMI_NUM_MGMT_FRAME
+};
+
+/* WMI_CONNECT_CMDID */
+enum network_type {
+ INFRA_NETWORK = 0x01,
+ ADHOC_NETWORK = 0x02,
+ ADHOC_CREATOR = 0x04,
+ AP_NETWORK = 0x10,
+};
+
+enum dot11_auth_mode {
+ OPEN_AUTH = 0x01,
+ SHARED_AUTH = 0x02,
+
+ /* different from IEEE_AUTH_MODE definitions */
+ LEAP_AUTH = 0x04,
+};
+
+enum auth_mode {
+ NONE_AUTH = 0x01,
+ WPA_AUTH = 0x02,
+ WPA2_AUTH = 0x04,
+ WPA_PSK_AUTH = 0x08,
+ WPA2_PSK_AUTH = 0x10,
+ WPA_AUTH_CCKM = 0x20,
+ WPA2_AUTH_CCKM = 0x40,
+};
+
+#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT
+#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1)
+
+#define WMI_MIN_KEY_INDEX 0
+#define WMI_MAX_KEY_INDEX 3
+
+#define WMI_MAX_KEY_LEN 32
+
+/*
+ * NB: these values are ordered carefully; there are lots of
+ * of implications in any reordering. In particular beware
+ * that 4 is not used to avoid conflicting with IEEE80211_F_PRIVACY.
+ */
+#define ATH6KL_CIPHER_WEP 0
+#define ATH6KL_CIPHER_TKIP 1
+#define ATH6KL_CIPHER_AES_OCB 2
+#define ATH6KL_CIPHER_AES_CCM 3
+#define ATH6KL_CIPHER_CKIP 5
+#define ATH6KL_CIPHER_CCKM_KRK 6
+#define ATH6KL_CIPHER_NONE 7 /* pseudo value */
+
+/*
+ * 802.11 rate set.
+ */
+#define ATH6KL_RATE_MAXSIZE 15 /* max rates we'll handle */
+
+#define ATH_OUI_TYPE 0x01
+#define WPA_OUI_TYPE 0x01
+#define WMM_PARAM_OUI_SUBTYPE 0x01
+#define WMM_OUI_TYPE 0x02
+#define WSC_OUT_TYPE 0x04
+
+enum wmi_connect_ctrl_flags_bits {
+ CONNECT_ASSOC_POLICY_USER = 0x0001,
+ CONNECT_SEND_REASSOC = 0x0002,
+ CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004,
+ CONNECT_PROFILE_MATCH_DONE = 0x0008,
+ CONNECT_IGNORE_AAC_BEACON = 0x0010,
+ CONNECT_CSA_FOLLOW_BSS = 0x0020,
+ CONNECT_DO_WPA_OFFLOAD = 0x0040,
+ CONNECT_DO_NOT_DEAUTH = 0x0080,
+};
+
+struct wmi_connect_cmd {
+ u8 nw_type;
+ u8 dot11_auth_mode;
+ u8 auth_mode;
+ u8 prwise_crypto_type;
+ u8 prwise_crypto_len;
+ u8 grp_crypto_type;
+ u8 grp_crypto_len;
+ u8 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ __le16 ch;
+ u8 bssid[ETH_ALEN];
+ __le32 ctrl_flags;
+} __packed;
+
+/* WMI_RECONNECT_CMDID */
+struct wmi_reconnect_cmd {
+ /* channel hint */
+ __le16 channel;
+
+ /* mandatory if set */
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+/* WMI_ADD_CIPHER_KEY_CMDID */
+enum key_usage {
+ PAIRWISE_USAGE = 0x00,
+ GROUP_USAGE = 0x01,
+
+ /* default Tx Key - static WEP only */
+ TX_USAGE = 0x02,
+};
+
+/*
+ * Bit Flag
+ * Bit 0 - Initialise TSC - default is Initialize
+ */
+#define KEY_OP_INIT_TSC 0x01
+#define KEY_OP_INIT_RSC 0x02
+
+/* default initialise the TSC & RSC */
+#define KEY_OP_INIT_VAL 0x03
+#define KEY_OP_VALID_MASK 0x03
+
+struct wmi_add_cipher_key_cmd {
+ u8 key_index;
+ u8 key_type;
+
+ /* enum key_usage */
+ u8 key_usage;
+
+ u8 key_len;
+
+ /* key replay sequence counter */
+ u8 key_rsc[8];
+
+ u8 key[WLAN_MAX_KEY_LEN];
+
+ /* additional key control info */
+ u8 key_op_ctrl;
+
+ u8 key_mac_addr[ETH_ALEN];
+} __packed;
+
+/* WMI_DELETE_CIPHER_KEY_CMDID */
+struct wmi_delete_cipher_key_cmd {
+ u8 key_index;
+} __packed;
+
+#define WMI_KRK_LEN 16
+
+/* WMI_ADD_KRK_CMDID */
+struct wmi_add_krk_cmd {
+ u8 krk[WMI_KRK_LEN];
+} __packed;
+
+/* WMI_SETPMKID_CMDID */
+
+#define WMI_PMKID_LEN 16
+
+enum pmkid_enable_flg {
+ PMKID_DISABLE = 0,
+ PMKID_ENABLE = 1,
+};
+
+struct wmi_setpmkid_cmd {
+ u8 bssid[ETH_ALEN];
+
+ /* enum pmkid_enable_flg */
+ u8 enable;
+
+ u8 pmkid[WMI_PMKID_LEN];
+} __packed;
+
+/* WMI_START_SCAN_CMD */
+enum wmi_scan_type {
+ WMI_LONG_SCAN = 0,
+ WMI_SHORT_SCAN = 1,
+};
+
+struct wmi_start_scan_cmd {
+ __le32 force_fg_scan;
+
+ /* for legacy cisco AP compatibility */
+ __le32 is_legacy;
+
+ /* max duration in the home channel(msec) */
+ __le32 home_dwell_time;
+
+ /* time interval between scans (msec) */
+ __le32 force_scan_intvl;
+
+ /* enum wmi_scan_type */
+ u8 scan_type;
+
+ /* how many channels follow */
+ u8 num_ch;
+
+ /* channels in Mhz */
+ __le16 ch_list[1];
+} __packed;
+
+/* WMI_SET_SCAN_PARAMS_CMDID */
+#define WMI_SHORTSCANRATIO_DEFAULT 3
+
+/*
+ * Warning: scan control flag value of 0xFF is used to disable
+ * all flags in WMI_SCAN_PARAMS_CMD. Do not add any more
+ * flags here
+ */
+enum wmi_scan_ctrl_flags_bits {
+
+ /* set if can scan in the connect cmd */
+ CONNECT_SCAN_CTRL_FLAGS = 0x01,
+
+ /* set if scan for the SSID it is already connected to */
+ SCAN_CONNECTED_CTRL_FLAGS = 0x02,
+
+ /* set if enable active scan */
+ ACTIVE_SCAN_CTRL_FLAGS = 0x04,
+
+ /* set if enable roam scan when bmiss and lowrssi */
+ ROAM_SCAN_CTRL_FLAGS = 0x08,
+
+ /* set if follows customer BSSINFO reporting rule */
+ REPORT_BSSINFO_CTRL_FLAGS = 0x10,
+
+ /* if disabled, target doesn't scan after a disconnect event */
+ ENABLE_AUTO_CTRL_FLAGS = 0x20,
+
+ /*
+ * Scan complete event with canceled status will be generated when
+ * a scan is prempted before it gets completed.
+ */
+ ENABLE_SCAN_ABORT_EVENT = 0x40
+};
+
+#define DEFAULT_SCAN_CTRL_FLAGS \
+ (CONNECT_SCAN_CTRL_FLAGS | \
+ SCAN_CONNECTED_CTRL_FLAGS | \
+ ACTIVE_SCAN_CTRL_FLAGS | \
+ ROAM_SCAN_CTRL_FLAGS | \
+ ENABLE_AUTO_CTRL_FLAGS)
+
+struct wmi_scan_params_cmd {
+ /* sec */
+ __le16 fg_start_period;
+
+ /* sec */
+ __le16 fg_end_period;
+
+ /* sec */
+ __le16 bg_period;
+
+ /* msec */
+ __le16 maxact_chdwell_time;
+
+ /* msec */
+ __le16 pas_chdwell_time;
+
+ /* how many shorts scan for one long */
+ u8 short_scan_ratio;
+
+ u8 scan_ctrl_flags;
+
+ /* msec */
+ __le16 minact_chdwell_time;
+
+ /* max active scans per ssid */
+ __le16 maxact_scan_per_ssid;
+
+ /* msecs */
+ __le32 max_dfsch_act_time;
+} __packed;
+
+/* WMI_SET_BSS_FILTER_CMDID */
+enum wmi_bss_filter {
+ /* no beacons forwarded */
+ NONE_BSS_FILTER = 0x0,
+
+ /* all beacons forwarded */
+ ALL_BSS_FILTER,
+
+ /* only beacons matching profile */
+ PROFILE_FILTER,
+
+ /* all but beacons matching profile */
+ ALL_BUT_PROFILE_FILTER,
+
+ /* only beacons matching current BSS */
+ CURRENT_BSS_FILTER,
+
+ /* all but beacons matching BSS */
+ ALL_BUT_BSS_FILTER,
+
+ /* beacons matching probed ssid */
+ PROBED_SSID_FILTER,
+
+ /* marker only */
+ LAST_BSS_FILTER,
+};
+
+struct wmi_bss_filter_cmd {
+ /* see, enum wmi_bss_filter */
+ u8 bss_filter;
+
+ /* for alignment */
+ u8 reserved1;
+
+ /* for alignment */
+ __le16 reserved2;
+
+ __le32 ie_mask;
+} __packed;
+
+/* WMI_SET_PROBED_SSID_CMDID */
+#define MAX_PROBED_SSID_INDEX 9
+
+enum wmi_ssid_flag {
+ /* disables entry */
+ DISABLE_SSID_FLAG = 0,
+
+ /* probes specified ssid */
+ SPECIFIC_SSID_FLAG = 0x01,
+
+ /* probes for any ssid */
+ ANY_SSID_FLAG = 0x02,
+};
+
+struct wmi_probed_ssid_cmd {
+ /* 0 to MAX_PROBED_SSID_INDEX */
+ u8 entry_index;
+
+ /* see, enum wmi_ssid_flg */
+ u8 flag;
+
+ u8 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_LISTEN_INT_CMDID
+ * The Listen interval is between 15 and 3000 TUs
+ */
+struct wmi_listen_int_cmd {
+ __le16 listen_intvl;
+ __le16 num_beacons;
+} __packed;
+
+/* WMI_SET_POWER_MODE_CMDID */
+enum wmi_power_mode {
+ REC_POWER = 0x01,
+ MAX_PERF_POWER,
+};
+
+struct wmi_power_mode_cmd {
+ /* see, enum wmi_power_mode */
+ u8 pwr_mode;
+} __packed;
+
+/*
+ * Policy to determnine whether power save failure event should be sent to
+ * host during scanning
+ */
+enum power_save_fail_event_policy {
+ SEND_POWER_SAVE_FAIL_EVENT_ALWAYS = 1,
+ IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN = 2,
+};
+
+struct wmi_power_params_cmd {
+ /* msec */
+ __le16 idle_period;
+
+ __le16 pspoll_number;
+ __le16 dtim_policy;
+ __le16 tx_wakeup_policy;
+ __le16 num_tx_to_wakeup;
+ __le16 ps_fail_event_policy;
+} __packed;
+
+/* WMI_SET_DISC_TIMEOUT_CMDID */
+struct wmi_disc_timeout_cmd {
+ /* seconds */
+ u8 discon_timeout;
+} __packed;
+
+enum dir_type {
+ UPLINK_TRAFFIC = 0,
+ DNLINK_TRAFFIC = 1,
+ BIDIR_TRAFFIC = 2,
+};
+
+enum voiceps_cap_type {
+ DISABLE_FOR_THIS_AC = 0,
+ ENABLE_FOR_THIS_AC = 1,
+ ENABLE_FOR_ALL_AC = 2,
+};
+
+enum traffic_type {
+ TRAFFIC_TYPE_APERIODIC = 0,
+ TRAFFIC_TYPE_PERIODIC = 1,
+};
+
+/* WMI_SYNCHRONIZE_CMDID */
+struct wmi_sync_cmd {
+ u8 data_sync_map;
+} __packed;
+
+/* WMI_CREATE_PSTREAM_CMDID */
+struct wmi_create_pstream_cmd {
+ /* msec */
+ __le32 min_service_int;
+
+ /* msec */
+ __le32 max_service_int;
+
+ /* msec */
+ __le32 inactivity_int;
+
+ /* msec */
+ __le32 suspension_int;
+
+ __le32 service_start_time;
+
+ /* in bps */
+ __le32 min_data_rate;
+
+ /* in bps */
+ __le32 mean_data_rate;
+
+ /* in bps */
+ __le32 peak_data_rate;
+
+ __le32 max_burst_size;
+ __le32 delay_bound;
+
+ /* in bps */
+ __le32 min_phy_rate;
+
+ __le32 sba;
+ __le32 medium_time;
+
+ /* in octects */
+ __le16 nominal_msdu;
+
+ /* in octects */
+ __le16 max_msdu;
+
+ u8 traffic_class;
+
+ /* see, enum dir_type */
+ u8 traffic_direc;
+
+ u8 rx_queue_num;
+
+ /* see, enum traffic_type */
+ u8 traffic_type;
+
+ /* see, enum voiceps_cap_type */
+ u8 voice_psc_cap;
+ u8 tsid;
+
+ /* 802.1D user priority */
+ u8 user_pri;
+
+ /* nominal phy rate */
+ u8 nominal_phy;
+} __packed;
+
+/* WMI_DELETE_PSTREAM_CMDID */
+struct wmi_delete_pstream_cmd {
+ u8 tx_queue_num;
+ u8 rx_queue_num;
+ u8 traffic_direc;
+ u8 traffic_class;
+ u8 tsid;
+} __packed;
+
+/* WMI_SET_CHANNEL_PARAMS_CMDID */
+enum wmi_phy_mode {
+ WMI_11A_MODE = 0x1,
+ WMI_11G_MODE = 0x2,
+ WMI_11AG_MODE = 0x3,
+ WMI_11B_MODE = 0x4,
+ WMI_11GONLY_MODE = 0x5,
+};
+
+#define WMI_MAX_CHANNELS 32
+
+/*
+ * WMI_RSSI_THRESHOLD_PARAMS_CMDID
+ * Setting the polltime to 0 would disable polling. Threshold values are
+ * in the ascending order, and should agree to:
+ * (lowThreshold_lowerVal < lowThreshold_upperVal < highThreshold_lowerVal
+ * < highThreshold_upperVal)
+ */
+
+struct wmi_rssi_threshold_params_cmd {
+ /* polling time as a factor of LI */
+ __le32 poll_time;
+
+ /* lowest of upper */
+ a_sle16 thresh_above1_val;
+
+ a_sle16 thresh_above2_val;
+ a_sle16 thresh_above3_val;
+ a_sle16 thresh_above4_val;
+ a_sle16 thresh_above5_val;
+
+ /* highest of upper */
+ a_sle16 thresh_above6_val;
+
+ /* lowest of bellow */
+ a_sle16 thresh_below1_val;
+
+ a_sle16 thresh_below2_val;
+ a_sle16 thresh_below3_val;
+ a_sle16 thresh_below4_val;
+ a_sle16 thresh_below5_val;
+
+ /* highest of bellow */
+ a_sle16 thresh_below6_val;
+
+ /* "alpha" */
+ u8 weight;
+
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_SNR_THRESHOLD_PARAMS_CMDID
+ * Setting the polltime to 0 would disable polling.
+ */
+
+struct wmi_snr_threshold_params_cmd {
+ /* polling time as a factor of LI */
+ __le32 poll_time;
+
+ /* "alpha" */
+ u8 weight;
+
+ /* lowest of uppper */
+ u8 thresh_above1_val;
+
+ u8 thresh_above2_val;
+ u8 thresh_above3_val;
+
+ /* highest of upper */
+ u8 thresh_above4_val;
+
+ /* lowest of bellow */
+ u8 thresh_below1_val;
+
+ u8 thresh_below2_val;
+ u8 thresh_below3_val;
+
+ /* highest of bellow */
+ u8 thresh_below4_val;
+
+ u8 reserved[3];
+} __packed;
+
+enum wmi_preamble_policy {
+ WMI_IGNORE_BARKER_IN_ERP = 0,
+ WMI_DONOT_IGNORE_BARKER_IN_ERP
+};
+
+struct wmi_set_lpreamble_cmd {
+ u8 status;
+ u8 preamble_policy;
+} __packed;
+
+struct wmi_set_rts_cmd {
+ __le16 threshold;
+} __packed;
+
+/* WMI_SET_TX_PWR_CMDID */
+struct wmi_set_tx_pwr_cmd {
+ /* in dbM units */
+ u8 dbM;
+} __packed;
+
+struct wmi_tx_pwr_reply {
+ /* in dbM units */
+ u8 dbM;
+} __packed;
+
+struct wmi_report_sleep_state_event {
+ __le32 sleep_state;
+};
+
+enum wmi_report_sleep_status {
+ WMI_REPORT_SLEEP_STATUS_IS_DEEP_SLEEP = 0,
+ WMI_REPORT_SLEEP_STATUS_IS_AWAKE
+};
+enum target_event_report_config {
+ /* default */
+ DISCONN_EVT_IN_RECONN = 0,
+
+ NO_DISCONN_EVT_IN_RECONN
+};
+
+/* Command Replies */
+
+/* WMI_GET_CHANNEL_LIST_CMDID reply */
+struct wmi_channel_list_reply {
+ u8 reserved;
+
+ /* number of channels in reply */
+ u8 num_ch;
+
+ /* channel in Mhz */
+ __le16 ch_list[1];
+} __packed;
+
+/* List of Events (target to host) */
+enum wmi_event_id {
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID,
+ WMI_DISCONNECT_EVENTID,
+ WMI_BSSINFO_EVENTID,
+ WMI_CMDERROR_EVENTID,
+ WMI_REGDOMAIN_EVENTID,
+ WMI_PSTREAM_TIMEOUT_EVENTID,
+ WMI_NEIGHBOR_REPORT_EVENTID,
+ WMI_TKIP_MICERR_EVENTID,
+ WMI_SCAN_COMPLETE_EVENTID, /* 0x100a */
+ WMI_REPORT_STATISTICS_EVENTID,
+ WMI_RSSI_THRESHOLD_EVENTID,
+ WMI_ERROR_REPORT_EVENTID,
+ WMI_OPT_RX_FRAME_EVENTID,
+ WMI_REPORT_ROAM_TBL_EVENTID,
+ WMI_EXTENSION_EVENTID,
+ WMI_CAC_EVENTID,
+ WMI_SNR_THRESHOLD_EVENTID,
+ WMI_LQ_THRESHOLD_EVENTID,
+ WMI_TX_RETRY_ERR_EVENTID, /* 0x1014 */
+ WMI_REPORT_ROAM_DATA_EVENTID,
+ WMI_TEST_EVENTID,
+ WMI_APLIST_EVENTID,
+ WMI_GET_WOW_LIST_EVENTID,
+ WMI_GET_PMKID_LIST_EVENTID,
+ WMI_CHANNEL_CHANGE_EVENTID,
+ WMI_PEER_NODE_EVENTID,
+ WMI_PSPOLL_EVENTID,
+ WMI_DTIMEXPIRY_EVENTID,
+ WMI_WLAN_VERSION_EVENTID,
+ WMI_SET_PARAMS_REPLY_EVENTID,
+ WMI_ADDBA_REQ_EVENTID, /*0x1020 */
+ WMI_ADDBA_RESP_EVENTID,
+ WMI_DELBA_REQ_EVENTID,
+ WMI_TX_COMPLETE_EVENTID,
+ WMI_HCI_EVENT_EVENTID,
+ WMI_ACL_DATA_EVENTID,
+ WMI_REPORT_SLEEP_STATE_EVENTID,
+ WMI_REPORT_BTCOEX_STATS_EVENTID,
+ WMI_REPORT_BTCOEX_CONFIG_EVENTID,
+ WMI_GET_PMK_EVENTID,
+
+ /* DFS Events */
+ WMI_DFS_HOST_ATTACH_EVENTID,
+ WMI_DFS_HOST_INIT_EVENTID,
+ WMI_DFS_RESET_DELAYLINES_EVENTID,
+ WMI_DFS_RESET_RADARQ_EVENTID,
+ WMI_DFS_RESET_AR_EVENTID,
+ WMI_DFS_RESET_ARQ_EVENTID,
+ WMI_DFS_SET_DUR_MULTIPLIER_EVENTID,
+ WMI_DFS_SET_BANGRADAR_EVENTID,
+ WMI_DFS_SET_DEBUGLEVEL_EVENTID,
+ WMI_DFS_PHYERR_EVENTID,
+
+ /* CCX Evants */
+ WMI_CCX_RM_STATUS_EVENTID,
+
+ /* P2P Events */
+ WMI_P2P_GO_NEG_RESULT_EVENTID,
+
+ WMI_WAC_SCAN_DONE_EVENTID,
+ WMI_WAC_REPORT_BSS_EVENTID,
+ WMI_WAC_START_WPS_EVENTID,
+ WMI_WAC_CTRL_REQ_REPLY_EVENTID,
+
+ WMI_REPORT_WMM_PARAMS_EVENTID,
+ WMI_WAC_REJECT_WPS_EVENTID,
+
+ /* More P2P Events */
+ WMI_P2P_GO_NEG_REQ_EVENTID,
+ WMI_P2P_INVITE_REQ_EVENTID,
+ WMI_P2P_INVITE_RCVD_RESULT_EVENTID,
+ WMI_P2P_INVITE_SENT_RESULT_EVENTID,
+ WMI_P2P_PROV_DISC_RESP_EVENTID,
+ WMI_P2P_PROV_DISC_REQ_EVENTID,
+
+ /* RFKILL Events */
+ WMI_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_RFKILL_GET_MODE_CMD_EVENTID,
+
+ WMI_P2P_START_SDPD_EVENTID,
+ WMI_P2P_SDPD_RX_EVENTID,
+
+ WMI_THIN_RESERVED_START_EVENTID = 0x8000,
+ /* Events in this range are reserved for thinmode */
+ WMI_THIN_RESERVED_END_EVENTID = 0x8fff,
+
+ WMI_SET_CHANNEL_EVENTID,
+ WMI_ASSOC_REQ_EVENTID,
+
+ /* Generic ACS event */
+ WMI_ACS_EVENTID,
+ WMI_STORERECALL_STORE_EVENTID,
+ WMI_WOW_EXT_WAKE_EVENTID,
+ WMI_GTK_OFFLOAD_STATUS_EVENTID,
+ WMI_NETWORK_LIST_OFFLOAD_EVENTID,
+ WMI_REMAIN_ON_CHNL_EVENTID,
+ WMI_CANCEL_REMAIN_ON_CHNL_EVENTID,
+ WMI_TX_STATUS_EVENTID,
+ WMI_RX_PROBE_REQ_EVENTID,
+ WMI_P2P_CAPABILITIES_EVENTID,
+ WMI_RX_ACTION_EVENTID,
+ WMI_P2P_INFO_EVENTID,
+};
+
+struct wmi_ready_event_2 {
+ __le32 sw_version;
+ __le32 abi_version;
+ u8 mac_addr[ETH_ALEN];
+ u8 phy_cap;
+} __packed;
+
+/* Connect Event */
+struct wmi_connect_event {
+ union {
+ struct {
+ __le16 ch;
+ u8 bssid[ETH_ALEN];
+ __le16 listen_intvl;
+ __le16 beacon_intvl;
+ __le32 nw_type;
+ } sta;
+ struct {
+ u8 phymode;
+ u8 aid;
+ u8 mac_addr[ETH_ALEN];
+ u8 auth;
+ u8 keymgmt;
+ __le16 cipher;
+ u8 apsd_info;
+ u8 unused[3];
+ } ap_sta;
+ struct {
+ __le16 ch;
+ u8 bssid[ETH_ALEN];
+ u8 unused[8];
+ } ap_bss;
+ } u;
+ u8 beacon_ie_len;
+ u8 assoc_req_len;
+ u8 assoc_resp_len;
+ u8 assoc_info[1];
+} __packed;
+
+/* Disconnect Event */
+enum wmi_disconnect_reason {
+ NO_NETWORK_AVAIL = 0x01,
+
+ /* bmiss */
+ LOST_LINK = 0x02,
+
+ DISCONNECT_CMD = 0x03,
+ BSS_DISCONNECTED = 0x04,
+ AUTH_FAILED = 0x05,
+ ASSOC_FAILED = 0x06,
+ NO_RESOURCES_AVAIL = 0x07,
+ CSERV_DISCONNECT = 0x08,
+ INVALID_PROFILE = 0x0a,
+ DOT11H_CHANNEL_SWITCH = 0x0b,
+ PROFILE_MISMATCH = 0x0c,
+ CONNECTION_EVICTED = 0x0d,
+ IBSS_MERGE = 0xe,
+};
+
+#define ATH6KL_COUNTRY_RD_SHIFT 16
+
+struct ath6kl_wmi_regdomain {
+ __le32 reg_code;
+};
+
+struct wmi_disconnect_event {
+ /* reason code, see 802.11 spec. */
+ __le16 proto_reason_status;
+
+ /* set if known */
+ u8 bssid[ETH_ALEN];
+
+ /* see WMI_DISCONNECT_REASON */
+ u8 disconn_reason;
+
+ u8 assoc_resp_len;
+ u8 assoc_info[1];
+} __packed;
+
+/*
+ * BSS Info Event.
+ * Mechanism used to inform host of the presence and characteristic of
+ * wireless networks present. Consists of bss info header followed by
+ * the beacon or probe-response frame body. The 802.11 header is no included.
+ */
+enum wmi_bi_ftype {
+ BEACON_FTYPE = 0x1,
+ PROBERESP_FTYPE,
+ ACTION_MGMT_FTYPE,
+ PROBEREQ_FTYPE,
+};
+
+#define DEF_LRSSI_SCAN_PERIOD 5
+#define DEF_LRSSI_ROAM_THRESHOLD 20
+#define DEF_LRSSI_ROAM_FLOOR 60
+#define DEF_SCAN_FOR_ROAM_INTVL 2
+
+enum wmi_roam_ctrl {
+ WMI_FORCE_ROAM = 1,
+ WMI_SET_ROAM_MODE,
+ WMI_SET_HOST_BIAS,
+ WMI_SET_LRSSI_SCAN_PARAMS,
+};
+
+struct bss_bias {
+ u8 bssid[ETH_ALEN];
+ u8 bias;
+} __packed;
+
+struct bss_bias_info {
+ u8 num_bss;
+ struct bss_bias bss_bias[1];
+} __packed;
+
+struct low_rssi_scan_params {
+ __le16 lrssi_scan_period;
+ a_sle16 lrssi_scan_threshold;
+ a_sle16 lrssi_roam_threshold;
+ u8 roam_rssi_floor;
+ u8 reserved[1];
+} __packed;
+
+struct roam_ctrl_cmd {
+ union {
+ u8 bssid[ETH_ALEN];
+ u8 roam_mode;
+ struct bss_bias_info bss;
+ struct low_rssi_scan_params params;
+ } __packed info;
+ u8 roam_ctrl;
+} __packed;
+
+/* BSS INFO HDR version 2.0 */
+struct wmi_bss_info_hdr2 {
+ __le16 ch; /* frequency in MHz */
+
+ /* see, enum wmi_bi_ftype */
+ u8 frame_type;
+
+ u8 snr; /* note: rssi = snr - 95 dBm */
+ u8 bssid[ETH_ALEN];
+ __le16 ie_mask;
+} __packed;
+
+/* Command Error Event */
+enum wmi_error_code {
+ INVALID_PARAM = 0x01,
+ ILLEGAL_STATE = 0x02,
+ INTERNAL_ERROR = 0x03,
+};
+
+struct wmi_cmd_error_event {
+ __le16 cmd_id;
+ u8 err_code;
+} __packed;
+
+struct wmi_pstream_timeout_event {
+ u8 tx_queue_num;
+ u8 rx_queue_num;
+ u8 traffic_direc;
+ u8 traffic_class;
+} __packed;
+
+/*
+ * The WMI_NEIGHBOR_REPORT Event is generated by the target to inform
+ * the host of BSS's it has found that matches the current profile.
+ * It can be used by the host to cache PMKs and/to initiate pre-authentication
+ * if the BSS supports it. The first bssid is always the current associated
+ * BSS.
+ * The bssid and bssFlags information repeats according to the number
+ * or APs reported.
+ */
+enum wmi_bss_flags {
+ WMI_DEFAULT_BSS_FLAGS = 0x00,
+ WMI_PREAUTH_CAPABLE_BSS = 0x01,
+ WMI_PMKID_VALID_BSS = 0x02,
+};
+
+struct wmi_neighbor_info {
+ u8 bssid[ETH_ALEN];
+ u8 bss_flags; /* enum wmi_bss_flags */
+} __packed;
+
+struct wmi_neighbor_report_event {
+ u8 num_neighbors;
+ struct wmi_neighbor_info neighbor[0];
+} __packed;
+
+/* TKIP MIC Error Event */
+struct wmi_tkip_micerr_event {
+ u8 key_id;
+ u8 is_mcast;
+} __packed;
+
+/* WMI_SCAN_COMPLETE_EVENTID */
+struct wmi_scan_complete_event {
+ a_sle32 status;
+} __packed;
+
+#define MAX_OPT_DATA_LEN 1400
+
+/*
+ * Special frame receive Event.
+ * Mechanism used to inform host of the receiption of the special frames.
+ * Consists of special frame info header followed by special frame body.
+ * The 802.11 header is not included.
+ */
+struct wmi_opt_rx_info_hdr {
+ __le16 ch;
+ u8 frame_type;
+ s8 snr;
+ u8 src_addr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+/* Reporting statistic */
+struct tx_stats {
+ __le32 pkt;
+ __le32 byte;
+ __le32 ucast_pkt;
+ __le32 ucast_byte;
+ __le32 mcast_pkt;
+ __le32 mcast_byte;
+ __le32 bcast_pkt;
+ __le32 bcast_byte;
+ __le32 rts_success_cnt;
+ __le32 pkt_per_ac[4];
+ __le32 err_per_ac[4];
+
+ __le32 err;
+ __le32 fail_cnt;
+ __le32 retry_cnt;
+ __le32 mult_retry_cnt;
+ __le32 rts_fail_cnt;
+ a_sle32 ucast_rate;
+} __packed;
+
+struct rx_stats {
+ __le32 pkt;
+ __le32 byte;
+ __le32 ucast_pkt;
+ __le32 ucast_byte;
+ __le32 mcast_pkt;
+ __le32 mcast_byte;
+ __le32 bcast_pkt;
+ __le32 bcast_byte;
+ __le32 frgment_pkt;
+
+ __le32 err;
+ __le32 crc_err;
+ __le32 key_cache_miss;
+ __le32 decrypt_err;
+ __le32 dupl_frame;
+ a_sle32 ucast_rate;
+} __packed;
+
+struct tkip_ccmp_stats {
+ __le32 tkip_local_mic_fail;
+ __le32 tkip_cnter_measures_invoked;
+ __le32 tkip_replays;
+ __le32 tkip_fmt_err;
+ __le32 ccmp_fmt_err;
+ __le32 ccmp_replays;
+} __packed;
+
+struct pm_stats {
+ __le32 pwr_save_failure_cnt;
+ __le16 stop_tx_failure_cnt;
+ __le16 atim_tx_failure_cnt;
+ __le16 atim_rx_failure_cnt;
+ __le16 bcn_rx_failure_cnt;
+} __packed;
+
+struct cserv_stats {
+ __le32 cs_bmiss_cnt;
+ __le32 cs_low_rssi_cnt;
+ __le16 cs_connect_cnt;
+ __le16 cs_discon_cnt;
+ a_sle16 cs_ave_beacon_rssi;
+ __le16 cs_roam_count;
+ a_sle16 cs_rssi;
+ u8 cs_snr;
+ u8 cs_ave_beacon_snr;
+ u8 cs_last_roam_msec;
+} __packed;
+
+struct wlan_net_stats {
+ struct tx_stats tx;
+ struct rx_stats rx;
+ struct tkip_ccmp_stats tkip_ccmp_stats;
+} __packed;
+
+struct arp_stats {
+ __le32 arp_received;
+ __le32 arp_matched;
+ __le32 arp_replied;
+} __packed;
+
+struct wlan_wow_stats {
+ __le32 wow_pkt_dropped;
+ __le16 wow_evt_discarded;
+ u8 wow_host_pkt_wakeups;
+ u8 wow_host_evt_wakeups;
+} __packed;
+
+struct wmi_target_stats {
+ __le32 lq_val;
+ a_sle32 noise_floor_calib;
+ struct pm_stats pm_stats;
+ struct wlan_net_stats stats;
+ struct wlan_wow_stats wow_stats;
+ struct arp_stats arp_stats;
+ struct cserv_stats cserv_stats;
+} __packed;
+
+/*
+ * WMI_RSSI_THRESHOLD_EVENTID.
+ * Indicate the RSSI events to host. Events are indicated when we breach a
+ * thresold value.
+ */
+enum wmi_rssi_threshold_val {
+ WMI_RSSI_THRESHOLD1_ABOVE = 0,
+ WMI_RSSI_THRESHOLD2_ABOVE,
+ WMI_RSSI_THRESHOLD3_ABOVE,
+ WMI_RSSI_THRESHOLD4_ABOVE,
+ WMI_RSSI_THRESHOLD5_ABOVE,
+ WMI_RSSI_THRESHOLD6_ABOVE,
+ WMI_RSSI_THRESHOLD1_BELOW,
+ WMI_RSSI_THRESHOLD2_BELOW,
+ WMI_RSSI_THRESHOLD3_BELOW,
+ WMI_RSSI_THRESHOLD4_BELOW,
+ WMI_RSSI_THRESHOLD5_BELOW,
+ WMI_RSSI_THRESHOLD6_BELOW
+};
+
+struct wmi_rssi_threshold_event {
+ a_sle16 rssi;
+ u8 range;
+} __packed;
+
+enum wmi_snr_threshold_val {
+ WMI_SNR_THRESHOLD1_ABOVE = 1,
+ WMI_SNR_THRESHOLD1_BELOW,
+ WMI_SNR_THRESHOLD2_ABOVE,
+ WMI_SNR_THRESHOLD2_BELOW,
+ WMI_SNR_THRESHOLD3_ABOVE,
+ WMI_SNR_THRESHOLD3_BELOW,
+ WMI_SNR_THRESHOLD4_ABOVE,
+ WMI_SNR_THRESHOLD4_BELOW
+};
+
+struct wmi_snr_threshold_event {
+ /* see, enum wmi_snr_threshold_val */
+ u8 range;
+
+ u8 snr;
+} __packed;
+
+/* WMI_REPORT_ROAM_TBL_EVENTID */
+#define MAX_ROAM_TBL_CAND 5
+
+struct wmi_bss_roam_info {
+ a_sle32 roam_util;
+ u8 bssid[ETH_ALEN];
+ s8 rssi;
+ s8 rssidt;
+ s8 last_rssi;
+ s8 util;
+ s8 bias;
+
+ /* for alignment */
+ u8 reserved;
+} __packed;
+
+/* WMI_CAC_EVENTID */
+enum cac_indication {
+ CAC_INDICATION_ADMISSION = 0x00,
+ CAC_INDICATION_ADMISSION_RESP = 0x01,
+ CAC_INDICATION_DELETE = 0x02,
+ CAC_INDICATION_NO_RESP = 0x03,
+};
+
+#define WMM_TSPEC_IE_LEN 63
+
+struct wmi_cac_event {
+ u8 ac;
+ u8 cac_indication;
+ u8 status_code;
+ u8 tspec_suggestion[WMM_TSPEC_IE_LEN];
+} __packed;
+
+/* WMI_APLIST_EVENTID */
+
+enum aplist_ver {
+ APLIST_VER1 = 1,
+};
+
+struct wmi_ap_info_v1 {
+ u8 bssid[ETH_ALEN];
+ __le16 channel;
+} __packed;
+
+union wmi_ap_info {
+ struct wmi_ap_info_v1 ap_info_v1;
+} __packed;
+
+struct wmi_aplist_event {
+ u8 ap_list_ver;
+ u8 num_ap;
+ union wmi_ap_info ap_list[1];
+} __packed;
+
+/* Developer Commands */
+
+/*
+ * WMI_SET_BITRATE_CMDID
+ *
+ * Get bit rate cmd uses same definition as set bit rate cmd
+ */
+enum wmi_bit_rate {
+ RATE_AUTO = -1,
+ RATE_1Mb = 0,
+ RATE_2Mb = 1,
+ RATE_5_5Mb = 2,
+ RATE_11Mb = 3,
+ RATE_6Mb = 4,
+ RATE_9Mb = 5,
+ RATE_12Mb = 6,
+ RATE_18Mb = 7,
+ RATE_24Mb = 8,
+ RATE_36Mb = 9,
+ RATE_48Mb = 10,
+ RATE_54Mb = 11,
+ RATE_MCS_0_20 = 12,
+ RATE_MCS_1_20 = 13,
+ RATE_MCS_2_20 = 14,
+ RATE_MCS_3_20 = 15,
+ RATE_MCS_4_20 = 16,
+ RATE_MCS_5_20 = 17,
+ RATE_MCS_6_20 = 18,
+ RATE_MCS_7_20 = 19,
+ RATE_MCS_0_40 = 20,
+ RATE_MCS_1_40 = 21,
+ RATE_MCS_2_40 = 22,
+ RATE_MCS_3_40 = 23,
+ RATE_MCS_4_40 = 24,
+ RATE_MCS_5_40 = 25,
+ RATE_MCS_6_40 = 26,
+ RATE_MCS_7_40 = 27,
+};
+
+struct wmi_bit_rate_reply {
+ /* see, enum wmi_bit_rate */
+ s8 rate_index;
+} __packed;
+
+/*
+ * WMI_SET_FIXRATES_CMDID
+ *
+ * Get fix rates cmd uses same definition as set fix rates cmd
+ */
+struct wmi_fix_rates_reply {
+ /* see wmi_bit_rate */
+ __le32 fix_rate_mask;
+} __packed;
+
+enum roam_data_type {
+ /* get the roam time data */
+ ROAM_DATA_TIME = 1,
+};
+
+struct wmi_target_roam_time {
+ __le32 disassoc_time;
+ __le32 no_txrx_time;
+ __le32 assoc_time;
+ __le32 allow_txrx_time;
+ u8 disassoc_bssid[ETH_ALEN];
+ s8 disassoc_bss_rssi;
+ u8 assoc_bssid[ETH_ALEN];
+ s8 assoc_bss_rssi;
+} __packed;
+
+enum wmi_txop_cfg {
+ WMI_TXOP_DISABLED = 0,
+ WMI_TXOP_ENABLED
+};
+
+struct wmi_set_wmm_txop_cmd {
+ u8 txop_enable;
+} __packed;
+
+struct wmi_set_keepalive_cmd {
+ u8 keep_alive_intvl;
+} __packed;
+
+struct wmi_get_keepalive_cmd {
+ __le32 configured;
+ u8 keep_alive_intvl;
+} __packed;
+
+struct wmi_set_appie_cmd {
+ u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
+ u8 ie_len;
+ u8 ie_info[0];
+} __packed;
+
+/* Notify the WSC registration status to the target */
+#define WSC_REG_ACTIVE 1
+#define WSC_REG_INACTIVE 0
+
+#define WOW_MAX_FILTER_LISTS 1
+#define WOW_MAX_FILTERS_PER_LIST 4
+#define WOW_PATTERN_SIZE 64
+#define WOW_MASK_SIZE 64
+
+#define MAC_MAX_FILTERS_PER_LIST 4
+
+struct wow_filter {
+ u8 wow_valid_filter;
+ u8 wow_filter_id;
+ u8 wow_filter_size;
+ u8 wow_filter_offset;
+ u8 wow_filter_mask[WOW_MASK_SIZE];
+ u8 wow_filter_pattern[WOW_PATTERN_SIZE];
+} __packed;
+
+#define MAX_IP_ADDRS 2
+
+struct wmi_set_ip_cmd {
+ /* IP in network byte order */
+ __le32 ips[MAX_IP_ADDRS];
+} __packed;
+
+/* WMI_GET_WOW_LIST_CMD reply */
+struct wmi_get_wow_list_reply {
+ /* number of patterns in reply */
+ u8 num_filters;
+
+ /* this is filter # x of total num_filters */
+ u8 this_filter_num;
+
+ u8 wow_mode;
+ u8 host_mode;
+ struct wow_filter wow_filters[1];
+} __packed;
+
+/* WMI_SET_AKMP_PARAMS_CMD */
+
+struct wmi_pmkid {
+ u8 pmkid[WMI_PMKID_LEN];
+} __packed;
+
+/* WMI_GET_PMKID_LIST_CMD Reply */
+struct wmi_pmkid_list_reply {
+ __le32 num_pmkid;
+ u8 bssid_list[ETH_ALEN][1];
+ struct wmi_pmkid pmkid_list[1];
+} __packed;
+
+/* WMI_ADDBA_REQ_EVENTID */
+struct wmi_addba_req_event {
+ u8 tid;
+ u8 win_sz;
+ __le16 st_seq_no;
+
+ /* f/w response for ADDBA Req; OK (0) or failure (!=0) */
+ u8 status;
+} __packed;
+
+/* WMI_ADDBA_RESP_EVENTID */
+struct wmi_addba_resp_event {
+ u8 tid;
+
+ /* OK (0), failure (!=0) */
+ u8 status;
+
+ /* three values: not supported(0), 3839, 8k */
+ __le16 amsdu_sz;
+} __packed;
+
+/* WMI_DELBA_EVENTID
+ * f/w received a DELBA for peer and processed it.
+ * Host is notified of this
+ */
+struct wmi_delba_event {
+ u8 tid;
+ u8 is_peer_initiator;
+ __le16 reason_code;
+} __packed;
+
+#define PEER_NODE_JOIN_EVENT 0x00
+#define PEER_NODE_LEAVE_EVENT 0x01
+#define PEER_FIRST_NODE_JOIN_EVENT 0x10
+#define PEER_LAST_NODE_LEAVE_EVENT 0x11
+
+struct wmi_peer_node_event {
+ u8 event_code;
+ u8 peer_mac_addr[ETH_ALEN];
+} __packed;
+
+/* Transmit complete event data structure(s) */
+
+/* version 1 of tx complete msg */
+struct tx_complete_msg_v1 {
+#define TX_COMPLETE_STATUS_SUCCESS 0
+#define TX_COMPLETE_STATUS_RETRIES 1
+#define TX_COMPLETE_STATUS_NOLINK 2
+#define TX_COMPLETE_STATUS_TIMEOUT 3
+#define TX_COMPLETE_STATUS_OTHER 4
+
+ u8 status;
+
+ /* packet ID to identify parent packet */
+ u8 pkt_id;
+
+ /* rate index on successful transmission */
+ u8 rate_idx;
+
+ /* number of ACK failures in tx attempt */
+ u8 ack_failures;
+} __packed;
+
+struct wmi_tx_complete_event {
+ /* no of tx comp msgs following this struct */
+ u8 num_msg;
+
+ /* length in bytes for each individual msg following this struct */
+ u8 msg_len;
+
+ /* version of tx complete msg data following this struct */
+ u8 msg_type;
+
+ /* individual messages follow this header */
+ u8 reserved;
+} __packed;
+
+/*
+ * ------- AP Mode definitions --------------
+ */
+
+/*
+ * !!! Warning !!!
+ * -Changing the following values needs compilation of both driver and firmware
+ */
+#define AP_MAX_NUM_STA 8
+
+/* Spl. AID used to set DTIM flag in the beacons */
+#define MCAST_AID 0xFF
+
+#define DEF_AP_COUNTRY_CODE "US "
+
+/* Used with WMI_AP_SET_NUM_STA_CMDID */
+
+/*
+ * Used with WMI_AP_SET_MLME_CMDID
+ */
+
+/* MLME Commands */
+#define WMI_AP_MLME_ASSOC 1 /* associate station */
+#define WMI_AP_DISASSOC 2 /* disassociate station */
+#define WMI_AP_DEAUTH 3 /* deauthenticate station */
+#define WMI_AP_MLME_AUTHORIZE 4 /* authorize station */
+#define WMI_AP_MLME_UNAUTHORIZE 5 /* unauthorize station */
+
+struct wmi_ap_set_mlme_cmd {
+ u8 mac[ETH_ALEN];
+ __le16 reason; /* 802.11 reason code */
+ u8 cmd; /* operation to perform (WMI_AP_*) */
+} __packed;
+
+struct wmi_ap_set_pvb_cmd {
+ __le32 flag;
+ __le16 rsvd;
+ __le16 aid;
+} __packed;
+
+struct wmi_rx_frame_format_cmd {
+ /* version of meta data for rx packets <0 = default> (0-7 = valid) */
+ u8 meta_ver;
+
+ /*
+ * 1 == leave .11 header intact,
+ * 0 == replace .11 header with .3 <default>
+ */
+ u8 dot11_hdr;
+
+ /*
+ * 1 == defragmentation is performed by host,
+ * 0 == performed by target <default>
+ */
+ u8 defrag_on_host;
+
+ /* for alignment */
+ u8 reserved[1];
+} __packed;
+
+/* AP mode events */
+
+/* WMI_PS_POLL_EVENT */
+struct wmi_pspoll_event {
+ __le16 aid;
+} __packed;
+
+struct wmi_per_sta_stat {
+ __le32 tx_bytes;
+ __le32 tx_pkts;
+ __le32 tx_error;
+ __le32 tx_discard;
+ __le32 rx_bytes;
+ __le32 rx_pkts;
+ __le32 rx_error;
+ __le32 rx_discard;
+ __le32 aid;
+} __packed;
+
+struct wmi_ap_mode_stat {
+ __le32 action;
+ struct wmi_per_sta_stat sta[AP_MAX_NUM_STA + 1];
+} __packed;
+
+/* End of AP mode definitions */
+
+struct wmi_remain_on_chnl_cmd {
+ __le32 freq;
+ __le32 duration;
+} __packed;
+
+struct wmi_send_action_cmd {
+ __le32 id;
+ __le32 freq;
+ __le32 wait;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+struct wmi_tx_status_event {
+ __le32 id;
+ u8 ack_status;
+} __packed;
+
+struct wmi_probe_req_report_cmd {
+ u8 enable;
+} __packed;
+
+struct wmi_disable_11b_rates_cmd {
+ u8 disable;
+} __packed;
+
+struct wmi_set_appie_extended_cmd {
+ u8 role_id;
+ u8 mgmt_frm_type;
+ u8 ie_len;
+ u8 ie_info[0];
+} __packed;
+
+struct wmi_remain_on_chnl_event {
+ __le32 freq;
+ __le32 duration;
+} __packed;
+
+struct wmi_cancel_remain_on_chnl_event {
+ __le32 freq;
+ __le32 duration;
+ u8 status;
+} __packed;
+
+struct wmi_rx_action_event {
+ __le32 freq;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+struct wmi_p2p_capabilities_event {
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+struct wmi_p2p_rx_probe_req_event {
+ __le32 freq;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+#define P2P_FLAG_CAPABILITIES_REQ (0x00000001)
+#define P2P_FLAG_MACADDR_REQ (0x00000002)
+#define P2P_FLAG_HMODEL_REQ (0x00000002)
+
+struct wmi_get_p2p_info {
+ __le32 info_req_flags;
+} __packed;
+
+struct wmi_p2p_info_event {
+ __le32 info_req_flags;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+struct wmi_p2p_capabilities {
+ u8 go_power_save;
+} __packed;
+
+struct wmi_p2p_macaddr {
+ u8 mac_addr[ETH_ALEN];
+} __packed;
+
+struct wmi_p2p_hmodel {
+ u8 p2p_model;
+} __packed;
+
+struct wmi_p2p_probe_response_cmd {
+ __le32 freq;
+ u8 destination_addr[ETH_ALEN];
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+/* Extended WMI (WMIX)
+ *
+ * Extended WMIX commands are encapsulated in a WMI message with
+ * cmd=WMI_EXTENSION_CMD.
+ *
+ * Extended WMI commands are those that are needed during wireless
+ * operation, but which are not really wireless commands. This allows,
+ * for instance, platform-specific commands. Extended WMI commands are
+ * embedded in a WMI command message with WMI_COMMAND_ID=WMI_EXTENSION_CMDID.
+ * Extended WMI events are similarly embedded in a WMI event message with
+ * WMI_EVENT_ID=WMI_EXTENSION_EVENTID.
+ */
+struct wmix_cmd_hdr {
+ __le32 cmd_id;
+} __packed;
+
+enum wmix_command_id {
+ WMIX_DSETOPEN_REPLY_CMDID = 0x2001,
+ WMIX_DSETDATA_REPLY_CMDID,
+ WMIX_GPIO_OUTPUT_SET_CMDID,
+ WMIX_GPIO_INPUT_GET_CMDID,
+ WMIX_GPIO_REGISTER_SET_CMDID,
+ WMIX_GPIO_REGISTER_GET_CMDID,
+ WMIX_GPIO_INTR_ACK_CMDID,
+ WMIX_HB_CHALLENGE_RESP_CMDID,
+ WMIX_DBGLOG_CFG_MODULE_CMDID,
+ WMIX_PROF_CFG_CMDID, /* 0x200a */
+ WMIX_PROF_ADDR_SET_CMDID,
+ WMIX_PROF_START_CMDID,
+ WMIX_PROF_STOP_CMDID,
+ WMIX_PROF_COUNT_GET_CMDID,
+};
+
+enum wmix_event_id {
+ WMIX_DSETOPENREQ_EVENTID = 0x3001,
+ WMIX_DSETCLOSE_EVENTID,
+ WMIX_DSETDATAREQ_EVENTID,
+ WMIX_GPIO_INTR_EVENTID,
+ WMIX_GPIO_DATA_EVENTID,
+ WMIX_GPIO_ACK_EVENTID,
+ WMIX_HB_CHALLENGE_RESP_EVENTID,
+ WMIX_DBGLOG_EVENTID,
+ WMIX_PROF_COUNT_EVENTID,
+};
+
+/*
+ * ------Error Detection support-------
+ */
+
+/*
+ * WMIX_HB_CHALLENGE_RESP_CMDID
+ * Heartbeat Challenge Response command
+ */
+struct wmix_hb_challenge_resp_cmd {
+ __le32 cookie;
+ __le32 source;
+} __packed;
+
+struct ath6kl_wmix_dbglog_cfg_module_cmd {
+ __le32 valid;
+ __le32 config;
+} __packed;
+
+/* End of Extended WMI (WMIX) */
+
+enum wmi_sync_flag {
+ NO_SYNC_WMIFLAG = 0,
+
+ /* transmit all queued data before cmd */
+ SYNC_BEFORE_WMIFLAG,
+
+ /* any new data waits until cmd execs */
+ SYNC_AFTER_WMIFLAG,
+
+ SYNC_BOTH_WMIFLAG,
+
+ /* end marker */
+ END_WMIFLAG
+};
+
+enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi);
+void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id);
+int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
+int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
+ u8 msg_type, bool more_data,
+ enum wmi_data_hdr_data_type data_type,
+ u8 meta_ver, void *tx_meta_info);
+
+int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
+int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb);
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
+ u32 layer2_priority, bool wmm_enabled,
+ u8 *ac);
+
+int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb);
+
+int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+ enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag);
+
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+ enum dot11_auth_mode dot11_auth_mode,
+ enum auth_mode auth_mode,
+ enum crypto_type pairwise_crypto,
+ u8 pairwise_crypto_len,
+ enum crypto_type group_crypto,
+ u8 group_crypto_len, int ssid_len, u8 *ssid,
+ u8 *bssid, u16 channel, u32 ctrl_flags);
+
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel);
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi);
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time, u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list);
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+ u16 fg_end_sec, u16 bg_sec,
+ u16 minact_chdw_msec, u16 maxact_chdw_msec,
+ u16 pas_chdw_msec, u8 short_scan_ratio,
+ u8 scan_ctrl_flag, u32 max_dfsch_act_time,
+ u16 maxact_scan_per_ssid);
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask);
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+ u8 ssid_len, u8 *ssid);
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+ u16 listen_beacons);
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode);
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+ u16 ps_poll_num, u16 dtim_policy,
+ u16 tx_wakup_policy, u16 num_tx_to_wakeup,
+ u16 ps_fail_event_policy);
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout);
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+ struct wmi_create_pstream_cmd *pstream);
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid);
+
+int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold);
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status,
+ u8 preamble_policy);
+
+int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
+int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config);
+
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi);
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+ enum crypto_type key_type,
+ u8 key_usage, u8 key_len,
+ u8 *key_rsc, u8 *key_material,
+ u8 key_op_ctrl, u8 *mac_addr,
+ enum wmi_sync_flag sync_flag);
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk);
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index);
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+ const u8 *pmkid, bool set);
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM);
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi);
+
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg);
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl);
+int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
+
+s32 ath6kl_wmi_get_rate(s8 rate_index);
+
+int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd);
+int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
+
+/* AP mode */
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p);
+
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason);
+
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag);
+
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_version,
+ bool rx_dot11_hdr, bool defrag_on_host);
+
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
+ u8 ie_len);
+
+/* P2P */
+int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
+
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur);
+
+int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
+ const u8 *data, u16 data_len);
+
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
+ const u8 *dst,
+ const u8 *data, u16 data_len);
+
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable);
+
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags);
+
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi);
+
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
+ u8 ie_len);
+
+void *ath6kl_wmi_init(struct ath6kl *devt);
+void ath6kl_wmi_shutdown(struct wmi *wmi);
+
+#endif /* WMI_H */
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 05a6fade7b1..36ed3c46fec 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -21,6 +21,7 @@ ath9k_hw-y:= \
ar5008_phy.o \
ar9002_calib.o \
ar9003_calib.o \
+ ar9003_rtt.o \
calib.o \
eeprom.o \
eeprom_def.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 0b36fcf8a28..85a54cd2b08 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -133,7 +133,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
goto err_free_hw;
}
- ret = ath9k_init_device(id->driver_data, sc, 0x0, &ath_ahb_bus_ops);
+ ret = ath9k_init_device(id->driver_data, sc, &ath_ahb_bus_ops);
if (ret) {
dev_err(&pdev->dev, "failed to initialize device\n");
goto err_irq;
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index bfb6481f01f..2776c3c1f50 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -273,7 +273,8 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
immunityLevel, aniState->noiseFloor,
aniState->rssiThrLow, aniState->rssiThrHigh);
- aniState->ofdmNoiseImmunityLevel = immunityLevel;
+ if (aniState->update_ani)
+ aniState->ofdmNoiseImmunityLevel = immunityLevel;
entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -346,7 +347,8 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
- aniState->cckNoiseImmunityLevel = immunityLevel;
+ if (aniState->update_ani)
+ aniState->cckNoiseImmunityLevel = immunityLevel;
entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -502,9 +504,6 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
ATH9K_ANI_CCK_WEAK_SIG_THR);
- ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) |
- ATH9K_RX_FILTER_PHYERR);
-
ath9k_ani_restart(ah);
return;
}
@@ -525,8 +524,6 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
aniState->firstepLevel);
- ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
- ~ATH9K_RX_FILTER_PHYERR);
ath9k_ani_restart(ah);
ENABLE_REGWRITE_BUFFER(ah);
@@ -593,6 +590,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
+ aniState->update_ani = false;
ath9k_hw_set_ofdm_nil(ah, ATH9K_ANI_OFDM_DEF_LEVEL);
ath9k_hw_set_cck_nil(ah, ATH9K_ANI_CCK_DEF_LEVEL);
}
@@ -609,6 +607,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
+ aniState->update_ani = true;
ath9k_hw_set_ofdm_nil(ah,
aniState->ofdmNoiseImmunityLevel);
ath9k_hw_set_cck_nil(ah,
@@ -643,7 +642,7 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
listenTime = ath_hw_get_listen_time(common);
if (listenTime <= 0) {
- ah->stats.ast_ani_lneg++;
+ ah->stats.ast_ani_lneg_or_lzero++;
ath9k_ani_restart(ah);
return false;
}
@@ -892,6 +891,8 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ani->ofdmWeakSigDetectOff =
!ATH9K_ANI_USE_OFDM_WEAK_SIG;
ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
+ ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
+ ani->update_ani = false;
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index dbab5b9ce49..83029d6c7b2 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -122,6 +122,7 @@ struct ar5416AniState {
u8 firstepLevel;
u8 ofdmWeakSigDetectOff;
u8 cckWeakSigThreshold;
+ bool update_ani;
u32 listenTime;
int32_t rssiThrLow;
int32_t rssiThrHigh;
@@ -148,8 +149,7 @@ struct ar5416Stats {
u32 ast_ani_ofdmerrs;
u32 ast_ani_cckerrs;
u32 ast_ani_reset;
- u32 ast_ani_lzero;
- u32 ast_ani_lneg;
+ u32 ast_ani_lneg_or_lzero;
u32 avgbrssi;
struct ath9k_mib_stats ast_mibstats;
};
@@ -159,7 +159,5 @@ void ath9k_enable_mib_counters(struct ath_hw *ah);
void ath9k_hw_disable_mib_counters(struct ath_hw *ah);
void ath9k_hw_ani_setup(struct ath_hw *ah);
void ath9k_hw_ani_init(struct ath_hw *ah);
-int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
- struct ath9k_channel *chan);
#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
index 234617c948a..f81e7fc60a3 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -14,70 +14,71 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-static const u32 ar5416Modes[][6] = {
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180},
- {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0},
- {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a},
- {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303},
- {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
- {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
- {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
- {0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0},
- {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
- {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
- {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
- {0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de},
- {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e},
- {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e},
- {0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18},
- {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
- {0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190},
- {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081},
- {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0},
- {0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134},
- {0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b},
- {0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020},
- {0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80},
- {0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80},
- {0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80},
- {0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120},
- {0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00},
- {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be},
- {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
- {0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c},
- {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
- {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
- {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880},
- {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788},
- {0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120},
- {0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120},
- {0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120},
- {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
- {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
- {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa},
- {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000},
- {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402},
- {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06},
- {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b},
- {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b},
- {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a},
- {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf},
- {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f},
- {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f},
- {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f},
- {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+static const u32 ar5416Modes[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300},
+ {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
+ {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
+ {0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0},
+ {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de},
+ {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e},
+ {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18},
+ {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190},
+ {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081},
+ {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134},
+ {0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b},
+ {0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020},
+ {0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80},
+ {0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80},
+ {0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80},
+ {0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120},
+ {0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00},
+ {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be},
+ {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
+ {0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c},
+ {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
+ {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
+ {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880},
+ {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788},
+ {0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120},
+ {0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120},
+ {0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120},
+ {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa},
+ {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000},
+ {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402},
+ {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06},
+ {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b},
+ {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b},
+ {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a},
+ {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf},
+ {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f},
+ {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f},
+ {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f},
+ {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
static const u32 ar5416Common[][2] = {
@@ -668,6 +669,6 @@ static const u32 ar5416Addac[][2] = {
{0x0000989c, 0x00000000},
{0x0000989c, 0x00000000},
{0x0000989c, 0x00000000},
- {0x000098cc, 0x00000000},
+ {0x000098c4, 0x00000000},
};
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index fac2c6da6ca..f199e9e2514 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -704,8 +704,10 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
}
- if (!AR_SREV_5416_20_OR_LATER(ah) ||
- AR_SREV_9280_20_OR_LATER(ah))
+ REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
+ AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+
+ if (AR_SREV_9280_20_OR_LATER(ah))
return;
/*
* Disable BB clock gating
@@ -761,10 +763,8 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
static int ar5008_hw_process_ini(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
int i, regWrites = 0;
- struct ieee80211_channel *channel = chan->chan;
u32 modesIndex, freqIndex;
switch (chan->chanmode) {
@@ -802,7 +802,8 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
/* Write ADDAC shifts */
REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
- ah->eep_ops->set_addac(ah, chan);
+ if (ah->eep_ops->set_addac)
+ ah->eep_ops->set_addac(ah, chan);
if (AR_SREV_5416_22_OR_LATER(ah)) {
REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
@@ -900,14 +901,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
ar5008_hw_set_channel_regs(ah, chan);
ar5008_hw_init_chain_masks(ah);
ath9k_olc_init(ah);
-
- /* Set TX power */
- ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(regulatory, chan),
- channel->max_antenna_gain * 2,
- channel->max_power * 2,
- min((u32) MAX_RATE_POWER,
- (u32) regulatory->power_limit), false);
+ ath9k_hw_apply_txpower(ah, chan);
/* Write analog registers */
if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
@@ -1007,24 +1001,6 @@ static void ar5008_restore_chainmask(struct ath_hw *ah)
}
}
-static void ar5008_set_diversity(struct ath_hw *ah, bool value)
-{
- u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
- if (value)
- v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
- else
- v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
- REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
-}
-
-static u32 ar9100_hw_compute_pll_control(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- if (chan && IS_CHAN_5GHZ(chan))
- return 0x1450;
- return 0x1458;
-}
-
static u32 ar9160_hw_compute_pll_control(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -1654,7 +1630,6 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
priv_ops->rfbus_req = ar5008_hw_rfbus_req;
priv_ops->rfbus_done = ar5008_hw_rfbus_done;
priv_ops->restore_chainmask = ar5008_restore_chainmask;
- priv_ops->set_diversity = ar5008_set_diversity;
priv_ops->do_getnf = ar5008_hw_do_getnf;
priv_ops->set_radar_params = ar5008_hw_set_radar_params;
@@ -1664,9 +1639,7 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
} else
priv_ops->ani_control = ar5008_hw_ani_control_old;
- if (AR_SREV_9100(ah))
- priv_ops->compute_pll_control = ar9100_hw_compute_pll_control;
- else if (AR_SREV_9160_10_OR_LATER(ah))
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
else
priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
index 6d2e2f3303f..e8bdc75405f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -14,73 +14,74 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-static const u32 ar5416Modes_9100[][6] = {
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180},
- {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0},
- {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a},
- {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303},
- {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
- {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
- {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
- {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0},
- {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
- {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
- {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
- {0x00009850, 0x6c48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6c48b0e2, 0x6c48b0e2},
- {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e},
- {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e},
- {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18},
- {0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
- {0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0},
- {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081},
- {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0},
- {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016},
- {0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d},
- {0x00009940, 0x00750604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204},
- {0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020},
- {0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e},
- {0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff},
- {0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0},
- {0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0},
- {0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0},
- {0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120},
- {0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00},
- {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be},
- {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
- {0x000099c8, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329},
- {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
- {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
- {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880},
- {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788},
- {0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120},
- {0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120},
- {0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120},
- {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
- {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
- {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa},
- {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000},
- {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402},
- {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06},
- {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b},
- {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b},
- {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a},
- {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf},
- {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f},
- {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f},
- {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f},
- {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+static const u32 ar5416Modes_9100[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300},
+ {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
+ {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
+ {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x00009850, 0x6c48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6c48b0e2},
+ {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e},
+ {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20},
+ {0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0},
+ {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081},
+ {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d},
+ {0x00009940, 0x00750604, 0x00754604, 0xfff81204, 0xfff81204},
+ {0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020},
+ {0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e},
+ {0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff},
+ {0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0},
+ {0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0},
+ {0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0},
+ {0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120},
+ {0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00},
+ {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be},
+ {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
+ {0x000099c8, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329},
+ {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
+ {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
+ {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880},
+ {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788},
+ {0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120},
+ {0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120},
+ {0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120},
+ {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa},
+ {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000},
+ {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402},
+ {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06},
+ {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b},
+ {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b},
+ {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a},
+ {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf},
+ {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f},
+ {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f},
+ {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f},
+ {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
static const u32 ar5416Common_9100[][2] = {
@@ -666,71 +667,72 @@ static const u32 ar5416Addac_9100[][2] = {
{0x000098cc, 0x00000000},
};
-static const u32 ar5416Modes_9160[][6] = {
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180},
- {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0},
- {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a},
- {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303},
- {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
- {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
- {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
- {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0},
- {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
- {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
- {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68},
- {0x00009850, 0x6c48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6c48b0e2, 0x6c48b0e2},
- {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e},
- {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e},
- {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18},
- {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
- {0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0},
- {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081},
- {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0},
- {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016},
- {0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d},
- {0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020},
- {0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40},
- {0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40},
- {0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40},
- {0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120},
- {0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce},
- {0x000099bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00},
- {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be},
- {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
- {0x000099c8, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329},
- {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
- {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
- {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880},
- {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788},
- {0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120},
- {0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120},
- {0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120},
- {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
- {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
- {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa},
- {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000},
- {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402},
- {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06},
- {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b},
- {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b},
- {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a},
- {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf},
- {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f},
- {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f},
- {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f},
- {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+static const u32 ar5416Modes_9160[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300},
+ {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
+ {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
+ {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x00009850, 0x6c48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6c48b0e2},
+ {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e},
+ {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20},
+ {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0},
+ {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081},
+ {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d},
+ {0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020},
+ {0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40},
+ {0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40},
+ {0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40},
+ {0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120},
+ {0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x000099bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00},
+ {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be},
+ {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
+ {0x000099c8, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329},
+ {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
+ {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
+ {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880},
+ {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788},
+ {0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120},
+ {0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120},
+ {0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120},
+ {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa},
+ {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000},
+ {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402},
+ {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06},
+ {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b},
+ {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b},
+ {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a},
+ {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf},
+ {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f},
+ {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f},
+ {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f},
+ {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
static const u32 ar5416Common_9160[][2] = {
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 2d394af8217..e0ab0657cc3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -869,6 +869,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
ar9002_hw_pa_cal(ah, true);
/* Do NF Calibration after DC offset and other calibrations */
+ ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
if (ah->caldata)
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 44d9d8d5649..626d547d2f0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -30,7 +30,7 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
{
if (AR_SREV_9271(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
- ARRAY_SIZE(ar9271Modes_9271), 6);
+ ARRAY_SIZE(ar9271Modes_9271), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
ARRAY_SIZE(ar9271Common_9271), 2);
INIT_INI_ARRAY(&ah->iniCommon_normal_cck_fir_coeff_9271,
@@ -41,21 +41,21 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
ARRAY_SIZE(ar9271Common_japan_2484_cck_fir_coeff_9271), 2);
INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
ar9271Modes_9271_1_0_only,
- ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
+ ARRAY_SIZE(ar9271Modes_9271_1_0_only), 5);
INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
- ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 6);
+ ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 5);
INIT_INI_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
ar9271Modes_high_power_tx_gain_9271,
- ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 6);
+ ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 5);
INIT_INI_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
ar9271Modes_normal_power_tx_gain_9271,
- ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 6);
+ ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 5);
return;
}
if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
- ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
+ ARRAY_SIZE(ar9287Modes_9287_1_1), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
ARRAY_SIZE(ar9287Common_9287_1_1), 2);
if (ah->config.pcie_clock_req)
@@ -71,7 +71,7 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
- ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
+ ARRAY_SIZE(ar9285Modes_9285_1_2), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
ARRAY_SIZE(ar9285Common_9285_1_2), 2);
@@ -87,7 +87,7 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
}
} else if (AR_SREV_9280_20_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
- ARRAY_SIZE(ar9280Modes_9280_2), 6);
+ ARRAY_SIZE(ar9280Modes_9280_2), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
ARRAY_SIZE(ar9280Common_9280_2), 2);
@@ -105,7 +105,7 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
} else if (AR_SREV_9160_10_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
- ARRAY_SIZE(ar5416Modes_9160), 6);
+ ARRAY_SIZE(ar5416Modes_9160), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
ARRAY_SIZE(ar5416Common_9160), 2);
INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
@@ -134,7 +134,7 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
}
} else if (AR_SREV_9100_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
- ARRAY_SIZE(ar5416Modes_9100), 6);
+ ARRAY_SIZE(ar5416Modes_9100), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
ARRAY_SIZE(ar5416Common_9100), 2);
INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
@@ -157,7 +157,7 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
ARRAY_SIZE(ar5416Addac_9100), 2);
} else {
INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
- ARRAY_SIZE(ar5416Modes), 6);
+ ARRAY_SIZE(ar5416Modes), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
ARRAY_SIZE(ar5416Common), 2);
INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
@@ -207,19 +207,19 @@ static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9280Modes_backoff_13db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
+ ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 5);
else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9280Modes_backoff_23db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
+ ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 5);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
+ ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 5);
} else {
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
+ ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 5);
}
}
@@ -234,15 +234,15 @@ static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah)
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9280Modes_high_power_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
+ ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 5);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
+ ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 5);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
+ ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 5);
}
}
@@ -251,14 +251,14 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
if (AR_SREV_9287_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9287Modes_rx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
+ ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 5);
else if (AR_SREV_9280_20(ah))
ar9280_20_hw_init_rxgain_ini(ah);
if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9287Modes_tx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
+ ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 5);
} else if (AR_SREV_9280_20(ah)) {
ar9280_20_hw_init_txgain_ini(ah);
} else if (AR_SREV_9285_12_OR_LATER(ah)) {
@@ -270,24 +270,24 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_XE2_0_high_power,
ARRAY_SIZE(
- ar9285Modes_XE2_0_high_power), 6);
+ ar9285Modes_XE2_0_high_power), 5);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_high_power_tx_gain_9285_1_2,
ARRAY_SIZE(
- ar9285Modes_high_power_tx_gain_9285_1_2), 6);
+ ar9285Modes_high_power_tx_gain_9285_1_2), 5);
}
} else {
if (AR_SREV_9285E_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_XE2_0_normal_power,
ARRAY_SIZE(
- ar9285Modes_XE2_0_normal_power), 6);
+ ar9285Modes_XE2_0_normal_power), 5);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_original_tx_gain_9285_1_2,
ARRAY_SIZE(
- ar9285Modes_original_tx_gain_9285_1_2), 6);
+ ar9285Modes_original_tx_gain_9285_1_2), 5);
}
}
}
@@ -303,17 +303,13 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
* register as the other analog registers. Hence the 9 writes.
*/
static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
- int restore,
- int power_off)
+ bool power_off)
{
u8 i;
u32 val;
- if (ah->is_pciexpress != true || ah->aspm_enabled != true)
- return;
-
/* Nothing to do on restore for 11N */
- if (!restore) {
+ if (!power_off /* !restore */) {
if (AR_SREV_9280_20_OR_LATER(ah)) {
/*
* AR9280 2.0 or later chips use SerDes values from the
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index 7573257731b..863db321070 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -14,53 +14,54 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-static const u32 ar9280Modes_9280_2[][6] = {
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180},
- {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880},
- {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303},
- {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
- {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
- {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
- {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
- {0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e, 0x206a012e},
- {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0},
- {0x00009850, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2},
- {0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
- {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e},
- {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18},
- {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
- {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881},
- {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0},
- {0x00009918, 0x0000000a, 0x00000014, 0x00000268, 0x0000000b, 0x00000016},
- {0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d},
- {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010},
- {0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010},
- {0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010},
- {0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210},
- {0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce},
- {0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c},
- {0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00, 0x00000c00},
- {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
- {0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444},
- {0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019},
- {0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019},
- {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
- {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
- {0x0000a23c, 0x13c88000, 0x13c88000, 0x13c88001, 0x13c88000, 0x13c88000},
- {0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000},
- {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e},
- {0x0000a388, 0x0c000000, 0x0c000000, 0x08000000, 0x0c000000, 0x0c000000},
- {0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000},
+static const u32 ar9280Modes_9280_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+ {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300},
+ {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
+ {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
+ {0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009850, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20},
+ {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x00009918, 0x0000000a, 0x00000014, 0x00000268, 0x0000000b},
+ {0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d},
+ {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010},
+ {0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010},
+ {0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010},
+ {0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210},
+ {0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c},
+ {0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00},
+ {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444},
+ {0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019},
+ {0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019},
+ {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a23c, 0x13c88000, 0x13c88000, 0x13c88001, 0x13c88000},
+ {0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000},
+ {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e},
+ {0x0000a388, 0x0c000000, 0x0c000000, 0x08000000, 0x0c000000},
+ {0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000},
};
static const u32 ar9280Common_9280_2[][2] = {
@@ -424,471 +425,476 @@ static const u32 ar9280Modes_fast_clock_9280_2[][3] = {
{0x00009918, 0x0000000b, 0x00000016},
};
-static const u32 ar9280Modes_backoff_23db_rxgain_9280_2[][6] = {
- {0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290},
- {0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300},
- {0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304},
- {0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308},
- {0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c},
- {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000},
- {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004},
- {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008},
- {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c},
- {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080},
- {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084},
- {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088},
- {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c},
- {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100},
- {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104},
- {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108},
- {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c},
- {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110},
- {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114},
- {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180},
- {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184},
- {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188},
- {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c},
- {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190},
- {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194},
- {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0},
- {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c},
- {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8},
- {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284},
- {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288},
- {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224},
- {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290},
- {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300},
- {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304},
- {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308},
- {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c},
- {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380},
- {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384},
- {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700},
- {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704},
- {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708},
- {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c},
- {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780},
- {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784},
- {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00},
- {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04},
- {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08},
- {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c},
- {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b10, 0x00008b10, 0x00008b10},
- {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b80, 0x00008b80, 0x00008b80},
- {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b84, 0x00008b84, 0x00008b84},
- {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b88, 0x00008b88, 0x00008b88},
- {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b8c, 0x00008b8c, 0x00008b8c},
- {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008b90, 0x00008b90, 0x00008b90},
- {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008b94, 0x00008b94, 0x00008b94},
- {0x00009adc, 0x0000b390, 0x0000b390, 0x00008b98, 0x00008b98, 0x00008b98},
- {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008ba4, 0x00008ba4, 0x00008ba4},
- {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008ba8, 0x00008ba8, 0x00008ba8},
- {0x00009ae8, 0x0000b780, 0x0000b780, 0x00008bac, 0x00008bac, 0x00008bac},
- {0x00009aec, 0x0000b784, 0x0000b784, 0x00008bb0, 0x00008bb0, 0x00008bb0},
- {0x00009af0, 0x0000b788, 0x0000b788, 0x00008bb4, 0x00008bb4, 0x00008bb4},
- {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00008ba1, 0x00008ba1, 0x00008ba1},
- {0x00009af8, 0x0000b790, 0x0000b790, 0x00008ba5, 0x00008ba5, 0x00008ba5},
- {0x00009afc, 0x0000b794, 0x0000b794, 0x00008ba9, 0x00008ba9, 0x00008ba9},
- {0x00009b00, 0x0000b798, 0x0000b798, 0x00008bad, 0x00008bad, 0x00008bad},
- {0x00009b04, 0x0000d784, 0x0000d784, 0x00008bb1, 0x00008bb1, 0x00008bb1},
- {0x00009b08, 0x0000d788, 0x0000d788, 0x00008bb5, 0x00008bb5, 0x00008bb5},
- {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00008ba2, 0x00008ba2, 0x00008ba2},
- {0x00009b10, 0x0000d790, 0x0000d790, 0x00008ba6, 0x00008ba6, 0x00008ba6},
- {0x00009b14, 0x0000f780, 0x0000f780, 0x00008baa, 0x00008baa, 0x00008baa},
- {0x00009b18, 0x0000f784, 0x0000f784, 0x00008bae, 0x00008bae, 0x00008bae},
- {0x00009b1c, 0x0000f788, 0x0000f788, 0x00008bb2, 0x00008bb2, 0x00008bb2},
- {0x00009b20, 0x0000f78c, 0x0000f78c, 0x00008bb6, 0x00008bb6, 0x00008bb6},
- {0x00009b24, 0x0000f790, 0x0000f790, 0x00008ba3, 0x00008ba3, 0x00008ba3},
- {0x00009b28, 0x0000f794, 0x0000f794, 0x00008ba7, 0x00008ba7, 0x00008ba7},
- {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x00008bab, 0x00008bab, 0x00008bab},
- {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00008baf, 0x00008baf, 0x00008baf},
- {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00008bb3, 0x00008bb3, 0x00008bb3},
- {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00008bb7, 0x00008bb7, 0x00008bb7},
- {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00008bc3, 0x00008bc3, 0x00008bc3},
- {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x00008bc7, 0x00008bc7, 0x00008bc7},
- {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x00008bcb, 0x00008bcb, 0x00008bcb},
- {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00008bcf, 0x00008bcf, 0x00008bcf},
- {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00008bd3, 0x00008bd3, 0x00008bd3},
- {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00008bd7, 0x00008bd7, 0x00008bd7},
- {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b98, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bac, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009be0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009be4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009be8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bec, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb},
- {0x00009848, 0x00001066, 0x00001066, 0x00001055, 0x00001055, 0x00001055},
- {0x0000a848, 0x00001066, 0x00001066, 0x00001055, 0x00001055, 0x00001055},
+static const u32 ar9280Modes_backoff_23db_rxgain_9280_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290},
+ {0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300},
+ {0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304},
+ {0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308},
+ {0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c},
+ {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000},
+ {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004},
+ {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008},
+ {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c},
+ {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080},
+ {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084},
+ {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088},
+ {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c},
+ {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100},
+ {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104},
+ {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108},
+ {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c},
+ {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110},
+ {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114},
+ {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180},
+ {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184},
+ {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188},
+ {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c},
+ {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190},
+ {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194},
+ {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0},
+ {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c},
+ {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8},
+ {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284},
+ {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288},
+ {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224},
+ {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290},
+ {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300},
+ {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304},
+ {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308},
+ {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c},
+ {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380},
+ {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384},
+ {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700},
+ {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704},
+ {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708},
+ {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c},
+ {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780},
+ {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784},
+ {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00},
+ {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04},
+ {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08},
+ {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c},
+ {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b10, 0x00008b10},
+ {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b80, 0x00008b80},
+ {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b84, 0x00008b84},
+ {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b88, 0x00008b88},
+ {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b8c, 0x00008b8c},
+ {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008b90, 0x00008b90},
+ {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008b94, 0x00008b94},
+ {0x00009adc, 0x0000b390, 0x0000b390, 0x00008b98, 0x00008b98},
+ {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008ba4, 0x00008ba4},
+ {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008ba8, 0x00008ba8},
+ {0x00009ae8, 0x0000b780, 0x0000b780, 0x00008bac, 0x00008bac},
+ {0x00009aec, 0x0000b784, 0x0000b784, 0x00008bb0, 0x00008bb0},
+ {0x00009af0, 0x0000b788, 0x0000b788, 0x00008bb4, 0x00008bb4},
+ {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00008ba1, 0x00008ba1},
+ {0x00009af8, 0x0000b790, 0x0000b790, 0x00008ba5, 0x00008ba5},
+ {0x00009afc, 0x0000b794, 0x0000b794, 0x00008ba9, 0x00008ba9},
+ {0x00009b00, 0x0000b798, 0x0000b798, 0x00008bad, 0x00008bad},
+ {0x00009b04, 0x0000d784, 0x0000d784, 0x00008bb1, 0x00008bb1},
+ {0x00009b08, 0x0000d788, 0x0000d788, 0x00008bb5, 0x00008bb5},
+ {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00008ba2, 0x00008ba2},
+ {0x00009b10, 0x0000d790, 0x0000d790, 0x00008ba6, 0x00008ba6},
+ {0x00009b14, 0x0000f780, 0x0000f780, 0x00008baa, 0x00008baa},
+ {0x00009b18, 0x0000f784, 0x0000f784, 0x00008bae, 0x00008bae},
+ {0x00009b1c, 0x0000f788, 0x0000f788, 0x00008bb2, 0x00008bb2},
+ {0x00009b20, 0x0000f78c, 0x0000f78c, 0x00008bb6, 0x00008bb6},
+ {0x00009b24, 0x0000f790, 0x0000f790, 0x00008ba3, 0x00008ba3},
+ {0x00009b28, 0x0000f794, 0x0000f794, 0x00008ba7, 0x00008ba7},
+ {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x00008bab, 0x00008bab},
+ {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00008baf, 0x00008baf},
+ {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00008bb3, 0x00008bb3},
+ {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00008bb7, 0x00008bb7},
+ {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00008bc3, 0x00008bc3},
+ {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x00008bc7, 0x00008bc7},
+ {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x00008bcb, 0x00008bcb},
+ {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00008bcf, 0x00008bcf},
+ {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00008bd3, 0x00008bd3},
+ {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00008bd7, 0x00008bd7},
+ {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00008bdb, 0x00008bdb},
+ {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x00008bdb, 0x00008bdb},
+ {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x00008bdb, 0x00008bdb},
+ {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00008bdb, 0x00008bdb},
+ {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00008bdb, 0x00008bdb},
+ {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x00008bdb, 0x00008bdb},
+ {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x00008bdb, 0x00008bdb},
+ {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x00008bdb, 0x00008bdb},
+ {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x00008bdb, 0x00008bdb},
+ {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x00008bdb, 0x00008bdb},
+ {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x00008bdb, 0x00008bdb},
+ {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x00008bdb, 0x00008bdb},
+ {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x00008bdb, 0x00008bdb},
+ {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x00008bdb, 0x00008bdb},
+ {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x00008bdb, 0x00008bdb},
+ {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x00008bdb, 0x00008bdb},
+ {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x00008bdb, 0x00008bdb},
+ {0x00009b98, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bac, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009be0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009be4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009be8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bec, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009848, 0x00001066, 0x00001066, 0x00001055, 0x00001055},
+ {0x0000a848, 0x00001066, 0x00001066, 0x00001055, 0x00001055},
};
-static const u32 ar9280Modes_original_rxgain_9280_2[][6] = {
- {0x00009a00, 0x00008184, 0x00008184, 0x00008000, 0x00008000, 0x00008000},
- {0x00009a04, 0x00008188, 0x00008188, 0x00008000, 0x00008000, 0x00008000},
- {0x00009a08, 0x0000818c, 0x0000818c, 0x00008000, 0x00008000, 0x00008000},
- {0x00009a0c, 0x00008190, 0x00008190, 0x00008000, 0x00008000, 0x00008000},
- {0x00009a10, 0x00008194, 0x00008194, 0x00008000, 0x00008000, 0x00008000},
- {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000},
- {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004},
- {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008},
- {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c},
- {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080},
- {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084},
- {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088},
- {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c},
- {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100},
- {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104},
- {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108},
- {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c},
- {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110},
- {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114},
- {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180},
- {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184},
- {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188},
- {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c},
- {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190},
- {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194},
- {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0},
- {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c},
- {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8},
- {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284},
- {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288},
- {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224},
- {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290},
- {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300},
- {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304},
- {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308},
- {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c},
- {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380},
- {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384},
- {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700},
- {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704},
- {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708},
- {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c},
- {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780},
- {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784},
- {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00},
- {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04},
- {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08},
- {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c},
- {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80},
- {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84},
- {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88},
- {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c},
- {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90},
- {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80},
- {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84},
- {0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88},
- {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c},
- {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90},
- {0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c},
- {0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310},
- {0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384},
- {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388},
- {0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324},
- {0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704},
- {0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4},
- {0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8},
- {0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710},
- {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714},
- {0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720},
- {0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724},
- {0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728},
- {0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c},
- {0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0},
- {0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4},
- {0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8},
- {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0},
- {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4},
- {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8},
- {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5},
- {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9},
- {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad},
- {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1},
- {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5},
- {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9},
- {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5},
- {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9},
- {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1},
- {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5},
- {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9},
- {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6},
- {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca},
- {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce},
- {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2},
- {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6},
- {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3},
- {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7},
- {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb},
- {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf},
- {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7},
- {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db},
- {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db},
- {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db},
- {0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db},
- {0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063},
- {0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063},
+static const u32 ar9280Modes_original_rxgain_9280_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009a00, 0x00008184, 0x00008184, 0x00008000, 0x00008000},
+ {0x00009a04, 0x00008188, 0x00008188, 0x00008000, 0x00008000},
+ {0x00009a08, 0x0000818c, 0x0000818c, 0x00008000, 0x00008000},
+ {0x00009a0c, 0x00008190, 0x00008190, 0x00008000, 0x00008000},
+ {0x00009a10, 0x00008194, 0x00008194, 0x00008000, 0x00008000},
+ {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000},
+ {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004},
+ {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008},
+ {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c},
+ {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080},
+ {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084},
+ {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088},
+ {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c},
+ {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100},
+ {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104},
+ {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108},
+ {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c},
+ {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110},
+ {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114},
+ {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180},
+ {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184},
+ {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188},
+ {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c},
+ {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190},
+ {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194},
+ {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0},
+ {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c},
+ {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8},
+ {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284},
+ {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288},
+ {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224},
+ {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290},
+ {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300},
+ {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304},
+ {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308},
+ {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c},
+ {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380},
+ {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384},
+ {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700},
+ {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704},
+ {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708},
+ {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c},
+ {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780},
+ {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784},
+ {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00},
+ {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04},
+ {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08},
+ {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c},
+ {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80},
+ {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84},
+ {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88},
+ {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c},
+ {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90},
+ {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80},
+ {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84},
+ {0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88},
+ {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c},
+ {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90},
+ {0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c},
+ {0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310},
+ {0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384},
+ {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388},
+ {0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324},
+ {0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704},
+ {0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4},
+ {0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8},
+ {0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710},
+ {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714},
+ {0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720},
+ {0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724},
+ {0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728},
+ {0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c},
+ {0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0},
+ {0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4},
+ {0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8},
+ {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0},
+ {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4},
+ {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8},
+ {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5},
+ {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9},
+ {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad},
+ {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1},
+ {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5},
+ {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9},
+ {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5},
+ {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9},
+ {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1},
+ {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5},
+ {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9},
+ {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6},
+ {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca},
+ {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce},
+ {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2},
+ {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6},
+ {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3},
+ {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7},
+ {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb},
+ {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf},
+ {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7},
+ {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db},
+ {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db},
+ {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db},
+ {0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063},
+ {0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063},
};
-static const u32 ar9280Modes_backoff_13db_rxgain_9280_2[][6] = {
- {0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290},
- {0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300},
- {0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304},
- {0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308},
- {0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c},
- {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000},
- {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004},
- {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008},
- {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c},
- {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080},
- {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084},
- {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088},
- {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c},
- {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100},
- {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104},
- {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108},
- {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c},
- {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110},
- {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114},
- {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180},
- {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184},
- {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188},
- {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c},
- {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190},
- {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194},
- {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0},
- {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c},
- {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8},
- {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284},
- {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288},
- {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224},
- {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290},
- {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300},
- {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304},
- {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308},
- {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c},
- {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380},
- {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384},
- {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700},
- {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704},
- {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708},
- {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c},
- {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780},
- {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784},
- {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00},
- {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04},
- {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08},
- {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c},
- {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80},
- {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84},
- {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88},
- {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c},
- {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90},
- {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80},
- {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84},
- {0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88},
- {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c},
- {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90},
- {0x00009ae8, 0x0000b780, 0x0000b780, 0x00009310, 0x00009310, 0x00009310},
- {0x00009aec, 0x0000b784, 0x0000b784, 0x00009314, 0x00009314, 0x00009314},
- {0x00009af0, 0x0000b788, 0x0000b788, 0x00009320, 0x00009320, 0x00009320},
- {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009324, 0x00009324, 0x00009324},
- {0x00009af8, 0x0000b790, 0x0000b790, 0x00009328, 0x00009328, 0x00009328},
- {0x00009afc, 0x0000b794, 0x0000b794, 0x0000932c, 0x0000932c, 0x0000932c},
- {0x00009b00, 0x0000b798, 0x0000b798, 0x00009330, 0x00009330, 0x00009330},
- {0x00009b04, 0x0000d784, 0x0000d784, 0x00009334, 0x00009334, 0x00009334},
- {0x00009b08, 0x0000d788, 0x0000d788, 0x00009321, 0x00009321, 0x00009321},
- {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009325, 0x00009325, 0x00009325},
- {0x00009b10, 0x0000d790, 0x0000d790, 0x00009329, 0x00009329, 0x00009329},
- {0x00009b14, 0x0000f780, 0x0000f780, 0x0000932d, 0x0000932d, 0x0000932d},
- {0x00009b18, 0x0000f784, 0x0000f784, 0x00009331, 0x00009331, 0x00009331},
- {0x00009b1c, 0x0000f788, 0x0000f788, 0x00009335, 0x00009335, 0x00009335},
- {0x00009b20, 0x0000f78c, 0x0000f78c, 0x00009322, 0x00009322, 0x00009322},
- {0x00009b24, 0x0000f790, 0x0000f790, 0x00009326, 0x00009326, 0x00009326},
- {0x00009b28, 0x0000f794, 0x0000f794, 0x0000932a, 0x0000932a, 0x0000932a},
- {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x0000932e, 0x0000932e, 0x0000932e},
- {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00009332, 0x00009332, 0x00009332},
- {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00009336, 0x00009336, 0x00009336},
- {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00009323, 0x00009323, 0x00009323},
- {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00009327, 0x00009327, 0x00009327},
- {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x0000932b, 0x0000932b, 0x0000932b},
- {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x0000932f, 0x0000932f, 0x0000932f},
- {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00009333, 0x00009333, 0x00009333},
- {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00009337, 0x00009337, 0x00009337},
- {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00009343, 0x00009343, 0x00009343},
- {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00009347, 0x00009347, 0x00009347},
- {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x0000934b, 0x0000934b, 0x0000934b},
- {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x0000934f, 0x0000934f, 0x0000934f},
- {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00009353, 0x00009353, 0x00009353},
- {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00009357, 0x00009357, 0x00009357},
- {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009b98, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bac, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009be0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009be4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009be8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bec, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b},
- {0x00009848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a},
- {0x0000a848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a},
+static const u32 ar9280Modes_backoff_13db_rxgain_9280_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290},
+ {0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300},
+ {0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304},
+ {0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308},
+ {0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c},
+ {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000},
+ {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004},
+ {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008},
+ {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c},
+ {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080},
+ {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084},
+ {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088},
+ {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c},
+ {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100},
+ {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104},
+ {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108},
+ {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c},
+ {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110},
+ {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114},
+ {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180},
+ {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184},
+ {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188},
+ {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c},
+ {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190},
+ {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194},
+ {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0},
+ {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c},
+ {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8},
+ {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284},
+ {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288},
+ {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224},
+ {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290},
+ {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300},
+ {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304},
+ {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308},
+ {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c},
+ {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380},
+ {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384},
+ {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700},
+ {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704},
+ {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708},
+ {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c},
+ {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780},
+ {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784},
+ {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00},
+ {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04},
+ {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08},
+ {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c},
+ {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80},
+ {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84},
+ {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88},
+ {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c},
+ {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90},
+ {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80},
+ {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84},
+ {0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88},
+ {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c},
+ {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90},
+ {0x00009ae8, 0x0000b780, 0x0000b780, 0x00009310, 0x00009310},
+ {0x00009aec, 0x0000b784, 0x0000b784, 0x00009314, 0x00009314},
+ {0x00009af0, 0x0000b788, 0x0000b788, 0x00009320, 0x00009320},
+ {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009324, 0x00009324},
+ {0x00009af8, 0x0000b790, 0x0000b790, 0x00009328, 0x00009328},
+ {0x00009afc, 0x0000b794, 0x0000b794, 0x0000932c, 0x0000932c},
+ {0x00009b00, 0x0000b798, 0x0000b798, 0x00009330, 0x00009330},
+ {0x00009b04, 0x0000d784, 0x0000d784, 0x00009334, 0x00009334},
+ {0x00009b08, 0x0000d788, 0x0000d788, 0x00009321, 0x00009321},
+ {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009325, 0x00009325},
+ {0x00009b10, 0x0000d790, 0x0000d790, 0x00009329, 0x00009329},
+ {0x00009b14, 0x0000f780, 0x0000f780, 0x0000932d, 0x0000932d},
+ {0x00009b18, 0x0000f784, 0x0000f784, 0x00009331, 0x00009331},
+ {0x00009b1c, 0x0000f788, 0x0000f788, 0x00009335, 0x00009335},
+ {0x00009b20, 0x0000f78c, 0x0000f78c, 0x00009322, 0x00009322},
+ {0x00009b24, 0x0000f790, 0x0000f790, 0x00009326, 0x00009326},
+ {0x00009b28, 0x0000f794, 0x0000f794, 0x0000932a, 0x0000932a},
+ {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x0000932e, 0x0000932e},
+ {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00009332, 0x00009332},
+ {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00009336, 0x00009336},
+ {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00009323, 0x00009323},
+ {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00009327, 0x00009327},
+ {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x0000932b, 0x0000932b},
+ {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x0000932f, 0x0000932f},
+ {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00009333, 0x00009333},
+ {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00009337, 0x00009337},
+ {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00009343, 0x00009343},
+ {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00009347, 0x00009347},
+ {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x0000934b, 0x0000934b},
+ {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x0000934f, 0x0000934f},
+ {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00009353, 0x00009353},
+ {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00009357, 0x00009357},
+ {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x0000935b, 0x0000935b},
+ {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x0000935b, 0x0000935b},
+ {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x0000935b, 0x0000935b},
+ {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x0000935b, 0x0000935b},
+ {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x0000935b, 0x0000935b},
+ {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x0000935b, 0x0000935b},
+ {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x0000935b, 0x0000935b},
+ {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x0000935b, 0x0000935b},
+ {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x0000935b, 0x0000935b},
+ {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x0000935b, 0x0000935b},
+ {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x0000935b, 0x0000935b},
+ {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x0000935b, 0x0000935b},
+ {0x00009b98, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bac, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009be0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009be4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009be8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bec, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a},
+ {0x0000a848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a},
};
-static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = {
- {0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652},
- {0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce},
- {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002, 0x00004002},
- {0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008, 0x00007008},
- {0x0000a30c, 0x0000a006, 0x0000a006, 0x0000c010, 0x0000c010, 0x0000c010},
- {0x0000a310, 0x0000e012, 0x0000e012, 0x00010012, 0x00010012, 0x00010012},
- {0x0000a314, 0x00011014, 0x00011014, 0x00013014, 0x00013014, 0x00013014},
- {0x0000a318, 0x0001504a, 0x0001504a, 0x0001820a, 0x0001820a, 0x0001820a},
- {0x0000a31c, 0x0001904c, 0x0001904c, 0x0001b211, 0x0001b211, 0x0001b211},
- {0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213},
- {0x0000a324, 0x00021092, 0x00021092, 0x00022411, 0x00022411, 0x00022411},
- {0x0000a328, 0x0002510a, 0x0002510a, 0x00025413, 0x00025413, 0x00025413},
- {0x0000a32c, 0x0002910c, 0x0002910c, 0x00029811, 0x00029811, 0x00029811},
- {0x0000a330, 0x0002c18b, 0x0002c18b, 0x0002c813, 0x0002c813, 0x0002c813},
- {0x0000a334, 0x0002f1cc, 0x0002f1cc, 0x00030a14, 0x00030a14, 0x00030a14},
- {0x0000a338, 0x000321eb, 0x000321eb, 0x00035a50, 0x00035a50, 0x00035a50},
- {0x0000a33c, 0x000341ec, 0x000341ec, 0x00039c4c, 0x00039c4c, 0x00039c4c},
- {0x0000a340, 0x000341ec, 0x000341ec, 0x0003de8a, 0x0003de8a, 0x0003de8a},
- {0x0000a344, 0x000341ec, 0x000341ec, 0x00042e92, 0x00042e92, 0x00042e92},
- {0x0000a348, 0x000341ec, 0x000341ec, 0x00046ed2, 0x00046ed2, 0x00046ed2},
- {0x0000a34c, 0x000341ec, 0x000341ec, 0x0004bed5, 0x0004bed5, 0x0004bed5},
- {0x0000a350, 0x000341ec, 0x000341ec, 0x0004ff54, 0x0004ff54, 0x0004ff54},
- {0x0000a354, 0x000341ec, 0x000341ec, 0x00055fd5, 0x00055fd5, 0x00055fd5},
- {0x0000a3ec, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081},
- {0x00007814, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff},
- {0x00007838, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff},
- {0x0000781c, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000},
- {0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000},
- {0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480},
- {0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480},
+static const u32 ar9280Modes_high_power_tx_gain_9280_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652},
+ {0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce},
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002},
+ {0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008},
+ {0x0000a30c, 0x0000a006, 0x0000a006, 0x0000c010, 0x0000c010},
+ {0x0000a310, 0x0000e012, 0x0000e012, 0x00010012, 0x00010012},
+ {0x0000a314, 0x00011014, 0x00011014, 0x00013014, 0x00013014},
+ {0x0000a318, 0x0001504a, 0x0001504a, 0x0001820a, 0x0001820a},
+ {0x0000a31c, 0x0001904c, 0x0001904c, 0x0001b211, 0x0001b211},
+ {0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213},
+ {0x0000a324, 0x00021092, 0x00021092, 0x00022411, 0x00022411},
+ {0x0000a328, 0x0002510a, 0x0002510a, 0x00025413, 0x00025413},
+ {0x0000a32c, 0x0002910c, 0x0002910c, 0x00029811, 0x00029811},
+ {0x0000a330, 0x0002c18b, 0x0002c18b, 0x0002c813, 0x0002c813},
+ {0x0000a334, 0x0002f1cc, 0x0002f1cc, 0x00030a14, 0x00030a14},
+ {0x0000a338, 0x000321eb, 0x000321eb, 0x00035a50, 0x00035a50},
+ {0x0000a33c, 0x000341ec, 0x000341ec, 0x00039c4c, 0x00039c4c},
+ {0x0000a340, 0x000341ec, 0x000341ec, 0x0003de8a, 0x0003de8a},
+ {0x0000a344, 0x000341ec, 0x000341ec, 0x00042e92, 0x00042e92},
+ {0x0000a348, 0x000341ec, 0x000341ec, 0x00046ed2, 0x00046ed2},
+ {0x0000a34c, 0x000341ec, 0x000341ec, 0x0004bed5, 0x0004bed5},
+ {0x0000a350, 0x000341ec, 0x000341ec, 0x0004ff54, 0x0004ff54},
+ {0x0000a354, 0x000341ec, 0x000341ec, 0x00055fd5, 0x00055fd5},
+ {0x0000a3ec, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081},
+ {0x00007814, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff},
+ {0x00007838, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff},
+ {0x0000781c, 0x00172000, 0x00172000, 0x00172000, 0x00172000},
+ {0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000},
+ {0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480},
+ {0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480},
};
-static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = {
- {0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652},
- {0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce},
- {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002},
- {0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009},
- {0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b},
- {0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012},
- {0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048},
- {0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a},
- {0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211},
- {0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213},
- {0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b},
- {0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412},
- {0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414},
- {0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a},
- {0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649},
- {0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b},
- {0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49},
- {0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48},
- {0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a},
- {0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88},
- {0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a},
- {0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9},
- {0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42},
- {0x0000a3ec, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081},
- {0x00007814, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff},
- {0x00007838, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff},
- {0x0000781c, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000},
- {0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000},
- {0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480},
- {0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480},
+static const u32 ar9280Modes_original_tx_gain_9280_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652},
+ {0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce},
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002},
+ {0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009},
+ {0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b},
+ {0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012},
+ {0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048},
+ {0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a},
+ {0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211},
+ {0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213},
+ {0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b},
+ {0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412},
+ {0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414},
+ {0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a},
+ {0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649},
+ {0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b},
+ {0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49},
+ {0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48},
+ {0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a},
+ {0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88},
+ {0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a},
+ {0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9},
+ {0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42},
+ {0x0000a3ec, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081},
+ {0x00007814, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff},
+ {0x00007838, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff},
+ {0x0000781c, 0x00392000, 0x00392000, 0x00392000, 0x00392000},
+ {0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000},
+ {0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480},
+ {0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480},
};
static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
@@ -947,309 +953,310 @@ static const u32 ar9285PciePhy_clkreq_off_L1_9285[][2] = {
{0x00004044, 0x00000000},
};
-static const u32 ar9285Modes_9285_1_2[][6] = {
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180},
- {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880},
- {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303},
- {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
- {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
- {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
- {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
- {0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e},
- {0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620, 0x037216a0},
- {0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059},
- {0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059},
- {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2},
- {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
- {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e},
- {0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20, 0x00058d18},
- {0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
- {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881},
- {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0},
- {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016},
- {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d},
- {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020, 0xffbc1010},
- {0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c},
- {0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00},
- {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
- {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
- {0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f},
- {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
- {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
- {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000},
- {0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000},
- {0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000},
- {0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000},
- {0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000},
- {0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000},
- {0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000},
- {0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000},
- {0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000},
- {0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000},
- {0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000},
- {0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000},
- {0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000},
- {0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000},
- {0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000},
- {0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000},
- {0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000},
- {0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000},
- {0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000},
- {0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000},
- {0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000},
- {0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000},
- {0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000},
- {0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000},
- {0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000},
- {0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000},
- {0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000},
- {0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000},
- {0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000},
- {0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000},
- {0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000},
- {0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000},
- {0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000},
- {0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000},
- {0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000},
- {0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000},
- {0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000},
- {0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000},
- {0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000},
- {0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000},
- {0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000},
- {0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000},
- {0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000},
- {0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000},
- {0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000},
- {0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000},
- {0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000},
- {0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000},
- {0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000},
- {0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000},
- {0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000},
- {0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000},
- {0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000},
- {0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000},
- {0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000},
- {0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000},
- {0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000},
- {0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000},
- {0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000},
- {0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000},
- {0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000},
- {0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000},
- {0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000},
- {0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000},
- {0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000},
- {0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000},
- {0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000},
- {0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000},
- {0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000},
- {0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000},
- {0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000},
- {0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000},
- {0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000},
- {0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000},
- {0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000},
- {0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000},
- {0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000},
- {0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000},
- {0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000},
- {0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000},
- {0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000},
- {0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000},
- {0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000},
- {0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000},
- {0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000},
- {0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000},
- {0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000},
- {0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000},
- {0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000},
- {0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000},
- {0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000},
- {0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000},
- {0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000},
- {0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000},
- {0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000},
- {0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000},
- {0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000},
- {0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000},
- {0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000},
- {0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000},
- {0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000},
- {0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000},
- {0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000},
- {0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000},
- {0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000},
- {0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000},
- {0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000},
- {0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000},
- {0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000},
- {0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000},
- {0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000},
- {0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000},
- {0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000},
- {0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000},
- {0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000},
- {0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000},
- {0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000},
- {0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000},
- {0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000},
- {0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000},
- {0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000},
- {0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000},
- {0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000},
- {0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000},
- {0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000},
- {0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000},
- {0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000},
- {0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000},
- {0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000},
- {0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000},
- {0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000},
- {0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000},
- {0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000},
- {0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000},
- {0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000},
- {0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000},
- {0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000},
- {0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000},
- {0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000},
- {0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000},
- {0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000},
- {0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000},
- {0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000},
- {0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000},
- {0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000},
- {0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000},
- {0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000},
- {0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000},
- {0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000},
- {0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000},
- {0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000},
- {0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000},
- {0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000},
- {0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000},
- {0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000},
- {0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000},
- {0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000},
- {0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000},
- {0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000},
- {0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000},
- {0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000},
- {0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000},
- {0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000},
- {0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000},
- {0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000},
- {0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000},
- {0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000},
- {0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000},
- {0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000},
- {0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000},
- {0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000},
- {0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000},
- {0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000},
- {0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000},
- {0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000},
- {0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000},
- {0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000},
- {0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000},
- {0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004},
- {0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000},
- {0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000},
- {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
- {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
- {0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000},
- {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e},
+static const u32 ar9285Modes_9285_1_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+ {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300},
+ {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
+ {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
+ {0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e},
+ {0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620},
+ {0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053},
+ {0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053},
+ {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2},
+ {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e},
+ {0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20},
+ {0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d},
+ {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020},
+ {0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c},
+ {0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00},
+ {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
+ {0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f},
+ {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
+ {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
+ {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084},
+ {0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088},
+ {0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c},
+ {0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100},
+ {0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104},
+ {0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108},
+ {0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c},
+ {0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110},
+ {0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114},
+ {0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180},
+ {0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184},
+ {0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188},
+ {0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c},
+ {0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190},
+ {0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194},
+ {0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0},
+ {0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c},
+ {0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8},
+ {0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284},
+ {0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288},
+ {0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224},
+ {0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290},
+ {0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300},
+ {0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304},
+ {0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308},
+ {0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c},
+ {0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380},
+ {0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384},
+ {0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700},
+ {0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704},
+ {0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708},
+ {0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c},
+ {0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780},
+ {0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784},
+ {0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00},
+ {0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04},
+ {0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08},
+ {0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c},
+ {0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80},
+ {0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84},
+ {0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88},
+ {0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c},
+ {0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90},
+ {0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80},
+ {0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84},
+ {0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88},
+ {0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c},
+ {0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90},
+ {0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c},
+ {0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310},
+ {0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384},
+ {0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388},
+ {0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324},
+ {0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704},
+ {0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4},
+ {0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8},
+ {0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710},
+ {0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714},
+ {0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720},
+ {0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724},
+ {0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728},
+ {0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c},
+ {0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0},
+ {0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4},
+ {0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8},
+ {0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0},
+ {0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4},
+ {0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8},
+ {0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5},
+ {0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9},
+ {0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad},
+ {0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1},
+ {0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5},
+ {0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9},
+ {0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5},
+ {0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9},
+ {0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1},
+ {0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5},
+ {0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9},
+ {0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6},
+ {0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca},
+ {0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce},
+ {0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2},
+ {0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6},
+ {0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3},
+ {0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7},
+ {0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb},
+ {0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf},
+ {0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7},
+ {0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084},
+ {0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088},
+ {0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c},
+ {0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100},
+ {0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104},
+ {0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108},
+ {0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c},
+ {0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110},
+ {0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114},
+ {0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180},
+ {0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184},
+ {0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188},
+ {0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c},
+ {0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190},
+ {0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194},
+ {0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0},
+ {0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c},
+ {0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8},
+ {0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284},
+ {0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288},
+ {0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224},
+ {0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290},
+ {0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300},
+ {0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304},
+ {0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308},
+ {0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c},
+ {0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380},
+ {0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384},
+ {0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700},
+ {0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704},
+ {0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708},
+ {0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c},
+ {0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780},
+ {0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784},
+ {0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00},
+ {0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04},
+ {0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08},
+ {0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c},
+ {0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80},
+ {0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84},
+ {0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88},
+ {0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c},
+ {0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90},
+ {0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80},
+ {0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84},
+ {0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88},
+ {0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c},
+ {0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90},
+ {0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c},
+ {0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310},
+ {0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384},
+ {0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388},
+ {0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324},
+ {0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704},
+ {0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4},
+ {0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8},
+ {0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710},
+ {0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714},
+ {0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720},
+ {0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724},
+ {0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728},
+ {0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c},
+ {0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0},
+ {0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4},
+ {0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8},
+ {0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0},
+ {0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4},
+ {0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8},
+ {0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5},
+ {0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9},
+ {0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad},
+ {0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1},
+ {0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5},
+ {0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9},
+ {0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5},
+ {0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9},
+ {0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1},
+ {0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5},
+ {0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9},
+ {0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6},
+ {0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca},
+ {0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce},
+ {0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2},
+ {0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6},
+ {0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3},
+ {0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7},
+ {0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb},
+ {0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf},
+ {0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7},
+ {0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004},
+ {0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000},
+ {0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000},
+ {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000},
+ {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e},
};
static const u32 ar9285Common_9285_1_2[][2] = {
@@ -1572,164 +1579,168 @@ static const u32 ar9285Common_9285_1_2[][2] = {
{0x00007870, 0x10142c00},
};
-static const u32 ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
- {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000},
- {0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000},
- {0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000},
- {0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000},
- {0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000},
- {0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000},
- {0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000},
- {0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000},
- {0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000},
- {0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000},
- {0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000},
- {0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000},
- {0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000},
- {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000},
- {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000},
- {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8},
- {0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b},
- {0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e},
- {0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803},
- {0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe},
- {0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20},
- {0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe},
- {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00},
- {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652},
- {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
- {0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7},
- {0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
- {0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
- {0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
- {0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
+static const u32 ar9285Modes_high_power_tx_gain_9285_1_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240},
+ {0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241},
+ {0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600},
+ {0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802},
+ {0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805},
+ {0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80},
+ {0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40},
+ {0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80},
+ {0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82},
+ {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e},
+ {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e},
+ {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8},
+ {0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b},
+ {0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e},
+ {0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803},
+ {0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe},
+ {0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20},
+ {0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe},
+ {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00},
+ {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652},
+ {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7},
+ {0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
+ {0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
};
-static const u32 ar9285Modes_original_tx_gain_9285_1_2[][6] = {
- {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000},
- {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000},
- {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000},
- {0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000},
- {0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000},
- {0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000},
- {0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000},
- {0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000},
- {0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000},
- {0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000},
- {0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000},
- {0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000},
- {0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000},
- {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000},
- {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000},
- {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8},
- {0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b},
- {0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e},
- {0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801},
- {0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe},
- {0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20},
- {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4},
- {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04},
- {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652},
- {0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
- {0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c},
- {0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
- {0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
- {0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
- {0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
+static const u32 ar9285Modes_original_tx_gain_9285_1_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608},
+ {0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618},
+ {0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9},
+ {0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718},
+ {0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758},
+ {0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a},
+ {0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e},
+ {0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f},
+ {0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df},
+ {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e},
+ {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e},
+ {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8},
+ {0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b},
+ {0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e},
+ {0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801},
+ {0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe},
+ {0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20},
+ {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4},
+ {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04},
+ {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652},
+ {0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
+ {0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c},
+ {0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
+ {0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
+ {0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
+ {0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
};
-static const u32 ar9285Modes_XE2_0_normal_power[][6] = {
- {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000},
- {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000},
- {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000},
- {0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000},
- {0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000},
- {0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000},
- {0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000},
- {0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000},
- {0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000},
- {0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000},
- {0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000},
- {0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000},
- {0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000},
- {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000},
- {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000},
- {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8},
- {0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b},
- {0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6dbae},
- {0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441},
- {0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe},
- {0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c},
- {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4},
- {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04},
- {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652},
- {0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
- {0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c},
- {0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
- {0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
- {0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
- {0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
+static const u32 ar9285Modes_XE2_0_normal_power[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608},
+ {0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618},
+ {0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9},
+ {0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718},
+ {0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758},
+ {0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a},
+ {0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e},
+ {0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f},
+ {0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df},
+ {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e},
+ {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e},
+ {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8},
+ {0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b},
+ {0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e},
+ {0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441},
+ {0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe},
+ {0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c},
+ {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4},
+ {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04},
+ {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652},
+ {0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
+ {0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c},
+ {0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
+ {0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
+ {0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
+ {0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
};
-static const u32 ar9285Modes_XE2_0_high_power[][6] = {
- {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000},
- {0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000},
- {0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000},
- {0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000},
- {0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000},
- {0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000},
- {0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000},
- {0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000},
- {0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000},
- {0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000},
- {0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000},
- {0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000},
- {0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000},
- {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000},
- {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000},
- {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8},
- {0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b},
- {0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e},
- {0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443},
- {0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe},
- {0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c},
- {0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe},
- {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00},
- {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652},
- {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
- {0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7},
- {0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
- {0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
- {0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
- {0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
+static const u32 ar9285Modes_XE2_0_high_power[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240},
+ {0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241},
+ {0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600},
+ {0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802},
+ {0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805},
+ {0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80},
+ {0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40},
+ {0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80},
+ {0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82},
+ {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e},
+ {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e},
+ {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8},
+ {0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b},
+ {0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e},
+ {0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443},
+ {0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe},
+ {0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c},
+ {0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe},
+ {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00},
+ {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652},
+ {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7},
+ {0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
+ {0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
};
static const u32 ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
@@ -1760,50 +1771,51 @@ static const u32 ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
{0x00004044, 0x00000000},
};
-static const u32 ar9287Modes_9287_1_1[][6] = {
- {0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0},
- {0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0},
- {0x000010b0, 0x00000000, 0x00000000, 0x00007c70, 0x00003e38, 0x00001180},
- {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008},
- {0x00008014, 0x00000000, 0x00000000, 0x10801600, 0x08400b00, 0x06e006e0},
- {0x0000801c, 0x00000000, 0x00000000, 0x12e00057, 0x12e0002b, 0x0988004f},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003200, 0x00003200, 0x0000320a, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00000000, 0x00000000, 0x00006880, 0x00003440, 0x00006880},
- {0x00009804, 0x00000000, 0x00000000, 0x000003c4, 0x00000300, 0x00000303},
- {0x00009820, 0x00000000, 0x00000000, 0x02020200, 0x02020200, 0x02020200},
- {0x00009824, 0x00000000, 0x00000000, 0x01000e0e, 0x01000e0e, 0x01000e0e},
- {0x00009828, 0x00000000, 0x00000000, 0x3a020001, 0x3a020001, 0x3a020001},
- {0x00009834, 0x00000000, 0x00000000, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x00009838, 0x00000003, 0x00000003, 0x00000007, 0x00000007, 0x00000007},
- {0x00009840, 0x206a002e, 0x206a002e, 0x206a012e, 0x206a012e, 0x206a012e},
- {0x00009844, 0x03720000, 0x03720000, 0x037216a0, 0x037216a0, 0x037216a0},
- {0x00009850, 0x60000000, 0x60000000, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2},
- {0x00009858, 0x7c000d00, 0x7c000d00, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
- {0x0000985c, 0x3100005e, 0x3100005e, 0x3139605e, 0x31395d5e, 0x31395d5e},
- {0x00009860, 0x00058d00, 0x00058d00, 0x00058d20, 0x00058d20, 0x00058d18},
- {0x00009864, 0x00000e00, 0x00000e00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
- {0x00009868, 0x000040c0, 0x000040c0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x0000986c, 0x00000080, 0x00000080, 0x06903881, 0x06903881, 0x06903881},
- {0x00009914, 0x00000000, 0x00000000, 0x00001130, 0x00000898, 0x000007d0},
- {0x00009918, 0x00000000, 0x00000000, 0x00000016, 0x0000000b, 0x00000016},
- {0x00009924, 0xd00a8a01, 0xd00a8a01, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d},
- {0x00009944, 0xefbc0000, 0xefbc0000, 0xefbc1010, 0xefbc1010, 0xefbc1010},
- {0x00009960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010},
- {0x0000a960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010},
- {0x00009964, 0x00000000, 0x00000000, 0x00000210, 0x00000210, 0x00000210},
- {0x0000c968, 0x00000200, 0x00000200, 0x000003ce, 0x000003ce, 0x000003ce},
- {0x000099b8, 0x00000000, 0x00000000, 0x0000001c, 0x0000001c, 0x0000001c},
- {0x000099bc, 0x00000000, 0x00000000, 0x00000c00, 0x00000c00, 0x00000c00},
- {0x000099c0, 0x00000000, 0x00000000, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
- {0x0000a204, 0x00000440, 0x00000440, 0x00000444, 0x00000444, 0x00000444},
- {0x0000a20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a21c, 0x1803800a, 0x1803800a, 0x1883800a, 0x1883800a, 0x1883800a},
- {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
- {0x0000a250, 0x00000000, 0x00000000, 0x0004a000, 0x0004a000, 0x0004a000},
- {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e},
- {0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+static const u32 ar9287Modes_9287_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000000, 0x00000000, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00008014, 0x00000000, 0x00000000, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x00000000, 0x00000000, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003200, 0x00003200, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00000000, 0x00000000, 0x00006880, 0x00003440},
+ {0x00009804, 0x00000000, 0x00000000, 0x000003c4, 0x00000300},
+ {0x00009820, 0x00000000, 0x00000000, 0x02020200, 0x02020200},
+ {0x00009824, 0x00000000, 0x00000000, 0x01000e0e, 0x01000e0e},
+ {0x00009828, 0x00000000, 0x00000000, 0x3a020001, 0x3a020001},
+ {0x00009834, 0x00000000, 0x00000000, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000003, 0x00000003, 0x00000007, 0x00000007},
+ {0x00009840, 0x206a002e, 0x206a002e, 0x206a012e, 0x206a012e},
+ {0x00009844, 0x03720000, 0x03720000, 0x037216a0, 0x037216a0},
+ {0x00009850, 0x60000000, 0x60000000, 0x6d4000e2, 0x6c4000e2},
+ {0x00009858, 0x7c000d00, 0x7c000d00, 0x7ec84d2e, 0x7ec84d2e},
+ {0x0000985c, 0x3100005e, 0x3100005e, 0x3139605e, 0x31395d5e},
+ {0x00009860, 0x00058d00, 0x00058d00, 0x00058d20, 0x00058d20},
+ {0x00009864, 0x00000e00, 0x00000e00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x000040c0, 0x000040c0, 0x5ac640d0, 0x5ac640d0},
+ {0x0000986c, 0x00000080, 0x00000080, 0x06903881, 0x06903881},
+ {0x00009914, 0x00000000, 0x00000000, 0x00001130, 0x00000898},
+ {0x00009918, 0x00000000, 0x00000000, 0x00000016, 0x0000000b},
+ {0x00009924, 0xd00a8a01, 0xd00a8a01, 0xd00a8a0d, 0xd00a8a0d},
+ {0x00009944, 0xefbc0000, 0xefbc0000, 0xefbc1010, 0xefbc1010},
+ {0x00009960, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000a960, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x00009964, 0x00000000, 0x00000000, 0x00000210, 0x00000210},
+ {0x0000c968, 0x00000200, 0x00000200, 0x000003ce, 0x000003ce},
+ {0x000099b8, 0x00000000, 0x00000000, 0x0000001c, 0x0000001c},
+ {0x000099bc, 0x00000000, 0x00000000, 0x00000c00, 0x00000c00},
+ {0x000099c0, 0x00000000, 0x00000000, 0x05eea6d4, 0x05eea6d4},
+ {0x0000a204, 0x00000440, 0x00000440, 0x00000444, 0x00000444},
+ {0x0000a20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a21c, 0x1803800a, 0x1803800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a250, 0x00000000, 0x00000000, 0x0004a000, 0x0004a000},
+ {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e},
+ {0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
static const u32 ar9287Common_9287_1_1[][2] = {
@@ -2189,313 +2201,315 @@ static const u32 ar9287Common_japan_2484_cck_fir_coeff_9287_1_1[][2] = {
{0x0000a1fc, 0xca9228ee},
};
-static const u32 ar9287Modes_tx_gain_9287_1_1[][6] = {
- {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002},
- {0x0000a308, 0x00000000, 0x00000000, 0x00008004, 0x00008004, 0x00008004},
- {0x0000a30c, 0x00000000, 0x00000000, 0x0000c00a, 0x0000c00a, 0x0000c00a},
- {0x0000a310, 0x00000000, 0x00000000, 0x0001000c, 0x0001000c, 0x0001000c},
- {0x0000a314, 0x00000000, 0x00000000, 0x0001420b, 0x0001420b, 0x0001420b},
- {0x0000a318, 0x00000000, 0x00000000, 0x0001824a, 0x0001824a, 0x0001824a},
- {0x0000a31c, 0x00000000, 0x00000000, 0x0001c44a, 0x0001c44a, 0x0001c44a},
- {0x0000a320, 0x00000000, 0x00000000, 0x0002064a, 0x0002064a, 0x0002064a},
- {0x0000a324, 0x00000000, 0x00000000, 0x0002484a, 0x0002484a, 0x0002484a},
- {0x0000a328, 0x00000000, 0x00000000, 0x00028a4a, 0x00028a4a, 0x00028a4a},
- {0x0000a32c, 0x00000000, 0x00000000, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a},
- {0x0000a330, 0x00000000, 0x00000000, 0x00030e4a, 0x00030e4a, 0x00030e4a},
- {0x0000a334, 0x00000000, 0x00000000, 0x00034e8a, 0x00034e8a, 0x00034e8a},
- {0x0000a338, 0x00000000, 0x00000000, 0x00038e8c, 0x00038e8c, 0x00038e8c},
- {0x0000a33c, 0x00000000, 0x00000000, 0x0003cecc, 0x0003cecc, 0x0003cecc},
- {0x0000a340, 0x00000000, 0x00000000, 0x00040ed4, 0x00040ed4, 0x00040ed4},
- {0x0000a344, 0x00000000, 0x00000000, 0x00044edc, 0x00044edc, 0x00044edc},
- {0x0000a348, 0x00000000, 0x00000000, 0x00048ede, 0x00048ede, 0x00048ede},
- {0x0000a34c, 0x00000000, 0x00000000, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e},
- {0x0000a350, 0x00000000, 0x00000000, 0x00050f5e, 0x00050f5e, 0x00050f5e},
- {0x0000a354, 0x00000000, 0x00000000, 0x00054f9e, 0x00054f9e, 0x00054f9e},
- {0x0000a780, 0x00000000, 0x00000000, 0x00000062, 0x00000062, 0x00000062},
- {0x0000a784, 0x00000000, 0x00000000, 0x00004064, 0x00004064, 0x00004064},
- {0x0000a788, 0x00000000, 0x00000000, 0x000080a4, 0x000080a4, 0x000080a4},
- {0x0000a78c, 0x00000000, 0x00000000, 0x0000c0aa, 0x0000c0aa, 0x0000c0aa},
- {0x0000a790, 0x00000000, 0x00000000, 0x000100ac, 0x000100ac, 0x000100ac},
- {0x0000a794, 0x00000000, 0x00000000, 0x000140b4, 0x000140b4, 0x000140b4},
- {0x0000a798, 0x00000000, 0x00000000, 0x000180f4, 0x000180f4, 0x000180f4},
- {0x0000a79c, 0x00000000, 0x00000000, 0x0001c134, 0x0001c134, 0x0001c134},
- {0x0000a7a0, 0x00000000, 0x00000000, 0x00020174, 0x00020174, 0x00020174},
- {0x0000a7a4, 0x00000000, 0x00000000, 0x0002417c, 0x0002417c, 0x0002417c},
- {0x0000a7a8, 0x00000000, 0x00000000, 0x0002817e, 0x0002817e, 0x0002817e},
- {0x0000a7ac, 0x00000000, 0x00000000, 0x0002c1be, 0x0002c1be, 0x0002c1be},
- {0x0000a7b0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
- {0x0000a7b4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
- {0x0000a7b8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
- {0x0000a7bc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
- {0x0000a7c0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
- {0x0000a7c4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
- {0x0000a7c8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
- {0x0000a7cc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
- {0x0000a7d0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
- {0x0000a7d4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
- {0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000},
+static const u32 ar9287Modes_tx_gain_9287_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00008004, 0x00008004},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x0000c00a, 0x0000c00a},
+ {0x0000a310, 0x00000000, 0x00000000, 0x0001000c, 0x0001000c},
+ {0x0000a314, 0x00000000, 0x00000000, 0x0001420b, 0x0001420b},
+ {0x0000a318, 0x00000000, 0x00000000, 0x0001824a, 0x0001824a},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x0001c44a, 0x0001c44a},
+ {0x0000a320, 0x00000000, 0x00000000, 0x0002064a, 0x0002064a},
+ {0x0000a324, 0x00000000, 0x00000000, 0x0002484a, 0x0002484a},
+ {0x0000a328, 0x00000000, 0x00000000, 0x00028a4a, 0x00028a4a},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0002cc4a, 0x0002cc4a},
+ {0x0000a330, 0x00000000, 0x00000000, 0x00030e4a, 0x00030e4a},
+ {0x0000a334, 0x00000000, 0x00000000, 0x00034e8a, 0x00034e8a},
+ {0x0000a338, 0x00000000, 0x00000000, 0x00038e8c, 0x00038e8c},
+ {0x0000a33c, 0x00000000, 0x00000000, 0x0003cecc, 0x0003cecc},
+ {0x0000a340, 0x00000000, 0x00000000, 0x00040ed4, 0x00040ed4},
+ {0x0000a344, 0x00000000, 0x00000000, 0x00044edc, 0x00044edc},
+ {0x0000a348, 0x00000000, 0x00000000, 0x00048ede, 0x00048ede},
+ {0x0000a34c, 0x00000000, 0x00000000, 0x0004cf1e, 0x0004cf1e},
+ {0x0000a350, 0x00000000, 0x00000000, 0x00050f5e, 0x00050f5e},
+ {0x0000a354, 0x00000000, 0x00000000, 0x00054f9e, 0x00054f9e},
+ {0x0000a780, 0x00000000, 0x00000000, 0x00000062, 0x00000062},
+ {0x0000a784, 0x00000000, 0x00000000, 0x00004064, 0x00004064},
+ {0x0000a788, 0x00000000, 0x00000000, 0x000080a4, 0x000080a4},
+ {0x0000a78c, 0x00000000, 0x00000000, 0x0000c0aa, 0x0000c0aa},
+ {0x0000a790, 0x00000000, 0x00000000, 0x000100ac, 0x000100ac},
+ {0x0000a794, 0x00000000, 0x00000000, 0x000140b4, 0x000140b4},
+ {0x0000a798, 0x00000000, 0x00000000, 0x000180f4, 0x000180f4},
+ {0x0000a79c, 0x00000000, 0x00000000, 0x0001c134, 0x0001c134},
+ {0x0000a7a0, 0x00000000, 0x00000000, 0x00020174, 0x00020174},
+ {0x0000a7a4, 0x00000000, 0x00000000, 0x0002417c, 0x0002417c},
+ {0x0000a7a8, 0x00000000, 0x00000000, 0x0002817e, 0x0002817e},
+ {0x0000a7ac, 0x00000000, 0x00000000, 0x0002c1be, 0x0002c1be},
+ {0x0000a7b0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7b4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7b8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7bc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7c0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7c4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7c8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7cc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7d0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7d4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000},
};
-static const u32 ar9287Modes_rx_gain_9287_1_1[][6] = {
- {0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120},
- {0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124},
- {0x00009a08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128},
- {0x00009a0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c},
- {0x00009a10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130},
- {0x00009a14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194},
- {0x00009a18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198},
- {0x00009a1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c},
- {0x00009a20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210},
- {0x00009a24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284},
- {0x00009a28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288},
- {0x00009a2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c},
- {0x00009a30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290},
- {0x00009a34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294},
- {0x00009a38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0},
- {0x00009a3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4},
- {0x00009a40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8},
- {0x00009a44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac},
- {0x00009a48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0},
- {0x00009a4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4},
- {0x00009a50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8},
- {0x00009a54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4},
- {0x00009a58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708},
- {0x00009a5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c},
- {0x00009a60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710},
- {0x00009a64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04},
- {0x00009a68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08},
- {0x00009a6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c},
- {0x00009a70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10},
- {0x00009a74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14},
- {0x00009a78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18},
- {0x00009a7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c},
- {0x00009a80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90},
- {0x00009a84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94},
- {0x00009a88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98},
- {0x00009a8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4},
- {0x00009a90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8},
- {0x00009a94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04},
- {0x00009a98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08},
- {0x00009a9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c},
- {0x00009aa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10},
- {0x00009aa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14},
- {0x00009aa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18},
- {0x00009aac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c},
- {0x00009ab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90},
- {0x00009ab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18},
- {0x00009ab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24},
- {0x00009abc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28},
- {0x00009ac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314},
- {0x00009ac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318},
- {0x00009ac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c},
- {0x00009acc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390},
- {0x00009ad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394},
- {0x00009ad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398},
- {0x00009ad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4},
- {0x00009adc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8},
- {0x00009ae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac},
- {0x00009ae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0},
- {0x00009ae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380},
- {0x00009aec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384},
- {0x00009af0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388},
- {0x00009af4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710},
- {0x00009af8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714},
- {0x00009afc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718},
- {0x00009b00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10},
- {0x00009b04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14},
- {0x00009b08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18},
- {0x00009b0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c},
- {0x00009b10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90},
- {0x00009b14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94},
- {0x00009b18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c},
- {0x00009b1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90},
- {0x00009b20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94},
- {0x00009b24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0},
- {0x00009b28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4},
- {0x00009b2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8},
- {0x00009b30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac},
- {0x00009b34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0},
- {0x00009b38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4},
- {0x00009b3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1},
- {0x00009b40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5},
- {0x00009b44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9},
- {0x00009b48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad},
- {0x00009b4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1},
- {0x00009b50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5},
- {0x00009b54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9},
- {0x00009b58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5},
- {0x00009b5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9},
- {0x00009b60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd},
- {0x00009b64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1},
- {0x00009b68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5},
- {0x00009b6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2},
- {0x00009b70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6},
- {0x00009b74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca},
- {0x00009b78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce},
- {0x00009b7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2},
- {0x00009b80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6},
- {0x00009b84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda},
- {0x00009b88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7},
- {0x00009b8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb},
- {0x00009b90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf},
- {0x00009b94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3},
- {0x00009b98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7},
- {0x00009b9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009ba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009ba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009ba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009be0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009be4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009be8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009bfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000aa00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120},
- {0x0000aa04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124},
- {0x0000aa08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128},
- {0x0000aa0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c},
- {0x0000aa10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130},
- {0x0000aa14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194},
- {0x0000aa18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198},
- {0x0000aa1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c},
- {0x0000aa20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210},
- {0x0000aa24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284},
- {0x0000aa28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288},
- {0x0000aa2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c},
- {0x0000aa30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290},
- {0x0000aa34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294},
- {0x0000aa38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0},
- {0x0000aa3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4},
- {0x0000aa40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8},
- {0x0000aa44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac},
- {0x0000aa48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0},
- {0x0000aa4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4},
- {0x0000aa50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8},
- {0x0000aa54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4},
- {0x0000aa58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708},
- {0x0000aa5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c},
- {0x0000aa60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710},
- {0x0000aa64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04},
- {0x0000aa68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08},
- {0x0000aa6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c},
- {0x0000aa70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10},
- {0x0000aa74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14},
- {0x0000aa78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18},
- {0x0000aa7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c},
- {0x0000aa80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90},
- {0x0000aa84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94},
- {0x0000aa88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98},
- {0x0000aa8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4},
- {0x0000aa90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8},
- {0x0000aa94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04},
- {0x0000aa98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08},
- {0x0000aa9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c},
- {0x0000aaa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10},
- {0x0000aaa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14},
- {0x0000aaa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18},
- {0x0000aaac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c},
- {0x0000aab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90},
- {0x0000aab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18},
- {0x0000aab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24},
- {0x0000aabc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28},
- {0x0000aac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314},
- {0x0000aac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318},
- {0x0000aac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c},
- {0x0000aacc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390},
- {0x0000aad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394},
- {0x0000aad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398},
- {0x0000aad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4},
- {0x0000aadc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8},
- {0x0000aae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac},
- {0x0000aae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0},
- {0x0000aae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380},
- {0x0000aaec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384},
- {0x0000aaf0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388},
- {0x0000aaf4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710},
- {0x0000aaf8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714},
- {0x0000aafc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718},
- {0x0000ab00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10},
- {0x0000ab04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14},
- {0x0000ab08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18},
- {0x0000ab0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c},
- {0x0000ab10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90},
- {0x0000ab14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94},
- {0x0000ab18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c},
- {0x0000ab1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90},
- {0x0000ab20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94},
- {0x0000ab24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0},
- {0x0000ab28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4},
- {0x0000ab2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8},
- {0x0000ab30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac},
- {0x0000ab34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0},
- {0x0000ab38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4},
- {0x0000ab3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1},
- {0x0000ab40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5},
- {0x0000ab44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9},
- {0x0000ab48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad},
- {0x0000ab4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1},
- {0x0000ab50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5},
- {0x0000ab54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9},
- {0x0000ab58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5},
- {0x0000ab5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9},
- {0x0000ab60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd},
- {0x0000ab64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1},
- {0x0000ab68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5},
- {0x0000ab6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2},
- {0x0000ab70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6},
- {0x0000ab74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca},
- {0x0000ab78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce},
- {0x0000ab7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2},
- {0x0000ab80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6},
- {0x0000ab84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda},
- {0x0000ab88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7},
- {0x0000ab8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb},
- {0x0000ab90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf},
- {0x0000ab94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3},
- {0x0000ab98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7},
- {0x0000ab9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000aba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000aba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000aba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abe0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abe4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abe8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x0000abfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
- {0x00009848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067},
- {0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067},
+static const u32 ar9287Modes_rx_gain_9287_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120},
+ {0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124},
+ {0x00009a08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128},
+ {0x00009a0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c},
+ {0x00009a10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130},
+ {0x00009a14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194},
+ {0x00009a18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198},
+ {0x00009a1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c},
+ {0x00009a20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210},
+ {0x00009a24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284},
+ {0x00009a28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288},
+ {0x00009a2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c},
+ {0x00009a30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290},
+ {0x00009a34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294},
+ {0x00009a38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0},
+ {0x00009a3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4},
+ {0x00009a40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8},
+ {0x00009a44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac},
+ {0x00009a48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0},
+ {0x00009a4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4},
+ {0x00009a50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8},
+ {0x00009a54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4},
+ {0x00009a58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708},
+ {0x00009a5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c},
+ {0x00009a60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710},
+ {0x00009a64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04},
+ {0x00009a68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08},
+ {0x00009a6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c},
+ {0x00009a70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10},
+ {0x00009a74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14},
+ {0x00009a78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18},
+ {0x00009a7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c},
+ {0x00009a80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90},
+ {0x00009a84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94},
+ {0x00009a88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98},
+ {0x00009a8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4},
+ {0x00009a90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8},
+ {0x00009a94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04},
+ {0x00009a98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08},
+ {0x00009a9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c},
+ {0x00009aa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10},
+ {0x00009aa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14},
+ {0x00009aa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18},
+ {0x00009aac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c},
+ {0x00009ab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90},
+ {0x00009ab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18},
+ {0x00009ab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24},
+ {0x00009abc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28},
+ {0x00009ac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314},
+ {0x00009ac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318},
+ {0x00009ac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c},
+ {0x00009acc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390},
+ {0x00009ad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394},
+ {0x00009ad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398},
+ {0x00009ad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4},
+ {0x00009adc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8},
+ {0x00009ae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac},
+ {0x00009ae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0},
+ {0x00009ae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380},
+ {0x00009aec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384},
+ {0x00009af0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388},
+ {0x00009af4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710},
+ {0x00009af8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714},
+ {0x00009afc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718},
+ {0x00009b00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10},
+ {0x00009b04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14},
+ {0x00009b08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18},
+ {0x00009b0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c},
+ {0x00009b10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90},
+ {0x00009b14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94},
+ {0x00009b18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c},
+ {0x00009b1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90},
+ {0x00009b20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94},
+ {0x00009b24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0},
+ {0x00009b28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4},
+ {0x00009b2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8},
+ {0x00009b30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac},
+ {0x00009b34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0},
+ {0x00009b38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4},
+ {0x00009b3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1},
+ {0x00009b40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5},
+ {0x00009b44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9},
+ {0x00009b48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad},
+ {0x00009b4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1},
+ {0x00009b50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5},
+ {0x00009b54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9},
+ {0x00009b58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5},
+ {0x00009b5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9},
+ {0x00009b60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd},
+ {0x00009b64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1},
+ {0x00009b68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5},
+ {0x00009b6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2},
+ {0x00009b70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6},
+ {0x00009b74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca},
+ {0x00009b78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce},
+ {0x00009b7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2},
+ {0x00009b80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6},
+ {0x00009b84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda},
+ {0x00009b88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7},
+ {0x00009b8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb},
+ {0x00009b90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf},
+ {0x00009b94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3},
+ {0x00009b98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7},
+ {0x00009b9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009ba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009ba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009ba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009be0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009be4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009be8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000aa00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120},
+ {0x0000aa04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124},
+ {0x0000aa08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128},
+ {0x0000aa0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c},
+ {0x0000aa10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130},
+ {0x0000aa14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194},
+ {0x0000aa18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198},
+ {0x0000aa1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c},
+ {0x0000aa20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210},
+ {0x0000aa24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284},
+ {0x0000aa28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288},
+ {0x0000aa2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c},
+ {0x0000aa30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290},
+ {0x0000aa34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294},
+ {0x0000aa38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0},
+ {0x0000aa3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4},
+ {0x0000aa40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8},
+ {0x0000aa44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac},
+ {0x0000aa48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0},
+ {0x0000aa4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4},
+ {0x0000aa50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8},
+ {0x0000aa54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4},
+ {0x0000aa58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708},
+ {0x0000aa5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c},
+ {0x0000aa60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710},
+ {0x0000aa64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04},
+ {0x0000aa68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08},
+ {0x0000aa6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c},
+ {0x0000aa70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10},
+ {0x0000aa74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14},
+ {0x0000aa78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18},
+ {0x0000aa7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c},
+ {0x0000aa80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90},
+ {0x0000aa84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94},
+ {0x0000aa88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98},
+ {0x0000aa8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4},
+ {0x0000aa90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8},
+ {0x0000aa94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04},
+ {0x0000aa98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08},
+ {0x0000aa9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c},
+ {0x0000aaa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10},
+ {0x0000aaa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14},
+ {0x0000aaa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18},
+ {0x0000aaac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c},
+ {0x0000aab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90},
+ {0x0000aab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18},
+ {0x0000aab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24},
+ {0x0000aabc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28},
+ {0x0000aac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314},
+ {0x0000aac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318},
+ {0x0000aac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c},
+ {0x0000aacc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390},
+ {0x0000aad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394},
+ {0x0000aad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398},
+ {0x0000aad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4},
+ {0x0000aadc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8},
+ {0x0000aae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac},
+ {0x0000aae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0},
+ {0x0000aae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380},
+ {0x0000aaec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384},
+ {0x0000aaf0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388},
+ {0x0000aaf4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710},
+ {0x0000aaf8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714},
+ {0x0000aafc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718},
+ {0x0000ab00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10},
+ {0x0000ab04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14},
+ {0x0000ab08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18},
+ {0x0000ab0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c},
+ {0x0000ab10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90},
+ {0x0000ab14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94},
+ {0x0000ab18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c},
+ {0x0000ab1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90},
+ {0x0000ab20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94},
+ {0x0000ab24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0},
+ {0x0000ab28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4},
+ {0x0000ab2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8},
+ {0x0000ab30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac},
+ {0x0000ab34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0},
+ {0x0000ab38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4},
+ {0x0000ab3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1},
+ {0x0000ab40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5},
+ {0x0000ab44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9},
+ {0x0000ab48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad},
+ {0x0000ab4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1},
+ {0x0000ab50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5},
+ {0x0000ab54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9},
+ {0x0000ab58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5},
+ {0x0000ab5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9},
+ {0x0000ab60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd},
+ {0x0000ab64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1},
+ {0x0000ab68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5},
+ {0x0000ab6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2},
+ {0x0000ab70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6},
+ {0x0000ab74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca},
+ {0x0000ab78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce},
+ {0x0000ab7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2},
+ {0x0000ab80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6},
+ {0x0000ab84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda},
+ {0x0000ab88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7},
+ {0x0000ab8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb},
+ {0x0000ab90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf},
+ {0x0000ab94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3},
+ {0x0000ab98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7},
+ {0x0000ab9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000aba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000aba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000aba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abe0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abe4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abe8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009848, 0x00000000, 0x00000000, 0x00001067, 0x00001067},
+ {0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067},
};
static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
@@ -2526,310 +2540,311 @@ static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
{0x00004044, 0x00000000},
};
-static const u32 ar9271Modes_9271[][6] = {
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180},
- {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880},
- {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303},
- {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
- {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
- {0x00009828, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001},
- {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
- {0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e},
- {0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620, 0x037216a0},
- {0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059},
- {0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059},
- {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2},
- {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
- {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e},
- {0x00009860, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
- {0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
- {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881},
- {0x00009910, 0x30002310, 0x30002310, 0x30002310, 0x30002310, 0x30002310},
- {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0},
- {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016},
- {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d},
- {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020, 0xffbc1010},
- {0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c},
- {0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00},
- {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
- {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
- {0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f},
- {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
- {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
- {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000},
- {0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000},
- {0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000},
- {0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000},
- {0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000},
- {0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000},
- {0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000},
- {0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000},
- {0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000},
- {0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000},
- {0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000},
- {0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000},
- {0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000},
- {0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000},
- {0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000},
- {0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000},
- {0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000},
- {0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000},
- {0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000},
- {0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000},
- {0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000},
- {0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000},
- {0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000},
- {0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000},
- {0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000},
- {0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000},
- {0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000},
- {0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000},
- {0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000},
- {0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000},
- {0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000},
- {0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000},
- {0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000},
- {0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000},
- {0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000},
- {0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000},
- {0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000},
- {0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000},
- {0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000},
- {0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000},
- {0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000},
- {0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000},
- {0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000},
- {0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000},
- {0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000},
- {0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000},
- {0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000},
- {0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000},
- {0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000},
- {0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000},
- {0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000},
- {0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000},
- {0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000},
- {0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000},
- {0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000},
- {0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000},
- {0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000},
- {0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000},
- {0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000},
- {0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000},
- {0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000},
- {0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000},
- {0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000},
- {0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000},
- {0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000},
- {0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000},
- {0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000},
- {0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000},
- {0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000},
- {0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000},
- {0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000},
- {0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000},
- {0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000},
- {0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000},
- {0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000},
- {0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000},
- {0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000},
- {0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000},
- {0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000},
- {0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000},
- {0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000},
- {0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000},
- {0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000},
- {0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000},
- {0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000},
- {0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000},
- {0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000},
- {0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000},
- {0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000},
- {0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000},
- {0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000},
- {0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000},
- {0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000},
- {0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000},
- {0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000},
- {0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000},
- {0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000},
- {0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000},
- {0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000},
- {0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000},
- {0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000},
- {0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000},
- {0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000},
- {0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000},
- {0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000},
- {0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000},
- {0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000},
- {0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000},
- {0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000},
- {0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000},
- {0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000},
- {0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000},
- {0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000},
- {0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000},
- {0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000},
- {0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000},
- {0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000},
- {0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000},
- {0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000},
- {0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000},
- {0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000},
- {0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000},
- {0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000},
- {0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000},
- {0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000},
- {0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000},
- {0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000},
- {0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000},
- {0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000},
- {0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000},
- {0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000},
- {0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000},
- {0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000},
- {0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000},
- {0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000},
- {0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000},
- {0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000},
- {0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000},
- {0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000},
- {0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000},
- {0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000},
- {0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000},
- {0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000},
- {0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000},
- {0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000},
- {0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000},
- {0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000},
- {0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000},
- {0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000},
- {0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000},
- {0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000},
- {0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000},
- {0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000},
- {0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000},
- {0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000},
- {0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000},
- {0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000},
- {0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000},
- {0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000},
- {0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000},
- {0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000},
- {0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000},
- {0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000},
- {0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000},
- {0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000},
- {0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000},
- {0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000},
- {0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000},
- {0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000},
- {0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000},
- {0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000},
- {0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000},
- {0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000},
- {0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000},
- {0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000},
- {0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000},
- {0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000},
- {0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000},
- {0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000},
- {0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004},
- {0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000},
- {0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000},
- {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
- {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
- {0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000},
- {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e},
+static const u32 ar9271Modes_9271[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+ {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300},
+ {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x00009828, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001},
+ {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
+ {0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e},
+ {0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620},
+ {0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053},
+ {0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053},
+ {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2},
+ {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e},
+ {0x00009860, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
+ {0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x00009910, 0x30002310, 0x30002310, 0x30002310, 0x30002310},
+ {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d},
+ {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020},
+ {0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c},
+ {0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00},
+ {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
+ {0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f},
+ {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
+ {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
+ {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084},
+ {0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088},
+ {0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c},
+ {0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100},
+ {0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104},
+ {0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108},
+ {0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c},
+ {0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110},
+ {0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114},
+ {0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180},
+ {0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184},
+ {0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188},
+ {0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c},
+ {0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190},
+ {0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194},
+ {0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0},
+ {0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c},
+ {0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8},
+ {0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284},
+ {0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288},
+ {0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224},
+ {0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290},
+ {0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300},
+ {0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304},
+ {0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308},
+ {0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c},
+ {0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380},
+ {0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384},
+ {0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700},
+ {0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704},
+ {0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708},
+ {0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c},
+ {0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780},
+ {0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784},
+ {0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00},
+ {0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04},
+ {0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08},
+ {0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c},
+ {0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80},
+ {0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84},
+ {0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88},
+ {0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c},
+ {0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90},
+ {0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80},
+ {0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84},
+ {0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88},
+ {0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c},
+ {0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90},
+ {0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c},
+ {0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310},
+ {0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384},
+ {0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388},
+ {0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324},
+ {0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704},
+ {0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4},
+ {0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8},
+ {0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710},
+ {0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714},
+ {0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720},
+ {0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724},
+ {0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728},
+ {0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c},
+ {0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0},
+ {0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4},
+ {0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8},
+ {0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0},
+ {0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4},
+ {0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8},
+ {0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5},
+ {0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9},
+ {0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad},
+ {0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1},
+ {0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5},
+ {0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9},
+ {0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5},
+ {0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9},
+ {0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1},
+ {0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5},
+ {0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9},
+ {0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6},
+ {0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca},
+ {0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce},
+ {0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2},
+ {0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6},
+ {0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3},
+ {0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7},
+ {0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb},
+ {0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf},
+ {0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7},
+ {0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084},
+ {0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088},
+ {0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c},
+ {0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100},
+ {0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104},
+ {0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108},
+ {0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c},
+ {0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110},
+ {0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114},
+ {0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180},
+ {0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184},
+ {0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188},
+ {0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c},
+ {0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190},
+ {0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194},
+ {0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0},
+ {0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c},
+ {0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8},
+ {0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284},
+ {0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288},
+ {0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224},
+ {0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290},
+ {0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300},
+ {0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304},
+ {0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308},
+ {0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c},
+ {0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380},
+ {0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384},
+ {0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700},
+ {0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704},
+ {0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708},
+ {0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c},
+ {0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780},
+ {0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784},
+ {0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00},
+ {0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04},
+ {0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08},
+ {0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c},
+ {0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80},
+ {0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84},
+ {0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88},
+ {0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c},
+ {0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90},
+ {0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80},
+ {0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84},
+ {0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88},
+ {0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c},
+ {0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90},
+ {0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c},
+ {0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310},
+ {0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384},
+ {0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388},
+ {0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324},
+ {0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704},
+ {0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4},
+ {0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8},
+ {0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710},
+ {0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714},
+ {0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720},
+ {0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724},
+ {0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728},
+ {0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c},
+ {0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0},
+ {0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4},
+ {0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8},
+ {0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0},
+ {0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4},
+ {0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8},
+ {0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5},
+ {0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9},
+ {0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad},
+ {0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1},
+ {0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5},
+ {0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9},
+ {0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5},
+ {0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9},
+ {0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1},
+ {0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5},
+ {0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9},
+ {0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6},
+ {0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca},
+ {0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce},
+ {0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2},
+ {0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6},
+ {0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3},
+ {0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7},
+ {0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb},
+ {0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf},
+ {0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7},
+ {0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004},
+ {0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000},
+ {0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000},
+ {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000},
+ {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e},
};
static const u32 ar9271Common_9271[][2] = {
@@ -3175,91 +3190,95 @@ static const u32 ar9271Common_japan_2484_cck_fir_coeff_9271[][2] = {
{0x0000a1fc, 0xca9228ee},
};
-static const u32 ar9271Modes_9271_1_0_only[][6] = {
- {0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311},
- {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
+static const u32 ar9271Modes_9271_1_0_only[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311},
+ {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
};
-static const u32 ar9271Modes_9271_ANI_reg[][6] = {
- {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2},
- {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e},
- {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
- {0x0000986c, 0x06903881, 0x06903881, 0x06903881, 0x06903881, 0x06903881},
- {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x0000a208, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8},
- {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d},
- {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+static const u32 ar9271Modes_9271_ANI_reg[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2},
+ {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e},
+ {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x0000986c, 0x06903881, 0x06903881, 0x06903881, 0x06903881},
+ {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x0000a208, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8},
+ {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d},
+ {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
};
-static const u32 ar9271Modes_normal_power_tx_gain_9271[][6] = {
- {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000},
- {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000},
- {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000},
- {0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000},
- {0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000},
- {0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000},
- {0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000},
- {0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000},
- {0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000},
- {0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000},
- {0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000},
- {0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000},
- {0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000},
- {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000},
- {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000},
- {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x00007838, 0x00000029, 0x00000029, 0x00000029, 0x00000029, 0x00000029},
- {0x00007824, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff},
- {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4},
- {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04},
- {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652},
- {0x0000a278, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd},
- {0x0000a27c, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd},
- {0x0000a394, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd},
- {0x0000a398, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd},
- {0x0000a3dc, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd},
- {0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd},
+static const u32 ar9271Modes_normal_power_tx_gain_9271[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608},
+ {0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610},
+ {0x0000a314, 0x00000000, 0x00000000, 0x00024650, 0x00024650},
+ {0x0000a318, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x000316d2, 0x000316d2},
+ {0x0000a320, 0x00000000, 0x00000000, 0x00039758, 0x00039758},
+ {0x0000a324, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759},
+ {0x0000a328, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c},
+ {0x0000a330, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e},
+ {0x0000a334, 0x000368de, 0x000368de, 0x0004979f, 0x0004979f},
+ {0x0000a338, 0x0003891e, 0x0003891e, 0x0004d7df, 0x0004d7df},
+ {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e},
+ {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x00007838, 0x00000029, 0x00000029, 0x00000029, 0x00000029},
+ {0x00007824, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff},
+ {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4},
+ {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04},
+ {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21c652, 0x0a21c652},
+ {0x0000a278, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd},
+ {0x0000a27c, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd},
+ {0x0000a394, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd},
+ {0x0000a398, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd},
+ {0x0000a3dc, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd},
+ {0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd},
};
-static const u32 ar9271Modes_high_power_tx_gain_9271[][6] = {
- {0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000, 0x00000000},
- {0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200, 0x00000000},
- {0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201, 0x00000000},
- {0x0000a30c, 0x00000000, 0x00000000, 0x0001b240, 0x0001b240, 0x00000000},
- {0x0000a310, 0x00000000, 0x00000000, 0x0001d241, 0x0001d241, 0x00000000},
- {0x0000a314, 0x00000000, 0x00000000, 0x0001f600, 0x0001f600, 0x00000000},
- {0x0000a318, 0x00000000, 0x00000000, 0x00022800, 0x00022800, 0x00000000},
- {0x0000a31c, 0x00000000, 0x00000000, 0x00026802, 0x00026802, 0x00000000},
- {0x0000a320, 0x00000000, 0x00000000, 0x0002b805, 0x0002b805, 0x00000000},
- {0x0000a324, 0x00000000, 0x00000000, 0x0002ea41, 0x0002ea41, 0x00000000},
- {0x0000a328, 0x00000000, 0x00000000, 0x00038b00, 0x00038b00, 0x00000000},
- {0x0000a32c, 0x00000000, 0x00000000, 0x0003ab40, 0x0003ab40, 0x00000000},
- {0x0000a330, 0x00000000, 0x00000000, 0x0003cd80, 0x0003cd80, 0x00000000},
- {0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000},
- {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000},
- {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000},
- {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000},
- {0x00007838, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b},
- {0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff},
- {0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba, 0x08609eb6},
- {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00},
- {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a214652, 0x0a214652, 0x0a22a652},
- {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
- {0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063, 0x05018063},
- {0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63},
- {0x0000a398, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063},
- {0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63},
- {0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063},
+static const u32 ar9271Modes_high_power_tx_gain_9271[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x0001b240, 0x0001b240},
+ {0x0000a310, 0x00000000, 0x00000000, 0x0001d241, 0x0001d241},
+ {0x0000a314, 0x00000000, 0x00000000, 0x0001f600, 0x0001f600},
+ {0x0000a318, 0x00000000, 0x00000000, 0x00022800, 0x00022800},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x00026802, 0x00026802},
+ {0x0000a320, 0x00000000, 0x00000000, 0x0002b805, 0x0002b805},
+ {0x0000a324, 0x00000000, 0x00000000, 0x0002ea41, 0x0002ea41},
+ {0x0000a328, 0x00000000, 0x00000000, 0x00038b00, 0x00038b00},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0003ab40, 0x0003ab40},
+ {0x0000a330, 0x00000000, 0x00000000, 0x0003cd80, 0x0003cd80},
+ {0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de},
+ {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e},
+ {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e},
+ {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x00007838, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b},
+ {0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff},
+ {0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba},
+ {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00},
+ {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a214652, 0x0a214652},
+ {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063},
+ {0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63},
+ {0x0000a398, 0x00000063, 0x00000063, 0x00000063, 0x00000063},
+ {0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63},
+ {0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063},
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 45b262fe2c2..f7d8e516a2a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -170,33 +170,104 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
return true;
}
-static void ar9002_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
- bool is_firstseg, bool is_lastseg,
- const void *ds0, dma_addr_t buf_addr,
- unsigned int qcu)
+static void
+ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
{
struct ar5416_desc *ads = AR5416DESC(ds);
+ u32 ctl1, ctl6;
- ads->ds_data = buf_addr;
-
- if (is_firstseg) {
- ads->ds_ctl1 |= seglen | (is_lastseg ? 0 : AR_TxMore);
- } else if (is_lastseg) {
- ads->ds_ctl0 = 0;
- ads->ds_ctl1 = seglen;
- ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
- ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
- } else {
- ads->ds_ctl0 = 0;
- ads->ds_ctl1 = seglen | AR_TxMore;
- ads->ds_ctl2 = 0;
- ads->ds_ctl3 = 0;
- }
ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+
+ ACCESS_ONCE(ads->ds_link) = i->link;
+ ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
+
+ ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
+ ctl6 = SM(i->keytype, AR_EncrType);
+
+ if (AR_SREV_9285(ah)) {
+ ads->ds_ctl8 = 0;
+ ads->ds_ctl9 = 0;
+ ads->ds_ctl10 = 0;
+ ads->ds_ctl11 = 0;
+ }
+
+ if ((i->is_first || i->is_last) &&
+ i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
+ ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
+ | set11nTries(i->rates, 1)
+ | set11nTries(i->rates, 2)
+ | set11nTries(i->rates, 3)
+ | (i->dur_update ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+ ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
+ | set11nRate(i->rates, 1)
+ | set11nRate(i->rates, 2)
+ | set11nRate(i->rates, 3);
+ } else {
+ ACCESS_ONCE(ads->ds_ctl2) = 0;
+ ACCESS_ONCE(ads->ds_ctl3) = 0;
+ }
+
+ if (!i->is_first) {
+ ACCESS_ONCE(ads->ds_ctl0) = 0;
+ ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+ ACCESS_ONCE(ads->ds_ctl6) = ctl6;
+ return;
+ }
+
+ ctl1 |= (i->keyix != ATH9K_TXKEYIX_INVALID ? SM(i->keyix, AR_DestIdx) : 0)
+ | SM(i->type, AR_FrameType)
+ | (i->flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
+ | (i->flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
+ | (i->flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
+
+ switch (i->aggr) {
+ case AGGR_BUF_FIRST:
+ ctl6 |= SM(i->aggr_len, AR_AggrLen);
+ /* fall through */
+ case AGGR_BUF_MIDDLE:
+ ctl1 |= AR_IsAggr | AR_MoreAggr;
+ ctl6 |= SM(i->ndelim, AR_PadDelim);
+ break;
+ case AGGR_BUF_LAST:
+ ctl1 |= AR_IsAggr;
+ break;
+ case AGGR_BUF_NONE:
+ break;
+ }
+
+ ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
+ | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(i->txpower, AR_XmitPower)
+ | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+ | (i->flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
+ | (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
+ | (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
+ | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
+ (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
+
+ ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+ ACCESS_ONCE(ads->ds_ctl6) = ctl6;
+
+ if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
+ return;
+
+ ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
+ | set11nPktDurRTSCTS(i->rates, 1);
+
+ ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
+ | set11nPktDurRTSCTS(i->rates, 3);
+
+ ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
+ | set11nRateFlags(i->rates, 1)
+ | set11nRateFlags(i->rates, 2)
+ | set11nRateFlags(i->rates, 3)
+ | SM(i->rtscts_rate, AR_RTSCTSRate);
}
static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
@@ -271,145 +342,6 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
return 0;
}
-static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
- u32 pktLen, enum ath9k_pkt_type type,
- u32 txPower, u32 keyIx,
- enum ath9k_key_type keyType, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if (txPower > 63)
- txPower = 63;
-
- ads->ds_ctl0 = (pktLen & AR_FrameLen)
- | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
- | SM(txPower, AR_XmitPower)
- | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
- | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
- | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
-
- ads->ds_ctl1 =
- (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
- | SM(type, AR_FrameType)
- | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
- | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
- | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
-
- ads->ds_ctl6 = SM(keyType, AR_EncrType);
-
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
- ads->ds_ctl8 = 0;
- ads->ds_ctl9 = 0;
- ads->ds_ctl10 = 0;
- ads->ds_ctl11 = 0;
- }
-}
-
-static void ar9002_hw_set_clrdmask(struct ath_hw *ah, void *ds, bool val)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if (val)
- ads->ds_ctl0 |= AR_ClrDestMask;
- else
- ads->ds_ctl0 &= ~AR_ClrDestMask;
-}
-
-static void ar9002_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
- void *lastds,
- u32 durUpdateEn, u32 rtsctsRate,
- u32 rtsctsDuration,
- struct ath9k_11n_rate_series series[],
- u32 nseries, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- struct ar5416_desc *last_ads = AR5416DESC(lastds);
- u32 ds_ctl0;
-
- if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
- ds_ctl0 = ads->ds_ctl0;
-
- if (flags & ATH9K_TXDESC_RTSENA) {
- ds_ctl0 &= ~AR_CTSEnable;
- ds_ctl0 |= AR_RTSEnable;
- } else {
- ds_ctl0 &= ~AR_RTSEnable;
- ds_ctl0 |= AR_CTSEnable;
- }
-
- ads->ds_ctl0 = ds_ctl0;
- } else {
- ads->ds_ctl0 =
- (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
- }
-
- ads->ds_ctl2 = set11nTries(series, 0)
- | set11nTries(series, 1)
- | set11nTries(series, 2)
- | set11nTries(series, 3)
- | (durUpdateEn ? AR_DurUpdateEna : 0)
- | SM(0, AR_BurstDur);
-
- ads->ds_ctl3 = set11nRate(series, 0)
- | set11nRate(series, 1)
- | set11nRate(series, 2)
- | set11nRate(series, 3);
-
- ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
- | set11nPktDurRTSCTS(series, 1);
-
- ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
- | set11nPktDurRTSCTS(series, 3);
-
- ads->ds_ctl7 = set11nRateFlags(series, 0)
- | set11nRateFlags(series, 1)
- | set11nRateFlags(series, 2)
- | set11nRateFlags(series, 3)
- | SM(rtsctsRate, AR_RTSCTSRate);
- last_ads->ds_ctl2 = ads->ds_ctl2;
- last_ads->ds_ctl3 = ads->ds_ctl3;
-}
-
-static void ar9002_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
- u32 aggrLen)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
- ads->ds_ctl6 &= ~AR_AggrLen;
- ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
-}
-
-static void ar9002_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
- u32 numDelims)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- unsigned int ctl6;
-
- ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
-
- ctl6 = ads->ds_ctl6;
- ctl6 &= ~AR_PadDelim;
- ctl6 |= SM(numDelims, AR_PadDelim);
- ads->ds_ctl6 = ctl6;
-}
-
-static void ar9002_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 |= AR_IsAggr;
- ads->ds_ctl1 &= ~AR_MoreAggr;
- ads->ds_ctl6 &= ~AR_PadDelim;
-}
-
-static void ar9002_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
-}
-
void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
u32 size, u32 flags)
{
@@ -433,13 +365,6 @@ void ar9002_hw_attach_mac_ops(struct ath_hw *ah)
ops->rx_enable = ar9002_hw_rx_enable;
ops->set_desc_link = ar9002_hw_set_desc_link;
ops->get_isr = ar9002_hw_get_isr;
- ops->fill_txdesc = ar9002_hw_fill_txdesc;
+ ops->set_txdesc = ar9002_set_txdesc;
ops->proc_txdesc = ar9002_hw_proc_txdesc;
- ops->set11n_txdesc = ar9002_hw_set11n_txdesc;
- ops->set11n_ratescenario = ar9002_hw_set11n_ratescenario;
- ops->set11n_aggr_first = ar9002_hw_set11n_aggr_first;
- ops->set11n_aggr_middle = ar9002_hw_set11n_aggr_middle;
- ops->set11n_aggr_last = ar9002_hw_set11n_aggr_last;
- ops->clr11n_aggr = ar9002_hw_clr11n_aggr;
- ops->set_clrdmask = ar9002_hw_set_clrdmask;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 3e69c631ebb..026f9de15d1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -24,11 +24,11 @@ static const u32 ar9300_2p2_radio_postamble[][5] = {
{0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
{0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
{0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
- {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0001610c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
- {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0001650c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
- {0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0001690c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
@@ -190,7 +190,7 @@ static const u32 ar9300_2p2_radio_core[][2] = {
{0x00016288, 0x05a20408},
{0x0001628c, 0x00038c07},
{0x00016290, 0x00000004},
- {0x00016294, 0x458aa14f},
+ {0x00016294, 0x458a214f},
{0x00016380, 0x00000000},
{0x00016384, 0x00000000},
{0x00016388, 0x00800700},
@@ -636,7 +636,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
- {0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0},
+ {0x0000a204, 0x000036c0, 0x000036c4, 0x000036c4, 0x000036c0},
{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
{0x0000a22c, 0x01026a2f, 0x01026a2f, 0x01026a2f, 0x01026a2f},
{0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
@@ -835,108 +835,108 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
- {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
- {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
- {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
- {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
- {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
- {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
- {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
- {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
- {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
- {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
- {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
- {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
- {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
- {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
- {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
- {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
- {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
- {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
- {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
- {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
- {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
- {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
- {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
- {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
- {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
- {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
- {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
- {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5a08442e, 0x5a08442e, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x5e0a4431, 0x5e0a4431, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400},
+ {0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5a88442e, 0x5a88442e, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x5e8a4431, 0x5e8a4431, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
- {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
- {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
- {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
- {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+ {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
{0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
- {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
- {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
- {0x00016444, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
- {0x00016448, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
- {0x00016468, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
- {0x00016844, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
- {0x00016848, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
- {0x00016868, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index f48051c5009..16851cb109a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -17,8 +17,9 @@
#include "hw.h"
#include "hw-ops.h"
#include "ar9003_phy.h"
+#include "ar9003_rtt.h"
-#define MAX_MEASUREMENT 8
+#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
#define MAX_MAG_DELTA 11
#define MAX_PHS_DELTA 10
@@ -615,11 +616,10 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
{
int mp_max = -64, max_idx = 0;
int mp_min = 63, min_idx = 0;
- int mp_avg = 0, i, outlier_idx = 0;
+ int mp_avg = 0, i, outlier_idx = 0, mp_count = 0;
/* find min/max mismatch across all calibrated gains */
for (i = 0; i < nmeasurement; i++) {
- mp_avg += mp_coeff[i];
if (mp_coeff[i] > mp_max) {
mp_max = mp_coeff[i];
max_idx = i;
@@ -632,10 +632,20 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
/* find average (exclude max abs value) */
for (i = 0; i < nmeasurement; i++) {
if ((abs(mp_coeff[i]) < abs(mp_max)) ||
- (abs(mp_coeff[i]) < abs(mp_min)))
+ (abs(mp_coeff[i]) < abs(mp_min))) {
mp_avg += mp_coeff[i];
+ mp_count++;
+ }
}
- mp_avg /= (nmeasurement - 1);
+
+ /*
+ * finding mean magnitude/phase if possible, otherwise
+ * just use the last value as the mean
+ */
+ if (mp_count)
+ mp_avg /= mp_count;
+ else
+ mp_avg = mp_coeff[nmeasurement - 1];
/* detect outlier */
if (abs(mp_max - mp_min) > max_delta) {
@@ -643,16 +653,19 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
outlier_idx = max_idx;
else
outlier_idx = min_idx;
+
+ mp_coeff[outlier_idx] = mp_avg;
}
- mp_coeff[outlier_idx] = mp_avg;
}
static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
u8 num_chains,
- struct coeff *coeff)
+ struct coeff *coeff,
+ bool is_reusable)
{
int i, im, nmeasurement;
u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff));
for (i = 0; i < MAX_MEASUREMENT / 2; i++) {
@@ -702,7 +715,13 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
coeff->iqc_coeff[0]);
+
+ if (caldata)
+ caldata->tx_corr_coeff[im][i] =
+ coeff->iqc_coeff[0];
}
+ if (caldata)
+ caldata->num_measures[i] = nmeasurement;
}
REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
@@ -710,8 +729,10 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
- return;
+ if (caldata)
+ caldata->done_txiqcal_once = is_reusable;
+ return;
}
static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
@@ -738,7 +759,7 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
return true;
}
-static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah)
+static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
{
struct ath_common *common = ath9k_hw_common(ah);
const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
@@ -827,7 +848,8 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah)
coeff.phs_coeff[i][im] -= 128;
}
}
- ar9003_hw_tx_iqcal_load_avg_2_passes(ah, num_chains, &coeff);
+ ar9003_hw_tx_iqcal_load_avg_2_passes(ah, num_chains,
+ &coeff, is_reusable);
return;
@@ -835,23 +857,129 @@ tx_iqcal_fail:
ath_dbg(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
return;
}
+
+static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
+{
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
+ int i, im;
+
+ memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff));
+ for (i = 0; i < MAX_MEASUREMENT / 2; i++) {
+ tx_corr_coeff[i * 2][0] = tx_corr_coeff[(i * 2) + 1][0] =
+ AR_PHY_TX_IQCAL_CORR_COEFF_B0(i);
+ if (!AR_SREV_9485(ah)) {
+ tx_corr_coeff[i * 2][1] =
+ tx_corr_coeff[(i * 2) + 1][1] =
+ AR_PHY_TX_IQCAL_CORR_COEFF_B1(i);
+
+ tx_corr_coeff[i * 2][2] =
+ tx_corr_coeff[(i * 2) + 1][2] =
+ AR_PHY_TX_IQCAL_CORR_COEFF_B2(i);
+ }
+ }
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
+
+ for (im = 0; im < caldata->num_measures[i]; im++) {
+ if ((im % 2) == 0)
+ REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
+ AR_PHY_TX_IQCAL_CORR_COEFF_00_COEFF_TABLE,
+ caldata->tx_corr_coeff[im][i]);
+ else
+ REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
+ caldata->tx_corr_coeff[im][i]);
+ }
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
+ AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
+}
+
+static bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath9k_rtt_hist *hist;
+ u32 *table;
+ int i;
+ bool restore;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT) || !ah->caldata)
+ return false;
+
+ hist = &ah->caldata->rtt_hist;
+ ar9003_hw_rtt_enable(ah);
+ ar9003_hw_rtt_set_mask(ah, 0x10);
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->rxchainmask & (1 << i)))
+ continue;
+ table = &hist->table[i][hist->num_readings][0];
+ ar9003_hw_rtt_load_hist(ah, i, table);
+ }
+ restore = ar9003_hw_rtt_force_restore(ah);
+ ar9003_hw_rtt_disable(ah);
+
+ return restore;
+}
+
static bool ar9003_hw_init_cal(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_hw_capabilities *pCap = &ah->caps;
- int val;
- bool txiqcal_done = false;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ bool txiqcal_done = false, txclcal_done = false;
+ bool is_reusable = true, status = true;
+ bool run_rtt_cal = false, run_agc_cal;
+ bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+ u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
+ AR_PHY_AGC_CONTROL_FLTR_CAL |
+ AR_PHY_AGC_CONTROL_PKDET_CAL;
+ int i, j;
+ u32 cl_idx[AR9300_MAX_CHAINS] = { AR_PHY_CL_TAB_0,
+ AR_PHY_CL_TAB_1,
+ AR_PHY_CL_TAB_2 };
+
+ if (rtt) {
+ if (!ar9003_hw_rtt_restore(ah, chan))
+ run_rtt_cal = true;
+
+ ath_dbg(common, ATH_DBG_CALIBRATE, "RTT restore %s\n",
+ run_rtt_cal ? "failed" : "succeed");
+ }
+ run_agc_cal = run_rtt_cal;
- val = REG_READ(ah, AR_ENT_OTP);
- ath_dbg(common, ATH_DBG_CALIBRATE, "ath9k: AR_ENT_OTP 0x%x\n", val);
+ if (run_rtt_cal) {
+ ar9003_hw_rtt_enable(ah);
+ ar9003_hw_rtt_set_mask(ah, 0x00);
+ ar9003_hw_rtt_clear_hist(ah);
+ }
- /* Configure rx/tx chains before running AGC/TxiQ cals */
- if (val & AR_ENT_OTP_CHAIN2_DISABLE)
- ar9003_hw_set_chain_masks(ah, 0x3, 0x3);
- else
- ar9003_hw_set_chain_masks(ah, pCap->rx_chainmask,
- pCap->tx_chainmask);
+ if (rtt && !run_rtt_cal) {
+ agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
+ agc_supp_cals &= agc_ctrl;
+ agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
+ AR_PHY_AGC_CONTROL_FLTR_CAL |
+ AR_PHY_AGC_CONTROL_PKDET_CAL);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ }
+
+ if (ah->enabled_cals & TX_CL_CAL) {
+ if (caldata && caldata->done_txclcal_once)
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
+ AR_PHY_CL_CAL_ENABLE);
+ else {
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL,
+ AR_PHY_CL_CAL_ENABLE);
+ run_agc_cal = true;
+ }
+ }
+
+ if (!(ah->enabled_cals & TX_IQ_CAL))
+ goto skip_tx_iqcal;
/* Do Tx IQ Calibration */
REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
@@ -862,34 +990,98 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
* For AR9485 or later chips, TxIQ cal runs as part of
* AGC calibration
*/
- if (AR_SREV_9485_OR_LATER(ah))
- txiqcal_done = true;
- else {
- txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
- udelay(5);
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
- }
-
- /* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
- AR_PHY_AGC_CONTROL_CAL);
-
- /* Poll for offset calibration complete */
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT)) {
+ if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
+ if (caldata && !caldata->done_txiqcal_once)
+ REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
+ else
+ REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
+ txiqcal_done = run_agc_cal = true;
+ goto skip_tx_iqcal;
+ } else if (caldata && !caldata->done_txiqcal_once)
+ run_agc_cal = true;
+
+ txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+ udelay(5);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+skip_tx_iqcal:
+ if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
+ /* Calibrate the AGC */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ }
+ if (rtt && !run_rtt_cal) {
+ agc_ctrl |= agc_supp_cals;
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ }
+
+ if (!status) {
+ if (run_rtt_cal)
+ ar9003_hw_rtt_disable(ah);
+
ath_dbg(common, ATH_DBG_CALIBRATE,
- "offset calibration failed to complete in 1ms; noisy environment?\n");
+ "offset calibration failed to complete in 1ms;"
+ "noisy environment?\n");
return false;
}
if (txiqcal_done)
- ar9003_hw_tx_iq_cal_post_proc(ah);
+ ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
+ else if (caldata && caldata->done_txiqcal_once)
+ ar9003_hw_tx_iq_cal_reload(ah);
+
+#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j))
+ if (caldata && (ah->enabled_cals & TX_CL_CAL)) {
+ txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_CLC_SUCCESS);
+ if (caldata->done_txclcal_once) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
+ for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
+ REG_WRITE(ah, CL_TAB_ENTRY(cl_idx[i]),
+ caldata->tx_clcal[i][j]);
+ }
+ } else if (is_reusable && txclcal_done) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
+ for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
+ caldata->tx_clcal[i][j] =
+ REG_READ(ah,
+ CL_TAB_ENTRY(cl_idx[i]));
+ }
+ caldata->done_txclcal_once = true;
+ }
+ }
+#undef CL_TAB_ENTRY
+
+ if (run_rtt_cal && caldata) {
+ struct ath9k_rtt_hist *hist = &caldata->rtt_hist;
+ if (is_reusable && (hist->num_readings < RTT_HIST_MAX)) {
+ u32 *table;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->rxchainmask & (1 << i)))
+ continue;
+ table = &hist->table[i][hist->num_readings][0];
+ ar9003_hw_rtt_fill_hist(ah, i, table);
+ }
+ }
- /* Revert chainmasks to their original values before NF cal */
- ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+ ar9003_hw_rtt_disable(ah);
+ }
+ ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
/* Initialize list pointers */
@@ -916,8 +1108,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
if (ah->cal_list_curr)
ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
- if (ah->caldata)
- ah->caldata->CalValid = 0;
+ if (caldata)
+ caldata->CalValid = 0;
return true;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 1b9400371ea..3b262ba6b17 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -22,25 +22,6 @@
#define COMP_HDR_LEN 4
#define COMP_CKSUM_LEN 2
-#define AR_CH0_TOP (0x00016288)
-#define AR_CH0_TOP_XPABIASLVL (0x300)
-#define AR_CH0_TOP_XPABIASLVL_S (8)
-
-#define AR_CH0_THERM (0x00016290)
-#define AR_CH0_THERM_XPABIASLVL_MSB 0x3
-#define AR_CH0_THERM_XPABIASLVL_MSB_S 0
-#define AR_CH0_THERM_XPASHORT2GND 0x4
-#define AR_CH0_THERM_XPASHORT2GND_S 2
-
-#define AR_SWITCH_TABLE_COM_ALL (0xffff)
-#define AR_SWITCH_TABLE_COM_ALL_S (0)
-
-#define AR_SWITCH_TABLE_COM2_ALL (0xffffff)
-#define AR_SWITCH_TABLE_COM2_ALL_S (0)
-
-#define AR_SWITCH_TABLE_ALL (0xfff)
-#define AR_SWITCH_TABLE_ALL_S (0)
-
#define LE16(x) __constant_cpu_to_le16(x)
#define LE32(x) __constant_cpu_to_le32(x)
@@ -158,7 +139,7 @@ static const struct ar9300_eeprom ar9300_default = {
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -360,7 +341,7 @@ static const struct ar9300_eeprom ar9300_default = {
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -735,7 +716,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -937,7 +918,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -1313,7 +1294,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
.papdRateMaskHt20 = LE32(0x80c080),
.papdRateMaskHt40 = LE32(0x80c080),
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -1515,7 +1496,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -1891,7 +1872,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -2093,7 +2074,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -2468,7 +2449,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
.papdRateMaskHt20 = LE32(0x0c80C080),
.papdRateMaskHt40 = LE32(0x0080C080),
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -2670,7 +2651,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -3014,8 +2995,6 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
return get_unaligned_be16(eep->macAddr + 4);
case EEP_REG_0:
return le16_to_cpu(pBase->regDmn[0]);
- case EEP_REG_1:
- return le16_to_cpu(pBase->regDmn[1]);
case EEP_OP_CAP:
return pBase->deviceCap;
case EEP_OP_MODE:
@@ -3040,6 +3019,10 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
return (pBase->miscConfiguration >> 0x3) & 0x1;
case EEP_ANT_DIV_CTL1:
return eep->base_ext1.ant_div_control;
+ case EEP_ANTENNA_GAIN_5G:
+ return eep->modalHeader5G.antennaGain;
+ case EEP_ANTENNA_GAIN_2G:
+ return eep->modalHeader2G.antennaGain;
default:
return 0;
}
@@ -3318,7 +3301,7 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
word = kzalloc(2048, GFP_KERNEL);
if (!word)
- return -1;
+ return -ENOMEM;
memcpy(mptr, &ar9300_default, mdata_size);
@@ -3418,6 +3401,133 @@ static bool ath9k_hw_ar9300_fill_eeprom(struct ath_hw *ah)
return true;
}
+#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size,
+ struct ar9300_modal_eep_header *modal_hdr)
+{
+ PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
+ PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1]));
+ PR_EEP("Chain2 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[2]));
+ PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
+ PR_EEP("Ant. Common Control2", le32_to_cpu(modal_hdr->antCtrlCommon2));
+ PR_EEP("Ant. Gain", modal_hdr->antennaGain);
+ PR_EEP("Switch Settle", modal_hdr->switchSettling);
+ PR_EEP("Chain0 xatten1DB", modal_hdr->xatten1DB[0]);
+ PR_EEP("Chain1 xatten1DB", modal_hdr->xatten1DB[1]);
+ PR_EEP("Chain2 xatten1DB", modal_hdr->xatten1DB[2]);
+ PR_EEP("Chain0 xatten1Margin", modal_hdr->xatten1Margin[0]);
+ PR_EEP("Chain1 xatten1Margin", modal_hdr->xatten1Margin[1]);
+ PR_EEP("Chain2 xatten1Margin", modal_hdr->xatten1Margin[2]);
+ PR_EEP("Temp Slope", modal_hdr->tempSlope);
+ PR_EEP("Volt Slope", modal_hdr->voltSlope);
+ PR_EEP("spur Channels0", modal_hdr->spurChans[0]);
+ PR_EEP("spur Channels1", modal_hdr->spurChans[1]);
+ PR_EEP("spur Channels2", modal_hdr->spurChans[2]);
+ PR_EEP("spur Channels3", modal_hdr->spurChans[3]);
+ PR_EEP("spur Channels4", modal_hdr->spurChans[4]);
+ PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
+ PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
+ PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]);
+ PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
+ PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
+ PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
+ PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
+ PR_EEP("txClip", modal_hdr->txClip);
+ PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
+ PR_EEP("Chain0 ob", modal_hdr->ob[0]);
+ PR_EEP("Chain1 ob", modal_hdr->ob[1]);
+ PR_EEP("Chain2 ob", modal_hdr->ob[2]);
+
+ PR_EEP("Chain0 db_stage2", modal_hdr->db_stage2[0]);
+ PR_EEP("Chain1 db_stage2", modal_hdr->db_stage2[1]);
+ PR_EEP("Chain2 db_stage2", modal_hdr->db_stage2[2]);
+ PR_EEP("Chain0 db_stage3", modal_hdr->db_stage3[0]);
+ PR_EEP("Chain1 db_stage3", modal_hdr->db_stage3[1]);
+ PR_EEP("Chain2 db_stage3", modal_hdr->db_stage3[2]);
+ PR_EEP("Chain0 db_stage4", modal_hdr->db_stage4[0]);
+ PR_EEP("Chain1 db_stage4", modal_hdr->db_stage4[1]);
+ PR_EEP("Chain2 db_stage4", modal_hdr->db_stage4[2]);
+
+ return len;
+}
+
+static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase;
+
+ if (!dump_base_hdr) {
+ len += snprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
+ len += ar9003_dump_modal_eeprom(buf, len, size,
+ &eep->modalHeader2G);
+ len += snprintf(buf + len, size - len,
+ "%20s :\n", "5GHz modal Header");
+ len += ar9003_dump_modal_eeprom(buf, len, size,
+ &eep->modalHeader5G);
+ goto out;
+ }
+
+ pBase = &eep->baseEepHeader;
+
+ PR_EEP("EEPROM Version", ah->eeprom.ar9300_eep.eepromVersion);
+ PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0]));
+ PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1]));
+ PR_EEP("TX Mask", (pBase->txrxMask >> 4));
+ PR_EEP("RX Mask", (pBase->txrxMask & 0x0f));
+ PR_EEP("Allow 5GHz", !!(pBase->opCapFlags.opFlags &
+ AR5416_OPFLAGS_11A));
+ PR_EEP("Allow 2GHz", !!(pBase->opCapFlags.opFlags &
+ AR5416_OPFLAGS_11G));
+ PR_EEP("Disable 2GHz HT20", !!(pBase->opCapFlags.opFlags &
+ AR5416_OPFLAGS_N_2G_HT20));
+ PR_EEP("Disable 2GHz HT40", !!(pBase->opCapFlags.opFlags &
+ AR5416_OPFLAGS_N_2G_HT40));
+ PR_EEP("Disable 5Ghz HT20", !!(pBase->opCapFlags.opFlags &
+ AR5416_OPFLAGS_N_5G_HT20));
+ PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags.opFlags &
+ AR5416_OPFLAGS_N_5G_HT40));
+ PR_EEP("Big Endian", !!(pBase->opCapFlags.eepMisc & 0x01));
+ PR_EEP("RF Silent", pBase->rfSilent);
+ PR_EEP("BT option", pBase->blueToothOptions);
+ PR_EEP("Device Cap", pBase->deviceCap);
+ PR_EEP("Device Type", pBase->deviceType);
+ PR_EEP("Power Table Offset", pBase->pwrTableOffset);
+ PR_EEP("Tuning Caps1", pBase->params_for_tuning_caps[0]);
+ PR_EEP("Tuning Caps2", pBase->params_for_tuning_caps[1]);
+ PR_EEP("Enable Tx Temp Comp", !!(pBase->featureEnable & BIT(0)));
+ PR_EEP("Enable Tx Volt Comp", !!(pBase->featureEnable & BIT(1)));
+ PR_EEP("Enable fast clock", !!(pBase->featureEnable & BIT(2)));
+ PR_EEP("Enable doubling", !!(pBase->featureEnable & BIT(3)));
+ PR_EEP("Internal regulator", !!(pBase->featureEnable & BIT(4)));
+ PR_EEP("Enable Paprd", !!(pBase->featureEnable & BIT(5)));
+ PR_EEP("Driver Strength", !!(pBase->miscConfiguration & BIT(0)));
+ PR_EEP("Chain mask Reduce", (pBase->miscConfiguration >> 0x3) & 0x1);
+ PR_EEP("Write enable Gpio", pBase->eepromWriteEnableGpio);
+ PR_EEP("WLAN Disable Gpio", pBase->wlanDisableGpio);
+ PR_EEP("WLAN LED Gpio", pBase->wlanLedGpio);
+ PR_EEP("Rx Band Select Gpio", pBase->rxBandSelectGpio);
+ PR_EEP("Tx Gain", pBase->txrxgain >> 4);
+ PR_EEP("Rx Gain", pBase->txrxgain & 0xf);
+ PR_EEP("SW Reg", le32_to_cpu(pBase->swreg));
+
+ len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ ah->eeprom.ar9300_eep.macAddr);
+out:
+ if (len > size)
+ len = size;
+
+ return len;
+}
+#else
+static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ return 0;
+}
+#endif
+
/* XXX: review hardware docs */
static int ath9k_hw_ar9300_get_eeprom_ver(struct ath_hw *ah)
{
@@ -3446,6 +3556,8 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
+ else if (AR_SREV_9462(ah))
+ REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
else {
REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
REG_RMW_FIELD(ah, AR_CH0_THERM,
@@ -3456,6 +3568,19 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
}
}
+static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is_2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ __le32 val;
+
+ if (is_2ghz)
+ val = eep->modalHeader2G.switchcomspdt;
+ else
+ val = eep->modalHeader5G.switchcomspdt;
+ return le32_to_cpu(val);
+}
+
+
static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
{
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
@@ -3510,7 +3635,36 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
- REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, AR_SWITCH_TABLE_COM_ALL, value);
+ if (AR_SREV_9462(ah)) {
+ if (AR_SREV_9462_10(ah)) {
+ value &= ~AR_SWITCH_TABLE_COM_SPDT;
+ value |= 0x00100000;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
+ AR_SWITCH_TABLE_COM_AR9462_ALL, value);
+ } else
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
+ AR_SWITCH_TABLE_COM_ALL, value);
+
+
+ /*
+ * AR9462 defines new switch table for BT/WLAN,
+ * here's new field name in XXX.ref for both 2G and 5G.
+ * Register: [GLB_CONTROL] GLB_CONTROL (@0x20044)
+ * 15:12 R/W SWITCH_TABLE_COM_SPDT_WLAN_RX
+ * SWITCH_TABLE_COM_SPDT_WLAN_RX
+ *
+ * 11:8 R/W SWITCH_TABLE_COM_SPDT_WLAN_TX
+ * SWITCH_TABLE_COM_SPDT_WLAN_TX
+ *
+ * 7:4 R/W SWITCH_TABLE_COM_SPDT_WLAN_IDLE
+ * SWITCH_TABLE_COM_SPDT_WLAN_IDLE
+ */
+ if (AR_SREV_9462_20_OR_LATER(ah)) {
+ value = ar9003_switch_com_spdt_get(ah, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+ AR_SWITCH_TABLE_COM_SPDT_ALL, value);
+ }
value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
@@ -3710,6 +3864,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
{
int internal_regulator =
ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
+ u32 reg_val;
if (internal_regulator) {
if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
@@ -3754,13 +3909,16 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
return;
+ } else if (AR_SREV_9462(ah)) {
+ reg_val = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
+ REG_WRITE(ah, AR_PHY_PMU1, reg_val);
} else {
/* Internal regulator is ON. Write swreg register. */
- int swreg = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
+ reg_val = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
REG_WRITE(ah, AR_RTC_REG_CONTROL1,
REG_READ(ah, AR_RTC_REG_CONTROL1) &
(~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
- REG_WRITE(ah, AR_RTC_REG_CONTROL0, swreg);
+ REG_WRITE(ah, AR_RTC_REG_CONTROL0, reg_val);
/* Set REG_CONTROL1.SWREG_PROGRAM */
REG_WRITE(ah, AR_RTC_REG_CONTROL1,
REG_READ(ah,
@@ -3771,22 +3929,24 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0);
while (REG_READ_FIELD(ah, AR_PHY_PMU2,
- AR_PHY_PMU2_PGM))
+ AR_PHY_PMU2_PGM))
udelay(10);
REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
while (!REG_READ_FIELD(ah, AR_PHY_PMU1,
- AR_PHY_PMU1_PWD))
+ AR_PHY_PMU1_PWD))
udelay(10);
REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0x1);
while (!REG_READ_FIELD(ah, AR_PHY_PMU2,
- AR_PHY_PMU2_PGM))
+ AR_PHY_PMU2_PGM))
udelay(10);
- } else
- REG_WRITE(ah, AR_RTC_SLEEP_CLK,
- (REG_READ(ah,
- AR_RTC_SLEEP_CLK) |
- AR_RTC_FORCE_SWREG_PRD));
+ } else if (AR_SREV_9462(ah))
+ REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
+ else {
+ reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) |
+ AR_RTC_FORCE_SWREG_PRD;
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK, reg_val);
+ }
}
}
@@ -4061,7 +4221,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
/* Write the power for duplicated frames - HT40 */
/* dup40_cck (LSB), dup40_ofdm, ext20_cck, ext20_ofdm (MSB) */
- REG_WRITE(ah, 0xa3e0,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(8),
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
@@ -4366,6 +4526,12 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
tempSlope = eep->modalHeader5G.tempSlope;
REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);
+
+ if (AR_SREV_9462_20(ah))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_B1_ALPHA_THERM, tempSlope);
+
+
REG_RMW_FIELD(ah, AR_PHY_TPC_18, AR_PHY_TPC_18_THERM_CAL_VALUE,
temperature[0]);
@@ -4600,20 +4766,14 @@ static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
struct ath9k_channel *chan,
u8 *pPwrArray, u16 cfgCtl,
- u8 twiceAntennaReduction,
- u8 twiceMaxRegulatoryPower,
+ u8 antenna_reduction,
u16 powerLimit)
{
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
u16 twiceMaxEdgePower = MAX_RATE_POWER;
- static const u16 tpScaleReductionTable[5] = {
- 0, 3, 6, 9, MAX_RATE_POWER
- };
int i;
- int16_t twiceLargestAntenna;
- u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
+ u16 scaledPower = 0, minCtlPower;
static const u16 ctlModesFor11a[] = {
CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
};
@@ -4631,28 +4791,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
bool is2ghz = IS_CHAN_2GHZ(chan);
ath9k_hw_get_channel_centers(ah, chan, &centers);
-
- /* Compute TxPower reduction due to Antenna Gain */
- if (is2ghz)
- twiceLargestAntenna = pEepData->modalHeader2G.antennaGain;
- else
- twiceLargestAntenna = pEepData->modalHeader5G.antennaGain;
-
- twiceLargestAntenna = (int16_t)min((twiceAntennaReduction) -
- twiceLargestAntenna, 0);
-
- /*
- * scaledPower is the minimum of the user input power level
- * and the regulatory allowed power level
- */
- maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
-
- if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) {
- maxRegAllowedPower -=
- (tpScaleReductionTable[(regulatory->tp_scale)] * 2);
- }
-
- scaledPower = min(powerLimit, maxRegAllowedPower);
+ scaledPower = powerLimit - antenna_reduction;
/*
* Reduce scaled Power by number of chains active to get
@@ -4839,7 +4978,6 @@ static inline u8 mcsidx_to_tgtpwridx(unsigned int mcs_idx, u8 base_pwridx)
static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
struct ath9k_channel *chan, u16 cfgCtl,
u8 twiceAntennaReduction,
- u8 twiceMaxRegulatoryPower,
u8 powerLimit, bool test)
{
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
@@ -4892,7 +5030,6 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
ar9003_hw_set_power_per_rate_table(ah, chan,
targetPowerValT2, cfgCtl,
twiceAntennaReduction,
- twiceMaxRegulatoryPower,
powerLimit);
if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) {
@@ -4922,25 +5059,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
"TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
}
- /*
- * This is the TX power we send back to driver core,
- * and it can use to pass to userspace to display our
- * currently configured TX power setting.
- *
- * Since power is rate dependent, use one of the indices
- * from the AR9300_Rates enum to select an entry from
- * targetPowerValT2[] to report. Currently returns the
- * power for HT40 MCS 0, HT20 MCS 0, or OFDM 6 Mbps
- * as CCK power is less interesting (?).
- */
- i = ALL_TARGET_LEGACY_6_24; /* legacy */
- if (IS_CHAN_HT40(chan))
- i = ALL_TARGET_HT40_0_8_16; /* ht40 */
- else if (IS_CHAN_HT20(chan))
- i = ALL_TARGET_HT20_0_8_16; /* ht20 */
-
- ah->txpower_limit = targetPowerValT2[i];
- regulatory->max_power_level = targetPowerValT2[i];
+ ah->txpower_limit = regulatory->max_power_level;
/* Write target power array to registers */
ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
@@ -5015,6 +5134,7 @@ const struct eeprom_ops eep_ar9300_ops = {
.check_eeprom = ath9k_hw_ar9300_check_eeprom,
.get_eeprom = ath9k_hw_ar9300_get_eeprom,
.fill_eeprom = ath9k_hw_ar9300_fill_eeprom,
+ .dump_eeprom = ath9k_hw_ar9003_dump_eeprom,
.get_eeprom_ver = ath9k_hw_ar9300_get_eeprom_ver,
.get_eeprom_rev = ath9k_hw_ar9300_get_eeprom_rev,
.set_board_values = ath9k_hw_ar9300_set_board_values,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index ab21a491598..6335a867527 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -233,7 +233,8 @@ struct ar9300_modal_eep_header {
u8 thresh62;
__le32 papdRateMaskHt20;
__le32 papdRateMaskHt40;
- u8 futureModal[10];
+ __le16 switchcomspdt;
+ u8 futureModal[8];
} __packed;
struct ar9300_cal_data_per_freq_op_loop {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index ad2bb2bf4e8..fb937ba93e0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -21,6 +21,9 @@
#include "ar9340_initvals.h"
#include "ar9330_1p1_initvals.h"
#include "ar9330_1p2_initvals.h"
+#include "ar9580_1p0_initvals.h"
+#include "ar9462_1p0_initvals.h"
+#include "ar9462_2p0_initvals.h"
/* General hardware code for the AR9003 hadware family */
@@ -31,6 +34,14 @@
*/
static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
{
+#define PCIE_PLL_ON_CREQ_DIS_L1_2P0 \
+ ar9462_pciephy_pll_on_clkreq_disable_L1_2p0
+
+#define AR9462_BB_CTX_COEFJ(x) \
+ ar9462_##x##_baseband_core_txfir_coeff_japan_2484
+
+#define AR9462_BBC_TXIFR_COEFFJ \
+ ar9462_2p0_baseband_core_txfir_coeff_japan_2484
if (AR_SREV_9330_11(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
@@ -253,6 +264,182 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9485_1_1_pcie_phy_clkreq_disable_L1,
ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
2);
+ } else if (AR_SREV_9462_10(ah)) {
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_1p0_mac_core,
+ ARRAY_SIZE(ar9462_1p0_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9462_1p0_mac_postamble,
+ ARRAY_SIZE(ar9462_1p0_mac_postamble),
+ 5);
+
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9462_1p0_baseband_core,
+ ARRAY_SIZE(ar9462_1p0_baseband_core),
+ 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9462_1p0_baseband_postamble,
+ ARRAY_SIZE(ar9462_1p0_baseband_postamble), 5);
+
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9462_1p0_radio_core,
+ ARRAY_SIZE(ar9462_1p0_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9462_1p0_radio_postamble,
+ ARRAY_SIZE(ar9462_1p0_radio_postamble), 5);
+
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9462_1p0_soc_preamble,
+ ARRAY_SIZE(ar9462_1p0_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9462_1p0_soc_postamble,
+ ARRAY_SIZE(ar9462_1p0_soc_postamble), 5);
+
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_common_rx_gain_table_1p0,
+ ARRAY_SIZE(ar9462_common_rx_gain_table_1p0), 2);
+
+ /* Awake -> Sleep Setting */
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9462_pcie_phy_clkreq_disable_L1_1p0,
+ ARRAY_SIZE(ar9462_pcie_phy_clkreq_disable_L1_1p0),
+ 2);
+
+ /* Sleep -> Awake Setting */
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9462_pcie_phy_clkreq_disable_L1_1p0,
+ ARRAY_SIZE(ar9462_pcie_phy_clkreq_disable_L1_1p0),
+ 2);
+
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9462_modes_fast_clock_1p0,
+ ARRAY_SIZE(ar9462_modes_fast_clock_1p0), 3);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ AR9462_BB_CTX_COEFJ(1p0),
+ ARRAY_SIZE(AR9462_BB_CTX_COEFJ(1p0)), 2);
+
+ } else if (AR_SREV_9462_20(ah)) {
+
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core,
+ ARRAY_SIZE(ar9462_2p0_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9462_2p0_mac_postamble,
+ ARRAY_SIZE(ar9462_2p0_mac_postamble), 5);
+
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9462_2p0_baseband_core,
+ ARRAY_SIZE(ar9462_2p0_baseband_core), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9462_2p0_baseband_postamble,
+ ARRAY_SIZE(ar9462_2p0_baseband_postamble), 5);
+
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9462_2p0_radio_core,
+ ARRAY_SIZE(ar9462_2p0_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9462_2p0_radio_postamble,
+ ARRAY_SIZE(ar9462_2p0_radio_postamble), 5);
+ INIT_INI_ARRAY(&ah->ini_radio_post_sys2ant,
+ ar9462_2p0_radio_postamble_sys2ant,
+ ARRAY_SIZE(ar9462_2p0_radio_postamble_sys2ant),
+ 5);
+
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9462_2p0_soc_preamble,
+ ARRAY_SIZE(ar9462_2p0_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9462_2p0_soc_postamble,
+ ARRAY_SIZE(ar9462_2p0_soc_postamble), 5);
+
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_common_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9462_common_rx_gain_table_2p0), 2);
+
+ INIT_INI_ARRAY(&ah->ini_BTCOEX_MAX_TXPWR,
+ ar9462_2p0_BTCOEX_MAX_TXPWR_table,
+ ARRAY_SIZE(ar9462_2p0_BTCOEX_MAX_TXPWR_table),
+ 2);
+
+ /* Awake -> Sleep Setting */
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ PCIE_PLL_ON_CREQ_DIS_L1_2P0,
+ ARRAY_SIZE(PCIE_PLL_ON_CREQ_DIS_L1_2P0),
+ 2);
+ /* Sleep -> Awake Setting */
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ PCIE_PLL_ON_CREQ_DIS_L1_2P0,
+ ARRAY_SIZE(PCIE_PLL_ON_CREQ_DIS_L1_2P0),
+ 2);
+
+ /* Fast clock modal settings */
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9462_modes_fast_clock_2p0,
+ ARRAY_SIZE(ar9462_modes_fast_clock_2p0), 3);
+
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ AR9462_BB_CTX_COEFJ(2p0),
+ ARRAY_SIZE(AR9462_BB_CTX_COEFJ(2p0)), 2);
+
+ INIT_INI_ARRAY(&ah->ini_japan2484, AR9462_BBC_TXIFR_COEFFJ,
+ ARRAY_SIZE(AR9462_BBC_TXIFR_COEFFJ), 2);
+
+ } else if (AR_SREV_9580(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9580_1p0_mac_core,
+ ARRAY_SIZE(ar9580_1p0_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9580_1p0_mac_postamble,
+ ARRAY_SIZE(ar9580_1p0_mac_postamble), 5);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9580_1p0_baseband_core,
+ ARRAY_SIZE(ar9580_1p0_baseband_core), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9580_1p0_baseband_postamble,
+ ARRAY_SIZE(ar9580_1p0_baseband_postamble), 5);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9580_1p0_radio_core,
+ ARRAY_SIZE(ar9580_1p0_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9580_1p0_radio_postamble,
+ ARRAY_SIZE(ar9580_1p0_radio_postamble), 5);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9580_1p0_soc_preamble,
+ ARRAY_SIZE(ar9580_1p0_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9580_1p0_soc_postamble,
+ ARRAY_SIZE(ar9580_1p0_soc_postamble), 5);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9580_1p0_rx_gain_table,
+ ARRAY_SIZE(ar9580_1p0_rx_gain_table), 2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_low_ob_db_tx_gain_table,
+ ARRAY_SIZE(ar9580_1p0_low_ob_db_tx_gain_table),
+ 5);
+
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9580_1p0_modes_fast_clock,
+ ARRAY_SIZE(ar9580_1p0_modes_fast_clock),
+ 3);
} else {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
@@ -323,178 +510,293 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
}
}
+static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
+{
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_lowest_ob_db_tx_gain_1p2,
+ ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p2),
+ 5);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_lowest_ob_db_tx_gain_1p1,
+ ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p1),
+ 5);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
+ ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
+ 5);
+ else if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485_modes_lowest_ob_db_tx_gain_1_1,
+ ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
+ 5);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_lowest_ob_db_tx_gain_table,
+ ARRAY_SIZE(ar9580_1p0_lowest_ob_db_tx_gain_table),
+ 5);
+ else if (AR_SREV_9462_10(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_modes_low_ob_db_tx_gain_table_1p0,
+ ARRAY_SIZE(ar9462_modes_low_ob_db_tx_gain_table_1p0),
+ 5);
+ else if (AR_SREV_9462_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_modes_low_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9462_modes_low_ob_db_tx_gain_table_2p0),
+ 5);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
+ 5);
+}
+
+static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
+{
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_high_ob_db_tx_gain_1p2,
+ ARRAY_SIZE(ar9331_modes_high_ob_db_tx_gain_1p2),
+ 5);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_high_ob_db_tx_gain_1p1,
+ ARRAY_SIZE(ar9331_modes_high_ob_db_tx_gain_1p1),
+ 5);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
+ ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
+ 5);
+ else if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_high_ob_db_tx_gain_1_1,
+ ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1),
+ 5);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_high_ob_db_tx_gain_table,
+ ARRAY_SIZE(ar9580_1p0_high_ob_db_tx_gain_table),
+ 5);
+ else if (AR_SREV_9462_10(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_modes_high_ob_db_tx_gain_table_1p0,
+ ARRAY_SIZE(ar9462_modes_high_ob_db_tx_gain_table_1p0),
+ 5);
+ else if (AR_SREV_9462_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_modes_high_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9462_modes_high_ob_db_tx_gain_table_2p0),
+ 5);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_ob_db_tx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2),
+ 5);
+}
+
+static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
+{
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_low_ob_db_tx_gain_1p2,
+ ARRAY_SIZE(ar9331_modes_low_ob_db_tx_gain_1p2),
+ 5);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_low_ob_db_tx_gain_1p1,
+ ARRAY_SIZE(ar9331_modes_low_ob_db_tx_gain_1p1),
+ 5);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
+ ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
+ 5);
+ else if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_low_ob_db_tx_gain_1_1,
+ ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1),
+ 5);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_low_ob_db_tx_gain_table,
+ ARRAY_SIZE(ar9580_1p0_low_ob_db_tx_gain_table),
+ 5);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_low_ob_db_tx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2),
+ 5);
+}
+
+static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
+{
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_high_power_tx_gain_1p2,
+ ARRAY_SIZE(ar9331_modes_high_power_tx_gain_1p2),
+ 5);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_high_power_tx_gain_1p1,
+ ARRAY_SIZE(ar9331_modes_high_power_tx_gain_1p1),
+ 5);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
+ ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
+ 5);
+ else if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_high_power_tx_gain_1_1,
+ ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1),
+ 5);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_high_power_tx_gain_table,
+ ARRAY_SIZE(ar9580_1p0_high_power_tx_gain_table),
+ 5);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_power_tx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Modes_high_power_tx_gain_table_2p2),
+ 5);
+}
+
static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
{
switch (ar9003_hw_get_tx_gain_idx(ah)) {
case 0:
default:
- if (AR_SREV_9330_12(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_lowest_ob_db_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p2),
- 5);
- else if (AR_SREV_9330_11(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_lowest_ob_db_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p1),
- 5);
- else if (AR_SREV_9340(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
- 5);
- else if (AR_SREV_9485_11(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485_modes_lowest_ob_db_tx_gain_1_1,
- ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
- 5);
- else
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
- 5);
+ ar9003_tx_gain_table_mode0(ah);
break;
case 1:
- if (AR_SREV_9330_12(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_high_ob_db_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_high_ob_db_tx_gain_1p2),
- 5);
- else if (AR_SREV_9330_11(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_high_ob_db_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_high_ob_db_tx_gain_1p1),
- 5);
- else if (AR_SREV_9340(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
- 5);
- else if (AR_SREV_9485_11(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485Modes_high_ob_db_tx_gain_1_1,
- ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1),
- 5);
- else
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_high_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2),
- 5);
+ ar9003_tx_gain_table_mode1(ah);
break;
case 2:
- if (AR_SREV_9330_12(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_low_ob_db_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_low_ob_db_tx_gain_1p2),
- 5);
- else if (AR_SREV_9330_11(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_low_ob_db_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_low_ob_db_tx_gain_1p1),
- 5);
- else if (AR_SREV_9340(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
- 5);
- else if (AR_SREV_9485_11(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485Modes_low_ob_db_tx_gain_1_1,
- ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1),
- 5);
- else
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_low_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2),
- 5);
+ ar9003_tx_gain_table_mode2(ah);
break;
case 3:
- if (AR_SREV_9330_12(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_high_power_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_high_power_tx_gain_1p2),
- 5);
- else if (AR_SREV_9330_11(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_high_power_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_high_power_tx_gain_1p1),
- 5);
- else if (AR_SREV_9340(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
- 5);
- else if (AR_SREV_9485_11(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485Modes_high_power_tx_gain_1_1,
- ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1),
- 5);
- else
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_high_power_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_high_power_tx_gain_table_2p2),
- 5);
+ ar9003_tx_gain_table_mode3(ah);
break;
}
}
+static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
+{
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_rx_gain_1p2,
+ ARRAY_SIZE(ar9331_common_rx_gain_1p2),
+ 2);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_rx_gain_1p1,
+ ARRAY_SIZE(ar9331_common_rx_gain_1p1),
+ 2);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9340Common_rx_gain_table_1p0,
+ ARRAY_SIZE(ar9340Common_rx_gain_table_1p0),
+ 2);
+ else if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485Common_wo_xlna_rx_gain_1_1,
+ ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
+ 2);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9580_1p0_rx_gain_table,
+ ARRAY_SIZE(ar9580_1p0_rx_gain_table),
+ 2);
+ else if (AR_SREV_9462_10(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_common_rx_gain_table_1p0,
+ ARRAY_SIZE(ar9462_common_rx_gain_table_1p0),
+ 2);
+ else if (AR_SREV_9462_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_common_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9462_common_rx_gain_table_2p0),
+ 2);
+ else
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_rx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p2),
+ 2);
+}
+
+static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
+{
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_wo_xlna_rx_gain_1p2,
+ ARRAY_SIZE(ar9331_common_wo_xlna_rx_gain_1p2),
+ 2);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_wo_xlna_rx_gain_1p1,
+ ARRAY_SIZE(ar9331_common_wo_xlna_rx_gain_1p1),
+ 2);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9340Common_wo_xlna_rx_gain_table_1p0,
+ ARRAY_SIZE(ar9340Common_wo_xlna_rx_gain_table_1p0),
+ 2);
+ else if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485Common_wo_xlna_rx_gain_1_1,
+ ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
+ 2);
+ else if (AR_SREV_9462_10(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_common_wo_xlna_rx_gain_table_1p0,
+ ARRAY_SIZE(ar9462_common_wo_xlna_rx_gain_table_1p0),
+ 2);
+ else if (AR_SREV_9462_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_common_wo_xlna_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9462_common_wo_xlna_rx_gain_table_2p0),
+ 2);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9580_1p0_wo_xlna_rx_gain_table,
+ ARRAY_SIZE(ar9580_1p0_wo_xlna_rx_gain_table),
+ 2);
+ else
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_wo_xlna_rx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2),
+ 2);
+}
+
+static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
+{
+ if (AR_SREV_9462_10(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_common_mixed_rx_gain_table_1p0,
+ ARRAY_SIZE(ar9462_common_mixed_rx_gain_table_1p0), 2);
+ else if (AR_SREV_9462_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_common_mixed_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9462_common_mixed_rx_gain_table_2p0), 2);
+}
+
static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
{
switch (ar9003_hw_get_rx_gain_idx(ah)) {
case 0:
default:
- if (AR_SREV_9330_12(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_rx_gain_1p2,
- ARRAY_SIZE(ar9331_common_rx_gain_1p2),
- 2);
- else if (AR_SREV_9330_11(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_rx_gain_1p1,
- ARRAY_SIZE(ar9331_common_rx_gain_1p1),
- 2);
- else if (AR_SREV_9340(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9340Common_rx_gain_table_1p0,
- ARRAY_SIZE(ar9340Common_rx_gain_table_1p0),
- 2);
- else if (AR_SREV_9485_11(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9485Common_wo_xlna_rx_gain_1_1,
- ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
- 2);
- else
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9300Common_rx_gain_table_2p2,
- ARRAY_SIZE(ar9300Common_rx_gain_table_2p2),
- 2);
+ ar9003_rx_gain_table_mode0(ah);
break;
case 1:
- if (AR_SREV_9330_12(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_wo_xlna_rx_gain_1p2,
- ARRAY_SIZE(ar9331_common_wo_xlna_rx_gain_1p2),
- 2);
- else if (AR_SREV_9330_11(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_wo_xlna_rx_gain_1p1,
- ARRAY_SIZE(ar9331_common_wo_xlna_rx_gain_1p1),
- 2);
- else if (AR_SREV_9340(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9340Common_wo_xlna_rx_gain_table_1p0,
- ARRAY_SIZE(ar9340Common_wo_xlna_rx_gain_table_1p0),
- 2);
- else if (AR_SREV_9485_11(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9485Common_wo_xlna_rx_gain_1_1,
- ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
- 2);
- else
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9300Common_wo_xlna_rx_gain_table_2p2,
- ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2),
- 2);
+ ar9003_rx_gain_table_mode1(ah);
+ break;
+ case 2:
+ ar9003_rx_gain_table_mode2(ah);
break;
}
}
@@ -516,14 +818,10 @@ static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
* register as the other analog registers. Hence the 9 writes.
*/
static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
- int restore,
- int power_off)
+ bool power_off)
{
- if (ah->is_pciexpress != true || ah->aspm_enabled != true)
- return;
-
/* Nothing to do on restore for 11N */
- if (!restore) {
+ if (!power_off /* !restore */) {
/* set bit 19 to allow forcing of pcie core into L1 state */
REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 8ff0b88a29b..b363cc06cfd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -21,6 +21,132 @@ static void ar9003_hw_rx_enable(struct ath_hw *hw)
REG_WRITE(hw, AR_CR, 0);
}
+static void
+ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+{
+ struct ar9003_txc *ads = ds;
+ int checksum = 0;
+ u32 val, ctl12, ctl17;
+
+ val = (ATHEROS_VENDOR_ID << AR_DescId_S) |
+ (1 << AR_TxRxDesc_S) |
+ (1 << AR_CtrlStat_S) |
+ (i->qcu << AR_TxQcuNum_S) | 0x17;
+
+ checksum += val;
+ ACCESS_ONCE(ads->info) = val;
+
+ checksum += i->link;
+ ACCESS_ONCE(ads->link) = i->link;
+
+ checksum += i->buf_addr[0];
+ ACCESS_ONCE(ads->data0) = i->buf_addr[0];
+ checksum += i->buf_addr[1];
+ ACCESS_ONCE(ads->data1) = i->buf_addr[1];
+ checksum += i->buf_addr[2];
+ ACCESS_ONCE(ads->data2) = i->buf_addr[2];
+ checksum += i->buf_addr[3];
+ ACCESS_ONCE(ads->data3) = i->buf_addr[3];
+
+ checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
+ ACCESS_ONCE(ads->ctl3) = val;
+ checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
+ ACCESS_ONCE(ads->ctl5) = val;
+ checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
+ ACCESS_ONCE(ads->ctl7) = val;
+ checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
+ ACCESS_ONCE(ads->ctl9) = val;
+
+ checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
+ ACCESS_ONCE(ads->ctl10) = checksum;
+
+ if (i->is_first || i->is_last) {
+ ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
+ | set11nTries(i->rates, 1)
+ | set11nTries(i->rates, 2)
+ | set11nTries(i->rates, 3)
+ | (i->dur_update ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+ ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
+ | set11nRate(i->rates, 1)
+ | set11nRate(i->rates, 2)
+ | set11nRate(i->rates, 3);
+ } else {
+ ACCESS_ONCE(ads->ctl13) = 0;
+ ACCESS_ONCE(ads->ctl14) = 0;
+ }
+
+ ads->ctl20 = 0;
+ ads->ctl21 = 0;
+ ads->ctl22 = 0;
+
+ ctl17 = SM(i->keytype, AR_EncrType);
+ if (!i->is_first) {
+ ACCESS_ONCE(ads->ctl11) = 0;
+ ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
+ ACCESS_ONCE(ads->ctl15) = 0;
+ ACCESS_ONCE(ads->ctl16) = 0;
+ ACCESS_ONCE(ads->ctl17) = ctl17;
+ ACCESS_ONCE(ads->ctl18) = 0;
+ ACCESS_ONCE(ads->ctl19) = 0;
+ return;
+ }
+
+ ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
+ | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(i->txpower, AR_XmitPower)
+ | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+ | (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
+ | (i->flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0)
+ | (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
+ | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
+ (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
+
+ ctl12 = (i->keyix != ATH9K_TXKEYIX_INVALID ?
+ SM(i->keyix, AR_DestIdx) : 0)
+ | SM(i->type, AR_FrameType)
+ | (i->flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
+ | (i->flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
+ | (i->flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
+
+ ctl17 |= (i->flags & ATH9K_TXDESC_LDPC ? AR_LDPC : 0);
+ switch (i->aggr) {
+ case AGGR_BUF_FIRST:
+ ctl17 |= SM(i->aggr_len, AR_AggrLen);
+ /* fall through */
+ case AGGR_BUF_MIDDLE:
+ ctl12 |= AR_IsAggr | AR_MoreAggr;
+ ctl17 |= SM(i->ndelim, AR_PadDelim);
+ break;
+ case AGGR_BUF_LAST:
+ ctl12 |= AR_IsAggr;
+ break;
+ case AGGR_BUF_NONE:
+ break;
+ }
+
+ val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
+ ctl12 |= SM(val, AR_PAPRDChainMask);
+
+ ACCESS_ONCE(ads->ctl12) = ctl12;
+ ACCESS_ONCE(ads->ctl17) = ctl17;
+
+ ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
+ | set11nPktDurRTSCTS(i->rates, 1);
+
+ ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
+ | set11nPktDurRTSCTS(i->rates, 3);
+
+ ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
+ | set11nRateFlags(i->rates, 1)
+ | set11nRateFlags(i->rates, 2)
+ | set11nRateFlags(i->rates, 3)
+ | SM(i->rtscts_rate, AR_RTSCTSRate);
+
+ ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
+}
+
static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
{
int checksum;
@@ -185,47 +311,6 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
return true;
}
-static void ar9003_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
- bool is_firstseg, bool is_lastseg,
- const void *ds0, dma_addr_t buf_addr,
- unsigned int qcu)
-{
- struct ar9003_txc *ads = (struct ar9003_txc *) ds;
- unsigned int descid = 0;
-
- ads->info = (ATHEROS_VENDOR_ID << AR_DescId_S) |
- (1 << AR_TxRxDesc_S) |
- (1 << AR_CtrlStat_S) |
- (qcu << AR_TxQcuNum_S) | 0x17;
-
- ads->data0 = buf_addr;
- ads->data1 = 0;
- ads->data2 = 0;
- ads->data3 = 0;
-
- ads->ctl3 = (seglen << AR_BufLen_S);
- ads->ctl3 &= AR_BufLen;
-
- /* Fill in pointer checksum and descriptor id */
- ads->ctl10 = ar9003_calc_ptr_chksum(ads);
- ads->ctl10 |= (descid << AR_TxDescId_S);
-
- if (is_firstseg) {
- ads->ctl12 |= (is_lastseg ? 0 : AR_TxMore);
- } else if (is_lastseg) {
- ads->ctl11 = 0;
- ads->ctl12 = 0;
- ads->ctl13 = AR9003TXC_CONST(ds0)->ctl13;
- ads->ctl14 = AR9003TXC_CONST(ds0)->ctl14;
- } else {
- /* XXX Intermediate descriptor in a multi-descriptor frame.*/
- ads->ctl11 = 0;
- ads->ctl12 = AR_TxMore;
- ads->ctl13 = 0;
- ads->ctl14 = 0;
- }
-}
-
static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
struct ath_tx_status *ts)
{
@@ -253,8 +338,6 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
return -EIO;
}
- if (status & AR_TxOpExceeded)
- ts->ts_status |= ATH9K_TXERR_XTXOP;
ts->ts_rateindex = MS(status, AR_FinalTxIdx);
ts->ts_seqnum = MS(status, AR_SeqNum);
ts->tid = MS(status, AR_TxTid);
@@ -264,6 +347,8 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
ts->ts_status = 0;
ts->ts_flags = 0;
+ if (status & AR_TxOpExceeded)
+ ts->ts_status |= ATH9K_TXERR_XTXOP;
status = ACCESS_ONCE(ads->status2);
ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
@@ -310,185 +395,6 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
return 0;
}
-static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
- u32 pktlen, enum ath9k_pkt_type type, u32 txpower,
- u32 keyIx, enum ath9k_key_type keyType, u32 flags)
-{
- struct ar9003_txc *ads = (struct ar9003_txc *) ds;
-
- if (txpower > ah->txpower_limit)
- txpower = ah->txpower_limit;
-
- if (txpower > 63)
- txpower = 63;
-
- ads->ctl11 = (pktlen & AR_FrameLen)
- | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
- | SM(txpower, AR_XmitPower)
- | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
- | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
- | (flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0);
-
- ads->ctl12 =
- (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
- | SM(type, AR_FrameType)
- | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
- | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
- | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
-
- ads->ctl17 = SM(keyType, AR_EncrType) |
- (flags & ATH9K_TXDESC_LDPC ? AR_LDPC : 0);
- ads->ctl18 = 0;
- ads->ctl19 = AR_Not_Sounding;
-
- ads->ctl20 = 0;
- ads->ctl21 = 0;
- ads->ctl22 = 0;
-}
-
-static void ar9003_hw_set_clrdmask(struct ath_hw *ah, void *ds, bool val)
-{
- struct ar9003_txc *ads = (struct ar9003_txc *) ds;
-
- if (val)
- ads->ctl11 |= AR_ClrDestMask;
- else
- ads->ctl11 &= ~AR_ClrDestMask;
-}
-
-static void ar9003_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
- void *lastds,
- u32 durUpdateEn, u32 rtsctsRate,
- u32 rtsctsDuration,
- struct ath9k_11n_rate_series series[],
- u32 nseries, u32 flags)
-{
- struct ar9003_txc *ads = (struct ar9003_txc *) ds;
- struct ar9003_txc *last_ads = (struct ar9003_txc *) lastds;
- u_int32_t ctl11;
-
- if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
- ctl11 = ads->ctl11;
-
- if (flags & ATH9K_TXDESC_RTSENA) {
- ctl11 &= ~AR_CTSEnable;
- ctl11 |= AR_RTSEnable;
- } else {
- ctl11 &= ~AR_RTSEnable;
- ctl11 |= AR_CTSEnable;
- }
-
- ads->ctl11 = ctl11;
- } else {
- ads->ctl11 = (ads->ctl11 & ~(AR_RTSEnable | AR_CTSEnable));
- }
-
- ads->ctl13 = set11nTries(series, 0)
- | set11nTries(series, 1)
- | set11nTries(series, 2)
- | set11nTries(series, 3)
- | (durUpdateEn ? AR_DurUpdateEna : 0)
- | SM(0, AR_BurstDur);
-
- ads->ctl14 = set11nRate(series, 0)
- | set11nRate(series, 1)
- | set11nRate(series, 2)
- | set11nRate(series, 3);
-
- ads->ctl15 = set11nPktDurRTSCTS(series, 0)
- | set11nPktDurRTSCTS(series, 1);
-
- ads->ctl16 = set11nPktDurRTSCTS(series, 2)
- | set11nPktDurRTSCTS(series, 3);
-
- ads->ctl18 = set11nRateFlags(series, 0)
- | set11nRateFlags(series, 1)
- | set11nRateFlags(series, 2)
- | set11nRateFlags(series, 3)
- | SM(rtsctsRate, AR_RTSCTSRate);
- ads->ctl19 = AR_Not_Sounding;
-
- last_ads->ctl13 = ads->ctl13;
- last_ads->ctl14 = ads->ctl14;
-}
-
-static void ar9003_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
- u32 aggrLen)
-{
-#define FIRST_DESC_NDELIMS 60
- struct ar9003_txc *ads = (struct ar9003_txc *) ds;
-
- ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
-
- if (ah->ent_mode & AR_ENT_OTP_MPSD) {
- u32 ctl17, ndelim;
- /*
- * Add delimiter when using RTS/CTS with aggregation
- * and non enterprise AR9003 card
- */
- ctl17 = ads->ctl17;
- ndelim = MS(ctl17, AR_PadDelim);
-
- if (ndelim < FIRST_DESC_NDELIMS) {
- aggrLen += (FIRST_DESC_NDELIMS - ndelim) * 4;
- ndelim = FIRST_DESC_NDELIMS;
- }
-
- ctl17 &= ~AR_AggrLen;
- ctl17 |= SM(aggrLen, AR_AggrLen);
-
- ctl17 &= ~AR_PadDelim;
- ctl17 |= SM(ndelim, AR_PadDelim);
-
- ads->ctl17 = ctl17;
- } else {
- ads->ctl17 &= ~AR_AggrLen;
- ads->ctl17 |= SM(aggrLen, AR_AggrLen);
- }
-}
-
-static void ar9003_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
- u32 numDelims)
-{
- struct ar9003_txc *ads = (struct ar9003_txc *) ds;
- unsigned int ctl17;
-
- ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
-
- /*
- * We use a stack variable to manipulate ctl6 to reduce uncached
- * read modify, modfiy, write.
- */
- ctl17 = ads->ctl17;
- ctl17 &= ~AR_PadDelim;
- ctl17 |= SM(numDelims, AR_PadDelim);
- ads->ctl17 = ctl17;
-}
-
-static void ar9003_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
-{
- struct ar9003_txc *ads = (struct ar9003_txc *) ds;
-
- ads->ctl12 |= AR_IsAggr;
- ads->ctl12 &= ~AR_MoreAggr;
- ads->ctl17 &= ~AR_PadDelim;
-}
-
-static void ar9003_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
-{
- struct ar9003_txc *ads = (struct ar9003_txc *) ds;
-
- ads->ctl12 &= (~AR_IsAggr & ~AR_MoreAggr);
-}
-
-void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains)
-{
- struct ar9003_txc *ads = ds;
-
- ads->ctl12 |= SM(chains, AR_PAPRDChainMask);
-}
-EXPORT_SYMBOL(ar9003_hw_set_paprd_txdesc);
-
void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
{
struct ath_hw_ops *ops = ath9k_hw_ops(hw);
@@ -496,15 +402,8 @@ void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
ops->rx_enable = ar9003_hw_rx_enable;
ops->set_desc_link = ar9003_hw_set_desc_link;
ops->get_isr = ar9003_hw_get_isr;
- ops->fill_txdesc = ar9003_hw_fill_txdesc;
+ ops->set_txdesc = ar9003_set_txdesc;
ops->proc_txdesc = ar9003_hw_proc_txdesc;
- ops->set11n_txdesc = ar9003_hw_set11n_txdesc;
- ops->set11n_ratescenario = ar9003_hw_set11n_ratescenario;
- ops->set11n_aggr_first = ar9003_hw_set11n_aggr_first;
- ops->set11n_aggr_middle = ar9003_hw_set11n_aggr_middle;
- ops->set11n_aggr_last = ar9003_hw_set11n_aggr_last;
- ops->clr11n_aggr = ar9003_hw_clr11n_aggr;
- ops->set_clrdmask = ar9003_hw_set_clrdmask;
}
void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size)
@@ -531,17 +430,18 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
/* TODO: byte swap on big endian for ar9300_10 */
- if ((rxsp->status11 & AR_RxDone) == 0)
- return -EINPROGRESS;
+ if (!rxs) {
+ if ((rxsp->status11 & AR_RxDone) == 0)
+ return -EINPROGRESS;
- if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
- return -EINVAL;
+ if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
+ return -EINVAL;
- if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
- return -EINPROGRESS;
+ if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
+ return -EINPROGRESS;
- if (!rxs)
return 0;
+ }
rxs->rs_status = 0;
rxs->rs_flags = 0;
@@ -625,8 +525,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (rxsp->status11 & AR_MichaelErr)
rxs->rs_status |= ATH9K_RXERR_MIC;
- else if (rxsp->status11 & AR_KeyMiss)
- rxs->rs_status |= ATH9K_RXERR_DECRYPT;
+ if (rxsp->status11 & AR_KeyMiss)
+ rxs->rs_status |= ATH9K_RXERR_KEYMISS;
}
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index f80d1d63398..0c462c904cb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -19,7 +19,6 @@
void ar9003_paprd_enable(struct ath_hw *ah, bool val)
{
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath9k_channel *chan = ah->curchan;
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
@@ -54,13 +53,7 @@ void ar9003_paprd_enable(struct ath_hw *ah, bool val)
if (val) {
ah->paprd_table_write_done = true;
-
- ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(regulatory, chan),
- chan->chan->max_antenna_gain * 2,
- chan->chan->max_power * 2,
- min((u32) MAX_RATE_POWER,
- (u32) regulatory->power_limit), false);
+ ath9k_hw_apply_txpower(ah, chan);
}
REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0,
@@ -113,7 +106,7 @@ static int ar9003_get_training_power_5g(struct ath_hw *ah)
if (delta > scale)
return -1;
- switch (get_streams(common->tx_chainmask)) {
+ switch (get_streams(ah->txchainmask)) {
case 1:
delta = 6;
break;
@@ -126,7 +119,7 @@ static int ar9003_get_training_power_5g(struct ath_hw *ah)
default:
delta = 0;
ath_dbg(common, ATH_DBG_CALIBRATE,
- "Invalid tx-chainmask: %u\n", common->tx_chainmask);
+ "Invalid tx-chainmask: %u\n", ah->txchainmask);
}
power += delta;
@@ -147,7 +140,7 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
AR_PHY_PAPRD_CTRL1_B2
};
int training_power;
- int i;
+ int i, val;
if (IS_CHAN_2GHZ(ah->curchan))
training_power = ar9003_get_training_power_2g(ah);
@@ -207,8 +200,9 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING, 28);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE, 1);
+ val = AR_SREV_9462(ah) ? 0x91 : 147;
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2,
- AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, 147);
+ AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, val);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN, 4);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
@@ -217,7 +211,7 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
- if (AR_SREV_9485(ah))
+ if (AR_SREV_9485(ah) || AR_SREV_9462(ah))
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
-3);
@@ -225,9 +219,10 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
-6);
+ val = AR_SREV_9462(ah) ? -10 : -15;
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE,
- -15);
+ val);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE, 1);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
@@ -757,6 +752,7 @@ void ar9003_paprd_populate_single_table(struct ath_hw *ah,
training_power);
if (ah->caps.tx_chainmask & BIT(2))
+ /* val AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL correct? */
REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B2,
AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
training_power);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index fcafec0605f..fe96997921d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -370,7 +370,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
else
spur_subchannel_sd = 0;
- spur_freq_sd = ((freq_offset + 10) << 9) / 11;
+ spur_freq_sd = (freq_offset << 9) / 11;
} else {
if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
@@ -379,7 +379,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
else
spur_subchannel_sd = 1;
- spur_freq_sd = ((freq_offset - 10) << 9) / 11;
+ spur_freq_sd = (freq_offset << 9) / 11;
}
@@ -482,7 +482,7 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
(REG_READ(ah, AR_PHY_GEN_CTRL) & AR_PHY_GC_ENABLE_DAC_FIFO);
/* Enable 11n HT, 20 MHz */
- phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SINGLE_HT_LTF1 | AR_PHY_GC_WALSH |
+ phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SINGLE_HT_LTF1 |
AR_PHY_GC_SHORT_GI_40 | enableDacFifo;
/* Configure baseband for dynamic 20/40 operation */
@@ -540,7 +540,7 @@ static void ar9003_hw_init_bb(struct ath_hw *ah,
udelay(synthDelay + BASE_ACTIVATE_DELAY);
}
-void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
{
switch (rx) {
case 0x5:
@@ -559,6 +559,9 @@ void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
+ else if (AR_SREV_9462(ah))
+ /* xxx only when MCI support is enabled */
+ REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
else
REG_WRITE(ah, AR_SELFGEN_MASK, tx);
@@ -592,6 +595,9 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
REG_WRITE(ah, AR_PCU_MISC_MODE2,
val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
+
+ REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
+ AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
}
static void ar9003_hw_prog_ini(struct ath_hw *ah,
@@ -625,9 +631,7 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah,
static int ar9003_hw_process_ini(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
unsigned int regWrites = 0, i;
- struct ieee80211_channel *channel = chan->chan;
u32 modesIndex;
switch (chan->chanmode) {
@@ -658,6 +662,10 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex);
+ if (i == ATH_INI_POST && AR_SREV_9462_20(ah))
+ ar9003_hw_prog_ini(ah,
+ &ah->ini_radio_post_sys2ant,
+ modesIndex);
}
REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
@@ -677,17 +685,27 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
REG_WRITE_ARRAY(&ah->iniModesAdditional_40M, 1, regWrites);
+ if (AR_SREV_9462(ah))
+ ar9003_hw_prog_ini(ah, &ah->ini_BTCOEX_MAX_TXPWR, 1);
+
+ ah->modes_index = modesIndex;
ar9003_hw_override_ini(ah);
ar9003_hw_set_channel_regs(ah, chan);
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+ ath9k_hw_apply_txpower(ah, chan);
+
+ if (AR_SREV_9462(ah)) {
+ if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
+ ah->enabled_cals |= TX_IQ_CAL;
+ else
+ ah->enabled_cals &= ~TX_IQ_CAL;
- /* Set TX power */
- ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(regulatory, chan),
- channel->max_antenna_gain * 2,
- channel->max_power * 2,
- min((u32) MAX_RATE_POWER,
- (u32) regulatory->power_limit), false);
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
+ ah->enabled_cals |= TX_CL_CAL;
+ else
+ ah->enabled_cals &= ~TX_CL_CAL;
+ }
return 0;
}
@@ -785,16 +803,6 @@ static void ar9003_hw_rfbus_done(struct ath_hw *ah)
REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
}
-static void ar9003_hw_set_diversity(struct ath_hw *ah, bool value)
-{
- u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
- if (value)
- v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
- else
- v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
- REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
-}
-
static bool ar9003_hw_ani_control(struct ath_hw *ah,
enum ath9k_ani_cmd cmd, int param)
{
@@ -1253,6 +1261,73 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
}
+static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 *ini_reloaded)
+{
+ unsigned int regWrites = 0;
+ u32 modesIndex;
+
+ switch (chan->chanmode) {
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ modesIndex = 1;
+ break;
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ modesIndex = 2;
+ break;
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_B:
+ modesIndex = 4;
+ break;
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ modesIndex = 3;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (modesIndex == ah->modes_index) {
+ *ini_reloaded = false;
+ goto set_rfmode;
+ }
+
+ ar9003_hw_prog_ini(ah, &ah->iniSOC[ATH_INI_POST], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniMac[ATH_INI_POST], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex);
+ if (AR_SREV_9462_20(ah))
+ ar9003_hw_prog_ini(ah,
+ &ah->ini_radio_post_sys2ant,
+ modesIndex);
+
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+
+ /*
+ * For 5GHz channels requiring Fast Clock, apply
+ * different modal values.
+ */
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex, regWrites);
+
+ if (AR_SREV_9330(ah))
+ REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);
+
+ if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
+ REG_WRITE_ARRAY(&ah->iniModesAdditional_40M, 1, regWrites);
+
+ ah->modes_index = modesIndex;
+ *ini_reloaded = true;
+
+set_rfmode:
+ ar9003_hw_set_rfmode(ah, chan);
+ return 0;
+}
+
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1277,11 +1352,11 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
priv_ops->set_delta_slope = ar9003_hw_set_delta_slope;
priv_ops->rfbus_req = ar9003_hw_rfbus_req;
priv_ops->rfbus_done = ar9003_hw_rfbus_done;
- priv_ops->set_diversity = ar9003_hw_set_diversity;
priv_ops->ani_control = ar9003_hw_ani_control;
priv_ops->do_getnf = ar9003_hw_do_getnf;
priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
priv_ops->set_radar_params = ar9003_hw_set_radar_params;
+ priv_ops->fast_chan_change = ar9003_hw_fast_chan_change;
ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 5c590429f12..2f4023e6608 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -325,10 +325,10 @@
#define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200)
-#define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110
-#define AR_PHY_CCA_NOM_VAL_9300_5GHZ -115
-#define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ -125
-#define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ -125
+#define AR_PHY_CCA_NOM_VAL_9300_2GHZ (AR_SREV_9462(ah) ? -127 : -110)
+#define AR_PHY_CCA_NOM_VAL_9300_5GHZ (AR_SREV_9462(ah) ? -127 : -115)
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ (AR_SREV_9462(ah) ? -127 : -125)
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ (AR_SREV_9462(ah) ? -127 : -125)
#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -95
#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -100
@@ -572,6 +572,8 @@
#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300)
+#define AR_PHY_TX_IQCAL_CONTROL_0 (AR_SM_BASE + AR_SREV_9485(ah) ? \
+ 0x3c4 : 0x444)
#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + AR_SREV_9485(ah) ? \
0x3c8 : 0x448)
#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + AR_SREV_9485(ah) ? \
@@ -581,6 +583,7 @@
#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \
(AR_SREV_9485(ah) ? \
0x3d0 : 0x450) + ((_i) << 2))
+#define AR_PHY_RTT_CTRL (AR_SM_BASE + 0x380)
#define AR_PHY_WATCHDOG_STATUS (AR_SM_BASE + 0x5c0)
#define AR_PHY_WATCHDOG_CTL_1 (AR_SM_BASE + 0x5c4)
@@ -600,6 +603,17 @@
#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE 0x0000ff00
#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S 8
+/* AIC Registers */
+#define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0)
+#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
+#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
+#define AR_PHY_AIC_CTRL_3_B0 (AR_SM_BASE + 0x4bc)
+#define AR_PHY_AIC_STAT_0_B0 (AR_SM_BASE + (AR_SREV_9462_10(ah) ? \
+ 0x4c0 : 0x4c4))
+#define AR_PHY_AIC_STAT_1_B0 (AR_SM_BASE + (AR_SREV_9462_10(ah) ? \
+ 0x4c4 : 0x4c8))
+#define AR_PHY_AIC_CTRL_4_B0 (AR_SM_BASE + 0x4c0)
+#define AR_PHY_AIC_STAT_2_B0 (AR_SM_BASE + 0x4cc)
#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT 0x00000002
@@ -609,7 +623,35 @@
#define AR_PHY_65NM_CH0_BIAS2 0x160c4
#define AR_PHY_65NM_CH0_BIAS4 0x160cc
#define AR_PHY_65NM_CH0_RXTX4 0x1610c
-#define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 : 0x1628c)
+
+#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \
+ ((AR_SREV_9462(ah) ? 0x1628c : 0x16280)))
+#define AR_CH0_TOP_XPABIASLVL (0x300)
+#define AR_CH0_TOP_XPABIASLVL_S (8)
+
+#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 : \
+ ((AR_SREV_9485(ah) ? 0x1628c : 0x16294)))
+#define AR_CH0_THERM_XPABIASLVL_MSB 0x3
+#define AR_CH0_THERM_XPABIASLVL_MSB_S 0
+#define AR_CH0_THERM_XPASHORT2GND 0x4
+#define AR_CH0_THERM_XPASHORT2GND_S 2
+
+#define AR_SWITCH_TABLE_COM_ALL (0xffff)
+#define AR_SWITCH_TABLE_COM_ALL_S (0)
+#define AR_SWITCH_TABLE_COM_AR9462_ALL (0xffffff)
+#define AR_SWITCH_TABLE_COM_AR9462_ALL_S (0)
+#define AR_SWITCH_TABLE_COM_SPDT (0x00f00000)
+#define AR_SWITCH_TABLE_COM_SPDT_ALL (0x0000fff0)
+#define AR_SWITCH_TABLE_COM_SPDT_ALL_S (4)
+
+#define AR_SWITCH_TABLE_COM2_ALL (0xffffff)
+#define AR_SWITCH_TABLE_COM2_ALL_S (0)
+
+#define AR_SWITCH_TABLE_ALL (0xfff)
+#define AR_SWITCH_TABLE_ALL_S (0)
+
+#define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\
+ (AR_SREV_9485(ah) ? 0x1628c : 0x16294))
#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
@@ -625,21 +667,23 @@
#define AR_PHY_65NM_CH2_RXTX1 0x16900
#define AR_PHY_65NM_CH2_RXTX2 0x16904
-#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : 0x16284)
+#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
+ (AR_SREV_9485(ah) ? 0x16284 : 0x16290))
#define AR_CH0_TOP2_XPABIASLVL 0xf000
#define AR_CH0_TOP2_XPABIASLVL_S 12
-#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : 0x16290)
+#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
+ (AR_SREV_9485(ah) ? 0x16290 : 0x16298))
#define AR_CH0_XTAL_CAPINDAC 0x7f000000
#define AR_CH0_XTAL_CAPINDAC_S 24
#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000
#define AR_CH0_XTAL_CAPOUTDAC_S 17
-#define AR_PHY_PMU1 0x16c40
+#define AR_PHY_PMU1 (AR_SREV_9462(ah) ? 0x16340 : 0x16c40)
#define AR_PHY_PMU1_PWD 0x1
#define AR_PHY_PMU1_PWD_S 0
-#define AR_PHY_PMU2 0x16c44
+#define AR_PHY_PMU2 (AR_SREV_9462(ah) ? 0x16344 : 0x16c44)
#define AR_PHY_PMU2_PGM 0x00200000
#define AR_PHY_PMU2_PGM_S 21
@@ -779,6 +823,22 @@
#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000
#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24
#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004
+#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION 0x00000001
+#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION_S 0
+#define AR_PHY_RTT_CTRL_RESTORE_MASK 0x0000007E
+#define AR_PHY_RTT_CTRL_RESTORE_MASK_S 1
+#define AR_PHY_RTT_CTRL_FORCE_RADIO_RESTORE 0x00000080
+#define AR_PHY_RTT_CTRL_FORCE_RADIO_RESTORE_S 7
+#define AR_PHY_RTT_SW_RTT_TABLE_ACCESS 0x00000001
+#define AR_PHY_RTT_SW_RTT_TABLE_ACCESS_S 0
+#define AR_PHY_RTT_SW_RTT_TABLE_WRITE 0x00000002
+#define AR_PHY_RTT_SW_RTT_TABLE_WRITE_S 1
+#define AR_PHY_RTT_SW_RTT_TABLE_ADDR 0x0000001C
+#define AR_PHY_RTT_SW_RTT_TABLE_ADDR_S 2
+#define AR_PHY_RTT_SW_RTT_TABLE_DATA 0xFFFFFFF0
+#define AR_PHY_RTT_SW_RTT_TABLE_DATA_S 4
+#define AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL 0x80000000
+#define AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL_S 31
#define AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT 0x01fc0000
#define AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_S 18
#define AR_PHY_TX_IQCAL_START_DO_CAL 0x00000001
@@ -839,19 +899,42 @@
*/
#define AR_SM1_BASE 0xb200
-#define AR_PHY_SWITCH_CHAIN_1 (AR_SM1_BASE + 0x84)
-#define AR_PHY_FCAL_2_1 (AR_SM1_BASE + 0xd0)
-#define AR_PHY_DFT_TONE_CTL_1 (AR_SM1_BASE + 0xd4)
-#define AR_PHY_CL_TAB_1 (AR_SM1_BASE + 0x100)
-#define AR_PHY_CHAN_INFO_GAIN_1 (AR_SM1_BASE + 0x180)
-#define AR_PHY_TPC_4_B1 (AR_SM1_BASE + 0x204)
-#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
-#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
-#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
-#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
+#define AR_PHY_SWITCH_CHAIN_1 (AR_SM1_BASE + 0x84)
+#define AR_PHY_FCAL_2_1 (AR_SM1_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_1 (AR_SM1_BASE + 0xd4)
+#define AR_PHY_CL_TAB_1 (AR_SM1_BASE + 0x100)
+#define AR_PHY_CHAN_INFO_GAIN_1 (AR_SM1_BASE + 0x180)
+#define AR_PHY_TPC_4_B1 (AR_SM1_BASE + 0x204)
+#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
+#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
+#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
+#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_AR9300(ah) ? \
+ 0x240 : 0x280))
+#define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240)
+#define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff
+#define AR_PHY_TPC_19_B1_ALPHA_THERM_S 0
#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2))
+/* SM 1 AIC Registers */
+
+#define AR_PHY_AIC_CTRL_0_B1 (AR_SM1_BASE + 0x4b0)
+#define AR_PHY_AIC_CTRL_1_B1 (AR_SM1_BASE + 0x4b4)
+#define AR_PHY_AIC_CTRL_2_B1 (AR_SM1_BASE + 0x4b8)
+#define AR_PHY_AIC_STAT_0_B1 (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \
+ 0x4c0 : 0x4c4))
+#define AR_PHY_AIC_STAT_1_B1 (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \
+ 0x4c4 : 0x4c8))
+#define AR_PHY_AIC_CTRL_4_B1 (AR_SM1_BASE + 0x4c0)
+#define AR_PHY_AIC_STAT_2_B1 (AR_SM1_BASE + 0x4cc)
+
+#define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0)
+#define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4)
+
+#define AR_PHY_RTT_TABLE_SW_INTF_B(i) (0x384 + (i) ? \
+ AR_SM1_BASE : AR_SM_BASE)
+#define AR_PHY_RTT_TABLE_SW_INTF_1_B(i) (0x388 + (i) ? \
+ AR_SM1_BASE : AR_SM_BASE)
/*
* Channel 2 Register Map
*/
@@ -914,6 +997,13 @@
#define AR_PHY_RSSI_3 (AR_AGC3_BASE + 0x180)
+/* GLB Registers */
+#define AR_GLB_BASE 0x20000
+#define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44)
+#define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \
+ (AR_SREV_9462_20(_ah) ? 0x4c : 0x50))
+#define AR_GLB_STATUS (AR_GLB_BASE + 0x48)
+
/*
* Misc helper defines
*/
@@ -1124,6 +1214,4 @@
#define AR_PHY_CL_TAB_CL_GAIN_MOD 0x1f
#define AR_PHY_CL_TAB_CL_GAIN_MOD_S 0
-void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
-
#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
new file mode 100644
index 00000000000..48803ee9c0d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_phy.h"
+
+#define RTT_RESTORE_TIMEOUT 1000
+#define RTT_ACCESS_TIMEOUT 100
+#define RTT_BAD_VALUE 0x0bad0bad
+
+/*
+ * RTT (Radio Retention Table) hardware implementation information
+ *
+ * There is an internal table (i.e. the rtt) for each chain (or bank).
+ * Each table contains 6 entries and each entry is corresponding to
+ * a specific calibration parameter as depicted below.
+ * 0~2 - DC offset DAC calibration: loop, low, high (offsetI/Q_...)
+ * 3 - Filter cal (filterfc)
+ * 4 - RX gain settings
+ * 5 - Peak detector offset calibration (agc_caldac)
+ */
+
+void ar9003_hw_rtt_enable(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RTT_CTRL, 1);
+}
+
+void ar9003_hw_rtt_disable(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RTT_CTRL, 0);
+}
+
+void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask)
+{
+ REG_RMW_FIELD(ah, AR_PHY_RTT_CTRL,
+ AR_PHY_RTT_CTRL_RESTORE_MASK, rtt_mask);
+}
+
+bool ar9003_hw_rtt_force_restore(struct ath_hw *ah)
+{
+ if (!ath9k_hw_wait(ah, AR_PHY_RTT_CTRL,
+ AR_PHY_RTT_CTRL_FORCE_RADIO_RESTORE,
+ 0, RTT_RESTORE_TIMEOUT))
+ return false;
+
+ REG_RMW_FIELD(ah, AR_PHY_RTT_CTRL,
+ AR_PHY_RTT_CTRL_FORCE_RADIO_RESTORE, 1);
+
+ if (!ath9k_hw_wait(ah, AR_PHY_RTT_CTRL,
+ AR_PHY_RTT_CTRL_FORCE_RADIO_RESTORE,
+ 0, RTT_RESTORE_TIMEOUT))
+ return false;
+
+ return true;
+}
+
+static void ar9003_hw_rtt_load_hist_entry(struct ath_hw *ah, u8 chain,
+ u32 index, u32 data28)
+{
+ u32 val;
+
+ val = SM(data28, AR_PHY_RTT_SW_RTT_TABLE_DATA);
+ REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_1_B(chain), val);
+
+ val = SM(0, AR_PHY_RTT_SW_RTT_TABLE_ACCESS) |
+ SM(1, AR_PHY_RTT_SW_RTT_TABLE_WRITE) |
+ SM(index, AR_PHY_RTT_SW_RTT_TABLE_ADDR);
+ REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val);
+ udelay(1);
+
+ val |= SM(1, AR_PHY_RTT_SW_RTT_TABLE_ACCESS);
+ REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val);
+ udelay(1);
+
+ if (!ath9k_hw_wait(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain),
+ AR_PHY_RTT_SW_RTT_TABLE_ACCESS, 0,
+ RTT_ACCESS_TIMEOUT))
+ return;
+
+ val &= ~SM(1, AR_PHY_RTT_SW_RTT_TABLE_WRITE);
+ REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val);
+ udelay(1);
+
+ ath9k_hw_wait(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain),
+ AR_PHY_RTT_SW_RTT_TABLE_ACCESS, 0,
+ RTT_ACCESS_TIMEOUT);
+}
+
+void ar9003_hw_rtt_load_hist(struct ath_hw *ah, u8 chain, u32 *table)
+{
+ int i;
+
+ for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
+ ar9003_hw_rtt_load_hist_entry(ah, chain, i, table[i]);
+}
+
+static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
+{
+ u32 val;
+
+ val = SM(0, AR_PHY_RTT_SW_RTT_TABLE_ACCESS) |
+ SM(0, AR_PHY_RTT_SW_RTT_TABLE_WRITE) |
+ SM(index, AR_PHY_RTT_SW_RTT_TABLE_ADDR);
+
+ REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val);
+ udelay(1);
+
+ val |= SM(1, AR_PHY_RTT_SW_RTT_TABLE_ACCESS);
+ REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val);
+ udelay(1);
+
+ if (!ath9k_hw_wait(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain),
+ AR_PHY_RTT_SW_RTT_TABLE_ACCESS, 0,
+ RTT_ACCESS_TIMEOUT))
+ return RTT_BAD_VALUE;
+
+ val = REG_READ(ah, AR_PHY_RTT_TABLE_SW_INTF_1_B(chain));
+
+ return val;
+}
+
+void ar9003_hw_rtt_fill_hist(struct ath_hw *ah, u8 chain, u32 *table)
+{
+ int i;
+
+ for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
+ table[i] = ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
+}
+
+void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
+{
+ int i, j;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->rxchainmask & (1 << i)))
+ continue;
+ for (j = 0; j < MAX_RTT_TABLE_ENTRY; j++)
+ ar9003_hw_rtt_load_hist_entry(ah, i, j, 0);
+ }
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.h b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
new file mode 100644
index 00000000000..030758d087d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_RTT_H
+#define AR9003_RTT_H
+
+void ar9003_hw_rtt_enable(struct ath_hw *ah);
+void ar9003_hw_rtt_disable(struct ath_hw *ah);
+void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask);
+bool ar9003_hw_rtt_force_restore(struct ath_hw *ah);
+void ar9003_hw_rtt_load_hist(struct ath_hw *ah, u8 chain, u32 *table);
+void ar9003_hw_rtt_fill_hist(struct ath_hw *ah, u8 chain, u32 *table);
+void ar9003_hw_rtt_clear_hist(struct ath_hw *ah);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_1p0_initvals.h
new file mode 100644
index 00000000000..5c55ae389ad
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9462_1p0_initvals.h
@@ -0,0 +1,1833 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9462_1P0_H
+#define INITVALS_9462_1P0_H
+
+/* AR9462 1.0 */
+
+static const u32 ar9462_1p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00060085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00001810, 0x0f000003},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00080000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008050, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00005},
+ {0x000080d8, 0x00400002},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486e00},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x99c00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x0000001f},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0xffff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9462_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9462_1p0_sys3ant[][2] = {
+ /* Addr allmodes */
+ {0x00063280, 0x00040807},
+ {0x00063284, 0x104ccccc},
+};
+
+static const u32 ar9462_pcie_phy_clkreq_enable_L1_1p0[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10053e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000580c},
+};
+
+static const u32 ar9462_1p0_mac_core_emulation[][2] = {
+ /* Addr allmodes */
+ {0x00000030, 0x00060085},
+ {0x00000044, 0x00000008},
+ {0x0000805c, 0xffffc7ff},
+ {0x00008344, 0xaa4a105b},
+};
+
+static const u32 ar9462_common_rx_gain_table_ar9280_2p0_1p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x02000101},
+ {0x0000a004, 0x02000102},
+ {0x0000a008, 0x02000103},
+ {0x0000a00c, 0x02000104},
+ {0x0000a010, 0x02000200},
+ {0x0000a014, 0x02000201},
+ {0x0000a018, 0x02000202},
+ {0x0000a01c, 0x02000203},
+ {0x0000a020, 0x02000204},
+ {0x0000a024, 0x02000205},
+ {0x0000a028, 0x02000208},
+ {0x0000a02c, 0x02000302},
+ {0x0000a030, 0x02000303},
+ {0x0000a034, 0x02000304},
+ {0x0000a038, 0x02000400},
+ {0x0000a03c, 0x02010300},
+ {0x0000a040, 0x02010301},
+ {0x0000a044, 0x02010302},
+ {0x0000a048, 0x02000500},
+ {0x0000a04c, 0x02010400},
+ {0x0000a050, 0x02020300},
+ {0x0000a054, 0x02020301},
+ {0x0000a058, 0x02020302},
+ {0x0000a05c, 0x02020303},
+ {0x0000a060, 0x02020400},
+ {0x0000a064, 0x02030300},
+ {0x0000a068, 0x02030301},
+ {0x0000a06c, 0x02030302},
+ {0x0000a070, 0x02030303},
+ {0x0000a074, 0x02030400},
+ {0x0000a078, 0x02040300},
+ {0x0000a07c, 0x02040301},
+ {0x0000a080, 0x02040302},
+ {0x0000a084, 0x02040303},
+ {0x0000a088, 0x02030500},
+ {0x0000a08c, 0x02040400},
+ {0x0000a090, 0x02050203},
+ {0x0000a094, 0x02050204},
+ {0x0000a098, 0x02050205},
+ {0x0000a09c, 0x02040500},
+ {0x0000a0a0, 0x02050301},
+ {0x0000a0a4, 0x02050302},
+ {0x0000a0a8, 0x02050303},
+ {0x0000a0ac, 0x02050400},
+ {0x0000a0b0, 0x02050401},
+ {0x0000a0b4, 0x02050402},
+ {0x0000a0b8, 0x02050403},
+ {0x0000a0bc, 0x02050500},
+ {0x0000a0c0, 0x02050501},
+ {0x0000a0c4, 0x02050502},
+ {0x0000a0c8, 0x02050503},
+ {0x0000a0cc, 0x02050504},
+ {0x0000a0d0, 0x02050600},
+ {0x0000a0d4, 0x02050601},
+ {0x0000a0d8, 0x02050602},
+ {0x0000a0dc, 0x02050603},
+ {0x0000a0e0, 0x02050604},
+ {0x0000a0e4, 0x02050700},
+ {0x0000a0e8, 0x02050701},
+ {0x0000a0ec, 0x02050702},
+ {0x0000a0f0, 0x02050703},
+ {0x0000a0f4, 0x02050704},
+ {0x0000a0f8, 0x02050705},
+ {0x0000a0fc, 0x02050708},
+ {0x0000a100, 0x02050709},
+ {0x0000a104, 0x0205070a},
+ {0x0000a108, 0x0205070b},
+ {0x0000a10c, 0x0205070c},
+ {0x0000a110, 0x0205070d},
+ {0x0000a114, 0x02050710},
+ {0x0000a118, 0x02050711},
+ {0x0000a11c, 0x02050712},
+ {0x0000a120, 0x02050713},
+ {0x0000a124, 0x02050714},
+ {0x0000a128, 0x02050715},
+ {0x0000a12c, 0x02050730},
+ {0x0000a130, 0x02050731},
+ {0x0000a134, 0x02050732},
+ {0x0000a138, 0x02050733},
+ {0x0000a13c, 0x02050734},
+ {0x0000a140, 0x02050735},
+ {0x0000a144, 0x02050750},
+ {0x0000a148, 0x02050751},
+ {0x0000a14c, 0x02050752},
+ {0x0000a150, 0x02050753},
+ {0x0000a154, 0x02050754},
+ {0x0000a158, 0x02050755},
+ {0x0000a15c, 0x02050770},
+ {0x0000a160, 0x02050771},
+ {0x0000a164, 0x02050772},
+ {0x0000a168, 0x02050773},
+ {0x0000a16c, 0x02050774},
+ {0x0000a170, 0x02050775},
+ {0x0000a174, 0x00000776},
+ {0x0000a178, 0x00000776},
+ {0x0000a17c, 0x00000776},
+ {0x0000a180, 0x00000776},
+ {0x0000a184, 0x00000776},
+ {0x0000a188, 0x00000776},
+ {0x0000a18c, 0x00000776},
+ {0x0000a190, 0x00000776},
+ {0x0000a194, 0x00000776},
+ {0x0000a198, 0x00000776},
+ {0x0000a19c, 0x00000776},
+ {0x0000a1a0, 0x00000776},
+ {0x0000a1a4, 0x00000776},
+ {0x0000a1a8, 0x00000776},
+ {0x0000a1ac, 0x00000776},
+ {0x0000a1b0, 0x00000776},
+ {0x0000a1b4, 0x00000776},
+ {0x0000a1b8, 0x00000776},
+ {0x0000a1bc, 0x00000776},
+ {0x0000a1c0, 0x00000776},
+ {0x0000a1c4, 0x00000776},
+ {0x0000a1c8, 0x00000776},
+ {0x0000a1cc, 0x00000776},
+ {0x0000a1d0, 0x00000776},
+ {0x0000a1d4, 0x00000776},
+ {0x0000a1d8, 0x00000776},
+ {0x0000a1dc, 0x00000776},
+ {0x0000a1e0, 0x00000776},
+ {0x0000a1e4, 0x00000776},
+ {0x0000a1e8, 0x00000776},
+ {0x0000a1ec, 0x00000776},
+ {0x0000a1f0, 0x00000776},
+ {0x0000a1f4, 0x00000776},
+ {0x0000a1f8, 0x00000776},
+ {0x0000a1fc, 0x00000776},
+ {0x0000b000, 0x02000101},
+ {0x0000b004, 0x02000102},
+ {0x0000b008, 0x02000103},
+ {0x0000b00c, 0x02000104},
+ {0x0000b010, 0x02000200},
+ {0x0000b014, 0x02000201},
+ {0x0000b018, 0x02000202},
+ {0x0000b01c, 0x02000203},
+ {0x0000b020, 0x02000204},
+ {0x0000b024, 0x02000205},
+ {0x0000b028, 0x02000208},
+ {0x0000b02c, 0x02000302},
+ {0x0000b030, 0x02000303},
+ {0x0000b034, 0x02000304},
+ {0x0000b038, 0x02000400},
+ {0x0000b03c, 0x02010300},
+ {0x0000b040, 0x02010301},
+ {0x0000b044, 0x02010302},
+ {0x0000b048, 0x02000500},
+ {0x0000b04c, 0x02010400},
+ {0x0000b050, 0x02020300},
+ {0x0000b054, 0x02020301},
+ {0x0000b058, 0x02020302},
+ {0x0000b05c, 0x02020303},
+ {0x0000b060, 0x02020400},
+ {0x0000b064, 0x02030300},
+ {0x0000b068, 0x02030301},
+ {0x0000b06c, 0x02030302},
+ {0x0000b070, 0x02030303},
+ {0x0000b074, 0x02030400},
+ {0x0000b078, 0x02040300},
+ {0x0000b07c, 0x02040301},
+ {0x0000b080, 0x02040302},
+ {0x0000b084, 0x02040303},
+ {0x0000b088, 0x02030500},
+ {0x0000b08c, 0x02040400},
+ {0x0000b090, 0x02050203},
+ {0x0000b094, 0x02050204},
+ {0x0000b098, 0x02050205},
+ {0x0000b09c, 0x02040500},
+ {0x0000b0a0, 0x02050301},
+ {0x0000b0a4, 0x02050302},
+ {0x0000b0a8, 0x02050303},
+ {0x0000b0ac, 0x02050400},
+ {0x0000b0b0, 0x02050401},
+ {0x0000b0b4, 0x02050402},
+ {0x0000b0b8, 0x02050403},
+ {0x0000b0bc, 0x02050500},
+ {0x0000b0c0, 0x02050501},
+ {0x0000b0c4, 0x02050502},
+ {0x0000b0c8, 0x02050503},
+ {0x0000b0cc, 0x02050504},
+ {0x0000b0d0, 0x02050600},
+ {0x0000b0d4, 0x02050601},
+ {0x0000b0d8, 0x02050602},
+ {0x0000b0dc, 0x02050603},
+ {0x0000b0e0, 0x02050604},
+ {0x0000b0e4, 0x02050700},
+ {0x0000b0e8, 0x02050701},
+ {0x0000b0ec, 0x02050702},
+ {0x0000b0f0, 0x02050703},
+ {0x0000b0f4, 0x02050704},
+ {0x0000b0f8, 0x02050705},
+ {0x0000b0fc, 0x02050708},
+ {0x0000b100, 0x02050709},
+ {0x0000b104, 0x0205070a},
+ {0x0000b108, 0x0205070b},
+ {0x0000b10c, 0x0205070c},
+ {0x0000b110, 0x0205070d},
+ {0x0000b114, 0x02050710},
+ {0x0000b118, 0x02050711},
+ {0x0000b11c, 0x02050712},
+ {0x0000b120, 0x02050713},
+ {0x0000b124, 0x02050714},
+ {0x0000b128, 0x02050715},
+ {0x0000b12c, 0x02050730},
+ {0x0000b130, 0x02050731},
+ {0x0000b134, 0x02050732},
+ {0x0000b138, 0x02050733},
+ {0x0000b13c, 0x02050734},
+ {0x0000b140, 0x02050735},
+ {0x0000b144, 0x02050750},
+ {0x0000b148, 0x02050751},
+ {0x0000b14c, 0x02050752},
+ {0x0000b150, 0x02050753},
+ {0x0000b154, 0x02050754},
+ {0x0000b158, 0x02050755},
+ {0x0000b15c, 0x02050770},
+ {0x0000b160, 0x02050771},
+ {0x0000b164, 0x02050772},
+ {0x0000b168, 0x02050773},
+ {0x0000b16c, 0x02050774},
+ {0x0000b170, 0x02050775},
+ {0x0000b174, 0x00000776},
+ {0x0000b178, 0x00000776},
+ {0x0000b17c, 0x00000776},
+ {0x0000b180, 0x00000776},
+ {0x0000b184, 0x00000776},
+ {0x0000b188, 0x00000776},
+ {0x0000b18c, 0x00000776},
+ {0x0000b190, 0x00000776},
+ {0x0000b194, 0x00000776},
+ {0x0000b198, 0x00000776},
+ {0x0000b19c, 0x00000776},
+ {0x0000b1a0, 0x00000776},
+ {0x0000b1a4, 0x00000776},
+ {0x0000b1a8, 0x00000776},
+ {0x0000b1ac, 0x00000776},
+ {0x0000b1b0, 0x00000776},
+ {0x0000b1b4, 0x00000776},
+ {0x0000b1b8, 0x00000776},
+ {0x0000b1bc, 0x00000776},
+ {0x0000b1c0, 0x00000776},
+ {0x0000b1c4, 0x00000776},
+ {0x0000b1c8, 0x00000776},
+ {0x0000b1cc, 0x00000776},
+ {0x0000b1d0, 0x00000776},
+ {0x0000b1d4, 0x00000776},
+ {0x0000b1d8, 0x00000776},
+ {0x0000b1dc, 0x00000776},
+ {0x0000b1e0, 0x00000776},
+ {0x0000b1e4, 0x00000776},
+ {0x0000b1e8, 0x00000776},
+ {0x0000b1ec, 0x00000776},
+ {0x0000b1f0, 0x00000776},
+ {0x0000b1f4, 0x00000776},
+ {0x0000b1f8, 0x00000776},
+ {0x0000b1fc, 0x00000776},
+};
+
+static const u32 ar9200_ar9280_2p0_radio_core_1p0[][2] = {
+ /* Addr allmodes */
+ {0x00007800, 0x00040000},
+ {0x00007804, 0xdb005012},
+ {0x00007808, 0x04924914},
+ {0x0000780c, 0x21084210},
+ {0x00007810, 0x6d801300},
+ {0x00007814, 0x0019beff},
+ {0x00007818, 0x07e41000},
+ {0x0000781c, 0x00392000},
+ {0x00007820, 0x92592480},
+ {0x00007824, 0x00040000},
+ {0x00007828, 0xdb005012},
+ {0x0000782c, 0x04924914},
+ {0x00007830, 0x21084210},
+ {0x00007834, 0x6d801300},
+ {0x00007838, 0x0019beff},
+ {0x0000783c, 0x07e40000},
+ {0x00007840, 0x00392000},
+ {0x00007844, 0x92592480},
+ {0x00007848, 0x00100000},
+ {0x0000784c, 0x773f0567},
+ {0x00007850, 0x54214514},
+ {0x00007854, 0x12035828},
+ {0x00007858, 0x92592692},
+ {0x0000785c, 0x00000000},
+ {0x00007860, 0x56400000},
+ {0x00007864, 0x0a8e370e},
+ {0x00007868, 0xc0102850},
+ {0x0000786c, 0x812d4000},
+ {0x00007870, 0x807ec400},
+ {0x00007874, 0x001b6db0},
+ {0x00007878, 0x00376b63},
+ {0x0000787c, 0x06db6db6},
+ {0x00007880, 0x006d8000},
+ {0x00007884, 0xffeffffe},
+ {0x00007888, 0xffeffffe},
+ {0x0000788c, 0x00010000},
+ {0x00007890, 0x02060aeb},
+ {0x00007894, 0x5a108000},
+};
+
+static const u32 ar9462_1p0_baseband_postamble_emulation[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e3c, 0xcf946221, 0xcf946221, 0xcf946221, 0xcf946221},
+ {0x00009e44, 0x005c0000, 0x005c0000, 0x005c0000, 0x005c0000},
+ {0x0000a258, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x0000a25c, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a28c, 0x00011111, 0x00011111, 0x00011111, 0x00011111},
+ {0x0000a2c4, 0x00148d18, 0x00148d18, 0x00148d20, 0x00148d20},
+ {0x0000a2d8, 0xf999a800, 0xf999a800, 0xf999a80c, 0xf999a80c},
+ {0x0000a50c, 0x0000c00a, 0x0000c00a, 0x0000c00a, 0x0000c00a},
+ {0x0000a538, 0x00038e8c, 0x00038e8c, 0x00038e8c, 0x00038e8c},
+ {0x0000a53c, 0x0003cecc, 0x0003cecc, 0x0003cecc, 0x0003cecc},
+ {0x0000a540, 0x00040ed4, 0x00040ed4, 0x00040ed4, 0x00040ed4},
+ {0x0000a544, 0x00044edc, 0x00044edc, 0x00044edc, 0x00044edc},
+ {0x0000a548, 0x00048ede, 0x00048ede, 0x00048ede, 0x00048ede},
+ {0x0000a54c, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e},
+ {0x0000a550, 0x00050f5e, 0x00050f5e, 0x00050f5e, 0x00050f5e},
+ {0x0000a554, 0x00054f9e, 0x00054f9e, 0x00054f9e, 0x00054f9e},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9462_pcie_phy_pll_on_clkreq_disable_L1_1p0[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10012e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000580c},
+};
+
+static const u32 ar9462_common_rx_gain_table_1p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_modes_high_ob_db_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+ {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
+ {0x00016444, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+ {0x00016448, 0x8db49000, 0x8db49000, 0x8db49000, 0x8db49000},
+};
+
+static const u32 ar9462_common_wo_xlna_rx_gain_table_1p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_1p0_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9462_1p0_mac_postamble_emulation[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00008014, 0x10f810f8, 0x10f810f8, 0x10f810f8, 0x10f810f8},
+ {0x0000801c, 0x0e8d8017, 0x0e8d8017, 0x0e8d8017, 0x0e8d8017},
+};
+
+static const u32 ar9462_1p0_tx_gain_table_baseband_postamble_emulation[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000000d5, 0x000000d5, 0x000000d5, 0x000000d5},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x00004002, 0x00004002, 0x00004002, 0x00004002},
+ {0x0000a508, 0x00008004, 0x00008004, 0x00008004, 0x00008004},
+ {0x0000a510, 0x0001000c, 0x0001000c, 0x0001000c, 0x0001000c},
+ {0x0000a514, 0x0001420b, 0x0001420b, 0x0001420b, 0x0001420b},
+ {0x0000a518, 0x0001824a, 0x0001824a, 0x0001824a, 0x0001824a},
+ {0x0000a51c, 0x0001c44a, 0x0001c44a, 0x0001c44a, 0x0001c44a},
+ {0x0000a520, 0x0002064a, 0x0002064a, 0x0002064a, 0x0002064a},
+ {0x0000a524, 0x0002484a, 0x0002484a, 0x0002484a, 0x0002484a},
+ {0x0000a528, 0x00028a4a, 0x00028a4a, 0x00028a4a, 0x00028a4a},
+ {0x0000a52c, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a},
+ {0x0000a530, 0x00030e4a, 0x00030e4a, 0x00030e4a, 0x00030e4a},
+ {0x0000a534, 0x00034e8a, 0x00034e8a, 0x00034e8a, 0x00034e8a},
+};
+
+static const u32 ar9462_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
+ {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24646c08, 0x24646c08},
+ {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
+ {0x0001610c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9462_1p0_soc_postamble_emulation[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00001133, 0x00001133, 0x00001133, 0x00001133},
+};
+
+static const u32 ar9462_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a290},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x0d000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098e4, 0x01ffffff},
+ {0x000098e8, 0x01ffffff},
+ {0x000098ec, 0x01ffffff},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb514},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0x64c355c7},
+ {0x00009e58, 0xfd897735},
+ {0x00009e5c, 0xe9198724},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a644, 0xbfad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a6b0, 0x0000000a},
+ {0x0000a6b4, 0x28f12c01},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b6b0, 0x0000000a},
+ {0x0000b6b4, 0x00c00001},
+};
+
+static const u32 ar9462_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x0131b7c0, 0x0131b7c4, 0x0131b7c4, 0x0131b7c0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00100110, 0x00100110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
+};
+
+static const u32 ar9462_modes_fast_clock_1p0[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x0372131c, 0x0372131c},
+ {0x0000a230, 0x0000400b, 0x00004016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9462_modes_low_ob_db_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+ {0x00016048, 0x64992060, 0x64992060, 0x64992060, 0x64992060},
+ {0x00016444, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+ {0x00016448, 0x64992000, 0x64992000, 0x64992000, 0x64992000},
+};
+
+static const u32 ar9462_1p0_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
+};
+
+static const u32 ar9462_common_mixed_rx_gain_table_1p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_pcie_phy_clkreq_disable_L1_1p0[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10013e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000580c},
+};
+
+static const u32 ar9462_1p0_baseband_core_emulation[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafa68e30},
+ {0x00009884, 0x00002842},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009e50, 0x00000000},
+ {0x00009fcc, 0x00000014},
+ {0x0000a344, 0x00000010},
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x71733d01},
+ {0x0000a3a0, 0xd0ad5c12},
+ {0x0000a3c0, 0x22222220},
+ {0x0000a3c4, 0x22222222},
+ {0x0000a404, 0x00418a11},
+ {0x0000a418, 0x050001ce},
+ {0x0000a438, 0x00001800},
+ {0x0000a458, 0x01444452},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a690, 0x00000038},
+};
+
+static const u32 ar9462_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016010, 0x6d820001},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x2699e04f},
+ {0x00016050, 0x6db6db6c},
+ {0x00016054, 0x6db60000},
+ {0x00016058, 0x6c200000},
+ {0x00016080, 0x00040000},
+ {0x00016084, 0x9a68048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x12030409},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc490},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92000000},
+ {0x000160b8, 0x0285dddc},
+ {0x000160bc, 0x02908888},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x0de6c1b0},
+ {0x00016100, 0x3fffbe04},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00200400},
+ {0x00016110, 0x00000000},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x000080c0},
+ {0x00016280, 0x050a0001},
+ {0x00016284, 0x3d841400},
+ {0x00016288, 0x00000000},
+ {0x0001628c, 0xe3000000},
+ {0x00016290, 0xa1005080},
+ {0x00016294, 0x00000020},
+ {0x00016298, 0x50a02900},
+ {0x00016340, 0x121e4276},
+ {0x00016344, 0x00300000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016410, 0x6c800001},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x4699e04f},
+ {0x00016450, 0x6db6db6c},
+ {0x00016454, 0x6db60000},
+ {0x00016500, 0x3fffbe04},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00200400},
+ {0x00016510, 0x00000000},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x000080c0},
+};
+
+static const u32 ar9462_1p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9462_1p0_sys2ant[][2] = {
+ /* Addr allmodes */
+ {0x00063120, 0x00801980},
+};
+
+#endif /* INITVALS_9462_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
new file mode 100644
index 00000000000..9c51b395b4f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -0,0 +1,1928 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9462_2P0_H
+#define INITVALS_9462_2P0_H
+
+/* AR9462 2.0 */
+
+static const u32 ar9462_modes_fast_clock_2p0[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x0372131c, 0x0372131c},
+ {0x0000a230, 0x0000400b, 0x00004016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18253ede},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0003580c},
+};
+
+static const u32 ar9462_2p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3039605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782},
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
+};
+
+static const u32 ar9462_2p0_mac_core_emulation[][2] = {
+ /* Addr allmodes */
+ {0x00000030, 0x000e0085},
+ {0x00000044, 0x00000008},
+ {0x0000805c, 0xffffc7ff},
+ {0x00008344, 0xaa4a105b},
+};
+
+static const u32 ar9462_common_rx_gain_table_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_pciephy_clkreq_disable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18213ede},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0003580c},
+};
+
+static const u32 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18212ede},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0003580c},
+};
+
+static const u32 ar9462_2p0_sys3ant[][2] = {
+ /* Addr allmodes */
+ {0x00063280, 0x00040807},
+ {0x00063284, 0x104ccccc},
+};
+
+static const u32 ar9462_common_rx_gain_table_ar9280_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x02000101},
+ {0x0000a004, 0x02000102},
+ {0x0000a008, 0x02000103},
+ {0x0000a00c, 0x02000104},
+ {0x0000a010, 0x02000200},
+ {0x0000a014, 0x02000201},
+ {0x0000a018, 0x02000202},
+ {0x0000a01c, 0x02000203},
+ {0x0000a020, 0x02000204},
+ {0x0000a024, 0x02000205},
+ {0x0000a028, 0x02000208},
+ {0x0000a02c, 0x02000302},
+ {0x0000a030, 0x02000303},
+ {0x0000a034, 0x02000304},
+ {0x0000a038, 0x02000400},
+ {0x0000a03c, 0x02010300},
+ {0x0000a040, 0x02010301},
+ {0x0000a044, 0x02010302},
+ {0x0000a048, 0x02000500},
+ {0x0000a04c, 0x02010400},
+ {0x0000a050, 0x02020300},
+ {0x0000a054, 0x02020301},
+ {0x0000a058, 0x02020302},
+ {0x0000a05c, 0x02020303},
+ {0x0000a060, 0x02020400},
+ {0x0000a064, 0x02030300},
+ {0x0000a068, 0x02030301},
+ {0x0000a06c, 0x02030302},
+ {0x0000a070, 0x02030303},
+ {0x0000a074, 0x02030400},
+ {0x0000a078, 0x02040300},
+ {0x0000a07c, 0x02040301},
+ {0x0000a080, 0x02040302},
+ {0x0000a084, 0x02040303},
+ {0x0000a088, 0x02030500},
+ {0x0000a08c, 0x02040400},
+ {0x0000a090, 0x02050203},
+ {0x0000a094, 0x02050204},
+ {0x0000a098, 0x02050205},
+ {0x0000a09c, 0x02040500},
+ {0x0000a0a0, 0x02050301},
+ {0x0000a0a4, 0x02050302},
+ {0x0000a0a8, 0x02050303},
+ {0x0000a0ac, 0x02050400},
+ {0x0000a0b0, 0x02050401},
+ {0x0000a0b4, 0x02050402},
+ {0x0000a0b8, 0x02050403},
+ {0x0000a0bc, 0x02050500},
+ {0x0000a0c0, 0x02050501},
+ {0x0000a0c4, 0x02050502},
+ {0x0000a0c8, 0x02050503},
+ {0x0000a0cc, 0x02050504},
+ {0x0000a0d0, 0x02050600},
+ {0x0000a0d4, 0x02050601},
+ {0x0000a0d8, 0x02050602},
+ {0x0000a0dc, 0x02050603},
+ {0x0000a0e0, 0x02050604},
+ {0x0000a0e4, 0x02050700},
+ {0x0000a0e8, 0x02050701},
+ {0x0000a0ec, 0x02050702},
+ {0x0000a0f0, 0x02050703},
+ {0x0000a0f4, 0x02050704},
+ {0x0000a0f8, 0x02050705},
+ {0x0000a0fc, 0x02050708},
+ {0x0000a100, 0x02050709},
+ {0x0000a104, 0x0205070a},
+ {0x0000a108, 0x0205070b},
+ {0x0000a10c, 0x0205070c},
+ {0x0000a110, 0x0205070d},
+ {0x0000a114, 0x02050710},
+ {0x0000a118, 0x02050711},
+ {0x0000a11c, 0x02050712},
+ {0x0000a120, 0x02050713},
+ {0x0000a124, 0x02050714},
+ {0x0000a128, 0x02050715},
+ {0x0000a12c, 0x02050730},
+ {0x0000a130, 0x02050731},
+ {0x0000a134, 0x02050732},
+ {0x0000a138, 0x02050733},
+ {0x0000a13c, 0x02050734},
+ {0x0000a140, 0x02050735},
+ {0x0000a144, 0x02050750},
+ {0x0000a148, 0x02050751},
+ {0x0000a14c, 0x02050752},
+ {0x0000a150, 0x02050753},
+ {0x0000a154, 0x02050754},
+ {0x0000a158, 0x02050755},
+ {0x0000a15c, 0x02050770},
+ {0x0000a160, 0x02050771},
+ {0x0000a164, 0x02050772},
+ {0x0000a168, 0x02050773},
+ {0x0000a16c, 0x02050774},
+ {0x0000a170, 0x02050775},
+ {0x0000a174, 0x00000776},
+ {0x0000a178, 0x00000776},
+ {0x0000a17c, 0x00000776},
+ {0x0000a180, 0x00000776},
+ {0x0000a184, 0x00000776},
+ {0x0000a188, 0x00000776},
+ {0x0000a18c, 0x00000776},
+ {0x0000a190, 0x00000776},
+ {0x0000a194, 0x00000776},
+ {0x0000a198, 0x00000776},
+ {0x0000a19c, 0x00000776},
+ {0x0000a1a0, 0x00000776},
+ {0x0000a1a4, 0x00000776},
+ {0x0000a1a8, 0x00000776},
+ {0x0000a1ac, 0x00000776},
+ {0x0000a1b0, 0x00000776},
+ {0x0000a1b4, 0x00000776},
+ {0x0000a1b8, 0x00000776},
+ {0x0000a1bc, 0x00000776},
+ {0x0000a1c0, 0x00000776},
+ {0x0000a1c4, 0x00000776},
+ {0x0000a1c8, 0x00000776},
+ {0x0000a1cc, 0x00000776},
+ {0x0000a1d0, 0x00000776},
+ {0x0000a1d4, 0x00000776},
+ {0x0000a1d8, 0x00000776},
+ {0x0000a1dc, 0x00000776},
+ {0x0000a1e0, 0x00000776},
+ {0x0000a1e4, 0x00000776},
+ {0x0000a1e8, 0x00000776},
+ {0x0000a1ec, 0x00000776},
+ {0x0000a1f0, 0x00000776},
+ {0x0000a1f4, 0x00000776},
+ {0x0000a1f8, 0x00000776},
+ {0x0000a1fc, 0x00000776},
+ {0x0000b000, 0x02000101},
+ {0x0000b004, 0x02000102},
+ {0x0000b008, 0x02000103},
+ {0x0000b00c, 0x02000104},
+ {0x0000b010, 0x02000200},
+ {0x0000b014, 0x02000201},
+ {0x0000b018, 0x02000202},
+ {0x0000b01c, 0x02000203},
+ {0x0000b020, 0x02000204},
+ {0x0000b024, 0x02000205},
+ {0x0000b028, 0x02000208},
+ {0x0000b02c, 0x02000302},
+ {0x0000b030, 0x02000303},
+ {0x0000b034, 0x02000304},
+ {0x0000b038, 0x02000400},
+ {0x0000b03c, 0x02010300},
+ {0x0000b040, 0x02010301},
+ {0x0000b044, 0x02010302},
+ {0x0000b048, 0x02000500},
+ {0x0000b04c, 0x02010400},
+ {0x0000b050, 0x02020300},
+ {0x0000b054, 0x02020301},
+ {0x0000b058, 0x02020302},
+ {0x0000b05c, 0x02020303},
+ {0x0000b060, 0x02020400},
+ {0x0000b064, 0x02030300},
+ {0x0000b068, 0x02030301},
+ {0x0000b06c, 0x02030302},
+ {0x0000b070, 0x02030303},
+ {0x0000b074, 0x02030400},
+ {0x0000b078, 0x02040300},
+ {0x0000b07c, 0x02040301},
+ {0x0000b080, 0x02040302},
+ {0x0000b084, 0x02040303},
+ {0x0000b088, 0x02030500},
+ {0x0000b08c, 0x02040400},
+ {0x0000b090, 0x02050203},
+ {0x0000b094, 0x02050204},
+ {0x0000b098, 0x02050205},
+ {0x0000b09c, 0x02040500},
+ {0x0000b0a0, 0x02050301},
+ {0x0000b0a4, 0x02050302},
+ {0x0000b0a8, 0x02050303},
+ {0x0000b0ac, 0x02050400},
+ {0x0000b0b0, 0x02050401},
+ {0x0000b0b4, 0x02050402},
+ {0x0000b0b8, 0x02050403},
+ {0x0000b0bc, 0x02050500},
+ {0x0000b0c0, 0x02050501},
+ {0x0000b0c4, 0x02050502},
+ {0x0000b0c8, 0x02050503},
+ {0x0000b0cc, 0x02050504},
+ {0x0000b0d0, 0x02050600},
+ {0x0000b0d4, 0x02050601},
+ {0x0000b0d8, 0x02050602},
+ {0x0000b0dc, 0x02050603},
+ {0x0000b0e0, 0x02050604},
+ {0x0000b0e4, 0x02050700},
+ {0x0000b0e8, 0x02050701},
+ {0x0000b0ec, 0x02050702},
+ {0x0000b0f0, 0x02050703},
+ {0x0000b0f4, 0x02050704},
+ {0x0000b0f8, 0x02050705},
+ {0x0000b0fc, 0x02050708},
+ {0x0000b100, 0x02050709},
+ {0x0000b104, 0x0205070a},
+ {0x0000b108, 0x0205070b},
+ {0x0000b10c, 0x0205070c},
+ {0x0000b110, 0x0205070d},
+ {0x0000b114, 0x02050710},
+ {0x0000b118, 0x02050711},
+ {0x0000b11c, 0x02050712},
+ {0x0000b120, 0x02050713},
+ {0x0000b124, 0x02050714},
+ {0x0000b128, 0x02050715},
+ {0x0000b12c, 0x02050730},
+ {0x0000b130, 0x02050731},
+ {0x0000b134, 0x02050732},
+ {0x0000b138, 0x02050733},
+ {0x0000b13c, 0x02050734},
+ {0x0000b140, 0x02050735},
+ {0x0000b144, 0x02050750},
+ {0x0000b148, 0x02050751},
+ {0x0000b14c, 0x02050752},
+ {0x0000b150, 0x02050753},
+ {0x0000b154, 0x02050754},
+ {0x0000b158, 0x02050755},
+ {0x0000b15c, 0x02050770},
+ {0x0000b160, 0x02050771},
+ {0x0000b164, 0x02050772},
+ {0x0000b168, 0x02050773},
+ {0x0000b16c, 0x02050774},
+ {0x0000b170, 0x02050775},
+ {0x0000b174, 0x00000776},
+ {0x0000b178, 0x00000776},
+ {0x0000b17c, 0x00000776},
+ {0x0000b180, 0x00000776},
+ {0x0000b184, 0x00000776},
+ {0x0000b188, 0x00000776},
+ {0x0000b18c, 0x00000776},
+ {0x0000b190, 0x00000776},
+ {0x0000b194, 0x00000776},
+ {0x0000b198, 0x00000776},
+ {0x0000b19c, 0x00000776},
+ {0x0000b1a0, 0x00000776},
+ {0x0000b1a4, 0x00000776},
+ {0x0000b1a8, 0x00000776},
+ {0x0000b1ac, 0x00000776},
+ {0x0000b1b0, 0x00000776},
+ {0x0000b1b4, 0x00000776},
+ {0x0000b1b8, 0x00000776},
+ {0x0000b1bc, 0x00000776},
+ {0x0000b1c0, 0x00000776},
+ {0x0000b1c4, 0x00000776},
+ {0x0000b1c8, 0x00000776},
+ {0x0000b1cc, 0x00000776},
+ {0x0000b1d0, 0x00000776},
+ {0x0000b1d4, 0x00000776},
+ {0x0000b1d8, 0x00000776},
+ {0x0000b1dc, 0x00000776},
+ {0x0000b1e0, 0x00000776},
+ {0x0000b1e4, 0x00000776},
+ {0x0000b1e8, 0x00000776},
+ {0x0000b1ec, 0x00000776},
+ {0x0000b1f0, 0x00000776},
+ {0x0000b1f4, 0x00000776},
+ {0x0000b1f8, 0x00000776},
+ {0x0000b1fc, 0x00000776},
+};
+
+static const u32 ar9200_ar9280_2p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00007800, 0x00040000},
+ {0x00007804, 0xdb005012},
+ {0x00007808, 0x04924914},
+ {0x0000780c, 0x21084210},
+ {0x00007810, 0x6d801300},
+ {0x00007814, 0x0019beff},
+ {0x00007818, 0x07e41000},
+ {0x0000781c, 0x00392000},
+ {0x00007820, 0x92592480},
+ {0x00007824, 0x00040000},
+ {0x00007828, 0xdb005012},
+ {0x0000782c, 0x04924914},
+ {0x00007830, 0x21084210},
+ {0x00007834, 0x6d801300},
+ {0x00007838, 0x0019beff},
+ {0x0000783c, 0x07e40000},
+ {0x00007840, 0x00392000},
+ {0x00007844, 0x92592480},
+ {0x00007848, 0x00100000},
+ {0x0000784c, 0x773f0567},
+ {0x00007850, 0x54214514},
+ {0x00007854, 0x12035828},
+ {0x00007858, 0x92592692},
+ {0x0000785c, 0x00000000},
+ {0x00007860, 0x56400000},
+ {0x00007864, 0x0a8e370e},
+ {0x00007868, 0xc0102850},
+ {0x0000786c, 0x812d4000},
+ {0x00007870, 0x807ec400},
+ {0x00007874, 0x001b6db0},
+ {0x00007878, 0x00376b63},
+ {0x0000787c, 0x06db6db6},
+ {0x00007880, 0x006d8000},
+ {0x00007884, 0xffeffffe},
+ {0x00007888, 0xffeffffe},
+ {0x0000788c, 0x00010000},
+ {0x00007890, 0x02060aeb},
+ {0x00007894, 0x5a108000},
+};
+
+static const u32 ar9462_2p0_mac_postamble_emulation[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00008014, 0x10f810f8, 0x10f810f8, 0x10f810f8, 0x10f810f8},
+ {0x0000801c, 0x0e8d8017, 0x0e8d8017, 0x0e8d8017, 0x0e8d8017},
+};
+
+static const u32 ar9462_2p0_radio_postamble_sys3ant[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
+ {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+ {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+};
+
+static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e3c, 0xcf946221, 0xcf946221, 0xcf946221, 0xcf946221},
+ {0x00009e44, 0xfc5c0000, 0xfc5c0000, 0xfc5c0000, 0xfc5c0000},
+ {0x0000a258, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x0000a25c, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a28c, 0x00011111, 0x00011111, 0x00011111, 0x00011111},
+ {0x0000a2c4, 0x00148d18, 0x00148d18, 0x00148d20, 0x00148d20},
+ {0x0000a2d8, 0xf999a800, 0xf999a800, 0xf999a80c, 0xf999a80c},
+ {0x0000a50c, 0x0000c00a, 0x0000c00a, 0x0000c00a, 0x0000c00a},
+ {0x0000a538, 0x00038e8c, 0x00038e8c, 0x00038e8c, 0x00038e8c},
+ {0x0000a53c, 0x0003cecc, 0x0003cecc, 0x0003cecc, 0x0003cecc},
+ {0x0000a540, 0x00040ed4, 0x00040ed4, 0x00040ed4, 0x00040ed4},
+ {0x0000a544, 0x00044edc, 0x00044edc, 0x00044edc, 0x00044edc},
+ {0x0000a548, 0x00048ede, 0x00048ede, 0x00048ede, 0x00048ede},
+ {0x0000a54c, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e},
+ {0x0000a550, 0x00050f5e, 0x00050f5e, 0x00050f5e, 0x00050f5e},
+ {0x0000a554, 0x00054f9e, 0x00054f9e, 0x00054f9e, 0x00054f9e},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
+ {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+ {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+};
+
+static const u32 ar9462_common_wo_xlna_rx_gain_table_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_2p0_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9462_modes_low_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+ {0x00016048, 0x64992060, 0x64992060, 0x64992060, 0x64992060},
+ {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+ {0x00016444, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+ {0x00016448, 0x64992000, 0x64992000, 0x64992000, 0x64992000},
+ {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+};
+
+static const u32 ar9462_2p0_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
+};
+
+static const u32 ar9462_2p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a290},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x0d000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32440bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098e4, 0x01ffffff},
+ {0x000098e8, 0x01ffffff},
+ {0x000098ec, 0x01ffffff},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009bf0, 0x80000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0xe4c355c7},
+ {0x00009e58, 0xfd897735},
+ {0x00009e5c, 0xe9198724},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a454, 0x07000000},
+ {0x0000a644, 0xbfad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00002037},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a6b0, 0x0000000a},
+ {0x0000a6b4, 0x00512c01},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+ {0x0000a7f0, 0x80000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000abf0, 0x80000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b6b0, 0x0000000a},
+ {0x0000b6b4, 0x00000001},
+};
+
+static const u32 ar9462_2p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
+ {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
+ {0x0001610c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
+ {0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
+};
+
+static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+ {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
+ {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+ {0x00016444, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+ {0x00016448, 0x8db49000, 0x8db49000, 0x8db49000, 0x8db49000},
+ {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+};
+
+static const u32 ar9462_2p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016010, 0x6d820001},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x2699e04f},
+ {0x00016050, 0x6db6db6c},
+ {0x00016058, 0x6c200000},
+ {0x00016080, 0x00040000},
+ {0x00016084, 0x9a68048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x1203040b},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc491},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92000000},
+ {0x000160b8, 0x0285dddc},
+ {0x000160bc, 0x02908888},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x0de6c1b0},
+ {0x00016100, 0x3fffbe04},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00200400},
+ {0x00016110, 0x00000000},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x000080c0},
+ {0x00016280, 0x050a0001},
+ {0x00016284, 0x3d841400},
+ {0x00016288, 0x00000000},
+ {0x0001628c, 0xe3000000},
+ {0x00016290, 0xa1005080},
+ {0x00016294, 0x00000020},
+ {0x00016298, 0x54a82900},
+ {0x00016340, 0x121e4276},
+ {0x00016344, 0x00300000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016410, 0x6c800001},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x4699e04f},
+ {0x00016450, 0x6db6db6c},
+ {0x00016500, 0x3fffbe04},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00200400},
+ {0x00016510, 0x00000000},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x000080c0},
+};
+
+static const u32 ar9462_2p0_tx_gain_table_baseband_postamble_emulation[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000000d5, 0x000000d5, 0x000000d5, 0x000000d5},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x00004002, 0x00004002, 0x00004002, 0x00004002},
+ {0x0000a508, 0x00008004, 0x00008004, 0x00008004, 0x00008004},
+ {0x0000a510, 0x0001000c, 0x0001000c, 0x0001000c, 0x0001000c},
+ {0x0000a514, 0x0001420b, 0x0001420b, 0x0001420b, 0x0001420b},
+ {0x0000a518, 0x0001824a, 0x0001824a, 0x0001824a, 0x0001824a},
+ {0x0000a51c, 0x0001c44a, 0x0001c44a, 0x0001c44a, 0x0001c44a},
+ {0x0000a520, 0x0002064a, 0x0002064a, 0x0002064a, 0x0002064a},
+ {0x0000a524, 0x0002484a, 0x0002484a, 0x0002484a, 0x0002484a},
+ {0x0000a528, 0x00028a4a, 0x00028a4a, 0x00028a4a, 0x00028a4a},
+ {0x0000a52c, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a},
+ {0x0000a530, 0x00030e4a, 0x00030e4a, 0x00030e4a, 0x00030e4a},
+ {0x0000a534, 0x00034e8a, 0x00034e8a, 0x00034e8a, 0x00034e8a},
+};
+
+static const u32 ar9462_2p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9462_2p0_sys2ant[][2] = {
+ /* Addr allmodes */
+ {0x00063120, 0x00801980},
+};
+
+static const u32 ar9462_2p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x000e0085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00001810, 0x0f000003},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00080000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008050, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00b00005},
+ {0x000080d8, 0x00400002},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486e00},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x99c00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x0000001f},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0xffff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9462_2p0_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9462_common_mixed_rx_gain_table_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_modes_green_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+ {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
+ {0x00016054, 0x6db60180, 0x6db60180, 0x6db60180, 0x6db60180},
+ {0x00016444, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+ {0x00016448, 0x8db49000, 0x8db49000, 0x8db49000, 0x8db49000},
+ {0x00016454, 0x6db60180, 0x6db60180, 0x6db60180, 0x6db60180},
+};
+
+static const u32 ar9462_2p0_BTCOEX_MAX_TXPWR_table[][2] = {
+ /* Addr allmodes */
+ {0x000018c0, 0x10101010},
+ {0x000018c4, 0x10101010},
+ {0x000018c8, 0x10101010},
+ {0x000018cc, 0x10101010},
+ {0x000018d0, 0x10101010},
+ {0x000018d4, 0x10101010},
+ {0x000018d8, 0x10101010},
+ {0x000018dc, 0x10101010},
+};
+
+static const u32 ar9462_2p0_baseband_core_emulation[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafa68e30},
+ {0x00009884, 0x00002842},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009e50, 0x00000000},
+ {0x00009fcc, 0x00000014},
+ {0x0000a344, 0x00000010},
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x71733d01},
+ {0x0000a3a0, 0xd0ad5c12},
+ {0x0000a3c0, 0x22222220},
+ {0x0000a3c4, 0x22222222},
+ {0x0000a404, 0x00418a11},
+ {0x0000a418, 0x050001ce},
+ {0x0000a438, 0x00001800},
+ {0x0000a458, 0x01444452},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a690, 0x00000038},
+};
+
+#endif /* INITVALS_9462_2P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
new file mode 100644
index 00000000000..06b3f0df9fa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -0,0 +1,1673 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9580_1P0_H
+#define INITVALS_9580_1P0_H
+
+/* AR9580 1.0 */
+
+static const u32 ar9580_1p0_modes_fast_clock[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x0372131c, 0x0372131c},
+ {0x0000a230, 0x0000000b, 0x00000016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9580_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
+ {0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
+ {0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
+ {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9580_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x3280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a290},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x0d000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0x00000000},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000000},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b8d0, 0x004b6a8e},
+ {0x0000b8d4, 0x00000820},
+ {0x0000b8dc, 0x00000000},
+ {0x0000b8f0, 0x00000000},
+ {0x0000b8f4, 0x00000000},
+ {0x0000c2d0, 0x00000080},
+ {0x0000c2d4, 0x00000000},
+ {0x0000c2ec, 0x00000000},
+ {0x0000c2f0, 0x00000000},
+ {0x0000c2f4, 0x00000000},
+ {0x0000c2f8, 0x00000000},
+ {0x0000c408, 0x0e79e5c0},
+ {0x0000c40c, 0x00820820},
+ {0x0000c420, 0x00000000},
+};
+
+static const u32 ar9580_1p0_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9580_1p0_high_power_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9580_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9580_1p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9bc00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9580_1p0_mixed_ob_db_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x34001640, 0x34001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x11800400, 0x11800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x7082708c, 0x7082708c, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016048, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016448, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016848, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9580_1p0_wo_xlna_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9580_1p0_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
+};
+
+static const u32 ar9580_1p0_high_ob_db_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9580_1p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x000040a4, 0x00a0c1c9},
+ {0x00007008, 0x00000000},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007048, 0x00000008},
+};
+
+static const u32 ar9580_1p0_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9580_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x76d005b5},
+ {0x00016050, 0x556cf031},
+ {0x00016054, 0x13449440},
+ {0x00016058, 0x0c51c92c},
+ {0x0001605c, 0x3db7fffc},
+ {0x00016060, 0xfffffffc},
+ {0x00016064, 0x000f0278},
+ {0x0001606c, 0x6db60000},
+ {0x00016080, 0x00000000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x119f481e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x01e6c000},
+ {0x00016100, 0x3fffbe01},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x00000000},
+ {0x00016280, 0x058a0001},
+ {0x00016284, 0x3d840208},
+ {0x00016288, 0x05a20408},
+ {0x0001628c, 0x00038c07},
+ {0x00016290, 0x00000004},
+ {0x00016294, 0x458aa14f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x76d005b5},
+ {0x00016450, 0x556cf031},
+ {0x00016454, 0x13449440},
+ {0x00016458, 0x0c51c92c},
+ {0x0001645c, 0x3db7fffc},
+ {0x00016460, 0xfffffffc},
+ {0x00016464, 0x000f0278},
+ {0x0001646c, 0x6db60000},
+ {0x00016500, 0x3fffbe01},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x00000000},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db6db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x7f80fff8},
+ {0x0001684c, 0x76d005b5},
+ {0x00016850, 0x556cf031},
+ {0x00016854, 0x13449440},
+ {0x00016858, 0x0c51c92c},
+ {0x0001685c, 0x3db7fffc},
+ {0x00016860, 0xfffffffc},
+ {0x00016864, 0x000f0278},
+ {0x0001686c, 0x6db60000},
+ {0x00016900, 0x3fffbe01},
+ {0x00016904, 0xfff80000},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x02084080},
+ {0x00016948, 0x00000000},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00800700},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
+
+static const u32 ar9580_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x000036c0, 0x000036c4, 0x000036c4, 0x000036c0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+};
+
+static const u32 ar9580_1p0_pcie_phy_clkreq_enable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x0835365e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9580_1p0_pcie_phy_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x0831365e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9580_1p0_pcie_phy_pll_on_clkreq[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x0831265e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+#endif /* INITVALS_9580_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 46393f90f16..1c269f50822 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -87,17 +87,14 @@ struct ath_config {
* @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
* @BUF_AGGR: Indicates whether the buffer can be aggregated
* (used in aggregation scheduling)
- * @BUF_XRETRY: To denote excessive retries of the buffer
*/
enum buffer_type {
BUF_AMPDU = BIT(0),
BUF_AGGR = BIT(1),
- BUF_XRETRY = BIT(2),
};
#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
-#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
#define ATH_TXSTATUS_RING_SIZE 64
@@ -206,18 +203,19 @@ struct ath_atx_ac {
};
struct ath_frame_info {
+ struct ath_buf *bf;
int framelen;
- u32 keyix;
enum ath9k_key_type keytype;
+ u8 keyix;
u8 retries;
- u16 seqno;
};
struct ath_buf_state {
u8 bf_type;
u8 bfs_paprd;
+ u8 ndelim;
+ u16 seqno;
unsigned long bfs_paprd_timestamp;
- enum ath9k_internal_frame_type bfs_ftype;
};
struct ath_buf {
@@ -230,13 +228,12 @@ struct ath_buf {
dma_addr_t bf_daddr; /* physical addr of desc */
dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */
bool bf_stale;
- u16 bf_flags;
struct ath_buf_state bf_state;
};
struct ath_atx_tid {
struct list_head list;
- struct list_head buf_q;
+ struct sk_buff_head buf_q;
struct ath_node *an;
struct ath_atx_ac *ac;
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
@@ -273,14 +270,11 @@ struct ath_node {
struct ath_tx_control {
struct ath_txq *txq;
struct ath_node *an;
- int if_id;
- enum ath9k_internal_frame_type frame_type;
u8 paprd;
};
#define ATH_TX_ERROR 0x01
-#define ATH_TX_XRETRY 0x02
-#define ATH_TX_BAR 0x04
+#define ATH_TX_BAR 0x02
/**
* @txq_map: Index is mac80211 queue number. This is
@@ -346,7 +340,8 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an);
-bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an);
+void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
+ struct ath_node *an);
/********/
/* VIFs */
@@ -427,6 +422,7 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
#define ATH_PAPRD_TIMEOUT 100 /* msecs */
+void ath_reset_work(struct work_struct *work);
void ath_hw_check(struct work_struct *work);
void ath_hw_pll_work(struct work_struct *work);
void ath_paprd_calibrate(struct work_struct *work);
@@ -462,6 +458,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
#define ATH_LED_PIN_9287 8
#define ATH_LED_PIN_9300 10
#define ATH_LED_PIN_9485 6
+#define ATH_LED_PIN_9462 0
#ifdef CONFIG_MAC80211_LEDS
void ath_init_leds(struct ath_softc *sc);
@@ -561,8 +558,7 @@ struct ath_ant_comb {
#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
#define SC_OP_BT_SCAN BIT(13)
#define SC_OP_ANI_RUN BIT(14)
-#define SC_OP_ENABLE_APM BIT(15)
-#define SC_OP_PRIM_STA_VIF BIT(16)
+#define SC_OP_PRIM_STA_VIF BIT(15)
/* Powersave flags */
#define PS_WAIT_FOR_BEACON BIT(0)
@@ -570,7 +566,6 @@ struct ath_ant_comb {
#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
#define PS_WAIT_FOR_TX_ACK BIT(3)
#define PS_BEACON_SYNC BIT(4)
-#define PS_TSFOOR_SYNC BIT(5)
struct ath_rate_table;
@@ -608,6 +603,7 @@ struct ath_softc {
struct mutex mutex;
struct work_struct paprd_work;
struct work_struct hw_check_work;
+ struct work_struct hw_reset_work;
struct completion paprd_complete;
unsigned int hw_busy_count;
@@ -651,10 +647,10 @@ struct ath_softc {
struct ath_descdma txsdma;
struct ath_ant_comb ant_comb;
+ u8 ant_tx, ant_rx;
};
void ath9k_tasklet(unsigned long data);
-int ath_reset(struct ath_softc *sc, bool retry_tx);
int ath_cabq_update(struct ath_softc *);
static inline void ath_read_cachesize(struct ath_common *common, int *csz)
@@ -668,11 +664,11 @@ extern int led_blink;
extern bool is_ath9k_unloaded;
irqreturn_t ath_isr(int irq, void *dev);
-void ath9k_init_crypto(struct ath_softc *sc);
-int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+int ath9k_init_device(u16 devid, struct ath_softc *sc,
const struct ath_bus_ops *bus_ops);
void ath9k_deinit_device(struct ath_softc *sc);
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
+void ath9k_reload_chainmask_settings(struct ath_softc *sc);
void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
bool ath9k_uses_beacons(int type);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 0d13ff74a68..a13cabb9543 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -73,44 +73,39 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
struct sk_buff *skb = bf->bf_mpdu;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath_desc *ds;
- struct ath9k_11n_rate_series series[4];
- int flags, ctsrate = 0, ctsduration = 0;
+ struct ath_tx_info info;
struct ieee80211_supported_band *sband;
+ u8 chainmask = ah->txchainmask;
u8 rate = 0;
ath9k_reset_beacon_status(sc);
- ds = bf->bf_desc;
- flags = ATH9K_TXDESC_NOACK;
-
- ds->ds_link = 0;
-
sband = &sc->sbands[common->hw->conf.channel->band];
rate = sband->bitrates[rateidx].hw_value;
if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
rate |= sband->bitrates[rateidx].hw_value_short;
- ath9k_hw_set11n_txdesc(ah, ds, skb->len + FCS_LEN,
- ATH9K_PKT_TYPE_BEACON,
- MAX_RATE_POWER,
- ATH9K_TXKEYIX_INVALID,
- ATH9K_KEY_TYPE_CLEAR,
- flags);
-
- /* NB: beacon's BufLen must be a multiple of 4 bytes */
- ath9k_hw_filltxdesc(ah, ds, roundup(skb->len, 4),
- true, true, ds, bf->bf_buf_addr,
- sc->beacon.beaconq);
-
- memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
- series[0].Tries = 1;
- series[0].Rate = rate;
- series[0].ChSel = ath_txchainmask_reduction(sc,
- common->tx_chainmask, series[0].Rate);
- series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0;
- ath9k_hw_set11n_ratescenario(ah, ds, ds, 0, ctsrate, ctsduration,
- series, 4, 0);
+ memset(&info, 0, sizeof(info));
+ info.pkt_len = skb->len + FCS_LEN;
+ info.type = ATH9K_PKT_TYPE_BEACON;
+ info.txpower = MAX_RATE_POWER;
+ info.keyix = ATH9K_TXKEYIX_INVALID;
+ info.keytype = ATH9K_KEY_TYPE_CLEAR;
+ info.flags = ATH9K_TXDESC_NOACK;
+
+ info.buf_addr[0] = bf->bf_buf_addr;
+ info.buf_len[0] = roundup(skb->len, 4);
+
+ info.is_first = true;
+ info.is_last = true;
+
+ info.qcu = sc->beacon.beaconq;
+
+ info.rates[0].Tries = 1;
+ info.rates[0].Rate = rate;
+ info.rates[0].ChSel = ath_txchainmask_reduction(sc, chainmask, rate);
+
+ ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
}
static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
@@ -386,9 +381,7 @@ void ath_beacon_tasklet(unsigned long data)
ath_dbg(common, ATH_DBG_BSTUCK,
"beacon is officially stuck\n");
sc->sc_flags |= SC_OP_TSF_RESET;
- spin_lock(&sc->sc_pcu_lock);
- ath_reset(sc, true);
- spin_unlock(&sc->sc_pcu_lock);
+ ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}
return;
@@ -519,9 +512,11 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
/* Set the computed AP beacon timers */
ath9k_hw_disable_interrupts(ah);
+ sc->sc_flags |= SC_OP_TSF_RESET;
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
- ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
}
/*
@@ -648,12 +643,8 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
ath9k_hw_set_sta_beacon_timers(ah, &bs);
ah->imask |= ATH9K_INT_BMISS;
- /*
- * If the beacon config is called beacause of TSFOOR,
- * Interrupts will be enabled back at the end of ath9k_tasklet
- */
- if (!(sc->ps_flags & PS_TSFOOR_SYNC))
- ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
}
static void ath_beacon_config_adhoc(struct ath_softc *sc,
@@ -687,12 +678,9 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
ath9k_hw_disable_interrupts(ah);
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
- /*
- * If the beacon config is called beacause of TSFOOR,
- * Interrupts will be enabled back at the end of ath9k_tasklet
- */
- if (!(sc->ps_flags & PS_TSFOOR_SYNC))
- ath9k_hw_set_interrupts(ah, ah->imask);
+
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
}
static bool ath9k_allow_beacon_config(struct ath_softc *sc,
@@ -833,11 +821,11 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
if (status) {
/* Re-enable beaconing */
ah->imask |= ATH9K_INT_SWBA;
- ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_hw_set_interrupts(ah);
} else {
/* Disable SWBA interrupt */
ah->imask &= ~ATH9K_INT_SWBA;
- ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_hw_set_interrupts(ah);
tasklet_kill(&sc->bcon_tasklet);
ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
}
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index a1250c586e4..ebaf304f464 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -63,13 +63,25 @@ static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
return ath9k_hw_get_nf_limits(ah, chan)->nominal;
}
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ s8 noise = ATH_DEFAULT_NOISE_FLOOR;
+
+ if (chan && chan->noisefloor) {
+ s8 delta = chan->noisefloor -
+ ath9k_hw_get_default_nf(ah, chan);
+ if (delta > 0)
+ noise += delta;
+ }
+ return noise;
+}
+EXPORT_SYMBOL(ath9k_hw_getchan_noise);
static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
struct ath9k_hw_cal_data *cal,
int16_t *nfarray)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
struct ath_nf_limits *limit;
struct ath9k_nfcal_hist *h;
bool high_nf_mid = false;
@@ -81,7 +93,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
for (i = 0; i < NUM_NF_READINGS; i++) {
if (!(chainmask & (1 << i)) ||
- ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
+ ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(ah->curchan)))
continue;
h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
@@ -378,6 +390,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
if (!caldata) {
chan->noisefloor = nf;
+ ah->noise = ath9k_hw_getchan_noise(ah, chan);
return false;
}
@@ -385,6 +398,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
caldata->nfcal_pending = false;
ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
chan->noisefloor = h[0].privNF;
+ ah->noise = ath9k_hw_getchan_noise(ah, chan);
return true;
}
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 1bef41d1b1f..05b9dbf8185 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -108,6 +108,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
void ath9k_hw_bstuck_nfcal(struct ath_hw *ah);
void ath9k_hw_reset_calibration(struct ath_hw *ah,
struct ath9k_cal_list *currCal);
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
#endif /* CALIB_H */
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index fa6bd2d189e..905f1b31396 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -161,14 +161,42 @@ EXPORT_SYMBOL(ath9k_cmn_count_streams);
void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
u16 new_txpow, u16 *txpower)
{
- if (cur_txpow != new_txpow) {
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+
+ if (reg->power_limit != new_txpow) {
ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
/* read back in case value is clamped */
- *txpower = ath9k_hw_regulatory(ah)->power_limit;
+ *txpower = reg->max_power_level;
}
}
EXPORT_SYMBOL(ath9k_cmn_update_txpow);
+void ath9k_cmn_init_crypto(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int i = 0;
+
+ /* Get the hardware key cache size. */
+ common->keymax = AR_KEYTABLE_SIZE;
+
+ /*
+ * Check whether the separate key cache entries
+ * are required to handle both tx+rx MIC keys.
+ * With split mic keys the number of stations is limited
+ * to 27 otherwise 59.
+ */
+ if (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
+ common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
+
+ /*
+ * Reset the key cache since some parts do not
+ * reset the contents on initial power up.
+ */
+ for (i = 0; i < common->keymax; i++)
+ ath_hw_keyreset(common, (u16) i);
+}
+EXPORT_SYMBOL(ath9k_cmn_init_crypto);
+
static int __init ath9k_cmn_init(void)
{
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 77ec288b5a7..ad14fecc76c 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -62,3 +62,4 @@ void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
enum ath_stomp_type stomp_type);
void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
u16 new_txpow, u16 *txpower);
+void ath9k_cmn_init_crypto(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index d1eb89611ff..327aa28f603 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -95,11 +95,11 @@ static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
char buf[32];
unsigned int len;
- len = sprintf(buf, "0x%08x\n", common->tx_chainmask);
+ len = sprintf(buf, "0x%08x\n", ah->txchainmask);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -107,7 +107,7 @@ static ssize_t write_file_tx_chainmask(struct file *file, const char __user *use
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
unsigned long mask;
char buf[32];
ssize_t len;
@@ -120,8 +120,8 @@ static ssize_t write_file_tx_chainmask(struct file *file, const char __user *use
if (strict_strtoul(buf, 0, &mask))
return -EINVAL;
- common->tx_chainmask = mask;
- sc->sc_ah->caps.tx_chainmask = mask;
+ ah->txchainmask = mask;
+ ah->caps.tx_chainmask = mask;
return count;
}
@@ -138,11 +138,11 @@ static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
char buf[32];
unsigned int len;
- len = sprintf(buf, "0x%08x\n", common->rx_chainmask);
+ len = sprintf(buf, "0x%08x\n", ah->rxchainmask);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -150,7 +150,7 @@ static ssize_t write_file_rx_chainmask(struct file *file, const char __user *use
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
unsigned long mask;
char buf[32];
ssize_t len;
@@ -163,8 +163,8 @@ static ssize_t write_file_rx_chainmask(struct file *file, const char __user *use
if (strict_strtoul(buf, 0, &mask))
return -EINVAL;
- common->rx_chainmask = mask;
- sc->sc_ah->caps.rx_chainmask = mask;
+ ah->rxchainmask = mask;
+ ah->caps.rx_chainmask = mask;
return count;
}
@@ -523,9 +523,22 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
if (tmp & ATH9K_RX_FILTER_PHYRADAR)
len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
if (tmp & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
- len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL\n");
- else
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "\n\nReset causes:\n"
+ " baseband hang: %d\n"
+ " baseband watchdog: %d\n"
+ " fatal hardware error interrupt: %d\n"
+ " tx hardware error: %d\n"
+ " tx path hang: %d\n"
+ " pll rx hang: %d\n",
+ sc->debug.stats.reset[RESET_TYPE_BB_HANG],
+ sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG],
+ sc->debug.stats.reset[RESET_TYPE_FATAL_INT],
+ sc->debug.stats.reset[RESET_TYPE_TX_ERROR],
+ sc->debug.stats.reset[RESET_TYPE_TX_HANG],
+ sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -711,7 +724,7 @@ static ssize_t read_file_stations(struct file *file, char __user *user_buf,
" tid: %p %s %s %i %p %p\n",
tid, tid->sched ? "sched" : "idle",
tid->paused ? "paused" : "running",
- list_empty(&tid->buf_q),
+ skb_queue_empty(&tid->buf_q),
tid->an, tid->ac);
if (len >= size)
goto done;
@@ -826,20 +839,23 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
}
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_tx_status *ts, struct ath_txq *txq)
+ struct ath_tx_status *ts, struct ath_txq *txq,
+ unsigned int flags)
{
+#define TX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].ts\
+ [sc->debug.tsidx].c)
int qnum = txq->axq_qnum;
TX_STAT_INC(qnum, tx_pkts_all);
sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
if (bf_isampdu(bf)) {
- if (bf_isxretried(bf))
+ if (flags & ATH_TX_BAR)
TX_STAT_INC(qnum, a_xretries);
else
TX_STAT_INC(qnum, a_completed);
} else {
- if (bf_isxretried(bf))
+ if (ts->ts_status & ATH9K_TXERR_XRETRY)
TX_STAT_INC(qnum, xretries);
else
TX_STAT_INC(qnum, completed);
@@ -857,6 +873,35 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
TX_STAT_INC(qnum, data_underrun);
if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
TX_STAT_INC(qnum, delim_underrun);
+
+ spin_lock(&sc->debug.samp_lock);
+ TX_SAMP_DBG(jiffies) = jiffies;
+ TX_SAMP_DBG(rssi_ctl0) = ts->ts_rssi_ctl0;
+ TX_SAMP_DBG(rssi_ctl1) = ts->ts_rssi_ctl1;
+ TX_SAMP_DBG(rssi_ctl2) = ts->ts_rssi_ctl2;
+ TX_SAMP_DBG(rssi_ext0) = ts->ts_rssi_ext0;
+ TX_SAMP_DBG(rssi_ext1) = ts->ts_rssi_ext1;
+ TX_SAMP_DBG(rssi_ext2) = ts->ts_rssi_ext2;
+ TX_SAMP_DBG(rateindex) = ts->ts_rateindex;
+ TX_SAMP_DBG(isok) = !!(ts->ts_status & ATH9K_TXERR_MASK);
+ TX_SAMP_DBG(rts_fail_cnt) = ts->ts_shortretry;
+ TX_SAMP_DBG(data_fail_cnt) = ts->ts_longretry;
+ TX_SAMP_DBG(rssi) = ts->ts_rssi;
+ TX_SAMP_DBG(tid) = ts->tid;
+ TX_SAMP_DBG(qid) = ts->qid;
+
+ if (ts->ts_flags & ATH9K_TX_BA) {
+ TX_SAMP_DBG(ba_low) = ts->ba_low;
+ TX_SAMP_DBG(ba_high) = ts->ba_high;
+ } else {
+ TX_SAMP_DBG(ba_low) = 0;
+ TX_SAMP_DBG(ba_high) = 0;
+ }
+
+ sc->debug.tsidx = (sc->debug.tsidx + 1) % ATH_DBG_MAX_SAMPLES;
+ spin_unlock(&sc->debug.samp_lock);
+
+#undef TX_SAMP_DBG
}
static const struct file_operations fops_xmit = {
@@ -995,6 +1040,8 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
{
#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
+#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\
+ [sc->debug.rsidx].c)
u32 phyerr;
@@ -1030,8 +1077,25 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
sc->debug.stats.rxstats.rs_antenna = rs->rs_antenna;
+ spin_lock(&sc->debug.samp_lock);
+ RX_SAMP_DBG(jiffies) = jiffies;
+ RX_SAMP_DBG(rssi_ctl0) = rs->rs_rssi_ctl0;
+ RX_SAMP_DBG(rssi_ctl1) = rs->rs_rssi_ctl1;
+ RX_SAMP_DBG(rssi_ctl2) = rs->rs_rssi_ctl2;
+ RX_SAMP_DBG(rssi_ext0) = rs->rs_rssi_ext0;
+ RX_SAMP_DBG(rssi_ext1) = rs->rs_rssi_ext1;
+ RX_SAMP_DBG(rssi_ext2) = rs->rs_rssi_ext2;
+ RX_SAMP_DBG(antenna) = rs->rs_antenna;
+ RX_SAMP_DBG(rssi) = rs->rs_rssi;
+ RX_SAMP_DBG(rate) = rs->rs_rate;
+ RX_SAMP_DBG(is_mybeacon) = rs->is_mybeacon;
+
+ sc->debug.rsidx = (sc->debug.rsidx + 1) % ATH_DBG_MAX_SAMPLES;
+ spin_unlock(&sc->debug.samp_lock);
+
#undef RX_STAT_INC
#undef RX_PHY_ERR_INC
+#undef RX_SAMP_DBG
}
static const struct file_operations fops_recv = {
@@ -1163,6 +1227,389 @@ static const struct file_operations fops_regdump = {
.llseek = default_llseek,/* read accesses f_pos */
};
+static ssize_t read_file_dump_nfcal(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_nfcal_hist *h = sc->caldata.nfCalHist;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &common->hw->conf;
+ u32 len = 0, size = 1500;
+ u32 i, j;
+ ssize_t retval = 0;
+ char *buf;
+ u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
+ u8 nread;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += snprintf(buf + len, size - len,
+ "Channel Noise Floor : %d\n", ah->noise);
+ len += snprintf(buf + len, size - len,
+ "Chain | privNF | # Readings | NF Readings\n");
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (!(chainmask & (1 << i)) ||
+ ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
+ continue;
+
+ nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount;
+ len += snprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
+ i, h[i].privNF, nread);
+ for (j = 0; j < nread; j++)
+ len += snprintf(buf + len, size - len,
+ " %d", h[i].nfCalBuffer[j]);
+ len += snprintf(buf + len, size - len, "\n");
+ }
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_dump_nfcal = {
+ .read = read_file_dump_nfcal,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ u32 len = 0, size = 1500;
+ ssize_t retval = 0;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = ah->eep_ops->dump_eeprom(ah, true, buf, len, size);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_base_eeprom = {
+ .read = read_file_base_eeprom,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ u32 len = 0, size = 6000;
+ char *buf;
+ size_t retval;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len = ah->eep_ops->dump_eeprom(ah, false, buf, len, size);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_modal_eeprom = {
+ .read = read_file_modal_eeprom,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
+{
+#define ATH_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].c)
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ unsigned long flags;
+ int i;
+
+ ath9k_ps_wakeup(sc);
+
+ spin_lock_bh(&sc->debug.samp_lock);
+
+ spin_lock_irqsave(&common->cc_lock, flags);
+ ath_hw_cycle_counters_update(common);
+
+ ATH_SAMP_DBG(cc.cycles) = common->cc_ani.cycles;
+ ATH_SAMP_DBG(cc.rx_busy) = common->cc_ani.rx_busy;
+ ATH_SAMP_DBG(cc.rx_frame) = common->cc_ani.rx_frame;
+ ATH_SAMP_DBG(cc.tx_frame) = common->cc_ani.tx_frame;
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+
+ ATH_SAMP_DBG(noise) = ah->noise;
+
+ REG_WRITE_D(ah, AR_MACMISC,
+ ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
+ (AR_MACMISC_MISC_OBS_BUS_1 <<
+ AR_MACMISC_MISC_OBS_BUS_MSB_S)));
+
+ for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
+ ATH_SAMP_DBG(dma_dbg_reg_vals[i]) = REG_READ_D(ah,
+ AR_DMADBG_0 + (i * sizeof(u32)));
+
+ ATH_SAMP_DBG(pcu_obs) = REG_READ_D(ah, AR_OBS_BUS_1);
+ ATH_SAMP_DBG(pcu_cr) = REG_READ_D(ah, AR_CR);
+
+ memcpy(ATH_SAMP_DBG(nfCalHist), sc->caldata.nfCalHist,
+ sizeof(ATH_SAMP_DBG(nfCalHist)));
+
+ sc->debug.sampidx = (sc->debug.sampidx + 1) % ATH_DBG_MAX_SAMPLES;
+ spin_unlock_bh(&sc->debug.samp_lock);
+ ath9k_ps_restore(sc);
+
+#undef ATH_SAMP_DBG
+}
+
+static int open_file_bb_mac_samps(struct inode *inode, struct file *file)
+{
+#define ATH_SAMP_DBG(c) bb_mac_samp[sampidx].c
+ struct ath_softc *sc = inode->i_private;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &common->hw->conf;
+ struct ath_dbg_bb_mac_samp *bb_mac_samp;
+ struct ath9k_nfcal_hist *h;
+ int i, j, qcuOffset = 0, dcuOffset = 0;
+ u32 *qcuBase, *dcuBase, size = 30000, len = 0;
+ u32 sampidx = 0;
+ u8 *buf;
+ u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
+ u8 nread;
+
+ if (sc->sc_flags & SC_OP_INVALID)
+ return -EAGAIN;
+
+ buf = vmalloc(size);
+ if (!buf)
+ return -ENOMEM;
+ bb_mac_samp = vmalloc(sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
+ if (!bb_mac_samp) {
+ vfree(buf);
+ return -ENOMEM;
+ }
+ /* Account the current state too */
+ ath9k_debug_samp_bb_mac(sc);
+
+ spin_lock_bh(&sc->debug.samp_lock);
+ memcpy(bb_mac_samp, sc->debug.bb_mac_samp,
+ sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
+ len += snprintf(buf + len, size - len,
+ "Current Sample Index: %d\n", sc->debug.sampidx);
+ spin_unlock_bh(&sc->debug.samp_lock);
+
+ len += snprintf(buf + len, size - len,
+ "Raw DMA Debug Dump:\n");
+ len += snprintf(buf + len, size - len, "Sample |\t");
+ for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
+ len += snprintf(buf + len, size - len, " DMA Reg%d |\t", i);
+ len += snprintf(buf + len, size - len, "\n");
+
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ len += snprintf(buf + len, size - len, "%d\t", sampidx);
+
+ for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
+ len += snprintf(buf + len, size - len, " %08x\t",
+ ATH_SAMP_DBG(dma_dbg_reg_vals[i]));
+ len += snprintf(buf + len, size - len, "\n");
+ }
+ len += snprintf(buf + len, size - len, "\n");
+
+ len += snprintf(buf + len, size - len,
+ "Sample Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
+ dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
+
+ for (i = 0; i < ATH9K_NUM_QUEUES; i++,
+ qcuOffset += 4, dcuOffset += 5) {
+ if (i == 8) {
+ qcuOffset = 0;
+ qcuBase++;
+ }
+
+ if (i == 6) {
+ dcuOffset = 0;
+ dcuBase++;
+ }
+ if (!sc->debug.stats.txstats[i].queued)
+ continue;
+
+ len += snprintf(buf + len, size - len,
+ "%4d %7d %2x %1x %2x %2x\n",
+ sampidx, i,
+ (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
+ (*qcuBase & (0x8 << qcuOffset)) >>
+ (qcuOffset + 3),
+ ATH_SAMP_DBG(dma_dbg_reg_vals[2]) &
+ (0x7 << (i * 3)) >> (i * 3),
+ (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
+ }
+ len += snprintf(buf + len, size - len, "\n");
+ }
+ len += snprintf(buf + len, size - len,
+ "samp qcu_sh qcu_fh qcu_comp dcu_comp dcu_arb dcu_fp "
+ "ch_idle_dur ch_idle_dur_val txfifo_val0 txfifo_val1 "
+ "txfifo_dcu0 txfifo_dcu1 pcu_obs AR_CR\n");
+
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
+ dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
+
+ len += snprintf(buf + len, size - len, "%4d %5x %5x ", sampidx,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x003c0000) >> 18,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x03c00000) >> 22);
+ len += snprintf(buf + len, size - len, "%7x %8x ",
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x1c000000) >> 26,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x3));
+ len += snprintf(buf + len, size - len, "%7x %7x ",
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x06000000) >> 25,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x38000000) >> 27);
+ len += snprintf(buf + len, size - len, "%7d %12d ",
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x000003fc) >> 2,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000400) >> 10);
+ len += snprintf(buf + len, size - len, "%12d %12d ",
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000800) >> 11,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00001000) >> 12);
+ len += snprintf(buf + len, size - len, "%12d %12d ",
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x0001e000) >> 13,
+ (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x001e0000) >> 17);
+ len += snprintf(buf + len, size - len, "0x%07x 0x%07x\n",
+ ATH_SAMP_DBG(pcu_obs), ATH_SAMP_DBG(pcu_cr));
+ }
+
+ len += snprintf(buf + len, size - len,
+ "Sample ChNoise Chain privNF #Reading Readings\n");
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ h = ATH_SAMP_DBG(nfCalHist);
+ if (!ATH_SAMP_DBG(noise))
+ continue;
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (!(chainmask & (1 << i)) ||
+ ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
+ continue;
+
+ nread = AR_PHY_CCA_FILTERWINDOW_LENGTH -
+ h[i].invalidNFcount;
+ len += snprintf(buf + len, size - len,
+ "%4d %5d %4d\t %d\t %d\t",
+ sampidx, ATH_SAMP_DBG(noise),
+ i, h[i].privNF, nread);
+ for (j = 0; j < nread; j++)
+ len += snprintf(buf + len, size - len,
+ " %d", h[i].nfCalBuffer[j]);
+ len += snprintf(buf + len, size - len, "\n");
+ }
+ }
+ len += snprintf(buf + len, size - len, "\nCycle counters:\n"
+ "Sample Total Rxbusy Rxframes Txframes\n");
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ if (!ATH_SAMP_DBG(cc.cycles))
+ continue;
+ len += snprintf(buf + len, size - len,
+ "%4d %08x %08x %08x %08x\n",
+ sampidx, ATH_SAMP_DBG(cc.cycles),
+ ATH_SAMP_DBG(cc.rx_busy),
+ ATH_SAMP_DBG(cc.rx_frame),
+ ATH_SAMP_DBG(cc.tx_frame));
+ }
+
+ len += snprintf(buf + len, size - len, "Tx status Dump :\n");
+ len += snprintf(buf + len, size - len,
+ "Sample rssi:- ctl0 ctl1 ctl2 ext0 ext1 ext2 comb "
+ "isok rts_fail data_fail rate tid qid "
+ "ba_low ba_high tx_before(ms)\n");
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
+ if (!ATH_SAMP_DBG(ts[i].jiffies))
+ continue;
+ len += snprintf(buf + len, size - len, "%-14d"
+ "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-4d %-8d "
+ "%-9d %-4d %-3d %-3d %08x %08x %-11d\n",
+ sampidx,
+ ATH_SAMP_DBG(ts[i].rssi_ctl0),
+ ATH_SAMP_DBG(ts[i].rssi_ctl1),
+ ATH_SAMP_DBG(ts[i].rssi_ctl2),
+ ATH_SAMP_DBG(ts[i].rssi_ext0),
+ ATH_SAMP_DBG(ts[i].rssi_ext1),
+ ATH_SAMP_DBG(ts[i].rssi_ext2),
+ ATH_SAMP_DBG(ts[i].rssi),
+ ATH_SAMP_DBG(ts[i].isok),
+ ATH_SAMP_DBG(ts[i].rts_fail_cnt),
+ ATH_SAMP_DBG(ts[i].data_fail_cnt),
+ ATH_SAMP_DBG(ts[i].rateindex),
+ ATH_SAMP_DBG(ts[i].tid),
+ ATH_SAMP_DBG(ts[i].qid),
+ ATH_SAMP_DBG(ts[i].ba_low),
+ ATH_SAMP_DBG(ts[i].ba_high),
+ jiffies_to_msecs(jiffies -
+ ATH_SAMP_DBG(ts[i].jiffies)));
+ }
+ }
+
+ len += snprintf(buf + len, size - len, "Rx status Dump :\n");
+ len += snprintf(buf + len, size - len, "Sample rssi:- ctl0 ctl1 ctl2 "
+ "ext0 ext1 ext2 comb beacon ant rate rx_before(ms)\n");
+ for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
+ for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
+ if (!ATH_SAMP_DBG(rs[i].jiffies))
+ continue;
+ len += snprintf(buf + len, size - len, "%-14d"
+ "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-9s %-2d %02x %-13d\n",
+ sampidx,
+ ATH_SAMP_DBG(rs[i].rssi_ctl0),
+ ATH_SAMP_DBG(rs[i].rssi_ctl1),
+ ATH_SAMP_DBG(rs[i].rssi_ctl2),
+ ATH_SAMP_DBG(rs[i].rssi_ext0),
+ ATH_SAMP_DBG(rs[i].rssi_ext1),
+ ATH_SAMP_DBG(rs[i].rssi_ext2),
+ ATH_SAMP_DBG(rs[i].rssi),
+ ATH_SAMP_DBG(rs[i].is_mybeacon) ?
+ "True" : "False",
+ ATH_SAMP_DBG(rs[i].antenna),
+ ATH_SAMP_DBG(rs[i].rate),
+ jiffies_to_msecs(jiffies -
+ ATH_SAMP_DBG(rs[i].jiffies)));
+ }
+ }
+
+ vfree(bb_mac_samp);
+ file->private_data = buf;
+
+ return 0;
+#undef ATH_SAMP_DBG
+}
+
+static const struct file_operations fops_samps = {
+ .open = open_file_bb_mac_samps,
+ .read = ath9k_debugfs_read_buf,
+ .release = ath9k_debugfs_release_buf,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1206,6 +1653,14 @@ int ath9k_init_debug(struct ath_hw *ah)
&ah->config.cwm_ignore_extcca);
debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_regdump);
+ debugfs_create_file("dump_nfcal", S_IRUSR, sc->debug.debugfs_phy, sc,
+ &fops_dump_nfcal);
+ debugfs_create_file("base_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
+ &fops_base_eeprom);
+ debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
+ &fops_modal_eeprom);
+ debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
+ &fops_samps);
debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
@@ -1214,5 +1669,9 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
sc->debug.regidx = 0;
+ memset(&sc->debug.bb_mac_samp, 0, sizeof(sc->debug.bb_mac_samp));
+ sc->debug.sampidx = 0;
+ sc->debug.tsidx = 0;
+ sc->debug.rsidx = 0;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 4a04510e111..356352ac2d6 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -25,8 +25,10 @@ struct ath_buf;
#ifdef CONFIG_ATH9K_DEBUGFS
#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
+#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
#else
#define TX_STAT_INC(q, c) do { } while (0)
+#define RESET_STAT_INC(sc, type) do { } while (0)
#endif
#ifdef CONFIG_ATH9K_DEBUGFS
@@ -171,23 +173,80 @@ struct ath_rx_stats {
u8 rs_antenna;
};
+enum ath_reset_type {
+ RESET_TYPE_BB_HANG,
+ RESET_TYPE_BB_WATCHDOG,
+ RESET_TYPE_FATAL_INT,
+ RESET_TYPE_TX_ERROR,
+ RESET_TYPE_TX_HANG,
+ RESET_TYPE_PLL_HANG,
+ __RESET_TYPE_MAX
+};
+
struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
struct ath_rx_stats rxstats;
+ u32 reset[__RESET_TYPE_MAX];
+};
+
+#define ATH_DBG_MAX_SAMPLES 10
+struct ath_dbg_bb_mac_samp {
+ u32 dma_dbg_reg_vals[ATH9K_NUM_DMA_DEBUG_REGS];
+ u32 pcu_obs, pcu_cr, noise;
+ struct {
+ u64 jiffies;
+ int8_t rssi_ctl0;
+ int8_t rssi_ctl1;
+ int8_t rssi_ctl2;
+ int8_t rssi_ext0;
+ int8_t rssi_ext1;
+ int8_t rssi_ext2;
+ int8_t rssi;
+ bool isok;
+ u8 rts_fail_cnt;
+ u8 data_fail_cnt;
+ u8 rateindex;
+ u8 qid;
+ u8 tid;
+ u32 ba_low;
+ u32 ba_high;
+ } ts[ATH_DBG_MAX_SAMPLES];
+ struct {
+ u64 jiffies;
+ int8_t rssi_ctl0;
+ int8_t rssi_ctl1;
+ int8_t rssi_ctl2;
+ int8_t rssi_ext0;
+ int8_t rssi_ext1;
+ int8_t rssi_ext2;
+ int8_t rssi;
+ bool is_mybeacon;
+ u8 antenna;
+ u8 rate;
+ } rs[ATH_DBG_MAX_SAMPLES];
+ struct ath_cycle_counters cc;
+ struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
};
struct ath9k_debug {
struct dentry *debugfs_phy;
u32 regidx;
struct ath_stats stats;
+ spinlock_t samp_lock;
+ struct ath_dbg_bb_mac_samp bb_mac_samp[ATH_DBG_MAX_SAMPLES];
+ u8 sampidx;
+ u8 tsidx;
+ u8 rsidx;
};
int ath9k_init_debug(struct ath_hw *ah);
+void ath9k_debug_samp_bb_mac(struct ath_softc *sc);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_tx_status *ts, struct ath_txq *txq);
+ struct ath_tx_status *ts, struct ath_txq *txq,
+ unsigned int flags);
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
#else
@@ -197,6 +256,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah)
return 0;
}
+static inline void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
+{
+}
+
static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
enum ath9k_int status)
{
@@ -205,7 +268,8 @@ static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
static inline void ath_debug_stat_tx(struct ath_softc *sc,
struct ath_buf *bf,
struct ath_tx_status *ts,
- struct ath_txq *txq)
+ struct ath_txq *txq,
+ unsigned int flags)
{
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index e61404dda8c..e46f751ab50 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -456,12 +456,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
pPdGainBoundaries[i] =
min((u16)MAX_RATE_POWER, pPdGainBoundaries[i]);
- if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) {
- minDelta = pPdGainBoundaries[0] - 23;
- pPdGainBoundaries[0] = 23;
- } else {
- minDelta = 0;
- }
+ minDelta = 0;
if (i == 0) {
if (AR_SREV_9280_20_OR_LATER(ah))
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index de99c0da52e..49abd34be74 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -104,16 +104,11 @@
#define OLC_FOR_AR9287_10_LATER (AR_SREV_9287_11_OR_LATER(ah) && \
ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
-#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c
-#define AR_EEPROM_RFSILENT_GPIO_SEL_S 2
-#define AR_EEPROM_RFSILENT_POLARITY 0x0002
-#define AR_EEPROM_RFSILENT_POLARITY_S 1
-
#define EEP_RFSILENT_ENABLED 0x0001
#define EEP_RFSILENT_ENABLED_S 0
#define EEP_RFSILENT_POLARITY 0x0002
#define EEP_RFSILENT_POLARITY_S 1
-#define EEP_RFSILENT_GPIO_SEL 0x001c
+#define EEP_RFSILENT_GPIO_SEL (AR_SREV_9462(ah) ? 0x00fc : 0x001c)
#define EEP_RFSILENT_GPIO_SEL_S 2
#define AR5416_OPFLAGS_11A 0x01
@@ -225,7 +220,6 @@ enum eeprom_param {
EEP_MAC_MID,
EEP_MAC_LSW,
EEP_REG_0,
- EEP_REG_1,
EEP_OP_CAP,
EEP_OP_MODE,
EEP_RF_SILENT,
@@ -253,7 +247,9 @@ enum eeprom_param {
EEP_PAPRD,
EEP_MODAL_VER,
EEP_ANT_DIV_CTL1,
- EEP_CHAIN_MASK_REDUCE
+ EEP_CHAIN_MASK_REDUCE,
+ EEP_ANTENNA_GAIN_2G,
+ EEP_ANTENNA_GAIN_5G
};
enum ar5416_rates {
@@ -649,14 +645,15 @@ struct eeprom_ops {
int (*check_eeprom)(struct ath_hw *hw);
u32 (*get_eeprom)(struct ath_hw *hw, enum eeprom_param param);
bool (*fill_eeprom)(struct ath_hw *hw);
+ u32 (*dump_eeprom)(struct ath_hw *hw, bool dump_base_hdr, u8 *buf,
+ u32 len, u32 size);
int (*get_eeprom_ver)(struct ath_hw *hw);
int (*get_eeprom_rev)(struct ath_hw *hw);
void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
void (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan,
u16 cfgCtl, u8 twiceAntennaReduction,
- u8 twiceMaxRegulatoryPower, u8 powerLimit,
- bool test);
+ u8 powerLimit, bool test);
u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
};
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 47cc95086e6..9a7520f987f 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -72,6 +72,117 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
return __ath9k_hw_4k_fill_eeprom(ah);
}
+#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+static u32 ath9k_dump_4k_modal_eeprom(char *buf, u32 len, u32 size,
+ struct modal_eep_4k_header *modal_hdr)
+{
+ PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]);
+ PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon);
+ PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
+ PR_EEP("Switch Settle", modal_hdr->switchSettling);
+ PR_EEP("Chain0 TxRxAtten", modal_hdr->txRxAttenCh[0]);
+ PR_EEP("Chain0 RxTxMargin", modal_hdr->rxTxMarginCh[0]);
+ PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
+ PR_EEP("PGA Desired size", modal_hdr->pgaDesiredSize);
+ PR_EEP("Chain0 xlna Gain", modal_hdr->xlnaGainCh[0]);
+ PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
+ PR_EEP("txEndToRxOn", modal_hdr->txEndToRxOn);
+ PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
+ PR_EEP("CCA Threshold)", modal_hdr->thresh62);
+ PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
+ PR_EEP("xpdGain", modal_hdr->xpdGain);
+ PR_EEP("External PD", modal_hdr->xpd);
+ PR_EEP("Chain0 I Coefficient", modal_hdr->iqCalICh[0]);
+ PR_EEP("Chain0 Q Coefficient", modal_hdr->iqCalQCh[0]);
+ PR_EEP("pdGainOverlap", modal_hdr->pdGainOverlap);
+ PR_EEP("O/D Bias Version", modal_hdr->version);
+ PR_EEP("CCK OutputBias", modal_hdr->ob_0);
+ PR_EEP("BPSK OutputBias", modal_hdr->ob_1);
+ PR_EEP("QPSK OutputBias", modal_hdr->ob_2);
+ PR_EEP("16QAM OutputBias", modal_hdr->ob_3);
+ PR_EEP("64QAM OutputBias", modal_hdr->ob_4);
+ PR_EEP("CCK Driver1_Bias", modal_hdr->db1_0);
+ PR_EEP("BPSK Driver1_Bias", modal_hdr->db1_1);
+ PR_EEP("QPSK Driver1_Bias", modal_hdr->db1_2);
+ PR_EEP("16QAM Driver1_Bias", modal_hdr->db1_3);
+ PR_EEP("64QAM Driver1_Bias", modal_hdr->db1_4);
+ PR_EEP("CCK Driver2_Bias", modal_hdr->db2_0);
+ PR_EEP("BPSK Driver2_Bias", modal_hdr->db2_1);
+ PR_EEP("QPSK Driver2_Bias", modal_hdr->db2_2);
+ PR_EEP("16QAM Driver2_Bias", modal_hdr->db2_3);
+ PR_EEP("64QAM Driver2_Bias", modal_hdr->db2_4);
+ PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
+ PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
+ PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
+ PR_EEP("HT40 Power Inc.", modal_hdr->ht40PowerIncForPdadc);
+ PR_EEP("Chain0 bswAtten", modal_hdr->bswAtten[0]);
+ PR_EEP("Chain0 bswMargin", modal_hdr->bswMargin[0]);
+ PR_EEP("HT40 Switch Settle", modal_hdr->swSettleHt40);
+ PR_EEP("Chain0 xatten2Db", modal_hdr->xatten2Db[0]);
+ PR_EEP("Chain0 xatten2Margin", modal_hdr->xatten2Margin[0]);
+ PR_EEP("Ant. Diversity ctl1", modal_hdr->antdiv_ctl1);
+ PR_EEP("Ant. Diversity ctl2", modal_hdr->antdiv_ctl2);
+ PR_EEP("TX Diversity", modal_hdr->tx_diversity);
+
+ return len;
+}
+
+static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
+ struct base_eep_header_4k *pBase = &eep->baseEepHeader;
+
+ if (!dump_base_hdr) {
+ len += snprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
+ len += ath9k_dump_4k_modal_eeprom(buf, len, size,
+ &eep->modalHeader);
+ goto out;
+ }
+
+ PR_EEP("Major Version", pBase->version >> 12);
+ PR_EEP("Minor Version", pBase->version & 0xFFF);
+ PR_EEP("Checksum", pBase->checksum);
+ PR_EEP("Length", pBase->length);
+ PR_EEP("RegDomain1", pBase->regDmn[0]);
+ PR_EEP("RegDomain2", pBase->regDmn[1]);
+ PR_EEP("TX Mask", pBase->txMask);
+ PR_EEP("RX Mask", pBase->rxMask);
+ PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
+ PR_EEP("Allow 2GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
+ PR_EEP("Disable 2GHz HT20", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_2G_HT20));
+ PR_EEP("Disable 2GHz HT40", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_2G_HT40));
+ PR_EEP("Disable 5Ghz HT20", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_5G_HT20));
+ PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_5G_HT40));
+ PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01));
+ PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF);
+ PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF);
+ PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
+ PR_EEP("TX Gain type", pBase->txGainType);
+
+ len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
+
+out:
+ if (len > size)
+ len = size;
+
+ return len;
+}
+#else
+static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ return 0;
+}
+#endif
+
+
#undef SIZE_EEPROM_4K
static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
@@ -211,8 +322,6 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
return get_unaligned_be16(pBase->macAddr + 4);
case EEP_REG_0:
return pBase->regDmn[0];
- case EEP_REG_1:
- return pBase->regDmn[1];
case EEP_OP_CAP:
return pBase->deviceCap;
case EEP_OP_MODE:
@@ -238,18 +347,16 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
case EEP_ANT_DIV_CTL1:
return pModal->antdiv_ctl1;
case EEP_TXGAIN_TYPE:
- if (ver_minor >= AR5416_EEP_MINOR_VER_19)
- return pBase->txGainType;
- else
- return AR5416_EEP_TXGAIN_ORIGINAL;
+ return pBase->txGainType;
+ case EEP_ANTENNA_GAIN_2G:
+ return pModal->antennaGainCh[0];
default:
return 0;
}
}
static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
- struct ath9k_channel *chan,
- int16_t *pTxPowerIndexOffset)
+ struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
@@ -298,12 +405,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0);
for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) {
- if (AR_SREV_5416_20_OR_LATER(ah) &&
- (ah->rxchainmask == 5 || ah->txchainmask == 5) &&
- (i != 0)) {
- regChainOffset = (i == 1) ? 0x2000 : 0x1000;
- } else
- regChainOffset = i * 0x1000;
+ regChainOffset = i * 0x1000;
if (pEepData->baseEepHeader.txMask & (1 << i)) {
pRawDataset = pEepData->calPierData2G[i];
@@ -316,19 +418,17 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
ENABLE_REGWRITE_BUFFER(ah);
- if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset,
- SM(pdGainOverlap_t2,
- AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
- | SM(gainBoundaries[0],
- AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
- | SM(gainBoundaries[1],
- AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
- | SM(gainBoundaries[2],
- AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
- | SM(gainBoundaries[3],
- AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
- }
+ REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset,
+ SM(pdGainOverlap_t2,
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
+ | SM(gainBoundaries[0],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
+ | SM(gainBoundaries[1],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
+ | SM(gainBoundaries[2],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
+ | SM(gainBoundaries[3],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
for (j = 0; j < 32; j++) {
@@ -356,16 +456,13 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
REGWRITE_BUFFER_FLUSH(ah);
}
}
-
- *pTxPowerIndexOffset = 0;
}
static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
struct ath9k_channel *chan,
int16_t *ratesArray,
u16 cfgCtl,
- u16 AntennaReduction,
- u16 twiceMaxRegulatoryPower,
+ u16 antenna_reduction,
u16 powerLimit)
{
#define CMP_TEST_GRP \
@@ -374,20 +471,16 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
|| (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \
((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
int i;
- int16_t twiceLargestAntenna;
u16 twiceMinEdgePower;
u16 twiceMaxEdgePower = MAX_RATE_POWER;
- u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
+ u16 scaledPower = 0, minCtlPower;
u16 numCtlModes;
const u16 *pCtlMode;
u16 ctlMode, freq;
struct chan_centers centers;
struct cal_ctl_data_4k *rep;
struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
- static const u16 tpScaleReductionTable[5] =
- { 0, 3, 6, 9, MAX_RATE_POWER };
struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
0, { 0, 0, 0, 0}
};
@@ -405,19 +498,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
ath9k_hw_get_channel_centers(ah, chan, &centers);
- twiceLargestAntenna = pEepData->modalHeader.antennaGainCh[0];
- twiceLargestAntenna = (int16_t)min(AntennaReduction -
- twiceLargestAntenna, 0);
-
- maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
- if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) {
- maxRegAllowedPower -=
- (tpScaleReductionTable[(regulatory->tp_scale)] * 2);
- }
-
- scaledPower = min(powerLimit, maxRegAllowedPower);
- scaledPower = max((u16)0, scaledPower);
-
+ scaledPower = powerLimit - antenna_reduction;
numCtlModes = ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40;
pCtlMode = ctlModesFor11g;
@@ -573,14 +654,12 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
struct ath9k_channel *chan,
u16 cfgCtl,
u8 twiceAntennaReduction,
- u8 twiceMaxRegulatoryPower,
u8 powerLimit, bool test)
{
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
struct modal_eep_4k_header *pModal = &pEepData->modalHeader;
int16_t ratesArray[Ar5416RateSize];
- int16_t txPowerIndexOffset = 0;
u8 ht40PowerIncForPdadc = 2;
int i;
@@ -594,14 +673,12 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
ath9k_hw_set_4k_power_per_rate_table(ah, chan,
&ratesArray[0], cfgCtl,
twiceAntennaReduction,
- twiceMaxRegulatoryPower,
powerLimit);
- ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset);
+ ath9k_hw_set_4k_power_cal_table(ah, chan);
regulatory->max_power_level = 0;
for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
- ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
if (ratesArray[i] > MAX_RATE_POWER)
ratesArray[i] = MAX_RATE_POWER;
@@ -612,19 +689,8 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
if (test)
return;
- /* Update regulatory */
- i = rate6mb;
- if (IS_CHAN_HT40(chan))
- i = rateHt40_0;
- else if (IS_CHAN_HT20(chan))
- i = rateHt20_0;
-
- regulatory->max_power_level = ratesArray[i];
-
- if (AR_SREV_9280_20_OR_LATER(ah)) {
- for (i = 0; i < Ar5416RateSize; i++)
- ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
- }
+ for (i = 0; i < Ar5416RateSize; i++)
+ ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
ENABLE_REGWRITE_BUFFER(ah);
@@ -694,28 +760,6 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
REGWRITE_BUFFER_FLUSH(ah);
}
-static void ath9k_hw_4k_set_addac(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- struct modal_eep_4k_header *pModal;
- struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
- u8 biaslevel;
-
- if (ah->hw_version.macVersion != AR_SREV_VERSION_9160)
- return;
-
- if (ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_MINOR_VER_7)
- return;
-
- pModal = &eep->modalHeader;
-
- if (pModal->xpaBiasLvl != 0xff) {
- biaslevel = pModal->xpaBiasLvl;
- INI_RA(&ah->iniAddac, 7, 1) =
- (INI_RA(&ah->iniAddac, 7, 1) & (~0x18)) | biaslevel << 3;
- }
-}
-
static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
struct modal_eep_4k_header *pModal,
struct ar5416_eeprom_4k *eep,
@@ -783,6 +827,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
u8 txRxAttenLocal;
u8 ob[5], db1[5], db2[5];
u8 ant_div_control1, ant_div_control2;
+ u8 bb_desired_scale;
u32 regVal;
pModal = &eep->modalHeader;
@@ -1002,30 +1047,29 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
AR_PHY_SETTLING_SWITCH,
pModal->swSettleHt40);
}
- if (AR_SREV_9271(ah) || AR_SREV_9285(ah)) {
- u8 bb_desired_scale = (pModal->bb_scale_smrt_antenna &
- EEP_4K_BB_DESIRED_SCALE_MASK);
- if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) {
- u32 pwrctrl, mask, clr;
-
- mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25);
- pwrctrl = mask * bb_desired_scale;
- clr = mask * 0x1f;
- REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr);
- REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr);
- REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr);
-
- mask = BIT(0)|BIT(5)|BIT(15);
- pwrctrl = mask * bb_desired_scale;
- clr = mask * 0x1f;
- REG_RMW(ah, AR_PHY_TX_PWRCTRL9, pwrctrl, clr);
-
- mask = BIT(0)|BIT(5);
- pwrctrl = mask * bb_desired_scale;
- clr = mask * 0x1f;
- REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr);
- REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr);
- }
+
+ bb_desired_scale = (pModal->bb_scale_smrt_antenna &
+ EEP_4K_BB_DESIRED_SCALE_MASK);
+ if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) {
+ u32 pwrctrl, mask, clr;
+
+ mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25);
+ pwrctrl = mask * bb_desired_scale;
+ clr = mask * 0x1f;
+ REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr);
+ REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr);
+ REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr);
+
+ mask = BIT(0)|BIT(5)|BIT(15);
+ pwrctrl = mask * bb_desired_scale;
+ clr = mask * 0x1f;
+ REG_RMW(ah, AR_PHY_TX_PWRCTRL9, pwrctrl, clr);
+
+ mask = BIT(0)|BIT(5);
+ pwrctrl = mask * bb_desired_scale;
+ clr = mask * 0x1f;
+ REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr);
+ REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr);
}
}
@@ -1063,10 +1107,10 @@ const struct eeprom_ops eep_4k_ops = {
.check_eeprom = ath9k_hw_4k_check_eeprom,
.get_eeprom = ath9k_hw_4k_get_eeprom,
.fill_eeprom = ath9k_hw_4k_fill_eeprom,
+ .dump_eeprom = ath9k_hw_4k_dump_eeprom,
.get_eeprom_ver = ath9k_hw_4k_get_eeprom_ver,
.get_eeprom_rev = ath9k_hw_4k_get_eeprom_rev,
.set_board_values = ath9k_hw_4k_set_board_values,
- .set_addac = ath9k_hw_4k_set_addac,
.set_txpower = ath9k_hw_4k_set_txpower,
.get_spur_channel = ath9k_hw_4k_get_spur_channel
};
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index d6f6b192f45..4f5c50a87ce 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -76,6 +76,111 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
return __ath9k_hw_ar9287_fill_eeprom(ah);
}
+#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+static u32 ar9287_dump_modal_eeprom(char *buf, u32 len, u32 size,
+ struct modal_eep_ar9287_header *modal_hdr)
+{
+ PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]);
+ PR_EEP("Chain1 Ant. Control", modal_hdr->antCtrlChain[1]);
+ PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon);
+ PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
+ PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]);
+ PR_EEP("Switch Settle", modal_hdr->switchSettling);
+ PR_EEP("Chain0 TxRxAtten", modal_hdr->txRxAttenCh[0]);
+ PR_EEP("Chain1 TxRxAtten", modal_hdr->txRxAttenCh[1]);
+ PR_EEP("Chain0 RxTxMargin", modal_hdr->rxTxMarginCh[0]);
+ PR_EEP("Chain1 RxTxMargin", modal_hdr->rxTxMarginCh[1]);
+ PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
+ PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
+ PR_EEP("txEndToRxOn", modal_hdr->txEndToRxOn);
+ PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
+ PR_EEP("CCA Threshold)", modal_hdr->thresh62);
+ PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
+ PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
+ PR_EEP("xpdGain", modal_hdr->xpdGain);
+ PR_EEP("External PD", modal_hdr->xpd);
+ PR_EEP("Chain0 I Coefficient", modal_hdr->iqCalICh[0]);
+ PR_EEP("Chain1 I Coefficient", modal_hdr->iqCalICh[1]);
+ PR_EEP("Chain0 Q Coefficient", modal_hdr->iqCalQCh[0]);
+ PR_EEP("Chain1 Q Coefficient", modal_hdr->iqCalQCh[1]);
+ PR_EEP("pdGainOverlap", modal_hdr->pdGainOverlap);
+ PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
+ PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
+ PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
+ PR_EEP("HT40 Power Inc.", modal_hdr->ht40PowerIncForPdadc);
+ PR_EEP("Chain0 bswAtten", modal_hdr->bswAtten[0]);
+ PR_EEP("Chain1 bswAtten", modal_hdr->bswAtten[1]);
+ PR_EEP("Chain0 bswMargin", modal_hdr->bswMargin[0]);
+ PR_EEP("Chain1 bswMargin", modal_hdr->bswMargin[1]);
+ PR_EEP("HT40 Switch Settle", modal_hdr->swSettleHt40);
+ PR_EEP("AR92x7 Version", modal_hdr->version);
+ PR_EEP("DriverBias1", modal_hdr->db1);
+ PR_EEP("DriverBias2", modal_hdr->db1);
+ PR_EEP("CCK OutputBias", modal_hdr->ob_cck);
+ PR_EEP("PSK OutputBias", modal_hdr->ob_psk);
+ PR_EEP("QAM OutputBias", modal_hdr->ob_qam);
+ PR_EEP("PAL_OFF OutputBias", modal_hdr->ob_pal_off);
+
+ return len;
+}
+
+static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ struct ar9287_eeprom *eep = &ah->eeprom.map9287;
+ struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
+
+ if (!dump_base_hdr) {
+ len += snprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
+ len += ar9287_dump_modal_eeprom(buf, len, size,
+ &eep->modalHeader);
+ goto out;
+ }
+
+ PR_EEP("Major Version", pBase->version >> 12);
+ PR_EEP("Minor Version", pBase->version & 0xFFF);
+ PR_EEP("Checksum", pBase->checksum);
+ PR_EEP("Length", pBase->length);
+ PR_EEP("RegDomain1", pBase->regDmn[0]);
+ PR_EEP("RegDomain2", pBase->regDmn[1]);
+ PR_EEP("TX Mask", pBase->txMask);
+ PR_EEP("RX Mask", pBase->rxMask);
+ PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
+ PR_EEP("Allow 2GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
+ PR_EEP("Disable 2GHz HT20", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_2G_HT20));
+ PR_EEP("Disable 2GHz HT40", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_2G_HT40));
+ PR_EEP("Disable 5Ghz HT20", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_5G_HT20));
+ PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_5G_HT40));
+ PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01));
+ PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF);
+ PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF);
+ PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
+ PR_EEP("Power Table Offset", pBase->pwrTableOffset);
+ PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
+
+ len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
+
+out:
+ if (len > size)
+ len = size;
+
+ return len;
+}
+#else
+static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ return 0;
+}
+#endif
+
+
static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
{
u32 sum = 0, el, integer;
@@ -203,8 +308,6 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
return get_unaligned_be16(pBase->macAddr + 4);
case EEP_REG_0:
return pBase->regDmn[0];
- case EEP_REG_1:
- return pBase->regDmn[1];
case EEP_OP_CAP:
return pBase->deviceCap;
case EEP_OP_MODE:
@@ -231,6 +334,9 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
return pBase->tempSensSlopePalOn;
else
return 0;
+ case EEP_ANTENNA_GAIN_2G:
+ return max_t(u8, pModal->antennaGainCh[0],
+ pModal->antennaGainCh[1]);
default:
return 0;
}
@@ -307,8 +413,7 @@ static void ar9287_eeprom_olpc_set_pdadcs(struct ath_hw *ah,
}
static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
- struct ath9k_channel *chan,
- int16_t *pTxPowerIndexOffset)
+ struct ath9k_channel *chan)
{
struct cal_data_per_freq_ar9287 *pRawDataset;
struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop;
@@ -444,16 +549,13 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
REGWRITE_BUFFER_FLUSH(ah);
}
}
-
- *pTxPowerIndexOffset = 0;
}
static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
struct ath9k_channel *chan,
int16_t *ratesArray,
u16 cfgCtl,
- u16 AntennaReduction,
- u16 twiceMaxRegulatoryPower,
+ u16 antenna_reduction,
u16 powerLimit)
{
#define CMP_CTL \
@@ -467,12 +569,8 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
u16 twiceMaxEdgePower = MAX_RATE_POWER;
- static const u16 tpScaleReductionTable[5] =
- { 0, 3, 6, 9, MAX_RATE_POWER };
int i;
- int16_t twiceLargestAntenna;
struct cal_ctl_data_ar9287 *rep;
struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
targetPowerCck = {0, {0, 0, 0, 0} };
@@ -480,7 +578,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
targetPowerCckExt = {0, {0, 0, 0, 0} };
struct cal_target_power_ht targetPowerHt20,
targetPowerHt40 = {0, {0, 0, 0, 0} };
- u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
+ u16 scaledPower = 0, minCtlPower;
static const u16 ctlModesFor11g[] = {
CTL_11B, CTL_11G, CTL_2GHT20,
CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
@@ -495,24 +593,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
tx_chainmask = ah->txchainmask;
ath9k_hw_get_channel_centers(ah, chan, &centers);
-
- /* Compute TxPower reduction due to Antenna Gain */
- twiceLargestAntenna = max(pEepData->modalHeader.antennaGainCh[0],
- pEepData->modalHeader.antennaGainCh[1]);
- twiceLargestAntenna = (int16_t)min((AntennaReduction) -
- twiceLargestAntenna, 0);
-
- /*
- * scaledPower is the minimum of the user input power level
- * and the regulatory allowed power level.
- */
- maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
-
- if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX)
- maxRegAllowedPower -=
- (tpScaleReductionTable[(regulatory->tp_scale)] * 2);
-
- scaledPower = min(powerLimit, maxRegAllowedPower);
+ scaledPower = powerLimit - antenna_reduction;
/*
* Reduce scaled Power by number of chains active
@@ -713,14 +794,12 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
struct ath9k_channel *chan, u16 cfgCtl,
u8 twiceAntennaReduction,
- u8 twiceMaxRegulatoryPower,
u8 powerLimit, bool test)
{
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
struct modal_eep_ar9287_header *pModal = &pEepData->modalHeader;
int16_t ratesArray[Ar5416RateSize];
- int16_t txPowerIndexOffset = 0;
u8 ht40PowerIncForPdadc = 2;
int i;
@@ -733,14 +812,12 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
ath9k_hw_set_ar9287_power_per_rate_table(ah, chan,
&ratesArray[0], cfgCtl,
twiceAntennaReduction,
- twiceMaxRegulatoryPower,
powerLimit);
- ath9k_hw_set_ar9287_power_cal_table(ah, chan, &txPowerIndexOffset);
+ ath9k_hw_set_ar9287_power_cal_table(ah, chan);
regulatory->max_power_level = 0;
for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
- ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
if (ratesArray[i] > MAX_RATE_POWER)
ratesArray[i] = MAX_RATE_POWER;
@@ -751,17 +828,8 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
if (test)
return;
- if (IS_CHAN_2GHZ(chan))
- i = rate1l;
- else
- i = rate6mb;
-
- regulatory->max_power_level = ratesArray[i];
-
- if (AR_SREV_9280_20_OR_LATER(ah)) {
- for (i = 0; i < Ar5416RateSize; i++)
- ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2;
- }
+ for (i = 0; i < Ar5416RateSize; i++)
+ ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2;
ENABLE_REGWRITE_BUFFER(ah);
@@ -851,11 +919,6 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
REGWRITE_BUFFER_FLUSH(ah);
}
-static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
-}
-
static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -1003,10 +1066,10 @@ const struct eeprom_ops eep_ar9287_ops = {
.check_eeprom = ath9k_hw_ar9287_check_eeprom,
.get_eeprom = ath9k_hw_ar9287_get_eeprom,
.fill_eeprom = ath9k_hw_ar9287_fill_eeprom,
+ .dump_eeprom = ath9k_hw_ar9287_dump_eeprom,
.get_eeprom_ver = ath9k_hw_ar9287_get_eeprom_ver,
.get_eeprom_rev = ath9k_hw_ar9287_get_eeprom_rev,
.set_board_values = ath9k_hw_ar9287_set_board_values,
- .set_addac = ath9k_hw_ar9287_set_addac,
.set_txpower = ath9k_hw_ar9287_set_txpower,
.get_spur_channel = ath9k_hw_ar9287_get_spur_channel
};
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index b9540a99261..81e62967167 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -133,6 +133,136 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
#undef SIZE_EEPROM_DEF
+#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size,
+ struct modal_eep_header *modal_hdr)
+{
+ PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]);
+ PR_EEP("Chain1 Ant. Control", modal_hdr->antCtrlChain[1]);
+ PR_EEP("Chain2 Ant. Control", modal_hdr->antCtrlChain[2]);
+ PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon);
+ PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
+ PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]);
+ PR_EEP("Chain2 Ant. Gain", modal_hdr->antennaGainCh[2]);
+ PR_EEP("Switch Settle", modal_hdr->switchSettling);
+ PR_EEP("Chain0 TxRxAtten", modal_hdr->txRxAttenCh[0]);
+ PR_EEP("Chain1 TxRxAtten", modal_hdr->txRxAttenCh[1]);
+ PR_EEP("Chain2 TxRxAtten", modal_hdr->txRxAttenCh[2]);
+ PR_EEP("Chain0 RxTxMargin", modal_hdr->rxTxMarginCh[0]);
+ PR_EEP("Chain1 RxTxMargin", modal_hdr->rxTxMarginCh[1]);
+ PR_EEP("Chain2 RxTxMargin", modal_hdr->rxTxMarginCh[2]);
+ PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
+ PR_EEP("PGA Desired size", modal_hdr->pgaDesiredSize);
+ PR_EEP("Chain0 xlna Gain", modal_hdr->xlnaGainCh[0]);
+ PR_EEP("Chain1 xlna Gain", modal_hdr->xlnaGainCh[1]);
+ PR_EEP("Chain2 xlna Gain", modal_hdr->xlnaGainCh[2]);
+ PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
+ PR_EEP("txEndToRxOn", modal_hdr->txEndToRxOn);
+ PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
+ PR_EEP("CCA Threshold)", modal_hdr->thresh62);
+ PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
+ PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
+ PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]);
+ PR_EEP("xpdGain", modal_hdr->xpdGain);
+ PR_EEP("External PD", modal_hdr->xpd);
+ PR_EEP("Chain0 I Coefficient", modal_hdr->iqCalICh[0]);
+ PR_EEP("Chain1 I Coefficient", modal_hdr->iqCalICh[1]);
+ PR_EEP("Chain2 I Coefficient", modal_hdr->iqCalICh[2]);
+ PR_EEP("Chain0 Q Coefficient", modal_hdr->iqCalQCh[0]);
+ PR_EEP("Chain1 Q Coefficient", modal_hdr->iqCalQCh[1]);
+ PR_EEP("Chain2 Q Coefficient", modal_hdr->iqCalQCh[2]);
+ PR_EEP("pdGainOverlap", modal_hdr->pdGainOverlap);
+ PR_EEP("Chain0 OutputBias", modal_hdr->ob);
+ PR_EEP("Chain0 DriverBias", modal_hdr->db);
+ PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
+ PR_EEP("2chain pwr decrease", modal_hdr->pwrDecreaseFor2Chain);
+ PR_EEP("3chain pwr decrease", modal_hdr->pwrDecreaseFor3Chain);
+ PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
+ PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
+ PR_EEP("HT40 Power Inc.", modal_hdr->ht40PowerIncForPdadc);
+ PR_EEP("Chain0 bswAtten", modal_hdr->bswAtten[0]);
+ PR_EEP("Chain1 bswAtten", modal_hdr->bswAtten[1]);
+ PR_EEP("Chain2 bswAtten", modal_hdr->bswAtten[2]);
+ PR_EEP("Chain0 bswMargin", modal_hdr->bswMargin[0]);
+ PR_EEP("Chain1 bswMargin", modal_hdr->bswMargin[1]);
+ PR_EEP("Chain2 bswMargin", modal_hdr->bswMargin[2]);
+ PR_EEP("HT40 Switch Settle", modal_hdr->swSettleHt40);
+ PR_EEP("Chain0 xatten2Db", modal_hdr->xatten2Db[0]);
+ PR_EEP("Chain1 xatten2Db", modal_hdr->xatten2Db[1]);
+ PR_EEP("Chain2 xatten2Db", modal_hdr->xatten2Db[2]);
+ PR_EEP("Chain0 xatten2Margin", modal_hdr->xatten2Margin[0]);
+ PR_EEP("Chain1 xatten2Margin", modal_hdr->xatten2Margin[1]);
+ PR_EEP("Chain2 xatten2Margin", modal_hdr->xatten2Margin[2]);
+ PR_EEP("Chain1 OutputBias", modal_hdr->ob_ch1);
+ PR_EEP("Chain1 DriverBias", modal_hdr->db_ch1);
+ PR_EEP("LNA Control", modal_hdr->lna_ctl);
+ PR_EEP("XPA Bias Freq0", modal_hdr->xpaBiasLvlFreq[0]);
+ PR_EEP("XPA Bias Freq1", modal_hdr->xpaBiasLvlFreq[1]);
+ PR_EEP("XPA Bias Freq2", modal_hdr->xpaBiasLvlFreq[2]);
+
+ return len;
+}
+
+static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ struct ar5416_eeprom_def *eep = &ah->eeprom.def;
+ struct base_eep_header *pBase = &eep->baseEepHeader;
+
+ if (!dump_base_hdr) {
+ len += snprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
+ len += ath9k_def_dump_modal_eeprom(buf, len, size,
+ &eep->modalHeader[0]);
+ len += snprintf(buf + len, size - len,
+ "%20s :\n", "5GHz modal Header");
+ len += ath9k_def_dump_modal_eeprom(buf, len, size,
+ &eep->modalHeader[1]);
+ goto out;
+ }
+
+ PR_EEP("Major Version", pBase->version >> 12);
+ PR_EEP("Minor Version", pBase->version & 0xFFF);
+ PR_EEP("Checksum", pBase->checksum);
+ PR_EEP("Length", pBase->length);
+ PR_EEP("RegDomain1", pBase->regDmn[0]);
+ PR_EEP("RegDomain2", pBase->regDmn[1]);
+ PR_EEP("TX Mask", pBase->txMask);
+ PR_EEP("RX Mask", pBase->rxMask);
+ PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
+ PR_EEP("Allow 2GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
+ PR_EEP("Disable 2GHz HT20", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_2G_HT20));
+ PR_EEP("Disable 2GHz HT40", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_2G_HT40));
+ PR_EEP("Disable 5Ghz HT20", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_5G_HT20));
+ PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_5G_HT40));
+ PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01));
+ PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF);
+ PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF);
+ PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
+ PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
+
+ len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
+
+out:
+ if (len > size)
+ len = size;
+
+ return len;
+}
+#else
+static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ return 0;
+}
+#endif
+
+
static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
{
struct ar5416_eeprom_def *eep =
@@ -270,6 +400,7 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
struct ar5416_eeprom_def *eep = &ah->eeprom.def;
struct modal_eep_header *pModal = eep->modalHeader;
struct base_eep_header *pBase = &eep->baseEepHeader;
+ int band = 0;
switch (param) {
case EEP_NFTHRESH_5:
@@ -284,8 +415,6 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
return get_unaligned_be16(pBase->macAddr + 4);
case EEP_REG_0:
return pBase->regDmn[0];
- case EEP_REG_1:
- return pBase->regDmn[1];
case EEP_OP_CAP:
return pBase->deviceCap;
case EEP_OP_MODE:
@@ -337,6 +466,14 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
return pBase->pwr_table_offset;
else
return AR5416_PWR_TABLE_OFFSET_DB;
+ case EEP_ANTENNA_GAIN_2G:
+ band = 1;
+ /* fall through */
+ case EEP_ANTENNA_GAIN_5G:
+ return max_t(u8, max_t(u8,
+ pModal[band].antennaGainCh[0],
+ pModal[band].antennaGainCh[1]),
+ pModal[band].antennaGainCh[2]);
default:
return 0;
}
@@ -417,8 +554,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
break;
}
- if (AR_SREV_5416_20_OR_LATER(ah) &&
- (ah->rxchainmask == 5 || ah->txchainmask == 5) && (i != 0))
+ if ((ah->rxchainmask == 5 || ah->txchainmask == 5) && (i != 0))
regChainOffset = (i == 1) ? 0x2000 : 0x1000;
else
regChainOffset = i * 0x1000;
@@ -435,9 +571,8 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
SM(pModal->iqCalQCh[i],
AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
- if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah))
- ath9k_hw_def_set_gain(ah, pModal, eep, txRxAttenLocal,
- regChainOffset, i);
+ ath9k_hw_def_set_gain(ah, pModal, eep, txRxAttenLocal,
+ regChainOffset, i);
}
if (AR_SREV_9280_20_OR_LATER(ah)) {
@@ -693,8 +828,7 @@ static void ath9k_adjust_pdadc_values(struct ath_hw *ah,
}
static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
- struct ath9k_channel *chan,
- int16_t *pTxPowerIndexOffset)
+ struct ath9k_channel *chan)
{
#define SM_PD_GAIN(x) SM(0x38, AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##x)
#define SM_PDGAIN_B(x, y) \
@@ -764,8 +898,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
xpdGainValues[2]);
for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- if (AR_SREV_5416_20_OR_LATER(ah) &&
- (ah->rxchainmask == 5 || ah->txchainmask == 5) &&
+ if ((ah->rxchainmask == 5 || ah->txchainmask == 5) &&
(i != 0)) {
regChainOffset = (i == 1) ? 0x2000 : 0x1000;
} else
@@ -806,27 +939,24 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
ENABLE_REGWRITE_BUFFER(ah);
- if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
- if (OLC_FOR_AR9280_20_LATER) {
- REG_WRITE(ah,
- AR_PHY_TPCRG5 + regChainOffset,
- SM(0x6,
- AR_PHY_TPCRG5_PD_GAIN_OVERLAP) |
- SM_PD_GAIN(1) | SM_PD_GAIN(2) |
- SM_PD_GAIN(3) | SM_PD_GAIN(4));
- } else {
- REG_WRITE(ah,
- AR_PHY_TPCRG5 + regChainOffset,
- SM(pdGainOverlap_t2,
- AR_PHY_TPCRG5_PD_GAIN_OVERLAP)|
- SM_PDGAIN_B(0, 1) |
- SM_PDGAIN_B(1, 2) |
- SM_PDGAIN_B(2, 3) |
- SM_PDGAIN_B(3, 4));
- }
+ if (OLC_FOR_AR9280_20_LATER) {
+ REG_WRITE(ah,
+ AR_PHY_TPCRG5 + regChainOffset,
+ SM(0x6,
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP) |
+ SM_PD_GAIN(1) | SM_PD_GAIN(2) |
+ SM_PD_GAIN(3) | SM_PD_GAIN(4));
+ } else {
+ REG_WRITE(ah,
+ AR_PHY_TPCRG5 + regChainOffset,
+ SM(pdGainOverlap_t2,
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP)|
+ SM_PDGAIN_B(0, 1) |
+ SM_PDGAIN_B(1, 2) |
+ SM_PDGAIN_B(2, 3) |
+ SM_PDGAIN_B(3, 4));
}
-
ath9k_adjust_pdadc_values(ah, pwr_table_offset,
diff, pdadcValues);
@@ -855,7 +985,6 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
}
}
- *pTxPowerIndexOffset = 0;
#undef SM_PD_GAIN
#undef SM_PDGAIN_B
}
@@ -864,21 +993,15 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
struct ath9k_channel *chan,
int16_t *ratesArray,
u16 cfgCtl,
- u16 AntennaReduction,
- u16 twiceMaxRegulatoryPower,
+ u16 antenna_reduction,
u16 powerLimit)
{
#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
u16 twiceMaxEdgePower = MAX_RATE_POWER;
- static const u16 tpScaleReductionTable[5] =
- { 0, 3, 6, 9, MAX_RATE_POWER };
-
int i;
- int16_t twiceLargestAntenna;
struct cal_ctl_data *rep;
struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
0, { 0, 0, 0, 0}
@@ -890,7 +1013,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
0, {0, 0, 0, 0}
};
- u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
+ u16 scaledPower = 0, minCtlPower;
static const u16 ctlModesFor11a[] = {
CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
};
@@ -909,27 +1032,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
ath9k_hw_get_channel_centers(ah, chan, &centers);
- twiceLargestAntenna = max(
- pEepData->modalHeader
- [IS_CHAN_2GHZ(chan)].antennaGainCh[0],
- pEepData->modalHeader
- [IS_CHAN_2GHZ(chan)].antennaGainCh[1]);
-
- twiceLargestAntenna = max((u8)twiceLargestAntenna,
- pEepData->modalHeader
- [IS_CHAN_2GHZ(chan)].antennaGainCh[2]);
-
- twiceLargestAntenna = (int16_t)min(AntennaReduction -
- twiceLargestAntenna, 0);
-
- maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
-
- if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) {
- maxRegAllowedPower -=
- (tpScaleReductionTable[(regulatory->tp_scale)] * 2);
- }
-
- scaledPower = min(powerLimit, maxRegAllowedPower);
+ scaledPower = powerLimit - antenna_reduction;
switch (ar5416_get_ntxchains(tx_chainmask)) {
case 1:
@@ -1134,7 +1237,6 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
struct ath9k_channel *chan,
u16 cfgCtl,
u8 twiceAntennaReduction,
- u8 twiceMaxRegulatoryPower,
u8 powerLimit, bool test)
{
#define RT_AR_DELTA(x) (ratesArray[x] - cck_ofdm_delta)
@@ -1143,7 +1245,6 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
struct modal_eep_header *pModal =
&(pEepData->modalHeader[IS_CHAN_2GHZ(chan)]);
int16_t ratesArray[Ar5416RateSize];
- int16_t txPowerIndexOffset = 0;
u8 ht40PowerIncForPdadc = 2;
int i, cck_ofdm_delta = 0;
@@ -1157,31 +1258,18 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
ath9k_hw_set_def_power_per_rate_table(ah, chan,
&ratesArray[0], cfgCtl,
twiceAntennaReduction,
- twiceMaxRegulatoryPower,
powerLimit);
- ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset);
+ ath9k_hw_set_def_power_cal_table(ah, chan);
regulatory->max_power_level = 0;
for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
- ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
if (ratesArray[i] > MAX_RATE_POWER)
ratesArray[i] = MAX_RATE_POWER;
if (ratesArray[i] > regulatory->max_power_level)
regulatory->max_power_level = ratesArray[i];
}
- if (!test) {
- i = rate6mb;
-
- if (IS_CHAN_HT40(chan))
- i = rateHt40_0;
- else if (IS_CHAN_HT20(chan))
- i = rateHt20_0;
-
- regulatory->max_power_level = ratesArray[i];
- }
-
switch(ar5416_get_ntxchains(ah->txchainmask)) {
case 1:
break;
@@ -1336,6 +1424,7 @@ const struct eeprom_ops eep_def_ops = {
.check_eeprom = ath9k_hw_def_check_eeprom,
.get_eeprom = ath9k_hw_def_get_eeprom,
.fill_eeprom = ath9k_hw_def_fill_eeprom,
+ .dump_eeprom = ath9k_hw_def_dump_eeprom,
.get_eeprom_ver = ath9k_hw_def_get_eeprom_ver,
.get_eeprom_rev = ath9k_hw_def_get_eeprom_rev,
.set_board_values = ath9k_hw_def_set_board_values,
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index bc713fc2819..655576c8fda 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -48,6 +48,8 @@ void ath_init_leds(struct ath_softc *sc)
sc->sc_ah->led_pin = ATH_LED_PIN_9485;
else if (AR_SREV_9300(sc->sc_ah))
sc->sc_ah->led_pin = ATH_LED_PIN_9300;
+ else if (AR_SREV_9462(sc->sc_ah))
+ sc->sc_ah->led_pin = ATH_LED_PIN_9462;
else
sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
}
@@ -82,9 +84,14 @@ void ath_init_leds(struct ath_softc *sc)
static bool ath_is_rfkill_set(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
+ bool is_blocked;
- return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
+ ath9k_ps_wakeup(sc);
+ is_blocked = ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
ah->rfkill_polarity;
+ ath9k_ps_restore(sc);
+
+ return is_blocked;
}
void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
@@ -148,7 +155,8 @@ static void ath9k_gen_timer_start(struct ath_hw *ah,
if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
ath9k_hw_disable_interrupts(ah);
ah->imask |= ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
}
}
@@ -162,7 +170,8 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
if (timer_table->timer_mask.val == 0) {
ath9k_hw_disable_interrupts(ah);
ah->imask &= ~ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index d3f4a59cd45..77c8ded8de5 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -38,6 +38,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
{ USB_DEVICE(0x040D, 0x3801) }, /* VIA */
{ USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
+ { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
{ USB_DEVICE(0x0cf3, 0x7015),
.driver_info = AR9287_USB }, /* Atheros */
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 5bc022087e6..da5596766d8 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -521,8 +521,6 @@ void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv,
u8 enable_coex);
-void ath9k_htc_station_work(struct work_struct *work);
-void ath9k_htc_aggr_work(struct work_struct *work);
void ath9k_htc_ani_work(struct work_struct *work);
void ath9k_htc_start_ani(struct ath9k_htc_priv *priv);
void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
@@ -542,7 +540,6 @@ int ath9k_htc_tx_get_slot(struct ath9k_htc_priv *priv);
void ath9k_htc_tx_clear_slot(struct ath9k_htc_priv *priv, int slot);
void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv);
void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event);
-void ath9k_htc_tx_failed(struct ath9k_htc_priv *priv);
void ath9k_tx_failed_tasklet(unsigned long data);
void ath9k_htc_tx_cleanup_timer(unsigned long data);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index db2352e5cc0..e3a02eb8e0c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -228,8 +228,14 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv)
static bool ath_is_rfkill_set(struct ath9k_htc_priv *priv)
{
- return ath9k_hw_gpio_get(priv->ah, priv->ah->rfkill_gpio) ==
- priv->ah->rfkill_polarity;
+ bool is_blocked;
+
+ ath9k_htc_ps_wakeup(priv);
+ is_blocked = ath9k_hw_gpio_get(priv->ah, priv->ah->rfkill_gpio) ==
+ priv->ah->rfkill_polarity;
+ ath9k_htc_ps_restore(priv);
+
+ return is_blocked;
}
void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 3bea7ea86f0..966661c9e58 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -509,8 +509,8 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv,
memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
/* ath9k_htc supports only 1 or 2 stream devices */
- tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, 2);
- rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, 2);
+ tx_streams = ath9k_cmn_count_streams(priv->ah->txchainmask, 2);
+ rx_streams = ath9k_cmn_count_streams(priv->ah->rxchainmask, 2);
ath_dbg(common, ATH_DBG_CONFIG,
"TX streams %d, RX streams: %d\n",
@@ -572,25 +572,6 @@ err:
return -EINVAL;
}
-static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
-{
- struct ath_common *common = ath9k_hw_common(priv->ah);
- int i = 0;
-
- /* Get the hardware key cache size. */
- common->keymax = AR_KEYTABLE_SIZE;
-
- if (priv->ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
- common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
-
- /*
- * Reset the key cache since some parts do not
- * reset the contents on initial power up.
- */
- for (i = 0; i < common->keymax; i++)
- ath_hw_keyreset(common, (u16) i);
-}
-
static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
{
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
@@ -620,9 +601,6 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
- common->tx_chainmask = priv->ah->caps.tx_chainmask;
- common->rx_chainmask = priv->ah->caps.rx_chainmask;
-
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
priv->ah->opmode = NL80211_IFTYPE_STATION;
@@ -666,7 +644,6 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
return -ENOMEM;
ah->hw_version.devid = devid;
- ah->hw_version.subsysid = 0; /* FIXME */
ah->hw_version.usbdev = drv_info;
ah->ah_flags |= AH_USE_EEPROM;
ah->reg_ops.read = ath9k_regread;
@@ -721,7 +698,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++)
priv->cur_beacon_conf.bslot[i] = NULL;
- ath9k_init_crypto(priv);
+ ath9k_cmn_init_crypto(ah);
ath9k_init_channels_rates(priv);
ath9k_init_misc(priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 7212acb2bd6..0b9a0e8a495 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -826,8 +826,7 @@ void ath9k_htc_ani_work(struct work_struct *work)
if (longcal || shortcal)
common->ani.caldone =
ath9k_hw_calibrate(ah, ah->curchan,
- common->rx_chainmask,
- longcal);
+ ah->rxchainmask, longcal);
ath9k_htc_ps_restore(priv);
}
@@ -1300,6 +1299,7 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
if (priv->op_flags & OP_INVALID) {
ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_ANY,
"Unable to configure filter on invalid state\n");
+ mutex_unlock(&priv->mutex);
return;
}
ath9k_htc_ps_wakeup(priv);
@@ -1352,7 +1352,8 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
return ret;
}
-static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
+static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct ath9k_htc_priv *priv = hw->priv;
@@ -1563,7 +1564,8 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
mutex_unlock(&priv->mutex);
}
-static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw)
+static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath9k_htc_priv *priv = hw->priv;
u64 tsf;
@@ -1577,7 +1579,8 @@ static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw)
return tsf;
}
-static void ath9k_htc_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+static void ath9k_htc_set_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u64 tsf)
{
struct ath9k_htc_priv *priv = hw->priv;
@@ -1588,7 +1591,8 @@ static void ath9k_htc_set_tsf(struct ieee80211_hw *hw, u64 tsf)
mutex_unlock(&priv->mutex);
}
-static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw)
+static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath9k_htc_priv *priv = hw->priv;
@@ -1736,6 +1740,22 @@ out:
return ret;
}
+
+static int ath9k_htc_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath9k_mib_stats *mib_stats = &ah->ah_mibStats;
+
+ stats->dot11ACKFailureCount = mib_stats->ackrcv_bad;
+ stats->dot11RTSFailureCount = mib_stats->rts_bad;
+ stats->dot11FCSErrorCount = mib_stats->fcs_bad;
+ stats->dot11RTSSuccessCount = mib_stats->rts_good;
+
+ return 0;
+}
+
struct ieee80211_ops ath9k_htc_ops = {
.tx = ath9k_htc_tx,
.start = ath9k_htc_start,
@@ -1759,4 +1779,5 @@ struct ieee80211_ops ath9k_htc_ops = {
.rfkill_poll = ath9k_htc_rfkill_poll_state,
.set_coverage_class = ath9k_htc_set_coverage_class,
.set_bitrate_mask = ath9k_htc_set_bitrate_mask,
+ .get_stats = ath9k_htc_get_stats,
};
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index cb29e887538..e74c233757a 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -22,10 +22,12 @@
/* Hardware core and driver accessible callbacks */
static inline void ath9k_hw_configpcipowersave(struct ath_hw *ah,
- int restore,
- int power_off)
+ bool power_off)
{
- ath9k_hw_ops(ah)->config_pci_powersave(ah, restore, power_off);
+ if (ah->aspm_enabled != true)
+ return;
+
+ ath9k_hw_ops(ah)->config_pci_powersave(ah, power_off);
}
static inline void ath9k_hw_rxena(struct ath_hw *ah)
@@ -52,13 +54,10 @@ static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
return ath9k_hw_ops(ah)->get_isr(ah, masked);
}
-static inline void ath9k_hw_filltxdesc(struct ath_hw *ah, void *ds, u32 seglen,
- bool is_firstseg, bool is_lastseg,
- const void *ds0, dma_addr_t buf_addr,
- unsigned int qcu)
+static inline void ath9k_hw_set_txdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_info *i)
{
- ath9k_hw_ops(ah)->fill_txdesc(ah, ds, seglen, is_firstseg, is_lastseg,
- ds0, buf_addr, qcu);
+ return ath9k_hw_ops(ah)->set_txdesc(ah, ds, i);
}
static inline int ath9k_hw_txprocdesc(struct ath_hw *ah, void *ds,
@@ -67,55 +66,6 @@ static inline int ath9k_hw_txprocdesc(struct ath_hw *ah, void *ds,
return ath9k_hw_ops(ah)->proc_txdesc(ah, ds, ts);
}
-static inline void ath9k_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
- u32 pktLen, enum ath9k_pkt_type type,
- u32 txPower, u32 keyIx,
- enum ath9k_key_type keyType,
- u32 flags)
-{
- ath9k_hw_ops(ah)->set11n_txdesc(ah, ds, pktLen, type, txPower, keyIx,
- keyType, flags);
-}
-
-static inline void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
- void *lastds,
- u32 durUpdateEn, u32 rtsctsRate,
- u32 rtsctsDuration,
- struct ath9k_11n_rate_series series[],
- u32 nseries, u32 flags)
-{
- ath9k_hw_ops(ah)->set11n_ratescenario(ah, ds, lastds, durUpdateEn,
- rtsctsRate, rtsctsDuration, series,
- nseries, flags);
-}
-
-static inline void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
- u32 aggrLen)
-{
- ath9k_hw_ops(ah)->set11n_aggr_first(ah, ds, aggrLen);
-}
-
-static inline void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
- u32 numDelims)
-{
- ath9k_hw_ops(ah)->set11n_aggr_middle(ah, ds, numDelims);
-}
-
-static inline void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
-{
- ath9k_hw_ops(ah)->set11n_aggr_last(ah, ds);
-}
-
-static inline void ath9k_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
-{
- ath9k_hw_ops(ah)->clr11n_aggr(ah, ds);
-}
-
-static inline void ath9k_hw_set_clrdmask(struct ath_hw *ah, void *ds, bool val)
-{
- ath9k_hw_ops(ah)->set_clrdmask(ah, ds, val);
-}
-
static inline void ath9k_hw_antdiv_comb_conf_get(struct ath_hw *ah,
struct ath_hw_antcomb_conf *antconf)
{
@@ -231,11 +181,6 @@ static inline void ath9k_hw_restore_chainmask(struct ath_hw *ah)
return ath9k_hw_private_ops(ah)->restore_chainmask(ah);
}
-static inline void ath9k_hw_set_diversity(struct ath_hw *ah, bool value)
-{
- return ath9k_hw_private_ops(ah)->set_diversity(ah, value);
-}
-
static inline bool ath9k_hw_ani_control(struct ath_hw *ah,
enum ath9k_ani_cmd cmd, int param)
{
@@ -260,4 +205,11 @@ static inline void ath9k_hw_setup_calibration(struct ath_hw *ah,
ath9k_hw_private_ops(ah)->setup_calibration(ah, currCal);
}
+static inline int ath9k_hw_fast_chan_change(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 *ini_reloaded)
+{
+ return ath9k_hw_private_ops(ah)->fast_chan_change(ah, chan,
+ ini_reloaded);
+}
#endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8dcefe74f4c..f16d2033081 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -284,7 +284,12 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
ah->hw_version.macVersion =
(val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
- ah->is_pciexpress = (val & AR_SREV_TYPE2_HOST_MODE) ? 0 : 1;
+
+ if (AR_SREV_9462(ah))
+ ah->is_pciexpress = true;
+ else
+ ah->is_pciexpress = (val &
+ AR_SREV_TYPE2_HOST_MODE) ? 0 : 1;
} else {
if (!AR_SREV_9100(ah))
ah->hw_version.macVersion = MS(val, AR_SREV_VERSION);
@@ -428,7 +433,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
regulatory->country_code = CTRY_DEFAULT;
regulatory->power_limit = MAX_RATE_POWER;
- regulatory->tp_scale = ATH9K_TP_SCALE_MAX;
ah->hw_version.magic = AR5416_MAGIC;
ah->hw_version.subvendorid = 0;
@@ -440,7 +444,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
if (AR_SREV_9100(ah))
ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
ah->enable_32kHz_clock = DONT_USE_32KHZ;
- ah->slottime = 20;
+ ah->slottime = ATH9K_SLOT_TIME_9;
ah->globaltxtimeout = (u32) -1;
ah->power_mode = ATH9K_PM_UNDEFINED;
}
@@ -537,6 +541,9 @@ static int __ath9k_hw_init(struct ath_hw *ah)
return -EIO;
}
+ if (AR_SREV_9462(ah))
+ ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;
+
ath9k_hw_init_defaults(ah);
ath9k_hw_init_config(ah);
@@ -580,6 +587,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
case AR_SREV_VERSION_9330:
case AR_SREV_VERSION_9485:
case AR_SREV_VERSION_9340:
+ case AR_SREV_VERSION_9462:
break;
default:
ath_err(common,
@@ -603,10 +611,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_init_mode_regs(ah);
-
- if (ah->is_pciexpress)
- ath9k_hw_aspm_init(ah);
- else
+ if (!ah->is_pciexpress)
ath9k_hw_disablepcie(ah);
if (!AR_SREV_9300_20_OR_LATER(ah))
@@ -621,6 +626,9 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (r)
return r;
+ if (ah->is_pciexpress)
+ ath9k_hw_aspm_init(ah);
+
r = ath9k_hw_init_macaddr(ah);
if (r) {
ath_err(common, "Failed to initialize MAC address\n");
@@ -663,6 +671,8 @@ int ath9k_hw_init(struct ath_hw *ah)
case AR9300_DEVID_AR9485_PCIE:
case AR9300_DEVID_AR9330:
case AR9300_DEVID_AR9340:
+ case AR9300_DEVID_AR9580:
+ case AR9300_DEVID_AR9462:
break;
default:
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -959,7 +969,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &common->hw->conf;
const struct ath9k_channel *chan = ah->curchan;
- int acktimeout;
+ int acktimeout, ctstimeout;
int slottime;
int sifstime;
int rx_lat = 0, tx_lat = 0, eifs = 0;
@@ -974,7 +984,10 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
if (ah->misc_mode != 0)
REG_SET_BIT(ah, AR_PCU_MISC, ah->misc_mode);
- rx_lat = 37;
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ rx_lat = 41;
+ else
+ rx_lat = 37;
tx_lat = 54;
if (IS_CHAN_HALF_RATE(chan)) {
@@ -988,7 +1001,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
sifstime = 32;
} else if (IS_CHAN_QUARTER_RATE(chan)) {
eifs = 340;
- rx_lat *= 4;
+ rx_lat = (rx_lat * 4) - 1;
tx_lat *= 4;
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
tx_lat += 22;
@@ -996,8 +1009,14 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
slottime = 21;
sifstime = 64;
} else {
- eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS);
- reg = REG_READ(ah, AR_USEC);
+ if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
+ eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO;
+ reg = AR_USEC_ASYNC_FIFO;
+ } else {
+ eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS)/
+ common->clockrate;
+ reg = REG_READ(ah, AR_USEC);
+ }
rx_lat = MS(reg, AR_USEC_RX_LAT);
tx_lat = MS(reg, AR_USEC_TX_LAT);
@@ -1010,6 +1029,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
/* As defined by IEEE 802.11-2007 17.3.8.6 */
acktimeout = slottime + sifstime + 3 * ah->coverage_class;
+ ctstimeout = acktimeout;
/*
* Workaround for early ACK timeouts, add an offset to match the
@@ -1024,7 +1044,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
ath9k_hw_set_sifs_time(ah, sifstime);
ath9k_hw_setslottime(ah, slottime);
ath9k_hw_set_ack_timeout(ah, acktimeout);
- ath9k_hw_set_cts_timeout(ah, acktimeout);
+ ath9k_hw_set_cts_timeout(ah, ctstimeout);
if (ah->globaltxtimeout != (u32) -1)
ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
@@ -1329,6 +1349,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
{
+
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_WA, ah->WARegVal);
udelay(10);
@@ -1370,11 +1391,17 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
static bool ath9k_hw_channel_change(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_channel *channel = chan->chan;
u32 qnum;
int r;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+ bool band_switch, mode_diff;
+ u8 ini_reloaded;
+
+ band_switch = (chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ)) !=
+ (ah->curchan->channelFlags & (CHANNEL_2GHZ |
+ CHANNEL_5GHZ));
+ mode_diff = (chan->chanmode != ah->curchan->chanmode);
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
if (ath9k_hw_numtxpending(ah, qnum)) {
@@ -1389,6 +1416,18 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
return false;
}
+ if (edma && (band_switch || mode_diff)) {
+ ath9k_hw_mark_phy_inactive(ah);
+ udelay(5);
+
+ ath9k_hw_init_pll(ah, NULL);
+
+ if (ath9k_hw_fast_chan_change(ah, chan, &ini_reloaded)) {
+ ath_err(common, "Failed to do fast channel change\n");
+ return false;
+ }
+ }
+
ath9k_hw_set_channel_regs(ah, chan);
r = ath9k_hw_rf_set_freq(ah, chan);
@@ -1397,14 +1436,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
return false;
}
ath9k_hw_set_clockrate(ah);
-
- ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(regulatory, chan),
- channel->max_antenna_gain * 2,
- channel->max_power * 2,
- min((u32) MAX_RATE_POWER,
- (u32) regulatory->power_limit), false);
-
+ ath9k_hw_apply_txpower(ah, chan);
ath9k_hw_rfbus_done(ah);
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
@@ -1412,6 +1444,18 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
ath9k_hw_spur_mitigate_freq(ah, chan);
+ if (edma && (band_switch || mode_diff)) {
+ ah->ah_flags |= AH_FASTCC;
+ if (band_switch || ini_reloaded)
+ ah->eep_ops->set_board_values(ah, chan);
+
+ ath9k_hw_init_bb(ah, chan);
+
+ if (band_switch || ini_reloaded)
+ ath9k_hw_init_cal(ah, chan);
+ ah->ah_flags &= ~AH_FASTCC;
+ }
+
return true;
}
@@ -1467,9 +1511,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 macStaId1;
u64 tsf = 0;
int i, r;
-
- ah->txchainmask = common->tx_chainmask;
- ah->rxchainmask = common->rx_chainmask;
+ bool allow_fbs = false;
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
@@ -1486,15 +1528,24 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
memset(caldata, 0, sizeof(*caldata));
ath9k_init_nfcal_hist_buffer(ah, chan);
}
+ ah->noise = ath9k_hw_getchan_noise(ah, chan);
+
+ if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI)
+ bChannelChange = false;
+
+ if (caldata &&
+ caldata->done_txiqcal_once &&
+ caldata->done_txclcal_once &&
+ caldata->rtt_hist.num_readings)
+ allow_fbs = true;
if (bChannelChange &&
(ah->chip_fullsleep != true) &&
(ah->curchan != NULL) &&
(chan->channel != ah->curchan->channel) &&
- ((chan->channelFlags & CHANNEL_ALL) ==
- (ah->curchan->channelFlags & CHANNEL_ALL)) &&
- (!AR_SREV_9280(ah) || AR_DEVID_7010(ah))) {
-
+ (allow_fbs ||
+ ((chan->channelFlags & CHANNEL_ALL) ==
+ (ah->curchan->channelFlags & CHANNEL_ALL)))) {
if (ath9k_hw_channel_change(ah, chan)) {
ath9k_hw_loadnf(ah, ah->curchan);
ath9k_hw_start_nfcal(ah, true);
@@ -1665,6 +1716,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_bb(ah, chan);
+ if (caldata) {
+ caldata->done_txiqcal_once = false;
+ caldata->done_txclcal_once = false;
+ caldata->rtt_hist.num_readings = 0;
+ }
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
@@ -1734,25 +1790,40 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
{
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
if (setChip) {
+ if (AR_SREV_9462(ah)) {
+ REG_WRITE(ah, AR_TIMER_MODE,
+ REG_READ(ah, AR_TIMER_MODE) & 0xFFFFFF00);
+ REG_WRITE(ah, AR_NDP2_TIMER_MODE, REG_READ(ah,
+ AR_NDP2_TIMER_MODE) & 0xFFFFFF00);
+ REG_WRITE(ah, AR_SLP32_INC,
+ REG_READ(ah, AR_SLP32_INC) & 0xFFF00000);
+ /* xxx Required for WLAN only case ? */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
+ udelay(100);
+ }
+
/*
* Clear the RTC force wake bit to allow the
* mac to go to sleep.
*/
- REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
- AR_RTC_FORCE_WAKE_EN);
+ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
+
+ if (AR_SREV_9462(ah))
+ udelay(100);
+
if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
/* Shutdown chip. Active low */
- if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah))
- REG_CLR_BIT(ah, (AR_RTC_RESET),
- AR_RTC_RESET_EN);
+ if (!AR_SREV_5416(ah) &&
+ !AR_SREV_9271(ah) && !AR_SREV_9462_10(ah)) {
+ REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
+ udelay(2);
+ }
}
/* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
- if (AR_SREV_9300_20_OR_LATER(ah))
- REG_WRITE(ah, AR_WA,
- ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
+ REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
/*
@@ -1762,6 +1833,8 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
*/
static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
{
+ u32 val;
+
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
if (setChip) {
struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -1771,12 +1844,30 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
REG_WRITE(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_ON_INT);
} else {
+
+ /* When chip goes into network sleep, it could be waken
+ * up by MCI_INT interrupt caused by BT's HW messages
+ * (LNA_xxx, CONT_xxx) which chould be in a very fast
+ * rate (~100us). This will cause chip to leave and
+ * re-enter network sleep mode frequently, which in
+ * consequence will have WLAN MCI HW to generate lots of
+ * SYS_WAKING and SYS_SLEEPING messages which will make
+ * BT CPU to busy to process.
+ */
+ if (AR_SREV_9462(ah)) {
+ val = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_EN) &
+ ~AR_MCI_INTERRUPT_RX_HW_MSG_MASK;
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, val);
+ }
/*
* Clear the RTC force wake bit to allow the
* mac to go to sleep.
*/
REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
+
+ if (AR_SREV_9462(ah))
+ udelay(30);
}
}
@@ -2027,11 +2118,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
regulatory->current_rd = eeval;
- eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_1);
- if (AR_SREV_9285_12_OR_LATER(ah))
- eeval |= AR9285_RDEXT_DEFAULT;
- regulatory->current_rd_ext = eeval;
-
if (ah->opmode != NL80211_IFTYPE_AP &&
ah->hw_version.subvendorid == AR_SUBVENDOR_ID_NEW_A) {
if (regulatory->current_rd == 0x64 ||
@@ -2083,6 +2169,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->tx_chainmask = fixup_chainmask(chip_chainmask, pCap->tx_chainmask);
pCap->rx_chainmask = fixup_chainmask(chip_chainmask, pCap->rx_chainmask);
+ ah->txchainmask = pCap->tx_chainmask;
+ ah->rxchainmask = pCap->rx_chainmask;
ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;
@@ -2101,6 +2189,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->num_gpio_pins = AR9271_NUM_GPIO;
else if (AR_DEVID_7010(ah))
pCap->num_gpio_pins = AR7010_NUM_GPIO;
+ else if (AR_SREV_9300_20_OR_LATER(ah))
+ pCap->num_gpio_pins = AR9300_NUM_GPIO;
+ else if (AR_SREV_9287_11_OR_LATER(ah))
+ pCap->num_gpio_pins = AR9287_NUM_GPIO;
else if (AR_SREV_9285_12_OR_LATER(ah))
pCap->num_gpio_pins = AR9285_NUM_GPIO;
else if (AR_SREV_9280_20_OR_LATER(ah))
@@ -2233,6 +2325,14 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
rx_chainmask >>= 1;
}
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->enabled_cals |= TX_IQ_CAL;
+ if (!AR_SREV_9330(ah))
+ ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
+ }
+ if (AR_SREV_9462(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_RTT;
+
return 0;
}
@@ -2393,6 +2493,9 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
ENABLE_REGWRITE_BUFFER(ah);
+ if (AR_SREV_9462(ah))
+ bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER;
+
REG_WRITE(ah, AR_RX_FILTER, bits);
phybits = 0;
@@ -2434,20 +2537,56 @@ bool ath9k_hw_disable(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_disable);
+static int get_antenna_gain(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ enum eeprom_param gain_param;
+
+ if (IS_CHAN_2GHZ(chan))
+ gain_param = EEP_ANTENNA_GAIN_2G;
+ else
+ gain_param = EEP_ANTENNA_GAIN_5G;
+
+ return ah->eep_ops->get_eeprom(ah, gain_param);
+}
+
+void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+ struct ieee80211_channel *channel;
+ int chan_pwr, new_pwr, max_gain;
+ int ant_gain, ant_reduction = 0;
+
+ if (!chan)
+ return;
+
+ channel = chan->chan;
+ chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
+ new_pwr = min_t(int, chan_pwr, reg->power_limit);
+ max_gain = chan_pwr - new_pwr + channel->max_antenna_gain * 2;
+
+ ant_gain = get_antenna_gain(ah, chan);
+ if (ant_gain > max_gain)
+ ant_reduction = ant_gain - max_gain;
+
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(reg, chan),
+ ant_reduction, new_pwr, false);
+}
+
void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
{
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
struct ath9k_channel *chan = ah->curchan;
struct ieee80211_channel *channel = chan->chan;
- regulatory->power_limit = min(limit, (u32) MAX_RATE_POWER);
+ reg->power_limit = min_t(int, limit, MAX_RATE_POWER);
+ if (test)
+ channel->max_power = MAX_RATE_POWER / 2;
- ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(regulatory, chan),
- channel->max_antenna_gain * 2,
- channel->max_power * 2,
- min((u32) MAX_RATE_POWER,
- (u32) regulatory->power_limit), test);
+ ath9k_hw_apply_txpower(ah, chan);
+
+ if (test)
+ channel->max_power = DIV_ROUND_UP(reg->max_power_level, 2);
}
EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
@@ -2646,6 +2785,20 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
gen_tmr_configuration[timer->index].mode_mask);
+ if (AR_SREV_9462(ah)) {
+ /*
+ * Starting from AR9462, each generic timer can select which tsf
+ * to use. But we still follow the old rule, 0 - 7 use tsf and
+ * 8 - 15 use tsf2.
+ */
+ if ((timer->index < AR_GEN_TIMER_BANK_1_LEN))
+ REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL,
+ (1 << timer->index));
+ else
+ REG_SET_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL,
+ (1 << timer->index));
+ }
+
/* Enable both trigger and thresh interrupt masks */
REG_SET_BIT(ah, AR_IMR_S5,
(SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
@@ -2749,7 +2902,9 @@ static struct {
{ AR_SREV_VERSION_9271, "9271" },
{ AR_SREV_VERSION_9300, "9300" },
{ AR_SREV_VERSION_9330, "9330" },
+ { AR_SREV_VERSION_9340, "9340" },
{ AR_SREV_VERSION_9485, "9485" },
+ { AR_SREV_VERSION_9462, "9462" },
};
/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index c79889036ec..f389b3c93cf 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -45,6 +45,8 @@
#define AR9300_DEVID_PCIE 0x0030
#define AR9300_DEVID_AR9340 0x0031
#define AR9300_DEVID_AR9485_PCIE 0x0032
+#define AR9300_DEVID_AR9580 0x0033
+#define AR9300_DEVID_AR9462 0x0034
#define AR9300_DEVID_AR9330 0x0035
#define AR5416_AR9100_DEVID 0x000b
@@ -93,6 +95,12 @@
(_ah)->reg_ops.write_flush((_ah)); \
} while (0)
+#define PR_EEP(_s, _val) \
+ do { \
+ len += snprintf(buf + len, size - len, "%20s : %10d\n", \
+ _s, (_val)); \
+ } while (0)
+
#define SM(_v, _f) (((_v) << _f##_S) & _f)
#define MS(_v, _f) (((_v) & _f) >> _f##_S)
#define REG_RMW_FIELD(_a, _r, _f, _v) \
@@ -194,6 +202,7 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_2GHZ = BIT(13),
ATH9K_HW_CAP_5GHZ = BIT(14),
ATH9K_HW_CAP_APM = BIT(15),
+ ATH9K_HW_CAP_RTT = BIT(16),
};
struct ath9k_hw_capabilities {
@@ -329,6 +338,16 @@ enum ath9k_int {
CHANNEL_HT40PLUS | \
CHANNEL_HT40MINUS)
+#define MAX_RTT_TABLE_ENTRY 6
+#define RTT_HIST_MAX 3
+struct ath9k_rtt_hist {
+ u32 table[AR9300_MAX_CHAINS][RTT_HIST_MAX][MAX_RTT_TABLE_ENTRY];
+ u8 num_readings;
+};
+
+#define MAX_IQCAL_MEASUREMENT 8
+#define MAX_CL_TAB_ENTRY 16
+
struct ath9k_hw_cal_data {
u16 channel;
u32 channelFlags;
@@ -338,9 +357,15 @@ struct ath9k_hw_cal_data {
bool paprd_done;
bool nfcal_pending;
bool nfcal_interference;
+ bool done_txiqcal_once;
+ bool done_txclcal_once;
u16 small_signal_gain[AR9300_MAX_CHAINS];
u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
+ u32 num_measures[AR9300_MAX_CHAINS];
+ int tx_corr_coeff[MAX_IQCAL_MEASUREMENT][AR9300_MAX_CHAINS];
+ u32 tx_clcal[AR9300_MAX_CHAINS][MAX_CL_TAB_ENTRY];
struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
+ struct ath9k_rtt_hist rtt_hist;
};
struct ath9k_channel {
@@ -382,14 +407,6 @@ enum ath9k_power_mode {
ATH9K_PM_UNDEFINED
};
-enum ath9k_tp_scale {
- ATH9K_TP_SCALE_MAX = 0,
- ATH9K_TP_SCALE_50,
- ATH9K_TP_SCALE_25,
- ATH9K_TP_SCALE_12,
- ATH9K_TP_SCALE_MIN
-};
-
enum ser_reg_mode {
SER_REG_MODE_OFF = 0,
SER_REG_MODE_ON = 1,
@@ -438,7 +455,6 @@ struct ath9k_hw_version {
u16 phyRev;
u16 analog5GhzRev;
u16 analog2GhzRev;
- u16 subsysid;
enum ath_usb_dev usbdev;
};
@@ -577,7 +593,6 @@ struct ath_hw_private_ops {
bool (*rfbus_req)(struct ath_hw *ah);
void (*rfbus_done)(struct ath_hw *ah);
void (*restore_chainmask)(struct ath_hw *ah);
- void (*set_diversity)(struct ath_hw *ah, bool value);
u32 (*compute_pll_control)(struct ath_hw *ah,
struct ath9k_channel *chan);
bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd,
@@ -585,6 +600,8 @@ struct ath_hw_private_ops {
void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
void (*set_radar_params)(struct ath_hw *ah,
struct ath_hw_radar_conf *conf);
+ int (*fast_chan_change)(struct ath_hw *ah, struct ath9k_channel *chan,
+ u8 *ini_reloaded);
/* ANI */
void (*ani_cache_ini_regs)(struct ath_hw *ah);
@@ -601,8 +618,7 @@ struct ath_hw_private_ops {
*/
struct ath_hw_ops {
void (*config_pci_powersave)(struct ath_hw *ah,
- int restore,
- int power_off);
+ bool power_off);
void (*rx_enable)(struct ath_hw *ah);
void (*set_desc_link)(void *ds, u32 link);
bool (*calibrate)(struct ath_hw *ah,
@@ -610,30 +626,10 @@ struct ath_hw_ops {
u8 rxchainmask,
bool longcal);
bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked);
- void (*fill_txdesc)(struct ath_hw *ah, void *ds, u32 seglen,
- bool is_firstseg, bool is_is_lastseg,
- const void *ds0, dma_addr_t buf_addr,
- unsigned int qcu);
+ void (*set_txdesc)(struct ath_hw *ah, void *ds,
+ struct ath_tx_info *i);
int (*proc_txdesc)(struct ath_hw *ah, void *ds,
struct ath_tx_status *ts);
- void (*set11n_txdesc)(struct ath_hw *ah, void *ds,
- u32 pktLen, enum ath9k_pkt_type type,
- u32 txPower, u32 keyIx,
- enum ath9k_key_type keyType,
- u32 flags);
- void (*set11n_ratescenario)(struct ath_hw *ah, void *ds,
- void *lastds,
- u32 durUpdateEn, u32 rtsctsRate,
- u32 rtsctsDuration,
- struct ath9k_11n_rate_series series[],
- u32 nseries, u32 flags);
- void (*set11n_aggr_first)(struct ath_hw *ah, void *ds,
- u32 aggrLen);
- void (*set11n_aggr_middle)(struct ath_hw *ah, void *ds,
- u32 numDelims);
- void (*set11n_aggr_last)(struct ath_hw *ah, void *ds);
- void (*clr11n_aggr)(struct ath_hw *ah, void *ds);
- void (*set_clrdmask)(struct ath_hw *ah, void *ds, bool val);
void (*antdiv_comb_conf_get)(struct ath_hw *ah,
struct ath_hw_antcomb_conf *antconf);
void (*antdiv_comb_conf_set)(struct ath_hw *ah,
@@ -647,9 +643,16 @@ struct ath_nf_limits {
s16 nominal;
};
+enum ath_cal_list {
+ TX_IQ_CAL = BIT(0),
+ TX_IQ_ON_AGC_CAL = BIT(1),
+ TX_CL_CAL = BIT(2),
+};
+
/* ah_flags */
#define AH_USE_EEPROM 0x1
#define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
+#define AH_FASTCC 0x4
struct ath_hw {
struct ath_ops reg_ops;
@@ -690,6 +693,7 @@ struct ath_hw {
enum nl80211_iftype opmode;
enum ath9k_power_mode power_mode;
+ s8 noise;
struct ath9k_hw_cal_data *caldata;
struct ath9k_pacal_info pacal_info;
struct ar5416Stats stats;
@@ -703,8 +707,10 @@ struct ath_hw {
u32 txdesc_interrupt_mask;
u32 txeol_interrupt_mask;
u32 txurn_interrupt_mask;
+ atomic_t intr_ref_cnt;
bool chip_fullsleep;
u32 atim_window;
+ u32 modes_index;
/* Calibration */
u32 supp_cals;
@@ -743,6 +749,7 @@ struct ath_hw {
int32_t sign[AR5416_MAX_CHAINS];
} meas3;
u16 cal_samples;
+ u8 enabled_cals;
u32 sta_id1_defaults;
u32 misc_mode;
@@ -820,11 +827,14 @@ struct ath_hw {
struct ar5416IniArray iniModes_9271_1_0_only;
struct ar5416IniArray iniCckfirNormal;
struct ar5416IniArray iniCckfirJapan2484;
+ struct ar5416IniArray ini_japan2484;
struct ar5416IniArray iniCommon_normal_cck_fir_coeff_9271;
struct ar5416IniArray iniCommon_japan_2484_cck_fir_coeff_9271;
struct ar5416IniArray iniModes_9271_ANI_reg;
struct ar5416IniArray iniModes_high_power_tx_gain_9271;
struct ar5416IniArray iniModes_normal_power_tx_gain_9271;
+ struct ar5416IniArray ini_radio_post_sys2ant;
+ struct ar5416IniArray ini_BTCOEX_MAX_TXPWR;
struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
@@ -978,6 +988,7 @@ void ath9k_hw_htc_resetinit(struct ath_hw *ah);
/* PHY */
void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
u32 *coef_mantissa, u32 *coef_exponent);
+void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan);
/*
* Code Specific to AR5008, AR9001 or AR9002,
@@ -1030,10 +1041,6 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_proc_mib_event(struct ath_hw *ah);
void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
-#define ATH_PCIE_CAP_LINK_CTRL 0x70
-#define ATH_PCIE_CAP_LINK_L0S 1
-#define ATH_PCIE_CAP_LINK_L1 2
-
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index aa0ff7e2c92..af1b3254953 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -270,8 +270,8 @@ static void setup_ht_cap(struct ath_softc *sc,
/* set up supported mcs set */
memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
- rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);
+ tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
+ rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
ath_dbg(common, ATH_DBG_CONFIG,
"TX streams %d, RX streams: %d\n",
@@ -404,31 +404,6 @@ fail:
return error;
}
-void ath9k_init_crypto(struct ath_softc *sc)
-{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- int i = 0;
-
- /* Get the hardware key cache size. */
- common->keymax = AR_KEYTABLE_SIZE;
-
- /*
- * Reset the key cache since some parts do not
- * reset the contents on initial power up.
- */
- for (i = 0; i < common->keymax; i++)
- ath_hw_keyreset(common, (u16) i);
-
- /*
- * Check whether the separate key cache entries
- * are required to handle both tx+rx MIC keys.
- * With split mic keys the number of stations is limited
- * to 27 otherwise 59.
- */
- if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
- common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
-}
-
static int ath9k_init_btcoex(struct ath_softc *sc)
{
struct ath_txq *txq;
@@ -531,10 +506,6 @@ static void ath9k_init_misc(struct ath_softc *sc)
sc->sc_flags |= SC_OP_RXAGGR;
}
- common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
- common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
-
- ath9k_hw_set_diversity(sc->sc_ah, true);
sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
@@ -548,7 +519,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}
-static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
+static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
const struct ath_bus_ops *bus_ops)
{
struct ath9k_platform_data *pdata = sc->dev->platform_data;
@@ -563,10 +534,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
ah->hw = sc->hw;
ah->hw_version.devid = devid;
- ah->hw_version.subsysid = subsysid;
ah->reg_ops.read = ath9k_ioread32;
ah->reg_ops.write = ath9k_iowrite32;
ah->reg_ops.rmw = ath9k_reg_rmw;
+ atomic_set(&ah->intr_ref_cnt, -1);
sc->sc_ah = ah;
if (!pdata) {
@@ -597,6 +568,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
spin_lock_init(&sc->nodes_lock);
+ spin_lock_init(&sc->debug.samp_lock);
INIT_LIST_HEAD(&sc->nodes);
#endif
tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
@@ -630,7 +602,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
if (ret)
goto err_btcoex;
- ath9k_init_crypto(sc);
+ ath9k_cmn_init_crypto(sc->sc_ah);
ath9k_init_misc(sc);
return 0;
@@ -654,7 +626,6 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
struct ath_hw *ah = sc->sc_ah;
- struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
int i;
sband = &sc->sbands[band];
@@ -663,17 +634,14 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
ah->curchan = &ah->channels[chan->hw_value];
ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
- chan->max_power = reg->max_power_level / 2;
}
}
static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath9k_channel *curchan = ah->curchan;
- ah->txchainmask = common->tx_chainmask;
if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
@@ -682,9 +650,22 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
ah->curchan = curchan;
}
+void ath9k_reload_chainmask_settings(struct ath_softc *sc)
+{
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
+ return;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
+ setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
+ setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+}
+
+
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
@@ -722,6 +703,16 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->sta_data_size = sizeof(struct ath_node);
hw->vif_data_size = sizeof(struct ath_vif);
+ hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
+ hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
+
+ /* single chain devices with rx diversity */
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
+ hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);
+
+ sc->ant_rx = hw->wiphy->available_antennas_rx;
+ sc->ant_tx = hw->wiphy->available_antennas_tx;
+
#ifdef CONFIG_ATH9K_RATE_CONTROL
hw->rate_control_algorithm = "ath9k_rate_control";
#endif
@@ -733,17 +724,12 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&sc->sbands[IEEE80211_BAND_5GHZ];
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
- setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
- setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
- }
+ ath9k_reload_chainmask_settings(sc);
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}
-int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+int ath9k_init_device(u16 devid, struct ath_softc *sc,
const struct ath_bus_ops *bus_ops)
{
struct ieee80211_hw *hw = sc->hw;
@@ -753,7 +739,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
struct ath_regulatory *reg;
/* Bring up device */
- error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
+ error = ath9k_init_softc(devid, sc, bus_ops);
if (error != 0)
goto error_init;
@@ -806,6 +792,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
goto error_world;
}
+ INIT_WORK(&sc->hw_reset_work, ath_reset_work);
INIT_WORK(&sc->hw_check_work, ath_hw_check);
INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index b6b523a897e..6a8fdf33a52 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -62,18 +62,6 @@ void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
}
EXPORT_SYMBOL(ath9k_hw_txstart);
-void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
- ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
- ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
- ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
- ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
-}
-EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
-
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
u32 npend;
@@ -345,21 +333,8 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
}
memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
qi->tqi_type = type;
- if (qinfo == NULL) {
- qi->tqi_qflags =
- TXQ_FLAG_TXOKINT_ENABLE
- | TXQ_FLAG_TXERRINT_ENABLE
- | TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
- qi->tqi_aifs = INIT_AIFS;
- qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
- qi->tqi_cwmax = INIT_CWMAX;
- qi->tqi_shretry = INIT_SH_RETRY;
- qi->tqi_lgretry = INIT_LG_RETRY;
- qi->tqi_physCompBuf = 0;
- } else {
- qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
- (void) ath9k_hw_set_txq_props(ah, q, qinfo);
- }
+ qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
+ (void) ath9k_hw_set_txq_props(ah, q, qinfo);
return q;
}
@@ -564,7 +539,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
- struct ath_rx_status *rs, u64 tsf)
+ struct ath_rx_status *rs)
{
struct ar5416_desc ads;
struct ar5416_desc *adsp = AR5416DESC(ds);
@@ -609,7 +584,7 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
else
rs->rs_keyix = ATH9K_RXKEYIX_INVALID;
- rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
+ rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
@@ -645,8 +620,8 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
- else if (ads.ds_rxstatus8 & AR_KeyMiss)
- rs->rs_status |= ATH9K_RXERR_DECRYPT;
+ if (ads.ds_rxstatus8 & AR_KeyMiss)
+ rs->rs_status |= ATH9K_RXERR_KEYMISS;
}
return 0;
@@ -800,6 +775,11 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
+ if (!(ah->imask & ATH9K_INT_GLOBAL))
+ atomic_set(&ah->intr_ref_cnt, -1);
+ else
+ atomic_dec(&ah->intr_ref_cnt);
+
ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
(void) REG_READ(ah, AR_IER);
@@ -821,6 +801,13 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
if (!(ah->imask & ATH9K_INT_GLOBAL))
return;
+ if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
+ ath_dbg(common, ATH_DBG_INTERRUPT,
+ "Do not enable IER ref count %d\n",
+ atomic_read(&ah->intr_ref_cnt));
+ return;
+ }
+
if (AR_SREV_9340(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
@@ -840,9 +827,9 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
-void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
+void ath9k_hw_set_interrupts(struct ath_hw *ah)
{
- enum ath9k_int omask = ah->imask;
+ enum ath9k_int ints = ah->imask;
u32 mask, mask2;
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
@@ -850,9 +837,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
if (!(ints & ATH9K_INT_GLOBAL))
ath9k_hw_disable_interrupts(ah);
- ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
+ ath_dbg(common, ATH_DBG_INTERRUPT, "New interrupt mask 0x%x\n", ints);
- /* TODO: global int Ref count */
mask = ints & ATH9K_INT_COMMON;
mask2 = 0;
@@ -929,9 +915,6 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
}
- if (ints & ATH9K_INT_GLOBAL)
- ath9k_hw_enable_interrupts(ah);
-
return;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 8e848c4d16b..11dbd1473a1 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -17,10 +17,6 @@
#ifndef MAC_H
#define MAC_H
-#define RXSTATUS_RATE(ah, ads) (AR_SREV_5416_20_OR_LATER(ah) ? \
- MS(ads->ds_rxstatus0, AR_RxRate) : \
- (ads->ds_rxstatus3 >> 2) & 0xFF)
-
#define set11nTries(_series, _index) \
(SM((_series)[_index].Tries, AR_XmitDataTries##_index))
@@ -79,9 +75,10 @@
#define ATH9K_TXERR_XTXOP 0x08
#define ATH9K_TXERR_TIMER_EXPIRED 0x10
#define ATH9K_TX_ACKED 0x20
+#define ATH9K_TX_FLUSH 0x40
#define ATH9K_TXERR_MASK \
(ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \
- ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED)
+ ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED | ATH9K_TX_FLUSH)
#define ATH9K_TX_BA 0x01
#define ATH9K_TX_PWRMGMT 0x02
@@ -146,6 +143,7 @@ struct ath_rx_status {
u8 rs_moreaggr;
u8 rs_num_delims;
u8 rs_flags;
+ bool is_mybeacon;
u32 evm0;
u32 evm1;
u32 evm2;
@@ -184,6 +182,7 @@ struct ath_htc_rx_status {
#define ATH9K_RXERR_FIFO 0x04
#define ATH9K_RXERR_DECRYPT 0x08
#define ATH9K_RXERR_MIC 0x10
+#define ATH9K_RXERR_KEYMISS 0x20
#define ATH9K_RX_MORE 0x01
#define ATH9K_RX_MORE_AGGR 0x02
@@ -194,7 +193,7 @@ struct ath_htc_rx_status {
#define ATH9K_RX_DECRYPT_BUSY 0x40
#define ATH9K_RXKEYIX_INVALID ((u8)-1)
-#define ATH9K_TXKEYIX_INVALID ((u32)-1)
+#define ATH9K_TXKEYIX_INVALID ((u8)-1)
enum ath9k_phyerr {
ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */
@@ -262,7 +261,11 @@ struct ath_desc {
#define ATH9K_TXDESC_VMF 0x0100
#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
#define ATH9K_TXDESC_LOWRXCHAIN 0x0400
-#define ATH9K_TXDESC_LDPC 0x00010000
+#define ATH9K_TXDESC_LDPC 0x0800
+#define ATH9K_TXDESC_CLRDMASK 0x1000
+
+#define ATH9K_TXDESC_PAPRD 0x70000
+#define ATH9K_TXDESC_PAPRD_S 16
#define ATH9K_RXDESC_INTREQ 0x0020
@@ -643,6 +646,7 @@ enum ath9k_rx_filter {
ATH9K_RX_FILTER_PSPOLL = 0x00004000,
ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,
+ ATH9K_RX_FILTER_CONTROL_WRAPPER = 0x00080000,
};
#define ATH9K_RATESERIES_RTS_CTS 0x0001
@@ -658,6 +662,13 @@ struct ath9k_11n_rate_series {
u32 RateFlags;
};
+enum aggr_type {
+ AGGR_BUF_NONE,
+ AGGR_BUF_FIRST,
+ AGGR_BUF_MIDDLE,
+ AGGR_BUF_LAST,
+};
+
enum ath9k_key_type {
ATH9K_KEY_TYPE_CLEAR,
ATH9K_KEY_TYPE_WEP,
@@ -665,6 +676,33 @@ enum ath9k_key_type {
ATH9K_KEY_TYPE_TKIP,
};
+struct ath_tx_info {
+ u8 qcu;
+
+ bool is_first;
+ bool is_last;
+
+ enum aggr_type aggr;
+ u8 ndelim;
+ u16 aggr_len;
+
+ dma_addr_t link;
+ int pkt_len;
+ u32 flags;
+
+ dma_addr_t buf_addr[4];
+ int buf_len[4];
+
+ struct ath9k_11n_rate_series rates[4];
+ u8 rtscts_rate;
+ bool dur_update;
+
+ enum ath9k_pkt_type type;
+ enum ath9k_key_type keytype;
+ u8 keyix;
+ u8 txpower;
+};
+
struct ath_hw;
struct ath9k_channel;
enum ath9k_int;
@@ -672,7 +710,6 @@ enum ath9k_int;
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
void ath9k_hw_txstart(struct ath_hw *ah, u32 q);
-void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds);
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q);
@@ -687,7 +724,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q);
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
- struct ath_rx_status *rs, u64 tsf);
+ struct ath_rx_status *rs);
void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
u32 size, u32 flags);
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
@@ -699,7 +736,7 @@ int ath9k_hw_beaconq_setup(struct ath_hw *ah);
/* Interrupt Handling */
bool ath9k_hw_intrpend(struct ath_hw *ah);
-void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints);
+void ath9k_hw_set_interrupts(struct ath_hw *ah);
void ath9k_hw_enable_interrupts(struct ath_hw *ah);
void ath9k_hw_disable_interrupts(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 722967b86cf..93fbe6f4089 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -111,24 +111,29 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
void ath9k_ps_restore(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ enum ath9k_power_mode mode;
unsigned long flags;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
if (--sc->ps_usecount != 0)
goto unlock;
- spin_lock(&common->cc_lock);
- ath_hw_cycle_counters_update(common);
- spin_unlock(&common->cc_lock);
-
if (sc->ps_idle)
- ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+ mode = ATH9K_PM_FULL_SLEEP;
else if (sc->ps_enabled &&
!(sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA |
PS_WAIT_FOR_TX_ACK)))
- ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
+ mode = ATH9K_PM_NETWORK_SLEEP;
+ else
+ goto unlock;
+
+ spin_lock(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ spin_unlock(&common->cc_lock);
+
+ ath9k_hw_setpower(sc->sc_ah, mode);
unlock:
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
@@ -163,7 +168,7 @@ static void ath_update_survey_nf(struct ath_softc *sc, int channel)
if (chan->noisefloor) {
survey->filled |= SURVEY_INFO_NOISE_DBM;
- survey->noise = chan->noisefloor;
+ survey->noise = ath9k_hw_getchan_noise(ah, chan);
}
}
@@ -212,104 +217,168 @@ static int ath_update_survey_stats(struct ath_softc *sc)
return ret;
}
-/*
- * Set/change channels. If the channel is really being changed, it's done
- * by reseting the chip. To accomplish this we must first cleanup any pending
- * DMA, then restart stuff.
-*/
-static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
- struct ath9k_channel *hchan)
+static void __ath_cancel_work(struct ath_softc *sc)
{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
- bool fastcc = true, stopped;
- struct ieee80211_channel *channel = hw->conf.channel;
- struct ath9k_hw_cal_data *caldata = NULL;
- int r;
-
- if (sc->sc_flags & SC_OP_INVALID)
- return -EIO;
-
- sc->hw_busy_count = 0;
-
- del_timer_sync(&common->ani.timer);
cancel_work_sync(&sc->paprd_work);
cancel_work_sync(&sc->hw_check_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
cancel_delayed_work_sync(&sc->hw_pll_work);
+}
- ath9k_ps_wakeup(sc);
+static void ath_cancel_work(struct ath_softc *sc)
+{
+ __ath_cancel_work(sc);
+ cancel_work_sync(&sc->hw_reset_work);
+}
- spin_lock_bh(&sc->sc_pcu_lock);
+static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool ret;
- /*
- * This is only performed if the channel settings have
- * actually changed.
- *
- * To switch channels clear any pending DMA operations;
- * wait long enough for the RX fifo to drain, reset the
- * hardware at the new frequency, and then re-enable
- * the relevant bits of the h/w.
- */
- ath9k_hw_disable_interrupts(ah);
- stopped = ath_drain_all_txq(sc, false);
+ ieee80211_stop_queues(sc->hw);
- if (!ath_stoprecv(sc))
- stopped = false;
+ sc->hw_busy_count = 0;
+ del_timer_sync(&common->ani.timer);
- if (!ath9k_hw_check_alive(ah))
- stopped = false;
+ ath9k_debug_samp_bb_mac(sc);
+ ath9k_hw_disable_interrupts(ah);
- /* XXX: do not flush receive queue here. We don't want
- * to flush data frames already in queue because of
- * changing channel. */
+ ret = ath_drain_all_txq(sc, retry_tx);
- if (!stopped || !(sc->sc_flags & SC_OP_OFFCHANNEL))
- fastcc = false;
+ if (!ath_stoprecv(sc))
+ ret = false;
- if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
- caldata = &sc->caldata;
+ if (!flush) {
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_rx_tasklet(sc, 1, true);
+ ath_rx_tasklet(sc, 1, false);
+ } else {
+ ath_flushrecv(sc);
+ }
- ath_dbg(common, ATH_DBG_CONFIG,
- "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
- sc->sc_ah->curchan->channel,
- channel->center_freq, conf_is_ht40(conf),
- fastcc);
+ return ret;
+}
- r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
- if (r) {
- ath_err(common,
- "Unable to reset channel (%u MHz), reset status %d\n",
- channel->center_freq, r);
- goto ps_restore;
- }
+static bool ath_complete_reset(struct ath_softc *sc, bool start)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
if (ath_startrecv(sc) != 0) {
ath_err(common, "Unable to restart recv logic\n");
- r = -EIO;
- goto ps_restore;
+ return false;
}
ath9k_cmn_update_txpow(ah, sc->curtxpow,
sc->config.txpowlimit, &sc->curtxpow);
- ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
- if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
+ if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) && start) {
if (sc->sc_flags & SC_OP_BEACONS)
ath_set_beacon(sc);
+
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
if (!common->disable_ani)
ath_start_ani(common);
}
- ps_restore:
- ieee80211_wake_queues(hw);
+ if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) {
+ struct ath_hw_antcomb_conf div_ant_conf;
+ u8 lna_conf;
+
+ ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
+
+ if (sc->ant_rx == 1)
+ lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.main_lna_conf = lna_conf;
+ div_ant_conf.alt_lna_conf = lna_conf;
+
+ ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
+ }
+
+ ieee80211_wake_queues(sc->hw);
+
+ return true;
+}
+
+static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
+ bool retry_tx)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_cal_data *caldata = NULL;
+ bool fastcc = true;
+ bool flush = false;
+ int r;
+
+ __ath_cancel_work(sc);
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ if (!(sc->sc_flags & SC_OP_OFFCHANNEL)) {
+ fastcc = false;
+ caldata = &sc->caldata;
+ }
+
+ if (!hchan) {
+ fastcc = false;
+ flush = true;
+ hchan = ah->curchan;
+ }
+
+ if (fastcc && !ath9k_hw_check_alive(ah))
+ fastcc = false;
+
+ if (!ath_prepare_reset(sc, retry_tx, flush))
+ fastcc = false;
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Reset to %u MHz, HT40: %d fastcc: %d\n",
+ hchan->channel, !!(hchan->channelFlags & (CHANNEL_HT40MINUS |
+ CHANNEL_HT40PLUS)),
+ fastcc);
+
+ r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
+ if (r) {
+ ath_err(common,
+ "Unable to reset channel, reset status %d\n", r);
+ goto out;
+ }
+
+ if (!ath_complete_reset(sc, true))
+ r = -EIO;
+
+out:
spin_unlock_bh(&sc->sc_pcu_lock);
+ return r;
+}
+
+
+/*
+ * Set/change channels. If the channel is really being changed, it's done
+ * by reseting the chip. To accomplish this we must first cleanup any pending
+ * DMA, then restart stuff.
+*/
+static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
+ struct ath9k_channel *hchan)
+{
+ int r;
+
+ if (sc->sc_flags & SC_OP_INVALID)
+ return -EIO;
+
+ ath9k_ps_wakeup(sc);
+
+ r = ath_reset_internal(sc, hchan, false);
ath9k_ps_restore(sc);
+
return r;
}
@@ -317,7 +386,6 @@ static void ath_paprd_activate(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath9k_hw_cal_data *caldata = ah->caldata;
- struct ath_common *common = ath9k_hw_common(ah);
int chain;
if (!caldata || !caldata->paprd_done)
@@ -326,7 +394,7 @@ static void ath_paprd_activate(struct ath_softc *sc)
ath9k_ps_wakeup(sc);
ar9003_paprd_enable(ah, false);
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
- if (!(common->tx_chainmask & BIT(chain)))
+ if (!(ah->txchainmask & BIT(chain)))
continue;
ar9003_paprd_populate_single_table(ah, caldata, chain);
@@ -413,7 +481,7 @@ void ath_paprd_calibrate(struct work_struct *work)
memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
- if (!(common->tx_chainmask & BIT(chain)))
+ if (!(ah->txchainmask & BIT(chain)))
continue;
chain_ok = 0;
@@ -534,7 +602,7 @@ void ath_ani_calibrate(unsigned long data)
if (longcal || shortcal) {
common->ani.caldone =
ath9k_hw_calibrate(ah, ah->curchan,
- common->rx_chainmask, longcal);
+ ah->rxchainmask, longcal);
}
ath9k_ps_restore(sc);
@@ -545,6 +613,7 @@ set_timer:
* The interval must be the shortest necessary to satisfy ANI,
* short calibration and long calibration.
*/
+ ath9k_debug_samp_bb_mac(sc);
cal_interval = ATH_LONG_CALINTERVAL;
if (sc->sc_ah->config.enable_ani)
cal_interval = min(cal_interval,
@@ -564,7 +633,6 @@ set_timer:
static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
struct ath_node *an;
- struct ath_hw *ah = sc->sc_ah;
an = (struct ath_node *)sta->drv_priv;
#ifdef CONFIG_ATH9K_DEBUGFS
@@ -573,9 +641,6 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
spin_unlock(&sc->nodes_lock);
an->sta = sta;
#endif
- if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM)
- sc->sc_flags |= SC_OP_ENABLE_APM;
-
if (sc->sc_flags & SC_OP_TXAGGR) {
ath_tx_node_init(sc, an);
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
@@ -599,74 +664,6 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
ath_tx_node_cleanup(sc, an);
}
-void ath_hw_check(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- unsigned long flags;
- int busy;
-
- ath9k_ps_wakeup(sc);
- if (ath9k_hw_check_alive(sc->sc_ah))
- goto out;
-
- spin_lock_irqsave(&common->cc_lock, flags);
- busy = ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
-
- ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
- "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
- if (busy >= 99) {
- if (++sc->hw_busy_count >= 3) {
- spin_lock_bh(&sc->sc_pcu_lock);
- ath_reset(sc, true);
- spin_unlock_bh(&sc->sc_pcu_lock);
- }
- } else if (busy >= 0)
- sc->hw_busy_count = 0;
-
-out:
- ath9k_ps_restore(sc);
-}
-
-static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
-{
- static int count;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-
- if (pll_sqsum >= 0x40000) {
- count++;
- if (count == 3) {
- /* Rx is hung for more than 500ms. Reset it */
- ath_dbg(common, ATH_DBG_RESET,
- "Possible RX hang, resetting");
- spin_lock_bh(&sc->sc_pcu_lock);
- ath_reset(sc, true);
- spin_unlock_bh(&sc->sc_pcu_lock);
- count = 0;
- }
- } else
- count = 0;
-}
-
-void ath_hw_pll_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- hw_pll_work.work);
- u32 pll_sqsum;
-
- if (AR_SREV_9485(sc->sc_ah)) {
-
- ath9k_ps_wakeup(sc);
- pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
- ath9k_ps_restore(sc);
-
- ath_hw_pll_rx_hang_check(sc, pll_sqsum);
-
- ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
- }
-}
-
void ath9k_tasklet(unsigned long data)
{
@@ -677,16 +674,24 @@ void ath9k_tasklet(unsigned long data)
u32 status = sc->intrstatus;
u32 rxmask;
+ ath9k_ps_wakeup(sc);
+ spin_lock(&sc->sc_pcu_lock);
+
if ((status & ATH9K_INT_FATAL) ||
(status & ATH9K_INT_BB_WATCHDOG)) {
- spin_lock(&sc->sc_pcu_lock);
- ath_reset(sc, true);
- spin_unlock(&sc->sc_pcu_lock);
- return;
- }
+#ifdef CONFIG_ATH9K_DEBUGFS
+ enum ath_reset_type type;
- ath9k_ps_wakeup(sc);
- spin_lock(&sc->sc_pcu_lock);
+ if (status & ATH9K_INT_FATAL)
+ type = RESET_TYPE_FATAL_INT;
+ else
+ type = RESET_TYPE_BB_WATCHDOG;
+
+ RESET_STAT_INC(sc, type);
+#endif
+ ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+ goto out;
+ }
/*
* Only run the baseband hang check if beacons stop working in AP or
@@ -706,8 +711,7 @@ void ath9k_tasklet(unsigned long data)
*/
ath_dbg(common, ATH_DBG_PS,
"TSFOOR - Sync with next Beacon\n");
- sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC |
- PS_TSFOOR_SYNC;
+ sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
}
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
@@ -736,6 +740,7 @@ void ath9k_tasklet(unsigned long data)
if (status & ATH9K_INT_GENTIMER)
ath_gen_timer_isr(sc->sc_ah);
+out:
/* re-enable hardware interrupt */
ath9k_hw_enable_interrupts(ah);
@@ -826,11 +831,9 @@ irqreturn_t ath_isr(int irq, void *dev)
if (status & ATH9K_INT_TXURN)
ath9k_hw_updatetxtriglevel(ah, true);
- if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
- if (status & ATH9K_INT_RXEOL) {
- ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
- ath9k_hw_set_interrupts(ah, ah->imask);
- }
+ if (status & ATH9K_INT_RXEOL) {
+ ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+ ath9k_hw_set_interrupts(ah);
}
if (status & ATH9K_INT_MIB) {
@@ -886,8 +889,9 @@ static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_ps_wakeup(sc);
spin_lock_bh(&sc->sc_pcu_lock);
+ atomic_set(&ah->intr_ref_cnt, -1);
- ath9k_hw_configpcipowersave(ah, 0, 0);
+ ath9k_hw_configpcipowersave(ah, false);
if (!ah->curchan)
ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
@@ -899,27 +903,13 @@ static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
channel->center_freq, r);
}
- ath9k_cmn_update_txpow(ah, sc->curtxpow,
- sc->config.txpowlimit, &sc->curtxpow);
- if (ath_startrecv(sc) != 0) {
- ath_err(common, "Unable to restart recv logic\n");
- goto out;
- }
- if (sc->sc_flags & SC_OP_BEACONS)
- ath_set_beacon(sc); /* restart beacons */
-
- /* Re-Enable interrupts */
- ath9k_hw_set_interrupts(ah, ah->imask);
+ ath_complete_reset(sc, true);
/* Enable LED */
ath9k_hw_cfg_output(ah, ah->led_pin,
AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
ath9k_hw_set_gpio(ah, ah->led_pin, 0);
- ieee80211_wake_queues(hw);
- ieee80211_queue_delayed_work(hw, &sc->hw_pll_work, HZ/2);
-
-out:
spin_unlock_bh(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
@@ -932,11 +922,10 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
int r;
ath9k_ps_wakeup(sc);
- cancel_delayed_work_sync(&sc->hw_pll_work);
- spin_lock_bh(&sc->sc_pcu_lock);
+ ath_cancel_work(sc);
- ieee80211_stop_queues(hw);
+ spin_lock_bh(&sc->sc_pcu_lock);
/*
* Keep the LED on when the radio is disabled
@@ -947,13 +936,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
}
- /* Disable interrupts */
- ath9k_hw_disable_interrupts(ah);
-
- ath_drain_all_txq(sc, false); /* clear pending tx frames */
-
- ath_stoprecv(sc); /* turn off frame recv */
- ath_flushrecv(sc); /* flush recv queue */
+ ath_prepare_reset(sc, false, true);
if (!ah->curchan)
ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
@@ -967,55 +950,19 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_hw_phy_disable(ah);
- ath9k_hw_configpcipowersave(ah, 1, 1);
+ ath9k_hw_configpcipowersave(ah, true);
spin_unlock_bh(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
}
-int ath_reset(struct ath_softc *sc, bool retry_tx)
+static int ath_reset(struct ath_softc *sc, bool retry_tx)
{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_hw *hw = sc->hw;
int r;
- sc->hw_busy_count = 0;
-
- /* Stop ANI */
-
- del_timer_sync(&common->ani.timer);
-
ath9k_ps_wakeup(sc);
- ieee80211_stop_queues(hw);
-
- ath9k_hw_disable_interrupts(ah);
- ath_drain_all_txq(sc, retry_tx);
-
- ath_stoprecv(sc);
- ath_flushrecv(sc);
-
- r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
- if (r)
- ath_err(common,
- "Unable to reset hardware; reset status %d\n", r);
-
- if (ath_startrecv(sc) != 0)
- ath_err(common, "Unable to start recv logic\n");
-
- /*
- * We may be doing a reset in response to a request
- * that changes the channel so update any state that
- * might change as a result.
- */
- ath9k_cmn_update_txpow(ah, sc->curtxpow,
- sc->config.txpowlimit, &sc->curtxpow);
-
- if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
- ath_set_beacon(sc); /* restart beacons */
-
- ath9k_hw_set_interrupts(ah, ah->imask);
+ r = ath_reset_internal(sc, NULL, retry_tx);
if (retry_tx) {
int i;
@@ -1028,15 +975,83 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
}
}
- ieee80211_wake_queues(hw);
+ ath9k_ps_restore(sc);
+
+ return r;
+}
+
+void ath_reset_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
+
+ ath_reset(sc, true);
+}
+
+void ath_hw_check(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long flags;
+ int busy;
+
+ ath9k_ps_wakeup(sc);
+ if (ath9k_hw_check_alive(sc->sc_ah))
+ goto out;
+
+ spin_lock_irqsave(&common->cc_lock, flags);
+ busy = ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+
+ ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
+ "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
+ if (busy >= 99) {
+ if (++sc->hw_busy_count >= 3) {
+ RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
+ ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+ }
- /* Start ANI */
- if (!common->disable_ani)
- ath_start_ani(common);
+ } else if (busy >= 0)
+ sc->hw_busy_count = 0;
+out:
ath9k_ps_restore(sc);
+}
- return r;
+static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
+{
+ static int count;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (pll_sqsum >= 0x40000) {
+ count++;
+ if (count == 3) {
+ /* Rx is hung for more than 500ms. Reset it */
+ ath_dbg(common, ATH_DBG_RESET,
+ "Possible RX hang, resetting");
+ RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG);
+ ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+ count = 0;
+ }
+ } else
+ count = 0;
+}
+
+void ath_hw_pll_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ hw_pll_work.work);
+ u32 pll_sqsum;
+
+ if (AR_SREV_9485(sc->sc_ah)) {
+
+ ath9k_ps_wakeup(sc);
+ pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
+ ath9k_ps_restore(sc);
+
+ ath_hw_pll_rx_hang_check(sc, pll_sqsum);
+
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
+ }
}
/**********************/
@@ -1066,7 +1081,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
init_channel = ath9k_cmn_get_curchannel(hw, ah);
/* Reset SERDES registers */
- ath9k_hw_configpcipowersave(ah, 0, 0);
+ ath9k_hw_configpcipowersave(ah, false);
/*
* The basic interface to setting the hardware in a good
@@ -1085,28 +1100,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
goto mutex_unlock;
}
- /*
- * This is needed only to setup initial state
- * but it's best done after a reset.
- */
- ath9k_cmn_update_txpow(ah, sc->curtxpow,
- sc->config.txpowlimit, &sc->curtxpow);
-
- /*
- * Setup the hardware after reset:
- * The receive engine is set going.
- * Frame transmit is handled entirely
- * in the frame output path; there's nothing to do
- * here except setup the interrupt mask.
- */
- if (ath_startrecv(sc) != 0) {
- ath_err(common, "Unable to start recv logic\n");
- r = -EIO;
- spin_unlock_bh(&sc->sc_pcu_lock);
- goto mutex_unlock;
- }
- spin_unlock_bh(&sc->sc_pcu_lock);
-
/* Setup our intr mask. */
ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
ATH9K_INT_RXORN | ATH9K_INT_FATAL |
@@ -1129,11 +1122,14 @@ static int ath9k_start(struct ieee80211_hw *hw)
/* Disable BMISS interrupt when we're not associated */
ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
- ath9k_hw_set_interrupts(ah, ah->imask);
- ieee80211_wake_queues(hw);
+ if (!ath_complete_reset(sc, false)) {
+ r = -EIO;
+ spin_unlock_bh(&sc->sc_pcu_lock);
+ goto mutex_unlock;
+ }
- ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+ spin_unlock_bh(&sc->sc_pcu_lock);
if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
!ah->btcoex_hw.enabled) {
@@ -1141,8 +1137,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
AR_STOMP_LOW_WLAN_WGHT);
ath9k_hw_btcoex_enable(ah);
- if (common->bus_ops->bt_coex_prep)
- common->bus_ops->bt_coex_prep(common);
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
ath9k_btcoex_timer_resume(sc);
}
@@ -1228,10 +1222,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
mutex_lock(&sc->mutex);
- cancel_delayed_work_sync(&sc->tx_complete_work);
- cancel_delayed_work_sync(&sc->hw_pll_work);
- cancel_work_sync(&sc->paprd_work);
- cancel_work_sync(&sc->hw_check_work);
+ ath_cancel_work(sc);
if (sc->sc_flags & SC_OP_INVALID) {
ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
@@ -1418,7 +1409,7 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
ah->imask &= ~ATH9K_INT_TSFOOR;
}
- ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_hw_set_interrupts(ah);
/* Set up ANI */
if (iter_data.naps > 0) {
@@ -1593,7 +1584,7 @@ static void ath9k_enable_ps(struct ath_softc *sc)
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
ah->imask |= ATH9K_INT_TIM_TIMER;
- ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_hw_set_interrupts(ah);
}
ath9k_hw_setrxabort(ah, 1);
}
@@ -1613,7 +1604,7 @@ static void ath9k_disable_ps(struct ath_softc *sc)
PS_WAIT_FOR_TX_ACK);
if (ah->imask & ATH9K_INT_TIM_TIMER) {
ah->imask &= ~ATH9K_INT_TIM_TIMER;
- ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_hw_set_interrupts(ah);
}
}
@@ -1676,6 +1667,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
struct ieee80211_channel *curchan = hw->conf.channel;
+ struct ath9k_channel old_chan;
int pos = curchan->hw_value;
int old_pos = -1;
unsigned long flags;
@@ -1692,15 +1684,25 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
"Set channel: %d MHz type: %d\n",
curchan->center_freq, conf->channel_type);
- ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
- curchan, conf->channel_type);
-
/* update survey stats for the old channel before switching */
spin_lock_irqsave(&common->cc_lock, flags);
ath_update_survey_stats(sc);
spin_unlock_irqrestore(&common->cc_lock, flags);
/*
+ * Preserve the current channel values, before updating
+ * the same channel
+ */
+ if (old_pos == pos) {
+ memcpy(&old_chan, &sc->sc_ah->channels[pos],
+ sizeof(struct ath9k_channel));
+ ah->curchan = &old_chan;
+ }
+
+ ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
+ curchan, conf->channel_type);
+
+ /*
* If the operating channel changes, change the survey in-use flags
* along with it.
* Reset the survey data for the new channel, unless we're switching
@@ -1844,8 +1846,7 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
switch (cmd) {
case STA_NOTIFY_SLEEP:
an->sleeping = true;
- if (ath_tx_aggr_sleep(sc, an))
- ieee80211_sta_set_tim(sta);
+ ath_tx_aggr_sleep(sta, sc, an);
break;
case STA_NOTIFY_AWAKE:
an->sleeping = false;
@@ -1854,7 +1855,8 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
}
}
-static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
+static int ath9k_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct ath_softc *sc = hw->priv;
@@ -2032,6 +2034,7 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
/* Stop ANI */
sc->sc_flags &= ~SC_OP_ANI_RUN;
del_timer_sync(&common->ani.timer);
+ memset(&sc->caldata, 0, sizeof(sc->caldata));
}
}
@@ -2153,7 +2156,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
ath9k_ps_restore(sc);
}
-static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
+static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath_softc *sc = hw->priv;
u64 tsf;
@@ -2167,7 +2170,9 @@ static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
return tsf;
}
-static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+static void ath9k_set_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 tsf)
{
struct ath_softc *sc = hw->priv;
@@ -2178,7 +2183,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
mutex_unlock(&sc->mutex);
}
-static void ath9k_reset_tsf(struct ieee80211_hw *hw)
+static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath_softc *sc = hw->priv;
@@ -2341,9 +2346,11 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
ath9k_ps_wakeup(sc);
spin_lock_bh(&sc->sc_pcu_lock);
drain_txq = ath_drain_all_txq(sc, false);
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
if (!drain_txq)
ath_reset(sc, false);
- spin_unlock_bh(&sc->sc_pcu_lock);
+
ath9k_ps_restore(sc);
ieee80211_wake_queues(hw);
@@ -2406,6 +2413,73 @@ skip:
return sc->beacon.tx_last;
}
+static int ath9k_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_mib_stats *mib_stats = &ah->ah_mibStats;
+
+ stats->dot11ACKFailureCount = mib_stats->ackrcv_bad;
+ stats->dot11RTSFailureCount = mib_stats->rts_bad;
+ stats->dot11FCSErrorCount = mib_stats->fcs_bad;
+ stats->dot11RTSSuccessCount = mib_stats->rts_good;
+ return 0;
+}
+
+static u32 fill_chainmask(u32 cap, u32 new)
+{
+ u32 filled = 0;
+ int i;
+
+ for (i = 0; cap && new; i++, cap >>= 1) {
+ if (!(cap & BIT(0)))
+ continue;
+
+ if (new & BIT(0))
+ filled |= BIT(i);
+
+ new >>= 1;
+ }
+
+ return filled;
+}
+
+static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (!rx_ant || !tx_ant)
+ return -EINVAL;
+
+ sc->ant_rx = rx_ant;
+ sc->ant_tx = tx_ant;
+
+ if (ah->caps.rx_chainmask == 1)
+ return 0;
+
+ /* AR9100 runs into calibration issues if not all rx chains are enabled */
+ if (AR_SREV_9100(ah))
+ ah->rxchainmask = 0x7;
+ else
+ ah->rxchainmask = fill_chainmask(ah->caps.rx_chainmask, rx_ant);
+
+ ah->txchainmask = fill_chainmask(ah->caps.tx_chainmask, tx_ant);
+ ath9k_reload_chainmask_settings(sc);
+
+ return 0;
+}
+
+static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath_softc *sc = hw->priv;
+
+ *tx_ant = sc->ant_tx;
+ *rx_ant = sc->ant_rx;
+ return 0;
+}
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -2430,5 +2504,8 @@ struct ieee80211_ops ath9k_ops = {
.set_coverage_class = ath9k_set_coverage_class,
.flush = ath9k_flush,
.tx_frames_pending = ath9k_tx_frames_pending,
- .tx_last_beacon = ath9k_tx_last_beacon,
+ .tx_last_beacon = ath9k_tx_last_beacon,
+ .get_stats = ath9k_get_stats,
+ .set_antenna = ath9k_set_antenna,
+ .get_antenna = ath9k_get_antenna,
};
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index be4ea132981..edb0b4b3da3 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -32,9 +32,12 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
+ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
+ { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
{ 0 }
};
+
/* return bus cachesize in 4B word units */
static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
{
@@ -88,23 +91,6 @@ static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
return true;
}
-/*
- * Bluetooth coexistance requires disabling ASPM.
- */
-static void ath_pci_bt_coex_prep(struct ath_common *common)
-{
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- struct pci_dev *pdev = to_pci_dev(sc->dev);
- u8 aspm;
-
- if (!pci_is_pcie(pdev))
- return;
-
- pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm);
- aspm &= ~(ATH_PCIE_CAP_LINK_L0S | ATH_PCIE_CAP_LINK_L1);
- pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
-}
-
static void ath_pci_extn_synch_enable(struct ath_common *common)
{
struct ath_softc *sc = (struct ath_softc *) common->priv;
@@ -116,6 +102,7 @@ static void ath_pci_extn_synch_enable(struct ath_common *common)
pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl);
}
+/* Need to be called after we discover btcoex capabilities */
static void ath_pci_aspm_init(struct ath_common *common)
{
struct ath_softc *sc = (struct ath_softc *) common->priv;
@@ -125,19 +112,38 @@ static void ath_pci_aspm_init(struct ath_common *common)
int pos;
u8 aspm;
- if (!pci_is_pcie(pdev))
+ pos = pci_pcie_cap(pdev);
+ if (!pos)
return;
parent = pdev->bus->self;
- if (WARN_ON(!parent))
+ if (!parent)
return;
+ if (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) {
+ /* Bluetooth coexistance requires disabling ASPM. */
+ pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm);
+ aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ pci_write_config_byte(pdev, pos + PCI_EXP_LNKCTL, aspm);
+
+ /*
+ * Both upstream and downstream PCIe components should
+ * have the same ASPM settings.
+ */
+ pos = pci_pcie_cap(parent);
+ pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm);
+ aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ pci_write_config_byte(parent, pos + PCI_EXP_LNKCTL, aspm);
+
+ return;
+ }
+
pos = pci_pcie_cap(parent);
pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm);
if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
ah->aspm_enabled = true;
/* Initialize PCIe PM and SERDES registers. */
- ath9k_hw_configpcipowersave(ah, 0, 0);
+ ath9k_hw_configpcipowersave(ah, false);
}
}
@@ -145,7 +151,6 @@ static const struct ath_bus_ops ath_pci_bus_ops = {
.ath_bus_type = ATH_PCI,
.read_cachesize = ath_pci_read_cachesize,
.eeprom_read = ath_pci_eeprom_read,
- .bt_coex_prep = ath_pci_bt_coex_prep,
.extn_synch_en = ath_pci_extn_synch_enable,
.aspm_init = ath_pci_aspm_init,
};
@@ -156,7 +161,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct ath_softc *sc;
struct ieee80211_hw *hw;
u8 csz;
- u16 subsysid;
u32 val;
int ret = 0;
char hw_name[64];
@@ -250,8 +254,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->irq = pdev->irq;
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
- ret = ath9k_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
+ ret = ath9k_init_device(id->device, sc, &ath_pci_bus_ops);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize device\n");
goto err_init;
@@ -330,17 +333,17 @@ static int ath_pci_resume(struct device *device)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+ ath9k_ps_wakeup(sc);
/* Enable LED */
ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
/*
* Reset key cache to sane defaults (all entries cleared) instead of
* semi-random values after suspend/resume.
*/
- ath9k_ps_wakeup(sc);
- ath9k_init_crypto(sc);
+ ath9k_cmn_init_crypto(sc->sc_ah);
ath9k_ps_restore(sc);
sc->ps_idle = true;
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index c04a6c3cac7..8448281dd06 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -603,7 +603,8 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
struct ath_rate_priv *ath_rc_priv,
const struct ath_rate_table *rate_table,
- int *is_probing)
+ int *is_probing,
+ bool legacy)
{
u32 best_thruput, this_thruput, now_msec;
u8 rate, next_rate, best_rate, maxindex, minindex;
@@ -624,6 +625,8 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
u8 per_thres;
rate = ath_rc_priv->valid_rate_index[index];
+ if (legacy && !(rate_table->info[rate].rate_flags & RC_LEGACY))
+ continue;
if (rate > ath_rc_priv->rate_max_phy)
continue;
@@ -767,7 +770,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
struct ieee80211_tx_rate *rates = tx_info->control.rates;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
- u8 try_per_rate, i = 0, rix;
+ u8 try_per_rate, i = 0, rix, high_rix;
int is_probe = 0;
if (rate_control_send_low(sta, priv_sta, txrc))
@@ -786,7 +789,9 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
try_per_rate = 4;
rate_table = ath_rc_priv->rate_table;
- rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe);
+ rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
+ &is_probe, false);
+ high_rix = rix;
/*
* If we're in HT mode and both us and our peer supports LDPC.
@@ -822,10 +827,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
}
/* Fill in the other rates for multirate retry */
- for ( ; i < 4; i++) {
- /* Use twice the number of tries for the last MRR segment. */
- if (i + 1 == 4)
- try_per_rate = 8;
+ for ( ; i < 3; i++) {
ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
/* All other rates in the series have RTS enabled */
@@ -833,6 +835,24 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
try_per_rate, rix, 1);
}
+ /* Use twice the number of tries for the last MRR segment. */
+ try_per_rate = 8;
+
+ /*
+ * Use a legacy rate as last retry to ensure that the frame
+ * is tried in both MCS and legacy rates.
+ */
+ if ((rates[2].flags & IEEE80211_TX_RC_MCS) &&
+ (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU) ||
+ (ath_rc_priv->per[high_rix] > 45)))
+ rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
+ &is_probe, true);
+ else
+ ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
+
+ /* All other rates in the series have RTS enabled */
+ ath_rc_rate_set_series(rate_table, &rates[i], txrc,
+ try_per_rate, rix, 1);
/*
* NB:Change rate series to enable aggregation when operating
* at lower MCS rates. When first rate in series is MCS2
@@ -1342,12 +1362,6 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
return;
- if (!(tx_info->flags & IEEE80211_TX_STAT_AMPDU)) {
- tx_info->status.ampdu_ack_len =
- (tx_info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
- tx_info->status.ampdu_len = 1;
- }
-
if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
tx_status = 1;
@@ -1484,7 +1498,7 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
if (rc->rate_table == NULL)
return 0;
- max = 80 + rc->rate_table->rate_cnt * 1024 + 1;
+ max = 80 + rc->rate_table_size * 1024 + 1;
buf = kmalloc(max, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
@@ -1494,7 +1508,7 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
"HT", "MCS", "Rate",
"Success", "Retries", "XRetries", "PER");
- for (i = 0; i < rc->rate_table->rate_cnt; i++) {
+ for (i = 0; i < rc->rate_table_size; i++) {
u32 ratekbps = rc->rate_table->info[i].ratekbps;
struct ath_rc_stats *stats = &rc->rcstats[i];
char mcs[5];
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index c3d850207be..b7a4bcd3eec 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -221,12 +221,6 @@ struct ath_rate_priv {
struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
};
-enum ath9k_internal_frame_type {
- ATH9K_IFT_NOT_INTERNAL,
- ATH9K_IFT_PAUSE,
- ATH9K_IFT_UNPAUSE
-};
-
#ifdef CONFIG_ATH9K_RATE_CONTROL
int ath_rate_control_register(void);
void ath_rate_control_unregister(void);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 4c21f8cbdeb..67b862cdae6 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -433,12 +433,9 @@ void ath_rx_cleanup(struct ath_softc *sc)
u32 ath_calcrxfilter(struct ath_softc *sc)
{
-#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
-
u32 rfilt;
- rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
- | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
+ rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
| ATH9K_RX_FILTER_MCAST;
if (sc->rx.rxfilter & FIF_PROBE_REQ)
@@ -586,22 +583,11 @@ static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
- struct ieee80211_mgmt *mgmt;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
if (skb->len < 24 + 8 + 2 + 2)
return;
- mgmt = (struct ieee80211_mgmt *)skb->data;
- if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
- /* TODO: This doesn't work well if you have stations
- * associated to two different APs because curbssid
- * is just the last AP that any of the stations associated
- * with.
- */
- return; /* not from our current AP */
- }
-
sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
if (sc->ps_flags & PS_BEACON_SYNC) {
@@ -609,7 +595,6 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
ath_dbg(common, ATH_DBG_PS,
"Reconfigure Beacon timers based on timestamp from the AP\n");
ath_set_beacon(sc);
- sc->ps_flags &= ~PS_TSFOOR_SYNC;
}
if (ath_beacon_dtim_pending_cab(skb)) {
@@ -638,7 +623,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
}
}
-static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
+static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
struct ieee80211_hdr *hdr;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -647,7 +632,7 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
/* Process Beacon and CAB receive in PS state */
if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
- && ieee80211_is_beacon(hdr->frame_control))
+ && mybeacon)
ath_rx_ps_beacon(sc, skb);
else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
(ieee80211_is_data(hdr->frame_control) ||
@@ -770,7 +755,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
* on. All this is necessary because of our use of
* a self-linked list to avoid rx overruns.
*/
- ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
+ ret = ath9k_hw_rxprocdesc(ah, ds, rs);
if (ret == -EINPROGRESS) {
struct ath_rx_status trs;
struct ath_buf *tbf;
@@ -796,7 +781,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
*/
tds = tbf->bf_desc;
- ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
+ ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
if (ret == -EINPROGRESS)
return NULL;
}
@@ -823,6 +808,7 @@ static bool ath9k_rx_accept(struct ath_common *common,
struct ath_rx_status *rx_stats,
bool *decrypt_error)
{
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
bool is_mc, is_valid_tkip, strip_mic, mic_error;
struct ath_hw *ah = common->ah;
__le16 fc;
@@ -833,8 +819,10 @@ static bool ath9k_rx_accept(struct ath_common *common,
is_mc = !!is_multicast_ether_addr(hdr->addr1);
is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
test_bit(rx_stats->rs_keyix, common->tkip_keymap);
- strip_mic = is_valid_tkip && !(rx_stats->rs_status &
- (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC));
+ strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
+ !(rx_stats->rs_status &
+ (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_KEYMISS));
if (!rx_stats->rs_datalen)
return false;
@@ -862,6 +850,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
* descriptors.
*/
if (rx_stats->rs_status != 0) {
+ u8 status_mask;
+
if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
mic_error = false;
@@ -869,7 +859,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
if (rx_stats->rs_status & ATH9K_RXERR_PHY)
return false;
- if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
+ if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
+ (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
*decrypt_error = true;
mic_error = false;
}
@@ -879,17 +870,14 @@ static bool ath9k_rx_accept(struct ath_common *common,
* decryption and MIC failures. For monitor mode,
* we also ignore the CRC error.
*/
- if (ah->is_monitoring) {
- if (rx_stats->rs_status &
- ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
- ATH9K_RXERR_CRC))
- return false;
- } else {
- if (rx_stats->rs_status &
- ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
- return false;
- }
- }
+ status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_KEYMISS;
+
+ if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
+ status_mask |= ATH9K_RXERR_CRC;
+
+ if (rx_stats->rs_status & ~status_mask)
+ return false;
}
/*
@@ -945,7 +933,7 @@ static int ath9k_process_rate(struct ath_common *common,
* No valid hardware bitrate found -- we should not get here
* because hardware has already validated this frame as OK.
*/
- ath_dbg(common, ATH_DBG_XMIT,
+ ath_dbg(common, ATH_DBG_ANY,
"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
rx_stats->rs_rate);
@@ -960,23 +948,12 @@ static void ath9k_process_rssi(struct ath_common *common,
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = common->ah;
int last_rssi;
- __le16 fc;
- if ((ah->opmode != NL80211_IFTYPE_STATION) &&
- (ah->opmode != NL80211_IFTYPE_ADHOC))
+ if (!rx_stats->is_mybeacon ||
+ ((ah->opmode != NL80211_IFTYPE_STATION) &&
+ (ah->opmode != NL80211_IFTYPE_ADHOC)))
return;
- fc = hdr->frame_control;
- if (!ieee80211_is_beacon(fc) ||
- compare_ether_addr(hdr->addr3, common->curbssid)) {
- /* TODO: This doesn't work well if you have stations
- * associated to two different APs because curbssid
- * is just the last AP that any of the stations associated
- * with.
- */
- return;
- }
-
if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
@@ -1003,6 +980,8 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
struct ieee80211_rx_status *rx_status,
bool *decrypt_error)
{
+ struct ath_hw *ah = common->ah;
+
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
/*
@@ -1023,7 +1002,7 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
rx_status->band = hw->conf.channel->band;
rx_status->freq = hw->conf.channel->center_freq;
- rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
+ rx_status->signal = ah->noise + rx_stats->rs_rssi;
rx_status->antenna = rx_stats->rs_antenna;
rx_status->flag |= RX_FLAG_MACTIME_MPDU;
@@ -1791,11 +1770,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- /*
- * The hw can technically differ from common->hw when using ath9k
- * virtual wiphy so to account for that we iterate over the active
- * wiphys and find the appropriate wiphy and therefore hw.
- */
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
int retval;
@@ -1849,6 +1823,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
rxs = IEEE80211_SKB_RXCB(hdr_skb);
+ if (ieee80211_is_beacon(hdr->frame_control) &&
+ !compare_ether_addr(hdr->addr3, common->curbssid))
+ rs.is_mybeacon = true;
+ else
+ rs.is_mybeacon = false;
ath_debug_stat_rx(sc, &rs);
@@ -1856,7 +1835,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
* If we're asked to flush receive queue, directly
* chain it back at the queue without processing it.
*/
- if (flush)
+ if (sc->sc_flags & SC_OP_RXFLUSH)
goto requeue_drop_frag;
retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
@@ -1961,13 +1940,13 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
spin_lock_irqsave(&sc->sc_pm_lock, flags);
if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
- PS_WAIT_FOR_CAB |
- PS_WAIT_FOR_PSPOLL_DATA)) ||
- ath9k_check_auto_sleep(sc))
- ath_rx_ps(sc, skb);
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA)) ||
+ ath9k_check_auto_sleep(sc))
+ ath_rx_ps(sc, skb, rs.is_mybeacon);
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
- if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
ath_ant_comb_scan(sc, &rs);
ieee80211_rx(hw, skb);
@@ -1984,11 +1963,17 @@ requeue:
} else {
list_move_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_buf_link(sc, bf);
- ath9k_hw_rxena(ah);
+ if (!flush)
+ ath9k_hw_rxena(ah);
}
} while (1);
spin_unlock_bh(&sc->rx.rxbuflock);
+ if (!(ah->imask & ATH9K_INT_RXEOL)) {
+ ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+ ath9k_hw_set_interrupts(ah);
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index fa4c0bbce6b..8fcb7e9e839 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -619,6 +619,7 @@
#define AR_D_GBL_IFS_EIFS 0x10b0
#define AR_D_GBL_IFS_EIFS_M 0x0000FFFF
#define AR_D_GBL_IFS_EIFS_RESV0 0xFFFF0000
+#define AR_D_GBL_IFS_EIFS_ASYNC_FIFO 363
#define AR_D_GBL_IFS_MISC 0x10f0
#define AR_D_GBL_IFS_MISC_LFSR_SLICE_SEL 0x00000007
@@ -793,14 +794,15 @@
#define AR_SREV_REVISION_9485_10 0
#define AR_SREV_REVISION_9485_11 1
#define AR_SREV_VERSION_9340 0x300
+#define AR_SREV_VERSION_9580 0x1C0
+#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
+#define AR_SREV_VERSION_9462 0x280
+#define AR_SREV_REVISION_9462_10 0
+#define AR_SREV_REVISION_9462_20 2
#define AR_SREV_5416(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE))
-#define AR_SREV_5416_20_OR_LATER(_ah) \
- (((AR_SREV_5416(_ah)) && \
- ((_ah)->hw_version.macRev >= AR_SREV_REVISION_5416_20)) || \
- ((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100))
#define AR_SREV_5416_22_OR_LATER(_ah) \
(((AR_SREV_5416(_ah)) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_5416_22)) || \
@@ -893,6 +895,33 @@
(AR_SREV_9285_12_OR_LATER(_ah) && \
((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
+#define AR_SREV_9462(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462))
+
+#define AR_SREV_9462_10(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_10))
+
+#define AR_SREV_9462_20(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
+
+#define AR_SREV_9462_20_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
+
+#define AR_SREV_9580(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9580_10))
+
+#define AR_SREV_9580_10(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9580_10))
+
+/* NOTE: When adding chips newer than Peacock, add chip check here */
+#define AR_SREV_9580_10_OR_LATER(_ah) \
+ (AR_SREV_9580(_ah))
+
enum ath_usb_dev {
AR9280_USB = 1, /* AR7010 + AR9280, UB94 */
AR9287_USB = 2, /* AR7010 + AR9287, UB95 */
@@ -1117,7 +1146,7 @@ enum {
#define AR_INTR_PRIO_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4094 : 0x40d4)
#define AR_ENT_OTP 0x40d8
#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000
-#define AR_ENT_OTP_MPSD 0x00800000
+#define AR_ENT_OTP_MIN_PKT_SIZE_DISABLE 0x00800000
#define AR_CH0_BB_DPLL1 0x16180
#define AR_CH0_BB_DPLL1_REFDIV 0xF8000000
@@ -1489,6 +1518,7 @@ enum {
#define AR_USEC_TX_LAT_S 14
#define AR_USEC_RX_LAT 0x1F800000
#define AR_USEC_RX_LAT_S 23
+#define AR_USEC_ASYNC_FIFO 0x12E00074
#define AR_RESET_TSF 0x8020
#define AR_RESET_TSF_ONCE 0x01000000
@@ -1763,6 +1793,7 @@ enum {
#define AR_TXOP_12_15 0x81fc
#define AR_NEXT_NDP2_TIMER 0x8180
+#define AR_GEN_TIMER_BANK_1_LEN 8
#define AR_FIRST_NDP_TIMER 7
#define AR_NDP2_PERIOD 0x81a0
#define AR_NDP2_TIMER_MODE 0x81c0
@@ -1851,9 +1882,10 @@ enum {
#define AR_PCU_MISC_MODE2_HWWAR2 0x02000000
#define AR_PCU_MISC_MODE2_RESERVED2 0xFFFE0000
-#define AR_MAC_PCU_ASYNC_FIFO_REG3 0x8358
-#define AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL 0x00000400
-#define AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET 0x80000000
+#define AR_MAC_PCU_ASYNC_FIFO_REG3 0x8358
+#define AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL 0x00000400
+#define AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET 0x80000000
+#define AR_MAC_PCU_GEN_TIMER_TSF_SEL 0x83d8
#define AR_AES_MUTE_MASK0 0x805c
@@ -1901,7 +1933,42 @@ enum {
#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000 /* don't update noise floor automatically */
#define AR_PHY_AGC_CONTROL_EXT_NF_PWR_MEAS 0x00040000 /* extend noise floor power measurement */
#define AR_PHY_AGC_CONTROL_CLC_SUCCESS 0x00080000 /* carrier leak calibration done */
+#define AR_PHY_AGC_CONTROL_PKDET_CAL 0x00100000
#define AR_PHY_AGC_CONTROL_YCOK_MAX 0x000003c0
#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
+/* MCI Registers */
+#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
+#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
+#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
+#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
+
+
#endif
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index cc595712f51..03b0a651a59 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -48,19 +48,23 @@ static u16 bits_per_symbol[][2] = {
#define IS_HT_RATE(_rate) ((_rate) & 0x80)
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid,
- struct list_head *bf_head);
+ struct ath_atx_tid *tid, struct sk_buff *skb);
+static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
+ int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head, bool internal);
-static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
- int txok, bool update_rc);
+ int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
int seqno);
+static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+ struct sk_buff *skb);
enum {
MCS_HT20,
@@ -129,7 +133,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
spin_lock_bh(&txq->axq_lock);
tid->paused = false;
- if (list_empty(&tid->buf_q))
+ if (skb_queue_empty(&tid->buf_q))
goto unlock;
ath_tx_queue_tid(txq, tid);
@@ -149,6 +153,7 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
+ struct sk_buff *skb;
struct ath_buf *bf;
struct list_head bf_head;
struct ath_tx_status ts;
@@ -159,17 +164,17 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
memset(&ts, 0, sizeof(ts));
spin_lock_bh(&txq->axq_lock);
- while (!list_empty(&tid->buf_q)) {
- bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
- list_move_tail(&bf->list, &bf_head);
+ while ((skb = __skb_dequeue(&tid->buf_q))) {
+ fi = get_frame_info(skb);
+ bf = fi->bf;
spin_unlock_bh(&txq->axq_lock);
- fi = get_frame_info(bf->bf_mpdu);
- if (fi->retries) {
- ath_tx_update_baw(sc, tid, fi->seqno);
+ if (bf && fi->retries) {
+ list_add_tail(&bf->list, &bf_head);
+ ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
} else {
- ath_tx_send_normal(sc, txq, NULL, &bf_head);
+ ath_tx_send_normal(sc, txq, NULL, skb);
}
spin_lock_bh(&txq->axq_lock);
}
@@ -219,6 +224,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid)
{
+ struct sk_buff *skb;
struct ath_buf *bf;
struct list_head bf_head;
struct ath_tx_status ts;
@@ -227,16 +233,21 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
- for (;;) {
- if (list_empty(&tid->buf_q))
- break;
+ while ((skb = __skb_dequeue(&tid->buf_q))) {
+ fi = get_frame_info(skb);
+ bf = fi->bf;
- bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
- list_move_tail(&bf->list, &bf_head);
+ if (!bf) {
+ spin_unlock(&txq->axq_lock);
+ ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
+ spin_lock(&txq->axq_lock);
+ continue;
+ }
+
+ list_add_tail(&bf->list, &bf_head);
- fi = get_frame_info(bf->bf_mpdu);
if (fi->retries)
- ath_tx_update_baw(sc, tid, fi->seqno);
+ ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
spin_unlock(&txq->axq_lock);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
@@ -251,6 +262,7 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
struct sk_buff *skb)
{
struct ath_frame_info *fi = get_frame_info(skb);
+ struct ath_buf *bf = fi->bf;
struct ieee80211_hdr *hdr;
TX_STAT_INC(txq->axq_qnum, a_retries);
@@ -259,6 +271,8 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
hdr = (struct ieee80211_hdr *)skb->data;
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ sizeof(*hdr), DMA_TO_DEVICE);
}
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
@@ -326,7 +340,7 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
while (bf) {
fi = get_frame_info(bf->bf_mpdu);
- ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
+ ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
(*nframes)++;
if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
@@ -349,7 +363,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ieee80211_tx_info *tx_info;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
- struct list_head bf_head, bf_pending;
+ struct list_head bf_head;
+ struct sk_buff_head bf_pending;
u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
@@ -358,7 +373,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_frame_info *fi;
int nframes;
u8 tidno;
- bool clear_filter;
+ bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
skb = bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -377,11 +392,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
while (bf) {
bf_next = bf->bf_next;
- bf->bf_state.bf_type |= BUF_XRETRY;
if (!bf->bf_stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
- ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
0, 0);
@@ -422,11 +435,12 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
}
- INIT_LIST_HEAD(&bf_pending);
- INIT_LIST_HEAD(&bf_head);
+ __skb_queue_head_init(&bf_pending);
ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
while (bf) {
+ u16 seqno = bf->bf_state.seqno;
+
txfail = txpending = sendbar = 0;
bf_next = bf->bf_next;
@@ -434,7 +448,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
tx_info = IEEE80211_SKB_CB(skb);
fi = get_frame_info(skb);
- if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
+ if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
/* transmit completion, subframe is
* acked by block ack */
acked_cnt++;
@@ -448,15 +462,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* the un-acked sub-frames
*/
txfail = 1;
+ } else if (flush) {
+ txpending = 1;
} else if (fi->retries < ATH_MAX_SW_RETRIES) {
- if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
- !an->sleeping)
+ if (txok || !an->sleeping)
ath_tx_set_retry(sc, txq, bf->bf_mpdu);
- clear_filter = true;
txpending = 1;
} else {
- bf->bf_state.bf_type |= BUF_XRETRY;
txfail = 1;
sendbar = 1;
txfail_cnt++;
@@ -467,10 +480,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* Make sure the last desc is reclaimed if it
* not a holding desc.
*/
- if (!bf_last->bf_stale || bf_next != NULL)
+ INIT_LIST_HEAD(&bf_head);
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
+ bf_next != NULL || !bf_last->bf_stale)
list_move_tail(&bf->list, &bf_head);
- else
- INIT_LIST_HEAD(&bf_head);
if (!txpending || (tid->state & AGGR_CLEANUP)) {
/*
@@ -478,22 +491,19 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* block-ack window
*/
spin_lock_bh(&txq->axq_lock);
- ath_tx_update_baw(sc, tid, fi->seqno);
+ ath_tx_update_baw(sc, tid, seqno);
spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
- ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
+ ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
rc_update = false;
- } else {
- ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
}
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
!txfail, sendbar);
} else {
/* retry the un-acked ones */
- ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
if (bf->bf_next == NULL && bf_last->bf_stale) {
struct ath_buf *tbf;
@@ -506,29 +516,17 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
*/
if (!tbf) {
spin_lock_bh(&txq->axq_lock);
- ath_tx_update_baw(sc, tid, fi->seqno);
+ ath_tx_update_baw(sc, tid, seqno);
spin_unlock_bh(&txq->axq_lock);
- bf->bf_state.bf_type |=
- BUF_XRETRY;
- ath_tx_rc_status(sc, bf, ts, nframes,
- nbad, 0, false);
ath_tx_complete_buf(sc, bf, txq,
&bf_head,
- ts, 0, 0);
+ ts, 0,
+ !flush);
break;
}
- ath9k_hw_cleartxdesc(sc->sc_ah,
- tbf->bf_desc);
- list_add_tail(&tbf->list, &bf_head);
- } else {
- /*
- * Clear descriptor status words for
- * software retry
- */
- ath9k_hw_cleartxdesc(sc->sc_ah,
- bf->bf_desc);
+ fi->bf = tbf;
}
}
@@ -536,22 +534,25 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* Put this buffer to the temporary pending
* queue to retain ordering
*/
- list_splice_tail_init(&bf_head, &bf_pending);
+ __skb_queue_tail(&bf_pending, skb);
}
bf = bf_next;
}
/* prepend un-acked frames to the beginning of the pending frame queue */
- if (!list_empty(&bf_pending)) {
+ if (!skb_queue_empty(&bf_pending)) {
if (an->sleeping)
- ieee80211_sta_set_tim(sta);
+ ieee80211_sta_set_buffered(sta, tid->tidno, true);
spin_lock_bh(&txq->axq_lock);
- if (clear_filter)
- tid->ac->clear_ps_filter = true;
- list_splice(&bf_pending, &tid->buf_q);
- ath_tx_queue_tid(txq, tid);
+ skb_queue_splice(&bf_pending, &tid->buf_q);
+ if (!an->sleeping) {
+ ath_tx_queue_tid(txq, tid);
+
+ if (ts->ts_status & ATH9K_TXERR_FILT)
+ tid->ac->clear_ps_filter = true;
+ }
spin_unlock_bh(&txq->axq_lock);
}
@@ -566,8 +567,32 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
rcu_read_unlock();
- if (needreset)
- ath_reset(sc, false);
+ if (needreset) {
+ RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
+ ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+ }
+}
+
+static bool ath_lookup_legacy(struct ath_buf *bf)
+{
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *tx_info;
+ struct ieee80211_tx_rate *rates;
+ int i;
+
+ skb = bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+ rates = tx_info->control.rates;
+
+ for (i = 0; i < 4; i++) {
+ if (!rates[i].count || rates[i].idx < 0)
+ break;
+
+ if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
+ return true;
+ }
+
+ return false;
}
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
@@ -643,8 +668,10 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
* meet the minimum required mpdudensity.
*/
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
- struct ath_buf *bf, u16 frmlen)
+ struct ath_buf *bf, u16 frmlen,
+ bool first_subfrm)
{
+#define FIRST_DESC_NDELIMS 60
struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
u32 nsymbits, nsymbols;
@@ -667,6 +694,14 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
ndelim += ATH_AGGR_ENCRYPTDELIM;
/*
+ * Add delimiter when using RTS/CTS with aggregation
+ * and non enterprise AR9003 card
+ */
+ if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
+ (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
+ ndelim = max(ndelim, FIRST_DESC_NDELIMS);
+
+ /*
* Convert desired mpdu density from microeconds to bytes based
* on highest rate in rate series (i.e. first rate) to determine
* required minimum length for subframe. Take into account
@@ -711,22 +746,33 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
- struct ath_buf *bf, *bf_first, *bf_prev = NULL;
+ struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
int rl = 0, nframes = 0, ndelim, prev_al = 0;
u16 aggr_limit = 0, al = 0, bpad = 0,
al_delta, h_baw = tid->baw_size / 2;
enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
-
- bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
+ struct sk_buff *skb;
+ u16 seqno;
do {
- bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
- fi = get_frame_info(bf->bf_mpdu);
+ skb = skb_peek(&tid->buf_q);
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+ if (!fi->bf)
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+
+ if (!bf)
+ continue;
+
+ bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
+ seqno = bf->bf_state.seqno;
+ if (!bf_first)
+ bf_first = bf;
/* do not step over block-ack window */
- if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
+ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
status = ATH_AGGR_BAW_CLOSED;
break;
}
@@ -740,14 +786,14 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
if (nframes &&
- (aggr_limit < (al + bpad + al_delta + prev_al))) {
+ ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
+ ath_lookup_legacy(bf))) {
status = ATH_AGGR_LIMITED;
break;
}
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
- if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
- !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
+ if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
break;
/* do not exceed subframe limit */
@@ -755,7 +801,6 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
status = ATH_AGGR_LIMITED;
break;
}
- nframes++;
/* add padding for previous frame to aggregation length */
al += bpad + al_delta;
@@ -764,25 +809,26 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
* Get the delimiters needed to meet the MPDU
* density for this node.
*/
- ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
+ ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
+ !nframes);
bpad = PADBYTES(al_delta) + (ndelim << 2);
+ nframes++;
bf->bf_next = NULL;
- ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
/* link buffers of this frame to the aggregate */
if (!fi->retries)
- ath_tx_addto_baw(sc, tid, fi->seqno);
- ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
- list_move_tail(&bf->list, bf_q);
- if (bf_prev) {
+ ath_tx_addto_baw(sc, tid, seqno);
+ bf->bf_state.ndelim = ndelim;
+
+ __skb_unlink(skb, &tid->buf_q);
+ list_add_tail(&bf->list, bf_q);
+ if (bf_prev)
bf_prev->bf_next = bf;
- ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
- bf->bf_daddr);
- }
+
bf_prev = bf;
- } while (!list_empty(&tid->buf_q));
+ } while (!skb_queue_empty(&tid->buf_q));
*aggr_len = al;
@@ -790,17 +836,236 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
#undef PADBYTES
}
+/*
+ * rix - rate index
+ * pktlen - total bytes (delims + data + fcs + pads + pad delims)
+ * width - 0 for 20 MHz, 1 for 40 MHz
+ * half_gi - to use 4us v/s 3.6 us for symbol time
+ */
+static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
+ int width, int half_gi, bool shortPreamble)
+{
+ u32 nbits, nsymbits, duration, nsymbols;
+ int streams;
+
+ /* find number of symbols: PLCP + data */
+ streams = HT_RC_2_STREAMS(rix);
+ nbits = (pktlen << 3) + OFDM_PLCP_BITS;
+ nsymbits = bits_per_symbol[rix % 8][width] * streams;
+ nsymbols = (nbits + nsymbits - 1) / nsymbits;
+
+ if (!half_gi)
+ duration = SYMBOL_TIME(nsymbols);
+ else
+ duration = SYMBOL_TIME_HALFGI(nsymbols);
+
+ /* addup duration for legacy/ht training and signal fields */
+ duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
+
+ return duration;
+}
+
+static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_info *info, int len)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *tx_info;
+ struct ieee80211_tx_rate *rates;
+ const struct ieee80211_rate *rate;
+ struct ieee80211_hdr *hdr;
+ int i;
+ u8 rix = 0;
+
+ skb = bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+ rates = tx_info->control.rates;
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ /* set dur_update_en for l-sig computation except for PS-Poll frames */
+ info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
+
+ /*
+ * We check if Short Preamble is needed for the CTS rate by
+ * checking the BSS's global flag.
+ * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
+ */
+ rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
+ info->rtscts_rate = rate->hw_value;
+ if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
+ info->rtscts_rate |= rate->hw_value_short;
+
+ for (i = 0; i < 4; i++) {
+ bool is_40, is_sgi, is_sp;
+ int phy;
+
+ if (!rates[i].count || (rates[i].idx < 0))
+ continue;
+
+ rix = rates[i].idx;
+ info->rates[i].Tries = rates[i].count;
+
+ if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+ info->flags |= ATH9K_TXDESC_RTSENA;
+ } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+ info->flags |= ATH9K_TXDESC_CTSENA;
+ }
+
+ if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
+ if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+ info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
+
+ is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
+ is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
+ is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+
+ if (rates[i].flags & IEEE80211_TX_RC_MCS) {
+ /* MCS rates */
+ info->rates[i].Rate = rix | 0x80;
+ info->rates[i].ChSel = ath_txchainmask_reduction(sc,
+ ah->txchainmask, info->rates[i].Rate);
+ info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
+ is_40, is_sgi, is_sp);
+ if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
+ info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
+ continue;
+ }
+
+ /* legacy rates */
+ if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
+ !(rate->flags & IEEE80211_RATE_ERP_G))
+ phy = WLAN_RC_PHY_CCK;
+ else
+ phy = WLAN_RC_PHY_OFDM;
+
+ rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
+ info->rates[i].Rate = rate->hw_value;
+ if (rate->hw_value_short) {
+ if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ info->rates[i].Rate |= rate->hw_value_short;
+ } else {
+ is_sp = false;
+ }
+
+ if (bf->bf_state.bfs_paprd)
+ info->rates[i].ChSel = ah->txchainmask;
+ else
+ info->rates[i].ChSel = ath_txchainmask_reduction(sc,
+ ah->txchainmask, info->rates[i].Rate);
+
+ info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
+ phy, rate->bitrate * 100, len, rix, is_sp);
+ }
+
+ /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
+ if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
+ info->flags &= ~ATH9K_TXDESC_RTSENA;
+
+ /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
+ if (info->flags & ATH9K_TXDESC_RTSENA)
+ info->flags &= ~ATH9K_TXDESC_CTSENA;
+}
+
+static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ enum ath9k_pkt_type htype;
+ __le16 fc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+
+ if (ieee80211_is_beacon(fc))
+ htype = ATH9K_PKT_TYPE_BEACON;
+ else if (ieee80211_is_probe_resp(fc))
+ htype = ATH9K_PKT_TYPE_PROBE_RESP;
+ else if (ieee80211_is_atim(fc))
+ htype = ATH9K_PKT_TYPE_ATIM;
+ else if (ieee80211_is_pspoll(fc))
+ htype = ATH9K_PKT_TYPE_PSPOLL;
+ else
+ htype = ATH9K_PKT_TYPE_NORMAL;
+
+ return htype;
+}
+
+static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_txq *txq, int len)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ struct ath_buf *bf_first = bf;
+ struct ath_tx_info info;
+ bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
+
+ memset(&info, 0, sizeof(info));
+ info.is_first = true;
+ info.is_last = true;
+ info.txpower = MAX_RATE_POWER;
+ info.qcu = txq->axq_qnum;
+
+ info.flags = ATH9K_TXDESC_INTREQ;
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+ info.flags |= ATH9K_TXDESC_NOACK;
+ if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
+ info.flags |= ATH9K_TXDESC_LDPC;
+
+ ath_buf_set_rate(sc, bf, &info, len);
+
+ if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
+ info.flags |= ATH9K_TXDESC_CLRDMASK;
+
+ if (bf->bf_state.bfs_paprd)
+ info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;
+
+
+ while (bf) {
+ struct sk_buff *skb = bf->bf_mpdu;
+ struct ath_frame_info *fi = get_frame_info(skb);
+
+ info.type = get_hw_packet_type(skb);
+ if (bf->bf_next)
+ info.link = bf->bf_next->bf_daddr;
+ else
+ info.link = 0;
+
+ info.buf_addr[0] = bf->bf_buf_addr;
+ info.buf_len[0] = skb->len;
+ info.pkt_len = fi->framelen;
+ info.keyix = fi->keyix;
+ info.keytype = fi->keytype;
+
+ if (aggr) {
+ if (bf == bf_first)
+ info.aggr = AGGR_BUF_FIRST;
+ else if (!bf->bf_next)
+ info.aggr = AGGR_BUF_LAST;
+ else
+ info.aggr = AGGR_BUF_MIDDLE;
+
+ info.ndelim = bf->bf_state.ndelim;
+ info.aggr_len = len;
+ }
+
+ ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
+ bf = bf->bf_next;
+ }
+}
+
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid)
{
struct ath_buf *bf;
enum ATH_AGGR_STATUS status;
- struct ath_frame_info *fi;
+ struct ieee80211_tx_info *tx_info;
struct list_head bf_q;
int aggr_len;
do {
- if (list_empty(&tid->buf_q))
+ if (skb_queue_empty(&tid->buf_q))
return;
INIT_LIST_HEAD(&bf_q);
@@ -816,34 +1081,25 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf = list_first_entry(&bf_q, struct ath_buf, list);
bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
+ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
if (tid->ac->clear_ps_filter) {
tid->ac->clear_ps_filter = false;
- ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
+ tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+ } else {
+ tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
}
/* if only one frame, send as non-aggregate */
if (bf == bf->bf_lastbf) {
- fi = get_frame_info(bf->bf_mpdu);
-
- bf->bf_state.bf_type &= ~BUF_AGGR;
- ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
- ath_buf_set_rate(sc, bf, fi->framelen);
- ath_tx_txqaddbuf(sc, txq, &bf_q, false);
- continue;
+ aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
+ bf->bf_state.bf_type = BUF_AMPDU;
+ } else {
+ TX_STAT_INC(txq->axq_qnum, a_aggr);
}
- /* setup first desc of aggregate */
- bf->bf_state.bf_type |= BUF_AGGR;
- ath_buf_set_rate(sc, bf, aggr_len);
- ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);
-
- /* anchor last desc of aggregate */
- ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
-
+ ath_tx_fill_desc(sc, bf, txq, aggr_len);
ath_tx_txqaddbuf(sc, txq, &bf_q, false);
- TX_STAT_INC(txq->axq_qnum, a_aggr);
-
} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
status != ATH_AGGR_BAW_CLOSED);
}
@@ -902,12 +1158,13 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
ath_tx_flush_tid(sc, txtid);
}
-bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
+void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
+ struct ath_node *an)
{
struct ath_atx_tid *tid;
struct ath_atx_ac *ac;
struct ath_txq *txq;
- bool buffered = false;
+ bool buffered;
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
@@ -921,8 +1178,7 @@ bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
spin_lock_bh(&txq->axq_lock);
- if (!list_empty(&tid->buf_q))
- buffered = true;
+ buffered = !skb_queue_empty(&tid->buf_q);
tid->sched = false;
list_del(&tid->list);
@@ -933,9 +1189,9 @@ bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
}
spin_unlock_bh(&txq->axq_lock);
- }
- return buffered;
+ ieee80211_sta_set_buffered(sta, tidno, buffered);
+ }
}
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
@@ -954,7 +1210,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
spin_lock_bh(&txq->axq_lock);
ac->clear_ps_filter = true;
- if (!list_empty(&tid->buf_q) && !tid->paused) {
+ if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq);
}
@@ -1004,7 +1260,6 @@ static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_tx_queue_info qi;
static const int subtype_txq_to_hwq[] = {
[WME_AC_BE] = ATH_TXQ_AC_BE,
@@ -1054,12 +1309,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
*/
return NULL;
}
- if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
- ath_err(common, "qnum %u out of range, max %zu!\n",
- axq_qnum, ARRAY_SIZE(sc->tx.txq));
- ath9k_hw_releasetxqueue(ah, axq_qnum);
- return NULL;
- }
if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
struct ath_txq *txq = &sc->tx.txq[axq_qnum];
@@ -1156,6 +1405,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
struct ath_tx_status ts;
memset(&ts, 0, sizeof(ts));
+ ts.ts_status = ATH9K_TX_FLUSH;
INIT_LIST_HEAD(&bf_head);
while (!list_empty(list)) {
@@ -1222,7 +1472,8 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_txq *txq;
- int i, npend = 0;
+ int i;
+ u32 npend = 0;
if (sc->sc_flags & SC_OP_INVALID)
return true;
@@ -1234,11 +1485,12 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
if (!ATH_TXQ_SETUP(sc, i))
continue;
- npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
+ if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
+ npend |= BIT(i);
}
if (npend)
- ath_err(common, "Failed to stop TX DMA!\n");
+ ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (!ATH_TXQ_SETUP(sc, i))
@@ -1271,7 +1523,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
struct ath_atx_ac *ac, *ac_tmp, *last_ac;
struct ath_atx_tid *tid, *last_tid;
- if (list_empty(&txq->axq_acq) ||
+ if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
return;
@@ -1298,7 +1550,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
* add tid to round-robin queue if more frames
* are pending for the tid
*/
- if (!list_empty(&tid->buf_q))
+ if (!skb_queue_empty(&tid->buf_q))
ath_tx_queue_tid(txq, tid);
if (tid == last_tid ||
@@ -1390,12 +1642,11 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
}
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
- struct ath_buf *bf, struct ath_tx_control *txctl)
+ struct sk_buff *skb, struct ath_tx_control *txctl)
{
- struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+ struct ath_frame_info *fi = get_frame_info(skb);
struct list_head bf_head;
-
- bf->bf_state.bf_type |= BUF_AMPDU;
+ struct ath_buf *bf;
/*
* Do not queue to h/w when any of the following conditions is true:
@@ -1404,113 +1655,82 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
* - seqno is not within block-ack window
* - h/w queue depth exceeds low water mark
*/
- if (!list_empty(&tid->buf_q) || tid->paused ||
- !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
+ if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
+ !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
/*
* Add this frame to software queue for scheduling later
* for aggregation.
*/
TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
- list_add_tail(&bf->list, &tid->buf_q);
- ath_tx_queue_tid(txctl->txq, tid);
+ __skb_queue_tail(&tid->buf_q, skb);
+ if (!txctl->an || !txctl->an->sleeping)
+ ath_tx_queue_tid(txctl->txq, tid);
return;
}
+ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ if (!bf)
+ return;
+
+ bf->bf_state.bf_type = BUF_AMPDU;
INIT_LIST_HEAD(&bf_head);
list_add(&bf->list, &bf_head);
/* Add sub-frame to BAW */
- if (!fi->retries)
- ath_tx_addto_baw(sc, tid, fi->seqno);
+ ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
/* Queue to h/w without aggregation */
TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
bf->bf_lastbf = bf;
- ath_buf_set_rate(sc, bf, fi->framelen);
+ ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
}
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid,
- struct list_head *bf_head)
+ struct ath_atx_tid *tid, struct sk_buff *skb)
{
- struct ath_frame_info *fi;
+ struct ath_frame_info *fi = get_frame_info(skb);
+ struct list_head bf_head;
struct ath_buf *bf;
- bf = list_first_entry(bf_head, struct ath_buf, list);
- bf->bf_state.bf_type &= ~BUF_AMPDU;
+ bf = fi->bf;
+ if (!bf)
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+
+ if (!bf)
+ return;
+
+ INIT_LIST_HEAD(&bf_head);
+ list_add_tail(&bf->list, &bf_head);
+ bf->bf_state.bf_type = 0;
/* update starting sequence number for subsequent ADDBA request */
if (tid)
INCR(tid->seq_start, IEEE80211_SEQ_MAX);
bf->bf_lastbf = bf;
- fi = get_frame_info(bf->bf_mpdu);
- ath_buf_set_rate(sc, bf, fi->framelen);
- ath_tx_txqaddbuf(sc, txq, bf_head, false);
+ ath_tx_fill_desc(sc, bf, txq, fi->framelen);
+ ath_tx_txqaddbuf(sc, txq, &bf_head, false);
TX_STAT_INC(txq->axq_qnum, queued);
}
-static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr;
- enum ath9k_pkt_type htype;
- __le16 fc;
-
- hdr = (struct ieee80211_hdr *)skb->data;
- fc = hdr->frame_control;
-
- if (ieee80211_is_beacon(fc))
- htype = ATH9K_PKT_TYPE_BEACON;
- else if (ieee80211_is_probe_resp(fc))
- htype = ATH9K_PKT_TYPE_PROBE_RESP;
- else if (ieee80211_is_atim(fc))
- htype = ATH9K_PKT_TYPE_ATIM;
- else if (ieee80211_is_pspoll(fc))
- htype = ATH9K_PKT_TYPE_PSPOLL;
- else
- htype = ATH9K_PKT_TYPE_NORMAL;
-
- return htype;
-}
-
static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
int framelen)
{
- struct ath_softc *sc = hw->priv;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = tx_info->control.sta;
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath_frame_info *fi = get_frame_info(skb);
struct ath_node *an = NULL;
- struct ath_atx_tid *tid;
enum ath9k_key_type keytype;
- u16 seqno = 0;
- u8 tidno;
keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
if (sta)
an = (struct ath_node *) sta->drv_priv;
- hdr = (struct ieee80211_hdr *)skb->data;
- if (an && ieee80211_is_data_qos(hdr->frame_control) &&
- conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
-
- tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
-
- /*
- * Override seqno set by upper layer with the one
- * in tx aggregation state.
- */
- tid = ATH_AN_2_TID(an, tidno);
- seqno = tid->seq_next;
- hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
- INCR(tid->seq_next, IEEE80211_SEQ_MAX);
- }
-
memset(fi, 0, sizeof(*fi));
if (hw_key)
fi->keyix = hw_key->hw_key_idx;
@@ -1520,199 +1740,50 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
fi->keyix = ATH9K_TXKEYIX_INVALID;
fi->keytype = keytype;
fi->framelen = framelen;
- fi->seqno = seqno;
-}
-
-static int setup_tx_flags(struct sk_buff *skb)
-{
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- int flags = 0;
-
- flags |= ATH9K_TXDESC_INTREQ;
-
- if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
- flags |= ATH9K_TXDESC_NOACK;
-
- if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
- flags |= ATH9K_TXDESC_LDPC;
-
- return flags;
-}
-
-/*
- * rix - rate index
- * pktlen - total bytes (delims + data + fcs + pads + pad delims)
- * width - 0 for 20 MHz, 1 for 40 MHz
- * half_gi - to use 4us v/s 3.6 us for symbol time
- */
-static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
- int width, int half_gi, bool shortPreamble)
-{
- u32 nbits, nsymbits, duration, nsymbols;
- int streams;
-
- /* find number of symbols: PLCP + data */
- streams = HT_RC_2_STREAMS(rix);
- nbits = (pktlen << 3) + OFDM_PLCP_BITS;
- nsymbits = bits_per_symbol[rix % 8][width] * streams;
- nsymbols = (nbits + nsymbits - 1) / nsymbits;
-
- if (!half_gi)
- duration = SYMBOL_TIME(nsymbols);
- else
- duration = SYMBOL_TIME_HALFGI(nsymbols);
-
- /* addup duration for legacy/ht training and signal fields */
- duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
-
- return duration;
}
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
struct ath_hw *ah = sc->sc_ah;
struct ath9k_channel *curchan = ah->curchan;
- if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
- (curchan->channelFlags & CHANNEL_5GHZ) &&
- (chainmask == 0x7) && (rate < 0x90))
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
+ (curchan->channelFlags & CHANNEL_5GHZ) &&
+ (chainmask == 0x7) && (rate < 0x90))
return 0x3;
else
return chainmask;
}
-static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
-{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath9k_11n_rate_series series[4];
- struct sk_buff *skb;
- struct ieee80211_tx_info *tx_info;
- struct ieee80211_tx_rate *rates;
- const struct ieee80211_rate *rate;
- struct ieee80211_hdr *hdr;
- int i, flags = 0;
- u8 rix = 0, ctsrate = 0;
- bool is_pspoll;
-
- memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
-
- skb = bf->bf_mpdu;
- tx_info = IEEE80211_SKB_CB(skb);
- rates = tx_info->control.rates;
- hdr = (struct ieee80211_hdr *)skb->data;
- is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
-
- /*
- * We check if Short Preamble is needed for the CTS rate by
- * checking the BSS's global flag.
- * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
- */
- rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
- ctsrate = rate->hw_value;
- if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
- ctsrate |= rate->hw_value_short;
-
- for (i = 0; i < 4; i++) {
- bool is_40, is_sgi, is_sp;
- int phy;
-
- if (!rates[i].count || (rates[i].idx < 0))
- continue;
-
- rix = rates[i].idx;
- series[i].Tries = rates[i].count;
-
- if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
- series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
- flags |= ATH9K_TXDESC_RTSENA;
- } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
- series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
- flags |= ATH9K_TXDESC_CTSENA;
- }
-
- if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- series[i].RateFlags |= ATH9K_RATESERIES_2040;
- if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
- series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
-
- is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
- is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
- is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
-
- if (rates[i].flags & IEEE80211_TX_RC_MCS) {
- /* MCS rates */
- series[i].Rate = rix | 0x80;
- series[i].ChSel = ath_txchainmask_reduction(sc,
- common->tx_chainmask, series[i].Rate);
- series[i].PktDuration = ath_pkt_duration(sc, rix, len,
- is_40, is_sgi, is_sp);
- if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
- series[i].RateFlags |= ATH9K_RATESERIES_STBC;
- continue;
- }
-
- /* legacy rates */
- if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
- !(rate->flags & IEEE80211_RATE_ERP_G))
- phy = WLAN_RC_PHY_CCK;
- else
- phy = WLAN_RC_PHY_OFDM;
-
- rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
- series[i].Rate = rate->hw_value;
- if (rate->hw_value_short) {
- if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- series[i].Rate |= rate->hw_value_short;
- } else {
- is_sp = false;
- }
-
- if (bf->bf_state.bfs_paprd)
- series[i].ChSel = common->tx_chainmask;
- else
- series[i].ChSel = ath_txchainmask_reduction(sc,
- common->tx_chainmask, series[i].Rate);
-
- series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
- phy, rate->bitrate * 100, len, rix, is_sp);
- }
-
- /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
- if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
- flags &= ~ATH9K_TXDESC_RTSENA;
-
- /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
- if (flags & ATH9K_TXDESC_RTSENA)
- flags &= ~ATH9K_TXDESC_CTSENA;
-
- /* set dur_update_en for l-sig computation except for PS-Poll frames */
- ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
- bf->bf_lastbf->bf_desc,
- !is_pspoll, ctsrate,
- 0, series, 4, flags);
-
-}
-
-static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
+/*
+ * Assign a descriptor (and sequence number if necessary,
+ * and map buffer for DMA. Frees skb on error
+ */
+static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
+ struct ath_atx_tid *tid,
struct sk_buff *skb)
{
- struct ath_softc *sc = hw->priv;
- struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_frame_info *fi = get_frame_info(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath_buf *bf;
- struct ath_desc *ds;
- int frm_type;
+ u16 seqno;
bf = ath_tx_get_buffer(sc);
if (!bf) {
ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
- return NULL;
+ goto error;
}
ATH_TXBUF_RESET(bf);
- bf->bf_flags = setup_tx_flags(skb);
+ if (tid) {
+ seqno = tid->seq_next;
+ hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
+ INCR(tid->seq_next, IEEE80211_SEQ_MAX);
+ bf->bf_state.seqno = seqno;
+ }
+
bf->bf_mpdu = skb;
bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
@@ -1723,38 +1794,26 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
ath_err(ath9k_hw_common(sc->sc_ah),
"dma_mapping_error() on TX\n");
ath_tx_return_buffer(sc, bf);
- return NULL;
+ goto error;
}
- frm_type = get_hw_packet_type(skb);
-
- ds = bf->bf_desc;
- ath9k_hw_set_desc_link(ah, ds, 0);
-
- ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
- fi->keyix, fi->keytype, bf->bf_flags);
-
- ath9k_hw_filltxdesc(ah, ds,
- skb->len, /* segment length */
- true, /* first segment */
- true, /* last segment */
- ds, /* first descriptor */
- bf->bf_buf_addr,
- txq->axq_qnum);
-
+ fi->bf = bf;
return bf;
+
+error:
+ dev_kfree_skb_any(skb);
+ return NULL;
}
/* FIXME: tx power */
-static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
+static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
struct ath_tx_control *txctl)
{
- struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct list_head bf_head;
struct ath_atx_tid *tid = NULL;
+ struct ath_buf *bf;
u8 tidno;
spin_lock_bh(&txctl->txq->axq_lock);
@@ -1772,27 +1831,21 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
* Try aggregation if it's a unicast data frame
* and the destination is HT capable.
*/
- ath_tx_send_ampdu(sc, tid, bf, txctl);
+ ath_tx_send_ampdu(sc, tid, skb, txctl);
} else {
- INIT_LIST_HEAD(&bf_head);
- list_add_tail(&bf->list, &bf_head);
+ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ if (!bf)
+ goto out;
- bf->bf_state.bfs_ftype = txctl->frame_type;
bf->bf_state.bfs_paprd = txctl->paprd;
- if (bf->bf_state.bfs_paprd)
- ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
- bf->bf_state.bfs_paprd);
-
if (txctl->paprd)
bf->bf_state.bfs_paprd_timestamp = jiffies;
- if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
- ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
-
- ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
+ ath_tx_send_normal(sc, txctl->txq, tid, skb);
}
+out:
spin_unlock_bh(&txctl->txq->axq_lock);
}
@@ -1806,7 +1859,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_vif *vif = info->control.vif;
struct ath_softc *sc = hw->priv;
struct ath_txq *txq = txctl->txq;
- struct ath_buf *bf;
int padpos, padsize;
int frmlen = skb->len + FCS_LEN;
int q;
@@ -1839,6 +1891,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
skb_push(skb, padsize);
memmove(skb->data, skb->data + padsize, padpos);
+ hdr = (struct ieee80211_hdr *) skb->data;
}
if ((vif && vif->type != NL80211_IFTYPE_AP &&
@@ -1853,10 +1906,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
* info are no longer valid (overwritten by the ath_frame_info data.
*/
- bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
- if (unlikely(!bf))
- return -ENOMEM;
-
q = skb_get_queue_mapping(skb);
spin_lock_bh(&txq->axq_lock);
if (txq == sc->tx.txq_map[q] &&
@@ -1866,8 +1915,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
}
spin_unlock_bh(&txq->axq_lock);
- ath_tx_start_dma(sc, bf, txctl);
-
+ ath_tx_start_dma(sc, skb, txctl);
return 0;
}
@@ -1876,7 +1924,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
/*****************/
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
- int tx_flags, int ftype, struct ath_txq *txq)
+ int tx_flags, struct ath_txq *txq)
{
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1889,10 +1937,9 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
if (tx_flags & ATH_TX_BAR)
tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
- if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
+ if (!(tx_flags & ATH_TX_ERROR))
/* Frame was ACKed */
tx_info->flags |= IEEE80211_TX_STAT_ACK;
- }
padpos = ath9k_cmn_padpos(hdr->frame_control);
padsize = padpos & 3;
@@ -1936,18 +1983,18 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int txok, int sendbar)
{
struct sk_buff *skb = bf->bf_mpdu;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
unsigned long flags;
int tx_flags = 0;
if (sendbar)
tx_flags = ATH_TX_BAR;
- if (!txok) {
+ if (!txok)
tx_flags |= ATH_TX_ERROR;
- if (bf_isxretried(bf))
- tx_flags |= ATH_TX_XRETRY;
- }
+ if (ts->ts_status & ATH9K_TXERR_FILT)
+ tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
bf->bf_buf_addr = 0;
@@ -1960,9 +2007,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
else
complete(&sc->paprd_complete);
} else {
- ath_debug_stat_tx(sc, bf, ts, txq);
- ath_tx_complete(sc, skb, tx_flags,
- bf->bf_state.bfs_ftype, txq);
+ ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
+ ath_tx_complete(sc, skb, tx_flags, txq);
}
/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
* accidentally reference it later.
@@ -1979,7 +2025,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
- int txok, bool update_rc)
+ int txok)
{
struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -1994,19 +2040,16 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_rateindex = ts->ts_rateindex;
WARN_ON(tx_rateindex >= hw->max_rates);
- if (ts->ts_status & ATH9K_TXERR_FILT)
- tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
BUG_ON(nbad > nframes);
-
- tx_info->status.ampdu_len = nframes;
- tx_info->status.ampdu_ack_len = nframes - nbad;
}
+ tx_info->status.ampdu_len = nframes;
+ tx_info->status.ampdu_ack_len = nframes - nbad;
if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
- (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
+ (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
/*
* If an underrun error is seen assume it as an excessive
* retry only if max frame trigger level has been reached
@@ -2019,9 +2062,9 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
* successfully by eventually preferring slower rates.
* This itself should also alleviate congestion on the bus.
*/
- if (ieee80211_is_data(hdr->frame_control) &&
- (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
- ATH9K_TX_DELIM_UNDERRUN)) &&
+ if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
+ ATH9K_TX_DELIM_UNDERRUN)) &&
+ ieee80211_is_data(hdr->frame_control) &&
ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
tx_info->status.rates[tx_rateindex].count =
hw->max_rate_tries;
@@ -2052,13 +2095,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
spin_unlock_bh(&txq->axq_lock);
if (!bf_isampdu(bf)) {
- /*
- * This frame is sent out as a single frame.
- * Use hardware retry status for this frame.
- */
- if (ts->ts_status & ATH9K_TXERR_XRETRY)
- bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
+ ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
} else
ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
@@ -2085,6 +2122,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
spin_lock_bh(&txq->axq_lock);
for (;;) {
+ if (work_pending(&sc->hw_reset_work))
+ break;
+
if (list_empty(&txq->axq_q)) {
txq->axq_link = NULL;
if (sc->sc_flags & SC_OP_TXAGGR)
@@ -2172,9 +2212,8 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
if (needreset) {
ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
"tx hung, resetting the chip\n");
- spin_lock_bh(&sc->sc_pcu_lock);
- ath_reset(sc, true);
- spin_unlock_bh(&sc->sc_pcu_lock);
+ RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
+ ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
@@ -2207,6 +2246,9 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
int status;
for (;;) {
+ if (work_pending(&sc->hw_reset_work))
+ break;
+
status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
if (status == -EINPROGRESS)
break;
@@ -2361,7 +2403,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
tid->sched = false;
tid->paused = false;
tid->state &= ~AGGR_CLEANUP;
- INIT_LIST_HEAD(&tid->buf_q);
+ __skb_queue_head_init(&tid->buf_q);
acno = TID_TO_WME_AC(tidno);
tid->ac = &an->ac[acno];
tid->state &= ~AGGR_ADDBA_COMPLETE;
diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
index 2d1b821b440..267d5dcf82d 100644
--- a/drivers/net/wireless/ath/carl9170/Kconfig
+++ b/drivers/net/wireless/ath/carl9170/Kconfig
@@ -39,3 +39,17 @@ config CARL9170_WPC
bool
depends on CARL9170 && (INPUT = y || INPUT = CARL9170)
default y
+
+config CARL9170_HWRNG
+ bool "Random number generator"
+ depends on CARL9170 && (HW_RANDOM = y || HW_RANDOM = CARL9170)
+ default n
+ help
+ Provides a hardware random number generator to the kernel.
+
+ SECURITY WARNING: It's relatively easy to eavesdrop all
+ generated random numbers from the transport stream with
+ usbmon [software] or special usb sniffer hardware.
+
+ Say N, unless your setup[i.e.: embedded system] has no
+ other rng source and you can afford to take the risk.
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index c5427a72a1e..6cfbb419e2f 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -43,6 +43,7 @@
#include <linux/firmware.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
+#include <linux/hw_random.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <linux/usb.h>
@@ -151,6 +152,7 @@ struct carl9170_sta_tid {
#define CARL9170_TX_TIMEOUT 2500
#define CARL9170_JANITOR_DELAY 128
#define CARL9170_QUEUE_STUCK_TIMEOUT 5500
+#define CARL9170_STAT_WORK 30000
#define CARL9170_NUM_TX_AGG_MAX 30
@@ -282,6 +284,7 @@ struct ar9170 {
bool rx_stream;
bool tx_stream;
bool rx_filter;
+ bool hw_counters;
unsigned int mem_blocks;
unsigned int mem_block_size;
unsigned int rx_size;
@@ -331,11 +334,21 @@ struct ar9170 {
/* PHY */
struct ieee80211_channel *channel;
+ unsigned int num_channels;
int noise[4];
unsigned int chan_fail;
unsigned int total_chan_fail;
u8 heavy_clip;
u8 ht_settings;
+ struct {
+ u64 active; /* usec */
+ u64 cca; /* usec */
+ u64 tx_time; /* usec */
+ u64 rx_total;
+ u64 rx_overrun;
+ } tally;
+ struct delayed_work stat_work;
+ struct survey_info *survey;
/* power calibration data */
u8 power_5G_leg[4];
@@ -437,6 +450,17 @@ struct ar9170 {
unsigned int off_override;
bool state;
} ps;
+
+#ifdef CONFIG_CARL9170_HWRNG
+# define CARL9170_HWRNG_CACHE_SIZE CARL9170_MAX_CMD_PAYLOAD_LEN
+ struct {
+ struct hwrng rng;
+ bool initialized;
+ char name[30 + 1];
+ u16 cache[CARL9170_HWRNG_CACHE_SIZE / sizeof(u16)];
+ unsigned int cache_idx;
+ } rng;
+#endif /* CONFIG_CARL9170_HWRNG */
};
enum carl9170_ps_off_override_reasons {
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
index cdfc94c371b..195dc653811 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.c
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -36,6 +36,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <asm/div64.h>
#include "carl9170.h"
#include "cmd.h"
@@ -165,6 +166,39 @@ int carl9170_bcn_ctrl(struct ar9170 *ar, const unsigned int vif_id,
return __carl9170_exec_cmd(ar, cmd, true);
}
+int carl9170_collect_tally(struct ar9170 *ar)
+{
+ struct carl9170_tally_rsp tally;
+ struct survey_info *info;
+ unsigned int tick;
+ int err;
+
+ err = carl9170_exec_cmd(ar, CARL9170_CMD_TALLY, 0, NULL,
+ sizeof(tally), (u8 *)&tally);
+ if (err)
+ return err;
+
+ tick = le32_to_cpu(tally.tick);
+ if (tick) {
+ ar->tally.active += le32_to_cpu(tally.active) / tick;
+ ar->tally.cca += le32_to_cpu(tally.cca) / tick;
+ ar->tally.tx_time += le32_to_cpu(tally.tx_time) / tick;
+ ar->tally.rx_total += le32_to_cpu(tally.rx_total);
+ ar->tally.rx_overrun += le32_to_cpu(tally.rx_overrun);
+
+ if (ar->channel) {
+ info = &ar->survey[ar->channel->hw_value];
+ info->channel_time = ar->tally.active;
+ info->channel_time_busy = ar->tally.cca;
+ info->channel_time_tx = ar->tally.tx_time;
+ do_div(info->channel_time, 1000);
+ do_div(info->channel_time_busy, 1000);
+ do_div(info->channel_time_tx, 1000);
+ }
+ }
+ return 0;
+}
+
int carl9170_powersave(struct ar9170 *ar, const bool ps)
{
struct carl9170_cmd *cmd;
diff --git a/drivers/net/wireless/ath/carl9170/cmd.h b/drivers/net/wireless/ath/carl9170/cmd.h
index d5f95bdc75c..885c42778b8 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.h
+++ b/drivers/net/wireless/ath/carl9170/cmd.h
@@ -50,6 +50,7 @@ int carl9170_echo_test(struct ar9170 *ar, u32 v);
int carl9170_reboot(struct ar9170 *ar);
int carl9170_mac_reset(struct ar9170 *ar);
int carl9170_powersave(struct ar9170 *ar, const bool power_on);
+int carl9170_collect_tally(struct ar9170 *ar);
int carl9170_bcn_ctrl(struct ar9170 *ar, const unsigned int vif_id,
const u32 mode, const u32 addr, const u32 len);
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 39ddea5794f..f4cae1cccbf 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -266,6 +266,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
FIF_PROMISC_IN_BSS;
}
+ if (SUPP(CARL9170FW_HW_COUNTERS))
+ ar->fw.hw_counters = true;
+
if (SUPP(CARL9170FW_WOL))
device_set_wakeup_enable(&ar->udev->dev, true);
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index 0a6dec529b5..9443c802b25 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -55,6 +55,7 @@ enum carl9170_cmd_oids {
CARL9170_CMD_READ_TSF = 0x06,
CARL9170_CMD_RX_FILTER = 0x07,
CARL9170_CMD_WOL = 0x08,
+ CARL9170_CMD_TALLY = 0x09,
/* CAM */
CARL9170_CMD_EKEY = 0x10,
@@ -286,6 +287,15 @@ struct carl9170_tsf_rsp {
} __packed;
#define CARL9170_TSF_RSP_SIZE 8
+struct carl9170_tally_rsp {
+ __le32 active;
+ __le32 cca;
+ __le32 tx_time;
+ __le32 rx_total;
+ __le32 rx_overrun;
+ __le32 tick;
+} __packed;
+
struct carl9170_rsp {
struct carl9170_cmd_head hdr;
@@ -300,6 +310,7 @@ struct carl9170_rsp {
struct carl9170_gpio gpio;
struct carl9170_tsf_rsp tsf;
struct carl9170_psm psm;
+ struct carl9170_tally_rsp tally;
u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
} __packed;
} __packed __aligned(4);
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 0474e6638d2..f06e0695d41 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -413,6 +413,9 @@ static int carl9170_op_start(struct ieee80211_hw *hw)
carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
+ ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
+ round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
+
ieee80211_wake_queues(ar->hw);
err = 0;
@@ -423,6 +426,7 @@ out:
static void carl9170_cancel_worker(struct ar9170 *ar)
{
+ cancel_delayed_work_sync(&ar->stat_work);
cancel_delayed_work_sync(&ar->tx_janitor);
#ifdef CONFIG_CARL9170_LEDS
cancel_delayed_work_sync(&ar->led_work);
@@ -794,6 +798,43 @@ static void carl9170_ps_work(struct work_struct *work)
mutex_unlock(&ar->mutex);
}
+static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
+{
+ int err;
+
+ if (noise) {
+ err = carl9170_get_noisefloor(ar);
+ if (err)
+ return err;
+ }
+
+ if (ar->fw.hw_counters) {
+ err = carl9170_collect_tally(ar);
+ if (err)
+ return err;
+ }
+
+ if (flush)
+ memset(&ar->tally, 0, sizeof(ar->tally));
+
+ return 0;
+}
+
+static void carl9170_stat_work(struct work_struct *work)
+{
+ struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
+ int err;
+
+ mutex_lock(&ar->mutex);
+ err = carl9170_update_survey(ar, false, true);
+ mutex_unlock(&ar->mutex);
+
+ if (err)
+ return;
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
+ round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
+}
static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
{
@@ -828,11 +869,19 @@ static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
if (err)
goto out;
+ err = carl9170_update_survey(ar, true, false);
+ if (err)
+ goto out;
+
err = carl9170_set_channel(ar, hw->conf.channel,
hw->conf.channel_type, CARL9170_RFI_NONE);
if (err)
goto out;
+ err = carl9170_update_survey(ar, false, true);
+ if (err)
+ goto out;
+
err = carl9170_set_dyn_sifs_ack(ar);
if (err)
goto out;
@@ -1029,7 +1078,8 @@ out:
mutex_unlock(&ar->mutex);
}
-static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw)
+static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ar9170 *ar = hw->priv;
struct carl9170_tsf_rsp tsf;
@@ -1255,7 +1305,8 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
return 0;
}
-static int carl9170_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *param)
{
struct ar9170 *ar = hw->priv;
@@ -1421,24 +1472,159 @@ static int carl9170_register_wps_button(struct ar9170 *ar)
}
#endif /* CONFIG_CARL9170_WPC */
-static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
- struct survey_info *survey)
+#ifdef CONFIG_CARL9170_HWRNG
+static int carl9170_rng_get(struct ar9170 *ar)
{
- struct ar9170 *ar = hw->priv;
+
+#define RW (CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
+#define RB (CARL9170_MAX_CMD_PAYLOAD_LEN)
+
+ static const __le32 rng_load[RW] = {
+ [0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
+
+ u32 buf[RW];
+
+ unsigned int i, off = 0, transfer, count;
int err;
- if (idx != 0)
- return -ENOENT;
+ BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
+
+ if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
+ return -EAGAIN;
+
+ count = ARRAY_SIZE(ar->rng.cache);
+ while (count) {
+ err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
+ RB, (u8 *) rng_load,
+ RB, (u8 *) buf);
+ if (err)
+ return err;
+
+ transfer = min_t(unsigned int, count, RW);
+ for (i = 0; i < transfer; i++)
+ ar->rng.cache[off + i] = buf[i];
+
+ off += transfer;
+ count -= transfer;
+ }
+
+ ar->rng.cache_idx = 0;
+
+#undef RW
+#undef RB
+ return 0;
+}
+
+static int carl9170_rng_read(struct hwrng *rng, u32 *data)
+{
+ struct ar9170 *ar = (struct ar9170 *)rng->priv;
+ int ret = -EIO;
mutex_lock(&ar->mutex);
- err = carl9170_get_noisefloor(ar);
+ if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
+ ret = carl9170_rng_get(ar);
+ if (ret) {
+ mutex_unlock(&ar->mutex);
+ return ret;
+ }
+ }
+
+ *data = ar->rng.cache[ar->rng.cache_idx++];
mutex_unlock(&ar->mutex);
- if (err)
+
+ return sizeof(u16);
+}
+
+static void carl9170_unregister_hwrng(struct ar9170 *ar)
+{
+ if (ar->rng.initialized) {
+ hwrng_unregister(&ar->rng.rng);
+ ar->rng.initialized = false;
+ }
+}
+
+static int carl9170_register_hwrng(struct ar9170 *ar)
+{
+ int err;
+
+ snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
+ "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
+ ar->rng.rng.name = ar->rng.name;
+ ar->rng.rng.data_read = carl9170_rng_read;
+ ar->rng.rng.priv = (unsigned long)ar;
+
+ if (WARN_ON(ar->rng.initialized))
+ return -EALREADY;
+
+ err = hwrng_register(&ar->rng.rng);
+ if (err) {
+ dev_err(&ar->udev->dev, "Failed to register the random "
+ "number generator (%d)\n", err);
return err;
+ }
+
+ ar->rng.initialized = true;
+
+ err = carl9170_rng_get(ar);
+ if (err) {
+ carl9170_unregister_hwrng(ar);
+ return err;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_CARL9170_HWRNG */
+
+static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ar9170 *ar = hw->priv;
+ struct ieee80211_channel *chan;
+ struct ieee80211_supported_band *band;
+ int err, b, i;
- survey->channel = ar->channel;
+ chan = ar->channel;
+ if (!chan)
+ return -ENODEV;
+
+ if (idx == chan->hw_value) {
+ mutex_lock(&ar->mutex);
+ err = carl9170_update_survey(ar, false, true);
+ mutex_unlock(&ar->mutex);
+ if (err)
+ return err;
+ }
+
+ for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
+ band = ar->hw->wiphy->bands[b];
+
+ if (!band)
+ continue;
+
+ for (i = 0; i < band->n_channels; i++) {
+ if (band->channels[i].hw_value == idx) {
+ chan = &band->channels[i];
+ goto found;
+ }
+ }
+ }
+ return -ENOENT;
+
+found:
+ memcpy(survey, &ar->survey[idx], sizeof(*survey));
+
+ survey->channel = chan;
survey->filled = SURVEY_INFO_NOISE_DBM;
- survey->noise = ar->noise[0];
+
+ if (ar->channel == chan)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+ if (ar->fw.hw_counters) {
+ survey->filled |= SURVEY_INFO_CHANNEL_TIME |
+ SURVEY_INFO_CHANNEL_TIME_BUSY |
+ SURVEY_INFO_CHANNEL_TIME_TX;
+ }
+
return 0;
}
@@ -1571,6 +1757,7 @@ void *carl9170_alloc(size_t priv_size)
INIT_WORK(&ar->ping_work, carl9170_ping_work);
INIT_WORK(&ar->restart_work, carl9170_restart_work);
INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
+ INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
INIT_LIST_HEAD(&ar->tx_ampdu_list);
rcu_assign_pointer(ar->tx_ampdu_iter,
@@ -1654,6 +1841,7 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
struct ath_regulatory *regulatory = &ar->common.regulatory;
unsigned int rx_streams, tx_streams, tx_params = 0;
int bands = 0;
+ int chans = 0;
if (ar->eeprom.length == cpu_to_le16(0xffff))
return -ENODATA;
@@ -1677,14 +1865,24 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
&carl9170_band_2GHz;
+ chans += carl9170_band_2GHz.n_channels;
bands++;
}
if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&carl9170_band_5GHz;
+ chans += carl9170_band_5GHz.n_channels;
bands++;
}
+ if (!bands)
+ return -EINVAL;
+
+ ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
+ if (!ar->survey)
+ return -ENOMEM;
+ ar->num_channels = chans;
+
/*
* I measured this, a bandswitch takes roughly
* 135 ms and a frequency switch about 80.
@@ -1698,12 +1896,11 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
ar->hw->channel_change_time = 80 * 1000;
regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
- regulatory->current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
/* second part of wiphy init */
SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
- return bands ? 0 : -EINVAL;
+ return 0;
}
static int carl9170_reg_notifier(struct wiphy *wiphy,
@@ -1787,6 +1984,12 @@ int carl9170_register(struct ar9170 *ar)
goto err_unreg;
#endif /* CONFIG_CARL9170_WPC */
+#ifdef CONFIG_CARL9170_HWRNG
+ err = carl9170_register_hwrng(ar);
+ if (err)
+ goto err_unreg;
+#endif /* CONFIG_CARL9170_HWRNG */
+
dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
wiphy_name(ar->hw->wiphy));
@@ -1819,6 +2022,10 @@ void carl9170_unregister(struct ar9170 *ar)
}
#endif /* CONFIG_CARL9170_WPC */
+#ifdef CONFIG_CARL9170_HWRNG
+ carl9170_unregister_hwrng(ar);
+#endif /* CONFIG_CARL9170_HWRNG */
+
carl9170_cancel_worker(ar);
cancel_work_sync(&ar->restart_work);
@@ -1836,6 +2043,9 @@ void carl9170_free(struct ar9170 *ar)
kfree(ar->mem_bitmap);
ar->mem_bitmap = NULL;
+ kfree(ar->survey);
+ ar->survey = NULL;
+
mutex_destroy(&ar->mutex);
ieee80211_free_hw(ar->hw);
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index aa147a9120b..472efc7e340 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -578,11 +578,10 @@ static int carl9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
if (err)
return err;
- /* XXX: remove magic! */
- if (is_2ghz)
- err = carl9170_write_reg(ar, AR9170_PWR_REG_PLL_ADDAC, 0x5163);
- else
- err = carl9170_write_reg(ar, AR9170_PWR_REG_PLL_ADDAC, 0x5143);
+ if (!ar->fw.hw_counters) {
+ err = carl9170_write_reg(ar, AR9170_PWR_REG_PLL_ADDAC,
+ is_2ghz ? 0x5163 : 0x5143);
+ }
return err;
}
@@ -1574,6 +1573,9 @@ int carl9170_get_noisefloor(struct ar9170 *ar)
AR9170_PHY_EXT_CCA_MIN_PWR, phy_res[i + 2]), 8);
}
+ if (ar->channel)
+ ar->survey[ar->channel->hw_value].noise = ar->noise[0];
+
return 0;
}
@@ -1766,10 +1768,6 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
ar->chan_fail = 0;
}
- err = carl9170_get_noisefloor(ar);
- if (err)
- return err;
-
if (ar->heavy_clip) {
err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE,
0x200 | ar->heavy_clip);
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index 64703778cfe..e651db85634 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
#ifndef __CARL9170_SHARED_VERSION_H
#define __CARL9170_SHARED_VERSION_H
#define CARL9170FW_VERSION_YEAR 11
-#define CARL9170FW_VERSION_MONTH 6
-#define CARL9170FW_VERSION_DAY 30
+#define CARL9170FW_VERSION_MONTH 8
+#define CARL9170FW_VERSION_DAY 15
#define CARL9170FW_VERSION_GIT "1.9.4"
#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index c325202fdc5..d9218fe0203 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -57,22 +57,18 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
}
EXPORT_SYMBOL(ath_rxbuf_alloc);
-int ath_printk(const char *level, struct ath_common *common,
- const char *fmt, ...)
+void ath_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int rtn;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- rtn = printk("%sath: %pV", level, &vaf);
+ printk("%sath: %pV", level, &vaf);
va_end(args);
-
- return rtn;
}
EXPORT_SYMBOL(ath_printk);
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 172f63f671c..03a8268ccf2 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -101,7 +101,7 @@ enum CountryCode {
CTRY_GERMANY = 276,
CTRY_GREECE = 300,
CTRY_GREENLAND = 304,
- CTRY_GRENEDA = 308,
+ CTRY_GRENADA = 308,
CTRY_GUAM = 316,
CTRY_GUATEMALA = 320,
CTRY_HAITI = 332,
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index 24b53839fc3..bdd2b4d61f2 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -332,7 +332,7 @@ static struct country_code_to_enum_rd allCountries[] = {
{CTRY_GERMANY, ETSI1_WORLD, "DE"},
{CTRY_GREECE, ETSI1_WORLD, "GR"},
{CTRY_GREENLAND, ETSI1_WORLD, "GL"},
- {CTRY_GRENEDA, FCC3_FCCA, "GD"},
+ {CTRY_GRENADA, FCC3_FCCA, "GD"},
{CTRY_GUAM, FCC1_FCCA, "GU"},
{CTRY_GUATEMALA, FCC1_FCCA, "GT"},
{CTRY_HAITI, ETSI1_WORLD, "HT"},
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 3cab843afb0..b97a40ed5ff 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -114,22 +114,22 @@ config B43_PHY_N
affect other devices support and may provide support for basic needs.
config B43_PHY_LP
- bool "Support for low-power (LP-PHY) devices (EXPERIMENTAL)"
- depends on B43 && EXPERIMENTAL
+ bool "Support for low-power (LP-PHY) devices"
+ depends on B43
default y
---help---
Support for the LP-PHY.
The LP-PHY is a low-power PHY built into some notebooks
- and embedded devices. It supports 802.11a/g
+ and embedded devices. It supports 802.11a/b/g
(802.11a support is optional, and currently disabled).
config B43_PHY_HT
- bool "Support for HT-PHY devices (BROKEN)"
- depends on B43 && BROKEN
+ bool "Support for HT-PHY (high throughput) devices (EXPERIMENTAL)"
+ depends on B43 && EXPERIMENTAL
---help---
Support for the HT-PHY.
- Say N, this is BROKEN and crashes driver.
+ Enables support for BCM4331 and possibly other chipsets with that PHY.
config B43_PHY_LCN
bool "Support for LCN-PHY devices (BROKEN)"
@@ -169,13 +169,3 @@ config B43_DEBUG
Say N, if you are a distributor or user building a release kernel
for production use.
Only say Y, if you are debugging a problem in the b43 driver sourcecode.
-
-config B43_FORCE_PIO
- bool "Force usage of PIO instead of DMA"
- depends on B43 && B43_DEBUG
- ---help---
- This will disable DMA and always enable PIO instead.
-
- Say N!
- This is only for debugging the PIO engine code. You do
- _NOT_ want to enable this.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index c818b0bc88e..447a2307c9d 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -17,11 +17,6 @@
#include "phy_common.h"
-/* The unique identifier of the firmware that's officially supported by
- * this driver version. */
-#define B43_SUPPORTED_FIRMWARE_ID "FW13"
-
-
#ifdef CONFIG_B43_DEBUG
# define B43_DEBUG 1
#else
@@ -112,21 +107,61 @@
#define B43_MMIO_RADIO_HWENABLED_LO 0x49A
#define B43_MMIO_GPIO_CONTROL 0x49C
#define B43_MMIO_GPIO_MASK 0x49E
+#define B43_MMIO_TXE0_CTL 0x500
+#define B43_MMIO_TXE0_AUX 0x502
+#define B43_MMIO_TXE0_TS_LOC 0x504
+#define B43_MMIO_TXE0_TIME_OUT 0x506
+#define B43_MMIO_TXE0_WM_0 0x508
+#define B43_MMIO_TXE0_WM_1 0x50A
+#define B43_MMIO_TXE0_PHYCTL 0x50C
+#define B43_MMIO_TXE0_STATUS 0x50E
+#define B43_MMIO_TXE0_MMPLCP0 0x510
+#define B43_MMIO_TXE0_MMPLCP1 0x512
+#define B43_MMIO_TXE0_PHYCTL1 0x514
+#define B43_MMIO_XMTFIFODEF 0x520
+#define B43_MMIO_XMTFIFO_FRAME_CNT 0x522 /* core rev>= 16 only */
+#define B43_MMIO_XMTFIFO_BYTE_CNT 0x524 /* core rev>= 16 only */
+#define B43_MMIO_XMTFIFO_HEAD 0x526 /* core rev>= 16 only */
+#define B43_MMIO_XMTFIFO_RD_PTR 0x528 /* core rev>= 16 only */
+#define B43_MMIO_XMTFIFO_WR_PTR 0x52A /* core rev>= 16 only */
+#define B43_MMIO_XMTFIFODEF1 0x52C /* core rev>= 16 only */
+#define B43_MMIO_XMTFIFOCMD 0x540
+#define B43_MMIO_XMTFIFOFLUSH 0x542
+#define B43_MMIO_XMTFIFOTHRESH 0x544
+#define B43_MMIO_XMTFIFORDY 0x546
+#define B43_MMIO_XMTFIFOPRIRDY 0x548
+#define B43_MMIO_XMTFIFORQPRI 0x54A
+#define B43_MMIO_XMTTPLATETXPTR 0x54C
+#define B43_MMIO_XMTTPLATEPTR 0x550
+#define B43_MMIO_SMPL_CLCT_STRPTR 0x552 /* core rev>= 22 only */
+#define B43_MMIO_SMPL_CLCT_STPPTR 0x554 /* core rev>= 22 only */
+#define B43_MMIO_SMPL_CLCT_CURPTR 0x556 /* core rev>= 22 only */
+#define B43_MMIO_XMTTPLATEDATALO 0x560
+#define B43_MMIO_XMTTPLATEDATAHI 0x562
+#define B43_MMIO_XMTSEL 0x568
+#define B43_MMIO_XMTTXCNT 0x56A
+#define B43_MMIO_XMTTXSHMADDR 0x56C
#define B43_MMIO_TSF_CFP_START_LOW 0x604
#define B43_MMIO_TSF_CFP_START_HIGH 0x606
#define B43_MMIO_TSF_CFP_PRETBTT 0x612
+#define B43_MMIO_TSF_CLK_FRAC_LOW 0x62E
+#define B43_MMIO_TSF_CLK_FRAC_HIGH 0x630
#define B43_MMIO_TSF_0 0x632 /* core rev < 3 only */
#define B43_MMIO_TSF_1 0x634 /* core rev < 3 only */
#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */
#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */
#define B43_MMIO_RNG 0x65A
#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */
-#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */
+#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */
+#define B43_MMIO_IFSSTAT 0x690
+#define B43_MMIO_IFSMEDBUSYCTL 0x692
+#define B43_MMIO_IFTXDUR 0x694
#define B43_MMIO_IFSCTL_USE_EDCF 0x0004
#define B43_MMIO_POWERUP_DELAY 0x6A8
#define B43_MMIO_BTCOEX_CTL 0x6B4 /* Bluetooth Coexistence Control */
#define B43_MMIO_BTCOEX_STAT 0x6B6 /* Bluetooth Coexistence Status */
#define B43_MMIO_BTCOEX_TXCTL 0x6B8 /* Bluetooth Coexistence Transmit Control */
+#define B43_MMIO_WEPCTL 0x7C0
/* SPROM boardflags_lo values */
#define B43_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistance */
@@ -594,6 +629,7 @@ struct b43_dma {
struct b43_dmaring *rx_ring;
u32 translation; /* Routing bits */
+ bool translation_in_low; /* Should translation bit go into low addr? */
bool parity; /* Check for parity */
};
@@ -694,6 +730,12 @@ struct b43_firmware_file {
enum b43_firmware_file_type type;
};
+enum b43_firmware_hdr_format {
+ B43_FW_HDR_598,
+ B43_FW_HDR_410,
+ B43_FW_HDR_351,
+};
+
/* Pointers to the firmware data and meta information about it. */
struct b43_firmware {
/* Microcode */
@@ -710,6 +752,9 @@ struct b43_firmware {
/* Firmware patchlevel */
u16 patch;
+ /* Format of header used by firmware */
+ enum b43_firmware_hdr_format hdr_format;
+
/* Set to true, if we are using an opensource firmware.
* Use this to check for proprietary vs opensource. */
bool opensource;
@@ -875,7 +920,7 @@ struct b43_wl {
struct b43_leds leds;
/* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
- u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
+ u8 pio_scratchspace[118] __attribute__((__aligned__(8)));
u8 pio_tailspace[4] __attribute__((__aligned__(8)));
};
@@ -965,12 +1010,6 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
return dev->__using_pio_transfers;
}
-#ifdef CONFIG_B43_FORCE_PIO
-# define B43_PIO_DEFAULT 1
-#else
-# define B43_PIO_DEFAULT 0
-#endif
-
/* Message printing */
void b43info(struct b43_wl *wl, const char *fmt, ...)
__attribute__ ((format(printf, 2, 3)));
diff --git a/drivers/net/wireless/b43/bus.c b/drivers/net/wireless/b43/bus.c
index 05f6c7bff6a..424692df239 100644
--- a/drivers/net/wireless/b43/bus.c
+++ b/drivers/net/wireless/b43/bus.c
@@ -3,6 +3,8 @@
Broadcom B43 wireless driver
Bus abstraction layer
+ Copyright (c) 2011 Rafał Miłecki <zajec5@gmail.com>
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 481e534534e..5e45604f0f5 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -47,6 +47,38 @@
* into separate slots. */
#define TX_SLOTS_PER_FRAME 2
+static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
+ enum b43_addrtype addrtype)
+{
+ u32 uninitialized_var(addr);
+
+ switch (addrtype) {
+ case B43_DMA_ADDR_LOW:
+ addr = lower_32_bits(dmaaddr);
+ if (dma->translation_in_low) {
+ addr &= ~SSB_DMA_TRANSLATION_MASK;
+ addr |= dma->translation;
+ }
+ break;
+ case B43_DMA_ADDR_HIGH:
+ addr = upper_32_bits(dmaaddr);
+ if (!dma->translation_in_low) {
+ addr &= ~SSB_DMA_TRANSLATION_MASK;
+ addr |= dma->translation;
+ }
+ break;
+ case B43_DMA_ADDR_EXT:
+ if (dma->translation_in_low)
+ addr = lower_32_bits(dmaaddr);
+ else
+ addr = upper_32_bits(dmaaddr);
+ addr &= SSB_DMA_TRANSLATION_MASK;
+ addr >>= SSB_DMA_TRANSLATION_SHIFT;
+ break;
+ }
+
+ return addr;
+}
/* 32bit DMA ops. */
static
@@ -77,10 +109,9 @@ static void op32_fill_descriptor(struct b43_dmaring *ring,
slot = (int)(&(desc->dma32) - descbase);
B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
- addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
- addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
- >> SSB_DMA_TRANSLATION_SHIFT;
- addr |= ring->dev->dma.translation;
+ addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
+ addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
+
ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
if (slot == ring->nr_slots - 1)
ctl |= B43_DMA32_DCTL_DTABLEEND;
@@ -170,11 +201,10 @@ static void op64_fill_descriptor(struct b43_dmaring *ring,
slot = (int)(&(desc->dma64) - descbase);
B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
- addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
- addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
- addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
- >> SSB_DMA_TRANSLATION_SHIFT;
- addrhi |= ring->dev->dma.translation;
+ addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
+ addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
+ addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
+
if (slot == ring->nr_slots - 1)
ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
if (start)
@@ -389,33 +419,34 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
gfp_t flags = GFP_KERNEL;
/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
- * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
- * has shown that 4K is sufficient for the latter as long as the buffer
- * does not cross an 8K boundary.
- *
- * For unknown reasons - possibly a hardware error - the BCM4311 rev
- * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
- * which accounts for the GFP_DMA flag below.
- *
- * The flags here must match the flags in free_ringmemory below!
+ * alignment and 8K buffers for 64-bit DMA with 8K alignment.
+ * In practice we could use smaller buffers for the latter, but the
+ * alignment is really important because of the hardware bug. If bit
+ * 0x00001000 is used in DMA address, some hardware (like BCM4331)
+ * copies that bit into B43_DMA64_RXSTATUS and we get false values from
+ * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
+ * more than 256 slots for ring.
*/
- if (ring->type == B43_DMA_64BIT)
- flags |= GFP_DMA;
+ u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
+ B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
+
ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
- B43_DMA_RINGMEMSIZE,
- &(ring->dmabase), flags);
+ ring_mem_size, &(ring->dmabase),
+ flags);
if (!ring->descbase) {
b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
return -ENOMEM;
}
- memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
+ memset(ring->descbase, 0, ring_mem_size);
return 0;
}
static void free_ringmemory(struct b43_dmaring *ring)
{
- dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
+ u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
+ B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
+ dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
ring->descbase, ring->dmabase);
}
@@ -658,41 +689,37 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
int err = 0;
u32 value;
u32 addrext;
- u32 trans = ring->dev->dma.translation;
bool parity = ring->dev->dma.parity;
+ u32 addrlo;
+ u32 addrhi;
if (ring->tx) {
if (ring->type == B43_DMA_64BIT) {
u64 ringbase = (u64) (ring->dmabase);
+ addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
+ addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
+ addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
- addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
- >> SSB_DMA_TRANSLATION_SHIFT;
value = B43_DMA64_TXENABLE;
value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
& B43_DMA64_TXADDREXT_MASK;
if (!parity)
value |= B43_DMA64_TXPARITYDISABLE;
b43_dma_write(ring, B43_DMA64_TXCTL, value);
- b43_dma_write(ring, B43_DMA64_TXRINGLO,
- (ringbase & 0xFFFFFFFF));
- b43_dma_write(ring, B43_DMA64_TXRINGHI,
- ((ringbase >> 32) &
- ~SSB_DMA_TRANSLATION_MASK)
- | trans);
+ b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
+ b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
} else {
u32 ringbase = (u32) (ring->dmabase);
+ addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
+ addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
- addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
- >> SSB_DMA_TRANSLATION_SHIFT;
value = B43_DMA32_TXENABLE;
value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
& B43_DMA32_TXADDREXT_MASK;
if (!parity)
value |= B43_DMA32_TXPARITYDISABLE;
b43_dma_write(ring, B43_DMA32_TXCTL, value);
- b43_dma_write(ring, B43_DMA32_TXRING,
- (ringbase & ~SSB_DMA_TRANSLATION_MASK)
- | trans);
+ b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
}
} else {
err = alloc_initial_descbuffers(ring);
@@ -700,9 +727,10 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
goto out;
if (ring->type == B43_DMA_64BIT) {
u64 ringbase = (u64) (ring->dmabase);
+ addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
+ addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
+ addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
- addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
- >> SSB_DMA_TRANSLATION_SHIFT;
value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
value |= B43_DMA64_RXENABLE;
value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
@@ -710,19 +738,15 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
if (!parity)
value |= B43_DMA64_RXPARITYDISABLE;
b43_dma_write(ring, B43_DMA64_RXCTL, value);
- b43_dma_write(ring, B43_DMA64_RXRINGLO,
- (ringbase & 0xFFFFFFFF));
- b43_dma_write(ring, B43_DMA64_RXRINGHI,
- ((ringbase >> 32) &
- ~SSB_DMA_TRANSLATION_MASK)
- | trans);
+ b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
+ b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
sizeof(struct b43_dmadesc64));
} else {
u32 ringbase = (u32) (ring->dmabase);
+ addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
+ addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
- addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
- >> SSB_DMA_TRANSLATION_SHIFT;
value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
value |= B43_DMA32_RXENABLE;
value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
@@ -730,9 +754,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
if (!parity)
value |= B43_DMA32_RXPARITYDISABLE;
b43_dma_write(ring, B43_DMA32_RXCTL, value);
- b43_dma_write(ring, B43_DMA32_RXRING,
- (ringbase & ~SSB_DMA_TRANSLATION_MASK)
- | trans);
+ b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
sizeof(struct b43_dmadesc32));
}
@@ -872,8 +894,17 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
ring->current_slot = -1;
} else {
if (ring->index == 0) {
- ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
- ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
+ switch (dev->fw.hdr_format) {
+ case B43_FW_HDR_598:
+ ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
+ ring->frameoffset = B43_DMA0_RX_FW598_FO;
+ break;
+ case B43_FW_HDR_410:
+ case B43_FW_HDR_351:
+ ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
+ ring->frameoffset = B43_DMA0_RX_FW351_FO;
+ break;
+ }
} else
B43_WARN_ON(1);
}
@@ -1066,6 +1097,25 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
return 0;
}
+/* Some hardware with 64-bit DMA seems to be bugged and looks for translation
+ * bit in low address word instead of high one.
+ */
+static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
+ enum b43_dmatype type)
+{
+ if (type != B43_DMA_64BIT)
+ return 1;
+
+#ifdef CONFIG_B43_SSB
+ if (dev->dev->bus_type == B43_BUS_SSB &&
+ dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
+ !(dev->dev->sdev->bus->host_pci->is_pcie &&
+ ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
+ return 1;
+#endif
+ return 0;
+}
+
int b43_dma_init(struct b43_wldev *dev)
{
struct b43_dma *dma = &dev->dma;
@@ -1091,6 +1141,7 @@ int b43_dma_init(struct b43_wldev *dev)
break;
#endif
}
+ dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);
dma->parity = true;
#ifdef CONFIG_B43_BCMA
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index cdf87094efe..315b96ed1d9 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -161,13 +161,17 @@ struct b43_dmadesc_generic {
} __packed;
/* Misc DMA constants */
-#define B43_DMA_RINGMEMSIZE PAGE_SIZE
-#define B43_DMA0_RX_FRAMEOFFSET 30
+#define B43_DMA32_RINGMEMSIZE 4096
+#define B43_DMA64_RINGMEMSIZE 8192
+/* Offset of frame with actual data */
+#define B43_DMA0_RX_FW598_FO 38
+#define B43_DMA0_RX_FW351_FO 30
/* DMA engine tuning knobs */
#define B43_TXRING_SLOTS 256
#define B43_RXRING_SLOTS 64
-#define B43_DMA0_RX_BUFFERSIZE (B43_DMA0_RX_FRAMEOFFSET + IEEE80211_MAX_FRAME_LEN)
+#define B43_DMA0_RX_FW598_BUFSIZE (B43_DMA0_RX_FW598_FO + IEEE80211_MAX_FRAME_LEN)
+#define B43_DMA0_RX_FW351_BUFSIZE (B43_DMA0_RX_FW351_FO + IEEE80211_MAX_FRAME_LEN)
/* Pointer poison */
#define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM))
@@ -212,6 +216,12 @@ enum b43_dmatype {
B43_DMA_64BIT = 64,
};
+enum b43_addrtype {
+ B43_DMA_ADDR_LOW,
+ B43_DMA_ADDR_HIGH,
+ B43_DMA_ADDR_EXT,
+};
+
struct b43_dmaring {
/* Lowlevel DMA ops. */
const struct b43_dma_ops *ops;
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index e293a7921bf..7cf4125a162 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -7,6 +7,7 @@
Copyright (c) 2005-2009 Michael Buesch <m@bues.ch>
Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
+ Copyright (c) 2010-2011 Rafał Miłecki <zajec5@gmail.com>
SDIO support
Copyright (c) 2009 Albert Herranz <albert_herranz@yahoo.es>
@@ -37,7 +38,6 @@
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
-#include <linux/wireless.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/io.h>
@@ -65,9 +65,9 @@ MODULE_AUTHOR("Martin Langer");
MODULE_AUTHOR("Stefano Brivio");
MODULE_AUTHOR("Michael Buesch");
MODULE_AUTHOR("Gábor Stefanik");
+MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID);
MODULE_FIRMWARE("b43/ucode11.fw");
MODULE_FIRMWARE("b43/ucode13.fw");
MODULE_FIRMWARE("b43/ucode14.fw");
@@ -109,12 +109,13 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
module_param_named(verbose, b43_modparam_verbose, int, 0644);
MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
-static int b43_modparam_pio = B43_PIO_DEFAULT;
+static int b43_modparam_pio = 0;
module_param_named(pio, b43_modparam_pio, int, 0644);
MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
#ifdef CONFIG_B43_BCMA
static const struct bcma_device_id b43_bcma_tbl[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS),
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS),
@@ -320,6 +321,10 @@ static void b43_wireless_core_exit(struct b43_wldev *dev);
static int b43_wireless_core_init(struct b43_wldev *dev);
static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev);
static int b43_wireless_core_start(struct b43_wldev *dev);
+static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf,
+ u32 changed);
static int b43_ratelimit(struct b43_wl *wl)
{
@@ -724,52 +729,59 @@ void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on)
for (i = 0; i < 5; i++)
b43_ram_write(dev, i * 4, buffer[i]);
- b43_write16(dev, 0x0568, 0x0000);
+ b43_write16(dev, B43_MMIO_XMTSEL, 0x0000);
+
if (dev->dev->core_rev < 11)
- b43_write16(dev, 0x07C0, 0x0000);
+ b43_write16(dev, B43_MMIO_WEPCTL, 0x0000);
else
- b43_write16(dev, 0x07C0, 0x0100);
+ b43_write16(dev, B43_MMIO_WEPCTL, 0x0100);
+
value = (ofdm ? 0x41 : 0x40);
- b43_write16(dev, 0x050C, value);
- if ((phy->type == B43_PHYTYPE_N) || (phy->type == B43_PHYTYPE_LP))
- b43_write16(dev, 0x0514, 0x1A02);
- b43_write16(dev, 0x0508, 0x0000);
- b43_write16(dev, 0x050A, 0x0000);
- b43_write16(dev, 0x054C, 0x0000);
- b43_write16(dev, 0x056A, 0x0014);
- b43_write16(dev, 0x0568, 0x0826);
- b43_write16(dev, 0x0500, 0x0000);
- if (!pa_on && (phy->type == B43_PHYTYPE_N)) {
- //SPEC TODO
- }
+ b43_write16(dev, B43_MMIO_TXE0_PHYCTL, value);
+ if (phy->type == B43_PHYTYPE_N || phy->type == B43_PHYTYPE_LP ||
+ phy->type == B43_PHYTYPE_LCN)
+ b43_write16(dev, B43_MMIO_TXE0_PHYCTL1, 0x1A02);
+
+ b43_write16(dev, B43_MMIO_TXE0_WM_0, 0x0000);
+ b43_write16(dev, B43_MMIO_TXE0_WM_1, 0x0000);
+
+ b43_write16(dev, B43_MMIO_XMTTPLATETXPTR, 0x0000);
+ b43_write16(dev, B43_MMIO_XMTTXCNT, 0x0014);
+ b43_write16(dev, B43_MMIO_XMTSEL, 0x0826);
+ b43_write16(dev, B43_MMIO_TXE0_CTL, 0x0000);
+
+ if (!pa_on && phy->type == B43_PHYTYPE_N)
+ ; /*b43_nphy_pa_override(dev, false) */
switch (phy->type) {
case B43_PHYTYPE_N:
- b43_write16(dev, 0x0502, 0x00D0);
+ case B43_PHYTYPE_LCN:
+ b43_write16(dev, B43_MMIO_TXE0_AUX, 0x00D0);
break;
case B43_PHYTYPE_LP:
- b43_write16(dev, 0x0502, 0x0050);
+ b43_write16(dev, B43_MMIO_TXE0_AUX, 0x0050);
break;
default:
- b43_write16(dev, 0x0502, 0x0030);
+ b43_write16(dev, B43_MMIO_TXE0_AUX, 0x0030);
}
+ b43_read16(dev, B43_MMIO_TXE0_AUX);
if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5)
b43_radio_write16(dev, 0x0051, 0x0017);
for (i = 0x00; i < max_loop; i++) {
- value = b43_read16(dev, 0x050E);
+ value = b43_read16(dev, B43_MMIO_TXE0_STATUS);
if (value & 0x0080)
break;
udelay(10);
}
for (i = 0x00; i < 0x0A; i++) {
- value = b43_read16(dev, 0x050E);
+ value = b43_read16(dev, B43_MMIO_TXE0_STATUS);
if (value & 0x0400)
break;
udelay(10);
}
for (i = 0x00; i < 0x19; i++) {
- value = b43_read16(dev, 0x0690);
+ value = b43_read16(dev, B43_MMIO_IFSSTAT);
if (!(value & 0x0100))
break;
udelay(10);
@@ -2511,6 +2523,12 @@ static int b43_upload_microcode(struct b43_wldev *dev)
}
dev->fw.rev = fwrev;
dev->fw.patch = fwpatch;
+ if (dev->fw.rev >= 598)
+ dev->fw.hdr_format = B43_FW_HDR_598;
+ else if (dev->fw.rev >= 410)
+ dev->fw.hdr_format = B43_FW_HDR_410;
+ else
+ dev->fw.hdr_format = B43_FW_HDR_351;
dev->fw.opensource = (fwdate == 0xFFFF);
/* Default to use-all-queues. */
@@ -2558,7 +2576,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
dev->fw.rev, dev->fw.patch);
wiphy->hw_version = dev->dev->core_id;
- if (b43_is_old_txhdr_format(dev)) {
+ if (dev->fw.hdr_format == B43_FW_HDR_351) {
/* We're over the deadline, but we keep support for old fw
* until it turns out to be in major conflict with something new. */
b43warn(dev->wl, "You are using an old firmware image. "
@@ -2944,6 +2962,8 @@ static void b43_rate_memory_init(struct b43_wldev *dev)
case B43_PHYTYPE_G:
case B43_PHYTYPE_N:
case B43_PHYTYPE_LP:
+ case B43_PHYTYPE_HT:
+ case B43_PHYTYPE_LCN:
b43_rate_memory_write(dev, B43_OFDM_RATE_6MB, 1);
b43_rate_memory_write(dev, B43_OFDM_RATE_12MB, 1);
b43_rate_memory_write(dev, B43_OFDM_RATE_18MB, 1);
@@ -3539,7 +3559,8 @@ static void b43_qos_init(struct b43_wldev *dev)
b43dbg(dev->wl, "QoS enabled\n");
}
-static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue,
+static int b43_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 _queue,
const struct ieee80211_tx_queue_params *params)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
@@ -3586,7 +3607,7 @@ static int b43_op_get_stats(struct ieee80211_hw *hw,
return 0;
}
-static u64 b43_op_get_tsf(struct ieee80211_hw *hw)
+static u64 b43_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
@@ -3605,7 +3626,8 @@ static u64 b43_op_get_tsf(struct ieee80211_hw *hw)
return tsf;
}
-static void b43_op_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+static void b43_op_set_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u64 tsf)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
@@ -3779,14 +3801,24 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
struct ieee80211_conf *conf = &hw->conf;
int antenna;
int err = 0;
+ bool reload_bss = false;
mutex_lock(&wl->mutex);
+ dev = wl->current_dev;
+
/* Switch the band (if necessary). This might change the active core. */
err = b43_switch_band(wl, conf->channel);
if (err)
goto out_unlock_mutex;
- dev = wl->current_dev;
+
+ /* Need to reload all settings if the core changed */
+ if (dev != wl->current_dev) {
+ dev = wl->current_dev;
+ changed = ~0;
+ reload_bss = true;
+ }
+
phy = &dev->phy;
if (conf_is_ht(conf))
@@ -3847,6 +3879,9 @@ out_mac_enable:
out_unlock_mutex:
mutex_unlock(&wl->mutex);
+ if (wl->vif && reload_bss)
+ b43_op_bss_info_changed(hw, wl->vif, &wl->vif->bss_conf, ~0);
+
return err;
}
@@ -3935,7 +3970,8 @@ static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON_INT &&
(b43_is_mode(wl, NL80211_IFTYPE_AP) ||
b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) ||
- b43_is_mode(wl, NL80211_IFTYPE_ADHOC)))
+ b43_is_mode(wl, NL80211_IFTYPE_ADHOC)) &&
+ conf->beacon_int)
b43_set_beacon_int(dev, conf->beacon_int);
if (changed & BSS_CHANGED_BASIC_RATES)
@@ -4108,10 +4144,13 @@ out_unlock:
* because the core might be gone away while we unlocked the mutex. */
static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev)
{
- struct b43_wl *wl = dev->wl;
+ struct b43_wl *wl;
struct b43_wldev *orig_dev;
u32 mask;
+ if (!dev)
+ return NULL;
+ wl = dev->wl;
redo:
if (!dev || b43_status(dev) < B43_STAT_STARTED)
return dev;
@@ -4630,8 +4669,13 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF);
if (b43_bus_host_is_pcmcia(dev->dev) ||
- b43_bus_host_is_sdio(dev->dev) ||
- dev->use_pio) {
+ b43_bus_host_is_sdio(dev->dev)) {
+ dev->__using_pio_transfers = 1;
+ err = b43_pio_init(dev);
+ } else if (dev->use_pio) {
+ b43warn(dev->wl, "Forced PIO by use_pio module parameter. "
+ "This should not be needed and will result in lower "
+ "performance.\n");
dev->__using_pio_transfers = 1;
err = b43_pio_init(dev);
} else {
@@ -4703,6 +4747,9 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
out_mutex_unlock:
mutex_unlock(&wl->mutex);
+ if (err == 0)
+ b43_op_bss_info_changed(hw, vif, &vif->bss_conf, ~0);
+
return err;
}
@@ -4773,6 +4820,9 @@ static int b43_op_start(struct ieee80211_hw *hw)
out_mutex_unlock:
mutex_unlock(&wl->mutex);
+ /* reload configuration */
+ b43_op_config(hw, ~0);
+
return err;
}
@@ -4929,10 +4979,18 @@ out:
if (err)
wl->current_dev = NULL; /* Failed to init the dev. */
mutex_unlock(&wl->mutex);
- if (err)
+
+ if (err) {
b43err(wl, "Controller restart FAILED\n");
- else
- b43info(wl, "Controller restarted\n");
+ return;
+ }
+
+ /* reload configuration */
+ b43_op_config(wl->hw, ~0);
+ if (wl->vif)
+ b43_op_bss_info_changed(wl->hw, wl->vif, &wl->vif->bss_conf, ~0);
+
+ b43info(wl, "Controller restarted\n");
}
static int b43_setup_bands(struct b43_wldev *dev,
@@ -5417,8 +5475,7 @@ static void b43_print_driverinfo(void)
feat_sdio = "S";
#endif
printk(KERN_INFO "Broadcom 43xx driver loaded "
- "[ Features: %s%s%s%s%s, Firmware-ID: "
- B43_SUPPORTED_FIRMWARE_ID " ]\n",
+ "[ Features: %s%s%s%s%s ]\n",
feat_pci, feat_pcmcia, feat_nphy,
feat_leds, feat_sdio);
}
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 07f009ff5ee..3ea44bb0368 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -448,6 +448,38 @@ bool b43_channel_type_is_40mhz(enum nl80211_channel_type channel_type)
channel_type == NL80211_CHAN_HT40PLUS);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
+void b43_phy_force_clock(struct b43_wldev *dev, bool force)
+{
+ u32 tmp;
+
+ WARN_ON(dev->phy.type != B43_PHYTYPE_N &&
+ dev->phy.type != B43_PHYTYPE_HT);
+
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ if (force)
+ tmp |= BCMA_IOCTL_FGC;
+ else
+ tmp &= ~BCMA_IOCTL_FGC;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ if (force)
+ tmp |= SSB_TMSLOW_FGC;
+ else
+ tmp &= ~SSB_TMSLOW_FGC;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ break;
+#endif
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */
struct b43_c32 b43_cordic(int theta)
{
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index aa77ba612a9..9233b13fc16 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -444,6 +444,8 @@ void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on);
bool b43_channel_type_is_40mhz(enum nl80211_channel_type channel_type);
+void b43_phy_force_clock(struct b43_wldev *dev, bool force);
+
struct b43_c32 b43_cordic(int theta);
#endif /* LINUX_B43_PHY_COMMON_H_ */
diff --git a/drivers/net/wireless/b43/phy_ht.c b/drivers/net/wireless/b43/phy_ht.c
index 7c40919651a..7416c5e9154 100644
--- a/drivers/net/wireless/b43/phy_ht.c
+++ b/drivers/net/wireless/b43/phy_ht.c
@@ -3,6 +3,8 @@
Broadcom B43 wireless driver
IEEE 802.11n HT-PHY support
+ Copyright (c) 2011 Rafał Miłecki <zajec5@gmail.com>
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
@@ -152,6 +154,92 @@ static void b43_radio_2059_init(struct b43_wldev *dev)
}
/**************************************************
+ * Various PHY ops
+ **************************************************/
+
+static void b43_phy_ht_zero_extg(struct b43_wldev *dev)
+{
+ u8 i, j;
+ u16 base[] = { 0x40, 0x60, 0x80 };
+
+ for (i = 0; i < ARRAY_SIZE(base); i++) {
+ for (j = 0; j < 4; j++)
+ b43_phy_write(dev, B43_PHY_EXTG(base[i] + j), 0);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(base); i++)
+ b43_phy_write(dev, B43_PHY_EXTG(base[i] + 0xc), 0);
+}
+
+/* Some unknown AFE (Analog Frondned) op */
+static void b43_phy_ht_afe_unk1(struct b43_wldev *dev)
+{
+ u8 i;
+
+ const u16 ctl_regs[3][2] = {
+ { B43_PHY_HT_AFE_CTL1, B43_PHY_HT_AFE_CTL2 },
+ { B43_PHY_HT_AFE_CTL3, B43_PHY_HT_AFE_CTL4 },
+ { B43_PHY_HT_AFE_CTL5, B43_PHY_HT_AFE_CTL6},
+ };
+
+ for (i = 0; i < 3; i++) {
+ /* TODO: verify masks&sets */
+ b43_phy_set(dev, ctl_regs[i][1], 0x4);
+ b43_phy_set(dev, ctl_regs[i][0], 0x4);
+ b43_phy_mask(dev, ctl_regs[i][1], ~0x1);
+ b43_phy_set(dev, ctl_regs[i][0], 0x1);
+ b43_httab_write(dev, B43_HTTAB16(8, 5 + (i * 0x10)), 0);
+ b43_phy_mask(dev, ctl_regs[i][0], ~0x4);
+ }
+}
+
+static void b43_phy_ht_force_rf_sequence(struct b43_wldev *dev, u16 rf_seq)
+{
+ u8 i;
+
+ u16 save_seq_mode = b43_phy_read(dev, B43_PHY_HT_RF_SEQ_MODE);
+ b43_phy_set(dev, B43_PHY_HT_RF_SEQ_MODE, 0x3);
+
+ b43_phy_set(dev, B43_PHY_HT_RF_SEQ_TRIG, rf_seq);
+ for (i = 0; i < 200; i++) {
+ if (!(b43_phy_read(dev, B43_PHY_HT_RF_SEQ_STATUS) & rf_seq)) {
+ i = 0;
+ break;
+ }
+ msleep(1);
+ }
+ if (i)
+ b43err(dev->wl, "Forcing RF sequence timeout\n");
+
+ b43_phy_write(dev, B43_PHY_HT_RF_SEQ_MODE, save_seq_mode);
+}
+
+static void b43_phy_ht_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+{
+ clip_st[0] = b43_phy_read(dev, B43_PHY_HT_C1_CLIP1THRES);
+ clip_st[1] = b43_phy_read(dev, B43_PHY_HT_C2_CLIP1THRES);
+ clip_st[2] = b43_phy_read(dev, B43_PHY_HT_C3_CLIP1THRES);
+}
+
+static void b43_phy_ht_bphy_init(struct b43_wldev *dev)
+{
+ unsigned int i;
+ u16 val;
+
+ val = 0x1E1F;
+ for (i = 0; i < 16; i++) {
+ b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val);
+ val -= 0x202;
+ }
+ val = 0x3E3F;
+ for (i = 0; i < 16; i++) {
+ b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val);
+ val -= 0x202;
+ }
+ b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
+}
+
+/**************************************************
* Channel switching ops.
**************************************************/
@@ -255,8 +343,125 @@ static void b43_phy_ht_op_prepare_structs(struct b43_wldev *dev)
static int b43_phy_ht_op_init(struct b43_wldev *dev)
{
+ u16 tmp;
+ u16 clip_state[3];
+
b43_phy_ht_tables_init(dev);
+ b43_phy_mask(dev, 0x0be, ~0x2);
+ b43_phy_set(dev, 0x23f, 0x7ff);
+ b43_phy_set(dev, 0x240, 0x7ff);
+ b43_phy_set(dev, 0x241, 0x7ff);
+
+ b43_phy_ht_zero_extg(dev);
+
+ b43_phy_mask(dev, B43_PHY_EXTG(0), ~0x3);
+
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL1, 0);
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL3, 0);
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL5, 0);
+
+ b43_phy_write(dev, B43_PHY_EXTG(0x103), 0x20);
+ b43_phy_write(dev, B43_PHY_EXTG(0x101), 0x20);
+ b43_phy_write(dev, 0x20d, 0xb8);
+ b43_phy_write(dev, B43_PHY_EXTG(0x14f), 0xc8);
+ b43_phy_write(dev, 0x70, 0x50);
+ b43_phy_write(dev, 0x1ff, 0x30);
+
+ if (0) /* TODO: condition */
+ ; /* TODO: PHY op on reg 0x217 */
+
+ b43_phy_read(dev, 0xb0); /* TODO: what for? */
+ b43_phy_set(dev, 0xb0, 0x1);
+
+ b43_phy_set(dev, 0xb1, 0x91);
+ b43_phy_write(dev, 0x32f, 0x0003);
+ b43_phy_write(dev, 0x077, 0x0010);
+ b43_phy_write(dev, 0x0b4, 0x0258);
+ b43_phy_mask(dev, 0x17e, ~0x4000);
+
+ b43_phy_write(dev, 0x0b9, 0x0072);
+
+ b43_httab_write_few(dev, B43_HTTAB16(7, 0x14e), 2, 0x010f, 0x010f);
+ b43_httab_write_few(dev, B43_HTTAB16(7, 0x15e), 2, 0x010f, 0x010f);
+ b43_httab_write_few(dev, B43_HTTAB16(7, 0x16e), 2, 0x010f, 0x010f);
+
+ b43_phy_ht_afe_unk1(dev);
+
+ b43_httab_write_few(dev, B43_HTTAB16(7, 0x130), 9, 0x777, 0x111, 0x111,
+ 0x777, 0x111, 0x111, 0x777, 0x111, 0x111);
+
+ b43_httab_write(dev, B43_HTTAB16(7, 0x120), 0x0777);
+ b43_httab_write(dev, B43_HTTAB16(7, 0x124), 0x0777);
+
+ b43_httab_write(dev, B43_HTTAB16(8, 0x00), 0x02);
+ b43_httab_write(dev, B43_HTTAB16(8, 0x10), 0x02);
+ b43_httab_write(dev, B43_HTTAB16(8, 0x20), 0x02);
+
+ b43_httab_write_few(dev, B43_HTTAB16(8, 0x08), 4,
+ 0x8e, 0x96, 0x96, 0x96);
+ b43_httab_write_few(dev, B43_HTTAB16(8, 0x18), 4,
+ 0x8f, 0x9f, 0x9f, 0x9f);
+ b43_httab_write_few(dev, B43_HTTAB16(8, 0x28), 4,
+ 0x8f, 0x9f, 0x9f, 0x9f);
+
+ b43_httab_write_few(dev, B43_HTTAB16(8, 0x0c), 4, 0x2, 0x2, 0x2, 0x2);
+ b43_httab_write_few(dev, B43_HTTAB16(8, 0x1c), 4, 0x2, 0x2, 0x2, 0x2);
+ b43_httab_write_few(dev, B43_HTTAB16(8, 0x2c), 4, 0x2, 0x2, 0x2, 0x2);
+
+ b43_phy_maskset(dev, 0x0280, 0xff00, 0x3e);
+ b43_phy_maskset(dev, 0x0283, 0xff00, 0x3e);
+ b43_phy_maskset(dev, B43_PHY_OFDM(0x0141), 0xff00, 0x46);
+ b43_phy_maskset(dev, 0x0283, 0xff00, 0x40);
+
+ b43_httab_write_few(dev, B43_HTTAB16(00, 0x8), 4,
+ 0x09, 0x0e, 0x13, 0x18);
+ b43_httab_write_few(dev, B43_HTTAB16(01, 0x8), 4,
+ 0x09, 0x0e, 0x13, 0x18);
+ /* TODO: Did wl mean 2 instead of 40? */
+ b43_httab_write_few(dev, B43_HTTAB16(40, 0x8), 4,
+ 0x09, 0x0e, 0x13, 0x18);
+
+ b43_phy_maskset(dev, B43_PHY_OFDM(0x24), 0x3f, 0xd);
+ b43_phy_maskset(dev, B43_PHY_OFDM(0x64), 0x3f, 0xd);
+ b43_phy_maskset(dev, B43_PHY_OFDM(0xa4), 0x3f, 0xd);
+
+ b43_phy_set(dev, B43_PHY_EXTG(0x060), 0x1);
+ b43_phy_set(dev, B43_PHY_EXTG(0x064), 0x1);
+ b43_phy_set(dev, B43_PHY_EXTG(0x080), 0x1);
+ b43_phy_set(dev, B43_PHY_EXTG(0x084), 0x1);
+
+ /* Copy some tables entries */
+ tmp = b43_httab_read(dev, B43_HTTAB16(7, 0x144));
+ b43_httab_write(dev, B43_HTTAB16(7, 0x14a), tmp);
+ tmp = b43_httab_read(dev, B43_HTTAB16(7, 0x154));
+ b43_httab_write(dev, B43_HTTAB16(7, 0x15a), tmp);
+ tmp = b43_httab_read(dev, B43_HTTAB16(7, 0x164));
+ b43_httab_write(dev, B43_HTTAB16(7, 0x16a), tmp);
+
+ /* Reset CCA */
+ b43_phy_force_clock(dev, true);
+ tmp = b43_phy_read(dev, B43_PHY_HT_BBCFG);
+ b43_phy_write(dev, B43_PHY_HT_BBCFG, tmp | B43_PHY_HT_BBCFG_RSTCCA);
+ b43_phy_write(dev, B43_PHY_HT_BBCFG, tmp & ~B43_PHY_HT_BBCFG_RSTCCA);
+ b43_phy_force_clock(dev, false);
+
+ b43_mac_phy_clock_set(dev, true);
+
+ b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RX2TX);
+ b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RST2RX);
+
+ /* TODO: PHY op on reg 0xb0 */
+
+ /* TODO: Should we restore it? Or store it in global PHY info? */
+ b43_phy_ht_read_clip_detection(dev, clip_state);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_phy_ht_bphy_init(dev);
+
+ b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0),
+ B43_HTTAB_1A_C0_LATE_SIZE, b43_httab_0x1a_0xc0_late);
+
return 0;
}
diff --git a/drivers/net/wireless/b43/phy_ht.h b/drivers/net/wireless/b43/phy_ht.h
index 7ad7affc8df..6544c4293b3 100644
--- a/drivers/net/wireless/b43/phy_ht.h
+++ b/drivers/net/wireless/b43/phy_ht.h
@@ -4,7 +4,11 @@
#include "phy_common.h"
+#define B43_PHY_HT_BBCFG 0x001 /* BB config */
+#define B43_PHY_HT_BBCFG_RSTCCA 0x4000 /* Reset CCA */
+#define B43_PHY_HT_BBCFG_RSTRX 0x8000 /* Reset RX */
#define B43_PHY_HT_BANDCTL 0x009 /* Band control */
+#define B43_PHY_HT_BANDCTL_5GHZ 0x0001 /* Use the 5GHz band */
#define B43_PHY_HT_TABLE_ADDR 0x072 /* Table address */
#define B43_PHY_HT_TABLE_DATALO 0x073 /* Table data low */
#define B43_PHY_HT_TABLE_DATAHI 0x074 /* Table data high */
@@ -15,6 +19,21 @@
#define B43_PHY_HT_BW5 0x1D2
#define B43_PHY_HT_BW6 0x1D3
+#define B43_PHY_HT_C1_CLIP1THRES B43_PHY_OFDM(0x00E)
+#define B43_PHY_HT_C2_CLIP1THRES B43_PHY_OFDM(0x04E)
+#define B43_PHY_HT_C3_CLIP1THRES B43_PHY_OFDM(0x08E)
+
+#define B43_PHY_HT_RF_SEQ_MODE B43_PHY_EXTG(0x000)
+#define B43_PHY_HT_RF_SEQ_TRIG B43_PHY_EXTG(0x003)
+#define B43_PHY_HT_RF_SEQ_TRIG_RX2TX 0x0001 /* RX2TX */
+#define B43_PHY_HT_RF_SEQ_TRIG_TX2RX 0x0002 /* TX2RX */
+#define B43_PHY_HT_RF_SEQ_TRIG_UPGH 0x0004 /* Update gain H */
+#define B43_PHY_HT_RF_SEQ_TRIG_UPGL 0x0008 /* Update gain L */
+#define B43_PHY_HT_RF_SEQ_TRIG_UPGU 0x0010 /* Update gain U */
+#define B43_PHY_HT_RF_SEQ_TRIG_RST2RX 0x0020 /* Reset to RX */
+#define B43_PHY_HT_RF_SEQ_STATUS B43_PHY_EXTG(0x004)
+/* Values for the status are the same as for the trigger */
+
#define B43_PHY_HT_RF_CTL1 B43_PHY_EXTG(0x010)
#define B43_PHY_HT_AFE_CTL1 B43_PHY_EXTG(0x110)
diff --git a/drivers/net/wireless/b43/phy_lcn.c b/drivers/net/wireless/b43/phy_lcn.c
index 9f7dbbd5ced..a13e28ef624 100644
--- a/drivers/net/wireless/b43/phy_lcn.c
+++ b/drivers/net/wireless/b43/phy_lcn.c
@@ -3,6 +3,8 @@
Broadcom B43 wireless driver
IEEE 802.11n LCN-PHY support
+ Copyright (c) 2011 Rafał Miłecki <zajec5@gmail.com>
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
@@ -18,6 +20,14 @@
the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
Boston, MA 02110-1301, USA.
+ This file incorporates work covered by the following copyright and
+ permission notice:
+
+ Copyright (c) 2010 Broadcom Corporation
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
*/
#include <linux/slab.h>
@@ -27,12 +37,852 @@
#include "tables_phy_lcn.h"
#include "main.h"
+struct lcn_tx_gains {
+ u16 gm_gain;
+ u16 pga_gain;
+ u16 pad_gain;
+ u16 dac_gain;
+};
+
+struct lcn_tx_iir_filter {
+ u8 type;
+ u16 values[16];
+};
+
+enum lcn_sense_type {
+ B43_SENSE_TEMP,
+ B43_SENSE_VBAT,
+};
+
+/* In theory it's PHY common function, move if needed */
+/* brcms_b_switch_macfreq */
+static void b43_phy_switch_macfreq(struct b43_wldev *dev, u8 spurmode)
+{
+ if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) {
+ switch (spurmode) {
+ case 2: /* 126 Mhz */
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x2082);
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
+ break;
+ case 1: /* 123 Mhz */
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x5341);
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
+ break;
+ default: /* 120 Mhz */
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x8889);
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
+ break;
+ }
+ } else if (dev->phy.type == B43_PHYTYPE_LCN) {
+ switch (spurmode) {
+ case 1: /* 82 Mhz */
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x7CE0);
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0xC);
+ break;
+ default: /* 80 Mhz */
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0xCCCD);
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0xC);
+ break;
+ }
+ }
+}
+
+/**************************************************
+ * Radio 2064.
+ **************************************************/
+
+/* wlc_lcnphy_radio_2064_channel_tune_4313 */
+static void b43_radio_2064_channel_setup(struct b43_wldev *dev)
+{
+ u16 save[2];
+
+ b43_radio_set(dev, 0x09d, 0x4);
+ b43_radio_write(dev, 0x09e, 0xf);
+
+ /* Channel specific values in theory, in practice always the same */
+ b43_radio_write(dev, 0x02a, 0xb);
+ b43_radio_maskset(dev, 0x030, ~0x3, 0xa);
+ b43_radio_maskset(dev, 0x091, ~0x3, 0);
+ b43_radio_maskset(dev, 0x038, ~0xf, 0x7);
+ b43_radio_maskset(dev, 0x030, ~0xc, 0x8);
+ b43_radio_maskset(dev, 0x05e, ~0xf, 0x8);
+ b43_radio_maskset(dev, 0x05e, ~0xf0, 0x80);
+ b43_radio_write(dev, 0x06c, 0x80);
+
+ save[0] = b43_radio_read(dev, 0x044);
+ save[1] = b43_radio_read(dev, 0x12b);
+
+ b43_radio_set(dev, 0x044, 0x7);
+ b43_radio_set(dev, 0x12b, 0xe);
+
+ /* TODO */
+
+ b43_radio_write(dev, 0x040, 0xfb);
+
+ b43_radio_write(dev, 0x041, 0x9a);
+ b43_radio_write(dev, 0x042, 0xa3);
+ b43_radio_write(dev, 0x043, 0x0c);
+
+ /* TODO */
+
+ b43_radio_set(dev, 0x044, 0x0c);
+ udelay(1);
+
+ b43_radio_write(dev, 0x044, save[0]);
+ b43_radio_write(dev, 0x12b, save[1]);
+
+ if (dev->phy.rev == 1) {
+ /* brcmsmac uses outdated 0x3 for 0x038 */
+ b43_radio_write(dev, 0x038, 0x0);
+ b43_radio_write(dev, 0x091, 0x7);
+ }
+}
+
+/* wlc_radio_2064_init */
+static void b43_radio_2064_init(struct b43_wldev *dev)
+{
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, 0x09c, 0x0020);
+ b43_radio_write(dev, 0x105, 0x0008);
+ } else {
+ /* TODO */
+ }
+ b43_radio_write(dev, 0x032, 0x0062);
+ b43_radio_write(dev, 0x033, 0x0019);
+ b43_radio_write(dev, 0x090, 0x0010);
+ b43_radio_write(dev, 0x010, 0x0000);
+ if (dev->phy.rev == 1) {
+ b43_radio_write(dev, 0x060, 0x007f);
+ b43_radio_write(dev, 0x061, 0x0072);
+ b43_radio_write(dev, 0x062, 0x007f);
+ }
+ b43_radio_write(dev, 0x01d, 0x0002);
+ b43_radio_write(dev, 0x01e, 0x0006);
+
+ b43_phy_write(dev, 0x4ea, 0x4688);
+ b43_phy_maskset(dev, 0x4eb, ~0x7, 0x2);
+ b43_phy_mask(dev, 0x4eb, ~0x01c0);
+ b43_phy_maskset(dev, 0x46a, 0xff00, 0x19);
+
+ b43_lcntab_write(dev, B43_LCNTAB16(0x00, 0x55), 0);
+
+ b43_radio_mask(dev, 0x05b, (u16) ~0xff02);
+ b43_radio_set(dev, 0x004, 0x40);
+ b43_radio_set(dev, 0x120, 0x10);
+ b43_radio_set(dev, 0x078, 0x80);
+ b43_radio_set(dev, 0x129, 0x2);
+ b43_radio_set(dev, 0x057, 0x1);
+ b43_radio_set(dev, 0x05b, 0x2);
+
+ /* TODO: wait for some bit to be set */
+ b43_radio_read(dev, 0x05c);
+
+ b43_radio_mask(dev, 0x05b, (u16) ~0xff02);
+ b43_radio_mask(dev, 0x057, (u16) ~0xff01);
+
+ b43_phy_write(dev, 0x933, 0x2d6b);
+ b43_phy_write(dev, 0x934, 0x2d6b);
+ b43_phy_write(dev, 0x935, 0x2d6b);
+ b43_phy_write(dev, 0x936, 0x2d6b);
+ b43_phy_write(dev, 0x937, 0x016b);
+
+ b43_radio_mask(dev, 0x057, (u16) ~0xff02);
+ b43_radio_write(dev, 0x0c2, 0x006f);
+}
+
+/**************************************************
+ * Various PHY ops
+ **************************************************/
+
+/* wlc_lcnphy_toggle_afe_pwdn */
+static void b43_phy_lcn_afe_set_unset(struct b43_wldev *dev)
+{
+ u16 afe_ctl2 = b43_phy_read(dev, B43_PHY_LCN_AFE_CTL2);
+ u16 afe_ctl1 = b43_phy_read(dev, B43_PHY_LCN_AFE_CTL1);
+
+ b43_phy_write(dev, B43_PHY_LCN_AFE_CTL2, afe_ctl2 | 0x1);
+ b43_phy_write(dev, B43_PHY_LCN_AFE_CTL1, afe_ctl1 | 0x1);
+
+ b43_phy_write(dev, B43_PHY_LCN_AFE_CTL2, afe_ctl2 & ~0x1);
+ b43_phy_write(dev, B43_PHY_LCN_AFE_CTL1, afe_ctl1 & ~0x1);
+
+ b43_phy_write(dev, B43_PHY_LCN_AFE_CTL2, afe_ctl2);
+ b43_phy_write(dev, B43_PHY_LCN_AFE_CTL1, afe_ctl1);
+}
+
+/* wlc_lcnphy_get_pa_gain */
+static u16 b43_phy_lcn_get_pa_gain(struct b43_wldev *dev)
+{
+ return (b43_phy_read(dev, 0x4fb) & 0x7f00) >> 8;
+}
+
+/* wlc_lcnphy_set_dac_gain */
+static void b43_phy_lcn_set_dac_gain(struct b43_wldev *dev, u16 dac_gain)
+{
+ u16 dac_ctrl;
+
+ dac_ctrl = b43_phy_read(dev, 0x439);
+ dac_ctrl = dac_ctrl & 0xc7f;
+ dac_ctrl = dac_ctrl | (dac_gain << 7);
+ b43_phy_maskset(dev, 0x439, ~0xfff, dac_ctrl);
+}
+
+/* wlc_lcnphy_set_bbmult */
+static void b43_phy_lcn_set_bbmult(struct b43_wldev *dev, u8 m0)
+{
+ b43_lcntab_write(dev, B43_LCNTAB16(0x00, 0x57), m0 << 8);
+}
+
+/* wlc_lcnphy_clear_tx_power_offsets */
+static void b43_phy_lcn_clear_tx_power_offsets(struct b43_wldev *dev)
+{
+ u8 i;
+
+ if (1) { /* FIXME */
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, (0x7 << 10) | 0x340);
+ for (i = 0; i < 30; i++) {
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_DATAHI, 0);
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, 0);
+ }
+ }
+
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, (0x7 << 10) | 0x80);
+ for (i = 0; i < 64; i++) {
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_DATAHI, 0);
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, 0);
+ }
+}
+
+/* wlc_lcnphy_rev0_baseband_init */
+static void b43_phy_lcn_rev0_baseband_init(struct b43_wldev *dev)
+{
+ b43_radio_write(dev, 0x11c, 0);
+
+ b43_phy_write(dev, 0x43b, 0);
+ b43_phy_write(dev, 0x43c, 0);
+ b43_phy_write(dev, 0x44c, 0);
+ b43_phy_write(dev, 0x4e6, 0);
+ b43_phy_write(dev, 0x4f9, 0);
+ b43_phy_write(dev, 0x4b0, 0);
+ b43_phy_write(dev, 0x938, 0);
+ b43_phy_write(dev, 0x4b0, 0);
+ b43_phy_write(dev, 0x44e, 0);
+
+ b43_phy_set(dev, 0x567, 0x03);
+
+ b43_phy_set(dev, 0x44a, 0x44);
+ b43_phy_write(dev, 0x44a, 0x80);
+
+ if (!(dev->dev->bus_sprom->boardflags_lo & B43_BFL_FEM))
+ ; /* TODO */
+ b43_phy_maskset(dev, 0x634, ~0xff, 0xc);
+ if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_FEM) {
+ b43_phy_maskset(dev, 0x634, ~0xff, 0xa);
+ b43_phy_write(dev, 0x910, 0x1);
+ }
+
+ b43_phy_write(dev, 0x910, 0x1);
+
+ b43_phy_maskset(dev, 0x448, ~0x300, 0x100);
+ b43_phy_maskset(dev, 0x608, ~0xff, 0x17);
+ b43_phy_maskset(dev, 0x604, ~0x7ff, 0x3ea);
+}
+
+/* wlc_lcnphy_bu_tweaks */
+static void b43_phy_lcn_bu_tweaks(struct b43_wldev *dev)
+{
+ b43_phy_set(dev, 0x805, 0x1);
+
+ b43_phy_maskset(dev, 0x42f, ~0x7, 0x3);
+ b43_phy_maskset(dev, 0x030, ~0x7, 0x3);
+
+ b43_phy_write(dev, 0x414, 0x1e10);
+ b43_phy_write(dev, 0x415, 0x0640);
+
+ b43_phy_maskset(dev, 0x4df, (u16) ~0xff00, 0xf700);
+
+ b43_phy_set(dev, 0x44a, 0x44);
+ b43_phy_write(dev, 0x44a, 0x80);
+
+ b43_phy_maskset(dev, 0x434, ~0xff, 0xfd);
+ b43_phy_maskset(dev, 0x420, ~0xff, 0x10);
+
+ if (dev->dev->bus_sprom->board_rev >= 0x1204)
+ b43_radio_set(dev, 0x09b, 0xf0);
+
+ b43_phy_write(dev, 0x7d6, 0x0902);
+
+ b43_phy_maskset(dev, 0x429, ~0xf, 0x9);
+ b43_phy_maskset(dev, 0x429, ~(0x3f << 4), 0xe << 4);
+
+ if (dev->phy.rev == 1) {
+ b43_phy_maskset(dev, 0x423, ~0xff, 0x46);
+ b43_phy_maskset(dev, 0x411, ~0xff, 1);
+ b43_phy_set(dev, 0x434, 0xff); /* FIXME: update to wl */
+
+ /* TODO: wl operates on PHY 0x416, brcmsmac is outdated here */
+
+ b43_phy_maskset(dev, 0x656, ~0xf, 2);
+ b43_phy_set(dev, 0x44d, 4);
+
+ b43_radio_set(dev, 0x0f7, 0x4);
+ b43_radio_mask(dev, 0x0f1, ~0x3);
+ b43_radio_maskset(dev, 0x0f2, ~0xf8, 0x90);
+ b43_radio_maskset(dev, 0x0f3, ~0x3, 0x2);
+ b43_radio_maskset(dev, 0x0f3, ~0xf0, 0xa0);
+
+ b43_radio_set(dev, 0x11f, 0x2);
+
+ b43_phy_lcn_clear_tx_power_offsets(dev);
+
+ /* TODO: something more? */
+ }
+}
+
+/* wlc_lcnphy_vbat_temp_sense_setup */
+static void b43_phy_lcn_sense_setup(struct b43_wldev *dev,
+ enum lcn_sense_type sense_type)
+{
+ u8 auxpga_vmidcourse, auxpga_vmidfine, auxpga_gain;
+ u16 auxpga_vmid;
+ u8 tx_pwr_idx;
+ u8 i;
+
+ u16 save_radio_regs[6][2] = {
+ { 0x007, 0 }, { 0x0ff, 0 }, { 0x11f, 0 }, { 0x005, 0 },
+ { 0x025, 0 }, { 0x112, 0 },
+ };
+ u16 save_phy_regs[14][2] = {
+ { 0x503, 0 }, { 0x4a4, 0 }, { 0x4d0, 0 }, { 0x4d9, 0 },
+ { 0x4da, 0 }, { 0x4a6, 0 }, { 0x938, 0 }, { 0x939, 0 },
+ { 0x4d8, 0 }, { 0x4d0, 0 }, { 0x4d7, 0 }, { 0x4a5, 0 },
+ { 0x40d, 0 }, { 0x4a2, 0 },
+ };
+ u16 save_radio_4a4;
+
+ msleep(1);
+
+ /* Save */
+ for (i = 0; i < 6; i++)
+ save_radio_regs[i][1] = b43_radio_read(dev,
+ save_radio_regs[i][0]);
+ for (i = 0; i < 14; i++)
+ save_phy_regs[i][1] = b43_phy_read(dev, save_phy_regs[i][0]);
+ b43_mac_suspend(dev);
+ save_radio_4a4 = b43_radio_read(dev, 0x4a4);
+ /* wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); */
+ tx_pwr_idx = dev->phy.lcn->tx_pwr_curr_idx;
+
+ /* Setup */
+ /* TODO: wlc_lcnphy_set_tx_pwr_by_index(pi, 127); */
+ b43_radio_set(dev, 0x007, 0x1);
+ b43_radio_set(dev, 0x0ff, 0x10);
+ b43_radio_set(dev, 0x11f, 0x4);
+
+ b43_phy_mask(dev, 0x503, ~0x1);
+ b43_phy_mask(dev, 0x503, ~0x4);
+ b43_phy_mask(dev, 0x4a4, ~0x4000);
+ b43_phy_mask(dev, 0x4a4, (u16) ~0x8000);
+ b43_phy_mask(dev, 0x4d0, ~0x20);
+ b43_phy_set(dev, 0x4a5, 0xff);
+ b43_phy_maskset(dev, 0x4a5, ~0x7000, 0x5000);
+ b43_phy_mask(dev, 0x4a5, ~0x700);
+ b43_phy_maskset(dev, 0x40d, ~0xff, 64);
+ b43_phy_maskset(dev, 0x40d, ~0x700, 0x600);
+ b43_phy_maskset(dev, 0x4a2, ~0xff, 64);
+ b43_phy_maskset(dev, 0x4a2, ~0x700, 0x600);
+ b43_phy_maskset(dev, 0x4d9, ~0x70, 0x20);
+ b43_phy_maskset(dev, 0x4d9, ~0x700, 0x300);
+ b43_phy_maskset(dev, 0x4d9, ~0x7000, 0x1000);
+ b43_phy_mask(dev, 0x4da, ~0x1000);
+ b43_phy_set(dev, 0x4da, 0x2000);
+ b43_phy_set(dev, 0x4a6, 0x8000);
+
+ b43_radio_write(dev, 0x025, 0xc);
+ b43_radio_set(dev, 0x005, 0x8);
+ b43_phy_set(dev, 0x938, 0x4);
+ b43_phy_set(dev, 0x939, 0x4);
+ b43_phy_set(dev, 0x4a4, 0x1000);
+
+ /* FIXME: don't hardcode */
+ b43_lcntab_write(dev, B43_LCNTAB16(0x8, 0x6), 0x640);
+
+ switch (sense_type) {
+ case B43_SENSE_TEMP:
+ b43_phy_set(dev, 0x4d7, 0x8);
+ b43_phy_maskset(dev, 0x4d7, ~0x7000, 0x1000);
+ auxpga_vmidcourse = 8;
+ auxpga_vmidfine = 0x4;
+ auxpga_gain = 2;
+ b43_radio_set(dev, 0x082, 0x20);
+ break;
+ case B43_SENSE_VBAT:
+ b43_phy_set(dev, 0x4d7, 0x8);
+ b43_phy_maskset(dev, 0x4d7, ~0x7000, 0x3000);
+ auxpga_vmidcourse = 7;
+ auxpga_vmidfine = 0xa;
+ auxpga_gain = 2;
+ break;
+ }
+ auxpga_vmid = (0x200 | (auxpga_vmidcourse << 4) | auxpga_vmidfine);
+
+ b43_phy_set(dev, 0x4d8, 0x1);
+ b43_phy_maskset(dev, 0x4d8, ~(0x3ff << 2), auxpga_vmid << 2);
+ b43_phy_set(dev, 0x4d8, 0x2);
+ b43_phy_maskset(dev, 0x4d8, ~(0x7 << 12), auxpga_gain << 12);
+ b43_phy_set(dev, 0x4d0, 0x20);
+ b43_radio_write(dev, 0x112, 0x6);
+
+ b43_dummy_transmission(dev, true, false);
+ /* Wait if not done */
+ if (!(b43_phy_read(dev, 0x476) & 0x8000))
+ udelay(10);
+
+ /* Restore */
+ for (i = 0; i < 6; i++)
+ b43_radio_write(dev, save_radio_regs[i][0],
+ save_radio_regs[i][1]);
+ for (i = 0; i < 14; i++)
+ b43_phy_write(dev, save_phy_regs[i][0], save_phy_regs[i][1]);
+ /* TODO: wlc_lcnphy_set_tx_pwr_by_index(tx_pwr_idx) */
+ b43_radio_write(dev, 0x4a4, save_radio_4a4);
+
+ b43_mac_enable(dev);
+
+ msleep(1);
+}
+
+static bool b43_phy_lcn_load_tx_iir_cck_filter(struct b43_wldev *dev,
+ u8 filter_type)
+{
+ int i, j;
+ u16 phy_regs[] = { 0x910, 0x91e, 0x91f, 0x924, 0x925, 0x926, 0x920,
+ 0x921, 0x927, 0x928, 0x929, 0x922, 0x923, 0x930,
+ 0x931, 0x932 };
+ /* Table is from brcmsmac, values for type 25 were outdated, probably
+ * others need updating too */
+ struct lcn_tx_iir_filter tx_iir_filters_cck[] = {
+ { 0, { 1, 415, 1874, 64, 128, 64, 792, 1656, 64, 128, 64, 778,
+ 1582, 64, 128, 64 } },
+ { 1, { 1, 402, 1847, 259, 59, 259, 671, 1794, 68, 54, 68, 608,
+ 1863, 93, 167, 93 } },
+ { 2, { 1, 415, 1874, 64, 128, 64, 792, 1656, 192, 384, 192,
+ 778, 1582, 64, 128, 64 } },
+ { 3, { 1, 302, 1841, 129, 258, 129, 658, 1720, 205, 410, 205,
+ 754, 1760, 170, 340, 170 } },
+ { 20, { 1, 360, 1884, 242, 1734, 242, 752, 1720, 205, 1845, 205,
+ 767, 1760, 256, 185, 256 } },
+ { 21, { 1, 360, 1884, 149, 1874, 149, 752, 1720, 205, 1883, 205,
+ 767, 1760, 256, 273, 256 } },
+ { 22, { 1, 360, 1884, 98, 1948, 98, 752, 1720, 205, 1924, 205,
+ 767, 1760, 256, 352, 256 } },
+ { 23, { 1, 350, 1884, 116, 1966, 116, 752, 1720, 205, 2008, 205,
+ 767, 1760, 128, 233, 128 } },
+ { 24, { 1, 325, 1884, 32, 40, 32, 756, 1720, 256, 471, 256, 766,
+ 1760, 256, 1881, 256 } },
+ { 25, { 1, 299, 1884, 51, 64, 51, 736, 1720, 256, 471, 256, 765,
+ 1760, 262, 1878, 262 } },
+ /* brcmsmac version { 25, { 1, 299, 1884, 51, 64, 51, 736, 1720,
+ * 256, 471, 256, 765, 1760, 256, 1881, 256 } }, */
+ { 26, { 1, 277, 1943, 39, 117, 88, 637, 1838, 64, 192, 144, 614,
+ 1864, 128, 384, 288 } },
+ { 27, { 1, 245, 1943, 49, 147, 110, 626, 1838, 256, 768, 576,
+ 613, 1864, 128, 384, 288 } },
+ { 30, { 1, 302, 1841, 61, 122, 61, 658, 1720, 205, 410, 205,
+ 754, 1760, 170, 340, 170 } },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(tx_iir_filters_cck); i++) {
+ if (tx_iir_filters_cck[i].type == filter_type) {
+ for (j = 0; j < 16; j++)
+ b43_phy_write(dev, phy_regs[j],
+ tx_iir_filters_cck[i].values[j]);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool b43_phy_lcn_load_tx_iir_ofdm_filter(struct b43_wldev *dev,
+ u8 filter_type)
+{
+ int i, j;
+ u16 phy_regs[] = { 0x90f, 0x900, 0x901, 0x906, 0x907, 0x908, 0x902,
+ 0x903, 0x909, 0x90a, 0x90b, 0x904, 0x905, 0x90c,
+ 0x90d, 0x90e };
+ struct lcn_tx_iir_filter tx_iir_filters_ofdm[] = {
+ { 0, { 0, 0xa2, 0x0, 0x100, 0x100, 0x0, 0x0, 0x0, 0x100, 0x0,
+ 0x0, 0x278, 0xfea0, 0x80, 0x100, 0x80 } },
+ { 1, { 0, 374, 0xFF79, 16, 32, 16, 799, 0xFE74, 50, 32, 50, 750,
+ 0xFE2B, 212, 0xFFCE, 212 } },
+ { 2, { 0, 375, 0xFF16, 37, 76, 37, 799, 0xFE74, 32, 20, 32, 748,
+ 0xFEF2, 128, 0xFFE2, 128 } },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(tx_iir_filters_ofdm); i++) {
+ if (tx_iir_filters_ofdm[i].type == filter_type) {
+ for (j = 0; j < 16; j++)
+ b43_phy_write(dev, phy_regs[j],
+ tx_iir_filters_ofdm[i].values[j]);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* wlc_lcnphy_set_tx_gain_override */
+static void b43_phy_lcn_set_tx_gain_override(struct b43_wldev *dev, bool enable)
+{
+ b43_phy_maskset(dev, 0x4b0, ~(0x1 << 7), enable << 7);
+ b43_phy_maskset(dev, 0x4b0, ~(0x1 << 14), enable << 14);
+ b43_phy_maskset(dev, 0x43b, ~(0x1 << 6), enable << 6);
+}
+
+/* wlc_lcnphy_set_tx_gain */
+static void b43_phy_lcn_set_tx_gain(struct b43_wldev *dev,
+ struct lcn_tx_gains *target_gains)
+{
+ u16 pa_gain = b43_phy_lcn_get_pa_gain(dev);
+
+ b43_phy_write(dev, 0x4b5,
+ (target_gains->gm_gain | (target_gains->pga_gain << 8)));
+ b43_phy_maskset(dev, 0x4fb, ~0x7fff,
+ (target_gains->pad_gain | (pa_gain << 8)));
+ b43_phy_write(dev, 0x4fc,
+ (target_gains->gm_gain | (target_gains->pga_gain << 8)));
+ b43_phy_maskset(dev, 0x4fd, ~0x7fff,
+ (target_gains->pad_gain | (pa_gain << 8)));
+
+ b43_phy_lcn_set_dac_gain(dev, target_gains->dac_gain);
+ b43_phy_lcn_set_tx_gain_override(dev, true);
+}
+
+/* wlc_lcnphy_tx_pwr_ctrl_init */
+static void b43_phy_lcn_tx_pwr_ctl_init(struct b43_wldev *dev)
+{
+ struct lcn_tx_gains tx_gains;
+ u8 bbmult;
+
+ b43_mac_suspend(dev);
+
+ if (!dev->phy.lcn->hw_pwr_ctl_capable) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ tx_gains.gm_gain = 4;
+ tx_gains.pga_gain = 12;
+ tx_gains.pad_gain = 12;
+ tx_gains.dac_gain = 0;
+ bbmult = 150;
+ } else {
+ tx_gains.gm_gain = 7;
+ tx_gains.pga_gain = 15;
+ tx_gains.pad_gain = 14;
+ tx_gains.dac_gain = 0;
+ bbmult = 150;
+ }
+ b43_phy_lcn_set_tx_gain(dev, &tx_gains);
+ b43_phy_lcn_set_bbmult(dev, bbmult);
+ b43_phy_lcn_sense_setup(dev, B43_SENSE_TEMP);
+ } else {
+ b43err(dev->wl, "TX power control not supported for this HW\n");
+ }
+
+ b43_mac_enable(dev);
+}
+
+/* wlc_lcnphy_txrx_spur_avoidance_mode */
+static void b43_phy_lcn_txrx_spur_avoidance_mode(struct b43_wldev *dev,
+ bool enable)
+{
+ if (enable) {
+ b43_phy_write(dev, 0x942, 0x7);
+ b43_phy_write(dev, 0x93b, ((1 << 13) + 23));
+ b43_phy_write(dev, 0x93c, ((1 << 13) + 1989));
+
+ b43_phy_write(dev, 0x44a, 0x084);
+ b43_phy_write(dev, 0x44a, 0x080);
+ b43_phy_write(dev, 0x6d3, 0x2222);
+ b43_phy_write(dev, 0x6d3, 0x2220);
+ } else {
+ b43_phy_write(dev, 0x942, 0x0);
+ b43_phy_write(dev, 0x93b, ((0 << 13) + 23));
+ b43_phy_write(dev, 0x93c, ((0 << 13) + 1989));
+ }
+ b43_phy_switch_macfreq(dev, enable);
+}
+
+/**************************************************
+ * Channel switching ops.
+ **************************************************/
+
+/* wlc_lcnphy_set_chanspec_tweaks */
+static void b43_phy_lcn_set_channel_tweaks(struct b43_wldev *dev, int channel)
+{
+ struct bcma_drv_cc *cc = &dev->dev->bdev->bus->drv_cc;
+
+ b43_phy_maskset(dev, 0x448, ~0x300, (channel == 14) ? 0x200 : 0x100);
+
+ if (channel == 1 || channel == 2 || channel == 3 || channel == 4 ||
+ channel == 9 || channel == 10 || channel == 11 || channel == 12) {
+ bcma_chipco_pll_write(cc, 0x2, 0x03000c04);
+ bcma_chipco_pll_maskset(cc, 0x3, 0x00ffffff, 0x0);
+ bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
+
+ bcma_cc_set32(cc, BCMA_CC_PMU_CTL, 0x400);
+
+ b43_phy_write(dev, 0x942, 0);
+
+ b43_phy_lcn_txrx_spur_avoidance_mode(dev, false);
+ b43_phy_maskset(dev, 0x424, (u16) ~0xff00, 0x1b00);
+ b43_phy_write(dev, 0x425, 0x5907);
+ } else {
+ bcma_chipco_pll_write(cc, 0x2, 0x03140c04);
+ bcma_chipco_pll_maskset(cc, 0x3, 0x00ffffff, 0x333333);
+ bcma_chipco_pll_write(cc, 0x4, 0x202c2820);
+
+ bcma_cc_set32(cc, BCMA_CC_PMU_CTL, 0x400);
+
+ b43_phy_write(dev, 0x942, 0);
+
+ b43_phy_lcn_txrx_spur_avoidance_mode(dev, true);
+ b43_phy_maskset(dev, 0x424, (u16) ~0xff00, 0x1f00);
+ b43_phy_write(dev, 0x425, 0x590a);
+ }
+
+ b43_phy_set(dev, 0x44a, 0x44);
+ b43_phy_write(dev, 0x44a, 0x80);
+}
+
+/* wlc_phy_chanspec_set_lcnphy */
+static int b43_phy_lcn_set_channel(struct b43_wldev *dev,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type)
+{
+ static const u16 sfo_cfg[14][2] = {
+ {965, 1087}, {967, 1085}, {969, 1082}, {971, 1080}, {973, 1078},
+ {975, 1076}, {977, 1073}, {979, 1071}, {981, 1069}, {983, 1067},
+ {985, 1065}, {987, 1063}, {989, 1060}, {994, 1055},
+ };
+
+ b43_phy_lcn_set_channel_tweaks(dev, channel->hw_value);
+
+ b43_phy_set(dev, 0x44a, 0x44);
+ b43_phy_write(dev, 0x44a, 0x80);
+
+ b43_radio_2064_channel_setup(dev);
+ mdelay(1);
+
+ b43_phy_lcn_afe_set_unset(dev);
+
+ b43_phy_write(dev, 0x657, sfo_cfg[channel->hw_value - 1][0]);
+ b43_phy_write(dev, 0x658, sfo_cfg[channel->hw_value - 1][1]);
+
+ if (channel->hw_value == 14) {
+ b43_phy_maskset(dev, 0x448, ~(0x3 << 8), (2) << 8);
+ b43_phy_lcn_load_tx_iir_cck_filter(dev, 3);
+ } else {
+ b43_phy_maskset(dev, 0x448, ~(0x3 << 8), (1) << 8);
+ /* brcmsmac uses filter_type 2, we follow wl with 25 */
+ b43_phy_lcn_load_tx_iir_cck_filter(dev, 25);
+ }
+ /* brcmsmac uses filter_type 2, we follow wl with 0 */
+ b43_phy_lcn_load_tx_iir_ofdm_filter(dev, 0);
+
+ b43_phy_maskset(dev, 0x4eb, ~(0x7 << 3), 0x1 << 3);
+
+ return 0;
+}
+
+/**************************************************
+ * Basic PHY ops.
+ **************************************************/
+
+static int b43_phy_lcn_op_allocate(struct b43_wldev *dev)
+{
+ struct b43_phy_lcn *phy_lcn;
+
+ phy_lcn = kzalloc(sizeof(*phy_lcn), GFP_KERNEL);
+ if (!phy_lcn)
+ return -ENOMEM;
+ dev->phy.lcn = phy_lcn;
+
+ return 0;
+}
+
+static void b43_phy_lcn_op_free(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_lcn *phy_lcn = phy->lcn;
+
+ kfree(phy_lcn);
+ phy->lcn = NULL;
+}
+
+static void b43_phy_lcn_op_prepare_structs(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_lcn *phy_lcn = phy->lcn;
+
+ memset(phy_lcn, 0, sizeof(*phy_lcn));
+}
+
+/* wlc_phy_init_lcnphy */
+static int b43_phy_lcn_op_init(struct b43_wldev *dev)
+{
+ struct bcma_drv_cc *cc = &dev->dev->bdev->bus->drv_cc;
+
+ b43_phy_set(dev, 0x44a, 0x80);
+ b43_phy_mask(dev, 0x44a, 0x7f);
+ b43_phy_set(dev, 0x6d1, 0x80);
+ b43_phy_write(dev, 0x6d0, 0x7);
+
+ b43_phy_lcn_afe_set_unset(dev);
+
+ b43_phy_write(dev, 0x60a, 0xa0);
+ b43_phy_write(dev, 0x46a, 0x19);
+ b43_phy_maskset(dev, 0x663, 0xFF00, 0x64);
+
+ b43_phy_lcn_tables_init(dev);
+
+ b43_phy_lcn_rev0_baseband_init(dev);
+ b43_phy_lcn_bu_tweaks(dev);
+
+ if (dev->phy.radio_ver == 0x2064)
+ b43_radio_2064_init(dev);
+ else
+ B43_WARN_ON(1);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_phy_lcn_tx_pwr_ctl_init(dev);
+
+ b43_switch_channel(dev, dev->phy.channel);
+
+ bcma_chipco_regctl_maskset(cc, 0, 0xf, 0x9);
+ bcma_chipco_chipctl_maskset(cc, 0, 0, 0x03cddddd);
+
+ /* TODO */
+
+ b43_phy_set(dev, 0x448, 0x4000);
+ udelay(100);
+ b43_phy_mask(dev, 0x448, ~0x4000);
+
+ /* TODO */
+
+ return 0;
+}
+
+static void b43_phy_lcn_op_software_rfkill(struct b43_wldev *dev,
+ bool blocked)
+{
+ if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
+ b43err(dev->wl, "MAC not suspended\n");
+
+ if (blocked) {
+ b43_phy_mask(dev, B43_PHY_LCN_RF_CTL2, ~0x7c00);
+ b43_phy_set(dev, B43_PHY_LCN_RF_CTL1, 0x1f00);
+
+ b43_phy_mask(dev, B43_PHY_LCN_RF_CTL5, ~0x7f00);
+ b43_phy_mask(dev, B43_PHY_LCN_RF_CTL4, ~0x2);
+ b43_phy_set(dev, B43_PHY_LCN_RF_CTL3, 0x808);
+
+ b43_phy_mask(dev, B43_PHY_LCN_RF_CTL7, ~0x8);
+ b43_phy_set(dev, B43_PHY_LCN_RF_CTL6, 0x8);
+ } else {
+ b43_phy_mask(dev, B43_PHY_LCN_RF_CTL1, ~0x1f00);
+ b43_phy_mask(dev, B43_PHY_LCN_RF_CTL3, ~0x808);
+ b43_phy_mask(dev, B43_PHY_LCN_RF_CTL6, ~0x8);
+ }
+}
+
+static void b43_phy_lcn_op_switch_analog(struct b43_wldev *dev, bool on)
+{
+ if (on) {
+ b43_phy_mask(dev, B43_PHY_LCN_AFE_CTL1, ~0x7);
+ } else {
+ b43_phy_set(dev, B43_PHY_LCN_AFE_CTL2, 0x7);
+ b43_phy_set(dev, B43_PHY_LCN_AFE_CTL1, 0x7);
+ }
+}
+
+static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev,
+ unsigned int new_channel)
+{
+ struct ieee80211_channel *channel = dev->wl->hw->conf.channel;
+ enum nl80211_channel_type channel_type = dev->wl->hw->conf.channel_type;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if ((new_channel < 1) || (new_channel > 14))
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ return b43_phy_lcn_set_channel(dev, channel, channel_type);
+}
+
+static unsigned int b43_phy_lcn_op_get_default_chan(struct b43_wldev *dev)
+{
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ return 1;
+ return 36;
+}
+
+static enum b43_txpwr_result
+b43_phy_lcn_op_recalc_txpower(struct b43_wldev *dev, bool ignore_tssi)
+{
+ return B43_TXPWR_RES_DONE;
+}
+
+static void b43_phy_lcn_op_adjust_txpower(struct b43_wldev *dev)
+{
+}
+
+/**************************************************
+ * R/W ops.
+ **************************************************/
+
+static u16 b43_phy_lcn_op_read(struct b43_wldev *dev, u16 reg)
+{
+ b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+ return b43_read16(dev, B43_MMIO_PHY_DATA);
+}
+
+static void b43_phy_lcn_op_write(struct b43_wldev *dev, u16 reg, u16 value)
+{
+ b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+ b43_write16(dev, B43_MMIO_PHY_DATA, value);
+}
+
+static void b43_phy_lcn_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask,
+ u16 set)
+{
+ b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+ b43_write16(dev, B43_MMIO_PHY_DATA,
+ (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set);
+}
+
+static u16 b43_phy_lcn_op_radio_read(struct b43_wldev *dev, u16 reg)
+{
+ /* LCN-PHY needs 0x200 for read access */
+ reg |= 0x200;
+
+ b43_write16(dev, B43_MMIO_RADIO24_CONTROL, reg);
+ return b43_read16(dev, B43_MMIO_RADIO24_DATA);
+}
+
+static void b43_phy_lcn_op_radio_write(struct b43_wldev *dev, u16 reg,
+ u16 value)
+{
+ b43_write16(dev, B43_MMIO_RADIO24_CONTROL, reg);
+ b43_write16(dev, B43_MMIO_RADIO24_DATA, value);
+}
+
/**************************************************
* PHY ops struct.
**************************************************/
const struct b43_phy_operations b43_phyops_lcn = {
- /*
.allocate = b43_phy_lcn_op_allocate,
.free = b43_phy_lcn_op_free,
.prepare_structs = b43_phy_lcn_op_prepare_structs,
@@ -48,5 +898,4 @@ const struct b43_phy_operations b43_phyops_lcn = {
.get_default_chan = b43_phy_lcn_op_get_default_chan,
.recalc_txpower = b43_phy_lcn_op_recalc_txpower,
.adjust_txpower = b43_phy_lcn_op_adjust_txpower,
- */
};
diff --git a/drivers/net/wireless/b43/phy_lcn.h b/drivers/net/wireless/b43/phy_lcn.h
index c046c2a6cab..6a7092e13ff 100644
--- a/drivers/net/wireless/b43/phy_lcn.h
+++ b/drivers/net/wireless/b43/phy_lcn.h
@@ -4,11 +4,28 @@
#include "phy_common.h"
+#define B43_PHY_LCN_AFE_CTL1 B43_PHY_OFDM(0x03B)
+#define B43_PHY_LCN_AFE_CTL2 B43_PHY_OFDM(0x03C)
+#define B43_PHY_LCN_RF_CTL1 B43_PHY_OFDM(0x04C)
+#define B43_PHY_LCN_RF_CTL2 B43_PHY_OFDM(0x04D)
+#define B43_PHY_LCN_TABLE_ADDR B43_PHY_OFDM(0x055) /* Table address */
+#define B43_PHY_LCN_TABLE_DATALO B43_PHY_OFDM(0x056) /* Table data low */
+#define B43_PHY_LCN_TABLE_DATAHI B43_PHY_OFDM(0x057) /* Table data high */
+#define B43_PHY_LCN_RF_CTL3 B43_PHY_OFDM(0x0B0)
+#define B43_PHY_LCN_RF_CTL4 B43_PHY_OFDM(0x0B1)
+#define B43_PHY_LCN_RF_CTL5 B43_PHY_OFDM(0x0B7)
+#define B43_PHY_LCN_RF_CTL6 B43_PHY_OFDM(0x0F9)
+#define B43_PHY_LCN_RF_CTL7 B43_PHY_OFDM(0x0FA)
+
+
struct b43_phy_lcn {
+ bool hw_pwr_ctl;
+ bool hw_pwr_ctl_capable;
+ u8 tx_pwr_curr_idx;
};
struct b43_phy_operations;
extern const struct b43_phy_operations b43_phyops_lcn;
-#endif /* B43_PHY_LCN_H_ */ \ No newline at end of file
+#endif /* B43_PHY_LCN_H_ */
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 3b46360da99..b17d9b6c33a 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -4,6 +4,7 @@
IEEE 802.11n PHY support
Copyright (c) 2008 Michael Buesch <m@bues.ch>
+ Copyright (c) 2010-2011 Rafał Miłecki <zajec5@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -77,6 +78,7 @@ enum b43_nphy_rssi_type {
B43_NPHY_RSSI_TBD,
};
+/* TODO: reorder functions */
static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev,
bool enable);
static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
@@ -87,6 +89,14 @@ static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
u16 value, u8 core, bool off);
static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
u16 value, u8 core);
+static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev);
+
+static inline bool b43_nphy_ipa(struct b43_wldev *dev)
+{
+ enum ieee80211_band band = b43_current_band(dev->wl);
+ return ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
+ (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ));
+}
void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
{//TODO
@@ -248,15 +258,25 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
{
struct b43_phy_n *nphy = dev->phy.n;
u8 i;
- u16 tmp;
+ u16 bmask, val, tmp;
+ enum ieee80211_band band = b43_current_band(dev->wl);
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
nphy->txpwrctrl = enable;
if (!enable) {
- if (dev->phy.rev >= 3)
- ; /* TODO */
+ if (dev->phy.rev >= 3 &&
+ (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) &
+ (B43_NPHY_TXPCTL_CMD_COEFF |
+ B43_NPHY_TXPCTL_CMD_HWPCTLEN |
+ B43_NPHY_TXPCTL_CMD_PCTLEN))) {
+ /* We disable enabled TX pwr ctl, save it's state */
+ nphy->tx_pwr_idx[0] = b43_phy_read(dev,
+ B43_NPHY_C1_TXPCTL_STAT) & 0x7f;
+ nphy->tx_pwr_idx[1] = b43_phy_read(dev,
+ B43_NPHY_C2_TXPCTL_STAT) & 0x7f;
+ }
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840);
for (i = 0; i < 84; i++)
@@ -285,10 +305,67 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);
- if (dev->phy.rev < 2 && 0)
- ; /* TODO */
+ if (dev->phy.rev < 2 && dev->phy.is_40mhz)
+ b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW);
} else {
- b43err(dev->wl, "enabling tx pwr ctrl not implemented yet\n");
+ b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84,
+ nphy->adj_pwr_tbl);
+ b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84,
+ nphy->adj_pwr_tbl);
+
+ bmask = B43_NPHY_TXPCTL_CMD_COEFF |
+ B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+ /* wl does useless check for "enable" param here */
+ val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+ if (dev->phy.rev >= 3) {
+ bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+ if (val)
+ val |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+ }
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+ ~B43_NPHY_TXPCTL_CMD_INIT, 0x64);
+ if (dev->phy.rev > 1)
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
+ ~B43_NPHY_TXPCTL_INIT_PIDXI1,
+ 0x64);
+ }
+
+ if (dev->phy.rev >= 3) {
+ if (nphy->tx_pwr_idx[0] != 128 &&
+ nphy->tx_pwr_idx[1] != 128) {
+ /* Recover TX pwr ctl state */
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+ ~B43_NPHY_TXPCTL_CMD_INIT,
+ nphy->tx_pwr_idx[0]);
+ if (dev->phy.rev > 1)
+ b43_phy_maskset(dev,
+ B43_NPHY_TXPCTL_INIT,
+ ~0xff, nphy->tx_pwr_idx[1]);
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100);
+ } else {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000);
+ }
+
+ if (dev->phy.rev == 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b);
+ else if (dev->phy.rev < 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40);
+
+ if (dev->phy.rev < 2 && dev->phy.is_40mhz)
+ b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW);
+
+ if (b43_nphy_ipa(dev)) {
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4);
+ }
}
if (nphy->hang_avoid)
@@ -369,22 +446,23 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
else
b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D10 + i);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, radio_gain);
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C57);
- tmp = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain);
+ tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57));
if (i == 0)
tmp = (tmp & 0x00FF) | (bbmult << 8);
else
tmp = (tmp & 0xFF00) | bbmult;
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C57);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, tmp);
-
- if (0)
- ; /* TODO */
+ b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp);
+
+ if (b43_nphy_ipa(dev)) {
+ u32 tmp32;
+ u16 reg = (i == 0) ?
+ B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
+ tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i, txpi[i]));
+ b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
+ b43_phy_set(dev, reg, 0x4);
+ }
}
b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT);
@@ -393,6 +471,57 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
b43_nphy_stay_in_carrier_search(dev, 0);
}
+static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ const u32 *table = NULL;
+#if 0
+ TODO: b43_ntab_papd_pga_gain_delta_ipa_2*
+ u32 rfpwr_offset;
+ u8 pga_gain;
+ int i;
+#endif
+
+ if (phy->rev >= 3) {
+ if (b43_nphy_ipa(dev)) {
+ table = b43_nphy_get_ipa_gain_table(dev);
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (phy->rev == 3)
+ table = b43_ntab_tx_gain_rev3_5ghz;
+ if (phy->rev == 4)
+ table = b43_ntab_tx_gain_rev4_5ghz;
+ else
+ table = b43_ntab_tx_gain_rev5plus_5ghz;
+ } else {
+ table = b43_ntab_tx_gain_rev3plus_2ghz;
+ }
+ }
+ } else {
+ table = b43_ntab_tx_gain_rev0_1_2;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
+ b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);
+
+ if (phy->rev >= 3) {
+#if 0
+ nphy->gmval = (table[0] >> 16) & 0x7000;
+
+ for (i = 0; i < 128; i++) {
+ pga_gain = (table[i] >> 24) & 0xF;
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
+ else
+ rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain];
+ b43_ntab_write(dev, B43_NTAB32(26, 576 + i),
+ rfpwr_offset);
+ b43_ntab_write(dev, B43_NTAB32(27, 576 + i),
+ rfpwr_offset);
+ }
+#endif
+ }
+}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
static void b43_radio_2055_setup(struct b43_wldev *dev,
@@ -581,14 +710,10 @@ static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
{
- struct b43_phy_n *nphy = dev->phy.n;
u16 tmp;
- enum ieee80211_band band = b43_current_band(dev->wl);
- bool ipa = (nphy->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
- (nphy->ipa5g_on && band == IEEE80211_BAND_5GHZ);
if (dev->phy.rev >= 3) {
- if (ipa) {
+ if (b43_nphy_ipa(dev)) {
tmp = 4;
b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
(((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
@@ -600,49 +725,17 @@ static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
}
}
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
-static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force)
-{
- u32 tmp;
-
- if (dev->phy.type != B43_PHYTYPE_N)
- return;
-
- switch (dev->dev->bus_type) {
-#ifdef CONFIG_B43_BCMA
- case B43_BUS_BCMA:
- tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
- if (force)
- tmp |= BCMA_IOCTL_FGC;
- else
- tmp &= ~BCMA_IOCTL_FGC;
- bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
- break;
-#endif
-#ifdef CONFIG_B43_SSB
- case B43_BUS_SSB:
- tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
- if (force)
- tmp |= SSB_TMSLOW_FGC;
- else
- tmp &= ~SSB_TMSLOW_FGC;
- ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
- break;
-#endif
- }
-}
-
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
static void b43_nphy_reset_cca(struct b43_wldev *dev)
{
u16 bbcfg;
- b43_nphy_bmac_clock_fgc(dev, 1);
+ b43_phy_force_clock(dev, 1);
bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
udelay(1);
b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
- b43_nphy_bmac_clock_fgc(dev, 0);
+ b43_phy_force_clock(dev, 0);
b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
}
@@ -931,11 +1024,7 @@ static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
{
u16 array[4];
- int i;
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C50);
- for (i = 0; i < 4; i++)
- array[i] = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array);
b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
@@ -1398,180 +1487,220 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
}
}
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
-static void b43_nphy_workarounds(struct b43_wldev *dev)
+static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
{
+ struct b43_phy_n *nphy = dev->phy.n;
struct ssb_sprom *sprom = dev->dev->bus_sprom;
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_n *nphy = phy->n;
-
- u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
- u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
- u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
- u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
+ /* TX to RX */
+ u8 tx2rx_events[9] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
+ u8 tx2rx_delays[9] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ /* RX to TX */
+ u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
+ 0x1F };
+ u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+ u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
+ u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
u16 tmp16;
u32 tmp32;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
- b43_nphy_classifier(dev, 1, 0);
- else
- b43_nphy_classifier(dev, 1, 1);
+ tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
+ tmp32 &= 0xffffff;
+ b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
+
+ b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
+
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
+ b43_phy_write(dev, 0x2AE, 0x000C);
+
+ /* TX to RX */
+ b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, 9);
+
+ /* RX to TX */
+ if (b43_nphy_ipa(dev))
+ b43_nphy_set_rf_sequence(dev, 1, rx2tx_events_ipa,
+ rx2tx_delays_ipa, 9);
+ if (nphy->hw_phyrxchain != 3 &&
+ nphy->hw_phyrxchain != nphy->hw_phytxchain) {
+ if (b43_nphy_ipa(dev)) {
+ rx2tx_delays[5] = 59;
+ rx2tx_delays[6] = 1;
+ rx2tx_events[7] = 0x1F;
+ }
+ b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, 9);
+ }
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
+ tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
+ 0x2 : 0x9C40;
+ b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
- b43_phy_set(dev, B43_NPHY_IQFLIP,
- B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
+ b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
- if (dev->phy.rev >= 3) {
- tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
- tmp32 &= 0xffffff;
- b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
+ b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
+ b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
- b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
- b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
- b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
- b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
- b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
- b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
+ b43_nphy_gain_ctrl_workarounds(dev);
- b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
- b43_phy_write(dev, 0x2AE, 0x000C);
+ b43_ntab_write(dev, B43_NTAB32(8, 0), 2);
+ b43_ntab_write(dev, B43_NTAB32(8, 16), 2);
- /* TODO */
+ /* TODO */
- tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
- 0x2 : 0x9C40;
- b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+
+ /* N PHY WAR TX Chain Update with hw_phytxchain as argument */
+
+ if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+ (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+ tmp32 = 0x00088888;
+ else
+ tmp32 = 0x88888888;
+ b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
+ b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
+ b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
+
+ if (dev->phy.rev == 4 &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
+ 0x70);
+ b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
+ 0x70);
+ }
- b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
+ b43_phy_write(dev, 0x224, 0x039C);
+ b43_phy_write(dev, 0x225, 0x0357);
+ b43_phy_write(dev, 0x226, 0x0317);
+ b43_phy_write(dev, 0x227, 0x02D7);
+ b43_phy_write(dev, 0x228, 0x039C);
+ b43_phy_write(dev, 0x229, 0x0357);
+ b43_phy_write(dev, 0x22A, 0x0317);
+ b43_phy_write(dev, 0x22B, 0x02D7);
+ b43_phy_write(dev, 0x22C, 0x039C);
+ b43_phy_write(dev, 0x22D, 0x0357);
+ b43_phy_write(dev, 0x22E, 0x0317);
+ b43_phy_write(dev, 0x22F, 0x02D7);
+}
- b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
- b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
+{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
- b43_nphy_gain_ctrl_workarounds(dev);
+ u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
+ u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
- b43_ntab_write(dev, B43_NTAB32(8, 0), 2);
- b43_ntab_write(dev, B43_NTAB32(8, 16), 2);
+ u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
+ u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
- /* TODO */
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
+ nphy->band5g_pwrgain) {
+ b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
+ b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
+ } else {
+ b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
+ b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
+ }
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
-
- /* N PHY WAR TX Chain Update with hw_phytxchain as argument */
-
- if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
- (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
- tmp32 = 0x00088888;
- else
- tmp32 = 0x88888888;
- b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
- b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
- b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
-
- if (dev->phy.rev == 4 &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
- 0x70);
- b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
- 0x70);
- }
+ b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
- b43_phy_write(dev, 0x224, 0x039C);
- b43_phy_write(dev, 0x225, 0x0357);
- b43_phy_write(dev, 0x226, 0x0317);
- b43_phy_write(dev, 0x227, 0x02D7);
- b43_phy_write(dev, 0x228, 0x039C);
- b43_phy_write(dev, 0x229, 0x0357);
- b43_phy_write(dev, 0x22A, 0x0317);
- b43_phy_write(dev, 0x22B, 0x02D7);
- b43_phy_write(dev, 0x22C, 0x039C);
- b43_phy_write(dev, 0x22D, 0x0357);
- b43_phy_write(dev, 0x22E, 0x0317);
- b43_phy_write(dev, 0x22F, 0x02D7);
- } else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
- nphy->band5g_pwrgain) {
- b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
- b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
- } else {
- b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
- b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
- }
+ if (dev->phy.rev < 2) {
+ b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
+ }
- b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
- b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
- b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
- if (dev->phy.rev < 2) {
- b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
- b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
- b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
- b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
- }
+ if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
+ dev->dev->board_type == 0x8B) {
+ delays1[0] = 0x1;
+ delays1[5] = 0x14;
+ }
+ b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
+ b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+ b43_nphy_gain_ctrl_workarounds(dev);
- if (sprom->boardflags2_lo & 0x100 &&
- dev->dev->board_type == 0x8B) {
- delays1[0] = 0x1;
- delays1[5] = 0x14;
- }
- b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
- b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
+ if (dev->phy.rev < 2) {
+ if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
+ b43_hf_write(dev, b43_hf_read(dev) |
+ B43_HF_MLADVW);
+ } else if (dev->phy.rev == 2) {
+ b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
+ b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
+ }
- b43_nphy_gain_ctrl_workarounds(dev);
+ if (dev->phy.rev < 2)
+ b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
+ ~B43_NPHY_SCRAM_SIGCTL_SCM);
+
+ /* Set phase track alpha and beta */
+ b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+
+ b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+ ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+
+ if (dev->phy.rev == 2)
+ b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
+ B43_NPHY_FINERX2_CGC_DECGC);
+}
- if (dev->phy.rev < 2) {
- if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
- b43_hf_write(dev, b43_hf_read(dev) |
- B43_HF_MLADVW);
- } else if (dev->phy.rev == 2) {
- b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
- b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
- }
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
+static void b43_nphy_workarounds(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
- if (dev->phy.rev < 2)
- b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
- ~B43_NPHY_SCRAM_SIGCTL_SCM);
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ b43_nphy_classifier(dev, 1, 0);
+ else
+ b43_nphy_classifier(dev, 1, 1);
- /* Set phase track alpha and beta */
- b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
- b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
- b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
- b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
- b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
- b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
- b43_phy_mask(dev, B43_NPHY_PIL_DW1,
- ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+ b43_phy_set(dev, B43_NPHY_IQFLIP,
+ B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
- if (dev->phy.rev == 2)
- b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
- B43_NPHY_FINERX2_CGC_DECGC);
- }
+ if (dev->phy.rev >= 3)
+ b43_nphy_workarounds_rev3plus(dev);
+ else
+ b43_nphy_workarounds_rev1_2(dev);
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 0);
@@ -2167,7 +2296,6 @@ static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
{
- struct b43_phy_n *nphy = dev->phy.n;
u8 i;
u16 reg, val;
@@ -2231,10 +2359,7 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
enum ieee80211_band band =
b43_current_band(dev->wl);
- if ((nphy->ipa2g_on &&
- band == IEEE80211_BAND_2GHZ) ||
- (nphy->ipa5g_on &&
- band == IEEE80211_BAND_5GHZ))
+ if (b43_nphy_ipa(dev))
val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
else
val = 0x11;
@@ -2609,8 +2734,8 @@ static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
{
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
if (dev->phy.rev >= 6) {
- /* TODO If the chip is 47162
- return txpwrctrl_tx_gain_ipa_rev5 */
+ if (dev->dev->chip_id == 47162)
+ return txpwrctrl_tx_gain_ipa_rev5;
return txpwrctrl_tx_gain_ipa_rev6;
} else if (dev->phy.rev >= 5) {
return txpwrctrl_tx_gain_ipa_rev5;
@@ -2860,10 +2985,7 @@ static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
enum ieee80211_band band =
b43_current_band(dev->wl);
- if ((nphy->ipa2g_on &&
- band == IEEE80211_BAND_2GHZ) ||
- (nphy->ipa5g_on &&
- band == IEEE80211_BAND_5GHZ)) {
+ if (b43_nphy_ipa(dev)) {
table = b43_nphy_get_ipa_gain_table(dev);
} else {
if (band == IEEE80211_BAND_5GHZ) {
@@ -3680,7 +3802,7 @@ int b43_phy_initn(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20);
b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20);
- if (sprom->boardflags2_lo & 0x100 ||
+ if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD ||
(dev->dev->board_vendor == PCI_VENDOR_ID_APPLE &&
dev->dev->board_type == 0x8B))
b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0);
@@ -3699,8 +3821,7 @@ int b43_phy_initn(struct b43_wldev *dev)
}
tmp2 = b43_current_band(dev->wl);
- if ((nphy->ipa2g_on && tmp2 == IEEE80211_BAND_2GHZ) ||
- (nphy->ipa5g_on && tmp2 == IEEE80211_BAND_5GHZ)) {
+ if (b43_nphy_ipa(dev)) {
b43_phy_set(dev, B43_NPHY_PAPD_EN0, 0x1);
b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ0, 0x007F,
nphy->papd_epsilon_offset[0] << 7);
@@ -3715,11 +3836,11 @@ int b43_phy_initn(struct b43_wldev *dev)
b43_nphy_workarounds(dev);
/* Reset CCA, in init code it differs a little from standard way */
- b43_nphy_bmac_clock_fgc(dev, 1);
+ b43_phy_force_clock(dev, 1);
tmp = b43_phy_read(dev, B43_NPHY_BBCFG);
b43_phy_write(dev, B43_NPHY_BBCFG, tmp | B43_NPHY_BBCFG_RSTCCA);
b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA);
- b43_nphy_bmac_clock_fgc(dev, 0);
+ b43_phy_force_clock(dev, 0);
b43_mac_phy_clock_set(dev, true);
@@ -3738,15 +3859,7 @@ int b43_phy_initn(struct b43_wldev *dev)
b43_nphy_tx_power_fix(dev);
/* TODO N PHY TX Power Control Idle TSSI */
/* TODO N PHY TX Power Control Setup */
-
- if (phy->rev >= 3) {
- /* TODO */
- } else {
- b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128,
- b43_ntab_tx_gain_rev0_1_2);
- b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128,
- b43_ntab_tx_gain_rev0_1_2);
- }
+ b43_nphy_tx_gain_table_upload(dev);
if (nphy->phyrxchain != 3)
b43_nphy_set_rx_core_state(dev, nphy->phyrxchain);
@@ -3950,6 +4063,10 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
nphy->perical = 2; /* avoid additional rssi cal on init (like wl) */
+ /* 128 can mean disabled-by-default state of TX pwr ctl. Max value is
+ * 0x7f == 127 and we check for 128 when restoring TX pwr ctl. */
+ nphy->tx_pwr_idx[0] = 128;
+ nphy->tx_pwr_idx[1] = 128;
}
static void b43_nphy_op_free(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index e789a89f104..fbf520285bd 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -764,6 +764,8 @@ struct b43_phy_n {
u8 cal_orig_pwr_idx[2];
u8 measure_hold;
u8 phyrxchain;
+ u8 hw_phyrxchain;
+ u8 hw_phytxchain;
u8 perical;
u32 deaf_count;
u32 rxcalparams;
@@ -783,6 +785,8 @@ struct b43_phy_n {
u16 mphase_txcal_bestcoeffs[11];
bool txpwrctrl;
+ u8 tx_pwr_idx[2];
+ u16 adj_pwr_tbl[84];
u16 txcal_bbmult;
u16 txiqlocal_bestc[11];
bool txiqlocal_coeffsvalid;
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 6e4228c3ed1..fcff923b3c1 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -611,7 +611,7 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
struct b43_wldev *dev = q->dev;
struct b43_wl *wl = dev->wl;
u16 len;
- u32 macstat;
+ u32 macstat = 0;
unsigned int i, padding;
struct sk_buff *skb;
const char *err_msg = NULL;
@@ -676,7 +676,15 @@ data_ready:
goto rx_error;
}
- macstat = le32_to_cpu(rxhdr->mac_status);
+ switch (dev->fw.hdr_format) {
+ case B43_FW_HDR_598:
+ macstat = le32_to_cpu(rxhdr->format_598.mac_status);
+ break;
+ case B43_FW_HDR_410:
+ case B43_FW_HDR_351:
+ macstat = le32_to_cpu(rxhdr->format_351.mac_status);
+ break;
+ }
if (macstat & B43_RX_MAC_FCSERR) {
if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
/* Drop frames with failed FCS. */
diff --git a/drivers/net/wireless/b43/radio_2055.c b/drivers/net/wireless/b43/radio_2055.c
index 93643f18c2b..5289a18ddd8 100644
--- a/drivers/net/wireless/b43/radio_2055.c
+++ b/drivers/net/wireless/b43/radio_2055.c
@@ -4,6 +4,7 @@
IEEE 802.11n PHY and radio device data tables
Copyright (c) 2008 Michael Buesch <m@bues.ch>
+ Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index 8890df06702..a01f776ca4d 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -3,6 +3,8 @@
Broadcom B43 wireless driver
IEEE 802.11n 2056 radio device data tables
+ Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/b43/radio_2056.h
index d52df6be705..a7159d8578b 100644
--- a/drivers/net/wireless/b43/radio_2056.h
+++ b/drivers/net/wireless/b43/radio_2056.h
@@ -1,29 +1,3 @@
-/*
-
- Broadcom B43 wireless driver
-
- Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
-
- Some parts of the code in this file are derived from the brcm80211
- driver Copyright (c) 2010 Broadcom Corporation
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING. If not, write to
- the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
- Boston, MA 02110-1301, USA.
-
-*/
-
#ifndef B43_RADIO_2056_H_
#define B43_RADIO_2056_H_
diff --git a/drivers/net/wireless/b43/radio_2059.c b/drivers/net/wireless/b43/radio_2059.c
index f029f6e1f5d..d4ce8a12ff9 100644
--- a/drivers/net/wireless/b43/radio_2059.c
+++ b/drivers/net/wireless/b43/radio_2059.c
@@ -3,6 +3,8 @@
Broadcom B43 wireless driver
IEEE 802.11n 2059 radio device data tables
+ Copyright (c) 2011 Rafał Miłecki <zajec5@gmail.com>
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 916f238a71d..7b326f2efdc 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -4,6 +4,7 @@
IEEE 802.11n PHY data tables
Copyright (c) 2008 Michael Buesch <m@bues.ch>
+ Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/wireless/b43/tables_phy_ht.c b/drivers/net/wireless/b43/tables_phy_ht.c
index 603938657b1..176c49d74ef 100644
--- a/drivers/net/wireless/b43/tables_phy_ht.c
+++ b/drivers/net/wireless/b43/tables_phy_ht.c
@@ -3,6 +3,8 @@
Broadcom B43 wireless driver
IEEE 802.11n HT-PHY data tables
+ Copyright (c) 2011 Rafał Miłecki <zajec5@gmail.com>
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
@@ -574,6 +576,42 @@ static const u32 b43_httab_0x24[] = {
0x005d0582, 0x005805d6, 0x0053062e, 0x004e068c,
};
+/* Some late-init table */
+const u32 b43_httab_0x1a_0xc0_late[] = {
+ 0x10f90040, 0x10e10040, 0x10e1003c, 0x10c9003d,
+ 0x10b9003c, 0x10a9003d, 0x10a1003c, 0x1099003b,
+ 0x1091003b, 0x1089003a, 0x1081003a, 0x10790039,
+ 0x10710039, 0x1069003a, 0x1061003b, 0x1059003d,
+ 0x1051003f, 0x10490042, 0x1049003e, 0x1049003b,
+ 0x1041003e, 0x1041003b, 0x1039003e, 0x1039003b,
+ 0x10390038, 0x10390035, 0x1031003a, 0x10310036,
+ 0x10310033, 0x1029003a, 0x10290037, 0x10290034,
+ 0x10290031, 0x10210039, 0x10210036, 0x10210033,
+ 0x10210030, 0x1019003c, 0x10190039, 0x10190036,
+ 0x10190033, 0x10190030, 0x1019002d, 0x1019002b,
+ 0x10190028, 0x1011003a, 0x10110036, 0x10110033,
+ 0x10110030, 0x1011002e, 0x1011002b, 0x10110029,
+ 0x10110027, 0x10110024, 0x10110022, 0x10110020,
+ 0x1011001f, 0x1011001d, 0x1009003a, 0x10090037,
+ 0x10090034, 0x10090031, 0x1009002e, 0x1009002c,
+ 0x10090029, 0x10090027, 0x10090025, 0x10090023,
+ 0x10090021, 0x1009001f, 0x1009001d, 0x1009001b,
+ 0x1009001a, 0x10090018, 0x10090017, 0x10090016,
+ 0x10090015, 0x10090013, 0x10090012, 0x10090011,
+ 0x10090010, 0x1009000f, 0x1009000f, 0x1009000e,
+ 0x1009000d, 0x1009000c, 0x1009000c, 0x1009000b,
+ 0x1009000a, 0x1009000a, 0x10090009, 0x10090009,
+ 0x10090008, 0x10090008, 0x10090007, 0x10090007,
+ 0x10090007, 0x10090006, 0x10090006, 0x10090005,
+ 0x10090005, 0x10090005, 0x10090005, 0x10090004,
+ 0x10090004, 0x10090004, 0x10090004, 0x10090003,
+ 0x10090003, 0x10090003, 0x10090003, 0x10090003,
+ 0x10090003, 0x10090002, 0x10090002, 0x10090002,
+ 0x10090002, 0x10090002, 0x10090002, 0x10090002,
+ 0x10090002, 0x10090002, 0x10090001, 0x10090001,
+ 0x10090001, 0x10090001, 0x10090001, 0x10090001,
+};
+
/**************************************************
* R/W ops.
**************************************************/
@@ -674,6 +712,51 @@ void b43_httab_write(struct b43_wldev *dev, u32 offset, u32 value)
return;
}
+void b43_httab_write_few(struct b43_wldev *dev, u32 offset, size_t num, ...)
+{
+ va_list args;
+ u32 type, value;
+ unsigned int i;
+
+ type = offset & B43_HTTAB_TYPEMASK;
+ offset &= 0xFFFF;
+
+ va_start(args, num);
+ switch (type) {
+ case B43_HTTAB_8BIT:
+ b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
+ for (i = 0; i < num; i++) {
+ value = va_arg(args, int);
+ B43_WARN_ON(value & ~0xFF);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value);
+ }
+ break;
+ case B43_HTTAB_16BIT:
+ b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
+ for (i = 0; i < num; i++) {
+ value = va_arg(args, int);
+ B43_WARN_ON(value & ~0xFFFF);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value);
+ }
+ break;
+ case B43_HTTAB_32BIT:
+ b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
+ for (i = 0; i < num; i++) {
+ value = va_arg(args, int);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATAHI,
+ value >> 16);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO,
+ value & 0xFFFF);
+ }
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+ va_end(args);
+
+ return;
+}
+
void b43_httab_write_bulk(struct b43_wldev *dev, u32 offset,
unsigned int nr_elements, const void *_data)
{
@@ -723,6 +806,9 @@ void b43_httab_write_bulk(struct b43_wldev *dev, u32 offset,
} while (0)
void b43_phy_ht_tables_init(struct b43_wldev *dev)
{
+ BUILD_BUG_ON(ARRAY_SIZE(b43_httab_0x1a_0xc0_late) !=
+ B43_HTTAB_1A_C0_LATE_SIZE);
+
httab_upload(dev, B43_HTTAB16(0x12, 0), b43_httab_0x12);
httab_upload(dev, B43_HTTAB16(0x27, 0), b43_httab_0x27);
httab_upload(dev, B43_HTTAB16(0x26, 0), b43_httab_0x26);
diff --git a/drivers/net/wireless/b43/tables_phy_ht.h b/drivers/net/wireless/b43/tables_phy_ht.h
index ea3be382c89..1b5ef2bc770 100644
--- a/drivers/net/wireless/b43/tables_phy_ht.h
+++ b/drivers/net/wireless/b43/tables_phy_ht.h
@@ -14,9 +14,13 @@ u32 b43_httab_read(struct b43_wldev *dev, u32 offset);
void b43_httab_read_bulk(struct b43_wldev *dev, u32 offset,
unsigned int nr_elements, void *_data);
void b43_httab_write(struct b43_wldev *dev, u32 offset, u32 value);
+void b43_httab_write_few(struct b43_wldev *dev, u32 offset, size_t num, ...);
void b43_httab_write_bulk(struct b43_wldev *dev, u32 offset,
unsigned int nr_elements, const void *_data);
void b43_phy_ht_tables_init(struct b43_wldev *dev);
+#define B43_HTTAB_1A_C0_LATE_SIZE 128
+extern const u32 b43_httab_0x1a_0xc0_late[];
+
#endif /* B43_TABLES_PHY_HT_H_ */
diff --git a/drivers/net/wireless/b43/tables_phy_lcn.c b/drivers/net/wireless/b43/tables_phy_lcn.c
index 40c1d0915dd..5176363cadf 100644
--- a/drivers/net/wireless/b43/tables_phy_lcn.c
+++ b/drivers/net/wireless/b43/tables_phy_lcn.c
@@ -3,6 +3,8 @@
Broadcom B43 wireless driver
IEEE 802.11n LCN-PHY data tables
+ Copyright (c) 2011 Rafał Miłecki <zajec5@gmail.com>
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
@@ -25,10 +27,698 @@
#include "phy_common.h"
#include "phy_lcn.h"
+struct b43_lcntab_tx_gain_tbl_entry {
+ u8 gm;
+ u8 pga;
+ u8 pad;
+ u8 dac;
+ u8 bb_mult;
+};
+
+/**************************************************
+ * Static tables.
+ **************************************************/
+
+static const u16 b43_lcntab_0x02[] = {
+ 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d,
+ 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d,
+ 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d,
+ 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d,
+ 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d,
+ 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d,
+ 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d,
+ 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d,
+ 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d,
+ 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d,
+ 0x014d, 0x014d, 0x014d, 0x014d,
+};
+
+static const u16 b43_lcntab_0x01[] = {
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+};
+
+static const u32 b43_lcntab_0x0b[] = {
+ 0x000141f8, 0x000021f8, 0x000021fb, 0x000041fb,
+ 0x0001fedb, 0x0000217b, 0x00002133, 0x000040eb,
+ 0x0001fea3, 0x0000024b,
+};
+
+static const u32 b43_lcntab_0x0c[] = {
+ 0x00100001, 0x00200010, 0x00300001, 0x00400010,
+ 0x00500022, 0x00600122, 0x00700222, 0x00800322,
+ 0x00900422, 0x00a00522, 0x00b00622, 0x00c00722,
+ 0x00d00822, 0x00f00922, 0x00100a22, 0x00200b22,
+ 0x00300c22, 0x00400d22, 0x00500e22, 0x00600f22,
+};
+
+static const u32 b43_lcntab_0x0d[] = {
+ 0x00000000, 0x00000000, 0x10000000, 0x00000000,
+ 0x20000000, 0x00000000, 0x30000000, 0x00000000,
+ 0x40000000, 0x00000000, 0x50000000, 0x00000000,
+ 0x60000000, 0x00000000, 0x70000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x90000000, 0x00000008,
+ 0xa0000000, 0x00000008, 0xb0000000, 0x00000008,
+ 0xc0000000, 0x00000008, 0xd0000000, 0x00000008,
+ 0xe0000000, 0x00000008, 0xf0000000, 0x00000008,
+ 0x00000000, 0x00000009, 0x10000000, 0x00000009,
+ 0x20000000, 0x00000019, 0x30000000, 0x00000019,
+ 0x40000000, 0x00000019, 0x50000000, 0x00000019,
+ 0x60000000, 0x00000019, 0x70000000, 0x00000019,
+ 0x80000000, 0x00000019, 0x90000000, 0x00000019,
+ 0xa0000000, 0x00000019, 0xb0000000, 0x00000019,
+ 0xc0000000, 0x00000019, 0xd0000000, 0x00000019,
+ 0xe0000000, 0x00000019, 0xf0000000, 0x00000019,
+ 0x00000000, 0x0000001a, 0x10000000, 0x0000001a,
+ 0x20000000, 0x0000001a, 0x30000000, 0x0000001a,
+ 0x40000000, 0x0000001a, 0x50000000, 0x00000002,
+ 0x60000000, 0x00000002, 0x70000000, 0x00000002,
+ 0x80000000, 0x00000002, 0x90000000, 0x00000002,
+ 0xa0000000, 0x00000002, 0xb0000000, 0x00000002,
+ 0xc0000000, 0x0000000a, 0xd0000000, 0x0000000a,
+ 0xe0000000, 0x0000000a, 0xf0000000, 0x0000000a,
+ 0x00000000, 0x0000000b, 0x10000000, 0x0000000b,
+ 0x20000000, 0x0000000b, 0x30000000, 0x0000000b,
+ 0x40000000, 0x0000000b, 0x50000000, 0x0000001b,
+ 0x60000000, 0x0000001b, 0x70000000, 0x0000001b,
+ 0x80000000, 0x0000001b, 0x90000000, 0x0000001b,
+ 0xa0000000, 0x0000001b, 0xb0000000, 0x0000001b,
+ 0xc0000000, 0x0000001b, 0xd0000000, 0x0000001b,
+ 0xe0000000, 0x0000001b, 0xf0000000, 0x0000001b,
+ 0x00000000, 0x0000001c, 0x10000000, 0x0000001c,
+ 0x20000000, 0x0000001c, 0x30000000, 0x0000001c,
+ 0x40000000, 0x0000001c, 0x50000000, 0x0000001c,
+ 0x60000000, 0x0000001c, 0x70000000, 0x0000001c,
+ 0x80000000, 0x0000001c, 0x90000000, 0x0000001c,
+};
+
+static const u16 b43_lcntab_0x0e[] = {
+ 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0406,
+ 0x0407, 0x0408, 0x0409, 0x040a, 0x058b, 0x058c,
+ 0x058d, 0x058e, 0x058f, 0x0090, 0x0091, 0x0092,
+ 0x0193, 0x0194, 0x0195, 0x0196, 0x0197, 0x0198,
+ 0x0199, 0x019a, 0x019b, 0x019c, 0x019d, 0x019e,
+ 0x019f, 0x01a0, 0x01a1, 0x01a2, 0x01a3, 0x01a4,
+ 0x01a5, 0x0000,
+};
+
+static const u16 b43_lcntab_0x0f[] = {
+ 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009,
+ 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005,
+ 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009,
+ 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005,
+ 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009,
+ 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005,
+ 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009,
+ 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005,
+ 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009,
+ 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005,
+ 0x000a, 0x0009, 0x0006, 0x0005,
+};
+
+static const u16 b43_lcntab_0x10[] = {
+ 0x005f, 0x0036, 0x0029, 0x001f, 0x005f, 0x0036,
+ 0x0029, 0x001f, 0x005f, 0x0036, 0x0029, 0x001f,
+ 0x005f, 0x0036, 0x0029, 0x001f,
+};
+
+static const u16 b43_lcntab_0x11[] = {
+ 0x0009, 0x000f, 0x0014, 0x0018, 0x00fe, 0x0007,
+ 0x000b, 0x000f, 0x00fb, 0x00fe, 0x0001, 0x0005,
+ 0x0008, 0x000b, 0x000e, 0x0011, 0x0014, 0x0017,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0003, 0x0006, 0x0009, 0x000c, 0x000f,
+ 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0003,
+ 0x0006, 0x0009, 0x000c, 0x000f, 0x0012, 0x0015,
+ 0x0018, 0x001b, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0003, 0x00eb, 0x0000, 0x0000,
+};
+
+static const u32 b43_lcntab_0x12[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000004, 0x00000000, 0x00000004, 0x00000008,
+ 0x00000001, 0x00000005, 0x00000009, 0x0000000d,
+ 0x0000004d, 0x0000008d, 0x0000000d, 0x0000004d,
+ 0x0000008d, 0x000000cd, 0x0000004f, 0x0000008f,
+ 0x000000cf, 0x000000d3, 0x00000113, 0x00000513,
+ 0x00000913, 0x00000953, 0x00000d53, 0x00001153,
+ 0x00001193, 0x00005193, 0x00009193, 0x0000d193,
+ 0x00011193, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000004,
+ 0x00000000, 0x00000004, 0x00000008, 0x00000001,
+ 0x00000005, 0x00000009, 0x0000000d, 0x0000004d,
+ 0x0000008d, 0x0000000d, 0x0000004d, 0x0000008d,
+ 0x000000cd, 0x0000004f, 0x0000008f, 0x000000cf,
+ 0x000000d3, 0x00000113, 0x00000513, 0x00000913,
+ 0x00000953, 0x00000d53, 0x00001153, 0x00005153,
+ 0x00009153, 0x0000d153, 0x00011153, 0x00015153,
+ 0x00019153, 0x0001d153, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u16 b43_lcntab_0x14[] = {
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0002, 0x0003, 0x0001, 0x0003, 0x0002, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0002, 0x0003,
+ 0x0001, 0x0003, 0x0002, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001,
+};
+
+static const u16 b43_lcntab_0x17[] = {
+ 0x001a, 0x0034, 0x004e, 0x0068, 0x009c, 0x00d0,
+ 0x00ea, 0x0104, 0x0034, 0x0068, 0x009c, 0x00d0,
+ 0x0138, 0x01a0, 0x01d4, 0x0208, 0x004e, 0x009c,
+ 0x00ea, 0x0138, 0x01d4, 0x0270, 0x02be, 0x030c,
+ 0x0068, 0x00d0, 0x0138, 0x01a0, 0x0270, 0x0340,
+ 0x03a8, 0x0410, 0x0018, 0x009c, 0x00d0, 0x0104,
+ 0x00ea, 0x0138, 0x0186, 0x00d0, 0x0104, 0x0104,
+ 0x0138, 0x016c, 0x016c, 0x01a0, 0x0138, 0x0186,
+ 0x0186, 0x01d4, 0x0222, 0x0222, 0x0270, 0x0104,
+ 0x0138, 0x016c, 0x0138, 0x016c, 0x01a0, 0x01d4,
+ 0x01a0, 0x01d4, 0x0208, 0x0208, 0x023c, 0x0186,
+ 0x01d4, 0x0222, 0x01d4, 0x0222, 0x0270, 0x02be,
+ 0x0270, 0x02be, 0x030c, 0x030c, 0x035a, 0x0036,
+ 0x006c, 0x00a2, 0x00d8, 0x0144, 0x01b0, 0x01e6,
+ 0x021c, 0x006c, 0x00d8, 0x0144, 0x01b0, 0x0288,
+ 0x0360, 0x03cc, 0x0438, 0x00a2, 0x0144, 0x01e6,
+ 0x0288, 0x03cc, 0x0510, 0x05b2, 0x0654, 0x00d8,
+ 0x01b0, 0x0288, 0x0360, 0x0510, 0x06c0, 0x0798,
+ 0x0870, 0x0018, 0x0144, 0x01b0, 0x021c, 0x01e6,
+ 0x0288, 0x032a, 0x01b0, 0x021c, 0x021c, 0x0288,
+ 0x02f4, 0x02f4, 0x0360, 0x0288, 0x032a, 0x032a,
+ 0x03cc, 0x046e, 0x046e, 0x0510, 0x021c, 0x0288,
+ 0x02f4, 0x0288, 0x02f4, 0x0360, 0x03cc, 0x0360,
+ 0x03cc, 0x0438, 0x0438, 0x04a4, 0x032a, 0x03cc,
+ 0x046e, 0x03cc, 0x046e, 0x0510, 0x05b2, 0x0510,
+ 0x05b2, 0x0654, 0x0654, 0x06f6,
+};
+
+static const u16 b43_lcntab_0x00[] = {
+ 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00,
+ 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005,
+ 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0200, 0x0300, 0x0400, 0x0600,
+ 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003,
+ 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007,
+ 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x4000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+};
+
+static const u32 b43_lcntab_0x18[] = {
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+ 0x00080000, 0x00080000, 0x00080000, 0x00080000,
+};
+
+/**************************************************
+ * TX gain.
+ **************************************************/
+
+const struct b43_lcntab_tx_gain_tbl_entry
+ b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0[B43_LCNTAB_TX_GAIN_SIZE] = {
+ { 0x03, 0x00, 0x1f, 0x0, 0x48 },
+ { 0x03, 0x00, 0x1f, 0x0, 0x46 },
+ { 0x03, 0x00, 0x1f, 0x0, 0x44 },
+ { 0x03, 0x00, 0x1e, 0x0, 0x43 },
+ { 0x03, 0x00, 0x1d, 0x0, 0x44 },
+ { 0x03, 0x00, 0x1c, 0x0, 0x44 },
+ { 0x03, 0x00, 0x1b, 0x0, 0x45 },
+ { 0x03, 0x00, 0x1a, 0x0, 0x46 },
+ { 0x03, 0x00, 0x19, 0x0, 0x46 },
+ { 0x03, 0x00, 0x18, 0x0, 0x47 },
+ { 0x03, 0x00, 0x17, 0x0, 0x48 },
+ { 0x03, 0x00, 0x17, 0x0, 0x46 },
+ { 0x03, 0x00, 0x16, 0x0, 0x47 },
+ { 0x03, 0x00, 0x15, 0x0, 0x48 },
+ { 0x03, 0x00, 0x15, 0x0, 0x46 },
+ { 0x03, 0x00, 0x15, 0x0, 0x44 },
+ { 0x03, 0x00, 0x15, 0x0, 0x42 },
+ { 0x03, 0x00, 0x15, 0x0, 0x40 },
+ { 0x03, 0x00, 0x15, 0x0, 0x3f },
+ { 0x03, 0x00, 0x14, 0x0, 0x40 },
+ { 0x03, 0x00, 0x13, 0x0, 0x41 },
+ { 0x03, 0x00, 0x13, 0x0, 0x40 },
+ { 0x03, 0x00, 0x12, 0x0, 0x41 },
+ { 0x03, 0x00, 0x12, 0x0, 0x40 },
+ { 0x03, 0x00, 0x11, 0x0, 0x41 },
+ { 0x03, 0x00, 0x11, 0x0, 0x40 },
+ { 0x03, 0x00, 0x10, 0x0, 0x41 },
+ { 0x03, 0x00, 0x10, 0x0, 0x40 },
+ { 0x03, 0x00, 0x10, 0x0, 0x3e },
+ { 0x03, 0x00, 0x10, 0x0, 0x3c },
+ { 0x03, 0x00, 0x10, 0x0, 0x3a },
+ { 0x03, 0x00, 0x0f, 0x0, 0x3d },
+ { 0x03, 0x00, 0x0f, 0x0, 0x3b },
+ { 0x03, 0x00, 0x0e, 0x0, 0x3d },
+ { 0x03, 0x00, 0x0e, 0x0, 0x3c },
+ { 0x03, 0x00, 0x0e, 0x0, 0x3a },
+ { 0x03, 0x00, 0x0d, 0x0, 0x3c },
+ { 0x03, 0x00, 0x0d, 0x0, 0x3b },
+ { 0x03, 0x00, 0x0c, 0x0, 0x3e },
+ { 0x03, 0x00, 0x0c, 0x0, 0x3c },
+ { 0x03, 0x00, 0x0c, 0x0, 0x3a },
+ { 0x03, 0x00, 0x0b, 0x0, 0x3e },
+ { 0x03, 0x00, 0x0b, 0x0, 0x3c },
+ { 0x03, 0x00, 0x0b, 0x0, 0x3b },
+ { 0x03, 0x00, 0x0b, 0x0, 0x39 },
+ { 0x03, 0x00, 0x0a, 0x0, 0x3d },
+ { 0x03, 0x00, 0x0a, 0x0, 0x3b },
+ { 0x03, 0x00, 0x0a, 0x0, 0x39 },
+ { 0x03, 0x00, 0x09, 0x0, 0x3e },
+ { 0x03, 0x00, 0x09, 0x0, 0x3c },
+ { 0x03, 0x00, 0x09, 0x0, 0x3a },
+ { 0x03, 0x00, 0x09, 0x0, 0x39 },
+ { 0x03, 0x00, 0x08, 0x0, 0x3e },
+ { 0x03, 0x00, 0x08, 0x0, 0x3c },
+ { 0x03, 0x00, 0x08, 0x0, 0x3a },
+ { 0x03, 0x00, 0x08, 0x0, 0x39 },
+ { 0x03, 0x00, 0x08, 0x0, 0x37 },
+ { 0x03, 0x00, 0x07, 0x0, 0x3d },
+ { 0x03, 0x00, 0x07, 0x0, 0x3c },
+ { 0x03, 0x00, 0x07, 0x0, 0x3a },
+ { 0x03, 0x00, 0x07, 0x0, 0x38 },
+ { 0x03, 0x00, 0x07, 0x0, 0x37 },
+ { 0x03, 0x00, 0x06, 0x0, 0x3e },
+ { 0x03, 0x00, 0x06, 0x0, 0x3c },
+ { 0x03, 0x00, 0x06, 0x0, 0x3a },
+ { 0x03, 0x00, 0x06, 0x0, 0x39 },
+ { 0x03, 0x00, 0x06, 0x0, 0x37 },
+ { 0x03, 0x00, 0x06, 0x0, 0x36 },
+ { 0x03, 0x00, 0x06, 0x0, 0x34 },
+ { 0x03, 0x00, 0x05, 0x0, 0x3d },
+ { 0x03, 0x00, 0x05, 0x0, 0x3b },
+ { 0x03, 0x00, 0x05, 0x0, 0x39 },
+ { 0x03, 0x00, 0x05, 0x0, 0x38 },
+ { 0x03, 0x00, 0x05, 0x0, 0x36 },
+ { 0x03, 0x00, 0x05, 0x0, 0x35 },
+ { 0x03, 0x00, 0x05, 0x0, 0x33 },
+ { 0x03, 0x00, 0x04, 0x0, 0x3e },
+ { 0x03, 0x00, 0x04, 0x0, 0x3c },
+ { 0x03, 0x00, 0x04, 0x0, 0x3a },
+ { 0x03, 0x00, 0x04, 0x0, 0x39 },
+ { 0x03, 0x00, 0x04, 0x0, 0x37 },
+ { 0x03, 0x00, 0x04, 0x0, 0x36 },
+ { 0x03, 0x00, 0x04, 0x0, 0x34 },
+ { 0x03, 0x00, 0x04, 0x0, 0x33 },
+ { 0x03, 0x00, 0x04, 0x0, 0x31 },
+ { 0x03, 0x00, 0x04, 0x0, 0x30 },
+ { 0x03, 0x00, 0x04, 0x0, 0x2e },
+ { 0x03, 0x00, 0x03, 0x0, 0x3c },
+ { 0x03, 0x00, 0x03, 0x0, 0x3a },
+ { 0x03, 0x00, 0x03, 0x0, 0x39 },
+ { 0x03, 0x00, 0x03, 0x0, 0x37 },
+ { 0x03, 0x00, 0x03, 0x0, 0x36 },
+ { 0x03, 0x00, 0x03, 0x0, 0x34 },
+ { 0x03, 0x00, 0x03, 0x0, 0x33 },
+ { 0x03, 0x00, 0x03, 0x0, 0x31 },
+ { 0x03, 0x00, 0x03, 0x0, 0x30 },
+ { 0x03, 0x00, 0x03, 0x0, 0x2e },
+ { 0x03, 0x00, 0x03, 0x0, 0x2d },
+ { 0x03, 0x00, 0x03, 0x0, 0x2c },
+ { 0x03, 0x00, 0x03, 0x0, 0x2b },
+ { 0x03, 0x00, 0x03, 0x0, 0x29 },
+ { 0x03, 0x00, 0x02, 0x0, 0x3d },
+ { 0x03, 0x00, 0x02, 0x0, 0x3b },
+ { 0x03, 0x00, 0x02, 0x0, 0x39 },
+ { 0x03, 0x00, 0x02, 0x0, 0x38 },
+ { 0x03, 0x00, 0x02, 0x0, 0x36 },
+ { 0x03, 0x00, 0x02, 0x0, 0x35 },
+ { 0x03, 0x00, 0x02, 0x0, 0x33 },
+ { 0x03, 0x00, 0x02, 0x0, 0x32 },
+ { 0x03, 0x00, 0x02, 0x0, 0x30 },
+ { 0x03, 0x00, 0x02, 0x0, 0x2f },
+ { 0x03, 0x00, 0x02, 0x0, 0x2e },
+ { 0x03, 0x00, 0x02, 0x0, 0x2c },
+ { 0x03, 0x00, 0x02, 0x0, 0x2b },
+ { 0x03, 0x00, 0x02, 0x0, 0x2a },
+ { 0x03, 0x00, 0x02, 0x0, 0x29 },
+ { 0x03, 0x00, 0x02, 0x0, 0x27 },
+ { 0x03, 0x00, 0x02, 0x0, 0x26 },
+ { 0x03, 0x00, 0x02, 0x0, 0x25 },
+ { 0x03, 0x00, 0x02, 0x0, 0x24 },
+ { 0x03, 0x00, 0x02, 0x0, 0x23 },
+ { 0x03, 0x00, 0x02, 0x0, 0x22 },
+ { 0x03, 0x00, 0x02, 0x0, 0x21 },
+ { 0x03, 0x00, 0x02, 0x0, 0x20 },
+ { 0x03, 0x00, 0x01, 0x0, 0x3f },
+ { 0x03, 0x00, 0x01, 0x0, 0x3d },
+ { 0x03, 0x00, 0x01, 0x0, 0x3b },
+ { 0x03, 0x00, 0x01, 0x0, 0x39 },
+};
+
+/**************************************************
+ * SW control.
+ **************************************************/
+
+const u16 b43_lcntab_sw_ctl_4313_epa_rev0[] = {
+ 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008,
+ 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001,
+ 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008,
+ 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001,
+ 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008,
+ 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001,
+ 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008,
+ 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001,
+ 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008,
+ 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001,
+ 0x0002, 0x0008, 0x0004, 0x0001,
+};
+
+/**************************************************
+ * R/W ops.
+ **************************************************/
+
+u32 b43_lcntab_read(struct b43_wldev *dev, u32 offset)
+{
+ u32 type, value;
+
+ type = offset & B43_LCNTAB_TYPEMASK;
+ offset &= ~B43_LCNTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ switch (type) {
+ case B43_LCNTAB_8BIT:
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO) & 0xFF;
+ break;
+ case B43_LCNTAB_16BIT:
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO);
+ break;
+ case B43_LCNTAB_32BIT:
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO);
+ value |= (b43_phy_read(dev, B43_PHY_LCN_TABLE_DATAHI) << 16);
+ break;
+ default:
+ B43_WARN_ON(1);
+ value = 0;
+ }
+
+ return value;
+}
+
+void b43_lcntab_read_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, void *_data)
+{
+ u32 type;
+ u8 *data = _data;
+ unsigned int i;
+
+ type = offset & B43_LCNTAB_TYPEMASK;
+ offset &= ~B43_LCNTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
+
+ for (i = 0; i < nr_elements; i++) {
+ switch (type) {
+ case B43_LCNTAB_8BIT:
+ *data = b43_phy_read(dev,
+ B43_PHY_LCN_TABLE_DATALO) & 0xFF;
+ data++;
+ break;
+ case B43_LCNTAB_16BIT:
+ *((u16 *)data) = b43_phy_read(dev,
+ B43_PHY_LCN_TABLE_DATALO);
+ data += 2;
+ break;
+ case B43_LCNTAB_32BIT:
+ *((u32 *)data) = b43_phy_read(dev,
+ B43_PHY_LCN_TABLE_DATALO);
+ *((u32 *)data) |= (b43_phy_read(dev,
+ B43_PHY_LCN_TABLE_DATAHI) << 16);
+ data += 4;
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+ }
+}
+
+void b43_lcntab_write(struct b43_wldev *dev, u32 offset, u32 value)
+{
+ u32 type;
+
+ type = offset & B43_LCNTAB_TYPEMASK;
+ offset &= 0xFFFF;
+
+ switch (type) {
+ case B43_LCNTAB_8BIT:
+ B43_WARN_ON(value & ~0xFF);
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value);
+ break;
+ case B43_LCNTAB_16BIT:
+ B43_WARN_ON(value & ~0xFFFF);
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value);
+ break;
+ case B43_LCNTAB_32BIT:
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_DATAHI, value >> 16);
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value & 0xFFFF);
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+
+ return;
+}
+
+void b43_lcntab_write_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, const void *_data)
+{
+ u32 type, value;
+ const u8 *data = _data;
+ unsigned int i;
+
+ type = offset & B43_LCNTAB_TYPEMASK;
+ offset &= ~B43_LCNTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
+
+ for (i = 0; i < nr_elements; i++) {
+ switch (type) {
+ case B43_LCNTAB_8BIT:
+ value = *data;
+ data++;
+ B43_WARN_ON(value & ~0xFF);
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value);
+ break;
+ case B43_LCNTAB_16BIT:
+ value = *((u16 *)data);
+ data += 2;
+ B43_WARN_ON(value & ~0xFFFF);
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value);
+ break;
+ case B43_LCNTAB_32BIT:
+ value = *((u32 *)data);
+ data += 4;
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_DATAHI,
+ value >> 16);
+ b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO,
+ value & 0xFFFF);
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+ }
+}
+
/**************************************************
* Tables ops.
**************************************************/
+#define lcntab_upload(dev, offset, data) do { \
+ b43_lcntab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \
+ } while (0)
+static void b43_phy_lcn_upload_static_tables(struct b43_wldev *dev)
+{
+ lcntab_upload(dev, B43_LCNTAB16(0x02, 0), b43_lcntab_0x02);
+ lcntab_upload(dev, B43_LCNTAB16(0x01, 0), b43_lcntab_0x01);
+ lcntab_upload(dev, B43_LCNTAB32(0x0b, 0), b43_lcntab_0x0b);
+ lcntab_upload(dev, B43_LCNTAB32(0x0c, 0), b43_lcntab_0x0c);
+ lcntab_upload(dev, B43_LCNTAB32(0x0d, 0), b43_lcntab_0x0d);
+ lcntab_upload(dev, B43_LCNTAB16(0x0e, 0), b43_lcntab_0x0e);
+ lcntab_upload(dev, B43_LCNTAB16(0x0f, 0), b43_lcntab_0x0f);
+ lcntab_upload(dev, B43_LCNTAB16(0x10, 0), b43_lcntab_0x10);
+ lcntab_upload(dev, B43_LCNTAB16(0x11, 0), b43_lcntab_0x11);
+ lcntab_upload(dev, B43_LCNTAB32(0x12, 0), b43_lcntab_0x12);
+ lcntab_upload(dev, B43_LCNTAB16(0x14, 0), b43_lcntab_0x14);
+ lcntab_upload(dev, B43_LCNTAB16(0x17, 0), b43_lcntab_0x17);
+ lcntab_upload(dev, B43_LCNTAB16(0x00, 0), b43_lcntab_0x00);
+ lcntab_upload(dev, B43_LCNTAB32(0x18, 0), b43_lcntab_0x18);
+}
+
+void b43_phy_lcn_load_tx_gain_tab(struct b43_wldev *dev,
+ const struct b43_lcntab_tx_gain_tbl_entry *gain_table)
+{
+ u32 i;
+ u32 val;
+
+ u16 pa_gain = 0x70;
+ if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_FEM)
+ pa_gain = 0x10;
+
+ for (i = 0; i < B43_LCNTAB_TX_GAIN_SIZE; i++) {
+ val = ((pa_gain << 24) |
+ (gain_table[i].pad << 16) |
+ (gain_table[i].pga << 8) |
+ gain_table[i].gm);
+ b43_lcntab_write(dev, B43_LCNTAB32(0x7, 0xc0 + i), val);
+
+ /* brcmsmac doesn't maskset, we follow newer wl here */
+ val = b43_lcntab_read(dev, B43_LCNTAB32(0x7, 0x140 + i));
+ val &= 0x000fffff;
+ val |= ((gain_table[i].dac << 28) |
+ (gain_table[i].bb_mult << 20));
+ b43_lcntab_write(dev, B43_LCNTAB32(0x7, 0x140 + i), val);
+ }
+}
+
+/* wlc_lcnphy_load_rfpower */
+static void b43_phy_lcn_load_rfpower(struct b43_wldev *dev)
+{
+ u32 bbmult, rfgain;
+ u8 i;
+
+ for (i = 0; i < 128; i++) {
+ bbmult = b43_lcntab_read(dev, B43_LCNTAB32(0x7, 0x140 + i));
+ bbmult >>= 20;
+ rfgain = b43_lcntab_read(dev, B43_LCNTAB32(0x7, 0xc0 + i));
+
+ /* TODO: calculate value for 0x240 + i table offset
+ * b43_lcntab_write(dev, B43_LCNTAB32(0x7, 0x240 + i), val);
+ */
+ }
+}
+
+/* Not implemented in brcmsmac, noticed in wl in MMIO dump */
+static void b43_phy_lcn_rewrite_rfpower_table(struct b43_wldev *dev)
+{
+ int i;
+ u32 tmp;
+ for (i = 0; i < 128; i++) {
+ tmp = b43_lcntab_read(dev, B43_LCNTAB32(0x7, 0x240 + i));
+ b43_lcntab_write(dev, B43_LCNTAB32(0x7, 0x240 + i), tmp);
+ }
+}
+
+/* wlc_lcnphy_clear_papd_comptable */
+static void b43_phy_lcn_clean_papd_comp_table(struct b43_wldev *dev)
+{
+ u8 i;
+
+ for (i = 0; i < 0x80; i++)
+ b43_lcntab_write(dev, B43_LCNTAB32(0x18, i), 0x80000);
+}
+
+/* wlc_lcnphy_tbl_init */
void b43_phy_lcn_tables_init(struct b43_wldev *dev)
{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+ b43_phy_lcn_upload_static_tables(dev);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (sprom->boardflags_lo & B43_BFL_FEM)
+ b43_phy_lcn_load_tx_gain_tab(dev,
+ b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0);
+ else
+ b43err(dev->wl,
+ "TX gain table unknown for this card\n");
+ }
+
+ if (sprom->boardflags_lo & B43_BFL_FEM &&
+ !(sprom->boardflags_hi & B43_BFH_FEM_BT))
+ b43_lcntab_write_bulk(dev, B43_LCNTAB16(0xf, 0),
+ ARRAY_SIZE(b43_lcntab_sw_ctl_4313_epa_rev0),
+ b43_lcntab_sw_ctl_4313_epa_rev0);
+ else
+ b43err(dev->wl, "SW ctl table is unknown for this card\n");
+
+ b43_phy_lcn_load_rfpower(dev);
+ b43_phy_lcn_rewrite_rfpower_table(dev);
+ b43_phy_lcn_clean_papd_comp_table(dev);
}
diff --git a/drivers/net/wireless/b43/tables_phy_lcn.h b/drivers/net/wireless/b43/tables_phy_lcn.h
index 5e31b15b81e..caff9db6831 100644
--- a/drivers/net/wireless/b43/tables_phy_lcn.h
+++ b/drivers/net/wireless/b43/tables_phy_lcn.h
@@ -1,6 +1,24 @@
#ifndef B43_TABLES_PHY_LCN_H_
#define B43_TABLES_PHY_LCN_H_
+/* The LCN-PHY tables. */
+#define B43_LCNTAB_TYPEMASK 0xF0000000
+#define B43_LCNTAB_8BIT 0x10000000
+#define B43_LCNTAB_16BIT 0x20000000
+#define B43_LCNTAB_32BIT 0x30000000
+#define B43_LCNTAB8(table, offset) (((table) << 10) | (offset) | B43_LCNTAB_8BIT)
+#define B43_LCNTAB16(table, offset) (((table) << 10) | (offset) | B43_LCNTAB_16BIT)
+#define B43_LCNTAB32(table, offset) (((table) << 10) | (offset) | B43_LCNTAB_32BIT)
+
+#define B43_LCNTAB_TX_GAIN_SIZE 128
+
+u32 b43_lcntab_read(struct b43_wldev *dev, u32 offset);
+void b43_lcntab_read_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, void *_data);
+void b43_lcntab_write(struct b43_wldev *dev, u32 offset, u32 value);
+void b43_lcntab_write_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, const void *_data);
+
void b43_phy_lcn_tables_init(struct b43_wldev *dev);
#endif /* B43_TABLES_PHY_LCN_H_ */
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index b74f25ec1ab..c73e8600d21 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -337,12 +337,19 @@ int b43_generate_txhdr(struct b43_wldev *dev,
memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len);
}
}
- if (b43_is_old_txhdr_format(dev)) {
- b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->old_format.plcp),
+ switch (dev->fw.hdr_format) {
+ case B43_FW_HDR_598:
+ b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->format_598.plcp),
plcp_fragment_len, rate);
- } else {
- b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->new_format.plcp),
+ break;
+ case B43_FW_HDR_351:
+ b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->format_351.plcp),
+ plcp_fragment_len, rate);
+ break;
+ case B43_FW_HDR_410:
+ b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->format_410.plcp),
plcp_fragment_len, rate);
+ break;
}
b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->plcp_fb),
plcp_fragment_len, rate_fb);
@@ -415,10 +422,10 @@ int b43_generate_txhdr(struct b43_wldev *dev,
if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
(rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) {
unsigned int len;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr *uninitialized_var(hdr);
int rts_rate, rts_rate_fb;
int rts_rate_ofdm, rts_rate_fb_ofdm;
- struct b43_plcp_hdr6 *plcp;
+ struct b43_plcp_hdr6 *uninitialized_var(plcp);
struct ieee80211_rate *rts_cts_rate;
rts_cts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info);
@@ -429,14 +436,21 @@ int b43_generate_txhdr(struct b43_wldev *dev,
rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb);
if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
- struct ieee80211_cts *cts;
+ struct ieee80211_cts *uninitialized_var(cts);
- if (b43_is_old_txhdr_format(dev)) {
+ switch (dev->fw.hdr_format) {
+ case B43_FW_HDR_598:
cts = (struct ieee80211_cts *)
- (txhdr->old_format.rts_frame);
- } else {
+ (txhdr->format_598.rts_frame);
+ break;
+ case B43_FW_HDR_351:
cts = (struct ieee80211_cts *)
- (txhdr->new_format.rts_frame);
+ (txhdr->format_351.rts_frame);
+ break;
+ case B43_FW_HDR_410:
+ cts = (struct ieee80211_cts *)
+ (txhdr->format_410.rts_frame);
+ break;
}
ieee80211_ctstoself_get(dev->wl->hw, info->control.vif,
fragment_data, fragment_len,
@@ -444,14 +458,21 @@ int b43_generate_txhdr(struct b43_wldev *dev,
mac_ctl |= B43_TXH_MAC_SENDCTS;
len = sizeof(struct ieee80211_cts);
} else {
- struct ieee80211_rts *rts;
+ struct ieee80211_rts *uninitialized_var(rts);
- if (b43_is_old_txhdr_format(dev)) {
+ switch (dev->fw.hdr_format) {
+ case B43_FW_HDR_598:
rts = (struct ieee80211_rts *)
- (txhdr->old_format.rts_frame);
- } else {
+ (txhdr->format_598.rts_frame);
+ break;
+ case B43_FW_HDR_351:
rts = (struct ieee80211_rts *)
- (txhdr->new_format.rts_frame);
+ (txhdr->format_351.rts_frame);
+ break;
+ case B43_FW_HDR_410:
+ rts = (struct ieee80211_rts *)
+ (txhdr->format_410.rts_frame);
+ break;
}
ieee80211_rts_get(dev->wl->hw, info->control.vif,
fragment_data, fragment_len,
@@ -462,22 +483,36 @@ int b43_generate_txhdr(struct b43_wldev *dev,
len += FCS_LEN;
/* Generate the PLCP headers for the RTS/CTS frame */
- if (b43_is_old_txhdr_format(dev))
- plcp = &txhdr->old_format.rts_plcp;
- else
- plcp = &txhdr->new_format.rts_plcp;
+ switch (dev->fw.hdr_format) {
+ case B43_FW_HDR_598:
+ plcp = &txhdr->format_598.rts_plcp;
+ break;
+ case B43_FW_HDR_351:
+ plcp = &txhdr->format_351.rts_plcp;
+ break;
+ case B43_FW_HDR_410:
+ plcp = &txhdr->format_410.rts_plcp;
+ break;
+ }
b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)plcp,
len, rts_rate);
plcp = &txhdr->rts_plcp_fb;
b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)plcp,
len, rts_rate_fb);
- if (b43_is_old_txhdr_format(dev)) {
+ switch (dev->fw.hdr_format) {
+ case B43_FW_HDR_598:
hdr = (struct ieee80211_hdr *)
- (&txhdr->old_format.rts_frame);
- } else {
+ (&txhdr->format_598.rts_frame);
+ break;
+ case B43_FW_HDR_351:
hdr = (struct ieee80211_hdr *)
- (&txhdr->new_format.rts_frame);
+ (&txhdr->format_351.rts_frame);
+ break;
+ case B43_FW_HDR_410:
+ hdr = (struct ieee80211_hdr *)
+ (&txhdr->format_410.rts_frame);
+ break;
}
txhdr->rts_dur_fb = hdr->duration_id;
@@ -505,10 +540,17 @@ int b43_generate_txhdr(struct b43_wldev *dev,
}
/* Magic cookie */
- if (b43_is_old_txhdr_format(dev))
- txhdr->old_format.cookie = cpu_to_le16(cookie);
- else
- txhdr->new_format.cookie = cpu_to_le16(cookie);
+ switch (dev->fw.hdr_format) {
+ case B43_FW_HDR_598:
+ txhdr->format_598.cookie = cpu_to_le16(cookie);
+ break;
+ case B43_FW_HDR_351:
+ txhdr->format_351.cookie = cpu_to_le16(cookie);
+ break;
+ case B43_FW_HDR_410:
+ txhdr->format_410.cookie = cpu_to_le16(cookie);
+ break;
+ }
if (phy->type == B43_PHYTYPE_N) {
txhdr->phy_ctl1 =
@@ -611,8 +653,9 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
struct ieee80211_hdr *wlhdr;
const struct b43_rxhdr_fw4 *rxhdr = _rxhdr;
__le16 fctl;
- u16 phystat0, phystat3, chanstat, mactime;
- u32 macstat;
+ u16 phystat0, phystat3;
+ u16 uninitialized_var(chanstat), uninitialized_var(mactime);
+ u32 uninitialized_var(macstat);
u16 chanid;
u16 phytype;
int padding;
@@ -622,9 +665,19 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
/* Get metadata about the frame from the header. */
phystat0 = le16_to_cpu(rxhdr->phy_status0);
phystat3 = le16_to_cpu(rxhdr->phy_status3);
- macstat = le32_to_cpu(rxhdr->mac_status);
- mactime = le16_to_cpu(rxhdr->mac_time);
- chanstat = le16_to_cpu(rxhdr->channel);
+ switch (dev->fw.hdr_format) {
+ case B43_FW_HDR_598:
+ macstat = le32_to_cpu(rxhdr->format_598.mac_status);
+ mactime = le16_to_cpu(rxhdr->format_598.mac_time);
+ chanstat = le16_to_cpu(rxhdr->format_598.channel);
+ break;
+ case B43_FW_HDR_410:
+ case B43_FW_HDR_351:
+ macstat = le32_to_cpu(rxhdr->format_351.mac_status);
+ mactime = le16_to_cpu(rxhdr->format_351.mac_time);
+ chanstat = le16_to_cpu(rxhdr->format_351.channel);
+ break;
+ }
phytype = chanstat & B43_RX_CHAN_PHYTYPE;
if (unlikely(macstat & B43_RX_MAC_FCSERR)) {
@@ -682,16 +735,22 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
}
/* Link quality statistics */
- if ((chanstat & B43_RX_CHAN_PHYTYPE) == B43_PHYTYPE_N) {
-// s8 rssi = max(rxhdr->power0, rxhdr->power1);
- //TODO: Find out what the rssi value is (dBm or percentage?)
- // and also find out what the maximum possible value is.
- // Fill status.ssi and status.signal fields.
- } else {
+ switch (chanstat & B43_RX_CHAN_PHYTYPE) {
+ case B43_PHYTYPE_N:
+ if (rxhdr->power0 == 16 || rxhdr->power0 == 32)
+ status.signal = max(rxhdr->power1, rxhdr->power2);
+ else
+ status.signal = max(rxhdr->power0, rxhdr->power1);
+ break;
+ case B43_PHYTYPE_A:
+ case B43_PHYTYPE_B:
+ case B43_PHYTYPE_G:
+ case B43_PHYTYPE_LP:
status.signal = b43_rssi_postprocess(dev, rxhdr->jssi,
(phystat0 & B43_RX_PHYST0_OFDM),
(phystat0 & B43_RX_PHYST0_GAINCTL),
(phystat3 & B43_RX_PHYST3_TRSTATE));
+ break;
}
if (phystat0 & B43_RX_PHYST0_OFDM)
@@ -744,6 +803,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
break;
case B43_PHYTYPE_N:
case B43_PHYTYPE_LP:
+ case B43_PHYTYPE_HT:
/* chanid is the SHM channel cookie. Which is the plain
* channel number in b43. */
if (chanstat & B43_RX_CHAN_5GHZ) {
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index 42debb5cd6f..16c514d54af 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -46,7 +46,24 @@ struct b43_txhdr {
__le32 timeout; /* Timeout */
union {
- /* The new r410 format. */
+ /* Tested with 598.314, 644.1001 and 666.2 */
+ struct {
+ __le16 mimo_antenna; /* MIMO antenna select */
+ __le16 preload_size; /* Preload size */
+ PAD_BYTES(2);
+ __le16 cookie; /* TX frame cookie */
+ __le16 tx_status; /* TX status */
+ __le16 max_n_mpdus;
+ __le16 max_a_bytes_mrt;
+ __le16 max_a_bytes_fbr;
+ __le16 min_m_bytes;
+ struct b43_plcp_hdr6 rts_plcp; /* RTS PLCP header */
+ __u8 rts_frame[16]; /* The RTS frame (if used) */
+ PAD_BYTES(2);
+ struct b43_plcp_hdr6 plcp; /* Main PLCP header */
+ } format_598 __packed;
+
+ /* Tested with 410.2160, 478.104 and 508.* */
struct {
__le16 mimo_antenna; /* MIMO antenna select */
__le16 preload_size; /* Preload size */
@@ -57,9 +74,9 @@ struct b43_txhdr {
__u8 rts_frame[16]; /* The RTS frame (if used) */
PAD_BYTES(2);
struct b43_plcp_hdr6 plcp; /* Main PLCP header */
- } new_format __packed;
+ } format_410 __packed;
- /* The old r351 format. */
+ /* Tested with 351.126 */
struct {
PAD_BYTES(2);
__le16 cookie; /* TX frame cookie */
@@ -68,7 +85,7 @@ struct b43_txhdr {
__u8 rts_frame[16]; /* The RTS frame (if used) */
PAD_BYTES(2);
struct b43_plcp_hdr6 plcp; /* Main PLCP header */
- } old_format __packed;
+ } format_351 __packed;
} __packed;
} __packed;
@@ -166,19 +183,18 @@ struct b43_tx_legacy_rate_phy_ctl_entry {
#define B43_TXH_PHY1_MODUL_QAM256 0x2000 /* QAM256 */
-/* r351 firmware compatibility stuff. */
-static inline
-bool b43_is_old_txhdr_format(struct b43_wldev *dev)
-{
- return (dev->fw.rev <= 351);
-}
-
static inline
size_t b43_txhdr_size(struct b43_wldev *dev)
{
- if (b43_is_old_txhdr_format(dev))
+ switch (dev->fw.hdr_format) {
+ case B43_FW_HDR_598:
+ return 112 + sizeof(struct b43_plcp_hdr6);
+ case B43_FW_HDR_410:
+ return 104 + sizeof(struct b43_plcp_hdr6);
+ case B43_FW_HDR_351:
return 100 + sizeof(struct b43_plcp_hdr6);
- return 104 + sizeof(struct b43_plcp_hdr6);
+ }
+ return 0;
}
@@ -232,11 +248,33 @@ struct b43_rxhdr_fw4 {
__s8 power1; /* PHY RX Status 1: Power 1 */
} __packed;
} __packed;
- __le16 phy_status2; /* PHY RX Status 2 */
+ union {
+ /* RSSI for N-PHYs */
+ struct {
+ __s8 power2;
+ PAD_BYTES(1);
+ } __packed;
+
+ __le16 phy_status2; /* PHY RX Status 2 */
+ } __packed;
__le16 phy_status3; /* PHY RX Status 3 */
- __le32 mac_status; /* MAC RX status */
- __le16 mac_time;
- __le16 channel;
+ union {
+ /* Tested with 598.314, 644.1001 and 666.2 */
+ struct {
+ __le16 phy_status4; /* PHY RX Status 4 */
+ __le16 phy_status5; /* PHY RX Status 5 */
+ __le32 mac_status; /* MAC RX status */
+ __le16 mac_time;
+ __le16 channel;
+ } format_598 __packed;
+
+ /* Tested with 351.126, 410.2160, 478.104 and 508.* */
+ struct {
+ __le32 mac_status; /* MAC RX status */
+ __le16 mac_time;
+ __le16 channel;
+ } format_351 __packed;
+ } __packed;
} __packed;
/* PHY RX Status 0 */
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index a610a352102..12b51825158 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -14,7 +14,6 @@
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_driver_chipcommon.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include "debugfs.h"
@@ -23,10 +22,6 @@
#include "phy.h"
-/* The unique identifier of the firmware that's officially supported by this
- * driver version. */
-#define B43legacy_SUPPORTED_FIRMWARE_ID "FW10"
-
#define B43legacy_IRQWAIT_MAX_RETRIES 20
/* MMIO offsets */
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 5010c477abd..c5535adf699 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -42,10 +42,9 @@
/* 32bit DMA ops. */
static
-struct b43legacy_dmadesc_generic *op32_idx2desc(
- struct b43legacy_dmaring *ring,
- int slot,
- struct b43legacy_dmadesc_meta **meta)
+struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
+ int slot,
+ struct b43legacy_dmadesc_meta **meta)
{
struct b43legacy_dmadesc32 *desc;
@@ -53,11 +52,11 @@ struct b43legacy_dmadesc_generic *op32_idx2desc(
desc = ring->descbase;
desc = &(desc[slot]);
- return (struct b43legacy_dmadesc_generic *)desc;
+ return (struct b43legacy_dmadesc32 *)desc;
}
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
- struct b43legacy_dmadesc_generic *desc,
+ struct b43legacy_dmadesc32 *desc,
dma_addr_t dmaaddr, u16 bufsize,
int start, int end, int irq)
{
@@ -67,7 +66,7 @@ static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
u32 addr;
u32 addrext;
- slot = (int)(&(desc->dma32) - descbase);
+ slot = (int)(desc - descbase);
B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
@@ -87,8 +86,8 @@ static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
& B43legacy_DMA32_DCTL_ADDREXT_MASK;
- desc->dma32.control = cpu_to_le32(ctl);
- desc->dma32.address = cpu_to_le32(addr);
+ desc->control = cpu_to_le32(ctl);
+ desc->address = cpu_to_le32(addr);
}
static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
@@ -128,121 +127,6 @@ static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
(u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}
-static const struct b43legacy_dma_ops dma32_ops = {
- .idx2desc = op32_idx2desc,
- .fill_descriptor = op32_fill_descriptor,
- .poke_tx = op32_poke_tx,
- .tx_suspend = op32_tx_suspend,
- .tx_resume = op32_tx_resume,
- .get_current_rxslot = op32_get_current_rxslot,
- .set_current_rxslot = op32_set_current_rxslot,
-};
-
-/* 64bit DMA ops. */
-static
-struct b43legacy_dmadesc_generic *op64_idx2desc(
- struct b43legacy_dmaring *ring,
- int slot,
- struct b43legacy_dmadesc_meta
- **meta)
-{
- struct b43legacy_dmadesc64 *desc;
-
- *meta = &(ring->meta[slot]);
- desc = ring->descbase;
- desc = &(desc[slot]);
-
- return (struct b43legacy_dmadesc_generic *)desc;
-}
-
-static void op64_fill_descriptor(struct b43legacy_dmaring *ring,
- struct b43legacy_dmadesc_generic *desc,
- dma_addr_t dmaaddr, u16 bufsize,
- int start, int end, int irq)
-{
- struct b43legacy_dmadesc64 *descbase = ring->descbase;
- int slot;
- u32 ctl0 = 0;
- u32 ctl1 = 0;
- u32 addrlo;
- u32 addrhi;
- u32 addrext;
-
- slot = (int)(&(desc->dma64) - descbase);
- B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
-
- addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
- addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
- addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
- >> SSB_DMA_TRANSLATION_SHIFT;
- addrhi |= ring->dev->dma.translation;
- if (slot == ring->nr_slots - 1)
- ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND;
- if (start)
- ctl0 |= B43legacy_DMA64_DCTL0_FRAMESTART;
- if (end)
- ctl0 |= B43legacy_DMA64_DCTL0_FRAMEEND;
- if (irq)
- ctl0 |= B43legacy_DMA64_DCTL0_IRQ;
- ctl1 |= (bufsize - ring->frameoffset)
- & B43legacy_DMA64_DCTL1_BYTECNT;
- ctl1 |= (addrext << B43legacy_DMA64_DCTL1_ADDREXT_SHIFT)
- & B43legacy_DMA64_DCTL1_ADDREXT_MASK;
-
- desc->dma64.control0 = cpu_to_le32(ctl0);
- desc->dma64.control1 = cpu_to_le32(ctl1);
- desc->dma64.address_low = cpu_to_le32(addrlo);
- desc->dma64.address_high = cpu_to_le32(addrhi);
-}
-
-static void op64_poke_tx(struct b43legacy_dmaring *ring, int slot)
-{
- b43legacy_dma_write(ring, B43legacy_DMA64_TXINDEX,
- (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
-}
-
-static void op64_tx_suspend(struct b43legacy_dmaring *ring)
-{
- b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
- b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
- | B43legacy_DMA64_TXSUSPEND);
-}
-
-static void op64_tx_resume(struct b43legacy_dmaring *ring)
-{
- b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
- b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
- & ~B43legacy_DMA64_TXSUSPEND);
-}
-
-static int op64_get_current_rxslot(struct b43legacy_dmaring *ring)
-{
- u32 val;
-
- val = b43legacy_dma_read(ring, B43legacy_DMA64_RXSTATUS);
- val &= B43legacy_DMA64_RXSTATDPTR;
-
- return (val / sizeof(struct b43legacy_dmadesc64));
-}
-
-static void op64_set_current_rxslot(struct b43legacy_dmaring *ring,
- int slot)
-{
- b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
- (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
-}
-
-static const struct b43legacy_dma_ops dma64_ops = {
- .idx2desc = op64_idx2desc,
- .fill_descriptor = op64_fill_descriptor,
- .poke_tx = op64_poke_tx,
- .tx_suspend = op64_tx_suspend,
- .tx_resume = op64_tx_resume,
- .get_current_rxslot = op64_get_current_rxslot,
- .set_current_rxslot = op64_set_current_rxslot,
-};
-
-
static inline int free_slots(struct b43legacy_dmaring *ring)
{
return (ring->nr_slots - ring->used_slots);
@@ -358,14 +242,6 @@ return 0;
static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
int controller_idx)
{
- static const u16 map64[] = {
- B43legacy_MMIO_DMA64_BASE0,
- B43legacy_MMIO_DMA64_BASE1,
- B43legacy_MMIO_DMA64_BASE2,
- B43legacy_MMIO_DMA64_BASE3,
- B43legacy_MMIO_DMA64_BASE4,
- B43legacy_MMIO_DMA64_BASE5,
- };
static const u16 map32[] = {
B43legacy_MMIO_DMA32_BASE0,
B43legacy_MMIO_DMA32_BASE1,
@@ -375,11 +251,6 @@ static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
B43legacy_MMIO_DMA32_BASE5,
};
- if (type == B43legacy_DMA_64BIT) {
- B43legacy_WARN_ON(!(controller_idx >= 0 &&
- controller_idx < ARRAY_SIZE(map64)));
- return map64[controller_idx];
- }
B43legacy_WARN_ON(!(controller_idx >= 0 &&
controller_idx < ARRAY_SIZE(map32)));
return map32[controller_idx];
@@ -491,25 +362,15 @@ static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
might_sleep();
- offset = (type == B43legacy_DMA_64BIT) ?
- B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL;
+ offset = B43legacy_DMA32_RXCTL;
b43legacy_write32(dev, mmio_base + offset, 0);
for (i = 0; i < 10; i++) {
- offset = (type == B43legacy_DMA_64BIT) ?
- B43legacy_DMA64_RXSTATUS : B43legacy_DMA32_RXSTATUS;
+ offset = B43legacy_DMA32_RXSTATUS;
value = b43legacy_read32(dev, mmio_base + offset);
- if (type == B43legacy_DMA_64BIT) {
- value &= B43legacy_DMA64_RXSTAT;
- if (value == B43legacy_DMA64_RXSTAT_DISABLED) {
- i = -1;
- break;
- }
- } else {
- value &= B43legacy_DMA32_RXSTATE;
- if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
- i = -1;
- break;
- }
+ value &= B43legacy_DMA32_RXSTATE;
+ if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
+ i = -1;
+ break;
}
msleep(1);
}
@@ -533,43 +394,24 @@ static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
might_sleep();
for (i = 0; i < 10; i++) {
- offset = (type == B43legacy_DMA_64BIT) ?
- B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
+ offset = B43legacy_DMA32_TXSTATUS;
value = b43legacy_read32(dev, mmio_base + offset);
- if (type == B43legacy_DMA_64BIT) {
- value &= B43legacy_DMA64_TXSTAT;
- if (value == B43legacy_DMA64_TXSTAT_DISABLED ||
- value == B43legacy_DMA64_TXSTAT_IDLEWAIT ||
- value == B43legacy_DMA64_TXSTAT_STOPPED)
- break;
- } else {
- value &= B43legacy_DMA32_TXSTATE;
- if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
- value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
- value == B43legacy_DMA32_TXSTAT_STOPPED)
- break;
- }
+ value &= B43legacy_DMA32_TXSTATE;
+ if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
+ value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
+ value == B43legacy_DMA32_TXSTAT_STOPPED)
+ break;
msleep(1);
}
- offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_TXCTL :
- B43legacy_DMA32_TXCTL;
+ offset = B43legacy_DMA32_TXCTL;
b43legacy_write32(dev, mmio_base + offset, 0);
for (i = 0; i < 10; i++) {
- offset = (type == B43legacy_DMA_64BIT) ?
- B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
+ offset = B43legacy_DMA32_TXSTATUS;
value = b43legacy_read32(dev, mmio_base + offset);
- if (type == B43legacy_DMA_64BIT) {
- value &= B43legacy_DMA64_TXSTAT;
- if (value == B43legacy_DMA64_TXSTAT_DISABLED) {
- i = -1;
- break;
- }
- } else {
- value &= B43legacy_DMA32_TXSTATE;
- if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
- i = -1;
- break;
- }
+ value &= B43legacy_DMA32_TXSTATE;
+ if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
+ i = -1;
+ break;
}
msleep(1);
}
@@ -601,9 +443,6 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
if ((u64)addr + buffersize > (1ULL << 32))
goto address_error;
break;
- case B43legacy_DMA_64BIT:
- /* Currently we can't have addresses beyond 64 bits in the kernel. */
- break;
}
/* The address is OK. */
@@ -617,7 +456,7 @@ address_error:
}
static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
- struct b43legacy_dmadesc_generic *desc,
+ struct b43legacy_dmadesc32 *desc,
struct b43legacy_dmadesc_meta *meta,
gfp_t gfp_flags)
{
@@ -653,8 +492,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
meta->skb = skb;
meta->dmaaddr = dmaaddr;
- ring->ops->fill_descriptor(ring, desc, dmaaddr,
- ring->rx_buffersize, 0, 0, 0);
+ op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);
rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
rxhdr->frame_len = 0;
@@ -671,11 +509,11 @@ static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
int i;
int err = -ENOMEM;
- struct b43legacy_dmadesc_generic *desc;
+ struct b43legacy_dmadesc32 *desc;
struct b43legacy_dmadesc_meta *meta;
for (i = 0; i < ring->nr_slots; i++) {
- desc = ring->ops->idx2desc(ring, i, &meta);
+ desc = op32_idx2desc(ring, i, &meta);
err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
if (err) {
@@ -692,7 +530,7 @@ out:
err_unwind:
for (i--; i >= 0; i--) {
- desc = ring->ops->idx2desc(ring, i, &meta);
+ desc = op32_idx2desc(ring, i, &meta);
unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
dev_kfree_skb(meta->skb);
@@ -710,83 +548,35 @@ static int dmacontroller_setup(struct b43legacy_dmaring *ring)
u32 value;
u32 addrext;
u32 trans = ring->dev->dma.translation;
+ u32 ringbase = (u32)(ring->dmabase);
if (ring->tx) {
- if (ring->type == B43legacy_DMA_64BIT) {
- u64 ringbase = (u64)(ring->dmabase);
-
- addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
- >> SSB_DMA_TRANSLATION_SHIFT;
- value = B43legacy_DMA64_TXENABLE;
- value |= (addrext << B43legacy_DMA64_TXADDREXT_SHIFT)
- & B43legacy_DMA64_TXADDREXT_MASK;
- b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
- value);
- b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO,
- (ringbase & 0xFFFFFFFF));
- b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI,
- ((ringbase >> 32)
- & ~SSB_DMA_TRANSLATION_MASK)
- | trans);
- } else {
- u32 ringbase = (u32)(ring->dmabase);
-
- addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
- >> SSB_DMA_TRANSLATION_SHIFT;
- value = B43legacy_DMA32_TXENABLE;
- value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
- & B43legacy_DMA32_TXADDREXT_MASK;
- b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
- value);
- b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
- (ringbase &
- ~SSB_DMA_TRANSLATION_MASK)
- | trans);
- }
+ addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
+ >> SSB_DMA_TRANSLATION_SHIFT;
+ value = B43legacy_DMA32_TXENABLE;
+ value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
+ & B43legacy_DMA32_TXADDREXT_MASK;
+ b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
+ b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
+ (ringbase & ~SSB_DMA_TRANSLATION_MASK)
+ | trans);
} else {
err = alloc_initial_descbuffers(ring);
if (err)
goto out;
- if (ring->type == B43legacy_DMA_64BIT) {
- u64 ringbase = (u64)(ring->dmabase);
-
- addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
- >> SSB_DMA_TRANSLATION_SHIFT;
- value = (ring->frameoffset <<
- B43legacy_DMA64_RXFROFF_SHIFT);
- value |= B43legacy_DMA64_RXENABLE;
- value |= (addrext << B43legacy_DMA64_RXADDREXT_SHIFT)
- & B43legacy_DMA64_RXADDREXT_MASK;
- b43legacy_dma_write(ring, B43legacy_DMA64_RXCTL,
- value);
- b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO,
- (ringbase & 0xFFFFFFFF));
- b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI,
- ((ringbase >> 32) &
- ~SSB_DMA_TRANSLATION_MASK) |
- trans);
- b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
- 200);
- } else {
- u32 ringbase = (u32)(ring->dmabase);
-
- addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
- >> SSB_DMA_TRANSLATION_SHIFT;
- value = (ring->frameoffset <<
- B43legacy_DMA32_RXFROFF_SHIFT);
- value |= B43legacy_DMA32_RXENABLE;
- value |= (addrext <<
- B43legacy_DMA32_RXADDREXT_SHIFT)
- & B43legacy_DMA32_RXADDREXT_MASK;
- b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL,
- value);
- b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
- (ringbase &
- ~SSB_DMA_TRANSLATION_MASK)
- | trans);
- b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
- 200);
- }
+
+ addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
+ >> SSB_DMA_TRANSLATION_SHIFT;
+ value = (ring->frameoffset <<
+ B43legacy_DMA32_RXFROFF_SHIFT);
+ value |= B43legacy_DMA32_RXENABLE;
+ value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
+ & B43legacy_DMA32_RXADDREXT_MASK;
+ b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
+ b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
+ (ringbase & ~SSB_DMA_TRANSLATION_MASK)
+ | trans);
+ b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
}
out:
@@ -799,19 +589,11 @@ static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
if (ring->tx) {
b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
ring->type);
- if (ring->type == B43legacy_DMA_64BIT) {
- b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0);
- b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0);
- } else
- b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
+ b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
} else {
b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
ring->type);
- if (ring->type == B43legacy_DMA_64BIT) {
- b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0);
- b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0);
- } else
- b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
+ b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
}
}
@@ -823,7 +605,7 @@ static void free_all_descbuffers(struct b43legacy_dmaring *ring)
if (!ring->used_slots)
return;
for (i = 0; i < ring->nr_slots; i++) {
- ring->ops->idx2desc(ring, i, &meta);
+ op32_idx2desc(ring, i, &meta);
if (!meta->skb) {
B43legacy_WARN_ON(!ring->tx);
@@ -844,9 +626,6 @@ static u64 supported_dma_mask(struct b43legacy_wldev *dev)
u32 tmp;
u16 mmio_base;
- tmp = b43legacy_read32(dev, SSB_TMSHIGH);
- if (tmp & SSB_TMSHIGH_DMA64)
- return DMA_BIT_MASK(64);
mmio_base = b43legacy_dmacontroller_base(0, 0);
b43legacy_write32(dev,
mmio_base + B43legacy_DMA32_TXCTL,
@@ -865,8 +644,6 @@ static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
return B43legacy_DMA_30BIT;
if (dmamask == DMA_BIT_MASK(32))
return B43legacy_DMA_32BIT;
- if (dmamask == DMA_BIT_MASK(64))
- return B43legacy_DMA_64BIT;
B43legacy_WARN_ON(1);
return B43legacy_DMA_30BIT;
}
@@ -937,10 +714,6 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
ring->nr_slots = nr_slots;
ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
ring->index = controller_index;
- if (type == B43legacy_DMA_64BIT)
- ring->ops = &dma64_ops;
- else
- ring->ops = &dma32_ops;
if (for_tx) {
ring->tx = 1;
ring->current_slot = -1;
@@ -1247,12 +1020,11 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
struct sk_buff **in_skb)
{
struct sk_buff *skb = *in_skb;
- const struct b43legacy_dma_ops *ops = ring->ops;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u8 *header;
int slot, old_top_slot, old_used_slots;
int err;
- struct b43legacy_dmadesc_generic *desc;
+ struct b43legacy_dmadesc32 *desc;
struct b43legacy_dmadesc_meta *meta;
struct b43legacy_dmadesc_meta *meta_hdr;
struct sk_buff *bounce_skb;
@@ -1265,7 +1037,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
/* Get a slot for the header. */
slot = request_slot(ring);
- desc = ops->idx2desc(ring, slot, &meta_hdr);
+ desc = op32_idx2desc(ring, slot, &meta_hdr);
memset(meta_hdr, 0, sizeof(*meta_hdr));
header = &(ring->txhdr_cache[slot * sizeof(
@@ -1287,12 +1059,12 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
ring->used_slots = old_used_slots;
return -EIO;
}
- ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
+ op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);
/* Get a slot for the payload. */
slot = request_slot(ring);
- desc = ops->idx2desc(ring, slot, &meta);
+ desc = op32_idx2desc(ring, slot, &meta);
memset(meta, 0, sizeof(*meta));
meta->skb = skb;
@@ -1328,12 +1100,12 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
}
}
- ops->fill_descriptor(ring, desc, meta->dmaaddr,
+ op32_fill_descriptor(ring, desc, meta->dmaaddr,
skb->len, 0, 1, 1);
wmb(); /* previous stuff MUST be done */
/* Now transfer the whole frame. */
- ops->poke_tx(ring, next_slot(ring, slot));
+ op32_poke_tx(ring, next_slot(ring, slot));
return 0;
out_free_bounce:
@@ -1429,7 +1201,6 @@ out_unlock:
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
const struct b43legacy_txstatus *status)
{
- const struct b43legacy_dma_ops *ops;
struct b43legacy_dmaring *ring;
struct b43legacy_dmadesc_meta *meta;
int retry_limit;
@@ -1442,10 +1213,9 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
spin_lock(&ring->lock);
B43legacy_WARN_ON(!ring->tx);
- ops = ring->ops;
while (1) {
B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
- ops->idx2desc(ring, slot, &meta);
+ op32_idx2desc(ring, slot, &meta);
if (meta->skb)
unmap_descbuffer(ring, meta->dmaaddr,
@@ -1528,8 +1298,7 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
static void dma_rx(struct b43legacy_dmaring *ring,
int *slot)
{
- const struct b43legacy_dma_ops *ops = ring->ops;
- struct b43legacy_dmadesc_generic *desc;
+ struct b43legacy_dmadesc32 *desc;
struct b43legacy_dmadesc_meta *meta;
struct b43legacy_rxhdr_fw3 *rxhdr;
struct sk_buff *skb;
@@ -1537,7 +1306,7 @@ static void dma_rx(struct b43legacy_dmaring *ring,
int err;
dma_addr_t dmaaddr;
- desc = ops->idx2desc(ring, *slot, &meta);
+ desc = op32_idx2desc(ring, *slot, &meta);
sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
skb = meta->skb;
@@ -1589,7 +1358,7 @@ static void dma_rx(struct b43legacy_dmaring *ring,
s32 tmp = len;
while (1) {
- desc = ops->idx2desc(ring, *slot, &meta);
+ desc = op32_idx2desc(ring, *slot, &meta);
/* recycle the descriptor buffer. */
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize);
@@ -1626,13 +1395,12 @@ drop:
void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
- const struct b43legacy_dma_ops *ops = ring->ops;
int slot;
int current_slot;
int used_slots = 0;
B43legacy_WARN_ON(ring->tx);
- current_slot = ops->get_current_rxslot(ring);
+ current_slot = op32_get_current_rxslot(ring);
B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
ring->nr_slots));
@@ -1641,7 +1409,7 @@ void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
dma_rx(ring, &slot);
update_max_used_slots(ring, ++used_slots);
}
- ops->set_current_rxslot(ring, slot);
+ op32_set_current_rxslot(ring, slot);
ring->current_slot = slot;
}
@@ -1651,7 +1419,7 @@ static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
spin_lock_irqsave(&ring->lock, flags);
B43legacy_WARN_ON(!ring->tx);
- ring->ops->tx_suspend(ring);
+ op32_tx_suspend(ring);
spin_unlock_irqrestore(&ring->lock, flags);
}
@@ -1661,7 +1429,7 @@ static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
spin_lock_irqsave(&ring->lock, flags);
B43legacy_WARN_ON(!ring->tx);
- ring->ops->tx_resume(ring);
+ op32_tx_resume(ring);
spin_unlock_irqrestore(&ring->lock, flags);
}
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 686941c242f..504a58767e9 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -82,90 +82,6 @@ struct b43legacy_dmadesc32 {
#define B43legacy_DMA32_DCTL_FRAMESTART 0x80000000
-
-/*** 64-bit DMA Engine. ***/
-
-/* 64-bit DMA controller registers. */
-#define B43legacy_DMA64_TXCTL 0x00
-#define B43legacy_DMA64_TXENABLE 0x00000001
-#define B43legacy_DMA64_TXSUSPEND 0x00000002
-#define B43legacy_DMA64_TXLOOPBACK 0x00000004
-#define B43legacy_DMA64_TXFLUSH 0x00000010
-#define B43legacy_DMA64_TXADDREXT_MASK 0x00030000
-#define B43legacy_DMA64_TXADDREXT_SHIFT 16
-#define B43legacy_DMA64_TXINDEX 0x04
-#define B43legacy_DMA64_TXRINGLO 0x08
-#define B43legacy_DMA64_TXRINGHI 0x0C
-#define B43legacy_DMA64_TXSTATUS 0x10
-#define B43legacy_DMA64_TXSTATDPTR 0x00001FFF
-#define B43legacy_DMA64_TXSTAT 0xF0000000
-#define B43legacy_DMA64_TXSTAT_DISABLED 0x00000000
-#define B43legacy_DMA64_TXSTAT_ACTIVE 0x10000000
-#define B43legacy_DMA64_TXSTAT_IDLEWAIT 0x20000000
-#define B43legacy_DMA64_TXSTAT_STOPPED 0x30000000
-#define B43legacy_DMA64_TXSTAT_SUSP 0x40000000
-#define B43legacy_DMA64_TXERROR 0x14
-#define B43legacy_DMA64_TXERRDPTR 0x0001FFFF
-#define B43legacy_DMA64_TXERR 0xF0000000
-#define B43legacy_DMA64_TXERR_NOERR 0x00000000
-#define B43legacy_DMA64_TXERR_PROT 0x10000000
-#define B43legacy_DMA64_TXERR_UNDERRUN 0x20000000
-#define B43legacy_DMA64_TXERR_TRANSFER 0x30000000
-#define B43legacy_DMA64_TXERR_DESCREAD 0x40000000
-#define B43legacy_DMA64_TXERR_CORE 0x50000000
-#define B43legacy_DMA64_RXCTL 0x20
-#define B43legacy_DMA64_RXENABLE 0x00000001
-#define B43legacy_DMA64_RXFROFF_MASK 0x000000FE
-#define B43legacy_DMA64_RXFROFF_SHIFT 1
-#define B43legacy_DMA64_RXDIRECTFIFO 0x00000100
-#define B43legacy_DMA64_RXADDREXT_MASK 0x00030000
-#define B43legacy_DMA64_RXADDREXT_SHIFT 16
-#define B43legacy_DMA64_RXINDEX 0x24
-#define B43legacy_DMA64_RXRINGLO 0x28
-#define B43legacy_DMA64_RXRINGHI 0x2C
-#define B43legacy_DMA64_RXSTATUS 0x30
-#define B43legacy_DMA64_RXSTATDPTR 0x00001FFF
-#define B43legacy_DMA64_RXSTAT 0xF0000000
-#define B43legacy_DMA64_RXSTAT_DISABLED 0x00000000
-#define B43legacy_DMA64_RXSTAT_ACTIVE 0x10000000
-#define B43legacy_DMA64_RXSTAT_IDLEWAIT 0x20000000
-#define B43legacy_DMA64_RXSTAT_STOPPED 0x30000000
-#define B43legacy_DMA64_RXSTAT_SUSP 0x40000000
-#define B43legacy_DMA64_RXERROR 0x34
-#define B43legacy_DMA64_RXERRDPTR 0x0001FFFF
-#define B43legacy_DMA64_RXERR 0xF0000000
-#define B43legacy_DMA64_RXERR_NOERR 0x00000000
-#define B43legacy_DMA64_RXERR_PROT 0x10000000
-#define B43legacy_DMA64_RXERR_UNDERRUN 0x20000000
-#define B43legacy_DMA64_RXERR_TRANSFER 0x30000000
-#define B43legacy_DMA64_RXERR_DESCREAD 0x40000000
-#define B43legacy_DMA64_RXERR_CORE 0x50000000
-
-/* 64-bit DMA descriptor. */
-struct b43legacy_dmadesc64 {
- __le32 control0;
- __le32 control1;
- __le32 address_low;
- __le32 address_high;
-} __packed;
-#define B43legacy_DMA64_DCTL0_DTABLEEND 0x10000000
-#define B43legacy_DMA64_DCTL0_IRQ 0x20000000
-#define B43legacy_DMA64_DCTL0_FRAMEEND 0x40000000
-#define B43legacy_DMA64_DCTL0_FRAMESTART 0x80000000
-#define B43legacy_DMA64_DCTL1_BYTECNT 0x00001FFF
-#define B43legacy_DMA64_DCTL1_ADDREXT_MASK 0x00030000
-#define B43legacy_DMA64_DCTL1_ADDREXT_SHIFT 16
-
-
-
-struct b43legacy_dmadesc_generic {
- union {
- struct b43legacy_dmadesc32 dma32;
- struct b43legacy_dmadesc64 dma64;
- } __packed;
-} __packed;
-
-
/* Misc DMA constants */
#define B43legacy_DMA_RINGMEMSIZE PAGE_SIZE
#define B43legacy_DMA0_RX_FRAMEOFFSET 30
@@ -197,35 +113,12 @@ struct b43legacy_dmadesc_meta {
bool is_last_fragment;
};
-struct b43legacy_dmaring;
-
-/* Lowlevel DMA operations that differ between 32bit and 64bit DMA. */
-struct b43legacy_dma_ops {
- struct b43legacy_dmadesc_generic * (*idx2desc)
- (struct b43legacy_dmaring *ring,
- int slot,
- struct b43legacy_dmadesc_meta
- **meta);
- void (*fill_descriptor)(struct b43legacy_dmaring *ring,
- struct b43legacy_dmadesc_generic *desc,
- dma_addr_t dmaaddr, u16 bufsize,
- int start, int end, int irq);
- void (*poke_tx)(struct b43legacy_dmaring *ring, int slot);
- void (*tx_suspend)(struct b43legacy_dmaring *ring);
- void (*tx_resume)(struct b43legacy_dmaring *ring);
- int (*get_current_rxslot)(struct b43legacy_dmaring *ring);
- void (*set_current_rxslot)(struct b43legacy_dmaring *ring, int slot);
-};
-
enum b43legacy_dmatype {
B43legacy_DMA_30BIT = 30,
B43legacy_DMA_32BIT = 32,
- B43legacy_DMA_64BIT = 64,
};
struct b43legacy_dmaring {
- /* Lowlevel DMA ops. */
- const struct b43legacy_dma_ops *ops;
/* Kernel virtual base address of the ring memory. */
void *descbase;
/* Meta data about all descriptors. */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 04c03b212a5..a3b72cd72c6 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -35,7 +35,6 @@
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
-#include <linux/wireless.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
@@ -61,7 +60,6 @@ MODULE_AUTHOR("Stefano Brivio");
MODULE_AUTHOR("Michael Buesch");
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID);
MODULE_FIRMWARE("b43legacy/ucode2.fw");
MODULE_FIRMWARE("b43legacy/ucode4.fw");
@@ -2468,7 +2466,8 @@ out:
}
}
-static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+static int b43legacy_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
return 0;
@@ -3785,7 +3784,8 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
INIT_WORK(&wl->beacon_update_trigger, b43legacy_beacon_update_trigger_work);
ssb_set_devtypedata(dev, wl);
- b43legacyinfo(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id);
+ b43legacyinfo(wl, "Broadcom %04X WLAN found (core revision %u)\n",
+ dev->bus->chip_id, dev->id.revision);
err = 0;
out:
return err;
@@ -3947,8 +3947,7 @@ static void b43legacy_print_driverinfo(void)
feat_dma = "D";
#endif
printk(KERN_INFO "Broadcom 43xx-legacy driver loaded "
- "[ Features: %s%s%s%s, Firmware-ID: "
- B43legacy_SUPPORTED_FIRMWARE_ID " ]\n",
+ "[ Features: %s%s%s%s ]\n",
feat_pci, feat_leds, feat_pio, feat_dma);
}
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
new file mode 100644
index 00000000000..2069fc8f7ad
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -0,0 +1,35 @@
+config BRCMUTIL
+ tristate
+
+config BRCMSMAC
+ tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
+ depends on PCI
+ depends on MAC80211
+ depends on BCMA=n
+ select BRCMUTIL
+ select FW_LOADER
+ select CRC_CCITT
+ select CRC8
+ select CORDIC
+ ---help---
+ This module adds support for PCIe wireless adapters based on Broadcom
+ IEEE802.11n SoftMAC chipsets. If you choose to build a module, it'll
+ be called brcmsmac.ko.
+
+config BRCMFMAC
+ tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver"
+ depends on MMC
+ depends on CFG80211
+ select BRCMUTIL
+ select FW_LOADER
+ ---help---
+ This module adds support for embedded wireless adapters based on
+ Broadcom IEEE802.11n FullMAC chipsets. This driver uses the kernel's
+ wireless extensions subsystem. If you choose to build a module,
+ it'll be called brcmfmac.ko.
+
+config BRCMDBG
+ bool "Broadcom driver debug functions"
+ depends on BRCMSMAC || BRCMFMAC
+ ---help---
+ Selecting this enables additional code for debug purposes.
diff --git a/drivers/net/wireless/brcm80211/Makefile b/drivers/net/wireless/brcm80211/Makefile
new file mode 100644
index 00000000000..f41c047eca8
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/Makefile
@@ -0,0 +1,23 @@
+#
+# Makefile fragment for Broadcom 802.11n Networking Device Driver
+#
+# Copyright (c) 2010 Broadcom Corporation
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# common flags
+subdir-ccflags-$(CONFIG_BRCMDBG) += -DBCMDBG
+
+obj-$(CONFIG_BRCMUTIL) += brcmutil/
+obj-$(CONFIG_BRCMFMAC) += brcmfmac/
+obj-$(CONFIG_BRCMSMAC) += brcmsmac/
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
new file mode 100644
index 00000000000..b44e3094588
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -0,0 +1,33 @@
+#
+# Makefile fragment for Broadcom 802.11n Networking Device Driver
+#
+# Copyright (c) 2010 Broadcom Corporation
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ccflags-y += \
+ -Idrivers/net/wireless/brcm80211/brcmfmac \
+ -Idrivers/net/wireless/brcm80211/include
+
+DHDOFILES = \
+ wl_cfg80211.o \
+ dhd_cdc.o \
+ dhd_common.o \
+ dhd_sdio.o \
+ dhd_linux.o \
+ bcmsdh.o \
+ bcmsdh_sdmmc.o
+
+obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
+brcmfmac-objs += $(DHDOFILES)
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmchip.h b/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
index d7d3afd5a10..d7d3afd5a10 100644
--- a/drivers/staging/brcm80211/brcmfmac/bcmchip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
new file mode 100644
index 00000000000..bff9dcd6fad
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* ****************** SDIO CARD Interface Functions **************************/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+
+#include <defs.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include <soc.h>
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_dbg.h"
+#include "sdio_host.h"
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT 2
+
+static void brcmf_sdioh_irqhandler(struct sdio_func *func)
+{
+ struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
+
+ brcmf_dbg(TRACE, "***IRQHandler\n");
+
+ sdio_release_host(func);
+
+ brcmf_sdbrcm_isr(sdiodev->bus);
+
+ sdio_claim_host(func);
+}
+
+int brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev)
+{
+ brcmf_dbg(TRACE, "Entering\n");
+
+ sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_irq(sdiodev->func[1], brcmf_sdioh_irqhandler);
+ sdio_release_host(sdiodev->func[1]);
+
+ return 0;
+}
+
+int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev)
+{
+ brcmf_dbg(TRACE, "Entering\n");
+
+ sdio_claim_host(sdiodev->func[1]);
+ sdio_release_irq(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
+
+ return 0;
+}
+
+u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr,
+ int *err)
+{
+ int status;
+ s32 retry = 0;
+ u8 data = 0;
+
+ do {
+ if (retry) /* wait for 1 ms till bus get settled down */
+ udelay(1000);
+ status = brcmf_sdioh_request_byte(sdiodev, SDIOH_READ, fnc_num,
+ addr, (u8 *) &data);
+ } while (status != 0
+ && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+ if (err)
+ *err = status;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, u8data = 0x%x\n",
+ fnc_num, addr, data);
+
+ return data;
+}
+
+void
+brcmf_sdcard_cfg_write(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr,
+ u8 data, int *err)
+{
+ int status;
+ s32 retry = 0;
+
+ do {
+ if (retry) /* wait for 1 ms till bus get settled down */
+ udelay(1000);
+ status = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, fnc_num,
+ addr, (u8 *) &data);
+ } while (status != 0
+ && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+ if (err)
+ *err = status;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, u8data = 0x%x\n",
+ fnc_num, addr, data);
+}
+
+int
+brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
+{
+ int err = 0;
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+ (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+ if (!err)
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_SBADDRMID,
+ (address >> 16) & SBSDIO_SBADDRMID_MASK,
+ &err);
+ if (!err)
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_SBADDRHIGH,
+ (address >> 24) & SBSDIO_SBADDRHIGH_MASK,
+ &err);
+
+ return err;
+}
+
+u32 brcmf_sdcard_reg_read(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size)
+{
+ int status;
+ u32 word = 0;
+ uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+
+ brcmf_dbg(INFO, "fun = 1, addr = 0x%x\n", addr);
+
+ if (bar0 != sdiodev->sbwad) {
+ if (brcmf_sdcard_set_sbaddr_window(sdiodev, bar0))
+ return 0xFFFFFFFF;
+
+ sdiodev->sbwad = bar0;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ if (size == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = brcmf_sdioh_request_word(sdiodev, SDIOH_READ, SDIO_FUNC_1,
+ addr, &word, size);
+
+ sdiodev->regfail = (status != 0);
+
+ brcmf_dbg(INFO, "u32data = 0x%x\n", word);
+
+ /* if ok, return appropriately masked word */
+ if (status == 0) {
+ switch (size) {
+ case sizeof(u8):
+ return word & 0xff;
+ case sizeof(u16):
+ return word & 0xffff;
+ case sizeof(u32):
+ return word;
+ default:
+ sdiodev->regfail = true;
+
+ }
+ }
+
+ /* otherwise, bad sdio access or invalid size */
+ brcmf_dbg(ERROR, "error reading addr 0x%04x size %d\n", addr, size);
+ return 0xFFFFFFFF;
+}
+
+u32 brcmf_sdcard_reg_write(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size,
+ u32 data)
+{
+ int status;
+ uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+ addr, size * 8, data);
+
+ if (bar0 != sdiodev->sbwad) {
+ err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
+ if (err)
+ return err;
+
+ sdiodev->sbwad = bar0;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ if (size == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ status =
+ brcmf_sdioh_request_word(sdiodev, SDIOH_WRITE, SDIO_FUNC_1,
+ addr, &data, size);
+ sdiodev->regfail = (status != 0);
+
+ if (status == 0)
+ return 0;
+
+ brcmf_dbg(ERROR, "error writing 0x%08x to addr 0x%04x size %d\n",
+ data, addr, size);
+ return 0xFFFFFFFF;
+}
+
+bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev)
+{
+ return sdiodev->regfail;
+}
+
+int
+brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags,
+ u8 *buf, uint nbytes, struct sk_buff *pkt)
+{
+ int status;
+ uint incr_fix;
+ uint width;
+ uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
+
+ /* Async not implemented yet */
+ if (flags & SDIO_REQ_ASYNC)
+ return -ENOTSUPP;
+
+ if (bar0 != sdiodev->sbwad) {
+ err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
+ if (err)
+ return err;
+
+ sdiodev->sbwad = bar0;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ if (width == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
+ fn, addr, width, nbytes, buf, pkt);
+
+ return status;
+}
+
+int
+brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt)
+{
+ uint incr_fix;
+ uint width;
+ uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
+
+ /* Async not implemented yet */
+ if (flags & SDIO_REQ_ASYNC)
+ return -ENOTSUPP;
+
+ if (bar0 != sdiodev->sbwad) {
+ err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
+ if (err)
+ return err;
+
+ sdiodev->sbwad = bar0;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ if (width == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
+ addr, width, nbytes, buf, pkt);
+}
+
+int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
+ u8 *buf, uint nbytes)
+{
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
+ (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+ addr, 4, nbytes, buf, NULL);
+}
+
+int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
+{
+ char t_func = (char)fn;
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* issue abort cmd52 command through F0 */
+ brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
+ SDIO_CCCR_ABORT, &t_func);
+
+ brcmf_dbg(TRACE, "Exit\n");
+ return 0;
+}
+
+int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+{
+ u32 regs = 0;
+ int ret = 0;
+
+ ret = brcmf_sdioh_attach(sdiodev);
+ if (ret)
+ goto out;
+
+ regs = SI_ENUM_BASE;
+
+ /* Report the BAR, to fix if needed */
+ sdiodev->sbwad = SI_ENUM_BASE;
+
+ /* try to attach to the target device */
+ sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev);
+ if (!sdiodev->bus) {
+ brcmf_dbg(ERROR, "device attach failed\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+out:
+ if (ret)
+ brcmf_sdio_remove(sdiodev);
+
+ return ret;
+}
+EXPORT_SYMBOL(brcmf_sdio_probe);
+
+int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
+{
+ if (sdiodev->bus) {
+ brcmf_sdbrcm_disconnect(sdiodev->bus);
+ sdiodev->bus = NULL;
+ }
+
+ brcmf_sdioh_detach(sdiodev);
+
+ sdiodev->sbwad = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(brcmf_sdio_remove);
+
+void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable)
+{
+ if (enable)
+ brcmf_sdbrcm_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
+ else
+ brcmf_sdbrcm_wd_timer(sdiodev->bus, 0);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
new file mode 100644
index 00000000000..bbaeb2d5c93
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -0,0 +1,626 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/card.h>
+#include <linux/suspend.h>
+#include <linux/errno.h>
+#include <linux/sched.h> /* request_irq() */
+#include <linux/module.h>
+#include <net/cfg80211.h>
+
+#include <defs.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include "sdio_host.h"
+#include "dhd.h"
+#include "dhd_dbg.h"
+#include "wl_cfg80211.h"
+
+#define SDIO_VENDOR_ID_BROADCOM 0x02d0
+
+#define DMA_ALIGN_MASK 0x03
+
+#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+
+#define SDIO_FUNC1_BLOCKSIZE 64
+#define SDIO_FUNC2_BLOCKSIZE 512
+
+/* devices we support, null terminated */
+static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
+ { /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
+
+static bool
+brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
+{
+ bool is_err = false;
+#ifdef CONFIG_PM_SLEEP
+ is_err = atomic_read(&sdiodev->suspend);
+#endif
+ return is_err;
+}
+
+static void
+brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq)
+{
+#ifdef CONFIG_PM_SLEEP
+ int retry = 0;
+ while (atomic_read(&sdiodev->suspend) && retry++ != 30)
+ wait_event_timeout(*wq, false, HZ/100);
+#endif
+}
+
+static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
+ uint regaddr, u8 *byte)
+{
+ struct sdio_func *sdfunc = sdiodev->func[0];
+ int err_ret;
+
+ /*
+ * Can only directly write to some F0 registers.
+ * Handle F2 enable/disable and Abort command
+ * as a special case.
+ */
+ if (regaddr == SDIO_CCCR_IOEx) {
+ sdfunc = sdiodev->func[2];
+ if (sdfunc) {
+ sdio_claim_host(sdfunc);
+ if (*byte & SDIO_FUNC_ENABLE_2) {
+ /* Enable Function 2 */
+ err_ret = sdio_enable_func(sdfunc);
+ if (err_ret)
+ brcmf_dbg(ERROR,
+ "enable F2 failed:%d\n",
+ err_ret);
+ } else {
+ /* Disable Function 2 */
+ err_ret = sdio_disable_func(sdfunc);
+ if (err_ret)
+ brcmf_dbg(ERROR,
+ "Disable F2 failed:%d\n",
+ err_ret);
+ }
+ sdio_release_host(sdfunc);
+ }
+ } else if (regaddr == SDIO_CCCR_ABORT) {
+ sdio_claim_host(sdfunc);
+ sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
+ sdio_release_host(sdfunc);
+ } else if (regaddr < 0xF0) {
+ brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr);
+ err_ret = -EPERM;
+ } else {
+ sdio_claim_host(sdfunc);
+ sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
+ sdio_release_host(sdfunc);
+ }
+
+ return err_ret;
+}
+
+int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
+ uint regaddr, u8 *byte)
+{
+ int err_ret;
+
+ brcmf_dbg(INFO, "rw=%d, func=%d, addr=0x%05x\n", rw, func, regaddr);
+
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_byte_wait);
+ if (brcmf_pm_resume_error(sdiodev))
+ return -EIO;
+
+ if (rw && func == 0) {
+ /* handle F0 separately */
+ err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte);
+ } else {
+ sdio_claim_host(sdiodev->func[func]);
+ if (rw) /* CMD52 Write */
+ sdio_writeb(sdiodev->func[func], *byte, regaddr,
+ &err_ret);
+ else if (func == 0) {
+ *byte = sdio_f0_readb(sdiodev->func[func], regaddr,
+ &err_ret);
+ } else {
+ *byte = sdio_readb(sdiodev->func[func], regaddr,
+ &err_ret);
+ }
+ sdio_release_host(sdiodev->func[func]);
+ }
+
+ if (err_ret)
+ brcmf_dbg(ERROR, "Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+ rw ? "write" : "read", func, regaddr, *byte, err_ret);
+
+ return err_ret;
+}
+
+int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
+ uint rw, uint func, uint addr, u32 *word,
+ uint nbytes)
+{
+ int err_ret = -EIO;
+
+ if (func == 0) {
+ brcmf_dbg(ERROR, "Only CMD52 allowed to F0\n");
+ return -EINVAL;
+ }
+
+ brcmf_dbg(INFO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+ rw, func, addr, nbytes);
+
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
+ if (brcmf_pm_resume_error(sdiodev))
+ return -EIO;
+ /* Claim host controller */
+ sdio_claim_host(sdiodev->func[func]);
+
+ if (rw) { /* CMD52 Write */
+ if (nbytes == 4)
+ sdio_writel(sdiodev->func[func], *word, addr,
+ &err_ret);
+ else if (nbytes == 2)
+ sdio_writew(sdiodev->func[func], (*word & 0xFFFF),
+ addr, &err_ret);
+ else
+ brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes);
+ } else { /* CMD52 Read */
+ if (nbytes == 4)
+ *word = sdio_readl(sdiodev->func[func], addr, &err_ret);
+ else if (nbytes == 2)
+ *word = sdio_readw(sdiodev->func[func], addr,
+ &err_ret) & 0xFFFF;
+ else
+ brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes);
+ }
+
+ /* Release host controller */
+ sdio_release_host(sdiodev->func[func]);
+
+ if (err_ret)
+ brcmf_dbg(ERROR, "Failed to %s word, Err: 0x%08x\n",
+ rw ? "write" : "read", err_ret);
+
+ return err_ret;
+}
+
+static int
+brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+ uint write, uint func, uint addr,
+ struct sk_buff *pkt)
+{
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
+ u32 SGCount = 0;
+ int err_ret = 0;
+
+ struct sk_buff *pnext;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait);
+ if (brcmf_pm_resume_error(sdiodev))
+ return -EIO;
+
+ /* Claim host controller */
+ sdio_claim_host(sdiodev->func[func]);
+ for (pnext = pkt; pnext; pnext = pnext->next) {
+ uint pkt_len = pnext->len;
+ pkt_len += 3;
+ pkt_len &= 0xFFFFFFFC;
+
+ if ((write) && (!fifo)) {
+ err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+ ((u8 *) (pnext->data)),
+ pkt_len);
+ } else if (write) {
+ err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+ ((u8 *) (pnext->data)),
+ pkt_len);
+ } else if (fifo) {
+ err_ret = sdio_readsb(sdiodev->func[func],
+ ((u8 *) (pnext->data)),
+ addr, pkt_len);
+ } else {
+ err_ret = sdio_memcpy_fromio(sdiodev->func[func],
+ ((u8 *) (pnext->data)),
+ addr, pkt_len);
+ }
+
+ if (err_ret) {
+ brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+ write ? "TX" : "RX", pnext, SGCount, addr,
+ pkt_len, err_ret);
+ } else {
+ brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+ write ? "TX" : "RX", pnext, SGCount, addr,
+ pkt_len);
+ }
+
+ if (!fifo)
+ addr += pkt_len;
+ SGCount++;
+
+ }
+
+ /* Release host controller */
+ sdio_release_host(sdiodev->func[func]);
+
+ brcmf_dbg(TRACE, "Exit\n");
+ return err_ret;
+}
+
+/*
+ * This function takes a buffer or packet, and fixes everything up
+ * so that in the end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer,
+ * and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain.
+ * If it is a packet chain, then all the packets in the chain
+ * must be properly aligned.
+ *
+ * If the packet data is not aligned, then there may only be
+ * one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
+int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
+ uint fix_inc, uint write, uint func, uint addr,
+ uint reg_width, uint buflen_u, u8 *buffer,
+ struct sk_buff *pkt)
+{
+ int Status;
+ struct sk_buff *mypkt = NULL;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+ if (brcmf_pm_resume_error(sdiodev))
+ return -EIO;
+ /* Case 1: we don't have a packet. */
+ if (pkt == NULL) {
+ brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n",
+ write ? "TX" : "RX", buflen_u);
+ mypkt = brcmu_pkt_buf_get_skb(buflen_u);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ buflen_u);
+ return -EIO;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write)
+ memcpy(mypkt->data, buffer, buflen_u);
+
+ Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
+ func, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!write)
+ memcpy(buffer, mypkt->data, buflen_u);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ } else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
+ /*
+ * Case 2: We have a packet, but it is unaligned.
+ * In this case, we cannot have a chain (pkt->next == NULL)
+ */
+ brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n",
+ write ? "TX" : "RX", pkt->len);
+ mypkt = brcmu_pkt_buf_get_skb(pkt->len);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ pkt->len);
+ return -EIO;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write)
+ memcpy(mypkt->data, pkt->data, pkt->len);
+
+ Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
+ func, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!write)
+ memcpy(pkt->data, mypkt->data, mypkt->len);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ } else { /* case 3: We have a packet and
+ it is aligned. */
+ brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n",
+ write ? "Tx" : "Rx");
+ Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
+ func, addr, pkt);
+ }
+
+ return Status;
+}
+
+/* Read client card reg */
+static int
+brcmf_sdioh_card_regread(struct brcmf_sdio_dev *sdiodev, int func, u32 regaddr,
+ int regsize, u32 *data)
+{
+
+ if ((func == 0) || (regsize == 1)) {
+ u8 temp = 0;
+
+ brcmf_sdioh_request_byte(sdiodev, SDIOH_READ, func, regaddr,
+ &temp);
+ *data = temp;
+ *data &= 0xff;
+ brcmf_dbg(DATA, "byte read data=0x%02x\n", *data);
+ } else {
+ brcmf_sdioh_request_word(sdiodev, SDIOH_READ, func, regaddr,
+ data, regsize);
+ if (regsize == 2)
+ *data &= 0xffff;
+
+ brcmf_dbg(DATA, "word read data=0x%08x\n", *data);
+ }
+
+ return SUCCESS;
+}
+
+static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
+{
+ /* read 24 bits and return valid 17 bit addr */
+ int i;
+ u32 scratch, regdata;
+ __le32 scratch_le;
+ u8 *ptr = (u8 *)&scratch_le;
+
+ for (i = 0; i < 3; i++) {
+ if ((brcmf_sdioh_card_regread(sdiodev, 0, regaddr, 1,
+ &regdata)) != SUCCESS)
+ brcmf_dbg(ERROR, "Can't read!\n");
+
+ *ptr++ = (u8) regdata;
+ regaddr++;
+ }
+
+ /* Only the lower 17-bits are valid */
+ scratch = le32_to_cpu(scratch_le);
+ scratch &= 0x0001FFFF;
+ return scratch;
+}
+
+static int brcmf_sdioh_enablefuncs(struct brcmf_sdio_dev *sdiodev)
+{
+ int err_ret;
+ u32 fbraddr;
+ u8 func;
+
+ brcmf_dbg(TRACE, "\n");
+
+ /* Get the Card's common CIS address */
+ sdiodev->func_cis_ptr[0] = brcmf_sdioh_get_cisaddr(sdiodev,
+ SDIO_CCCR_CIS);
+ brcmf_dbg(INFO, "Card's Common CIS Ptr = 0x%x\n",
+ sdiodev->func_cis_ptr[0]);
+
+ /* Get the Card's function CIS (for each function) */
+ for (fbraddr = SDIO_FBR_BASE(1), func = 1;
+ func <= sdiodev->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+ sdiodev->func_cis_ptr[func] =
+ brcmf_sdioh_get_cisaddr(sdiodev, SDIO_FBR_CIS + fbraddr);
+ brcmf_dbg(INFO, "Function %d CIS Ptr = 0x%x\n",
+ func, sdiodev->func_cis_ptr[func]);
+ }
+
+ /* Enable Function 1 */
+ sdio_claim_host(sdiodev->func[1]);
+ err_ret = sdio_enable_func(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
+ if (err_ret)
+ brcmf_dbg(ERROR, "Failed to enable F1 Err: 0x%08x\n", err_ret);
+
+ return false;
+}
+
+/*
+ * Public entry points & extern's
+ */
+int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev)
+{
+ int err_ret = 0;
+
+ brcmf_dbg(TRACE, "\n");
+
+ sdiodev->num_funcs = 2;
+
+ sdio_claim_host(sdiodev->func[1]);
+ err_ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
+ sdio_release_host(sdiodev->func[1]);
+ if (err_ret) {
+ brcmf_dbg(ERROR, "Failed to set F1 blocksize\n");
+ goto out;
+ }
+
+ sdio_claim_host(sdiodev->func[2]);
+ err_ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
+ sdio_release_host(sdiodev->func[2]);
+ if (err_ret) {
+ brcmf_dbg(ERROR, "Failed to set F2 blocksize\n");
+ goto out;
+ }
+
+ brcmf_sdioh_enablefuncs(sdiodev);
+
+out:
+ brcmf_dbg(TRACE, "Done\n");
+ return err_ret;
+}
+
+void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev)
+{
+ brcmf_dbg(TRACE, "\n");
+
+ /* Disable Function 2 */
+ sdio_claim_host(sdiodev->func[2]);
+ sdio_disable_func(sdiodev->func[2]);
+ sdio_release_host(sdiodev->func[2]);
+
+ /* Disable Function 1 */
+ sdio_claim_host(sdiodev->func[1]);
+ sdio_disable_func(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
+
+}
+
+static int brcmf_ops_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret = 0;
+ struct brcmf_sdio_dev *sdiodev;
+ brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(TRACE, "func->class=%x\n", func->class);
+ brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
+ brcmf_dbg(TRACE, "sdio_device: 0x%04x\n", func->device);
+ brcmf_dbg(TRACE, "Function#: 0x%04x\n", func->num);
+
+ if (func->num == 1) {
+ if (dev_get_drvdata(&func->card->dev)) {
+ brcmf_dbg(ERROR, "card private drvdata occupied\n");
+ return -ENXIO;
+ }
+ sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
+ if (!sdiodev)
+ return -ENOMEM;
+ sdiodev->func[0] = func->card->sdio_func[0];
+ sdiodev->func[1] = func;
+ dev_set_drvdata(&func->card->dev, sdiodev);
+
+ atomic_set(&sdiodev->suspend, false);
+ init_waitqueue_head(&sdiodev->request_byte_wait);
+ init_waitqueue_head(&sdiodev->request_word_wait);
+ init_waitqueue_head(&sdiodev->request_packet_wait);
+ init_waitqueue_head(&sdiodev->request_buffer_wait);
+ }
+
+ if (func->num == 2) {
+ sdiodev = dev_get_drvdata(&func->card->dev);
+ if ((!sdiodev) || (sdiodev->func[1]->card != func->card))
+ return -ENODEV;
+ sdiodev->func[2] = func;
+
+ brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
+ ret = brcmf_sdio_probe(sdiodev);
+ }
+
+ return ret;
+}
+
+static void brcmf_ops_sdio_remove(struct sdio_func *func)
+{
+ struct brcmf_sdio_dev *sdiodev;
+ brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(INFO, "func->class=%x\n", func->class);
+ brcmf_dbg(INFO, "sdio_vendor: 0x%04x\n", func->vendor);
+ brcmf_dbg(INFO, "sdio_device: 0x%04x\n", func->device);
+ brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
+
+ if (func->num == 2) {
+ sdiodev = dev_get_drvdata(&func->card->dev);
+ brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
+ brcmf_sdio_remove(sdiodev);
+ dev_set_drvdata(&func->card->dev, NULL);
+ kfree(sdiodev);
+ }
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcmf_sdio_suspend(struct device *dev)
+{
+ mmc_pm_flag_t sdio_flags;
+ struct brcmf_sdio_dev *sdiodev;
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ int ret = 0;
+
+ brcmf_dbg(TRACE, "\n");
+
+ sdiodev = dev_get_drvdata(&func->card->dev);
+
+ atomic_set(&sdiodev->suspend, true);
+
+ sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
+ if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+ brcmf_dbg(ERROR, "Host can't keep power while suspended\n");
+ return -EINVAL;
+ }
+
+ ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
+ if (ret) {
+ brcmf_dbg(ERROR, "Failed to set pm_flags\n");
+ return ret;
+ }
+
+ brcmf_sdio_wdtmr_enable(sdiodev, false);
+
+ return ret;
+}
+
+static int brcmf_sdio_resume(struct device *dev)
+{
+ struct brcmf_sdio_dev *sdiodev;
+ struct sdio_func *func = dev_to_sdio_func(dev);
+
+ sdiodev = dev_get_drvdata(&func->card->dev);
+ brcmf_sdio_wdtmr_enable(sdiodev, true);
+ atomic_set(&sdiodev->suspend, false);
+ return 0;
+}
+
+static const struct dev_pm_ops brcmf_sdio_pm_ops = {
+ .suspend = brcmf_sdio_suspend,
+ .resume = brcmf_sdio_resume,
+};
+#endif /* CONFIG_PM_SLEEP */
+
+static struct sdio_driver brcmf_sdmmc_driver = {
+ .probe = brcmf_ops_sdio_probe,
+ .remove = brcmf_ops_sdio_remove,
+ .name = "brcmfmac",
+ .id_table = brcmf_sdmmc_ids,
+#ifdef CONFIG_PM_SLEEP
+ .drv = {
+ .pm = &brcmf_sdio_pm_ops,
+ },
+#endif /* CONFIG_PM_SLEEP */
+};
+
+/* bus register interface */
+int brcmf_bus_register(void)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ return sdio_register_driver(&brcmf_sdmmc_driver);
+}
+
+void brcmf_bus_unregister(void)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
new file mode 100644
index 00000000000..4645766b407
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -0,0 +1,776 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _BRCMF_H_
+#define _BRCMF_H_
+
+#define BRCMF_VERSION_STR "4.218.248.5"
+
+/*******************************************************************************
+ * IO codes that are interpreted by dongle firmware
+ ******************************************************************************/
+#define BRCMF_C_UP 2
+#define BRCMF_C_SET_PROMISC 10
+#define BRCMF_C_GET_RATE 12
+#define BRCMF_C_GET_INFRA 19
+#define BRCMF_C_SET_INFRA 20
+#define BRCMF_C_GET_AUTH 21
+#define BRCMF_C_SET_AUTH 22
+#define BRCMF_C_GET_BSSID 23
+#define BRCMF_C_GET_SSID 25
+#define BRCMF_C_SET_SSID 26
+#define BRCMF_C_GET_CHANNEL 29
+#define BRCMF_C_GET_SRL 31
+#define BRCMF_C_GET_LRL 33
+#define BRCMF_C_GET_RADIO 37
+#define BRCMF_C_SET_RADIO 38
+#define BRCMF_C_GET_PHYTYPE 39
+#define BRCMF_C_SET_KEY 45
+#define BRCMF_C_SET_PASSIVE_SCAN 49
+#define BRCMF_C_SCAN 50
+#define BRCMF_C_SCAN_RESULTS 51
+#define BRCMF_C_DISASSOC 52
+#define BRCMF_C_REASSOC 53
+#define BRCMF_C_SET_ROAM_TRIGGER 55
+#define BRCMF_C_SET_ROAM_DELTA 57
+#define BRCMF_C_GET_DTIMPRD 77
+#define BRCMF_C_SET_COUNTRY 84
+#define BRCMF_C_GET_PM 85
+#define BRCMF_C_SET_PM 86
+#define BRCMF_C_GET_AP 117
+#define BRCMF_C_SET_AP 118
+#define BRCMF_C_GET_RSSI 127
+#define BRCMF_C_GET_WSEC 133
+#define BRCMF_C_SET_WSEC 134
+#define BRCMF_C_GET_PHY_NOISE 135
+#define BRCMF_C_GET_BSS_INFO 136
+#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
+#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
+#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201
+#define BRCMF_C_GET_VALID_CHANNELS 217
+#define BRCMF_C_GET_KEY_PRIMARY 235
+#define BRCMF_C_SET_KEY_PRIMARY 236
+#define BRCMF_C_SET_SCAN_PASSIVE_TIME 258
+#define BRCMF_C_GET_VAR 262
+#define BRCMF_C_SET_VAR 263
+
+/* phy types (returned by WLC_GET_PHYTPE) */
+#define WLC_PHY_TYPE_A 0
+#define WLC_PHY_TYPE_B 1
+#define WLC_PHY_TYPE_G 2
+#define WLC_PHY_TYPE_N 4
+#define WLC_PHY_TYPE_LP 5
+#define WLC_PHY_TYPE_SSN 6
+#define WLC_PHY_TYPE_HT 7
+#define WLC_PHY_TYPE_LCN 8
+#define WLC_PHY_TYPE_NULL 0xf
+
+#define BRCMF_EVENTING_MASK_LEN 16
+
+#define TOE_TX_CSUM_OL 0x00000001
+#define TOE_RX_CSUM_OL 0x00000002
+
+#define BRCMF_BSS_INFO_VERSION 108 /* current ver of brcmf_bss_info struct */
+
+/* size of brcmf_scan_params not including variable length array */
+#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
+
+/* masks for channel and ssid count */
+#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
+
+#define BRCMF_SCAN_ACTION_START 1
+#define BRCMF_SCAN_ACTION_CONTINUE 2
+#define WL_SCAN_ACTION_ABORT 3
+
+#define BRCMF_ISCAN_REQ_VERSION 1
+
+/* brcmf_iscan_results status values */
+#define BRCMF_SCAN_RESULTS_SUCCESS 0
+#define BRCMF_SCAN_RESULTS_PARTIAL 1
+#define BRCMF_SCAN_RESULTS_PENDING 2
+#define BRCMF_SCAN_RESULTS_ABORTED 3
+#define BRCMF_SCAN_RESULTS_NO_MEM 4
+
+/* Indicates this key is using soft encrypt */
+#define WL_SOFT_KEY (1 << 0)
+/* primary (ie tx) key */
+#define BRCMF_PRIMARY_KEY (1 << 1)
+/* Reserved for backward compat */
+#define WL_KF_RES_4 (1 << 4)
+/* Reserved for backward compat */
+#define WL_KF_RES_5 (1 << 5)
+/* Indicates a group key for a IBSS PEER */
+#define WL_IBSS_PEER_GROUP_KEY (1 << 6)
+
+/* For supporting multiple interfaces */
+#define BRCMF_MAX_IFS 16
+#define BRCMF_DEL_IF -0xe
+#define BRCMF_BAD_IF -0xf
+
+#define DOT11_BSSTYPE_ANY 2
+#define DOT11_MAX_DEFAULT_KEYS 4
+
+#define BRCMF_EVENT_MSG_LINK 0x01
+#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
+#define BRCMF_EVENT_MSG_GROUP 0x04
+
+struct brcmf_event_msg {
+ __be16 version;
+ __be16 flags;
+ __be32 event_type;
+ __be32 status;
+ __be32 reason;
+ __be32 auth_type;
+ __be32 datalen;
+ u8 addr[ETH_ALEN];
+ char ifname[IFNAMSIZ];
+} __packed;
+
+struct brcm_ethhdr {
+ u16 subtype;
+ u16 length;
+ u8 version;
+ u8 oui[3];
+ u16 usr_subtype;
+} __packed;
+
+struct brcmf_event {
+ struct ethhdr eth;
+ struct brcm_ethhdr hdr;
+ struct brcmf_event_msg msg;
+} __packed;
+
+struct dngl_stats {
+ unsigned long rx_packets; /* total packets received */
+ unsigned long tx_packets; /* total packets transmitted */
+ unsigned long rx_bytes; /* total bytes received */
+ unsigned long tx_bytes; /* total bytes transmitted */
+ unsigned long rx_errors; /* bad packets received */
+ unsigned long tx_errors; /* packet transmit problems */
+ unsigned long rx_dropped; /* packets dropped by dongle */
+ unsigned long tx_dropped; /* packets dropped by dongle */
+ unsigned long multicast; /* multicast packets received */
+};
+
+/* event codes sent by the dongle to this driver */
+#define BRCMF_E_SET_SSID 0
+#define BRCMF_E_JOIN 1
+#define BRCMF_E_START 2
+#define BRCMF_E_AUTH 3
+#define BRCMF_E_AUTH_IND 4
+#define BRCMF_E_DEAUTH 5
+#define BRCMF_E_DEAUTH_IND 6
+#define BRCMF_E_ASSOC 7
+#define BRCMF_E_ASSOC_IND 8
+#define BRCMF_E_REASSOC 9
+#define BRCMF_E_REASSOC_IND 10
+#define BRCMF_E_DISASSOC 11
+#define BRCMF_E_DISASSOC_IND 12
+#define BRCMF_E_QUIET_START 13
+#define BRCMF_E_QUIET_END 14
+#define BRCMF_E_BEACON_RX 15
+#define BRCMF_E_LINK 16
+#define BRCMF_E_MIC_ERROR 17
+#define BRCMF_E_NDIS_LINK 18
+#define BRCMF_E_ROAM 19
+#define BRCMF_E_TXFAIL 20
+#define BRCMF_E_PMKID_CACHE 21
+#define BRCMF_E_RETROGRADE_TSF 22
+#define BRCMF_E_PRUNE 23
+#define BRCMF_E_AUTOAUTH 24
+#define BRCMF_E_EAPOL_MSG 25
+#define BRCMF_E_SCAN_COMPLETE 26
+#define BRCMF_E_ADDTS_IND 27
+#define BRCMF_E_DELTS_IND 28
+#define BRCMF_E_BCNSENT_IND 29
+#define BRCMF_E_BCNRX_MSG 30
+#define BRCMF_E_BCNLOST_MSG 31
+#define BRCMF_E_ROAM_PREP 32
+#define BRCMF_E_PFN_NET_FOUND 33
+#define BRCMF_E_PFN_NET_LOST 34
+#define BRCMF_E_RESET_COMPLETE 35
+#define BRCMF_E_JOIN_START 36
+#define BRCMF_E_ROAM_START 37
+#define BRCMF_E_ASSOC_START 38
+#define BRCMF_E_IBSS_ASSOC 39
+#define BRCMF_E_RADIO 40
+#define BRCMF_E_PSM_WATCHDOG 41
+#define BRCMF_E_PROBREQ_MSG 44
+#define BRCMF_E_SCAN_CONFIRM_IND 45
+#define BRCMF_E_PSK_SUP 46
+#define BRCMF_E_COUNTRY_CODE_CHANGED 47
+#define BRCMF_E_EXCEEDED_MEDIUM_TIME 48
+#define BRCMF_E_ICV_ERROR 49
+#define BRCMF_E_UNICAST_DECODE_ERROR 50
+#define BRCMF_E_MULTICAST_DECODE_ERROR 51
+#define BRCMF_E_TRACE 52
+#define BRCMF_E_IF 54
+#define BRCMF_E_RSSI 56
+#define BRCMF_E_PFN_SCAN_COMPLETE 57
+#define BRCMF_E_EXTLOG_MSG 58
+#define BRCMF_E_ACTION_FRAME 59
+#define BRCMF_E_ACTION_FRAME_COMPLETE 60
+#define BRCMF_E_PRE_ASSOC_IND 61
+#define BRCMF_E_PRE_REASSOC_IND 62
+#define BRCMF_E_CHANNEL_ADOPTED 63
+#define BRCMF_E_AP_STARTED 64
+#define BRCMF_E_DFS_AP_STOP 65
+#define BRCMF_E_DFS_AP_RESUME 66
+#define BRCMF_E_RESERVED1 67
+#define BRCMF_E_RESERVED2 68
+#define BRCMF_E_ESCAN_RESULT 69
+#define BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70
+#define BRCMF_E_DCS_REQUEST 73
+
+#define BRCMF_E_FIFO_CREDIT_MAP 74
+
+#define BRCMF_E_LAST 75
+
+#define BRCMF_E_STATUS_SUCCESS 0
+#define BRCMF_E_STATUS_FAIL 1
+#define BRCMF_E_STATUS_TIMEOUT 2
+#define BRCMF_E_STATUS_NO_NETWORKS 3
+#define BRCMF_E_STATUS_ABORT 4
+#define BRCMF_E_STATUS_NO_ACK 5
+#define BRCMF_E_STATUS_UNSOLICITED 6
+#define BRCMF_E_STATUS_ATTEMPT 7
+#define BRCMF_E_STATUS_PARTIAL 8
+#define BRCMF_E_STATUS_NEWSCAN 9
+#define BRCMF_E_STATUS_NEWASSOC 10
+#define BRCMF_E_STATUS_11HQUIET 11
+#define BRCMF_E_STATUS_SUPPRESS 12
+#define BRCMF_E_STATUS_NOCHANS 13
+#define BRCMF_E_STATUS_CS_ABORT 15
+#define BRCMF_E_STATUS_ERROR 16
+
+#define BRCMF_E_REASON_INITIAL_ASSOC 0
+#define BRCMF_E_REASON_LOW_RSSI 1
+#define BRCMF_E_REASON_DEAUTH 2
+#define BRCMF_E_REASON_DISASSOC 3
+#define BRCMF_E_REASON_BCNS_LOST 4
+#define BRCMF_E_REASON_MINTXRATE 9
+#define BRCMF_E_REASON_TXFAIL 10
+
+#define BRCMF_E_REASON_FAST_ROAM_FAILED 5
+#define BRCMF_E_REASON_DIRECTED_ROAM 6
+#define BRCMF_E_REASON_TSPEC_REJECTED 7
+#define BRCMF_E_REASON_BETTER_AP 8
+
+#define BRCMF_E_PRUNE_ENCR_MISMATCH 1
+#define BRCMF_E_PRUNE_BCAST_BSSID 2
+#define BRCMF_E_PRUNE_MAC_DENY 3
+#define BRCMF_E_PRUNE_MAC_NA 4
+#define BRCMF_E_PRUNE_REG_PASSV 5
+#define BRCMF_E_PRUNE_SPCT_MGMT 6
+#define BRCMF_E_PRUNE_RADAR 7
+#define BRCMF_E_RSN_MISMATCH 8
+#define BRCMF_E_PRUNE_NO_COMMON_RATES 9
+#define BRCMF_E_PRUNE_BASIC_RATES 10
+#define BRCMF_E_PRUNE_CIPHER_NA 12
+#define BRCMF_E_PRUNE_KNOWN_STA 13
+#define BRCMF_E_PRUNE_WDS_PEER 15
+#define BRCMF_E_PRUNE_QBSS_LOAD 16
+#define BRCMF_E_PRUNE_HOME_AP 17
+
+#define BRCMF_E_SUP_OTHER 0
+#define BRCMF_E_SUP_DECRYPT_KEY_DATA 1
+#define BRCMF_E_SUP_BAD_UCAST_WEP128 2
+#define BRCMF_E_SUP_BAD_UCAST_WEP40 3
+#define BRCMF_E_SUP_UNSUP_KEY_LEN 4
+#define BRCMF_E_SUP_PW_KEY_CIPHER 5
+#define BRCMF_E_SUP_MSG3_TOO_MANY_IE 6
+#define BRCMF_E_SUP_MSG3_IE_MISMATCH 7
+#define BRCMF_E_SUP_NO_INSTALL_FLAG 8
+#define BRCMF_E_SUP_MSG3_NO_GTK 9
+#define BRCMF_E_SUP_GRP_KEY_CIPHER 10
+#define BRCMF_E_SUP_GRP_MSG1_NO_GTK 11
+#define BRCMF_E_SUP_GTK_DECRYPT_FAIL 12
+#define BRCMF_E_SUP_SEND_FAIL 13
+#define BRCMF_E_SUP_DEAUTH 14
+
+#define BRCMF_E_IF_ADD 1
+#define BRCMF_E_IF_DEL 2
+#define BRCMF_E_IF_CHANGE 3
+
+#define BRCMF_E_IF_ROLE_STA 0
+#define BRCMF_E_IF_ROLE_AP 1
+#define BRCMF_E_IF_ROLE_WDS 2
+
+#define BRCMF_E_LINK_BCN_LOSS 1
+#define BRCMF_E_LINK_DISASSOC 2
+#define BRCMF_E_LINK_ASSOC_REC 3
+#define BRCMF_E_LINK_BSSCFG_DIS 4
+
+/* The level of bus communication with the dongle */
+enum brcmf_bus_state {
+ BRCMF_BUS_DOWN, /* Not ready for frame transfers */
+ BRCMF_BUS_LOAD, /* Download access only (CPU reset) */
+ BRCMF_BUS_DATA /* Ready for frame transfers */
+};
+
+/* Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+struct brcmf_pkt_filter_pattern_le {
+ /*
+ * Offset within received packet to start pattern matching.
+ * Offset '0' is the first byte of the ethernet header.
+ */
+ __le32 offset;
+ /* Size of the pattern. Bitmask must be the same size.*/
+ __le32 size_bytes;
+ /*
+ * Variable length mask and pattern data. mask starts at offset 0.
+ * Pattern immediately follows mask.
+ */
+ u8 mask_and_pattern[1];
+};
+
+/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+struct brcmf_pkt_filter_le {
+ __le32 id; /* Unique filter id, specified by app. */
+ __le32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */
+ __le32 negate_match; /* Negate the result of filter matches */
+ union { /* Filter definitions */
+ struct brcmf_pkt_filter_pattern_le pattern; /* Filter pattern */
+ } u;
+};
+
+/* IOVAR "pkt_filter_enable" parameter. */
+struct brcmf_pkt_filter_enable_le {
+ __le32 id; /* Unique filter id */
+ __le32 enable; /* Enable/disable bool */
+};
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in struct brcmf_scan_results)
+ */
+struct brcmf_bss_info {
+ __le32 version; /* version field */
+ __le32 length; /* byte length of data in this record,
+ * starting at version and including IEs
+ */
+ u8 BSSID[ETH_ALEN];
+ __le16 beacon_period; /* units are Kusec */
+ __le16 capability; /* Capability information */
+ u8 SSID_len;
+ u8 SSID[32];
+ struct {
+ __le32 count; /* # rates in this set */
+ u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
+ } rateset; /* supported rates */
+ __le16 chanspec; /* chanspec for bss */
+ __le16 atim_window; /* units are Kusec */
+ u8 dtim_period; /* DTIM period */
+ __le16 RSSI; /* receive signal strength (in dBm) */
+ s8 phy_noise; /* noise (in dBm) */
+
+ u8 n_cap; /* BSS is 802.11N Capable */
+ /* 802.11N BSS Capabilities (based on HT_CAP_*): */
+ __le32 nbss_cap;
+ u8 ctl_ch; /* 802.11N BSS control channel number */
+ __le32 reserved32[1]; /* Reserved for expansion of BSS properties */
+ u8 flags; /* flags */
+ u8 reserved[3]; /* Reserved for expansion of BSS properties */
+ u8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
+
+ __le16 ie_offset; /* offset at which IEs start, from beginning */
+ __le32 ie_length; /* byte length of Information Elements */
+ __le16 SNR; /* average SNR of during frame reception */
+ /* Add new fields here */
+ /* variable length Information Elements */
+};
+
+struct brcm_rateset_le {
+ /* # rates in this set */
+ __le32 count;
+ /* rates in 500kbps units w/hi bit set if basic */
+ u8 rates[WL_NUMRATES];
+};
+
+struct brcmf_ssid {
+ u32 SSID_len;
+ unsigned char SSID[32];
+};
+
+struct brcmf_ssid_le {
+ __le32 SSID_len;
+ unsigned char SSID[32];
+};
+
+struct brcmf_scan_params_le {
+ struct brcmf_ssid_le ssid_le; /* default: {0, ""} */
+ u8 bssid[ETH_ALEN]; /* default: bcast */
+ s8 bss_type; /* default: any,
+ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+ */
+ u8 scan_type; /* flags, 0 use default */
+ __le32 nprobes; /* -1 use default, number of probes per channel */
+ __le32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ __le32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ __le32 home_time; /* -1 use default, dwell time for the
+ * home channel between channel scans
+ */
+ __le32 channel_num; /* count of channels and ssids that follow
+ *
+ * low half is count of channels in
+ * channel_list, 0 means default (use all
+ * available channels)
+ *
+ * high half is entries in struct brcmf_ssid
+ * array that follows channel_list, aligned for
+ * s32 (4 bytes) meaning an odd channel count
+ * implies a 2-byte pad between end of
+ * channel_list and first ssid
+ *
+ * if ssid count is zero, single ssid in the
+ * fixed parameter portion is assumed, otherwise
+ * ssid in the fixed portion is ignored
+ */
+ __le16 channel_list[1]; /* list of chanspecs */
+};
+
+/* incremental scan struct */
+struct brcmf_iscan_params_le {
+ __le32 version;
+ __le16 action;
+ __le16 scan_duration;
+ struct brcmf_scan_params_le params_le;
+};
+
+struct brcmf_scan_results {
+ u32 buflen;
+ u32 version;
+ u32 count;
+ struct brcmf_bss_info bss_info[1];
+};
+
+struct brcmf_scan_results_le {
+ __le32 buflen;
+ __le32 version;
+ __le32 count;
+ struct brcmf_bss_info bss_info[1];
+};
+
+/* used for association with a specific BSSID and chanspec list */
+struct brcmf_assoc_params_le {
+ /* 00:00:00:00:00:00: broadcast scan */
+ u8 bssid[ETH_ALEN];
+ /* 0: all available channels, otherwise count of chanspecs in
+ * chanspec_list */
+ __le32 chanspec_num;
+ /* list of chanspecs */
+ __le16 chanspec_list[1];
+};
+
+/* used for join with or without a specific bssid and channel list */
+struct brcmf_join_params {
+ struct brcmf_ssid_le ssid_le;
+ struct brcmf_assoc_params_le params_le;
+};
+
+/* size of brcmf_scan_results not including variable length array */
+#define BRCMF_SCAN_RESULTS_FIXED_SIZE \
+ (sizeof(struct brcmf_scan_results) - sizeof(struct brcmf_bss_info))
+
+/* incremental scan results struct */
+struct brcmf_iscan_results {
+ union {
+ u32 status;
+ __le32 status_le;
+ };
+ union {
+ struct brcmf_scan_results results;
+ struct brcmf_scan_results_le results_le;
+ };
+};
+
+/* size of brcmf_iscan_results not including variable length array */
+#define BRCMF_ISCAN_RESULTS_FIXED_SIZE \
+ (BRCMF_SCAN_RESULTS_FIXED_SIZE + \
+ offsetof(struct brcmf_iscan_results, results))
+
+struct brcmf_wsec_key {
+ u32 index; /* key index */
+ u32 len; /* key length */
+ u8 data[WLAN_MAX_KEY_LEN]; /* key data */
+ u32 pad_1[18];
+ u32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+ u32 flags; /* misc flags */
+ u32 pad_2[3];
+ u32 iv_initialized; /* has IV been initialized already? */
+ u32 pad_3;
+ /* Rx IV */
+ struct {
+ u32 hi; /* upper 32 bits of IV */
+ u16 lo; /* lower 16 bits of IV */
+ } rxiv;
+ u32 pad_4[2];
+ u8 ea[ETH_ALEN]; /* per station */
+};
+
+/*
+ * dongle requires same struct as above but with fields in little endian order
+ */
+struct brcmf_wsec_key_le {
+ __le32 index; /* key index */
+ __le32 len; /* key length */
+ u8 data[WLAN_MAX_KEY_LEN]; /* key data */
+ __le32 pad_1[18];
+ __le32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+ __le32 flags; /* misc flags */
+ __le32 pad_2[3];
+ __le32 iv_initialized; /* has IV been initialized already? */
+ __le32 pad_3;
+ /* Rx IV */
+ struct {
+ __le32 hi; /* upper 32 bits of IV */
+ __le16 lo; /* lower 16 bits of IV */
+ } rxiv;
+ __le32 pad_4[2];
+ u8 ea[ETH_ALEN]; /* per station */
+};
+
+/* Used to get specific STA parameters */
+struct brcmf_scb_val_le {
+ __le32 val;
+ u8 ea[ETH_ALEN];
+};
+
+/* channel encoding */
+struct brcmf_channel_info_le {
+ __le32 hw_channel;
+ __le32 target_channel;
+ __le32 scan_channel;
+};
+
+/* Bus independent dongle command */
+struct brcmf_dcmd {
+ uint cmd; /* common dongle cmd definition */
+ void *buf; /* pointer to user buffer */
+ uint len; /* length of user buffer */
+ u8 set; /* get or set request (optional) */
+ uint used; /* bytes read or written (optional) */
+ uint needed; /* bytes needed (optional) */
+};
+
+/* Forward decls for struct brcmf_pub (see below) */
+struct brcmf_bus; /* device bus info */
+struct brcmf_proto; /* device communication protocol info */
+struct brcmf_info; /* device driver info */
+struct brcmf_cfg80211_dev; /* cfg80211 device info */
+
+/* Common structure for module and instance linkage */
+struct brcmf_pub {
+ /* Linkage ponters */
+ struct brcmf_bus *bus;
+ struct brcmf_proto *prot;
+ struct brcmf_info *info;
+ struct brcmf_cfg80211_dev *config;
+
+ /* Internal brcmf items */
+ bool up; /* Driver up/down (to OS) */
+ bool txoff; /* Transmit flow-controlled */
+ enum brcmf_bus_state busstate;
+ uint hdrlen; /* Total BRCMF header length (proto + bus) */
+ uint maxctl; /* Max size rxctl request from proto to bus */
+ uint rxsz; /* Rx buffer size bus module should use */
+ u8 wme_dp; /* wme discard priority */
+
+ /* Dongle media info */
+ bool iswl; /* Dongle-resident driver is wl */
+ unsigned long drv_version; /* Version of dongle-resident driver */
+ u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
+ struct dngl_stats dstats; /* Stats for dongle-based data */
+
+ /* Additional stats for the bus level */
+
+ /* Data packets sent to dongle */
+ unsigned long tx_packets;
+ /* Multicast data packets sent to dongle */
+ unsigned long tx_multicast;
+ /* Errors in sending data to dongle */
+ unsigned long tx_errors;
+ /* Control packets sent to dongle */
+ unsigned long tx_ctlpkts;
+ /* Errors sending control frames to dongle */
+ unsigned long tx_ctlerrs;
+ /* Packets sent up the network interface */
+ unsigned long rx_packets;
+ /* Multicast packets sent up the network interface */
+ unsigned long rx_multicast;
+ /* Errors processing rx data packets */
+ unsigned long rx_errors;
+ /* Control frames processed from dongle */
+ unsigned long rx_ctlpkts;
+
+ /* Errors in processing rx control frames */
+ unsigned long rx_ctlerrs;
+ /* Packets dropped locally (no memory) */
+ unsigned long rx_dropped;
+ /* Packets flushed due to unscheduled sendup thread */
+ unsigned long rx_flushed;
+ /* Number of times dpc scheduled by watchdog timer */
+ unsigned long wd_dpc_sched;
+
+ /* Number of packets where header read-ahead was used. */
+ unsigned long rx_readahead_cnt;
+ /* Number of tx packets we had to realloc for headroom */
+ unsigned long tx_realloc;
+ /* Number of flow control pkts recvd */
+ unsigned long fc_packets;
+
+ /* Last error return */
+ int bcmerror;
+ uint tickcnt;
+
+ /* Last error from dongle */
+ int dongle_error;
+
+ /* Suspend disable flag flag */
+ int suspend_disable_flag; /* "1" to disable all extra powersaving
+ during suspend */
+ int in_suspend; /* flag set to 1 when early suspend called */
+ int dtim_skip; /* dtim skip , default 0 means wake each dtim */
+
+ /* Pkt filter defination */
+ char *pktfilter[100];
+ int pktfilter_count;
+
+ u8 country_code[BRCM_CNTRY_BUF_SZ];
+ char eventmask[BRCMF_EVENTING_MASK_LEN];
+
+};
+
+struct brcmf_if_event {
+ u8 ifidx;
+ u8 action;
+ u8 flags;
+ u8 bssidx;
+};
+
+struct bcmevent_name {
+ uint event;
+ const char *name;
+};
+
+extern const struct bcmevent_name bcmevent_names[];
+
+extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
+ char *buf, uint len);
+
+/* Indication from bus module regarding presence/insertion of dongle.
+ * Return struct brcmf_pub pointer, used as handle to OS module in later calls.
+ * Returned structure should have bus and prot pointers filled in.
+ * bus_hdrlen specifies required headroom for bus module header.
+ */
+extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus,
+ uint bus_hdrlen);
+extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
+extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
+
+extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
+
+/* Indication from bus module regarding removal/absence of dongle */
+extern void brcmf_detach(struct brcmf_pub *drvr);
+
+/* Indication from bus module to change flow-control state */
+extern void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool on);
+
+extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
+ struct sk_buff *pkt, int prec);
+
+/* Receive frame for delivery to OS. Callee disposes of rxp. */
+extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
+ struct sk_buff *rxp, int numpkt);
+
+/* Return pointer to interface name */
+extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
+
+/* Notify tx completion */
+extern void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp,
+ bool success);
+
+/* Query dongle */
+extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
+ uint cmd, void *buf, uint len);
+
+/* OS independent layer functions */
+extern int brcmf_os_proto_block(struct brcmf_pub *drvr);
+extern int brcmf_os_proto_unblock(struct brcmf_pub *drvr);
+#ifdef BCMDBG
+extern int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size);
+#endif /* BCMDBG */
+
+extern int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name);
+extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx,
+ void *pktdata, struct brcmf_event_msg *,
+ void **data_ptr);
+
+extern void brcmf_c_init(void);
+
+extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx,
+ struct net_device *ndev, char *name, u8 *mac_addr,
+ u32 flags, u8 bssidx);
+extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx);
+
+/* Send packet to dongle via data channel */
+extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,\
+ struct sk_buff *pkt);
+
+extern int brcmf_bus_start(struct brcmf_pub *drvr);
+
+extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
+extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
+ int enable, int master_mode);
+
+#define BRCMF_DCMD_SMLEN 256 /* "small" cmd buffer required */
+#define BRCMF_DCMD_MEDLEN 1536 /* "med" cmd buffer required */
+#define BRCMF_DCMD_MAXLEN 8192 /* max length cmd buffer required */
+
+/* message levels */
+#define BRCMF_ERROR_VAL 0x0001
+#define BRCMF_TRACE_VAL 0x0002
+#define BRCMF_INFO_VAL 0x0004
+#define BRCMF_DATA_VAL 0x0008
+#define BRCMF_CTL_VAL 0x0010
+#define BRCMF_TIMER_VAL 0x0020
+#define BRCMF_HDRS_VAL 0x0040
+#define BRCMF_BYTES_VAL 0x0080
+#define BRCMF_INTR_VAL 0x0100
+#define BRCMF_GLOM_VAL 0x0400
+#define BRCMF_EVENT_VAL 0x0800
+#define BRCMF_BTA_VAL 0x1000
+#define BRCMF_ISCAN_VAL 0x2000
+
+/* Enter idle immediately (no timeout) */
+#define BRCMF_IDLE_IMMEDIATE (-1)
+#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
+ when idle */
+#define BRCMF_IDLE_INTERVAL 1
+
+#endif /* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
new file mode 100644
index 00000000000..a249407c9a1
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCMF_BUS_H_
+#define _BRCMF_BUS_H_
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#define BRCMF_SDALIGN (1 << 6)
+
+/* watchdog polling interval in ms */
+#define BRCMF_WD_POLL_MS 10
+
+/*
+ * Exported from brcmf bus module (brcmf_usb, brcmf_sdio)
+ */
+
+/* Indicate (dis)interest in finding dongles. */
+extern int brcmf_bus_register(void);
+extern void brcmf_bus_unregister(void);
+
+/* obtain linux device object providing bus function */
+extern struct device *brcmf_bus_get_device(struct brcmf_bus *bus);
+
+/* Stop bus module: clear pending frames, disable data flow */
+extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus);
+
+/* Initialize bus module: prepare for communication w/dongle */
+extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr);
+
+/* Send a data frame to the dongle. Callee disposes of txp. */
+extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp);
+
+/* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+extern int
+brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+
+extern int
+brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+
+extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick);
+
+#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
new file mode 100644
index 00000000000..e34c5c3d1d5
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*******************************************************************************
+ * Communicates with the dongle by using dcmd codes.
+ * For certain dcmd codes, the dongle interprets string data from the host.
+ ******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <defs.h>
+
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "dhd.h"
+#include "dhd_proto.h"
+#include "dhd_bus.h"
+#include "dhd_dbg.h"
+
+struct brcmf_proto_cdc_dcmd {
+ __le32 cmd; /* dongle command value */
+ __le32 len; /* lower 16: output buflen;
+ * upper 16: input buflen (excludes header) */
+ __le32 flags; /* flag defns given below */
+ __le32 status; /* status code returned from the device */
+};
+
+/* Max valid buffer size that can be sent to the dongle */
+#define CDC_MAX_MSG_SIZE (ETH_FRAME_LEN+ETH_FCS_LEN)
+
+/* CDC flag definitions */
+#define CDC_DCMD_ERROR 0x01 /* 1=cmd failed */
+#define CDC_DCMD_SET 0x02 /* 0=get, 1=set cmd */
+#define CDC_DCMD_IF_MASK 0xF000 /* I/F index */
+#define CDC_DCMD_IF_SHIFT 12
+#define CDC_DCMD_ID_MASK 0xFFFF0000 /* id an cmd pairing */
+#define CDC_DCMD_ID_SHIFT 16 /* ID Mask shift bits */
+#define CDC_DCMD_ID(flags) \
+ (((flags) & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT)
+
+/*
+ * BDC header - Broadcom specific extension of CDC.
+ * Used on data packets to convey priority across USB.
+ */
+#define BDC_HEADER_LEN 4
+#define BDC_PROTO_VER 1 /* Protocol version */
+#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
+#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
+#define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */
+#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums */
+#define BDC_PRIORITY_MASK 0x7
+#define BDC_FLAG2_IF_MASK 0x0f /* packet rx interface in APSTA */
+#define BDC_FLAG2_IF_SHIFT 0
+
+#define BDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
+#define BDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | \
+ ((idx) << BDC_FLAG2_IF_SHIFT)))
+
+struct brcmf_proto_bdc_header {
+ u8 flags;
+ u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */
+ u8 flags2;
+ u8 rssi;
+};
+
+
+#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
+#define BUS_HEADER_LEN (16+BRCMF_SDALIGN) /* Must be atleast SDPCM_RESERVE
+ * (amount of header tha might be added)
+ * plus any space that might be needed
+ * for alignment padding.
+ */
+#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for
+ * round off at the end of buffer
+ */
+
+struct brcmf_proto {
+ u16 reqid;
+ u8 pending;
+ u32 lastcmd;
+ u8 bus_header[BUS_HEADER_LEN];
+ struct brcmf_proto_cdc_dcmd msg;
+ unsigned char buf[BRCMF_DCMD_MAXLEN + ROUND_UP_MARGIN];
+};
+
+static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
+{
+ struct brcmf_proto *prot = drvr->prot;
+ int len = le32_to_cpu(prot->msg.len) +
+ sizeof(struct brcmf_proto_cdc_dcmd);
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* NOTE : cdc->msg.len holds the desired length of the buffer to be
+ * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
+ * is actually sent to the dongle
+ */
+ if (len > CDC_MAX_MSG_SIZE)
+ len = CDC_MAX_MSG_SIZE;
+
+ /* Send request */
+ return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg,
+ len);
+}
+
+static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
+{
+ int ret;
+ struct brcmf_proto *prot = drvr->prot;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ do {
+ ret = brcmf_sdbrcm_bus_rxctl(drvr->bus,
+ (unsigned char *)&prot->msg,
+ len + sizeof(struct brcmf_proto_cdc_dcmd));
+ if (ret < 0)
+ break;
+ } while (CDC_DCMD_ID(le32_to_cpu(prot->msg.flags)) != id);
+
+ return ret;
+}
+
+int
+brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len)
+{
+ struct brcmf_proto *prot = drvr->prot;
+ struct brcmf_proto_cdc_dcmd *msg = &prot->msg;
+ void *info;
+ int ret = 0, retries = 0;
+ u32 id, flags;
+
+ brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CTL, "cmd %d len %d\n", cmd, len);
+
+ /* Respond "bcmerror" and "bcmerrorstr" with local cache */
+ if (cmd == BRCMF_C_GET_VAR && buf) {
+ if (!strcmp((char *)buf, "bcmerrorstr")) {
+ strncpy((char *)buf, "bcm_error",
+ BCME_STRLEN);
+ goto done;
+ } else if (!strcmp((char *)buf, "bcmerror")) {
+ *(int *)buf = drvr->dongle_error;
+ goto done;
+ }
+ }
+
+ memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd));
+
+ msg->cmd = cpu_to_le32(cmd);
+ msg->len = cpu_to_le32(len);
+ flags = (++prot->reqid << CDC_DCMD_ID_SHIFT);
+ flags = (flags & ~CDC_DCMD_IF_MASK) |
+ (ifidx << CDC_DCMD_IF_SHIFT);
+ msg->flags = cpu_to_le32(flags);
+
+ if (buf)
+ memcpy(prot->buf, buf, len);
+
+ ret = brcmf_proto_cdc_msg(drvr);
+ if (ret < 0) {
+ brcmf_dbg(ERROR, "brcmf_proto_cdc_msg failed w/status %d\n",
+ ret);
+ goto done;
+ }
+
+retry:
+ /* wait for interrupt and get first fragment */
+ ret = brcmf_proto_cdc_cmplt(drvr, prot->reqid, len);
+ if (ret < 0)
+ goto done;
+
+ flags = le32_to_cpu(msg->flags);
+ id = (flags & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT;
+
+ if ((id < prot->reqid) && (++retries < RETRIES))
+ goto retry;
+ if (id != prot->reqid) {
+ brcmf_dbg(ERROR, "%s: unexpected request id %d (expected %d)\n",
+ brcmf_ifname(drvr, ifidx), id, prot->reqid);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Check info buffer */
+ info = (void *)&msg[1];
+
+ /* Copy info buffer */
+ if (buf) {
+ if (ret < (int)len)
+ len = ret;
+ memcpy(buf, info, len);
+ }
+
+ /* Check the ERROR flag */
+ if (flags & CDC_DCMD_ERROR) {
+ ret = le32_to_cpu(msg->status);
+ /* Cache error from dongle */
+ drvr->dongle_error = ret;
+ }
+
+done:
+ return ret;
+}
+
+int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len)
+{
+ struct brcmf_proto *prot = drvr->prot;
+ struct brcmf_proto_cdc_dcmd *msg = &prot->msg;
+ int ret = 0;
+ u32 flags, id;
+
+ brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CTL, "cmd %d len %d\n", cmd, len);
+
+ memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd));
+
+ msg->cmd = cpu_to_le32(cmd);
+ msg->len = cpu_to_le32(len);
+ flags = (++prot->reqid << CDC_DCMD_ID_SHIFT) | CDC_DCMD_SET;
+ flags = (flags & ~CDC_DCMD_IF_MASK) |
+ (ifidx << CDC_DCMD_IF_SHIFT);
+ msg->flags = cpu_to_le32(flags);
+
+ if (buf)
+ memcpy(prot->buf, buf, len);
+
+ ret = brcmf_proto_cdc_msg(drvr);
+ if (ret < 0)
+ goto done;
+
+ ret = brcmf_proto_cdc_cmplt(drvr, prot->reqid, len);
+ if (ret < 0)
+ goto done;
+
+ flags = le32_to_cpu(msg->flags);
+ id = (flags & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT;
+
+ if (id != prot->reqid) {
+ brcmf_dbg(ERROR, "%s: unexpected request id %d (expected %d)\n",
+ brcmf_ifname(drvr, ifidx), id, prot->reqid);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Check the ERROR flag */
+ if (flags & CDC_DCMD_ERROR) {
+ ret = le32_to_cpu(msg->status);
+ /* Cache error from dongle */
+ drvr->dongle_error = ret;
+ }
+
+done:
+ return ret;
+}
+
+int
+brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
+ int len)
+{
+ struct brcmf_proto *prot = drvr->prot;
+ int ret = -1;
+
+ if (drvr->busstate == BRCMF_BUS_DOWN) {
+ brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
+ return ret;
+ }
+ brcmf_os_proto_block(drvr);
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (len > BRCMF_DCMD_MAXLEN)
+ goto done;
+
+ if (prot->pending == true) {
+ brcmf_dbg(TRACE, "CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+ dcmd->cmd, (unsigned long)dcmd->cmd, prot->lastcmd,
+ (unsigned long)prot->lastcmd);
+ if (dcmd->cmd == BRCMF_C_SET_VAR ||
+ dcmd->cmd == BRCMF_C_GET_VAR)
+ brcmf_dbg(TRACE, "iovar cmd=%s\n", (char *)dcmd->buf);
+
+ goto done;
+ }
+
+ prot->pending = true;
+ prot->lastcmd = dcmd->cmd;
+ if (dcmd->set)
+ ret = brcmf_proto_cdc_set_dcmd(drvr, ifidx, dcmd->cmd,
+ dcmd->buf, len);
+ else {
+ ret = brcmf_proto_cdc_query_dcmd(drvr, ifidx, dcmd->cmd,
+ dcmd->buf, len);
+ if (ret > 0)
+ dcmd->used = ret -
+ sizeof(struct brcmf_proto_cdc_dcmd);
+ }
+
+ if (ret >= 0)
+ ret = 0;
+ else {
+ struct brcmf_proto_cdc_dcmd *msg = &prot->msg;
+ /* len == needed when set/query fails from dongle */
+ dcmd->needed = le32_to_cpu(msg->len);
+ }
+
+ /* Intercept the wme_dp dongle cmd here */
+ if (!ret && dcmd->cmd == BRCMF_C_SET_VAR &&
+ !strcmp(dcmd->buf, "wme_dp")) {
+ int slen;
+ __le32 val = 0;
+
+ slen = strlen("wme_dp") + 1;
+ if (len >= (int)(slen + sizeof(int)))
+ memcpy(&val, (char *)dcmd->buf + slen, sizeof(int));
+ drvr->wme_dp = (u8) le32_to_cpu(val);
+ }
+
+ prot->pending = false;
+
+done:
+ brcmf_os_proto_unblock(drvr);
+
+ return ret;
+}
+
+static bool pkt_sum_needed(struct sk_buff *skb)
+{
+ return skb->ip_summed == CHECKSUM_PARTIAL;
+}
+
+static void pkt_set_sum_good(struct sk_buff *skb, bool x)
+{
+ skb->ip_summed = (x ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+}
+
+void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
+ struct sk_buff *pktbuf)
+{
+ struct brcmf_proto_bdc_header *h;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Push BDC header used to convey priority for buses that don't */
+
+ skb_push(pktbuf, BDC_HEADER_LEN);
+
+ h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
+
+ h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+ if (pkt_sum_needed(pktbuf))
+ h->flags |= BDC_FLAG_SUM_NEEDED;
+
+ h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
+ h->flags2 = 0;
+ h->rssi = 0;
+ BDC_SET_IF_IDX(h, ifidx);
+}
+
+int brcmf_proto_hdrpull(struct brcmf_pub *drvr, int *ifidx,
+ struct sk_buff *pktbuf)
+{
+ struct brcmf_proto_bdc_header *h;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Pop BDC header used to convey priority for buses that don't */
+
+ if (pktbuf->len < BDC_HEADER_LEN) {
+ brcmf_dbg(ERROR, "rx data too short (%d < %d)\n",
+ pktbuf->len, BDC_HEADER_LEN);
+ return -EBADE;
+ }
+
+ h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
+
+ *ifidx = BDC_GET_IF_IDX(h);
+ if (*ifidx >= BRCMF_MAX_IFS) {
+ brcmf_dbg(ERROR, "rx data ifnum out of range (%d)\n", *ifidx);
+ return -EBADE;
+ }
+
+ if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) !=
+ BDC_PROTO_VER) {
+ brcmf_dbg(ERROR, "%s: non-BDC packet received, flags 0x%x\n",
+ brcmf_ifname(drvr, *ifidx), h->flags);
+ return -EBADE;
+ }
+
+ if (h->flags & BDC_FLAG_SUM_GOOD) {
+ brcmf_dbg(INFO, "%s: BDC packet received with good rx-csum, flags 0x%x\n",
+ brcmf_ifname(drvr, *ifidx), h->flags);
+ pkt_set_sum_good(pktbuf, true);
+ }
+
+ pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
+
+ skb_pull(pktbuf, BDC_HEADER_LEN);
+
+ return 0;
+}
+
+int brcmf_proto_attach(struct brcmf_pub *drvr)
+{
+ struct brcmf_proto *cdc;
+
+ cdc = kzalloc(sizeof(struct brcmf_proto), GFP_ATOMIC);
+ if (!cdc)
+ goto fail;
+
+ /* ensure that the msg buf directly follows the cdc msg struct */
+ if ((unsigned long)(&cdc->msg + 1) != (unsigned long)cdc->buf) {
+ brcmf_dbg(ERROR, "struct brcmf_proto is not correctly defined\n");
+ goto fail;
+ }
+
+ drvr->prot = cdc;
+ drvr->hdrlen += BDC_HEADER_LEN;
+ drvr->maxctl = BRCMF_DCMD_MAXLEN +
+ sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN;
+ return 0;
+
+fail:
+ kfree(cdc);
+ return -ENOMEM;
+}
+
+/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? */
+void brcmf_proto_detach(struct brcmf_pub *drvr)
+{
+ kfree(drvr->prot);
+ drvr->prot = NULL;
+}
+
+void brcmf_proto_dstats(struct brcmf_pub *drvr)
+{
+ /* No stats from dongle added yet, copy bus stats */
+ drvr->dstats.tx_packets = drvr->tx_packets;
+ drvr->dstats.tx_errors = drvr->tx_errors;
+ drvr->dstats.rx_packets = drvr->rx_packets;
+ drvr->dstats.rx_errors = drvr->rx_errors;
+ drvr->dstats.rx_dropped = drvr->rx_dropped;
+ drvr->dstats.multicast = drvr->rx_multicast;
+ return;
+}
+
+int brcmf_proto_init(struct brcmf_pub *drvr)
+{
+ int ret = 0;
+ char buf[128];
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ brcmf_os_proto_block(drvr);
+
+ /* Get the device MAC address */
+ strcpy(buf, "cur_etheraddr");
+ ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR,
+ buf, sizeof(buf));
+ if (ret < 0) {
+ brcmf_os_proto_unblock(drvr);
+ return ret;
+ }
+ memcpy(drvr->mac, buf, ETH_ALEN);
+
+ brcmf_os_proto_unblock(drvr);
+
+ ret = brcmf_c_preinit_dcmds(drvr);
+
+ /* Always assumes wl for now */
+ drvr->iswl = true;
+
+ return ret;
+}
+
+void brcmf_proto_stop(struct brcmf_pub *drvr)
+{
+ /* Nothing to do for CDC */
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
new file mode 100644
index 00000000000..891826197f9
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -0,0 +1,895 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <asm/unaligned.h>
+#include <defs.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_proto.h"
+#include "dhd_dbg.h"
+
+#define BRCM_OUI "\x00\x10\x18"
+#define DOT11_OUI_LEN 3
+#define BCMILCP_BCM_SUBTYPE_EVENT 1
+#define PKTFILTER_BUF_SIZE 2048
+#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
+
+int brcmf_msg_level;
+
+#define MSGTRACE_VERSION 1
+
+#define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter_le, u)
+#define BRCMF_PKT_FILTER_PATTERN_FIXED_LEN \
+ offsetof(struct brcmf_pkt_filter_pattern_le, mask_and_pattern)
+
+#ifdef BCMDBG
+static const char brcmf_version[] =
+ "Dongle Host Driver, version " BRCMF_VERSION_STR "\nCompiled on "
+ __DATE__ " at " __TIME__;
+#else
+static const char brcmf_version[] =
+ "Dongle Host Driver, version " BRCMF_VERSION_STR;
+#endif
+
+/* Message trace header */
+struct msgtrace_hdr {
+ u8 version;
+ u8 spare;
+ __be16 len; /* Len of the trace */
+ __be32 seqnum; /* Sequence number of message. Useful
+ * if the messsage has been lost
+ * because of DMA error or a bus reset
+ * (ex: SDIO Func2)
+ */
+ __be32 discarded_bytes; /* Number of discarded bytes because of
+ trace overflow */
+ __be32 discarded_printf; /* Number of discarded printf
+ because of trace overflow */
+} __packed;
+
+
+uint
+brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+{
+ uint len;
+
+ len = strlen(name) + 1;
+
+ if ((len + datalen) > buflen)
+ return 0;
+
+ strncpy(buf, name, buflen);
+
+ /* append data onto the end of the name string */
+ memcpy(&buf[len], data, datalen);
+ len += datalen;
+
+ return len;
+}
+
+void brcmf_c_init(void)
+{
+ /* Init global variables at run-time, not as part of the declaration.
+ * This is required to support init/de-init of the driver.
+ * Initialization
+ * of globals as part of the declaration results in non-deterministic
+ * behaviour since the value of the globals may be different on the
+ * first time that the driver is initialized vs subsequent
+ * initializations.
+ */
+ brcmf_msg_level = BRCMF_ERROR_VAL;
+}
+
+bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
+ struct sk_buff *pkt, int prec)
+{
+ struct sk_buff *p;
+ int eprec = -1; /* precedence to evict from */
+ bool discard_oldest;
+
+ /* Fast case, precedence queue is not full and we are also not
+ * exceeding total queue length
+ */
+ if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+ brcmu_pktq_penq(q, prec, pkt);
+ return true;
+ }
+
+ /* Determine precedence from which to evict packet, if any */
+ if (pktq_pfull(q, prec))
+ eprec = prec;
+ else if (pktq_full(q)) {
+ p = brcmu_pktq_peek_tail(q, &eprec);
+ if (eprec > prec)
+ return false;
+ }
+
+ /* Evict if needed */
+ if (eprec >= 0) {
+ /* Detect queueing to unconfigured precedence */
+ discard_oldest = ac_bitmap_tst(drvr->wme_dp, eprec);
+ if (eprec == prec && !discard_oldest)
+ return false; /* refuse newer (incoming) packet */
+ /* Evict packet according to discard policy */
+ p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) :
+ brcmu_pktq_pdeq_tail(q, eprec);
+ if (p == NULL)
+ brcmf_dbg(ERROR, "brcmu_pktq_penq() failed, oldest %d\n",
+ discard_oldest);
+
+ brcmu_pkt_buf_free_skb(p);
+ }
+
+ /* Enqueue */
+ p = brcmu_pktq_penq(q, prec, pkt);
+ if (p == NULL)
+ brcmf_dbg(ERROR, "brcmu_pktq_penq() failed\n");
+
+ return p != NULL;
+}
+
+#ifdef BCMDBG
+static void
+brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
+{
+ uint i, status, reason;
+ bool group = false, flush_txq = false, link = false;
+ char *auth_str, *event_name;
+ unsigned char *buf;
+ char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
+ static struct {
+ uint event;
+ char *event_name;
+ } event_names[] = {
+ {
+ BRCMF_E_SET_SSID, "SET_SSID"}, {
+ BRCMF_E_JOIN, "JOIN"}, {
+ BRCMF_E_START, "START"}, {
+ BRCMF_E_AUTH, "AUTH"}, {
+ BRCMF_E_AUTH_IND, "AUTH_IND"}, {
+ BRCMF_E_DEAUTH, "DEAUTH"}, {
+ BRCMF_E_DEAUTH_IND, "DEAUTH_IND"}, {
+ BRCMF_E_ASSOC, "ASSOC"}, {
+ BRCMF_E_ASSOC_IND, "ASSOC_IND"}, {
+ BRCMF_E_REASSOC, "REASSOC"}, {
+ BRCMF_E_REASSOC_IND, "REASSOC_IND"}, {
+ BRCMF_E_DISASSOC, "DISASSOC"}, {
+ BRCMF_E_DISASSOC_IND, "DISASSOC_IND"}, {
+ BRCMF_E_QUIET_START, "START_QUIET"}, {
+ BRCMF_E_QUIET_END, "END_QUIET"}, {
+ BRCMF_E_BEACON_RX, "BEACON_RX"}, {
+ BRCMF_E_LINK, "LINK"}, {
+ BRCMF_E_MIC_ERROR, "MIC_ERROR"}, {
+ BRCMF_E_NDIS_LINK, "NDIS_LINK"}, {
+ BRCMF_E_ROAM, "ROAM"}, {
+ BRCMF_E_TXFAIL, "TXFAIL"}, {
+ BRCMF_E_PMKID_CACHE, "PMKID_CACHE"}, {
+ BRCMF_E_RETROGRADE_TSF, "RETROGRADE_TSF"}, {
+ BRCMF_E_PRUNE, "PRUNE"}, {
+ BRCMF_E_AUTOAUTH, "AUTOAUTH"}, {
+ BRCMF_E_EAPOL_MSG, "EAPOL_MSG"}, {
+ BRCMF_E_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
+ BRCMF_E_ADDTS_IND, "ADDTS_IND"}, {
+ BRCMF_E_DELTS_IND, "DELTS_IND"}, {
+ BRCMF_E_BCNSENT_IND, "BCNSENT_IND"}, {
+ BRCMF_E_BCNRX_MSG, "BCNRX_MSG"}, {
+ BRCMF_E_BCNLOST_MSG, "BCNLOST_MSG"}, {
+ BRCMF_E_ROAM_PREP, "ROAM_PREP"}, {
+ BRCMF_E_PFN_NET_FOUND, "PNO_NET_FOUND"}, {
+ BRCMF_E_PFN_NET_LOST, "PNO_NET_LOST"}, {
+ BRCMF_E_RESET_COMPLETE, "RESET_COMPLETE"}, {
+ BRCMF_E_JOIN_START, "JOIN_START"}, {
+ BRCMF_E_ROAM_START, "ROAM_START"}, {
+ BRCMF_E_ASSOC_START, "ASSOC_START"}, {
+ BRCMF_E_IBSS_ASSOC, "IBSS_ASSOC"}, {
+ BRCMF_E_RADIO, "RADIO"}, {
+ BRCMF_E_PSM_WATCHDOG, "PSM_WATCHDOG"}, {
+ BRCMF_E_PROBREQ_MSG, "PROBREQ_MSG"}, {
+ BRCMF_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND"}, {
+ BRCMF_E_PSK_SUP, "PSK_SUP"}, {
+ BRCMF_E_COUNTRY_CODE_CHANGED, "COUNTRY_CODE_CHANGED"}, {
+ BRCMF_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME"}, {
+ BRCMF_E_ICV_ERROR, "ICV_ERROR"}, {
+ BRCMF_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR"}, {
+ BRCMF_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR"}, {
+ BRCMF_E_TRACE, "TRACE"}, {
+ BRCMF_E_ACTION_FRAME, "ACTION FRAME"}, {
+ BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, {
+ BRCMF_E_IF, "IF"}, {
+ BRCMF_E_RSSI, "RSSI"}, {
+ BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}
+ };
+ uint event_type, flags, auth_type, datalen;
+ static u32 seqnum_prev;
+ struct msgtrace_hdr hdr;
+ u32 nblost;
+ char *s, *p;
+
+ event_type = be32_to_cpu(event->event_type);
+ flags = be16_to_cpu(event->flags);
+ status = be32_to_cpu(event->status);
+ reason = be32_to_cpu(event->reason);
+ auth_type = be32_to_cpu(event->auth_type);
+ datalen = be32_to_cpu(event->datalen);
+ /* debug dump of event messages */
+ sprintf(eabuf, "%pM", event->addr);
+
+ event_name = "UNKNOWN";
+ for (i = 0; i < ARRAY_SIZE(event_names); i++) {
+ if (event_names[i].event == event_type)
+ event_name = event_names[i].event_name;
+ }
+
+ brcmf_dbg(EVENT, "EVENT: %s, event ID = %d\n", event_name, event_type);
+ brcmf_dbg(EVENT, "flags 0x%04x, status %d, reason %d, auth_type %d MAC %s\n",
+ flags, status, reason, auth_type, eabuf);
+
+ if (flags & BRCMF_EVENT_MSG_LINK)
+ link = true;
+ if (flags & BRCMF_EVENT_MSG_GROUP)
+ group = true;
+ if (flags & BRCMF_EVENT_MSG_FLUSHTXQ)
+ flush_txq = true;
+
+ switch (event_type) {
+ case BRCMF_E_START:
+ case BRCMF_E_DEAUTH:
+ case BRCMF_E_DISASSOC:
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
+ break;
+
+ case BRCMF_E_ASSOC_IND:
+ case BRCMF_E_REASSOC_IND:
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
+ break;
+
+ case BRCMF_E_ASSOC:
+ case BRCMF_E_REASSOC:
+ if (status == BRCMF_E_STATUS_SUCCESS)
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, SUCCESS\n",
+ event_name, eabuf);
+ else if (status == BRCMF_E_STATUS_TIMEOUT)
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, TIMEOUT\n",
+ event_name, eabuf);
+ else if (status == BRCMF_E_STATUS_FAIL)
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
+ event_name, eabuf, (int)reason);
+ else
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, unexpected status %d\n",
+ event_name, eabuf, (int)status);
+ break;
+
+ case BRCMF_E_DEAUTH_IND:
+ case BRCMF_E_DISASSOC_IND:
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, reason %d\n",
+ event_name, eabuf, (int)reason);
+ break;
+
+ case BRCMF_E_AUTH:
+ case BRCMF_E_AUTH_IND:
+ if (auth_type == WLAN_AUTH_OPEN)
+ auth_str = "Open System";
+ else if (auth_type == WLAN_AUTH_SHARED_KEY)
+ auth_str = "Shared Key";
+ else {
+ sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
+ auth_str = err_msg;
+ }
+ if (event_type == BRCMF_E_AUTH_IND)
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s\n",
+ event_name, eabuf, auth_str);
+ else if (status == BRCMF_E_STATUS_SUCCESS)
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+ event_name, eabuf, auth_str);
+ else if (status == BRCMF_E_STATUS_TIMEOUT)
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+ event_name, eabuf, auth_str);
+ else if (status == BRCMF_E_STATUS_FAIL) {
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
+ event_name, eabuf, auth_str, (int)reason);
+ }
+
+ break;
+
+ case BRCMF_E_JOIN:
+ case BRCMF_E_ROAM:
+ case BRCMF_E_SET_SSID:
+ if (status == BRCMF_E_STATUS_SUCCESS)
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n",
+ event_name, eabuf);
+ else if (status == BRCMF_E_STATUS_FAIL)
+ brcmf_dbg(EVENT, "MACEVENT: %s, failed\n", event_name);
+ else if (status == BRCMF_E_STATUS_NO_NETWORKS)
+ brcmf_dbg(EVENT, "MACEVENT: %s, no networks found\n",
+ event_name);
+ else
+ brcmf_dbg(EVENT, "MACEVENT: %s, unexpected status %d\n",
+ event_name, (int)status);
+ break;
+
+ case BRCMF_E_BEACON_RX:
+ if (status == BRCMF_E_STATUS_SUCCESS)
+ brcmf_dbg(EVENT, "MACEVENT: %s, SUCCESS\n", event_name);
+ else if (status == BRCMF_E_STATUS_FAIL)
+ brcmf_dbg(EVENT, "MACEVENT: %s, FAIL\n", event_name);
+ else
+ brcmf_dbg(EVENT, "MACEVENT: %s, status %d\n",
+ event_name, status);
+ break;
+
+ case BRCMF_E_LINK:
+ brcmf_dbg(EVENT, "MACEVENT: %s %s\n",
+ event_name, link ? "UP" : "DOWN");
+ break;
+
+ case BRCMF_E_MIC_ERROR:
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+ event_name, eabuf, group, flush_txq);
+ break;
+
+ case BRCMF_E_ICV_ERROR:
+ case BRCMF_E_UNICAST_DECODE_ERROR:
+ case BRCMF_E_MULTICAST_DECODE_ERROR:
+ brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
+ break;
+
+ case BRCMF_E_TXFAIL:
+ brcmf_dbg(EVENT, "MACEVENT: %s, RA %s\n", event_name, eabuf);
+ break;
+
+ case BRCMF_E_SCAN_COMPLETE:
+ case BRCMF_E_PMKID_CACHE:
+ brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
+ break;
+
+ case BRCMF_E_PFN_NET_FOUND:
+ case BRCMF_E_PFN_NET_LOST:
+ case BRCMF_E_PFN_SCAN_COMPLETE:
+ brcmf_dbg(EVENT, "PNOEVENT: %s\n", event_name);
+ break;
+
+ case BRCMF_E_PSK_SUP:
+ case BRCMF_E_PRUNE:
+ brcmf_dbg(EVENT, "MACEVENT: %s, status %d, reason %d\n",
+ event_name, (int)status, (int)reason);
+ break;
+
+ case BRCMF_E_TRACE:
+ buf = (unsigned char *) event_data;
+ memcpy(&hdr, buf, sizeof(struct msgtrace_hdr));
+
+ if (hdr.version != MSGTRACE_VERSION) {
+ brcmf_dbg(ERROR,
+ "MACEVENT: %s [unsupported version --> brcmf"
+ " version:%d dongle version:%d]\n",
+ event_name, MSGTRACE_VERSION, hdr.version);
+ /* Reset datalen to avoid display below */
+ datalen = 0;
+ break;
+ }
+
+ /* There are 2 bytes available at the end of data */
+ *(buf + sizeof(struct msgtrace_hdr)
+ + be16_to_cpu(hdr.len)) = '\0';
+
+ if (be32_to_cpu(hdr.discarded_bytes)
+ || be32_to_cpu(hdr.discarded_printf))
+ brcmf_dbg(ERROR,
+ "WLC_E_TRACE: [Discarded traces in dongle -->"
+ " discarded_bytes %d discarded_printf %d]\n",
+ be32_to_cpu(hdr.discarded_bytes),
+ be32_to_cpu(hdr.discarded_printf));
+
+ nblost = be32_to_cpu(hdr.seqnum) - seqnum_prev - 1;
+ if (nblost > 0)
+ brcmf_dbg(ERROR, "WLC_E_TRACE: [Event lost --> seqnum "
+ " %d nblost %d\n", be32_to_cpu(hdr.seqnum),
+ nblost);
+ seqnum_prev = be32_to_cpu(hdr.seqnum);
+
+ /* Display the trace buffer. Advance from \n to \n to
+ * avoid display big
+ * printf (issue with Linux printk )
+ */
+ p = (char *)&buf[sizeof(struct msgtrace_hdr)];
+ while ((s = strstr(p, "\n")) != NULL) {
+ *s = '\0';
+ printk(KERN_DEBUG"%s\n", p);
+ p = s + 1;
+ }
+ printk(KERN_DEBUG "%s\n", p);
+
+ /* Reset datalen to avoid display below */
+ datalen = 0;
+ break;
+
+ case BRCMF_E_RSSI:
+ brcmf_dbg(EVENT, "MACEVENT: %s %d\n",
+ event_name, be32_to_cpu(*((__be32 *)event_data)));
+ break;
+
+ default:
+ brcmf_dbg(EVENT,
+ "MACEVENT: %s %d, MAC %s, status %d, reason %d, "
+ "auth %d\n", event_name, event_type, eabuf,
+ (int)status, (int)reason, (int)auth_type);
+ break;
+ }
+
+ /* show any appended data */
+ if (datalen) {
+ buf = (unsigned char *) event_data;
+ brcmf_dbg(EVENT, " data (%d) : ", datalen);
+ for (i = 0; i < datalen; i++)
+ brcmf_dbg(EVENT, " 0x%02x ", *buf++);
+ brcmf_dbg(EVENT, "\n");
+ }
+}
+#endif /* BCMDBG */
+
+int
+brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
+ struct brcmf_event_msg *event, void **data_ptr)
+{
+ /* check whether packet is a BRCM event pkt */
+ struct brcmf_event *pvt_data = (struct brcmf_event *) pktdata;
+ struct brcmf_if_event *ifevent;
+ char *event_data;
+ u32 type, status;
+ u16 flags;
+ int evlen;
+
+ if (memcmp(BRCM_OUI, &pvt_data->hdr.oui[0], DOT11_OUI_LEN)) {
+ brcmf_dbg(ERROR, "mismatched OUI, bailing\n");
+ return -EBADE;
+ }
+
+ /* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
+ if (get_unaligned_be16(&pvt_data->hdr.usr_subtype) !=
+ BCMILCP_BCM_SUBTYPE_EVENT) {
+ brcmf_dbg(ERROR, "mismatched subtype, bailing\n");
+ return -EBADE;
+ }
+
+ *data_ptr = &pvt_data[1];
+ event_data = *data_ptr;
+
+ /* memcpy since BRCM event pkt may be unaligned. */
+ memcpy(event, &pvt_data->msg, sizeof(struct brcmf_event_msg));
+
+ type = get_unaligned_be32(&event->event_type);
+ flags = get_unaligned_be16(&event->flags);
+ status = get_unaligned_be32(&event->status);
+ evlen = get_unaligned_be32(&event->datalen) +
+ sizeof(struct brcmf_event);
+
+ switch (type) {
+ case BRCMF_E_IF:
+ ifevent = (struct brcmf_if_event *) event_data;
+ brcmf_dbg(TRACE, "if event\n");
+
+ if (ifevent->ifidx > 0 && ifevent->ifidx < BRCMF_MAX_IFS) {
+ if (ifevent->action == BRCMF_E_IF_ADD)
+ brcmf_add_if(drvr_priv, ifevent->ifidx, NULL,
+ event->ifname,
+ pvt_data->eth.h_dest,
+ ifevent->flags, ifevent->bssidx);
+ else
+ brcmf_del_if(drvr_priv, ifevent->ifidx);
+ } else {
+ brcmf_dbg(ERROR, "Invalid ifidx %d for %s\n",
+ ifevent->ifidx, event->ifname);
+ }
+
+ /* send up the if event: btamp user needs it */
+ *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
+ break;
+
+ /* These are what external supplicant/authenticator wants */
+ case BRCMF_E_LINK:
+ case BRCMF_E_ASSOC_IND:
+ case BRCMF_E_REASSOC_IND:
+ case BRCMF_E_DISASSOC_IND:
+ case BRCMF_E_MIC_ERROR:
+ default:
+ /* Fall through: this should get _everything_ */
+
+ *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
+ brcmf_dbg(TRACE, "MAC event %d, flags %x, status %x\n",
+ type, flags, status);
+
+ /* put it back to BRCMF_E_NDIS_LINK */
+ if (type == BRCMF_E_NDIS_LINK) {
+ u32 temp1;
+ __be32 temp2;
+
+ temp1 = get_unaligned_be32(&event->event_type);
+ brcmf_dbg(TRACE, "Converted to WLC_E_LINK type %d\n",
+ temp1);
+
+ temp2 = cpu_to_be32(BRCMF_E_NDIS_LINK);
+ memcpy((void *)(&pvt_data->msg.event_type), &temp2,
+ sizeof(pvt_data->msg.event_type));
+ }
+ break;
+ }
+
+#ifdef BCMDBG
+ brcmf_c_show_host_event(event, event_data);
+#endif /* BCMDBG */
+
+ return 0;
+}
+
+/* Convert user's input in hex pattern to byte-size mask */
+static int brcmf_c_pattern_atoh(char *src, char *dst)
+{
+ int i;
+ if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
+ brcmf_dbg(ERROR, "Mask invalid format. Needs to start with 0x\n");
+ return -EINVAL;
+ }
+ src = src + 2; /* Skip past 0x */
+ if (strlen(src) % 2 != 0) {
+ brcmf_dbg(ERROR, "Mask invalid format. Length must be even.\n");
+ return -EINVAL;
+ }
+ for (i = 0; *src != '\0'; i++) {
+ unsigned long res;
+ char num[3];
+ strncpy(num, src, 2);
+ num[2] = '\0';
+ if (kstrtoul(num, 16, &res))
+ return -EINVAL;
+ dst[i] = (u8)res;
+ src += 2;
+ }
+ return i;
+}
+
+void
+brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg, int enable,
+ int master_mode)
+{
+ unsigned long res;
+ char *argv[8];
+ int i = 0;
+ const char *str;
+ int buf_len;
+ int str_len;
+ char *arg_save = NULL, *arg_org = NULL;
+ int rc;
+ char buf[128];
+ struct brcmf_pkt_filter_enable_le enable_parm;
+ struct brcmf_pkt_filter_enable_le *pkt_filterp;
+ __le32 mmode_le;
+
+ arg_save = kmalloc(strlen(arg) + 1, GFP_ATOMIC);
+ if (!arg_save)
+ goto fail;
+
+ arg_org = arg_save;
+ memcpy(arg_save, arg, strlen(arg) + 1);
+
+ argv[i] = strsep(&arg_save, " ");
+
+ i = 0;
+ if (NULL == argv[i]) {
+ brcmf_dbg(ERROR, "No args provided\n");
+ goto fail;
+ }
+
+ str = "pkt_filter_enable";
+ str_len = strlen(str);
+ strncpy(buf, str, str_len);
+ buf[str_len] = '\0';
+ buf_len = str_len + 1;
+
+ pkt_filterp = (struct brcmf_pkt_filter_enable_le *) (buf + str_len + 1);
+
+ /* Parse packet filter id. */
+ enable_parm.id = 0;
+ if (!kstrtoul(argv[i], 0, &res))
+ enable_parm.id = cpu_to_le32((u32)res);
+
+ /* Parse enable/disable value. */
+ enable_parm.enable = cpu_to_le32(enable);
+
+ buf_len += sizeof(enable_parm);
+ memcpy((char *)pkt_filterp, &enable_parm, sizeof(enable_parm));
+
+ /* Enable/disable the specified filter. */
+ rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len);
+ rc = rc >= 0 ? 0 : rc;
+ if (rc)
+ brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
+ arg, rc);
+ else
+ brcmf_dbg(TRACE, "successfully added pktfilter %s\n", arg);
+
+ /* Contorl the master mode */
+ mmode_le = cpu_to_le32(master_mode);
+ brcmf_c_mkiovar("pkt_filter_mode", (char *)&mmode_le, 4, buf,
+ sizeof(buf));
+ rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf,
+ sizeof(buf));
+ rc = rc >= 0 ? 0 : rc;
+ if (rc)
+ brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
+ arg, rc);
+
+fail:
+ kfree(arg_org);
+}
+
+void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg)
+{
+ const char *str;
+ struct brcmf_pkt_filter_le pkt_filter;
+ struct brcmf_pkt_filter_le *pkt_filterp;
+ unsigned long res;
+ int buf_len;
+ int str_len;
+ int rc;
+ u32 mask_size;
+ u32 pattern_size;
+ char *argv[8], *buf = NULL;
+ int i = 0;
+ char *arg_save = NULL, *arg_org = NULL;
+
+ arg_save = kstrdup(arg, GFP_ATOMIC);
+ if (!arg_save)
+ goto fail;
+
+ arg_org = arg_save;
+
+ buf = kmalloc(PKTFILTER_BUF_SIZE, GFP_ATOMIC);
+ if (!buf)
+ goto fail;
+
+ argv[i] = strsep(&arg_save, " ");
+ while (argv[i++])
+ argv[i] = strsep(&arg_save, " ");
+
+ i = 0;
+ if (NULL == argv[i]) {
+ brcmf_dbg(ERROR, "No args provided\n");
+ goto fail;
+ }
+
+ str = "pkt_filter_add";
+ strcpy(buf, str);
+ str_len = strlen(str);
+ buf_len = str_len + 1;
+
+ pkt_filterp = (struct brcmf_pkt_filter_le *) (buf + str_len + 1);
+
+ /* Parse packet filter id. */
+ pkt_filter.id = 0;
+ if (!kstrtoul(argv[i], 0, &res))
+ pkt_filter.id = cpu_to_le32((u32)res);
+
+ if (NULL == argv[++i]) {
+ brcmf_dbg(ERROR, "Polarity not provided\n");
+ goto fail;
+ }
+
+ /* Parse filter polarity. */
+ pkt_filter.negate_match = 0;
+ if (!kstrtoul(argv[i], 0, &res))
+ pkt_filter.negate_match = cpu_to_le32((u32)res);
+
+ if (NULL == argv[++i]) {
+ brcmf_dbg(ERROR, "Filter type not provided\n");
+ goto fail;
+ }
+
+ /* Parse filter type. */
+ pkt_filter.type = 0;
+ if (!kstrtoul(argv[i], 0, &res))
+ pkt_filter.type = cpu_to_le32((u32)res);
+
+ if (NULL == argv[++i]) {
+ brcmf_dbg(ERROR, "Offset not provided\n");
+ goto fail;
+ }
+
+ /* Parse pattern filter offset. */
+ pkt_filter.u.pattern.offset = 0;
+ if (!kstrtoul(argv[i], 0, &res))
+ pkt_filter.u.pattern.offset = cpu_to_le32((u32)res);
+
+ if (NULL == argv[++i]) {
+ brcmf_dbg(ERROR, "Bitmask not provided\n");
+ goto fail;
+ }
+
+ /* Parse pattern filter mask. */
+ mask_size =
+ brcmf_c_pattern_atoh
+ (argv[i], (char *)pkt_filterp->u.pattern.mask_and_pattern);
+
+ if (NULL == argv[++i]) {
+ brcmf_dbg(ERROR, "Pattern not provided\n");
+ goto fail;
+ }
+
+ /* Parse pattern filter pattern. */
+ pattern_size =
+ brcmf_c_pattern_atoh(argv[i],
+ (char *)&pkt_filterp->u.pattern.
+ mask_and_pattern[mask_size]);
+
+ if (mask_size != pattern_size) {
+ brcmf_dbg(ERROR, "Mask and pattern not the same size\n");
+ goto fail;
+ }
+
+ pkt_filter.u.pattern.size_bytes = cpu_to_le32(mask_size);
+ buf_len += BRCMF_PKT_FILTER_FIXED_LEN;
+ buf_len += (BRCMF_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+ /* Keep-alive attributes are set in local
+ * variable (keep_alive_pkt), and
+ ** then memcpy'ed into buffer (keep_alive_pktp) since there is no
+ ** guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)pkt_filterp,
+ &pkt_filter,
+ BRCMF_PKT_FILTER_FIXED_LEN + BRCMF_PKT_FILTER_PATTERN_FIXED_LEN);
+
+ rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len);
+ rc = rc >= 0 ? 0 : rc;
+
+ if (rc)
+ brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
+ arg, rc);
+ else
+ brcmf_dbg(TRACE, "successfully added pktfilter %s\n", arg);
+
+fail:
+ kfree(arg_org);
+
+ kfree(buf);
+}
+
+static void brcmf_c_arp_offload_set(struct brcmf_pub *drvr, int arp_mode)
+{
+ char iovbuf[32];
+ int retcode;
+
+ brcmf_c_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
+ retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
+ iovbuf, sizeof(iovbuf));
+ retcode = retcode >= 0 ? 0 : retcode;
+ if (retcode)
+ brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, retcode = %d\n",
+ arp_mode, retcode);
+ else
+ brcmf_dbg(TRACE, "successfully set ARP offload mode to 0x%x\n",
+ arp_mode);
+}
+
+static void brcmf_c_arp_offload_enable(struct brcmf_pub *drvr, int arp_enable)
+{
+ char iovbuf[32];
+ int retcode;
+
+ brcmf_c_mkiovar("arpoe", (char *)&arp_enable, 4,
+ iovbuf, sizeof(iovbuf));
+ retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
+ iovbuf, sizeof(iovbuf));
+ retcode = retcode >= 0 ? 0 : retcode;
+ if (retcode)
+ brcmf_dbg(TRACE, "failed to enable ARP offload to %d, retcode = %d\n",
+ arp_enable, retcode);
+ else
+ brcmf_dbg(TRACE, "successfully enabled ARP offload to %d\n",
+ arp_enable);
+}
+
+int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
+{
+ char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for
+ "event_msgs" + '\0' + bitvec */
+ uint up = 0;
+ char buf[128], *ptr;
+ u32 dongle_align = BRCMF_SDALIGN;
+ u32 glom = 0;
+ u32 roaming = 1;
+ uint bcn_timeout = 3;
+ int scan_assoc_time = 40;
+ int scan_unassoc_time = 40;
+ int i;
+
+ brcmf_os_proto_block(drvr);
+
+ /* Set Country code */
+ if (drvr->country_code[0] != 0) {
+ if (brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_COUNTRY,
+ drvr->country_code,
+ sizeof(drvr->country_code)) < 0)
+ brcmf_dbg(ERROR, "country code setting failed\n");
+ }
+
+ /* query for 'ver' to get version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+ brcmf_c_mkiovar("ver", NULL, 0, buf, sizeof(buf));
+ brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, buf, sizeof(buf));
+ strsep(&ptr, "\n");
+ /* Print fw version info */
+ brcmf_dbg(ERROR, "Firmware version = %s\n", buf);
+
+ /* Match Host and Dongle rx alignment */
+ brcmf_c_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf,
+ sizeof(iovbuf));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
+
+ /* disable glom option per default */
+ brcmf_c_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
+
+ /* Setup timeout if Beacons are lost and roam is off to report
+ link down */
+ brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf,
+ sizeof(iovbuf));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
+
+ /* Enable/Disable build-in roaming to allowed ext supplicant to take
+ of romaing */
+ brcmf_c_mkiovar("roam_off", (char *)&roaming, 4,
+ iovbuf, sizeof(iovbuf));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
+
+ /* Force STA UP */
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_UP, (char *)&up, sizeof(up));
+
+ /* Setup event_msgs */
+ brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
+ iovbuf, sizeof(iovbuf));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
+
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+ (char *)&scan_assoc_time, sizeof(scan_assoc_time));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+ (char *)&scan_unassoc_time, sizeof(scan_unassoc_time));
+
+ /* Set and enable ARP offload feature */
+ brcmf_c_arp_offload_set(drvr, BRCMF_ARPOL_MODE);
+ brcmf_c_arp_offload_enable(drvr, true);
+
+ /* Set up pkt filter */
+ for (i = 0; i < drvr->pktfilter_count; i++) {
+ brcmf_c_pktfilter_offload_set(drvr, drvr->pktfilter[i]);
+ brcmf_c_pktfilter_offload_enable(drvr, drvr->pktfilter[i],
+ 0, true);
+ }
+
+ brcmf_os_proto_unblock(drvr);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
new file mode 100644
index 00000000000..7467922f053
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCMF_DBG_H_
+#define _BRCMF_DBG_H_
+
+#if defined(BCMDBG)
+
+#define brcmf_dbg(level, fmt, ...) \
+do { \
+ if (BRCMF_ERROR_VAL == BRCMF_##level##_VAL) { \
+ if (brcmf_msg_level & BRCMF_##level##_VAL) { \
+ if (net_ratelimit()) \
+ printk(KERN_DEBUG "%s: " fmt, \
+ __func__, ##__VA_ARGS__); \
+ } \
+ } else { \
+ if (brcmf_msg_level & BRCMF_##level##_VAL) { \
+ printk(KERN_DEBUG "%s: " fmt, \
+ __func__, ##__VA_ARGS__); \
+ } \
+ } \
+} while (0)
+
+#define BRCMF_DATA_ON() (brcmf_msg_level & BRCMF_DATA_VAL)
+#define BRCMF_CTL_ON() (brcmf_msg_level & BRCMF_CTL_VAL)
+#define BRCMF_HDRS_ON() (brcmf_msg_level & BRCMF_HDRS_VAL)
+#define BRCMF_BYTES_ON() (brcmf_msg_level & BRCMF_BYTES_VAL)
+#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL)
+
+#else /* (defined BCMDBG) || (defined BCMDBG) */
+
+#define brcmf_dbg(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
+
+#define BRCMF_DATA_ON() 0
+#define BRCMF_CTL_ON() 0
+#define BRCMF_HDRS_ON() 0
+#define BRCMF_BYTES_ON() 0
+#define BRCMF_GLOM_ON() 0
+
+#endif /* defined(BCMDBG) */
+
+extern int brcmf_msg_level;
+
+#endif /* _BRCMF_DBG_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
new file mode 100644
index 00000000000..4acbac5a74c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -0,0 +1,1356 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/hardirq.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+#include <defs.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_proto.h"
+#include "dhd_dbg.h"
+#include "wl_cfg80211.h"
+#include "bcmchip.h"
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
+MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN fullmac cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+
+/* Interface control information */
+struct brcmf_if {
+ struct brcmf_info *info; /* back pointer to brcmf_info */
+ /* OS/stack specifics */
+ struct net_device *ndev;
+ struct net_device_stats stats;
+ int idx; /* iface idx in dongle */
+ int state; /* interface state */
+ u8 mac_addr[ETH_ALEN]; /* assigned MAC address */
+};
+
+/* Local private structure (extension of pub) */
+struct brcmf_info {
+ struct brcmf_pub pub;
+
+ /* OS/stack specifics */
+ struct brcmf_if *iflist[BRCMF_MAX_IFS];
+
+ struct mutex proto_block;
+
+ struct work_struct setmacaddr_work;
+ struct work_struct multicast_work;
+ u8 macvalue[ETH_ALEN];
+ atomic_t pend_8021x_cnt;
+};
+
+/* Error bits */
+module_param(brcmf_msg_level, int, 0);
+
+
+static int brcmf_net2idx(struct brcmf_info *drvr_priv, struct net_device *ndev)
+{
+ int i = 0;
+
+ while (i < BRCMF_MAX_IFS) {
+ if (drvr_priv->iflist[i] && drvr_priv->iflist[i]->ndev == ndev)
+ return i;
+ i++;
+ }
+
+ return BRCMF_BAD_IF;
+}
+
+int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
+{
+ int i = BRCMF_MAX_IFS;
+ struct brcmf_if *ifp;
+
+ if (name == NULL || *name == '\0')
+ return 0;
+
+ while (--i > 0) {
+ ifp = drvr_priv->iflist[i];
+ if (ifp && !strncmp(ifp->ndev->name, name, IFNAMSIZ))
+ break;
+ }
+
+ brcmf_dbg(TRACE, "return idx %d for \"%s\"\n", i, name);
+
+ return i; /* default - the primary interface */
+}
+
+char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
+{
+ struct brcmf_info *drvr_priv = drvr->info;
+
+ if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
+ brcmf_dbg(ERROR, "ifidx %d out of range\n", ifidx);
+ return "<if_bad>";
+ }
+
+ if (drvr_priv->iflist[ifidx] == NULL) {
+ brcmf_dbg(ERROR, "null i/f %d\n", ifidx);
+ return "<if_null>";
+ }
+
+ if (drvr_priv->iflist[ifidx]->ndev)
+ return drvr_priv->iflist[ifidx]->ndev->name;
+
+ return "<if_none>";
+}
+
+static void _brcmf_set_multicast_list(struct work_struct *work)
+{
+ struct net_device *ndev;
+ struct netdev_hw_addr *ha;
+ u32 dcmd_value, cnt;
+ __le32 cnt_le;
+ __le32 dcmd_le_value;
+
+ struct brcmf_dcmd dcmd;
+ char *buf, *bufp;
+ uint buflen;
+ int ret;
+
+ struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info,
+ multicast_work);
+
+ ndev = drvr_priv->iflist[0]->ndev;
+ cnt = netdev_mc_count(ndev);
+
+ /* Determine initial value of allmulti flag */
+ dcmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;
+
+ /* Send down the multicast list first. */
+
+ buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETH_ALEN);
+ bufp = buf = kmalloc(buflen, GFP_ATOMIC);
+ if (!bufp)
+ return;
+
+ strcpy(bufp, "mcast_list");
+ bufp += strlen("mcast_list") + 1;
+
+ cnt_le = cpu_to_le32(cnt);
+ memcpy(bufp, &cnt_le, sizeof(cnt));
+ bufp += sizeof(cnt_le);
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (!cnt)
+ break;
+ memcpy(bufp, ha->addr, ETH_ALEN);
+ bufp += ETH_ALEN;
+ cnt--;
+ }
+
+ memset(&dcmd, 0, sizeof(dcmd));
+ dcmd.cmd = BRCMF_C_SET_VAR;
+ dcmd.buf = buf;
+ dcmd.len = buflen;
+ dcmd.set = true;
+
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ if (ret < 0) {
+ brcmf_dbg(ERROR, "%s: set mcast_list failed, cnt %d\n",
+ brcmf_ifname(&drvr_priv->pub, 0), cnt);
+ dcmd_value = cnt ? true : dcmd_value;
+ }
+
+ kfree(buf);
+
+ /* Now send the allmulti setting. This is based on the setting in the
+ * net_device flags, but might be modified above to be turned on if we
+ * were trying to set some addresses and dongle rejected it...
+ */
+
+ buflen = sizeof("allmulti") + sizeof(dcmd_value);
+ buf = kmalloc(buflen, GFP_ATOMIC);
+ if (!buf)
+ return;
+
+ dcmd_le_value = cpu_to_le32(dcmd_value);
+
+ if (!brcmf_c_mkiovar
+ ("allmulti", (void *)&dcmd_le_value,
+ sizeof(dcmd_le_value), buf, buflen)) {
+ brcmf_dbg(ERROR, "%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
+ brcmf_ifname(&drvr_priv->pub, 0),
+ (int)sizeof(dcmd_value), buflen);
+ kfree(buf);
+ return;
+ }
+
+ memset(&dcmd, 0, sizeof(dcmd));
+ dcmd.cmd = BRCMF_C_SET_VAR;
+ dcmd.buf = buf;
+ dcmd.len = buflen;
+ dcmd.set = true;
+
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ if (ret < 0) {
+ brcmf_dbg(ERROR, "%s: set allmulti %d failed\n",
+ brcmf_ifname(&drvr_priv->pub, 0),
+ le32_to_cpu(dcmd_le_value));
+ }
+
+ kfree(buf);
+
+ /* Finally, pick up the PROMISC flag as well, like the NIC
+ driver does */
+
+ dcmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
+ dcmd_le_value = cpu_to_le32(dcmd_value);
+
+ memset(&dcmd, 0, sizeof(dcmd));
+ dcmd.cmd = BRCMF_C_SET_PROMISC;
+ dcmd.buf = &dcmd_le_value;
+ dcmd.len = sizeof(dcmd_le_value);
+ dcmd.set = true;
+
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ if (ret < 0) {
+ brcmf_dbg(ERROR, "%s: set promisc %d failed\n",
+ brcmf_ifname(&drvr_priv->pub, 0),
+ le32_to_cpu(dcmd_le_value));
+ }
+}
+
+static void
+_brcmf_set_mac_address(struct work_struct *work)
+{
+ char buf[32];
+ struct brcmf_dcmd dcmd;
+ int ret;
+
+ struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info,
+ setmacaddr_work);
+
+ brcmf_dbg(TRACE, "enter\n");
+ if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr_priv->macvalue,
+ ETH_ALEN, buf, 32)) {
+ brcmf_dbg(ERROR, "%s: mkiovar failed for cur_etheraddr\n",
+ brcmf_ifname(&drvr_priv->pub, 0));
+ return;
+ }
+ memset(&dcmd, 0, sizeof(dcmd));
+ dcmd.cmd = BRCMF_C_SET_VAR;
+ dcmd.buf = buf;
+ dcmd.len = 32;
+ dcmd.set = true;
+
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ if (ret < 0)
+ brcmf_dbg(ERROR, "%s: set cur_etheraddr failed\n",
+ brcmf_ifname(&drvr_priv->pub, 0));
+ else
+ memcpy(drvr_priv->iflist[0]->ndev->dev_addr,
+ drvr_priv->macvalue, ETH_ALEN);
+
+ return;
+}
+
+static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ struct sockaddr *sa = (struct sockaddr *)addr;
+ int ifidx;
+
+ ifidx = brcmf_net2idx(drvr_priv, ndev);
+ if (ifidx == BRCMF_BAD_IF)
+ return -1;
+
+ memcpy(&drvr_priv->macvalue, sa->sa_data, ETH_ALEN);
+ schedule_work(&drvr_priv->setmacaddr_work);
+ return 0;
+}
+
+static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ int ifidx;
+
+ ifidx = brcmf_net2idx(drvr_priv, ndev);
+ if (ifidx == BRCMF_BAD_IF)
+ return;
+
+ schedule_work(&drvr_priv->multicast_work);
+}
+
+int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
+{
+ struct brcmf_info *drvr_priv = drvr->info;
+
+ /* Reject if down */
+ if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN))
+ return -ENODEV;
+
+ /* Update multicast statistic */
+ if (pktbuf->len >= ETH_ALEN) {
+ u8 *pktdata = (u8 *) (pktbuf->data);
+ struct ethhdr *eh = (struct ethhdr *)pktdata;
+
+ if (is_multicast_ether_addr(eh->h_dest))
+ drvr->tx_multicast++;
+ if (ntohs(eh->h_proto) == ETH_P_PAE)
+ atomic_inc(&drvr_priv->pend_8021x_cnt);
+ }
+
+ /* If the protocol uses a data header, apply it */
+ brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
+
+ /* Use bus module to send data frame */
+ return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf);
+}
+
+static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int ret;
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ int ifidx;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Reject if down */
+ if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) {
+ brcmf_dbg(ERROR, "xmit rejected pub.up=%d busstate=%d\n",
+ drvr_priv->pub.up, drvr_priv->pub.busstate);
+ netif_stop_queue(ndev);
+ return -ENODEV;
+ }
+
+ ifidx = brcmf_net2idx(drvr_priv, ndev);
+ if (ifidx == BRCMF_BAD_IF) {
+ brcmf_dbg(ERROR, "bad ifidx %d\n", ifidx);
+ netif_stop_queue(ndev);
+ return -ENODEV;
+ }
+
+ /* Make sure there's enough room for any header */
+ if (skb_headroom(skb) < drvr_priv->pub.hdrlen) {
+ struct sk_buff *skb2;
+
+ brcmf_dbg(INFO, "%s: insufficient headroom\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx));
+ drvr_priv->pub.tx_realloc++;
+ skb2 = skb_realloc_headroom(skb, drvr_priv->pub.hdrlen);
+ dev_kfree_skb(skb);
+ skb = skb2;
+ if (skb == NULL) {
+ brcmf_dbg(ERROR, "%s: skb_realloc_headroom failed\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx));
+ ret = -ENOMEM;
+ goto done;
+ }
+ }
+
+ ret = brcmf_sendpkt(&drvr_priv->pub, ifidx, skb);
+
+done:
+ if (ret)
+ drvr_priv->pub.dstats.tx_dropped++;
+ else
+ drvr_priv->pub.tx_packets++;
+
+ /* Return ok: we always eat the packet */
+ return 0;
+}
+
+void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool state)
+{
+ struct net_device *ndev;
+ struct brcmf_info *drvr_priv = drvr->info;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ drvr->txoff = state;
+ ndev = drvr_priv->iflist[ifidx]->ndev;
+ if (state == ON)
+ netif_stop_queue(ndev);
+ else
+ netif_wake_queue(ndev);
+}
+
+static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx,
+ void *pktdata, struct brcmf_event_msg *event,
+ void **data)
+{
+ int bcmerror = 0;
+
+ bcmerror = brcmf_c_host_event(drvr_priv, ifidx, pktdata, event, data);
+ if (bcmerror != 0)
+ return bcmerror;
+
+ if (drvr_priv->iflist[*ifidx]->ndev)
+ brcmf_cfg80211_event(drvr_priv->iflist[*ifidx]->ndev,
+ event, *data);
+
+ return bcmerror;
+}
+
+void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
+ int numpkt)
+{
+ struct brcmf_info *drvr_priv = drvr->info;
+ unsigned char *eth;
+ uint len;
+ void *data;
+ struct sk_buff *pnext, *save_pktbuf;
+ int i;
+ struct brcmf_if *ifp;
+ struct brcmf_event_msg event;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ save_pktbuf = skb;
+
+ for (i = 0; skb && i < numpkt; i++, skb = pnext) {
+
+ pnext = skb->next;
+ skb->next = NULL;
+
+ /* Get the protocol, maintain skb around eth_type_trans()
+ * The main reason for this hack is for the limitation of
+ * Linux 2.4 where 'eth_type_trans' uses the
+ * 'net->hard_header_len'
+ * to perform skb_pull inside vs ETH_HLEN. Since to avoid
+ * coping of the packet coming from the network stack to add
+ * BDC, Hardware header etc, during network interface
+ * registration
+ * we set the 'net->hard_header_len' to ETH_HLEN + extra space
+ * required
+ * for BDC, Hardware header etc. and not just the ETH_HLEN
+ */
+ eth = skb->data;
+ len = skb->len;
+
+ ifp = drvr_priv->iflist[ifidx];
+ if (ifp == NULL)
+ ifp = drvr_priv->iflist[0];
+
+ skb->dev = ifp->ndev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (skb->pkt_type == PACKET_MULTICAST)
+ drvr_priv->pub.rx_multicast++;
+
+ skb->data = eth;
+ skb->len = len;
+
+ /* Strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
+
+ /* Process special event packets and then discard them */
+ if (ntohs(skb->protocol) == ETH_P_LINK_CTL)
+ brcmf_host_event(drvr_priv, &ifidx,
+ skb_mac_header(skb),
+ &event, &data);
+
+ if (drvr_priv->iflist[ifidx] &&
+ !drvr_priv->iflist[ifidx]->state)
+ ifp = drvr_priv->iflist[ifidx];
+
+ if (ifp->ndev)
+ ifp->ndev->last_rx = jiffies;
+
+ drvr->dstats.rx_bytes += skb->len;
+ drvr->rx_packets++; /* Local count */
+
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
+ * by netif_rx_ni(), but in earlier kernels, we need
+ * to do it manually.
+ */
+ netif_rx_ni(skb);
+ }
+}
+
+void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp, bool success)
+{
+ uint ifidx;
+ struct brcmf_info *drvr_priv = drvr->info;
+ struct ethhdr *eh;
+ u16 type;
+
+ brcmf_proto_hdrpull(drvr, &ifidx, txp);
+
+ eh = (struct ethhdr *)(txp->data);
+ type = ntohs(eh->h_proto);
+
+ if (type == ETH_P_PAE)
+ atomic_dec(&drvr_priv->pend_8021x_cnt);
+
+}
+
+static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ struct brcmf_if *ifp;
+ int ifidx;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ ifidx = brcmf_net2idx(drvr_priv, ndev);
+ if (ifidx == BRCMF_BAD_IF)
+ return NULL;
+
+ ifp = drvr_priv->iflist[ifidx];
+
+ if (drvr_priv->pub.up)
+ /* Use the protocol to get dongle stats */
+ brcmf_proto_dstats(&drvr_priv->pub);
+
+ /* Copy dongle stats to net device stats */
+ ifp->stats.rx_packets = drvr_priv->pub.dstats.rx_packets;
+ ifp->stats.tx_packets = drvr_priv->pub.dstats.tx_packets;
+ ifp->stats.rx_bytes = drvr_priv->pub.dstats.rx_bytes;
+ ifp->stats.tx_bytes = drvr_priv->pub.dstats.tx_bytes;
+ ifp->stats.rx_errors = drvr_priv->pub.dstats.rx_errors;
+ ifp->stats.tx_errors = drvr_priv->pub.dstats.tx_errors;
+ ifp->stats.rx_dropped = drvr_priv->pub.dstats.rx_dropped;
+ ifp->stats.tx_dropped = drvr_priv->pub.dstats.tx_dropped;
+ ifp->stats.multicast = drvr_priv->pub.dstats.multicast;
+
+ return &ifp->stats;
+}
+
+/* Retrieve current toe component enables, which are kept
+ as a bitmap in toe_ol iovar */
+static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
+{
+ struct brcmf_dcmd dcmd;
+ __le32 toe_le;
+ char buf[32];
+ int ret;
+
+ memset(&dcmd, 0, sizeof(dcmd));
+
+ dcmd.cmd = BRCMF_C_GET_VAR;
+ dcmd.buf = buf;
+ dcmd.len = (uint) sizeof(buf);
+ dcmd.set = false;
+
+ strcpy(buf, "toe_ol");
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ if (ret < 0) {
+ /* Check for older dongle image that doesn't support toe_ol */
+ if (ret == -EIO) {
+ brcmf_dbg(ERROR, "%s: toe not supported by device\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx));
+ return -EOPNOTSUPP;
+ }
+
+ brcmf_dbg(INFO, "%s: could not get toe_ol: ret=%d\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ return ret;
+ }
+
+ memcpy(&toe_le, buf, sizeof(u32));
+ *toe_ol = le32_to_cpu(toe_le);
+ return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar,
+ and set toe global enable iovar */
+static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
+{
+ struct brcmf_dcmd dcmd;
+ char buf[32];
+ int ret;
+ __le32 toe_le = cpu_to_le32(toe_ol);
+
+ memset(&dcmd, 0, sizeof(dcmd));
+
+ dcmd.cmd = BRCMF_C_SET_VAR;
+ dcmd.buf = buf;
+ dcmd.len = (uint) sizeof(buf);
+ dcmd.set = true;
+
+ /* Set toe_ol as requested */
+ strcpy(buf, "toe_ol");
+ memcpy(&buf[sizeof("toe_ol")], &toe_le, sizeof(u32));
+
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ if (ret < 0) {
+ brcmf_dbg(ERROR, "%s: could not set toe_ol: ret=%d\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ return ret;
+ }
+
+ /* Enable toe globally only if any components are enabled. */
+ toe_le = cpu_to_le32(toe_ol != 0);
+
+ strcpy(buf, "toe");
+ memcpy(&buf[sizeof("toe")], &toe_le, sizeof(u32));
+
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ if (ret < 0) {
+ brcmf_dbg(ERROR, "%s: could not set toe: ret=%d\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+
+ sprintf(info->driver, KBUILD_MODNAME);
+ sprintf(info->version, "%lu", drvr_priv->pub.drv_version);
+ sprintf(info->fw_version, "%s", BCM4329_FW_NAME);
+ sprintf(info->bus_info, "%s",
+ dev_name(brcmf_bus_get_device(drvr_priv->pub.bus)));
+}
+
+static struct ethtool_ops brcmf_ethtool_ops = {
+ .get_drvinfo = brcmf_ethtool_get_drvinfo
+};
+
+static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
+{
+ struct ethtool_drvinfo info;
+ char drvname[sizeof(info.driver)];
+ u32 cmd;
+ struct ethtool_value edata;
+ u32 toe_cmpnt, csum_dir;
+ int ret;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* all ethtool calls start with a cmd word */
+ if (copy_from_user(&cmd, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ETHTOOL_GDRVINFO:
+ /* Copy out any request driver name */
+ if (copy_from_user(&info, uaddr, sizeof(info)))
+ return -EFAULT;
+ strncpy(drvname, info.driver, sizeof(info.driver));
+ drvname[sizeof(info.driver) - 1] = '\0';
+
+ /* clear struct for return */
+ memset(&info, 0, sizeof(info));
+ info.cmd = cmd;
+
+ /* if requested, identify ourselves */
+ if (strcmp(drvname, "?dhd") == 0) {
+ sprintf(info.driver, "dhd");
+ strcpy(info.version, BRCMF_VERSION_STR);
+ }
+
+ /* otherwise, require dongle to be up */
+ else if (!drvr_priv->pub.up) {
+ brcmf_dbg(ERROR, "dongle is not up\n");
+ return -ENODEV;
+ }
+
+ /* finally, report dongle driver type */
+ else if (drvr_priv->pub.iswl)
+ sprintf(info.driver, "wl");
+ else
+ sprintf(info.driver, "xx");
+
+ sprintf(info.version, "%lu", drvr_priv->pub.drv_version);
+ if (copy_to_user(uaddr, &info, sizeof(info)))
+ return -EFAULT;
+ brcmf_dbg(CTL, "given %*s, returning %s\n",
+ (int)sizeof(drvname), drvname, info.driver);
+ break;
+
+ /* Get toe offload components from dongle */
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_GTXCSUM:
+ ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
+ if (ret < 0)
+ return ret;
+
+ csum_dir =
+ (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ edata.cmd = cmd;
+ edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+ if (copy_to_user(uaddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ break;
+
+ /* Set toe offload components in dongle */
+ case ETHTOOL_SRXCSUM:
+ case ETHTOOL_STXCSUM:
+ if (copy_from_user(&edata, uaddr, sizeof(edata)))
+ return -EFAULT;
+
+ /* Read the current settings, update and write back */
+ ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
+ if (ret < 0)
+ return ret;
+
+ csum_dir =
+ (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ if (edata.data != 0)
+ toe_cmpnt |= csum_dir;
+ else
+ toe_cmpnt &= ~csum_dir;
+
+ ret = brcmf_toe_set(drvr_priv, 0, toe_cmpnt);
+ if (ret < 0)
+ return ret;
+
+ /* If setting TX checksum mode, tell Linux the new mode */
+ if (cmd == ETHTOOL_STXCSUM) {
+ if (edata.data)
+ drvr_priv->iflist[0]->ndev->features |=
+ NETIF_F_IP_CSUM;
+ else
+ drvr_priv->iflist[0]->ndev->features &=
+ ~NETIF_F_IP_CSUM;
+ }
+
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
+ int cmd)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ int ifidx;
+
+ ifidx = brcmf_net2idx(drvr_priv, ndev);
+ brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifidx, cmd);
+
+ if (ifidx == BRCMF_BAD_IF)
+ return -1;
+
+ if (cmd == SIOCETHTOOL)
+ return brcmf_ethtool(drvr_priv, ifr->ifr_data);
+
+ return -EOPNOTSUPP;
+}
+
+/* called only from within this driver. Sends a command to the dongle. */
+s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
+{
+ struct brcmf_dcmd dcmd;
+ s32 err = 0;
+ int buflen = 0;
+ bool is_set_key_cmd;
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ int ifidx;
+
+ memset(&dcmd, 0, sizeof(dcmd));
+ dcmd.cmd = cmd;
+ dcmd.buf = arg;
+ dcmd.len = len;
+
+ ifidx = brcmf_net2idx(drvr_priv, ndev);
+
+ if (dcmd.buf != NULL)
+ buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
+
+ /* send to dongle (must be up, and wl) */
+ if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) {
+ brcmf_dbg(ERROR, "DONGLE_DOWN\n");
+ err = -EIO;
+ goto done;
+ }
+
+ if (!drvr_priv->pub.iswl) {
+ err = -EIO;
+ goto done;
+ }
+
+ /*
+ * Intercept BRCMF_C_SET_KEY CMD - serialize M4 send and
+ * set key CMD to prevent M4 encryption.
+ */
+ is_set_key_cmd = ((dcmd.cmd == BRCMF_C_SET_KEY) ||
+ ((dcmd.cmd == BRCMF_C_SET_VAR) &&
+ !(strncmp("wsec_key", dcmd.buf, 9))) ||
+ ((dcmd.cmd == BRCMF_C_SET_VAR) &&
+ !(strncmp("bsscfg:wsec_key", dcmd.buf, 15))));
+ if (is_set_key_cmd)
+ brcmf_netdev_wait_pend8021x(ndev);
+
+ err = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, buflen);
+
+done:
+ if (err > 0)
+ err = 0;
+
+ return err;
+}
+
+static int brcmf_netdev_stop(struct net_device *ndev)
+{
+ struct brcmf_pub *drvr = *(struct brcmf_pub **) netdev_priv(ndev);
+
+ brcmf_dbg(TRACE, "Enter\n");
+ brcmf_cfg80211_down(drvr->config);
+ if (drvr->up == 0)
+ return 0;
+
+ /* Set state and stop OS transmissions */
+ drvr->up = 0;
+ netif_stop_queue(ndev);
+
+ return 0;
+}
+
+static int brcmf_netdev_open(struct net_device *ndev)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ u32 toe_ol;
+ int ifidx = brcmf_net2idx(drvr_priv, ndev);
+ s32 ret = 0;
+
+ brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
+
+ if (ifidx == 0) { /* do it only for primary eth0 */
+
+ /* try to bring up bus */
+ ret = brcmf_bus_start(&drvr_priv->pub);
+ if (ret != 0) {
+ brcmf_dbg(ERROR, "failed with code %d\n", ret);
+ return -1;
+ }
+ atomic_set(&drvr_priv->pend_8021x_cnt, 0);
+
+ memcpy(ndev->dev_addr, drvr_priv->pub.mac, ETH_ALEN);
+
+ /* Get current TOE mode from dongle */
+ if (brcmf_toe_get(drvr_priv, ifidx, &toe_ol) >= 0
+ && (toe_ol & TOE_TX_CSUM_OL) != 0)
+ drvr_priv->iflist[ifidx]->ndev->features |=
+ NETIF_F_IP_CSUM;
+ else
+ drvr_priv->iflist[ifidx]->ndev->features &=
+ ~NETIF_F_IP_CSUM;
+ }
+ /* Allow transmit calls */
+ netif_start_queue(ndev);
+ drvr_priv->pub.up = 1;
+ if (brcmf_cfg80211_up(drvr_priv->pub.config)) {
+ brcmf_dbg(ERROR, "failed to bring up cfg80211\n");
+ return -1;
+ }
+
+ return ret;
+}
+
+int
+brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, struct net_device *ndev,
+ char *name, u8 *mac_addr, u32 flags, u8 bssidx)
+{
+ struct brcmf_if *ifp;
+ int ret = 0, err = 0;
+
+ brcmf_dbg(TRACE, "idx %d, handle->%p\n", ifidx, ndev);
+
+ ifp = drvr_priv->iflist[ifidx];
+ if (!ifp) {
+ ifp = kmalloc(sizeof(struct brcmf_if), GFP_ATOMIC);
+ if (!ifp)
+ return -ENOMEM;
+ }
+
+ memset(ifp, 0, sizeof(struct brcmf_if));
+ ifp->info = drvr_priv;
+ drvr_priv->iflist[ifidx] = ifp;
+ if (mac_addr != NULL)
+ memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
+
+ if (ndev == NULL) {
+ ifp->state = BRCMF_E_IF_ADD;
+ ifp->idx = ifidx;
+ /*
+ * Delete the existing interface before overwriting it
+ * in case we missed the BRCMF_E_IF_DEL event.
+ */
+ if (ifp->ndev != NULL) {
+ brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
+ ifp->ndev->name);
+ netif_stop_queue(ifp->ndev);
+ unregister_netdev(ifp->ndev);
+ free_netdev(ifp->ndev);
+ }
+
+ /* Allocate netdev, including space for private structure */
+ ifp->ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d",
+ ether_setup);
+ if (!ifp->ndev) {
+ brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
+ ret = -ENOMEM;
+ }
+
+ if (ret == 0) {
+ memcpy(netdev_priv(ifp->ndev), &drvr_priv,
+ sizeof(drvr_priv));
+ err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
+ if (err != 0) {
+ brcmf_dbg(ERROR, "brcmf_net_attach failed, err %d\n",
+ err);
+ ret = -EOPNOTSUPP;
+ } else {
+ brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
+ current->pid, ifp->ndev->name);
+ ifp->state = 0;
+ }
+ }
+
+ if (ret < 0) {
+ if (ifp->ndev)
+ free_netdev(ifp->ndev);
+
+ drvr_priv->iflist[ifp->idx] = NULL;
+ kfree(ifp);
+ }
+ } else
+ ifp->ndev = ndev;
+
+ return 0;
+}
+
+void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx)
+{
+ struct brcmf_if *ifp;
+
+ brcmf_dbg(TRACE, "idx %d\n", ifidx);
+
+ ifp = drvr_priv->iflist[ifidx];
+ if (!ifp) {
+ brcmf_dbg(ERROR, "Null interface\n");
+ return;
+ }
+
+ ifp->state = BRCMF_E_IF_DEL;
+ ifp->idx = ifidx;
+ if (ifp->ndev != NULL) {
+ netif_stop_queue(ifp->ndev);
+ unregister_netdev(ifp->ndev);
+ free_netdev(ifp->ndev);
+ drvr_priv->iflist[ifidx] = NULL;
+ kfree(ifp);
+ }
+}
+
+struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
+{
+ struct brcmf_info *drvr_priv = NULL;
+ struct net_device *ndev;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Allocate netdev, including space for private structure */
+ ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d", ether_setup);
+ if (!ndev) {
+ brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
+ goto fail;
+ }
+
+ /* Allocate primary brcmf_info */
+ drvr_priv = kzalloc(sizeof(struct brcmf_info), GFP_ATOMIC);
+ if (!drvr_priv)
+ goto fail;
+
+ /*
+ * Save the brcmf_info into the priv
+ */
+ memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
+
+ if (brcmf_add_if(drvr_priv, 0, ndev, ndev->name, NULL, 0, 0) ==
+ BRCMF_BAD_IF)
+ goto fail;
+
+ ndev->netdev_ops = NULL;
+ mutex_init(&drvr_priv->proto_block);
+
+ /* Link to info module */
+ drvr_priv->pub.info = drvr_priv;
+
+ /* Link to bus module */
+ drvr_priv->pub.bus = bus;
+ drvr_priv->pub.hdrlen = bus_hdrlen;
+
+ /* Attach and link in the protocol */
+ if (brcmf_proto_attach(&drvr_priv->pub) != 0) {
+ brcmf_dbg(ERROR, "brcmf_prot_attach failed\n");
+ goto fail;
+ }
+
+ /* Attach and link in the cfg80211 */
+ drvr_priv->pub.config =
+ brcmf_cfg80211_attach(ndev,
+ brcmf_bus_get_device(bus),
+ &drvr_priv->pub);
+ if (drvr_priv->pub.config == NULL) {
+ brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
+ goto fail;
+ }
+
+ INIT_WORK(&drvr_priv->setmacaddr_work, _brcmf_set_mac_address);
+ INIT_WORK(&drvr_priv->multicast_work, _brcmf_set_multicast_list);
+
+ /*
+ * Save the brcmf_info into the priv
+ */
+ memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
+
+ return &drvr_priv->pub;
+
+fail:
+ if (ndev)
+ free_netdev(ndev);
+ if (drvr_priv)
+ brcmf_detach(&drvr_priv->pub);
+
+ return NULL;
+}
+
+int brcmf_bus_start(struct brcmf_pub *drvr)
+{
+ int ret = -1;
+ struct brcmf_info *drvr_priv = drvr->info;
+ /* Room for "event_msgs" + '\0' + bitvec */
+ char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
+
+ brcmf_dbg(TRACE, "\n");
+
+ /* Bring up the bus */
+ ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub);
+ if (ret != 0) {
+ brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret);
+ return ret;
+ }
+
+ /* If bus is not ready, can't come up */
+ if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) {
+ brcmf_dbg(ERROR, "failed bus is not ready\n");
+ return -ENODEV;
+ }
+
+ brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
+ iovbuf, sizeof(iovbuf));
+ brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, iovbuf,
+ sizeof(iovbuf));
+ memcpy(drvr->eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
+
+ setbit(drvr->eventmask, BRCMF_E_SET_SSID);
+ setbit(drvr->eventmask, BRCMF_E_PRUNE);
+ setbit(drvr->eventmask, BRCMF_E_AUTH);
+ setbit(drvr->eventmask, BRCMF_E_REASSOC);
+ setbit(drvr->eventmask, BRCMF_E_REASSOC_IND);
+ setbit(drvr->eventmask, BRCMF_E_DEAUTH_IND);
+ setbit(drvr->eventmask, BRCMF_E_DISASSOC_IND);
+ setbit(drvr->eventmask, BRCMF_E_DISASSOC);
+ setbit(drvr->eventmask, BRCMF_E_JOIN);
+ setbit(drvr->eventmask, BRCMF_E_ASSOC_IND);
+ setbit(drvr->eventmask, BRCMF_E_PSK_SUP);
+ setbit(drvr->eventmask, BRCMF_E_LINK);
+ setbit(drvr->eventmask, BRCMF_E_NDIS_LINK);
+ setbit(drvr->eventmask, BRCMF_E_MIC_ERROR);
+ setbit(drvr->eventmask, BRCMF_E_PMKID_CACHE);
+ setbit(drvr->eventmask, BRCMF_E_TXFAIL);
+ setbit(drvr->eventmask, BRCMF_E_JOIN_START);
+ setbit(drvr->eventmask, BRCMF_E_SCAN_COMPLETE);
+
+/* enable dongle roaming event */
+
+ drvr->pktfilter_count = 1;
+ /* Setup filter to allow only unicast */
+ drvr->pktfilter[0] = "100 0 0 0 0x01 0x00";
+
+ /* Bus is ready, do any protocol initialization */
+ ret = brcmf_proto_init(&drvr_priv->pub);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct net_device_ops brcmf_netdev_ops_pri = {
+ .ndo_open = brcmf_netdev_open,
+ .ndo_stop = brcmf_netdev_stop,
+ .ndo_get_stats = brcmf_netdev_get_stats,
+ .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
+ .ndo_start_xmit = brcmf_netdev_start_xmit,
+ .ndo_set_mac_address = brcmf_netdev_set_mac_address,
+ .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
+};
+
+int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
+{
+ struct brcmf_info *drvr_priv = drvr->info;
+ struct net_device *ndev;
+ u8 temp_addr[ETH_ALEN] = {
+ 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
+
+ brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
+
+ ndev = drvr_priv->iflist[ifidx]->ndev;
+ ndev->netdev_ops = &brcmf_netdev_ops_pri;
+
+ /*
+ * We have to use the primary MAC for virtual interfaces
+ */
+ if (ifidx != 0) {
+ /* for virtual interfaces use the primary MAC */
+ memcpy(temp_addr, drvr_priv->pub.mac, ETH_ALEN);
+
+ }
+
+ if (ifidx == 1) {
+ brcmf_dbg(TRACE, "ACCESS POINT MAC:\n");
+ /* ACCESSPOINT INTERFACE CASE */
+ temp_addr[0] |= 0X02; /* set bit 2 ,
+ - Locally Administered address */
+
+ }
+ ndev->hard_header_len = ETH_HLEN + drvr_priv->pub.hdrlen;
+ ndev->ethtool_ops = &brcmf_ethtool_ops;
+
+ drvr_priv->pub.rxsz = ndev->mtu + ndev->hard_header_len +
+ drvr_priv->pub.hdrlen;
+
+ memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
+
+ if (register_netdev(ndev) != 0) {
+ brcmf_dbg(ERROR, "couldn't register the net device\n");
+ goto fail;
+ }
+
+ brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
+
+ return 0;
+
+fail:
+ ndev->netdev_ops = NULL;
+ return -EBADE;
+}
+
+static void brcmf_bus_detach(struct brcmf_pub *drvr)
+{
+ struct brcmf_info *drvr_priv;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (drvr) {
+ drvr_priv = drvr->info;
+ if (drvr_priv) {
+ /* Stop the protocol module */
+ brcmf_proto_stop(&drvr_priv->pub);
+
+ /* Stop the bus module */
+ brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus);
+ }
+ }
+}
+
+void brcmf_detach(struct brcmf_pub *drvr)
+{
+ struct brcmf_info *drvr_priv;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (drvr) {
+ drvr_priv = drvr->info;
+ if (drvr_priv) {
+ struct brcmf_if *ifp;
+ int i;
+
+ for (i = 1; i < BRCMF_MAX_IFS; i++)
+ if (drvr_priv->iflist[i])
+ brcmf_del_if(drvr_priv, i);
+
+ ifp = drvr_priv->iflist[0];
+ if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+ rtnl_lock();
+ brcmf_netdev_stop(ifp->ndev);
+ rtnl_unlock();
+ unregister_netdev(ifp->ndev);
+ }
+
+ cancel_work_sync(&drvr_priv->setmacaddr_work);
+ cancel_work_sync(&drvr_priv->multicast_work);
+
+ brcmf_bus_detach(drvr);
+
+ if (drvr->prot)
+ brcmf_proto_detach(drvr);
+
+ brcmf_cfg80211_detach(drvr->config);
+
+ free_netdev(ifp->ndev);
+ kfree(ifp);
+ kfree(drvr_priv);
+ }
+ }
+}
+
+static void __exit brcmf_module_cleanup(void)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ brcmf_bus_unregister();
+}
+
+static int __init brcmf_module_init(void)
+{
+ int error;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ error = brcmf_bus_register();
+
+ if (error) {
+ brcmf_dbg(ERROR, "brcmf_bus_register failed\n");
+ goto failed;
+ }
+ return 0;
+
+failed:
+ return -EINVAL;
+}
+
+module_init(brcmf_module_init);
+module_exit(brcmf_module_cleanup);
+
+int brcmf_os_proto_block(struct brcmf_pub *drvr)
+{
+ struct brcmf_info *drvr_priv = drvr->info;
+
+ if (drvr_priv) {
+ mutex_lock(&drvr_priv->proto_block);
+ return 1;
+ }
+ return 0;
+}
+
+int brcmf_os_proto_unblock(struct brcmf_pub *drvr)
+{
+ struct brcmf_info *drvr_priv = drvr->info;
+
+ if (drvr_priv) {
+ mutex_unlock(&drvr_priv->proto_block);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int brcmf_get_pend_8021x_cnt(struct brcmf_info *drvr_priv)
+{
+ return atomic_read(&drvr_priv->pend_8021x_cnt);
+}
+
+#define MAX_WAIT_FOR_8021X_TX 10
+
+int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)netdev_priv(ndev);
+ int timeout = 10 * HZ / 1000;
+ int ntimes = MAX_WAIT_FOR_8021X_TX;
+ int pend = brcmf_get_pend_8021x_cnt(drvr_priv);
+
+ while (ntimes && pend) {
+ if (pend) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(timeout);
+ set_current_state(TASK_RUNNING);
+ ntimes--;
+ }
+ pend = brcmf_get_pend_8021x_cnt(drvr_priv);
+ }
+ return pend;
+}
+
+#ifdef BCMDBG
+int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size)
+{
+ int ret = 0;
+ struct file *fp;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
+
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ /* open file to write */
+ fp = filp_open("/tmp/mem_dump", O_WRONLY | O_CREAT, 0640);
+ if (!fp) {
+ brcmf_dbg(ERROR, "open file error\n");
+ ret = -1;
+ goto exit;
+ }
+
+ /* Write buf to file */
+ fp->f_op->write(fp, (char __user *)buf, size, &pos);
+
+exit:
+ /* free buf before return */
+ kfree(buf);
+ /* close file before return */
+ if (fp)
+ filp_close(fp, current->files);
+ /* restore previous address limit */
+ set_fs(old_fs);
+
+ return ret;
+}
+#endif /* BCMDBG */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
new file mode 100644
index 00000000000..4ee1ea846f6
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCMF_PROTO_H_
+#define _BRCMF_PROTO_H_
+
+/*
+ * Exported from the brcmf protocol module (brcmf_cdc)
+ */
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+extern int brcmf_proto_attach(struct brcmf_pub *drvr);
+
+/* Unlink, frees allocated protocol memory (including brcmf_proto) */
+extern void brcmf_proto_detach(struct brcmf_pub *drvr);
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+extern int brcmf_proto_init(struct brcmf_pub *drvr);
+
+/* Stop protocol: sync w/dongle state. */
+extern void brcmf_proto_stop(struct brcmf_pub *drvr);
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx,
+ struct sk_buff *txp);
+
+/* Remove any protocol-specific data header. */
+extern int brcmf_proto_hdrpull(struct brcmf_pub *, int *ifidx,
+ struct sk_buff *rxp);
+
+/* Use protocol to issue command to dongle */
+extern int brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx,
+ struct brcmf_dcmd *dcmd, int len);
+
+/* Update local copy of dongle statistics */
+extern void brcmf_proto_dstats(struct brcmf_pub *drvr);
+
+extern int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr);
+
+extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx,
+ uint cmd, void *buf, uint len);
+
+#endif /* _BRCMF_PROTO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
new file mode 100644
index 00000000000..313b8bf592d
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -0,0 +1,4591 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/printk.h>
+#include <linux/pci_ids.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/semaphore.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <defs.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <brcm_hw_ids.h>
+#include <soc.h>
+#include "sdio_host.h"
+
+#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
+
+#ifdef BCMDBG
+
+#define BRCMF_TRAP_INFO_SIZE 80
+
+#define CBUF_LEN (128)
+
+struct rte_log_le {
+ __le32 buf; /* Can't be pointer on (64-bit) hosts */
+ __le32 buf_size;
+ __le32 idx;
+ char *_buf_compat; /* Redundant pointer for backward compat. */
+};
+
+struct rte_console {
+ /* Virtual UART
+ * When there is no UART (e.g. Quickturn),
+ * the host should write a complete
+ * input line directly into cbuf and then write
+ * the length into vcons_in.
+ * This may also be used when there is a real UART
+ * (at risk of conflicting with
+ * the real UART). vcons_out is currently unused.
+ */
+ uint vcons_in;
+ uint vcons_out;
+
+ /* Output (logging) buffer
+ * Console output is written to a ring buffer log_buf at index log_idx.
+ * The host may read the output when it sees log_idx advance.
+ * Output will be lost if the output wraps around faster than the host
+ * polls.
+ */
+ struct rte_log_le log_le;
+
+ /* Console input line buffer
+ * Characters are read one at a time into cbuf
+ * until <CR> is received, then
+ * the buffer is processed as a command line.
+ * Also used for virtual UART.
+ */
+ uint cbuf_idx;
+ char cbuf[CBUF_LEN];
+};
+
+#endif /* BCMDBG */
+#include <chipcommon.h>
+
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_proto.h"
+#include "dhd_dbg.h"
+#include <bcmchip.h>
+
+#define TXQLEN 2048 /* bulk tx queue length */
+#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
+#define TXLOW (TXHI - 256) /* turn off flow control below TXLOW */
+#define PRIOMASK 7
+
+#define TXRETRIES 2 /* # of retries for tx frames */
+
+#define BRCMF_RXBOUND 50 /* Default for max rx frames in
+ one scheduling */
+
+#define BRCMF_TXBOUND 20 /* Default for max tx frames in
+ one scheduling */
+
+#define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
+
+#define MEMBLOCK 2048 /* Block size used for downloading
+ of dongle image */
+#define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold
+ biggest possible glom */
+
+#define BRCMF_FIRSTREAD (1 << 6)
+
+
+/* SBSDIO_DEVICE_CTL */
+
+/* 1: device will assert busy signal when receiving CMD53 */
+#define SBSDIO_DEVCTL_SETBUSY 0x01
+/* 1: assertion of sdio interrupt is synchronous to the sdio clock */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02
+/* 1: mask all interrupts to host except the chipActive (rev 8) */
+#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04
+/* 1: isolate internal sdio signals, put external pads in tri-state; requires
+ * sdio bus power cycle to clear (rev 9) */
+#define SBSDIO_DEVCTL_PADS_ISO 0x08
+/* Force SD->SB reset mapping (rev 11) */
+#define SBSDIO_DEVCTL_SB_RST_CTL 0x30
+/* Determined by CoreControl bit */
+#define SBSDIO_DEVCTL_RST_CORECTL 0x00
+/* Force backplane reset */
+#define SBSDIO_DEVCTL_RST_BPRESET 0x10
+/* Force no backplane reset */
+#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+
+/* Force ALP request to backplane */
+#define SBSDIO_FORCE_ALP 0x01
+/* Force HT request to backplane */
+#define SBSDIO_FORCE_HT 0x02
+/* Force ILP request to backplane */
+#define SBSDIO_FORCE_ILP 0x04
+/* Make ALP ready (power up xtal) */
+#define SBSDIO_ALP_AVAIL_REQ 0x08
+/* Make HT ready (power up PLL) */
+#define SBSDIO_HT_AVAIL_REQ 0x10
+/* Squelch clock requests from HW */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
+/* Status: ALP is ready */
+#define SBSDIO_ALP_AVAIL 0x40
+/* Status: HT is ready */
+#define SBSDIO_HT_AVAIL 0x80
+
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+
+#define SBSDIO_CLKAV(regval, alponly) \
+ (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+
+/* direct(mapped) cis space */
+
+/* MAPPED common CIS address */
+#define SBSDIO_CIS_BASE_COMMON 0x1000
+/* maximum bytes in one CIS */
+#define SBSDIO_CIS_SIZE_LIMIT 0x200
+/* cis offset addr is < 17 bits */
+#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF
+
+/* manfid tuple length, include tuple, link bytes */
+#define SBSDIO_CIS_MANFID_TUPLE_LEN 6
+
+/* intstatus */
+#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
+#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
+#define I_PC (1 << 10) /* descriptor error */
+#define I_PD (1 << 11) /* data error */
+#define I_DE (1 << 12) /* Descriptor protocol Error */
+#define I_RU (1 << 13) /* Receive descriptor Underflow */
+#define I_RO (1 << 14) /* Receive fifo Overflow */
+#define I_XU (1 << 15) /* Transmit fifo Underflow */
+#define I_RI (1 << 16) /* Receive Interrupt */
+#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
+#define I_XI (1 << 24) /* Transmit Interrupt */
+#define I_RF_TERM (1 << 25) /* Read Frame Terminate */
+#define I_WF_TERM (1 << 26) /* Write Frame Terminate */
+#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT (1 << 28) /* sbintstatus Interrupt */
+#define I_CHIPACTIVE (1 << 29) /* chip from doze to active state */
+#define I_SRESET (1 << 30) /* CCCR RES interrupt */
+#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
+#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
+#define I_DMA (I_RI | I_XI | I_ERRORS)
+
+/* corecontrol */
+#define CC_CISRDY (1 << 0) /* CIS Ready */
+#define CC_BPRESEN (1 << 1) /* CCCR RES signal */
+#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
+#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation */
+#define CC_XMTDATAAVAIL_MODE (1 << 4)
+#define CC_XMTDATAAVAIL_CTRL (1 << 5)
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+#define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
+#define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN 4 /* 2 bytes len, 2 bytes check val */
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN)
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK (1 << 0) /* Frame NAK */
+#define SMB_INT_ACK (1 << 1) /* Host Interrupt ACK */
+#define SMB_USE_OOB (1 << 2) /* Use OOB Wakeup */
+#define SMB_DEV_INT (1 << 3) /* Miscellaneous Interrupt */
+
+/* tosbmailboxdata */
+#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_FC_STATE I_HMB_SW0 /* Flow Control State */
+#define I_HMB_FC_CHANGE I_HMB_SW1 /* Flow Control State Changed */
+#define I_HMB_FRAME_IND I_HMB_SW2 /* Frame Indication */
+#define I_HMB_HOST_INT I_HMB_SW3 /* Miscellaneous Interrupt */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED 1 /* retransmit NAK'd frame */
+#define HMB_DATA_DEVREADY 2 /* talk to host after enable */
+#define HMB_DATA_FC 4 /* per prio flowcontrol update flag */
+#define HMB_DATA_FWREADY 8 /* fw ready for protocol activity */
+
+#define HMB_DATA_FCDATA_MASK 0xff000000
+#define HMB_DATA_FCDATA_SHIFT 24
+
+#define HMB_DATA_VERSION_MASK 0x00ff0000
+#define HMB_DATA_VERSION_SHIFT 16
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION 4
+
+/* SW frame header */
+#define SDPCM_PACKET_SEQUENCE(p) (((u8 *)p)[0] & 0xff)
+
+#define SDPCM_CHANNEL_MASK 0x00000f00
+#define SDPCM_CHANNEL_SHIFT 8
+#define SDPCM_PACKET_CHANNEL(p) (((u8 *)p)[1] & 0x0f)
+
+#define SDPCM_NEXTLEN_OFFSET 2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK 0xff000000
+#define SDPCM_DOFFSET_SHIFT 24
+#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
+#define SDPCM_FCMASK_VALUE(p) (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
+#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p) (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL 0 /* Control channel Id */
+#define SDPCM_EVENT_CHANNEL 1 /* Asyc Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets */
+#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
+
+#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for 8bit frame seq */
+
+#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
+
+/*
+ * Shared structure between dongle and the host.
+ * The structure contains pointers to trap or assert information.
+ */
+#define SDPCM_SHARED_VERSION 0x0002
+#define SDPCM_SHARED_VERSION_MASK 0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT 0x0100
+#define SDPCM_SHARED_ASSERT 0x0200
+#define SDPCM_SHARED_TRAP 0x0400
+
+/* Space for header read, limit for data packets */
+#define MAX_HDR_READ (1 << 6)
+#define MAX_RX_DATASZ 2048
+
+/* Maximum milliseconds to wait for F2 to come up */
+#define BRCMF_WAIT_F2RDY 3000
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at a 1 second (1000000us).
+ */
+#undef PMU_MAX_TRANSITION_DLY
+#define PMU_MAX_TRANSITION_DLY 1000000
+
+/* Value for ChipClockCSR during initial setup */
+#define BRCMF_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | \
+ SBSDIO_ALP_AVAIL_REQ)
+
+/* Flags for SDH calls */
+#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+/* sbimstate */
+#define SBIM_IBE 0x20000 /* inbanderror */
+#define SBIM_TO 0x40000 /* timeout */
+#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */
+#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */
+
+/* sbtmstatelow */
+
+/* reset */
+#define SBTML_RESET 0x0001
+/* reject field */
+#define SBTML_REJ_MASK 0x0006
+/* reject */
+#define SBTML_REJ 0x0002
+/* temporary reject, for error recovery */
+#define SBTML_TMPREJ 0x0004
+
+/* Shift to locate the SI control flags in sbtml */
+#define SBTML_SICF_SHIFT 16
+
+/* sbtmstatehigh */
+#define SBTMH_SERR 0x0001 /* serror */
+#define SBTMH_INT 0x0002 /* interrupt */
+#define SBTMH_BUSY 0x0004 /* busy */
+#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */
+
+/* Shift to locate the SI status flags in sbtmh */
+#define SBTMH_SISF_SHIFT 16
+
+/* sbidlow */
+#define SBIDL_INIT 0x80 /* initiator */
+
+/* sbidhigh */
+#define SBIDH_RC_MASK 0x000f /* revision code */
+#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */
+#define SBIDH_RCE_SHIFT 8
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | \
+ ((sbidh) & SBIDH_RC_MASK))
+#define SBIDH_CC_MASK 0x8ff0 /* core code */
+#define SBIDH_CC_SHIFT 4
+#define SBIDH_VC_MASK 0xffff0000 /* vendor code */
+#define SBIDH_VC_SHIFT 16
+
+/*
+ * Conversion of 802.1D priority to precedence level
+ */
+static uint prio2prec(u32 prio)
+{
+ return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
+ (prio^2) : prio;
+}
+
+/*
+ * Core reg address translation.
+ * Both macro's returns a 32 bits byte address on the backplane bus.
+ */
+#define CORE_CC_REG(base, field) \
+ (base + offsetof(struct chipcregs, field))
+#define CORE_BUS_REG(base, field) \
+ (base + offsetof(struct sdpcmd_regs, field))
+#define CORE_SB(base, field) \
+ (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+
+/* core registers */
+struct sdpcmd_regs {
+ u32 corecontrol; /* 0x00, rev8 */
+ u32 corestatus; /* rev8 */
+ u32 PAD[1];
+ u32 biststatus; /* rev8 */
+
+ /* PCMCIA access */
+ u16 pcmciamesportaladdr; /* 0x010, rev8 */
+ u16 PAD[1];
+ u16 pcmciamesportalmask; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciawrframebc; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciaunderflowtimer; /* rev8 */
+ u16 PAD[1];
+
+ /* interrupt */
+ u32 intstatus; /* 0x020, rev8 */
+ u32 hostintmask; /* rev8 */
+ u32 intmask; /* rev8 */
+ u32 sbintstatus; /* rev8 */
+ u32 sbintmask; /* rev8 */
+ u32 funcintmask; /* rev4 */
+ u32 PAD[2];
+ u32 tosbmailbox; /* 0x040, rev8 */
+ u32 tohostmailbox; /* rev8 */
+ u32 tosbmailboxdata; /* rev8 */
+ u32 tohostmailboxdata; /* rev8 */
+
+ /* synchronized access to registers in SDIO clock domain */
+ u32 sdioaccess; /* 0x050, rev8 */
+ u32 PAD[3];
+
+ /* PCMCIA frame control */
+ u8 pcmciaframectrl; /* 0x060, rev8 */
+ u8 PAD[3];
+ u8 pcmciawatermark; /* rev8 */
+ u8 PAD[155];
+
+ /* interrupt batching control */
+ u32 intrcvlazy; /* 0x100, rev8 */
+ u32 PAD[3];
+
+ /* counters */
+ u32 cmd52rd; /* 0x110, rev8 */
+ u32 cmd52wr; /* rev8 */
+ u32 cmd53rd; /* rev8 */
+ u32 cmd53wr; /* rev8 */
+ u32 abort; /* rev8 */
+ u32 datacrcerror; /* rev8 */
+ u32 rdoutofsync; /* rev8 */
+ u32 wroutofsync; /* rev8 */
+ u32 writebusy; /* rev8 */
+ u32 readwait; /* rev8 */
+ u32 readterm; /* rev8 */
+ u32 writeterm; /* rev8 */
+ u32 PAD[40];
+ u32 clockctlstatus; /* rev8 */
+ u32 PAD[7];
+
+ u32 PAD[128]; /* DMA engines */
+
+ /* SDIO/PCMCIA CIS region */
+ char cis[512]; /* 0x400-0x5ff, rev6 */
+
+ /* PCMCIA function control registers */
+ char pcmciafcr[256]; /* 0x600-6ff, rev6 */
+ u16 PAD[55];
+
+ /* PCMCIA backplane access */
+ u16 backplanecsr; /* 0x76E, rev6 */
+ u16 backplaneaddr0; /* rev6 */
+ u16 backplaneaddr1; /* rev6 */
+ u16 backplaneaddr2; /* rev6 */
+ u16 backplaneaddr3; /* rev6 */
+ u16 backplanedata0; /* rev6 */
+ u16 backplanedata1; /* rev6 */
+ u16 backplanedata2; /* rev6 */
+ u16 backplanedata3; /* rev6 */
+ u16 PAD[31];
+
+ /* sprom "size" & "blank" info */
+ u16 spromstatus; /* 0x7BE, rev2 */
+ u32 PAD[464];
+
+ u16 PAD[0x80];
+};
+
+#ifdef BCMDBG
+/* Device console log buffer state */
+struct brcmf_console {
+ uint count; /* Poll interval msec counter */
+ uint log_addr; /* Log struct address (fixed) */
+ struct rte_log_le log_le; /* Log struct (host copy) */
+ uint bufsize; /* Size of log buffer */
+ u8 *buf; /* Log buffer (host copy) */
+ uint last; /* Last buffer read index */
+};
+#endif /* BCMDBG */
+
+struct sdpcm_shared {
+ u32 flags;
+ u32 trap_addr;
+ u32 assert_exp_addr;
+ u32 assert_file_addr;
+ u32 assert_line;
+ u32 console_addr; /* Address of struct rte_console */
+ u32 msgtrace_addr;
+ u8 tag[32];
+};
+
+struct sdpcm_shared_le {
+ __le32 flags;
+ __le32 trap_addr;
+ __le32 assert_exp_addr;
+ __le32 assert_file_addr;
+ __le32 assert_line;
+ __le32 console_addr; /* Address of struct rte_console */
+ __le32 msgtrace_addr;
+ u8 tag[32];
+};
+
+
+/* misc chip info needed by some of the routines */
+struct chip_info {
+ u32 chip;
+ u32 chiprev;
+ u32 cccorebase;
+ u32 ccrev;
+ u32 cccaps;
+ u32 buscorebase; /* 32 bits backplane bus address */
+ u32 buscorerev;
+ u32 buscoretype;
+ u32 ramcorebase;
+ u32 armcorebase;
+ u32 pmurev;
+ u32 ramsize;
+};
+
+/* Private data for SDIO bus interaction */
+struct brcmf_bus {
+ struct brcmf_pub *drvr;
+
+ struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
+ struct chip_info *ci; /* Chip info struct */
+ char *vars; /* Variables (from CIS and/or other) */
+ uint varsz; /* Size of variables buffer */
+
+ u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
+
+ u32 hostintmask; /* Copy of Host Interrupt Mask */
+ u32 intstatus; /* Intstatus bits (events) pending */
+ bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
+ bool fcstate; /* State of dongle flow-control */
+
+ uint blocksize; /* Block size of SDIO transfers */
+ uint roundup; /* Max roundup limit */
+
+ struct pktq txq; /* Queue length used for flow-control */
+ u8 flowcontrol; /* per prio flow control bitmask */
+ u8 tx_seq; /* Transmit sequence number (next) */
+ u8 tx_max; /* Maximum transmit sequence allowed */
+
+ u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
+ u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
+ u16 nextlen; /* Next Read Len from last header */
+ u8 rx_seq; /* Receive sequence number (expected) */
+ bool rxskip; /* Skip receive (awaiting NAK ACK) */
+
+ uint rxbound; /* Rx frames to read before resched */
+ uint txbound; /* Tx frames to send before resched */
+ uint txminmax;
+
+ struct sk_buff *glomd; /* Packet containing glomming descriptor */
+ struct sk_buff *glom; /* Packet chain for glommed superframe */
+ uint glomerr; /* Glom packet read errors */
+
+ u8 *rxbuf; /* Buffer for receiving control packets */
+ uint rxblen; /* Allocated length of rxbuf */
+ u8 *rxctl; /* Aligned pointer into rxbuf */
+ u8 *databuf; /* Buffer for receiving big glom packet */
+ u8 *dataptr; /* Aligned pointer into databuf */
+ uint rxlen; /* Length of valid data in buffer */
+
+ u8 sdpcm_ver; /* Bus protocol reported by dongle */
+
+ bool intr; /* Use interrupts */
+ bool poll; /* Use polling */
+ bool ipend; /* Device interrupt is pending */
+ uint intrcount; /* Count of device interrupt callbacks */
+ uint lastintrs; /* Count as of last watchdog timer */
+ uint spurious; /* Count of spurious interrupts */
+ uint pollrate; /* Ticks between device polls */
+ uint polltick; /* Tick counter */
+ uint pollcnt; /* Count of active polls */
+
+#ifdef BCMDBG
+ uint console_interval;
+ struct brcmf_console console; /* Console output polling support */
+ uint console_addr; /* Console address from shared struct */
+#endif /* BCMDBG */
+
+ uint regfails; /* Count of R_REG failures */
+
+ uint clkstate; /* State of sd and backplane clock(s) */
+ bool activity; /* Activity flag for clock down */
+ s32 idletime; /* Control for activity timeout */
+ s32 idlecount; /* Activity timeout counter */
+ s32 idleclock; /* How to set bus driver when idle */
+ s32 sd_rxchain;
+ bool use_rxchain; /* If brcmf should use PKT chains */
+ bool sleeping; /* Is SDIO bus sleeping? */
+ bool rxflow_mode; /* Rx flow control mode */
+ bool rxflow; /* Is rx flow control on */
+ bool alp_only; /* Don't use HT clock (ALP only) */
+/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
+ bool usebufpool;
+
+ /* Some additional counters */
+ uint tx_sderrs; /* Count of tx attempts with sd errors */
+ uint fcqueued; /* Tx packets that got queued */
+ uint rxrtx; /* Count of rtx requests (NAK to dongle) */
+ uint rx_toolong; /* Receive frames too long to receive */
+ uint rxc_errors; /* SDIO errors when reading control frames */
+ uint rx_hdrfail; /* SDIO errors on header reads */
+ uint rx_badhdr; /* Bad received headers (roosync?) */
+ uint rx_badseq; /* Mismatched rx sequence number */
+ uint fc_rcvd; /* Number of flow-control events received */
+ uint fc_xoff; /* Number which turned on flow-control */
+ uint fc_xon; /* Number which turned off flow-control */
+ uint rxglomfail; /* Failed deglom attempts */
+ uint rxglomframes; /* Number of glom frames (superframes) */
+ uint rxglompkts; /* Number of packets from glom frames */
+ uint f2rxhdrs; /* Number of header reads */
+ uint f2rxdata; /* Number of frame data reads */
+ uint f2txdata; /* Number of f2 frame writes */
+ uint f1regdata; /* Number of f1 register accesses */
+
+ u8 *ctrl_frame_buf;
+ u32 ctrl_frame_len;
+ bool ctrl_frame_stat;
+
+ spinlock_t txqlock;
+ wait_queue_head_t ctrl_wait;
+ wait_queue_head_t dcmd_resp_wait;
+
+ struct timer_list timer;
+ struct completion watchdog_wait;
+ struct task_struct *watchdog_tsk;
+ bool wd_timer_valid;
+ uint save_ms;
+
+ struct task_struct *dpc_tsk;
+ struct completion dpc_wait;
+
+ struct semaphore sdsem;
+
+ const char *fw_name;
+ const struct firmware *firmware;
+ const char *nv_name;
+ u32 fw_ptr;
+};
+
+struct sbconfig {
+ u32 PAD[2];
+ u32 sbipsflag; /* initiator port ocp slave flag */
+ u32 PAD[3];
+ u32 sbtpsflag; /* target port ocp slave flag */
+ u32 PAD[11];
+ u32 sbtmerrloga; /* (sonics >= 2.3) */
+ u32 PAD;
+ u32 sbtmerrlog; /* (sonics >= 2.3) */
+ u32 PAD[3];
+ u32 sbadmatch3; /* address match3 */
+ u32 PAD;
+ u32 sbadmatch2; /* address match2 */
+ u32 PAD;
+ u32 sbadmatch1; /* address match1 */
+ u32 PAD[7];
+ u32 sbimstate; /* initiator agent state */
+ u32 sbintvec; /* interrupt mask */
+ u32 sbtmstatelow; /* target state */
+ u32 sbtmstatehigh; /* target state */
+ u32 sbbwa0; /* bandwidth allocation table0 */
+ u32 PAD;
+ u32 sbimconfiglow; /* initiator configuration */
+ u32 sbimconfighigh; /* initiator configuration */
+ u32 sbadmatch0; /* address match0 */
+ u32 PAD;
+ u32 sbtmconfiglow; /* target configuration */
+ u32 sbtmconfighigh; /* target configuration */
+ u32 sbbconfig; /* broadcast configuration */
+ u32 PAD;
+ u32 sbbstate; /* broadcast state */
+ u32 PAD[3];
+ u32 sbactcnfg; /* activate configuration */
+ u32 PAD[3];
+ u32 sbflagst; /* current sbflags */
+ u32 PAD[3];
+ u32 sbidlow; /* identification */
+ u32 sbidhigh; /* identification */
+};
+
+/* clkstate */
+#define CLK_NONE 0
+#define CLK_SDONLY 1
+#define CLK_PENDING 2 /* Not used yet */
+#define CLK_AVAIL 3
+
+#ifdef BCMDBG
+static int qcount[NUMPRIO];
+static int tx_packets[NUMPRIO];
+#endif /* BCMDBG */
+
+#define SDIO_DRIVE_STRENGTH 6 /* in milliamps */
+
+#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+#define ALIGNMENT 4
+
+static void pkt_align(struct sk_buff *p, int len, int align)
+{
+ uint datalign;
+ datalign = (unsigned long)(p->data);
+ datalign = roundup(datalign, (align)) - datalign;
+ if (datalign)
+ skb_pull(p, datalign);
+ __skb_trim(p, len);
+}
+
+/* To check if there's window offered */
+static bool data_ok(struct brcmf_bus *bus)
+{
+ return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
+ ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
+}
+
+/*
+ * Reads a register in the SDIO hardware block. This block occupies a series of
+ * adresses on the 32 bit backplane bus.
+ */
+static void
+r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
+{
+ *retryvar = 0;
+ do {
+ *regvar = brcmf_sdcard_reg_read(bus->sdiodev,
+ bus->ci->buscorebase + reg_offset, sizeof(u32));
+ } while (brcmf_sdcard_regfail(bus->sdiodev) &&
+ (++(*retryvar) <= retry_limit));
+ if (*retryvar) {
+ bus->regfails += (*retryvar-1);
+ if (*retryvar > retry_limit) {
+ brcmf_dbg(ERROR, "FAILED READ %Xh\n", reg_offset);
+ *regvar = 0;
+ }
+ }
+}
+
+static void
+w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
+{
+ *retryvar = 0;
+ do {
+ brcmf_sdcard_reg_write(bus->sdiodev,
+ bus->ci->buscorebase + reg_offset,
+ sizeof(u32), regval);
+ } while (brcmf_sdcard_regfail(bus->sdiodev) &&
+ (++(*retryvar) <= retry_limit));
+ if (*retryvar) {
+ bus->regfails += (*retryvar-1);
+ if (*retryvar > retry_limit)
+ brcmf_dbg(ERROR, "FAILED REGISTER WRITE %Xh\n",
+ reg_offset);
+ }
+}
+
+#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
+
+#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
+
+/* Packet free applicable unconditionally for sdio and sdspi.
+ * Conditional if bufpool was present for gspi bus.
+ */
+static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt)
+{
+ if (bus->usebufpool)
+ brcmu_pkt_buf_free_skb(pkt);
+}
+
+/* Turn backplane clock on or off */
+static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
+{
+ int err;
+ u8 clkctl, clkreq, devctl;
+ unsigned long timeout;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ clkctl = 0;
+
+ if (on) {
+ /* Request HT Avail */
+ clkreq =
+ bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+ if ((bus->ci->chip == BCM4329_CHIP_ID)
+ && (bus->ci->chiprev == 0))
+ clkreq |= SBSDIO_FORCE_ALP;
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "HT Avail request error: %d\n", err);
+ return -EBADE;
+ }
+
+ if (pendok && ((bus->ci->buscoretype == PCMCIA_CORE_ID)
+ && (bus->ci->buscorerev == 9))) {
+ u32 dummy, retries;
+ r_sdreg32(bus, &dummy,
+ offsetof(struct sdpcmd_regs, clockctlstatus),
+ &retries);
+ }
+
+ /* Check current status */
+ clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "HT Avail read error: %d\n", err);
+ return -EBADE;
+ }
+
+ /* Go to pending and await interrupt if appropriate */
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+ /* Allow only clock-available interrupt */
+ devctl = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "Devctl error setting CA: %d\n",
+ err);
+ return -EBADE;
+ }
+
+ devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, devctl, &err);
+ brcmf_dbg(INFO, "CLKCTL: set PENDING\n");
+ bus->clkstate = CLK_PENDING;
+
+ return 0;
+ } else if (bus->clkstate == CLK_PENDING) {
+ /* Cancel CA-only interrupt filter */
+ devctl =
+ brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, devctl, &err);
+ }
+
+ /* Otherwise, wait here (polling) for HT Avail */
+ timeout = jiffies +
+ msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
+ while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+ clkctl = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ &err);
+ if (time_after(jiffies, timeout))
+ break;
+ else
+ usleep_range(5000, 10000);
+ }
+ if (err) {
+ brcmf_dbg(ERROR, "HT Avail request error: %d\n", err);
+ return -EBADE;
+ }
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+ brcmf_dbg(ERROR, "HT Avail timeout (%d): clkctl 0x%02x\n",
+ PMU_MAX_TRANSITION_DLY, clkctl);
+ return -EBADE;
+ }
+
+ /* Mark clock available */
+ bus->clkstate = CLK_AVAIL;
+ brcmf_dbg(INFO, "CLKCTL: turned ON\n");
+
+#if defined(BCMDBG)
+ if (bus->alp_only != true) {
+ if (SBSDIO_ALPONLY(clkctl))
+ brcmf_dbg(ERROR, "HT Clock should be on\n");
+ }
+#endif /* defined (BCMDBG) */
+
+ bus->activity = true;
+ } else {
+ clkreq = 0;
+
+ if (bus->clkstate == CLK_PENDING) {
+ /* Cancel CA-only interrupt filter */
+ devctl = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, devctl, &err);
+ }
+
+ bus->clkstate = CLK_SDONLY;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ brcmf_dbg(INFO, "CLKCTL: turned OFF\n");
+ if (err) {
+ brcmf_dbg(ERROR, "Failed access turning clock off: %d\n",
+ err);
+ return -EBADE;
+ }
+ }
+ return 0;
+}
+
+/* Change idle/active SD state */
+static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (on)
+ bus->clkstate = CLK_SDONLY;
+ else
+ bus->clkstate = CLK_NONE;
+
+ return 0;
+}
+
+/* Transition SD and backplane clock readiness */
+static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
+{
+#ifdef BCMDBG
+ uint oldstate = bus->clkstate;
+#endif /* BCMDBG */
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Early exit if we're already there */
+ if (bus->clkstate == target) {
+ if (target == CLK_AVAIL) {
+ brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ bus->activity = true;
+ }
+ return 0;
+ }
+
+ switch (target) {
+ case CLK_AVAIL:
+ /* Make sure SD clock is available */
+ if (bus->clkstate == CLK_NONE)
+ brcmf_sdbrcm_sdclk(bus, true);
+ /* Now request HT Avail on the backplane */
+ brcmf_sdbrcm_htclk(bus, true, pendok);
+ brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ bus->activity = true;
+ break;
+
+ case CLK_SDONLY:
+ /* Remove HT request, or bring up SD clock */
+ if (bus->clkstate == CLK_NONE)
+ brcmf_sdbrcm_sdclk(bus, true);
+ else if (bus->clkstate == CLK_AVAIL)
+ brcmf_sdbrcm_htclk(bus, false, false);
+ else
+ brcmf_dbg(ERROR, "request for %d -> %d\n",
+ bus->clkstate, target);
+ brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ break;
+
+ case CLK_NONE:
+ /* Make sure to remove HT request */
+ if (bus->clkstate == CLK_AVAIL)
+ brcmf_sdbrcm_htclk(bus, false, false);
+ /* Now remove the SD clock */
+ brcmf_sdbrcm_sdclk(bus, false);
+ brcmf_sdbrcm_wd_timer(bus, 0);
+ break;
+ }
+#ifdef BCMDBG
+ brcmf_dbg(INFO, "%d -> %d\n", oldstate, bus->clkstate);
+#endif /* BCMDBG */
+
+ return 0;
+}
+
+static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
+{
+ uint retries = 0;
+
+ brcmf_dbg(INFO, "request %s (currently %s)\n",
+ sleep ? "SLEEP" : "WAKE",
+ bus->sleeping ? "SLEEP" : "WAKE");
+
+ /* Done if we're already in the requested state */
+ if (sleep == bus->sleeping)
+ return 0;
+
+ /* Going to sleep: set the alarm and turn off the lights... */
+ if (sleep) {
+ /* Don't sleep if something is pending */
+ if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+ return -EBUSY;
+
+ /* Make sure the controller has the bus up */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ /* Tell device to start using OOB wakeup */
+ w_sdreg32(bus, SMB_USE_OOB,
+ offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
+ if (retries > retry_limit)
+ brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n");
+
+ /* Turn off our contribution to the HT clock request */
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+ /* Isolate the bus */
+ if (bus->ci->chip != BCM4329_CHIP_ID) {
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
+ }
+
+ /* Change state */
+ bus->sleeping = true;
+
+ } else {
+ /* Waking up: bus power up is ok, set local state */
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+ /* Force pad isolation off if possible
+ (in case power never toggled) */
+ if ((bus->ci->buscoretype == PCMCIA_CORE_ID)
+ && (bus->ci->buscorerev >= 10))
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, 0, NULL);
+
+ /* Make sure the controller has the bus up */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ /* Send misc interrupt to indicate OOB not needed */
+ w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, tosbmailboxdata),
+ &retries);
+ if (retries <= retry_limit)
+ w_sdreg32(bus, SMB_DEV_INT,
+ offsetof(struct sdpcmd_regs, tosbmailbox),
+ &retries);
+
+ if (retries > retry_limit)
+ brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP TO CLEAR OOB!!\n");
+
+ /* Make sure we have SD bus access */
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+
+ /* Change state */
+ bus->sleeping = false;
+ }
+
+ return 0;
+}
+
+static void bus_wake(struct brcmf_bus *bus)
+{
+ if (bus->sleeping)
+ brcmf_sdbrcm_bussleep(bus, false);
+}
+
+static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
+{
+ u32 intstatus = 0;
+ u32 hmb_data;
+ u8 fcbits;
+ uint retries = 0;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Read mailbox data and ack that we did so */
+ r_sdreg32(bus, &hmb_data,
+ offsetof(struct sdpcmd_regs, tohostmailboxdata), &retries);
+
+ if (retries <= retry_limit)
+ w_sdreg32(bus, SMB_INT_ACK,
+ offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
+ bus->f1regdata += 2;
+
+ /* Dongle recomposed rx frames, accept them again */
+ if (hmb_data & HMB_DATA_NAKHANDLED) {
+ brcmf_dbg(INFO, "Dongle reports NAK handled, expect rtx of %d\n",
+ bus->rx_seq);
+ if (!bus->rxskip)
+ brcmf_dbg(ERROR, "unexpected NAKHANDLED!\n");
+
+ bus->rxskip = false;
+ intstatus |= I_HMB_FRAME_IND;
+ }
+
+ /*
+ * DEVREADY does not occur with gSPI.
+ */
+ if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+ bus->sdpcm_ver =
+ (hmb_data & HMB_DATA_VERSION_MASK) >>
+ HMB_DATA_VERSION_SHIFT;
+ if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+ brcmf_dbg(ERROR, "Version mismatch, dongle reports %d, "
+ "expecting %d\n",
+ bus->sdpcm_ver, SDPCM_PROT_VERSION);
+ else
+ brcmf_dbg(INFO, "Dongle ready, protocol version %d\n",
+ bus->sdpcm_ver);
+ }
+
+ /*
+ * Flow Control has been moved into the RX headers and this out of band
+ * method isn't used any more.
+ * remaining backward compatible with older dongles.
+ */
+ if (hmb_data & HMB_DATA_FC) {
+ fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
+ HMB_DATA_FCDATA_SHIFT;
+
+ if (fcbits & ~bus->flowcontrol)
+ bus->fc_xoff++;
+
+ if (bus->flowcontrol & ~fcbits)
+ bus->fc_xon++;
+
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Shouldn't be any others */
+ if (hmb_data & ~(HMB_DATA_DEVREADY |
+ HMB_DATA_NAKHANDLED |
+ HMB_DATA_FC |
+ HMB_DATA_FWREADY |
+ HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
+ brcmf_dbg(ERROR, "Unknown mailbox data content: 0x%02x\n",
+ hmb_data);
+
+ return intstatus;
+}
+
+static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
+{
+ uint retries = 0;
+ u16 lastrbc;
+ u8 hi, lo;
+ int err;
+
+ brcmf_dbg(ERROR, "%sterminate frame%s\n",
+ abort ? "abort command, " : "",
+ rtx ? ", send NAK" : "");
+
+ if (abort)
+ brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_FRAMECTRL,
+ SFC_RF_TERM, &err);
+ bus->f1regdata++;
+
+ /* Wait until the packet has been flushed (device/FIFO stable) */
+ for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+ hi = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+ lo = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_RFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+
+ if ((hi == 0) && (lo == 0))
+ break;
+
+ if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+ brcmf_dbg(ERROR, "count growing: last 0x%04x now 0x%04x\n",
+ lastrbc, (hi << 8) + lo);
+ }
+ lastrbc = (hi << 8) + lo;
+ }
+
+ if (!retries)
+ brcmf_dbg(ERROR, "count never zeroed: last 0x%04x\n", lastrbc);
+ else
+ brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries);
+
+ if (rtx) {
+ bus->rxrtx++;
+ w_sdreg32(bus, SMB_NAK,
+ offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
+
+ bus->f1regdata++;
+ if (retries <= retry_limit)
+ bus->rxskip = true;
+ }
+
+ /* Clear partial in any case */
+ bus->nextlen = 0;
+
+ /* If we can't reach the device, signal failure */
+ if (err || brcmf_sdcard_regfail(bus->sdiodev))
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+}
+
+static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
+{
+ u16 dlen, totlen;
+ u8 *dptr, num = 0;
+
+ u16 sublen, check;
+ struct sk_buff *pfirst, *plast, *pnext, *save_pfirst;
+
+ int errcode;
+ u8 chan, seq, doff, sfdoff;
+ u8 txmax;
+
+ int ifidx = 0;
+ bool usechain = bus->use_rxchain;
+
+ /* If packets, issue read(s) and send up packet chain */
+ /* Return sequence numbers consumed? */
+
+ brcmf_dbg(TRACE, "start: glomd %p glom %p\n", bus->glomd, bus->glom);
+
+ /* If there's a descriptor, generate the packet chain */
+ if (bus->glomd) {
+ pfirst = plast = pnext = NULL;
+ dlen = (u16) (bus->glomd->len);
+ dptr = bus->glomd->data;
+ if (!dlen || (dlen & 1)) {
+ brcmf_dbg(ERROR, "bad glomd len(%d), ignore descriptor\n",
+ dlen);
+ dlen = 0;
+ }
+
+ for (totlen = num = 0; dlen; num++) {
+ /* Get (and move past) next length */
+ sublen = get_unaligned_le16(dptr);
+ dlen -= sizeof(u16);
+ dptr += sizeof(u16);
+ if ((sublen < SDPCM_HDRLEN) ||
+ ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+ brcmf_dbg(ERROR, "descriptor len %d bad: %d\n",
+ num, sublen);
+ pnext = NULL;
+ break;
+ }
+ if (sublen % BRCMF_SDALIGN) {
+ brcmf_dbg(ERROR, "sublen %d not multiple of %d\n",
+ sublen, BRCMF_SDALIGN);
+ usechain = false;
+ }
+ totlen += sublen;
+
+ /* For last frame, adjust read len so total
+ is a block multiple */
+ if (!dlen) {
+ sublen +=
+ (roundup(totlen, bus->blocksize) - totlen);
+ totlen = roundup(totlen, bus->blocksize);
+ }
+
+ /* Allocate/chain packet for next subframe */
+ pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN);
+ if (pnext == NULL) {
+ brcmf_dbg(ERROR, "bcm_pkt_buf_get_skb failed, num %d len %d\n",
+ num, sublen);
+ break;
+ }
+ if (!pfirst) {
+ pfirst = plast = pnext;
+ } else {
+ plast->next = pnext;
+ plast = pnext;
+ }
+
+ /* Adhere to start alignment requirements */
+ pkt_align(pnext, sublen, BRCMF_SDALIGN);
+ }
+
+ /* If all allocations succeeded, save packet chain
+ in bus structure */
+ if (pnext) {
+ brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
+ totlen, num);
+ if (BRCMF_GLOM_ON() && bus->nextlen &&
+ totlen != bus->nextlen) {
+ brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
+ bus->nextlen, totlen, rxseq);
+ }
+ bus->glom = pfirst;
+ pfirst = pnext = NULL;
+ } else {
+ if (pfirst)
+ brcmu_pkt_buf_free_skb(pfirst);
+ bus->glom = NULL;
+ num = 0;
+ }
+
+ /* Done with descriptor packet */
+ brcmu_pkt_buf_free_skb(bus->glomd);
+ bus->glomd = NULL;
+ bus->nextlen = 0;
+ }
+
+ /* Ok -- either we just generated a packet chain,
+ or had one from before */
+ if (bus->glom) {
+ if (BRCMF_GLOM_ON()) {
+ brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
+ for (pnext = bus->glom; pnext; pnext = pnext->next) {
+ brcmf_dbg(GLOM, " %p: %p len 0x%04x (%d)\n",
+ pnext, (u8 *) (pnext->data),
+ pnext->len, pnext->len);
+ }
+ }
+
+ pfirst = bus->glom;
+ dlen = (u16) brcmu_pkttotlen(pfirst);
+
+ /* Do an SDIO read for the superframe. Configurable iovar to
+ * read directly into the chained packet, or allocate a large
+ * packet and and copy into the chain.
+ */
+ if (usechain) {
+ errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
+ bus->sdiodev->sbwad,
+ SDIO_FUNC_2,
+ F2SYNC, (u8 *) pfirst->data, dlen,
+ pfirst);
+ } else if (bus->dataptr) {
+ errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
+ bus->sdiodev->sbwad,
+ SDIO_FUNC_2,
+ F2SYNC, bus->dataptr, dlen,
+ NULL);
+ sublen = (u16) brcmu_pktfrombuf(pfirst, 0, dlen,
+ bus->dataptr);
+ if (sublen != dlen) {
+ brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n",
+ dlen, sublen);
+ errcode = -1;
+ }
+ pnext = NULL;
+ } else {
+ brcmf_dbg(ERROR, "COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n",
+ dlen);
+ errcode = -1;
+ }
+ bus->f2rxdata++;
+
+ /* On failure, kill the superframe, allow a couple retries */
+ if (errcode < 0) {
+ brcmf_dbg(ERROR, "glom read of %d bytes failed: %d\n",
+ dlen, errcode);
+ bus->drvr->rx_errors++;
+
+ if (bus->glomerr++ < 3) {
+ brcmf_sdbrcm_rxfail(bus, true, true);
+ } else {
+ bus->glomerr = 0;
+ brcmf_sdbrcm_rxfail(bus, true, false);
+ brcmu_pkt_buf_free_skb(bus->glom);
+ bus->rxglomfail++;
+ bus->glom = NULL;
+ }
+ return 0;
+ }
+#ifdef BCMDBG
+ if (BRCMF_GLOM_ON()) {
+ printk(KERN_DEBUG "SUPERFRAME:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ pfirst->data, min_t(int, pfirst->len, 48));
+ }
+#endif
+
+ /* Validate the superframe header */
+ dptr = (u8 *) (pfirst->data);
+ sublen = get_unaligned_le16(dptr);
+ check = get_unaligned_le16(dptr + sizeof(u16));
+
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+ bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ brcmf_dbg(INFO, "nextlen too large (%d) seq %d\n",
+ bus->nextlen, seq);
+ bus->nextlen = 0;
+ }
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+ errcode = 0;
+ if ((u16)~(sublen ^ check)) {
+ brcmf_dbg(ERROR, "(superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
+ sublen, check);
+ errcode = -1;
+ } else if (roundup(sublen, bus->blocksize) != dlen) {
+ brcmf_dbg(ERROR, "(superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
+ sublen, roundup(sublen, bus->blocksize),
+ dlen);
+ errcode = -1;
+ } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) !=
+ SDPCM_GLOM_CHANNEL) {
+ brcmf_dbg(ERROR, "(superframe): bad channel %d\n",
+ SDPCM_PACKET_CHANNEL(
+ &dptr[SDPCM_FRAMETAG_LEN]));
+ errcode = -1;
+ } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
+ brcmf_dbg(ERROR, "(superframe): got 2nd descriptor?\n");
+ errcode = -1;
+ } else if ((doff < SDPCM_HDRLEN) ||
+ (doff > (pfirst->len - SDPCM_HDRLEN))) {
+ brcmf_dbg(ERROR, "(superframe): Bad data offset %d: HW %d pkt %d min %d\n",
+ doff, sublen, pfirst->len, SDPCM_HDRLEN);
+ errcode = -1;
+ }
+
+ /* Check sequence number of superframe SW header */
+ if (rxseq != seq) {
+ brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n",
+ seq, rxseq);
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((u8) (txmax - bus->tx_seq) > 0x40) {
+ brcmf_dbg(ERROR, "unlikely tx max %d with tx_seq %d\n",
+ txmax, bus->tx_seq);
+ txmax = bus->tx_seq + 2;
+ }
+ bus->tx_max = txmax;
+
+ /* Remove superframe header, remember offset */
+ skb_pull(pfirst, doff);
+ sfdoff = doff;
+
+ /* Validate all the subframe headers */
+ for (num = 0, pnext = pfirst; pnext && !errcode;
+ num++, pnext = pnext->next) {
+ dptr = (u8 *) (pnext->data);
+ dlen = (u16) (pnext->len);
+ sublen = get_unaligned_le16(dptr);
+ check = get_unaligned_le16(dptr + sizeof(u16));
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+#ifdef BCMDBG
+ if (BRCMF_GLOM_ON()) {
+ printk(KERN_DEBUG "subframe:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ dptr, 32);
+ }
+#endif
+
+ if ((u16)~(sublen ^ check)) {
+ brcmf_dbg(ERROR, "(subframe %d): HW hdr error: len/check 0x%04x/0x%04x\n",
+ num, sublen, check);
+ errcode = -1;
+ } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
+ brcmf_dbg(ERROR, "(subframe %d): length mismatch: len 0x%04x, expect 0x%04x\n",
+ num, sublen, dlen);
+ errcode = -1;
+ } else if ((chan != SDPCM_DATA_CHANNEL) &&
+ (chan != SDPCM_EVENT_CHANNEL)) {
+ brcmf_dbg(ERROR, "(subframe %d): bad channel %d\n",
+ num, chan);
+ errcode = -1;
+ } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
+ brcmf_dbg(ERROR, "(subframe %d): Bad data offset %d: HW %d min %d\n",
+ num, doff, sublen, SDPCM_HDRLEN);
+ errcode = -1;
+ }
+ }
+
+ if (errcode) {
+ /* Terminate frame on error, request
+ a couple retries */
+ if (bus->glomerr++ < 3) {
+ /* Restore superframe header space */
+ skb_push(pfirst, sfdoff);
+ brcmf_sdbrcm_rxfail(bus, true, true);
+ } else {
+ bus->glomerr = 0;
+ brcmf_sdbrcm_rxfail(bus, true, false);
+ brcmu_pkt_buf_free_skb(bus->glom);
+ bus->rxglomfail++;
+ bus->glom = NULL;
+ }
+ bus->nextlen = 0;
+ return 0;
+ }
+
+ /* Basic SD framing looks ok - process each packet (header) */
+ save_pfirst = pfirst;
+ bus->glom = NULL;
+ plast = NULL;
+
+ for (num = 0; pfirst; rxseq++, pfirst = pnext) {
+ pnext = pfirst->next;
+ pfirst->next = NULL;
+
+ dptr = (u8 *) (pfirst->data);
+ sublen = get_unaligned_le16(dptr);
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+ brcmf_dbg(GLOM, "Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
+ num, pfirst, pfirst->data,
+ pfirst->len, sublen, chan, seq);
+
+ /* precondition: chan == SDPCM_DATA_CHANNEL ||
+ chan == SDPCM_EVENT_CHANNEL */
+
+ if (rxseq != seq) {
+ brcmf_dbg(GLOM, "rx_seq %d, expected %d\n",
+ seq, rxseq);
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
+ printk(KERN_DEBUG "Rx Subframe Data:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ dptr, dlen);
+ }
+#endif
+
+ __skb_trim(pfirst, sublen);
+ skb_pull(pfirst, doff);
+
+ if (pfirst->len == 0) {
+ brcmu_pkt_buf_free_skb(pfirst);
+ if (plast)
+ plast->next = pnext;
+ else
+ save_pfirst = pnext;
+
+ continue;
+ } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx,
+ pfirst) != 0) {
+ brcmf_dbg(ERROR, "rx protocol error\n");
+ bus->drvr->rx_errors++;
+ brcmu_pkt_buf_free_skb(pfirst);
+ if (plast)
+ plast->next = pnext;
+ else
+ save_pfirst = pnext;
+
+ continue;
+ }
+
+ /* this packet will go up, link back into
+ chain and count it */
+ pfirst->next = pnext;
+ plast = pfirst;
+ num++;
+
+#ifdef BCMDBG
+ if (BRCMF_GLOM_ON()) {
+ brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
+ num, pfirst, pfirst->data,
+ pfirst->len, pfirst->next,
+ pfirst->prev);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ pfirst->data,
+ min_t(int, pfirst->len, 32));
+ }
+#endif /* BCMDBG */
+ }
+ if (num) {
+ up(&bus->sdsem);
+ brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num);
+ down(&bus->sdsem);
+ }
+
+ bus->rxglomframes++;
+ bus->rxglompkts += num;
+ }
+ return num;
+}
+
+static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
+ bool *pending)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
+
+ /* Wait until control frame is available */
+ add_wait_queue(&bus->dcmd_resp_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (!(*condition) && (!signal_pending(current) && timeout))
+ timeout = schedule_timeout(timeout);
+
+ if (signal_pending(current))
+ *pending = true;
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&bus->dcmd_resp_wait, &wait);
+
+ return timeout;
+}
+
+static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
+{
+ if (waitqueue_active(&bus->dcmd_resp_wait))
+ wake_up_interruptible(&bus->dcmd_resp_wait);
+
+ return 0;
+}
+static void
+brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
+{
+ uint rdlen, pad;
+
+ int sdret;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Set rxctl for frame (w/optional alignment) */
+ bus->rxctl = bus->rxbuf;
+ bus->rxctl += BRCMF_FIRSTREAD;
+ pad = ((unsigned long)bus->rxctl % BRCMF_SDALIGN);
+ if (pad)
+ bus->rxctl += (BRCMF_SDALIGN - pad);
+ bus->rxctl -= BRCMF_FIRSTREAD;
+
+ /* Copy the already-read portion over */
+ memcpy(bus->rxctl, hdr, BRCMF_FIRSTREAD);
+ if (len <= BRCMF_FIRSTREAD)
+ goto gotpkt;
+
+ /* Raise rdlen to next SDIO block to avoid tail command */
+ rdlen = len - BRCMF_FIRSTREAD;
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((len + pad) < bus->drvr->maxctl))
+ rdlen += pad;
+ } else if (rdlen % BRCMF_SDALIGN) {
+ rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (rdlen & (ALIGNMENT - 1))
+ rdlen = roundup(rdlen, ALIGNMENT);
+
+ /* Drop if the read is too big or it exceeds our maximum */
+ if ((rdlen + BRCMF_FIRSTREAD) > bus->drvr->maxctl) {
+ brcmf_dbg(ERROR, "%d-byte control read exceeds %d-byte buffer\n",
+ rdlen, bus->drvr->maxctl);
+ bus->drvr->rx_errors++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ goto done;
+ }
+
+ if ((len - doff) > bus->drvr->maxctl) {
+ brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+ len, len - doff, bus->drvr->maxctl);
+ bus->drvr->rx_errors++;
+ bus->rx_toolong++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ goto done;
+ }
+
+ /* Read remainder of frame body into the rxctl buffer */
+ sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
+ bus->sdiodev->sbwad,
+ SDIO_FUNC_2,
+ F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen,
+ NULL);
+ bus->f2rxdata++;
+
+ /* Control frame failures need retransmission */
+ if (sdret < 0) {
+ brcmf_dbg(ERROR, "read %d control bytes failed: %d\n",
+ rdlen, sdret);
+ bus->rxc_errors++;
+ brcmf_sdbrcm_rxfail(bus, true, true);
+ goto done;
+ }
+
+gotpkt:
+
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_CTL_ON()) {
+ printk(KERN_DEBUG "RxCtrl:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, bus->rxctl, len);
+ }
+#endif
+
+ /* Point to valid data and indicate its length */
+ bus->rxctl += doff;
+ bus->rxlen = len - doff;
+
+done:
+ /* Awake any waiters */
+ brcmf_sdbrcm_dcmd_resp_wake(bus);
+}
+
+/* Pad read to blocksize for efficiency */
+static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
+{
+ if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
+ *pad = bus->blocksize - (*rdlen % bus->blocksize);
+ if (*pad <= bus->roundup && *pad < bus->blocksize &&
+ *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
+ *rdlen += *pad;
+ } else if (*rdlen % BRCMF_SDALIGN) {
+ *rdlen += BRCMF_SDALIGN - (*rdlen % BRCMF_SDALIGN);
+ }
+}
+
+static void
+brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
+ struct sk_buff **pkt, u8 **rxbuf)
+{
+ int sdret; /* Return code from calls */
+
+ *pkt = brcmu_pkt_buf_get_skb(rdlen + BRCMF_SDALIGN);
+ if (*pkt == NULL)
+ return;
+
+ pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
+ *rxbuf = (u8 *) ((*pkt)->data);
+ /* Read the entire frame */
+ sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC,
+ *rxbuf, rdlen, *pkt);
+ bus->f2rxdata++;
+
+ if (sdret < 0) {
+ brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
+ rdlen, sdret);
+ brcmu_pkt_buf_free_skb(*pkt);
+ bus->drvr->rx_errors++;
+ /* Force retry w/normal header read.
+ * Don't attempt NAK for
+ * gSPI
+ */
+ brcmf_sdbrcm_rxfail(bus, true, true);
+ *pkt = NULL;
+ }
+}
+
+/* Checks the header */
+static int
+brcmf_check_rxbuf(struct brcmf_bus *bus, struct sk_buff *pkt, u8 *rxbuf,
+ u8 rxseq, u16 nextlen, u16 *len)
+{
+ u16 check;
+ bool len_consistent; /* Result of comparing readahead len and
+ len from hw-hdr */
+
+ memcpy(bus->rxhdr, rxbuf, SDPCM_HDRLEN);
+
+ /* Extract hardware header fields */
+ *len = get_unaligned_le16(bus->rxhdr);
+ check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
+
+ /* All zeros means readahead info was bad */
+ if (!(*len | check)) {
+ brcmf_dbg(INFO, "(nextlen): read zeros in HW header???\n");
+ goto fail;
+ }
+
+ /* Validate check bytes */
+ if ((u16)~(*len ^ check)) {
+ brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n",
+ nextlen, *len, check);
+ bus->rx_badhdr++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ goto fail;
+ }
+
+ /* Validate frame length */
+ if (*len < SDPCM_HDRLEN) {
+ brcmf_dbg(ERROR, "(nextlen): HW hdr length invalid: %d\n",
+ *len);
+ goto fail;
+ }
+
+ /* Check for consistency with readahead info */
+ len_consistent = (nextlen != (roundup(*len, 16) >> 4));
+ if (len_consistent) {
+ /* Mismatch, force retry w/normal
+ header (may be >4K) */
+ brcmf_dbg(ERROR, "(nextlen): mismatch, nextlen %d len %d rnd %d; expected rxseq %d\n",
+ nextlen, *len, roundup(*len, 16),
+ rxseq);
+ brcmf_sdbrcm_rxfail(bus, true, true);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ brcmf_sdbrcm_pktfree2(bus, pkt);
+ return -EINVAL;
+}
+
+/* Return true if there may be more frames to read */
+static uint
+brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
+{
+ u16 len, check; /* Extracted hardware header fields */
+ u8 chan, seq, doff; /* Extracted software header fields */
+ u8 fcbits; /* Extracted fcbits from software header */
+
+ struct sk_buff *pkt; /* Packet for event or data frames */
+ u16 pad; /* Number of pad bytes to read */
+ u16 rdlen; /* Total number of bytes to read */
+ u8 rxseq; /* Next sequence number to expect */
+ uint rxleft = 0; /* Remaining number of frames allowed */
+ int sdret; /* Return code from calls */
+ u8 txmax; /* Maximum tx sequence offered */
+ u8 *rxbuf;
+ int ifidx = 0;
+ uint rxcount = 0; /* Total frames read */
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Not finished unless we encounter no more frames indication */
+ *finished = false;
+
+ for (rxseq = bus->rx_seq, rxleft = maxframes;
+ !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN;
+ rxseq++, rxleft--) {
+
+ /* Handle glomming separately */
+ if (bus->glom || bus->glomd) {
+ u8 cnt;
+ brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
+ bus->glomd, bus->glom);
+ cnt = brcmf_sdbrcm_rxglom(bus, rxseq);
+ brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
+ rxseq += cnt - 1;
+ rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+ continue;
+ }
+
+ /* Try doing single read if we can */
+ if (bus->nextlen) {
+ u16 nextlen = bus->nextlen;
+ bus->nextlen = 0;
+
+ rdlen = len = nextlen << 4;
+ brcmf_pad(bus, &pad, &rdlen);
+
+ /*
+ * After the frame is received we have to
+ * distinguish whether it is data
+ * or non-data frame.
+ */
+ brcmf_alloc_pkt_and_read(bus, rdlen, &pkt, &rxbuf);
+ if (pkt == NULL) {
+ /* Give up on data, request rtx of events */
+ brcmf_dbg(ERROR, "(nextlen): brcmf_alloc_pkt_and_read failed: len %d rdlen %d expected rxseq %d\n",
+ len, rdlen, rxseq);
+ continue;
+ }
+
+ if (brcmf_check_rxbuf(bus, pkt, rxbuf, rxseq, nextlen,
+ &len) < 0)
+ continue;
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(
+ &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(
+ &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(
+ &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(
+ &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ bus->nextlen =
+ bus->rxhdr[SDPCM_FRAMETAG_LEN +
+ SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
+ bus->nextlen, seq);
+ bus->nextlen = 0;
+ }
+
+ bus->drvr->rx_readahead_cnt++;
+
+ /* Handle Flow Control */
+ fcbits = SDPCM_FCMASK_VALUE(
+ &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ if (bus->flowcontrol != fcbits) {
+ if (~bus->flowcontrol & fcbits)
+ bus->fc_xoff++;
+
+ if (bus->flowcontrol & ~fcbits)
+ bus->fc_xon++;
+
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n",
+ seq, rxseq);
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((u8) (txmax - bus->tx_seq) > 0x40) {
+ brcmf_dbg(ERROR, "got unlikely tx max %d with tx_seq %d\n",
+ txmax, bus->tx_seq);
+ txmax = bus->tx_seq + 2;
+ }
+ bus->tx_max = txmax;
+
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
+ printk(KERN_DEBUG "Rx Data:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ rxbuf, len);
+ } else if (BRCMF_HDRS_ON()) {
+ printk(KERN_DEBUG "RxHdr:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ brcmf_dbg(ERROR, "(nextlen): readahead on control packet %d?\n",
+ seq);
+ /* Force retry w/normal header read */
+ bus->nextlen = 0;
+ brcmf_sdbrcm_rxfail(bus, false, true);
+ brcmf_sdbrcm_pktfree2(bus, pkt);
+ continue;
+ }
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ brcmf_dbg(ERROR, "(nextlen): bad data offset %d: HW len %d min %d\n",
+ doff, len, SDPCM_HDRLEN);
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdbrcm_pktfree2(bus, pkt);
+ continue;
+ }
+
+ /* All done with this one -- now deliver the packet */
+ goto deliver;
+ }
+
+ /* Read frame header (hardware and software) */
+ sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, bus->rxhdr,
+ BRCMF_FIRSTREAD, NULL);
+ bus->f2rxhdrs++;
+
+ if (sdret < 0) {
+ brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret);
+ bus->rx_hdrfail++;
+ brcmf_sdbrcm_rxfail(bus, true, true);
+ continue;
+ }
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() || BRCMF_HDRS_ON()) {
+ printk(KERN_DEBUG "RxHdr:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ /* Extract hardware header fields */
+ len = get_unaligned_le16(bus->rxhdr);
+ check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
+
+ /* All zeros means no more frames */
+ if (!(len | check)) {
+ *finished = true;
+ break;
+ }
+
+ /* Validate check bytes */
+ if ((u16) ~(len ^ check)) {
+ brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n",
+ len, check);
+ bus->rx_badhdr++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ continue;
+ }
+
+ /* Validate frame length */
+ if (len < SDPCM_HDRLEN) {
+ brcmf_dbg(ERROR, "HW hdr length invalid: %d\n", len);
+ continue;
+ }
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n",
+ doff, len, SDPCM_HDRLEN, seq);
+ bus->rx_badhdr++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ continue;
+ }
+
+ /* Save the readahead length if there is one */
+ bus->nextlen =
+ bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
+ bus->nextlen, seq);
+ bus->nextlen = 0;
+ }
+
+ /* Handle Flow Control */
+ fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ if (bus->flowcontrol != fcbits) {
+ if (~bus->flowcontrol & fcbits)
+ bus->fc_xoff++;
+
+ if (bus->flowcontrol & ~fcbits)
+ bus->fc_xon++;
+
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq);
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((u8) (txmax - bus->tx_seq) > 0x40) {
+ brcmf_dbg(ERROR, "unlikely tx max %d with tx_seq %d\n",
+ txmax, bus->tx_seq);
+ txmax = bus->tx_seq + 2;
+ }
+ bus->tx_max = txmax;
+
+ /* Call a separate function for control frames */
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ brcmf_sdbrcm_read_control(bus, bus->rxhdr, len, doff);
+ continue;
+ }
+
+ /* precondition: chan is either SDPCM_DATA_CHANNEL,
+ SDPCM_EVENT_CHANNEL, SDPCM_TEST_CHANNEL or
+ SDPCM_GLOM_CHANNEL */
+
+ /* Length to read */
+ rdlen = (len > BRCMF_FIRSTREAD) ? (len - BRCMF_FIRSTREAD) : 0;
+
+ /* May pad read to blocksize for efficiency */
+ if (bus->roundup && bus->blocksize &&
+ (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((rdlen + pad + BRCMF_FIRSTREAD) < MAX_RX_DATASZ))
+ rdlen += pad;
+ } else if (rdlen % BRCMF_SDALIGN) {
+ rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (rdlen & (ALIGNMENT - 1))
+ rdlen = roundup(rdlen, ALIGNMENT);
+
+ if ((rdlen + BRCMF_FIRSTREAD) > MAX_RX_DATASZ) {
+ /* Too long -- skip this frame */
+ brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
+ len, rdlen);
+ bus->drvr->rx_errors++;
+ bus->rx_toolong++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ continue;
+ }
+
+ pkt = brcmu_pkt_buf_get_skb(rdlen +
+ BRCMF_FIRSTREAD + BRCMF_SDALIGN);
+ if (!pkt) {
+ /* Give up on data, request rtx of events */
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: rdlen %d chan %d\n",
+ rdlen, chan);
+ bus->drvr->rx_dropped++;
+ brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan));
+ continue;
+ }
+
+ /* Leave room for what we already read, and align remainder */
+ skb_pull(pkt, BRCMF_FIRSTREAD);
+ pkt_align(pkt, rdlen, BRCMF_SDALIGN);
+
+ /* Read the remaining frame data */
+ sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)),
+ rdlen, pkt);
+ bus->f2rxdata++;
+
+ if (sdret < 0) {
+ brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen,
+ ((chan == SDPCM_EVENT_CHANNEL) ? "event"
+ : ((chan == SDPCM_DATA_CHANNEL) ? "data"
+ : "test")), sdret);
+ brcmu_pkt_buf_free_skb(pkt);
+ bus->drvr->rx_errors++;
+ brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan));
+ continue;
+ }
+
+ /* Copy the already-read portion */
+ skb_push(pkt, BRCMF_FIRSTREAD);
+ memcpy(pkt->data, bus->rxhdr, BRCMF_FIRSTREAD);
+
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
+ printk(KERN_DEBUG "Rx Data:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ pkt->data, len);
+ }
+#endif
+
+deliver:
+ /* Save superframe descriptor and allocate packet frame */
+ if (chan == SDPCM_GLOM_CHANNEL) {
+ if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+ brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
+ len);
+#ifdef BCMDBG
+ if (BRCMF_GLOM_ON()) {
+ printk(KERN_DEBUG "Glom Data:\n");
+ print_hex_dump_bytes("",
+ DUMP_PREFIX_OFFSET,
+ pkt->data, len);
+ }
+#endif
+ __skb_trim(pkt, len);
+ skb_pull(pkt, SDPCM_HDRLEN);
+ bus->glomd = pkt;
+ } else {
+ brcmf_dbg(ERROR, "%s: glom superframe w/o "
+ "descriptor!\n", __func__);
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ }
+ continue;
+ }
+
+ /* Fill in packet len and prio, deliver upward */
+ __skb_trim(pkt, len);
+ skb_pull(pkt, doff);
+
+ if (pkt->len == 0) {
+ brcmu_pkt_buf_free_skb(pkt);
+ continue;
+ } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, pkt) != 0) {
+ brcmf_dbg(ERROR, "rx protocol error\n");
+ brcmu_pkt_buf_free_skb(pkt);
+ bus->drvr->rx_errors++;
+ continue;
+ }
+
+ /* Unlock during rx call */
+ up(&bus->sdsem);
+ brcmf_rx_frame(bus->drvr, ifidx, pkt, 1);
+ down(&bus->sdsem);
+ }
+ rxcount = maxframes - rxleft;
+#ifdef BCMDBG
+ /* Message if we hit the limit */
+ if (!rxleft)
+ brcmf_dbg(DATA, "hit rx limit of %d frames\n",
+ maxframes);
+ else
+#endif /* BCMDBG */
+ brcmf_dbg(DATA, "processed %d frames\n", rxcount);
+ /* Back off rxseq if awaiting rtx, update rx_seq */
+ if (bus->rxskip)
+ rxseq--;
+ bus->rx_seq = rxseq;
+
+ return rxcount;
+}
+
+static int
+brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags,
+ u8 *buf, uint nbytes, struct sk_buff *pkt)
+{
+ return brcmf_sdcard_send_buf
+ (bus->sdiodev, addr, fn, flags, buf, nbytes, pkt);
+}
+
+static void
+brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
+{
+ up(&bus->sdsem);
+ wait_event_interruptible_timeout(bus->ctrl_wait,
+ (*lockvar == false), HZ * 2);
+ down(&bus->sdsem);
+ return;
+}
+
+static void
+brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
+{
+ if (waitqueue_active(&bus->ctrl_wait))
+ wake_up_interruptible(&bus->ctrl_wait);
+ return;
+}
+
+/* Writes a HW/SW header into the packet and sends it. */
+/* Assumes: (a) header space already there, (b) caller holds lock */
+static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
+ uint chan, bool free_pkt)
+{
+ int ret;
+ u8 *frame;
+ u16 len, pad = 0;
+ u32 swheader;
+ struct sk_buff *new;
+ int i;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ frame = (u8 *) (pkt->data);
+
+ /* Add alignment padding, allocate new packet if needed */
+ pad = ((unsigned long)frame % BRCMF_SDALIGN);
+ if (pad) {
+ if (skb_headroom(pkt) < pad) {
+ brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
+ skb_headroom(pkt), pad);
+ bus->drvr->tx_realloc++;
+ new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN);
+ if (!new) {
+ brcmf_dbg(ERROR, "couldn't allocate new %d-byte packet\n",
+ pkt->len + BRCMF_SDALIGN);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ pkt_align(new, pkt->len, BRCMF_SDALIGN);
+ memcpy(new->data, pkt->data, pkt->len);
+ if (free_pkt)
+ brcmu_pkt_buf_free_skb(pkt);
+ /* free the pkt if canned one is not used */
+ free_pkt = true;
+ pkt = new;
+ frame = (u8 *) (pkt->data);
+ /* precondition: (frame % BRCMF_SDALIGN) == 0) */
+ pad = 0;
+ } else {
+ skb_push(pkt, pad);
+ frame = (u8 *) (pkt->data);
+ /* precondition: pad + SDPCM_HDRLEN <= pkt->len */
+ memset(frame, 0, pad + SDPCM_HDRLEN);
+ }
+ }
+ /* precondition: pad < BRCMF_SDALIGN */
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ len = (u16) (pkt->len);
+ *(__le16 *) frame = cpu_to_le16(len);
+ *(((__le16 *) frame) + 1) = cpu_to_le16(~len);
+
+ /* Software tag: channel, sequence number, data offset */
+ swheader =
+ ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
+ (((pad +
+ SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+
+ put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
+ put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+#ifdef BCMDBG
+ tx_packets[pkt->priority]++;
+ if (BRCMF_BYTES_ON() &&
+ (((BRCMF_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
+ (BRCMF_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
+ printk(KERN_DEBUG "Tx Frame:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, frame, len);
+ } else if (BRCMF_HDRS_ON()) {
+ printk(KERN_DEBUG "TxHdr:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ frame, min_t(u16, len, 16));
+ }
+#endif
+
+ /* Raise len to next SDIO block to eliminate tail command */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ u16 pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize))
+ len += pad;
+ } else if (len % BRCMF_SDALIGN) {
+ len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
+ }
+
+ /* Some controllers have trouble with odd bytes -- round to even */
+ if (len & (ALIGNMENT - 1))
+ len = roundup(len, ALIGNMENT);
+
+ ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, frame,
+ len, pkt);
+ bus->f2txdata++;
+
+ if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
+ ret);
+ bus->tx_sderrs++;
+
+ brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM,
+ NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ u8 hi, lo;
+ hi = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI,
+ NULL);
+ lo = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO,
+ NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+
+ }
+ if (ret == 0)
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+done:
+ /* restore pkt buffer pointer before calling tx complete routine */
+ skb_pull(pkt, SDPCM_HDRLEN + pad);
+ up(&bus->sdsem);
+ brcmf_txcomplete(bus->drvr, pkt, ret != 0);
+ down(&bus->sdsem);
+
+ if (free_pkt)
+ brcmu_pkt_buf_free_skb(pkt);
+
+ return ret;
+}
+
+static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
+{
+ struct sk_buff *pkt;
+ u32 intstatus = 0;
+ uint retries = 0;
+ int ret = 0, prec_out;
+ uint cnt = 0;
+ uint datalen;
+ u8 tx_prec_map;
+
+ struct brcmf_pub *drvr = bus->drvr;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ tx_prec_map = ~bus->flowcontrol;
+
+ /* Send frames until the limit or some other event */
+ for (cnt = 0; (cnt < maxframes) && data_ok(bus); cnt++) {
+ spin_lock_bh(&bus->txqlock);
+ pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
+ if (pkt == NULL) {
+ spin_unlock_bh(&bus->txqlock);
+ break;
+ }
+ spin_unlock_bh(&bus->txqlock);
+ datalen = pkt->len - SDPCM_HDRLEN;
+
+ ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
+ if (ret)
+ bus->drvr->tx_errors++;
+ else
+ bus->drvr->dstats.tx_bytes += datalen;
+
+ /* In poll mode, need to check for other events */
+ if (!bus->intr && cnt) {
+ /* Check device status, signal pending interrupt */
+ r_sdreg32(bus, &intstatus,
+ offsetof(struct sdpcmd_regs, intstatus),
+ &retries);
+ bus->f2txdata++;
+ if (brcmf_sdcard_regfail(bus->sdiodev))
+ break;
+ if (intstatus & bus->hostintmask)
+ bus->ipend = true;
+ }
+ }
+
+ /* Deflow-control stack if needed */
+ if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) &&
+ drvr->txoff && (pktq_len(&bus->txq) < TXLOW))
+ brcmf_txflowcontrol(drvr, 0, OFF);
+
+ return cnt;
+}
+
+static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
+{
+ u32 intstatus, newstatus = 0;
+ uint retries = 0;
+ uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
+ uint txlimit = bus->txbound; /* Tx frames to send before resched */
+ uint framecnt = 0; /* Temporary counter of tx/rx frames */
+ bool rxdone = true; /* Flag for no more read data */
+ bool resched = false; /* Flag indicating resched wanted */
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Start with leftover status bits */
+ intstatus = bus->intstatus;
+
+ down(&bus->sdsem);
+
+ /* If waiting for HTAVAIL, check status */
+ if (bus->clkstate == CLK_PENDING) {
+ int err;
+ u8 clkctl, devctl = 0;
+
+#ifdef BCMDBG
+ /* Check for inconsistent device control */
+ devctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+ }
+#endif /* BCMDBG */
+
+ /* Read CSR, if clock on switch to AVAIL, else ignore */
+ clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "error reading CSR: %d\n",
+ err);
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+ }
+
+ brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
+ devctl, clkctl);
+
+ if (SBSDIO_HTAV(clkctl)) {
+ devctl = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
+ err);
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+ }
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, devctl, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
+ err);
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+ }
+ bus->clkstate = CLK_AVAIL;
+ } else {
+ goto clkwait;
+ }
+ }
+
+ bus_wake(bus);
+
+ /* Make sure backplane clock is on */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true);
+ if (bus->clkstate == CLK_PENDING)
+ goto clkwait;
+
+ /* Pending interrupt indicates new device status */
+ if (bus->ipend) {
+ bus->ipend = false;
+ r_sdreg32(bus, &newstatus,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+ bus->f1regdata++;
+ if (brcmf_sdcard_regfail(bus->sdiodev))
+ newstatus = 0;
+ newstatus &= bus->hostintmask;
+ bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+ if (newstatus) {
+ w_sdreg32(bus, newstatus,
+ offsetof(struct sdpcmd_regs, intstatus),
+ &retries);
+ bus->f1regdata++;
+ }
+ }
+
+ /* Merge new bits with previous */
+ intstatus |= newstatus;
+ bus->intstatus = 0;
+
+ /* Handle flow-control change: read new state in case our ack
+ * crossed another change interrupt. If change still set, assume
+ * FC ON for safety, let next loop through do the debounce.
+ */
+ if (intstatus & I_HMB_FC_CHANGE) {
+ intstatus &= ~I_HMB_FC_CHANGE;
+ w_sdreg32(bus, I_HMB_FC_CHANGE,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+
+ r_sdreg32(bus, &newstatus,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+ bus->f1regdata += 2;
+ bus->fcstate =
+ !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+ intstatus |= (newstatus & bus->hostintmask);
+ }
+
+ /* Handle host mailbox indication */
+ if (intstatus & I_HMB_HOST_INT) {
+ intstatus &= ~I_HMB_HOST_INT;
+ intstatus |= brcmf_sdbrcm_hostmail(bus);
+ }
+
+ /* Generally don't ask for these, can get CRC errors... */
+ if (intstatus & I_WR_OOSYNC) {
+ brcmf_dbg(ERROR, "Dongle reports WR_OOSYNC\n");
+ intstatus &= ~I_WR_OOSYNC;
+ }
+
+ if (intstatus & I_RD_OOSYNC) {
+ brcmf_dbg(ERROR, "Dongle reports RD_OOSYNC\n");
+ intstatus &= ~I_RD_OOSYNC;
+ }
+
+ if (intstatus & I_SBINT) {
+ brcmf_dbg(ERROR, "Dongle reports SBINT\n");
+ intstatus &= ~I_SBINT;
+ }
+
+ /* Would be active due to wake-wlan in gSPI */
+ if (intstatus & I_CHIPACTIVE) {
+ brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n");
+ intstatus &= ~I_CHIPACTIVE;
+ }
+
+ /* Ignore frame indications if rxskip is set */
+ if (bus->rxskip)
+ intstatus &= ~I_HMB_FRAME_IND;
+
+ /* On frame indication, read available frames */
+ if (PKT_AVAILABLE()) {
+ framecnt = brcmf_sdbrcm_readframes(bus, rxlimit, &rxdone);
+ if (rxdone || bus->rxskip)
+ intstatus &= ~I_HMB_FRAME_IND;
+ rxlimit -= min(framecnt, rxlimit);
+ }
+
+ /* Keep still-pending events for next scheduling */
+ bus->intstatus = intstatus;
+
+clkwait:
+ if (data_ok(bus) && bus->ctrl_frame_stat &&
+ (bus->clkstate == CLK_AVAIL)) {
+ int ret, i;
+
+ ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
+ (u32) bus->ctrl_frame_len, NULL);
+
+ if (ret < 0) {
+ /* On failure, abort the command and
+ terminate the frame */
+ brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
+ ret);
+ bus->tx_sderrs++;
+
+ brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM,
+ NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ u8 hi, lo;
+ hi = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI,
+ NULL);
+ lo = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO,
+ NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+
+ }
+ if (ret == 0)
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+ brcmf_dbg(INFO, "Return_dpc value is : %d\n", ret);
+ bus->ctrl_frame_stat = false;
+ brcmf_sdbrcm_wait_event_wakeup(bus);
+ }
+ /* Send queued frames (limit 1 if rx may still be pending) */
+ else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
+ && data_ok(bus)) {
+ framecnt = rxdone ? txlimit : min(txlimit, bus->txminmax);
+ framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
+ txlimit -= framecnt;
+ }
+
+ /* Resched if events or tx frames are pending,
+ else await next interrupt */
+ /* On failed register access, all bets are off:
+ no resched or interrupts */
+ if ((bus->drvr->busstate == BRCMF_BUS_DOWN) ||
+ brcmf_sdcard_regfail(bus->sdiodev)) {
+ brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n",
+ brcmf_sdcard_regfail(bus->sdiodev));
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->intstatus = 0;
+ } else if (bus->clkstate == CLK_PENDING) {
+ brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
+ resched = true;
+ } else if (bus->intstatus || bus->ipend ||
+ (!bus->fcstate && brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol)
+ && data_ok(bus)) || PKT_AVAILABLE()) {
+ resched = true;
+ }
+
+ bus->dpc_sched = resched;
+
+ /* If we're done for now, turn off clock request. */
+ if ((bus->clkstate != CLK_PENDING)
+ && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
+ bus->activity = false;
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+ }
+
+ up(&bus->sdsem);
+
+ return resched;
+}
+
+static int brcmf_sdbrcm_dpc_thread(void *data)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *) data;
+
+ allow_signal(SIGTERM);
+ /* Run until signal received */
+ while (1) {
+ if (kthread_should_stop())
+ break;
+ if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
+ /* Call bus dpc unless it indicated down
+ (then clean stop) */
+ if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
+ if (brcmf_sdbrcm_dpc(bus))
+ complete(&bus->dpc_wait);
+ } else {
+ /* after stopping the bus, exit thread */
+ brcmf_sdbrcm_bus_stop(bus);
+ bus->dpc_tsk = NULL;
+ break;
+ }
+ } else
+ break;
+ }
+ return 0;
+}
+
+int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
+{
+ int ret = -EBADE;
+ uint datalen, prec;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ datalen = pkt->len;
+
+ /* Add space for the header */
+ skb_push(pkt, SDPCM_HDRLEN);
+ /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
+
+ prec = prio2prec((pkt->priority & PRIOMASK));
+
+ /* Check for existing queue, current flow-control,
+ pending event, or pending clock */
+ brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
+ bus->fcqueued++;
+
+ /* Priority based enq */
+ spin_lock_bh(&bus->txqlock);
+ if (brcmf_c_prec_enq(bus->drvr, &bus->txq, pkt, prec) == false) {
+ skb_pull(pkt, SDPCM_HDRLEN);
+ brcmf_txcomplete(bus->drvr, pkt, false);
+ brcmu_pkt_buf_free_skb(pkt);
+ brcmf_dbg(ERROR, "out of bus->txq !!!\n");
+ ret = -ENOSR;
+ } else {
+ ret = 0;
+ }
+ spin_unlock_bh(&bus->txqlock);
+
+ if (pktq_len(&bus->txq) >= TXHI)
+ brcmf_txflowcontrol(bus->drvr, 0, ON);
+
+#ifdef BCMDBG
+ if (pktq_plen(&bus->txq, prec) > qcount[prec])
+ qcount[prec] = pktq_plen(&bus->txq, prec);
+#endif
+ /* Schedule DPC if needed to send queued packet(s) */
+ if (!bus->dpc_sched) {
+ bus->dpc_sched = true;
+ if (bus->dpc_tsk)
+ complete(&bus->dpc_wait);
+ }
+
+ return ret;
+}
+
+static int
+brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data,
+ uint size)
+{
+ int bcmerror = 0;
+ u32 sdaddr;
+ uint dsize;
+
+ /* Determine initial transfer parameters */
+ sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+ if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+ dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+ else
+ dsize = size;
+
+ /* Set the backplane window to include the start address */
+ bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address);
+ if (bcmerror) {
+ brcmf_dbg(ERROR, "window change failed\n");
+ goto xfer_done;
+ }
+
+ /* Do the transfer(s) */
+ while (size) {
+ brcmf_dbg(INFO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
+ write ? "write" : "read", dsize,
+ sdaddr, address & SBSDIO_SBWINDOW_MASK);
+ bcmerror = brcmf_sdcard_rwdata(bus->sdiodev, write,
+ sdaddr, data, dsize);
+ if (bcmerror) {
+ brcmf_dbg(ERROR, "membytes transfer failed\n");
+ break;
+ }
+
+ /* Adjust for next transfer (if any) */
+ size -= dsize;
+ if (size) {
+ data += dsize;
+ address += dsize;
+ bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev,
+ address);
+ if (bcmerror) {
+ brcmf_dbg(ERROR, "window change failed\n");
+ break;
+ }
+ sdaddr = 0;
+ dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
+ }
+ }
+
+xfer_done:
+ /* Return the window to backplane enumeration space for core access */
+ if (brcmf_sdcard_set_sbaddr_window(bus->sdiodev, bus->sdiodev->sbwad))
+ brcmf_dbg(ERROR, "FAILED to set window back to 0x%x\n",
+ bus->sdiodev->sbwad);
+
+ return bcmerror;
+}
+
+#ifdef BCMDBG
+#define CONSOLE_LINE_MAX 192
+
+static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus)
+{
+ struct brcmf_console *c = &bus->console;
+ u8 line[CONSOLE_LINE_MAX], ch;
+ u32 n, idx, addr;
+ int rv;
+
+ /* Don't do anything until FWREADY updates console address */
+ if (bus->console_addr == 0)
+ return 0;
+
+ /* Read console log struct */
+ addr = bus->console_addr + offsetof(struct rte_console, log_le);
+ rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&c->log_le,
+ sizeof(c->log_le));
+ if (rv < 0)
+ return rv;
+
+ /* Allocate console buffer (one time only) */
+ if (c->buf == NULL) {
+ c->bufsize = le32_to_cpu(c->log_le.buf_size);
+ c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
+ if (c->buf == NULL)
+ return -ENOMEM;
+ }
+
+ idx = le32_to_cpu(c->log_le.idx);
+
+ /* Protect against corrupt value */
+ if (idx > c->bufsize)
+ return -EBADE;
+
+ /* Skip reading the console buffer if the index pointer
+ has not moved */
+ if (idx == c->last)
+ return 0;
+
+ /* Read the console buffer */
+ addr = le32_to_cpu(c->log_le.buf);
+ rv = brcmf_sdbrcm_membytes(bus, false, addr, c->buf, c->bufsize);
+ if (rv < 0)
+ return rv;
+
+ while (c->last != idx) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ if (c->last == idx) {
+ /* This would output a partial line.
+ * Instead, back up
+ * the buffer pointer and output this
+ * line next time around.
+ */
+ if (c->last >= n)
+ c->last -= n;
+ else
+ c->last = c->bufsize - n;
+ goto break2;
+ }
+ ch = c->buf[c->last];
+ c->last = (c->last + 1) % c->bufsize;
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ printk(KERN_DEBUG "CONSOLE: %s\n", line);
+ }
+ }
+break2:
+
+ return 0;
+}
+#endif /* BCMDBG */
+
+static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
+{
+ int i;
+ int ret;
+
+ bus->ctrl_frame_stat = false;
+ ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, frame, len, NULL);
+
+ if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
+ ret);
+ bus->tx_sderrs++;
+
+ brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ u8 hi, lo;
+ hi = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI,
+ NULL);
+ lo = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO,
+ NULL);
+ bus->f1regdata += 2;
+ if (hi == 0 && lo == 0)
+ break;
+ }
+ return ret;
+ }
+
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+ return ret;
+}
+
+int
+brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+{
+ u8 *frame;
+ u16 len;
+ u32 swheader;
+ uint retries = 0;
+ u8 doff = 0;
+ int ret = -1;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Back the pointer to make a room for bus header */
+ frame = msg - SDPCM_HDRLEN;
+ len = (msglen += SDPCM_HDRLEN);
+
+ /* Add alignment padding (optional for ctl frames) */
+ doff = ((unsigned long)frame % BRCMF_SDALIGN);
+ if (doff) {
+ frame -= doff;
+ len += doff;
+ msglen += doff;
+ memset(frame, 0, doff + SDPCM_HDRLEN);
+ }
+ /* precondition: doff < BRCMF_SDALIGN */
+ doff += SDPCM_HDRLEN;
+
+ /* Round send length to next SDIO block */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ u16 pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize))
+ len += pad;
+ } else if (len % BRCMF_SDALIGN) {
+ len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (len & (ALIGNMENT - 1))
+ len = roundup(len, ALIGNMENT);
+
+ /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
+
+ /* Need to lock here to protect txseq and SDIO tx calls */
+ down(&bus->sdsem);
+
+ bus_wake(bus);
+
+ /* Make sure backplane clock is on */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ *(__le16 *) frame = cpu_to_le16((u16) msglen);
+ *(((__le16 *) frame) + 1) = cpu_to_le16(~msglen);
+
+ /* Software tag: channel, sequence number, data offset */
+ swheader =
+ ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) &
+ SDPCM_CHANNEL_MASK)
+ | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) &
+ SDPCM_DOFFSET_MASK);
+ put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
+ put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+ if (!data_ok(bus)) {
+ brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+ bus->tx_max, bus->tx_seq);
+ bus->ctrl_frame_stat = true;
+ /* Send from dpc */
+ bus->ctrl_frame_buf = frame;
+ bus->ctrl_frame_len = len;
+
+ brcmf_sdbrcm_wait_for_event(bus, &bus->ctrl_frame_stat);
+
+ if (bus->ctrl_frame_stat == false) {
+ brcmf_dbg(INFO, "ctrl_frame_stat == false\n");
+ ret = 0;
+ } else {
+ brcmf_dbg(INFO, "ctrl_frame_stat == true\n");
+ ret = -1;
+ }
+ }
+
+ if (ret == -1) {
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_CTL_ON()) {
+ printk(KERN_DEBUG "Tx Frame:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ frame, len);
+ } else if (BRCMF_HDRS_ON()) {
+ printk(KERN_DEBUG "TxHdr:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ frame, min_t(u16, len, 16));
+ }
+#endif
+
+ do {
+ ret = brcmf_tx_frame(bus, frame, len);
+ } while (ret < 0 && retries++ < TXRETRIES);
+ }
+
+ if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = false;
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
+ }
+
+ up(&bus->sdsem);
+
+ if (ret)
+ bus->drvr->tx_ctlerrs++;
+ else
+ bus->drvr->tx_ctlpkts++;
+
+ return ret ? -EIO : 0;
+}
+
+int
+brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+{
+ int timeleft;
+ uint rxlen = 0;
+ bool pending;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Wait until control frame is available */
+ timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
+
+ down(&bus->sdsem);
+ rxlen = bus->rxlen;
+ memcpy(msg, bus->rxctl, min(msglen, rxlen));
+ bus->rxlen = 0;
+ up(&bus->sdsem);
+
+ if (rxlen) {
+ brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
+ rxlen, msglen);
+ } else if (timeleft == 0) {
+ brcmf_dbg(ERROR, "resumed on timeout\n");
+ } else if (pending == true) {
+ brcmf_dbg(CTL, "cancelled\n");
+ return -ERESTARTSYS;
+ } else {
+ brcmf_dbg(CTL, "resumed for unknown reason?\n");
+ }
+
+ if (rxlen)
+ bus->drvr->rx_ctlpkts++;
+ else
+ bus->drvr->rx_ctlerrs++;
+
+ return rxlen ? (int)rxlen : -ETIMEDOUT;
+}
+
+static int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len)
+{
+ int bcmerror = 0;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Basic sanity checks */
+ if (bus->drvr->up) {
+ bcmerror = -EISCONN;
+ goto err;
+ }
+ if (!len) {
+ bcmerror = -EOVERFLOW;
+ goto err;
+ }
+
+ /* Free the old ones and replace with passed variables */
+ kfree(bus->vars);
+
+ bus->vars = kmalloc(len, GFP_ATOMIC);
+ bus->varsz = bus->vars ? len : 0;
+ if (bus->vars == NULL) {
+ bcmerror = -ENOMEM;
+ goto err;
+ }
+
+ /* Copy the passed variables, which should include the
+ terminating double-null */
+ memcpy(bus->vars, arg, bus->varsz);
+err:
+ return bcmerror;
+}
+
+static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
+{
+ int bcmerror = 0;
+ u32 varsize;
+ u32 varaddr;
+ u8 *vbuffer;
+ u32 varsizew;
+ __le32 varsizew_le;
+#ifdef BCMDBG
+ char *nvram_ularray;
+#endif /* BCMDBG */
+
+ /* Even if there are no vars are to be written, we still
+ need to set the ramsize. */
+ varsize = bus->varsz ? roundup(bus->varsz, 4) : 0;
+ varaddr = (bus->ramsize - 4) - varsize;
+
+ if (bus->vars) {
+ vbuffer = kzalloc(varsize, GFP_ATOMIC);
+ if (!vbuffer)
+ return -ENOMEM;
+
+ memcpy(vbuffer, bus->vars, bus->varsz);
+
+ /* Write the vars list */
+ bcmerror =
+ brcmf_sdbrcm_membytes(bus, true, varaddr, vbuffer, varsize);
+#ifdef BCMDBG
+ /* Verify NVRAM bytes */
+ brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n", varsize);
+ nvram_ularray = kmalloc(varsize, GFP_ATOMIC);
+ if (!nvram_ularray)
+ return -ENOMEM;
+
+ /* Upload image to verify downloaded contents. */
+ memset(nvram_ularray, 0xaa, varsize);
+
+ /* Read the vars list to temp buffer for comparison */
+ bcmerror =
+ brcmf_sdbrcm_membytes(bus, false, varaddr, nvram_ularray,
+ varsize);
+ if (bcmerror) {
+ brcmf_dbg(ERROR, "error %d on reading %d nvram bytes at 0x%08x\n",
+ bcmerror, varsize, varaddr);
+ }
+ /* Compare the org NVRAM with the one read from RAM */
+ if (memcmp(vbuffer, nvram_ularray, varsize))
+ brcmf_dbg(ERROR, "Downloaded NVRAM image is corrupted\n");
+ else
+ brcmf_dbg(ERROR, "Download/Upload/Compare of NVRAM ok\n");
+
+ kfree(nvram_ularray);
+#endif /* BCMDBG */
+
+ kfree(vbuffer);
+ }
+
+ /* adjust to the user specified RAM */
+ brcmf_dbg(INFO, "Physical memory size: %d\n", bus->ramsize);
+ brcmf_dbg(INFO, "Vars are at %d, orig varsize is %d\n",
+ varaddr, varsize);
+ varsize = ((bus->ramsize - 4) - varaddr);
+
+ /*
+ * Determine the length token:
+ * Varsize, converted to words, in lower 16-bits, checksum
+ * in upper 16-bits.
+ */
+ if (bcmerror) {
+ varsizew = 0;
+ varsizew_le = cpu_to_le32(0);
+ } else {
+ varsizew = varsize / 4;
+ varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+ varsizew_le = cpu_to_le32(varsizew);
+ }
+
+ brcmf_dbg(INFO, "New varsize is %d, length token=0x%08x\n",
+ varsize, varsizew);
+
+ /* Write the length token to the last word */
+ bcmerror = brcmf_sdbrcm_membytes(bus, true, (bus->ramsize - 4),
+ (u8 *)&varsizew_le, 4);
+
+ return bcmerror;
+}
+
+static void
+brcmf_sdbrcm_chip_disablecore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
+{
+ u32 regdata;
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatelow), 4);
+ if (regdata & SBTML_RESET)
+ return;
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatelow), 4);
+ if ((regdata & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) != 0) {
+ /*
+ * set target reject and spin until busy is clear
+ * (preserve core-specific bits)
+ */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatelow), 4);
+ brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow),
+ 4, regdata | SBTML_REJ);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatelow), 4);
+ udelay(1);
+ SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatehigh), 4) &
+ SBTMH_BUSY), 100000);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatehigh), 4);
+ if (regdata & SBTMH_BUSY)
+ brcmf_dbg(ERROR, "ARM core still busy\n");
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbidlow), 4);
+ if (regdata & SBIDL_INIT) {
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbimstate), 4) |
+ SBIM_RJ;
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(corebase, sbimstate), 4,
+ regdata);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbimstate), 4);
+ udelay(1);
+ SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbimstate), 4) &
+ SBIM_BY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(corebase, sbtmstatelow), 4,
+ (((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_REJ | SBTML_RESET));
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatelow), 4);
+ udelay(10);
+
+ /* clear the initiator reject bit */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbidlow), 4);
+ if (regdata & SBIDL_INIT) {
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbimstate), 4) &
+ ~SBIM_RJ;
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(corebase, sbimstate), 4,
+ regdata);
+ }
+ }
+
+ /* leave reset and reject asserted */
+ brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
+ (SBTML_REJ | SBTML_RESET));
+ udelay(1);
+}
+
+static void
+brcmf_sdbrcm_chip_resetcore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
+{
+ u32 regdata;
+
+ /*
+ * Must do the disable sequence first to work for
+ * arbitrary current core state.
+ */
+ brcmf_sdbrcm_chip_disablecore(sdiodev, corebase);
+
+ /*
+ * Now do the initialization sequence.
+ * set reset while enabling the clock and
+ * forcing them on throughout the core
+ */
+ brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
+ ((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_RESET);
+ udelay(1);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatehigh), 4);
+ if (regdata & SBTMH_SERR)
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(corebase, sbtmstatehigh), 4, 0);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbimstate), 4);
+ if (regdata & (SBIM_IBE | SBIM_TO))
+ brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbimstate), 4,
+ regdata & ~(SBIM_IBE | SBIM_TO));
+
+ /* clear reset and allow it to propagate throughout the core */
+ brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
+ (SICF_FGC << SBTML_SICF_SHIFT) |
+ (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+ udelay(1);
+
+ /* leave clock enabled */
+ brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
+ (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+ udelay(1);
+}
+
+static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
+{
+ uint retries;
+ u32 regdata;
+ int bcmerror = 0;
+
+ /* To enter download state, disable ARM and reset SOCRAM.
+ * To exit download state, simply reset ARM (default is RAM boot).
+ */
+ if (enter) {
+ bus->alp_only = true;
+
+ brcmf_sdbrcm_chip_disablecore(bus->sdiodev,
+ bus->ci->armcorebase);
+
+ brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->ramcorebase);
+
+ /* Clear the top bit of memory */
+ if (bus->ramsize) {
+ u32 zeros = 0;
+ brcmf_sdbrcm_membytes(bus, true, bus->ramsize - 4,
+ (u8 *)&zeros, 4);
+ }
+ } else {
+ regdata = brcmf_sdcard_reg_read(bus->sdiodev,
+ CORE_SB(bus->ci->ramcorebase, sbtmstatelow), 4);
+ regdata &= (SBTML_RESET | SBTML_REJ_MASK |
+ (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+ if ((SICF_CLOCK_EN << SBTML_SICF_SHIFT) != regdata) {
+ brcmf_dbg(ERROR, "SOCRAM core is down after reset?\n");
+ bcmerror = -EBADE;
+ goto fail;
+ }
+
+ bcmerror = brcmf_sdbrcm_write_vars(bus);
+ if (bcmerror) {
+ brcmf_dbg(ERROR, "no vars written to RAM\n");
+ bcmerror = 0;
+ }
+
+ w_sdreg32(bus, 0xFFFFFFFF,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+
+ brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->armcorebase);
+
+ /* Allow HT Clock now that the ARM is running. */
+ bus->alp_only = false;
+
+ bus->drvr->busstate = BRCMF_BUS_LOAD;
+ }
+fail:
+ return bcmerror;
+}
+
+static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
+{
+ if (bus->firmware->size < bus->fw_ptr + len)
+ len = bus->firmware->size - bus->fw_ptr;
+
+ memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
+ bus->fw_ptr += len;
+ return len;
+}
+
+MODULE_FIRMWARE(BCM4329_FW_NAME);
+MODULE_FIRMWARE(BCM4329_NV_NAME);
+
+static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
+{
+ int offset = 0;
+ uint len;
+ u8 *memblock = NULL, *memptr;
+ int ret;
+
+ brcmf_dbg(INFO, "Enter\n");
+
+ bus->fw_name = BCM4329_FW_NAME;
+ ret = request_firmware(&bus->firmware, bus->fw_name,
+ &bus->sdiodev->func[2]->dev);
+ if (ret) {
+ brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret);
+ return ret;
+ }
+ bus->fw_ptr = 0;
+
+ memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
+ if (memblock == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
+ memptr += (BRCMF_SDALIGN -
+ ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
+
+ /* Download image */
+ while ((len =
+ brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus))) {
+ ret = brcmf_sdbrcm_membytes(bus, true, offset, memptr, len);
+ if (ret) {
+ brcmf_dbg(ERROR, "error %d on writing %d membytes at 0x%08x\n",
+ ret, MEMBLOCK, offset);
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+err:
+ kfree(memblock);
+
+ release_firmware(bus->firmware);
+ bus->fw_ptr = 0;
+
+ return ret;
+}
+
+/*
+ * ProcessVars:Takes a buffer of "<var>=<value>\n" lines read from a file
+ * and ending in a NUL.
+ * Removes carriage returns, empty lines, comment lines, and converts
+ * newlines to NULs.
+ * Shortens buffer as needed and pads with NULs. End of buffer is marked
+ * by two NULs.
+*/
+
+static uint brcmf_process_nvram_vars(char *varbuf, uint len)
+{
+ char *dp;
+ bool findNewline;
+ int column;
+ uint buf_len, n;
+
+ dp = varbuf;
+
+ findNewline = false;
+ column = 0;
+
+ for (n = 0; n < len; n++) {
+ if (varbuf[n] == 0)
+ break;
+ if (varbuf[n] == '\r')
+ continue;
+ if (findNewline && varbuf[n] != '\n')
+ continue;
+ findNewline = false;
+ if (varbuf[n] == '#') {
+ findNewline = true;
+ continue;
+ }
+ if (varbuf[n] == '\n') {
+ if (column == 0)
+ continue;
+ *dp++ = 0;
+ column = 0;
+ continue;
+ }
+ *dp++ = varbuf[n];
+ column++;
+ }
+ buf_len = dp - varbuf;
+
+ while (dp < varbuf + n)
+ *dp++ = 0;
+
+ return buf_len;
+}
+
+static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus)
+{
+ uint len;
+ char *memblock = NULL;
+ char *bufp;
+ int ret;
+
+ bus->nv_name = BCM4329_NV_NAME;
+ ret = request_firmware(&bus->firmware, bus->nv_name,
+ &bus->sdiodev->func[2]->dev);
+ if (ret) {
+ brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
+ return ret;
+ }
+ bus->fw_ptr = 0;
+
+ memblock = kmalloc(MEMBLOCK, GFP_ATOMIC);
+ if (memblock == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ len = brcmf_sdbrcm_get_image(memblock, MEMBLOCK, bus);
+
+ if (len > 0 && len < MEMBLOCK) {
+ bufp = (char *)memblock;
+ bufp[len] = 0;
+ len = brcmf_process_nvram_vars(bufp, len);
+ bufp += len;
+ *bufp++ = 0;
+ if (len)
+ ret = brcmf_sdbrcm_downloadvars(bus, memblock, len + 1);
+ if (ret)
+ brcmf_dbg(ERROR, "error downloading vars: %d\n", ret);
+ } else {
+ brcmf_dbg(ERROR, "error reading nvram file: %d\n", len);
+ ret = -EIO;
+ }
+
+err:
+ kfree(memblock);
+
+ release_firmware(bus->firmware);
+ bus->fw_ptr = 0;
+
+ return ret;
+}
+
+static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+{
+ int bcmerror = -1;
+
+ /* Keep arm in reset */
+ if (brcmf_sdbrcm_download_state(bus, true)) {
+ brcmf_dbg(ERROR, "error placing ARM core in reset\n");
+ goto err;
+ }
+
+ /* External image takes precedence if specified */
+ if (brcmf_sdbrcm_download_code_file(bus)) {
+ brcmf_dbg(ERROR, "dongle image file download failed\n");
+ goto err;
+ }
+
+ /* External nvram takes precedence if specified */
+ if (brcmf_sdbrcm_download_nvram(bus))
+ brcmf_dbg(ERROR, "dongle nvram file download failed\n");
+
+ /* Take arm out of reset */
+ if (brcmf_sdbrcm_download_state(bus, false)) {
+ brcmf_dbg(ERROR, "error getting out of ARM core reset\n");
+ goto err;
+ }
+
+ bcmerror = 0;
+
+err:
+ return bcmerror;
+}
+
+static bool
+brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+{
+ bool ret;
+
+ /* Download the firmware */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
+
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+
+ return ret;
+}
+
+void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
+{
+ u32 local_hostintmask;
+ u8 saveclk;
+ uint retries;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus->watchdog_tsk) {
+ send_sig(SIGTERM, bus->watchdog_tsk, 1);
+ kthread_stop(bus->watchdog_tsk);
+ bus->watchdog_tsk = NULL;
+ }
+
+ if (bus->dpc_tsk && bus->dpc_tsk != current) {
+ send_sig(SIGTERM, bus->dpc_tsk, 1);
+ kthread_stop(bus->dpc_tsk);
+ bus->dpc_tsk = NULL;
+ }
+
+ down(&bus->sdsem);
+
+ bus_wake(bus);
+
+ /* Enable clock for device interrupts */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ /* Disable and clear interrupts at the chip level also */
+ w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
+ local_hostintmask = bus->hostintmask;
+ bus->hostintmask = 0;
+
+ /* Change our idea of bus state */
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err)
+ brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
+
+ /* Turn off the bus (F2), free any pending packets */
+ brcmf_dbg(INTR, "disable SDIO interrupts\n");
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+ SDIO_FUNC_ENABLE_1, NULL);
+
+ /* Clear any pending interrupts now that F2 is disabled */
+ w_sdreg32(bus, local_hostintmask,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+
+ /* Turn off the backplane clock (only) */
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+
+ /* Clear the data packet queues */
+ brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
+
+ /* Clear any held glomming stuff */
+ if (bus->glomd)
+ brcmu_pkt_buf_free_skb(bus->glomd);
+
+ if (bus->glom)
+ brcmu_pkt_buf_free_skb(bus->glom);
+
+ bus->glom = bus->glomd = NULL;
+
+ /* Clear rx control and wake any waiters */
+ bus->rxlen = 0;
+ brcmf_sdbrcm_dcmd_resp_wake(bus);
+
+ /* Reset some F2 state stuff */
+ bus->rxskip = false;
+ bus->tx_seq = bus->rx_seq = 0;
+
+ up(&bus->sdsem);
+}
+
+int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
+{
+ struct brcmf_bus *bus = drvr->bus;
+ unsigned long timeout;
+ uint retries = 0;
+ u8 ready, enable;
+ int err, ret = 0;
+ u8 saveclk;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* try to download image and nvram to the dongle */
+ if (drvr->busstate == BRCMF_BUS_DOWN) {
+ if (!(brcmf_sdbrcm_download_firmware(bus)))
+ return -1;
+ }
+
+ if (!bus->drvr)
+ return 0;
+
+ /* Start the watchdog timer */
+ bus->drvr->tickcnt = 0;
+ brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+
+ down(&bus->sdsem);
+
+ /* Make sure backplane clock is on, needed to generate F2 interrupt */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+ if (bus->clkstate != CLK_AVAIL)
+ goto exit;
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk =
+ brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err) {
+ brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
+ goto exit;
+ }
+
+ /* Enable function 2 (frame transfers) */
+ w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
+ offsetof(struct sdpcmd_regs, tosbmailboxdata), &retries);
+ enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+ enable, NULL);
+
+ timeout = jiffies + msecs_to_jiffies(BRCMF_WAIT_F2RDY);
+ ready = 0;
+ while (enable != ready) {
+ ready = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_0,
+ SDIO_CCCR_IORx, NULL);
+ if (time_after(jiffies, timeout))
+ break;
+ else if (time_after(jiffies, timeout - BRCMF_WAIT_F2RDY + 50))
+ /* prevent busy waiting if it takes too long */
+ msleep_interruptible(20);
+ }
+
+ brcmf_dbg(INFO, "enable 0x%02x, ready 0x%02x\n", enable, ready);
+
+ /* If F2 successfully enabled, set core and enable interrupts */
+ if (ready == enable) {
+ /* Set up the interrupt mask and enable interrupts */
+ bus->hostintmask = HOSTINTMASK;
+ w_sdreg32(bus, bus->hostintmask,
+ offsetof(struct sdpcmd_regs, hostintmask), &retries);
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_WATERMARK, 8, &err);
+
+ /* Set bus state according to enable result */
+ drvr->busstate = BRCMF_BUS_DATA;
+ }
+
+ else {
+ /* Disable F2 again */
+ enable = SDIO_FUNC_ENABLE_1;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0,
+ SDIO_CCCR_IOEx, enable, NULL);
+ }
+
+ /* Restore previous clock setting */
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+
+ /* If we didn't come up, turn off backplane clock */
+ if (drvr->busstate != BRCMF_BUS_DATA)
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+
+exit:
+ up(&bus->sdsem);
+
+ return ret;
+}
+
+void brcmf_sdbrcm_isr(void *arg)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *) arg;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (!bus) {
+ brcmf_dbg(ERROR, "bus is null pointer, exiting\n");
+ return;
+ }
+
+ if (bus->drvr->busstate == BRCMF_BUS_DOWN) {
+ brcmf_dbg(ERROR, "bus is down. we have nothing to do\n");
+ return;
+ }
+ /* Count the interrupt call */
+ bus->intrcount++;
+ bus->ipend = true;
+
+ /* Shouldn't get this interrupt if we're sleeping? */
+ if (bus->sleeping) {
+ brcmf_dbg(ERROR, "INTERRUPT WHILE SLEEPING??\n");
+ return;
+ }
+
+ /* Disable additional interrupts (is this needed now)? */
+ if (!bus->intr)
+ brcmf_dbg(ERROR, "isr w/o interrupt configured!\n");
+
+ bus->dpc_sched = true;
+ if (bus->dpc_tsk)
+ complete(&bus->dpc_wait);
+}
+
+static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
+{
+ struct brcmf_bus *bus;
+
+ brcmf_dbg(TIMER, "Enter\n");
+
+ bus = drvr->bus;
+
+ /* Ignore the timer if simulating bus down */
+ if (bus->sleeping)
+ return false;
+
+ down(&bus->sdsem);
+
+ /* Poll period: check device if appropriate. */
+ if (bus->poll && (++bus->polltick >= bus->pollrate)) {
+ u32 intstatus = 0;
+
+ /* Reset poll tick */
+ bus->polltick = 0;
+
+ /* Check device if no interrupts */
+ if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+
+ if (!bus->dpc_sched) {
+ u8 devpend;
+ devpend = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_0, SDIO_CCCR_INTx,
+ NULL);
+ intstatus =
+ devpend & (INTR_STATUS_FUNC1 |
+ INTR_STATUS_FUNC2);
+ }
+
+ /* If there is something, make like the ISR and
+ schedule the DPC */
+ if (intstatus) {
+ bus->pollcnt++;
+ bus->ipend = true;
+
+ bus->dpc_sched = true;
+ if (bus->dpc_tsk)
+ complete(&bus->dpc_wait);
+ }
+ }
+
+ /* Update interrupt tracking */
+ bus->lastintrs = bus->intrcount;
+ }
+#ifdef BCMDBG
+ /* Poll for console output periodically */
+ if (drvr->busstate == BRCMF_BUS_DATA && bus->console_interval != 0) {
+ bus->console.count += BRCMF_WD_POLL_MS;
+ if (bus->console.count >= bus->console_interval) {
+ bus->console.count -= bus->console_interval;
+ /* Make sure backplane clock is on */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+ if (brcmf_sdbrcm_readconsole(bus) < 0)
+ /* stop on error */
+ bus->console_interval = 0;
+ }
+ }
+#endif /* BCMDBG */
+
+ /* On idle timeout clear activity flag and/or turn off clock */
+ if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
+ if (++bus->idlecount >= bus->idletime) {
+ bus->idlecount = 0;
+ if (bus->activity) {
+ bus->activity = false;
+ brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ } else {
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+ }
+ }
+ }
+
+ up(&bus->sdsem);
+
+ return bus->ipend;
+}
+
+static bool brcmf_sdbrcm_chipmatch(u16 chipid)
+{
+ if (chipid == BCM4329_CHIP_ID)
+ return true;
+ return false;
+}
+
+static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ kfree(bus->rxbuf);
+ bus->rxctl = bus->rxbuf = NULL;
+ bus->rxlen = 0;
+
+ kfree(bus->databuf);
+ bus->databuf = NULL;
+}
+
+static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus->drvr->maxctl) {
+ bus->rxblen =
+ roundup((bus->drvr->maxctl + SDPCM_HDRLEN),
+ ALIGNMENT) + BRCMF_SDALIGN;
+ bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
+ if (!(bus->rxbuf))
+ goto fail;
+ }
+
+ /* Allocate buffer to receive glomed packet */
+ bus->databuf = kmalloc(MAX_DATA_BUF, GFP_ATOMIC);
+ if (!(bus->databuf)) {
+ /* release rxbuf which was already located as above */
+ if (!bus->rxblen)
+ kfree(bus->rxbuf);
+ goto fail;
+ }
+
+ /* Align the buffer */
+ if ((unsigned long)bus->databuf % BRCMF_SDALIGN)
+ bus->dataptr = bus->databuf + (BRCMF_SDALIGN -
+ ((unsigned long)bus->databuf % BRCMF_SDALIGN));
+ else
+ bus->dataptr = bus->databuf;
+
+ return true;
+
+fail:
+ return false;
+}
+
+/* SDIO Pad drive strength to select value mappings */
+struct sdiod_drive_str {
+ u8 strength; /* Pad Drive Strength in mA */
+ u8 sel; /* Chip-specific select value */
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = {
+ {
+ 4, 0x2}, {
+ 2, 0x3}, {
+ 1, 0x0}, {
+ 0, 0x0}
+ };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = {
+ {
+ 12, 0x7}, {
+ 10, 0x6}, {
+ 8, 0x5}, {
+ 6, 0x4}, {
+ 4, 0x2}, {
+ 2, 0x1}, {
+ 0, 0x0}
+ };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
+static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
+ {
+ 32, 0x7}, {
+ 26, 0x6}, {
+ 22, 0x5}, {
+ 16, 0x4}, {
+ 12, 0x3}, {
+ 8, 0x2}, {
+ 4, 0x1}, {
+ 0, 0x0}
+ };
+
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+
+static char *brcmf_chipname(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
+
+static void brcmf_sdbrcm_sdiod_drive_strength_init(struct brcmf_bus *bus,
+ u32 drivestrength) {
+ struct sdiod_drive_str *str_tab = NULL;
+ u32 str_mask = 0;
+ u32 str_shift = 0;
+ char chn[8];
+
+ if (!(bus->ci->cccaps & CC_CAP_PMU))
+ return;
+
+ switch (SDIOD_DRVSTR_KEY(bus->ci->chip, bus->ci->pmurev)) {
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
+ str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1;
+ str_mask = 0x30000000;
+ str_shift = 28;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
+ str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
+ str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ default:
+ brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ brcmf_chipname(bus->ci->chip, chn, 8),
+ bus->ci->chiprev, bus->ci->pmurev);
+ break;
+ }
+
+ if (str_tab != NULL) {
+ u32 drivestrength_sel = 0;
+ u32 cc_data_temp;
+ int i;
+
+ for (i = 0; str_tab[i].strength != 0; i++) {
+ if (drivestrength >= str_tab[i].strength) {
+ drivestrength_sel = str_tab[i].sel;
+ break;
+ }
+ }
+
+ brcmf_sdcard_reg_write(bus->sdiodev,
+ CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
+ 4, 1);
+ cc_data_temp = brcmf_sdcard_reg_read(bus->sdiodev,
+ CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), 4);
+ cc_data_temp &= ~str_mask;
+ drivestrength_sel <<= str_shift;
+ cc_data_temp |= drivestrength_sel;
+ brcmf_sdcard_reg_write(bus->sdiodev,
+ CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
+ 4, cc_data_temp);
+
+ brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
+ drivestrength, cc_data_temp);
+ }
+}
+
+static int
+brcmf_sdbrcm_chip_recognition(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u32 regs)
+{
+ u32 regdata;
+
+ /*
+ * Get CC core rev
+ * Chipid is assume to be at offset 0 from regs arg
+ * For different chiptypes or old sdio hosts w/o chipcommon,
+ * other ways of recognition should be added here.
+ */
+ ci->cccorebase = regs;
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->cccorebase, chipid), 4);
+ ci->chip = regdata & CID_ID_MASK;
+ ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+
+ brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
+
+ /* Address of cores for new chips should be added here */
+ switch (ci->chip) {
+ case BCM4329_CHIP_ID:
+ ci->buscorebase = BCM4329_CORE_BUS_BASE;
+ ci->ramcorebase = BCM4329_CORE_SOCRAM_BASE;
+ ci->armcorebase = BCM4329_CORE_ARM_BASE;
+ ci->ramsize = BCM4329_RAMSIZE;
+ break;
+ default:
+ brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
+ return -ENODEV;
+ }
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->cccorebase, sbidhigh), 4);
+ ci->ccrev = SBCOREREV(regdata);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->cccorebase, pmucapabilities), 4);
+ ci->pmurev = regdata & PCAP_REV_MASK;
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->buscorebase, sbidhigh), 4);
+ ci->buscorerev = SBCOREREV(regdata);
+ ci->buscoretype = (regdata & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT;
+
+ brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
+ ci->ccrev, ci->pmurev, ci->buscorerev, ci->buscoretype);
+
+ /* get chipcommon capabilites */
+ ci->cccaps = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->cccorebase, capabilities), 4);
+
+ return 0;
+}
+
+static int
+brcmf_sdbrcm_chip_attach(struct brcmf_bus *bus, u32 regs)
+{
+ struct chip_info *ci;
+ int err;
+ u8 clkval, clkset;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* alloc chip_info_t */
+ ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
+ if (NULL == ci)
+ return -ENOMEM;
+
+ /* bus/core/clk setup for register access */
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "error writing for HT off\n");
+ goto fail;
+ }
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ /* This may take up to 15 milliseconds */
+ clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+ SPINWAIT(((clkval =
+ brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ NULL)),
+ !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval);
+ err = -EBUSY;
+ goto fail;
+ }
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF |
+ SBSDIO_FORCE_ALP;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ clkset, &err);
+ udelay(65);
+ } else {
+ brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+ clkset, clkval);
+ err = -EACCES;
+ goto fail;
+ }
+
+ /* Also, disable the extra SDIO pull-ups */
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+
+ err = brcmf_sdbrcm_chip_recognition(bus->sdiodev, ci, regs);
+ if (err)
+ goto fail;
+
+ /*
+ * Make sure any on-chip ARM is off (in case strapping is wrong),
+ * or downloaded code was already running.
+ */
+ brcmf_sdbrcm_chip_disablecore(bus->sdiodev, ci->armcorebase);
+
+ brcmf_sdcard_reg_write(bus->sdiodev,
+ CORE_CC_REG(ci->cccorebase, gpiopullup), 4, 0);
+ brcmf_sdcard_reg_write(bus->sdiodev,
+ CORE_CC_REG(ci->cccorebase, gpiopulldown), 4, 0);
+
+ /* Disable F2 to clear any intermediate frame state on the dongle */
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+ SDIO_FUNC_ENABLE_1, NULL);
+
+ /* WAR: cmd52 backplane read so core HW will drop ALPReq */
+ clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ 0, NULL);
+
+ /* Done with backplane-dependent accesses, can drop clock... */
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+ bus->ci = ci;
+ return 0;
+fail:
+ bus->ci = NULL;
+ kfree(ci);
+ return err;
+}
+
+static bool
+brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
+{
+ u8 clkctl = 0;
+ int err = 0;
+ int reg_addr;
+ u32 reg_val;
+
+ bus->alp_only = true;
+
+ /* Return the window to backplane enumeration space for core access */
+ if (brcmf_sdcard_set_sbaddr_window(bus->sdiodev, SI_ENUM_BASE))
+ brcmf_dbg(ERROR, "FAILED to return to SI_ENUM_BASE\n");
+
+#ifdef BCMDBG
+ printk(KERN_DEBUG "F1 signature read @0x18000000=0x%4x\n",
+ brcmf_sdcard_reg_read(bus->sdiodev, SI_ENUM_BASE, 4));
+
+#endif /* BCMDBG */
+
+ /*
+ * Force PLL off until brcmf_sdbrcm_chip_attach()
+ * programs PLL control regs
+ */
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ BRCMF_INIT_CLKCTL1, &err);
+ if (!err)
+ clkctl =
+ brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+ if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
+ brcmf_dbg(ERROR, "ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+ err, BRCMF_INIT_CLKCTL1, clkctl);
+ goto fail;
+ }
+
+ if (brcmf_sdbrcm_chip_attach(bus, regsva)) {
+ brcmf_dbg(ERROR, "brcmf_sdbrcm_chip_attach failed!\n");
+ goto fail;
+ }
+
+ if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) {
+ brcmf_dbg(ERROR, "unsupported chip: 0x%04x\n", bus->ci->chip);
+ goto fail;
+ }
+
+ brcmf_sdbrcm_sdiod_drive_strength_init(bus, SDIO_DRIVE_STRENGTH);
+
+ /* Get info on the ARM and SOCRAM cores... */
+ brcmf_sdcard_reg_read(bus->sdiodev,
+ CORE_SB(bus->ci->armcorebase, sbidhigh), 4);
+ bus->ramsize = bus->ci->ramsize;
+ if (!(bus->ramsize)) {
+ brcmf_dbg(ERROR, "failed to find SOCRAM memory!\n");
+ goto fail;
+ }
+
+ /* Set core control so an SDIO reset does a backplane reset */
+ reg_addr = bus->ci->buscorebase +
+ offsetof(struct sdpcmd_regs, corecontrol);
+ reg_val = brcmf_sdcard_reg_read(bus->sdiodev, reg_addr, sizeof(u32));
+ brcmf_sdcard_reg_write(bus->sdiodev, reg_addr, sizeof(u32),
+ reg_val | CC_BPRESEN);
+
+ brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
+
+ /* Locate an appropriately-aligned portion of hdrbuf */
+ bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
+ BRCMF_SDALIGN);
+
+ /* Set the poll and/or interrupt flags */
+ bus->intr = true;
+ bus->poll = false;
+ if (bus->poll)
+ bus->pollrate = 1;
+
+ return true;
+
+fail:
+ return false;
+}
+
+static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Disable F2 to clear any intermediate frame state on the dongle */
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+ SDIO_FUNC_ENABLE_1, NULL);
+
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sleeping = false;
+ bus->rxflow = false;
+
+ /* Done with backplane-dependent accesses, can drop clock... */
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+ /* ...and initialize clock/power states */
+ bus->clkstate = CLK_SDONLY;
+ bus->idletime = BRCMF_IDLE_INTERVAL;
+ bus->idleclock = BRCMF_IDLE_ACTIVE;
+
+ /* Query the F2 block size, set roundup accordingly */
+ bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+ bus->roundup = min(max_roundup, bus->blocksize);
+
+ /* bus module does not support packet chaining */
+ bus->use_rxchain = false;
+ bus->sd_rxchain = false;
+
+ return true;
+}
+
+static int
+brcmf_sdbrcm_watchdog_thread(void *data)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *)data;
+
+ allow_signal(SIGTERM);
+ /* Run until signal received */
+ while (1) {
+ if (kthread_should_stop())
+ break;
+ if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
+ brcmf_sdbrcm_bus_watchdog(bus->drvr);
+ /* Count the tick for reference */
+ bus->drvr->tickcnt++;
+ } else
+ break;
+ }
+ return 0;
+}
+
+static void
+brcmf_sdbrcm_watchdog(unsigned long data)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *)data;
+
+ if (bus->watchdog_tsk) {
+ complete(&bus->watchdog_wait);
+ /* Reschedule the watchdog */
+ if (bus->wd_timer_valid)
+ mod_timer(&bus->timer,
+ jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
+ }
+}
+
+static void
+brcmf_sdbrcm_chip_detach(struct brcmf_bus *bus)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ kfree(bus->ci);
+ bus->ci = NULL;
+}
+
+static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus->ci) {
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+ brcmf_sdbrcm_chip_detach(bus);
+ if (bus->vars && bus->varsz)
+ kfree(bus->vars);
+ bus->vars = NULL;
+ }
+
+ brcmf_dbg(TRACE, "Disconnected\n");
+}
+
+/* Detach and free everything */
+static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus) {
+ /* De-register interrupt handler */
+ brcmf_sdcard_intr_dereg(bus->sdiodev);
+
+ if (bus->drvr) {
+ brcmf_detach(bus->drvr);
+ brcmf_sdbrcm_release_dongle(bus);
+ bus->drvr = NULL;
+ }
+
+ brcmf_sdbrcm_release_malloc(bus);
+
+ kfree(bus);
+ }
+
+ brcmf_dbg(TRACE, "Disconnected\n");
+}
+
+void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
+ u32 regsva, struct brcmf_sdio_dev *sdiodev)
+{
+ int ret;
+ struct brcmf_bus *bus;
+
+ /* Init global variables at run-time, not as part of the declaration.
+ * This is required to support init/de-init of the driver.
+ * Initialization
+ * of globals as part of the declaration results in non-deterministic
+ * behavior since the value of the globals may be different on the
+ * first time that the driver is initialized vs subsequent
+ * initializations.
+ */
+ brcmf_c_init();
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* We make an assumption about address window mappings:
+ * regsva == SI_ENUM_BASE*/
+
+ /* Allocate private bus interface state */
+ bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
+ if (!bus)
+ goto fail;
+
+ bus->sdiodev = sdiodev;
+ sdiodev->bus = bus;
+ bus->txbound = BRCMF_TXBOUND;
+ bus->rxbound = BRCMF_RXBOUND;
+ bus->txminmax = BRCMF_TXMINMAX;
+ bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+ bus->usebufpool = false; /* Use bufpool if allocated,
+ else use locally malloced rxbuf */
+
+ /* attempt to attach to the dongle */
+ if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
+ brcmf_dbg(ERROR, "brcmf_sdbrcm_probe_attach failed\n");
+ goto fail;
+ }
+
+ spin_lock_init(&bus->txqlock);
+ init_waitqueue_head(&bus->ctrl_wait);
+ init_waitqueue_head(&bus->dcmd_resp_wait);
+
+ /* Set up the watchdog timer */
+ init_timer(&bus->timer);
+ bus->timer.data = (unsigned long)bus;
+ bus->timer.function = brcmf_sdbrcm_watchdog;
+
+ /* Initialize thread based operation and lock */
+ sema_init(&bus->sdsem, 1);
+
+ /* Initialize watchdog thread */
+ init_completion(&bus->watchdog_wait);
+ bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
+ bus, "brcmf_watchdog");
+ if (IS_ERR(bus->watchdog_tsk)) {
+ printk(KERN_WARNING
+ "brcmf_watchdog thread failed to start\n");
+ bus->watchdog_tsk = NULL;
+ }
+ /* Initialize DPC thread */
+ init_completion(&bus->dpc_wait);
+ bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
+ bus, "brcmf_dpc");
+ if (IS_ERR(bus->dpc_tsk)) {
+ printk(KERN_WARNING
+ "brcmf_dpc thread failed to start\n");
+ bus->dpc_tsk = NULL;
+ }
+
+ /* Attach to the brcmf/OS/network interface */
+ bus->drvr = brcmf_attach(bus, SDPCM_RESERVE);
+ if (!bus->drvr) {
+ brcmf_dbg(ERROR, "brcmf_attach failed\n");
+ goto fail;
+ }
+
+ /* Allocate buffers */
+ if (!(brcmf_sdbrcm_probe_malloc(bus))) {
+ brcmf_dbg(ERROR, "brcmf_sdbrcm_probe_malloc failed\n");
+ goto fail;
+ }
+
+ if (!(brcmf_sdbrcm_probe_init(bus))) {
+ brcmf_dbg(ERROR, "brcmf_sdbrcm_probe_init failed\n");
+ goto fail;
+ }
+
+ /* Register interrupt callback, but mask it (not operational yet). */
+ brcmf_dbg(INTR, "disable SDIO interrupts (not interested yet)\n");
+ ret = brcmf_sdcard_intr_reg(bus->sdiodev);
+ if (ret != 0) {
+ brcmf_dbg(ERROR, "FAILED: sdcard_intr_reg returned %d\n", ret);
+ goto fail;
+ }
+ brcmf_dbg(INTR, "registered SDIO interrupt function ok\n");
+
+ brcmf_dbg(INFO, "completed!!\n");
+
+ /* if firmware path present try to download and bring up bus */
+ ret = brcmf_bus_start(bus->drvr);
+ if (ret != 0) {
+ if (ret == -ENOLINK) {
+ brcmf_dbg(ERROR, "dongle is not responding\n");
+ goto fail;
+ }
+ }
+ /* Ok, have the per-port tell the stack we're open for business */
+ if (brcmf_net_attach(bus->drvr, 0) != 0) {
+ brcmf_dbg(ERROR, "Net attach failed!!\n");
+ goto fail;
+ }
+
+ return bus;
+
+fail:
+ brcmf_sdbrcm_release(bus);
+ return NULL;
+}
+
+void brcmf_sdbrcm_disconnect(void *ptr)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *)ptr;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus)
+ brcmf_sdbrcm_release(bus);
+
+ brcmf_dbg(TRACE, "Disconnected\n");
+}
+
+struct device *brcmf_bus_get_device(struct brcmf_bus *bus)
+{
+ return &bus->sdiodev->func[2]->dev;
+}
+
+void
+brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
+{
+ /* don't start the wd until fw is loaded */
+ if (bus->drvr->busstate == BRCMF_BUS_DOWN)
+ return;
+
+ /* Totally stop the timer */
+ if (!wdtick && bus->wd_timer_valid == true) {
+ del_timer_sync(&bus->timer);
+ bus->wd_timer_valid = false;
+ bus->save_ms = wdtick;
+ return;
+ }
+
+ if (wdtick) {
+ if (bus->save_ms != BRCMF_WD_POLL_MS) {
+ if (bus->wd_timer_valid == true)
+ /* Stop timer and restart at new value */
+ del_timer_sync(&bus->timer);
+
+ /* Create timer again when watchdog period is
+ dynamically changed or in the first instance
+ */
+ bus->timer.expires =
+ jiffies + BRCMF_WD_POLL_MS * HZ / 1000;
+ add_timer(&bus->timer);
+
+ } else {
+ /* Re arm the timer, at last watchdog period */
+ mod_timer(&bus->timer,
+ jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
+ }
+
+ bus->wd_timer_valid = true;
+ bus->save_ms = wdtick;
+ }
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
new file mode 100644
index 00000000000..726fa898111
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_SDH_H_
+#define _BRCM_SDH_H_
+
+#include <linux/skbuff.h>
+
+#define SDIO_FUNC_0 0
+#define SDIO_FUNC_1 1
+#define SDIO_FUNC_2 2
+
+#define SDIOD_FBR_SIZE 0x100
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1 0x02
+#define SDIO_FUNC_ENABLE_2 0x04
+
+/* io_rdys */
+#define SDIO_FUNC_READY_1 0x02
+#define SDIO_FUNC_READY_2 0x04
+
+/* intr_status */
+#define INTR_STATUS_FUNC1 0x2
+#define INTR_STATUS_FUNC2 0x4
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_IOFUNCS 7
+
+/* as of sdiod rev 0, supports 3 functions */
+#define SBSDIO_NUM_FUNCTION 3
+
+/* function 1 miscellaneous registers */
+
+/* sprom command and status */
+#define SBSDIO_SPROM_CS 0x10000
+/* sprom info register */
+#define SBSDIO_SPROM_INFO 0x10001
+/* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_LOW 0x10002
+/* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_DATA_HIGH 0x10003
+/* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_LOW 0x10004
+/* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH 0x10005
+/* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_DATA 0x10006
+/* xtal_pu (gpio) enable */
+#define SBSDIO_CHIP_CTRL_EN 0x10007
+/* rev < 7, watermark for sdio device */
+#define SBSDIO_WATERMARK 0x10008
+/* control busy signal generation */
+#define SBSDIO_DEVICE_CTL 0x10009
+
+/* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRLOW 0x1000A
+/* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRMID 0x1000B
+/* SB Address Window High (b31:b24) */
+#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C
+/* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_FRAMECTRL 0x1000D
+/* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E
+/* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F
+/* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019
+/* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A
+/* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B
+/* Read Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C
+
+#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */
+
+/* function 1 OCP space */
+
+/* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF
+#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000
+/* with b15, maps to 32-bit SB access */
+#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000
+
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+
+#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */
+/* Address bits from SBADDR regs */
+#define SBSDIO_SBWINDOW_MASK 0xffff8000
+
+#define SDIOH_READ 0 /* Read request */
+#define SDIOH_WRITE 1 /* Write request */
+
+#define SDIOH_DATA_FIX 0 /* Fixed addressing */
+#define SDIOH_DATA_INC 1 /* Incremental addressing */
+
+/* internal return code */
+#define SUCCESS 0
+#define ERROR 1
+
+struct brcmf_sdreg {
+ int func;
+ int offset;
+ int value;
+};
+
+struct brcmf_sdio_dev {
+ struct sdio_func *func[SDIO_MAX_FUNCS];
+ u8 num_funcs; /* Supported funcs on client */
+ u32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ u32 sbwad; /* Save backplane window address */
+ bool regfail; /* status of last reg_r/w call */
+ void *bus;
+ atomic_t suspend; /* suspend flag */
+ wait_queue_head_t request_byte_wait;
+ wait_queue_head_t request_word_wait;
+ wait_queue_head_t request_packet_wait;
+ wait_queue_head_t request_buffer_wait;
+
+};
+
+/* Register/deregister device interrupt handler. */
+extern int
+brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev);
+
+extern int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev);
+
+/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
+ * fn: function number
+ * addr: unmodified SDIO-space address
+ * data: data byte to write
+ * err: pointer to error code (or NULL)
+ */
+extern u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_dev *sdiodev, uint func,
+ u32 addr, int *err);
+extern void brcmf_sdcard_cfg_write(struct brcmf_sdio_dev *sdiodev, uint func,
+ u32 addr, u8 data, int *err);
+
+/* Synchronous access to device (client) core registers via CMD53 to F1.
+ * addr: backplane address (i.e. >= regsva from attach)
+ * size: register width in bytes (2 or 4)
+ * data: data for register write
+ */
+extern u32
+brcmf_sdcard_reg_read(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size);
+
+extern u32
+brcmf_sdcard_reg_write(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size,
+ u32 data);
+
+/* Indicate if last reg read/write failed */
+extern bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev);
+
+/* Buffer transfer to/from device (client) core via cmd53.
+ * fn: function number
+ * addr: backplane address (i.e. >= regsva from attach)
+ * flags: backplane width, address increment, sync/async
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * pkt: pointer to packet associated with buf (if any)
+ * complete: callback function for command completion (async only)
+ * handle: handle for completion callback (first arg in callback)
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+extern int
+brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+extern int
+brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+
+/* Flags bits */
+
+/* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_4BYTE 0x1
+/* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_FIXED 0x2
+/* Async request (vs. sync request) */
+#define SDIO_REQ_ASYNC 0x4
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ * rw: read or write (0/1)
+ * addr: direct SDIO address
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * Returns 0 or error code.
+ */
+extern int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw,
+ u32 addr, u8 *buf, uint nbytes);
+
+/* Issue an abort to the specified function */
+extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+
+/* platform specific/high level functions */
+extern int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
+extern int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
+
+extern int brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev,
+ u32 address);
+
+/* attach, return handler on success, NULL if failed.
+ * The handler shall be provided by all subsequent calls. No local cache
+ * cfghdl points to the starting address of pci device mapped memory
+ */
+extern int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
+extern void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
+
+/* read or write one byte using cmd52 */
+extern int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw,
+ uint fnc, uint addr, u8 *byte);
+
+/* read or write 2/4 bytes using cmd53 */
+extern int
+brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
+ uint rw, uint fnc, uint addr,
+ u32 *word, uint nbyte);
+
+/* read or write any buffer using cmd53 */
+extern int
+brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
+ uint fix_inc, uint rw, uint fnc_num,
+ u32 addr, uint regwidth,
+ u32 buflen, u8 *buffer, struct sk_buff *pkt);
+
+/* Watchdog timer interface for pm ops */
+extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
+ bool enable);
+
+extern void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
+ u32 regsva, struct brcmf_sdio_dev *sdiodev);
+extern void brcmf_sdbrcm_disconnect(void *ptr);
+extern void brcmf_sdbrcm_isr(void *arg);
+#endif /* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
new file mode 100644
index 00000000000..5eddabe5228
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -0,0 +1,3868 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Toplevel file. Relies on dhd_linux.c to send commands to the dongle. */
+
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/ieee80211.h>
+#include <linux/uaccess.h>
+#include <net/cfg80211.h>
+
+#include <brcmu_utils.h>
+#include <defs.h>
+#include <brcmu_wifi.h>
+#include "dhd.h"
+#include "wl_cfg80211.h"
+
+#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
+ (sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
+
+static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
+
+static u32 brcmf_dbg_level = WL_DBG_ERR;
+
+static void brcmf_set_drvdata(struct brcmf_cfg80211_dev *dev, void *data)
+{
+ dev->driver_data = data;
+}
+
+static void *brcmf_get_drvdata(struct brcmf_cfg80211_dev *dev)
+{
+ void *data = NULL;
+
+ if (dev)
+ data = dev->driver_data;
+ return data;
+}
+
+static
+struct brcmf_cfg80211_priv *brcmf_priv_get(struct brcmf_cfg80211_dev *cfg_dev)
+{
+ struct brcmf_cfg80211_iface *ci = brcmf_get_drvdata(cfg_dev);
+ return ci->cfg_priv;
+}
+
+static bool check_sys_up(struct wiphy *wiphy)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+ WL_INFO("device is not ready : status (%d)\n",
+ (int)cfg_priv->status);
+ return false;
+ }
+ return true;
+}
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2)
+#define RATETAB_ENT(_rateid, _flags) \
+ { \
+ .bitrate = RATE_TO_BASE100KBPS(_rateid), \
+ .hw_value = (_rateid), \
+ .flags = (_flags), \
+ }
+
+static struct ieee80211_rate __wl_rates[] = {
+ RATETAB_ENT(BRCM_RATE_1M, 0),
+ RATETAB_ENT(BRCM_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(BRCM_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(BRCM_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(BRCM_RATE_6M, 0),
+ RATETAB_ENT(BRCM_RATE_9M, 0),
+ RATETAB_ENT(BRCM_RATE_12M, 0),
+ RATETAB_ENT(BRCM_RATE_18M, 0),
+ RATETAB_ENT(BRCM_RATE_24M, 0),
+ RATETAB_ENT(BRCM_RATE_36M, 0),
+ RATETAB_ENT(BRCM_RATE_48M, 0),
+ RATETAB_ENT(BRCM_RATE_54M, 0),
+};
+
+#define wl_a_rates (__wl_rates + 4)
+#define wl_a_rates_size 8
+#define wl_g_rates (__wl_rates + 0)
+#define wl_g_rates_size 12
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static struct ieee80211_channel __wl_5ghz_a_channels[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(149, 0),
+ CHAN5G(153, 0), CHAN5G(157, 0),
+ CHAN5G(161, 0), CHAN5G(165, 0),
+ CHAN5G(184, 0), CHAN5G(188, 0),
+ CHAN5G(192, 0), CHAN5G(196, 0),
+ CHAN5G(200, 0), CHAN5G(204, 0),
+ CHAN5G(208, 0), CHAN5G(212, 0),
+ CHAN5G(216, 0),
+};
+
+static struct ieee80211_channel __wl_5ghz_n_channels[] = {
+ CHAN5G(32, 0), CHAN5G(34, 0),
+ CHAN5G(36, 0), CHAN5G(38, 0),
+ CHAN5G(40, 0), CHAN5G(42, 0),
+ CHAN5G(44, 0), CHAN5G(46, 0),
+ CHAN5G(48, 0), CHAN5G(50, 0),
+ CHAN5G(52, 0), CHAN5G(54, 0),
+ CHAN5G(56, 0), CHAN5G(58, 0),
+ CHAN5G(60, 0), CHAN5G(62, 0),
+ CHAN5G(64, 0), CHAN5G(66, 0),
+ CHAN5G(68, 0), CHAN5G(70, 0),
+ CHAN5G(72, 0), CHAN5G(74, 0),
+ CHAN5G(76, 0), CHAN5G(78, 0),
+ CHAN5G(80, 0), CHAN5G(82, 0),
+ CHAN5G(84, 0), CHAN5G(86, 0),
+ CHAN5G(88, 0), CHAN5G(90, 0),
+ CHAN5G(92, 0), CHAN5G(94, 0),
+ CHAN5G(96, 0), CHAN5G(98, 0),
+ CHAN5G(100, 0), CHAN5G(102, 0),
+ CHAN5G(104, 0), CHAN5G(106, 0),
+ CHAN5G(108, 0), CHAN5G(110, 0),
+ CHAN5G(112, 0), CHAN5G(114, 0),
+ CHAN5G(116, 0), CHAN5G(118, 0),
+ CHAN5G(120, 0), CHAN5G(122, 0),
+ CHAN5G(124, 0), CHAN5G(126, 0),
+ CHAN5G(128, 0), CHAN5G(130, 0),
+ CHAN5G(132, 0), CHAN5G(134, 0),
+ CHAN5G(136, 0), CHAN5G(138, 0),
+ CHAN5G(140, 0), CHAN5G(142, 0),
+ CHAN5G(144, 0), CHAN5G(145, 0),
+ CHAN5G(146, 0), CHAN5G(147, 0),
+ CHAN5G(148, 0), CHAN5G(149, 0),
+ CHAN5G(150, 0), CHAN5G(151, 0),
+ CHAN5G(152, 0), CHAN5G(153, 0),
+ CHAN5G(154, 0), CHAN5G(155, 0),
+ CHAN5G(156, 0), CHAN5G(157, 0),
+ CHAN5G(158, 0), CHAN5G(159, 0),
+ CHAN5G(160, 0), CHAN5G(161, 0),
+ CHAN5G(162, 0), CHAN5G(163, 0),
+ CHAN5G(164, 0), CHAN5G(165, 0),
+ CHAN5G(166, 0), CHAN5G(168, 0),
+ CHAN5G(170, 0), CHAN5G(172, 0),
+ CHAN5G(174, 0), CHAN5G(176, 0),
+ CHAN5G(178, 0), CHAN5G(180, 0),
+ CHAN5G(182, 0), CHAN5G(184, 0),
+ CHAN5G(186, 0), CHAN5G(188, 0),
+ CHAN5G(190, 0), CHAN5G(192, 0),
+ CHAN5G(194, 0), CHAN5G(196, 0),
+ CHAN5G(198, 0), CHAN5G(200, 0),
+ CHAN5G(202, 0), CHAN5G(204, 0),
+ CHAN5G(206, 0), CHAN5G(208, 0),
+ CHAN5G(210, 0), CHAN5G(212, 0),
+ CHAN5G(214, 0), CHAN5G(216, 0),
+ CHAN5G(218, 0), CHAN5G(220, 0),
+ CHAN5G(222, 0), CHAN5G(224, 0),
+ CHAN5G(226, 0), CHAN5G(228, 0),
+};
+
+static struct ieee80211_supported_band __wl_band_2ghz = {
+ .band = IEEE80211_BAND_2GHZ,
+ .channels = __wl_2ghz_channels,
+ .n_channels = ARRAY_SIZE(__wl_2ghz_channels),
+ .bitrates = wl_g_rates,
+ .n_bitrates = wl_g_rates_size,
+};
+
+static struct ieee80211_supported_band __wl_band_5ghz_a = {
+ .band = IEEE80211_BAND_5GHZ,
+ .channels = __wl_5ghz_a_channels,
+ .n_channels = ARRAY_SIZE(__wl_5ghz_a_channels),
+ .bitrates = wl_a_rates,
+ .n_bitrates = wl_a_rates_size,
+};
+
+static struct ieee80211_supported_band __wl_band_5ghz_n = {
+ .band = IEEE80211_BAND_5GHZ,
+ .channels = __wl_5ghz_n_channels,
+ .n_channels = ARRAY_SIZE(__wl_5ghz_n_channels),
+ .bitrates = wl_a_rates,
+ .n_bitrates = wl_a_rates_size,
+};
+
+static const u32 __wl_cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+};
+
+/* tag_ID/length/value_buffer tuple */
+struct brcmf_tlv {
+ u8 id;
+ u8 len;
+ u8 data[1];
+};
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a u16.
+ */
+
+#define QDBM_OFFSET 153 /* Offset for first entry */
+#define QDBM_TABLE_LEN 40 /* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) +
+ * mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const u16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
+/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
+/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
+/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
+/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
+/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
+};
+
+static u16 brcmf_qdbm_to_mw(u8 qdbm)
+{
+ uint factor = 1;
+ int idx = qdbm - QDBM_OFFSET;
+
+ if (idx >= QDBM_TABLE_LEN)
+ /* clamp to max u16 mW value */
+ return 0xFFFF;
+
+ /* scale the qdBm index up to the range of the table 0-40
+ * where an offset of 40 qdBm equals a factor of 10 mW.
+ */
+ while (idx < 0) {
+ idx += 40;
+ factor *= 10;
+ }
+
+ /* return the mW value scaled down to the correct factor of 10,
+ * adding in factor/2 to get proper rounding.
+ */
+ return (nqdBm_to_mW_map[idx] + factor / 2) / factor;
+}
+
+static u8 brcmf_mw_to_qdbm(u16 mw)
+{
+ u8 qdbm;
+ int offset;
+ uint mw_uint = mw;
+ uint boundary;
+
+ /* handle boundary case */
+ if (mw_uint <= 1)
+ return 0;
+
+ offset = QDBM_OFFSET;
+
+ /* move mw into the range of the table */
+ while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+ mw_uint *= 10;
+ offset -= 40;
+ }
+
+ for (qdbm = 0; qdbm < QDBM_TABLE_LEN - 1; qdbm++) {
+ boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm + 1] -
+ nqdBm_to_mW_map[qdbm]) / 2;
+ if (mw_uint < boundary)
+ break;
+ }
+
+ qdbm += (u8) offset;
+
+ return qdbm;
+}
+
+/* function for reading/writing a single u32 from/to the dongle */
+static int
+brcmf_exec_dcmd_u32(struct net_device *ndev, u32 cmd, u32 *par)
+{
+ int err;
+ __le32 par_le = cpu_to_le32(*par);
+
+ err = brcmf_exec_dcmd(ndev, cmd, &par_le, sizeof(__le32));
+ *par = le32_to_cpu(par_le);
+
+ return err;
+}
+
+static void convert_key_from_CPU(struct brcmf_wsec_key *key,
+ struct brcmf_wsec_key_le *key_le)
+{
+ key_le->index = cpu_to_le32(key->index);
+ key_le->len = cpu_to_le32(key->len);
+ key_le->algo = cpu_to_le32(key->algo);
+ key_le->flags = cpu_to_le32(key->flags);
+ key_le->rxiv.hi = cpu_to_le32(key->rxiv.hi);
+ key_le->rxiv.lo = cpu_to_le16(key->rxiv.lo);
+ key_le->iv_initialized = cpu_to_le32(key->iv_initialized);
+ memcpy(key_le->data, key->data, sizeof(key->data));
+ memcpy(key_le->ea, key->ea, sizeof(key->ea));
+}
+
+static int send_key_to_dongle(struct net_device *ndev,
+ struct brcmf_wsec_key *key)
+{
+ int err;
+ struct brcmf_wsec_key_le key_le;
+
+ convert_key_from_CPU(key, &key_le);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le, sizeof(key_le));
+ if (err)
+ WL_ERR("WLC_SET_KEY error (%d)\n", err);
+ return err;
+}
+
+static s32
+brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct wireless_dev *wdev;
+ s32 infra = 0;
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ switch (type) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_WDS:
+ WL_ERR("type (%d) : currently we do not support this type\n",
+ type);
+ return -EOPNOTSUPP;
+ case NL80211_IFTYPE_ADHOC:
+ cfg_priv->conf->mode = WL_MODE_IBSS;
+ infra = 0;
+ break;
+ case NL80211_IFTYPE_STATION:
+ cfg_priv->conf->mode = WL_MODE_BSS;
+ infra = 1;
+ break;
+ default:
+ err = -EINVAL;
+ goto done;
+ }
+
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
+ if (err) {
+ WL_ERR("WLC_SET_INFRA error (%d)\n", err);
+ err = -EAGAIN;
+ } else {
+ wdev = ndev->ieee80211_ptr;
+ wdev->iftype = type;
+ }
+
+ WL_INFO("IF Type = %s\n",
+ (cfg_priv->conf->mode == WL_MODE_IBSS) ? "Adhoc" : "Infra");
+
+done:
+ WL_TRACE("Exit\n");
+
+ return err;
+}
+
+static s32 brcmf_dev_intvar_set(struct net_device *ndev, s8 *name, s32 val)
+{
+ s8 buf[BRCMF_DCMD_SMLEN];
+ u32 len;
+ s32 err = 0;
+ __le32 val_le;
+
+ val_le = cpu_to_le32(val);
+ len = brcmf_c_mkiovar(name, (char *)(&val_le), sizeof(val_le), buf,
+ sizeof(buf));
+ BUG_ON(!len);
+
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
+ if (err)
+ WL_ERR("error (%d)\n", err);
+
+ return err;
+}
+
+static s32
+brcmf_dev_intvar_get(struct net_device *ndev, s8 *name, s32 *retval)
+{
+ union {
+ s8 buf[BRCMF_DCMD_SMLEN];
+ __le32 val;
+ } var;
+ u32 len;
+ u32 data_null;
+ s32 err = 0;
+
+ len =
+ brcmf_c_mkiovar(name, (char *)(&data_null), 0, (char *)(&var),
+ sizeof(var.buf));
+ BUG_ON(!len);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, &var, len);
+ if (err)
+ WL_ERR("error (%d)\n", err);
+
+ *retval = le32_to_cpu(var.val);
+
+ return err;
+}
+
+static void brcmf_set_mpc(struct net_device *ndev, int mpc)
+{
+ s32 err = 0;
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+
+ if (test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+ err = brcmf_dev_intvar_set(ndev, "mpc", mpc);
+ if (err) {
+ WL_ERR("fail to set mpc\n");
+ return;
+ }
+ WL_INFO("MPC : %d\n", mpc);
+ }
+}
+
+static void wl_iscan_prep(struct brcmf_scan_params_le *params_le,
+ struct brcmf_ssid *ssid)
+{
+ memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
+ params_le->bss_type = DOT11_BSSTYPE_ANY;
+ params_le->scan_type = 0;
+ params_le->channel_num = 0;
+ params_le->nprobes = cpu_to_le32(-1);
+ params_le->active_time = cpu_to_le32(-1);
+ params_le->passive_time = cpu_to_le32(-1);
+ params_le->home_time = cpu_to_le32(-1);
+ if (ssid && ssid->SSID_len)
+ memcpy(&params_le->ssid_le, ssid, sizeof(struct brcmf_ssid));
+}
+
+static s32
+brcmf_dev_iovar_setbuf(struct net_device *ndev, s8 * iovar, void *param,
+ s32 paramlen, void *bufptr, s32 buflen)
+{
+ s32 iolen;
+
+ iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen);
+ BUG_ON(!iolen);
+
+ return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, bufptr, iolen);
+}
+
+static s32
+brcmf_dev_iovar_getbuf(struct net_device *ndev, s8 * iovar, void *param,
+ s32 paramlen, void *bufptr, s32 buflen)
+{
+ s32 iolen;
+
+ iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen);
+ BUG_ON(!iolen);
+
+ return brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, bufptr, buflen);
+}
+
+static s32
+brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
+ struct brcmf_ssid *ssid, u16 action)
+{
+ s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
+ offsetof(struct brcmf_iscan_params_le, params_le);
+ struct brcmf_iscan_params_le *params;
+ s32 err = 0;
+
+ if (ssid && ssid->SSID_len)
+ params_size += sizeof(struct brcmf_ssid);
+ params = kzalloc(params_size, GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+ BUG_ON(params_size >= BRCMF_DCMD_SMLEN);
+
+ wl_iscan_prep(&params->params_le, ssid);
+
+ params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION);
+ params->action = cpu_to_le16(action);
+ params->scan_duration = cpu_to_le16(0);
+
+ err = brcmf_dev_iovar_setbuf(iscan->ndev, "iscan", params, params_size,
+ iscan->dcmd_buf, BRCMF_DCMD_SMLEN);
+ if (err) {
+ if (err == -EBUSY)
+ WL_INFO("system busy : iscan canceled\n");
+ else
+ WL_ERR("error (%d)\n", err);
+ }
+
+ kfree(params);
+ return err;
+}
+
+static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ struct brcmf_ssid ssid;
+ __le32 passive_scan;
+ s32 err = 0;
+
+ /* Broadcast scan by default */
+ memset(&ssid, 0, sizeof(ssid));
+
+ iscan->state = WL_ISCAN_STATE_SCANING;
+
+ passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1);
+ err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan));
+ if (err) {
+ WL_ERR("error (%d)\n", err);
+ return err;
+ }
+ brcmf_set_mpc(ndev, 0);
+ cfg_priv->iscan_kickstart = true;
+ err = brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START);
+ if (err) {
+ brcmf_set_mpc(ndev, 1);
+ cfg_priv->iscan_kickstart = false;
+ return err;
+ }
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
+ iscan->timer_on = 1;
+ return err;
+}
+
+static s32
+__brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ struct cfg80211_ssid *this_ssid)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct cfg80211_ssid *ssids;
+ struct brcmf_cfg80211_scan_req *sr = cfg_priv->scan_req_int;
+ __le32 passive_scan;
+ bool iscan_req;
+ bool spec_scan;
+ s32 err = 0;
+ u32 SSID_len;
+
+ if (test_bit(WL_STATUS_SCANNING, &cfg_priv->status)) {
+ WL_ERR("Scanning already : status (%lu)\n", cfg_priv->status);
+ return -EAGAIN;
+ }
+ if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status)) {
+ WL_ERR("Scanning being aborted : status (%lu)\n",
+ cfg_priv->status);
+ return -EAGAIN;
+ }
+ if (test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) {
+ WL_ERR("Connecting : status (%lu)\n",
+ cfg_priv->status);
+ return -EAGAIN;
+ }
+
+ iscan_req = false;
+ spec_scan = false;
+ if (request) {
+ /* scan bss */
+ ssids = request->ssids;
+ if (cfg_priv->iscan_on && (!ssids || !ssids->ssid_len))
+ iscan_req = true;
+ } else {
+ /* scan in ibss */
+ /* we don't do iscan in ibss */
+ ssids = this_ssid;
+ }
+
+ cfg_priv->scan_request = request;
+ set_bit(WL_STATUS_SCANNING, &cfg_priv->status);
+ if (iscan_req) {
+ err = brcmf_do_iscan(cfg_priv);
+ if (!err)
+ return err;
+ else
+ goto scan_out;
+ } else {
+ WL_SCAN("ssid \"%s\", ssid_len (%d)\n",
+ ssids->ssid, ssids->ssid_len);
+ memset(&sr->ssid_le, 0, sizeof(sr->ssid_le));
+ SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len);
+ sr->ssid_le.SSID_len = cpu_to_le32(0);
+ if (SSID_len) {
+ memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len);
+ sr->ssid_le.SSID_len = cpu_to_le32(SSID_len);
+ spec_scan = true;
+ } else {
+ WL_SCAN("Broadcast scan\n");
+ }
+
+ passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan));
+ if (err) {
+ WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
+ goto scan_out;
+ }
+ brcmf_set_mpc(ndev, 0);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
+ sizeof(sr->ssid_le));
+ if (err) {
+ if (err == -EBUSY)
+ WL_INFO("system busy : scan for \"%s\" "
+ "canceled\n", sr->ssid_le.SSID);
+ else
+ WL_ERR("WLC_SCAN error (%d)\n", err);
+
+ brcmf_set_mpc(ndev, 1);
+ goto scan_out;
+ }
+ }
+
+ return 0;
+
+scan_out:
+ clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
+ cfg_priv->scan_request = NULL;
+ return err;
+}
+
+static s32
+brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ err = __brcmf_cfg80211_scan(wiphy, ndev, request, NULL);
+ if (err)
+ WL_ERR("scan error (%d)\n", err);
+
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32 brcmf_set_rts(struct net_device *ndev, u32 rts_threshold)
+{
+ s32 err = 0;
+
+ err = brcmf_dev_intvar_set(ndev, "rtsthresh", rts_threshold);
+ if (err)
+ WL_ERR("Error (%d)\n", err);
+
+ return err;
+}
+
+static s32 brcmf_set_frag(struct net_device *ndev, u32 frag_threshold)
+{
+ s32 err = 0;
+
+ err = brcmf_dev_intvar_set(ndev, "fragthresh", frag_threshold);
+ if (err)
+ WL_ERR("Error (%d)\n", err);
+
+ return err;
+}
+
+static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
+{
+ s32 err = 0;
+ u32 cmd = (l ? BRCM_SET_LRL : BRCM_SET_SRL);
+
+ err = brcmf_exec_dcmd_u32(ndev, cmd, &retry);
+ if (err) {
+ WL_ERR("cmd (%d) , error (%d)\n", cmd, err);
+ return err;
+ }
+ return err;
+}
+
+static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+ (cfg_priv->conf->rts_threshold != wiphy->rts_threshold)) {
+ cfg_priv->conf->rts_threshold = wiphy->rts_threshold;
+ err = brcmf_set_rts(ndev, cfg_priv->conf->rts_threshold);
+ if (!err)
+ goto done;
+ }
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+ (cfg_priv->conf->frag_threshold != wiphy->frag_threshold)) {
+ cfg_priv->conf->frag_threshold = wiphy->frag_threshold;
+ err = brcmf_set_frag(ndev, cfg_priv->conf->frag_threshold);
+ if (!err)
+ goto done;
+ }
+ if (changed & WIPHY_PARAM_RETRY_LONG
+ && (cfg_priv->conf->retry_long != wiphy->retry_long)) {
+ cfg_priv->conf->retry_long = wiphy->retry_long;
+ err = brcmf_set_retry(ndev, cfg_priv->conf->retry_long, true);
+ if (!err)
+ goto done;
+ }
+ if (changed & WIPHY_PARAM_RETRY_SHORT
+ && (cfg_priv->conf->retry_short != wiphy->retry_short)) {
+ cfg_priv->conf->retry_short = wiphy->retry_short;
+ err = brcmf_set_retry(ndev, cfg_priv->conf->retry_short, false);
+ if (!err)
+ goto done;
+ }
+
+done:
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static void *brcmf_read_prof(struct brcmf_cfg80211_priv *cfg_priv, s32 item)
+{
+ switch (item) {
+ case WL_PROF_SEC:
+ return &cfg_priv->profile->sec;
+ case WL_PROF_BSSID:
+ return &cfg_priv->profile->bssid;
+ case WL_PROF_SSID:
+ return &cfg_priv->profile->ssid;
+ }
+ WL_ERR("invalid item (%d)\n", item);
+ return NULL;
+}
+
+static s32
+brcmf_update_prof(struct brcmf_cfg80211_priv *cfg_priv,
+ const struct brcmf_event_msg *e, void *data, s32 item)
+{
+ s32 err = 0;
+ struct brcmf_ssid *ssid;
+
+ switch (item) {
+ case WL_PROF_SSID:
+ ssid = (struct brcmf_ssid *) data;
+ memset(cfg_priv->profile->ssid.SSID, 0,
+ sizeof(cfg_priv->profile->ssid.SSID));
+ memcpy(cfg_priv->profile->ssid.SSID,
+ ssid->SSID, ssid->SSID_len);
+ cfg_priv->profile->ssid.SSID_len = ssid->SSID_len;
+ break;
+ case WL_PROF_BSSID:
+ if (data)
+ memcpy(cfg_priv->profile->bssid, data, ETH_ALEN);
+ else
+ memset(cfg_priv->profile->bssid, 0, ETH_ALEN);
+ break;
+ case WL_PROF_SEC:
+ memcpy(&cfg_priv->profile->sec, data,
+ sizeof(cfg_priv->profile->sec));
+ break;
+ case WL_PROF_BEACONINT:
+ cfg_priv->profile->beacon_interval = *(u16 *)data;
+ break;
+ case WL_PROF_DTIMPERIOD:
+ cfg_priv->profile->dtim_period = *(u8 *)data;
+ break;
+ default:
+ WL_ERR("unsupported item (%d)\n", item);
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
+{
+ memset(prof, 0, sizeof(*prof));
+}
+
+static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
+ size_t *join_params_size)
+{
+ u16 chanspec = 0;
+
+ if (ch != 0) {
+ if (ch <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+ *join_params_size += BRCMF_ASSOC_PARAMS_FIXED_SIZE +
+ sizeof(u16);
+
+ chanspec |= (ch & WL_CHANSPEC_CHAN_MASK);
+ join_params->params_le.chanspec_list[0] = cpu_to_le16(chanspec);
+ join_params->params_le.chanspec_num = cpu_to_le32(1);
+
+ WL_CONN("join_params->params.chanspec_list[0]= %#X,"
+ "channel %d, chanspec %#X\n",
+ chanspec, ch, chanspec);
+ }
+}
+
+static void brcmf_link_down(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct net_device *ndev = NULL;
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+
+ if (cfg_priv->link_up) {
+ ndev = cfg_to_ndev(cfg_priv);
+ WL_INFO("Call WLC_DISASSOC to stop excess roaming\n ");
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0);
+ if (err)
+ WL_ERR("WLC_DISASSOC failed (%d)\n", err);
+ cfg_priv->link_up = false;
+ }
+ WL_TRACE("Exit\n");
+}
+
+static s32
+brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_ibss_params *params)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_join_params join_params;
+ size_t join_params_size = 0;
+ s32 err = 0;
+ s32 wsec = 0;
+ s32 bcnprd;
+ struct brcmf_ssid ssid;
+
+ WL_TRACE("Enter\n");
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ if (params->ssid)
+ WL_CONN("SSID: %s\n", params->ssid);
+ else {
+ WL_CONN("SSID: NULL, Not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+
+ if (params->bssid)
+ WL_CONN("BSSID: %02X %02X %02X %02X %02X %02X\n",
+ params->bssid[0], params->bssid[1], params->bssid[2],
+ params->bssid[3], params->bssid[4], params->bssid[5]);
+ else
+ WL_CONN("No BSSID specified\n");
+
+ if (params->channel)
+ WL_CONN("channel: %d\n", params->channel->center_freq);
+ else
+ WL_CONN("no channel specified\n");
+
+ if (params->channel_fixed)
+ WL_CONN("fixed channel required\n");
+ else
+ WL_CONN("no fixed channel required\n");
+
+ if (params->ie && params->ie_len)
+ WL_CONN("ie len: %d\n", params->ie_len);
+ else
+ WL_CONN("no ie specified\n");
+
+ if (params->beacon_interval)
+ WL_CONN("beacon interval: %d\n", params->beacon_interval);
+ else
+ WL_CONN("no beacon interval specified\n");
+
+ if (params->basic_rates)
+ WL_CONN("basic rates: %08X\n", params->basic_rates);
+ else
+ WL_CONN("no basic rates specified\n");
+
+ if (params->privacy)
+ WL_CONN("privacy required\n");
+ else
+ WL_CONN("no privacy required\n");
+
+ /* Configure Privacy for starter */
+ if (params->privacy)
+ wsec |= WEP_ENABLED;
+
+ err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
+ if (err) {
+ WL_ERR("wsec failed (%d)\n", err);
+ goto done;
+ }
+
+ /* Configure Beacon Interval for starter */
+ if (params->beacon_interval)
+ bcnprd = params->beacon_interval;
+ else
+ bcnprd = 100;
+
+ err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_BCNPRD, &bcnprd);
+ if (err) {
+ WL_ERR("WLC_SET_BCNPRD failed (%d)\n", err);
+ goto done;
+ }
+
+ /* Configure required join parameter */
+ memset(&join_params, 0, sizeof(struct brcmf_join_params));
+
+ /* SSID */
+ ssid.SSID_len = min_t(u32, params->ssid_len, 32);
+ memcpy(ssid.SSID, params->ssid, ssid.SSID_len);
+ memcpy(join_params.ssid_le.SSID, params->ssid, ssid.SSID_len);
+ join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
+ join_params_size = sizeof(join_params.ssid_le);
+ brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID);
+
+ /* BSSID */
+ if (params->bssid) {
+ memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN);
+ join_params_size = sizeof(join_params.ssid_le) +
+ BRCMF_ASSOC_PARAMS_FIXED_SIZE;
+ } else {
+ memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
+ }
+
+ brcmf_update_prof(cfg_priv, NULL,
+ &join_params.params_le.bssid, WL_PROF_BSSID);
+
+ /* Channel */
+ if (params->channel) {
+ u32 target_channel;
+
+ cfg_priv->channel =
+ ieee80211_frequency_to_channel(
+ params->channel->center_freq);
+ if (params->channel_fixed) {
+ /* adding chanspec */
+ brcmf_ch_to_chanspec(cfg_priv->channel,
+ &join_params, &join_params_size);
+ }
+
+ /* set channel for starter */
+ target_channel = cfg_priv->channel;
+ err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL,
+ &target_channel);
+ if (err) {
+ WL_ERR("WLC_SET_CHANNEL failed (%d)\n", err);
+ goto done;
+ }
+ } else
+ cfg_priv->channel = 0;
+
+ cfg_priv->ibss_starter = false;
+
+
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
+ &join_params, join_params_size);
+ if (err) {
+ WL_ERR("WLC_SET_SSID failed (%d)\n", err);
+ goto done;
+ }
+
+done:
+ if (err)
+ clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ brcmf_link_down(cfg_priv);
+
+ WL_TRACE("Exit\n");
+
+ return err;
+}
+
+static s32 brcmf_set_wpa_version(struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+
+ if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+ else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+ else
+ val = WPA_AUTH_DISABLED;
+ WL_CONN("setting wpa_auth to 0x%0x\n", val);
+ err = brcmf_dev_intvar_set(ndev, "wpa_auth", val);
+ if (err) {
+ WL_ERR("set wpa_auth failed (%d)\n", err);
+ return err;
+ }
+ sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+ sec->wpa_versions = sme->crypto.wpa_versions;
+ return err;
+}
+
+static s32 brcmf_set_auth_type(struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+
+ switch (sme->auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ val = 0;
+ WL_CONN("open system\n");
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ val = 1;
+ WL_CONN("shared key\n");
+ break;
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ val = 2;
+ WL_CONN("automatic\n");
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ WL_CONN("network eap\n");
+ default:
+ val = 2;
+ WL_ERR("invalid auth type (%d)\n", sme->auth_type);
+ break;
+ }
+
+ err = brcmf_dev_intvar_set(ndev, "auth", val);
+ if (err) {
+ WL_ERR("set auth failed (%d)\n", err);
+ return err;
+ }
+ sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+ sec->auth_type = sme->auth_type;
+ return err;
+}
+
+static s32
+brcmf_set_set_cipher(struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_security *sec;
+ s32 pval = 0;
+ s32 gval = 0;
+ s32 err = 0;
+
+ if (sme->crypto.n_ciphers_pairwise) {
+ switch (sme->crypto.ciphers_pairwise[0]) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ pval = WEP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ pval = TKIP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ pval = AES_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ pval = AES_ENABLED;
+ break;
+ default:
+ WL_ERR("invalid cipher pairwise (%d)\n",
+ sme->crypto.ciphers_pairwise[0]);
+ return -EINVAL;
+ }
+ }
+ if (sme->crypto.cipher_group) {
+ switch (sme->crypto.cipher_group) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ gval = WEP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ gval = TKIP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ gval = AES_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ gval = AES_ENABLED;
+ break;
+ default:
+ WL_ERR("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
+ return -EINVAL;
+ }
+ }
+
+ WL_CONN("pval (%d) gval (%d)\n", pval, gval);
+ err = brcmf_dev_intvar_set(ndev, "wsec", pval | gval);
+ if (err) {
+ WL_ERR("error (%d)\n", err);
+ return err;
+ }
+
+ sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+ sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
+ sec->cipher_group = sme->crypto.cipher_group;
+
+ return err;
+}
+
+static s32
+brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+
+ if (sme->crypto.n_akm_suites) {
+ err = brcmf_dev_intvar_get(ndev, "wpa_auth", &val);
+ if (err) {
+ WL_ERR("could not get wpa_auth (%d)\n", err);
+ return err;
+ }
+ if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_8021X:
+ val = WPA_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ val = WPA_AUTH_PSK;
+ break;
+ default:
+ WL_ERR("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
+ return -EINVAL;
+ }
+ } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_8021X:
+ val = WPA2_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ val = WPA2_AUTH_PSK;
+ break;
+ default:
+ WL_ERR("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
+ return -EINVAL;
+ }
+ }
+
+ WL_CONN("setting wpa_auth to %d\n", val);
+ err = brcmf_dev_intvar_set(ndev, "wpa_auth", val);
+ if (err) {
+ WL_ERR("could not set wpa_auth (%d)\n", err);
+ return err;
+ }
+ }
+ sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+ sec->wpa_auth = sme->crypto.akm_suites[0];
+
+ return err;
+}
+
+static s32
+brcmf_set_wep_sharedkey(struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_security *sec;
+ struct brcmf_wsec_key key;
+ s32 val;
+ s32 err = 0;
+
+ WL_CONN("key len (%d)\n", sme->key_len);
+
+ if (sme->key_len == 0)
+ return 0;
+
+ sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+ WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n",
+ sec->wpa_versions, sec->cipher_pairwise);
+
+ if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
+ return 0;
+
+ if (sec->cipher_pairwise &
+ (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)) {
+ memset(&key, 0, sizeof(key));
+ key.len = (u32) sme->key_len;
+ key.index = (u32) sme->key_idx;
+ if (key.len > sizeof(key.data)) {
+ WL_ERR("Too long key length (%u)\n", key.len);
+ return -EINVAL;
+ }
+ memcpy(key.data, sme->key, key.len);
+ key.flags = BRCMF_PRIMARY_KEY;
+ switch (sec->cipher_pairwise) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+ default:
+ WL_ERR("Invalid algorithm (%d)\n",
+ sme->crypto.ciphers_pairwise[0]);
+ return -EINVAL;
+ }
+ /* Set the new key/index */
+ WL_CONN("key length (%d) key index (%d) algo (%d)\n",
+ key.len, key.index, key.algo);
+ WL_CONN("key \"%s\"\n", key.data);
+ err = send_key_to_dongle(ndev, &key);
+ if (err)
+ return err;
+
+ if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
+ WL_CONN("set auth_type to shared key\n");
+ val = 1; /* shared key */
+ err = brcmf_dev_intvar_set(ndev, "auth", val);
+ if (err) {
+ WL_ERR("set auth failed (%d)\n", err);
+ return err;
+ }
+ }
+ }
+ return err;
+}
+
+static s32
+brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct ieee80211_channel *chan = sme->channel;
+ struct brcmf_join_params join_params;
+ size_t join_params_size;
+ struct brcmf_ssid ssid;
+
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ if (!sme->ssid) {
+ WL_ERR("Invalid ssid\n");
+ return -EOPNOTSUPP;
+ }
+
+ set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+
+ if (chan) {
+ cfg_priv->channel =
+ ieee80211_frequency_to_channel(chan->center_freq);
+ WL_CONN("channel (%d), center_req (%d)\n",
+ cfg_priv->channel, chan->center_freq);
+ } else
+ cfg_priv->channel = 0;
+
+ WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
+
+ err = brcmf_set_wpa_version(ndev, sme);
+ if (err) {
+ WL_ERR("wl_set_wpa_version failed (%d)\n", err);
+ goto done;
+ }
+
+ err = brcmf_set_auth_type(ndev, sme);
+ if (err) {
+ WL_ERR("wl_set_auth_type failed (%d)\n", err);
+ goto done;
+ }
+
+ err = brcmf_set_set_cipher(ndev, sme);
+ if (err) {
+ WL_ERR("wl_set_set_cipher failed (%d)\n", err);
+ goto done;
+ }
+
+ err = brcmf_set_key_mgmt(ndev, sme);
+ if (err) {
+ WL_ERR("wl_set_key_mgmt failed (%d)\n", err);
+ goto done;
+ }
+
+ err = brcmf_set_wep_sharedkey(ndev, sme);
+ if (err) {
+ WL_ERR("brcmf_set_wep_sharedkey failed (%d)\n", err);
+ goto done;
+ }
+
+ memset(&join_params, 0, sizeof(join_params));
+ join_params_size = sizeof(join_params.ssid_le);
+
+ ssid.SSID_len = min_t(u32, sizeof(ssid.SSID), sme->ssid_len);
+ memcpy(&join_params.ssid_le.SSID, sme->ssid, ssid.SSID_len);
+ memcpy(&ssid.SSID, sme->ssid, ssid.SSID_len);
+ join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
+ brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID);
+
+ memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
+
+ if (ssid.SSID_len < IEEE80211_MAX_SSID_LEN)
+ WL_CONN("ssid \"%s\", len (%d)\n",
+ ssid.SSID, ssid.SSID_len);
+
+ brcmf_ch_to_chanspec(cfg_priv->channel,
+ &join_params, &join_params_size);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
+ &join_params, join_params_size);
+ if (err)
+ WL_ERR("WLC_SET_SSID failed (%d)\n", err);
+
+done:
+ if (err)
+ clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
+ u16 reason_code)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_scb_val_le scbval;
+ s32 err = 0;
+
+ WL_TRACE("Enter. Reason code = %d\n", reason_code);
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+
+ memcpy(&scbval.ea, brcmf_read_prof(cfg_priv, WL_PROF_BSSID), ETH_ALEN);
+ scbval.val = cpu_to_le32(reason_code);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval,
+ sizeof(struct brcmf_scb_val_le));
+ if (err)
+ WL_ERR("error (%d)\n", err);
+
+ cfg_priv->link_up = false;
+
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type, s32 dbm)
+{
+
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ u16 txpwrmw;
+ s32 err = 0;
+ s32 disable = 0;
+
+ WL_TRACE("Enter\n");
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ switch (type) {
+ case NL80211_TX_POWER_AUTOMATIC:
+ break;
+ case NL80211_TX_POWER_LIMITED:
+ if (dbm < 0) {
+ WL_ERR("TX_POWER_LIMITED - dbm is negative\n");
+ err = -EINVAL;
+ goto done;
+ }
+ break;
+ case NL80211_TX_POWER_FIXED:
+ if (dbm < 0) {
+ WL_ERR("TX_POWER_FIXED - dbm is negative\n");
+ err = -EINVAL;
+ goto done;
+ }
+ break;
+ }
+ /* Make sure radio is off or on as far as software is concerned */
+ disable = WL_RADIO_SW_DISABLE << 16;
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_RADIO, &disable);
+ if (err)
+ WL_ERR("WLC_SET_RADIO error (%d)\n", err);
+
+ if (dbm > 0xffff)
+ txpwrmw = 0xffff;
+ else
+ txpwrmw = (u16) dbm;
+ err = brcmf_dev_intvar_set(ndev, "qtxpower",
+ (s32) (brcmf_mw_to_qdbm(txpwrmw)));
+ if (err)
+ WL_ERR("qtxpower error (%d)\n", err);
+ cfg_priv->conf->tx_power = dbm;
+
+done:
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ s32 txpwrdbm;
+ u8 result;
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ err = brcmf_dev_intvar_get(ndev, "qtxpower", &txpwrdbm);
+ if (err) {
+ WL_ERR("error (%d)\n", err);
+ goto done;
+ }
+
+ result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
+ *dbm = (s32) brcmf_qdbm_to_mw(result);
+
+done:
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_idx, bool unicast, bool multicast)
+{
+ u32 index;
+ u32 wsec;
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+ WL_CONN("key index (%d)\n", key_idx);
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec);
+ if (err) {
+ WL_ERR("WLC_GET_WSEC error (%d)\n", err);
+ goto done;
+ }
+
+ if (wsec & WEP_ENABLED) {
+ /* Just select a new current key */
+ index = key_idx;
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_KEY_PRIMARY,
+ &index);
+ if (err)
+ WL_ERR("error (%d)\n", err);
+ }
+done:
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_idx, const u8 *mac_addr, struct key_params *params)
+{
+ struct brcmf_wsec_key key;
+ struct brcmf_wsec_key_le key_le;
+ s32 err = 0;
+
+ memset(&key, 0, sizeof(key));
+ key.index = (u32) key_idx;
+ /* Instead of bcast for ea address for default wep keys,
+ driver needs it to be Null */
+ if (!is_multicast_ether_addr(mac_addr))
+ memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
+ key.len = (u32) params->key_len;
+ /* check for key index change */
+ if (key.len == 0) {
+ /* key delete */
+ err = send_key_to_dongle(ndev, &key);
+ if (err)
+ return err;
+ } else {
+ if (key.len > sizeof(key.data)) {
+ WL_ERR("Invalid key length (%d)\n", key.len);
+ return -EINVAL;
+ }
+
+ WL_CONN("Setting the key index %d\n", key.index);
+ memcpy(key.data, params->key, key.len);
+
+ if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ u8 keybuf[8];
+ memcpy(keybuf, &key.data[24], sizeof(keybuf));
+ memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+ memcpy(&key.data[16], keybuf, sizeof(keybuf));
+ }
+
+ /* if IW_ENCODE_EXT_RX_SEQ_VALID set */
+ if (params->seq && params->seq_len == 6) {
+ /* rx iv */
+ u8 *ivptr;
+ ivptr = (u8 *) params->seq;
+ key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+ (ivptr[3] << 8) | ivptr[2];
+ key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+ key.iv_initialized = true;
+ }
+
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ WL_CONN("WLAN_CIPHER_SUITE_CCMP\n");
+ break;
+ default:
+ WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
+ return -EINVAL;
+ }
+ convert_key_from_CPU(&key, &key_le);
+
+ brcmf_netdev_wait_pend8021x(ndev);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le,
+ sizeof(key_le));
+ if (err) {
+ WL_ERR("WLC_SET_KEY error (%d)\n", err);
+ return err;
+ }
+ }
+ return err;
+}
+
+static s32
+brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct brcmf_wsec_key key;
+ s32 val;
+ s32 wsec;
+ s32 err = 0;
+ u8 keybuf[8];
+
+ WL_TRACE("Enter\n");
+ WL_CONN("key index (%d)\n", key_idx);
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ if (mac_addr) {
+ WL_TRACE("Exit");
+ return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
+ }
+ memset(&key, 0, sizeof(key));
+
+ key.len = (u32) params->key_len;
+ key.index = (u32) key_idx;
+
+ if (key.len > sizeof(key.data)) {
+ WL_ERR("Too long key length (%u)\n", key.len);
+ err = -EINVAL;
+ goto done;
+ }
+ memcpy(key.data, params->key, key.len);
+
+ key.flags = BRCMF_PRIMARY_KEY;
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ memcpy(keybuf, &key.data[24], sizeof(keybuf));
+ memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+ memcpy(&key.data[16], keybuf, sizeof(keybuf));
+ key.algo = CRYPTO_ALGO_TKIP;
+ WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ WL_CONN("WLAN_CIPHER_SUITE_CCMP\n");
+ break;
+ default:
+ WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
+ err = -EINVAL;
+ goto done;
+ }
+
+ err = send_key_to_dongle(ndev, &key); /* Set the new key/index */
+ if (err)
+ goto done;
+
+ val = WEP_ENABLED;
+ err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
+ if (err) {
+ WL_ERR("get wsec error (%d)\n", err);
+ goto done;
+ }
+ wsec &= ~(WEP_ENABLED);
+ wsec |= val;
+ err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
+ if (err) {
+ WL_ERR("set wsec error (%d)\n", err);
+ goto done;
+ }
+
+ val = 1; /* assume shared key. otherwise 0 */
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
+ if (err)
+ WL_ERR("WLC_SET_AUTH error (%d)\n", err);
+done:
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr)
+{
+ struct brcmf_wsec_key key;
+ s32 err = 0;
+ s32 val;
+ s32 wsec;
+
+ WL_TRACE("Enter\n");
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ memset(&key, 0, sizeof(key));
+
+ key.index = (u32) key_idx;
+ key.flags = BRCMF_PRIMARY_KEY;
+ key.algo = CRYPTO_ALGO_OFF;
+
+ WL_CONN("key index (%d)\n", key_idx);
+
+ /* Set the new key/index */
+ err = send_key_to_dongle(ndev, &key);
+ if (err) {
+ if (err == -EINVAL) {
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+ /* we ignore this key index in this case */
+ WL_ERR("invalid key index (%d)\n", key_idx);
+ }
+ /* Ignore this error, may happen during DISASSOC */
+ err = -EAGAIN;
+ goto done;
+ }
+
+ val = 0;
+ err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
+ if (err) {
+ WL_ERR("get wsec error (%d)\n", err);
+ /* Ignore this error, may happen during DISASSOC */
+ err = -EAGAIN;
+ goto done;
+ }
+ wsec &= ~(WEP_ENABLED);
+ wsec |= val;
+ err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
+ if (err) {
+ WL_ERR("set wsec error (%d)\n", err);
+ /* Ignore this error, may happen during DISASSOC */
+ err = -EAGAIN;
+ goto done;
+ }
+
+ val = 0; /* assume open key. otherwise 1 */
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
+ if (err) {
+ WL_ERR("WLC_SET_AUTH error (%d)\n", err);
+ /* Ignore this error, may happen during DISASSOC */
+ err = -EAGAIN;
+ }
+done:
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
+ void (*callback) (void *cookie, struct key_params * params))
+{
+ struct key_params params;
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_security *sec;
+ s32 wsec;
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+ WL_CONN("key index (%d)\n", key_idx);
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ memset(&params, 0, sizeof(params));
+
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec);
+ if (err) {
+ WL_ERR("WLC_GET_WSEC error (%d)\n", err);
+ /* Ignore this error, may happen during DISASSOC */
+ err = -EAGAIN;
+ goto done;
+ }
+ switch (wsec) {
+ case WEP_ENABLED:
+ sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+ if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
+ params.cipher = WLAN_CIPHER_SUITE_WEP40;
+ WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
+ } else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
+ params.cipher = WLAN_CIPHER_SUITE_WEP104;
+ WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
+ }
+ break;
+ case TKIP_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_TKIP;
+ WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
+ break;
+ case AES_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+ WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
+ break;
+ default:
+ WL_ERR("Invalid algo (0x%x)\n", wsec);
+ err = -EINVAL;
+ goto done;
+ }
+ callback(cookie, &params);
+
+done:
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *ndev, u8 key_idx)
+{
+ WL_INFO("Not supported\n");
+
+ return -EOPNOTSUPP;
+}
+
+static s32
+brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_scb_val_le scb_val;
+ int rssi;
+ s32 rate;
+ s32 err = 0;
+ u8 *bssid = brcmf_read_prof(cfg_priv, WL_PROF_BSSID);
+
+ WL_TRACE("Enter\n");
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ if (memcmp(mac, bssid, ETH_ALEN)) {
+ WL_ERR("Wrong Mac address cfg_mac-%X:%X:%X:%X:%X:%X"
+ "wl_bssid-%X:%X:%X:%X:%X:%X\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ bssid[0], bssid[1], bssid[2], bssid[3],
+ bssid[4], bssid[5]);
+ err = -ENOENT;
+ goto done;
+ }
+
+ /* Report the current tx rate */
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
+ if (err) {
+ WL_ERR("Could not get rate (%d)\n", err);
+ } else {
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->txrate.legacy = rate * 5;
+ WL_CONN("Rate %d Mbps\n", rate / 2);
+ }
+
+ if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) {
+ scb_val.val = cpu_to_le32(0);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
+ sizeof(struct brcmf_scb_val_le));
+ if (err)
+ WL_ERR("Could not get rssi (%d)\n", err);
+
+ rssi = le32_to_cpu(scb_val.val);
+ sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->signal = rssi;
+ WL_CONN("RSSI %d dBm\n", rssi);
+ }
+
+done:
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
+ bool enabled, s32 timeout)
+{
+ s32 pm;
+ s32 err = 0;
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+
+ WL_TRACE("Enter\n");
+
+ /*
+ * Powersave enable/disable request is coming from the
+ * cfg80211 even before the interface is up. In that
+ * scenario, driver will be storing the power save
+ * preference in cfg_priv struct to apply this to
+ * FW later while initializing the dongle
+ */
+ cfg_priv->pwr_save = enabled;
+ if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+
+ WL_INFO("Device is not ready,"
+ "storing the value in cfg_priv struct\n");
+ goto done;
+ }
+
+ pm = enabled ? PM_FAST : PM_OFF;
+ WL_INFO("power save %s\n", (pm ? "enabled" : "disabled"));
+
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &pm);
+ if (err) {
+ if (err == -ENODEV)
+ WL_ERR("net_device is not ready yet\n");
+ else
+ WL_ERR("error (%d)\n", err);
+ }
+done:
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev,
+ const u8 *addr,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct brcm_rateset_le rateset_le;
+ s32 rate;
+ s32 val;
+ s32 err_bg;
+ s32 err_a;
+ u32 legacy;
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ /* addr param is always NULL. ignore it */
+ /* Get current rateset */
+ err = brcmf_exec_dcmd(ndev, BRCM_GET_CURR_RATESET, &rateset_le,
+ sizeof(rateset_le));
+ if (err) {
+ WL_ERR("could not get current rateset (%d)\n", err);
+ goto done;
+ }
+
+ legacy = ffs(mask->control[IEEE80211_BAND_2GHZ].legacy & 0xFFFF);
+ if (!legacy)
+ legacy = ffs(mask->control[IEEE80211_BAND_5GHZ].legacy &
+ 0xFFFF);
+
+ val = wl_g_rates[legacy - 1].bitrate * 100000;
+
+ if (val < le32_to_cpu(rateset_le.count))
+ /* Select rate by rateset index */
+ rate = rateset_le.rates[val] & 0x7f;
+ else
+ /* Specified rate in bps */
+ rate = val / 500000;
+
+ WL_CONN("rate %d mbps\n", rate / 2);
+
+ /*
+ *
+ * Set rate override,
+ * Since the is a/b/g-blind, both a/bg_rate are enforced.
+ */
+ err_bg = brcmf_dev_intvar_set(ndev, "bg_rate", rate);
+ err_a = brcmf_dev_intvar_set(ndev, "a_rate", rate);
+ if (err_bg && err_a) {
+ WL_ERR("could not set fixed rate (%d) (%d)\n", err_bg, err_a);
+ err = err_bg | err_a;
+ }
+
+done:
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
+ struct brcmf_bss_info *bi)
+{
+ struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
+ struct ieee80211_channel *notify_channel;
+ struct cfg80211_bss *bss;
+ struct ieee80211_supported_band *band;
+ s32 err = 0;
+ u16 channel;
+ u32 freq;
+ u64 notify_timestamp;
+ u16 notify_capability;
+ u16 notify_interval;
+ u8 *notify_ie;
+ size_t notify_ielen;
+ s32 notify_signal;
+
+ if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) {
+ WL_ERR("Bss info is larger than buffer. Discarding\n");
+ return 0;
+ }
+
+ channel = bi->ctl_ch ? bi->ctl_ch :
+ CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
+
+ if (channel <= CH_MAX_2G_CHANNEL)
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ else
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ freq = ieee80211_channel_to_frequency(channel, band->band);
+ notify_channel = ieee80211_get_channel(wiphy, freq);
+
+ notify_timestamp = jiffies_to_msecs(jiffies)*1000; /* uSec */
+ notify_capability = le16_to_cpu(bi->capability);
+ notify_interval = le16_to_cpu(bi->beacon_period);
+ notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
+ notify_ielen = le32_to_cpu(bi->ie_length);
+ notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
+
+ WL_CONN("bssid: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
+ bi->BSSID[0], bi->BSSID[1], bi->BSSID[2],
+ bi->BSSID[3], bi->BSSID[4], bi->BSSID[5]);
+ WL_CONN("Channel: %d(%d)\n", channel, freq);
+ WL_CONN("Capability: %X\n", notify_capability);
+ WL_CONN("Beacon interval: %d\n", notify_interval);
+ WL_CONN("Signal: %d\n", notify_signal);
+ WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp);
+
+ bss = cfg80211_inform_bss(wiphy, notify_channel, (const u8 *)bi->BSSID,
+ notify_timestamp, notify_capability, notify_interval, notify_ie,
+ notify_ielen, notify_signal, GFP_KERNEL);
+
+ if (!bss) {
+ WL_ERR("cfg80211_inform_bss_frame error\n");
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct brcmf_scan_results *bss_list;
+ struct brcmf_bss_info *bi = NULL; /* must be initialized */
+ s32 err = 0;
+ int i;
+
+ bss_list = cfg_priv->bss_list;
+ if (bss_list->version != BRCMF_BSS_INFO_VERSION) {
+ WL_ERR("Version %d != WL_BSS_INFO_VERSION\n",
+ bss_list->version);
+ return -EOPNOTSUPP;
+ }
+ WL_SCAN("scanned AP count (%d)\n", bss_list->count);
+ for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
+ bi = next_bss(bss_list, bi);
+ err = brcmf_inform_single_bss(cfg_priv, bi);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev, const u8 *bssid)
+{
+ struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
+ struct ieee80211_channel *notify_channel;
+ struct brcmf_bss_info *bi = NULL;
+ struct ieee80211_supported_band *band;
+ u8 *buf = NULL;
+ s32 err = 0;
+ u16 channel;
+ u32 freq;
+ u64 notify_timestamp;
+ u16 notify_capability;
+ u16 notify_interval;
+ u8 *notify_ie;
+ size_t notify_ielen;
+ s32 notify_signal;
+
+ WL_TRACE("Enter\n");
+
+ buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+ if (buf == NULL) {
+ err = -ENOMEM;
+ goto CleanUp;
+ }
+
+ *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
+
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
+ if (err) {
+ WL_ERR("WLC_GET_BSS_INFO failed: %d\n", err);
+ goto CleanUp;
+ }
+
+ bi = (struct brcmf_bss_info *)(buf + 4);
+
+ channel = bi->ctl_ch ? bi->ctl_ch :
+ CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
+
+ if (channel <= CH_MAX_2G_CHANNEL)
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ else
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ freq = ieee80211_channel_to_frequency(channel, band->band);
+ notify_channel = ieee80211_get_channel(wiphy, freq);
+
+ notify_timestamp = jiffies_to_msecs(jiffies)*1000; /* uSec */
+ notify_capability = le16_to_cpu(bi->capability);
+ notify_interval = le16_to_cpu(bi->beacon_period);
+ notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
+ notify_ielen = le32_to_cpu(bi->ie_length);
+ notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
+
+ WL_CONN("channel: %d(%d)\n", channel, freq);
+ WL_CONN("capability: %X\n", notify_capability);
+ WL_CONN("beacon interval: %d\n", notify_interval);
+ WL_CONN("signal: %d\n", notify_signal);
+ WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp);
+
+ cfg80211_inform_bss(wiphy, notify_channel, bssid,
+ notify_timestamp, notify_capability, notify_interval,
+ notify_ie, notify_ielen, notify_signal, GFP_KERNEL);
+
+CleanUp:
+
+ kfree(buf);
+
+ WL_TRACE("Exit\n");
+
+ return err;
+}
+
+static bool brcmf_is_ibssmode(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ return cfg_priv->conf->mode == WL_MODE_IBSS;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
+{
+ struct brcmf_tlv *elt;
+ int totlen;
+
+ elt = (struct brcmf_tlv *) buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= 2) {
+ int len = elt->len;
+
+ /* validate remaining totlen */
+ if ((elt->id == key) && (totlen >= (len + 2)))
+ return elt;
+
+ elt = (struct brcmf_tlv *) ((u8 *) elt + (len + 2));
+ totlen -= (len + 2);
+ }
+
+ return NULL;
+}
+
+static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct brcmf_bss_info *bi;
+ struct brcmf_ssid *ssid;
+ struct brcmf_tlv *tim;
+ u16 beacon_interval;
+ u8 dtim_period;
+ size_t ie_len;
+ u8 *ie;
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+ if (brcmf_is_ibssmode(cfg_priv))
+ return err;
+
+ ssid = (struct brcmf_ssid *)brcmf_read_prof(cfg_priv, WL_PROF_SSID);
+
+ *(__le32 *)cfg_priv->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
+ err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_GET_BSS_INFO,
+ cfg_priv->extra_buf, WL_EXTRA_BUF_MAX);
+ if (err) {
+ WL_ERR("Could not get bss info %d\n", err);
+ goto update_bss_info_out;
+ }
+
+ bi = (struct brcmf_bss_info *)(cfg_priv->extra_buf + 4);
+ err = brcmf_inform_single_bss(cfg_priv, bi);
+ if (err)
+ goto update_bss_info_out;
+
+ ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset);
+ ie_len = le32_to_cpu(bi->ie_length);
+ beacon_interval = le16_to_cpu(bi->beacon_period);
+
+ tim = brcmf_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+ if (tim)
+ dtim_period = tim->data[1];
+ else {
+ /*
+ * active scan was done so we could not get dtim
+ * information out of probe response.
+ * so we speficially query dtim information to dongle.
+ */
+ u32 var;
+ err = brcmf_dev_intvar_get(cfg_to_ndev(cfg_priv),
+ "dtim_assoc", &var);
+ if (err) {
+ WL_ERR("wl dtim_assoc failed (%d)\n", err);
+ goto update_bss_info_out;
+ }
+ dtim_period = (u8)var;
+ }
+
+ brcmf_update_prof(cfg_priv, NULL, &beacon_interval, WL_PROF_BEACONINT);
+ brcmf_update_prof(cfg_priv, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+
+update_bss_info_out:
+ WL_TRACE("Exit");
+ return err;
+}
+
+static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+ struct brcmf_ssid ssid;
+
+ if (cfg_priv->iscan_on) {
+ iscan->state = WL_ISCAN_STATE_IDLE;
+
+ if (iscan->timer_on) {
+ del_timer_sync(&iscan->timer);
+ iscan->timer_on = 0;
+ }
+
+ cancel_work_sync(&iscan->work);
+
+ /* Abort iscan running in FW */
+ memset(&ssid, 0, sizeof(ssid));
+ brcmf_run_iscan(iscan, &ssid, WL_SCAN_ACTION_ABORT);
+ }
+}
+
+static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
+ bool aborted)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan);
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
+
+ if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) {
+ WL_ERR("Scan complete while device not scanning\n");
+ return;
+ }
+ if (cfg_priv->scan_request) {
+ WL_SCAN("ISCAN Completed scan: %s\n",
+ aborted ? "Aborted" : "Done");
+ cfg80211_scan_done(cfg_priv->scan_request, aborted);
+ brcmf_set_mpc(ndev, 1);
+ cfg_priv->scan_request = NULL;
+ }
+ cfg_priv->iscan_kickstart = false;
+}
+
+static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan)
+{
+ if (iscan->state != WL_ISCAN_STATE_IDLE) {
+ WL_SCAN("wake up iscan\n");
+ schedule_work(&iscan->work);
+ return 0;
+ }
+
+ return -EIO;
+}
+
+static s32
+brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
+ struct brcmf_scan_results **bss_list)
+{
+ struct brcmf_iscan_results list;
+ struct brcmf_scan_results *results;
+ struct brcmf_scan_results_le *results_le;
+ struct brcmf_iscan_results *list_buf;
+ s32 err = 0;
+
+ memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX);
+ list_buf = (struct brcmf_iscan_results *)iscan->scan_buf;
+ results = &list_buf->results;
+ results_le = &list_buf->results_le;
+ results->buflen = BRCMF_ISCAN_RESULTS_FIXED_SIZE;
+ results->version = 0;
+ results->count = 0;
+
+ memset(&list, 0, sizeof(list));
+ list.results_le.buflen = cpu_to_le32(WL_ISCAN_BUF_MAX);
+ err = brcmf_dev_iovar_getbuf(iscan->ndev, "iscanresults", &list,
+ BRCMF_ISCAN_RESULTS_FIXED_SIZE,
+ iscan->scan_buf, WL_ISCAN_BUF_MAX);
+ if (err) {
+ WL_ERR("error (%d)\n", err);
+ return err;
+ }
+ results->buflen = le32_to_cpu(results_le->buflen);
+ results->version = le32_to_cpu(results_le->version);
+ results->count = le32_to_cpu(results_le->count);
+ WL_SCAN("results->count = %d\n", results_le->count);
+ WL_SCAN("results->buflen = %d\n", results_le->buflen);
+ *status = le32_to_cpu(list_buf->status_le);
+ WL_SCAN("status = %d\n", *status);
+ *bss_list = results;
+
+ return err;
+}
+
+static s32 brcmf_iscan_done(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+ s32 err = 0;
+
+ iscan->state = WL_ISCAN_STATE_IDLE;
+ brcmf_inform_bss(cfg_priv);
+ brcmf_notify_iscan_complete(iscan, false);
+
+ return err;
+}
+
+static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+ s32 err = 0;
+
+ /* Reschedule the timer */
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
+ iscan->timer_on = 1;
+
+ return err;
+}
+
+static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+ s32 err = 0;
+
+ brcmf_inform_bss(cfg_priv);
+ brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE);
+ /* Reschedule the timer */
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
+ iscan->timer_on = 1;
+
+ return err;
+}
+
+static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+ s32 err = 0;
+
+ iscan->state = WL_ISCAN_STATE_IDLE;
+ brcmf_notify_iscan_complete(iscan, true);
+
+ return err;
+}
+
+static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
+{
+ struct brcmf_cfg80211_iscan_ctrl *iscan =
+ container_of(work, struct brcmf_cfg80211_iscan_ctrl,
+ work);
+ struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan);
+ struct brcmf_cfg80211_iscan_eloop *el = &iscan->el;
+ u32 status = BRCMF_SCAN_RESULTS_PARTIAL;
+
+ if (iscan->timer_on) {
+ del_timer_sync(&iscan->timer);
+ iscan->timer_on = 0;
+ }
+
+ if (brcmf_get_iscan_results(iscan, &status, &cfg_priv->bss_list)) {
+ status = BRCMF_SCAN_RESULTS_ABORTED;
+ WL_ERR("Abort iscan\n");
+ }
+
+ el->handler[status](cfg_priv);
+}
+
+static void brcmf_iscan_timer(unsigned long data)
+{
+ struct brcmf_cfg80211_iscan_ctrl *iscan =
+ (struct brcmf_cfg80211_iscan_ctrl *)data;
+
+ if (iscan) {
+ iscan->timer_on = 0;
+ WL_SCAN("timer expired\n");
+ brcmf_wakeup_iscan(iscan);
+ }
+}
+
+static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+
+ if (cfg_priv->iscan_on) {
+ iscan->state = WL_ISCAN_STATE_IDLE;
+ INIT_WORK(&iscan->work, brcmf_cfg80211_iscan_handler);
+ }
+
+ return 0;
+}
+
+static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el)
+{
+ memset(el, 0, sizeof(*el));
+ el->handler[BRCMF_SCAN_RESULTS_SUCCESS] = brcmf_iscan_done;
+ el->handler[BRCMF_SCAN_RESULTS_PARTIAL] = brcmf_iscan_inprogress;
+ el->handler[BRCMF_SCAN_RESULTS_PENDING] = brcmf_iscan_pending;
+ el->handler[BRCMF_SCAN_RESULTS_ABORTED] = brcmf_iscan_aborted;
+ el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted;
+}
+
+static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+ int err = 0;
+
+ if (cfg_priv->iscan_on) {
+ iscan->ndev = cfg_to_ndev(cfg_priv);
+ brcmf_init_iscan_eloop(&iscan->el);
+ iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
+ init_timer(&iscan->timer);
+ iscan->timer.data = (unsigned long) iscan;
+ iscan->timer.function = brcmf_iscan_timer;
+ err = brcmf_invoke_iscan(cfg_priv);
+ if (!err)
+ iscan->data = cfg_priv;
+ }
+
+ return err;
+}
+
+static void brcmf_delay(u32 ms)
+{
+ if (ms < 1000 / HZ) {
+ cond_resched();
+ mdelay(ms);
+ } else {
+ msleep(ms);
+ }
+}
+
+static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+
+ /*
+ * Check for WL_STATUS_READY before any function call which
+ * could result is bus access. Don't block the resume for
+ * any driver error conditions
+ */
+ WL_TRACE("Enter\n");
+
+ if (test_bit(WL_STATUS_READY, &cfg_priv->status))
+ brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
+
+ WL_TRACE("Exit\n");
+ return 0;
+}
+
+static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
+ struct cfg80211_wowlan *wow)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
+
+ WL_TRACE("Enter\n");
+
+ /*
+ * Check for WL_STATUS_READY before any function call which
+ * could result is bus access. Don't block the suspend for
+ * any driver error conditions
+ */
+
+ /*
+ * While going to suspend if associated with AP disassociate
+ * from AP to save power while system is in suspended state
+ */
+ if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) ||
+ test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) &&
+ test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+ WL_INFO("Disassociating from AP"
+ " while entering suspend state\n");
+ brcmf_link_down(cfg_priv);
+
+ /*
+ * Make sure WPA_Supplicant receives all the event
+ * generated due to DISASSOC call to the fw to keep
+ * the state fw and WPA_Supplicant state consistent
+ */
+ brcmf_delay(500);
+ }
+
+ set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
+ if (test_bit(WL_STATUS_READY, &cfg_priv->status))
+ brcmf_term_iscan(cfg_priv);
+
+ if (cfg_priv->scan_request) {
+ /* Indidate scan abort to cfg80211 layer */
+ WL_INFO("Terminating scan in progress\n");
+ cfg80211_scan_done(cfg_priv->scan_request, true);
+ cfg_priv->scan_request = NULL;
+ }
+ clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
+ clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
+
+ /* Turn off watchdog timer */
+ if (test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+ WL_INFO("Enable MPC\n");
+ brcmf_set_mpc(ndev, 1);
+ }
+
+ WL_TRACE("Exit\n");
+
+ return 0;
+}
+
+static __used s32
+brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ u32 buflen;
+
+ buflen = brcmf_c_mkiovar(name, buf, len, cfg_priv->dcmd_buf,
+ WL_DCMD_LEN_MAX);
+ BUG_ON(!buflen);
+
+ return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg_priv->dcmd_buf,
+ buflen);
+}
+
+static s32
+brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf,
+ s32 buf_len)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ u32 len;
+ s32 err = 0;
+
+ len = brcmf_c_mkiovar(name, NULL, 0, cfg_priv->dcmd_buf,
+ WL_DCMD_LEN_MAX);
+ BUG_ON(!len);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg_priv->dcmd_buf,
+ WL_DCMD_LEN_MAX);
+ if (err) {
+ WL_ERR("error (%d)\n", err);
+ return err;
+ }
+ memcpy(buf, cfg_priv->dcmd_buf, buf_len);
+
+ return err;
+}
+
+static __used s32
+brcmf_update_pmklist(struct net_device *ndev,
+ struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
+{
+ int i, j;
+ int pmkid_len;
+
+ pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid);
+
+ WL_CONN("No of elements %d\n", pmkid_len);
+ for (i = 0; i < pmkid_len; i++) {
+ WL_CONN("PMKID[%d]: %pM =\n", i,
+ &pmk_list->pmkids.pmkid[i].BSSID);
+ for (j = 0; j < WLAN_PMKID_LEN; j++)
+ WL_CONN("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]);
+ }
+
+ if (!err)
+ brcmf_dev_bufvar_set(ndev, "pmkid_info", (char *)pmk_list,
+ sizeof(*pmk_list));
+
+ return err;
+}
+
+static s32
+brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct pmkid_list *pmkids = &cfg_priv->pmk_list->pmkids;
+ s32 err = 0;
+ int i;
+ int pmkid_len;
+
+ WL_TRACE("Enter\n");
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ pmkid_len = le32_to_cpu(pmkids->npmkid);
+ for (i = 0; i < pmkid_len; i++)
+ if (!memcmp(pmksa->bssid, pmkids->pmkid[i].BSSID, ETH_ALEN))
+ break;
+ if (i < WL_NUM_PMKIDS_MAX) {
+ memcpy(pmkids->pmkid[i].BSSID, pmksa->bssid, ETH_ALEN);
+ memcpy(pmkids->pmkid[i].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
+ if (i == pmkid_len) {
+ pmkid_len++;
+ pmkids->npmkid = cpu_to_le32(pmkid_len);
+ }
+ } else
+ err = -EINVAL;
+
+ WL_CONN("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+ pmkids->pmkid[pmkid_len].BSSID);
+ for (i = 0; i < WLAN_PMKID_LEN; i++)
+ WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]);
+
+ err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err);
+
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct pmkid_list pmkid;
+ s32 err = 0;
+ int i, pmkid_len;
+
+ WL_TRACE("Enter\n");
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN);
+ memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
+
+ WL_CONN("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+ &pmkid.pmkid[0].BSSID);
+ for (i = 0; i < WLAN_PMKID_LEN; i++)
+ WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]);
+
+ pmkid_len = le32_to_cpu(cfg_priv->pmk_list->pmkids.npmkid);
+ for (i = 0; i < pmkid_len; i++)
+ if (!memcmp
+ (pmksa->bssid, &cfg_priv->pmk_list->pmkids.pmkid[i].BSSID,
+ ETH_ALEN))
+ break;
+
+ if ((pmkid_len > 0)
+ && (i < pmkid_len)) {
+ memset(&cfg_priv->pmk_list->pmkids.pmkid[i], 0,
+ sizeof(struct pmkid));
+ for (; i < (pmkid_len - 1); i++) {
+ memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].BSSID,
+ &cfg_priv->pmk_list->pmkids.pmkid[i + 1].BSSID,
+ ETH_ALEN);
+ memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].PMKID,
+ &cfg_priv->pmk_list->pmkids.pmkid[i + 1].PMKID,
+ WLAN_PMKID_LEN);
+ }
+ cfg_priv->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1);
+ } else
+ err = -EINVAL;
+
+ err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err);
+
+ WL_TRACE("Exit\n");
+ return err;
+
+}
+
+static s32
+brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
+{
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ memset(cfg_priv->pmk_list, 0, sizeof(*cfg_priv->pmk_list));
+ err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err);
+
+ WL_TRACE("Exit\n");
+ return err;
+
+}
+
+static struct cfg80211_ops wl_cfg80211_ops = {
+ .change_virtual_intf = brcmf_cfg80211_change_iface,
+ .scan = brcmf_cfg80211_scan,
+ .set_wiphy_params = brcmf_cfg80211_set_wiphy_params,
+ .join_ibss = brcmf_cfg80211_join_ibss,
+ .leave_ibss = brcmf_cfg80211_leave_ibss,
+ .get_station = brcmf_cfg80211_get_station,
+ .set_tx_power = brcmf_cfg80211_set_tx_power,
+ .get_tx_power = brcmf_cfg80211_get_tx_power,
+ .add_key = brcmf_cfg80211_add_key,
+ .del_key = brcmf_cfg80211_del_key,
+ .get_key = brcmf_cfg80211_get_key,
+ .set_default_key = brcmf_cfg80211_config_default_key,
+ .set_default_mgmt_key = brcmf_cfg80211_config_default_mgmt_key,
+ .set_power_mgmt = brcmf_cfg80211_set_power_mgmt,
+ .set_bitrate_mask = brcmf_cfg80211_set_bitrate_mask,
+ .connect = brcmf_cfg80211_connect,
+ .disconnect = brcmf_cfg80211_disconnect,
+ .suspend = brcmf_cfg80211_suspend,
+ .resume = brcmf_cfg80211_resume,
+ .set_pmksa = brcmf_cfg80211_set_pmksa,
+ .del_pmksa = brcmf_cfg80211_del_pmksa,
+ .flush_pmksa = brcmf_cfg80211_flush_pmksa
+};
+
+static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
+{
+ s32 err = 0;
+
+ switch (mode) {
+ case WL_MODE_BSS:
+ return NL80211_IFTYPE_STATION;
+ case WL_MODE_IBSS:
+ return NL80211_IFTYPE_ADHOC;
+ default:
+ return NL80211_IFTYPE_UNSPECIFIED;
+ }
+
+ return err;
+}
+
+static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
+ struct device *ndev)
+{
+ struct wireless_dev *wdev;
+ s32 err = 0;
+
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+ if (!wdev)
+ return ERR_PTR(-ENOMEM);
+
+ wdev->wiphy =
+ wiphy_new(&wl_cfg80211_ops,
+ sizeof(struct brcmf_cfg80211_priv) + sizeof_iface);
+ if (!wdev->wiphy) {
+ WL_ERR("Couldn not allocate wiphy device\n");
+ err = -ENOMEM;
+ goto wiphy_new_out;
+ }
+ set_wiphy_dev(wdev->wiphy, ndev);
+ wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
+ wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+ wdev->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+ wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
+ * it as 11a by default.
+ * This will be updated with
+ * 11n phy tables in
+ * "ifconfig up"
+ * if phy has 11n capability
+ */
+ wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wdev->wiphy->cipher_suites = __wl_cipher_suites;
+ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+ wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power
+ * save mode
+ * by default
+ */
+ err = wiphy_register(wdev->wiphy);
+ if (err < 0) {
+ WL_ERR("Couldn not register wiphy device (%d)\n", err);
+ goto wiphy_register_out;
+ }
+ return wdev;
+
+wiphy_register_out:
+ wiphy_free(wdev->wiphy);
+
+wiphy_new_out:
+ kfree(wdev);
+
+ return ERR_PTR(err);
+}
+
+static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct wireless_dev *wdev = cfg_priv->wdev;
+
+ if (!wdev) {
+ WL_ERR("wdev is invalid\n");
+ return;
+ }
+ wiphy_unregister(wdev->wiphy);
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+ cfg_priv->wdev = NULL;
+}
+
+static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv,
+ const struct brcmf_event_msg *e)
+{
+ u32 event = be32_to_cpu(e->event_type);
+ u32 status = be32_to_cpu(e->status);
+
+ if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
+ WL_CONN("Processing set ssid\n");
+ cfg_priv->link_up = true;
+ return true;
+ }
+
+ return false;
+}
+
+static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv,
+ const struct brcmf_event_msg *e)
+{
+ u32 event = be32_to_cpu(e->event_type);
+ u16 flags = be16_to_cpu(e->flags);
+
+ if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) {
+ WL_CONN("Processing link down\n");
+ return true;
+ }
+ return false;
+}
+
+static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv,
+ const struct brcmf_event_msg *e)
+{
+ u32 event = be32_to_cpu(e->event_type);
+ u32 status = be32_to_cpu(e->status);
+
+ if (event == BRCMF_E_LINK && status == BRCMF_E_STATUS_NO_NETWORKS) {
+ WL_CONN("Processing Link %s & no network found\n",
+ be16_to_cpu(e->flags) & BRCMF_EVENT_MSG_LINK ?
+ "up" : "down");
+ return true;
+ }
+
+ if (event == BRCMF_E_SET_SSID && status != BRCMF_E_STATUS_SUCCESS) {
+ WL_CONN("Processing connecting & no network found\n");
+ return true;
+ }
+
+ return false;
+}
+
+static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
+
+ kfree(conn_info->req_ie);
+ conn_info->req_ie = NULL;
+ conn_info->req_ie_len = 0;
+ kfree(conn_info->resp_ie);
+ conn_info->resp_ie = NULL;
+ conn_info->resp_ie_len = 0;
+}
+
+static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
+ struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
+ u32 req_len;
+ u32 resp_len;
+ s32 err = 0;
+
+ brcmf_clear_assoc_ies(cfg_priv);
+
+ err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg_priv->extra_buf,
+ WL_ASSOC_INFO_MAX);
+ if (err) {
+ WL_ERR("could not get assoc info (%d)\n", err);
+ return err;
+ }
+ assoc_info =
+ (struct brcmf_cfg80211_assoc_ielen_le *)cfg_priv->extra_buf;
+ req_len = le32_to_cpu(assoc_info->req_len);
+ resp_len = le32_to_cpu(assoc_info->resp_len);
+ if (req_len) {
+ err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies",
+ cfg_priv->extra_buf,
+ WL_ASSOC_INFO_MAX);
+ if (err) {
+ WL_ERR("could not get assoc req (%d)\n", err);
+ return err;
+ }
+ conn_info->req_ie_len = req_len;
+ conn_info->req_ie =
+ kmemdup(cfg_priv->extra_buf, conn_info->req_ie_len,
+ GFP_KERNEL);
+ } else {
+ conn_info->req_ie_len = 0;
+ conn_info->req_ie = NULL;
+ }
+ if (resp_len) {
+ err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies",
+ cfg_priv->extra_buf,
+ WL_ASSOC_INFO_MAX);
+ if (err) {
+ WL_ERR("could not get assoc resp (%d)\n", err);
+ return err;
+ }
+ conn_info->resp_ie_len = resp_len;
+ conn_info->resp_ie =
+ kmemdup(cfg_priv->extra_buf, conn_info->resp_ie_len,
+ GFP_KERNEL);
+ } else {
+ conn_info->resp_ie_len = 0;
+ conn_info->resp_ie = NULL;
+ }
+ WL_CONN("req len (%d) resp len (%d)\n",
+ conn_info->req_ie_len, conn_info->resp_ie_len);
+
+ return err;
+}
+
+static s32
+brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e)
+{
+ struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
+ struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
+ struct brcmf_channel_info_le channel_le;
+ struct ieee80211_channel *notify_channel;
+ struct ieee80211_supported_band *band;
+ u32 freq;
+ s32 err = 0;
+ u32 target_channel;
+
+ WL_TRACE("Enter\n");
+
+ brcmf_get_assoc_ies(cfg_priv);
+ brcmf_update_prof(cfg_priv, NULL, &e->addr, WL_PROF_BSSID);
+ brcmf_update_bss_info(cfg_priv);
+
+ brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le,
+ sizeof(channel_le));
+
+ target_channel = le32_to_cpu(channel_le.target_channel);
+ WL_CONN("Roamed to channel %d\n", target_channel);
+
+ if (target_channel <= CH_MAX_2G_CHANNEL)
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ else
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ freq = ieee80211_channel_to_frequency(target_channel, band->band);
+ notify_channel = ieee80211_get_channel(wiphy, freq);
+
+ cfg80211_roamed(ndev, notify_channel,
+ (u8 *)brcmf_read_prof(cfg_priv, WL_PROF_BSSID),
+ conn_info->req_ie, conn_info->req_ie_len,
+ conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
+ WL_CONN("Report roaming result\n");
+
+ set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev, const struct brcmf_event_msg *e,
+ bool completed)
+{
+ struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+
+ if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) {
+ if (completed) {
+ brcmf_get_assoc_ies(cfg_priv);
+ brcmf_update_prof(cfg_priv, NULL, &e->addr,
+ WL_PROF_BSSID);
+ brcmf_update_bss_info(cfg_priv);
+ }
+ cfg80211_connect_result(ndev,
+ (u8 *)brcmf_read_prof(cfg_priv,
+ WL_PROF_BSSID),
+ conn_info->req_ie,
+ conn_info->req_ie_len,
+ conn_info->resp_ie,
+ conn_info->resp_ie_len,
+ completed ? WLAN_STATUS_SUCCESS :
+ WLAN_STATUS_AUTH_TIMEOUT,
+ GFP_KERNEL);
+ if (completed)
+ set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+ WL_CONN("Report connect result - connection %s\n",
+ completed ? "succeeded" : "failed");
+ }
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_notify_connect_status(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
+{
+ s32 err = 0;
+
+ if (brcmf_is_linkup(cfg_priv, e)) {
+ WL_CONN("Linkup\n");
+ if (brcmf_is_ibssmode(cfg_priv)) {
+ brcmf_update_prof(cfg_priv, NULL, (void *)e->addr,
+ WL_PROF_BSSID);
+ wl_inform_ibss(cfg_priv, ndev, e->addr);
+ cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
+ clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+ set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+ } else
+ brcmf_bss_connect_done(cfg_priv, ndev, e, true);
+ } else if (brcmf_is_linkdown(cfg_priv, e)) {
+ WL_CONN("Linkdown\n");
+ if (brcmf_is_ibssmode(cfg_priv)) {
+ clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+ if (test_and_clear_bit(WL_STATUS_CONNECTED,
+ &cfg_priv->status))
+ brcmf_link_down(cfg_priv);
+ } else {
+ brcmf_bss_connect_done(cfg_priv, ndev, e, false);
+ if (test_and_clear_bit(WL_STATUS_CONNECTED,
+ &cfg_priv->status)) {
+ cfg80211_disconnected(ndev, 0, NULL, 0,
+ GFP_KERNEL);
+ brcmf_link_down(cfg_priv);
+ }
+ }
+ brcmf_init_prof(cfg_priv->profile);
+ } else if (brcmf_is_nonetwork(cfg_priv, e)) {
+ if (brcmf_is_ibssmode(cfg_priv))
+ clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+ else
+ brcmf_bss_connect_done(cfg_priv, ndev, e, false);
+ }
+
+ return err;
+}
+
+static s32
+brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
+{
+ s32 err = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ u32 status = be32_to_cpu(e->status);
+
+ if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
+ if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status))
+ brcmf_bss_roaming_done(cfg_priv, ndev, e);
+ else
+ brcmf_bss_connect_done(cfg_priv, ndev, e, true);
+ }
+
+ return err;
+}
+
+static s32
+brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
+{
+ u16 flags = be16_to_cpu(e->flags);
+ enum nl80211_key_type key_type;
+
+ if (flags & BRCMF_EVENT_MSG_GROUP)
+ key_type = NL80211_KEYTYPE_GROUP;
+ else
+ key_type = NL80211_KEYTYPE_PAIRWISE;
+
+ cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1,
+ NULL, GFP_KERNEL);
+
+ return 0;
+}
+
+static s32
+brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
+{
+ struct brcmf_channel_info_le channel_inform_le;
+ struct brcmf_scan_results_le *bss_list_le;
+ u32 len = WL_SCAN_BUF_MAX;
+ s32 err = 0;
+ bool scan_abort = false;
+ u32 scan_channel;
+
+ WL_TRACE("Enter\n");
+
+ if (cfg_priv->iscan_on && cfg_priv->iscan_kickstart) {
+ WL_TRACE("Exit\n");
+ return brcmf_wakeup_iscan(cfg_to_iscan(cfg_priv));
+ }
+
+ if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) {
+ WL_ERR("Scan complete while device not scanning\n");
+ scan_abort = true;
+ err = -EINVAL;
+ goto scan_done_out;
+ }
+
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_inform_le,
+ sizeof(channel_inform_le));
+ if (err) {
+ WL_ERR("scan busy (%d)\n", err);
+ scan_abort = true;
+ goto scan_done_out;
+ }
+ scan_channel = le32_to_cpu(channel_inform_le.scan_channel);
+ if (scan_channel)
+ WL_CONN("channel_inform.scan_channel (%d)\n", scan_channel);
+ cfg_priv->bss_list = cfg_priv->scan_results;
+ bss_list_le = (struct brcmf_scan_results_le *) cfg_priv->bss_list;
+
+ memset(cfg_priv->scan_results, 0, len);
+ bss_list_le->buflen = cpu_to_le32(len);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS,
+ cfg_priv->scan_results, len);
+ if (err) {
+ WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
+ err = -EINVAL;
+ scan_abort = true;
+ goto scan_done_out;
+ }
+ cfg_priv->scan_results->buflen = le32_to_cpu(bss_list_le->buflen);
+ cfg_priv->scan_results->version = le32_to_cpu(bss_list_le->version);
+ cfg_priv->scan_results->count = le32_to_cpu(bss_list_le->count);
+
+ err = brcmf_inform_bss(cfg_priv);
+ if (err) {
+ scan_abort = true;
+ goto scan_done_out;
+ }
+
+scan_done_out:
+ if (cfg_priv->scan_request) {
+ WL_SCAN("calling cfg80211_scan_done\n");
+ cfg80211_scan_done(cfg_priv->scan_request, scan_abort);
+ brcmf_set_mpc(ndev, 1);
+ cfg_priv->scan_request = NULL;
+ }
+
+ WL_TRACE("Exit\n");
+
+ return err;
+}
+
+static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf)
+{
+ conf->mode = (u32)-1;
+ conf->frag_threshold = (u32)-1;
+ conf->rts_threshold = (u32)-1;
+ conf->retry_short = (u32)-1;
+ conf->retry_long = (u32)-1;
+ conf->tx_power = -1;
+}
+
+static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el)
+{
+ memset(el, 0, sizeof(*el));
+ el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status;
+ el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status;
+ el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status;
+ el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status;
+ el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status;
+}
+
+static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ kfree(cfg_priv->scan_results);
+ cfg_priv->scan_results = NULL;
+ kfree(cfg_priv->bss_info);
+ cfg_priv->bss_info = NULL;
+ kfree(cfg_priv->conf);
+ cfg_priv->conf = NULL;
+ kfree(cfg_priv->profile);
+ cfg_priv->profile = NULL;
+ kfree(cfg_priv->scan_req_int);
+ cfg_priv->scan_req_int = NULL;
+ kfree(cfg_priv->dcmd_buf);
+ cfg_priv->dcmd_buf = NULL;
+ kfree(cfg_priv->extra_buf);
+ cfg_priv->extra_buf = NULL;
+ kfree(cfg_priv->iscan);
+ cfg_priv->iscan = NULL;
+ kfree(cfg_priv->pmk_list);
+ cfg_priv->pmk_list = NULL;
+}
+
+static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ cfg_priv->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+ if (!cfg_priv->scan_results)
+ goto init_priv_mem_out;
+ cfg_priv->conf = kzalloc(sizeof(*cfg_priv->conf), GFP_KERNEL);
+ if (!cfg_priv->conf)
+ goto init_priv_mem_out;
+ cfg_priv->profile = kzalloc(sizeof(*cfg_priv->profile), GFP_KERNEL);
+ if (!cfg_priv->profile)
+ goto init_priv_mem_out;
+ cfg_priv->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+ if (!cfg_priv->bss_info)
+ goto init_priv_mem_out;
+ cfg_priv->scan_req_int = kzalloc(sizeof(*cfg_priv->scan_req_int),
+ GFP_KERNEL);
+ if (!cfg_priv->scan_req_int)
+ goto init_priv_mem_out;
+ cfg_priv->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL);
+ if (!cfg_priv->dcmd_buf)
+ goto init_priv_mem_out;
+ cfg_priv->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+ if (!cfg_priv->extra_buf)
+ goto init_priv_mem_out;
+ cfg_priv->iscan = kzalloc(sizeof(*cfg_priv->iscan), GFP_KERNEL);
+ if (!cfg_priv->iscan)
+ goto init_priv_mem_out;
+ cfg_priv->pmk_list = kzalloc(sizeof(*cfg_priv->pmk_list), GFP_KERNEL);
+ if (!cfg_priv->pmk_list)
+ goto init_priv_mem_out;
+
+ return 0;
+
+init_priv_mem_out:
+ brcmf_deinit_priv_mem(cfg_priv);
+
+ return -ENOMEM;
+}
+
+/*
+* retrieve first queued event from head
+*/
+
+static struct brcmf_cfg80211_event_q *brcmf_deq_event(
+ struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct brcmf_cfg80211_event_q *e = NULL;
+
+ spin_lock_irq(&cfg_priv->evt_q_lock);
+ if (!list_empty(&cfg_priv->evt_q_list)) {
+ e = list_first_entry(&cfg_priv->evt_q_list,
+ struct brcmf_cfg80211_event_q, evt_q_list);
+ list_del(&e->evt_q_list);
+ }
+ spin_unlock_irq(&cfg_priv->evt_q_lock);
+
+ return e;
+}
+
+/*
+** push event to tail of the queue
+*/
+
+static s32
+brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 event,
+ const struct brcmf_event_msg *msg)
+{
+ struct brcmf_cfg80211_event_q *e;
+ s32 err = 0;
+
+ e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->etype = event;
+ memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg));
+
+ spin_lock_irq(&cfg_priv->evt_q_lock);
+ list_add_tail(&e->evt_q_list, &cfg_priv->evt_q_list);
+ spin_unlock_irq(&cfg_priv->evt_q_lock);
+
+ return err;
+}
+
+static void brcmf_put_event(struct brcmf_cfg80211_event_q *e)
+{
+ kfree(e);
+}
+
+static void brcmf_cfg80211_event_handler(struct work_struct *work)
+{
+ struct brcmf_cfg80211_priv *cfg_priv =
+ container_of(work, struct brcmf_cfg80211_priv,
+ event_work);
+ struct brcmf_cfg80211_event_q *e;
+
+ e = brcmf_deq_event(cfg_priv);
+ if (unlikely(!e)) {
+ WL_ERR("event queue empty...\n");
+ return;
+ }
+
+ do {
+ WL_INFO("event type (%d)\n", e->etype);
+ if (cfg_priv->el.handler[e->etype])
+ cfg_priv->el.handler[e->etype](cfg_priv,
+ cfg_to_ndev(cfg_priv),
+ &e->emsg, e->edata);
+ else
+ WL_INFO("Unknown Event (%d): ignoring\n", e->etype);
+ brcmf_put_event(e);
+ } while ((e = brcmf_deq_event(cfg_priv)));
+
+}
+
+static void brcmf_init_eq(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ spin_lock_init(&cfg_priv->evt_q_lock);
+ INIT_LIST_HEAD(&cfg_priv->evt_q_list);
+}
+
+static void brcmf_flush_eq(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct brcmf_cfg80211_event_q *e;
+
+ spin_lock_irq(&cfg_priv->evt_q_lock);
+ while (!list_empty(&cfg_priv->evt_q_list)) {
+ e = list_first_entry(&cfg_priv->evt_q_list,
+ struct brcmf_cfg80211_event_q, evt_q_list);
+ list_del(&e->evt_q_list);
+ kfree(e);
+ }
+ spin_unlock_irq(&cfg_priv->evt_q_lock);
+}
+
+static s32 wl_init_priv(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ s32 err = 0;
+
+ cfg_priv->scan_request = NULL;
+ cfg_priv->pwr_save = true;
+ cfg_priv->iscan_on = true; /* iscan on & off switch.
+ we enable iscan per default */
+ cfg_priv->roam_on = true; /* roam on & off switch.
+ we enable roam per default */
+
+ cfg_priv->iscan_kickstart = false;
+ cfg_priv->active_scan = true; /* we do active scan for
+ specific scan per default */
+ cfg_priv->dongle_up = false; /* dongle is not up yet */
+ brcmf_init_eq(cfg_priv);
+ err = brcmf_init_priv_mem(cfg_priv);
+ if (err)
+ return err;
+ INIT_WORK(&cfg_priv->event_work, brcmf_cfg80211_event_handler);
+ brcmf_init_eloop_handler(&cfg_priv->el);
+ mutex_init(&cfg_priv->usr_sync);
+ err = brcmf_init_iscan(cfg_priv);
+ if (err)
+ return err;
+ brcmf_init_conf(cfg_priv->conf);
+ brcmf_init_prof(cfg_priv->profile);
+ brcmf_link_down(cfg_priv);
+
+ return err;
+}
+
+static void wl_deinit_priv(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ cancel_work_sync(&cfg_priv->event_work);
+ cfg_priv->dongle_up = false; /* dongle down */
+ brcmf_flush_eq(cfg_priv);
+ brcmf_link_down(cfg_priv);
+ brcmf_term_iscan(cfg_priv);
+ brcmf_deinit_priv_mem(cfg_priv);
+}
+
+struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev,
+ struct device *busdev,
+ void *data)
+{
+ struct wireless_dev *wdev;
+ struct brcmf_cfg80211_priv *cfg_priv;
+ struct brcmf_cfg80211_iface *ci;
+ struct brcmf_cfg80211_dev *cfg_dev;
+ s32 err = 0;
+
+ if (!ndev) {
+ WL_ERR("ndev is invalid\n");
+ return NULL;
+ }
+ cfg_dev = kzalloc(sizeof(struct brcmf_cfg80211_dev), GFP_KERNEL);
+ if (!cfg_dev)
+ return NULL;
+
+ wdev = brcmf_alloc_wdev(sizeof(struct brcmf_cfg80211_iface), busdev);
+ if (IS_ERR(wdev)) {
+ kfree(cfg_dev);
+ return NULL;
+ }
+
+ wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS);
+ cfg_priv = wdev_to_cfg(wdev);
+ cfg_priv->wdev = wdev;
+ cfg_priv->pub = data;
+ ci = (struct brcmf_cfg80211_iface *)&cfg_priv->ci;
+ ci->cfg_priv = cfg_priv;
+ ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = ndev;
+ err = wl_init_priv(cfg_priv);
+ if (err) {
+ WL_ERR("Failed to init iwm_priv (%d)\n", err);
+ goto cfg80211_attach_out;
+ }
+ brcmf_set_drvdata(cfg_dev, ci);
+
+ return cfg_dev;
+
+cfg80211_attach_out:
+ brcmf_free_wdev(cfg_priv);
+ kfree(cfg_dev);
+ return NULL;
+}
+
+void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev *cfg_dev)
+{
+ struct brcmf_cfg80211_priv *cfg_priv;
+
+ cfg_priv = brcmf_priv_get(cfg_dev);
+
+ wl_deinit_priv(cfg_priv);
+ brcmf_free_wdev(cfg_priv);
+ brcmf_set_drvdata(cfg_dev, NULL);
+ kfree(cfg_dev);
+}
+
+void
+brcmf_cfg80211_event(struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
+{
+ u32 event_type = be32_to_cpu(e->event_type);
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+
+ if (!brcmf_enq_event(cfg_priv, event_type, e))
+ schedule_work(&cfg_priv->event_work);
+}
+
+static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
+{
+ s32 infra = 0;
+ s32 err = 0;
+
+ switch (iftype) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_WDS:
+ WL_ERR("type (%d) : currently we do not support this mode\n",
+ iftype);
+ err = -EINVAL;
+ return err;
+ case NL80211_IFTYPE_ADHOC:
+ infra = 0;
+ break;
+ case NL80211_IFTYPE_STATION:
+ infra = 1;
+ break;
+ default:
+ err = -EINVAL;
+ WL_ERR("invalid type (%d)\n", iftype);
+ return err;
+ }
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
+ if (err) {
+ WL_ERR("WLC_SET_INFRA error (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
+{
+ /* Room for "event_msgs" + '\0' + bitvec */
+ s8 iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
+ s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+ s32 err = 0;
+
+ WL_TRACE("Enter\n");
+
+ /* Setup event_msgs */
+ brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
+ iovbuf, sizeof(iovbuf));
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, iovbuf, sizeof(iovbuf));
+ if (err) {
+ WL_ERR("Get event_msgs error (%d)\n", err);
+ goto dongle_eventmsg_out;
+ }
+ memcpy(eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
+
+ setbit(eventmask, BRCMF_E_SET_SSID);
+ setbit(eventmask, BRCMF_E_ROAM);
+ setbit(eventmask, BRCMF_E_PRUNE);
+ setbit(eventmask, BRCMF_E_AUTH);
+ setbit(eventmask, BRCMF_E_REASSOC);
+ setbit(eventmask, BRCMF_E_REASSOC_IND);
+ setbit(eventmask, BRCMF_E_DEAUTH_IND);
+ setbit(eventmask, BRCMF_E_DISASSOC_IND);
+ setbit(eventmask, BRCMF_E_DISASSOC);
+ setbit(eventmask, BRCMF_E_JOIN);
+ setbit(eventmask, BRCMF_E_ASSOC_IND);
+ setbit(eventmask, BRCMF_E_PSK_SUP);
+ setbit(eventmask, BRCMF_E_LINK);
+ setbit(eventmask, BRCMF_E_NDIS_LINK);
+ setbit(eventmask, BRCMF_E_MIC_ERROR);
+ setbit(eventmask, BRCMF_E_PMKID_CACHE);
+ setbit(eventmask, BRCMF_E_TXFAIL);
+ setbit(eventmask, BRCMF_E_JOIN_START);
+ setbit(eventmask, BRCMF_E_SCAN_COMPLETE);
+
+ brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
+ iovbuf, sizeof(iovbuf));
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
+ if (err) {
+ WL_ERR("Set event_msgs error (%d)\n", err);
+ goto dongle_eventmsg_out;
+ }
+
+dongle_eventmsg_out:
+ WL_TRACE("Exit\n");
+ return err;
+}
+
+static s32
+brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
+{
+ s8 iovbuf[32];
+ s32 err = 0;
+ __le32 roamtrigger[2];
+ __le32 roam_delta[2];
+ __le32 bcn_to_le;
+ __le32 roamvar_le;
+
+ /*
+ * Setup timeout if Beacons are lost and roam is
+ * off to report link down
+ */
+ if (roamvar) {
+ bcn_to_le = cpu_to_le32(bcn_timeout);
+ brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_to_le,
+ sizeof(bcn_to_le), iovbuf, sizeof(iovbuf));
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR,
+ iovbuf, sizeof(iovbuf));
+ if (err) {
+ WL_ERR("bcn_timeout error (%d)\n", err);
+ goto dongle_rom_out;
+ }
+ }
+
+ /*
+ * Enable/Disable built-in roaming to allow supplicant
+ * to take care of roaming
+ */
+ WL_INFO("Internal Roaming = %s\n", roamvar ? "Off" : "On");
+ roamvar_le = cpu_to_le32(roamvar);
+ brcmf_c_mkiovar("roam_off", (char *)&roamvar_le,
+ sizeof(roamvar_le), iovbuf, sizeof(iovbuf));
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
+ if (err) {
+ WL_ERR("roam_off error (%d)\n", err);
+ goto dongle_rom_out;
+ }
+
+ roamtrigger[0] = cpu_to_le32(WL_ROAM_TRIGGER_LEVEL);
+ roamtrigger[1] = cpu_to_le32(BRCM_BAND_ALL);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_TRIGGER,
+ (void *)roamtrigger, sizeof(roamtrigger));
+ if (err) {
+ WL_ERR("WLC_SET_ROAM_TRIGGER error (%d)\n", err);
+ goto dongle_rom_out;
+ }
+
+ roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA);
+ roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_DELTA,
+ (void *)roam_delta, sizeof(roam_delta));
+ if (err) {
+ WL_ERR("WLC_SET_ROAM_DELTA error (%d)\n", err);
+ goto dongle_rom_out;
+ }
+
+dongle_rom_out:
+ return err;
+}
+
+static s32
+brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
+ s32 scan_unassoc_time, s32 scan_passive_time)
+{
+ s32 err = 0;
+ __le32 scan_assoc_tm_le = cpu_to_le32(scan_assoc_time);
+ __le32 scan_unassoc_tm_le = cpu_to_le32(scan_unassoc_time);
+ __le32 scan_passive_tm_le = cpu_to_le32(scan_passive_time);
+
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+ &scan_assoc_tm_le, sizeof(scan_assoc_tm_le));
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ WL_INFO("Scan assoc time is not supported\n");
+ else
+ WL_ERR("Scan assoc time error (%d)\n", err);
+ goto dongle_scantime_out;
+ }
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+ &scan_unassoc_tm_le, sizeof(scan_unassoc_tm_le));
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ WL_INFO("Scan unassoc time is not supported\n");
+ else
+ WL_ERR("Scan unassoc time error (%d)\n", err);
+ goto dongle_scantime_out;
+ }
+
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_PASSIVE_TIME,
+ &scan_passive_tm_le, sizeof(scan_passive_tm_le));
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ WL_INFO("Scan passive time is not supported\n");
+ else
+ WL_ERR("Scan passive time error (%d)\n", err);
+ goto dongle_scantime_out;
+ }
+
+dongle_scantime_out:
+ return err;
+}
+
+static s32 wl_update_wiphybands(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct wiphy *wiphy;
+ s32 phy_list;
+ s8 phy;
+ s32 err = 0;
+
+ err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCM_GET_PHYLIST,
+ &phy_list, sizeof(phy_list));
+ if (err) {
+ WL_ERR("error (%d)\n", err);
+ return err;
+ }
+
+ phy = ((char *)&phy_list)[1];
+ WL_INFO("%c phy\n", phy);
+ if (phy == 'n' || phy == 'a') {
+ wiphy = cfg_to_wiphy(cfg_priv);
+ wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
+ }
+
+ return err;
+}
+
+static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ return wl_update_wiphybands(cfg_priv);
+}
+
+static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ s32 power_mode;
+ s32 err = 0;
+
+ if (cfg_priv->dongle_up)
+ return err;
+
+ ndev = cfg_to_ndev(cfg_priv);
+ wdev = ndev->ieee80211_ptr;
+
+ brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
+ WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME);
+
+ err = brcmf_dongle_eventmsg(ndev);
+ if (err)
+ goto default_conf_out;
+
+ power_mode = cfg_priv->pwr_save ? PM_FAST : PM_OFF;
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode);
+ if (err)
+ goto default_conf_out;
+ WL_INFO("power save set to %s\n",
+ (power_mode ? "enabled" : "disabled"));
+
+ err = brcmf_dongle_roam(ndev, (cfg_priv->roam_on ? 0 : 1),
+ WL_BEACON_TIMEOUT);
+ if (err)
+ goto default_conf_out;
+ err = brcmf_dongle_mode(ndev, wdev->iftype);
+ if (err && err != -EINPROGRESS)
+ goto default_conf_out;
+ err = brcmf_dongle_probecap(cfg_priv);
+ if (err)
+ goto default_conf_out;
+
+ /* -EINPROGRESS: Call commit handler */
+
+default_conf_out:
+
+ cfg_priv->dongle_up = true;
+
+ return err;
+
+}
+
+static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ char buf[10+IFNAMSIZ];
+ struct dentry *fd;
+ s32 err = 0;
+
+ sprintf(buf, "netdev:%s", cfg_to_ndev(cfg_priv)->name);
+ cfg_priv->debugfsdir = debugfs_create_dir(buf,
+ cfg_to_wiphy(cfg_priv)->debugfsdir);
+
+ fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg_priv->debugfsdir,
+ (u16 *)&cfg_priv->profile->beacon_interval);
+ if (!fd) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg_priv->debugfsdir,
+ (u8 *)&cfg_priv->profile->dtim_period);
+ if (!fd) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+err_out:
+ return err;
+}
+
+static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ debugfs_remove_recursive(cfg_priv->debugfsdir);
+ cfg_priv->debugfsdir = NULL;
+}
+
+static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ s32 err = 0;
+
+ set_bit(WL_STATUS_READY, &cfg_priv->status);
+
+ brcmf_debugfs_add_netdev_params(cfg_priv);
+
+ err = brcmf_config_dongle(cfg_priv);
+ if (err)
+ return err;
+
+ brcmf_invoke_iscan(cfg_priv);
+
+ return err;
+}
+
+static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ /*
+ * While going down, if associated with AP disassociate
+ * from AP to save power
+ */
+ if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) ||
+ test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) &&
+ test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+ WL_INFO("Disassociating from AP");
+ brcmf_link_down(cfg_priv);
+
+ /* Make sure WPA_Supplicant receives all the event
+ generated due to DISASSOC call to the fw to keep
+ the state fw and WPA_Supplicant state consistent
+ */
+ brcmf_delay(500);
+ }
+
+ set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
+ brcmf_term_iscan(cfg_priv);
+ if (cfg_priv->scan_request) {
+ cfg80211_scan_done(cfg_priv->scan_request, true);
+ /* May need to perform this to cover rmmod */
+ /* wl_set_mpc(cfg_to_ndev(wl), 1); */
+ cfg_priv->scan_request = NULL;
+ }
+ clear_bit(WL_STATUS_READY, &cfg_priv->status);
+ clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
+ clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
+
+ brcmf_debugfs_remove_netdev(cfg_priv);
+
+ return 0;
+}
+
+s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev)
+{
+ struct brcmf_cfg80211_priv *cfg_priv;
+ s32 err = 0;
+
+ cfg_priv = brcmf_priv_get(cfg_dev);
+ mutex_lock(&cfg_priv->usr_sync);
+ err = __brcmf_cfg80211_up(cfg_priv);
+ mutex_unlock(&cfg_priv->usr_sync);
+
+ return err;
+}
+
+s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev)
+{
+ struct brcmf_cfg80211_priv *cfg_priv;
+ s32 err = 0;
+
+ cfg_priv = brcmf_priv_get(cfg_dev);
+ mutex_lock(&cfg_priv->usr_sync);
+ err = __brcmf_cfg80211_down(cfg_priv);
+ mutex_unlock(&cfg_priv->usr_sync);
+
+ return err;
+}
+
+static __used s32 brcmf_add_ie(struct brcmf_cfg80211_priv *cfg_priv,
+ u8 t, u8 l, u8 *v)
+{
+ struct brcmf_cfg80211_ie *ie = &cfg_priv->ie;
+ s32 err = 0;
+
+ if (ie->offset + l + 2 > WL_TLV_INFO_MAX) {
+ WL_ERR("ei crosses buffer boundary\n");
+ return -ENOSPC;
+ }
+ ie->buf[ie->offset] = t;
+ ie->buf[ie->offset + 1] = l;
+ memcpy(&ie->buf[ie->offset + 2], v, l);
+ ie->offset += l + 2;
+
+ return err;
+}
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index f26d08793ca..62dc46144ed 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -103,10 +103,10 @@ do { \
* report it to cfg80211 through "connect"
* event
*/
-#define WL_IOCTL_LEN_MAX 1024
+#define WL_DCMD_LEN_MAX 1024
#define WL_EXTRA_BUF_MAX 2048
#define WL_ISCAN_BUF_MAX 2048 /*
- * the buf length can be BRCMF_C_IOCTL_MAXLEN
+ * the buf length can be BRCMF_DCMD_MAXLEN
* to reduce iteration
*/
#define WL_ISCAN_TIMER_INTERVAL_MS 3000
@@ -188,7 +188,7 @@ struct brcmf_cfg80211_dev {
/* basic structure of scan request */
struct brcmf_cfg80211_scan_req {
- struct brcmf_ssid ssid;
+ struct brcmf_ssid_le ssid_le;
};
/* basic structure of information element */
@@ -199,7 +199,7 @@ struct brcmf_cfg80211_ie {
/* event queue for cfg80211 main event */
struct brcmf_cfg80211_event_q {
- struct list_head eq_list;
+ struct list_head evt_q_list;
u32 etype;
struct brcmf_event_msg emsg;
s8 edata[1];
@@ -243,16 +243,15 @@ struct brcmf_cfg80211_iscan_eloop {
/* dongle iscan controller */
struct brcmf_cfg80211_iscan_ctrl {
- struct net_device *dev;
+ struct net_device *ndev;
struct timer_list timer;
u32 timer_ms;
u32 timer_on;
s32 state;
- struct task_struct *tsk;
- struct semaphore sync;
+ struct work_struct work;
struct brcmf_cfg80211_iscan_eloop el;
void *data;
- s8 ioctl_buf[BRCMF_C_IOCTL_SMLEN];
+ s8 dcmd_buf[BRCMF_DCMD_SMLEN];
s8 scan_buf[WL_ISCAN_BUF_MAX];
};
@@ -265,15 +264,15 @@ struct brcmf_cfg80211_connect_info {
};
/* assoc ie length */
-struct brcmf_cfg80211_assoc_ielen {
- u32 req_len;
- u32 resp_len;
+struct brcmf_cfg80211_assoc_ielen_le {
+ __le32 req_len;
+ __le32 resp_len;
};
/* wpa2 pmk list */
struct brcmf_cfg80211_pmk_list {
- pmkid_list_t pmkids;
- pmkid_t foo[MAXPMKID - 1];
+ struct pmkid_list pmkids;
+ struct pmkid foo[MAXPMKID - 1];
};
/* dongle private data of cfg80211 interface */
@@ -283,8 +282,8 @@ struct brcmf_cfg80211_priv {
struct cfg80211_scan_request *scan_request; /* scan request
object */
struct brcmf_cfg80211_event_loop el; /* main event loop */
- struct list_head eq_list; /* used for event queue */
- spinlock_t eq_lock; /* for event queue synchronization */
+ struct list_head evt_q_list; /* used for event queue */
+ spinlock_t evt_q_lock; /* for event queue synchronization */
struct mutex usr_sync; /* maily for dongle up/down synchronization */
struct brcmf_scan_results *bss_list; /* bss_list holding scanned
ap information */
@@ -295,13 +294,11 @@ struct brcmf_cfg80211_priv {
cfg80211 layer */
struct brcmf_cfg80211_ie ie; /* information element object for
internal purpose */
- struct semaphore event_sync; /* for synchronization of main event
- thread */
struct brcmf_cfg80211_profile *profile; /* holding dongle profile */
struct brcmf_cfg80211_iscan_ctrl *iscan; /* iscan controller */
struct brcmf_cfg80211_connect_info conn_info; /* association info */
struct brcmf_cfg80211_pmk_list *pmk_list; /* wpa2 pmk list */
- struct task_struct *event_tsk; /* task of main event handler thread */
+ struct work_struct event_work; /* event handler work struct */
unsigned long status; /* current dongle status */
void *pub;
u32 channel; /* current channel */
@@ -315,21 +312,45 @@ struct brcmf_cfg80211_priv {
bool dongle_up; /* indicate whether dongle up or not */
bool roam_on; /* on/off switch for dongle self-roaming */
bool scan_tried; /* indicates if first scan attempted */
- u8 *ioctl_buf; /* ioctl buffer */
- u8 *extra_buf; /* maily to grab assoc information */
+ u8 *dcmd_buf; /* dcmd buffer */
+ u8 *extra_buf; /* maily to grab assoc information */
struct dentry *debugfsdir;
- u8 ci[0] __attribute__ ((__aligned__(NETDEV_ALIGN)));
+ u8 ci[0] __aligned(NETDEV_ALIGN);
};
-#define cfg_to_wiphy(w) (w->wdev->wiphy)
-#define wiphy_to_cfg(w) ((struct brcmf_cfg80211_priv *)(wiphy_priv(w)))
-#define cfg_to_wdev(w) (w->wdev)
-#define wdev_to_cfg(w) ((struct brcmf_cfg80211_priv *)(wdev_priv(w)))
-#define cfg_to_ndev(w) (w->wdev->netdev)
-#define ndev_to_cfg(n) (wdev_to_cfg(n->ieee80211_ptr))
+static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_priv *w)
+{
+ return w->wdev->wiphy;
+}
+
+static inline struct brcmf_cfg80211_priv *wiphy_to_cfg(struct wiphy *w)
+{
+ return (struct brcmf_cfg80211_priv *)(wiphy_priv(w));
+}
+
+static inline struct brcmf_cfg80211_priv *wdev_to_cfg(struct wireless_dev *wd)
+{
+ return (struct brcmf_cfg80211_priv *)(wdev_priv(wd));
+}
+
+static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_priv *cfg)
+{
+ return cfg->wdev->netdev;
+}
+
+static inline struct brcmf_cfg80211_priv *ndev_to_cfg(struct net_device *ndev)
+{
+ return wdev_to_cfg(ndev->ieee80211_ptr);
+}
+
#define iscan_to_cfg(i) ((struct brcmf_cfg80211_priv *)(i->data))
#define cfg_to_iscan(w) (w->iscan)
-#define cfg_to_conn(w) (&w->conn_info)
+
+static inline struct
+brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_priv *cfg)
+{
+ return &cfg->conn_info;
+}
static inline struct brcmf_bss_info *next_bss(struct brcmf_scan_results *list,
struct brcmf_bss_info *bss)
@@ -340,17 +361,15 @@ static inline struct brcmf_bss_info *next_bss(struct brcmf_scan_results *list,
list->bss_info;
}
-#define for_each_bss(list, bss, __i) \
- for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss))
+extern struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev,
+ struct device *busdev,
+ void *data);
+extern void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev *cfg);
-extern s32 brcmf_cfg80211_attach(struct net_device *ndev, void *data);
-extern void brcmf_cfg80211_detach(void);
/* event handler from dongle */
extern void brcmf_cfg80211_event(struct net_device *ndev,
const struct brcmf_event_msg *e, void *data);
-extern void brcmf_cfg80211_sdio_func(void *func); /* set sdio function info */
-extern struct sdio_func *brcmf_cfg80211_get_sdio_func(void);
-extern s32 brcmf_cfg80211_up(void); /* dongle up */
-extern s32 brcmf_cfg80211_down(void); /* dongle down */
+extern s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev);
+extern s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev);
#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
new file mode 100644
index 00000000000..c2eb2d0af38
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
@@ -0,0 +1,51 @@
+#
+# Makefile fragment for Broadcom 802.11n Networking Device Driver
+#
+# Copyright (c) 2010 Broadcom Corporation
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ccflags-y := \
+ -D__CHECK_ENDIAN__ \
+ -Idrivers/net/wireless/brcm80211/brcmsmac \
+ -Idrivers/net/wireless/brcm80211/brcmsmac/phy \
+ -Idrivers/net/wireless/brcm80211/include
+
+BRCMSMAC_OFILES := \
+ mac80211_if.o \
+ ucode_loader.o \
+ ampdu.o \
+ antsel.o \
+ channel.o \
+ main.o \
+ phy_shim.o \
+ pmu.o \
+ rate.o \
+ stf.o \
+ aiutils.o \
+ phy/phy_cmn.o \
+ phy/phy_lcn.o \
+ phy/phy_n.o \
+ phy/phytbl_lcn.o \
+ phy/phytbl_n.o \
+ phy/phy_qmath.o \
+ otp.o \
+ srom.o \
+ dma.o \
+ nicpci.o \
+ brcms_trace_events.o
+
+MODULEPFX := brcmsmac
+
+obj-$(CONFIG_BRCMSMAC) += $(MODULEPFX).o
+$(MODULEPFX)-objs = $(BRCMSMAC_OFILES)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
new file mode 100644
index 00000000000..025fa0eb6f4
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -0,0 +1,2079 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * File contents: support functions for PCI/PCIe
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <defs.h>
+#include <chipcommon.h>
+#include <brcmu_utils.h>
+#include <brcm_hw_ids.h>
+#include <soc.h>
+#include "types.h"
+#include "pub.h"
+#include "pmu.h"
+#include "srom.h"
+#include "nicpci.h"
+#include "aiutils.h"
+
+/* slow_clk_ctl */
+ /* slow clock source mask */
+#define SCC_SS_MASK 0x00000007
+ /* source of slow clock is LPO */
+#define SCC_SS_LPO 0x00000000
+ /* source of slow clock is crystal */
+#define SCC_SS_XTAL 0x00000001
+ /* source of slow clock is PCI */
+#define SCC_SS_PCI 0x00000002
+ /* LPOFreqSel, 1: 160Khz, 0: 32KHz */
+#define SCC_LF 0x00000200
+ /* LPOPowerDown, 1: LPO is disabled, 0: LPO is enabled */
+#define SCC_LP 0x00000400
+ /* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */
+#define SCC_FS 0x00000800
+ /* IgnorePllOffReq, 1/0:
+ * power logic ignores/honors PLL clock disable requests from core
+ */
+#define SCC_IP 0x00001000
+ /* XtalControlEn, 1/0:
+ * power logic does/doesn't disable crystal when appropriate
+ */
+#define SCC_XC 0x00002000
+ /* XtalPU (RO), 1/0: crystal running/disabled */
+#define SCC_XP 0x00004000
+ /* ClockDivider (SlowClk = 1/(4+divisor)) */
+#define SCC_CD_MASK 0xffff0000
+#define SCC_CD_SHIFT 16
+
+/* system_clk_ctl */
+ /* ILPen: Enable Idle Low Power */
+#define SYCC_IE 0x00000001
+ /* ALPen: Enable Active Low Power */
+#define SYCC_AE 0x00000002
+ /* ForcePLLOn */
+#define SYCC_FP 0x00000004
+ /* Force ALP (or HT if ALPen is not set */
+#define SYCC_AR 0x00000008
+ /* Force HT */
+#define SYCC_HR 0x00000010
+ /* ClkDiv (ILP = 1/(4 * (divisor + 1)) */
+#define SYCC_CD_MASK 0xffff0000
+#define SYCC_CD_SHIFT 16
+
+#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
+ /* OTP is powered up, use def. CIS, no SPROM */
+#define CST4329_DEFCIS_SEL 0
+ /* OTP is powered up, SPROM is present */
+#define CST4329_SPROM_SEL 1
+ /* OTP is powered up, no SPROM */
+#define CST4329_OTP_SEL 2
+ /* OTP is powered down, SPROM is present */
+#define CST4329_OTP_PWRDN 3
+
+#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT 2
+
+/* 43224 chip-specific ChipControl register bits */
+#define CCTRL43224_GPIO_TOGGLE 0x8000
+ /* 12 mA drive strength */
+#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0
+ /* 12 mA drive strength for later 43224s */
+#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0
+
+/* 43236 Chip specific ChipStatus register bits */
+#define CST43236_SFLASH_MASK 0x00000040
+#define CST43236_OTP_MASK 0x00000080
+#define CST43236_HSIC_MASK 0x00000100 /* USB/HSIC */
+#define CST43236_BP_CLK 0x00000200 /* 120/96Mbps */
+#define CST43236_BOOT_MASK 0x00001800
+#define CST43236_BOOT_SHIFT 11
+#define CST43236_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
+#define CST43236_BOOT_FROM_ROM 1 /* boot from ROM */
+#define CST43236_BOOT_FROM_FLASH 2 /* boot from FLASH */
+#define CST43236_BOOT_FROM_INVALID 3
+
+/* 4331 chip-specific ChipControl register bits */
+ /* 0 disable */
+#define CCTRL4331_BT_COEXIST (1<<0)
+ /* 0 SECI is disabled (JTAG functional) */
+#define CCTRL4331_SECI (1<<1)
+ /* 0 disable */
+#define CCTRL4331_EXT_LNA (1<<2)
+ /* sprom/gpio13-15 mux */
+#define CCTRL4331_SPROM_GPIO13_15 (1<<3)
+ /* 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_EXTPA_EN (1<<4)
+ /* set drive out GPIO_CLK on sprom_cs pin */
+#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5)
+ /* use sprom_cs pin as PCIE mdio interface */
+#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6)
+ /* aband extpa will be at gpio2/5 and sprom_dout */
+#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7)
+ /* override core control on pipe_AuxClkEnable */
+#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8)
+ /* override core control on pipe_AuxPowerDown */
+#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9)
+ /* pcie_auxclkenable */
+#define CCTRL4331_PCIE_AUXCLKEN (1<<10)
+ /* pcie_pipe_pllpowerdown */
+#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11)
+ /* enable bt_shd0 at gpio4 */
+#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16)
+ /* enable bt_shd1 at gpio5 */
+#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17)
+
+/* 4331 Chip specific ChipStatus register bits */
+ /* crystal frequency 20/40Mhz */
+#define CST4331_XTAL_FREQ 0x00000001
+#define CST4331_SPROM_PRESENT 0x00000002
+#define CST4331_OTP_PRESENT 0x00000004
+#define CST4331_LDO_RF 0x00000008
+#define CST4331_LDO_PAR 0x00000010
+
+/* 4319 chip-specific ChipStatus register bits */
+#define CST4319_SPI_CPULESSUSB 0x00000001
+#define CST4319_SPI_CLK_POL 0x00000002
+#define CST4319_SPI_CLK_PH 0x00000008
+ /* gpio [7:6], SDIO CIS selection */
+#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0
+#define CST4319_SPROM_OTP_SEL_SHIFT 6
+ /* use default CIS, OTP is powered up */
+#define CST4319_DEFCIS_SEL 0x00000000
+ /* use SPROM, OTP is powered up */
+#define CST4319_SPROM_SEL 0x00000040
+ /* use OTP, OTP is powered up */
+#define CST4319_OTP_SEL 0x00000080
+ /* use SPROM, OTP is powered down */
+#define CST4319_OTP_PWRDN 0x000000c0
+ /* gpio [8], sdio/usb mode */
+#define CST4319_SDIO_USB_MODE 0x00000100
+#define CST4319_REMAP_SEL_MASK 0x00000600
+#define CST4319_ILPDIV_EN 0x00000800
+#define CST4319_XTAL_PD_POL 0x00001000
+#define CST4319_LPO_SEL 0x00002000
+#define CST4319_RES_INIT_MODE 0x0000c000
+ /* PALDO is configured with external PNP */
+#define CST4319_PALDO_EXTPNP 0x00010000
+#define CST4319_CBUCK_MODE_MASK 0x00060000
+#define CST4319_CBUCK_MODE_BURST 0x00020000
+#define CST4319_CBUCK_MODE_LPBURST 0x00060000
+#define CST4319_RCAL_VALID 0x01000000
+#define CST4319_RCAL_VALUE_MASK 0x3e000000
+#define CST4319_RCAL_VALUE_SHIFT 25
+
+/* 4336 chip-specific ChipStatus register bits */
+#define CST4336_SPI_MODE_MASK 0x00000001
+#define CST4336_SPROM_PRESENT 0x00000002
+#define CST4336_OTP_PRESENT 0x00000004
+#define CST4336_ARMREMAP_0 0x00000008
+#define CST4336_ILPDIV_EN_MASK 0x00000010
+#define CST4336_ILPDIV_EN_SHIFT 4
+#define CST4336_XTAL_PD_POL_MASK 0x00000020
+#define CST4336_XTAL_PD_POL_SHIFT 5
+#define CST4336_LPO_SEL_MASK 0x00000040
+#define CST4336_LPO_SEL_SHIFT 6
+#define CST4336_RES_INIT_MODE_MASK 0x00000180
+#define CST4336_RES_INIT_MODE_SHIFT 7
+#define CST4336_CBUCK_MODE_MASK 0x00000600
+#define CST4336_CBUCK_MODE_SHIFT 9
+
+/* 4313 chip-specific ChipStatus register bits */
+#define CST4313_SPROM_PRESENT 1
+#define CST4313_OTP_PRESENT 2
+#define CST4313_SPROM_OTP_SEL_MASK 0x00000002
+#define CST4313_SPROM_OTP_SEL_SHIFT 0
+
+/* 4313 Chip specific ChipControl register bits */
+ /* 12 mA drive strengh for later 4313 */
+#define CCTRL_4313_12MA_LED_DRIVE 0x00000007
+
+/* Manufacturer Ids */
+#define MFGID_ARM 0x43b
+#define MFGID_BRCM 0x4bf
+#define MFGID_MIPS 0x4a7
+
+/* Enumeration ROM registers */
+#define ER_EROMENTRY 0x000
+#define ER_REMAPCONTROL 0xe00
+#define ER_REMAPSELECT 0xe04
+#define ER_MASTERSELECT 0xe10
+#define ER_ITCR 0xf00
+#define ER_ITIP 0xf04
+
+/* Erom entries */
+#define ER_TAG 0xe
+#define ER_TAG1 0x6
+#define ER_VALID 1
+#define ER_CI 0
+#define ER_MP 2
+#define ER_ADD 4
+#define ER_END 0xe
+#define ER_BAD 0xffffffff
+
+/* EROM CompIdentA */
+#define CIA_MFG_MASK 0xfff00000
+#define CIA_MFG_SHIFT 20
+#define CIA_CID_MASK 0x000fff00
+#define CIA_CID_SHIFT 8
+#define CIA_CCL_MASK 0x000000f0
+#define CIA_CCL_SHIFT 4
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK 0xff000000
+#define CIB_REV_SHIFT 24
+#define CIB_NSW_MASK 0x00f80000
+#define CIB_NSW_SHIFT 19
+#define CIB_NMW_MASK 0x0007c000
+#define CIB_NMW_SHIFT 14
+#define CIB_NSP_MASK 0x00003e00
+#define CIB_NSP_SHIFT 9
+#define CIB_NMP_MASK 0x000001f0
+#define CIB_NMP_SHIFT 4
+
+/* EROM AddrDesc */
+#define AD_ADDR_MASK 0xfffff000
+#define AD_SP_MASK 0x00000f00
+#define AD_SP_SHIFT 8
+#define AD_ST_MASK 0x000000c0
+#define AD_ST_SHIFT 6
+#define AD_ST_SLAVE 0x00000000
+#define AD_ST_BRIDGE 0x00000040
+#define AD_ST_SWRAP 0x00000080
+#define AD_ST_MWRAP 0x000000c0
+#define AD_SZ_MASK 0x00000030
+#define AD_SZ_SHIFT 4
+#define AD_SZ_4K 0x00000000
+#define AD_SZ_8K 0x00000010
+#define AD_SZ_16K 0x00000020
+#define AD_SZ_SZD 0x00000030
+#define AD_AG32 0x00000008
+#define AD_ADDR_ALIGN 0x00000fff
+#define AD_SZ_BASE 0x00001000 /* 4KB */
+
+/* EROM SizeDesc */
+#define SD_SZ_MASK 0xfffff000
+#define SD_SG32 0x00000008
+#define SD_SZ_ALIGN 0x00000fff
+
+/* PCI config space bit 4 for 4306c0 slow clock source */
+#define PCI_CFG_GPIO_SCS 0x10
+/* PCI config space GPIO 14 for Xtal power-up */
+#define PCI_CFG_GPIO_XTAL 0x40
+/* PCI config space GPIO 15 for PLL power-down */
+#define PCI_CFG_GPIO_PLL 0x80
+
+/* power control defines */
+#define PLL_DELAY 150 /* us pll on delay */
+#define FREF_DELAY 200 /* us fref change delay */
+#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */
+
+/* resetctrl */
+#define AIRC_RESET 1
+
+#define NOREV -1 /* Invalid rev */
+
+/* GPIO Based LED powersave defines */
+#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */
+#define DEFAULT_GPIO_OFFTIME 90 /* Default: 10% on */
+
+/* When Srom support present, fields in sromcontrol */
+#define SRC_START 0x80000000
+#define SRC_BUSY 0x80000000
+#define SRC_OPCODE 0x60000000
+#define SRC_OP_READ 0x00000000
+#define SRC_OP_WRITE 0x20000000
+#define SRC_OP_WRDIS 0x40000000
+#define SRC_OP_WREN 0x60000000
+#define SRC_OTPSEL 0x00000010
+#define SRC_LOCK 0x00000008
+#define SRC_SIZE_MASK 0x00000006
+#define SRC_SIZE_1K 0x00000000
+#define SRC_SIZE_4K 0x00000002
+#define SRC_SIZE_16K 0x00000004
+#define SRC_SIZE_SHIFT 1
+#define SRC_PRESENT 0x00000001
+
+/* External PA enable mask */
+#define GPIO_CTRL_EPA_EN_MASK 0x40
+
+#define DEFAULT_GPIOTIMERVAL \
+ ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
+
+#define BADIDX (SI_MAXCORES + 1)
+
+/* Newer chips can access PCI/PCIE and CC core without requiring to change
+ * PCI BAR0 WIN
+ */
+#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \
+ (((si)->pub.buscoretype == PCI_CORE_ID) && \
+ (si)->pub.buscorerev >= 13))
+
+#define CCREGS_FAST(si) (((char __iomem *)((si)->curmap) + \
+ PCI_16KB0_CCREGS_OFFSET))
+
+#define IS_SIM(chippkg) \
+ ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
+
+/*
+ * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts
+ * before after core switching to avoid invalid register accesss inside ISR.
+ */
+#define INTR_OFF(si, intr_val) \
+ if ((si)->intrsoff_fn && \
+ (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
+ intr_val = (*(si)->intrsoff_fn)((si)->intr_arg)
+
+#define INTR_RESTORE(si, intr_val) \
+ if ((si)->intrsrestore_fn && \
+ (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
+ (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val)
+
+#define PCI(si) ((si)->pub.buscoretype == PCI_CORE_ID)
+#define PCIE(si) ((si)->pub.buscoretype == PCIE_CORE_ID)
+
+#define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
+
+#ifdef BCMDBG
+#define SI_MSG(args) printk args
+#else
+#define SI_MSG(args)
+#endif /* BCMDBG */
+
+#define GOODCOREADDR(x, b) \
+ (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
+ IS_ALIGNED((x), SI_CORE_SIZE))
+
+#define PCIEREGS(si) ((__iomem char *)((si)->curmap) + \
+ PCI_16KB0_PCIREGS_OFFSET)
+
+struct aidmp {
+ u32 oobselina30; /* 0x000 */
+ u32 oobselina74; /* 0x004 */
+ u32 PAD[6];
+ u32 oobselinb30; /* 0x020 */
+ u32 oobselinb74; /* 0x024 */
+ u32 PAD[6];
+ u32 oobselinc30; /* 0x040 */
+ u32 oobselinc74; /* 0x044 */
+ u32 PAD[6];
+ u32 oobselind30; /* 0x060 */
+ u32 oobselind74; /* 0x064 */
+ u32 PAD[38];
+ u32 oobselouta30; /* 0x100 */
+ u32 oobselouta74; /* 0x104 */
+ u32 PAD[6];
+ u32 oobseloutb30; /* 0x120 */
+ u32 oobseloutb74; /* 0x124 */
+ u32 PAD[6];
+ u32 oobseloutc30; /* 0x140 */
+ u32 oobseloutc74; /* 0x144 */
+ u32 PAD[6];
+ u32 oobseloutd30; /* 0x160 */
+ u32 oobseloutd74; /* 0x164 */
+ u32 PAD[38];
+ u32 oobsynca; /* 0x200 */
+ u32 oobseloutaen; /* 0x204 */
+ u32 PAD[6];
+ u32 oobsyncb; /* 0x220 */
+ u32 oobseloutben; /* 0x224 */
+ u32 PAD[6];
+ u32 oobsyncc; /* 0x240 */
+ u32 oobseloutcen; /* 0x244 */
+ u32 PAD[6];
+ u32 oobsyncd; /* 0x260 */
+ u32 oobseloutden; /* 0x264 */
+ u32 PAD[38];
+ u32 oobaextwidth; /* 0x300 */
+ u32 oobainwidth; /* 0x304 */
+ u32 oobaoutwidth; /* 0x308 */
+ u32 PAD[5];
+ u32 oobbextwidth; /* 0x320 */
+ u32 oobbinwidth; /* 0x324 */
+ u32 oobboutwidth; /* 0x328 */
+ u32 PAD[5];
+ u32 oobcextwidth; /* 0x340 */
+ u32 oobcinwidth; /* 0x344 */
+ u32 oobcoutwidth; /* 0x348 */
+ u32 PAD[5];
+ u32 oobdextwidth; /* 0x360 */
+ u32 oobdinwidth; /* 0x364 */
+ u32 oobdoutwidth; /* 0x368 */
+ u32 PAD[37];
+ u32 ioctrlset; /* 0x400 */
+ u32 ioctrlclear; /* 0x404 */
+ u32 ioctrl; /* 0x408 */
+ u32 PAD[61];
+ u32 iostatus; /* 0x500 */
+ u32 PAD[127];
+ u32 ioctrlwidth; /* 0x700 */
+ u32 iostatuswidth; /* 0x704 */
+ u32 PAD[62];
+ u32 resetctrl; /* 0x800 */
+ u32 resetstatus; /* 0x804 */
+ u32 resetreadid; /* 0x808 */
+ u32 resetwriteid; /* 0x80c */
+ u32 PAD[60];
+ u32 errlogctrl; /* 0x900 */
+ u32 errlogdone; /* 0x904 */
+ u32 errlogstatus; /* 0x908 */
+ u32 errlogaddrlo; /* 0x90c */
+ u32 errlogaddrhi; /* 0x910 */
+ u32 errlogid; /* 0x914 */
+ u32 errloguser; /* 0x918 */
+ u32 errlogflags; /* 0x91c */
+ u32 PAD[56];
+ u32 intstatus; /* 0xa00 */
+ u32 PAD[127];
+ u32 config; /* 0xe00 */
+ u32 PAD[63];
+ u32 itcr; /* 0xf00 */
+ u32 PAD[3];
+ u32 itipooba; /* 0xf10 */
+ u32 itipoobb; /* 0xf14 */
+ u32 itipoobc; /* 0xf18 */
+ u32 itipoobd; /* 0xf1c */
+ u32 PAD[4];
+ u32 itipoobaout; /* 0xf30 */
+ u32 itipoobbout; /* 0xf34 */
+ u32 itipoobcout; /* 0xf38 */
+ u32 itipoobdout; /* 0xf3c */
+ u32 PAD[4];
+ u32 itopooba; /* 0xf50 */
+ u32 itopoobb; /* 0xf54 */
+ u32 itopoobc; /* 0xf58 */
+ u32 itopoobd; /* 0xf5c */
+ u32 PAD[4];
+ u32 itopoobain; /* 0xf70 */
+ u32 itopoobbin; /* 0xf74 */
+ u32 itopoobcin; /* 0xf78 */
+ u32 itopoobdin; /* 0xf7c */
+ u32 PAD[4];
+ u32 itopreset; /* 0xf90 */
+ u32 PAD[15];
+ u32 peripherialid4; /* 0xfd0 */
+ u32 peripherialid5; /* 0xfd4 */
+ u32 peripherialid6; /* 0xfd8 */
+ u32 peripherialid7; /* 0xfdc */
+ u32 peripherialid0; /* 0xfe0 */
+ u32 peripherialid1; /* 0xfe4 */
+ u32 peripherialid2; /* 0xfe8 */
+ u32 peripherialid3; /* 0xfec */
+ u32 componentid0; /* 0xff0 */
+ u32 componentid1; /* 0xff4 */
+ u32 componentid2; /* 0xff8 */
+ u32 componentid3; /* 0xffc */
+};
+
+/* EROM parsing */
+
+static u32
+get_erom_ent(struct si_pub *sih, u32 __iomem **eromptr, u32 mask, u32 match)
+{
+ u32 ent;
+ uint inv = 0, nom = 0;
+
+ while (true) {
+ ent = R_REG(*eromptr);
+ (*eromptr)++;
+
+ if (mask == 0)
+ break;
+
+ if ((ent & ER_VALID) == 0) {
+ inv++;
+ continue;
+ }
+
+ if (ent == (ER_END | ER_VALID))
+ break;
+
+ if ((ent & mask) == match)
+ break;
+
+ nom++;
+ }
+
+ return ent;
+}
+
+static u32
+get_asd(struct si_pub *sih, u32 __iomem **eromptr, uint sp, uint ad, uint st,
+ u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
+{
+ u32 asd, sz, szd;
+
+ asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+ if (((asd & ER_TAG1) != ER_ADD) ||
+ (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+ ((asd & AD_ST_MASK) != st)) {
+ /* This is not what we want, "push" it back */
+ (*eromptr)--;
+ return 0;
+ }
+ *addrl = asd & AD_ADDR_MASK;
+ if (asd & AD_AG32)
+ *addrh = get_erom_ent(sih, eromptr, 0, 0);
+ else
+ *addrh = 0;
+ *sizeh = 0;
+ sz = asd & AD_SZ_MASK;
+ if (sz == AD_SZ_SZD) {
+ szd = get_erom_ent(sih, eromptr, 0, 0);
+ *sizel = szd & SD_SZ_MASK;
+ if (szd & SD_SG32)
+ *sizeh = get_erom_ent(sih, eromptr, 0, 0);
+ } else
+ *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+ return asd;
+}
+
+static void ai_hwfixup(struct si_info *sii)
+{
+}
+
+/* parse the enumeration rom to identify all cores */
+static void ai_scan(struct si_pub *sih, struct chipcregs __iomem *cc)
+{
+ struct si_info *sii = (struct si_info *)sih;
+
+ u32 erombase;
+ u32 __iomem *eromptr, *eromlim;
+ void __iomem *regs = cc;
+
+ erombase = R_REG(&cc->eromptr);
+
+ /* Set wrappers address */
+ sii->curwrap = (void *)((unsigned long)cc + SI_CORE_SIZE);
+
+ /* Now point the window at the erom */
+ pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
+ eromptr = regs;
+ eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
+
+ while (eromptr < eromlim) {
+ u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
+ u32 mpd, asd, addrl, addrh, sizel, sizeh;
+ u32 __iomem *base;
+ uint i, j, idx;
+ bool br;
+
+ br = false;
+
+ /* Grok a component */
+ cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+ if (cia == (ER_END | ER_VALID)) {
+ /* Found END of erom */
+ ai_hwfixup(sii);
+ return;
+ }
+ base = eromptr - 1;
+ cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+ if ((cib & ER_TAG) != ER_CI) {
+ /* CIA not followed by CIB */
+ goto error;
+ }
+
+ cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+ mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+ crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+ nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+ nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+ nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+ nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+ if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
+ continue;
+ if ((nmw + nsw == 0)) {
+ /* A component which is not a core */
+ if (cid == OOB_ROUTER_CORE_ID) {
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+ &addrl, &addrh, &sizel, &sizeh);
+ if (asd != 0)
+ sii->oob_router = addrl;
+ }
+ continue;
+ }
+
+ idx = sii->numcores;
+/* sii->eromptr[idx] = base; */
+ sii->cia[idx] = cia;
+ sii->cib[idx] = cib;
+ sii->coreid[idx] = cid;
+
+ for (i = 0; i < nmp; i++) {
+ mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+ if ((mpd & ER_TAG) != ER_MP) {
+ /* Not enough MP entries for component */
+ goto error;
+ }
+ }
+
+ /* First Slave Address Descriptor should be port 0:
+ * the main register space for the core
+ */
+ asd =
+ get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0) {
+ /* Try again to see if it is a bridge */
+ asd =
+ get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
+ &addrh, &sizel, &sizeh);
+ if (asd != 0)
+ br = true;
+ else if ((addrh != 0) || (sizeh != 0)
+ || (sizel != SI_CORE_SIZE)) {
+ /* First Slave ASD for core malformed */
+ goto error;
+ }
+ }
+ sii->coresba[idx] = addrl;
+ sii->coresba_size[idx] = sizel;
+ /* Get any more ASDs in port 0 */
+ j = 1;
+ do {
+ asd =
+ get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
+ &addrh, &sizel, &sizeh);
+ if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
+ sii->coresba2[idx] = addrl;
+ sii->coresba2_size[idx] = sizel;
+ }
+ j++;
+ } while (asd != 0);
+
+ /* Go through the ASDs for other slave ports */
+ for (i = 1; i < nsp; i++) {
+ j = 0;
+ do {
+ asd =
+ get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
+ &addrl, &addrh, &sizel, &sizeh);
+ } while (asd != 0);
+ if (j == 0) {
+ /* SP has no address descriptors */
+ goto error;
+ }
+ }
+
+ /* Now get master wrappers */
+ for (i = 0; i < nmw; i++) {
+ asd =
+ get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
+ &addrh, &sizel, &sizeh);
+ if (asd == 0) {
+ /* Missing descriptor for MW */
+ goto error;
+ }
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ /* Master wrapper %d is not 4KB */
+ goto error;
+ }
+ if (i == 0)
+ sii->wrapba[idx] = addrl;
+ }
+
+ /* And finally slave wrappers */
+ for (i = 0; i < nsw; i++) {
+ uint fwp = (nsp == 1) ? 0 : 1;
+ asd =
+ get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
+ &addrl, &addrh, &sizel, &sizeh);
+ if (asd == 0) {
+ /* Missing descriptor for SW */
+ goto error;
+ }
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ /* Slave wrapper is not 4KB */
+ goto error;
+ }
+ if ((nmw == 0) && (i == 0))
+ sii->wrapba[idx] = addrl;
+ }
+
+ /* Don't record bridges */
+ if (br)
+ continue;
+
+ /* Done with core */
+ sii->numcores++;
+ }
+
+ error:
+ /* Reached end of erom without finding END */
+ sii->numcores = 0;
+ return;
+}
+
+/*
+ * This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address. Since each core starts with the
+ * same set of registers (BIST, clock control, etc), the returned address
+ * contains the first register of this 'common' register block (not to be
+ * confused with 'common core').
+ */
+void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx)
+{
+ struct si_info *sii = (struct si_info *)sih;
+ u32 addr = sii->coresba[coreidx];
+ u32 wrap = sii->wrapba[coreidx];
+
+ if (coreidx >= sii->numcores)
+ return NULL;
+
+ /* point bar0 window */
+ pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
+ /* point bar0 2nd 4KB window */
+ pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
+ sii->curidx = coreidx;
+
+ return sii->curmap;
+}
+
+/* Return the number of address spaces in current core */
+int ai_numaddrspaces(struct si_pub *sih)
+{
+ return 2;
+}
+
+/* Return the address of the nth address space in the current core */
+u32 ai_addrspace(struct si_pub *sih, uint asidx)
+{
+ struct si_info *sii;
+ uint cidx;
+
+ sii = (struct si_info *)sih;
+ cidx = sii->curidx;
+
+ if (asidx == 0)
+ return sii->coresba[cidx];
+ else if (asidx == 1)
+ return sii->coresba2[cidx];
+ else {
+ /* Need to parse the erom again to find addr space */
+ return 0;
+ }
+}
+
+/* Return the size of the nth address space in the current core */
+u32 ai_addrspacesize(struct si_pub *sih, uint asidx)
+{
+ struct si_info *sii;
+ uint cidx;
+
+ sii = (struct si_info *)sih;
+ cidx = sii->curidx;
+
+ if (asidx == 0)
+ return sii->coresba_size[cidx];
+ else if (asidx == 1)
+ return sii->coresba2_size[cidx];
+ else {
+ /* Need to parse the erom again to find addr */
+ return 0;
+ }
+}
+
+uint ai_flag(struct si_pub *sih)
+{
+ struct si_info *sii;
+ struct aidmp *ai;
+
+ sii = (struct si_info *)sih;
+ ai = sii->curwrap;
+
+ return R_REG(&ai->oobselouta30) & 0x1f;
+}
+
+void ai_setint(struct si_pub *sih, int siflag)
+{
+}
+
+uint ai_corevendor(struct si_pub *sih)
+{
+ struct si_info *sii;
+ u32 cia;
+
+ sii = (struct si_info *)sih;
+ cia = sii->cia[sii->curidx];
+ return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+}
+
+uint ai_corerev(struct si_pub *sih)
+{
+ struct si_info *sii;
+ u32 cib;
+
+ sii = (struct si_info *)sih;
+ cib = sii->cib[sii->curidx];
+ return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+}
+
+bool ai_iscoreup(struct si_pub *sih)
+{
+ struct si_info *sii;
+ struct aidmp *ai;
+
+ sii = (struct si_info *)sih;
+ ai = sii->curwrap;
+
+ return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
+ SICF_CLOCK_EN)
+ && ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
+}
+
+void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val)
+{
+ struct si_info *sii;
+ struct aidmp *ai;
+ u32 w;
+
+ sii = (struct si_info *)sih;
+
+ ai = sii->curwrap;
+
+ if (mask || val) {
+ w = ((R_REG(&ai->ioctrl) & ~mask) | val);
+ W_REG(&ai->ioctrl, w);
+ }
+}
+
+u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val)
+{
+ struct si_info *sii;
+ struct aidmp *ai;
+ u32 w;
+
+ sii = (struct si_info *)sih;
+ ai = sii->curwrap;
+
+ if (mask || val) {
+ w = ((R_REG(&ai->ioctrl) & ~mask) | val);
+ W_REG(&ai->ioctrl, w);
+ }
+
+ return R_REG(&ai->ioctrl);
+}
+
+/* return true if PCIE capability exists in the pci config space */
+static bool ai_ispcie(struct si_info *sii)
+{
+ u8 cap_ptr;
+
+ cap_ptr =
+ pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL,
+ NULL);
+ if (!cap_ptr)
+ return false;
+
+ return true;
+}
+
+static bool ai_buscore_prep(struct si_info *sii)
+{
+ /* kludge to enable the clock on the 4306 which lacks a slowclock */
+ if (!ai_ispcie(sii))
+ ai_clkctl_xtal(&sii->pub, XTAL | PLL, ON);
+ return true;
+}
+
+u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val)
+{
+ struct si_info *sii;
+ struct aidmp *ai;
+ u32 w;
+
+ sii = (struct si_info *)sih;
+ ai = sii->curwrap;
+
+ if (mask || val) {
+ w = ((R_REG(&ai->iostatus) & ~mask) | val);
+ W_REG(&ai->iostatus, w);
+ }
+
+ return R_REG(&ai->iostatus);
+}
+
+static bool
+ai_buscore_setup(struct si_info *sii, u32 savewin, uint *origidx)
+{
+ bool pci, pcie;
+ uint i;
+ uint pciidx, pcieidx, pcirev, pcierev;
+ struct chipcregs __iomem *cc;
+
+ cc = ai_setcoreidx(&sii->pub, SI_CC_IDX);
+
+ /* get chipcommon rev */
+ sii->pub.ccrev = (int)ai_corerev(&sii->pub);
+
+ /* get chipcommon chipstatus */
+ if (sii->pub.ccrev >= 11)
+ sii->pub.chipst = R_REG(&cc->chipstatus);
+
+ /* get chipcommon capabilites */
+ sii->pub.cccaps = R_REG(&cc->capabilities);
+ /* get chipcommon extended capabilities */
+
+ if (sii->pub.ccrev >= 35)
+ sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext);
+
+ /* get pmu rev and caps */
+ if (sii->pub.cccaps & CC_CAP_PMU) {
+ sii->pub.pmucaps = R_REG(&cc->pmucapabilities);
+ sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+ }
+
+ /* figure out bus/orignal core idx */
+ sii->pub.buscoretype = NODEV_CORE_ID;
+ sii->pub.buscorerev = NOREV;
+ sii->pub.buscoreidx = BADIDX;
+
+ pci = pcie = false;
+ pcirev = pcierev = NOREV;
+ pciidx = pcieidx = BADIDX;
+
+ for (i = 0; i < sii->numcores; i++) {
+ uint cid, crev;
+
+ ai_setcoreidx(&sii->pub, i);
+ cid = ai_coreid(&sii->pub);
+ crev = ai_corerev(&sii->pub);
+
+ if (cid == PCI_CORE_ID) {
+ pciidx = i;
+ pcirev = crev;
+ pci = true;
+ } else if (cid == PCIE_CORE_ID) {
+ pcieidx = i;
+ pcierev = crev;
+ pcie = true;
+ }
+
+ /* find the core idx before entering this func. */
+ if ((savewin && (savewin == sii->coresba[i])) ||
+ (cc == sii->regs[i]))
+ *origidx = i;
+ }
+
+ if (pci && pcie) {
+ if (ai_ispcie(sii))
+ pci = false;
+ else
+ pcie = false;
+ }
+ if (pci) {
+ sii->pub.buscoretype = PCI_CORE_ID;
+ sii->pub.buscorerev = pcirev;
+ sii->pub.buscoreidx = pciidx;
+ } else if (pcie) {
+ sii->pub.buscoretype = PCIE_CORE_ID;
+ sii->pub.buscorerev = pcierev;
+ sii->pub.buscoreidx = pcieidx;
+ }
+
+ /* fixup necessary chip/core configurations */
+ if (SI_FAST(sii)) {
+ if (!sii->pch) {
+ sii->pch = pcicore_init(&sii->pub, sii->pbus,
+ (__iomem void *)PCIEREGS(sii));
+ if (sii->pch == NULL)
+ return false;
+ }
+ }
+ if (ai_pci_fixcfg(&sii->pub)) {
+ /* si_doattach: si_pci_fixcfg failed */
+ return false;
+ }
+
+ /* return to the original core */
+ ai_setcoreidx(&sii->pub, *origidx);
+
+ return true;
+}
+
+/*
+ * get boardtype and boardrev
+ */
+static __used void ai_nvram_process(struct si_info *sii)
+{
+ uint w = 0;
+
+ /* do a pci config read to get subsystem id and subvendor id */
+ pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w);
+
+ sii->pub.boardvendor = w & 0xffff;
+ sii->pub.boardtype = (w >> 16) & 0xffff;
+ sii->pub.boardflags = getintvar(&sii->pub, BRCMS_SROM_BOARDFLAGS);
+}
+
+static struct si_info *ai_doattach(struct si_info *sii,
+ void __iomem *regs, struct pci_dev *pbus)
+{
+ struct si_pub *sih = &sii->pub;
+ u32 w, savewin;
+ struct chipcregs __iomem *cc;
+ uint socitype;
+ uint origidx;
+
+ memset((unsigned char *) sii, 0, sizeof(struct si_info));
+
+ savewin = 0;
+
+ sih->buscoreidx = BADIDX;
+
+ sii->curmap = regs;
+ sii->pbus = pbus;
+
+ /* find Chipcommon address */
+ pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
+ if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
+ savewin = SI_ENUM_BASE;
+
+ pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
+ SI_ENUM_BASE);
+ cc = (struct chipcregs __iomem *) regs;
+
+ /* bus/core/clk setup for register access */
+ if (!ai_buscore_prep(sii))
+ return NULL;
+
+ /*
+ * ChipID recognition.
+ * We assume we can read chipid at offset 0 from the regs arg.
+ * If we add other chiptypes (or if we need to support old sdio
+ * hosts w/o chipcommon), some way of recognizing them needs to
+ * be added here.
+ */
+ w = R_REG(&cc->chipid);
+ socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+ /* Might as wll fill in chip id rev & pkg */
+ sih->chip = w & CID_ID_MASK;
+ sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+ sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+
+ sih->issim = false;
+
+ /* scan for cores */
+ if (socitype == SOCI_AI) {
+ SI_MSG(("Found chip type AI (0x%08x)\n", w));
+ /* pass chipc address instead of original core base */
+ ai_scan(&sii->pub, cc);
+ } else {
+ /* Found chip of unknown type */
+ return NULL;
+ }
+ /* no cores found, bail out */
+ if (sii->numcores == 0)
+ return NULL;
+
+ /* bus/core/clk setup */
+ origidx = SI_CC_IDX;
+ if (!ai_buscore_setup(sii, savewin, &origidx))
+ goto exit;
+
+ /* Init nvram from sprom/otp if they exist */
+ if (srom_var_init(&sii->pub, cc))
+ goto exit;
+
+ ai_nvram_process(sii);
+
+ /* === NVRAM, clock is ready === */
+ cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
+ W_REG(&cc->gpiopullup, 0);
+ W_REG(&cc->gpiopulldown, 0);
+ ai_setcoreidx(sih, origidx);
+
+ /* PMU specific initializations */
+ if (sih->cccaps & CC_CAP_PMU) {
+ u32 xtalfreq;
+ si_pmu_init(sih);
+ si_pmu_chip_init(sih);
+
+ xtalfreq = si_pmu_measure_alpclk(sih);
+ si_pmu_pll_init(sih, xtalfreq);
+ si_pmu_res_init(sih);
+ si_pmu_swreg_init(sih);
+ }
+
+ /* setup the GPIO based LED powersave register */
+ w = getintvar(sih, BRCMS_SROM_LEDDC);
+ if (w == 0)
+ w = DEFAULT_GPIOTIMERVAL;
+ ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, gpiotimerval),
+ ~0, w);
+
+ if (PCIE(sii))
+ pcicore_attach(sii->pch, SI_DOATTACH);
+
+ if (sih->chip == BCM43224_CHIP_ID) {
+ /*
+ * enable 12 mA drive strenth for 43224 and
+ * set chipControl register bit 15
+ */
+ if (sih->chiprev == 0) {
+ SI_MSG(("Applying 43224A0 WARs\n"));
+ ai_corereg(sih, SI_CC_IDX,
+ offsetof(struct chipcregs, chipcontrol),
+ CCTRL43224_GPIO_TOGGLE,
+ CCTRL43224_GPIO_TOGGLE);
+ si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
+ CCTRL_43224A0_12MA_LED_DRIVE);
+ }
+ if (sih->chiprev >= 1) {
+ SI_MSG(("Applying 43224B0+ WARs\n"));
+ si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
+ CCTRL_43224B0_12MA_LED_DRIVE);
+ }
+ }
+
+ if (sih->chip == BCM4313_CHIP_ID) {
+ /*
+ * enable 12 mA drive strenth for 4313 and
+ * set chipControl register bit 1
+ */
+ SI_MSG(("Applying 4313 WARs\n"));
+ si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
+ CCTRL_4313_12MA_LED_DRIVE);
+ }
+
+ return sii;
+
+ exit:
+ if (sii->pch)
+ pcicore_deinit(sii->pch);
+ sii->pch = NULL;
+
+ return NULL;
+}
+
+/*
+ * Allocate a si handle.
+ * devid - pci device id (used to determine chip#)
+ * osh - opaque OS handle
+ * regs - virtual address of initial core registers
+ */
+struct si_pub *
+ai_attach(void __iomem *regs, struct pci_dev *sdh)
+{
+ struct si_info *sii;
+
+ /* alloc struct si_info */
+ sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC);
+ if (sii == NULL)
+ return NULL;
+
+ if (ai_doattach(sii, regs, sdh) == NULL) {
+ kfree(sii);
+ return NULL;
+ }
+
+ return (struct si_pub *) sii;
+}
+
+/* may be called with core in reset */
+void ai_detach(struct si_pub *sih)
+{
+ struct si_info *sii;
+
+ struct si_pub *si_local = NULL;
+ memcpy(&si_local, &sih, sizeof(struct si_pub **));
+
+ sii = (struct si_info *)sih;
+
+ if (sii == NULL)
+ return;
+
+ if (sii->pch)
+ pcicore_deinit(sii->pch);
+ sii->pch = NULL;
+
+ srom_free_vars(sih);
+ kfree(sii);
+}
+
+/* register driver interrupt disabling and restoring callback functions */
+void
+ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
+ void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg)
+{
+ struct si_info *sii;
+
+ sii = (struct si_info *)sih;
+ sii->intr_arg = intr_arg;
+ sii->intrsoff_fn = (u32 (*)(void *)) intrsoff_fn;
+ sii->intrsrestore_fn = (void (*) (void *, u32)) intrsrestore_fn;
+ sii->intrsenabled_fn = (bool (*)(void *)) intrsenabled_fn;
+ /* save current core id. when this function called, the current core
+ * must be the core which provides driver functions(il, et, wl, etc.)
+ */
+ sii->dev_coreid = sii->coreid[sii->curidx];
+}
+
+void ai_deregister_intr_callback(struct si_pub *sih)
+{
+ struct si_info *sii;
+
+ sii = (struct si_info *)sih;
+ sii->intrsoff_fn = NULL;
+}
+
+uint ai_coreid(struct si_pub *sih)
+{
+ struct si_info *sii;
+
+ sii = (struct si_info *)sih;
+ return sii->coreid[sii->curidx];
+}
+
+uint ai_coreidx(struct si_pub *sih)
+{
+ struct si_info *sii;
+
+ sii = (struct si_info *)sih;
+ return sii->curidx;
+}
+
+bool ai_backplane64(struct si_pub *sih)
+{
+ return (sih->cccaps & CC_CAP_BKPLN64) != 0;
+}
+
+/* return index of coreid or BADIDX if not found */
+uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit)
+{
+ struct si_info *sii;
+ uint found;
+ uint i;
+
+ sii = (struct si_info *)sih;
+
+ found = 0;
+
+ for (i = 0; i < sii->numcores; i++)
+ if (sii->coreid[i] == coreid) {
+ if (found == coreunit)
+ return i;
+ found++;
+ }
+
+ return BADIDX;
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching
+ * out of and back to d11 core.
+ */
+void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit)
+{
+ uint idx;
+
+ idx = ai_findcoreidx(sih, coreid, coreunit);
+ if (idx >= SI_MAXCORES)
+ return NULL;
+
+ return ai_setcoreidx(sih, idx);
+}
+
+/* Turn off interrupt as required by ai_setcore, before switch core */
+void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
+ uint *intr_val)
+{
+ void __iomem *cc;
+ struct si_info *sii;
+
+ sii = (struct si_info *)sih;
+
+ if (SI_FAST(sii)) {
+ /* Overloading the origidx variable to remember the coreid,
+ * this works because the core ids cannot be confused with
+ * core indices.
+ */
+ *origidx = coreid;
+ if (coreid == CC_CORE_ID)
+ return CCREGS_FAST(sii);
+ else if (coreid == sih->buscoretype)
+ return PCIEREGS(sii);
+ }
+ INTR_OFF(sii, *intr_val);
+ *origidx = sii->curidx;
+ cc = ai_setcore(sih, coreid, 0);
+ return cc;
+}
+
+/* restore coreidx and restore interrupt */
+void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val)
+{
+ struct si_info *sii;
+
+ sii = (struct si_info *)sih;
+ if (SI_FAST(sii)
+ && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
+ return;
+
+ ai_setcoreidx(sih, coreid);
+ INTR_RESTORE(sii, intr_val);
+}
+
+void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val)
+{
+ struct si_info *sii = (struct si_info *)sih;
+ u32 *w = (u32 *) sii->curwrap;
+ W_REG(w + (offset / 4), val);
+ return;
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
+ * operation, switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core
+ * switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci
+ * registers and (on newer pci cores) chipcommon registers.
+ */
+uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
+ uint val)
+{
+ uint origidx = 0;
+ u32 __iomem *r = NULL;
+ uint w;
+ uint intr_val = 0;
+ bool fast = false;
+ struct si_info *sii;
+
+ sii = (struct si_info *)sih;
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ /*
+ * If pci/pcie, we can get at pci/pcie regs
+ * and on newer cores to chipc
+ */
+ if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+ fast = true;
+ r = (u32 __iomem *)((__iomem char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+ /*
+ * pci registers are at either in the last 2KB of
+ * an 8KB window or, in pcie and pci rev 13 at 8KB
+ */
+ fast = true;
+ if (SI_FAST(sii))
+ r = (u32 __iomem *)((__iomem char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (u32 __iomem *)((__iomem char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET :
+ PCI_BAR0_PCIREGS_OFFSET) + regoff);
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, intr_val);
+
+ /* save current core index */
+ origidx = ai_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (u32 __iomem *) ((unsigned char __iomem *)
+ ai_setcoreidx(&sii->pub, coreidx) + regoff);
+ }
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_REG(r) & ~mask) | val;
+ W_REG(r, w);
+ }
+
+ /* readback */
+ w = R_REG(r);
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx)
+ ai_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+ }
+
+ return w;
+}
+
+void ai_core_disable(struct si_pub *sih, u32 bits)
+{
+ struct si_info *sii;
+ u32 dummy;
+ struct aidmp *ai;
+
+ sii = (struct si_info *)sih;
+
+ ai = sii->curwrap;
+
+ /* if core is already in reset, just return */
+ if (R_REG(&ai->resetctrl) & AIRC_RESET)
+ return;
+
+ W_REG(&ai->ioctrl, bits);
+ dummy = R_REG(&ai->ioctrl);
+ udelay(10);
+
+ W_REG(&ai->resetctrl, AIRC_RESET);
+ udelay(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits)
+{
+ struct si_info *sii;
+ struct aidmp *ai;
+ u32 dummy;
+
+ sii = (struct si_info *)sih;
+ ai = sii->curwrap;
+
+ /*
+ * Must do the disable sequence first to work
+ * for arbitrary current core state.
+ */
+ ai_core_disable(sih, (bits | resetbits));
+
+ /*
+ * Now do the initialization sequence.
+ */
+ W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
+ dummy = R_REG(&ai->ioctrl);
+ W_REG(&ai->resetctrl, 0);
+ udelay(1);
+
+ W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
+ dummy = R_REG(&ai->ioctrl);
+ udelay(1);
+}
+
+/* return the slow clock source - LPO, XTAL, or PCI */
+static uint ai_slowclk_src(struct si_info *sii)
+{
+ struct chipcregs __iomem *cc;
+ u32 val;
+
+ if (sii->pub.ccrev < 6) {
+ pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
+ &val);
+ if (val & PCI_CFG_GPIO_SCS)
+ return SCC_SS_PCI;
+ return SCC_SS_XTAL;
+ } else if (sii->pub.ccrev < 10) {
+ cc = (struct chipcregs __iomem *)
+ ai_setcoreidx(&sii->pub, sii->curidx);
+ return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
+ } else /* Insta-clock */
+ return SCC_SS_XTAL;
+}
+
+/*
+* return the ILP (slowclock) min or max frequency
+* precondition: we've established the chip has dynamic clk control
+*/
+static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
+ struct chipcregs __iomem *cc)
+{
+ u32 slowclk;
+ uint div;
+
+ slowclk = ai_slowclk_src(sii);
+ if (sii->pub.ccrev < 6) {
+ if (slowclk == SCC_SS_PCI)
+ return max_freq ? (PCIMAXFREQ / 64)
+ : (PCIMINFREQ / 64);
+ else
+ return max_freq ? (XTALMAXFREQ / 32)
+ : (XTALMINFREQ / 32);
+ } else if (sii->pub.ccrev < 10) {
+ div = 4 *
+ (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >>
+ SCC_CD_SHIFT) + 1);
+ if (slowclk == SCC_SS_LPO)
+ return max_freq ? LPOMAXFREQ : LPOMINFREQ;
+ else if (slowclk == SCC_SS_XTAL)
+ return max_freq ? (XTALMAXFREQ / div)
+ : (XTALMINFREQ / div);
+ else if (slowclk == SCC_SS_PCI)
+ return max_freq ? (PCIMAXFREQ / div)
+ : (PCIMINFREQ / div);
+ } else {
+ /* Chipc rev 10 is InstaClock */
+ div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
+ div = 4 * (div + 1);
+ return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
+ }
+ return 0;
+}
+
+static void
+ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
+{
+ uint slowmaxfreq, pll_delay, slowclk;
+ uint pll_on_delay, fref_sel_delay;
+
+ pll_delay = PLL_DELAY;
+
+ /*
+ * If the slow clock is not sourced by the xtal then
+ * add the xtal_on_delay since the xtal will also be
+ * powered down by dynamic clk control logic.
+ */
+
+ slowclk = ai_slowclk_src(sii);
+ if (slowclk != SCC_SS_XTAL)
+ pll_delay += XTAL_ON_DELAY;
+
+ /* Starting with 4318 it is ILP that is used for the delays */
+ slowmaxfreq =
+ ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc);
+
+ pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
+ fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
+
+ W_REG(&cc->pll_on_delay, pll_on_delay);
+ W_REG(&cc->fref_sel_delay, fref_sel_delay);
+}
+
+/* initialize power control delay registers */
+void ai_clkctl_init(struct si_pub *sih)
+{
+ struct si_info *sii;
+ uint origidx = 0;
+ struct chipcregs __iomem *cc;
+ bool fast;
+
+ if (!(sih->cccaps & CC_CAP_PWR_CTL))
+ return;
+
+ sii = (struct si_info *)sih;
+ fast = SI_FAST(sii);
+ if (!fast) {
+ origidx = sii->curidx;
+ cc = (struct chipcregs __iomem *)
+ ai_setcore(sih, CC_CORE_ID, 0);
+ if (cc == NULL)
+ return;
+ } else {
+ cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
+ if (cc == NULL)
+ return;
+ }
+
+ /* set all Instaclk chip ILP to 1 MHz */
+ if (sih->ccrev >= 10)
+ SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK,
+ (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+
+ ai_clkctl_setdelay(sii, cc);
+
+ if (!fast)
+ ai_setcoreidx(sih, origidx);
+}
+
+/*
+ * return the value suitable for writing to the
+ * dot11 core FAST_PWRUP_DELAY register
+ */
+u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
+{
+ struct si_info *sii;
+ uint origidx = 0;
+ struct chipcregs __iomem *cc;
+ uint slowminfreq;
+ u16 fpdelay;
+ uint intr_val = 0;
+ bool fast;
+
+ sii = (struct si_info *)sih;
+ if (sih->cccaps & CC_CAP_PMU) {
+ INTR_OFF(sii, intr_val);
+ fpdelay = si_pmu_fast_pwrup_delay(sih);
+ INTR_RESTORE(sii, intr_val);
+ return fpdelay;
+ }
+
+ if (!(sih->cccaps & CC_CAP_PWR_CTL))
+ return 0;
+
+ fast = SI_FAST(sii);
+ fpdelay = 0;
+ if (!fast) {
+ origidx = sii->curidx;
+ INTR_OFF(sii, intr_val);
+ cc = (struct chipcregs __iomem *)
+ ai_setcore(sih, CC_CORE_ID, 0);
+ if (cc == NULL)
+ goto done;
+ } else {
+ cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
+ if (cc == NULL)
+ goto done;
+ }
+
+ slowminfreq = ai_slowclk_freq(sii, false, cc);
+ fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
+ (slowminfreq - 1)) / slowminfreq;
+
+ done:
+ if (!fast) {
+ ai_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
+ }
+ return fpdelay;
+}
+
+/* turn primary xtal and/or pll off/on */
+int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
+{
+ struct si_info *sii;
+ u32 in, out, outen;
+
+ sii = (struct si_info *)sih;
+
+ /* pcie core doesn't have any mapping to control the xtal pu */
+ if (PCIE(sii))
+ return -1;
+
+ pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
+ pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
+ pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);
+
+ /*
+ * Avoid glitching the clock if GPRS is already using it.
+ * We can't actually read the state of the PLLPD so we infer it
+ * by the value of XTAL_PU which *is* readable via gpioin.
+ */
+ if (on && (in & PCI_CFG_GPIO_XTAL))
+ return 0;
+
+ if (what & XTAL)
+ outen |= PCI_CFG_GPIO_XTAL;
+ if (what & PLL)
+ outen |= PCI_CFG_GPIO_PLL;
+
+ if (on) {
+ /* turn primary xtal on */
+ if (what & XTAL) {
+ out |= PCI_CFG_GPIO_XTAL;
+ if (what & PLL)
+ out |= PCI_CFG_GPIO_PLL;
+ pci_write_config_dword(sii->pbus,
+ PCI_GPIO_OUT, out);
+ pci_write_config_dword(sii->pbus,
+ PCI_GPIO_OUTEN, outen);
+ udelay(XTAL_ON_DELAY);
+ }
+
+ /* turn pll on */
+ if (what & PLL) {
+ out &= ~PCI_CFG_GPIO_PLL;
+ pci_write_config_dword(sii->pbus,
+ PCI_GPIO_OUT, out);
+ mdelay(2);
+ }
+ } else {
+ if (what & XTAL)
+ out &= ~PCI_CFG_GPIO_XTAL;
+ if (what & PLL)
+ out |= PCI_CFG_GPIO_PLL;
+ pci_write_config_dword(sii->pbus,
+ PCI_GPIO_OUT, out);
+ pci_write_config_dword(sii->pbus,
+ PCI_GPIO_OUTEN, outen);
+ }
+
+ return 0;
+}
+
+/* clk control mechanism through chipcommon, no policy checking */
+static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
+{
+ uint origidx = 0;
+ struct chipcregs __iomem *cc;
+ u32 scc;
+ uint intr_val = 0;
+ bool fast = SI_FAST(sii);
+
+ /* chipcommon cores prior to rev6 don't support dynamic clock control */
+ if (sii->pub.ccrev < 6)
+ return false;
+
+ if (!fast) {
+ INTR_OFF(sii, intr_val);
+ origidx = sii->curidx;
+ cc = (struct chipcregs __iomem *)
+ ai_setcore(&sii->pub, CC_CORE_ID, 0);
+ } else {
+ cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
+ if (cc == NULL)
+ goto done;
+ }
+
+ if (!(sii->pub.cccaps & CC_CAP_PWR_CTL) && (sii->pub.ccrev < 20))
+ goto done;
+
+ switch (mode) {
+ case CLK_FAST: /* FORCEHT, fast (pll) clock */
+ if (sii->pub.ccrev < 10) {
+ /*
+ * don't forget to force xtal back
+ * on before we clear SCC_DYN_XTAL..
+ */
+ ai_clkctl_xtal(&sii->pub, XTAL, ON);
+ SET_REG(&cc->slow_clk_ctl,
+ (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
+ } else if (sii->pub.ccrev < 20) {
+ OR_REG(&cc->system_clk_ctl, SYCC_HR);
+ } else {
+ OR_REG(&cc->clk_ctl_st, CCS_FORCEHT);
+ }
+
+ /* wait for the PLL */
+ if (sii->pub.cccaps & CC_CAP_PMU) {
+ u32 htavail = CCS_HTAVAIL;
+ SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail)
+ == 0), PMU_MAX_TRANSITION_DLY);
+ } else {
+ udelay(PLL_DELAY);
+ }
+ break;
+
+ case CLK_DYNAMIC: /* enable dynamic clock control */
+ if (sii->pub.ccrev < 10) {
+ scc = R_REG(&cc->slow_clk_ctl);
+ scc &= ~(SCC_FS | SCC_IP | SCC_XC);
+ if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
+ scc |= SCC_XC;
+ W_REG(&cc->slow_clk_ctl, scc);
+
+ /*
+ * for dynamic control, we have to
+ * release our xtal_pu "force on"
+ */
+ if (scc & SCC_XC)
+ ai_clkctl_xtal(&sii->pub, XTAL, OFF);
+ } else if (sii->pub.ccrev < 20) {
+ /* Instaclock */
+ AND_REG(&cc->system_clk_ctl, ~SYCC_HR);
+ } else {
+ AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ done:
+ if (!fast) {
+ ai_setcoreidx(&sii->pub, origidx);
+ INTR_RESTORE(sii, intr_val);
+ }
+ return mode == CLK_FAST;
+}
+
+/*
+ * clock control policy function throught chipcommon
+ *
+ * set dynamic clk control mode (forceslow, forcefast, dynamic)
+ * returns true if we are forcing fast clock
+ * this is a wrapper over the next internal function
+ * to allow flexible policy settings for outside caller
+ */
+bool ai_clkctl_cc(struct si_pub *sih, uint mode)
+{
+ struct si_info *sii;
+
+ sii = (struct si_info *)sih;
+
+ /* chipcommon cores prior to rev6 don't support dynamic clock control */
+ if (sih->ccrev < 6)
+ return false;
+
+ if (PCI_FORCEHT(sii))
+ return mode == CLK_FAST;
+
+ return _ai_clkctl_cc(sii, mode);
+}
+
+/* Build device path */
+int ai_devpath(struct si_pub *sih, char *path, int size)
+{
+ int slen;
+
+ if (!path || size <= 0)
+ return -1;
+
+ slen = snprintf(path, (size_t) size, "pci/%u/%u/",
+ ((struct si_info *)sih)->pbus->bus->number,
+ PCI_SLOT(((struct pci_dev *)
+ (((struct si_info *)(sih))->pbus))->devfn));
+
+ if (slen < 0 || slen >= size) {
+ path[0] = '\0';
+ return -1;
+ }
+
+ return 0;
+}
+
+void ai_pci_up(struct si_pub *sih)
+{
+ struct si_info *sii;
+
+ sii = (struct si_info *)sih;
+
+ if (PCI_FORCEHT(sii))
+ _ai_clkctl_cc(sii, CLK_FAST);
+
+ if (PCIE(sii))
+ pcicore_up(sii->pch, SI_PCIUP);
+
+}
+
+/* Unconfigure and/or apply various WARs when system is going to sleep mode */
+void ai_pci_sleep(struct si_pub *sih)
+{
+ struct si_info *sii;
+
+ sii = (struct si_info *)sih;
+
+ pcicore_sleep(sii->pch);
+}
+
+/* Unconfigure and/or apply various WARs when going down */
+void ai_pci_down(struct si_pub *sih)
+{
+ struct si_info *sii;
+
+ sii = (struct si_info *)sih;
+
+ /* release FORCEHT since chip is going to "down" state */
+ if (PCI_FORCEHT(sii))
+ _ai_clkctl_cc(sii, CLK_DYNAMIC);
+
+ pcicore_down(sii->pch, SI_PCIDOWN);
+}
+
+/*
+ * Configure the pci core for pci client (NIC) action
+ * coremask is the bitvec of cores by index to be enabled.
+ */
+void ai_pci_setup(struct si_pub *sih, uint coremask)
+{
+ struct si_info *sii;
+ struct sbpciregs __iomem *regs = NULL;
+ u32 siflag = 0, w;
+ uint idx = 0;
+
+ sii = (struct si_info *)sih;
+
+ if (PCI(sii)) {
+ /* get current core index */
+ idx = sii->curidx;
+
+ /* we interrupt on this backplane flag number */
+ siflag = ai_flag(sih);
+
+ /* switch over to pci core */
+ regs = ai_setcoreidx(sih, sii->pub.buscoreidx);
+ }
+
+ /*
+ * Enable sb->pci interrupts. Assume
+ * PCI rev 2.3 support was added in pci core rev 6 and things changed..
+ */
+ if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
+ /* pci config write to set this core bit in PCIIntMask */
+ pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
+ w |= (coremask << PCI_SBIM_SHIFT);
+ pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
+ } else {
+ /* set sbintvec bit for our flag number */
+ ai_setint(sih, siflag);
+ }
+
+ if (PCI(sii)) {
+ pcicore_pci_setup(sii->pch, regs);
+
+ /* switch back to previous core */
+ ai_setcoreidx(sih, idx);
+ }
+}
+
+/*
+ * Fixup SROMless PCI device's configuration.
+ * The current core may be changed upon return.
+ */
+int ai_pci_fixcfg(struct si_pub *sih)
+{
+ uint origidx;
+ void __iomem *regs = NULL;
+ struct si_info *sii = (struct si_info *)sih;
+
+ /* Fixup PI in SROM shadow area to enable the correct PCI core access */
+ /* save the current index */
+ origidx = ai_coreidx(&sii->pub);
+
+ /* check 'pi' is correct and fix it if not */
+ regs = ai_setcore(&sii->pub, sii->pub.buscoretype, 0);
+ if (sii->pub.buscoretype == PCIE_CORE_ID)
+ pcicore_fixcfg_pcie(sii->pch,
+ (struct sbpcieregs __iomem *)regs);
+ else if (sii->pub.buscoretype == PCI_CORE_ID)
+ pcicore_fixcfg_pci(sii->pch, (struct sbpciregs __iomem *)regs);
+
+ /* restore the original index */
+ ai_setcoreidx(&sii->pub, origidx);
+
+ pcicore_hwup(sii->pch);
+ return 0;
+}
+
+/* mask&set gpiocontrol bits */
+u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority)
+{
+ uint regoff;
+
+ regoff = offsetof(struct chipcregs, gpiocontrol);
+ return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
+}
+
+void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
+{
+ struct si_info *sii;
+ struct chipcregs __iomem *cc;
+ uint origidx;
+ u32 val;
+
+ sii = (struct si_info *)sih;
+ origidx = ai_coreidx(sih);
+
+ cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
+
+ val = R_REG(&cc->chipcontrol);
+
+ if (on) {
+ if (sih->chippkg == 9 || sih->chippkg == 0xb)
+ /* Ext PA Controls for 4331 12x9 Package */
+ W_REG(&cc->chipcontrol, val |
+ CCTRL4331_EXTPA_EN |
+ CCTRL4331_EXTPA_ON_GPIO2_5);
+ else
+ /* Ext PA Controls for 4331 12x12 Package */
+ W_REG(&cc->chipcontrol,
+ val | CCTRL4331_EXTPA_EN);
+ } else {
+ val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+ W_REG(&cc->chipcontrol, val);
+ }
+
+ ai_setcoreidx(sih, origidx);
+}
+
+/* Enable BT-COEX & Ex-PA for 4313 */
+void ai_epa_4313war(struct si_pub *sih)
+{
+ struct si_info *sii;
+ struct chipcregs __iomem *cc;
+ uint origidx;
+
+ sii = (struct si_info *)sih;
+ origidx = ai_coreidx(sih);
+
+ cc = ai_setcore(sih, CC_CORE_ID, 0);
+
+ /* EPA Fix */
+ W_REG(&cc->gpiocontrol,
+ R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
+
+ ai_setcoreidx(sih, origidx);
+}
+
+/* check if the device is removed */
+bool ai_deviceremoved(struct si_pub *sih)
+{
+ u32 w;
+ struct si_info *sii;
+
+ sii = (struct si_info *)sih;
+
+ pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w);
+ if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
+ return true;
+
+ return false;
+}
+
+bool ai_is_sprom_available(struct si_pub *sih)
+{
+ if (sih->ccrev >= 31) {
+ struct si_info *sii;
+ uint origidx;
+ struct chipcregs __iomem *cc;
+ u32 sromctrl;
+
+ if ((sih->cccaps & CC_CAP_SROM) == 0)
+ return false;
+
+ sii = (struct si_info *)sih;
+ origidx = sii->curidx;
+ cc = ai_setcoreidx(sih, SI_CC_IDX);
+ sromctrl = R_REG(&cc->sromcontrol);
+ ai_setcoreidx(sih, origidx);
+ return sromctrl & SRC_PRESENT;
+ }
+
+ switch (sih->chip) {
+ case BCM4313_CHIP_ID:
+ return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+ default:
+ return true;
+ }
+}
+
+bool ai_is_otp_disabled(struct si_pub *sih)
+{
+ switch (sih->chip) {
+ case BCM4313_CHIP_ID:
+ return (sih->chipst & CST4313_OTP_PRESENT) == 0;
+ /* These chips always have their OTP on */
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ default:
+ return false;
+ }
+}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
new file mode 100644
index 00000000000..106a7424a7c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_AIUTILS_H_
+#define _BRCM_AIUTILS_H_
+
+#include "types.h"
+
+/*
+ * SOC Interconnect Address Map.
+ * All regions may not exist on all chips.
+ */
+/* each core gets 4Kbytes for registers */
+#define SI_CORE_SIZE 0x1000
+/*
+ * Max cores (this is arbitrary, for software
+ * convenience and could be changed if we
+ * make any larger chips
+ */
+#define SI_MAXCORES 16
+
+/* Client Mode sb2pcitranslation2 size in bytes */
+#define SI_PCI_DMA_SZ 0x40000000
+
+/* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */
+#define SI_PCIE_DMA_H32 0x80000000
+
+/* core codes */
+#define NODEV_CORE_ID 0x700 /* Invalid coreid */
+#define CC_CORE_ID 0x800 /* chipcommon core */
+#define ILINE20_CORE_ID 0x801 /* iline20 core */
+#define SRAM_CORE_ID 0x802 /* sram core */
+#define SDRAM_CORE_ID 0x803 /* sdram core */
+#define PCI_CORE_ID 0x804 /* pci core */
+#define MIPS_CORE_ID 0x805 /* mips core */
+#define ENET_CORE_ID 0x806 /* enet mac core */
+#define CODEC_CORE_ID 0x807 /* v90 codec core */
+#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
+#define ADSL_CORE_ID 0x809 /* ADSL core */
+#define ILINE100_CORE_ID 0x80a /* iline100 core */
+#define IPSEC_CORE_ID 0x80b /* ipsec core */
+#define UTOPIA_CORE_ID 0x80c /* utopia core */
+#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
+#define SOCRAM_CORE_ID 0x80e /* internal memory core */
+#define MEMC_CORE_ID 0x80f /* memc sdram core */
+#define OFDM_CORE_ID 0x810 /* OFDM phy core */
+#define EXTIF_CORE_ID 0x811 /* external interface core */
+#define D11_CORE_ID 0x812 /* 802.11 MAC core */
+#define APHY_CORE_ID 0x813 /* 802.11a phy core */
+#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
+#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
+#define MIPS33_CORE_ID 0x816 /* mips3302 core */
+#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
+#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
+#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
+#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
+#define SDIOH_CORE_ID 0x81b /* sdio host core */
+#define ROBO_CORE_ID 0x81c /* roboswitch core */
+#define ATA100_CORE_ID 0x81d /* parallel ATA core */
+#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
+#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
+#define PCIE_CORE_ID 0x820 /* pci express core */
+#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
+#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
+#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
+#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
+#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
+#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
+#define PMU_CORE_ID 0x827 /* PMU core */
+#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
+#define SDIOD_CORE_ID 0x829 /* SDIO device core */
+#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
+#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
+#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
+#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
+#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
+#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
+#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
+#define SC_CORE_ID 0x831 /* shared common core */
+#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
+#define SPIH_CORE_ID 0x833 /* SPI host core */
+#define I2S_CORE_ID 0x834 /* I2S core */
+#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
+#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
+#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
+#define DEF_AI_COMP 0xfff /* Default component, in ai chips it
+ * maps all unused address ranges
+ */
+
+/* chipcommon being the first core: */
+#define SI_CC_IDX 0
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_AI 1
+
+/* Common core control flags */
+#define SICF_BIST_EN 0x8000
+#define SICF_PME_EN 0x4000
+#define SICF_CORE_BITS 0x3ffc
+#define SICF_FGC 0x0002
+#define SICF_CLOCK_EN 0x0001
+
+/* Common core status flags */
+#define SISF_BIST_DONE 0x8000
+#define SISF_BIST_ERROR 0x4000
+#define SISF_GATED_CLK 0x2000
+#define SISF_DMA64 0x1000
+#define SISF_CORE_BITS 0x0fff
+
+/* A register that is common to all cores to
+ * communicate w/PMU regarding clock control.
+ */
+#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */
+
+/* clk_ctl_st register */
+#define CCS_FORCEALP 0x00000001 /* force ALP request */
+#define CCS_FORCEHT 0x00000002 /* force HT request */
+#define CCS_FORCEILP 0x00000004 /* force ILP request */
+#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */
+#define CCS_HTAREQ 0x00000010 /* HT Avail Request */
+#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */
+#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */
+#define CCS_ERSRC_REQ_SHIFT 8
+#define CCS_ALPAVAIL 0x00010000 /* ALP is available */
+#define CCS_HTAVAIL 0x00020000 /* HT is available */
+#define CCS_BP_ON_APL 0x00040000 /* RO: running on ALP clock */
+#define CCS_BP_ON_HT 0x00080000 /* RO: running on HT clock */
+#define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */
+#define CCS_ERSRC_STS_SHIFT 24
+
+/* HT avail in chipc and pcmcia on 4328a0 */
+#define CCS0_HTAVAIL 0x00010000
+/* ALP avail in chipc and pcmcia on 4328a0 */
+#define CCS0_ALPAVAIL 0x00020000
+
+/* Not really related to SOC Interconnect, but a couple of software
+ * conventions for the use the flash space:
+ */
+
+/* Minumum amount of flash we support */
+#define FLASH_MIN 0x00020000 /* Minimum flash size */
+
+#define CC_SROM_OTP 0x800 /* SROM/OTP address space */
+
+/* gpiotimerval */
+#define GPIO_ONTIME_SHIFT 16
+
+/* Fields in clkdiv */
+#define CLKD_OTP 0x000f0000
+#define CLKD_OTP_SHIFT 16
+
+/* Package IDs */
+#define BCM4717_PKG_ID 9 /* 4717 package id */
+#define BCM4718_PKG_ID 10 /* 4718 package id */
+#define BCM43224_FAB_SMIC 0xa /* the chip is manufactured by SMIC */
+
+/* these are router chips */
+#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */
+#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */
+#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */
+
+/* dynamic clock control defines */
+#define LPOMINFREQ 25000 /* low power oscillator min */
+#define LPOMAXFREQ 43000 /* low power oscillator max */
+#define XTALMINFREQ 19800000 /* 20 MHz - 1% */
+#define XTALMAXFREQ 20200000 /* 20 MHz + 1% */
+#define PCIMINFREQ 25000000 /* 25 MHz */
+#define PCIMAXFREQ 34000000 /* 33 MHz + fudge */
+
+#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */
+#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */
+
+/* clkctl xtal what flags */
+#define XTAL 0x1 /* primary crystal oscillator (2050) */
+#define PLL 0x2 /* main chip pll */
+
+/* clkctl clk mode */
+#define CLK_FAST 0 /* force fast (pll) clock */
+#define CLK_DYNAMIC 2 /* enable dynamic clock control */
+
+/* GPIO usage priorities */
+#define GPIO_DRV_PRIORITY 0 /* Driver */
+#define GPIO_APP_PRIORITY 1 /* Application */
+#define GPIO_HI_PRIORITY 2 /* Highest priority. Ignore GPIO
+ * reservation
+ */
+
+/* GPIO pull up/down */
+#define GPIO_PULLUP 0
+#define GPIO_PULLDN 1
+
+/* GPIO event regtype */
+#define GPIO_REGEVT 0 /* GPIO register event */
+#define GPIO_REGEVT_INTMSK 1 /* GPIO register event int mask */
+#define GPIO_REGEVT_INTPOL 2 /* GPIO register event int polarity */
+
+/* device path */
+#define SI_DEVPATH_BUFSZ 16 /* min buffer size in bytes */
+
+/* SI routine enumeration: to be used by update function with multiple hooks */
+#define SI_DOATTACH 1
+#define SI_PCIDOWN 2
+#define SI_PCIUP 3
+
+/*
+ * Data structure to export all chip specific common variables
+ * public (read-only) portion of aiutils handle returned by si_attach()
+ */
+struct si_pub {
+ uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
+ uint buscorerev; /* buscore rev */
+ uint buscoreidx; /* buscore index */
+ int ccrev; /* chip common core rev */
+ u32 cccaps; /* chip common capabilities */
+ u32 cccaps_ext; /* chip common capabilities extension */
+ int pmurev; /* pmu core rev */
+ u32 pmucaps; /* pmu capabilities */
+ uint boardtype; /* board type */
+ uint boardvendor; /* board vendor */
+ uint boardflags; /* board flags */
+ uint boardflags2; /* board flags2 */
+ uint chip; /* chip number */
+ uint chiprev; /* chip revision */
+ uint chippkg; /* chip package option */
+ u32 chipst; /* chip status */
+ bool issim; /* chip is in simulation or emulation */
+ uint socirev; /* SOC interconnect rev */
+ bool pci_pr32414;
+
+};
+
+struct pci_dev;
+
+struct gpioh_item {
+ void *arg;
+ bool level;
+ void (*handler) (u32 stat, void *arg);
+ u32 event;
+ struct gpioh_item *next;
+};
+
+/* misc si info needed by some of the routines */
+struct si_info {
+ struct si_pub pub; /* back plane public state (must be first) */
+ struct pci_dev *pbus; /* handle to pci bus */
+ uint dev_coreid; /* the core provides driver functions */
+ void *intr_arg; /* interrupt callback function arg */
+ u32 (*intrsoff_fn) (void *intr_arg); /* turns chip interrupts off */
+ /* restore chip interrupts */
+ void (*intrsrestore_fn) (void *intr_arg, u32 arg);
+ /* check if interrupts are enabled */
+ bool (*intrsenabled_fn) (void *intr_arg);
+
+ struct pcicore_info *pch; /* PCI/E core handle */
+
+ struct list_head var_list; /* list of srom variables */
+
+ void __iomem *curmap; /* current regs va */
+ void __iomem *regs[SI_MAXCORES]; /* other regs va */
+
+ uint curidx; /* current core index */
+ uint numcores; /* # discovered cores */
+ uint coreid[SI_MAXCORES]; /* id of each core */
+ u32 coresba[SI_MAXCORES]; /* backplane address of each core */
+ void *regs2[SI_MAXCORES]; /* 2nd virtual address per core (usbh20) */
+ u32 coresba2[SI_MAXCORES]; /* 2nd phys address per core (usbh20) */
+ u32 coresba_size[SI_MAXCORES]; /* backplane address space size */
+ u32 coresba2_size[SI_MAXCORES]; /* second address space size */
+
+ void *curwrap; /* current wrapper va */
+ void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
+ u32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
+
+ u32 cia[SI_MAXCORES]; /* erom cia entry for each core */
+ u32 cib[SI_MAXCORES]; /* erom cia entry for each core */
+ u32 oob_router; /* oob router registers for axi */
+};
+
+/*
+ * Many of the routines below take an 'sih' handle as their first arg.
+ * Allocate this by calling si_attach(). Free it by calling si_detach().
+ * At any one time, the sih is logically focused on one particular si core
+ * (the "current core").
+ * Use si_setcore() or si_setcoreidx() to change the association to another core
+ */
+
+
+/* AMBA Interconnect exported externs */
+extern uint ai_flag(struct si_pub *sih);
+extern void ai_setint(struct si_pub *sih, int siflag);
+extern uint ai_coreidx(struct si_pub *sih);
+extern uint ai_corevendor(struct si_pub *sih);
+extern uint ai_corerev(struct si_pub *sih);
+extern bool ai_iscoreup(struct si_pub *sih);
+extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
+extern void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val);
+extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
+extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
+ uint val);
+extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
+extern void ai_core_disable(struct si_pub *sih, u32 bits);
+extern int ai_numaddrspaces(struct si_pub *sih);
+extern u32 ai_addrspace(struct si_pub *sih, uint asidx);
+extern u32 ai_addrspacesize(struct si_pub *sih, uint asidx);
+extern void ai_write_wrap_reg(struct si_pub *sih, u32 offset, u32 val);
+
+/* === exported functions === */
+extern struct si_pub *ai_attach(void __iomem *regs, struct pci_dev *sdh);
+extern void ai_detach(struct si_pub *sih);
+extern uint ai_coreid(struct si_pub *sih);
+extern uint ai_corerev(struct si_pub *sih);
+extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
+ uint val);
+extern void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val);
+extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
+extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
+extern bool ai_iscoreup(struct si_pub *sih);
+extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit);
+extern void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx);
+extern void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit);
+extern void __iomem *ai_switch_core(struct si_pub *sih, uint coreid,
+ uint *origidx, uint *intr_val);
+extern void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val);
+extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
+extern void ai_core_disable(struct si_pub *sih, u32 bits);
+extern u32 ai_alp_clock(struct si_pub *sih);
+extern u32 ai_ilp_clock(struct si_pub *sih);
+extern void ai_pci_setup(struct si_pub *sih, uint coremask);
+extern void ai_setint(struct si_pub *sih, int siflag);
+extern bool ai_backplane64(struct si_pub *sih);
+extern void ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
+ void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg);
+extern void ai_deregister_intr_callback(struct si_pub *sih);
+extern void ai_clkctl_init(struct si_pub *sih);
+extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
+extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
+extern int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on);
+extern bool ai_deviceremoved(struct si_pub *sih);
+extern u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val,
+ u8 priority);
+
+/* OTP status */
+extern bool ai_is_otp_disabled(struct si_pub *sih);
+
+/* SPROM availability */
+extern bool ai_is_sprom_available(struct si_pub *sih);
+
+/*
+ * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
+ * The returned path is NULL terminated and has trailing '/'.
+ * Return 0 on success, nonzero otherwise.
+ */
+extern int ai_devpath(struct si_pub *sih, char *path, int size);
+
+extern void ai_pci_sleep(struct si_pub *sih);
+extern void ai_pci_down(struct si_pub *sih);
+extern void ai_pci_up(struct si_pub *sih);
+extern int ai_pci_fixcfg(struct si_pub *sih);
+
+extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on);
+/* Enable Ex-PA for 4313 */
+extern void ai_epa_4313war(struct si_pub *sih);
+
+#endif /* _BRCM_AIUTILS_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index fcaf61e3b13..7f27dbdb6b6 100644
--- a/drivers/staging/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -22,19 +22,32 @@
#include "main.h"
#include "ampdu.h"
-#define AMPDU_MAX_MPDU 32 /* max number of mpdus in an ampdu */
-#define AMPDU_NUM_MPDU_LEGACY 16 /* max number of mpdus in an ampdu to a legacy */
-#define AMPDU_TX_BA_MAX_WSIZE 64 /* max Tx ba window size (in pdu) */
-#define AMPDU_TX_BA_DEF_WSIZE 64 /* default Tx ba window size (in pdu) */
-#define AMPDU_RX_BA_DEF_WSIZE 64 /* max Rx ba window size (in pdu) */
-#define AMPDU_RX_BA_MAX_WSIZE 64 /* default Rx ba window size (in pdu) */
-#define AMPDU_MAX_DUR 5 /* max dur of tx ampdu (in msec) */
-#define AMPDU_DEF_RETRY_LIMIT 5 /* default tx retry limit */
-#define AMPDU_DEF_RR_RETRY_LIMIT 2 /* default tx retry limit at reg rate */
-#define AMPDU_DEF_TXPKT_WEIGHT 2 /* default weight of ampdu in txfifo */
-#define AMPDU_DEF_FFPLD_RSVD 2048 /* default ffpld reserved bytes */
-#define AMPDU_INI_FREE 10 /* # of inis to be freed on detach */
-#define AMPDU_SCB_MAX_RELEASE 20 /* max # of mpdus released at a time */
+/* max number of mpdus in an ampdu */
+#define AMPDU_MAX_MPDU 32
+/* max number of mpdus in an ampdu to a legacy */
+#define AMPDU_NUM_MPDU_LEGACY 16
+/* max Tx ba window size (in pdu) */
+#define AMPDU_TX_BA_MAX_WSIZE 64
+/* default Tx ba window size (in pdu) */
+#define AMPDU_TX_BA_DEF_WSIZE 64
+/* default Rx ba window size (in pdu) */
+#define AMPDU_RX_BA_DEF_WSIZE 64
+/* max Rx ba window size (in pdu) */
+#define AMPDU_RX_BA_MAX_WSIZE 64
+/* max dur of tx ampdu (in msec) */
+#define AMPDU_MAX_DUR 5
+/* default tx retry limit */
+#define AMPDU_DEF_RETRY_LIMIT 5
+/* default tx retry limit at reg rate */
+#define AMPDU_DEF_RR_RETRY_LIMIT 2
+/* default weight of ampdu in txfifo */
+#define AMPDU_DEF_TXPKT_WEIGHT 2
+/* default ffpld reserved bytes */
+#define AMPDU_DEF_FFPLD_RSVD 2048
+/* # of inis to be freed on detach */
+#define AMPDU_INI_FREE 10
+/* max # of mpdus released at a time */
+#define AMPDU_SCB_MAX_RELEASE 20
#define NUM_FFPLD_FIFO 4 /* number of fifo concerned by pre-loading */
#define FFPLD_TX_MAX_UNFL 200 /* default value of the average number of ampdu
@@ -47,6 +60,11 @@
* accumulate between resets.
*/
+#define AMPDU_DELIMITER_LEN 4
+
+/* max allowed number of mpdus in an ampdu (2 streams) */
+#define AMPDU_NUM_MPDU 16
+
#define TX_SEQ_TO_INDEX(seq) ((seq) % AMPDU_TX_BA_MAX_WSIZE)
/* max possible overhead per mpdu in the ampdu; 3 is for roundup if needed */
@@ -54,49 +72,77 @@
AMPDU_DELIMITER_LEN + 3\
+ DOT11_A4_HDR_LEN + DOT11_QOS_LEN + DOT11_IV_MAX_LEN)
+/* modulo add/sub, bound = 2^k */
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
+
/* structure to hold tx fifo information and pre-loading state
* counters specific to tx underflows of ampdus
* some counters might be redundant with the ones in wlc or ampdu structures.
* This allows to maintain a specific state independently of
* how often and/or when the wlc counters are updated.
+ *
+ * ampdu_pld_size: number of bytes to be pre-loaded
+ * mcs2ampdu_table: per-mcs max # of mpdus in an ampdu
+ * prev_txfunfl: num of underflows last read from the HW macstats counter
+ * accum_txfunfl: num of underflows since we modified pld params
+ * accum_txampdu: num of tx ampdu since we modified pld params
+ * prev_txampdu: previous reading of tx ampdu
+ * dmaxferrate: estimated dma avg xfer rate in kbits/sec
*/
struct brcms_fifo_info {
- u16 ampdu_pld_size; /* number of bytes to be pre-loaded */
- u8 mcs2ampdu_table[FFPLD_MAX_MCS + 1]; /* per-mcs max # of mpdus in an ampdu */
- u16 prev_txfunfl; /* num of underflows last read from the HW macstats counter */
- u32 accum_txfunfl; /* num of underflows since we modified pld params */
- u32 accum_txampdu; /* num of tx ampdu since we modified pld params */
- u32 prev_txampdu; /* previous reading of tx ampdu */
- u32 dmaxferrate; /* estimated dma avg xfer rate in kbits/sec */
+ u16 ampdu_pld_size;
+ u8 mcs2ampdu_table[FFPLD_MAX_MCS + 1];
+ u16 prev_txfunfl;
+ u32 accum_txfunfl;
+ u32 accum_txampdu;
+ u32 prev_txampdu;
+ u32 dmaxferrate;
};
-/* AMPDU module specific state */
+/* AMPDU module specific state
+ *
+ * wlc: pointer to main wlc structure
+ * scb_handle: scb cubby handle to retrieve data from scb
+ * ini_enable: per-tid initiator enable/disable of ampdu
+ * ba_tx_wsize: Tx ba window size (in pdu)
+ * ba_rx_wsize: Rx ba window size (in pdu)
+ * retry_limit: mpdu transmit retry limit
+ * rr_retry_limit: mpdu transmit retry limit at regular rate
+ * retry_limit_tid: per-tid mpdu transmit retry limit
+ * rr_retry_limit_tid: per-tid mpdu transmit retry limit at regular rate
+ * mpdu_density: min mpdu spacing (0-7) ==> 2^(x-1)/8 usec
+ * max_pdu: max pdus allowed in ampdu
+ * dur: max duration of an ampdu (in msec)
+ * txpkt_weight: weight of ampdu in txfifo; reduces rate lag
+ * rx_factor: maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes
+ * ffpld_rsvd: number of bytes to reserve for preload
+ * max_txlen: max size of ampdu per mcs, bw and sgi
+ * mfbr: enable multiple fallback rate
+ * tx_max_funl: underflows should be kept such that
+ * (tx_max_funfl*underflows) < tx frames
+ * fifo_tb: table of fifo infos
+ */
struct ampdu_info {
- struct brcms_c_info *wlc; /* pointer to main wlc structure */
- int scb_handle; /* scb cubby handle to retrieve data from scb */
- u8 ini_enable[AMPDU_MAX_SCB_TID]; /* per-tid initiator enable/disable of ampdu */
- u8 ba_tx_wsize; /* Tx ba window size (in pdu) */
- u8 ba_rx_wsize; /* Rx ba window size (in pdu) */
- u8 retry_limit; /* mpdu transmit retry limit */
- u8 rr_retry_limit; /* mpdu transmit retry limit at regular rate */
- u8 retry_limit_tid[AMPDU_MAX_SCB_TID]; /* per-tid mpdu transmit retry limit */
- /* per-tid mpdu transmit retry limit at regular rate */
+ struct brcms_c_info *wlc;
+ int scb_handle;
+ u8 ini_enable[AMPDU_MAX_SCB_TID];
+ u8 ba_tx_wsize;
+ u8 ba_rx_wsize;
+ u8 retry_limit;
+ u8 rr_retry_limit;
+ u8 retry_limit_tid[AMPDU_MAX_SCB_TID];
u8 rr_retry_limit_tid[AMPDU_MAX_SCB_TID];
- u8 mpdu_density; /* min mpdu spacing (0-7) ==> 2^(x-1)/8 usec */
- s8 max_pdu; /* max pdus allowed in ampdu */
- u8 dur; /* max duration of an ampdu (in msec) */
- u8 txpkt_weight; /* weight of ampdu in txfifo; reduces rate lag */
- u8 rx_factor; /* maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes */
- u32 ffpld_rsvd; /* number of bytes to reserve for preload */
- u32 max_txlen[MCS_TABLE_SIZE][2][2]; /* max size of ampdu per mcs, bw and sgi */
- void *ini_free[AMPDU_INI_FREE]; /* array of ini's to be freed on detach */
- bool mfbr; /* enable multiple fallback rate */
- u32 tx_max_funl; /* underflows should be kept such that
- * (tx_max_funfl*underflows) < tx frames
- */
- /* table of fifo infos */
+ u8 mpdu_density;
+ s8 max_pdu;
+ u8 dur;
+ u8 txpkt_weight;
+ u8 rx_factor;
+ u32 ffpld_rsvd;
+ u32 max_txlen[MCS_TABLE_SIZE][2][2];
+ bool mfbr;
+ u32 tx_max_funl;
struct brcms_fifo_info fifo_tb[NUM_FFPLD_FIFO];
-
};
/* used for flushing ampdu packets */
@@ -105,32 +151,75 @@ struct cb_del_ampdu_pars {
u16 tid;
};
-#define AMPDU_CLEANUPFLAG_RX (0x1)
-#define AMPDU_CLEANUPFLAG_TX (0x2)
+static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
+{
+ u32 rate, mcs;
-#define SCB_AMPDU_CUBBY(ampdu, scb) (&(scb->scb_ampdu))
-#define SCB_AMPDU_INI(scb_ampdu, tid) (&(scb_ampdu->ini[tid]))
+ for (mcs = 0; mcs < MCS_TABLE_SIZE; mcs++) {
+ /* rate is in Kbps; dur is in msec ==> len = (rate * dur) / 8 */
+ /* 20MHz, No SGI */
+ rate = mcs_2_rate(mcs, false, false);
+ ampdu->max_txlen[mcs][0][0] = (rate * dur) >> 3;
+ /* 40 MHz, No SGI */
+ rate = mcs_2_rate(mcs, true, false);
+ ampdu->max_txlen[mcs][1][0] = (rate * dur) >> 3;
+ /* 20MHz, SGI */
+ rate = mcs_2_rate(mcs, false, true);
+ ampdu->max_txlen[mcs][0][1] = (rate * dur) >> 3;
+ /* 40 MHz, SGI */
+ rate = mcs_2_rate(mcs, true, true);
+ ampdu->max_txlen[mcs][1][1] = (rate * dur) >> 3;
+ }
+}
-static void brcms_c_ffpld_init(struct ampdu_info *ampdu);
-static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int f);
-static void brcms_c_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f);
+static bool brcms_c_ampdu_cap(struct ampdu_info *ampdu)
+{
+ if (BRCMS_PHY_11N_CAP(ampdu->wlc->band))
+ return true;
+ else
+ return false;
+}
-static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu,
- u8 dur);
-static void brcms_c_scb_ampdu_update_config(struct ampdu_info *ampdu,
- struct scb *scb);
-static void brcms_c_scb_ampdu_update_config_all(struct ampdu_info *ampdu);
+static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on)
+{
+ struct brcms_c_info *wlc = ampdu->wlc;
-#define brcms_c_ampdu_txflowcontrol(a, b, c) do {} while (0)
+ wlc->pub->_ampdu = false;
-static void
-brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu,
- struct scb *scb,
- struct sk_buff *p, struct tx_status *txs,
- u32 frmtxstatus, u32 frmtxstatus2);
+ if (on) {
+ if (!(wlc->pub->_n_enab & SUPPORT_11N)) {
+ wiphy_err(ampdu->wlc->wiphy, "wl%d: driver not "
+ "nmode enabled\n", wlc->pub->unit);
+ return -ENOTSUPP;
+ }
+ if (!brcms_c_ampdu_cap(ampdu)) {
+ wiphy_err(ampdu->wlc->wiphy, "wl%d: device not "
+ "ampdu capable\n", wlc->pub->unit);
+ return -ENOTSUPP;
+ }
+ wlc->pub->_ampdu = on;
+ }
+
+ return 0;
+}
+
+static void brcms_c_ffpld_init(struct ampdu_info *ampdu)
+{
+ int i, j;
+ struct brcms_fifo_info *fifo;
+
+ for (j = 0; j < NUM_FFPLD_FIFO; j++) {
+ fifo = (ampdu->fifo_tb + j);
+ fifo->ampdu_pld_size = 0;
+ for (i = 0; i <= FFPLD_MAX_MCS; i++)
+ fifo->mcs2ampdu_table[i] = 255;
+ fifo->dmaxferrate = 0;
+ fifo->accum_txampdu = 0;
+ fifo->prev_txfunfl = 0;
+ fifo->accum_txfunfl = 0;
-static bool brcms_c_ampdu_cap(struct ampdu_info *ampdu);
-static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on);
+ }
+}
struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
{
@@ -138,11 +227,9 @@ struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
int i;
ampdu = kzalloc(sizeof(struct ampdu_info), GFP_ATOMIC);
- if (!ampdu) {
- wiphy_err(wlc->wiphy, "wl%d: brcms_c_ampdu_attach: out of mem"
- "\n", wlc->pub->unit);
+ if (!ampdu)
return NULL;
- }
+
ampdu->wlc = wlc;
for (i = 0; i < AMPDU_MAX_SCB_TID; i++)
@@ -163,7 +250,10 @@ struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
ampdu->txpkt_weight = AMPDU_DEF_TXPKT_WEIGHT;
ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD;
- /* bump max ampdu rcv size to 64k for all 11n devices except 4321A0 and 4321A1 */
+ /*
+ * bump max ampdu rcv size to 64k for all 11n
+ * devices except 4321A0 and 4321A1
+ */
if (BRCMS_ISNPHY(wlc->band) && NREV_LT(wlc->band->phyrev, 2))
ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_32K;
else
@@ -189,27 +279,16 @@ struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
void brcms_c_ampdu_detach(struct ampdu_info *ampdu)
{
- int i;
-
- if (!ampdu)
- return;
-
- /* free all ini's which were to be freed on callbacks which were never called */
- for (i = 0; i < AMPDU_INI_FREE; i++) {
- kfree(ampdu->ini_free[i]);
- }
-
- brcms_c_module_unregister(ampdu->wlc->pub, "ampdu", ampdu);
kfree(ampdu);
}
static void brcms_c_scb_ampdu_update_config(struct ampdu_info *ampdu,
struct scb *scb)
{
- struct scb_ampdu *scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
+ struct scb_ampdu *scb_ampdu = &scb->scb_ampdu;
int i;
- scb_ampdu->max_pdu = (u8) ampdu->wlc->pub->tunables->ampdunummpdu;
+ scb_ampdu->max_pdu = AMPDU_NUM_MPDU;
/* go back to legacy size if some preloading is occurring */
for (i = 0; i < NUM_FFPLD_FIFO; i++) {
@@ -221,7 +300,8 @@ static void brcms_c_scb_ampdu_update_config(struct ampdu_info *ampdu,
if (ampdu->max_pdu != AUTO)
scb_ampdu->max_pdu = (u8) ampdu->max_pdu;
- scb_ampdu->release = min_t(u8, scb_ampdu->max_pdu, AMPDU_SCB_MAX_RELEASE);
+ scb_ampdu->release = min_t(u8, scb_ampdu->max_pdu,
+ AMPDU_SCB_MAX_RELEASE);
if (scb_ampdu->max_rx_ampdu_bytes)
scb_ampdu->release = min_t(u8, scb_ampdu->release,
@@ -234,24 +314,38 @@ static void brcms_c_scb_ampdu_update_config(struct ampdu_info *ampdu,
static void brcms_c_scb_ampdu_update_config_all(struct ampdu_info *ampdu)
{
- brcms_c_scb_ampdu_update_config(ampdu, ampdu->wlc->pub->global_scb);
+ brcms_c_scb_ampdu_update_config(ampdu, &ampdu->wlc->pri_scb);
}
-static void brcms_c_ffpld_init(struct ampdu_info *ampdu)
+static void brcms_c_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
{
- int i, j;
- struct brcms_fifo_info *fifo;
+ int i;
+ u32 phy_rate, dma_rate, tmp;
+ u8 max_mpdu;
+ struct brcms_fifo_info *fifo = (ampdu->fifo_tb + f);
- for (j = 0; j < NUM_FFPLD_FIFO; j++) {
- fifo = (ampdu->fifo_tb + j);
- fifo->ampdu_pld_size = 0;
- for (i = 0; i <= FFPLD_MAX_MCS; i++)
- fifo->mcs2ampdu_table[i] = 255;
- fifo->dmaxferrate = 0;
- fifo->accum_txampdu = 0;
- fifo->prev_txfunfl = 0;
- fifo->accum_txfunfl = 0;
+ /* recompute the dma rate */
+ /* note : we divide/multiply by 100 to avoid integer overflows */
+ max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
+ AMPDU_NUM_MPDU_LEGACY);
+ phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
+ dma_rate =
+ (((phy_rate / 100) *
+ (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
+ / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
+ fifo->dmaxferrate = dma_rate;
+ /* fill up the mcs2ampdu table; do not recalc the last mcs */
+ dma_rate = dma_rate >> 7;
+ for (i = 0; i < FFPLD_MAX_MCS; i++) {
+ /* shifting to keep it within integer range */
+ phy_rate = mcs_2_rate(i, true, false) >> 7;
+ if (phy_rate > dma_rate) {
+ tmp = ((fifo->ampdu_pld_size * phy_rate) /
+ ((phy_rate - dma_rate) * FFPLD_MPDU_SIZE)) + 1;
+ tmp = min_t(u32, tmp, 255);
+ fifo->mcs2ampdu_table[i] = (u8) tmp;
+ }
}
}
@@ -264,7 +358,7 @@ static void brcms_c_ffpld_init(struct ampdu_info *ampdu)
static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
{
struct ampdu_info *ampdu = wlc->ampdu;
- u32 phy_rate = MCS_RATE(FFPLD_MAX_MCS, true, false);
+ u32 phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
u32 txunfl_ratio;
u8 max_mpdu;
u32 current_ampdu_cnt = 0;
@@ -275,7 +369,7 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
u16 cur_txunfl;
/* return if we got here for a different reason than underflows */
- cur_txunfl = brcms_c_read_shm(wlc,
+ cur_txunfl = brcms_b_read_shm(wlc->hw,
M_UCODE_MACSTAT +
offsetof(struct macstat, txfunfl[fid]));
new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
@@ -289,7 +383,7 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
return 1;
/* check if fifo is big enough */
- if (brcms_c_xmtfifo_sz_get(wlc, fid, &xmtfifo_sz))
+ if (brcms_b_xmtfifo_sz_get(wlc->hw, fid, &xmtfifo_sz))
return -1;
if ((TXFIFO_SIZE_UNIT * (u32) xmtfifo_sz) <= ampdu->ffpld_rsvd)
@@ -317,13 +411,13 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
txunfl_ratio = current_ampdu_cnt / fifo->accum_txfunfl;
if (txunfl_ratio > ampdu->tx_max_funl) {
- if (current_ampdu_cnt >= FFPLD_MAX_AMPDU_CNT) {
+ if (current_ampdu_cnt >= FFPLD_MAX_AMPDU_CNT)
fifo->accum_txfunfl = 0;
- }
+
return 0;
}
- max_mpdu =
- min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS], AMPDU_NUM_MPDU_LEGACY);
+ max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
+ AMPDU_NUM_MPDU_LEGACY);
/* In case max value max_pdu is already lower than
the fifo depth, there is nothing more we can do.
@@ -345,11 +439,12 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
brcms_c_scb_ampdu_update_config_all(ampdu);
/*
- compute a new dma xfer rate for max_mpdu @ max mcs.
- This is the minimum dma rate that
- can achieve no underflow condition for the current mpdu size.
+ * compute a new dma xfer rate for max_mpdu @ max mcs.
+ * This is the minimum dma rate that can achieve no
+ * underflow condition for the current mpdu size.
+ *
+ * note : we divide/multiply by 100 to avoid integer overflows
*/
- /* note : we divide/multiply by 100 to avoid integer overflows */
fifo->dmaxferrate =
(((phy_rate / 100) *
(max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
@@ -379,38 +474,6 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
return 0;
}
-static void brcms_c_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
-{
- int i;
- u32 phy_rate, dma_rate, tmp;
- u8 max_mpdu;
- struct brcms_fifo_info *fifo = (ampdu->fifo_tb + f);
-
- /* recompute the dma rate */
- /* note : we divide/multiply by 100 to avoid integer overflows */
- max_mpdu =
- min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS], AMPDU_NUM_MPDU_LEGACY);
- phy_rate = MCS_RATE(FFPLD_MAX_MCS, true, false);
- dma_rate =
- (((phy_rate / 100) *
- (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
- / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
- fifo->dmaxferrate = dma_rate;
-
- /* fill up the mcs2ampdu table; do not recalc the last mcs */
- dma_rate = dma_rate >> 7;
- for (i = 0; i < FFPLD_MAX_MCS; i++) {
- /* shifting to keep it within integer range */
- phy_rate = MCS_RATE(i, true, false) >> 7;
- if (phy_rate > dma_rate) {
- tmp = ((fifo->ampdu_pld_size * phy_rate) /
- ((phy_rate - dma_rate) * FFPLD_MPDU_SIZE)) + 1;
- tmp = min_t(u32, tmp, 255);
- fifo->mcs2ampdu_table[i] = (u8) tmp;
- }
- }
-}
-
void
brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
u8 ba_wsize, /* negotiated ba window size (in pdu) */
@@ -419,8 +482,8 @@ brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
struct scb_ampdu *scb_ampdu;
struct scb_ampdu_tid_ini *ini;
struct ampdu_info *ampdu = wlc->ampdu;
- struct scb *scb = wlc->pub->global_scb;
- scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
+ struct scb *scb = &wlc->pri_scb;
+ scb_ampdu = &scb->scb_ampdu;
if (!ampdu->ini_enable[tid]) {
wiphy_err(ampdu->wlc->wiphy, "%s: Rejecting tid %d\n",
@@ -428,7 +491,7 @@ brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
return;
}
- ini = SCB_AMPDU_INI(scb_ampdu, tid);
+ ini = &scb_ampdu->ini[tid];
ini->tid = tid;
ini->scb = scb_ampdu->scb;
ini->ba_wsize = ba_wsize;
@@ -460,8 +523,8 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
struct scb_ampdu_tid_ini *ini;
u8 mcs = 0;
bool use_rts = false, use_cts = false;
- ratespec_t rspec = 0, rspec_fallback = 0;
- ratespec_t rts_rspec = 0, rts_rspec_fallback = 0;
+ u32 rspec = 0, rspec_fallback = 0;
+ u32 rts_rspec = 0, rts_rspec_fallback = 0;
u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
struct ieee80211_rts *rts;
u8 rr_retry_limit;
@@ -479,17 +542,16 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
f = ampdu->fifo_tb + prio2fifo[tid];
- scb = wlc->pub->global_scb;
- scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
+ scb = &wlc->pri_scb;
+ scb_ampdu = &scb->scb_ampdu;
ini = &scb_ampdu->ini[tid];
/* Let pressure continue to build ... */
qlen = pktq_plen(&qi->q, prec);
if (ini->tx_in_transit > 0 &&
- qlen < min(scb_ampdu->max_pdu, ini->ba_wsize)) {
+ qlen < min(scb_ampdu->max_pdu, ini->ba_wsize))
/* Collect multiple MPDU's to be sent in the next AMPDU */
return -EBUSY;
- }
/* at this point we intend to transmit an AMPDU */
rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
@@ -614,7 +676,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
}
is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
- sgi = PLCP3_ISSGI(plcp3) ? 1 : 0;
+ sgi = plcp3_issgi(plcp3) ? 1 : 0;
mcs = plcp0 & ~MIMO_PLCP_40MHZ;
max_ampdu_bytes =
min(scb_ampdu->max_rx_ampdu_bytes,
@@ -622,7 +684,8 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
if (is40)
mimo_ctlchbw =
- CHSPEC_SB_UPPER(BRCMS_BAND_PI_RADIO_CHANSPEC)
+ CHSPEC_SB_UPPER(wlc_phy_chanspec_get(
+ wlc->band->pi))
? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;
/* rebuild the rspec and rspec_fallback */
@@ -632,9 +695,8 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);
if (fbr_iscck) /* CCK */
- rspec_fallback =
- CCK_RSPEC(CCK_PHY2MAC_RATE
- (txh->FragPLCPFallback[0]));
+ rspec_fallback = cck_rspec(cck_phy2mac_rate
+ (txh->FragPLCPFallback[0]));
else { /* MIMO */
rspec_fallback = RSPEC_MIMORATE;
rspec_fallback |=
@@ -657,7 +719,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
/* if (first mpdu for host agg) */
/* test whether to add more */
- if ((MCS_RATE(mcs, true, false) >= f->dmaxferrate) &&
+ if ((mcs_2_rate(mcs, true, false) >= f->dmaxferrate) &&
(count == f->mcs2ampdu_table[mcs])) {
BCMMSG(wlc->wiphy, "wl%d: PR 37644: stopping"
" ampdu at %d for mcs %d\n",
@@ -665,13 +727,16 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
break;
}
- if (count == scb_ampdu->max_pdu) {
+ if (count == scb_ampdu->max_pdu)
break;
- }
- /* check to see if the next pkt is a candidate for aggregation */
+ /*
+ * check to see if the next pkt is
+ * a candidate for aggregation
+ */
p = pktq_ppeek(&qi->q, prec);
- tx_info = IEEE80211_SKB_CB(p); /* tx_info must be checked with current p */
+ /* tx_info must be checked with current p */
+ tx_info = IEEE80211_SKB_CB(p);
if (p) {
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
@@ -686,8 +751,11 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
continue;
}
- /* check if there are enough descriptors available */
- if (TXAVAIL(wlc, fifo) <= (seg_cnt + 1)) {
+ /*
+ * check if there are enough
+ * descriptors available
+ */
+ if (*wlc->core->txavail[fifo] <= seg_cnt + 1) {
wiphy_err(wiphy, "%s: No fifo space "
"!!\n", __func__);
p = NULL;
@@ -745,7 +813,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
}
/* set the preload length */
- if (MCS_RATE(mcs, true, false) >= f->dmaxferrate) {
+ if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
dma_len = min(dma_len, f->ampdu_pld_size);
txh->PreloadSize = cpu_to_le16(dma_len);
} else
@@ -797,10 +865,10 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
/* inform rate_sel if it this is a rate probe pkt */
frameid = le16_to_cpu(txh->TxFrameID);
- if (frameid & TXFID_RATE_PROBE_MASK) {
+ if (frameid & TXFID_RATE_PROBE_MASK)
wiphy_err(wiphy, "%s: XXX what to do with "
"TXFID_RATE_PROBE_MASK!?\n", __func__);
- }
+
for (i = 0; i < count; i++)
brcms_c_txfifo(wlc, fifo, pkt[i], i == (count - 1),
ampdu->txpkt_weight);
@@ -810,62 +878,6 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
return err;
}
-void
-brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
- struct sk_buff *p, struct tx_status *txs)
-{
- struct scb_ampdu *scb_ampdu;
- struct brcms_c_info *wlc = ampdu->wlc;
- struct scb_ampdu_tid_ini *ini;
- u32 s1 = 0, s2 = 0;
- struct ieee80211_tx_info *tx_info;
-
- tx_info = IEEE80211_SKB_CB(p);
-
- /* BMAC_NOTE: For the split driver, second level txstatus comes later
- * So if the ACK was received then wait for the second level else just
- * call the first one
- */
- if (txs->status & TX_STATUS_ACK_RCV) {
- u8 status_delay = 0;
-
- /* wait till the next 8 bytes of txstatus is available */
- while (((s1 = R_REG(&wlc->regs->frmtxstatus)) & TXS_V) == 0) {
- udelay(1);
- status_delay++;
- if (status_delay > 10) {
- return; /* error condition */
- }
- }
-
- s2 = R_REG(&wlc->regs->frmtxstatus2);
- }
-
- if (likely(scb)) {
- scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
- ini = SCB_AMPDU_INI(scb_ampdu, p->priority);
- brcms_c_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
- } else {
- /* loop through all pkts and free */
- u8 queue = txs->frameid & TXFID_QUEUE_MASK;
- struct d11txh *txh;
- u16 mcl;
- while (p) {
- tx_info = IEEE80211_SKB_CB(p);
- txh = (struct d11txh *) p->data;
- mcl = le16_to_cpu(txh->MacTxControlLow);
- brcmu_pkt_buf_free_skb(p);
- /* break out if last packet of ampdu */
- if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
- TXC_AMPDU_LAST)
- break;
- p = GETNEXTTXP(wlc, queue);
- }
- brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
- }
- brcms_c_ampdu_txflowcontrol(wlc, scb_ampdu, ini);
-}
-
static void
brcms_c_ampdu_rate_status(struct brcms_c_info *wlc,
struct ieee80211_tx_info *tx_info,
@@ -881,8 +893,6 @@ brcms_c_ampdu_rate_status(struct brcms_c_info *wlc,
}
}
-#define SHORTNAME "AMPDU status"
-
static void
brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
struct sk_buff *p, struct tx_status *txs,
@@ -912,10 +922,10 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
memset(hole, 0, sizeof(hole));
#endif
- scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
+ scb_ampdu = &scb->scb_ampdu;
tid = (u8) (p->priority);
- ini = SCB_AMPDU_INI(scb_ampdu, tid);
+ ini = &scb_ampdu->ini[tid];
retry_limit = ampdu->retry_limit_tid[tid];
rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
memset(bitmap, 0, sizeof(bitmap));
@@ -923,9 +933,8 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
supr_status = txs->status & TX_STATUS_SUPR_MASK;
if (txs->status & TX_STATUS_ACK_RCV) {
- if (TX_STATUS_SUPR_UF == supr_status) {
+ if (TX_STATUS_SUPR_UF == supr_status)
update_rate = false;
- }
WARN_ON(!(txs->status & TX_STATUS_INTERMEDIATE));
start_seq = txs->sequence >> SEQNUM_SHIFT;
@@ -967,15 +976,17 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
supr_status == TX_STATUS_SUPR_EXPTIME) {
retry = false;
} else if (supr_status == TX_STATUS_SUPR_EXPTIME) {
- /* TX underflow : try tuning pre-loading or ampdu size */
+ /* TX underflow:
+ * try tuning pre-loading or ampdu size
+ */
} else if (supr_status == TX_STATUS_SUPR_FRAG) {
- /* if there were underflows, but pre-loading is not active,
- notify rate adaptation.
+ /*
+ * if there were underflows, but pre-loading
+ * is not active, notify rate adaptation.
*/
if (brcms_c_ffpld_check_txfunfl(wlc,
- prio2fifo[tid]) > 0) {
+ prio2fifo[tid]) > 0)
tx_error = true;
- }
}
} else if (txs->phyerr) {
update_rate = false;
@@ -983,7 +994,7 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
"error (0x%x)\n", wlc->pub->unit,
txs->phyerr);
- if (WL_ERROR_ON()) {
+ if (brcm_msg_level & LOG_ERROR_VAL) {
brcmu_prpkt("txpkt (AMPDU)", p);
brcms_c_print_txdesc((struct d11txh *) p->data);
}
@@ -1019,7 +1030,10 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
ini->tx_in_transit--;
ini->txretry[index] = 0;
- /* ampdu_ack_len: number of acked aggregated frames */
+ /*
+ * ampdu_ack_len:
+ * number of acked aggregated frames
+ */
/* ampdu_len: number of aggregated frames */
brcms_c_ampdu_rate_status(wlc, tx_info, txs,
mcs);
@@ -1044,11 +1058,14 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
if (retry && (txrate[0].count < (int)retry_limit)) {
ini->txretry[index]++;
ini->tx_in_transit--;
- /* Use high prededence for retransmit to give some punch */
+ /*
+ * Use high prededence for retransmit to
+ * give some punch
+ */
/* brcms_c_txq_enq(wlc, scb, p,
* BRCMS_PRIO_TO_PREC(tid)); */
brcms_c_txq_enq(wlc, scb, p,
- BRCMS_PRIO_TO_HI_PREC(tid));
+ BRCMS_PRIO_TO_HI_PREC(tid));
} else {
/* Retry timeout */
ini->tx_in_transit--;
@@ -1060,7 +1077,7 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
skb_pull(p, D11_PHY_HDR_LEN);
skb_pull(p, D11_TXH_LEN);
wiphy_err(wiphy, "%s: BA Timeout, seq %d, in_"
- "transit %d\n", SHORTNAME, seq,
+ "transit %d\n", "AMPDU status", seq,
ini->tx_in_transit);
ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
p);
@@ -1073,7 +1090,7 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
TXC_AMPDU_LAST)
break;
- p = GETNEXTTXP(wlc, queue);
+ p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
}
brcms_c_send_q(wlc);
@@ -1083,55 +1100,58 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
}
-static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on)
+void
+brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+ struct sk_buff *p, struct tx_status *txs)
{
+ struct scb_ampdu *scb_ampdu;
struct brcms_c_info *wlc = ampdu->wlc;
+ struct scb_ampdu_tid_ini *ini;
+ u32 s1 = 0, s2 = 0;
+ struct ieee80211_tx_info *tx_info;
- wlc->pub->_ampdu = false;
-
- if (on) {
- if (!N_ENAB(wlc->pub)) {
- wiphy_err(ampdu->wlc->wiphy, "wl%d: driver not "
- "nmode enabled\n", wlc->pub->unit);
- return -ENOTSUPP;
- }
- if (!brcms_c_ampdu_cap(ampdu)) {
- wiphy_err(ampdu->wlc->wiphy, "wl%d: device not "
- "ampdu capable\n", wlc->pub->unit);
- return -ENOTSUPP;
- }
- wlc->pub->_ampdu = on;
- }
+ tx_info = IEEE80211_SKB_CB(p);
- return 0;
-}
+ /* BMAC_NOTE: For the split driver, second level txstatus comes later
+ * So if the ACK was received then wait for the second level else just
+ * call the first one
+ */
+ if (txs->status & TX_STATUS_ACK_RCV) {
+ u8 status_delay = 0;
-static bool brcms_c_ampdu_cap(struct ampdu_info *ampdu)
-{
- if (BRCMS_PHY_11N_CAP(ampdu->wlc->band))
- return true;
- else
- return false;
-}
+ /* wait till the next 8 bytes of txstatus is available */
+ while (((s1 = R_REG(&wlc->regs->frmtxstatus)) & TXS_V) == 0) {
+ udelay(1);
+ status_delay++;
+ if (status_delay > 10)
+ return; /* error condition */
+ }
-static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
-{
- u32 rate, mcs;
+ s2 = R_REG(&wlc->regs->frmtxstatus2);
+ }
- for (mcs = 0; mcs < MCS_TABLE_SIZE; mcs++) {
- /* rate is in Kbps; dur is in msec ==> len = (rate * dur) / 8 */
- /* 20MHz, No SGI */
- rate = MCS_RATE(mcs, false, false);
- ampdu->max_txlen[mcs][0][0] = (rate * dur) >> 3;
- /* 40 MHz, No SGI */
- rate = MCS_RATE(mcs, true, false);
- ampdu->max_txlen[mcs][1][0] = (rate * dur) >> 3;
- /* 20MHz, SGI */
- rate = MCS_RATE(mcs, false, true);
- ampdu->max_txlen[mcs][0][1] = (rate * dur) >> 3;
- /* 40 MHz, SGI */
- rate = MCS_RATE(mcs, true, true);
- ampdu->max_txlen[mcs][1][1] = (rate * dur) >> 3;
+ if (scb) {
+ scb_ampdu = &scb->scb_ampdu;
+ ini = &scb_ampdu->ini[p->priority];
+ brcms_c_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
+ } else {
+ /* loop through all pkts and free */
+ u8 queue = txs->frameid & TXFID_QUEUE_MASK;
+ struct d11txh *txh;
+ u16 mcl;
+ while (p) {
+ tx_info = IEEE80211_SKB_CB(p);
+ txh = (struct d11txh *) p->data;
+ mcl = le16_to_cpu(txh->MacTxControlLow);
+ brcmu_pkt_buf_free_skb(p);
+ /* break out if last packet of ampdu */
+ if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
+ TXC_AMPDU_LAST)
+ break;
+ p = dma_getnexttxp(wlc->hw->di[queue],
+ DMA_RANGE_TRANSMITTED);
+ }
+ brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
}
}
@@ -1142,7 +1162,7 @@ void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc)
/* driver needs to write the ta in the template; ta is at offset 16 */
memset(template, 0, sizeof(template));
memcpy(template, wlc->pub->cur_etheraddr, ETH_ALEN);
- brcms_c_write_template_ram(wlc, (T_BA_TPL_BASE + 16),
+ brcms_b_write_template_ram(wlc->hw, (T_BA_TPL_BASE + 16),
(T_RAM_ACCESS_SZ * 2),
template);
}
@@ -1156,14 +1176,17 @@ void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu)
{
struct brcms_c_info *wlc = ampdu->wlc;
- /* Extend ucode internal watchdog timer to match larger received frames */
+ /*
+ * Extend ucode internal watchdog timer to
+ * match larger received frames
+ */
if ((ampdu->rx_factor & IEEE80211_HT_AMPDU_PARM_FACTOR) ==
IEEE80211_HT_MAX_AMPDU_64K) {
- brcms_c_write_shm(wlc, M_MIMO_MAXSYM, MIMO_MAXSYM_MAX);
- brcms_c_write_shm(wlc, M_WATCHDOG_8TU, WATCHDOG_8TU_MAX);
+ brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_MAX);
+ brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_MAX);
} else {
- brcms_c_write_shm(wlc, M_MIMO_MAXSYM, MIMO_MAXSYM_DEF);
- brcms_c_write_shm(wlc, M_WATCHDOG_8TU, WATCHDOG_8TU_DEF);
+ brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_DEF);
+ brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_DEF);
}
}
@@ -1211,9 +1234,8 @@ void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
ampdu_pars.sta = sta;
ampdu_pars.tid = tid;
- for (prec = 0; prec < pq->num_prec; prec++) {
+ for (prec = 0; prec < pq->num_prec; prec++)
brcmu_pktq_pflush(pq, prec, true, cb_del_ampdu_pkt,
(void *)&ampdu_pars);
- }
brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu);
}
diff --git a/drivers/staging/brcm80211/brcmsmac/ampdu.h b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
index 421f4ba7c63..421f4ba7c63 100644
--- a/drivers/staging/brcm80211/brcmsmac/ampdu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
diff --git a/drivers/staging/brcm80211/brcmsmac/antsel.c b/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
index c4e76c093ae..a47ce25cb9a 100644
--- a/drivers/staging/brcm80211/brcmsmac/antsel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
@@ -18,7 +18,6 @@
#include <net/mac80211.h>
#include "types.h"
-#include "bmac.h"
#include "main.h"
#include "phy_shim.h"
#include "antsel.h"
@@ -50,55 +49,76 @@
#define ANT_SELCFG_NUM_2x4 4
#define ANT_SELCFG_DEF_2x4 0x02 /* default antenna configuration */
-/* static functions */
-static int brcms_c_antsel_cfgupd(struct antsel_info *asi,
- struct brcms_antselcfg *antsel);
-static u8 brcms_c_antsel_id2antcfg(struct antsel_info *asi, u8 id);
-static u16 brcms_c_antsel_antcfg2antsel(struct antsel_info *asi, u8 ant_cfg);
-static void brcms_c_antsel_init_cfg(struct antsel_info *asi,
- struct brcms_antselcfg *antsel,
- bool auto_sel);
-
-const u16 mimo_2x4_div_antselpat_tbl[] = {
+static const u16 mimo_2x4_div_antselpat_tbl[] = {
0, 0, 0x9, 0xa, /* ant0: 0 ant1: 2,3 */
0, 0, 0x5, 0x6, /* ant0: 1 ant1: 2,3 */
0, 0, 0, 0, /* n.a. */
0, 0, 0, 0 /* n.a. */
};
-const u8 mimo_2x4_div_antselid_tbl[16] = {
+static const u8 mimo_2x4_div_antselid_tbl[16] = {
0, 0, 0, 0, 0, 2, 3, 0,
0, 0, 1, 0, 0, 0, 0, 0 /* pat to antselid */
};
-const u16 mimo_2x3_div_antselpat_tbl[] = {
+static const u16 mimo_2x3_div_antselpat_tbl[] = {
16, 0, 1, 16, /* ant0: 0 ant1: 1,2 */
16, 16, 16, 16, /* n.a. */
16, 2, 16, 16, /* ant0: 2 ant1: 1 */
16, 16, 16, 16 /* n.a. */
};
-const u8 mimo_2x3_div_antselid_tbl[16] = {
+static const u8 mimo_2x3_div_antselid_tbl[16] = {
0, 1, 2, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 /* pat to antselid */
};
+/* boardlevel antenna selection: init antenna selection structure */
+static void
+brcms_c_antsel_init_cfg(struct antsel_info *asi, struct brcms_antselcfg *antsel,
+ bool auto_sel)
+{
+ if (asi->antsel_type == ANTSEL_2x3) {
+ u8 antcfg_def = ANT_SELCFG_DEF_2x3 |
+ ((asi->antsel_avail && auto_sel) ? ANT_SELCFG_AUTO : 0);
+ antsel->ant_config[ANT_SELCFG_TX_DEF] = antcfg_def;
+ antsel->ant_config[ANT_SELCFG_TX_UNICAST] = antcfg_def;
+ antsel->ant_config[ANT_SELCFG_RX_DEF] = antcfg_def;
+ antsel->ant_config[ANT_SELCFG_RX_UNICAST] = antcfg_def;
+ antsel->num_antcfg = ANT_SELCFG_NUM_2x3;
+
+ } else if (asi->antsel_type == ANTSEL_2x4) {
+
+ antsel->ant_config[ANT_SELCFG_TX_DEF] = ANT_SELCFG_DEF_2x4;
+ antsel->ant_config[ANT_SELCFG_TX_UNICAST] = ANT_SELCFG_DEF_2x4;
+ antsel->ant_config[ANT_SELCFG_RX_DEF] = ANT_SELCFG_DEF_2x4;
+ antsel->ant_config[ANT_SELCFG_RX_UNICAST] = ANT_SELCFG_DEF_2x4;
+ antsel->num_antcfg = ANT_SELCFG_NUM_2x4;
+
+ } else { /* no antenna selection available */
+
+ antsel->ant_config[ANT_SELCFG_TX_DEF] = ANT_SELCFG_DEF_2x2;
+ antsel->ant_config[ANT_SELCFG_TX_UNICAST] = ANT_SELCFG_DEF_2x2;
+ antsel->ant_config[ANT_SELCFG_RX_DEF] = ANT_SELCFG_DEF_2x2;
+ antsel->ant_config[ANT_SELCFG_RX_UNICAST] = ANT_SELCFG_DEF_2x2;
+ antsel->num_antcfg = 0;
+ }
+}
+
struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
{
struct antsel_info *asi;
+ struct si_pub *sih = wlc->hw->sih;
asi = kzalloc(sizeof(struct antsel_info), GFP_ATOMIC);
- if (!asi) {
- wiphy_err(wlc->wiphy, "wl%d: brcms_c_antsel_attach: out of "
- "mem\n", wlc->pub->unit);
+ if (!asi)
return NULL;
- }
asi->wlc = wlc;
asi->pub = wlc->pub;
asi->antsel_type = ANTSEL_NA;
asi->antsel_avail = false;
- asi->antsel_antswitch = (u8) getintvar(asi->pub->vars, "antswitch");
+ asi->antsel_antswitch = (u8) getintvar(sih, BRCMS_SROM_ANTSWITCH);
if ((asi->pub->sromrev >= 4) && (asi->antsel_antswitch != 0)) {
switch (asi->antsel_antswitch) {
@@ -108,27 +128,26 @@ struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
/* 4321/2 board with 2x3 switch logic */
asi->antsel_type = ANTSEL_2x3;
/* Antenna selection availability */
- if (((u16) getintvar(asi->pub->vars, "aa2g") == 7) ||
- ((u16) getintvar(asi->pub->vars, "aa5g") == 7)) {
+ if (((u16) getintvar(sih, BRCMS_SROM_AA2G) == 7) ||
+ ((u16) getintvar(sih, BRCMS_SROM_AA5G) == 7)) {
asi->antsel_avail = true;
- } else
- if (((u16) getintvar(asi->pub->vars, "aa2g") ==
- 3)
- || ((u16) getintvar(asi->pub->vars, "aa5g")
- == 3)) {
+ } else if (
+ (u16) getintvar(sih, BRCMS_SROM_AA2G) == 3 ||
+ (u16) getintvar(sih, BRCMS_SROM_AA5G) == 3) {
asi->antsel_avail = false;
} else {
asi->antsel_avail = false;
wiphy_err(wlc->wiphy, "antsel_attach: 2o3 "
"board cfg invalid\n");
}
+
break;
default:
break;
}
} else if ((asi->pub->sromrev == 4) &&
- ((u16) getintvar(asi->pub->vars, "aa2g") == 7) &&
- ((u16) getintvar(asi->pub->vars, "aa5g") == 0)) {
+ ((u16) getintvar(sih, BRCMS_SROM_AA2G) == 7) &&
+ ((u16) getintvar(sih, BRCMS_SROM_AA5G) == 0)) {
/* hack to match old 4321CB2 cards with 2of3 antenna switch */
asi->antsel_type = ANTSEL_2x3;
asi->antsel_avail = true;
@@ -152,6 +171,64 @@ void brcms_c_antsel_detach(struct antsel_info *asi)
kfree(asi);
}
+/*
+ * boardlevel antenna selection:
+ * convert ant_cfg to mimo_antsel (ucode interface)
+ */
+static u16 brcms_c_antsel_antcfg2antsel(struct antsel_info *asi, u8 ant_cfg)
+{
+ u8 idx = BRCMS_ANTIDX_11N(BRCMS_ANTSEL_11N(ant_cfg));
+ u16 mimo_antsel = 0;
+
+ if (asi->antsel_type == ANTSEL_2x4) {
+ /* 2x4 antenna diversity board, 4 cfgs: 0-2 0-3 1-2 1-3 */
+ mimo_antsel = (mimo_2x4_div_antselpat_tbl[idx] & 0xf);
+ return mimo_antsel;
+
+ } else if (asi->antsel_type == ANTSEL_2x3) {
+ /* 2x3 antenna selection, 3 cfgs: 0-1 0-2 2-1 */
+ mimo_antsel = (mimo_2x3_div_antselpat_tbl[idx] & 0xf);
+ return mimo_antsel;
+ }
+
+ return mimo_antsel;
+}
+
+/* boardlevel antenna selection: ucode interface control */
+static int brcms_c_antsel_cfgupd(struct antsel_info *asi,
+ struct brcms_antselcfg *antsel)
+{
+ struct brcms_c_info *wlc = asi->wlc;
+ u8 ant_cfg;
+ u16 mimo_antsel;
+
+ /* 1) Update TX antconfig for all frames that are not unicast data
+ * (aka default TX)
+ */
+ ant_cfg = antsel->ant_config[ANT_SELCFG_TX_DEF];
+ mimo_antsel = brcms_c_antsel_antcfg2antsel(asi, ant_cfg);
+ brcms_b_write_shm(wlc->hw, M_MIMO_ANTSEL_TXDFLT, mimo_antsel);
+ /*
+ * Update driver stats for currently selected
+ * default tx/rx antenna config
+ */
+ asi->antcfg_cur.ant_config[ANT_SELCFG_TX_DEF] = ant_cfg;
+
+ /* 2) Update RX antconfig for all frames that are not unicast data
+ * (aka default RX)
+ */
+ ant_cfg = antsel->ant_config[ANT_SELCFG_RX_DEF];
+ mimo_antsel = brcms_c_antsel_antcfg2antsel(asi, ant_cfg);
+ brcms_b_write_shm(wlc->hw, M_MIMO_ANTSEL_RXDFLT, mimo_antsel);
+ /*
+ * Update driver stats for currently selected
+ * default tx/rx antenna config
+ */
+ asi->antcfg_cur.ant_config[ANT_SELCFG_RX_DEF] = ant_cfg;
+
+ return 0;
+}
+
void brcms_c_antsel_init(struct antsel_info *asi)
{
if ((asi->antsel_type == ANTSEL_2x3) ||
@@ -159,36 +236,23 @@ void brcms_c_antsel_init(struct antsel_info *asi)
brcms_c_antsel_cfgupd(asi, &asi->antcfg_11n);
}
-/* boardlevel antenna selection: init antenna selection structure */
-static void
-brcms_c_antsel_init_cfg(struct antsel_info *asi, struct brcms_antselcfg *antsel,
- bool auto_sel)
+/* boardlevel antenna selection: convert id to ant_cfg */
+static u8 brcms_c_antsel_id2antcfg(struct antsel_info *asi, u8 id)
{
- if (asi->antsel_type == ANTSEL_2x3) {
- u8 antcfg_def = ANT_SELCFG_DEF_2x3 |
- ((asi->antsel_avail && auto_sel) ? ANT_SELCFG_AUTO : 0);
- antsel->ant_config[ANT_SELCFG_TX_DEF] = antcfg_def;
- antsel->ant_config[ANT_SELCFG_TX_UNICAST] = antcfg_def;
- antsel->ant_config[ANT_SELCFG_RX_DEF] = antcfg_def;
- antsel->ant_config[ANT_SELCFG_RX_UNICAST] = antcfg_def;
- antsel->num_antcfg = ANT_SELCFG_NUM_2x3;
-
- } else if (asi->antsel_type == ANTSEL_2x4) {
-
- antsel->ant_config[ANT_SELCFG_TX_DEF] = ANT_SELCFG_DEF_2x4;
- antsel->ant_config[ANT_SELCFG_TX_UNICAST] = ANT_SELCFG_DEF_2x4;
- antsel->ant_config[ANT_SELCFG_RX_DEF] = ANT_SELCFG_DEF_2x4;
- antsel->ant_config[ANT_SELCFG_RX_UNICAST] = ANT_SELCFG_DEF_2x4;
- antsel->num_antcfg = ANT_SELCFG_NUM_2x4;
+ u8 antcfg = ANT_SELCFG_DEF_2x2;
- } else { /* no antenna selection available */
+ if (asi->antsel_type == ANTSEL_2x4) {
+ /* 2x4 antenna diversity board, 4 cfgs: 0-2 0-3 1-2 1-3 */
+ antcfg = (((id & 0x2) << 3) | ((id & 0x1) + 2));
+ return antcfg;
- antsel->ant_config[ANT_SELCFG_TX_DEF] = ANT_SELCFG_DEF_2x2;
- antsel->ant_config[ANT_SELCFG_TX_UNICAST] = ANT_SELCFG_DEF_2x2;
- antsel->ant_config[ANT_SELCFG_RX_DEF] = ANT_SELCFG_DEF_2x2;
- antsel->ant_config[ANT_SELCFG_RX_UNICAST] = ANT_SELCFG_DEF_2x2;
- antsel->num_antcfg = 0;
+ } else if (asi->antsel_type == ANTSEL_2x3) {
+ /* 2x3 antenna selection, 3 cfgs: 0-1 0-2 2-1 */
+ antcfg = (((id & 0x02) << 4) | ((id & 0x1) + 1));
+ return antcfg;
}
+
+ return antcfg;
}
void
@@ -241,71 +305,3 @@ u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel)
return antselid;
}
-
-/* boardlevel antenna selection: convert id to ant_cfg */
-static u8 brcms_c_antsel_id2antcfg(struct antsel_info *asi, u8 id)
-{
- u8 antcfg = ANT_SELCFG_DEF_2x2;
-
- if (asi->antsel_type == ANTSEL_2x4) {
- /* 2x4 antenna diversity board, 4 cfgs: 0-2 0-3 1-2 1-3 */
- antcfg = (((id & 0x2) << 3) | ((id & 0x1) + 2));
- return antcfg;
-
- } else if (asi->antsel_type == ANTSEL_2x3) {
- /* 2x3 antenna selection, 3 cfgs: 0-1 0-2 2-1 */
- antcfg = (((id & 0x02) << 4) | ((id & 0x1) + 1));
- return antcfg;
- }
-
- return antcfg;
-}
-
-/* boardlevel antenna selection: convert ant_cfg to mimo_antsel (ucode interface) */
-static u16 brcms_c_antsel_antcfg2antsel(struct antsel_info *asi, u8 ant_cfg)
-{
- u8 idx = BRCMS_ANTIDX_11N(BRCMS_ANTSEL_11N(ant_cfg));
- u16 mimo_antsel = 0;
-
- if (asi->antsel_type == ANTSEL_2x4) {
- /* 2x4 antenna diversity board, 4 cfgs: 0-2 0-3 1-2 1-3 */
- mimo_antsel = (mimo_2x4_div_antselpat_tbl[idx] & 0xf);
- return mimo_antsel;
-
- } else if (asi->antsel_type == ANTSEL_2x3) {
- /* 2x3 antenna selection, 3 cfgs: 0-1 0-2 2-1 */
- mimo_antsel = (mimo_2x3_div_antselpat_tbl[idx] & 0xf);
- return mimo_antsel;
- }
-
- return mimo_antsel;
-}
-
-/* boardlevel antenna selection: ucode interface control */
-static int brcms_c_antsel_cfgupd(struct antsel_info *asi,
- struct brcms_antselcfg *antsel)
-{
- struct brcms_c_info *wlc = asi->wlc;
- u8 ant_cfg;
- u16 mimo_antsel;
-
- /* 1) Update TX antconfig for all frames that are not unicast data
- * (aka default TX)
- */
- ant_cfg = antsel->ant_config[ANT_SELCFG_TX_DEF];
- mimo_antsel = brcms_c_antsel_antcfg2antsel(asi, ant_cfg);
- brcms_c_write_shm(wlc, M_MIMO_ANTSEL_TXDFLT, mimo_antsel);
- /* Update driver stats for currently selected default tx/rx antenna config */
- asi->antcfg_cur.ant_config[ANT_SELCFG_TX_DEF] = ant_cfg;
-
- /* 2) Update RX antconfig for all frames that are not unicast data
- * (aka default RX)
- */
- ant_cfg = antsel->ant_config[ANT_SELCFG_RX_DEF];
- mimo_antsel = brcms_c_antsel_antcfg2antsel(asi, ant_cfg);
- brcms_c_write_shm(wlc, M_MIMO_ANTSEL_RXDFLT, mimo_antsel);
- /* Update driver stats for currently selected default tx/rx antenna config */
- asi->antcfg_cur.ant_config[ANT_SELCFG_RX_DEF] = ant_cfg;
-
- return 0;
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/antsel.h b/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
index 97ea3881a8e..97ea3881a8e 100644
--- a/drivers/staging/brcm80211/brcmsmac/antsel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.c b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.c
new file mode 100644
index 00000000000..52fc9eeb5fa
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h> /* bug in tracepoint.h, it should include this */
+
+#ifndef __CHECKER__
+#include "mac80211_if.h"
+#define CREATE_TRACE_POINTS
+#include "brcms_trace_events.h"
+#endif
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
new file mode 100644
index 00000000000..27dd73eef56
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM brcmsmac
+
+#if !defined(__TRACE_BRCMSMAC_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#define __TRACE_BRCMSMAC_H
+
+#include <linux/tracepoint.h>
+#include "mac80211_if.h"
+
+#ifndef CONFIG_BRCMDBG
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+/*
+ * We define a tracepoint, its arguments, its printk format and its
+ * 'fast binary record' layout.
+ */
+TRACE_EVENT(brcms_timer,
+ /* TPPROTO is the prototype of the function called by this tracepoint */
+ TP_PROTO(struct brcms_timer *t),
+ /*
+ * TPARGS(firstarg, p) are the parameters names, same as found in the
+ * prototype.
+ */
+ TP_ARGS(t),
+ /*
+ * Fast binary tracing: define the trace record via TP_STRUCT__entry().
+ * You can think about it like a regular C structure local variable
+ * definition.
+ */
+ TP_STRUCT__entry(
+ __field(uint, ms)
+ __field(uint, set)
+ __field(uint, periodic)
+ ),
+ TP_fast_assign(
+ __entry->ms = t->ms;
+ __entry->set = t->set;
+ __entry->periodic = t->periodic;
+ ),
+ TP_printk(
+ "ms=%u set=%u periodic=%u",
+ __entry->ms, __entry->set, __entry->periodic
+ )
+);
+
+TRACE_EVENT(brcms_dpc,
+ TP_PROTO(unsigned long data),
+ TP_ARGS(data),
+ TP_STRUCT__entry(
+ __field(unsigned long, data)
+ ),
+ TP_fast_assign(
+ __entry->data = data;
+ ),
+ TP_printk(
+ "data=%p",
+ (void *)__entry->data
+ )
+);
+
+#endif /* __TRACE_BRCMSMAC_H */
+
+#ifdef CONFIG_BRCMDBG
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE brcms_trace_events
+
+#include <trace/define_trace.h>
+
+#endif /* CONFIG_BRCMDBG */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
new file mode 100644
index 00000000000..89ad1b7dab8
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -0,0 +1,1591 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <net/mac80211.h>
+
+#include <defs.h>
+#include "pub.h"
+#include "phy/phy_hal.h"
+#include "main.h"
+#include "stf.h"
+#include "channel.h"
+
+/* QDB() macro takes a dB value and converts to a quarter dB value */
+#define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR)
+
+#define LOCALE_CHAN_01_11 (1<<0)
+#define LOCALE_CHAN_12_13 (1<<1)
+#define LOCALE_CHAN_14 (1<<2)
+#define LOCALE_SET_5G_LOW_JP1 (1<<3) /* 34-48, step 2 */
+#define LOCALE_SET_5G_LOW_JP2 (1<<4) /* 34-46, step 4 */
+#define LOCALE_SET_5G_LOW1 (1<<5) /* 36-48, step 4 */
+#define LOCALE_SET_5G_LOW2 (1<<6) /* 52 */
+#define LOCALE_SET_5G_LOW3 (1<<7) /* 56-64, step 4 */
+#define LOCALE_SET_5G_MID1 (1<<8) /* 100-116, step 4 */
+#define LOCALE_SET_5G_MID2 (1<<9) /* 120-124, step 4 */
+#define LOCALE_SET_5G_MID3 (1<<10) /* 128 */
+#define LOCALE_SET_5G_HIGH1 (1<<11) /* 132-140, step 4 */
+#define LOCALE_SET_5G_HIGH2 (1<<12) /* 149-161, step 4 */
+#define LOCALE_SET_5G_HIGH3 (1<<13) /* 165 */
+#define LOCALE_CHAN_52_140_ALL (1<<14)
+#define LOCALE_SET_5G_HIGH4 (1<<15) /* 184-216 */
+
+#define LOCALE_CHAN_36_64 (LOCALE_SET_5G_LOW1 | \
+ LOCALE_SET_5G_LOW2 | \
+ LOCALE_SET_5G_LOW3)
+#define LOCALE_CHAN_52_64 (LOCALE_SET_5G_LOW2 | LOCALE_SET_5G_LOW3)
+#define LOCALE_CHAN_100_124 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2)
+#define LOCALE_CHAN_100_140 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2 | \
+ LOCALE_SET_5G_MID3 | LOCALE_SET_5G_HIGH1)
+#define LOCALE_CHAN_149_165 (LOCALE_SET_5G_HIGH2 | LOCALE_SET_5G_HIGH3)
+#define LOCALE_CHAN_184_216 LOCALE_SET_5G_HIGH4
+
+#define LOCALE_CHAN_01_14 (LOCALE_CHAN_01_11 | \
+ LOCALE_CHAN_12_13 | \
+ LOCALE_CHAN_14)
+
+#define LOCALE_RADAR_SET_NONE 0
+#define LOCALE_RADAR_SET_1 1
+
+#define LOCALE_RESTRICTED_NONE 0
+#define LOCALE_RESTRICTED_SET_2G_SHORT 1
+#define LOCALE_RESTRICTED_CHAN_165 2
+#define LOCALE_CHAN_ALL_5G 3
+#define LOCALE_RESTRICTED_JAPAN_LEGACY 4
+#define LOCALE_RESTRICTED_11D_2G 5
+#define LOCALE_RESTRICTED_11D_5G 6
+#define LOCALE_RESTRICTED_LOW_HI 7
+#define LOCALE_RESTRICTED_12_13_14 8
+
+#define LOCALE_2G_IDX_i 0
+#define LOCALE_5G_IDX_11 0
+#define LOCALE_MIMO_IDX_bn 0
+#define LOCALE_MIMO_IDX_11n 0
+
+/* max of BAND_5G_PWR_LVLS and 6 for 2.4 GHz */
+#define BRCMS_MAXPWR_TBL_SIZE 6
+/* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */
+#define BRCMS_MAXPWR_MIMO_TBL_SIZE 14
+
+/* power level in group of 2.4GHz band channels:
+ * maxpwr[0] - CCK channels [1]
+ * maxpwr[1] - CCK channels [2-10]
+ * maxpwr[2] - CCK channels [11-14]
+ * maxpwr[3] - OFDM channels [1]
+ * maxpwr[4] - OFDM channels [2-10]
+ * maxpwr[5] - OFDM channels [11-14]
+ */
+
+/* maxpwr mapping to 5GHz band channels:
+ * maxpwr[0] - channels [34-48]
+ * maxpwr[1] - channels [52-60]
+ * maxpwr[2] - channels [62-64]
+ * maxpwr[3] - channels [100-140]
+ * maxpwr[4] - channels [149-165]
+ */
+#define BAND_5G_PWR_LVLS 5 /* 5 power levels for 5G */
+
+#define LC(id) LOCALE_MIMO_IDX_ ## id
+
+#define LC_2G(id) LOCALE_2G_IDX_ ## id
+
+#define LC_5G(id) LOCALE_5G_IDX_ ## id
+
+#define LOCALES(band2, band5, mimo2, mimo5) \
+ {LC_2G(band2), LC_5G(band5), LC(mimo2), LC(mimo5)}
+
+/* macro to get 2.4 GHz channel group index for tx power */
+#define CHANNEL_POWER_IDX_2G_CCK(c) (((c) < 2) ? 0 : (((c) < 11) ? 1 : 2))
+#define CHANNEL_POWER_IDX_2G_OFDM(c) (((c) < 2) ? 3 : (((c) < 11) ? 4 : 5))
+
+/* macro to get 5 GHz channel group index for tx power */
+#define CHANNEL_POWER_IDX_5G(c) (((c) < 52) ? 0 : \
+ (((c) < 62) ? 1 : \
+ (((c) < 100) ? 2 : \
+ (((c) < 149) ? 3 : 4))))
+
+#define ISDFS_EU(fl) (((fl) & BRCMS_DFS_EU) == BRCMS_DFS_EU)
+
+struct brcms_cm_band {
+ /* struct locale_info flags */
+ u8 locale_flags;
+ /* List of valid channels in the country */
+ struct brcms_chanvec valid_channels;
+ /* List of restricted use channels */
+ const struct brcms_chanvec *restricted_channels;
+ /* List of radar sensitive channels */
+ const struct brcms_chanvec *radar_channels;
+ u8 PAD[8];
+};
+
+ /* locale per-channel tx power limits for MIMO frames
+ * maxpwr arrays are index by channel for 2.4 GHz limits, and
+ * by sub-band for 5 GHz limits using CHANNEL_POWER_IDX_5G(channel)
+ */
+struct locale_mimo_info {
+ /* tx 20 MHz power limits, qdBm units */
+ s8 maxpwr20[BRCMS_MAXPWR_MIMO_TBL_SIZE];
+ /* tx 40 MHz power limits, qdBm units */
+ s8 maxpwr40[BRCMS_MAXPWR_MIMO_TBL_SIZE];
+ u8 flags;
+};
+
+/* Country names and abbreviations with locale defined from ISO 3166 */
+struct country_info {
+ const u8 locale_2G; /* 2.4G band locale */
+ const u8 locale_5G; /* 5G band locale */
+ const u8 locale_mimo_2G; /* 2.4G mimo info */
+ const u8 locale_mimo_5G; /* 5G mimo info */
+};
+
+struct brcms_cm_info {
+ struct brcms_pub *pub;
+ struct brcms_c_info *wlc;
+ char srom_ccode[BRCM_CNTRY_BUF_SZ]; /* Country Code in SROM */
+ uint srom_regrev; /* Regulatory Rev for the SROM ccode */
+ const struct country_info *country; /* current country def */
+ char ccode[BRCM_CNTRY_BUF_SZ]; /* current internal Country Code */
+ uint regrev; /* current Regulatory Revision */
+ char country_abbrev[BRCM_CNTRY_BUF_SZ]; /* current advertised ccode */
+ /* per-band state (one per phy/radio) */
+ struct brcms_cm_band bandstate[MAXBANDS];
+ /* quiet channels currently for radar sensitivity or 11h support */
+ /* channels on which we cannot transmit */
+ struct brcms_chanvec quiet_channels;
+};
+
+/* locale channel and power info. */
+struct locale_info {
+ u32 valid_channels;
+ /* List of radar sensitive channels */
+ u8 radar_channels;
+ /* List of channels used only if APs are detected */
+ u8 restricted_channels;
+ /* Max tx pwr in qdBm for each sub-band */
+ s8 maxpwr[BRCMS_MAXPWR_TBL_SIZE];
+ /* Country IE advertised max tx pwr in dBm per sub-band */
+ s8 pub_maxpwr[BAND_5G_PWR_LVLS];
+ u8 flags;
+};
+
+/* Regulatory Matrix Spreadsheet (CLM) MIMO v3.7.9 */
+
+/*
+ * Some common channel sets
+ */
+
+/* No channels */
+static const struct brcms_chanvec chanvec_none = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+/* All 2.4 GHz HW channels */
+static const struct brcms_chanvec chanvec_all_2G = {
+ {0xfe, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+/* All 5 GHz HW channels */
+static const struct brcms_chanvec chanvec_all_5G = {
+ {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x11, 0x11,
+ 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x20, 0x22, 0x22, 0x00, 0x00, 0x11,
+ 0x11, 0x11, 0x11, 0x01}
+};
+
+/*
+ * Radar channel sets
+ */
+
+/* Channels 52 - 64, 100 - 140 */
+static const struct brcms_chanvec radar_set1 = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, /* 52 - 60 */
+ 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11, /* 64, 100 - 124 */
+ 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 128 - 140 */
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+/*
+ * Restricted channel sets
+ */
+
+/* Channels 34, 38, 42, 46 */
+static const struct brcms_chanvec restricted_set_japan_legacy = {
+ {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+/* Channels 12, 13 */
+static const struct brcms_chanvec restricted_set_2g_short = {
+ {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+/* Channel 165 */
+static const struct brcms_chanvec restricted_chan_165 = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+/* Channels 36 - 48 & 149 - 165 */
+static const struct brcms_chanvec restricted_low_hi = {
+ {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x20, 0x22, 0x22, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+/* Channels 12 - 14 */
+static const struct brcms_chanvec restricted_set_12_13_14 = {
+ {0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+/* global memory to provide working buffer for expanded locale */
+
+static const struct brcms_chanvec *g_table_radar_set[] = {
+ &chanvec_none,
+ &radar_set1
+};
+
+static const struct brcms_chanvec *g_table_restricted_chan[] = {
+ &chanvec_none, /* restricted_set_none */
+ &restricted_set_2g_short,
+ &restricted_chan_165,
+ &chanvec_all_5G,
+ &restricted_set_japan_legacy,
+ &chanvec_all_2G, /* restricted_set_11d_2G */
+ &chanvec_all_5G, /* restricted_set_11d_5G */
+ &restricted_low_hi,
+ &restricted_set_12_13_14
+};
+
+static const struct brcms_chanvec locale_2g_01_11 = {
+ {0xfe, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_2g_12_13 = {
+ {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_2g_14 = {
+ {0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_5g_LOW_JP1 = {
+ {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_5g_LOW_JP2 = {
+ {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_5g_LOW1 = {
+ {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_5g_LOW2 = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_5g_LOW3 = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_5g_MID1 = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_5g_MID2 = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_5g_MID3 = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_5g_HIGH1 = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_5g_HIGH2 = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x20, 0x22, 0x02, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_5g_HIGH3 = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_5g_52_140_ALL = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_chanvec locale_5g_HIGH4 = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
+ 0x11, 0x11, 0x11, 0x11}
+};
+
+static const struct brcms_chanvec *g_table_locale_base[] = {
+ &locale_2g_01_11,
+ &locale_2g_12_13,
+ &locale_2g_14,
+ &locale_5g_LOW_JP1,
+ &locale_5g_LOW_JP2,
+ &locale_5g_LOW1,
+ &locale_5g_LOW2,
+ &locale_5g_LOW3,
+ &locale_5g_MID1,
+ &locale_5g_MID2,
+ &locale_5g_MID3,
+ &locale_5g_HIGH1,
+ &locale_5g_HIGH2,
+ &locale_5g_HIGH3,
+ &locale_5g_52_140_ALL,
+ &locale_5g_HIGH4
+};
+
+static void brcms_c_locale_add_channels(struct brcms_chanvec *target,
+ const struct brcms_chanvec *channels)
+{
+ u8 i;
+ for (i = 0; i < sizeof(struct brcms_chanvec); i++)
+ target->vec[i] |= channels->vec[i];
+}
+
+static void brcms_c_locale_get_channels(const struct locale_info *locale,
+ struct brcms_chanvec *channels)
+{
+ u8 i;
+
+ memset(channels, 0, sizeof(struct brcms_chanvec));
+
+ for (i = 0; i < ARRAY_SIZE(g_table_locale_base); i++) {
+ if (locale->valid_channels & (1 << i))
+ brcms_c_locale_add_channels(channels,
+ g_table_locale_base[i]);
+ }
+}
+
+/*
+ * Locale Definitions - 2.4 GHz
+ */
+static const struct locale_info locale_i = { /* locale i. channel 1 - 13 */
+ LOCALE_CHAN_01_11 | LOCALE_CHAN_12_13,
+ LOCALE_RADAR_SET_NONE,
+ LOCALE_RESTRICTED_SET_2G_SHORT,
+ {QDB(19), QDB(19), QDB(19),
+ QDB(19), QDB(19), QDB(19)},
+ {20, 20, 20, 0},
+ BRCMS_EIRP
+};
+
+/*
+ * Locale Definitions - 5 GHz
+ */
+static const struct locale_info locale_11 = {
+ /* locale 11. channel 36 - 48, 52 - 64, 100 - 140, 149 - 165 */
+ LOCALE_CHAN_36_64 | LOCALE_CHAN_100_140 | LOCALE_CHAN_149_165,
+ LOCALE_RADAR_SET_1,
+ LOCALE_RESTRICTED_NONE,
+ {QDB(21), QDB(21), QDB(21), QDB(21), QDB(21)},
+ {23, 23, 23, 30, 30},
+ BRCMS_EIRP | BRCMS_DFS_EU
+};
+
+static const struct locale_info *g_locale_2g_table[] = {
+ &locale_i
+};
+
+static const struct locale_info *g_locale_5g_table[] = {
+ &locale_11
+};
+
+/*
+ * MIMO Locale Definitions - 2.4 GHz
+ */
+static const struct locale_mimo_info locale_bn = {
+ {QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
+ QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
+ QDB(13), QDB(13), QDB(13)},
+ {0, 0, QDB(13), QDB(13), QDB(13),
+ QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
+ QDB(13), 0, 0},
+ 0
+};
+
+static const struct locale_mimo_info *g_mimo_2g_table[] = {
+ &locale_bn
+};
+
+/*
+ * MIMO Locale Definitions - 5 GHz
+ */
+static const struct locale_mimo_info locale_11n = {
+ { /* 12.5 dBm */ 50, 50, 50, QDB(15), QDB(15)},
+ {QDB(14), QDB(15), QDB(15), QDB(15), QDB(15)},
+ 0
+};
+
+static const struct locale_mimo_info *g_mimo_5g_table[] = {
+ &locale_11n
+};
+
+static const struct {
+ char abbrev[BRCM_CNTRY_BUF_SZ]; /* country abbreviation */
+ struct country_info country;
+} cntry_locales[] = {
+ {
+ "X2", LOCALES(i, 11, bn, 11n)}, /* Worldwide RoW 2 */
+};
+
+#ifdef SUPPORT_40MHZ
+/* 20MHz channel info for 40MHz pairing support */
+struct chan20_info {
+ u8 sb;
+ u8 adj_sbs;
+};
+
+/* indicates adjacent channels that are allowed for a 40 Mhz channel and
+ * those that permitted by the HT
+ */
+struct chan20_info chan20_info[] = {
+ /* 11b/11g */
+/* 0 */ {1, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 1 */ {2, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 2 */ {3, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 3 */ {4, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 4 */ {5, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
+/* 5 */ {6, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
+/* 6 */ {7, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
+/* 7 */ {8, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
+/* 8 */ {9, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
+/* 9 */ {10, (CH_LOWER_SB | CH_EWA_VALID)},
+/* 10 */ {11, (CH_LOWER_SB | CH_EWA_VALID)},
+/* 11 */ {12, (CH_LOWER_SB)},
+/* 12 */ {13, (CH_LOWER_SB)},
+/* 13 */ {14, (CH_LOWER_SB)},
+
+/* 11a japan high */
+/* 14 */ {34, (CH_UPPER_SB)},
+/* 15 */ {38, (CH_LOWER_SB)},
+/* 16 */ {42, (CH_LOWER_SB)},
+/* 17 */ {46, (CH_LOWER_SB)},
+
+/* 11a usa low */
+/* 18 */ {36, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 19 */ {40, (CH_LOWER_SB | CH_EWA_VALID)},
+/* 20 */ {44, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 21 */ {48, (CH_LOWER_SB | CH_EWA_VALID)},
+/* 22 */ {52, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 23 */ {56, (CH_LOWER_SB | CH_EWA_VALID)},
+/* 24 */ {60, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 25 */ {64, (CH_LOWER_SB | CH_EWA_VALID)},
+
+/* 11a Europe */
+/* 26 */ {100, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 27 */ {104, (CH_LOWER_SB | CH_EWA_VALID)},
+/* 28 */ {108, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 29 */ {112, (CH_LOWER_SB | CH_EWA_VALID)},
+/* 30 */ {116, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 31 */ {120, (CH_LOWER_SB | CH_EWA_VALID)},
+/* 32 */ {124, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 33 */ {128, (CH_LOWER_SB | CH_EWA_VALID)},
+/* 34 */ {132, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 35 */ {136, (CH_LOWER_SB | CH_EWA_VALID)},
+/* 36 */ {140, (CH_LOWER_SB)},
+
+/* 11a usa high, ref5 only */
+/* The 0x80 bit in pdiv means these are REF5, other entries are REF20 */
+/* 37 */ {149, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 38 */ {153, (CH_LOWER_SB | CH_EWA_VALID)},
+/* 39 */ {157, (CH_UPPER_SB | CH_EWA_VALID)},
+/* 40 */ {161, (CH_LOWER_SB | CH_EWA_VALID)},
+/* 41 */ {165, (CH_LOWER_SB)},
+
+/* 11a japan */
+/* 42 */ {184, (CH_UPPER_SB)},
+/* 43 */ {188, (CH_LOWER_SB)},
+/* 44 */ {192, (CH_UPPER_SB)},
+/* 45 */ {196, (CH_LOWER_SB)},
+/* 46 */ {200, (CH_UPPER_SB)},
+/* 47 */ {204, (CH_LOWER_SB)},
+/* 48 */ {208, (CH_UPPER_SB)},
+/* 49 */ {212, (CH_LOWER_SB)},
+/* 50 */ {216, (CH_LOWER_SB)}
+};
+#endif /* SUPPORT_40MHZ */
+
+static const struct locale_info *brcms_c_get_locale_2g(u8 locale_idx)
+{
+ if (locale_idx >= ARRAY_SIZE(g_locale_2g_table))
+ return NULL; /* error condition */
+
+ return g_locale_2g_table[locale_idx];
+}
+
+static const struct locale_info *brcms_c_get_locale_5g(u8 locale_idx)
+{
+ if (locale_idx >= ARRAY_SIZE(g_locale_5g_table))
+ return NULL; /* error condition */
+
+ return g_locale_5g_table[locale_idx];
+}
+
+static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx)
+{
+ if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table))
+ return NULL;
+
+ return g_mimo_2g_table[locale_idx];
+}
+
+static const struct locale_mimo_info *brcms_c_get_mimo_5g(u8 locale_idx)
+{
+ if (locale_idx >= ARRAY_SIZE(g_mimo_5g_table))
+ return NULL;
+
+ return g_mimo_5g_table[locale_idx];
+}
+
+static int
+brcms_c_country_aggregate_map(struct brcms_cm_info *wlc_cm, const char *ccode,
+ char *mapped_ccode, uint *mapped_regrev)
+{
+ return false;
+}
+
+/* Lookup a country info structure from a null terminated country
+ * abbreviation and regrev directly with no translation.
+ */
+static const struct country_info *
+brcms_c_country_lookup_direct(const char *ccode, uint regrev)
+{
+ uint size, i;
+
+ /* Should just return 0 for single locale driver. */
+ /* Keep it this way in case we add more locales. (for now anyway) */
+
+ /*
+ * all other country def arrays are for regrev == 0, so if
+ * regrev is non-zero, fail
+ */
+ if (regrev > 0)
+ return NULL;
+
+ /* find matched table entry from country code */
+ size = ARRAY_SIZE(cntry_locales);
+ for (i = 0; i < size; i++) {
+ if (strcmp(ccode, cntry_locales[i].abbrev) == 0)
+ return &cntry_locales[i].country;
+ }
+ return NULL;
+}
+
+static const struct country_info *
+brcms_c_countrycode_map(struct brcms_cm_info *wlc_cm, const char *ccode,
+ char *mapped_ccode, uint *mapped_regrev)
+{
+ struct brcms_c_info *wlc = wlc_cm->wlc;
+ const struct country_info *country;
+ uint srom_regrev = wlc_cm->srom_regrev;
+ const char *srom_ccode = wlc_cm->srom_ccode;
+ int mapped;
+
+ /* check for currently supported ccode size */
+ if (strlen(ccode) > (BRCM_CNTRY_BUF_SZ - 1)) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: ccode \"%s\" too long for "
+ "match\n", wlc->pub->unit, __func__, ccode);
+ return NULL;
+ }
+
+ /* default mapping is the given ccode and regrev 0 */
+ strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
+ *mapped_regrev = 0;
+
+ /* If the desired country code matches the srom country code,
+ * then the mapped country is the srom regulatory rev.
+ * Otherwise look for an aggregate mapping.
+ */
+ if (!strcmp(srom_ccode, ccode)) {
+ *mapped_regrev = srom_regrev;
+ mapped = 0;
+ wiphy_err(wlc->wiphy, "srom_code == ccode %s\n", __func__);
+ } else {
+ mapped =
+ brcms_c_country_aggregate_map(wlc_cm, ccode, mapped_ccode,
+ mapped_regrev);
+ }
+
+ /* find the matching built-in country definition */
+ country = brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
+
+ /* if there is not an exact rev match, default to rev zero */
+ if (country == NULL && *mapped_regrev != 0) {
+ *mapped_regrev = 0;
+ country =
+ brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
+ }
+
+ return country;
+}
+
+/* Lookup a country info structure from a null terminated country code
+ * The lookup is case sensitive.
+ */
+static const struct country_info *
+brcms_c_country_lookup(struct brcms_c_info *wlc, const char *ccode)
+{
+ const struct country_info *country;
+ char mapped_ccode[BRCM_CNTRY_BUF_SZ];
+ uint mapped_regrev;
+
+ /*
+ * map the country code to a built-in country code, regrev, and
+ * country_info struct
+ */
+ country = brcms_c_countrycode_map(wlc->cmi, ccode, mapped_ccode,
+ &mapped_regrev);
+
+ return country;
+}
+
+/*
+ * reset the quiet channels vector to the union
+ * of the restricted and radar channel sets
+ */
+static void brcms_c_quiet_channels_reset(struct brcms_cm_info *wlc_cm)
+{
+ struct brcms_c_info *wlc = wlc_cm->wlc;
+ uint i, j;
+ struct brcms_band *band;
+ const struct brcms_chanvec *chanvec;
+
+ memset(&wlc_cm->quiet_channels, 0, sizeof(struct brcms_chanvec));
+
+ band = wlc->band;
+ for (i = 0; i < wlc->pub->_nbands;
+ i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) {
+
+ /* initialize quiet channels for restricted channels */
+ chanvec = wlc_cm->bandstate[band->bandunit].restricted_channels;
+ for (j = 0; j < sizeof(struct brcms_chanvec); j++)
+ wlc_cm->quiet_channels.vec[j] |= chanvec->vec[j];
+
+ }
+}
+
+/* Is the channel valid for the current locale and current band? */
+static bool brcms_c_valid_channel20(struct brcms_cm_info *wlc_cm, uint val)
+{
+ struct brcms_c_info *wlc = wlc_cm->wlc;
+
+ return ((val < MAXCHANNEL) &&
+ isset(wlc_cm->bandstate[wlc->band->bandunit].valid_channels.vec,
+ val));
+}
+
+/* Is the channel valid for the current locale and specified band? */
+static bool brcms_c_valid_channel20_in_band(struct brcms_cm_info *wlc_cm,
+ uint bandunit, uint val)
+{
+ return ((val < MAXCHANNEL)
+ && isset(wlc_cm->bandstate[bandunit].valid_channels.vec, val));
+}
+
+/* Is the channel valid for the current locale? (but don't consider channels not
+ * available due to bandlocking)
+ */
+static bool brcms_c_valid_channel20_db(struct brcms_cm_info *wlc_cm, uint val)
+{
+ struct brcms_c_info *wlc = wlc_cm->wlc;
+
+ return brcms_c_valid_channel20(wlc->cmi, val) ||
+ (!wlc->bandlocked
+ && brcms_c_valid_channel20_in_band(wlc->cmi,
+ OTHERBANDUNIT(wlc), val));
+}
+
+/* JP, J1 - J10 are Japan ccodes */
+static bool brcms_c_japan_ccode(const char *ccode)
+{
+ return (ccode[0] == 'J' &&
+ (ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9')));
+}
+
+/* Returns true if currently set country is Japan or variant */
+static bool brcms_c_japan(struct brcms_c_info *wlc)
+{
+ return brcms_c_japan_ccode(wlc->cmi->country_abbrev);
+}
+
+static void
+brcms_c_channel_min_txpower_limits_with_local_constraint(
+ struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr,
+ u8 local_constraint_qdbm)
+{
+ int j;
+
+ /* CCK Rates */
+ for (j = 0; j < WL_TX_POWER_CCK_NUM; j++)
+ txpwr->cck[j] = min(txpwr->cck[j], local_constraint_qdbm);
+
+ /* 20 MHz Legacy OFDM SISO */
+ for (j = 0; j < WL_TX_POWER_OFDM_NUM; j++)
+ txpwr->ofdm[j] = min(txpwr->ofdm[j], local_constraint_qdbm);
+
+ /* 20 MHz Legacy OFDM CDD */
+ for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++)
+ txpwr->ofdm_cdd[j] =
+ min(txpwr->ofdm_cdd[j], local_constraint_qdbm);
+
+ /* 40 MHz Legacy OFDM SISO */
+ for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++)
+ txpwr->ofdm_40_siso[j] =
+ min(txpwr->ofdm_40_siso[j], local_constraint_qdbm);
+
+ /* 40 MHz Legacy OFDM CDD */
+ for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++)
+ txpwr->ofdm_40_cdd[j] =
+ min(txpwr->ofdm_40_cdd[j], local_constraint_qdbm);
+
+ /* 20MHz MCS 0-7 SISO */
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++)
+ txpwr->mcs_20_siso[j] =
+ min(txpwr->mcs_20_siso[j], local_constraint_qdbm);
+
+ /* 20MHz MCS 0-7 CDD */
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++)
+ txpwr->mcs_20_cdd[j] =
+ min(txpwr->mcs_20_cdd[j], local_constraint_qdbm);
+
+ /* 20MHz MCS 0-7 STBC */
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++)
+ txpwr->mcs_20_stbc[j] =
+ min(txpwr->mcs_20_stbc[j], local_constraint_qdbm);
+
+ /* 20MHz MCS 8-15 MIMO */
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_2_STREAM; j++)
+ txpwr->mcs_20_mimo[j] =
+ min(txpwr->mcs_20_mimo[j], local_constraint_qdbm);
+
+ /* 40MHz MCS 0-7 SISO */
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++)
+ txpwr->mcs_40_siso[j] =
+ min(txpwr->mcs_40_siso[j], local_constraint_qdbm);
+
+ /* 40MHz MCS 0-7 CDD */
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++)
+ txpwr->mcs_40_cdd[j] =
+ min(txpwr->mcs_40_cdd[j], local_constraint_qdbm);
+
+ /* 40MHz MCS 0-7 STBC */
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++)
+ txpwr->mcs_40_stbc[j] =
+ min(txpwr->mcs_40_stbc[j], local_constraint_qdbm);
+
+ /* 40MHz MCS 8-15 MIMO */
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_2_STREAM; j++)
+ txpwr->mcs_40_mimo[j] =
+ min(txpwr->mcs_40_mimo[j], local_constraint_qdbm);
+
+ /* 40MHz MCS 32 */
+ txpwr->mcs32 = min(txpwr->mcs32, local_constraint_qdbm);
+
+}
+
+/* Update the radio state (enable/disable) and tx power targets
+ * based on a new set of channel/regulatory information
+ */
+static void brcms_c_channels_commit(struct brcms_cm_info *wlc_cm)
+{
+ struct brcms_c_info *wlc = wlc_cm->wlc;
+ uint chan;
+ struct txpwr_limits txpwr;
+
+ /* search for the existence of any valid channel */
+ for (chan = 0; chan < MAXCHANNEL; chan++) {
+ if (brcms_c_valid_channel20_db(wlc->cmi, chan))
+ break;
+ }
+ if (chan == MAXCHANNEL)
+ chan = INVCHANNEL;
+
+ /*
+ * based on the channel search above, set or
+ * clear WL_RADIO_COUNTRY_DISABLE.
+ */
+ if (chan == INVCHANNEL) {
+ /*
+ * country/locale with no valid channels, set
+ * the radio disable bit
+ */
+ mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
+ wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\" "
+ "nbands %d bandlocked %d\n", wlc->pub->unit,
+ __func__, wlc_cm->country_abbrev, wlc->pub->_nbands,
+ wlc->bandlocked);
+ } else if (mboolisset(wlc->pub->radio_disabled,
+ WL_RADIO_COUNTRY_DISABLE)) {
+ /*
+ * country/locale with valid channel, clear
+ * the radio disable bit
+ */
+ mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
+ }
+
+ /*
+ * Now that the country abbreviation is set, if the radio supports 2G,
+ * then set channel 14 restrictions based on the new locale.
+ */
+ if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G)
+ wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
+ brcms_c_japan(wlc) ? true :
+ false);
+
+ if (wlc->pub->up && chan != INVCHANNEL) {
+ brcms_c_channel_reg_limits(wlc_cm, wlc->chanspec, &txpwr);
+ brcms_c_channel_min_txpower_limits_with_local_constraint(wlc_cm,
+ &txpwr, BRCMS_TXPWR_MAX);
+ wlc_phy_txpower_limit_set(wlc->band->pi, &txpwr, wlc->chanspec);
+ }
+}
+
+static int
+brcms_c_channels_init(struct brcms_cm_info *wlc_cm,
+ const struct country_info *country)
+{
+ struct brcms_c_info *wlc = wlc_cm->wlc;
+ uint i, j;
+ struct brcms_band *band;
+ const struct locale_info *li;
+ struct brcms_chanvec sup_chan;
+ const struct locale_mimo_info *li_mimo;
+
+ band = wlc->band;
+ for (i = 0; i < wlc->pub->_nbands;
+ i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) {
+
+ li = (band->bandtype == BRCM_BAND_5G) ?
+ brcms_c_get_locale_5g(country->locale_5G) :
+ brcms_c_get_locale_2g(country->locale_2G);
+ wlc_cm->bandstate[band->bandunit].locale_flags = li->flags;
+ li_mimo = (band->bandtype == BRCM_BAND_5G) ?
+ brcms_c_get_mimo_5g(country->locale_mimo_5G) :
+ brcms_c_get_mimo_2g(country->locale_mimo_2G);
+
+ /* merge the mimo non-mimo locale flags */
+ wlc_cm->bandstate[band->bandunit].locale_flags |=
+ li_mimo->flags;
+
+ wlc_cm->bandstate[band->bandunit].restricted_channels =
+ g_table_restricted_chan[li->restricted_channels];
+ wlc_cm->bandstate[band->bandunit].radar_channels =
+ g_table_radar_set[li->radar_channels];
+
+ /*
+ * set the channel availability, masking out the channels
+ * that may not be supported on this phy.
+ */
+ wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
+ &sup_chan);
+ brcms_c_locale_get_channels(li,
+ &wlc_cm->bandstate[band->bandunit].
+ valid_channels);
+ for (j = 0; j < sizeof(struct brcms_chanvec); j++)
+ wlc_cm->bandstate[band->bandunit].valid_channels.
+ vec[j] &= sup_chan.vec[j];
+ }
+
+ brcms_c_quiet_channels_reset(wlc_cm);
+ brcms_c_channels_commit(wlc_cm);
+
+ return 0;
+}
+
+/*
+ * set the driver's current country and regulatory information
+ * using a country code as the source. Look up built in country
+ * information found with the country code.
+ */
+static void
+brcms_c_set_country_common(struct brcms_cm_info *wlc_cm,
+ const char *country_abbrev,
+ const char *ccode, uint regrev,
+ const struct country_info *country)
+{
+ const struct locale_info *locale;
+ struct brcms_c_info *wlc = wlc_cm->wlc;
+ char prev_country_abbrev[BRCM_CNTRY_BUF_SZ];
+
+ /* save current country state */
+ wlc_cm->country = country;
+
+ memset(&prev_country_abbrev, 0, BRCM_CNTRY_BUF_SZ);
+ strncpy(prev_country_abbrev, wlc_cm->country_abbrev,
+ BRCM_CNTRY_BUF_SZ - 1);
+
+ strncpy(wlc_cm->country_abbrev, country_abbrev, BRCM_CNTRY_BUF_SZ - 1);
+ strncpy(wlc_cm->ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
+ wlc_cm->regrev = regrev;
+
+ if ((wlc->pub->_n_enab & SUPPORT_11N) !=
+ wlc->protection->nmode_user)
+ brcms_c_set_nmode(wlc);
+
+ brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
+ brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
+ /* set or restore gmode as required by regulatory */
+ locale = brcms_c_get_locale_2g(country->locale_2G);
+ if (locale && (locale->flags & BRCMS_NO_OFDM))
+ brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
+ else
+ brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
+
+ brcms_c_channels_init(wlc_cm, country);
+
+ return;
+}
+
+static int
+brcms_c_set_countrycode_rev(struct brcms_cm_info *wlc_cm,
+ const char *country_abbrev,
+ const char *ccode, int regrev)
+{
+ const struct country_info *country;
+ char mapped_ccode[BRCM_CNTRY_BUF_SZ];
+ uint mapped_regrev;
+
+ /* if regrev is -1, lookup the mapped country code,
+ * otherwise use the ccode and regrev directly
+ */
+ if (regrev == -1) {
+ /*
+ * map the country code to a built-in country
+ * code, regrev, and country_info
+ */
+ country =
+ brcms_c_countrycode_map(wlc_cm, ccode, mapped_ccode,
+ &mapped_regrev);
+ } else {
+ /* find the matching built-in country definition */
+ country = brcms_c_country_lookup_direct(ccode, regrev);
+ strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
+ mapped_regrev = regrev;
+ }
+
+ if (country == NULL)
+ return -EINVAL;
+
+ /* set the driver state for the country */
+ brcms_c_set_country_common(wlc_cm, country_abbrev, mapped_ccode,
+ mapped_regrev, country);
+
+ return 0;
+}
+
+/*
+ * set the driver's current country and regulatory information using
+ * a country code as the source. Lookup built in country information
+ * found with the country code.
+ */
+static int
+brcms_c_set_countrycode(struct brcms_cm_info *wlc_cm, const char *ccode)
+{
+ char country_abbrev[BRCM_CNTRY_BUF_SZ];
+ strncpy(country_abbrev, ccode, BRCM_CNTRY_BUF_SZ);
+ return brcms_c_set_countrycode_rev(wlc_cm, country_abbrev, ccode, -1);
+}
+
+struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
+{
+ struct brcms_cm_info *wlc_cm;
+ char country_abbrev[BRCM_CNTRY_BUF_SZ];
+ const struct country_info *country;
+ struct brcms_pub *pub = wlc->pub;
+ char *ccode;
+
+ BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+
+ wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC);
+ if (wlc_cm == NULL)
+ return NULL;
+ wlc_cm->pub = pub;
+ wlc_cm->wlc = wlc;
+ wlc->cmi = wlc_cm;
+
+ /* store the country code for passing up as a regulatory hint */
+ ccode = getvar(wlc->hw->sih, BRCMS_SROM_CCODE);
+ if (ccode)
+ strncpy(wlc->pub->srom_ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
+
+ /*
+ * internal country information which must match
+ * regulatory constraints in firmware
+ */
+ memset(country_abbrev, 0, BRCM_CNTRY_BUF_SZ);
+ strncpy(country_abbrev, "X2", sizeof(country_abbrev) - 1);
+ country = brcms_c_country_lookup(wlc, country_abbrev);
+
+ /* save default country for exiting 11d regulatory mode */
+ strncpy(wlc->country_default, country_abbrev, BRCM_CNTRY_BUF_SZ - 1);
+
+ /* initialize autocountry_default to driver default */
+ strncpy(wlc->autocountry_default, "X2", BRCM_CNTRY_BUF_SZ - 1);
+
+ brcms_c_set_countrycode(wlc_cm, country_abbrev);
+
+ return wlc_cm;
+}
+
+void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm)
+{
+ kfree(wlc_cm);
+}
+
+u8
+brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
+ uint bandunit)
+{
+ return wlc_cm->bandstate[bandunit].locale_flags;
+}
+
+static bool
+brcms_c_quiet_chanspec(struct brcms_cm_info *wlc_cm, u16 chspec)
+{
+ return (wlc_cm->wlc->pub->_n_enab & SUPPORT_11N) &&
+ CHSPEC_IS40(chspec) ?
+ (isset(wlc_cm->quiet_channels.vec,
+ lower_20_sb(CHSPEC_CHANNEL(chspec))) ||
+ isset(wlc_cm->quiet_channels.vec,
+ upper_20_sb(CHSPEC_CHANNEL(chspec)))) :
+ isset(wlc_cm->quiet_channels.vec, CHSPEC_CHANNEL(chspec));
+}
+
+void
+brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
+ u8 local_constraint_qdbm)
+{
+ struct brcms_c_info *wlc = wlc_cm->wlc;
+ struct txpwr_limits txpwr;
+
+ brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
+
+ brcms_c_channel_min_txpower_limits_with_local_constraint(
+ wlc_cm, &txpwr, local_constraint_qdbm
+ );
+
+ brcms_b_set_chanspec(wlc->hw, chanspec,
+ (brcms_c_quiet_chanspec(wlc_cm, chanspec) != 0),
+ &txpwr);
+}
+
+#ifdef POWER_DBG
+static void wlc_phy_txpower_limits_dump(struct txpwr_limits *txpwr)
+{
+ int i;
+ char buf[80];
+ char fraction[4][4] = { " ", ".25", ".5 ", ".75" };
+
+ sprintf(buf, "CCK ");
+ for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
+ sprintf(buf[strlen(buf)], " %2d%s",
+ txpwr->cck[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->cck[i] % BRCMS_TXPWR_DB_FACTOR]);
+ printk(KERN_DEBUG "%s\n", buf);
+
+ sprintf(buf, "20 MHz OFDM SISO ");
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
+ sprintf(buf[strlen(buf)], " %2d%s",
+ txpwr->ofdm[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->ofdm[i] % BRCMS_TXPWR_DB_FACTOR]);
+ printk(KERN_DEBUG "%s\n", buf);
+
+ sprintf(buf, "20 MHz OFDM CDD ");
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
+ sprintf(buf[strlen(buf)], " %2d%s",
+ txpwr->ofdm_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->ofdm_cdd[i] % BRCMS_TXPWR_DB_FACTOR]);
+ printk(KERN_DEBUG "%s\n", buf);
+
+ sprintf(buf, "40 MHz OFDM SISO ");
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
+ sprintf(buf[strlen(buf)], " %2d%s",
+ txpwr->ofdm_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->ofdm_40_siso[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
+ printk(KERN_DEBUG "%s\n", buf);
+
+ sprintf(buf, "40 MHz OFDM CDD ");
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
+ sprintf(buf[strlen(buf)], " %2d%s",
+ txpwr->ofdm_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->ofdm_40_cdd[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
+ printk(KERN_DEBUG "%s\n", buf);
+
+ sprintf(buf, "20 MHz MCS0-7 SISO ");
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
+ sprintf(buf[strlen(buf)], " %2d%s",
+ txpwr->mcs_20_siso[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_20_siso[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
+ printk(KERN_DEBUG "%s\n", buf);
+
+ sprintf(buf, "20 MHz MCS0-7 CDD ");
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
+ sprintf(buf[strlen(buf)], " %2d%s",
+ txpwr->mcs_20_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_20_cdd[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
+ printk(KERN_DEBUG "%s\n", buf);
+
+ sprintf(buf, "20 MHz MCS0-7 STBC ");
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
+ sprintf(buf[strlen(buf)], " %2d%s",
+ txpwr->mcs_20_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_20_stbc[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
+ printk(KERN_DEBUG "%s\n", buf);
+
+ sprintf(buf, "20 MHz MCS8-15 SDM ");
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
+ sprintf(buf[strlen(buf)], " %2d%s",
+ txpwr->mcs_20_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_20_mimo[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
+ printk(KERN_DEBUG "%s\n", buf);
+
+ sprintf(buf, "40 MHz MCS0-7 SISO ");
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
+ sprintf(buf[strlen(buf)], " %2d%s",
+ txpwr->mcs_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_40_siso[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
+ printk(KERN_DEBUG "%s\n", buf);
+
+ sprintf(buf, "40 MHz MCS0-7 CDD ");
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
+ sprintf(buf[strlen(buf)], " %2d%s",
+ txpwr->mcs_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_40_cdd[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
+ printk(KERN_DEBUG "%s\n", buf);
+
+ sprintf(buf, "40 MHz MCS0-7 STBC ");
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
+ sprintf(buf[strlen(buf)], " %2d%s",
+ txpwr->mcs_40_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_40_stbc[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
+ printk(KERN_DEBUG "%s\n", buf);
+
+ sprintf(buf, "40 MHz MCS8-15 SDM ");
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
+ sprintf(buf[strlen(buf)], " %2d%s",
+ txpwr->mcs_40_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_40_mimo[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
+ }
+ printk(KERN_DEBUG "%s\n", buf);
+
+ printk(KERN_DEBUG "MCS32 %2d%s\n",
+ txpwr->mcs32 / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs32 % BRCMS_TXPWR_DB_FACTOR]);
+}
+#endif /* POWER_DBG */
+
+void
+brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
+ struct txpwr_limits *txpwr)
+{
+ struct brcms_c_info *wlc = wlc_cm->wlc;
+ uint i;
+ uint chan;
+ int maxpwr;
+ int delta;
+ const struct country_info *country;
+ struct brcms_band *band;
+ const struct locale_info *li;
+ int conducted_max = BRCMS_TXPWR_MAX;
+ int conducted_ofdm_max = BRCMS_TXPWR_MAX;
+ const struct locale_mimo_info *li_mimo;
+ int maxpwr20, maxpwr40;
+ int maxpwr_idx;
+ uint j;
+
+ memset(txpwr, 0, sizeof(struct txpwr_limits));
+
+ if (!brcms_c_valid_chanspec_db(wlc_cm, chanspec)) {
+ country = brcms_c_country_lookup(wlc, wlc->autocountry_default);
+ if (country == NULL)
+ return;
+ } else {
+ country = wlc_cm->country;
+ }
+
+ chan = CHSPEC_CHANNEL(chanspec);
+ band = wlc->bandstate[chspec_bandunit(chanspec)];
+ li = (band->bandtype == BRCM_BAND_5G) ?
+ brcms_c_get_locale_5g(country->locale_5G) :
+ brcms_c_get_locale_2g(country->locale_2G);
+
+ li_mimo = (band->bandtype == BRCM_BAND_5G) ?
+ brcms_c_get_mimo_5g(country->locale_mimo_5G) :
+ brcms_c_get_mimo_2g(country->locale_mimo_2G);
+
+ if (li->flags & BRCMS_EIRP) {
+ delta = band->antgain;
+ } else {
+ delta = 0;
+ if (band->antgain > QDB(6))
+ delta = band->antgain - QDB(6); /* Excess over 6 dB */
+ }
+
+ if (li == &locale_i) {
+ conducted_max = QDB(22);
+ conducted_ofdm_max = QDB(22);
+ }
+
+ /* CCK txpwr limits for 2.4G band */
+ if (band->bandtype == BRCM_BAND_2G) {
+ maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_CCK(chan)];
+
+ maxpwr = maxpwr - delta;
+ maxpwr = max(maxpwr, 0);
+ maxpwr = min(maxpwr, conducted_max);
+
+ for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
+ txpwr->cck[i] = (u8) maxpwr;
+ }
+
+ /* OFDM txpwr limits for 2.4G or 5G bands */
+ if (band->bandtype == BRCM_BAND_2G)
+ maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_OFDM(chan)];
+ else
+ maxpwr = li->maxpwr[CHANNEL_POWER_IDX_5G(chan)];
+
+ maxpwr = maxpwr - delta;
+ maxpwr = max(maxpwr, 0);
+ maxpwr = min(maxpwr, conducted_ofdm_max);
+
+ /* Keep OFDM lmit below CCK limit */
+ if (band->bandtype == BRCM_BAND_2G)
+ maxpwr = min_t(int, maxpwr, txpwr->cck[0]);
+
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
+ txpwr->ofdm[i] = (u8) maxpwr;
+
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
+ /*
+ * OFDM 40 MHz SISO has the same power as the corresponding
+ * MCS0-7 rate unless overriden by the locale specific code.
+ * We set this value to 0 as a flag (presumably 0 dBm isn't
+ * a possibility) and then copy the MCS0-7 value to the 40 MHz
+ * value if it wasn't explicitly set.
+ */
+ txpwr->ofdm_40_siso[i] = 0;
+
+ txpwr->ofdm_cdd[i] = (u8) maxpwr;
+
+ txpwr->ofdm_40_cdd[i] = 0;
+ }
+
+ /* MIMO/HT specific limits */
+ if (li_mimo->flags & BRCMS_EIRP) {
+ delta = band->antgain;
+ } else {
+ delta = 0;
+ if (band->antgain > QDB(6))
+ delta = band->antgain - QDB(6); /* Excess over 6 dB */
+ }
+
+ if (band->bandtype == BRCM_BAND_2G)
+ maxpwr_idx = (chan - 1);
+ else
+ maxpwr_idx = CHANNEL_POWER_IDX_5G(chan);
+
+ maxpwr20 = li_mimo->maxpwr20[maxpwr_idx];
+ maxpwr40 = li_mimo->maxpwr40[maxpwr_idx];
+
+ maxpwr20 = maxpwr20 - delta;
+ maxpwr20 = max(maxpwr20, 0);
+ maxpwr40 = maxpwr40 - delta;
+ maxpwr40 = max(maxpwr40, 0);
+
+ /* Fill in the MCS 0-7 (SISO) rates */
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
+
+ /*
+ * 20 MHz has the same power as the corresponding OFDM rate
+ * unless overriden by the locale specific code.
+ */
+ txpwr->mcs_20_siso[i] = txpwr->ofdm[i];
+ txpwr->mcs_40_siso[i] = 0;
+ }
+
+ /* Fill in the MCS 0-7 CDD rates */
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
+ txpwr->mcs_20_cdd[i] = (u8) maxpwr20;
+ txpwr->mcs_40_cdd[i] = (u8) maxpwr40;
+ }
+
+ /*
+ * These locales have SISO expressed in the
+ * table and override CDD later
+ */
+ if (li_mimo == &locale_bn) {
+ if (li_mimo == &locale_bn) {
+ maxpwr20 = QDB(16);
+ maxpwr40 = 0;
+
+ if (chan >= 3 && chan <= 11)
+ maxpwr40 = QDB(16);
+ }
+
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
+ txpwr->mcs_20_siso[i] = (u8) maxpwr20;
+ txpwr->mcs_40_siso[i] = (u8) maxpwr40;
+ }
+ }
+
+ /* Fill in the MCS 0-7 STBC rates */
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
+ txpwr->mcs_20_stbc[i] = 0;
+ txpwr->mcs_40_stbc[i] = 0;
+ }
+
+ /* Fill in the MCS 8-15 SDM rates */
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) {
+ txpwr->mcs_20_mimo[i] = (u8) maxpwr20;
+ txpwr->mcs_40_mimo[i] = (u8) maxpwr40;
+ }
+
+ /* Fill in MCS32 */
+ txpwr->mcs32 = (u8) maxpwr40;
+
+ for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) {
+ if (txpwr->ofdm_40_cdd[i] == 0)
+ txpwr->ofdm_40_cdd[i] = txpwr->mcs_40_cdd[j];
+ if (i == 0) {
+ i = i + 1;
+ if (txpwr->ofdm_40_cdd[i] == 0)
+ txpwr->ofdm_40_cdd[i] = txpwr->mcs_40_cdd[j];
+ }
+ }
+
+ /*
+ * Copy the 40 MHZ MCS 0-7 CDD value to the 40 MHZ MCS 0-7 SISO
+ * value if it wasn't provided explicitly.
+ */
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
+ if (txpwr->mcs_40_siso[i] == 0)
+ txpwr->mcs_40_siso[i] = txpwr->mcs_40_cdd[i];
+ }
+
+ for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) {
+ if (txpwr->ofdm_40_siso[i] == 0)
+ txpwr->ofdm_40_siso[i] = txpwr->mcs_40_siso[j];
+ if (i == 0) {
+ i = i + 1;
+ if (txpwr->ofdm_40_siso[i] == 0)
+ txpwr->ofdm_40_siso[i] = txpwr->mcs_40_siso[j];
+ }
+ }
+
+ /*
+ * Copy the 20 and 40 MHz MCS0-7 CDD values to the corresponding
+ * STBC values if they weren't provided explicitly.
+ */
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
+ if (txpwr->mcs_20_stbc[i] == 0)
+ txpwr->mcs_20_stbc[i] = txpwr->mcs_20_cdd[i];
+
+ if (txpwr->mcs_40_stbc[i] == 0)
+ txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i];
+ }
+
+#ifdef POWER_DBG
+ wlc_phy_txpower_limits_dump(txpwr);
+#endif
+ return;
+}
+
+/*
+ * Verify the chanspec is using a legal set of parameters, i.e. that the
+ * chanspec specified a band, bw, ctl_sb and channel and that the
+ * combination could be legal given any set of circumstances.
+ * RETURNS: true is the chanspec is malformed, false if it looks good.
+ */
+static bool brcms_c_chspec_malformed(u16 chanspec)
+{
+ /* must be 2G or 5G band */
+ if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec))
+ return true;
+ /* must be 20 or 40 bandwidth */
+ if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec))
+ return true;
+
+ /* 20MHZ b/w must have no ctl sb, 40 must have a ctl sb */
+ if (CHSPEC_IS20(chanspec)) {
+ if (!CHSPEC_SB_NONE(chanspec))
+ return true;
+ } else if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec)) {
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Validate the chanspec for this locale, for 40MHZ we need to also
+ * check that the sidebands are valid 20MZH channels in this locale
+ * and they are also a legal HT combination
+ */
+static bool
+brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec,
+ bool dualband)
+{
+ struct brcms_c_info *wlc = wlc_cm->wlc;
+ u8 channel = CHSPEC_CHANNEL(chspec);
+
+ /* check the chanspec */
+ if (brcms_c_chspec_malformed(chspec)) {
+ wiphy_err(wlc->wiphy, "wl%d: malformed chanspec 0x%x\n",
+ wlc->pub->unit, chspec);
+ return false;
+ }
+
+ if (CHANNEL_BANDUNIT(wlc_cm->wlc, channel) !=
+ chspec_bandunit(chspec))
+ return false;
+
+ /* Check a 20Mhz channel */
+ if (CHSPEC_IS20(chspec)) {
+ if (dualband)
+ return brcms_c_valid_channel20_db(wlc_cm->wlc->cmi,
+ channel);
+ else
+ return brcms_c_valid_channel20(wlc_cm->wlc->cmi,
+ channel);
+ }
+#ifdef SUPPORT_40MHZ
+ /*
+ * We know we are now checking a 40MHZ channel, so we should
+ * only be here for NPHYS
+ */
+ if (BRCMS_ISNPHY(wlc->band) || BRCMS_ISSSLPNPHY(wlc->band)) {
+ u8 upper_sideband = 0, idx;
+ u8 num_ch20_entries =
+ sizeof(chan20_info) / sizeof(struct chan20_info);
+
+ if (!VALID_40CHANSPEC_IN_BAND(wlc, chspec_bandunit(chspec)))
+ return false;
+
+ if (dualband) {
+ if (!brcms_c_valid_channel20_db(wlc->cmi,
+ lower_20_sb(channel)) ||
+ !brcms_c_valid_channel20_db(wlc->cmi,
+ upper_20_sb(channel)))
+ return false;
+ } else {
+ if (!brcms_c_valid_channel20(wlc->cmi,
+ lower_20_sb(channel)) ||
+ !brcms_c_valid_channel20(wlc->cmi,
+ upper_20_sb(channel)))
+ return false;
+ }
+
+ /* find the lower sideband info in the sideband array */
+ for (idx = 0; idx < num_ch20_entries; idx++) {
+ if (chan20_info[idx].sb == lower_20_sb(channel))
+ upper_sideband = chan20_info[idx].adj_sbs;
+ }
+ /* check that the lower sideband allows an upper sideband */
+ if ((upper_sideband & (CH_UPPER_SB | CH_EWA_VALID)) ==
+ (CH_UPPER_SB | CH_EWA_VALID))
+ return true;
+ return false;
+ }
+#endif /* 40 MHZ */
+
+ return false;
+}
+
+bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec)
+{
+ return brcms_c_valid_chanspec_ext(wlc_cm, chspec, true);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.h b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
new file mode 100644
index 00000000000..808cb4fbfbe
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_CHANNEL_H_
+#define _BRCM_CHANNEL_H_
+
+/* conversion for phy txpwr calculations that use .25 dB units */
+#define BRCMS_TXPWR_DB_FACTOR 4
+
+/* bits for locale_info flags */
+#define BRCMS_PEAK_CONDUCTED 0x00 /* Peak for locals */
+#define BRCMS_EIRP 0x01 /* Flag for EIRP */
+#define BRCMS_DFS_TPC 0x02 /* Flag for DFS TPC */
+#define BRCMS_NO_OFDM 0x04 /* Flag for No OFDM */
+#define BRCMS_NO_40MHZ 0x08 /* Flag for No MIMO 40MHz */
+#define BRCMS_NO_MIMO 0x10 /* Flag for No MIMO, 20 or 40 MHz */
+#define BRCMS_RADAR_TYPE_EU 0x20 /* Flag for EU */
+#define BRCMS_DFS_FCC BRCMS_DFS_TPC /* Flag for DFS FCC */
+
+#define BRCMS_DFS_EU (BRCMS_DFS_TPC | BRCMS_RADAR_TYPE_EU) /* Flag for DFS EU */
+
+extern struct brcms_cm_info *
+brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
+
+extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
+
+extern u8 brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
+ uint bandunit);
+
+extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
+ u16 chspec);
+
+extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
+ u16 chanspec,
+ struct txpwr_limits *txpwr);
+extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
+ u16 chanspec,
+ u8 local_constraint_qdbm);
+
+#endif /* _WLC_CHANNEL_H */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
new file mode 100644
index 00000000000..ed51616abc8
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
@@ -0,0 +1,1898 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_D11_H_
+#define _BRCM_D11_H_
+
+#include <linux/ieee80211.h>
+
+#include <defs.h>
+#include "pub.h"
+#include "dma.h"
+
+/* RX FIFO numbers */
+#define RX_FIFO 0 /* data and ctl frames */
+#define RX_TXSTATUS_FIFO 3 /* RX fifo for tx status packages */
+
+/* TX FIFO numbers using WME Access Category */
+#define TX_AC_BK_FIFO 0 /* Background TX FIFO */
+#define TX_AC_BE_FIFO 1 /* Best-Effort TX FIFO */
+#define TX_AC_VI_FIFO 2 /* Video TX FIFO */
+#define TX_AC_VO_FIFO 3 /* Voice TX FIFO */
+#define TX_BCMC_FIFO 4 /* Broadcast/Multicast TX FIFO */
+#define TX_ATIM_FIFO 5 /* TX fifo for ATIM window info */
+
+/* Addr is byte address used by SW; offset is word offset used by uCode */
+
+/* Per AC TX limit settings */
+#define M_AC_TXLMT_BASE_ADDR (0x180 * 2)
+#define M_AC_TXLMT_ADDR(_ac) (M_AC_TXLMT_BASE_ADDR + (2 * (_ac)))
+
+/* Legacy TX FIFO numbers */
+#define TX_DATA_FIFO TX_AC_BE_FIFO
+#define TX_CTL_FIFO TX_AC_VO_FIFO
+
+#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */
+
+struct intctrlregs {
+ u32 intstatus;
+ u32 intmask;
+};
+
+/* PIO structure,
+ * support two PIO format: 2 bytes access and 4 bytes access
+ * basic FIFO register set is per channel(transmit or receive)
+ * a pair of channels is defined for convenience
+ */
+/* 2byte-wide pio register set per channel(xmt or rcv) */
+struct pio2regs {
+ u16 fifocontrol;
+ u16 fifodata;
+ u16 fifofree; /* only valid in xmt channel, not in rcv channel */
+ u16 PAD;
+};
+
+/* a pair of pio channels(tx and rx) */
+struct pio2regp {
+ struct pio2regs tx;
+ struct pio2regs rx;
+};
+
+/* 4byte-wide pio register set per channel(xmt or rcv) */
+struct pio4regs {
+ u32 fifocontrol;
+ u32 fifodata;
+};
+
+/* a pair of pio channels(tx and rx) */
+struct pio4regp {
+ struct pio4regs tx;
+ struct pio4regs rx;
+};
+
+/* read: 32-bit register that can be read as 32-bit or as 2 16-bit
+ * write: only low 16b-it half can be written
+ */
+union pmqreg {
+ u32 pmqhostdata; /* read only! */
+ struct {
+ u16 pmqctrlstatus; /* read/write */
+ u16 PAD;
+ } w;
+};
+
+struct fifo64 {
+ struct dma64regs dmaxmt; /* dma tx */
+ struct pio4regs piotx; /* pio tx */
+ struct dma64regs dmarcv; /* dma rx */
+ struct pio4regs piorx; /* pio rx */
+};
+
+/*
+ * Host Interface Registers
+ */
+struct d11regs {
+ /* Device Control ("semi-standard host registers") */
+ u32 PAD[3]; /* 0x0 - 0x8 */
+ u32 biststatus; /* 0xC */
+ u32 biststatus2; /* 0x10 */
+ u32 PAD; /* 0x14 */
+ u32 gptimer; /* 0x18 */
+ u32 usectimer; /* 0x1c *//* for corerev >= 26 */
+
+ /* Interrupt Control *//* 0x20 */
+ struct intctrlregs intctrlregs[8];
+
+ u32 PAD[40]; /* 0x60 - 0xFC */
+
+ u32 intrcvlazy[4]; /* 0x100 - 0x10C */
+
+ u32 PAD[4]; /* 0x110 - 0x11c */
+
+ u32 maccontrol; /* 0x120 */
+ u32 maccommand; /* 0x124 */
+ u32 macintstatus; /* 0x128 */
+ u32 macintmask; /* 0x12C */
+
+ /* Transmit Template Access */
+ u32 tplatewrptr; /* 0x130 */
+ u32 tplatewrdata; /* 0x134 */
+ u32 PAD[2]; /* 0x138 - 0x13C */
+
+ /* PMQ registers */
+ union pmqreg pmqreg; /* 0x140 */
+ u32 pmqpatl; /* 0x144 */
+ u32 pmqpath; /* 0x148 */
+ u32 PAD; /* 0x14C */
+
+ u32 chnstatus; /* 0x150 */
+ u32 psmdebug; /* 0x154 */
+ u32 phydebug; /* 0x158 */
+ u32 machwcap; /* 0x15C */
+
+ /* Extended Internal Objects */
+ u32 objaddr; /* 0x160 */
+ u32 objdata; /* 0x164 */
+ u32 PAD[2]; /* 0x168 - 0x16c */
+
+ u32 frmtxstatus; /* 0x170 */
+ u32 frmtxstatus2; /* 0x174 */
+ u32 PAD[2]; /* 0x178 - 0x17c */
+
+ /* TSF host access */
+ u32 tsf_timerlow; /* 0x180 */
+ u32 tsf_timerhigh; /* 0x184 */
+ u32 tsf_cfprep; /* 0x188 */
+ u32 tsf_cfpstart; /* 0x18c */
+ u32 tsf_cfpmaxdur32; /* 0x190 */
+ u32 PAD[3]; /* 0x194 - 0x19c */
+
+ u32 maccontrol1; /* 0x1a0 */
+ u32 machwcap1; /* 0x1a4 */
+ u32 PAD[14]; /* 0x1a8 - 0x1dc */
+
+ /* Clock control and hardware workarounds*/
+ u32 clk_ctl_st; /* 0x1e0 */
+ u32 hw_war;
+ u32 d11_phypllctl; /* the phypll request/avail bits are
+ * moved to clk_ctl_st
+ */
+ u32 PAD[5]; /* 0x1ec - 0x1fc */
+
+ /* 0x200-0x37F dma/pio registers */
+ struct fifo64 fifo64regs[6];
+
+ /* FIFO diagnostic port access */
+ struct dma32diag dmafifo; /* 0x380 - 0x38C */
+
+ u32 aggfifocnt; /* 0x390 */
+ u32 aggfifodata; /* 0x394 */
+ u32 PAD[16]; /* 0x398 - 0x3d4 */
+ u16 radioregaddr; /* 0x3d8 */
+ u16 radioregdata; /* 0x3da */
+
+ /*
+ * time delay between the change on rf disable input and
+ * radio shutdown
+ */
+ u32 rfdisabledly; /* 0x3DC */
+
+ /* PHY register access */
+ u16 phyversion; /* 0x3e0 - 0x0 */
+ u16 phybbconfig; /* 0x3e2 - 0x1 */
+ u16 phyadcbias; /* 0x3e4 - 0x2 Bphy only */
+ u16 phyanacore; /* 0x3e6 - 0x3 pwwrdwn on aphy */
+ u16 phyrxstatus0; /* 0x3e8 - 0x4 */
+ u16 phyrxstatus1; /* 0x3ea - 0x5 */
+ u16 phycrsth; /* 0x3ec - 0x6 */
+ u16 phytxerror; /* 0x3ee - 0x7 */
+ u16 phychannel; /* 0x3f0 - 0x8 */
+ u16 PAD[1]; /* 0x3f2 - 0x9 */
+ u16 phytest; /* 0x3f4 - 0xa */
+ u16 phy4waddr; /* 0x3f6 - 0xb */
+ u16 phy4wdatahi; /* 0x3f8 - 0xc */
+ u16 phy4wdatalo; /* 0x3fa - 0xd */
+ u16 phyregaddr; /* 0x3fc - 0xe */
+ u16 phyregdata; /* 0x3fe - 0xf */
+
+ /* IHR *//* 0x400 - 0x7FE */
+
+ /* RXE Block */
+ u16 PAD[3]; /* 0x400 - 0x406 */
+ u16 rcv_fifo_ctl; /* 0x406 */
+ u16 PAD; /* 0x408 - 0x40a */
+ u16 rcv_frm_cnt; /* 0x40a */
+ u16 PAD[4]; /* 0x40a - 0x414 */
+ u16 rssi; /* 0x414 */
+ u16 PAD[5]; /* 0x414 - 0x420 */
+ u16 rcm_ctl; /* 0x420 */
+ u16 rcm_mat_data; /* 0x422 */
+ u16 rcm_mat_mask; /* 0x424 */
+ u16 rcm_mat_dly; /* 0x426 */
+ u16 rcm_cond_mask_l; /* 0x428 */
+ u16 rcm_cond_mask_h; /* 0x42A */
+ u16 rcm_cond_dly; /* 0x42C */
+ u16 PAD[1]; /* 0x42E */
+ u16 ext_ihr_addr; /* 0x430 */
+ u16 ext_ihr_data; /* 0x432 */
+ u16 rxe_phyrs_2; /* 0x434 */
+ u16 rxe_phyrs_3; /* 0x436 */
+ u16 phy_mode; /* 0x438 */
+ u16 rcmta_ctl; /* 0x43a */
+ u16 rcmta_size; /* 0x43c */
+ u16 rcmta_addr0; /* 0x43e */
+ u16 rcmta_addr1; /* 0x440 */
+ u16 rcmta_addr2; /* 0x442 */
+ u16 PAD[30]; /* 0x444 - 0x480 */
+
+ /* PSM Block *//* 0x480 - 0x500 */
+
+ u16 PAD; /* 0x480 */
+ u16 psm_maccontrol_h; /* 0x482 */
+ u16 psm_macintstatus_l; /* 0x484 */
+ u16 psm_macintstatus_h; /* 0x486 */
+ u16 psm_macintmask_l; /* 0x488 */
+ u16 psm_macintmask_h; /* 0x48A */
+ u16 PAD; /* 0x48C */
+ u16 psm_maccommand; /* 0x48E */
+ u16 psm_brc; /* 0x490 */
+ u16 psm_phy_hdr_param; /* 0x492 */
+ u16 psm_postcard; /* 0x494 */
+ u16 psm_pcard_loc_l; /* 0x496 */
+ u16 psm_pcard_loc_h; /* 0x498 */
+ u16 psm_gpio_in; /* 0x49A */
+ u16 psm_gpio_out; /* 0x49C */
+ u16 psm_gpio_oe; /* 0x49E */
+
+ u16 psm_bred_0; /* 0x4A0 */
+ u16 psm_bred_1; /* 0x4A2 */
+ u16 psm_bred_2; /* 0x4A4 */
+ u16 psm_bred_3; /* 0x4A6 */
+ u16 psm_brcl_0; /* 0x4A8 */
+ u16 psm_brcl_1; /* 0x4AA */
+ u16 psm_brcl_2; /* 0x4AC */
+ u16 psm_brcl_3; /* 0x4AE */
+ u16 psm_brpo_0; /* 0x4B0 */
+ u16 psm_brpo_1; /* 0x4B2 */
+ u16 psm_brpo_2; /* 0x4B4 */
+ u16 psm_brpo_3; /* 0x4B6 */
+ u16 psm_brwk_0; /* 0x4B8 */
+ u16 psm_brwk_1; /* 0x4BA */
+ u16 psm_brwk_2; /* 0x4BC */
+ u16 psm_brwk_3; /* 0x4BE */
+
+ u16 psm_base_0; /* 0x4C0 */
+ u16 psm_base_1; /* 0x4C2 */
+ u16 psm_base_2; /* 0x4C4 */
+ u16 psm_base_3; /* 0x4C6 */
+ u16 psm_base_4; /* 0x4C8 */
+ u16 psm_base_5; /* 0x4CA */
+ u16 psm_base_6; /* 0x4CC */
+ u16 psm_pc_reg_0; /* 0x4CE */
+ u16 psm_pc_reg_1; /* 0x4D0 */
+ u16 psm_pc_reg_2; /* 0x4D2 */
+ u16 psm_pc_reg_3; /* 0x4D4 */
+ u16 PAD[0xD]; /* 0x4D6 - 0x4DE */
+ u16 psm_corectlsts; /* 0x4f0 *//* Corerev >= 13 */
+ u16 PAD[0x7]; /* 0x4f2 - 0x4fE */
+
+ /* TXE0 Block *//* 0x500 - 0x580 */
+ u16 txe_ctl; /* 0x500 */
+ u16 txe_aux; /* 0x502 */
+ u16 txe_ts_loc; /* 0x504 */
+ u16 txe_time_out; /* 0x506 */
+ u16 txe_wm_0; /* 0x508 */
+ u16 txe_wm_1; /* 0x50A */
+ u16 txe_phyctl; /* 0x50C */
+ u16 txe_status; /* 0x50E */
+ u16 txe_mmplcp0; /* 0x510 */
+ u16 txe_mmplcp1; /* 0x512 */
+ u16 txe_phyctl1; /* 0x514 */
+
+ u16 PAD[0x05]; /* 0x510 - 0x51E */
+
+ /* Transmit control */
+ u16 xmtfifodef; /* 0x520 */
+ u16 xmtfifo_frame_cnt; /* 0x522 *//* Corerev >= 16 */
+ u16 xmtfifo_byte_cnt; /* 0x524 *//* Corerev >= 16 */
+ u16 xmtfifo_head; /* 0x526 *//* Corerev >= 16 */
+ u16 xmtfifo_rd_ptr; /* 0x528 *//* Corerev >= 16 */
+ u16 xmtfifo_wr_ptr; /* 0x52A *//* Corerev >= 16 */
+ u16 xmtfifodef1; /* 0x52C *//* Corerev >= 16 */
+
+ u16 PAD[0x09]; /* 0x52E - 0x53E */
+
+ u16 xmtfifocmd; /* 0x540 */
+ u16 xmtfifoflush; /* 0x542 */
+ u16 xmtfifothresh; /* 0x544 */
+ u16 xmtfifordy; /* 0x546 */
+ u16 xmtfifoprirdy; /* 0x548 */
+ u16 xmtfiforqpri; /* 0x54A */
+ u16 xmttplatetxptr; /* 0x54C */
+ u16 PAD; /* 0x54E */
+ u16 xmttplateptr; /* 0x550 */
+ u16 smpl_clct_strptr; /* 0x552 *//* Corerev >= 22 */
+ u16 smpl_clct_stpptr; /* 0x554 *//* Corerev >= 22 */
+ u16 smpl_clct_curptr; /* 0x556 *//* Corerev >= 22 */
+ u16 PAD[0x04]; /* 0x558 - 0x55E */
+ u16 xmttplatedatalo; /* 0x560 */
+ u16 xmttplatedatahi; /* 0x562 */
+
+ u16 PAD[2]; /* 0x564 - 0x566 */
+
+ u16 xmtsel; /* 0x568 */
+ u16 xmttxcnt; /* 0x56A */
+ u16 xmttxshmaddr; /* 0x56C */
+
+ u16 PAD[0x09]; /* 0x56E - 0x57E */
+
+ /* TXE1 Block */
+ u16 PAD[0x40]; /* 0x580 - 0x5FE */
+
+ /* TSF Block */
+ u16 PAD[0X02]; /* 0x600 - 0x602 */
+ u16 tsf_cfpstrt_l; /* 0x604 */
+ u16 tsf_cfpstrt_h; /* 0x606 */
+ u16 PAD[0X05]; /* 0x608 - 0x610 */
+ u16 tsf_cfppretbtt; /* 0x612 */
+ u16 PAD[0XD]; /* 0x614 - 0x62C */
+ u16 tsf_clk_frac_l; /* 0x62E */
+ u16 tsf_clk_frac_h; /* 0x630 */
+ u16 PAD[0X14]; /* 0x632 - 0x658 */
+ u16 tsf_random; /* 0x65A */
+ u16 PAD[0x05]; /* 0x65C - 0x664 */
+ /* GPTimer 2 registers */
+ u16 tsf_gpt2_stat; /* 0x666 */
+ u16 tsf_gpt2_ctr_l; /* 0x668 */
+ u16 tsf_gpt2_ctr_h; /* 0x66A */
+ u16 tsf_gpt2_val_l; /* 0x66C */
+ u16 tsf_gpt2_val_h; /* 0x66E */
+ u16 tsf_gptall_stat; /* 0x670 */
+ u16 PAD[0x07]; /* 0x672 - 0x67E */
+
+ /* IFS Block */
+ u16 ifs_sifs_rx_tx_tx; /* 0x680 */
+ u16 ifs_sifs_nav_tx; /* 0x682 */
+ u16 ifs_slot; /* 0x684 */
+ u16 PAD; /* 0x686 */
+ u16 ifs_ctl; /* 0x688 */
+ u16 PAD[0x3]; /* 0x68a - 0x68F */
+ u16 ifsstat; /* 0x690 */
+ u16 ifsmedbusyctl; /* 0x692 */
+ u16 iftxdur; /* 0x694 */
+ u16 PAD[0x3]; /* 0x696 - 0x69b */
+ /* EDCF support in dot11macs */
+ u16 ifs_aifsn; /* 0x69c */
+ u16 ifs_ctl1; /* 0x69e */
+
+ /* slow clock registers */
+ u16 scc_ctl; /* 0x6a0 */
+ u16 scc_timer_l; /* 0x6a2 */
+ u16 scc_timer_h; /* 0x6a4 */
+ u16 scc_frac; /* 0x6a6 */
+ u16 scc_fastpwrup_dly; /* 0x6a8 */
+ u16 scc_per; /* 0x6aa */
+ u16 scc_per_frac; /* 0x6ac */
+ u16 scc_cal_timer_l; /* 0x6ae */
+ u16 scc_cal_timer_h; /* 0x6b0 */
+ u16 PAD; /* 0x6b2 */
+
+ u16 PAD[0x26];
+
+ /* NAV Block */
+ u16 nav_ctl; /* 0x700 */
+ u16 navstat; /* 0x702 */
+ u16 PAD[0x3e]; /* 0x702 - 0x77E */
+
+ /* WEP/PMQ Block *//* 0x780 - 0x7FE */
+ u16 PAD[0x20]; /* 0x780 - 0x7BE */
+
+ u16 wepctl; /* 0x7C0 */
+ u16 wepivloc; /* 0x7C2 */
+ u16 wepivkey; /* 0x7C4 */
+ u16 wepwkey; /* 0x7C6 */
+
+ u16 PAD[4]; /* 0x7C8 - 0x7CE */
+ u16 pcmctl; /* 0X7D0 */
+ u16 pcmstat; /* 0X7D2 */
+ u16 PAD[6]; /* 0x7D4 - 0x7DE */
+
+ u16 pmqctl; /* 0x7E0 */
+ u16 pmqstatus; /* 0x7E2 */
+ u16 pmqpat0; /* 0x7E4 */
+ u16 pmqpat1; /* 0x7E6 */
+ u16 pmqpat2; /* 0x7E8 */
+
+ u16 pmqdat; /* 0x7EA */
+ u16 pmqdator; /* 0x7EC */
+ u16 pmqhst; /* 0x7EE */
+ u16 pmqpath0; /* 0x7F0 */
+ u16 pmqpath1; /* 0x7F2 */
+ u16 pmqpath2; /* 0x7F4 */
+ u16 pmqdath; /* 0x7F6 */
+
+ u16 PAD[0x04]; /* 0x7F8 - 0x7FE */
+
+ /* SHM *//* 0x800 - 0xEFE */
+ u16 PAD[0x380]; /* 0x800 - 0xEFE */
+};
+
+#define PIHR_BASE 0x0400 /* byte address of packed IHR region */
+
+/* biststatus */
+#define BT_DONE (1U << 31) /* bist done */
+#define BT_B2S (1 << 30) /* bist2 ram summary bit */
+
+/* intstatus and intmask */
+#define I_PC (1 << 10) /* pci descriptor error */
+#define I_PD (1 << 11) /* pci data error */
+#define I_DE (1 << 12) /* descriptor protocol error */
+#define I_RU (1 << 13) /* receive descriptor underflow */
+#define I_RO (1 << 14) /* receive fifo overflow */
+#define I_XU (1 << 15) /* transmit fifo underflow */
+#define I_RI (1 << 16) /* receive interrupt */
+#define I_XI (1 << 24) /* transmit interrupt */
+
+/* interrupt receive lazy */
+#define IRL_TO_MASK 0x00ffffff /* timeout */
+#define IRL_FC_MASK 0xff000000 /* frame count */
+#define IRL_FC_SHIFT 24 /* frame count */
+
+/*== maccontrol register ==*/
+#define MCTL_GMODE (1U << 31)
+#define MCTL_DISCARD_PMQ (1 << 30)
+#define MCTL_WAKE (1 << 26)
+#define MCTL_HPS (1 << 25)
+#define MCTL_PROMISC (1 << 24)
+#define MCTL_KEEPBADFCS (1 << 23)
+#define MCTL_KEEPCONTROL (1 << 22)
+#define MCTL_PHYLOCK (1 << 21)
+#define MCTL_BCNS_PROMISC (1 << 20)
+#define MCTL_LOCK_RADIO (1 << 19)
+#define MCTL_AP (1 << 18)
+#define MCTL_INFRA (1 << 17)
+#define MCTL_BIGEND (1 << 16)
+#define MCTL_GPOUT_SEL_MASK (3 << 14)
+#define MCTL_GPOUT_SEL_SHIFT 14
+#define MCTL_EN_PSMDBG (1 << 13)
+#define MCTL_IHR_EN (1 << 10)
+#define MCTL_SHM_UPPER (1 << 9)
+#define MCTL_SHM_EN (1 << 8)
+#define MCTL_PSM_JMP_0 (1 << 2)
+#define MCTL_PSM_RUN (1 << 1)
+#define MCTL_EN_MAC (1 << 0)
+
+/*== maccommand register ==*/
+#define MCMD_BCN0VLD (1 << 0)
+#define MCMD_BCN1VLD (1 << 1)
+#define MCMD_DIRFRMQVAL (1 << 2)
+#define MCMD_CCA (1 << 3)
+#define MCMD_BG_NOISE (1 << 4)
+#define MCMD_SKIP_SHMINIT (1 << 5) /* only used for simulation */
+#define MCMD_SAMPLECOLL MCMD_SKIP_SHMINIT /* reuse for sample collect */
+
+/*== macintstatus/macintmask ==*/
+/* gracefully suspended */
+#define MI_MACSSPNDD (1 << 0)
+/* beacon template available */
+#define MI_BCNTPL (1 << 1)
+/* TBTT indication */
+#define MI_TBTT (1 << 2)
+/* beacon successfully tx'd */
+#define MI_BCNSUCCESS (1 << 3)
+/* beacon canceled (IBSS) */
+#define MI_BCNCANCLD (1 << 4)
+/* end of ATIM-window (IBSS) */
+#define MI_ATIMWINEND (1 << 5)
+/* PMQ entries available */
+#define MI_PMQ (1 << 6)
+/* non-specific gen-stat bits that are set by PSM */
+#define MI_NSPECGEN_0 (1 << 7)
+/* non-specific gen-stat bits that are set by PSM */
+#define MI_NSPECGEN_1 (1 << 8)
+/* MAC level Tx error */
+#define MI_MACTXERR (1 << 9)
+/* non-specific gen-stat bits that are set by PSM */
+#define MI_NSPECGEN_3 (1 << 10)
+/* PHY Tx error */
+#define MI_PHYTXERR (1 << 11)
+/* Power Management Event */
+#define MI_PME (1 << 12)
+/* General-purpose timer0 */
+#define MI_GP0 (1 << 13)
+/* General-purpose timer1 */
+#define MI_GP1 (1 << 14)
+/* (ORed) DMA-interrupts */
+#define MI_DMAINT (1 << 15)
+/* MAC has completed a TX FIFO Suspend/Flush */
+#define MI_TXSTOP (1 << 16)
+/* MAC has completed a CCA measurement */
+#define MI_CCA (1 << 17)
+/* MAC has collected background noise samples */
+#define MI_BG_NOISE (1 << 18)
+/* MBSS DTIM TBTT indication */
+#define MI_DTIM_TBTT (1 << 19)
+/* Probe response queue needs attention */
+#define MI_PRQ (1 << 20)
+/* Radio/PHY has been powered back up. */
+#define MI_PWRUP (1 << 21)
+#define MI_RESERVED3 (1 << 22)
+#define MI_RESERVED2 (1 << 23)
+#define MI_RESERVED1 (1 << 25)
+/* MAC detected change on RF Disable input*/
+#define MI_RFDISABLE (1 << 28)
+/* MAC has completed a TX */
+#define MI_TFS (1 << 29)
+/* A phy status change wrt G mode */
+#define MI_PHYCHANGED (1 << 30)
+/* general purpose timeout */
+#define MI_TO (1U << 31)
+
+/* Mac capabilities registers */
+/*== machwcap ==*/
+#define MCAP_TKIPMIC 0x80000000 /* TKIP MIC hardware present */
+
+/*== pmqhost data ==*/
+/* data entry of head pmq entry */
+#define PMQH_DATA_MASK 0xffff0000
+/* PM entry for BSS config */
+#define PMQH_BSSCFG 0x00100000
+/* PM Mode OFF: power save off */
+#define PMQH_PMOFF 0x00010000
+/* PM Mode ON: power save on */
+#define PMQH_PMON 0x00020000
+/* Dis-associated or De-authenticated */
+#define PMQH_DASAT 0x00040000
+/* ATIM not acknowledged */
+#define PMQH_ATIMFAIL 0x00080000
+/* delete head entry */
+#define PMQH_DEL_ENTRY 0x00000001
+/* delete head entry to cur read pointer -1 */
+#define PMQH_DEL_MULT 0x00000002
+/* pmq overflow indication */
+#define PMQH_OFLO 0x00000004
+/* entries are present in pmq */
+#define PMQH_NOT_EMPTY 0x00000008
+
+/*== phydebug ==*/
+/* phy is asserting carrier sense */
+#define PDBG_CRS (1 << 0)
+/* phy is taking xmit byte from mac this cycle */
+#define PDBG_TXA (1 << 1)
+/* mac is instructing the phy to transmit a frame */
+#define PDBG_TXF (1 << 2)
+/* phy is signalling a transmit Error to the mac */
+#define PDBG_TXE (1 << 3)
+/* phy detected the end of a valid frame preamble */
+#define PDBG_RXF (1 << 4)
+/* phy detected the end of a valid PLCP header */
+#define PDBG_RXS (1 << 5)
+/* rx start not asserted */
+#define PDBG_RXFRG (1 << 6)
+/* mac is taking receive byte from phy this cycle */
+#define PDBG_RXV (1 << 7)
+/* RF portion of the radio is disabled */
+#define PDBG_RFD (1 << 16)
+
+/*== objaddr register ==*/
+#define OBJADDR_SEL_MASK 0x000F0000
+#define OBJADDR_UCM_SEL 0x00000000
+#define OBJADDR_SHM_SEL 0x00010000
+#define OBJADDR_SCR_SEL 0x00020000
+#define OBJADDR_IHR_SEL 0x00030000
+#define OBJADDR_RCMTA_SEL 0x00040000
+#define OBJADDR_SRCHM_SEL 0x00060000
+#define OBJADDR_WINC 0x01000000
+#define OBJADDR_RINC 0x02000000
+#define OBJADDR_AUTO_INC 0x03000000
+
+#define WEP_PCMADDR 0x07d4
+#define WEP_PCMDATA 0x07d6
+
+/*== frmtxstatus ==*/
+#define TXS_V (1 << 0) /* valid bit */
+#define TXS_STATUS_MASK 0xffff
+#define TXS_FID_MASK 0xffff0000
+#define TXS_FID_SHIFT 16
+
+/*== frmtxstatus2 ==*/
+#define TXS_SEQ_MASK 0xffff
+#define TXS_PTX_MASK 0xff0000
+#define TXS_PTX_SHIFT 16
+#define TXS_MU_MASK 0x01000000
+#define TXS_MU_SHIFT 24
+
+/*== clk_ctl_st ==*/
+#define CCS_ERSRC_REQ_D11PLL 0x00000100 /* d11 core pll request */
+#define CCS_ERSRC_REQ_PHYPLL 0x00000200 /* PHY pll request */
+#define CCS_ERSRC_AVAIL_D11PLL 0x01000000 /* d11 core pll available */
+#define CCS_ERSRC_AVAIL_PHYPLL 0x02000000 /* PHY pll available */
+
+/* HT Cloclk Ctrl and Clock Avail for 4313 */
+#define CCS_ERSRC_REQ_HT 0x00000010 /* HT avail request */
+#define CCS_ERSRC_AVAIL_HT 0x00020000 /* HT clock available */
+
+/* tsf_cfprep register */
+#define CFPREP_CBI_MASK 0xffffffc0
+#define CFPREP_CBI_SHIFT 6
+#define CFPREP_CFPP 0x00000001
+
+/* tx fifo sizes values are in terms of 256 byte blocks */
+#define TXFIFOCMD_RESET_MASK (1 << 15) /* reset */
+#define TXFIFOCMD_FIFOSEL_SHIFT 8 /* fifo */
+#define TXFIFO_FIFOTOP_SHIFT 8 /* fifo start */
+
+#define TXFIFO_START_BLK16 65 /* Base address + 32 * 512 B/P */
+#define TXFIFO_START_BLK 6 /* Base address + 6 * 256 B */
+#define TXFIFO_SIZE_UNIT 256 /* one unit corresponds to 256 bytes */
+#define MBSS16_TEMPLMEM_MINBLKS 65 /* one unit corresponds to 256 bytes */
+
+/*== phy versions (PhyVersion:Revision field) ==*/
+/* analog block version */
+#define PV_AV_MASK 0xf000
+/* analog block version bitfield offset */
+#define PV_AV_SHIFT 12
+/* phy type */
+#define PV_PT_MASK 0x0f00
+/* phy type bitfield offset */
+#define PV_PT_SHIFT 8
+/* phy version */
+#define PV_PV_MASK 0x000f
+#define PHY_TYPE(v) ((v & PV_PT_MASK) >> PV_PT_SHIFT)
+
+/*== phy types (PhyVersion:PhyType field) ==*/
+#define PHY_TYPE_N 4 /* N-Phy value */
+#define PHY_TYPE_SSN 6 /* SSLPN-Phy value */
+#define PHY_TYPE_LCN 8 /* LCN-Phy value */
+#define PHY_TYPE_LCNXN 9 /* LCNXN-Phy value */
+#define PHY_TYPE_NULL 0xf /* Invalid Phy value */
+
+/*== analog types (PhyVersion:AnalogType field) ==*/
+#define ANA_11N_013 5
+
+/* 802.11a PLCP header def */
+struct ofdm_phy_hdr {
+ u8 rlpt[3]; /* rate, length, parity, tail */
+ u16 service;
+ u8 pad;
+} __packed;
+
+#define D11A_PHY_HDR_GRATE(phdr) ((phdr)->rlpt[0] & 0x0f)
+#define D11A_PHY_HDR_GRES(phdr) (((phdr)->rlpt[0] >> 4) & 0x01)
+#define D11A_PHY_HDR_GLENGTH(phdr) (((u32 *)((phdr)->rlpt) >> 5) & 0x0fff)
+#define D11A_PHY_HDR_GPARITY(phdr) (((phdr)->rlpt[3] >> 1) & 0x01)
+#define D11A_PHY_HDR_GTAIL(phdr) (((phdr)->rlpt[3] >> 2) & 0x3f)
+
+/* rate encoded per 802.11a-1999 sec 17.3.4.1 */
+#define D11A_PHY_HDR_SRATE(phdr, rate) \
+ ((phdr)->rlpt[0] = ((phdr)->rlpt[0] & 0xf0) | ((rate) & 0xf))
+/* set reserved field to zero */
+#define D11A_PHY_HDR_SRES(phdr) ((phdr)->rlpt[0] &= 0xef)
+/* length is number of octets in PSDU */
+#define D11A_PHY_HDR_SLENGTH(phdr, length) \
+ (*(u32 *)((phdr)->rlpt) = *(u32 *)((phdr)->rlpt) | \
+ (((length) & 0x0fff) << 5))
+/* set the tail to all zeros */
+#define D11A_PHY_HDR_STAIL(phdr) ((phdr)->rlpt[3] &= 0x03)
+
+#define D11A_PHY_HDR_LEN_L 3 /* low-rate part of PLCP header */
+#define D11A_PHY_HDR_LEN_R 2 /* high-rate part of PLCP header */
+
+#define D11A_PHY_TX_DELAY (2) /* 2.1 usec */
+
+#define D11A_PHY_HDR_TIME (4) /* low-rate part of PLCP header */
+#define D11A_PHY_PRE_TIME (16)
+#define D11A_PHY_PREHDR_TIME (D11A_PHY_PRE_TIME + D11A_PHY_HDR_TIME)
+
+/* 802.11b PLCP header def */
+struct cck_phy_hdr {
+ u8 signal;
+ u8 service;
+ u16 length;
+ u16 crc;
+} __packed;
+
+#define D11B_PHY_HDR_LEN 6
+
+#define D11B_PHY_TX_DELAY (3) /* 3.4 usec */
+
+#define D11B_PHY_LHDR_TIME (D11B_PHY_HDR_LEN << 3)
+#define D11B_PHY_LPRE_TIME (144)
+#define D11B_PHY_LPREHDR_TIME (D11B_PHY_LPRE_TIME + D11B_PHY_LHDR_TIME)
+
+#define D11B_PHY_SHDR_TIME (D11B_PHY_LHDR_TIME >> 1)
+#define D11B_PHY_SPRE_TIME (D11B_PHY_LPRE_TIME >> 1)
+#define D11B_PHY_SPREHDR_TIME (D11B_PHY_SPRE_TIME + D11B_PHY_SHDR_TIME)
+
+#define D11B_PLCP_SIGNAL_LOCKED (1 << 2)
+#define D11B_PLCP_SIGNAL_LE (1 << 7)
+
+#define MIMO_PLCP_MCS_MASK 0x7f /* mcs index */
+#define MIMO_PLCP_40MHZ 0x80 /* 40 Hz frame */
+#define MIMO_PLCP_AMPDU 0x08 /* ampdu */
+
+#define BRCMS_GET_CCK_PLCP_LEN(plcp) (plcp[4] + (plcp[5] << 8))
+#define BRCMS_GET_MIMO_PLCP_LEN(plcp) (plcp[1] + (plcp[2] << 8))
+#define BRCMS_SET_MIMO_PLCP_LEN(plcp, len) \
+ do { \
+ plcp[1] = len & 0xff; \
+ plcp[2] = ((len >> 8) & 0xff); \
+ } while (0);
+
+#define BRCMS_SET_MIMO_PLCP_AMPDU(plcp) (plcp[3] |= MIMO_PLCP_AMPDU)
+#define BRCMS_CLR_MIMO_PLCP_AMPDU(plcp) (plcp[3] &= ~MIMO_PLCP_AMPDU)
+#define BRCMS_IS_MIMO_PLCP_AMPDU(plcp) (plcp[3] & MIMO_PLCP_AMPDU)
+
+/*
+ * The dot11a PLCP header is 5 bytes. To simplify the software (so that we
+ * don't need e.g. different tx DMA headers for 11a and 11b), the PLCP header
+ * has padding added in the ucode.
+ */
+#define D11_PHY_HDR_LEN 6
+
+/* TX DMA buffer header */
+struct d11txh {
+ __le16 MacTxControlLow; /* 0x0 */
+ __le16 MacTxControlHigh; /* 0x1 */
+ __le16 MacFrameControl; /* 0x2 */
+ __le16 TxFesTimeNormal; /* 0x3 */
+ __le16 PhyTxControlWord; /* 0x4 */
+ __le16 PhyTxControlWord_1; /* 0x5 */
+ __le16 PhyTxControlWord_1_Fbr; /* 0x6 */
+ __le16 PhyTxControlWord_1_Rts; /* 0x7 */
+ __le16 PhyTxControlWord_1_FbrRts; /* 0x8 */
+ __le16 MainRates; /* 0x9 */
+ __le16 XtraFrameTypes; /* 0xa */
+ u8 IV[16]; /* 0x0b - 0x12 */
+ u8 TxFrameRA[6]; /* 0x13 - 0x15 */
+ __le16 TxFesTimeFallback; /* 0x16 */
+ u8 RTSPLCPFallback[6]; /* 0x17 - 0x19 */
+ __le16 RTSDurFallback; /* 0x1a */
+ u8 FragPLCPFallback[6]; /* 0x1b - 1d */
+ __le16 FragDurFallback; /* 0x1e */
+ __le16 MModeLen; /* 0x1f */
+ __le16 MModeFbrLen; /* 0x20 */
+ __le16 TstampLow; /* 0x21 */
+ __le16 TstampHigh; /* 0x22 */
+ __le16 ABI_MimoAntSel; /* 0x23 */
+ __le16 PreloadSize; /* 0x24 */
+ __le16 AmpduSeqCtl; /* 0x25 */
+ __le16 TxFrameID; /* 0x26 */
+ __le16 TxStatus; /* 0x27 */
+ __le16 MaxNMpdus; /* 0x28 */
+ __le16 MaxABytes_MRT; /* 0x29 */
+ __le16 MaxABytes_FBR; /* 0x2a */
+ __le16 MinMBytes; /* 0x2b */
+ u8 RTSPhyHeader[D11_PHY_HDR_LEN]; /* 0x2c - 0x2e */
+ struct ieee80211_rts rts_frame; /* 0x2f - 0x36 */
+ u16 PAD; /* 0x37 */
+} __packed;
+
+#define D11_TXH_LEN 112 /* bytes */
+
+/* Frame Types */
+#define FT_CCK 0
+#define FT_OFDM 1
+#define FT_HT 2
+#define FT_N 3
+
+/*
+ * Position of MPDU inside A-MPDU; indicated with bits 10:9
+ * of MacTxControlLow
+ */
+#define TXC_AMPDU_SHIFT 9 /* shift for ampdu settings */
+#define TXC_AMPDU_NONE 0 /* Regular MPDU, not an A-MPDU */
+#define TXC_AMPDU_FIRST 1 /* first MPDU of an A-MPDU */
+#define TXC_AMPDU_MIDDLE 2 /* intermediate MPDU of an A-MPDU */
+#define TXC_AMPDU_LAST 3 /* last (or single) MPDU of an A-MPDU */
+
+/*== MacTxControlLow ==*/
+#define TXC_AMIC 0x8000
+#define TXC_SENDCTS 0x0800
+#define TXC_AMPDU_MASK 0x0600
+#define TXC_BW_40 0x0100
+#define TXC_FREQBAND_5G 0x0080
+#define TXC_DFCS 0x0040
+#define TXC_IGNOREPMQ 0x0020
+#define TXC_HWSEQ 0x0010
+#define TXC_STARTMSDU 0x0008
+#define TXC_SENDRTS 0x0004
+#define TXC_LONGFRAME 0x0002
+#define TXC_IMMEDACK 0x0001
+
+/*== MacTxControlHigh ==*/
+/* RTS fallback preamble type 1 = SHORT 0 = LONG */
+#define TXC_PREAMBLE_RTS_FB_SHORT 0x8000
+/* RTS main rate preamble type 1 = SHORT 0 = LONG */
+#define TXC_PREAMBLE_RTS_MAIN_SHORT 0x4000
+/*
+ * Main fallback rate preamble type
+ * 1 = SHORT for OFDM/GF for MIMO
+ * 0 = LONG for CCK/MM for MIMO
+ */
+#define TXC_PREAMBLE_DATA_FB_SHORT 0x2000
+
+/* TXC_PREAMBLE_DATA_MAIN is in PhyTxControl bit 5 */
+/* use fallback rate for this AMPDU */
+#define TXC_AMPDU_FBR 0x1000
+#define TXC_SECKEY_MASK 0x0FF0
+#define TXC_SECKEY_SHIFT 4
+/* Use alternate txpwr defined at loc. M_ALT_TXPWR_IDX */
+#define TXC_ALT_TXPWR 0x0008
+#define TXC_SECTYPE_MASK 0x0007
+#define TXC_SECTYPE_SHIFT 0
+
+/* Null delimiter for Fallback rate */
+#define AMPDU_FBR_NULL_DELIM 5 /* Location of Null delimiter count for AMPDU */
+
+/* PhyTxControl for Mimophy */
+#define PHY_TXC_PWR_MASK 0xFC00
+#define PHY_TXC_PWR_SHIFT 10
+#define PHY_TXC_ANT_MASK 0x03C0 /* bit 6, 7, 8, 9 */
+#define PHY_TXC_ANT_SHIFT 6
+#define PHY_TXC_ANT_0_1 0x00C0 /* auto, last rx */
+#define PHY_TXC_LCNPHY_ANT_LAST 0x0000
+#define PHY_TXC_ANT_3 0x0200 /* virtual antenna 3 */
+#define PHY_TXC_ANT_2 0x0100 /* virtual antenna 2 */
+#define PHY_TXC_ANT_1 0x0080 /* virtual antenna 1 */
+#define PHY_TXC_ANT_0 0x0040 /* virtual antenna 0 */
+#define PHY_TXC_SHORT_HDR 0x0010
+
+#define PHY_TXC_OLD_ANT_0 0x0000
+#define PHY_TXC_OLD_ANT_1 0x0100
+#define PHY_TXC_OLD_ANT_LAST 0x0300
+
+/* PhyTxControl_1 for Mimophy */
+#define PHY_TXC1_BW_MASK 0x0007
+#define PHY_TXC1_BW_10MHZ 0
+#define PHY_TXC1_BW_10MHZ_UP 1
+#define PHY_TXC1_BW_20MHZ 2
+#define PHY_TXC1_BW_20MHZ_UP 3
+#define PHY_TXC1_BW_40MHZ 4
+#define PHY_TXC1_BW_40MHZ_DUP 5
+#define PHY_TXC1_MODE_SHIFT 3
+#define PHY_TXC1_MODE_MASK 0x0038
+#define PHY_TXC1_MODE_SISO 0
+#define PHY_TXC1_MODE_CDD 1
+#define PHY_TXC1_MODE_STBC 2
+#define PHY_TXC1_MODE_SDM 3
+
+/* PhyTxControl for HTphy that are different from Mimophy */
+#define PHY_TXC_HTANT_MASK 0x3fC0 /* bits 6-13 */
+
+/* XtraFrameTypes */
+#define XFTS_RTS_FT_SHIFT 2
+#define XFTS_FBRRTS_FT_SHIFT 4
+#define XFTS_CHANNEL_SHIFT 8
+
+/* Antenna diversity bit in ant_wr_settle */
+#define PHY_AWS_ANTDIV 0x2000
+
+/* IFS ctl */
+#define IFS_USEEDCF (1 << 2)
+
+/* IFS ctl1 */
+#define IFS_CTL1_EDCRS (1 << 3)
+#define IFS_CTL1_EDCRS_20L (1 << 4)
+#define IFS_CTL1_EDCRS_40 (1 << 5)
+
+/* ABI_MimoAntSel */
+#define ABI_MAS_ADDR_BMP_IDX_MASK 0x0f00
+#define ABI_MAS_ADDR_BMP_IDX_SHIFT 8
+#define ABI_MAS_FBR_ANT_PTN_MASK 0x00f0
+#define ABI_MAS_FBR_ANT_PTN_SHIFT 4
+#define ABI_MAS_MRT_ANT_PTN_MASK 0x000f
+
+/* tx status packet */
+struct tx_status {
+ u16 framelen;
+ u16 PAD;
+ u16 frameid;
+ u16 status;
+ u16 lasttxtime;
+ u16 sequence;
+ u16 phyerr;
+ u16 ackphyrxsh;
+} __packed;
+
+#define TXSTATUS_LEN 16
+
+/* status field bit definitions */
+#define TX_STATUS_FRM_RTX_MASK 0xF000
+#define TX_STATUS_FRM_RTX_SHIFT 12
+#define TX_STATUS_RTS_RTX_MASK 0x0F00
+#define TX_STATUS_RTS_RTX_SHIFT 8
+#define TX_STATUS_MASK 0x00FE
+#define TX_STATUS_PMINDCTD (1 << 7) /* PM mode indicated to AP */
+#define TX_STATUS_INTERMEDIATE (1 << 6) /* intermediate or 1st ampdu pkg */
+#define TX_STATUS_AMPDU (1 << 5) /* AMPDU status */
+#define TX_STATUS_SUPR_MASK 0x1C /* suppress status bits (4:2) */
+#define TX_STATUS_SUPR_SHIFT 2
+#define TX_STATUS_ACK_RCV (1 << 1) /* ACK received */
+#define TX_STATUS_VALID (1 << 0) /* Tx status valid */
+#define TX_STATUS_NO_ACK 0
+
+/* suppress status reason codes */
+#define TX_STATUS_SUPR_PMQ (1 << 2) /* PMQ entry */
+#define TX_STATUS_SUPR_FLUSH (2 << 2) /* flush request */
+#define TX_STATUS_SUPR_FRAG (3 << 2) /* previous frag failure */
+#define TX_STATUS_SUPR_TBTT (3 << 2) /* SHARED: Probe resp supr for TBTT */
+#define TX_STATUS_SUPR_BADCH (4 << 2) /* channel mismatch */
+#define TX_STATUS_SUPR_EXPTIME (5 << 2) /* lifetime expiry */
+#define TX_STATUS_SUPR_UF (6 << 2) /* underflow */
+
+/* Unexpected tx status for rate update */
+#define TX_STATUS_UNEXP(status) \
+ ((((status) & TX_STATUS_INTERMEDIATE) != 0) && \
+ TX_STATUS_UNEXP_AMPDU(status))
+
+/* Unexpected tx status for A-MPDU rate update */
+#define TX_STATUS_UNEXP_AMPDU(status) \
+ ((((status) & TX_STATUS_SUPR_MASK) != 0) && \
+ (((status) & TX_STATUS_SUPR_MASK) != TX_STATUS_SUPR_EXPTIME))
+
+#define TX_STATUS_BA_BMAP03_MASK 0xF000 /* ba bitmap 0:3 in 1st pkg */
+#define TX_STATUS_BA_BMAP03_SHIFT 12 /* ba bitmap 0:3 in 1st pkg */
+#define TX_STATUS_BA_BMAP47_MASK 0x001E /* ba bitmap 4:7 in 2nd pkg */
+#define TX_STATUS_BA_BMAP47_SHIFT 3 /* ba bitmap 4:7 in 2nd pkg */
+
+/* RXE (Receive Engine) */
+
+/* RCM_CTL */
+#define RCM_INC_MASK_H 0x0080
+#define RCM_INC_MASK_L 0x0040
+#define RCM_INC_DATA 0x0020
+#define RCM_INDEX_MASK 0x001F
+#define RCM_SIZE 15
+
+#define RCM_MAC_OFFSET 0 /* current MAC address */
+#define RCM_BSSID_OFFSET 3 /* current BSSID address */
+#define RCM_F_BSSID_0_OFFSET 6 /* foreign BSS CFP tracking */
+#define RCM_F_BSSID_1_OFFSET 9 /* foreign BSS CFP tracking */
+#define RCM_F_BSSID_2_OFFSET 12 /* foreign BSS CFP tracking */
+
+#define RCM_WEP_TA0_OFFSET 16
+#define RCM_WEP_TA1_OFFSET 19
+#define RCM_WEP_TA2_OFFSET 22
+#define RCM_WEP_TA3_OFFSET 25
+
+/* PSM Block */
+
+/* psm_phy_hdr_param bits */
+#define MAC_PHY_RESET 1
+#define MAC_PHY_CLOCK_EN 2
+#define MAC_PHY_FORCE_CLK 4
+
+/* WEP Block */
+
+/* WEP_WKEY */
+#define WKEY_START (1 << 8)
+#define WKEY_SEL_MASK 0x1F
+
+/* WEP data formats */
+
+/* the number of RCMTA entries */
+#define RCMTA_SIZE 50
+
+#define M_ADDR_BMP_BLK (0x37e * 2)
+#define M_ADDR_BMP_BLK_SZ 12
+
+#define ADDR_BMP_RA (1 << 0) /* Receiver Address (RA) */
+#define ADDR_BMP_TA (1 << 1) /* Transmitter Address (TA) */
+#define ADDR_BMP_BSSID (1 << 2) /* BSSID */
+#define ADDR_BMP_AP (1 << 3) /* Infra-BSS Access Point */
+#define ADDR_BMP_STA (1 << 4) /* Infra-BSS Station */
+#define ADDR_BMP_RESERVED1 (1 << 5)
+#define ADDR_BMP_RESERVED2 (1 << 6)
+#define ADDR_BMP_RESERVED3 (1 << 7)
+#define ADDR_BMP_BSS_IDX_MASK (3 << 8) /* BSS control block index */
+#define ADDR_BMP_BSS_IDX_SHIFT 8
+
+#define WSEC_MAX_RCMTA_KEYS 54
+
+/* max keys in M_TKMICKEYS_BLK */
+#define WSEC_MAX_TKMIC_ENGINE_KEYS 12 /* 8 + 4 default */
+
+/* max RXE match registers */
+#define WSEC_MAX_RXE_KEYS 4
+
+/* SECKINDXALGO (Security Key Index & Algorithm Block) word format */
+/* SKL (Security Key Lookup) */
+#define SKL_ALGO_MASK 0x0007
+#define SKL_ALGO_SHIFT 0
+#define SKL_KEYID_MASK 0x0008
+#define SKL_KEYID_SHIFT 3
+#define SKL_INDEX_MASK 0x03F0
+#define SKL_INDEX_SHIFT 4
+#define SKL_GRP_ALGO_MASK 0x1c00
+#define SKL_GRP_ALGO_SHIFT 10
+
+/* additional bits defined for IBSS group key support */
+#define SKL_IBSS_INDEX_MASK 0x01F0
+#define SKL_IBSS_INDEX_SHIFT 4
+#define SKL_IBSS_KEYID1_MASK 0x0600
+#define SKL_IBSS_KEYID1_SHIFT 9
+#define SKL_IBSS_KEYID2_MASK 0x1800
+#define SKL_IBSS_KEYID2_SHIFT 11
+#define SKL_IBSS_KEYALGO_MASK 0xE000
+#define SKL_IBSS_KEYALGO_SHIFT 13
+
+#define WSEC_MODE_OFF 0
+#define WSEC_MODE_HW 1
+#define WSEC_MODE_SW 2
+
+#define WSEC_ALGO_OFF 0
+#define WSEC_ALGO_WEP1 1
+#define WSEC_ALGO_TKIP 2
+#define WSEC_ALGO_AES 3
+#define WSEC_ALGO_WEP128 4
+#define WSEC_ALGO_AES_LEGACY 5
+#define WSEC_ALGO_NALG 6
+
+#define AES_MODE_NONE 0
+#define AES_MODE_CCM 1
+
+/* WEP_CTL (Rev 0) */
+#define WECR0_KEYREG_SHIFT 0
+#define WECR0_KEYREG_MASK 0x7
+#define WECR0_DECRYPT (1 << 3)
+#define WECR0_IVINLINE (1 << 4)
+#define WECR0_WEPALG_SHIFT 5
+#define WECR0_WEPALG_MASK (0x7 << 5)
+#define WECR0_WKEYSEL_SHIFT 8
+#define WECR0_WKEYSEL_MASK (0x7 << 8)
+#define WECR0_WKEYSTART (1 << 11)
+#define WECR0_WEPINIT (1 << 14)
+#define WECR0_ICVERR (1 << 15)
+
+/* Frame template map byte offsets */
+#define T_ACTS_TPL_BASE (0)
+#define T_NULL_TPL_BASE (0xc * 2)
+#define T_QNULL_TPL_BASE (0x1c * 2)
+#define T_RR_TPL_BASE (0x2c * 2)
+#define T_BCN0_TPL_BASE (0x34 * 2)
+#define T_PRS_TPL_BASE (0x134 * 2)
+#define T_BCN1_TPL_BASE (0x234 * 2)
+#define T_TX_FIFO_TXRAM_BASE (T_ACTS_TPL_BASE + \
+ (TXFIFO_START_BLK * TXFIFO_SIZE_UNIT))
+
+#define T_BA_TPL_BASE T_QNULL_TPL_BASE /* template area for BA */
+
+#define T_RAM_ACCESS_SZ 4 /* template ram is 4 byte access only */
+
+/* Shared Mem byte offsets */
+
+/* Location where the ucode expects the corerev */
+#define M_MACHW_VER (0x00b * 2)
+
+/* Location where the ucode expects the MAC capabilities */
+#define M_MACHW_CAP_L (0x060 * 2)
+#define M_MACHW_CAP_H (0x061 * 2)
+
+/* WME shared memory */
+#define M_EDCF_STATUS_OFF (0x007 * 2)
+#define M_TXF_CUR_INDEX (0x018 * 2)
+#define M_EDCF_QINFO (0x120 * 2)
+
+/* PS-mode related parameters */
+#define M_DOT11_SLOT (0x008 * 2)
+#define M_DOT11_DTIMPERIOD (0x009 * 2)
+#define M_NOSLPZNATDTIM (0x026 * 2)
+
+/* Beacon-related parameters */
+#define M_BCN0_FRM_BYTESZ (0x00c * 2) /* Bcn 0 template length */
+#define M_BCN1_FRM_BYTESZ (0x00d * 2) /* Bcn 1 template length */
+#define M_BCN_TXTSF_OFFSET (0x00e * 2)
+#define M_TIMBPOS_INBEACON (0x00f * 2)
+#define M_SFRMTXCNTFBRTHSD (0x022 * 2)
+#define M_LFRMTXCNTFBRTHSD (0x023 * 2)
+#define M_BCN_PCTLWD (0x02a * 2)
+#define M_BCN_LI (0x05b * 2) /* beacon listen interval */
+
+/* MAX Rx Frame len */
+#define M_MAXRXFRM_LEN (0x010 * 2)
+
+/* ACK/CTS related params */
+#define M_RSP_PCTLWD (0x011 * 2)
+
+/* Hardware Power Control */
+#define M_TXPWR_N (0x012 * 2)
+#define M_TXPWR_TARGET (0x013 * 2)
+#define M_TXPWR_MAX (0x014 * 2)
+#define M_TXPWR_CUR (0x019 * 2)
+
+/* Rx-related parameters */
+#define M_RX_PAD_DATA_OFFSET (0x01a * 2)
+
+/* WEP Shared mem data */
+#define M_SEC_DEFIVLOC (0x01e * 2)
+#define M_SEC_VALNUMSOFTMCHTA (0x01f * 2)
+#define M_PHYVER (0x028 * 2)
+#define M_PHYTYPE (0x029 * 2)
+#define M_SECRXKEYS_PTR (0x02b * 2)
+#define M_TKMICKEYS_PTR (0x059 * 2)
+#define M_SECKINDXALGO_BLK (0x2ea * 2)
+#define M_SECKINDXALGO_BLK_SZ 54
+#define M_SECPSMRXTAMCH_BLK (0x2fa * 2)
+#define M_TKIP_TSC_TTAK (0x18c * 2)
+#define D11_MAX_KEY_SIZE 16
+
+#define M_MAX_ANTCNT (0x02e * 2) /* antenna swap threshold */
+
+/* Probe response related parameters */
+#define M_SSIDLEN (0x024 * 2)
+#define M_PRB_RESP_FRM_LEN (0x025 * 2)
+#define M_PRS_MAXTIME (0x03a * 2)
+#define M_SSID (0xb0 * 2)
+#define M_CTXPRS_BLK (0xc0 * 2)
+#define C_CTX_PCTLWD_POS (0x4 * 2)
+
+/* Delta between OFDM and CCK power in CCK power boost mode */
+#define M_OFDM_OFFSET (0x027 * 2)
+
+/* TSSI for last 4 11b/g CCK packets transmitted */
+#define M_B_TSSI_0 (0x02c * 2)
+#define M_B_TSSI_1 (0x02d * 2)
+
+/* Host flags to turn on ucode options */
+#define M_HOST_FLAGS1 (0x02f * 2)
+#define M_HOST_FLAGS2 (0x030 * 2)
+#define M_HOST_FLAGS3 (0x031 * 2)
+#define M_HOST_FLAGS4 (0x03c * 2)
+#define M_HOST_FLAGS5 (0x06a * 2)
+#define M_HOST_FLAGS_SZ 16
+
+#define M_RADAR_REG (0x033 * 2)
+
+/* TSSI for last 4 11a OFDM packets transmitted */
+#define M_A_TSSI_0 (0x034 * 2)
+#define M_A_TSSI_1 (0x035 * 2)
+
+/* noise interference measurement */
+#define M_NOISE_IF_COUNT (0x034 * 2)
+#define M_NOISE_IF_TIMEOUT (0x035 * 2)
+
+#define M_RF_RX_SP_REG1 (0x036 * 2)
+
+/* TSSI for last 4 11g OFDM packets transmitted */
+#define M_G_TSSI_0 (0x038 * 2)
+#define M_G_TSSI_1 (0x039 * 2)
+
+/* Background noise measure */
+#define M_JSSI_0 (0x44 * 2)
+#define M_JSSI_1 (0x45 * 2)
+#define M_JSSI_AUX (0x46 * 2)
+
+#define M_CUR_2050_RADIOCODE (0x47 * 2)
+
+/* TX fifo sizes */
+#define M_FIFOSIZE0 (0x4c * 2)
+#define M_FIFOSIZE1 (0x4d * 2)
+#define M_FIFOSIZE2 (0x4e * 2)
+#define M_FIFOSIZE3 (0x4f * 2)
+#define D11_MAX_TX_FRMS 32 /* max frames allowed in tx fifo */
+
+/* Current channel number plus upper bits */
+#define M_CURCHANNEL (0x50 * 2)
+#define D11_CURCHANNEL_5G 0x0100;
+#define D11_CURCHANNEL_40 0x0200;
+#define D11_CURCHANNEL_MAX 0x00FF;
+
+/* last posted frameid on the bcmc fifo */
+#define M_BCMC_FID (0x54 * 2)
+#define INVALIDFID 0xffff
+
+/* extended beacon phyctl bytes for 11N */
+#define M_BCN_PCTL1WD (0x058 * 2)
+
+/* idle busy ratio to duty_cycle requirement */
+#define M_TX_IDLE_BUSY_RATIO_X_16_CCK (0x52 * 2)
+#define M_TX_IDLE_BUSY_RATIO_X_16_OFDM (0x5A * 2)
+
+/* CW RSSI for LCNPHY */
+#define M_LCN_RSSI_0 0x1332
+#define M_LCN_RSSI_1 0x1338
+#define M_LCN_RSSI_2 0x133e
+#define M_LCN_RSSI_3 0x1344
+
+/* SNR for LCNPHY */
+#define M_LCN_SNR_A_0 0x1334
+#define M_LCN_SNR_B_0 0x1336
+
+#define M_LCN_SNR_A_1 0x133a
+#define M_LCN_SNR_B_1 0x133c
+
+#define M_LCN_SNR_A_2 0x1340
+#define M_LCN_SNR_B_2 0x1342
+
+#define M_LCN_SNR_A_3 0x1346
+#define M_LCN_SNR_B_3 0x1348
+
+#define M_LCN_LAST_RESET (81*2)
+#define M_LCN_LAST_LOC (63*2)
+#define M_LCNPHY_RESET_STATUS (4902)
+#define M_LCNPHY_DSC_TIME (0x98d*2)
+#define M_LCNPHY_RESET_CNT_DSC (0x98b*2)
+#define M_LCNPHY_RESET_CNT (0x98c*2)
+
+/* Rate table offsets */
+#define M_RT_DIRMAP_A (0xe0 * 2)
+#define M_RT_BBRSMAP_A (0xf0 * 2)
+#define M_RT_DIRMAP_B (0x100 * 2)
+#define M_RT_BBRSMAP_B (0x110 * 2)
+
+/* Rate table entry offsets */
+#define M_RT_PRS_PLCP_POS 10
+#define M_RT_PRS_DUR_POS 16
+#define M_RT_OFDM_PCTL1_POS 18
+
+#define M_20IN40_IQ (0x380 * 2)
+
+/* SHM locations where ucode stores the current power index */
+#define M_CURR_IDX1 (0x384 * 2)
+#define M_CURR_IDX2 (0x387 * 2)
+
+#define M_BSCALE_ANT0 (0x5e * 2)
+#define M_BSCALE_ANT1 (0x5f * 2)
+
+/* Antenna Diversity Testing */
+#define M_MIMO_ANTSEL_RXDFLT (0x63 * 2)
+#define M_ANTSEL_CLKDIV (0x61 * 2)
+#define M_MIMO_ANTSEL_TXDFLT (0x64 * 2)
+
+#define M_MIMO_MAXSYM (0x5d * 2)
+#define MIMO_MAXSYM_DEF 0x8000 /* 32k */
+#define MIMO_MAXSYM_MAX 0xffff /* 64k */
+
+#define M_WATCHDOG_8TU (0x1e * 2)
+#define WATCHDOG_8TU_DEF 5
+#define WATCHDOG_8TU_MAX 10
+
+/* Manufacturing Test Variables */
+/* PER test mode */
+#define M_PKTENG_CTRL (0x6c * 2)
+/* IFS for TX mode */
+#define M_PKTENG_IFS (0x6d * 2)
+/* Lower word of tx frmcnt/rx lostcnt */
+#define M_PKTENG_FRMCNT_LO (0x6e * 2)
+/* Upper word of tx frmcnt/rx lostcnt */
+#define M_PKTENG_FRMCNT_HI (0x6f * 2)
+
+/* Index variation in vbat ripple */
+#define M_LCN_PWR_IDX_MAX (0x67 * 2) /* highest index read by ucode */
+#define M_LCN_PWR_IDX_MIN (0x66 * 2) /* lowest index read by ucode */
+
+/* M_PKTENG_CTRL bit definitions */
+#define M_PKTENG_MODE_TX 0x0001
+#define M_PKTENG_MODE_TX_RIFS 0x0004
+#define M_PKTENG_MODE_TX_CTS 0x0008
+#define M_PKTENG_MODE_RX 0x0002
+#define M_PKTENG_MODE_RX_WITH_ACK 0x0402
+#define M_PKTENG_MODE_MASK 0x0003
+/* TX frames indicated in the frmcnt reg */
+#define M_PKTENG_FRMCNT_VLD 0x0100
+
+/* Sample Collect parameters (bitmap and type) */
+/* Trigger bitmap for sample collect */
+#define M_SMPL_COL_BMP (0x37d * 2)
+/* Sample collect type */
+#define M_SMPL_COL_CTL (0x3b2 * 2)
+
+#define ANTSEL_CLKDIV_4MHZ 6
+#define MIMO_ANTSEL_BUSY 0x4000 /* bit 14 (busy) */
+#define MIMO_ANTSEL_SEL 0x8000 /* bit 15 write the value */
+#define MIMO_ANTSEL_WAIT 50 /* 50us wait */
+#define MIMO_ANTSEL_OVERRIDE 0x8000 /* flag */
+
+struct shm_acparams {
+ u16 txop;
+ u16 cwmin;
+ u16 cwmax;
+ u16 cwcur;
+ u16 aifs;
+ u16 bslots;
+ u16 reggap;
+ u16 status;
+ u16 rsvd[8];
+} __packed;
+#define M_EDCF_QLEN (16 * 2)
+
+#define WME_STATUS_NEWAC (1 << 8)
+
+/* M_HOST_FLAGS */
+#define MHFMAX 5 /* Number of valid hostflag half-word (u16) */
+#define MHF1 0 /* Hostflag 1 index */
+#define MHF2 1 /* Hostflag 2 index */
+#define MHF3 2 /* Hostflag 3 index */
+#define MHF4 3 /* Hostflag 4 index */
+#define MHF5 4 /* Hostflag 5 index */
+
+/* Flags in M_HOST_FLAGS */
+/* Enable ucode antenna diversity help */
+#define MHF1_ANTDIV 0x0001
+/* Enable EDCF access control */
+#define MHF1_EDCF 0x0100
+#define MHF1_IQSWAP_WAR 0x0200
+/* Disable Slow clock request, for corerev < 11 */
+#define MHF1_FORCEFASTCLK 0x0400
+
+/* Flags in M_HOST_FLAGS2 */
+
+/* Flush BCMC FIFO immediately */
+#define MHF2_TXBCMC_NOW 0x0040
+/* Enable ucode/hw power control */
+#define MHF2_HWPWRCTL 0x0080
+#define MHF2_NPHY40MHZ_WAR 0x0800
+
+/* Flags in M_HOST_FLAGS3 */
+/* enabled mimo antenna selection */
+#define MHF3_ANTSEL_EN 0x0001
+/* antenna selection mode: 0: 2x3, 1: 2x4 */
+#define MHF3_ANTSEL_MODE 0x0002
+#define MHF3_RESERVED1 0x0004
+#define MHF3_RESERVED2 0x0008
+#define MHF3_NPHY_MLADV_WAR 0x0010
+
+/* Flags in M_HOST_FLAGS4 */
+/* force bphy Tx on core 0 (board level WAR) */
+#define MHF4_BPHY_TXCORE0 0x0080
+/* for 4313A0 FEM boards */
+#define MHF4_EXTPA_ENABLE 0x4000
+
+/* Flags in M_HOST_FLAGS5 */
+#define MHF5_4313_GPIOCTRL 0x0001
+#define MHF5_RESERVED1 0x0002
+#define MHF5_RESERVED2 0x0004
+/* Radio power setting for ucode */
+#define M_RADIO_PWR (0x32 * 2)
+
+/* phy noise recorded by ucode right after tx */
+#define M_PHY_NOISE (0x037 * 2)
+#define PHY_NOISE_MASK 0x00ff
+
+/*
+ * Receive Frame Data Header for 802.11b DCF-only frames
+ *
+ * RxFrameSize: Actual byte length of the frame data received
+ * PAD: padding (not used)
+ * PhyRxStatus_0: PhyRxStatus 15:0
+ * PhyRxStatus_1: PhyRxStatus 31:16
+ * PhyRxStatus_2: PhyRxStatus 47:32
+ * PhyRxStatus_3: PhyRxStatus 63:48
+ * PhyRxStatus_4: PhyRxStatus 79:64
+ * PhyRxStatus_5: PhyRxStatus 95:80
+ * RxStatus1: MAC Rx Status
+ * RxStatus2: extended MAC Rx status
+ * RxTSFTime: RxTSFTime time of first MAC symbol + M_PHY_PLCPRX_DLY
+ * RxChan: gain code, channel radio code, and phy type
+ */
+struct d11rxhdr_le {
+ __le16 RxFrameSize;
+ u16 PAD;
+ __le16 PhyRxStatus_0;
+ __le16 PhyRxStatus_1;
+ __le16 PhyRxStatus_2;
+ __le16 PhyRxStatus_3;
+ __le16 PhyRxStatus_4;
+ __le16 PhyRxStatus_5;
+ __le16 RxStatus1;
+ __le16 RxStatus2;
+ __le16 RxTSFTime;
+ __le16 RxChan;
+} __packed;
+
+struct d11rxhdr {
+ u16 RxFrameSize;
+ u16 PAD;
+ u16 PhyRxStatus_0;
+ u16 PhyRxStatus_1;
+ u16 PhyRxStatus_2;
+ u16 PhyRxStatus_3;
+ u16 PhyRxStatus_4;
+ u16 PhyRxStatus_5;
+ u16 RxStatus1;
+ u16 RxStatus2;
+ u16 RxTSFTime;
+ u16 RxChan;
+} __packed;
+
+/* PhyRxStatus_0: */
+/* NPHY only: CCK, OFDM, preN, N */
+#define PRXS0_FT_MASK 0x0003
+/* NPHY only: clip count adjustment steps by AGC */
+#define PRXS0_CLIP_MASK 0x000C
+#define PRXS0_CLIP_SHIFT 2
+/* PHY received a frame with unsupported rate */
+#define PRXS0_UNSRATE 0x0010
+/* GPHY: rx ant, NPHY: upper sideband */
+#define PRXS0_RXANT_UPSUBBAND 0x0020
+/* CCK frame only: lost crs during cck frame reception */
+#define PRXS0_LCRS 0x0040
+/* Short Preamble */
+#define PRXS0_SHORTH 0x0080
+/* PLCP violation */
+#define PRXS0_PLCPFV 0x0100
+/* PLCP header integrity check failed */
+#define PRXS0_PLCPHCF 0x0200
+/* legacy PHY gain control */
+#define PRXS0_GAIN_CTL 0x4000
+/* NPHY: Antennas used for received frame, bitmask */
+#define PRXS0_ANTSEL_MASK 0xF000
+#define PRXS0_ANTSEL_SHIFT 0x12
+
+/* subfield PRXS0_FT_MASK */
+#define PRXS0_CCK 0x0000
+/* valid only for G phy, use rxh->RxChan for A phy */
+#define PRXS0_OFDM 0x0001
+#define PRXS0_PREN 0x0002
+#define PRXS0_STDN 0x0003
+
+/* subfield PRXS0_ANTSEL_MASK */
+#define PRXS0_ANTSEL_0 0x0 /* antenna 0 is used */
+#define PRXS0_ANTSEL_1 0x2 /* antenna 1 is used */
+#define PRXS0_ANTSEL_2 0x4 /* antenna 2 is used */
+#define PRXS0_ANTSEL_3 0x8 /* antenna 3 is used */
+
+/* PhyRxStatus_1: */
+#define PRXS1_JSSI_MASK 0x00FF
+#define PRXS1_JSSI_SHIFT 0
+#define PRXS1_SQ_MASK 0xFF00
+#define PRXS1_SQ_SHIFT 8
+
+/* nphy PhyRxStatus_1: */
+#define PRXS1_nphy_PWR0_MASK 0x00FF
+#define PRXS1_nphy_PWR1_MASK 0xFF00
+
+/* HTPHY Rx Status defines */
+/* htphy PhyRxStatus_0: those bit are overlapped with PhyRxStatus_0 */
+#define PRXS0_BAND 0x0400 /* 0 = 2.4G, 1 = 5G */
+#define PRXS0_RSVD 0x0800 /* reserved; set to 0 */
+#define PRXS0_UNUSED 0xF000 /* unused and not defined; set to 0 */
+
+/* htphy PhyRxStatus_1: */
+/* core enables for {3..0}, 0=disabled, 1=enabled */
+#define PRXS1_HTPHY_CORE_MASK 0x000F
+/* antenna configation */
+#define PRXS1_HTPHY_ANTCFG_MASK 0x00F0
+/* Mixmode PLCP Length low byte mask */
+#define PRXS1_HTPHY_MMPLCPLenL_MASK 0xFF00
+
+/* htphy PhyRxStatus_2: */
+/* Mixmode PLCP Length high byte maskw */
+#define PRXS2_HTPHY_MMPLCPLenH_MASK 0x000F
+/* Mixmode PLCP rate mask */
+#define PRXS2_HTPHY_MMPLCH_RATE_MASK 0x00F0
+/* Rx power on core 0 */
+#define PRXS2_HTPHY_RXPWR_ANT0 0xFF00
+
+/* htphy PhyRxStatus_3: */
+/* Rx power on core 1 */
+#define PRXS3_HTPHY_RXPWR_ANT1 0x00FF
+/* Rx power on core 2 */
+#define PRXS3_HTPHY_RXPWR_ANT2 0xFF00
+
+/* htphy PhyRxStatus_4: */
+/* Rx power on core 3 */
+#define PRXS4_HTPHY_RXPWR_ANT3 0x00FF
+/* Coarse frequency offset */
+#define PRXS4_HTPHY_CFO 0xFF00
+
+/* htphy PhyRxStatus_5: */
+/* Fine frequency offset */
+#define PRXS5_HTPHY_FFO 0x00FF
+/* Advance Retard */
+#define PRXS5_HTPHY_AR 0xFF00
+
+#define HTPHY_MMPLCPLen(rxs) \
+ ((((rxs)->PhyRxStatus_1 & PRXS1_HTPHY_MMPLCPLenL_MASK) >> 8) | \
+ (((rxs)->PhyRxStatus_2 & PRXS2_HTPHY_MMPLCPLenH_MASK) << 8))
+/* Get Rx power on core 0 */
+#define HTPHY_RXPWR_ANT0(rxs) \
+ ((((rxs)->PhyRxStatus_2) & PRXS2_HTPHY_RXPWR_ANT0) >> 8)
+/* Get Rx power on core 1 */
+#define HTPHY_RXPWR_ANT1(rxs) \
+ (((rxs)->PhyRxStatus_3) & PRXS3_HTPHY_RXPWR_ANT1)
+/* Get Rx power on core 2 */
+#define HTPHY_RXPWR_ANT2(rxs) \
+ ((((rxs)->PhyRxStatus_3) & PRXS3_HTPHY_RXPWR_ANT2) >> 8)
+
+/* ucode RxStatus1: */
+#define RXS_BCNSENT 0x8000
+#define RXS_SECKINDX_MASK 0x07e0
+#define RXS_SECKINDX_SHIFT 5
+#define RXS_DECERR (1 << 4)
+#define RXS_DECATMPT (1 << 3)
+/* PAD bytes to make IP data 4 bytes aligned */
+#define RXS_PBPRES (1 << 2)
+#define RXS_RESPFRAMETX (1 << 1)
+#define RXS_FCSERR (1 << 0)
+
+/* ucode RxStatus2: */
+#define RXS_AMSDU_MASK 1
+#define RXS_AGGTYPE_MASK 0x6
+#define RXS_AGGTYPE_SHIFT 1
+#define RXS_PHYRXST_VALID (1 << 8)
+#define RXS_RXANT_MASK 0x3
+#define RXS_RXANT_SHIFT 12
+
+/* RxChan */
+#define RXS_CHAN_40 0x1000
+#define RXS_CHAN_5G 0x0800
+#define RXS_CHAN_ID_MASK 0x07f8
+#define RXS_CHAN_ID_SHIFT 3
+#define RXS_CHAN_PHYTYPE_MASK 0x0007
+#define RXS_CHAN_PHYTYPE_SHIFT 0
+
+/* Index of attenuations used during ucode power control. */
+#define M_PWRIND_BLKS (0x184 * 2)
+#define M_PWRIND_MAP0 (M_PWRIND_BLKS + 0x0)
+#define M_PWRIND_MAP1 (M_PWRIND_BLKS + 0x2)
+#define M_PWRIND_MAP2 (M_PWRIND_BLKS + 0x4)
+#define M_PWRIND_MAP3 (M_PWRIND_BLKS + 0x6)
+/* M_PWRIND_MAP(core) macro */
+#define M_PWRIND_MAP(core) (M_PWRIND_BLKS + ((core)<<1))
+
+/* PSM SHM variable offsets */
+#define M_PSM_SOFT_REGS 0x0
+#define M_BOM_REV_MAJOR (M_PSM_SOFT_REGS + 0x0)
+#define M_BOM_REV_MINOR (M_PSM_SOFT_REGS + 0x2)
+#define M_UCODE_DBGST (M_PSM_SOFT_REGS + 0x40) /* ucode debug status code */
+#define M_UCODE_MACSTAT (M_PSM_SOFT_REGS + 0xE0) /* macstat counters */
+
+#define M_AGING_THRSH (0x3e * 2) /* max time waiting for medium before tx */
+#define M_MBURST_SIZE (0x40 * 2) /* max frames in a frameburst */
+#define M_MBURST_TXOP (0x41 * 2) /* max frameburst TXOP in unit of us */
+#define M_SYNTHPU_DLY (0x4a * 2) /* pre-wakeup for synthpu, default: 500 */
+#define M_PRETBTT (0x4b * 2)
+
+/* offset to the target txpwr */
+#define M_ALT_TXPWR_IDX (M_PSM_SOFT_REGS + (0x3b * 2))
+#define M_PHY_TX_FLT_PTR (M_PSM_SOFT_REGS + (0x3d * 2))
+#define M_CTS_DURATION (M_PSM_SOFT_REGS + (0x5c * 2))
+#define M_LP_RCCAL_OVR (M_PSM_SOFT_REGS + (0x6b * 2))
+
+/* PKTENG Rx Stats Block */
+#define M_RXSTATS_BLK_PTR (M_PSM_SOFT_REGS + (0x65 * 2))
+
+/* ucode debug status codes */
+/* not valid really */
+#define DBGST_INACTIVE 0
+/* after zeroing SHM, before suspending at init */
+#define DBGST_INIT 1
+/* "normal" state */
+#define DBGST_ACTIVE 2
+/* suspended */
+#define DBGST_SUSPENDED 3
+/* asleep (PS mode) */
+#define DBGST_ASLEEP 4
+
+/* Scratch Reg defs */
+enum _ePsmScratchPadRegDefinitions {
+ S_RSV0 = 0,
+ S_RSV1,
+ S_RSV2,
+
+ /* offset 0x03: scratch registers for Dot11-contants */
+ S_DOT11_CWMIN, /* CW-minimum */
+ S_DOT11_CWMAX, /* CW-maximum */
+ S_DOT11_CWCUR, /* CW-current */
+ S_DOT11_SRC_LMT, /* short retry count limit */
+ S_DOT11_LRC_LMT, /* long retry count limit */
+ S_DOT11_DTIMCOUNT, /* DTIM-count */
+
+ /* offset 0x09: Tx-side scratch registers */
+ S_SEQ_NUM, /* hardware sequence number reg */
+ S_SEQ_NUM_FRAG, /* seq num for frags (at the start of MSDU) */
+ S_FRMRETX_CNT, /* frame retx count */
+ S_SSRC, /* Station short retry count */
+ S_SLRC, /* Station long retry count */
+ S_EXP_RSP, /* Expected response frame */
+ S_OLD_BREM, /* Remaining backoff ctr */
+ S_OLD_CWWIN, /* saved-off CW-cur */
+ S_TXECTL, /* TXE-Ctl word constructed in scr-pad */
+ S_CTXTST, /* frm type-subtype as read from Tx-descr */
+
+ /* offset 0x13: Rx-side scratch registers */
+ S_RXTST, /* Type and subtype in Rxframe */
+
+ /* Global state register */
+ S_STREG, /* state storage actual bit maps below */
+
+ S_TXPWR_SUM, /* Tx power control: accumulator */
+ S_TXPWR_ITER, /* Tx power control: iteration */
+ S_RX_FRMTYPE, /* Rate and PHY type for frames */
+ S_THIS_AGG, /* Size of this AGG (A-MSDU) */
+
+ S_KEYINDX,
+ S_RXFRMLEN, /* Receive MPDU length in bytes */
+
+ /* offset 0x1B: Receive TSF time stored in SCR */
+ S_RXTSFTMRVAL_WD3, /* TSF value at the start of rx */
+ S_RXTSFTMRVAL_WD2, /* TSF value at the start of rx */
+ S_RXTSFTMRVAL_WD1, /* TSF value at the start of rx */
+ S_RXTSFTMRVAL_WD0, /* TSF value at the start of rx */
+ S_RXSSN, /* Received start seq number for A-MPDU BA */
+ S_RXQOSFLD, /* Rx-QoS field (if present) */
+
+ /* offset 0x21: Scratch pad regs used in microcode as temp storage */
+ S_TMP0, /* stmp0 */
+ S_TMP1, /* stmp1 */
+ S_TMP2, /* stmp2 */
+ S_TMP3, /* stmp3 */
+ S_TMP4, /* stmp4 */
+ S_TMP5, /* stmp5 */
+ S_PRQPENALTY_CTR, /* Probe response queue penalty counter */
+ S_ANTCNT, /* unsuccessful attempts on current ant. */
+ S_SYMBOL, /* flag for possible symbol ctl frames */
+ S_RXTP, /* rx frame type */
+ S_STREG2, /* extra state storage */
+ S_STREG3, /* even more extra state storage */
+ S_STREG4, /* ... */
+ S_STREG5, /* remember to initialize it to zero */
+
+ S_ADJPWR_IDX,
+ S_CUR_PTR, /* Temp pointer for A-MPDU re-Tx SHM table */
+ S_REVID4, /* 0x33 */
+ S_INDX, /* 0x34 */
+ S_ADDR0, /* 0x35 */
+ S_ADDR1, /* 0x36 */
+ S_ADDR2, /* 0x37 */
+ S_ADDR3, /* 0x38 */
+ S_ADDR4, /* 0x39 */
+ S_ADDR5, /* 0x3A */
+ S_TMP6, /* 0x3B */
+ S_KEYINDX_BU, /* Backup for Key index */
+ S_MFGTEST_TMP0, /* Temp regs used for RX test calculations */
+ S_RXESN, /* Received end sequence number for A-MPDU BA */
+ S_STREG6, /* 0x3F */
+};
+
+#define S_BEACON_INDX S_OLD_BREM
+#define S_PRS_INDX S_OLD_CWWIN
+#define S_PHYTYPE S_SSRC
+#define S_PHYVER S_SLRC
+
+/* IHR SLOW_CTRL values */
+#define SLOW_CTRL_PDE (1 << 0)
+#define SLOW_CTRL_FD (1 << 8)
+
+/* ucode mac statistic counters in shared memory */
+struct macstat {
+ u16 txallfrm; /* 0x80 */
+ u16 txrtsfrm; /* 0x82 */
+ u16 txctsfrm; /* 0x84 */
+ u16 txackfrm; /* 0x86 */
+ u16 txdnlfrm; /* 0x88 */
+ u16 txbcnfrm; /* 0x8a */
+ u16 txfunfl[8]; /* 0x8c - 0x9b */
+ u16 txtplunfl; /* 0x9c */
+ u16 txphyerr; /* 0x9e */
+ u16 pktengrxducast; /* 0xa0 */
+ u16 pktengrxdmcast; /* 0xa2 */
+ u16 rxfrmtoolong; /* 0xa4 */
+ u16 rxfrmtooshrt; /* 0xa6 */
+ u16 rxinvmachdr; /* 0xa8 */
+ u16 rxbadfcs; /* 0xaa */
+ u16 rxbadplcp; /* 0xac */
+ u16 rxcrsglitch; /* 0xae */
+ u16 rxstrt; /* 0xb0 */
+ u16 rxdfrmucastmbss; /* 0xb2 */
+ u16 rxmfrmucastmbss; /* 0xb4 */
+ u16 rxcfrmucast; /* 0xb6 */
+ u16 rxrtsucast; /* 0xb8 */
+ u16 rxctsucast; /* 0xba */
+ u16 rxackucast; /* 0xbc */
+ u16 rxdfrmocast; /* 0xbe */
+ u16 rxmfrmocast; /* 0xc0 */
+ u16 rxcfrmocast; /* 0xc2 */
+ u16 rxrtsocast; /* 0xc4 */
+ u16 rxctsocast; /* 0xc6 */
+ u16 rxdfrmmcast; /* 0xc8 */
+ u16 rxmfrmmcast; /* 0xca */
+ u16 rxcfrmmcast; /* 0xcc */
+ u16 rxbeaconmbss; /* 0xce */
+ u16 rxdfrmucastobss; /* 0xd0 */
+ u16 rxbeaconobss; /* 0xd2 */
+ u16 rxrsptmout; /* 0xd4 */
+ u16 bcntxcancl; /* 0xd6 */
+ u16 PAD;
+ u16 rxf0ovfl; /* 0xda */
+ u16 rxf1ovfl; /* 0xdc */
+ u16 rxf2ovfl; /* 0xde */
+ u16 txsfovfl; /* 0xe0 */
+ u16 pmqovfl; /* 0xe2 */
+ u16 rxcgprqfrm; /* 0xe4 */
+ u16 rxcgprsqovfl; /* 0xe6 */
+ u16 txcgprsfail; /* 0xe8 */
+ u16 txcgprssuc; /* 0xea */
+ u16 prs_timeout; /* 0xec */
+ u16 rxnack;
+ u16 frmscons;
+ u16 txnack;
+ u16 txglitch_nack;
+ u16 txburst; /* 0xf6 # tx bursts */
+ u16 bphy_rxcrsglitch; /* bphy rx crs glitch */
+ u16 phywatchdog; /* 0xfa # of phy watchdog events */
+ u16 PAD;
+ u16 bphy_badplcp; /* bphy bad plcp */
+};
+
+/* dot11 core-specific control flags */
+#define SICF_PCLKE 0x0004 /* PHY clock enable */
+#define SICF_PRST 0x0008 /* PHY reset */
+#define SICF_MPCLKE 0x0010 /* MAC PHY clockcontrol enable */
+#define SICF_FREF 0x0020 /* PLL FreqRefSelect */
+/* NOTE: the following bw bits only apply when the core is attached
+ * to a NPHY
+ */
+#define SICF_BWMASK 0x00c0 /* phy clock mask (b6 & b7) */
+#define SICF_BW40 0x0080 /* 40MHz BW (160MHz phyclk) */
+#define SICF_BW20 0x0040 /* 20MHz BW (80MHz phyclk) */
+#define SICF_BW10 0x0000 /* 10MHz BW (40MHz phyclk) */
+#define SICF_GMODE 0x2000 /* gmode enable */
+
+/* dot11 core-specific status flags */
+#define SISF_2G_PHY 0x0001 /* 2.4G capable phy */
+#define SISF_5G_PHY 0x0002 /* 5G capable phy */
+#define SISF_FCLKA 0x0004 /* FastClkAvailable */
+#define SISF_DB_PHY 0x0008 /* Dualband phy */
+
+/* === End of MAC reg, Beginning of PHY(b/a/g/n) reg === */
+/* radio and LPPHY regs are separated */
+
+#define BPHY_REG_OFT_BASE 0x0
+/* offsets for indirect access to bphy registers */
+#define BPHY_BB_CONFIG 0x01
+#define BPHY_ADCBIAS 0x02
+#define BPHY_ANACORE 0x03
+#define BPHY_PHYCRSTH 0x06
+#define BPHY_TEST 0x0a
+#define BPHY_PA_TX_TO 0x10
+#define BPHY_SYNTH_DC_TO 0x11
+#define BPHY_PA_TX_TIME_UP 0x12
+#define BPHY_RX_FLTR_TIME_UP 0x13
+#define BPHY_TX_POWER_OVERRIDE 0x14
+#define BPHY_RF_OVERRIDE 0x15
+#define BPHY_RF_TR_LOOKUP1 0x16
+#define BPHY_RF_TR_LOOKUP2 0x17
+#define BPHY_COEFFS 0x18
+#define BPHY_PLL_OUT 0x19
+#define BPHY_REFRESH_MAIN 0x1a
+#define BPHY_REFRESH_TO0 0x1b
+#define BPHY_REFRESH_TO1 0x1c
+#define BPHY_RSSI_TRESH 0x20
+#define BPHY_IQ_TRESH_HH 0x21
+#define BPHY_IQ_TRESH_H 0x22
+#define BPHY_IQ_TRESH_L 0x23
+#define BPHY_IQ_TRESH_LL 0x24
+#define BPHY_GAIN 0x25
+#define BPHY_LNA_GAIN_RANGE 0x26
+#define BPHY_JSSI 0x27
+#define BPHY_TSSI_CTL 0x28
+#define BPHY_TSSI 0x29
+#define BPHY_TR_LOSS_CTL 0x2a
+#define BPHY_LO_LEAKAGE 0x2b
+#define BPHY_LO_RSSI_ACC 0x2c
+#define BPHY_LO_IQMAG_ACC 0x2d
+#define BPHY_TX_DC_OFF1 0x2e
+#define BPHY_TX_DC_OFF2 0x2f
+#define BPHY_PEAK_CNT_THRESH 0x30
+#define BPHY_FREQ_OFFSET 0x31
+#define BPHY_DIVERSITY_CTL 0x32
+#define BPHY_PEAK_ENERGY_LO 0x33
+#define BPHY_PEAK_ENERGY_HI 0x34
+#define BPHY_SYNC_CTL 0x35
+#define BPHY_TX_PWR_CTRL 0x36
+#define BPHY_TX_EST_PWR 0x37
+#define BPHY_STEP 0x38
+#define BPHY_WARMUP 0x39
+#define BPHY_LMS_CFF_READ 0x3a
+#define BPHY_LMS_COEFF_I 0x3b
+#define BPHY_LMS_COEFF_Q 0x3c
+#define BPHY_SIG_POW 0x3d
+#define BPHY_RFDC_CANCEL_CTL 0x3e
+#define BPHY_HDR_TYPE 0x40
+#define BPHY_SFD_TO 0x41
+#define BPHY_SFD_CTL 0x42
+#define BPHY_DEBUG 0x43
+#define BPHY_RX_DELAY_COMP 0x44
+#define BPHY_CRS_DROP_TO 0x45
+#define BPHY_SHORT_SFD_NZEROS 0x46
+#define BPHY_DSSS_COEFF1 0x48
+#define BPHY_DSSS_COEFF2 0x49
+#define BPHY_CCK_COEFF1 0x4a
+#define BPHY_CCK_COEFF2 0x4b
+#define BPHY_TR_CORR 0x4c
+#define BPHY_ANGLE_SCALE 0x4d
+#define BPHY_TX_PWR_BASE_IDX 0x4e
+#define BPHY_OPTIONAL_MODES2 0x4f
+#define BPHY_CCK_LMS_STEP 0x50
+#define BPHY_BYPASS 0x51
+#define BPHY_CCK_DELAY_LONG 0x52
+#define BPHY_CCK_DELAY_SHORT 0x53
+#define BPHY_PPROC_CHAN_DELAY 0x54
+#define BPHY_DDFS_ENABLE 0x58
+#define BPHY_PHASE_SCALE 0x59
+#define BPHY_FREQ_CONTROL 0x5a
+#define BPHY_LNA_GAIN_RANGE_10 0x5b
+#define BPHY_LNA_GAIN_RANGE_32 0x5c
+#define BPHY_OPTIONAL_MODES 0x5d
+#define BPHY_RX_STATUS2 0x5e
+#define BPHY_RX_STATUS3 0x5f
+#define BPHY_DAC_CONTROL 0x60
+#define BPHY_ANA11G_FILT_CTRL 0x62
+#define BPHY_REFRESH_CTRL 0x64
+#define BPHY_RF_OVERRIDE2 0x65
+#define BPHY_SPUR_CANCEL_CTRL 0x66
+#define BPHY_FINE_DIGIGAIN_CTRL 0x67
+#define BPHY_RSSI_LUT 0x88
+#define BPHY_RSSI_LUT_END 0xa7
+#define BPHY_TSSI_LUT 0xa8
+#define BPHY_TSSI_LUT_END 0xc7
+#define BPHY_TSSI2PWR_LUT 0x380
+#define BPHY_TSSI2PWR_LUT_END 0x39f
+#define BPHY_LOCOMP_LUT 0x3a0
+#define BPHY_LOCOMP_LUT_END 0x3bf
+#define BPHY_TXGAIN_LUT 0x3c0
+#define BPHY_TXGAIN_LUT_END 0x3ff
+
+/* Bits in BB_CONFIG: */
+#define PHY_BBC_ANT_MASK 0x0180
+#define PHY_BBC_ANT_SHIFT 7
+#define BB_DARWIN 0x1000
+#define BBCFG_RESETCCA 0x4000
+#define BBCFG_RESETRX 0x8000
+
+/* Bits in phytest(0x0a): */
+#define TST_DDFS 0x2000
+#define TST_TXFILT1 0x0800
+#define TST_UNSCRAM 0x0400
+#define TST_CARR_SUPP 0x0200
+#define TST_DC_COMP_LOOP 0x0100
+#define TST_LOOPBACK 0x0080
+#define TST_TXFILT0 0x0040
+#define TST_TXTEST_ENABLE 0x0020
+#define TST_TXTEST_RATE 0x0018
+#define TST_TXTEST_PHASE 0x0007
+
+/* phytest txTestRate values */
+#define TST_TXTEST_RATE_1MBPS 0
+#define TST_TXTEST_RATE_2MBPS 1
+#define TST_TXTEST_RATE_5_5MBPS 2
+#define TST_TXTEST_RATE_11MBPS 3
+#define TST_TXTEST_RATE_SHIFT 3
+
+#define SHM_BYT_CNT 0x2 /* IHR location */
+#define MAX_BYT_CNT 0x600 /* Maximum frame len */
+
+struct d11cnt {
+ u32 txfrag;
+ u32 txmulti;
+ u32 txfail;
+ u32 txretry;
+ u32 txretrie;
+ u32 rxdup;
+ u32 txrts;
+ u32 txnocts;
+ u32 txnoack;
+ u32 rxfrag;
+ u32 rxmulti;
+ u32 rxcrc;
+ u32 txfrmsnt;
+ u32 rxundec;
+};
+
+#endif /* _BRCM_D11_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
new file mode 100644
index 00000000000..b56a30297c2
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -0,0 +1,1425 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <brcmu_utils.h>
+#include <aiutils.h>
+#include "types.h"
+#include "dma.h"
+
+/*
+ * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within
+ * a contiguous 8kB physical address.
+ */
+#define D64RINGALIGN_BITS 13
+#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
+#define D64RINGALIGN (1 << D64RINGALIGN_BITS)
+
+#define D64MAXDD (D64MAXRINGSZ / sizeof(struct dma64desc))
+
+/* transmit channel control */
+#define D64_XC_XE 0x00000001 /* transmit enable */
+#define D64_XC_SE 0x00000002 /* transmit suspend request */
+#define D64_XC_LE 0x00000004 /* loopback enable */
+#define D64_XC_FL 0x00000010 /* flush request */
+#define D64_XC_PD 0x00000800 /* parity check disable */
+#define D64_XC_AE 0x00030000 /* address extension bits */
+#define D64_XC_AE_SHIFT 16
+
+/* transmit descriptor table pointer */
+#define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */
+
+/* transmit channel status */
+#define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */
+#define D64_XS0_XS_MASK 0xf0000000 /* transmit state */
+#define D64_XS0_XS_SHIFT 28
+#define D64_XS0_XS_DISABLED 0x00000000 /* disabled */
+#define D64_XS0_XS_ACTIVE 0x10000000 /* active */
+#define D64_XS0_XS_IDLE 0x20000000 /* idle wait */
+#define D64_XS0_XS_STOPPED 0x30000000 /* stopped */
+#define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */
+
+#define D64_XS1_AD_MASK 0x00001fff /* active descriptor */
+#define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */
+#define D64_XS1_XE_SHIFT 28
+#define D64_XS1_XE_NOERR 0x00000000 /* no error */
+#define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */
+#define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */
+#define D64_XS1_XE_DTE 0x30000000 /* data transfer error */
+#define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */
+#define D64_XS1_XE_COREE 0x50000000 /* core error */
+
+/* receive channel control */
+/* receive enable */
+#define D64_RC_RE 0x00000001
+/* receive frame offset */
+#define D64_RC_RO_MASK 0x000000fe
+#define D64_RC_RO_SHIFT 1
+/* direct fifo receive (pio) mode */
+#define D64_RC_FM 0x00000100
+/* separate rx header descriptor enable */
+#define D64_RC_SH 0x00000200
+/* overflow continue */
+#define D64_RC_OC 0x00000400
+/* parity check disable */
+#define D64_RC_PD 0x00000800
+/* address extension bits */
+#define D64_RC_AE 0x00030000
+#define D64_RC_AE_SHIFT 16
+
+/* flags for dma controller */
+/* partity enable */
+#define DMA_CTRL_PEN (1 << 0)
+/* rx overflow continue */
+#define DMA_CTRL_ROC (1 << 1)
+/* allow rx scatter to multiple descriptors */
+#define DMA_CTRL_RXMULTI (1 << 2)
+/* Unframed Rx/Tx data */
+#define DMA_CTRL_UNFRAMED (1 << 3)
+
+/* receive descriptor table pointer */
+#define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */
+
+/* receive channel status */
+#define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */
+#define D64_RS0_RS_MASK 0xf0000000 /* receive state */
+#define D64_RS0_RS_SHIFT 28
+#define D64_RS0_RS_DISABLED 0x00000000 /* disabled */
+#define D64_RS0_RS_ACTIVE 0x10000000 /* active */
+#define D64_RS0_RS_IDLE 0x20000000 /* idle wait */
+#define D64_RS0_RS_STOPPED 0x30000000 /* stopped */
+#define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */
+
+#define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */
+#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
+#define D64_RS1_RE_SHIFT 28
+#define D64_RS1_RE_NOERR 0x00000000 /* no error */
+#define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */
+#define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */
+#define D64_RS1_RE_DTE 0x30000000 /* data transfer error */
+#define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */
+#define D64_RS1_RE_COREE 0x50000000 /* core error */
+
+/* fifoaddr */
+#define D64_FA_OFF_MASK 0xffff /* offset */
+#define D64_FA_SEL_MASK 0xf0000 /* select */
+#define D64_FA_SEL_SHIFT 16
+#define D64_FA_SEL_XDD 0x00000 /* transmit dma data */
+#define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */
+#define D64_FA_SEL_RDD 0x40000 /* receive dma data */
+#define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */
+#define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */
+#define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */
+#define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */
+#define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */
+#define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */
+#define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */
+
+/* descriptor control flags 1 */
+#define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */
+#define D64_CTRL1_EOT ((u32)1 << 28) /* end of descriptor table */
+#define D64_CTRL1_IOC ((u32)1 << 29) /* interrupt on completion */
+#define D64_CTRL1_EOF ((u32)1 << 30) /* end of frame */
+#define D64_CTRL1_SOF ((u32)1 << 31) /* start of frame */
+
+/* descriptor control flags 2 */
+/* buffer byte count. real data len must <= 16KB */
+#define D64_CTRL2_BC_MASK 0x00007fff
+/* address extension bits */
+#define D64_CTRL2_AE 0x00030000
+#define D64_CTRL2_AE_SHIFT 16
+/* parity bit */
+#define D64_CTRL2_PARITY 0x00040000
+
+/* control flags in the range [27:20] are core-specific and not defined here */
+#define D64_CTRL_CORE_MASK 0x0ff00000
+
+#define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */
+#define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */
+#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1 */
+#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */
+
+/*
+ * packet headroom necessary to accommodate the largest header
+ * in the system, (i.e TXOFF). By doing, we avoid the need to
+ * allocate an extra buffer for the header when bridging to WL.
+ * There is a compile time check in wlc.c which ensure that this
+ * value is at least as big as TXOFF. This value is used in
+ * dma_rxfill().
+ */
+
+#define BCMEXTRAHDROOM 172
+
+/* debug/trace */
+#ifdef BCMDBG
+#define DMA_ERROR(args) \
+ do { \
+ if (!(*di->msg_level & 1)) \
+ ; \
+ else \
+ printk args; \
+ } while (0)
+#define DMA_TRACE(args) \
+ do { \
+ if (!(*di->msg_level & 2)) \
+ ; \
+ else \
+ printk args; \
+ } while (0)
+#else
+#define DMA_ERROR(args)
+#define DMA_TRACE(args)
+#endif /* BCMDBG */
+
+#define DMA_NONE(args)
+
+#define MAXNAMEL 8 /* 8 char names */
+
+/* macros to convert between byte offsets and indexes */
+#define B2I(bytes, type) ((bytes) / sizeof(type))
+#define I2B(index, type) ((index) * sizeof(type))
+
+#define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
+#define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
+
+#define PCI64ADDR_HIGH 0x80000000 /* address[63] */
+#define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
+
+/*
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+struct dma64desc {
+ __le32 ctrl1; /* misc control bits & bufcount */
+ __le32 ctrl2; /* buffer count and address extension */
+ __le32 addrlow; /* memory address of the date buffer, bits 31:0 */
+ __le32 addrhigh; /* memory address of the date buffer, bits 63:32 */
+};
+
+/* dma engine software state */
+struct dma_info {
+ struct dma_pub dma; /* exported structure */
+ uint *msg_level; /* message level pointer */
+ char name[MAXNAMEL]; /* callers name for diag msgs */
+
+ struct pci_dev *pbus; /* bus handle */
+
+ bool dma64; /* this dma engine is operating in 64-bit mode */
+ bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
+
+ /* 64-bit dma tx engine registers */
+ struct dma64regs __iomem *d64txregs;
+ /* 64-bit dma rx engine registers */
+ struct dma64regs __iomem *d64rxregs;
+ /* pointer to dma64 tx descriptor ring */
+ struct dma64desc *txd64;
+ /* pointer to dma64 rx descriptor ring */
+ struct dma64desc *rxd64;
+
+ u16 dmadesc_align; /* alignment requirement for dma descriptors */
+
+ u16 ntxd; /* # tx descriptors tunable */
+ u16 txin; /* index of next descriptor to reclaim */
+ u16 txout; /* index of next descriptor to post */
+ /* pointer to parallel array of pointers to packets */
+ struct sk_buff **txp;
+ /* Aligned physical address of descriptor ring */
+ dma_addr_t txdpa;
+ /* Original physical address of descriptor ring */
+ dma_addr_t txdpaorig;
+ u16 txdalign; /* #bytes added to alloc'd mem to align txd */
+ u32 txdalloc; /* #bytes allocated for the ring */
+ u32 xmtptrbase; /* When using unaligned descriptors, the ptr register
+ * is not just an index, it needs all 13 bits to be
+ * an offset from the addr register.
+ */
+
+ u16 nrxd; /* # rx descriptors tunable */
+ u16 rxin; /* index of next descriptor to reclaim */
+ u16 rxout; /* index of next descriptor to post */
+ /* pointer to parallel array of pointers to packets */
+ struct sk_buff **rxp;
+ /* Aligned physical address of descriptor ring */
+ dma_addr_t rxdpa;
+ /* Original physical address of descriptor ring */
+ dma_addr_t rxdpaorig;
+ u16 rxdalign; /* #bytes added to alloc'd mem to align rxd */
+ u32 rxdalloc; /* #bytes allocated for the ring */
+ u32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */
+
+ /* tunables */
+ unsigned int rxbufsize; /* rx buffer size in bytes, not including
+ * the extra headroom
+ */
+ uint rxextrahdrroom; /* extra rx headroom, reverseved to assist upper
+ * stack, e.g. some rx pkt buffers will be
+ * bridged to tx side without byte copying.
+ * The extra headroom needs to be large enough
+ * to fit txheader needs. Some dongle driver may
+ * not need it.
+ */
+ uint nrxpost; /* # rx buffers to keep posted */
+ unsigned int rxoffset; /* rxcontrol offset */
+ /* add to get dma address of descriptor ring, low 32 bits */
+ uint ddoffsetlow;
+ /* high 32 bits */
+ uint ddoffsethigh;
+ /* add to get dma address of data buffer, low 32 bits */
+ uint dataoffsetlow;
+ /* high 32 bits */
+ uint dataoffsethigh;
+ /* descriptor base need to be aligned or not */
+ bool aligndesc_4k;
+};
+
+/*
+ * default dma message level (if input msg_level
+ * pointer is null in dma_attach())
+ */
+static uint dma_msg_level;
+
+/* Check for odd number of 1's */
+static u32 parity32(__le32 data)
+{
+ /* no swap needed for counting 1's */
+ u32 par_data = *(u32 *)&data;
+
+ par_data ^= par_data >> 16;
+ par_data ^= par_data >> 8;
+ par_data ^= par_data >> 4;
+ par_data ^= par_data >> 2;
+ par_data ^= par_data >> 1;
+
+ return par_data & 1;
+}
+
+static bool dma64_dd_parity(struct dma64desc *dd)
+{
+ return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2);
+}
+
+/* descriptor bumping functions */
+
+static uint xxd(uint x, uint n)
+{
+ return x & (n - 1); /* faster than %, but n must be power of 2 */
+}
+
+static uint txd(struct dma_info *di, uint x)
+{
+ return xxd(x, di->ntxd);
+}
+
+static uint rxd(struct dma_info *di, uint x)
+{
+ return xxd(x, di->nrxd);
+}
+
+static uint nexttxd(struct dma_info *di, uint i)
+{
+ return txd(di, i + 1);
+}
+
+static uint prevtxd(struct dma_info *di, uint i)
+{
+ return txd(di, i - 1);
+}
+
+static uint nextrxd(struct dma_info *di, uint i)
+{
+ return txd(di, i + 1);
+}
+
+static uint ntxdactive(struct dma_info *di, uint h, uint t)
+{
+ return txd(di, t-h);
+}
+
+static uint nrxdactive(struct dma_info *di, uint h, uint t)
+{
+ return rxd(di, t-h);
+}
+
+static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
+{
+ uint dmactrlflags = di->dma.dmactrlflags;
+
+ if (di == NULL) {
+ DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name));
+ return 0;
+ }
+
+ dmactrlflags &= ~mask;
+ dmactrlflags |= flags;
+
+ /* If trying to enable parity, check if parity is actually supported */
+ if (dmactrlflags & DMA_CTRL_PEN) {
+ u32 control;
+
+ control = R_REG(&di->d64txregs->control);
+ W_REG(&di->d64txregs->control,
+ control | D64_XC_PD);
+ if (R_REG(&di->d64txregs->control) & D64_XC_PD)
+ /* We *can* disable it so it is supported,
+ * restore control register
+ */
+ W_REG(&di->d64txregs->control,
+ control);
+ else
+ /* Not supported, don't allow it to be enabled */
+ dmactrlflags &= ~DMA_CTRL_PEN;
+ }
+
+ di->dma.dmactrlflags = dmactrlflags;
+
+ return dmactrlflags;
+}
+
+static bool _dma64_addrext(struct dma64regs __iomem *dma64regs)
+{
+ u32 w;
+ OR_REG(&dma64regs->control, D64_XC_AE);
+ w = R_REG(&dma64regs->control);
+ AND_REG(&dma64regs->control, ~D64_XC_AE);
+ return (w & D64_XC_AE) == D64_XC_AE;
+}
+
+/*
+ * return true if this dma engine supports DmaExtendedAddrChanges,
+ * otherwise false
+ */
+static bool _dma_isaddrext(struct dma_info *di)
+{
+ /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
+
+ /* not all tx or rx channel are available */
+ if (di->d64txregs != NULL) {
+ if (!_dma64_addrext(di->d64txregs))
+ DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
+ "AE set\n", di->name));
+ return true;
+ } else if (di->d64rxregs != NULL) {
+ if (!_dma64_addrext(di->d64rxregs))
+ DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
+ "AE set\n", di->name));
+ return true;
+ }
+
+ return false;
+}
+
+static bool _dma_descriptor_align(struct dma_info *di)
+{
+ u32 addrl;
+
+ /* Check to see if the descriptors need to be aligned on 4K/8K or not */
+ if (di->d64txregs != NULL) {
+ W_REG(&di->d64txregs->addrlow, 0xff0);
+ addrl = R_REG(&di->d64txregs->addrlow);
+ if (addrl != 0)
+ return false;
+ } else if (di->d64rxregs != NULL) {
+ W_REG(&di->d64rxregs->addrlow, 0xff0);
+ addrl = R_REG(&di->d64rxregs->addrlow);
+ if (addrl != 0)
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Descriptor table must start at the DMA hardware dictated alignment, so
+ * allocated memory must be large enough to support this requirement.
+ */
+static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
+ u16 align_bits, uint *alloced,
+ dma_addr_t *pap)
+{
+ if (align_bits) {
+ u16 align = (1 << align_bits);
+ if (!IS_ALIGNED(PAGE_SIZE, align))
+ size += align;
+ *alloced = size;
+ }
+ return pci_alloc_consistent(pdev, size, pap);
+}
+
+static
+u8 dma_align_sizetobits(uint size)
+{
+ u8 bitpos = 0;
+ while (size >>= 1)
+ bitpos++;
+ return bitpos;
+}
+
+/* This function ensures that the DMA descriptor ring will not get allocated
+ * across Page boundary. If the allocation is done across the page boundary
+ * at the first time, then it is freed and the allocation is done at
+ * descriptor ring size aligned location. This will ensure that the ring will
+ * not cross page boundary
+ */
+static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
+ u16 *alignbits, uint *alloced,
+ dma_addr_t *descpa)
+{
+ void *va;
+ u32 desc_strtaddr;
+ u32 alignbytes = 1 << *alignbits;
+
+ va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
+
+ if (NULL == va)
+ return NULL;
+
+ desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
+ if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
+ & boundary)) {
+ *alignbits = dma_align_sizetobits(size);
+ pci_free_consistent(di->pbus, size, va, *descpa);
+ va = dma_alloc_consistent(di->pbus, size, *alignbits,
+ alloced, descpa);
+ }
+ return va;
+}
+
+static bool dma64_alloc(struct dma_info *di, uint direction)
+{
+ u16 size;
+ uint ddlen;
+ void *va;
+ uint alloced = 0;
+ u16 align;
+ u16 align_bits;
+
+ ddlen = sizeof(struct dma64desc);
+
+ size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
+ align_bits = di->dmadesc_align;
+ align = (1 << align_bits);
+
+ if (direction == DMA_TX) {
+ va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
+ &alloced, &di->txdpaorig);
+ if (va == NULL) {
+ DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
+ " failed\n", di->name));
+ return false;
+ }
+ align = (1 << align_bits);
+ di->txd64 = (struct dma64desc *)
+ roundup((unsigned long)va, align);
+ di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
+ di->txdpa = di->txdpaorig + di->txdalign;
+ di->txdalloc = alloced;
+ } else {
+ va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
+ &alloced, &di->rxdpaorig);
+ if (va == NULL) {
+ DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
+ " failed\n", di->name));
+ return false;
+ }
+ align = (1 << align_bits);
+ di->rxd64 = (struct dma64desc *)
+ roundup((unsigned long)va, align);
+ di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
+ di->rxdpa = di->rxdpaorig + di->rxdalign;
+ di->rxdalloc = alloced;
+ }
+
+ return true;
+}
+
+static bool _dma_alloc(struct dma_info *di, uint direction)
+{
+ return dma64_alloc(di, direction);
+}
+
+struct dma_pub *dma_attach(char *name, struct si_pub *sih,
+ void __iomem *dmaregstx, void __iomem *dmaregsrx,
+ uint ntxd, uint nrxd,
+ uint rxbufsize, int rxextheadroom,
+ uint nrxpost, uint rxoffset, uint *msg_level)
+{
+ struct dma_info *di;
+ uint size;
+
+ /* allocate private info structure */
+ di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
+ if (di == NULL)
+ return NULL;
+
+ di->msg_level = msg_level ? msg_level : &dma_msg_level;
+
+
+ di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
+
+ /* init dma reg pointer */
+ di->d64txregs = (struct dma64regs __iomem *) dmaregstx;
+ di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx;
+
+ /*
+ * Default flags (which can be changed by the driver calling
+ * dma_ctrlflags before enable): For backwards compatibility
+ * both Rx Overflow Continue and Parity are DISABLED.
+ */
+ _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
+
+ DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
+ "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
+ "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
+ di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
+ rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
+
+ /* make a private copy of our callers name */
+ strncpy(di->name, name, MAXNAMEL);
+ di->name[MAXNAMEL - 1] = '\0';
+
+ di->pbus = ((struct si_info *)sih)->pbus;
+
+ /* save tunables */
+ di->ntxd = (u16) ntxd;
+ di->nrxd = (u16) nrxd;
+
+ /* the actual dma size doesn't include the extra headroom */
+ di->rxextrahdrroom =
+ (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
+ if (rxbufsize > BCMEXTRAHDROOM)
+ di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
+ else
+ di->rxbufsize = (u16) rxbufsize;
+
+ di->nrxpost = (u16) nrxpost;
+ di->rxoffset = (u8) rxoffset;
+
+ /*
+ * figure out the DMA physical address offset for dd and data
+ * PCI/PCIE: they map silicon backplace address to zero
+ * based memory, need offset
+ * Other bus: use zero SI_BUS BIGENDIAN kludge: use sdram
+ * swapped region for data buffer, not descriptor
+ */
+ di->ddoffsetlow = 0;
+ di->dataoffsetlow = 0;
+ /* add offset for pcie with DMA64 bus */
+ di->ddoffsetlow = 0;
+ di->ddoffsethigh = SI_PCIE_DMA_H32;
+ di->dataoffsetlow = di->ddoffsetlow;
+ di->dataoffsethigh = di->ddoffsethigh;
+ /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
+ if ((ai_coreid(sih) == SDIOD_CORE_ID)
+ && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
+ di->addrext = 0;
+ else if ((ai_coreid(sih) == I2S_CORE_ID) &&
+ ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
+ di->addrext = 0;
+ else
+ di->addrext = _dma_isaddrext(di);
+
+ /* does the descriptor need to be aligned and if yes, on 4K/8K or not */
+ di->aligndesc_4k = _dma_descriptor_align(di);
+ if (di->aligndesc_4k) {
+ di->dmadesc_align = D64RINGALIGN_BITS;
+ if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
+ /* for smaller dd table, HW relax alignment reqmnt */
+ di->dmadesc_align = D64RINGALIGN_BITS - 1;
+ } else {
+ di->dmadesc_align = 4; /* 16 byte alignment */
+ }
+
+ DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
+ di->aligndesc_4k, di->dmadesc_align));
+
+ /* allocate tx packet pointer vector */
+ if (ntxd) {
+ size = ntxd * sizeof(void *);
+ di->txp = kzalloc(size, GFP_ATOMIC);
+ if (di->txp == NULL)
+ goto fail;
+ }
+
+ /* allocate rx packet pointer vector */
+ if (nrxd) {
+ size = nrxd * sizeof(void *);
+ di->rxp = kzalloc(size, GFP_ATOMIC);
+ if (di->rxp == NULL)
+ goto fail;
+ }
+
+ /*
+ * allocate transmit descriptor ring, only need ntxd descriptors
+ * but it must be aligned
+ */
+ if (ntxd) {
+ if (!_dma_alloc(di, DMA_TX))
+ goto fail;
+ }
+
+ /*
+ * allocate receive descriptor ring, only need nrxd descriptors
+ * but it must be aligned
+ */
+ if (nrxd) {
+ if (!_dma_alloc(di, DMA_RX))
+ goto fail;
+ }
+
+ if ((di->ddoffsetlow != 0) && !di->addrext) {
+ if (di->txdpa > SI_PCI_DMA_SZ) {
+ DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
+ "supported\n", di->name, (u32)di->txdpa));
+ goto fail;
+ }
+ if (di->rxdpa > SI_PCI_DMA_SZ) {
+ DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
+ "supported\n", di->name, (u32)di->rxdpa));
+ goto fail;
+ }
+ }
+
+ DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
+ "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow,
+ di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
+ di->addrext));
+
+ return (struct dma_pub *) di;
+
+ fail:
+ dma_detach((struct dma_pub *)di);
+ return NULL;
+}
+
+static inline void
+dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
+ dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
+{
+ u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
+
+ /* PCI bus with big(>1G) physical address, use address extension */
+ if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
+ ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
+ ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
+ ddring[outidx].ctrl1 = cpu_to_le32(*flags);
+ ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
+ } else {
+ /* address extension for 32-bit PCI */
+ u32 ae;
+
+ ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
+ pa &= ~PCI32ADDR_HIGH;
+
+ ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
+ ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
+ ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
+ ddring[outidx].ctrl1 = cpu_to_le32(*flags);
+ ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
+ }
+ if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
+ if (dma64_dd_parity(&ddring[outidx]))
+ ddring[outidx].ctrl2 =
+ cpu_to_le32(ctrl2 | D64_CTRL2_PARITY);
+ }
+}
+
+/* !! may be called with core in reset */
+void dma_detach(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+
+ DMA_TRACE(("%s: dma_detach\n", di->name));
+
+ /* free dma descriptor rings */
+ if (di->txd64)
+ pci_free_consistent(di->pbus, di->txdalloc,
+ ((s8 *)di->txd64 - di->txdalign),
+ (di->txdpaorig));
+ if (di->rxd64)
+ pci_free_consistent(di->pbus, di->rxdalloc,
+ ((s8 *)di->rxd64 - di->rxdalign),
+ (di->rxdpaorig));
+
+ /* free packet pointer vectors */
+ kfree(di->txp);
+ kfree(di->rxp);
+
+ /* free our private info structure */
+ kfree(di);
+
+}
+
+/* initialize descriptor table base address */
+static void
+_dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
+{
+ if (!di->aligndesc_4k) {
+ if (direction == DMA_TX)
+ di->xmtptrbase = pa;
+ else
+ di->rcvptrbase = pa;
+ }
+
+ if ((di->ddoffsetlow == 0)
+ || !(pa & PCI32ADDR_HIGH)) {
+ if (direction == DMA_TX) {
+ W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
+ W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
+ } else {
+ W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
+ W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
+ }
+ } else {
+ /* DMA64 32bits address extension */
+ u32 ae;
+
+ /* shift the high bit(s) from pa to ae */
+ ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
+ pa &= ~PCI32ADDR_HIGH;
+
+ if (direction == DMA_TX) {
+ W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
+ W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
+ SET_REG(&di->d64txregs->control,
+ D64_XC_AE, (ae << D64_XC_AE_SHIFT));
+ } else {
+ W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
+ W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
+ SET_REG(&di->d64rxregs->control,
+ D64_RC_AE, (ae << D64_RC_AE_SHIFT));
+ }
+ }
+}
+
+static void _dma_rxenable(struct dma_info *di)
+{
+ uint dmactrlflags = di->dma.dmactrlflags;
+ u32 control;
+
+ DMA_TRACE(("%s: dma_rxenable\n", di->name));
+
+ control =
+ (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
+ D64_RC_RE;
+
+ if ((dmactrlflags & DMA_CTRL_PEN) == 0)
+ control |= D64_RC_PD;
+
+ if (dmactrlflags & DMA_CTRL_ROC)
+ control |= D64_RC_OC;
+
+ W_REG(&di->d64rxregs->control,
+ ((di->rxoffset << D64_RC_RO_SHIFT) | control));
+}
+
+void dma_rxinit(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+
+ DMA_TRACE(("%s: dma_rxinit\n", di->name));
+
+ if (di->nrxd == 0)
+ return;
+
+ di->rxin = di->rxout = 0;
+
+ /* clear rx descriptor ring */
+ memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));
+
+ /* DMA engine with out alignment requirement requires table to be inited
+ * before enabling the engine
+ */
+ if (!di->aligndesc_4k)
+ _dma_ddtable_init(di, DMA_RX, di->rxdpa);
+
+ _dma_rxenable(di);
+
+ if (di->aligndesc_4k)
+ _dma_ddtable_init(di, DMA_RX, di->rxdpa);
+}
+
+static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
+{
+ uint i, curr;
+ struct sk_buff *rxp;
+ dma_addr_t pa;
+
+ i = di->rxin;
+
+ /* return if no packets posted */
+ if (i == di->rxout)
+ return NULL;
+
+ curr =
+ B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
+ di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
+
+ /* ignore curr if forceall */
+ if (!forceall && (i == curr))
+ return NULL;
+
+ /* get the packet pointer that corresponds to the rx descriptor */
+ rxp = di->rxp[i];
+ di->rxp[i] = NULL;
+
+ pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;
+
+ /* clear this packet from the descriptor ring */
+ pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
+
+ di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
+ di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
+
+ di->rxin = nextrxd(di, i);
+
+ return rxp;
+}
+
+static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
+{
+ if (di->nrxd == 0)
+ return NULL;
+
+ return dma64_getnextrxp(di, forceall);
+}
+
+/*
+ * !! rx entry routine
+ * returns a pointer to the next frame received, or NULL if there are no more
+ * if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is
+ * supported with pkts chain
+ * otherwise, it's treated as giant pkt and will be tossed.
+ * The DMA scattering starts with normal DMA header, followed by first
+ * buffer data. After it reaches the max size of buffer, the data continues
+ * in next DMA descriptor buffer WITHOUT DMA header
+ */
+struct sk_buff *dma_rx(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ struct sk_buff *p, *head, *tail;
+ uint len;
+ uint pkt_len;
+ int resid = 0;
+
+ next_frame:
+ head = _dma_getnextrxp(di, false);
+ if (head == NULL)
+ return NULL;
+
+ len = le16_to_cpu(*(__le16 *) (head->data));
+ DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
+ dma_spin_for_len(len, head);
+
+ /* set actual length */
+ pkt_len = min((di->rxoffset + len), di->rxbufsize);
+ __skb_trim(head, pkt_len);
+ resid = len - (di->rxbufsize - di->rxoffset);
+
+ /* check for single or multi-buffer rx */
+ if (resid > 0) {
+ tail = head;
+ while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
+ tail->next = p;
+ pkt_len = min_t(uint, resid, di->rxbufsize);
+ __skb_trim(p, pkt_len);
+
+ tail = p;
+ resid -= di->rxbufsize;
+ }
+
+#ifdef BCMDBG
+ if (resid > 0) {
+ uint cur;
+ cur =
+ B2I(((R_REG(&di->d64rxregs->status0) &
+ D64_RS0_CD_MASK) -
+ di->rcvptrbase) & D64_RS0_CD_MASK,
+ struct dma64desc);
+ DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n",
+ di->rxin, di->rxout, cur));
+ }
+#endif /* BCMDBG */
+
+ if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
+ DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
+ di->name, len));
+ brcmu_pkt_buf_free_skb(head);
+ di->dma.rxgiants++;
+ goto next_frame;
+ }
+ }
+
+ return head;
+}
+
+static bool dma64_rxidle(struct dma_info *di)
+{
+ DMA_TRACE(("%s: dma_rxidle\n", di->name));
+
+ if (di->nrxd == 0)
+ return true;
+
+ return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
+ (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
+}
+
+/*
+ * post receive buffers
+ * return false is refill failed completely and ring is empty this will stall
+ * the rx dma and user might want to call rxfill again asap. This unlikely
+ * happens on memory-rich NIC, but often on memory-constrained dongle
+ */
+bool dma_rxfill(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ struct sk_buff *p;
+ u16 rxin, rxout;
+ u32 flags = 0;
+ uint n;
+ uint i;
+ dma_addr_t pa;
+ uint extra_offset = 0;
+ bool ring_empty;
+
+ ring_empty = false;
+
+ /*
+ * Determine how many receive buffers we're lacking
+ * from the full complement, allocate, initialize,
+ * and post them, then update the chip rx lastdscr.
+ */
+
+ rxin = di->rxin;
+ rxout = di->rxout;
+
+ n = di->nrxpost - nrxdactive(di, rxin, rxout);
+
+ DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
+
+ if (di->rxbufsize > BCMEXTRAHDROOM)
+ extra_offset = di->rxextrahdrroom;
+
+ for (i = 0; i < n; i++) {
+ /*
+ * the di->rxbufsize doesn't include the extra headroom,
+ * we need to add it to the size to be allocated
+ */
+ p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
+
+ if (p == NULL) {
+ DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
+ di->name));
+ if (i == 0 && dma64_rxidle(di)) {
+ DMA_ERROR(("%s: rxfill64: ring is empty !\n",
+ di->name));
+ ring_empty = true;
+ }
+ di->dma.rxnobuf++;
+ break;
+ }
+ /* reserve an extra headroom, if applicable */
+ if (extra_offset)
+ skb_pull(p, extra_offset);
+
+ /* Do a cached write instead of uncached write since DMA_MAP
+ * will flush the cache.
+ */
+ *(u32 *) (p->data) = 0;
+
+ pa = pci_map_single(di->pbus, p->data,
+ di->rxbufsize, PCI_DMA_FROMDEVICE);
+
+ /* save the free packet pointer */
+ di->rxp[rxout] = p;
+
+ /* reset flags for each descriptor */
+ flags = 0;
+ if (rxout == (di->nrxd - 1))
+ flags = D64_CTRL1_EOT;
+
+ dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
+ di->rxbufsize);
+ rxout = nextrxd(di, rxout);
+ }
+
+ di->rxout = rxout;
+
+ /* update the chip lastdscr pointer */
+ W_REG(&di->d64rxregs->ptr,
+ di->rcvptrbase + I2B(rxout, struct dma64desc));
+
+ return ring_empty;
+}
+
+void dma_rxreclaim(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ struct sk_buff *p;
+
+ DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
+
+ while ((p = _dma_getnextrxp(di, true)))
+ brcmu_pkt_buf_free_skb(p);
+}
+
+void dma_counterreset(struct dma_pub *pub)
+{
+ /* reset all software counters */
+ pub->rxgiants = 0;
+ pub->rxnobuf = 0;
+ pub->txnobuf = 0;
+}
+
+/* get the address of the var in order to change later */
+unsigned long dma_getvar(struct dma_pub *pub, const char *name)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+
+ if (!strcmp(name, "&txavail"))
+ return (unsigned long)&(di->dma.txavail);
+ return 0;
+}
+
+/* 64-bit DMA functions */
+
+void dma_txinit(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ u32 control = D64_XC_XE;
+
+ DMA_TRACE(("%s: dma_txinit\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ di->txin = di->txout = 0;
+ di->dma.txavail = di->ntxd - 1;
+
+ /* clear tx descriptor ring */
+ memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));
+
+ /* DMA engine with out alignment requirement requires table to be inited
+ * before enabling the engine
+ */
+ if (!di->aligndesc_4k)
+ _dma_ddtable_init(di, DMA_TX, di->txdpa);
+
+ if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
+ control |= D64_XC_PD;
+ OR_REG(&di->d64txregs->control, control);
+
+ /* DMA engine with alignment requirement requires table to be inited
+ * before enabling the engine
+ */
+ if (di->aligndesc_4k)
+ _dma_ddtable_init(di, DMA_TX, di->txdpa);
+}
+
+void dma_txsuspend(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+
+ DMA_TRACE(("%s: dma_txsuspend\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ OR_REG(&di->d64txregs->control, D64_XC_SE);
+}
+
+void dma_txresume(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+
+ DMA_TRACE(("%s: dma_txresume\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ AND_REG(&di->d64txregs->control, ~D64_XC_SE);
+}
+
+bool dma_txsuspended(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+
+ return (di->ntxd == 0) ||
+ ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
+ D64_XC_SE);
+}
+
+void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ struct sk_buff *p;
+
+ DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
+ (range == DMA_RANGE_ALL) ? "all" :
+ ((range ==
+ DMA_RANGE_TRANSMITTED) ? "transmitted" :
+ "transferred")));
+
+ if (di->txin == di->txout)
+ return;
+
+ while ((p = dma_getnexttxp(pub, range))) {
+ /* For unframed data, we don't have any packets to free */
+ if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
+ brcmu_pkt_buf_free_skb(p);
+ }
+}
+
+bool dma_txreset(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ u32 status;
+
+ if (di->ntxd == 0)
+ return true;
+
+ /* suspend tx DMA first */
+ W_REG(&di->d64txregs->control, D64_XC_SE);
+ SPINWAIT(((status =
+ (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
+ != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
+ && (status != D64_XS0_XS_STOPPED), 10000);
+
+ W_REG(&di->d64txregs->control, 0);
+ SPINWAIT(((status =
+ (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
+ != D64_XS0_XS_DISABLED), 10000);
+
+ /* wait for the last transaction to complete */
+ udelay(300);
+
+ return status == D64_XS0_XS_DISABLED;
+}
+
+bool dma_rxreset(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ u32 status;
+
+ if (di->nrxd == 0)
+ return true;
+
+ W_REG(&di->d64rxregs->control, 0);
+ SPINWAIT(((status =
+ (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
+ != D64_RS0_RS_DISABLED), 10000);
+
+ return status == D64_RS0_RS_DISABLED;
+}
+
+/*
+ * !! tx entry routine
+ * WARNING: call must check the return value for error.
+ * the error(toss frames) could be fatal and cause many subsequent hard
+ * to debug problems
+ */
+int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ struct sk_buff *p, *next;
+ unsigned char *data;
+ uint len;
+ u16 txout;
+ u32 flags = 0;
+ dma_addr_t pa;
+
+ DMA_TRACE(("%s: dma_txfast\n", di->name));
+
+ txout = di->txout;
+
+ /*
+ * Walk the chain of packet buffers
+ * allocating and initializing transmit descriptor entries.
+ */
+ for (p = p0; p; p = next) {
+ data = p->data;
+ len = p->len;
+ next = p->next;
+
+ /* return nonzero if out of tx descriptors */
+ if (nexttxd(di, txout) == di->txin)
+ goto outoftxd;
+
+ if (len == 0)
+ continue;
+
+ /* get physical address of buffer start */
+ pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
+
+ flags = 0;
+ if (p == p0)
+ flags |= D64_CTRL1_SOF;
+
+ /* With a DMA segment list, Descriptor table is filled
+ * using the segment list instead of looping over
+ * buffers in multi-chain DMA. Therefore, EOF for SGLIST
+ * is when end of segment list is reached.
+ */
+ if (next == NULL)
+ flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
+ if (txout == (di->ntxd - 1))
+ flags |= D64_CTRL1_EOT;
+
+ dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
+
+ txout = nexttxd(di, txout);
+ }
+
+ /* if last txd eof not set, fix it */
+ if (!(flags & D64_CTRL1_EOF))
+ di->txd64[prevtxd(di, txout)].ctrl1 =
+ cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);
+
+ /* save the packet */
+ di->txp[prevtxd(di, txout)] = p0;
+
+ /* bump the tx descriptor index */
+ di->txout = txout;
+
+ /* kick the chip */
+ if (commit)
+ W_REG(&di->d64txregs->ptr,
+ di->xmtptrbase + I2B(txout, struct dma64desc));
+
+ /* tx flow control */
+ di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;
+
+ return 0;
+
+ outoftxd:
+ DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
+ brcmu_pkt_buf_free_skb(p0);
+ di->dma.txavail = 0;
+ di->dma.txnobuf++;
+ return -1;
+}
+
+/*
+ * Reclaim next completed txd (txds if using chained buffers) in the range
+ * specified and return associated packet.
+ * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have be
+ * transmitted as noted by the hardware "CurrDescr" pointer.
+ * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have be
+ * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
+ * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
+ * return associated packet regardless of the value of hardware pointers.
+ */
+struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ u16 start, end, i;
+ u16 active_desc;
+ struct sk_buff *txp;
+
+ DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
+ (range == DMA_RANGE_ALL) ? "all" :
+ ((range ==
+ DMA_RANGE_TRANSMITTED) ? "transmitted" :
+ "transferred")));
+
+ if (di->ntxd == 0)
+ return NULL;
+
+ txp = NULL;
+
+ start = di->txin;
+ if (range == DMA_RANGE_ALL)
+ end = di->txout;
+ else {
+ struct dma64regs __iomem *dregs = di->d64txregs;
+
+ end = (u16) (B2I(((R_REG(&dregs->status0) &
+ D64_XS0_CD_MASK) -
+ di->xmtptrbase) & D64_XS0_CD_MASK,
+ struct dma64desc));
+
+ if (range == DMA_RANGE_TRANSFERED) {
+ active_desc =
+ (u16) (R_REG(&dregs->status1) &
+ D64_XS1_AD_MASK);
+ active_desc =
+ (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
+ active_desc = B2I(active_desc, struct dma64desc);
+ if (end != active_desc)
+ end = prevtxd(di, active_desc);
+ }
+ }
+
+ if ((start == 0) && (end > di->txout))
+ goto bogus;
+
+ for (i = start; i != end && !txp; i = nexttxd(di, i)) {
+ dma_addr_t pa;
+ uint size;
+
+ pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow;
+
+ size =
+ (le32_to_cpu(di->txd64[i].ctrl2) &
+ D64_CTRL2_BC_MASK);
+
+ di->txd64[i].addrlow = cpu_to_le32(0xdeadbeef);
+ di->txd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
+
+ txp = di->txp[i];
+ di->txp[i] = NULL;
+
+ pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
+ }
+
+ di->txin = i;
+
+ /* tx flow control */
+ di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;
+
+ return txp;
+
+ bogus:
+ DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
+ "force %d\n", start, end, di->txout, forceall));
+ return NULL;
+}
+
+/*
+ * Mac80211 initiated actions sometimes require packets in the DMA queue to be
+ * modified. The modified portion of the packet is not under control of the DMA
+ * engine. This function calls a caller-supplied function for each packet in
+ * the caller specified dma chain.
+ */
+void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
+ (void *pkt, void *arg_a), void *arg_a)
+{
+ struct dma_info *di = (struct dma_info *) dmah;
+ uint i = di->txin;
+ uint end = di->txout;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *tx_info;
+
+ while (i != end) {
+ skb = (struct sk_buff *)di->txp[i];
+ if (skb != NULL) {
+ tx_info = (struct ieee80211_tx_info *)skb->cb;
+ (callback_fnc)(tx_info, arg_a);
+ }
+ i = nexttxd(di, i);
+ }
+}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.h b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
new file mode 100644
index 00000000000..ebc5bc546f3
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_DMA_H_
+#define _BRCM_DMA_H_
+
+#include <linux/delay.h>
+#include "types.h" /* forward structure declarations */
+
+/* map/unmap direction */
+#define DMA_TX 1 /* TX direction for DMA */
+#define DMA_RX 2 /* RX direction for DMA */
+
+/* DMA structure:
+ * support two DMA engines: 32 bits address or 64 bit addressing
+ * basic DMA register set is per channel(transmit or receive)
+ * a pair of channels is defined for convenience
+ */
+
+/* 32 bits addressing */
+
+struct dma32diag { /* diag access */
+ u32 fifoaddr; /* diag address */
+ u32 fifodatalow; /* low 32bits of data */
+ u32 fifodatahigh; /* high 32bits of data */
+ u32 pad; /* reserved */
+};
+
+/* 64 bits addressing */
+
+/* dma registers per channel(xmt or rcv) */
+struct dma64regs {
+ u32 control; /* enable, et al */
+ u32 ptr; /* last descriptor posted to chip */
+ u32 addrlow; /* desc ring base address low 32-bits (8K aligned) */
+ u32 addrhigh; /* desc ring base address bits 63:32 (8K aligned) */
+ u32 status0; /* current descriptor, xmt state */
+ u32 status1; /* active descriptor, xmt error */
+};
+
+/* range param for dma_getnexttxp() and dma_txreclaim */
+enum txd_range {
+ DMA_RANGE_ALL = 1,
+ DMA_RANGE_TRANSMITTED,
+ DMA_RANGE_TRANSFERED
+};
+
+/*
+ * Exported data structure (read-only)
+ */
+/* export structure */
+struct dma_pub {
+ uint txavail; /* # free tx descriptors */
+ uint dmactrlflags; /* dma control flags */
+
+ /* rx error counters */
+ uint rxgiants; /* rx giant frames */
+ uint rxnobuf; /* rx out of dma descriptors */
+ /* tx error counters */
+ uint txnobuf; /* tx out of dma descriptors */
+};
+
+extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
+ void __iomem *dmaregstx, void __iomem *dmaregsrx,
+ uint ntxd, uint nrxd,
+ uint rxbufsize, int rxextheadroom,
+ uint nrxpost, uint rxoffset, uint *msg_level);
+
+void dma_rxinit(struct dma_pub *pub);
+struct sk_buff *dma_rx(struct dma_pub *pub);
+bool dma_rxfill(struct dma_pub *pub);
+bool dma_rxreset(struct dma_pub *pub);
+bool dma_txreset(struct dma_pub *pub);
+void dma_txinit(struct dma_pub *pub);
+int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit);
+void dma_txsuspend(struct dma_pub *pub);
+bool dma_txsuspended(struct dma_pub *pub);
+void dma_txresume(struct dma_pub *pub);
+void dma_txreclaim(struct dma_pub *pub, enum txd_range range);
+void dma_rxreclaim(struct dma_pub *pub);
+void dma_detach(struct dma_pub *pub);
+unsigned long dma_getvar(struct dma_pub *pub, const char *name);
+struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range);
+void dma_counterreset(struct dma_pub *pub);
+
+void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
+ (void *pkt, void *arg_a), void *arg_a);
+
+/*
+ * DMA(Bug) on bcm47xx chips seems to declare that the packet is ready, but
+ * the packet length is not updated yet (by DMA) on the expected time.
+ * Workaround is to hold processor till DMA updates the length, and stay off
+ * the bus to allow DMA update the length in buffer
+ */
+static inline void dma_spin_for_len(uint len, struct sk_buff *head)
+{
+#if defined(CONFIG_BCM47XX)
+ if (!len) {
+ while (!(len = *(u16 *) KSEG1ADDR(head->data)))
+ udelay(1);
+
+ *(u16 *) (head->data) = cpu_to_le16((u16) len);
+ }
+#endif /* defined(CONFIG_BCM47XX) */
+}
+
+#endif /* _BRCM_DMA_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
new file mode 100644
index 00000000000..ac8d02bd34f
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -0,0 +1,1696 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define __UNDEF_NO_VERSION__
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <net/mac80211.h>
+#include <defs.h>
+#include "nicpci.h"
+#include "phy/phy_int.h"
+#include "d11.h"
+#include "channel.h"
+#include "scb.h"
+#include "pub.h"
+#include "ucode_loader.h"
+#include "mac80211_if.h"
+#include "main.h"
+
+#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
+
+/* Flags we support */
+#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_FCSFAIL | \
+ FIF_PLCPFAIL | \
+ FIF_CONTROL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC)
+
+#define CHAN2GHZ(channel, freqency, chflags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (freqency), \
+ .hw_value = (channel), \
+ .flags = chflags, \
+ .max_antenna_gain = 0, \
+ .max_power = 19, \
+}
+
+#define CHAN5GHZ(channel, chflags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 5000 + 5*(channel), \
+ .hw_value = (channel), \
+ .flags = chflags, \
+ .max_antenna_gain = 0, \
+ .max_power = 21, \
+}
+
+#define RATE(rate100m, _flags) { \
+ .bitrate = (rate100m), \
+ .flags = (_flags), \
+ .hw_value = (rate100m / 5), \
+}
+
+struct firmware_hdr {
+ __le32 offset;
+ __le32 len;
+ __le32 idx;
+};
+
+static const char * const brcms_firmwares[MAX_FW_IMAGES] = {
+ "brcm/bcm43xx",
+ NULL
+};
+
+static int n_adapters_found;
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
+MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* recognized PCI IDs */
+static DEFINE_PCI_DEVICE_TABLE(brcms_pci_id_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, /* 43225 2G */
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, /* 43224 DUAL */
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, /* 4313 DUAL */
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, /* 43224 Ven */
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, brcms_pci_id_table);
+
+#ifdef BCMDBG
+static int msglevel = 0xdeadbeef;
+module_param(msglevel, int, 0);
+#endif /* BCMDBG */
+
+static struct ieee80211_channel brcms_2ghz_chantable[] = {
+ CHAN2GHZ(1, 2412, IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN2GHZ(2, 2417, IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN2GHZ(3, 2422, IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN2GHZ(4, 2427, IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN2GHZ(5, 2432, 0),
+ CHAN2GHZ(6, 2437, 0),
+ CHAN2GHZ(7, 2442, 0),
+ CHAN2GHZ(8, 2447, IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN2GHZ(9, 2452, IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN2GHZ(10, 2457, IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN2GHZ(11, 2462, IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN2GHZ(12, 2467,
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN2GHZ(13, 2472,
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN2GHZ(14, 2484,
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
+};
+
+static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = {
+ /* UNII-1 */
+ CHAN5GHZ(36, IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN5GHZ(40, IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN5GHZ(44, IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN5GHZ(48, IEEE80211_CHAN_NO_HT40PLUS),
+ /* UNII-2 */
+ CHAN5GHZ(52,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN5GHZ(56,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN5GHZ(60,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN5GHZ(64,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ /* MID */
+ CHAN5GHZ(100,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN5GHZ(104,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN5GHZ(108,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN5GHZ(112,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN5GHZ(116,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN5GHZ(120,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN5GHZ(124,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN5GHZ(128,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN5GHZ(132,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN5GHZ(136,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN5GHZ(140,
+ IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS |
+ IEEE80211_CHAN_NO_HT40MINUS),
+ /* UNII-3 */
+ CHAN5GHZ(149, IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN5GHZ(153, IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN5GHZ(157, IEEE80211_CHAN_NO_HT40MINUS),
+ CHAN5GHZ(161, IEEE80211_CHAN_NO_HT40PLUS),
+ CHAN5GHZ(165, IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
+};
+
+/*
+ * The rate table is used for both 2.4G and 5G rates. The
+ * latter being a subset as it does not support CCK rates.
+ */
+static struct ieee80211_rate legacy_ratetable[] = {
+ RATE(10, 0),
+ RATE(20, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, 0),
+ RATE(90, 0),
+ RATE(120, 0),
+ RATE(180, 0),
+ RATE(240, 0),
+ RATE(360, 0),
+ RATE(480, 0),
+ RATE(540, 0),
+};
+
+static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
+ .band = IEEE80211_BAND_2GHZ,
+ .channels = brcms_2ghz_chantable,
+ .n_channels = ARRAY_SIZE(brcms_2ghz_chantable),
+ .bitrates = legacy_ratetable,
+ .n_bitrates = ARRAY_SIZE(legacy_ratetable),
+ .ht_cap = {
+ /* from include/linux/ieee80211.h */
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+ .ampdu_density = AMPDU_DEF_MPDU_DENSITY,
+ .mcs = {
+ /* placeholders for now */
+ .rx_mask = {0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0},
+ .rx_highest = cpu_to_le16(500),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED}
+ }
+};
+
+static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = {
+ .band = IEEE80211_BAND_5GHZ,
+ .channels = brcms_5ghz_nphy_chantable,
+ .n_channels = ARRAY_SIZE(brcms_5ghz_nphy_chantable),
+ .bitrates = legacy_ratetable + BRCMS_LEGACY_5G_RATE_OFFSET,
+ .n_bitrates = ARRAY_SIZE(legacy_ratetable) -
+ BRCMS_LEGACY_5G_RATE_OFFSET,
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_40MHZ_INTOLERANT, /* No 40 mhz yet */
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+ .ampdu_density = AMPDU_DEF_MPDU_DENSITY,
+ .mcs = {
+ /* placeholders for now */
+ .rx_mask = {0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0},
+ .rx_highest = cpu_to_le16(500),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED}
+ }
+};
+
+/* flags the given rate in rateset as requested */
+static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
+{
+ u32 i;
+
+ for (i = 0; i < rs->count; i++) {
+ if (rate != (rs->rates[i] & 0x7f))
+ continue;
+
+ if (is_br)
+ rs->rates[i] |= BRCMS_RATE_FLAG;
+ else
+ rs->rates[i] &= BRCMS_RATE_MASK;
+ return;
+ }
+}
+
+static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct brcms_info *wl = hw->priv;
+
+ spin_lock_bh(&wl->lock);
+ if (!wl->pub->up) {
+ wiphy_err(wl->wiphy, "ops->tx called while down\n");
+ kfree_skb(skb);
+ goto done;
+ }
+ brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
+ done:
+ spin_unlock_bh(&wl->lock);
+}
+
+static int brcms_ops_start(struct ieee80211_hw *hw)
+{
+ struct brcms_info *wl = hw->priv;
+ bool blocked;
+
+ ieee80211_wake_queues(hw);
+ spin_lock_bh(&wl->lock);
+ blocked = brcms_rfkill_set_hw_state(wl);
+ spin_unlock_bh(&wl->lock);
+ if (!blocked)
+ wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
+
+ return 0;
+}
+
+static void brcms_ops_stop(struct ieee80211_hw *hw)
+{
+ ieee80211_stop_queues(hw);
+}
+
+static int
+brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct brcms_info *wl;
+ int err;
+
+ /* Just STA for now */
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_MESH_POINT &&
+ vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_WDS &&
+ vif->type != NL80211_IFTYPE_ADHOC) {
+ wiphy_err(hw->wiphy, "%s: Attempt to add type %d, only"
+ " STA for now\n", __func__, vif->type);
+ return -EOPNOTSUPP;
+ }
+
+ wl = hw->priv;
+ spin_lock_bh(&wl->lock);
+ if (!wl->pub->up)
+ err = brcms_up(wl);
+ else
+ err = -ENODEV;
+ spin_unlock_bh(&wl->lock);
+
+ if (err != 0)
+ wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
+ err);
+
+ return err;
+}
+
+static void
+brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct brcms_info *wl;
+
+ wl = hw->priv;
+
+ /* put driver in down state */
+ spin_lock_bh(&wl->lock);
+ brcms_down(wl);
+ spin_unlock_bh(&wl->lock);
+}
+
+static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ieee80211_conf *conf = &hw->conf;
+ struct brcms_info *wl = hw->priv;
+ int err = 0;
+ int new_int;
+ struct wiphy *wiphy = hw->wiphy;
+
+ spin_lock_bh(&wl->lock);
+ if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
+ brcms_c_set_beacon_listen_interval(wl->wlc,
+ conf->listen_interval);
+ }
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR)
+ wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n",
+ __func__, conf->flags & IEEE80211_CONF_MONITOR ?
+ "true" : "false");
+ if (changed & IEEE80211_CONF_CHANGE_PS)
+ wiphy_err(wiphy, "%s: change power-save mode: %s (implement)\n",
+ __func__, conf->flags & IEEE80211_CONF_PS ?
+ "true" : "false");
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ err = brcms_c_set_tx_power(wl->wlc, conf->power_level);
+ if (err < 0) {
+ wiphy_err(wiphy, "%s: Error setting power_level\n",
+ __func__);
+ goto config_out;
+ }
+ new_int = brcms_c_get_tx_power(wl->wlc);
+ if (new_int != conf->power_level)
+ wiphy_err(wiphy, "%s: Power level req != actual, %d %d"
+ "\n", __func__, conf->power_level,
+ new_int);
+ }
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ if (conf->channel_type == NL80211_CHAN_HT20 ||
+ conf->channel_type == NL80211_CHAN_NO_HT)
+ err = brcms_c_set_channel(wl->wlc,
+ conf->channel->hw_value);
+ else
+ err = -ENOTSUPP;
+ }
+ if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
+ err = brcms_c_set_rate_limit(wl->wlc,
+ conf->short_frame_max_tx_count,
+ conf->long_frame_max_tx_count);
+
+ config_out:
+ spin_unlock_bh(&wl->lock);
+ return err;
+}
+
+static void
+brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct brcms_info *wl = hw->priv;
+ struct wiphy *wiphy = hw->wiphy;
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ /* association status changed (associated/disassociated)
+ * also implies a change in the AID.
+ */
+ wiphy_err(wiphy, "%s: %s: %sassociated\n", KBUILD_MODNAME,
+ __func__, info->assoc ? "" : "dis");
+ spin_lock_bh(&wl->lock);
+ brcms_c_associate_upd(wl->wlc, info->assoc);
+ spin_unlock_bh(&wl->lock);
+ }
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ s8 val;
+
+ /* slot timing changed */
+ if (info->use_short_slot)
+ val = 1;
+ else
+ val = 0;
+ spin_lock_bh(&wl->lock);
+ brcms_c_set_shortslot_override(wl->wlc, val);
+ spin_unlock_bh(&wl->lock);
+ }
+
+ if (changed & BSS_CHANGED_HT) {
+ /* 802.11n parameters changed */
+ u16 mode = info->ht_operation_mode;
+
+ spin_lock_bh(&wl->lock);
+ brcms_c_protection_upd(wl->wlc, BRCMS_PROT_N_CFG,
+ mode & IEEE80211_HT_OP_MODE_PROTECTION);
+ brcms_c_protection_upd(wl->wlc, BRCMS_PROT_N_NONGF,
+ mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ brcms_c_protection_upd(wl->wlc, BRCMS_PROT_N_OBSS,
+ mode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT);
+ spin_unlock_bh(&wl->lock);
+ }
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ struct ieee80211_supported_band *bi;
+ u32 br_mask, i;
+ u16 rate;
+ struct brcm_rateset rs;
+ int error;
+
+ /* retrieve the current rates */
+ spin_lock_bh(&wl->lock);
+ brcms_c_get_current_rateset(wl->wlc, &rs);
+ spin_unlock_bh(&wl->lock);
+
+ br_mask = info->basic_rates;
+ bi = hw->wiphy->bands[brcms_c_get_curband(wl->wlc)];
+ for (i = 0; i < bi->n_bitrates; i++) {
+ /* convert to internal rate value */
+ rate = (bi->bitrates[i].bitrate << 1) / 10;
+
+ /* set/clear basic rate flag */
+ brcms_set_basic_rate(&rs, rate, br_mask & 1);
+ br_mask >>= 1;
+ }
+
+ /* update the rate set */
+ spin_lock_bh(&wl->lock);
+ error = brcms_c_set_rateset(wl->wlc, &rs);
+ spin_unlock_bh(&wl->lock);
+ if (error)
+ wiphy_err(wiphy, "changing basic rates failed: %d\n",
+ error);
+ }
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ /* Beacon interval changed */
+ spin_lock_bh(&wl->lock);
+ brcms_c_set_beacon_period(wl->wlc, info->beacon_int);
+ spin_unlock_bh(&wl->lock);
+ }
+ if (changed & BSS_CHANGED_BSSID) {
+ /* BSSID changed, for whatever reason (IBSS and managed mode) */
+ spin_lock_bh(&wl->lock);
+ brcms_c_set_addrmatch(wl->wlc, RCM_BSSID_OFFSET, info->bssid);
+ spin_unlock_bh(&wl->lock);
+ }
+ if (changed & BSS_CHANGED_BEACON)
+ /* Beacon data changed, retrieve new beacon (beaconing modes) */
+ wiphy_err(wiphy, "%s: beacon changed\n", __func__);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ /* Beaconing should be enabled/disabled (beaconing modes) */
+ wiphy_err(wiphy, "%s: Beacon enabled: %s\n", __func__,
+ info->enable_beacon ? "true" : "false");
+ }
+
+ if (changed & BSS_CHANGED_CQM) {
+ /* Connection quality monitor config changed */
+ wiphy_err(wiphy, "%s: cqm change: threshold %d, hys %d "
+ " (implement)\n", __func__, info->cqm_rssi_thold,
+ info->cqm_rssi_hyst);
+ }
+
+ if (changed & BSS_CHANGED_IBSS) {
+ /* IBSS join status changed */
+ wiphy_err(wiphy, "%s: IBSS joined: %s (implement)\n", __func__,
+ info->ibss_joined ? "true" : "false");
+ }
+
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ /* Hardware ARP filter address list or state changed */
+ wiphy_err(wiphy, "%s: arp filtering: enabled %s, count %d"
+ " (implement)\n", __func__, info->arp_filter_enabled ?
+ "true" : "false", info->arp_addr_cnt);
+ }
+
+ if (changed & BSS_CHANGED_QOS) {
+ /*
+ * QoS for this association was enabled/disabled.
+ * Note that it is only ever disabled for station mode.
+ */
+ wiphy_err(wiphy, "%s: qos enabled: %s (implement)\n", __func__,
+ info->qos ? "true" : "false");
+ }
+ return;
+}
+
+static void
+brcms_ops_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct brcms_info *wl = hw->priv;
+ struct wiphy *wiphy = hw->wiphy;
+
+ changed_flags &= MAC_FILTERS;
+ *total_flags &= MAC_FILTERS;
+ if (changed_flags & FIF_PROMISC_IN_BSS)
+ wiphy_err(wiphy, "FIF_PROMISC_IN_BSS\n");
+ if (changed_flags & FIF_ALLMULTI)
+ wiphy_err(wiphy, "FIF_ALLMULTI\n");
+ if (changed_flags & FIF_FCSFAIL)
+ wiphy_err(wiphy, "FIF_FCSFAIL\n");
+ if (changed_flags & FIF_PLCPFAIL)
+ wiphy_err(wiphy, "FIF_PLCPFAIL\n");
+ if (changed_flags & FIF_CONTROL)
+ wiphy_err(wiphy, "FIF_CONTROL\n");
+ if (changed_flags & FIF_OTHER_BSS)
+ wiphy_err(wiphy, "FIF_OTHER_BSS\n");
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
+ spin_lock_bh(&wl->lock);
+ if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
+ wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS;
+ brcms_c_mac_bcn_promisc_change(wl->wlc, 1);
+ } else {
+ brcms_c_mac_bcn_promisc_change(wl->wlc, 0);
+ wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS;
+ }
+ spin_unlock_bh(&wl->lock);
+ }
+ return;
+}
+
+static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct brcms_info *wl = hw->priv;
+ spin_lock_bh(&wl->lock);
+ brcms_c_scan_start(wl->wlc);
+ spin_unlock_bh(&wl->lock);
+ return;
+}
+
+static void brcms_ops_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct brcms_info *wl = hw->priv;
+ spin_lock_bh(&wl->lock);
+ brcms_c_scan_stop(wl->wlc);
+ spin_unlock_bh(&wl->lock);
+ return;
+}
+
+static int
+brcms_ops_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct brcms_info *wl = hw->priv;
+
+ spin_lock_bh(&wl->lock);
+ brcms_c_wme_setparams(wl->wlc, queue, params, true);
+ spin_unlock_bh(&wl->lock);
+
+ return 0;
+}
+
+static int
+brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct brcms_info *wl = hw->priv;
+ struct scb *scb = &wl->wlc->pri_scb;
+
+ brcms_c_init_scb(scb);
+
+ wl->pub->global_ampdu = &(scb->scb_ampdu);
+ wl->pub->global_ampdu->scb = scb;
+ wl->pub->global_ampdu->max_pdu = 16;
+
+ sta->ht_cap.ht_supported = true;
+ sta->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ sta->ht_cap.ampdu_density = AMPDU_DEF_MPDU_DENSITY;
+ sta->ht_cap.cap = IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT;
+
+ /*
+ * minstrel_ht initiates addBA on our behalf by calling
+ * ieee80211_start_tx_ba_session()
+ */
+ return 0;
+}
+
+static int
+brcms_ops_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct brcms_info *wl = hw->priv;
+ struct scb *scb = &wl->wlc->pri_scb;
+ int status;
+
+ if (WARN_ON(scb->magic != SCB_MAGIC))
+ return -EIDRM;
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ spin_lock_bh(&wl->lock);
+ status = brcms_c_aggregatable(wl->wlc, tid);
+ spin_unlock_bh(&wl->lock);
+ if (!status) {
+ wiphy_err(wl->wiphy, "START: tid %d is not agg\'able\n",
+ tid);
+ return -EINVAL;
+ }
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+
+ case IEEE80211_AMPDU_TX_STOP:
+ spin_lock_bh(&wl->lock);
+ brcms_c_ampdu_flush(wl->wlc, sta, tid);
+ spin_unlock_bh(&wl->lock);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /*
+ * BA window size from ADDBA response ('buf_size') defines how
+ * many outstanding MPDUs are allowed for the BA stream by
+ * recipient and traffic class. 'ampdu_factor' gives maximum
+ * AMPDU size.
+ */
+ spin_lock_bh(&wl->lock);
+ brcms_c_ampdu_tx_operational(wl->wlc, tid, buf_size,
+ (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ sta->ht_cap.ampdu_factor)) - 1);
+ spin_unlock_bh(&wl->lock);
+ /* Power save wakeup */
+ break;
+ default:
+ wiphy_err(wl->wiphy, "%s: Invalid command, ignoring\n",
+ __func__);
+ }
+
+ return 0;
+}
+
+static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
+{
+ struct brcms_info *wl = hw->priv;
+ bool blocked;
+
+ spin_lock_bh(&wl->lock);
+ blocked = brcms_c_check_radio_disabled(wl->wlc);
+ spin_unlock_bh(&wl->lock);
+
+ wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
+}
+
+static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct brcms_info *wl = hw->priv;
+
+ no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
+
+ /* wait for packet queue and dma fifos to run empty */
+ spin_lock_bh(&wl->lock);
+ brcms_c_wait_for_tx_completion(wl->wlc, drop);
+ spin_unlock_bh(&wl->lock);
+}
+
+static const struct ieee80211_ops brcms_ops = {
+ .tx = brcms_ops_tx,
+ .start = brcms_ops_start,
+ .stop = brcms_ops_stop,
+ .add_interface = brcms_ops_add_interface,
+ .remove_interface = brcms_ops_remove_interface,
+ .config = brcms_ops_config,
+ .bss_info_changed = brcms_ops_bss_info_changed,
+ .configure_filter = brcms_ops_configure_filter,
+ .sw_scan_start = brcms_ops_sw_scan_start,
+ .sw_scan_complete = brcms_ops_sw_scan_complete,
+ .conf_tx = brcms_ops_conf_tx,
+ .sta_add = brcms_ops_sta_add,
+ .ampdu_action = brcms_ops_ampdu_action,
+ .rfkill_poll = brcms_ops_rfkill_poll,
+ .flush = brcms_ops_flush,
+};
+
+/*
+ * is called in brcms_pci_probe() context, therefore no locking required.
+ */
+static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
+{
+ return regulatory_hint(wl->pub->ieee_hw->wiphy, abbrev);
+}
+
+void brcms_dpc(unsigned long data)
+{
+ struct brcms_info *wl;
+
+ wl = (struct brcms_info *) data;
+
+ spin_lock_bh(&wl->lock);
+
+ /* call the common second level interrupt handler */
+ if (wl->pub->up) {
+ if (wl->resched) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->isr_lock, flags);
+ brcms_c_intrsupd(wl->wlc);
+ spin_unlock_irqrestore(&wl->isr_lock, flags);
+ }
+
+ wl->resched = brcms_c_dpc(wl->wlc, true);
+ }
+
+ /* brcms_c_dpc() may bring the driver down */
+ if (!wl->pub->up)
+ goto done;
+
+ /* re-schedule dpc */
+ if (wl->resched)
+ tasklet_schedule(&wl->tasklet);
+ else
+ /* re-enable interrupts */
+ brcms_intrson(wl);
+
+ done:
+ spin_unlock_bh(&wl->lock);
+}
+
+/*
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+static int brcms_request_fw(struct brcms_info *wl, struct pci_dev *pdev)
+{
+ int status;
+ struct device *device = &pdev->dev;
+ char fw_name[100];
+ int i;
+
+ memset(&wl->fw, 0, sizeof(struct brcms_firmware));
+ for (i = 0; i < MAX_FW_IMAGES; i++) {
+ if (brcms_firmwares[i] == NULL)
+ break;
+ sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
+ UCODE_LOADER_API_VER);
+ status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
+ if (status) {
+ wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+ KBUILD_MODNAME, fw_name);
+ return status;
+ }
+ sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
+ UCODE_LOADER_API_VER);
+ status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
+ if (status) {
+ wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+ KBUILD_MODNAME, fw_name);
+ return status;
+ }
+ wl->fw.hdr_num_entries[i] =
+ wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
+ }
+ wl->fw.fw_cnt = i;
+ return brcms_ucode_data_init(wl, &wl->ucode);
+}
+
+/*
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+static void brcms_release_fw(struct brcms_info *wl)
+{
+ int i;
+ for (i = 0; i < MAX_FW_IMAGES; i++) {
+ release_firmware(wl->fw.fw_bin[i]);
+ release_firmware(wl->fw.fw_hdr[i]);
+ }
+}
+
+/**
+ * This function frees the WL per-device resources.
+ *
+ * This function frees resources owned by the WL device pointed to
+ * by the wl parameter.
+ *
+ * precondition: can both be called locked and unlocked
+ *
+ */
+static void brcms_free(struct brcms_info *wl)
+{
+ struct brcms_timer *t, *next;
+
+ /* free ucode data */
+ if (wl->fw.fw_cnt)
+ brcms_ucode_data_free(&wl->ucode);
+ if (wl->irq)
+ free_irq(wl->irq, wl);
+
+ /* kill dpc */
+ tasklet_kill(&wl->tasklet);
+
+ if (wl->pub)
+ brcms_c_module_unregister(wl->pub, "linux", wl);
+
+ /* free common resources */
+ if (wl->wlc) {
+ brcms_c_detach(wl->wlc);
+ wl->wlc = NULL;
+ wl->pub = NULL;
+ }
+
+ /* virtual interface deletion is deferred so we cannot spinwait */
+
+ /* wait for all pending callbacks to complete */
+ while (atomic_read(&wl->callbacks) > 0)
+ schedule();
+
+ /* free timers */
+ for (t = wl->timers; t; t = next) {
+ next = t->next;
+#ifdef BCMDBG
+ kfree(t->name);
+#endif
+ kfree(t);
+ }
+
+ /*
+ * unregister_netdev() calls get_stats() which may read chip
+ * registers so we cannot unmap the chip registers until
+ * after calling unregister_netdev() .
+ */
+ if (wl->regsva)
+ iounmap(wl->regsva);
+
+ wl->regsva = NULL;
+}
+
+/*
+* called from both kernel as from this kernel module.
+* precondition: perimeter lock is not acquired.
+*/
+static void brcms_remove(struct pci_dev *pdev)
+{
+ struct brcms_info *wl;
+ struct ieee80211_hw *hw;
+ int status;
+
+ hw = pci_get_drvdata(pdev);
+ wl = hw->priv;
+ if (!wl) {
+ pr_err("wl: brcms_remove: pci_get_drvdata failed\n");
+ return;
+ }
+
+ spin_lock_bh(&wl->lock);
+ status = brcms_c_chipmatch(pdev->vendor, pdev->device);
+ spin_unlock_bh(&wl->lock);
+ if (!status) {
+ wiphy_err(wl->wiphy, "wl: brcms_remove: chipmatch "
+ "failed\n");
+ return;
+ }
+ if (wl->wlc) {
+ wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
+ wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
+ ieee80211_unregister_hw(hw);
+ spin_lock_bh(&wl->lock);
+ brcms_down(wl);
+ spin_unlock_bh(&wl->lock);
+ }
+ pci_disable_device(pdev);
+
+ brcms_free(wl);
+
+ pci_set_drvdata(pdev, NULL);
+ ieee80211_free_hw(hw);
+}
+
+static irqreturn_t brcms_isr(int irq, void *dev_id)
+{
+ struct brcms_info *wl;
+ bool ours, wantdpc;
+
+ wl = (struct brcms_info *) dev_id;
+
+ spin_lock(&wl->isr_lock);
+
+ /* call common first level interrupt handler */
+ ours = brcms_c_isr(wl->wlc, &wantdpc);
+ if (ours) {
+ /* if more to do... */
+ if (wantdpc) {
+
+ /* ...and call the second level interrupt handler */
+ /* schedule dpc */
+ tasklet_schedule(&wl->tasklet);
+ }
+ }
+
+ spin_unlock(&wl->isr_lock);
+
+ return IRQ_RETVAL(ours);
+}
+
+/*
+ * is called in brcms_pci_probe() context, therefore no locking required.
+ */
+static int ieee_hw_rate_init(struct ieee80211_hw *hw)
+{
+ struct brcms_info *wl = hw->priv;
+ struct brcms_c_info *wlc = wl->wlc;
+ struct ieee80211_supported_band *band;
+ int has_5g = 0;
+ u16 phy_type;
+
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+
+ phy_type = brcms_c_get_phy_type(wl->wlc, 0);
+ if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) {
+ band = &wlc->bandstate[BAND_2G_INDEX]->band;
+ *band = brcms_band_2GHz_nphy_template;
+ if (phy_type == PHY_TYPE_LCN) {
+ /* Single stream */
+ band->ht_cap.mcs.rx_mask[1] = 0;
+ band->ht_cap.mcs.rx_highest = cpu_to_le16(72);
+ }
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+ } else {
+ return -EPERM;
+ }
+
+ /* Assume all bands use the same phy. True for 11n devices. */
+ if (wl->pub->_nbands > 1) {
+ has_5g++;
+ if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) {
+ band = &wlc->bandstate[BAND_5G_INDEX]->band;
+ *band = brcms_band_5GHz_nphy_template;
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+ } else {
+ return -EPERM;
+ }
+ }
+ return 0;
+}
+
+/*
+ * is called in brcms_pci_probe() context, therefore no locking required.
+ */
+static int ieee_hw_init(struct ieee80211_hw *hw)
+{
+ hw->flags = IEEE80211_HW_SIGNAL_DBM
+ /* | IEEE80211_HW_CONNECTION_MONITOR What is this? */
+ | IEEE80211_HW_REPORTS_TX_ACK_STATUS
+ | IEEE80211_HW_AMPDU_AGGREGATION;
+
+ hw->extra_tx_headroom = brcms_c_get_header_len();
+ hw->queues = N_TX_QUEUES;
+ hw->max_rates = 2; /* Primary rate and 1 fallback rate */
+
+ /* channel change time is dependent on chip and band */
+ hw->channel_change_time = 7 * 1000;
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ hw->rate_control_algorithm = "minstrel_ht";
+
+ hw->sta_data_size = 0;
+ return ieee_hw_rate_init(hw);
+}
+
+/**
+ * attach to the WL device.
+ *
+ * Attach to the WL device identified by vendor and device parameters.
+ * regs is a host accessible memory address pointing to WL device registers.
+ *
+ * brcms_attach is not defined as static because in the case where no bus
+ * is defined, wl_attach will never be called, and thus, gcc will issue
+ * a warning that this function is defined but not used if we declare
+ * it as static.
+ *
+ *
+ * is called in brcms_pci_probe() context, therefore no locking required.
+ */
+static struct brcms_info *brcms_attach(u16 vendor, u16 device,
+ resource_size_t regs,
+ struct pci_dev *btparam, uint irq)
+{
+ struct brcms_info *wl = NULL;
+ int unit, err;
+ struct ieee80211_hw *hw;
+ u8 perm[ETH_ALEN];
+
+ unit = n_adapters_found;
+ err = 0;
+
+ if (unit < 0)
+ return NULL;
+
+ /* allocate private info */
+ hw = pci_get_drvdata(btparam); /* btparam == pdev */
+ if (hw != NULL)
+ wl = hw->priv;
+ if (WARN_ON(hw == NULL) || WARN_ON(wl == NULL))
+ return NULL;
+ wl->wiphy = hw->wiphy;
+
+ atomic_set(&wl->callbacks, 0);
+
+ /* setup the bottom half handler */
+ tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
+
+ wl->regsva = ioremap_nocache(regs, PCI_BAR0_WINSZ);
+ if (wl->regsva == NULL) {
+ wiphy_err(wl->wiphy, "wl%d: ioremap() failed\n", unit);
+ goto fail;
+ }
+ spin_lock_init(&wl->lock);
+ spin_lock_init(&wl->isr_lock);
+
+ /* prepare ucode */
+ if (brcms_request_fw(wl, btparam) < 0) {
+ wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
+ "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
+ brcms_release_fw(wl);
+ brcms_remove(btparam);
+ return NULL;
+ }
+
+ /* common load-time initialization */
+ wl->wlc = brcms_c_attach(wl, vendor, device, unit, false,
+ wl->regsva, btparam, &err);
+ brcms_release_fw(wl);
+ if (!wl->wlc) {
+ wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
+ KBUILD_MODNAME, err);
+ goto fail;
+ }
+ wl->pub = brcms_c_pub(wl->wlc);
+
+ wl->pub->ieee_hw = hw;
+
+ /* disable mpc */
+ brcms_c_set_radio_mpc(wl->wlc, false);
+
+ /* register our interrupt handler */
+ if (request_irq(irq, brcms_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) {
+ wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
+ goto fail;
+ }
+ wl->irq = irq;
+
+ /* register module */
+ brcms_c_module_register(wl->pub, "linux", wl, NULL);
+
+ if (ieee_hw_init(hw)) {
+ wiphy_err(wl->wiphy, "wl%d: %s: ieee_hw_init failed!\n", unit,
+ __func__);
+ goto fail;
+ }
+
+ memcpy(perm, &wl->pub->cur_etheraddr, ETH_ALEN);
+ if (WARN_ON(!is_valid_ether_addr(perm)))
+ goto fail;
+ SET_IEEE80211_PERM_ADDR(hw, perm);
+
+ err = ieee80211_register_hw(hw);
+ if (err)
+ wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status"
+ "%d\n", __func__, err);
+
+ if (wl->pub->srom_ccode[0])
+ err = brcms_set_hint(wl, wl->pub->srom_ccode);
+ else
+ err = brcms_set_hint(wl, "US");
+ if (err)
+ wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n",
+ __func__, err);
+
+ n_adapters_found++;
+ return wl;
+
+fail:
+ brcms_free(wl);
+ return NULL;
+}
+
+
+
+/**
+ * determines if a device is a WL device, and if so, attaches it.
+ *
+ * This function determines if a device pointed to by pdev is a WL device,
+ * and if so, performs a brcms_attach() on it.
+ *
+ * Perimeter lock is initialized in the course of this function.
+ */
+static int __devinit
+brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc;
+ struct brcms_info *wl;
+ struct ieee80211_hw *hw;
+ u32 val;
+
+ dev_info(&pdev->dev, "bus %d slot %d func %d irq %d\n",
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), pdev->irq);
+
+ if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) ||
+ ((pdev->device != 0x0576) &&
+ ((pdev->device & 0xff00) != 0x4300) &&
+ ((pdev->device & 0xff00) != 0x4700) &&
+ ((pdev->device < 43000) || (pdev->device > 43999))))
+ return -ENODEV;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ pr_err("%s: Cannot enable device %d-%d_%d\n",
+ __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+ return -ENODEV;
+ }
+ pci_set_master(pdev);
+
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+ hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops);
+ if (!hw) {
+ pr_err("%s: ieee80211_alloc_hw failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+
+ pci_set_drvdata(pdev, hw);
+
+ memset(hw->priv, 0, sizeof(*wl));
+
+ wl = brcms_attach(pdev->vendor, pdev->device,
+ pci_resource_start(pdev, 0), pdev,
+ pdev->irq);
+
+ if (!wl) {
+ pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME,
+ __func__);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct brcms_info *wl;
+ struct ieee80211_hw *hw;
+
+ hw = pci_get_drvdata(pdev);
+ wl = hw->priv;
+ if (!wl) {
+ wiphy_err(wl->wiphy,
+ "brcms_suspend: pci_get_drvdata failed\n");
+ return -ENODEV;
+ }
+
+ /* only need to flag hw is down for proper resume */
+ spin_lock_bh(&wl->lock);
+ wl->pub->hw_up = false;
+ spin_unlock_bh(&wl->lock);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ return pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static int brcms_resume(struct pci_dev *pdev)
+{
+ struct brcms_info *wl;
+ struct ieee80211_hw *hw;
+ int err = 0;
+ u32 val;
+
+ hw = pci_get_drvdata(pdev);
+ wl = hw->priv;
+ if (!wl) {
+ wiphy_err(wl->wiphy,
+ "wl: brcms_resume: pci_get_drvdata failed\n");
+ return -ENODEV;
+ }
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ return err;
+
+ pci_restore_state(pdev);
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+ /*
+ * done. driver will be put in up state
+ * in brcms_ops_add_interface() call.
+ */
+ return err;
+}
+
+static struct pci_driver brcms_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = brcms_pci_probe,
+ .suspend = brcms_suspend,
+ .resume = brcms_resume,
+ .remove = __devexit_p(brcms_remove),
+ .id_table = brcms_pci_id_table,
+};
+
+/**
+ * This is the main entry point for the WL driver.
+ *
+ * This function determines if a device pointed to by pdev is a WL device,
+ * and if so, performs a brcms_attach() on it.
+ *
+ */
+static int __init brcms_module_init(void)
+{
+ int error = -ENODEV;
+
+#ifdef BCMDBG
+ if (msglevel != 0xdeadbeef)
+ brcm_msg_level = msglevel;
+#endif /* BCMDBG */
+
+ error = pci_register_driver(&brcms_pci_driver);
+ if (!error)
+ return 0;
+
+
+
+ return error;
+}
+
+/**
+ * This function unloads the WL driver from the system.
+ *
+ * This function unconditionally unloads the WL driver module from the
+ * system.
+ *
+ */
+static void __exit brcms_module_exit(void)
+{
+ pci_unregister_driver(&brcms_pci_driver);
+
+}
+
+module_init(brcms_module_init);
+module_exit(brcms_module_exit);
+
+/*
+ * precondition: perimeter lock has been acquired
+ */
+void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
+ bool state, int prio)
+{
+ wiphy_err(wl->wiphy, "Shouldn't be here %s\n", __func__);
+}
+
+/*
+ * precondition: perimeter lock has been acquired
+ */
+void brcms_init(struct brcms_info *wl)
+{
+ BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit);
+ brcms_reset(wl);
+
+ brcms_c_init(wl->wlc);
+}
+
+/*
+ * precondition: perimeter lock has been acquired
+ */
+uint brcms_reset(struct brcms_info *wl)
+{
+ BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit);
+ brcms_c_reset(wl->wlc);
+
+ /* dpc will not be rescheduled */
+ wl->resched = 0;
+
+ return 0;
+}
+
+/*
+ * These are interrupt on/off entry points. Disable interrupts
+ * during interrupt state transition.
+ */
+void brcms_intrson(struct brcms_info *wl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->isr_lock, flags);
+ brcms_c_intrson(wl->wlc);
+ spin_unlock_irqrestore(&wl->isr_lock, flags);
+}
+
+u32 brcms_intrsoff(struct brcms_info *wl)
+{
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&wl->isr_lock, flags);
+ status = brcms_c_intrsoff(wl->wlc);
+ spin_unlock_irqrestore(&wl->isr_lock, flags);
+ return status;
+}
+
+void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->isr_lock, flags);
+ brcms_c_intrsrestore(wl->wlc, macintmask);
+ spin_unlock_irqrestore(&wl->isr_lock, flags);
+}
+
+/*
+ * precondition: perimeter lock has been acquired
+ */
+int brcms_up(struct brcms_info *wl)
+{
+ int error = 0;
+
+ if (wl->pub->up)
+ return 0;
+
+ error = brcms_c_up(wl->wlc);
+
+ return error;
+}
+
+/*
+ * precondition: perimeter lock has been acquired
+ */
+void brcms_down(struct brcms_info *wl)
+{
+ uint callbacks, ret_val = 0;
+
+ /* call common down function */
+ ret_val = brcms_c_down(wl->wlc);
+ callbacks = atomic_read(&wl->callbacks) - ret_val;
+
+ /* wait for down callbacks to complete */
+ spin_unlock_bh(&wl->lock);
+
+ /* For HIGH_only driver, it's important to actually schedule other work,
+ * not just spin wait since everything runs at schedule level
+ */
+ SPINWAIT((atomic_read(&wl->callbacks) > callbacks), 100 * 1000);
+
+ spin_lock_bh(&wl->lock);
+}
+
+/*
+* precondition: perimeter lock is not acquired
+ */
+static void _brcms_timer(struct work_struct *work)
+{
+ struct brcms_timer *t = container_of(work, struct brcms_timer,
+ dly_wrk.work);
+
+ spin_lock_bh(&t->wl->lock);
+
+ if (t->set) {
+ if (t->periodic) {
+ atomic_inc(&t->wl->callbacks);
+ ieee80211_queue_delayed_work(t->wl->pub->ieee_hw,
+ &t->dly_wrk,
+ msecs_to_jiffies(t->ms));
+ } else {
+ t->set = false;
+ }
+
+ t->fn(t->arg);
+ }
+
+ atomic_dec(&t->wl->callbacks);
+
+ spin_unlock_bh(&t->wl->lock);
+}
+
+/*
+ * Adds a timer to the list. Caller supplies a timer function.
+ * Is called from wlc.
+ *
+ * precondition: perimeter lock has been acquired
+ */
+struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
+ void (*fn) (void *arg),
+ void *arg, const char *name)
+{
+ struct brcms_timer *t;
+
+ t = kzalloc(sizeof(struct brcms_timer), GFP_ATOMIC);
+ if (!t)
+ return NULL;
+
+ INIT_DELAYED_WORK(&t->dly_wrk, _brcms_timer);
+ t->wl = wl;
+ t->fn = fn;
+ t->arg = arg;
+ t->next = wl->timers;
+ wl->timers = t;
+
+#ifdef BCMDBG
+ t->name = kmalloc(strlen(name) + 1, GFP_ATOMIC);
+ if (t->name)
+ strcpy(t->name, name);
+#endif
+
+ return t;
+}
+
+/*
+ * adds only the kernel timer since it's going to be more accurate
+ * as well as it's easier to make it periodic
+ *
+ * precondition: perimeter lock has been acquired
+ */
+void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic)
+{
+ struct ieee80211_hw *hw = t->wl->pub->ieee_hw;
+
+#ifdef BCMDBG
+ if (t->set)
+ wiphy_err(hw->wiphy, "%s: Already set. Name: %s, per %d\n",
+ __func__, t->name, periodic);
+#endif
+ t->ms = ms;
+ t->periodic = (bool) periodic;
+ t->set = true;
+
+ atomic_inc(&t->wl->callbacks);
+
+ ieee80211_queue_delayed_work(hw, &t->dly_wrk, msecs_to_jiffies(ms));
+}
+
+/*
+ * return true if timer successfully deleted, false if still pending
+ *
+ * precondition: perimeter lock has been acquired
+ */
+bool brcms_del_timer(struct brcms_timer *t)
+{
+ if (t->set) {
+ t->set = false;
+ if (!cancel_delayed_work(&t->dly_wrk))
+ return false;
+
+ atomic_dec(&t->wl->callbacks);
+ }
+
+ return true;
+}
+
+/*
+ * precondition: perimeter lock has been acquired
+ */
+void brcms_free_timer(struct brcms_timer *t)
+{
+ struct brcms_info *wl = t->wl;
+ struct brcms_timer *tmp;
+
+ /* delete the timer in case it is active */
+ brcms_del_timer(t);
+
+ if (wl->timers == t) {
+ wl->timers = wl->timers->next;
+#ifdef BCMDBG
+ kfree(t->name);
+#endif
+ kfree(t);
+ return;
+
+ }
+
+ tmp = wl->timers;
+ while (tmp) {
+ if (tmp->next == t) {
+ tmp->next = t->next;
+#ifdef BCMDBG
+ kfree(t->name);
+#endif
+ kfree(t);
+ return;
+ }
+ tmp = tmp->next;
+ }
+
+}
+
+/*
+ * precondition: perimeter lock has been acquired
+ */
+int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
+{
+ int i, entry;
+ const u8 *pdata;
+ struct firmware_hdr *hdr;
+ for (i = 0; i < wl->fw.fw_cnt; i++) {
+ hdr = (struct firmware_hdr *)wl->fw.fw_hdr[i]->data;
+ for (entry = 0; entry < wl->fw.hdr_num_entries[i];
+ entry++, hdr++) {
+ u32 len = le32_to_cpu(hdr->len);
+ if (le32_to_cpu(hdr->idx) == idx) {
+ pdata = wl->fw.fw_bin[i]->data +
+ le32_to_cpu(hdr->offset);
+ *pbuf = kmalloc(len, GFP_ATOMIC);
+ if (*pbuf == NULL)
+ goto fail;
+
+ memcpy(*pbuf, pdata, len);
+ return 0;
+ }
+ }
+ }
+ wiphy_err(wl->wiphy, "ERROR: ucode buf tag:%d can not be found!\n",
+ idx);
+ *pbuf = NULL;
+fail:
+ return -ENODATA;
+}
+
+/*
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
+{
+ int i, entry;
+ const u8 *pdata;
+ struct firmware_hdr *hdr;
+ for (i = 0; i < wl->fw.fw_cnt; i++) {
+ hdr = (struct firmware_hdr *)wl->fw.fw_hdr[i]->data;
+ for (entry = 0; entry < wl->fw.hdr_num_entries[i];
+ entry++, hdr++) {
+ if (le32_to_cpu(hdr->idx) == idx) {
+ pdata = wl->fw.fw_bin[i]->data +
+ le32_to_cpu(hdr->offset);
+ if (le32_to_cpu(hdr->len) != 4) {
+ wiphy_err(wl->wiphy,
+ "ERROR: fw hdr len\n");
+ return -ENOMSG;
+ }
+ *n_bytes = le32_to_cpu(*((__le32 *) pdata));
+ return 0;
+ }
+ }
+ }
+ wiphy_err(wl->wiphy, "ERROR: ucode tag:%d can not be found!\n", idx);
+ return -ENOMSG;
+}
+
+/*
+ * precondition: can both be called locked and unlocked
+ */
+void brcms_ucode_free_buf(void *p)
+{
+ kfree(p);
+}
+
+/*
+ * checks validity of all firmware images loaded from user space
+ *
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+int brcms_check_firmwares(struct brcms_info *wl)
+{
+ int i;
+ int entry;
+ int rc = 0;
+ const struct firmware *fw;
+ const struct firmware *fw_hdr;
+ struct firmware_hdr *ucode_hdr;
+ for (i = 0; i < MAX_FW_IMAGES && rc == 0; i++) {
+ fw = wl->fw.fw_bin[i];
+ fw_hdr = wl->fw.fw_hdr[i];
+ if (fw == NULL && fw_hdr == NULL) {
+ break;
+ } else if (fw == NULL || fw_hdr == NULL) {
+ wiphy_err(wl->wiphy, "%s: invalid bin/hdr fw\n",
+ __func__);
+ rc = -EBADF;
+ } else if (fw_hdr->size % sizeof(struct firmware_hdr)) {
+ wiphy_err(wl->wiphy, "%s: non integral fw hdr file "
+ "size %zu/%zu\n", __func__, fw_hdr->size,
+ sizeof(struct firmware_hdr));
+ rc = -EBADF;
+ } else if (fw->size < MIN_FW_SIZE || fw->size > MAX_FW_SIZE) {
+ wiphy_err(wl->wiphy, "%s: out of bounds fw file size "
+ "%zu\n", __func__, fw->size);
+ rc = -EBADF;
+ } else {
+ /* check if ucode section overruns firmware image */
+ ucode_hdr = (struct firmware_hdr *)fw_hdr->data;
+ for (entry = 0; entry < wl->fw.hdr_num_entries[i] &&
+ !rc; entry++, ucode_hdr++) {
+ if (le32_to_cpu(ucode_hdr->offset) +
+ le32_to_cpu(ucode_hdr->len) >
+ fw->size) {
+ wiphy_err(wl->wiphy,
+ "%s: conflicting bin/hdr\n",
+ __func__);
+ rc = -EBADF;
+ }
+ }
+ }
+ }
+ if (rc == 0 && wl->fw.fw_cnt != i) {
+ wiphy_err(wl->wiphy, "%s: invalid fw_cnt=%d\n", __func__,
+ wl->fw.fw_cnt);
+ rc = -EBADF;
+ }
+ return rc;
+}
+
+/*
+ * precondition: perimeter lock has been acquired
+ */
+bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
+{
+ bool blocked = brcms_c_check_radio_disabled(wl->wlc);
+
+ spin_unlock_bh(&wl->lock);
+ wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
+ if (blocked)
+ wiphy_rfkill_start_polling(wl->pub->ieee_hw->wiphy);
+ spin_lock_bh(&wl->lock);
+ return blocked;
+}
+
+/*
+ * precondition: perimeter lock has been acquired
+ */
+void brcms_msleep(struct brcms_info *wl, uint ms)
+{
+ spin_unlock_bh(&wl->lock);
+ msleep(ms);
+ spin_lock_bh(&wl->lock);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
new file mode 100644
index 00000000000..177f0e44e4b
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_MAC80211_IF_H_
+#define _BRCM_MAC80211_IF_H_
+
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+#include "ucode_loader.h"
+/*
+ * Starting index for 5G rates in the
+ * legacy rate table.
+ */
+#define BRCMS_LEGACY_5G_RATE_OFFSET 4
+
+/* softmac ioctl definitions */
+#define BRCMS_SET_SHORTSLOT_OVERRIDE 146
+
+struct brcms_timer {
+ struct delayed_work dly_wrk;
+ struct brcms_info *wl;
+ void (*fn) (void *); /* function called upon expiration */
+ void *arg; /* fixed argument provided to called function */
+ uint ms;
+ bool periodic;
+ bool set; /* indicates if timer is active */
+ struct brcms_timer *next; /* for freeing on unload */
+#ifdef BCMDBG
+ char *name; /* Description of the timer */
+#endif
+};
+
+struct brcms_if {
+ uint subunit; /* WDS/BSS unit */
+ struct pci_dev *pci_dev;
+};
+
+#define MAX_FW_IMAGES 4
+struct brcms_firmware {
+ u32 fw_cnt;
+ const struct firmware *fw_bin[MAX_FW_IMAGES];
+ const struct firmware *fw_hdr[MAX_FW_IMAGES];
+ u32 hdr_num_entries[MAX_FW_IMAGES];
+};
+
+struct brcms_info {
+ struct brcms_pub *pub; /* pointer to public wlc state */
+ struct brcms_c_info *wlc; /* pointer to private common data */
+ u32 magic;
+
+ int irq;
+
+ spinlock_t lock; /* per-device perimeter lock */
+ spinlock_t isr_lock; /* per-device ISR synchronization lock */
+
+ /* regsva for unmap in brcms_free() */
+ void __iomem *regsva; /* opaque chip registers virtual address */
+
+ /* timer related fields */
+ atomic_t callbacks; /* # outstanding callback functions */
+ struct brcms_timer *timers; /* timer cleanup queue */
+
+ struct tasklet_struct tasklet; /* dpc tasklet */
+ bool resched; /* dpc needs to be and is rescheduled */
+ struct brcms_firmware fw;
+ struct wiphy *wiphy;
+ struct brcms_ucode ucode;
+};
+
+/* misc callbacks */
+extern void brcms_init(struct brcms_info *wl);
+extern uint brcms_reset(struct brcms_info *wl);
+extern void brcms_intrson(struct brcms_info *wl);
+extern u32 brcms_intrsoff(struct brcms_info *wl);
+extern void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
+extern int brcms_up(struct brcms_info *wl);
+extern void brcms_down(struct brcms_info *wl);
+extern void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
+ bool state, int prio);
+extern bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
+
+/* timer functions */
+extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
+ void (*fn) (void *arg), void *arg,
+ const char *name);
+extern void brcms_free_timer(struct brcms_timer *timer);
+extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
+extern bool brcms_del_timer(struct brcms_timer *timer);
+extern void brcms_msleep(struct brcms_info *wl, uint ms);
+extern void brcms_dpc(unsigned long data);
+extern void brcms_timer(struct brcms_timer *t);
+
+#endif /* _BRCM_MAC80211_IF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
new file mode 100644
index 00000000000..510e9bb5228
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -0,0 +1,8775 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci_ids.h>
+#include <linux/if_ether.h>
+#include <net/mac80211.h>
+#include <brcm_hw_ids.h>
+#include <aiutils.h>
+#include <chipcommon.h>
+#include "rate.h"
+#include "scb.h"
+#include "phy/phy_hal.h"
+#include "channel.h"
+#include "antsel.h"
+#include "stf.h"
+#include "ampdu.h"
+#include "mac80211_if.h"
+#include "ucode_loader.h"
+#include "main.h"
+
+/*
+ * Indication for txflowcontrol that all priority bits in
+ * TXQ_STOP_FOR_PRIOFC_MASK are to be considered.
+ */
+#define ALLPRIO -1
+
+/*
+ * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
+ */
+#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1)
+
+/* watchdog timer, in unit of ms */
+#define TIMER_INTERVAL_WATCHDOG 1000
+/* radio monitor timer, in unit of ms */
+#define TIMER_INTERVAL_RADIOCHK 800
+
+/* Max MPC timeout, in unit of watchdog */
+#ifndef BRCMS_MPC_MAX_DELAYCNT
+#define BRCMS_MPC_MAX_DELAYCNT 10
+#endif
+
+/* Min MPC timeout, in unit of watchdog */
+#define BRCMS_MPC_MIN_DELAYCNT 1
+#define BRCMS_MPC_THRESHOLD 3 /* MPC count threshold level */
+
+/* beacon interval, in unit of 1024TU */
+#define BEACON_INTERVAL_DEFAULT 100
+/* DTIM interval, in unit of beacon interval */
+#define DTIM_INTERVAL_DEFAULT 3
+
+/* Scale down delays to accommodate QT slow speed */
+/* beacon interval, in unit of 1024TU */
+#define BEACON_INTERVAL_DEF_QT 20
+/* DTIM interval, in unit of beacon interval */
+#define DTIM_INTERVAL_DEF_QT 1
+
+#define TBTT_ALIGN_LEEWAY_US 100 /* min leeway before first TBTT in us */
+
+/* n-mode support capability */
+/* 2x2 includes both 1x1 & 2x2 devices
+ * reserved #define 2 for future when we want to separate 1x1 & 2x2 and
+ * control it independently
+ */
+#define WL_11N_2x2 1
+#define WL_11N_3x3 3
+#define WL_11N_4x4 4
+
+/* define 11n feature disable flags */
+#define WLFEATURE_DISABLE_11N 0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
+#define WLFEATURE_DISABLE_11N_GF 0x00000080
+
+#define EDCF_ACI_MASK 0x60
+#define EDCF_ACI_SHIFT 5
+#define EDCF_ECWMIN_MASK 0x0f
+#define EDCF_ECWMAX_SHIFT 4
+#define EDCF_AIFSN_MASK 0x0f
+#define EDCF_AIFSN_MAX 15
+#define EDCF_ECWMAX_MASK 0xf0
+
+#define EDCF_AC_BE_TXOP_STA 0x0000
+#define EDCF_AC_BK_TXOP_STA 0x0000
+#define EDCF_AC_VO_ACI_STA 0x62
+#define EDCF_AC_VO_ECW_STA 0x32
+#define EDCF_AC_VI_ACI_STA 0x42
+#define EDCF_AC_VI_ECW_STA 0x43
+#define EDCF_AC_BK_ECW_STA 0xA4
+#define EDCF_AC_VI_TXOP_STA 0x005e
+#define EDCF_AC_VO_TXOP_STA 0x002f
+#define EDCF_AC_BE_ACI_STA 0x03
+#define EDCF_AC_BE_ECW_STA 0xA4
+#define EDCF_AC_BK_ACI_STA 0x27
+#define EDCF_AC_VO_TXOP_AP 0x002f
+
+#define EDCF_TXOP2USEC(txop) ((txop) << 5)
+#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
+
+#define APHY_SYMBOL_TIME 4
+#define APHY_PREAMBLE_TIME 16
+#define APHY_SIGNAL_TIME 4
+#define APHY_SIFS_TIME 16
+#define APHY_SERVICE_NBITS 16
+#define APHY_TAIL_NBITS 6
+#define BPHY_SIFS_TIME 10
+#define BPHY_PLCP_SHORT_TIME 96
+
+#define PREN_PREAMBLE 24
+#define PREN_MM_EXT 12
+#define PREN_PREAMBLE_EXT 4
+
+#define DOT11_MAC_HDR_LEN 24
+#define DOT11_ACK_LEN 10
+#define DOT11_BA_LEN 4
+#define DOT11_OFDM_SIGNAL_EXTENSION 6
+#define DOT11_MIN_FRAG_LEN 256
+#define DOT11_RTS_LEN 16
+#define DOT11_CTS_LEN 10
+#define DOT11_BA_BITMAP_LEN 128
+#define DOT11_MIN_BEACON_PERIOD 1
+#define DOT11_MAX_BEACON_PERIOD 0xFFFF
+#define DOT11_MAXNUMFRAGS 16
+#define DOT11_MAX_FRAG_LEN 2346
+
+#define BPHY_PLCP_TIME 192
+#define RIFS_11N_TIME 2
+
+#define WME_VER 1
+#define WME_SUBTYPE_PARAM_IE 1
+#define WME_TYPE 2
+#define WME_OUI "\x00\x50\xf2"
+
+#define AC_BE 0
+#define AC_BK 1
+#define AC_VI 2
+#define AC_VO 3
+
+#define BCN_TMPL_LEN 512 /* length of the BCN template area */
+
+/* brcms_bss_info flag bit values */
+#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */
+
+/* Flags used in brcms_c_txq_info.stopped */
+/* per prio flow control bits */
+#define TXQ_STOP_FOR_PRIOFC_MASK 0x000000FF
+/* stop txq enqueue for packet drain */
+#define TXQ_STOP_FOR_PKT_DRAIN 0x00000100
+/* stop txq enqueue for ampdu flow control */
+#define TXQ_STOP_FOR_AMPDU_FLOW_CNTRL 0x00000200
+
+#define BRCMS_HWRXOFF 38 /* chip rx buffer offset */
+
+/* Find basic rate for a given rate */
+static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
+{
+ if (is_mcs_rate(rspec))
+ return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK]
+ .leg_ofdm];
+ return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK];
+}
+
+static u16 frametype(u32 rspec, u8 mimoframe)
+{
+ if (is_mcs_rate(rspec))
+ return mimoframe;
+ return is_cck_rate(rspec) ? FT_CCK : FT_OFDM;
+}
+
+/* rfdisable delay timer 500 ms, runs of ALP clock */
+#define RFDISABLE_DEFAULT 10000000
+
+#define BRCMS_TEMPSENSE_PERIOD 10 /* 10 second timeout */
+
+/* precedences numbers for wlc queues. These are twice as may levels as
+ * 802.1D priorities.
+ * Odd numbers are used for HI priority traffic at same precedence levels
+ * These constants are used ONLY by wlc_prio2prec_map. Do not use them
+ * elsewhere.
+ */
+#define _BRCMS_PREC_NONE 0 /* None = - */
+#define _BRCMS_PREC_BK 2 /* BK - Background */
+#define _BRCMS_PREC_BE 4 /* BE - Best-effort */
+#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */
+#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */
+#define _BRCMS_PREC_VI 10 /* Vi - Video */
+#define _BRCMS_PREC_VO 12 /* Vo - Voice */
+#define _BRCMS_PREC_NC 14 /* NC - Network Control */
+
+/* The BSS is generating beacons in HW */
+#define BRCMS_BSSCFG_HW_BCN 0x20
+
+#define SYNTHPU_DLY_APHY_US 3700 /* a phy synthpu_dly time in us */
+#define SYNTHPU_DLY_BPHY_US 1050 /* b/g phy synthpu_dly time in us */
+#define SYNTHPU_DLY_NPHY_US 2048 /* n phy REV3 synthpu_dly time in us */
+#define SYNTHPU_DLY_LPPHY_US 300 /* lpphy synthpu_dly time in us */
+
+#define SYNTHPU_DLY_PHY_US_QT 100 /* QT synthpu_dly time in us */
+
+#define ANTCNT 10 /* vanilla M_MAX_ANTCNT value */
+
+/* Per-AC retry limit register definitions; uses defs.h bitfield macros */
+#define EDCF_SHORT_S 0
+#define EDCF_SFB_S 4
+#define EDCF_LONG_S 8
+#define EDCF_LFB_S 12
+#define EDCF_SHORT_M BITFIELD_MASK(4)
+#define EDCF_SFB_M BITFIELD_MASK(4)
+#define EDCF_LONG_M BITFIELD_MASK(4)
+#define EDCF_LFB_M BITFIELD_MASK(4)
+
+#define RETRY_SHORT_DEF 7 /* Default Short retry Limit */
+#define RETRY_SHORT_MAX 255 /* Maximum Short retry Limit */
+#define RETRY_LONG_DEF 4 /* Default Long retry count */
+#define RETRY_SHORT_FB 3 /* Short count for fallback rate */
+#define RETRY_LONG_FB 2 /* Long count for fallback rate */
+
+#define APHY_CWMIN 15
+#define PHY_CWMAX 1023
+
+#define EDCF_AIFSN_MIN 1
+
+#define FRAGNUM_MASK 0xF
+
+#define APHY_SLOT_TIME 9
+#define BPHY_SLOT_TIME 20
+
+#define WL_SPURAVOID_OFF 0
+#define WL_SPURAVOID_ON1 1
+#define WL_SPURAVOID_ON2 2
+
+/* invalid core flags, use the saved coreflags */
+#define BRCMS_USE_COREFLAGS 0xffffffff
+
+/* values for PLCPHdr_override */
+#define BRCMS_PLCP_AUTO -1
+#define BRCMS_PLCP_SHORT 0
+#define BRCMS_PLCP_LONG 1
+
+/* values for g_protection_override and n_protection_override */
+#define BRCMS_PROTECTION_AUTO -1
+#define BRCMS_PROTECTION_OFF 0
+#define BRCMS_PROTECTION_ON 1
+#define BRCMS_PROTECTION_MMHDR_ONLY 2
+#define BRCMS_PROTECTION_CTS_ONLY 3
+
+/* values for g_protection_control and n_protection_control */
+#define BRCMS_PROTECTION_CTL_OFF 0
+#define BRCMS_PROTECTION_CTL_LOCAL 1
+#define BRCMS_PROTECTION_CTL_OVERLAP 2
+
+/* values for n_protection */
+#define BRCMS_N_PROTECTION_OFF 0
+#define BRCMS_N_PROTECTION_OPTIONAL 1
+#define BRCMS_N_PROTECTION_20IN40 2
+#define BRCMS_N_PROTECTION_MIXEDMODE 3
+
+/* values for band specific 40MHz capabilities */
+#define BRCMS_N_BW_20ALL 0
+#define BRCMS_N_BW_40ALL 1
+#define BRCMS_N_BW_20IN2G_40IN5G 2
+
+/* bitflags for SGI support (sgi_rx iovar) */
+#define BRCMS_N_SGI_20 0x01
+#define BRCMS_N_SGI_40 0x02
+
+/* defines used by the nrate iovar */
+/* MSC in use,indicates b0-6 holds an mcs */
+#define NRATE_MCS_INUSE 0x00000080
+/* rate/mcs value */
+#define NRATE_RATE_MASK 0x0000007f
+/* stf mode mask: siso, cdd, stbc, sdm */
+#define NRATE_STF_MASK 0x0000ff00
+/* stf mode shift */
+#define NRATE_STF_SHIFT 8
+/* bit indicates override both rate & mode */
+#define NRATE_OVERRIDE 0x80000000
+/* bit indicate to override mcs only */
+#define NRATE_OVERRIDE_MCS_ONLY 0x40000000
+#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
+#define NRATE_SGI_SHIFT 23 /* sgi mode */
+#define NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
+#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
+
+#define NRATE_STF_SISO 0 /* stf mode SISO */
+#define NRATE_STF_CDD 1 /* stf mode CDD */
+#define NRATE_STF_STBC 2 /* stf mode STBC */
+#define NRATE_STF_SDM 3 /* stf mode SDM */
+
+#define MAX_DMA_SEGS 4
+
+/* Max # of entries in Tx FIFO based on 4kb page size */
+#define NTXD 256
+/* Max # of entries in Rx FIFO based on 4kb page size */
+#define NRXD 256
+
+/* try to keep this # rbufs posted to the chip */
+#define NRXBUFPOST 32
+
+/* data msg txq hiwat mark */
+#define BRCMS_DATAHIWAT 50
+
+/* bounded rx loops */
+#define RXBND 8 /* max # frames to process in brcms_c_recv() */
+#define TXSBND 8 /* max # tx status to process in wlc_txstatus() */
+
+/*
+ * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
+ */
+#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1)
+
+/* brcmu_format_flags() bit description structure */
+struct brcms_c_bit_desc {
+ u32 bit;
+ const char *name;
+};
+
+/*
+ * The following table lists the buffer memory allocated to xmt fifos in HW.
+ * the size is in units of 256bytes(one block), total size is HW dependent
+ * ucode has default fifo partition, sw can overwrite if necessary
+ *
+ * This is documented in twiki under the topic UcodeTxFifo. Please ensure
+ * the twiki is updated before making changes.
+ */
+
+/* Starting corerev for the fifo size table */
+#define XMTFIFOTBL_STARTREV 20
+
+struct d11init {
+ __le16 addr;
+ __le16 size;
+ __le32 value;
+};
+
+struct edcf_acparam {
+ u8 ACI;
+ u8 ECW;
+ u16 TXOP;
+} __packed;
+
+const u8 prio2fifo[NUMPRIO] = {
+ TX_AC_BE_FIFO, /* 0 BE AC_BE Best Effort */
+ TX_AC_BK_FIFO, /* 1 BK AC_BK Background */
+ TX_AC_BK_FIFO, /* 2 -- AC_BK Background */
+ TX_AC_BE_FIFO, /* 3 EE AC_BE Best Effort */
+ TX_AC_VI_FIFO, /* 4 CL AC_VI Video */
+ TX_AC_VI_FIFO, /* 5 VI AC_VI Video */
+ TX_AC_VO_FIFO, /* 6 VO AC_VO Voice */
+ TX_AC_VO_FIFO /* 7 NC AC_VO Voice */
+};
+
+/* debug/trace */
+uint brcm_msg_level =
+#if defined(BCMDBG)
+ LOG_ERROR_VAL;
+#else
+ 0;
+#endif /* BCMDBG */
+
+/* TX FIFO number to WME/802.1E Access Category */
+static const u8 wme_fifo2ac[] = { AC_BK, AC_BE, AC_VI, AC_VO, AC_BE, AC_BE };
+
+/* WME/802.1E Access Category to TX FIFO number */
+static const u8 wme_ac2fifo[] = { 1, 0, 2, 3 };
+
+/* 802.1D Priority to precedence queue mapping */
+const u8 wlc_prio2prec_map[] = {
+ _BRCMS_PREC_BE, /* 0 BE - Best-effort */
+ _BRCMS_PREC_BK, /* 1 BK - Background */
+ _BRCMS_PREC_NONE, /* 2 None = - */
+ _BRCMS_PREC_EE, /* 3 EE - Excellent-effort */
+ _BRCMS_PREC_CL, /* 4 CL - Controlled Load */
+ _BRCMS_PREC_VI, /* 5 Vi - Video */
+ _BRCMS_PREC_VO, /* 6 Vo - Voice */
+ _BRCMS_PREC_NC, /* 7 NC - Network Control */
+};
+
+static const u16 xmtfifo_sz[][NFIFO] = {
+ /* corerev 20: 5120, 49152, 49152, 5376, 4352, 1280 */
+ {20, 192, 192, 21, 17, 5},
+ /* corerev 21: 2304, 14848, 5632, 3584, 3584, 1280 */
+ {9, 58, 22, 14, 14, 5},
+ /* corerev 22: 5120, 49152, 49152, 5376, 4352, 1280 */
+ {20, 192, 192, 21, 17, 5},
+ /* corerev 23: 5120, 49152, 49152, 5376, 4352, 1280 */
+ {20, 192, 192, 21, 17, 5},
+ /* corerev 24: 2304, 14848, 5632, 3584, 3584, 1280 */
+ {9, 58, 22, 14, 14, 5},
+};
+
+static const u8 acbitmap2maxprio[] = {
+ PRIO_8021D_BE, PRIO_8021D_BE, PRIO_8021D_BK, PRIO_8021D_BK,
+ PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI,
+ PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO,
+ PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO
+};
+
+#ifdef BCMDBG
+static const char * const fifo_names[] = {
+ "AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" };
+#else
+static const char fifo_names[6][0];
+#endif
+
+#ifdef BCMDBG
+/* pointer to most recently allocated wl/wlc */
+static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL);
+#endif
+
+/* currently the best mechanism for determining SIFS is the band in use */
+static u16 get_sifs(struct brcms_band *band)
+{
+ return band->bandtype == BRCM_BAND_5G ? APHY_SIFS_TIME :
+ BPHY_SIFS_TIME;
+}
+
+/*
+ * Detect Card removed.
+ * Even checking an sbconfig register read will not false trigger when the core
+ * is in reset it breaks CF address mechanism. Accessing gphy phyversion will
+ * cause SB error if aphy is in reset on 4306B0-DB. Need a simple accessible
+ * reg with fixed 0/1 pattern (some platforms return all 0).
+ * If clocks are present, call the sb routine which will figure out if the
+ * device is removed.
+ */
+static bool brcms_deviceremoved(struct brcms_c_info *wlc)
+{
+ if (!wlc->hw->clk)
+ return ai_deviceremoved(wlc->hw->sih);
+ return (R_REG(&wlc->hw->regs->maccontrol) &
+ (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
+}
+
+/* sum the individual fifo tx pending packet counts */
+static s16 brcms_txpktpendtot(struct brcms_c_info *wlc)
+{
+ return wlc->core->txpktpend[0] + wlc->core->txpktpend[1] +
+ wlc->core->txpktpend[2] + wlc->core->txpktpend[3];
+}
+
+static bool brcms_is_mband_unlocked(struct brcms_c_info *wlc)
+{
+ return wlc->pub->_nbands > 1 && !wlc->bandlocked;
+}
+
+static int brcms_chspec_bw(u16 chanspec)
+{
+ if (CHSPEC_IS40(chanspec))
+ return BRCMS_40_MHZ;
+ if (CHSPEC_IS20(chanspec))
+ return BRCMS_20_MHZ;
+
+ return BRCMS_10_MHZ;
+}
+
+/*
+ * return true if Minimum Power Consumption should
+ * be entered, false otherwise
+ */
+static bool brcms_c_is_non_delay_mpc(struct brcms_c_info *wlc)
+{
+ return false;
+}
+
+static bool brcms_c_ismpc(struct brcms_c_info *wlc)
+{
+ return (wlc->mpc_delay_off == 0) && (brcms_c_is_non_delay_mpc(wlc));
+}
+
+static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg)
+{
+ if (cfg == NULL)
+ return;
+
+ kfree(cfg->current_bss);
+ kfree(cfg);
+}
+
+static void brcms_c_detach_mfree(struct brcms_c_info *wlc)
+{
+ if (wlc == NULL)
+ return;
+
+ brcms_c_bsscfg_mfree(wlc->bsscfg);
+ kfree(wlc->pub);
+ kfree(wlc->modulecb);
+ kfree(wlc->default_bss);
+ kfree(wlc->protection);
+ kfree(wlc->stf);
+ kfree(wlc->bandstate[0]);
+ kfree(wlc->corestate->macstat_snapshot);
+ kfree(wlc->corestate);
+ kfree(wlc->hw->bandstate[0]);
+ kfree(wlc->hw);
+
+ /* free the wlc */
+ kfree(wlc);
+ wlc = NULL;
+}
+
+static struct brcms_bss_cfg *brcms_c_bsscfg_malloc(uint unit)
+{
+ struct brcms_bss_cfg *cfg;
+
+ cfg = kzalloc(sizeof(struct brcms_bss_cfg), GFP_ATOMIC);
+ if (cfg == NULL)
+ goto fail;
+
+ cfg->current_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC);
+ if (cfg->current_bss == NULL)
+ goto fail;
+
+ return cfg;
+
+ fail:
+ brcms_c_bsscfg_mfree(cfg);
+ return NULL;
+}
+
+static struct brcms_c_info *
+brcms_c_attach_malloc(uint unit, uint *err, uint devid)
+{
+ struct brcms_c_info *wlc;
+
+ wlc = kzalloc(sizeof(struct brcms_c_info), GFP_ATOMIC);
+ if (wlc == NULL) {
+ *err = 1002;
+ goto fail;
+ }
+
+ /* allocate struct brcms_c_pub state structure */
+ wlc->pub = kzalloc(sizeof(struct brcms_pub), GFP_ATOMIC);
+ if (wlc->pub == NULL) {
+ *err = 1003;
+ goto fail;
+ }
+ wlc->pub->wlc = wlc;
+
+ /* allocate struct brcms_hardware state structure */
+
+ wlc->hw = kzalloc(sizeof(struct brcms_hardware), GFP_ATOMIC);
+ if (wlc->hw == NULL) {
+ *err = 1005;
+ goto fail;
+ }
+ wlc->hw->wlc = wlc;
+
+ wlc->hw->bandstate[0] =
+ kzalloc(sizeof(struct brcms_hw_band) * MAXBANDS, GFP_ATOMIC);
+ if (wlc->hw->bandstate[0] == NULL) {
+ *err = 1006;
+ goto fail;
+ } else {
+ int i;
+
+ for (i = 1; i < MAXBANDS; i++)
+ wlc->hw->bandstate[i] = (struct brcms_hw_band *)
+ ((unsigned long)wlc->hw->bandstate[0] +
+ (sizeof(struct brcms_hw_band) * i));
+ }
+
+ wlc->modulecb =
+ kzalloc(sizeof(struct modulecb) * BRCMS_MAXMODULES, GFP_ATOMIC);
+ if (wlc->modulecb == NULL) {
+ *err = 1009;
+ goto fail;
+ }
+
+ wlc->default_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC);
+ if (wlc->default_bss == NULL) {
+ *err = 1010;
+ goto fail;
+ }
+
+ wlc->bsscfg = brcms_c_bsscfg_malloc(unit);
+ if (wlc->bsscfg == NULL) {
+ *err = 1011;
+ goto fail;
+ }
+
+ wlc->protection = kzalloc(sizeof(struct brcms_protection),
+ GFP_ATOMIC);
+ if (wlc->protection == NULL) {
+ *err = 1016;
+ goto fail;
+ }
+
+ wlc->stf = kzalloc(sizeof(struct brcms_stf), GFP_ATOMIC);
+ if (wlc->stf == NULL) {
+ *err = 1017;
+ goto fail;
+ }
+
+ wlc->bandstate[0] =
+ kzalloc(sizeof(struct brcms_band)*MAXBANDS, GFP_ATOMIC);
+ if (wlc->bandstate[0] == NULL) {
+ *err = 1025;
+ goto fail;
+ } else {
+ int i;
+
+ for (i = 1; i < MAXBANDS; i++)
+ wlc->bandstate[i] = (struct brcms_band *)
+ ((unsigned long)wlc->bandstate[0]
+ + (sizeof(struct brcms_band)*i));
+ }
+
+ wlc->corestate = kzalloc(sizeof(struct brcms_core), GFP_ATOMIC);
+ if (wlc->corestate == NULL) {
+ *err = 1026;
+ goto fail;
+ }
+
+ wlc->corestate->macstat_snapshot =
+ kzalloc(sizeof(struct macstat), GFP_ATOMIC);
+ if (wlc->corestate->macstat_snapshot == NULL) {
+ *err = 1027;
+ goto fail;
+ }
+
+ return wlc;
+
+ fail:
+ brcms_c_detach_mfree(wlc);
+ return NULL;
+}
+
+/*
+ * Update the slot timing for standard 11b/g (20us slots)
+ * or shortslot 11g (9us slots)
+ * The PSM needs to be suspended for this call.
+ */
+static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
+ bool shortslot)
+{
+ struct d11regs __iomem *regs;
+
+ regs = wlc_hw->regs;
+
+ if (shortslot) {
+ /* 11g short slot: 11a timing */
+ W_REG(&regs->ifs_slot, 0x0207); /* APHY_SLOT_TIME */
+ brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME);
+ } else {
+ /* 11g long slot: 11b timing */
+ W_REG(&regs->ifs_slot, 0x0212); /* BPHY_SLOT_TIME */
+ brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME);
+ }
+}
+
+/*
+ * calculate frame duration of a given rate and length, return
+ * time in usec unit
+ */
+uint
+brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
+ u8 preamble_type, uint mac_len)
+{
+ uint nsyms, dur = 0, Ndps, kNdps;
+ uint rate = rspec2rate(ratespec);
+
+ if (rate == 0) {
+ wiphy_err(wlc->wiphy, "wl%d: WAR: using rate of 1 mbps\n",
+ wlc->pub->unit);
+ rate = BRCM_RATE_1M;
+ }
+
+ BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d, len%d\n",
+ wlc->pub->unit, ratespec, preamble_type, mac_len);
+
+ if (is_mcs_rate(ratespec)) {
+ uint mcs = ratespec & RSPEC_RATE_MASK;
+ int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec);
+
+ dur = PREN_PREAMBLE + (tot_streams * PREN_PREAMBLE_EXT);
+ if (preamble_type == BRCMS_MM_PREAMBLE)
+ dur += PREN_MM_EXT;
+ /* 1000Ndbps = kbps * 4 */
+ kNdps = mcs_2_rate(mcs, rspec_is40mhz(ratespec),
+ rspec_issgi(ratespec)) * 4;
+
+ if (rspec_stc(ratespec) == 0)
+ nsyms =
+ CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
+ APHY_TAIL_NBITS) * 1000, kNdps);
+ else
+ /* STBC needs to have even number of symbols */
+ nsyms =
+ 2 *
+ CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
+ APHY_TAIL_NBITS) * 1000, 2 * kNdps);
+
+ dur += APHY_SYMBOL_TIME * nsyms;
+ if (wlc->band->bandtype == BRCM_BAND_2G)
+ dur += DOT11_OFDM_SIGNAL_EXTENSION;
+ } else if (is_ofdm_rate(rate)) {
+ dur = APHY_PREAMBLE_TIME;
+ dur += APHY_SIGNAL_TIME;
+ /* Ndbps = Mbps * 4 = rate(500Kbps) * 2 */
+ Ndps = rate * 2;
+ /* NSyms = CEILING((SERVICE + 8*NBytes + TAIL) / Ndbps) */
+ nsyms =
+ CEIL((APHY_SERVICE_NBITS + 8 * mac_len + APHY_TAIL_NBITS),
+ Ndps);
+ dur += APHY_SYMBOL_TIME * nsyms;
+ if (wlc->band->bandtype == BRCM_BAND_2G)
+ dur += DOT11_OFDM_SIGNAL_EXTENSION;
+ } else {
+ /*
+ * calc # bits * 2 so factor of 2 in rate (1/2 mbps)
+ * will divide out
+ */
+ mac_len = mac_len * 8 * 2;
+ /* calc ceiling of bits/rate = microseconds of air time */
+ dur = (mac_len + rate - 1) / rate;
+ if (preamble_type & BRCMS_SHORT_PREAMBLE)
+ dur += BPHY_PLCP_SHORT_TIME;
+ else
+ dur += BPHY_PLCP_TIME;
+ }
+ return dur;
+}
+
+static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
+ const struct d11init *inits)
+{
+ int i;
+ u8 __iomem *base;
+ u8 __iomem *addr;
+ u16 size;
+ u32 value;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ base = (u8 __iomem *)wlc_hw->regs;
+
+ for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) {
+ size = le16_to_cpu(inits[i].size);
+ addr = base + le16_to_cpu(inits[i].addr);
+ value = le32_to_cpu(inits[i].value);
+ if (size == 2)
+ W_REG((u16 __iomem *)addr, value);
+ else if (size == 4)
+ W_REG((u32 __iomem *)addr, value);
+ else
+ break;
+ }
+}
+
+static void brcms_c_write_mhf(struct brcms_hardware *wlc_hw, u16 *mhfs)
+{
+ u8 idx;
+ u16 addr[] = {
+ M_HOST_FLAGS1, M_HOST_FLAGS2, M_HOST_FLAGS3, M_HOST_FLAGS4,
+ M_HOST_FLAGS5
+ };
+
+ for (idx = 0; idx < MHFMAX; idx++)
+ brcms_b_write_shm(wlc_hw, addr[idx], mhfs[idx]);
+}
+
+static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
+{
+ struct wiphy *wiphy = wlc_hw->wlc->wiphy;
+ struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
+
+ /* init microcode host flags */
+ brcms_c_write_mhf(wlc_hw, wlc_hw->band->mhfs);
+
+ /* do band-specific ucode IHR, SHM, and SCR inits */
+ if (D11REV_IS(wlc_hw->corerev, 23)) {
+ if (BRCMS_ISNPHY(wlc_hw->band))
+ brcms_c_write_inits(wlc_hw, ucode->d11n0bsinitvals16);
+ else
+ wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
+ " %d\n", __func__, wlc_hw->unit,
+ wlc_hw->corerev);
+ } else {
+ if (D11REV_IS(wlc_hw->corerev, 24)) {
+ if (BRCMS_ISLCNPHY(wlc_hw->band))
+ brcms_c_write_inits(wlc_hw,
+ ucode->d11lcn0bsinitvals24);
+ else
+ wiphy_err(wiphy, "%s: wl%d: unsupported phy in"
+ " core rev %d\n", __func__,
+ wlc_hw->unit, wlc_hw->corerev);
+ } else {
+ wiphy_err(wiphy, "%s: wl%d: unsupported corerev %d\n",
+ __func__, wlc_hw->unit, wlc_hw->corerev);
+ }
+ }
+}
+
+static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
+{
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk);
+
+ wlc_hw->phyclk = clk;
+
+ if (OFF == clk) { /* clear gmode bit, put phy into reset */
+
+ ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC | SICF_GMODE),
+ (SICF_PRST | SICF_FGC));
+ udelay(1);
+ ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_PRST);
+ udelay(1);
+
+ } else { /* take phy out of reset */
+
+ ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_FGC);
+ udelay(1);
+ ai_core_cflags(wlc_hw->sih, (SICF_FGC), 0);
+ udelay(1);
+
+ }
+}
+
+/* low-level band switch utility routine */
+static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit)
+{
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
+ bandunit);
+
+ wlc_hw->band = wlc_hw->bandstate[bandunit];
+
+ /*
+ * BMAC_NOTE:
+ * until we eliminate need for wlc->band refs in low level code
+ */
+ wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit];
+
+ /* set gmode core flag */
+ if (wlc_hw->sbclk && !wlc_hw->noreset)
+ ai_core_cflags(wlc_hw->sih, SICF_GMODE,
+ ((bandunit == 0) ? SICF_GMODE : 0));
+}
+
+/* switch to new band but leave it inactive */
+static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit)
+{
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ u32 macintmask;
+
+ BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+
+ /* disable interrupts */
+ macintmask = brcms_intrsoff(wlc->wl);
+
+ /* radio off */
+ wlc_phy_switch_radio(wlc_hw->band->pi, OFF);
+
+ brcms_b_core_phy_clk(wlc_hw, OFF);
+
+ brcms_c_setxband(wlc_hw, bandunit);
+
+ return macintmask;
+}
+
+/* process an individual struct tx_status */
+static bool
+brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
+{
+ struct sk_buff *p;
+ uint queue;
+ struct d11txh *txh;
+ struct scb *scb = NULL;
+ bool free_pdu;
+ int tx_rts, tx_frame_count, tx_rts_count;
+ uint totlen, supr_status;
+ bool lastframe;
+ struct ieee80211_hdr *h;
+ u16 mcl;
+ struct ieee80211_tx_info *tx_info;
+ struct ieee80211_tx_rate *txrate;
+ int i;
+
+ /* discard intermediate indications for ucode with one legitimate case:
+ * e.g. if "useRTS" is set. ucode did a successful rts/cts exchange,
+ * but the subsequent tx of DATA failed. so it will start rts/cts
+ * from the beginning (resetting the rts transmission count)
+ */
+ if (!(txs->status & TX_STATUS_AMPDU)
+ && (txs->status & TX_STATUS_INTERMEDIATE)) {
+ wiphy_err(wlc->wiphy, "%s: INTERMEDIATE but not AMPDU\n",
+ __func__);
+ return false;
+ }
+
+ queue = txs->frameid & TXFID_QUEUE_MASK;
+ if (queue >= NFIFO) {
+ p = NULL;
+ goto fatal;
+ }
+
+ p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
+ if (p == NULL)
+ goto fatal;
+
+ txh = (struct d11txh *) (p->data);
+ mcl = le16_to_cpu(txh->MacTxControlLow);
+
+ if (txs->phyerr) {
+ if (brcm_msg_level & LOG_ERROR_VAL) {
+ wiphy_err(wlc->wiphy, "phyerr 0x%x, rate 0x%x\n",
+ txs->phyerr, txh->MainRates);
+ brcms_c_print_txdesc(txh);
+ }
+ brcms_c_print_txstatus(txs);
+ }
+
+ if (txs->frameid != le16_to_cpu(txh->TxFrameID))
+ goto fatal;
+ tx_info = IEEE80211_SKB_CB(p);
+ h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
+
+ if (tx_info->control.sta)
+ scb = &wlc->pri_scb;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ brcms_c_ampdu_dotxstatus(wlc->ampdu, scb, p, txs);
+ return false;
+ }
+
+ supr_status = txs->status & TX_STATUS_SUPR_MASK;
+ if (supr_status == TX_STATUS_SUPR_BADCH)
+ BCMMSG(wlc->wiphy,
+ "%s: Pkt tx suppressed, possibly channel %d\n",
+ __func__, CHSPEC_CHANNEL(wlc->default_bss->chanspec));
+
+ tx_rts = le16_to_cpu(txh->MacTxControlLow) & TXC_SENDRTS;
+ tx_frame_count =
+ (txs->status & TX_STATUS_FRM_RTX_MASK) >> TX_STATUS_FRM_RTX_SHIFT;
+ tx_rts_count =
+ (txs->status & TX_STATUS_RTS_RTX_MASK) >> TX_STATUS_RTS_RTX_SHIFT;
+
+ lastframe = !ieee80211_has_morefrags(h->frame_control);
+
+ if (!lastframe) {
+ wiphy_err(wlc->wiphy, "Not last frame!\n");
+ } else {
+ /*
+ * Set information to be consumed by Minstrel ht.
+ *
+ * The "fallback limit" is the number of tx attempts a given
+ * MPDU is sent at the "primary" rate. Tx attempts beyond that
+ * limit are sent at the "secondary" rate.
+ * A 'short frame' does not exceed RTS treshold.
+ */
+ u16 sfbl, /* Short Frame Rate Fallback Limit */
+ lfbl, /* Long Frame Rate Fallback Limit */
+ fbl;
+
+ if (queue < AC_COUNT) {
+ sfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
+ EDCF_SFB);
+ lfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
+ EDCF_LFB);
+ } else {
+ sfbl = wlc->SFBL;
+ lfbl = wlc->LFBL;
+ }
+
+ txrate = tx_info->status.rates;
+ if (txrate[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ fbl = lfbl;
+ else
+ fbl = sfbl;
+
+ ieee80211_tx_info_clear_status(tx_info);
+
+ if ((tx_frame_count > fbl) && (txrate[1].idx >= 0)) {
+ /*
+ * rate selection requested a fallback rate
+ * and we used it
+ */
+ txrate[0].count = fbl;
+ txrate[1].count = tx_frame_count - fbl;
+ } else {
+ /*
+ * rate selection did not request fallback rate, or
+ * we didn't need it
+ */
+ txrate[0].count = tx_frame_count;
+ /*
+ * rc80211_minstrel.c:minstrel_tx_status() expects
+ * unused rates to be marked with idx = -1
+ */
+ txrate[1].idx = -1;
+ txrate[1].count = 0;
+ }
+
+ /* clear the rest of the rates */
+ for (i = 2; i < IEEE80211_TX_MAX_RATES; i++) {
+ txrate[i].idx = -1;
+ txrate[i].count = 0;
+ }
+
+ if (txs->status & TX_STATUS_ACK_RCV)
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+ }
+
+ totlen = brcmu_pkttotlen(p);
+ free_pdu = true;
+
+ brcms_c_txfifo_complete(wlc, queue, 1);
+
+ if (lastframe) {
+ p->next = NULL;
+ p->prev = NULL;
+ /* remove PLCP & Broadcom tx descriptor header */
+ skb_pull(p, D11_PHY_HDR_LEN);
+ skb_pull(p, D11_TXH_LEN);
+ ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p);
+ } else {
+ wiphy_err(wlc->wiphy, "%s: Not last frame => not calling "
+ "tx_status\n", __func__);
+ }
+
+ return false;
+
+ fatal:
+ if (p)
+ brcmu_pkt_buf_free_skb(p);
+
+ return true;
+
+}
+
+/* process tx completion events in BMAC
+ * Return true if more tx status need to be processed. false otherwise.
+ */
+static bool
+brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
+{
+ bool morepending = false;
+ struct brcms_c_info *wlc = wlc_hw->wlc;
+ struct d11regs __iomem *regs;
+ struct tx_status txstatus, *txs;
+ u32 s1, s2;
+ uint n = 0;
+ /*
+ * Param 'max_tx_num' indicates max. # tx status to process before
+ * break out.
+ */
+ uint max_tx_num = bound ? TXSBND : -1;
+
+ BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ txs = &txstatus;
+ regs = wlc_hw->regs;
+ *fatal = false;
+ while (!(*fatal)
+ && (s1 = R_REG(&regs->frmtxstatus)) & TXS_V) {
+
+ if (s1 == 0xffffffff) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n",
+ wlc_hw->unit, __func__);
+ return morepending;
+ }
+
+ s2 = R_REG(&regs->frmtxstatus2);
+
+ txs->status = s1 & TXS_STATUS_MASK;
+ txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
+ txs->sequence = s2 & TXS_SEQ_MASK;
+ txs->phyerr = (s2 & TXS_PTX_MASK) >> TXS_PTX_SHIFT;
+ txs->lasttxtime = 0;
+
+ *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
+
+ /* !give others some time to run! */
+ if (++n >= max_tx_num)
+ break;
+ }
+
+ if (*fatal)
+ return 0;
+
+ if (n >= max_tx_num)
+ morepending = true;
+
+ if (!pktq_empty(&wlc->pkt_queue->q))
+ brcms_c_send_q(wlc);
+
+ return morepending;
+}
+
+static void brcms_c_tbtt(struct brcms_c_info *wlc)
+{
+ if (!wlc->bsscfg->BSS)
+ /*
+ * DirFrmQ is now valid...defer setting until end
+ * of ATIM window
+ */
+ wlc->qvalid |= MCMD_DIRFRMQVAL;
+}
+
+/* set initial host flags value */
+static void
+brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init)
+{
+ struct brcms_hardware *wlc_hw = wlc->hw;
+
+ memset(mhfs, 0, MHFMAX * sizeof(u16));
+
+ mhfs[MHF2] |= mhf2_init;
+
+ /* prohibit use of slowclock on multifunction boards */
+ if (wlc_hw->boardflags & BFL_NOPLLDOWN)
+ mhfs[MHF1] |= MHF1_FORCEFASTCLK;
+
+ if (BRCMS_ISNPHY(wlc_hw->band) && NREV_LT(wlc_hw->band->phyrev, 2)) {
+ mhfs[MHF2] |= MHF2_NPHY40MHZ_WAR;
+ mhfs[MHF1] |= MHF1_IQSWAP_WAR;
+ }
+}
+
+static struct dma64regs __iomem *
+dmareg(struct brcms_hardware *hw, uint direction, uint fifonum)
+{
+ if (direction == DMA_TX)
+ return &(hw->regs->fifo64regs[fifonum].dmaxmt);
+ return &(hw->regs->fifo64regs[fifonum].dmarcv);
+}
+
+static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
+{
+ uint i;
+ char name[8];
+ /*
+ * ucode host flag 2 needed for pio mode, independent of band and fifo
+ */
+ u16 pio_mhf2 = 0;
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ uint unit = wlc_hw->unit;
+ struct wiphy *wiphy = wlc->wiphy;
+
+ /* name and offsets for dma_attach */
+ snprintf(name, sizeof(name), "wl%d", unit);
+
+ if (wlc_hw->di[0] == NULL) { /* Init FIFOs */
+ int dma_attach_err = 0;
+
+ /*
+ * FIFO 0
+ * TX: TX_AC_BK_FIFO (TX AC Background data packets)
+ * RX: RX_FIFO (RX data packets)
+ */
+ wlc_hw->di[0] = dma_attach(name, wlc_hw->sih,
+ (wme ? dmareg(wlc_hw, DMA_TX, 0) :
+ NULL), dmareg(wlc_hw, DMA_RX, 0),
+ (wme ? NTXD : 0), NRXD,
+ RXBUFSZ, -1, NRXBUFPOST,
+ BRCMS_HWRXOFF, &brcm_msg_level);
+ dma_attach_err |= (NULL == wlc_hw->di[0]);
+
+ /*
+ * FIFO 1
+ * TX: TX_AC_BE_FIFO (TX AC Best-Effort data packets)
+ * (legacy) TX_DATA_FIFO (TX data packets)
+ * RX: UNUSED
+ */
+ wlc_hw->di[1] = dma_attach(name, wlc_hw->sih,
+ dmareg(wlc_hw, DMA_TX, 1), NULL,
+ NTXD, 0, 0, -1, 0, 0,
+ &brcm_msg_level);
+ dma_attach_err |= (NULL == wlc_hw->di[1]);
+
+ /*
+ * FIFO 2
+ * TX: TX_AC_VI_FIFO (TX AC Video data packets)
+ * RX: UNUSED
+ */
+ wlc_hw->di[2] = dma_attach(name, wlc_hw->sih,
+ dmareg(wlc_hw, DMA_TX, 2), NULL,
+ NTXD, 0, 0, -1, 0, 0,
+ &brcm_msg_level);
+ dma_attach_err |= (NULL == wlc_hw->di[2]);
+ /*
+ * FIFO 3
+ * TX: TX_AC_VO_FIFO (TX AC Voice data packets)
+ * (legacy) TX_CTL_FIFO (TX control & mgmt packets)
+ */
+ wlc_hw->di[3] = dma_attach(name, wlc_hw->sih,
+ dmareg(wlc_hw, DMA_TX, 3),
+ NULL, NTXD, 0, 0, -1,
+ 0, 0, &brcm_msg_level);
+ dma_attach_err |= (NULL == wlc_hw->di[3]);
+/* Cleaner to leave this as if with AP defined */
+
+ if (dma_attach_err) {
+ wiphy_err(wiphy, "wl%d: wlc_attach: dma_attach failed"
+ "\n", unit);
+ return false;
+ }
+
+ /* get pointer to dma engine tx flow control variable */
+ for (i = 0; i < NFIFO; i++)
+ if (wlc_hw->di[i])
+ wlc_hw->txavail[i] =
+ (uint *) dma_getvar(wlc_hw->di[i],
+ "&txavail");
+ }
+
+ /* initial ucode host flags */
+ brcms_c_mhfdef(wlc, wlc_hw->band->mhfs, pio_mhf2);
+
+ return true;
+}
+
+static void brcms_b_detach_dmapio(struct brcms_hardware *wlc_hw)
+{
+ uint j;
+
+ for (j = 0; j < NFIFO; j++) {
+ if (wlc_hw->di[j]) {
+ dma_detach(wlc_hw->di[j]);
+ wlc_hw->di[j] = NULL;
+ }
+ }
+}
+
+/*
+ * Initialize brcms_c_info default values ...
+ * may get overrides later in this function
+ * BMAC_NOTES, move low out and resolve the dangling ones
+ */
+static void brcms_b_info_init(struct brcms_hardware *wlc_hw)
+{
+ struct brcms_c_info *wlc = wlc_hw->wlc;
+
+ /* set default sw macintmask value */
+ wlc->defmacintmask = DEF_MACINTMASK;
+
+ /* various 802.11g modes */
+ wlc_hw->shortslot = false;
+
+ wlc_hw->SFBL = RETRY_SHORT_FB;
+ wlc_hw->LFBL = RETRY_LONG_FB;
+
+ /* default mac retry limits */
+ wlc_hw->SRL = RETRY_SHORT_DEF;
+ wlc_hw->LRL = RETRY_LONG_DEF;
+ wlc_hw->chanspec = ch20mhz_chspec(1);
+}
+
+static void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw)
+{
+ /* delay before first read of ucode state */
+ udelay(40);
+
+ /* wait until ucode is no longer asleep */
+ SPINWAIT((brcms_b_read_shm(wlc_hw, M_UCODE_DBGST) ==
+ DBGST_ASLEEP), wlc_hw->wlc->fastpwrup_dly);
+}
+
+/* control chip clock to save power, enable dynamic clock or force fast clock */
+static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
+{
+ if (wlc_hw->sih->cccaps & CC_CAP_PMU) {
+ /* new chips with PMU, CCS_FORCEHT will distribute the HT clock
+ * on backplane, but mac core will still run on ALP(not HT) when
+ * it enters powersave mode, which means the FCA bit may not be
+ * set. Should wakeup mac if driver wants it to run on HT.
+ */
+
+ if (wlc_hw->clk) {
+ if (mode == CLK_FAST) {
+ OR_REG(&wlc_hw->regs->clk_ctl_st,
+ CCS_FORCEHT);
+
+ udelay(64);
+
+ SPINWAIT(((R_REG
+ (&wlc_hw->regs->
+ clk_ctl_st) & CCS_HTAVAIL) == 0),
+ PMU_MAX_TRANSITION_DLY);
+ WARN_ON(!(R_REG
+ (&wlc_hw->regs->
+ clk_ctl_st) & CCS_HTAVAIL));
+ } else {
+ if ((wlc_hw->sih->pmurev == 0) &&
+ (R_REG
+ (&wlc_hw->regs->
+ clk_ctl_st) & (CCS_FORCEHT | CCS_HTAREQ)))
+ SPINWAIT(((R_REG
+ (&wlc_hw->regs->
+ clk_ctl_st) & CCS_HTAVAIL)
+ == 0),
+ PMU_MAX_TRANSITION_DLY);
+ AND_REG(&wlc_hw->regs->clk_ctl_st,
+ ~CCS_FORCEHT);
+ }
+ }
+ wlc_hw->forcefastclk = (mode == CLK_FAST);
+ } else {
+
+ /* old chips w/o PMU, force HT through cc,
+ * then use FCA to verify mac is running fast clock
+ */
+
+ wlc_hw->forcefastclk = ai_clkctl_cc(wlc_hw->sih, mode);
+
+ /* check fast clock is available (if core is not in reset) */
+ if (wlc_hw->forcefastclk && wlc_hw->clk)
+ WARN_ON(!(ai_core_sflags(wlc_hw->sih, 0, 0) &
+ SISF_FCLKA));
+
+ /*
+ * keep the ucode wake bit on if forcefastclk is on since we
+ * do not want ucode to put us back to slow clock when it dozes
+ * for PM mode. Code below matches the wake override bit with
+ * current forcefastclk state. Only setting bit in wake_override
+ * instead of waking ucode immediately since old code had this
+ * behavior. Older code set wlc->forcefastclk but only had the
+ * wake happen if the wakup_ucode work (protected by an up
+ * check) was executed just below.
+ */
+ if (wlc_hw->forcefastclk)
+ mboolset(wlc_hw->wake_override,
+ BRCMS_WAKE_OVERRIDE_FORCEFAST);
+ else
+ mboolclr(wlc_hw->wake_override,
+ BRCMS_WAKE_OVERRIDE_FORCEFAST);
+ }
+}
+
+/* set or clear ucode host flag bits
+ * it has an optimization for no-change write
+ * it only writes through shared memory when the core has clock;
+ * pre-CLK changes should use wlc_write_mhf to get around the optimization
+ *
+ *
+ * bands values are: BRCM_BAND_AUTO <--- Current band only
+ * BRCM_BAND_5G <--- 5G band only
+ * BRCM_BAND_2G <--- 2G band only
+ * BRCM_BAND_ALL <--- All bands
+ */
+void
+brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, u16 val,
+ int bands)
+{
+ u16 save;
+ u16 addr[MHFMAX] = {
+ M_HOST_FLAGS1, M_HOST_FLAGS2, M_HOST_FLAGS3, M_HOST_FLAGS4,
+ M_HOST_FLAGS5
+ };
+ struct brcms_hw_band *band;
+
+ if ((val & ~mask) || idx >= MHFMAX)
+ return; /* error condition */
+
+ switch (bands) {
+ /* Current band only or all bands,
+ * then set the band to current band
+ */
+ case BRCM_BAND_AUTO:
+ case BRCM_BAND_ALL:
+ band = wlc_hw->band;
+ break;
+ case BRCM_BAND_5G:
+ band = wlc_hw->bandstate[BAND_5G_INDEX];
+ break;
+ case BRCM_BAND_2G:
+ band = wlc_hw->bandstate[BAND_2G_INDEX];
+ break;
+ default:
+ band = NULL; /* error condition */
+ }
+
+ if (band) {
+ save = band->mhfs[idx];
+ band->mhfs[idx] = (band->mhfs[idx] & ~mask) | val;
+
+ /* optimization: only write through if changed, and
+ * changed band is the current band
+ */
+ if (wlc_hw->clk && (band->mhfs[idx] != save)
+ && (band == wlc_hw->band))
+ brcms_b_write_shm(wlc_hw, addr[idx],
+ (u16) band->mhfs[idx]);
+ }
+
+ if (bands == BRCM_BAND_ALL) {
+ wlc_hw->bandstate[0]->mhfs[idx] =
+ (wlc_hw->bandstate[0]->mhfs[idx] & ~mask) | val;
+ wlc_hw->bandstate[1]->mhfs[idx] =
+ (wlc_hw->bandstate[1]->mhfs[idx] & ~mask) | val;
+ }
+}
+
+/* set the maccontrol register to desired reset state and
+ * initialize the sw cache of the register
+ */
+static void brcms_c_mctrl_reset(struct brcms_hardware *wlc_hw)
+{
+ /* IHR accesses are always enabled, PSM disabled, HPS off and WAKE on */
+ wlc_hw->maccontrol = 0;
+ wlc_hw->suspended_fifos = 0;
+ wlc_hw->wake_override = 0;
+ wlc_hw->mute_override = 0;
+ brcms_b_mctrl(wlc_hw, ~0, MCTL_IHR_EN | MCTL_WAKE);
+}
+
+/*
+ * write the software state of maccontrol and
+ * overrides to the maccontrol register
+ */
+static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw)
+{
+ u32 maccontrol = wlc_hw->maccontrol;
+
+ /* OR in the wake bit if overridden */
+ if (wlc_hw->wake_override)
+ maccontrol |= MCTL_WAKE;
+
+ /* set AP and INFRA bits for mute if needed */
+ if (wlc_hw->mute_override) {
+ maccontrol &= ~(MCTL_AP);
+ maccontrol |= MCTL_INFRA;
+ }
+
+ W_REG(&wlc_hw->regs->maccontrol, maccontrol);
+}
+
+/* set or clear maccontrol bits */
+void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val)
+{
+ u32 maccontrol;
+ u32 new_maccontrol;
+
+ if (val & ~mask)
+ return; /* error condition */
+ maccontrol = wlc_hw->maccontrol;
+ new_maccontrol = (maccontrol & ~mask) | val;
+
+ /* if the new maccontrol value is the same as the old, nothing to do */
+ if (new_maccontrol == maccontrol)
+ return;
+
+ /* something changed, cache the new value */
+ wlc_hw->maccontrol = new_maccontrol;
+
+ /* write the new values with overrides applied */
+ brcms_c_mctrl_write(wlc_hw);
+}
+
+void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
+ u32 override_bit)
+{
+ if (wlc_hw->wake_override || (wlc_hw->maccontrol & MCTL_WAKE)) {
+ mboolset(wlc_hw->wake_override, override_bit);
+ return;
+ }
+
+ mboolset(wlc_hw->wake_override, override_bit);
+
+ brcms_c_mctrl_write(wlc_hw);
+ brcms_b_wait_for_wake(wlc_hw);
+}
+
+void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
+ u32 override_bit)
+{
+ mboolclr(wlc_hw->wake_override, override_bit);
+
+ if (wlc_hw->wake_override || (wlc_hw->maccontrol & MCTL_WAKE))
+ return;
+
+ brcms_c_mctrl_write(wlc_hw);
+}
+
+/* When driver needs ucode to stop beaconing, it has to make sure that
+ * MCTL_AP is clear and MCTL_INFRA is set
+ * Mode MCTL_AP MCTL_INFRA
+ * AP 1 1
+ * STA 0 1 <--- This will ensure no beacons
+ * IBSS 0 0
+ */
+static void brcms_c_ucode_mute_override_set(struct brcms_hardware *wlc_hw)
+{
+ wlc_hw->mute_override = 1;
+
+ /* if maccontrol already has AP == 0 and INFRA == 1 without this
+ * override, then there is no change to write
+ */
+ if ((wlc_hw->maccontrol & (MCTL_AP | MCTL_INFRA)) == MCTL_INFRA)
+ return;
+
+ brcms_c_mctrl_write(wlc_hw);
+}
+
+/* Clear the override on AP and INFRA bits */
+static void brcms_c_ucode_mute_override_clear(struct brcms_hardware *wlc_hw)
+{
+ if (wlc_hw->mute_override == 0)
+ return;
+
+ wlc_hw->mute_override = 0;
+
+ /* if maccontrol already has AP == 0 and INFRA == 1 without this
+ * override, then there is no change to write
+ */
+ if ((wlc_hw->maccontrol & (MCTL_AP | MCTL_INFRA)) == MCTL_INFRA)
+ return;
+
+ brcms_c_mctrl_write(wlc_hw);
+}
+
+/*
+ * Write a MAC address to the given match reg offset in the RXE match engine.
+ */
+static void
+brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
+ const u8 *addr)
+{
+ struct d11regs __iomem *regs;
+ u16 mac_l;
+ u16 mac_m;
+ u16 mac_h;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n",
+ wlc_hw->unit);
+
+ regs = wlc_hw->regs;
+ mac_l = addr[0] | (addr[1] << 8);
+ mac_m = addr[2] | (addr[3] << 8);
+ mac_h = addr[4] | (addr[5] << 8);
+
+ /* enter the MAC addr into the RXE match registers */
+ W_REG(&regs->rcm_ctl, RCM_INC_DATA | match_reg_offset);
+ W_REG(&regs->rcm_mat_data, mac_l);
+ W_REG(&regs->rcm_mat_data, mac_m);
+ W_REG(&regs->rcm_mat_data, mac_h);
+
+}
+
+void
+brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
+ void *buf)
+{
+ struct d11regs __iomem *regs;
+ u32 word;
+ __le32 word_le;
+ __be32 word_be;
+ bool be_bit;
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ regs = wlc_hw->regs;
+ W_REG(&regs->tplatewrptr, offset);
+
+ /* if MCTL_BIGEND bit set in mac control register,
+ * the chip swaps data in fifo, as well as data in
+ * template ram
+ */
+ be_bit = (R_REG(&regs->maccontrol) & MCTL_BIGEND) != 0;
+
+ while (len > 0) {
+ memcpy(&word, buf, sizeof(u32));
+
+ if (be_bit) {
+ word_be = cpu_to_be32(word);
+ word = *(u32 *)&word_be;
+ } else {
+ word_le = cpu_to_le32(word);
+ word = *(u32 *)&word_le;
+ }
+
+ W_REG(&regs->tplatewrdata, word);
+
+ buf = (u8 *) buf + sizeof(u32);
+ len -= sizeof(u32);
+ }
+}
+
+static void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin)
+{
+ wlc_hw->band->CWmin = newmin;
+
+ W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMIN);
+ (void)R_REG(&wlc_hw->regs->objaddr);
+ W_REG(&wlc_hw->regs->objdata, newmin);
+}
+
+static void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax)
+{
+ wlc_hw->band->CWmax = newmax;
+
+ W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMAX);
+ (void)R_REG(&wlc_hw->regs->objaddr);
+ W_REG(&wlc_hw->regs->objdata, newmax);
+}
+
+void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
+{
+ bool fastclk;
+
+ /* request FAST clock if not on */
+ fastclk = wlc_hw->forcefastclk;
+ if (!fastclk)
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+
+ wlc_phy_bw_state_set(wlc_hw->band->pi, bw);
+
+ brcms_b_phy_reset(wlc_hw);
+ wlc_phy_init(wlc_hw->band->pi, wlc_phy_chanspec_get(wlc_hw->band->pi));
+
+ /* restore the clk */
+ if (!fastclk)
+ brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+}
+
+static void brcms_b_upd_synthpu(struct brcms_hardware *wlc_hw)
+{
+ u16 v;
+ struct brcms_c_info *wlc = wlc_hw->wlc;
+ /* update SYNTHPU_DLY */
+
+ if (BRCMS_ISLCNPHY(wlc->band))
+ v = SYNTHPU_DLY_LPPHY_US;
+ else if (BRCMS_ISNPHY(wlc->band) && (NREV_GE(wlc->band->phyrev, 3)))
+ v = SYNTHPU_DLY_NPHY_US;
+ else
+ v = SYNTHPU_DLY_BPHY_US;
+
+ brcms_b_write_shm(wlc_hw, M_SYNTHPU_DLY, v);
+}
+
+static void brcms_c_ucode_txant_set(struct brcms_hardware *wlc_hw)
+{
+ u16 phyctl;
+ u16 phytxant = wlc_hw->bmac_phytxant;
+ u16 mask = PHY_TXC_ANT_MASK;
+
+ /* set the Probe Response frame phy control word */
+ phyctl = brcms_b_read_shm(wlc_hw, M_CTXPRS_BLK + C_CTX_PCTLWD_POS);
+ phyctl = (phyctl & ~mask) | phytxant;
+ brcms_b_write_shm(wlc_hw, M_CTXPRS_BLK + C_CTX_PCTLWD_POS, phyctl);
+
+ /* set the Response (ACK/CTS) frame phy control word */
+ phyctl = brcms_b_read_shm(wlc_hw, M_RSP_PCTLWD);
+ phyctl = (phyctl & ~mask) | phytxant;
+ brcms_b_write_shm(wlc_hw, M_RSP_PCTLWD, phyctl);
+}
+
+static u16 brcms_b_ofdm_ratetable_offset(struct brcms_hardware *wlc_hw,
+ u8 rate)
+{
+ uint i;
+ u8 plcp_rate = 0;
+ struct plcp_signal_rate_lookup {
+ u8 rate;
+ u8 signal_rate;
+ };
+ /* OFDM RATE sub-field of PLCP SIGNAL field, per 802.11 sec 17.3.4.1 */
+ const struct plcp_signal_rate_lookup rate_lookup[] = {
+ {BRCM_RATE_6M, 0xB},
+ {BRCM_RATE_9M, 0xF},
+ {BRCM_RATE_12M, 0xA},
+ {BRCM_RATE_18M, 0xE},
+ {BRCM_RATE_24M, 0x9},
+ {BRCM_RATE_36M, 0xD},
+ {BRCM_RATE_48M, 0x8},
+ {BRCM_RATE_54M, 0xC}
+ };
+
+ for (i = 0; i < ARRAY_SIZE(rate_lookup); i++) {
+ if (rate == rate_lookup[i].rate) {
+ plcp_rate = rate_lookup[i].signal_rate;
+ break;
+ }
+ }
+
+ /* Find the SHM pointer to the rate table entry by looking in the
+ * Direct-map Table
+ */
+ return 2 * brcms_b_read_shm(wlc_hw, M_RT_DIRMAP_A + (plcp_rate * 2));
+}
+
+static void brcms_upd_ofdm_pctl1_table(struct brcms_hardware *wlc_hw)
+{
+ u8 rate;
+ u8 rates[8] = {
+ BRCM_RATE_6M, BRCM_RATE_9M, BRCM_RATE_12M, BRCM_RATE_18M,
+ BRCM_RATE_24M, BRCM_RATE_36M, BRCM_RATE_48M, BRCM_RATE_54M
+ };
+ u16 entry_ptr;
+ u16 pctl1;
+ uint i;
+
+ if (!BRCMS_PHY_11N_CAP(wlc_hw->band))
+ return;
+
+ /* walk the phy rate table and update the entries */
+ for (i = 0; i < ARRAY_SIZE(rates); i++) {
+ rate = rates[i];
+
+ entry_ptr = brcms_b_ofdm_ratetable_offset(wlc_hw, rate);
+
+ /* read the SHM Rate Table entry OFDM PCTL1 values */
+ pctl1 =
+ brcms_b_read_shm(wlc_hw, entry_ptr + M_RT_OFDM_PCTL1_POS);
+
+ /* modify the value */
+ pctl1 &= ~PHY_TXC1_MODE_MASK;
+ pctl1 |= (wlc_hw->hw_stf_ss_opmode << PHY_TXC1_MODE_SHIFT);
+
+ /* Update the SHM Rate Table entry OFDM PCTL1 values */
+ brcms_b_write_shm(wlc_hw, entry_ptr + M_RT_OFDM_PCTL1_POS,
+ pctl1);
+ }
+}
+
+/* band-specific init */
+static void brcms_b_bsinit(struct brcms_c_info *wlc, u16 chanspec)
+{
+ struct brcms_hardware *wlc_hw = wlc->hw;
+
+ BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
+ wlc_hw->band->bandunit);
+
+ brcms_c_ucode_bsinit(wlc_hw);
+
+ wlc_phy_init(wlc_hw->band->pi, chanspec);
+
+ brcms_c_ucode_txant_set(wlc_hw);
+
+ /*
+ * cwmin is band-specific, update hardware
+ * with value for current band
+ */
+ brcms_b_set_cwmin(wlc_hw, wlc_hw->band->CWmin);
+ brcms_b_set_cwmax(wlc_hw, wlc_hw->band->CWmax);
+
+ brcms_b_update_slot_timing(wlc_hw,
+ wlc_hw->band->bandtype == BRCM_BAND_5G ?
+ true : wlc_hw->shortslot);
+
+ /* write phytype and phyvers */
+ brcms_b_write_shm(wlc_hw, M_PHYTYPE, (u16) wlc_hw->band->phytype);
+ brcms_b_write_shm(wlc_hw, M_PHYVER, (u16) wlc_hw->band->phyrev);
+
+ /*
+ * initialize the txphyctl1 rate table since
+ * shmem is shared between bands
+ */
+ brcms_upd_ofdm_pctl1_table(wlc_hw);
+
+ brcms_b_upd_synthpu(wlc_hw);
+}
+
+/* Perform a soft reset of the PHY PLL */
+void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
+{
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ ai_corereg(wlc_hw->sih, SI_CC_IDX,
+ offsetof(struct chipcregs, chipcontrol_addr), ~0, 0);
+ udelay(1);
+ ai_corereg(wlc_hw->sih, SI_CC_IDX,
+ offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+ udelay(1);
+ ai_corereg(wlc_hw->sih, SI_CC_IDX,
+ offsetof(struct chipcregs, chipcontrol_data), 0x4, 4);
+ udelay(1);
+ ai_corereg(wlc_hw->sih, SI_CC_IDX,
+ offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+ udelay(1);
+}
+
+/* light way to turn on phy clock without reset for NPHY only
+ * refer to brcms_b_core_phy_clk for full version
+ */
+void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk)
+{
+ /* support(necessary for NPHY and HYPHY) only */
+ if (!BRCMS_ISNPHY(wlc_hw->band))
+ return;
+
+ if (ON == clk)
+ ai_core_cflags(wlc_hw->sih, SICF_FGC, SICF_FGC);
+ else
+ ai_core_cflags(wlc_hw->sih, SICF_FGC, 0);
+
+}
+
+void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk)
+{
+ if (ON == clk)
+ ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE);
+ else
+ ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0);
+}
+
+void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
+{
+ struct brcms_phy_pub *pih = wlc_hw->band->pi;
+ u32 phy_bw_clkbits;
+ bool phy_in_reset = false;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ if (pih == NULL)
+ return;
+
+ phy_bw_clkbits = wlc_phy_clk_bwbits(wlc_hw->band->pi);
+
+ /* Specific reset sequence required for NPHY rev 3 and 4 */
+ if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) &&
+ NREV_LE(wlc_hw->band->phyrev, 4)) {
+ /* Set the PHY bandwidth */
+ ai_core_cflags(wlc_hw->sih, SICF_BWMASK, phy_bw_clkbits);
+
+ udelay(1);
+
+ /* Perform a soft reset of the PHY PLL */
+ brcms_b_core_phypll_reset(wlc_hw);
+
+ /* reset the PHY */
+ ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_PCLKE),
+ (SICF_PRST | SICF_PCLKE));
+ phy_in_reset = true;
+ } else {
+ ai_core_cflags(wlc_hw->sih,
+ (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
+ (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
+ }
+
+ udelay(2);
+ brcms_b_core_phy_clk(wlc_hw, ON);
+
+ if (pih)
+ wlc_phy_anacore(pih, ON);
+}
+
+/* switch to and initialize new band */
+static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
+ u16 chanspec) {
+ struct brcms_c_info *wlc = wlc_hw->wlc;
+ u32 macintmask;
+
+ /* Enable the d11 core before accessing it */
+ if (!ai_iscoreup(wlc_hw->sih)) {
+ ai_core_reset(wlc_hw->sih, 0, 0);
+ brcms_c_mctrl_reset(wlc_hw);
+ }
+
+ macintmask = brcms_c_setband_inact(wlc, bandunit);
+
+ if (!wlc_hw->up)
+ return;
+
+ brcms_b_core_phy_clk(wlc_hw, ON);
+
+ /* band-specific initializations */
+ brcms_b_bsinit(wlc, chanspec);
+
+ /*
+ * If there are any pending software interrupt bits,
+ * then replace these with a harmless nonzero value
+ * so brcms_c_dpc() will re-enable interrupts when done.
+ */
+ if (wlc->macintstatus)
+ wlc->macintstatus = MI_DMAINT;
+
+ /* restore macintmask */
+ brcms_intrsrestore(wlc->wl, macintmask);
+
+ /* ucode should still be suspended.. */
+ WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+}
+
+static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw)
+{
+
+ /* reject unsupported corerev */
+ if (!CONF_HAS(D11CONF, wlc_hw->corerev)) {
+ wiphy_err(wlc_hw->wlc->wiphy, "unsupported core rev %d\n",
+ wlc_hw->corerev);
+ return false;
+ }
+
+ return true;
+}
+
+/* Validate some board info parameters */
+static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw)
+{
+ uint boardrev = wlc_hw->boardrev;
+
+ /* 4 bits each for board type, major, minor, and tiny version */
+ uint brt = (boardrev & 0xf000) >> 12;
+ uint b0 = (boardrev & 0xf00) >> 8;
+ uint b1 = (boardrev & 0xf0) >> 4;
+ uint b2 = boardrev & 0xf;
+
+ /* voards from other vendors are always considered valid */
+ if (wlc_hw->sih->boardvendor != PCI_VENDOR_ID_BROADCOM)
+ return true;
+
+ /* do some boardrev sanity checks when boardvendor is Broadcom */
+ if (boardrev == 0)
+ return false;
+
+ if (boardrev <= 0xff)
+ return true;
+
+ if ((brt > 2) || (brt == 0) || (b0 > 9) || (b0 == 0) || (b1 > 9)
+ || (b2 > 9))
+ return false;
+
+ return true;
+}
+
+static char *brcms_c_get_macaddr(struct brcms_hardware *wlc_hw)
+{
+ enum brcms_srom_id var_id = BRCMS_SROM_MACADDR;
+ char *macaddr;
+
+ /* If macaddr exists, use it (Sromrev4, CIS, ...). */
+ macaddr = getvar(wlc_hw->sih, var_id);
+ if (macaddr != NULL)
+ return macaddr;
+
+ if (wlc_hw->_nbands > 1)
+ var_id = BRCMS_SROM_ET1MACADDR;
+ else
+ var_id = BRCMS_SROM_IL0MACADDR;
+
+ macaddr = getvar(wlc_hw->sih, var_id);
+ if (macaddr == NULL)
+ wiphy_err(wlc_hw->wlc->wiphy, "wl%d: wlc_get_macaddr: macaddr "
+ "getvar(%d) not found\n", wlc_hw->unit, var_id);
+
+ return macaddr;
+}
+
+/* power both the pll and external oscillator on/off */
+static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
+{
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d: want %d\n", wlc_hw->unit, want);
+
+ /*
+ * dont power down if plldown is false or
+ * we must poll hw radio disable
+ */
+ if (!want && wlc_hw->pllreq)
+ return;
+
+ if (wlc_hw->sih)
+ ai_clkctl_xtal(wlc_hw->sih, XTAL | PLL, want);
+
+ wlc_hw->sbclk = want;
+ if (!wlc_hw->sbclk) {
+ wlc_hw->clk = false;
+ if (wlc_hw->band && wlc_hw->band->pi)
+ wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
+ }
+}
+
+/*
+ * Return true if radio is disabled, otherwise false.
+ * hw radio disable signal is an external pin, users activate it asynchronously
+ * this function could be called when driver is down and w/o clock
+ * it operates on different registers depending on corerev and boardflag.
+ */
+static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
+{
+ bool v, clk, xtal;
+ u32 resetbits = 0, flags = 0;
+
+ xtal = wlc_hw->sbclk;
+ if (!xtal)
+ brcms_b_xtal(wlc_hw, ON);
+
+ /* may need to take core out of reset first */
+ clk = wlc_hw->clk;
+ if (!clk) {
+ /*
+ * mac no longer enables phyclk automatically when driver
+ * accesses phyreg throughput mac. This can be skipped since
+ * only mac reg is accessed below
+ */
+ flags |= SICF_PCLKE;
+
+ /*
+ * AI chip doesn't restore bar0win2 on
+ * hibernation/resume, need sw fixup
+ */
+ if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
+ (wlc_hw->sih->chip == BCM43225_CHIP_ID))
+ wlc_hw->regs = (struct d11regs __iomem *)
+ ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
+ ai_core_reset(wlc_hw->sih, flags, resetbits);
+ brcms_c_mctrl_reset(wlc_hw);
+ }
+
+ v = ((R_REG(&wlc_hw->regs->phydebug) & PDBG_RFD) != 0);
+
+ /* put core back into reset */
+ if (!clk)
+ ai_core_disable(wlc_hw->sih, 0);
+
+ if (!xtal)
+ brcms_b_xtal(wlc_hw, OFF);
+
+ return v;
+}
+
+static bool wlc_dma_rxreset(struct brcms_hardware *wlc_hw, uint fifo)
+{
+ struct dma_pub *di = wlc_hw->di[fifo];
+ return dma_rxreset(di);
+}
+
+/* d11 core reset
+ * ensure fask clock during reset
+ * reset dma
+ * reset d11(out of reset)
+ * reset phy(out of reset)
+ * clear software macintstatus for fresh new start
+ * one testing hack wlc_hw->noreset will bypass the d11/phy reset
+ */
+void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
+{
+ struct d11regs __iomem *regs;
+ uint i;
+ bool fastclk;
+ u32 resetbits = 0;
+
+ if (flags == BRCMS_USE_COREFLAGS)
+ flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ regs = wlc_hw->regs;
+
+ /* request FAST clock if not on */
+ fastclk = wlc_hw->forcefastclk;
+ if (!fastclk)
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+
+ /* reset the dma engines except first time thru */
+ if (ai_iscoreup(wlc_hw->sih)) {
+ for (i = 0; i < NFIFO; i++)
+ if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i])))
+ wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: "
+ "dma_txreset[%d]: cannot stop dma\n",
+ wlc_hw->unit, __func__, i);
+
+ if ((wlc_hw->di[RX_FIFO])
+ && (!wlc_dma_rxreset(wlc_hw, RX_FIFO)))
+ wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: dma_rxreset"
+ "[%d]: cannot stop dma\n",
+ wlc_hw->unit, __func__, RX_FIFO);
+ }
+ /* if noreset, just stop the psm and return */
+ if (wlc_hw->noreset) {
+ wlc_hw->wlc->macintstatus = 0; /* skip wl_dpc after down */
+ brcms_b_mctrl(wlc_hw, MCTL_PSM_RUN | MCTL_EN_MAC, 0);
+ return;
+ }
+
+ /*
+ * mac no longer enables phyclk automatically when driver accesses
+ * phyreg throughput mac, AND phy_reset is skipped at early stage when
+ * band->pi is invalid. need to enable PHY CLK
+ */
+ flags |= SICF_PCLKE;
+
+ /*
+ * reset the core
+ * In chips with PMU, the fastclk request goes through d11 core
+ * reg 0x1e0, which is cleared by the core_reset. have to re-request it.
+ *
+ * This adds some delay and we can optimize it by also requesting
+ * fastclk through chipcommon during this period if necessary. But
+ * that has to work coordinate with other driver like mips/arm since
+ * they may touch chipcommon as well.
+ */
+ wlc_hw->clk = false;
+ ai_core_reset(wlc_hw->sih, flags, resetbits);
+ wlc_hw->clk = true;
+ if (wlc_hw->band && wlc_hw->band->pi)
+ wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true);
+
+ brcms_c_mctrl_reset(wlc_hw);
+
+ if (wlc_hw->sih->cccaps & CC_CAP_PMU)
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+
+ brcms_b_phy_reset(wlc_hw);
+
+ /* turn on PHY_PLL */
+ brcms_b_core_phypll_ctl(wlc_hw, true);
+
+ /* clear sw intstatus */
+ wlc_hw->wlc->macintstatus = 0;
+
+ /* restore the clk setting */
+ if (!fastclk)
+ brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+}
+
+/* txfifo sizes needs to be modified(increased) since the newer cores
+ * have more memory.
+ */
+static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
+{
+ struct d11regs __iomem *regs = wlc_hw->regs;
+ u16 fifo_nu;
+ u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk;
+ u16 txfifo_def, txfifo_def1;
+ u16 txfifo_cmd;
+
+ /* tx fifos start at TXFIFO_START_BLK from the Base address */
+ txfifo_startblk = TXFIFO_START_BLK;
+
+ /* sequence of operations: reset fifo, set fifo size, reset fifo */
+ for (fifo_nu = 0; fifo_nu < NFIFO; fifo_nu++) {
+
+ txfifo_endblk = txfifo_startblk + wlc_hw->xmtfifo_sz[fifo_nu];
+ txfifo_def = (txfifo_startblk & 0xff) |
+ (((txfifo_endblk - 1) & 0xff) << TXFIFO_FIFOTOP_SHIFT);
+ txfifo_def1 = ((txfifo_startblk >> 8) & 0x1) |
+ ((((txfifo_endblk -
+ 1) >> 8) & 0x1) << TXFIFO_FIFOTOP_SHIFT);
+ txfifo_cmd =
+ TXFIFOCMD_RESET_MASK | (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT);
+
+ W_REG(&regs->xmtfifocmd, txfifo_cmd);
+ W_REG(&regs->xmtfifodef, txfifo_def);
+ W_REG(&regs->xmtfifodef1, txfifo_def1);
+
+ W_REG(&regs->xmtfifocmd, txfifo_cmd);
+
+ txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu];
+ }
+ /*
+ * need to propagate to shm location to be in sync since ucode/hw won't
+ * do this
+ */
+ brcms_b_write_shm(wlc_hw, M_FIFOSIZE0,
+ wlc_hw->xmtfifo_sz[TX_AC_BE_FIFO]);
+ brcms_b_write_shm(wlc_hw, M_FIFOSIZE1,
+ wlc_hw->xmtfifo_sz[TX_AC_VI_FIFO]);
+ brcms_b_write_shm(wlc_hw, M_FIFOSIZE2,
+ ((wlc_hw->xmtfifo_sz[TX_AC_VO_FIFO] << 8) | wlc_hw->
+ xmtfifo_sz[TX_AC_BK_FIFO]));
+ brcms_b_write_shm(wlc_hw, M_FIFOSIZE3,
+ ((wlc_hw->xmtfifo_sz[TX_ATIM_FIFO] << 8) | wlc_hw->
+ xmtfifo_sz[TX_BCMC_FIFO]));
+}
+
+/* This function is used for changing the tsf frac register
+ * If spur avoidance mode is off, the mac freq will be 80/120/160Mhz
+ * If spur avoidance mode is on1, the mac freq will be 82/123/164Mhz
+ * If spur avoidance mode is on2, the mac freq will be 84/126/168Mhz
+ * HTPHY Formula is 2^26/freq(MHz) e.g.
+ * For spuron2 - 126MHz -> 2^26/126 = 532610.0
+ * - 532610 = 0x82082 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x2082
+ * For spuron: 123MHz -> 2^26/123 = 545600.5
+ * - 545601 = 0x85341 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x5341
+ * For spur off: 120MHz -> 2^26/120 = 559240.5
+ * - 559241 = 0x88889 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x8889
+ */
+
+void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
+{
+ struct d11regs __iomem *regs = wlc_hw->regs;
+
+ if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
+ (wlc_hw->sih->chip == BCM43225_CHIP_ID)) {
+ if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */
+ W_REG(&regs->tsf_clk_frac_l, 0x2082);
+ W_REG(&regs->tsf_clk_frac_h, 0x8);
+ } else if (spurmode == WL_SPURAVOID_ON1) { /* 123Mhz */
+ W_REG(&regs->tsf_clk_frac_l, 0x5341);
+ W_REG(&regs->tsf_clk_frac_h, 0x8);
+ } else { /* 120Mhz */
+ W_REG(&regs->tsf_clk_frac_l, 0x8889);
+ W_REG(&regs->tsf_clk_frac_h, 0x8);
+ }
+ } else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
+ if (spurmode == WL_SPURAVOID_ON1) { /* 82Mhz */
+ W_REG(&regs->tsf_clk_frac_l, 0x7CE0);
+ W_REG(&regs->tsf_clk_frac_h, 0xC);
+ } else { /* 80Mhz */
+ W_REG(&regs->tsf_clk_frac_l, 0xCCCD);
+ W_REG(&regs->tsf_clk_frac_h, 0xC);
+ }
+ }
+}
+
+/* Initialize GPIOs that are controlled by D11 core */
+static void brcms_c_gpio_init(struct brcms_c_info *wlc)
+{
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ struct d11regs __iomem *regs;
+ u32 gc, gm;
+
+ regs = wlc_hw->regs;
+
+ /* use GPIO select 0 to get all gpio signals from the gpio out reg */
+ brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0);
+
+ /*
+ * Common GPIO setup:
+ * G0 = LED 0 = WLAN Activity
+ * G1 = LED 1 = WLAN 2.4 GHz Radio State
+ * G2 = LED 2 = WLAN 5 GHz Radio State
+ * G4 = radio disable input (HI enabled, LO disabled)
+ */
+
+ gc = gm = 0;
+
+ /* Allocate GPIOs for mimo antenna diversity feature */
+ if (wlc_hw->antsel_type == ANTSEL_2x3) {
+ /* Enable antenna diversity, use 2x3 mode */
+ brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
+ MHF3_ANTSEL_EN, BRCM_BAND_ALL);
+ brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_MODE,
+ MHF3_ANTSEL_MODE, BRCM_BAND_ALL);
+
+ /* init superswitch control */
+ wlc_phy_antsel_init(wlc_hw->band->pi, false);
+
+ } else if (wlc_hw->antsel_type == ANTSEL_2x4) {
+ gm |= gc |= (BOARD_GPIO_12 | BOARD_GPIO_13);
+ /*
+ * The board itself is powered by these GPIOs
+ * (when not sending pattern) so set them high
+ */
+ OR_REG(&regs->psm_gpio_oe,
+ (BOARD_GPIO_12 | BOARD_GPIO_13));
+ OR_REG(&regs->psm_gpio_out,
+ (BOARD_GPIO_12 | BOARD_GPIO_13));
+
+ /* Enable antenna diversity, use 2x4 mode */
+ brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
+ MHF3_ANTSEL_EN, BRCM_BAND_ALL);
+ brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_MODE, 0,
+ BRCM_BAND_ALL);
+
+ /* Configure the desired clock to be 4Mhz */
+ brcms_b_write_shm(wlc_hw, M_ANTSEL_CLKDIV,
+ ANTSEL_CLKDIV_4MHZ);
+ }
+
+ /*
+ * gpio 9 controls the PA. ucode is responsible
+ * for wiggling out and oe
+ */
+ if (wlc_hw->boardflags & BFL_PACTRL)
+ gm |= gc |= BOARD_GPIO_PACTRL;
+
+ /* apply to gpiocontrol register */
+ ai_gpiocontrol(wlc_hw->sih, gm, gc, GPIO_DRV_PRIORITY);
+}
+
+static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
+ const __le32 ucode[], const size_t nbytes)
+{
+ struct d11regs __iomem *regs = wlc_hw->regs;
+ uint i;
+ uint count;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ count = (nbytes / sizeof(u32));
+
+ W_REG(&regs->objaddr, (OBJADDR_AUTO_INC | OBJADDR_UCM_SEL));
+ (void)R_REG(&regs->objaddr);
+ for (i = 0; i < count; i++)
+ W_REG(&regs->objdata, le32_to_cpu(ucode[i]));
+
+}
+
+static void brcms_ucode_download(struct brcms_hardware *wlc_hw)
+{
+ struct brcms_c_info *wlc;
+ struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
+
+ wlc = wlc_hw->wlc;
+
+ if (wlc_hw->ucode_loaded)
+ return;
+
+ if (D11REV_IS(wlc_hw->corerev, 23)) {
+ if (BRCMS_ISNPHY(wlc_hw->band)) {
+ brcms_ucode_write(wlc_hw, ucode->bcm43xx_16_mimo,
+ ucode->bcm43xx_16_mimosz);
+ wlc_hw->ucode_loaded = true;
+ } else
+ wiphy_err(wlc->wiphy, "%s: wl%d: unsupported phy in "
+ "corerev %d\n",
+ __func__, wlc_hw->unit, wlc_hw->corerev);
+ } else if (D11REV_IS(wlc_hw->corerev, 24)) {
+ if (BRCMS_ISLCNPHY(wlc_hw->band)) {
+ brcms_ucode_write(wlc_hw, ucode->bcm43xx_24_lcn,
+ ucode->bcm43xx_24_lcnsz);
+ wlc_hw->ucode_loaded = true;
+ } else {
+ wiphy_err(wlc->wiphy, "%s: wl%d: unsupported phy in "
+ "corerev %d\n",
+ __func__, wlc_hw->unit, wlc_hw->corerev);
+ }
+ }
+}
+
+void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant)
+{
+ /* update sw state */
+ wlc_hw->bmac_phytxant = phytxant;
+
+ /* push to ucode if up */
+ if (!wlc_hw->up)
+ return;
+ brcms_c_ucode_txant_set(wlc_hw);
+
+}
+
+u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw)
+{
+ return (u16) wlc_hw->wlc->stf->txant;
+}
+
+void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type)
+{
+ wlc_hw->antsel_type = antsel_type;
+
+ /* Update the antsel type for phy module to use */
+ wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type);
+}
+
+static void brcms_c_fatal_error(struct brcms_c_info *wlc)
+{
+ wiphy_err(wlc->wiphy, "wl%d: fatal error, reinitializing\n",
+ wlc->pub->unit);
+ brcms_init(wlc->wl);
+}
+
+static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
+{
+ bool fatal = false;
+ uint unit;
+ uint intstatus, idx;
+ struct d11regs __iomem *regs = wlc_hw->regs;
+ struct wiphy *wiphy = wlc_hw->wlc->wiphy;
+
+ unit = wlc_hw->unit;
+
+ for (idx = 0; idx < NFIFO; idx++) {
+ /* read intstatus register and ignore any non-error bits */
+ intstatus =
+ R_REG(&regs->intctrlregs[idx].intstatus) & I_ERRORS;
+ if (!intstatus)
+ continue;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d: intstatus%d 0x%x\n",
+ unit, idx, intstatus);
+
+ if (intstatus & I_RO) {
+ wiphy_err(wiphy, "wl%d: fifo %d: receive fifo "
+ "overflow\n", unit, idx);
+ fatal = true;
+ }
+
+ if (intstatus & I_PC) {
+ wiphy_err(wiphy, "wl%d: fifo %d: descriptor error\n",
+ unit, idx);
+ fatal = true;
+ }
+
+ if (intstatus & I_PD) {
+ wiphy_err(wiphy, "wl%d: fifo %d: data error\n", unit,
+ idx);
+ fatal = true;
+ }
+
+ if (intstatus & I_DE) {
+ wiphy_err(wiphy, "wl%d: fifo %d: descriptor protocol "
+ "error\n", unit, idx);
+ fatal = true;
+ }
+
+ if (intstatus & I_RU)
+ wiphy_err(wiphy, "wl%d: fifo %d: receive descriptor "
+ "underflow\n", idx, unit);
+
+ if (intstatus & I_XU) {
+ wiphy_err(wiphy, "wl%d: fifo %d: transmit fifo "
+ "underflow\n", idx, unit);
+ fatal = true;
+ }
+
+ if (fatal) {
+ brcms_c_fatal_error(wlc_hw->wlc); /* big hammer */
+ break;
+ } else
+ W_REG(&regs->intctrlregs[idx].intstatus,
+ intstatus);
+ }
+}
+
+void brcms_c_intrson(struct brcms_c_info *wlc)
+{
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ wlc->macintmask = wlc->defmacintmask;
+ W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
+}
+
+/*
+ * callback for siutils.c, which has only wlc handler, no wl they both check
+ * up, not only because there is no need to off/restore d11 interrupt but also
+ * because per-port code may require sync with valid interrupt.
+ */
+static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc)
+{
+ if (!wlc->hw->up)
+ return 0;
+
+ return brcms_intrsoff(wlc->wl);
+}
+
+static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask)
+{
+ if (!wlc->hw->up)
+ return;
+
+ brcms_intrsrestore(wlc->wl, macintmask);
+}
+
+u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
+{
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ u32 macintmask;
+
+ if (!wlc_hw->clk)
+ return 0;
+
+ macintmask = wlc->macintmask; /* isr can still happen */
+
+ W_REG(&wlc_hw->regs->macintmask, 0);
+ (void)R_REG(&wlc_hw->regs->macintmask); /* sync readback */
+ udelay(1); /* ensure int line is no longer driven */
+ wlc->macintmask = 0;
+
+ /* return previous macintmask; resolve race between us and our isr */
+ return wlc->macintstatus ? 0 : macintmask;
+}
+
+void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask)
+{
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ if (!wlc_hw->clk)
+ return;
+
+ wlc->macintmask = macintmask;
+ W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
+}
+
+static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw,
+ uint tx_fifo)
+{
+ u8 fifo = 1 << tx_fifo;
+
+ /* Two clients of this code, 11h Quiet period and scanning. */
+
+ /* only suspend if not already suspended */
+ if ((wlc_hw->suspended_fifos & fifo) == fifo)
+ return;
+
+ /* force the core awake only if not already */
+ if (wlc_hw->suspended_fifos == 0)
+ brcms_c_ucode_wake_override_set(wlc_hw,
+ BRCMS_WAKE_OVERRIDE_TXFIFO);
+
+ wlc_hw->suspended_fifos |= fifo;
+
+ if (wlc_hw->di[tx_fifo]) {
+ /*
+ * Suspending AMPDU transmissions in the middle can cause
+ * underflow which may result in mismatch between ucode and
+ * driver so suspend the mac before suspending the FIFO
+ */
+ if (BRCMS_PHY_11N_CAP(wlc_hw->band))
+ brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
+
+ dma_txsuspend(wlc_hw->di[tx_fifo]);
+
+ if (BRCMS_PHY_11N_CAP(wlc_hw->band))
+ brcms_c_enable_mac(wlc_hw->wlc);
+ }
+}
+
+static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw,
+ uint tx_fifo)
+{
+ /* BMAC_NOTE: BRCMS_TX_FIFO_ENAB is done in brcms_c_dpc() for DMA case
+ * but need to be done here for PIO otherwise the watchdog will catch
+ * the inconsistency and fire
+ */
+ /* Two clients of this code, 11h Quiet period and scanning. */
+ if (wlc_hw->di[tx_fifo])
+ dma_txresume(wlc_hw->di[tx_fifo]);
+
+ /* allow core to sleep again */
+ if (wlc_hw->suspended_fifos == 0)
+ return;
+ else {
+ wlc_hw->suspended_fifos &= ~(1 << tx_fifo);
+ if (wlc_hw->suspended_fifos == 0)
+ brcms_c_ucode_wake_override_clear(wlc_hw,
+ BRCMS_WAKE_OVERRIDE_TXFIFO);
+ }
+}
+
+static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags)
+{
+ static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+
+ if (on) {
+ /* suspend tx fifos */
+ brcms_b_tx_fifo_suspend(wlc_hw, TX_DATA_FIFO);
+ brcms_b_tx_fifo_suspend(wlc_hw, TX_CTL_FIFO);
+ brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_BK_FIFO);
+ brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_VI_FIFO);
+
+ /* zero the address match register so we do not send ACKs */
+ brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
+ null_ether_addr);
+ } else {
+ /* resume tx fifos */
+ brcms_b_tx_fifo_resume(wlc_hw, TX_DATA_FIFO);
+ brcms_b_tx_fifo_resume(wlc_hw, TX_CTL_FIFO);
+ brcms_b_tx_fifo_resume(wlc_hw, TX_AC_BK_FIFO);
+ brcms_b_tx_fifo_resume(wlc_hw, TX_AC_VI_FIFO);
+
+ /* Restore address */
+ brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
+ wlc_hw->etheraddr);
+ }
+
+ wlc_phy_mute_upd(wlc_hw->band->pi, on, flags);
+
+ if (on)
+ brcms_c_ucode_mute_override_set(wlc_hw);
+ else
+ brcms_c_ucode_mute_override_clear(wlc_hw);
+}
+
+/*
+ * Read and clear macintmask and macintstatus and intstatus registers.
+ * This routine should be called with interrupts off
+ * Return:
+ * -1 if brcms_deviceremoved(wlc) evaluates to true;
+ * 0 if the interrupt is not for us, or we are in some special cases;
+ * device interrupt status bits otherwise.
+ */
+static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
+{
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ struct d11regs __iomem *regs = wlc_hw->regs;
+ u32 macintstatus;
+
+ /* macintstatus includes a DMA interrupt summary bit */
+ macintstatus = R_REG(&regs->macintstatus);
+
+ BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit,
+ macintstatus);
+
+ /* detect cardbus removed, in power down(suspend) and in reset */
+ if (brcms_deviceremoved(wlc))
+ return -1;
+
+ /* brcms_deviceremoved() succeeds even when the core is still resetting,
+ * handle that case here.
+ */
+ if (macintstatus == 0xffffffff)
+ return 0;
+
+ /* defer unsolicited interrupts */
+ macintstatus &= (in_isr ? wlc->macintmask : wlc->defmacintmask);
+
+ /* if not for us */
+ if (macintstatus == 0)
+ return 0;
+
+ /* interrupts are already turned off for CFE build
+ * Caution: For CFE Turning off the interrupts again has some undesired
+ * consequences
+ */
+ /* turn off the interrupts */
+ W_REG(&regs->macintmask, 0);
+ (void)R_REG(&regs->macintmask); /* sync readback */
+ wlc->macintmask = 0;
+
+ /* clear device interrupts */
+ W_REG(&regs->macintstatus, macintstatus);
+
+ /* MI_DMAINT is indication of non-zero intstatus */
+ if (macintstatus & MI_DMAINT)
+ /*
+ * only fifo interrupt enabled is I_RI in
+ * RX_FIFO. If MI_DMAINT is set, assume it
+ * is set and clear the interrupt.
+ */
+ W_REG(&regs->intctrlregs[RX_FIFO].intstatus,
+ DEF_RXINTMASK);
+
+ return macintstatus;
+}
+
+/* Update wlc->macintstatus and wlc->intstatus[]. */
+/* Return true if they are updated successfully. false otherwise */
+bool brcms_c_intrsupd(struct brcms_c_info *wlc)
+{
+ u32 macintstatus;
+
+ /* read and clear macintstatus and intstatus registers */
+ macintstatus = wlc_intstatus(wlc, false);
+
+ /* device is removed */
+ if (macintstatus == 0xffffffff)
+ return false;
+
+ /* update interrupt status in software */
+ wlc->macintstatus |= macintstatus;
+
+ return true;
+}
+
+/*
+ * First-level interrupt processing.
+ * Return true if this was our interrupt, false otherwise.
+ * *wantdpc will be set to true if further brcms_c_dpc() processing is required,
+ * false otherwise.
+ */
+bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc)
+{
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ u32 macintstatus;
+
+ *wantdpc = false;
+
+ if (!wlc_hw->up || !wlc->macintmask)
+ return false;
+
+ /* read and clear macintstatus and intstatus registers */
+ macintstatus = wlc_intstatus(wlc, true);
+
+ if (macintstatus == 0xffffffff)
+ wiphy_err(wlc->wiphy, "DEVICEREMOVED detected in the ISR code"
+ " path\n");
+
+ /* it is not for us */
+ if (macintstatus == 0)
+ return false;
+
+ *wantdpc = true;
+
+ /* save interrupt status bits */
+ wlc->macintstatus = macintstatus;
+
+ return true;
+
+}
+
+void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
+{
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ struct d11regs __iomem *regs = wlc_hw->regs;
+ u32 mc, mi;
+ struct wiphy *wiphy = wlc->wiphy;
+
+ BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
+ wlc_hw->band->bandunit);
+
+ /*
+ * Track overlapping suspend requests
+ */
+ wlc_hw->mac_suspend_depth++;
+ if (wlc_hw->mac_suspend_depth > 1)
+ return;
+
+ /* force the core awake */
+ brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
+
+ mc = R_REG(&regs->maccontrol);
+
+ if (mc == 0xffffffff) {
+ wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
+ __func__);
+ brcms_down(wlc->wl);
+ return;
+ }
+ WARN_ON(mc & MCTL_PSM_JMP_0);
+ WARN_ON(!(mc & MCTL_PSM_RUN));
+ WARN_ON(!(mc & MCTL_EN_MAC));
+
+ mi = R_REG(&regs->macintstatus);
+ if (mi == 0xffffffff) {
+ wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
+ __func__);
+ brcms_down(wlc->wl);
+ return;
+ }
+ WARN_ON(mi & MI_MACSSPNDD);
+
+ brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0);
+
+ SPINWAIT(!(R_REG(&regs->macintstatus) & MI_MACSSPNDD),
+ BRCMS_MAX_MAC_SUSPEND);
+
+ if (!(R_REG(&regs->macintstatus) & MI_MACSSPNDD)) {
+ wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
+ " and MI_MACSSPNDD is still not on.\n",
+ wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
+ wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
+ "psm_brc 0x%04x\n", wlc_hw->unit,
+ R_REG(&regs->psmdebug),
+ R_REG(&regs->phydebug),
+ R_REG(&regs->psm_brc));
+ }
+
+ mc = R_REG(&regs->maccontrol);
+ if (mc == 0xffffffff) {
+ wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
+ __func__);
+ brcms_down(wlc->wl);
+ return;
+ }
+ WARN_ON(mc & MCTL_PSM_JMP_0);
+ WARN_ON(!(mc & MCTL_PSM_RUN));
+ WARN_ON(mc & MCTL_EN_MAC);
+}
+
+void brcms_c_enable_mac(struct brcms_c_info *wlc)
+{
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ struct d11regs __iomem *regs = wlc_hw->regs;
+ u32 mc, mi;
+
+ BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
+ wlc->band->bandunit);
+
+ /*
+ * Track overlapping suspend requests
+ */
+ wlc_hw->mac_suspend_depth--;
+ if (wlc_hw->mac_suspend_depth > 0)
+ return;
+
+ mc = R_REG(&regs->maccontrol);
+ WARN_ON(mc & MCTL_PSM_JMP_0);
+ WARN_ON(mc & MCTL_EN_MAC);
+ WARN_ON(!(mc & MCTL_PSM_RUN));
+
+ brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC);
+ W_REG(&regs->macintstatus, MI_MACSSPNDD);
+
+ mc = R_REG(&regs->maccontrol);
+ WARN_ON(mc & MCTL_PSM_JMP_0);
+ WARN_ON(!(mc & MCTL_EN_MAC));
+ WARN_ON(!(mc & MCTL_PSM_RUN));
+
+ mi = R_REG(&regs->macintstatus);
+ WARN_ON(mi & MI_MACSSPNDD);
+
+ brcms_c_ucode_wake_override_clear(wlc_hw,
+ BRCMS_WAKE_OVERRIDE_MACSUSPEND);
+}
+
+void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode)
+{
+ wlc_hw->hw_stf_ss_opmode = stf_mode;
+
+ if (wlc_hw->clk)
+ brcms_upd_ofdm_pctl1_table(wlc_hw);
+}
+
+static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
+{
+ struct d11regs __iomem *regs;
+ u32 w, val;
+ struct wiphy *wiphy = wlc_hw->wlc->wiphy;
+
+ BCMMSG(wiphy, "wl%d\n", wlc_hw->unit);
+
+ regs = wlc_hw->regs;
+
+ /* Validate dchip register access */
+
+ W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
+ (void)R_REG(&regs->objaddr);
+ w = R_REG(&regs->objdata);
+
+ /* Can we write and read back a 32bit register? */
+ W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
+ (void)R_REG(&regs->objaddr);
+ W_REG(&regs->objdata, (u32) 0xaa5555aa);
+
+ W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
+ (void)R_REG(&regs->objaddr);
+ val = R_REG(&regs->objdata);
+ if (val != (u32) 0xaa5555aa) {
+ wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
+ "expected 0xaa5555aa\n", wlc_hw->unit, val);
+ return false;
+ }
+
+ W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
+ (void)R_REG(&regs->objaddr);
+ W_REG(&regs->objdata, (u32) 0x55aaaa55);
+
+ W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
+ (void)R_REG(&regs->objaddr);
+ val = R_REG(&regs->objdata);
+ if (val != (u32) 0x55aaaa55) {
+ wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
+ "expected 0x55aaaa55\n", wlc_hw->unit, val);
+ return false;
+ }
+
+ W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
+ (void)R_REG(&regs->objaddr);
+ W_REG(&regs->objdata, w);
+
+ /* clear CFPStart */
+ W_REG(&regs->tsf_cfpstart, 0);
+
+ w = R_REG(&regs->maccontrol);
+ if ((w != (MCTL_IHR_EN | MCTL_WAKE)) &&
+ (w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) {
+ wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = "
+ "0x%x, expected 0x%x or 0x%x\n", wlc_hw->unit, w,
+ (MCTL_IHR_EN | MCTL_WAKE),
+ (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE));
+ return false;
+ }
+
+ return true;
+}
+
+#define PHYPLL_WAIT_US 100000
+
+void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
+{
+ struct d11regs __iomem *regs;
+ u32 tmp;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ tmp = 0;
+ regs = wlc_hw->regs;
+
+ if (on) {
+ if ((wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
+ OR_REG(&regs->clk_ctl_st,
+ (CCS_ERSRC_REQ_HT | CCS_ERSRC_REQ_D11PLL |
+ CCS_ERSRC_REQ_PHYPLL));
+ SPINWAIT((R_REG(&regs->clk_ctl_st) &
+ (CCS_ERSRC_AVAIL_HT)) != (CCS_ERSRC_AVAIL_HT),
+ PHYPLL_WAIT_US);
+
+ tmp = R_REG(&regs->clk_ctl_st);
+ if ((tmp & (CCS_ERSRC_AVAIL_HT)) !=
+ (CCS_ERSRC_AVAIL_HT))
+ wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY"
+ " PLL failed\n", __func__);
+ } else {
+ OR_REG(&regs->clk_ctl_st,
+ (CCS_ERSRC_REQ_D11PLL | CCS_ERSRC_REQ_PHYPLL));
+ SPINWAIT((R_REG(&regs->clk_ctl_st) &
+ (CCS_ERSRC_AVAIL_D11PLL |
+ CCS_ERSRC_AVAIL_PHYPLL)) !=
+ (CCS_ERSRC_AVAIL_D11PLL |
+ CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US);
+
+ tmp = R_REG(&regs->clk_ctl_st);
+ if ((tmp &
+ (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
+ !=
+ (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
+ wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on "
+ "PHY PLL failed\n", __func__);
+ }
+ } else {
+ /*
+ * Since the PLL may be shared, other cores can still
+ * be requesting it; so we'll deassert the request but
+ * not wait for status to comply.
+ */
+ AND_REG(&regs->clk_ctl_st, ~CCS_ERSRC_REQ_PHYPLL);
+ tmp = R_REG(&regs->clk_ctl_st);
+ }
+}
+
+static void brcms_c_coredisable(struct brcms_hardware *wlc_hw)
+{
+ bool dev_gone;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ dev_gone = brcms_deviceremoved(wlc_hw->wlc);
+
+ if (dev_gone)
+ return;
+
+ if (wlc_hw->noreset)
+ return;
+
+ /* radio off */
+ wlc_phy_switch_radio(wlc_hw->band->pi, OFF);
+
+ /* turn off analog core */
+ wlc_phy_anacore(wlc_hw->band->pi, OFF);
+
+ /* turn off PHYPLL to save power */
+ brcms_b_core_phypll_ctl(wlc_hw, false);
+
+ wlc_hw->clk = false;
+ ai_core_disable(wlc_hw->sih, 0);
+ wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
+}
+
+static void brcms_c_flushqueues(struct brcms_c_info *wlc)
+{
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ uint i;
+
+ /* free any posted tx packets */
+ for (i = 0; i < NFIFO; i++)
+ if (wlc_hw->di[i]) {
+ dma_txreclaim(wlc_hw->di[i], DMA_RANGE_ALL);
+ wlc->core->txpktpend[i] = 0;
+ BCMMSG(wlc->wiphy, "pktpend fifo %d clrd\n", i);
+ }
+
+ /* free any posted rx packets */
+ dma_rxreclaim(wlc_hw->di[RX_FIFO]);
+}
+
+static u16
+brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel)
+{
+ struct d11regs __iomem *regs = wlc_hw->regs;
+ u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
+ u16 __iomem *objdata_hi = objdata_lo + 1;
+ u16 v;
+
+ W_REG(&regs->objaddr, sel | (offset >> 2));
+ (void)R_REG(&regs->objaddr);
+ if (offset & 2)
+ v = R_REG(objdata_hi);
+ else
+ v = R_REG(objdata_lo);
+
+ return v;
+}
+
+static void
+brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v,
+ u32 sel)
+{
+ struct d11regs __iomem *regs = wlc_hw->regs;
+ u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
+ u16 __iomem *objdata_hi = objdata_lo + 1;
+
+ W_REG(&regs->objaddr, sel | (offset >> 2));
+ (void)R_REG(&regs->objaddr);
+ if (offset & 2)
+ W_REG(objdata_hi, v);
+ else
+ W_REG(objdata_lo, v);
+}
+
+/*
+ * Read a single u16 from shared memory.
+ * SHM 'offset' needs to be an even address
+ */
+u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset)
+{
+ return brcms_b_read_objmem(wlc_hw, offset, OBJADDR_SHM_SEL);
+}
+
+/*
+ * Write a single u16 to shared memory.
+ * SHM 'offset' needs to be an even address
+ */
+void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, u16 v)
+{
+ brcms_b_write_objmem(wlc_hw, offset, v, OBJADDR_SHM_SEL);
+}
+
+/*
+ * Copy a buffer to shared memory of specified type .
+ * SHM 'offset' needs to be an even address and
+ * Buffer length 'len' must be an even number of bytes
+ * 'sel' selects the type of memory
+ */
+void
+brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, uint offset,
+ const void *buf, int len, u32 sel)
+{
+ u16 v;
+ const u8 *p = (const u8 *)buf;
+ int i;
+
+ if (len <= 0 || (offset & 1) || (len & 1))
+ return;
+
+ for (i = 0; i < len; i += 2) {
+ v = p[i] | (p[i + 1] << 8);
+ brcms_b_write_objmem(wlc_hw, offset + i, v, sel);
+ }
+}
+
+/*
+ * Copy a piece of shared memory of specified type to a buffer .
+ * SHM 'offset' needs to be an even address and
+ * Buffer length 'len' must be an even number of bytes
+ * 'sel' selects the type of memory
+ */
+void
+brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset, void *buf,
+ int len, u32 sel)
+{
+ u16 v;
+ u8 *p = (u8 *) buf;
+ int i;
+
+ if (len <= 0 || (offset & 1) || (len & 1))
+ return;
+
+ for (i = 0; i < len; i += 2) {
+ v = brcms_b_read_objmem(wlc_hw, offset + i, sel);
+ p[i] = v & 0xFF;
+ p[i + 1] = (v >> 8) & 0xFF;
+ }
+}
+
+/* Copy a buffer to shared memory.
+ * SHM 'offset' needs to be an even address and
+ * Buffer length 'len' must be an even number of bytes
+ */
+static void brcms_c_copyto_shm(struct brcms_c_info *wlc, uint offset,
+ const void *buf, int len)
+{
+ brcms_b_copyto_objmem(wlc->hw, offset, buf, len, OBJADDR_SHM_SEL);
+}
+
+static void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw,
+ u16 SRL, u16 LRL)
+{
+ wlc_hw->SRL = SRL;
+ wlc_hw->LRL = LRL;
+
+ /* write retry limit to SCR, shouldn't need to suspend */
+ if (wlc_hw->up) {
+ W_REG(&wlc_hw->regs->objaddr,
+ OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+ (void)R_REG(&wlc_hw->regs->objaddr);
+ W_REG(&wlc_hw->regs->objdata, wlc_hw->SRL);
+ W_REG(&wlc_hw->regs->objaddr,
+ OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+ (void)R_REG(&wlc_hw->regs->objaddr);
+ W_REG(&wlc_hw->regs->objdata, wlc_hw->LRL);
+ }
+}
+
+static void brcms_b_pllreq(struct brcms_hardware *wlc_hw, bool set, u32 req_bit)
+{
+ if (set) {
+ if (mboolisset(wlc_hw->pllreq, req_bit))
+ return;
+
+ mboolset(wlc_hw->pllreq, req_bit);
+
+ if (mboolisset(wlc_hw->pllreq, BRCMS_PLLREQ_FLIP)) {
+ if (!wlc_hw->sbclk)
+ brcms_b_xtal(wlc_hw, ON);
+ }
+ } else {
+ if (!mboolisset(wlc_hw->pllreq, req_bit))
+ return;
+
+ mboolclr(wlc_hw->pllreq, req_bit);
+
+ if (mboolisset(wlc_hw->pllreq, BRCMS_PLLREQ_FLIP)) {
+ if (wlc_hw->sbclk)
+ brcms_b_xtal(wlc_hw, OFF);
+ }
+ }
+}
+
+static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
+{
+ wlc_hw->antsel_avail = antsel_avail;
+}
+
+/*
+ * conditions under which the PM bit should be set in outgoing frames
+ * and STAY_AWAKE is meaningful
+ */
+static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
+{
+ struct brcms_bss_cfg *cfg = wlc->bsscfg;
+
+ /* disallow PS when one of the following global conditions meets */
+ if (!wlc->pub->associated)
+ return false;
+
+ /* disallow PS when one of these meets when not scanning */
+ if (wlc->monitor)
+ return false;
+
+ if (cfg->associated) {
+ /*
+ * disallow PS when one of the following
+ * bsscfg specific conditions meets
+ */
+ if (!cfg->BSS)
+ return false;
+
+ return false;
+ }
+
+ return true;
+}
+
+static void brcms_c_statsupd(struct brcms_c_info *wlc)
+{
+ int i;
+ struct macstat macstats;
+#ifdef BCMDBG
+ u16 delta;
+ u16 rxf0ovfl;
+ u16 txfunfl[NFIFO];
+#endif /* BCMDBG */
+
+ /* if driver down, make no sense to update stats */
+ if (!wlc->pub->up)
+ return;
+
+#ifdef BCMDBG
+ /* save last rx fifo 0 overflow count */
+ rxf0ovfl = wlc->core->macstat_snapshot->rxf0ovfl;
+
+ /* save last tx fifo underflow count */
+ for (i = 0; i < NFIFO; i++)
+ txfunfl[i] = wlc->core->macstat_snapshot->txfunfl[i];
+#endif /* BCMDBG */
+
+ /* Read mac stats from contiguous shared memory */
+ brcms_b_copyfrom_objmem(wlc->hw, M_UCODE_MACSTAT, &macstats,
+ sizeof(struct macstat), OBJADDR_SHM_SEL);
+
+#ifdef BCMDBG
+ /* check for rx fifo 0 overflow */
+ delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl);
+ if (delta)
+ wiphy_err(wlc->wiphy, "wl%d: %u rx fifo 0 overflows!\n",
+ wlc->pub->unit, delta);
+
+ /* check for tx fifo underflows */
+ for (i = 0; i < NFIFO; i++) {
+ delta =
+ (u16) (wlc->core->macstat_snapshot->txfunfl[i] -
+ txfunfl[i]);
+ if (delta)
+ wiphy_err(wlc->wiphy, "wl%d: %u tx fifo %d underflows!"
+ "\n", wlc->pub->unit, delta, i);
+ }
+#endif /* BCMDBG */
+
+ /* merge counters from dma module */
+ for (i = 0; i < NFIFO; i++) {
+ if (wlc->hw->di[i])
+ dma_counterreset(wlc->hw->di[i]);
+ }
+}
+
+static void brcms_b_reset(struct brcms_hardware *wlc_hw)
+{
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ /* reset the core */
+ if (!brcms_deviceremoved(wlc_hw->wlc))
+ brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
+
+ /* purge the dma rings */
+ brcms_c_flushqueues(wlc_hw->wlc);
+}
+
+void brcms_c_reset(struct brcms_c_info *wlc)
+{
+ BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+
+ /* slurp up hw mac counters before core reset */
+ brcms_c_statsupd(wlc);
+
+ /* reset our snapshot of macstat counters */
+ memset((char *)wlc->core->macstat_snapshot, 0,
+ sizeof(struct macstat));
+
+ brcms_b_reset(wlc->hw);
+}
+
+/* Return the channel the driver should initialize during brcms_c_init.
+ * the channel may have to be changed from the currently configured channel
+ * if other configurations are in conflict (bandlocked, 11n mode disabled,
+ * invalid channel for current country, etc.)
+ */
+static u16 brcms_c_init_chanspec(struct brcms_c_info *wlc)
+{
+ u16 chanspec =
+ 1 | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE |
+ WL_CHANSPEC_BAND_2G;
+
+ return chanspec;
+}
+
+void brcms_c_init_scb(struct scb *scb)
+{
+ int i;
+
+ memset(scb, 0, sizeof(struct scb));
+ scb->flags = SCB_WMECAP | SCB_HTCAP;
+ for (i = 0; i < NUMPRIO; i++) {
+ scb->seqnum[i] = 0;
+ scb->seqctl[i] = 0xFFFF;
+ }
+
+ scb->seqctl_nonqos = 0xFFFF;
+ scb->magic = SCB_MAGIC;
+}
+
+/* d11 core init
+ * reset PSM
+ * download ucode/PCM
+ * let ucode run to suspended
+ * download ucode inits
+ * config other core registers
+ * init dma
+ */
+static void brcms_b_coreinit(struct brcms_c_info *wlc)
+{
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ struct d11regs __iomem *regs;
+ u32 sflags;
+ uint bcnint_us;
+ uint i = 0;
+ bool fifosz_fixup = false;
+ int err = 0;
+ u16 buf[NFIFO];
+ struct wiphy *wiphy = wlc->wiphy;
+ struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
+
+ regs = wlc_hw->regs;
+
+ BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ /* reset PSM */
+ brcms_b_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_PSM_JMP_0 | MCTL_WAKE));
+
+ brcms_ucode_download(wlc_hw);
+ /*
+ * FIFOSZ fixup. driver wants to controls the fifo allocation.
+ */
+ fifosz_fixup = true;
+
+ /* let the PSM run to the suspended state, set mode to BSS STA */
+ W_REG(&regs->macintstatus, -1);
+ brcms_b_mctrl(wlc_hw, ~0,
+ (MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE));
+
+ /* wait for ucode to self-suspend after auto-init */
+ SPINWAIT(((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0),
+ 1000 * 1000);
+ if ((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0)
+ wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-"
+ "suspend!\n", wlc_hw->unit);
+
+ brcms_c_gpio_init(wlc);
+
+ sflags = ai_core_sflags(wlc_hw->sih, 0, 0);
+
+ if (D11REV_IS(wlc_hw->corerev, 23)) {
+ if (BRCMS_ISNPHY(wlc_hw->band))
+ brcms_c_write_inits(wlc_hw, ucode->d11n0initvals16);
+ else
+ wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
+ " %d\n", __func__, wlc_hw->unit,
+ wlc_hw->corerev);
+ } else if (D11REV_IS(wlc_hw->corerev, 24)) {
+ if (BRCMS_ISLCNPHY(wlc_hw->band))
+ brcms_c_write_inits(wlc_hw, ucode->d11lcn0initvals24);
+ else
+ wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
+ " %d\n", __func__, wlc_hw->unit,
+ wlc_hw->corerev);
+ } else {
+ wiphy_err(wiphy, "%s: wl%d: unsupported corerev %d\n",
+ __func__, wlc_hw->unit, wlc_hw->corerev);
+ }
+
+ /* For old ucode, txfifo sizes needs to be modified(increased) */
+ if (fifosz_fixup == true)
+ brcms_b_corerev_fifofixup(wlc_hw);
+
+ /* check txfifo allocations match between ucode and driver */
+ buf[TX_AC_BE_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE0);
+ if (buf[TX_AC_BE_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_BE_FIFO]) {
+ i = TX_AC_BE_FIFO;
+ err = -1;
+ }
+ buf[TX_AC_VI_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE1);
+ if (buf[TX_AC_VI_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_VI_FIFO]) {
+ i = TX_AC_VI_FIFO;
+ err = -1;
+ }
+ buf[TX_AC_BK_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE2);
+ buf[TX_AC_VO_FIFO] = (buf[TX_AC_BK_FIFO] >> 8) & 0xff;
+ buf[TX_AC_BK_FIFO] &= 0xff;
+ if (buf[TX_AC_BK_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_BK_FIFO]) {
+ i = TX_AC_BK_FIFO;
+ err = -1;
+ }
+ if (buf[TX_AC_VO_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_VO_FIFO]) {
+ i = TX_AC_VO_FIFO;
+ err = -1;
+ }
+ buf[TX_BCMC_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE3);
+ buf[TX_ATIM_FIFO] = (buf[TX_BCMC_FIFO] >> 8) & 0xff;
+ buf[TX_BCMC_FIFO] &= 0xff;
+ if (buf[TX_BCMC_FIFO] != wlc_hw->xmtfifo_sz[TX_BCMC_FIFO]) {
+ i = TX_BCMC_FIFO;
+ err = -1;
+ }
+ if (buf[TX_ATIM_FIFO] != wlc_hw->xmtfifo_sz[TX_ATIM_FIFO]) {
+ i = TX_ATIM_FIFO;
+ err = -1;
+ }
+ if (err != 0)
+ wiphy_err(wiphy, "wlc_coreinit: txfifo mismatch: ucode size %d"
+ " driver size %d index %d\n", buf[i],
+ wlc_hw->xmtfifo_sz[i], i);
+
+ /* make sure we can still talk to the mac */
+ WARN_ON(R_REG(&regs->maccontrol) == 0xffffffff);
+
+ /* band-specific inits done by wlc_bsinit() */
+
+ /* Set up frame burst size and antenna swap threshold init values */
+ brcms_b_write_shm(wlc_hw, M_MBURST_SIZE, MAXTXFRAMEBURST);
+ brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT);
+
+ /* enable one rx interrupt per received frame */
+ W_REG(&regs->intrcvlazy[0], (1 << IRL_FC_SHIFT));
+
+ /* set the station mode (BSS STA) */
+ brcms_b_mctrl(wlc_hw,
+ (MCTL_INFRA | MCTL_DISCARD_PMQ | MCTL_AP),
+ (MCTL_INFRA | MCTL_DISCARD_PMQ));
+
+ /* set up Beacon interval */
+ bcnint_us = 0x8000 << 10;
+ W_REG(&regs->tsf_cfprep, (bcnint_us << CFPREP_CBI_SHIFT));
+ W_REG(&regs->tsf_cfpstart, bcnint_us);
+ W_REG(&regs->macintstatus, MI_GP1);
+
+ /* write interrupt mask */
+ W_REG(&regs->intctrlregs[RX_FIFO].intmask, DEF_RXINTMASK);
+
+ /* allow the MAC to control the PHY clock (dynamic on/off) */
+ brcms_b_macphyclk_set(wlc_hw, ON);
+
+ /* program dynamic clock control fast powerup delay register */
+ wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih);
+ W_REG(&regs->scc_fastpwrup_dly, wlc->fastpwrup_dly);
+
+ /* tell the ucode the corerev */
+ brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev);
+
+ /* tell the ucode MAC capabilities */
+ brcms_b_write_shm(wlc_hw, M_MACHW_CAP_L,
+ (u16) (wlc_hw->machwcap & 0xffff));
+ brcms_b_write_shm(wlc_hw, M_MACHW_CAP_H,
+ (u16) ((wlc_hw->
+ machwcap >> 16) & 0xffff));
+
+ /* write retry limits to SCR, this done after PSM init */
+ W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+ (void)R_REG(&regs->objaddr);
+ W_REG(&regs->objdata, wlc_hw->SRL);
+ W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+ (void)R_REG(&regs->objaddr);
+ W_REG(&regs->objdata, wlc_hw->LRL);
+
+ /* write rate fallback retry limits */
+ brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL);
+ brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL);
+
+ AND_REG(&regs->ifs_ctl, 0x0FFF);
+ W_REG(&regs->ifs_aifsn, EDCF_AIFSN_MIN);
+
+ /* init the tx dma engines */
+ for (i = 0; i < NFIFO; i++) {
+ if (wlc_hw->di[i])
+ dma_txinit(wlc_hw->di[i]);
+ }
+
+ /* init the rx dma engine(s) and post receive buffers */
+ dma_rxinit(wlc_hw->di[RX_FIFO]);
+ dma_rxfill(wlc_hw->di[RX_FIFO]);
+}
+
+void
+static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec,
+ bool mute) {
+ u32 macintmask;
+ bool fastclk;
+ struct brcms_c_info *wlc = wlc_hw->wlc;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ /* request FAST clock if not on */
+ fastclk = wlc_hw->forcefastclk;
+ if (!fastclk)
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+
+ /* disable interrupts */
+ macintmask = brcms_intrsoff(wlc->wl);
+
+ /* set up the specified band and chanspec */
+ brcms_c_setxband(wlc_hw, chspec_bandunit(chanspec));
+ wlc_phy_chanspec_radio_set(wlc_hw->band->pi, chanspec);
+
+ /* do one-time phy inits and calibration */
+ wlc_phy_cal_init(wlc_hw->band->pi);
+
+ /* core-specific initialization */
+ brcms_b_coreinit(wlc);
+
+ /* suspend the tx fifos and mute the phy for preism cac time */
+ if (mute)
+ brcms_b_mute(wlc_hw, ON, PHY_MUTE_FOR_PREISM);
+
+ /* band-specific inits */
+ brcms_b_bsinit(wlc, chanspec);
+
+ /* restore macintmask */
+ brcms_intrsrestore(wlc->wl, macintmask);
+
+ /* seed wake_override with BRCMS_WAKE_OVERRIDE_MACSUSPEND since the mac
+ * is suspended and brcms_c_enable_mac() will clear this override bit.
+ */
+ mboolset(wlc_hw->wake_override, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
+
+ /*
+ * initialize mac_suspend_depth to 1 to match ucode
+ * initial suspended state
+ */
+ wlc_hw->mac_suspend_depth = 1;
+
+ /* restore the clk */
+ if (!fastclk)
+ brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+}
+
+static void brcms_c_set_phy_chanspec(struct brcms_c_info *wlc,
+ u16 chanspec)
+{
+ /* Save our copy of the chanspec */
+ wlc->chanspec = chanspec;
+
+ /* Set the chanspec and power limits for this locale */
+ brcms_c_channel_set_chanspec(wlc->cmi, chanspec, BRCMS_TXPWR_MAX);
+
+ if (wlc->stf->ss_algosel_auto)
+ brcms_c_stf_ss_algo_channel_get(wlc, &wlc->stf->ss_algo_channel,
+ chanspec);
+
+ brcms_c_stf_ss_update(wlc, wlc->band);
+}
+
+static void
+brcms_default_rateset(struct brcms_c_info *wlc, struct brcms_c_rateset *rs)
+{
+ brcms_c_rateset_default(rs, NULL, wlc->band->phytype,
+ wlc->band->bandtype, false, BRCMS_RATE_MASK_FULL,
+ (bool) (wlc->pub->_n_enab & SUPPORT_11N),
+ brcms_chspec_bw(wlc->default_bss->chanspec),
+ wlc->stf->txstreams);
+}
+
+/* derive wlc->band->basic_rate[] table from 'rateset' */
+static void brcms_c_rate_lookup_init(struct brcms_c_info *wlc,
+ struct brcms_c_rateset *rateset)
+{
+ u8 rate;
+ u8 mandatory;
+ u8 cck_basic = 0;
+ u8 ofdm_basic = 0;
+ u8 *br = wlc->band->basic_rate;
+ uint i;
+
+ /* incoming rates are in 500kbps units as in 802.11 Supported Rates */
+ memset(br, 0, BRCM_MAXRATE + 1);
+
+ /* For each basic rate in the rates list, make an entry in the
+ * best basic lookup.
+ */
+ for (i = 0; i < rateset->count; i++) {
+ /* only make an entry for a basic rate */
+ if (!(rateset->rates[i] & BRCMS_RATE_FLAG))
+ continue;
+
+ /* mask off basic bit */
+ rate = (rateset->rates[i] & BRCMS_RATE_MASK);
+
+ if (rate > BRCM_MAXRATE) {
+ wiphy_err(wlc->wiphy, "brcms_c_rate_lookup_init: "
+ "invalid rate 0x%X in rate set\n",
+ rateset->rates[i]);
+ continue;
+ }
+
+ br[rate] = rate;
+ }
+
+ /* The rate lookup table now has non-zero entries for each
+ * basic rate, equal to the basic rate: br[basicN] = basicN
+ *
+ * To look up the best basic rate corresponding to any
+ * particular rate, code can use the basic_rate table
+ * like this
+ *
+ * basic_rate = wlc->band->basic_rate[tx_rate]
+ *
+ * Make sure there is a best basic rate entry for
+ * every rate by walking up the table from low rates
+ * to high, filling in holes in the lookup table
+ */
+
+ for (i = 0; i < wlc->band->hw_rateset.count; i++) {
+ rate = wlc->band->hw_rateset.rates[i];
+
+ if (br[rate] != 0) {
+ /* This rate is a basic rate.
+ * Keep track of the best basic rate so far by
+ * modulation type.
+ */
+ if (is_ofdm_rate(rate))
+ ofdm_basic = rate;
+ else
+ cck_basic = rate;
+
+ continue;
+ }
+
+ /* This rate is not a basic rate so figure out the
+ * best basic rate less than this rate and fill in
+ * the hole in the table
+ */
+
+ br[rate] = is_ofdm_rate(rate) ? ofdm_basic : cck_basic;
+
+ if (br[rate] != 0)
+ continue;
+
+ if (is_ofdm_rate(rate)) {
+ /*
+ * In 11g and 11a, the OFDM mandatory rates
+ * are 6, 12, and 24 Mbps
+ */
+ if (rate >= BRCM_RATE_24M)
+ mandatory = BRCM_RATE_24M;
+ else if (rate >= BRCM_RATE_12M)
+ mandatory = BRCM_RATE_12M;
+ else
+ mandatory = BRCM_RATE_6M;
+ } else {
+ /* In 11b, all CCK rates are mandatory 1 - 11 Mbps */
+ mandatory = rate;
+ }
+
+ br[rate] = mandatory;
+ }
+}
+
+static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
+ u16 chanspec)
+{
+ struct brcms_c_rateset default_rateset;
+ uint parkband;
+ uint i, band_order[2];
+
+ BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+ /*
+ * We might have been bandlocked during down and the chip
+ * power-cycled (hibernate). Figure out the right band to park on
+ */
+ if (wlc->bandlocked || wlc->pub->_nbands == 1) {
+ /* updated in brcms_c_bandlock() */
+ parkband = wlc->band->bandunit;
+ band_order[0] = band_order[1] = parkband;
+ } else {
+ /* park on the band of the specified chanspec */
+ parkband = chspec_bandunit(chanspec);
+
+ /* order so that parkband initialize last */
+ band_order[0] = parkband ^ 1;
+ band_order[1] = parkband;
+ }
+
+ /* make each band operational, software state init */
+ for (i = 0; i < wlc->pub->_nbands; i++) {
+ uint j = band_order[i];
+
+ wlc->band = wlc->bandstate[j];
+
+ brcms_default_rateset(wlc, &default_rateset);
+
+ /* fill in hw_rate */
+ brcms_c_rateset_filter(&default_rateset, &wlc->band->hw_rateset,
+ false, BRCMS_RATES_CCK_OFDM, BRCMS_RATE_MASK,
+ (bool) (wlc->pub->_n_enab & SUPPORT_11N));
+
+ /* init basic rate lookup */
+ brcms_c_rate_lookup_init(wlc, &default_rateset);
+ }
+
+ /* sync up phy/radio chanspec */
+ brcms_c_set_phy_chanspec(wlc, chanspec);
+}
+
+static void brcms_c_mac_bcn_promisc(struct brcms_c_info *wlc)
+{
+ if (wlc->bcnmisc_monitor)
+ brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, MCTL_BCNS_PROMISC);
+ else
+ brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, 0);
+}
+
+void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc)
+{
+ wlc->bcnmisc_monitor = promisc;
+ brcms_c_mac_bcn_promisc(wlc);
+}
+
+/* set or clear maccontrol bits MCTL_PROMISC and MCTL_KEEPCONTROL */
+static void brcms_c_mac_promisc(struct brcms_c_info *wlc)
+{
+ u32 promisc_bits = 0;
+
+ /*
+ * promiscuous mode just sets MCTL_PROMISC
+ * Note: APs get all BSS traffic without the need to set
+ * the MCTL_PROMISC bit since all BSS data traffic is
+ * directed at the AP
+ */
+ if (wlc->pub->promisc)
+ promisc_bits |= MCTL_PROMISC;
+
+ /* monitor mode needs both MCTL_PROMISC and MCTL_KEEPCONTROL
+ * Note: monitor mode also needs MCTL_BCNS_PROMISC, but that is
+ * handled in brcms_c_mac_bcn_promisc()
+ */
+ if (wlc->monitor)
+ promisc_bits |= MCTL_PROMISC | MCTL_KEEPCONTROL;
+
+ brcms_b_mctrl(wlc->hw, MCTL_PROMISC | MCTL_KEEPCONTROL, promisc_bits);
+}
+
+/*
+ * ucode, hwmac update
+ * Channel dependent updates for ucode and hw
+ */
+static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
+{
+ /* enable or disable any active IBSSs depending on whether or not
+ * we are on the home channel
+ */
+ if (wlc->home_chanspec == wlc_phy_chanspec_get(wlc->band->pi)) {
+ if (wlc->pub->associated) {
+ /*
+ * BMAC_NOTE: This is something that should be fixed
+ * in ucode inits. I think that the ucode inits set
+ * up the bcn templates and shm values with a bogus
+ * beacon. This should not be done in the inits. If
+ * ucode needs to set up a beacon for testing, the
+ * test routines should write it down, not expect the
+ * inits to populate a bogus beacon.
+ */
+ if (BRCMS_PHY_11N_CAP(wlc->band))
+ brcms_b_write_shm(wlc->hw,
+ M_BCN_TXTSF_OFFSET, 0);
+ }
+ } else {
+ /* disable an active IBSS if we are not on the home channel */
+ }
+
+ /* update the various promisc bits */
+ brcms_c_mac_bcn_promisc(wlc);
+ brcms_c_mac_promisc(wlc);
+}
+
+static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
+ u8 basic_rate)
+{
+ u8 phy_rate, index;
+ u8 basic_phy_rate, basic_index;
+ u16 dir_table, basic_table;
+ u16 basic_ptr;
+
+ /* Shared memory address for the table we are reading */
+ dir_table = is_ofdm_rate(basic_rate) ? M_RT_DIRMAP_A : M_RT_DIRMAP_B;
+
+ /* Shared memory address for the table we are writing */
+ basic_table = is_ofdm_rate(rate) ? M_RT_BBRSMAP_A : M_RT_BBRSMAP_B;
+
+ /*
+ * for a given rate, the LS-nibble of the PLCP SIGNAL field is
+ * the index into the rate table.
+ */
+ phy_rate = rate_info[rate] & BRCMS_RATE_MASK;
+ basic_phy_rate = rate_info[basic_rate] & BRCMS_RATE_MASK;
+ index = phy_rate & 0xf;
+ basic_index = basic_phy_rate & 0xf;
+
+ /* Find the SHM pointer to the ACK rate entry by looking in the
+ * Direct-map Table
+ */
+ basic_ptr = brcms_b_read_shm(wlc->hw, (dir_table + basic_index * 2));
+
+ /* Update the SHM BSS-basic-rate-set mapping table with the pointer
+ * to the correct basic rate for the given incoming rate
+ */
+ brcms_b_write_shm(wlc->hw, (basic_table + index * 2), basic_ptr);
+}
+
+static const struct brcms_c_rateset *
+brcms_c_rateset_get_hwrs(struct brcms_c_info *wlc)
+{
+ const struct brcms_c_rateset *rs_dflt;
+
+ if (BRCMS_PHY_11N_CAP(wlc->band)) {
+ if (wlc->band->bandtype == BRCM_BAND_5G)
+ rs_dflt = &ofdm_mimo_rates;
+ else
+ rs_dflt = &cck_ofdm_mimo_rates;
+ } else if (wlc->band->gmode)
+ rs_dflt = &cck_ofdm_rates;
+ else
+ rs_dflt = &cck_rates;
+
+ return rs_dflt;
+}
+
+static void brcms_c_set_ratetable(struct brcms_c_info *wlc)
+{
+ const struct brcms_c_rateset *rs_dflt;
+ struct brcms_c_rateset rs;
+ u8 rate, basic_rate;
+ uint i;
+
+ rs_dflt = brcms_c_rateset_get_hwrs(wlc);
+
+ brcms_c_rateset_copy(rs_dflt, &rs);
+ brcms_c_rateset_mcs_upd(&rs, wlc->stf->txstreams);
+
+ /* walk the phy rate table and update SHM basic rate lookup table */
+ for (i = 0; i < rs.count; i++) {
+ rate = rs.rates[i] & BRCMS_RATE_MASK;
+
+ /* for a given rate brcms_basic_rate returns the rate at
+ * which a response ACK/CTS should be sent.
+ */
+ basic_rate = brcms_basic_rate(wlc, rate);
+ if (basic_rate == 0)
+ /* This should only happen if we are using a
+ * restricted rateset.
+ */
+ basic_rate = rs.rates[0] & BRCMS_RATE_MASK;
+
+ brcms_c_write_rate_shm(wlc, rate, basic_rate);
+ }
+}
+
+/* band-specific init */
+static void brcms_c_bsinit(struct brcms_c_info *wlc)
+{
+ BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n",
+ wlc->pub->unit, wlc->band->bandunit);
+
+ /* write ucode ACK/CTS rate table */
+ brcms_c_set_ratetable(wlc);
+
+ /* update some band specific mac configuration */
+ brcms_c_ucode_mac_upd(wlc);
+
+ /* init antenna selection */
+ brcms_c_antsel_init(wlc->asi);
+
+}
+
+/* formula: IDLE_BUSY_RATIO_X_16 = (100-duty_cycle)/duty_cycle*16 */
+static int
+brcms_c_duty_cycle_set(struct brcms_c_info *wlc, int duty_cycle, bool isOFDM,
+ bool writeToShm)
+{
+ int idle_busy_ratio_x_16 = 0;
+ uint offset =
+ isOFDM ? M_TX_IDLE_BUSY_RATIO_X_16_OFDM :
+ M_TX_IDLE_BUSY_RATIO_X_16_CCK;
+ if (duty_cycle > 100 || duty_cycle < 0) {
+ wiphy_err(wlc->wiphy, "wl%d: duty cycle value off limit\n",
+ wlc->pub->unit);
+ return -EINVAL;
+ }
+ if (duty_cycle)
+ idle_busy_ratio_x_16 = (100 - duty_cycle) * 16 / duty_cycle;
+ /* Only write to shared memory when wl is up */
+ if (writeToShm)
+ brcms_b_write_shm(wlc->hw, offset, (u16) idle_busy_ratio_x_16);
+
+ if (isOFDM)
+ wlc->tx_duty_cycle_ofdm = (u16) duty_cycle;
+ else
+ wlc->tx_duty_cycle_cck = (u16) duty_cycle;
+
+ return 0;
+}
+
+/*
+ * Initialize the base precedence map for dequeueing
+ * from txq based on WME settings
+ */
+static void brcms_c_tx_prec_map_init(struct brcms_c_info *wlc)
+{
+ wlc->tx_prec_map = BRCMS_PREC_BMP_ALL;
+ memset(wlc->fifo2prec_map, 0, NFIFO * sizeof(u16));
+
+ wlc->fifo2prec_map[TX_AC_BK_FIFO] = BRCMS_PREC_BMP_AC_BK;
+ wlc->fifo2prec_map[TX_AC_BE_FIFO] = BRCMS_PREC_BMP_AC_BE;
+ wlc->fifo2prec_map[TX_AC_VI_FIFO] = BRCMS_PREC_BMP_AC_VI;
+ wlc->fifo2prec_map[TX_AC_VO_FIFO] = BRCMS_PREC_BMP_AC_VO;
+}
+
+static void
+brcms_c_txflowcontrol_signal(struct brcms_c_info *wlc,
+ struct brcms_txq_info *qi, bool on, int prio)
+{
+ /* transmit flowcontrol is not yet implemented */
+}
+
+static void brcms_c_txflowcontrol_reset(struct brcms_c_info *wlc)
+{
+ struct brcms_txq_info *qi;
+
+ for (qi = wlc->tx_queues; qi != NULL; qi = qi->next) {
+ if (qi->stopped) {
+ brcms_c_txflowcontrol_signal(wlc, qi, OFF, ALLPRIO);
+ qi->stopped = 0;
+ }
+ }
+}
+
+/* push sw hps and wake state through hardware */
+static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
+{
+ u32 v1, v2;
+ bool hps;
+ bool awake_before;
+
+ hps = brcms_c_ps_allowed(wlc);
+
+ BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps);
+
+ v1 = R_REG(&wlc->regs->maccontrol);
+ v2 = MCTL_WAKE;
+ if (hps)
+ v2 |= MCTL_HPS;
+
+ brcms_b_mctrl(wlc->hw, MCTL_WAKE | MCTL_HPS, v2);
+
+ awake_before = ((v1 & MCTL_WAKE) || ((v1 & MCTL_HPS) == 0));
+
+ if (!awake_before)
+ brcms_b_wait_for_wake(wlc->hw);
+}
+
+/*
+ * Write this BSS config's MAC address to core.
+ * Updates RXE match engine.
+ */
+static int brcms_c_set_mac(struct brcms_bss_cfg *bsscfg)
+{
+ int err = 0;
+ struct brcms_c_info *wlc = bsscfg->wlc;
+
+ /* enter the MAC addr into the RXE match registers */
+ brcms_c_set_addrmatch(wlc, RCM_MAC_OFFSET, bsscfg->cur_etheraddr);
+
+ brcms_c_ampdu_macaddr_upd(wlc);
+
+ return err;
+}
+
+/* Write the BSS config's BSSID address to core (set_bssid in d11procs.tcl).
+ * Updates RXE match engine.
+ */
+static void brcms_c_set_bssid(struct brcms_bss_cfg *bsscfg)
+{
+ /* we need to update BSSID in RXE match registers */
+ brcms_c_set_addrmatch(bsscfg->wlc, RCM_BSSID_OFFSET, bsscfg->BSSID);
+}
+
+static void brcms_b_set_shortslot(struct brcms_hardware *wlc_hw, bool shortslot)
+{
+ wlc_hw->shortslot = shortslot;
+
+ if (wlc_hw->band->bandtype == BRCM_BAND_2G && wlc_hw->up) {
+ brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
+ brcms_b_update_slot_timing(wlc_hw, shortslot);
+ brcms_c_enable_mac(wlc_hw->wlc);
+ }
+}
+
+/*
+ * Suspend the the MAC and update the slot timing
+ * for standard 11b/g (20us slots) or shortslot 11g (9us slots).
+ */
+static void brcms_c_switch_shortslot(struct brcms_c_info *wlc, bool shortslot)
+{
+ /* use the override if it is set */
+ if (wlc->shortslot_override != BRCMS_SHORTSLOT_AUTO)
+ shortslot = (wlc->shortslot_override == BRCMS_SHORTSLOT_ON);
+
+ if (wlc->shortslot == shortslot)
+ return;
+
+ wlc->shortslot = shortslot;
+
+ brcms_b_set_shortslot(wlc->hw, shortslot);
+}
+
+static void brcms_c_set_home_chanspec(struct brcms_c_info *wlc, u16 chanspec)
+{
+ if (wlc->home_chanspec != chanspec) {
+ wlc->home_chanspec = chanspec;
+
+ if (wlc->bsscfg->associated)
+ wlc->bsscfg->current_bss->chanspec = chanspec;
+ }
+}
+
+void
+brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
+ bool mute, struct txpwr_limits *txpwr)
+{
+ uint bandunit;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d: 0x%x\n", wlc_hw->unit, chanspec);
+
+ wlc_hw->chanspec = chanspec;
+
+ /* Switch bands if necessary */
+ if (wlc_hw->_nbands > 1) {
+ bandunit = chspec_bandunit(chanspec);
+ if (wlc_hw->band->bandunit != bandunit) {
+ /* brcms_b_setband disables other bandunit,
+ * use light band switch if not up yet
+ */
+ if (wlc_hw->up) {
+ wlc_phy_chanspec_radio_set(wlc_hw->
+ bandstate[bandunit]->
+ pi, chanspec);
+ brcms_b_setband(wlc_hw, bandunit, chanspec);
+ } else {
+ brcms_c_setxband(wlc_hw, bandunit);
+ }
+ }
+ }
+
+ wlc_phy_initcal_enable(wlc_hw->band->pi, !mute);
+
+ if (!wlc_hw->up) {
+ if (wlc_hw->clk)
+ wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr,
+ chanspec);
+ wlc_phy_chanspec_radio_set(wlc_hw->band->pi, chanspec);
+ } else {
+ wlc_phy_chanspec_set(wlc_hw->band->pi, chanspec);
+ wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr, chanspec);
+
+ /* Update muting of the channel */
+ brcms_b_mute(wlc_hw, mute, 0);
+ }
+}
+
+/* switch to and initialize new band */
+static void brcms_c_setband(struct brcms_c_info *wlc,
+ uint bandunit)
+{
+ wlc->band = wlc->bandstate[bandunit];
+
+ if (!wlc->pub->up)
+ return;
+
+ /* wait for at least one beacon before entering sleeping state */
+ brcms_c_set_ps_ctrl(wlc);
+
+ /* band-specific initializations */
+ brcms_c_bsinit(wlc);
+}
+
+static void brcms_c_set_chanspec(struct brcms_c_info *wlc, u16 chanspec)
+{
+ uint bandunit;
+ bool switchband = false;
+ u16 old_chanspec = wlc->chanspec;
+
+ if (!brcms_c_valid_chanspec_db(wlc->cmi, chanspec)) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: Bad channel %d\n",
+ wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec));
+ return;
+ }
+
+ /* Switch bands if necessary */
+ if (wlc->pub->_nbands > 1) {
+ bandunit = chspec_bandunit(chanspec);
+ if (wlc->band->bandunit != bandunit || wlc->bandinit_pending) {
+ switchband = true;
+ if (wlc->bandlocked) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: chspec %d "
+ "band is locked!\n",
+ wlc->pub->unit, __func__,
+ CHSPEC_CHANNEL(chanspec));
+ return;
+ }
+ /*
+ * should the setband call come after the
+ * brcms_b_chanspec() ? if the setband updates
+ * (brcms_c_bsinit) use low level calls to inspect and
+ * set state, the state inspected may be from the wrong
+ * band, or the following brcms_b_set_chanspec() may
+ * undo the work.
+ */
+ brcms_c_setband(wlc, bandunit);
+ }
+ }
+
+ /* sync up phy/radio chanspec */
+ brcms_c_set_phy_chanspec(wlc, chanspec);
+
+ /* init antenna selection */
+ if (brcms_chspec_bw(old_chanspec) != brcms_chspec_bw(chanspec)) {
+ brcms_c_antsel_init(wlc->asi);
+
+ /* Fix the hardware rateset based on bw.
+ * Mainly add MCS32 for 40Mhz, remove MCS 32 for 20Mhz
+ */
+ brcms_c_rateset_bw_mcs_filter(&wlc->band->hw_rateset,
+ wlc->band->mimo_cap_40 ? brcms_chspec_bw(chanspec) : 0);
+ }
+
+ /* update some mac configuration since chanspec changed */
+ brcms_c_ucode_mac_upd(wlc);
+}
+
+/*
+ * This function changes the phytxctl for beacon based on current
+ * beacon ratespec AND txant setting as per this table:
+ * ratespec CCK ant = wlc->stf->txant
+ * OFDM ant = 3
+ */
+void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
+ u32 bcn_rspec)
+{
+ u16 phyctl;
+ u16 phytxant = wlc->stf->phytxant;
+ u16 mask = PHY_TXC_ANT_MASK;
+
+ /* for non-siso rates or default setting, use the available chains */
+ if (BRCMS_PHY_11N_CAP(wlc->band))
+ phytxant = brcms_c_stf_phytxchain_sel(wlc, bcn_rspec);
+
+ phyctl = brcms_b_read_shm(wlc->hw, M_BCN_PCTLWD);
+ phyctl = (phyctl & ~mask) | phytxant;
+ brcms_b_write_shm(wlc->hw, M_BCN_PCTLWD, phyctl);
+}
+
+/*
+ * centralized protection config change function to simplify debugging, no
+ * consistency checking this should be called only on changes to avoid overhead
+ * in periodic function
+ */
+void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val)
+{
+ BCMMSG(wlc->wiphy, "idx %d, val %d\n", idx, val);
+
+ switch (idx) {
+ case BRCMS_PROT_G_SPEC:
+ wlc->protection->_g = (bool) val;
+ break;
+ case BRCMS_PROT_G_OVR:
+ wlc->protection->g_override = (s8) val;
+ break;
+ case BRCMS_PROT_G_USER:
+ wlc->protection->gmode_user = (u8) val;
+ break;
+ case BRCMS_PROT_OVERLAP:
+ wlc->protection->overlap = (s8) val;
+ break;
+ case BRCMS_PROT_N_USER:
+ wlc->protection->nmode_user = (s8) val;
+ break;
+ case BRCMS_PROT_N_CFG:
+ wlc->protection->n_cfg = (s8) val;
+ break;
+ case BRCMS_PROT_N_CFG_OVR:
+ wlc->protection->n_cfg_override = (s8) val;
+ break;
+ case BRCMS_PROT_N_NONGF:
+ wlc->protection->nongf = (bool) val;
+ break;
+ case BRCMS_PROT_N_NONGF_OVR:
+ wlc->protection->nongf_override = (s8) val;
+ break;
+ case BRCMS_PROT_N_PAM_OVR:
+ wlc->protection->n_pam_override = (s8) val;
+ break;
+ case BRCMS_PROT_N_OBSS:
+ wlc->protection->n_obss = (bool) val;
+ break;
+
+ default:
+ break;
+ }
+
+}
+
+static void brcms_c_ht_update_sgi_rx(struct brcms_c_info *wlc, int val)
+{
+ if (wlc->pub->up) {
+ brcms_c_update_beacon(wlc);
+ brcms_c_update_probe_resp(wlc, true);
+ }
+}
+
+static void brcms_c_ht_update_ldpc(struct brcms_c_info *wlc, s8 val)
+{
+ wlc->stf->ldpc = val;
+
+ if (wlc->pub->up) {
+ brcms_c_update_beacon(wlc);
+ brcms_c_update_probe_resp(wlc, true);
+ wlc_phy_ldpc_override_set(wlc->band->pi, (val ? true : false));
+ }
+}
+
+void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
+ const struct ieee80211_tx_queue_params *params,
+ bool suspend)
+{
+ int i;
+ struct shm_acparams acp_shm;
+ u16 *shm_entry;
+
+ /* Only apply params if the core is out of reset and has clocks */
+ if (!wlc->clk) {
+ wiphy_err(wlc->wiphy, "wl%d: %s : no-clock\n", wlc->pub->unit,
+ __func__);
+ return;
+ }
+
+ memset((char *)&acp_shm, 0, sizeof(struct shm_acparams));
+ /* fill in shm ac params struct */
+ acp_shm.txop = params->txop;
+ /* convert from units of 32us to us for ucode */
+ wlc->edcf_txop[aci & 0x3] = acp_shm.txop =
+ EDCF_TXOP2USEC(acp_shm.txop);
+ acp_shm.aifs = (params->aifs & EDCF_AIFSN_MASK);
+
+ if (aci == AC_VI && acp_shm.txop == 0
+ && acp_shm.aifs < EDCF_AIFSN_MAX)
+ acp_shm.aifs++;
+
+ if (acp_shm.aifs < EDCF_AIFSN_MIN
+ || acp_shm.aifs > EDCF_AIFSN_MAX) {
+ wiphy_err(wlc->wiphy, "wl%d: edcf_setparams: bad "
+ "aifs %d\n", wlc->pub->unit, acp_shm.aifs);
+ } else {
+ acp_shm.cwmin = params->cw_min;
+ acp_shm.cwmax = params->cw_max;
+ acp_shm.cwcur = acp_shm.cwmin;
+ acp_shm.bslots =
+ R_REG(&wlc->regs->tsf_random) & acp_shm.cwcur;
+ acp_shm.reggap = acp_shm.bslots + acp_shm.aifs;
+ /* Indicate the new params to the ucode */
+ acp_shm.status = brcms_b_read_shm(wlc->hw, (M_EDCF_QINFO +
+ wme_ac2fifo[aci] *
+ M_EDCF_QLEN +
+ M_EDCF_STATUS_OFF));
+ acp_shm.status |= WME_STATUS_NEWAC;
+
+ /* Fill in shm acparam table */
+ shm_entry = (u16 *) &acp_shm;
+ for (i = 0; i < (int)sizeof(struct shm_acparams); i += 2)
+ brcms_b_write_shm(wlc->hw,
+ M_EDCF_QINFO +
+ wme_ac2fifo[aci] * M_EDCF_QLEN + i,
+ *shm_entry++);
+ }
+
+ if (suspend) {
+ brcms_c_suspend_mac_and_wait(wlc);
+ brcms_c_enable_mac(wlc);
+ }
+}
+
+void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
+{
+ u16 aci;
+ int i_ac;
+ struct ieee80211_tx_queue_params txq_pars;
+ static const struct edcf_acparam default_edcf_acparams[] = {
+ {EDCF_AC_BE_ACI_STA, EDCF_AC_BE_ECW_STA, EDCF_AC_BE_TXOP_STA},
+ {EDCF_AC_BK_ACI_STA, EDCF_AC_BK_ECW_STA, EDCF_AC_BK_TXOP_STA},
+ {EDCF_AC_VI_ACI_STA, EDCF_AC_VI_ECW_STA, EDCF_AC_VI_TXOP_STA},
+ {EDCF_AC_VO_ACI_STA, EDCF_AC_VO_ECW_STA, EDCF_AC_VO_TXOP_STA}
+ }; /* ucode needs these parameters during its initialization */
+ const struct edcf_acparam *edcf_acp = &default_edcf_acparams[0];
+
+ for (i_ac = 0; i_ac < AC_COUNT; i_ac++, edcf_acp++) {
+ /* find out which ac this set of params applies to */
+ aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
+
+ /* fill in shm ac params struct */
+ txq_pars.txop = edcf_acp->TXOP;
+ txq_pars.aifs = edcf_acp->ACI;
+
+ /* CWmin = 2^(ECWmin) - 1 */
+ txq_pars.cw_min = EDCF_ECW2CW(edcf_acp->ECW & EDCF_ECWMIN_MASK);
+ /* CWmax = 2^(ECWmax) - 1 */
+ txq_pars.cw_max = EDCF_ECW2CW((edcf_acp->ECW & EDCF_ECWMAX_MASK)
+ >> EDCF_ECWMAX_SHIFT);
+ brcms_c_wme_setparams(wlc, aci, &txq_pars, suspend);
+ }
+
+ if (suspend) {
+ brcms_c_suspend_mac_and_wait(wlc);
+ brcms_c_enable_mac(wlc);
+ }
+}
+
+/* maintain LED behavior in down state */
+static void brcms_c_down_led_upd(struct brcms_c_info *wlc)
+{
+ /*
+ * maintain LEDs while in down state, turn on sbclk if
+ * not available yet. Turn on sbclk if necessary
+ */
+ brcms_b_pllreq(wlc->hw, true, BRCMS_PLLREQ_FLIP);
+ brcms_b_pllreq(wlc->hw, false, BRCMS_PLLREQ_FLIP);
+}
+
+static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
+{
+ /* Don't start the timer if HWRADIO feature is disabled */
+ if (wlc->radio_monitor)
+ return;
+
+ wlc->radio_monitor = true;
+ brcms_b_pllreq(wlc->hw, true, BRCMS_PLLREQ_RADIO_MON);
+ brcms_add_timer(wlc->radio_timer, TIMER_INTERVAL_RADIOCHK, true);
+}
+
+static void brcms_c_radio_disable(struct brcms_c_info *wlc)
+{
+ if (!wlc->pub->up) {
+ brcms_c_down_led_upd(wlc);
+ return;
+ }
+
+ brcms_c_radio_monitor_start(wlc);
+ brcms_down(wlc->wl);
+}
+
+static void brcms_c_radio_enable(struct brcms_c_info *wlc)
+{
+ if (wlc->pub->up)
+ return;
+
+ if (brcms_deviceremoved(wlc))
+ return;
+
+ brcms_up(wlc->wl);
+}
+
+static bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc)
+{
+ if (!wlc->radio_monitor)
+ return true;
+
+ wlc->radio_monitor = false;
+ brcms_b_pllreq(wlc->hw, false, BRCMS_PLLREQ_RADIO_MON);
+ return brcms_del_timer(wlc->radio_timer);
+}
+
+/* read hwdisable state and propagate to wlc flag */
+static void brcms_c_radio_hwdisable_upd(struct brcms_c_info *wlc)
+{
+ if (wlc->pub->hw_off)
+ return;
+
+ if (brcms_b_radio_read_hwdisabled(wlc->hw))
+ mboolset(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
+ else
+ mboolclr(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
+}
+
+/*
+ * centralized radio disable/enable function,
+ * invoke radio enable/disable after updating hwradio status
+ */
+static void brcms_c_radio_upd(struct brcms_c_info *wlc)
+{
+ if (wlc->pub->radio_disabled)
+ brcms_c_radio_disable(wlc);
+ else
+ brcms_c_radio_enable(wlc);
+}
+
+/* update hwradio status and return it */
+bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc)
+{
+ brcms_c_radio_hwdisable_upd(wlc);
+
+ return mboolisset(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE) ?
+ true : false;
+}
+
+/* periodical query hw radio button while driver is "down" */
+static void brcms_c_radio_timer(void *arg)
+{
+ struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
+
+ if (brcms_deviceremoved(wlc)) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
+ __func__);
+ brcms_down(wlc->wl);
+ return;
+ }
+
+ /* cap mpc off count */
+ if (wlc->mpc_offcnt < BRCMS_MPC_MAX_DELAYCNT)
+ wlc->mpc_offcnt++;
+
+ brcms_c_radio_hwdisable_upd(wlc);
+ brcms_c_radio_upd(wlc);
+}
+
+/* common low-level watchdog code */
+static void brcms_b_watchdog(void *arg)
+{
+ struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
+ struct brcms_hardware *wlc_hw = wlc->hw;
+
+ BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ if (!wlc_hw->up)
+ return;
+
+ /* increment second count */
+ wlc_hw->now++;
+
+ /* Check for FIFO error interrupts */
+ brcms_b_fifoerrors(wlc_hw);
+
+ /* make sure RX dma has buffers */
+ dma_rxfill(wlc->hw->di[RX_FIFO]);
+
+ wlc_phy_watchdog(wlc_hw->band->pi);
+}
+
+static void brcms_c_radio_mpc_upd(struct brcms_c_info *wlc)
+{
+ bool mpc_radio, radio_state;
+
+ /*
+ * Clear the WL_RADIO_MPC_DISABLE bit when mpc feature is disabled
+ * in case the WL_RADIO_MPC_DISABLE bit was set. Stop the radio
+ * monitor also when WL_RADIO_MPC_DISABLE is the only reason that
+ * the radio is going down.
+ */
+ if (!wlc->mpc) {
+ if (!wlc->pub->radio_disabled)
+ return;
+ mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
+ brcms_c_radio_upd(wlc);
+ if (!wlc->pub->radio_disabled)
+ brcms_c_radio_monitor_stop(wlc);
+ return;
+ }
+
+ /*
+ * sync ismpc logic with WL_RADIO_MPC_DISABLE bit in
+ * wlc->pub->radio_disabled to go ON, always call radio_upd
+ * synchronously to go OFF, postpone radio_upd to later when
+ * context is safe(e.g. watchdog)
+ */
+ radio_state =
+ (mboolisset(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE) ? OFF :
+ ON);
+ mpc_radio = (brcms_c_ismpc(wlc) == true) ? OFF : ON;
+
+ if (radio_state == ON && mpc_radio == OFF)
+ wlc->mpc_delay_off = wlc->mpc_dlycnt;
+ else if (radio_state == OFF && mpc_radio == ON) {
+ mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
+ brcms_c_radio_upd(wlc);
+ if (wlc->mpc_offcnt < BRCMS_MPC_THRESHOLD)
+ wlc->mpc_dlycnt = BRCMS_MPC_MAX_DELAYCNT;
+ else
+ wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
+ }
+ /*
+ * Below logic is meant to capture the transition from mpc off
+ * to mpc on for reasons other than wlc->mpc_delay_off keeping
+ * the mpc off. In that case reset wlc->mpc_delay_off to
+ * wlc->mpc_dlycnt, so that we restart the countdown of mpc_delay_off
+ */
+ if ((wlc->prev_non_delay_mpc == false) &&
+ (brcms_c_is_non_delay_mpc(wlc) == true) && wlc->mpc_delay_off)
+ wlc->mpc_delay_off = wlc->mpc_dlycnt;
+
+ wlc->prev_non_delay_mpc = brcms_c_is_non_delay_mpc(wlc);
+}
+
+/* common watchdog code */
+static void brcms_c_watchdog(void *arg)
+{
+ struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
+
+ BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+
+ if (!wlc->pub->up)
+ return;
+
+ if (brcms_deviceremoved(wlc)) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
+ __func__);
+ brcms_down(wlc->wl);
+ return;
+ }
+
+ /* increment second count */
+ wlc->pub->now++;
+
+ /* delay radio disable */
+ if (wlc->mpc_delay_off) {
+ if (--wlc->mpc_delay_off == 0) {
+ mboolset(wlc->pub->radio_disabled,
+ WL_RADIO_MPC_DISABLE);
+ if (wlc->mpc && brcms_c_ismpc(wlc))
+ wlc->mpc_offcnt = 0;
+ }
+ }
+
+ /* mpc sync */
+ brcms_c_radio_mpc_upd(wlc);
+ /* radio sync: sw/hw/mpc --> radio_disable/radio_enable */
+ brcms_c_radio_hwdisable_upd(wlc);
+ brcms_c_radio_upd(wlc);
+ /* if radio is disable, driver may be down, quit here */
+ if (wlc->pub->radio_disabled)
+ return;
+
+ brcms_b_watchdog(wlc);
+
+ /*
+ * occasionally sample mac stat counters to
+ * detect 16-bit counter wrap
+ */
+ if ((wlc->pub->now % SW_TIMER_MAC_STAT_UPD) == 0)
+ brcms_c_statsupd(wlc);
+
+ if (BRCMS_ISNPHY(wlc->band) &&
+ ((wlc->pub->now - wlc->tempsense_lasttime) >=
+ BRCMS_TEMPSENSE_PERIOD)) {
+ wlc->tempsense_lasttime = wlc->pub->now;
+ brcms_c_tempsense_upd(wlc);
+ }
+}
+
+static void brcms_c_watchdog_by_timer(void *arg)
+{
+ brcms_c_watchdog(arg);
+}
+
+static bool brcms_c_timers_init(struct brcms_c_info *wlc, int unit)
+{
+ wlc->wdtimer = brcms_init_timer(wlc->wl, brcms_c_watchdog_by_timer,
+ wlc, "watchdog");
+ if (!wlc->wdtimer) {
+ wiphy_err(wlc->wiphy, "wl%d: wl_init_timer for wdtimer "
+ "failed\n", unit);
+ goto fail;
+ }
+
+ wlc->radio_timer = brcms_init_timer(wlc->wl, brcms_c_radio_timer,
+ wlc, "radio");
+ if (!wlc->radio_timer) {
+ wiphy_err(wlc->wiphy, "wl%d: wl_init_timer for radio_timer "
+ "failed\n", unit);
+ goto fail;
+ }
+
+ return true;
+
+ fail:
+ return false;
+}
+
+/*
+ * Initialize brcms_c_info default values ...
+ * may get overrides later in this function
+ */
+static void brcms_c_info_init(struct brcms_c_info *wlc, int unit)
+{
+ int i;
+
+ /* Save our copy of the chanspec */
+ wlc->chanspec = ch20mhz_chspec(1);
+
+ /* various 802.11g modes */
+ wlc->shortslot = false;
+ wlc->shortslot_override = BRCMS_SHORTSLOT_AUTO;
+
+ brcms_c_protection_upd(wlc, BRCMS_PROT_G_OVR, BRCMS_PROTECTION_AUTO);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_G_SPEC, false);
+
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_CFG_OVR,
+ BRCMS_PROTECTION_AUTO);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_CFG, BRCMS_N_PROTECTION_OFF);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_NONGF_OVR,
+ BRCMS_PROTECTION_AUTO);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_NONGF, false);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_PAM_OVR, AUTO);
+
+ brcms_c_protection_upd(wlc, BRCMS_PROT_OVERLAP,
+ BRCMS_PROTECTION_CTL_OVERLAP);
+
+ /* 802.11g draft 4.0 NonERP elt advertisement */
+ wlc->include_legacy_erp = true;
+
+ wlc->stf->ant_rx_ovr = ANT_RX_DIV_DEF;
+ wlc->stf->txant = ANT_TX_DEF;
+
+ wlc->prb_resp_timeout = BRCMS_PRB_RESP_TIMEOUT;
+
+ wlc->usr_fragthresh = DOT11_DEFAULT_FRAG_LEN;
+ for (i = 0; i < NFIFO; i++)
+ wlc->fragthresh[i] = DOT11_DEFAULT_FRAG_LEN;
+ wlc->RTSThresh = DOT11_DEFAULT_RTS_LEN;
+
+ /* default rate fallback retry limits */
+ wlc->SFBL = RETRY_SHORT_FB;
+ wlc->LFBL = RETRY_LONG_FB;
+
+ /* default mac retry limits */
+ wlc->SRL = RETRY_SHORT_DEF;
+ wlc->LRL = RETRY_LONG_DEF;
+
+ /* WME QoS mode is Auto by default */
+ wlc->pub->_ampdu = AMPDU_AGG_HOST;
+ wlc->pub->bcmerror = 0;
+
+ /* initialize mpc delay */
+ wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
+}
+
+static uint brcms_c_attach_module(struct brcms_c_info *wlc)
+{
+ uint err = 0;
+ uint unit;
+ unit = wlc->pub->unit;
+
+ wlc->asi = brcms_c_antsel_attach(wlc);
+ if (wlc->asi == NULL) {
+ wiphy_err(wlc->wiphy, "wl%d: attach: antsel_attach "
+ "failed\n", unit);
+ err = 44;
+ goto fail;
+ }
+
+ wlc->ampdu = brcms_c_ampdu_attach(wlc);
+ if (wlc->ampdu == NULL) {
+ wiphy_err(wlc->wiphy, "wl%d: attach: ampdu_attach "
+ "failed\n", unit);
+ err = 50;
+ goto fail;
+ }
+
+ if ((brcms_c_stf_attach(wlc) != 0)) {
+ wiphy_err(wlc->wiphy, "wl%d: attach: stf_attach "
+ "failed\n", unit);
+ err = 68;
+ goto fail;
+ }
+ fail:
+ return err;
+}
+
+struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc)
+{
+ return wlc->pub;
+}
+
+/* low level attach
+ * run backplane attach, init nvram
+ * run phy attach
+ * initialize software state for each core and band
+ * put the whole chip in reset(driver down state), no clock
+ */
+static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
+ uint unit, bool piomode, void __iomem *regsva,
+ struct pci_dev *btparam)
+{
+ struct brcms_hardware *wlc_hw;
+ struct d11regs __iomem *regs;
+ char *macaddr = NULL;
+ uint err = 0;
+ uint j;
+ bool wme = false;
+ struct shared_phy_params sha_params;
+ struct wiphy *wiphy = wlc->wiphy;
+
+ BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, vendor,
+ device);
+
+ wme = true;
+
+ wlc_hw = wlc->hw;
+ wlc_hw->wlc = wlc;
+ wlc_hw->unit = unit;
+ wlc_hw->band = wlc_hw->bandstate[0];
+ wlc_hw->_piomode = piomode;
+
+ /* populate struct brcms_hardware with default values */
+ brcms_b_info_init(wlc_hw);
+
+ /*
+ * Do the hardware portion of the attach. Also initialize software
+ * state that depends on the particular hardware we are running.
+ */
+ wlc_hw->sih = ai_attach(regsva, btparam);
+ if (wlc_hw->sih == NULL) {
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n",
+ unit);
+ err = 11;
+ goto fail;
+ }
+
+ /* verify again the device is supported */
+ if (!brcms_c_chipmatch(vendor, device)) {
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
+ "vendor/device (0x%x/0x%x)\n",
+ unit, vendor, device);
+ err = 12;
+ goto fail;
+ }
+
+ wlc_hw->vendorid = vendor;
+ wlc_hw->deviceid = device;
+
+ /* set bar0 window to point at D11 core */
+ wlc_hw->regs = (struct d11regs __iomem *)
+ ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
+ wlc_hw->corerev = ai_corerev(wlc_hw->sih);
+
+ regs = wlc_hw->regs;
+
+ wlc->regs = wlc_hw->regs;
+
+ /* validate chip, chiprev and corerev */
+ if (!brcms_c_isgoodchip(wlc_hw)) {
+ err = 13;
+ goto fail;
+ }
+
+ /* initialize power control registers */
+ ai_clkctl_init(wlc_hw->sih);
+
+ /* request fastclock and force fastclock for the rest of attach
+ * bring the d11 core out of reset.
+ * For PMU chips, the first wlc_clkctl_clk is no-op since core-clk
+ * is still false; But it will be called again inside wlc_corereset,
+ * after d11 is out of reset.
+ */
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
+
+ if (!brcms_b_validate_chip_access(wlc_hw)) {
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: validate_chip_access "
+ "failed\n", unit);
+ err = 14;
+ goto fail;
+ }
+
+ /* get the board rev, used just below */
+ j = getintvar(wlc_hw->sih, BRCMS_SROM_BOARDREV);
+ /* promote srom boardrev of 0xFF to 1 */
+ if (j == BOARDREV_PROMOTABLE)
+ j = BOARDREV_PROMOTED;
+ wlc_hw->boardrev = (u16) j;
+ if (!brcms_c_validboardtype(wlc_hw)) {
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom "
+ "board type (0x%x)" " or revision level (0x%x)\n",
+ unit, wlc_hw->sih->boardtype, wlc_hw->boardrev);
+ err = 15;
+ goto fail;
+ }
+ wlc_hw->sromrev = (u8) getintvar(wlc_hw->sih, BRCMS_SROM_REV);
+ wlc_hw->boardflags = (u32) getintvar(wlc_hw->sih,
+ BRCMS_SROM_BOARDFLAGS);
+ wlc_hw->boardflags2 = (u32) getintvar(wlc_hw->sih,
+ BRCMS_SROM_BOARDFLAGS2);
+
+ if (wlc_hw->boardflags & BFL_NOPLLDOWN)
+ brcms_b_pllreq(wlc_hw, true, BRCMS_PLLREQ_SHARED);
+
+ /* check device id(srom, nvram etc.) to set bands */
+ if (wlc_hw->deviceid == BCM43224_D11N_ID ||
+ wlc_hw->deviceid == BCM43224_D11N_ID_VEN1)
+ /* Dualband boards */
+ wlc_hw->_nbands = 2;
+ else
+ wlc_hw->_nbands = 1;
+
+ if ((wlc_hw->sih->chip == BCM43225_CHIP_ID))
+ wlc_hw->_nbands = 1;
+
+ /* BMAC_NOTE: remove init of pub values when brcms_c_attach()
+ * unconditionally does the init of these values
+ */
+ wlc->vendorid = wlc_hw->vendorid;
+ wlc->deviceid = wlc_hw->deviceid;
+ wlc->pub->sih = wlc_hw->sih;
+ wlc->pub->corerev = wlc_hw->corerev;
+ wlc->pub->sromrev = wlc_hw->sromrev;
+ wlc->pub->boardrev = wlc_hw->boardrev;
+ wlc->pub->boardflags = wlc_hw->boardflags;
+ wlc->pub->boardflags2 = wlc_hw->boardflags2;
+ wlc->pub->_nbands = wlc_hw->_nbands;
+
+ wlc_hw->physhim = wlc_phy_shim_attach(wlc_hw, wlc->wl, wlc);
+
+ if (wlc_hw->physhim == NULL) {
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: wlc_phy_shim_attach "
+ "failed\n", unit);
+ err = 25;
+ goto fail;
+ }
+
+ /* pass all the parameters to wlc_phy_shared_attach in one struct */
+ sha_params.sih = wlc_hw->sih;
+ sha_params.physhim = wlc_hw->physhim;
+ sha_params.unit = unit;
+ sha_params.corerev = wlc_hw->corerev;
+ sha_params.vid = wlc_hw->vendorid;
+ sha_params.did = wlc_hw->deviceid;
+ sha_params.chip = wlc_hw->sih->chip;
+ sha_params.chiprev = wlc_hw->sih->chiprev;
+ sha_params.chippkg = wlc_hw->sih->chippkg;
+ sha_params.sromrev = wlc_hw->sromrev;
+ sha_params.boardtype = wlc_hw->sih->boardtype;
+ sha_params.boardrev = wlc_hw->boardrev;
+ sha_params.boardvendor = wlc_hw->sih->boardvendor;
+ sha_params.boardflags = wlc_hw->boardflags;
+ sha_params.boardflags2 = wlc_hw->boardflags2;
+ sha_params.buscorerev = wlc_hw->sih->buscorerev;
+
+ /* alloc and save pointer to shared phy state area */
+ wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params);
+ if (!wlc_hw->phy_sh) {
+ err = 16;
+ goto fail;
+ }
+
+ /* initialize software state for each core and band */
+ for (j = 0; j < wlc_hw->_nbands; j++) {
+ /*
+ * band0 is always 2.4Ghz
+ * band1, if present, is 5Ghz
+ */
+
+ brcms_c_setxband(wlc_hw, j);
+
+ wlc_hw->band->bandunit = j;
+ wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
+ wlc->band->bandunit = j;
+ wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
+ wlc->core->coreidx = ai_coreidx(wlc_hw->sih);
+
+ wlc_hw->machwcap = R_REG(&regs->machwcap);
+ wlc_hw->machwcap_backup = wlc_hw->machwcap;
+
+ /* init tx fifo size */
+ wlc_hw->xmtfifo_sz =
+ xmtfifo_sz[(wlc_hw->corerev - XMTFIFOTBL_STARTREV)];
+
+ /* Get a phy for this band */
+ wlc_hw->band->pi =
+ wlc_phy_attach(wlc_hw->phy_sh, regs,
+ wlc_hw->band->bandtype,
+ wlc->wiphy);
+ if (wlc_hw->band->pi == NULL) {
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: wlc_phy_"
+ "attach failed\n", unit);
+ err = 17;
+ goto fail;
+ }
+
+ wlc_phy_machwcap_set(wlc_hw->band->pi, wlc_hw->machwcap);
+
+ wlc_phy_get_phyversion(wlc_hw->band->pi, &wlc_hw->band->phytype,
+ &wlc_hw->band->phyrev,
+ &wlc_hw->band->radioid,
+ &wlc_hw->band->radiorev);
+ wlc_hw->band->abgphy_encore =
+ wlc_phy_get_encore(wlc_hw->band->pi);
+ wlc->band->abgphy_encore = wlc_phy_get_encore(wlc_hw->band->pi);
+ wlc_hw->band->core_flags =
+ wlc_phy_get_coreflags(wlc_hw->band->pi);
+
+ /* verify good phy_type & supported phy revision */
+ if (BRCMS_ISNPHY(wlc_hw->band)) {
+ if (NCONF_HAS(wlc_hw->band->phyrev))
+ goto good_phy;
+ else
+ goto bad_phy;
+ } else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
+ if (LCNCONF_HAS(wlc_hw->band->phyrev))
+ goto good_phy;
+ else
+ goto bad_phy;
+ } else {
+ bad_phy:
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: unsupported "
+ "phy type/rev (%d/%d)\n", unit,
+ wlc_hw->band->phytype, wlc_hw->band->phyrev);
+ err = 18;
+ goto fail;
+ }
+
+ good_phy:
+ /*
+ * BMAC_NOTE: wlc->band->pi should not be set below and should
+ * be done in the high level attach. However we can not make
+ * that change until all low level access is changed to
+ * wlc_hw->band->pi. Instead do the wlc->band->pi init below,
+ * keeping wlc_hw->band->pi as well for incremental update of
+ * low level fns, and cut over low only init when all fns
+ * updated.
+ */
+ wlc->band->pi = wlc_hw->band->pi;
+ wlc->band->phytype = wlc_hw->band->phytype;
+ wlc->band->phyrev = wlc_hw->band->phyrev;
+ wlc->band->radioid = wlc_hw->band->radioid;
+ wlc->band->radiorev = wlc_hw->band->radiorev;
+
+ /* default contention windows size limits */
+ wlc_hw->band->CWmin = APHY_CWMIN;
+ wlc_hw->band->CWmax = PHY_CWMAX;
+
+ if (!brcms_b_attach_dmapio(wlc, j, wme)) {
+ err = 19;
+ goto fail;
+ }
+ }
+
+ /* disable core to match driver "down" state */
+ brcms_c_coredisable(wlc_hw);
+
+ /* Match driver "down" state */
+ ai_pci_down(wlc_hw->sih);
+
+ /* register sb interrupt callback functions */
+ ai_register_intr_callback(wlc_hw->sih, (void *)brcms_c_wlintrsoff,
+ (void *)brcms_c_wlintrsrestore, NULL, wlc);
+
+ /* turn off pll and xtal to match driver "down" state */
+ brcms_b_xtal(wlc_hw, OFF);
+
+ /* *******************************************************************
+ * The hardware is in the DOWN state at this point. D11 core
+ * or cores are in reset with clocks off, and the board PLLs
+ * are off if possible.
+ *
+ * Beyond this point, wlc->sbclk == false and chip registers
+ * should not be touched.
+ *********************************************************************
+ */
+
+ /* init etheraddr state variables */
+ macaddr = brcms_c_get_macaddr(wlc_hw);
+ if (macaddr == NULL) {
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: macaddr not found\n",
+ unit);
+ err = 21;
+ goto fail;
+ }
+ if (!mac_pton(macaddr, wlc_hw->etheraddr) ||
+ is_broadcast_ether_addr(wlc_hw->etheraddr) ||
+ is_zero_ether_addr(wlc_hw->etheraddr)) {
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: bad macaddr %s\n",
+ unit, macaddr);
+ err = 22;
+ goto fail;
+ }
+
+ BCMMSG(wlc->wiphy,
+ "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
+ wlc_hw->deviceid, wlc_hw->_nbands,
+ wlc_hw->sih->boardtype, macaddr);
+
+ return err;
+
+ fail:
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: failed with err %d\n", unit,
+ err);
+ return err;
+}
+
+static void brcms_c_attach_antgain_init(struct brcms_c_info *wlc)
+{
+ uint unit;
+ unit = wlc->pub->unit;
+
+ if ((wlc->band->antgain == -1) && (wlc->pub->sromrev == 1)) {
+ /* default antenna gain for srom rev 1 is 2 dBm (8 qdbm) */
+ wlc->band->antgain = 8;
+ } else if (wlc->band->antgain == -1) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: Invalid antennas available in"
+ " srom, using 2dB\n", unit, __func__);
+ wlc->band->antgain = 8;
+ } else {
+ s8 gain, fract;
+ /* Older sroms specified gain in whole dbm only. In order
+ * be able to specify qdbm granularity and remain backward
+ * compatible the whole dbms are now encoded in only
+ * low 6 bits and remaining qdbms are encoded in the hi 2 bits.
+ * 6 bit signed number ranges from -32 - 31.
+ *
+ * Examples:
+ * 0x1 = 1 db,
+ * 0xc1 = 1.75 db (1 + 3 quarters),
+ * 0x3f = -1 (-1 + 0 quarters),
+ * 0x7f = -.75 (-1 + 1 quarters) = -3 qdbm.
+ * 0xbf = -.50 (-1 + 2 quarters) = -2 qdbm.
+ */
+ gain = wlc->band->antgain & 0x3f;
+ gain <<= 2; /* Sign extend */
+ gain >>= 2;
+ fract = (wlc->band->antgain & 0xc0) >> 6;
+ wlc->band->antgain = 4 * gain + fract;
+ }
+}
+
+static bool brcms_c_attach_stf_ant_init(struct brcms_c_info *wlc)
+{
+ int aa;
+ uint unit;
+ int bandtype;
+ struct si_pub *sih = wlc->hw->sih;
+
+ unit = wlc->pub->unit;
+ bandtype = wlc->band->bandtype;
+
+ /* get antennas available */
+ if (bandtype == BRCM_BAND_5G)
+ aa = (s8) getintvar(sih, BRCMS_SROM_AA5G);
+ else
+ aa = (s8) getintvar(sih, BRCMS_SROM_AA2G);
+
+ if ((aa < 1) || (aa > 15)) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: Invalid antennas available in"
+ " srom (0x%x), using 3\n", unit, __func__, aa);
+ aa = 3;
+ }
+
+ /* reset the defaults if we have a single antenna */
+ if (aa == 1) {
+ wlc->stf->ant_rx_ovr = ANT_RX_DIV_FORCE_0;
+ wlc->stf->txant = ANT_TX_FORCE_0;
+ } else if (aa == 2) {
+ wlc->stf->ant_rx_ovr = ANT_RX_DIV_FORCE_1;
+ wlc->stf->txant = ANT_TX_FORCE_1;
+ } else {
+ }
+
+ /* Compute Antenna Gain */
+ if (bandtype == BRCM_BAND_5G)
+ wlc->band->antgain = (s8) getintvar(sih, BRCMS_SROM_AG1);
+ else
+ wlc->band->antgain = (s8) getintvar(sih, BRCMS_SROM_AG0);
+
+ brcms_c_attach_antgain_init(wlc);
+
+ return true;
+}
+
+static void brcms_c_bss_default_init(struct brcms_c_info *wlc)
+{
+ u16 chanspec;
+ struct brcms_band *band;
+ struct brcms_bss_info *bi = wlc->default_bss;
+
+ /* init default and target BSS with some sane initial values */
+ memset((char *)(bi), 0, sizeof(struct brcms_bss_info));
+ bi->beacon_period = BEACON_INTERVAL_DEFAULT;
+
+ /* fill the default channel as the first valid channel
+ * starting from the 2G channels
+ */
+ chanspec = ch20mhz_chspec(1);
+ wlc->home_chanspec = bi->chanspec = chanspec;
+
+ /* find the band of our default channel */
+ band = wlc->band;
+ if (wlc->pub->_nbands > 1 &&
+ band->bandunit != chspec_bandunit(chanspec))
+ band = wlc->bandstate[OTHERBANDUNIT(wlc)];
+
+ /* init bss rates to the band specific default rate set */
+ brcms_c_rateset_default(&bi->rateset, NULL, band->phytype,
+ band->bandtype, false, BRCMS_RATE_MASK_FULL,
+ (bool) (wlc->pub->_n_enab & SUPPORT_11N),
+ brcms_chspec_bw(chanspec), wlc->stf->txstreams);
+
+ if (wlc->pub->_n_enab & SUPPORT_11N)
+ bi->flags |= BRCMS_BSS_HT;
+}
+
+static struct brcms_txq_info *brcms_c_txq_alloc(struct brcms_c_info *wlc)
+{
+ struct brcms_txq_info *qi, *p;
+
+ qi = kzalloc(sizeof(struct brcms_txq_info), GFP_ATOMIC);
+ if (qi != NULL) {
+ /*
+ * Have enough room for control packets along with HI watermark
+ * Also, add room to txq for total psq packets if all the SCBs
+ * leave PS mode. The watermark for flowcontrol to OS packets
+ * will remain the same
+ */
+ brcmu_pktq_init(&qi->q, BRCMS_PREC_COUNT,
+ 2 * BRCMS_DATAHIWAT + PKTQ_LEN_DEFAULT);
+
+ /* add this queue to the the global list */
+ p = wlc->tx_queues;
+ if (p == NULL) {
+ wlc->tx_queues = qi;
+ } else {
+ while (p->next != NULL)
+ p = p->next;
+ p->next = qi;
+ }
+ }
+ return qi;
+}
+
+static void brcms_c_txq_free(struct brcms_c_info *wlc,
+ struct brcms_txq_info *qi)
+{
+ struct brcms_txq_info *p;
+
+ if (qi == NULL)
+ return;
+
+ /* remove the queue from the linked list */
+ p = wlc->tx_queues;
+ if (p == qi)
+ wlc->tx_queues = p->next;
+ else {
+ while (p != NULL && p->next != qi)
+ p = p->next;
+ if (p != NULL)
+ p->next = p->next->next;
+ }
+
+ kfree(qi);
+}
+
+static void brcms_c_update_mimo_band_bwcap(struct brcms_c_info *wlc, u8 bwcap)
+{
+ uint i;
+ struct brcms_band *band;
+
+ for (i = 0; i < wlc->pub->_nbands; i++) {
+ band = wlc->bandstate[i];
+ if (band->bandtype == BRCM_BAND_5G) {
+ if ((bwcap == BRCMS_N_BW_40ALL)
+ || (bwcap == BRCMS_N_BW_20IN2G_40IN5G))
+ band->mimo_cap_40 = true;
+ else
+ band->mimo_cap_40 = false;
+ } else {
+ if (bwcap == BRCMS_N_BW_40ALL)
+ band->mimo_cap_40 = true;
+ else
+ band->mimo_cap_40 = false;
+ }
+ }
+}
+
+static void brcms_c_timers_deinit(struct brcms_c_info *wlc)
+{
+ /* free timer state */
+ if (wlc->wdtimer) {
+ brcms_free_timer(wlc->wdtimer);
+ wlc->wdtimer = NULL;
+ }
+ if (wlc->radio_timer) {
+ brcms_free_timer(wlc->radio_timer);
+ wlc->radio_timer = NULL;
+ }
+}
+
+static void brcms_c_detach_module(struct brcms_c_info *wlc)
+{
+ if (wlc->asi) {
+ brcms_c_antsel_detach(wlc->asi);
+ wlc->asi = NULL;
+ }
+
+ if (wlc->ampdu) {
+ brcms_c_ampdu_detach(wlc->ampdu);
+ wlc->ampdu = NULL;
+ }
+
+ brcms_c_stf_detach(wlc);
+}
+
+/*
+ * low level detach
+ */
+static int brcms_b_detach(struct brcms_c_info *wlc)
+{
+ uint i;
+ struct brcms_hw_band *band;
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ int callbacks;
+
+ callbacks = 0;
+
+ if (wlc_hw->sih) {
+ /*
+ * detach interrupt sync mechanism since interrupt is disabled
+ * and per-port interrupt object may has been freed. this must
+ * be done before sb core switch
+ */
+ ai_deregister_intr_callback(wlc_hw->sih);
+ ai_pci_sleep(wlc_hw->sih);
+ }
+
+ brcms_b_detach_dmapio(wlc_hw);
+
+ band = wlc_hw->band;
+ for (i = 0; i < wlc_hw->_nbands; i++) {
+ if (band->pi) {
+ /* Detach this band's phy */
+ wlc_phy_detach(band->pi);
+ band->pi = NULL;
+ }
+ band = wlc_hw->bandstate[OTHERBANDUNIT(wlc)];
+ }
+
+ /* Free shared phy state */
+ kfree(wlc_hw->phy_sh);
+
+ wlc_phy_shim_detach(wlc_hw->physhim);
+
+ if (wlc_hw->sih) {
+ ai_detach(wlc_hw->sih);
+ wlc_hw->sih = NULL;
+ }
+
+ return callbacks;
+
+}
+
+/*
+ * Return a count of the number of driver callbacks still pending.
+ *
+ * General policy is that brcms_c_detach can only dealloc/free software states.
+ * It can NOT touch hardware registers since the d11core may be in reset and
+ * clock may not be available.
+ * One exception is sb register access, which is possible if crystal is turned
+ * on after "down" state, driver should avoid software timer with the exception
+ * of radio_monitor.
+ */
+uint brcms_c_detach(struct brcms_c_info *wlc)
+{
+ uint callbacks = 0;
+
+ if (wlc == NULL)
+ return 0;
+
+ BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+
+ callbacks += brcms_b_detach(wlc);
+
+ /* delete software timers */
+ if (!brcms_c_radio_monitor_stop(wlc))
+ callbacks++;
+
+ brcms_c_channel_mgr_detach(wlc->cmi);
+
+ brcms_c_timers_deinit(wlc);
+
+ brcms_c_detach_module(wlc);
+
+
+ while (wlc->tx_queues != NULL)
+ brcms_c_txq_free(wlc, wlc->tx_queues);
+
+ brcms_c_detach_mfree(wlc);
+ return callbacks;
+}
+
+/* update state that depends on the current value of "ap" */
+static void brcms_c_ap_upd(struct brcms_c_info *wlc)
+{
+ /* STA-BSS; short capable */
+ wlc->PLCPHdr_override = BRCMS_PLCP_SHORT;
+
+ /* fixup mpc */
+ wlc->mpc = true;
+}
+
+/* Initialize just the hardware when coming out of POR or S3/S5 system states */
+static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
+{
+ if (wlc_hw->wlc->pub->hw_up)
+ return;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ /*
+ * Enable pll and xtal, initialize the power control registers,
+ * and force fastclock for the remainder of brcms_c_up().
+ */
+ brcms_b_xtal(wlc_hw, ON);
+ ai_clkctl_init(wlc_hw->sih);
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+
+ ai_pci_fixcfg(wlc_hw->sih);
+
+ /*
+ * AI chip doesn't restore bar0win2 on
+ * hibernation/resume, need sw fixup
+ */
+ if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
+ (wlc_hw->sih->chip == BCM43225_CHIP_ID))
+ wlc_hw->regs = (struct d11regs __iomem *)
+ ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
+
+ /*
+ * Inform phy that a POR reset has occurred so
+ * it does a complete phy init
+ */
+ wlc_phy_por_inform(wlc_hw->band->pi);
+
+ wlc_hw->ucode_loaded = false;
+ wlc_hw->wlc->pub->hw_up = true;
+
+ if ((wlc_hw->boardflags & BFL_FEM)
+ && (wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
+ if (!
+ (wlc_hw->boardrev >= 0x1250
+ && (wlc_hw->boardflags & BFL_FEM_BT)))
+ ai_epa_4313war(wlc_hw->sih);
+ }
+}
+
+static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
+{
+ uint coremask;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ /*
+ * Enable pll and xtal, initialize the power control registers,
+ * and force fastclock for the remainder of brcms_c_up().
+ */
+ brcms_b_xtal(wlc_hw, ON);
+ ai_clkctl_init(wlc_hw->sih);
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+
+ /*
+ * Configure pci/pcmcia here instead of in brcms_c_attach()
+ * to allow mfg hotswap: down, hotswap (chip power cycle), up.
+ */
+ coremask = (1 << wlc_hw->wlc->core->coreidx);
+
+ ai_pci_setup(wlc_hw->sih, coremask);
+
+ /*
+ * Need to read the hwradio status here to cover the case where the
+ * system is loaded with the hw radio disabled. We do not want to
+ * bring the driver up in this case.
+ */
+ if (brcms_b_radio_read_hwdisabled(wlc_hw)) {
+ /* put SB PCI in down state again */
+ ai_pci_down(wlc_hw->sih);
+ brcms_b_xtal(wlc_hw, OFF);
+ return -ENOMEDIUM;
+ }
+
+ ai_pci_up(wlc_hw->sih);
+
+ /* reset the d11 core */
+ brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
+
+ return 0;
+}
+
+static int brcms_b_up_finish(struct brcms_hardware *wlc_hw)
+{
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ wlc_hw->up = true;
+ wlc_phy_hw_state_upd(wlc_hw->band->pi, true);
+
+ /* FULLY enable dynamic power control and d11 core interrupt */
+ brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+ brcms_intrson(wlc_hw->wlc->wl);
+ return 0;
+}
+
+/*
+ * Write WME tunable parameters for retransmit/max rate
+ * from wlc struct to ucode
+ */
+static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
+{
+ int ac;
+
+ /* Need clock to do this */
+ if (!wlc->clk)
+ return;
+
+ for (ac = 0; ac < AC_COUNT; ac++)
+ brcms_b_write_shm(wlc->hw, M_AC_TXLMT_ADDR(ac),
+ wlc->wme_retries[ac]);
+}
+
+/* make interface operational */
+int brcms_c_up(struct brcms_c_info *wlc)
+{
+ BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+
+ /* HW is turned off so don't try to access it */
+ if (wlc->pub->hw_off || brcms_deviceremoved(wlc))
+ return -ENOMEDIUM;
+
+ if (!wlc->pub->hw_up) {
+ brcms_b_hw_up(wlc->hw);
+ wlc->pub->hw_up = true;
+ }
+
+ if ((wlc->pub->boardflags & BFL_FEM)
+ && (wlc->pub->sih->chip == BCM4313_CHIP_ID)) {
+ if (wlc->pub->boardrev >= 0x1250
+ && (wlc->pub->boardflags & BFL_FEM_BT))
+ brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
+ MHF5_4313_GPIOCTRL, BRCM_BAND_ALL);
+ else
+ brcms_b_mhf(wlc->hw, MHF4, MHF4_EXTPA_ENABLE,
+ MHF4_EXTPA_ENABLE, BRCM_BAND_ALL);
+ }
+
+ /*
+ * Need to read the hwradio status here to cover the case where the
+ * system is loaded with the hw radio disabled. We do not want to bring
+ * the driver up in this case. If radio is disabled, abort up, lower
+ * power, start radio timer and return 0(for NDIS) don't call
+ * radio_update to avoid looping brcms_c_up.
+ *
+ * brcms_b_up_prep() returns either 0 or -BCME_RADIOOFF only
+ */
+ if (!wlc->pub->radio_disabled) {
+ int status = brcms_b_up_prep(wlc->hw);
+ if (status == -ENOMEDIUM) {
+ if (!mboolisset
+ (wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE)) {
+ struct brcms_bss_cfg *bsscfg = wlc->bsscfg;
+ mboolset(wlc->pub->radio_disabled,
+ WL_RADIO_HW_DISABLE);
+
+ if (bsscfg->enable && bsscfg->BSS)
+ wiphy_err(wlc->wiphy, "wl%d: up"
+ ": rfdisable -> "
+ "bsscfg_disable()\n",
+ wlc->pub->unit);
+ }
+ }
+ }
+
+ if (wlc->pub->radio_disabled) {
+ brcms_c_radio_monitor_start(wlc);
+ return 0;
+ }
+
+ /* brcms_b_up_prep has done brcms_c_corereset(). so clk is on, set it */
+ wlc->clk = true;
+
+ brcms_c_radio_monitor_stop(wlc);
+
+ /* Set EDCF hostflags */
+ brcms_b_mhf(wlc->hw, MHF1, MHF1_EDCF, MHF1_EDCF, BRCM_BAND_ALL);
+
+ brcms_init(wlc->wl);
+ wlc->pub->up = true;
+
+ if (wlc->bandinit_pending) {
+ brcms_c_suspend_mac_and_wait(wlc);
+ brcms_c_set_chanspec(wlc, wlc->default_bss->chanspec);
+ wlc->bandinit_pending = false;
+ brcms_c_enable_mac(wlc);
+ }
+
+ brcms_b_up_finish(wlc->hw);
+
+ /* Program the TX wme params with the current settings */
+ brcms_c_wme_retries_write(wlc);
+
+ /* start one second watchdog timer */
+ brcms_add_timer(wlc->wdtimer, TIMER_INTERVAL_WATCHDOG, true);
+ wlc->WDarmed = true;
+
+ /* ensure antenna config is up to date */
+ brcms_c_stf_phy_txant_upd(wlc);
+ /* ensure LDPC config is in sync */
+ brcms_c_ht_update_ldpc(wlc, wlc->stf->ldpc);
+
+ return 0;
+}
+
+static uint brcms_c_down_del_timer(struct brcms_c_info *wlc)
+{
+ uint callbacks = 0;
+
+ return callbacks;
+}
+
+static int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw)
+{
+ bool dev_gone;
+ uint callbacks = 0;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ if (!wlc_hw->up)
+ return callbacks;
+
+ dev_gone = brcms_deviceremoved(wlc_hw->wlc);
+
+ /* disable interrupts */
+ if (dev_gone)
+ wlc_hw->wlc->macintmask = 0;
+ else {
+ /* now disable interrupts */
+ brcms_intrsoff(wlc_hw->wlc->wl);
+
+ /* ensure we're running on the pll clock again */
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ }
+ /* down phy at the last of this stage */
+ callbacks += wlc_phy_down(wlc_hw->band->pi);
+
+ return callbacks;
+}
+
+static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
+{
+ uint callbacks = 0;
+ bool dev_gone;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+
+ if (!wlc_hw->up)
+ return callbacks;
+
+ wlc_hw->up = false;
+ wlc_phy_hw_state_upd(wlc_hw->band->pi, false);
+
+ dev_gone = brcms_deviceremoved(wlc_hw->wlc);
+
+ if (dev_gone) {
+ wlc_hw->sbclk = false;
+ wlc_hw->clk = false;
+ wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
+
+ /* reclaim any posted packets */
+ brcms_c_flushqueues(wlc_hw->wlc);
+ } else {
+
+ /* Reset and disable the core */
+ if (ai_iscoreup(wlc_hw->sih)) {
+ if (R_REG(&wlc_hw->regs->maccontrol) &
+ MCTL_EN_MAC)
+ brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
+ callbacks += brcms_reset(wlc_hw->wlc->wl);
+ brcms_c_coredisable(wlc_hw);
+ }
+
+ /* turn off primary xtal and pll */
+ if (!wlc_hw->noreset) {
+ ai_pci_down(wlc_hw->sih);
+ brcms_b_xtal(wlc_hw, OFF);
+ }
+ }
+
+ return callbacks;
+}
+
+/*
+ * Mark the interface nonoperational, stop the software mechanisms,
+ * disable the hardware, free any transient buffer state.
+ * Return a count of the number of driver callbacks still pending.
+ */
+uint brcms_c_down(struct brcms_c_info *wlc)
+{
+
+ uint callbacks = 0;
+ int i;
+ bool dev_gone = false;
+ struct brcms_txq_info *qi;
+
+ BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+
+ /* check if we are already in the going down path */
+ if (wlc->going_down) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: Driver going down so return"
+ "\n", wlc->pub->unit, __func__);
+ return 0;
+ }
+ if (!wlc->pub->up)
+ return callbacks;
+
+ /* in between, mpc could try to bring down again.. */
+ wlc->going_down = true;
+
+ callbacks += brcms_b_bmac_down_prep(wlc->hw);
+
+ dev_gone = brcms_deviceremoved(wlc);
+
+ /* Call any registered down handlers */
+ for (i = 0; i < BRCMS_MAXMODULES; i++) {
+ if (wlc->modulecb[i].down_fn)
+ callbacks +=
+ wlc->modulecb[i].down_fn(wlc->modulecb[i].hdl);
+ }
+
+ /* cancel the watchdog timer */
+ if (wlc->WDarmed) {
+ if (!brcms_del_timer(wlc->wdtimer))
+ callbacks++;
+ wlc->WDarmed = false;
+ }
+ /* cancel all other timers */
+ callbacks += brcms_c_down_del_timer(wlc);
+
+ wlc->pub->up = false;
+
+ wlc_phy_mute_upd(wlc->band->pi, false, PHY_MUTE_ALL);
+
+ /* clear txq flow control */
+ brcms_c_txflowcontrol_reset(wlc);
+
+ /* flush tx queues */
+ for (qi = wlc->tx_queues; qi != NULL; qi = qi->next)
+ brcmu_pktq_flush(&qi->q, true, NULL, NULL);
+
+ callbacks += brcms_b_down_finish(wlc->hw);
+
+ /* brcms_b_down_finish has done brcms_c_coredisable(). so clk is off */
+ wlc->clk = false;
+
+ wlc->going_down = false;
+ return callbacks;
+}
+
+/* Set the current gmode configuration */
+int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
+{
+ int ret = 0;
+ uint i;
+ struct brcms_c_rateset rs;
+ /* Default to 54g Auto */
+ /* Advertise and use shortslot (-1/0/1 Auto/Off/On) */
+ s8 shortslot = BRCMS_SHORTSLOT_AUTO;
+ bool shortslot_restrict = false; /* Restrict association to stations
+ * that support shortslot
+ */
+ bool ofdm_basic = false; /* Make 6, 12, and 24 basic rates */
+ /* Advertise and use short preambles (-1/0/1 Auto/Off/On) */
+ int preamble = BRCMS_PLCP_LONG;
+ bool preamble_restrict = false; /* Restrict association to stations
+ * that support short preambles
+ */
+ struct brcms_band *band;
+
+ /* if N-support is enabled, allow Gmode set as long as requested
+ * Gmode is not GMODE_LEGACY_B
+ */
+ if ((wlc->pub->_n_enab & SUPPORT_11N) && gmode == GMODE_LEGACY_B)
+ return -ENOTSUPP;
+
+ /* verify that we are dealing with 2G band and grab the band pointer */
+ if (wlc->band->bandtype == BRCM_BAND_2G)
+ band = wlc->band;
+ else if ((wlc->pub->_nbands > 1) &&
+ (wlc->bandstate[OTHERBANDUNIT(wlc)]->bandtype == BRCM_BAND_2G))
+ band = wlc->bandstate[OTHERBANDUNIT(wlc)];
+ else
+ return -EINVAL;
+
+ /* Legacy or bust when no OFDM is supported by regulatory */
+ if ((brcms_c_channel_locale_flags_in_band(wlc->cmi, band->bandunit) &
+ BRCMS_NO_OFDM) && (gmode != GMODE_LEGACY_B))
+ return -EINVAL;
+
+ /* update configuration value */
+ if (config == true)
+ brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode);
+
+ /* Clear rateset override */
+ memset(&rs, 0, sizeof(struct brcms_c_rateset));
+
+ switch (gmode) {
+ case GMODE_LEGACY_B:
+ shortslot = BRCMS_SHORTSLOT_OFF;
+ brcms_c_rateset_copy(&gphy_legacy_rates, &rs);
+
+ break;
+
+ case GMODE_LRS:
+ break;
+
+ case GMODE_AUTO:
+ /* Accept defaults */
+ break;
+
+ case GMODE_ONLY:
+ ofdm_basic = true;
+ preamble = BRCMS_PLCP_SHORT;
+ preamble_restrict = true;
+ break;
+
+ case GMODE_PERFORMANCE:
+ shortslot = BRCMS_SHORTSLOT_ON;
+ shortslot_restrict = true;
+ ofdm_basic = true;
+ preamble = BRCMS_PLCP_SHORT;
+ preamble_restrict = true;
+ break;
+
+ default:
+ /* Error */
+ wiphy_err(wlc->wiphy, "wl%d: %s: invalid gmode %d\n",
+ wlc->pub->unit, __func__, gmode);
+ return -ENOTSUPP;
+ }
+
+ band->gmode = gmode;
+
+ wlc->shortslot_override = shortslot;
+
+ /* Use the default 11g rateset */
+ if (!rs.count)
+ brcms_c_rateset_copy(&cck_ofdm_rates, &rs);
+
+ if (ofdm_basic) {
+ for (i = 0; i < rs.count; i++) {
+ if (rs.rates[i] == BRCM_RATE_6M
+ || rs.rates[i] == BRCM_RATE_12M
+ || rs.rates[i] == BRCM_RATE_24M)
+ rs.rates[i] |= BRCMS_RATE_FLAG;
+ }
+ }
+
+ /* Set default bss rateset */
+ wlc->default_bss->rateset.count = rs.count;
+ memcpy(wlc->default_bss->rateset.rates, rs.rates,
+ sizeof(wlc->default_bss->rateset.rates));
+
+ return ret;
+}
+
+int brcms_c_set_nmode(struct brcms_c_info *wlc)
+{
+ uint i;
+ s32 nmode = AUTO;
+
+ if (wlc->stf->txstreams == WL_11N_3x3)
+ nmode = WL_11N_3x3;
+ else
+ nmode = WL_11N_2x2;
+
+ /* force GMODE_AUTO if NMODE is ON */
+ brcms_c_set_gmode(wlc, GMODE_AUTO, true);
+ if (nmode == WL_11N_3x3)
+ wlc->pub->_n_enab = SUPPORT_HT;
+ else
+ wlc->pub->_n_enab = SUPPORT_11N;
+ wlc->default_bss->flags |= BRCMS_BSS_HT;
+ /* add the mcs rates to the default and hw ratesets */
+ brcms_c_rateset_mcs_build(&wlc->default_bss->rateset,
+ wlc->stf->txstreams);
+ for (i = 0; i < wlc->pub->_nbands; i++)
+ memcpy(wlc->bandstate[i]->hw_rateset.mcs,
+ wlc->default_bss->rateset.mcs, MCSSET_LEN);
+
+ return 0;
+}
+
+static int
+brcms_c_set_internal_rateset(struct brcms_c_info *wlc,
+ struct brcms_c_rateset *rs_arg)
+{
+ struct brcms_c_rateset rs, new;
+ uint bandunit;
+
+ memcpy(&rs, rs_arg, sizeof(struct brcms_c_rateset));
+
+ /* check for bad count value */
+ if ((rs.count == 0) || (rs.count > BRCMS_NUMRATES))
+ return -EINVAL;
+
+ /* try the current band */
+ bandunit = wlc->band->bandunit;
+ memcpy(&new, &rs, sizeof(struct brcms_c_rateset));
+ if (brcms_c_rate_hwrs_filter_sort_validate
+ (&new, &wlc->bandstate[bandunit]->hw_rateset, true,
+ wlc->stf->txstreams))
+ goto good;
+
+ /* try the other band */
+ if (brcms_is_mband_unlocked(wlc)) {
+ bandunit = OTHERBANDUNIT(wlc);
+ memcpy(&new, &rs, sizeof(struct brcms_c_rateset));
+ if (brcms_c_rate_hwrs_filter_sort_validate(&new,
+ &wlc->
+ bandstate[bandunit]->
+ hw_rateset, true,
+ wlc->stf->txstreams))
+ goto good;
+ }
+
+ return -EBADE;
+
+ good:
+ /* apply new rateset */
+ memcpy(&wlc->default_bss->rateset, &new,
+ sizeof(struct brcms_c_rateset));
+ memcpy(&wlc->bandstate[bandunit]->defrateset, &new,
+ sizeof(struct brcms_c_rateset));
+ return 0;
+}
+
+static void brcms_c_ofdm_rateset_war(struct brcms_c_info *wlc)
+{
+ u8 r;
+ bool war = false;
+
+ if (wlc->bsscfg->associated)
+ r = wlc->bsscfg->current_bss->rateset.rates[0];
+ else
+ r = wlc->default_bss->rateset.rates[0];
+
+ wlc_phy_ofdm_rateset_war(wlc->band->pi, war);
+}
+
+int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel)
+{
+ u16 chspec = ch20mhz_chspec(channel);
+
+ if (channel < 0 || channel > MAXCHANNEL)
+ return -EINVAL;
+
+ if (!brcms_c_valid_chanspec_db(wlc->cmi, chspec))
+ return -EINVAL;
+
+
+ if (!wlc->pub->up && brcms_is_mband_unlocked(wlc)) {
+ if (wlc->band->bandunit != chspec_bandunit(chspec))
+ wlc->bandinit_pending = true;
+ else
+ wlc->bandinit_pending = false;
+ }
+
+ wlc->default_bss->chanspec = chspec;
+ /* brcms_c_BSSinit() will sanitize the rateset before
+ * using it.. */
+ if (wlc->pub->up && (wlc_phy_chanspec_get(wlc->band->pi) != chspec)) {
+ brcms_c_set_home_chanspec(wlc, chspec);
+ brcms_c_suspend_mac_and_wait(wlc);
+ brcms_c_set_chanspec(wlc, chspec);
+ brcms_c_enable_mac(wlc);
+ }
+ return 0;
+}
+
+int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl)
+{
+ int ac;
+
+ if (srl < 1 || srl > RETRY_SHORT_MAX ||
+ lrl < 1 || lrl > RETRY_SHORT_MAX)
+ return -EINVAL;
+
+ wlc->SRL = srl;
+ wlc->LRL = lrl;
+
+ brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
+
+ for (ac = 0; ac < AC_COUNT; ac++) {
+ wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac],
+ EDCF_SHORT, wlc->SRL);
+ wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac],
+ EDCF_LONG, wlc->LRL);
+ }
+ brcms_c_wme_retries_write(wlc);
+
+ return 0;
+}
+
+void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
+ struct brcm_rateset *currs)
+{
+ struct brcms_c_rateset *rs;
+
+ if (wlc->pub->associated)
+ rs = &wlc->bsscfg->current_bss->rateset;
+ else
+ rs = &wlc->default_bss->rateset;
+
+ /* Copy only legacy rateset section */
+ currs->count = rs->count;
+ memcpy(&currs->rates, &rs->rates, rs->count);
+}
+
+int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs)
+{
+ struct brcms_c_rateset internal_rs;
+ int bcmerror;
+
+ if (rs->count > BRCMS_NUMRATES)
+ return -ENOBUFS;
+
+ memset(&internal_rs, 0, sizeof(struct brcms_c_rateset));
+
+ /* Copy only legacy rateset section */
+ internal_rs.count = rs->count;
+ memcpy(&internal_rs.rates, &rs->rates, internal_rs.count);
+
+ /* merge rateset coming in with the current mcsset */
+ if (wlc->pub->_n_enab & SUPPORT_11N) {
+ struct brcms_bss_info *mcsset_bss;
+ if (wlc->bsscfg->associated)
+ mcsset_bss = wlc->bsscfg->current_bss;
+ else
+ mcsset_bss = wlc->default_bss;
+ memcpy(internal_rs.mcs, &mcsset_bss->rateset.mcs[0],
+ MCSSET_LEN);
+ }
+
+ bcmerror = brcms_c_set_internal_rateset(wlc, &internal_rs);
+ if (!bcmerror)
+ brcms_c_ofdm_rateset_war(wlc);
+
+ return bcmerror;
+}
+
+int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period)
+{
+ if (period < DOT11_MIN_BEACON_PERIOD ||
+ period > DOT11_MAX_BEACON_PERIOD)
+ return -EINVAL;
+
+ wlc->default_bss->beacon_period = period;
+ return 0;
+}
+
+u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx)
+{
+ return wlc->band->phytype;
+}
+
+void brcms_c_set_shortslot_override(struct brcms_c_info *wlc, s8 sslot_override)
+{
+ wlc->shortslot_override = sslot_override;
+
+ /*
+ * shortslot is an 11g feature, so no more work if we are
+ * currently on the 5G band
+ */
+ if (wlc->band->bandtype == BRCM_BAND_5G)
+ return;
+
+ if (wlc->pub->up && wlc->pub->associated) {
+ /* let watchdog or beacon processing update shortslot */
+ } else if (wlc->pub->up) {
+ /* unassociated shortslot is off */
+ brcms_c_switch_shortslot(wlc, false);
+ } else {
+ /* driver is down, so just update the brcms_c_info
+ * value */
+ if (wlc->shortslot_override == BRCMS_SHORTSLOT_AUTO)
+ wlc->shortslot = false;
+ else
+ wlc->shortslot =
+ (wlc->shortslot_override ==
+ BRCMS_SHORTSLOT_ON);
+ }
+}
+
+/*
+ * register watchdog and down handlers.
+ */
+int brcms_c_module_register(struct brcms_pub *pub,
+ const char *name, struct brcms_info *hdl,
+ int (*d_fn)(void *handle))
+{
+ struct brcms_c_info *wlc = (struct brcms_c_info *) pub->wlc;
+ int i;
+
+ /* find an empty entry and just add, no duplication check! */
+ for (i = 0; i < BRCMS_MAXMODULES; i++) {
+ if (wlc->modulecb[i].name[0] == '\0') {
+ strncpy(wlc->modulecb[i].name, name,
+ sizeof(wlc->modulecb[i].name) - 1);
+ wlc->modulecb[i].hdl = hdl;
+ wlc->modulecb[i].down_fn = d_fn;
+ return 0;
+ }
+ }
+
+ return -ENOSR;
+}
+
+/* unregister module callbacks */
+int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
+ struct brcms_info *hdl)
+{
+ struct brcms_c_info *wlc = (struct brcms_c_info *) pub->wlc;
+ int i;
+
+ if (wlc == NULL)
+ return -ENODATA;
+
+ for (i = 0; i < BRCMS_MAXMODULES; i++) {
+ if (!strcmp(wlc->modulecb[i].name, name) &&
+ (wlc->modulecb[i].hdl == hdl)) {
+ memset(&wlc->modulecb[i], 0, sizeof(struct modulecb));
+ return 0;
+ }
+ }
+
+ /* table not found! */
+ return -ENODATA;
+}
+
+#ifdef BCMDBG
+static const char * const supr_reason[] = {
+ "None", "PMQ Entry", "Flush request",
+ "Previous frag failure", "Channel mismatch",
+ "Lifetime Expiry", "Underflow"
+};
+
+static void brcms_c_print_txs_status(u16 s)
+{
+ printk(KERN_DEBUG "[15:12] %d frame attempts\n",
+ (s & TX_STATUS_FRM_RTX_MASK) >> TX_STATUS_FRM_RTX_SHIFT);
+ printk(KERN_DEBUG " [11:8] %d rts attempts\n",
+ (s & TX_STATUS_RTS_RTX_MASK) >> TX_STATUS_RTS_RTX_SHIFT);
+ printk(KERN_DEBUG " [7] %d PM mode indicated\n",
+ ((s & TX_STATUS_PMINDCTD) ? 1 : 0));
+ printk(KERN_DEBUG " [6] %d intermediate status\n",
+ ((s & TX_STATUS_INTERMEDIATE) ? 1 : 0));
+ printk(KERN_DEBUG " [5] %d AMPDU\n",
+ (s & TX_STATUS_AMPDU) ? 1 : 0);
+ printk(KERN_DEBUG " [4:2] %d Frame Suppressed Reason (%s)\n",
+ ((s & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT),
+ supr_reason[(s & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT]);
+ printk(KERN_DEBUG " [1] %d acked\n",
+ ((s & TX_STATUS_ACK_RCV) ? 1 : 0));
+}
+#endif /* BCMDBG */
+
+void brcms_c_print_txstatus(struct tx_status *txs)
+{
+#if defined(BCMDBG)
+ u16 s = txs->status;
+ u16 ackphyrxsh = txs->ackphyrxsh;
+
+ printk(KERN_DEBUG "\ntxpkt (MPDU) Complete\n");
+
+ printk(KERN_DEBUG "FrameID: %04x ", txs->frameid);
+ printk(KERN_DEBUG "TxStatus: %04x", s);
+ printk(KERN_DEBUG "\n");
+
+ brcms_c_print_txs_status(s);
+
+ printk(KERN_DEBUG "LastTxTime: %04x ", txs->lasttxtime);
+ printk(KERN_DEBUG "Seq: %04x ", txs->sequence);
+ printk(KERN_DEBUG "PHYTxStatus: %04x ", txs->phyerr);
+ printk(KERN_DEBUG "RxAckRSSI: %04x ",
+ (ackphyrxsh & PRXS1_JSSI_MASK) >> PRXS1_JSSI_SHIFT);
+ printk(KERN_DEBUG "RxAckSQ: %04x",
+ (ackphyrxsh & PRXS1_SQ_MASK) >> PRXS1_SQ_SHIFT);
+ printk(KERN_DEBUG "\n");
+#endif /* defined(BCMDBG) */
+}
+
+bool brcms_c_chipmatch(u16 vendor, u16 device)
+{
+ if (vendor != PCI_VENDOR_ID_BROADCOM) {
+ pr_err("chipmatch: unknown vendor id %04x\n", vendor);
+ return false;
+ }
+
+ if (device == BCM43224_D11N_ID_VEN1)
+ return true;
+ if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID))
+ return true;
+ if (device == BCM4313_D11N2G_ID)
+ return true;
+ if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
+ return true;
+
+ pr_err("chipmatch: unknown device id %04x\n", device);
+ return false;
+}
+
+#if defined(BCMDBG)
+void brcms_c_print_txdesc(struct d11txh *txh)
+{
+ u16 mtcl = le16_to_cpu(txh->MacTxControlLow);
+ u16 mtch = le16_to_cpu(txh->MacTxControlHigh);
+ u16 mfc = le16_to_cpu(txh->MacFrameControl);
+ u16 tfest = le16_to_cpu(txh->TxFesTimeNormal);
+ u16 ptcw = le16_to_cpu(txh->PhyTxControlWord);
+ u16 ptcw_1 = le16_to_cpu(txh->PhyTxControlWord_1);
+ u16 ptcw_1_Fbr = le16_to_cpu(txh->PhyTxControlWord_1_Fbr);
+ u16 ptcw_1_Rts = le16_to_cpu(txh->PhyTxControlWord_1_Rts);
+ u16 ptcw_1_FbrRts = le16_to_cpu(txh->PhyTxControlWord_1_FbrRts);
+ u16 mainrates = le16_to_cpu(txh->MainRates);
+ u16 xtraft = le16_to_cpu(txh->XtraFrameTypes);
+ u8 *iv = txh->IV;
+ u8 *ra = txh->TxFrameRA;
+ u16 tfestfb = le16_to_cpu(txh->TxFesTimeFallback);
+ u8 *rtspfb = txh->RTSPLCPFallback;
+ u16 rtsdfb = le16_to_cpu(txh->RTSDurFallback);
+ u8 *fragpfb = txh->FragPLCPFallback;
+ u16 fragdfb = le16_to_cpu(txh->FragDurFallback);
+ u16 mmodelen = le16_to_cpu(txh->MModeLen);
+ u16 mmodefbrlen = le16_to_cpu(txh->MModeFbrLen);
+ u16 tfid = le16_to_cpu(txh->TxFrameID);
+ u16 txs = le16_to_cpu(txh->TxStatus);
+ u16 mnmpdu = le16_to_cpu(txh->MaxNMpdus);
+ u16 mabyte = le16_to_cpu(txh->MaxABytes_MRT);
+ u16 mabyte_f = le16_to_cpu(txh->MaxABytes_FBR);
+ u16 mmbyte = le16_to_cpu(txh->MinMBytes);
+
+ u8 *rtsph = txh->RTSPhyHeader;
+ struct ieee80211_rts rts = txh->rts_frame;
+ char hexbuf[256];
+
+ /* add plcp header along with txh descriptor */
+ printk(KERN_DEBUG "Raw TxDesc + plcp header:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ txh, sizeof(struct d11txh) + 48);
+
+ printk(KERN_DEBUG "TxCtlLow: %04x ", mtcl);
+ printk(KERN_DEBUG "TxCtlHigh: %04x ", mtch);
+ printk(KERN_DEBUG "FC: %04x ", mfc);
+ printk(KERN_DEBUG "FES Time: %04x\n", tfest);
+ printk(KERN_DEBUG "PhyCtl: %04x%s ", ptcw,
+ (ptcw & PHY_TXC_SHORT_HDR) ? " short" : "");
+ printk(KERN_DEBUG "PhyCtl_1: %04x ", ptcw_1);
+ printk(KERN_DEBUG "PhyCtl_1_Fbr: %04x\n", ptcw_1_Fbr);
+ printk(KERN_DEBUG "PhyCtl_1_Rts: %04x ", ptcw_1_Rts);
+ printk(KERN_DEBUG "PhyCtl_1_Fbr_Rts: %04x\n", ptcw_1_FbrRts);
+ printk(KERN_DEBUG "MainRates: %04x ", mainrates);
+ printk(KERN_DEBUG "XtraFrameTypes: %04x ", xtraft);
+ printk(KERN_DEBUG "\n");
+
+ brcmu_format_hex(hexbuf, iv, sizeof(txh->IV));
+ printk(KERN_DEBUG "SecIV: %s\n", hexbuf);
+ brcmu_format_hex(hexbuf, ra, sizeof(txh->TxFrameRA));
+ printk(KERN_DEBUG "RA: %s\n", hexbuf);
+
+ printk(KERN_DEBUG "Fb FES Time: %04x ", tfestfb);
+ brcmu_format_hex(hexbuf, rtspfb, sizeof(txh->RTSPLCPFallback));
+ printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
+ printk(KERN_DEBUG "RTS DUR: %04x ", rtsdfb);
+ brcmu_format_hex(hexbuf, fragpfb, sizeof(txh->FragPLCPFallback));
+ printk(KERN_DEBUG "PLCP: %s ", hexbuf);
+ printk(KERN_DEBUG "DUR: %04x", fragdfb);
+ printk(KERN_DEBUG "\n");
+
+ printk(KERN_DEBUG "MModeLen: %04x ", mmodelen);
+ printk(KERN_DEBUG "MModeFbrLen: %04x\n", mmodefbrlen);
+
+ printk(KERN_DEBUG "FrameID: %04x\n", tfid);
+ printk(KERN_DEBUG "TxStatus: %04x\n", txs);
+
+ printk(KERN_DEBUG "MaxNumMpdu: %04x\n", mnmpdu);
+ printk(KERN_DEBUG "MaxAggbyte: %04x\n", mabyte);
+ printk(KERN_DEBUG "MaxAggbyte_fb: %04x\n", mabyte_f);
+ printk(KERN_DEBUG "MinByte: %04x\n", mmbyte);
+
+ brcmu_format_hex(hexbuf, rtsph, sizeof(txh->RTSPhyHeader));
+ printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
+ brcmu_format_hex(hexbuf, (u8 *) &rts, sizeof(txh->rts_frame));
+ printk(KERN_DEBUG "RTS Frame: %s", hexbuf);
+ printk(KERN_DEBUG "\n");
+}
+#endif /* defined(BCMDBG) */
+
+#if defined(BCMDBG)
+int
+brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf,
+ int len)
+{
+ int i;
+ char *p = buf;
+ char hexstr[16];
+ int slen = 0, nlen = 0;
+ u32 bit;
+ const char *name;
+
+ if (len < 2 || !buf)
+ return 0;
+
+ buf[0] = '\0';
+
+ for (i = 0; flags != 0; i++) {
+ bit = bd[i].bit;
+ name = bd[i].name;
+ if (bit == 0 && flags != 0) {
+ /* print any unnamed bits */
+ snprintf(hexstr, 16, "0x%X", flags);
+ name = hexstr;
+ flags = 0; /* exit loop */
+ } else if ((flags & bit) == 0)
+ continue;
+ flags &= ~bit;
+ nlen = strlen(name);
+ slen += nlen;
+ /* count btwn flag space */
+ if (flags != 0)
+ slen += 1;
+ /* need NULL char as well */
+ if (len <= slen)
+ break;
+ /* copy NULL char but don't count it */
+ strncpy(p, name, nlen + 1);
+ p += nlen;
+ /* copy btwn flag space and NULL char */
+ if (flags != 0)
+ p += snprintf(p, 2, " ");
+ len -= slen;
+ }
+
+ /* indicate the str was too short */
+ if (flags != 0) {
+ if (len < 2)
+ p -= 2 - len; /* overwrite last char */
+ p += snprintf(p, 2, ">");
+ }
+
+ return (int)(p - buf);
+}
+#endif /* defined(BCMDBG) */
+
+#if defined(BCMDBG)
+void brcms_c_print_rxh(struct d11rxhdr *rxh)
+{
+ u16 len = rxh->RxFrameSize;
+ u16 phystatus_0 = rxh->PhyRxStatus_0;
+ u16 phystatus_1 = rxh->PhyRxStatus_1;
+ u16 phystatus_2 = rxh->PhyRxStatus_2;
+ u16 phystatus_3 = rxh->PhyRxStatus_3;
+ u16 macstatus1 = rxh->RxStatus1;
+ u16 macstatus2 = rxh->RxStatus2;
+ char flagstr[64];
+ char lenbuf[20];
+ static const struct brcms_c_bit_desc macstat_flags[] = {
+ {RXS_FCSERR, "FCSErr"},
+ {RXS_RESPFRAMETX, "Reply"},
+ {RXS_PBPRES, "PADDING"},
+ {RXS_DECATMPT, "DeCr"},
+ {RXS_DECERR, "DeCrErr"},
+ {RXS_BCNSENT, "Bcn"},
+ {0, NULL}
+ };
+
+ printk(KERN_DEBUG "Raw RxDesc:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, rxh,
+ sizeof(struct d11rxhdr));
+
+ brcms_c_format_flags(macstat_flags, macstatus1, flagstr, 64);
+
+ snprintf(lenbuf, sizeof(lenbuf), "0x%x", len);
+
+ printk(KERN_DEBUG "RxFrameSize: %6s (%d)%s\n", lenbuf, len,
+ (rxh->PhyRxStatus_0 & PRXS0_SHORTH) ? " short preamble" : "");
+ printk(KERN_DEBUG "RxPHYStatus: %04x %04x %04x %04x\n",
+ phystatus_0, phystatus_1, phystatus_2, phystatus_3);
+ printk(KERN_DEBUG "RxMACStatus: %x %s\n", macstatus1, flagstr);
+ printk(KERN_DEBUG "RXMACaggtype: %x\n",
+ (macstatus2 & RXS_AGGTYPE_MASK));
+ printk(KERN_DEBUG "RxTSFTime: %04x\n", rxh->RxTSFTime);
+}
+#endif /* defined(BCMDBG) */
+
+u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate)
+{
+ u16 table_ptr;
+ u8 phy_rate, index;
+
+ /* get the phy specific rate encoding for the PLCP SIGNAL field */
+ if (is_ofdm_rate(rate))
+ table_ptr = M_RT_DIRMAP_A;
+ else
+ table_ptr = M_RT_DIRMAP_B;
+
+ /* for a given rate, the LS-nibble of the PLCP SIGNAL field is
+ * the index into the rate table.
+ */
+ phy_rate = rate_info[rate] & BRCMS_RATE_MASK;
+ index = phy_rate & 0xf;
+
+ /* Find the SHM pointer to the rate table entry by looking in the
+ * Direct-map Table
+ */
+ return 2 * brcms_b_read_shm(wlc_hw, table_ptr + (index * 2));
+}
+
+static bool
+brcms_c_prec_enq_head(struct brcms_c_info *wlc, struct pktq *q,
+ struct sk_buff *pkt, int prec, bool head)
+{
+ struct sk_buff *p;
+ int eprec = -1; /* precedence to evict from */
+
+ /* Determine precedence from which to evict packet, if any */
+ if (pktq_pfull(q, prec))
+ eprec = prec;
+ else if (pktq_full(q)) {
+ p = brcmu_pktq_peek_tail(q, &eprec);
+ if (eprec > prec) {
+ wiphy_err(wlc->wiphy, "%s: Failing: eprec %d > prec %d"
+ "\n", __func__, eprec, prec);
+ return false;
+ }
+ }
+
+ /* Evict if needed */
+ if (eprec >= 0) {
+ bool discard_oldest;
+
+ discard_oldest = ac_bitmap_tst(0, eprec);
+
+ /* Refuse newer packet unless configured to discard oldest */
+ if (eprec == prec && !discard_oldest) {
+ wiphy_err(wlc->wiphy, "%s: No where to go, prec == %d"
+ "\n", __func__, prec);
+ return false;
+ }
+
+ /* Evict packet according to discard policy */
+ p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) :
+ brcmu_pktq_pdeq_tail(q, eprec);
+ brcmu_pkt_buf_free_skb(p);
+ }
+
+ /* Enqueue */
+ if (head)
+ p = brcmu_pktq_penq_head(q, prec, pkt);
+ else
+ p = brcmu_pktq_penq(q, prec, pkt);
+
+ return true;
+}
+
+/*
+ * Attempts to queue a packet onto a multiple-precedence queue,
+ * if necessary evicting a lower precedence packet from the queue.
+ *
+ * 'prec' is the precedence number that has already been mapped
+ * from the packet priority.
+ *
+ * Returns true if packet consumed (queued), false if not.
+ */
+static bool brcms_c_prec_enq(struct brcms_c_info *wlc, struct pktq *q,
+ struct sk_buff *pkt, int prec)
+{
+ return brcms_c_prec_enq_head(wlc, q, pkt, prec, false);
+}
+
+void brcms_c_txq_enq(struct brcms_c_info *wlc, struct scb *scb,
+ struct sk_buff *sdu, uint prec)
+{
+ struct brcms_txq_info *qi = wlc->pkt_queue; /* Check me */
+ struct pktq *q = &qi->q;
+ int prio;
+
+ prio = sdu->priority;
+
+ if (!brcms_c_prec_enq(wlc, q, sdu, prec)) {
+ /*
+ * we might hit this condtion in case
+ * packet flooding from mac80211 stack
+ */
+ brcmu_pkt_buf_free_skb(sdu);
+ }
+}
+
+/*
+ * bcmc_fid_generate:
+ * Generate frame ID for a BCMC packet. The frag field is not used
+ * for MC frames so is used as part of the sequence number.
+ */
+static inline u16
+bcmc_fid_generate(struct brcms_c_info *wlc, struct brcms_bss_cfg *bsscfg,
+ struct d11txh *txh)
+{
+ u16 frameid;
+
+ frameid = le16_to_cpu(txh->TxFrameID) & ~(TXFID_SEQ_MASK |
+ TXFID_QUEUE_MASK);
+ frameid |=
+ (((wlc->
+ mc_fid_counter++) << TXFID_SEQ_SHIFT) & TXFID_SEQ_MASK) |
+ TX_BCMC_FIFO;
+
+ return frameid;
+}
+
+static uint
+brcms_c_calc_ack_time(struct brcms_c_info *wlc, u32 rspec,
+ u8 preamble_type)
+{
+ uint dur = 0;
+
+ BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d\n",
+ wlc->pub->unit, rspec, preamble_type);
+ /*
+ * Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that
+ * is less than or equal to the rate of the immediately previous
+ * frame in the FES
+ */
+ rspec = brcms_basic_rate(wlc, rspec);
+ /* ACK frame len == 14 == 2(fc) + 2(dur) + 6(ra) + 4(fcs) */
+ dur =
+ brcms_c_calc_frame_time(wlc, rspec, preamble_type,
+ (DOT11_ACK_LEN + FCS_LEN));
+ return dur;
+}
+
+static uint
+brcms_c_calc_cts_time(struct brcms_c_info *wlc, u32 rspec,
+ u8 preamble_type)
+{
+ BCMMSG(wlc->wiphy, "wl%d: ratespec 0x%x, preamble_type %d\n",
+ wlc->pub->unit, rspec, preamble_type);
+ return brcms_c_calc_ack_time(wlc, rspec, preamble_type);
+}
+
+static uint
+brcms_c_calc_ba_time(struct brcms_c_info *wlc, u32 rspec,
+ u8 preamble_type)
+{
+ BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, "
+ "preamble_type %d\n", wlc->pub->unit, rspec, preamble_type);
+ /*
+ * Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that
+ * is less than or equal to the rate of the immediately previous
+ * frame in the FES
+ */
+ rspec = brcms_basic_rate(wlc, rspec);
+ /* BA len == 32 == 16(ctl hdr) + 4(ba len) + 8(bitmap) + 4(fcs) */
+ return brcms_c_calc_frame_time(wlc, rspec, preamble_type,
+ (DOT11_BA_LEN + DOT11_BA_BITMAP_LEN +
+ FCS_LEN));
+}
+
+/* brcms_c_compute_frame_dur()
+ *
+ * Calculate the 802.11 MAC header DUR field for MPDU
+ * DUR for a single frame = 1 SIFS + 1 ACK
+ * DUR for a frame with following frags = 3 SIFS + 2 ACK + next frag time
+ *
+ * rate MPDU rate in unit of 500kbps
+ * next_frag_len next MPDU length in bytes
+ * preamble_type use short/GF or long/MM PLCP header
+ */
+static u16
+brcms_c_compute_frame_dur(struct brcms_c_info *wlc, u32 rate,
+ u8 preamble_type, uint next_frag_len)
+{
+ u16 dur, sifs;
+
+ sifs = get_sifs(wlc->band);
+
+ dur = sifs;
+ dur += (u16) brcms_c_calc_ack_time(wlc, rate, preamble_type);
+
+ if (next_frag_len) {
+ /* Double the current DUR to get 2 SIFS + 2 ACKs */
+ dur *= 2;
+ /* add another SIFS and the frag time */
+ dur += sifs;
+ dur +=
+ (u16) brcms_c_calc_frame_time(wlc, rate, preamble_type,
+ next_frag_len);
+ }
+ return dur;
+}
+
+/* The opposite of brcms_c_calc_frame_time */
+static uint
+brcms_c_calc_frame_len(struct brcms_c_info *wlc, u32 ratespec,
+ u8 preamble_type, uint dur)
+{
+ uint nsyms, mac_len, Ndps, kNdps;
+ uint rate = rspec2rate(ratespec);
+
+ BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d, dur %d\n",
+ wlc->pub->unit, ratespec, preamble_type, dur);
+
+ if (is_mcs_rate(ratespec)) {
+ uint mcs = ratespec & RSPEC_RATE_MASK;
+ int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec);
+ dur -= PREN_PREAMBLE + (tot_streams * PREN_PREAMBLE_EXT);
+ /* payload calculation matches that of regular ofdm */
+ if (wlc->band->bandtype == BRCM_BAND_2G)
+ dur -= DOT11_OFDM_SIGNAL_EXTENSION;
+ /* kNdbps = kbps * 4 */
+ kNdps = mcs_2_rate(mcs, rspec_is40mhz(ratespec),
+ rspec_issgi(ratespec)) * 4;
+ nsyms = dur / APHY_SYMBOL_TIME;
+ mac_len =
+ ((nsyms * kNdps) -
+ ((APHY_SERVICE_NBITS + APHY_TAIL_NBITS) * 1000)) / 8000;
+ } else if (is_ofdm_rate(ratespec)) {
+ dur -= APHY_PREAMBLE_TIME;
+ dur -= APHY_SIGNAL_TIME;
+ /* Ndbps = Mbps * 4 = rate(500Kbps) * 2 */
+ Ndps = rate * 2;
+ nsyms = dur / APHY_SYMBOL_TIME;
+ mac_len =
+ ((nsyms * Ndps) -
+ (APHY_SERVICE_NBITS + APHY_TAIL_NBITS)) / 8;
+ } else {
+ if (preamble_type & BRCMS_SHORT_PREAMBLE)
+ dur -= BPHY_PLCP_SHORT_TIME;
+ else
+ dur -= BPHY_PLCP_TIME;
+ mac_len = dur * rate;
+ /* divide out factor of 2 in rate (1/2 mbps) */
+ mac_len = mac_len / 8 / 2;
+ }
+ return mac_len;
+}
+
+/*
+ * Return true if the specified rate is supported by the specified band.
+ * BRCM_BAND_AUTO indicates the current band.
+ */
+static bool brcms_c_valid_rate(struct brcms_c_info *wlc, u32 rspec, int band,
+ bool verbose)
+{
+ struct brcms_c_rateset *hw_rateset;
+ uint i;
+
+ if ((band == BRCM_BAND_AUTO) || (band == wlc->band->bandtype))
+ hw_rateset = &wlc->band->hw_rateset;
+ else if (wlc->pub->_nbands > 1)
+ hw_rateset = &wlc->bandstate[OTHERBANDUNIT(wlc)]->hw_rateset;
+ else
+ /* other band specified and we are a single band device */
+ return false;
+
+ /* check if this is a mimo rate */
+ if (is_mcs_rate(rspec)) {
+ if ((rspec & RSPEC_RATE_MASK) >= MCS_TABLE_SIZE)
+ goto error;
+
+ return isset(hw_rateset->mcs, (rspec & RSPEC_RATE_MASK));
+ }
+
+ for (i = 0; i < hw_rateset->count; i++)
+ if (hw_rateset->rates[i] == rspec2rate(rspec))
+ return true;
+ error:
+ if (verbose)
+ wiphy_err(wlc->wiphy, "wl%d: valid_rate: rate spec 0x%x "
+ "not in hw_rateset\n", wlc->pub->unit, rspec);
+
+ return false;
+}
+
+static u32
+mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
+ u32 int_val)
+{
+ u8 stf = (int_val & NRATE_STF_MASK) >> NRATE_STF_SHIFT;
+ u8 rate = int_val & NRATE_RATE_MASK;
+ u32 rspec;
+ bool ismcs = ((int_val & NRATE_MCS_INUSE) == NRATE_MCS_INUSE);
+ bool issgi = ((int_val & NRATE_SGI_MASK) >> NRATE_SGI_SHIFT);
+ bool override_mcs_only = ((int_val & NRATE_OVERRIDE_MCS_ONLY)
+ == NRATE_OVERRIDE_MCS_ONLY);
+ int bcmerror = 0;
+
+ if (!ismcs)
+ return (u32) rate;
+
+ /* validate the combination of rate/mcs/stf is allowed */
+ if ((wlc->pub->_n_enab & SUPPORT_11N) && ismcs) {
+ /* mcs only allowed when nmode */
+ if (stf > PHY_TXC1_MODE_SDM) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: Invalid stf\n",
+ wlc->pub->unit, __func__);
+ bcmerror = -EINVAL;
+ goto done;
+ }
+
+ /* mcs 32 is a special case, DUP mode 40 only */
+ if (rate == 32) {
+ if (!CHSPEC_IS40(wlc->home_chanspec) ||
+ ((stf != PHY_TXC1_MODE_SISO)
+ && (stf != PHY_TXC1_MODE_CDD))) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: Invalid mcs "
+ "32\n", wlc->pub->unit, __func__);
+ bcmerror = -EINVAL;
+ goto done;
+ }
+ /* mcs > 7 must use stf SDM */
+ } else if (rate > HIGHEST_SINGLE_STREAM_MCS) {
+ /* mcs > 7 must use stf SDM */
+ if (stf != PHY_TXC1_MODE_SDM) {
+ BCMMSG(wlc->wiphy, "wl%d: enabling "
+ "SDM mode for mcs %d\n",
+ wlc->pub->unit, rate);
+ stf = PHY_TXC1_MODE_SDM;
+ }
+ } else {
+ /*
+ * MCS 0-7 may use SISO, CDD, and for
+ * phy_rev >= 3 STBC
+ */
+ if ((stf > PHY_TXC1_MODE_STBC) ||
+ (!BRCMS_STBC_CAP_PHY(wlc)
+ && (stf == PHY_TXC1_MODE_STBC))) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: Invalid STBC"
+ "\n", wlc->pub->unit, __func__);
+ bcmerror = -EINVAL;
+ goto done;
+ }
+ }
+ } else if (is_ofdm_rate(rate)) {
+ if ((stf != PHY_TXC1_MODE_CDD) && (stf != PHY_TXC1_MODE_SISO)) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: Invalid OFDM\n",
+ wlc->pub->unit, __func__);
+ bcmerror = -EINVAL;
+ goto done;
+ }
+ } else if (is_cck_rate(rate)) {
+ if ((cur_band->bandtype != BRCM_BAND_2G)
+ || (stf != PHY_TXC1_MODE_SISO)) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: Invalid CCK\n",
+ wlc->pub->unit, __func__);
+ bcmerror = -EINVAL;
+ goto done;
+ }
+ } else {
+ wiphy_err(wlc->wiphy, "wl%d: %s: Unknown rate type\n",
+ wlc->pub->unit, __func__);
+ bcmerror = -EINVAL;
+ goto done;
+ }
+ /* make sure multiple antennae are available for non-siso rates */
+ if ((stf != PHY_TXC1_MODE_SISO) && (wlc->stf->txstreams == 1)) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: SISO antenna but !SISO "
+ "request\n", wlc->pub->unit, __func__);
+ bcmerror = -EINVAL;
+ goto done;
+ }
+
+ rspec = rate;
+ if (ismcs) {
+ rspec |= RSPEC_MIMORATE;
+ /* For STBC populate the STC field of the ratespec */
+ if (stf == PHY_TXC1_MODE_STBC) {
+ u8 stc;
+ stc = 1; /* Nss for single stream is always 1 */
+ rspec |= (stc << RSPEC_STC_SHIFT);
+ }
+ }
+
+ rspec |= (stf << RSPEC_STF_SHIFT);
+
+ if (override_mcs_only)
+ rspec |= RSPEC_OVERRIDE_MCS_ONLY;
+
+ if (issgi)
+ rspec |= RSPEC_SHORT_GI;
+
+ if ((rate != 0)
+ && !brcms_c_valid_rate(wlc, rspec, cur_band->bandtype, true))
+ return rate;
+
+ return rspec;
+done:
+ return rate;
+}
+
+/*
+ * Compute PLCP, but only requires actual rate and length of pkt.
+ * Rate is given in the driver standard multiple of 500 kbps.
+ * le is set for 11 Mbps rate if necessary.
+ * Broken out for PRQ.
+ */
+
+static void brcms_c_cck_plcp_set(struct brcms_c_info *wlc, int rate_500,
+ uint length, u8 *plcp)
+{
+ u16 usec = 0;
+ u8 le = 0;
+
+ switch (rate_500) {
+ case BRCM_RATE_1M:
+ usec = length << 3;
+ break;
+ case BRCM_RATE_2M:
+ usec = length << 2;
+ break;
+ case BRCM_RATE_5M5:
+ usec = (length << 4) / 11;
+ if ((length << 4) - (usec * 11) > 0)
+ usec++;
+ break;
+ case BRCM_RATE_11M:
+ usec = (length << 3) / 11;
+ if ((length << 3) - (usec * 11) > 0) {
+ usec++;
+ if ((usec * 11) - (length << 3) >= 8)
+ le = D11B_PLCP_SIGNAL_LE;
+ }
+ break;
+
+ default:
+ wiphy_err(wlc->wiphy,
+ "brcms_c_cck_plcp_set: unsupported rate %d\n",
+ rate_500);
+ rate_500 = BRCM_RATE_1M;
+ usec = length << 3;
+ break;
+ }
+ /* PLCP signal byte */
+ plcp[0] = rate_500 * 5; /* r (500kbps) * 5 == r (100kbps) */
+ /* PLCP service byte */
+ plcp[1] = (u8) (le | D11B_PLCP_SIGNAL_LOCKED);
+ /* PLCP length u16, little endian */
+ plcp[2] = usec & 0xff;
+ plcp[3] = (usec >> 8) & 0xff;
+ /* PLCP CRC16 */
+ plcp[4] = 0;
+ plcp[5] = 0;
+}
+
+/* Rate: 802.11 rate code, length: PSDU length in octets */
+static void brcms_c_compute_mimo_plcp(u32 rspec, uint length, u8 *plcp)
+{
+ u8 mcs = (u8) (rspec & RSPEC_RATE_MASK);
+ plcp[0] = mcs;
+ if (rspec_is40mhz(rspec) || (mcs == 32))
+ plcp[0] |= MIMO_PLCP_40MHZ;
+ BRCMS_SET_MIMO_PLCP_LEN(plcp, length);
+ plcp[3] = rspec_mimoplcp3(rspec); /* rspec already holds this byte */
+ plcp[3] |= 0x7; /* set smoothing, not sounding ppdu & reserved */
+ plcp[4] = 0; /* number of extension spatial streams bit 0 & 1 */
+ plcp[5] = 0;
+}
+
+/* Rate: 802.11 rate code, length: PSDU length in octets */
+static void
+brcms_c_compute_ofdm_plcp(u32 rspec, u32 length, u8 *plcp)
+{
+ u8 rate_signal;
+ u32 tmp = 0;
+ int rate = rspec2rate(rspec);
+
+ /*
+ * encode rate per 802.11a-1999 sec 17.3.4.1, with lsb
+ * transmitted first
+ */
+ rate_signal = rate_info[rate] & BRCMS_RATE_MASK;
+ memset(plcp, 0, D11_PHY_HDR_LEN);
+ D11A_PHY_HDR_SRATE((struct ofdm_phy_hdr *) plcp, rate_signal);
+
+ tmp = (length & 0xfff) << 5;
+ plcp[2] |= (tmp >> 16) & 0xff;
+ plcp[1] |= (tmp >> 8) & 0xff;
+ plcp[0] |= tmp & 0xff;
+}
+
+/* Rate: 802.11 rate code, length: PSDU length in octets */
+static void brcms_c_compute_cck_plcp(struct brcms_c_info *wlc, u32 rspec,
+ uint length, u8 *plcp)
+{
+ int rate = rspec2rate(rspec);
+
+ brcms_c_cck_plcp_set(wlc, rate, length, plcp);
+}
+
+static void
+brcms_c_compute_plcp(struct brcms_c_info *wlc, u32 rspec,
+ uint length, u8 *plcp)
+{
+ if (is_mcs_rate(rspec))
+ brcms_c_compute_mimo_plcp(rspec, length, plcp);
+ else if (is_ofdm_rate(rspec))
+ brcms_c_compute_ofdm_plcp(rspec, length, plcp);
+ else
+ brcms_c_compute_cck_plcp(wlc, rspec, length, plcp);
+}
+
+/* brcms_c_compute_rtscts_dur()
+ *
+ * Calculate the 802.11 MAC header DUR field for an RTS or CTS frame
+ * DUR for normal RTS/CTS w/ frame = 3 SIFS + 1 CTS + next frame time + 1 ACK
+ * DUR for CTS-TO-SELF w/ frame = 2 SIFS + next frame time + 1 ACK
+ *
+ * cts cts-to-self or rts/cts
+ * rts_rate rts or cts rate in unit of 500kbps
+ * rate next MPDU rate in unit of 500kbps
+ * frame_len next MPDU frame length in bytes
+ */
+u16
+brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
+ u32 rts_rate,
+ u32 frame_rate, u8 rts_preamble_type,
+ u8 frame_preamble_type, uint frame_len, bool ba)
+{
+ u16 dur, sifs;
+
+ sifs = get_sifs(wlc->band);
+
+ if (!cts_only) {
+ /* RTS/CTS */
+ dur = 3 * sifs;
+ dur +=
+ (u16) brcms_c_calc_cts_time(wlc, rts_rate,
+ rts_preamble_type);
+ } else {
+ /* CTS-TO-SELF */
+ dur = 2 * sifs;
+ }
+
+ dur +=
+ (u16) brcms_c_calc_frame_time(wlc, frame_rate, frame_preamble_type,
+ frame_len);
+ if (ba)
+ dur +=
+ (u16) brcms_c_calc_ba_time(wlc, frame_rate,
+ BRCMS_SHORT_PREAMBLE);
+ else
+ dur +=
+ (u16) brcms_c_calc_ack_time(wlc, frame_rate,
+ frame_preamble_type);
+ return dur;
+}
+
+static u16 brcms_c_phytxctl1_calc(struct brcms_c_info *wlc, u32 rspec)
+{
+ u16 phyctl1 = 0;
+ u16 bw;
+
+ if (BRCMS_ISLCNPHY(wlc->band)) {
+ bw = PHY_TXC1_BW_20MHZ;
+ } else {
+ bw = rspec_get_bw(rspec);
+ /* 10Mhz is not supported yet */
+ if (bw < PHY_TXC1_BW_20MHZ) {
+ wiphy_err(wlc->wiphy, "phytxctl1_calc: bw %d is "
+ "not supported yet, set to 20L\n", bw);
+ bw = PHY_TXC1_BW_20MHZ;
+ }
+ }
+
+ if (is_mcs_rate(rspec)) {
+ uint mcs = rspec & RSPEC_RATE_MASK;
+
+ /* bw, stf, coding-type is part of rspec_phytxbyte2 returns */
+ phyctl1 = rspec_phytxbyte2(rspec);
+ /* set the upper byte of phyctl1 */
+ phyctl1 |= (mcs_table[mcs].tx_phy_ctl3 << 8);
+ } else if (is_cck_rate(rspec) && !BRCMS_ISLCNPHY(wlc->band)
+ && !BRCMS_ISSSLPNPHY(wlc->band)) {
+ /*
+ * In CCK mode LPPHY overloads OFDM Modulation bits with CCK
+ * Data Rate. Eventually MIMOPHY would also be converted to
+ * this format
+ */
+ /* 0 = 1Mbps; 1 = 2Mbps; 2 = 5.5Mbps; 3 = 11Mbps */
+ phyctl1 = (bw | (rspec_stf(rspec) << PHY_TXC1_MODE_SHIFT));
+ } else { /* legacy OFDM/CCK */
+ s16 phycfg;
+ /* get the phyctl byte from rate phycfg table */
+ phycfg = brcms_c_rate_legacy_phyctl(rspec2rate(rspec));
+ if (phycfg == -1) {
+ wiphy_err(wlc->wiphy, "phytxctl1_calc: wrong "
+ "legacy OFDM/CCK rate\n");
+ phycfg = 0;
+ }
+ /* set the upper byte of phyctl1 */
+ phyctl1 =
+ (bw | (phycfg << 8) |
+ (rspec_stf(rspec) << PHY_TXC1_MODE_SHIFT));
+ }
+ return phyctl1;
+}
+
+/*
+ * Add struct d11txh, struct cck_phy_hdr.
+ *
+ * 'p' data must start with 802.11 MAC header
+ * 'p' must allow enough bytes of local headers to be "pushed" onto the packet
+ *
+ * headroom == D11_PHY_HDR_LEN + D11_TXH_LEN (D11_TXH_LEN is now 104 bytes)
+ *
+ */
+static u16
+brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
+ struct sk_buff *p, struct scb *scb, uint frag,
+ uint nfrags, uint queue, uint next_frag_len)
+{
+ struct ieee80211_hdr *h;
+ struct d11txh *txh;
+ u8 *plcp, plcp_fallback[D11_PHY_HDR_LEN];
+ int len, phylen, rts_phylen;
+ u16 mch, phyctl, xfts, mainrates;
+ u16 seq = 0, mcl = 0, status = 0, frameid = 0;
+ u32 rspec[2] = { BRCM_RATE_1M, BRCM_RATE_1M };
+ u32 rts_rspec[2] = { BRCM_RATE_1M, BRCM_RATE_1M };
+ bool use_rts = false;
+ bool use_cts = false;
+ bool use_rifs = false;
+ bool short_preamble[2] = { false, false };
+ u8 preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
+ u8 rts_preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
+ u8 *rts_plcp, rts_plcp_fallback[D11_PHY_HDR_LEN];
+ struct ieee80211_rts *rts = NULL;
+ bool qos;
+ uint ac;
+ bool hwtkmic = false;
+ u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
+#define ANTCFG_NONE 0xFF
+ u8 antcfg = ANTCFG_NONE;
+ u8 fbantcfg = ANTCFG_NONE;
+ uint phyctl1_stf = 0;
+ u16 durid = 0;
+ struct ieee80211_tx_rate *txrate[2];
+ int k;
+ struct ieee80211_tx_info *tx_info;
+ bool is_mcs;
+ u16 mimo_txbw;
+ u8 mimo_preamble_type;
+
+ /* locate 802.11 MAC header */
+ h = (struct ieee80211_hdr *)(p->data);
+ qos = ieee80211_is_data_qos(h->frame_control);
+
+ /* compute length of frame in bytes for use in PLCP computations */
+ len = brcmu_pkttotlen(p);
+ phylen = len + FCS_LEN;
+
+ /* Get tx_info */
+ tx_info = IEEE80211_SKB_CB(p);
+
+ /* add PLCP */
+ plcp = skb_push(p, D11_PHY_HDR_LEN);
+
+ /* add Broadcom tx descriptor header */
+ txh = (struct d11txh *) skb_push(p, D11_TXH_LEN);
+ memset(txh, 0, D11_TXH_LEN);
+
+ /* setup frameid */
+ if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ /* non-AP STA should never use BCMC queue */
+ if (queue == TX_BCMC_FIFO) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: ASSERT queue == "
+ "TX_BCMC!\n", wlc->pub->unit, __func__);
+ frameid = bcmc_fid_generate(wlc, NULL, txh);
+ } else {
+ /* Increment the counter for first fragment */
+ if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+ scb->seqnum[p->priority]++;
+
+ /* extract fragment number from frame first */
+ seq = le16_to_cpu(h->seq_ctrl) & FRAGNUM_MASK;
+ seq |= (scb->seqnum[p->priority] << SEQNUM_SHIFT);
+ h->seq_ctrl = cpu_to_le16(seq);
+
+ frameid = ((seq << TXFID_SEQ_SHIFT) & TXFID_SEQ_MASK) |
+ (queue & TXFID_QUEUE_MASK);
+ }
+ }
+ frameid |= queue & TXFID_QUEUE_MASK;
+
+ /* set the ignpmq bit for all pkts tx'd in PS mode and for beacons */
+ if (ieee80211_is_beacon(h->frame_control))
+ mcl |= TXC_IGNOREPMQ;
+
+ txrate[0] = tx_info->control.rates;
+ txrate[1] = txrate[0] + 1;
+
+ /*
+ * if rate control algorithm didn't give us a fallback
+ * rate, use the primary rate
+ */
+ if (txrate[1]->idx < 0)
+ txrate[1] = txrate[0];
+
+ for (k = 0; k < hw->max_rates; k++) {
+ is_mcs = txrate[k]->flags & IEEE80211_TX_RC_MCS ? true : false;
+ if (!is_mcs) {
+ if ((txrate[k]->idx >= 0)
+ && (txrate[k]->idx <
+ hw->wiphy->bands[tx_info->band]->n_bitrates)) {
+ rspec[k] =
+ hw->wiphy->bands[tx_info->band]->
+ bitrates[txrate[k]->idx].hw_value;
+ short_preamble[k] =
+ txrate[k]->
+ flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ?
+ true : false;
+ } else {
+ rspec[k] = BRCM_RATE_1M;
+ }
+ } else {
+ rspec[k] = mac80211_wlc_set_nrate(wlc, wlc->band,
+ NRATE_MCS_INUSE | txrate[k]->idx);
+ }
+
+ /*
+ * Currently only support same setting for primay and
+ * fallback rates. Unify flags for each rate into a
+ * single value for the frame
+ */
+ use_rts |=
+ txrate[k]->
+ flags & IEEE80211_TX_RC_USE_RTS_CTS ? true : false;
+ use_cts |=
+ txrate[k]->
+ flags & IEEE80211_TX_RC_USE_CTS_PROTECT ? true : false;
+
+
+ /*
+ * (1) RATE:
+ * determine and validate primary rate
+ * and fallback rates
+ */
+ if (!rspec_active(rspec[k])) {
+ rspec[k] = BRCM_RATE_1M;
+ } else {
+ if (!is_multicast_ether_addr(h->addr1)) {
+ /* set tx antenna config */
+ brcms_c_antsel_antcfg_get(wlc->asi, false,
+ false, 0, 0, &antcfg, &fbantcfg);
+ }
+ }
+ }
+
+ phyctl1_stf = wlc->stf->ss_opmode;
+
+ if (wlc->pub->_n_enab & SUPPORT_11N) {
+ for (k = 0; k < hw->max_rates; k++) {
+ /*
+ * apply siso/cdd to single stream mcs's or ofdm
+ * if rspec is auto selected
+ */
+ if (((is_mcs_rate(rspec[k]) &&
+ is_single_stream(rspec[k] & RSPEC_RATE_MASK)) ||
+ is_ofdm_rate(rspec[k]))
+ && ((rspec[k] & RSPEC_OVERRIDE_MCS_ONLY)
+ || !(rspec[k] & RSPEC_OVERRIDE))) {
+ rspec[k] &= ~(RSPEC_STF_MASK | RSPEC_STC_MASK);
+
+ /* For SISO MCS use STBC if possible */
+ if (is_mcs_rate(rspec[k])
+ && BRCMS_STF_SS_STBC_TX(wlc, scb)) {
+ u8 stc;
+
+ /* Nss for single stream is always 1 */
+ stc = 1;
+ rspec[k] |= (PHY_TXC1_MODE_STBC <<
+ RSPEC_STF_SHIFT) |
+ (stc << RSPEC_STC_SHIFT);
+ } else
+ rspec[k] |=
+ (phyctl1_stf << RSPEC_STF_SHIFT);
+ }
+
+ /*
+ * Is the phy configured to use 40MHZ frames? If
+ * so then pick the desired txbw
+ */
+ if (brcms_chspec_bw(wlc->chanspec) == BRCMS_40_MHZ) {
+ /* default txbw is 20in40 SB */
+ mimo_ctlchbw = mimo_txbw =
+ CHSPEC_SB_UPPER(wlc_phy_chanspec_get(
+ wlc->band->pi))
+ ? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;
+
+ if (is_mcs_rate(rspec[k])) {
+ /* mcs 32 must be 40b/w DUP */
+ if ((rspec[k] & RSPEC_RATE_MASK)
+ == 32) {
+ mimo_txbw =
+ PHY_TXC1_BW_40MHZ_DUP;
+ /* use override */
+ } else if (wlc->mimo_40txbw != AUTO)
+ mimo_txbw = wlc->mimo_40txbw;
+ /* else check if dst is using 40 Mhz */
+ else if (scb->flags & SCB_IS40)
+ mimo_txbw = PHY_TXC1_BW_40MHZ;
+ } else if (is_ofdm_rate(rspec[k])) {
+ if (wlc->ofdm_40txbw != AUTO)
+ mimo_txbw = wlc->ofdm_40txbw;
+ } else if (wlc->cck_40txbw != AUTO) {
+ mimo_txbw = wlc->cck_40txbw;
+ }
+ } else {
+ /*
+ * mcs32 is 40 b/w only.
+ * This is possible for probe packets on
+ * a STA during SCAN
+ */
+ if ((rspec[k] & RSPEC_RATE_MASK) == 32)
+ /* mcs 0 */
+ rspec[k] = RSPEC_MIMORATE;
+
+ mimo_txbw = PHY_TXC1_BW_20MHZ;
+ }
+
+ /* Set channel width */
+ rspec[k] &= ~RSPEC_BW_MASK;
+ if ((k == 0) || ((k > 0) && is_mcs_rate(rspec[k])))
+ rspec[k] |= (mimo_txbw << RSPEC_BW_SHIFT);
+ else
+ rspec[k] |= (mimo_ctlchbw << RSPEC_BW_SHIFT);
+
+ /* Disable short GI, not supported yet */
+ rspec[k] &= ~RSPEC_SHORT_GI;
+
+ mimo_preamble_type = BRCMS_MM_PREAMBLE;
+ if (txrate[k]->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ mimo_preamble_type = BRCMS_GF_PREAMBLE;
+
+ if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
+ && (!is_mcs_rate(rspec[k]))) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: IEEE80211_TX_"
+ "RC_MCS != is_mcs_rate(rspec)\n",
+ wlc->pub->unit, __func__);
+ }
+
+ if (is_mcs_rate(rspec[k])) {
+ preamble_type[k] = mimo_preamble_type;
+
+ /*
+ * if SGI is selected, then forced mm
+ * for single stream
+ */
+ if ((rspec[k] & RSPEC_SHORT_GI)
+ && is_single_stream(rspec[k] &
+ RSPEC_RATE_MASK))
+ preamble_type[k] = BRCMS_MM_PREAMBLE;
+ }
+
+ /* should be better conditionalized */
+ if (!is_mcs_rate(rspec[0])
+ && (tx_info->control.rates[0].
+ flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE))
+ preamble_type[k] = BRCMS_SHORT_PREAMBLE;
+ }
+ } else {
+ for (k = 0; k < hw->max_rates; k++) {
+ /* Set ctrlchbw as 20Mhz */
+ rspec[k] &= ~RSPEC_BW_MASK;
+ rspec[k] |= (PHY_TXC1_BW_20MHZ << RSPEC_BW_SHIFT);
+
+ /* for nphy, stf of ofdm frames must follow policies */
+ if (BRCMS_ISNPHY(wlc->band) && is_ofdm_rate(rspec[k])) {
+ rspec[k] &= ~RSPEC_STF_MASK;
+ rspec[k] |= phyctl1_stf << RSPEC_STF_SHIFT;
+ }
+ }
+ }
+
+ /* Reset these for use with AMPDU's */
+ txrate[0]->count = 0;
+ txrate[1]->count = 0;
+
+ /* (2) PROTECTION, may change rspec */
+ if ((ieee80211_is_data(h->frame_control) ||
+ ieee80211_is_mgmt(h->frame_control)) &&
+ (phylen > wlc->RTSThresh) && !is_multicast_ether_addr(h->addr1))
+ use_rts = true;
+
+ /* (3) PLCP: determine PLCP header and MAC duration,
+ * fill struct d11txh */
+ brcms_c_compute_plcp(wlc, rspec[0], phylen, plcp);
+ brcms_c_compute_plcp(wlc, rspec[1], phylen, plcp_fallback);
+ memcpy(&txh->FragPLCPFallback,
+ plcp_fallback, sizeof(txh->FragPLCPFallback));
+
+ /* Length field now put in CCK FBR CRC field */
+ if (is_cck_rate(rspec[1])) {
+ txh->FragPLCPFallback[4] = phylen & 0xff;
+ txh->FragPLCPFallback[5] = (phylen & 0xff00) >> 8;
+ }
+
+ /* MIMO-RATE: need validation ?? */
+ mainrates = is_ofdm_rate(rspec[0]) ?
+ D11A_PHY_HDR_GRATE((struct ofdm_phy_hdr *) plcp) :
+ plcp[0];
+
+ /* DUR field for main rate */
+ if (!ieee80211_is_pspoll(h->frame_control) &&
+ !is_multicast_ether_addr(h->addr1) && !use_rifs) {
+ durid =
+ brcms_c_compute_frame_dur(wlc, rspec[0], preamble_type[0],
+ next_frag_len);
+ h->duration_id = cpu_to_le16(durid);
+ } else if (use_rifs) {
+ /* NAV protect to end of next max packet size */
+ durid =
+ (u16) brcms_c_calc_frame_time(wlc, rspec[0],
+ preamble_type[0],
+ DOT11_MAX_FRAG_LEN);
+ durid += RIFS_11N_TIME;
+ h->duration_id = cpu_to_le16(durid);
+ }
+
+ /* DUR field for fallback rate */
+ if (ieee80211_is_pspoll(h->frame_control))
+ txh->FragDurFallback = h->duration_id;
+ else if (is_multicast_ether_addr(h->addr1) || use_rifs)
+ txh->FragDurFallback = 0;
+ else {
+ durid = brcms_c_compute_frame_dur(wlc, rspec[1],
+ preamble_type[1], next_frag_len);
+ txh->FragDurFallback = cpu_to_le16(durid);
+ }
+
+ /* (4) MAC-HDR: MacTxControlLow */
+ if (frag == 0)
+ mcl |= TXC_STARTMSDU;
+
+ if (!is_multicast_ether_addr(h->addr1))
+ mcl |= TXC_IMMEDACK;
+
+ if (wlc->band->bandtype == BRCM_BAND_5G)
+ mcl |= TXC_FREQBAND_5G;
+
+ if (CHSPEC_IS40(wlc_phy_chanspec_get(wlc->band->pi)))
+ mcl |= TXC_BW_40;
+
+ /* set AMIC bit if using hardware TKIP MIC */
+ if (hwtkmic)
+ mcl |= TXC_AMIC;
+
+ txh->MacTxControlLow = cpu_to_le16(mcl);
+
+ /* MacTxControlHigh */
+ mch = 0;
+
+ /* Set fallback rate preamble type */
+ if ((preamble_type[1] == BRCMS_SHORT_PREAMBLE) ||
+ (preamble_type[1] == BRCMS_GF_PREAMBLE)) {
+ if (rspec2rate(rspec[1]) != BRCM_RATE_1M)
+ mch |= TXC_PREAMBLE_DATA_FB_SHORT;
+ }
+
+ /* MacFrameControl */
+ memcpy(&txh->MacFrameControl, &h->frame_control, sizeof(u16));
+ txh->TxFesTimeNormal = cpu_to_le16(0);
+
+ txh->TxFesTimeFallback = cpu_to_le16(0);
+
+ /* TxFrameRA */
+ memcpy(&txh->TxFrameRA, &h->addr1, ETH_ALEN);
+
+ /* TxFrameID */
+ txh->TxFrameID = cpu_to_le16(frameid);
+
+ /*
+ * TxStatus, Note the case of recreating the first frag of a suppressed
+ * frame then we may need to reset the retry cnt's via the status reg
+ */
+ txh->TxStatus = cpu_to_le16(status);
+
+ /*
+ * extra fields for ucode AMPDU aggregation, the new fields are added to
+ * the END of previous structure so that it's compatible in driver.
+ */
+ txh->MaxNMpdus = cpu_to_le16(0);
+ txh->MaxABytes_MRT = cpu_to_le16(0);
+ txh->MaxABytes_FBR = cpu_to_le16(0);
+ txh->MinMBytes = cpu_to_le16(0);
+
+ /* (5) RTS/CTS: determine RTS/CTS PLCP header and MAC duration,
+ * furnish struct d11txh */
+ /* RTS PLCP header and RTS frame */
+ if (use_rts || use_cts) {
+ if (use_rts && use_cts)
+ use_cts = false;
+
+ for (k = 0; k < 2; k++) {
+ rts_rspec[k] = brcms_c_rspec_to_rts_rspec(wlc, rspec[k],
+ false,
+ mimo_ctlchbw);
+ }
+
+ if (!is_ofdm_rate(rts_rspec[0]) &&
+ !((rspec2rate(rts_rspec[0]) == BRCM_RATE_1M) ||
+ (wlc->PLCPHdr_override == BRCMS_PLCP_LONG))) {
+ rts_preamble_type[0] = BRCMS_SHORT_PREAMBLE;
+ mch |= TXC_PREAMBLE_RTS_MAIN_SHORT;
+ }
+
+ if (!is_ofdm_rate(rts_rspec[1]) &&
+ !((rspec2rate(rts_rspec[1]) == BRCM_RATE_1M) ||
+ (wlc->PLCPHdr_override == BRCMS_PLCP_LONG))) {
+ rts_preamble_type[1] = BRCMS_SHORT_PREAMBLE;
+ mch |= TXC_PREAMBLE_RTS_FB_SHORT;
+ }
+
+ /* RTS/CTS additions to MacTxControlLow */
+ if (use_cts) {
+ txh->MacTxControlLow |= cpu_to_le16(TXC_SENDCTS);
+ } else {
+ txh->MacTxControlLow |= cpu_to_le16(TXC_SENDRTS);
+ txh->MacTxControlLow |= cpu_to_le16(TXC_LONGFRAME);
+ }
+
+ /* RTS PLCP header */
+ rts_plcp = txh->RTSPhyHeader;
+ if (use_cts)
+ rts_phylen = DOT11_CTS_LEN + FCS_LEN;
+ else
+ rts_phylen = DOT11_RTS_LEN + FCS_LEN;
+
+ brcms_c_compute_plcp(wlc, rts_rspec[0], rts_phylen, rts_plcp);
+
+ /* fallback rate version of RTS PLCP header */
+ brcms_c_compute_plcp(wlc, rts_rspec[1], rts_phylen,
+ rts_plcp_fallback);
+ memcpy(&txh->RTSPLCPFallback, rts_plcp_fallback,
+ sizeof(txh->RTSPLCPFallback));
+
+ /* RTS frame fields... */
+ rts = (struct ieee80211_rts *)&txh->rts_frame;
+
+ durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec[0],
+ rspec[0], rts_preamble_type[0],
+ preamble_type[0], phylen, false);
+ rts->duration = cpu_to_le16(durid);
+ /* fallback rate version of RTS DUR field */
+ durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
+ rts_rspec[1], rspec[1],
+ rts_preamble_type[1],
+ preamble_type[1], phylen, false);
+ txh->RTSDurFallback = cpu_to_le16(durid);
+
+ if (use_cts) {
+ rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
+ IEEE80211_STYPE_CTS);
+
+ memcpy(&rts->ra, &h->addr2, ETH_ALEN);
+ } else {
+ rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
+ IEEE80211_STYPE_RTS);
+
+ memcpy(&rts->ra, &h->addr1, 2 * ETH_ALEN);
+ }
+
+ /* mainrate
+ * low 8 bits: main frag rate/mcs,
+ * high 8 bits: rts/cts rate/mcs
+ */
+ mainrates |= (is_ofdm_rate(rts_rspec[0]) ?
+ D11A_PHY_HDR_GRATE(
+ (struct ofdm_phy_hdr *) rts_plcp) :
+ rts_plcp[0]) << 8;
+ } else {
+ memset((char *)txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
+ memset((char *)&txh->rts_frame, 0,
+ sizeof(struct ieee80211_rts));
+ memset((char *)txh->RTSPLCPFallback, 0,
+ sizeof(txh->RTSPLCPFallback));
+ txh->RTSDurFallback = 0;
+ }
+
+#ifdef SUPPORT_40MHZ
+ /* add null delimiter count */
+ if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && is_mcs_rate(rspec))
+ txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] =
+ brcm_c_ampdu_null_delim_cnt(wlc->ampdu, scb, rspec, phylen);
+
+#endif
+
+ /*
+ * Now that RTS/RTS FB preamble types are updated, write
+ * the final value
+ */
+ txh->MacTxControlHigh = cpu_to_le16(mch);
+
+ /*
+ * MainRates (both the rts and frag plcp rates have
+ * been calculated now)
+ */
+ txh->MainRates = cpu_to_le16(mainrates);
+
+ /* XtraFrameTypes */
+ xfts = frametype(rspec[1], wlc->mimoft);
+ xfts |= (frametype(rts_rspec[0], wlc->mimoft) << XFTS_RTS_FT_SHIFT);
+ xfts |= (frametype(rts_rspec[1], wlc->mimoft) << XFTS_FBRRTS_FT_SHIFT);
+ xfts |= CHSPEC_CHANNEL(wlc_phy_chanspec_get(wlc->band->pi)) <<
+ XFTS_CHANNEL_SHIFT;
+ txh->XtraFrameTypes = cpu_to_le16(xfts);
+
+ /* PhyTxControlWord */
+ phyctl = frametype(rspec[0], wlc->mimoft);
+ if ((preamble_type[0] == BRCMS_SHORT_PREAMBLE) ||
+ (preamble_type[0] == BRCMS_GF_PREAMBLE)) {
+ if (rspec2rate(rspec[0]) != BRCM_RATE_1M)
+ phyctl |= PHY_TXC_SHORT_HDR;
+ }
+
+ /* phytxant is properly bit shifted */
+ phyctl |= brcms_c_stf_d11hdrs_phyctl_txant(wlc, rspec[0]);
+ txh->PhyTxControlWord = cpu_to_le16(phyctl);
+
+ /* PhyTxControlWord_1 */
+ if (BRCMS_PHY_11N_CAP(wlc->band)) {
+ u16 phyctl1 = 0;
+
+ phyctl1 = brcms_c_phytxctl1_calc(wlc, rspec[0]);
+ txh->PhyTxControlWord_1 = cpu_to_le16(phyctl1);
+ phyctl1 = brcms_c_phytxctl1_calc(wlc, rspec[1]);
+ txh->PhyTxControlWord_1_Fbr = cpu_to_le16(phyctl1);
+
+ if (use_rts || use_cts) {
+ phyctl1 = brcms_c_phytxctl1_calc(wlc, rts_rspec[0]);
+ txh->PhyTxControlWord_1_Rts = cpu_to_le16(phyctl1);
+ phyctl1 = brcms_c_phytxctl1_calc(wlc, rts_rspec[1]);
+ txh->PhyTxControlWord_1_FbrRts = cpu_to_le16(phyctl1);
+ }
+
+ /*
+ * For mcs frames, if mixedmode(overloaded with long preamble)
+ * is going to be set, fill in non-zero MModeLen and/or
+ * MModeFbrLen it will be unnecessary if they are separated
+ */
+ if (is_mcs_rate(rspec[0]) &&
+ (preamble_type[0] == BRCMS_MM_PREAMBLE)) {
+ u16 mmodelen =
+ brcms_c_calc_lsig_len(wlc, rspec[0], phylen);
+ txh->MModeLen = cpu_to_le16(mmodelen);
+ }
+
+ if (is_mcs_rate(rspec[1]) &&
+ (preamble_type[1] == BRCMS_MM_PREAMBLE)) {
+ u16 mmodefbrlen =
+ brcms_c_calc_lsig_len(wlc, rspec[1], phylen);
+ txh->MModeFbrLen = cpu_to_le16(mmodefbrlen);
+ }
+ }
+
+ ac = skb_get_queue_mapping(p);
+ if ((scb->flags & SCB_WMECAP) && qos && wlc->edcf_txop[ac]) {
+ uint frag_dur, dur, dur_fallback;
+
+ /* WME: Update TXOP threshold */
+ if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU) && frag == 0) {
+ frag_dur =
+ brcms_c_calc_frame_time(wlc, rspec[0],
+ preamble_type[0], phylen);
+
+ if (rts) {
+ /* 1 RTS or CTS-to-self frame */
+ dur =
+ brcms_c_calc_cts_time(wlc, rts_rspec[0],
+ rts_preamble_type[0]);
+ dur_fallback =
+ brcms_c_calc_cts_time(wlc, rts_rspec[1],
+ rts_preamble_type[1]);
+ /* (SIFS + CTS) + SIFS + frame + SIFS + ACK */
+ dur += le16_to_cpu(rts->duration);
+ dur_fallback +=
+ le16_to_cpu(txh->RTSDurFallback);
+ } else if (use_rifs) {
+ dur = frag_dur;
+ dur_fallback = 0;
+ } else {
+ /* frame + SIFS + ACK */
+ dur = frag_dur;
+ dur +=
+ brcms_c_compute_frame_dur(wlc, rspec[0],
+ preamble_type[0], 0);
+
+ dur_fallback =
+ brcms_c_calc_frame_time(wlc, rspec[1],
+ preamble_type[1],
+ phylen);
+ dur_fallback +=
+ brcms_c_compute_frame_dur(wlc, rspec[1],
+ preamble_type[1], 0);
+ }
+ /* NEED to set TxFesTimeNormal (hard) */
+ txh->TxFesTimeNormal = cpu_to_le16((u16) dur);
+ /*
+ * NEED to set fallback rate version of
+ * TxFesTimeNormal (hard)
+ */
+ txh->TxFesTimeFallback =
+ cpu_to_le16((u16) dur_fallback);
+
+ /*
+ * update txop byte threshold (txop minus intraframe
+ * overhead)
+ */
+ if (wlc->edcf_txop[ac] >= (dur - frag_dur)) {
+ uint newfragthresh;
+
+ newfragthresh =
+ brcms_c_calc_frame_len(wlc,
+ rspec[0], preamble_type[0],
+ (wlc->edcf_txop[ac] -
+ (dur - frag_dur)));
+ /* range bound the fragthreshold */
+ if (newfragthresh < DOT11_MIN_FRAG_LEN)
+ newfragthresh =
+ DOT11_MIN_FRAG_LEN;
+ else if (newfragthresh >
+ wlc->usr_fragthresh)
+ newfragthresh =
+ wlc->usr_fragthresh;
+ /* update the fragthresh and do txc update */
+ if (wlc->fragthresh[queue] !=
+ (u16) newfragthresh)
+ wlc->fragthresh[queue] =
+ (u16) newfragthresh;
+ } else {
+ wiphy_err(wlc->wiphy, "wl%d: %s txop invalid "
+ "for rate %d\n",
+ wlc->pub->unit, fifo_names[queue],
+ rspec2rate(rspec[0]));
+ }
+
+ if (dur > wlc->edcf_txop[ac])
+ wiphy_err(wlc->wiphy, "wl%d: %s: %s txop "
+ "exceeded phylen %d/%d dur %d/%d\n",
+ wlc->pub->unit, __func__,
+ fifo_names[queue],
+ phylen, wlc->fragthresh[queue],
+ dur, wlc->edcf_txop[ac]);
+ }
+ }
+
+ return 0;
+}
+
+void brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
+ struct ieee80211_hw *hw)
+{
+ u8 prio;
+ uint fifo;
+ struct scb *scb = &wlc->pri_scb;
+ struct ieee80211_hdr *d11_header = (struct ieee80211_hdr *)(sdu->data);
+
+ /*
+ * 802.11 standard requires management traffic
+ * to go at highest priority
+ */
+ prio = ieee80211_is_data(d11_header->frame_control) ? sdu->priority :
+ MAXPRIO;
+ fifo = prio2fifo[prio];
+ if (brcms_c_d11hdrs_mac80211(wlc, hw, sdu, scb, 0, 1, fifo, 0))
+ return;
+ brcms_c_txq_enq(wlc, scb, sdu, BRCMS_PRIO_TO_PREC(prio));
+ brcms_c_send_q(wlc);
+}
+
+void brcms_c_send_q(struct brcms_c_info *wlc)
+{
+ struct sk_buff *pkt[DOT11_MAXNUMFRAGS];
+ int prec;
+ u16 prec_map;
+ int err = 0, i, count;
+ uint fifo;
+ struct brcms_txq_info *qi = wlc->pkt_queue;
+ struct pktq *q = &qi->q;
+ struct ieee80211_tx_info *tx_info;
+
+ prec_map = wlc->tx_prec_map;
+
+ /* Send all the enq'd pkts that we can.
+ * Dequeue packets with precedence with empty HW fifo only
+ */
+ while (prec_map && (pkt[0] = brcmu_pktq_mdeq(q, prec_map, &prec))) {
+ tx_info = IEEE80211_SKB_CB(pkt[0]);
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ err = brcms_c_sendampdu(wlc->ampdu, qi, pkt, prec);
+ } else {
+ count = 1;
+ err = brcms_c_prep_pdu(wlc, pkt[0], &fifo);
+ if (!err) {
+ for (i = 0; i < count; i++)
+ brcms_c_txfifo(wlc, fifo, pkt[i], true,
+ 1);
+ }
+ }
+
+ if (err == -EBUSY) {
+ brcmu_pktq_penq_head(q, prec, pkt[0]);
+ /*
+ * If send failed due to any other reason than a
+ * change in HW FIFO condition, quit. Otherwise,
+ * read the new prec_map!
+ */
+ if (prec_map == wlc->tx_prec_map)
+ break;
+ prec_map = wlc->tx_prec_map;
+ }
+ }
+}
+
+void
+brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p,
+ bool commit, s8 txpktpend)
+{
+ u16 frameid = INVALIDFID;
+ struct d11txh *txh;
+
+ txh = (struct d11txh *) (p->data);
+
+ /* When a BC/MC frame is being committed to the BCMC fifo
+ * via DMA (NOT PIO), update ucode or BSS info as appropriate.
+ */
+ if (fifo == TX_BCMC_FIFO)
+ frameid = le16_to_cpu(txh->TxFrameID);
+
+ /*
+ * Bump up pending count for if not using rpc. If rpc is
+ * used, this will be handled in brcms_b_txfifo()
+ */
+ if (commit) {
+ wlc->core->txpktpend[fifo] += txpktpend;
+ BCMMSG(wlc->wiphy, "pktpend inc %d to %d\n",
+ txpktpend, wlc->core->txpktpend[fifo]);
+ }
+
+ /* Commit BCMC sequence number in the SHM frame ID location */
+ if (frameid != INVALIDFID) {
+ /*
+ * To inform the ucode of the last mcast frame posted
+ * so that it can clear moredata bit
+ */
+ brcms_b_write_shm(wlc->hw, M_BCMC_FID, frameid);
+ }
+
+ if (dma_txfast(wlc->hw->di[fifo], p, commit) < 0)
+ wiphy_err(wlc->wiphy, "txfifo: fatal, toss frames !!!\n");
+}
+
+u32
+brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, u32 rspec,
+ bool use_rspec, u16 mimo_ctlchbw)
+{
+ u32 rts_rspec = 0;
+
+ if (use_rspec)
+ /* use frame rate as rts rate */
+ rts_rspec = rspec;
+ else if (wlc->band->gmode && wlc->protection->_g && !is_cck_rate(rspec))
+ /* Use 11Mbps as the g protection RTS target rate and fallback.
+ * Use the brcms_basic_rate() lookup to find the best basic rate
+ * under the target in case 11 Mbps is not Basic.
+ * 6 and 9 Mbps are not usually selected by rate selection, but
+ * even if the OFDM rate we are protecting is 6 or 9 Mbps, 11
+ * is more robust.
+ */
+ rts_rspec = brcms_basic_rate(wlc, BRCM_RATE_11M);
+ else
+ /* calculate RTS rate and fallback rate based on the frame rate
+ * RTS must be sent at a basic rate since it is a
+ * control frame, sec 9.6 of 802.11 spec
+ */
+ rts_rspec = brcms_basic_rate(wlc, rspec);
+
+ if (BRCMS_PHY_11N_CAP(wlc->band)) {
+ /* set rts txbw to correct side band */
+ rts_rspec &= ~RSPEC_BW_MASK;
+
+ /*
+ * if rspec/rspec_fallback is 40MHz, then send RTS on both
+ * 20MHz channel (DUP), otherwise send RTS on control channel
+ */
+ if (rspec_is40mhz(rspec) && !is_cck_rate(rts_rspec))
+ rts_rspec |= (PHY_TXC1_BW_40MHZ_DUP << RSPEC_BW_SHIFT);
+ else
+ rts_rspec |= (mimo_ctlchbw << RSPEC_BW_SHIFT);
+
+ /* pick siso/cdd as default for ofdm */
+ if (is_ofdm_rate(rts_rspec)) {
+ rts_rspec &= ~RSPEC_STF_MASK;
+ rts_rspec |= (wlc->stf->ss_opmode << RSPEC_STF_SHIFT);
+ }
+ }
+ return rts_rspec;
+}
+
+void
+brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo, s8 txpktpend)
+{
+ wlc->core->txpktpend[fifo] -= txpktpend;
+ BCMMSG(wlc->wiphy, "pktpend dec %d to %d\n", txpktpend,
+ wlc->core->txpktpend[fifo]);
+
+ /* There is more room; mark precedences related to this FIFO sendable */
+ wlc->tx_prec_map |= wlc->fifo2prec_map[fifo];
+
+ /* figure out which bsscfg is being worked on... */
+}
+
+/* Update beacon listen interval in shared memory */
+static void brcms_c_bcn_li_upd(struct brcms_c_info *wlc)
+{
+ /* wake up every DTIM is the default */
+ if (wlc->bcn_li_dtim == 1)
+ brcms_b_write_shm(wlc->hw, M_BCN_LI, 0);
+ else
+ brcms_b_write_shm(wlc->hw, M_BCN_LI,
+ (wlc->bcn_li_dtim << 8) | wlc->bcn_li_bcn);
+}
+
+static void
+brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
+ u32 *tsf_h_ptr)
+{
+ struct d11regs __iomem *regs = wlc_hw->regs;
+
+ /* read the tsf timer low, then high to get an atomic read */
+ *tsf_l_ptr = R_REG(&regs->tsf_timerlow);
+ *tsf_h_ptr = R_REG(&regs->tsf_timerhigh);
+}
+
+/*
+ * recover 64bit TSF value from the 16bit TSF value in the rx header
+ * given the assumption that the TSF passed in header is within 65ms
+ * of the current tsf.
+ *
+ * 6 5 4 4 3 2 1
+ * 3.......6.......8.......0.......2.......4.......6.......8......0
+ * |<---------- tsf_h ----------->||<--- tsf_l -->||<-RxTSFTime ->|
+ *
+ * The RxTSFTime are the lowest 16 bits and provided by the ucode. The
+ * tsf_l is filled in by brcms_b_recv, which is done earlier in the
+ * receive call sequence after rx interrupt. Only the higher 16 bits
+ * are used. Finally, the tsf_h is read from the tsf register.
+ */
+static u64 brcms_c_recover_tsf64(struct brcms_c_info *wlc,
+ struct d11rxhdr *rxh)
+{
+ u32 tsf_h, tsf_l;
+ u16 rx_tsf_0_15, rx_tsf_16_31;
+
+ brcms_b_read_tsf(wlc->hw, &tsf_l, &tsf_h);
+
+ rx_tsf_16_31 = (u16)(tsf_l >> 16);
+ rx_tsf_0_15 = rxh->RxTSFTime;
+
+ /*
+ * a greater tsf time indicates the low 16 bits of
+ * tsf_l wrapped, so decrement the high 16 bits.
+ */
+ if ((u16)tsf_l < rx_tsf_0_15) {
+ rx_tsf_16_31 -= 1;
+ if (rx_tsf_16_31 == 0xffff)
+ tsf_h -= 1;
+ }
+
+ return ((u64)tsf_h << 32) | (((u32)rx_tsf_16_31 << 16) + rx_tsf_0_15);
+}
+
+static void
+prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
+ struct sk_buff *p,
+ struct ieee80211_rx_status *rx_status)
+{
+ int preamble;
+ int channel;
+ u32 rspec;
+ unsigned char *plcp;
+
+ /* fill in TSF and flag its presence */
+ rx_status->mactime = brcms_c_recover_tsf64(wlc, rxh);
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+
+ channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
+
+ if (channel > 14) {
+ rx_status->band = IEEE80211_BAND_5GHZ;
+ rx_status->freq = ieee80211_ofdm_chan_to_freq(
+ WF_CHAN_FACTOR_5_G/2, channel);
+
+ } else {
+ rx_status->band = IEEE80211_BAND_2GHZ;
+ rx_status->freq = ieee80211_dsss_chan_to_freq(channel);
+ }
+
+ rx_status->signal = wlc_phy_rssi_compute(wlc->hw->band->pi, rxh);
+
+ /* noise */
+ /* qual */
+ rx_status->antenna =
+ (rxh->PhyRxStatus_0 & PRXS0_RXANT_UPSUBBAND) ? 1 : 0;
+
+ plcp = p->data;
+
+ rspec = brcms_c_compute_rspec(rxh, plcp);
+ if (is_mcs_rate(rspec)) {
+ rx_status->rate_idx = rspec & RSPEC_RATE_MASK;
+ rx_status->flag |= RX_FLAG_HT;
+ if (rspec_is40mhz(rspec))
+ rx_status->flag |= RX_FLAG_40MHZ;
+ } else {
+ switch (rspec2rate(rspec)) {
+ case BRCM_RATE_1M:
+ rx_status->rate_idx = 0;
+ break;
+ case BRCM_RATE_2M:
+ rx_status->rate_idx = 1;
+ break;
+ case BRCM_RATE_5M5:
+ rx_status->rate_idx = 2;
+ break;
+ case BRCM_RATE_11M:
+ rx_status->rate_idx = 3;
+ break;
+ case BRCM_RATE_6M:
+ rx_status->rate_idx = 4;
+ break;
+ case BRCM_RATE_9M:
+ rx_status->rate_idx = 5;
+ break;
+ case BRCM_RATE_12M:
+ rx_status->rate_idx = 6;
+ break;
+ case BRCM_RATE_18M:
+ rx_status->rate_idx = 7;
+ break;
+ case BRCM_RATE_24M:
+ rx_status->rate_idx = 8;
+ break;
+ case BRCM_RATE_36M:
+ rx_status->rate_idx = 9;
+ break;
+ case BRCM_RATE_48M:
+ rx_status->rate_idx = 10;
+ break;
+ case BRCM_RATE_54M:
+ rx_status->rate_idx = 11;
+ break;
+ default:
+ wiphy_err(wlc->wiphy, "%s: Unknown rate\n", __func__);
+ }
+
+ /*
+ * For 5GHz, we should decrease the index as it is
+ * a subset of the 2.4G rates. See bitrates field
+ * of brcms_band_5GHz_nphy (in mac80211_if.c).
+ */
+ if (rx_status->band == IEEE80211_BAND_5GHZ)
+ rx_status->rate_idx -= BRCMS_LEGACY_5G_RATE_OFFSET;
+
+ /* Determine short preamble and rate_idx */
+ preamble = 0;
+ if (is_cck_rate(rspec)) {
+ if (rxh->PhyRxStatus_0 & PRXS0_SHORTH)
+ rx_status->flag |= RX_FLAG_SHORTPRE;
+ } else if (is_ofdm_rate(rspec)) {
+ rx_status->flag |= RX_FLAG_SHORTPRE;
+ } else {
+ wiphy_err(wlc->wiphy, "%s: Unknown modulation\n",
+ __func__);
+ }
+ }
+
+ if (plcp3_issgi(plcp[3]))
+ rx_status->flag |= RX_FLAG_SHORT_GI;
+
+ if (rxh->RxStatus1 & RXS_DECERR) {
+ rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
+ wiphy_err(wlc->wiphy, "%s: RX_FLAG_FAILED_PLCP_CRC\n",
+ __func__);
+ }
+ if (rxh->RxStatus1 & RXS_FCSERR) {
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ wiphy_err(wlc->wiphy, "%s: RX_FLAG_FAILED_FCS_CRC\n",
+ __func__);
+ }
+}
+
+static void
+brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
+ struct sk_buff *p)
+{
+ int len_mpdu;
+ struct ieee80211_rx_status rx_status;
+
+ memset(&rx_status, 0, sizeof(rx_status));
+ prep_mac80211_status(wlc, rxh, p, &rx_status);
+
+ /* mac header+body length, exclude CRC and plcp header */
+ len_mpdu = p->len - D11_PHY_HDR_LEN - FCS_LEN;
+ skb_pull(p, D11_PHY_HDR_LEN);
+ __skb_trim(p, len_mpdu);
+
+ memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
+ ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
+}
+
+/* calculate frame duration for Mixed-mode L-SIG spoofing, return
+ * number of bytes goes in the length field
+ *
+ * Formula given by HT PHY Spec v 1.13
+ * len = 3(nsyms + nstream + 3) - 3
+ */
+u16
+brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
+ uint mac_len)
+{
+ uint nsyms, len = 0, kNdps;
+
+ BCMMSG(wlc->wiphy, "wl%d: rate %d, len%d\n",
+ wlc->pub->unit, rspec2rate(ratespec), mac_len);
+
+ if (is_mcs_rate(ratespec)) {
+ uint mcs = ratespec & RSPEC_RATE_MASK;
+ int tot_streams = (mcs_2_txstreams(mcs) + 1) +
+ rspec_stc(ratespec);
+
+ /*
+ * the payload duration calculation matches that
+ * of regular ofdm
+ */
+ /* 1000Ndbps = kbps * 4 */
+ kNdps = mcs_2_rate(mcs, rspec_is40mhz(ratespec),
+ rspec_issgi(ratespec)) * 4;
+
+ if (rspec_stc(ratespec) == 0)
+ nsyms =
+ CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
+ APHY_TAIL_NBITS) * 1000, kNdps);
+ else
+ /* STBC needs to have even number of symbols */
+ nsyms =
+ 2 *
+ CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
+ APHY_TAIL_NBITS) * 1000, 2 * kNdps);
+
+ /* (+3) account for HT-SIG(2) and HT-STF(1) */
+ nsyms += (tot_streams + 3);
+ /*
+ * 3 bytes/symbol @ legacy 6Mbps rate
+ * (-3) excluding service bits and tail bits
+ */
+ len = (3 * nsyms) - 3;
+ }
+
+ return (u16) len;
+}
+
+static void
+brcms_c_mod_prb_rsp_rate_table(struct brcms_c_info *wlc, uint frame_len)
+{
+ const struct brcms_c_rateset *rs_dflt;
+ struct brcms_c_rateset rs;
+ u8 rate;
+ u16 entry_ptr;
+ u8 plcp[D11_PHY_HDR_LEN];
+ u16 dur, sifs;
+ uint i;
+
+ sifs = get_sifs(wlc->band);
+
+ rs_dflt = brcms_c_rateset_get_hwrs(wlc);
+
+ brcms_c_rateset_copy(rs_dflt, &rs);
+ brcms_c_rateset_mcs_upd(&rs, wlc->stf->txstreams);
+
+ /*
+ * walk the phy rate table and update MAC core SHM
+ * basic rate table entries
+ */
+ for (i = 0; i < rs.count; i++) {
+ rate = rs.rates[i] & BRCMS_RATE_MASK;
+
+ entry_ptr = brcms_b_rate_shm_offset(wlc->hw, rate);
+
+ /* Calculate the Probe Response PLCP for the given rate */
+ brcms_c_compute_plcp(wlc, rate, frame_len, plcp);
+
+ /*
+ * Calculate the duration of the Probe Response
+ * frame plus SIFS for the MAC
+ */
+ dur = (u16) brcms_c_calc_frame_time(wlc, rate,
+ BRCMS_LONG_PREAMBLE, frame_len);
+ dur += sifs;
+
+ /* Update the SHM Rate Table entry Probe Response values */
+ brcms_b_write_shm(wlc->hw, entry_ptr + M_RT_PRS_PLCP_POS,
+ (u16) (plcp[0] + (plcp[1] << 8)));
+ brcms_b_write_shm(wlc->hw, entry_ptr + M_RT_PRS_PLCP_POS + 2,
+ (u16) (plcp[2] + (plcp[3] << 8)));
+ brcms_b_write_shm(wlc->hw, entry_ptr + M_RT_PRS_DUR_POS, dur);
+ }
+}
+
+/* Max buffering needed for beacon template/prb resp template is 142 bytes.
+ *
+ * PLCP header is 6 bytes.
+ * 802.11 A3 header is 24 bytes.
+ * Max beacon frame body template length is 112 bytes.
+ * Max probe resp frame body template length is 110 bytes.
+ *
+ * *len on input contains the max length of the packet available.
+ *
+ * The *len value is set to the number of bytes in buf used, and starts
+ * with the PLCP and included up to, but not including, the 4 byte FCS.
+ */
+static void
+brcms_c_bcn_prb_template(struct brcms_c_info *wlc, u16 type,
+ u32 bcn_rspec,
+ struct brcms_bss_cfg *cfg, u16 *buf, int *len)
+{
+ static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
+ struct cck_phy_hdr *plcp;
+ struct ieee80211_mgmt *h;
+ int hdr_len, body_len;
+
+ hdr_len = D11_PHY_HDR_LEN + DOT11_MAC_HDR_LEN;
+
+ /* calc buffer size provided for frame body */
+ body_len = *len - hdr_len;
+ /* return actual size */
+ *len = hdr_len + body_len;
+
+ /* format PHY and MAC headers */
+ memset((char *)buf, 0, hdr_len);
+
+ plcp = (struct cck_phy_hdr *) buf;
+
+ /*
+ * PLCP for Probe Response frames are filled in from
+ * core's rate table
+ */
+ if (type == IEEE80211_STYPE_BEACON)
+ /* fill in PLCP */
+ brcms_c_compute_plcp(wlc, bcn_rspec,
+ (DOT11_MAC_HDR_LEN + body_len + FCS_LEN),
+ (u8 *) plcp);
+
+ /* "Regular" and 16 MBSS but not for 4 MBSS */
+ /* Update the phytxctl for the beacon based on the rspec */
+ brcms_c_beacon_phytxctl_txant_upd(wlc, bcn_rspec);
+
+ h = (struct ieee80211_mgmt *)&plcp[1];
+
+ /* fill in 802.11 header */
+ h->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | type);
+
+ /* DUR is 0 for multicast bcn, or filled in by MAC for prb resp */
+ /* A1 filled in by MAC for prb resp, broadcast for bcn */
+ if (type == IEEE80211_STYPE_BEACON)
+ memcpy(&h->da, &ether_bcast, ETH_ALEN);
+ memcpy(&h->sa, &cfg->cur_etheraddr, ETH_ALEN);
+ memcpy(&h->bssid, &cfg->BSSID, ETH_ALEN);
+
+ /* SEQ filled in by MAC */
+}
+
+int brcms_c_get_header_len(void)
+{
+ return TXOFF;
+}
+
+/*
+ * Update all beacons for the system.
+ */
+void brcms_c_update_beacon(struct brcms_c_info *wlc)
+{
+ struct brcms_bss_cfg *bsscfg = wlc->bsscfg;
+
+ if (bsscfg->up && !bsscfg->BSS)
+ /* Clear the soft intmask */
+ wlc->defmacintmask &= ~MI_BCNTPL;
+}
+
+/* Write ssid into shared memory */
+static void
+brcms_c_shm_ssid_upd(struct brcms_c_info *wlc, struct brcms_bss_cfg *cfg)
+{
+ u8 *ssidptr = cfg->SSID;
+ u16 base = M_SSID;
+ u8 ssidbuf[IEEE80211_MAX_SSID_LEN];
+
+ /* padding the ssid with zero and copy it into shm */
+ memset(ssidbuf, 0, IEEE80211_MAX_SSID_LEN);
+ memcpy(ssidbuf, ssidptr, cfg->SSID_len);
+
+ brcms_c_copyto_shm(wlc, base, ssidbuf, IEEE80211_MAX_SSID_LEN);
+ brcms_b_write_shm(wlc->hw, M_SSIDLEN, (u16) cfg->SSID_len);
+}
+
+static void
+brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
+ struct brcms_bss_cfg *cfg,
+ bool suspend)
+{
+ u16 prb_resp[BCN_TMPL_LEN / 2];
+ int len = BCN_TMPL_LEN;
+
+ /*
+ * write the probe response to hardware, or save in
+ * the config structure
+ */
+
+ /* create the probe response template */
+ brcms_c_bcn_prb_template(wlc, IEEE80211_STYPE_PROBE_RESP, 0,
+ cfg, prb_resp, &len);
+
+ if (suspend)
+ brcms_c_suspend_mac_and_wait(wlc);
+
+ /* write the probe response into the template region */
+ brcms_b_write_template_ram(wlc->hw, T_PRS_TPL_BASE,
+ (len + 3) & ~3, prb_resp);
+
+ /* write the length of the probe response frame (+PLCP/-FCS) */
+ brcms_b_write_shm(wlc->hw, M_PRB_RESP_FRM_LEN, (u16) len);
+
+ /* write the SSID and SSID length */
+ brcms_c_shm_ssid_upd(wlc, cfg);
+
+ /*
+ * Write PLCP headers and durations for probe response frames
+ * at all rates. Use the actual frame length covered by the
+ * PLCP header for the call to brcms_c_mod_prb_rsp_rate_table()
+ * by subtracting the PLCP len and adding the FCS.
+ */
+ len += (-D11_PHY_HDR_LEN + FCS_LEN);
+ brcms_c_mod_prb_rsp_rate_table(wlc, (u16) len);
+
+ if (suspend)
+ brcms_c_enable_mac(wlc);
+}
+
+void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend)
+{
+ struct brcms_bss_cfg *bsscfg = wlc->bsscfg;
+
+ /* update AP or IBSS probe responses */
+ if (bsscfg->up && !bsscfg->BSS)
+ brcms_c_bss_update_probe_resp(wlc, bsscfg, suspend);
+}
+
+/* prepares pdu for transmission. returns BCM error codes */
+int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu, uint *fifop)
+{
+ uint fifo;
+ struct d11txh *txh;
+ struct ieee80211_hdr *h;
+ struct scb *scb;
+
+ txh = (struct d11txh *) (pdu->data);
+ h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
+
+ /* get the pkt queue info. This was put at brcms_c_sendctl or
+ * brcms_c_send for PDU */
+ fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
+
+ scb = NULL;
+
+ *fifop = fifo;
+
+ /* return if insufficient dma resources */
+ if (*wlc->core->txavail[fifo] < MAX_DMA_SEGS) {
+ /* Mark precedences related to this FIFO, unsendable */
+ /* A fifo is full. Clear precedences related to that FIFO */
+ wlc->tx_prec_map &= ~(wlc->fifo2prec_map[fifo]);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
+ uint *blocks)
+{
+ if (fifo >= NFIFO)
+ return -EINVAL;
+
+ *blocks = wlc_hw->xmtfifo_sz[fifo];
+
+ return 0;
+}
+
+void
+brcms_c_set_addrmatch(struct brcms_c_info *wlc, int match_reg_offset,
+ const u8 *addr)
+{
+ brcms_b_set_addrmatch(wlc->hw, match_reg_offset, addr);
+ if (match_reg_offset == RCM_BSSID_OFFSET)
+ memcpy(wlc->bsscfg->BSSID, addr, ETH_ALEN);
+}
+
+/*
+ * Flag 'scan in progress' to withhold dynamic phy calibration
+ */
+void brcms_c_scan_start(struct brcms_c_info *wlc)
+{
+ wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true);
+}
+
+void brcms_c_scan_stop(struct brcms_c_info *wlc)
+{
+ wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false);
+}
+
+void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state)
+{
+ wlc->pub->associated = state;
+ wlc->bsscfg->associated = state;
+}
+
+/*
+ * When a remote STA/AP is removed by Mac80211, or when it can no longer accept
+ * AMPDU traffic, packets pending in hardware have to be invalidated so that
+ * when later on hardware releases them, they can be handled appropriately.
+ */
+void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
+ struct ieee80211_sta *sta,
+ void (*dma_callback_fn))
+{
+ struct dma_pub *dmah;
+ int i;
+ for (i = 0; i < NFIFO; i++) {
+ dmah = hw->di[i];
+ if (dmah != NULL)
+ dma_walk_packets(dmah, dma_callback_fn, sta);
+ }
+}
+
+int brcms_c_get_curband(struct brcms_c_info *wlc)
+{
+ return wlc->band->bandunit;
+}
+
+void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
+{
+ /* flush packet queue when requested */
+ if (drop)
+ brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL);
+
+ /* wait for queue and DMA fifos to run dry */
+ while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0)
+ brcms_msleep(wlc->wl, 1);
+}
+
+void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
+{
+ wlc->bcn_li_bcn = interval;
+ if (wlc->pub->up)
+ brcms_c_bcn_li_upd(wlc);
+}
+
+int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr)
+{
+ uint qdbm;
+
+ /* Remove override bit and clip to max qdbm value */
+ qdbm = min_t(uint, txpwr * BRCMS_TXPWR_DB_FACTOR, 0xff);
+ return wlc_phy_txpower_set(wlc->band->pi, qdbm, false);
+}
+
+int brcms_c_get_tx_power(struct brcms_c_info *wlc)
+{
+ uint qdbm;
+ bool override;
+
+ wlc_phy_txpower_get(wlc->band->pi, &qdbm, &override);
+
+ /* Return qdbm units */
+ return (int)(qdbm / BRCMS_TXPWR_DB_FACTOR);
+}
+
+void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc)
+{
+ wlc->mpc = mpc;
+ brcms_c_radio_mpc_upd(wlc);
+}
+
+/* Process received frames */
+/*
+ * Return true if more frames need to be processed. false otherwise.
+ * Param 'bound' indicates max. # frames to process before break out.
+ */
+static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
+{
+ struct d11rxhdr *rxh;
+ struct ieee80211_hdr *h;
+ uint len;
+ bool is_amsdu;
+
+ BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+
+ /* frame starts with rxhdr */
+ rxh = (struct d11rxhdr *) (p->data);
+
+ /* strip off rxhdr */
+ skb_pull(p, BRCMS_HWRXOFF);
+
+ /* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU subframes */
+ if (rxh->RxStatus1 & RXS_PBPRES) {
+ if (p->len < 2) {
+ wiphy_err(wlc->wiphy, "wl%d: recv: rcvd runt of "
+ "len %d\n", wlc->pub->unit, p->len);
+ goto toss;
+ }
+ skb_pull(p, 2);
+ }
+
+ h = (struct ieee80211_hdr *)(p->data + D11_PHY_HDR_LEN);
+ len = p->len;
+
+ if (rxh->RxStatus1 & RXS_FCSERR) {
+ if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) {
+ wiphy_err(wlc->wiphy, "FCSERR while scanning******* -"
+ " tossing\n");
+ goto toss;
+ } else {
+ wiphy_err(wlc->wiphy, "RCSERR!!!\n");
+ goto toss;
+ }
+ }
+
+ /* check received pkt has at least frame control field */
+ if (len < D11_PHY_HDR_LEN + sizeof(h->frame_control))
+ goto toss;
+
+ /* not supporting A-MSDU */
+ is_amsdu = rxh->RxStatus2 & RXS_AMSDU_MASK;
+ if (is_amsdu)
+ goto toss;
+
+ brcms_c_recvctl(wlc, rxh, p);
+ return;
+
+ toss:
+ brcmu_pkt_buf_free_skb(p);
+}
+
+/* Process received frames */
+/*
+ * Return true if more frames need to be processed. false otherwise.
+ * Param 'bound' indicates max. # frames to process before break out.
+ */
+static bool
+brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
+{
+ struct sk_buff *p;
+ struct sk_buff *head = NULL;
+ struct sk_buff *tail = NULL;
+ uint n = 0;
+ uint bound_limit = bound ? RXBND : -1;
+
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+ /* gather received frames */
+ while ((p = dma_rx(wlc_hw->di[fifo]))) {
+
+ if (!tail)
+ head = tail = p;
+ else {
+ tail->prev = p;
+ tail = p;
+ }
+
+ /* !give others some time to run! */
+ if (++n >= bound_limit)
+ break;
+ }
+
+ /* post more rbufs */
+ dma_rxfill(wlc_hw->di[fifo]);
+
+ /* process each frame */
+ while ((p = head) != NULL) {
+ struct d11rxhdr_le *rxh_le;
+ struct d11rxhdr *rxh;
+ head = head->prev;
+ p->prev = NULL;
+
+ rxh_le = (struct d11rxhdr_le *)p->data;
+ rxh = (struct d11rxhdr *)p->data;
+
+ /* fixup rx header endianness */
+ rxh->RxFrameSize = le16_to_cpu(rxh_le->RxFrameSize);
+ rxh->PhyRxStatus_0 = le16_to_cpu(rxh_le->PhyRxStatus_0);
+ rxh->PhyRxStatus_1 = le16_to_cpu(rxh_le->PhyRxStatus_1);
+ rxh->PhyRxStatus_2 = le16_to_cpu(rxh_le->PhyRxStatus_2);
+ rxh->PhyRxStatus_3 = le16_to_cpu(rxh_le->PhyRxStatus_3);
+ rxh->PhyRxStatus_4 = le16_to_cpu(rxh_le->PhyRxStatus_4);
+ rxh->PhyRxStatus_5 = le16_to_cpu(rxh_le->PhyRxStatus_5);
+ rxh->RxStatus1 = le16_to_cpu(rxh_le->RxStatus1);
+ rxh->RxStatus2 = le16_to_cpu(rxh_le->RxStatus2);
+ rxh->RxTSFTime = le16_to_cpu(rxh_le->RxTSFTime);
+ rxh->RxChan = le16_to_cpu(rxh_le->RxChan);
+
+ brcms_c_recv(wlc_hw->wlc, p);
+ }
+
+ return n >= bound_limit;
+}
+
+/* second-level interrupt processing
+ * Return true if another dpc needs to be re-scheduled. false otherwise.
+ * Param 'bounded' indicates if applicable loops should be bounded.
+ */
+bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
+{
+ u32 macintstatus;
+ struct brcms_hardware *wlc_hw = wlc->hw;
+ struct d11regs __iomem *regs = wlc_hw->regs;
+ struct wiphy *wiphy = wlc->wiphy;
+
+ if (brcms_deviceremoved(wlc)) {
+ wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
+ __func__);
+ brcms_down(wlc->wl);
+ return false;
+ }
+
+ /* grab and clear the saved software intstatus bits */
+ macintstatus = wlc->macintstatus;
+ wlc->macintstatus = 0;
+
+ BCMMSG(wlc->wiphy, "wl%d: macintstatus 0x%x\n",
+ wlc_hw->unit, macintstatus);
+
+ WARN_ON(macintstatus & MI_PRQ); /* PRQ Interrupt in non-MBSS */
+
+ /* tx status */
+ if (macintstatus & MI_TFS) {
+ bool fatal;
+ if (brcms_b_txstatus(wlc->hw, bounded, &fatal))
+ wlc->macintstatus |= MI_TFS;
+ if (fatal) {
+ wiphy_err(wiphy, "MI_TFS: fatal\n");
+ goto fatal;
+ }
+ }
+
+ if (macintstatus & (MI_TBTT | MI_DTIM_TBTT))
+ brcms_c_tbtt(wlc);
+
+ /* ATIM window end */
+ if (macintstatus & MI_ATIMWINEND) {
+ BCMMSG(wlc->wiphy, "end of ATIM window\n");
+ OR_REG(&regs->maccommand, wlc->qvalid);
+ wlc->qvalid = 0;
+ }
+
+ /*
+ * received data or control frame, MI_DMAINT is
+ * indication of RX_FIFO interrupt
+ */
+ if (macintstatus & MI_DMAINT)
+ if (brcms_b_recv(wlc_hw, RX_FIFO, bounded))
+ wlc->macintstatus |= MI_DMAINT;
+
+ /* noise sample collected */
+ if (macintstatus & MI_BG_NOISE)
+ wlc_phy_noise_sample_intr(wlc_hw->band->pi);
+
+ if (macintstatus & MI_GP0) {
+ wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d "
+ "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
+
+ printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
+ __func__, wlc_hw->sih->chip,
+ wlc_hw->sih->chiprev);
+ /* big hammer */
+ brcms_init(wlc->wl);
+ }
+
+ /* gptimer timeout */
+ if (macintstatus & MI_TO)
+ W_REG(&regs->gptimer, 0);
+
+ if (macintstatus & MI_RFDISABLE) {
+ BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the"
+ " RF Disable Input\n", wlc_hw->unit);
+ brcms_rfkill_set_hw_state(wlc->wl);
+ }
+
+ /* send any enq'd tx packets. Just makes sure to jump start tx */
+ if (!pktq_empty(&wlc->pkt_queue->q))
+ brcms_c_send_q(wlc);
+
+ /* it isn't done and needs to be resched if macintstatus is non-zero */
+ return wlc->macintstatus != 0;
+
+ fatal:
+ brcms_init(wlc->wl);
+ return wlc->macintstatus != 0;
+}
+
+void brcms_c_init(struct brcms_c_info *wlc)
+{
+ struct d11regs __iomem *regs;
+ u16 chanspec;
+ bool mute = false;
+
+ BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+
+ regs = wlc->regs;
+
+ /*
+ * This will happen if a big-hammer was executed. In
+ * that case, we want to go back to the channel that
+ * we were on and not new channel
+ */
+ if (wlc->pub->associated)
+ chanspec = wlc->home_chanspec;
+ else
+ chanspec = brcms_c_init_chanspec(wlc);
+
+ brcms_b_init(wlc->hw, chanspec, mute);
+
+ /* update beacon listen interval */
+ brcms_c_bcn_li_upd(wlc);
+
+ /* write ethernet address to core */
+ brcms_c_set_mac(wlc->bsscfg);
+ brcms_c_set_bssid(wlc->bsscfg);
+
+ /* Update tsf_cfprep if associated and up */
+ if (wlc->pub->associated && wlc->bsscfg->up) {
+ u32 bi;
+
+ /* get beacon period and convert to uS */
+ bi = wlc->bsscfg->current_bss->beacon_period << 10;
+ /*
+ * update since init path would reset
+ * to default value
+ */
+ W_REG(&regs->tsf_cfprep,
+ (bi << CFPREP_CBI_SHIFT));
+
+ /* Update maccontrol PM related bits */
+ brcms_c_set_ps_ctrl(wlc);
+ }
+
+ brcms_c_bandinit_ordered(wlc, chanspec);
+
+ /* init probe response timeout */
+ brcms_b_write_shm(wlc->hw, M_PRS_MAXTIME, wlc->prb_resp_timeout);
+
+ /* init max burst txop (framebursting) */
+ brcms_b_write_shm(wlc->hw, M_MBURST_TXOP,
+ (wlc->
+ _rifs ? (EDCF_AC_VO_TXOP_AP << 5) : MAXFRAMEBURST_TXOP));
+
+ /* initialize maximum allowed duty cycle */
+ brcms_c_duty_cycle_set(wlc, wlc->tx_duty_cycle_ofdm, true, true);
+ brcms_c_duty_cycle_set(wlc, wlc->tx_duty_cycle_cck, false, true);
+
+ /*
+ * Update some shared memory locations related to
+ * max AMPDU size allowed to received
+ */
+ brcms_c_ampdu_shm_upd(wlc->ampdu);
+
+ /* band-specific inits */
+ brcms_c_bsinit(wlc);
+
+ /* Enable EDCF mode (while the MAC is suspended) */
+ OR_REG(&regs->ifs_ctl, IFS_USEEDCF);
+ brcms_c_edcf_setparams(wlc, false);
+
+ /* Init precedence maps for empty FIFOs */
+ brcms_c_tx_prec_map_init(wlc);
+
+ /* read the ucode version if we have not yet done so */
+ if (wlc->ucode_rev == 0) {
+ wlc->ucode_rev =
+ brcms_b_read_shm(wlc->hw, M_BOM_REV_MAJOR) << NBITS(u16);
+ wlc->ucode_rev |= brcms_b_read_shm(wlc->hw, M_BOM_REV_MINOR);
+ }
+
+ /* ..now really unleash hell (allow the MAC out of suspend) */
+ brcms_c_enable_mac(wlc);
+
+ /* clear tx flow control */
+ brcms_c_txflowcontrol_reset(wlc);
+
+ /* enable the RF Disable Delay timer */
+ W_REG(&wlc->regs->rfdisabledly, RFDISABLE_DEFAULT);
+
+ /* initialize mpc delay */
+ wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
+
+ /*
+ * Initialize WME parameters; if they haven't been set by some other
+ * mechanism (IOVar, etc) then read them from the hardware.
+ */
+ if (GFIELD(wlc->wme_retries[0], EDCF_SHORT) == 0) {
+ /* Uninitialized; read from HW */
+ int ac;
+
+ for (ac = 0; ac < AC_COUNT; ac++)
+ wlc->wme_retries[ac] =
+ brcms_b_read_shm(wlc->hw, M_AC_TXLMT_ADDR(ac));
+ }
+}
+
+/*
+ * The common driver entry routine. Error codes should be unique
+ */
+struct brcms_c_info *
+brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
+ bool piomode, void __iomem *regsva, struct pci_dev *btparam,
+ uint *perr)
+{
+ struct brcms_c_info *wlc;
+ uint err = 0;
+ uint i, j;
+ struct brcms_pub *pub;
+
+ /* allocate struct brcms_c_info state and its substructures */
+ wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, device);
+ if (wlc == NULL)
+ goto fail;
+ wlc->wiphy = wl->wiphy;
+ pub = wlc->pub;
+
+#if defined(BCMDBG)
+ wlc_info_dbg = wlc;
+#endif
+
+ wlc->band = wlc->bandstate[0];
+ wlc->core = wlc->corestate;
+ wlc->wl = wl;
+ pub->unit = unit;
+ pub->_piomode = piomode;
+ wlc->bandinit_pending = false;
+
+ /* populate struct brcms_c_info with default values */
+ brcms_c_info_init(wlc, unit);
+
+ /* update sta/ap related parameters */
+ brcms_c_ap_upd(wlc);
+
+ /*
+ * low level attach steps(all hw accesses go
+ * inside, no more in rest of the attach)
+ */
+ err = brcms_b_attach(wlc, vendor, device, unit, piomode, regsva,
+ btparam);
+ if (err)
+ goto fail;
+
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_PAM_OVR, OFF);
+
+ pub->phy_11ncapable = BRCMS_PHY_11N_CAP(wlc->band);
+
+ /* disable allowed duty cycle */
+ wlc->tx_duty_cycle_ofdm = 0;
+ wlc->tx_duty_cycle_cck = 0;
+
+ brcms_c_stf_phy_chain_calc(wlc);
+
+ /* txchain 1: txant 0, txchain 2: txant 1 */
+ if (BRCMS_ISNPHY(wlc->band) && (wlc->stf->txstreams == 1))
+ wlc->stf->txant = wlc->stf->hw_txchain - 1;
+
+ /* push to BMAC driver */
+ wlc_phy_stf_chain_init(wlc->band->pi, wlc->stf->hw_txchain,
+ wlc->stf->hw_rxchain);
+
+ /* pull up some info resulting from the low attach */
+ for (i = 0; i < NFIFO; i++)
+ wlc->core->txavail[i] = wlc->hw->txavail[i];
+
+ memcpy(&wlc->perm_etheraddr, &wlc->hw->etheraddr, ETH_ALEN);
+ memcpy(&pub->cur_etheraddr, &wlc->hw->etheraddr, ETH_ALEN);
+
+ for (j = 0; j < wlc->pub->_nbands; j++) {
+ wlc->band = wlc->bandstate[j];
+
+ if (!brcms_c_attach_stf_ant_init(wlc)) {
+ err = 24;
+ goto fail;
+ }
+
+ /* default contention windows size limits */
+ wlc->band->CWmin = APHY_CWMIN;
+ wlc->band->CWmax = PHY_CWMAX;
+
+ /* init gmode value */
+ if (wlc->band->bandtype == BRCM_BAND_2G) {
+ wlc->band->gmode = GMODE_AUTO;
+ brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER,
+ wlc->band->gmode);
+ }
+
+ /* init _n_enab supported mode */
+ if (BRCMS_PHY_11N_CAP(wlc->band)) {
+ pub->_n_enab = SUPPORT_11N;
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_USER,
+ ((pub->_n_enab ==
+ SUPPORT_11N) ? WL_11N_2x2 :
+ WL_11N_3x3));
+ }
+
+ /* init per-band default rateset, depend on band->gmode */
+ brcms_default_rateset(wlc, &wlc->band->defrateset);
+
+ /* fill in hw_rateset */
+ brcms_c_rateset_filter(&wlc->band->defrateset,
+ &wlc->band->hw_rateset, false,
+ BRCMS_RATES_CCK_OFDM, BRCMS_RATE_MASK,
+ (bool) (wlc->pub->_n_enab & SUPPORT_11N));
+ }
+
+ /*
+ * update antenna config due to
+ * wlc->stf->txant/txchain/ant_rx_ovr change
+ */
+ brcms_c_stf_phy_txant_upd(wlc);
+
+ /* attach each modules */
+ err = brcms_c_attach_module(wlc);
+ if (err != 0)
+ goto fail;
+
+ if (!brcms_c_timers_init(wlc, unit)) {
+ wiphy_err(wl->wiphy, "wl%d: %s: init_timer failed\n", unit,
+ __func__);
+ err = 32;
+ goto fail;
+ }
+
+ /* depend on rateset, gmode */
+ wlc->cmi = brcms_c_channel_mgr_attach(wlc);
+ if (!wlc->cmi) {
+ wiphy_err(wl->wiphy, "wl%d: %s: channel_mgr_attach failed"
+ "\n", unit, __func__);
+ err = 33;
+ goto fail;
+ }
+
+ /* init default when all parameters are ready, i.e. ->rateset */
+ brcms_c_bss_default_init(wlc);
+
+ /*
+ * Complete the wlc default state initializations..
+ */
+
+ /* allocate our initial queue */
+ wlc->pkt_queue = brcms_c_txq_alloc(wlc);
+ if (wlc->pkt_queue == NULL) {
+ wiphy_err(wl->wiphy, "wl%d: %s: failed to malloc tx queue\n",
+ unit, __func__);
+ err = 100;
+ goto fail;
+ }
+
+ wlc->bsscfg->wlc = wlc;
+
+ wlc->mimoft = FT_HT;
+ wlc->mimo_40txbw = AUTO;
+ wlc->ofdm_40txbw = AUTO;
+ wlc->cck_40txbw = AUTO;
+ brcms_c_update_mimo_band_bwcap(wlc, BRCMS_N_BW_20IN2G_40IN5G);
+
+ /* Set default values of SGI */
+ if (BRCMS_SGI_CAP_PHY(wlc)) {
+ brcms_c_ht_update_sgi_rx(wlc, (BRCMS_N_SGI_20 |
+ BRCMS_N_SGI_40));
+ } else if (BRCMS_ISSSLPNPHY(wlc->band)) {
+ brcms_c_ht_update_sgi_rx(wlc, (BRCMS_N_SGI_20 |
+ BRCMS_N_SGI_40));
+ } else {
+ brcms_c_ht_update_sgi_rx(wlc, 0);
+ }
+
+ /* initialize radio_mpc_disable according to wlc->mpc */
+ brcms_c_radio_mpc_upd(wlc);
+ brcms_b_antsel_set(wlc->hw, wlc->asi->antsel_avail);
+
+ if (perr)
+ *perr = 0;
+
+ return wlc;
+
+ fail:
+ wiphy_err(wl->wiphy, "wl%d: %s: failed with err %d\n",
+ unit, __func__, err);
+ if (wlc)
+ brcms_c_detach(wlc);
+
+ if (perr)
+ *perr = err;
+ return NULL;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
new file mode 100644
index 00000000000..c0e0fcfdfaf
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -0,0 +1,735 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_MAIN_H_
+#define _BRCM_MAIN_H_
+
+#include <linux/etherdevice.h>
+
+#include <brcmu_utils.h>
+#include "types.h"
+#include "d11.h"
+#include "scb.h"
+
+#define INVCHANNEL 255 /* invalid channel */
+
+/* max # brcms_c_module_register() calls */
+#define BRCMS_MAXMODULES 22
+
+#define SEQNUM_SHIFT 4
+#define SEQNUM_MAX 0x1000
+
+#define NTXRATE 64 /* # tx MPDUs rate is reported for */
+
+/* Maximum wait time for a MAC suspend */
+/* uS: 83mS is max packet time (64KB ampdu @ 6Mbps) */
+#define BRCMS_MAX_MAC_SUSPEND 83000
+
+/* responses for probe requests older that this are tossed, zero to disable */
+#define BRCMS_PRB_RESP_TIMEOUT 0 /* Disable probe response timeout */
+
+/* transmit buffer max headroom for protocol headers */
+#define TXOFF (D11_TXH_LEN + D11_PHY_HDR_LEN)
+
+#define AC_COUNT 4
+
+/* Macros for doing definition and get/set of bitfields
+ * Usage example, e.g. a three-bit field (bits 4-6):
+ * #define <NAME>_M BITFIELD_MASK(3)
+ * #define <NAME>_S 4
+ * ...
+ * regval = R_REG(osh, &regs->regfoo);
+ * field = GFIELD(regval, <NAME>);
+ * regval = SFIELD(regval, <NAME>, 1);
+ * W_REG(osh, &regs->regfoo, regval);
+ */
+#define BITFIELD_MASK(width) \
+ (((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+ (((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+ (((val) & (~(field ## _M << field ## _S))) | \
+ ((unsigned)(bits) << field ## _S))
+
+#define SW_TIMER_MAC_STAT_UPD 30 /* periodic MAC stats update */
+
+/* max # supported core revisions (0 .. MAXCOREREV - 1) */
+#define MAXCOREREV 28
+
+/* Double check that unsupported cores are not enabled */
+#if CONF_MSK(D11CONF, 0x4f) || CONF_GE(D11CONF, MAXCOREREV)
+#error "Configuration for D11CONF includes unsupported versions."
+#endif /* Bad versions */
+
+/* values for shortslot_override */
+#define BRCMS_SHORTSLOT_AUTO -1 /* Driver will manage Shortslot setting */
+#define BRCMS_SHORTSLOT_OFF 0 /* Turn off short slot */
+#define BRCMS_SHORTSLOT_ON 1 /* Turn on short slot */
+
+/* value for short/long and mixmode/greenfield preamble */
+#define BRCMS_LONG_PREAMBLE (0)
+#define BRCMS_SHORT_PREAMBLE (1 << 0)
+#define BRCMS_GF_PREAMBLE (1 << 1)
+#define BRCMS_MM_PREAMBLE (1 << 2)
+#define BRCMS_IS_MIMO_PREAMBLE(_pre) (((_pre) == BRCMS_GF_PREAMBLE) || \
+ ((_pre) == BRCMS_MM_PREAMBLE))
+
+/* TxFrameID */
+/* seq and frag bits: SEQNUM_SHIFT, FRAGNUM_MASK (802.11.h) */
+/* rate epoch bits: TXFID_RATE_SHIFT, TXFID_RATE_MASK ((wlc_rate.c) */
+#define TXFID_QUEUE_MASK 0x0007 /* Bits 0-2 */
+#define TXFID_SEQ_MASK 0x7FE0 /* Bits 5-15 */
+#define TXFID_SEQ_SHIFT 5 /* Number of bit shifts */
+#define TXFID_RATE_PROBE_MASK 0x8000 /* Bit 15 for rate probe */
+#define TXFID_RATE_MASK 0x0018 /* Mask for bits 3 and 4 */
+#define TXFID_RATE_SHIFT 3 /* Shift 3 bits for rate mask */
+
+/* promote boardrev */
+#define BOARDREV_PROMOTABLE 0xFF /* from */
+#define BOARDREV_PROMOTED 1 /* to */
+
+#define DATA_BLOCK_TX_SUPR (1 << 4)
+
+/* 802.1D Priority to TX FIFO number for wme */
+extern const u8 prio2fifo[];
+
+/* Ucode MCTL_WAKE override bits */
+#define BRCMS_WAKE_OVERRIDE_CLKCTL 0x01
+#define BRCMS_WAKE_OVERRIDE_PHYREG 0x02
+#define BRCMS_WAKE_OVERRIDE_MACSUSPEND 0x04
+#define BRCMS_WAKE_OVERRIDE_TXFIFO 0x08
+#define BRCMS_WAKE_OVERRIDE_FORCEFAST 0x10
+
+/* stuff pulled in from wlc.c */
+
+/* Interrupt bit error summary. Don't include I_RU: we refill DMA at other
+ * times; and if we run out, constant I_RU interrupts may cause lockup. We
+ * will still get error counts from rx0ovfl.
+ */
+#define I_ERRORS (I_PC | I_PD | I_DE | I_RO | I_XU)
+/* default software intmasks */
+#define DEF_RXINTMASK (I_RI) /* enable rx int on rxfifo only */
+#define DEF_MACINTMASK (MI_TXSTOP | MI_TBTT | MI_ATIMWINEND | MI_PMQ | \
+ MI_PHYTXERR | MI_DMAINT | MI_TFS | MI_BG_NOISE | \
+ MI_CCA | MI_TO | MI_GP0 | MI_RFDISABLE | MI_PWRUP)
+
+#define MAXTXPKTS 6 /* max # pkts pending */
+
+/* frameburst */
+#define MAXTXFRAMEBURST 8 /* vanilla xpress mode: max frames/burst */
+#define MAXFRAMEBURST_TXOP 10000 /* Frameburst TXOP in usec */
+
+#define NFIFO 6 /* # tx/rx fifopairs */
+
+/* PLL requests */
+
+/* pll is shared on old chips */
+#define BRCMS_PLLREQ_SHARED 0x1
+/* hold pll for radio monitor register checking */
+#define BRCMS_PLLREQ_RADIO_MON 0x2
+/* hold/release pll for some short operation */
+#define BRCMS_PLLREQ_FLIP 0x4
+
+#define CHANNEL_BANDUNIT(wlc, ch) \
+ (((ch) <= CH_MAX_2G_CHANNEL) ? BAND_2G_INDEX : BAND_5G_INDEX)
+
+#define OTHERBANDUNIT(wlc) \
+ ((uint)((wlc)->band->bandunit ? BAND_2G_INDEX : BAND_5G_INDEX))
+
+/*
+ * 802.11 protection information
+ *
+ * _g: use g spec protection, driver internal.
+ * g_override: override for use of g spec protection.
+ * gmode_user: user config gmode, operating band->gmode is different.
+ * overlap: Overlap BSS/IBSS protection for both 11g and 11n.
+ * nmode_user: user config nmode, operating pub->nmode is different.
+ * n_cfg: use OFDM protection on MIMO frames.
+ * n_cfg_override: override for use of N protection.
+ * nongf: non-GF present protection.
+ * nongf_override: override for use of GF protection.
+ * n_pam_override: override for preamble: MM or GF.
+ * n_obss: indicated OBSS Non-HT STA present.
+*/
+struct brcms_protection {
+ bool _g;
+ s8 g_override;
+ u8 gmode_user;
+ s8 overlap;
+ s8 nmode_user;
+ s8 n_cfg;
+ s8 n_cfg_override;
+ bool nongf;
+ s8 nongf_override;
+ s8 n_pam_override;
+ bool n_obss;
+};
+
+/*
+ * anything affecting the single/dual streams/antenna operation
+ *
+ * hw_txchain: HW txchain bitmap cfg.
+ * txchain: txchain bitmap being used.
+ * txstreams: number of txchains being used.
+ * hw_rxchain: HW rxchain bitmap cfg.
+ * rxchain: rxchain bitmap being used.
+ * rxstreams: number of rxchains being used.
+ * ant_rx_ovr: rx antenna override.
+ * txant: userTx antenna setting.
+ * phytxant: phyTx antenna setting in txheader.
+ * ss_opmode: singlestream Operational mode, 0:siso; 1:cdd.
+ * ss_algosel_auto: if true, use wlc->stf->ss_algo_channel;
+ * else use wlc->band->stf->ss_mode_band.
+ * ss_algo_channel: ss based on per-channel algo: 0: SISO, 1: CDD 2: STBC.
+ * rxchain_restore_delay: delay time to restore default rxchain.
+ * ldpc: AUTO/ON/OFF ldpc cap supported.
+ * txcore[MAX_STREAMS_SUPPORTED + 1]: bitmap of selected core for each Nsts.
+ * spatial_policy:
+ */
+struct brcms_stf {
+ u8 hw_txchain;
+ u8 txchain;
+ u8 txstreams;
+ u8 hw_rxchain;
+ u8 rxchain;
+ u8 rxstreams;
+ u8 ant_rx_ovr;
+ s8 txant;
+ u16 phytxant;
+ u8 ss_opmode;
+ bool ss_algosel_auto;
+ u16 ss_algo_channel;
+ u8 rxchain_restore_delay;
+ s8 ldpc;
+ u8 txcore[MAX_STREAMS_SUPPORTED + 1];
+ s8 spatial_policy;
+};
+
+#define BRCMS_STF_SS_STBC_TX(wlc, scb) \
+ (((wlc)->stf->txstreams > 1) && (((wlc)->band->band_stf_stbc_tx == ON) \
+ || (((scb)->flags & SCB_STBCCAP) && \
+ (wlc)->band->band_stf_stbc_tx == AUTO && \
+ isset(&((wlc)->stf->ss_algo_channel), PHY_TXC1_MODE_STBC))))
+
+#define BRCMS_STBC_CAP_PHY(wlc) (BRCMS_ISNPHY(wlc->band) && \
+ NREV_GE(wlc->band->phyrev, 3))
+
+#define BRCMS_SGI_CAP_PHY(wlc) ((BRCMS_ISNPHY(wlc->band) && \
+ NREV_GE(wlc->band->phyrev, 3)) || \
+ BRCMS_ISLCNPHY(wlc->band))
+
+#define BRCMS_CHAN_PHYTYPE(x) (((x) & RXS_CHAN_PHYTYPE_MASK) \
+ >> RXS_CHAN_PHYTYPE_SHIFT)
+#define BRCMS_CHAN_CHANNEL(x) (((x) & RXS_CHAN_ID_MASK) \
+ >> RXS_CHAN_ID_SHIFT)
+
+/*
+ * core state (mac)
+ */
+struct brcms_core {
+ uint coreidx; /* # sb enumerated core */
+
+ /* fifo */
+ uint *txavail[NFIFO]; /* # tx descriptors available */
+ s16 txpktpend[NFIFO]; /* tx admission control */
+
+ struct macstat *macstat_snapshot; /* mac hw prev read values */
+};
+
+/*
+ * band state (phy+ana+radio)
+ */
+struct brcms_band {
+ int bandtype; /* BRCM_BAND_2G, BRCM_BAND_5G */
+ uint bandunit; /* bandstate[] index */
+
+ u16 phytype; /* phytype */
+ u16 phyrev;
+ u16 radioid;
+ u16 radiorev;
+ struct brcms_phy_pub *pi; /* pointer to phy specific information */
+ bool abgphy_encore;
+
+ u8 gmode; /* currently active gmode */
+
+ struct scb *hwrs_scb; /* permanent scb for hw rateset */
+
+ /* band-specific copy of default_bss.rateset */
+ struct brcms_c_rateset defrateset;
+
+ u8 band_stf_ss_mode; /* Configured STF type, 0:siso; 1:cdd */
+ s8 band_stf_stbc_tx; /* STBC TX 0:off; 1:force on; -1:auto */
+ /* rates supported by chip (phy-specific) */
+ struct brcms_c_rateset hw_rateset;
+ u8 basic_rate[BRCM_MAXRATE + 1]; /* basic rates indexed by rate */
+ bool mimo_cap_40; /* 40 MHz cap enabled on this band */
+ s8 antgain; /* antenna gain from srom */
+
+ u16 CWmin; /* minimum size of contention window, in unit of aSlotTime */
+ u16 CWmax; /* maximum size of contention window, in unit of aSlotTime */
+ struct ieee80211_supported_band band;
+};
+
+/* module control blocks */
+struct modulecb {
+ /* module name : NULL indicates empty array member */
+ char name[32];
+ /* handle passed when handler 'doiovar' is called */
+ struct brcms_info *hdl;
+
+ int (*down_fn)(void *handle); /* down handler. Note: the int returned
+ * by the down function is a count of the
+ * number of timers that could not be
+ * freed.
+ */
+
+};
+
+struct brcms_hw_band {
+ int bandtype; /* BRCM_BAND_2G, BRCM_BAND_5G */
+ uint bandunit; /* bandstate[] index */
+ u16 mhfs[MHFMAX]; /* MHF array shadow */
+ u8 bandhw_stf_ss_mode; /* HW configured STF type, 0:siso; 1:cdd */
+ u16 CWmin;
+ u16 CWmax;
+ u32 core_flags;
+
+ u16 phytype; /* phytype */
+ u16 phyrev;
+ u16 radioid;
+ u16 radiorev;
+ struct brcms_phy_pub *pi; /* pointer to phy specific information */
+ bool abgphy_encore;
+};
+
+struct brcms_hardware {
+ bool _piomode; /* true if pio mode */
+ struct brcms_c_info *wlc;
+
+ /* fifo */
+ struct dma_pub *di[NFIFO]; /* dma handles, per fifo */
+
+ uint unit; /* device instance number */
+
+ /* version info */
+ u16 vendorid; /* PCI vendor id */
+ u16 deviceid; /* PCI device id */
+ uint corerev; /* core revision */
+ u8 sromrev; /* version # of the srom */
+ u16 boardrev; /* version # of particular board */
+ u32 boardflags; /* Board specific flags from srom */
+ u32 boardflags2; /* More board flags if sromrev >= 4 */
+ u32 machwcap; /* MAC capabilities */
+ u32 machwcap_backup; /* backup of machwcap */
+
+ struct si_pub *sih; /* SI handle (cookie for siutils calls) */
+ struct d11regs __iomem *regs; /* pointer to device registers */
+ struct phy_shim_info *physhim; /* phy shim layer handler */
+ struct shared_phy *phy_sh; /* pointer to shared phy state */
+ struct brcms_hw_band *band;/* pointer to active per-band state */
+ /* band state per phy/radio */
+ struct brcms_hw_band *bandstate[MAXBANDS];
+ u16 bmac_phytxant; /* cache of high phytxant state */
+ bool shortslot; /* currently using 11g ShortSlot timing */
+ u16 SRL; /* 802.11 dot11ShortRetryLimit */
+ u16 LRL; /* 802.11 dot11LongRetryLimit */
+ u16 SFBL; /* Short Frame Rate Fallback Limit */
+ u16 LFBL; /* Long Frame Rate Fallback Limit */
+
+ bool up; /* d11 hardware up and running */
+ uint now; /* # elapsed seconds */
+ uint _nbands; /* # bands supported */
+ u16 chanspec; /* bmac chanspec shadow */
+
+ uint *txavail[NFIFO]; /* # tx descriptors available */
+ const u16 *xmtfifo_sz; /* fifo size in 256B for each xmt fifo */
+
+ u32 pllreq; /* pll requests to keep PLL on */
+
+ u8 suspended_fifos; /* Which TX fifo to remain awake for */
+ u32 maccontrol; /* Cached value of maccontrol */
+ uint mac_suspend_depth; /* current depth of mac_suspend levels */
+ u32 wake_override; /* bit flags to force MAC to WAKE mode */
+ u32 mute_override; /* Prevent ucode from sending beacons */
+ u8 etheraddr[ETH_ALEN]; /* currently configured ethernet address */
+ bool noreset; /* true= do not reset hw, used by WLC_OUT */
+ bool forcefastclk; /* true if h/w is forcing to use fast clk */
+ bool clk; /* core is out of reset and has clock */
+ bool sbclk; /* sb has clock */
+ bool phyclk; /* phy is out of reset and has clock */
+
+ bool ucode_loaded; /* true after ucode downloaded */
+
+
+ u8 hw_stf_ss_opmode; /* STF single stream operation mode */
+ u8 antsel_type; /* Type of boardlevel mimo antenna switch-logic
+ * 0 = N/A, 1 = 2x4 board, 2 = 2x3 CB2 board
+ */
+ u32 antsel_avail; /*
+ * put struct antsel_info here if more info is
+ * needed
+ */
+};
+
+/* TX Queue information
+ *
+ * Each flow of traffic out of the device has a TX Queue with independent
+ * flow control. Several interfaces may be associated with a single TX Queue
+ * if they belong to the same flow of traffic from the device. For multi-channel
+ * operation there are independent TX Queues for each channel.
+ */
+struct brcms_txq_info {
+ struct brcms_txq_info *next;
+ struct pktq q;
+ uint stopped; /* tx flow control bits */
+};
+
+/*
+ * Principal common driver data structure.
+ *
+ * pub: pointer to driver public state.
+ * wl: pointer to specific private state.
+ * regs: pointer to device registers.
+ * hw: HW related state.
+ * clkreq_override: setting for clkreq for PCIE : Auto, 0, 1.
+ * fastpwrup_dly: time in us needed to bring up d11 fast clock.
+ * macintstatus: bit channel between isr and dpc.
+ * macintmask: sw runtime master macintmask value.
+ * defmacintmask: default "on" macintmask value.
+ * clk: core is out of reset and has clock.
+ * core: pointer to active io core.
+ * band: pointer to active per-band state.
+ * corestate: per-core state (one per hw core).
+ * bandstate: per-band state (one per phy/radio).
+ * qvalid: DirFrmQValid and BcMcFrmQValid.
+ * ampdu: ampdu module handler.
+ * asi: antsel module handler.
+ * cmi: channel manager module handler.
+ * vendorid: PCI vendor id.
+ * deviceid: PCI device id.
+ * ucode_rev: microcode revision.
+ * machwcap: MAC capabilities, BMAC shadow.
+ * perm_etheraddr: original sprom local ethernet address.
+ * bandlocked: disable auto multi-band switching.
+ * bandinit_pending: track band init in auto band.
+ * radio_monitor: radio timer is running.
+ * going_down: down path intermediate variable.
+ * mpc: enable minimum power consumption.
+ * mpc_dlycnt: # of watchdog cnt before turn disable radio.
+ * mpc_offcnt: # of watchdog cnt that radio is disabled.
+ * mpc_delay_off: delay radio disable by # of watchdog cnt.
+ * prev_non_delay_mpc: prev state brcms_c_is_non_delay_mpc.
+ * wdtimer: timer for watchdog routine.
+ * radio_timer: timer for hw radio button monitor routine.
+ * monitor: monitor (MPDU sniffing) mode.
+ * bcnmisc_monitor: bcns promisc mode override for monitor.
+ * _rifs: enable per-packet rifs.
+ * bcn_li_bcn: beacon listen interval in # beacons.
+ * bcn_li_dtim: beacon listen interval in # dtims.
+ * WDarmed: watchdog timer is armed.
+ * WDlast: last time wlc_watchdog() was called.
+ * edcf_txop[AC_COUNT]: current txop for each ac.
+ * wme_retries: per-AC retry limits.
+ * tx_prec_map: Precedence map based on HW FIFO space.
+ * fifo2prec_map[NFIFO]: pointer to fifo2_prec map based on WME.
+ * bsscfg: set of BSS configurations, idx 0 is default and always valid.
+ * cfg: the primary bsscfg (can be AP or STA).
+ * tx_queues: common TX Queue list.
+ * modulecb:
+ * mimoft: SIGN or 11N.
+ * cck_40txbw: 11N, cck tx b/w override when in 40MHZ mode.
+ * ofdm_40txbw: 11N, ofdm tx b/w override when in 40MHZ mode.
+ * mimo_40txbw: 11N, mimo tx b/w override when in 40MHZ mode.
+ * default_bss: configured BSS parameters.
+ * mc_fid_counter: BC/MC FIFO frame ID counter.
+ * country_default: saved country for leaving 802.11d auto-country mode.
+ * autocountry_default: initial country for 802.11d auto-country mode.
+ * prb_resp_timeout: do not send prb resp if request older
+ * than this, 0 = disable.
+ * home_chanspec: shared home chanspec.
+ * chanspec: target operational channel.
+ * usr_fragthresh: user configured fragmentation threshold.
+ * fragthresh[NFIFO]: per-fifo fragmentation thresholds.
+ * RTSThresh: 802.11 dot11RTSThreshold.
+ * SRL: 802.11 dot11ShortRetryLimit.
+ * LRL: 802.11 dot11LongRetryLimit.
+ * SFBL: Short Frame Rate Fallback Limit.
+ * LFBL: Long Frame Rate Fallback Limit.
+ * shortslot: currently using 11g ShortSlot timing.
+ * shortslot_override: 11g ShortSlot override.
+ * include_legacy_erp: include Legacy ERP info elt ID 47 as well as g ID 42.
+ * PLCPHdr_override: 802.11b Preamble Type override.
+ * stf:
+ * bcn_rspec: save bcn ratespec purpose.
+ * tempsense_lasttime;
+ * tx_duty_cycle_ofdm: maximum allowed duty cycle for OFDM.
+ * tx_duty_cycle_cck: maximum allowed duty cycle for CCK.
+ * pkt_queue: txq for transmit packets.
+ * wiphy:
+ * pri_scb: primary Station Control Block
+ */
+struct brcms_c_info {
+ struct brcms_pub *pub;
+ struct brcms_info *wl;
+ struct d11regs __iomem *regs;
+ struct brcms_hardware *hw;
+
+ /* clock */
+ u16 fastpwrup_dly;
+
+ /* interrupt */
+ u32 macintstatus;
+ u32 macintmask;
+ u32 defmacintmask;
+
+ bool clk;
+
+ /* multiband */
+ struct brcms_core *core;
+ struct brcms_band *band;
+ struct brcms_core *corestate;
+ struct brcms_band *bandstate[MAXBANDS];
+
+ /* packet queue */
+ uint qvalid;
+
+ struct ampdu_info *ampdu;
+ struct antsel_info *asi;
+ struct brcms_cm_info *cmi;
+
+ u16 vendorid;
+ u16 deviceid;
+ uint ucode_rev;
+
+ u8 perm_etheraddr[ETH_ALEN];
+
+ bool bandlocked;
+ bool bandinit_pending;
+
+ bool radio_monitor;
+ bool going_down;
+
+ bool mpc;
+ u8 mpc_dlycnt;
+ u8 mpc_offcnt;
+ u8 mpc_delay_off;
+ u8 prev_non_delay_mpc;
+
+ struct brcms_timer *wdtimer;
+ struct brcms_timer *radio_timer;
+
+ /* promiscuous */
+ bool monitor;
+ bool bcnmisc_monitor;
+
+ /* driver feature */
+ bool _rifs;
+
+ /* AP-STA synchronization, power save */
+ u8 bcn_li_bcn;
+ u8 bcn_li_dtim;
+
+ bool WDarmed;
+ u32 WDlast;
+
+ /* WME */
+ u16 edcf_txop[AC_COUNT];
+
+ u16 wme_retries[AC_COUNT];
+ u16 tx_prec_map;
+ u16 fifo2prec_map[NFIFO];
+
+ struct brcms_bss_cfg *bsscfg;
+
+ /* tx queue */
+ struct brcms_txq_info *tx_queues;
+
+ struct modulecb *modulecb;
+
+ u8 mimoft;
+ s8 cck_40txbw;
+ s8 ofdm_40txbw;
+ s8 mimo_40txbw;
+
+ struct brcms_bss_info *default_bss;
+
+ u16 mc_fid_counter;
+
+ char country_default[BRCM_CNTRY_BUF_SZ];
+ char autocountry_default[BRCM_CNTRY_BUF_SZ];
+ u16 prb_resp_timeout;
+
+ u16 home_chanspec;
+
+ /* PHY parameters */
+ u16 chanspec;
+ u16 usr_fragthresh;
+ u16 fragthresh[NFIFO];
+ u16 RTSThresh;
+ u16 SRL;
+ u16 LRL;
+ u16 SFBL;
+ u16 LFBL;
+
+ /* network config */
+ bool shortslot;
+ s8 shortslot_override;
+ bool include_legacy_erp;
+
+ struct brcms_protection *protection;
+ s8 PLCPHdr_override;
+
+ struct brcms_stf *stf;
+
+ u32 bcn_rspec;
+
+ uint tempsense_lasttime;
+
+ u16 tx_duty_cycle_ofdm;
+ u16 tx_duty_cycle_cck;
+
+ struct brcms_txq_info *pkt_queue;
+ struct wiphy *wiphy;
+ struct scb pri_scb;
+};
+
+/* antsel module specific state */
+struct antsel_info {
+ struct brcms_c_info *wlc; /* pointer to main wlc structure */
+ struct brcms_pub *pub; /* pointer to public fn */
+ u8 antsel_type; /* Type of boardlevel mimo antenna switch-logic
+ * 0 = N/A, 1 = 2x4 board, 2 = 2x3 CB2 board
+ */
+ u8 antsel_antswitch; /* board level antenna switch type */
+ bool antsel_avail; /* Ant selection availability (SROM based) */
+ struct brcms_antselcfg antcfg_11n; /* antenna configuration */
+ struct brcms_antselcfg antcfg_cur; /* current antenna config (auto) */
+};
+
+/*
+ * BSS configuration state
+ *
+ * wlc: wlc to which this bsscfg belongs to.
+ * up: is this configuration up operational
+ * enable: is this configuration enabled
+ * associated: is BSS in ASSOCIATED state
+ * BSS: infraustructure or adhoc
+ * SSID_len: the length of SSID
+ * SSID: SSID string
+ *
+ *
+ * BSSID: BSSID (associated)
+ * cur_etheraddr: h/w address
+ * flags: BSSCFG flags; see below
+ *
+ * current_bss: BSS parms in ASSOCIATED state
+ *
+ *
+ * ID: 'unique' ID of this bsscfg, assigned at bsscfg allocation
+ */
+struct brcms_bss_cfg {
+ struct brcms_c_info *wlc;
+ bool up;
+ bool enable;
+ bool associated;
+ bool BSS;
+ u8 SSID_len;
+ u8 SSID[IEEE80211_MAX_SSID_LEN];
+ u8 BSSID[ETH_ALEN];
+ u8 cur_etheraddr[ETH_ALEN];
+ struct brcms_bss_info *current_bss;
+};
+
+extern void brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
+ struct sk_buff *p,
+ bool commit, s8 txpktpend);
+extern void brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo,
+ s8 txpktpend);
+extern void brcms_c_txq_enq(struct brcms_c_info *wlc, struct scb *scb,
+ struct sk_buff *sdu, uint prec);
+extern void brcms_c_print_txstatus(struct tx_status *txs);
+extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
+ uint *blocks);
+
+#if defined(BCMDBG)
+extern void brcms_c_print_txdesc(struct d11txh *txh);
+#else
+#define brcms_c_print_txdesc(a)
+#endif
+
+extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
+extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc,
+ bool promisc);
+extern void brcms_c_send_q(struct brcms_c_info *wlc);
+extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
+ uint *fifo);
+extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
+ uint mac_len);
+extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
+ u32 rspec,
+ bool use_rspec, u16 mimo_ctlchbw);
+extern u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
+ u32 rts_rate,
+ u32 frame_rate,
+ u8 rts_preamble_type,
+ u8 frame_preamble_type, uint frame_len,
+ bool ba);
+extern void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
+ struct ieee80211_sta *sta,
+ void (*dma_callback_fn));
+extern void brcms_c_update_beacon(struct brcms_c_info *wlc);
+extern void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
+extern int brcms_c_set_nmode(struct brcms_c_info *wlc);
+extern void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
+ u32 bcn_rate);
+extern void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw,
+ u8 antsel_type);
+extern void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw,
+ u16 chanspec,
+ bool mute, struct txpwr_limits *txpwr);
+extern void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset,
+ u16 v);
+extern u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
+extern void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask,
+ u16 val, int bands);
+extern void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
+extern void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
+extern void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
+extern void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
+extern void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
+extern void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
+ u32 override_bit);
+extern void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
+ u32 override_bit);
+extern void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw,
+ int offset, int len, void *buf);
+extern u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
+extern void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw,
+ uint offset, const void *buf, int len,
+ u32 sel);
+extern void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
+ void *buf, int len, u32 sel);
+extern void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
+extern u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
+extern void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
+extern void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
+extern void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
+extern void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
+extern void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw,
+ u8 stf_mode);
+extern void brcms_c_init_scb(struct scb *scb);
+
+#endif /* _BRCM_MAIN_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/nicpci.c b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
index 3d71c590fce..0bcb2679204 100644
--- a/drivers/staging/brcm80211/brcmsmac/nicpci.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
@@ -127,6 +127,18 @@
/* PCIE protocol TLP diagnostic registers */
#define PCIE_TLP_WORKAROUNDSREG 0x004 /* TLP Workarounds */
+/* Sonics to PCI translation types */
+#define SBTOPCI_PREF 0x4 /* prefetch enable */
+#define SBTOPCI_BURST 0x8 /* burst enable */
+#define SBTOPCI_RC_READMULTI 0x20 /* memory read multiple */
+
+#define PCI_CLKRUN_DSBL 0x8000 /* Bit 15 forceClkrun */
+
+/* PCI core index in SROM shadow area */
+#define SRSH_PI_OFFSET 0 /* first word */
+#define SRSH_PI_MASK 0xf000 /* bit 15:12 */
+#define SRSH_PI_SHIFT 12 /* bit 15:12 */
+
/* Sonics side: PCI core and host control registers */
struct sbpciregs {
u32 control; /* PCI control */
@@ -194,8 +206,8 @@ struct sbpcieregs {
struct pcicore_info {
union {
- struct sbpcieregs *pcieregs;
- struct sbpciregs *pciregs;
+ struct sbpcieregs __iomem *pcieregs;
+ struct sbpciregs __iomem *pciregs;
} regs; /* Memory mapped register to the core */
struct si_pub *sih; /* System interconnect handle */
@@ -211,52 +223,30 @@ struct pcicore_info {
bool pmecap; /* Capable of generating PME */
};
-/* debug/trace */
-#define PCI_ERROR(args)
-#define PCIE_PUB(sih) \
- (((sih)->bustype == PCI_BUS) && \
- ((sih)->buscoretype == PCIE_CORE_ID))
-
-/* routines to access mdio slave device registers */
-static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk);
-static int pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr,
- bool write, uint *val);
-static int pcie_mdiowrite(struct pcicore_info *pi, uint physmedia, uint readdr,
- uint val);
-static int pcie_mdioread(struct pcicore_info *pi, uint physmedia, uint readdr,
- uint *ret_val);
-
-static void pcie_extendL1timer(struct pcicore_info *pi, bool extend);
-static void pcie_clkreq_upd(struct pcicore_info *pi, uint state);
-
-static void pcie_war_aspm_clkreq(struct pcicore_info *pi);
-static void pcie_war_serdes(struct pcicore_info *pi);
-static void pcie_war_noplldown(struct pcicore_info *pi);
-static void pcie_war_polarity(struct pcicore_info *pi);
-static void pcie_war_pci_setup(struct pcicore_info *pi);
-
#define PCIE_ASPM(sih) \
- ((PCIE_PUB(sih)) && \
+ (((sih)->buscoretype == PCIE_CORE_ID) && \
(((sih)->buscorerev >= 3) && \
((sih)->buscorerev <= 5)))
/* delay needed between the mdio control/ mdiodata register data access */
-#define PR28829_DELAY() udelay(10)
+static void pr28829_delay(void)
+{
+ udelay(10);
+}
/* Initialize the PCI core.
* It's caller's responsibility to make sure that this is done only once
*/
-void *pcicore_init(struct si_pub *sih, void *pdev, void *regs)
+struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
+ void __iomem *regs)
{
struct pcicore_info *pi;
/* alloc struct pcicore_info */
pi = kzalloc(sizeof(struct pcicore_info), GFP_ATOMIC);
- if (pi == NULL) {
- PCI_ERROR(("pci_attach: malloc failed!\n"));
+ if (pi == NULL)
return NULL;
- }
pi->sih = sih;
pi->dev = pdev;
@@ -273,7 +263,7 @@ void *pcicore_init(struct si_pub *sih, void *pdev, void *regs)
return pi;
}
-void pcicore_deinit(void *pch)
+void pcicore_deinit(struct pcicore_info *pch)
{
kfree(pch);
}
@@ -281,7 +271,7 @@ void pcicore_deinit(void *pch)
/* return cap_offset if requested capability exists in the PCI config space */
/* Note that it's caller's responsibility to make sure it's a pci bus */
u8
-pcicore_find_pci_capability(void *dev, u8 req_cap_id,
+pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
unsigned char *buf, u32 *buflen)
{
u8 cap_id;
@@ -344,7 +334,7 @@ end:
/* ***** Register Access API */
static uint
-pcie_readreg(struct sbpcieregs *pcieregs, uint addrtype, uint offset)
+pcie_readreg(struct sbpcieregs __iomem *pcieregs, uint addrtype, uint offset)
{
uint retval = 0xFFFFFFFF;
@@ -364,8 +354,8 @@ pcie_readreg(struct sbpcieregs *pcieregs, uint addrtype, uint offset)
return retval;
}
-static uint
-pcie_writereg(struct sbpcieregs *pcieregs, uint addrtype, uint offset, uint val)
+static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
+ uint offset, uint val)
{
switch (addrtype) {
case PCIE_CONFIGREGS:
@@ -384,7 +374,7 @@ pcie_writereg(struct sbpcieregs *pcieregs, uint addrtype, uint offset, uint val)
static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
{
- struct sbpcieregs *pcieregs = pi->regs.pcieregs;
+ struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
uint mdiodata, i = 0;
uint pcie_serdes_spinwait = 200;
@@ -394,19 +384,18 @@ static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
(blk << 4));
W_REG(&pcieregs->mdiodata, mdiodata);
- PR28829_DELAY();
+ pr28829_delay();
/* retry till the transaction is complete */
while (i < pcie_serdes_spinwait) {
if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE)
break;
+
udelay(1000);
i++;
}
- if (i >= pcie_serdes_spinwait) {
- PCI_ERROR(("pcie_mdiosetblock: timed out\n"));
+ if (i >= pcie_serdes_spinwait)
return false;
- }
return true;
}
@@ -415,7 +404,7 @@ static int
pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
uint *val)
{
- struct sbpcieregs *pcieregs = pi->regs.pcieregs;
+ struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
uint mdiodata;
uint i = 0;
uint pcie_serdes_spinwait = 10;
@@ -445,13 +434,13 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
W_REG(&pcieregs->mdiodata, mdiodata);
- PR28829_DELAY();
+ pr28829_delay();
/* retry till the transaction is complete */
while (i < pcie_serdes_spinwait) {
if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) {
if (!write) {
- PR28829_DELAY();
+ pr28829_delay();
*val = (R_REG(&pcieregs->mdiodata) &
MDIODATA_MASK);
}
@@ -463,8 +452,7 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
i++;
}
- PCI_ERROR(("pcie_mdioop: timed out op: %d\n", write));
- /* Disable mdio access to SERDES */
+ /* Timed out. Disable mdio access to SERDES. */
W_REG(&pcieregs->mdiocontrol, 0);
return 1;
}
@@ -485,9 +473,8 @@ pcie_mdiowrite(struct pcicore_info *pi, uint physmedia, uint regaddr, uint val)
}
/* ***** Support functions ***** */
-static u8 pcie_clkreq(void *pch, u32 mask, u32 val)
+static u8 pcie_clkreq(struct pcicore_info *pi, u32 mask, u32 val)
{
- struct pcicore_info *pi = pch;
u32 reg_val;
u8 offset;
@@ -515,9 +502,9 @@ static void pcie_extendL1timer(struct pcicore_info *pi, bool extend)
{
u32 w;
struct si_pub *sih = pi->sih;
- struct sbpcieregs *pcieregs = pi->regs.pcieregs;
+ struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
- if (!PCIE_PUB(sih) || sih->buscorerev < 7)
+ if (sih->buscoretype != PCIE_CORE_ID || sih->buscorerev < 7)
return;
w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
@@ -537,30 +524,30 @@ static void pcie_clkreq_upd(struct pcicore_info *pi, uint state)
switch (state) {
case SI_DOATTACH:
if (PCIE_ASPM(sih))
- pcie_clkreq((void *)pi, 1, 0);
+ pcie_clkreq(pi, 1, 0);
break;
case SI_PCIDOWN:
if (sih->buscorerev == 6) { /* turn on serdes PLL down */
ai_corereg(sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol_addr),
+ offsetof(struct chipcregs, chipcontrol_addr),
~0, 0);
ai_corereg(sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol_data),
+ offsetof(struct chipcregs, chipcontrol_data),
~0x40, 0);
} else if (pi->pcie_pr42767) {
- pcie_clkreq((void *)pi, 1, 1);
+ pcie_clkreq(pi, 1, 1);
}
break;
case SI_PCIUP:
if (sih->buscorerev == 6) { /* turn off serdes PLL down */
ai_corereg(sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol_addr),
+ offsetof(struct chipcregs, chipcontrol_addr),
~0, 0);
ai_corereg(sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol_data),
+ offsetof(struct chipcregs, chipcontrol_data),
~0x40, 0x40);
} else if (PCIE_ASPM(sih)) { /* disable clkreq */
- pcie_clkreq((void *)pi, 1, 0);
+ pcie_clkreq(pi, 1, 0);
}
break;
}
@@ -594,9 +581,10 @@ static void pcie_war_polarity(struct pcicore_info *pi)
*/
static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
{
- struct sbpcieregs *pcieregs = pi->regs.pcieregs;
+ struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
struct si_pub *sih = pi->sih;
- u16 val16, *reg16;
+ u16 val16;
+ u16 __iomem *reg16;
u32 w;
if (!PCIE_ASPM(sih))
@@ -654,8 +642,9 @@ static void pcie_war_serdes(struct pcicore_info *pi)
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_misc_config_fixup(struct pcicore_info *pi)
{
- struct sbpcieregs *pcieregs = pi->regs.pcieregs;
- u16 val16, *reg16;
+ struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
+ u16 val16;
+ u16 __iomem *reg16;
reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
val16 = R_REG(reg16);
@@ -670,11 +659,11 @@ static void pcie_misc_config_fixup(struct pcicore_info *pi)
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_noplldown(struct pcicore_info *pi)
{
- struct sbpcieregs *pcieregs = pi->regs.pcieregs;
- u16 *reg16;
+ struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
+ u16 __iomem *reg16;
/* turn off serdes PLL down */
- ai_corereg(pi->sih, SI_CC_IDX, offsetof(chipcregs_t, chipcontrol),
+ ai_corereg(pi->sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol),
CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
/* clear srom shadow backdoor */
@@ -686,7 +675,7 @@ static void pcie_war_noplldown(struct pcicore_info *pi)
static void pcie_war_pci_setup(struct pcicore_info *pi)
{
struct si_pub *sih = pi->sih;
- struct sbpcieregs *pcieregs = pi->regs.pcieregs;
+ struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
u32 w;
if (sih->buscorerev == 0 || sih->buscorerev == 1) {
@@ -730,14 +719,14 @@ static void pcie_war_pci_setup(struct pcicore_info *pi)
}
/* ***** Functions called during driver state changes ***** */
-void pcicore_attach(void *pch, char *pvars, int state)
+void pcicore_attach(struct pcicore_info *pi, int state)
{
- struct pcicore_info *pi = pch;
struct si_pub *sih = pi->sih;
+ u32 bfl2 = (u32)getintvar(sih, BRCMS_SROM_BOARDFLAGS2);
/* Determine if this board needs override */
if (PCIE_ASPM(sih)) {
- if ((u32)getintvar(pvars, "boardflags2") & BFL2_PCIEWAR_OVR)
+ if (bfl2 & BFL2_PCIEWAR_OVR)
pi->pcie_war_aspm_ovr = PCIE_ASPM_DISAB;
else
pi->pcie_war_aspm_ovr = PCIE_ASPM_ENAB;
@@ -754,21 +743,17 @@ void pcicore_attach(void *pch, char *pvars, int state)
}
-void pcicore_hwup(void *pch)
+void pcicore_hwup(struct pcicore_info *pi)
{
- struct pcicore_info *pi = pch;
-
- if (!pi || !PCIE_PUB(pi->sih))
+ if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
return;
pcie_war_pci_setup(pi);
}
-void pcicore_up(void *pch, int state)
+void pcicore_up(struct pcicore_info *pi, int state)
{
- struct pcicore_info *pi = pch;
-
- if (!pi || !PCIE_PUB(pi->sih))
+ if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
return;
/* Restore L1 timer for better performance */
@@ -780,9 +765,8 @@ void pcicore_up(void *pch, int state)
/* When the device is going to enter D3 state
* (or the system is going to enter S3/S4 states)
*/
-void pcicore_sleep(void *pch)
+void pcicore_sleep(struct pcicore_info *pi)
{
- struct pcicore_info *pi = pch;
u32 w;
if (!pi || !PCIE_ASPM(pi->sih))
@@ -795,11 +779,9 @@ void pcicore_sleep(void *pch)
pi->pcie_pr42767 = false;
}
-void pcicore_down(void *pch, int state)
+void pcicore_down(struct pcicore_info *pi, int state)
{
- struct pcicore_info *pi = pch;
-
- if (!pi || !PCIE_PUB(pi->sih))
+ if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
return;
pcie_clkreq_upd(pi, state);
@@ -809,20 +791,12 @@ void pcicore_down(void *pch, int state)
}
/* precondition: current core is sii->buscoretype */
-void pcicore_fixcfg(void *pch, void *regs)
+static void pcicore_fixcfg(struct pcicore_info *pi, u16 __iomem *reg16)
{
- struct pcicore_info *pi = pch;
- struct si_info *sii = SI_INFO(pi->sih);
- struct sbpciregs *pciregs = regs;
- struct sbpcieregs *pcieregs = regs;
- u16 val16, *reg16 = NULL;
+ struct si_info *sii = (struct si_info *)(pi->sih);
+ u16 val16;
uint pciidx;
- /* check 'pi' is correct and fix it if not */
- if (sii->pub.buscoretype == PCIE_CORE_ID)
- reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
- else if (sii->pub.buscoretype == PCI_CORE_ID)
- reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
pciidx = ai_coreidx(&sii->pub);
val16 = R_REG(reg16);
if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16)pciidx) {
@@ -832,16 +806,27 @@ void pcicore_fixcfg(void *pch, void *regs)
}
}
+void
+pcicore_fixcfg_pci(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
+{
+ pcicore_fixcfg(pi, &pciregs->sprom[SRSH_PI_OFFSET]);
+}
+
+void pcicore_fixcfg_pcie(struct pcicore_info *pi,
+ struct sbpcieregs __iomem *pcieregs)
+{
+ pcicore_fixcfg(pi, &pcieregs->sprom[SRSH_PI_OFFSET]);
+}
+
/* precondition: current core is pci core */
-void pcicore_pci_setup(void *pch, void *regs)
+void
+pcicore_pci_setup(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
{
- struct pcicore_info *pi = pch;
- struct sbpciregs *pciregs = regs;
u32 w;
OR_REG(&pciregs->sbtopci2, SBTOPCI_PREF | SBTOPCI_BURST);
- if (SI_INFO(pi->sih)->pub.buscorerev >= 11) {
+ if (((struct si_info *)(pi->sih))->pub.buscorerev >= 11) {
OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
w = R_REG(&pciregs->clkrun);
W_REG(&pciregs->clkrun, w | PCI_CLKRUN_DSBL);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
new file mode 100644
index 00000000000..58aa80dc332
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_NICPCI_H_
+#define _BRCM_NICPCI_H_
+
+#include "types.h"
+
+/* PCI configuration address space size */
+#define PCI_SZPCR 256
+
+/* Brcm PCI configuration registers */
+/* backplane address space accessed by BAR0 */
+#define PCI_BAR0_WIN 0x80
+/* sprom property control */
+#define PCI_SPROM_CONTROL 0x88
+/* mask of PCI and other cores interrupts */
+#define PCI_INT_MASK 0x94
+/* backplane core interrupt mask bits offset */
+#define PCI_SBIM_SHIFT 8
+/* backplane address space accessed by second 4KB of BAR0 */
+#define PCI_BAR0_WIN2 0xac
+/* pci config space gpio input (>=rev3) */
+#define PCI_GPIO_IN 0xb0
+/* pci config space gpio output (>=rev3) */
+#define PCI_GPIO_OUT 0xb4
+/* pci config space gpio output enable (>=rev3) */
+#define PCI_GPIO_OUTEN 0xb8
+
+/* bar0 + 4K accesses external sprom */
+#define PCI_BAR0_SPROM_OFFSET (4 * 1024)
+/* bar0 + 6K accesses pci core registers */
+#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024)
+/*
+ * pci core SB registers are at the end of the
+ * 8KB window, so their address is the "regular"
+ * address plus 4K
+ */
+#define PCI_BAR0_PCISBR_OFFSET (4 * 1024)
+/* bar0 window size Match with corerev 13 */
+#define PCI_BAR0_WINSZ (16 * 1024)
+/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */
+/* bar0 + 8K accesses pci/pcie core registers */
+#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024)
+/* bar0 + 12K accesses chipc core registers */
+#define PCI_16KB0_CCREGS_OFFSET (12 * 1024)
+
+struct sbpciregs;
+struct sbpcieregs;
+
+extern struct pcicore_info *pcicore_init(struct si_pub *sih,
+ struct pci_dev *pdev,
+ void __iomem *regs);
+extern void pcicore_deinit(struct pcicore_info *pch);
+extern void pcicore_attach(struct pcicore_info *pch, int state);
+extern void pcicore_hwup(struct pcicore_info *pch);
+extern void pcicore_up(struct pcicore_info *pch, int state);
+extern void pcicore_sleep(struct pcicore_info *pch);
+extern void pcicore_down(struct pcicore_info *pch, int state);
+extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
+ unsigned char *buf, u32 *buflen);
+extern void pcicore_fixcfg_pci(struct pcicore_info *pch,
+ struct sbpciregs __iomem *pciregs);
+extern void pcicore_fixcfg_pcie(struct pcicore_info *pch,
+ struct sbpcieregs __iomem *pciregs);
+extern void pcicore_pci_setup(struct pcicore_info *pch,
+ struct sbpciregs __iomem *pciregs);
+
+#endif /* _BRCM_NICPCI_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.c b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
new file mode 100644
index 00000000000..edf551561fd
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include <brcm_hw_ids.h>
+#include <chipcommon.h>
+#include "aiutils.h"
+#include "otp.h"
+
+#define OTPS_GUP_MASK 0x00000f00
+#define OTPS_GUP_SHIFT 8
+/* h/w subregion is programmed */
+#define OTPS_GUP_HW 0x00000100
+/* s/w subregion is programmed */
+#define OTPS_GUP_SW 0x00000200
+/* chipid/pkgopt subregion is programmed */
+#define OTPS_GUP_CI 0x00000400
+/* fuse subregion is programmed */
+#define OTPS_GUP_FUSE 0x00000800
+
+/* Fields in otpprog in rev >= 21 */
+#define OTPP_COL_MASK 0x000000ff
+#define OTPP_COL_SHIFT 0
+#define OTPP_ROW_MASK 0x0000ff00
+#define OTPP_ROW_SHIFT 8
+#define OTPP_OC_MASK 0x0f000000
+#define OTPP_OC_SHIFT 24
+#define OTPP_READERR 0x10000000
+#define OTPP_VALUE_MASK 0x20000000
+#define OTPP_VALUE_SHIFT 29
+#define OTPP_START_BUSY 0x80000000
+#define OTPP_READ 0x40000000
+
+/* Opcodes for OTPP_OC field */
+#define OTPPOC_READ 0
+#define OTPPOC_BIT_PROG 1
+#define OTPPOC_VERIFY 3
+#define OTPPOC_INIT 4
+#define OTPPOC_SET 5
+#define OTPPOC_RESET 6
+#define OTPPOC_OCST 7
+#define OTPPOC_ROW_LOCK 8
+#define OTPPOC_PRESCN_TEST 9
+
+#define OTPTYPE_IPX(ccrev) ((ccrev) == 21 || (ccrev) >= 23)
+
+#define OTPP_TRIES 10000000 /* # of tries for OTPP */
+
+#define MAXNUMRDES 9 /* Maximum OTP redundancy entries */
+
+/* Fixed size subregions sizes in words */
+#define OTPGU_CI_SZ 2
+
+struct otpinfo;
+
+/* OTP function struct */
+struct otp_fn_s {
+ int (*init)(struct si_pub *sih, struct otpinfo *oi);
+ int (*read_region)(struct otpinfo *oi, int region, u16 *data,
+ uint *wlen);
+};
+
+struct otpinfo {
+ uint ccrev; /* chipc revision */
+ const struct otp_fn_s *fn; /* OTP functions */
+ struct si_pub *sih; /* Saved sb handle */
+
+ /* IPX OTP section */
+ u16 wsize; /* Size of otp in words */
+ u16 rows; /* Geometry */
+ u16 cols; /* Geometry */
+ u32 status; /* Flag bits (lock/prog/rv).
+ * (Reflected only when OTP is power cycled)
+ */
+ u16 hwbase; /* hardware subregion offset */
+ u16 hwlim; /* hardware subregion boundary */
+ u16 swbase; /* software subregion offset */
+ u16 swlim; /* software subregion boundary */
+ u16 fbase; /* fuse subregion offset */
+ u16 flim; /* fuse subregion boundary */
+ int otpgu_base; /* offset to General Use Region */
+};
+
+/* OTP layout */
+/* CC revs 21, 24 and 27 OTP General Use Region word offset */
+#define REVA4_OTPGU_BASE 12
+
+/* CC revs 23, 25, 26, 28 and above OTP General Use Region word offset */
+#define REVB8_OTPGU_BASE 20
+
+/* CC rev 36 OTP General Use Region word offset */
+#define REV36_OTPGU_BASE 12
+
+/* Subregion word offsets in General Use region */
+#define OTPGU_HSB_OFF 0
+#define OTPGU_SFB_OFF 1
+#define OTPGU_CI_OFF 2
+#define OTPGU_P_OFF 3
+#define OTPGU_SROM_OFF 4
+
+/* Flag bit offsets in General Use region */
+#define OTPGU_HWP_OFF 60
+#define OTPGU_SWP_OFF 61
+#define OTPGU_CIP_OFF 62
+#define OTPGU_FUSEP_OFF 63
+#define OTPGU_CIP_MSK 0x4000
+#define OTPGU_P_MSK 0xf000
+#define OTPGU_P_SHIFT (OTPGU_HWP_OFF % 16)
+
+/* OTP Size */
+#define OTP_SZ_FU_324 ((roundup(324, 8))/8) /* 324 bits */
+#define OTP_SZ_FU_288 (288/8) /* 288 bits */
+#define OTP_SZ_FU_216 (216/8) /* 216 bits */
+#define OTP_SZ_FU_72 (72/8) /* 72 bits */
+#define OTP_SZ_CHECKSUM (16/8) /* 16 bits */
+#define OTP4315_SWREG_SZ 178 /* 178 bytes */
+#define OTP_SZ_FU_144 (144/8) /* 144 bits */
+
+static u16
+ipxotp_otpr(struct otpinfo *oi, struct chipcregs __iomem *cc, uint wn)
+{
+ return R_REG(&cc->sromotp[wn]);
+}
+
+/*
+ * Calculate max HW/SW region byte size by subtracting fuse region
+ * and checksum size, osizew is oi->wsize (OTP size - GU size) in words
+ */
+static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
+{
+ int ret = 0;
+
+ switch (sih->chip) {
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
+ break;
+ case BCM4313_CHIP_ID:
+ ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
+ break;
+ default:
+ break; /* Don't know about this chip */
+ }
+
+ return ret;
+}
+
+static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
+{
+ uint k;
+ u32 otpp, st;
+
+ /*
+ * record word offset of General Use Region
+ * for various chipcommon revs
+ */
+ if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24
+ || oi->sih->ccrev == 27) {
+ oi->otpgu_base = REVA4_OTPGU_BASE;
+ } else if (oi->sih->ccrev == 36) {
+ /*
+ * OTP size greater than equal to 2KB (128 words),
+ * otpgu_base is similar to rev23
+ */
+ if (oi->wsize >= 128)
+ oi->otpgu_base = REVB8_OTPGU_BASE;
+ else
+ oi->otpgu_base = REV36_OTPGU_BASE;
+ } else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) {
+ oi->otpgu_base = REVB8_OTPGU_BASE;
+ }
+
+ /* First issue an init command so the status is up to date */
+ otpp =
+ OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
+
+ W_REG(&cc->otpprog, otpp);
+ for (k = 0;
+ ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY)
+ && (k < OTPP_TRIES); k++)
+ ;
+ if (k >= OTPP_TRIES)
+ return;
+
+ /* Read OTP lock bits and subregion programmed indication bits */
+ oi->status = R_REG(&cc->otpstatus);
+
+ if ((oi->sih->chip == BCM43224_CHIP_ID)
+ || (oi->sih->chip == BCM43225_CHIP_ID)) {
+ u32 p_bits;
+ p_bits =
+ (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
+ OTPGU_P_MSK)
+ >> OTPGU_P_SHIFT;
+ oi->status |= (p_bits << OTPS_GUP_SHIFT);
+ }
+
+ /*
+ * h/w region base and fuse region limit are fixed to
+ * the top and the bottom of the general use region.
+ * Everything else can be flexible.
+ */
+ oi->hwbase = oi->otpgu_base + OTPGU_SROM_OFF;
+ oi->hwlim = oi->wsize;
+ if (oi->status & OTPS_GUP_HW) {
+ oi->hwlim =
+ ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
+ oi->swbase = oi->hwlim;
+ } else
+ oi->swbase = oi->hwbase;
+
+ /* subtract fuse and checksum from beginning */
+ oi->swlim = ipxotp_max_rgnsz(oi->sih, oi->wsize) / 2;
+
+ if (oi->status & OTPS_GUP_SW) {
+ oi->swlim =
+ ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
+ oi->fbase = oi->swlim;
+ } else
+ oi->fbase = oi->swbase;
+
+ oi->flim = oi->wsize;
+}
+
+static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
+{
+ uint idx;
+ struct chipcregs __iomem *cc;
+
+ /* Make sure we're running IPX OTP */
+ if (!OTPTYPE_IPX(sih->ccrev))
+ return -EBADE;
+
+ /* Make sure OTP is not disabled */
+ if (ai_is_otp_disabled(sih))
+ return -EBADE;
+
+ /* Check for otp size */
+ switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
+ case 0:
+ /* Nothing there */
+ return -EBADE;
+ case 1: /* 32x64 */
+ oi->rows = 32;
+ oi->cols = 64;
+ oi->wsize = 128;
+ break;
+ case 2: /* 64x64 */
+ oi->rows = 64;
+ oi->cols = 64;
+ oi->wsize = 256;
+ break;
+ case 5: /* 96x64 */
+ oi->rows = 96;
+ oi->cols = 64;
+ oi->wsize = 384;
+ break;
+ case 7: /* 16x64 *//* 1024 bits */
+ oi->rows = 16;
+ oi->cols = 64;
+ oi->wsize = 64;
+ break;
+ default:
+ /* Don't know the geometry */
+ return -EBADE;
+ }
+
+ /* Retrieve OTP region info */
+ idx = ai_coreidx(sih);
+ cc = ai_setcoreidx(sih, SI_CC_IDX);
+
+ _ipxotp_init(oi, cc);
+
+ ai_setcoreidx(sih, idx);
+
+ return 0;
+}
+
+static int
+ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
+{
+ uint idx;
+ struct chipcregs __iomem *cc;
+ uint base, i, sz;
+
+ /* Validate region selection */
+ switch (region) {
+ case OTP_HW_RGN:
+ sz = (uint) oi->hwlim - oi->hwbase;
+ if (!(oi->status & OTPS_GUP_HW)) {
+ *wlen = sz;
+ return -ENODATA;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return -EOVERFLOW;
+ }
+ base = oi->hwbase;
+ break;
+ case OTP_SW_RGN:
+ sz = ((uint) oi->swlim - oi->swbase);
+ if (!(oi->status & OTPS_GUP_SW)) {
+ *wlen = sz;
+ return -ENODATA;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return -EOVERFLOW;
+ }
+ base = oi->swbase;
+ break;
+ case OTP_CI_RGN:
+ sz = OTPGU_CI_SZ;
+ if (!(oi->status & OTPS_GUP_CI)) {
+ *wlen = sz;
+ return -ENODATA;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return -EOVERFLOW;
+ }
+ base = oi->otpgu_base + OTPGU_CI_OFF;
+ break;
+ case OTP_FUSE_RGN:
+ sz = (uint) oi->flim - oi->fbase;
+ if (!(oi->status & OTPS_GUP_FUSE)) {
+ *wlen = sz;
+ return -ENODATA;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return -EOVERFLOW;
+ }
+ base = oi->fbase;
+ break;
+ case OTP_ALL_RGN:
+ sz = ((uint) oi->flim - oi->hwbase);
+ if (!(oi->status & (OTPS_GUP_HW | OTPS_GUP_SW))) {
+ *wlen = sz;
+ return -ENODATA;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return -EOVERFLOW;
+ }
+ base = oi->hwbase;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ idx = ai_coreidx(oi->sih);
+ cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
+
+ /* Read the data */
+ for (i = 0; i < sz; i++)
+ data[i] = ipxotp_otpr(oi, cc, base + i);
+
+ ai_setcoreidx(oi->sih, idx);
+ *wlen = sz;
+ return 0;
+}
+
+static const struct otp_fn_s ipxotp_fn = {
+ (int (*)(struct si_pub *, struct otpinfo *)) ipxotp_init,
+ (int (*)(struct otpinfo *, int, u16 *, uint *)) ipxotp_read_region,
+};
+
+static int otp_init(struct si_pub *sih, struct otpinfo *oi)
+{
+
+ int ret;
+
+ memset(oi, 0, sizeof(struct otpinfo));
+
+ oi->ccrev = sih->ccrev;
+
+ if (OTPTYPE_IPX(oi->ccrev))
+ oi->fn = &ipxotp_fn;
+
+ if (oi->fn == NULL)
+ return -EBADE;
+
+ oi->sih = sih;
+
+ ret = (oi->fn->init) (sih, oi);
+
+ return ret;
+}
+
+int
+otp_read_region(struct si_pub *sih, int region, u16 *data, uint *wlen) {
+ struct otpinfo otpinfo;
+ struct otpinfo *oi = &otpinfo;
+ int err = 0;
+
+ if (ai_is_otp_disabled(sih)) {
+ err = -EPERM;
+ goto out;
+ }
+
+ err = otp_init(sih, oi);
+ if (err)
+ goto out;
+
+ err = ((oi)->fn->read_region)(oi, region, data, wlen);
+
+ out:
+ return err;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.h b/drivers/net/wireless/brcm80211/brcmsmac/otp.h
new file mode 100644
index 00000000000..6b6d31cf956
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/otp.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_OTP_H_
+#define _BRCM_OTP_H_
+
+#include "types.h"
+
+/* OTP regions */
+#define OTP_HW_RGN 1
+#define OTP_SW_RGN 2
+#define OTP_CI_RGN 4
+#define OTP_FUSE_RGN 8
+/* From h/w region to end of OTP including checksum */
+#define OTP_ALL_RGN 0xf
+
+/* OTP Size */
+#define OTP_SZ_MAX (6144/8) /* maximum bytes in one CIS */
+
+extern int otp_read_region(struct si_pub *sih, int region, u16 *data,
+ uint *wlen);
+
+#endif /* _BRCM_OTP_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
index 17012fbe9c9..a3149254cbc 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -13,8 +13,9 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-
+#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/bitops.h>
#include <brcm_hw_ids.h>
#include <chipcommon.h>
@@ -27,14 +28,32 @@
#include "phy_lcn.h"
#include "phyreg_n.h"
-u32 phyhal_msg_level = PHYHAL_ERROR;
+#define VALID_N_RADIO(radioid) ((radioid == BCM2055_ID) || \
+ (radioid == BCM2056_ID) || \
+ (radioid == BCM2057_ID))
+
+#define VALID_LCN_RADIO(radioid) (radioid == BCM2064_ID)
+
+#define VALID_RADIO(pi, radioid) ( \
+ (ISNPHY(pi) ? VALID_N_RADIO(radioid) : false) || \
+ (ISLCNPHY(pi) ? VALID_LCN_RADIO(radioid) : false))
+
+/* basic mux operation - can be optimized on several architectures */
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+/* modulo inc/dec - assumes x E [0, bound - 1] */
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+/* modulo inc/dec, bound = 2^k */
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
struct chan_info_basic {
u16 chan;
u16 freq;
};
-static struct chan_info_basic chan_info_all[] = {
+static const struct chan_info_basic chan_info_all[] = {
{1, 2412},
{2, 2417},
{3, 2422},
@@ -93,20 +112,6 @@ static struct chan_info_basic chan_info_all[] = {
{216, 50800}
};
-u16 ltrn_list[PHY_LTRN_LIST_LEN] = {
- 0x18f9, 0x0d01, 0x00e4, 0xdef4, 0x06f1, 0x0ffc,
- 0xfa27, 0x1dff, 0x10f0, 0x0918, 0xf20a, 0xe010,
- 0x1417, 0x1104, 0xf114, 0xf2fa, 0xf7db, 0xe2fc,
- 0xe1fb, 0x13ee, 0xff0d, 0xe91c, 0x171a, 0x0318,
- 0xda00, 0x03e8, 0x17e6, 0xe9e4, 0xfff3, 0x1312,
- 0xe105, 0xe204, 0xf725, 0xf206, 0xf1ec, 0x11fc,
- 0x14e9, 0xe0f0, 0xf2f6, 0x09e8, 0x1010, 0x1d01,
- 0xfad9, 0x0f04, 0x060f, 0xde0c, 0x001c, 0x0dff,
- 0x1807, 0xf61a, 0xe40e, 0x0f16, 0x05f9, 0x18ec,
- 0x0a1b, 0xff1e, 0x2600, 0xffe2, 0x0ae5, 0x1814,
- 0x0507, 0x0fea, 0xe4f2, 0xf6e6
-};
-
const u8 ofdm_rate_lookup[] = {
BRCM_RATE_48M,
@@ -119,66 +124,7 @@ const u8 ofdm_rate_lookup[] = {
BRCM_RATE_9M
};
-#define PHY_WREG_LIMIT 24
-
-static void wlc_set_phy_uninitted(struct brcms_phy *pi);
-static u32 wlc_phy_get_radio_ver(struct brcms_phy *pi);
-static void wlc_phy_timercb_phycal(void *arg);
-
-static bool wlc_phy_noise_calc_phy(struct brcms_phy *pi, u32 *cmplx_pwr,
- s8 *pwr_ant);
-
-static void wlc_phy_cal_perical_mphase_schedule(struct brcms_phy *pi,
- uint delay);
-
-static void wlc_phy_noise_cb(struct brcms_phy *pi, u8 channel, s8 noise_dbm);
-static void wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason,
- u8 ch);
-
-static void wlc_phy_txpower_reg_limit_calc(struct brcms_phy *pi,
- struct txpwr_limits *tp, chanspec_t);
-static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi);
-
-static s8 wlc_user_txpwr_antport_to_rfport(struct brcms_phy *pi, uint chan,
- u32 band, u8 rate);
-static void wlc_phy_upd_env_txpwr_rate_limits(struct brcms_phy *pi, u32 band);
-static s8 wlc_phy_env_measure_vbat(struct brcms_phy *pi);
-static s8 wlc_phy_env_measure_temperature(struct brcms_phy *pi);
-
-char *phy_getvar(struct brcms_phy *pi, const char *name)
-{
- char *vars = pi->vars;
- char *s;
- int len;
-
- if (!name)
- return NULL;
-
- len = strlen(name);
- if (len == 0)
- return NULL;
-
- for (s = vars; s && *s;) {
- if ((memcmp(s, name, len) == 0) && (s[len] == '='))
- return &s[len + 1];
-
- while (*s++)
- ;
- }
-
- return NULL;
-}
-
-int phy_getintvar(struct brcms_phy *pi, const char *name)
-{
- char *val;
-
- val = PHY_GETVAR(pi, name);
- if (val == NULL)
- return 0;
-
- return simple_strtoul(val, NULL, 0);
-}
+#define PHY_WREG_LIMIT 24
void wlc_phyreg_enter(struct brcms_phy_pub *pih)
{
@@ -203,7 +149,7 @@ void wlc_radioreg_enter(struct brcms_phy_pub *pih)
void wlc_radioreg_exit(struct brcms_phy_pub *pih)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
- volatile u16 dummy;
+ u16 dummy;
dummy = R_REG(&pi->regs->phyversion);
pi->phy_wreg = 0;
@@ -217,12 +163,10 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
if ((addr == RADIO_IDCODE))
return 0xffff;
- if (NORADIO_ENAB(pi->pubpi))
- return NORADIO_IDCODE & 0xffff;
-
switch (pi->pubpi.phy_type) {
case PHY_TYPE_N:
- CASECHECK(PHYTYPE, PHY_TYPE_N);
+ if (!CONF_HAS(PHYTYPE, PHY_TYPE_N))
+ break;
if (NREV_GE(pi->pubpi.phy_rev, 7))
addr |= RADIO_2057_READ_OFF;
else
@@ -230,7 +174,8 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
break;
case PHY_TYPE_LCN:
- CASECHECK(PHYTYPE, PHY_TYPE_LCN);
+ if (!CONF_HAS(PHYTYPE, PHY_TYPE_LCN))
+ break;
addr |= RADIO_2064_READ_OFF;
break;
@@ -262,9 +207,6 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- if (NORADIO_ENAB(pi->pubpi))
- return;
-
if ((D11REV_GE(pi->sh->corerev, 24)) ||
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
@@ -276,11 +218,9 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
W_REG(&pi->regs->phy4wdatalo, val);
}
- if (pi->sh->bustype == PCI_BUS) {
- if (++pi->phy_wreg >= pi->phy_wreg_limit) {
- (void)R_REG(&pi->regs->maccontrol);
- pi->phy_wreg = 0;
- }
+ if (++pi->phy_wreg >= pi->phy_wreg_limit) {
+ (void)R_REG(&pi->regs->maccontrol);
+ pi->phy_wreg = 0;
}
}
@@ -288,9 +228,6 @@ static u32 read_radio_id(struct brcms_phy *pi)
{
u32 id;
- if (NORADIO_ENAB(pi->pubpi))
- return NORADIO_IDCODE;
-
if (D11REV_GE(pi->sh->corerev, 24)) {
u32 b0, b1, b2;
@@ -316,9 +253,6 @@ void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
u16 rval;
- if (NORADIO_ENAB(pi->pubpi))
- return;
-
rval = read_radio_reg(pi, addr);
write_radio_reg(pi, addr, (rval & val));
}
@@ -327,9 +261,6 @@ void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
u16 rval;
- if (NORADIO_ENAB(pi->pubpi))
- return;
-
rval = read_radio_reg(pi, addr);
write_radio_reg(pi, addr, (rval | val));
}
@@ -338,9 +269,6 @@ void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask)
{
u16 rval;
- if (NORADIO_ENAB(pi->pubpi))
- return;
-
rval = read_radio_reg(pi, addr);
write_radio_reg(pi, addr, (rval ^ mask));
}
@@ -349,9 +277,6 @@ void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
{
u16 rval;
- if (NORADIO_ENAB(pi->pubpi))
- return;
-
rval = read_radio_reg(pi, addr);
write_radio_reg(pi, addr, (rval & ~mask) | (val & mask));
}
@@ -363,7 +288,7 @@ void write_phy_channel_reg(struct brcms_phy *pi, uint val)
u16 read_phy_reg(struct brcms_phy *pi, u16 addr)
{
- d11regs_t *regs;
+ struct d11regs __iomem *regs;
regs = pi->regs;
@@ -375,30 +300,27 @@ u16 read_phy_reg(struct brcms_phy *pi, u16 addr)
void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- d11regs_t *regs;
+ struct d11regs __iomem *regs;
regs = pi->regs;
-#ifdef __mips__
+#ifdef CONFIG_BCM47XX
W_REG_FLUSH(&regs->phyregaddr, addr);
W_REG(&regs->phyregdata, val);
if (addr == 0x72)
(void)R_REG(&regs->phyregdata);
#else
- W_REG((u32 *)(&regs->phyregaddr),
- addr | (val << 16));
- if (pi->sh->bustype == PCI_BUS) {
- if (++pi->phy_wreg >= pi->phy_wreg_limit) {
- pi->phy_wreg = 0;
- (void)R_REG(&regs->phyversion);
- }
+ W_REG((u32 __iomem *)(&regs->phyregaddr), addr | (val << 16));
+ if (++pi->phy_wreg >= pi->phy_wreg_limit) {
+ pi->phy_wreg = 0;
+ (void)R_REG(&regs->phyversion);
}
#endif
}
void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- d11regs_t *regs;
+ struct d11regs __iomem *regs;
regs = pi->regs;
@@ -410,7 +332,7 @@ void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- d11regs_t *regs;
+ struct d11regs __iomem *regs;
regs = pi->regs;
@@ -422,7 +344,7 @@ void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
{
- d11regs_t *regs;
+ struct d11regs __iomem *regs;
regs = pi->regs;
@@ -464,9 +386,8 @@ static void wlc_set_phy_uninitted(struct brcms_phy *pi)
}
pi->radiopwr = 0xffff;
for (i = 0; i < STATIC_NUM_RF; i++) {
- for (j = 0; j < STATIC_NUM_BB; j++) {
+ for (j = 0; j < STATIC_NUM_BB; j++)
pi->stats_11b_txpower[i][j] = -1;
- }
}
}
@@ -475,9 +396,8 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
struct shared_phy *sh;
sh = kzalloc(sizeof(struct shared_phy), GFP_ATOMIC);
- if (sh == NULL) {
+ if (sh == NULL)
return NULL;
- }
sh->sih = shp->sih;
sh->physhim = shp->physhim;
@@ -495,7 +415,6 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
sh->boardvendor = shp->boardvendor;
sh->boardflags = shp->boardflags;
sh->boardflags2 = shp->boardflags2;
- sh->bustype = shp->bustype;
sh->buscorerev = shp->buscorerev;
sh->fast_timer = PHY_SW_TIMER_FAST;
@@ -507,9 +426,40 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
return sh;
}
+static void wlc_phy_timercb_phycal(struct brcms_phy *pi)
+{
+ uint delay = 5;
+
+ if (PHY_PERICAL_MPHASE_PENDING(pi)) {
+ if (!pi->sh->up) {
+ wlc_phy_cal_perical_mphase_reset(pi);
+ return;
+ }
+
+ if (SCAN_RM_IN_PROGRESS(pi) || PLT_INPROG_PHY(pi)) {
+
+ delay = 1000;
+ wlc_phy_cal_perical_mphase_restart(pi);
+ } else
+ wlc_phy_cal_perical_nphy_run(pi, PHY_PERICAL_AUTO);
+ wlapi_add_timer(pi->phycal_timer, delay, 0);
+ return;
+ }
+
+}
+
+static u32 wlc_phy_get_radio_ver(struct brcms_phy *pi)
+{
+ u32 ver;
+
+ ver = read_radio_id(pi);
+
+ return ver;
+}
+
struct brcms_phy_pub *
-wlc_phy_attach(struct shared_phy *sh, void *regs, int bandtype,
- char *vars, struct wiphy *wiphy)
+wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
+ int bandtype, struct wiphy *wiphy)
{
struct brcms_phy *pi;
u32 sflags = 0;
@@ -522,42 +472,35 @@ wlc_phy_attach(struct shared_phy *sh, void *regs, int bandtype,
else
sflags = ai_core_sflags(sh->sih, 0, 0);
- if (BAND_5G(bandtype)) {
- if ((sflags & (SISF_5G_PHY | SISF_DB_PHY)) == 0) {
+ if (bandtype == BRCM_BAND_5G) {
+ if ((sflags & (SISF_5G_PHY | SISF_DB_PHY)) == 0)
return NULL;
- }
}
pi = sh->phy_head;
if ((sflags & SISF_DB_PHY) && pi) {
-
wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags);
pi->refcnt++;
return &pi->pubpi_ro;
}
pi = kzalloc(sizeof(struct brcms_phy), GFP_ATOMIC);
- if (pi == NULL) {
+ if (pi == NULL)
return NULL;
- }
pi->wiphy = wiphy;
- pi->regs = (d11regs_t *) regs;
+ pi->regs = regs;
pi->sh = sh;
pi->phy_init_por = true;
pi->phy_wreg_limit = PHY_WREG_LIMIT;
- pi->vars = vars;
-
pi->txpwr_percent = 100;
pi->do_initcal = true;
pi->phycal_tempdelta = 0;
- if (BAND_2G(bandtype) && (sflags & SISF_2G_PHY)) {
-
+ if (bandtype == BRCM_BAND_2G && (sflags & SISF_2G_PHY))
pi->pubpi.coreflags = SICF_GMODE;
- }
wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags);
phyversion = R_REG(&pi->regs->phyversion);
@@ -572,28 +515,26 @@ wlc_phy_attach(struct shared_phy *sh, void *regs, int bandtype,
pi->pubpi.phy_corenum = PHY_CORE_NUM_2;
pi->pubpi.ana_rev = (phyversion & PV_AV_MASK) >> PV_AV_SHIFT;
- if (!VALID_PHYTYPE(pi->pubpi.phy_type)) {
+ if (!pi->pubpi.phy_type == PHY_TYPE_N &&
+ !pi->pubpi.phy_type == PHY_TYPE_LCN)
goto err;
- }
- if (BAND_5G(bandtype)) {
- if (!ISNPHY(pi)) {
- goto err;
- }
- } else {
- if (!ISNPHY(pi) && !ISLCNPHY(pi)) {
+
+ if (bandtype == BRCM_BAND_5G) {
+ if (!ISNPHY(pi))
goto err;
- }
+ } else if (!ISNPHY(pi) && !ISLCNPHY(pi)) {
+ goto err;
}
wlc_phy_anacore((struct brcms_phy_pub *) pi, ON);
idcode = wlc_phy_get_radio_ver(pi);
pi->pubpi.radioid =
- (idcode & IDCODE_ID_MASK) >> IDCODE_ID_SHIFT;
+ (idcode & IDCODE_ID_MASK) >> IDCODE_ID_SHIFT;
pi->pubpi.radiorev =
- (idcode & IDCODE_REV_MASK) >> IDCODE_REV_SHIFT;
+ (idcode & IDCODE_REV_MASK) >> IDCODE_REV_SHIFT;
pi->pubpi.radiover =
- (idcode & IDCODE_VER_MASK) >> IDCODE_VER_SHIFT;
+ (idcode & IDCODE_VER_MASK) >> IDCODE_VER_SHIFT;
if (!VALID_RADIO(pi, pi->pubpi.radioid))
goto err;
@@ -602,8 +543,8 @@ wlc_phy_attach(struct shared_phy *sh, void *regs, int bandtype,
wlc_set_phy_uninitted(pi);
pi->bw = WL_CHANSPEC_BW_20;
- pi->radio_chanspec =
- BAND_2G(bandtype) ? CH20MHZ_CHSPEC(1) : CH20MHZ_CHSPEC(36);
+ pi->radio_chanspec = (bandtype == BRCM_BAND_2G) ?
+ ch20mhz_chspec(1) : ch20mhz_chspec(36);
pi->rxiq_samps = PHY_NOISE_SAMPLE_LOG_NUM_NPHY;
pi->rxiq_antsel = ANT_RX_DIV_DEF;
@@ -625,7 +566,7 @@ wlc_phy_attach(struct shared_phy *sh, void *regs, int bandtype,
pi->phy_txcore_disable_temp = PHY_CHAIN_TX_DISABLE_TEMP;
pi->phy_txcore_enable_temp =
- PHY_CHAIN_TX_DISABLE_TEMP - PHY_HYSTERESIS_DELTATEMP;
+ PHY_CHAIN_TX_DISABLE_TEMP - PHY_HYSTERESIS_DELTATEMP;
pi->phy_tempsense_offset = 0;
pi->phy_txcore_heatedup = false;
@@ -648,11 +589,10 @@ wlc_phy_attach(struct shared_phy *sh, void *regs, int bandtype,
if (ISNPHY(pi)) {
pi->phycal_timer = wlapi_init_timer(pi->sh->physhim,
- wlc_phy_timercb_phycal,
- pi, "phycal");
- if (!pi->phycal_timer) {
+ wlc_phy_timercb_phycal,
+ pi, "phycal");
+ if (!pi->phycal_timer)
goto err;
- }
if (!wlc_phy_attach_nphy(pi))
goto err;
@@ -661,21 +601,17 @@ wlc_phy_attach(struct shared_phy *sh, void *regs, int bandtype,
if (!wlc_phy_attach_lcnphy(pi))
goto err;
- } else {
-
}
pi->refcnt++;
pi->next = pi->sh->phy_head;
sh->phy_head = pi;
- pi->vars = (char *)&pi->vars;
-
memcpy(&pi->pubpi_ro, &pi->pubpi, sizeof(struct brcms_phy_pub));
return &pi->pubpi_ro;
- err:
+err:
kfree(pi);
return NULL;
}
@@ -685,12 +621,11 @@ void wlc_phy_detach(struct brcms_phy_pub *pih)
struct brcms_phy *pi = (struct brcms_phy *) pih;
if (pih) {
- if (--pi->refcnt) {
+ if (--pi->refcnt)
return;
- }
if (pi->phycal_timer) {
- wlapi_free_timer(pi->sh->physhim, pi->phycal_timer);
+ wlapi_free_timer(pi->phycal_timer);
pi->phycal_timer = NULL;
}
@@ -700,7 +635,7 @@ void wlc_phy_detach(struct brcms_phy_pub *pih)
pi->sh->phy_head->next = NULL;
if (pi->pi_fptr.detach)
- (pi->pi_fptr.detach) (pi);
+ (pi->pi_fptr.detach)(pi);
kfree(pi);
}
@@ -731,29 +666,6 @@ u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih)
return pi->pubpi.coreflags;
}
-static void wlc_phy_timercb_phycal(void *arg)
-{
- struct brcms_phy *pi = (struct brcms_phy *) arg;
- uint delay = 5;
-
- if (PHY_PERICAL_MPHASE_PENDING(pi)) {
- if (!pi->sh->up) {
- wlc_phy_cal_perical_mphase_reset(pi);
- return;
- }
-
- if (SCAN_RM_IN_PROGRESS(pi) || PLT_INPROG_PHY(pi)) {
-
- delay = 1000;
- wlc_phy_cal_perical_mphase_restart(pi);
- } else
- wlc_phy_cal_perical_nphy_run(pi, PHY_PERICAL_AUTO);
- wlapi_add_timer(pi->sh->physhim, pi->phycal_timer, delay, 0);
- return;
- }
-
-}
-
void wlc_phy_anacore(struct brcms_phy_pub *pih, bool on)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
@@ -862,10 +774,10 @@ void wlc_phy_hw_state_upd(struct brcms_phy_pub *pih, bool newstate)
pi->sh->up = newstate;
}
-void wlc_phy_init(struct brcms_phy_pub *pih, chanspec_t chanspec)
+void wlc_phy_init(struct brcms_phy_pub *pih, u16 chanspec)
{
u32 mc;
- initfn_t phy_init = NULL;
+ void (*phy_init)(struct brcms_phy *) = NULL;
struct brcms_phy *pi = (struct brcms_phy *) pih;
if (pi->init_in_progress)
@@ -879,9 +791,8 @@ void wlc_phy_init(struct brcms_phy_pub *pih, chanspec_t chanspec)
if (WARN(mc & MCTL_EN_MAC, "HW error MAC running on init"))
return;
- if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN)) {
+ if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN))
pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
- }
if (WARN(!(ai_core_sflags(pi->sh->sih, 0, 0) & SISF_FCLKA),
"HW error SISF_FCLKA\n"))
@@ -889,9 +800,8 @@ void wlc_phy_init(struct brcms_phy_pub *pih, chanspec_t chanspec)
phy_init = pi->pi_fptr.init;
- if (phy_init == NULL) {
+ if (phy_init == NULL)
return;
- }
wlc_phy_anacore(pih, ON);
@@ -903,7 +813,7 @@ void wlc_phy_init(struct brcms_phy_pub *pih, chanspec_t chanspec)
wlc_phy_switch_radio((struct brcms_phy_pub *) pi, ON);
- (*phy_init) (pi);
+ (*phy_init)(pi);
pi->phy_init_por = false;
@@ -921,7 +831,7 @@ void wlc_phy_init(struct brcms_phy_pub *pih, chanspec_t chanspec)
void wlc_phy_cal_init(struct brcms_phy_pub *pih)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
- initfn_t cal_init = NULL;
+ void (*cal_init)(struct brcms_phy *) = NULL;
if (WARN((R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) != 0,
"HW error: MAC enabled during phy cal\n"))
@@ -930,7 +840,7 @@ void wlc_phy_cal_init(struct brcms_phy_pub *pih)
if (!pi->initialized) {
cal_init = pi->pi_fptr.calinit;
if (cal_init)
- (*cal_init) (pi);
+ (*cal_init)(pi);
pi->initialized = true;
}
@@ -942,7 +852,7 @@ int wlc_phy_down(struct brcms_phy_pub *pih)
int callbacks = 0;
if (pi->phycal_timer
- && !wlapi_del_timer(pi->sh->physhim, pi->phycal_timer))
+ && !wlapi_del_timer(pi->phycal_timer))
callbacks++;
pi->nphy_iqcal_chanspec_2G = 0;
@@ -951,15 +861,6 @@ int wlc_phy_down(struct brcms_phy_pub *pih)
return callbacks;
}
-static u32 wlc_phy_get_radio_ver(struct brcms_phy *pi)
-{
- u32 ver;
-
- ver = read_radio_id(pi);
-
- return ver;
-}
-
void
wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
u16 tblAddr, u16 tblDataHi, u16 tblDataLo)
@@ -990,11 +891,9 @@ void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val)
}
if (width == 32) {
-
write_phy_reg(pi, pi->tbl_data_hi, (u16) (val >> 16));
write_phy_reg(pi, pi->tbl_data_lo, (u16) val);
} else {
-
write_phy_reg(pi, pi->tbl_data_lo, (u16) val);
}
}
@@ -1025,15 +924,12 @@ wlc_phy_write_table(struct brcms_phy *pi, const struct phytbl_info *ptbl_info,
}
if (tbl_width == 32) {
-
write_phy_reg(pi, tblDataHi,
(u16) (ptbl_32b[idx] >> 16));
write_phy_reg(pi, tblDataLo, (u16) ptbl_32b[idx]);
} else if (tbl_width == 16) {
-
write_phy_reg(pi, tblDataLo, ptbl_16b[idx]);
} else {
-
write_phy_reg(pi, tblDataLo, ptbl_8b[idx]);
}
}
@@ -1064,14 +960,11 @@ wlc_phy_read_table(struct brcms_phy *pi, const struct phytbl_info *ptbl_info,
}
if (tbl_width == 32) {
-
ptbl_32b[idx] = read_phy_reg(pi, tblDataLo);
ptbl_32b[idx] |= (read_phy_reg(pi, tblDataHi) << 16);
} else if (tbl_width == 16) {
-
ptbl_16b[idx] = read_phy_reg(pi, tblDataLo);
} else {
-
ptbl_8b[idx] = (u8) read_phy_reg(pi, tblDataLo);
}
}
@@ -1084,10 +977,9 @@ wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
uint i = 0;
do {
- if (radioregs[i].do_init) {
+ if (radioregs[i].do_init)
write_radio_reg(pi, radioregs[i].address,
(u16) radioregs[i].init);
- }
i++;
} while (radioregs[i].address != 0xffff);
@@ -1096,7 +988,8 @@ wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
}
uint
-wlc_phy_init_radio_regs(struct brcms_phy *pi, struct radio_regs *radioregs,
+wlc_phy_init_radio_regs(struct brcms_phy *pi,
+ const struct radio_regs *radioregs,
u16 core_offset)
{
uint i = 0;
@@ -1131,8 +1024,8 @@ wlc_phy_init_radio_regs(struct brcms_phy *pi, struct radio_regs *radioregs,
void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
{
-#define DUMMY_PKT_LEN 20
- d11regs_t *regs = pi->regs;
+#define DUMMY_PKT_LEN 20
+ struct d11regs __iomem *regs = pi->regs;
int i, count;
u8 ofdmpkt[DUMMY_PKT_LEN] = {
0xcc, 0x01, 0x02, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00,
@@ -1156,9 +1049,8 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
W_REG(&regs->wepctl, 0);
W_REG(&regs->txe_phyctl, (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
- if (ISNPHY(pi) || ISLCNPHY(pi)) {
+ if (ISNPHY(pi) || ISLCNPHY(pi))
W_REG(&regs->txe_phyctl1, 0x1A02);
- }
W_REG(&regs->txe_wm_0, 0);
W_REG(&regs->txe_wm_1, 0);
@@ -1185,16 +1077,14 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
i = 0;
count = ofdm ? 30 : 250;
while ((i++ < count)
- && (R_REG(&regs->txe_status) & (1 << 7))) {
+ && (R_REG(&regs->txe_status) & (1 << 7)))
udelay(10);
- }
i = 0;
while ((i++ < 10)
- && ((R_REG(&regs->txe_status) & (1 << 10)) == 0)) {
+ && ((R_REG(&regs->txe_status) & (1 << 10)) == 0))
udelay(10);
- }
i = 0;
@@ -1207,28 +1097,26 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
}
}
-void wlc_phy_hold_upd(struct brcms_phy_pub *pih, mbool id, bool set)
+void wlc_phy_hold_upd(struct brcms_phy_pub *pih, u32 id, bool set)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
- if (set) {
+ if (set)
mboolset(pi->measure_hold, id);
- } else {
+ else
mboolclr(pi->measure_hold, id);
- }
return;
}
-void wlc_phy_mute_upd(struct brcms_phy_pub *pih, bool mute, mbool flags)
+void wlc_phy_mute_upd(struct brcms_phy_pub *pih, bool mute, u32 flags)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
- if (mute) {
+ if (mute)
mboolset(pi->measure_hold, PHY_HOLD_FOR_MUTE);
- } else {
+ else
mboolclr(pi->measure_hold, PHY_HOLD_FOR_MUTE);
- }
if (!mute && (flags & PHY_MUTE_FOR_PREISM))
pi->nphy_perical_last = pi->sh->now - pi->sh->glacial_timer;
@@ -1257,19 +1145,10 @@ static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi)
void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
-
- if (NORADIO_ENAB(pi->pubpi))
- return;
-
- {
- uint mc;
-
- mc = R_REG(&pi->regs->maccontrol);
- }
+ (void)R_REG(&pi->regs->maccontrol);
if (ISNPHY(pi)) {
wlc_phy_switch_radio_nphy(pi, on);
-
} else if (ISLCNPHY(pi)) {
if (on) {
and_phy_reg(pi, 0x44c,
@@ -1311,26 +1190,25 @@ void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw)
pi->bw = bw;
}
-void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, chanspec_t newch)
+void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, u16 newch)
{
struct brcms_phy *pi = (struct brcms_phy *) ppi;
pi->radio_chanspec = newch;
}
-chanspec_t wlc_phy_chanspec_get(struct brcms_phy_pub *ppi)
+u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi)
{
struct brcms_phy *pi = (struct brcms_phy *) ppi;
return pi->radio_chanspec;
}
-void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, chanspec_t chanspec)
+void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, u16 chanspec)
{
struct brcms_phy *pi = (struct brcms_phy *) ppi;
u16 m_cur_channel;
- chansetfn_t chanspec_set = NULL;
-
+ void (*chanspec_set)(struct brcms_phy *, u16) = NULL;
m_cur_channel = CHSPEC_CHANNEL(chanspec);
if (CHSPEC_IS5G(chanspec))
m_cur_channel |= D11_CURCHANNEL_5G;
@@ -1340,7 +1218,7 @@ void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, chanspec_t chanspec)
chanspec_set = pi->pi_fptr.chanset;
if (chanspec_set)
- (*chanspec_set) (pi, chanspec);
+ (*chanspec_set)(pi, chanspec);
}
@@ -1360,17 +1238,16 @@ int wlc_phy_chanspec_freq2bandrange_lpssn(uint freq)
return range;
}
-int wlc_phy_chanspec_bandrange_get(struct brcms_phy *pi, chanspec_t chanspec)
+int wlc_phy_chanspec_bandrange_get(struct brcms_phy *pi, u16 chanspec)
{
int range = -1;
uint channel = CHSPEC_CHANNEL(chanspec);
uint freq = wlc_phy_channel2freq(channel);
- if (ISNPHY(pi)) {
+ if (ISNPHY(pi))
range = wlc_phy_get_chan_freq_range_nphy(pi, channel);
- } else if (ISLCNPHY(pi)) {
+ else if (ISLCNPHY(pi))
range = wlc_phy_chanspec_freq2bandrange_lpssn(freq);
- }
return range;
}
@@ -1396,13 +1273,13 @@ int wlc_phy_channel2freq(uint channel)
void
wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
- chanvec_t *channels)
+ struct brcms_chanvec *channels)
{
struct brcms_phy *pi = (struct brcms_phy *) ppi;
uint i;
uint channel;
- memset(channels, 0, sizeof(chanvec_t));
+ memset(channels, 0, sizeof(struct brcms_chanvec));
for (i = 0; i < ARRAY_SIZE(chan_info_all); i++) {
channel = chan_info_all[i].chan;
@@ -1417,17 +1294,17 @@ wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
}
}
-chanspec_t wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band)
+u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band)
{
struct brcms_phy *pi = (struct brcms_phy *) ppi;
uint i;
uint channel;
- chanspec_t chspec;
+ u16 chspec;
for (i = 0; i < ARRAY_SIZE(chan_info_all); i++) {
channel = chan_info_all[i].chan;
- if (ISNPHY(pi) && IS40MHZ(pi)) {
+ if (ISNPHY(pi) && pi->bw == WL_CHANSPEC_BW_40) {
uint j;
for (j = 0; j < ARRAY_SIZE(chan_info_all); j++) {
@@ -1439,16 +1316,15 @@ chanspec_t wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band)
if (j == ARRAY_SIZE(chan_info_all))
continue;
- channel = UPPER_20_SB(channel);
- chspec =
- channel | WL_CHANSPEC_BW_40 |
- WL_CHANSPEC_CTL_SB_LOWER;
+ channel = upper_20_sb(channel);
+ chspec = channel | WL_CHANSPEC_BW_40 |
+ WL_CHANSPEC_CTL_SB_LOWER;
if (band == BRCM_BAND_2G)
chspec |= WL_CHANSPEC_BAND_2G;
else
chspec |= WL_CHANSPEC_BAND_5G;
} else
- chspec = CH20MHZ_CHSPEC(channel);
+ chspec = ch20mhz_chspec(channel);
if ((pi->a_band_high_disable) && (channel >= FIRST_REF5_CHANNUM)
&& (channel <= LAST_REF5_CHANNUM))
@@ -1459,7 +1335,7 @@ chanspec_t wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band)
return chspec;
}
- return (chanspec_t) INVCHANSPEC;
+ return (u16) INVCHANSPEC;
}
int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override)
@@ -1528,7 +1404,7 @@ int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override)
int i;
if (qdbm > 127)
- return 5;
+ return -EINVAL;
for (i = 0; i < TXP_NUM_RATES; i++)
pi->tx_user_target[i] = (u8) qdbm;
@@ -1539,10 +1415,8 @@ int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override)
if (!SCAN_INPROG_PHY(pi)) {
bool suspend;
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) &
- MCTL_EN_MAC));
+ suspend = (0 == (R_REG(&pi->regs->maccontrol) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -1584,9 +1458,8 @@ wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint channel, u8 *min_pwr,
txp_rate_idx = TXP_FIRST_OFDM;
for (i = 0; i < ARRAY_SIZE(chan_info_all); i++) {
- if (channel == chan_info_all[i].chan) {
+ if (channel == chan_info_all[i].chan)
break;
- }
}
if (pi->hwtxpwr) {
@@ -1620,7 +1493,8 @@ wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, uint chan,
pactrl = 0;
max_num_rate = ISNPHY(pi) ? TXP_NUM_RATES :
- ISLCNPHY(pi) ? (TXP_LAST_SISO_MCS_20 + 1) : (TXP_LAST_OFDM + 1);
+ ISLCNPHY(pi) ? (TXP_LAST_SISO_MCS_20 +
+ 1) : (TXP_LAST_OFDM + 1);
for (rate = 0; rate < max_num_rate; rate++) {
@@ -1659,6 +1533,46 @@ u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi)
return pi->tx_power_max;
}
+static s8 wlc_phy_env_measure_vbat(struct brcms_phy *pi)
+{
+ if (ISLCNPHY(pi))
+ return wlc_lcnphy_vbatsense(pi, 0);
+ else
+ return 0;
+}
+
+static s8 wlc_phy_env_measure_temperature(struct brcms_phy *pi)
+{
+ if (ISLCNPHY(pi))
+ return wlc_lcnphy_tempsense_degree(pi, 0);
+ else
+ return 0;
+}
+
+static void wlc_phy_upd_env_txpwr_rate_limits(struct brcms_phy *pi, u32 band)
+{
+ u8 i;
+ s8 temp, vbat;
+
+ for (i = 0; i < TXP_NUM_RATES; i++)
+ pi->txpwr_env_limit[i] = BRCMS_TXPWR_MAX;
+
+ vbat = wlc_phy_env_measure_vbat(pi);
+ temp = wlc_phy_env_measure_temperature(pi);
+
+}
+
+static s8
+wlc_user_txpwr_antport_to_rfport(struct brcms_phy *pi, uint chan, u32 band,
+ u8 rate)
+{
+ s8 offset = 0;
+
+ if (!pi->user_txpwr_at_rfport)
+ return offset;
+ return offset;
+}
+
void wlc_phy_txpower_recalc_target(struct brcms_phy *pi)
{
u8 maxtxpwr, mintxpwr, rate, pactrl;
@@ -1669,17 +1583,17 @@ void wlc_phy_txpower_recalc_target(struct brcms_phy *pi)
u8 tx_pwr_max_rate_ind = 0;
u8 max_num_rate;
u8 start_rate = 0;
- chanspec_t chspec;
+ u16 chspec;
u32 band = CHSPEC2BAND(pi->radio_chanspec);
- initfn_t txpwr_recalc_fn = NULL;
+ void (*txpwr_recalc_fn)(struct brcms_phy *) = NULL;
chspec = pi->radio_chanspec;
if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE)
target_chan = CHSPEC_CHANNEL(chspec);
else if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER)
- target_chan = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
+ target_chan = upper_20_sb(CHSPEC_CHANNEL(chspec));
else
- target_chan = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
+ target_chan = lower_20_sb(CHSPEC_CHANNEL(chspec));
pactrl = 0;
if (ISLCNPHY(pi)) {
@@ -1690,8 +1604,8 @@ void wlc_phy_txpower_recalc_target(struct brcms_phy *pi)
for (i = TXP_FIRST_SISO_MCS_20;
i <= TXP_LAST_SISO_MCS_20; i++) {
pi->tx_srom_max_rate_2g[i - 8] =
- pi->tx_srom_max_2g -
- ((offset_mcs & 0xf) * 2);
+ pi->tx_srom_max_2g -
+ ((offset_mcs & 0xf) * 2);
offset_mcs >>= 4;
}
} else {
@@ -1699,19 +1613,16 @@ void wlc_phy_txpower_recalc_target(struct brcms_phy *pi)
for (i = TXP_FIRST_SISO_MCS_20;
i <= TXP_LAST_SISO_MCS_20; i++) {
pi->tx_srom_max_rate_2g[i - 8] =
- pi->tx_srom_max_2g -
- ((offset_mcs & 0xf) * 2);
+ pi->tx_srom_max_2g -
+ ((offset_mcs & 0xf) * 2);
offset_mcs >>= 4;
}
}
}
-#if WL11N
+
max_num_rate = ((ISNPHY(pi)) ? (TXP_NUM_RATES) :
((ISLCNPHY(pi)) ?
(TXP_LAST_SISO_MCS_20 + 1) : (TXP_LAST_OFDM + 1)));
-#else
- max_num_rate = ((ISNPHY(pi)) ? (TXP_NUM_RATES) : (TXP_LAST_OFDM + 1));
-#endif
wlc_phy_upd_env_txpwr_rate_limits(pi, band);
@@ -1719,35 +1630,32 @@ void wlc_phy_txpower_recalc_target(struct brcms_phy *pi)
tx_pwr_target[rate] = pi->tx_user_target[rate];
- if (pi->user_txpwr_at_rfport) {
+ if (pi->user_txpwr_at_rfport)
tx_pwr_target[rate] +=
- wlc_user_txpwr_antport_to_rfport(pi, target_chan,
- band, rate);
- }
+ wlc_user_txpwr_antport_to_rfport(pi,
+ target_chan,
+ band,
+ rate);
- {
+ wlc_phy_txpower_sromlimit((struct brcms_phy_pub *) pi,
+ target_chan,
+ &mintxpwr, &maxtxpwr, rate);
- wlc_phy_txpower_sromlimit((struct brcms_phy_pub *) pi,
- target_chan,
- &mintxpwr, &maxtxpwr, rate);
+ maxtxpwr = min(maxtxpwr, pi->txpwr_limit[rate]);
- maxtxpwr = min(maxtxpwr, pi->txpwr_limit[rate]);
-
- maxtxpwr =
- (maxtxpwr > pactrl) ? (maxtxpwr - pactrl) : 0;
+ maxtxpwr = (maxtxpwr > pactrl) ? (maxtxpwr - pactrl) : 0;
- maxtxpwr = (maxtxpwr > 6) ? (maxtxpwr - 6) : 0;
+ maxtxpwr = (maxtxpwr > 6) ? (maxtxpwr - 6) : 0;
- maxtxpwr = min(maxtxpwr, tx_pwr_target[rate]);
+ maxtxpwr = min(maxtxpwr, tx_pwr_target[rate]);
- if (pi->txpwr_percent <= 100)
- maxtxpwr = (maxtxpwr * pi->txpwr_percent) / 100;
+ if (pi->txpwr_percent <= 100)
+ maxtxpwr = (maxtxpwr * pi->txpwr_percent) / 100;
- tx_pwr_target[rate] = max(maxtxpwr, mintxpwr);
- }
+ tx_pwr_target[rate] = max(maxtxpwr, mintxpwr);
tx_pwr_target[rate] =
- min(tx_pwr_target[rate], pi->txpwr_env_limit[rate]);
+ min(tx_pwr_target[rate], pi->txpwr_env_limit[rate]);
if (tx_pwr_target[rate] > tx_pwr_max)
tx_pwr_max_rate_ind = rate;
@@ -1764,23 +1672,22 @@ void wlc_phy_txpower_recalc_target(struct brcms_phy *pi)
pi->tx_power_target[rate] = tx_pwr_target[rate];
- if (!pi->hwpwrctrl || ISNPHY(pi)) {
+ if (!pi->hwpwrctrl || ISNPHY(pi))
pi->tx_power_offset[rate] =
- pi->tx_power_max - pi->tx_power_target[rate];
- } else {
+ pi->tx_power_max - pi->tx_power_target[rate];
+ else
pi->tx_power_offset[rate] =
- pi->tx_power_target[rate] - pi->tx_power_min;
- }
+ pi->tx_power_target[rate] - pi->tx_power_min;
}
txpwr_recalc_fn = pi->pi_fptr.txpwrrecalc;
if (txpwr_recalc_fn)
- (*txpwr_recalc_fn) (pi);
+ (*txpwr_recalc_fn)(pi);
}
-void
+static void
wlc_phy_txpower_reg_limit_calc(struct brcms_phy *pi, struct txpwr_limits *txpwr,
- chanspec_t chanspec)
+ u16 chanspec)
{
u8 tmp_txpwr_limit[2 * BRCMS_NUM_RATES_OFDM];
u8 *txpwr_ptr1 = NULL, *txpwr_ptr2 = NULL;
@@ -1815,7 +1722,7 @@ wlc_phy_txpower_reg_limit_calc(struct brcms_phy *pi, struct txpwr_limits *txpwr,
txpwr_ptr1 = txpwr->mcs_40_siso;
txpwr_ptr2 = txpwr->ofdm_40_siso;
rate_start_index =
- WL_TX_POWER_OFDM40_SISO_FIRST;
+ WL_TX_POWER_OFDM40_SISO_FIRST;
break;
case 3:
@@ -1825,18 +1732,21 @@ wlc_phy_txpower_reg_limit_calc(struct brcms_phy *pi, struct txpwr_limits *txpwr,
break;
}
- for (rate2 = 0; rate2 < BRCMS_NUM_RATES_OFDM; rate2++) {
+ for (rate2 = 0; rate2 < BRCMS_NUM_RATES_OFDM;
+ rate2++) {
tmp_txpwr_limit[rate2] = 0;
tmp_txpwr_limit[BRCMS_NUM_RATES_OFDM + rate2] =
- txpwr_ptr1[rate2];
+ txpwr_ptr1[rate2];
}
- wlc_phy_mcs_to_ofdm_powers_nphy(tmp_txpwr_limit, 0,
- BRCMS_NUM_RATES_OFDM - 1, BRCMS_NUM_RATES_OFDM);
+ wlc_phy_mcs_to_ofdm_powers_nphy(
+ tmp_txpwr_limit, 0,
+ BRCMS_NUM_RATES_OFDM -
+ 1, BRCMS_NUM_RATES_OFDM);
for (rate1 = rate_start_index, rate2 = 0;
rate2 < BRCMS_NUM_RATES_OFDM; rate1++, rate2++)
pi->txpwr_limit[rate1] =
- min(txpwr_ptr2[rate2],
- tmp_txpwr_limit[rate2]);
+ min(txpwr_ptr2[rate2],
+ tmp_txpwr_limit[rate2]);
}
for (k = 0; k < 4; k++) {
@@ -1866,19 +1776,22 @@ wlc_phy_txpower_reg_limit_calc(struct brcms_phy *pi, struct txpwr_limits *txpwr,
rate_start_index = WL_TX_POWER_MCS40_CDD_FIRST;
break;
}
- for (rate2 = 0; rate2 < BRCMS_NUM_RATES_OFDM; rate2++) {
+ for (rate2 = 0; rate2 < BRCMS_NUM_RATES_OFDM;
+ rate2++) {
tmp_txpwr_limit[rate2] = 0;
tmp_txpwr_limit[BRCMS_NUM_RATES_OFDM + rate2] =
- txpwr_ptr1[rate2];
+ txpwr_ptr1[rate2];
}
- wlc_phy_ofdm_to_mcs_powers_nphy(tmp_txpwr_limit, 0,
- BRCMS_NUM_RATES_OFDM - 1, BRCMS_NUM_RATES_OFDM);
+ wlc_phy_ofdm_to_mcs_powers_nphy(
+ tmp_txpwr_limit, 0,
+ BRCMS_NUM_RATES_OFDM -
+ 1, BRCMS_NUM_RATES_OFDM);
for (rate1 = rate_start_index, rate2 = 0;
rate2 < BRCMS_NUM_RATES_MCS_1_STREAM;
rate1++, rate2++)
pi->txpwr_limit[rate1] =
- min(txpwr_ptr2[rate2],
- tmp_txpwr_limit[rate2]);
+ min(txpwr_ptr2[rate2],
+ tmp_txpwr_limit[rate2]);
}
for (k = 0; k < 2; k++) {
@@ -1922,10 +1835,10 @@ wlc_phy_txpower_reg_limit_calc(struct brcms_phy *pi, struct txpwr_limits *txpwr,
pi->txpwr_limit[WL_TX_POWER_MCS_32] = txpwr->mcs32;
pi->txpwr_limit[WL_TX_POWER_MCS40_CDD_FIRST] =
- min(pi->txpwr_limit[WL_TX_POWER_MCS40_CDD_FIRST],
- pi->txpwr_limit[WL_TX_POWER_MCS_32]);
+ min(pi->txpwr_limit[WL_TX_POWER_MCS40_CDD_FIRST],
+ pi->txpwr_limit[WL_TX_POWER_MCS_32]);
pi->txpwr_limit[WL_TX_POWER_MCS_32] =
- pi->txpwr_limit[WL_TX_POWER_MCS40_CDD_FIRST];
+ pi->txpwr_limit[WL_TX_POWER_MCS40_CDD_FIRST];
}
}
@@ -1975,7 +1888,7 @@ void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end)
void
wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *txpwr,
- chanspec_t chanspec)
+ u16 chanspec)
{
struct brcms_phy *pi = (struct brcms_phy *) ppi;
@@ -2016,9 +1929,8 @@ void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih, bool bf_preempt)
void wlc_phy_txpower_update_shm(struct brcms_phy *pi)
{
int j;
- if (ISNPHY(pi)) {
+ if (ISNPHY(pi))
return;
- }
if (!pi->sh->clk)
return;
@@ -2040,9 +1952,9 @@ void wlc_phy_txpower_update_shm(struct brcms_phy *pi)
const u8 ucode_ofdm_rates[] = {
0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c
};
- offset = wlapi_bmac_rate_shm_offset(pi->sh->physhim,
- ucode_ofdm_rates[j -
- TXP_FIRST_OFDM]);
+ offset = wlapi_bmac_rate_shm_offset(
+ pi->sh->physhim,
+ ucode_ofdm_rates[j - TXP_FIRST_OFDM]);
wlapi_bmac_write_shm(pi->sh->physhim, offset + 6,
pi->tx_power_offset[j]);
wlapi_bmac_write_shm(pi->sh->physhim, offset + 14,
@@ -2056,11 +1968,11 @@ void wlc_phy_txpower_update_shm(struct brcms_phy *pi)
for (i = TXP_FIRST_OFDM; i <= TXP_LAST_OFDM; i++)
pi->tx_power_offset[i] =
- (u8) roundup(pi->tx_power_offset[i], 8);
+ (u8) roundup(pi->tx_power_offset[i], 8);
wlapi_bmac_write_shm(pi->sh->physhim, M_OFDM_OFFSET,
- (u16) ((pi->
- tx_power_offset[TXP_FIRST_OFDM]
- + 7) >> 3));
+ (u16)
+ ((pi->tx_power_offset[TXP_FIRST_OFDM]
+ + 7) >> 3));
}
}
@@ -2068,48 +1980,38 @@ bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi)
{
struct brcms_phy *pi = (struct brcms_phy *) ppi;
- if (ISNPHY(pi)) {
+ if (ISNPHY(pi))
return pi->nphy_txpwrctrl;
- } else {
+ else
return pi->hwpwrctrl;
- }
}
void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl)
{
struct brcms_phy *pi = (struct brcms_phy *) ppi;
- bool cur_hwpwrctrl = pi->hwpwrctrl;
bool suspend;
- if (!pi->hwpwrctrl_capable) {
+ if (!pi->hwpwrctrl_capable)
return;
- }
pi->hwpwrctrl = hwpwrctrl;
pi->nphy_txpwrctrl = hwpwrctrl;
pi->txpwrctrl = hwpwrctrl;
if (ISNPHY(pi)) {
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_phy_txpwrctrl_enable_nphy(pi, pi->nphy_txpwrctrl);
- if (pi->nphy_txpwrctrl == PHY_TPC_HW_OFF) {
+ if (pi->nphy_txpwrctrl == PHY_TPC_HW_OFF)
wlc_phy_txpwr_fixpower_nphy(pi);
- } else {
-
+ else
mod_phy_reg(pi, 0x1e7, (0x7f << 0),
pi->saved_txpwr_idx);
- }
if (!suspend)
wlapi_enable_mac(pi->sh->physhim);
- } else if (hwpwrctrl != cur_hwpwrctrl) {
-
- return;
}
}
@@ -2125,8 +2027,6 @@ void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi)
}
}
-static u32 wlc_phy_txpower_est_power_nphy(struct brcms_phy *pi);
-
static u32 wlc_phy_txpower_est_power_nphy(struct brcms_phy *pi)
{
s16 tx0_status, tx1_status;
@@ -2137,42 +2037,31 @@ static u32 wlc_phy_txpower_est_power_nphy(struct brcms_phy *pi)
estPower1 = read_phy_reg(pi, 0x118);
estPower2 = read_phy_reg(pi, 0x119);
- if ((estPower1 & (0x1 << 8))
- == (0x1 << 8)) {
- pwr0 = (u8) (estPower1 & (0xff << 0))
- >> 0;
- } else {
+ if ((estPower1 & (0x1 << 8)) == (0x1 << 8))
+ pwr0 = (u8) (estPower1 & (0xff << 0)) >> 0;
+ else
pwr0 = 0x80;
- }
- if ((estPower2 & (0x1 << 8))
- == (0x1 << 8)) {
- pwr1 = (u8) (estPower2 & (0xff << 0))
- >> 0;
- } else {
+ if ((estPower2 & (0x1 << 8)) == (0x1 << 8))
+ pwr1 = (u8) (estPower2 & (0xff << 0)) >> 0;
+ else
pwr1 = 0x80;
- }
tx0_status = read_phy_reg(pi, 0x1ed);
tx1_status = read_phy_reg(pi, 0x1ee);
- if ((tx0_status & (0x1 << 15))
- == (0x1 << 15)) {
- adj_pwr0 = (u8) (tx0_status & (0xff << 0))
- >> 0;
- } else {
+ if ((tx0_status & (0x1 << 15)) == (0x1 << 15))
+ adj_pwr0 = (u8) (tx0_status & (0xff << 0)) >> 0;
+ else
adj_pwr0 = 0x80;
- }
- if ((tx1_status & (0x1 << 15))
- == (0x1 << 15)) {
- adj_pwr1 = (u8) (tx1_status & (0xff << 0))
- >> 0;
- } else {
+ if ((tx1_status & (0x1 << 15)) == (0x1 << 15))
+ adj_pwr1 = (u8) (tx1_status & (0xff << 0)) >> 0;
+ else
adj_pwr1 = 0x80;
- }
- est_pwr =
- (u32) ((pwr0 << 24) | (pwr1 << 16) | (adj_pwr0 << 8) | adj_pwr1);
+ est_pwr = (u32) ((pwr0 << 24) | (pwr1 << 16) | (adj_pwr0 << 8) |
+ adj_pwr1);
+
return est_pwr;
}
@@ -2193,7 +2082,7 @@ wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi, struct tx_power *power,
power->flags |= (WL_TX_POWER_F_MIMO);
if (pi->nphy_txpwrctrl == PHY_TPC_HW_ON)
power->flags |=
- (WL_TX_POWER_F_ENABLED | WL_TX_POWER_F_HW);
+ (WL_TX_POWER_F_ENABLED | WL_TX_POWER_F_HW);
} else if (ISLCNPHY(pi)) {
power->rf_cores = 1;
power->flags |= (WL_TX_POWER_F_SISO);
@@ -2256,16 +2145,18 @@ wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi, struct tx_power *power,
power->tx_power_max[1] = pi->tx_power_max;
power->tx_power_max_rate_ind[0] =
- pi->tx_power_max_rate_ind;
+ pi->tx_power_max_rate_ind;
power->tx_power_max_rate_ind[1] =
- pi->tx_power_max_rate_ind;
+ pi->tx_power_max_rate_ind;
if (wlc_phy_tpc_isenabled_lcnphy(pi))
power->flags |=
- (WL_TX_POWER_F_HW | WL_TX_POWER_F_ENABLED);
+ (WL_TX_POWER_F_HW |
+ WL_TX_POWER_F_ENABLED);
else
power->flags &=
- ~(WL_TX_POWER_F_HW | WL_TX_POWER_F_ENABLED);
+ ~(WL_TX_POWER_F_HW |
+ WL_TX_POWER_F_ENABLED);
wlc_lcnphy_get_tssi(pi, (s8 *) &power->est_Pout[0],
(s8 *) &power->est_Pout_cck);
@@ -2304,16 +2195,13 @@ void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val)
BRCM_BAND_ALL);
}
- if (ISNPHY(pi)) {
-
+ if (ISNPHY(pi))
return;
- }
if (!pi->sh->clk)
return;
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -2357,10 +2245,127 @@ wlc_phy_noise_calc_phy(struct brcms_phy *pi, u32 *cmplx_pwr, s8 *pwr_ant)
pwr_ant[i] = cmplx_pwr_dbm[i];
}
pi->nphy_noise_index =
- MODINC_POW2(pi->nphy_noise_index, PHY_NOISE_WINDOW_SZ);
+ MODINC_POW2(pi->nphy_noise_index, PHY_NOISE_WINDOW_SZ);
return true;
}
+static void wlc_phy_noise_cb(struct brcms_phy *pi, u8 channel, s8 noise_dbm)
+{
+ if (!pi->phynoise_state)
+ return;
+
+ if (pi->phynoise_state & PHY_NOISE_STATE_MON) {
+ if (pi->phynoise_chan_watchdog == channel) {
+ pi->sh->phy_noise_window[pi->sh->phy_noise_index] =
+ noise_dbm;
+ pi->sh->phy_noise_index =
+ MODINC(pi->sh->phy_noise_index, MA_WINDOW_SZ);
+ }
+ pi->phynoise_state &= ~PHY_NOISE_STATE_MON;
+ }
+
+ if (pi->phynoise_state & PHY_NOISE_STATE_EXTERNAL)
+ pi->phynoise_state &= ~PHY_NOISE_STATE_EXTERNAL;
+
+}
+
+static s8 wlc_phy_noise_read_shmem(struct brcms_phy *pi)
+{
+ u32 cmplx_pwr[PHY_CORE_MAX];
+ s8 noise_dbm_ant[PHY_CORE_MAX];
+ u16 lo, hi;
+ u32 cmplx_pwr_tot = 0;
+ s8 noise_dbm = PHY_NOISE_FIXED_VAL_NPHY;
+ u8 idx, core;
+
+ memset((u8 *) cmplx_pwr, 0, sizeof(cmplx_pwr));
+ memset((u8 *) noise_dbm_ant, 0, sizeof(noise_dbm_ant));
+
+ for (idx = 0, core = 0; core < pi->pubpi.phy_corenum; idx += 2,
+ core++) {
+ lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP(idx));
+ hi = wlapi_bmac_read_shm(pi->sh->physhim,
+ M_PWRIND_MAP(idx + 1));
+ cmplx_pwr[core] = (hi << 16) + lo;
+ cmplx_pwr_tot += cmplx_pwr[core];
+ if (cmplx_pwr[core] == 0)
+ noise_dbm_ant[core] = PHY_NOISE_FIXED_VAL_NPHY;
+ else
+ cmplx_pwr[core] >>= PHY_NOISE_SAMPLE_LOG_NUM_UCODE;
+ }
+
+ if (cmplx_pwr_tot != 0)
+ wlc_phy_noise_calc_phy(pi, cmplx_pwr, noise_dbm_ant);
+
+ for (core = 0; core < pi->pubpi.phy_corenum; core++) {
+ pi->nphy_noise_win[core][pi->nphy_noise_index] =
+ noise_dbm_ant[core];
+
+ if (noise_dbm_ant[core] > noise_dbm)
+ noise_dbm = noise_dbm_ant[core];
+ }
+ pi->nphy_noise_index =
+ MODINC_POW2(pi->nphy_noise_index, PHY_NOISE_WINDOW_SZ);
+
+ return noise_dbm;
+
+}
+
+void wlc_phy_noise_sample_intr(struct brcms_phy_pub *pih)
+{
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
+ u16 jssi_aux;
+ u8 channel = 0;
+ s8 noise_dbm = PHY_NOISE_FIXED_VAL_NPHY;
+
+ if (ISLCNPHY(pi)) {
+ u32 cmplx_pwr, cmplx_pwr0, cmplx_pwr1;
+ u16 lo, hi;
+ s32 pwr_offset_dB, gain_dB;
+ u16 status_0, status_1;
+
+ jssi_aux = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_AUX);
+ channel = jssi_aux & D11_CURCHANNEL_MAX;
+
+ lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP0);
+ hi = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP1);
+ cmplx_pwr0 = (hi << 16) + lo;
+
+ lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP2);
+ hi = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP3);
+ cmplx_pwr1 = (hi << 16) + lo;
+ cmplx_pwr = (cmplx_pwr0 + cmplx_pwr1) >> 6;
+
+ status_0 = 0x44;
+ status_1 = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_0);
+ if ((cmplx_pwr > 0 && cmplx_pwr < 500)
+ && ((status_1 & 0xc000) == 0x4000)) {
+
+ wlc_phy_compute_dB(&cmplx_pwr, &noise_dbm,
+ pi->pubpi.phy_corenum);
+ pwr_offset_dB = (read_phy_reg(pi, 0x434) & 0xFF);
+ if (pwr_offset_dB > 127)
+ pwr_offset_dB -= 256;
+
+ noise_dbm += (s8) (pwr_offset_dB - 30);
+
+ gain_dB = (status_0 & 0x1ff);
+ noise_dbm -= (s8) (gain_dB);
+ } else {
+ noise_dbm = PHY_NOISE_FIXED_VAL_LCNPHY;
+ }
+ } else if (ISNPHY(pi)) {
+
+ jssi_aux = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_AUX);
+ channel = jssi_aux & D11_CURCHANNEL_MAX;
+
+ noise_dbm = wlc_phy_noise_read_shmem(pi);
+ }
+
+ wlc_phy_noise_cb(pi, channel, noise_dbm);
+
+}
+
static void
wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
{
@@ -2369,20 +2374,13 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
bool sampling_in_progress = (pi->phynoise_state != 0);
bool wait_for_intr = true;
- if (NORADIO_ENAB(pi->pubpi)) {
- return;
- }
-
switch (reason) {
case PHY_NOISE_SAMPLE_MON:
-
pi->phynoise_chan_watchdog = ch;
pi->phynoise_state |= PHY_NOISE_STATE_MON;
-
break;
case PHY_NOISE_SAMPLE_EXTERNAL:
-
pi->phynoise_state |= PHY_NOISE_STATE_EXTERNAL;
break;
@@ -2398,15 +2396,13 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
if (pi->phy_fixed_noise) {
if (ISNPHY(pi)) {
pi->nphy_noise_win[WL_ANT_IDX_1][pi->nphy_noise_index] =
- PHY_NOISE_FIXED_VAL_NPHY;
+ PHY_NOISE_FIXED_VAL_NPHY;
pi->nphy_noise_win[WL_ANT_IDX_2][pi->nphy_noise_index] =
- PHY_NOISE_FIXED_VAL_NPHY;
+ PHY_NOISE_FIXED_VAL_NPHY;
pi->nphy_noise_index = MODINC_POW2(pi->nphy_noise_index,
PHY_NOISE_WINDOW_SZ);
-
noise_dbm = PHY_NOISE_FIXED_VAL_NPHY;
} else {
-
noise_dbm = PHY_NOISE_FIXED_VAL;
}
@@ -2469,15 +2465,14 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
wlapi_enable_mac(pi->sh->physhim);
for (i = 0; i < pi->pubpi.phy_corenum; i++)
- cmplx_pwr[i] =
- (est[i].i_pwr +
- est[i].q_pwr) >> log_num_samps;
+ cmplx_pwr[i] = (est[i].i_pwr + est[i].q_pwr) >>
+ log_num_samps;
wlc_phy_noise_calc_phy(pi, cmplx_pwr, noise_dbm_ant);
for (i = 0; i < pi->pubpi.phy_corenum; i++) {
pi->nphy_noise_win[i][pi->nphy_noise_index] =
- noise_dbm_ant[i];
+ noise_dbm_ant[i];
if (noise_dbm_ant[i] > noise_dbm)
noise_dbm = noise_dbm_ant[i];
@@ -2489,7 +2484,7 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
}
}
- done:
+done:
if (!wait_for_intr)
wlc_phy_noise_cb(pi, ch, noise_dbm);
@@ -2505,124 +2500,7 @@ void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *pih)
wlc_phy_noise_sample_request(pih, PHY_NOISE_SAMPLE_EXTERNAL, channel);
}
-static void wlc_phy_noise_cb(struct brcms_phy *pi, u8 channel, s8 noise_dbm)
-{
- if (!pi->phynoise_state)
- return;
-
- if (pi->phynoise_state & PHY_NOISE_STATE_MON) {
- if (pi->phynoise_chan_watchdog == channel) {
- pi->sh->phy_noise_window[pi->sh->phy_noise_index] =
- noise_dbm;
- pi->sh->phy_noise_index =
- MODINC(pi->sh->phy_noise_index, MA_WINDOW_SZ);
- }
- pi->phynoise_state &= ~PHY_NOISE_STATE_MON;
- }
-
- if (pi->phynoise_state & PHY_NOISE_STATE_EXTERNAL) {
- pi->phynoise_state &= ~PHY_NOISE_STATE_EXTERNAL;
- }
-
-}
-
-static s8 wlc_phy_noise_read_shmem(struct brcms_phy *pi)
-{
- u32 cmplx_pwr[PHY_CORE_MAX];
- s8 noise_dbm_ant[PHY_CORE_MAX];
- u16 lo, hi;
- u32 cmplx_pwr_tot = 0;
- s8 noise_dbm = PHY_NOISE_FIXED_VAL_NPHY;
- u8 idx, core;
-
- memset((u8 *) cmplx_pwr, 0, sizeof(cmplx_pwr));
- memset((u8 *) noise_dbm_ant, 0, sizeof(noise_dbm_ant));
-
- for (idx = 0, core = 0; core < pi->pubpi.phy_corenum; idx += 2, core++) {
- lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP(idx));
- hi = wlapi_bmac_read_shm(pi->sh->physhim,
- M_PWRIND_MAP(idx + 1));
- cmplx_pwr[core] = (hi << 16) + lo;
- cmplx_pwr_tot += cmplx_pwr[core];
- if (cmplx_pwr[core] == 0) {
- noise_dbm_ant[core] = PHY_NOISE_FIXED_VAL_NPHY;
- } else
- cmplx_pwr[core] >>= PHY_NOISE_SAMPLE_LOG_NUM_UCODE;
- }
-
- if (cmplx_pwr_tot != 0)
- wlc_phy_noise_calc_phy(pi, cmplx_pwr, noise_dbm_ant);
-
- for (core = 0; core < pi->pubpi.phy_corenum; core++) {
- pi->nphy_noise_win[core][pi->nphy_noise_index] =
- noise_dbm_ant[core];
-
- if (noise_dbm_ant[core] > noise_dbm)
- noise_dbm = noise_dbm_ant[core];
- }
- pi->nphy_noise_index =
- MODINC_POW2(pi->nphy_noise_index, PHY_NOISE_WINDOW_SZ);
-
- return noise_dbm;
-
-}
-
-void wlc_phy_noise_sample_intr(struct brcms_phy_pub *pih)
-{
- struct brcms_phy *pi = (struct brcms_phy *) pih;
- u16 jssi_aux;
- u8 channel = 0;
- s8 noise_dbm = PHY_NOISE_FIXED_VAL_NPHY;
-
- if (ISLCNPHY(pi)) {
- u32 cmplx_pwr, cmplx_pwr0, cmplx_pwr1;
- u16 lo, hi;
- s32 pwr_offset_dB, gain_dB;
- u16 status_0, status_1;
-
- jssi_aux = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_AUX);
- channel = jssi_aux & D11_CURCHANNEL_MAX;
-
- lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP0);
- hi = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP1);
- cmplx_pwr0 = (hi << 16) + lo;
-
- lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP2);
- hi = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP3);
- cmplx_pwr1 = (hi << 16) + lo;
- cmplx_pwr = (cmplx_pwr0 + cmplx_pwr1) >> 6;
-
- status_0 = 0x44;
- status_1 = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_0);
- if ((cmplx_pwr > 0 && cmplx_pwr < 500)
- && ((status_1 & 0xc000) == 0x4000)) {
-
- wlc_phy_compute_dB(&cmplx_pwr, &noise_dbm,
- pi->pubpi.phy_corenum);
- pwr_offset_dB = (read_phy_reg(pi, 0x434) & 0xFF);
- if (pwr_offset_dB > 127)
- pwr_offset_dB -= 256;
-
- noise_dbm += (s8) (pwr_offset_dB - 30);
-
- gain_dB = (status_0 & 0x1ff);
- noise_dbm -= (s8) (gain_dB);
- } else {
- noise_dbm = PHY_NOISE_FIXED_VAL_LCNPHY;
- }
- } else if (ISNPHY(pi)) {
-
- jssi_aux = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_AUX);
- channel = jssi_aux & D11_CURCHANNEL_MAX;
-
- noise_dbm = wlc_phy_noise_read_shmem(pi);
- }
-
- wlc_phy_noise_cb(pi, channel, noise_dbm);
-
-}
-
-s8 lcnphy_gain_index_offset_for_pkt_rssi[] = {
+static const s8 lcnphy_gain_index_offset_for_pkt_rssi[] = {
8,
8,
8,
@@ -2678,27 +2556,21 @@ void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_cmplx_pwr_dB, u8 core)
}
}
-void wlc_phy_rssi_compute(struct brcms_phy_pub *pih, void *ctx)
+int wlc_phy_rssi_compute(struct brcms_phy_pub *pih,
+ struct d11rxhdr *rxh)
{
- struct brcms_d11rxhdr *wlc_rxhdr = (struct brcms_d11rxhdr *) ctx;
- struct d11rxhdr *rxh = &wlc_rxhdr->rxhdr;
- int rssi = le16_to_cpu(rxh->PhyRxStatus_1) & PRXS1_JSSI_MASK;
+ int rssi = rxh->PhyRxStatus_1 & PRXS1_JSSI_MASK;
uint radioid = pih->radioid;
struct brcms_phy *pi = (struct brcms_phy *) pih;
- if (NORADIO_ENAB(pi->pubpi)) {
- rssi = BRCMS_RSSI_INVALID;
- goto end;
- }
-
if ((pi->sh->corerev >= 11)
- && !(le16_to_cpu(rxh->RxStatus2) & RXS_PHYRXST_VALID)) {
+ && !(rxh->RxStatus2 & RXS_PHYRXST_VALID)) {
rssi = BRCMS_RSSI_INVALID;
goto end;
}
if (ISLCNPHY(pi)) {
- u8 gidx = (le16_to_cpu(rxh->PhyRxStatus_2) & 0xFC00) >> 10;
+ u8 gidx = (rxh->PhyRxStatus_2 & 0xFC00) >> 10;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (rssi > 127)
@@ -2715,16 +2587,15 @@ void wlc_phy_rssi_compute(struct brcms_phy_pub *pih, void *ctx)
}
if (ISLCNPHY(pi)) {
-
if (rssi > 127)
rssi -= 256;
} else if (radioid == BCM2055_ID || radioid == BCM2056_ID
|| radioid == BCM2057_ID) {
- rssi = wlc_phy_rssi_compute_nphy(pi, wlc_rxhdr);
+ rssi = wlc_phy_rssi_compute_nphy(pi, rxh);
}
- end:
- wlc_rxhdr->rssi = (s8) rssi;
+end:
+ return rssi;
}
void wlc_phy_freqtrack_start(struct brcms_phy_pub *pih)
@@ -2757,28 +2628,22 @@ void wlc_phy_watchdog(struct brcms_phy_pub *pih)
if (!pi->watchdog_override)
return;
- if (!(SCAN_RM_IN_PROGRESS(pi) || PLT_INPROG_PHY(pi))) {
+ if (!(SCAN_RM_IN_PROGRESS(pi) || PLT_INPROG_PHY(pi)))
wlc_phy_noise_sample_request((struct brcms_phy_pub *) pi,
PHY_NOISE_SAMPLE_MON,
CHSPEC_CHANNEL(pi->
radio_chanspec));
- }
- if (pi->phynoise_state && (pi->sh->now - pi->phynoise_now) > 5) {
+ if (pi->phynoise_state && (pi->sh->now - pi->phynoise_now) > 5)
pi->phynoise_state = 0;
- }
if ((!pi->phycal_txpower) ||
((pi->sh->now - pi->phycal_txpower) >= pi->sh->fast_timer)) {
- if (!SCAN_INPROG_PHY(pi) && wlc_phy_cal_txpower_recalc_sw(pi)) {
+ if (!SCAN_INPROG_PHY(pi) && wlc_phy_cal_txpower_recalc_sw(pi))
pi->phycal_txpower = pi->sh->now;
- }
}
- if (NORADIO_ENAB(pi->pubpi))
- return;
-
if ((SCAN_RM_IN_PROGRESS(pi) || PLT_INPROG_PHY(pi)
|| ASSOC_INPROG_PHY(pi)))
return;
@@ -2800,8 +2665,9 @@ void wlc_phy_watchdog(struct brcms_phy_pub *pih)
((pi->sh->now - pi->phy_lastcal) >=
pi->sh->glacial_timer)) {
if (!(SCAN_RM_IN_PROGRESS(pi) || ASSOC_INPROG_PHY(pi)))
- wlc_lcnphy_calib_modes(pi,
- LCNPHY_PERICAL_TEMPBASED_TXPWRCTRL);
+ wlc_lcnphy_calib_modes(
+ pi,
+ LCNPHY_PERICAL_TEMPBASED_TXPWRCTRL);
if (!
(SCAN_RM_IN_PROGRESS(pi) || PLT_INPROG_PHY(pi)
|| ASSOC_INPROG_PHY(pi)
@@ -2819,13 +2685,12 @@ void wlc_phy_BSSinit(struct brcms_phy_pub *pih, bool bonlyap, int rssi)
uint i;
uint k;
- for (i = 0; i < MA_WINDOW_SZ; i++) {
+ for (i = 0; i < MA_WINDOW_SZ; i++)
pi->sh->phy_noise_window[i] = (s8) (rssi & 0xff);
- }
if (ISLCNPHY(pi)) {
for (i = 0; i < MA_WINDOW_SZ; i++)
pi->sh->phy_noise_window[i] =
- PHY_NOISE_FIXED_VAL_LCNPHY;
+ PHY_NOISE_FIXED_VAL_LCNPHY;
}
pi->sh->phy_noise_index = 0;
@@ -2848,72 +2713,9 @@ wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag)
*eps_real -= 0x2000;
}
-static const fixed AtanTbl[] = {
- 2949120,
- 1740967,
- 919879,
- 466945,
- 234379,
- 117304,
- 58666,
- 29335,
- 14668,
- 7334,
- 3667,
- 1833,
- 917,
- 458,
- 229,
- 115,
- 57,
- 29
-};
-
-void wlc_phy_cordic(fixed theta, cs32 *val)
-{
- fixed angle, valtmp;
- unsigned iter;
- int signx = 1;
- int signtheta;
-
- val[0].i = CORDIC_AG;
- val[0].q = 0;
- angle = 0;
-
- signtheta = (theta < 0) ? -1 : 1;
- theta =
- ((theta + FIXED(180) * signtheta) % FIXED(360)) -
- FIXED(180) * signtheta;
-
- if (FLOAT(theta) > 90) {
- theta -= FIXED(180);
- signx = -1;
- } else if (FLOAT(theta) < -90) {
- theta += FIXED(180);
- signx = -1;
- }
-
- for (iter = 0; iter < CORDIC_NI; iter++) {
- if (theta > angle) {
- valtmp = val[0].i - (val[0].q >> iter);
- val[0].q = (val[0].i >> iter) + val[0].q;
- val[0].i = valtmp;
- angle += AtanTbl[iter];
- } else {
- valtmp = val[0].i + (val[0].q >> iter);
- val[0].q = -(val[0].i >> iter) + val[0].q;
- val[0].i = valtmp;
- angle -= AtanTbl[iter];
- }
- }
-
- val[0].i = val[0].i * signx;
- val[0].q = val[0].q * signx;
-}
-
void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi)
{
- wlapi_del_timer(pi->sh->physhim, pi->phycal_timer);
+ wlapi_del_timer(pi->phycal_timer);
pi->cal_type_override = PHY_PERICAL_AUTO;
pi->mphase_cal_phase_id = MPHASE_CAL_STATE_IDLE;
@@ -2928,10 +2730,10 @@ wlc_phy_cal_perical_mphase_schedule(struct brcms_phy *pi, uint delay)
(pi->nphy_perical != PHY_PERICAL_MANUAL))
return;
- wlapi_del_timer(pi->sh->physhim, pi->phycal_timer);
+ wlapi_del_timer(pi->phycal_timer);
pi->mphase_cal_phase_id = MPHASE_CAL_STATE_INIT;
- wlapi_add_timer(pi->sh->physhim, pi->phycal_timer, delay, 0);
+ wlapi_add_timer(pi->phycal_timer, delay, 0);
}
void wlc_phy_cal_perical(struct brcms_phy_pub *pih, u8 reason)
@@ -2954,11 +2756,12 @@ void wlc_phy_cal_perical(struct brcms_phy_pub *pih, u8 reason)
case PHY_PERICAL_PHYINIT:
if (pi->nphy_perical == PHY_PERICAL_MPHASE) {
- if (PHY_PERICAL_MPHASE_PENDING(pi)) {
+ if (PHY_PERICAL_MPHASE_PENDING(pi))
wlc_phy_cal_perical_mphase_reset(pi);
- }
- wlc_phy_cal_perical_mphase_schedule(pi,
- PHY_PERICAL_INIT_DELAY);
+
+ wlc_phy_cal_perical_mphase_schedule(
+ pi,
+ PHY_PERICAL_INIT_DELAY);
}
break;
@@ -2966,17 +2769,16 @@ void wlc_phy_cal_perical(struct brcms_phy_pub *pih, u8 reason)
case PHY_PERICAL_START_IBSS:
case PHY_PERICAL_UP_BSS:
if ((pi->nphy_perical == PHY_PERICAL_MPHASE) &&
- PHY_PERICAL_MPHASE_PENDING(pi)) {
+ PHY_PERICAL_MPHASE_PENDING(pi))
wlc_phy_cal_perical_mphase_reset(pi);
- }
pi->first_cal_after_assoc = true;
pi->cal_type_override = PHY_PERICAL_FULL;
- if (pi->phycal_tempdelta) {
+ if (pi->phycal_tempdelta)
pi->nphy_lastcal_temp = wlc_phy_tempsense_nphy(pi);
- }
+
wlc_phy_cal_perical_nphy_run(pi, PHY_PERICAL_FULL);
break;
@@ -2984,26 +2786,24 @@ void wlc_phy_cal_perical(struct brcms_phy_pub *pih, u8 reason)
if (pi->phycal_tempdelta) {
nphy_currtemp = wlc_phy_tempsense_nphy(pi);
delta_temp =
- (nphy_currtemp > pi->nphy_lastcal_temp) ?
- nphy_currtemp - pi->nphy_lastcal_temp :
- pi->nphy_lastcal_temp - nphy_currtemp;
+ (nphy_currtemp > pi->nphy_lastcal_temp) ?
+ nphy_currtemp - pi->nphy_lastcal_temp :
+ pi->nphy_lastcal_temp - nphy_currtemp;
if ((delta_temp < (s16) pi->phycal_tempdelta) &&
(pi->nphy_txiqlocal_chanspec ==
- pi->radio_chanspec)) {
+ pi->radio_chanspec))
do_periodic_cal = false;
- } else {
+ else
pi->nphy_lastcal_temp = nphy_currtemp;
- }
}
if (do_periodic_cal) {
-
if (pi->nphy_perical == PHY_PERICAL_MPHASE) {
-
if (!PHY_PERICAL_MPHASE_PENDING(pi))
- wlc_phy_cal_perical_mphase_schedule(pi,
- PHY_PERICAL_WDOG_DELAY);
+ wlc_phy_cal_perical_mphase_schedule(
+ pi,
+ PHY_PERICAL_WDOG_DELAY);
} else if (pi->nphy_perical == PHY_PERICAL_SPHASE)
wlc_phy_cal_perical_nphy_run(pi,
PHY_PERICAL_AUTO);
@@ -3025,7 +2825,7 @@ u8 wlc_phy_nbits(s32 value)
s32 abs_val;
u8 nbits = 0;
- abs_val = ABS(value);
+ abs_val = abs(value);
while ((abs_val >> nbits) > 0)
nbits++;
@@ -3040,7 +2840,7 @@ void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain)
pi->sh->hw_phyrxchain = rxchain;
pi->sh->phytxchain = txchain;
pi->sh->phyrxchain = rxchain;
- pi->pubpi.phy_corenum = (u8) PHY_BITSCNT(pi->sh->phyrxchain);
+ pi->pubpi.phy_corenum = (u8)hweight8(pi->sh->phyrxchain);
}
void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain)
@@ -3049,10 +2849,10 @@ void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain)
pi->sh->phytxchain = txchain;
- if (ISNPHY(pi)) {
+ if (ISNPHY(pi))
wlc_phy_rxcore_setstate_nphy(pih, rxchain);
- }
- pi->pubpi.phy_corenum = (u8) PHY_BITSCNT(pi->sh->phyrxchain);
+
+ pi->pubpi.phy_corenum = (u8)hweight8(pi->sh->phyrxchain);
}
void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, u8 *rxchain)
@@ -3095,17 +2895,17 @@ u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih)
return active_bitmap;
}
-s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, chanspec_t chanspec)
+s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, u16 chanspec)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
u8 siso_mcs_id, cdd_mcs_id;
siso_mcs_id =
- (CHSPEC_IS40(chanspec)) ? TXP_FIRST_MCS_40_SISO :
- TXP_FIRST_MCS_20_SISO;
+ (CHSPEC_IS40(chanspec)) ? TXP_FIRST_MCS_40_SISO :
+ TXP_FIRST_MCS_20_SISO;
cdd_mcs_id =
- (CHSPEC_IS40(chanspec)) ? TXP_FIRST_MCS_40_CDD :
- TXP_FIRST_MCS_20_CDD;
+ (CHSPEC_IS40(chanspec)) ? TXP_FIRST_MCS_40_CDD :
+ TXP_FIRST_MCS_20_CDD;
if (pi->tx_power_target[siso_mcs_id] >
(pi->tx_power_target[cdd_mcs_id] + 12))
@@ -3133,12 +2933,13 @@ void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
}
ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(chipcregs_t, gpiocontrol), ~0x0,
- 0x0);
+ offsetof(struct chipcregs, gpiocontrol),
+ ~0x0, 0x0);
ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(chipcregs_t, gpioout), 0x40, 0x40);
+ offsetof(struct chipcregs, gpioout), 0x40,
+ 0x40);
ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(chipcregs_t, gpioouten), 0x40,
+ offsetof(struct chipcregs, gpioouten), 0x40,
0x40);
} else {
mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2);
@@ -3146,56 +2947,18 @@ void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2);
ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(chipcregs_t, gpioout), 0x40, 0x00);
+ offsetof(struct chipcregs, gpioout), 0x40,
+ 0x00);
ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(chipcregs_t, gpioouten), 0x40, 0x0);
+ offsetof(struct chipcregs, gpioouten), 0x40,
+ 0x0);
ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(chipcregs_t, gpiocontrol), ~0x0,
- 0x40);
+ offsetof(struct chipcregs, gpiocontrol),
+ ~0x0, 0x40);
}
}
}
-static s8
-wlc_user_txpwr_antport_to_rfport(struct brcms_phy *pi, uint chan, u32 band,
- u8 rate)
-{
- s8 offset = 0;
-
- if (!pi->user_txpwr_at_rfport)
- return offset;
- return offset;
-}
-
-static s8 wlc_phy_env_measure_vbat(struct brcms_phy *pi)
-{
- if (ISLCNPHY(pi))
- return wlc_lcnphy_vbatsense(pi, 0);
- else
- return 0;
-}
-
-static s8 wlc_phy_env_measure_temperature(struct brcms_phy *pi)
-{
- if (ISLCNPHY(pi))
- return wlc_lcnphy_tempsense_degree(pi, 0);
- else
- return 0;
-}
-
-static void wlc_phy_upd_env_txpwr_rate_limits(struct brcms_phy *pi, u32 band)
-{
- u8 i;
- s8 temp, vbat;
-
- for (i = 0; i < TXP_NUM_RATES; i++)
- pi->txpwr_env_limit[i] = BRCMS_TXPWR_MAX;
-
- vbat = wlc_phy_env_measure_vbat(pi);
- temp = wlc_phy_env_measure_temperature(pi);
-
-}
-
void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool ldpc)
{
return;
@@ -3208,7 +2971,7 @@ wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset, s8 *ofdmoffset)
*ofdmoffset = 0;
}
-s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, chanspec_t chanspec)
+s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, u16 chanspec)
{
return rssi;
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
index e27d9e95a2d..96e15163222 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
@@ -103,6 +103,9 @@
#define BRCMS_RSSI_INVALID 0 /* invalid RSSI value */
+struct d11regs;
+struct phy_shim_info;
+
struct txpwr_limits {
u8 cck[BRCMS_NUM_RATES_CCK];
u8 ofdm[BRCMS_NUM_RATES_OFDM];
@@ -126,42 +129,44 @@ struct txpwr_limits {
struct tx_power {
u32 flags;
- chanspec_t chanspec; /* txpwr report for this channel */
- chanspec_t local_chanspec; /* channel on which we are associated */
- u8 local_max; /* local max according to the AP */
- u8 local_constraint; /* local constraint according to the AP */
- s8 antgain[2]; /* Ant gain for each band - from SROM */
- u8 rf_cores; /* count of RF Cores being reported */
- u8 est_Pout[4]; /* Latest tx power out estimate per RF chain */
- u8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain
- * without adjustment
- */
- u8 est_Pout_cck; /* Latest CCK tx power out estimate */
- u8 tx_power_max[4]; /* Maximum target power among all rates */
- u8 tx_power_max_rate_ind[4]; /* Index of the rate with the max target power */
- u8 user_limit[WL_TX_POWER_RATES]; /* User limit */
- u8 reg_limit[WL_TX_POWER_RATES]; /* Regulatory power limit */
- u8 board_limit[WL_TX_POWER_RATES]; /* Max power board can support (SROM) */
- u8 target[WL_TX_POWER_RATES]; /* Latest target power */
+ u16 chanspec; /* txpwr report for this channel */
+ u16 local_chanspec; /* channel on which we are associated */
+ u8 local_max; /* local max according to the AP */
+ u8 local_constraint; /* local constraint according to the AP */
+ s8 antgain[2]; /* Ant gain for each band - from SROM */
+ u8 rf_cores; /* count of RF Cores being reported */
+ u8 est_Pout[4]; /* Latest tx power out estimate per RF chain */
+ u8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain
+ * without adjustment */
+ u8 est_Pout_cck; /* Latest CCK tx power out estimate */
+ u8 tx_power_max[4]; /* Maximum target power among all rates */
+ /* Index of the rate with the max target power */
+ u8 tx_power_max_rate_ind[4];
+ /* User limit */
+ u8 user_limit[WL_TX_POWER_RATES];
+ /* Regulatory power limit */
+ u8 reg_limit[WL_TX_POWER_RATES];
+ /* Max power board can support (SROM) */
+ u8 board_limit[WL_TX_POWER_RATES];
+ /* Latest target power */
+ u8 target[WL_TX_POWER_RATES];
};
struct tx_inst_power {
- u8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */
- u8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */
+ u8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */
+ u8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */
};
-struct chanvec {
+struct brcms_chanvec {
u8 vec[MAXCHANNEL / NBBY];
};
struct shared_phy_params {
struct si_pub *sih;
- void *physhim;
+ struct phy_shim_info *physhim;
uint unit;
uint corerev;
- uint bustype;
uint buscorerev;
- char *vars;
u16 vid;
u16 did;
uint chip;
@@ -177,8 +182,9 @@ struct shared_phy_params {
extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
-extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh, void *regs,
- int bandtype, char *vars, struct wiphy *wiphy);
+extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
+ struct d11regs __iomem *regs,
+ int bandtype, struct wiphy *wiphy);
extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
extern bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
@@ -189,7 +195,7 @@ extern u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
extern void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
extern void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_init(struct brcms_phy_pub *ppi, chanspec_t chanspec);
+extern void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
extern void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
extern int wlc_phy_down(struct brcms_phy_pub *ppi);
extern u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
@@ -197,14 +203,15 @@ extern void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
extern void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
extern void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi,
- chanspec_t chanspec);
-extern chanspec_t wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
+ u16 chanspec);
+extern u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
extern void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi,
- chanspec_t newch);
+ u16 newch);
extern u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
extern void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
-extern void wlc_phy_rssi_compute(struct brcms_phy_pub *pih, void *ctx);
+extern int wlc_phy_rssi_compute(struct brcms_phy_pub *pih,
+ struct d11rxhdr *rxh);
extern void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
extern void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
extern bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
@@ -220,9 +227,9 @@ extern void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
extern void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
bool wide_filter);
extern void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
- chanvec_t *channels);
-extern chanspec_t wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi,
- uint band);
+ struct brcms_chanvec *channels);
+extern u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi,
+ uint band);
extern void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan,
u8 *_min_, u8 *_max_, int rate);
@@ -232,7 +239,7 @@ extern void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi,
uint band, s32 *, s32 *, u32 *);
extern void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi,
struct txpwr_limits *,
- chanspec_t chanspec);
+ u16 chanspec);
extern int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm,
bool *override);
extern int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm,
@@ -254,7 +261,7 @@ extern void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain,
u8 *rxchain);
extern u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
extern s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih,
- chanspec_t chanspec);
+ u16 chanspec);
extern void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
extern void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
@@ -264,8 +271,8 @@ extern void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
extern void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
extern void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
-extern void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, mbool id, bool val);
-extern void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, mbool flags);
+extern void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
+extern void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
extern void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
@@ -289,6 +296,6 @@ extern void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
extern const u8 *wlc_phy_get_ofdm_rate_lookup(void);
extern s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
- u8 mcs_offset);
+ u8 mcs_offset);
extern s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
-#endif /* _BRCM_PHY_HAL_H_ */
+#endif /* _BRCM_PHY_HAL_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
index a01b01ccd9f..bea85241a24 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
@@ -23,53 +23,26 @@
#define PHY_VERSION { 1, 82, 8, 0 }
-#define PHYHAL_ERROR 0x0001
-#define PHYHAL_TRACE 0x0002
-#define PHYHAL_INFORM 0x0004
-
-extern u32 phyhal_msg_level;
-
-#define PHY_INFORM_ON() (phyhal_msg_level & PHYHAL_INFORM)
-#define PHY_THERMAL_ON() (phyhal_msg_level & PHYHAL_THERMAL)
-#define PHY_CAL_ON() (phyhal_msg_level & PHYHAL_CAL)
-
-#ifdef BOARD_TYPE
-#define BOARDTYPE(_type) BOARD_TYPE
-#else
-#define BOARDTYPE(_type) _type
-#endif
-
#define LCNXN_BASEREV 16
+struct phy_shim_info;
+
struct brcms_phy_srom_fem {
- u8 tssipos; /* TSSI positive slope, 1: positive, 0: negative */
- u8 extpagain; /* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */
- u8 pdetrange; /* support 32 combinations of different Pdet dynamic ranges */
- u8 triso; /* TR switch isolation */
- u8 antswctrllut; /* antswctrl lookup table configuration: 32 possible choices */
+ /* TSSI positive slope, 1: positive, 0: negative */
+ u8 tssipos;
+ /* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */
+ u8 extpagain;
+ /* support 32 combinations of different Pdet dynamic ranges */
+ u8 pdetrange;
+ /* TR switch isolation */
+ u8 triso;
+ /* antswctrl lookup table configuration: 32 possible choices */
+ u8 antswctrllut;
};
-typedef void (*initfn_t) (struct brcms_phy *);
-typedef void (*chansetfn_t) (struct brcms_phy *, chanspec_t);
-typedef int (*longtrnfn_t) (struct brcms_phy *, int);
-typedef void (*txiqccgetfn_t) (struct brcms_phy *, u16 *, u16 *);
-typedef void (*txiqccsetfn_t) (struct brcms_phy *, u16, u16);
-typedef u16(*txloccgetfn_t) (struct brcms_phy *);
-typedef void (*radioloftgetfn_t) (struct brcms_phy *, u8 *, u8 *, u8 *,
- u8 *);
-typedef s32(*rxsigpwrfn_t) (struct brcms_phy *, s32);
-typedef void (*detachfn_t) (struct brcms_phy *);
-
-#undef ISNPHY
-#undef ISLCNPHY
#define ISNPHY(pi) PHYTYPE_IS((pi)->pubpi.phy_type, PHY_TYPE_N)
#define ISLCNPHY(pi) PHYTYPE_IS((pi)->pubpi.phy_type, PHY_TYPE_LCN)
-#define ISPHY_11N_CAP(pi) (ISNPHY(pi) || ISLCNPHY(pi))
-
-#define IS20MHZ(pi) ((pi)->bw == WL_CHANSPEC_BW_20)
-#define IS40MHZ(pi) ((pi)->bw == WL_CHANSPEC_BW_40)
-
#define PHY_GET_RFATTN(rfgain) ((rfgain) & 0x0f)
#define PHY_GET_PADMIX(rfgain) (((rfgain) & 0x10) >> 4)
#define PHY_GET_RFGAINID(rfattn, padmix, width) ((rfattn) + ((padmix)*(width)))
@@ -203,7 +176,9 @@ typedef void (*detachfn_t) (struct brcms_phy *);
#define PHY_PERICAL_WDOG_DELAY 5
#define MPHASE_TXCAL_NUMCMDS 2
-#define PHY_PERICAL_MPHASE_PENDING(pi) (pi->mphase_cal_phase_id > MPHASE_CAL_STATE_IDLE)
+
+#define PHY_PERICAL_MPHASE_PENDING(pi) \
+ (pi->mphase_cal_phase_id > MPHASE_CAL_STATE_IDLE)
enum {
MPHASE_CAL_STATE_IDLE = 0,
@@ -248,47 +223,29 @@ enum phy_cal_mode {
#define CORDIC_AG 39797
#define CORDIC_NI 18
#define FIXED(X) ((s32)((X) << 16))
-#define FLOAT(X) (((X) >= 0) ? ((((X) >> 15) + 1) >> 1) : -((((-(X)) >> 15) + 1) >> 1))
+
+#define FLOAT(X) \
+ (((X) >= 0) ? ((((X) >> 15) + 1) >> 1) : -((((-(X)) >> 15) + 1) >> 1))
#define PHY_CHAIN_TX_DISABLE_TEMP 115
#define PHY_HYSTERESIS_DELTATEMP 5
-#define PHY_BITSCNT(x) brcmu_bitcount((u8 *)&(x), sizeof(u8))
-
-#define MOD_PHY_REG(pi, phy_type, reg_name, field, value) \
- mod_phy_reg(pi, phy_type##_##reg_name, phy_type##_##reg_name##_##field##_MASK, \
- (value) << phy_type##_##reg_name##_##field##_##SHIFT);
-#define READ_PHY_REG(pi, phy_type, reg_name, field) \
- ((read_phy_reg(pi, phy_type##_##reg_name) & phy_type##_##reg_name##_##field##_##MASK)\
- >> phy_type##_##reg_name##_##field##_##SHIFT)
+#define SCAN_INPROG_PHY(pi) \
+ (mboolisset(pi->measure_hold, PHY_HOLD_FOR_SCAN))
-#define VALID_PHYTYPE(phytype) (((uint)phytype == PHY_TYPE_N) || \
- ((uint)phytype == PHY_TYPE_LCN))
+#define PLT_INPROG_PHY(pi) (mboolisset(pi->measure_hold, PHY_HOLD_FOR_PLT))
-#define VALID_N_RADIO(radioid) ((radioid == BCM2055_ID) || (radioid == BCM2056_ID) || \
- (radioid == BCM2057_ID))
-#define VALID_LCN_RADIO(radioid) (radioid == BCM2064_ID)
+#define ASSOC_INPROG_PHY(pi) \
+ (mboolisset(pi->measure_hold, PHY_HOLD_FOR_ASSOC))
-#define VALID_RADIO(pi, radioid) (\
- (ISNPHY(pi) ? VALID_N_RADIO(radioid) : false) || \
- (ISLCNPHY(pi) ? VALID_LCN_RADIO(radioid) : false))
+#define SCAN_RM_IN_PROGRESS(pi) \
+ (mboolisset(pi->measure_hold, PHY_HOLD_FOR_SCAN | PHY_HOLD_FOR_RM))
-#define SCAN_INPROG_PHY(pi) (mboolisset(pi->measure_hold, PHY_HOLD_FOR_SCAN))
-#define RM_INPROG_PHY(pi) (mboolisset(pi->measure_hold, PHY_HOLD_FOR_RM))
-#define PLT_INPROG_PHY(pi) (mboolisset(pi->measure_hold, PHY_HOLD_FOR_PLT))
-#define ASSOC_INPROG_PHY(pi) (mboolisset(pi->measure_hold, PHY_HOLD_FOR_ASSOC))
-#define SCAN_RM_IN_PROGRESS(pi) (mboolisset(pi->measure_hold, PHY_HOLD_FOR_SCAN | PHY_HOLD_FOR_RM))
-#define PHY_MUTED(pi) (mboolisset(pi->measure_hold, PHY_HOLD_FOR_MUTE))
-#define PUB_NOT_ASSOC(pi) (mboolisset(pi->measure_hold, PHY_HOLD_FOR_NOT_ASSOC))
+#define PHY_MUTED(pi) \
+ (mboolisset(pi->measure_hold, PHY_HOLD_FOR_MUTE))
-#if defined(EXT_CBALL)
-#define NORADIO_ENAB(pub) ((pub).radioid == NORADIO_ID)
-#else
-#define NORADIO_ENAB(pub) 0
-#endif
-
-#define PHY_LTRN_LIST_LEN 64
-extern u16 ltrn_list[PHY_LTRN_LIST_LEN];
+#define PUB_NOT_ASSOC(pi) \
+ (mboolisset(pi->measure_hold, PHY_HOLD_FOR_NOT_ASSOC))
struct phy_table_info {
uint table;
@@ -532,7 +489,7 @@ struct shared_phy {
struct brcms_phy *phy_head;
uint unit;
struct si_pub *sih;
- void *physhim;
+ struct phy_shim_info *physhim;
uint corerev;
u32 machwcap;
bool up;
@@ -549,7 +506,6 @@ struct shared_phy {
uint boardvendor;
u32 boardflags;
u32 boardflags2;
- uint bustype;
uint buscorerev;
uint fast_timer;
uint slow_timer;
@@ -579,41 +535,39 @@ struct brcms_phy_pub {
};
struct phy_func_ptr {
- initfn_t init;
- initfn_t calinit;
- chansetfn_t chanset;
- initfn_t txpwrrecalc;
- longtrnfn_t longtrn;
- txiqccgetfn_t txiqccget;
- txiqccsetfn_t txiqccset;
- txloccgetfn_t txloccget;
- radioloftgetfn_t radioloftget;
- initfn_t carrsuppr;
- rxsigpwrfn_t rxsigpwr;
- detachfn_t detach;
+ void (*init)(struct brcms_phy *);
+ void (*calinit)(struct brcms_phy *);
+ void (*chanset)(struct brcms_phy *, u16 chanspec);
+ void (*txpwrrecalc)(struct brcms_phy *);
+ int (*longtrn)(struct brcms_phy *, int);
+ void (*txiqccget)(struct brcms_phy *, u16 *, u16 *);
+ void (*txiqccset)(struct brcms_phy *, u16, u16);
+ u16 (*txloccget)(struct brcms_phy *);
+ void (*radioloftget)(struct brcms_phy *, u8 *, u8 *, u8 *, u8 *);
+ void (*carrsuppr)(struct brcms_phy *);
+ s32 (*rxsigpwr)(struct brcms_phy *, s32);
+ void (*detach)(struct brcms_phy *);
};
struct brcms_phy {
struct brcms_phy_pub pubpi_ro;
struct shared_phy *sh;
struct phy_func_ptr pi_fptr;
- void *pi_ptr;
union {
struct brcms_phy_lcnphy *pi_lcnphy;
} u;
bool user_txpwr_at_rfport;
- d11regs_t *regs;
+ struct d11regs __iomem *regs;
struct brcms_phy *next;
- char *vars;
struct brcms_phy_pub pubpi;
bool do_initcal;
bool phytest_on;
bool ofdm_rateset_war;
bool bf_preempt_4306;
- chanspec_t radio_chanspec;
+ u16 radio_chanspec;
u8 antsel_type;
u16 bw;
u8 txpwr_percent;
@@ -629,7 +583,7 @@ struct brcms_phy {
int phynoise_chan_watchdog;
bool phynoise_polling;
bool disable_percal;
- mbool measure_hold;
+ u32 measure_hold;
s16 txpa_2g[PWRTBL_NUM_COEFF];
s16 txpa_2g_low_temp[PWRTBL_NUM_COEFF];
@@ -723,7 +677,7 @@ struct brcms_phy {
u16 mintxbias;
u16 mintxmag;
struct lo_complex_abgphy_info gphy_locomp_iq
- [STATIC_NUM_RF][STATIC_NUM_BB];
+ [STATIC_NUM_RF][STATIC_NUM_BB];
s8 stats_11b_txpower[STATIC_NUM_RF][STATIC_NUM_BB];
u16 gain_table[TX_GAIN_TABLE_LENGTH];
bool loopback_gain;
@@ -840,11 +794,11 @@ struct brcms_phy {
u8 mphase_txcal_cmdidx;
u8 mphase_txcal_numcmds;
u16 mphase_txcal_bestcoeffs[11];
- chanspec_t nphy_txiqlocal_chanspec;
- chanspec_t nphy_iqcal_chanspec_2G;
- chanspec_t nphy_iqcal_chanspec_5G;
- chanspec_t nphy_rssical_chanspec_2G;
- chanspec_t nphy_rssical_chanspec_5G;
+ u16 nphy_txiqlocal_chanspec;
+ u16 nphy_iqcal_chanspec_2G;
+ u16 nphy_iqcal_chanspec_5G;
+ u16 nphy_rssical_chanspec_2G;
+ u16 nphy_rssical_chanspec_5G;
struct wlapi_timer *phycal_timer;
bool use_int_tx_iqlo_cal_nphy;
bool internal_tx_iqlo_cal_tapoff_intpa_nphy;
@@ -937,9 +891,9 @@ struct brcms_phy {
struct wiphy *wiphy;
};
-struct _cs32 {
- fixed q;
- fixed i;
+struct cs32 {
+ s32 q;
+ s32 i;
};
struct radio_regs {
@@ -964,29 +918,6 @@ struct lcnphy_radio_regs {
u8 do_init_g;
};
-extern struct lcnphy_radio_regs lcnphy_radio_regs_2064[];
-extern struct lcnphy_radio_regs lcnphy_radio_regs_2066[];
-extern struct radio_regs regs_2055[], regs_SYN_2056[], regs_TX_2056[],
- regs_RX_2056[];
-extern struct radio_regs regs_SYN_2056_A1[], regs_TX_2056_A1[],
- regs_RX_2056_A1[];
-extern struct radio_regs regs_SYN_2056_rev5[], regs_TX_2056_rev5[],
- regs_RX_2056_rev5[];
-extern struct radio_regs regs_SYN_2056_rev6[], regs_TX_2056_rev6[],
- regs_RX_2056_rev6[];
-extern struct radio_regs regs_SYN_2056_rev7[], regs_TX_2056_rev7[],
- regs_RX_2056_rev7[];
-extern struct radio_regs regs_SYN_2056_rev8[], regs_TX_2056_rev8[],
- regs_RX_2056_rev8[];
-extern struct radio_20xx_regs regs_2057_rev4[], regs_2057_rev5[],
- regs_2057_rev5v1[];
-extern struct radio_20xx_regs regs_2057_rev7[], regs_2057_rev8[];
-
-extern char *phy_getvar(struct brcms_phy *pi, const char *name);
-extern int phy_getintvar(struct brcms_phy *pi, const char *name);
-#define PHY_GETVAR(pi, name) phy_getvar(pi, name)
-#define PHY_GETINTVAR(pi, name) phy_getintvar(pi, name)
-
extern u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
extern void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
extern void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
@@ -1022,14 +953,13 @@ extern void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
extern void write_phy_channel_reg(struct brcms_phy *pi, uint val);
extern void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
-extern void wlc_phy_cordic(fixed theta, cs32 *val);
extern u8 wlc_phy_nbits(s32 value);
extern void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
extern uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
struct radio_20xx_regs *radioregs);
extern uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
- struct radio_regs *radioregs,
+ const struct radio_regs *radioregs,
u16 core_offset);
extern void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
@@ -1053,14 +983,14 @@ extern void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
extern void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
extern void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi,
- chanspec_t chanspec);
+ u16 chanspec);
extern void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi,
- chanspec_t chanspec);
+ u16 chanspec);
extern void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi,
- chanspec_t chanspec);
+ u16 chanspec);
extern int wlc_phy_channel2freq(uint channel);
extern int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
-extern int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, chanspec_t);
+extern int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
extern void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
extern s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
@@ -1142,13 +1072,17 @@ extern void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi,
bool enable);
extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
-#define wlc_phy_write_table_nphy(pi, pti) wlc_phy_write_table(pi, pti, 0x72, \
- 0x74, 0x73)
-#define wlc_phy_read_table_nphy(pi, pti) wlc_phy_read_table(pi, pti, 0x72, \
- 0x74, 0x73)
-#define wlc_nphy_table_addr(pi, id, off) wlc_phy_table_addr((pi), (id), (off), \
- 0x72, 0x74, 0x73)
-#define wlc_nphy_table_data_write(pi, w, v) wlc_phy_table_data_write((pi), (w), (v))
+#define wlc_phy_write_table_nphy(pi, pti) \
+ wlc_phy_write_table(pi, pti, 0x72, 0x74, 0x73)
+
+#define wlc_phy_read_table_nphy(pi, pti) \
+ wlc_phy_read_table(pi, pti, 0x72, 0x74, 0x73)
+
+#define wlc_nphy_table_addr(pi, id, off) \
+ wlc_phy_table_addr((pi), (id), (off), 0x72, 0x74, 0x73)
+
+#define wlc_nphy_table_data_write(pi, w, v) \
+ wlc_phy_table_data_write((pi), (w), (v))
extern void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o,
u32 w, void *d);
@@ -1160,7 +1094,7 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
(pi->ipa5g_on && CHSPEC_IS5G(pi->radio_chanspec)))
#define BRCMS_PHY_WAR_PR51571(pi) \
- if (((pi)->sh->bustype == PCI_BUS) && NREV_LT((pi)->pubpi.phy_rev, 3)) \
+ if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
(void)R_REG(&(pi)->regs->maccontrol)
extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
@@ -1219,7 +1153,7 @@ extern void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
extern void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
extern int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi,
- struct brcms_d11rxhdr *wlc_rxh);
+ struct d11rxhdr *rxh);
#define NPHY_TESTPATTERN_BPHY_EVM 0
#define NPHY_TESTPATTERN_BPHY_RFCS 1
@@ -1229,7 +1163,7 @@ extern void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
void wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset,
s8 *ofdmoffset);
extern s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi,
- chanspec_t chanspec);
+ u16 chanspec);
extern bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
#endif /* _BRCM_PHY_INT_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index 6a3fbe67302..a63aa99d981 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -14,7 +14,9 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/cordic.h>
#include <pmu.h>
#include <d11.h>
@@ -67,6 +69,14 @@
#define LCN_TEMPSENSE_OFFSET 80812
#define LCN_TEMPSENSE_DEN 2647
+#define LCN_BW_LMT 200
+#define LCN_CUR_LMT 1250
+#define LCN_MULT 1
+#define LCN_VCO_DIV 30
+#define LCN_OFFSET 680
+#define LCN_FACT 490
+#define LCN_CUR_DIV 2640
+
#define LCNPHY_txgainctrlovrval1_pagain_ovr_val1_SHIFT \
(0 + 8)
#define LCNPHY_txgainctrlovrval1_pagain_ovr_val1_MASK \
@@ -79,15 +89,15 @@
#define wlc_lcnphy_enable_tx_gain_override(pi) \
wlc_lcnphy_set_tx_gain_override(pi, true)
-#define wlc_lcnphy_disable_tx_gain_override(pi) \
+#define wlc_lcnphy_disable_tx_gain_override(pi) \
wlc_lcnphy_set_tx_gain_override(pi, false)
#define wlc_lcnphy_iqcal_active(pi) \
(read_phy_reg((pi), 0x451) & \
- ((0x1 << 15) | (0x1 << 14)))
+ ((0x1 << 15) | (0x1 << 14)))
#define txpwrctrl_off(pi) (0x7 != ((read_phy_reg(pi, 0x4a4) & 0xE000) >> 13))
-#define wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi) \
+#define wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi) \
(pi->temppwrctrl_capable)
#define wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi) \
(pi->hwpwrctrl_capable)
@@ -132,12 +142,12 @@
#define LCNPHY_ACI_DETECT_TIMEOUT 2
#define LCNPHY_ACI_START_DELAY 0
-#define wlc_lcnphy_tx_gain_override_enabled(pi) \
+#define wlc_lcnphy_tx_gain_override_enabled(pi) \
(0 != (read_phy_reg((pi), 0x43b) & (0x1 << 6)))
#define wlc_lcnphy_total_tx_frames(pi) \
- wlapi_bmac_read_shm((pi)->sh->physhim, \
- M_UCODE_MACSTAT + offsetof(struct macstat, txallfrm))
+ wlapi_bmac_read_shm((pi)->sh->physhim, M_UCODE_MACSTAT + \
+ offsetof(struct macstat, txallfrm))
struct lcnphy_txgains {
u16 gm_gain;
@@ -198,7 +208,7 @@ static const iqcal_gain_params_lcnphy *tbl_iqcal_gainparams_lcnphy[1] = {
static const u16 iqcal_gainparams_numgains_lcnphy[1] = {
sizeof(tbl_iqcal_gainparams_lcnphy_2G) /
- sizeof(*tbl_iqcal_gainparams_lcnphy_2G),
+ sizeof(*tbl_iqcal_gainparams_lcnphy_2G),
};
static const struct lcnphy_sfo_cfg lcnphy_sfo_cfg[] = {
@@ -548,7 +558,7 @@ struct chan_info_2064_lcnphy {
u8 rxrf_rxrf_spare1;
};
-static struct chan_info_2064_lcnphy chan_info_2064_lcnphy[] = {
+static const struct chan_info_2064_lcnphy chan_info_2064_lcnphy[] = {
{1, 2412, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{2, 2417, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{3, 2422, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
@@ -565,7 +575,7 @@ static struct chan_info_2064_lcnphy chan_info_2064_lcnphy[] = {
{14, 2484, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
};
-struct lcnphy_radio_regs lcnphy_radio_regs_2064[] = {
+static const struct lcnphy_radio_regs lcnphy_radio_regs_2064[] = {
{0x00, 0, 0, 0, 0},
{0x01, 0x64, 0x64, 0, 0},
{0x02, 0x20, 0x20, 0, 0},
@@ -877,7 +887,7 @@ struct lcnphy_radio_regs lcnphy_radio_regs_2064[] = {
#define LCNPHY_NUM_DIG_FILT_COEFFS 16
#define LCNPHY_NUM_TX_DIG_FILTERS_CCK 13
-u16 LCNPHY_txdigfiltcoeffs_cck[LCNPHY_NUM_TX_DIG_FILTERS_CCK]
+static const u16 LCNPHY_txdigfiltcoeffs_cck[LCNPHY_NUM_TX_DIG_FILTERS_CCK]
[LCNPHY_NUM_DIG_FILT_COEFFS + 1] = {
{0, 1, 415, 1874, 64, 128, 64, 792, 1656, 64, 128, 64, 778, 1582, 64,
128, 64,},
@@ -908,7 +918,7 @@ u16 LCNPHY_txdigfiltcoeffs_cck[LCNPHY_NUM_TX_DIG_FILTERS_CCK]
};
#define LCNPHY_NUM_TX_DIG_FILTERS_OFDM 3
-u16 LCNPHY_txdigfiltcoeffs_ofdm[LCNPHY_NUM_TX_DIG_FILTERS_OFDM]
+static const u16 LCNPHY_txdigfiltcoeffs_ofdm[LCNPHY_NUM_TX_DIG_FILTERS_OFDM]
[LCNPHY_NUM_DIG_FILT_COEFFS + 1] = {
{0, 0, 0xa2, 0x0, 0x100, 0x100, 0x0, 0x0, 0x0, 0x100, 0x0, 0x0,
0x278, 0xfea0, 0x80, 0x100, 0x80,},
@@ -920,108 +930,50 @@ u16 LCNPHY_txdigfiltcoeffs_ofdm[LCNPHY_NUM_TX_DIG_FILTERS_OFDM]
#define wlc_lcnphy_set_start_tx_pwr_idx(pi, idx) \
mod_phy_reg(pi, 0x4a4, \
- (0x1ff << 0), \
- (u16)(idx) << 0)
+ (0x1ff << 0), \
+ (u16)(idx) << 0)
#define wlc_lcnphy_set_tx_pwr_npt(pi, npt) \
mod_phy_reg(pi, 0x4a5, \
- (0x7 << 8), \
- (u16)(npt) << 8)
+ (0x7 << 8), \
+ (u16)(npt) << 8)
#define wlc_lcnphy_get_tx_pwr_ctrl(pi) \
(read_phy_reg((pi), 0x4a4) & \
- ((0x1 << 15) | \
- (0x1 << 14) | \
- (0x1 << 13)))
+ ((0x1 << 15) | \
+ (0x1 << 14) | \
+ (0x1 << 13)))
#define wlc_lcnphy_get_tx_pwr_npt(pi) \
((read_phy_reg(pi, 0x4a5) & \
- (0x7 << 8)) >> \
- 8)
+ (0x7 << 8)) >> \
+ 8)
#define wlc_lcnphy_get_current_tx_pwr_idx_if_pwrctrl_on(pi) \
(read_phy_reg(pi, 0x473) & 0x1ff)
#define wlc_lcnphy_get_target_tx_pwr(pi) \
((read_phy_reg(pi, 0x4a7) & \
- (0xff << 0)) >> \
- 0)
+ (0xff << 0)) >> \
+ 0)
#define wlc_lcnphy_set_target_tx_pwr(pi, target) \
mod_phy_reg(pi, 0x4a7, \
- (0xff << 0), \
- (u16)(target) << 0)
+ (0xff << 0), \
+ (u16)(target) << 0)
+
+#define wlc_radio_2064_rcal_done(pi) \
+ (0 != (read_radio_reg(pi, RADIO_2064_REG05C) & 0x20))
+
+#define tempsense_done(pi) \
+ (0x8000 == (read_phy_reg(pi, 0x476) & 0x8000))
-#define wlc_radio_2064_rcal_done(pi) (0 != (read_radio_reg(pi, RADIO_2064_REG05C) & 0x20))
-#define tempsense_done(pi) (0x8000 == (read_phy_reg(pi, 0x476) & 0x8000))
+#define LCNPHY_IQLOCC_READ(val) \
+ ((u8)(-(s8)(((val) & 0xf0) >> 4) + (s8)((val) & 0x0f)))
-#define LCNPHY_IQLOCC_READ(val) ((u8)(-(s8)(((val) & 0xf0) >> 4) + (s8)((val) & 0x0f)))
#define FIXED_TXPWR 78
#define LCNPHY_TEMPSENSE(val) ((s16)((val > 255) ? (val - 512) : val))
-static u32 wlc_lcnphy_qdiv_roundup(u32 divident, u32 divisor,
- u8 precision);
-static void wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
- u16 ext_lna, u16 trsw,
- u16 biq2, u16 biq1,
- u16 tia, u16 lna2,
- u16 lna1);
-static void wlc_lcnphy_clear_tx_power_offsets(struct brcms_phy *pi);
-static void wlc_lcnphy_set_pa_gain(struct brcms_phy *pi, u16 gain);
-static void wlc_lcnphy_set_trsw_override(struct brcms_phy *pi, bool tx,
- bool rx);
-static void wlc_lcnphy_set_bbmult(struct brcms_phy *pi, u8 m0);
-static u8 wlc_lcnphy_get_bbmult(struct brcms_phy *pi);
-static void wlc_lcnphy_get_tx_gain(struct brcms_phy *pi,
- struct lcnphy_txgains *gains);
-static void wlc_lcnphy_set_tx_gain_override(struct brcms_phy *pi, bool bEnable);
-static void wlc_lcnphy_toggle_afe_pwdn(struct brcms_phy *pi);
-static void wlc_lcnphy_rx_gain_override_enable(struct brcms_phy *pi,
- bool enable);
-static void wlc_lcnphy_set_tx_gain(struct brcms_phy *pi,
- struct lcnphy_txgains *target_gains);
-static bool wlc_lcnphy_rx_iq_est(struct brcms_phy *pi, u16 num_samps,
- u8 wait_time, struct lcnphy_iq_est *iq_est);
-static bool wlc_lcnphy_calc_rx_iq_comp(struct brcms_phy *pi, u16 num_samps);
-static u16 wlc_lcnphy_get_pa_gain(struct brcms_phy *pi);
-static void wlc_lcnphy_afe_clk_init(struct brcms_phy *pi, u8 mode);
-static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi);
-static void wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi,
- u8 channel);
-
-static void wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
- const struct lcnphy_tx_gain_tbl_entry *g);
-
-static void wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo,
- u16 thresh, s16 *ptr, int mode);
-static int wlc_lcnphy_calc_floor(s16 coeff, int type);
-static void wlc_lcnphy_tx_iqlo_loopback(struct brcms_phy *pi,
- u16 *values_to_save);
-static void wlc_lcnphy_tx_iqlo_loopback_cleanup(struct brcms_phy *pi,
- u16 *values_to_save);
-static void wlc_lcnphy_set_cc(struct brcms_phy *pi, int cal_type, s16 coeff_x,
- s16 coeff_y);
-static struct lcnphy_unsign16_struct wlc_lcnphy_get_cc(struct brcms_phy *pi,
- int cal_type);
-static void wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type,
- int num_levels, int step_size_lg2);
-static void wlc_lcnphy_tx_iqlo_soft_cal_full(struct brcms_phy *pi);
-
-static void wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi,
- chanspec_t chanspec);
-static void wlc_lcnphy_agc_temp_init(struct brcms_phy *pi);
-static void wlc_lcnphy_temp_adj(struct brcms_phy *pi);
-static void wlc_lcnphy_clear_papd_comptable(struct brcms_phy *pi);
-static void wlc_lcnphy_baseband_init(struct brcms_phy *pi);
-static void wlc_lcnphy_radio_init(struct brcms_phy *pi);
-static void wlc_lcnphy_rc_cal(struct brcms_phy *pi);
-static void wlc_lcnphy_rcal(struct brcms_phy *pi);
-static void wlc_lcnphy_txrx_spur_avoidance_mode(struct brcms_phy *pi,
- bool enable);
-static int wlc_lcnphy_load_tx_iir_filter(struct brcms_phy *pi, bool is_ofdm,
- s16 filt_type);
-static void wlc_lcnphy_set_rx_iq_comp(struct brcms_phy *pi, u16 a, u16 b);
-
void wlc_lcnphy_write_table(struct brcms_phy *pi, const struct phytbl_info *pti)
{
wlc_phy_write_table(pi, pti, 0x455, 0x457, 0x456);
@@ -1034,7 +986,7 @@ void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti)
static void
wlc_lcnphy_common_read_table(struct brcms_phy *pi, u32 tbl_id,
- const void *tbl_ptr, u32 tbl_len,
+ const u16 *tbl_ptr, u32 tbl_len,
u32 tbl_width, u32 tbl_offset)
{
struct phytbl_info tab;
@@ -1048,7 +1000,7 @@ wlc_lcnphy_common_read_table(struct brcms_phy *pi, u32 tbl_id,
static void
wlc_lcnphy_common_write_table(struct brcms_phy *pi, u32 tbl_id,
- const void *tbl_ptr, u32 tbl_len,
+ const u16 *tbl_ptr, u32 tbl_len,
u32 tbl_width, u32 tbl_offset)
{
@@ -1092,12 +1044,12 @@ static int wlc_lcnphy_calc_floor(s16 coeff_x, int type)
int k;
k = 0;
if (type == 0) {
- if (coeff_x < 0) {
+ if (coeff_x < 0)
k = (coeff_x - 1) / 2;
- } else {
+ else
k = coeff_x / 2;
- }
}
+
if (type == 1) {
if ((coeff_x + 1) < 0)
k = (coeff_x) / 2;
@@ -1107,20 +1059,265 @@ static int wlc_lcnphy_calc_floor(s16 coeff_x, int type)
return k;
}
-s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi)
+static void
+wlc_lcnphy_get_tx_gain(struct brcms_phy *pi, struct lcnphy_txgains *gains)
{
- s8 index;
+ u16 dac_gain, rfgain0, rfgain1;
+
+ dac_gain = read_phy_reg(pi, 0x439) >> 0;
+ gains->dac_gain = (dac_gain & 0x380) >> 7;
+
+ rfgain0 = (read_phy_reg(pi, 0x4b5) & (0xffff << 0)) >> 0;
+ rfgain1 = (read_phy_reg(pi, 0x4fb) & (0x7fff << 0)) >> 0;
+
+ gains->gm_gain = rfgain0 & 0xff;
+ gains->pga_gain = (rfgain0 >> 8) & 0xff;
+ gains->pad_gain = rfgain1 & 0xff;
+}
+
+
+static void wlc_lcnphy_set_dac_gain(struct brcms_phy *pi, u16 dac_gain)
+{
+ u16 dac_ctrl;
+
+ dac_ctrl = (read_phy_reg(pi, 0x439) >> 0);
+ dac_ctrl = dac_ctrl & 0xc7f;
+ dac_ctrl = dac_ctrl | (dac_gain << 7);
+ mod_phy_reg(pi, 0x439, (0xfff << 0), (dac_ctrl) << 0);
+
+}
+
+static void wlc_lcnphy_set_tx_gain_override(struct brcms_phy *pi, bool bEnable)
+{
+ u16 bit = bEnable ? 1 : 0;
+
+ mod_phy_reg(pi, 0x4b0, (0x1 << 7), bit << 7);
+
+ mod_phy_reg(pi, 0x4b0, (0x1 << 14), bit << 14);
+
+ mod_phy_reg(pi, 0x43b, (0x1 << 6), bit << 6);
+}
+
+static void
+wlc_lcnphy_rx_gain_override_enable(struct brcms_phy *pi, bool enable)
+{
+ u16 ebit = enable ? 1 : 0;
+
+ mod_phy_reg(pi, 0x4b0, (0x1 << 8), ebit << 8);
+
+ mod_phy_reg(pi, 0x44c, (0x1 << 0), ebit << 0);
+
+ if (LCNREV_LT(pi->pubpi.phy_rev, 2)) {
+ mod_phy_reg(pi, 0x44c, (0x1 << 4), ebit << 4);
+ mod_phy_reg(pi, 0x44c, (0x1 << 6), ebit << 6);
+ mod_phy_reg(pi, 0x4b0, (0x1 << 5), ebit << 5);
+ mod_phy_reg(pi, 0x4b0, (0x1 << 6), ebit << 6);
+ } else {
+ mod_phy_reg(pi, 0x4b0, (0x1 << 12), ebit << 12);
+ mod_phy_reg(pi, 0x4b0, (0x1 << 13), ebit << 13);
+ mod_phy_reg(pi, 0x4b0, (0x1 << 5), ebit << 5);
+ }
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ mod_phy_reg(pi, 0x4b0, (0x1 << 10), ebit << 10);
+ mod_phy_reg(pi, 0x4e5, (0x1 << 3), ebit << 3);
+ }
+}
+
+static void
+wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
+ u16 trsw,
+ u16 ext_lna,
+ u16 biq2,
+ u16 biq1,
+ u16 tia, u16 lna2, u16 lna1)
+{
+ u16 gain0_15, gain16_19;
+
+ gain16_19 = biq2 & 0xf;
+ gain0_15 = ((biq1 & 0xf) << 12) |
+ ((tia & 0xf) << 8) |
+ ((lna2 & 0x3) << 6) |
+ ((lna2 &
+ 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
+
+ mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
+ mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
+ mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
+
+ if (LCNREV_LT(pi->pubpi.phy_rev, 2)) {
+ mod_phy_reg(pi, 0x4b1, (0x1 << 9), ext_lna << 9);
+ mod_phy_reg(pi, 0x4b1, (0x1 << 10), ext_lna << 10);
+ } else {
+ mod_phy_reg(pi, 0x4b1, (0x1 << 10), 0 << 10);
+
+ mod_phy_reg(pi, 0x4b1, (0x1 << 15), 0 << 15);
+
+ mod_phy_reg(pi, 0x4b1, (0x1 << 9), ext_lna << 9);
+ }
+
+ mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);
+
+}
+
+static void wlc_lcnphy_set_trsw_override(struct brcms_phy *pi, bool tx, bool rx)
+{
+
+ mod_phy_reg(pi, 0x44d,
+ (0x1 << 1) |
+ (0x1 << 0), (tx ? (0x1 << 1) : 0) | (rx ? (0x1 << 0) : 0));
+
+ or_phy_reg(pi, 0x44c, (0x1 << 1) | (0x1 << 0));
+}
+
+static void wlc_lcnphy_clear_trsw_override(struct brcms_phy *pi)
+{
+
+ and_phy_reg(pi, 0x44c, (u16) ~((0x1 << 1) | (0x1 << 0)));
+}
+
+static void wlc_lcnphy_set_rx_iq_comp(struct brcms_phy *pi, u16 a, u16 b)
+{
+ mod_phy_reg(pi, 0x645, (0x3ff << 0), (a) << 0);
+
+ mod_phy_reg(pi, 0x646, (0x3ff << 0), (b) << 0);
+
+ mod_phy_reg(pi, 0x647, (0x3ff << 0), (a) << 0);
+
+ mod_phy_reg(pi, 0x648, (0x3ff << 0), (b) << 0);
+
+ mod_phy_reg(pi, 0x649, (0x3ff << 0), (a) << 0);
+
+ mod_phy_reg(pi, 0x64a, (0x3ff << 0), (b) << 0);
+
+}
+
+static bool
+wlc_lcnphy_rx_iq_est(struct brcms_phy *pi,
+ u16 num_samps,
+ u8 wait_time, struct lcnphy_iq_est *iq_est)
+{
+ int wait_count = 0;
+ bool result = true;
+ u8 phybw40;
+ phybw40 = CHSPEC_IS40(pi->radio_chanspec);
+
+ mod_phy_reg(pi, 0x6da, (0x1 << 5), (1) << 5);
+
+ mod_phy_reg(pi, 0x410, (0x1 << 3), (0) << 3);
+
+ mod_phy_reg(pi, 0x482, (0xffff << 0), (num_samps) << 0);
+
+ mod_phy_reg(pi, 0x481, (0xff << 0), ((u16) wait_time) << 0);
+
+ mod_phy_reg(pi, 0x481, (0x1 << 8), (0) << 8);
+
+ mod_phy_reg(pi, 0x481, (0x1 << 9), (1) << 9);
+
+ while (read_phy_reg(pi, 0x481) & (0x1 << 9)) {
+
+ if (wait_count > (10 * 500)) {
+ result = false;
+ goto cleanup;
+ }
+ udelay(100);
+ wait_count++;
+ }
+
+ iq_est->iq_prod = ((u32) read_phy_reg(pi, 0x483) << 16) |
+ (u32) read_phy_reg(pi, 0x484);
+ iq_est->i_pwr = ((u32) read_phy_reg(pi, 0x485) << 16) |
+ (u32) read_phy_reg(pi, 0x486);
+ iq_est->q_pwr = ((u32) read_phy_reg(pi, 0x487) << 16) |
+ (u32) read_phy_reg(pi, 0x488);
+
+cleanup:
+ mod_phy_reg(pi, 0x410, (0x1 << 3), (1) << 3);
+
+ mod_phy_reg(pi, 0x6da, (0x1 << 5), (0) << 5);
+
+ return result;
+}
+
+static bool wlc_lcnphy_calc_rx_iq_comp(struct brcms_phy *pi, u16 num_samps)
+{
+#define LCNPHY_MIN_RXIQ_PWR 2
+ bool result;
+ u16 a0_new, b0_new;
+ struct lcnphy_iq_est iq_est = { 0, 0, 0 };
+ s32 a, b, temp;
+ s16 iq_nbits, qq_nbits, arsh, brsh;
+ s32 iq;
+ u32 ii, qq;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- if (txpwrctrl_off(pi))
- index = pi_lcn->lcnphy_current_index;
- else if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
- index =
- (s8) (wlc_lcnphy_get_current_tx_pwr_idx_if_pwrctrl_on(pi)
- / 2);
- else
- index = pi_lcn->lcnphy_current_index;
- return index;
+ a0_new = ((read_phy_reg(pi, 0x645) & (0x3ff << 0)) >> 0);
+ b0_new = ((read_phy_reg(pi, 0x646) & (0x3ff << 0)) >> 0);
+ mod_phy_reg(pi, 0x6d1, (0x1 << 2), (0) << 2);
+
+ mod_phy_reg(pi, 0x64b, (0x1 << 6), (1) << 6);
+
+ wlc_lcnphy_set_rx_iq_comp(pi, 0, 0);
+
+ result = wlc_lcnphy_rx_iq_est(pi, num_samps, 32, &iq_est);
+ if (!result)
+ goto cleanup;
+
+ iq = (s32) iq_est.iq_prod;
+ ii = iq_est.i_pwr;
+ qq = iq_est.q_pwr;
+
+ if ((ii + qq) < LCNPHY_MIN_RXIQ_PWR) {
+ result = false;
+ goto cleanup;
+ }
+
+ iq_nbits = wlc_phy_nbits(iq);
+ qq_nbits = wlc_phy_nbits(qq);
+
+ arsh = 10 - (30 - iq_nbits);
+ if (arsh >= 0) {
+ a = (-(iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
+ temp = (s32) (ii >> arsh);
+ if (temp == 0)
+ return false;
+ } else {
+ a = (-(iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
+ temp = (s32) (ii << -arsh);
+ if (temp == 0)
+ return false;
+ }
+ a /= temp;
+ brsh = qq_nbits - 31 + 20;
+ if (brsh >= 0) {
+ b = (qq << (31 - qq_nbits));
+ temp = (s32) (ii >> brsh);
+ if (temp == 0)
+ return false;
+ } else {
+ b = (qq << (31 - qq_nbits));
+ temp = (s32) (ii << -brsh);
+ if (temp == 0)
+ return false;
+ }
+ b /= temp;
+ b -= a * a;
+ b = (s32) int_sqrt((unsigned long) b);
+ b -= (1 << 10);
+ a0_new = (u16) (a & 0x3ff);
+ b0_new = (u16) (b & 0x3ff);
+cleanup:
+
+ wlc_lcnphy_set_rx_iq_comp(pi, a0_new, b0_new);
+
+ mod_phy_reg(pi, 0x64b, (0x1 << 0), (1) << 0);
+
+ mod_phy_reg(pi, 0x64b, (0x1 << 3), (1) << 3);
+
+ pi_lcn->lcnphy_cal_results.rxiqcal_coeff_a0 = a0_new;
+ pi_lcn->lcnphy_cal_results.rxiqcal_coeff_b0 = b0_new;
+
+ return result;
}
static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
@@ -1132,6 +1329,187 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
}
+static bool
+wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
+ const struct lcnphy_rx_iqcomp *iqcomp,
+ int iqcomp_sz, bool tx_switch, bool rx_switch, int module,
+ int tx_gain_idx)
+{
+ struct lcnphy_txgains old_gains;
+ u16 tx_pwr_ctrl;
+ u8 tx_gain_index_old = 0;
+ bool result = false, tx_gain_override_old = false;
+ u16 i, Core1TxControl_old, RFOverride0_old,
+ RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
+ rfoverride3_old, rfoverride3val_old, rfoverride4_old,
+ rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
+ int tia_gain;
+ u32 received_power, rx_pwr_threshold;
+ u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
+ u16 values_to_save[11];
+ s16 *ptr;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
+
+ ptr = kmalloc(sizeof(s16) * 131, GFP_ATOMIC);
+ if (NULL == ptr)
+ return false;
+ if (module == 2) {
+ while (iqcomp_sz--) {
+ if (iqcomp[iqcomp_sz].chan ==
+ CHSPEC_CHANNEL(pi->radio_chanspec)) {
+ wlc_lcnphy_set_rx_iq_comp(pi,
+ (u16)
+ iqcomp[iqcomp_sz].a,
+ (u16)
+ iqcomp[iqcomp_sz].b);
+ result = true;
+ break;
+ }
+ }
+ goto cal_done;
+ }
+
+ if (module == 1) {
+
+ tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
+
+ for (i = 0; i < 11; i++)
+ values_to_save[i] =
+ read_radio_reg(pi, rxiq_cal_rf_reg[i]);
+ Core1TxControl_old = read_phy_reg(pi, 0x631);
+
+ or_phy_reg(pi, 0x631, 0x0015);
+
+ RFOverride0_old = read_phy_reg(pi, 0x44c);
+ RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
+ rfoverride2_old = read_phy_reg(pi, 0x4b0);
+ rfoverride2val_old = read_phy_reg(pi, 0x4b1);
+ rfoverride3_old = read_phy_reg(pi, 0x4f9);
+ rfoverride3val_old = read_phy_reg(pi, 0x4fa);
+ rfoverride4_old = read_phy_reg(pi, 0x938);
+ rfoverride4val_old = read_phy_reg(pi, 0x939);
+ afectrlovr_old = read_phy_reg(pi, 0x43b);
+ afectrlovrval_old = read_phy_reg(pi, 0x43c);
+ old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
+ old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
+
+ tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
+ if (tx_gain_override_old) {
+ wlc_lcnphy_get_tx_gain(pi, &old_gains);
+ tx_gain_index_old = pi_lcn->lcnphy_current_index;
+ }
+
+ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
+
+ mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
+
+ mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
+
+ write_radio_reg(pi, RADIO_2064_REG116, 0x06);
+ write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
+ write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
+ write_radio_reg(pi, RADIO_2064_REG098, 0x03);
+ write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
+ mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
+ write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
+ write_radio_reg(pi, RADIO_2064_REG114, 0x01);
+ write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
+ write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
+
+ mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
+ mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
+ mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
+ mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
+ mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
+ mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
+ mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
+
+ mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
+
+ wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
+ write_phy_reg(pi, 0x6da, 0xffff);
+ or_phy_reg(pi, 0x6db, 0x3);
+ wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
+ wlc_lcnphy_rx_gain_override_enable(pi, true);
+
+ tia_gain = 8;
+ rx_pwr_threshold = 950;
+ while (tia_gain > 0) {
+ tia_gain -= 1;
+ wlc_lcnphy_set_rx_gain_by_distribution(pi,
+ 0, 0, 2, 2,
+ (u16)
+ tia_gain, 1, 0);
+ udelay(500);
+
+ received_power =
+ wlc_lcnphy_measure_digital_power(pi, 2000);
+ if (received_power < rx_pwr_threshold)
+ break;
+ }
+ result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
+
+ wlc_lcnphy_stop_tx_tone(pi);
+
+ write_phy_reg(pi, 0x631, Core1TxControl_old);
+
+ write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
+ write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
+ write_phy_reg(pi, 0x4b0, rfoverride2_old);
+ write_phy_reg(pi, 0x4b1, rfoverride2val_old);
+ write_phy_reg(pi, 0x4f9, rfoverride3_old);
+ write_phy_reg(pi, 0x4fa, rfoverride3val_old);
+ write_phy_reg(pi, 0x938, rfoverride4_old);
+ write_phy_reg(pi, 0x939, rfoverride4val_old);
+ write_phy_reg(pi, 0x43b, afectrlovr_old);
+ write_phy_reg(pi, 0x43c, afectrlovrval_old);
+ write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
+ write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
+
+ wlc_lcnphy_clear_trsw_override(pi);
+
+ mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
+
+ for (i = 0; i < 11; i++)
+ write_radio_reg(pi, rxiq_cal_rf_reg[i],
+ values_to_save[i]);
+
+ if (tx_gain_override_old)
+ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
+ else
+ wlc_lcnphy_disable_tx_gain_override(pi);
+
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
+ wlc_lcnphy_rx_gain_override_enable(pi, false);
+ }
+
+cal_done:
+ kfree(ptr);
+ return result;
+}
+
+s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi)
+{
+ s8 index;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
+
+ if (txpwrctrl_off(pi))
+ index = pi_lcn->lcnphy_current_index;
+ else if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
+ index = (s8) (wlc_lcnphy_get_current_tx_pwr_idx_if_pwrctrl_on(
+ pi) / 2);
+ else
+ index = pi_lcn->lcnphy_current_index;
+ return index;
+}
+
void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel)
{
u16 afectrlovr, afectrlovrval;
@@ -1200,7 +1578,290 @@ wlc_lcnphy_txrx_spur_avoidance_mode(struct brcms_phy *pi, bool enable)
wlapi_switch_macfreq(pi->sh->physhim, enable);
}
-void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, chanspec_t chanspec)
+static void
+wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
+{
+ u8 channel = CHSPEC_CHANNEL(chanspec);
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
+
+ if (channel == 14)
+ mod_phy_reg(pi, 0x448, (0x3 << 8), (2) << 8);
+ else
+ mod_phy_reg(pi, 0x448, (0x3 << 8), (1) << 8);
+
+ pi_lcn->lcnphy_bandedge_corr = 2;
+ if (channel == 1)
+ pi_lcn->lcnphy_bandedge_corr = 4;
+
+ if (channel == 1 || channel == 2 || channel == 3 ||
+ channel == 4 || channel == 9 ||
+ channel == 10 || channel == 11 || channel == 12) {
+ si_pmu_pllcontrol(pi->sh->sih, 0x2, 0xffffffff, 0x03000c04);
+ si_pmu_pllcontrol(pi->sh->sih, 0x3, 0xffffff, 0x0);
+ si_pmu_pllcontrol(pi->sh->sih, 0x4, 0xffffffff, 0x200005c0);
+
+ si_pmu_pllupd(pi->sh->sih);
+ write_phy_reg(pi, 0x942, 0);
+ wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
+ pi_lcn->lcnphy_spurmod = 0;
+ mod_phy_reg(pi, 0x424, (0xff << 8), (0x1b) << 8);
+
+ write_phy_reg(pi, 0x425, 0x5907);
+ } else {
+ si_pmu_pllcontrol(pi->sh->sih, 0x2, 0xffffffff, 0x03140c04);
+ si_pmu_pllcontrol(pi->sh->sih, 0x3, 0xffffff, 0x333333);
+ si_pmu_pllcontrol(pi->sh->sih, 0x4, 0xffffffff, 0x202c2820);
+
+ si_pmu_pllupd(pi->sh->sih);
+ write_phy_reg(pi, 0x942, 0);
+ wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);
+
+ pi_lcn->lcnphy_spurmod = 0;
+ mod_phy_reg(pi, 0x424, (0xff << 8), (0x1f) << 8);
+
+ write_phy_reg(pi, 0x425, 0x590a);
+ }
+
+ or_phy_reg(pi, 0x44a, 0x44);
+ write_phy_reg(pi, 0x44a, 0x80);
+}
+
+static void
+wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
+{
+ uint i;
+ const struct chan_info_2064_lcnphy *ci;
+ u8 rfpll_doubler = 0;
+ u8 pll_pwrup, pll_pwrup_ovr;
+ s32 qFxtal, qFref, qFvco, qFcal;
+ u8 d15, d16, f16, e44, e45;
+ u32 div_int, div_frac, fvco3, fpfd, fref3, fcal_div;
+ u16 loop_bw, d30, setCount;
+
+ u8 h29, h28_ten, e30, h30_ten, cp_current;
+ u16 g30, d28;
+
+ ci = &chan_info_2064_lcnphy[0];
+ rfpll_doubler = 1;
+
+ mod_radio_reg(pi, RADIO_2064_REG09D, 0x4, 0x1 << 2);
+
+ write_radio_reg(pi, RADIO_2064_REG09E, 0xf);
+ if (!rfpll_doubler) {
+ loop_bw = PLL_2064_LOOP_BW;
+ d30 = PLL_2064_D30;
+ } else {
+ loop_bw = PLL_2064_LOOP_BW_DOUBLER;
+ d30 = PLL_2064_D30_DOUBLER;
+ }
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ for (i = 0; i < ARRAY_SIZE(chan_info_2064_lcnphy); i++)
+ if (chan_info_2064_lcnphy[i].chan == channel)
+ break;
+
+ if (i >= ARRAY_SIZE(chan_info_2064_lcnphy))
+ return;
+
+ ci = &chan_info_2064_lcnphy[i];
+ }
+
+ write_radio_reg(pi, RADIO_2064_REG02A, ci->logen_buftune);
+
+ mod_radio_reg(pi, RADIO_2064_REG030, 0x3, ci->logen_rccr_tx);
+
+ mod_radio_reg(pi, RADIO_2064_REG091, 0x3, ci->txrf_mix_tune_ctrl);
+
+ mod_radio_reg(pi, RADIO_2064_REG038, 0xf, ci->pa_input_tune_g);
+
+ mod_radio_reg(pi, RADIO_2064_REG030, 0x3 << 2,
+ (ci->logen_rccr_rx) << 2);
+
+ mod_radio_reg(pi, RADIO_2064_REG05E, 0xf, ci->pa_rxrf_lna1_freq_tune);
+
+ mod_radio_reg(pi, RADIO_2064_REG05E, (0xf) << 4,
+ (ci->pa_rxrf_lna2_freq_tune) << 4);
+
+ write_radio_reg(pi, RADIO_2064_REG06C, ci->rxrf_rxrf_spare1);
+
+ pll_pwrup = (u8) read_radio_reg(pi, RADIO_2064_REG044);
+ pll_pwrup_ovr = (u8) read_radio_reg(pi, RADIO_2064_REG12B);
+
+ or_radio_reg(pi, RADIO_2064_REG044, 0x07);
+
+ or_radio_reg(pi, RADIO_2064_REG12B, (0x07) << 1);
+ e44 = 0;
+ e45 = 0;
+
+ fpfd = rfpll_doubler ? (pi->xtalfreq << 1) : (pi->xtalfreq);
+ if (pi->xtalfreq > 26000000)
+ e44 = 1;
+ if (pi->xtalfreq > 52000000)
+ e45 = 1;
+ if (e44 == 0)
+ fcal_div = 1;
+ else if (e45 == 0)
+ fcal_div = 2;
+ else
+ fcal_div = 4;
+ fvco3 = (ci->freq * 3);
+ fref3 = 2 * fpfd;
+
+ qFxtal = wlc_lcnphy_qdiv_roundup(pi->xtalfreq, PLL_2064_MHZ, 16);
+ qFref = wlc_lcnphy_qdiv_roundup(fpfd, PLL_2064_MHZ, 16);
+ qFcal = pi->xtalfreq * fcal_div / PLL_2064_MHZ;
+ qFvco = wlc_lcnphy_qdiv_roundup(fvco3, 2, 16);
+
+ write_radio_reg(pi, RADIO_2064_REG04F, 0x02);
+
+ d15 = (pi->xtalfreq * fcal_div * 4 / 5) / PLL_2064_MHZ - 1;
+ write_radio_reg(pi, RADIO_2064_REG052, (0x07 & (d15 >> 2)));
+ write_radio_reg(pi, RADIO_2064_REG053, (d15 & 0x3) << 5);
+
+ d16 = (qFcal * 8 / (d15 + 1)) - 1;
+ write_radio_reg(pi, RADIO_2064_REG051, d16);
+
+ f16 = ((d16 + 1) * (d15 + 1)) / qFcal;
+ setCount = f16 * 3 * (ci->freq) / 32 - 1;
+ mod_radio_reg(pi, RADIO_2064_REG053, (0x0f << 0),
+ (u8) (setCount >> 8));
+
+ or_radio_reg(pi, RADIO_2064_REG053, 0x10);
+ write_radio_reg(pi, RADIO_2064_REG054, (u8) (setCount & 0xff));
+
+ div_int = ((fvco3 * (PLL_2064_MHZ >> 4)) / fref3) << 4;
+
+ div_frac = ((fvco3 * (PLL_2064_MHZ >> 4)) % fref3) << 4;
+ while (div_frac >= fref3) {
+ div_int++;
+ div_frac -= fref3;
+ }
+ div_frac = wlc_lcnphy_qdiv_roundup(div_frac, fref3, 20);
+
+ mod_radio_reg(pi, RADIO_2064_REG045, (0x1f << 0),
+ (u8) (div_int >> 4));
+ mod_radio_reg(pi, RADIO_2064_REG046, (0x1f << 4),
+ (u8) (div_int << 4));
+ mod_radio_reg(pi, RADIO_2064_REG046, (0x0f << 0),
+ (u8) (div_frac >> 16));
+ write_radio_reg(pi, RADIO_2064_REG047, (u8) (div_frac >> 8) & 0xff);
+ write_radio_reg(pi, RADIO_2064_REG048, (u8) div_frac & 0xff);
+
+ write_radio_reg(pi, RADIO_2064_REG040, 0xfb);
+
+ write_radio_reg(pi, RADIO_2064_REG041, 0x9A);
+ write_radio_reg(pi, RADIO_2064_REG042, 0xA3);
+ write_radio_reg(pi, RADIO_2064_REG043, 0x0C);
+
+ h29 = LCN_BW_LMT / loop_bw;
+ d28 = (((PLL_2064_HIGH_END_KVCO - PLL_2064_LOW_END_KVCO) *
+ (fvco3 / 2 - PLL_2064_LOW_END_VCO)) /
+ (PLL_2064_HIGH_END_VCO - PLL_2064_LOW_END_VCO))
+ + PLL_2064_LOW_END_KVCO;
+ h28_ten = (d28 * 10) / LCN_VCO_DIV;
+ e30 = (d30 - LCN_OFFSET) / LCN_FACT;
+ g30 = LCN_OFFSET + (e30 * LCN_FACT);
+ h30_ten = (g30 * 10) / LCN_CUR_DIV;
+ cp_current = ((LCN_CUR_LMT * h29 * LCN_MULT * 100) / h28_ten) / h30_ten;
+ mod_radio_reg(pi, RADIO_2064_REG03C, 0x3f, cp_current);
+
+ if (channel >= 1 && channel <= 5)
+ write_radio_reg(pi, RADIO_2064_REG03C, 0x8);
+ else
+ write_radio_reg(pi, RADIO_2064_REG03C, 0x7);
+ write_radio_reg(pi, RADIO_2064_REG03D, 0x3);
+
+ mod_radio_reg(pi, RADIO_2064_REG044, 0x0c, 0x0c);
+ udelay(1);
+
+ wlc_2064_vco_cal(pi);
+
+ write_radio_reg(pi, RADIO_2064_REG044, pll_pwrup);
+ write_radio_reg(pi, RADIO_2064_REG12B, pll_pwrup_ovr);
+ if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
+ write_radio_reg(pi, RADIO_2064_REG038, 3);
+ write_radio_reg(pi, RADIO_2064_REG091, 7);
+ }
+}
+
+static int
+wlc_lcnphy_load_tx_iir_filter(struct brcms_phy *pi, bool is_ofdm, s16 filt_type)
+{
+ s16 filt_index = -1;
+ int j;
+
+ u16 addr[] = {
+ 0x910,
+ 0x91e,
+ 0x91f,
+ 0x924,
+ 0x925,
+ 0x926,
+ 0x920,
+ 0x921,
+ 0x927,
+ 0x928,
+ 0x929,
+ 0x922,
+ 0x923,
+ 0x930,
+ 0x931,
+ 0x932
+ };
+
+ u16 addr_ofdm[] = {
+ 0x90f,
+ 0x900,
+ 0x901,
+ 0x906,
+ 0x907,
+ 0x908,
+ 0x902,
+ 0x903,
+ 0x909,
+ 0x90a,
+ 0x90b,
+ 0x904,
+ 0x905,
+ 0x90c,
+ 0x90d,
+ 0x90e
+ };
+
+ if (!is_ofdm) {
+ for (j = 0; j < LCNPHY_NUM_TX_DIG_FILTERS_CCK; j++) {
+ if (filt_type == LCNPHY_txdigfiltcoeffs_cck[j][0]) {
+ filt_index = (s16) j;
+ break;
+ }
+ }
+
+ if (filt_index != -1) {
+ for (j = 0; j < LCNPHY_NUM_DIG_FILT_COEFFS; j++)
+ write_phy_reg(pi, addr[j],
+ LCNPHY_txdigfiltcoeffs_cck
+ [filt_index][j + 1]);
+ }
+ } else {
+ for (j = 0; j < LCNPHY_NUM_TX_DIG_FILTERS_OFDM; j++) {
+ if (filt_type == LCNPHY_txdigfiltcoeffs_ofdm[j][0]) {
+ filt_index = (s16) j;
+ break;
+ }
+ }
+
+ if (filt_index != -1) {
+ for (j = 0; j < LCNPHY_NUM_DIG_FILT_COEFFS; j++)
+ write_phy_reg(pi, addr_ofdm[j],
+ LCNPHY_txdigfiltcoeffs_ofdm
+ [filt_index][j + 1]);
+ }
+ }
+
+ return (filt_index != -1) ? 0 : -1;
+}
+
+void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
{
u8 channel = CHSPEC_CHANNEL(chanspec);
@@ -1211,10 +1872,8 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, chanspec_t chanspec)
or_phy_reg(pi, 0x44a, 0x44);
write_phy_reg(pi, 0x44a, 0x80);
- if (!NORADIO_ENAB(pi->pubpi)) {
- wlc_lcnphy_radio_2064_channel_tune_4313(pi, channel);
- udelay(1000);
- }
+ wlc_lcnphy_radio_2064_channel_tune_4313(pi, channel);
+ udelay(1000);
wlc_lcnphy_toggle_afe_pwdn(pi);
@@ -1237,35 +1896,13 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, chanspec_t chanspec)
}
-static void wlc_lcnphy_set_dac_gain(struct brcms_phy *pi, u16 dac_gain)
-{
- u16 dac_ctrl;
-
- dac_ctrl = (read_phy_reg(pi, 0x439) >> 0);
- dac_ctrl = dac_ctrl & 0xc7f;
- dac_ctrl = dac_ctrl | (dac_gain << 7);
- mod_phy_reg(pi, 0x439, (0xfff << 0), (dac_ctrl) << 0);
-
-}
-
-static void wlc_lcnphy_set_tx_gain_override(struct brcms_phy *pi, bool bEnable)
-{
- u16 bit = bEnable ? 1 : 0;
-
- mod_phy_reg(pi, 0x4b0, (0x1 << 7), bit << 7);
-
- mod_phy_reg(pi, 0x4b0, (0x1 << 14), bit << 14);
-
- mod_phy_reg(pi, 0x43b, (0x1 << 6), bit << 6);
-}
-
static u16 wlc_lcnphy_get_pa_gain(struct brcms_phy *pi)
{
u16 pa_gain;
pa_gain = (read_phy_reg(pi, 0x4fb) &
LCNPHY_txgainctrlovrval1_pagain_ovr_val1_MASK) >>
- LCNPHY_txgainctrlovrval1_pagain_ovr_val1_SHIFT;
+ LCNPHY_txgainctrlovrval1_pagain_ovr_val1_SHIFT;
return pa_gain;
}
@@ -1275,18 +1912,22 @@ static void wlc_lcnphy_set_tx_gain(struct brcms_phy *pi,
{
u16 pa_gain = wlc_lcnphy_get_pa_gain(pi);
- mod_phy_reg(pi, 0x4b5,
- (0xffff << 0),
- ((target_gains->gm_gain) | (target_gains->pga_gain << 8)) <<
- 0);
+ mod_phy_reg(
+ pi, 0x4b5,
+ (0xffff << 0),
+ ((target_gains->gm_gain) |
+ (target_gains->pga_gain << 8)) <<
+ 0);
mod_phy_reg(pi, 0x4fb,
(0x7fff << 0),
((target_gains->pad_gain) | (pa_gain << 8)) << 0);
- mod_phy_reg(pi, 0x4fc,
- (0xffff << 0),
- ((target_gains->gm_gain) | (target_gains->pga_gain << 8)) <<
- 0);
+ mod_phy_reg(
+ pi, 0x4fc,
+ (0xffff << 0),
+ ((target_gains->gm_gain) |
+ (target_gains->pga_gain << 8)) <<
+ 0);
mod_phy_reg(pi, 0x4fd,
(0x7fff << 0),
((target_gains->pad_gain) | (pa_gain << 8)) << 0);
@@ -1404,8 +2045,8 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
u16 auxpga_vmid, auxpga_vmid_temp, auxpga_gain_temp;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- auxpga_vmid =
- (2 << 8) | (pi_lcn->lcnphy_rssi_vc << 4) | pi_lcn->lcnphy_rssi_vf;
+ auxpga_vmid = (2 << 8) |
+ (pi_lcn->lcnphy_rssi_vc << 4) | pi_lcn->lcnphy_rssi_vf;
auxpga_vmid_temp = (2 << 8) | (8 << 4) | 4;
auxpga_gain_temp = 2;
@@ -1531,10 +2172,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_radio_reg(pi, RADIO_2064_REG005, 0x8, 1 << 3);
- if (!wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) {
+ if (!wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
mod_phy_reg(pi, 0x4d7,
(0x1 << 3) | (0x7 << 12), 0 << 3 | 2 << 12);
- }
rfseq = wlc_lcnphy_rfseq_tbl_adc_pwrup(pi);
tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
@@ -1601,7 +2241,7 @@ void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi)
{
struct phytbl_info tab;
u32 rate_table[BRCMS_NUM_RATES_CCK + BRCMS_NUM_RATES_OFDM +
- BRCMS_NUM_RATES_MCS_1_STREAM];
+ BRCMS_NUM_RATES_MCS_1_STREAM];
uint i, j;
if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
return;
@@ -1694,34 +2334,28 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
index = FIXED_TXPWR;
- if (NORADIO_ENAB(pi->pubpi))
+ if (pi_lcn->lcnphy_tempsense_slope == 0)
return index;
- if (pi_lcn->lcnphy_tempsense_slope == 0) {
- return index;
- }
temp = (u16) wlc_lcnphy_tempsense(pi, 0);
meas_temp = LCNPHY_TEMPSENSE(temp);
- if (pi->tx_power_min != 0) {
+ if (pi->tx_power_min != 0)
delta_brd = (pi_lcn->lcnphy_measPower - pi->tx_power_min);
- } else {
+ else
delta_brd = 0;
- }
manp = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_rawtempsense);
temp_diff = manp - meas_temp;
if (temp_diff < 0) {
-
neg = 1;
-
temp_diff = -temp_diff;
}
delta_temp = (s8) wlc_lcnphy_qdiv_roundup((u32) (temp_diff * 192),
- (u32) (pi_lcn->
- lcnphy_tempsense_slope
- * 10), 0);
+ (u32) (pi_lcn->
+ lcnphy_tempsense_slope
+ * 10), 0);
if (neg)
delta_temp = -delta_temp;
@@ -1735,14 +2369,15 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
if (LCNREV_IS(pi->pubpi.phy_rev, 1))
tempcorrx = 4;
new_index =
- index + delta_brd + delta_temp - pi_lcn->lcnphy_bandedge_corr;
+ index + delta_brd + delta_temp - pi_lcn->lcnphy_bandedge_corr;
new_index += tempcorrx;
if (LCNREV_IS(pi->pubpi.phy_rev, 1))
index = 127;
- if (new_index < 0 || new_index > 126) {
+
+ if (new_index < 0 || new_index > 126)
return index;
- }
+
return new_index;
}
@@ -1792,7 +2427,7 @@ void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode)
mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 0);
pi_lcn->lcnphy_tssi_tx_cnt =
- wlc_lcnphy_total_tx_frames(pi);
+ wlc_lcnphy_total_tx_frames(pi);
wlc_lcnphy_disable_tx_gain_override(pi);
pi_lcn->lcnphy_tx_power_idx_override = -1;
@@ -1805,11 +2440,105 @@ void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode)
index = wlc_lcnphy_tempcompensated_txpwrctrl(pi);
wlc_lcnphy_set_tx_pwr_soft_ctrl(pi, index);
pi_lcn->lcnphy_current_index = (s8)
- ((read_phy_reg(pi, 0x4a9) & 0xFF) / 2);
+ ((read_phy_reg(pi,
+ 0x4a9) &
+ 0xFF) / 2);
}
}
}
+static void
+wlc_lcnphy_tx_iqlo_loopback(struct brcms_phy *pi, u16 *values_to_save)
+{
+ u16 vmid;
+ int i;
+ for (i = 0; i < 20; i++)
+ values_to_save[i] =
+ read_radio_reg(pi, iqlo_loopback_rf_regs[i]);
+
+ mod_phy_reg(pi, 0x44c, (0x1 << 12), 1 << 12);
+ mod_phy_reg(pi, 0x44d, (0x1 << 14), 1 << 14);
+
+ mod_phy_reg(pi, 0x44c, (0x1 << 11), 1 << 11);
+ mod_phy_reg(pi, 0x44d, (0x1 << 13), 0 << 13);
+
+ mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
+
+ mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
+
+ if (LCNREV_IS(pi->pubpi.phy_rev, 2))
+ and_radio_reg(pi, RADIO_2064_REG03A, 0xFD);
+ else
+ and_radio_reg(pi, RADIO_2064_REG03A, 0xF9);
+ or_radio_reg(pi, RADIO_2064_REG11A, 0x1);
+
+ or_radio_reg(pi, RADIO_2064_REG036, 0x01);
+ or_radio_reg(pi, RADIO_2064_REG11A, 0x18);
+ udelay(20);
+
+ if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
+ if (CHSPEC_IS5G(pi->radio_chanspec))
+ mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0);
+ else
+ or_radio_reg(pi, RADIO_2064_REG03A, 1);
+ } else {
+ if (CHSPEC_IS5G(pi->radio_chanspec))
+ mod_radio_reg(pi, RADIO_2064_REG03A, 3, 1);
+ else
+ or_radio_reg(pi, RADIO_2064_REG03A, 0x3);
+ }
+
+ udelay(20);
+
+ write_radio_reg(pi, RADIO_2064_REG025, 0xF);
+ if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
+ if (CHSPEC_IS5G(pi->radio_chanspec))
+ mod_radio_reg(pi, RADIO_2064_REG028, 0xF, 0x4);
+ else
+ mod_radio_reg(pi, RADIO_2064_REG028, 0xF, 0x6);
+ } else {
+ if (CHSPEC_IS5G(pi->radio_chanspec))
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0x4 << 1);
+ else
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0x6 << 1);
+ }
+
+ udelay(20);
+
+ write_radio_reg(pi, RADIO_2064_REG005, 0x8);
+ or_radio_reg(pi, RADIO_2064_REG112, 0x80);
+ udelay(20);
+
+ or_radio_reg(pi, RADIO_2064_REG0FF, 0x10);
+ or_radio_reg(pi, RADIO_2064_REG11F, 0x44);
+ udelay(20);
+
+ or_radio_reg(pi, RADIO_2064_REG00B, 0x7);
+ or_radio_reg(pi, RADIO_2064_REG113, 0x10);
+ udelay(20);
+
+ write_radio_reg(pi, RADIO_2064_REG007, 0x1);
+ udelay(20);
+
+ vmid = 0x2A6;
+ mod_radio_reg(pi, RADIO_2064_REG0FC, 0x3 << 0, (vmid >> 8) & 0x3);
+ write_radio_reg(pi, RADIO_2064_REG0FD, (vmid & 0xff));
+ or_radio_reg(pi, RADIO_2064_REG11F, 0x44);
+ udelay(20);
+
+ or_radio_reg(pi, RADIO_2064_REG0FF, 0x10);
+ udelay(20);
+ write_radio_reg(pi, RADIO_2064_REG012, 0x02);
+ or_radio_reg(pi, RADIO_2064_REG112, 0x06);
+ write_radio_reg(pi, RADIO_2064_REG036, 0x11);
+ write_radio_reg(pi, RADIO_2064_REG059, 0xcc);
+ write_radio_reg(pi, RADIO_2064_REG05C, 0x2e);
+ write_radio_reg(pi, RADIO_2064_REG078, 0xd7);
+ write_radio_reg(pi, RADIO_2064_REG092, 0x15);
+}
+
static bool wlc_lcnphy_iqcal_wait(struct brcms_phy *pi)
{
uint delay_count = 0;
@@ -1826,6 +2555,20 @@ static bool wlc_lcnphy_iqcal_wait(struct brcms_phy *pi)
}
static void
+wlc_lcnphy_tx_iqlo_loopback_cleanup(struct brcms_phy *pi, u16 *values_to_save)
+{
+ int i;
+
+ and_phy_reg(pi, 0x44c, 0x0 >> 11);
+
+ and_phy_reg(pi, 0x43b, 0xC);
+
+ for (i = 0; i < 20; i++)
+ write_radio_reg(pi, iqlo_loopback_rf_regs[i],
+ values_to_save[i]);
+}
+
+static void
wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
struct lcnphy_txgains *target_gains,
enum lcnphy_cal_mode cal_mode, bool keep_tone)
@@ -1837,20 +2580,23 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
int j;
u16 ncorr_override[5];
u16 syst_coeffs[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000, 0x0000
- };
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000};
u16 commands_fullcal[] = {
- 0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234 };
+ 0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234
+ };
u16 commands_recal[] = {
- 0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234 };
+ 0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234
+ };
u16 command_nums_fullcal[] = {
- 0x7a97, 0x7a97, 0x7a97, 0x7a87, 0x7a87, 0x7b97 };
+ 0x7a97, 0x7a97, 0x7a97, 0x7a87, 0x7a87, 0x7b97
+ };
u16 command_nums_recal[] = {
- 0x7a97, 0x7a97, 0x7a97, 0x7a87, 0x7a87, 0x7b97 };
+ 0x7a97, 0x7a97, 0x7a97, 0x7a87, 0x7a87, 0x7b97
+ };
u16 *command_nums = command_nums_fullcal;
u16 *start_coeffs = NULL, *cal_cmds = NULL, cal_type, diq_start;
@@ -1862,13 +2608,9 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
u16 *values_to_save;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- if (NORADIO_ENAB(pi->pubpi))
- return;
-
values_to_save = kmalloc(sizeof(u16) * 20, GFP_ATOMIC);
- if (NULL == values_to_save) {
+ if (NULL == values_to_save)
return;
- }
save_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
save_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
@@ -1927,7 +2669,7 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
}
hash = (target_gains->gm_gain << 8) |
- (target_gains->pga_gain << 4) | (target_gains->pad_gain);
+ (target_gains->pga_gain << 4) | (target_gains->pad_gain);
band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0);
@@ -1936,11 +2678,11 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
for (j = 0; j < iqcal_gainparams_numgains_lcnphy[band_idx]; j++) {
if (hash == tbl_iqcal_gainparams_lcnphy[band_idx][j][0]) {
cal_gains.gm_gain =
- tbl_iqcal_gainparams_lcnphy[band_idx][j][1];
+ tbl_iqcal_gainparams_lcnphy[band_idx][j][1];
cal_gains.pga_gain =
- tbl_iqcal_gainparams_lcnphy[band_idx][j][2];
+ tbl_iqcal_gainparams_lcnphy[band_idx][j][2];
cal_gains.pad_gain =
- tbl_iqcal_gainparams_lcnphy[band_idx][j][3];
+ tbl_iqcal_gainparams_lcnphy[band_idx][j][3];
memcpy(ncorr_override,
&tbl_iqcal_gainparams_lcnphy[band_idx][j][3],
sizeof(ncorr_override));
@@ -1954,14 +2696,14 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
write_phy_reg(pi, 0x93d, 0xc0);
wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
- (const void *)
lcnphy_iqcal_loft_gainladder,
ARRAY_SIZE(lcnphy_iqcal_loft_gainladder),
16, 0);
wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
- (const void *)lcnphy_iqcal_ir_gainladder,
- ARRAY_SIZE(lcnphy_iqcal_ir_gainladder), 16,
+ lcnphy_iqcal_ir_gainladder,
+ ARRAY_SIZE(
+ lcnphy_iqcal_ir_gainladder), 16,
32);
if (pi->phy_tx_tone_freq) {
@@ -1985,13 +2727,12 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
command_num = command_nums[i];
if (ncorr_override[cal_type])
command_num =
- ncorr_override[cal_type] << 8 | (command_num &
- 0xff);
+ ncorr_override[cal_type] << 8 | (command_num &
+ 0xff);
write_phy_reg(pi, 0x452, command_num);
if ((cal_type == 3) || (cal_type == 4)) {
-
wlc_lcnphy_common_read_table(pi, LCNPHY_TBL_ID_IQLOCAL,
&diq_start, 1, 16, 69);
@@ -2001,10 +2742,8 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
write_phy_reg(pi, 0x451, cal_cmds[i]);
- if (!wlc_lcnphy_iqcal_wait(pi)) {
-
+ if (!wlc_lcnphy_iqcal_wait(pi))
goto cleanup;
- }
wlc_lcnphy_common_read_table(pi, LCNPHY_TBL_ID_IQLOCAL,
best_coeffs,
@@ -2013,16 +2752,15 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
best_coeffs,
ARRAY_SIZE(best_coeffs), 16, 64);
- if ((cal_type == 3) || (cal_type == 4)) {
+ if ((cal_type == 3) || (cal_type == 4))
wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
&diq_start, 1, 16, 69);
- }
wlc_lcnphy_common_read_table(pi, LCNPHY_TBL_ID_IQLOCAL,
pi_lcn->lcnphy_cal_results.
txiqlocal_bestcoeffs,
ARRAY_SIZE(pi_lcn->
- lcnphy_cal_results.
- txiqlocal_bestcoeffs),
+ lcnphy_cal_results.
+ txiqlocal_bestcoeffs),
16, 96);
}
@@ -2030,7 +2768,7 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
pi_lcn->lcnphy_cal_results.
txiqlocal_bestcoeffs,
ARRAY_SIZE(pi_lcn->lcnphy_cal_results.
- txiqlocal_bestcoeffs), 16, 96);
+ txiqlocal_bestcoeffs), 16, 96);
pi_lcn->lcnphy_cal_results.txiqlocal_bestcoeffs_valid = true;
wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
@@ -2041,7 +2779,7 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
&pi_lcn->lcnphy_cal_results.
txiqlocal_bestcoeffs[5], 2, 16, 85);
- cleanup:
+cleanup:
wlc_lcnphy_tx_iqlo_loopback_cleanup(pi, values_to_save);
kfree(values_to_save);
@@ -2071,14 +2809,14 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
u16 SAVE_lpfgain = read_radio_reg(pi, RADIO_2064_REG112);
u16 SAVE_jtag_bb_afe_switch =
- read_radio_reg(pi, RADIO_2064_REG007) & 1;
+ read_radio_reg(pi, RADIO_2064_REG007) & 1;
u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
idleTssi = read_phy_reg(pi, 0x4ab);
suspend =
- (0 ==
- (R_REG(&((struct brcms_phy *) pi)->regs->maccontrol) &
- MCTL_EN_MAC));
+ (0 ==
+ (R_REG(&((struct brcms_phy *) pi)->regs->maccontrol) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
@@ -2136,7 +2874,7 @@ static void wlc_lcnphy_vbat_temp_sense_setup(struct brcms_phy *pi, u8 mode)
struct phytbl_info tab;
u32 val;
u8 save_reg007, save_reg0FF, save_reg11F, save_reg005, save_reg025,
- save_reg112;
+ save_reg112;
u16 values_to_save[14];
s8 index;
int i;
@@ -2152,8 +2890,7 @@ static void wlc_lcnphy_vbat_temp_sense_setup(struct brcms_phy *pi, u8 mode)
for (i = 0; i < 14; i++)
values_to_save[i] = read_phy_reg(pi, tempsense_phy_regs[i]);
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
save_txpwrCtrlEn = read_radio_reg(pi, 0x4a4);
@@ -2236,7 +2973,7 @@ static void wlc_lcnphy_vbat_temp_sense_setup(struct brcms_phy *pi, u8 mode)
auxpga_gain = 2;
}
auxpga_vmid =
- (u16) ((2 << 8) | (auxpga_vmidcourse << 4) | auxpga_vmidfine);
+ (u16) ((2 << 8) | (auxpga_vmidcourse << 4) | auxpga_vmidfine);
mod_phy_reg(pi, 0x4d8, (0x1 << 0), (1) << 0);
mod_phy_reg(pi, 0x4d8, (0x3ff << 2), (auxpga_vmid) << 2);
@@ -2280,17 +3017,10 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
struct brcms_phy *pi = (struct brcms_phy *) ppi;
suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
- if (NORADIO_ENAB(pi->pubpi)) {
- wlc_lcnphy_set_bbmult(pi, 0x30);
- if (!suspend)
- wlapi_enable_mac(pi->sh->physhim);
- return;
- }
-
if (!pi->hwpwrctrl_capable) {
if (CHSPEC_IS2G(pi->radio_chanspec)) {
tx_gains.gm_gain = 4;
@@ -2382,26 +3112,6 @@ wlc_lcnphy_get_radio_loft(struct brcms_phy *pi,
*fq0 = LCNPHY_IQLOCC_READ(read_radio_reg(pi, RADIO_2064_REG08C));
}
-static void
-wlc_lcnphy_get_tx_gain(struct brcms_phy *pi, struct lcnphy_txgains *gains)
-{
- u16 dac_gain;
-
- dac_gain = read_phy_reg(pi, 0x439) >> 0;
- gains->dac_gain = (dac_gain & 0x380) >> 7;
-
- {
- u16 rfgain0, rfgain1;
-
- rfgain0 = (read_phy_reg(pi, 0x4b5) & (0xffff << 0)) >> 0;
- rfgain1 = (read_phy_reg(pi, 0x4fb) & (0x7fff << 0)) >> 0;
-
- gains->gm_gain = rfgain0 & 0xff;
- gains->pga_gain = (rfgain0 >> 8) & 0xff;
- gains->pad_gain = rfgain1 & 0xff;
- }
-}
-
void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b)
{
struct phytbl_info tab;
@@ -2489,16 +3199,6 @@ void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index)
}
}
-static void wlc_lcnphy_set_trsw_override(struct brcms_phy *pi, bool tx, bool rx)
-{
-
- mod_phy_reg(pi, 0x44d,
- (0x1 << 1) |
- (0x1 << 0), (tx ? (0x1 << 1) : 0) | (rx ? (0x1 << 0) : 0));
-
- or_phy_reg(pi, 0x44c, (0x1 << 1) | (0x1 << 0));
-}
-
static void wlc_lcnphy_clear_papd_comptable(struct brcms_phy *pi)
{
u32 j;
@@ -2518,67 +3218,6 @@ static void wlc_lcnphy_clear_papd_comptable(struct brcms_phy *pi)
return;
}
-static void
-wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
- u16 trsw,
- u16 ext_lna,
- u16 biq2,
- u16 biq1,
- u16 tia, u16 lna2, u16 lna1)
-{
- u16 gain0_15, gain16_19;
-
- gain16_19 = biq2 & 0xf;
- gain0_15 = ((biq1 & 0xf) << 12) |
- ((tia & 0xf) << 8) |
- ((lna2 & 0x3) << 6) |
- ((lna2 & 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
-
- mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
- mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
- mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
-
- if (LCNREV_LT(pi->pubpi.phy_rev, 2)) {
- mod_phy_reg(pi, 0x4b1, (0x1 << 9), ext_lna << 9);
- mod_phy_reg(pi, 0x4b1, (0x1 << 10), ext_lna << 10);
- } else {
- mod_phy_reg(pi, 0x4b1, (0x1 << 10), 0 << 10);
-
- mod_phy_reg(pi, 0x4b1, (0x1 << 15), 0 << 15);
-
- mod_phy_reg(pi, 0x4b1, (0x1 << 9), ext_lna << 9);
- }
-
- mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);
-
-}
-
-static void
-wlc_lcnphy_rx_gain_override_enable(struct brcms_phy *pi, bool enable)
-{
- u16 ebit = enable ? 1 : 0;
-
- mod_phy_reg(pi, 0x4b0, (0x1 << 8), ebit << 8);
-
- mod_phy_reg(pi, 0x44c, (0x1 << 0), ebit << 0);
-
- if (LCNREV_LT(pi->pubpi.phy_rev, 2)) {
- mod_phy_reg(pi, 0x44c, (0x1 << 4), ebit << 4);
- mod_phy_reg(pi, 0x44c, (0x1 << 6), ebit << 6);
- mod_phy_reg(pi, 0x4b0, (0x1 << 5), ebit << 5);
- mod_phy_reg(pi, 0x4b0, (0x1 << 6), ebit << 6);
- } else {
- mod_phy_reg(pi, 0x4b0, (0x1 << 12), ebit << 12);
- mod_phy_reg(pi, 0x4b0, (0x1 << 13), ebit << 13);
- mod_phy_reg(pi, 0x4b0, (0x1 << 5), ebit << 5);
- }
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- mod_phy_reg(pi, 0x4b0, (0x1 << 10), ebit << 10);
- mod_phy_reg(pi, 0x4e5, (0x1 << 3), ebit << 3);
- }
-}
-
void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable)
{
if (!bEnable) {
@@ -2589,9 +3228,9 @@ void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable)
and_phy_reg(pi, 0x44c,
~(u16) ((0x1 << 3) |
- (0x1 << 5) |
- (0x1 << 12) |
- (0x1 << 0) | (0x1 << 1) | (0x1 << 2)));
+ (0x1 << 5) |
+ (0x1 << 12) |
+ (0x1 << 0) | (0x1 << 1) | (0x1 << 2)));
and_phy_reg(pi, 0x44d,
~(u16) ((0x1 << 3) | (0x1 << 5) | (0x1 << 14)));
@@ -2701,7 +3340,8 @@ void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode)
mod_phy_reg((pi), 0x410,
(0x1 << 6) |
(0x1 << 5),
- ((CHSPEC_IS2G(pi->radio_chanspec)) ? (!mode) : 0) <<
+ ((CHSPEC_IS2G(
+ pi->radio_chanspec)) ? (!mode) : 0) <<
6 | (!mode) << 5);
mod_phy_reg(pi, 0x410, (0x1 << 7), (mode) << 7);
}
@@ -2714,8 +3354,8 @@ wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,
u8 phy_bw;
u16 num_samps, t, k;
u32 bw;
- fixed theta = 0, rot = 0;
- cs32 tone_samp;
+ s32 theta = 0, rot = 0;
+ struct cordic_iq tone_samp;
u32 data_buf[64];
u16 i_samp, q_samp;
struct phytbl_info tab;
@@ -2737,18 +3377,18 @@ wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,
k = 1;
do {
bw = phy_bw * 1000 * k;
- num_samps = bw / ABS(f_kHz);
+ num_samps = bw / abs(f_kHz);
k++;
- } while ((num_samps * (u32) (ABS(f_kHz))) != bw);
+ } while ((num_samps * (u32) (abs(f_kHz))) != bw);
} else
num_samps = 2;
- rot = FIXED((f_kHz * 36) / phy_bw) / 100;
+ rot = ((f_kHz * 36) / phy_bw) / 100;
theta = 0;
for (t = 0; t < num_samps; t++) {
- wlc_phy_cordic(theta, &tone_samp);
+ tone_samp = cordic_calc_iq(theta);
theta += rot;
@@ -2802,10 +3442,359 @@ void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi)
wlc_lcnphy_deaf_mode(pi, false);
}
-static void wlc_lcnphy_clear_trsw_override(struct brcms_phy *pi)
+static void
+wlc_lcnphy_set_cc(struct brcms_phy *pi, int cal_type, s16 coeff_x, s16 coeff_y)
{
+ u16 di0dq0;
+ u16 x, y, data_rf;
+ int k;
+ switch (cal_type) {
+ case 0:
+ wlc_lcnphy_set_tx_iqcc(pi, coeff_x, coeff_y);
+ break;
+ case 2:
+ di0dq0 = (coeff_x & 0xff) << 8 | (coeff_y & 0xff);
+ wlc_lcnphy_set_tx_locc(pi, di0dq0);
+ break;
+ case 3:
+ k = wlc_lcnphy_calc_floor(coeff_x, 0);
+ y = 8 + k;
+ k = wlc_lcnphy_calc_floor(coeff_x, 1);
+ x = 8 - k;
+ data_rf = (x * 16 + y);
+ write_radio_reg(pi, RADIO_2064_REG089, data_rf);
+ k = wlc_lcnphy_calc_floor(coeff_y, 0);
+ y = 8 + k;
+ k = wlc_lcnphy_calc_floor(coeff_y, 1);
+ x = 8 - k;
+ data_rf = (x * 16 + y);
+ write_radio_reg(pi, RADIO_2064_REG08A, data_rf);
+ break;
+ case 4:
+ k = wlc_lcnphy_calc_floor(coeff_x, 0);
+ y = 8 + k;
+ k = wlc_lcnphy_calc_floor(coeff_x, 1);
+ x = 8 - k;
+ data_rf = (x * 16 + y);
+ write_radio_reg(pi, RADIO_2064_REG08B, data_rf);
+ k = wlc_lcnphy_calc_floor(coeff_y, 0);
+ y = 8 + k;
+ k = wlc_lcnphy_calc_floor(coeff_y, 1);
+ x = 8 - k;
+ data_rf = (x * 16 + y);
+ write_radio_reg(pi, RADIO_2064_REG08C, data_rf);
+ break;
+ }
+}
- and_phy_reg(pi, 0x44c, (u16) ~((0x1 << 1) | (0x1 << 0)));
+static struct lcnphy_unsign16_struct
+wlc_lcnphy_get_cc(struct brcms_phy *pi, int cal_type)
+{
+ u16 a, b, didq;
+ u8 di0, dq0, ei, eq, fi, fq;
+ struct lcnphy_unsign16_struct cc;
+ cc.re = 0;
+ cc.im = 0;
+ switch (cal_type) {
+ case 0:
+ wlc_lcnphy_get_tx_iqcc(pi, &a, &b);
+ cc.re = a;
+ cc.im = b;
+ break;
+ case 2:
+ didq = wlc_lcnphy_get_tx_locc(pi);
+ di0 = (((didq & 0xff00) << 16) >> 24);
+ dq0 = (((didq & 0x00ff) << 24) >> 24);
+ cc.re = (u16) di0;
+ cc.im = (u16) dq0;
+ break;
+ case 3:
+ wlc_lcnphy_get_radio_loft(pi, &ei, &eq, &fi, &fq);
+ cc.re = (u16) ei;
+ cc.im = (u16) eq;
+ break;
+ case 4:
+ wlc_lcnphy_get_radio_loft(pi, &ei, &eq, &fi, &fq);
+ cc.re = (u16) fi;
+ cc.im = (u16) fq;
+ break;
+ }
+ return cc;
+}
+
+static void
+wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
+ s16 *ptr, int mode)
+{
+ u32 curval1, curval2, stpptr, curptr, strptr, val;
+ u16 sslpnCalibClkEnCtrl, timer;
+ u16 old_sslpnCalibClkEnCtrl;
+ s16 imag, real;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
+
+ timer = 0;
+ old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
+
+ curval1 = R_REG(&pi->regs->psm_corectlsts);
+ ptr[130] = 0;
+ W_REG(&pi->regs->psm_corectlsts, ((1 << 6) | curval1));
+
+ W_REG(&pi->regs->smpl_clct_strptr, 0x7E00);
+ W_REG(&pi->regs->smpl_clct_stpptr, 0x8000);
+ udelay(20);
+ curval2 = R_REG(&pi->regs->psm_phy_hdr_param);
+ W_REG(&pi->regs->psm_phy_hdr_param, curval2 | 0x30);
+
+ write_phy_reg(pi, 0x555, 0x0);
+ write_phy_reg(pi, 0x5a6, 0x5);
+
+ write_phy_reg(pi, 0x5a2, (u16) (mode | mode << 6));
+ write_phy_reg(pi, 0x5cf, 3);
+ write_phy_reg(pi, 0x5a5, 0x3);
+ write_phy_reg(pi, 0x583, 0x0);
+ write_phy_reg(pi, 0x584, 0x0);
+ write_phy_reg(pi, 0x585, 0x0fff);
+ write_phy_reg(pi, 0x586, 0x0000);
+
+ write_phy_reg(pi, 0x580, 0x4501);
+
+ sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
+ write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008));
+ stpptr = R_REG(&pi->regs->smpl_clct_stpptr);
+ curptr = R_REG(&pi->regs->smpl_clct_curptr);
+ do {
+ udelay(10);
+ curptr = R_REG(&pi->regs->smpl_clct_curptr);
+ timer++;
+ } while ((curptr != stpptr) && (timer < 500));
+
+ W_REG(&pi->regs->psm_phy_hdr_param, 0x2);
+ strptr = 0x7E00;
+ W_REG(&pi->regs->tplatewrptr, strptr);
+ while (strptr < 0x8000) {
+ val = R_REG(&pi->regs->tplatewrdata);
+ imag = ((val >> 16) & 0x3ff);
+ real = ((val) & 0x3ff);
+ if (imag > 511)
+ imag -= 1024;
+
+ if (real > 511)
+ real -= 1024;
+
+ if (pi_lcn->lcnphy_iqcal_swp_dis)
+ ptr[(strptr - 0x7E00) / 4] = real;
+ else
+ ptr[(strptr - 0x7E00) / 4] = imag;
+
+ if (clip_detect_algo) {
+ if (imag > thresh || imag < -thresh) {
+ strptr = 0x8000;
+ ptr[130] = 1;
+ }
+ }
+
+ strptr += 4;
+ }
+
+ write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
+ W_REG(&pi->regs->psm_phy_hdr_param, curval2);
+ W_REG(&pi->regs->psm_corectlsts, curval1);
+}
+
+static void
+wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
+ int step_size_lg2)
+{
+ const struct lcnphy_spb_tone *phy_c1;
+ struct lcnphy_spb_tone phy_c2;
+ struct lcnphy_unsign16_struct phy_c3;
+ int phy_c4, phy_c5, k, l, j, phy_c6;
+ u16 phy_c7, phy_c8, phy_c9;
+ s16 phy_c10, phy_c11, phy_c12, phy_c13, phy_c14, phy_c15, phy_c16;
+ s16 *ptr, phy_c17;
+ s32 phy_c18, phy_c19;
+ u32 phy_c20, phy_c21;
+ bool phy_c22, phy_c23, phy_c24, phy_c25;
+ u16 phy_c26, phy_c27;
+ u16 phy_c28, phy_c29, phy_c30;
+ u16 phy_c31;
+ u16 *phy_c32;
+ phy_c21 = 0;
+ phy_c10 = phy_c13 = phy_c14 = phy_c8 = 0;
+ ptr = kmalloc(sizeof(s16) * 131, GFP_ATOMIC);
+ if (NULL == ptr)
+ return;
+
+ phy_c32 = kmalloc(sizeof(u16) * 20, GFP_ATOMIC);
+ if (NULL == phy_c32) {
+ kfree(ptr);
+ return;
+ }
+ phy_c26 = read_phy_reg(pi, 0x6da);
+ phy_c27 = read_phy_reg(pi, 0x6db);
+ phy_c31 = read_radio_reg(pi, RADIO_2064_REG026);
+ write_phy_reg(pi, 0x93d, 0xC0);
+
+ wlc_lcnphy_start_tx_tone(pi, 3750, 88, 0);
+ write_phy_reg(pi, 0x6da, 0xffff);
+ or_phy_reg(pi, 0x6db, 0x3);
+
+ wlc_lcnphy_tx_iqlo_loopback(pi, phy_c32);
+ udelay(500);
+ phy_c28 = read_phy_reg(pi, 0x938);
+ phy_c29 = read_phy_reg(pi, 0x4d7);
+ phy_c30 = read_phy_reg(pi, 0x4d8);
+ or_phy_reg(pi, 0x938, 0x1 << 2);
+ or_phy_reg(pi, 0x4d7, 0x1 << 2);
+ or_phy_reg(pi, 0x4d7, 0x1 << 3);
+ mod_phy_reg(pi, 0x4d7, (0x7 << 12), 0x2 << 12);
+ or_phy_reg(pi, 0x4d8, 1 << 0);
+ or_phy_reg(pi, 0x4d8, 1 << 1);
+ mod_phy_reg(pi, 0x4d8, (0x3ff << 2), 0x23A << 2);
+ mod_phy_reg(pi, 0x4d8, (0x7 << 12), 0x7 << 12);
+ phy_c1 = &lcnphy_spb_tone_3750[0];
+ phy_c4 = 32;
+
+ if (num_levels == 0) {
+ if (cal_type != 0)
+ num_levels = 4;
+ else
+ num_levels = 9;
+ }
+ if (step_size_lg2 == 0) {
+ if (cal_type != 0)
+ step_size_lg2 = 3;
+ else
+ step_size_lg2 = 8;
+ }
+
+ phy_c7 = (1 << step_size_lg2);
+ phy_c3 = wlc_lcnphy_get_cc(pi, cal_type);
+ phy_c15 = (s16) phy_c3.re;
+ phy_c16 = (s16) phy_c3.im;
+ if (cal_type == 2) {
+ if (phy_c3.re > 127)
+ phy_c15 = phy_c3.re - 256;
+ if (phy_c3.im > 127)
+ phy_c16 = phy_c3.im - 256;
+ }
+ wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
+ udelay(20);
+ for (phy_c8 = 0; phy_c7 != 0 && phy_c8 < num_levels; phy_c8++) {
+ phy_c23 = 1;
+ phy_c22 = 0;
+ switch (cal_type) {
+ case 0:
+ phy_c10 = 511;
+ break;
+ case 2:
+ phy_c10 = 127;
+ break;
+ case 3:
+ phy_c10 = 15;
+ break;
+ case 4:
+ phy_c10 = 15;
+ break;
+ }
+
+ phy_c9 = read_phy_reg(pi, 0x93d);
+ phy_c9 = 2 * phy_c9;
+ phy_c24 = 0;
+ phy_c5 = 7;
+ phy_c25 = 1;
+ while (1) {
+ write_radio_reg(pi, RADIO_2064_REG026,
+ (phy_c5 & 0x7) | ((phy_c5 & 0x7) << 4));
+ udelay(50);
+ phy_c22 = 0;
+ ptr[130] = 0;
+ wlc_lcnphy_samp_cap(pi, 1, phy_c9, &ptr[0], 2);
+ if (ptr[130] == 1)
+ phy_c22 = 1;
+ if (phy_c22)
+ phy_c5 -= 1;
+ if ((phy_c22 != phy_c24) && (!phy_c25))
+ break;
+ if (!phy_c22)
+ phy_c5 += 1;
+ if (phy_c5 <= 0 || phy_c5 >= 7)
+ break;
+ phy_c24 = phy_c22;
+ phy_c25 = 0;
+ }
+
+ if (phy_c5 < 0)
+ phy_c5 = 0;
+ else if (phy_c5 > 7)
+ phy_c5 = 7;
+
+ for (k = -phy_c7; k <= phy_c7; k += phy_c7) {
+ for (l = -phy_c7; l <= phy_c7; l += phy_c7) {
+ phy_c11 = phy_c15 + k;
+ phy_c12 = phy_c16 + l;
+
+ if (phy_c11 < -phy_c10)
+ phy_c11 = -phy_c10;
+ else if (phy_c11 > phy_c10)
+ phy_c11 = phy_c10;
+ if (phy_c12 < -phy_c10)
+ phy_c12 = -phy_c10;
+ else if (phy_c12 > phy_c10)
+ phy_c12 = phy_c10;
+ wlc_lcnphy_set_cc(pi, cal_type, phy_c11,
+ phy_c12);
+ udelay(20);
+ wlc_lcnphy_samp_cap(pi, 0, 0, ptr, 2);
+
+ phy_c18 = 0;
+ phy_c19 = 0;
+ for (j = 0; j < 128; j++) {
+ if (cal_type != 0)
+ phy_c6 = j % phy_c4;
+ else
+ phy_c6 = (2 * j) % phy_c4;
+
+ phy_c2.re = phy_c1[phy_c6].re;
+ phy_c2.im = phy_c1[phy_c6].im;
+ phy_c17 = ptr[j];
+ phy_c18 = phy_c18 + phy_c17 * phy_c2.re;
+ phy_c19 = phy_c19 + phy_c17 * phy_c2.im;
+ }
+
+ phy_c18 = phy_c18 >> 10;
+ phy_c19 = phy_c19 >> 10;
+ phy_c20 = ((phy_c18 * phy_c18) +
+ (phy_c19 * phy_c19));
+
+ if (phy_c23 || phy_c20 < phy_c21) {
+ phy_c21 = phy_c20;
+ phy_c13 = phy_c11;
+ phy_c14 = phy_c12;
+ }
+ phy_c23 = 0;
+ }
+ }
+ phy_c23 = 1;
+ phy_c15 = phy_c13;
+ phy_c16 = phy_c14;
+ phy_c7 = phy_c7 >> 1;
+ wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
+ udelay(20);
+ }
+ goto cleanup;
+cleanup:
+ wlc_lcnphy_tx_iqlo_loopback_cleanup(pi, phy_c32);
+ wlc_lcnphy_stop_tx_tone(pi);
+ write_phy_reg(pi, 0x6da, phy_c26);
+ write_phy_reg(pi, 0x6db, phy_c27);
+ write_phy_reg(pi, 0x938, phy_c28);
+ write_phy_reg(pi, 0x4d7, phy_c29);
+ write_phy_reg(pi, 0x4d8, phy_c30);
+ write_radio_reg(pi, RADIO_2064_REG026, phy_c31);
+
+ kfree(phy_c32);
+ kfree(ptr);
}
void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b)
@@ -2824,6 +3813,28 @@ void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b)
*b = iqcc[1];
}
+static void wlc_lcnphy_tx_iqlo_soft_cal_full(struct brcms_phy *pi)
+{
+ struct lcnphy_unsign16_struct iqcc0, locc2, locc3, locc4;
+
+ wlc_lcnphy_set_cc(pi, 0, 0, 0);
+ wlc_lcnphy_set_cc(pi, 2, 0, 0);
+ wlc_lcnphy_set_cc(pi, 3, 0, 0);
+ wlc_lcnphy_set_cc(pi, 4, 0, 0);
+
+ wlc_lcnphy_a1(pi, 4, 0, 0);
+ wlc_lcnphy_a1(pi, 3, 0, 0);
+ wlc_lcnphy_a1(pi, 2, 3, 2);
+ wlc_lcnphy_a1(pi, 0, 5, 8);
+ wlc_lcnphy_a1(pi, 2, 2, 1);
+ wlc_lcnphy_a1(pi, 0, 4, 3);
+
+ iqcc0 = wlc_lcnphy_get_cc(pi, 0);
+ locc2 = wlc_lcnphy_get_cc(pi, 2);
+ locc3 = wlc_lcnphy_get_cc(pi, 3);
+ locc4 = wlc_lcnphy_get_cc(pi, 4);
+}
+
u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi)
{
struct phytbl_info tab;
@@ -2878,12 +3889,11 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
lcnphy_recal ? LCNPHY_CAL_RECAL :
LCNPHY_CAL_FULL), false);
} else {
-
wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
}
wlc_lcnphy_get_radio_loft(pi, &ei0, &eq0, &fi0, &fq0);
- if ((ABS((s8) fi0) == 15) && (ABS((s8) fq0) == 15)) {
+ if ((abs((s8) fi0) == 15) && (abs((s8) fq0) == 15)) {
if (CHSPEC_IS5G(pi->radio_chanspec)) {
target_gains.gm_gain = 255;
target_gains.pga_gain = 255;
@@ -2905,10 +3915,8 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
wlc_lcnphy_tx_iqlo_cal(pi, &target_gains,
LCNPHY_CAL_FULL, false);
} else {
-
wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
}
-
}
wlc_lcnphy_get_tx_iqcc(pi, &a, &b);
@@ -2927,7 +3935,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
wlc_lcnphy_read_table(pi, &tab);
val = (val & 0xfff00000) |
- ((u32) (a & 0x3FF) << 10) | (b & 0x3ff);
+ ((u32) (a & 0x3FF) << 10) | (b & 0x3ff);
wlc_lcnphy_write_table(pi, &tab);
val = didq;
@@ -2959,13 +3967,10 @@ s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode)
s16 avg = 0;
bool suspend = 0;
- if (NORADIO_ENAB(pi->pubpi))
- return -1;
-
if (mode == 1) {
suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ (0 ==
+ (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -3006,13 +4011,10 @@ u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode)
u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- if (NORADIO_ENAB(pi->pubpi))
- return -1;
-
if (mode == 1) {
suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ (0 ==
+ (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -3063,8 +4065,9 @@ s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode)
{
s32 degree = wlc_lcnphy_tempsense_new(pi, mode);
degree =
- ((degree << 10) + LCN_TEMPSENSE_OFFSET + (LCN_TEMPSENSE_DEN >> 1))
- / LCN_TEMPSENSE_DEN;
+ ((degree <<
+ 10) + LCN_TEMPSENSE_OFFSET + (LCN_TEMPSENSE_DEN >> 1))
+ / LCN_TEMPSENSE_DEN;
return (s8) degree;
}
@@ -3074,13 +4077,10 @@ s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode)
s32 avg = 0;
bool suspend = 0;
- if (NORADIO_ENAB(pi->pubpi))
- return -1;
-
if (mode == 1) {
suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ (0 ==
+ (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, VBATSENSE);
@@ -3093,9 +4093,8 @@ s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode)
else
avg = (s32) vbatsenseval;
- avg =
- (avg * LCN_VBAT_SCALE_NOM +
- (LCN_VBAT_SCALE_DEN >> 1)) / LCN_VBAT_SCALE_DEN;
+ avg = (avg * LCN_VBAT_SCALE_NOM +
+ (LCN_VBAT_SCALE_DEN >> 1)) / LCN_VBAT_SCALE_DEN;
if (mode == 1) {
if (!suspend)
@@ -3118,312 +4117,8 @@ static void wlc_lcnphy_afe_clk_init(struct brcms_phy *pi, u8 mode)
wlc_lcnphy_toggle_afe_pwdn(pi);
}
-static bool
-wlc_lcnphy_rx_iq_est(struct brcms_phy *pi,
- u16 num_samps,
- u8 wait_time, struct lcnphy_iq_est *iq_est)
-{
- int wait_count = 0;
- bool result = true;
- u8 phybw40;
- phybw40 = CHSPEC_IS40(pi->radio_chanspec);
-
- mod_phy_reg(pi, 0x6da, (0x1 << 5), (1) << 5);
-
- mod_phy_reg(pi, 0x410, (0x1 << 3), (0) << 3);
-
- mod_phy_reg(pi, 0x482, (0xffff << 0), (num_samps) << 0);
-
- mod_phy_reg(pi, 0x481, (0xff << 0), ((u16) wait_time) << 0);
-
- mod_phy_reg(pi, 0x481, (0x1 << 8), (0) << 8);
-
- mod_phy_reg(pi, 0x481, (0x1 << 9), (1) << 9);
-
- while (read_phy_reg(pi, 0x481) & (0x1 << 9)) {
-
- if (wait_count > (10 * 500)) {
- result = false;
- goto cleanup;
- }
- udelay(100);
- wait_count++;
- }
-
- iq_est->iq_prod = ((u32) read_phy_reg(pi, 0x483) << 16) |
- (u32) read_phy_reg(pi, 0x484);
- iq_est->i_pwr = ((u32) read_phy_reg(pi, 0x485) << 16) |
- (u32) read_phy_reg(pi, 0x486);
- iq_est->q_pwr = ((u32) read_phy_reg(pi, 0x487) << 16) |
- (u32) read_phy_reg(pi, 0x488);
-
- cleanup:
- mod_phy_reg(pi, 0x410, (0x1 << 3), (1) << 3);
-
- mod_phy_reg(pi, 0x6da, (0x1 << 5), (0) << 5);
-
- return result;
-}
-
-static bool wlc_lcnphy_calc_rx_iq_comp(struct brcms_phy *pi, u16 num_samps)
-{
-#define LCNPHY_MIN_RXIQ_PWR 2
- bool result;
- u16 a0_new, b0_new;
- struct lcnphy_iq_est iq_est = { 0, 0, 0 };
- s32 a, b, temp;
- s16 iq_nbits, qq_nbits, arsh, brsh;
- s32 iq;
- u32 ii, qq;
- struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
-
- a0_new = ((read_phy_reg(pi, 0x645) & (0x3ff << 0)) >> 0);
- b0_new = ((read_phy_reg(pi, 0x646) & (0x3ff << 0)) >> 0);
- mod_phy_reg(pi, 0x6d1, (0x1 << 2), (0) << 2);
-
- mod_phy_reg(pi, 0x64b, (0x1 << 6), (1) << 6);
-
- wlc_lcnphy_set_rx_iq_comp(pi, 0, 0);
-
- result = wlc_lcnphy_rx_iq_est(pi, num_samps, 32, &iq_est);
- if (!result)
- goto cleanup;
-
- iq = (s32) iq_est.iq_prod;
- ii = iq_est.i_pwr;
- qq = iq_est.q_pwr;
-
- if ((ii + qq) < LCNPHY_MIN_RXIQ_PWR) {
- result = false;
- goto cleanup;
- }
-
- iq_nbits = wlc_phy_nbits(iq);
- qq_nbits = wlc_phy_nbits(qq);
-
- arsh = 10 - (30 - iq_nbits);
- if (arsh >= 0) {
- a = (-(iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
- temp = (s32) (ii >> arsh);
- if (temp == 0) {
- return false;
- }
- } else {
- a = (-(iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
- temp = (s32) (ii << -arsh);
- if (temp == 0) {
- return false;
- }
- }
- a /= temp;
- brsh = qq_nbits - 31 + 20;
- if (brsh >= 0) {
- b = (qq << (31 - qq_nbits));
- temp = (s32) (ii >> brsh);
- if (temp == 0) {
- return false;
- }
- } else {
- b = (qq << (31 - qq_nbits));
- temp = (s32) (ii << -brsh);
- if (temp == 0) {
- return false;
- }
- }
- b /= temp;
- b -= a * a;
- b = (s32) int_sqrt((unsigned long) b);
- b -= (1 << 10);
- a0_new = (u16) (a & 0x3ff);
- b0_new = (u16) (b & 0x3ff);
- cleanup:
-
- wlc_lcnphy_set_rx_iq_comp(pi, a0_new, b0_new);
-
- mod_phy_reg(pi, 0x64b, (0x1 << 0), (1) << 0);
-
- mod_phy_reg(pi, 0x64b, (0x1 << 3), (1) << 3);
-
- pi_lcn->lcnphy_cal_results.rxiqcal_coeff_a0 = a0_new;
- pi_lcn->lcnphy_cal_results.rxiqcal_coeff_b0 = b0_new;
-
- return result;
-}
-
-static bool
-wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
- const struct lcnphy_rx_iqcomp *iqcomp,
- int iqcomp_sz, bool tx_switch, bool rx_switch, int module,
- int tx_gain_idx)
-{
- struct lcnphy_txgains old_gains;
- u16 tx_pwr_ctrl;
- u8 tx_gain_index_old = 0;
- bool result = false, tx_gain_override_old = false;
- u16 i, Core1TxControl_old, RFOverride0_old,
- RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
- rfoverride3_old, rfoverride3val_old, rfoverride4_old,
- rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
- int tia_gain;
- u32 received_power, rx_pwr_threshold;
- u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
- u16 values_to_save[11];
- s16 *ptr;
- struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
-
- ptr = kmalloc(sizeof(s16) * 131, GFP_ATOMIC);
- if (NULL == ptr) {
- return false;
- }
- if (module == 2) {
- while (iqcomp_sz--) {
- if (iqcomp[iqcomp_sz].chan ==
- CHSPEC_CHANNEL(pi->radio_chanspec)) {
-
- wlc_lcnphy_set_rx_iq_comp(pi,
- (u16)
- iqcomp[iqcomp_sz].a,
- (u16)
- iqcomp[iqcomp_sz].b);
- result = true;
- break;
- }
- }
- goto cal_done;
- }
-
- if (module == 1) {
-
- tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
- wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
-
- for (i = 0; i < 11; i++) {
- values_to_save[i] =
- read_radio_reg(pi, rxiq_cal_rf_reg[i]);
- }
- Core1TxControl_old = read_phy_reg(pi, 0x631);
-
- or_phy_reg(pi, 0x631, 0x0015);
-
- RFOverride0_old = read_phy_reg(pi, 0x44c);
- RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
- rfoverride2_old = read_phy_reg(pi, 0x4b0);
- rfoverride2val_old = read_phy_reg(pi, 0x4b1);
- rfoverride3_old = read_phy_reg(pi, 0x4f9);
- rfoverride3val_old = read_phy_reg(pi, 0x4fa);
- rfoverride4_old = read_phy_reg(pi, 0x938);
- rfoverride4val_old = read_phy_reg(pi, 0x939);
- afectrlovr_old = read_phy_reg(pi, 0x43b);
- afectrlovrval_old = read_phy_reg(pi, 0x43c);
- old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
- old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
-
- tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
- if (tx_gain_override_old) {
- wlc_lcnphy_get_tx_gain(pi, &old_gains);
- tx_gain_index_old = pi_lcn->lcnphy_current_index;
- }
-
- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
-
- mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
-
- mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
-
- write_radio_reg(pi, RADIO_2064_REG116, 0x06);
- write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
- write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
- write_radio_reg(pi, RADIO_2064_REG098, 0x03);
- write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
- mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
- write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
- write_radio_reg(pi, RADIO_2064_REG114, 0x01);
- write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
- write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
-
- mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
- mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
- mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
- mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
- mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
- mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
- mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
-
- mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
-
- wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
- write_phy_reg(pi, 0x6da, 0xffff);
- or_phy_reg(pi, 0x6db, 0x3);
- wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
- wlc_lcnphy_rx_gain_override_enable(pi, true);
-
- tia_gain = 8;
- rx_pwr_threshold = 950;
- while (tia_gain > 0) {
- tia_gain -= 1;
- wlc_lcnphy_set_rx_gain_by_distribution(pi,
- 0, 0, 2, 2,
- (u16)
- tia_gain, 1, 0);
- udelay(500);
-
- received_power =
- wlc_lcnphy_measure_digital_power(pi, 2000);
- if (received_power < rx_pwr_threshold)
- break;
- }
- result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
-
- wlc_lcnphy_stop_tx_tone(pi);
-
- write_phy_reg(pi, 0x631, Core1TxControl_old);
-
- write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
- write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
- write_phy_reg(pi, 0x4b0, rfoverride2_old);
- write_phy_reg(pi, 0x4b1, rfoverride2val_old);
- write_phy_reg(pi, 0x4f9, rfoverride3_old);
- write_phy_reg(pi, 0x4fa, rfoverride3val_old);
- write_phy_reg(pi, 0x938, rfoverride4_old);
- write_phy_reg(pi, 0x939, rfoverride4val_old);
- write_phy_reg(pi, 0x43b, afectrlovr_old);
- write_phy_reg(pi, 0x43c, afectrlovrval_old);
- write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
- write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
-
- wlc_lcnphy_clear_trsw_override(pi);
-
- mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
-
- for (i = 0; i < 11; i++) {
- write_radio_reg(pi, rxiq_cal_rf_reg[i],
- values_to_save[i]);
- }
-
- if (tx_gain_override_old) {
- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
- } else
- wlc_lcnphy_disable_tx_gain_override(pi);
- wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
-
- wlc_lcnphy_rx_gain_override_enable(pi, false);
- }
-
- cal_done:
- kfree(ptr);
- return result;
-}
-
static void wlc_lcnphy_temp_adj(struct brcms_phy *pi)
{
- if (NORADIO_ENAB(pi->pubpi))
- return;
}
static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi)
@@ -3433,7 +4128,7 @@ static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi)
u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_deaf_mode(pi, true);
@@ -3463,24 +4158,21 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
s32 tssi, pwr, maxtargetpwr, mintargetpwr;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- if (NORADIO_ENAB(pi->pubpi))
- return;
-
pi->phy_lastcal = pi->sh->now;
pi->phy_forcecal = false;
full_cal =
- (pi_lcn->lcnphy_full_cal_channel !=
- CHSPEC_CHANNEL(pi->radio_chanspec));
+ (pi_lcn->lcnphy_full_cal_channel !=
+ CHSPEC_CHANNEL(pi->radio_chanspec));
pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
index = pi_lcn->lcnphy_current_index;
suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
if (!suspend) {
-
wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000);
wlapi_suspend_mac_and_wait(pi->sh->physhim);
}
+
wlc_lcnphy_deaf_mode(pi, true);
wlc_lcnphy_txpwrtbl_iqlo_cal(pi);
@@ -3531,7 +4223,6 @@ void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode)
switch (mode) {
case PHY_PERICAL_CHAN:
-
break;
case PHY_FULLCAL:
wlc_lcnphy_periodic_cal(pi);
@@ -3558,7 +4249,7 @@ void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode)
case LCNPHY_PERICAL_TEMPBASED_TXPWRCTRL:
if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
wlc_lcnphy_tx_power_adjustment(
- (struct brcms_phy_pub *) pi);
+ (struct brcms_phy_pub *) pi);
break;
}
}
@@ -3571,7 +4262,7 @@ void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr, s8 *cck_pwr)
if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi) &&
(status & (0x1 << 15))) {
*ofdm_pwr = (s8) (((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
- >> 0) >> 1);
+ >> 0) >> 1);
if (wlc_phy_tpc_isenabled_lcnphy(pi))
cck_offset = pi->tx_power_offset[TXP_FIRST_CCK];
@@ -3591,59 +4282,6 @@ void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi)
}
-static void
-wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, chanspec_t chanspec)
-{
- u8 channel = CHSPEC_CHANNEL(chanspec);
- struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
-
- if (NORADIO_ENAB(pi->pubpi))
- return;
-
- if (channel == 14) {
- mod_phy_reg(pi, 0x448, (0x3 << 8), (2) << 8);
-
- } else {
- mod_phy_reg(pi, 0x448, (0x3 << 8), (1) << 8);
-
- }
- pi_lcn->lcnphy_bandedge_corr = 2;
- if (channel == 1)
- pi_lcn->lcnphy_bandedge_corr = 4;
-
- if (channel == 1 || channel == 2 || channel == 3 ||
- channel == 4 || channel == 9 ||
- channel == 10 || channel == 11 || channel == 12) {
- si_pmu_pllcontrol(pi->sh->sih, 0x2, 0xffffffff, 0x03000c04);
- si_pmu_pllcontrol(pi->sh->sih, 0x3, 0xffffff, 0x0);
- si_pmu_pllcontrol(pi->sh->sih, 0x4, 0xffffffff, 0x200005c0);
-
- si_pmu_pllupd(pi->sh->sih);
- write_phy_reg(pi, 0x942, 0);
- wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
- pi_lcn->lcnphy_spurmod = 0;
- mod_phy_reg(pi, 0x424, (0xff << 8), (0x1b) << 8);
-
- write_phy_reg(pi, 0x425, 0x5907);
- } else {
- si_pmu_pllcontrol(pi->sh->sih, 0x2, 0xffffffff, 0x03140c04);
- si_pmu_pllcontrol(pi->sh->sih, 0x3, 0xffffff, 0x333333);
- si_pmu_pllcontrol(pi->sh->sih, 0x4, 0xffffffff, 0x202c2820);
-
- si_pmu_pllupd(pi->sh->sih);
- write_phy_reg(pi, 0x942, 0);
- wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);
-
- pi_lcn->lcnphy_spurmod = 0;
- mod_phy_reg(pi, 0x424, (0xff << 8), (0x1f) << 8);
-
- write_phy_reg(pi, 0x425, 0x590a);
- }
-
- or_phy_reg(pi, 0x44a, 0x44);
- write_phy_reg(pi, 0x44a, 0x80);
-}
-
void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi)
{
s8 index;
@@ -3651,570 +4289,21 @@ void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi)
struct brcms_phy *pi = (struct brcms_phy *) ppi;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
- if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi) && SAVE_txpwrctrl) {
+ if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi) &&
+ SAVE_txpwrctrl) {
index = wlc_lcnphy_tempcompensated_txpwrctrl(pi);
index2 = (u16) (index * 2);
mod_phy_reg(pi, 0x4a9, (0x1ff << 0), (index2) << 0);
- pi_lcn->lcnphy_current_index = (s8)
- ((read_phy_reg(pi, 0x4a9) & 0xFF) / 2);
- }
-}
-
-static void wlc_lcnphy_set_rx_iq_comp(struct brcms_phy *pi, u16 a, u16 b)
-{
- mod_phy_reg(pi, 0x645, (0x3ff << 0), (a) << 0);
-
- mod_phy_reg(pi, 0x646, (0x3ff << 0), (b) << 0);
-
- mod_phy_reg(pi, 0x647, (0x3ff << 0), (a) << 0);
-
- mod_phy_reg(pi, 0x648, (0x3ff << 0), (b) << 0);
-
- mod_phy_reg(pi, 0x649, (0x3ff << 0), (a) << 0);
-
- mod_phy_reg(pi, 0x64a, (0x3ff << 0), (b) << 0);
-
-}
-
-void wlc_phy_init_lcnphy(struct brcms_phy *pi)
-{
- u8 phybw40;
- struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- phybw40 = CHSPEC_IS40(pi->radio_chanspec);
-
- pi_lcn->lcnphy_cal_counter = 0;
- pi_lcn->lcnphy_cal_temper = pi_lcn->lcnphy_rawtempsense;
-
- or_phy_reg(pi, 0x44a, 0x80);
- and_phy_reg(pi, 0x44a, 0x7f);
-
- wlc_lcnphy_afe_clk_init(pi, AFE_CLK_INIT_MODE_TXRX2X);
-
- write_phy_reg(pi, 0x60a, 160);
-
- write_phy_reg(pi, 0x46a, 25);
-
- wlc_lcnphy_baseband_init(pi);
-
- wlc_lcnphy_radio_init(pi);
-
- if (CHSPEC_IS2G(pi->radio_chanspec))
- wlc_lcnphy_tx_pwr_ctrl_init((struct brcms_phy_pub *) pi);
-
- wlc_phy_chanspec_set((struct brcms_phy_pub *) pi, pi->radio_chanspec);
-
- si_pmu_regcontrol(pi->sh->sih, 0, 0xf, 0x9);
-
- si_pmu_chipcontrol(pi->sh->sih, 0, 0xffffffff, 0x03CDDDDD);
-
- if ((pi->sh->boardflags & BFL_FEM)
- && wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
- wlc_lcnphy_set_tx_pwr_by_index(pi, FIXED_TXPWR);
-
- wlc_lcnphy_agc_temp_init(pi);
-
- wlc_lcnphy_temp_adj(pi);
-
- mod_phy_reg(pi, 0x448, (0x1 << 14), (1) << 14);
-
- udelay(100);
- mod_phy_reg(pi, 0x448, (0x1 << 14), (0) << 14);
-
- wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_HW);
- pi_lcn->lcnphy_noise_samples = LCNPHY_NOISE_SAMPLES_DEFAULT;
- wlc_lcnphy_calib_modes(pi, PHY_PERICAL_PHYINIT);
-}
-
-static void
-wlc_lcnphy_tx_iqlo_loopback(struct brcms_phy *pi, u16 *values_to_save)
-{
- u16 vmid;
- int i;
- for (i = 0; i < 20; i++) {
- values_to_save[i] =
- read_radio_reg(pi, iqlo_loopback_rf_regs[i]);
- }
-
- mod_phy_reg(pi, 0x44c, (0x1 << 12), 1 << 12);
- mod_phy_reg(pi, 0x44d, (0x1 << 14), 1 << 14);
-
- mod_phy_reg(pi, 0x44c, (0x1 << 11), 1 << 11);
- mod_phy_reg(pi, 0x44d, (0x1 << 13), 0 << 13);
-
- mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
-
- mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
-
- if (LCNREV_IS(pi->pubpi.phy_rev, 2))
- and_radio_reg(pi, RADIO_2064_REG03A, 0xFD);
- else
- and_radio_reg(pi, RADIO_2064_REG03A, 0xF9);
- or_radio_reg(pi, RADIO_2064_REG11A, 0x1);
-
- or_radio_reg(pi, RADIO_2064_REG036, 0x01);
- or_radio_reg(pi, RADIO_2064_REG11A, 0x18);
- udelay(20);
-
- if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
- if (CHSPEC_IS5G(pi->radio_chanspec))
- mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0);
- else
- or_radio_reg(pi, RADIO_2064_REG03A, 1);
- } else {
- if (CHSPEC_IS5G(pi->radio_chanspec))
- mod_radio_reg(pi, RADIO_2064_REG03A, 3, 1);
- else
- or_radio_reg(pi, RADIO_2064_REG03A, 0x3);
- }
-
- udelay(20);
-
- write_radio_reg(pi, RADIO_2064_REG025, 0xF);
- if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
- if (CHSPEC_IS5G(pi->radio_chanspec))
- mod_radio_reg(pi, RADIO_2064_REG028, 0xF, 0x4);
- else
- mod_radio_reg(pi, RADIO_2064_REG028, 0xF, 0x6);
- } else {
- if (CHSPEC_IS5G(pi->radio_chanspec))
- mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0x4 << 1);
- else
- mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0x6 << 1);
- }
-
- udelay(20);
-
- write_radio_reg(pi, RADIO_2064_REG005, 0x8);
- or_radio_reg(pi, RADIO_2064_REG112, 0x80);
- udelay(20);
-
- or_radio_reg(pi, RADIO_2064_REG0FF, 0x10);
- or_radio_reg(pi, RADIO_2064_REG11F, 0x44);
- udelay(20);
-
- or_radio_reg(pi, RADIO_2064_REG00B, 0x7);
- or_radio_reg(pi, RADIO_2064_REG113, 0x10);
- udelay(20);
-
- write_radio_reg(pi, RADIO_2064_REG007, 0x1);
- udelay(20);
-
- vmid = 0x2A6;
- mod_radio_reg(pi, RADIO_2064_REG0FC, 0x3 << 0, (vmid >> 8) & 0x3);
- write_radio_reg(pi, RADIO_2064_REG0FD, (vmid & 0xff));
- or_radio_reg(pi, RADIO_2064_REG11F, 0x44);
- udelay(20);
-
- or_radio_reg(pi, RADIO_2064_REG0FF, 0x10);
- udelay(20);
- write_radio_reg(pi, RADIO_2064_REG012, 0x02);
- or_radio_reg(pi, RADIO_2064_REG112, 0x06);
- write_radio_reg(pi, RADIO_2064_REG036, 0x11);
- write_radio_reg(pi, RADIO_2064_REG059, 0xcc);
- write_radio_reg(pi, RADIO_2064_REG05C, 0x2e);
- write_radio_reg(pi, RADIO_2064_REG078, 0xd7);
- write_radio_reg(pi, RADIO_2064_REG092, 0x15);
-}
-
-static void
-wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
- s16 *ptr, int mode)
-{
- u32 curval1, curval2, stpptr, curptr, strptr, val;
- u16 sslpnCalibClkEnCtrl, timer;
- u16 old_sslpnCalibClkEnCtrl;
- s16 imag, real;
- struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
-
- timer = 0;
- old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
-
- curval1 = R_REG(&pi->regs->psm_corectlsts);
- ptr[130] = 0;
- W_REG(&pi->regs->psm_corectlsts, ((1 << 6) | curval1));
-
- W_REG(&pi->regs->smpl_clct_strptr, 0x7E00);
- W_REG(&pi->regs->smpl_clct_stpptr, 0x8000);
- udelay(20);
- curval2 = R_REG(&pi->regs->psm_phy_hdr_param);
- W_REG(&pi->regs->psm_phy_hdr_param, curval2 | 0x30);
-
- write_phy_reg(pi, 0x555, 0x0);
- write_phy_reg(pi, 0x5a6, 0x5);
-
- write_phy_reg(pi, 0x5a2, (u16) (mode | mode << 6));
- write_phy_reg(pi, 0x5cf, 3);
- write_phy_reg(pi, 0x5a5, 0x3);
- write_phy_reg(pi, 0x583, 0x0);
- write_phy_reg(pi, 0x584, 0x0);
- write_phy_reg(pi, 0x585, 0x0fff);
- write_phy_reg(pi, 0x586, 0x0000);
-
- write_phy_reg(pi, 0x580, 0x4501);
-
- sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
- write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008));
- stpptr = R_REG(&pi->regs->smpl_clct_stpptr);
- curptr = R_REG(&pi->regs->smpl_clct_curptr);
- do {
- udelay(10);
- curptr = R_REG(&pi->regs->smpl_clct_curptr);
- timer++;
- } while ((curptr != stpptr) && (timer < 500));
-
- W_REG(&pi->regs->psm_phy_hdr_param, 0x2);
- strptr = 0x7E00;
- W_REG(&pi->regs->tplatewrptr, strptr);
- while (strptr < 0x8000) {
- val = R_REG(&pi->regs->tplatewrdata);
- imag = ((val >> 16) & 0x3ff);
- real = ((val) & 0x3ff);
- if (imag > 511) {
- imag -= 1024;
- }
- if (real > 511) {
- real -= 1024;
- }
- if (pi_lcn->lcnphy_iqcal_swp_dis)
- ptr[(strptr - 0x7E00) / 4] = real;
- else
- ptr[(strptr - 0x7E00) / 4] = imag;
- if (clip_detect_algo) {
- if (imag > thresh || imag < -thresh) {
- strptr = 0x8000;
- ptr[130] = 1;
- }
- }
- strptr += 4;
- }
-
- write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
- W_REG(&pi->regs->psm_phy_hdr_param, curval2);
- W_REG(&pi->regs->psm_corectlsts, curval1);
-}
-
-static void wlc_lcnphy_tx_iqlo_soft_cal_full(struct brcms_phy *pi)
-{
- struct lcnphy_unsign16_struct iqcc0, locc2, locc3, locc4;
-
- wlc_lcnphy_set_cc(pi, 0, 0, 0);
- wlc_lcnphy_set_cc(pi, 2, 0, 0);
- wlc_lcnphy_set_cc(pi, 3, 0, 0);
- wlc_lcnphy_set_cc(pi, 4, 0, 0);
-
- wlc_lcnphy_a1(pi, 4, 0, 0);
- wlc_lcnphy_a1(pi, 3, 0, 0);
- wlc_lcnphy_a1(pi, 2, 3, 2);
- wlc_lcnphy_a1(pi, 0, 5, 8);
- wlc_lcnphy_a1(pi, 2, 2, 1);
- wlc_lcnphy_a1(pi, 0, 4, 3);
-
- iqcc0 = wlc_lcnphy_get_cc(pi, 0);
- locc2 = wlc_lcnphy_get_cc(pi, 2);
- locc3 = wlc_lcnphy_get_cc(pi, 3);
- locc4 = wlc_lcnphy_get_cc(pi, 4);
-}
-
-static void
-wlc_lcnphy_set_cc(struct brcms_phy *pi, int cal_type, s16 coeff_x, s16 coeff_y)
-{
- u16 di0dq0;
- u16 x, y, data_rf;
- int k;
- switch (cal_type) {
- case 0:
- wlc_lcnphy_set_tx_iqcc(pi, coeff_x, coeff_y);
- break;
- case 2:
- di0dq0 = (coeff_x & 0xff) << 8 | (coeff_y & 0xff);
- wlc_lcnphy_set_tx_locc(pi, di0dq0);
- break;
- case 3:
- k = wlc_lcnphy_calc_floor(coeff_x, 0);
- y = 8 + k;
- k = wlc_lcnphy_calc_floor(coeff_x, 1);
- x = 8 - k;
- data_rf = (x * 16 + y);
- write_radio_reg(pi, RADIO_2064_REG089, data_rf);
- k = wlc_lcnphy_calc_floor(coeff_y, 0);
- y = 8 + k;
- k = wlc_lcnphy_calc_floor(coeff_y, 1);
- x = 8 - k;
- data_rf = (x * 16 + y);
- write_radio_reg(pi, RADIO_2064_REG08A, data_rf);
- break;
- case 4:
- k = wlc_lcnphy_calc_floor(coeff_x, 0);
- y = 8 + k;
- k = wlc_lcnphy_calc_floor(coeff_x, 1);
- x = 8 - k;
- data_rf = (x * 16 + y);
- write_radio_reg(pi, RADIO_2064_REG08B, data_rf);
- k = wlc_lcnphy_calc_floor(coeff_y, 0);
- y = 8 + k;
- k = wlc_lcnphy_calc_floor(coeff_y, 1);
- x = 8 - k;
- data_rf = (x * 16 + y);
- write_radio_reg(pi, RADIO_2064_REG08C, data_rf);
- break;
- }
-}
-
-static struct lcnphy_unsign16_struct
-wlc_lcnphy_get_cc(struct brcms_phy *pi, int cal_type)
-{
- u16 a, b, didq;
- u8 di0, dq0, ei, eq, fi, fq;
- struct lcnphy_unsign16_struct cc;
- cc.re = 0;
- cc.im = 0;
- switch (cal_type) {
- case 0:
- wlc_lcnphy_get_tx_iqcc(pi, &a, &b);
- cc.re = a;
- cc.im = b;
- break;
- case 2:
- didq = wlc_lcnphy_get_tx_locc(pi);
- di0 = (((didq & 0xff00) << 16) >> 24);
- dq0 = (((didq & 0x00ff) << 24) >> 24);
- cc.re = (u16) di0;
- cc.im = (u16) dq0;
- break;
- case 3:
- wlc_lcnphy_get_radio_loft(pi, &ei, &eq, &fi, &fq);
- cc.re = (u16) ei;
- cc.im = (u16) eq;
- break;
- case 4:
- wlc_lcnphy_get_radio_loft(pi, &ei, &eq, &fi, &fq);
- cc.re = (u16) fi;
- cc.im = (u16) fq;
- break;
- }
- return cc;
-}
-
-static void
-wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
- int step_size_lg2)
-{
- const struct lcnphy_spb_tone *phy_c1;
- struct lcnphy_spb_tone phy_c2;
- struct lcnphy_unsign16_struct phy_c3;
- int phy_c4, phy_c5, k, l, j, phy_c6;
- u16 phy_c7, phy_c8, phy_c9;
- s16 phy_c10, phy_c11, phy_c12, phy_c13, phy_c14, phy_c15, phy_c16;
- s16 *ptr, phy_c17;
- s32 phy_c18, phy_c19;
- u32 phy_c20, phy_c21;
- bool phy_c22, phy_c23, phy_c24, phy_c25;
- u16 phy_c26, phy_c27;
- u16 phy_c28, phy_c29, phy_c30;
- u16 phy_c31;
- u16 *phy_c32;
- phy_c21 = 0;
- phy_c10 = phy_c13 = phy_c14 = phy_c8 = 0;
- ptr = kmalloc(sizeof(s16) * 131, GFP_ATOMIC);
- if (NULL == ptr) {
- return;
- }
-
- phy_c32 = kmalloc(sizeof(u16) * 20, GFP_ATOMIC);
- if (NULL == phy_c32) {
- kfree(ptr);
- return;
- }
- phy_c26 = read_phy_reg(pi, 0x6da);
- phy_c27 = read_phy_reg(pi, 0x6db);
- phy_c31 = read_radio_reg(pi, RADIO_2064_REG026);
- write_phy_reg(pi, 0x93d, 0xC0);
-
- wlc_lcnphy_start_tx_tone(pi, 3750, 88, 0);
- write_phy_reg(pi, 0x6da, 0xffff);
- or_phy_reg(pi, 0x6db, 0x3);
-
- wlc_lcnphy_tx_iqlo_loopback(pi, phy_c32);
- udelay(500);
- phy_c28 = read_phy_reg(pi, 0x938);
- phy_c29 = read_phy_reg(pi, 0x4d7);
- phy_c30 = read_phy_reg(pi, 0x4d8);
- or_phy_reg(pi, 0x938, 0x1 << 2);
- or_phy_reg(pi, 0x4d7, 0x1 << 2);
- or_phy_reg(pi, 0x4d7, 0x1 << 3);
- mod_phy_reg(pi, 0x4d7, (0x7 << 12), 0x2 << 12);
- or_phy_reg(pi, 0x4d8, 1 << 0);
- or_phy_reg(pi, 0x4d8, 1 << 1);
- mod_phy_reg(pi, 0x4d8, (0x3ff << 2), 0x23A << 2);
- mod_phy_reg(pi, 0x4d8, (0x7 << 12), 0x7 << 12);
- phy_c1 = &lcnphy_spb_tone_3750[0];
- phy_c4 = 32;
-
- if (num_levels == 0) {
- if (cal_type != 0) {
- num_levels = 4;
- } else {
- num_levels = 9;
- }
- }
- if (step_size_lg2 == 0) {
- if (cal_type != 0) {
- step_size_lg2 = 3;
- } else {
- step_size_lg2 = 8;
- }
- }
-
- phy_c7 = (1 << step_size_lg2);
- phy_c3 = wlc_lcnphy_get_cc(pi, cal_type);
- phy_c15 = (s16) phy_c3.re;
- phy_c16 = (s16) phy_c3.im;
- if (cal_type == 2) {
- if (phy_c3.re > 127)
- phy_c15 = phy_c3.re - 256;
- if (phy_c3.im > 127)
- phy_c16 = phy_c3.im - 256;
- }
- wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
- udelay(20);
- for (phy_c8 = 0; phy_c7 != 0 && phy_c8 < num_levels; phy_c8++) {
- phy_c23 = 1;
- phy_c22 = 0;
- switch (cal_type) {
- case 0:
- phy_c10 = 511;
- break;
- case 2:
- phy_c10 = 127;
- break;
- case 3:
- phy_c10 = 15;
- break;
- case 4:
- phy_c10 = 15;
- break;
- }
-
- phy_c9 = read_phy_reg(pi, 0x93d);
- phy_c9 = 2 * phy_c9;
- phy_c24 = 0;
- phy_c5 = 7;
- phy_c25 = 1;
- while (1) {
- write_radio_reg(pi, RADIO_2064_REG026,
- (phy_c5 & 0x7) | ((phy_c5 & 0x7) << 4));
- udelay(50);
- phy_c22 = 0;
- ptr[130] = 0;
- wlc_lcnphy_samp_cap(pi, 1, phy_c9, &ptr[0], 2);
- if (ptr[130] == 1)
- phy_c22 = 1;
- if (phy_c22)
- phy_c5 -= 1;
- if ((phy_c22 != phy_c24) && (!phy_c25))
- break;
- if (!phy_c22)
- phy_c5 += 1;
- if (phy_c5 <= 0 || phy_c5 >= 7)
- break;
- phy_c24 = phy_c22;
- phy_c25 = 0;
- }
-
- if (phy_c5 < 0)
- phy_c5 = 0;
- else if (phy_c5 > 7)
- phy_c5 = 7;
-
- for (k = -phy_c7; k <= phy_c7; k += phy_c7) {
- for (l = -phy_c7; l <= phy_c7; l += phy_c7) {
- phy_c11 = phy_c15 + k;
- phy_c12 = phy_c16 + l;
-
- if (phy_c11 < -phy_c10)
- phy_c11 = -phy_c10;
- else if (phy_c11 > phy_c10)
- phy_c11 = phy_c10;
- if (phy_c12 < -phy_c10)
- phy_c12 = -phy_c10;
- else if (phy_c12 > phy_c10)
- phy_c12 = phy_c10;
- wlc_lcnphy_set_cc(pi, cal_type, phy_c11,
- phy_c12);
- udelay(20);
- wlc_lcnphy_samp_cap(pi, 0, 0, ptr, 2);
-
- phy_c18 = 0;
- phy_c19 = 0;
- for (j = 0; j < 128; j++) {
- if (cal_type != 0) {
- phy_c6 = j % phy_c4;
- } else {
- phy_c6 = (2 * j) % phy_c4;
- }
- phy_c2.re = phy_c1[phy_c6].re;
- phy_c2.im = phy_c1[phy_c6].im;
- phy_c17 = ptr[j];
- phy_c18 = phy_c18 + phy_c17 * phy_c2.re;
- phy_c19 = phy_c19 + phy_c17 * phy_c2.im;
- }
-
- phy_c18 = phy_c18 >> 10;
- phy_c19 = phy_c19 >> 10;
- phy_c20 =
- ((phy_c18 * phy_c18) + (phy_c19 * phy_c19));
-
- if (phy_c23 || phy_c20 < phy_c21) {
- phy_c21 = phy_c20;
- phy_c13 = phy_c11;
- phy_c14 = phy_c12;
- }
- phy_c23 = 0;
- }
- }
- phy_c23 = 1;
- phy_c15 = phy_c13;
- phy_c16 = phy_c14;
- phy_c7 = phy_c7 >> 1;
- wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
- udelay(20);
- }
- goto cleanup;
- cleanup:
- wlc_lcnphy_tx_iqlo_loopback_cleanup(pi, phy_c32);
- wlc_lcnphy_stop_tx_tone(pi);
- write_phy_reg(pi, 0x6da, phy_c26);
- write_phy_reg(pi, 0x6db, phy_c27);
- write_phy_reg(pi, 0x938, phy_c28);
- write_phy_reg(pi, 0x4d7, phy_c29);
- write_phy_reg(pi, 0x4d8, phy_c30);
- write_radio_reg(pi, RADIO_2064_REG026, phy_c31);
-
- kfree(phy_c32);
- kfree(ptr);
-}
-
-static void
-wlc_lcnphy_tx_iqlo_loopback_cleanup(struct brcms_phy *pi, u16 *values_to_save)
-{
- int i;
-
- and_phy_reg(pi, 0x44c, 0x0 >> 11);
-
- and_phy_reg(pi, 0x43b, 0xC);
-
- for (i = 0; i < 20; i++) {
- write_radio_reg(pi, iqlo_loopback_rf_regs[i],
- values_to_save[i]);
+ pi_lcn->lcnphy_current_index =
+ (s8)((read_phy_reg(pi, 0x4a9) & 0xFF) / 2);
}
}
static void
wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
- const struct lcnphy_tx_gain_tbl_entry *gain_table) {
+ const struct lcnphy_tx_gain_tbl_entry *gain_table)
+{
u32 j;
struct phytbl_info tab;
u32 val;
@@ -4297,6 +4386,172 @@ static void wlc_lcnphy_load_rfpower(struct brcms_phy *pi)
}
}
+static void wlc_lcnphy_bu_tweaks(struct brcms_phy *pi)
+{
+ or_phy_reg(pi, 0x805, 0x1);
+
+ mod_phy_reg(pi, 0x42f, (0x7 << 0), (0x3) << 0);
+
+ mod_phy_reg(pi, 0x030, (0x7 << 0), (0x3) << 0);
+
+ write_phy_reg(pi, 0x414, 0x1e10);
+ write_phy_reg(pi, 0x415, 0x0640);
+
+ mod_phy_reg(pi, 0x4df, (0xff << 8), -9 << 8);
+
+ or_phy_reg(pi, 0x44a, 0x44);
+ write_phy_reg(pi, 0x44a, 0x80);
+ mod_phy_reg(pi, 0x434, (0xff << 0), (0xFD) << 0);
+
+ mod_phy_reg(pi, 0x420, (0xff << 0), (16) << 0);
+
+ if (!(pi->sh->boardrev < 0x1204))
+ mod_radio_reg(pi, RADIO_2064_REG09B, 0xF0, 0xF0);
+
+ write_phy_reg(pi, 0x7d6, 0x0902);
+ mod_phy_reg(pi, 0x429, (0xf << 0), (0x9) << 0);
+
+ mod_phy_reg(pi, 0x429, (0x3f << 4), (0xe) << 4);
+
+ if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
+ mod_phy_reg(pi, 0x423, (0xff << 0), (0x46) << 0);
+
+ mod_phy_reg(pi, 0x411, (0xff << 0), (1) << 0);
+
+ mod_phy_reg(pi, 0x434, (0xff << 0), (0xFF) << 0);
+
+ mod_phy_reg(pi, 0x656, (0xf << 0), (2) << 0);
+
+ mod_phy_reg(pi, 0x44d, (0x1 << 2), (1) << 2);
+
+ mod_radio_reg(pi, RADIO_2064_REG0F7, 0x4, 0x4);
+ mod_radio_reg(pi, RADIO_2064_REG0F1, 0x3, 0);
+ mod_radio_reg(pi, RADIO_2064_REG0F2, 0xF8, 0x90);
+ mod_radio_reg(pi, RADIO_2064_REG0F3, 0x3, 0x2);
+ mod_radio_reg(pi, RADIO_2064_REG0F3, 0xf0, 0xa0);
+
+ mod_radio_reg(pi, RADIO_2064_REG11F, 0x2, 0x2);
+
+ wlc_lcnphy_clear_tx_power_offsets(pi);
+ mod_phy_reg(pi, 0x4d0, (0x1ff << 6), (10) << 6);
+
+ }
+}
+
+static void wlc_lcnphy_rcal(struct brcms_phy *pi)
+{
+ u8 rcal_value;
+
+ and_radio_reg(pi, RADIO_2064_REG05B, 0xfD);
+
+ or_radio_reg(pi, RADIO_2064_REG004, 0x40);
+ or_radio_reg(pi, RADIO_2064_REG120, 0x10);
+
+ or_radio_reg(pi, RADIO_2064_REG078, 0x80);
+ or_radio_reg(pi, RADIO_2064_REG129, 0x02);
+
+ or_radio_reg(pi, RADIO_2064_REG057, 0x01);
+
+ or_radio_reg(pi, RADIO_2064_REG05B, 0x02);
+ mdelay(5);
+ SPINWAIT(!wlc_radio_2064_rcal_done(pi), 10 * 1000 * 1000);
+
+ if (wlc_radio_2064_rcal_done(pi)) {
+ rcal_value = (u8) read_radio_reg(pi, RADIO_2064_REG05C);
+ rcal_value = rcal_value & 0x1f;
+ }
+
+ and_radio_reg(pi, RADIO_2064_REG05B, 0xfD);
+
+ and_radio_reg(pi, RADIO_2064_REG057, 0xFE);
+}
+
+static void wlc_lcnphy_rc_cal(struct brcms_phy *pi)
+{
+ u8 dflt_rc_cal_val;
+ u16 flt_val;
+
+ dflt_rc_cal_val = 7;
+ if (LCNREV_IS(pi->pubpi.phy_rev, 1))
+ dflt_rc_cal_val = 11;
+ flt_val =
+ (dflt_rc_cal_val << 10) | (dflt_rc_cal_val << 5) |
+ (dflt_rc_cal_val);
+ write_phy_reg(pi, 0x933, flt_val);
+ write_phy_reg(pi, 0x934, flt_val);
+ write_phy_reg(pi, 0x935, flt_val);
+ write_phy_reg(pi, 0x936, flt_val);
+ write_phy_reg(pi, 0x937, (flt_val & 0x1FF));
+
+ return;
+}
+
+static void wlc_radio_2064_init(struct brcms_phy *pi)
+{
+ u32 i;
+ const struct lcnphy_radio_regs *lcnphyregs = NULL;
+
+ lcnphyregs = lcnphy_radio_regs_2064;
+
+ for (i = 0; lcnphyregs[i].address != 0xffff; i++)
+ if (CHSPEC_IS5G(pi->radio_chanspec) && lcnphyregs[i].do_init_a)
+ write_radio_reg(pi,
+ ((lcnphyregs[i].address & 0x3fff) |
+ RADIO_DEFAULT_CORE),
+ (u16) lcnphyregs[i].init_a);
+ else if (lcnphyregs[i].do_init_g)
+ write_radio_reg(pi,
+ ((lcnphyregs[i].address & 0x3fff) |
+ RADIO_DEFAULT_CORE),
+ (u16) lcnphyregs[i].init_g);
+
+ write_radio_reg(pi, RADIO_2064_REG032, 0x62);
+ write_radio_reg(pi, RADIO_2064_REG033, 0x19);
+
+ write_radio_reg(pi, RADIO_2064_REG090, 0x10);
+
+ write_radio_reg(pi, RADIO_2064_REG010, 0x00);
+
+ if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
+
+ write_radio_reg(pi, RADIO_2064_REG060, 0x7f);
+ write_radio_reg(pi, RADIO_2064_REG061, 0x72);
+ write_radio_reg(pi, RADIO_2064_REG062, 0x7f);
+ }
+
+ write_radio_reg(pi, RADIO_2064_REG01D, 0x02);
+ write_radio_reg(pi, RADIO_2064_REG01E, 0x06);
+
+ mod_phy_reg(pi, 0x4ea, (0x7 << 0), 0 << 0);
+
+ mod_phy_reg(pi, 0x4ea, (0x7 << 3), 1 << 3);
+
+ mod_phy_reg(pi, 0x4ea, (0x7 << 6), 2 << 6);
+
+ mod_phy_reg(pi, 0x4ea, (0x7 << 9), 3 << 9);
+
+ mod_phy_reg(pi, 0x4ea, (0x7 << 12), 4 << 12);
+
+ write_phy_reg(pi, 0x4ea, 0x4688);
+
+ mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
+
+ mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
+
+ mod_phy_reg(pi, 0x46a, (0xffff << 0), 25 << 0);
+
+ wlc_lcnphy_set_tx_locc(pi, 0);
+
+ wlc_lcnphy_rcal(pi);
+
+ wlc_lcnphy_rc_cal(pi);
+}
+
+static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
+{
+ wlc_radio_2064_init(pi);
+}
+
static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
{
uint idx;
@@ -4306,9 +4561,8 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
phybw40 = CHSPEC_IS40(pi->radio_chanspec);
- for (idx = 0; idx < dot11lcnphytbl_info_sz_rev0; idx++) {
+ for (idx = 0; idx < dot11lcnphytbl_info_sz_rev0; idx++)
wlc_lcnphy_write_table(pi, &dot11lcnphytbl_info_rev0[idx]);
- }
if (pi->sh->boardflags & BFL_FEM_BT) {
tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
@@ -4339,39 +4593,35 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
if (CHSPEC_IS2G(pi->radio_chanspec)) {
if (pi->sh->boardflags & BFL_FEM)
- wlc_lcnphy_load_tx_gain_table(pi,
- dot11lcnphy_2GHz_extPA_gaintable_rev0);
+ wlc_lcnphy_load_tx_gain_table(
+ pi,
+ dot11lcnphy_2GHz_extPA_gaintable_rev0);
else
- wlc_lcnphy_load_tx_gain_table(pi,
- dot11lcnphy_2GHz_gaintable_rev0);
+ wlc_lcnphy_load_tx_gain_table(
+ pi,
+ dot11lcnphy_2GHz_gaintable_rev0);
}
if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
+ const struct phytbl_info *tb;
+ int l;
+
if (CHSPEC_IS2G(pi->radio_chanspec)) {
- for (idx = 0;
- idx < dot11lcnphytbl_rx_gain_info_2G_rev2_sz;
- idx++)
- if (pi->sh->boardflags & BFL_EXTLNA)
- wlc_lcnphy_write_table(pi,
- &dot11lcnphytbl_rx_gain_info_extlna_2G_rev2
- [idx]);
- else
- wlc_lcnphy_write_table(pi,
- &dot11lcnphytbl_rx_gain_info_2G_rev2
- [idx]);
+ l = dot11lcnphytbl_rx_gain_info_2G_rev2_sz;
+ if (pi->sh->boardflags & BFL_EXTLNA)
+ tb = dot11lcnphytbl_rx_gain_info_extlna_2G_rev2;
+ else
+ tb = dot11lcnphytbl_rx_gain_info_2G_rev2;
} else {
- for (idx = 0;
- idx < dot11lcnphytbl_rx_gain_info_5G_rev2_sz;
- idx++)
- if (pi->sh->boardflags & BFL_EXTLNA_5GHz)
- wlc_lcnphy_write_table(pi,
- &dot11lcnphytbl_rx_gain_info_extlna_5G_rev2
- [idx]);
- else
- wlc_lcnphy_write_table(pi,
- &dot11lcnphytbl_rx_gain_info_5G_rev2
- [idx]);
+ l = dot11lcnphytbl_rx_gain_info_5G_rev2_sz;
+ if (pi->sh->boardflags & BFL_EXTLNA_5GHz)
+ tb = dot11lcnphytbl_rx_gain_info_extlna_5G_rev2;
+ else
+ tb = dot11lcnphytbl_rx_gain_info_5G_rev2;
}
+
+ for (idx = 0; idx < l; idx++)
+ wlc_lcnphy_write_table(pi, &tb[idx]);
}
if ((pi->sh->boardflags & BFL_FEM)
@@ -4379,11 +4629,13 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313_epa);
else if (pi->sh->boardflags & BFL_FEM_BT) {
if (pi->sh->boardrev < 0x1250)
- wlc_lcnphy_write_table(pi,
- &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa);
+ wlc_lcnphy_write_table(
+ pi,
+ &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa);
else
- wlc_lcnphy_write_table(pi,
- &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250);
+ wlc_lcnphy_write_table(
+ pi,
+ &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250);
} else
wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313);
@@ -4420,9 +4672,8 @@ static void wlc_lcnphy_rev0_baseband_init(struct brcms_phy *pi)
if (0) {
afectrl1 = 0;
afectrl1 = (u16) ((pi_lcn->lcnphy_rssi_vf) |
- (pi_lcn->lcnphy_rssi_vc << 4) | (pi_lcn->
- lcnphy_rssi_gs
- << 10));
+ (pi_lcn->lcnphy_rssi_vc << 4) |
+ (pi_lcn->lcnphy_rssi_gs << 10));
write_phy_reg(pi, 0x43e, afectrl1);
}
@@ -4443,7 +4694,6 @@ static void wlc_lcnphy_rev2_baseband_init(struct brcms_phy *pi)
{
if (CHSPEC_IS5G(pi->radio_chanspec)) {
mod_phy_reg(pi, 0x416, (0xff << 0), 80 << 0);
-
mod_phy_reg(pi, 0x416, (0xff << 8), 80 << 8);
}
}
@@ -4455,9 +4705,6 @@ static void wlc_lcnphy_agc_temp_init(struct brcms_phy *pi)
u32 tableBuffer[2];
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- if (NORADIO_ENAB(pi->pubpi))
- return;
-
temp = (s16) read_phy_reg(pi, 0x4df);
pi_lcn->lcnphy_ofdmgainidxtableoffset = (temp & (0xff << 0)) >> 0;
@@ -4484,18 +4731,15 @@ static void wlc_lcnphy_agc_temp_init(struct brcms_phy *pi)
tableBuffer[1] -= 128;
pi_lcn->lcnphy_tr_T_gain_val = tableBuffer[1];
- temp = (s16) (read_phy_reg(pi, 0x434)
- & (0xff << 0));
+ temp = (s16) (read_phy_reg(pi, 0x434) & (0xff << 0));
if (temp > 127)
temp -= 256;
pi_lcn->lcnphy_input_pwr_offset_db = (s8) temp;
- pi_lcn->lcnphy_Med_Low_Gain_db = (read_phy_reg(pi, 0x424)
- & (0xff << 8))
- >> 8;
- pi_lcn->lcnphy_Very_Low_Gain_db = (read_phy_reg(pi, 0x425)
- & (0xff << 0))
- >> 0;
+ pi_lcn->lcnphy_Med_Low_Gain_db =
+ (read_phy_reg(pi, 0x424) & (0xff << 8)) >> 8;
+ pi_lcn->lcnphy_Very_Low_Gain_db =
+ (read_phy_reg(pi, 0x425) & (0xff << 0)) >> 0;
tab.tbl_ptr = tableBuffer;
tab.tbl_len = 2;
@@ -4509,61 +4753,6 @@ static void wlc_lcnphy_agc_temp_init(struct brcms_phy *pi)
}
-static void wlc_lcnphy_bu_tweaks(struct brcms_phy *pi)
-{
- if (NORADIO_ENAB(pi->pubpi))
- return;
-
- or_phy_reg(pi, 0x805, 0x1);
-
- mod_phy_reg(pi, 0x42f, (0x7 << 0), (0x3) << 0);
-
- mod_phy_reg(pi, 0x030, (0x7 << 0), (0x3) << 0);
-
- write_phy_reg(pi, 0x414, 0x1e10);
- write_phy_reg(pi, 0x415, 0x0640);
-
- mod_phy_reg(pi, 0x4df, (0xff << 8), -9 << 8);
-
- or_phy_reg(pi, 0x44a, 0x44);
- write_phy_reg(pi, 0x44a, 0x80);
- mod_phy_reg(pi, 0x434, (0xff << 0), (0xFD) << 0);
-
- mod_phy_reg(pi, 0x420, (0xff << 0), (16) << 0);
-
- if (!(pi->sh->boardrev < 0x1204))
- mod_radio_reg(pi, RADIO_2064_REG09B, 0xF0, 0xF0);
-
- write_phy_reg(pi, 0x7d6, 0x0902);
- mod_phy_reg(pi, 0x429, (0xf << 0), (0x9) << 0);
-
- mod_phy_reg(pi, 0x429, (0x3f << 4), (0xe) << 4);
-
- if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
- mod_phy_reg(pi, 0x423, (0xff << 0), (0x46) << 0);
-
- mod_phy_reg(pi, 0x411, (0xff << 0), (1) << 0);
-
- mod_phy_reg(pi, 0x434, (0xff << 0), (0xFF) << 0);
-
- mod_phy_reg(pi, 0x656, (0xf << 0), (2) << 0);
-
- mod_phy_reg(pi, 0x44d, (0x1 << 2), (1) << 2);
-
- mod_radio_reg(pi, RADIO_2064_REG0F7, 0x4, 0x4);
- mod_radio_reg(pi, RADIO_2064_REG0F1, 0x3, 0);
- mod_radio_reg(pi, RADIO_2064_REG0F2, 0xF8, 0x90);
- mod_radio_reg(pi, RADIO_2064_REG0F3, 0x3, 0x2);
- mod_radio_reg(pi, RADIO_2064_REG0F3, 0xf0, 0xa0);
-
- mod_radio_reg(pi, RADIO_2064_REG11F, 0x2, 0x2);
-
- wlc_lcnphy_clear_tx_power_offsets(pi);
- mod_phy_reg(pi, 0x4d0, (0x1ff << 6), (10) << 6);
-
- }
-}
-
static void wlc_lcnphy_baseband_init(struct brcms_phy *pi)
{
@@ -4574,127 +4763,53 @@ static void wlc_lcnphy_baseband_init(struct brcms_phy *pi)
wlc_lcnphy_bu_tweaks(pi);
}
-static void wlc_radio_2064_init(struct brcms_phy *pi)
+void wlc_phy_init_lcnphy(struct brcms_phy *pi)
{
- u32 i;
- struct lcnphy_radio_regs *lcnphyregs = NULL;
-
- lcnphyregs = lcnphy_radio_regs_2064;
-
- for (i = 0; lcnphyregs[i].address != 0xffff; i++)
- if (CHSPEC_IS5G(pi->radio_chanspec) && lcnphyregs[i].do_init_a)
- write_radio_reg(pi,
- ((lcnphyregs[i].address & 0x3fff) |
- RADIO_DEFAULT_CORE),
- (u16) lcnphyregs[i].init_a);
- else if (lcnphyregs[i].do_init_g)
- write_radio_reg(pi,
- ((lcnphyregs[i].address & 0x3fff) |
- RADIO_DEFAULT_CORE),
- (u16) lcnphyregs[i].init_g);
-
- write_radio_reg(pi, RADIO_2064_REG032, 0x62);
- write_radio_reg(pi, RADIO_2064_REG033, 0x19);
-
- write_radio_reg(pi, RADIO_2064_REG090, 0x10);
-
- write_radio_reg(pi, RADIO_2064_REG010, 0x00);
-
- if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
-
- write_radio_reg(pi, RADIO_2064_REG060, 0x7f);
- write_radio_reg(pi, RADIO_2064_REG061, 0x72);
- write_radio_reg(pi, RADIO_2064_REG062, 0x7f);
- }
-
- write_radio_reg(pi, RADIO_2064_REG01D, 0x02);
- write_radio_reg(pi, RADIO_2064_REG01E, 0x06);
-
- mod_phy_reg(pi, 0x4ea, (0x7 << 0), 0 << 0);
-
- mod_phy_reg(pi, 0x4ea, (0x7 << 3), 1 << 3);
-
- mod_phy_reg(pi, 0x4ea, (0x7 << 6), 2 << 6);
-
- mod_phy_reg(pi, 0x4ea, (0x7 << 9), 3 << 9);
-
- mod_phy_reg(pi, 0x4ea, (0x7 << 12), 4 << 12);
-
- write_phy_reg(pi, 0x4ea, 0x4688);
-
- mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
-
- mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
-
- mod_phy_reg(pi, 0x46a, (0xffff << 0), 25 << 0);
-
- wlc_lcnphy_set_tx_locc(pi, 0);
-
- wlc_lcnphy_rcal(pi);
+ u8 phybw40;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
+ phybw40 = CHSPEC_IS40(pi->radio_chanspec);
- wlc_lcnphy_rc_cal(pi);
-}
+ pi_lcn->lcnphy_cal_counter = 0;
+ pi_lcn->lcnphy_cal_temper = pi_lcn->lcnphy_rawtempsense;
-static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
-{
- if (NORADIO_ENAB(pi->pubpi))
- return;
+ or_phy_reg(pi, 0x44a, 0x80);
+ and_phy_reg(pi, 0x44a, 0x7f);
- wlc_radio_2064_init(pi);
-}
+ wlc_lcnphy_afe_clk_init(pi, AFE_CLK_INIT_MODE_TXRX2X);
-static void wlc_lcnphy_rcal(struct brcms_phy *pi)
-{
- u8 rcal_value;
+ write_phy_reg(pi, 0x60a, 160);
- if (NORADIO_ENAB(pi->pubpi))
- return;
+ write_phy_reg(pi, 0x46a, 25);
- and_radio_reg(pi, RADIO_2064_REG05B, 0xfD);
+ wlc_lcnphy_baseband_init(pi);
- or_radio_reg(pi, RADIO_2064_REG004, 0x40);
- or_radio_reg(pi, RADIO_2064_REG120, 0x10);
+ wlc_lcnphy_radio_init(pi);
- or_radio_reg(pi, RADIO_2064_REG078, 0x80);
- or_radio_reg(pi, RADIO_2064_REG129, 0x02);
+ if (CHSPEC_IS2G(pi->radio_chanspec))
+ wlc_lcnphy_tx_pwr_ctrl_init((struct brcms_phy_pub *) pi);
- or_radio_reg(pi, RADIO_2064_REG057, 0x01);
+ wlc_phy_chanspec_set((struct brcms_phy_pub *) pi, pi->radio_chanspec);
- or_radio_reg(pi, RADIO_2064_REG05B, 0x02);
- mdelay(5);
- SPINWAIT(!wlc_radio_2064_rcal_done(pi), 10 * 1000 * 1000);
+ si_pmu_regcontrol(pi->sh->sih, 0, 0xf, 0x9);
- if (wlc_radio_2064_rcal_done(pi)) {
- rcal_value = (u8) read_radio_reg(pi, RADIO_2064_REG05C);
- rcal_value = rcal_value & 0x1f;
- }
+ si_pmu_chipcontrol(pi->sh->sih, 0, 0xffffffff, 0x03CDDDDD);
- and_radio_reg(pi, RADIO_2064_REG05B, 0xfD);
+ if ((pi->sh->boardflags & BFL_FEM)
+ && wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
+ wlc_lcnphy_set_tx_pwr_by_index(pi, FIXED_TXPWR);
- and_radio_reg(pi, RADIO_2064_REG057, 0xFE);
-}
+ wlc_lcnphy_agc_temp_init(pi);
-static void wlc_lcnphy_rc_cal(struct brcms_phy *pi)
-{
- u8 dflt_rc_cal_val;
- u16 flt_val;
+ wlc_lcnphy_temp_adj(pi);
- if (NORADIO_ENAB(pi->pubpi))
- return;
+ mod_phy_reg(pi, 0x448, (0x1 << 14), (1) << 14);
- dflt_rc_cal_val = 7;
- if (LCNREV_IS(pi->pubpi.phy_rev, 1))
- dflt_rc_cal_val = 11;
- flt_val =
- (dflt_rc_cal_val << 10) | (dflt_rc_cal_val << 5) |
- (dflt_rc_cal_val);
- write_phy_reg(pi, 0x933, flt_val);
- write_phy_reg(pi, 0x934, flt_val);
- write_phy_reg(pi, 0x935, flt_val);
- write_phy_reg(pi, 0x936, flt_val);
- write_phy_reg(pi, 0x937, (flt_val & 0x1FF));
+ udelay(100);
+ mod_phy_reg(pi, 0x448, (0x1 << 14), (0) << 14);
- return;
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_HW);
+ pi_lcn->lcnphy_noise_samples = LCNPHY_NOISE_SAMPLES_DEFAULT;
+ wlc_lcnphy_calib_modes(pi, PHY_PERICAL_PHYINIT);
}
static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
@@ -4702,39 +4817,38 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
s8 txpwr = 0;
int i;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
+ struct phy_shim_info *shim = pi->sh->physhim;
if (CHSPEC_IS2G(pi->radio_chanspec)) {
u16 cckpo = 0;
u32 offset_ofdm, offset_mcs;
pi_lcn->lcnphy_tr_isolation_mid =
- (u8) PHY_GETINTVAR(pi, "triso2g");
+ (u8)wlapi_getintvar(shim, BRCMS_SROM_TRISO2G);
pi_lcn->lcnphy_rx_power_offset =
- (u8) PHY_GETINTVAR(pi, "rxpo2g");
-
- pi->txpa_2g[0] = (s16) PHY_GETINTVAR(pi, "pa0b0");
- pi->txpa_2g[1] = (s16) PHY_GETINTVAR(pi, "pa0b1");
- pi->txpa_2g[2] = (s16) PHY_GETINTVAR(pi, "pa0b2");
-
- pi_lcn->lcnphy_rssi_vf = (u8) PHY_GETINTVAR(pi, "rssismf2g");
- pi_lcn->lcnphy_rssi_vc = (u8) PHY_GETINTVAR(pi, "rssismc2g");
- pi_lcn->lcnphy_rssi_gs = (u8) PHY_GETINTVAR(pi, "rssisav2g");
-
- {
- pi_lcn->lcnphy_rssi_vf_lowtemp = pi_lcn->lcnphy_rssi_vf;
- pi_lcn->lcnphy_rssi_vc_lowtemp = pi_lcn->lcnphy_rssi_vc;
- pi_lcn->lcnphy_rssi_gs_lowtemp = pi_lcn->lcnphy_rssi_gs;
-
- pi_lcn->lcnphy_rssi_vf_hightemp =
- pi_lcn->lcnphy_rssi_vf;
- pi_lcn->lcnphy_rssi_vc_hightemp =
- pi_lcn->lcnphy_rssi_vc;
- pi_lcn->lcnphy_rssi_gs_hightemp =
- pi_lcn->lcnphy_rssi_gs;
- }
+ (u8)wlapi_getintvar(shim, BRCMS_SROM_RXPO2G);
+
+ pi->txpa_2g[0] = (s16)wlapi_getintvar(shim, BRCMS_SROM_PA0B0);
+ pi->txpa_2g[1] = (s16)wlapi_getintvar(shim, BRCMS_SROM_PA0B1);
+ pi->txpa_2g[2] = (s16)wlapi_getintvar(shim, BRCMS_SROM_PA0B2);
- txpwr = (s8) PHY_GETINTVAR(pi, "maxp2ga0");
+ pi_lcn->lcnphy_rssi_vf =
+ (u8)wlapi_getintvar(shim, BRCMS_SROM_RSSISMF2G);
+ pi_lcn->lcnphy_rssi_vc =
+ (u8)wlapi_getintvar(shim, BRCMS_SROM_RSSISMC2G);
+ pi_lcn->lcnphy_rssi_gs =
+ (u8)wlapi_getintvar(shim, BRCMS_SROM_RSSISAV2G);
+
+ pi_lcn->lcnphy_rssi_vf_lowtemp = pi_lcn->lcnphy_rssi_vf;
+ pi_lcn->lcnphy_rssi_vc_lowtemp = pi_lcn->lcnphy_rssi_vc;
+ pi_lcn->lcnphy_rssi_gs_lowtemp = pi_lcn->lcnphy_rssi_gs;
+
+ pi_lcn->lcnphy_rssi_vf_hightemp = pi_lcn->lcnphy_rssi_vf;
+ pi_lcn->lcnphy_rssi_vc_hightemp = pi_lcn->lcnphy_rssi_vc;
+ pi_lcn->lcnphy_rssi_gs_hightemp = pi_lcn->lcnphy_rssi_gs;
+
+ txpwr = (s8)wlapi_getintvar(shim, BRCMS_SROM_MAXP2GA0);
pi->tx_srom_max_2g = txpwr;
for (i = 0; i < PWRTBL_NUM_COEFF; i++) {
@@ -4742,79 +4856,72 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
pi->txpa_2g_high_temp[i] = pi->txpa_2g[i];
}
- cckpo = (u16) PHY_GETINTVAR(pi, "cck2gpo");
+ cckpo = (u16)wlapi_getintvar(shim, BRCMS_SROM_CCK2GPO);
+ offset_ofdm = (u32)wlapi_getintvar(shim, BRCMS_SROM_OFDM2GPO);
if (cckpo) {
uint max_pwr_chan = txpwr;
for (i = TXP_FIRST_CCK; i <= TXP_LAST_CCK; i++) {
- pi->tx_srom_max_rate_2g[i] = max_pwr_chan -
- ((cckpo & 0xf) * 2);
+ pi->tx_srom_max_rate_2g[i] =
+ max_pwr_chan - ((cckpo & 0xf) * 2);
cckpo >>= 4;
}
- offset_ofdm = (u32) PHY_GETINTVAR(pi, "ofdm2gpo");
for (i = TXP_FIRST_OFDM; i <= TXP_LAST_OFDM; i++) {
- pi->tx_srom_max_rate_2g[i] = max_pwr_chan -
- ((offset_ofdm & 0xf) * 2);
+ pi->tx_srom_max_rate_2g[i] =
+ max_pwr_chan -
+ ((offset_ofdm & 0xf) * 2);
offset_ofdm >>= 4;
}
} else {
u8 opo = 0;
- opo = (u8) PHY_GETINTVAR(pi, "opo");
+ opo = (u8)wlapi_getintvar(shim, BRCMS_SROM_OPO);
- for (i = TXP_FIRST_CCK; i <= TXP_LAST_CCK; i++) {
+ for (i = TXP_FIRST_CCK; i <= TXP_LAST_CCK; i++)
pi->tx_srom_max_rate_2g[i] = txpwr;
- }
-
- offset_ofdm = (u32) PHY_GETINTVAR(pi, "ofdm2gpo");
for (i = TXP_FIRST_OFDM; i <= TXP_LAST_OFDM; i++) {
pi->tx_srom_max_rate_2g[i] = txpwr -
- ((offset_ofdm & 0xf) * 2);
+ ((offset_ofdm & 0xf) * 2);
offset_ofdm >>= 4;
}
offset_mcs =
- ((u16) PHY_GETINTVAR(pi, "mcs2gpo1") << 16) |
- (u16) PHY_GETINTVAR(pi, "mcs2gpo0");
+ wlapi_getintvar(shim,
+ BRCMS_SROM_MCS2GPO1) << 16;
+ offset_mcs |=
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS2GPO0);
pi_lcn->lcnphy_mcs20_po = offset_mcs;
for (i = TXP_FIRST_SISO_MCS_20;
i <= TXP_LAST_SISO_MCS_20; i++) {
pi->tx_srom_max_rate_2g[i] =
- txpwr - ((offset_mcs & 0xf) * 2);
+ txpwr - ((offset_mcs & 0xf) * 2);
offset_mcs >>= 4;
}
}
pi_lcn->lcnphy_rawtempsense =
- (u16) PHY_GETINTVAR(pi, "rawtempsense");
+ (u16)wlapi_getintvar(shim, BRCMS_SROM_RAWTEMPSENSE);
pi_lcn->lcnphy_measPower =
- (u8) PHY_GETINTVAR(pi, "measpower");
+ (u8)wlapi_getintvar(shim, BRCMS_SROM_MEASPOWER);
pi_lcn->lcnphy_tempsense_slope =
- (u8) PHY_GETINTVAR(pi, "tempsense_slope");
+ (u8)wlapi_getintvar(shim, BRCMS_SROM_TEMPSENSE_SLOPE);
pi_lcn->lcnphy_hw_iqcal_en =
- (bool) PHY_GETINTVAR(pi, "hw_iqcal_en");
+ (bool)wlapi_getintvar(shim, BRCMS_SROM_HW_IQCAL_EN);
pi_lcn->lcnphy_iqcal_swp_dis =
- (bool) PHY_GETINTVAR(pi, "iqcal_swp_dis");
+ (bool)wlapi_getintvar(shim, BRCMS_SROM_IQCAL_SWP_DIS);
pi_lcn->lcnphy_tempcorrx =
- (u8) PHY_GETINTVAR(pi, "tempcorrx");
+ (u8)wlapi_getintvar(shim, BRCMS_SROM_TEMPCORRX);
pi_lcn->lcnphy_tempsense_option =
- (u8) PHY_GETINTVAR(pi, "tempsense_option");
+ (u8)wlapi_getintvar(shim, BRCMS_SROM_TEMPSENSE_OPTION);
pi_lcn->lcnphy_freqoffset_corr =
- (u8) PHY_GETINTVAR(pi, "freqoffset_corr");
- if ((u8) getintvar(pi->vars, "aa2g") > 1)
+ (u8)wlapi_getintvar(shim, BRCMS_SROM_FREQOFFSET_CORR);
+ if ((u8)wlapi_getintvar(shim, BRCMS_SROM_AA2G) > 1)
wlc_phy_ant_rxdiv_set((struct brcms_phy_pub *) pi,
- (u8) getintvar(pi->vars,
- "aa2g"));
+ (u8) wlapi_getintvar(shim, BRCMS_SROM_AA2G));
}
pi_lcn->lcnphy_cck_dig_filt_type = -1;
- if (PHY_GETVAR(pi, "cckdigfilttype")) {
- s16 temp;
- temp = (s16) PHY_GETINTVAR(pi, "cckdigfilttype");
- if (temp >= 0) {
- pi_lcn->lcnphy_cck_dig_filt_type = temp;
- }
- }
return true;
}
@@ -4834,172 +4941,6 @@ void wlc_2064_vco_cal(struct brcms_phy *pi)
mod_radio_reg(pi, RADIO_2064_REG057, 1 << 3, 0);
}
-static void
-wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
-{
- uint i;
- const struct chan_info_2064_lcnphy *ci;
- u8 rfpll_doubler = 0;
- u8 pll_pwrup, pll_pwrup_ovr;
- fixed qFxtal, qFref, qFvco, qFcal;
- u8 d15, d16, f16, e44, e45;
- u32 div_int, div_frac, fvco3, fpfd, fref3, fcal_div;
- u16 loop_bw, d30, setCount;
- if (NORADIO_ENAB(pi->pubpi))
- return;
- ci = &chan_info_2064_lcnphy[0];
- rfpll_doubler = 1;
-
- mod_radio_reg(pi, RADIO_2064_REG09D, 0x4, 0x1 << 2);
-
- write_radio_reg(pi, RADIO_2064_REG09E, 0xf);
- if (!rfpll_doubler) {
- loop_bw = PLL_2064_LOOP_BW;
- d30 = PLL_2064_D30;
- } else {
- loop_bw = PLL_2064_LOOP_BW_DOUBLER;
- d30 = PLL_2064_D30_DOUBLER;
- }
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- for (i = 0; i < ARRAY_SIZE(chan_info_2064_lcnphy); i++)
- if (chan_info_2064_lcnphy[i].chan == channel)
- break;
-
- if (i >= ARRAY_SIZE(chan_info_2064_lcnphy)) {
- return;
- }
-
- ci = &chan_info_2064_lcnphy[i];
- }
-
- write_radio_reg(pi, RADIO_2064_REG02A, ci->logen_buftune);
-
- mod_radio_reg(pi, RADIO_2064_REG030, 0x3, ci->logen_rccr_tx);
-
- mod_radio_reg(pi, RADIO_2064_REG091, 0x3, ci->txrf_mix_tune_ctrl);
-
- mod_radio_reg(pi, RADIO_2064_REG038, 0xf, ci->pa_input_tune_g);
-
- mod_radio_reg(pi, RADIO_2064_REG030, 0x3 << 2,
- (ci->logen_rccr_rx) << 2);
-
- mod_radio_reg(pi, RADIO_2064_REG05E, 0xf, ci->pa_rxrf_lna1_freq_tune);
-
- mod_radio_reg(pi, RADIO_2064_REG05E, (0xf) << 4,
- (ci->pa_rxrf_lna2_freq_tune) << 4);
-
- write_radio_reg(pi, RADIO_2064_REG06C, ci->rxrf_rxrf_spare1);
-
- pll_pwrup = (u8) read_radio_reg(pi, RADIO_2064_REG044);
- pll_pwrup_ovr = (u8) read_radio_reg(pi, RADIO_2064_REG12B);
-
- or_radio_reg(pi, RADIO_2064_REG044, 0x07);
-
- or_radio_reg(pi, RADIO_2064_REG12B, (0x07) << 1);
- e44 = 0;
- e45 = 0;
-
- fpfd = rfpll_doubler ? (pi->xtalfreq << 1) : (pi->xtalfreq);
- if (pi->xtalfreq > 26000000)
- e44 = 1;
- if (pi->xtalfreq > 52000000)
- e45 = 1;
- if (e44 == 0)
- fcal_div = 1;
- else if (e45 == 0)
- fcal_div = 2;
- else
- fcal_div = 4;
- fvco3 = (ci->freq * 3);
- fref3 = 2 * fpfd;
-
- qFxtal = wlc_lcnphy_qdiv_roundup(pi->xtalfreq, PLL_2064_MHZ, 16);
- qFref = wlc_lcnphy_qdiv_roundup(fpfd, PLL_2064_MHZ, 16);
- qFcal = pi->xtalfreq * fcal_div / PLL_2064_MHZ;
- qFvco = wlc_lcnphy_qdiv_roundup(fvco3, 2, 16);
-
- write_radio_reg(pi, RADIO_2064_REG04F, 0x02);
-
- d15 = (pi->xtalfreq * fcal_div * 4 / 5) / PLL_2064_MHZ - 1;
- write_radio_reg(pi, RADIO_2064_REG052, (0x07 & (d15 >> 2)));
- write_radio_reg(pi, RADIO_2064_REG053, (d15 & 0x3) << 5);
-
- d16 = (qFcal * 8 / (d15 + 1)) - 1;
- write_radio_reg(pi, RADIO_2064_REG051, d16);
-
- f16 = ((d16 + 1) * (d15 + 1)) / qFcal;
- setCount = f16 * 3 * (ci->freq) / 32 - 1;
- mod_radio_reg(pi, RADIO_2064_REG053, (0x0f << 0),
- (u8) (setCount >> 8));
-
- or_radio_reg(pi, RADIO_2064_REG053, 0x10);
- write_radio_reg(pi, RADIO_2064_REG054, (u8) (setCount & 0xff));
-
- div_int = ((fvco3 * (PLL_2064_MHZ >> 4)) / fref3) << 4;
-
- div_frac = ((fvco3 * (PLL_2064_MHZ >> 4)) % fref3) << 4;
- while (div_frac >= fref3) {
- div_int++;
- div_frac -= fref3;
- }
- div_frac = wlc_lcnphy_qdiv_roundup(div_frac, fref3, 20);
-
- mod_radio_reg(pi, RADIO_2064_REG045, (0x1f << 0),
- (u8) (div_int >> 4));
- mod_radio_reg(pi, RADIO_2064_REG046, (0x1f << 4),
- (u8) (div_int << 4));
- mod_radio_reg(pi, RADIO_2064_REG046, (0x0f << 0),
- (u8) (div_frac >> 16));
- write_radio_reg(pi, RADIO_2064_REG047, (u8) (div_frac >> 8) & 0xff);
- write_radio_reg(pi, RADIO_2064_REG048, (u8) div_frac & 0xff);
-
- write_radio_reg(pi, RADIO_2064_REG040, 0xfb);
-
- write_radio_reg(pi, RADIO_2064_REG041, 0x9A);
- write_radio_reg(pi, RADIO_2064_REG042, 0xA3);
- write_radio_reg(pi, RADIO_2064_REG043, 0x0C);
-
- {
- u8 h29, h23, c28, d29, h28_ten, e30, h30_ten, cp_current;
- u16 c29, c38, c30, g30, d28;
- c29 = loop_bw;
- d29 = 200;
- c38 = 1250;
- h29 = d29 / c29;
- h23 = 1;
- c28 = 30;
- d28 = (((PLL_2064_HIGH_END_KVCO - PLL_2064_LOW_END_KVCO) *
- (fvco3 / 2 - PLL_2064_LOW_END_VCO)) /
- (PLL_2064_HIGH_END_VCO - PLL_2064_LOW_END_VCO))
- + PLL_2064_LOW_END_KVCO;
- h28_ten = (d28 * 10) / c28;
- c30 = 2640;
- e30 = (d30 - 680) / 490;
- g30 = 680 + (e30 * 490);
- h30_ten = (g30 * 10) / c30;
- cp_current = ((c38 * h29 * h23 * 100) / h28_ten) / h30_ten;
- mod_radio_reg(pi, RADIO_2064_REG03C, 0x3f, cp_current);
- }
- if (channel >= 1 && channel <= 5)
- write_radio_reg(pi, RADIO_2064_REG03C, 0x8);
- else
- write_radio_reg(pi, RADIO_2064_REG03C, 0x7);
- write_radio_reg(pi, RADIO_2064_REG03D, 0x3);
-
- mod_radio_reg(pi, RADIO_2064_REG044, 0x0c, 0x0c);
- udelay(1);
-
- wlc_2064_vco_cal(pi);
-
- write_radio_reg(pi, RADIO_2064_REG044, pll_pwrup);
- write_radio_reg(pi, RADIO_2064_REG12B, pll_pwrup_ovr);
- if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
- write_radio_reg(pi, RADIO_2064_REG038, 3);
- write_radio_reg(pi, RADIO_2064_REG091, 7);
- }
-}
-
bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi)
{
if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
@@ -5015,14 +4956,11 @@ void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi)
if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) {
wlc_lcnphy_calib_modes(pi, LCNPHY_PERICAL_TEMPBASED_TXPWRCTRL);
} else if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi)) {
-
pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
wlc_lcnphy_txpower_recalc_target(pi);
-
wlc_lcnphy_set_tx_pwr_ctrl(pi, pwr_ctrl);
- } else
- return;
+ }
}
void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
@@ -5035,13 +4973,12 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
struct brcms_phy_lcnphy *pi_lcn;
pi->u.pi_lcnphy = kzalloc(sizeof(struct brcms_phy_lcnphy), GFP_ATOMIC);
- if (pi->u.pi_lcnphy == NULL) {
+ if (pi->u.pi_lcnphy == NULL)
return false;
- }
pi_lcn = pi->u.pi_lcnphy;
- if ((0 == (pi->sh->boardflags & BFL_NOPA)) && !NORADIO_ENAB(pi->pubpi)) {
+ if (0 == (pi->sh->boardflags & BFL_NOPA)) {
pi->hwpwrctrl = true;
pi->hwpwrctrl_capable = true;
}
@@ -5062,7 +4999,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
return false;
- if ((pi->sh->boardflags & BFL_FEM) && (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
+ if ((pi->sh->boardflags & BFL_FEM) &&
+ (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
if (pi_lcn->lcnphy_tempsense_option == 3) {
pi->hwpwrctrl = true;
pi->hwpwrctrl_capable = true;
@@ -5090,8 +5028,8 @@ static void wlc_lcnphy_set_rx_gain(struct brcms_phy *pi, u32 gain)
biq1 = (u16) (gain >> 16) & 0xf;
gain0_15 = (u16) ((lna1 & 0x3) | ((lna1 & 0x3) << 2) |
- ((lna2 & 0x3) << 4) | ((lna2 & 0x3) << 6) |
- ((tia & 0xf) << 8) | ((biq0 & 0xf) << 12));
+ ((lna2 & 0x3) << 4) | ((lna2 & 0x3) << 6) |
+ ((tia & 0xf) << 8) | ((biq0 & 0xf) << 12));
gain16_19 = biq1;
mod_phy_reg(pi, 0x44d, (0x1 << 0), trsw << 0);
@@ -5126,18 +5064,19 @@ static u32 wlc_lcnphy_get_receive_power(struct brcms_phy *pi, s32 *gain_index)
lcnphy_23bitgaincode_table
[*gain_index]);
received_power =
- wlc_lcnphy_measure_digital_power(pi,
- pi_lcn->
- lcnphy_noise_samples);
+ wlc_lcnphy_measure_digital_power(
+ pi,
+ pi_lcn->
+ lcnphy_noise_samples);
(*gain_index)++;
}
(*gain_index)--;
} else {
wlc_lcnphy_set_rx_gain(pi, gain_code);
received_power =
- wlc_lcnphy_measure_digital_power(pi,
- pi_lcn->
- lcnphy_noise_samples);
+ wlc_lcnphy_measure_digital_power(pi,
+ pi_lcn->
+ lcnphy_noise_samples);
}
return received_power;
@@ -5150,6 +5089,8 @@ s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index)
s32 log_val, gain_mismatch, desired_gain, input_power_offset_db,
input_power_db;
s32 received_power, temperature;
+ u32 power;
+ u32 msb1, msb2, val1, val2, diff1, diff2;
uint freq;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
@@ -5159,20 +5100,17 @@ s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index)
nominal_power_db = read_phy_reg(pi, 0x425) >> 8;
- {
- u32 power = (received_power * 16);
- u32 msb1, msb2, val1, val2, diff1, diff2;
- msb1 = ffs(power) - 1;
- msb2 = msb1 + 1;
- val1 = 1 << msb1;
- val2 = 1 << msb2;
- diff1 = (power - val1);
- diff2 = (val2 - power);
- if (diff1 < diff2)
- log_val = msb1;
- else
- log_val = msb2;
- }
+ power = (received_power * 16);
+ msb1 = ffs(power) - 1;
+ msb2 = msb1 + 1;
+ val1 = 1 << msb1;
+ val2 = 1 << msb2;
+ diff1 = (power - val1);
+ diff2 = (val2 - power);
+ if (diff1 < diff2)
+ log_val = msb1;
+ else
+ log_val = msb2;
log_val = log_val * 3;
@@ -5188,7 +5126,7 @@ s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index)
input_power_db = input_power_offset_db - desired_gain;
input_power_db =
- input_power_db + lcnphy_gain_index_offset_for_rssi[gain_index];
+ input_power_db + lcnphy_gain_index_offset_for_rssi[gain_index];
freq = wlc_phy_channel2freq(CHSPEC_CHANNEL(pi->radio_chanspec));
if ((freq > 2427) && (freq <= 2467))
@@ -5196,99 +5134,21 @@ s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index)
temperature = pi_lcn->lcnphy_lastsensed_temperature;
- if ((temperature - 15) < -30) {
+ if ((temperature - 15) < -30)
input_power_db =
- input_power_db + (((temperature - 10 - 25) * 286) >> 12) -
- 7;
- } else if ((temperature - 15) < 4) {
+ input_power_db +
+ (((temperature - 10 - 25) * 286) >> 12) -
+ 7;
+ else if ((temperature - 15) < 4)
input_power_db =
- input_power_db + (((temperature - 10 - 25) * 286) >> 12) -
- 3;
- } else {
- input_power_db =
- input_power_db + (((temperature - 10 - 25) * 286) >> 12);
- }
+ input_power_db +
+ (((temperature - 10 - 25) * 286) >> 12) -
+ 3;
+ else
+ input_power_db = input_power_db +
+ (((temperature - 10 - 25) * 286) >> 12);
wlc_lcnphy_rx_gain_override_enable(pi, 0);
return input_power_db;
}
-
-static int
-wlc_lcnphy_load_tx_iir_filter(struct brcms_phy *pi, bool is_ofdm, s16 filt_type)
-{
- s16 filt_index = -1;
- int j;
-
- u16 addr[] = {
- 0x910,
- 0x91e,
- 0x91f,
- 0x924,
- 0x925,
- 0x926,
- 0x920,
- 0x921,
- 0x927,
- 0x928,
- 0x929,
- 0x922,
- 0x923,
- 0x930,
- 0x931,
- 0x932
- };
-
- u16 addr_ofdm[] = {
- 0x90f,
- 0x900,
- 0x901,
- 0x906,
- 0x907,
- 0x908,
- 0x902,
- 0x903,
- 0x909,
- 0x90a,
- 0x90b,
- 0x904,
- 0x905,
- 0x90c,
- 0x90d,
- 0x90e
- };
-
- if (!is_ofdm) {
- for (j = 0; j < LCNPHY_NUM_TX_DIG_FILTERS_CCK; j++) {
- if (filt_type == LCNPHY_txdigfiltcoeffs_cck[j][0]) {
- filt_index = (s16) j;
- break;
- }
- }
-
- if (filt_index != -1) {
- for (j = 0; j < LCNPHY_NUM_DIG_FILT_COEFFS; j++) {
- write_phy_reg(pi, addr[j],
- LCNPHY_txdigfiltcoeffs_cck
- [filt_index][j + 1]);
- }
- }
- } else {
- for (j = 0; j < LCNPHY_NUM_TX_DIG_FILTERS_OFDM; j++) {
- if (filt_type == LCNPHY_txdigfiltcoeffs_ofdm[j][0]) {
- filt_index = (s16) j;
- break;
- }
- }
-
- if (filt_index != -1) {
- for (j = 0; j < LCNPHY_NUM_DIG_FILT_COEFFS; j++) {
- write_phy_reg(pi, addr_ofdm[j],
- LCNPHY_txdigfiltcoeffs_ofdm
- [filt_index][j + 1]);
- }
- }
- }
-
- return (filt_index != -1) ? 0 : -1;
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_lcn.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.h
index f4a8ab09da4..f4a8ab09da4 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/phy_lcn.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.h
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index f8e41923942..cd19c2f7a34 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -14,7 +14,9 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/cordic.h>
#include <brcm_hw_ids.h>
#include <aiutils.h>
@@ -28,27 +30,42 @@
#include "phyreg_n.h"
#include "phytbl_n.h"
-#define READ_RADIO_REG2(pi, radio_type, jspace, core, reg_name) \
- read_radio_reg(pi, radio_type##_##jspace##_##reg_name | \
- ((core == PHY_CORE_0) ? radio_type##_##jspace##0 : radio_type##_##jspace##1))
-#define WRITE_RADIO_REG2(pi, radio_type, jspace, core, reg_name, value) \
+#define READ_RADIO_REG2(pi, radio_type, jspace, core, reg_name) \
+ read_radio_reg(pi, radio_type##_##jspace##_##reg_name | \
+ ((core == PHY_CORE_0) ? \
+ radio_type##_##jspace##0 : \
+ radio_type##_##jspace##1))
+
+#define WRITE_RADIO_REG2(pi, radio_type, jspace, core, reg_name, value) \
write_radio_reg(pi, radio_type##_##jspace##_##reg_name | \
- ((core == PHY_CORE_0) ? radio_type##_##jspace##0 : radio_type##_##jspace##1), value);
-#define WRITE_RADIO_SYN(pi, radio_type, reg_name, value) \
- write_radio_reg(pi, radio_type##_##SYN##_##reg_name, value);
-
-#define READ_RADIO_REG3(pi, radio_type, jspace, core, reg_name) \
- read_radio_reg(pi, ((core == PHY_CORE_0) ? radio_type##_##jspace##0##_##reg_name : \
- radio_type##_##jspace##1##_##reg_name));
-#define WRITE_RADIO_REG3(pi, radio_type, jspace, core, reg_name, value) \
- write_radio_reg(pi, ((core == PHY_CORE_0) ? radio_type##_##jspace##0##_##reg_name : \
- radio_type##_##jspace##1##_##reg_name), value);
-#define READ_RADIO_REG4(pi, radio_type, jspace, core, reg_name) \
- read_radio_reg(pi, ((core == PHY_CORE_0) ? radio_type##_##reg_name##_##jspace##0 : \
- radio_type##_##reg_name##_##jspace##1));
-#define WRITE_RADIO_REG4(pi, radio_type, jspace, core, reg_name, value) \
- write_radio_reg(pi, ((core == PHY_CORE_0) ? radio_type##_##reg_name##_##jspace##0 : \
- radio_type##_##reg_name##_##jspace##1), value);
+ ((core == PHY_CORE_0) ? \
+ radio_type##_##jspace##0 : \
+ radio_type##_##jspace##1), value)
+
+#define WRITE_RADIO_SYN(pi, radio_type, reg_name, value) \
+ write_radio_reg(pi, radio_type##_##SYN##_##reg_name, value)
+
+#define READ_RADIO_REG3(pi, radio_type, jspace, core, reg_name) \
+ read_radio_reg(pi, ((core == PHY_CORE_0) ? \
+ radio_type##_##jspace##0##_##reg_name : \
+ radio_type##_##jspace##1##_##reg_name))
+
+#define WRITE_RADIO_REG3(pi, radio_type, jspace, core, reg_name, value) \
+ write_radio_reg(pi, ((core == PHY_CORE_0) ? \
+ radio_type##_##jspace##0##_##reg_name : \
+ radio_type##_##jspace##1##_##reg_name), \
+ value)
+
+#define READ_RADIO_REG4(pi, radio_type, jspace, core, reg_name) \
+ read_radio_reg(pi, ((core == PHY_CORE_0) ? \
+ radio_type##_##reg_name##_##jspace##0 : \
+ radio_type##_##reg_name##_##jspace##1))
+
+#define WRITE_RADIO_REG4(pi, radio_type, jspace, core, reg_name, value) \
+ write_radio_reg(pi, ((core == PHY_CORE_0) ? \
+ radio_type##_##reg_name##_##jspace##0 : \
+ radio_type##_##reg_name##_##jspace##1), \
+ value)
#define NPHY_ACI_MAX_UNDETECT_WINDOW_SZ 40
#define NPHY_ACI_CHANNEL_DELTA 5
@@ -103,9 +120,11 @@
#define NPHY_CALSANITY_RSSI_NB_MAX_POS 9
#define NPHY_CALSANITY_RSSI_NB_MAX_NEG -9
#define NPHY_CALSANITY_RSSI_W1_MAX_POS 12
-#define NPHY_CALSANITY_RSSI_W1_MAX_NEG (NPHY_RSSICAL_W1_TARGET - NPHY_RSSICAL_MAXREAD)
+#define NPHY_CALSANITY_RSSI_W1_MAX_NEG (NPHY_RSSICAL_W1_TARGET - \
+ NPHY_RSSICAL_MAXREAD)
#define NPHY_CALSANITY_RSSI_W2_MAX_POS NPHY_CALSANITY_RSSI_W1_MAX_POS
-#define NPHY_CALSANITY_RSSI_W2_MAX_NEG (NPHY_RSSICAL_W2_TARGET - NPHY_RSSICAL_MAXREAD)
+#define NPHY_CALSANITY_RSSI_W2_MAX_NEG (NPHY_RSSICAL_W2_TARGET - \
+ NPHY_RSSICAL_MAXREAD)
#define NPHY_RSSI_SXT(x) ((s8) (-((x) & 0x20) + ((x) & 0x1f)))
#define NPHY_RSSI_NB_VIOL(x) (((x) > NPHY_CALSANITY_RSSI_NB_MAX_POS) || \
((x) < NPHY_CALSANITY_RSSI_NB_MAX_NEG))
@@ -124,11 +143,11 @@
#define NPHY_PAPD_COMP_OFF 0
#define NPHY_PAPD_COMP_ON 1
-#define NPHY_SROM_TEMPSHIFT 32
-#define NPHY_SROM_MAXTEMPOFFSET 16
-#define NPHY_SROM_MINTEMPOFFSET -16
+#define NPHY_SROM_TEMPSHIFT 32
+#define NPHY_SROM_MAXTEMPOFFSET 16
+#define NPHY_SROM_MINTEMPOFFSET -16
-#define NPHY_CAL_MAXTEMPDELTA 64
+#define NPHY_CAL_MAXTEMPDELTA 64
#define NPHY_NOISEVAR_TBLLEN40 256
#define NPHY_NOISEVAR_TBLLEN20 128
@@ -138,8 +157,24 @@
#define NPHY_ADJUSTED_MINCRSPOWER 0x1e
/* 5357 Chip specific ChipControl register bits */
-#define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */
-#define CCTRL5357_ANT_MUX_2o3 (1<<15) /* 2o3 in ChipControl 1, bit 15 */
+#define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */
+#define CCTRL5357_ANT_MUX_2o3 (1<<15) /* 2o3 in ChipControl 1, bit 15 */
+
+#define NPHY_CAL_TSSISAMPS 64
+#define NPHY_TEST_TONE_FREQ_40MHz 4000
+#define NPHY_TEST_TONE_FREQ_20MHz 2500
+
+#define MAX_205x_RCAL_WAITLOOPS 10000
+
+#define NPHY_RXCAL_TONEAMP 181
+#define NPHY_RXCAL_TONEFREQ_40MHz 4000
+#define NPHY_RXCAL_TONEFREQ_20MHz 2000
+
+#define TXFILT_SHAPING_OFDM20 0
+#define TXFILT_SHAPING_OFDM40 1
+#define TXFILT_SHAPING_CCK 2
+#define TXFILT_DEFAULT_OFDM20 3
+#define TXFILT_DEFAULT_OFDM40 4
struct nphy_iqcal_params {
u16 txlpf;
@@ -184,44 +219,42 @@ struct nphy_ipa_txrxgain {
#define NPHY_IPA_RXCAL_MAXGAININDEX (6 - 1)
-struct nphy_ipa_txrxgain nphy_ipa_rxcal_gaintbl_5GHz[] = { {0, 0, 0, 0, 0, 100},
-{0, 0, 0, 0, 0, 50},
-{0, 0, 0, 0, 0, -1},
-{0, 0, 0, 3, 0, -1},
-{0, 0, 3, 3, 0, -1},
-{0, 2, 3, 3, 0, -1}
+static const struct nphy_ipa_txrxgain nphy_ipa_rxcal_gaintbl_5GHz[] = {
+ {0, 0, 0, 0, 0, 100},
+ {0, 0, 0, 0, 0, 50},
+ {0, 0, 0, 0, 0, -1},
+ {0, 0, 0, 3, 0, -1},
+ {0, 0, 3, 3, 0, -1},
+ {0, 2, 3, 3, 0, -1}
};
-struct nphy_ipa_txrxgain nphy_ipa_rxcal_gaintbl_2GHz[] = { {0, 0, 0, 0, 0, 128},
-{0, 0, 0, 0, 0, 70},
-{0, 0, 0, 0, 0, 20},
-{0, 0, 0, 3, 0, 20},
-{0, 0, 3, 3, 0, 20},
-{0, 2, 3, 3, 0, 20}
+static const struct nphy_ipa_txrxgain nphy_ipa_rxcal_gaintbl_2GHz[] = {
+ {0, 0, 0, 0, 0, 128},
+ {0, 0, 0, 0, 0, 70},
+ {0, 0, 0, 0, 0, 20},
+ {0, 0, 0, 3, 0, 20},
+ {0, 0, 3, 3, 0, 20},
+ {0, 2, 3, 3, 0, 20}
};
-struct nphy_ipa_txrxgain nphy_ipa_rxcal_gaintbl_5GHz_rev7[] = {
-{0, 0, 0, 0, 0, 100},
-{0, 0, 0, 0, 0, 50},
-{0, 0, 0, 0, 0, -1},
-{0, 0, 0, 3, 0, -1},
-{0, 0, 3, 3, 0, -1},
-{0, 0, 5, 3, 0, -1}
+static const struct nphy_ipa_txrxgain nphy_ipa_rxcal_gaintbl_5GHz_rev7[] = {
+ {0, 0, 0, 0, 0, 100},
+ {0, 0, 0, 0, 0, 50},
+ {0, 0, 0, 0, 0, -1},
+ {0, 0, 0, 3, 0, -1},
+ {0, 0, 3, 3, 0, -1},
+ {0, 0, 5, 3, 0, -1}
};
-struct nphy_ipa_txrxgain nphy_ipa_rxcal_gaintbl_2GHz_rev7[] = {
-{0, 0, 0, 0, 0, 10},
-{0, 0, 0, 1, 0, 10},
-{0, 0, 1, 2, 0, 10},
-{0, 0, 1, 3, 0, 10},
-{0, 0, 4, 3, 0, 10},
-{0, 0, 6, 3, 0, 10}
+static const struct nphy_ipa_txrxgain nphy_ipa_rxcal_gaintbl_2GHz_rev7[] = {
+ {0, 0, 0, 0, 0, 10},
+ {0, 0, 0, 1, 0, 10},
+ {0, 0, 1, 2, 0, 10},
+ {0, 0, 1, 3, 0, 10},
+ {0, 0, 4, 3, 0, 10},
+ {0, 0, 6, 3, 0, 10}
};
-#define NPHY_RXCAL_TONEAMP 181
-#define NPHY_RXCAL_TONEFREQ_40MHz 4000
-#define NPHY_RXCAL_TONEFREQ_20MHz 2000
-
enum {
NPHY_RXCAL_GAIN_INIT = 0,
NPHY_RXCAL_GAIN_UP,
@@ -230,17 +263,11 @@ enum {
#define wlc_phy_get_papd_nphy(pi) \
(read_phy_reg((pi), 0x1e7) & \
- ((0x1 << 15) | \
- (0x1 << 14) | \
- (0x1 << 13)))
+ ((0x1 << 15) | \
+ (0x1 << 14) | \
+ (0x1 << 13)))
-#define TXFILT_SHAPING_OFDM20 0
-#define TXFILT_SHAPING_OFDM40 1
-#define TXFILT_SHAPING_CCK 2
-#define TXFILT_DEFAULT_OFDM20 3
-#define TXFILT_DEFAULT_OFDM40 4
-
-u16 NPHY_IPA_REV4_txdigi_filtcoeffs[][NPHY_NUM_DIG_FILT_COEFFS] = {
+static const u16 NPHY_IPA_REV4_txdigi_filtcoeffs[][NPHY_NUM_DIG_FILT_COEFFS] = {
{-377, 137, -407, 208, -1527, 956, 93, 186, 93,
230, -44, 230, 201, -191, 201},
{-77, 20, -98, 49, -93, 60, 56, 111, 56, 26, -5,
@@ -416,7 +443,7 @@ struct nphy_sfo_cfg {
u16 PHY_BW6;
};
-static struct chan_info_nphy_2055 chan_info_nphy_2055[] = {
+static const struct chan_info_nphy_2055 chan_info_nphy_2055[] = {
{
184, 4920, 3280, 0x71, 0x01, 0xEC, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F, 0x8F, 0xFF, 0x00, 0x0F,
@@ -915,7 +942,7 @@ static struct chan_info_nphy_2055 chan_info_nphy_2055[] = {
0x01, 0x80, 0x3E6, 0x3E2, 0x3DE, 0x41B, 0x41F, 0x424}
};
-static struct chan_info_nphy_radio205x chan_info_nphyrev3_2056[] = {
+static const struct chan_info_nphy_radio205x chan_info_nphyrev3_2056[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f,
@@ -1538,7 +1565,7 @@ static struct chan_info_nphy_radio205x chan_info_nphyrev3_2056[] = {
0x0f, 0x00, 0x0d, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static struct chan_info_nphy_radio205x chan_info_nphyrev4_2056_A1[] = {
+static const struct chan_info_nphy_radio205x chan_info_nphyrev4_2056_A1[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f,
@@ -2161,7 +2188,7 @@ static struct chan_info_nphy_radio205x chan_info_nphyrev4_2056_A1[] = {
0x0f, 0x00, 0x0e, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static struct chan_info_nphy_radio205x chan_info_nphyrev5_2056v5[] = {
+static const struct chan_info_nphy_radio205x chan_info_nphyrev5_2056v5[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70,
@@ -2784,7 +2811,7 @@ static struct chan_info_nphy_radio205x chan_info_nphyrev5_2056v5[] = {
0x0d, 0x00, 0x08, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static struct chan_info_nphy_radio205x chan_info_nphyrev6_2056v6[] = {
+static const struct chan_info_nphy_radio205x chan_info_nphyrev6_2056v6[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77,
@@ -3407,7 +3434,7 @@ static struct chan_info_nphy_radio205x chan_info_nphyrev6_2056v6[] = {
0x09, 0x00, 0x09, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static struct chan_info_nphy_radio205x chan_info_nphyrev5n6_2056v7[] = {
+static const struct chan_info_nphy_radio205x chan_info_nphyrev5n6_2056v7[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70,
@@ -4030,7 +4057,7 @@ static struct chan_info_nphy_radio205x chan_info_nphyrev5n6_2056v7[] = {
0x0d, 0x00, 0x08, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static struct chan_info_nphy_radio205x chan_info_nphyrev6_2056v8[] = {
+static const struct chan_info_nphy_radio205x chan_info_nphyrev6_2056v8[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77,
@@ -4653,7 +4680,7 @@ static struct chan_info_nphy_radio205x chan_info_nphyrev6_2056v8[] = {
0x09, 0x00, 0x09, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static struct chan_info_nphy_radio205x chan_info_nphyrev6_2056v11[] = {
+static const struct chan_info_nphy_radio205x chan_info_nphyrev6_2056v11[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x02, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77,
@@ -5276,7 +5303,7 @@ static struct chan_info_nphy_radio205x chan_info_nphyrev6_2056v11[] = {
0x09, 0x00, 0x09, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static struct chan_info_nphy_radio2057 chan_info_nphyrev7_2057_rev4[] = {
+static const struct chan_info_nphy_radio2057 chan_info_nphyrev7_2057_rev4[] = {
{
184, 4920, 0x68, 0x16, 0x10, 0x0c, 0x0c, 0x0c, 0x30, 0xec, 0x01, 0x0f,
0x00, 0x0f, 0x00, 0xff, 0x00, 0x00, 0x0f, 0x0f, 0xf3, 0x00, 0xef, 0x00,
@@ -6139,7 +6166,8 @@ static struct chan_info_nphy_radio2057 chan_info_nphyrev7_2057_rev4[] = {
0x0424}
};
-static struct chan_info_nphy_radio2057_rev5 chan_info_nphyrev8_2057_rev5[] = {
+static const struct chan_info_nphy_radio2057_rev5
+chan_info_nphyrev8_2057_rev5[] = {
{
1, 2412, 0x48, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x6c, 0x09, 0x0d,
0x08, 0x0e, 0x61, 0x03, 0xff, 0x61, 0x03, 0xff, 0x03c9, 0x03c5, 0x03c1,
@@ -6198,7 +6226,8 @@ static struct chan_info_nphy_radio2057_rev5 chan_info_nphyrev8_2057_rev5[] = {
0x041b, 0x041f, 0x0424}
};
-static struct chan_info_nphy_radio2057_rev5 chan_info_nphyrev9_2057_rev5v1[] = {
+static const struct chan_info_nphy_radio2057_rev5
+chan_info_nphyrev9_2057_rev5v1[] = {
{
1, 2412, 0x48, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x6c, 0x09, 0x0d,
0x08, 0x0e, 0x61, 0x03, 0xff, 0x61, 0x03, 0xff, 0x03c9, 0x03c5, 0x03c1,
@@ -6257,7 +6286,7 @@ static struct chan_info_nphy_radio2057_rev5 chan_info_nphyrev9_2057_rev5v1[] = {
0x041b, 0x041f, 0x0424}
};
-static struct chan_info_nphy_radio2057 chan_info_nphyrev8_2057_rev7[] = {
+static const struct chan_info_nphy_radio2057 chan_info_nphyrev8_2057_rev7[] = {
{
184, 4920, 0x68, 0x16, 0x10, 0x0c, 0x0c, 0x0c, 0x30, 0xec, 0x01, 0x0f,
0x00, 0x0f, 0x00, 0xff, 0x00, 0xd3, 0x0f, 0x0f, 0xd3, 0x00, 0xff, 0x00,
@@ -6998,7 +7027,7 @@ static struct chan_info_nphy_radio2057 chan_info_nphyrev8_2057_rev7[] = {
0x0424}
};
-static struct chan_info_nphy_radio2057 chan_info_nphyrev8_2057_rev8[] = {
+static const struct chan_info_nphy_radio2057 chan_info_nphyrev8_2057_rev8[] = {
{
186, 4930, 0x6b, 0x16, 0x10, 0x0c, 0x0c, 0x0c, 0x30, 0xed, 0x01, 0x0f,
0x00, 0x0f, 0x00, 0xff, 0x00, 0xd3, 0x0f, 0x0f, 0xd3, 0x00, 0xff, 0x00,
@@ -7733,7 +7762,7 @@ static struct chan_info_nphy_radio2057 chan_info_nphyrev8_2057_rev8[] = {
0x0424}
};
-struct radio_regs regs_2055[] = {
+static struct radio_regs regs_2055[] = {
{0x02, 0x80, 0x80, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0x27, 0x27, 0, 0},
@@ -7962,7 +7991,7 @@ struct radio_regs regs_2055[] = {
{0xFFFF, 0, 0, 0, 0},
};
-struct radio_regs regs_SYN_2056[] = {
+static struct radio_regs regs_SYN_2056[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -8147,7 +8176,7 @@ struct radio_regs regs_SYN_2056[] = {
{0xFFFF, 0, 0, 0, 0}
};
-struct radio_regs regs_TX_2056[] = {
+static struct radio_regs regs_TX_2056[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -8296,7 +8325,7 @@ struct radio_regs regs_TX_2056[] = {
{0xFFFF, 0, 0, 0, 0}
};
-struct radio_regs regs_RX_2056[] = {
+static struct radio_regs regs_RX_2056[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -8447,7 +8476,7 @@ struct radio_regs regs_RX_2056[] = {
{0xFFFF, 0, 0, 0, 0}
};
-struct radio_regs regs_SYN_2056_A1[] = {
+static struct radio_regs regs_SYN_2056_A1[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -8632,7 +8661,7 @@ struct radio_regs regs_SYN_2056_A1[] = {
{0xFFFF, 0, 0, 0, 0}
};
-struct radio_regs regs_TX_2056_A1[] = {
+static struct radio_regs regs_TX_2056_A1[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -8781,7 +8810,7 @@ struct radio_regs regs_TX_2056_A1[] = {
{0xFFFF, 0, 0, 0, 0}
};
-struct radio_regs regs_RX_2056_A1[] = {
+static struct radio_regs regs_RX_2056_A1[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -8932,7 +8961,7 @@ struct radio_regs regs_RX_2056_A1[] = {
{0xFFFF, 0, 0, 0, 0}
};
-struct radio_regs regs_SYN_2056_rev5[] = {
+static struct radio_regs regs_SYN_2056_rev5[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -9117,7 +9146,7 @@ struct radio_regs regs_SYN_2056_rev5[] = {
{0xFFFF, 0, 0, 0, 0}
};
-struct radio_regs regs_TX_2056_rev5[] = {
+static struct radio_regs regs_TX_2056_rev5[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -9274,7 +9303,7 @@ struct radio_regs regs_TX_2056_rev5[] = {
{0xFFFF, 0, 0, 0, 0}
};
-struct radio_regs regs_RX_2056_rev5[] = {
+static struct radio_regs regs_RX_2056_rev5[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -9425,7 +9454,7 @@ struct radio_regs regs_RX_2056_rev5[] = {
{0xFFFF, 0, 0, 0, 0}
};
-struct radio_regs regs_SYN_2056_rev6[] = {
+static struct radio_regs regs_SYN_2056_rev6[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -9610,7 +9639,7 @@ struct radio_regs regs_SYN_2056_rev6[] = {
{0xFFFF, 0, 0, 0, 0}
};
-struct radio_regs regs_TX_2056_rev6[] = {
+static struct radio_regs regs_TX_2056_rev6[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -9767,7 +9796,7 @@ struct radio_regs regs_TX_2056_rev6[] = {
{0xFFFF, 0, 0, 0, 0}
};
-struct radio_regs regs_RX_2056_rev6[] = {
+static struct radio_regs regs_RX_2056_rev6[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -9918,7 +9947,7 @@ struct radio_regs regs_RX_2056_rev6[] = {
{0xFFFF, 0, 0, 0, 0}
};
-struct radio_regs regs_SYN_2056_rev7[] = {
+static struct radio_regs regs_SYN_2056_rev7[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -10103,7 +10132,7 @@ struct radio_regs regs_SYN_2056_rev7[] = {
{0xFFFF, 0, 0, 0, 0},
};
-struct radio_regs regs_TX_2056_rev7[] = {
+static struct radio_regs regs_TX_2056_rev7[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -10260,7 +10289,7 @@ struct radio_regs regs_TX_2056_rev7[] = {
{0xFFFF, 0, 0, 0, 0},
};
-struct radio_regs regs_RX_2056_rev7[] = {
+static struct radio_regs regs_RX_2056_rev7[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -10411,7 +10440,7 @@ struct radio_regs regs_RX_2056_rev7[] = {
{0xFFFF, 0, 0, 0, 0},
};
-struct radio_regs regs_SYN_2056_rev8[] = {
+static struct radio_regs regs_SYN_2056_rev8[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -10596,7 +10625,7 @@ struct radio_regs regs_SYN_2056_rev8[] = {
{0xFFFF, 0, 0, 0, 0},
};
-struct radio_regs regs_TX_2056_rev8[] = {
+static struct radio_regs regs_TX_2056_rev8[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -10753,7 +10782,7 @@ struct radio_regs regs_TX_2056_rev8[] = {
{0xFFFF, 0, 0, 0, 0},
};
-struct radio_regs regs_RX_2056_rev8[] = {
+static struct radio_regs regs_RX_2056_rev8[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -10904,7 +10933,7 @@ struct radio_regs regs_RX_2056_rev8[] = {
{0xFFFF, 0, 0, 0, 0},
};
-struct radio_regs regs_SYN_2056_rev11[] = {
+static const struct radio_regs regs_SYN_2056_rev11[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -11089,7 +11118,7 @@ struct radio_regs regs_SYN_2056_rev11[] = {
{0xFFFF, 0, 0, 0, 0},
};
-struct radio_regs regs_TX_2056_rev11[] = {
+static const struct radio_regs regs_TX_2056_rev11[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -11246,7 +11275,7 @@ struct radio_regs regs_TX_2056_rev11[] = {
{0xFFFF, 0, 0, 0, 0},
};
-struct radio_regs regs_RX_2056_rev11[] = {
+static const struct radio_regs regs_RX_2056_rev11[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -11397,7 +11426,7 @@ struct radio_regs regs_RX_2056_rev11[] = {
{0xFFFF, 0, 0, 0, 0},
};
-struct radio_20xx_regs regs_2057_rev4[] = {
+static struct radio_20xx_regs regs_2057_rev4[] = {
{0x00, 0x84, 0},
{0x01, 0, 0},
{0x02, 0x60, 0},
@@ -11787,7 +11816,7 @@ struct radio_20xx_regs regs_2057_rev4[] = {
{0xFFFF, 0, 0},
};
-struct radio_20xx_regs regs_2057_rev5[] = {
+static struct radio_20xx_regs regs_2057_rev5[] = {
{0x00, 0, 1},
{0x01, 0x57, 1},
{0x02, 0x20, 1},
@@ -12119,7 +12148,7 @@ struct radio_20xx_regs regs_2057_rev5[] = {
{0xFFFF, 0, 0}
};
-struct radio_20xx_regs regs_2057_rev5v1[] = {
+static struct radio_20xx_regs regs_2057_rev5v1[] = {
{0x00, 0x15, 1},
{0x01, 0x57, 1},
{0x02, 0x20, 1},
@@ -12451,7 +12480,7 @@ struct radio_20xx_regs regs_2057_rev5v1[] = {
{0xFFFF, 0, 0}
};
-struct radio_20xx_regs regs_2057_rev7[] = {
+static struct radio_20xx_regs regs_2057_rev7[] = {
{0x00, 0, 1},
{0x01, 0x57, 1},
{0x02, 0x20, 1},
@@ -12867,7 +12896,7 @@ struct radio_20xx_regs regs_2057_rev7[] = {
{0xFFFF, 0, 0}
};
-struct radio_20xx_regs regs_2057_rev8[] = {
+static struct radio_20xx_regs regs_2057_rev8[] = {
{0x00, 0x8, 1},
{0x01, 0x57, 1},
{0x02, 0x20, 1},
@@ -13290,27 +13319,27 @@ static s32 nphy_lnagain_est1[] = { -224, 23242 };
static const u16 tbl_iqcal_gainparams_nphy[2][NPHY_IQCAL_NUMGAINS][8] = {
{
- {0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69},
- {0x700, 7, 0, 0, 0x69, 0x69, 0x69, 0x69},
- {0x710, 7, 1, 0, 0x68, 0x68, 0x68, 0x68},
- {0x720, 7, 2, 0, 0x67, 0x67, 0x67, 0x67},
- {0x730, 7, 3, 0, 0x66, 0x66, 0x66, 0x66},
- {0x740, 7, 4, 0, 0x65, 0x65, 0x65, 0x65},
- {0x741, 7, 4, 1, 0x65, 0x65, 0x65, 0x65},
- {0x742, 7, 4, 2, 0x65, 0x65, 0x65, 0x65},
- {0x743, 7, 4, 3, 0x65, 0x65, 0x65, 0x65}
- },
- {
- {0x000, 7, 0, 0, 0x79, 0x79, 0x79, 0x79},
- {0x700, 7, 0, 0, 0x79, 0x79, 0x79, 0x79},
- {0x710, 7, 1, 0, 0x79, 0x79, 0x79, 0x79},
- {0x720, 7, 2, 0, 0x78, 0x78, 0x78, 0x78},
- {0x730, 7, 3, 0, 0x78, 0x78, 0x78, 0x78},
- {0x740, 7, 4, 0, 0x78, 0x78, 0x78, 0x78},
- {0x741, 7, 4, 1, 0x78, 0x78, 0x78, 0x78},
- {0x742, 7, 4, 2, 0x78, 0x78, 0x78, 0x78},
- {0x743, 7, 4, 3, 0x78, 0x78, 0x78, 0x78}
- }
+ {0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69},
+ {0x700, 7, 0, 0, 0x69, 0x69, 0x69, 0x69},
+ {0x710, 7, 1, 0, 0x68, 0x68, 0x68, 0x68},
+ {0x720, 7, 2, 0, 0x67, 0x67, 0x67, 0x67},
+ {0x730, 7, 3, 0, 0x66, 0x66, 0x66, 0x66},
+ {0x740, 7, 4, 0, 0x65, 0x65, 0x65, 0x65},
+ {0x741, 7, 4, 1, 0x65, 0x65, 0x65, 0x65},
+ {0x742, 7, 4, 2, 0x65, 0x65, 0x65, 0x65},
+ {0x743, 7, 4, 3, 0x65, 0x65, 0x65, 0x65}
+ },
+ {
+ {0x000, 7, 0, 0, 0x79, 0x79, 0x79, 0x79},
+ {0x700, 7, 0, 0, 0x79, 0x79, 0x79, 0x79},
+ {0x710, 7, 1, 0, 0x79, 0x79, 0x79, 0x79},
+ {0x720, 7, 2, 0, 0x78, 0x78, 0x78, 0x78},
+ {0x730, 7, 3, 0, 0x78, 0x78, 0x78, 0x78},
+ {0x740, 7, 4, 0, 0x78, 0x78, 0x78, 0x78},
+ {0x741, 7, 4, 1, 0x78, 0x78, 0x78, 0x78},
+ {0x742, 7, 4, 2, 0x78, 0x78, 0x78, 0x78},
+ {0x743, 7, 4, 3, 0x78, 0x78, 0x78, 0x78}
+ }
};
static const u32 nphy_tpc_txgain[] = {
@@ -14081,118 +14110,11 @@ static u32 nphy_tpc_5GHz_txgain_HiPwrEPA[] = {
static u8 ant_sw_ctrl_tbl_rev8_2o3[] = { 0x14, 0x18 };
static u8 ant_sw_ctrl_tbl_rev8[] = { 0x4, 0x8, 0x4, 0x8, 0x11, 0x12 };
static u8 ant_sw_ctrl_tbl_rev8_2057v7_core0[] = {
- 0x09, 0x0a, 0x15, 0x16, 0x09, 0x0a };
+ 0x09, 0x0a, 0x15, 0x16, 0x09, 0x0a
+};
static u8 ant_sw_ctrl_tbl_rev8_2057v7_core1[] = {
- 0x09, 0x0a, 0x09, 0x0a, 0x15, 0x16 };
-
-static bool wlc_phy_chan2freq_nphy(struct brcms_phy *pi, uint channel, int *f,
- struct chan_info_nphy_radio2057 **t0,
- struct chan_info_nphy_radio205x **t1,
- struct chan_info_nphy_radio2057_rev5 **t2,
- struct chan_info_nphy_2055 **t3);
-static void wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, chanspec_t chans,
- const struct nphy_sfo_cfg *c);
-
-static void wlc_phy_adjust_rx_analpfbw_nphy(struct brcms_phy *pi,
- u16 reduction_factr);
-static void wlc_phy_adjust_min_noisevar_nphy(struct brcms_phy *pi,
- int ntones, int *, u32 *buf);
-static void wlc_phy_adjust_crsminpwr_nphy(struct brcms_phy *pi, u8 minpwr);
-static void wlc_phy_txlpfbw_nphy(struct brcms_phy *pi);
-static void wlc_phy_spurwar_nphy(struct brcms_phy *pi);
-
-static void wlc_phy_radio_preinit_2055(struct brcms_phy *pi);
-static void wlc_phy_radio_init_2055(struct brcms_phy *pi);
-static void wlc_phy_radio_postinit_2055(struct brcms_phy *pi);
-static void wlc_phy_radio_preinit_205x(struct brcms_phy *pi);
-static void wlc_phy_radio_init_2056(struct brcms_phy *pi);
-static void wlc_phy_radio_postinit_2056(struct brcms_phy *pi);
-static void wlc_phy_radio_init_2057(struct brcms_phy *pi);
-static void wlc_phy_radio_postinit_2057(struct brcms_phy *pi);
-static void wlc_phy_workarounds_nphy(struct brcms_phy *pi);
-static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi);
-static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi);
-static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi);
-static void wlc_phy_adjust_lnagaintbl_nphy(struct brcms_phy *pi);
-
-static void wlc_phy_restore_rssical_nphy(struct brcms_phy *pi);
-static void wlc_phy_reapply_txcal_coeffs_nphy(struct brcms_phy *pi);
-static void wlc_phy_tx_iq_war_nphy(struct brcms_phy *pi);
-static int wlc_phy_cal_rxiq_nphy_rev3(struct brcms_phy *pi,
- struct nphy_txgains tg, u8 type, bool d);
-static void wlc_phy_rxcal_gainctrl_nphy_rev5(struct brcms_phy *pi, u8 rxcore,
- u16 *rg, u8 type);
-static void wlc_phy_update_mimoconfig_nphy(struct brcms_phy *pi, s32 preamble);
-static void wlc_phy_savecal_nphy(struct brcms_phy *pi);
-static void wlc_phy_restorecal_nphy(struct brcms_phy *pi);
-static void wlc_phy_resetcca_nphy(struct brcms_phy *pi);
-
-static void wlc_phy_txpwrctrl_config_nphy(struct brcms_phy *pi);
-static void wlc_phy_internal_cal_txgain_nphy(struct brcms_phy *pi);
-static void wlc_phy_precal_txgain_nphy(struct brcms_phy *pi);
-static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core);
-
-static void wlc_phy_extpa_set_tx_digi_filts_nphy(struct brcms_phy *pi);
-static void wlc_phy_ipa_set_tx_digi_filts_nphy(struct brcms_phy *pi);
-static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi);
-static u16 wlc_phy_ipa_get_bbmult_nphy(struct brcms_phy *pi);
-static void wlc_phy_ipa_set_bbmult_nphy(struct brcms_phy *pi, u8 m0, u8 m1);
-static u32 *wlc_phy_get_ipa_gaintbl_nphy(struct brcms_phy *pi);
-
-static void wlc_phy_a1_nphy(struct brcms_phy *pi, u8 core, u32 winsz, u32,
- u32 e);
-static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core);
-static void wlc_phy_a2_nphy(struct brcms_phy *pi, struct nphy_ipa_txcalgains *,
- enum phy_cal_mode, u8);
-static void wlc_phy_papd_cal_cleanup_nphy(struct brcms_phy *pi,
- struct nphy_papd_restore_state *state);
-static void wlc_phy_papd_cal_setup_nphy(struct brcms_phy *pi,
- struct nphy_papd_restore_state *state, u8);
-
-static void wlc_phy_clip_det_nphy(struct brcms_phy *pi, u8 write, u16 *vals);
-
-static void wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *evts,
- u8 *dlys, u8 len);
-
-static u16 wlc_phy_read_lpf_bw_ctl_nphy(struct brcms_phy *pi, u16 offset);
-
-static void
-wlc_phy_rfctrl_override_nphy_rev7(struct brcms_phy *pi, u16 field, u16 value,
- u8 core_mask, u8 off,
- u8 override_id);
-
-static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type);
-static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi);
-
-static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi);
-static void wlc_phy_txpwr_nphy_srom_convert(u8 *srom_max,
- u16 *pwr_offset,
- u8 tmp_max_pwr, u8 rate_start,
- u8 rate_end);
-
-static void wlc_phy_txpwr_limit_to_tbl_nphy(struct brcms_phy *pi);
-static void wlc_phy_txpwrctrl_coeff_setup_nphy(struct brcms_phy *pi);
-static void wlc_phy_txpwrctrl_idle_tssi_nphy(struct brcms_phy *pi);
-static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi);
-
-static bool wlc_phy_txpwr_ison_nphy(struct brcms_phy *pi);
-static u8 wlc_phy_txpwr_idx_cur_get_nphy(struct brcms_phy *pi, u8 core);
-static void wlc_phy_txpwr_idx_cur_set_nphy(struct brcms_phy *pi, u8 idx0,
- u8 idx1);
-static void wlc_phy_a4(struct brcms_phy *pi, bool full_cal);
-
-static u16 wlc_phy_radio205x_rcal(struct brcms_phy *pi);
-
-static u16 wlc_phy_radio2057_rccal(struct brcms_phy *pi);
-
-static u16 wlc_phy_gen_load_samples_nphy(struct brcms_phy *pi, u32 f_kHz,
- u16 max_val,
- u8 dac_test_mode);
-static void wlc_phy_loadsampletable_nphy(struct brcms_phy *pi, cs32 *tone_buf,
- u16 num_samps);
-static void wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 n, u16 lps,
- u16 wait, u8 iq, u8 dac_test_mode,
- bool modify_bbmult);
+ 0x09, 0x0a, 0x09, 0x0a, 0x15, 0x16
+};
bool wlc_phy_bist_check_phy(struct brcms_phy_pub *pih)
{
@@ -14209,9 +14131,8 @@ bool wlc_phy_bist_check_phy(struct brcms_phy_pub *pih)
phybist4 = read_phy_reg(pi, 0x156);
if ((phybist0 == 0) && (phybist1 == 0x4000) && (phybist2 == 0x1fe0) &&
- (phybist3 == 0) && (phybist4 == 0)) {
+ (phybist3 == 0) && (phybist4 == 0))
return true;
- }
return false;
}
@@ -14230,18 +14151,7 @@ static void wlc_phy_bphy_init_nphy(struct brcms_phy *pi)
val -= 0x0202;
}
- if (NORADIO_ENAB(pi->pubpi)) {
-
- write_phy_reg(pi, NPHY_TO_BPHY_OFF + BPHY_PHYCRSTH, 0x3206);
-
- write_phy_reg(pi, NPHY_TO_BPHY_OFF + BPHY_RSSI_TRESH, 0x281e);
-
- or_phy_reg(pi, NPHY_TO_BPHY_OFF + BPHY_LNA_GAIN_RANGE, 0x1a);
-
- } else {
-
- write_phy_reg(pi, NPHY_TO_BPHY_OFF + BPHY_STEP, 0x668);
- }
+ write_phy_reg(pi, NPHY_TO_BPHY_OFF + BPHY_STEP, 0x668);
}
void
@@ -14307,7 +14217,8 @@ static void wlc_phy_tbl_init_nphy(struct brcms_phy *pi)
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
antswctrllut = CHSPEC_IS2G(pi->radio_chanspec) ?
- pi->srom_fem2g.antswctrllut : pi->srom_fem5g.antswctrllut;
+ pi->srom_fem2g.antswctrllut : pi->srom_fem5g.
+ antswctrllut;
switch (antswctrllut) {
case 0:
@@ -14316,20 +14227,20 @@ static void wlc_phy_tbl_init_nphy(struct brcms_phy *pi)
case 1:
- if (pi->aa2g == 7) {
+ if (pi->aa2g == 7)
+ wlc_phy_table_write_nphy(
+ pi,
+ NPHY_TBL_ID_ANTSWCTRLLUT,
+ 2, 0x21, 8,
+ &ant_sw_ctrl_tbl_rev8_2o3[0]);
+ else
+ wlc_phy_table_write_nphy(
+ pi,
+ NPHY_TBL_ID_ANTSWCTRLLUT,
+ 2, 0x21, 8,
+ &ant_sw_ctrl_tbl_rev8
+ [0]);
- wlc_phy_table_write_nphy(pi,
- NPHY_TBL_ID_ANTSWCTRLLUT,
- 2, 0x21, 8,
- &ant_sw_ctrl_tbl_rev8_2o3
- [0]);
- } else {
- wlc_phy_table_write_nphy(pi,
- NPHY_TBL_ID_ANTSWCTRLLUT,
- 2, 0x21, 8,
- &ant_sw_ctrl_tbl_rev8
- [0]);
- }
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_ANTSWCTRLLUT,
2, 0x25, 8,
&ant_sw_ctrl_tbl_rev8[2]);
@@ -14340,31 +14251,31 @@ static void wlc_phy_tbl_init_nphy(struct brcms_phy *pi)
case 2:
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_ANTSWCTRLLUT,
- 2, 0x1, 8,
- &ant_sw_ctrl_tbl_rev8_2057v7_core0
- [0]);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_ANTSWCTRLLUT,
- 2, 0x5, 8,
- &ant_sw_ctrl_tbl_rev8_2057v7_core0
- [2]);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_ANTSWCTRLLUT,
- 2, 0x9, 8,
- &ant_sw_ctrl_tbl_rev8_2057v7_core0
- [4]);
-
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_ANTSWCTRLLUT,
- 2, 0x21, 8,
- &ant_sw_ctrl_tbl_rev8_2057v7_core1
- [0]);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_ANTSWCTRLLUT,
- 2, 0x25, 8,
- &ant_sw_ctrl_tbl_rev8_2057v7_core1
- [2]);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_ANTSWCTRLLUT,
- 2, 0x29, 8,
- &ant_sw_ctrl_tbl_rev8_2057v7_core1
- [4]);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_ANTSWCTRLLUT,
+ 2, 0x1, 8,
+ &ant_sw_ctrl_tbl_rev8_2057v7_core0[0]);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_ANTSWCTRLLUT,
+ 2, 0x5, 8,
+ &ant_sw_ctrl_tbl_rev8_2057v7_core0[2]);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_ANTSWCTRLLUT,
+ 2, 0x9, 8,
+ &ant_sw_ctrl_tbl_rev8_2057v7_core0[4]);
+
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_ANTSWCTRLLUT,
+ 2, 0x21, 8,
+ &ant_sw_ctrl_tbl_rev8_2057v7_core1[0]);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_ANTSWCTRLLUT,
+ 2, 0x25, 8,
+ &ant_sw_ctrl_tbl_rev8_2057v7_core1[2]);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_ANTSWCTRLLUT,
+ 2, 0x29, 8,
+ &ant_sw_ctrl_tbl_rev8_2057v7_core1[4]);
break;
default:
@@ -14375,45 +14286,49 @@ static void wlc_phy_tbl_init_nphy(struct brcms_phy *pi)
for (idx = 0; idx < mimophytbl_info_sz_rev3_volatile; idx++) {
if (idx == ANT_SWCTRL_TBL_REV3_IDX) {
- antswctrllut = CHSPEC_IS2G(pi->radio_chanspec) ?
- pi->srom_fem2g.antswctrllut : pi->
- srom_fem5g.antswctrllut;
+ antswctrllut =
+ CHSPEC_IS2G(pi->radio_chanspec) ?
+ pi->srom_fem2g.antswctrllut :
+ pi->srom_fem5g.antswctrllut;
switch (antswctrllut) {
case 0:
- wlc_phy_write_table_nphy(pi,
- &mimophytbl_info_rev3_volatile
- [idx]);
+ wlc_phy_write_table_nphy(
+ pi,
+ &mimophytbl_info_rev3_volatile
+ [idx]);
break;
case 1:
- wlc_phy_write_table_nphy(pi,
- &mimophytbl_info_rev3_volatile1
- [idx]);
+ wlc_phy_write_table_nphy(
+ pi,
+ &mimophytbl_info_rev3_volatile1
+ [idx]);
break;
case 2:
- wlc_phy_write_table_nphy(pi,
- &mimophytbl_info_rev3_volatile2
- [idx]);
+ wlc_phy_write_table_nphy(
+ pi,
+ &mimophytbl_info_rev3_volatile2
+ [idx]);
break;
case 3:
- wlc_phy_write_table_nphy(pi,
- &mimophytbl_info_rev3_volatile3
- [idx]);
+ wlc_phy_write_table_nphy(
+ pi,
+ &mimophytbl_info_rev3_volatile3
+ [idx]);
break;
default:
break;
}
} else {
- wlc_phy_write_table_nphy(pi,
- &mimophytbl_info_rev3_volatile
- [idx]);
+ wlc_phy_write_table_nphy(
+ pi,
+ &mimophytbl_info_rev3_volatile[idx]);
}
}
} else {
- for (idx = 0; idx < mimophytbl_info_sz_rev0_volatile; idx++) {
+ for (idx = 0; idx < mimophytbl_info_sz_rev0_volatile; idx++)
wlc_phy_write_table_nphy(pi,
&mimophytbl_info_rev0_volatile
[idx]);
- }
}
}
@@ -14440,32 +14355,414 @@ void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs)
wlc_phy_write_txmacreg_nphy(pi, holdoff, delay);
- if (pi && pi->sh && (pi->sh->_rifs_phy != rifs)) {
+ if (pi && pi->sh && (pi->sh->_rifs_phy != rifs))
pi->sh->_rifs_phy = rifs;
+}
+
+static void wlc_phy_txpwrctrl_config_nphy(struct brcms_phy *pi)
+{
+
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ pi->nphy_txpwrctrl = PHY_TPC_HW_ON;
+ pi->phy_5g_pwrgain = true;
+ return;
+ }
+
+ pi->nphy_txpwrctrl = PHY_TPC_HW_OFF;
+ pi->phy_5g_pwrgain = false;
+
+ if ((pi->sh->boardflags2 & BFL2_TXPWRCTRL_EN) &&
+ NREV_GE(pi->pubpi.phy_rev, 2) && (pi->sh->sromrev >= 4))
+ pi->nphy_txpwrctrl = PHY_TPC_HW_ON;
+ else if ((pi->sh->sromrev >= 4)
+ && (pi->sh->boardflags2 & BFL2_5G_PWRGAIN))
+ pi->phy_5g_pwrgain = true;
+}
+
+static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
+{
+ u16 bw40po, cddpo, stbcpo, bwduppo;
+ uint band_num;
+ struct phy_shim_info *shim = pi->sh->physhim;
+
+ if (pi->sh->sromrev >= 9)
+ return;
+
+ bw40po = (u16) wlapi_getintvar(shim, BRCMS_SROM_BW40PO);
+ pi->bw402gpo = bw40po & 0xf;
+ pi->bw405gpo = (bw40po & 0xf0) >> 4;
+ pi->bw405glpo = (bw40po & 0xf00) >> 8;
+ pi->bw405ghpo = (bw40po & 0xf000) >> 12;
+
+ cddpo = (u16) wlapi_getintvar(shim, BRCMS_SROM_CDDPO);
+ pi->cdd2gpo = cddpo & 0xf;
+ pi->cdd5gpo = (cddpo & 0xf0) >> 4;
+ pi->cdd5glpo = (cddpo & 0xf00) >> 8;
+ pi->cdd5ghpo = (cddpo & 0xf000) >> 12;
+
+ stbcpo = (u16) wlapi_getintvar(shim, BRCMS_SROM_STBCPO);
+ pi->stbc2gpo = stbcpo & 0xf;
+ pi->stbc5gpo = (stbcpo & 0xf0) >> 4;
+ pi->stbc5glpo = (stbcpo & 0xf00) >> 8;
+ pi->stbc5ghpo = (stbcpo & 0xf000) >> 12;
+
+ bwduppo = (u16) wlapi_getintvar(shim, BRCMS_SROM_BWDUPPO);
+ pi->bwdup2gpo = bwduppo & 0xf;
+ pi->bwdup5gpo = (bwduppo & 0xf0) >> 4;
+ pi->bwdup5glpo = (bwduppo & 0xf00) >> 8;
+ pi->bwdup5ghpo = (bwduppo & 0xf000) >> 12;
+
+ for (band_num = 0; band_num < (CH_2G_GROUP + CH_5G_GROUP);
+ band_num++) {
+ switch (band_num) {
+ case 0:
+
+ pi->nphy_txpid2g[PHY_CORE_0] =
+ (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_TXPID2GA0);
+ pi->nphy_txpid2g[PHY_CORE_1] =
+ (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_TXPID2GA1);
+ pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_2g =
+ (s8) wlapi_getintvar(shim,
+ BRCMS_SROM_MAXP2GA0);
+ pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_2g =
+ (s8) wlapi_getintvar(shim,
+ BRCMS_SROM_MAXP2GA1);
+ pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_2g_a1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA2GW0A0);
+ pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_2g_a1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA2GW0A1);
+ pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_2g_b0 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA2GW1A0);
+ pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_2g_b0 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA2GW1A1);
+ pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_2g_b1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA2GW2A0);
+ pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_2g_b1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA2GW2A1);
+ pi->nphy_pwrctrl_info[PHY_CORE_0].idle_targ_2g =
+ (s8) wlapi_getintvar(shim, BRCMS_SROM_ITT2GA0);
+ pi->nphy_pwrctrl_info[PHY_CORE_1].idle_targ_2g =
+ (s8) wlapi_getintvar(shim, BRCMS_SROM_ITT2GA1);
+
+ pi->cck2gpo = (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_CCK2GPO);
+
+ pi->ofdm2gpo =
+ (u32) wlapi_getintvar(shim,
+ BRCMS_SROM_OFDM2GPO);
+
+ pi->mcs2gpo[0] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS2GPO0);
+ pi->mcs2gpo[1] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS2GPO1);
+ pi->mcs2gpo[2] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS2GPO2);
+ pi->mcs2gpo[3] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS2GPO3);
+ pi->mcs2gpo[4] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS2GPO4);
+ pi->mcs2gpo[5] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS2GPO5);
+ pi->mcs2gpo[6] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS2GPO6);
+ pi->mcs2gpo[7] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS2GPO7);
+ break;
+ case 1:
+
+ pi->nphy_txpid5g[PHY_CORE_0] =
+ (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_TXPID5GA0);
+ pi->nphy_txpid5g[PHY_CORE_1] =
+ (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_TXPID5GA1);
+ pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_5gm =
+ (s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GA0);
+ pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_5gm =
+ (s8) wlapi_getintvar(shim,
+ BRCMS_SROM_MAXP5GA1);
+ pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_5gm_a1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GW0A0);
+ pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_5gm_a1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GW0A1);
+ pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_5gm_b0 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GW1A0);
+ pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_5gm_b0 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GW1A1);
+ pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_5gm_b1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GW2A0);
+ pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_5gm_b1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GW2A1);
+ pi->nphy_pwrctrl_info[PHY_CORE_0].idle_targ_5gm =
+ (s8) wlapi_getintvar(shim, BRCMS_SROM_ITT5GA0);
+ pi->nphy_pwrctrl_info[PHY_CORE_1].idle_targ_5gm =
+ (s8) wlapi_getintvar(shim, BRCMS_SROM_ITT5GA1);
+
+ pi->ofdm5gpo =
+ (u32) wlapi_getintvar(shim,
+ BRCMS_SROM_OFDM5GPO);
+
+ pi->mcs5gpo[0] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GPO0);
+ pi->mcs5gpo[1] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GPO1);
+ pi->mcs5gpo[2] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GPO2);
+ pi->mcs5gpo[3] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GPO3);
+ pi->mcs5gpo[4] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GPO4);
+ pi->mcs5gpo[5] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GPO5);
+ pi->mcs5gpo[6] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GPO6);
+ pi->mcs5gpo[7] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GPO7);
+ break;
+ case 2:
+
+ pi->nphy_txpid5gl[0] =
+ (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_TXPID5GLA0);
+ pi->nphy_txpid5gl[1] =
+ (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_TXPID5GLA1);
+ pi->nphy_pwrctrl_info[0].max_pwr_5gl =
+ (s8) wlapi_getintvar(shim,
+ BRCMS_SROM_MAXP5GLA0);
+ pi->nphy_pwrctrl_info[1].max_pwr_5gl =
+ (s8) wlapi_getintvar(shim,
+ BRCMS_SROM_MAXP5GLA1);
+ pi->nphy_pwrctrl_info[0].pwrdet_5gl_a1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GLW0A0);
+ pi->nphy_pwrctrl_info[1].pwrdet_5gl_a1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GLW0A1);
+ pi->nphy_pwrctrl_info[0].pwrdet_5gl_b0 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GLW1A0);
+ pi->nphy_pwrctrl_info[1].pwrdet_5gl_b0 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GLW1A1);
+ pi->nphy_pwrctrl_info[0].pwrdet_5gl_b1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GLW2A0);
+ pi->nphy_pwrctrl_info[1].pwrdet_5gl_b1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GLW2A1);
+ pi->nphy_pwrctrl_info[0].idle_targ_5gl = 0;
+ pi->nphy_pwrctrl_info[1].idle_targ_5gl = 0;
+
+ pi->ofdm5glpo =
+ (u32) wlapi_getintvar(shim,
+ BRCMS_SROM_OFDM5GLPO);
+
+ pi->mcs5glpo[0] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GLPO0);
+ pi->mcs5glpo[1] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GLPO1);
+ pi->mcs5glpo[2] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GLPO2);
+ pi->mcs5glpo[3] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GLPO3);
+ pi->mcs5glpo[4] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GLPO4);
+ pi->mcs5glpo[5] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GLPO5);
+ pi->mcs5glpo[6] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GLPO6);
+ pi->mcs5glpo[7] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GLPO7);
+ break;
+ case 3:
+
+ pi->nphy_txpid5gh[0] =
+ (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_TXPID5GHA0);
+ pi->nphy_txpid5gh[1] =
+ (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_TXPID5GHA1);
+ pi->nphy_pwrctrl_info[0].max_pwr_5gh =
+ (s8) wlapi_getintvar(shim,
+ BRCMS_SROM_MAXP5GHA0);
+ pi->nphy_pwrctrl_info[1].max_pwr_5gh =
+ (s8) wlapi_getintvar(shim,
+ BRCMS_SROM_MAXP5GHA1);
+ pi->nphy_pwrctrl_info[0].pwrdet_5gh_a1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GHW0A0);
+ pi->nphy_pwrctrl_info[1].pwrdet_5gh_a1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GHW0A1);
+ pi->nphy_pwrctrl_info[0].pwrdet_5gh_b0 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GHW1A0);
+ pi->nphy_pwrctrl_info[1].pwrdet_5gh_b0 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GHW1A1);
+ pi->nphy_pwrctrl_info[0].pwrdet_5gh_b1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GHW2A0);
+ pi->nphy_pwrctrl_info[1].pwrdet_5gh_b1 =
+ (s16) wlapi_getintvar(shim,
+ BRCMS_SROM_PA5GHW2A1);
+ pi->nphy_pwrctrl_info[0].idle_targ_5gh = 0;
+ pi->nphy_pwrctrl_info[1].idle_targ_5gh = 0;
+
+ pi->ofdm5ghpo =
+ (u32) wlapi_getintvar(shim,
+ BRCMS_SROM_OFDM5GHPO);
+
+ pi->mcs5ghpo[0] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GHPO0);
+ pi->mcs5ghpo[1] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GHPO1);
+ pi->mcs5ghpo[2] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GHPO2);
+ pi->mcs5ghpo[3] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GHPO3);
+ pi->mcs5ghpo[4] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GHPO4);
+ pi->mcs5ghpo[5] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GHPO5);
+ pi->mcs5ghpo[6] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GHPO6);
+ pi->mcs5ghpo[7] =
+ (u16) wlapi_getintvar(shim,
+ BRCMS_SROM_MCS5GHPO7);
+ break;
+ }
}
+
+ wlc_phy_txpwr_apply_nphy(pi);
+}
+
+static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
+{
+ struct phy_shim_info *shim = pi->sh->physhim;
+
+ pi->antswitch = (u8) wlapi_getintvar(shim, BRCMS_SROM_ANTSWITCH);
+ pi->aa2g = (u8) wlapi_getintvar(shim, BRCMS_SROM_AA2G);
+ pi->aa5g = (u8) wlapi_getintvar(shim, BRCMS_SROM_AA5G);
+
+ pi->srom_fem2g.tssipos = (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_TSSIPOS2G);
+ pi->srom_fem2g.extpagain = (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_EXTPAGAIN2G);
+ pi->srom_fem2g.pdetrange = (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_PDETRANGE2G);
+ pi->srom_fem2g.triso = (u8) wlapi_getintvar(shim, BRCMS_SROM_TRISO2G);
+ pi->srom_fem2g.antswctrllut =
+ (u8) wlapi_getintvar(shim, BRCMS_SROM_ANTSWCTL2G);
+
+ pi->srom_fem5g.tssipos = (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_TSSIPOS5G);
+ pi->srom_fem5g.extpagain = (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_EXTPAGAIN5G);
+ pi->srom_fem5g.pdetrange = (u8) wlapi_getintvar(shim,
+ BRCMS_SROM_PDETRANGE5G);
+ pi->srom_fem5g.triso = (u8) wlapi_getintvar(shim, BRCMS_SROM_TRISO5G);
+ if (wlapi_getvar(shim, BRCMS_SROM_ANTSWCTL5G))
+ pi->srom_fem5g.antswctrllut =
+ (u8) wlapi_getintvar(shim, BRCMS_SROM_ANTSWCTL5G);
+ else
+ pi->srom_fem5g.antswctrllut =
+ (u8) wlapi_getintvar(shim, BRCMS_SROM_ANTSWCTL2G);
+
+ wlc_phy_txpower_ipa_upd(pi);
+
+ pi->phy_txcore_disable_temp =
+ (s16) wlapi_getintvar(shim, BRCMS_SROM_TEMPTHRESH);
+ if (pi->phy_txcore_disable_temp == 0)
+ pi->phy_txcore_disable_temp = PHY_CHAIN_TX_DISABLE_TEMP;
+
+ pi->phy_tempsense_offset = (s8) wlapi_getintvar(shim,
+ BRCMS_SROM_TEMPOFFSET);
+ if (pi->phy_tempsense_offset != 0) {
+ if (pi->phy_tempsense_offset >
+ (NPHY_SROM_TEMPSHIFT + NPHY_SROM_MAXTEMPOFFSET))
+ pi->phy_tempsense_offset = NPHY_SROM_MAXTEMPOFFSET;
+ else if (pi->phy_tempsense_offset < (NPHY_SROM_TEMPSHIFT +
+ NPHY_SROM_MINTEMPOFFSET))
+ pi->phy_tempsense_offset = NPHY_SROM_MINTEMPOFFSET;
+ else
+ pi->phy_tempsense_offset -= NPHY_SROM_TEMPSHIFT;
+ }
+
+ pi->phy_txcore_enable_temp =
+ pi->phy_txcore_disable_temp - PHY_HYSTERESIS_DELTATEMP;
+
+ pi->phycal_tempdelta =
+ (u8) wlapi_getintvar(shim, BRCMS_SROM_PHYCAL_TEMPDELTA);
+ if (pi->phycal_tempdelta > NPHY_CAL_MAXTEMPDELTA)
+ pi->phycal_tempdelta = 0;
+
+ wlc_phy_txpwr_srom_read_ppr_nphy(pi);
+
+ return true;
}
bool wlc_phy_attach_nphy(struct brcms_phy *pi)
{
uint i;
- if (NREV_GE(pi->pubpi.phy_rev, 3) && NREV_LT(pi->pubpi.phy_rev, 6)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 3) && NREV_LT(pi->pubpi.phy_rev, 6))
pi->phyhang_avoid = true;
- }
if (NREV_GE(pi->pubpi.phy_rev, 3) && NREV_LT(pi->pubpi.phy_rev, 7)) {
-
pi->nphy_gband_spurwar_en = true;
-
- if (pi->sh->boardflags2 & BFL2_SPUR_WAR) {
+ if (pi->sh->boardflags2 & BFL2_SPUR_WAR)
pi->nphy_aband_spurwar_en = true;
- }
}
if (NREV_GE(pi->pubpi.phy_rev, 6) && NREV_LT(pi->pubpi.phy_rev, 7)) {
-
- if (pi->sh->boardflags2 & BFL2_2G_SPUR_WAR) {
+ if (pi->sh->boardflags2 & BFL2_2G_SPUR_WAR)
pi->nphy_gband_spurwar2_en = true;
- }
}
pi->n_preamble_override = AUTO;
@@ -14485,9 +14782,8 @@ bool wlc_phy_attach_nphy(struct brcms_phy *pi)
pi->nphy_elna_gain_config = false;
pi->radio_is_on = false;
- for (i = 0; i < pi->pubpi.phy_corenum; i++) {
+ for (i = 0; i < pi->pubpi.phy_corenum; i++)
pi->nphy_txpwrindex[i].index = AUTO;
- }
wlc_phy_txpwrctrl_config_nphy(pi);
if (pi->nphy_txpwrctrl == PHY_TPC_HW_ON)
@@ -14504,751 +14800,1405 @@ bool wlc_phy_attach_nphy(struct brcms_phy *pi)
return true;
}
-static void wlc_phy_txpwrctrl_config_nphy(struct brcms_phy *pi)
+static s32 get_rf_pwr_offset(struct brcms_phy *pi, s16 pga_gn, s16 pad_gn)
{
+ s32 rfpwr_offset = 0;
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- pi->nphy_txpwrctrl = PHY_TPC_HW_ON;
- pi->phy_5g_pwrgain = true;
- return;
- }
-
- pi->nphy_txpwrctrl = PHY_TPC_HW_OFF;
- pi->phy_5g_pwrgain = false;
-
- if ((pi->sh->boardflags2 & BFL2_TXPWRCTRL_EN) &&
- NREV_GE(pi->pubpi.phy_rev, 2) && (pi->sh->sromrev >= 4))
- pi->nphy_txpwrctrl = PHY_TPC_HW_ON;
- else if ((pi->sh->sromrev >= 4)
- && (pi->sh->boardflags2 & BFL2_5G_PWRGAIN))
- pi->phy_5g_pwrgain = true;
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if ((pi->pubpi.radiorev == 3) ||
+ (pi->pubpi.radiorev == 4) ||
+ (pi->pubpi.radiorev == 6))
+ rfpwr_offset = (s16)
+ nphy_papd_padgain_dlt_2g_2057rev3n4
+ [pad_gn];
+ else if (pi->pubpi.radiorev == 5)
+ rfpwr_offset = (s16)
+ nphy_papd_padgain_dlt_2g_2057rev5
+ [pad_gn];
+ else if ((pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev ==
+ 8))
+ rfpwr_offset = (s16)
+ nphy_papd_padgain_dlt_2g_2057rev7
+ [pad_gn];
+ } else {
+ if ((pi->pubpi.radiorev == 3) ||
+ (pi->pubpi.radiorev == 4) ||
+ (pi->pubpi.radiorev == 6))
+ rfpwr_offset = (s16)
+ nphy_papd_pgagain_dlt_5g_2057
+ [pga_gn];
+ else if ((pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev ==
+ 8))
+ rfpwr_offset = (s16)
+ nphy_papd_pgagain_dlt_5g_2057rev7
+ [pga_gn];
+ }
+ return rfpwr_offset;
}
-void wlc_phy_init_nphy(struct brcms_phy *pi)
+static void wlc_phy_update_mimoconfig_nphy(struct brcms_phy *pi, s32 preamble)
{
+ bool gf_preamble = false;
u16 val;
- u16 clip1_ths[2];
- struct nphy_txgains target_gain;
- u8 tx_pwr_ctrl_state;
- bool do_nphy_cal = false;
- uint core;
- uint origidx, intr_val;
- d11regs_t *regs;
- u32 d11_clk_ctl_st;
- core = 0;
-
- if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN)) {
- pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
- }
+ if (preamble == BRCMS_N_PREAMBLE_GF)
+ gf_preamble = true;
- if ((ISNPHY(pi)) && (NREV_GE(pi->pubpi.phy_rev, 5)) &&
- ((pi->sh->chippkg == BCM4717_PKG_ID) ||
- (pi->sh->chippkg == BCM4718_PKG_ID))) {
- if ((pi->sh->boardflags & BFL_EXTLNA) &&
- (CHSPEC_IS2G(pi->radio_chanspec))) {
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol), 0x40,
- 0x40);
- }
- }
+ val = read_phy_reg(pi, 0xed);
- if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
- CHSPEC_IS40(pi->radio_chanspec)) {
+ val |= RX_GF_MM_AUTO;
+ val &= ~RX_GF_OR_MM;
+ if (gf_preamble)
+ val |= RX_GF_OR_MM;
- regs = (d11regs_t *) ai_switch_core(pi->sh->sih, D11_CORE_ID,
- &origidx, &intr_val);
- d11_clk_ctl_st = R_REG(&regs->clk_ctl_st);
- AND_REG(&regs->clk_ctl_st,
- ~(CCS_FORCEHT | CCS_HTAREQ));
+ write_phy_reg(pi, 0xed, val);
+}
- W_REG(&regs->clk_ctl_st, d11_clk_ctl_st);
+static void wlc_phy_ipa_set_tx_digi_filts_nphy(struct brcms_phy *pi)
+{
+ int j, type;
+ u16 addr_offset[] = { 0x186, 0x195, 0x2c5};
- ai_restore_core(pi->sh->sih, origidx, intr_val);
+ for (type = 0; type < 3; type++) {
+ for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++)
+ write_phy_reg(pi, addr_offset[type] + j,
+ NPHY_IPA_REV4_txdigi_filtcoeffs[type][j]);
}
- pi->use_int_tx_iqlo_cal_nphy =
- (PHY_IPA(pi) ||
- (NREV_GE(pi->pubpi.phy_rev, 7) ||
- (NREV_GE(pi->pubpi.phy_rev, 5)
- && pi->sh->boardflags2 & BFL2_INTERNDET_TXIQCAL)));
-
- pi->internal_tx_iqlo_cal_tapoff_intpa_nphy = false;
-
- pi->nphy_deaf_count = 0;
-
- wlc_phy_tbl_init_nphy(pi);
-
- pi->nphy_crsminpwr_adjusted = false;
- pi->nphy_noisevars_adjusted = false;
-
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- write_phy_reg(pi, 0xe7, 0);
- write_phy_reg(pi, 0xec, 0);
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- write_phy_reg(pi, 0x342, 0);
- write_phy_reg(pi, 0x343, 0);
- write_phy_reg(pi, 0x346, 0);
- write_phy_reg(pi, 0x347, 0);
- }
- write_phy_reg(pi, 0xe5, 0);
- write_phy_reg(pi, 0xe6, 0);
+ if (pi->bw == WL_CHANSPEC_BW_40) {
+ for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++)
+ write_phy_reg(pi, 0x186 + j,
+ NPHY_IPA_REV4_txdigi_filtcoeffs[3][j]);
} else {
- write_phy_reg(pi, 0xec, 0);
- }
+ if (CHSPEC_IS5G(pi->radio_chanspec)) {
+ for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++)
+ write_phy_reg(pi, 0x186 + j,
+ NPHY_IPA_REV4_txdigi_filtcoeffs[5][j]);
+ }
- write_phy_reg(pi, 0x91, 0);
- write_phy_reg(pi, 0x92, 0);
- if (NREV_LT(pi->pubpi.phy_rev, 6)) {
- write_phy_reg(pi, 0x93, 0);
- write_phy_reg(pi, 0x94, 0);
+ if (CHSPEC_CHANNEL(pi->radio_chanspec) == 14) {
+ for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++)
+ write_phy_reg(pi, 0x2c5 + j,
+ NPHY_IPA_REV4_txdigi_filtcoeffs[6][j]);
+ }
}
+}
- and_phy_reg(pi, 0xa1, ~3);
+static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
+{
+ int j;
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- write_phy_reg(pi, 0x8f, 0);
- write_phy_reg(pi, 0xa5, 0);
+ if (pi->bw == WL_CHANSPEC_BW_40) {
+ for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++)
+ write_phy_reg(pi, 0x195 + j,
+ NPHY_IPA_REV4_txdigi_filtcoeffs[4][j]);
} else {
- write_phy_reg(pi, 0xa5, 0);
+ for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++)
+ write_phy_reg(pi, 0x186 + j,
+ NPHY_IPA_REV4_txdigi_filtcoeffs[3][j]);
}
+}
- if (NREV_IS(pi->pubpi.phy_rev, 2))
- mod_phy_reg(pi, 0xdc, 0x00ff, 0x3b);
- else if (NREV_LT(pi->pubpi.phy_rev, 2))
- mod_phy_reg(pi, 0xdc, 0x00ff, 0x40);
-
- write_phy_reg(pi, 0x203, 32);
- write_phy_reg(pi, 0x201, 32);
-
- if (pi->sh->boardflags2 & BFL2_SKWRKFEM_BRD)
- write_phy_reg(pi, 0x20d, 160);
- else
- write_phy_reg(pi, 0x20d, 184);
-
- write_phy_reg(pi, 0x13a, 200);
-
- write_phy_reg(pi, 0x70, 80);
-
- write_phy_reg(pi, 0x1ff, 48);
+static void
+wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
+ u8 len)
+{
+ u32 t1_offset, t2_offset;
+ u8 ctr;
+ u8 end_event =
+ NREV_GE(pi->pubpi.phy_rev,
+ 3) ? NPHY_REV3_RFSEQ_CMD_END : NPHY_RFSEQ_CMD_END;
+ u8 end_dly = 1;
- if (NREV_LT(pi->pubpi.phy_rev, 8)) {
- wlc_phy_update_mimoconfig_nphy(pi, pi->n_preamble_override);
- }
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, true);
- wlc_phy_stf_chain_upd_nphy(pi);
+ t1_offset = cmd << 4;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, len, t1_offset, 8,
+ events);
+ t2_offset = t1_offset + 0x080;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, len, t2_offset, 8,
+ dlys);
- if (NREV_LT(pi->pubpi.phy_rev, 2)) {
- write_phy_reg(pi, 0x180, 0xaa8);
- write_phy_reg(pi, 0x181, 0x9a4);
+ for (ctr = len; ctr < 16; ctr++) {
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
+ t1_offset + ctr, 8, &end_event);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
+ t2_offset + ctr, 8, &end_dly);
}
- if (PHY_IPA(pi)) {
- for (core = 0; core < pi->pubpi.phy_corenum; core++) {
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x297 :
- 0x29b, (0x1 << 0), (1) << 0);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x298 :
- 0x29c, (0x1ff << 7),
- (pi->nphy_papd_epsilon_offset[core]) << 7);
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, false);
+}
- }
+static u16 wlc_phy_read_lpf_bw_ctl_nphy(struct brcms_phy *pi, u16 offset)
+{
+ u16 lpf_bw_ctl_val = 0;
+ u16 rx2tx_lpf_rc_lut_offset = 0;
- wlc_phy_ipa_set_tx_digi_filts_nphy(pi);
+ if (offset == 0) {
+ if (CHSPEC_IS40(pi->radio_chanspec))
+ rx2tx_lpf_rc_lut_offset = 0x159;
+ else
+ rx2tx_lpf_rc_lut_offset = 0x154;
} else {
-
- if (NREV_GE(pi->pubpi.phy_rev, 5)) {
- wlc_phy_extpa_set_tx_digi_filts_nphy(pi);
- }
+ rx2tx_lpf_rc_lut_offset = offset;
}
+ wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
+ (u32) rx2tx_lpf_rc_lut_offset, 16,
+ &lpf_bw_ctl_val);
- wlc_phy_workarounds_nphy(pi);
-
- wlapi_bmac_phyclk_fgc(pi->sh->physhim, ON);
-
- val = read_phy_reg(pi, 0x01);
- write_phy_reg(pi, 0x01, val | BBCFG_RESETCCA);
- write_phy_reg(pi, 0x01, val & (~BBCFG_RESETCCA));
- wlapi_bmac_phyclk_fgc(pi->sh->physhim, OFF);
-
- wlapi_bmac_macphyclk_set(pi->sh->physhim, ON);
-
- wlc_phy_pa_override_nphy(pi, OFF);
- wlc_phy_force_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX);
- wlc_phy_force_rfseq_nphy(pi, NPHY_RFSEQ_RESET2RX);
- wlc_phy_pa_override_nphy(pi, ON);
+ lpf_bw_ctl_val = lpf_bw_ctl_val & 0x7;
- wlc_phy_classifier_nphy(pi, 0, 0);
- wlc_phy_clip_det_nphy(pi, 0, clip1_ths);
+ return lpf_bw_ctl_val;
+}
- if (CHSPEC_IS2G(pi->radio_chanspec))
- wlc_phy_bphy_init_nphy(pi);
+static void
+wlc_phy_rfctrl_override_nphy_rev7(struct brcms_phy *pi, u16 field, u16 value,
+ u8 core_mask, u8 off, u8 override_id)
+{
+ u8 core_num;
+ u16 addr = 0, en_addr = 0, val_addr = 0, en_mask = 0, val_mask = 0;
+ u8 val_shift = 0;
- tx_pwr_ctrl_state = pi->nphy_txpwrctrl;
- wlc_phy_txpwrctrl_enable_nphy(pi, PHY_TPC_HW_OFF);
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ en_mask = field;
+ for (core_num = 0; core_num < 2; core_num++) {
+ if (override_id == NPHY_REV7_RFCTRLOVERRIDE_ID0) {
- wlc_phy_txpwr_fixpower_nphy(pi);
+ switch (field) {
+ case (0x1 << 2):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a :
+ 0x7d;
+ val_mask = (0x1 << 1);
+ val_shift = 1;
+ break;
+ case (0x1 << 3):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a :
+ 0x7d;
+ val_mask = (0x1 << 2);
+ val_shift = 2;
+ break;
+ case (0x1 << 4):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a :
+ 0x7d;
+ val_mask = (0x1 << 4);
+ val_shift = 4;
+ break;
+ case (0x1 << 5):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a :
+ 0x7d;
+ val_mask = (0x1 << 5);
+ val_shift = 5;
+ break;
+ case (0x1 << 6):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a :
+ 0x7d;
+ val_mask = (0x1 << 6);
+ val_shift = 6;
+ break;
+ case (0x1 << 7):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a :
+ 0x7d;
+ val_mask = (0x1 << 7);
+ val_shift = 7;
+ break;
+ case (0x1 << 10):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0xf8 :
+ 0xfa;
+ val_mask = (0x7 << 4);
+ val_shift = 4;
+ break;
+ case (0x1 << 11):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7b :
+ 0x7e;
+ val_mask = (0xffff << 0);
+ val_shift = 0;
+ break;
+ case (0x1 << 12):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7c :
+ 0x7f;
+ val_mask = (0xffff << 0);
+ val_shift = 0;
+ break;
+ case (0x3 << 13):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x348 :
+ 0x349;
+ val_mask = (0xff << 0);
+ val_shift = 0;
+ break;
+ case (0x1 << 13):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x348 :
+ 0x349;
+ val_mask = (0xf << 0);
+ val_shift = 0;
+ break;
+ default:
+ addr = 0xffff;
+ break;
+ }
+ } else if (override_id ==
+ NPHY_REV7_RFCTRLOVERRIDE_ID1) {
- wlc_phy_txpwrctrl_idle_tssi_nphy(pi);
+ switch (field) {
+ case (0x1 << 1):
+ en_addr = (core_num == 0) ? 0x342 :
+ 0x343;
+ val_addr = (core_num == 0) ? 0x340 :
+ 0x341;
+ val_mask = (0x1 << 1);
+ val_shift = 1;
+ break;
+ case (0x1 << 3):
+ en_addr = (core_num == 0) ? 0x342 :
+ 0x343;
+ val_addr = (core_num == 0) ? 0x340 :
+ 0x341;
+ val_mask = (0x1 << 3);
+ val_shift = 3;
+ break;
+ case (0x1 << 5):
+ en_addr = (core_num == 0) ? 0x342 :
+ 0x343;
+ val_addr = (core_num == 0) ? 0x340 :
+ 0x341;
+ val_mask = (0x1 << 5);
+ val_shift = 5;
+ break;
+ case (0x1 << 4):
+ en_addr = (core_num == 0) ? 0x342 :
+ 0x343;
+ val_addr = (core_num == 0) ? 0x340 :
+ 0x341;
+ val_mask = (0x1 << 4);
+ val_shift = 4;
+ break;
+ case (0x1 << 2):
- wlc_phy_txpwrctrl_pwr_setup_nphy(pi);
+ en_addr = (core_num == 0) ? 0x342 :
+ 0x343;
+ val_addr = (core_num == 0) ? 0x340 :
+ 0x341;
+ val_mask = (0x1 << 2);
+ val_shift = 2;
+ break;
+ case (0x1 << 7):
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- u32 *tx_pwrctrl_tbl = NULL;
- u16 idx;
- s16 pga_gn = 0;
- s16 pad_gn = 0;
- s32 rfpwr_offset = 0;
+ en_addr = (core_num == 0) ? 0x342 :
+ 0x343;
+ val_addr = (core_num == 0) ? 0x340 :
+ 0x341;
+ val_mask = (0x7 << 8);
+ val_shift = 8;
+ break;
+ case (0x1 << 11):
+ en_addr = (core_num == 0) ? 0x342 :
+ 0x343;
+ val_addr = (core_num == 0) ? 0x340 :
+ 0x341;
+ val_mask = (0x1 << 14);
+ val_shift = 14;
+ break;
+ case (0x1 << 10):
+ en_addr = (core_num == 0) ? 0x342 :
+ 0x343;
+ val_addr = (core_num == 0) ? 0x340 :
+ 0x341;
+ val_mask = (0x1 << 13);
+ val_shift = 13;
+ break;
+ case (0x1 << 9):
+ en_addr = (core_num == 0) ? 0x342 :
+ 0x343;
+ val_addr = (core_num == 0) ? 0x340 :
+ 0x341;
+ val_mask = (0x1 << 12);
+ val_shift = 12;
+ break;
+ case (0x1 << 8):
+ en_addr = (core_num == 0) ? 0x342 :
+ 0x343;
+ val_addr = (core_num == 0) ? 0x340 :
+ 0x341;
+ val_mask = (0x1 << 11);
+ val_shift = 11;
+ break;
+ case (0x1 << 6):
+ en_addr = (core_num == 0) ? 0x342 :
+ 0x343;
+ val_addr = (core_num == 0) ? 0x340 :
+ 0x341;
+ val_mask = (0x1 << 6);
+ val_shift = 6;
+ break;
+ case (0x1 << 0):
+ en_addr = (core_num == 0) ? 0x342 :
+ 0x343;
+ val_addr = (core_num == 0) ? 0x340 :
+ 0x341;
+ val_mask = (0x1 << 0);
+ val_shift = 0;
+ break;
+ default:
+ addr = 0xffff;
+ break;
+ }
+ } else if (override_id ==
+ NPHY_REV7_RFCTRLOVERRIDE_ID2) {
- if (PHY_IPA(pi)) {
- tx_pwrctrl_tbl = wlc_phy_get_ipa_gaintbl_nphy(pi);
- } else {
- if (CHSPEC_IS5G(pi->radio_chanspec)) {
- if (NREV_IS(pi->pubpi.phy_rev, 3)) {
- tx_pwrctrl_tbl =
- nphy_tpc_5GHz_txgain_rev3;
- } else if (NREV_IS(pi->pubpi.phy_rev, 4)) {
- tx_pwrctrl_tbl =
- (pi->srom_fem5g.extpagain == 3) ?
- nphy_tpc_5GHz_txgain_HiPwrEPA :
- nphy_tpc_5GHz_txgain_rev4;
- } else {
- tx_pwrctrl_tbl =
- nphy_tpc_5GHz_txgain_rev5;
+ switch (field) {
+ case (0x1 << 3):
+ en_addr = (core_num == 0) ? 0x346 :
+ 0x347;
+ val_addr = (core_num == 0) ? 0x344 :
+ 0x345;
+ val_mask = (0x1 << 3);
+ val_shift = 3;
+ break;
+ case (0x1 << 1):
+ en_addr = (core_num == 0) ? 0x346 :
+ 0x347;
+ val_addr = (core_num == 0) ? 0x344 :
+ 0x345;
+ val_mask = (0x1 << 1);
+ val_shift = 1;
+ break;
+ case (0x1 << 0):
+ en_addr = (core_num == 0) ? 0x346 :
+ 0x347;
+ val_addr = (core_num == 0) ? 0x344 :
+ 0x345;
+ val_mask = (0x1 << 0);
+ val_shift = 0;
+ break;
+ case (0x1 << 2):
+ en_addr = (core_num == 0) ? 0x346 :
+ 0x347;
+ val_addr = (core_num == 0) ? 0x344 :
+ 0x345;
+ val_mask = (0x1 << 2);
+ val_shift = 2;
+ break;
+ case (0x1 << 4):
+ en_addr = (core_num == 0) ? 0x346 :
+ 0x347;
+ val_addr = (core_num == 0) ? 0x344 :
+ 0x345;
+ val_mask = (0x1 << 4);
+ val_shift = 4;
+ break;
+ default:
+ addr = 0xffff;
+ break;
}
+ }
+ if (off) {
+ and_phy_reg(pi, en_addr, ~en_mask);
+ and_phy_reg(pi, val_addr, ~val_mask);
} else {
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- if (pi->pubpi.radiorev == 5) {
- tx_pwrctrl_tbl =
- nphy_tpc_txgain_epa_2057rev5;
- } else if (pi->pubpi.radiorev == 3) {
- tx_pwrctrl_tbl =
- nphy_tpc_txgain_epa_2057rev3;
- }
- } else {
- if (NREV_GE(pi->pubpi.phy_rev, 5) &&
- (pi->srom_fem2g.extpagain == 3)) {
- tx_pwrctrl_tbl =
- nphy_tpc_txgain_HiPwrEPA;
- } else {
- tx_pwrctrl_tbl =
- nphy_tpc_txgain_rev3;
- }
+ if ((core_mask == 0)
+ || (core_mask & (1 << core_num))) {
+ or_phy_reg(pi, en_addr, en_mask);
+
+ if (addr != 0xffff)
+ mod_phy_reg(pi, val_addr,
+ val_mask,
+ (value <<
+ val_shift));
}
}
}
+ }
+}
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CORE1TXPWRCTL, 128,
- 192, 32, tx_pwrctrl_tbl);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CORE2TXPWRCTL, 128,
- 192, 32, tx_pwrctrl_tbl);
-
- pi->nphy_gmval = (u16) ((*tx_pwrctrl_tbl >> 16) & 0x7000);
+static void wlc_phy_adjust_lnagaintbl_nphy(struct brcms_phy *pi)
+{
+ uint core;
+ int ctr;
+ s16 gain_delta[2];
+ u8 curr_channel;
+ u16 minmax_gain[2];
+ u16 regval[4];
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, true);
- for (idx = 0; idx < 128; idx++) {
- pga_gn = (tx_pwrctrl_tbl[idx] >> 24) & 0xf;
- pad_gn = (tx_pwrctrl_tbl[idx] >> 19) & 0x1f;
+ if (pi->nphy_gain_boost) {
+ if ((CHSPEC_IS2G(pi->radio_chanspec))) {
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if ((pi->pubpi.radiorev == 3) ||
- (pi->pubpi.radiorev == 4) ||
- (pi->pubpi.radiorev == 6)) {
- rfpwr_offset = (s16)
- nphy_papd_padgain_dlt_2g_2057rev3n4
- [pad_gn];
- } else if (pi->pubpi.radiorev == 5) {
- rfpwr_offset = (s16)
- nphy_papd_padgain_dlt_2g_2057rev5
- [pad_gn];
- } else if ((pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev ==
- 8)) {
- rfpwr_offset = (s16)
- nphy_papd_padgain_dlt_2g_2057rev7
- [pad_gn];
- }
- } else {
- if ((pi->pubpi.radiorev == 3) ||
- (pi->pubpi.radiorev == 4) ||
- (pi->pubpi.radiorev == 6)) {
- rfpwr_offset = (s16)
- nphy_papd_pgagain_dlt_5g_2057
- [pga_gn];
- } else if ((pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev ==
- 8)) {
- rfpwr_offset = (s16)
- nphy_papd_pgagain_dlt_5g_2057rev7
- [pga_gn];
- }
- }
- wlc_phy_table_write_nphy(pi,
- NPHY_TBL_ID_CORE1TXPWRCTL,
- 1, 576 + idx, 32,
- &rfpwr_offset);
- wlc_phy_table_write_nphy(pi,
- NPHY_TBL_ID_CORE2TXPWRCTL,
- 1, 576 + idx, 32,
- &rfpwr_offset);
- }
+ gain_delta[0] = 6;
+ gain_delta[1] = 6;
} else {
- for (idx = 0; idx < 128; idx++) {
- pga_gn = (tx_pwrctrl_tbl[idx] >> 24) & 0xf;
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- rfpwr_offset = (s16)
- nphy_papd_pga_gain_delta_ipa_2g
- [pga_gn];
- } else {
- rfpwr_offset = (s16)
- nphy_papd_pga_gain_delta_ipa_5g
- [pga_gn];
- }
-
- wlc_phy_table_write_nphy(pi,
- NPHY_TBL_ID_CORE1TXPWRCTL,
- 1, 576 + idx, 32,
- &rfpwr_offset);
- wlc_phy_table_write_nphy(pi,
- NPHY_TBL_ID_CORE2TXPWRCTL,
- 1, 576 + idx, 32,
- &rfpwr_offset);
- }
-
+ curr_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
+ gain_delta[0] =
+ (s16)
+ PHY_HW_ROUND(((nphy_lnagain_est0[0] *
+ curr_channel) +
+ nphy_lnagain_est0[1]), 13);
+ gain_delta[1] =
+ (s16)
+ PHY_HW_ROUND(((nphy_lnagain_est1[0] *
+ curr_channel) +
+ nphy_lnagain_est1[1]), 13);
}
} else {
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CORE1TXPWRCTL, 128,
- 192, 32, nphy_tpc_txgain);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CORE2TXPWRCTL, 128,
- 192, 32, nphy_tpc_txgain);
+ gain_delta[0] = 0;
+ gain_delta[1] = 0;
}
- if (pi->sh->phyrxchain != 0x3) {
- wlc_phy_rxcore_setstate_nphy((struct brcms_phy_pub *) pi,
- pi->sh->phyrxchain);
+ for (core = 0; core < pi->pubpi.phy_corenum; core++) {
+ if (pi->nphy_elna_gain_config) {
+
+ regval[0] = nphy_def_lnagains[2] + gain_delta[core];
+ regval[1] = nphy_def_lnagains[3] + gain_delta[core];
+ regval[2] = nphy_def_lnagains[3] + gain_delta[core];
+ regval[3] = nphy_def_lnagains[3] + gain_delta[core];
+ } else {
+ for (ctr = 0; ctr < 4; ctr++)
+ regval[ctr] =
+ nphy_def_lnagains[ctr] +
+ gain_delta[core];
+ }
+ wlc_phy_table_write_nphy(pi, core, 4, 8, 16, regval);
+
+ minmax_gain[core] =
+ (u16) (nphy_def_lnagains[2] + gain_delta[core] + 4);
}
- if (PHY_PERICAL_MPHASE_PENDING(pi)) {
- wlc_phy_cal_perical_mphase_restart(pi);
+ mod_phy_reg(pi, 0x1e, (0xff << 0), (minmax_gain[0] << 0));
+ mod_phy_reg(pi, 0x34, (0xff << 0), (minmax_gain[1] << 0));
+
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, false);
+}
+
+static void
+wlc_phy_war_force_trsw_to_R_cliplo_nphy(struct brcms_phy *pi, u8 core)
+{
+ if (core == PHY_CORE_0) {
+ write_phy_reg(pi, 0x38, 0x4);
+ if (CHSPEC_IS2G(pi->radio_chanspec))
+ write_phy_reg(pi, 0x37, 0x0060);
+ else
+ write_phy_reg(pi, 0x37, 0x1080);
+ } else if (core == PHY_CORE_1) {
+ write_phy_reg(pi, 0x2ae, 0x4);
+ if (CHSPEC_IS2G(pi->radio_chanspec))
+ write_phy_reg(pi, 0x2ad, 0x0060);
+ else
+ write_phy_reg(pi, 0x2ad, 0x1080);
}
+}
- if (!NORADIO_ENAB(pi->pubpi)) {
- bool do_rssi_cal = false;
+static void wlc_phy_war_txchain_upd_nphy(struct brcms_phy *pi, u8 txchain)
+{
+ u8 txchain0, txchain1;
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- do_rssi_cal = (CHSPEC_IS2G(pi->radio_chanspec)) ?
- (pi->nphy_rssical_chanspec_2G == 0) :
- (pi->nphy_rssical_chanspec_5G == 0);
+ txchain0 = txchain & 0x1;
+ txchain1 = (txchain & 0x2) >> 1;
+ if (!txchain0)
+ wlc_phy_war_force_trsw_to_R_cliplo_nphy(pi, PHY_CORE_0);
- if (do_rssi_cal) {
- wlc_phy_rssi_cal_nphy(pi);
- } else {
- wlc_phy_restore_rssical_nphy(pi);
- }
- } else {
- wlc_phy_rssi_cal_nphy(pi);
- }
+ if (!txchain1)
+ wlc_phy_war_force_trsw_to_R_cliplo_nphy(pi, PHY_CORE_1);
+}
- if (!SCAN_RM_IN_PROGRESS(pi)) {
- do_nphy_cal = (CHSPEC_IS2G(pi->radio_chanspec)) ?
- (pi->nphy_iqcal_chanspec_2G == 0) :
- (pi->nphy_iqcal_chanspec_5G == 0);
- }
+static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
+{
+ s8 lna1_gain_db[] = { 8, 13, 17, 22 };
+ s8 lna2_gain_db[] = { -2, 7, 11, 15 };
+ s8 tia_gain_db[] = { -4, -1, 2, 5, 5, 5, 5, 5, 5, 5 };
+ s8 tia_gainbits[] = {
+ 0x0, 0x01, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
- if (!pi->do_initcal)
- do_nphy_cal = false;
+ mod_phy_reg(pi, 0x1c, (0x1 << 13), (1 << 13));
+ mod_phy_reg(pi, 0x32, (0x1 << 13), (1 << 13));
- if (do_nphy_cal) {
+ mod_phy_reg(pi, 0x289, (0xff << 0), (0x46 << 0));
- target_gain = wlc_phy_get_tx_gain_nphy(pi);
+ mod_phy_reg(pi, 0x283, (0xff << 0), (0x3c << 0));
+ mod_phy_reg(pi, 0x280, (0xff << 0), (0x3c << 0));
- if (pi->antsel_type == ANTSEL_2x3)
- wlc_phy_antsel_init((struct brcms_phy_pub *) pi,
- true);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 0x8, 8,
+ lna1_gain_db);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 0x8, 8,
+ lna1_gain_db);
- if (pi->nphy_perical != PHY_PERICAL_MPHASE) {
- wlc_phy_rssi_cal_nphy(pi);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 0x10, 8,
+ lna2_gain_db);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 0x10, 8,
+ lna2_gain_db);
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- pi->nphy_cal_orig_pwr_idx[0] =
- pi->nphy_txpwrindex[PHY_CORE_0].
- index_internal;
- pi->nphy_cal_orig_pwr_idx[1] =
- pi->nphy_txpwrindex[PHY_CORE_1].
- index_internal;
-
- wlc_phy_precal_txgain_nphy(pi);
- target_gain =
- wlc_phy_get_tx_gain_nphy(pi);
- }
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 10, 0x20, 8,
+ tia_gain_db);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 10, 0x20, 8,
+ tia_gain_db);
- if (wlc_phy_cal_txiqlo_nphy
- (pi, target_gain, true, false) == 0) {
- if (wlc_phy_cal_rxiq_nphy
- (pi, target_gain, 2,
- false) == 0) {
- wlc_phy_savecal_nphy(pi);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS1, 10, 0x20, 8,
+ tia_gainbits);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS2, 10, 0x20, 8,
+ tia_gainbits);
- }
- }
- } else if (pi->mphase_cal_phase_id ==
- MPHASE_CAL_STATE_IDLE) {
+ write_phy_reg(pi, 0x37, 0x74);
+ write_phy_reg(pi, 0x2ad, 0x74);
+ write_phy_reg(pi, 0x38, 0x18);
+ write_phy_reg(pi, 0x2ae, 0x18);
- wlc_phy_cal_perical((struct brcms_phy_pub *) pi,
- PHY_PERICAL_PHYINIT);
- }
- } else {
- wlc_phy_restorecal_nphy(pi);
- }
- }
+ write_phy_reg(pi, 0x2b, 0xe8);
+ write_phy_reg(pi, 0x41, 0xe8);
- wlc_phy_txpwrctrl_coeff_setup_nphy(pi);
+ if (CHSPEC_IS20(pi->radio_chanspec)) {
- wlc_phy_txpwrctrl_enable_nphy(pi, tx_pwr_ctrl_state);
+ mod_phy_reg(pi, 0x300, (0x3f << 0), (0x12 << 0));
+ mod_phy_reg(pi, 0x301, (0x3f << 0), (0x12 << 0));
+ } else {
- wlc_phy_nphy_tkip_rifs_war(pi, pi->sh->_rifs_phy);
+ mod_phy_reg(pi, 0x300, (0x3f << 0), (0x10 << 0));
+ mod_phy_reg(pi, 0x301, (0x3f << 0), (0x10 << 0));
+ }
+}
- if (NREV_GE(pi->pubpi.phy_rev, 3) && NREV_LE(pi->pubpi.phy_rev, 6))
+static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
+{
+ u16 currband;
+ s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
+ s8 *lna1_gain_db = NULL;
+ s8 *lna1_gain_db_2 = NULL;
+ s8 *lna2_gain_db = NULL;
+ s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
+ s8 *tia_gain_db;
+ s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
+ s8 *tia_gainbits;
+ u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
+ u16 *rfseq_init_gain;
+ u16 init_gaincode;
+ u16 clip1hi_gaincode;
+ u16 clip1md_gaincode = 0;
+ u16 clip1md_gaincode_B;
+ u16 clip1lo_gaincode;
+ u16 clip1lo_gaincode_B;
+ u8 crsminl_th = 0;
+ u8 crsminu_th;
+ u16 nbclip_th = 0;
+ u8 w1clip_th;
+ u16 freq;
+ s8 nvar_baseline_offset0 = 0, nvar_baseline_offset1 = 0;
+ u8 chg_nbclip_th = 0;
- write_phy_reg(pi, 0x70, 50);
+ mod_phy_reg(pi, 0x1c, (0x1 << 13), (1 << 13));
+ mod_phy_reg(pi, 0x32, (0x1 << 13), (1 << 13));
- wlc_phy_txlpfbw_nphy(pi);
+ currband = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
+ if (currband == 0) {
- wlc_phy_spurwar_nphy(pi);
+ lna1_gain_db = lna1G_gain_db_rev7;
-}
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 8, 8,
+ lna1_gain_db);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 8, 8,
+ lna1_gain_db);
-static void wlc_phy_update_mimoconfig_nphy(struct brcms_phy *pi, s32 preamble)
-{
- bool gf_preamble = false;
- u16 val;
+ mod_phy_reg(pi, 0x283, (0xff << 0), (0x40 << 0));
- if (preamble == BRCMS_N_PREAMBLE_GF)
- gf_preamble = true;
+ if (CHSPEC_IS40(pi->radio_chanspec)) {
+ mod_phy_reg(pi, 0x280, (0xff << 0), (0x3e << 0));
+ mod_phy_reg(pi, 0x283, (0xff << 0), (0x3e << 0));
+ }
- val = read_phy_reg(pi, 0xed);
+ mod_phy_reg(pi, 0x289, (0xff << 0), (0x46 << 0));
- val |= RX_GF_MM_AUTO;
- val &= ~RX_GF_OR_MM;
- if (gf_preamble)
- val |= RX_GF_OR_MM;
+ if (CHSPEC_IS20(pi->radio_chanspec)) {
+ mod_phy_reg(pi, 0x300, (0x3f << 0), (13 << 0));
+ mod_phy_reg(pi, 0x301, (0x3f << 0), (13 << 0));
+ }
+ } else {
- write_phy_reg(pi, 0xed, val);
-}
+ init_gaincode = 0x9e;
+ clip1hi_gaincode = 0x9e;
+ clip1md_gaincode_B = 0x24;
+ clip1lo_gaincode = 0x8a;
+ clip1lo_gaincode_B = 8;
+ rfseq_init_gain = rfseqA_init_gain_rev7;
-static void wlc_phy_resetcca_nphy(struct brcms_phy *pi)
-{
- u16 val;
+ tia_gain_db = tiaA_gain_db_rev7;
+ tia_gainbits = tiaA_gainbits_rev7;
- wlapi_bmac_phyclk_fgc(pi->sh->physhim, ON);
+ freq = CHAN5G_FREQ(CHSPEC_CHANNEL(pi->radio_chanspec));
+ if (CHSPEC_IS20(pi->radio_chanspec)) {
- val = read_phy_reg(pi, 0x01);
- write_phy_reg(pi, 0x01, val | BBCFG_RESETCCA);
- udelay(1);
- write_phy_reg(pi, 0x01, val & (~BBCFG_RESETCCA));
+ w1clip_th = 25;
+ clip1md_gaincode = 0x82;
- wlapi_bmac_phyclk_fgc(pi->sh->physhim, OFF);
+ if ((freq <= 5080) || (freq == 5825)) {
- wlc_phy_force_rfseq_nphy(pi, NPHY_RFSEQ_RESET2RX);
-}
+ s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
+ s8 lna1A_gain_db_2_rev7[] = {
+ 11, 17, 22, 25};
+ s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
-void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en)
-{
- u16 rfctrlintc_override_val;
+ crsminu_th = 0x3e;
+ lna1_gain_db = lna1A_gain_db_rev7;
+ lna1_gain_db_2 = lna1A_gain_db_2_rev7;
+ lna2_gain_db = lna2A_gain_db_rev7;
+ } else if ((freq >= 5500) && (freq <= 5700)) {
- if (!en) {
+ s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
+ s8 lna1A_gain_db_2_rev7[] = {
+ 12, 18, 22, 26};
+ s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
- pi->rfctrlIntc1_save = read_phy_reg(pi, 0x91);
- pi->rfctrlIntc2_save = read_phy_reg(pi, 0x92);
+ crsminu_th = 0x45;
+ clip1md_gaincode_B = 0x14;
+ nbclip_th = 0xff;
+ chg_nbclip_th = 1;
+ lna1_gain_db = lna1A_gain_db_rev7;
+ lna1_gain_db_2 = lna1A_gain_db_2_rev7;
+ lna2_gain_db = lna2A_gain_db_rev7;
+ } else {
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- rfctrlintc_override_val = 0x1480;
- } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- rfctrlintc_override_val =
- CHSPEC_IS5G(pi->radio_chanspec) ? 0x600 : 0x480;
+ s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
+ s8 lna1A_gain_db_2_rev7[] = {
+ 12, 18, 22, 26};
+ s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+
+ crsminu_th = 0x41;
+ lna1_gain_db = lna1A_gain_db_rev7;
+ lna1_gain_db_2 = lna1A_gain_db_2_rev7;
+ lna2_gain_db = lna2A_gain_db_rev7;
+ }
+
+ if (freq <= 4920) {
+ nvar_baseline_offset0 = 5;
+ nvar_baseline_offset1 = 5;
+ } else if ((freq > 4920) && (freq <= 5320)) {
+ nvar_baseline_offset0 = 3;
+ nvar_baseline_offset1 = 5;
+ } else if ((freq > 5320) && (freq <= 5700)) {
+ nvar_baseline_offset0 = 3;
+ nvar_baseline_offset1 = 2;
+ } else {
+ nvar_baseline_offset0 = 4;
+ nvar_baseline_offset1 = 0;
+ }
} else {
- rfctrlintc_override_val =
- CHSPEC_IS5G(pi->radio_chanspec) ? 0x180 : 0x120;
+
+ crsminu_th = 0x3a;
+ crsminl_th = 0x3a;
+ w1clip_th = 20;
+
+ if ((freq >= 4920) && (freq <= 5320)) {
+ nvar_baseline_offset0 = 4;
+ nvar_baseline_offset1 = 5;
+ } else if ((freq > 5320) && (freq <= 5550)) {
+ nvar_baseline_offset0 = 4;
+ nvar_baseline_offset1 = 2;
+ } else {
+ nvar_baseline_offset0 = 5;
+ nvar_baseline_offset1 = 3;
+ }
}
- write_phy_reg(pi, 0x91, rfctrlintc_override_val);
- write_phy_reg(pi, 0x92, rfctrlintc_override_val);
- } else {
+ write_phy_reg(pi, 0x20, init_gaincode);
+ write_phy_reg(pi, 0x2a7, init_gaincode);
- write_phy_reg(pi, 0x91, pi->rfctrlIntc1_save);
- write_phy_reg(pi, 0x92, pi->rfctrlIntc2_save);
- }
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
+ pi->pubpi.phy_corenum, 0x106, 16,
+ rfseq_init_gain);
-}
+ write_phy_reg(pi, 0x22, clip1hi_gaincode);
+ write_phy_reg(pi, 0x2a9, clip1hi_gaincode);
-void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi)
-{
+ write_phy_reg(pi, 0x36, clip1md_gaincode_B);
+ write_phy_reg(pi, 0x2ac, clip1md_gaincode_B);
- u16 txrx_chain =
- (NPHY_RfseqCoreActv_TxRxChain0 | NPHY_RfseqCoreActv_TxRxChain1);
- bool CoreActv_override = false;
+ write_phy_reg(pi, 0x37, clip1lo_gaincode);
+ write_phy_reg(pi, 0x2ad, clip1lo_gaincode);
+ write_phy_reg(pi, 0x38, clip1lo_gaincode_B);
+ write_phy_reg(pi, 0x2ae, clip1lo_gaincode_B);
- if (pi->nphy_txrx_chain == BRCMS_N_TXRX_CHAIN0) {
- txrx_chain = NPHY_RfseqCoreActv_TxRxChain0;
- CoreActv_override = true;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 10, 0x20, 8,
+ tia_gain_db);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 10, 0x20, 8,
+ tia_gain_db);
- if (NREV_LE(pi->pubpi.phy_rev, 2)) {
- and_phy_reg(pi, 0xa0, ~0x20);
- }
- } else if (pi->nphy_txrx_chain == BRCMS_N_TXRX_CHAIN1) {
- txrx_chain = NPHY_RfseqCoreActv_TxRxChain1;
- CoreActv_override = true;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS1, 10, 0x20, 8,
+ tia_gainbits);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS2, 10, 0x20, 8,
+ tia_gainbits);
- if (NREV_LE(pi->pubpi.phy_rev, 2)) {
- or_phy_reg(pi, 0xa0, 0x20);
+ mod_phy_reg(pi, 0x283, (0xff << 0), (crsminu_th << 0));
+
+ if (chg_nbclip_th == 1) {
+ write_phy_reg(pi, 0x2b, nbclip_th);
+ write_phy_reg(pi, 0x41, nbclip_th);
}
- }
- mod_phy_reg(pi, 0xa2, ((0xf << 0) | (0xf << 4)), txrx_chain);
+ mod_phy_reg(pi, 0x300, (0x3f << 0), (w1clip_th << 0));
+ mod_phy_reg(pi, 0x301, (0x3f << 0), (w1clip_th << 0));
- if (CoreActv_override) {
+ mod_phy_reg(pi, 0x2e4,
+ (0x3f << 0), (nvar_baseline_offset0 << 0));
- pi->nphy_perical = PHY_PERICAL_DISABLE;
- or_phy_reg(pi, 0xa1, NPHY_RfseqMode_CoreActv_override);
- } else {
- pi->nphy_perical = PHY_PERICAL_MPHASE;
- and_phy_reg(pi, 0xa1, ~NPHY_RfseqMode_CoreActv_override);
+ mod_phy_reg(pi, 0x2e4,
+ (0x3f << 6), (nvar_baseline_offset1 << 6));
+
+ if (CHSPEC_IS20(pi->radio_chanspec)) {
+
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 8, 8,
+ lna1_gain_db);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 8, 8,
+ lna1_gain_db_2);
+
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 0x10,
+ 8, lna2_gain_db);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 0x10,
+ 8, lna2_gain_db);
+
+ write_phy_reg(pi, 0x24, clip1md_gaincode);
+ write_phy_reg(pi, 0x2ab, clip1md_gaincode);
+ } else {
+ mod_phy_reg(pi, 0x280, (0xff << 0), (crsminl_th << 0));
+ }
}
}
-void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask)
+static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
{
- u16 regval;
- u16 tbl_buf[16];
- uint i;
- struct brcms_phy *pi = (struct brcms_phy *) pih;
- u16 tbl_opcode;
- bool suspend;
+ u16 w1th, hpf_code, currband;
+ int ctr;
+ u8 rfseq_updategainu_events[] = {
+ NPHY_RFSEQ_CMD_RX_GAIN,
+ NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
+ NPHY_RFSEQ_CMD_SET_HPF_BW
+ };
+ u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
+ s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
+ s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
+ s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
+ s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
+ s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
+ s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
+ s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
+ s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
+ s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
+ s8 *lna1_gain_db = NULL;
+ s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
+ s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
+ s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
+ s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
+ s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
+ s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
+ s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
+ s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
+ s8 *lna2_gain_db = NULL;
+ s8 tiaG_gain_db[] = {
+ 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
+ s8 tiaA_gain_db[] = {
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
+ s8 tiaA_gain_db_rev4[] = {
+ 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
+ s8 tiaA_gain_db_rev5[] = {
+ 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
+ s8 tiaA_gain_db_rev6[] = {
+ 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
+ s8 *tia_gain_db;
+ s8 tiaG_gainbits[] = {
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
+ s8 tiaA_gainbits[] = {
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
+ s8 tiaA_gainbits_rev4[] = {
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
+ s8 tiaA_gainbits_rev5[] = {
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
+ s8 tiaA_gainbits_rev6[] = {
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
+ s8 *tia_gainbits;
+ s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
+ s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
+ u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
+ u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
+ u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
+ u16 rfseqG_init_gain_rev5_elna[] = {
+ 0x013f, 0x013f, 0x013f, 0x013f };
+ u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
+ u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
+ u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
+ u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
+ u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
+ u16 rfseqA_init_gain_rev4_elna[] = {
+ 0x314f, 0x314f, 0x314f, 0x314f };
+ u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
+ u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
+ u16 *rfseq_init_gain;
+ u16 initG_gaincode = 0x627e;
+ u16 initG_gaincode_rev4 = 0x527e;
+ u16 initG_gaincode_rev5 = 0x427e;
+ u16 initG_gaincode_rev5_elna = 0x027e;
+ u16 initG_gaincode_rev6 = 0x527e;
+ u16 initG_gaincode_rev6_224B0 = 0x427e;
+ u16 initG_gaincode_rev6_elna = 0x127e;
+ u16 initA_gaincode = 0x52de;
+ u16 initA_gaincode_rev4 = 0x629e;
+ u16 initA_gaincode_rev4_elna = 0x329e;
+ u16 initA_gaincode_rev5 = 0x729e;
+ u16 initA_gaincode_rev6 = 0x729e;
+ u16 init_gaincode;
+ u16 clip1hiG_gaincode = 0x107e;
+ u16 clip1hiG_gaincode_rev4 = 0x007e;
+ u16 clip1hiG_gaincode_rev5 = 0x1076;
+ u16 clip1hiG_gaincode_rev6 = 0x007e;
+ u16 clip1hiA_gaincode = 0x00de;
+ u16 clip1hiA_gaincode_rev4 = 0x029e;
+ u16 clip1hiA_gaincode_rev5 = 0x029e;
+ u16 clip1hiA_gaincode_rev6 = 0x029e;
+ u16 clip1hi_gaincode;
+ u16 clip1mdG_gaincode = 0x0066;
+ u16 clip1mdA_gaincode = 0x00ca;
+ u16 clip1mdA_gaincode_rev4 = 0x1084;
+ u16 clip1mdA_gaincode_rev5 = 0x2084;
+ u16 clip1mdA_gaincode_rev6 = 0x2084;
+ u16 clip1md_gaincode = 0;
+ u16 clip1loG_gaincode = 0x0074;
+ u16 clip1loG_gaincode_rev5[] = {
+ 0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
+ };
+ u16 clip1loG_gaincode_rev6[] = {
+ 0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
+ };
+ u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
+ u16 clip1loA_gaincode = 0x00cc;
+ u16 clip1loA_gaincode_rev4 = 0x0086;
+ u16 clip1loA_gaincode_rev5 = 0x2086;
+ u16 clip1loA_gaincode_rev6 = 0x2086;
+ u16 clip1lo_gaincode;
+ u8 crsminG_th = 0x18;
+ u8 crsminG_th_rev5 = 0x18;
+ u8 crsminG_th_rev6 = 0x18;
+ u8 crsminA_th = 0x1e;
+ u8 crsminA_th_rev4 = 0x24;
+ u8 crsminA_th_rev5 = 0x24;
+ u8 crsminA_th_rev6 = 0x24;
+ u8 crsmin_th;
+ u8 crsminlG_th = 0x18;
+ u8 crsminlG_th_rev5 = 0x18;
+ u8 crsminlG_th_rev6 = 0x18;
+ u8 crsminlA_th = 0x1e;
+ u8 crsminlA_th_rev4 = 0x24;
+ u8 crsminlA_th_rev5 = 0x24;
+ u8 crsminlA_th_rev6 = 0x24;
+ u8 crsminl_th = 0;
+ u8 crsminuG_th = 0x18;
+ u8 crsminuG_th_rev5 = 0x18;
+ u8 crsminuG_th_rev6 = 0x18;
+ u8 crsminuA_th = 0x1e;
+ u8 crsminuA_th_rev4 = 0x24;
+ u8 crsminuA_th_rev5 = 0x24;
+ u8 crsminuA_th_rev6 = 0x24;
+ u8 crsminuA_th_rev6_224B0 = 0x2d;
+ u8 crsminu_th;
+ u16 nbclipG_th = 0x20d;
+ u16 nbclipG_th_rev4 = 0x1a1;
+ u16 nbclipG_th_rev5 = 0x1d0;
+ u16 nbclipG_th_rev6 = 0x1d0;
+ u16 nbclipA_th = 0x1a1;
+ u16 nbclipA_th_rev4 = 0x107;
+ u16 nbclipA_th_rev5 = 0x0a9;
+ u16 nbclipA_th_rev6 = 0x0f0;
+ u16 nbclip_th = 0;
+ u8 w1clipG_th = 5;
+ u8 w1clipG_th_rev5 = 9;
+ u8 w1clipG_th_rev6 = 5;
+ u8 w1clipA_th = 25, w1clip_th;
+ u8 rssi_gain_default = 0x50;
+ u8 rssiG_gain_rev6_224B0 = 0x50;
+ u8 rssiA_gain_rev5 = 0x90;
+ u8 rssiA_gain_rev6 = 0x90;
+ u8 rssi_gain;
+ u16 regval[21];
+ u8 triso;
- pi->sh->phyrxchain = rxcore_bitmask;
+ triso = (CHSPEC_IS5G(pi->radio_chanspec)) ? pi->srom_fem5g.triso :
+ pi->srom_fem2g.triso;
- if (!pi->sh->clk)
- return;
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (pi->pubpi.radiorev == 5) {
+ wlc_phy_workarounds_nphy_gainctrl_2057_rev5(pi);
+ } else if (pi->pubpi.radiorev == 7) {
+ wlc_phy_workarounds_nphy_gainctrl_2057_rev6(pi);
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
- if (!suspend)
- wlapi_suspend_mac_and_wait(pi->sh->physhim);
+ mod_phy_reg(pi, 0x283, (0xff << 0), (0x44 << 0));
+ mod_phy_reg(pi, 0x280, (0xff << 0), (0x44 << 0));
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, true);
+ } else if ((pi->pubpi.radiorev == 3)
+ || (pi->pubpi.radiorev == 8)) {
+ wlc_phy_workarounds_nphy_gainctrl_2057_rev6(pi);
- regval = read_phy_reg(pi, 0xa2);
- regval &= ~(0xf << 4);
- regval |= ((u16) (rxcore_bitmask & 0x3)) << 4;
- write_phy_reg(pi, 0xa2, regval);
+ if (pi->pubpi.radiorev == 8) {
+ mod_phy_reg(pi, 0x283,
+ (0xff << 0), (0x44 << 0));
+ mod_phy_reg(pi, 0x280,
+ (0xff << 0), (0x44 << 0));
+ }
+ } else {
+ wlc_phy_workarounds_nphy_gainctrl_2057_rev6(pi);
+ }
+ } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- if ((rxcore_bitmask & 0x3) != 0x3) {
+ mod_phy_reg(pi, 0xa0, (0x1 << 6), (1 << 6));
- write_phy_reg(pi, 0x20e, 1);
+ mod_phy_reg(pi, 0x1c, (0x1 << 13), (1 << 13));
+ mod_phy_reg(pi, 0x32, (0x1 << 13), (1 << 13));
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- if (pi->rx2tx_biasentry == -1) {
- wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_RFSEQ,
- ARRAY_SIZE(tbl_buf), 80,
- 16, tbl_buf);
+ currband =
+ read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
+ if (currband == 0) {
+ if (NREV_GE(pi->pubpi.phy_rev, 6)) {
+ if (pi->pubpi.radiorev == 11) {
+ lna1_gain_db = lna1G_gain_db_rev6_224B0;
+ lna2_gain_db = lna2G_gain_db_rev6_224B0;
+ rfseq_init_gain =
+ rfseqG_init_gain_rev6_224B0;
+ init_gaincode =
+ initG_gaincode_rev6_224B0;
+ clip1hi_gaincode =
+ clip1hiG_gaincode_rev6;
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev6_224B0;
+ nbclip_th = nbclipG_th_rev6;
+ w1clip_th = w1clipG_th_rev6;
+ crsmin_th = crsminG_th_rev6;
+ crsminl_th = crsminlG_th_rev6;
+ crsminu_th = crsminuG_th_rev6;
+ rssi_gain = rssiG_gain_rev6_224B0;
+ } else {
+ lna1_gain_db = lna1G_gain_db_rev6;
+ lna2_gain_db = lna2G_gain_db_rev6;
+ if (pi->sh->boardflags & BFL_EXTLNA) {
- for (i = 0; i < ARRAY_SIZE(tbl_buf); i++) {
- if (tbl_buf[i] ==
- NPHY_REV3_RFSEQ_CMD_CLR_RXRX_BIAS) {
+ rfseq_init_gain =
+ rfseqG_init_gain_rev6_elna;
+ init_gaincode =
+ initG_gaincode_rev6_elna;
+ } else {
+ rfseq_init_gain =
+ rfseqG_init_gain_rev6;
+ init_gaincode =
+ initG_gaincode_rev6;
+ }
+ clip1hi_gaincode =
+ clip1hiG_gaincode_rev6;
+ switch (triso) {
+ case 0:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev6
+ [0];
+ break;
+ case 1:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev6
+ [1];
+ break;
+ case 2:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev6
+ [2];
+ break;
+ case 3:
+ default:
- pi->rx2tx_biasentry = (u8) i;
- tbl_opcode =
- NPHY_REV3_RFSEQ_CMD_NOP;
- wlc_phy_table_write_nphy(pi,
- NPHY_TBL_ID_RFSEQ,
- 1, i,
- 16,
- &tbl_opcode);
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev6
+ [3];
break;
- } else if (tbl_buf[i] ==
- NPHY_REV3_RFSEQ_CMD_END) {
+ case 4:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev6
+ [4];
+ break;
+ case 5:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev6
+ [5];
+ break;
+ case 6:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev6
+ [6];
+ break;
+ case 7:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev6
+ [7];
break;
}
+ nbclip_th = nbclipG_th_rev6;
+ w1clip_th = w1clipG_th_rev6;
+ crsmin_th = crsminG_th_rev6;
+ crsminl_th = crsminlG_th_rev6;
+ crsminu_th = crsminuG_th_rev6;
+ rssi_gain = rssi_gain_default;
+ }
+ } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
+ lna1_gain_db = lna1G_gain_db_rev5;
+ lna2_gain_db = lna2G_gain_db_rev5;
+ if (pi->sh->boardflags & BFL_EXTLNA) {
+
+ rfseq_init_gain =
+ rfseqG_init_gain_rev5_elna;
+ init_gaincode =
+ initG_gaincode_rev5_elna;
+ } else {
+ rfseq_init_gain = rfseqG_init_gain_rev5;
+ init_gaincode = initG_gaincode_rev5;
}
+ clip1hi_gaincode = clip1hiG_gaincode_rev5;
+ switch (triso) {
+ case 0:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev5[0];
+ break;
+ case 1:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev5[1];
+ break;
+ case 2:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev5[2];
+ break;
+ case 3:
+
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev5[3];
+ break;
+ case 4:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev5[4];
+ break;
+ case 5:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev5[5];
+ break;
+ case 6:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev5[6];
+ break;
+ case 7:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev5[7];
+ break;
+ default:
+ clip1lo_gaincode =
+ clip1loG_gaincode_rev5[3];
+ break;
+ }
+ nbclip_th = nbclipG_th_rev5;
+ w1clip_th = w1clipG_th_rev5;
+ crsmin_th = crsminG_th_rev5;
+ crsminl_th = crsminlG_th_rev5;
+ crsminu_th = crsminuG_th_rev5;
+ rssi_gain = rssi_gain_default;
+ } else if (NREV_IS(pi->pubpi.phy_rev, 4)) {
+ lna1_gain_db = lna1G_gain_db_rev4;
+ lna2_gain_db = lna2G_gain_db;
+ rfseq_init_gain = rfseqG_init_gain_rev4;
+ init_gaincode = initG_gaincode_rev4;
+ clip1hi_gaincode = clip1hiG_gaincode_rev4;
+ clip1lo_gaincode = clip1loG_gaincode;
+ nbclip_th = nbclipG_th_rev4;
+ w1clip_th = w1clipG_th;
+ crsmin_th = crsminG_th;
+ crsminl_th = crsminlG_th;
+ crsminu_th = crsminuG_th;
+ rssi_gain = rssi_gain_default;
+ } else {
+ lna1_gain_db = lna1G_gain_db;
+ lna2_gain_db = lna2G_gain_db;
+ rfseq_init_gain = rfseqG_init_gain;
+ init_gaincode = initG_gaincode;
+ clip1hi_gaincode = clip1hiG_gaincode;
+ clip1lo_gaincode = clip1loG_gaincode;
+ nbclip_th = nbclipG_th;
+ w1clip_th = w1clipG_th;
+ crsmin_th = crsminG_th;
+ crsminl_th = crsminlG_th;
+ crsminu_th = crsminuG_th;
+ rssi_gain = rssi_gain_default;
}
- }
- } else {
+ tia_gain_db = tiaG_gain_db;
+ tia_gainbits = tiaG_gainbits;
+ clip1md_gaincode = clip1mdG_gaincode;
+ } else {
+ if (NREV_GE(pi->pubpi.phy_rev, 6)) {
+ lna1_gain_db = lna1A_gain_db_rev6;
+ lna2_gain_db = lna2A_gain_db_rev6;
+ tia_gain_db = tiaA_gain_db_rev6;
+ tia_gainbits = tiaA_gainbits_rev6;
+ rfseq_init_gain = rfseqA_init_gain_rev6;
+ init_gaincode = initA_gaincode_rev6;
+ clip1hi_gaincode = clip1hiA_gaincode_rev6;
+ clip1md_gaincode = clip1mdA_gaincode_rev6;
+ clip1lo_gaincode = clip1loA_gaincode_rev6;
+ crsmin_th = crsminA_th_rev6;
+ crsminl_th = crsminlA_th_rev6;
+ if ((pi->pubpi.radiorev == 11) &&
+ (CHSPEC_IS40(pi->radio_chanspec) == 0))
+ crsminu_th = crsminuA_th_rev6_224B0;
+ else
+ crsminu_th = crsminuA_th_rev6;
- write_phy_reg(pi, 0x20e, 30);
+ nbclip_th = nbclipA_th_rev6;
+ rssi_gain = rssiA_gain_rev6;
+ } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
+ lna1_gain_db = lna1A_gain_db_rev5;
+ lna2_gain_db = lna2A_gain_db_rev5;
+ tia_gain_db = tiaA_gain_db_rev5;
+ tia_gainbits = tiaA_gainbits_rev5;
+ rfseq_init_gain = rfseqA_init_gain_rev5;
+ init_gaincode = initA_gaincode_rev5;
+ clip1hi_gaincode = clip1hiA_gaincode_rev5;
+ clip1md_gaincode = clip1mdA_gaincode_rev5;
+ clip1lo_gaincode = clip1loA_gaincode_rev5;
+ crsmin_th = crsminA_th_rev5;
+ crsminl_th = crsminlA_th_rev5;
+ crsminu_th = crsminuA_th_rev5;
+ nbclip_th = nbclipA_th_rev5;
+ rssi_gain = rssiA_gain_rev5;
+ } else if (NREV_IS(pi->pubpi.phy_rev, 4)) {
+ lna1_gain_db = lna1A_gain_db_rev4;
+ lna2_gain_db = lna2A_gain_db_rev4;
+ tia_gain_db = tiaA_gain_db_rev4;
+ tia_gainbits = tiaA_gainbits_rev4;
+ if (pi->sh->boardflags & BFL_EXTLNA_5GHz) {
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- if (pi->rx2tx_biasentry != -1) {
- tbl_opcode = NPHY_REV3_RFSEQ_CMD_CLR_RXRX_BIAS;
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
- 1, pi->rx2tx_biasentry,
- 16, &tbl_opcode);
- pi->rx2tx_biasentry = -1;
+ rfseq_init_gain =
+ rfseqA_init_gain_rev4_elna;
+ init_gaincode =
+ initA_gaincode_rev4_elna;
+ } else {
+ rfseq_init_gain = rfseqA_init_gain_rev4;
+ init_gaincode = initA_gaincode_rev4;
+ }
+ clip1hi_gaincode = clip1hiA_gaincode_rev4;
+ clip1md_gaincode = clip1mdA_gaincode_rev4;
+ clip1lo_gaincode = clip1loA_gaincode_rev4;
+ crsmin_th = crsminA_th_rev4;
+ crsminl_th = crsminlA_th_rev4;
+ crsminu_th = crsminuA_th_rev4;
+ nbclip_th = nbclipA_th_rev4;
+ rssi_gain = rssi_gain_default;
+ } else {
+ lna1_gain_db = lna1A_gain_db;
+ lna2_gain_db = lna2A_gain_db;
+ tia_gain_db = tiaA_gain_db;
+ tia_gainbits = tiaA_gainbits;
+ rfseq_init_gain = rfseqA_init_gain;
+ init_gaincode = initA_gaincode;
+ clip1hi_gaincode = clip1hiA_gaincode;
+ clip1md_gaincode = clip1mdA_gaincode;
+ clip1lo_gaincode = clip1loA_gaincode;
+ crsmin_th = crsminA_th;
+ crsminl_th = crsminlA_th;
+ crsminu_th = crsminuA_th;
+ nbclip_th = nbclipA_th;
+ rssi_gain = rssi_gain_default;
}
+ w1clip_th = w1clipA_th;
}
- }
- wlc_phy_force_rfseq_nphy(pi, NPHY_RFSEQ_RESET2RX);
+ write_radio_reg(pi,
+ (RADIO_2056_RX_BIASPOLE_LNAG1_IDAC |
+ RADIO_2056_RX0), 0x17);
+ write_radio_reg(pi,
+ (RADIO_2056_RX_BIASPOLE_LNAG1_IDAC |
+ RADIO_2056_RX1), 0x17);
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, false);
+ write_radio_reg(pi, (RADIO_2056_RX_LNAG2_IDAC | RADIO_2056_RX0),
+ 0xf0);
+ write_radio_reg(pi, (RADIO_2056_RX_LNAG2_IDAC | RADIO_2056_RX1),
+ 0xf0);
- if (!suspend)
- wlapi_enable_mac(pi->sh->physhim);
-}
+ write_radio_reg(pi, (RADIO_2056_RX_RSSI_POLE | RADIO_2056_RX0),
+ 0x0);
+ write_radio_reg(pi, (RADIO_2056_RX_RSSI_POLE | RADIO_2056_RX1),
+ 0x0);
-u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih)
-{
- u16 regval, rxen_bits;
- struct brcms_phy *pi = (struct brcms_phy *) pih;
+ write_radio_reg(pi, (RADIO_2056_RX_RSSI_GAIN | RADIO_2056_RX0),
+ rssi_gain);
+ write_radio_reg(pi, (RADIO_2056_RX_RSSI_GAIN | RADIO_2056_RX1),
+ rssi_gain);
- regval = read_phy_reg(pi, 0xa2);
- rxen_bits = (regval >> 4) & 0xf;
+ write_radio_reg(pi,
+ (RADIO_2056_RX_BIASPOLE_LNAA1_IDAC |
+ RADIO_2056_RX0), 0x17);
+ write_radio_reg(pi,
+ (RADIO_2056_RX_BIASPOLE_LNAA1_IDAC |
+ RADIO_2056_RX1), 0x17);
- return (u8) rxen_bits;
-}
+ write_radio_reg(pi, (RADIO_2056_RX_LNAA2_IDAC | RADIO_2056_RX0),
+ 0xFF);
+ write_radio_reg(pi, (RADIO_2056_RX_LNAA2_IDAC | RADIO_2056_RX1),
+ 0xFF);
-bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pi)
-{
- return PHY_IPA(pi);
-}
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 8,
+ 8, lna1_gain_db);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 8,
+ 8, lna1_gain_db);
-static void wlc_phy_txpwr_limit_to_tbl_nphy(struct brcms_phy *pi)
-{
- u8 idx, idx2, i, delta_ind;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 0x10,
+ 8, lna2_gain_db);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 0x10,
+ 8, lna2_gain_db);
- for (idx = TXP_FIRST_CCK; idx <= TXP_LAST_CCK; idx++) {
- pi->adj_pwr_tbl_nphy[idx] = pi->tx_power_offset[idx];
- }
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 10, 0x20,
+ 8, tia_gain_db);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 10, 0x20,
+ 8, tia_gain_db);
- for (i = 0; i < 4; i++) {
- idx2 = 0;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS1, 10, 0x20,
+ 8, tia_gainbits);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS2, 10, 0x20,
+ 8, tia_gainbits);
- delta_ind = 0;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 6, 0x40,
+ 8, &lpf_gain_db);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 6, 0x40,
+ 8, &lpf_gain_db);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS1, 6, 0x40,
+ 8, &lpf_gainbits);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS2, 6, 0x40,
+ 8, &lpf_gainbits);
- switch (i) {
- case 0:
+ write_phy_reg(pi, 0x20, init_gaincode);
+ write_phy_reg(pi, 0x2a7, init_gaincode);
- if (CHSPEC_IS40(pi->radio_chanspec)
- && NPHY_IS_SROM_REINTERPRET) {
- idx = TXP_FIRST_MCS_40_SISO;
- } else {
- idx = (CHSPEC_IS40(pi->radio_chanspec)) ?
- TXP_FIRST_OFDM_40_SISO : TXP_FIRST_OFDM;
- delta_ind = 1;
- }
- break;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
+ pi->pubpi.phy_corenum, 0x106, 16,
+ rfseq_init_gain);
- case 1:
+ write_phy_reg(pi, 0x22, clip1hi_gaincode);
+ write_phy_reg(pi, 0x2a9, clip1hi_gaincode);
- idx = (CHSPEC_IS40(pi->radio_chanspec)) ?
- TXP_FIRST_MCS_40_CDD : TXP_FIRST_MCS_20_CDD;
- break;
+ write_phy_reg(pi, 0x24, clip1md_gaincode);
+ write_phy_reg(pi, 0x2ab, clip1md_gaincode);
- case 2:
+ write_phy_reg(pi, 0x37, clip1lo_gaincode);
+ write_phy_reg(pi, 0x2ad, clip1lo_gaincode);
- idx = (CHSPEC_IS40(pi->radio_chanspec)) ?
- TXP_FIRST_MCS_40_STBC : TXP_FIRST_MCS_20_STBC;
- break;
+ mod_phy_reg(pi, 0x27d, (0xff << 0), (crsmin_th << 0));
+ mod_phy_reg(pi, 0x280, (0xff << 0), (crsminl_th << 0));
+ mod_phy_reg(pi, 0x283, (0xff << 0), (crsminu_th << 0));
- case 3:
+ write_phy_reg(pi, 0x2b, nbclip_th);
+ write_phy_reg(pi, 0x41, nbclip_th);
- idx = (CHSPEC_IS40(pi->radio_chanspec)) ?
- TXP_FIRST_MCS_40_SDM : TXP_FIRST_MCS_20_SDM;
- break;
+ mod_phy_reg(pi, 0x27, (0x3f << 0), (w1clip_th << 0));
+ mod_phy_reg(pi, 0x3d, (0x3f << 0), (w1clip_th << 0));
+
+ write_phy_reg(pi, 0x150, 0x809c);
+
+ } else {
+
+ mod_phy_reg(pi, 0x1c, (0x1 << 13), (1 << 13));
+ mod_phy_reg(pi, 0x32, (0x1 << 13), (1 << 13));
+
+ write_phy_reg(pi, 0x2b, 0x84);
+ write_phy_reg(pi, 0x41, 0x84);
+
+ if (CHSPEC_IS20(pi->radio_chanspec)) {
+ write_phy_reg(pi, 0x6b, 0x2b);
+ write_phy_reg(pi, 0x6c, 0x2b);
+ write_phy_reg(pi, 0x6d, 0x9);
+ write_phy_reg(pi, 0x6e, 0x9);
}
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
- idx = idx + delta_ind;
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx++];
+ w1th = NPHY_RSSICAL_W1_TARGET - 4;
+ mod_phy_reg(pi, 0x27, (0x3f << 0), (w1th << 0));
+ mod_phy_reg(pi, 0x3d, (0x3f << 0), (w1th << 0));
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx++];
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx++];
+ if (CHSPEC_IS20(pi->radio_chanspec)) {
+ mod_phy_reg(pi, 0x1c, (0x1f << 0), (0x1 << 0));
+ mod_phy_reg(pi, 0x32, (0x1f << 0), (0x1 << 0));
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx++];
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx++];
+ mod_phy_reg(pi, 0x1d, (0x1f << 0), (0x1 << 0));
+ mod_phy_reg(pi, 0x33, (0x1f << 0), (0x1 << 0));
+ }
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx++];
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
- idx = idx + 1 - delta_ind;
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
+ write_phy_reg(pi, 0x150, 0x809c);
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
- pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
- pi->tx_power_offset[idx];
- }
-}
+ if (pi->nphy_gain_boost)
+ if ((CHSPEC_IS2G(pi->radio_chanspec)) &&
+ (CHSPEC_IS40(pi->radio_chanspec)))
+ hpf_code = 4;
+ else
+ hpf_code = 5;
+ else if (CHSPEC_IS40(pi->radio_chanspec))
+ hpf_code = 6;
+ else
+ hpf_code = 7;
-void wlc_phy_cal_init_nphy(struct brcms_phy *pi)
-{
-}
+ mod_phy_reg(pi, 0x20, (0x1f << 7), (hpf_code << 7));
+ mod_phy_reg(pi, 0x36, (0x1f << 7), (hpf_code << 7));
-static void
-wlc_phy_war_force_trsw_to_R_cliplo_nphy(struct brcms_phy *pi, u8 core)
-{
- if (core == PHY_CORE_0) {
- write_phy_reg(pi, 0x38, 0x4);
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- write_phy_reg(pi, 0x37, 0x0060);
- } else {
- write_phy_reg(pi, 0x37, 0x1080);
+ for (ctr = 0; ctr < 4; ctr++)
+ regval[ctr] = (hpf_code << 8) | 0x7c;
+ wlc_phy_table_write_nphy(pi, 7, 4, 0x106, 16, regval);
+
+ wlc_phy_adjust_lnagaintbl_nphy(pi);
+
+ if (pi->nphy_elna_gain_config) {
+ regval[0] = 0;
+ regval[1] = 1;
+ regval[2] = 1;
+ regval[3] = 1;
+ wlc_phy_table_write_nphy(pi, 2, 4, 8, 16, regval);
+ wlc_phy_table_write_nphy(pi, 3, 4, 8, 16, regval);
+
+ for (ctr = 0; ctr < 4; ctr++)
+ regval[ctr] = (hpf_code << 8) | 0x74;
+ wlc_phy_table_write_nphy(pi, 7, 4, 0x106, 16, regval);
}
- } else if (core == PHY_CORE_1) {
- write_phy_reg(pi, 0x2ae, 0x4);
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- write_phy_reg(pi, 0x2ad, 0x0060);
- } else {
- write_phy_reg(pi, 0x2ad, 0x1080);
+
+ if (NREV_IS(pi->pubpi.phy_rev, 2)) {
+ for (ctr = 0; ctr < 21; ctr++)
+ regval[ctr] = 3 * ctr;
+ wlc_phy_table_write_nphy(pi, 0, 21, 32, 16, regval);
+ wlc_phy_table_write_nphy(pi, 1, 21, 32, 16, regval);
+
+ for (ctr = 0; ctr < 21; ctr++)
+ regval[ctr] = (u16) ctr;
+ wlc_phy_table_write_nphy(pi, 2, 21, 32, 16, regval);
+ wlc_phy_table_write_nphy(pi, 3, 21, 32, 16, regval);
}
- }
-}
-static void wlc_phy_war_txchain_upd_nphy(struct brcms_phy *pi, u8 txchain)
-{
- u8 txchain0, txchain1;
+ wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_UPDATEGAINU,
+ rfseq_updategainu_events,
+ rfseq_updategainu_dlys,
+ sizeof(rfseq_updategainu_events) /
+ sizeof(rfseq_updategainu_events[0]));
- txchain0 = txchain & 0x1;
- txchain1 = (txchain & 0x2) >> 1;
- if (!txchain0) {
- wlc_phy_war_force_trsw_to_R_cliplo_nphy(pi, PHY_CORE_0);
- }
+ mod_phy_reg(pi, 0x153, (0xff << 8), (90 << 8));
- if (!txchain1) {
- wlc_phy_war_force_trsw_to_R_cliplo_nphy(pi, PHY_CORE_1);
+ if (CHSPEC_IS2G(pi->radio_chanspec))
+ mod_phy_reg(pi,
+ (NPHY_TO_BPHY_OFF + BPHY_OPTIONAL_MODES),
+ 0x7f, 0x4);
}
}
@@ -15362,11 +16312,10 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
u16 freq;
int coreNum;
- if (CHSPEC_IS5G(pi->radio_chanspec)) {
+ if (CHSPEC_IS5G(pi->radio_chanspec))
wlc_phy_classifier_nphy(pi, NPHY_ClassifierCtrl_cck_en, 0);
- } else {
+ else
wlc_phy_classifier_nphy(pi, NPHY_ClassifierCtrl_cck_en, 1);
- }
if (pi->phyhang_avoid)
wlc_phy_stay_in_carriersearch_nphy(pi, true);
@@ -15401,9 +16350,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
write_phy_reg(pi, 0x240, 0x1b0);
}
- if (NREV_GE(pi->pubpi.phy_rev, 8)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 8))
mod_phy_reg(pi, 0xbd, (0xff << 0), (114 << 0));
- }
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x00, 16,
&dac_control);
@@ -15422,7 +16370,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x16e, 16,
rfseq_rx2tx_dacbufpu_rev7);
- if (PHY_IPA(pi)) {
+ if (PHY_IPA(pi))
wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
rfseq_rx2tx_events_rev3_ipa,
rfseq_rx2tx_dlys_rev3_ipa,
@@ -15431,7 +16379,6 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
sizeof
(rfseq_rx2tx_events_rev3_ipa
[0]));
- }
mod_phy_reg(pi, 0x299, (0x3 << 14), (0x1 << 14));
mod_phy_reg(pi, 0x29d, (0x3 << 14), (0x1 << 14));
@@ -15448,11 +16395,13 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
|| (pi->pubpi.radiorev == 8)) {
rccal_bcap_val =
- read_radio_reg(pi,
- RADIO_2057_RCCAL_BCAP_VAL);
+ read_radio_reg(
+ pi,
+ RADIO_2057_RCCAL_BCAP_VAL);
rccal_scap_val =
- read_radio_reg(pi,
- RADIO_2057_RCCAL_SCAP_VAL);
+ read_radio_reg(
+ pi,
+ RADIO_2057_RCCAL_SCAP_VAL);
rccal_tx20_11b_bcap = rccal_bcap_val;
rccal_tx20_11b_scap = rccal_scap_val;
@@ -15497,11 +16446,13 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
tx_lpf_bw_ofdm_40mhz = 3;
rccal_bcap_val =
- read_radio_reg(pi,
- RADIO_2057_RCCAL_BCAP_VAL);
+ read_radio_reg(
+ pi,
+ RADIO_2057_RCCAL_BCAP_VAL);
rccal_scap_val =
- read_radio_reg(pi,
- RADIO_2057_RCCAL_SCAP_VAL);
+ read_radio_reg(
+ pi,
+ RADIO_2057_RCCAL_SCAP_VAL);
rccal_tx20_11b_bcap = rccal_bcap_val;
rccal_tx20_11b_scap = rccal_scap_val;
@@ -15517,70 +16468,83 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
if (rccal_ovrd) {
- rx2tx_lpf_rc_lut_tx20_11b = (rccal_tx20_11b_bcap << 8) |
- (rccal_tx20_11b_scap << 3) | tx_lpf_bw_11b;
- rx2tx_lpf_rc_lut_tx20_11n = (rccal_tx20_11n_bcap << 8) |
- (rccal_tx20_11n_scap << 3) | tx_lpf_bw_ofdm_20mhz;
- rx2tx_lpf_rc_lut_tx40_11n = (rccal_tx40_11n_bcap << 8) |
- (rccal_tx40_11n_scap << 3) | tx_lpf_bw_ofdm_40mhz;
+ rx2tx_lpf_rc_lut_tx20_11b =
+ (rccal_tx20_11b_bcap << 8) |
+ (rccal_tx20_11b_scap << 3) |
+ tx_lpf_bw_11b;
+ rx2tx_lpf_rc_lut_tx20_11n =
+ (rccal_tx20_11n_bcap << 8) |
+ (rccal_tx20_11n_scap << 3) |
+ tx_lpf_bw_ofdm_20mhz;
+ rx2tx_lpf_rc_lut_tx40_11n =
+ (rccal_tx40_11n_bcap << 8) |
+ (rccal_tx40_11n_scap << 3) |
+ tx_lpf_bw_ofdm_40mhz;
for (coreNum = 0; coreNum <= 1; coreNum++) {
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x152 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx20_11b);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x153 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx20_11n);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x154 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx20_11n);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x155 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx40_11n);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x156 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx40_11n);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x157 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx40_11n);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x158 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx40_11n);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x159 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx40_11n);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_RFSEQ,
+ 1,
+ 0x152 + coreNum * 0x10,
+ 16,
+ &rx2tx_lpf_rc_lut_tx20_11b);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_RFSEQ,
+ 1,
+ 0x153 + coreNum * 0x10,
+ 16,
+ &rx2tx_lpf_rc_lut_tx20_11n);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_RFSEQ,
+ 1,
+ 0x154 + coreNum * 0x10,
+ 16,
+ &rx2tx_lpf_rc_lut_tx20_11n);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_RFSEQ,
+ 1,
+ 0x155 + coreNum * 0x10,
+ 16,
+ &rx2tx_lpf_rc_lut_tx40_11n);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_RFSEQ,
+ 1,
+ 0x156 + coreNum * 0x10,
+ 16,
+ &rx2tx_lpf_rc_lut_tx40_11n);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_RFSEQ,
+ 1,
+ 0x157 + coreNum * 0x10,
+ 16,
+ &rx2tx_lpf_rc_lut_tx40_11n);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_RFSEQ,
+ 1,
+ 0x158 + coreNum * 0x10,
+ 16,
+ &rx2tx_lpf_rc_lut_tx40_11n);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_RFSEQ,
+ 1,
+ 0x159 + coreNum * 0x10,
+ 16,
+ &rx2tx_lpf_rc_lut_tx40_11n);
}
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 4),
- 1, 0x3, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID2);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 4),
+ 1, 0x3, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID2);
}
- if (!NORADIO_ENAB(pi->pubpi)) {
- write_phy_reg(pi, 0x32f, 0x3);
- }
+ write_phy_reg(pi, 0x32f, 0x3);
- if ((pi->pubpi.radiorev == 4) || (pi->pubpi.radiorev == 6)) {
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 2),
- 1, 0x3, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- }
+ if ((pi->pubpi.radiorev == 4) || (pi->pubpi.radiorev == 6))
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 2),
+ 1, 0x3, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
if ((pi->pubpi.radiorev == 3) || (pi->pubpi.radiorev == 4) ||
(pi->pubpi.radiorev == 6)) {
@@ -15635,17 +16599,16 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
if (CHSPEC_IS2G(pi->radio_chanspec)) {
if ((pi->pubpi.radiorev == 3)
|| (pi->pubpi.radiorev == 4)
- || (pi->pubpi.radiorev == 6)) {
-
+ || (pi->pubpi.radiorev == 6))
txgm_idac_bleed = 0x7f;
- }
for (coreNum = 0; coreNum <= 1; coreNum++) {
if (txgm_idac_bleed != 0)
- WRITE_RADIO_REG4(pi, RADIO_2057,
- CORE, coreNum,
- TXGM_IDAC_BLEED,
- txgm_idac_bleed);
+ WRITE_RADIO_REG4(
+ pi, RADIO_2057,
+ CORE, coreNum,
+ TXGM_IDAC_BLEED,
+ txgm_idac_bleed);
}
if (pi->pubpi.radiorev == 5) {
@@ -15660,18 +16623,20 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
CORE, coreNum,
IPA2G_IMAIN,
0x1f);
- WRITE_RADIO_REG4(pi, RADIO_2057,
- CORE, coreNum,
- IPA2G_BIAS_FILTER,
- 0xee);
+ WRITE_RADIO_REG4(
+ pi, RADIO_2057,
+ CORE, coreNum,
+ IPA2G_BIAS_FILTER,
+ 0xee);
WRITE_RADIO_REG4(pi, RADIO_2057,
CORE, coreNum,
PAD2G_IDACS,
0x8a);
- WRITE_RADIO_REG4(pi, RADIO_2057,
- CORE, coreNum,
- PAD_BIAS_FILTER_BWS,
- 0x3e);
+ WRITE_RADIO_REG4(
+ pi, RADIO_2057,
+ CORE, coreNum,
+ PAD_BIAS_FILTER_BWS,
+ 0x3e);
}
} else if ((pi->pubpi.radiorev == 7)
@@ -15700,9 +16665,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
}
} else {
- freq =
- CHAN5G_FREQ(CHSPEC_CHANNEL
- (pi->radio_chanspec));
+ freq = CHAN5G_FREQ(CHSPEC_CHANNEL(
+ pi->radio_chanspec));
if (((freq >= 5180) && (freq <= 5230))
|| ((freq >= 5745) && (freq <= 5805))) {
WRITE_RADIO_REG4(pi, RADIO_2057, CORE,
@@ -15809,12 +16773,12 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
wlc_phy_workarounds_nphy_gainctrl(pi);
pdetrange =
- (CHSPEC_IS5G(pi->radio_chanspec)) ? pi->srom_fem5g.
- pdetrange : pi->srom_fem2g.pdetrange;
+ (CHSPEC_IS5G(pi->radio_chanspec)) ? pi->srom_fem5g.
+ pdetrange : pi->srom_fem2g.pdetrange;
if (pdetrange == 0) {
chan_freq_range =
- wlc_phy_get_chan_freq_range_nphy(pi, 0);
+ wlc_phy_get_chan_freq_range_nphy(pi, 0);
if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
aux_adc_vmid_rev7_core0[3] = 0x70;
aux_adc_vmid_rev7_core1[3] = 0x70;
@@ -15842,15 +16806,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
if (chan_freq_range ==
WL_CHAN_FREQ_RANGE_2G) {
aux_adc_vmid_rev7_core0[3] =
- 0x8c;
+ 0x8c;
aux_adc_vmid_rev7_core1[3] =
- 0x8c;
+ 0x8c;
aux_adc_gain_rev7[3] = 0;
} else {
aux_adc_vmid_rev7_core0[3] =
- 0x96;
+ 0x96;
aux_adc_vmid_rev7_core1[3] =
- 0x96;
+ 0x96;
aux_adc_gain_rev7[3] = 0;
}
}
@@ -15918,7 +16882,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
sizeof(rfseq_tx2rx_events_rev3) /
sizeof(rfseq_tx2rx_events_rev3[0]));
- if (PHY_IPA(pi)) {
+ if (PHY_IPA(pi))
wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
rfseq_rx2tx_events_rev3_ipa,
rfseq_rx2tx_dlys_rev3_ipa,
@@ -15927,7 +16891,6 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
sizeof
(rfseq_rx2tx_events_rev3_ipa
[0]));
- }
if ((pi->sh->hw_phyrxchain != 0x3) &&
(pi->sh->hw_phyrxchain != pi->sh->hw_phytxchain)) {
@@ -15936,22 +16899,21 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
rfseq_rx2tx_dlys_rev3[5] = 59;
rfseq_rx2tx_dlys_rev3[6] = 1;
rfseq_rx2tx_events_rev3[7] =
- NPHY_REV3_RFSEQ_CMD_END;
+ NPHY_REV3_RFSEQ_CMD_END;
}
- wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
- rfseq_rx2tx_events_rev3,
- rfseq_rx2tx_dlys_rev3,
- sizeof(rfseq_rx2tx_events_rev3) /
- sizeof(rfseq_rx2tx_events_rev3
- [0]));
+ wlc_phy_set_rfseq_nphy(
+ pi, NPHY_RFSEQ_RX2TX,
+ rfseq_rx2tx_events_rev3,
+ rfseq_rx2tx_dlys_rev3,
+ sizeof(rfseq_rx2tx_events_rev3) /
+ sizeof(rfseq_rx2tx_events_rev3[0]));
}
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if (CHSPEC_IS2G(pi->radio_chanspec))
write_phy_reg(pi, 0x6a, 0x2);
- } else {
+ else
write_phy_reg(pi, 0x6a, 0x9c40);
- }
mod_phy_reg(pi, 0x294, (0xf << 8), (7 << 8));
@@ -15978,8 +16940,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
&dac_control);
pdetrange =
- (CHSPEC_IS5G(pi->radio_chanspec)) ? pi->srom_fem5g.
- pdetrange : pi->srom_fem2g.pdetrange;
+ (CHSPEC_IS5G(pi->radio_chanspec)) ? pi->srom_fem5g.
+ pdetrange : pi->srom_fem2g.pdetrange;
if (pdetrange == 0) {
if (NREV_GE(pi->pubpi.phy_rev, 4)) {
@@ -15990,7 +16952,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
aux_adc_gain = aux_adc_gain_rev3;
}
chan_freq_range =
- wlc_phy_get_chan_freq_range_nphy(pi, 0);
+ wlc_phy_get_chan_freq_range_nphy(pi, 0);
if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
switch (chan_freq_range) {
case WL_CHAN_FREQ_RANGE_5GL:
@@ -16033,7 +16995,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
if (NREV_GE(pi->pubpi.phy_rev, 6)) {
chan_freq_range =
- wlc_phy_get_chan_freq_range_nphy(pi, 0);
+ wlc_phy_get_chan_freq_range_nphy(pi, 0);
if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
bcm_adc_vmid[3] = 0x8e;
bcm_adc_gain[3] = 0x03;
@@ -16056,14 +17018,16 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
0x1c, 16, bcm_adc_gain);
} else if (pdetrange == 3) {
chan_freq_range =
- wlc_phy_get_chan_freq_range_nphy(pi, 0);
+ wlc_phy_get_chan_freq_range_nphy(pi, 0);
if ((NREV_GE(pi->pubpi.phy_rev, 4))
&& (chan_freq_range == WL_CHAN_FREQ_RANGE_2G)) {
u16 auxadc_vmid[] = {
- 0xa2, 0xb4, 0xb4, 0x270 };
+ 0xa2, 0xb4, 0xb4, 0x270
+ };
u16 auxadc_gain[] = {
- 0x02, 0x02, 0x02, 0x00 };
+ 0x02, 0x02, 0x02, 0x00
+ };
wlc_phy_table_write_nphy(pi,
NPHY_TBL_ID_AFECTRL, 4,
@@ -16084,7 +17048,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
u16 Vmid[2], Av[2];
chan_freq_range =
- wlc_phy_get_chan_freq_range_nphy(pi, 0);
+ wlc_phy_get_chan_freq_range_nphy(pi, 0);
if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
Vmid[0] = (pdetrange == 4) ? 0x8e : 0x89;
Vmid[1] = (pdetrange == 4) ? 0x96 : 0x89;
@@ -16155,8 +17119,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
0x0);
triso =
- (CHSPEC_IS5G(pi->radio_chanspec)) ? pi->srom_fem5g.
- triso : pi->srom_fem2g.triso;
+ (CHSPEC_IS5G(pi->radio_chanspec)) ? pi->srom_fem5g.
+ triso : pi->srom_fem2g.triso;
if (triso == 7) {
wlc_phy_war_force_trsw_to_R_cliplo_nphy(pi, PHY_CORE_0);
wlc_phy_war_force_trsw_to_R_cliplo_nphy(pi, PHY_CORE_1);
@@ -16212,11 +17176,10 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
if (NREV_GE(pi->pubpi.phy_rev, 6)) {
- if (pi->sh->boardflags2 & BFL2_SINGLEANT_CCK) {
+ if (pi->sh->boardflags2 & BFL2_SINGLEANT_CCK)
wlapi_bmac_mhf(pi->sh->physhim, MHF4,
- MHF4_BPHY_TXCORE0,
- MHF4_BPHY_TXCORE0, BRCM_BAND_ALL);
- }
+ MHF4_BPHY_TXCORE0,
+ MHF4_BPHY_TXCORE0, BRCM_BAND_ALL);
}
} else {
@@ -16314,1121 +17277,2720 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
write_phy_reg(pi, 0x194, 0x0);
}
- if (NREV_IS(pi->pubpi.phy_rev, 2)) {
+ if (NREV_IS(pi->pubpi.phy_rev, 2))
mod_phy_reg(pi, 0x221,
NPHY_FORCESIG_DECODEGATEDCLKS,
NPHY_FORCESIG_DECODEGATEDCLKS);
- }
}
if (pi->phyhang_avoid)
wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
-static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
+static void wlc_phy_extpa_set_tx_digi_filts_nphy(struct brcms_phy *pi)
{
- u16 w1th, hpf_code, currband;
- int ctr;
- u8 rfseq_updategainu_events[] = {
- NPHY_RFSEQ_CMD_RX_GAIN,
- NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
- NPHY_RFSEQ_CMD_SET_HPF_BW
- };
- u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
- s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
- s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
- s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
- s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
- s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
- s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
- s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
- s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
- s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
- s8 *lna1_gain_db = NULL;
- s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
- s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
- s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
- s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
- s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
- s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
- s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
- s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
- s8 *lna2_gain_db = NULL;
- s8 tiaG_gain_db[] = {
- 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
- s8 tiaA_gain_db[] = {
- 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
- s8 tiaA_gain_db_rev4[] = {
- 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 tiaA_gain_db_rev5[] = {
- 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 tiaA_gain_db_rev6[] = {
- 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 *tia_gain_db;
- s8 tiaG_gainbits[] = {
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
- s8 tiaA_gainbits[] = {
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
- s8 tiaA_gainbits_rev4[] = {
- 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 tiaA_gainbits_rev5[] = {
- 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 tiaA_gainbits_rev6[] = {
- 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 *tia_gainbits;
- s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
- s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
- u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
- u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
- u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
- u16 rfseqG_init_gain_rev5_elna[] = {
- 0x013f, 0x013f, 0x013f, 0x013f };
- u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
- u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
- u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
- u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
- u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
- u16 rfseqA_init_gain_rev4_elna[] = {
- 0x314f, 0x314f, 0x314f, 0x314f };
- u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
- u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
- u16 *rfseq_init_gain;
- u16 initG_gaincode = 0x627e;
- u16 initG_gaincode_rev4 = 0x527e;
- u16 initG_gaincode_rev5 = 0x427e;
- u16 initG_gaincode_rev5_elna = 0x027e;
- u16 initG_gaincode_rev6 = 0x527e;
- u16 initG_gaincode_rev6_224B0 = 0x427e;
- u16 initG_gaincode_rev6_elna = 0x127e;
- u16 initA_gaincode = 0x52de;
- u16 initA_gaincode_rev4 = 0x629e;
- u16 initA_gaincode_rev4_elna = 0x329e;
- u16 initA_gaincode_rev5 = 0x729e;
- u16 initA_gaincode_rev6 = 0x729e;
- u16 init_gaincode;
- u16 clip1hiG_gaincode = 0x107e;
- u16 clip1hiG_gaincode_rev4 = 0x007e;
- u16 clip1hiG_gaincode_rev5 = 0x1076;
- u16 clip1hiG_gaincode_rev6 = 0x007e;
- u16 clip1hiA_gaincode = 0x00de;
- u16 clip1hiA_gaincode_rev4 = 0x029e;
- u16 clip1hiA_gaincode_rev5 = 0x029e;
- u16 clip1hiA_gaincode_rev6 = 0x029e;
- u16 clip1hi_gaincode;
- u16 clip1mdG_gaincode = 0x0066;
- u16 clip1mdA_gaincode = 0x00ca;
- u16 clip1mdA_gaincode_rev4 = 0x1084;
- u16 clip1mdA_gaincode_rev5 = 0x2084;
- u16 clip1mdA_gaincode_rev6 = 0x2084;
- u16 clip1md_gaincode = 0;
- u16 clip1loG_gaincode = 0x0074;
- u16 clip1loG_gaincode_rev5[] = {
- 0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
- };
- u16 clip1loG_gaincode_rev6[] = {
- 0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
- };
- u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
- u16 clip1loA_gaincode = 0x00cc;
- u16 clip1loA_gaincode_rev4 = 0x0086;
- u16 clip1loA_gaincode_rev5 = 0x2086;
- u16 clip1loA_gaincode_rev6 = 0x2086;
- u16 clip1lo_gaincode;
- u8 crsminG_th = 0x18;
- u8 crsminG_th_rev5 = 0x18;
- u8 crsminG_th_rev6 = 0x18;
- u8 crsminA_th = 0x1e;
- u8 crsminA_th_rev4 = 0x24;
- u8 crsminA_th_rev5 = 0x24;
- u8 crsminA_th_rev6 = 0x24;
- u8 crsmin_th;
- u8 crsminlG_th = 0x18;
- u8 crsminlG_th_rev5 = 0x18;
- u8 crsminlG_th_rev6 = 0x18;
- u8 crsminlA_th = 0x1e;
- u8 crsminlA_th_rev4 = 0x24;
- u8 crsminlA_th_rev5 = 0x24;
- u8 crsminlA_th_rev6 = 0x24;
- u8 crsminl_th = 0;
- u8 crsminuG_th = 0x18;
- u8 crsminuG_th_rev5 = 0x18;
- u8 crsminuG_th_rev6 = 0x18;
- u8 crsminuA_th = 0x1e;
- u8 crsminuA_th_rev4 = 0x24;
- u8 crsminuA_th_rev5 = 0x24;
- u8 crsminuA_th_rev6 = 0x24;
- u8 crsminuA_th_rev6_224B0 = 0x2d;
- u8 crsminu_th;
- u16 nbclipG_th = 0x20d;
- u16 nbclipG_th_rev4 = 0x1a1;
- u16 nbclipG_th_rev5 = 0x1d0;
- u16 nbclipG_th_rev6 = 0x1d0;
- u16 nbclipA_th = 0x1a1;
- u16 nbclipA_th_rev4 = 0x107;
- u16 nbclipA_th_rev5 = 0x0a9;
- u16 nbclipA_th_rev6 = 0x0f0;
- u16 nbclip_th = 0;
- u8 w1clipG_th = 5;
- u8 w1clipG_th_rev5 = 9;
- u8 w1clipG_th_rev6 = 5;
- u8 w1clipA_th = 25, w1clip_th;
- u8 rssi_gain_default = 0x50;
- u8 rssiG_gain_rev6_224B0 = 0x50;
- u8 rssiA_gain_rev5 = 0x90;
- u8 rssiA_gain_rev6 = 0x90;
- u8 rssi_gain;
- u16 regval[21];
- u8 triso;
+ int j, type = 2;
+ u16 addr_offset = 0x2c5;
- triso = (CHSPEC_IS5G(pi->radio_chanspec)) ? pi->srom_fem5g.triso :
- pi->srom_fem2g.triso;
+ for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++)
+ write_phy_reg(pi, addr_offset + j,
+ NPHY_IPA_REV4_txdigi_filtcoeffs[type][j]);
+}
+
+static void wlc_phy_clip_det_nphy(struct brcms_phy *pi, u8 write, u16 *vals)
+{
+
+ if (write == 0) {
+ vals[0] = read_phy_reg(pi, 0x2c);
+ vals[1] = read_phy_reg(pi, 0x42);
+ } else {
+ write_phy_reg(pi, 0x2c, vals[0]);
+ write_phy_reg(pi, 0x42, vals[1]);
+ }
+}
+
+static void wlc_phy_ipa_internal_tssi_setup_nphy(struct brcms_phy *pi)
+{
+ u8 core;
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- if (pi->pubpi.radiorev == 5) {
+ for (core = 0; core < pi->pubpi.phy_corenum; core++) {
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TX_SSI_MASTER, 0x5);
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TX_SSI_MUX, 0xe);
- wlc_phy_workarounds_nphy_gainctrl_2057_rev5(pi);
- } else if (pi->pubpi.radiorev == 7) {
- wlc_phy_workarounds_nphy_gainctrl_2057_rev6(pi);
+ if (pi->pubpi.radiorev != 5)
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX,
+ core, TSSIA, 0);
- mod_phy_reg(pi, 0x283, (0xff << 0), (0x44 << 0));
- mod_phy_reg(pi, 0x280, (0xff << 0), (0x44 << 0));
+ if (!NREV_IS(pi->pubpi.phy_rev, 7))
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX,
+ core, TSSIG, 0x1);
+ else
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX,
+ core, TSSIG, 0x31);
+ } else {
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TX_SSI_MASTER, 0x9);
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TX_SSI_MUX, 0xc);
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TSSIG, 0);
- } else if ((pi->pubpi.radiorev == 3)
- || (pi->pubpi.radiorev == 8)) {
- wlc_phy_workarounds_nphy_gainctrl_2057_rev6(pi);
+ if (pi->pubpi.radiorev != 5) {
+ if (!NREV_IS(pi->pubpi.phy_rev, 7))
+ WRITE_RADIO_REG3(pi, RADIO_2057,
+ TX, core,
+ TSSIA, 0x1);
+ else
+ WRITE_RADIO_REG3(pi, RADIO_2057,
+ TX, core,
+ TSSIA, 0x31);
+ }
+ }
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core, IQCAL_VCM_HG,
+ 0);
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core, IQCAL_IDAC,
+ 0);
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core, TSSI_VCM,
+ 0x3);
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core, TSSI_MISC1,
+ 0x0);
+ }
+ } else {
+ WRITE_RADIO_SYN(pi, RADIO_2056, RESERVED_ADDR31,
+ (CHSPEC_IS2G(pi->radio_chanspec)) ? 0x128 :
+ 0x80);
+ WRITE_RADIO_SYN(pi, RADIO_2056, RESERVED_ADDR30, 0x0);
+ WRITE_RADIO_SYN(pi, RADIO_2056, GPIO_MASTER1, 0x29);
- if (pi->pubpi.radiorev == 8) {
- mod_phy_reg(pi, 0x283,
- (0xff << 0), (0x44 << 0));
- mod_phy_reg(pi, 0x280,
- (0xff << 0), (0x44 << 0));
+ for (core = 0; core < pi->pubpi.phy_corenum; core++) {
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, IQCAL_VCM_HG,
+ 0x0);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, IQCAL_IDAC,
+ 0x0);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, TSSI_VCM,
+ 0x3);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, TX_AMP_DET,
+ 0x0);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, TSSI_MISC1,
+ 0x8);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, TSSI_MISC2,
+ 0x0);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, TSSI_MISC3,
+ 0x0);
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
+ TX_SSI_MASTER, 0x5);
+
+ if (pi->pubpi.radiorev != 5)
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX,
+ core, TSSIA, 0x0);
+ if (NREV_GE(pi->pubpi.phy_rev, 5))
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX,
+ core, TSSIG, 0x31);
+ else
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX,
+ core, TSSIG, 0x11);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
+ TX_SSI_MUX, 0xe);
+ } else {
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
+ TX_SSI_MASTER, 0x9);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
+ TSSIA, 0x31);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
+ TSSIG, 0x0);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
+ TX_SSI_MUX, 0xc);
}
- } else {
- wlc_phy_workarounds_nphy_gainctrl_2057_rev6(pi);
}
- } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ }
+}
- mod_phy_reg(pi, 0xa0, (0x1 << 6), (1 << 6));
+static void
+wlc_phy_rfctrl_override_nphy(struct brcms_phy *pi, u16 field, u16 value,
+ u8 core_mask, u8 off)
+{
+ u8 core_num;
+ u16 addr = 0, mask = 0, en_addr = 0, val_addr = 0, en_mask =
+ 0, val_mask = 0;
+ u8 shift = 0, val_shift = 0;
- mod_phy_reg(pi, 0x1c, (0x1 << 13), (1 << 13));
- mod_phy_reg(pi, 0x32, (0x1 << 13), (1 << 13));
+ if (NREV_GE(pi->pubpi.phy_rev, 3) && NREV_LT(pi->pubpi.phy_rev, 7)) {
- currband =
- read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
- if (currband == 0) {
- if (NREV_GE(pi->pubpi.phy_rev, 6)) {
- if (pi->pubpi.radiorev == 11) {
- lna1_gain_db = lna1G_gain_db_rev6_224B0;
- lna2_gain_db = lna2G_gain_db_rev6_224B0;
- rfseq_init_gain =
- rfseqG_init_gain_rev6_224B0;
- init_gaincode =
- initG_gaincode_rev6_224B0;
- clip1hi_gaincode =
- clip1hiG_gaincode_rev6;
- clip1lo_gaincode =
- clip1loG_gaincode_rev6_224B0;
- nbclip_th = nbclipG_th_rev6;
- w1clip_th = w1clipG_th_rev6;
- crsmin_th = crsminG_th_rev6;
- crsminl_th = crsminlG_th_rev6;
- crsminu_th = crsminuG_th_rev6;
- rssi_gain = rssiG_gain_rev6_224B0;
- } else {
- lna1_gain_db = lna1G_gain_db_rev6;
- lna2_gain_db = lna2G_gain_db_rev6;
- if (pi->sh->boardflags & BFL_EXTLNA) {
+ en_mask = field;
+ for (core_num = 0; core_num < 2; core_num++) {
- rfseq_init_gain =
- rfseqG_init_gain_rev6_elna;
- init_gaincode =
- initG_gaincode_rev6_elna;
- } else {
- rfseq_init_gain =
- rfseqG_init_gain_rev6;
- init_gaincode =
- initG_gaincode_rev6;
- }
- clip1hi_gaincode =
- clip1hiG_gaincode_rev6;
- switch (triso) {
- case 0:
- clip1lo_gaincode =
- clip1loG_gaincode_rev6[0];
- break;
- case 1:
- clip1lo_gaincode =
- clip1loG_gaincode_rev6[1];
- break;
- case 2:
- clip1lo_gaincode =
- clip1loG_gaincode_rev6[2];
- break;
- case 3:
- default:
+ switch (field) {
+ case (0x1 << 1):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a : 0x7d;
+ val_mask = (0x1 << 0);
+ val_shift = 0;
+ break;
+ case (0x1 << 2):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a : 0x7d;
+ val_mask = (0x1 << 1);
+ val_shift = 1;
+ break;
+ case (0x1 << 3):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a : 0x7d;
+ val_mask = (0x1 << 2);
+ val_shift = 2;
+ break;
+ case (0x1 << 4):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a : 0x7d;
+ val_mask = (0x1 << 4);
+ val_shift = 4;
+ break;
+ case (0x1 << 5):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a : 0x7d;
+ val_mask = (0x1 << 5);
+ val_shift = 5;
+ break;
+ case (0x1 << 6):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a : 0x7d;
+ val_mask = (0x1 << 6);
+ val_shift = 6;
+ break;
+ case (0x1 << 7):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a : 0x7d;
+ val_mask = (0x1 << 7);
+ val_shift = 7;
+ break;
+ case (0x1 << 8):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a : 0x7d;
+ val_mask = (0x7 << 8);
+ val_shift = 8;
+ break;
+ case (0x1 << 11):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7a : 0x7d;
+ val_mask = (0x7 << 13);
+ val_shift = 13;
+ break;
- clip1lo_gaincode =
- clip1loG_gaincode_rev6[3];
- break;
- case 4:
- clip1lo_gaincode =
- clip1loG_gaincode_rev6[4];
- break;
- case 5:
- clip1lo_gaincode =
- clip1loG_gaincode_rev6[5];
- break;
- case 6:
- clip1lo_gaincode =
- clip1loG_gaincode_rev6[6];
- break;
- case 7:
- clip1lo_gaincode =
- clip1loG_gaincode_rev6[7];
- break;
- }
- nbclip_th = nbclipG_th_rev6;
- w1clip_th = w1clipG_th_rev6;
- crsmin_th = crsminG_th_rev6;
- crsminl_th = crsminlG_th_rev6;
- crsminu_th = crsminuG_th_rev6;
- rssi_gain = rssi_gain_default;
- }
- } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
- lna1_gain_db = lna1G_gain_db_rev5;
- lna2_gain_db = lna2G_gain_db_rev5;
- if (pi->sh->boardflags & BFL_EXTLNA) {
+ case (0x1 << 9):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0xf8 : 0xfa;
+ val_mask = (0x7 << 0);
+ val_shift = 0;
+ break;
- rfseq_init_gain =
- rfseqG_init_gain_rev5_elna;
- init_gaincode =
- initG_gaincode_rev5_elna;
- } else {
- rfseq_init_gain = rfseqG_init_gain_rev5;
- init_gaincode = initG_gaincode_rev5;
- }
- clip1hi_gaincode = clip1hiG_gaincode_rev5;
- switch (triso) {
- case 0:
- clip1lo_gaincode =
- clip1loG_gaincode_rev5[0];
- break;
- case 1:
- clip1lo_gaincode =
- clip1loG_gaincode_rev5[1];
- break;
- case 2:
- clip1lo_gaincode =
- clip1loG_gaincode_rev5[2];
- break;
- case 3:
+ case (0x1 << 10):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0xf8 : 0xfa;
+ val_mask = (0x7 << 4);
+ val_shift = 4;
+ break;
- clip1lo_gaincode =
- clip1loG_gaincode_rev5[3];
- break;
- case 4:
- clip1lo_gaincode =
- clip1loG_gaincode_rev5[4];
- break;
- case 5:
- clip1lo_gaincode =
- clip1loG_gaincode_rev5[5];
- break;
- case 6:
- clip1lo_gaincode =
- clip1loG_gaincode_rev5[6];
- break;
- case 7:
- clip1lo_gaincode =
- clip1loG_gaincode_rev5[7];
- break;
- default:
- clip1lo_gaincode =
- clip1loG_gaincode_rev5[3];
- break;
- }
- nbclip_th = nbclipG_th_rev5;
- w1clip_th = w1clipG_th_rev5;
- crsmin_th = crsminG_th_rev5;
- crsminl_th = crsminlG_th_rev5;
- crsminu_th = crsminuG_th_rev5;
- rssi_gain = rssi_gain_default;
- } else if (NREV_IS(pi->pubpi.phy_rev, 4)) {
- lna1_gain_db = lna1G_gain_db_rev4;
- lna2_gain_db = lna2G_gain_db;
- rfseq_init_gain = rfseqG_init_gain_rev4;
- init_gaincode = initG_gaincode_rev4;
- clip1hi_gaincode = clip1hiG_gaincode_rev4;
- clip1lo_gaincode = clip1loG_gaincode;
- nbclip_th = nbclipG_th_rev4;
- w1clip_th = w1clipG_th;
- crsmin_th = crsminG_th;
- crsminl_th = crsminlG_th;
- crsminu_th = crsminuG_th;
- rssi_gain = rssi_gain_default;
- } else {
- lna1_gain_db = lna1G_gain_db;
- lna2_gain_db = lna2G_gain_db;
- rfseq_init_gain = rfseqG_init_gain;
- init_gaincode = initG_gaincode;
- clip1hi_gaincode = clip1hiG_gaincode;
- clip1lo_gaincode = clip1loG_gaincode;
- nbclip_th = nbclipG_th;
- w1clip_th = w1clipG_th;
- crsmin_th = crsminG_th;
- crsminl_th = crsminlG_th;
- crsminu_th = crsminuG_th;
- rssi_gain = rssi_gain_default;
+ case (0x1 << 12):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7b : 0x7e;
+ val_mask = (0xffff << 0);
+ val_shift = 0;
+ break;
+ case (0x1 << 13):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0x7c : 0x7f;
+ val_mask = (0xffff << 0);
+ val_shift = 0;
+ break;
+ case (0x1 << 14):
+ en_addr = (core_num == 0) ? 0xe7 : 0xec;
+ val_addr = (core_num == 0) ? 0xf9 : 0xfb;
+ val_mask = (0x3 << 6);
+ val_shift = 6;
+ break;
+ case (0x1 << 0):
+ en_addr = (core_num == 0) ? 0xe5 : 0xe6;
+ val_addr = (core_num == 0) ? 0xf9 : 0xfb;
+ val_mask = (0x1 << 15);
+ val_shift = 15;
+ break;
+ default:
+ addr = 0xffff;
+ break;
}
- tia_gain_db = tiaG_gain_db;
- tia_gainbits = tiaG_gainbits;
- clip1md_gaincode = clip1mdG_gaincode;
- } else {
- if (NREV_GE(pi->pubpi.phy_rev, 6)) {
- lna1_gain_db = lna1A_gain_db_rev6;
- lna2_gain_db = lna2A_gain_db_rev6;
- tia_gain_db = tiaA_gain_db_rev6;
- tia_gainbits = tiaA_gainbits_rev6;
- rfseq_init_gain = rfseqA_init_gain_rev6;
- init_gaincode = initA_gaincode_rev6;
- clip1hi_gaincode = clip1hiA_gaincode_rev6;
- clip1md_gaincode = clip1mdA_gaincode_rev6;
- clip1lo_gaincode = clip1loA_gaincode_rev6;
- crsmin_th = crsminA_th_rev6;
- crsminl_th = crsminlA_th_rev6;
- if ((pi->pubpi.radiorev == 11) &&
- (CHSPEC_IS40(pi->radio_chanspec) == 0)) {
- crsminu_th = crsminuA_th_rev6_224B0;
- } else {
- crsminu_th = crsminuA_th_rev6;
- }
- nbclip_th = nbclipA_th_rev6;
- rssi_gain = rssiA_gain_rev6;
- } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
- lna1_gain_db = lna1A_gain_db_rev5;
- lna2_gain_db = lna2A_gain_db_rev5;
- tia_gain_db = tiaA_gain_db_rev5;
- tia_gainbits = tiaA_gainbits_rev5;
- rfseq_init_gain = rfseqA_init_gain_rev5;
- init_gaincode = initA_gaincode_rev5;
- clip1hi_gaincode = clip1hiA_gaincode_rev5;
- clip1md_gaincode = clip1mdA_gaincode_rev5;
- clip1lo_gaincode = clip1loA_gaincode_rev5;
- crsmin_th = crsminA_th_rev5;
- crsminl_th = crsminlA_th_rev5;
- crsminu_th = crsminuA_th_rev5;
- nbclip_th = nbclipA_th_rev5;
- rssi_gain = rssiA_gain_rev5;
- } else if (NREV_IS(pi->pubpi.phy_rev, 4)) {
- lna1_gain_db = lna1A_gain_db_rev4;
- lna2_gain_db = lna2A_gain_db_rev4;
- tia_gain_db = tiaA_gain_db_rev4;
- tia_gainbits = tiaA_gainbits_rev4;
- if (pi->sh->boardflags & BFL_EXTLNA_5GHz) {
- rfseq_init_gain =
- rfseqA_init_gain_rev4_elna;
- init_gaincode =
- initA_gaincode_rev4_elna;
- } else {
- rfseq_init_gain = rfseqA_init_gain_rev4;
- init_gaincode = initA_gaincode_rev4;
- }
- clip1hi_gaincode = clip1hiA_gaincode_rev4;
- clip1md_gaincode = clip1mdA_gaincode_rev4;
- clip1lo_gaincode = clip1loA_gaincode_rev4;
- crsmin_th = crsminA_th_rev4;
- crsminl_th = crsminlA_th_rev4;
- crsminu_th = crsminuA_th_rev4;
- nbclip_th = nbclipA_th_rev4;
- rssi_gain = rssi_gain_default;
+ if (off) {
+ and_phy_reg(pi, en_addr, ~en_mask);
+ and_phy_reg(pi, val_addr, ~val_mask);
} else {
- lna1_gain_db = lna1A_gain_db;
- lna2_gain_db = lna2A_gain_db;
- tia_gain_db = tiaA_gain_db;
- tia_gainbits = tiaA_gainbits;
- rfseq_init_gain = rfseqA_init_gain;
- init_gaincode = initA_gaincode;
- clip1hi_gaincode = clip1hiA_gaincode;
- clip1md_gaincode = clip1mdA_gaincode;
- clip1lo_gaincode = clip1loA_gaincode;
- crsmin_th = crsminA_th;
- crsminl_th = crsminlA_th;
- crsminu_th = crsminuA_th;
- nbclip_th = nbclipA_th;
- rssi_gain = rssi_gain_default;
+
+ if ((core_mask == 0)
+ || (core_mask & (1 << core_num))) {
+ or_phy_reg(pi, en_addr, en_mask);
+
+ if (addr != 0xffff)
+ mod_phy_reg(pi, val_addr,
+ val_mask,
+ (value <<
+ val_shift));
+ }
}
- w1clip_th = w1clipA_th;
}
+ } else {
- write_radio_reg(pi,
- (RADIO_2056_RX_BIASPOLE_LNAG1_IDAC |
- RADIO_2056_RX0), 0x17);
- write_radio_reg(pi,
- (RADIO_2056_RX_BIASPOLE_LNAG1_IDAC |
- RADIO_2056_RX1), 0x17);
+ if (off) {
+ and_phy_reg(pi, 0xec, ~field);
+ value = 0x0;
+ } else {
+ or_phy_reg(pi, 0xec, field);
+ }
- write_radio_reg(pi, (RADIO_2056_RX_LNAG2_IDAC | RADIO_2056_RX0),
- 0xf0);
- write_radio_reg(pi, (RADIO_2056_RX_LNAG2_IDAC | RADIO_2056_RX1),
- 0xf0);
+ for (core_num = 0; core_num < 2; core_num++) {
- write_radio_reg(pi, (RADIO_2056_RX_RSSI_POLE | RADIO_2056_RX0),
- 0x0);
- write_radio_reg(pi, (RADIO_2056_RX_RSSI_POLE | RADIO_2056_RX1),
- 0x0);
+ switch (field) {
+ case (0x1 << 1):
+ case (0x1 << 9):
+ case (0x1 << 12):
+ case (0x1 << 13):
+ case (0x1 << 14):
+ addr = 0x78;
- write_radio_reg(pi, (RADIO_2056_RX_RSSI_GAIN | RADIO_2056_RX0),
- rssi_gain);
- write_radio_reg(pi, (RADIO_2056_RX_RSSI_GAIN | RADIO_2056_RX1),
- rssi_gain);
+ core_mask = 0x1;
+ break;
+ case (0x1 << 2):
+ case (0x1 << 3):
+ case (0x1 << 4):
+ case (0x1 << 5):
+ case (0x1 << 6):
+ case (0x1 << 7):
+ case (0x1 << 8):
+ addr = (core_num == 0) ? 0x7a : 0x7d;
+ break;
+ case (0x1 << 10):
+ addr = (core_num == 0) ? 0x7b : 0x7e;
+ break;
+ case (0x1 << 11):
+ addr = (core_num == 0) ? 0x7c : 0x7f;
+ break;
+ default:
+ addr = 0xffff;
+ }
- write_radio_reg(pi,
- (RADIO_2056_RX_BIASPOLE_LNAA1_IDAC |
- RADIO_2056_RX0), 0x17);
- write_radio_reg(pi,
- (RADIO_2056_RX_BIASPOLE_LNAA1_IDAC |
- RADIO_2056_RX1), 0x17);
+ switch (field) {
+ case (0x1 << 1):
+ mask = (0x7 << 3);
+ shift = 3;
+ break;
+ case (0x1 << 9):
+ mask = (0x1 << 2);
+ shift = 2;
+ break;
+ case (0x1 << 12):
+ mask = (0x1 << 8);
+ shift = 8;
+ break;
+ case (0x1 << 13):
+ mask = (0x1 << 9);
+ shift = 9;
+ break;
+ case (0x1 << 14):
+ mask = (0xf << 12);
+ shift = 12;
+ break;
+ case (0x1 << 2):
+ mask = (0x1 << 0);
+ shift = 0;
+ break;
+ case (0x1 << 3):
+ mask = (0x1 << 1);
+ shift = 1;
+ break;
+ case (0x1 << 4):
+ mask = (0x1 << 2);
+ shift = 2;
+ break;
+ case (0x1 << 5):
+ mask = (0x3 << 4);
+ shift = 4;
+ break;
+ case (0x1 << 6):
+ mask = (0x3 << 6);
+ shift = 6;
+ break;
+ case (0x1 << 7):
+ mask = (0x1 << 8);
+ shift = 8;
+ break;
+ case (0x1 << 8):
+ mask = (0x1 << 9);
+ shift = 9;
+ break;
+ case (0x1 << 10):
+ mask = 0x1fff;
+ shift = 0x0;
+ break;
+ case (0x1 << 11):
+ mask = 0x1fff;
+ shift = 0x0;
+ break;
+ default:
+ mask = 0x0;
+ shift = 0x0;
+ break;
+ }
- write_radio_reg(pi, (RADIO_2056_RX_LNAA2_IDAC | RADIO_2056_RX0),
- 0xFF);
- write_radio_reg(pi, (RADIO_2056_RX_LNAA2_IDAC | RADIO_2056_RX1),
- 0xFF);
+ if ((addr != 0xffff) && (core_mask & (1 << core_num)))
+ mod_phy_reg(pi, addr, mask, (value << shift));
+ }
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 8,
- 8, lna1_gain_db);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 8,
- 8, lna1_gain_db);
+ or_phy_reg(pi, 0xec, (0x1 << 0));
+ or_phy_reg(pi, 0x78, (0x1 << 0));
+ udelay(1);
+ and_phy_reg(pi, 0xec, ~(0x1 << 0));
+ }
+}
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 0x10,
- 8, lna2_gain_db);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 0x10,
- 8, lna2_gain_db);
+static void wlc_phy_txpwrctrl_idle_tssi_nphy(struct brcms_phy *pi)
+{
+ s32 rssi_buf[4];
+ s32 int_val;
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 10, 0x20,
- 8, tia_gain_db);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 10, 0x20,
- 8, tia_gain_db);
+ if (SCAN_RM_IN_PROGRESS(pi) || PLT_INPROG_PHY(pi) || PHY_MUTED(pi))
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS1, 10, 0x20,
- 8, tia_gainbits);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS2, 10, 0x20,
- 8, tia_gainbits);
+ return;
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 6, 0x40,
- 8, &lpf_gain_db);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 6, 0x40,
- 8, &lpf_gain_db);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS1, 6, 0x40,
- 8, &lpf_gainbits);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS2, 6, 0x40,
- 8, &lpf_gainbits);
+ if (PHY_IPA(pi))
+ wlc_phy_ipa_internal_tssi_setup_nphy(pi);
- write_phy_reg(pi, 0x20, init_gaincode);
- write_phy_reg(pi, 0x2a7, init_gaincode);
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 12),
+ 0, 0x3, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ else if (NREV_GE(pi->pubpi.phy_rev, 3))
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 13), 0, 3, 0);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
- pi->pubpi.phy_corenum, 0x106, 16,
- rfseq_init_gain);
+ wlc_phy_stopplayback_nphy(pi);
- write_phy_reg(pi, 0x22, clip1hi_gaincode);
- write_phy_reg(pi, 0x2a9, clip1hi_gaincode);
+ wlc_phy_tx_tone_nphy(pi, 4000, 0, 0, 0, false);
- write_phy_reg(pi, 0x24, clip1md_gaincode);
- write_phy_reg(pi, 0x2ab, clip1md_gaincode);
+ udelay(20);
+ int_val =
+ wlc_phy_poll_rssi_nphy(pi, (u8) NPHY_RSSI_SEL_TSSI_2G, rssi_buf,
+ 1);
+ wlc_phy_stopplayback_nphy(pi);
+ wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_OFF, 0);
- write_phy_reg(pi, 0x37, clip1lo_gaincode);
- write_phy_reg(pi, 0x2ad, clip1lo_gaincode);
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 12),
+ 0, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ else if (NREV_GE(pi->pubpi.phy_rev, 3))
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 13), 0, 3, 1);
- mod_phy_reg(pi, 0x27d, (0xff << 0), (crsmin_th << 0));
- mod_phy_reg(pi, 0x280, (0xff << 0), (crsminl_th << 0));
- mod_phy_reg(pi, 0x283, (0xff << 0), (crsminu_th << 0));
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- write_phy_reg(pi, 0x2b, nbclip_th);
- write_phy_reg(pi, 0x41, nbclip_th);
+ pi->nphy_pwrctrl_info[PHY_CORE_0].idle_tssi_2g =
+ (u8) ((int_val >> 24) & 0xff);
+ pi->nphy_pwrctrl_info[PHY_CORE_0].idle_tssi_5g =
+ (u8) ((int_val >> 24) & 0xff);
- mod_phy_reg(pi, 0x27, (0x3f << 0), (w1clip_th << 0));
- mod_phy_reg(pi, 0x3d, (0x3f << 0), (w1clip_th << 0));
+ pi->nphy_pwrctrl_info[PHY_CORE_1].idle_tssi_2g =
+ (u8) ((int_val >> 8) & 0xff);
+ pi->nphy_pwrctrl_info[PHY_CORE_1].idle_tssi_5g =
+ (u8) ((int_val >> 8) & 0xff);
+ } else {
+ pi->nphy_pwrctrl_info[PHY_CORE_0].idle_tssi_2g =
+ (u8) ((int_val >> 24) & 0xff);
- write_phy_reg(pi, 0x150, 0x809c);
+ pi->nphy_pwrctrl_info[PHY_CORE_1].idle_tssi_2g =
+ (u8) ((int_val >> 8) & 0xff);
- } else {
+ pi->nphy_pwrctrl_info[PHY_CORE_0].idle_tssi_5g =
+ (u8) ((int_val >> 16) & 0xff);
+ pi->nphy_pwrctrl_info[PHY_CORE_1].idle_tssi_5g =
+ (u8) ((int_val) & 0xff);
+ }
- mod_phy_reg(pi, 0x1c, (0x1 << 13), (1 << 13));
- mod_phy_reg(pi, 0x32, (0x1 << 13), (1 << 13));
+}
- write_phy_reg(pi, 0x2b, 0x84);
- write_phy_reg(pi, 0x41, 0x84);
+static void wlc_phy_txpwr_limit_to_tbl_nphy(struct brcms_phy *pi)
+{
+ u8 idx, idx2, i, delta_ind;
- if (CHSPEC_IS20(pi->radio_chanspec)) {
- write_phy_reg(pi, 0x6b, 0x2b);
- write_phy_reg(pi, 0x6c, 0x2b);
- write_phy_reg(pi, 0x6d, 0x9);
- write_phy_reg(pi, 0x6e, 0x9);
- }
+ for (idx = TXP_FIRST_CCK; idx <= TXP_LAST_CCK; idx++)
+ pi->adj_pwr_tbl_nphy[idx] = pi->tx_power_offset[idx];
- w1th = NPHY_RSSICAL_W1_TARGET - 4;
- mod_phy_reg(pi, 0x27, (0x3f << 0), (w1th << 0));
- mod_phy_reg(pi, 0x3d, (0x3f << 0), (w1th << 0));
+ for (i = 0; i < 4; i++) {
+ idx2 = 0;
- if (CHSPEC_IS20(pi->radio_chanspec)) {
- mod_phy_reg(pi, 0x1c, (0x1f << 0), (0x1 << 0));
- mod_phy_reg(pi, 0x32, (0x1f << 0), (0x1 << 0));
+ delta_ind = 0;
- mod_phy_reg(pi, 0x1d, (0x1f << 0), (0x1 << 0));
- mod_phy_reg(pi, 0x33, (0x1f << 0), (0x1 << 0));
- }
+ switch (i) {
+ case 0:
- write_phy_reg(pi, 0x150, 0x809c);
+ if (CHSPEC_IS40(pi->radio_chanspec)
+ && NPHY_IS_SROM_REINTERPRET) {
+ idx = TXP_FIRST_MCS_40_SISO;
+ } else {
+ idx = (CHSPEC_IS40(pi->radio_chanspec)) ?
+ TXP_FIRST_OFDM_40_SISO : TXP_FIRST_OFDM;
+ delta_ind = 1;
+ }
+ break;
- if (pi->nphy_gain_boost)
- if ((CHSPEC_IS2G(pi->radio_chanspec)) &&
- (CHSPEC_IS40(pi->radio_chanspec)))
- hpf_code = 4;
- else
- hpf_code = 5;
- else if (CHSPEC_IS40(pi->radio_chanspec))
- hpf_code = 6;
- else
- hpf_code = 7;
+ case 1:
- mod_phy_reg(pi, 0x20, (0x1f << 7), (hpf_code << 7));
- mod_phy_reg(pi, 0x36, (0x1f << 7), (hpf_code << 7));
+ idx = (CHSPEC_IS40(pi->radio_chanspec)) ?
+ TXP_FIRST_MCS_40_CDD : TXP_FIRST_MCS_20_CDD;
+ break;
- for (ctr = 0; ctr < 4; ctr++) {
- regval[ctr] = (hpf_code << 8) | 0x7c;
- }
- wlc_phy_table_write_nphy(pi, 7, 4, 0x106, 16, regval);
+ case 2:
- wlc_phy_adjust_lnagaintbl_nphy(pi);
+ idx = (CHSPEC_IS40(pi->radio_chanspec)) ?
+ TXP_FIRST_MCS_40_STBC : TXP_FIRST_MCS_20_STBC;
+ break;
- if (pi->nphy_elna_gain_config) {
- regval[0] = 0;
- regval[1] = 1;
- regval[2] = 1;
- regval[3] = 1;
- wlc_phy_table_write_nphy(pi, 2, 4, 8, 16, regval);
- wlc_phy_table_write_nphy(pi, 3, 4, 8, 16, regval);
+ case 3:
- for (ctr = 0; ctr < 4; ctr++) {
- regval[ctr] = (hpf_code << 8) | 0x74;
- }
- wlc_phy_table_write_nphy(pi, 7, 4, 0x106, 16, regval);
+ idx = (CHSPEC_IS40(pi->radio_chanspec)) ?
+ TXP_FIRST_MCS_40_SDM : TXP_FIRST_MCS_20_SDM;
+ break;
}
- if (NREV_IS(pi->pubpi.phy_rev, 2)) {
- for (ctr = 0; ctr < 21; ctr++) {
- regval[ctr] = 3 * ctr;
- }
- wlc_phy_table_write_nphy(pi, 0, 21, 32, 16, regval);
- wlc_phy_table_write_nphy(pi, 1, 21, 32, 16, regval);
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
+ idx = idx + delta_ind;
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx++];
- for (ctr = 0; ctr < 21; ctr++) {
- regval[ctr] = (u16) ctr;
- }
- wlc_phy_table_write_nphy(pi, 2, 21, 32, 16, regval);
- wlc_phy_table_write_nphy(pi, 3, 21, 32, 16, regval);
- }
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx++];
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx++];
- wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_UPDATEGAINU,
- rfseq_updategainu_events,
- rfseq_updategainu_dlys,
- sizeof(rfseq_updategainu_events) /
- sizeof(rfseq_updategainu_events[0]));
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx++];
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx++];
- mod_phy_reg(pi, 0x153, (0xff << 8), (90 << 8));
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx++];
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
+ idx = idx + 1 - delta_ind;
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
- if (CHSPEC_IS2G(pi->radio_chanspec))
- mod_phy_reg(pi,
- (NPHY_TO_BPHY_OFF + BPHY_OPTIONAL_MODES),
- 0x7f, 0x4);
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
+ pi->adj_pwr_tbl_nphy[4 + 4 * (idx2++) + i] =
+ pi->tx_power_offset[idx];
}
}
-static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
+static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
{
- s8 lna1_gain_db[] = { 8, 13, 17, 22 };
- s8 lna2_gain_db[] = { -2, 7, 11, 15 };
- s8 tia_gain_db[] = { -4, -1, 2, 5, 5, 5, 5, 5, 5, 5 };
- s8 tia_gainbits[] = {
- 0x0, 0x01, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
+ u32 idx;
+ s16 a1[2], b0[2], b1[2];
+ s8 target_pwr_qtrdbm[2];
+ s32 num, den, pwr_est;
+ u8 chan_freq_range;
+ u8 idle_tssi[2];
+ u32 tbl_id, tbl_len, tbl_offset;
+ u32 regval[64];
+ u8 core;
- mod_phy_reg(pi, 0x1c, (0x1 << 13), (1 << 13));
- mod_phy_reg(pi, 0x32, (0x1 << 13), (1 << 13));
+ if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
+ wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
+ (void)R_REG(&pi->regs->maccontrol);
+ udelay(1);
+ }
- mod_phy_reg(pi, 0x289, (0xff << 0), (0x46 << 0));
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, true);
- mod_phy_reg(pi, 0x283, (0xff << 0), (0x3c << 0));
- mod_phy_reg(pi, 0x280, (0xff << 0), (0x3c << 0));
+ or_phy_reg(pi, 0x122, (0x1 << 0));
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 0x8, 8,
- lna1_gain_db);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 0x8, 8,
- lna1_gain_db);
+ if (NREV_GE(pi->pubpi.phy_rev, 3))
+ and_phy_reg(pi, 0x1e7, (u16) (~(0x1 << 15)));
+ else
+ or_phy_reg(pi, 0x1e7, (0x1 << 15));
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 0x10, 8,
- lna2_gain_db);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 0x10, 8,
- lna2_gain_db);
+ if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12))
+ wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, 0);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 10, 0x20, 8,
- tia_gain_db);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 10, 0x20, 8,
- tia_gain_db);
+ if (pi->sh->sromrev < 4) {
+ idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_2g;
+ idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_2g;
+ target_pwr_qtrdbm[0] = 13 * 4;
+ target_pwr_qtrdbm[1] = 13 * 4;
+ a1[0] = -424;
+ a1[1] = -424;
+ b0[0] = 5612;
+ b0[1] = 5612;
+ b1[1] = -1393;
+ b1[0] = -1393;
+ } else {
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS1, 10, 0x20, 8,
- tia_gainbits);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS2, 10, 0x20, 8,
- tia_gainbits);
+ chan_freq_range = wlc_phy_get_chan_freq_range_nphy(pi, 0);
+ switch (chan_freq_range) {
+ case WL_CHAN_FREQ_RANGE_2G:
+ idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_2g;
+ idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_2g;
+ target_pwr_qtrdbm[0] =
+ pi->nphy_pwrctrl_info[0].max_pwr_2g;
+ target_pwr_qtrdbm[1] =
+ pi->nphy_pwrctrl_info[1].max_pwr_2g;
+ a1[0] = pi->nphy_pwrctrl_info[0].pwrdet_2g_a1;
+ a1[1] = pi->nphy_pwrctrl_info[1].pwrdet_2g_a1;
+ b0[0] = pi->nphy_pwrctrl_info[0].pwrdet_2g_b0;
+ b0[1] = pi->nphy_pwrctrl_info[1].pwrdet_2g_b0;
+ b1[0] = pi->nphy_pwrctrl_info[0].pwrdet_2g_b1;
+ b1[1] = pi->nphy_pwrctrl_info[1].pwrdet_2g_b1;
+ break;
+ case WL_CHAN_FREQ_RANGE_5GL:
+ idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_5g;
+ idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_5g;
+ target_pwr_qtrdbm[0] =
+ pi->nphy_pwrctrl_info[0].max_pwr_5gl;
+ target_pwr_qtrdbm[1] =
+ pi->nphy_pwrctrl_info[1].max_pwr_5gl;
+ a1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gl_a1;
+ a1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gl_a1;
+ b0[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gl_b0;
+ b0[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gl_b0;
+ b1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gl_b1;
+ b1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gl_b1;
+ break;
+ case WL_CHAN_FREQ_RANGE_5GM:
+ idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_5g;
+ idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_5g;
+ target_pwr_qtrdbm[0] =
+ pi->nphy_pwrctrl_info[0].max_pwr_5gm;
+ target_pwr_qtrdbm[1] =
+ pi->nphy_pwrctrl_info[1].max_pwr_5gm;
+ a1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gm_a1;
+ a1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gm_a1;
+ b0[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gm_b0;
+ b0[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gm_b0;
+ b1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gm_b1;
+ b1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gm_b1;
+ break;
+ case WL_CHAN_FREQ_RANGE_5GH:
+ idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_5g;
+ idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_5g;
+ target_pwr_qtrdbm[0] =
+ pi->nphy_pwrctrl_info[0].max_pwr_5gh;
+ target_pwr_qtrdbm[1] =
+ pi->nphy_pwrctrl_info[1].max_pwr_5gh;
+ a1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gh_a1;
+ a1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gh_a1;
+ b0[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gh_b0;
+ b0[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gh_b0;
+ b1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gh_b1;
+ b1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gh_b1;
+ break;
+ default:
+ idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_2g;
+ idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_2g;
+ target_pwr_qtrdbm[0] = 13 * 4;
+ target_pwr_qtrdbm[1] = 13 * 4;
+ a1[0] = -424;
+ a1[1] = -424;
+ b0[0] = 5612;
+ b0[1] = 5612;
+ b1[1] = -1393;
+ b1[0] = -1393;
+ break;
+ }
+ }
- write_phy_reg(pi, 0x37, 0x74);
- write_phy_reg(pi, 0x2ad, 0x74);
- write_phy_reg(pi, 0x38, 0x18);
- write_phy_reg(pi, 0x2ae, 0x18);
+ target_pwr_qtrdbm[0] = (s8) pi->tx_power_max;
+ target_pwr_qtrdbm[1] = (s8) pi->tx_power_max;
- write_phy_reg(pi, 0x2b, 0xe8);
- write_phy_reg(pi, 0x41, 0xe8);
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ if (pi->srom_fem2g.tssipos)
+ or_phy_reg(pi, 0x1e9, (0x1 << 14));
- if (CHSPEC_IS20(pi->radio_chanspec)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ for (core = 0; core <= 1; core++) {
+ if (PHY_IPA(pi)) {
+ if (CHSPEC_IS2G(pi->radio_chanspec))
+ WRITE_RADIO_REG3(pi, RADIO_2057,
+ TX, core,
+ TX_SSI_MUX,
+ 0xe);
+ else
+ WRITE_RADIO_REG3(pi, RADIO_2057,
+ TX, core,
+ TX_SSI_MUX,
+ 0xc);
+ }
+ }
+ } else {
+ if (PHY_IPA(pi)) {
- mod_phy_reg(pi, 0x300, (0x3f << 0), (0x12 << 0));
- mod_phy_reg(pi, 0x301, (0x3f << 0), (0x12 << 0));
- } else {
+ write_radio_reg(pi, RADIO_2056_TX_TX_SSI_MUX |
+ RADIO_2056_TX0,
+ (CHSPEC_IS5G
+ (pi->radio_chanspec)) ?
+ 0xc : 0xe);
+ write_radio_reg(pi,
+ RADIO_2056_TX_TX_SSI_MUX |
+ RADIO_2056_TX1,
+ (CHSPEC_IS5G
+ (pi->radio_chanspec)) ?
+ 0xc : 0xe);
+ } else {
- mod_phy_reg(pi, 0x300, (0x3f << 0), (0x10 << 0));
- mod_phy_reg(pi, 0x301, (0x3f << 0), (0x10 << 0));
+ write_radio_reg(pi, RADIO_2056_TX_TX_SSI_MUX |
+ RADIO_2056_TX0, 0x11);
+ write_radio_reg(pi, RADIO_2056_TX_TX_SSI_MUX |
+ RADIO_2056_TX1, 0x11);
+ }
+ }
}
-}
-static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
-{
- u16 currband;
- s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
- s8 *lna1_gain_db = NULL;
- s8 *lna1_gain_db_2 = NULL;
- s8 *lna2_gain_db = NULL;
- s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
- s8 *tia_gain_db;
- s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
- s8 *tia_gainbits;
- u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
- u16 *rfseq_init_gain;
- u16 init_gaincode;
- u16 clip1hi_gaincode;
- u16 clip1md_gaincode = 0;
- u16 clip1md_gaincode_B;
- u16 clip1lo_gaincode;
- u16 clip1lo_gaincode_B;
- u8 crsminl_th = 0;
- u8 crsminu_th;
- u16 nbclip_th = 0;
- u8 w1clip_th;
- u16 freq;
- s8 nvar_baseline_offset0 = 0, nvar_baseline_offset1 = 0;
- u8 chg_nbclip_th = 0;
+ if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
+ wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
+ (void)R_REG(&pi->regs->maccontrol);
+ udelay(1);
+ }
- mod_phy_reg(pi, 0x1c, (0x1 << 13), (1 << 13));
- mod_phy_reg(pi, 0x32, (0x1 << 13), (1 << 13));
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ mod_phy_reg(pi, 0x1e7, (0x7f << 0),
+ (NPHY_TxPwrCtrlCmd_pwrIndex_init_rev7 << 0));
+ else
+ mod_phy_reg(pi, 0x1e7, (0x7f << 0),
+ (NPHY_TxPwrCtrlCmd_pwrIndex_init << 0));
- currband = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
- if (currband == 0) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ mod_phy_reg(pi, 0x222, (0xff << 0),
+ (NPHY_TxPwrCtrlCmd_pwrIndex_init_rev7 << 0));
+ else if (NREV_GT(pi->pubpi.phy_rev, 1))
+ mod_phy_reg(pi, 0x222, (0xff << 0),
+ (NPHY_TxPwrCtrlCmd_pwrIndex_init << 0));
- lna1_gain_db = lna1G_gain_db_rev7;
+ if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12))
+ wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, 0);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 8, 8,
- lna1_gain_db);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 8, 8,
- lna1_gain_db);
+ write_phy_reg(pi, 0x1e8, (0x3 << 8) | (240 << 0));
- mod_phy_reg(pi, 0x283, (0xff << 0), (0x40 << 0));
+ write_phy_reg(pi, 0x1e9,
+ (1 << 15) | (idle_tssi[0] << 0) | (idle_tssi[1] << 8));
- if (CHSPEC_IS40(pi->radio_chanspec)) {
- mod_phy_reg(pi, 0x280, (0xff << 0), (0x3e << 0));
- mod_phy_reg(pi, 0x283, (0xff << 0), (0x3e << 0));
+ write_phy_reg(pi, 0x1ea,
+ (target_pwr_qtrdbm[0] << 0) |
+ (target_pwr_qtrdbm[1] << 8));
+
+ tbl_len = 64;
+ tbl_offset = 0;
+ for (tbl_id = NPHY_TBL_ID_CORE1TXPWRCTL;
+ tbl_id <= NPHY_TBL_ID_CORE2TXPWRCTL; tbl_id++) {
+
+ for (idx = 0; idx < tbl_len; idx++) {
+ num = 8 *
+ (16 * b0[tbl_id - 26] + b1[tbl_id - 26] * idx);
+ den = 32768 + a1[tbl_id - 26] * idx;
+ pwr_est = max(((4 * num + den / 2) / den), -8);
+ if (NREV_LT(pi->pubpi.phy_rev, 3)) {
+ if (idx <=
+ (uint) (31 - idle_tssi[tbl_id - 26] + 1))
+ pwr_est =
+ max(pwr_est,
+ target_pwr_qtrdbm
+ [tbl_id - 26] + 1);
+ }
+ regval[idx] = (u32) pwr_est;
}
+ wlc_phy_table_write_nphy(pi, tbl_id, tbl_len, tbl_offset, 32,
+ regval);
+ }
- mod_phy_reg(pi, 0x289, (0xff << 0), (0x46 << 0));
+ wlc_phy_txpwr_limit_to_tbl_nphy(pi);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CORE1TXPWRCTL, 84, 64, 8,
+ pi->adj_pwr_tbl_nphy);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CORE2TXPWRCTL, 84, 64, 8,
+ pi->adj_pwr_tbl_nphy);
- if (CHSPEC_IS20(pi->radio_chanspec)) {
- mod_phy_reg(pi, 0x300, (0x3f << 0), (13 << 0));
- mod_phy_reg(pi, 0x301, (0x3f << 0), (13 << 0));
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, false);
+}
+
+static u32 *wlc_phy_get_ipa_gaintbl_nphy(struct brcms_phy *pi)
+{
+ u32 *tx_pwrctrl_tbl = NULL;
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if ((pi->pubpi.radiorev == 4)
+ || (pi->pubpi.radiorev == 6))
+ tx_pwrctrl_tbl =
+ nphy_tpc_txgain_ipa_2g_2057rev4n6;
+ else if (pi->pubpi.radiorev == 3)
+ tx_pwrctrl_tbl =
+ nphy_tpc_txgain_ipa_2g_2057rev3;
+ else if (pi->pubpi.radiorev == 5)
+ tx_pwrctrl_tbl =
+ nphy_tpc_txgain_ipa_2g_2057rev5;
+ else if ((pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev == 8))
+ tx_pwrctrl_tbl =
+ nphy_tpc_txgain_ipa_2g_2057rev7;
+ } else if (NREV_IS(pi->pubpi.phy_rev, 6)) {
+ tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev6;
+ } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
+ tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
+ } else {
+ tx_pwrctrl_tbl = nphy_tpc_txgain_ipa;
}
} else {
- init_gaincode = 0x9e;
- clip1hi_gaincode = 0x9e;
- clip1md_gaincode_B = 0x24;
- clip1lo_gaincode = 0x8a;
- clip1lo_gaincode_B = 8;
- rfseq_init_gain = rfseqA_init_gain_rev7;
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if ((pi->pubpi.radiorev == 3) ||
+ (pi->pubpi.radiorev == 4) ||
+ (pi->pubpi.radiorev == 6))
+ tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_5g_2057;
+ else if ((pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev == 8))
+ tx_pwrctrl_tbl =
+ nphy_tpc_txgain_ipa_5g_2057rev7;
+ } else {
+ tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_5g;
+ }
+ }
- tia_gain_db = tiaA_gain_db_rev7;
- tia_gainbits = tiaA_gainbits_rev7;
+ return tx_pwrctrl_tbl;
+}
- freq = CHAN5G_FREQ(CHSPEC_CHANNEL(pi->radio_chanspec));
- if (CHSPEC_IS20(pi->radio_chanspec)) {
+static void wlc_phy_restore_rssical_nphy(struct brcms_phy *pi)
+{
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if (pi->nphy_rssical_chanspec_2G == 0)
+ return;
- w1clip_th = 25;
- clip1md_gaincode = 0x82;
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ mod_radio_reg(pi, RADIO_2057_NB_MASTER_CORE0,
+ RADIO_2057_VCM_MASK,
+ pi->rssical_cache.
+ rssical_radio_regs_2G[0]);
+ mod_radio_reg(pi, RADIO_2057_NB_MASTER_CORE1,
+ RADIO_2057_VCM_MASK,
+ pi->rssical_cache.
+ rssical_radio_regs_2G[1]);
+ } else {
+ mod_radio_reg(pi,
+ RADIO_2056_RX_RSSI_MISC | RADIO_2056_RX0,
+ RADIO_2056_VCM_MASK,
+ pi->rssical_cache.
+ rssical_radio_regs_2G[0]);
+ mod_radio_reg(pi,
+ RADIO_2056_RX_RSSI_MISC | RADIO_2056_RX1,
+ RADIO_2056_VCM_MASK,
+ pi->rssical_cache.
+ rssical_radio_regs_2G[1]);
+ }
- if ((freq <= 5080) || (freq == 5825)) {
+ write_phy_reg(pi, 0x1a6,
+ pi->rssical_cache.rssical_phyregs_2G[0]);
+ write_phy_reg(pi, 0x1ac,
+ pi->rssical_cache.rssical_phyregs_2G[1]);
+ write_phy_reg(pi, 0x1b2,
+ pi->rssical_cache.rssical_phyregs_2G[2]);
+ write_phy_reg(pi, 0x1b8,
+ pi->rssical_cache.rssical_phyregs_2G[3]);
+ write_phy_reg(pi, 0x1a4,
+ pi->rssical_cache.rssical_phyregs_2G[4]);
+ write_phy_reg(pi, 0x1aa,
+ pi->rssical_cache.rssical_phyregs_2G[5]);
+ write_phy_reg(pi, 0x1b0,
+ pi->rssical_cache.rssical_phyregs_2G[6]);
+ write_phy_reg(pi, 0x1b6,
+ pi->rssical_cache.rssical_phyregs_2G[7]);
+ write_phy_reg(pi, 0x1a5,
+ pi->rssical_cache.rssical_phyregs_2G[8]);
+ write_phy_reg(pi, 0x1ab,
+ pi->rssical_cache.rssical_phyregs_2G[9]);
+ write_phy_reg(pi, 0x1b1,
+ pi->rssical_cache.rssical_phyregs_2G[10]);
+ write_phy_reg(pi, 0x1b7,
+ pi->rssical_cache.rssical_phyregs_2G[11]);
- s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
- s8 lna1A_gain_db_2_rev7[] = {
- 11, 17, 22, 25 };
- s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ } else {
+ if (pi->nphy_rssical_chanspec_5G == 0)
+ return;
- crsminu_th = 0x3e;
- lna1_gain_db = lna1A_gain_db_rev7;
- lna1_gain_db_2 = lna1A_gain_db_2_rev7;
- lna2_gain_db = lna2A_gain_db_rev7;
- } else if ((freq >= 5500) && (freq <= 5700)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ mod_radio_reg(pi, RADIO_2057_NB_MASTER_CORE0,
+ RADIO_2057_VCM_MASK,
+ pi->rssical_cache.
+ rssical_radio_regs_5G[0]);
+ mod_radio_reg(pi, RADIO_2057_NB_MASTER_CORE1,
+ RADIO_2057_VCM_MASK,
+ pi->rssical_cache.
+ rssical_radio_regs_5G[1]);
+ } else {
+ mod_radio_reg(pi,
+ RADIO_2056_RX_RSSI_MISC | RADIO_2056_RX0,
+ RADIO_2056_VCM_MASK,
+ pi->rssical_cache.
+ rssical_radio_regs_5G[0]);
+ mod_radio_reg(pi,
+ RADIO_2056_RX_RSSI_MISC | RADIO_2056_RX1,
+ RADIO_2056_VCM_MASK,
+ pi->rssical_cache.
+ rssical_radio_regs_5G[1]);
+ }
- s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
- s8 lna1A_gain_db_2_rev7[] = {
- 12, 18, 22, 26 };
- s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
+ write_phy_reg(pi, 0x1a6,
+ pi->rssical_cache.rssical_phyregs_5G[0]);
+ write_phy_reg(pi, 0x1ac,
+ pi->rssical_cache.rssical_phyregs_5G[1]);
+ write_phy_reg(pi, 0x1b2,
+ pi->rssical_cache.rssical_phyregs_5G[2]);
+ write_phy_reg(pi, 0x1b8,
+ pi->rssical_cache.rssical_phyregs_5G[3]);
+ write_phy_reg(pi, 0x1a4,
+ pi->rssical_cache.rssical_phyregs_5G[4]);
+ write_phy_reg(pi, 0x1aa,
+ pi->rssical_cache.rssical_phyregs_5G[5]);
+ write_phy_reg(pi, 0x1b0,
+ pi->rssical_cache.rssical_phyregs_5G[6]);
+ write_phy_reg(pi, 0x1b6,
+ pi->rssical_cache.rssical_phyregs_5G[7]);
+ write_phy_reg(pi, 0x1a5,
+ pi->rssical_cache.rssical_phyregs_5G[8]);
+ write_phy_reg(pi, 0x1ab,
+ pi->rssical_cache.rssical_phyregs_5G[9]);
+ write_phy_reg(pi, 0x1b1,
+ pi->rssical_cache.rssical_phyregs_5G[10]);
+ write_phy_reg(pi, 0x1b7,
+ pi->rssical_cache.rssical_phyregs_5G[11]);
+ }
+}
- crsminu_th = 0x45;
- clip1md_gaincode_B = 0x14;
- nbclip_th = 0xff;
- chg_nbclip_th = 1;
- lna1_gain_db = lna1A_gain_db_rev7;
- lna1_gain_db_2 = lna1A_gain_db_2_rev7;
- lna2_gain_db = lna2A_gain_db_rev7;
+static void wlc_phy_internal_cal_txgain_nphy(struct brcms_phy *pi)
+{
+ u16 txcal_gain[2];
+
+ pi->nphy_txcal_pwr_idx[0] = pi->nphy_cal_orig_pwr_idx[0];
+ pi->nphy_txcal_pwr_idx[1] = pi->nphy_cal_orig_pwr_idx[0];
+ wlc_phy_txpwr_index_nphy(pi, 1, pi->nphy_cal_orig_pwr_idx[0], true);
+ wlc_phy_txpwr_index_nphy(pi, 2, pi->nphy_cal_orig_pwr_idx[1], true);
+
+ wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x110, 16,
+ txcal_gain);
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ txcal_gain[0] = (txcal_gain[0] & 0xF000) | 0x0F40;
+ txcal_gain[1] = (txcal_gain[1] & 0xF000) | 0x0F40;
+ } else {
+ txcal_gain[0] = (txcal_gain[0] & 0xF000) | 0x0F60;
+ txcal_gain[1] = (txcal_gain[1] & 0xF000) | 0x0F60;
+ }
+
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x110, 16,
+ txcal_gain);
+}
+
+static void wlc_phy_precal_txgain_nphy(struct brcms_phy *pi)
+{
+ bool save_bbmult = false;
+ u8 txcal_index_2057_rev5n7 = 0;
+ u8 txcal_index_2057_rev3n4n6 = 10;
+
+ if (pi->use_int_tx_iqlo_cal_nphy) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if ((pi->pubpi.radiorev == 3) ||
+ (pi->pubpi.radiorev == 4) ||
+ (pi->pubpi.radiorev == 6)) {
+
+ pi->nphy_txcal_pwr_idx[0] =
+ txcal_index_2057_rev3n4n6;
+ pi->nphy_txcal_pwr_idx[1] =
+ txcal_index_2057_rev3n4n6;
+ wlc_phy_txpwr_index_nphy(
+ pi, 3,
+ txcal_index_2057_rev3n4n6,
+ false);
} else {
- s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
- s8 lna1A_gain_db_2_rev7[] = {
- 12, 18, 22, 26 };
- s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ pi->nphy_txcal_pwr_idx[0] =
+ txcal_index_2057_rev5n7;
+ pi->nphy_txcal_pwr_idx[1] =
+ txcal_index_2057_rev5n7;
+ wlc_phy_txpwr_index_nphy(
+ pi, 3,
+ txcal_index_2057_rev5n7,
+ false);
+ }
+ save_bbmult = true;
- crsminu_th = 0x41;
- lna1_gain_db = lna1A_gain_db_rev7;
- lna1_gain_db_2 = lna1A_gain_db_2_rev7;
- lna2_gain_db = lna2A_gain_db_rev7;
+ } else if (NREV_LT(pi->pubpi.phy_rev, 5)) {
+ wlc_phy_cal_txgainctrl_nphy(pi, 11, false);
+ if (pi->sh->hw_phytxchain != 3) {
+ pi->nphy_txcal_pwr_idx[1] =
+ pi->nphy_txcal_pwr_idx[0];
+ wlc_phy_txpwr_index_nphy(pi, 3,
+ pi->
+ nphy_txcal_pwr_idx[0],
+ true);
+ save_bbmult = true;
}
- if (freq <= 4920) {
- nvar_baseline_offset0 = 5;
- nvar_baseline_offset1 = 5;
- } else if ((freq > 4920) && (freq <= 5320)) {
- nvar_baseline_offset0 = 3;
- nvar_baseline_offset1 = 5;
- } else if ((freq > 5320) && (freq <= 5700)) {
- nvar_baseline_offset0 = 3;
- nvar_baseline_offset1 = 2;
+ } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
+ if (PHY_IPA(pi)) {
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ wlc_phy_cal_txgainctrl_nphy(pi, 12,
+ false);
+ } else {
+ pi->nphy_txcal_pwr_idx[0] = 80;
+ pi->nphy_txcal_pwr_idx[1] = 80;
+ wlc_phy_txpwr_index_nphy(pi, 3, 80,
+ false);
+ save_bbmult = true;
+ }
} else {
- nvar_baseline_offset0 = 4;
- nvar_baseline_offset1 = 0;
+ wlc_phy_internal_cal_txgain_nphy(pi);
+ save_bbmult = true;
}
- } else {
-
- crsminu_th = 0x3a;
- crsminl_th = 0x3a;
- w1clip_th = 20;
- if ((freq >= 4920) && (freq <= 5320)) {
- nvar_baseline_offset0 = 4;
- nvar_baseline_offset1 = 5;
- } else if ((freq > 5320) && (freq <= 5550)) {
- nvar_baseline_offset0 = 4;
- nvar_baseline_offset1 = 2;
+ } else if (NREV_IS(pi->pubpi.phy_rev, 6)) {
+ if (PHY_IPA(pi)) {
+ if (CHSPEC_IS2G(pi->radio_chanspec))
+ wlc_phy_cal_txgainctrl_nphy(pi, 12,
+ false);
+ else
+ wlc_phy_cal_txgainctrl_nphy(pi, 14,
+ false);
} else {
- nvar_baseline_offset0 = 5;
- nvar_baseline_offset1 = 3;
+ wlc_phy_internal_cal_txgain_nphy(pi);
+ save_bbmult = true;
}
}
- write_phy_reg(pi, 0x20, init_gaincode);
- write_phy_reg(pi, 0x2a7, init_gaincode);
+ } else {
+ wlc_phy_cal_txgainctrl_nphy(pi, 10, false);
+ }
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
- pi->pubpi.phy_corenum, 0x106, 16,
- rfseq_init_gain);
+ if (save_bbmult)
+ wlc_phy_table_read_nphy(pi, 15, 1, 87, 16,
+ &pi->nphy_txcal_bbmult);
+}
- write_phy_reg(pi, 0x22, clip1hi_gaincode);
- write_phy_reg(pi, 0x2a9, clip1hi_gaincode);
+static void
+wlc_phy_rfctrlintc_override_nphy(struct brcms_phy *pi, u8 field, u16 value,
+ u8 core_code)
+{
+ u16 mask;
+ u16 val;
+ u8 core;
- write_phy_reg(pi, 0x36, clip1md_gaincode_B);
- write_phy_reg(pi, 0x2ac, clip1md_gaincode_B);
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ for (core = 0; core < pi->pubpi.phy_corenum; core++) {
+ if (core_code == RADIO_MIMO_CORESEL_CORE1
+ && core == PHY_CORE_1)
+ continue;
+ else if (core_code == RADIO_MIMO_CORESEL_CORE2
+ && core == PHY_CORE_0)
+ continue;
- write_phy_reg(pi, 0x37, clip1lo_gaincode);
- write_phy_reg(pi, 0x2ad, clip1lo_gaincode);
- write_phy_reg(pi, 0x38, clip1lo_gaincode_B);
- write_phy_reg(pi, 0x2ae, clip1lo_gaincode_B);
+ if (NREV_LT(pi->pubpi.phy_rev, 7)) {
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 10, 0x20, 8,
- tia_gain_db);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 10, 0x20, 8,
- tia_gain_db);
+ mask = (0x1 << 10);
+ val = 1 << 10;
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x91 :
+ 0x92, mask, val);
+ }
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS1, 10, 0x20, 8,
- tia_gainbits);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAINBITS2, 10, 0x20, 8,
- tia_gainbits);
+ if (field == NPHY_RfctrlIntc_override_OFF) {
- mod_phy_reg(pi, 0x283, (0xff << 0), (crsminu_th << 0));
+ write_phy_reg(pi, (core == PHY_CORE_0) ? 0x91 :
+ 0x92, 0);
- if (chg_nbclip_th == 1) {
- write_phy_reg(pi, 0x2b, nbclip_th);
- write_phy_reg(pi, 0x41, nbclip_th);
- }
+ wlc_phy_force_rfseq_nphy(pi,
+ NPHY_RFSEQ_RESET2RX);
+ } else if (field == NPHY_RfctrlIntc_override_TRSW) {
- mod_phy_reg(pi, 0x300, (0x3f << 0), (w1clip_th << 0));
- mod_phy_reg(pi, 0x301, (0x3f << 0), (w1clip_th << 0));
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- mod_phy_reg(pi, 0x2e4,
- (0x3f << 0), (nvar_baseline_offset0 << 0));
+ mask = (0x1 << 6) | (0x1 << 7);
- mod_phy_reg(pi, 0x2e4,
- (0x3f << 6), (nvar_baseline_offset1 << 6));
+ val = value << 6;
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91 : 0x92,
+ mask, val);
- if (CHSPEC_IS20(pi->radio_chanspec)) {
+ or_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91 : 0x92,
+ (0x1 << 10));
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 8, 8,
- lna1_gain_db);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 8, 8,
- lna1_gain_db_2);
+ and_phy_reg(pi, 0x2ff, (u16)
+ ~(0x3 << 14));
+ or_phy_reg(pi, 0x2ff, (0x1 << 13));
+ or_phy_reg(pi, 0x2ff, (0x1 << 0));
+ } else {
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN1, 4, 0x10,
- 8, lna2_gain_db);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_GAIN2, 4, 0x10,
- 8, lna2_gain_db);
+ mask = (0x1 << 6) |
+ (0x1 << 7) |
+ (0x1 << 8) | (0x1 << 9);
+ val = value << 6;
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91 : 0x92,
+ mask, val);
- write_phy_reg(pi, 0x24, clip1md_gaincode);
- write_phy_reg(pi, 0x2ab, clip1md_gaincode);
- } else {
- mod_phy_reg(pi, 0x280, (0xff << 0), (crsminl_th << 0));
- }
+ mask = (0x1 << 0);
+ val = 1 << 0;
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0xe7 : 0xec,
+ mask, val);
- }
+ mask = (core == PHY_CORE_0) ?
+ (0x1 << 0) : (0x1 << 1);
+ val = 1 << ((core == PHY_CORE_0) ?
+ 0 : 1);
+ mod_phy_reg(pi, 0x78, mask, val);
+
+ SPINWAIT(((read_phy_reg(pi, 0x78) & val)
+ != 0), 10000);
+ if (WARN(read_phy_reg(pi, 0x78) & val,
+ "HW error: override failed"))
+ return;
+
+ mask = (0x1 << 0);
+ val = 0 << 0;
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0xe7 : 0xec,
+ mask, val);
+ }
+ } else if (field == NPHY_RfctrlIntc_override_PA) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+
+ mask = (0x1 << 4) | (0x1 << 5);
+
+ if (CHSPEC_IS5G(pi->radio_chanspec))
+ val = value << 5;
+ else
+ val = value << 4;
+
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91 : 0x92,
+ mask, val);
+ or_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91 : 0x92,
+ (0x1 << 12));
+ } else {
+
+ if (CHSPEC_IS5G(pi->radio_chanspec)) {
+ mask = (0x1 << 5);
+ val = value << 5;
+ } else {
+ mask = (0x1 << 4);
+ val = value << 4;
+ }
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91 : 0x92,
+ mask, val);
+ }
+ } else if (field ==
+ NPHY_RfctrlIntc_override_EXT_LNA_PU) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (CHSPEC_IS5G(pi->radio_chanspec)) {
+
+ mask = (0x1 << 0);
+ val = value << 0;
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91
+ : 0x92, mask, val);
+
+ mask = (0x1 << 2);
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91
+ : 0x92, mask, 0);
+ } else {
+
+ mask = (0x1 << 2);
+ val = value << 2;
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91
+ : 0x92, mask, val);
+
+ mask = (0x1 << 0);
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91
+ : 0x92, mask, 0);
+ }
+
+ mask = (0x1 << 11);
+ val = 1 << 11;
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91 : 0x92,
+ mask, val);
+ } else {
+
+ if (CHSPEC_IS5G(pi->radio_chanspec)) {
+ mask = (0x1 << 0);
+ val = value << 0;
+ } else {
+ mask = (0x1 << 2);
+ val = value << 2;
+ }
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91 : 0x92,
+ mask, val);
+ }
+ } else if (field ==
+ NPHY_RfctrlIntc_override_EXT_LNA_GAIN) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (CHSPEC_IS5G(pi->radio_chanspec)) {
+
+ mask = (0x1 << 1);
+ val = value << 1;
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91
+ : 0x92, mask, val);
+
+ mask = (0x1 << 3);
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91
+ : 0x92, mask, 0);
+ } else {
+
+ mask = (0x1 << 3);
+ val = value << 3;
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91
+ : 0x92, mask, val);
+
+ mask = (0x1 << 1);
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91
+ : 0x92, mask, 0);
+ }
+
+ mask = (0x1 << 11);
+ val = 1 << 11;
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91 : 0x92,
+ mask, val);
+ } else {
+
+ if (CHSPEC_IS5G(pi->radio_chanspec)) {
+ mask = (0x1 << 1);
+ val = value << 1;
+ } else {
+ mask = (0x1 << 3);
+ val = value << 3;
+ }
+ mod_phy_reg(pi,
+ (core ==
+ PHY_CORE_0) ? 0x91 : 0x92,
+ mask, val);
+ }
+ }
+ }
+ }
}
-static void wlc_phy_adjust_lnagaintbl_nphy(struct brcms_phy *pi)
+void
+wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi, s32 dBm_targetpower,
+ bool debug)
{
+ int gainctrl_loopidx;
uint core;
- int ctr;
- s16 gain_delta[2];
- u8 curr_channel;
- u16 minmax_gain[2];
- u16 regval[4];
+ u16 m0m1, curr_m0m1;
+ s32 delta_power;
+ s32 txpwrindex;
+ s32 qdBm_power[2];
+ u16 orig_BBConfig;
+ u16 phy_saveregs[4];
+ u32 freq_test;
+ u16 ampl_test = 250;
+ uint stepsize;
+ bool phyhang_avoid_state = false;
+
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ stepsize = 2;
+ else
+ stepsize = 1;
+
+ if (CHSPEC_IS40(pi->radio_chanspec))
+ freq_test = 5000;
+ else
+ freq_test = 2500;
+
+ wlc_phy_txpwr_index_nphy(pi, 1, pi->nphy_cal_orig_pwr_idx[0], true);
+ wlc_phy_txpwr_index_nphy(pi, 2, pi->nphy_cal_orig_pwr_idx[1], true);
if (pi->phyhang_avoid)
wlc_phy_stay_in_carriersearch_nphy(pi, true);
- if (pi->nphy_gain_boost) {
- if ((CHSPEC_IS2G(pi->radio_chanspec))) {
+ phyhang_avoid_state = pi->phyhang_avoid;
+ pi->phyhang_avoid = false;
- gain_delta[0] = 6;
- gain_delta[1] = 6;
- } else {
+ phy_saveregs[0] = read_phy_reg(pi, 0x91);
+ phy_saveregs[1] = read_phy_reg(pi, 0x92);
+ phy_saveregs[2] = read_phy_reg(pi, 0xe7);
+ phy_saveregs[3] = read_phy_reg(pi, 0xec);
+ wlc_phy_rfctrlintc_override_nphy(pi, NPHY_RfctrlIntc_override_PA, 1,
+ RADIO_MIMO_CORESEL_CORE1 |
+ RADIO_MIMO_CORESEL_CORE2);
- curr_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
- gain_delta[0] =
- (s16)
- PHY_HW_ROUND(((nphy_lnagain_est0[0] *
- curr_channel) +
- nphy_lnagain_est0[1]), 13);
- gain_delta[1] =
- (s16)
- PHY_HW_ROUND(((nphy_lnagain_est1[0] *
- curr_channel) +
- nphy_lnagain_est1[1]), 13);
- }
+ if (!debug) {
+ wlc_phy_rfctrlintc_override_nphy(pi,
+ NPHY_RfctrlIntc_override_TRSW,
+ 0x2, RADIO_MIMO_CORESEL_CORE1);
+ wlc_phy_rfctrlintc_override_nphy(pi,
+ NPHY_RfctrlIntc_override_TRSW,
+ 0x8, RADIO_MIMO_CORESEL_CORE2);
} else {
-
- gain_delta[0] = 0;
- gain_delta[1] = 0;
+ wlc_phy_rfctrlintc_override_nphy(pi,
+ NPHY_RfctrlIntc_override_TRSW,
+ 0x1, RADIO_MIMO_CORESEL_CORE1);
+ wlc_phy_rfctrlintc_override_nphy(pi,
+ NPHY_RfctrlIntc_override_TRSW,
+ 0x7, RADIO_MIMO_CORESEL_CORE2);
}
+ orig_BBConfig = read_phy_reg(pi, 0x01);
+ mod_phy_reg(pi, 0x01, (0x1 << 15), 0);
+
+ wlc_phy_table_read_nphy(pi, 15, 1, 87, 16, &m0m1);
+
for (core = 0; core < pi->pubpi.phy_corenum; core++) {
- if (pi->nphy_elna_gain_config) {
+ txpwrindex = (s32) pi->nphy_cal_orig_pwr_idx[core];
- regval[0] = nphy_def_lnagains[2] + gain_delta[core];
- regval[1] = nphy_def_lnagains[3] + gain_delta[core];
- regval[2] = nphy_def_lnagains[3] + gain_delta[core];
- regval[3] = nphy_def_lnagains[3] + gain_delta[core];
- } else {
- for (ctr = 0; ctr < 4; ctr++) {
- regval[ctr] =
- nphy_def_lnagains[ctr] + gain_delta[core];
+ for (gainctrl_loopidx = 0; gainctrl_loopidx < 2;
+ gainctrl_loopidx++) {
+ wlc_phy_tx_tone_nphy(pi, freq_test, ampl_test, 0, 0,
+ false);
+
+ if (core == PHY_CORE_0)
+ curr_m0m1 = m0m1 & 0xff00;
+ else
+ curr_m0m1 = m0m1 & 0x00ff;
+
+ wlc_phy_table_write_nphy(pi, 15, 1, 87, 16, &curr_m0m1);
+ wlc_phy_table_write_nphy(pi, 15, 1, 95, 16, &curr_m0m1);
+
+ udelay(50);
+
+ wlc_phy_est_tonepwr_nphy(pi, qdBm_power,
+ NPHY_CAL_TSSISAMPS);
+
+ pi->nphy_bb_mult_save = 0;
+ wlc_phy_stopplayback_nphy(pi);
+
+ delta_power = (dBm_targetpower * 4) - qdBm_power[core];
+
+ txpwrindex -= stepsize * delta_power;
+ if (txpwrindex < 0)
+ txpwrindex = 0;
+ else if (txpwrindex > 127)
+ txpwrindex = 127;
+
+ if (CHSPEC_IS5G(pi->radio_chanspec)) {
+ if (NREV_IS(pi->pubpi.phy_rev, 4) &&
+ (pi->srom_fem5g.extpagain == 3)) {
+ if (txpwrindex < 30)
+ txpwrindex = 30;
+ }
+ } else {
+ if (NREV_GE(pi->pubpi.phy_rev, 5) &&
+ (pi->srom_fem2g.extpagain == 3)) {
+ if (txpwrindex < 50)
+ txpwrindex = 50;
+ }
}
+
+ wlc_phy_txpwr_index_nphy(pi, (1 << core),
+ (u8) txpwrindex, true);
}
- wlc_phy_table_write_nphy(pi, core, 4, 8, 16, regval);
- minmax_gain[core] =
- (u16) (nphy_def_lnagains[2] + gain_delta[core] + 4);
+ pi->nphy_txcal_pwr_idx[core] = (u8) txpwrindex;
+
+ if (debug) {
+ u16 radio_gain;
+ u16 dbg_m0m1;
+
+ wlc_phy_table_read_nphy(pi, 15, 1, 87, 16, &dbg_m0m1);
+
+ wlc_phy_tx_tone_nphy(pi, freq_test, ampl_test, 0, 0,
+ false);
+
+ wlc_phy_table_write_nphy(pi, 15, 1, 87, 16, &dbg_m0m1);
+ wlc_phy_table_write_nphy(pi, 15, 1, 95, 16, &dbg_m0m1);
+
+ udelay(100);
+
+ wlc_phy_est_tonepwr_nphy(pi, qdBm_power,
+ NPHY_CAL_TSSISAMPS);
+
+ wlc_phy_table_read_nphy(pi, 7, 1, (0x110 + core), 16,
+ &radio_gain);
+
+ mdelay(4000);
+ pi->nphy_bb_mult_save = 0;
+ wlc_phy_stopplayback_nphy(pi);
+ }
}
- mod_phy_reg(pi, 0x1e, (0xff << 0), (minmax_gain[0] << 0));
- mod_phy_reg(pi, 0x34, (0xff << 0), (minmax_gain[1] << 0));
+ wlc_phy_txpwr_index_nphy(pi, 1, pi->nphy_txcal_pwr_idx[0], true);
+ wlc_phy_txpwr_index_nphy(pi, 2, pi->nphy_txcal_pwr_idx[1], true);
+
+ wlc_phy_table_read_nphy(pi, 15, 1, 87, 16, &pi->nphy_txcal_bbmult);
+
+ write_phy_reg(pi, 0x01, orig_BBConfig);
+
+ write_phy_reg(pi, 0x91, phy_saveregs[0]);
+ write_phy_reg(pi, 0x92, phy_saveregs[1]);
+ write_phy_reg(pi, 0xe7, phy_saveregs[2]);
+ write_phy_reg(pi, 0xec, phy_saveregs[3]);
+
+ pi->phyhang_avoid = phyhang_avoid_state;
if (pi->phyhang_avoid)
wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
-void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on)
+static void wlc_phy_savecal_nphy(struct brcms_phy *pi)
{
- if (on) {
+ void *tbl_ptr;
+ int coreNum;
+ u16 *txcal_radio_regs = NULL;
+
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, true);
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+
+ wlc_phy_rx_iq_coeffs_nphy(pi, 0,
+ &pi->calibration_cache.
+ rxcal_coeffs_2G);
+
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- if (!pi->radio_is_on) {
- wlc_phy_radio_preinit_205x(pi);
- wlc_phy_radio_init_2057(pi);
- wlc_phy_radio_postinit_2057(pi);
- }
+ txcal_radio_regs =
+ pi->calibration_cache.txcal_radio_regs_2G;
+ } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- wlc_phy_chanspec_set((struct brcms_phy_pub *) pi,
- pi->radio_chanspec);
+ pi->calibration_cache.txcal_radio_regs_2G[0] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_FINE_I |
+ RADIO_2056_TX0);
+ pi->calibration_cache.txcal_radio_regs_2G[1] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_FINE_Q |
+ RADIO_2056_TX0);
+ pi->calibration_cache.txcal_radio_regs_2G[2] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_FINE_I |
+ RADIO_2056_TX1);
+ pi->calibration_cache.txcal_radio_regs_2G[3] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_FINE_Q |
+ RADIO_2056_TX1);
+
+ pi->calibration_cache.txcal_radio_regs_2G[4] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_I |
+ RADIO_2056_TX0);
+ pi->calibration_cache.txcal_radio_regs_2G[5] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_Q |
+ RADIO_2056_TX0);
+ pi->calibration_cache.txcal_radio_regs_2G[6] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_I |
+ RADIO_2056_TX1);
+ pi->calibration_cache.txcal_radio_regs_2G[7] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_Q |
+ RADIO_2056_TX1);
+ } else {
+ pi->calibration_cache.txcal_radio_regs_2G[0] =
+ read_radio_reg(pi, RADIO_2055_CORE1_TX_VOS_CNCL);
+ pi->calibration_cache.txcal_radio_regs_2G[1] =
+ read_radio_reg(pi, RADIO_2055_CORE2_TX_VOS_CNCL);
+ pi->calibration_cache.txcal_radio_regs_2G[2] =
+ read_radio_reg(pi, RADIO_2055_CORE1_TX_BB_MXGM);
+ pi->calibration_cache.txcal_radio_regs_2G[3] =
+ read_radio_reg(pi, RADIO_2055_CORE2_TX_BB_MXGM);
+ }
+
+ pi->nphy_iqcal_chanspec_2G = pi->radio_chanspec;
+ tbl_ptr = pi->calibration_cache.txcal_coeffs_2G;
+ } else {
+
+ wlc_phy_rx_iq_coeffs_nphy(pi, 0,
+ &pi->calibration_cache.
+ rxcal_coeffs_5G);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ txcal_radio_regs =
+ pi->calibration_cache.txcal_radio_regs_5G;
} else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- wlc_phy_radio_preinit_205x(pi);
- wlc_phy_radio_init_2056(pi);
- wlc_phy_radio_postinit_2056(pi);
- wlc_phy_chanspec_set((struct brcms_phy_pub *) pi,
- pi->radio_chanspec);
+ pi->calibration_cache.txcal_radio_regs_5G[0] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_FINE_I |
+ RADIO_2056_TX0);
+ pi->calibration_cache.txcal_radio_regs_5G[1] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_FINE_Q |
+ RADIO_2056_TX0);
+ pi->calibration_cache.txcal_radio_regs_5G[2] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_FINE_I |
+ RADIO_2056_TX1);
+ pi->calibration_cache.txcal_radio_regs_5G[3] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_FINE_Q |
+ RADIO_2056_TX1);
+
+ pi->calibration_cache.txcal_radio_regs_5G[4] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_I |
+ RADIO_2056_TX0);
+ pi->calibration_cache.txcal_radio_regs_5G[5] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_Q |
+ RADIO_2056_TX0);
+ pi->calibration_cache.txcal_radio_regs_5G[6] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_I |
+ RADIO_2056_TX1);
+ pi->calibration_cache.txcal_radio_regs_5G[7] =
+ read_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_Q |
+ RADIO_2056_TX1);
} else {
- wlc_phy_radio_preinit_2055(pi);
- wlc_phy_radio_init_2055(pi);
- wlc_phy_radio_postinit_2055(pi);
+ pi->calibration_cache.txcal_radio_regs_5G[0] =
+ read_radio_reg(pi, RADIO_2055_CORE1_TX_VOS_CNCL);
+ pi->calibration_cache.txcal_radio_regs_5G[1] =
+ read_radio_reg(pi, RADIO_2055_CORE2_TX_VOS_CNCL);
+ pi->calibration_cache.txcal_radio_regs_5G[2] =
+ read_radio_reg(pi, RADIO_2055_CORE1_TX_BB_MXGM);
+ pi->calibration_cache.txcal_radio_regs_5G[3] =
+ read_radio_reg(pi, RADIO_2055_CORE2_TX_BB_MXGM);
}
- pi->radio_is_on = true;
+ pi->nphy_iqcal_chanspec_5G = pi->radio_chanspec;
+ tbl_ptr = pi->calibration_cache.txcal_coeffs_5G;
+ }
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ for (coreNum = 0; coreNum <= 1; coreNum++) {
+
+ txcal_radio_regs[2 * coreNum] =
+ READ_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
+ LOFT_FINE_I);
+ txcal_radio_regs[2 * coreNum + 1] =
+ READ_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
+ LOFT_FINE_Q);
+
+ txcal_radio_regs[2 * coreNum + 4] =
+ READ_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
+ LOFT_COARSE_I);
+ txcal_radio_regs[2 * coreNum + 5] =
+ READ_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
+ LOFT_COARSE_Q);
+ }
+ }
+ wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_IQLOCAL, 8, 80, 16, tbl_ptr);
+
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, false);
+}
+
+static void wlc_phy_tx_iq_war_nphy(struct brcms_phy *pi)
+{
+ struct nphy_iq_comp tx_comp;
+
+ wlc_phy_table_read_nphy(pi, 15, 4, 0x50, 16, &tx_comp);
+
+ wlapi_bmac_write_shm(pi->sh->physhim, M_20IN40_IQ, tx_comp.a0);
+ wlapi_bmac_write_shm(pi->sh->physhim, M_20IN40_IQ + 2, tx_comp.b0);
+ wlapi_bmac_write_shm(pi->sh->physhim, M_20IN40_IQ + 4, tx_comp.a1);
+ wlapi_bmac_write_shm(pi->sh->physhim, M_20IN40_IQ + 6, tx_comp.b1);
+}
+
+static void wlc_phy_restorecal_nphy(struct brcms_phy *pi)
+{
+ u16 *loft_comp;
+ u16 txcal_coeffs_bphy[4];
+ u16 *tbl_ptr;
+ int coreNum;
+ u16 *txcal_radio_regs = NULL;
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if (pi->nphy_iqcal_chanspec_2G == 0)
+ return;
+
+ tbl_ptr = pi->calibration_cache.txcal_coeffs_2G;
+ loft_comp = &pi->calibration_cache.txcal_coeffs_2G[5];
} else {
+ if (pi->nphy_iqcal_chanspec_5G == 0)
+ return;
- if (NREV_GE(pi->pubpi.phy_rev, 3)
- && NREV_LT(pi->pubpi.phy_rev, 7)) {
- and_phy_reg(pi, 0x78, ~RFCC_CHIP0_PU);
- mod_radio_reg(pi, RADIO_2056_SYN_COM_PU, 0x2, 0x0);
+ tbl_ptr = pi->calibration_cache.txcal_coeffs_5G;
+ loft_comp = &pi->calibration_cache.txcal_coeffs_5G[5];
+ }
+
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_IQLOCAL, 4, 80, 16, tbl_ptr);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ txcal_coeffs_bphy[0] = tbl_ptr[0];
+ txcal_coeffs_bphy[1] = tbl_ptr[1];
+ txcal_coeffs_bphy[2] = tbl_ptr[2];
+ txcal_coeffs_bphy[3] = tbl_ptr[3];
+ } else {
+ txcal_coeffs_bphy[0] = 0;
+ txcal_coeffs_bphy[1] = 0;
+ txcal_coeffs_bphy[2] = 0;
+ txcal_coeffs_bphy[3] = 0;
+ }
+
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_IQLOCAL, 4, 88, 16,
+ txcal_coeffs_bphy);
+
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_IQLOCAL, 2, 85, 16, loft_comp);
+
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_IQLOCAL, 2, 93, 16, loft_comp);
+
+ if (NREV_LT(pi->pubpi.phy_rev, 2))
+ wlc_phy_tx_iq_war_nphy(pi);
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ txcal_radio_regs =
+ pi->calibration_cache.txcal_radio_regs_2G;
+ } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
write_radio_reg(pi,
- RADIO_2056_TX_PADA_BOOST_TUNE |
- RADIO_2056_TX0, 0);
+ RADIO_2056_TX_LOFT_FINE_I |
+ RADIO_2056_TX0,
+ pi->calibration_cache.
+ txcal_radio_regs_2G[0]);
write_radio_reg(pi,
- RADIO_2056_TX_PADG_BOOST_TUNE |
- RADIO_2056_TX0, 0);
+ RADIO_2056_TX_LOFT_FINE_Q |
+ RADIO_2056_TX0,
+ pi->calibration_cache.
+ txcal_radio_regs_2G[1]);
write_radio_reg(pi,
- RADIO_2056_TX_PGAA_BOOST_TUNE |
- RADIO_2056_TX0, 0);
+ RADIO_2056_TX_LOFT_FINE_I |
+ RADIO_2056_TX1,
+ pi->calibration_cache.
+ txcal_radio_regs_2G[2]);
write_radio_reg(pi,
- RADIO_2056_TX_PGAG_BOOST_TUNE |
- RADIO_2056_TX0, 0);
- mod_radio_reg(pi,
- RADIO_2056_TX_MIXA_BOOST_TUNE |
- RADIO_2056_TX0, 0xf0, 0);
+ RADIO_2056_TX_LOFT_FINE_Q |
+ RADIO_2056_TX1,
+ pi->calibration_cache.
+ txcal_radio_regs_2G[3]);
+
write_radio_reg(pi,
- RADIO_2056_TX_MIXG_BOOST_TUNE |
- RADIO_2056_TX0, 0);
+ RADIO_2056_TX_LOFT_COARSE_I |
+ RADIO_2056_TX0,
+ pi->calibration_cache.
+ txcal_radio_regs_2G[4]);
+ write_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_Q |
+ RADIO_2056_TX0,
+ pi->calibration_cache.
+ txcal_radio_regs_2G[5]);
+ write_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_I |
+ RADIO_2056_TX1,
+ pi->calibration_cache.
+ txcal_radio_regs_2G[6]);
+ write_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_Q |
+ RADIO_2056_TX1,
+ pi->calibration_cache.
+ txcal_radio_regs_2G[7]);
+ } else {
+ write_radio_reg(pi, RADIO_2055_CORE1_TX_VOS_CNCL,
+ pi->calibration_cache.
+ txcal_radio_regs_2G[0]);
+ write_radio_reg(pi, RADIO_2055_CORE2_TX_VOS_CNCL,
+ pi->calibration_cache.
+ txcal_radio_regs_2G[1]);
+ write_radio_reg(pi, RADIO_2055_CORE1_TX_BB_MXGM,
+ pi->calibration_cache.
+ txcal_radio_regs_2G[2]);
+ write_radio_reg(pi, RADIO_2055_CORE2_TX_BB_MXGM,
+ pi->calibration_cache.
+ txcal_radio_regs_2G[3]);
+ }
+
+ wlc_phy_rx_iq_coeffs_nphy(pi, 1,
+ &pi->calibration_cache.
+ rxcal_coeffs_2G);
+ } else {
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ txcal_radio_regs =
+ pi->calibration_cache.txcal_radio_regs_5G;
+ } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
write_radio_reg(pi,
- RADIO_2056_TX_PADA_BOOST_TUNE |
- RADIO_2056_TX1, 0);
+ RADIO_2056_TX_LOFT_FINE_I |
+ RADIO_2056_TX0,
+ pi->calibration_cache.
+ txcal_radio_regs_5G[0]);
write_radio_reg(pi,
- RADIO_2056_TX_PADG_BOOST_TUNE |
- RADIO_2056_TX1, 0);
+ RADIO_2056_TX_LOFT_FINE_Q |
+ RADIO_2056_TX0,
+ pi->calibration_cache.
+ txcal_radio_regs_5G[1]);
write_radio_reg(pi,
- RADIO_2056_TX_PGAA_BOOST_TUNE |
- RADIO_2056_TX1, 0);
+ RADIO_2056_TX_LOFT_FINE_I |
+ RADIO_2056_TX1,
+ pi->calibration_cache.
+ txcal_radio_regs_5G[2]);
write_radio_reg(pi,
- RADIO_2056_TX_PGAG_BOOST_TUNE |
- RADIO_2056_TX1, 0);
- mod_radio_reg(pi,
- RADIO_2056_TX_MIXA_BOOST_TUNE |
- RADIO_2056_TX1, 0xf0, 0);
+ RADIO_2056_TX_LOFT_FINE_Q |
+ RADIO_2056_TX1,
+ pi->calibration_cache.
+ txcal_radio_regs_5G[3]);
+
write_radio_reg(pi,
- RADIO_2056_TX_MIXG_BOOST_TUNE |
- RADIO_2056_TX1, 0);
+ RADIO_2056_TX_LOFT_COARSE_I |
+ RADIO_2056_TX0,
+ pi->calibration_cache.
+ txcal_radio_regs_5G[4]);
+ write_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_Q |
+ RADIO_2056_TX0,
+ pi->calibration_cache.
+ txcal_radio_regs_5G[5]);
+ write_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_I |
+ RADIO_2056_TX1,
+ pi->calibration_cache.
+ txcal_radio_regs_5G[6]);
+ write_radio_reg(pi,
+ RADIO_2056_TX_LOFT_COARSE_Q |
+ RADIO_2056_TX1,
+ pi->calibration_cache.
+ txcal_radio_regs_5G[7]);
+ } else {
+ write_radio_reg(pi, RADIO_2055_CORE1_TX_VOS_CNCL,
+ pi->calibration_cache.
+ txcal_radio_regs_5G[0]);
+ write_radio_reg(pi, RADIO_2055_CORE2_TX_VOS_CNCL,
+ pi->calibration_cache.
+ txcal_radio_regs_5G[1]);
+ write_radio_reg(pi, RADIO_2055_CORE1_TX_BB_MXGM,
+ pi->calibration_cache.
+ txcal_radio_regs_5G[2]);
+ write_radio_reg(pi, RADIO_2055_CORE2_TX_BB_MXGM,
+ pi->calibration_cache.
+ txcal_radio_regs_5G[3]);
+ }
- pi->radio_is_on = false;
+ wlc_phy_rx_iq_coeffs_nphy(pi, 1,
+ &pi->calibration_cache.
+ rxcal_coeffs_5G);
+ }
+
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ for (coreNum = 0; coreNum <= 1; coreNum++) {
+
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
+ LOFT_FINE_I,
+ txcal_radio_regs[2 * coreNum]);
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
+ LOFT_FINE_Q,
+ txcal_radio_regs[2 * coreNum + 1]);
+
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
+ LOFT_COARSE_I,
+ txcal_radio_regs[2 * coreNum + 4]);
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
+ LOFT_COARSE_Q,
+ txcal_radio_regs[2 * coreNum + 5]);
}
+ }
+}
- if (NREV_GE(pi->pubpi.phy_rev, 8)) {
- and_phy_reg(pi, 0x78, ~RFCC_CHIP0_PU);
- pi->radio_is_on = false;
+static void wlc_phy_txpwrctrl_coeff_setup_nphy(struct brcms_phy *pi)
+{
+ u32 idx;
+ u16 iqloCalbuf[7];
+ u32 iqcomp, locomp, curr_locomp;
+ s8 locomp_i, locomp_q;
+ s8 curr_locomp_i, curr_locomp_q;
+ u32 tbl_id, tbl_len, tbl_offset;
+ u32 regval[128];
+
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, true);
+
+ wlc_phy_table_read_nphy(pi, 15, 7, 80, 16, iqloCalbuf);
+
+ tbl_len = 128;
+ tbl_offset = 320;
+ for (tbl_id = NPHY_TBL_ID_CORE1TXPWRCTL;
+ tbl_id <= NPHY_TBL_ID_CORE2TXPWRCTL; tbl_id++) {
+ iqcomp =
+ (tbl_id ==
+ 26) ? (((u32) (iqloCalbuf[0] & 0x3ff)) << 10) |
+ (iqloCalbuf[1] & 0x3ff)
+ : (((u32) (iqloCalbuf[2] & 0x3ff)) << 10) |
+ (iqloCalbuf[3] & 0x3ff);
+
+ for (idx = 0; idx < tbl_len; idx++)
+ regval[idx] = iqcomp;
+ wlc_phy_table_write_nphy(pi, tbl_id, tbl_len, tbl_offset, 32,
+ regval);
+ }
+
+ tbl_offset = 448;
+ for (tbl_id = NPHY_TBL_ID_CORE1TXPWRCTL;
+ tbl_id <= NPHY_TBL_ID_CORE2TXPWRCTL; tbl_id++) {
+
+ locomp =
+ (u32) ((tbl_id == 26) ? iqloCalbuf[5] : iqloCalbuf[6]);
+ locomp_i = (s8) ((locomp >> 8) & 0xff);
+ locomp_q = (s8) ((locomp) & 0xff);
+ for (idx = 0; idx < tbl_len; idx++) {
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ curr_locomp_i = locomp_i;
+ curr_locomp_q = locomp_q;
+ } else {
+ curr_locomp_i = (s8) ((locomp_i *
+ nphy_tpc_loscale[idx] +
+ 128) >> 8);
+ curr_locomp_q =
+ (s8) ((locomp_q *
+ nphy_tpc_loscale[idx] +
+ 128) >> 8);
+ }
+ curr_locomp = (u32) ((curr_locomp_i & 0xff) << 8);
+ curr_locomp |= (u32) (curr_locomp_q & 0xff);
+ regval[idx] = curr_locomp;
}
+ wlc_phy_table_write_nphy(pi, tbl_id, tbl_len, tbl_offset, 32,
+ regval);
+ }
+
+ if (NREV_LT(pi->pubpi.phy_rev, 2)) {
+ wlapi_bmac_write_shm(pi->sh->physhim, M_CURR_IDX1, 0xFFFF);
+ wlapi_bmac_write_shm(pi->sh->physhim, M_CURR_IDX2, 0xFFFF);
}
+
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
-static void wlc_phy_radio_preinit_2055(struct brcms_phy *pi)
+static void wlc_phy_txlpfbw_nphy(struct brcms_phy *pi)
{
+ u8 tx_lpf_bw = 0;
- and_phy_reg(pi, 0x78, ~RFCC_POR_FORCE);
- or_phy_reg(pi, 0x78, RFCC_CHIP0_PU | RFCC_OE_POR_FORCE);
+ if (NREV_GE(pi->pubpi.phy_rev, 3) && NREV_LT(pi->pubpi.phy_rev, 7)) {
+ if (CHSPEC_IS40(pi->radio_chanspec))
+ tx_lpf_bw = 3;
+ else
+ tx_lpf_bw = 1;
- or_phy_reg(pi, 0x78, RFCC_POR_FORCE);
+ if (PHY_IPA(pi)) {
+ if (CHSPEC_IS40(pi->radio_chanspec))
+ tx_lpf_bw = 5;
+ else
+ tx_lpf_bw = 4;
+ }
+
+ write_phy_reg(pi, 0xe8,
+ (tx_lpf_bw << 0) |
+ (tx_lpf_bw << 3) |
+ (tx_lpf_bw << 6) | (tx_lpf_bw << 9));
+
+ if (PHY_IPA(pi)) {
+
+ if (CHSPEC_IS40(pi->radio_chanspec))
+ tx_lpf_bw = 4;
+ else
+ tx_lpf_bw = 1;
+
+ write_phy_reg(pi, 0xe9,
+ (tx_lpf_bw << 0) |
+ (tx_lpf_bw << 3) |
+ (tx_lpf_bw << 6) | (tx_lpf_bw << 9));
+ }
+ }
}
-static void wlc_phy_radio_init_2055(struct brcms_phy *pi)
+static void
+wlc_phy_adjust_rx_analpfbw_nphy(struct brcms_phy *pi, u16 reduction_factr)
{
- wlc_phy_init_radio_regs(pi, regs_2055, RADIO_DEFAULT_CORE);
+ if (NREV_GE(pi->pubpi.phy_rev, 3) && NREV_LT(pi->pubpi.phy_rev, 7)) {
+ if ((CHSPEC_CHANNEL(pi->radio_chanspec) == 11) &&
+ CHSPEC_IS40(pi->radio_chanspec)) {
+ if (!pi->nphy_anarxlpf_adjusted) {
+ write_radio_reg(pi,
+ (RADIO_2056_RX_RXLPF_RCCAL_LPC |
+ RADIO_2056_RX0),
+ ((pi->nphy_rccal_value +
+ reduction_factr) | 0x80));
+
+ pi->nphy_anarxlpf_adjusted = true;
+ }
+ } else {
+ if (pi->nphy_anarxlpf_adjusted) {
+ write_radio_reg(pi,
+ (RADIO_2056_RX_RXLPF_RCCAL_LPC |
+ RADIO_2056_RX0),
+ (pi->nphy_rccal_value | 0x80));
+
+ pi->nphy_anarxlpf_adjusted = false;
+ }
+ }
+ }
}
-static void wlc_phy_radio_postinit_2055(struct brcms_phy *pi)
+static void
+wlc_phy_adjust_min_noisevar_nphy(struct brcms_phy *pi, int ntones,
+ int *tone_id_buf, u32 *noise_var_buf)
{
+ int i;
+ u32 offset;
+ int tone_id;
+ int tbllen =
+ CHSPEC_IS40(pi->radio_chanspec) ?
+ NPHY_NOISEVAR_TBLLEN40 : NPHY_NOISEVAR_TBLLEN20;
- and_radio_reg(pi, RADIO_2055_MASTER_CNTRL1,
- ~(RADIO_2055_JTAGCTRL_MASK | RADIO_2055_JTAGSYNC_MASK));
+ if (pi->nphy_noisevars_adjusted) {
+ for (i = 0; i < pi->nphy_saved_noisevars.bufcount; i++) {
+ tone_id = pi->nphy_saved_noisevars.tone_id[i];
+ offset = (tone_id >= 0) ?
+ ((tone_id *
+ 2) + 1) : (tbllen + (tone_id * 2) + 1);
+ wlc_phy_table_write_nphy(
+ pi, NPHY_TBL_ID_NOISEVAR, 1,
+ offset, 32,
+ &pi->nphy_saved_noisevars.min_noise_vars[i]);
+ }
- if (((pi->sh->sromrev >= 4)
- && !(pi->sh->boardflags2 & BFL2_RXBB_INT_REG_DIS))
- || ((pi->sh->sromrev < 4))) {
- and_radio_reg(pi, RADIO_2055_CORE1_RXBB_REGULATOR, 0x7F);
- and_radio_reg(pi, RADIO_2055_CORE2_RXBB_REGULATOR, 0x7F);
+ pi->nphy_saved_noisevars.bufcount = 0;
+ pi->nphy_noisevars_adjusted = false;
}
- mod_radio_reg(pi, RADIO_2055_RRCCAL_N_OPT_SEL, 0x3F, 0x2C);
- write_radio_reg(pi, RADIO_2055_CAL_MISC, 0x3C);
+ if ((noise_var_buf != NULL) && (tone_id_buf != NULL)) {
+ pi->nphy_saved_noisevars.bufcount = 0;
- and_radio_reg(pi, RADIO_2055_CAL_MISC,
- ~(RADIO_2055_RRCAL_START | RADIO_2055_RRCAL_RST_N));
+ for (i = 0; i < ntones; i++) {
+ tone_id = tone_id_buf[i];
+ offset = (tone_id >= 0) ?
+ ((tone_id * 2) + 1) :
+ (tbllen + (tone_id * 2) + 1);
+ pi->nphy_saved_noisevars.tone_id[i] = tone_id;
+ wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1,
+ offset, 32,
+ &pi->nphy_saved_noisevars.
+ min_noise_vars[i]);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1,
+ offset, 32, &noise_var_buf[i]);
+ pi->nphy_saved_noisevars.bufcount++;
+ }
- or_radio_reg(pi, RADIO_2055_CAL_LPO_CNTRL, RADIO_2055_CAL_LPO_ENABLE);
+ pi->nphy_noisevars_adjusted = true;
+ }
+}
- or_radio_reg(pi, RADIO_2055_CAL_MISC, RADIO_2055_RRCAL_RST_N);
+static void wlc_phy_adjust_crsminpwr_nphy(struct brcms_phy *pi, u8 minpwr)
+{
+ u16 regval;
- udelay(1000);
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ if ((CHSPEC_CHANNEL(pi->radio_chanspec) == 11) &&
+ CHSPEC_IS40(pi->radio_chanspec)) {
+ if (!pi->nphy_crsminpwr_adjusted) {
+ regval = read_phy_reg(pi, 0x27d);
+ pi->nphy_crsminpwr[0] = regval & 0xff;
+ regval &= 0xff00;
+ regval |= (u16) minpwr;
+ write_phy_reg(pi, 0x27d, regval);
- or_radio_reg(pi, RADIO_2055_CAL_MISC, RADIO_2055_RRCAL_START);
+ regval = read_phy_reg(pi, 0x280);
+ pi->nphy_crsminpwr[1] = regval & 0xff;
+ regval &= 0xff00;
+ regval |= (u16) minpwr;
+ write_phy_reg(pi, 0x280, regval);
- SPINWAIT(((read_radio_reg(pi, RADIO_2055_CAL_COUNTER_OUT2) &
- RADIO_2055_RCAL_DONE) != RADIO_2055_RCAL_DONE), 2000);
+ regval = read_phy_reg(pi, 0x283);
+ pi->nphy_crsminpwr[2] = regval & 0xff;
+ regval &= 0xff00;
+ regval |= (u16) minpwr;
+ write_phy_reg(pi, 0x283, regval);
- if (WARN((read_radio_reg(pi, RADIO_2055_CAL_COUNTER_OUT2) &
- RADIO_2055_RCAL_DONE) != RADIO_2055_RCAL_DONE,
- "HW error: radio calibration1\n"))
+ pi->nphy_crsminpwr_adjusted = true;
+ }
+ } else {
+ if (pi->nphy_crsminpwr_adjusted) {
+ regval = read_phy_reg(pi, 0x27d);
+ regval &= 0xff00;
+ regval |= pi->nphy_crsminpwr[0];
+ write_phy_reg(pi, 0x27d, regval);
+
+ regval = read_phy_reg(pi, 0x280);
+ regval &= 0xff00;
+ regval |= pi->nphy_crsminpwr[1];
+ write_phy_reg(pi, 0x280, regval);
+
+ regval = read_phy_reg(pi, 0x283);
+ regval &= 0xff00;
+ regval |= pi->nphy_crsminpwr[2];
+ write_phy_reg(pi, 0x283, regval);
+
+ pi->nphy_crsminpwr_adjusted = false;
+ }
+ }
+ }
+}
+
+static void wlc_phy_spurwar_nphy(struct brcms_phy *pi)
+{
+ u16 cur_channel = 0;
+ int nphy_adj_tone_id_buf[] = { 57, 58 };
+ u32 nphy_adj_noise_var_buf[] = { 0x3ff, 0x3ff };
+ bool isAdjustNoiseVar = false;
+ uint numTonesAdjust = 0;
+ u32 tempval = 0;
+
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, true);
+
+ cur_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
+
+ if (pi->nphy_gband_spurwar_en) {
+
+ wlc_phy_adjust_rx_analpfbw_nphy(
+ pi,
+ NPHY_ANARXLPFBW_REDUCTIONFACT);
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if ((cur_channel == 11)
+ && CHSPEC_IS40(pi->radio_chanspec))
+ wlc_phy_adjust_min_noisevar_nphy(
+ pi, 2,
+ nphy_adj_tone_id_buf,
+ nphy_adj_noise_var_buf);
+ else
+ wlc_phy_adjust_min_noisevar_nphy(pi, 0,
+ NULL,
+ NULL);
+ }
+
+ wlc_phy_adjust_crsminpwr_nphy(pi,
+ NPHY_ADJUSTED_MINCRSPOWER);
+ }
+
+ if ((pi->nphy_gband_spurwar2_en)
+ && CHSPEC_IS2G(pi->radio_chanspec)) {
+
+ if (CHSPEC_IS40(pi->radio_chanspec)) {
+ switch (cur_channel) {
+ case 3:
+ nphy_adj_tone_id_buf[0] = 57;
+ nphy_adj_tone_id_buf[1] = 58;
+ nphy_adj_noise_var_buf[0] = 0x22f;
+ nphy_adj_noise_var_buf[1] = 0x25f;
+ isAdjustNoiseVar = true;
+ break;
+ case 4:
+ nphy_adj_tone_id_buf[0] = 41;
+ nphy_adj_tone_id_buf[1] = 42;
+ nphy_adj_noise_var_buf[0] = 0x22f;
+ nphy_adj_noise_var_buf[1] = 0x25f;
+ isAdjustNoiseVar = true;
+ break;
+ case 5:
+ nphy_adj_tone_id_buf[0] = 25;
+ nphy_adj_tone_id_buf[1] = 26;
+ nphy_adj_noise_var_buf[0] = 0x24f;
+ nphy_adj_noise_var_buf[1] = 0x25f;
+ isAdjustNoiseVar = true;
+ break;
+ case 6:
+ nphy_adj_tone_id_buf[0] = 9;
+ nphy_adj_tone_id_buf[1] = 10;
+ nphy_adj_noise_var_buf[0] = 0x22f;
+ nphy_adj_noise_var_buf[1] = 0x24f;
+ isAdjustNoiseVar = true;
+ break;
+ case 7:
+ nphy_adj_tone_id_buf[0] = 121;
+ nphy_adj_tone_id_buf[1] = 122;
+ nphy_adj_noise_var_buf[0] = 0x18f;
+ nphy_adj_noise_var_buf[1] = 0x24f;
+ isAdjustNoiseVar = true;
+ break;
+ case 8:
+ nphy_adj_tone_id_buf[0] = 105;
+ nphy_adj_tone_id_buf[1] = 106;
+ nphy_adj_noise_var_buf[0] = 0x22f;
+ nphy_adj_noise_var_buf[1] = 0x25f;
+ isAdjustNoiseVar = true;
+ break;
+ case 9:
+ nphy_adj_tone_id_buf[0] = 89;
+ nphy_adj_tone_id_buf[1] = 90;
+ nphy_adj_noise_var_buf[0] = 0x22f;
+ nphy_adj_noise_var_buf[1] = 0x24f;
+ isAdjustNoiseVar = true;
+ break;
+ case 10:
+ nphy_adj_tone_id_buf[0] = 73;
+ nphy_adj_tone_id_buf[1] = 74;
+ nphy_adj_noise_var_buf[0] = 0x22f;
+ nphy_adj_noise_var_buf[1] = 0x24f;
+ isAdjustNoiseVar = true;
+ break;
+ default:
+ isAdjustNoiseVar = false;
+ break;
+ }
+ }
+
+ if (isAdjustNoiseVar) {
+ numTonesAdjust = sizeof(nphy_adj_tone_id_buf) /
+ sizeof(nphy_adj_tone_id_buf[0]);
+
+ wlc_phy_adjust_min_noisevar_nphy(
+ pi,
+ numTonesAdjust,
+ nphy_adj_tone_id_buf,
+ nphy_adj_noise_var_buf);
+
+ tempval = 0;
+
+ } else {
+ wlc_phy_adjust_min_noisevar_nphy(pi, 0, NULL,
+ NULL);
+ }
+ }
+
+ if ((pi->nphy_aband_spurwar_en) &&
+ (CHSPEC_IS5G(pi->radio_chanspec))) {
+ switch (cur_channel) {
+ case 54:
+ nphy_adj_tone_id_buf[0] = 32;
+ nphy_adj_noise_var_buf[0] = 0x25f;
+ break;
+ case 38:
+ case 102:
+ case 118:
+ nphy_adj_tone_id_buf[0] = 0;
+ nphy_adj_noise_var_buf[0] = 0x0;
+ break;
+ case 134:
+ nphy_adj_tone_id_buf[0] = 32;
+ nphy_adj_noise_var_buf[0] = 0x21f;
+ break;
+ case 151:
+ nphy_adj_tone_id_buf[0] = 16;
+ nphy_adj_noise_var_buf[0] = 0x23f;
+ break;
+ case 153:
+ case 161:
+ nphy_adj_tone_id_buf[0] = 48;
+ nphy_adj_noise_var_buf[0] = 0x23f;
+ break;
+ default:
+ nphy_adj_tone_id_buf[0] = 0;
+ nphy_adj_noise_var_buf[0] = 0x0;
+ break;
+ }
+
+ if (nphy_adj_tone_id_buf[0]
+ && nphy_adj_noise_var_buf[0])
+ wlc_phy_adjust_min_noisevar_nphy(
+ pi, 1,
+ nphy_adj_tone_id_buf,
+ nphy_adj_noise_var_buf);
+ else
+ wlc_phy_adjust_min_noisevar_nphy(pi, 0, NULL,
+ NULL);
+ }
+
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, false);
+ }
+}
+
+void wlc_phy_init_nphy(struct brcms_phy *pi)
+{
+ u16 val;
+ u16 clip1_ths[2];
+ struct nphy_txgains target_gain;
+ u8 tx_pwr_ctrl_state;
+ bool do_nphy_cal = false;
+ uint core;
+ uint origidx, intr_val;
+ struct d11regs __iomem *regs;
+ u32 d11_clk_ctl_st;
+ bool do_rssi_cal = false;
+
+ core = 0;
+
+ if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN))
+ pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
+
+ if ((ISNPHY(pi)) && (NREV_GE(pi->pubpi.phy_rev, 5)) &&
+ ((pi->sh->chippkg == BCM4717_PKG_ID) ||
+ (pi->sh->chippkg == BCM4718_PKG_ID))) {
+ if ((pi->sh->boardflags & BFL_EXTLNA) &&
+ (CHSPEC_IS2G(pi->radio_chanspec)))
+ ai_corereg(pi->sh->sih, SI_CC_IDX,
+ offsetof(struct chipcregs, chipcontrol),
+ 0x40, 0x40);
+ }
+
+ if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
+ CHSPEC_IS40(pi->radio_chanspec)) {
+
+ regs = (struct d11regs __iomem *)
+ ai_switch_core(pi->sh->sih,
+ D11_CORE_ID, &origidx,
+ &intr_val);
+ d11_clk_ctl_st = R_REG(&regs->clk_ctl_st);
+ AND_REG(&regs->clk_ctl_st,
+ ~(CCS_FORCEHT | CCS_HTAREQ));
+
+ W_REG(&regs->clk_ctl_st, d11_clk_ctl_st);
+
+ ai_restore_core(pi->sh->sih, origidx, intr_val);
+ }
+
+ pi->use_int_tx_iqlo_cal_nphy =
+ (PHY_IPA(pi) ||
+ (NREV_GE(pi->pubpi.phy_rev, 7) ||
+ (NREV_GE(pi->pubpi.phy_rev, 5)
+ && pi->sh->boardflags2 & BFL2_INTERNDET_TXIQCAL)));
+
+ pi->internal_tx_iqlo_cal_tapoff_intpa_nphy = false;
+
+ pi->nphy_deaf_count = 0;
+
+ wlc_phy_tbl_init_nphy(pi);
+
+ pi->nphy_crsminpwr_adjusted = false;
+ pi->nphy_noisevars_adjusted = false;
+
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ write_phy_reg(pi, 0xe7, 0);
+ write_phy_reg(pi, 0xec, 0);
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ write_phy_reg(pi, 0x342, 0);
+ write_phy_reg(pi, 0x343, 0);
+ write_phy_reg(pi, 0x346, 0);
+ write_phy_reg(pi, 0x347, 0);
+ }
+ write_phy_reg(pi, 0xe5, 0);
+ write_phy_reg(pi, 0xe6, 0);
+ } else {
+ write_phy_reg(pi, 0xec, 0);
+ }
+
+ write_phy_reg(pi, 0x91, 0);
+ write_phy_reg(pi, 0x92, 0);
+ if (NREV_LT(pi->pubpi.phy_rev, 6)) {
+ write_phy_reg(pi, 0x93, 0);
+ write_phy_reg(pi, 0x94, 0);
+ }
+
+ and_phy_reg(pi, 0xa1, ~3);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ write_phy_reg(pi, 0x8f, 0);
+ write_phy_reg(pi, 0xa5, 0);
+ } else {
+ write_phy_reg(pi, 0xa5, 0);
+ }
+
+ if (NREV_IS(pi->pubpi.phy_rev, 2))
+ mod_phy_reg(pi, 0xdc, 0x00ff, 0x3b);
+ else if (NREV_LT(pi->pubpi.phy_rev, 2))
+ mod_phy_reg(pi, 0xdc, 0x00ff, 0x40);
+
+ write_phy_reg(pi, 0x203, 32);
+ write_phy_reg(pi, 0x201, 32);
+
+ if (pi->sh->boardflags2 & BFL2_SKWRKFEM_BRD)
+ write_phy_reg(pi, 0x20d, 160);
+ else
+ write_phy_reg(pi, 0x20d, 184);
+
+ write_phy_reg(pi, 0x13a, 200);
+
+ write_phy_reg(pi, 0x70, 80);
+
+ write_phy_reg(pi, 0x1ff, 48);
+
+ if (NREV_LT(pi->pubpi.phy_rev, 8))
+ wlc_phy_update_mimoconfig_nphy(pi, pi->n_preamble_override);
+
+ wlc_phy_stf_chain_upd_nphy(pi);
+
+ if (NREV_LT(pi->pubpi.phy_rev, 2)) {
+ write_phy_reg(pi, 0x180, 0xaa8);
+ write_phy_reg(pi, 0x181, 0x9a4);
+ }
+
+ if (PHY_IPA(pi)) {
+ for (core = 0; core < pi->pubpi.phy_corenum; core++) {
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x297 :
+ 0x29b, (0x1 << 0), (1) << 0);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x298 :
+ 0x29c, (0x1ff << 7),
+ (pi->nphy_papd_epsilon_offset[core]) << 7);
+
+ }
+
+ wlc_phy_ipa_set_tx_digi_filts_nphy(pi);
+ } else if (NREV_GE(pi->pubpi.phy_rev, 5)) {
+ wlc_phy_extpa_set_tx_digi_filts_nphy(pi);
+ }
+
+ wlc_phy_workarounds_nphy(pi);
+
+ wlapi_bmac_phyclk_fgc(pi->sh->physhim, ON);
+
+ val = read_phy_reg(pi, 0x01);
+ write_phy_reg(pi, 0x01, val | BBCFG_RESETCCA);
+ write_phy_reg(pi, 0x01, val & (~BBCFG_RESETCCA));
+ wlapi_bmac_phyclk_fgc(pi->sh->physhim, OFF);
+
+ wlapi_bmac_macphyclk_set(pi->sh->physhim, ON);
+
+ wlc_phy_pa_override_nphy(pi, OFF);
+ wlc_phy_force_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX);
+ wlc_phy_force_rfseq_nphy(pi, NPHY_RFSEQ_RESET2RX);
+ wlc_phy_pa_override_nphy(pi, ON);
+
+ wlc_phy_classifier_nphy(pi, 0, 0);
+ wlc_phy_clip_det_nphy(pi, 0, clip1_ths);
+
+ if (CHSPEC_IS2G(pi->radio_chanspec))
+ wlc_phy_bphy_init_nphy(pi);
+
+ tx_pwr_ctrl_state = pi->nphy_txpwrctrl;
+ wlc_phy_txpwrctrl_enable_nphy(pi, PHY_TPC_HW_OFF);
+
+ wlc_phy_txpwr_fixpower_nphy(pi);
+
+ wlc_phy_txpwrctrl_idle_tssi_nphy(pi);
+
+ wlc_phy_txpwrctrl_pwr_setup_nphy(pi);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ u32 *tx_pwrctrl_tbl = NULL;
+ u16 idx;
+ s16 pga_gn = 0;
+ s16 pad_gn = 0;
+ s32 rfpwr_offset;
+
+ if (PHY_IPA(pi)) {
+ tx_pwrctrl_tbl = wlc_phy_get_ipa_gaintbl_nphy(pi);
+ } else {
+ if (CHSPEC_IS5G(pi->radio_chanspec)) {
+ if (NREV_IS(pi->pubpi.phy_rev, 3))
+ tx_pwrctrl_tbl =
+ nphy_tpc_5GHz_txgain_rev3;
+ else if (NREV_IS(pi->pubpi.phy_rev, 4))
+ tx_pwrctrl_tbl =
+ (pi->srom_fem5g.extpagain ==
+ 3) ?
+ nphy_tpc_5GHz_txgain_HiPwrEPA :
+ nphy_tpc_5GHz_txgain_rev4;
+ else
+ tx_pwrctrl_tbl =
+ nphy_tpc_5GHz_txgain_rev5;
+ } else {
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (pi->pubpi.radiorev == 5)
+ tx_pwrctrl_tbl =
+ nphy_tpc_txgain_epa_2057rev5;
+ else if (pi->pubpi.radiorev == 3)
+ tx_pwrctrl_tbl =
+ nphy_tpc_txgain_epa_2057rev3;
+ } else {
+ if (NREV_GE(pi->pubpi.phy_rev, 5) &&
+ (pi->srom_fem2g.extpagain == 3))
+ tx_pwrctrl_tbl =
+ nphy_tpc_txgain_HiPwrEPA;
+ else
+ tx_pwrctrl_tbl =
+ nphy_tpc_txgain_rev3;
+ }
+ }
+ }
+
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CORE1TXPWRCTL, 128,
+ 192, 32, tx_pwrctrl_tbl);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CORE2TXPWRCTL, 128,
+ 192, 32, tx_pwrctrl_tbl);
+
+ pi->nphy_gmval = (u16) ((*tx_pwrctrl_tbl >> 16) & 0x7000);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+
+ for (idx = 0; idx < 128; idx++) {
+ pga_gn = (tx_pwrctrl_tbl[idx] >> 24) & 0xf;
+ pad_gn = (tx_pwrctrl_tbl[idx] >> 19) & 0x1f;
+ rfpwr_offset = get_rf_pwr_offset(pi, pga_gn,
+ pad_gn);
+ wlc_phy_table_write_nphy(
+ pi,
+ NPHY_TBL_ID_CORE1TXPWRCTL,
+ 1, 576 + idx, 32,
+ &rfpwr_offset);
+ wlc_phy_table_write_nphy(
+ pi,
+ NPHY_TBL_ID_CORE2TXPWRCTL,
+ 1, 576 + idx, 32,
+ &rfpwr_offset);
+ }
+ } else {
+
+ for (idx = 0; idx < 128; idx++) {
+ pga_gn = (tx_pwrctrl_tbl[idx] >> 24) & 0xf;
+ if (CHSPEC_IS2G(pi->radio_chanspec))
+ rfpwr_offset = (s16)
+ nphy_papd_pga_gain_delta_ipa_2g
+ [pga_gn];
+ else
+ rfpwr_offset = (s16)
+ nphy_papd_pga_gain_delta_ipa_5g
+ [pga_gn];
+
+ wlc_phy_table_write_nphy(
+ pi,
+ NPHY_TBL_ID_CORE1TXPWRCTL,
+ 1, 576 + idx, 32,
+ &rfpwr_offset);
+ wlc_phy_table_write_nphy(
+ pi,
+ NPHY_TBL_ID_CORE2TXPWRCTL,
+ 1, 576 + idx, 32,
+ &rfpwr_offset);
+ }
+
+ }
+ } else {
+
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CORE1TXPWRCTL, 128,
+ 192, 32, nphy_tpc_txgain);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CORE2TXPWRCTL, 128,
+ 192, 32, nphy_tpc_txgain);
+ }
+
+ if (pi->sh->phyrxchain != 0x3)
+ wlc_phy_rxcore_setstate_nphy((struct brcms_phy_pub *) pi,
+ pi->sh->phyrxchain);
+
+ if (PHY_PERICAL_MPHASE_PENDING(pi))
+ wlc_phy_cal_perical_mphase_restart(pi);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ do_rssi_cal = (CHSPEC_IS2G(pi->radio_chanspec)) ?
+ (pi->nphy_rssical_chanspec_2G == 0) :
+ (pi->nphy_rssical_chanspec_5G == 0);
+
+ if (do_rssi_cal)
+ wlc_phy_rssi_cal_nphy(pi);
+ else
+ wlc_phy_restore_rssical_nphy(pi);
+ } else {
+ wlc_phy_rssi_cal_nphy(pi);
+ }
+
+ if (!SCAN_RM_IN_PROGRESS(pi))
+ do_nphy_cal = (CHSPEC_IS2G(pi->radio_chanspec)) ?
+ (pi->nphy_iqcal_chanspec_2G == 0) :
+ (pi->nphy_iqcal_chanspec_5G == 0);
+
+ if (!pi->do_initcal)
+ do_nphy_cal = false;
+
+ if (do_nphy_cal) {
+
+ target_gain = wlc_phy_get_tx_gain_nphy(pi);
+
+ if (pi->antsel_type == ANTSEL_2x3)
+ wlc_phy_antsel_init((struct brcms_phy_pub *) pi,
+ true);
+
+ if (pi->nphy_perical != PHY_PERICAL_MPHASE) {
+ wlc_phy_rssi_cal_nphy(pi);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ pi->nphy_cal_orig_pwr_idx[0] =
+ pi->nphy_txpwrindex[PHY_CORE_0]
+ .
+ index_internal;
+ pi->nphy_cal_orig_pwr_idx[1] =
+ pi->nphy_txpwrindex[PHY_CORE_1]
+ .
+ index_internal;
+
+ wlc_phy_precal_txgain_nphy(pi);
+ target_gain =
+ wlc_phy_get_tx_gain_nphy(pi);
+ }
+
+ if (wlc_phy_cal_txiqlo_nphy
+ (pi, target_gain, true,
+ false) == 0) {
+ if (wlc_phy_cal_rxiq_nphy
+ (pi, target_gain, 2,
+ false) == 0)
+ wlc_phy_savecal_nphy(pi);
+
+ }
+ } else if (pi->mphase_cal_phase_id ==
+ MPHASE_CAL_STATE_IDLE) {
+ wlc_phy_cal_perical((struct brcms_phy_pub *) pi,
+ PHY_PERICAL_PHYINIT);
+ }
+ } else {
+ wlc_phy_restorecal_nphy(pi);
+ }
+
+ wlc_phy_txpwrctrl_coeff_setup_nphy(pi);
+
+ wlc_phy_txpwrctrl_enable_nphy(pi, tx_pwr_ctrl_state);
+
+ wlc_phy_nphy_tkip_rifs_war(pi, pi->sh->_rifs_phy);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 3) && NREV_LE(pi->pubpi.phy_rev, 6))
+
+ write_phy_reg(pi, 0x70, 50);
+
+ wlc_phy_txlpfbw_nphy(pi);
+
+ wlc_phy_spurwar_nphy(pi);
+
+}
+
+static void wlc_phy_resetcca_nphy(struct brcms_phy *pi)
+{
+ u16 val;
+
+ wlapi_bmac_phyclk_fgc(pi->sh->physhim, ON);
+
+ val = read_phy_reg(pi, 0x01);
+ write_phy_reg(pi, 0x01, val | BBCFG_RESETCCA);
+ udelay(1);
+ write_phy_reg(pi, 0x01, val & (~BBCFG_RESETCCA));
+
+ wlapi_bmac_phyclk_fgc(pi->sh->physhim, OFF);
+
+ wlc_phy_force_rfseq_nphy(pi, NPHY_RFSEQ_RESET2RX);
+}
+
+void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en)
+{
+ u16 rfctrlintc_override_val;
+
+ if (!en) {
+
+ pi->rfctrlIntc1_save = read_phy_reg(pi, 0x91);
+ pi->rfctrlIntc2_save = read_phy_reg(pi, 0x92);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ rfctrlintc_override_val = 0x1480;
+ else if (NREV_GE(pi->pubpi.phy_rev, 3))
+ rfctrlintc_override_val =
+ CHSPEC_IS5G(pi->radio_chanspec) ? 0x600 : 0x480;
+ else
+ rfctrlintc_override_val =
+ CHSPEC_IS5G(pi->radio_chanspec) ? 0x180 : 0x120;
+
+ write_phy_reg(pi, 0x91, rfctrlintc_override_val);
+ write_phy_reg(pi, 0x92, rfctrlintc_override_val);
+ } else {
+ write_phy_reg(pi, 0x91, pi->rfctrlIntc1_save);
+ write_phy_reg(pi, 0x92, pi->rfctrlIntc2_save);
+ }
+
+}
+
+void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi)
+{
+
+ u16 txrx_chain =
+ (NPHY_RfseqCoreActv_TxRxChain0 | NPHY_RfseqCoreActv_TxRxChain1);
+ bool CoreActv_override = false;
+
+ if (pi->nphy_txrx_chain == BRCMS_N_TXRX_CHAIN0) {
+ txrx_chain = NPHY_RfseqCoreActv_TxRxChain0;
+ CoreActv_override = true;
+
+ if (NREV_LE(pi->pubpi.phy_rev, 2))
+ and_phy_reg(pi, 0xa0, ~0x20);
+ } else if (pi->nphy_txrx_chain == BRCMS_N_TXRX_CHAIN1) {
+ txrx_chain = NPHY_RfseqCoreActv_TxRxChain1;
+ CoreActv_override = true;
+
+ if (NREV_LE(pi->pubpi.phy_rev, 2))
+ or_phy_reg(pi, 0xa0, 0x20);
+ }
+
+ mod_phy_reg(pi, 0xa2, ((0xf << 0) | (0xf << 4)), txrx_chain);
+
+ if (CoreActv_override) {
+ pi->nphy_perical = PHY_PERICAL_DISABLE;
+ or_phy_reg(pi, 0xa1, NPHY_RfseqMode_CoreActv_override);
+ } else {
+ pi->nphy_perical = PHY_PERICAL_MPHASE;
+ and_phy_reg(pi, 0xa1, ~NPHY_RfseqMode_CoreActv_override);
+ }
+}
+
+void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask)
+{
+ u16 regval;
+ u16 tbl_buf[16];
+ uint i;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
+ u16 tbl_opcode;
+ bool suspend;
+
+ pi->sh->phyrxchain = rxcore_bitmask;
+
+ if (!pi->sh->clk)
return;
- and_radio_reg(pi, RADIO_2055_CAL_LPO_CNTRL,
- ~(RADIO_2055_CAL_LPO_ENABLE));
+ suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ if (!suspend)
+ wlapi_suspend_mac_and_wait(pi->sh->physhim);
- wlc_phy_chanspec_set((struct brcms_phy_pub *) pi, pi->radio_chanspec);
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, true);
- write_radio_reg(pi, RADIO_2055_CORE1_RXBB_LPF, 9);
- write_radio_reg(pi, RADIO_2055_CORE2_RXBB_LPF, 9);
+ regval = read_phy_reg(pi, 0xa2);
+ regval &= ~(0xf << 4);
+ regval |= ((u16) (rxcore_bitmask & 0x3)) << 4;
+ write_phy_reg(pi, 0xa2, regval);
- write_radio_reg(pi, RADIO_2055_CORE1_RXBB_MIDAC_HIPAS, 0x83);
- write_radio_reg(pi, RADIO_2055_CORE2_RXBB_MIDAC_HIPAS, 0x83);
+ if ((rxcore_bitmask & 0x3) != 0x3) {
- mod_radio_reg(pi, RADIO_2055_CORE1_LNA_GAINBST,
- RADIO_2055_GAINBST_VAL_MASK, RADIO_2055_GAINBST_CODE);
- mod_radio_reg(pi, RADIO_2055_CORE2_LNA_GAINBST,
- RADIO_2055_GAINBST_VAL_MASK, RADIO_2055_GAINBST_CODE);
- if (pi->nphy_gain_boost) {
- and_radio_reg(pi, RADIO_2055_CORE1_RXRF_SPC1,
- ~(RADIO_2055_GAINBST_DISABLE));
- and_radio_reg(pi, RADIO_2055_CORE2_RXRF_SPC1,
- ~(RADIO_2055_GAINBST_DISABLE));
+ write_phy_reg(pi, 0x20e, 1);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ if (pi->rx2tx_biasentry == -1) {
+ wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_RFSEQ,
+ ARRAY_SIZE(tbl_buf), 80,
+ 16, tbl_buf);
+
+ for (i = 0; i < ARRAY_SIZE(tbl_buf); i++) {
+ if (tbl_buf[i] ==
+ NPHY_REV3_RFSEQ_CMD_CLR_RXRX_BIAS) {
+ pi->rx2tx_biasentry = (u8) i;
+ tbl_opcode =
+ NPHY_REV3_RFSEQ_CMD_NOP;
+ wlc_phy_table_write_nphy(
+ pi,
+ NPHY_TBL_ID_RFSEQ,
+ 1, i,
+ 16,
+ &tbl_opcode);
+ break;
+ } else if (tbl_buf[i] ==
+ NPHY_REV3_RFSEQ_CMD_END)
+ break;
+ }
+ }
+ }
} else {
- or_radio_reg(pi, RADIO_2055_CORE1_RXRF_SPC1,
- RADIO_2055_GAINBST_DISABLE);
- or_radio_reg(pi, RADIO_2055_CORE2_RXRF_SPC1,
- RADIO_2055_GAINBST_DISABLE);
+
+ write_phy_reg(pi, 0x20e, 30);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ if (pi->rx2tx_biasentry != -1) {
+ tbl_opcode = NPHY_REV3_RFSEQ_CMD_CLR_RXRX_BIAS;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
+ 1, pi->rx2tx_biasentry,
+ 16, &tbl_opcode);
+ pi->rx2tx_biasentry = -1;
+ }
+ }
}
- udelay(2);
+ wlc_phy_force_rfseq_nphy(pi, NPHY_RFSEQ_RESET2RX);
+
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, false);
+
+ if (!suspend)
+ wlapi_enable_mac(pi->sh->physhim);
+}
+
+u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih)
+{
+ u16 regval, rxen_bits;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
+
+ regval = read_phy_reg(pi, 0xa2);
+ rxen_bits = (regval >> 4) & 0xf;
+
+ return (u8) rxen_bits;
+}
+
+bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pi)
+{
+ return PHY_IPA(pi);
+}
+
+void wlc_phy_cal_init_nphy(struct brcms_phy *pi)
+{
}
static void wlc_phy_radio_preinit_205x(struct brcms_phy *pi)
@@ -17442,11 +20004,257 @@ static void wlc_phy_radio_preinit_205x(struct brcms_phy *pi)
}
+static void wlc_phy_radio_init_2057(struct brcms_phy *pi)
+{
+ struct radio_20xx_regs *regs_2057_ptr = NULL;
+
+ if (NREV_IS(pi->pubpi.phy_rev, 7)) {
+ regs_2057_ptr = regs_2057_rev4;
+ } else if (NREV_IS(pi->pubpi.phy_rev, 8)
+ || NREV_IS(pi->pubpi.phy_rev, 9)) {
+ switch (pi->pubpi.radiorev) {
+ case 5:
+
+ if (pi->pubpi.radiover == 0x0)
+ regs_2057_ptr = regs_2057_rev5;
+ else if (pi->pubpi.radiover == 0x1)
+ regs_2057_ptr = regs_2057_rev5v1;
+ else
+ break;
+
+ case 7:
+
+ regs_2057_ptr = regs_2057_rev7;
+ break;
+
+ case 8:
+
+ regs_2057_ptr = regs_2057_rev8;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ wlc_phy_init_radio_regs_allbands(pi, regs_2057_ptr);
+}
+
+static u16 wlc_phy_radio205x_rcal(struct brcms_phy *pi)
+{
+ u16 rcal_reg = 0;
+ int i;
+
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+
+ if (pi->pubpi.radiorev == 5) {
+
+ and_phy_reg(pi, 0x342, ~(0x1 << 1));
+
+ udelay(10);
+
+ mod_radio_reg(pi, RADIO_2057_IQTEST_SEL_PU, 0x1, 0x1);
+ mod_radio_reg(pi, RADIO_2057v7_IQTEST_SEL_PU2, 0x2,
+ 0x1);
+ }
+ mod_radio_reg(pi, RADIO_2057_RCAL_CONFIG, 0x1, 0x1);
+
+ udelay(10);
+
+ mod_radio_reg(pi, RADIO_2057_RCAL_CONFIG, 0x3, 0x3);
+
+ for (i = 0; i < MAX_205x_RCAL_WAITLOOPS; i++) {
+ rcal_reg = read_radio_reg(pi, RADIO_2057_RCAL_STATUS);
+ if (rcal_reg & 0x1)
+ break;
+
+ udelay(100);
+ }
+
+ if (WARN(i == MAX_205x_RCAL_WAITLOOPS,
+ "HW error: radio calib2"))
+ return 0;
+
+ mod_radio_reg(pi, RADIO_2057_RCAL_CONFIG, 0x2, 0x0);
+
+ rcal_reg = read_radio_reg(pi, RADIO_2057_RCAL_STATUS) & 0x3e;
+
+ mod_radio_reg(pi, RADIO_2057_RCAL_CONFIG, 0x1, 0x0);
+ if (pi->pubpi.radiorev == 5) {
+
+ mod_radio_reg(pi, RADIO_2057_IQTEST_SEL_PU, 0x1, 0x0);
+ mod_radio_reg(pi, RADIO_2057v7_IQTEST_SEL_PU2, 0x2,
+ 0x0);
+ }
+
+ if ((pi->pubpi.radiorev <= 4) || (pi->pubpi.radiorev == 6)) {
+
+ mod_radio_reg(pi, RADIO_2057_TEMPSENSE_CONFIG, 0x3c,
+ rcal_reg);
+ mod_radio_reg(pi, RADIO_2057_BANDGAP_RCAL_TRIM, 0xf0,
+ rcal_reg << 2);
+ }
+
+ } else if (NREV_IS(pi->pubpi.phy_rev, 3)) {
+ u16 savereg;
+
+ savereg =
+ read_radio_reg(
+ pi,
+ RADIO_2056_SYN_PLL_MAST2 |
+ RADIO_2056_SYN);
+ write_radio_reg(pi, RADIO_2056_SYN_PLL_MAST2 | RADIO_2056_SYN,
+ savereg | 0x7);
+ udelay(10);
+
+ write_radio_reg(pi, RADIO_2056_SYN_RCAL_MASTER | RADIO_2056_SYN,
+ 0x1);
+ udelay(10);
+
+ write_radio_reg(pi, RADIO_2056_SYN_RCAL_MASTER | RADIO_2056_SYN,
+ 0x9);
+
+ for (i = 0; i < MAX_205x_RCAL_WAITLOOPS; i++) {
+ rcal_reg = read_radio_reg(
+ pi,
+ RADIO_2056_SYN_RCAL_CODE_OUT |
+ RADIO_2056_SYN);
+ if (rcal_reg & 0x80)
+ break;
+
+ udelay(100);
+ }
+
+ if (WARN(i == MAX_205x_RCAL_WAITLOOPS,
+ "HW error: radio calib3"))
+ return 0;
+
+ write_radio_reg(pi, RADIO_2056_SYN_RCAL_MASTER | RADIO_2056_SYN,
+ 0x1);
+
+ rcal_reg =
+ read_radio_reg(pi,
+ RADIO_2056_SYN_RCAL_CODE_OUT |
+ RADIO_2056_SYN);
+
+ write_radio_reg(pi, RADIO_2056_SYN_RCAL_MASTER | RADIO_2056_SYN,
+ 0x0);
+
+ write_radio_reg(pi, RADIO_2056_SYN_PLL_MAST2 | RADIO_2056_SYN,
+ savereg);
+
+ return rcal_reg & 0x1f;
+ }
+ return rcal_reg & 0x3e;
+}
+
+static u16 wlc_phy_radio2057_rccal(struct brcms_phy *pi)
+{
+ u16 rccal_valid;
+ int i;
+ bool chip43226_6362A0;
+
+ chip43226_6362A0 = ((pi->pubpi.radiorev == 3)
+ || (pi->pubpi.radiorev == 4)
+ || (pi->pubpi.radiorev == 6));
+
+ rccal_valid = 0;
+ if (chip43226_6362A0) {
+ write_radio_reg(pi, RADIO_2057_RCCAL_MASTER, 0x61);
+ write_radio_reg(pi, RADIO_2057_RCCAL_TRC0, 0xc0);
+ } else {
+ write_radio_reg(pi, RADIO_2057v7_RCCAL_MASTER, 0x61);
+
+ write_radio_reg(pi, RADIO_2057_RCCAL_TRC0, 0xe9);
+ }
+ write_radio_reg(pi, RADIO_2057_RCCAL_X1, 0x6e);
+ write_radio_reg(pi, RADIO_2057_RCCAL_START_R1_Q1_P1, 0x55);
+
+ for (i = 0; i < MAX_205x_RCAL_WAITLOOPS; i++) {
+ rccal_valid = read_radio_reg(pi, RADIO_2057_RCCAL_DONE_OSCCAP);
+ if (rccal_valid & 0x2)
+ break;
+
+ udelay(500);
+ }
+
+ write_radio_reg(pi, RADIO_2057_RCCAL_START_R1_Q1_P1, 0x15);
+
+ rccal_valid = 0;
+ if (chip43226_6362A0) {
+ write_radio_reg(pi, RADIO_2057_RCCAL_MASTER, 0x69);
+ write_radio_reg(pi, RADIO_2057_RCCAL_TRC0, 0xb0);
+ } else {
+ write_radio_reg(pi, RADIO_2057v7_RCCAL_MASTER, 0x69);
+
+ write_radio_reg(pi, RADIO_2057_RCCAL_TRC0, 0xd5);
+ }
+ write_radio_reg(pi, RADIO_2057_RCCAL_X1, 0x6e);
+ write_radio_reg(pi, RADIO_2057_RCCAL_START_R1_Q1_P1, 0x55);
+
+ for (i = 0; i < MAX_205x_RCAL_WAITLOOPS; i++) {
+ rccal_valid = read_radio_reg(pi, RADIO_2057_RCCAL_DONE_OSCCAP);
+ if (rccal_valid & 0x2)
+ break;
+
+ udelay(500);
+ }
+
+ write_radio_reg(pi, RADIO_2057_RCCAL_START_R1_Q1_P1, 0x15);
+
+ rccal_valid = 0;
+ if (chip43226_6362A0) {
+ write_radio_reg(pi, RADIO_2057_RCCAL_MASTER, 0x73);
+
+ write_radio_reg(pi, RADIO_2057_RCCAL_X1, 0x28);
+ write_radio_reg(pi, RADIO_2057_RCCAL_TRC0, 0xb0);
+ } else {
+ write_radio_reg(pi, RADIO_2057v7_RCCAL_MASTER, 0x73);
+ write_radio_reg(pi, RADIO_2057_RCCAL_X1, 0x6e);
+ write_radio_reg(pi, RADIO_2057_RCCAL_TRC0, 0x99);
+ }
+ write_radio_reg(pi, RADIO_2057_RCCAL_START_R1_Q1_P1, 0x55);
+
+ for (i = 0; i < MAX_205x_RCAL_WAITLOOPS; i++) {
+ rccal_valid = read_radio_reg(pi, RADIO_2057_RCCAL_DONE_OSCCAP);
+ if (rccal_valid & 0x2)
+ break;
+
+ udelay(500);
+ }
+
+ if (WARN(!(rccal_valid & 0x2), "HW error: radio calib4"))
+ return 0;
+
+ write_radio_reg(pi, RADIO_2057_RCCAL_START_R1_Q1_P1, 0x15);
+
+ return rccal_valid;
+}
+
+static void wlc_phy_radio_postinit_2057(struct brcms_phy *pi)
+{
+
+ mod_radio_reg(pi, RADIO_2057_XTALPUOVR_PINCTRL, 0x1, 0x1);
+
+ mod_radio_reg(pi, RADIO_2057_RFPLL_MISC_CAL_RESETN, 0x78, 0x78);
+ mod_radio_reg(pi, RADIO_2057_XTAL_CONFIG2, 0x80, 0x80);
+ mdelay(2);
+ mod_radio_reg(pi, RADIO_2057_RFPLL_MISC_CAL_RESETN, 0x78, 0x0);
+ mod_radio_reg(pi, RADIO_2057_XTAL_CONFIG2, 0x80, 0x0);
+
+ if (pi->phy_init_por) {
+ wlc_phy_radio205x_rcal(pi);
+ wlc_phy_radio2057_rccal(pi);
+ }
+
+ mod_radio_reg(pi, RADIO_2057_RFPLL_MASTER, 0x8, 0x0);
+}
+
static void wlc_phy_radio_init_2056(struct brcms_phy *pi)
{
- struct radio_regs *regs_SYN_2056_ptr = NULL;
- struct radio_regs *regs_TX_2056_ptr = NULL;
- struct radio_regs *regs_RX_2056_ptr = NULL;
+ const struct radio_regs *regs_SYN_2056_ptr = NULL;
+ const struct radio_regs *regs_TX_2056_ptr = NULL;
+ const struct radio_regs *regs_RX_2056_ptr = NULL;
if (NREV_IS(pi->pubpi.phy_rev, 3)) {
regs_SYN_2056_ptr = regs_SYN_2056;
@@ -17515,92 +20323,190 @@ static void wlc_phy_radio_postinit_2056(struct brcms_phy *pi)
mod_radio_reg(pi, RADIO_2056_SYN_COM_RESET, 0x2, 0x0);
if ((pi->sh->boardflags2 & BFL2_LEGACY)
- || (pi->sh->boardflags2 & BFL2_XTALBUFOUTEN)) {
-
+ || (pi->sh->boardflags2 & BFL2_XTALBUFOUTEN))
mod_radio_reg(pi, RADIO_2056_SYN_PLL_MAST2, 0xf4, 0x0);
- } else {
-
+ else
mod_radio_reg(pi, RADIO_2056_SYN_PLL_MAST2, 0xfc, 0x0);
- }
mod_radio_reg(pi, RADIO_2056_SYN_RCCAL_CTRL0, 0x1, 0x0);
- if (pi->phy_init_por) {
+ if (pi->phy_init_por)
wlc_phy_radio205x_rcal(pi);
- }
}
-static void wlc_phy_radio_init_2057(struct brcms_phy *pi)
+static void wlc_phy_radio_preinit_2055(struct brcms_phy *pi)
{
- struct radio_20xx_regs *regs_2057_ptr = NULL;
- if (NREV_IS(pi->pubpi.phy_rev, 7)) {
+ and_phy_reg(pi, 0x78, ~RFCC_POR_FORCE);
+ or_phy_reg(pi, 0x78, RFCC_CHIP0_PU | RFCC_OE_POR_FORCE);
- regs_2057_ptr = regs_2057_rev4;
- } else if (NREV_IS(pi->pubpi.phy_rev, 8)
- || NREV_IS(pi->pubpi.phy_rev, 9)) {
- switch (pi->pubpi.radiorev) {
- case 5:
+ or_phy_reg(pi, 0x78, RFCC_POR_FORCE);
+}
- if (pi->pubpi.radiover == 0x0) {
+static void wlc_phy_radio_init_2055(struct brcms_phy *pi)
+{
+ wlc_phy_init_radio_regs(pi, regs_2055, RADIO_DEFAULT_CORE);
+}
- regs_2057_ptr = regs_2057_rev5;
+static void wlc_phy_radio_postinit_2055(struct brcms_phy *pi)
+{
- } else if (pi->pubpi.radiover == 0x1) {
+ and_radio_reg(pi, RADIO_2055_MASTER_CNTRL1,
+ ~(RADIO_2055_JTAGCTRL_MASK | RADIO_2055_JTAGSYNC_MASK));
- regs_2057_ptr = regs_2057_rev5v1;
- } else {
- break;
- }
+ if (((pi->sh->sromrev >= 4)
+ && !(pi->sh->boardflags2 & BFL2_RXBB_INT_REG_DIS))
+ || ((pi->sh->sromrev < 4))) {
+ and_radio_reg(pi, RADIO_2055_CORE1_RXBB_REGULATOR, 0x7F);
+ and_radio_reg(pi, RADIO_2055_CORE2_RXBB_REGULATOR, 0x7F);
+ }
- case 7:
+ mod_radio_reg(pi, RADIO_2055_RRCCAL_N_OPT_SEL, 0x3F, 0x2C);
+ write_radio_reg(pi, RADIO_2055_CAL_MISC, 0x3C);
- regs_2057_ptr = regs_2057_rev7;
- break;
+ and_radio_reg(pi, RADIO_2055_CAL_MISC,
+ ~(RADIO_2055_RRCAL_START | RADIO_2055_RRCAL_RST_N));
- case 8:
+ or_radio_reg(pi, RADIO_2055_CAL_LPO_CNTRL, RADIO_2055_CAL_LPO_ENABLE);
- regs_2057_ptr = regs_2057_rev8;
- break;
+ or_radio_reg(pi, RADIO_2055_CAL_MISC, RADIO_2055_RRCAL_RST_N);
- default:
- break;
- }
+ udelay(1000);
+
+ or_radio_reg(pi, RADIO_2055_CAL_MISC, RADIO_2055_RRCAL_START);
+
+ SPINWAIT(((read_radio_reg(pi, RADIO_2055_CAL_COUNTER_OUT2) &
+ RADIO_2055_RCAL_DONE) != RADIO_2055_RCAL_DONE), 2000);
+
+ if (WARN((read_radio_reg(pi, RADIO_2055_CAL_COUNTER_OUT2) &
+ RADIO_2055_RCAL_DONE) != RADIO_2055_RCAL_DONE,
+ "HW error: radio calibration1\n"))
+ return;
+
+ and_radio_reg(pi, RADIO_2055_CAL_LPO_CNTRL,
+ ~(RADIO_2055_CAL_LPO_ENABLE));
+
+ wlc_phy_chanspec_set((struct brcms_phy_pub *) pi, pi->radio_chanspec);
+
+ write_radio_reg(pi, RADIO_2055_CORE1_RXBB_LPF, 9);
+ write_radio_reg(pi, RADIO_2055_CORE2_RXBB_LPF, 9);
+
+ write_radio_reg(pi, RADIO_2055_CORE1_RXBB_MIDAC_HIPAS, 0x83);
+ write_radio_reg(pi, RADIO_2055_CORE2_RXBB_MIDAC_HIPAS, 0x83);
+
+ mod_radio_reg(pi, RADIO_2055_CORE1_LNA_GAINBST,
+ RADIO_2055_GAINBST_VAL_MASK, RADIO_2055_GAINBST_CODE);
+ mod_radio_reg(pi, RADIO_2055_CORE2_LNA_GAINBST,
+ RADIO_2055_GAINBST_VAL_MASK, RADIO_2055_GAINBST_CODE);
+ if (pi->nphy_gain_boost) {
+ and_radio_reg(pi, RADIO_2055_CORE1_RXRF_SPC1,
+ ~(RADIO_2055_GAINBST_DISABLE));
+ and_radio_reg(pi, RADIO_2055_CORE2_RXRF_SPC1,
+ ~(RADIO_2055_GAINBST_DISABLE));
+ } else {
+ or_radio_reg(pi, RADIO_2055_CORE1_RXRF_SPC1,
+ RADIO_2055_GAINBST_DISABLE);
+ or_radio_reg(pi, RADIO_2055_CORE2_RXRF_SPC1,
+ RADIO_2055_GAINBST_DISABLE);
}
- wlc_phy_init_radio_regs_allbands(pi, regs_2057_ptr);
+ udelay(2);
}
-static void wlc_phy_radio_postinit_2057(struct brcms_phy *pi)
+void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on)
{
+ if (on) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (!pi->radio_is_on) {
+ wlc_phy_radio_preinit_205x(pi);
+ wlc_phy_radio_init_2057(pi);
+ wlc_phy_radio_postinit_2057(pi);
+ }
- mod_radio_reg(pi, RADIO_2057_XTALPUOVR_PINCTRL, 0x1, 0x1);
+ wlc_phy_chanspec_set((struct brcms_phy_pub *) pi,
+ pi->radio_chanspec);
+ } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ wlc_phy_radio_preinit_205x(pi);
+ wlc_phy_radio_init_2056(pi);
+ wlc_phy_radio_postinit_2056(pi);
- mod_radio_reg(pi, RADIO_2057_RFPLL_MISC_CAL_RESETN, 0x78, 0x78);
- mod_radio_reg(pi, RADIO_2057_XTAL_CONFIG2, 0x80, 0x80);
- mdelay(2);
- mod_radio_reg(pi, RADIO_2057_RFPLL_MISC_CAL_RESETN, 0x78, 0x0);
- mod_radio_reg(pi, RADIO_2057_XTAL_CONFIG2, 0x80, 0x0);
+ wlc_phy_chanspec_set((struct brcms_phy_pub *) pi,
+ pi->radio_chanspec);
+ } else {
+ wlc_phy_radio_preinit_2055(pi);
+ wlc_phy_radio_init_2055(pi);
+ wlc_phy_radio_postinit_2055(pi);
+ }
- if (pi->phy_init_por) {
- wlc_phy_radio205x_rcal(pi);
- wlc_phy_radio2057_rccal(pi);
- }
+ pi->radio_is_on = true;
- mod_radio_reg(pi, RADIO_2057_RFPLL_MASTER, 0x8, 0x0);
+ } else {
+
+ if (NREV_GE(pi->pubpi.phy_rev, 3)
+ && NREV_LT(pi->pubpi.phy_rev, 7)) {
+ and_phy_reg(pi, 0x78, ~RFCC_CHIP0_PU);
+ mod_radio_reg(pi, RADIO_2056_SYN_COM_PU, 0x2, 0x0);
+
+ write_radio_reg(pi,
+ RADIO_2056_TX_PADA_BOOST_TUNE |
+ RADIO_2056_TX0, 0);
+ write_radio_reg(pi,
+ RADIO_2056_TX_PADG_BOOST_TUNE |
+ RADIO_2056_TX0, 0);
+ write_radio_reg(pi,
+ RADIO_2056_TX_PGAA_BOOST_TUNE |
+ RADIO_2056_TX0, 0);
+ write_radio_reg(pi,
+ RADIO_2056_TX_PGAG_BOOST_TUNE |
+ RADIO_2056_TX0, 0);
+ mod_radio_reg(pi,
+ RADIO_2056_TX_MIXA_BOOST_TUNE |
+ RADIO_2056_TX0, 0xf0, 0);
+ write_radio_reg(pi,
+ RADIO_2056_TX_MIXG_BOOST_TUNE |
+ RADIO_2056_TX0, 0);
+
+ write_radio_reg(pi,
+ RADIO_2056_TX_PADA_BOOST_TUNE |
+ RADIO_2056_TX1, 0);
+ write_radio_reg(pi,
+ RADIO_2056_TX_PADG_BOOST_TUNE |
+ RADIO_2056_TX1, 0);
+ write_radio_reg(pi,
+ RADIO_2056_TX_PGAA_BOOST_TUNE |
+ RADIO_2056_TX1, 0);
+ write_radio_reg(pi,
+ RADIO_2056_TX_PGAG_BOOST_TUNE |
+ RADIO_2056_TX1, 0);
+ mod_radio_reg(pi,
+ RADIO_2056_TX_MIXA_BOOST_TUNE |
+ RADIO_2056_TX1, 0xf0, 0);
+ write_radio_reg(pi,
+ RADIO_2056_TX_MIXG_BOOST_TUNE |
+ RADIO_2056_TX1, 0);
+
+ pi->radio_is_on = false;
+ }
+
+ if (NREV_GE(pi->pubpi.phy_rev, 8)) {
+ and_phy_reg(pi, 0x78, ~RFCC_CHIP0_PU);
+ pi->radio_is_on = false;
+ }
+
+ }
}
static bool
wlc_phy_chan2freq_nphy(struct brcms_phy *pi, uint channel, int *f,
- struct chan_info_nphy_radio2057 **t0,
- struct chan_info_nphy_radio205x **t1,
- struct chan_info_nphy_radio2057_rev5 **t2,
- struct chan_info_nphy_2055 **t3)
+ const struct chan_info_nphy_radio2057 **t0,
+ const struct chan_info_nphy_radio205x **t1,
+ const struct chan_info_nphy_radio2057_rev5 **t2,
+ const struct chan_info_nphy_2055 **t3)
{
uint i;
- struct chan_info_nphy_radio2057 *chan_info_tbl_p_0 = NULL;
- struct chan_info_nphy_radio205x *chan_info_tbl_p_1 = NULL;
- struct chan_info_nphy_radio2057_rev5 *chan_info_tbl_p_2 = NULL;
+ const struct chan_info_nphy_radio2057 *chan_info_tbl_p_0 = NULL;
+ const struct chan_info_nphy_radio205x *chan_info_tbl_p_1 = NULL;
+ const struct chan_info_nphy_radio2057_rev5 *chan_info_tbl_p_2 = NULL;
u32 tbl_len = 0;
int freq = 0;
@@ -17621,40 +20527,35 @@ wlc_phy_chan2freq_nphy(struct brcms_phy *pi, uint channel, int *f,
if (pi->pubpi.radiover == 0x0) {
chan_info_tbl_p_2 =
- chan_info_nphyrev8_2057_rev5;
- tbl_len =
- ARRAY_SIZE
- (chan_info_nphyrev8_2057_rev5);
+ chan_info_nphyrev8_2057_rev5;
+ tbl_len = ARRAY_SIZE(
+ chan_info_nphyrev8_2057_rev5);
} else if (pi->pubpi.radiover == 0x1) {
chan_info_tbl_p_2 =
- chan_info_nphyrev9_2057_rev5v1;
- tbl_len =
- ARRAY_SIZE
- (chan_info_nphyrev9_2057_rev5v1);
+ chan_info_nphyrev9_2057_rev5v1;
+ tbl_len = ARRAY_SIZE(
+ chan_info_nphyrev9_2057_rev5v1);
}
break;
case 7:
chan_info_tbl_p_0 =
- chan_info_nphyrev8_2057_rev7;
- tbl_len =
- ARRAY_SIZE(chan_info_nphyrev8_2057_rev7);
+ chan_info_nphyrev8_2057_rev7;
+ tbl_len = ARRAY_SIZE(
+ chan_info_nphyrev8_2057_rev7);
break;
case 8:
chan_info_tbl_p_0 =
- chan_info_nphyrev8_2057_rev8;
- tbl_len =
- ARRAY_SIZE(chan_info_nphyrev8_2057_rev8);
+ chan_info_nphyrev8_2057_rev8;
+ tbl_len = ARRAY_SIZE(
+ chan_info_nphyrev8_2057_rev8);
break;
default:
- if (NORADIO_ENAB(pi->pubpi)) {
- goto fail;
- }
break;
}
} else if (NREV_IS(pi->pubpi.phy_rev, 16)) {
@@ -17677,9 +20578,9 @@ wlc_phy_chan2freq_nphy(struct brcms_phy *pi, uint channel, int *f,
}
}
- if (i >= tbl_len) {
+ if (i >= tbl_len)
goto fail;
- }
+
if (pi->pubpi.radiorev == 5) {
*t2 = &chan_info_tbl_p_2[i];
freq = chan_info_tbl_p_2[i].freq;
@@ -17710,7 +20611,7 @@ wlc_phy_chan2freq_nphy(struct brcms_phy *pi, uint channel, int *f,
case 9:
chan_info_tbl_p_1 = chan_info_nphyrev5n6_2056v7;
tbl_len =
- ARRAY_SIZE(chan_info_nphyrev5n6_2056v7);
+ ARRAY_SIZE(chan_info_nphyrev5n6_2056v7);
break;
case 8:
chan_info_tbl_p_1 = chan_info_nphyrev6_2056v8;
@@ -17718,12 +20619,10 @@ wlc_phy_chan2freq_nphy(struct brcms_phy *pi, uint channel, int *f,
break;
case 11:
chan_info_tbl_p_1 = chan_info_nphyrev6_2056v11;
- tbl_len = ARRAY_SIZE(chan_info_nphyrev6_2056v11);
+ tbl_len = ARRAY_SIZE(
+ chan_info_nphyrev6_2056v11);
break;
default:
- if (NORADIO_ENAB(pi->pubpi)) {
- goto fail;
- }
break;
}
}
@@ -17733,9 +20632,9 @@ wlc_phy_chan2freq_nphy(struct brcms_phy *pi, uint channel, int *f,
break;
}
- if (i >= tbl_len) {
+ if (i >= tbl_len)
goto fail;
- }
+
*t1 = &chan_info_tbl_p_1[i];
freq = chan_info_tbl_p_1[i].freq;
@@ -17744,9 +20643,9 @@ wlc_phy_chan2freq_nphy(struct brcms_phy *pi, uint channel, int *f,
if (chan_info_nphy_2055[i].chan == channel)
break;
- if (i >= ARRAY_SIZE(chan_info_nphy_2055)) {
+ if (i >= ARRAY_SIZE(chan_info_nphy_2055))
goto fail;
- }
+
*t3 = &chan_info_nphy_2055[i];
freq = chan_info_nphy_2055[i].freq;
}
@@ -17754,7 +20653,7 @@ wlc_phy_chan2freq_nphy(struct brcms_phy *pi, uint channel, int *f,
*f = freq;
return true;
- fail:
+fail:
*f = WL_CHAN_FREQ_RANGE_2G;
return false;
}
@@ -17762,13 +20661,10 @@ wlc_phy_chan2freq_nphy(struct brcms_phy *pi, uint channel, int *f,
u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint channel)
{
int freq;
- struct chan_info_nphy_radio2057 *t0 = NULL;
- struct chan_info_nphy_radio205x *t1 = NULL;
- struct chan_info_nphy_radio2057_rev5 *t2 = NULL;
- struct chan_info_nphy_2055 *t3 = NULL;
-
- if (NORADIO_ENAB(pi->pubpi))
- return WL_CHAN_FREQ_RANGE_2G;
+ const struct chan_info_nphy_radio2057 *t0 = NULL;
+ const struct chan_info_nphy_radio205x *t1 = NULL;
+ const struct chan_info_nphy_radio2057_rev5 *t2 = NULL;
+ const struct chan_info_nphy_2055 *t3 = NULL;
if (channel == 0)
channel = CHSPEC_CHANNEL(pi->radio_chanspec);
@@ -17778,18 +20674,17 @@ u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint channel)
if (CHSPEC_IS2G(pi->radio_chanspec))
return WL_CHAN_FREQ_RANGE_2G;
- if ((freq >= BASE_LOW_5G_CHAN) && (freq < BASE_MID_5G_CHAN)) {
+ if ((freq >= BASE_LOW_5G_CHAN) && (freq < BASE_MID_5G_CHAN))
return WL_CHAN_FREQ_RANGE_5GL;
- } else if ((freq >= BASE_MID_5G_CHAN) && (freq < BASE_HIGH_5G_CHAN)) {
+ else if ((freq >= BASE_MID_5G_CHAN) && (freq < BASE_HIGH_5G_CHAN))
return WL_CHAN_FREQ_RANGE_5GM;
- } else {
+ else
return WL_CHAN_FREQ_RANGE_5GH;
- }
}
static void
wlc_phy_chanspec_radio2055_setup(struct brcms_phy *pi,
- struct chan_info_nphy_2055 *ci)
+ const struct chan_info_nphy_2055 *ci)
{
write_radio_reg(pi, RADIO_2055_PLL_REF, ci->RF_pll_ref);
@@ -17854,7 +20749,7 @@ static void
wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
const struct chan_info_nphy_radio205x *ci)
{
- struct radio_regs *regs_SYN_2056_ptr = NULL;
+ const struct radio_regs *regs_SYN_2056_ptr = NULL;
write_radio_reg(pi,
RADIO_2056_SYN_PLL_VCOCAL1 | RADIO_2056_SYN,
@@ -17960,15 +20855,14 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
break;
}
}
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if (CHSPEC_IS2G(pi->radio_chanspec))
write_radio_reg(pi, RADIO_2056_SYN_PLL_CP2 |
RADIO_2056_SYN,
(u16) regs_SYN_2056_ptr[0x49 - 2].init_g);
- } else {
+ else
write_radio_reg(pi, RADIO_2056_SYN_PLL_CP2 |
RADIO_2056_SYN,
(u16) regs_SYN_2056_ptr[0x49 - 2].init_a);
- }
if (pi->sh->boardflags2 & BFL2_GPLL_WAR) {
if (CHSPEC_IS2G(pi->radio_chanspec)) {
@@ -18069,7 +20963,8 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
mixg_boost_tune);
} else {
- bias = IS40MHZ(pi) ? 0x40 : 0x20;
+ bias = (pi->bw == WL_CHANSPEC_BW_40) ?
+ 0x40 : 0x20;
WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
INTPAG_IMAIN_STAT, bias);
@@ -18116,11 +21011,11 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
paa_boost_tune = 0x0;
pada_boost_tune = 0x77;
- if (freq != 5825) {
+ if (freq != 5825)
pgaa_boost_tune = -(int)(freq - 18) / 36 + 168;
- } else {
+ else
pgaa_boost_tune = 6;
- }
+
mixa_boost_tune = 0xf;
}
@@ -18146,9 +21041,8 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
if ((pi->sh->chip == BCM43224_CHIP_ID) ||
(pi->sh->chip == BCM43225_CHIP_ID)) {
- if (pi->sh->chippkg == BCM43224_FAB_SMIC) {
+ if (pi->sh->chippkg == BCM43224_FAB_SMIC)
cascbias = 0x35;
- }
}
pabias = (pi->phy_pabias == 0) ? 0x30 : pi->phy_pabias;
@@ -18186,117 +21080,12 @@ void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi)
udelay(300);
}
-#define MAX_205x_RCAL_WAITLOOPS 10000
-
-static u16 wlc_phy_radio205x_rcal(struct brcms_phy *pi)
-{
- u16 rcal_reg = 0;
- int i;
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
-
- if (pi->pubpi.radiorev == 5) {
-
- and_phy_reg(pi, 0x342, ~(0x1 << 1));
-
- udelay(10);
-
- mod_radio_reg(pi, RADIO_2057_IQTEST_SEL_PU, 0x1, 0x1);
- mod_radio_reg(pi, RADIO_2057v7_IQTEST_SEL_PU2, 0x2,
- 0x1);
- }
- mod_radio_reg(pi, RADIO_2057_RCAL_CONFIG, 0x1, 0x1);
-
- udelay(10);
-
- mod_radio_reg(pi, RADIO_2057_RCAL_CONFIG, 0x3, 0x3);
-
- for (i = 0; i < MAX_205x_RCAL_WAITLOOPS; i++) {
- rcal_reg = read_radio_reg(pi, RADIO_2057_RCAL_STATUS);
- if (rcal_reg & 0x1) {
- break;
- }
- udelay(100);
- }
-
- if (WARN(i == MAX_205x_RCAL_WAITLOOPS,
- "HW error: radio calib2"))
- return 0;
-
- mod_radio_reg(pi, RADIO_2057_RCAL_CONFIG, 0x2, 0x0);
-
- rcal_reg = read_radio_reg(pi, RADIO_2057_RCAL_STATUS) & 0x3e;
-
- mod_radio_reg(pi, RADIO_2057_RCAL_CONFIG, 0x1, 0x0);
- if (pi->pubpi.radiorev == 5) {
-
- mod_radio_reg(pi, RADIO_2057_IQTEST_SEL_PU, 0x1, 0x0);
- mod_radio_reg(pi, RADIO_2057v7_IQTEST_SEL_PU2, 0x2,
- 0x0);
- }
-
- if ((pi->pubpi.radiorev <= 4) || (pi->pubpi.radiorev == 6)) {
-
- mod_radio_reg(pi, RADIO_2057_TEMPSENSE_CONFIG, 0x3c,
- rcal_reg);
- mod_radio_reg(pi, RADIO_2057_BANDGAP_RCAL_TRIM, 0xf0,
- rcal_reg << 2);
- }
-
- } else if (NREV_IS(pi->pubpi.phy_rev, 3)) {
- u16 savereg;
-
- savereg =
- read_radio_reg(pi,
- RADIO_2056_SYN_PLL_MAST2 | RADIO_2056_SYN);
- write_radio_reg(pi, RADIO_2056_SYN_PLL_MAST2 | RADIO_2056_SYN,
- savereg | 0x7);
- udelay(10);
-
- write_radio_reg(pi, RADIO_2056_SYN_RCAL_MASTER | RADIO_2056_SYN,
- 0x1);
- udelay(10);
-
- write_radio_reg(pi, RADIO_2056_SYN_RCAL_MASTER | RADIO_2056_SYN,
- 0x9);
-
- for (i = 0; i < MAX_205x_RCAL_WAITLOOPS; i++) {
- rcal_reg = read_radio_reg(pi,
- RADIO_2056_SYN_RCAL_CODE_OUT |
- RADIO_2056_SYN);
- if (rcal_reg & 0x80) {
- break;
- }
- udelay(100);
- }
-
- if (WARN(i == MAX_205x_RCAL_WAITLOOPS,
- "HW error: radio calib3"))
- return 0;
-
- write_radio_reg(pi, RADIO_2056_SYN_RCAL_MASTER | RADIO_2056_SYN,
- 0x1);
-
- rcal_reg =
- read_radio_reg(pi,
- RADIO_2056_SYN_RCAL_CODE_OUT |
- RADIO_2056_SYN);
-
- write_radio_reg(pi, RADIO_2056_SYN_RCAL_MASTER | RADIO_2056_SYN,
- 0x0);
-
- write_radio_reg(pi, RADIO_2056_SYN_PLL_MAST2 | RADIO_2056_SYN,
- savereg);
-
- return rcal_reg & 0x1f;
- }
- return rcal_reg & 0x3e;
-}
-
static void
-wlc_phy_chanspec_radio2057_setup(struct brcms_phy *pi,
- const struct chan_info_nphy_radio2057 *ci,
- const struct chan_info_nphy_radio2057_rev5 *ci2)
+wlc_phy_chanspec_radio2057_setup(
+ struct brcms_phy *pi,
+ const struct chan_info_nphy_radio2057 *ci,
+ const struct chan_info_nphy_radio2057_rev5 *
+ ci2)
{
int coreNum;
u16 txmix2g_tune_boost_pu = 0;
@@ -18455,9 +21244,8 @@ wlc_phy_chanspec_radio2057_setup(struct brcms_phy *pi,
if (CHSPEC_IS2G(pi->radio_chanspec)) {
if (PHY_IPA(pi)) {
- if (pi->pubpi.radiorev == 3) {
+ if (pi->pubpi.radiorev == 3)
txmix2g_tune_boost_pu = 0x6b;
- }
if (pi->pubpi.radiorev == 5)
pad2g_tune_pus = 0x73;
@@ -18489,430 +21277,8 @@ wlc_phy_chanspec_radio2057_setup(struct brcms_phy *pi,
wlc_phy_radio205x_vcocal_nphy(pi);
}
-static u16 wlc_phy_radio2057_rccal(struct brcms_phy *pi)
-{
- u16 rccal_valid;
- int i;
- bool chip43226_6362A0;
-
- chip43226_6362A0 = ((pi->pubpi.radiorev == 3)
- || (pi->pubpi.radiorev == 4)
- || (pi->pubpi.radiorev == 6));
-
- rccal_valid = 0;
- if (chip43226_6362A0) {
- write_radio_reg(pi, RADIO_2057_RCCAL_MASTER, 0x61);
- write_radio_reg(pi, RADIO_2057_RCCAL_TRC0, 0xc0);
- } else {
- write_radio_reg(pi, RADIO_2057v7_RCCAL_MASTER, 0x61);
-
- write_radio_reg(pi, RADIO_2057_RCCAL_TRC0, 0xe9);
- }
- write_radio_reg(pi, RADIO_2057_RCCAL_X1, 0x6e);
- write_radio_reg(pi, RADIO_2057_RCCAL_START_R1_Q1_P1, 0x55);
-
- for (i = 0; i < MAX_205x_RCAL_WAITLOOPS; i++) {
- rccal_valid = read_radio_reg(pi, RADIO_2057_RCCAL_DONE_OSCCAP);
- if (rccal_valid & 0x2) {
- break;
- }
- udelay(500);
- }
-
- write_radio_reg(pi, RADIO_2057_RCCAL_START_R1_Q1_P1, 0x15);
-
- rccal_valid = 0;
- if (chip43226_6362A0) {
- write_radio_reg(pi, RADIO_2057_RCCAL_MASTER, 0x69);
- write_radio_reg(pi, RADIO_2057_RCCAL_TRC0, 0xb0);
- } else {
- write_radio_reg(pi, RADIO_2057v7_RCCAL_MASTER, 0x69);
-
- write_radio_reg(pi, RADIO_2057_RCCAL_TRC0, 0xd5);
- }
- write_radio_reg(pi, RADIO_2057_RCCAL_X1, 0x6e);
- write_radio_reg(pi, RADIO_2057_RCCAL_START_R1_Q1_P1, 0x55);
-
- for (i = 0; i < MAX_205x_RCAL_WAITLOOPS; i++) {
- rccal_valid = read_radio_reg(pi, RADIO_2057_RCCAL_DONE_OSCCAP);
- if (rccal_valid & 0x2) {
- break;
- }
- udelay(500);
- }
-
- write_radio_reg(pi, RADIO_2057_RCCAL_START_R1_Q1_P1, 0x15);
-
- rccal_valid = 0;
- if (chip43226_6362A0) {
- write_radio_reg(pi, RADIO_2057_RCCAL_MASTER, 0x73);
-
- write_radio_reg(pi, RADIO_2057_RCCAL_X1, 0x28);
- write_radio_reg(pi, RADIO_2057_RCCAL_TRC0, 0xb0);
- } else {
- write_radio_reg(pi, RADIO_2057v7_RCCAL_MASTER, 0x73);
- write_radio_reg(pi, RADIO_2057_RCCAL_X1, 0x6e);
- write_radio_reg(pi, RADIO_2057_RCCAL_TRC0, 0x99);
- }
- write_radio_reg(pi, RADIO_2057_RCCAL_START_R1_Q1_P1, 0x55);
-
- for (i = 0; i < MAX_205x_RCAL_WAITLOOPS; i++) {
- rccal_valid = read_radio_reg(pi, RADIO_2057_RCCAL_DONE_OSCCAP);
- if (rccal_valid & 0x2) {
- break;
- }
- udelay(500);
- }
-
- if (WARN(!(rccal_valid & 0x2), "HW error: radio calib4"))
- return 0;
-
- write_radio_reg(pi, RADIO_2057_RCCAL_START_R1_Q1_P1, 0x15);
-
- return rccal_valid;
-}
-
-static void
-wlc_phy_adjust_rx_analpfbw_nphy(struct brcms_phy *pi, u16 reduction_factr)
-{
- if (NREV_GE(pi->pubpi.phy_rev, 3) && NREV_LT(pi->pubpi.phy_rev, 7)) {
- if ((CHSPEC_CHANNEL(pi->radio_chanspec) == 11) &&
- CHSPEC_IS40(pi->radio_chanspec)) {
- if (!pi->nphy_anarxlpf_adjusted) {
- write_radio_reg(pi,
- (RADIO_2056_RX_RXLPF_RCCAL_LPC |
- RADIO_2056_RX0),
- ((pi->nphy_rccal_value +
- reduction_factr) | 0x80));
-
- pi->nphy_anarxlpf_adjusted = true;
- }
- } else {
- if (pi->nphy_anarxlpf_adjusted) {
- write_radio_reg(pi,
- (RADIO_2056_RX_RXLPF_RCCAL_LPC |
- RADIO_2056_RX0),
- (pi->nphy_rccal_value | 0x80));
-
- pi->nphy_anarxlpf_adjusted = false;
- }
- }
- }
-}
-
-static void
-wlc_phy_adjust_min_noisevar_nphy(struct brcms_phy *pi, int ntones,
- int *tone_id_buf, u32 *noise_var_buf)
-{
- int i;
- u32 offset;
- int tone_id;
- int tbllen =
- CHSPEC_IS40(pi->
- radio_chanspec) ? NPHY_NOISEVAR_TBLLEN40 :
- NPHY_NOISEVAR_TBLLEN20;
-
- if (pi->nphy_noisevars_adjusted) {
- for (i = 0; i < pi->nphy_saved_noisevars.bufcount; i++) {
- tone_id = pi->nphy_saved_noisevars.tone_id[i];
- offset = (tone_id >= 0) ?
- ((tone_id * 2) + 1) : (tbllen + (tone_id * 2) + 1);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1,
- offset, 32,
- (void *)&pi->
- nphy_saved_noisevars.
- min_noise_vars[i]);
- }
-
- pi->nphy_saved_noisevars.bufcount = 0;
- pi->nphy_noisevars_adjusted = false;
- }
-
- if ((noise_var_buf != NULL) && (tone_id_buf != NULL)) {
- pi->nphy_saved_noisevars.bufcount = 0;
-
- for (i = 0; i < ntones; i++) {
- tone_id = tone_id_buf[i];
- offset = (tone_id >= 0) ?
- ((tone_id * 2) + 1) : (tbllen + (tone_id * 2) + 1);
- pi->nphy_saved_noisevars.tone_id[i] = tone_id;
- wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1,
- offset, 32,
- &pi->nphy_saved_noisevars.
- min_noise_vars[i]);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1,
- offset, 32,
- (void *)&noise_var_buf[i]);
- pi->nphy_saved_noisevars.bufcount++;
- }
-
- pi->nphy_noisevars_adjusted = true;
- }
-}
-
-static void wlc_phy_adjust_crsminpwr_nphy(struct brcms_phy *pi, u8 minpwr)
-{
- u16 regval;
-
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- if ((CHSPEC_CHANNEL(pi->radio_chanspec) == 11) &&
- CHSPEC_IS40(pi->radio_chanspec)) {
- if (!pi->nphy_crsminpwr_adjusted) {
- regval = read_phy_reg(pi, 0x27d);
- pi->nphy_crsminpwr[0] = regval & 0xff;
- regval &= 0xff00;
- regval |= (u16) minpwr;
- write_phy_reg(pi, 0x27d, regval);
-
- regval = read_phy_reg(pi, 0x280);
- pi->nphy_crsminpwr[1] = regval & 0xff;
- regval &= 0xff00;
- regval |= (u16) minpwr;
- write_phy_reg(pi, 0x280, regval);
-
- regval = read_phy_reg(pi, 0x283);
- pi->nphy_crsminpwr[2] = regval & 0xff;
- regval &= 0xff00;
- regval |= (u16) minpwr;
- write_phy_reg(pi, 0x283, regval);
-
- pi->nphy_crsminpwr_adjusted = true;
- }
- } else {
- if (pi->nphy_crsminpwr_adjusted) {
- regval = read_phy_reg(pi, 0x27d);
- regval &= 0xff00;
- regval |= pi->nphy_crsminpwr[0];
- write_phy_reg(pi, 0x27d, regval);
-
- regval = read_phy_reg(pi, 0x280);
- regval &= 0xff00;
- regval |= pi->nphy_crsminpwr[1];
- write_phy_reg(pi, 0x280, regval);
-
- regval = read_phy_reg(pi, 0x283);
- regval &= 0xff00;
- regval |= pi->nphy_crsminpwr[2];
- write_phy_reg(pi, 0x283, regval);
-
- pi->nphy_crsminpwr_adjusted = false;
- }
- }
- }
-}
-
-static void wlc_phy_txlpfbw_nphy(struct brcms_phy *pi)
-{
- u8 tx_lpf_bw = 0;
-
- if (NREV_GE(pi->pubpi.phy_rev, 3) && NREV_LT(pi->pubpi.phy_rev, 7)) {
- if (CHSPEC_IS40(pi->radio_chanspec)) {
- tx_lpf_bw = 3;
- } else {
- tx_lpf_bw = 1;
- }
-
- if (PHY_IPA(pi)) {
- if (CHSPEC_IS40(pi->radio_chanspec)) {
- tx_lpf_bw = 5;
- } else {
- tx_lpf_bw = 4;
- }
- }
- write_phy_reg(pi, 0xe8,
- (tx_lpf_bw << 0) |
- (tx_lpf_bw << 3) |
- (tx_lpf_bw << 6) | (tx_lpf_bw << 9));
-
- if (PHY_IPA(pi)) {
-
- if (CHSPEC_IS40(pi->radio_chanspec)) {
- tx_lpf_bw = 4;
- } else {
- tx_lpf_bw = 1;
- }
-
- write_phy_reg(pi, 0xe9,
- (tx_lpf_bw << 0) |
- (tx_lpf_bw << 3) |
- (tx_lpf_bw << 6) | (tx_lpf_bw << 9));
- }
- }
-}
-
-static void wlc_phy_spurwar_nphy(struct brcms_phy *pi)
-{
- u16 cur_channel = 0;
- int nphy_adj_tone_id_buf[] = { 57, 58 };
- u32 nphy_adj_noise_var_buf[] = { 0x3ff, 0x3ff };
- bool isAdjustNoiseVar = false;
- uint numTonesAdjust = 0;
- u32 tempval = 0;
-
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, true);
-
- cur_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
-
- if (pi->nphy_gband_spurwar_en) {
-
- wlc_phy_adjust_rx_analpfbw_nphy(pi,
- NPHY_ANARXLPFBW_REDUCTIONFACT);
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if ((cur_channel == 11)
- && CHSPEC_IS40(pi->radio_chanspec)) {
-
- wlc_phy_adjust_min_noisevar_nphy(pi, 2,
- nphy_adj_tone_id_buf,
- nphy_adj_noise_var_buf);
- } else {
-
- wlc_phy_adjust_min_noisevar_nphy(pi, 0,
- NULL,
- NULL);
- }
- }
- wlc_phy_adjust_crsminpwr_nphy(pi,
- NPHY_ADJUSTED_MINCRSPOWER);
- }
-
- if ((pi->nphy_gband_spurwar2_en)
- && CHSPEC_IS2G(pi->radio_chanspec)) {
-
- if (CHSPEC_IS40(pi->radio_chanspec)) {
- switch (cur_channel) {
- case 3:
- nphy_adj_tone_id_buf[0] = 57;
- nphy_adj_tone_id_buf[1] = 58;
- nphy_adj_noise_var_buf[0] = 0x22f;
- nphy_adj_noise_var_buf[1] = 0x25f;
- isAdjustNoiseVar = true;
- break;
- case 4:
- nphy_adj_tone_id_buf[0] = 41;
- nphy_adj_tone_id_buf[1] = 42;
- nphy_adj_noise_var_buf[0] = 0x22f;
- nphy_adj_noise_var_buf[1] = 0x25f;
- isAdjustNoiseVar = true;
- break;
- case 5:
- nphy_adj_tone_id_buf[0] = 25;
- nphy_adj_tone_id_buf[1] = 26;
- nphy_adj_noise_var_buf[0] = 0x24f;
- nphy_adj_noise_var_buf[1] = 0x25f;
- isAdjustNoiseVar = true;
- break;
- case 6:
- nphy_adj_tone_id_buf[0] = 9;
- nphy_adj_tone_id_buf[1] = 10;
- nphy_adj_noise_var_buf[0] = 0x22f;
- nphy_adj_noise_var_buf[1] = 0x24f;
- isAdjustNoiseVar = true;
- break;
- case 7:
- nphy_adj_tone_id_buf[0] = 121;
- nphy_adj_tone_id_buf[1] = 122;
- nphy_adj_noise_var_buf[0] = 0x18f;
- nphy_adj_noise_var_buf[1] = 0x24f;
- isAdjustNoiseVar = true;
- break;
- case 8:
- nphy_adj_tone_id_buf[0] = 105;
- nphy_adj_tone_id_buf[1] = 106;
- nphy_adj_noise_var_buf[0] = 0x22f;
- nphy_adj_noise_var_buf[1] = 0x25f;
- isAdjustNoiseVar = true;
- break;
- case 9:
- nphy_adj_tone_id_buf[0] = 89;
- nphy_adj_tone_id_buf[1] = 90;
- nphy_adj_noise_var_buf[0] = 0x22f;
- nphy_adj_noise_var_buf[1] = 0x24f;
- isAdjustNoiseVar = true;
- break;
- case 10:
- nphy_adj_tone_id_buf[0] = 73;
- nphy_adj_tone_id_buf[1] = 74;
- nphy_adj_noise_var_buf[0] = 0x22f;
- nphy_adj_noise_var_buf[1] = 0x24f;
- isAdjustNoiseVar = true;
- break;
- default:
- isAdjustNoiseVar = false;
- break;
- }
- }
-
- if (isAdjustNoiseVar) {
- numTonesAdjust = sizeof(nphy_adj_tone_id_buf) /
- sizeof(nphy_adj_tone_id_buf[0]);
-
- wlc_phy_adjust_min_noisevar_nphy(pi,
- numTonesAdjust,
- nphy_adj_tone_id_buf,
- nphy_adj_noise_var_buf);
-
- tempval = 0;
-
- } else {
-
- wlc_phy_adjust_min_noisevar_nphy(pi, 0, NULL,
- NULL);
- }
- }
-
- if ((pi->nphy_aband_spurwar_en) &&
- (CHSPEC_IS5G(pi->radio_chanspec))) {
- switch (cur_channel) {
- case 54:
- nphy_adj_tone_id_buf[0] = 32;
- nphy_adj_noise_var_buf[0] = 0x25f;
- break;
- case 38:
- case 102:
- case 118:
- nphy_adj_tone_id_buf[0] = 0;
- nphy_adj_noise_var_buf[0] = 0x0;
- break;
- case 134:
- nphy_adj_tone_id_buf[0] = 32;
- nphy_adj_noise_var_buf[0] = 0x21f;
- break;
- case 151:
- nphy_adj_tone_id_buf[0] = 16;
- nphy_adj_noise_var_buf[0] = 0x23f;
- break;
- case 153:
- case 161:
- nphy_adj_tone_id_buf[0] = 48;
- nphy_adj_noise_var_buf[0] = 0x23f;
- break;
- default:
- nphy_adj_tone_id_buf[0] = 0;
- nphy_adj_noise_var_buf[0] = 0x0;
- break;
- }
-
- if (nphy_adj_tone_id_buf[0]
- && nphy_adj_noise_var_buf[0]) {
- wlc_phy_adjust_min_noisevar_nphy(pi, 1,
- nphy_adj_tone_id_buf,
- nphy_adj_noise_var_buf);
- } else {
- wlc_phy_adjust_min_noisevar_nphy(pi, 0, NULL,
- NULL);
- }
- }
-
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, false);
- }
-}
-
static void
-wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, chanspec_t chanspec,
+wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
const struct nphy_sfo_cfg *ci)
{
u16 val;
@@ -18964,14 +21330,11 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, chanspec_t chanspec,
and_phy_reg(pi, NPHY_TO_BPHY_OFF + BPHY_TEST, ~0x840);
}
- if (pi->nphy_txpwrctrl == PHY_TPC_HW_OFF) {
+ if (pi->nphy_txpwrctrl == PHY_TPC_HW_OFF)
wlc_phy_txpwr_fixpower_nphy(pi);
- }
-
- if (NREV_LT(pi->pubpi.phy_rev, 3)) {
+ if (NREV_LT(pi->pubpi.phy_rev, 3))
wlc_phy_adjust_lnagaintbl_nphy(pi);
- }
wlc_phy_txlpfbw_nphy(pi);
@@ -18982,28 +21345,20 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, chanspec_t chanspec,
val = CHSPEC_CHANNEL(chanspec);
if (!CHSPEC_IS40(pi->radio_chanspec)) {
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- if ((val == 13) || (val == 14) || (val == 153)) {
- spuravoid = 1;
- }
- } else {
-
- if (((val >= 5) && (val <= 8)) || (val == 13)
- || (val == 14)) {
+ if ((val == 13) || (val == 14) || (val == 153))
spuravoid = 1;
- }
+ } else if (((val >= 5) && (val <= 8)) || (val == 13)
+ || (val == 14)) {
+ spuravoid = 1;
}
+ } else if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (val == 54)
+ spuravoid = 1;
} else {
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- if (val == 54) {
- spuravoid = 1;
- }
- } else {
-
- if (pi->nphy_aband_spurwar_en &&
- ((val == 38) || (val == 102)
- || (val == 118)))
- spuravoid = 1;
- }
+ if (pi->nphy_aband_spurwar_en &&
+ ((val == 38) || (val == 102)
+ || (val == 118)))
+ spuravoid = 1;
}
if (pi->phy_spuravoid == SPURAVOID_FORCEON)
@@ -19047,20 +21402,16 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, chanspec_t chanspec,
wlc_phy_spurwar_nphy(pi);
}
-void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, chanspec_t chanspec)
+void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, u16 chanspec)
{
int freq;
- struct chan_info_nphy_radio2057 *t0 = NULL;
- struct chan_info_nphy_radio205x *t1 = NULL;
- struct chan_info_nphy_radio2057_rev5 *t2 = NULL;
- struct chan_info_nphy_2055 *t3 = NULL;
-
- if (NORADIO_ENAB(pi->pubpi)) {
- return;
- }
+ const struct chan_info_nphy_radio2057 *t0 = NULL;
+ const struct chan_info_nphy_radio205x *t1 = NULL;
+ const struct chan_info_nphy_radio2057_rev5 *t2 = NULL;
+ const struct chan_info_nphy_2055 *t3 = NULL;
if (!wlc_phy_chan2freq_nphy
- (pi, CHSPEC_CHANNEL(chanspec), &freq, &t0, &t1, &t2, &t3))
+ (pi, CHSPEC_CHANNEL(chanspec), &freq, &t0, &t1, &t2, &t3))
return;
wlc_phy_chanspec_radio_set((struct brcms_phy_pub *) pi, chanspec);
@@ -19071,15 +21422,13 @@ void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, chanspec_t chanspec)
if (CHSPEC_IS40(chanspec)) {
if (CHSPEC_SB_UPPER(chanspec)) {
or_phy_reg(pi, 0xa0, BPHY_BAND_SEL_UP20);
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
or_phy_reg(pi, 0x310, PRIM_SEL_UP20);
- }
} else {
and_phy_reg(pi, 0xa0, ~BPHY_BAND_SEL_UP20);
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
and_phy_reg(pi, 0x310,
(~PRIM_SEL_UP20 & 0xffff));
- }
}
}
@@ -19124,358 +21473,10 @@ void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, chanspec_t chanspec)
wlc_phy_chanspec_radio2055_setup(pi, t3);
wlc_phy_chanspec_nphy_setup(pi, chanspec,
- (const struct nphy_sfo_cfg *)&(t3->
- PHY_BW1a));
- }
-
-}
-
-static void wlc_phy_savecal_nphy(struct brcms_phy *pi)
-{
- void *tbl_ptr;
- int coreNum;
- u16 *txcal_radio_regs = NULL;
-
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, true);
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
-
- wlc_phy_rx_iq_coeffs_nphy(pi, 0,
- &pi->calibration_cache.
- rxcal_coeffs_2G);
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- txcal_radio_regs =
- pi->calibration_cache.txcal_radio_regs_2G;
- } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
-
- pi->calibration_cache.txcal_radio_regs_2G[0] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_I |
- RADIO_2056_TX0);
- pi->calibration_cache.txcal_radio_regs_2G[1] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_Q |
- RADIO_2056_TX0);
- pi->calibration_cache.txcal_radio_regs_2G[2] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_I |
- RADIO_2056_TX1);
- pi->calibration_cache.txcal_radio_regs_2G[3] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_Q |
- RADIO_2056_TX1);
-
- pi->calibration_cache.txcal_radio_regs_2G[4] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_I |
- RADIO_2056_TX0);
- pi->calibration_cache.txcal_radio_regs_2G[5] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_Q |
- RADIO_2056_TX0);
- pi->calibration_cache.txcal_radio_regs_2G[6] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_I |
- RADIO_2056_TX1);
- pi->calibration_cache.txcal_radio_regs_2G[7] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_Q |
- RADIO_2056_TX1);
- } else {
- pi->calibration_cache.txcal_radio_regs_2G[0] =
- read_radio_reg(pi, RADIO_2055_CORE1_TX_VOS_CNCL);
- pi->calibration_cache.txcal_radio_regs_2G[1] =
- read_radio_reg(pi, RADIO_2055_CORE2_TX_VOS_CNCL);
- pi->calibration_cache.txcal_radio_regs_2G[2] =
- read_radio_reg(pi, RADIO_2055_CORE1_TX_BB_MXGM);
- pi->calibration_cache.txcal_radio_regs_2G[3] =
- read_radio_reg(pi, RADIO_2055_CORE2_TX_BB_MXGM);
- }
-
- pi->nphy_iqcal_chanspec_2G = pi->radio_chanspec;
- tbl_ptr = pi->calibration_cache.txcal_coeffs_2G;
- } else {
-
- wlc_phy_rx_iq_coeffs_nphy(pi, 0,
- &pi->calibration_cache.
- rxcal_coeffs_5G);
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- txcal_radio_regs =
- pi->calibration_cache.txcal_radio_regs_5G;
- } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
-
- pi->calibration_cache.txcal_radio_regs_5G[0] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_I |
- RADIO_2056_TX0);
- pi->calibration_cache.txcal_radio_regs_5G[1] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_Q |
- RADIO_2056_TX0);
- pi->calibration_cache.txcal_radio_regs_5G[2] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_I |
- RADIO_2056_TX1);
- pi->calibration_cache.txcal_radio_regs_5G[3] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_Q |
- RADIO_2056_TX1);
-
- pi->calibration_cache.txcal_radio_regs_5G[4] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_I |
- RADIO_2056_TX0);
- pi->calibration_cache.txcal_radio_regs_5G[5] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_Q |
- RADIO_2056_TX0);
- pi->calibration_cache.txcal_radio_regs_5G[6] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_I |
- RADIO_2056_TX1);
- pi->calibration_cache.txcal_radio_regs_5G[7] =
- read_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_Q |
- RADIO_2056_TX1);
- } else {
- pi->calibration_cache.txcal_radio_regs_5G[0] =
- read_radio_reg(pi, RADIO_2055_CORE1_TX_VOS_CNCL);
- pi->calibration_cache.txcal_radio_regs_5G[1] =
- read_radio_reg(pi, RADIO_2055_CORE2_TX_VOS_CNCL);
- pi->calibration_cache.txcal_radio_regs_5G[2] =
- read_radio_reg(pi, RADIO_2055_CORE1_TX_BB_MXGM);
- pi->calibration_cache.txcal_radio_regs_5G[3] =
- read_radio_reg(pi, RADIO_2055_CORE2_TX_BB_MXGM);
- }
-
- pi->nphy_iqcal_chanspec_5G = pi->radio_chanspec;
- tbl_ptr = pi->calibration_cache.txcal_coeffs_5G;
- }
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- for (coreNum = 0; coreNum <= 1; coreNum++) {
-
- txcal_radio_regs[2 * coreNum] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
- LOFT_FINE_I);
- txcal_radio_regs[2 * coreNum + 1] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
- LOFT_FINE_Q);
-
- txcal_radio_regs[2 * coreNum + 4] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
- LOFT_COARSE_I);
- txcal_radio_regs[2 * coreNum + 5] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
- LOFT_COARSE_Q);
- }
- }
-
- wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_IQLOCAL, 8, 80, 16, tbl_ptr);
-
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, false);
-}
-
-static void wlc_phy_restorecal_nphy(struct brcms_phy *pi)
-{
- u16 *loft_comp;
- u16 txcal_coeffs_bphy[4];
- u16 *tbl_ptr;
- int coreNum;
- u16 *txcal_radio_regs = NULL;
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if (pi->nphy_iqcal_chanspec_2G == 0)
- return;
-
- tbl_ptr = pi->calibration_cache.txcal_coeffs_2G;
- loft_comp = &pi->calibration_cache.txcal_coeffs_2G[5];
- } else {
- if (pi->nphy_iqcal_chanspec_5G == 0)
- return;
-
- tbl_ptr = pi->calibration_cache.txcal_coeffs_5G;
- loft_comp = &pi->calibration_cache.txcal_coeffs_5G[5];
- }
-
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_IQLOCAL, 4, 80, 16,
- (void *)tbl_ptr);
-
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- txcal_coeffs_bphy[0] = tbl_ptr[0];
- txcal_coeffs_bphy[1] = tbl_ptr[1];
- txcal_coeffs_bphy[2] = tbl_ptr[2];
- txcal_coeffs_bphy[3] = tbl_ptr[3];
- } else {
- txcal_coeffs_bphy[0] = 0;
- txcal_coeffs_bphy[1] = 0;
- txcal_coeffs_bphy[2] = 0;
- txcal_coeffs_bphy[3] = 0;
- }
-
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_IQLOCAL, 4, 88, 16,
- txcal_coeffs_bphy);
-
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_IQLOCAL, 2, 85, 16, loft_comp);
-
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_IQLOCAL, 2, 93, 16, loft_comp);
-
- if (NREV_LT(pi->pubpi.phy_rev, 2))
- wlc_phy_tx_iq_war_nphy(pi);
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- txcal_radio_regs =
- pi->calibration_cache.txcal_radio_regs_2G;
- } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
-
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_I |
- RADIO_2056_TX0,
- pi->calibration_cache.
- txcal_radio_regs_2G[0]);
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_Q |
- RADIO_2056_TX0,
- pi->calibration_cache.
- txcal_radio_regs_2G[1]);
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_I |
- RADIO_2056_TX1,
- pi->calibration_cache.
- txcal_radio_regs_2G[2]);
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_Q |
- RADIO_2056_TX1,
- pi->calibration_cache.
- txcal_radio_regs_2G[3]);
-
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_I |
- RADIO_2056_TX0,
- pi->calibration_cache.
- txcal_radio_regs_2G[4]);
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_Q |
- RADIO_2056_TX0,
- pi->calibration_cache.
- txcal_radio_regs_2G[5]);
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_I |
- RADIO_2056_TX1,
- pi->calibration_cache.
- txcal_radio_regs_2G[6]);
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_Q |
- RADIO_2056_TX1,
- pi->calibration_cache.
- txcal_radio_regs_2G[7]);
- } else {
- write_radio_reg(pi, RADIO_2055_CORE1_TX_VOS_CNCL,
- pi->calibration_cache.
- txcal_radio_regs_2G[0]);
- write_radio_reg(pi, RADIO_2055_CORE2_TX_VOS_CNCL,
- pi->calibration_cache.
- txcal_radio_regs_2G[1]);
- write_radio_reg(pi, RADIO_2055_CORE1_TX_BB_MXGM,
- pi->calibration_cache.
- txcal_radio_regs_2G[2]);
- write_radio_reg(pi, RADIO_2055_CORE2_TX_BB_MXGM,
- pi->calibration_cache.
- txcal_radio_regs_2G[3]);
- }
-
- wlc_phy_rx_iq_coeffs_nphy(pi, 1,
- &pi->calibration_cache.
- rxcal_coeffs_2G);
- } else {
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- txcal_radio_regs =
- pi->calibration_cache.txcal_radio_regs_5G;
- } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
-
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_I |
- RADIO_2056_TX0,
- pi->calibration_cache.
- txcal_radio_regs_5G[0]);
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_Q |
- RADIO_2056_TX0,
- pi->calibration_cache.
- txcal_radio_regs_5G[1]);
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_I |
- RADIO_2056_TX1,
- pi->calibration_cache.
- txcal_radio_regs_5G[2]);
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_FINE_Q |
- RADIO_2056_TX1,
- pi->calibration_cache.
- txcal_radio_regs_5G[3]);
-
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_I |
- RADIO_2056_TX0,
- pi->calibration_cache.
- txcal_radio_regs_5G[4]);
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_Q |
- RADIO_2056_TX0,
- pi->calibration_cache.
- txcal_radio_regs_5G[5]);
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_I |
- RADIO_2056_TX1,
- pi->calibration_cache.
- txcal_radio_regs_5G[6]);
- write_radio_reg(pi,
- RADIO_2056_TX_LOFT_COARSE_Q |
- RADIO_2056_TX1,
- pi->calibration_cache.
- txcal_radio_regs_5G[7]);
- } else {
- write_radio_reg(pi, RADIO_2055_CORE1_TX_VOS_CNCL,
- pi->calibration_cache.
- txcal_radio_regs_5G[0]);
- write_radio_reg(pi, RADIO_2055_CORE2_TX_VOS_CNCL,
- pi->calibration_cache.
- txcal_radio_regs_5G[1]);
- write_radio_reg(pi, RADIO_2055_CORE1_TX_BB_MXGM,
- pi->calibration_cache.
- txcal_radio_regs_5G[2]);
- write_radio_reg(pi, RADIO_2055_CORE2_TX_BB_MXGM,
- pi->calibration_cache.
- txcal_radio_regs_5G[3]);
- }
-
- wlc_phy_rx_iq_coeffs_nphy(pi, 1,
- &pi->calibration_cache.
- rxcal_coeffs_5G);
+ (const struct nphy_sfo_cfg *)
+ &(t3->PHY_BW1a));
}
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- for (coreNum = 0; coreNum <= 1; coreNum++) {
-
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
- LOFT_FINE_I,
- txcal_radio_regs[2 * coreNum]);
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
- LOFT_FINE_Q,
- txcal_radio_regs[2 * coreNum + 1]);
-
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
- LOFT_COARSE_I,
- txcal_radio_regs[2 * coreNum + 4]);
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, coreNum,
- LOFT_COARSE_Q,
- txcal_radio_regs[2 * coreNum + 5]);
- }
- }
}
void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
@@ -19545,8 +21546,8 @@ u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val)
if (D11REV_IS(pi->sh->corerev, 16)) {
suspended =
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) ?
- false : true;
+ (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) ?
+ false : true;
if (!suspended)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
}
@@ -19563,18 +21564,6 @@ u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val)
return new_ctl;
}
-static void wlc_phy_clip_det_nphy(struct brcms_phy *pi, u8 write, u16 *vals)
-{
-
- if (write == 0) {
- vals[0] = read_phy_reg(pi, 0x2c);
- vals[1] = read_phy_reg(pi, 0x42);
- } else {
- write_phy_reg(pi, 0x2c, vals[0]);
- write_phy_reg(pi, 0x42, vals[1]);
- }
-}
-
void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd)
{
u16 trigger_mask, status_mask;
@@ -19620,577 +21609,6 @@ void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd)
}
static void
-wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
- u8 len)
-{
- u32 t1_offset, t2_offset;
- u8 ctr;
- u8 end_event =
- NREV_GE(pi->pubpi.phy_rev,
- 3) ? NPHY_REV3_RFSEQ_CMD_END : NPHY_RFSEQ_CMD_END;
- u8 end_dly = 1;
-
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, true);
-
- t1_offset = cmd << 4;
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, len, t1_offset, 8,
- events);
- t2_offset = t1_offset + 0x080;
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, len, t2_offset, 8,
- dlys);
-
- for (ctr = len; ctr < 16; ctr++) {
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
- t1_offset + ctr, 8, &end_event);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
- t2_offset + ctr, 8, &end_dly);
- }
-
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, false);
-}
-
-static u16 wlc_phy_read_lpf_bw_ctl_nphy(struct brcms_phy *pi, u16 offset)
-{
- u16 lpf_bw_ctl_val = 0;
- u16 rx2tx_lpf_rc_lut_offset = 0;
-
- if (offset == 0) {
- if (CHSPEC_IS40(pi->radio_chanspec)) {
- rx2tx_lpf_rc_lut_offset = 0x159;
- } else {
- rx2tx_lpf_rc_lut_offset = 0x154;
- }
- } else {
- rx2tx_lpf_rc_lut_offset = offset;
- }
- wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
- (u32) rx2tx_lpf_rc_lut_offset, 16,
- &lpf_bw_ctl_val);
-
- lpf_bw_ctl_val = lpf_bw_ctl_val & 0x7;
-
- return lpf_bw_ctl_val;
-}
-
-static void
-wlc_phy_rfctrl_override_nphy_rev7(struct brcms_phy *pi, u16 field, u16 value,
- u8 core_mask, u8 off, u8 override_id)
-{
- u8 core_num;
- u16 addr = 0, en_addr = 0, val_addr = 0, en_mask = 0, val_mask = 0;
- u8 val_shift = 0;
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- en_mask = field;
- for (core_num = 0; core_num < 2; core_num++) {
- if (override_id == NPHY_REV7_RFCTRLOVERRIDE_ID0) {
-
- switch (field) {
- case (0x1 << 2):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a :
- 0x7d;
- val_mask = (0x1 << 1);
- val_shift = 1;
- break;
- case (0x1 << 3):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a :
- 0x7d;
- val_mask = (0x1 << 2);
- val_shift = 2;
- break;
- case (0x1 << 4):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a :
- 0x7d;
- val_mask = (0x1 << 4);
- val_shift = 4;
- break;
- case (0x1 << 5):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a :
- 0x7d;
- val_mask = (0x1 << 5);
- val_shift = 5;
- break;
- case (0x1 << 6):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a :
- 0x7d;
- val_mask = (0x1 << 6);
- val_shift = 6;
- break;
- case (0x1 << 7):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a :
- 0x7d;
- val_mask = (0x1 << 7);
- val_shift = 7;
- break;
- case (0x1 << 10):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0xf8 :
- 0xfa;
- val_mask = (0x7 << 4);
- val_shift = 4;
- break;
- case (0x1 << 11):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7b :
- 0x7e;
- val_mask = (0xffff << 0);
- val_shift = 0;
- break;
- case (0x1 << 12):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7c :
- 0x7f;
- val_mask = (0xffff << 0);
- val_shift = 0;
- break;
- case (0x3 << 13):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x348 :
- 0x349;
- val_mask = (0xff << 0);
- val_shift = 0;
- break;
- case (0x1 << 13):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x348 :
- 0x349;
- val_mask = (0xf << 0);
- val_shift = 0;
- break;
- default:
- addr = 0xffff;
- break;
- }
- } else if (override_id == NPHY_REV7_RFCTRLOVERRIDE_ID1) {
-
- switch (field) {
- case (0x1 << 1):
- en_addr = (core_num == 0) ? 0x342 :
- 0x343;
- val_addr = (core_num == 0) ? 0x340 :
- 0x341;
- val_mask = (0x1 << 1);
- val_shift = 1;
- break;
- case (0x1 << 3):
- en_addr = (core_num == 0) ? 0x342 :
- 0x343;
- val_addr = (core_num == 0) ? 0x340 :
- 0x341;
- val_mask = (0x1 << 3);
- val_shift = 3;
- break;
- case (0x1 << 5):
- en_addr = (core_num == 0) ? 0x342 :
- 0x343;
- val_addr = (core_num == 0) ? 0x340 :
- 0x341;
- val_mask = (0x1 << 5);
- val_shift = 5;
- break;
- case (0x1 << 4):
- en_addr = (core_num == 0) ? 0x342 :
- 0x343;
- val_addr = (core_num == 0) ? 0x340 :
- 0x341;
- val_mask = (0x1 << 4);
- val_shift = 4;
- break;
- case (0x1 << 2):
-
- en_addr = (core_num == 0) ? 0x342 :
- 0x343;
- val_addr = (core_num == 0) ? 0x340 :
- 0x341;
- val_mask = (0x1 << 2);
- val_shift = 2;
- break;
- case (0x1 << 7):
-
- en_addr = (core_num == 0) ? 0x342 :
- 0x343;
- val_addr = (core_num == 0) ? 0x340 :
- 0x341;
- val_mask = (0x7 << 8);
- val_shift = 8;
- break;
- case (0x1 << 11):
- en_addr = (core_num == 0) ? 0x342 :
- 0x343;
- val_addr = (core_num == 0) ? 0x340 :
- 0x341;
- val_mask = (0x1 << 14);
- val_shift = 14;
- break;
- case (0x1 << 10):
- en_addr = (core_num == 0) ? 0x342 :
- 0x343;
- val_addr = (core_num == 0) ? 0x340 :
- 0x341;
- val_mask = (0x1 << 13);
- val_shift = 13;
- break;
- case (0x1 << 9):
- en_addr = (core_num == 0) ? 0x342 :
- 0x343;
- val_addr = (core_num == 0) ? 0x340 :
- 0x341;
- val_mask = (0x1 << 12);
- val_shift = 12;
- break;
- case (0x1 << 8):
- en_addr = (core_num == 0) ? 0x342 :
- 0x343;
- val_addr = (core_num == 0) ? 0x340 :
- 0x341;
- val_mask = (0x1 << 11);
- val_shift = 11;
- break;
- case (0x1 << 6):
- en_addr = (core_num == 0) ? 0x342 :
- 0x343;
- val_addr = (core_num == 0) ? 0x340 :
- 0x341;
- val_mask = (0x1 << 6);
- val_shift = 6;
- break;
- case (0x1 << 0):
- en_addr = (core_num == 0) ? 0x342 :
- 0x343;
- val_addr = (core_num == 0) ? 0x340 :
- 0x341;
- val_mask = (0x1 << 0);
- val_shift = 0;
- break;
- default:
- addr = 0xffff;
- break;
- }
- } else if (override_id == NPHY_REV7_RFCTRLOVERRIDE_ID2) {
-
- switch (field) {
- case (0x1 << 3):
- en_addr = (core_num == 0) ? 0x346 :
- 0x347;
- val_addr = (core_num == 0) ? 0x344 :
- 0x345;
- val_mask = (0x1 << 3);
- val_shift = 3;
- break;
- case (0x1 << 1):
- en_addr = (core_num == 0) ? 0x346 :
- 0x347;
- val_addr = (core_num == 0) ? 0x344 :
- 0x345;
- val_mask = (0x1 << 1);
- val_shift = 1;
- break;
- case (0x1 << 0):
- en_addr = (core_num == 0) ? 0x346 :
- 0x347;
- val_addr = (core_num == 0) ? 0x344 :
- 0x345;
- val_mask = (0x1 << 0);
- val_shift = 0;
- break;
- case (0x1 << 2):
- en_addr = (core_num == 0) ? 0x346 :
- 0x347;
- val_addr = (core_num == 0) ? 0x344 :
- 0x345;
- val_mask = (0x1 << 2);
- val_shift = 2;
- break;
- case (0x1 << 4):
- en_addr = (core_num == 0) ? 0x346 :
- 0x347;
- val_addr = (core_num == 0) ? 0x344 :
- 0x345;
- val_mask = (0x1 << 4);
- val_shift = 4;
- break;
- default:
- addr = 0xffff;
- break;
- }
- }
-
- if (off) {
- and_phy_reg(pi, en_addr, ~en_mask);
- and_phy_reg(pi, val_addr, ~val_mask);
- } else {
-
- if ((core_mask == 0)
- || (core_mask & (1 << core_num))) {
- or_phy_reg(pi, en_addr, en_mask);
-
- if (addr != 0xffff) {
- mod_phy_reg(pi, val_addr,
- val_mask,
- (value <<
- val_shift));
- }
- }
- }
- }
- }
-}
-
-static void
-wlc_phy_rfctrl_override_nphy(struct brcms_phy *pi, u16 field, u16 value,
- u8 core_mask, u8 off)
-{
- u8 core_num;
- u16 addr = 0, mask = 0, en_addr = 0, val_addr = 0, en_mask =
- 0, val_mask = 0;
- u8 shift = 0, val_shift = 0;
-
- if (NREV_GE(pi->pubpi.phy_rev, 3) && NREV_LT(pi->pubpi.phy_rev, 7)) {
-
- en_mask = field;
- for (core_num = 0; core_num < 2; core_num++) {
-
- switch (field) {
- case (0x1 << 1):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a : 0x7d;
- val_mask = (0x1 << 0);
- val_shift = 0;
- break;
- case (0x1 << 2):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a : 0x7d;
- val_mask = (0x1 << 1);
- val_shift = 1;
- break;
- case (0x1 << 3):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a : 0x7d;
- val_mask = (0x1 << 2);
- val_shift = 2;
- break;
- case (0x1 << 4):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a : 0x7d;
- val_mask = (0x1 << 4);
- val_shift = 4;
- break;
- case (0x1 << 5):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a : 0x7d;
- val_mask = (0x1 << 5);
- val_shift = 5;
- break;
- case (0x1 << 6):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a : 0x7d;
- val_mask = (0x1 << 6);
- val_shift = 6;
- break;
- case (0x1 << 7):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a : 0x7d;
- val_mask = (0x1 << 7);
- val_shift = 7;
- break;
- case (0x1 << 8):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a : 0x7d;
- val_mask = (0x7 << 8);
- val_shift = 8;
- break;
- case (0x1 << 11):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7a : 0x7d;
- val_mask = (0x7 << 13);
- val_shift = 13;
- break;
-
- case (0x1 << 9):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0xf8 : 0xfa;
- val_mask = (0x7 << 0);
- val_shift = 0;
- break;
-
- case (0x1 << 10):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0xf8 : 0xfa;
- val_mask = (0x7 << 4);
- val_shift = 4;
- break;
-
- case (0x1 << 12):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7b : 0x7e;
- val_mask = (0xffff << 0);
- val_shift = 0;
- break;
- case (0x1 << 13):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0x7c : 0x7f;
- val_mask = (0xffff << 0);
- val_shift = 0;
- break;
- case (0x1 << 14):
- en_addr = (core_num == 0) ? 0xe7 : 0xec;
- val_addr = (core_num == 0) ? 0xf9 : 0xfb;
- val_mask = (0x3 << 6);
- val_shift = 6;
- break;
- case (0x1 << 0):
- en_addr = (core_num == 0) ? 0xe5 : 0xe6;
- val_addr = (core_num == 0) ? 0xf9 : 0xfb;
- val_mask = (0x1 << 15);
- val_shift = 15;
- break;
- default:
- addr = 0xffff;
- break;
- }
-
- if (off) {
- and_phy_reg(pi, en_addr, ~en_mask);
- and_phy_reg(pi, val_addr, ~val_mask);
- } else {
-
- if ((core_mask == 0)
- || (core_mask & (1 << core_num))) {
- or_phy_reg(pi, en_addr, en_mask);
-
- if (addr != 0xffff) {
- mod_phy_reg(pi, val_addr,
- val_mask,
- (value <<
- val_shift));
- }
- }
- }
- }
- } else {
-
- if (off) {
- and_phy_reg(pi, 0xec, ~field);
- value = 0x0;
- } else {
- or_phy_reg(pi, 0xec, field);
- }
-
- for (core_num = 0; core_num < 2; core_num++) {
-
- switch (field) {
- case (0x1 << 1):
- case (0x1 << 9):
- case (0x1 << 12):
- case (0x1 << 13):
- case (0x1 << 14):
- addr = 0x78;
-
- core_mask = 0x1;
- break;
- case (0x1 << 2):
- case (0x1 << 3):
- case (0x1 << 4):
- case (0x1 << 5):
- case (0x1 << 6):
- case (0x1 << 7):
- case (0x1 << 8):
- addr = (core_num == 0) ? 0x7a : 0x7d;
- break;
- case (0x1 << 10):
- addr = (core_num == 0) ? 0x7b : 0x7e;
- break;
- case (0x1 << 11):
- addr = (core_num == 0) ? 0x7c : 0x7f;
- break;
- default:
- addr = 0xffff;
- }
-
- switch (field) {
- case (0x1 << 1):
- mask = (0x7 << 3);
- shift = 3;
- break;
- case (0x1 << 9):
- mask = (0x1 << 2);
- shift = 2;
- break;
- case (0x1 << 12):
- mask = (0x1 << 8);
- shift = 8;
- break;
- case (0x1 << 13):
- mask = (0x1 << 9);
- shift = 9;
- break;
- case (0x1 << 14):
- mask = (0xf << 12);
- shift = 12;
- break;
- case (0x1 << 2):
- mask = (0x1 << 0);
- shift = 0;
- break;
- case (0x1 << 3):
- mask = (0x1 << 1);
- shift = 1;
- break;
- case (0x1 << 4):
- mask = (0x1 << 2);
- shift = 2;
- break;
- case (0x1 << 5):
- mask = (0x3 << 4);
- shift = 4;
- break;
- case (0x1 << 6):
- mask = (0x3 << 6);
- shift = 6;
- break;
- case (0x1 << 7):
- mask = (0x1 << 8);
- shift = 8;
- break;
- case (0x1 << 8):
- mask = (0x1 << 9);
- shift = 9;
- break;
- case (0x1 << 10):
- mask = 0x1fff;
- shift = 0x0;
- break;
- case (0x1 << 11):
- mask = 0x1fff;
- shift = 0x0;
- break;
- default:
- mask = 0x0;
- shift = 0x0;
- break;
- }
-
- if ((addr != 0xffff) && (core_mask & (1 << core_num))) {
- mod_phy_reg(pi, addr, mask, (value << shift));
- }
- }
-
- or_phy_reg(pi, 0xec, (0x1 << 0));
- or_phy_reg(pi, 0x78, (0x1 << 0));
- udelay(1);
- and_phy_reg(pi, 0xec, ~(0x1 << 0));
- }
-}
-
-static void
wlc_phy_rfctrl_override_1tomany_nphy(struct brcms_phy *pi, u16 cmd, u16 value,
u8 core_mask, u8 off)
{
@@ -20201,76 +21619,93 @@ wlc_phy_rfctrl_override_1tomany_nphy(struct brcms_phy *pi, u16 cmd, u16 value,
switch (cmd) {
case NPHY_REV7_RfctrlOverride_cmd_rxrf_pu:
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 5),
- value, core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 4), value,
- core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3), value,
- core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 5),
+ value, core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 4), value,
+ core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 3), value,
+ core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
break;
case NPHY_REV7_RfctrlOverride_cmd_rx_pu:
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 2),
- value, core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 1), value,
- core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 0), value,
- core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 1), value,
- core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID2);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 11), 0,
- core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 2),
+ value, core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 1), value,
+ core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 0), value,
+ core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 1), value,
+ core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID2);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 11), 0,
+ core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
break;
case NPHY_REV7_RfctrlOverride_cmd_tx_pu:
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 2),
- value, core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 1), value,
- core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 0), value,
- core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID2);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 2), value,
- core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID2);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 11), 1,
- core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 2),
+ value, core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 1), value,
+ core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 0), value,
+ core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID2);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 2), value,
+ core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID2);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 11), 1,
+ core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
break;
case NPHY_REV7_RfctrlOverride_cmd_rxgain:
rfmxgain = value & 0x000ff;
lpfgain = value & 0x0ff00;
lpfgain = lpfgain >> 8;
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 11),
- rfmxgain, core_mask,
- off,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x3 << 13),
- lpfgain, core_mask,
- off,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 11),
+ rfmxgain, core_mask,
+ off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x3 << 13),
+ lpfgain, core_mask,
+ off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
break;
case NPHY_REV7_RfctrlOverride_cmd_txgain:
tgain = value & 0x7fff;
lpfgain = value & 0x8000;
lpfgain = lpfgain >> 14;
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 12),
- tgain, core_mask, off,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 13),
- lpfgain, core_mask,
- off,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 12),
+ tgain, core_mask, off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 13),
+ lpfgain, core_mask,
+ off,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
break;
}
}
@@ -20283,137 +21718,164 @@ wlc_phy_scale_offset_rssi_nphy(struct brcms_phy *pi, u16 scale, s8 offset,
u16 valuetostuff;
offset = (offset > NPHY_RSSICAL_MAXREAD) ?
- NPHY_RSSICAL_MAXREAD : offset;
+ NPHY_RSSICAL_MAXREAD : offset;
offset = (offset < (-NPHY_RSSICAL_MAXREAD - 1)) ?
- -NPHY_RSSICAL_MAXREAD - 1 : offset;
+ -NPHY_RSSICAL_MAXREAD - 1 : offset;
valuetostuff = ((scale & 0x3f) << 8) | (offset & 0x3f);
if (((coresel == RADIO_MIMO_CORESEL_CORE1) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_NB)) {
+ (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_NB))
write_phy_reg(pi, 0x1a6, valuetostuff);
- }
+
if (((coresel == RADIO_MIMO_CORESEL_CORE1) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_NB)) {
+ (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_NB))
write_phy_reg(pi, 0x1ac, valuetostuff);
- }
+
if (((coresel == RADIO_MIMO_CORESEL_CORE2) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_NB)) {
+ (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_NB))
write_phy_reg(pi, 0x1b2, valuetostuff);
- }
+
if (((coresel == RADIO_MIMO_CORESEL_CORE2) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_NB)) {
+ (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_NB))
write_phy_reg(pi, 0x1b8, valuetostuff);
- }
if (((coresel == RADIO_MIMO_CORESEL_CORE1) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_W1)) {
+ (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_W1))
write_phy_reg(pi, 0x1a4, valuetostuff);
- }
+
if (((coresel == RADIO_MIMO_CORESEL_CORE1) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_W1)) {
+ (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_W1))
write_phy_reg(pi, 0x1aa, valuetostuff);
- }
+
if (((coresel == RADIO_MIMO_CORESEL_CORE2) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_W1)) {
+ (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_W1))
write_phy_reg(pi, 0x1b0, valuetostuff);
- }
+
if (((coresel == RADIO_MIMO_CORESEL_CORE2) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_W1)) {
+ (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_W1))
write_phy_reg(pi, 0x1b6, valuetostuff);
- }
if (((coresel == RADIO_MIMO_CORESEL_CORE1) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_W2)) {
+ (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_W2))
write_phy_reg(pi, 0x1a5, valuetostuff);
- }
if (((coresel == RADIO_MIMO_CORESEL_CORE1) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_W2)) {
+ (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_W2))
write_phy_reg(pi, 0x1ab, valuetostuff);
- }
+
if (((coresel == RADIO_MIMO_CORESEL_CORE2) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_W2)) {
+ (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_W2))
write_phy_reg(pi, 0x1b1, valuetostuff);
- }
+
if (((coresel == RADIO_MIMO_CORESEL_CORE2) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_W2)) {
+ (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_W2))
write_phy_reg(pi, 0x1b7, valuetostuff);
- }
if (((coresel == RADIO_MIMO_CORESEL_CORE1) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_TBD)) {
+ (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_TBD))
write_phy_reg(pi, 0x1a7, valuetostuff);
- }
if (((coresel == RADIO_MIMO_CORESEL_CORE1) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_TBD)) {
+ (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_TBD))
write_phy_reg(pi, 0x1ad, valuetostuff);
- }
if (((coresel == RADIO_MIMO_CORESEL_CORE2) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_TBD)) {
+ (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_TBD))
write_phy_reg(pi, 0x1b3, valuetostuff);
- }
if (((coresel == RADIO_MIMO_CORESEL_CORE2) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_TBD)) {
+ (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_TBD))
write_phy_reg(pi, 0x1b9, valuetostuff);
- }
if (((coresel == RADIO_MIMO_CORESEL_CORE1) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_IQ)) {
+ (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_IQ))
write_phy_reg(pi, 0x1a8, valuetostuff);
- }
+
if (((coresel == RADIO_MIMO_CORESEL_CORE1) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_IQ)) {
+ (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_IQ))
write_phy_reg(pi, 0x1ae, valuetostuff);
- }
+
if (((coresel == RADIO_MIMO_CORESEL_CORE2) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_IQ)) {
+ (rail == NPHY_RAIL_I) && (rssi_type == NPHY_RSSI_SEL_IQ))
write_phy_reg(pi, 0x1b4, valuetostuff);
- }
+
if (((coresel == RADIO_MIMO_CORESEL_CORE2) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_IQ)) {
+ (rail == NPHY_RAIL_Q) && (rssi_type == NPHY_RSSI_SEL_IQ))
write_phy_reg(pi, 0x1ba, valuetostuff);
- }
if (((coresel == RADIO_MIMO_CORESEL_CORE1) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rssi_type == NPHY_RSSI_SEL_TSSI_2G)) {
+ (rssi_type == NPHY_RSSI_SEL_TSSI_2G))
write_phy_reg(pi, 0x1a9, valuetostuff);
- }
if (((coresel == RADIO_MIMO_CORESEL_CORE2) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rssi_type == NPHY_RSSI_SEL_TSSI_2G)) {
+ (rssi_type == NPHY_RSSI_SEL_TSSI_2G))
write_phy_reg(pi, 0x1b5, valuetostuff);
- }
if (((coresel == RADIO_MIMO_CORESEL_CORE1) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rssi_type == NPHY_RSSI_SEL_TSSI_5G)) {
+ (rssi_type == NPHY_RSSI_SEL_TSSI_5G))
write_phy_reg(pi, 0x1af, valuetostuff);
- }
+
if (((coresel == RADIO_MIMO_CORESEL_CORE2) ||
(coresel == RADIO_MIMO_CORESEL_ALLRX)) &&
- (rssi_type == NPHY_RSSI_SEL_TSSI_5G)) {
+ (rssi_type == NPHY_RSSI_SEL_TSSI_5G))
write_phy_reg(pi, 0x1bb, valuetostuff);
+}
+
+static void brcms_phy_wr_tx_mux(struct brcms_phy *pi, u8 core)
+{
+ if (PHY_IPA(pi)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ write_radio_reg(pi,
+ ((core == PHY_CORE_0) ?
+ RADIO_2057_TX0_TX_SSI_MUX :
+ RADIO_2057_TX1_TX_SSI_MUX),
+ (CHSPEC_IS5G(pi->radio_chanspec) ?
+ 0xc : 0xe));
+ else
+ write_radio_reg(pi,
+ RADIO_2056_TX_TX_SSI_MUX |
+ ((core == PHY_CORE_0) ?
+ RADIO_2056_TX0 : RADIO_2056_TX1),
+ (CHSPEC_IS5G(pi->radio_chanspec) ?
+ 0xc : 0xe));
+ } else {
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ write_radio_reg(pi,
+ ((core == PHY_CORE_0) ?
+ RADIO_2057_TX0_TX_SSI_MUX :
+ RADIO_2057_TX1_TX_SSI_MUX),
+ 0x11);
+
+ if (pi->pubpi.radioid == BCM2057_ID)
+ write_radio_reg(pi,
+ RADIO_2057_IQTEST_SEL_PU, 0x1);
+
+ } else {
+ write_radio_reg(pi,
+ RADIO_2056_TX_TX_SSI_MUX |
+ ((core == PHY_CORE_0) ?
+ RADIO_2056_TX0 : RADIO_2056_TX1),
+ 0x11);
+ }
}
}
@@ -20440,7 +21902,7 @@ void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core_code, u8 rssi_type)
mod_phy_reg(pi, 0xe6, (0x1 << 5), 0);
mask = (0x1 << 2) |
- (0x1 << 3) | (0x1 << 4) | (0x1 << 5);
+ (0x1 << 3) | (0x1 << 4) | (0x1 << 5);
mod_phy_reg(pi, 0xf9, mask, 0);
mod_phy_reg(pi, 0xfb, mask, 0);
@@ -20459,23 +21921,22 @@ void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core_code, u8 rssi_type)
if (rssi_type == NPHY_RSSI_SEL_W1 ||
rssi_type == NPHY_RSSI_SEL_W2 ||
rssi_type == NPHY_RSSI_SEL_NB) {
-
mod_phy_reg(pi,
(core ==
PHY_CORE_0) ? 0xa6 : 0xa7,
(0x3 << 8), 0);
mask = (0x1 << 2) |
- (0x1 << 3) |
- (0x1 << 4) | (0x1 << 5);
+ (0x1 << 3) |
+ (0x1 << 4) | (0x1 << 5);
mod_phy_reg(pi,
(core ==
PHY_CORE_0) ? 0xf9 : 0xfb,
mask, 0);
if (rssi_type == NPHY_RSSI_SEL_W1) {
- if (CHSPEC_IS5G
- (pi->radio_chanspec)) {
+ if (CHSPEC_IS5G(
+ pi->radio_chanspec)) {
mask = (0x1 << 2);
val = 1 << 2;
} else {
@@ -20501,7 +21962,6 @@ void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core_code, u8 rssi_type)
0xe5 : 0xe6, mask, val);
} else {
if (rssi_type == NPHY_RSSI_SEL_TBD) {
-
mask = (0x3 << 8);
val = 1 << 8;
mod_phy_reg(pi,
@@ -20516,7 +21976,6 @@ void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core_code, u8 rssi_type)
: 0xa7, mask, val);
} else if (rssi_type ==
NPHY_RSSI_SEL_IQ) {
-
mask = (0x3 << 8);
val = 2 << 8;
mod_phy_reg(pi,
@@ -20530,7 +21989,6 @@ void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core_code, u8 rssi_type)
PHY_CORE_0) ? 0xa6
: 0xa7, mask, val);
} else {
-
mask = (0x3 << 8);
val = 3 << 8;
mod_phy_reg(pi,
@@ -20543,86 +22001,13 @@ void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core_code, u8 rssi_type)
(core ==
PHY_CORE_0) ? 0xa6
: 0xa7, mask, val);
-
- if (PHY_IPA(pi)) {
- if (NREV_GE
- (pi->pubpi.phy_rev,
- 7)) {
-
- write_radio_reg
- (pi,
- ((core ==
- PHY_CORE_0)
- ?
- RADIO_2057_TX0_TX_SSI_MUX
- :
- RADIO_2057_TX1_TX_SSI_MUX),
- (CHSPEC_IS5G
- (pi->
- radio_chanspec)
- ? 0xc :
- 0xe));
- } else {
- write_radio_reg
- (pi,
- RADIO_2056_TX_TX_SSI_MUX
- |
- ((core ==
- PHY_CORE_0)
- ?
- RADIO_2056_TX0
- :
- RADIO_2056_TX1),
- (CHSPEC_IS5G
- (pi->
- radio_chanspec)
- ? 0xc :
- 0xe));
- }
- } else {
-
- if (NREV_GE
- (pi->pubpi.phy_rev,
- 7)) {
- write_radio_reg
- (pi,
- ((core ==
- PHY_CORE_0)
- ?
- RADIO_2057_TX0_TX_SSI_MUX
- :
- RADIO_2057_TX1_TX_SSI_MUX),
- 0x11);
-
- if (pi->pubpi.
- radioid ==
- BCM2057_ID)
- write_radio_reg
- (pi,
- RADIO_2057_IQTEST_SEL_PU,
- 0x1);
-
- } else {
- write_radio_reg
- (pi,
- RADIO_2056_TX_TX_SSI_MUX
- |
- ((core ==
- PHY_CORE_0)
- ?
- RADIO_2056_TX0
- :
- RADIO_2056_TX1),
- 0x11);
- }
- }
-
+ brcms_phy_wr_tx_mux(pi, core);
afectrlovr_rssi_val = 1 << 9;
mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x8f
- : 0xa5, (0x1 << 9),
- afectrlovr_rssi_val);
+ (core ==
+ PHY_CORE_0) ? 0x8f
+ : 0xa5, (0x1 << 9),
+ afectrlovr_rssi_val);
}
}
}
@@ -20631,19 +22016,15 @@ void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core_code, u8 rssi_type)
if ((rssi_type == NPHY_RSSI_SEL_W1) ||
(rssi_type == NPHY_RSSI_SEL_W2) ||
- (rssi_type == NPHY_RSSI_SEL_NB)) {
-
+ (rssi_type == NPHY_RSSI_SEL_NB))
val = 0x0;
- } else if (rssi_type == NPHY_RSSI_SEL_TBD) {
-
+ else if (rssi_type == NPHY_RSSI_SEL_TBD)
val = 0x1;
- } else if (rssi_type == NPHY_RSSI_SEL_IQ) {
-
+ else if (rssi_type == NPHY_RSSI_SEL_IQ)
val = 0x2;
- } else {
-
+ else
val = 0x3;
- }
+
mask = ((0x3 << 12) | (0x3 << 14));
val = (val << 12) | (val << 14);
mod_phy_reg(pi, 0xa6, mask, val);
@@ -20652,15 +22033,13 @@ void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core_code, u8 rssi_type)
if ((rssi_type == NPHY_RSSI_SEL_W1) ||
(rssi_type == NPHY_RSSI_SEL_W2) ||
(rssi_type == NPHY_RSSI_SEL_NB)) {
- if (rssi_type == NPHY_RSSI_SEL_W1) {
+ if (rssi_type == NPHY_RSSI_SEL_W1)
val = 0x1;
- }
- if (rssi_type == NPHY_RSSI_SEL_W2) {
+ if (rssi_type == NPHY_RSSI_SEL_W2)
val = 0x2;
- }
- if (rssi_type == NPHY_RSSI_SEL_NB) {
+ if (rssi_type == NPHY_RSSI_SEL_NB)
val = 0x3;
- }
+
mask = (0x3 << 4);
val = (val << 4);
mod_phy_reg(pi, 0x7a, mask, val);
@@ -20698,16 +22077,16 @@ void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core_code, u8 rssi_type)
(rssi_type == NPHY_RSSI_SEL_NB)) {
rfctrlcmd_mask = ((0x1 << 8) | (0x7 << 3));
rfctrlcmd_val = (rfctrlcmd_rxen_val << 8) |
- (rfctrlcmd_coresel_val << 3);
+ (rfctrlcmd_coresel_val << 3);
rfctrlovr_mask = ((0x1 << 5) |
(0x1 << 12) |
(0x1 << 1) | (0x1 << 0));
rfctrlovr_val = (rfctrlovr_rssi_val <<
5) |
- (rfctrlovr_rxen_val << 12) |
- (rfctrlovr_coresel_val << 1) |
- (rfctrlovr_trigger_val << 0);
+ (rfctrlovr_rxen_val << 12) |
+ (rfctrlovr_coresel_val << 1) |
+ (rfctrlovr_trigger_val << 0);
mod_phy_reg(pi, 0x78, rfctrlcmd_mask, rfctrlcmd_val);
mod_phy_reg(pi, 0xec, rfctrlovr_mask, rfctrlovr_val);
@@ -20762,13 +22141,11 @@ wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type, s32 *rssi_buf,
wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_ALLRX, rssi_type);
gpiosel_orig = read_phy_reg(pi, 0xca);
- if (NREV_LT(pi->pubpi.phy_rev, 2)) {
+ if (NREV_LT(pi->pubpi.phy_rev, 2))
write_phy_reg(pi, 0xca, 5);
- }
- for (ctr = 0; ctr < 4; ctr++) {
+ for (ctr = 0; ctr < 4; ctr++)
rssi_buf[ctr] = 0;
- }
for (samp = 0; samp < nsamps; samp++) {
if (NREV_LT(pi->pubpi.phy_rev, 2)) {
@@ -20785,9 +22162,8 @@ wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type, s32 *rssi_buf,
tmp_buf[ctr++] = ((s8) ((rssi1 & 0x3f) << 2)) >> 2;
tmp_buf[ctr++] = ((s8) (((rssi1 >> 8) & 0x3f) << 2)) >> 2;
- for (ctr = 0; ctr < 4; ctr++) {
+ for (ctr = 0; ctr < 4; ctr++)
rssi_buf[ctr] += tmp_buf[ctr];
- }
}
@@ -20796,9 +22172,8 @@ wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type, s32 *rssi_buf,
rssi_out_val |= (rssi_buf[1] & 0xff) << 16;
rssi_out_val |= (rssi_buf[0] & 0xff) << 24;
- if (NREV_LT(pi->pubpi.phy_rev, 2)) {
+ if (NREV_LT(pi->pubpi.phy_rev, 2))
write_phy_reg(pi, 0xca, gpiosel_orig);
- }
write_phy_reg(pi, 0xa6, afectrlCore1_save);
write_phy_reg(pi, 0xa7, afectrlCore2_save);
@@ -20848,7 +22223,7 @@ s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
u16 tempsense_Rcal;
syn_tempprocsense_save =
- read_radio_reg(pi, RADIO_2057_TEMPSENSE_CONFIG);
+ read_radio_reg(pi, RADIO_2057_TEMPSENSE_CONFIG);
afectrlCore1_save = read_phy_reg(pi, 0xa6);
afectrlCore2_save = read_phy_reg(pi, 0xa7);
@@ -20982,7 +22357,7 @@ s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
} else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
syn_tempprocsense_save =
- read_radio_reg(pi, RADIO_2056_SYN_TEMPPROCSENSE);
+ read_radio_reg(pi, RADIO_2056_SYN_TEMPPROCSENSE);
afectrlCore1_save = read_phy_reg(pi, 0xa6);
afectrlCore2_save = read_phy_reg(pi, 0xa7);
@@ -20997,14 +22372,13 @@ s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
write_radio_reg(pi, RADIO_2056_SYN_TEMPPROCSENSE, 0x05);
wlc_phy_poll_rssi_nphy(pi, NPHY_RSSI_SEL_IQ, radio_temp2, 1);
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
write_radio_reg(pi, RADIO_2057_TEMPSENSE_CONFIG, 0x01);
- } else {
+ else
write_radio_reg(pi, RADIO_2056_SYN_TEMPPROCSENSE, 0x01);
- }
radio_temp[0] =
- (126 * (radio_temp[1] + radio_temp2[1]) + 3987) / 64;
+ (126 * (radio_temp[1] + radio_temp2[1]) + 3987) / 64;
write_radio_reg(pi, RADIO_2056_SYN_TEMPPROCSENSE,
syn_tempprocsense_save);
@@ -21019,17 +22393,17 @@ s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
} else {
pwrdet_rxtx_core1_save =
- read_radio_reg(pi, RADIO_2055_PWRDET_RXTX_CORE1);
+ read_radio_reg(pi, RADIO_2055_PWRDET_RXTX_CORE1);
pwrdet_rxtx_core2_save =
- read_radio_reg(pi, RADIO_2055_PWRDET_RXTX_CORE2);
+ read_radio_reg(pi, RADIO_2055_PWRDET_RXTX_CORE2);
core1_txrf_iqcal1_save =
- read_radio_reg(pi, RADIO_2055_CORE1_TXRF_IQCAL1);
+ read_radio_reg(pi, RADIO_2055_CORE1_TXRF_IQCAL1);
core1_txrf_iqcal2_save =
- read_radio_reg(pi, RADIO_2055_CORE1_TXRF_IQCAL2);
+ read_radio_reg(pi, RADIO_2055_CORE1_TXRF_IQCAL2);
core2_txrf_iqcal1_save =
- read_radio_reg(pi, RADIO_2055_CORE2_TXRF_IQCAL1);
+ read_radio_reg(pi, RADIO_2055_CORE2_TXRF_IQCAL1);
core2_txrf_iqcal2_save =
- read_radio_reg(pi, RADIO_2055_CORE2_TXRF_IQCAL2);
+ read_radio_reg(pi, RADIO_2055_CORE2_TXRF_IQCAL2);
pd_pll_ts_save = read_radio_reg(pi, RADIO_2055_PD_PLL_TS);
afectrlCore1_save = read_phy_reg(pi, 0xa6);
@@ -21060,11 +22434,12 @@ s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
radio_temp[3] = (radio_temp[3] + radio_temp2[3]);
radio_temp[0] =
- (radio_temp[0] + radio_temp[1] + radio_temp[2] +
- radio_temp[3]);
+ (radio_temp[0] + radio_temp[1] + radio_temp[2] +
+ radio_temp[3]);
radio_temp[0] =
- (radio_temp[0] + (8 * 32)) * (950 - 350) / 63 + (350 * 8);
+ (radio_temp[0] +
+ (8 * 32)) * (950 - 350) / 63 + (350 * 8);
radio_temp[0] = (radio_temp[0] - (8 * 420)) / 38;
@@ -21126,521 +22501,24 @@ wlc_phy_set_rssi_2055_vcm(struct brcms_phy *pi, u8 rssi_type, u8 *vcm_buf)
RADIO_2055_NBRSSI_VCM_Q_SHIFT);
}
} else {
-
- if (core == PHY_CORE_0) {
+ if (core == PHY_CORE_0)
mod_radio_reg(pi,
RADIO_2055_CORE1_RXBB_RSSI_CTRL5,
RADIO_2055_WBRSSI_VCM_IQ_MASK,
vcm_buf[2 *
core] <<
RADIO_2055_WBRSSI_VCM_IQ_SHIFT);
- } else {
+ else
mod_radio_reg(pi,
RADIO_2055_CORE2_RXBB_RSSI_CTRL5,
RADIO_2055_WBRSSI_VCM_IQ_MASK,
vcm_buf[2 *
core] <<
RADIO_2055_WBRSSI_VCM_IQ_SHIFT);
- }
}
}
}
-void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi)
-{
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
-
- wlc_phy_rssi_cal_nphy_rev3(pi);
- } else {
- wlc_phy_rssi_cal_nphy_rev2(pi, NPHY_RSSI_SEL_NB);
- wlc_phy_rssi_cal_nphy_rev2(pi, NPHY_RSSI_SEL_W1);
- wlc_phy_rssi_cal_nphy_rev2(pi, NPHY_RSSI_SEL_W2);
- }
-}
-
-static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type)
-{
- s32 target_code;
- u16 classif_state;
- u16 clip_state[2];
- u16 rssi_ctrl_state[2], pd_state[2];
- u16 rfctrlintc_state[2], rfpdcorerxtx_state[2];
- u16 rfctrlintc_override_val;
- u16 clip_off[] = { 0xffff, 0xffff };
- u16 rf_pd_val, pd_mask, rssi_ctrl_mask;
- u8 vcm, min_vcm, vcm_tmp[4];
- u8 vcm_final[4] = { 0, 0, 0, 0 };
- u8 result_idx, ctr;
- s32 poll_results[4][4] = {
- {0, 0, 0, 0},
- {0, 0, 0, 0},
- {0, 0, 0, 0},
- {0, 0, 0, 0}
- };
- s32 poll_miniq[4][2] = {
- {0, 0},
- {0, 0},
- {0, 0},
- {0, 0}
- };
- s32 min_d, curr_d;
- s32 fine_digital_offset[4];
- s32 poll_results_min[4] = { 0, 0, 0, 0 };
- s32 min_poll;
-
- switch (rssi_type) {
- case NPHY_RSSI_SEL_NB:
- target_code = NPHY_RSSICAL_NB_TARGET;
- break;
- case NPHY_RSSI_SEL_W1:
- target_code = NPHY_RSSICAL_W1_TARGET;
- break;
- case NPHY_RSSI_SEL_W2:
- target_code = NPHY_RSSICAL_W2_TARGET;
- break;
- default:
- return;
- break;
- }
-
- classif_state = wlc_phy_classifier_nphy(pi, 0, 0);
- wlc_phy_classifier_nphy(pi, (0x7 << 0), 4);
- wlc_phy_clip_det_nphy(pi, 0, clip_state);
- wlc_phy_clip_det_nphy(pi, 1, clip_off);
-
- rf_pd_val = (rssi_type == NPHY_RSSI_SEL_NB) ? 0x6 : 0x4;
- rfctrlintc_override_val =
- CHSPEC_IS5G(pi->radio_chanspec) ? 0x140 : 0x110;
-
- rfctrlintc_state[0] = read_phy_reg(pi, 0x91);
- rfpdcorerxtx_state[0] = read_radio_reg(pi, RADIO_2055_PD_CORE1_RXTX);
- write_phy_reg(pi, 0x91, rfctrlintc_override_val);
- write_radio_reg(pi, RADIO_2055_PD_CORE1_RXTX, rf_pd_val);
-
- rfctrlintc_state[1] = read_phy_reg(pi, 0x92);
- rfpdcorerxtx_state[1] = read_radio_reg(pi, RADIO_2055_PD_CORE2_RXTX);
- write_phy_reg(pi, 0x92, rfctrlintc_override_val);
- write_radio_reg(pi, RADIO_2055_PD_CORE2_RXTX, rf_pd_val);
-
- pd_mask = RADIO_2055_NBRSSI_PD | RADIO_2055_WBRSSI_G1_PD |
- RADIO_2055_WBRSSI_G2_PD;
- pd_state[0] =
- read_radio_reg(pi, RADIO_2055_PD_CORE1_RSSI_MISC) & pd_mask;
- pd_state[1] =
- read_radio_reg(pi, RADIO_2055_PD_CORE2_RSSI_MISC) & pd_mask;
- mod_radio_reg(pi, RADIO_2055_PD_CORE1_RSSI_MISC, pd_mask, 0);
- mod_radio_reg(pi, RADIO_2055_PD_CORE2_RSSI_MISC, pd_mask, 0);
- rssi_ctrl_mask = RADIO_2055_NBRSSI_SEL | RADIO_2055_WBRSSI_G1_SEL |
- RADIO_2055_WBRSSI_G2_SEL;
- rssi_ctrl_state[0] =
- read_radio_reg(pi, RADIO_2055_SP_RSSI_CORE1) & rssi_ctrl_mask;
- rssi_ctrl_state[1] =
- read_radio_reg(pi, RADIO_2055_SP_RSSI_CORE2) & rssi_ctrl_mask;
- wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_ALLRX, rssi_type);
-
- wlc_phy_scale_offset_rssi_nphy(pi, 0x0, 0x0, RADIO_MIMO_CORESEL_ALLRX,
- NPHY_RAIL_I, rssi_type);
- wlc_phy_scale_offset_rssi_nphy(pi, 0x0, 0x0, RADIO_MIMO_CORESEL_ALLRX,
- NPHY_RAIL_Q, rssi_type);
-
- for (vcm = 0; vcm < 4; vcm++) {
-
- vcm_tmp[0] = vcm_tmp[1] = vcm_tmp[2] = vcm_tmp[3] = vcm;
- if (rssi_type != NPHY_RSSI_SEL_W2) {
- wlc_phy_set_rssi_2055_vcm(pi, rssi_type, vcm_tmp);
- }
-
- wlc_phy_poll_rssi_nphy(pi, rssi_type, &poll_results[vcm][0],
- NPHY_RSSICAL_NPOLL);
-
- if ((rssi_type == NPHY_RSSI_SEL_W1)
- || (rssi_type == NPHY_RSSI_SEL_W2)) {
- for (ctr = 0; ctr < 2; ctr++) {
- poll_miniq[vcm][ctr] =
- min(poll_results[vcm][ctr * 2 + 0],
- poll_results[vcm][ctr * 2 + 1]);
- }
- }
- }
-
- for (result_idx = 0; result_idx < 4; result_idx++) {
- min_d = NPHY_RSSICAL_MAXD;
- min_vcm = 0;
- min_poll = NPHY_RSSICAL_MAXREAD * NPHY_RSSICAL_NPOLL + 1;
- for (vcm = 0; vcm < 4; vcm++) {
- curr_d = ABS(((rssi_type == NPHY_RSSI_SEL_NB) ?
- poll_results[vcm][result_idx] :
- poll_miniq[vcm][result_idx / 2]) -
- (target_code * NPHY_RSSICAL_NPOLL));
- if (curr_d < min_d) {
- min_d = curr_d;
- min_vcm = vcm;
- }
- if (poll_results[vcm][result_idx] < min_poll) {
- min_poll = poll_results[vcm][result_idx];
- }
- }
- vcm_final[result_idx] = min_vcm;
- poll_results_min[result_idx] = min_poll;
- }
-
- if (rssi_type != NPHY_RSSI_SEL_W2) {
- wlc_phy_set_rssi_2055_vcm(pi, rssi_type, vcm_final);
- }
-
- for (result_idx = 0; result_idx < 4; result_idx++) {
- fine_digital_offset[result_idx] =
- (target_code * NPHY_RSSICAL_NPOLL) -
- poll_results[vcm_final[result_idx]][result_idx];
- if (fine_digital_offset[result_idx] < 0) {
- fine_digital_offset[result_idx] =
- ABS(fine_digital_offset[result_idx]);
- fine_digital_offset[result_idx] +=
- (NPHY_RSSICAL_NPOLL / 2);
- fine_digital_offset[result_idx] /= NPHY_RSSICAL_NPOLL;
- fine_digital_offset[result_idx] =
- -fine_digital_offset[result_idx];
- } else {
- fine_digital_offset[result_idx] +=
- (NPHY_RSSICAL_NPOLL / 2);
- fine_digital_offset[result_idx] /= NPHY_RSSICAL_NPOLL;
- }
-
- if (poll_results_min[result_idx] ==
- NPHY_RSSICAL_MAXREAD * NPHY_RSSICAL_NPOLL) {
- fine_digital_offset[result_idx] =
- (target_code - NPHY_RSSICAL_MAXREAD - 1);
- }
-
- wlc_phy_scale_offset_rssi_nphy(pi, 0x0,
- (s8)
- fine_digital_offset[result_idx],
- (result_idx / 2 ==
- 0) ? RADIO_MIMO_CORESEL_CORE1 :
- RADIO_MIMO_CORESEL_CORE2,
- (result_idx % 2 ==
- 0) ? NPHY_RAIL_I : NPHY_RAIL_Q,
- rssi_type);
- }
-
- mod_radio_reg(pi, RADIO_2055_PD_CORE1_RSSI_MISC, pd_mask, pd_state[0]);
- mod_radio_reg(pi, RADIO_2055_PD_CORE2_RSSI_MISC, pd_mask, pd_state[1]);
- if (rssi_ctrl_state[0] == RADIO_2055_NBRSSI_SEL) {
- wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
- NPHY_RSSI_SEL_NB);
- } else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G1_SEL) {
- wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
- NPHY_RSSI_SEL_W1);
- } else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G2_SEL) {
- wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
- NPHY_RSSI_SEL_W2);
- } else {
- wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
- NPHY_RSSI_SEL_W2);
- }
- if (rssi_ctrl_state[1] == RADIO_2055_NBRSSI_SEL) {
- wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
- NPHY_RSSI_SEL_NB);
- } else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G1_SEL) {
- wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
- NPHY_RSSI_SEL_W1);
- } else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G2_SEL) {
- wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
- NPHY_RSSI_SEL_W2);
- } else {
- wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
- NPHY_RSSI_SEL_W2);
- }
-
- wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_OFF, rssi_type);
-
- write_phy_reg(pi, 0x91, rfctrlintc_state[0]);
- write_radio_reg(pi, RADIO_2055_PD_CORE1_RXTX, rfpdcorerxtx_state[0]);
- write_phy_reg(pi, 0x92, rfctrlintc_state[1]);
- write_radio_reg(pi, RADIO_2055_PD_CORE2_RXTX, rfpdcorerxtx_state[1]);
-
- wlc_phy_classifier_nphy(pi, (0x7 << 0), classif_state);
- wlc_phy_clip_det_nphy(pi, 1, clip_state);
-
- wlc_phy_resetcca_nphy(pi);
-}
-
-int
-wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct brcms_d11rxhdr *wlc_rxh)
-{
- struct d11rxhdr *rxh = &wlc_rxh->rxhdr;
- s16 rxpwr, rxpwr0, rxpwr1;
- s16 phyRx0_l, phyRx2_l;
-
- rxpwr = 0;
- rxpwr0 = le16_to_cpu(rxh->PhyRxStatus_1) & PRXS1_nphy_PWR0_MASK;
- rxpwr1 = (le16_to_cpu(rxh->PhyRxStatus_1) & PRXS1_nphy_PWR1_MASK) >> 8;
-
- if (rxpwr0 > 127)
- rxpwr0 -= 256;
- if (rxpwr1 > 127)
- rxpwr1 -= 256;
-
- phyRx0_l = le16_to_cpu(rxh->PhyRxStatus_0) & 0x00ff;
- phyRx2_l = le16_to_cpu(rxh->PhyRxStatus_2) & 0x00ff;
- if (phyRx2_l > 127)
- phyRx2_l -= 256;
-
- if (((rxpwr0 == 16) || (rxpwr0 == 32))) {
- rxpwr0 = rxpwr1;
- rxpwr1 = phyRx2_l;
- }
-
- wlc_rxh->rxpwr[0] = (s8) rxpwr0;
- wlc_rxh->rxpwr[1] = (s8) rxpwr1;
- wlc_rxh->do_rssi_ma = 0;
-
- if (pi->sh->rssi_mode == RSSI_ANT_MERGE_MAX)
- rxpwr = (rxpwr0 > rxpwr1) ? rxpwr0 : rxpwr1;
- else if (pi->sh->rssi_mode == RSSI_ANT_MERGE_MIN)
- rxpwr = (rxpwr0 < rxpwr1) ? rxpwr0 : rxpwr1;
- else if (pi->sh->rssi_mode == RSSI_ANT_MERGE_AVG)
- rxpwr = (rxpwr0 + rxpwr1) >> 1;
-
- return rxpwr;
-}
-
-static void
-wlc_phy_rfctrlintc_override_nphy(struct brcms_phy *pi, u8 field, u16 value,
- u8 core_code)
-{
- u16 mask;
- u16 val;
- u8 core;
-
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- for (core = 0; core < pi->pubpi.phy_corenum; core++) {
- if (core_code == RADIO_MIMO_CORESEL_CORE1
- && core == PHY_CORE_1)
- continue;
- else if (core_code == RADIO_MIMO_CORESEL_CORE2
- && core == PHY_CORE_0)
- continue;
-
- if (NREV_LT(pi->pubpi.phy_rev, 7)) {
-
- mask = (0x1 << 10);
- val = 1 << 10;
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x91 :
- 0x92, mask, val);
- }
-
- if (field == NPHY_RfctrlIntc_override_OFF) {
-
- write_phy_reg(pi, (core == PHY_CORE_0) ? 0x91 :
- 0x92, 0);
-
- wlc_phy_force_rfseq_nphy(pi,
- NPHY_RFSEQ_RESET2RX);
- } else if (field == NPHY_RfctrlIntc_override_TRSW) {
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
-
- mask = (0x1 << 6) | (0x1 << 7);
-
- val = value << 6;
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91 : 0x92,
- mask, val);
-
- or_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91 : 0x92,
- (0x1 << 10));
-
- and_phy_reg(pi, 0x2ff, (u16)
- ~(0x3 << 14));
- or_phy_reg(pi, 0x2ff, (0x1 << 13));
- or_phy_reg(pi, 0x2ff, (0x1 << 0));
- } else {
-
- mask = (0x1 << 6) |
- (0x1 << 7) |
- (0x1 << 8) | (0x1 << 9);
- val = value << 6;
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91 : 0x92,
- mask, val);
-
- mask = (0x1 << 0);
- val = 1 << 0;
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0xe7 : 0xec,
- mask, val);
-
- mask = (core == PHY_CORE_0) ? (0x1 << 0)
- : (0x1 << 1);
- val = 1 << ((core == PHY_CORE_0) ?
- 0 : 1);
- mod_phy_reg(pi, 0x78, mask, val);
-
- SPINWAIT(((read_phy_reg(pi, 0x78) & val)
- != 0), 10000);
- if (WARN(read_phy_reg(pi, 0x78) & val,
- "HW error: override failed"))
- return;
-
- mask = (0x1 << 0);
- val = 0 << 0;
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0xe7 : 0xec,
- mask, val);
- }
- } else if (field == NPHY_RfctrlIntc_override_PA) {
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
-
- mask = (0x1 << 4) | (0x1 << 5);
-
- if (CHSPEC_IS5G(pi->radio_chanspec)) {
- val = value << 5;
- } else {
- val = value << 4;
- }
-
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91 : 0x92,
- mask, val);
-
- or_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91 : 0x92,
- (0x1 << 12));
- } else {
-
- if (CHSPEC_IS5G(pi->radio_chanspec)) {
- mask = (0x1 << 5);
- val = value << 5;
- } else {
- mask = (0x1 << 4);
- val = value << 4;
- }
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91 : 0x92,
- mask, val);
- }
- } else if (field == NPHY_RfctrlIntc_override_EXT_LNA_PU) {
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- if (CHSPEC_IS5G(pi->radio_chanspec)) {
-
- mask = (0x1 << 0);
- val = value << 0;
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91
- : 0x92, mask, val);
-
- mask = (0x1 << 2);
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91
- : 0x92, mask, 0);
- } else {
-
- mask = (0x1 << 2);
- val = value << 2;
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91
- : 0x92, mask, val);
-
- mask = (0x1 << 0);
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91
- : 0x92, mask, 0);
- }
-
- mask = (0x1 << 11);
- val = 1 << 11;
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91 : 0x92,
- mask, val);
- } else {
-
- if (CHSPEC_IS5G(pi->radio_chanspec)) {
- mask = (0x1 << 0);
- val = value << 0;
- } else {
- mask = (0x1 << 2);
- val = value << 2;
- }
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91 : 0x92,
- mask, val);
- }
- } else if (field ==
- NPHY_RfctrlIntc_override_EXT_LNA_GAIN) {
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- if (CHSPEC_IS5G(pi->radio_chanspec)) {
-
- mask = (0x1 << 1);
- val = value << 1;
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91
- : 0x92, mask, val);
-
- mask = (0x1 << 3);
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91
- : 0x92, mask, 0);
- } else {
-
- mask = (0x1 << 3);
- val = value << 3;
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91
- : 0x92, mask, val);
-
- mask = (0x1 << 1);
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91
- : 0x92, mask, 0);
- }
-
- mask = (0x1 << 11);
- val = 1 << 11;
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91 : 0x92,
- mask, val);
- } else {
-
- if (CHSPEC_IS5G(pi->radio_chanspec)) {
- mask = (0x1 << 1);
- val = value << 1;
- } else {
- mask = (0x1 << 3);
- val = value << 3;
- }
- mod_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0x91 : 0x92,
- mask, val);
- }
- }
- }
- } else {
- return;
- }
-}
-
static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi)
{
u16 classif_state;
@@ -21683,10 +22561,14 @@ static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi)
u16 NPHY_REV7_RfctrlMiscReg3_save, NPHY_REV7_RfctrlMiscReg4_save;
u16 NPHY_REV7_RfctrlMiscReg5_save, NPHY_REV7_RfctrlMiscReg6_save;
- NPHY_REV7_RfctrlOverride3_save = NPHY_REV7_RfctrlOverride4_save =
- NPHY_REV7_RfctrlOverride5_save = NPHY_REV7_RfctrlOverride6_save =
- NPHY_REV7_RfctrlMiscReg3_save = NPHY_REV7_RfctrlMiscReg4_save =
- NPHY_REV7_RfctrlMiscReg5_save = NPHY_REV7_RfctrlMiscReg6_save = 0;
+ NPHY_REV7_RfctrlOverride3_save =
+ NPHY_REV7_RfctrlOverride4_save =
+ NPHY_REV7_RfctrlOverride5_save =
+ NPHY_REV7_RfctrlOverride6_save =
+ NPHY_REV7_RfctrlMiscReg3_save =
+ NPHY_REV7_RfctrlMiscReg4_save =
+ NPHY_REV7_RfctrlMiscReg5_save =
+ NPHY_REV7_RfctrlMiscReg6_save = 0;
classif_state = wlc_phy_classifier_nphy(pi, 0, 0);
wlc_phy_classifier_nphy(pi, (0x7 << 0), 4);
@@ -21726,21 +22608,21 @@ static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi)
wlc_phy_rfctrlintc_override_nphy(pi, NPHY_RfctrlIntc_override_TRSW, 1,
RADIO_MIMO_CORESEL_ALLRXTX);
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- wlc_phy_rfctrl_override_1tomany_nphy(pi,
- NPHY_REV7_RfctrlOverride_cmd_rxrf_pu,
- 0, 0, 0);
- } else {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ wlc_phy_rfctrl_override_1tomany_nphy(
+ pi,
+ NPHY_REV7_RfctrlOverride_cmd_rxrf_pu,
+ 0, 0, 0);
+ else
wlc_phy_rfctrl_override_nphy(pi, (0x1 << 0), 0, 0, 0);
- }
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- wlc_phy_rfctrl_override_1tomany_nphy(pi,
- NPHY_REV7_RfctrlOverride_cmd_rx_pu,
- 1, 0, 0);
- } else {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ wlc_phy_rfctrl_override_1tomany_nphy(
+ pi,
+ NPHY_REV7_RfctrlOverride_cmd_rx_pu,
+ 1, 0, 0);
+ else
wlc_phy_rfctrl_override_nphy(pi, (0x1 << 1), 1, 0, 0);
- }
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 7),
@@ -21755,12 +22637,14 @@ static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi)
if (CHSPEC_IS5G(pi->radio_chanspec)) {
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 5),
- 0, 0, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 4), 1, 0,
- 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 5),
+ 0, 0, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 4), 1, 0,
+ 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
} else {
wlc_phy_rfctrl_override_nphy(pi, (0x1 << 5), 0, 0, 0);
wlc_phy_rfctrl_override_nphy(pi, (0x1 << 4), 1, 0, 0);
@@ -21768,12 +22652,14 @@ static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi)
} else {
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 4),
- 0, 0, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 5), 1, 0,
- 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 4),
+ 0, 0, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 5), 1, 0,
+ 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
} else {
wlc_phy_rfctrl_override_nphy(pi, (0x1 << 4), 0, 0, 0);
wlc_phy_rfctrl_override_nphy(pi, (0x1 << 5), 1, 0, 0);
@@ -21781,7 +22667,7 @@ static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi)
}
rxcore_state = wlc_phy_rxcore_getstate_nphy(
- (struct brcms_phy_pub *) pi);
+ (struct brcms_phy_pub *) pi);
vcm_level_max = 8;
@@ -21804,21 +22690,18 @@ static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi)
NPHY_RAIL_Q, NPHY_RSSI_SEL_NB);
for (vcm = 0; vcm < vcm_level_max; vcm++) {
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
-
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
mod_radio_reg(pi, (core == PHY_CORE_0) ?
RADIO_2057_NB_MASTER_CORE0 :
RADIO_2057_NB_MASTER_CORE1,
RADIO_2057_VCM_MASK, vcm);
- } else {
-
+ else
mod_radio_reg(pi, RADIO_2056_RX_RSSI_MISC |
((core ==
PHY_CORE_0) ? RADIO_2056_RX0 :
RADIO_2056_RX1),
RADIO_2056_VCM_MASK,
vcm << RADIO_2056_RSSI_VCM_SHIFT);
- }
wlc_phy_poll_rssi_nphy(pi, NPHY_RSSI_SEL_NB,
&poll_results[vcm][0],
@@ -21826,92 +22709,90 @@ static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi)
}
for (result_idx = 0; result_idx < 4; result_idx++) {
- if ((core == result_idx / 2) && (result_idx % 2 == 0)) {
+ if ((core == result_idx / 2) &&
+ (result_idx % 2 == 0)) {
min_d = NPHY_RSSICAL_MAXD;
min_vcm = 0;
min_poll =
- NPHY_RSSICAL_MAXREAD * NPHY_RSSICAL_NPOLL +
- 1;
+ NPHY_RSSICAL_MAXREAD *
+ NPHY_RSSICAL_NPOLL + 1;
for (vcm = 0; vcm < vcm_level_max; vcm++) {
- curr_d = poll_results[vcm][result_idx] *
- poll_results[vcm][result_idx] +
- poll_results[vcm][result_idx + 1] *
- poll_results[vcm][result_idx + 1];
+ curr_d =
+ poll_results[vcm][result_idx] *
+ poll_results[vcm][result_idx] +
+ poll_results[vcm][result_idx +
+ 1] *
+ poll_results[vcm][result_idx +
+ 1];
if (curr_d < min_d) {
min_d = curr_d;
min_vcm = vcm;
}
if (poll_results[vcm][result_idx] <
- min_poll) {
+ min_poll)
min_poll =
- poll_results[vcm]
- [result_idx];
- }
+ poll_results[vcm]
+ [result_idx];
}
vcm_final = min_vcm;
poll_results_min[result_idx] = min_poll;
}
}
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
mod_radio_reg(pi, (core == PHY_CORE_0) ?
RADIO_2057_NB_MASTER_CORE0 :
RADIO_2057_NB_MASTER_CORE1,
RADIO_2057_VCM_MASK, vcm_final);
- } else {
+ else
mod_radio_reg(pi, RADIO_2056_RX_RSSI_MISC |
((core ==
PHY_CORE_0) ? RADIO_2056_RX0 :
RADIO_2056_RX1), RADIO_2056_VCM_MASK,
vcm_final << RADIO_2056_RSSI_VCM_SHIFT);
- }
for (result_idx = 0; result_idx < 4; result_idx++) {
if (core == result_idx / 2) {
fine_digital_offset[result_idx] =
- (NPHY_RSSICAL_NB_TARGET *
- NPHY_RSSICAL_NPOLL) -
- poll_results[vcm_final][result_idx];
+ (NPHY_RSSICAL_NB_TARGET *
+ NPHY_RSSICAL_NPOLL) -
+ poll_results[vcm_final][result_idx];
if (fine_digital_offset[result_idx] < 0) {
fine_digital_offset[result_idx] =
- ABS(fine_digital_offset
- [result_idx]);
+ abs(fine_digital_offset
+ [result_idx]);
fine_digital_offset[result_idx] +=
- (NPHY_RSSICAL_NPOLL / 2);
+ (NPHY_RSSICAL_NPOLL / 2);
fine_digital_offset[result_idx] /=
- NPHY_RSSICAL_NPOLL;
+ NPHY_RSSICAL_NPOLL;
fine_digital_offset[result_idx] =
- -fine_digital_offset[result_idx];
+ -fine_digital_offset[
+ result_idx];
} else {
fine_digital_offset[result_idx] +=
- (NPHY_RSSICAL_NPOLL / 2);
+ (NPHY_RSSICAL_NPOLL / 2);
fine_digital_offset[result_idx] /=
- NPHY_RSSICAL_NPOLL;
+ NPHY_RSSICAL_NPOLL;
}
if (poll_results_min[result_idx] ==
- NPHY_RSSICAL_MAXREAD * NPHY_RSSICAL_NPOLL) {
+ NPHY_RSSICAL_MAXREAD * NPHY_RSSICAL_NPOLL)
fine_digital_offset[result_idx] =
- (NPHY_RSSICAL_NB_TARGET -
- NPHY_RSSICAL_MAXREAD - 1);
- }
-
- wlc_phy_scale_offset_rssi_nphy(pi, 0x0,
- (s8)
- fine_digital_offset
- [result_idx],
- (result_idx /
- 2 ==
- 0) ?
- RADIO_MIMO_CORESEL_CORE1
- :
- RADIO_MIMO_CORESEL_CORE2,
- (result_idx %
- 2 ==
- 0) ? NPHY_RAIL_I
- : NPHY_RAIL_Q,
- NPHY_RSSI_SEL_NB);
+ (NPHY_RSSICAL_NB_TARGET -
+ NPHY_RSSICAL_MAXREAD - 1);
+
+ wlc_phy_scale_offset_rssi_nphy(
+ pi, 0x0,
+ (s8)
+ fine_digital_offset
+ [result_idx],
+ (result_idx / 2 == 0) ?
+ RADIO_MIMO_CORESEL_CORE1 :
+ RADIO_MIMO_CORESEL_CORE2,
+ (result_idx % 2 == 0) ?
+ NPHY_RAIL_I : NPHY_RAIL_Q,
+ NPHY_RSSI_SEL_NB);
}
}
@@ -21952,46 +22833,44 @@ static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi)
for (result_idx = 0; result_idx < 4; result_idx++) {
if (core == result_idx / 2) {
fine_digital_offset[result_idx] =
- (target_code * NPHY_RSSICAL_NPOLL) -
- poll_result_core[result_idx];
- if (fine_digital_offset[result_idx] < 0) {
+ (target_code *
+ NPHY_RSSICAL_NPOLL) -
+ poll_result_core[result_idx];
+ if (fine_digital_offset[result_idx] <
+ 0) {
fine_digital_offset[result_idx]
- =
- ABS(fine_digital_offset
- [result_idx]);
+ = abs(
+ fine_digital_offset
+ [result_idx]);
fine_digital_offset[result_idx]
- += (NPHY_RSSICAL_NPOLL / 2);
+ += (NPHY_RSSICAL_NPOLL
+ / 2);
fine_digital_offset[result_idx]
- /= NPHY_RSSICAL_NPOLL;
+ /= NPHY_RSSICAL_NPOLL;
fine_digital_offset[result_idx]
- =
- -fine_digital_offset
- [result_idx];
+ = -fine_digital_offset
+ [result_idx];
} else {
fine_digital_offset[result_idx]
- += (NPHY_RSSICAL_NPOLL / 2);
+ += (NPHY_RSSICAL_NPOLL
+ / 2);
fine_digital_offset[result_idx]
- /= NPHY_RSSICAL_NPOLL;
+ /= NPHY_RSSICAL_NPOLL;
}
- wlc_phy_scale_offset_rssi_nphy(pi, 0x0,
- (s8)
- fine_digital_offset
- [core *
- 2],
- (core ==
- PHY_CORE_0)
- ?
- RADIO_MIMO_CORESEL_CORE1
- :
- RADIO_MIMO_CORESEL_CORE2,
- (result_idx
- % 2 ==
- 0) ?
- NPHY_RAIL_I
- :
- NPHY_RAIL_Q,
- rssi_type);
+ wlc_phy_scale_offset_rssi_nphy(
+ pi, 0x0,
+ (s8)
+ fine_digital_offset
+ [core *
+ 2],
+ (core == PHY_CORE_0) ?
+ RADIO_MIMO_CORESEL_CORE1 :
+ RADIO_MIMO_CORESEL_CORE2,
+ (result_idx % 2 == 0) ?
+ NPHY_RAIL_I :
+ NPHY_RAIL_Q,
+ rssi_type);
}
}
@@ -22040,87 +22919,87 @@ static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi)
if (CHSPEC_IS2G(pi->radio_chanspec)) {
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
pi->rssical_cache.rssical_radio_regs_2G[0] =
- read_radio_reg(pi, RADIO_2057_NB_MASTER_CORE0);
+ read_radio_reg(pi, RADIO_2057_NB_MASTER_CORE0);
pi->rssical_cache.rssical_radio_regs_2G[1] =
- read_radio_reg(pi, RADIO_2057_NB_MASTER_CORE1);
+ read_radio_reg(pi, RADIO_2057_NB_MASTER_CORE1);
} else {
pi->rssical_cache.rssical_radio_regs_2G[0] =
- read_radio_reg(pi,
- RADIO_2056_RX_RSSI_MISC |
- RADIO_2056_RX0);
+ read_radio_reg(pi,
+ RADIO_2056_RX_RSSI_MISC |
+ RADIO_2056_RX0);
pi->rssical_cache.rssical_radio_regs_2G[1] =
- read_radio_reg(pi,
- RADIO_2056_RX_RSSI_MISC |
- RADIO_2056_RX1);
+ read_radio_reg(pi,
+ RADIO_2056_RX_RSSI_MISC |
+ RADIO_2056_RX1);
}
pi->rssical_cache.rssical_phyregs_2G[0] =
- read_phy_reg(pi, 0x1a6);
+ read_phy_reg(pi, 0x1a6);
pi->rssical_cache.rssical_phyregs_2G[1] =
- read_phy_reg(pi, 0x1ac);
+ read_phy_reg(pi, 0x1ac);
pi->rssical_cache.rssical_phyregs_2G[2] =
- read_phy_reg(pi, 0x1b2);
+ read_phy_reg(pi, 0x1b2);
pi->rssical_cache.rssical_phyregs_2G[3] =
- read_phy_reg(pi, 0x1b8);
+ read_phy_reg(pi, 0x1b8);
pi->rssical_cache.rssical_phyregs_2G[4] =
- read_phy_reg(pi, 0x1a4);
+ read_phy_reg(pi, 0x1a4);
pi->rssical_cache.rssical_phyregs_2G[5] =
- read_phy_reg(pi, 0x1aa);
+ read_phy_reg(pi, 0x1aa);
pi->rssical_cache.rssical_phyregs_2G[6] =
- read_phy_reg(pi, 0x1b0);
+ read_phy_reg(pi, 0x1b0);
pi->rssical_cache.rssical_phyregs_2G[7] =
- read_phy_reg(pi, 0x1b6);
+ read_phy_reg(pi, 0x1b6);
pi->rssical_cache.rssical_phyregs_2G[8] =
- read_phy_reg(pi, 0x1a5);
+ read_phy_reg(pi, 0x1a5);
pi->rssical_cache.rssical_phyregs_2G[9] =
- read_phy_reg(pi, 0x1ab);
+ read_phy_reg(pi, 0x1ab);
pi->rssical_cache.rssical_phyregs_2G[10] =
- read_phy_reg(pi, 0x1b1);
+ read_phy_reg(pi, 0x1b1);
pi->rssical_cache.rssical_phyregs_2G[11] =
- read_phy_reg(pi, 0x1b7);
+ read_phy_reg(pi, 0x1b7);
pi->nphy_rssical_chanspec_2G = pi->radio_chanspec;
} else {
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
pi->rssical_cache.rssical_radio_regs_5G[0] =
- read_radio_reg(pi, RADIO_2057_NB_MASTER_CORE0);
+ read_radio_reg(pi, RADIO_2057_NB_MASTER_CORE0);
pi->rssical_cache.rssical_radio_regs_5G[1] =
- read_radio_reg(pi, RADIO_2057_NB_MASTER_CORE1);
+ read_radio_reg(pi, RADIO_2057_NB_MASTER_CORE1);
} else {
pi->rssical_cache.rssical_radio_regs_5G[0] =
- read_radio_reg(pi,
- RADIO_2056_RX_RSSI_MISC |
- RADIO_2056_RX0);
+ read_radio_reg(pi,
+ RADIO_2056_RX_RSSI_MISC |
+ RADIO_2056_RX0);
pi->rssical_cache.rssical_radio_regs_5G[1] =
- read_radio_reg(pi,
- RADIO_2056_RX_RSSI_MISC |
- RADIO_2056_RX1);
+ read_radio_reg(pi,
+ RADIO_2056_RX_RSSI_MISC |
+ RADIO_2056_RX1);
}
pi->rssical_cache.rssical_phyregs_5G[0] =
- read_phy_reg(pi, 0x1a6);
+ read_phy_reg(pi, 0x1a6);
pi->rssical_cache.rssical_phyregs_5G[1] =
- read_phy_reg(pi, 0x1ac);
+ read_phy_reg(pi, 0x1ac);
pi->rssical_cache.rssical_phyregs_5G[2] =
- read_phy_reg(pi, 0x1b2);
+ read_phy_reg(pi, 0x1b2);
pi->rssical_cache.rssical_phyregs_5G[3] =
- read_phy_reg(pi, 0x1b8);
+ read_phy_reg(pi, 0x1b8);
pi->rssical_cache.rssical_phyregs_5G[4] =
- read_phy_reg(pi, 0x1a4);
+ read_phy_reg(pi, 0x1a4);
pi->rssical_cache.rssical_phyregs_5G[5] =
- read_phy_reg(pi, 0x1aa);
+ read_phy_reg(pi, 0x1aa);
pi->rssical_cache.rssical_phyregs_5G[6] =
- read_phy_reg(pi, 0x1b0);
+ read_phy_reg(pi, 0x1b0);
pi->rssical_cache.rssical_phyregs_5G[7] =
- read_phy_reg(pi, 0x1b6);
+ read_phy_reg(pi, 0x1b6);
pi->rssical_cache.rssical_phyregs_5G[8] =
- read_phy_reg(pi, 0x1a5);
+ read_phy_reg(pi, 0x1a5);
pi->rssical_cache.rssical_phyregs_5G[9] =
- read_phy_reg(pi, 0x1ab);
+ read_phy_reg(pi, 0x1ab);
pi->rssical_cache.rssical_phyregs_5G[10] =
- read_phy_reg(pi, 0x1b1);
+ read_phy_reg(pi, 0x1b1);
pi->rssical_cache.rssical_phyregs_5G[11] =
- read_phy_reg(pi, 0x1b7);
+ read_phy_reg(pi, 0x1b7);
pi->nphy_rssical_chanspec_5G = pi->radio_chanspec;
}
@@ -22129,121 +23008,287 @@ static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi)
wlc_phy_clip_det_nphy(pi, 1, clip_state);
}
-static void wlc_phy_restore_rssical_nphy(struct brcms_phy *pi)
+static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type)
{
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if (pi->nphy_rssical_chanspec_2G == 0)
- return;
+ s32 target_code;
+ u16 classif_state;
+ u16 clip_state[2];
+ u16 rssi_ctrl_state[2], pd_state[2];
+ u16 rfctrlintc_state[2], rfpdcorerxtx_state[2];
+ u16 rfctrlintc_override_val;
+ u16 clip_off[] = { 0xffff, 0xffff };
+ u16 rf_pd_val, pd_mask, rssi_ctrl_mask;
+ u8 vcm, min_vcm, vcm_tmp[4];
+ u8 vcm_final[4] = { 0, 0, 0, 0 };
+ u8 result_idx, ctr;
+ s32 poll_results[4][4] = {
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+ };
+ s32 poll_miniq[4][2] = {
+ {0, 0},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ };
+ s32 min_d, curr_d;
+ s32 fine_digital_offset[4];
+ s32 poll_results_min[4] = { 0, 0, 0, 0 };
+ s32 min_poll;
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- mod_radio_reg(pi, RADIO_2057_NB_MASTER_CORE0,
- RADIO_2057_VCM_MASK,
- pi->rssical_cache.
- rssical_radio_regs_2G[0]);
- mod_radio_reg(pi, RADIO_2057_NB_MASTER_CORE1,
- RADIO_2057_VCM_MASK,
- pi->rssical_cache.
- rssical_radio_regs_2G[1]);
- } else {
- mod_radio_reg(pi,
- RADIO_2056_RX_RSSI_MISC | RADIO_2056_RX0,
- RADIO_2056_VCM_MASK,
- pi->rssical_cache.
- rssical_radio_regs_2G[0]);
- mod_radio_reg(pi,
- RADIO_2056_RX_RSSI_MISC | RADIO_2056_RX1,
- RADIO_2056_VCM_MASK,
- pi->rssical_cache.
- rssical_radio_regs_2G[1]);
+ switch (rssi_type) {
+ case NPHY_RSSI_SEL_NB:
+ target_code = NPHY_RSSICAL_NB_TARGET;
+ break;
+ case NPHY_RSSI_SEL_W1:
+ target_code = NPHY_RSSICAL_W1_TARGET;
+ break;
+ case NPHY_RSSI_SEL_W2:
+ target_code = NPHY_RSSICAL_W2_TARGET;
+ break;
+ default:
+ return;
+ break;
+ }
+
+ classif_state = wlc_phy_classifier_nphy(pi, 0, 0);
+ wlc_phy_classifier_nphy(pi, (0x7 << 0), 4);
+ wlc_phy_clip_det_nphy(pi, 0, clip_state);
+ wlc_phy_clip_det_nphy(pi, 1, clip_off);
+
+ rf_pd_val = (rssi_type == NPHY_RSSI_SEL_NB) ? 0x6 : 0x4;
+ rfctrlintc_override_val =
+ CHSPEC_IS5G(pi->radio_chanspec) ? 0x140 : 0x110;
+
+ rfctrlintc_state[0] = read_phy_reg(pi, 0x91);
+ rfpdcorerxtx_state[0] = read_radio_reg(pi, RADIO_2055_PD_CORE1_RXTX);
+ write_phy_reg(pi, 0x91, rfctrlintc_override_val);
+ write_radio_reg(pi, RADIO_2055_PD_CORE1_RXTX, rf_pd_val);
+
+ rfctrlintc_state[1] = read_phy_reg(pi, 0x92);
+ rfpdcorerxtx_state[1] = read_radio_reg(pi, RADIO_2055_PD_CORE2_RXTX);
+ write_phy_reg(pi, 0x92, rfctrlintc_override_val);
+ write_radio_reg(pi, RADIO_2055_PD_CORE2_RXTX, rf_pd_val);
+
+ pd_mask = RADIO_2055_NBRSSI_PD | RADIO_2055_WBRSSI_G1_PD |
+ RADIO_2055_WBRSSI_G2_PD;
+ pd_state[0] =
+ read_radio_reg(pi, RADIO_2055_PD_CORE1_RSSI_MISC) & pd_mask;
+ pd_state[1] =
+ read_radio_reg(pi, RADIO_2055_PD_CORE2_RSSI_MISC) & pd_mask;
+ mod_radio_reg(pi, RADIO_2055_PD_CORE1_RSSI_MISC, pd_mask, 0);
+ mod_radio_reg(pi, RADIO_2055_PD_CORE2_RSSI_MISC, pd_mask, 0);
+ rssi_ctrl_mask = RADIO_2055_NBRSSI_SEL | RADIO_2055_WBRSSI_G1_SEL |
+ RADIO_2055_WBRSSI_G2_SEL;
+ rssi_ctrl_state[0] =
+ read_radio_reg(pi, RADIO_2055_SP_RSSI_CORE1) & rssi_ctrl_mask;
+ rssi_ctrl_state[1] =
+ read_radio_reg(pi, RADIO_2055_SP_RSSI_CORE2) & rssi_ctrl_mask;
+ wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_ALLRX, rssi_type);
+
+ wlc_phy_scale_offset_rssi_nphy(pi, 0x0, 0x0, RADIO_MIMO_CORESEL_ALLRX,
+ NPHY_RAIL_I, rssi_type);
+ wlc_phy_scale_offset_rssi_nphy(pi, 0x0, 0x0, RADIO_MIMO_CORESEL_ALLRX,
+ NPHY_RAIL_Q, rssi_type);
+
+ for (vcm = 0; vcm < 4; vcm++) {
+
+ vcm_tmp[0] = vcm_tmp[1] = vcm_tmp[2] = vcm_tmp[3] = vcm;
+ if (rssi_type != NPHY_RSSI_SEL_W2)
+ wlc_phy_set_rssi_2055_vcm(pi, rssi_type, vcm_tmp);
+
+ wlc_phy_poll_rssi_nphy(pi, rssi_type, &poll_results[vcm][0],
+ NPHY_RSSICAL_NPOLL);
+
+ if ((rssi_type == NPHY_RSSI_SEL_W1)
+ || (rssi_type == NPHY_RSSI_SEL_W2)) {
+ for (ctr = 0; ctr < 2; ctr++)
+ poll_miniq[vcm][ctr] =
+ min(poll_results[vcm][ctr * 2 + 0],
+ poll_results[vcm][ctr * 2 + 1]);
}
+ }
- write_phy_reg(pi, 0x1a6,
- pi->rssical_cache.rssical_phyregs_2G[0]);
- write_phy_reg(pi, 0x1ac,
- pi->rssical_cache.rssical_phyregs_2G[1]);
- write_phy_reg(pi, 0x1b2,
- pi->rssical_cache.rssical_phyregs_2G[2]);
- write_phy_reg(pi, 0x1b8,
- pi->rssical_cache.rssical_phyregs_2G[3]);
- write_phy_reg(pi, 0x1a4,
- pi->rssical_cache.rssical_phyregs_2G[4]);
- write_phy_reg(pi, 0x1aa,
- pi->rssical_cache.rssical_phyregs_2G[5]);
- write_phy_reg(pi, 0x1b0,
- pi->rssical_cache.rssical_phyregs_2G[6]);
- write_phy_reg(pi, 0x1b6,
- pi->rssical_cache.rssical_phyregs_2G[7]);
- write_phy_reg(pi, 0x1a5,
- pi->rssical_cache.rssical_phyregs_2G[8]);
- write_phy_reg(pi, 0x1ab,
- pi->rssical_cache.rssical_phyregs_2G[9]);
- write_phy_reg(pi, 0x1b1,
- pi->rssical_cache.rssical_phyregs_2G[10]);
- write_phy_reg(pi, 0x1b7,
- pi->rssical_cache.rssical_phyregs_2G[11]);
+ for (result_idx = 0; result_idx < 4; result_idx++) {
+ min_d = NPHY_RSSICAL_MAXD;
+ min_vcm = 0;
+ min_poll = NPHY_RSSICAL_MAXREAD * NPHY_RSSICAL_NPOLL + 1;
+ for (vcm = 0; vcm < 4; vcm++) {
+ curr_d = abs(((rssi_type == NPHY_RSSI_SEL_NB) ?
+ poll_results[vcm][result_idx] :
+ poll_miniq[vcm][result_idx / 2]) -
+ (target_code * NPHY_RSSICAL_NPOLL));
+ if (curr_d < min_d) {
+ min_d = curr_d;
+ min_vcm = vcm;
+ }
+ if (poll_results[vcm][result_idx] < min_poll)
+ min_poll = poll_results[vcm][result_idx];
+ }
+ vcm_final[result_idx] = min_vcm;
+ poll_results_min[result_idx] = min_poll;
+ }
- } else {
- if (pi->nphy_rssical_chanspec_5G == 0)
- return;
+ if (rssi_type != NPHY_RSSI_SEL_W2)
+ wlc_phy_set_rssi_2055_vcm(pi, rssi_type, vcm_final);
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- mod_radio_reg(pi, RADIO_2057_NB_MASTER_CORE0,
- RADIO_2057_VCM_MASK,
- pi->rssical_cache.
- rssical_radio_regs_5G[0]);
- mod_radio_reg(pi, RADIO_2057_NB_MASTER_CORE1,
- RADIO_2057_VCM_MASK,
- pi->rssical_cache.
- rssical_radio_regs_5G[1]);
+ for (result_idx = 0; result_idx < 4; result_idx++) {
+ fine_digital_offset[result_idx] =
+ (target_code * NPHY_RSSICAL_NPOLL) -
+ poll_results[vcm_final[result_idx]][result_idx];
+ if (fine_digital_offset[result_idx] < 0) {
+ fine_digital_offset[result_idx] =
+ abs(fine_digital_offset[result_idx]);
+ fine_digital_offset[result_idx] +=
+ (NPHY_RSSICAL_NPOLL / 2);
+ fine_digital_offset[result_idx] /= NPHY_RSSICAL_NPOLL;
+ fine_digital_offset[result_idx] =
+ -fine_digital_offset[result_idx];
} else {
- mod_radio_reg(pi,
- RADIO_2056_RX_RSSI_MISC | RADIO_2056_RX0,
- RADIO_2056_VCM_MASK,
- pi->rssical_cache.
- rssical_radio_regs_5G[0]);
- mod_radio_reg(pi,
- RADIO_2056_RX_RSSI_MISC | RADIO_2056_RX1,
- RADIO_2056_VCM_MASK,
- pi->rssical_cache.
- rssical_radio_regs_5G[1]);
+ fine_digital_offset[result_idx] +=
+ (NPHY_RSSICAL_NPOLL / 2);
+ fine_digital_offset[result_idx] /= NPHY_RSSICAL_NPOLL;
}
- write_phy_reg(pi, 0x1a6,
- pi->rssical_cache.rssical_phyregs_5G[0]);
- write_phy_reg(pi, 0x1ac,
- pi->rssical_cache.rssical_phyregs_5G[1]);
- write_phy_reg(pi, 0x1b2,
- pi->rssical_cache.rssical_phyregs_5G[2]);
- write_phy_reg(pi, 0x1b8,
- pi->rssical_cache.rssical_phyregs_5G[3]);
- write_phy_reg(pi, 0x1a4,
- pi->rssical_cache.rssical_phyregs_5G[4]);
- write_phy_reg(pi, 0x1aa,
- pi->rssical_cache.rssical_phyregs_5G[5]);
- write_phy_reg(pi, 0x1b0,
- pi->rssical_cache.rssical_phyregs_5G[6]);
- write_phy_reg(pi, 0x1b6,
- pi->rssical_cache.rssical_phyregs_5G[7]);
- write_phy_reg(pi, 0x1a5,
- pi->rssical_cache.rssical_phyregs_5G[8]);
- write_phy_reg(pi, 0x1ab,
- pi->rssical_cache.rssical_phyregs_5G[9]);
- write_phy_reg(pi, 0x1b1,
- pi->rssical_cache.rssical_phyregs_5G[10]);
- write_phy_reg(pi, 0x1b7,
- pi->rssical_cache.rssical_phyregs_5G[11]);
+ if (poll_results_min[result_idx] ==
+ NPHY_RSSICAL_MAXREAD * NPHY_RSSICAL_NPOLL)
+ fine_digital_offset[result_idx] =
+ (target_code - NPHY_RSSICAL_MAXREAD - 1);
+
+ wlc_phy_scale_offset_rssi_nphy(pi, 0x0,
+ (s8)
+ fine_digital_offset[result_idx],
+ (result_idx / 2 ==
+ 0) ? RADIO_MIMO_CORESEL_CORE1 :
+ RADIO_MIMO_CORESEL_CORE2,
+ (result_idx % 2 ==
+ 0) ? NPHY_RAIL_I : NPHY_RAIL_Q,
+ rssi_type);
+ }
+
+ mod_radio_reg(pi, RADIO_2055_PD_CORE1_RSSI_MISC, pd_mask, pd_state[0]);
+ mod_radio_reg(pi, RADIO_2055_PD_CORE2_RSSI_MISC, pd_mask, pd_state[1]);
+ if (rssi_ctrl_state[0] == RADIO_2055_NBRSSI_SEL)
+ wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
+ NPHY_RSSI_SEL_NB);
+ else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G1_SEL)
+ wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
+ NPHY_RSSI_SEL_W1);
+ else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G2_SEL)
+ wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
+ NPHY_RSSI_SEL_W2);
+ else
+ wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
+ NPHY_RSSI_SEL_W2);
+ if (rssi_ctrl_state[1] == RADIO_2055_NBRSSI_SEL)
+ wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
+ NPHY_RSSI_SEL_NB);
+ else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G1_SEL)
+ wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
+ NPHY_RSSI_SEL_W1);
+ else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G2_SEL)
+ wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
+ NPHY_RSSI_SEL_W2);
+ else
+ wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
+ NPHY_RSSI_SEL_W2);
+
+ wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_OFF, rssi_type);
+
+ write_phy_reg(pi, 0x91, rfctrlintc_state[0]);
+ write_radio_reg(pi, RADIO_2055_PD_CORE1_RXTX, rfpdcorerxtx_state[0]);
+ write_phy_reg(pi, 0x92, rfctrlintc_state[1]);
+ write_radio_reg(pi, RADIO_2055_PD_CORE2_RXTX, rfpdcorerxtx_state[1]);
+
+ wlc_phy_classifier_nphy(pi, (0x7 << 0), classif_state);
+ wlc_phy_clip_det_nphy(pi, 1, clip_state);
+
+ wlc_phy_resetcca_nphy(pi);
+}
+
+void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi)
+{
+ if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ wlc_phy_rssi_cal_nphy_rev3(pi);
+ } else {
+ wlc_phy_rssi_cal_nphy_rev2(pi, NPHY_RSSI_SEL_NB);
+ wlc_phy_rssi_cal_nphy_rev2(pi, NPHY_RSSI_SEL_W1);
+ wlc_phy_rssi_cal_nphy_rev2(pi, NPHY_RSSI_SEL_W2);
}
}
+int
+wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh)
+{
+ s16 rxpwr, rxpwr0, rxpwr1;
+ s16 phyRx0_l, phyRx2_l;
+
+ rxpwr = 0;
+ rxpwr0 = rxh->PhyRxStatus_1 & PRXS1_nphy_PWR0_MASK;
+ rxpwr1 = (rxh->PhyRxStatus_1 & PRXS1_nphy_PWR1_MASK) >> 8;
+
+ if (rxpwr0 > 127)
+ rxpwr0 -= 256;
+ if (rxpwr1 > 127)
+ rxpwr1 -= 256;
+
+ phyRx0_l = rxh->PhyRxStatus_0 & 0x00ff;
+ phyRx2_l = rxh->PhyRxStatus_2 & 0x00ff;
+ if (phyRx2_l > 127)
+ phyRx2_l -= 256;
+
+ if (((rxpwr0 == 16) || (rxpwr0 == 32))) {
+ rxpwr0 = rxpwr1;
+ rxpwr1 = phyRx2_l;
+ }
+
+ if (pi->sh->rssi_mode == RSSI_ANT_MERGE_MAX)
+ rxpwr = (rxpwr0 > rxpwr1) ? rxpwr0 : rxpwr1;
+ else if (pi->sh->rssi_mode == RSSI_ANT_MERGE_MIN)
+ rxpwr = (rxpwr0 < rxpwr1) ? rxpwr0 : rxpwr1;
+ else if (pi->sh->rssi_mode == RSSI_ANT_MERGE_AVG)
+ rxpwr = (rxpwr0 + rxpwr1) >> 1;
+
+ return rxpwr;
+}
+
+static void
+wlc_phy_loadsampletable_nphy(struct brcms_phy *pi, struct cordic_iq *tone_buf,
+ u16 num_samps)
+{
+ u16 t;
+ u32 *data_buf = NULL;
+
+ data_buf = kmalloc(sizeof(u32) * num_samps, GFP_ATOMIC);
+ if (data_buf == NULL)
+ return;
+
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, true);
+
+ for (t = 0; t < num_samps; t++)
+ data_buf[t] = ((((unsigned int)tone_buf[t].i) & 0x3ff) << 10) |
+ (((unsigned int)tone_buf[t].q) & 0x3ff);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_SAMPLEPLAY, num_samps, 0, 32,
+ data_buf);
+
+ kfree(data_buf);
+
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, false);
+}
+
static u16
wlc_phy_gen_load_samples_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
u8 dac_test_mode)
{
u8 phy_bw, is_phybw40;
u16 num_samps, t, spur;
- fixed theta = 0, rot = 0;
+ s32 theta = 0, rot = 0;
u32 tbl_len;
- cs32 *tone_buf = NULL;
+ struct cordic_iq *tone_buf = NULL;
is_phybw40 = CHSPEC_IS40(pi->radio_chanspec);
phy_bw = (is_phybw40 == 1) ? 40 : 20;
@@ -22258,18 +23303,17 @@ wlc_phy_gen_load_samples_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
tbl_len = (phy_bw << 1);
}
- tone_buf = kmalloc(sizeof(cs32) * tbl_len, GFP_ATOMIC);
- if (tone_buf == NULL) {
+ tone_buf = kmalloc(sizeof(struct cordic_iq) * tbl_len, GFP_ATOMIC);
+ if (tone_buf == NULL)
return 0;
- }
num_samps = (u16) tbl_len;
- rot = FIXED((f_kHz * 36) / phy_bw) / 100;
+ rot = ((f_kHz * 36) / phy_bw) / 100;
theta = 0;
for (t = 0; t < num_samps; t++) {
- wlc_phy_cordic(theta, &tone_buf[t]);
+ tone_buf[t] = cordic_calc_iq(theta);
theta += rot;
@@ -22284,54 +23328,6 @@ wlc_phy_gen_load_samples_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
return num_samps;
}
-int
-wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
- u8 iqmode, u8 dac_test_mode, bool modify_bbmult)
-{
- u16 num_samps;
- u16 loops = 0xffff;
- u16 wait = 0;
-
- num_samps =
- wlc_phy_gen_load_samples_nphy(pi, f_kHz, max_val, dac_test_mode);
- if (num_samps == 0) {
- return -EBADE;
- }
-
- wlc_phy_runsamples_nphy(pi, num_samps, loops, wait, iqmode,
- dac_test_mode, modify_bbmult);
-
- return 0;
-}
-
-static void
-wlc_phy_loadsampletable_nphy(struct brcms_phy *pi, cs32 *tone_buf,
- u16 num_samps)
-{
- u16 t;
- u32 *data_buf = NULL;
-
- data_buf = kmalloc(sizeof(u32) * num_samps, GFP_ATOMIC);
- if (data_buf == NULL) {
- return;
- }
-
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, true);
-
- for (t = 0; t < num_samps; t++) {
- data_buf[t] = ((((unsigned int)tone_buf[t].i) & 0x3ff) << 10) |
- (((unsigned int)tone_buf[t].q) & 0x3ff);
- }
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_SAMPLEPLAY, num_samps, 0, 32,
- data_buf);
-
- kfree(data_buf);
-
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, false);
-}
-
static void
wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 num_samps, u16 loops,
u16 wait, u8 iqmode, u8 dac_test_mode,
@@ -22356,22 +23352,24 @@ wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 num_samps, u16 loops,
lpf_bw_ctl_override4 = read_phy_reg(pi, 0x343) & (0x1 << 7);
if (lpf_bw_ctl_override3 | lpf_bw_ctl_override4) {
lpf_bw_ctl_miscreg3 = read_phy_reg(pi, 0x340) &
- (0x7 << 8);
+ (0x7 << 8);
lpf_bw_ctl_miscreg4 = read_phy_reg(pi, 0x341) &
- (0x7 << 8);
+ (0x7 << 8);
} else {
- wlc_phy_rfctrl_override_nphy_rev7(pi,
- (0x1 << 7),
- wlc_phy_read_lpf_bw_ctl_nphy
- (pi, 0), 0, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi,
+ (0x1 << 7),
+ wlc_phy_read_lpf_bw_ctl_nphy
+ (pi,
+ 0), 0, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
pi->nphy_sample_play_lpf_bw_ctl_ovr = true;
lpf_bw_ctl_miscreg3 = read_phy_reg(pi, 0x340) &
- (0x7 << 8);
+ (0x7 << 8);
lpf_bw_ctl_miscreg4 = read_phy_reg(pi, 0x341) &
- (0x7 << 8);
+ (0x7 << 8);
}
}
@@ -22380,7 +23378,7 @@ wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 num_samps, u16 loops,
wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_IQLOCAL, 1, 87, 16,
&bb_mult);
pi->nphy_bb_mult_save =
- BB_MULT_VALID_MASK | (bb_mult & BB_MULT_MASK);
+ BB_MULT_VALID_MASK | (bb_mult & BB_MULT_MASK);
}
if (modify_bbmult) {
@@ -22395,11 +23393,11 @@ wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 num_samps, u16 loops,
write_phy_reg(pi, 0xc6, num_samps - 1);
- if (loops != 0xffff) {
+ if (loops != 0xffff)
write_phy_reg(pi, 0xc4, loops - 1);
- } else {
+ else
write_phy_reg(pi, 0xc4, loops);
- }
+
write_phy_reg(pi, 0xc5, wait);
orig_RfseqCoreActv = read_phy_reg(pi, 0xa1);
@@ -22420,6 +23418,25 @@ wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 num_samps, u16 loops,
write_phy_reg(pi, 0xa1, orig_RfseqCoreActv);
}
+int
+wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
+ u8 iqmode, u8 dac_test_mode, bool modify_bbmult)
+{
+ u16 num_samps;
+ u16 loops = 0xffff;
+ u16 wait = 0;
+
+ num_samps = wlc_phy_gen_load_samples_nphy(pi, f_kHz, max_val,
+ dac_test_mode);
+ if (num_samps == 0)
+ return -EBADE;
+
+ wlc_phy_runsamples_nphy(pi, num_samps, loops, wait, iqmode,
+ dac_test_mode, modify_bbmult);
+
+ return 0;
+}
+
void wlc_phy_stopplayback_nphy(struct brcms_phy *pi)
{
u16 playback_status;
@@ -22429,13 +23446,11 @@ void wlc_phy_stopplayback_nphy(struct brcms_phy *pi)
wlc_phy_stay_in_carriersearch_nphy(pi, true);
playback_status = read_phy_reg(pi, 0xc7);
- if (playback_status & 0x1) {
+ if (playback_status & 0x1)
or_phy_reg(pi, 0xc3, NPHY_sampleCmd_STOP);
- } else if (playback_status & 0x2) {
-
+ else if (playback_status & 0x2)
and_phy_reg(pi, 0xc2,
(u16) ~NPHY_iqloCalCmdGctl_IQLO_CAL_EN);
- }
and_phy_reg(pi, 0xc3, (u16) ~(0x1 << 2));
@@ -22450,10 +23465,11 @@ void wlc_phy_stopplayback_nphy(struct brcms_phy *pi)
if (NREV_IS(pi->pubpi.phy_rev, 7) || NREV_GE(pi->pubpi.phy_rev, 8)) {
if (pi->nphy_sample_play_lpf_bw_ctl_ovr) {
- wlc_phy_rfctrl_override_nphy_rev7(pi,
- (0x1 << 7),
- 0, 0, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi,
+ (0x1 << 7),
+ 0, 0, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
pi->nphy_sample_play_lpf_bw_ctl_ovr = false;
}
}
@@ -22462,6 +23478,47 @@ void wlc_phy_stopplayback_nphy(struct brcms_phy *pi)
wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
+static u32 *brcms_phy_get_tx_pwrctrl_tbl(struct brcms_phy *pi)
+{
+ u32 *tx_pwrctrl_tbl = NULL;
+ uint phyrev = pi->pubpi.phy_rev;
+
+ if (PHY_IPA(pi)) {
+ tx_pwrctrl_tbl =
+ wlc_phy_get_ipa_gaintbl_nphy(pi);
+ } else {
+ if (CHSPEC_IS5G(pi->radio_chanspec)) {
+ if (NREV_IS(phyrev, 3))
+ tx_pwrctrl_tbl = nphy_tpc_5GHz_txgain_rev3;
+ else if (NREV_IS(phyrev, 4))
+ tx_pwrctrl_tbl =
+ (pi->srom_fem5g.extpagain == 3) ?
+ nphy_tpc_5GHz_txgain_HiPwrEPA :
+ nphy_tpc_5GHz_txgain_rev4;
+ else
+ tx_pwrctrl_tbl = nphy_tpc_5GHz_txgain_rev5;
+ } else {
+ if (NREV_GE(phyrev, 7)) {
+ if (pi->pubpi.radiorev == 3)
+ tx_pwrctrl_tbl =
+ nphy_tpc_txgain_epa_2057rev3;
+ else if (pi->pubpi.radiorev == 5)
+ tx_pwrctrl_tbl =
+ nphy_tpc_txgain_epa_2057rev5;
+ } else {
+ if (NREV_GE(phyrev, 5) &&
+ (pi->srom_fem2g.extpagain == 3))
+ tx_pwrctrl_tbl =
+ nphy_tpc_txgain_HiPwrEPA;
+ else
+ tx_pwrctrl_tbl =
+ nphy_tpc_txgain_rev3;
+ }
+ }
+ }
+ return tx_pwrctrl_tbl;
+}
+
struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi)
{
u16 base_idx[2], curr_gain[2];
@@ -22482,33 +23539,33 @@ struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi)
for (core_no = 0; core_no < 2; core_no++) {
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
target_gain.ipa[core_no] =
- curr_gain[core_no] & 0x0007;
+ curr_gain[core_no] & 0x0007;
target_gain.pad[core_no] =
- ((curr_gain[core_no] & 0x00F8) >> 3);
+ ((curr_gain[core_no] & 0x00F8) >> 3);
target_gain.pga[core_no] =
- ((curr_gain[core_no] & 0x0F00) >> 8);
+ ((curr_gain[core_no] & 0x0F00) >> 8);
target_gain.txgm[core_no] =
- ((curr_gain[core_no] & 0x7000) >> 12);
+ ((curr_gain[core_no] & 0x7000) >> 12);
target_gain.txlpf[core_no] =
- ((curr_gain[core_no] & 0x8000) >> 15);
+ ((curr_gain[core_no] & 0x8000) >> 15);
} else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
target_gain.ipa[core_no] =
- curr_gain[core_no] & 0x000F;
+ curr_gain[core_no] & 0x000F;
target_gain.pad[core_no] =
- ((curr_gain[core_no] & 0x00F0) >> 4);
+ ((curr_gain[core_no] & 0x00F0) >> 4);
target_gain.pga[core_no] =
- ((curr_gain[core_no] & 0x0F00) >> 8);
+ ((curr_gain[core_no] & 0x0F00) >> 8);
target_gain.txgm[core_no] =
- ((curr_gain[core_no] & 0x7000) >> 12);
+ ((curr_gain[core_no] & 0x7000) >> 12);
} else {
target_gain.ipa[core_no] =
- curr_gain[core_no] & 0x0003;
+ curr_gain[core_no] & 0x0003;
target_gain.pad[core_no] =
- ((curr_gain[core_no] & 0x000C) >> 2);
+ ((curr_gain[core_no] & 0x000C) >> 2);
target_gain.pga[core_no] =
- ((curr_gain[core_no] & 0x0070) >> 4);
+ ((curr_gain[core_no] & 0x0070) >> 4);
target_gain.txgm[core_no] =
- ((curr_gain[core_no] & 0x0380) >> 7);
+ ((curr_gain[core_no] & 0x0380) >> 7);
}
}
} else {
@@ -22518,96 +23575,60 @@ struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi)
base_idx[1] = (read_phy_reg(pi, 0x1ee) >> 8) & 0x7f;
for (core_no = 0; core_no < 2; core_no++) {
if (NREV_GE(phyrev, 3)) {
- if (PHY_IPA(pi)) {
- tx_pwrctrl_tbl =
- wlc_phy_get_ipa_gaintbl_nphy(pi);
- } else {
- if (CHSPEC_IS5G(pi->radio_chanspec)) {
- if (NREV_IS(phyrev, 3)) {
- tx_pwrctrl_tbl =
- nphy_tpc_5GHz_txgain_rev3;
- } else if (NREV_IS(phyrev, 4)) {
- tx_pwrctrl_tbl =
- (pi->srom_fem5g.
- extpagain ==
- 3) ?
- nphy_tpc_5GHz_txgain_HiPwrEPA
- :
- nphy_tpc_5GHz_txgain_rev4;
- } else {
- tx_pwrctrl_tbl =
- nphy_tpc_5GHz_txgain_rev5;
- }
- } else {
- if (NREV_GE(phyrev, 7)) {
- if (pi->pubpi.
- radiorev == 3) {
- tx_pwrctrl_tbl =
- nphy_tpc_txgain_epa_2057rev3;
- } else if (pi->pubpi.
- radiorev ==
- 5) {
- tx_pwrctrl_tbl =
- nphy_tpc_txgain_epa_2057rev5;
- }
-
- } else {
- if (NREV_GE(phyrev, 5)
- && (pi->srom_fem2g.
- extpagain ==
- 3)) {
- tx_pwrctrl_tbl =
- nphy_tpc_txgain_HiPwrEPA;
- } else {
- tx_pwrctrl_tbl =
- nphy_tpc_txgain_rev3;
- }
- }
- }
- }
+ tx_pwrctrl_tbl =
+ brcms_phy_get_tx_pwrctrl_tbl(pi);
if (NREV_GE(phyrev, 7)) {
target_gain.ipa[core_no] =
- (tx_pwrctrl_tbl[base_idx[core_no]]
- >> 16) & 0x7;
+ (tx_pwrctrl_tbl
+ [base_idx[core_no]]
+ >> 16) & 0x7;
target_gain.pad[core_no] =
- (tx_pwrctrl_tbl[base_idx[core_no]]
- >> 19) & 0x1f;
+ (tx_pwrctrl_tbl
+ [base_idx[core_no]]
+ >> 19) & 0x1f;
target_gain.pga[core_no] =
- (tx_pwrctrl_tbl[base_idx[core_no]]
- >> 24) & 0xf;
+ (tx_pwrctrl_tbl
+ [base_idx[core_no]]
+ >> 24) & 0xf;
target_gain.txgm[core_no] =
- (tx_pwrctrl_tbl[base_idx[core_no]]
- >> 28) & 0x7;
+ (tx_pwrctrl_tbl
+ [base_idx[core_no]]
+ >> 28) & 0x7;
target_gain.txlpf[core_no] =
- (tx_pwrctrl_tbl[base_idx[core_no]]
- >> 31) & 0x1;
+ (tx_pwrctrl_tbl
+ [base_idx[core_no]]
+ >> 31) & 0x1;
} else {
target_gain.ipa[core_no] =
- (tx_pwrctrl_tbl[base_idx[core_no]]
- >> 16) & 0xf;
+ (tx_pwrctrl_tbl
+ [base_idx[core_no]]
+ >> 16) & 0xf;
target_gain.pad[core_no] =
- (tx_pwrctrl_tbl[base_idx[core_no]]
- >> 20) & 0xf;
+ (tx_pwrctrl_tbl
+ [base_idx[core_no]]
+ >> 20) & 0xf;
target_gain.pga[core_no] =
- (tx_pwrctrl_tbl[base_idx[core_no]]
- >> 24) & 0xf;
+ (tx_pwrctrl_tbl
+ [base_idx[core_no]]
+ >> 24) & 0xf;
target_gain.txgm[core_no] =
- (tx_pwrctrl_tbl[base_idx[core_no]]
- >> 28) & 0x7;
+ (tx_pwrctrl_tbl
+ [base_idx[core_no]]
+ >> 28) & 0x7;
}
} else {
target_gain.ipa[core_no] =
- (nphy_tpc_txgain[base_idx[core_no]] >> 16) &
- 0x3;
+ (nphy_tpc_txgain[base_idx[core_no]] >>
+ 16) & 0x3;
target_gain.pad[core_no] =
- (nphy_tpc_txgain[base_idx[core_no]] >> 18) &
- 0x3;
+ (nphy_tpc_txgain[base_idx[core_no]] >>
+ 18) & 0x3;
target_gain.pga[core_no] =
- (nphy_tpc_txgain[base_idx[core_no]] >> 20) &
- 0x7;
+ (nphy_tpc_txgain[base_idx[core_no]] >>
+ 20) & 0x7;
target_gain.txgm[core_no] =
- (nphy_tpc_txgain[base_idx[core_no]] >> 23) &
- 0x7;
+ (nphy_tpc_txgain[base_idx[core_no]] >>
+ 23) & 0x7;
}
}
}
@@ -22626,26 +23647,23 @@ wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no,
u8 band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0);
if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
params->txlpf = target_gain.txlpf[core_no];
- }
+
params->txgm = target_gain.txgm[core_no];
params->pga = target_gain.pga[core_no];
params->pad = target_gain.pad[core_no];
params->ipa = target_gain.ipa[core_no];
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
params->cal_gain =
- ((params->txlpf << 15) | (params->
- txgm << 12) | (params->
- pga << 8) |
- (params->pad << 3) | (params->ipa));
- } else {
+ ((params->txlpf << 15) | (params->txgm << 12) |
+ (params->pga << 8) |
+ (params->pad << 3) | (params->ipa));
+ else
params->cal_gain =
- ((params->txgm << 12) | (params->
- pga << 8) | (params->
- pad << 4) |
- (params->ipa));
- }
+ ((params->txgm << 12) | (params->pga << 8) |
+ (params->pad << 4) | (params->ipa));
+
params->ncorr[0] = 0x79;
params->ncorr[1] = 0x79;
params->ncorr[2] = 0x79;
@@ -22654,9 +23672,8 @@ wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no,
} else {
gain_index = ((target_gain.pad[core_no] << 0) |
- (target_gain.pga[core_no] << 4) | (target_gain.
- txgm[core_no]
- << 8));
+ (target_gain.pga[core_no] << 4) |
+ (target_gain.txgm[core_no] << 8));
idx = -1;
for (k = 0; k < NPHY_IQCAL_NUMGAINS; k++) {
@@ -22688,37 +23705,39 @@ static void wlc_phy_txcal_radio_setup_nphy(struct brcms_phy *pi)
for (core = 0; core <= 1; core++) {
pi->tx_rx_cal_radio_saveregs[(core * 11) + 0] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, core,
- TX_SSI_MASTER);
+ READ_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TX_SSI_MASTER);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 1] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, core,
- IQCAL_VCM_HG);
+ READ_RADIO_REG3(pi, RADIO_2057, TX, core,
+ IQCAL_VCM_HG);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 2] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, core,
- IQCAL_IDAC);
+ READ_RADIO_REG3(pi, RADIO_2057, TX, core,
+ IQCAL_IDAC);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 3] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, core, TSSI_VCM);
+ READ_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TSSI_VCM);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 4] = 0;
pi->tx_rx_cal_radio_saveregs[(core * 11) + 5] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, core,
- TX_SSI_MUX);
+ READ_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TX_SSI_MUX);
if (pi->pubpi.radiorev != 5)
pi->tx_rx_cal_radio_saveregs[(core * 11) + 6] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, core,
- TSSIA);
+ READ_RADIO_REG3(pi, RADIO_2057, TX,
+ core,
+ TSSIA);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 7] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, core, TSSIG);
+ READ_RADIO_REG3(pi, RADIO_2057, TX, core, TSSIG);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 8] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, core,
- TSSI_MISC1);
+ READ_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TSSI_MISC1);
if (CHSPEC_IS5G(pi->radio_chanspec)) {
WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
@@ -22734,19 +23753,15 @@ static void wlc_phy_txcal_radio_setup_nphy(struct brcms_phy *pi)
if (pi->use_int_tx_iqlo_cal_nphy) {
WRITE_RADIO_REG3(pi, RADIO_2057, TX,
core, TX_SSI_MUX, 0x4);
- if (!
- (pi->
- internal_tx_iqlo_cal_tapoff_intpa_nphy)) {
-
+ if (!(pi->
+ internal_tx_iqlo_cal_tapoff_intpa_nphy))
WRITE_RADIO_REG3(pi, RADIO_2057,
TX, core,
TSSIA, 0x31);
- } else {
-
+ else
WRITE_RADIO_REG3(pi, RADIO_2057,
TX, core,
TSSIA, 0x21);
- }
}
WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
TSSI_MISC1, 0x00);
@@ -22767,19 +23782,15 @@ static void wlc_phy_txcal_radio_setup_nphy(struct brcms_phy *pi)
WRITE_RADIO_REG3(pi, RADIO_2057, TX,
core, TX_SSI_MUX,
0x06);
- if (!
- (pi->
- internal_tx_iqlo_cal_tapoff_intpa_nphy)) {
-
+ if (!(pi->
+ internal_tx_iqlo_cal_tapoff_intpa_nphy))
WRITE_RADIO_REG3(pi, RADIO_2057,
TX, core,
TSSIG, 0x31);
- } else {
-
+ else
WRITE_RADIO_REG3(pi, RADIO_2057,
TX, core,
TSSIG, 0x21);
- }
}
WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
TSSI_MISC1, 0x00);
@@ -22789,58 +23800,62 @@ static void wlc_phy_txcal_radio_setup_nphy(struct brcms_phy *pi)
for (core = 0; core <= 1; core++) {
jtag_core =
- (core ==
- PHY_CORE_0) ? RADIO_2056_TX0 : RADIO_2056_TX1;
+ (core ==
+ PHY_CORE_0) ? RADIO_2056_TX0 : RADIO_2056_TX1;
pi->tx_rx_cal_radio_saveregs[(core * 11) + 0] =
- read_radio_reg(pi,
- RADIO_2056_TX_TX_SSI_MASTER |
- jtag_core);
+ read_radio_reg(pi,
+ RADIO_2056_TX_TX_SSI_MASTER |
+ jtag_core);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 1] =
- read_radio_reg(pi,
- RADIO_2056_TX_IQCAL_VCM_HG |
- jtag_core);
+ read_radio_reg(pi,
+ RADIO_2056_TX_IQCAL_VCM_HG |
+ jtag_core);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 2] =
- read_radio_reg(pi,
- RADIO_2056_TX_IQCAL_IDAC |
- jtag_core);
+ read_radio_reg(pi,
+ RADIO_2056_TX_IQCAL_IDAC |
+ jtag_core);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 3] =
- read_radio_reg(pi,
- RADIO_2056_TX_TSSI_VCM | jtag_core);
+ read_radio_reg(
+ pi,
+ RADIO_2056_TX_TSSI_VCM |
+ jtag_core);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 4] =
- read_radio_reg(pi,
- RADIO_2056_TX_TX_AMP_DET |
- jtag_core);
+ read_radio_reg(pi,
+ RADIO_2056_TX_TX_AMP_DET |
+ jtag_core);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 5] =
- read_radio_reg(pi,
- RADIO_2056_TX_TX_SSI_MUX |
- jtag_core);
+ read_radio_reg(pi,
+ RADIO_2056_TX_TX_SSI_MUX |
+ jtag_core);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 6] =
- read_radio_reg(pi, RADIO_2056_TX_TSSIA | jtag_core);
+ read_radio_reg(pi,
+ RADIO_2056_TX_TSSIA | jtag_core);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 7] =
- read_radio_reg(pi, RADIO_2056_TX_TSSIG | jtag_core);
+ read_radio_reg(pi,
+ RADIO_2056_TX_TSSIG | jtag_core);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 8] =
- read_radio_reg(pi,
- RADIO_2056_TX_TSSI_MISC1 |
- jtag_core);
+ read_radio_reg(pi,
+ RADIO_2056_TX_TSSI_MISC1 |
+ jtag_core);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 9] =
- read_radio_reg(pi,
- RADIO_2056_TX_TSSI_MISC2 |
- jtag_core);
+ read_radio_reg(pi,
+ RADIO_2056_TX_TSSI_MISC2 |
+ jtag_core);
pi->tx_rx_cal_radio_saveregs[(core * 11) + 10] =
- read_radio_reg(pi,
- RADIO_2056_TX_TSSI_MISC3 |
- jtag_core);
+ read_radio_reg(pi,
+ RADIO_2056_TX_TSSI_MISC3 |
+ jtag_core);
if (CHSPEC_IS5G(pi->radio_chanspec)) {
write_radio_reg(pi,
@@ -22860,16 +23875,18 @@ static void wlc_phy_txcal_radio_setup_nphy(struct brcms_phy *pi)
jtag_core, 0x00);
if (PHY_IPA(pi)) {
- write_radio_reg(pi,
- RADIO_2056_TX_TX_SSI_MUX
- | jtag_core, 0x4);
+ write_radio_reg(
+ pi,
+ RADIO_2056_TX_TX_SSI_MUX
+ | jtag_core, 0x4);
write_radio_reg(pi,
RADIO_2056_TX_TSSIA |
jtag_core, 0x1);
} else {
- write_radio_reg(pi,
- RADIO_2056_TX_TX_SSI_MUX
- | jtag_core, 0x00);
+ write_radio_reg(
+ pi,
+ RADIO_2056_TX_TX_SSI_MUX
+ | jtag_core, 0x00);
write_radio_reg(pi,
RADIO_2056_TX_TSSIA |
jtag_core, 0x2f);
@@ -22909,26 +23926,27 @@ static void wlc_phy_txcal_radio_setup_nphy(struct brcms_phy *pi)
if (PHY_IPA(pi)) {
- write_radio_reg(pi,
- RADIO_2056_TX_TX_SSI_MUX
- | jtag_core, 0x06);
- if (NREV_LT(pi->pubpi.phy_rev, 5)) {
-
- write_radio_reg(pi,
- RADIO_2056_TX_TSSIG
- | jtag_core,
- 0x11);
- } else {
-
- write_radio_reg(pi,
- RADIO_2056_TX_TSSIG
- | jtag_core,
- 0x1);
- }
+ write_radio_reg(
+ pi,
+ RADIO_2056_TX_TX_SSI_MUX
+ | jtag_core, 0x06);
+ if (NREV_LT(pi->pubpi.phy_rev, 5))
+ write_radio_reg(
+ pi,
+ RADIO_2056_TX_TSSIG
+ | jtag_core,
+ 0x11);
+ else
+ write_radio_reg(
+ pi,
+ RADIO_2056_TX_TSSIG
+ | jtag_core,
+ 0x1);
} else {
- write_radio_reg(pi,
- RADIO_2056_TX_TX_SSI_MUX
- | jtag_core, 0x00);
+ write_radio_reg(
+ pi,
+ RADIO_2056_TX_TX_SSI_MUX
+ | jtag_core, 0x00);
write_radio_reg(pi,
RADIO_2056_TX_TSSIG |
jtag_core, 0x20);
@@ -22948,23 +23966,23 @@ static void wlc_phy_txcal_radio_setup_nphy(struct brcms_phy *pi)
} else {
pi->tx_rx_cal_radio_saveregs[0] =
- read_radio_reg(pi, RADIO_2055_CORE1_TXRF_IQCAL1);
+ read_radio_reg(pi, RADIO_2055_CORE1_TXRF_IQCAL1);
write_radio_reg(pi, RADIO_2055_CORE1_TXRF_IQCAL1, 0x29);
pi->tx_rx_cal_radio_saveregs[1] =
- read_radio_reg(pi, RADIO_2055_CORE1_TXRF_IQCAL2);
+ read_radio_reg(pi, RADIO_2055_CORE1_TXRF_IQCAL2);
write_radio_reg(pi, RADIO_2055_CORE1_TXRF_IQCAL2, 0x54);
pi->tx_rx_cal_radio_saveregs[2] =
- read_radio_reg(pi, RADIO_2055_CORE2_TXRF_IQCAL1);
+ read_radio_reg(pi, RADIO_2055_CORE2_TXRF_IQCAL1);
write_radio_reg(pi, RADIO_2055_CORE2_TXRF_IQCAL1, 0x29);
pi->tx_rx_cal_radio_saveregs[3] =
- read_radio_reg(pi, RADIO_2055_CORE2_TXRF_IQCAL2);
+ read_radio_reg(pi, RADIO_2055_CORE2_TXRF_IQCAL2);
write_radio_reg(pi, RADIO_2055_CORE2_TXRF_IQCAL2, 0x54);
pi->tx_rx_cal_radio_saveregs[4] =
- read_radio_reg(pi, RADIO_2055_PWRDET_RXTX_CORE1);
+ read_radio_reg(pi, RADIO_2055_PWRDET_RXTX_CORE1);
pi->tx_rx_cal_radio_saveregs[5] =
- read_radio_reg(pi, RADIO_2055_PWRDET_RXTX_CORE2);
+ read_radio_reg(pi, RADIO_2055_PWRDET_RXTX_CORE2);
if ((read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand) ==
0) {
@@ -23025,11 +24043,8 @@ static void wlc_phy_txcal_radio_cleanup_nphy(struct brcms_phy *pi)
if (pi->pubpi.radiorev != 5)
WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
TSSIA,
- pi->
- tx_rx_cal_radio_saveregs[(core
- *
- 11) +
- 6]);
+ pi->tx_rx_cal_radio_saveregs
+ [(core * 11) + 6]);
WRITE_RADIO_REG3(pi, RADIO_2057, TX, core, TSSIG,
pi->
@@ -23043,9 +24058,8 @@ static void wlc_phy_txcal_radio_cleanup_nphy(struct brcms_phy *pi)
}
} else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
for (core = 0; core <= 1; core++) {
- jtag_core =
- (core ==
- PHY_CORE_0) ? RADIO_2056_TX0 : RADIO_2056_TX1;
+ jtag_core = (core == PHY_CORE_0) ?
+ RADIO_2056_TX0 : RADIO_2056_TX1;
write_radio_reg(pi,
RADIO_2056_TX_TX_SSI_MASTER | jtag_core,
@@ -23171,23 +24185,22 @@ static void wlc_phy_txcal_physetup_nphy(struct brcms_phy *pi)
pi->tx_rx_cal_phy_saveregs[7] = read_phy_reg(pi, 0x91);
pi->tx_rx_cal_phy_saveregs[8] = read_phy_reg(pi, 0x92);
- if (!(pi->use_int_tx_iqlo_cal_nphy)) {
-
- wlc_phy_rfctrlintc_override_nphy(pi,
- NPHY_RfctrlIntc_override_PA,
- 1,
- RADIO_MIMO_CORESEL_CORE1
- |
- RADIO_MIMO_CORESEL_CORE2);
- } else {
-
- wlc_phy_rfctrlintc_override_nphy(pi,
- NPHY_RfctrlIntc_override_PA,
- 0,
- RADIO_MIMO_CORESEL_CORE1
- |
- RADIO_MIMO_CORESEL_CORE2);
- }
+ if (!(pi->use_int_tx_iqlo_cal_nphy))
+ wlc_phy_rfctrlintc_override_nphy(
+ pi,
+ NPHY_RfctrlIntc_override_PA,
+ 1,
+ RADIO_MIMO_CORESEL_CORE1
+ |
+ RADIO_MIMO_CORESEL_CORE2);
+ else
+ wlc_phy_rfctrlintc_override_nphy(
+ pi,
+ NPHY_RfctrlIntc_override_PA,
+ 0,
+ RADIO_MIMO_CORESEL_CORE1
+ |
+ RADIO_MIMO_CORESEL_CORE2);
wlc_phy_rfctrlintc_override_nphy(pi,
NPHY_RfctrlIntc_override_TRSW,
@@ -23205,12 +24218,13 @@ static void wlc_phy_txcal_physetup_nphy(struct brcms_phy *pi)
0x29b, (0x1 << 0), (0) << 0);
if (NREV_IS(pi->pubpi.phy_rev, 7)
- || NREV_GE(pi->pubpi.phy_rev, 8)) {
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 7),
- wlc_phy_read_lpf_bw_ctl_nphy
- (pi, 0), 0, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- }
+ || NREV_GE(pi->pubpi.phy_rev, 8))
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 7),
+ wlc_phy_read_lpf_bw_ctl_nphy
+ (pi,
+ 0), 0, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
if (pi->use_int_tx_iqlo_cal_nphy
&& !(pi->internal_tx_iqlo_cal_tapoff_intpa_nphy)) {
@@ -23221,25 +24235,30 @@ static void wlc_phy_txcal_physetup_nphy(struct brcms_phy *pi)
1 << 4);
if (CHSPEC_IS2G(pi->radio_chanspec)) {
- mod_radio_reg(pi,
- RADIO_2057_PAD2G_TUNE_PUS_CORE0,
- 1, 0);
- mod_radio_reg(pi,
- RADIO_2057_PAD2G_TUNE_PUS_CORE1,
- 1, 0);
+ mod_radio_reg(
+ pi,
+ RADIO_2057_PAD2G_TUNE_PUS_CORE0,
+ 1, 0);
+ mod_radio_reg(
+ pi,
+ RADIO_2057_PAD2G_TUNE_PUS_CORE1,
+ 1, 0);
} else {
- mod_radio_reg(pi,
- RADIO_2057_IPA5G_CASCOFFV_PU_CORE0,
- 1, 0);
- mod_radio_reg(pi,
- RADIO_2057_IPA5G_CASCOFFV_PU_CORE1,
- 1, 0);
+ mod_radio_reg(
+ pi,
+ RADIO_2057_IPA5G_CASCOFFV_PU_CORE0,
+ 1, 0);
+ mod_radio_reg(
+ pi,
+ RADIO_2057_IPA5G_CASCOFFV_PU_CORE1,
+ 1, 0);
}
} else if (NREV_GE(pi->pubpi.phy_rev, 8)) {
- wlc_phy_rfctrl_override_nphy_rev7(pi,
- (0x1 << 3), 0,
- 0x3, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi,
+ (0x1 << 3), 0,
+ 0x3, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
}
}
} else {
@@ -23302,11 +24321,11 @@ static void wlc_phy_txcal_phycleanup_nphy(struct brcms_phy *pi)
write_phy_reg(pi, 0x29b, pi->tx_rx_cal_phy_saveregs[10]);
if (NREV_IS(pi->pubpi.phy_rev, 7)
- || NREV_GE(pi->pubpi.phy_rev, 8)) {
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 7), 0, 0,
- 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- }
+ || NREV_GE(pi->pubpi.phy_rev, 8))
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 7), 0, 0,
+ 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
wlc_phy_resetcca_nphy(pi);
@@ -23315,28 +24334,33 @@ static void wlc_phy_txcal_phycleanup_nphy(struct brcms_phy *pi)
if (NREV_IS(pi->pubpi.phy_rev, 7)) {
if (CHSPEC_IS2G(pi->radio_chanspec)) {
- mod_radio_reg(pi,
- RADIO_2057_PAD2G_TUNE_PUS_CORE0,
- 1, 1);
- mod_radio_reg(pi,
- RADIO_2057_PAD2G_TUNE_PUS_CORE1,
- 1, 1);
+ mod_radio_reg(
+ pi,
+ RADIO_2057_PAD2G_TUNE_PUS_CORE0,
+ 1, 1);
+ mod_radio_reg(
+ pi,
+ RADIO_2057_PAD2G_TUNE_PUS_CORE1,
+ 1, 1);
} else {
- mod_radio_reg(pi,
- RADIO_2057_IPA5G_CASCOFFV_PU_CORE0,
- 1, 1);
- mod_radio_reg(pi,
- RADIO_2057_IPA5G_CASCOFFV_PU_CORE1,
- 1, 1);
+ mod_radio_reg(
+ pi,
+ RADIO_2057_IPA5G_CASCOFFV_PU_CORE0,
+ 1, 1);
+ mod_radio_reg(
+ pi,
+ RADIO_2057_IPA5G_CASCOFFV_PU_CORE1,
+ 1, 1);
}
mod_radio_reg(pi, RADIO_2057_OVR_REG0, 1 << 4,
0);
} else if (NREV_GE(pi->pubpi.phy_rev, 8)) {
- wlc_phy_rfctrl_override_nphy_rev7(pi,
- (0x1 << 3), 0,
- 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi,
+ (0x1 << 3), 0,
+ 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
}
}
} else {
@@ -23356,10 +24380,6 @@ static void wlc_phy_txcal_phycleanup_nphy(struct brcms_phy *pi)
}
}
-#define NPHY_CAL_TSSISAMPS 64
-#define NPHY_TEST_TONE_FREQ_40MHz 4000
-#define NPHY_TEST_TONE_FREQ_20MHz 2500
-
void
wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf, u8 num_samps)
{
@@ -23379,8 +24399,8 @@ wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf, u8 num_samps)
idle_tssi[1] = (temp <= 31) ? temp : (temp - 64);
tssi_type =
- CHSPEC_IS5G(pi->radio_chanspec) ?
- (u8)NPHY_RSSI_SEL_TSSI_5G : (u8)NPHY_RSSI_SEL_TSSI_2G;
+ CHSPEC_IS5G(pi->radio_chanspec) ?
+ (u8)NPHY_RSSI_SEL_TSSI_5G : (u8)NPHY_RSSI_SEL_TSSI_2G;
wlc_phy_poll_rssi_nphy(pi, tssi_type, rssi_buf, num_samps);
@@ -23390,17 +24410,15 @@ wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf, u8 num_samps)
pwrindex[0] = idle_tssi[0] - tssival[0] + 64;
pwrindex[1] = idle_tssi[1] - tssival[1] + 64;
- if (pwrindex[0] < 0) {
+ if (pwrindex[0] < 0)
pwrindex[0] = 0;
- } else if (pwrindex[0] > 63) {
+ else if (pwrindex[0] > 63)
pwrindex[0] = 63;
- }
- if (pwrindex[1] < 0) {
+ if (pwrindex[1] < 0)
pwrindex[1] = 0;
- } else if (pwrindex[1] > 63) {
+ else if (pwrindex[1] > 63)
pwrindex[1] = 63;
- }
wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_CORE1TXPWRCTL, 1,
(u32) pwrindex[0], 32, &qdBm_pwrbuf[0]);
@@ -23408,326 +24426,1241 @@ wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf, u8 num_samps)
(u32) pwrindex[1], 32, &qdBm_pwrbuf[1]);
}
-static void wlc_phy_internal_cal_txgain_nphy(struct brcms_phy *pi)
+static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
{
- u16 txcal_gain[2];
+ int index;
+ u32 bbmult_scale;
+ u16 bbmult;
+ u16 tblentry;
- pi->nphy_txcal_pwr_idx[0] = pi->nphy_cal_orig_pwr_idx[0];
- pi->nphy_txcal_pwr_idx[1] = pi->nphy_cal_orig_pwr_idx[0];
- wlc_phy_txpwr_index_nphy(pi, 1, pi->nphy_cal_orig_pwr_idx[0], true);
- wlc_phy_txpwr_index_nphy(pi, 2, pi->nphy_cal_orig_pwr_idx[1], true);
+ struct nphy_txiqcal_ladder ladder_lo[] = {
+ {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
+ {25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
+ {25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
+ };
- wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x110, 16,
- txcal_gain);
+ struct nphy_txiqcal_ladder ladder_iq[] = {
+ {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
+ {25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
+ {100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
+ };
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- txcal_gain[0] = (txcal_gain[0] & 0xF000) | 0x0F40;
- txcal_gain[1] = (txcal_gain[1] & 0xF000) | 0x0F40;
- } else {
- txcal_gain[0] = (txcal_gain[0] & 0xF000) | 0x0F60;
- txcal_gain[1] = (txcal_gain[1] & 0xF000) | 0x0F60;
+ bbmult = (core == PHY_CORE_0) ?
+ ((pi->nphy_txcal_bbmult >> 8) & 0xff) :
+ (pi->nphy_txcal_bbmult & 0xff);
+
+ for (index = 0; index < 18; index++) {
+ bbmult_scale = ladder_lo[index].percent * bbmult;
+ bbmult_scale /= 100;
+
+ tblentry =
+ ((bbmult_scale & 0xff) << 8) | ladder_lo[index].g_env;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_IQLOCAL, 1, index, 16,
+ &tblentry);
+
+ bbmult_scale = ladder_iq[index].percent * bbmult;
+ bbmult_scale /= 100;
+
+ tblentry =
+ ((bbmult_scale & 0xff) << 8) | ladder_iq[index].g_env;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_IQLOCAL, 1, index + 32,
+ 16, &tblentry);
}
+}
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x110, 16,
- txcal_gain);
+static u8 wlc_phy_txpwr_idx_cur_get_nphy(struct brcms_phy *pi, u8 core)
+{
+ u16 tmp;
+ tmp = read_phy_reg(pi, ((core == PHY_CORE_0) ? 0x1ed : 0x1ee));
+
+ tmp = (tmp & (0x7f << 8)) >> 8;
+ return (u8) tmp;
}
-static void wlc_phy_precal_txgain_nphy(struct brcms_phy *pi)
+static void
+wlc_phy_txpwr_idx_cur_set_nphy(struct brcms_phy *pi, u8 idx0, u8 idx1)
{
- bool save_bbmult = false;
- u8 txcal_index_2057_rev5n7 = 0;
- u8 txcal_index_2057_rev3n4n6 = 10;
+ mod_phy_reg(pi, 0x1e7, (0x7f << 0), idx0);
+
+ if (NREV_GT(pi->pubpi.phy_rev, 1))
+ mod_phy_reg(pi, 0x222, (0xff << 0), idx1);
+}
+
+static u16 wlc_phy_ipa_get_bbmult_nphy(struct brcms_phy *pi)
+{
+ u16 m0m1;
+
+ wlc_phy_table_read_nphy(pi, 15, 1, 87, 16, &m0m1);
+
+ return m0m1;
+}
+
+static void wlc_phy_ipa_set_bbmult_nphy(struct brcms_phy *pi, u8 m0, u8 m1)
+{
+ u16 m0m1 = (u16) ((m0 << 8) | m1);
+
+ wlc_phy_table_write_nphy(pi, 15, 1, 87, 16, &m0m1);
+ wlc_phy_table_write_nphy(pi, 15, 1, 95, 16, &m0m1);
+}
+
+static void
+wlc_phy_papd_cal_setup_nphy(struct brcms_phy *pi,
+ struct nphy_papd_restore_state *state, u8 core)
+{
+ s32 tone_freq;
+ u8 off_core;
+ u16 mixgain = 0;
+
+ off_core = core ^ 0x1;
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+
+ if (NREV_IS(pi->pubpi.phy_rev, 7)
+ || NREV_GE(pi->pubpi.phy_rev, 8))
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 7),
+ wlc_phy_read_lpf_bw_ctl_nphy
+ (pi,
+ 0), 0, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if (pi->pubpi.radiorev == 5)
+ mixgain = (core == 0) ? 0x20 : 0x00;
+ else if ((pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev == 8))
+ mixgain = 0x00;
+ else if ((pi->pubpi.radiorev <= 4)
+ || (pi->pubpi.radiorev == 6))
+ mixgain = 0x00;
+ } else {
+ if ((pi->pubpi.radiorev == 4) ||
+ (pi->pubpi.radiorev == 6))
+ mixgain = 0x50;
+ else if ((pi->pubpi.radiorev == 3)
+ || (pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev == 8))
+ mixgain = 0x0;
+ }
+
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 11),
+ mixgain, (1 << core), 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+
+ wlc_phy_rfctrl_override_1tomany_nphy(
+ pi,
+ NPHY_REV7_RfctrlOverride_cmd_tx_pu,
+ 1, (1 << core), 0);
+ wlc_phy_rfctrl_override_1tomany_nphy(
+ pi,
+ NPHY_REV7_RfctrlOverride_cmd_tx_pu,
+ 0, (1 << off_core), 0);
+
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3),
+ 0, 0x3, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 2), 1,
+ (1 << core), 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 0), 0,
+ (1 << core), 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 1), 1,
+ (1 << core), 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID2);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 8), 0,
+ (1 << core), 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 9), 1,
+ (1 << core), 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 10), 0,
+ (1 << core), 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3), 1,
+ (1 << core), 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 5),
+ 0, (1 << core), 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 4), 0,
+ (1 << core), 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+
+ state->afectrl[core] = read_phy_reg(pi, (core == PHY_CORE_0) ?
+ 0xa6 : 0xa7);
+ state->afeoverride[core] =
+ read_phy_reg(pi, (core == PHY_CORE_0) ? 0x8f : 0xa5);
+ state->afectrl[off_core] =
+ read_phy_reg(pi, (core == PHY_CORE_0) ? 0xa7 : 0xa6);
+ state->afeoverride[off_core] =
+ read_phy_reg(pi, (core == PHY_CORE_0) ? 0xa5 : 0x8f);
+
+ mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0xa6 : 0xa7),
+ (0x1 << 2), 0);
+ mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0x8f :
+ 0xa5), (0x1 << 2), (0x1 << 2));
+
+ mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0xa7 : 0xa6),
+ (0x1 << 2), (0x1 << 2));
+ mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0xa5 :
+ 0x8f), (0x1 << 2), (0x1 << 2));
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ state->pwrup[core] =
+ READ_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_2G_PWRUP);
+ state->atten[core] =
+ READ_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_2G_ATTEN);
+ state->pwrup[off_core] =
+ READ_RADIO_REG3(pi, RADIO_2057, TX, off_core,
+ TXRXCOUPLE_2G_PWRUP);
+ state->atten[off_core] =
+ READ_RADIO_REG3(pi, RADIO_2057, TX, off_core,
+ TXRXCOUPLE_2G_ATTEN);
+
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_2G_PWRUP, 0xc);
- if (pi->use_int_tx_iqlo_cal_nphy) {
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
if ((pi->pubpi.radiorev == 3) ||
(pi->pubpi.radiorev == 4) ||
- (pi->pubpi.radiorev == 6)) {
+ (pi->pubpi.radiorev == 6))
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_2G_ATTEN, 0xf0);
+ else if (pi->pubpi.radiorev == 5)
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_2G_ATTEN,
+ (core == 0) ? 0xf7 : 0xf2);
+ else if ((pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev == 8))
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_2G_ATTEN, 0xf0);
- pi->nphy_txcal_pwr_idx[0] =
- txcal_index_2057_rev3n4n6;
- pi->nphy_txcal_pwr_idx[1] =
- txcal_index_2057_rev3n4n6;
- wlc_phy_txpwr_index_nphy(pi, 3,
- txcal_index_2057_rev3n4n6,
- false);
- } else {
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, off_core,
+ TXRXCOUPLE_2G_PWRUP, 0x0);
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, off_core,
+ TXRXCOUPLE_2G_ATTEN, 0xff);
+ } else {
+ state->pwrup[core] =
+ READ_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_5G_PWRUP);
+ state->atten[core] =
+ READ_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_5G_ATTEN);
+ state->pwrup[off_core] =
+ READ_RADIO_REG3(pi, RADIO_2057, TX, off_core,
+ TXRXCOUPLE_5G_PWRUP);
+ state->atten[off_core] =
+ READ_RADIO_REG3(pi, RADIO_2057, TX, off_core,
+ TXRXCOUPLE_5G_ATTEN);
- pi->nphy_txcal_pwr_idx[0] =
- txcal_index_2057_rev5n7;
- pi->nphy_txcal_pwr_idx[1] =
- txcal_index_2057_rev5n7;
- wlc_phy_txpwr_index_nphy(pi, 3,
- txcal_index_2057_rev5n7,
- false);
- }
- save_bbmult = true;
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_5G_PWRUP, 0xc);
- } else if (NREV_LT(pi->pubpi.phy_rev, 5)) {
- wlc_phy_cal_txgainctrl_nphy(pi, 11, false);
- if (pi->sh->hw_phytxchain != 3) {
- pi->nphy_txcal_pwr_idx[1] =
- pi->nphy_txcal_pwr_idx[0];
- wlc_phy_txpwr_index_nphy(pi, 3,
- pi->
- nphy_txcal_pwr_idx[0],
- true);
- save_bbmult = true;
+ if ((pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev == 8))
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_5G_ATTEN, 0xf4);
+
+ else
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_5G_ATTEN, 0xf0);
+
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, off_core,
+ TXRXCOUPLE_5G_PWRUP, 0x0);
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, off_core,
+ TXRXCOUPLE_5G_ATTEN, 0xff);
+ }
+
+ tone_freq = 4000;
+
+ wlc_phy_tx_tone_nphy(pi, tone_freq, 181, 0, 0, false);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x297 :
+ 0x29b, (0x1 << 0), (NPHY_PAPD_COMP_ON) << 0);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x1 << 13), (1) << 13);
+
+ mod_phy_reg(pi, (off_core == PHY_CORE_0) ? 0x297 :
+ 0x29b, (0x1 << 0), (NPHY_PAPD_COMP_OFF) << 0);
+
+ mod_phy_reg(pi, (off_core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x1 << 13), (0) << 13);
+
+ } else {
+
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 12), 0, 0x3, 0);
+
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 3), 1, 0, 0);
+
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 0), 0, 0x3, 0);
+
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 2), 1, 0x3, 0);
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 1), 1, 0x3, 0);
+
+ state->afectrl[core] = read_phy_reg(pi, (core == PHY_CORE_0) ?
+ 0xa6 : 0xa7);
+ state->afeoverride[core] =
+ read_phy_reg(pi, (core == PHY_CORE_0) ? 0x8f : 0xa5);
+
+ mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0xa6 : 0xa7),
+ (0x1 << 0) | (0x1 << 1) | (0x1 << 2), 0);
+ mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0x8f :
+ 0xa5),
+ (0x1 << 0) |
+ (0x1 << 1) |
+ (0x1 << 2), (0x1 << 0) | (0x1 << 1) | (0x1 << 2));
+
+ state->vga_master[core] =
+ READ_RADIO_REG2(pi, RADIO_2056, RX, core, VGA_MASTER);
+ WRITE_RADIO_REG2(pi, RADIO_2056, RX, core, VGA_MASTER, 0x2b);
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ state->fbmix[core] =
+ READ_RADIO_REG2(pi, RADIO_2056, RX, core,
+ TXFBMIX_G);
+ state->intpa_master[core] =
+ READ_RADIO_REG2(pi, RADIO_2056, TX, core,
+ INTPAG_MASTER);
+
+ WRITE_RADIO_REG2(pi, RADIO_2056, RX, core, TXFBMIX_G,
+ 0x03);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
+ INTPAG_MASTER, 0x04);
+ } else {
+ state->fbmix[core] =
+ READ_RADIO_REG2(pi, RADIO_2056, RX, core,
+ TXFBMIX_A);
+ state->intpa_master[core] =
+ READ_RADIO_REG2(pi, RADIO_2056, TX, core,
+ INTPAA_MASTER);
+
+ WRITE_RADIO_REG2(pi, RADIO_2056, RX, core, TXFBMIX_A,
+ 0x03);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
+ INTPAA_MASTER, 0x04);
+
+ }
+
+ tone_freq = 4000;
+
+ wlc_phy_tx_tone_nphy(pi, tone_freq, 181, 0, 0, false);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x297 :
+ 0x29b, (0x1 << 0), (1) << 0);
+
+ mod_phy_reg(pi, (off_core == PHY_CORE_0) ? 0x297 :
+ 0x29b, (0x1 << 0), (0) << 0);
+
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 3), 0, 0x3, 0);
+ }
+}
+
+static void
+wlc_phy_papd_cal_cleanup_nphy(struct brcms_phy *pi,
+ struct nphy_papd_restore_state *state)
+{
+ u8 core;
+
+ wlc_phy_stopplayback_nphy(pi);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+
+ for (core = 0; core < pi->pubpi.phy_corenum; core++) {
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_2G_PWRUP, 0);
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_2G_ATTEN,
+ state->atten[core]);
+ } else {
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_5G_PWRUP, 0);
+ WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
+ TXRXCOUPLE_5G_ATTEN,
+ state->atten[core]);
}
+ }
- } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
- if (PHY_IPA(pi)) {
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- wlc_phy_cal_txgainctrl_nphy(pi, 12,
- false);
- } else {
- pi->nphy_txcal_pwr_idx[0] = 80;
- pi->nphy_txcal_pwr_idx[1] = 80;
- wlc_phy_txpwr_index_nphy(pi, 3, 80,
- false);
- save_bbmult = true;
- }
+ if ((pi->pubpi.radiorev == 4) || (pi->pubpi.radiorev == 6))
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 2),
+ 1, 0x3, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ else
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 2),
+ 0, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 1),
+ 0, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 0), 0, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID2);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 2), 0, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID2);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 11), 1, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3), 0, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 11), 0, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 12), 0, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 2), 1, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 0), 0, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 1), 1, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID2);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 8), 0, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 9), 1, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 10), 0, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3), 1, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 5), 0, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 4), 0, 0x3, 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+
+ for (core = 0; core < pi->pubpi.phy_corenum; core++) {
+
+ write_phy_reg(pi, (core == PHY_CORE_0) ?
+ 0xa6 : 0xa7, state->afectrl[core]);
+ write_phy_reg(pi, (core == PHY_CORE_0) ? 0x8f :
+ 0xa5, state->afeoverride[core]);
+ }
+
+ wlc_phy_ipa_set_bbmult_nphy(pi, (state->mm >> 8) & 0xff,
+ (state->mm & 0xff));
+
+ if (NREV_IS(pi->pubpi.phy_rev, 7)
+ || NREV_GE(pi->pubpi.phy_rev, 8))
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi, (0x1 << 7), 0, 0,
+ 1,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ } else {
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 12), 0, 0x3, 1);
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 13), 0, 0x3, 1);
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 0), 0, 0x3, 1);
+
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 2), 0, 0x3, 1);
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 1), 0, 0x3, 1);
+
+ for (core = 0; core < pi->pubpi.phy_corenum; core++) {
+
+ WRITE_RADIO_REG2(pi, RADIO_2056, RX, core, VGA_MASTER,
+ state->vga_master[core]);
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ WRITE_RADIO_REG2(pi, RADIO_2056, RX, core,
+ TXFBMIX_G, state->fbmix[core]);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
+ INTPAG_MASTER,
+ state->intpa_master[core]);
} else {
+ WRITE_RADIO_REG2(pi, RADIO_2056, RX, core,
+ TXFBMIX_A, state->fbmix[core]);
+ WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
+ INTPAA_MASTER,
+ state->intpa_master[core]);
+ }
- wlc_phy_internal_cal_txgain_nphy(pi);
- save_bbmult = true;
+ write_phy_reg(pi, (core == PHY_CORE_0) ?
+ 0xa6 : 0xa7, state->afectrl[core]);
+ write_phy_reg(pi, (core == PHY_CORE_0) ? 0x8f :
+ 0xa5, state->afeoverride[core]);
+ }
+
+ wlc_phy_ipa_set_bbmult_nphy(pi, (state->mm >> 8) & 0xff,
+ (state->mm & 0xff));
+
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 3), 0, 0x3, 1);
+ }
+}
+
+static void
+wlc_phy_a1_nphy(struct brcms_phy *pi, u8 core, u32 winsz, u32 start,
+ u32 end)
+{
+ u32 *buf, *src, *dst, sz;
+
+ sz = end - start + 1;
+
+ buf = kmalloc(2 * sizeof(u32) * NPHY_PAPD_EPS_TBL_SIZE, GFP_ATOMIC);
+ if (NULL == buf)
+ return;
+
+ src = buf;
+ dst = buf + NPHY_PAPD_EPS_TBL_SIZE;
+
+ wlc_phy_table_read_nphy(pi,
+ (core ==
+ PHY_CORE_0 ? NPHY_TBL_ID_EPSILONTBL0 :
+ NPHY_TBL_ID_EPSILONTBL1),
+ NPHY_PAPD_EPS_TBL_SIZE, 0, 32, src);
+
+ do {
+ u32 phy_a1, phy_a2;
+ s32 phy_a3, phy_a4, phy_a5, phy_a6, phy_a7;
+
+ phy_a1 = end - min(end, (winsz >> 1));
+ phy_a2 = min_t(u32, NPHY_PAPD_EPS_TBL_SIZE - 1,
+ end + (winsz >> 1));
+ phy_a3 = phy_a2 - phy_a1 + 1;
+ phy_a6 = 0;
+ phy_a7 = 0;
+
+ do {
+ wlc_phy_papd_decode_epsilon(src[phy_a2], &phy_a4,
+ &phy_a5);
+ phy_a6 += phy_a4;
+ phy_a7 += phy_a5;
+ } while (phy_a2-- != phy_a1);
+
+ phy_a6 /= phy_a3;
+ phy_a7 /= phy_a3;
+ dst[end] = ((u32) phy_a7 << 13) | ((u32) phy_a6 & 0x1fff);
+ } while (end-- != start);
+
+ wlc_phy_table_write_nphy(pi,
+ (core ==
+ PHY_CORE_0) ? NPHY_TBL_ID_EPSILONTBL0 :
+ NPHY_TBL_ID_EPSILONTBL1, sz, start, 32, dst);
+
+ kfree(buf);
+}
+
+static void
+wlc_phy_a2_nphy(struct brcms_phy *pi, struct nphy_ipa_txcalgains *txgains,
+ enum phy_cal_mode cal_mode, u8 core)
+{
+ u16 phy_a1, phy_a2, phy_a3;
+ u16 phy_a4, phy_a5;
+ bool phy_a6;
+ u8 phy_a7, m[2];
+ u32 phy_a8 = 0;
+ struct nphy_txgains phy_a9;
+
+ if (NREV_LT(pi->pubpi.phy_rev, 3))
+ return;
+
+ phy_a7 = (core == PHY_CORE_0) ? 1 : 0;
+
+ phy_a6 = ((cal_mode == CAL_GCTRL)
+ || (cal_mode == CAL_SOFT)) ? true : false;
+
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+
+ phy_a9 = wlc_phy_get_tx_gain_nphy(pi);
+
+ if (CHSPEC_IS2G(pi->radio_chanspec))
+ phy_a5 = ((phy_a9.txlpf[core] << 15) |
+ (phy_a9.txgm[core] << 12) |
+ (phy_a9.pga[core] << 8) |
+ (txgains->gains.pad[core] << 3) |
+ (phy_a9.ipa[core]));
+ else
+ phy_a5 = ((phy_a9.txlpf[core] << 15) |
+ (phy_a9.txgm[core] << 12) |
+ (txgains->gains.pga[core] << 8) |
+ (phy_a9.pad[core] << 3) | (phy_a9.ipa[core]));
+
+ wlc_phy_rfctrl_override_1tomany_nphy(
+ pi,
+ NPHY_REV7_RfctrlOverride_cmd_txgain,
+ phy_a5, (1 << core), 0);
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if ((pi->pubpi.radiorev <= 4)
+ || (pi->pubpi.radiorev == 6))
+ m[core] = (pi->bw == WL_CHANSPEC_BW_40) ?
+ 60 : 79;
+ else
+ m[core] = (pi->bw == WL_CHANSPEC_BW_40) ?
+ 45 : 64;
+ } else {
+ m[core] = (pi->bw == WL_CHANSPEC_BW_40) ? 75 : 107;
+ }
+
+ m[phy_a7] = 0;
+ wlc_phy_ipa_set_bbmult_nphy(pi, m[0], m[1]);
+
+ phy_a2 = 63;
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if ((pi->pubpi.radiorev == 4)
+ || (pi->pubpi.radiorev == 6)) {
+ phy_a1 = 30;
+ phy_a3 = 30;
+ } else {
+ phy_a1 = 25;
+ phy_a3 = 25;
+ }
+ } else {
+ if ((pi->pubpi.radiorev == 5)
+ || (pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev == 8)) {
+ phy_a1 = 25;
+ phy_a3 = 25;
+ } else {
+ phy_a1 = 35;
+ phy_a3 = 35;
}
+ }
- } else if (NREV_IS(pi->pubpi.phy_rev, 6)) {
- if (PHY_IPA(pi)) {
+ if (cal_mode == CAL_GCTRL) {
+ if ((pi->pubpi.radiorev == 5)
+ && (CHSPEC_IS2G(pi->radio_chanspec)))
+ phy_a1 = 55;
+ else if (((pi->pubpi.radiorev == 7) &&
+ (CHSPEC_IS2G(pi->radio_chanspec))) ||
+ ((pi->pubpi.radiorev == 8) &&
+ (CHSPEC_IS2G(pi->radio_chanspec))))
+ phy_a1 = 60;
+ else
+ phy_a1 = 63;
+
+ } else if ((cal_mode != CAL_FULL) && (cal_mode != CAL_SOFT)) {
+
+ phy_a1 = 35;
+ phy_a3 = 35;
+ }
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x297 :
+ 0x29b, (0x1 << 0), (1) << 0);
+
+ mod_phy_reg(pi, (phy_a7 == PHY_CORE_0) ? 0x297 :
+ 0x29b, (0x1 << 0), (0) << 0);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x1 << 13), (1) << 13);
+
+ mod_phy_reg(pi, (phy_a7 == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x1 << 13), (0) << 13);
+
+ write_phy_reg(pi, 0x2a1, 0x80);
+ write_phy_reg(pi, 0x2a2, 0x100);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x7 << 4), (11) << 4);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x7 << 8), (11) << 8);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x7 << 0), (0x3) << 0);
+
+ write_phy_reg(pi, 0x2e5, 0x20);
+
+ mod_phy_reg(pi, 0x2a0, (0x3f << 0), (phy_a3) << 0);
+
+ mod_phy_reg(pi, 0x29f, (0x3f << 0), (phy_a1) << 0);
+
+ mod_phy_reg(pi, 0x29f, (0x3f << 8), (phy_a2) << 8);
+
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3),
+ 1, ((core == 0) ? 1 : 2), 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3),
+ 0, ((core == 0) ? 2 : 1), 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+
+ write_phy_reg(pi, 0x2be, 1);
+ SPINWAIT(read_phy_reg(pi, 0x2be), 10 * 1000 * 1000);
+
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3),
+ 0, 0x3, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
+
+ wlc_phy_table_write_nphy(pi,
+ (core ==
+ PHY_CORE_0) ? NPHY_TBL_ID_EPSILONTBL0
+ : NPHY_TBL_ID_EPSILONTBL1, 1, phy_a3,
+ 32, &phy_a8);
+
+ if (cal_mode != CAL_GCTRL) {
+ if (CHSPEC_IS5G(pi->radio_chanspec))
+ wlc_phy_a1_nphy(pi, core, 5, 0, 35);
+ }
+
+ wlc_phy_rfctrl_override_1tomany_nphy(
+ pi,
+ NPHY_REV7_RfctrlOverride_cmd_txgain,
+ phy_a5, (1 << core), 1);
+
+ } else {
+
+ if (txgains) {
+ if (txgains->useindex) {
+ phy_a4 = 15 - ((txgains->index) >> 3);
if (CHSPEC_IS2G(pi->radio_chanspec)) {
- wlc_phy_cal_txgainctrl_nphy(pi, 12,
- false);
+ if (NREV_GE(pi->pubpi.phy_rev, 6))
+ phy_a5 = 0x00f7 | (phy_a4 << 8);
+
+ else
+ if (NREV_IS(pi->pubpi.phy_rev, 5))
+ phy_a5 = 0x10f7 | (phy_a4 << 8);
+ else
+ phy_a5 = 0x50f7 | (phy_a4 << 8);
} else {
- wlc_phy_cal_txgainctrl_nphy(pi, 14,
- false);
+ phy_a5 = 0x70f7 | (phy_a4 << 8);
}
+ wlc_phy_rfctrl_override_nphy(pi,
+ (0x1 << 13),
+ phy_a5,
+ (1 << core), 0);
} else {
-
- wlc_phy_internal_cal_txgain_nphy(pi);
- save_bbmult = true;
+ wlc_phy_rfctrl_override_nphy(pi,
+ (0x1 << 13),
+ 0x5bf7,
+ (1 << core), 0);
}
}
- } else {
- wlc_phy_cal_txgainctrl_nphy(pi, 10, false);
- }
+ if (CHSPEC_IS2G(pi->radio_chanspec))
+ m[core] = (pi->bw == WL_CHANSPEC_BW_40) ? 45 : 64;
+ else
+ m[core] = (pi->bw == WL_CHANSPEC_BW_40) ? 75 : 107;
- if (save_bbmult) {
- wlc_phy_table_read_nphy(pi, 15, 1, 87, 16,
- &pi->nphy_txcal_bbmult);
+ m[phy_a7] = 0;
+ wlc_phy_ipa_set_bbmult_nphy(pi, m[0], m[1]);
+
+ phy_a2 = 63;
+
+ if (cal_mode == CAL_FULL) {
+ phy_a1 = 25;
+ phy_a3 = 25;
+ } else if (cal_mode == CAL_SOFT) {
+ phy_a1 = 25;
+ phy_a3 = 25;
+ } else if (cal_mode == CAL_GCTRL) {
+ phy_a1 = 63;
+ phy_a3 = 25;
+ } else {
+
+ phy_a1 = 25;
+ phy_a3 = 25;
+ }
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x297 :
+ 0x29b, (0x1 << 0), (1) << 0);
+
+ mod_phy_reg(pi, (phy_a7 == PHY_CORE_0) ? 0x297 :
+ 0x29b, (0x1 << 0), (0) << 0);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 6)) {
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x1 << 13), (1) << 13);
+
+ mod_phy_reg(pi, (phy_a7 == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x1 << 13), (0) << 13);
+
+ write_phy_reg(pi, 0x2a1, 0x20);
+ write_phy_reg(pi, 0x2a2, 0x60);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0xf << 4), (9) << 4);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0xf << 8), (9) << 8);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0xf << 0), (0x2) << 0);
+
+ write_phy_reg(pi, 0x2e5, 0x20);
+ } else {
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x1 << 11), (1) << 11);
+
+ mod_phy_reg(pi, (phy_a7 == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x1 << 11), (0) << 11);
+
+ write_phy_reg(pi, 0x2a1, 0x80);
+ write_phy_reg(pi, 0x2a2, 0x600);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x7 << 4), (0) << 4);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x7 << 8), (0) << 8);
+
+ mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x7 << 0), (0x3) << 0);
+
+ mod_phy_reg(pi, 0x2a0, (0x3f << 8), (0x20) << 8);
+
+ }
+
+ mod_phy_reg(pi, 0x2a0, (0x3f << 0), (phy_a3) << 0);
+
+ mod_phy_reg(pi, 0x29f, (0x3f << 0), (phy_a1) << 0);
+
+ mod_phy_reg(pi, 0x29f, (0x3f << 8), (phy_a2) << 8);
+
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 3), 1, 0x3, 0);
+
+ write_phy_reg(pi, 0x2be, 1);
+ SPINWAIT(read_phy_reg(pi, 0x2be), 10 * 1000 * 1000);
+
+ wlc_phy_rfctrl_override_nphy(pi, (0x1 << 3), 0, 0x3, 0);
+
+ wlc_phy_table_write_nphy(pi,
+ (core ==
+ PHY_CORE_0) ? NPHY_TBL_ID_EPSILONTBL0
+ : NPHY_TBL_ID_EPSILONTBL1, 1, phy_a3,
+ 32, &phy_a8);
+
+ if (cal_mode != CAL_GCTRL)
+ wlc_phy_a1_nphy(pi, core, 5, 0, 40);
}
}
-void
-wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi, s32 dBm_targetpower,
- bool debug)
+static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
{
- int gainctrl_loopidx;
- uint core;
- u16 m0m1, curr_m0m1;
- s32 delta_power;
- s32 txpwrindex;
- s32 qdBm_power[2];
- u16 orig_BBConfig;
- u16 phy_saveregs[4];
- u32 freq_test;
- u16 ampl_test = 250;
- uint stepsize;
- bool phyhang_avoid_state = false;
+ int phy_a1;
+ int phy_a2;
+ bool phy_a3;
+ struct nphy_ipa_txcalgains phy_a4;
+ bool phy_a5 = false;
+ bool phy_a6 = true;
+ s32 phy_a7, phy_a8;
+ u32 phy_a9;
+ int phy_a10;
+ bool phy_a11 = false;
+ int phy_a12;
+ u8 phy_a13 = 0;
+ u8 phy_a14;
+ u8 *phy_a15 = NULL;
+
+ phy_a4.useindex = true;
+ phy_a12 = start_gain;
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- stepsize = 2;
- } else {
+ phy_a2 = 20;
+ phy_a1 = 1;
- stepsize = 1;
- }
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if (pi->pubpi.radiorev == 5) {
- if (CHSPEC_IS40(pi->radio_chanspec)) {
- freq_test = 5000;
- } else {
- freq_test = 2500;
- }
+ phy_a15 = pad_gain_codes_used_2057rev5;
+ phy_a13 =
+ sizeof(pad_gain_codes_used_2057rev5) /
+ sizeof(pad_gain_codes_used_2057rev5
+ [0]) - 1;
- wlc_phy_txpwr_index_nphy(pi, 1, pi->nphy_cal_orig_pwr_idx[0], true);
- wlc_phy_txpwr_index_nphy(pi, 2, pi->nphy_cal_orig_pwr_idx[1], true);
+ } else if ((pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev == 8)) {
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, true);
+ phy_a15 = pad_gain_codes_used_2057rev7;
+ phy_a13 =
+ sizeof(pad_gain_codes_used_2057rev7) /
+ sizeof(pad_gain_codes_used_2057rev7
+ [0]) - 1;
- phyhang_avoid_state = pi->phyhang_avoid;
- pi->phyhang_avoid = false;
+ } else {
- phy_saveregs[0] = read_phy_reg(pi, 0x91);
- phy_saveregs[1] = read_phy_reg(pi, 0x92);
- phy_saveregs[2] = read_phy_reg(pi, 0xe7);
- phy_saveregs[3] = read_phy_reg(pi, 0xec);
- wlc_phy_rfctrlintc_override_nphy(pi, NPHY_RfctrlIntc_override_PA, 1,
- RADIO_MIMO_CORESEL_CORE1 |
- RADIO_MIMO_CORESEL_CORE2);
+ phy_a15 = pad_all_gain_codes_2057;
+ phy_a13 = sizeof(pad_all_gain_codes_2057) /
+ sizeof(pad_all_gain_codes_2057[0]) -
+ 1;
+ }
- if (!debug) {
- wlc_phy_rfctrlintc_override_nphy(pi,
- NPHY_RfctrlIntc_override_TRSW,
- 0x2, RADIO_MIMO_CORESEL_CORE1);
- wlc_phy_rfctrlintc_override_nphy(pi,
- NPHY_RfctrlIntc_override_TRSW,
- 0x8, RADIO_MIMO_CORESEL_CORE2);
- } else {
- wlc_phy_rfctrlintc_override_nphy(pi,
- NPHY_RfctrlIntc_override_TRSW,
- 0x1, RADIO_MIMO_CORESEL_CORE1);
- wlc_phy_rfctrlintc_override_nphy(pi,
- NPHY_RfctrlIntc_override_TRSW,
- 0x7, RADIO_MIMO_CORESEL_CORE2);
- }
+ } else {
- orig_BBConfig = read_phy_reg(pi, 0x01);
- mod_phy_reg(pi, 0x01, (0x1 << 15), 0);
+ phy_a15 = pga_all_gain_codes_2057;
+ phy_a13 = sizeof(pga_all_gain_codes_2057) /
+ sizeof(pga_all_gain_codes_2057[0]) - 1;
+ }
- wlc_phy_table_read_nphy(pi, 15, 1, 87, 16, &m0m1);
+ phy_a14 = 0;
- for (core = 0; core < pi->pubpi.phy_corenum; core++) {
- txpwrindex = (s32) pi->nphy_cal_orig_pwr_idx[core];
+ for (phy_a10 = 0; phy_a10 < phy_a2; phy_a10++) {
+ if (CHSPEC_IS2G(pi->radio_chanspec))
+ phy_a4.gains.pad[core] =
+ (u16) phy_a15[phy_a12];
+ else
+ phy_a4.gains.pga[core] =
+ (u16) phy_a15[phy_a12];
- for (gainctrl_loopidx = 0; gainctrl_loopidx < 2;
- gainctrl_loopidx++) {
- wlc_phy_tx_tone_nphy(pi, freq_test, ampl_test, 0, 0,
- false);
+ wlc_phy_a2_nphy(pi, &phy_a4, CAL_GCTRL, core);
- if (core == PHY_CORE_0) {
- curr_m0m1 = m0m1 & 0xff00;
- } else {
- curr_m0m1 = m0m1 & 0x00ff;
+ wlc_phy_table_read_nphy(pi,
+ (core ==
+ PHY_CORE_0 ?
+ NPHY_TBL_ID_EPSILONTBL0 :
+ NPHY_TBL_ID_EPSILONTBL1), 1,
+ 63, 32, &phy_a9);
+
+ wlc_phy_papd_decode_epsilon(phy_a9, &phy_a7, &phy_a8);
+
+ phy_a3 = ((phy_a7 == 4095) || (phy_a7 == -4096) ||
+ (phy_a8 == 4095) || (phy_a8 == -4096));
+
+ if (!phy_a6 && (phy_a3 != phy_a5)) {
+ if (!phy_a3)
+ phy_a12 -= (u8) phy_a1;
+
+ phy_a11 = true;
+ break;
}
- wlc_phy_table_write_nphy(pi, 15, 1, 87, 16, &curr_m0m1);
- wlc_phy_table_write_nphy(pi, 15, 1, 95, 16, &curr_m0m1);
+ if (phy_a3)
+ phy_a12 += (u8) phy_a1;
+ else
+ phy_a12 -= (u8) phy_a1;
- udelay(50);
+ if ((phy_a12 < phy_a14) || (phy_a12 > phy_a13)) {
+ if (phy_a12 < phy_a14)
+ phy_a12 = phy_a14;
+ else
+ phy_a12 = phy_a13;
- wlc_phy_est_tonepwr_nphy(pi, qdBm_power,
- NPHY_CAL_TSSISAMPS);
+ phy_a11 = true;
+ break;
+ }
- pi->nphy_bb_mult_save = 0;
- wlc_phy_stopplayback_nphy(pi);
+ phy_a6 = false;
+ phy_a5 = phy_a3;
+ }
- delta_power = (dBm_targetpower * 4) - qdBm_power[core];
+ } else {
+ phy_a2 = 10;
+ phy_a1 = 8;
+ for (phy_a10 = 0; phy_a10 < phy_a2; phy_a10++) {
+ phy_a4.index = (u8) phy_a12;
+ wlc_phy_a2_nphy(pi, &phy_a4, CAL_GCTRL, core);
- txpwrindex -= stepsize * delta_power;
- if (txpwrindex < 0) {
- txpwrindex = 0;
- } else if (txpwrindex > 127) {
- txpwrindex = 127;
+ wlc_phy_table_read_nphy(pi,
+ (core ==
+ PHY_CORE_0 ?
+ NPHY_TBL_ID_EPSILONTBL0 :
+ NPHY_TBL_ID_EPSILONTBL1), 1,
+ 63, 32, &phy_a9);
+
+ wlc_phy_papd_decode_epsilon(phy_a9, &phy_a7, &phy_a8);
+
+ phy_a3 = ((phy_a7 == 4095) || (phy_a7 == -4096) ||
+ (phy_a8 == 4095) || (phy_a8 == -4096));
+
+ if (!phy_a6 && (phy_a3 != phy_a5)) {
+ if (!phy_a3)
+ phy_a12 -= (u8) phy_a1;
+
+ phy_a11 = true;
+ break;
}
- if (CHSPEC_IS5G(pi->radio_chanspec)) {
- if (NREV_IS(pi->pubpi.phy_rev, 4) &&
- (pi->srom_fem5g.extpagain == 3)) {
- if (txpwrindex < 30) {
- txpwrindex = 30;
- }
- }
- } else {
- if (NREV_GE(pi->pubpi.phy_rev, 5) &&
- (pi->srom_fem2g.extpagain == 3)) {
- if (txpwrindex < 50) {
- txpwrindex = 50;
- }
- }
+ if (phy_a3)
+ phy_a12 += (u8) phy_a1;
+ else
+ phy_a12 -= (u8) phy_a1;
+
+ if ((phy_a12 < 0) || (phy_a12 > 127)) {
+ if (phy_a12 < 0)
+ phy_a12 = 0;
+ else
+ phy_a12 = 127;
+
+ phy_a11 = true;
+ break;
}
- wlc_phy_txpwr_index_nphy(pi, (1 << core),
- (u8) txpwrindex, true);
+ phy_a6 = false;
+ phy_a5 = phy_a3;
}
- pi->nphy_txcal_pwr_idx[core] = (u8) txpwrindex;
+ }
- if (debug) {
- u16 radio_gain;
- u16 dbg_m0m1;
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ return (u8) phy_a15[phy_a12];
+ else
+ return (u8) phy_a12;
- wlc_phy_table_read_nphy(pi, 15, 1, 87, 16, &dbg_m0m1);
+}
- wlc_phy_tx_tone_nphy(pi, freq_test, ampl_test, 0, 0,
- false);
+static void wlc_phy_a4(struct brcms_phy *pi, bool full_cal)
+{
+ struct nphy_ipa_txcalgains phy_b1[2];
+ struct nphy_papd_restore_state phy_b2;
+ bool phy_b3;
+ u8 phy_b4;
+ u8 phy_b5;
+ s16 phy_b6, phy_b7, phy_b8;
+ u16 phy_b9;
+ s16 phy_b10, phy_b11, phy_b12;
- wlc_phy_table_write_nphy(pi, 15, 1, 87, 16, &dbg_m0m1);
- wlc_phy_table_write_nphy(pi, 15, 1, 95, 16, &dbg_m0m1);
+ phy_b11 = 0;
+ phy_b12 = 0;
+ phy_b7 = 0;
+ phy_b8 = 0;
+ phy_b6 = 0;
- udelay(100);
+ if (pi->nphy_papd_skip == 1)
+ return;
- wlc_phy_est_tonepwr_nphy(pi, qdBm_power,
- NPHY_CAL_TSSISAMPS);
+ phy_b3 = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ if (!phy_b3)
+ wlapi_suspend_mac_and_wait(pi->sh->physhim);
- wlc_phy_table_read_nphy(pi, 7, 1, (0x110 + core), 16,
- &radio_gain);
+ wlc_phy_stay_in_carriersearch_nphy(pi, true);
- mdelay(4000);
- pi->nphy_bb_mult_save = 0;
- wlc_phy_stopplayback_nphy(pi);
+ pi->nphy_force_papd_cal = false;
+
+ for (phy_b5 = 0; phy_b5 < pi->pubpi.phy_corenum; phy_b5++)
+ pi->nphy_papd_tx_gain_at_last_cal[phy_b5] =
+ wlc_phy_txpwr_idx_cur_get_nphy(pi, phy_b5);
+
+ pi->nphy_papd_last_cal = pi->sh->now;
+ pi->nphy_papd_recal_counter++;
+
+ phy_b4 = pi->nphy_txpwrctrl;
+ wlc_phy_txpwrctrl_enable_nphy(pi, PHY_TPC_HW_OFF);
+
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_SCALARTBL0, 64, 0, 32,
+ nphy_papd_scaltbl);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_SCALARTBL1, 64, 0, 32,
+ nphy_papd_scaltbl);
+
+ phy_b9 = read_phy_reg(pi, 0x01);
+ mod_phy_reg(pi, 0x01, (0x1 << 15), 0);
+
+ for (phy_b5 = 0; phy_b5 < pi->pubpi.phy_corenum; phy_b5++) {
+ s32 i, val = 0;
+ for (i = 0; i < 64; i++)
+ wlc_phy_table_write_nphy(pi,
+ ((phy_b5 ==
+ PHY_CORE_0) ?
+ NPHY_TBL_ID_EPSILONTBL0 :
+ NPHY_TBL_ID_EPSILONTBL1), 1,
+ i, 32, &val);
+ }
+
+ wlc_phy_ipa_restore_tx_digi_filts_nphy(pi);
+
+ phy_b2.mm = wlc_phy_ipa_get_bbmult_nphy(pi);
+ for (phy_b5 = 0; phy_b5 < pi->pubpi.phy_corenum; phy_b5++) {
+ wlc_phy_papd_cal_setup_nphy(pi, &phy_b2, phy_b5);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if ((pi->pubpi.radiorev == 3)
+ || (pi->pubpi.radiorev == 4)
+ || (pi->pubpi.radiorev == 6)) {
+ pi->nphy_papd_cal_gain_index[phy_b5] =
+ 23;
+ } else if (pi->pubpi.radiorev == 5) {
+ pi->nphy_papd_cal_gain_index[phy_b5] =
+ 0;
+ pi->nphy_papd_cal_gain_index[phy_b5] =
+ wlc_phy_a3_nphy(
+ pi,
+ pi->
+ nphy_papd_cal_gain_index
+ [phy_b5],
+ phy_b5);
+
+ } else if ((pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev == 8)) {
+
+ pi->nphy_papd_cal_gain_index[phy_b5] =
+ 0;
+ pi->nphy_papd_cal_gain_index[phy_b5] =
+ wlc_phy_a3_nphy(
+ pi,
+ pi->
+ nphy_papd_cal_gain_index
+ [phy_b5],
+ phy_b5);
+
+ }
+
+ phy_b1[phy_b5].gains.pad[phy_b5] =
+ pi->nphy_papd_cal_gain_index[phy_b5];
+
+ } else {
+ pi->nphy_papd_cal_gain_index[phy_b5] = 0;
+ pi->nphy_papd_cal_gain_index[phy_b5] =
+ wlc_phy_a3_nphy(
+ pi,
+ pi->
+ nphy_papd_cal_gain_index
+ [phy_b5], phy_b5);
+ phy_b1[phy_b5].gains.pga[phy_b5] =
+ pi->nphy_papd_cal_gain_index[phy_b5];
+ }
+ } else {
+ phy_b1[phy_b5].useindex = true;
+ phy_b1[phy_b5].index = 16;
+ phy_b1[phy_b5].index =
+ wlc_phy_a3_nphy(pi, phy_b1[phy_b5].index,
+ phy_b5);
+
+ pi->nphy_papd_cal_gain_index[phy_b5] =
+ 15 - ((phy_b1[phy_b5].index) >> 3);
}
+
+ switch (pi->nphy_papd_cal_type) {
+ case 0:
+ wlc_phy_a2_nphy(pi, &phy_b1[phy_b5], CAL_FULL, phy_b5);
+ break;
+ case 1:
+ wlc_phy_a2_nphy(pi, &phy_b1[phy_b5], CAL_SOFT, phy_b5);
+ break;
+ }
+
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ wlc_phy_papd_cal_cleanup_nphy(pi, &phy_b2);
}
- wlc_phy_txpwr_index_nphy(pi, 1, pi->nphy_txcal_pwr_idx[0], true);
- wlc_phy_txpwr_index_nphy(pi, 2, pi->nphy_txcal_pwr_idx[1], true);
+ if (NREV_LT(pi->pubpi.phy_rev, 7))
+ wlc_phy_papd_cal_cleanup_nphy(pi, &phy_b2);
- wlc_phy_table_read_nphy(pi, 15, 1, 87, 16, &pi->nphy_txcal_bbmult);
+ for (phy_b5 = 0; phy_b5 < pi->pubpi.phy_corenum; phy_b5++) {
+ int eps_offset = 0;
- write_phy_reg(pi, 0x01, orig_BBConfig);
+ if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if (pi->pubpi.radiorev == 3)
+ eps_offset = -2;
+ else if (pi->pubpi.radiorev == 5)
+ eps_offset = 3;
+ else
+ eps_offset = -1;
+ } else {
+ eps_offset = 2;
+ }
- write_phy_reg(pi, 0x91, phy_saveregs[0]);
- write_phy_reg(pi, 0x92, phy_saveregs[1]);
- write_phy_reg(pi, 0xe7, phy_saveregs[2]);
- write_phy_reg(pi, 0xec, phy_saveregs[3]);
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ phy_b8 = phy_b1[phy_b5].gains.pad[phy_b5];
+ phy_b10 = 0;
+ if ((pi->pubpi.radiorev == 3) ||
+ (pi->pubpi.radiorev == 4) ||
+ (pi->pubpi.radiorev == 6)) {
+ phy_b12 = -(
+ nphy_papd_padgain_dlt_2g_2057rev3n4
+ [phy_b8] + 1) / 2;
+ phy_b10 = -1;
+ } else if (pi->pubpi.radiorev == 5) {
+ phy_b12 = -(
+ nphy_papd_padgain_dlt_2g_2057rev5
+ [phy_b8] + 1) / 2;
+ } else if ((pi->pubpi.radiorev == 7) ||
+ (pi->pubpi.radiorev == 8)) {
+ phy_b12 = -(
+ nphy_papd_padgain_dlt_2g_2057rev7
+ [phy_b8] + 1) / 2;
+ }
+ } else {
+ phy_b7 = phy_b1[phy_b5].gains.pga[phy_b5];
+ if ((pi->pubpi.radiorev == 3) ||
+ (pi->pubpi.radiorev == 4) ||
+ (pi->pubpi.radiorev == 6))
+ phy_b11 =
+ -(nphy_papd_pgagain_dlt_5g_2057
+ [phy_b7]
+ + 1) / 2;
+ else if ((pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev == 8))
+ phy_b11 = -(
+ nphy_papd_pgagain_dlt_5g_2057rev7
+ [phy_b7] + 1) / 2;
- pi->phyhang_avoid = phyhang_avoid_state;
+ phy_b10 = -9;
+ }
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, false);
-}
+ if (CHSPEC_IS2G(pi->radio_chanspec))
+ phy_b6 =
+ -60 + 27 + eps_offset + phy_b12 +
+ phy_b10;
+ else
+ phy_b6 =
+ -60 + 27 + eps_offset + phy_b11 +
+ phy_b10;
-static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
-{
- int index;
- u32 bbmult_scale;
- u16 bbmult;
- u16 tblentry;
+ mod_phy_reg(pi, (phy_b5 == PHY_CORE_0) ? 0x298 :
+ 0x29c, (0x1ff << 7), (phy_b6) << 7);
- struct nphy_txiqcal_ladder ladder_lo[] = {
- {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
- {25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
- {25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
- };
+ pi->nphy_papd_epsilon_offset[phy_b5] = phy_b6;
+ } else {
+ if (NREV_LT(pi->pubpi.phy_rev, 5))
+ eps_offset = 4;
+ else
+ eps_offset = 2;
- struct nphy_txiqcal_ladder ladder_iq[] = {
- {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
- {25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
- {100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
- };
+ phy_b7 = 15 - ((phy_b1[phy_b5].index) >> 3);
- bbmult = (core == PHY_CORE_0) ?
- ((pi->nphy_txcal_bbmult >> 8) & 0xff) : (pi->
- nphy_txcal_bbmult & 0xff);
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ phy_b11 =
+ -(nphy_papd_pga_gain_delta_ipa_2g[
+ phy_b7] +
+ 1) / 2;
+ phy_b10 = 0;
+ } else {
+ phy_b11 =
+ -(nphy_papd_pga_gain_delta_ipa_5g[
+ phy_b7] +
+ 1) / 2;
+ phy_b10 = -9;
+ }
- for (index = 0; index < 18; index++) {
- bbmult_scale = ladder_lo[index].percent * bbmult;
- bbmult_scale /= 100;
+ phy_b6 = -60 + 27 + eps_offset + phy_b11 + phy_b10;
- tblentry =
- ((bbmult_scale & 0xff) << 8) | ladder_lo[index].g_env;
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_IQLOCAL, 1, index, 16,
- &tblentry);
+ mod_phy_reg(pi, (phy_b5 == PHY_CORE_0) ? 0x298 :
+ 0x29c, (0x1ff << 7), (phy_b6) << 7);
- bbmult_scale = ladder_iq[index].percent * bbmult;
- bbmult_scale /= 100;
+ pi->nphy_papd_epsilon_offset[phy_b5] = phy_b6;
+ }
+ }
+
+ mod_phy_reg(pi, (0 == PHY_CORE_0) ? 0x297 :
+ 0x29b, (0x1 << 0), (NPHY_PAPD_COMP_ON) << 0);
+
+ mod_phy_reg(pi, (1 == PHY_CORE_0) ? 0x297 :
+ 0x29b, (0x1 << 0), (NPHY_PAPD_COMP_ON) << 0);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 6)) {
+ mod_phy_reg(pi, (0 == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x1 << 13), (0) << 13);
+
+ mod_phy_reg(pi, (1 == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x1 << 13), (0) << 13);
+
+ } else {
+ mod_phy_reg(pi, (0 == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x1 << 11), (0) << 11);
+
+ mod_phy_reg(pi, (1 == PHY_CORE_0) ? 0x2a3 :
+ 0x2a4, (0x1 << 11), (0) << 11);
- tblentry =
- ((bbmult_scale & 0xff) << 8) | ladder_iq[index].g_env;
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_IQLOCAL, 1, index + 32,
- 16, &tblentry);
}
+ pi->nphy_papdcomp = NPHY_PAPD_COMP_ON;
+
+ write_phy_reg(pi, 0x01, phy_b9);
+
+ wlc_phy_ipa_set_tx_digi_filts_nphy(pi);
+
+ wlc_phy_txpwrctrl_enable_nphy(pi, phy_b4);
+ if (phy_b4 == PHY_TPC_HW_OFF) {
+ wlc_phy_txpwr_index_nphy(pi, (1 << 0),
+ (s8) (pi->nphy_txpwrindex[0].
+ index_internal), false);
+ wlc_phy_txpwr_index_nphy(pi, (1 << 1),
+ (s8) (pi->nphy_txpwrindex[1].
+ index_internal), false);
+ }
+
+ wlc_phy_stay_in_carriersearch_nphy(pi, false);
+
+ if (!phy_b3)
+ wlapi_enable_mac(pi->sh->physhim);
}
void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
@@ -23738,11 +25671,6 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
bool restore_tx_gain = false;
bool mphase;
- if (NORADIO_ENAB(pi->pubpi)) {
- wlc_phy_cal_perical_mphase_reset(pi);
- return;
- }
-
if (PHY_MUTED(pi))
return;
@@ -23751,19 +25679,18 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
else if (caltype == PHY_PERICAL_PARTIAL)
fullcal = false;
- if (pi->cal_type_override != PHY_PERICAL_AUTO) {
+ if (pi->cal_type_override != PHY_PERICAL_AUTO)
fullcal =
- (pi->cal_type_override == PHY_PERICAL_FULL) ? true : false;
- }
+ (pi->cal_type_override ==
+ PHY_PERICAL_FULL) ? true : false;
if ((pi->mphase_cal_phase_id > MPHASE_CAL_STATE_INIT)) {
if (pi->nphy_txiqlocal_chanspec != pi->radio_chanspec)
wlc_phy_cal_perical_mphase_restart(pi);
}
- if ((pi->mphase_cal_phase_id == MPHASE_CAL_STATE_RXCAL)) {
+ if ((pi->mphase_cal_phase_id == MPHASE_CAL_STATE_RXCAL))
wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000);
- }
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -23772,9 +25699,9 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
if ((pi->mphase_cal_phase_id == MPHASE_CAL_STATE_IDLE) ||
(pi->mphase_cal_phase_id == MPHASE_CAL_STATE_INIT)) {
pi->nphy_cal_orig_pwr_idx[0] =
- (u8) ((read_phy_reg(pi, 0x1ed) >> 8) & 0x7f);
+ (u8) ((read_phy_reg(pi, 0x1ed) >> 8) & 0x7f);
pi->nphy_cal_orig_pwr_idx[1] =
- (u8) ((read_phy_reg(pi, 0x1ee) >> 8) & 0x7f);
+ (u8) ((read_phy_reg(pi, 0x1ee) >> 8) & 0x7f);
if (pi->nphy_txpwrctrl != PHY_TPC_HW_OFF) {
wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_RFSEQ, 2,
@@ -23803,7 +25730,8 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
target_gain = pi->nphy_cal_target_gain;
}
if (0 ==
- wlc_phy_cal_txiqlo_nphy(pi, target_gain, fullcal, mphase)) {
+ wlc_phy_cal_txiqlo_nphy(pi, target_gain, fullcal,
+ mphase)) {
if (PHY_IPA(pi))
wlc_phy_a4(pi, true);
@@ -23815,13 +25743,9 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
wlc_phyreg_enter((struct brcms_phy_pub *) pi);
if (0 == wlc_phy_cal_rxiq_nphy(pi, target_gain,
- (pi->
- first_cal_after_assoc
- || (pi->
- cal_type_override
- ==
- PHY_PERICAL_FULL))
- ? 2 : 0, false)) {
+ (pi->first_cal_after_assoc ||
+ (pi->cal_type_override ==
+ PHY_PERICAL_FULL)) ? 2 : 0, false)) {
wlc_phy_savecal_nphy(pi);
wlc_phy_txpwrctrl_coeff_setup_nphy(pi);
@@ -23829,9 +25753,8 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
pi->nphy_perical_last = pi->sh->now;
}
}
- if (caltype != PHY_PERICAL_AUTO) {
+ if (caltype != PHY_PERICAL_AUTO)
wlc_phy_rssi_cal_nphy(pi);
- }
if (pi->first_cal_after_assoc
|| (pi->cal_type_override == PHY_PERICAL_FULL)) {
@@ -23840,18 +25763,17 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
wlc_phy_txpwrctrl_pwr_setup_nphy(pi);
}
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 3))
wlc_phy_radio205x_vcocal_nphy(pi);
- }
} else {
switch (pi->mphase_cal_phase_id) {
case MPHASE_CAL_STATE_INIT:
pi->nphy_perical_last = pi->sh->now;
pi->nphy_txiqlocal_chanspec = pi->radio_chanspec;
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 3))
wlc_phy_precal_txgain_nphy(pi);
- }
+
pi->nphy_cal_target_gain = wlc_phy_get_tx_gain_nphy(pi);
pi->mphase_cal_phase_id++;
break;
@@ -23866,8 +25788,8 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
pi->nphy_rxcal_active = true;
if (wlc_phy_cal_txiqlo_nphy
- (pi, pi->nphy_cal_target_gain, fullcal,
- true) != 0) {
+ (pi, pi->nphy_cal_target_gain, fullcal,
+ true) != 0) {
wlc_phy_cal_perical_mphase_reset(pi);
break;
@@ -23875,20 +25797,19 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
if (NREV_LE(pi->pubpi.phy_rev, 2) &&
(pi->mphase_cal_phase_id ==
- MPHASE_CAL_STATE_TXPHASE4)) {
+ MPHASE_CAL_STATE_TXPHASE4))
pi->mphase_cal_phase_id += 2;
- } else {
+ else
pi->mphase_cal_phase_id++;
- }
break;
case MPHASE_CAL_STATE_PAPDCAL:
if ((pi->radar_percal_mask & 0x2) != 0)
pi->nphy_rxcal_active = true;
- if (PHY_IPA(pi)) {
+ if (PHY_IPA(pi))
wlc_phy_a4(pi, true);
- }
+
pi->mphase_cal_phase_id++;
break;
@@ -23899,9 +25820,8 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
(pi->first_cal_after_assoc ||
(pi->cal_type_override ==
PHY_PERICAL_FULL)) ? 2 : 0,
- false) == 0) {
+ false) == 0)
wlc_phy_savecal_nphy(pi);
- }
pi->mphase_cal_phase_id++;
break;
@@ -23912,16 +25832,15 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
wlc_phy_txpwrctrl_coeff_setup_nphy(pi);
wlc_phy_rssi_cal_nphy(pi);
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 3))
wlc_phy_radio205x_vcocal_nphy(pi);
- }
+
restore_tx_gain = true;
- if (pi->first_cal_after_assoc) {
+ if (pi->first_cal_after_assoc)
pi->mphase_cal_phase_id++;
- } else {
+ else
wlc_phy_cal_perical_mphase_reset(pi);
- }
break;
@@ -23962,15 +25881,15 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
} else {
wlc_phy_txpwr_index_nphy(pi, (1 << 0),
(s8) (pi->
- nphy_txpwrindex
- [0].
- index_internal),
+ nphy_txpwrindex
+ [0].
+ index_internal),
false);
wlc_phy_txpwr_index_nphy(pi, (1 << 1),
(s8) (pi->
- nphy_txpwrindex
- [1].
- index_internal),
+ nphy_txpwrindex
+ [1].
+ index_internal),
false);
}
}
@@ -24007,35 +25926,35 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
- 0x1902,
+ 0x1902,
0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
- 0x6407
+ 0x6407
};
u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
- 0x3200,
+ 0x3200,
0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
- 0x6407
+ 0x6407
};
u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
- 0x1202,
+ 0x1202,
0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
- 0x4707
+ 0x4707
};
u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
- 0x2300,
+ 0x2300,
0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
- 0x4707
+ 0x4707
};
u16 tbl_tx_iqlo_cal_startcoefs[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
- 0x0000
+ 0x0000
};
u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
@@ -24071,11 +25990,10 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
pi->phyhang_avoid = false;
}
- if (CHSPEC_IS40(pi->radio_chanspec)) {
+ if (CHSPEC_IS40(pi->radio_chanspec))
phy_bw = 40;
- } else {
+ else
phy_bw = 20;
- }
wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x110, 16, gain_save);
@@ -24117,11 +26035,10 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
16, tbl_ptr);
}
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
write_phy_reg(pi, 0xc2, 0x8ad9);
- } else {
+ else
write_phy_reg(pi, 0xc2, 0x8aa9);
- }
max_val = 250;
tone_freq = (phy_bw == 20) ? 2500 : 5000;
@@ -24131,7 +26048,8 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
bcmerror = 0;
} else {
bcmerror =
- wlc_phy_tx_tone_nphy(pi, tone_freq, max_val, 1, 0, false);
+ wlc_phy_tx_tone_nphy(pi, tone_freq, max_val, 1, 0,
+ false);
}
if (bcmerror == 0) {
@@ -24139,19 +26057,15 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
if (pi->mphase_cal_phase_id > MPHASE_CAL_STATE_TXPHASE0) {
tbl_ptr = pi->mphase_txcal_bestcoeffs;
tbl_len = ARRAY_SIZE(pi->mphase_txcal_bestcoeffs);
- if (NREV_LT(pi->pubpi.phy_rev, 3)) {
-
+ if (NREV_LT(pi->pubpi.phy_rev, 3))
tbl_len -= 2;
- }
} else {
if ((!fullcal) && (pi->nphy_txiqlocal_coeffsvalid)) {
tbl_ptr = pi->nphy_txiqlocal_bestc;
tbl_len = ARRAY_SIZE(pi->nphy_txiqlocal_bestc);
- if (NREV_LT(pi->pubpi.phy_rev, 3)) {
-
+ if (NREV_LT(pi->pubpi.phy_rev, 3))
tbl_len -= 2;
- }
} else {
fullcal = true;
@@ -24159,14 +26073,12 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
if (NREV_GE(pi->pubpi.phy_rev, 3)) {
tbl_ptr =
tbl_tx_iqlo_cal_startcoefs_nphyrev3;
- tbl_len =
- ARRAY_SIZE
- (tbl_tx_iqlo_cal_startcoefs_nphyrev3);
+ tbl_len = ARRAY_SIZE(
+ tbl_tx_iqlo_cal_startcoefs_nphyrev3);
} else {
tbl_ptr = tbl_tx_iqlo_cal_startcoefs;
- tbl_len =
- ARRAY_SIZE
- (tbl_tx_iqlo_cal_startcoefs);
+ tbl_len = ARRAY_SIZE(
+ tbl_tx_iqlo_cal_startcoefs);
}
}
}
@@ -24175,21 +26087,22 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
if (fullcal) {
max_cal_cmds = (NREV_GE(pi->pubpi.phy_rev, 3)) ?
- ARRAY_SIZE(tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3) :
- ARRAY_SIZE(tbl_tx_iqlo_cal_cmds_fullcal);
+ ARRAY_SIZE(
+ tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3) :
+ ARRAY_SIZE(tbl_tx_iqlo_cal_cmds_fullcal);
} else {
max_cal_cmds = (NREV_GE(pi->pubpi.phy_rev, 3)) ?
- ARRAY_SIZE(tbl_tx_iqlo_cal_cmds_recal_nphyrev3) :
- ARRAY_SIZE(tbl_tx_iqlo_cal_cmds_recal);
+ ARRAY_SIZE(
+ tbl_tx_iqlo_cal_cmds_recal_nphyrev3) :
+ ARRAY_SIZE(tbl_tx_iqlo_cal_cmds_recal);
}
if (mphase) {
cal_cnt = pi->mphase_txcal_cmdidx;
- if ((cal_cnt + pi->mphase_txcal_numcmds) < max_cal_cmds) {
+ if ((cal_cnt + pi->mphase_txcal_numcmds) < max_cal_cmds)
num_cals = cal_cnt + pi->mphase_txcal_numcmds;
- } else {
+ else
num_cals = max_cal_cmds;
- }
} else {
cal_cnt = 0;
num_cals = max_cal_cmds;
@@ -24199,13 +26112,14 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
if (fullcal) {
cal_cmd = (NREV_GE(pi->pubpi.phy_rev, 3)) ?
- tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3
- [cal_cnt] :
- tbl_tx_iqlo_cal_cmds_fullcal[cal_cnt];
+ tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3
+ [cal_cnt] :
+ tbl_tx_iqlo_cal_cmds_fullcal[cal_cnt];
} else {
cal_cmd = (NREV_GE(pi->pubpi.phy_rev, 3)) ?
- tbl_tx_iqlo_cal_cmds_recal_nphyrev3[cal_cnt]
- : tbl_tx_iqlo_cal_cmds_recal[cal_cnt];
+ tbl_tx_iqlo_cal_cmds_recal_nphyrev3[
+ cal_cnt]
+ : tbl_tx_iqlo_cal_cmds_recal[cal_cnt];
}
core_no = ((cal_cmd & 0x3000) >> 12);
@@ -24216,15 +26130,16 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
PHY_IPA(pi)
&& (CHSPEC_IS2G(pi->radio_chanspec)))) {
if (!ladder_updated[core_no]) {
- wlc_phy_update_txcal_ladder_nphy(pi,
- core_no);
+ wlc_phy_update_txcal_ladder_nphy(
+ pi,
+ core_no);
ladder_updated[core_no] = true;
}
}
val =
- (cal_params[core_no].
- ncorr[cal_type] << 8) | NPHY_N_GCTL;
+ (cal_params[core_no].
+ ncorr[cal_type] << 8) | NPHY_N_GCTL;
write_phy_reg(pi, 0xc1, val);
if ((cal_type == 1) || (cal_type == 3)
@@ -24272,8 +26187,8 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
}
mphase_cal_lastphase =
- (NREV_LE(pi->pubpi.phy_rev, 2)) ?
- MPHASE_CAL_STATE_TXPHASE4 : MPHASE_CAL_STATE_TXPHASE5;
+ (NREV_LE(pi->pubpi.phy_rev, 2)) ?
+ MPHASE_CAL_STATE_TXPHASE4 : MPHASE_CAL_STATE_TXPHASE5;
if (!mphase
|| (pi->mphase_cal_phase_id == mphase_cal_lastphase)) {
@@ -24303,10 +26218,9 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
16, tbl_buf);
tbl_len = ARRAY_SIZE(pi->nphy_txiqlocal_bestc);
- if (NREV_LT(pi->pubpi.phy_rev, 3)) {
-
+ if (NREV_LT(pi->pubpi.phy_rev, 3))
tbl_len -= 2;
- }
+
wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_IQLOCAL,
tbl_len, 96, 16,
pi->nphy_txiqlocal_bestc);
@@ -24315,10 +26229,9 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
pi->nphy_txiqlocal_chanspec = pi->radio_chanspec;
} else {
tbl_len = ARRAY_SIZE(pi->mphase_txcal_bestcoeffs);
- if (NREV_LT(pi->pubpi.phy_rev, 3)) {
-
+ if (NREV_LT(pi->pubpi.phy_rev, 3))
tbl_len -= 2;
- }
+
wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_IQLOCAL,
tbl_len, 96, 16,
pi->mphase_txcal_bestcoeffs);
@@ -24343,9 +26256,8 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
wlc_phy_tx_iq_war_nphy(pi);
}
- if (NREV_GE(pi->pubpi.phy_rev, 4)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 4))
pi->phyhang_avoid = phyhang_avoid_state;
- }
wlc_phy_stay_in_carriersearch_nphy(pi, false);
@@ -24387,18 +26299,6 @@ static void wlc_phy_reapply_txcal_coeffs_nphy(struct brcms_phy *pi)
}
}
-static void wlc_phy_tx_iq_war_nphy(struct brcms_phy *pi)
-{
- struct nphy_iq_comp tx_comp;
-
- wlc_phy_table_read_nphy(pi, 15, 4, 0x50, 16, (void *)&tx_comp);
-
- wlapi_bmac_write_shm(pi->sh->physhim, M_20IN40_IQ, tx_comp.a0);
- wlapi_bmac_write_shm(pi->sh->physhim, M_20IN40_IQ + 2, tx_comp.b0);
- wlapi_bmac_write_shm(pi->sh->physhim, M_20IN40_IQ + 4, tx_comp.a1);
- wlapi_bmac_write_shm(pi->sh->physhim, M_20IN40_IQ + 6, tx_comp.b1);
-}
-
void
wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
struct nphy_iq_comp *pcomp)
@@ -24438,14 +26338,17 @@ wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
if ((read_phy_reg(pi, 0x129) & NPHY_IqestCmd_iqstart) == 0) {
for (core = 0; core < pi->pubpi.phy_corenum; core++) {
est[core].i_pwr =
- (read_phy_reg(pi, NPHY_IqestipwrAccHi(core)) << 16)
- | read_phy_reg(pi, NPHY_IqestipwrAccLo(core));
+ (read_phy_reg(pi,
+ NPHY_IqestipwrAccHi(core)) << 16)
+ | read_phy_reg(pi, NPHY_IqestipwrAccLo(core));
est[core].q_pwr =
- (read_phy_reg(pi, NPHY_IqestqpwrAccHi(core)) << 16)
- | read_phy_reg(pi, NPHY_IqestqpwrAccLo(core));
+ (read_phy_reg(pi,
+ NPHY_IqestqpwrAccHi(core)) << 16)
+ | read_phy_reg(pi, NPHY_IqestqpwrAccLo(core));
est[core].iq_prod =
- (read_phy_reg(pi, NPHY_IqestIqAccHi(core)) << 16) |
- read_phy_reg(pi, NPHY_IqestIqAccLo(core));
+ (read_phy_reg(pi,
+ NPHY_IqestIqAccHi(core)) << 16) |
+ read_phy_reg(pi, NPHY_IqestIqAccLo(core));
}
}
}
@@ -24470,7 +26373,7 @@ static void wlc_phy_calc_rx_iq_comp_nphy(struct brcms_phy *pi, u8 core_mask)
new_comp.a0 = new_comp.b0 = new_comp.a1 = new_comp.b1 = 0x0;
wlc_phy_rx_iq_coeffs_nphy(pi, 1, &new_comp);
- cal_try:
+cal_try:
wlc_phy_rx_iq_est_nphy(pi, est, 0x4000, 32, 0);
new_comp = old_comp;
@@ -24561,7 +26464,7 @@ static void wlc_phy_calc_rx_iq_comp_nphy(struct brcms_phy *pi, u8 core_mask)
if (bcmerror != 0) {
printk(KERN_DEBUG "%s: Failed, cnt = %d\n", __func__,
- cal_retry);
+ cal_retry);
if (cal_retry < CAL_RETRY_CNT) {
cal_retry++;
@@ -24584,121 +26487,125 @@ static void wlc_phy_rxcal_radio_setup_nphy(struct brcms_phy *pi, u8 rx_core)
if (rx_core == PHY_CORE_0) {
if (CHSPEC_IS5G(pi->radio_chanspec)) {
pi->tx_rx_cal_radio_saveregs[0] =
- read_radio_reg(pi,
- RADIO_2057_TX0_TXRXCOUPLE_5G_PWRUP);
+ read_radio_reg(pi,
+ RADIO_2057_TX0_TXRXCOUPLE_5G_PWRUP);
pi->tx_rx_cal_radio_saveregs[1] =
- read_radio_reg(pi,
- RADIO_2057_TX0_TXRXCOUPLE_5G_ATTEN);
+ read_radio_reg(pi,
+ RADIO_2057_TX0_TXRXCOUPLE_5G_ATTEN);
write_radio_reg(pi,
- RADIO_2057_TX0_TXRXCOUPLE_5G_PWRUP,
- 0x3);
+ RADIO_2057_TX0_TXRXCOUPLE_5G_PWRUP,
+ 0x3);
write_radio_reg(pi,
- RADIO_2057_TX0_TXRXCOUPLE_5G_ATTEN,
- 0xaf);
+ RADIO_2057_TX0_TXRXCOUPLE_5G_ATTEN,
+ 0xaf);
} else {
pi->tx_rx_cal_radio_saveregs[0] =
- read_radio_reg(pi,
- RADIO_2057_TX0_TXRXCOUPLE_2G_PWRUP);
+ read_radio_reg(pi,
+ RADIO_2057_TX0_TXRXCOUPLE_2G_PWRUP);
pi->tx_rx_cal_radio_saveregs[1] =
- read_radio_reg(pi,
- RADIO_2057_TX0_TXRXCOUPLE_2G_ATTEN);
-
- write_radio_reg(pi,
- RADIO_2057_TX0_TXRXCOUPLE_2G_PWRUP,
- 0x3);
- write_radio_reg(pi,
- RADIO_2057_TX0_TXRXCOUPLE_2G_ATTEN,
- 0x7f);
+ read_radio_reg(pi,
+ RADIO_2057_TX0_TXRXCOUPLE_2G_ATTEN);
+
+ write_radio_reg(
+ pi,
+ RADIO_2057_TX0_TXRXCOUPLE_2G_PWRUP,
+ 0x3);
+ write_radio_reg(
+ pi,
+ RADIO_2057_TX0_TXRXCOUPLE_2G_ATTEN,
+ 0x7f);
}
} else {
if (CHSPEC_IS5G(pi->radio_chanspec)) {
pi->tx_rx_cal_radio_saveregs[0] =
- read_radio_reg(pi,
- RADIO_2057_TX1_TXRXCOUPLE_5G_PWRUP);
+ read_radio_reg(pi,
+ RADIO_2057_TX1_TXRXCOUPLE_5G_PWRUP);
pi->tx_rx_cal_radio_saveregs[1] =
- read_radio_reg(pi,
- RADIO_2057_TX1_TXRXCOUPLE_5G_ATTEN);
-
- write_radio_reg(pi,
- RADIO_2057_TX1_TXRXCOUPLE_5G_PWRUP,
- 0x3);
- write_radio_reg(pi,
- RADIO_2057_TX1_TXRXCOUPLE_5G_ATTEN,
- 0xaf);
+ read_radio_reg(pi,
+ RADIO_2057_TX1_TXRXCOUPLE_5G_ATTEN);
+
+ write_radio_reg(
+ pi,
+ RADIO_2057_TX1_TXRXCOUPLE_5G_PWRUP,
+ 0x3);
+ write_radio_reg(
+ pi,
+ RADIO_2057_TX1_TXRXCOUPLE_5G_ATTEN,
+ 0xaf);
} else {
pi->tx_rx_cal_radio_saveregs[0] =
- read_radio_reg(pi,
- RADIO_2057_TX1_TXRXCOUPLE_2G_PWRUP);
+ read_radio_reg(pi,
+ RADIO_2057_TX1_TXRXCOUPLE_2G_PWRUP);
pi->tx_rx_cal_radio_saveregs[1] =
- read_radio_reg(pi,
- RADIO_2057_TX1_TXRXCOUPLE_2G_ATTEN);
+ read_radio_reg(pi,
+ RADIO_2057_TX1_TXRXCOUPLE_2G_ATTEN);
write_radio_reg(pi,
- RADIO_2057_TX1_TXRXCOUPLE_2G_PWRUP,
- 0x3);
+ RADIO_2057_TX1_TXRXCOUPLE_2G_PWRUP,
+ 0x3);
write_radio_reg(pi,
- RADIO_2057_TX1_TXRXCOUPLE_2G_ATTEN,
- 0x7f);
+ RADIO_2057_TX1_TXRXCOUPLE_2G_ATTEN,
+ 0x7f);
}
}
} else {
if (rx_core == PHY_CORE_0) {
pi->tx_rx_cal_radio_saveregs[0] =
- read_radio_reg(pi,
- RADIO_2056_TX_RXIQCAL_TXMUX |
- RADIO_2056_TX1);
+ read_radio_reg(pi,
+ RADIO_2056_TX_RXIQCAL_TXMUX |
+ RADIO_2056_TX1);
pi->tx_rx_cal_radio_saveregs[1] =
- read_radio_reg(pi,
- RADIO_2056_RX_RXIQCAL_RXMUX |
- RADIO_2056_RX0);
+ read_radio_reg(pi,
+ RADIO_2056_RX_RXIQCAL_RXMUX |
+ RADIO_2056_RX0);
if (pi->pubpi.radiorev >= 5) {
pi->tx_rx_cal_radio_saveregs[2] =
- read_radio_reg(pi,
- RADIO_2056_RX_RXSPARE2 |
- RADIO_2056_RX0);
+ read_radio_reg(pi,
+ RADIO_2056_RX_RXSPARE2 |
+ RADIO_2056_RX0);
pi->tx_rx_cal_radio_saveregs[3] =
- read_radio_reg(pi,
- RADIO_2056_TX_TXSPARE2 |
- RADIO_2056_TX1);
+ read_radio_reg(pi,
+ RADIO_2056_TX_TXSPARE2 |
+ RADIO_2056_TX1);
}
if (CHSPEC_IS5G(pi->radio_chanspec)) {
if (pi->pubpi.radiorev >= 5) {
pi->tx_rx_cal_radio_saveregs[4] =
- read_radio_reg(pi,
- RADIO_2056_RX_LNAA_MASTER
- | RADIO_2056_RX0);
+ read_radio_reg(pi,
+ RADIO_2056_RX_LNAA_MASTER
+ | RADIO_2056_RX0);
- write_radio_reg(pi,
- RADIO_2056_RX_LNAA_MASTER
- | RADIO_2056_RX0, 0x40);
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAA_MASTER
+ | RADIO_2056_RX0, 0x40);
write_radio_reg(pi,
- RADIO_2056_TX_TXSPARE2 |
- RADIO_2056_TX1, bias_a);
+ RADIO_2056_TX_TXSPARE2 |
+ RADIO_2056_TX1, bias_a);
write_radio_reg(pi,
- RADIO_2056_RX_RXSPARE2 |
- RADIO_2056_RX0, bias_a);
+ RADIO_2056_RX_RXSPARE2 |
+ RADIO_2056_RX0, bias_a);
} else {
pi->tx_rx_cal_radio_saveregs[4] =
- read_radio_reg(pi,
- RADIO_2056_RX_LNAA_TUNE
- | RADIO_2056_RX0);
+ read_radio_reg(pi,
+ RADIO_2056_RX_LNAA_TUNE
+ | RADIO_2056_RX0);
offtune_val =
- (pi->
- tx_rx_cal_radio_saveregs[2] & 0xF0)
- >> 8;
+ (pi->tx_rx_cal_radio_saveregs
+ [2] & 0xF0) >> 8;
offtune_val =
- (offtune_val <= 0x7) ? 0xF : 0;
+ (offtune_val <= 0x7) ? 0xF : 0;
mod_radio_reg(pi,
RADIO_2056_RX_LNAA_TUNE |
@@ -24715,34 +26622,41 @@ static void wlc_phy_rxcal_radio_setup_nphy(struct brcms_phy *pi, u8 rx_core)
} else {
if (pi->pubpi.radiorev >= 5) {
pi->tx_rx_cal_radio_saveregs[4] =
- read_radio_reg(pi,
- RADIO_2056_RX_LNAG_MASTER
- | RADIO_2056_RX0);
-
- write_radio_reg(pi,
- RADIO_2056_RX_LNAG_MASTER
- | RADIO_2056_RX0, 0x40);
-
- write_radio_reg(pi,
- RADIO_2056_TX_TXSPARE2 |
- RADIO_2056_TX1, bias_g);
-
- write_radio_reg(pi,
- RADIO_2056_RX_RXSPARE2 |
- RADIO_2056_RX0, bias_g);
+ read_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAG_MASTER
+ | RADIO_2056_RX0);
+
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAG_MASTER
+ | RADIO_2056_RX0, 0x40);
+
+ write_radio_reg(
+ pi,
+ RADIO_2056_TX_TXSPARE2
+ |
+ RADIO_2056_TX1, bias_g);
+
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_RXSPARE2
+ |
+ RADIO_2056_RX0, bias_g);
} else {
pi->tx_rx_cal_radio_saveregs[4] =
- read_radio_reg(pi,
- RADIO_2056_RX_LNAG_TUNE
- | RADIO_2056_RX0);
+ read_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAG_TUNE
+ | RADIO_2056_RX0);
offtune_val =
- (pi->
- tx_rx_cal_radio_saveregs[2] & 0xF0)
- >> 8;
+ (pi->
+ tx_rx_cal_radio_saveregs[2] &
+ 0xF0) >> 8;
offtune_val =
- (offtune_val <= 0x7) ? 0xF : 0;
+ (offtune_val <= 0x7) ? 0xF : 0;
mod_radio_reg(pi,
RADIO_2056_RX_LNAG_TUNE |
@@ -24760,56 +26674,63 @@ static void wlc_phy_rxcal_radio_setup_nphy(struct brcms_phy *pi, u8 rx_core)
} else {
pi->tx_rx_cal_radio_saveregs[0] =
- read_radio_reg(pi,
- RADIO_2056_TX_RXIQCAL_TXMUX |
- RADIO_2056_TX0);
+ read_radio_reg(pi,
+ RADIO_2056_TX_RXIQCAL_TXMUX |
+ RADIO_2056_TX0);
pi->tx_rx_cal_radio_saveregs[1] =
- read_radio_reg(pi,
- RADIO_2056_RX_RXIQCAL_RXMUX |
- RADIO_2056_RX1);
+ read_radio_reg(pi,
+ RADIO_2056_RX_RXIQCAL_RXMUX |
+ RADIO_2056_RX1);
if (pi->pubpi.radiorev >= 5) {
pi->tx_rx_cal_radio_saveregs[2] =
- read_radio_reg(pi,
- RADIO_2056_RX_RXSPARE2 |
- RADIO_2056_RX1);
+ read_radio_reg(pi,
+ RADIO_2056_RX_RXSPARE2 |
+ RADIO_2056_RX1);
pi->tx_rx_cal_radio_saveregs[3] =
- read_radio_reg(pi,
- RADIO_2056_TX_TXSPARE2 |
- RADIO_2056_TX0);
+ read_radio_reg(pi,
+ RADIO_2056_TX_TXSPARE2 |
+ RADIO_2056_TX0);
}
if (CHSPEC_IS5G(pi->radio_chanspec)) {
if (pi->pubpi.radiorev >= 5) {
pi->tx_rx_cal_radio_saveregs[4] =
- read_radio_reg(pi,
- RADIO_2056_RX_LNAA_MASTER
- | RADIO_2056_RX1);
-
- write_radio_reg(pi,
- RADIO_2056_RX_LNAA_MASTER
- | RADIO_2056_RX1, 0x40);
-
- write_radio_reg(pi,
- RADIO_2056_TX_TXSPARE2 |
- RADIO_2056_TX0, bias_a);
-
- write_radio_reg(pi,
- RADIO_2056_RX_RXSPARE2 |
- RADIO_2056_RX1, bias_a);
+ read_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAA_MASTER
+ | RADIO_2056_RX1);
+
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAA_MASTER |
+ RADIO_2056_RX1, 0x40);
+
+ write_radio_reg(
+ pi,
+ RADIO_2056_TX_TXSPARE2
+ |
+ RADIO_2056_TX0, bias_a);
+
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_RXSPARE2
+ |
+ RADIO_2056_RX1, bias_a);
} else {
pi->tx_rx_cal_radio_saveregs[4] =
- read_radio_reg(pi,
- RADIO_2056_RX_LNAA_TUNE
- | RADIO_2056_RX1);
+ read_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAA_TUNE
+ | RADIO_2056_RX1);
offtune_val =
- (pi->
- tx_rx_cal_radio_saveregs[2] & 0xF0)
- >> 8;
+ (pi->
+ tx_rx_cal_radio_saveregs[2] &
+ 0xF0) >> 8;
offtune_val =
- (offtune_val <= 0x7) ? 0xF : 0;
+ (offtune_val <= 0x7) ? 0xF : 0;
mod_radio_reg(pi,
RADIO_2056_RX_LNAA_TUNE |
@@ -24826,33 +26747,40 @@ static void wlc_phy_rxcal_radio_setup_nphy(struct brcms_phy *pi, u8 rx_core)
} else {
if (pi->pubpi.radiorev >= 5) {
pi->tx_rx_cal_radio_saveregs[4] =
- read_radio_reg(pi,
- RADIO_2056_RX_LNAG_MASTER
- | RADIO_2056_RX1);
-
- write_radio_reg(pi,
- RADIO_2056_RX_LNAG_MASTER
- | RADIO_2056_RX1, 0x40);
-
- write_radio_reg(pi,
- RADIO_2056_TX_TXSPARE2 |
- RADIO_2056_TX0, bias_g);
-
- write_radio_reg(pi,
- RADIO_2056_RX_RXSPARE2 |
- RADIO_2056_RX1, bias_g);
+ read_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAG_MASTER
+ | RADIO_2056_RX1);
+
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAG_MASTER
+ | RADIO_2056_RX1, 0x40);
+
+ write_radio_reg(
+ pi,
+ RADIO_2056_TX_TXSPARE2
+ |
+ RADIO_2056_TX0, bias_g);
+
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_RXSPARE2
+ |
+ RADIO_2056_RX1, bias_g);
} else {
pi->tx_rx_cal_radio_saveregs[4] =
- read_radio_reg(pi,
- RADIO_2056_RX_LNAG_TUNE
- | RADIO_2056_RX1);
+ read_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAG_TUNE
+ | RADIO_2056_RX1);
offtune_val =
- (pi->
- tx_rx_cal_radio_saveregs[2] & 0xF0)
- >> 8;
+ (pi->
+ tx_rx_cal_radio_saveregs[2] &
+ 0xF0) >> 8;
offtune_val =
- (offtune_val <= 0x7) ? 0xF : 0;
+ (offtune_val <= 0x7) ? 0xF : 0;
mod_radio_reg(pi,
RADIO_2056_RX_LNAG_TUNE |
@@ -24876,46 +26804,54 @@ static void wlc_phy_rxcal_radio_cleanup_nphy(struct brcms_phy *pi, u8 rx_core)
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
if (rx_core == PHY_CORE_0) {
if (CHSPEC_IS5G(pi->radio_chanspec)) {
- write_radio_reg(pi,
- RADIO_2057_TX0_TXRXCOUPLE_5G_PWRUP,
- pi->
- tx_rx_cal_radio_saveregs[0]);
- write_radio_reg(pi,
- RADIO_2057_TX0_TXRXCOUPLE_5G_ATTEN,
- pi->
- tx_rx_cal_radio_saveregs[1]);
+ write_radio_reg(
+ pi,
+ RADIO_2057_TX0_TXRXCOUPLE_5G_PWRUP,
+ pi->
+ tx_rx_cal_radio_saveregs[0]);
+ write_radio_reg(
+ pi,
+ RADIO_2057_TX0_TXRXCOUPLE_5G_ATTEN,
+ pi->
+ tx_rx_cal_radio_saveregs[1]);
} else {
- write_radio_reg(pi,
- RADIO_2057_TX0_TXRXCOUPLE_2G_PWRUP,
- pi->
- tx_rx_cal_radio_saveregs[0]);
- write_radio_reg(pi,
- RADIO_2057_TX0_TXRXCOUPLE_2G_ATTEN,
- pi->
- tx_rx_cal_radio_saveregs[1]);
+ write_radio_reg(
+ pi,
+ RADIO_2057_TX0_TXRXCOUPLE_2G_PWRUP,
+ pi->
+ tx_rx_cal_radio_saveregs[0]);
+ write_radio_reg(
+ pi,
+ RADIO_2057_TX0_TXRXCOUPLE_2G_ATTEN,
+ pi->
+ tx_rx_cal_radio_saveregs[1]);
}
} else {
if (CHSPEC_IS5G(pi->radio_chanspec)) {
- write_radio_reg(pi,
- RADIO_2057_TX1_TXRXCOUPLE_5G_PWRUP,
- pi->
- tx_rx_cal_radio_saveregs[0]);
- write_radio_reg(pi,
- RADIO_2057_TX1_TXRXCOUPLE_5G_ATTEN,
- pi->
- tx_rx_cal_radio_saveregs[1]);
+ write_radio_reg(
+ pi,
+ RADIO_2057_TX1_TXRXCOUPLE_5G_PWRUP,
+ pi->
+ tx_rx_cal_radio_saveregs[0]);
+ write_radio_reg(
+ pi,
+ RADIO_2057_TX1_TXRXCOUPLE_5G_ATTEN,
+ pi->
+ tx_rx_cal_radio_saveregs[1]);
} else {
- write_radio_reg(pi,
- RADIO_2057_TX1_TXRXCOUPLE_2G_PWRUP,
- pi->
- tx_rx_cal_radio_saveregs[0]);
- write_radio_reg(pi,
- RADIO_2057_TX1_TXRXCOUPLE_2G_ATTEN,
- pi->
- tx_rx_cal_radio_saveregs[1]);
+ write_radio_reg(
+ pi,
+ RADIO_2057_TX1_TXRXCOUPLE_2G_PWRUP,
+ pi->
+ tx_rx_cal_radio_saveregs[0]);
+ write_radio_reg(
+ pi,
+ RADIO_2057_TX1_TXRXCOUPLE_2G_ATTEN,
+ pi->
+ tx_rx_cal_radio_saveregs[1]);
}
}
@@ -24946,37 +26882,39 @@ static void wlc_phy_rxcal_radio_cleanup_nphy(struct brcms_phy *pi, u8 rx_core)
}
if (CHSPEC_IS5G(pi->radio_chanspec)) {
- if (pi->pubpi.radiorev >= 5) {
- write_radio_reg(pi,
- RADIO_2056_RX_LNAA_MASTER
- | RADIO_2056_RX0,
- pi->
- tx_rx_cal_radio_saveregs
- [4]);
- } else {
- write_radio_reg(pi,
- RADIO_2056_RX_LNAA_TUNE
- | RADIO_2056_RX0,
- pi->
- tx_rx_cal_radio_saveregs
- [4]);
- }
+ if (pi->pubpi.radiorev >= 5)
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAA_MASTER
+ | RADIO_2056_RX0,
+ pi->
+ tx_rx_cal_radio_saveregs
+ [4]);
+ else
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAA_TUNE
+ | RADIO_2056_RX0,
+ pi->
+ tx_rx_cal_radio_saveregs
+ [4]);
} else {
- if (pi->pubpi.radiorev >= 5) {
- write_radio_reg(pi,
- RADIO_2056_RX_LNAG_MASTER
- | RADIO_2056_RX0,
- pi->
- tx_rx_cal_radio_saveregs
- [4]);
- } else {
- write_radio_reg(pi,
- RADIO_2056_RX_LNAG_TUNE
- | RADIO_2056_RX0,
- pi->
- tx_rx_cal_radio_saveregs
- [4]);
- }
+ if (pi->pubpi.radiorev >= 5)
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAG_MASTER
+ | RADIO_2056_RX0,
+ pi->
+ tx_rx_cal_radio_saveregs
+ [4]);
+ else
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAG_TUNE
+ | RADIO_2056_RX0,
+ pi->
+ tx_rx_cal_radio_saveregs
+ [4]);
}
} else {
@@ -25005,37 +26943,39 @@ static void wlc_phy_rxcal_radio_cleanup_nphy(struct brcms_phy *pi, u8 rx_core)
}
if (CHSPEC_IS5G(pi->radio_chanspec)) {
- if (pi->pubpi.radiorev >= 5) {
- write_radio_reg(pi,
- RADIO_2056_RX_LNAA_MASTER
- | RADIO_2056_RX1,
- pi->
- tx_rx_cal_radio_saveregs
- [4]);
- } else {
- write_radio_reg(pi,
- RADIO_2056_RX_LNAA_TUNE
- | RADIO_2056_RX1,
- pi->
- tx_rx_cal_radio_saveregs
- [4]);
- }
+ if (pi->pubpi.radiorev >= 5)
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAA_MASTER
+ | RADIO_2056_RX1,
+ pi->
+ tx_rx_cal_radio_saveregs
+ [4]);
+ else
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAA_TUNE
+ | RADIO_2056_RX1,
+ pi->
+ tx_rx_cal_radio_saveregs
+ [4]);
} else {
- if (pi->pubpi.radiorev >= 5) {
- write_radio_reg(pi,
- RADIO_2056_RX_LNAG_MASTER
- | RADIO_2056_RX1,
- pi->
- tx_rx_cal_radio_saveregs
- [4]);
- } else {
- write_radio_reg(pi,
- RADIO_2056_RX_LNAG_TUNE
- | RADIO_2056_RX1,
- pi->
- tx_rx_cal_radio_saveregs
- [4]);
- }
+ if (pi->pubpi.radiorev >= 5)
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAG_MASTER
+ | RADIO_2056_RX1,
+ pi->
+ tx_rx_cal_radio_saveregs
+ [4]);
+ else
+ write_radio_reg(
+ pi,
+ RADIO_2056_RX_LNAG_TUNE
+ | RADIO_2056_RX1,
+ pi->
+ tx_rx_cal_radio_saveregs
+ [4]);
}
}
}
@@ -25046,18 +26986,16 @@ static void wlc_phy_rxcal_physetup_nphy(struct brcms_phy *pi, u8 rx_core)
u8 tx_core;
u16 rx_antval, tx_antval;
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
-
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
tx_core = rx_core;
- } else {
+ else
tx_core = (rx_core == PHY_CORE_0) ? 1 : 0;
- }
pi->tx_rx_cal_phy_saveregs[0] = read_phy_reg(pi, 0xa2);
pi->tx_rx_cal_phy_saveregs[1] =
- read_phy_reg(pi, (rx_core == PHY_CORE_0) ? 0xa6 : 0xa7);
+ read_phy_reg(pi, (rx_core == PHY_CORE_0) ? 0xa6 : 0xa7);
pi->tx_rx_cal_phy_saveregs[2] =
- read_phy_reg(pi, (rx_core == PHY_CORE_0) ? 0x8f : 0xa5);
+ read_phy_reg(pi, (rx_core == PHY_CORE_0) ? 0x8f : 0xa5);
pi->tx_rx_cal_phy_saveregs[3] = read_phy_reg(pi, 0x91);
pi->tx_rx_cal_phy_saveregs[4] = read_phy_reg(pi, 0x92);
pi->tx_rx_cal_phy_saveregs[5] = read_phy_reg(pi, 0x7a);
@@ -25122,17 +27060,19 @@ static void wlc_phy_rxcal_physetup_nphy(struct brcms_phy *pi, u8 rx_core)
NPHY_REV7_RFCTRLOVERRIDE_ID2);
wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 11), 0, 0, 0,
NPHY_REV7_RFCTRLOVERRIDE_ID1);
- if (CHSPEC_IS40(pi->radio_chanspec)) {
- wlc_phy_rfctrl_override_nphy_rev7(pi,
- (0x1 << 7),
- 2, 0, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- } else {
- wlc_phy_rfctrl_override_nphy_rev7(pi,
- (0x1 << 7),
- 0, 0, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- }
+ if (CHSPEC_IS40(pi->radio_chanspec))
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi,
+ (0x1 << 7),
+ 2, 0, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+ else
+ wlc_phy_rfctrl_override_nphy_rev7(
+ pi,
+ (0x1 << 7),
+ 0, 0, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID1);
+
wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 7),
0, 0, 0,
NPHY_REV7_RFCTRLOVERRIDE_ID1);
@@ -25203,26 +27143,24 @@ wlc_phy_rxcal_gainctrl_nphy_rev5(struct brcms_phy *pi, u8 rx_core,
struct phy_iq_est est[PHY_CORE_MAX];
u8 tx_core;
struct nphy_iq_comp save_comp, zero_comp;
- u32 i_pwr, q_pwr, curr_pwr, optim_pwr = 0, prev_pwr = 0, thresh_pwr =
- 10000;
+ u32 i_pwr, q_pwr, curr_pwr, optim_pwr = 0, prev_pwr = 0,
+ thresh_pwr = 10000;
s16 desired_log2_pwr, actual_log2_pwr, delta_pwr;
bool gainctrl_done = false;
u8 mix_tia_gain = 3;
s8 optim_gaintbl_index = 0, prev_gaintbl_index = 0;
s8 curr_gaintbl_index = 3;
u8 gainctrl_dirn = NPHY_RXCAL_GAIN_INIT;
- struct nphy_ipa_txrxgain *nphy_rxcal_gaintbl;
+ const struct nphy_ipa_txrxgain *nphy_rxcal_gaintbl;
u16 hpvga, lpf_biq1, lpf_biq0, lna2, lna1;
int fine_gain_idx;
s8 txpwrindex;
u16 nphy_rxcal_txgain[2];
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
-
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
tx_core = rx_core;
- } else {
+ else
tx_core = 1 - rx_core;
- }
num_samps = 1024;
desired_log2_pwr = (cal_type == 0) ? 13 : 13;
@@ -25232,45 +27170,42 @@ wlc_phy_rxcal_gainctrl_nphy_rev5(struct brcms_phy *pi, u8 rx_core,
wlc_phy_rx_iq_coeffs_nphy(pi, 1, &zero_comp);
if (CHSPEC_IS5G(pi->radio_chanspec)) {
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
mix_tia_gain = 3;
- } else if (NREV_GE(pi->pubpi.phy_rev, 4)) {
+ else if (NREV_GE(pi->pubpi.phy_rev, 4))
mix_tia_gain = 4;
- } else {
+ else
mix_tia_gain = 6;
- }
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
nphy_rxcal_gaintbl = nphy_ipa_rxcal_gaintbl_5GHz_rev7;
- } else {
+ else
nphy_rxcal_gaintbl = nphy_ipa_rxcal_gaintbl_5GHz;
- }
} else {
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
nphy_rxcal_gaintbl = nphy_ipa_rxcal_gaintbl_2GHz_rev7;
- } else {
+ else
nphy_rxcal_gaintbl = nphy_ipa_rxcal_gaintbl_2GHz;
- }
}
do {
hpvga = (NREV_GE(pi->pubpi.phy_rev, 7)) ?
- 0 : nphy_rxcal_gaintbl[curr_gaintbl_index].hpvga;
+ 0 : nphy_rxcal_gaintbl[curr_gaintbl_index].hpvga;
lpf_biq1 = nphy_rxcal_gaintbl[curr_gaintbl_index].lpf_biq1;
lpf_biq0 = nphy_rxcal_gaintbl[curr_gaintbl_index].lpf_biq0;
lna2 = nphy_rxcal_gaintbl[curr_gaintbl_index].lna2;
lna1 = nphy_rxcal_gaintbl[curr_gaintbl_index].lna1;
txpwrindex = nphy_rxcal_gaintbl[curr_gaintbl_index].txpwrindex;
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- wlc_phy_rfctrl_override_1tomany_nphy(pi,
- NPHY_REV7_RfctrlOverride_cmd_rxgain,
- ((lpf_biq1 << 12) |
- (lpf_biq0 << 8) |
- (mix_tia_gain <<
- 4) | (lna2 << 2)
- | lna1), 0x3, 0);
- } else {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ wlc_phy_rfctrl_override_1tomany_nphy(
+ pi,
+ NPHY_REV7_RfctrlOverride_cmd_rxgain,
+ ((lpf_biq1 << 12) |
+ (lpf_biq0 << 8) |
+ (mix_tia_gain << 4) | (lna2 << 2)
+ | lna1), 0x3, 0);
+ else
wlc_phy_rfctrl_override_nphy(pi, (0x1 << 12),
((hpvga << 12) |
(lpf_biq1 << 10) |
@@ -25278,7 +27213,6 @@ wlc_phy_rxcal_gainctrl_nphy_rev5(struct brcms_phy *pi, u8 rx_core,
(mix_tia_gain << 4) |
(lna2 << 2) | lna1), 0x3,
0);
- }
pi->nphy_rxcal_pwr_idx[tx_core] = txpwrindex;
@@ -25367,27 +27301,28 @@ wlc_phy_rxcal_gainctrl_nphy_rev5(struct brcms_phy *pi, u8 rx_core,
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
fine_gain_idx = (int)lpf_biq1 + delta_pwr;
- if (fine_gain_idx + (int)lpf_biq0 > 10) {
+ if (fine_gain_idx + (int)lpf_biq0 > 10)
lpf_biq1 = 10 - lpf_biq0;
- } else {
+ else
lpf_biq1 = (u16) max(fine_gain_idx, 0);
- }
- wlc_phy_rfctrl_override_1tomany_nphy(pi,
- NPHY_REV7_RfctrlOverride_cmd_rxgain,
- ((lpf_biq1 << 12) |
- (lpf_biq0 << 8) |
- (mix_tia_gain << 4) |
- (lna2 << 2) | lna1), 0x3,
- 0);
+
+ wlc_phy_rfctrl_override_1tomany_nphy(
+ pi,
+ NPHY_REV7_RfctrlOverride_cmd_rxgain,
+ ((lpf_biq1 << 12) |
+ (lpf_biq0 << 8) |
+ (mix_tia_gain << 4) |
+ (lna2 << 2) | lna1), 0x3,
+ 0);
} else {
hpvga = (u16) max(min(((int)hpvga) + delta_pwr, 10), 0);
wlc_phy_rfctrl_override_nphy(pi, (0x1 << 12),
- ((hpvga << 12) | (lpf_biq1 << 10) |
- (lpf_biq0 << 8) | (mix_tia_gain <<
- 4) | (lna2 <<
- 2) |
+ ((hpvga << 12) |
+ (lpf_biq1 << 10) |
+ (lpf_biq0 << 8) |
+ (mix_tia_gain << 4) |
+ (lna2 << 2) |
lna1), 0x3, 0);
-
}
if (rxgain != NULL) {
@@ -25444,9 +27379,8 @@ wlc_phy_rc_sweep_nphy(struct brcms_phy *pi, u8 core_idx, u8 loopback_type)
u16 num_samps, log_num_samps = 10;
struct phy_iq_est est[PHY_CORE_MAX];
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
return 0;
- }
num_samps = (1 << log_num_samps);
@@ -25465,20 +27399,21 @@ wlc_phy_rc_sweep_nphy(struct brcms_phy *pi, u8 core_idx, u8 loopback_type)
if (core_idx == 0) {
radio_addr_offset_rx = RADIO_2056_RX0;
radio_addr_offset_tx =
- (loopback_type == 0) ? RADIO_2056_TX0 : RADIO_2056_TX1;
+ (loopback_type == 0) ? RADIO_2056_TX0 : RADIO_2056_TX1;
} else {
radio_addr_offset_rx = RADIO_2056_RX1;
radio_addr_offset_tx =
- (loopback_type == 0) ? RADIO_2056_TX1 : RADIO_2056_TX0;
+ (loopback_type == 0) ? RADIO_2056_TX1 : RADIO_2056_TX0;
}
orig_txlpf_rccal_lpc_ovr_val =
- read_radio_reg(pi,
- (RADIO_2056_TX_TXLPF_RCCAL | radio_addr_offset_tx));
+ read_radio_reg(pi,
+ (RADIO_2056_TX_TXLPF_RCCAL |
+ radio_addr_offset_tx));
orig_rxlpf_rccal_hpc_ovr_val =
- read_radio_reg(pi,
- (RADIO_2056_RX_RXLPF_RCCAL_HPC |
- radio_addr_offset_rx));
+ read_radio_reg(pi,
+ (RADIO_2056_RX_RXLPF_RCCAL_HPC |
+ radio_addr_offset_rx));
orig_dcBypass = ((read_phy_reg(pi, 0x48) >> 8) & 1);
@@ -25549,17 +27484,18 @@ wlc_phy_rc_sweep_nphy(struct brcms_phy *pi, u8 core_idx, u8 loopback_type)
wlc_phy_rx_iq_est_nphy(pi, est, num_samps, 32, 0);
- if (core_idx == 0) {
+ if (core_idx == 0)
ref_iq_vals =
- max_t(u32, (est[0].i_pwr +
- est[0].q_pwr) >> (log_num_samps + 1),
- 1);
- } else {
+ max_t(u32, (est[0].i_pwr +
+ est[0].q_pwr) >>
+ (log_num_samps + 1),
+ 1);
+ else
ref_iq_vals =
- max_t(u32, (est[1].i_pwr +
- est[1].q_pwr) >> (log_num_samps + 1),
- 1);
- }
+ max_t(u32, (est[1].i_pwr +
+ est[1].q_pwr) >>
+ (log_num_samps + 1),
+ 1);
wlc_phy_tx_tone_nphy(pi, target_bw, NPHY_RXCAL_TONEAMP,
0, 1, false);
@@ -25568,20 +27504,19 @@ wlc_phy_rc_sweep_nphy(struct brcms_phy *pi, u8 core_idx, u8 loopback_type)
wlc_phy_rx_iq_est_nphy(pi, est, num_samps, 32, 0);
- if (core_idx == 0) {
- target_iq_vals =
- (est[0].i_pwr + est[0].q_pwr) >> (log_num_samps +
- 1);
- } else {
+ if (core_idx == 0)
+ target_iq_vals = (est[0].i_pwr + est[0].q_pwr) >>
+ (log_num_samps + 1);
+ else
target_iq_vals =
- (est[1].i_pwr + est[1].q_pwr) >> (log_num_samps +
- 1);
- }
+ (est[1].i_pwr +
+ est[1].q_pwr) >> (log_num_samps + 1);
+
pwr_ratio = (uint) ((target_iq_vals << 16) / ref_iq_vals);
- if (rccal_stepsize == 0) {
+ if (rccal_stepsize == 0)
rccal_stepsize--;
- } else if (rccal_stepsize == 1) {
+ else if (rccal_stepsize == 1) {
last_rccal_val = rccal_val;
rccal_val += (pwr_ratio > target_pwr_ratio) ? 1 : -1;
last_pwr_ratio = pwr_ratio;
@@ -25594,21 +27529,20 @@ wlc_phy_rc_sweep_nphy(struct brcms_phy *pi, u8 core_idx, u8 loopback_type)
if (rccal_stepsize == -1) {
best_rccal_val =
- (ABS((int)last_pwr_ratio - (int)target_pwr_ratio) <
- ABS((int)pwr_ratio -
- (int)target_pwr_ratio)) ? last_rccal_val :
- rccal_val;
+ (abs((int)last_pwr_ratio -
+ (int)target_pwr_ratio) <
+ abs((int)pwr_ratio -
+ (int)target_pwr_ratio)) ? last_rccal_val :
+ rccal_val;
if (CHSPEC_IS40(pi->radio_chanspec)) {
if ((best_rccal_val > 140)
- || (best_rccal_val < 135)) {
+ || (best_rccal_val < 135))
best_rccal_val = 138;
- }
} else {
if ((best_rccal_val > 142)
- || (best_rccal_val < 137)) {
+ || (best_rccal_val < 137))
best_rccal_val = 140;
- }
}
write_radio_reg(pi,
@@ -25649,7 +27583,7 @@ wlc_phy_rc_sweep_nphy(struct brcms_phy *pi, u8 core_idx, u8 loopback_type)
return best_rccal_val - 0x80;
}
-#define WAIT_FOR_SCOPE 4000
+#define WAIT_FOR_SCOPE 4000
static int wlc_phy_cal_rxiq_nphy_rev3(struct brcms_phy *pi,
struct nphy_txgains target_gain,
u8 cal_type, bool debug)
@@ -25687,12 +27621,12 @@ static int wlc_phy_cal_rxiq_nphy_rev3(struct brcms_phy *pi,
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x110, 16, cal_gain);
rxcore_state = wlc_phy_rxcore_getstate_nphy(
- (struct brcms_phy_pub *) pi);
+ (struct brcms_phy_pub *) pi);
for (rx_core = 0; rx_core < pi->pubpi.phy_corenum; rx_core++) {
skip_rxiqcal =
- ((rxcore_state & (1 << rx_core)) == 0) ? true : false;
+ ((rxcore_state & (1 << rx_core)) == 0) ? true : false;
wlc_phy_rxcal_physetup_nphy(pi, rx_core);
@@ -25703,7 +27637,8 @@ static int wlc_phy_cal_rxiq_nphy_rev3(struct brcms_phy *pi,
wlc_phy_rxcal_gainctrl_nphy(pi, rx_core, NULL, 0);
wlc_phy_tx_tone_nphy(pi,
- (CHSPEC_IS40(pi->radio_chanspec)) ?
+ (CHSPEC_IS40(
+ pi->radio_chanspec)) ?
NPHY_RXCAL_TONEFREQ_40MHz :
NPHY_RXCAL_TONEFREQ_20MHz,
NPHY_RXCAL_TONEAMP, 0, cal_type,
@@ -25721,23 +27656,21 @@ static int wlc_phy_cal_rxiq_nphy_rev3(struct brcms_phy *pi,
if (rx_core == PHY_CORE_1) {
- if (rxcore_state == 1) {
+ if (rxcore_state == 1)
wlc_phy_rxcore_setstate_nphy(
(struct brcms_phy_pub *) pi, 3);
- }
wlc_phy_rxcal_gainctrl_nphy(pi, rx_core, NULL,
1);
best_rccal[rx_core] =
- wlc_phy_rc_sweep_nphy(pi, rx_core, 1);
+ wlc_phy_rc_sweep_nphy(pi, rx_core, 1);
pi->nphy_rccal_value = best_rccal[rx_core];
- if (rxcore_state == 1) {
+ if (rxcore_state == 1)
wlc_phy_rxcore_setstate_nphy(
(struct brcms_phy_pub *) pi,
rxcore_state);
- }
}
}
@@ -25756,18 +27689,22 @@ static int wlc_phy_cal_rxiq_nphy_rev3(struct brcms_phy *pi,
for (rx_core = 0; rx_core < pi->pubpi.phy_corenum; rx_core++) {
rxlpf_rccal_hpc =
- (((int)best_rccal[rx_core] - 12) >> 1) + 10;
+ (((int)best_rccal[rx_core] - 12) >> 1) + 10;
txlpf_rccal_lpc = ((int)best_rccal[rx_core] - 12) + 10;
if (PHY_IPA(pi)) {
- txlpf_rccal_lpc += IS40MHZ(pi) ? 24 : 12;
- txlpf_idac = IS40MHZ(pi) ? 0x0e : 0x13;
+ txlpf_rccal_lpc +=
+ (pi->bw == WL_CHANSPEC_BW_40) ? 24 : 12;
+ txlpf_idac = (pi->bw == WL_CHANSPEC_BW_40) ?
+ 0x0e : 0x13;
WRITE_RADIO_REG2(pi, RADIO_2056, TX, rx_core,
TXLPF_IDAC_4, txlpf_idac);
}
- rxlpf_rccal_hpc = max(min_t(u8, rxlpf_rccal_hpc, 31), 0);
- txlpf_rccal_lpc = max(min_t(u8, txlpf_rccal_lpc, 31), 0);
+ rxlpf_rccal_hpc = max(min_t(u8, rxlpf_rccal_hpc, 31),
+ 0);
+ txlpf_rccal_lpc = max(min_t(u8, txlpf_rccal_lpc, 31),
+ 0);
write_radio_reg(pi, (RADIO_2056_RX_RXLPF_RCCAL_HPC |
((rx_core ==
@@ -25787,21 +27724,21 @@ static int wlc_phy_cal_rxiq_nphy_rev3(struct brcms_phy *pi,
wlc_phy_resetcca_nphy(pi);
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- wlc_phy_rfctrl_override_1tomany_nphy(pi,
- NPHY_REV7_RfctrlOverride_cmd_rxgain,
- 0, 0x3, 1);
- } else {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ wlc_phy_rfctrl_override_1tomany_nphy(
+ pi,
+ NPHY_REV7_RfctrlOverride_cmd_rxgain,
+ 0, 0x3, 1);
+ else
wlc_phy_rfctrl_override_nphy(pi, (0x1 << 12), 0, 0x3, 1);
- }
+
wlc_phy_force_rfseq_nphy(pi, NPHY_RFSEQ_RESET2RX);
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x110, 16,
gain_save);
- if (NREV_GE(pi->pubpi.phy_rev, 4)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 4))
pi->phyhang_avoid = phyhang_avoid_state;
- }
wlc_phy_stay_in_carriersearch_nphy(pi, false);
@@ -25835,10 +27772,8 @@ wlc_phy_cal_rxiq_nphy_rev2(struct brcms_phy *pi,
wlc_phy_stay_in_carriersearch_nphy(pi, true);
- if (NREV_LT(pi->pubpi.phy_rev, 2)) {
-
+ if (NREV_LT(pi->pubpi.phy_rev, 2))
wlc_phy_reapply_txcal_coeffs_nphy(pi);
- }
wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x110, 16, gain_save);
@@ -25874,19 +27809,16 @@ wlc_phy_cal_rxiq_nphy_rev2(struct brcms_phy *pi,
((0x1 << 1) | (0x1 << 2)));
or_phy_reg(pi, 0xa5, ((0x1 << 1) | (0x1 << 2)));
- if (((pi->nphy_rxcalparams) & 0xff000000)) {
-
+ if (((pi->nphy_rxcalparams) & 0xff000000))
write_phy_reg(pi,
(rx_core == PHY_CORE_0) ? 0x91 : 0x92,
- (CHSPEC_IS5G(pi->radio_chanspec) ? 0x140 :
- 0x110));
- } else {
-
+ (CHSPEC_IS5G(pi->radio_chanspec) ?
+ 0x140 : 0x110));
+ else
write_phy_reg(pi,
(rx_core == PHY_CORE_0) ? 0x91 : 0x92,
- (CHSPEC_IS5G(pi->radio_chanspec) ? 0x180 :
- 0x120));
- }
+ (CHSPEC_IS5G(pi->radio_chanspec) ?
+ 0x180 : 0x120));
write_phy_reg(pi, (tx_core == PHY_CORE_0) ? 0x91 : 0x92,
(CHSPEC_IS5G(pi->radio_chanspec) ? 0x148 :
@@ -25926,7 +27858,7 @@ wlc_phy_cal_rxiq_nphy_rev2(struct brcms_phy *pi,
use_hpf_num = 1;
curr_hpf = curr_hpf1;
actual_log2_pwr =
- wlc_phy_nbits(tot_pwr[2]);
+ wlc_phy_nbits(tot_pwr[2]);
} else {
if (tot_pwr[0] > 10000) {
curr_lna = lna_vals[1];
@@ -25935,7 +27867,8 @@ wlc_phy_cal_rxiq_nphy_rev2(struct brcms_phy *pi,
use_hpf_num = 1;
curr_hpf = curr_hpf1;
actual_log2_pwr =
- wlc_phy_nbits(tot_pwr[1]);
+ wlc_phy_nbits(
+ tot_pwr[1]);
} else {
curr_lna = lna_vals[0];
curr_hpf1 = hpf1_vals[0];
@@ -25943,18 +27876,18 @@ wlc_phy_cal_rxiq_nphy_rev2(struct brcms_phy *pi,
use_hpf_num = 2;
curr_hpf = curr_hpf2;
actual_log2_pwr =
- wlc_phy_nbits(tot_pwr[0]);
+ wlc_phy_nbits(
+ tot_pwr[0]);
}
}
hpf_change = desired_log2_pwr - actual_log2_pwr;
curr_hpf += hpf_change;
curr_hpf = max(min_t(u16, curr_hpf, 10), 0);
- if (use_hpf_num == 1) {
+ if (use_hpf_num == 1)
curr_hpf1 = curr_hpf;
- } else {
+ else
curr_hpf2 = curr_hpf;
- }
}
wlc_phy_rfctrl_override_nphy(pi, (0x1 << 10),
@@ -25967,15 +27900,12 @@ wlc_phy_cal_rxiq_nphy_rev2(struct brcms_phy *pi,
if (first_playtone) {
bcmerror = wlc_phy_tx_tone_nphy(pi, 4000,
- (u16) (pi->
- nphy_rxcalparams
- &
- 0xffff),
- 0, 0, true);
+ (u16) (pi->nphy_rxcalparams &
+ 0xffff), 0, 0, true);
first_playtone = false;
} else {
- phy_bw =
- (CHSPEC_IS40(pi->radio_chanspec)) ? 40 : 20;
+ phy_bw = (CHSPEC_IS40(pi->radio_chanspec)) ?
+ 40 : 20;
wlc_phy_runsamples_nphy(pi, phy_bw * 8, 0xffff,
0, 0, 0, true);
}
@@ -25986,12 +27916,10 @@ wlc_phy_cal_rxiq_nphy_rev2(struct brcms_phy *pi,
wlc_phy_rx_iq_est_nphy(pi, est,
num_samps, 32,
0);
- i_pwr =
- (est[rx_core].i_pwr +
- num_samps / 2) / num_samps;
- q_pwr =
- (est[rx_core].q_pwr +
- num_samps / 2) / num_samps;
+ i_pwr = (est[rx_core].i_pwr +
+ num_samps / 2) / num_samps;
+ q_pwr = (est[rx_core].q_pwr +
+ num_samps / 2) / num_samps;
tot_pwr[gain_pass] = i_pwr + q_pwr;
} else {
@@ -26038,1350 +27966,14 @@ int
wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
u8 cal_type, bool debug)
{
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
-
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
cal_type = 0;
- }
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+
+ if (NREV_GE(pi->pubpi.phy_rev, 3))
return wlc_phy_cal_rxiq_nphy_rev3(pi, target_gain, cal_type,
debug);
- } else {
+ else
return wlc_phy_cal_rxiq_nphy_rev2(pi, target_gain, debug);
- }
-}
-
-static void wlc_phy_extpa_set_tx_digi_filts_nphy(struct brcms_phy *pi)
-{
- int j, type = 2;
- u16 addr_offset = 0x2c5;
-
- for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++) {
- write_phy_reg(pi, addr_offset + j,
- NPHY_IPA_REV4_txdigi_filtcoeffs[type][j]);
- }
-}
-
-static void wlc_phy_ipa_set_tx_digi_filts_nphy(struct brcms_phy *pi)
-{
- int j, type;
- u16 addr_offset[] = { 0x186, 0x195,
- 0x2c5
- };
-
- for (type = 0; type < 3; type++) {
- for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++) {
- write_phy_reg(pi, addr_offset[type] + j,
- NPHY_IPA_REV4_txdigi_filtcoeffs[type][j]);
- }
- }
-
- if (IS40MHZ(pi)) {
- for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++) {
- write_phy_reg(pi, 0x186 + j,
- NPHY_IPA_REV4_txdigi_filtcoeffs[3][j]);
- }
- } else {
- if (CHSPEC_IS5G(pi->radio_chanspec)) {
- for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++) {
- write_phy_reg(pi, 0x186 + j,
- NPHY_IPA_REV4_txdigi_filtcoeffs[5]
- [j]);
- }
- }
-
- if (CHSPEC_CHANNEL(pi->radio_chanspec) == 14) {
- for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++) {
- write_phy_reg(pi, 0x2c5 + j,
- NPHY_IPA_REV4_txdigi_filtcoeffs[6]
- [j]);
- }
- }
- }
-}
-
-static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
-{
- int j;
-
- if (IS40MHZ(pi)) {
- for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++) {
- write_phy_reg(pi, 0x195 + j,
- NPHY_IPA_REV4_txdigi_filtcoeffs[4][j]);
- }
- } else {
- for (j = 0; j < NPHY_NUM_DIG_FILT_COEFFS; j++) {
- write_phy_reg(pi, 0x186 + j,
- NPHY_IPA_REV4_txdigi_filtcoeffs[3][j]);
- }
- }
-}
-
-static u16 wlc_phy_ipa_get_bbmult_nphy(struct brcms_phy *pi)
-{
- u16 m0m1;
-
- wlc_phy_table_read_nphy(pi, 15, 1, 87, 16, &m0m1);
-
- return m0m1;
-}
-
-static void wlc_phy_ipa_set_bbmult_nphy(struct brcms_phy *pi, u8 m0, u8 m1)
-{
- u16 m0m1 = (u16) ((m0 << 8) | m1);
-
- wlc_phy_table_write_nphy(pi, 15, 1, 87, 16, &m0m1);
- wlc_phy_table_write_nphy(pi, 15, 1, 95, 16, &m0m1);
-}
-
-static u32 *wlc_phy_get_ipa_gaintbl_nphy(struct brcms_phy *pi)
-{
- u32 *tx_pwrctrl_tbl = NULL;
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
-
- if ((pi->pubpi.radiorev == 4)
- || (pi->pubpi.radiorev == 6)) {
-
- tx_pwrctrl_tbl =
- nphy_tpc_txgain_ipa_2g_2057rev4n6;
- } else if (pi->pubpi.radiorev == 3) {
-
- tx_pwrctrl_tbl =
- nphy_tpc_txgain_ipa_2g_2057rev3;
- } else if (pi->pubpi.radiorev == 5) {
-
- tx_pwrctrl_tbl =
- nphy_tpc_txgain_ipa_2g_2057rev5;
- } else if ((pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
-
- tx_pwrctrl_tbl =
- nphy_tpc_txgain_ipa_2g_2057rev7;
- }
-
- } else if (NREV_IS(pi->pubpi.phy_rev, 6)) {
-
- tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev6;
- } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
-
- tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
- } else {
-
- tx_pwrctrl_tbl = nphy_tpc_txgain_ipa;
- }
-
- } else {
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- if ((pi->pubpi.radiorev == 3) ||
- (pi->pubpi.radiorev == 4) ||
- (pi->pubpi.radiorev == 6)) {
-
- tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_5g_2057;
- } else if ((pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
-
- tx_pwrctrl_tbl =
- nphy_tpc_txgain_ipa_5g_2057rev7;
- }
-
- } else {
- tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_5g;
- }
- }
-
- return tx_pwrctrl_tbl;
-}
-
-static void
-wlc_phy_papd_cal_setup_nphy(struct brcms_phy *pi,
- struct nphy_papd_restore_state *state, u8 core)
-{
- s32 tone_freq;
- u8 off_core;
- u16 mixgain = 0;
-
- off_core = core ^ 0x1;
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
-
- if (NREV_IS(pi->pubpi.phy_rev, 7)
- || NREV_GE(pi->pubpi.phy_rev, 8)) {
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 7),
- wlc_phy_read_lpf_bw_ctl_nphy
- (pi, 0), 0, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- }
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if (pi->pubpi.radiorev == 5) {
- mixgain = (core == 0) ? 0x20 : 0x00;
-
- } else if ((pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
-
- mixgain = 0x00;
-
- } else if ((pi->pubpi.radiorev <= 4)
- || (pi->pubpi.radiorev == 6)) {
-
- mixgain = 0x00;
- }
-
- } else {
- if ((pi->pubpi.radiorev == 4) ||
- (pi->pubpi.radiorev == 6)) {
-
- mixgain = 0x50;
- } else if ((pi->pubpi.radiorev == 3)
- || (pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
-
- mixgain = 0x0;
- }
- }
-
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 11),
- mixgain, (1 << core), 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
-
- wlc_phy_rfctrl_override_1tomany_nphy(pi,
- NPHY_REV7_RfctrlOverride_cmd_tx_pu,
- 1, (1 << core), 0);
- wlc_phy_rfctrl_override_1tomany_nphy(pi,
- NPHY_REV7_RfctrlOverride_cmd_tx_pu,
- 0, (1 << off_core), 0);
-
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3),
- 0, 0x3, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 2), 1,
- (1 << core), 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 0), 0,
- (1 << core), 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 1), 1,
- (1 << core), 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID2);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 8), 0,
- (1 << core), 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 9), 1,
- (1 << core), 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 10), 0,
- (1 << core), 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3), 1,
- (1 << core), 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
-
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 5),
- 0, (1 << core), 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 4), 0,
- (1 << core), 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
-
- state->afectrl[core] = read_phy_reg(pi, (core == PHY_CORE_0) ?
- 0xa6 : 0xa7);
- state->afeoverride[core] =
- read_phy_reg(pi, (core == PHY_CORE_0) ? 0x8f : 0xa5);
- state->afectrl[off_core] =
- read_phy_reg(pi, (core == PHY_CORE_0) ? 0xa7 : 0xa6);
- state->afeoverride[off_core] =
- read_phy_reg(pi, (core == PHY_CORE_0) ? 0xa5 : 0x8f);
-
- mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0xa6 : 0xa7),
- (0x1 << 2), 0);
- mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0x8f :
- 0xa5), (0x1 << 2), (0x1 << 2));
-
- mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0xa7 : 0xa6),
- (0x1 << 2), (0x1 << 2));
- mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0xa5 :
- 0x8f), (0x1 << 2), (0x1 << 2));
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- state->pwrup[core] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_2G_PWRUP);
- state->atten[core] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_2G_ATTEN);
- state->pwrup[off_core] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, off_core,
- TXRXCOUPLE_2G_PWRUP);
- state->atten[off_core] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, off_core,
- TXRXCOUPLE_2G_ATTEN);
-
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_2G_PWRUP, 0xc);
-
- if ((pi->pubpi.radiorev == 3) ||
- (pi->pubpi.radiorev == 4) ||
- (pi->pubpi.radiorev == 6)) {
-
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_2G_ATTEN, 0xf0);
-
- } else if (pi->pubpi.radiorev == 5) {
-
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_2G_ATTEN,
- (core == 0) ? 0xf7 : 0xf2);
-
- } else if ((pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
-
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_2G_ATTEN, 0xf0);
-
- }
-
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, off_core,
- TXRXCOUPLE_2G_PWRUP, 0x0);
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, off_core,
- TXRXCOUPLE_2G_ATTEN, 0xff);
-
- } else {
- state->pwrup[core] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_5G_PWRUP);
- state->atten[core] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_5G_ATTEN);
- state->pwrup[off_core] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, off_core,
- TXRXCOUPLE_5G_PWRUP);
- state->atten[off_core] =
- READ_RADIO_REG3(pi, RADIO_2057, TX, off_core,
- TXRXCOUPLE_5G_ATTEN);
-
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_5G_PWRUP, 0xc);
-
- if ((pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
-
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_5G_ATTEN, 0xf4);
-
- } else {
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_5G_ATTEN, 0xf0);
- }
-
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, off_core,
- TXRXCOUPLE_5G_PWRUP, 0x0);
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, off_core,
- TXRXCOUPLE_5G_ATTEN, 0xff);
- }
-
- tone_freq = 4000;
-
- wlc_phy_tx_tone_nphy(pi, tone_freq, 181, 0, 0, false);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x297 :
- 0x29b, (0x1 << 0), (NPHY_PAPD_COMP_ON) << 0);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x1 << 13), (1) << 13);
-
- mod_phy_reg(pi, (off_core == PHY_CORE_0) ? 0x297 :
- 0x29b, (0x1 << 0), (NPHY_PAPD_COMP_OFF) << 0);
-
- mod_phy_reg(pi, (off_core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x1 << 13), (0) << 13);
-
- } else {
-
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 12), 0, 0x3, 0);
-
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 3), 1, 0, 0);
-
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 0), 0, 0x3, 0);
-
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 2), 1, 0x3, 0);
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 1), 1, 0x3, 0);
-
- state->afectrl[core] = read_phy_reg(pi, (core == PHY_CORE_0) ?
- 0xa6 : 0xa7);
- state->afeoverride[core] =
- read_phy_reg(pi, (core == PHY_CORE_0) ? 0x8f : 0xa5);
-
- mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0xa6 : 0xa7),
- (0x1 << 0) | (0x1 << 1) | (0x1 << 2), 0);
- mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0x8f :
- 0xa5),
- (0x1 << 0) |
- (0x1 << 1) |
- (0x1 << 2), (0x1 << 0) | (0x1 << 1) | (0x1 << 2));
-
- state->vga_master[core] =
- READ_RADIO_REG2(pi, RADIO_2056, RX, core, VGA_MASTER);
- WRITE_RADIO_REG2(pi, RADIO_2056, RX, core, VGA_MASTER, 0x2b);
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- state->fbmix[core] =
- READ_RADIO_REG2(pi, RADIO_2056, RX, core,
- TXFBMIX_G);
- state->intpa_master[core] =
- READ_RADIO_REG2(pi, RADIO_2056, TX, core,
- INTPAG_MASTER);
-
- WRITE_RADIO_REG2(pi, RADIO_2056, RX, core, TXFBMIX_G,
- 0x03);
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
- INTPAG_MASTER, 0x04);
- } else {
- state->fbmix[core] =
- READ_RADIO_REG2(pi, RADIO_2056, RX, core,
- TXFBMIX_A);
- state->intpa_master[core] =
- READ_RADIO_REG2(pi, RADIO_2056, TX, core,
- INTPAA_MASTER);
-
- WRITE_RADIO_REG2(pi, RADIO_2056, RX, core, TXFBMIX_A,
- 0x03);
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
- INTPAA_MASTER, 0x04);
-
- }
-
- tone_freq = 4000;
-
- wlc_phy_tx_tone_nphy(pi, tone_freq, 181, 0, 0, false);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x297 :
- 0x29b, (0x1 << 0), (1) << 0);
-
- mod_phy_reg(pi, (off_core == PHY_CORE_0) ? 0x297 :
- 0x29b, (0x1 << 0), (0) << 0);
-
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 3), 0, 0x3, 0);
- }
-}
-
-static void
-wlc_phy_papd_cal_cleanup_nphy(struct brcms_phy *pi,
- struct nphy_papd_restore_state *state)
-{
- u8 core;
-
- wlc_phy_stopplayback_nphy(pi);
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
-
- for (core = 0; core < pi->pubpi.phy_corenum; core++) {
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_2G_PWRUP, 0);
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_2G_ATTEN,
- state->atten[core]);
- } else {
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_5G_PWRUP, 0);
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TXRXCOUPLE_5G_ATTEN,
- state->atten[core]);
- }
- }
-
- if ((pi->pubpi.radiorev == 4) || (pi->pubpi.radiorev == 6)) {
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 2),
- 1, 0x3, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- } else {
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 2),
- 0, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- }
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 1),
- 0, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 0), 0, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID2);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 2), 0, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID2);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 11), 1, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3), 0, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 11), 0, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 12), 0, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 2), 1, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 0), 0, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 1), 1, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID2);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 8), 0, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 9), 1, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 10), 0, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3), 1, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 5), 0, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 4), 0, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
-
- for (core = 0; core < pi->pubpi.phy_corenum; core++) {
-
- write_phy_reg(pi, (core == PHY_CORE_0) ?
- 0xa6 : 0xa7, state->afectrl[core]);
- write_phy_reg(pi, (core == PHY_CORE_0) ? 0x8f :
- 0xa5, state->afeoverride[core]);
- }
-
- wlc_phy_ipa_set_bbmult_nphy(pi, (state->mm >> 8) & 0xff,
- (state->mm & 0xff));
-
- if (NREV_IS(pi->pubpi.phy_rev, 7)
- || NREV_GE(pi->pubpi.phy_rev, 8)) {
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 7), 0, 0,
- 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID1);
- }
- } else {
-
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 12), 0, 0x3, 1);
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 13), 0, 0x3, 1);
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 0), 0, 0x3, 1);
-
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 2), 0, 0x3, 1);
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 1), 0, 0x3, 1);
-
- for (core = 0; core < pi->pubpi.phy_corenum; core++) {
-
- WRITE_RADIO_REG2(pi, RADIO_2056, RX, core, VGA_MASTER,
- state->vga_master[core]);
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- WRITE_RADIO_REG2(pi, RADIO_2056, RX, core,
- TXFBMIX_G, state->fbmix[core]);
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
- INTPAG_MASTER,
- state->intpa_master[core]);
- } else {
- WRITE_RADIO_REG2(pi, RADIO_2056, RX, core,
- TXFBMIX_A, state->fbmix[core]);
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
- INTPAA_MASTER,
- state->intpa_master[core]);
- }
-
- write_phy_reg(pi, (core == PHY_CORE_0) ?
- 0xa6 : 0xa7, state->afectrl[core]);
- write_phy_reg(pi, (core == PHY_CORE_0) ? 0x8f :
- 0xa5, state->afeoverride[core]);
- }
-
- wlc_phy_ipa_set_bbmult_nphy(pi, (state->mm >> 8) & 0xff,
- (state->mm & 0xff));
-
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 3), 0, 0x3, 1);
- }
-}
-
-static void
-wlc_phy_a1_nphy(struct brcms_phy *pi, u8 core, u32 winsz, u32 start,
- u32 end)
-{
- u32 *buf, *src, *dst, sz;
-
- sz = end - start + 1;
-
- buf = kmalloc(2 * sizeof(u32) * NPHY_PAPD_EPS_TBL_SIZE, GFP_ATOMIC);
- if (NULL == buf) {
- return;
- }
-
- src = buf;
- dst = buf + NPHY_PAPD_EPS_TBL_SIZE;
-
- wlc_phy_table_read_nphy(pi,
- (core ==
- PHY_CORE_0 ? NPHY_TBL_ID_EPSILONTBL0 :
- NPHY_TBL_ID_EPSILONTBL1),
- NPHY_PAPD_EPS_TBL_SIZE, 0, 32, src);
-
- do {
- u32 phy_a1, phy_a2;
- s32 phy_a3, phy_a4, phy_a5, phy_a6, phy_a7;
-
- phy_a1 = end - min(end, (winsz >> 1));
- phy_a2 = min_t(u32, NPHY_PAPD_EPS_TBL_SIZE - 1, end + (winsz >> 1));
- phy_a3 = phy_a2 - phy_a1 + 1;
- phy_a6 = 0;
- phy_a7 = 0;
-
- do {
- wlc_phy_papd_decode_epsilon(src[phy_a2], &phy_a4,
- &phy_a5);
- phy_a6 += phy_a4;
- phy_a7 += phy_a5;
- } while (phy_a2-- != phy_a1);
-
- phy_a6 /= phy_a3;
- phy_a7 /= phy_a3;
- dst[end] = ((u32) phy_a7 << 13) | ((u32) phy_a6 & 0x1fff);
- } while (end-- != start);
-
- wlc_phy_table_write_nphy(pi,
- (core ==
- PHY_CORE_0) ? NPHY_TBL_ID_EPSILONTBL0 :
- NPHY_TBL_ID_EPSILONTBL1, sz, start, 32, dst);
-
- kfree(buf);
-}
-
-static void
-wlc_phy_a2_nphy(struct brcms_phy *pi, struct nphy_ipa_txcalgains *txgains,
- enum phy_cal_mode cal_mode, u8 core)
-{
- u16 phy_a1, phy_a2, phy_a3;
- u16 phy_a4, phy_a5;
- bool phy_a6;
- u8 phy_a7, m[2];
- u32 phy_a8 = 0;
- struct nphy_txgains phy_a9;
-
- if (NREV_LT(pi->pubpi.phy_rev, 3))
- return;
-
- phy_a7 = (core == PHY_CORE_0) ? 1 : 0;
-
- phy_a6 = ((cal_mode == CAL_GCTRL)
- || (cal_mode == CAL_SOFT)) ? true : false;
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
-
- phy_a9 = wlc_phy_get_tx_gain_nphy(pi);
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- phy_a5 = ((phy_a9.txlpf[core] << 15) |
- (phy_a9.txgm[core] << 12) |
- (phy_a9.pga[core] << 8) |
- (txgains->gains.pad[core] << 3) |
- (phy_a9.ipa[core]));
- } else {
- phy_a5 = ((phy_a9.txlpf[core] << 15) |
- (phy_a9.txgm[core] << 12) |
- (txgains->gains.pga[core] << 8) |
- (phy_a9.pad[core] << 3) | (phy_a9.ipa[core]));
- }
-
- wlc_phy_rfctrl_override_1tomany_nphy(pi,
- NPHY_REV7_RfctrlOverride_cmd_txgain,
- phy_a5, (1 << core), 0);
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if ((pi->pubpi.radiorev <= 4)
- || (pi->pubpi.radiorev == 6)) {
-
- m[core] = IS40MHZ(pi) ? 60 : 79;
- } else {
-
- m[core] = IS40MHZ(pi) ? 45 : 64;
- }
-
- } else {
- m[core] = IS40MHZ(pi) ? 75 : 107;
- }
-
- m[phy_a7] = 0;
- wlc_phy_ipa_set_bbmult_nphy(pi, m[0], m[1]);
-
- phy_a2 = 63;
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if ((pi->pubpi.radiorev == 4)
- || (pi->pubpi.radiorev == 6)) {
- phy_a1 = 30;
- phy_a3 = 30;
- } else {
- phy_a1 = 25;
- phy_a3 = 25;
- }
- } else {
- if ((pi->pubpi.radiorev == 5)
- || (pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
- phy_a1 = 25;
- phy_a3 = 25;
- } else {
- phy_a1 = 35;
- phy_a3 = 35;
- }
- }
-
- if (cal_mode == CAL_GCTRL) {
- if ((pi->pubpi.radiorev == 5)
- && (CHSPEC_IS2G(pi->radio_chanspec))) {
- phy_a1 = 55;
- } else if (((pi->pubpi.radiorev == 7) &&
- (CHSPEC_IS2G(pi->radio_chanspec))) ||
- ((pi->pubpi.radiorev == 8) &&
- (CHSPEC_IS2G(pi->radio_chanspec)))) {
- phy_a1 = 60;
- } else {
- phy_a1 = 63;
- }
-
- } else if ((cal_mode != CAL_FULL) && (cal_mode != CAL_SOFT)) {
-
- phy_a1 = 35;
- phy_a3 = 35;
- }
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x297 :
- 0x29b, (0x1 << 0), (1) << 0);
-
- mod_phy_reg(pi, (phy_a7 == PHY_CORE_0) ? 0x297 :
- 0x29b, (0x1 << 0), (0) << 0);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x1 << 13), (1) << 13);
-
- mod_phy_reg(pi, (phy_a7 == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x1 << 13), (0) << 13);
-
- write_phy_reg(pi, 0x2a1, 0x80);
- write_phy_reg(pi, 0x2a2, 0x100);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x7 << 4), (11) << 4);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x7 << 8), (11) << 8);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x7 << 0), (0x3) << 0);
-
- write_phy_reg(pi, 0x2e5, 0x20);
-
- mod_phy_reg(pi, 0x2a0, (0x3f << 0), (phy_a3) << 0);
-
- mod_phy_reg(pi, 0x29f, (0x3f << 0), (phy_a1) << 0);
-
- mod_phy_reg(pi, 0x29f, (0x3f << 8), (phy_a2) << 8);
-
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3),
- 1, ((core == 0) ? 1 : 2), 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3),
- 0, ((core == 0) ? 2 : 1), 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
-
- write_phy_reg(pi, 0x2be, 1);
- SPINWAIT(read_phy_reg(pi, 0x2be), 10 * 1000 * 1000);
-
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 3),
- 0, 0x3, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
-
- wlc_phy_table_write_nphy(pi,
- (core ==
- PHY_CORE_0) ? NPHY_TBL_ID_EPSILONTBL0
- : NPHY_TBL_ID_EPSILONTBL1, 1, phy_a3,
- 32, &phy_a8);
-
- if (cal_mode != CAL_GCTRL) {
- if (CHSPEC_IS5G(pi->radio_chanspec)) {
- wlc_phy_a1_nphy(pi, core, 5, 0, 35);
- }
- }
-
- wlc_phy_rfctrl_override_1tomany_nphy(pi,
- NPHY_REV7_RfctrlOverride_cmd_txgain,
- phy_a5, (1 << core), 1);
-
- } else {
-
- if (txgains) {
- if (txgains->useindex) {
- phy_a4 = 15 - ((txgains->index) >> 3);
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if (NREV_GE(pi->pubpi.phy_rev, 6))
- phy_a5 = 0x00f7 | (phy_a4 << 8);
-
- else
- if (NREV_IS(pi->pubpi.phy_rev, 5))
- phy_a5 = 0x10f7 | (phy_a4 << 8);
- else
- phy_a5 = 0x50f7 | (phy_a4 << 8);
- } else {
- phy_a5 = 0x70f7 | (phy_a4 << 8);
- }
- wlc_phy_rfctrl_override_nphy(pi,
- (0x1 << 13),
- phy_a5,
- (1 << core), 0);
- } else {
- wlc_phy_rfctrl_override_nphy(pi,
- (0x1 << 13),
- 0x5bf7,
- (1 << core), 0);
- }
- }
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- m[core] = IS40MHZ(pi) ? 45 : 64;
- } else {
- m[core] = IS40MHZ(pi) ? 75 : 107;
- }
-
- m[phy_a7] = 0;
- wlc_phy_ipa_set_bbmult_nphy(pi, m[0], m[1]);
-
- phy_a2 = 63;
-
- if (cal_mode == CAL_FULL) {
- phy_a1 = 25;
- phy_a3 = 25;
- } else if (cal_mode == CAL_SOFT) {
- phy_a1 = 25;
- phy_a3 = 25;
- } else if (cal_mode == CAL_GCTRL) {
- phy_a1 = 63;
- phy_a3 = 25;
- } else {
-
- phy_a1 = 25;
- phy_a3 = 25;
- }
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x297 :
- 0x29b, (0x1 << 0), (1) << 0);
-
- mod_phy_reg(pi, (phy_a7 == PHY_CORE_0) ? 0x297 :
- 0x29b, (0x1 << 0), (0) << 0);
-
- if (NREV_GE(pi->pubpi.phy_rev, 6)) {
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x1 << 13), (1) << 13);
-
- mod_phy_reg(pi, (phy_a7 == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x1 << 13), (0) << 13);
-
- write_phy_reg(pi, 0x2a1, 0x20);
- write_phy_reg(pi, 0x2a2, 0x60);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0xf << 4), (9) << 4);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0xf << 8), (9) << 8);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0xf << 0), (0x2) << 0);
-
- write_phy_reg(pi, 0x2e5, 0x20);
- } else {
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x1 << 11), (1) << 11);
-
- mod_phy_reg(pi, (phy_a7 == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x1 << 11), (0) << 11);
-
- write_phy_reg(pi, 0x2a1, 0x80);
- write_phy_reg(pi, 0x2a2, 0x600);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x7 << 4), (0) << 4);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x7 << 8), (0) << 8);
-
- mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x7 << 0), (0x3) << 0);
-
- mod_phy_reg(pi, 0x2a0, (0x3f << 8), (0x20) << 8);
-
- }
-
- mod_phy_reg(pi, 0x2a0, (0x3f << 0), (phy_a3) << 0);
-
- mod_phy_reg(pi, 0x29f, (0x3f << 0), (phy_a1) << 0);
-
- mod_phy_reg(pi, 0x29f, (0x3f << 8), (phy_a2) << 8);
-
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 3), 1, 0x3, 0);
-
- write_phy_reg(pi, 0x2be, 1);
- SPINWAIT(read_phy_reg(pi, 0x2be), 10 * 1000 * 1000);
-
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 3), 0, 0x3, 0);
-
- wlc_phy_table_write_nphy(pi,
- (core ==
- PHY_CORE_0) ? NPHY_TBL_ID_EPSILONTBL0
- : NPHY_TBL_ID_EPSILONTBL1, 1, phy_a3,
- 32, &phy_a8);
-
- if (cal_mode != CAL_GCTRL) {
- wlc_phy_a1_nphy(pi, core, 5, 0, 40);
- }
- }
-}
-
-static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
-{
- int phy_a1;
- int phy_a2;
- bool phy_a3;
- struct nphy_ipa_txcalgains phy_a4;
- bool phy_a5 = false;
- bool phy_a6 = true;
- s32 phy_a7, phy_a8;
- u32 phy_a9;
- int phy_a10;
- bool phy_a11 = false;
- int phy_a12;
- u8 phy_a13 = 0;
- u8 phy_a14;
- u8 *phy_a15 = NULL;
-
- phy_a4.useindex = true;
- phy_a12 = start_gain;
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
-
- phy_a2 = 20;
- phy_a1 = 1;
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if (pi->pubpi.radiorev == 5) {
-
- phy_a15 = pad_gain_codes_used_2057rev5;
- phy_a13 = sizeof(pad_gain_codes_used_2057rev5) /
- sizeof(pad_gain_codes_used_2057rev5[0]) - 1;
-
- } else if ((pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
-
- phy_a15 = pad_gain_codes_used_2057rev7;
- phy_a13 = sizeof(pad_gain_codes_used_2057rev7) /
- sizeof(pad_gain_codes_used_2057rev7[0]) - 1;
-
- } else {
-
- phy_a15 = pad_all_gain_codes_2057;
- phy_a13 = sizeof(pad_all_gain_codes_2057) /
- sizeof(pad_all_gain_codes_2057[0]) - 1;
- }
-
- } else {
-
- phy_a15 = pga_all_gain_codes_2057;
- phy_a13 = sizeof(pga_all_gain_codes_2057) /
- sizeof(pga_all_gain_codes_2057[0]) - 1;
- }
-
- phy_a14 = 0;
-
- for (phy_a10 = 0; phy_a10 < phy_a2; phy_a10++) {
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- phy_a4.gains.pad[core] =
- (u16) phy_a15[phy_a12];
- } else {
- phy_a4.gains.pga[core] =
- (u16) phy_a15[phy_a12];
- }
-
- wlc_phy_a2_nphy(pi, &phy_a4, CAL_GCTRL, core);
-
- wlc_phy_table_read_nphy(pi,
- (core ==
- PHY_CORE_0 ?
- NPHY_TBL_ID_EPSILONTBL0 :
- NPHY_TBL_ID_EPSILONTBL1), 1,
- 63, 32, &phy_a9);
-
- wlc_phy_papd_decode_epsilon(phy_a9, &phy_a7, &phy_a8);
-
- phy_a3 = ((phy_a7 == 4095) || (phy_a7 == -4096) ||
- (phy_a8 == 4095) || (phy_a8 == -4096));
-
- if (!phy_a6 && (phy_a3 != phy_a5)) {
- if (!phy_a3) {
- phy_a12 -= (u8) phy_a1;
- }
- phy_a11 = true;
- break;
- }
-
- if (phy_a3)
- phy_a12 += (u8) phy_a1;
- else
- phy_a12 -= (u8) phy_a1;
-
- if ((phy_a12 < phy_a14) || (phy_a12 > phy_a13)) {
- if (phy_a12 < phy_a14) {
- phy_a12 = phy_a14;
- } else {
- phy_a12 = phy_a13;
- }
- phy_a11 = true;
- break;
- }
-
- phy_a6 = false;
- phy_a5 = phy_a3;
- }
-
- } else {
- phy_a2 = 10;
- phy_a1 = 8;
- for (phy_a10 = 0; phy_a10 < phy_a2; phy_a10++) {
- phy_a4.index = (u8) phy_a12;
- wlc_phy_a2_nphy(pi, &phy_a4, CAL_GCTRL, core);
-
- wlc_phy_table_read_nphy(pi,
- (core ==
- PHY_CORE_0 ?
- NPHY_TBL_ID_EPSILONTBL0 :
- NPHY_TBL_ID_EPSILONTBL1), 1,
- 63, 32, &phy_a9);
-
- wlc_phy_papd_decode_epsilon(phy_a9, &phy_a7, &phy_a8);
-
- phy_a3 = ((phy_a7 == 4095) || (phy_a7 == -4096) ||
- (phy_a8 == 4095) || (phy_a8 == -4096));
-
- if (!phy_a6 && (phy_a3 != phy_a5)) {
- if (!phy_a3) {
- phy_a12 -= (u8) phy_a1;
- }
- phy_a11 = true;
- break;
- }
-
- if (phy_a3)
- phy_a12 += (u8) phy_a1;
- else
- phy_a12 -= (u8) phy_a1;
-
- if ((phy_a12 < 0) || (phy_a12 > 127)) {
- if (phy_a12 < 0) {
- phy_a12 = 0;
- } else {
- phy_a12 = 127;
- }
- phy_a11 = true;
- break;
- }
-
- phy_a6 = false;
- phy_a5 = phy_a3;
- }
-
- }
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- return (u8) phy_a15[phy_a12];
- } else {
- return (u8) phy_a12;
- }
-
-}
-
-static void wlc_phy_a4(struct brcms_phy *pi, bool full_cal)
-{
- struct nphy_ipa_txcalgains phy_b1[2];
- struct nphy_papd_restore_state phy_b2;
- bool phy_b3;
- u8 phy_b4;
- u8 phy_b5;
- s16 phy_b6, phy_b7, phy_b8;
- u16 phy_b9;
- s16 phy_b10, phy_b11, phy_b12;
-
- phy_b11 = 0;
- phy_b12 = 0;
- phy_b7 = 0;
- phy_b8 = 0;
- phy_b6 = 0;
-
- if (pi->nphy_papd_skip == 1)
- return;
-
- phy_b3 =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
- if (!phy_b3) {
- wlapi_suspend_mac_and_wait(pi->sh->physhim);
- }
-
- wlc_phy_stay_in_carriersearch_nphy(pi, true);
-
- pi->nphy_force_papd_cal = false;
-
- for (phy_b5 = 0; phy_b5 < pi->pubpi.phy_corenum; phy_b5++)
- pi->nphy_papd_tx_gain_at_last_cal[phy_b5] =
- wlc_phy_txpwr_idx_cur_get_nphy(pi, phy_b5);
-
- pi->nphy_papd_last_cal = pi->sh->now;
- pi->nphy_papd_recal_counter++;
-
- if (NORADIO_ENAB(pi->pubpi))
- return;
-
- phy_b4 = pi->nphy_txpwrctrl;
- wlc_phy_txpwrctrl_enable_nphy(pi, PHY_TPC_HW_OFF);
-
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_SCALARTBL0, 64, 0, 32,
- nphy_papd_scaltbl);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_SCALARTBL1, 64, 0, 32,
- nphy_papd_scaltbl);
-
- phy_b9 = read_phy_reg(pi, 0x01);
- mod_phy_reg(pi, 0x01, (0x1 << 15), 0);
-
- for (phy_b5 = 0; phy_b5 < pi->pubpi.phy_corenum; phy_b5++) {
- s32 i, val = 0;
- for (i = 0; i < 64; i++) {
- wlc_phy_table_write_nphy(pi,
- ((phy_b5 ==
- PHY_CORE_0) ?
- NPHY_TBL_ID_EPSILONTBL0 :
- NPHY_TBL_ID_EPSILONTBL1), 1,
- i, 32, &val);
- }
- }
-
- wlc_phy_ipa_restore_tx_digi_filts_nphy(pi);
-
- phy_b2.mm = wlc_phy_ipa_get_bbmult_nphy(pi);
- for (phy_b5 = 0; phy_b5 < pi->pubpi.phy_corenum; phy_b5++) {
- wlc_phy_papd_cal_setup_nphy(pi, &phy_b2, phy_b5);
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
-
- if ((pi->pubpi.radiorev == 3)
- || (pi->pubpi.radiorev == 4)
- || (pi->pubpi.radiorev == 6)) {
-
- pi->nphy_papd_cal_gain_index[phy_b5] =
- 23;
-
- } else if (pi->pubpi.radiorev == 5) {
-
- pi->nphy_papd_cal_gain_index[phy_b5] =
- 0;
- pi->nphy_papd_cal_gain_index[phy_b5] =
- wlc_phy_a3_nphy(pi,
- pi->
- nphy_papd_cal_gain_index
- [phy_b5], phy_b5);
-
- } else if ((pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
-
- pi->nphy_papd_cal_gain_index[phy_b5] =
- 0;
- pi->nphy_papd_cal_gain_index[phy_b5] =
- wlc_phy_a3_nphy(pi,
- pi->
- nphy_papd_cal_gain_index
- [phy_b5], phy_b5);
-
- }
-
- phy_b1[phy_b5].gains.pad[phy_b5] =
- pi->nphy_papd_cal_gain_index[phy_b5];
-
- } else {
- pi->nphy_papd_cal_gain_index[phy_b5] = 0;
- pi->nphy_papd_cal_gain_index[phy_b5] =
- wlc_phy_a3_nphy(pi,
- pi->
- nphy_papd_cal_gain_index
- [phy_b5], phy_b5);
- phy_b1[phy_b5].gains.pga[phy_b5] =
- pi->nphy_papd_cal_gain_index[phy_b5];
- }
- } else {
- phy_b1[phy_b5].useindex = true;
- phy_b1[phy_b5].index = 16;
- phy_b1[phy_b5].index =
- wlc_phy_a3_nphy(pi, phy_b1[phy_b5].index, phy_b5);
-
- pi->nphy_papd_cal_gain_index[phy_b5] =
- 15 - ((phy_b1[phy_b5].index) >> 3);
- }
-
- switch (pi->nphy_papd_cal_type) {
- case 0:
- wlc_phy_a2_nphy(pi, &phy_b1[phy_b5], CAL_FULL, phy_b5);
- break;
- case 1:
- wlc_phy_a2_nphy(pi, &phy_b1[phy_b5], CAL_SOFT, phy_b5);
- break;
- }
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- wlc_phy_papd_cal_cleanup_nphy(pi, &phy_b2);
- }
- }
-
- if (NREV_LT(pi->pubpi.phy_rev, 7)) {
- wlc_phy_papd_cal_cleanup_nphy(pi, &phy_b2);
- }
-
- for (phy_b5 = 0; phy_b5 < pi->pubpi.phy_corenum; phy_b5++) {
- int eps_offset = 0;
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if (pi->pubpi.radiorev == 3) {
- eps_offset = -2;
- } else if (pi->pubpi.radiorev == 5) {
- eps_offset = 3;
- } else {
- eps_offset = -1;
- }
- } else {
- eps_offset = 2;
- }
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- phy_b8 = phy_b1[phy_b5].gains.pad[phy_b5];
- phy_b10 = 0;
- if ((pi->pubpi.radiorev == 3) ||
- (pi->pubpi.radiorev == 4) ||
- (pi->pubpi.radiorev == 6)) {
- phy_b12 =
- -
- (nphy_papd_padgain_dlt_2g_2057rev3n4
- [phy_b8]
- + 1) / 2;
- phy_b10 = -1;
- } else if (pi->pubpi.radiorev == 5) {
- phy_b12 =
- -(nphy_papd_padgain_dlt_2g_2057rev5
- [phy_b8]
- + 1) / 2;
- } else if ((pi->pubpi.radiorev == 7) ||
- (pi->pubpi.radiorev == 8)) {
- phy_b12 =
- -(nphy_papd_padgain_dlt_2g_2057rev7
- [phy_b8]
- + 1) / 2;
- }
- } else {
- phy_b7 = phy_b1[phy_b5].gains.pga[phy_b5];
- if ((pi->pubpi.radiorev == 3) ||
- (pi->pubpi.radiorev == 4) ||
- (pi->pubpi.radiorev == 6)) {
- phy_b11 =
- -(nphy_papd_pgagain_dlt_5g_2057
- [phy_b7]
- + 1) / 2;
- } else if ((pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
- phy_b11 =
- -(nphy_papd_pgagain_dlt_5g_2057rev7
- [phy_b7]
- + 1) / 2;
- }
-
- phy_b10 = -9;
- }
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- phy_b6 =
- -60 + 27 + eps_offset + phy_b12 + phy_b10;
- } else {
- phy_b6 =
- -60 + 27 + eps_offset + phy_b11 + phy_b10;
- }
-
- mod_phy_reg(pi, (phy_b5 == PHY_CORE_0) ? 0x298 :
- 0x29c, (0x1ff << 7), (phy_b6) << 7);
-
- pi->nphy_papd_epsilon_offset[phy_b5] = phy_b6;
- } else {
- if (NREV_LT(pi->pubpi.phy_rev, 5)) {
- eps_offset = 4;
- } else {
- eps_offset = 2;
- }
-
- phy_b7 = 15 - ((phy_b1[phy_b5].index) >> 3);
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- phy_b11 =
- -(nphy_papd_pga_gain_delta_ipa_2g[phy_b7] +
- 1) / 2;
- phy_b10 = 0;
- } else {
- phy_b11 =
- -(nphy_papd_pga_gain_delta_ipa_5g[phy_b7] +
- 1) / 2;
- phy_b10 = -9;
- }
-
- phy_b6 = -60 + 27 + eps_offset + phy_b11 + phy_b10;
-
- mod_phy_reg(pi, (phy_b5 == PHY_CORE_0) ? 0x298 :
- 0x29c, (0x1ff << 7), (phy_b6) << 7);
-
- pi->nphy_papd_epsilon_offset[phy_b5] = phy_b6;
- }
- }
-
- mod_phy_reg(pi, (0 == PHY_CORE_0) ? 0x297 :
- 0x29b, (0x1 << 0), (NPHY_PAPD_COMP_ON) << 0);
-
- mod_phy_reg(pi, (1 == PHY_CORE_0) ? 0x297 :
- 0x29b, (0x1 << 0), (NPHY_PAPD_COMP_ON) << 0);
-
- if (NREV_GE(pi->pubpi.phy_rev, 6)) {
- mod_phy_reg(pi, (0 == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x1 << 13), (0) << 13);
-
- mod_phy_reg(pi, (1 == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x1 << 13), (0) << 13);
-
- } else {
- mod_phy_reg(pi, (0 == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x1 << 11), (0) << 11);
-
- mod_phy_reg(pi, (1 == PHY_CORE_0) ? 0x2a3 :
- 0x2a4, (0x1 << 11), (0) << 11);
-
- }
- pi->nphy_papdcomp = NPHY_PAPD_COMP_ON;
-
- write_phy_reg(pi, 0x01, phy_b9);
-
- wlc_phy_ipa_set_tx_digi_filts_nphy(pi);
-
- wlc_phy_txpwrctrl_enable_nphy(pi, phy_b4);
- if (phy_b4 == PHY_TPC_HW_OFF) {
- wlc_phy_txpwr_index_nphy(pi, (1 << 0),
- (s8) (pi->nphy_txpwrindex[0].
- index_internal), false);
- wlc_phy_txpwr_index_nphy(pi, (1 << 1),
- (s8) (pi->nphy_txpwrindex[1].
- index_internal), false);
- }
-
- wlc_phy_stay_in_carriersearch_nphy(pi, false);
-
- if (!phy_b3) {
- wlapi_enable_mac(pi->sh->physhim);
- }
}
void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi)
@@ -27423,11 +28015,10 @@ void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi)
}
}
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
txpi[0] = txpi[1] = 30;
- } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ else if (NREV_GE(pi->pubpi.phy_rev, 3))
txpi[0] = txpi[1] = 40;
- }
if (NREV_LT(pi->pubpi.phy_rev, 7)) {
@@ -27447,37 +28038,36 @@ void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi)
if (NREV_GE(phyrev, 3)) {
if (PHY_IPA(pi)) {
u32 *tx_gaintbl =
- wlc_phy_get_ipa_gaintbl_nphy(pi);
+ wlc_phy_get_ipa_gaintbl_nphy(pi);
txgain = tx_gaintbl[txpi[core]];
} else {
if (CHSPEC_IS5G(pi->radio_chanspec)) {
if (NREV_IS(phyrev, 3)) {
txgain =
- nphy_tpc_5GHz_txgain_rev3
- [txpi[core]];
+ nphy_tpc_5GHz_txgain_rev3
+ [txpi[core]];
} else if (NREV_IS(phyrev, 4)) {
- txgain =
- (pi->srom_fem5g.extpagain ==
- 3) ?
- nphy_tpc_5GHz_txgain_HiPwrEPA
- [txpi[core]] :
- nphy_tpc_5GHz_txgain_rev4
- [txpi[core]];
+ txgain = (
+ pi->srom_fem5g.extpagain ==
+ 3) ?
+ nphy_tpc_5GHz_txgain_HiPwrEPA
+ [txpi[core]] :
+ nphy_tpc_5GHz_txgain_rev4
+ [txpi[core]];
} else {
txgain =
- nphy_tpc_5GHz_txgain_rev5
- [txpi[core]];
+ nphy_tpc_5GHz_txgain_rev5
+ [txpi[core]];
}
} else {
if (NREV_GE(phyrev, 5) &&
(pi->srom_fem2g.extpagain == 3)) {
txgain =
- nphy_tpc_txgain_HiPwrEPA
- [txpi[core]];
+ nphy_tpc_txgain_HiPwrEPA
+ [txpi[core]];
} else {
- txgain =
- nphy_tpc_txgain_rev3[txpi
- [core]];
+ txgain = nphy_tpc_txgain_rev3
+ [txpi[core]];
}
}
}
@@ -27497,12 +28087,12 @@ void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi)
bbmult = (txgain >> 0) & ((1 << (7 - 0 + 1)) - 1);
- if (NREV_GE(phyrev, 3)) {
+ if (NREV_GE(phyrev, 3))
mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0x8f :
0xa5), (0x1 << 8), (0x1 << 8));
- } else {
+ else
mod_phy_reg(pi, 0xa5, (0x1 << 14), (0x1 << 14));
- }
+
write_phy_reg(pi, (core == PHY_CORE_0) ? 0xaa : 0xab, dac_gain);
wlc_phy_table_write_nphy(pi, 7, 1, (0x110 + core), 16,
@@ -27562,9 +28152,8 @@ wlc_phy_txpwr_nphy_po_apply(u8 *srom_max, u8 pwr_offset,
{
u8 rate;
- for (rate = rate_start; rate <= rate_end; rate++) {
+ for (rate = rate_start; rate <= rate_end; rate++)
srom_max[rate] -= 2 * pwr_offset;
- }
}
void
@@ -27603,7 +28192,8 @@ void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi)
u16 pwr_offsets1[2], *pwr_offsets2 = NULL;
u8 *tx_srom_max_rate = NULL;
- for (band_num = 0; band_num < (CH_2G_GROUP + CH_5G_GROUP); band_num++) {
+ for (band_num = 0; band_num < (CH_2G_GROUP + CH_5G_GROUP);
+ band_num++) {
switch (band_num) {
case 0:
@@ -27619,7 +28209,7 @@ void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi)
pwr_offsets1[0] = (u16) (pi->ofdm2gpo & 0xffff);
pwr_offsets1[1] =
- (u16) (pi->ofdm2gpo >> 16) & 0xffff;
+ (u16) (pi->ofdm2gpo >> 16) & 0xffff;
pwr_offsets2 = pi->mcs2gpo;
@@ -27636,7 +28226,7 @@ void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi)
pwr_offsets1[0] = (u16) (pi->ofdm5gpo & 0xffff);
pwr_offsets1[1] =
- (u16) (pi->ofdm5gpo >> 16) & 0xffff;
+ (u16) (pi->ofdm5gpo >> 16) & 0xffff;
pwr_offsets2 = pi->mcs5gpo;
@@ -27653,7 +28243,7 @@ void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi)
pwr_offsets1[0] = (u16) (pi->ofdm5glpo & 0xffff);
pwr_offsets1[1] =
- (u16) (pi->ofdm5glpo >> 16) & 0xffff;
+ (u16) (pi->ofdm5glpo >> 16) & 0xffff;
pwr_offsets2 = pi->mcs5glpo;
@@ -27670,7 +28260,7 @@ void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi)
pwr_offsets1[0] = (u16) (pi->ofdm5ghpo & 0xffff);
pwr_offsets1[1] =
- (u16) (pi->ofdm5ghpo >> 16) & 0xffff;
+ (u16) (pi->ofdm5ghpo >> 16) & 0xffff;
pwr_offsets2 = pi->mcs5ghpo;
@@ -27696,12 +28286,10 @@ void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi)
TXP_FIRST_MCS_20_CDD,
TXP_LAST_MCS_20_CDD);
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
-
+ if (NREV_GE(pi->pubpi.phy_rev, 3))
wlc_phy_txpwr_nphy_po_apply(tx_srom_max_rate, tmp_cddpo,
TXP_FIRST_MCS_20_CDD,
TXP_LAST_MCS_20_CDD);
- }
wlc_phy_mcs_to_ofdm_powers_nphy(tx_srom_max_rate,
TXP_FIRST_OFDM_20_CDD,
@@ -27713,13 +28301,11 @@ void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi)
TXP_FIRST_MCS_20_STBC,
TXP_LAST_MCS_20_STBC);
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
-
+ if (NREV_GE(pi->pubpi.phy_rev, 3))
wlc_phy_txpwr_nphy_po_apply(tx_srom_max_rate,
tmp_stbcpo,
TXP_FIRST_MCS_20_STBC,
TXP_LAST_MCS_20_STBC);
- }
wlc_phy_txpwr_nphy_srom_convert(tx_srom_max_rate,
&pwr_offsets2[2], tmp_max_pwr,
@@ -27773,294 +28359,26 @@ void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi)
} else {
for (rate1 = TXP_FIRST_OFDM_40_SISO, rate2 =
- TXP_FIRST_OFDM; rate1 <= TXP_LAST_MCS_40_SDM;
+ TXP_FIRST_OFDM;
+ rate1 <= TXP_LAST_MCS_40_SDM;
rate1++, rate2++)
tx_srom_max_rate[rate1] =
- tx_srom_max_rate[rate2];
+ tx_srom_max_rate[rate2];
}
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 3))
wlc_phy_txpwr_nphy_po_apply(tx_srom_max_rate,
tmp_bw40po,
TXP_FIRST_OFDM_40_SISO,
TXP_LAST_MCS_40_SDM);
- }
tx_srom_max_rate[TXP_MCS_32] =
- tx_srom_max_rate[TXP_FIRST_MCS_40_CDD];
+ tx_srom_max_rate[TXP_FIRST_MCS_40_CDD];
}
return;
}
-static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
-{
- u16 bw40po, cddpo, stbcpo, bwduppo;
- uint band_num;
-
- if (pi->sh->sromrev >= 9) {
-
- return;
- }
-
- bw40po = (u16) PHY_GETINTVAR(pi, "bw40po");
- pi->bw402gpo = bw40po & 0xf;
- pi->bw405gpo = (bw40po & 0xf0) >> 4;
- pi->bw405glpo = (bw40po & 0xf00) >> 8;
- pi->bw405ghpo = (bw40po & 0xf000) >> 12;
-
- cddpo = (u16) PHY_GETINTVAR(pi, "cddpo");
- pi->cdd2gpo = cddpo & 0xf;
- pi->cdd5gpo = (cddpo & 0xf0) >> 4;
- pi->cdd5glpo = (cddpo & 0xf00) >> 8;
- pi->cdd5ghpo = (cddpo & 0xf000) >> 12;
-
- stbcpo = (u16) PHY_GETINTVAR(pi, "stbcpo");
- pi->stbc2gpo = stbcpo & 0xf;
- pi->stbc5gpo = (stbcpo & 0xf0) >> 4;
- pi->stbc5glpo = (stbcpo & 0xf00) >> 8;
- pi->stbc5ghpo = (stbcpo & 0xf000) >> 12;
-
- bwduppo = (u16) PHY_GETINTVAR(pi, "bwduppo");
- pi->bwdup2gpo = bwduppo & 0xf;
- pi->bwdup5gpo = (bwduppo & 0xf0) >> 4;
- pi->bwdup5glpo = (bwduppo & 0xf00) >> 8;
- pi->bwdup5ghpo = (bwduppo & 0xf000) >> 12;
-
- for (band_num = 0; band_num < (CH_2G_GROUP + CH_5G_GROUP); band_num++) {
- switch (band_num) {
- case 0:
-
- pi->nphy_txpid2g[PHY_CORE_0] =
- (u8) PHY_GETINTVAR(pi, "txpid2ga0");
- pi->nphy_txpid2g[PHY_CORE_1] =
- (u8) PHY_GETINTVAR(pi, "txpid2ga1");
- pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_2g =
- (s8) PHY_GETINTVAR(pi, "maxp2ga0");
- pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_2g =
- (s8) PHY_GETINTVAR(pi, "maxp2ga1");
- pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_2g_a1 =
- (s16) PHY_GETINTVAR(pi, "pa2gw0a0");
- pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_2g_a1 =
- (s16) PHY_GETINTVAR(pi, "pa2gw0a1");
- pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_2g_b0 =
- (s16) PHY_GETINTVAR(pi, "pa2gw1a0");
- pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_2g_b0 =
- (s16) PHY_GETINTVAR(pi, "pa2gw1a1");
- pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_2g_b1 =
- (s16) PHY_GETINTVAR(pi, "pa2gw2a0");
- pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_2g_b1 =
- (s16) PHY_GETINTVAR(pi, "pa2gw2a1");
- pi->nphy_pwrctrl_info[PHY_CORE_0].idle_targ_2g =
- (s8) PHY_GETINTVAR(pi, "itt2ga0");
- pi->nphy_pwrctrl_info[PHY_CORE_1].idle_targ_2g =
- (s8) PHY_GETINTVAR(pi, "itt2ga1");
-
- pi->cck2gpo = (u16) PHY_GETINTVAR(pi, "cck2gpo");
-
- pi->ofdm2gpo = (u32) PHY_GETINTVAR(pi, "ofdm2gpo");
-
- pi->mcs2gpo[0] = (u16) PHY_GETINTVAR(pi, "mcs2gpo0");
- pi->mcs2gpo[1] = (u16) PHY_GETINTVAR(pi, "mcs2gpo1");
- pi->mcs2gpo[2] = (u16) PHY_GETINTVAR(pi, "mcs2gpo2");
- pi->mcs2gpo[3] = (u16) PHY_GETINTVAR(pi, "mcs2gpo3");
- pi->mcs2gpo[4] = (u16) PHY_GETINTVAR(pi, "mcs2gpo4");
- pi->mcs2gpo[5] = (u16) PHY_GETINTVAR(pi, "mcs2gpo5");
- pi->mcs2gpo[6] = (u16) PHY_GETINTVAR(pi, "mcs2gpo6");
- pi->mcs2gpo[7] = (u16) PHY_GETINTVAR(pi, "mcs2gpo7");
- break;
- case 1:
-
- pi->nphy_txpid5g[PHY_CORE_0] =
- (u8) PHY_GETINTVAR(pi, "txpid5ga0");
- pi->nphy_txpid5g[PHY_CORE_1] =
- (u8) PHY_GETINTVAR(pi, "txpid5ga1");
- pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_5gm =
- (s8) PHY_GETINTVAR(pi, "maxp5ga0");
- pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_5gm =
- (s8) PHY_GETINTVAR(pi, "maxp5ga1");
- pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_5gm_a1 =
- (s16) PHY_GETINTVAR(pi, "pa5gw0a0");
- pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_5gm_a1 =
- (s16) PHY_GETINTVAR(pi, "pa5gw0a1");
- pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_5gm_b0 =
- (s16) PHY_GETINTVAR(pi, "pa5gw1a0");
- pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_5gm_b0 =
- (s16) PHY_GETINTVAR(pi, "pa5gw1a1");
- pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_5gm_b1 =
- (s16) PHY_GETINTVAR(pi, "pa5gw2a0");
- pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_5gm_b1 =
- (s16) PHY_GETINTVAR(pi, "pa5gw2a1");
- pi->nphy_pwrctrl_info[PHY_CORE_0].idle_targ_5gm =
- (s8) PHY_GETINTVAR(pi, "itt5ga0");
- pi->nphy_pwrctrl_info[PHY_CORE_1].idle_targ_5gm =
- (s8) PHY_GETINTVAR(pi, "itt5ga1");
-
- pi->ofdm5gpo = (u32) PHY_GETINTVAR(pi, "ofdm5gpo");
-
- pi->mcs5gpo[0] = (u16) PHY_GETINTVAR(pi, "mcs5gpo0");
- pi->mcs5gpo[1] = (u16) PHY_GETINTVAR(pi, "mcs5gpo1");
- pi->mcs5gpo[2] = (u16) PHY_GETINTVAR(pi, "mcs5gpo2");
- pi->mcs5gpo[3] = (u16) PHY_GETINTVAR(pi, "mcs5gpo3");
- pi->mcs5gpo[4] = (u16) PHY_GETINTVAR(pi, "mcs5gpo4");
- pi->mcs5gpo[5] = (u16) PHY_GETINTVAR(pi, "mcs5gpo5");
- pi->mcs5gpo[6] = (u16) PHY_GETINTVAR(pi, "mcs5gpo6");
- pi->mcs5gpo[7] = (u16) PHY_GETINTVAR(pi, "mcs5gpo7");
- break;
- case 2:
-
- pi->nphy_txpid5gl[0] =
- (u8) PHY_GETINTVAR(pi, "txpid5gla0");
- pi->nphy_txpid5gl[1] =
- (u8) PHY_GETINTVAR(pi, "txpid5gla1");
- pi->nphy_pwrctrl_info[0].max_pwr_5gl =
- (s8) PHY_GETINTVAR(pi, "maxp5gla0");
- pi->nphy_pwrctrl_info[1].max_pwr_5gl =
- (s8) PHY_GETINTVAR(pi, "maxp5gla1");
- pi->nphy_pwrctrl_info[0].pwrdet_5gl_a1 =
- (s16) PHY_GETINTVAR(pi, "pa5glw0a0");
- pi->nphy_pwrctrl_info[1].pwrdet_5gl_a1 =
- (s16) PHY_GETINTVAR(pi, "pa5glw0a1");
- pi->nphy_pwrctrl_info[0].pwrdet_5gl_b0 =
- (s16) PHY_GETINTVAR(pi, "pa5glw1a0");
- pi->nphy_pwrctrl_info[1].pwrdet_5gl_b0 =
- (s16) PHY_GETINTVAR(pi, "pa5glw1a1");
- pi->nphy_pwrctrl_info[0].pwrdet_5gl_b1 =
- (s16) PHY_GETINTVAR(pi, "pa5glw2a0");
- pi->nphy_pwrctrl_info[1].pwrdet_5gl_b1 =
- (s16) PHY_GETINTVAR(pi, "pa5glw2a1");
- pi->nphy_pwrctrl_info[0].idle_targ_5gl = 0;
- pi->nphy_pwrctrl_info[1].idle_targ_5gl = 0;
-
- pi->ofdm5glpo = (u32) PHY_GETINTVAR(pi, "ofdm5glpo");
-
- pi->mcs5glpo[0] =
- (u16) PHY_GETINTVAR(pi, "mcs5glpo0");
- pi->mcs5glpo[1] =
- (u16) PHY_GETINTVAR(pi, "mcs5glpo1");
- pi->mcs5glpo[2] =
- (u16) PHY_GETINTVAR(pi, "mcs5glpo2");
- pi->mcs5glpo[3] =
- (u16) PHY_GETINTVAR(pi, "mcs5glpo3");
- pi->mcs5glpo[4] =
- (u16) PHY_GETINTVAR(pi, "mcs5glpo4");
- pi->mcs5glpo[5] =
- (u16) PHY_GETINTVAR(pi, "mcs5glpo5");
- pi->mcs5glpo[6] =
- (u16) PHY_GETINTVAR(pi, "mcs5glpo6");
- pi->mcs5glpo[7] =
- (u16) PHY_GETINTVAR(pi, "mcs5glpo7");
- break;
- case 3:
-
- pi->nphy_txpid5gh[0] =
- (u8) PHY_GETINTVAR(pi, "txpid5gha0");
- pi->nphy_txpid5gh[1] =
- (u8) PHY_GETINTVAR(pi, "txpid5gha1");
- pi->nphy_pwrctrl_info[0].max_pwr_5gh =
- (s8) PHY_GETINTVAR(pi, "maxp5gha0");
- pi->nphy_pwrctrl_info[1].max_pwr_5gh =
- (s8) PHY_GETINTVAR(pi, "maxp5gha1");
- pi->nphy_pwrctrl_info[0].pwrdet_5gh_a1 =
- (s16) PHY_GETINTVAR(pi, "pa5ghw0a0");
- pi->nphy_pwrctrl_info[1].pwrdet_5gh_a1 =
- (s16) PHY_GETINTVAR(pi, "pa5ghw0a1");
- pi->nphy_pwrctrl_info[0].pwrdet_5gh_b0 =
- (s16) PHY_GETINTVAR(pi, "pa5ghw1a0");
- pi->nphy_pwrctrl_info[1].pwrdet_5gh_b0 =
- (s16) PHY_GETINTVAR(pi, "pa5ghw1a1");
- pi->nphy_pwrctrl_info[0].pwrdet_5gh_b1 =
- (s16) PHY_GETINTVAR(pi, "pa5ghw2a0");
- pi->nphy_pwrctrl_info[1].pwrdet_5gh_b1 =
- (s16) PHY_GETINTVAR(pi, "pa5ghw2a1");
- pi->nphy_pwrctrl_info[0].idle_targ_5gh = 0;
- pi->nphy_pwrctrl_info[1].idle_targ_5gh = 0;
-
- pi->ofdm5ghpo = (u32) PHY_GETINTVAR(pi, "ofdm5ghpo");
-
- pi->mcs5ghpo[0] =
- (u16) PHY_GETINTVAR(pi, "mcs5ghpo0");
- pi->mcs5ghpo[1] =
- (u16) PHY_GETINTVAR(pi, "mcs5ghpo1");
- pi->mcs5ghpo[2] =
- (u16) PHY_GETINTVAR(pi, "mcs5ghpo2");
- pi->mcs5ghpo[3] =
- (u16) PHY_GETINTVAR(pi, "mcs5ghpo3");
- pi->mcs5ghpo[4] =
- (u16) PHY_GETINTVAR(pi, "mcs5ghpo4");
- pi->mcs5ghpo[5] =
- (u16) PHY_GETINTVAR(pi, "mcs5ghpo5");
- pi->mcs5ghpo[6] =
- (u16) PHY_GETINTVAR(pi, "mcs5ghpo6");
- pi->mcs5ghpo[7] =
- (u16) PHY_GETINTVAR(pi, "mcs5ghpo7");
- break;
- }
- }
-
- wlc_phy_txpwr_apply_nphy(pi);
-}
-
-static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
-{
-
- pi->antswitch = (u8) PHY_GETINTVAR(pi, "antswitch");
- pi->aa2g = (u8) PHY_GETINTVAR(pi, "aa2g");
- pi->aa5g = (u8) PHY_GETINTVAR(pi, "aa5g");
-
- pi->srom_fem2g.tssipos = (u8) PHY_GETINTVAR(pi, "tssipos2g");
- pi->srom_fem2g.extpagain = (u8) PHY_GETINTVAR(pi, "extpagain2g");
- pi->srom_fem2g.pdetrange = (u8) PHY_GETINTVAR(pi, "pdetrange2g");
- pi->srom_fem2g.triso = (u8) PHY_GETINTVAR(pi, "triso2g");
- pi->srom_fem2g.antswctrllut = (u8) PHY_GETINTVAR(pi, "antswctl2g");
-
- pi->srom_fem5g.tssipos = (u8) PHY_GETINTVAR(pi, "tssipos5g");
- pi->srom_fem5g.extpagain = (u8) PHY_GETINTVAR(pi, "extpagain5g");
- pi->srom_fem5g.pdetrange = (u8) PHY_GETINTVAR(pi, "pdetrange5g");
- pi->srom_fem5g.triso = (u8) PHY_GETINTVAR(pi, "triso5g");
- if (PHY_GETVAR(pi, "antswctl5g")) {
-
- pi->srom_fem5g.antswctrllut =
- (u8) PHY_GETINTVAR(pi, "antswctl5g");
- } else {
-
- pi->srom_fem5g.antswctrllut =
- (u8) PHY_GETINTVAR(pi, "antswctl2g");
- }
-
- wlc_phy_txpower_ipa_upd(pi);
-
- pi->phy_txcore_disable_temp = (s16) PHY_GETINTVAR(pi, "tempthresh");
- if (pi->phy_txcore_disable_temp == 0) {
- pi->phy_txcore_disable_temp = PHY_CHAIN_TX_DISABLE_TEMP;
- }
-
- pi->phy_tempsense_offset = (s8) PHY_GETINTVAR(pi, "tempoffset");
- if (pi->phy_tempsense_offset != 0) {
- if (pi->phy_tempsense_offset >
- (NPHY_SROM_TEMPSHIFT + NPHY_SROM_MAXTEMPOFFSET)) {
- pi->phy_tempsense_offset = NPHY_SROM_MAXTEMPOFFSET;
- } else if (pi->phy_tempsense_offset < (NPHY_SROM_TEMPSHIFT +
- NPHY_SROM_MINTEMPOFFSET)) {
- pi->phy_tempsense_offset = NPHY_SROM_MINTEMPOFFSET;
- } else {
- pi->phy_tempsense_offset -= NPHY_SROM_TEMPSHIFT;
- }
- }
-
- pi->phy_txcore_enable_temp =
- pi->phy_txcore_disable_temp - PHY_HYSTERESIS_DELTATEMP;
-
- pi->phycal_tempdelta = (u8) PHY_GETINTVAR(pi, "phycal_tempdelta");
- if (pi->phycal_tempdelta > NPHY_CAL_MAXTEMPDELTA) {
- pi->phycal_tempdelta = 0;
- }
-
- wlc_phy_txpwr_srom_read_ppr_nphy(pi);
-
- return true;
-}
-
void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi)
{
u8 tx_pwr_ctrl_state;
@@ -28081,512 +28399,10 @@ void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi)
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, 0);
}
-static void wlc_phy_txpwrctrl_coeff_setup_nphy(struct brcms_phy *pi)
-{
- u32 idx;
- u16 iqloCalbuf[7];
- u32 iqcomp, locomp, curr_locomp;
- s8 locomp_i, locomp_q;
- s8 curr_locomp_i, curr_locomp_q;
- u32 tbl_id, tbl_len, tbl_offset;
- u32 regval[128];
-
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, true);
-
- wlc_phy_table_read_nphy(pi, 15, 7, 80, 16, iqloCalbuf);
-
- tbl_len = 128;
- tbl_offset = 320;
- for (tbl_id = NPHY_TBL_ID_CORE1TXPWRCTL;
- tbl_id <= NPHY_TBL_ID_CORE2TXPWRCTL; tbl_id++) {
- iqcomp =
- (tbl_id ==
- 26) ? (((u32) (iqloCalbuf[0] & 0x3ff)) << 10) |
- (iqloCalbuf[1] & 0x3ff)
- : (((u32) (iqloCalbuf[2] & 0x3ff)) << 10) |
- (iqloCalbuf[3] & 0x3ff);
-
- for (idx = 0; idx < tbl_len; idx++) {
- regval[idx] = iqcomp;
- }
- wlc_phy_table_write_nphy(pi, tbl_id, tbl_len, tbl_offset, 32,
- regval);
- }
-
- tbl_offset = 448;
- for (tbl_id = NPHY_TBL_ID_CORE1TXPWRCTL;
- tbl_id <= NPHY_TBL_ID_CORE2TXPWRCTL; tbl_id++) {
-
- locomp =
- (u32) ((tbl_id == 26) ? iqloCalbuf[5] : iqloCalbuf[6]);
- locomp_i = (s8) ((locomp >> 8) & 0xff);
- locomp_q = (s8) ((locomp) & 0xff);
- for (idx = 0; idx < tbl_len; idx++) {
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- curr_locomp_i = locomp_i;
- curr_locomp_q = locomp_q;
- } else {
- curr_locomp_i = (s8) ((locomp_i *
- nphy_tpc_loscale[idx] +
- 128) >> 8);
- curr_locomp_q =
- (s8) ((locomp_q * nphy_tpc_loscale[idx] +
- 128) >> 8);
- }
- curr_locomp = (u32) ((curr_locomp_i & 0xff) << 8);
- curr_locomp |= (u32) (curr_locomp_q & 0xff);
- regval[idx] = curr_locomp;
- }
- wlc_phy_table_write_nphy(pi, tbl_id, tbl_len, tbl_offset, 32,
- regval);
- }
-
- if (NREV_LT(pi->pubpi.phy_rev, 2)) {
-
- wlapi_bmac_write_shm(pi->sh->physhim, M_CURR_IDX1, 0xFFFF);
- wlapi_bmac_write_shm(pi->sh->physhim, M_CURR_IDX2, 0xFFFF);
- }
-
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, false);
-}
-
-static void wlc_phy_ipa_internal_tssi_setup_nphy(struct brcms_phy *pi)
-{
- u8 core;
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- for (core = 0; core < pi->pubpi.phy_corenum; core++) {
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TX_SSI_MASTER, 0x5);
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TX_SSI_MUX, 0xe);
-
- if (pi->pubpi.radiorev != 5)
- WRITE_RADIO_REG3(pi, RADIO_2057, TX,
- core, TSSIA, 0);
-
- if (!NREV_IS(pi->pubpi.phy_rev, 7)) {
-
- WRITE_RADIO_REG3(pi, RADIO_2057, TX,
- core, TSSIG, 0x1);
- } else {
-
- WRITE_RADIO_REG3(pi, RADIO_2057, TX,
- core, TSSIG, 0x31);
- }
- } else {
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TX_SSI_MASTER, 0x9);
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TX_SSI_MUX, 0xc);
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core,
- TSSIG, 0);
-
- if (pi->pubpi.radiorev != 5) {
- if (!NREV_IS(pi->pubpi.phy_rev, 7)) {
-
- WRITE_RADIO_REG3(pi, RADIO_2057,
- TX, core,
- TSSIA, 0x1);
- } else {
-
- WRITE_RADIO_REG3(pi, RADIO_2057,
- TX, core,
- TSSIA, 0x31);
- }
- }
- }
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core, IQCAL_VCM_HG,
- 0);
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core, IQCAL_IDAC,
- 0);
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core, TSSI_VCM,
- 0x3);
- WRITE_RADIO_REG3(pi, RADIO_2057, TX, core, TSSI_MISC1,
- 0x0);
- }
- } else {
- WRITE_RADIO_SYN(pi, RADIO_2056, RESERVED_ADDR31,
- (CHSPEC_IS2G(pi->radio_chanspec)) ? 0x128 :
- 0x80);
- WRITE_RADIO_SYN(pi, RADIO_2056, RESERVED_ADDR30, 0x0);
- WRITE_RADIO_SYN(pi, RADIO_2056, GPIO_MASTER1, 0x29);
-
- for (core = 0; core < pi->pubpi.phy_corenum; core++) {
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, IQCAL_VCM_HG,
- 0x0);
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, IQCAL_IDAC,
- 0x0);
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, TSSI_VCM,
- 0x3);
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, TX_AMP_DET,
- 0x0);
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, TSSI_MISC1,
- 0x8);
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, TSSI_MISC2,
- 0x0);
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, TSSI_MISC3,
- 0x0);
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
- TX_SSI_MASTER, 0x5);
-
- if (pi->pubpi.radiorev != 5)
- WRITE_RADIO_REG2(pi, RADIO_2056, TX,
- core, TSSIA, 0x0);
- if (NREV_GE(pi->pubpi.phy_rev, 5)) {
-
- WRITE_RADIO_REG2(pi, RADIO_2056, TX,
- core, TSSIG, 0x31);
- } else {
- WRITE_RADIO_REG2(pi, RADIO_2056, TX,
- core, TSSIG, 0x11);
- }
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
- TX_SSI_MUX, 0xe);
- } else {
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
- TX_SSI_MASTER, 0x9);
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
- TSSIA, 0x31);
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
- TSSIG, 0x0);
- WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
- TX_SSI_MUX, 0xc);
- }
- }
- }
-}
-
-static void wlc_phy_txpwrctrl_idle_tssi_nphy(struct brcms_phy *pi)
-{
- s32 rssi_buf[4];
- s32 int_val;
-
- if (SCAN_RM_IN_PROGRESS(pi) || PLT_INPROG_PHY(pi) || PHY_MUTED(pi))
-
- return;
-
- if (PHY_IPA(pi)) {
- wlc_phy_ipa_internal_tssi_setup_nphy(pi);
- }
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 12),
- 0, 0x3, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 13), 0, 3, 0);
- }
-
- wlc_phy_stopplayback_nphy(pi);
-
- wlc_phy_tx_tone_nphy(pi, 4000, 0, 0, 0, false);
-
- udelay(20);
- int_val =
- wlc_phy_poll_rssi_nphy(pi, (u8) NPHY_RSSI_SEL_TSSI_2G, rssi_buf,
- 1);
- wlc_phy_stopplayback_nphy(pi);
- wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_OFF, 0);
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 12),
- 0, 0x3, 1,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
- } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- wlc_phy_rfctrl_override_nphy(pi, (0x1 << 13), 0, 3, 1);
- }
-
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
-
- pi->nphy_pwrctrl_info[PHY_CORE_0].idle_tssi_2g =
- (u8) ((int_val >> 24) & 0xff);
- pi->nphy_pwrctrl_info[PHY_CORE_0].idle_tssi_5g =
- (u8) ((int_val >> 24) & 0xff);
-
- pi->nphy_pwrctrl_info[PHY_CORE_1].idle_tssi_2g =
- (u8) ((int_val >> 8) & 0xff);
- pi->nphy_pwrctrl_info[PHY_CORE_1].idle_tssi_5g =
- (u8) ((int_val >> 8) & 0xff);
- } else {
- pi->nphy_pwrctrl_info[PHY_CORE_0].idle_tssi_2g =
- (u8) ((int_val >> 24) & 0xff);
-
- pi->nphy_pwrctrl_info[PHY_CORE_1].idle_tssi_2g =
- (u8) ((int_val >> 8) & 0xff);
-
- pi->nphy_pwrctrl_info[PHY_CORE_0].idle_tssi_5g =
- (u8) ((int_val >> 16) & 0xff);
- pi->nphy_pwrctrl_info[PHY_CORE_1].idle_tssi_5g =
- (u8) ((int_val) & 0xff);
- }
-
-}
-
-static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
-{
- u32 idx;
- s16 a1[2], b0[2], b1[2];
- s8 target_pwr_qtrdbm[2];
- s32 num, den, pwr_est;
- u8 chan_freq_range;
- u8 idle_tssi[2];
- u32 tbl_id, tbl_len, tbl_offset;
- u32 regval[64];
- u8 core;
-
- if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
- wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
- udelay(1);
- }
-
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, true);
-
- or_phy_reg(pi, 0x122, (0x1 << 0));
-
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- and_phy_reg(pi, 0x1e7, (u16) (~(0x1 << 15)));
- } else {
-
- or_phy_reg(pi, 0x1e7, (0x1 << 15));
- }
-
- if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12))
- wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, 0);
-
- if (pi->sh->sromrev < 4) {
- idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_2g;
- idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_2g;
- target_pwr_qtrdbm[0] = 13 * 4;
- target_pwr_qtrdbm[1] = 13 * 4;
- a1[0] = -424;
- a1[1] = -424;
- b0[0] = 5612;
- b0[1] = 5612;
- b1[1] = -1393;
- b1[0] = -1393;
- } else {
-
- chan_freq_range = wlc_phy_get_chan_freq_range_nphy(pi, 0);
- switch (chan_freq_range) {
- case WL_CHAN_FREQ_RANGE_2G:
- idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_2g;
- idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_2g;
- target_pwr_qtrdbm[0] =
- pi->nphy_pwrctrl_info[0].max_pwr_2g;
- target_pwr_qtrdbm[1] =
- pi->nphy_pwrctrl_info[1].max_pwr_2g;
- a1[0] = pi->nphy_pwrctrl_info[0].pwrdet_2g_a1;
- a1[1] = pi->nphy_pwrctrl_info[1].pwrdet_2g_a1;
- b0[0] = pi->nphy_pwrctrl_info[0].pwrdet_2g_b0;
- b0[1] = pi->nphy_pwrctrl_info[1].pwrdet_2g_b0;
- b1[0] = pi->nphy_pwrctrl_info[0].pwrdet_2g_b1;
- b1[1] = pi->nphy_pwrctrl_info[1].pwrdet_2g_b1;
- break;
- case WL_CHAN_FREQ_RANGE_5GL:
- idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_5g;
- idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_5g;
- target_pwr_qtrdbm[0] =
- pi->nphy_pwrctrl_info[0].max_pwr_5gl;
- target_pwr_qtrdbm[1] =
- pi->nphy_pwrctrl_info[1].max_pwr_5gl;
- a1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gl_a1;
- a1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gl_a1;
- b0[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gl_b0;
- b0[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gl_b0;
- b1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gl_b1;
- b1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gl_b1;
- break;
- case WL_CHAN_FREQ_RANGE_5GM:
- idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_5g;
- idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_5g;
- target_pwr_qtrdbm[0] =
- pi->nphy_pwrctrl_info[0].max_pwr_5gm;
- target_pwr_qtrdbm[1] =
- pi->nphy_pwrctrl_info[1].max_pwr_5gm;
- a1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gm_a1;
- a1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gm_a1;
- b0[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gm_b0;
- b0[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gm_b0;
- b1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gm_b1;
- b1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gm_b1;
- break;
- case WL_CHAN_FREQ_RANGE_5GH:
- idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_5g;
- idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_5g;
- target_pwr_qtrdbm[0] =
- pi->nphy_pwrctrl_info[0].max_pwr_5gh;
- target_pwr_qtrdbm[1] =
- pi->nphy_pwrctrl_info[1].max_pwr_5gh;
- a1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gh_a1;
- a1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gh_a1;
- b0[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gh_b0;
- b0[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gh_b0;
- b1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gh_b1;
- b1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gh_b1;
- break;
- default:
- idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_2g;
- idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_2g;
- target_pwr_qtrdbm[0] = 13 * 4;
- target_pwr_qtrdbm[1] = 13 * 4;
- a1[0] = -424;
- a1[1] = -424;
- b0[0] = 5612;
- b0[1] = 5612;
- b1[1] = -1393;
- b1[0] = -1393;
- break;
- }
- }
-
- target_pwr_qtrdbm[0] = (s8) pi->tx_power_max;
- target_pwr_qtrdbm[1] = (s8) pi->tx_power_max;
-
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- if (pi->srom_fem2g.tssipos) {
- or_phy_reg(pi, 0x1e9, (0x1 << 14));
- }
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- for (core = 0; core <= 1; core++) {
- if (PHY_IPA(pi)) {
-
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- WRITE_RADIO_REG3(pi, RADIO_2057,
- TX, core,
- TX_SSI_MUX,
- 0xe);
- } else {
- WRITE_RADIO_REG3(pi, RADIO_2057,
- TX, core,
- TX_SSI_MUX,
- 0xc);
- }
- } else {
- }
- }
- } else {
- if (PHY_IPA(pi)) {
-
- write_radio_reg(pi, RADIO_2056_TX_TX_SSI_MUX |
- RADIO_2056_TX0,
- (CHSPEC_IS5G
- (pi->
- radio_chanspec)) ? 0xc : 0xe);
- write_radio_reg(pi,
- RADIO_2056_TX_TX_SSI_MUX |
- RADIO_2056_TX1,
- (CHSPEC_IS5G
- (pi->
- radio_chanspec)) ? 0xc : 0xe);
- } else {
-
- write_radio_reg(pi, RADIO_2056_TX_TX_SSI_MUX |
- RADIO_2056_TX0, 0x11);
- write_radio_reg(pi, RADIO_2056_TX_TX_SSI_MUX |
- RADIO_2056_TX1, 0x11);
- }
- }
- }
-
- if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
- wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
- udelay(1);
- }
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- mod_phy_reg(pi, 0x1e7, (0x7f << 0),
- (NPHY_TxPwrCtrlCmd_pwrIndex_init_rev7 << 0));
- } else {
- mod_phy_reg(pi, 0x1e7, (0x7f << 0),
- (NPHY_TxPwrCtrlCmd_pwrIndex_init << 0));
- }
-
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- mod_phy_reg(pi, 0x222, (0xff << 0),
- (NPHY_TxPwrCtrlCmd_pwrIndex_init_rev7 << 0));
- } else if (NREV_GT(pi->pubpi.phy_rev, 1)) {
- mod_phy_reg(pi, 0x222, (0xff << 0),
- (NPHY_TxPwrCtrlCmd_pwrIndex_init << 0));
- }
-
- if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12))
- wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, 0);
-
- write_phy_reg(pi, 0x1e8, (0x3 << 8) | (240 << 0));
-
- write_phy_reg(pi, 0x1e9,
- (1 << 15) | (idle_tssi[0] << 0) | (idle_tssi[1] << 8));
-
- write_phy_reg(pi, 0x1ea,
- (target_pwr_qtrdbm[0] << 0) |
- (target_pwr_qtrdbm[1] << 8));
-
- tbl_len = 64;
- tbl_offset = 0;
- for (tbl_id = NPHY_TBL_ID_CORE1TXPWRCTL;
- tbl_id <= NPHY_TBL_ID_CORE2TXPWRCTL; tbl_id++) {
-
- for (idx = 0; idx < tbl_len; idx++) {
- num =
- 8 * (16 * b0[tbl_id - 26] + b1[tbl_id - 26] * idx);
- den = 32768 + a1[tbl_id - 26] * idx;
- pwr_est = max(((4 * num + den / 2) / den), -8);
- if (NREV_LT(pi->pubpi.phy_rev, 3)) {
- if (idx <=
- (uint) (31 - idle_tssi[tbl_id - 26] + 1))
- pwr_est =
- max(pwr_est,
- target_pwr_qtrdbm[tbl_id - 26] +
- 1);
- }
- regval[idx] = (u32) pwr_est;
- }
- wlc_phy_table_write_nphy(pi, tbl_id, tbl_len, tbl_offset, 32,
- regval);
- }
-
- wlc_phy_txpwr_limit_to_tbl_nphy(pi);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CORE1TXPWRCTL, 84, 64, 8,
- pi->adj_pwr_tbl_nphy);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CORE2TXPWRCTL, 84, 64, 8,
- pi->adj_pwr_tbl_nphy);
-
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, false);
-}
-
static bool wlc_phy_txpwr_ison_nphy(struct brcms_phy *pi)
{
return read_phy_reg((pi), 0x1e7) & ((0x1 << 15) |
- (0x1 << 14) | (0x1 << 13));
-}
-
-static u8 wlc_phy_txpwr_idx_cur_get_nphy(struct brcms_phy *pi, u8 core)
-{
- u16 tmp;
- tmp = read_phy_reg(pi, ((core == PHY_CORE_0) ? 0x1ed : 0x1ee));
-
- tmp = (tmp & (0x7f << 8)) >> 8;
- return (u8) tmp;
-}
-
-static void
-wlc_phy_txpwr_idx_cur_set_nphy(struct brcms_phy *pi, u8 idx0, u8 idx1)
-{
- mod_phy_reg(pi, 0x1e7, (0x7f << 0), idx0);
-
- if (NREV_GT(pi->pubpi.phy_rev, 1))
- mod_phy_reg(pi, 0x222, (0xff << 0), idx1);
+ (0x1 << 14) | (0x1 << 13));
}
u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi)
@@ -28600,12 +28416,9 @@ u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi)
tmp = (pwr_idx[0] << 8) | pwr_idx[1];
} else {
- tmp =
- ((pi->nphy_txpwrindex[PHY_CORE_0].
- index_internal & 0xff) << 8) | (pi->
- nphy_txpwrindex
- [PHY_CORE_1].
- index_internal & 0xff);
+ tmp = ((pi->nphy_txpwrindex[PHY_CORE_0].index_internal & 0xff)
+ << 8) |
+ (pi->nphy_txpwrindex[PHY_CORE_1].index_internal & 0xff);
}
return tmp;
@@ -28618,13 +28431,12 @@ void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi)
|| (wlc_phy_txpwr_ison_nphy(pi)
&&
(((u32)
- ABS(wlc_phy_txpwr_idx_cur_get_nphy(pi, 0) -
+ abs(wlc_phy_txpwr_idx_cur_get_nphy(pi, 0) -
pi->nphy_papd_tx_gain_at_last_cal[0]) >= 4)
|| ((u32)
- ABS(wlc_phy_txpwr_idx_cur_get_nphy(pi, 1) -
- pi->nphy_papd_tx_gain_at_last_cal[1]) >= 4))))) {
+ abs(wlc_phy_txpwr_idx_cur_get_nphy(pi, 1) -
+ pi->nphy_papd_tx_gain_at_last_cal[1]) >= 4)))))
wlc_phy_a4(pi, true);
- }
}
void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type)
@@ -28655,32 +28467,29 @@ void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type)
for (core = 0; core < pi->pubpi.phy_corenum;
core++)
pi->nphy_txpwr_idx[core] =
- wlc_phy_txpwr_idx_cur_get_nphy(pi,
- (u8)
- core);
+ wlc_phy_txpwr_idx_cur_get_nphy(
+ pi,
+ (u8) core);
}
}
tbl_len = 84;
tbl_offset = 64;
- for (ctr = 0; ctr < tbl_len; ctr++) {
+ for (ctr = 0; ctr < tbl_len; ctr++)
regval[ctr] = 0;
- }
wlc_phy_table_write_nphy(pi, 26, tbl_len, tbl_offset, 16,
regval);
wlc_phy_table_write_nphy(pi, 27, tbl_len, tbl_offset, 16,
regval);
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
-
+ if (NREV_GE(pi->pubpi.phy_rev, 3))
and_phy_reg(pi, 0x1e7,
(u16) (~((0x1 << 15) |
- (0x1 << 14) | (0x1 << 13))));
- } else {
+ (0x1 << 14) | (0x1 << 13))));
+ else
and_phy_reg(pi, 0x1e7,
(u16) (~((0x1 << 14) | (0x1 << 13))));
- }
if (NREV_GE(pi->pubpi.phy_rev, 3)) {
or_phy_reg(pi, 0x8f, (0x1 << 8));
@@ -28694,7 +28503,8 @@ void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type)
else if (NREV_LT(pi->pubpi.phy_rev, 2))
mod_phy_reg(pi, 0xdc, 0x00ff, 0x5a);
- if (NREV_LT(pi->pubpi.phy_rev, 2) && IS40MHZ(pi))
+ if (NREV_LT(pi->pubpi.phy_rev, 2) &&
+ pi->bw == WL_CHANSPEC_BW_40)
wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_IQSWAP_WAR,
MHF1_IQSWAP_WAR, BRCM_BAND_ALL);
@@ -28730,7 +28540,7 @@ void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type)
if (NREV_GE(pi->pubpi.phy_rev, 3)) {
if ((pi->nphy_txpwr_idx[0] != 128)
- && (pi->nphy_txpwr_idx[1] != 128)) {
+ && (pi->nphy_txpwr_idx[1] != 128))
wlc_phy_txpwr_idx_cur_set_nphy(pi,
pi->
nphy_txpwr_idx
@@ -28738,7 +28548,6 @@ void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type)
pi->
nphy_txpwr_idx
[1]);
- }
}
if (NREV_GE(pi->pubpi.phy_rev, 3)) {
@@ -28753,7 +28562,8 @@ void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type)
else if (NREV_LT(pi->pubpi.phy_rev, 2))
mod_phy_reg(pi, 0xdc, 0x00ff, 0x40);
- if (NREV_LT(pi->pubpi.phy_rev, 2) && IS40MHZ(pi))
+ if (NREV_LT(pi->pubpi.phy_rev, 2) &&
+ pi->bw == WL_CHANSPEC_BW_40)
wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_IQSWAP_WAR,
0x0, BRCM_BAND_ALL);
@@ -28799,17 +28609,14 @@ wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask, s8 txpwrindex,
for (core = 0; core < pi->pubpi.phy_corenum; core++) {
- if ((core_mask & (1 << core)) == 0) {
+ if ((core_mask & (1 << core)) == 0)
continue;
- }
txpwrctl_tbl = (core == PHY_CORE_0) ? 26 : 27;
if (txpwrindex < 0) {
- if (pi->nphy_txpwrindex[core].index < 0) {
-
+ if (pi->nphy_txpwrindex[core].index < 0)
continue;
- }
if (NREV_GE(pi->pubpi.phy_rev, 3)) {
mod_phy_reg(pi, 0x8f,
@@ -28842,29 +28649,21 @@ wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask, s8 txpwrindex,
wlc_phy_table_write_nphy(pi, 15, 1, 87, 16, &m1m2);
if (restore_cals) {
-
- wlc_phy_table_write_nphy(pi, 15, 2,
- (80 + 2 * core), 16,
- (void *)&pi->
- nphy_txpwrindex[core].
- iqcomp_a);
-
- wlc_phy_table_write_nphy(pi, 15, 1, (85 + core),
- 16,
- &pi->
- nphy_txpwrindex[core].
- locomp);
- wlc_phy_table_write_nphy(pi, 15, 1, (93 + core),
- 16,
- (void *)&pi->
- nphy_txpwrindex[core].
- locomp);
+ wlc_phy_table_write_nphy(
+ pi, 15, 2, (80 + 2 * core), 16,
+ &pi->nphy_txpwrindex[core].iqcomp_a);
+ wlc_phy_table_write_nphy(
+ pi, 15, 1, (85 + core), 16,
+ &pi->nphy_txpwrindex[core].locomp);
+ wlc_phy_table_write_nphy(
+ pi, 15, 1, (93 + core), 16,
+ &pi->nphy_txpwrindex[core].locomp);
}
wlc_phy_txpwrctrl_enable_nphy(pi, pi->nphy_txpwrctrl);
pi->nphy_txpwrindex[core].index_internal =
- pi->nphy_txpwrindex[core].index_internal_save;
+ pi->nphy_txpwrindex[core].index_internal_save;
} else {
if (pi->nphy_txpwrindex[core].index < 0) {
@@ -28879,14 +28678,13 @@ wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask, s8 txpwrindex,
AfectrlOverride);
} else {
pi->nphy_txpwrindex[core].
- AfectrlOverride =
- read_phy_reg(pi, 0xa5);
+ AfectrlOverride =
+ read_phy_reg(pi, 0xa5);
}
pi->nphy_txpwrindex[core].AfeCtrlDacGain =
- read_phy_reg(pi,
- (core ==
- PHY_CORE_0) ? 0xaa : 0xab);
+ read_phy_reg(pi, (core == PHY_CORE_0) ?
+ 0xaa : 0xab);
wlc_phy_table_read_nphy(pi, 7, 1,
(0x110 + core), 16,
@@ -28898,23 +28696,23 @@ wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask, s8 txpwrindex,
&tmpval);
tmpval >>= ((core == PHY_CORE_0) ? 8 : 0);
tmpval &= 0xff;
- pi->nphy_txpwrindex[core].bbmult =
- (u8) tmpval;
+ pi->nphy_txpwrindex[core].bbmult = (u8) tmpval;
wlc_phy_table_read_nphy(pi, 15, 2,
(80 + 2 * core), 16,
- (void *)&pi->
+ &pi->
nphy_txpwrindex[core].
iqcomp_a);
wlc_phy_table_read_nphy(pi, 15, 1, (85 + core),
16,
- (void *)&pi->
+ &pi->
nphy_txpwrindex[core].
locomp);
pi->nphy_txpwrindex[core].index_internal_save =
- pi->nphy_txpwrindex[core].index_internal;
+ pi->nphy_txpwrindex[core].
+ index_internal;
}
tx_pwr_ctrl_state = pi->nphy_txpwrctrl;
@@ -28927,22 +28725,22 @@ wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask, s8 txpwrindex,
(tx_ind0 + txpwrindex), 32,
&txgain);
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
- rad_gain =
- (txgain >> 16) & ((1 << (32 - 16 + 1)) - 1);
- } else {
- rad_gain =
- (txgain >> 16) & ((1 << (28 - 16 + 1)) - 1);
- }
+ if (NREV_GE(pi->pubpi.phy_rev, 3))
+ rad_gain = (txgain >> 16) &
+ ((1 << (32 - 16 + 1)) - 1);
+ else
+ rad_gain = (txgain >> 16) &
+ ((1 << (28 - 16 + 1)) - 1);
+
dac_gain = (txgain >> 8) & ((1 << (13 - 8 + 1)) - 1);
bbmult = (txgain >> 0) & ((1 << (7 - 0 + 1)) - 1);
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 3))
mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0x8f :
0xa5), (0x1 << 8), (0x1 << 8));
- } else {
+ else
mod_phy_reg(pi, 0xa5, (0x1 << 14), (0x1 << 14));
- }
+
write_phy_reg(pi, (core == PHY_CORE_0) ?
0xaa : 0xab, dac_gain);
@@ -28951,9 +28749,8 @@ wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask, s8 txpwrindex,
wlc_phy_table_read_nphy(pi, 15, 1, 87, 16, &m1m2);
m1m2 &= ((core == PHY_CORE_0) ? 0x00ff : 0xff00);
- m1m2 |=
- ((core ==
- PHY_CORE_0) ? (bbmult << 8) : (bbmult << 0));
+ m1m2 |= ((core == PHY_CORE_0) ?
+ (bbmult << 8) : (bbmult << 0));
wlc_phy_table_write_nphy(pi, 15, 1, 87, 16, &m1m2);
@@ -28974,23 +28771,20 @@ wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask, s8 txpwrindex,
wlc_phy_table_read_nphy(pi, txpwrctl_tbl, 1,
(lo_ind0 + txpwrindex), 32,
&locomp);
- if (restore_cals) {
+ if (restore_cals)
wlc_phy_table_write_nphy(pi, 15, 1, (85 + core),
16, &locomp);
- }
if (NREV_IS(pi->pubpi.phy_rev, 1))
wlapi_bmac_phyclk_fgc(pi->sh->physhim, OFF);
if (PHY_IPA(pi)) {
wlc_phy_table_read_nphy(pi,
- (core ==
- PHY_CORE_0 ?
- NPHY_TBL_ID_CORE1TXPWRCTL
- :
- NPHY_TBL_ID_CORE2TXPWRCTL),
- 1, 576 + txpwrindex, 32,
- &rfpwr_offset);
+ (core == PHY_CORE_0 ?
+ NPHY_TBL_ID_CORE1TXPWRCTL :
+ NPHY_TBL_ID_CORE2TXPWRCTL),
+ 1, 576 + txpwrindex, 32,
+ &rfpwr_offset);
mod_phy_reg(pi, (core == PHY_CORE_0) ? 0x297 :
0x29b, (0x1ff << 4),
@@ -29046,7 +28840,7 @@ void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi, bool enable)
if (enable) {
if (pi->nphy_deaf_count == 0) {
pi->classifier_state =
- wlc_phy_classifier_nphy(pi, 0, 0);
+ wlc_phy_classifier_nphy(pi, 0, 0);
wlc_phy_classifier_nphy(pi, (0x7 << 0), 4);
wlc_phy_clip_det_nphy(pi, 0, pi->clip_state);
wlc_phy_clip_det_nphy(pi, 1, clip_off);
@@ -29074,9 +28868,9 @@ void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode)
if (mode) {
if (pi->nphy_deaf_count == 0)
wlc_phy_stay_in_carriersearch_nphy(pi, true);
- } else {
- if (pi->nphy_deaf_count > 0)
- wlc_phy_stay_in_carriersearch_nphy(pi, false);
+ } else if (pi->nphy_deaf_count > 0) {
+ wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
+
wlapi_enable_mac(pi->sh->physhim);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_qmath.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_qmath.c
new file mode 100644
index 00000000000..faf1ebe7606
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_qmath.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "phy_qmath.h"
+
+/*
+ * Description: This function make 16 bit unsigned multiplication.
+ * To fit the output into 16 bits the 32 bit multiplication result is right
+ * shifted by 16 bits.
+ */
+u16 qm_mulu16(u16 op1, u16 op2)
+{
+ return (u16) (((u32) op1 * (u32) op2) >> 16);
+}
+
+/*
+ * Description: This function make 16 bit multiplication and return the result
+ * in 16 bits. To fit the multiplication result into 16 bits the multiplication
+ * result is right shifted by 15 bits. Right shifting 15 bits instead of 16 bits
+ * is done to remove the extra sign bit formed due to the multiplication.
+ * When both the 16bit inputs are 0x8000 then the output is saturated to
+ * 0x7fffffff.
+ */
+s16 qm_muls16(s16 op1, s16 op2)
+{
+ s32 result;
+ if (op1 == (s16) 0x8000 && op2 == (s16) 0x8000)
+ result = 0x7fffffff;
+ else
+ result = ((s32) (op1) * (s32) (op2));
+
+ return (s16) (result >> 15);
+}
+
+/*
+ * Description: This function add two 32 bit numbers and return the 32bit
+ * result. If the result overflow 32 bits, the output will be saturated to
+ * 32bits.
+ */
+s32 qm_add32(s32 op1, s32 op2)
+{
+ s32 result;
+ result = op1 + op2;
+ if (op1 < 0 && op2 < 0 && result > 0)
+ result = 0x80000000;
+ else if (op1 > 0 && op2 > 0 && result < 0)
+ result = 0x7fffffff;
+
+ return result;
+}
+
+/*
+ * Description: This function add two 16 bit numbers and return the 16bit
+ * result. If the result overflow 16 bits, the output will be saturated to
+ * 16bits.
+ */
+s16 qm_add16(s16 op1, s16 op2)
+{
+ s16 result;
+ s32 temp = (s32) op1 + (s32) op2;
+ if (temp > (s32) 0x7fff)
+ result = (s16) 0x7fff;
+ else if (temp < (s32) 0xffff8000)
+ result = (s16) 0xffff8000;
+ else
+ result = (s16) temp;
+
+ return result;
+}
+
+/*
+ * Description: This function make 16 bit subtraction and return the 16bit
+ * result. If the result overflow 16 bits, the output will be saturated to
+ * 16bits.
+ */
+s16 qm_sub16(s16 op1, s16 op2)
+{
+ s16 result;
+ s32 temp = (s32) op1 - (s32) op2;
+ if (temp > (s32) 0x7fff)
+ result = (s16) 0x7fff;
+ else if (temp < (s32) 0xffff8000)
+ result = (s16) 0xffff8000;
+ else
+ result = (s16) temp;
+
+ return result;
+}
+
+/*
+ * Description: This function make a 32 bit saturated left shift when the
+ * specified shift is +ve. This function will make a 32 bit right shift when
+ * the specified shift is -ve. This function return the result after shifting
+ * operation.
+ */
+s32 qm_shl32(s32 op, int shift)
+{
+ int i;
+ s32 result;
+ result = op;
+ if (shift > 31)
+ shift = 31;
+ else if (shift < -31)
+ shift = -31;
+ if (shift >= 0) {
+ for (i = 0; i < shift; i++)
+ result = qm_add32(result, result);
+ } else {
+ result = result >> (-shift);
+ }
+
+ return result;
+}
+
+/*
+ * Description: This function make a 16 bit saturated left shift when the
+ * specified shift is +ve. This function will make a 16 bit right shift when
+ * the specified shift is -ve. This function return the result after shifting
+ * operation.
+ */
+s16 qm_shl16(s16 op, int shift)
+{
+ int i;
+ s16 result;
+ result = op;
+ if (shift > 15)
+ shift = 15;
+ else if (shift < -15)
+ shift = -15;
+ if (shift > 0) {
+ for (i = 0; i < shift; i++)
+ result = qm_add16(result, result);
+ } else {
+ result = result >> (-shift);
+ }
+
+ return result;
+}
+
+/*
+ * Description: This function make a 16 bit right shift when shift is +ve.
+ * This function make a 16 bit saturated left shift when shift is -ve. This
+ * function return the result of the shift operation.
+ */
+s16 qm_shr16(s16 op, int shift)
+{
+ return qm_shl16(op, -shift);
+}
+
+/*
+ * Description: This function return the number of redundant sign bits in a
+ * 32 bit number. Example: qm_norm32(0x00000080) = 23
+ */
+s16 qm_norm32(s32 op)
+{
+ u16 u16extraSignBits;
+ if (op == 0) {
+ return 31;
+ } else {
+ u16extraSignBits = 0;
+ while ((op >> 31) == (op >> 30)) {
+ u16extraSignBits++;
+ op = op << 1;
+ }
+ }
+ return u16extraSignBits;
+}
+
+/* This table is log2(1+(i/32)) where i=[0:1:31], in q.15 format */
+static const s16 log_table[] = {
+ 0,
+ 1455,
+ 2866,
+ 4236,
+ 5568,
+ 6863,
+ 8124,
+ 9352,
+ 10549,
+ 11716,
+ 12855,
+ 13968,
+ 15055,
+ 16117,
+ 17156,
+ 18173,
+ 19168,
+ 20143,
+ 21098,
+ 22034,
+ 22952,
+ 23852,
+ 24736,
+ 25604,
+ 26455,
+ 27292,
+ 28114,
+ 28922,
+ 29717,
+ 30498,
+ 31267,
+ 32024
+};
+
+#define LOG_TABLE_SIZE 32 /* log_table size */
+#define LOG2_LOG_TABLE_SIZE 5 /* log2(log_table size) */
+#define Q_LOG_TABLE 15 /* qformat of log_table */
+#define LOG10_2 19728 /* log10(2) in q.16 */
+
+/*
+ * Description:
+ * This routine takes the input number N and its q format qN and compute
+ * the log10(N). This routine first normalizes the input no N. Then N is in
+ * mag*(2^x) format. mag is any number in the range 2^30-(2^31 - 1).
+ * Then log2(mag * 2^x) = log2(mag) + x is computed. From that
+ * log10(mag * 2^x) = log2(mag * 2^x) * log10(2) is computed.
+ * This routine looks the log2 value in the table considering
+ * LOG2_LOG_TABLE_SIZE+1 MSBs. As the MSB is always 1, only next
+ * LOG2_OF_LOG_TABLE_SIZE MSBs are used for table lookup. Next 16 MSBs are used
+ * for interpolation.
+ * Inputs:
+ * N - number to which log10 has to be found.
+ * qN - q format of N
+ * log10N - address where log10(N) will be written.
+ * qLog10N - address where log10N qformat will be written.
+ * Note/Problem:
+ * For accurate results input should be in normalized or near normalized form.
+ */
+void qm_log10(s32 N, s16 qN, s16 *log10N, s16 *qLog10N)
+{
+ s16 s16norm, s16tableIndex, s16errorApproximation;
+ u16 u16offset;
+ s32 s32log;
+
+ /* normalize the N. */
+ s16norm = qm_norm32(N);
+ N = N << s16norm;
+
+ /* The qformat of N after normalization.
+ * -30 is added to treat the no as between 1.0 to 2.0
+ * i.e. after adding the -30 to the qformat the decimal point will be
+ * just rigtht of the MSB. (i.e. after sign bit and 1st MSB). i.e.
+ * at the right side of 30th bit.
+ */
+ qN = qN + s16norm - 30;
+
+ /* take the table index as the LOG2_OF_LOG_TABLE_SIZE bits right of the
+ * MSB */
+ s16tableIndex = (s16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE)));
+
+ /* remove the MSB. the MSB is always 1 after normalization. */
+ s16tableIndex =
+ s16tableIndex & (s16) ((1 << LOG2_LOG_TABLE_SIZE) - 1);
+
+ /* remove the (1+LOG2_OF_LOG_TABLE_SIZE) MSBs in the N. */
+ N = N & ((1 << (32 - (2 + LOG2_LOG_TABLE_SIZE))) - 1);
+
+ /* take the offset as the 16 MSBS after table index.
+ */
+ u16offset = (u16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE + 16)));
+
+ /* look the log value in the table. */
+ s32log = log_table[s16tableIndex]; /* q.15 format */
+
+ /* interpolate using the offset. q.15 format. */
+ s16errorApproximation = (s16) qm_mulu16(u16offset,
+ (u16) (log_table[s16tableIndex + 1] -
+ log_table[s16tableIndex]));
+
+ /* q.15 format */
+ s32log = qm_add16((s16) s32log, s16errorApproximation);
+
+ /* adjust for the qformat of the N as
+ * log2(mag * 2^x) = log2(mag) + x
+ */
+ s32log = qm_add32(s32log, ((s32) -qN) << 15); /* q.15 format */
+
+ /* normalize the result. */
+ s16norm = qm_norm32(s32log);
+
+ /* bring all the important bits into lower 16 bits */
+ /* q.15+s16norm-16 format */
+ s32log = qm_shl32(s32log, s16norm - 16);
+
+ /* compute the log10(N) by multiplying log2(N) with log10(2).
+ * as log10(mag * 2^x) = log2(mag * 2^x) * log10(2)
+ * log10N in q.15+s16norm-16+1 (LOG10_2 is in q.16)
+ */
+ *log10N = qm_muls16((s16) s32log, (s16) LOG10_2);
+
+ /* write the q format of the result. */
+ *qLog10N = 15 + s16norm - 16 + 1;
+
+ return;
+}
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_qmath.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_qmath.h
index 20e3783f921..20e3783f921 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/phy_qmath.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_qmath.h
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_radio.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_radio.h
index c3a675455ff..c3a675455ff 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/phy_radio.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_radio.h
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phyreg_n.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phyreg_n.h
index a97c3a79947..a97c3a79947 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/phyreg_n.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phyreg_n.h
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
new file mode 100644
index 00000000000..622c01ca72c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
@@ -0,0 +1,3250 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <types.h>
+#include "phytbl_lcn.h"
+
+static const u32 dot11lcn_gain_tbl_rev0[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000004,
+ 0x00000000,
+ 0x00000004,
+ 0x00000008,
+ 0x00000001,
+ 0x00000005,
+ 0x00000009,
+ 0x0000000d,
+ 0x0000004d,
+ 0x0000008d,
+ 0x0000000d,
+ 0x0000004d,
+ 0x0000008d,
+ 0x000000cd,
+ 0x0000004f,
+ 0x0000008f,
+ 0x000000cf,
+ 0x000000d3,
+ 0x00000113,
+ 0x00000513,
+ 0x00000913,
+ 0x00000953,
+ 0x00000d53,
+ 0x00001153,
+ 0x00001193,
+ 0x00005193,
+ 0x00009193,
+ 0x0000d193,
+ 0x00011193,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000004,
+ 0x00000000,
+ 0x00000004,
+ 0x00000008,
+ 0x00000001,
+ 0x00000005,
+ 0x00000009,
+ 0x0000000d,
+ 0x0000004d,
+ 0x0000008d,
+ 0x0000000d,
+ 0x0000004d,
+ 0x0000008d,
+ 0x000000cd,
+ 0x0000004f,
+ 0x0000008f,
+ 0x000000cf,
+ 0x000000d3,
+ 0x00000113,
+ 0x00000513,
+ 0x00000913,
+ 0x00000953,
+ 0x00000d53,
+ 0x00001153,
+ 0x00005153,
+ 0x00009153,
+ 0x0000d153,
+ 0x00011153,
+ 0x00015153,
+ 0x00019153,
+ 0x0001d153,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+static const u32 dot11lcn_gain_tbl_rev1[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000008,
+ 0x00000004,
+ 0x00000008,
+ 0x00000001,
+ 0x00000005,
+ 0x00000009,
+ 0x0000000D,
+ 0x00000011,
+ 0x00000051,
+ 0x00000091,
+ 0x00000011,
+ 0x00000051,
+ 0x00000091,
+ 0x000000d1,
+ 0x00000053,
+ 0x00000093,
+ 0x000000d3,
+ 0x000000d7,
+ 0x00000117,
+ 0x00000517,
+ 0x00000917,
+ 0x00000957,
+ 0x00000d57,
+ 0x00001157,
+ 0x00001197,
+ 0x00005197,
+ 0x00009197,
+ 0x0000d197,
+ 0x00011197,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000008,
+ 0x00000004,
+ 0x00000008,
+ 0x00000001,
+ 0x00000005,
+ 0x00000009,
+ 0x0000000D,
+ 0x00000011,
+ 0x00000051,
+ 0x00000091,
+ 0x00000011,
+ 0x00000051,
+ 0x00000091,
+ 0x000000d1,
+ 0x00000053,
+ 0x00000093,
+ 0x000000d3,
+ 0x000000d7,
+ 0x00000117,
+ 0x00000517,
+ 0x00000917,
+ 0x00000957,
+ 0x00000d57,
+ 0x00001157,
+ 0x00005157,
+ 0x00009157,
+ 0x0000d157,
+ 0x00011157,
+ 0x00015157,
+ 0x00019157,
+ 0x0001d157,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+static const u16 dot11lcn_aux_gain_idx_tbl_rev0[] = {
+ 0x0401,
+ 0x0402,
+ 0x0403,
+ 0x0404,
+ 0x0405,
+ 0x0406,
+ 0x0407,
+ 0x0408,
+ 0x0409,
+ 0x040a,
+ 0x058b,
+ 0x058c,
+ 0x058d,
+ 0x058e,
+ 0x058f,
+ 0x0090,
+ 0x0091,
+ 0x0092,
+ 0x0193,
+ 0x0194,
+ 0x0195,
+ 0x0196,
+ 0x0197,
+ 0x0198,
+ 0x0199,
+ 0x019a,
+ 0x019b,
+ 0x019c,
+ 0x019d,
+ 0x019e,
+ 0x019f,
+ 0x01a0,
+ 0x01a1,
+ 0x01a2,
+ 0x01a3,
+ 0x01a4,
+ 0x01a5,
+ 0x0000,
+};
+
+static const u32 dot11lcn_gain_idx_tbl_rev0[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x10000000,
+ 0x00000000,
+ 0x20000000,
+ 0x00000000,
+ 0x30000000,
+ 0x00000000,
+ 0x40000000,
+ 0x00000000,
+ 0x50000000,
+ 0x00000000,
+ 0x60000000,
+ 0x00000000,
+ 0x70000000,
+ 0x00000000,
+ 0x80000000,
+ 0x00000000,
+ 0x90000000,
+ 0x00000008,
+ 0xa0000000,
+ 0x00000008,
+ 0xb0000000,
+ 0x00000008,
+ 0xc0000000,
+ 0x00000008,
+ 0xd0000000,
+ 0x00000008,
+ 0xe0000000,
+ 0x00000008,
+ 0xf0000000,
+ 0x00000008,
+ 0x00000000,
+ 0x00000009,
+ 0x10000000,
+ 0x00000009,
+ 0x20000000,
+ 0x00000019,
+ 0x30000000,
+ 0x00000019,
+ 0x40000000,
+ 0x00000019,
+ 0x50000000,
+ 0x00000019,
+ 0x60000000,
+ 0x00000019,
+ 0x70000000,
+ 0x00000019,
+ 0x80000000,
+ 0x00000019,
+ 0x90000000,
+ 0x00000019,
+ 0xa0000000,
+ 0x00000019,
+ 0xb0000000,
+ 0x00000019,
+ 0xc0000000,
+ 0x00000019,
+ 0xd0000000,
+ 0x00000019,
+ 0xe0000000,
+ 0x00000019,
+ 0xf0000000,
+ 0x00000019,
+ 0x00000000,
+ 0x0000001a,
+ 0x10000000,
+ 0x0000001a,
+ 0x20000000,
+ 0x0000001a,
+ 0x30000000,
+ 0x0000001a,
+ 0x40000000,
+ 0x0000001a,
+ 0x50000000,
+ 0x00000002,
+ 0x60000000,
+ 0x00000002,
+ 0x70000000,
+ 0x00000002,
+ 0x80000000,
+ 0x00000002,
+ 0x90000000,
+ 0x00000002,
+ 0xa0000000,
+ 0x00000002,
+ 0xb0000000,
+ 0x00000002,
+ 0xc0000000,
+ 0x0000000a,
+ 0xd0000000,
+ 0x0000000a,
+ 0xe0000000,
+ 0x0000000a,
+ 0xf0000000,
+ 0x0000000a,
+ 0x00000000,
+ 0x0000000b,
+ 0x10000000,
+ 0x0000000b,
+ 0x20000000,
+ 0x0000000b,
+ 0x30000000,
+ 0x0000000b,
+ 0x40000000,
+ 0x0000000b,
+ 0x50000000,
+ 0x0000001b,
+ 0x60000000,
+ 0x0000001b,
+ 0x70000000,
+ 0x0000001b,
+ 0x80000000,
+ 0x0000001b,
+ 0x90000000,
+ 0x0000001b,
+ 0xa0000000,
+ 0x0000001b,
+ 0xb0000000,
+ 0x0000001b,
+ 0xc0000000,
+ 0x0000001b,
+ 0xd0000000,
+ 0x0000001b,
+ 0xe0000000,
+ 0x0000001b,
+ 0xf0000000,
+ 0x0000001b,
+ 0x00000000,
+ 0x0000001c,
+ 0x10000000,
+ 0x0000001c,
+ 0x20000000,
+ 0x0000001c,
+ 0x30000000,
+ 0x0000001c,
+ 0x40000000,
+ 0x0000001c,
+ 0x50000000,
+ 0x0000001c,
+ 0x60000000,
+ 0x0000001c,
+ 0x70000000,
+ 0x0000001c,
+ 0x80000000,
+ 0x0000001c,
+ 0x90000000,
+ 0x0000001c,
+};
+
+static const u16 dot11lcn_aux_gain_idx_tbl_2G[] = {
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ 0x0080,
+ 0x0081,
+ 0x0100,
+ 0x0101,
+ 0x0180,
+ 0x0181,
+ 0x0182,
+ 0x0183,
+ 0x0184,
+ 0x0185,
+ 0x0186,
+ 0x0187,
+ 0x0188,
+ 0x0285,
+ 0x0289,
+ 0x028a,
+ 0x028b,
+ 0x028c,
+ 0x028d,
+ 0x028e,
+ 0x028f,
+ 0x0290,
+ 0x0291,
+ 0x0292,
+ 0x0293,
+ 0x0294,
+ 0x0295,
+ 0x0296,
+ 0x0297,
+ 0x0298,
+ 0x0299,
+ 0x029a,
+ 0x0000
+};
+
+static const u8 dot11lcn_gain_val_tbl_2G[] = {
+ 0xfc,
+ 0x02,
+ 0x08,
+ 0x0e,
+ 0x13,
+ 0x1b,
+ 0xfc,
+ 0x02,
+ 0x08,
+ 0x0e,
+ 0x13,
+ 0x1b,
+ 0xfc,
+ 0x00,
+ 0x0c,
+ 0x03,
+ 0xeb,
+ 0xfe,
+ 0x07,
+ 0x0b,
+ 0x0f,
+ 0xfb,
+ 0xfe,
+ 0x01,
+ 0x05,
+ 0x08,
+ 0x0b,
+ 0x0e,
+ 0x11,
+ 0x14,
+ 0x17,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x03,
+ 0x06,
+ 0x09,
+ 0x0c,
+ 0x0f,
+ 0x12,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x03,
+ 0x06,
+ 0x09,
+ 0x0c,
+ 0x0f,
+ 0x12,
+ 0x15,
+ 0x18,
+ 0x1b,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00
+};
+
+static const u32 dot11lcn_gain_idx_tbl_2G[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x10000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000008,
+ 0x10000000,
+ 0x00000008,
+ 0x00000000,
+ 0x00000010,
+ 0x10000000,
+ 0x00000010,
+ 0x00000000,
+ 0x00000018,
+ 0x10000000,
+ 0x00000018,
+ 0x20000000,
+ 0x00000018,
+ 0x30000000,
+ 0x00000018,
+ 0x40000000,
+ 0x00000018,
+ 0x50000000,
+ 0x00000018,
+ 0x60000000,
+ 0x00000018,
+ 0x70000000,
+ 0x00000018,
+ 0x80000000,
+ 0x00000018,
+ 0x50000000,
+ 0x00000028,
+ 0x90000000,
+ 0x00000028,
+ 0xa0000000,
+ 0x00000028,
+ 0xb0000000,
+ 0x00000028,
+ 0xc0000000,
+ 0x00000028,
+ 0xd0000000,
+ 0x00000028,
+ 0xe0000000,
+ 0x00000028,
+ 0xf0000000,
+ 0x00000028,
+ 0x00000000,
+ 0x00000029,
+ 0x10000000,
+ 0x00000029,
+ 0x20000000,
+ 0x00000029,
+ 0x30000000,
+ 0x00000029,
+ 0x40000000,
+ 0x00000029,
+ 0x50000000,
+ 0x00000029,
+ 0x60000000,
+ 0x00000029,
+ 0x70000000,
+ 0x00000029,
+ 0x80000000,
+ 0x00000029,
+ 0x90000000,
+ 0x00000029,
+ 0xa0000000,
+ 0x00000029,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x10000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000008,
+ 0x10000000,
+ 0x00000008,
+ 0x00000000,
+ 0x00000010,
+ 0x10000000,
+ 0x00000010,
+ 0x00000000,
+ 0x00000018,
+ 0x10000000,
+ 0x00000018,
+ 0x20000000,
+ 0x00000018,
+ 0x30000000,
+ 0x00000018,
+ 0x40000000,
+ 0x00000018,
+ 0x50000000,
+ 0x00000018,
+ 0x60000000,
+ 0x00000018,
+ 0x70000000,
+ 0x00000018,
+ 0x80000000,
+ 0x00000018,
+ 0x50000000,
+ 0x00000028,
+ 0x90000000,
+ 0x00000028,
+ 0xa0000000,
+ 0x00000028,
+ 0xb0000000,
+ 0x00000028,
+ 0xc0000000,
+ 0x00000028,
+ 0xd0000000,
+ 0x00000028,
+ 0xe0000000,
+ 0x00000028,
+ 0xf0000000,
+ 0x00000028,
+ 0x00000000,
+ 0x00000029,
+ 0x10000000,
+ 0x00000029,
+ 0x20000000,
+ 0x00000029,
+ 0x30000000,
+ 0x00000029,
+ 0x40000000,
+ 0x00000029,
+ 0x50000000,
+ 0x00000029,
+ 0x60000000,
+ 0x00000029,
+ 0x70000000,
+ 0x00000029,
+ 0x80000000,
+ 0x00000029,
+ 0x90000000,
+ 0x00000029,
+ 0xa0000000,
+ 0x00000029,
+ 0xb0000000,
+ 0x00000029,
+ 0xc0000000,
+ 0x00000029,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000
+};
+
+static const u32 dot11lcn_gain_tbl_2G[] = {
+ 0x00000000,
+ 0x00000004,
+ 0x00000008,
+ 0x00000001,
+ 0x00000005,
+ 0x00000009,
+ 0x0000000d,
+ 0x0000004d,
+ 0x0000008d,
+ 0x00000049,
+ 0x00000089,
+ 0x000000c9,
+ 0x0000004b,
+ 0x0000008b,
+ 0x000000cb,
+ 0x000000cf,
+ 0x0000010f,
+ 0x0000050f,
+ 0x0000090f,
+ 0x0000094f,
+ 0x00000d4f,
+ 0x0000114f,
+ 0x0000118f,
+ 0x0000518f,
+ 0x0000918f,
+ 0x0000d18f,
+ 0x0001118f,
+ 0x0001518f,
+ 0x0001918f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000
+};
+
+static const u32 dot11lcn_gain_tbl_extlna_2G[] = {
+ 0x00000000,
+ 0x00000004,
+ 0x00000008,
+ 0x00000001,
+ 0x00000005,
+ 0x00000009,
+ 0x0000000d,
+ 0x00000003,
+ 0x00000007,
+ 0x0000000b,
+ 0x0000000f,
+ 0x0000004f,
+ 0x0000008f,
+ 0x000000cf,
+ 0x0000010f,
+ 0x0000014f,
+ 0x0000018f,
+ 0x0000058f,
+ 0x0000098f,
+ 0x00000d8f,
+ 0x00008000,
+ 0x00008004,
+ 0x00008008,
+ 0x00008001,
+ 0x00008005,
+ 0x00008009,
+ 0x0000800d,
+ 0x00008003,
+ 0x00008007,
+ 0x0000800b,
+ 0x0000800f,
+ 0x0000804f,
+ 0x0000808f,
+ 0x000080cf,
+ 0x0000810f,
+ 0x0000814f,
+ 0x0000818f,
+ 0x0000858f,
+ 0x0000898f,
+ 0x00008d8f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000
+};
+
+static const u16 dot11lcn_aux_gain_idx_tbl_extlna_2G[] = {
+ 0x0400,
+ 0x0400,
+ 0x0400,
+ 0x0400,
+ 0x0400,
+ 0x0400,
+ 0x0400,
+ 0x0400,
+ 0x0400,
+ 0x0401,
+ 0x0402,
+ 0x0403,
+ 0x0404,
+ 0x0483,
+ 0x0484,
+ 0x0485,
+ 0x0486,
+ 0x0583,
+ 0x0584,
+ 0x0585,
+ 0x0587,
+ 0x0588,
+ 0x0589,
+ 0x058a,
+ 0x0687,
+ 0x0688,
+ 0x0689,
+ 0x068a,
+ 0x068b,
+ 0x068c,
+ 0x068d,
+ 0x068e,
+ 0x068f,
+ 0x0690,
+ 0x0691,
+ 0x0692,
+ 0x0693,
+ 0x0000
+};
+
+static const u8 dot11lcn_gain_val_tbl_extlna_2G[] = {
+ 0xfc,
+ 0x02,
+ 0x08,
+ 0x0e,
+ 0x13,
+ 0x1b,
+ 0xfc,
+ 0x02,
+ 0x08,
+ 0x0e,
+ 0x13,
+ 0x1b,
+ 0xfc,
+ 0x00,
+ 0x0f,
+ 0x03,
+ 0xeb,
+ 0xfe,
+ 0x07,
+ 0x0b,
+ 0x0f,
+ 0xfb,
+ 0xfe,
+ 0x01,
+ 0x05,
+ 0x08,
+ 0x0b,
+ 0x0e,
+ 0x11,
+ 0x14,
+ 0x17,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x03,
+ 0x06,
+ 0x09,
+ 0x0c,
+ 0x0f,
+ 0x12,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x03,
+ 0x06,
+ 0x09,
+ 0x0c,
+ 0x0f,
+ 0x12,
+ 0x15,
+ 0x18,
+ 0x1b,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00
+};
+
+static const u32 dot11lcn_gain_idx_tbl_extlna_2G[] = {
+ 0x00000000,
+ 0x00000040,
+ 0x00000000,
+ 0x00000040,
+ 0x00000000,
+ 0x00000040,
+ 0x00000000,
+ 0x00000040,
+ 0x00000000,
+ 0x00000040,
+ 0x00000000,
+ 0x00000040,
+ 0x00000000,
+ 0x00000040,
+ 0x00000000,
+ 0x00000040,
+ 0x00000000,
+ 0x00000040,
+ 0x10000000,
+ 0x00000040,
+ 0x20000000,
+ 0x00000040,
+ 0x30000000,
+ 0x00000040,
+ 0x40000000,
+ 0x00000040,
+ 0x30000000,
+ 0x00000048,
+ 0x40000000,
+ 0x00000048,
+ 0x50000000,
+ 0x00000048,
+ 0x60000000,
+ 0x00000048,
+ 0x30000000,
+ 0x00000058,
+ 0x40000000,
+ 0x00000058,
+ 0x50000000,
+ 0x00000058,
+ 0x70000000,
+ 0x00000058,
+ 0x80000000,
+ 0x00000058,
+ 0x90000000,
+ 0x00000058,
+ 0xa0000000,
+ 0x00000058,
+ 0x70000000,
+ 0x00000068,
+ 0x80000000,
+ 0x00000068,
+ 0x90000000,
+ 0x00000068,
+ 0xa0000000,
+ 0x00000068,
+ 0xb0000000,
+ 0x00000068,
+ 0xc0000000,
+ 0x00000068,
+ 0xd0000000,
+ 0x00000068,
+ 0xe0000000,
+ 0x00000068,
+ 0xf0000000,
+ 0x00000068,
+ 0x00000000,
+ 0x00000069,
+ 0x10000000,
+ 0x00000069,
+ 0x20000000,
+ 0x00000069,
+ 0x30000000,
+ 0x00000069,
+ 0x40000000,
+ 0x00000041,
+ 0x40000000,
+ 0x00000041,
+ 0x40000000,
+ 0x00000041,
+ 0x40000000,
+ 0x00000041,
+ 0x40000000,
+ 0x00000041,
+ 0x40000000,
+ 0x00000041,
+ 0x40000000,
+ 0x00000041,
+ 0x40000000,
+ 0x00000041,
+ 0x40000000,
+ 0x00000041,
+ 0x50000000,
+ 0x00000041,
+ 0x60000000,
+ 0x00000041,
+ 0x70000000,
+ 0x00000041,
+ 0x80000000,
+ 0x00000041,
+ 0x70000000,
+ 0x00000049,
+ 0x80000000,
+ 0x00000049,
+ 0x90000000,
+ 0x00000049,
+ 0xa0000000,
+ 0x00000049,
+ 0x70000000,
+ 0x00000059,
+ 0x80000000,
+ 0x00000059,
+ 0x90000000,
+ 0x00000059,
+ 0xb0000000,
+ 0x00000059,
+ 0xc0000000,
+ 0x00000059,
+ 0xd0000000,
+ 0x00000059,
+ 0xe0000000,
+ 0x00000059,
+ 0xb0000000,
+ 0x00000069,
+ 0xc0000000,
+ 0x00000069,
+ 0xd0000000,
+ 0x00000069,
+ 0xe0000000,
+ 0x00000069,
+ 0xf0000000,
+ 0x00000069,
+ 0x00000000,
+ 0x0000006a,
+ 0x10000000,
+ 0x0000006a,
+ 0x20000000,
+ 0x0000006a,
+ 0x30000000,
+ 0x0000006a,
+ 0x40000000,
+ 0x0000006a,
+ 0x50000000,
+ 0x0000006a,
+ 0x60000000,
+ 0x0000006a,
+ 0x70000000,
+ 0x0000006a,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000
+};
+
+static const u32 dot11lcn_aux_gain_idx_tbl_5G[] = {
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ 0x0002,
+ 0x0003,
+ 0x0004,
+ 0x0083,
+ 0x0084,
+ 0x0085,
+ 0x0086,
+ 0x0087,
+ 0x0186,
+ 0x0187,
+ 0x0188,
+ 0x0189,
+ 0x018a,
+ 0x018b,
+ 0x018c,
+ 0x018d,
+ 0x018e,
+ 0x018f,
+ 0x0190,
+ 0x0191,
+ 0x0192,
+ 0x0193,
+ 0x0194,
+ 0x0195,
+ 0x0196,
+ 0x0197,
+ 0x0198,
+ 0x0199,
+ 0x019a,
+ 0x019b,
+ 0x019c,
+ 0x019d,
+ 0x0000
+};
+
+static const u32 dot11lcn_gain_val_tbl_5G[] = {
+ 0xf7,
+ 0xfd,
+ 0x00,
+ 0x04,
+ 0x04,
+ 0x04,
+ 0xf7,
+ 0xfd,
+ 0x00,
+ 0x04,
+ 0x04,
+ 0x04,
+ 0xf6,
+ 0x00,
+ 0x0c,
+ 0x03,
+ 0xeb,
+ 0xfe,
+ 0x06,
+ 0x0a,
+ 0x10,
+ 0x00,
+ 0x03,
+ 0x06,
+ 0x09,
+ 0x0c,
+ 0x0f,
+ 0x12,
+ 0x15,
+ 0x18,
+ 0x1b,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x03,
+ 0x06,
+ 0x09,
+ 0x0c,
+ 0x0f,
+ 0x12,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x03,
+ 0x06,
+ 0x09,
+ 0x0c,
+ 0x0f,
+ 0x12,
+ 0x15,
+ 0x18,
+ 0x1b,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00
+};
+
+static const u32 dot11lcn_gain_idx_tbl_5G[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x10000000,
+ 0x00000000,
+ 0x20000000,
+ 0x00000000,
+ 0x30000000,
+ 0x00000000,
+ 0x40000000,
+ 0x00000000,
+ 0x30000000,
+ 0x00000008,
+ 0x40000000,
+ 0x00000008,
+ 0x50000000,
+ 0x00000008,
+ 0x60000000,
+ 0x00000008,
+ 0x70000000,
+ 0x00000008,
+ 0x60000000,
+ 0x00000018,
+ 0x70000000,
+ 0x00000018,
+ 0x80000000,
+ 0x00000018,
+ 0x90000000,
+ 0x00000018,
+ 0xa0000000,
+ 0x00000018,
+ 0xb0000000,
+ 0x00000018,
+ 0xc0000000,
+ 0x00000018,
+ 0xd0000000,
+ 0x00000018,
+ 0xe0000000,
+ 0x00000018,
+ 0xf0000000,
+ 0x00000018,
+ 0x00000000,
+ 0x00000019,
+ 0x10000000,
+ 0x00000019,
+ 0x20000000,
+ 0x00000019,
+ 0x30000000,
+ 0x00000019,
+ 0x40000000,
+ 0x00000019,
+ 0x50000000,
+ 0x00000019,
+ 0x60000000,
+ 0x00000019,
+ 0x70000000,
+ 0x00000019,
+ 0x80000000,
+ 0x00000019,
+ 0x90000000,
+ 0x00000019,
+ 0xa0000000,
+ 0x00000019,
+ 0xb0000000,
+ 0x00000019,
+ 0xc0000000,
+ 0x00000019,
+ 0xd0000000,
+ 0x00000019,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000
+};
+
+static const u32 dot11lcn_gain_tbl_5G[] = {
+ 0x00000000,
+ 0x00000040,
+ 0x00000080,
+ 0x00000001,
+ 0x00000005,
+ 0x00000009,
+ 0x0000000d,
+ 0x00000011,
+ 0x00000015,
+ 0x00000055,
+ 0x00000095,
+ 0x00000017,
+ 0x0000001b,
+ 0x0000005b,
+ 0x0000009b,
+ 0x000000db,
+ 0x0000011b,
+ 0x0000015b,
+ 0x0000019b,
+ 0x0000059b,
+ 0x0000099b,
+ 0x00000d9b,
+ 0x0000119b,
+ 0x0000519b,
+ 0x0000919b,
+ 0x0000d19b,
+ 0x0001119b,
+ 0x0001519b,
+ 0x0001919b,
+ 0x0001d19b,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000
+};
+
+const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[] = {
+ {&dot11lcn_gain_tbl_rev0,
+ sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18,
+ 0, 32}
+ ,
+ {&dot11lcn_aux_gain_idx_tbl_rev0,
+ sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
+ sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
+ ,
+ {&dot11lcn_gain_idx_tbl_rev0,
+ sizeof(dot11lcn_gain_idx_tbl_rev0) /
+ sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
+ ,
+};
+
+static const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev1[] = {
+ {&dot11lcn_gain_tbl_rev1,
+ sizeof(dot11lcn_gain_tbl_rev1) / sizeof(dot11lcn_gain_tbl_rev1[0]), 18,
+ 0, 32}
+ ,
+ {&dot11lcn_aux_gain_idx_tbl_rev0,
+ sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
+ sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
+ ,
+ {&dot11lcn_gain_idx_tbl_rev0,
+ sizeof(dot11lcn_gain_idx_tbl_rev0) /
+ sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
+ ,
+};
+
+const struct phytbl_info dot11lcnphytbl_rx_gain_info_2G_rev2[] = {
+ {&dot11lcn_gain_tbl_2G,
+ sizeof(dot11lcn_gain_tbl_2G) / sizeof(dot11lcn_gain_tbl_2G[0]), 18, 0,
+ 32}
+ ,
+ {&dot11lcn_aux_gain_idx_tbl_2G,
+ sizeof(dot11lcn_aux_gain_idx_tbl_2G) /
+ sizeof(dot11lcn_aux_gain_idx_tbl_2G[0]), 14, 0, 16}
+ ,
+ {&dot11lcn_gain_idx_tbl_2G,
+ sizeof(dot11lcn_gain_idx_tbl_2G) / sizeof(dot11lcn_gain_idx_tbl_2G[0]),
+ 13, 0, 32}
+ ,
+ {&dot11lcn_gain_val_tbl_2G,
+ sizeof(dot11lcn_gain_val_tbl_2G) / sizeof(dot11lcn_gain_val_tbl_2G[0]),
+ 17, 0, 8}
+};
+
+const struct phytbl_info dot11lcnphytbl_rx_gain_info_5G_rev2[] = {
+ {&dot11lcn_gain_tbl_5G,
+ sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0,
+ 32}
+ ,
+ {&dot11lcn_aux_gain_idx_tbl_5G,
+ sizeof(dot11lcn_aux_gain_idx_tbl_5G) /
+ sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16}
+ ,
+ {&dot11lcn_gain_idx_tbl_5G,
+ sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]),
+ 13, 0, 32}
+ ,
+ {&dot11lcn_gain_val_tbl_5G,
+ sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]),
+ 17, 0, 8}
+};
+
+const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[] = {
+ {&dot11lcn_gain_tbl_extlna_2G,
+ sizeof(dot11lcn_gain_tbl_extlna_2G) /
+ sizeof(dot11lcn_gain_tbl_extlna_2G[0]), 18, 0, 32}
+ ,
+ {&dot11lcn_aux_gain_idx_tbl_extlna_2G,
+ sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G) /
+ sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G[0]), 14, 0, 16}
+ ,
+ {&dot11lcn_gain_idx_tbl_extlna_2G,
+ sizeof(dot11lcn_gain_idx_tbl_extlna_2G) /
+ sizeof(dot11lcn_gain_idx_tbl_extlna_2G[0]), 13, 0, 32}
+ ,
+ {&dot11lcn_gain_val_tbl_extlna_2G,
+ sizeof(dot11lcn_gain_val_tbl_extlna_2G) /
+ sizeof(dot11lcn_gain_val_tbl_extlna_2G[0]), 17, 0, 8}
+};
+
+const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[] = {
+ {&dot11lcn_gain_tbl_5G,
+ sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0,
+ 32}
+ ,
+ {&dot11lcn_aux_gain_idx_tbl_5G,
+ sizeof(dot11lcn_aux_gain_idx_tbl_5G) /
+ sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16}
+ ,
+ {&dot11lcn_gain_idx_tbl_5G,
+ sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]),
+ 13, 0, 32}
+ ,
+ {&dot11lcn_gain_val_tbl_5G,
+ sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]),
+ 17, 0, 8}
+};
+
+const u32 dot11lcnphytbl_rx_gain_info_sz_rev0 =
+ sizeof(dot11lcnphytbl_rx_gain_info_rev0) /
+ sizeof(dot11lcnphytbl_rx_gain_info_rev0[0]);
+
+const u32 dot11lcnphytbl_rx_gain_info_2G_rev2_sz =
+ sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2) /
+ sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2[0]);
+
+const u32 dot11lcnphytbl_rx_gain_info_5G_rev2_sz =
+ sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2) /
+ sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2[0]);
+
+static const u16 dot11lcn_min_sig_sq_tbl_rev0[] = {
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+ 0x014d,
+};
+
+static const u16 dot11lcn_noise_scale_tbl_rev0[] = {
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+};
+
+static const u32 dot11lcn_fltr_ctrl_tbl_rev0[] = {
+ 0x000141f8,
+ 0x000021f8,
+ 0x000021fb,
+ 0x000041fb,
+ 0x0001fe4b,
+ 0x0000217b,
+ 0x00002133,
+ 0x000040eb,
+ 0x0001fea3,
+ 0x0000024b,
+};
+
+static const u32 dot11lcn_ps_ctrl_tbl_rev0[] = {
+ 0x00100001,
+ 0x00200010,
+ 0x00300001,
+ 0x00400010,
+ 0x00500022,
+ 0x00600122,
+ 0x00700222,
+ 0x00800322,
+ 0x00900422,
+ 0x00a00522,
+ 0x00b00622,
+ 0x00c00722,
+ 0x00d00822,
+ 0x00f00922,
+ 0x00100a22,
+ 0x00200b22,
+ 0x00300c22,
+ 0x00400d22,
+ 0x00500e22,
+ 0x00600f22,
+};
+
+static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo[] = {
+ 0x0007,
+ 0x0005,
+ 0x0006,
+ 0x0004,
+ 0x0007,
+ 0x0005,
+ 0x0006,
+ 0x0004,
+ 0x0007,
+ 0x0005,
+ 0x0006,
+ 0x0004,
+ 0x0007,
+ 0x0005,
+ 0x0006,
+ 0x0004,
+ 0x000b,
+ 0x000b,
+ 0x000a,
+ 0x000a,
+ 0x000b,
+ 0x000b,
+ 0x000a,
+ 0x000a,
+ 0x000b,
+ 0x000b,
+ 0x000a,
+ 0x000a,
+ 0x000b,
+ 0x000b,
+ 0x000a,
+ 0x000a,
+ 0x0007,
+ 0x0005,
+ 0x0006,
+ 0x0004,
+ 0x0007,
+ 0x0005,
+ 0x0006,
+ 0x0004,
+ 0x0007,
+ 0x0005,
+ 0x0006,
+ 0x0004,
+ 0x0007,
+ 0x0005,
+ 0x0006,
+ 0x0004,
+ 0x000b,
+ 0x000b,
+ 0x000a,
+ 0x000a,
+ 0x000b,
+ 0x000b,
+ 0x000a,
+ 0x000a,
+ 0x000b,
+ 0x000b,
+ 0x000a,
+ 0x000a,
+ 0x000b,
+ 0x000b,
+ 0x000a,
+ 0x000a,
+
+};
+
+static const u16 dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0[] = {
+ 0x0007,
+ 0x0005,
+ 0x0002,
+ 0x0000,
+ 0x0007,
+ 0x0005,
+ 0x0002,
+ 0x0000,
+ 0x0007,
+ 0x0005,
+ 0x0002,
+ 0x0000,
+ 0x0007,
+ 0x0005,
+ 0x0002,
+ 0x0000,
+ 0x0007,
+ 0x0007,
+ 0x0002,
+ 0x0002,
+ 0x0007,
+ 0x0007,
+ 0x0002,
+ 0x0002,
+ 0x0007,
+ 0x0007,
+ 0x0002,
+ 0x0002,
+ 0x0007,
+ 0x0007,
+ 0x0002,
+ 0x0002,
+ 0x0007,
+ 0x0005,
+ 0x0002,
+ 0x0000,
+ 0x0007,
+ 0x0005,
+ 0x0002,
+ 0x0000,
+ 0x0007,
+ 0x0005,
+ 0x0002,
+ 0x0000,
+ 0x0007,
+ 0x0005,
+ 0x0002,
+ 0x0000,
+ 0x0007,
+ 0x0007,
+ 0x0002,
+ 0x0002,
+ 0x0007,
+ 0x0007,
+ 0x0002,
+ 0x0002,
+ 0x0007,
+ 0x0007,
+ 0x0002,
+ 0x0002,
+ 0x0007,
+ 0x0007,
+ 0x0002,
+ 0x0002,
+};
+
+static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = {
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+ 0x0002,
+ 0x0008,
+ 0x0004,
+ 0x0001,
+};
+
+static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+ 0x000a,
+ 0x0009,
+ 0x0006,
+ 0x0005,
+};
+
+static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+ 0x0004,
+ 0x0004,
+ 0x0002,
+ 0x0002,
+};
+
+static const u8 dot11lcn_nf_table_rev0[] = {
+ 0x5f,
+ 0x36,
+ 0x29,
+ 0x1f,
+ 0x5f,
+ 0x36,
+ 0x29,
+ 0x1f,
+ 0x5f,
+ 0x36,
+ 0x29,
+ 0x1f,
+ 0x5f,
+ 0x36,
+ 0x29,
+ 0x1f,
+};
+
+static const u8 dot11lcn_gain_val_tbl_rev0[] = {
+ 0x09,
+ 0x0f,
+ 0x14,
+ 0x18,
+ 0xfe,
+ 0x07,
+ 0x0b,
+ 0x0f,
+ 0xfb,
+ 0xfe,
+ 0x01,
+ 0x05,
+ 0x08,
+ 0x0b,
+ 0x0e,
+ 0x11,
+ 0x14,
+ 0x17,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x03,
+ 0x06,
+ 0x09,
+ 0x0c,
+ 0x0f,
+ 0x12,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x03,
+ 0x06,
+ 0x09,
+ 0x0c,
+ 0x0f,
+ 0x12,
+ 0x15,
+ 0x18,
+ 0x1b,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x03,
+ 0xeb,
+ 0x00,
+ 0x00,
+};
+
+static const u8 dot11lcn_spur_tbl_rev0[] = {
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x02,
+ 0x03,
+ 0x01,
+ 0x03,
+ 0x02,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x02,
+ 0x03,
+ 0x01,
+ 0x03,
+ 0x02,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+ 0x01,
+};
+
+static const u16 dot11lcn_unsup_mcs_tbl_rev0[] = {
+ 0x001a,
+ 0x0034,
+ 0x004e,
+ 0x0068,
+ 0x009c,
+ 0x00d0,
+ 0x00ea,
+ 0x0104,
+ 0x0034,
+ 0x0068,
+ 0x009c,
+ 0x00d0,
+ 0x0138,
+ 0x01a0,
+ 0x01d4,
+ 0x0208,
+ 0x004e,
+ 0x009c,
+ 0x00ea,
+ 0x0138,
+ 0x01d4,
+ 0x0270,
+ 0x02be,
+ 0x030c,
+ 0x0068,
+ 0x00d0,
+ 0x0138,
+ 0x01a0,
+ 0x0270,
+ 0x0340,
+ 0x03a8,
+ 0x0410,
+ 0x0018,
+ 0x009c,
+ 0x00d0,
+ 0x0104,
+ 0x00ea,
+ 0x0138,
+ 0x0186,
+ 0x00d0,
+ 0x0104,
+ 0x0104,
+ 0x0138,
+ 0x016c,
+ 0x016c,
+ 0x01a0,
+ 0x0138,
+ 0x0186,
+ 0x0186,
+ 0x01d4,
+ 0x0222,
+ 0x0222,
+ 0x0270,
+ 0x0104,
+ 0x0138,
+ 0x016c,
+ 0x0138,
+ 0x016c,
+ 0x01a0,
+ 0x01d4,
+ 0x01a0,
+ 0x01d4,
+ 0x0208,
+ 0x0208,
+ 0x023c,
+ 0x0186,
+ 0x01d4,
+ 0x0222,
+ 0x01d4,
+ 0x0222,
+ 0x0270,
+ 0x02be,
+ 0x0270,
+ 0x02be,
+ 0x030c,
+ 0x030c,
+ 0x035a,
+ 0x0036,
+ 0x006c,
+ 0x00a2,
+ 0x00d8,
+ 0x0144,
+ 0x01b0,
+ 0x01e6,
+ 0x021c,
+ 0x006c,
+ 0x00d8,
+ 0x0144,
+ 0x01b0,
+ 0x0288,
+ 0x0360,
+ 0x03cc,
+ 0x0438,
+ 0x00a2,
+ 0x0144,
+ 0x01e6,
+ 0x0288,
+ 0x03cc,
+ 0x0510,
+ 0x05b2,
+ 0x0654,
+ 0x00d8,
+ 0x01b0,
+ 0x0288,
+ 0x0360,
+ 0x0510,
+ 0x06c0,
+ 0x0798,
+ 0x0870,
+ 0x0018,
+ 0x0144,
+ 0x01b0,
+ 0x021c,
+ 0x01e6,
+ 0x0288,
+ 0x032a,
+ 0x01b0,
+ 0x021c,
+ 0x021c,
+ 0x0288,
+ 0x02f4,
+ 0x02f4,
+ 0x0360,
+ 0x0288,
+ 0x032a,
+ 0x032a,
+ 0x03cc,
+ 0x046e,
+ 0x046e,
+ 0x0510,
+ 0x021c,
+ 0x0288,
+ 0x02f4,
+ 0x0288,
+ 0x02f4,
+ 0x0360,
+ 0x03cc,
+ 0x0360,
+ 0x03cc,
+ 0x0438,
+ 0x0438,
+ 0x04a4,
+ 0x032a,
+ 0x03cc,
+ 0x046e,
+ 0x03cc,
+ 0x046e,
+ 0x0510,
+ 0x05b2,
+ 0x0510,
+ 0x05b2,
+ 0x0654,
+ 0x0654,
+ 0x06f6,
+};
+
+static const u16 dot11lcn_iq_local_tbl_rev0[] = {
+ 0x0200,
+ 0x0300,
+ 0x0400,
+ 0x0600,
+ 0x0800,
+ 0x0b00,
+ 0x1000,
+ 0x1001,
+ 0x1002,
+ 0x1003,
+ 0x1004,
+ 0x1005,
+ 0x1006,
+ 0x1007,
+ 0x1707,
+ 0x2007,
+ 0x2d07,
+ 0x4007,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0200,
+ 0x0300,
+ 0x0400,
+ 0x0600,
+ 0x0800,
+ 0x0b00,
+ 0x1000,
+ 0x1001,
+ 0x1002,
+ 0x1003,
+ 0x1004,
+ 0x1005,
+ 0x1006,
+ 0x1007,
+ 0x1707,
+ 0x2007,
+ 0x2d07,
+ 0x4007,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x4000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+};
+
+static const u32 dot11lcn_papd_compdelta_tbl_rev0[] = {
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+ 0x00080000,
+};
+
+const struct phytbl_info dot11lcnphytbl_info_rev0[] = {
+ {&dot11lcn_min_sig_sq_tbl_rev0,
+ sizeof(dot11lcn_min_sig_sq_tbl_rev0) /
+ sizeof(dot11lcn_min_sig_sq_tbl_rev0[0]), 2, 0, 16}
+ ,
+ {&dot11lcn_noise_scale_tbl_rev0,
+ sizeof(dot11lcn_noise_scale_tbl_rev0) /
+ sizeof(dot11lcn_noise_scale_tbl_rev0[0]), 1, 0, 16}
+ ,
+ {&dot11lcn_fltr_ctrl_tbl_rev0,
+ sizeof(dot11lcn_fltr_ctrl_tbl_rev0) /
+ sizeof(dot11lcn_fltr_ctrl_tbl_rev0[0]), 11, 0, 32}
+ ,
+ {&dot11lcn_ps_ctrl_tbl_rev0,
+ sizeof(dot11lcn_ps_ctrl_tbl_rev0) /
+ sizeof(dot11lcn_ps_ctrl_tbl_rev0[0]), 12, 0, 32}
+ ,
+ {&dot11lcn_gain_idx_tbl_rev0,
+ sizeof(dot11lcn_gain_idx_tbl_rev0) /
+ sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
+ ,
+ {&dot11lcn_aux_gain_idx_tbl_rev0,
+ sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
+ sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
+ ,
+ {&dot11lcn_sw_ctrl_tbl_rev0,
+ sizeof(dot11lcn_sw_ctrl_tbl_rev0) /
+ sizeof(dot11lcn_sw_ctrl_tbl_rev0[0]), 15, 0, 16}
+ ,
+ {&dot11lcn_nf_table_rev0,
+ sizeof(dot11lcn_nf_table_rev0) / sizeof(dot11lcn_nf_table_rev0[0]), 16,
+ 0, 8}
+ ,
+ {&dot11lcn_gain_val_tbl_rev0,
+ sizeof(dot11lcn_gain_val_tbl_rev0) /
+ sizeof(dot11lcn_gain_val_tbl_rev0[0]), 17, 0, 8}
+ ,
+ {&dot11lcn_gain_tbl_rev0,
+ sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18,
+ 0, 32}
+ ,
+ {&dot11lcn_spur_tbl_rev0,
+ sizeof(dot11lcn_spur_tbl_rev0) / sizeof(dot11lcn_spur_tbl_rev0[0]), 20,
+ 0, 8}
+ ,
+ {&dot11lcn_unsup_mcs_tbl_rev0,
+ sizeof(dot11lcn_unsup_mcs_tbl_rev0) /
+ sizeof(dot11lcn_unsup_mcs_tbl_rev0[0]), 23, 0, 16}
+ ,
+ {&dot11lcn_iq_local_tbl_rev0,
+ sizeof(dot11lcn_iq_local_tbl_rev0) /
+ sizeof(dot11lcn_iq_local_tbl_rev0[0]), 0, 0, 16}
+ ,
+ {&dot11lcn_papd_compdelta_tbl_rev0,
+ sizeof(dot11lcn_papd_compdelta_tbl_rev0) /
+ sizeof(dot11lcn_papd_compdelta_tbl_rev0[0]), 24, 0, 32}
+ ,
+};
+
+const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313 = {
+ &dot11lcn_sw_ctrl_tbl_4313_rev0,
+ sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0) /
+ sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0[0]), 15, 0, 16
+};
+
+const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa = {
+ &dot11lcn_sw_ctrl_tbl_4313_epa_rev0,
+ sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0) /
+ sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0[0]), 15, 0, 16
+};
+
+const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa = {
+ &dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo,
+ sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo) /
+ sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo[0]), 15, 0, 16
+};
+
+const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250 = {
+ &dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0,
+ sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0) /
+ sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0[0]), 15, 0, 16
+};
+
+const u32 dot11lcnphytbl_info_sz_rev0 =
+ sizeof(dot11lcnphytbl_info_rev0) / sizeof(dot11lcnphytbl_info_rev0[0]);
+
+const struct lcnphy_tx_gain_tbl_entry
+dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = {
+ {3, 0, 31, 0, 72},
+ {3, 0, 31, 0, 70},
+ {3, 0, 31, 0, 68},
+ {3, 0, 30, 0, 67},
+ {3, 0, 29, 0, 68},
+ {3, 0, 28, 0, 68},
+ {3, 0, 27, 0, 69},
+ {3, 0, 26, 0, 70},
+ {3, 0, 25, 0, 70},
+ {3, 0, 24, 0, 71},
+ {3, 0, 23, 0, 72},
+ {3, 0, 23, 0, 70},
+ {3, 0, 22, 0, 71},
+ {3, 0, 21, 0, 72},
+ {3, 0, 21, 0, 70},
+ {3, 0, 21, 0, 68},
+ {3, 0, 21, 0, 66},
+ {3, 0, 21, 0, 64},
+ {3, 0, 21, 0, 63},
+ {3, 0, 20, 0, 64},
+ {3, 0, 19, 0, 65},
+ {3, 0, 19, 0, 64},
+ {3, 0, 18, 0, 65},
+ {3, 0, 18, 0, 64},
+ {3, 0, 17, 0, 65},
+ {3, 0, 17, 0, 64},
+ {3, 0, 16, 0, 65},
+ {3, 0, 16, 0, 64},
+ {3, 0, 16, 0, 62},
+ {3, 0, 16, 0, 60},
+ {3, 0, 16, 0, 58},
+ {3, 0, 15, 0, 61},
+ {3, 0, 15, 0, 59},
+ {3, 0, 14, 0, 61},
+ {3, 0, 14, 0, 60},
+ {3, 0, 14, 0, 58},
+ {3, 0, 13, 0, 60},
+ {3, 0, 13, 0, 59},
+ {3, 0, 12, 0, 62},
+ {3, 0, 12, 0, 60},
+ {3, 0, 12, 0, 58},
+ {3, 0, 11, 0, 62},
+ {3, 0, 11, 0, 60},
+ {3, 0, 11, 0, 59},
+ {3, 0, 11, 0, 57},
+ {3, 0, 10, 0, 61},
+ {3, 0, 10, 0, 59},
+ {3, 0, 10, 0, 57},
+ {3, 0, 9, 0, 62},
+ {3, 0, 9, 0, 60},
+ {3, 0, 9, 0, 58},
+ {3, 0, 9, 0, 57},
+ {3, 0, 8, 0, 62},
+ {3, 0, 8, 0, 60},
+ {3, 0, 8, 0, 58},
+ {3, 0, 8, 0, 57},
+ {3, 0, 8, 0, 55},
+ {3, 0, 7, 0, 61},
+ {3, 0, 7, 0, 60},
+ {3, 0, 7, 0, 58},
+ {3, 0, 7, 0, 56},
+ {3, 0, 7, 0, 55},
+ {3, 0, 6, 0, 62},
+ {3, 0, 6, 0, 60},
+ {3, 0, 6, 0, 58},
+ {3, 0, 6, 0, 57},
+ {3, 0, 6, 0, 55},
+ {3, 0, 6, 0, 54},
+ {3, 0, 6, 0, 52},
+ {3, 0, 5, 0, 61},
+ {3, 0, 5, 0, 59},
+ {3, 0, 5, 0, 57},
+ {3, 0, 5, 0, 56},
+ {3, 0, 5, 0, 54},
+ {3, 0, 5, 0, 53},
+ {3, 0, 5, 0, 51},
+ {3, 0, 4, 0, 62},
+ {3, 0, 4, 0, 60},
+ {3, 0, 4, 0, 58},
+ {3, 0, 4, 0, 57},
+ {3, 0, 4, 0, 55},
+ {3, 0, 4, 0, 54},
+ {3, 0, 4, 0, 52},
+ {3, 0, 4, 0, 51},
+ {3, 0, 4, 0, 49},
+ {3, 0, 4, 0, 48},
+ {3, 0, 4, 0, 46},
+ {3, 0, 3, 0, 60},
+ {3, 0, 3, 0, 58},
+ {3, 0, 3, 0, 57},
+ {3, 0, 3, 0, 55},
+ {3, 0, 3, 0, 54},
+ {3, 0, 3, 0, 52},
+ {3, 0, 3, 0, 51},
+ {3, 0, 3, 0, 49},
+ {3, 0, 3, 0, 48},
+ {3, 0, 3, 0, 46},
+ {3, 0, 3, 0, 45},
+ {3, 0, 3, 0, 44},
+ {3, 0, 3, 0, 43},
+ {3, 0, 3, 0, 41},
+ {3, 0, 2, 0, 61},
+ {3, 0, 2, 0, 59},
+ {3, 0, 2, 0, 57},
+ {3, 0, 2, 0, 56},
+ {3, 0, 2, 0, 54},
+ {3, 0, 2, 0, 53},
+ {3, 0, 2, 0, 51},
+ {3, 0, 2, 0, 50},
+ {3, 0, 2, 0, 48},
+ {3, 0, 2, 0, 47},
+ {3, 0, 2, 0, 46},
+ {3, 0, 2, 0, 44},
+ {3, 0, 2, 0, 43},
+ {3, 0, 2, 0, 42},
+ {3, 0, 2, 0, 41},
+ {3, 0, 2, 0, 39},
+ {3, 0, 2, 0, 38},
+ {3, 0, 2, 0, 37},
+ {3, 0, 2, 0, 36},
+ {3, 0, 2, 0, 35},
+ {3, 0, 2, 0, 34},
+ {3, 0, 2, 0, 33},
+ {3, 0, 2, 0, 32},
+ {3, 0, 1, 0, 63},
+ {3, 0, 1, 0, 61},
+ {3, 0, 1, 0, 59},
+ {3, 0, 1, 0, 57},
+};
+
+const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[128] = {
+ {7, 0, 31, 0, 72},
+ {7, 0, 31, 0, 70},
+ {7, 0, 31, 0, 68},
+ {7, 0, 30, 0, 67},
+ {7, 0, 29, 0, 68},
+ {7, 0, 28, 0, 68},
+ {7, 0, 27, 0, 69},
+ {7, 0, 26, 0, 70},
+ {7, 0, 25, 0, 70},
+ {7, 0, 24, 0, 71},
+ {7, 0, 23, 0, 72},
+ {7, 0, 23, 0, 70},
+ {7, 0, 22, 0, 71},
+ {7, 0, 21, 0, 72},
+ {7, 0, 21, 0, 70},
+ {7, 0, 21, 0, 68},
+ {7, 0, 21, 0, 66},
+ {7, 0, 21, 0, 64},
+ {7, 0, 21, 0, 63},
+ {7, 0, 20, 0, 64},
+ {7, 0, 19, 0, 65},
+ {7, 0, 19, 0, 64},
+ {7, 0, 18, 0, 65},
+ {7, 0, 18, 0, 64},
+ {7, 0, 17, 0, 65},
+ {7, 0, 17, 0, 64},
+ {7, 0, 16, 0, 65},
+ {7, 0, 16, 0, 64},
+ {7, 0, 16, 0, 62},
+ {7, 0, 16, 0, 60},
+ {7, 0, 16, 0, 58},
+ {7, 0, 15, 0, 61},
+ {7, 0, 15, 0, 59},
+ {7, 0, 14, 0, 61},
+ {7, 0, 14, 0, 60},
+ {7, 0, 14, 0, 58},
+ {7, 0, 13, 0, 60},
+ {7, 0, 13, 0, 59},
+ {7, 0, 12, 0, 62},
+ {7, 0, 12, 0, 60},
+ {7, 0, 12, 0, 58},
+ {7, 0, 11, 0, 62},
+ {7, 0, 11, 0, 60},
+ {7, 0, 11, 0, 59},
+ {7, 0, 11, 0, 57},
+ {7, 0, 10, 0, 61},
+ {7, 0, 10, 0, 59},
+ {7, 0, 10, 0, 57},
+ {7, 0, 9, 0, 62},
+ {7, 0, 9, 0, 60},
+ {7, 0, 9, 0, 58},
+ {7, 0, 9, 0, 57},
+ {7, 0, 8, 0, 62},
+ {7, 0, 8, 0, 60},
+ {7, 0, 8, 0, 58},
+ {7, 0, 8, 0, 57},
+ {7, 0, 8, 0, 55},
+ {7, 0, 7, 0, 61},
+ {7, 0, 7, 0, 60},
+ {7, 0, 7, 0, 58},
+ {7, 0, 7, 0, 56},
+ {7, 0, 7, 0, 55},
+ {7, 0, 6, 0, 62},
+ {7, 0, 6, 0, 60},
+ {7, 0, 6, 0, 58},
+ {7, 0, 6, 0, 57},
+ {7, 0, 6, 0, 55},
+ {7, 0, 6, 0, 54},
+ {7, 0, 6, 0, 52},
+ {7, 0, 5, 0, 61},
+ {7, 0, 5, 0, 59},
+ {7, 0, 5, 0, 57},
+ {7, 0, 5, 0, 56},
+ {7, 0, 5, 0, 54},
+ {7, 0, 5, 0, 53},
+ {7, 0, 5, 0, 51},
+ {7, 0, 4, 0, 62},
+ {7, 0, 4, 0, 60},
+ {7, 0, 4, 0, 58},
+ {7, 0, 4, 0, 57},
+ {7, 0, 4, 0, 55},
+ {7, 0, 4, 0, 54},
+ {7, 0, 4, 0, 52},
+ {7, 0, 4, 0, 51},
+ {7, 0, 4, 0, 49},
+ {7, 0, 4, 0, 48},
+ {7, 0, 4, 0, 46},
+ {7, 0, 3, 0, 60},
+ {7, 0, 3, 0, 58},
+ {7, 0, 3, 0, 57},
+ {7, 0, 3, 0, 55},
+ {7, 0, 3, 0, 54},
+ {7, 0, 3, 0, 52},
+ {7, 0, 3, 0, 51},
+ {7, 0, 3, 0, 49},
+ {7, 0, 3, 0, 48},
+ {7, 0, 3, 0, 46},
+ {7, 0, 3, 0, 45},
+ {7, 0, 3, 0, 44},
+ {7, 0, 3, 0, 43},
+ {7, 0, 3, 0, 41},
+ {7, 0, 2, 0, 61},
+ {7, 0, 2, 0, 59},
+ {7, 0, 2, 0, 57},
+ {7, 0, 2, 0, 56},
+ {7, 0, 2, 0, 54},
+ {7, 0, 2, 0, 53},
+ {7, 0, 2, 0, 51},
+ {7, 0, 2, 0, 50},
+ {7, 0, 2, 0, 48},
+ {7, 0, 2, 0, 47},
+ {7, 0, 2, 0, 46},
+ {7, 0, 2, 0, 44},
+ {7, 0, 2, 0, 43},
+ {7, 0, 2, 0, 42},
+ {7, 0, 2, 0, 41},
+ {7, 0, 2, 0, 39},
+ {7, 0, 2, 0, 38},
+ {7, 0, 2, 0, 37},
+ {7, 0, 2, 0, 36},
+ {7, 0, 2, 0, 35},
+ {7, 0, 2, 0, 34},
+ {7, 0, 2, 0, 33},
+ {7, 0, 2, 0, 32},
+ {7, 0, 1, 0, 63},
+ {7, 0, 1, 0, 61},
+ {7, 0, 1, 0, 59},
+ {7, 0, 1, 0, 57},
+};
+
+const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_5GHz_gaintable_rev0[128] = {
+ {255, 255, 0xf0, 0, 152},
+ {255, 255, 0xf0, 0, 147},
+ {255, 255, 0xf0, 0, 143},
+ {255, 255, 0xf0, 0, 139},
+ {255, 255, 0xf0, 0, 135},
+ {255, 255, 0xf0, 0, 131},
+ {255, 255, 0xf0, 0, 128},
+ {255, 255, 0xf0, 0, 124},
+ {255, 255, 0xf0, 0, 121},
+ {255, 255, 0xf0, 0, 117},
+ {255, 255, 0xf0, 0, 114},
+ {255, 255, 0xf0, 0, 111},
+ {255, 255, 0xf0, 0, 107},
+ {255, 255, 0xf0, 0, 104},
+ {255, 255, 0xf0, 0, 101},
+ {255, 255, 0xf0, 0, 99},
+ {255, 255, 0xf0, 0, 96},
+ {255, 255, 0xf0, 0, 93},
+ {255, 255, 0xf0, 0, 90},
+ {255, 255, 0xf0, 0, 88},
+ {255, 255, 0xf0, 0, 85},
+ {255, 255, 0xf0, 0, 83},
+ {255, 255, 0xf0, 0, 81},
+ {255, 255, 0xf0, 0, 78},
+ {255, 255, 0xf0, 0, 76},
+ {255, 255, 0xf0, 0, 74},
+ {255, 255, 0xf0, 0, 72},
+ {255, 255, 0xf0, 0, 70},
+ {255, 255, 0xf0, 0, 68},
+ {255, 255, 0xf0, 0, 66},
+ {255, 255, 0xf0, 0, 64},
+ {255, 248, 0xf0, 0, 64},
+ {255, 241, 0xf0, 0, 64},
+ {255, 251, 0xe0, 0, 64},
+ {255, 244, 0xe0, 0, 64},
+ {255, 254, 0xd0, 0, 64},
+ {255, 246, 0xd0, 0, 64},
+ {255, 239, 0xd0, 0, 64},
+ {255, 249, 0xc0, 0, 64},
+ {255, 242, 0xc0, 0, 64},
+ {255, 255, 0xb0, 0, 64},
+ {255, 248, 0xb0, 0, 64},
+ {255, 241, 0xb0, 0, 64},
+ {255, 254, 0xa0, 0, 64},
+ {255, 246, 0xa0, 0, 64},
+ {255, 239, 0xa0, 0, 64},
+ {255, 255, 0x90, 0, 64},
+ {255, 248, 0x90, 0, 64},
+ {255, 241, 0x90, 0, 64},
+ {255, 234, 0x90, 0, 64},
+ {255, 255, 0x80, 0, 64},
+ {255, 248, 0x80, 0, 64},
+ {255, 241, 0x80, 0, 64},
+ {255, 234, 0x80, 0, 64},
+ {255, 255, 0x70, 0, 64},
+ {255, 248, 0x70, 0, 64},
+ {255, 241, 0x70, 0, 64},
+ {255, 234, 0x70, 0, 64},
+ {255, 227, 0x70, 0, 64},
+ {255, 221, 0x70, 0, 64},
+ {255, 215, 0x70, 0, 64},
+ {255, 208, 0x70, 0, 64},
+ {255, 203, 0x70, 0, 64},
+ {255, 197, 0x70, 0, 64},
+ {255, 255, 0x60, 0, 64},
+ {255, 248, 0x60, 0, 64},
+ {255, 241, 0x60, 0, 64},
+ {255, 234, 0x60, 0, 64},
+ {255, 227, 0x60, 0, 64},
+ {255, 221, 0x60, 0, 64},
+ {255, 255, 0x50, 0, 64},
+ {255, 248, 0x50, 0, 64},
+ {255, 241, 0x50, 0, 64},
+ {255, 234, 0x50, 0, 64},
+ {255, 227, 0x50, 0, 64},
+ {255, 221, 0x50, 0, 64},
+ {255, 215, 0x50, 0, 64},
+ {255, 208, 0x50, 0, 64},
+ {255, 255, 0x40, 0, 64},
+ {255, 248, 0x40, 0, 64},
+ {255, 241, 0x40, 0, 64},
+ {255, 234, 0x40, 0, 64},
+ {255, 227, 0x40, 0, 64},
+ {255, 221, 0x40, 0, 64},
+ {255, 215, 0x40, 0, 64},
+ {255, 208, 0x40, 0, 64},
+ {255, 203, 0x40, 0, 64},
+ {255, 197, 0x40, 0, 64},
+ {255, 255, 0x30, 0, 64},
+ {255, 248, 0x30, 0, 64},
+ {255, 241, 0x30, 0, 64},
+ {255, 234, 0x30, 0, 64},
+ {255, 227, 0x30, 0, 64},
+ {255, 221, 0x30, 0, 64},
+ {255, 215, 0x30, 0, 64},
+ {255, 208, 0x30, 0, 64},
+ {255, 203, 0x30, 0, 64},
+ {255, 197, 0x30, 0, 64},
+ {255, 191, 0x30, 0, 64},
+ {255, 186, 0x30, 0, 64},
+ {255, 181, 0x30, 0, 64},
+ {255, 175, 0x30, 0, 64},
+ {255, 255, 0x20, 0, 64},
+ {255, 248, 0x20, 0, 64},
+ {255, 241, 0x20, 0, 64},
+ {255, 234, 0x20, 0, 64},
+ {255, 227, 0x20, 0, 64},
+ {255, 221, 0x20, 0, 64},
+ {255, 215, 0x20, 0, 64},
+ {255, 208, 0x20, 0, 64},
+ {255, 203, 0x20, 0, 64},
+ {255, 197, 0x20, 0, 64},
+ {255, 191, 0x20, 0, 64},
+ {255, 186, 0x20, 0, 64},
+ {255, 181, 0x20, 0, 64},
+ {255, 175, 0x20, 0, 64},
+ {255, 170, 0x20, 0, 64},
+ {255, 166, 0x20, 0, 64},
+ {255, 161, 0x20, 0, 64},
+ {255, 156, 0x20, 0, 64},
+ {255, 152, 0x20, 0, 64},
+ {255, 148, 0x20, 0, 64},
+ {255, 143, 0x20, 0, 64},
+ {255, 139, 0x20, 0, 64},
+ {255, 135, 0x20, 0, 64},
+ {255, 132, 0x20, 0, 64},
+ {255, 255, 0x10, 0, 64},
+ {255, 248, 0x10, 0, 64},
+};
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phytbl_lcn.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h
index 5f75e16bf5a..5f75e16bf5a 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/phytbl_lcn.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phytbl_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_n.c
index 7f741f4868a..dbf50ef6cd7 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/phytbl_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_n.c
@@ -17,7 +17,7 @@
#include <types.h>
#include "phytbl_n.h"
-const u32 frame_struct_rev0[] = {
+static const u32 frame_struct_rev0[] = {
0x08004a04,
0x00100000,
0x01000a05,
@@ -852,7 +852,7 @@ const u32 frame_struct_rev0[] = {
0x00000000,
};
-const u8 frame_lut_rev0[] = {
+static const u8 frame_lut_rev0[] = {
0x02,
0x04,
0x14,
@@ -887,7 +887,7 @@ const u8 frame_lut_rev0[] = {
0x2a,
};
-const u32 tmap_tbl_rev0[] = {
+static const u32 tmap_tbl_rev0[] = {
0x8a88aa80,
0x8aaaaa8a,
0x8a8a8aa8,
@@ -1338,7 +1338,7 @@ const u32 tmap_tbl_rev0[] = {
0x00000000,
};
-const u32 tdtrn_tbl_rev0[] = {
+static const u32 tdtrn_tbl_rev0[] = {
0x061c061c,
0x0050ee68,
0xf592fe36,
@@ -2045,7 +2045,7 @@ const u32 tdtrn_tbl_rev0[] = {
0x00f006be,
};
-const u32 intlv_tbl_rev0[] = {
+static const u32 intlv_tbl_rev0[] = {
0x00802070,
0x0671188d,
0x0a60192c,
@@ -2055,7 +2055,7 @@ const u32 intlv_tbl_rev0[] = {
0x00000070,
};
-const u16 pilot_tbl_rev0[] = {
+static const u16 pilot_tbl_rev0[] = {
0xff08,
0xff08,
0xff08,
@@ -2146,7 +2146,7 @@ const u16 pilot_tbl_rev0[] = {
0xffff,
};
-const u32 pltlut_tbl_rev0[] = {
+static const u32 pltlut_tbl_rev0[] = {
0x76540123,
0x62407351,
0x76543201,
@@ -2155,7 +2155,7 @@ const u32 pltlut_tbl_rev0[] = {
0x76430521,
};
-const u32 tdi_tbl20_ant0_rev0[] = {
+static const u32 tdi_tbl20_ant0_rev0[] = {
0x00091226,
0x000a1429,
0x000b56ad,
@@ -2213,7 +2213,7 @@ const u32 tdi_tbl20_ant0_rev0[] = {
0x00000000,
};
-const u32 tdi_tbl20_ant1_rev0[] = {
+static const u32 tdi_tbl20_ant1_rev0[] = {
0x00014b26,
0x00028d29,
0x000393ad,
@@ -2271,7 +2271,7 @@ const u32 tdi_tbl20_ant1_rev0[] = {
0x00000000,
};
-const u32 tdi_tbl40_ant0_rev0[] = {
+static const u32 tdi_tbl40_ant0_rev0[] = {
0x0011a346,
0x00136ccf,
0x0014f5d9,
@@ -2384,7 +2384,7 @@ const u32 tdi_tbl40_ant0_rev0[] = {
0x00000000,
};
-const u32 tdi_tbl40_ant1_rev0[] = {
+static const u32 tdi_tbl40_ant1_rev0[] = {
0x001edb36,
0x000129ca,
0x0002b353,
@@ -2497,7 +2497,7 @@ const u32 tdi_tbl40_ant1_rev0[] = {
0x00000000,
};
-const u16 bdi_tbl_rev0[] = {
+static const u16 bdi_tbl_rev0[] = {
0x0070,
0x0126,
0x012c,
@@ -2506,7 +2506,7 @@ const u16 bdi_tbl_rev0[] = {
0x04d2,
};
-const u32 chanest_tbl_rev0[] = {
+static const u32 chanest_tbl_rev0[] = {
0x44444444,
0x44444444,
0x44444444,
@@ -2605,7 +2605,7 @@ const u32 chanest_tbl_rev0[] = {
0x10101010,
};
-const u8 mcs_tbl_rev0[] = {
+static const u8 mcs_tbl_rev0[] = {
0x00,
0x08,
0x0a,
@@ -2736,7 +2736,7 @@ const u8 mcs_tbl_rev0[] = {
0x00,
};
-const u32 noise_var_tbl0_rev0[] = {
+static const u32 noise_var_tbl0_rev0[] = {
0x020c020c,
0x0000014d,
0x020c020c,
@@ -2995,7 +2995,7 @@ const u32 noise_var_tbl0_rev0[] = {
0x0000014d,
};
-const u32 noise_var_tbl1_rev0[] = {
+static const u32 noise_var_tbl1_rev0[] = {
0x020c020c,
0x0000014d,
0x020c020c,
@@ -3254,7 +3254,7 @@ const u32 noise_var_tbl1_rev0[] = {
0x0000014d,
};
-const u8 est_pwr_lut_core0_rev0[] = {
+static const u8 est_pwr_lut_core0_rev0[] = {
0x50,
0x4f,
0x4e,
@@ -3321,7 +3321,7 @@ const u8 est_pwr_lut_core0_rev0[] = {
0x11,
};
-const u8 est_pwr_lut_core1_rev0[] = {
+static const u8 est_pwr_lut_core1_rev0[] = {
0x50,
0x4f,
0x4e,
@@ -3388,7 +3388,7 @@ const u8 est_pwr_lut_core1_rev0[] = {
0x11,
};
-const u8 adj_pwr_lut_core0_rev0[] = {
+static const u8 adj_pwr_lut_core0_rev0[] = {
0x00,
0x00,
0x00,
@@ -3519,7 +3519,7 @@ const u8 adj_pwr_lut_core0_rev0[] = {
0x00,
};
-const u8 adj_pwr_lut_core1_rev0[] = {
+static const u8 adj_pwr_lut_core1_rev0[] = {
0x00,
0x00,
0x00,
@@ -3650,7 +3650,7 @@ const u8 adj_pwr_lut_core1_rev0[] = {
0x00,
};
-const u32 gainctrl_lut_core0_rev0[] = {
+static const u32 gainctrl_lut_core0_rev0[] = {
0x03cc2b44,
0x03cc2b42,
0x03cc2b40,
@@ -3781,7 +3781,7 @@ const u32 gainctrl_lut_core0_rev0[] = {
0x00002b00,
};
-const u32 gainctrl_lut_core1_rev0[] = {
+static const u32 gainctrl_lut_core1_rev0[] = {
0x03cc2b44,
0x03cc2b42,
0x03cc2b40,
@@ -3912,7 +3912,7 @@ const u32 gainctrl_lut_core1_rev0[] = {
0x00002b00,
};
-const u32 iq_lut_core0_rev0[] = {
+static const u32 iq_lut_core0_rev0[] = {
0x0000007f,
0x0000007f,
0x0000007f,
@@ -4043,7 +4043,7 @@ const u32 iq_lut_core0_rev0[] = {
0x0000007f,
};
-const u32 iq_lut_core1_rev0[] = {
+static const u32 iq_lut_core1_rev0[] = {
0x0000007f,
0x0000007f,
0x0000007f,
@@ -4174,7 +4174,7 @@ const u32 iq_lut_core1_rev0[] = {
0x0000007f,
};
-const u16 loft_lut_core0_rev0[] = {
+static const u16 loft_lut_core0_rev0[] = {
0x0000,
0x0101,
0x0002,
@@ -4305,7 +4305,7 @@ const u16 loft_lut_core0_rev0[] = {
0x0103,
};
-const u16 loft_lut_core1_rev0[] = {
+static const u16 loft_lut_core1_rev0[] = {
0x0000,
0x0101,
0x0002,
@@ -4522,7 +4522,8 @@ const struct phytbl_info mimophytbl_info_rev0[] = {
{&chanest_tbl_rev0,
sizeof(chanest_tbl_rev0) / sizeof(chanest_tbl_rev0[0]), 22, 0, 32}
,
- {&mcs_tbl_rev0, sizeof(mcs_tbl_rev0) / sizeof(mcs_tbl_rev0[0]), 18, 0, 8}
+ {&mcs_tbl_rev0, sizeof(mcs_tbl_rev0) / sizeof(mcs_tbl_rev0[0]), 18, 0,
+ 8}
,
{&noise_var_tbl0_rev0,
sizeof(noise_var_tbl0_rev0) / sizeof(noise_var_tbl0_rev0[0]), 16, 0,
@@ -4540,7 +4541,7 @@ const u32 mimophytbl_info_sz_rev0_volatile =
sizeof(mimophytbl_info_rev0_volatile) /
sizeof(mimophytbl_info_rev0_volatile[0]);
-const u16 ant_swctrl_tbl_rev3[] = {
+static const u16 ant_swctrl_tbl_rev3[] = {
0x0082,
0x0082,
0x0211,
@@ -4575,7 +4576,7 @@ const u16 ant_swctrl_tbl_rev3[] = {
0x0000,
};
-const u16 ant_swctrl_tbl_rev3_1[] = {
+static const u16 ant_swctrl_tbl_rev3_1[] = {
0x0022,
0x0022,
0x0011,
@@ -4610,7 +4611,7 @@ const u16 ant_swctrl_tbl_rev3_1[] = {
0x0000,
};
-const u16 ant_swctrl_tbl_rev3_2[] = {
+static const u16 ant_swctrl_tbl_rev3_2[] = {
0x0088,
0x0088,
0x0044,
@@ -4645,7 +4646,7 @@ const u16 ant_swctrl_tbl_rev3_2[] = {
0x0000,
};
-const u16 ant_swctrl_tbl_rev3_3[] = {
+static const u16 ant_swctrl_tbl_rev3_3[] = {
0x022,
0x022,
0x011,
@@ -4680,7 +4681,7 @@ const u16 ant_swctrl_tbl_rev3_3[] = {
0x3cc
};
-const u32 frame_struct_rev3[] = {
+static const u32 frame_struct_rev3[] = {
0x08004a04,
0x00100000,
0x01000a05,
@@ -5515,7 +5516,7 @@ const u32 frame_struct_rev3[] = {
0x00000000,
};
-const u16 pilot_tbl_rev3[] = {
+static const u16 pilot_tbl_rev3[] = {
0xff08,
0xff08,
0xff08,
@@ -5606,7 +5607,7 @@ const u16 pilot_tbl_rev3[] = {
0xffff,
};
-const u32 tmap_tbl_rev3[] = {
+static const u32 tmap_tbl_rev3[] = {
0x8a88aa80,
0x8aaaaa8a,
0x8a8a8aa8,
@@ -6057,7 +6058,7 @@ const u32 tmap_tbl_rev3[] = {
0x00000000,
};
-const u32 intlv_tbl_rev3[] = {
+static const u32 intlv_tbl_rev3[] = {
0x00802070,
0x0671188d,
0x0a60192c,
@@ -6067,7 +6068,7 @@ const u32 intlv_tbl_rev3[] = {
0x00000070,
};
-const u32 tdtrn_tbl_rev3[] = {
+static const u32 tdtrn_tbl_rev3[] = {
0x061c061c,
0x0050ee68,
0xf592fe36,
@@ -7033,7 +7034,7 @@ const u32 noise_var_tbl_rev3[] = {
0x0000014d,
};
-const u16 mcs_tbl_rev3[] = {
+static const u16 mcs_tbl_rev3[] = {
0x0000,
0x0008,
0x000a,
@@ -7164,7 +7165,7 @@ const u16 mcs_tbl_rev3[] = {
0x0007,
};
-const u32 tdi_tbl20_ant0_rev3[] = {
+static const u32 tdi_tbl20_ant0_rev3[] = {
0x00091226,
0x000a1429,
0x000b56ad,
@@ -7222,7 +7223,7 @@ const u32 tdi_tbl20_ant0_rev3[] = {
0x00000000,
};
-const u32 tdi_tbl20_ant1_rev3[] = {
+static const u32 tdi_tbl20_ant1_rev3[] = {
0x00014b26,
0x00028d29,
0x000393ad,
@@ -7280,7 +7281,7 @@ const u32 tdi_tbl20_ant1_rev3[] = {
0x00000000,
};
-const u32 tdi_tbl40_ant0_rev3[] = {
+static const u32 tdi_tbl40_ant0_rev3[] = {
0x0011a346,
0x00136ccf,
0x0014f5d9,
@@ -7393,7 +7394,7 @@ const u32 tdi_tbl40_ant0_rev3[] = {
0x00000000,
};
-const u32 tdi_tbl40_ant1_rev3[] = {
+static const u32 tdi_tbl40_ant1_rev3[] = {
0x001edb36,
0x000129ca,
0x0002b353,
@@ -7506,7 +7507,7 @@ const u32 tdi_tbl40_ant1_rev3[] = {
0x00000000,
};
-const u32 pltlut_tbl_rev3[] = {
+static const u32 pltlut_tbl_rev3[] = {
0x76540213,
0x62407351,
0x76543210,
@@ -7515,7 +7516,7 @@ const u32 pltlut_tbl_rev3[] = {
0x76430521,
};
-const u32 chanest_tbl_rev3[] = {
+static const u32 chanest_tbl_rev3[] = {
0x44444444,
0x44444444,
0x44444444,
@@ -7614,7 +7615,7 @@ const u32 chanest_tbl_rev3[] = {
0x10101010,
};
-const u8 frame_lut_rev3[] = {
+static const u8 frame_lut_rev3[] = {
0x02,
0x04,
0x14,
@@ -7649,7 +7650,7 @@ const u8 frame_lut_rev3[] = {
0x2a,
};
-const u8 est_pwr_lut_core0_rev3[] = {
+static const u8 est_pwr_lut_core0_rev3[] = {
0x55,
0x54,
0x54,
@@ -7716,7 +7717,7 @@ const u8 est_pwr_lut_core0_rev3[] = {
0xfd,
};
-const u8 est_pwr_lut_core1_rev3[] = {
+static const u8 est_pwr_lut_core1_rev3[] = {
0x55,
0x54,
0x54,
@@ -7783,7 +7784,7 @@ const u8 est_pwr_lut_core1_rev3[] = {
0xfd,
};
-const u8 adj_pwr_lut_core0_rev3[] = {
+static const u8 adj_pwr_lut_core0_rev3[] = {
0x00,
0x00,
0x00,
@@ -7914,7 +7915,7 @@ const u8 adj_pwr_lut_core0_rev3[] = {
0x00,
};
-const u8 adj_pwr_lut_core1_rev3[] = {
+static const u8 adj_pwr_lut_core1_rev3[] = {
0x00,
0x00,
0x00,
@@ -8045,7 +8046,7 @@ const u8 adj_pwr_lut_core1_rev3[] = {
0x00,
};
-const u32 gainctrl_lut_core0_rev3[] = {
+static const u32 gainctrl_lut_core0_rev3[] = {
0x5bf70044,
0x5bf70042,
0x5bf70040,
@@ -8176,7 +8177,7 @@ const u32 gainctrl_lut_core0_rev3[] = {
0x5b07001c,
};
-const u32 gainctrl_lut_core1_rev3[] = {
+static const u32 gainctrl_lut_core1_rev3[] = {
0x5bf70044,
0x5bf70042,
0x5bf70040,
@@ -8307,7 +8308,7 @@ const u32 gainctrl_lut_core1_rev3[] = {
0x5b07001c,
};
-const u32 iq_lut_core0_rev3[] = {
+static const u32 iq_lut_core0_rev3[] = {
0x00000000,
0x00000000,
0x00000000,
@@ -8438,7 +8439,7 @@ const u32 iq_lut_core0_rev3[] = {
0x00000000,
};
-const u32 iq_lut_core1_rev3[] = {
+static const u32 iq_lut_core1_rev3[] = {
0x00000000,
0x00000000,
0x00000000,
@@ -8569,7 +8570,7 @@ const u32 iq_lut_core1_rev3[] = {
0x00000000,
};
-const u16 loft_lut_core0_rev3[] = {
+static const u16 loft_lut_core0_rev3[] = {
0x0000,
0x0000,
0x0000,
@@ -8700,7 +8701,7 @@ const u16 loft_lut_core0_rev3[] = {
0x0000,
};
-const u16 loft_lut_core1_rev3[] = {
+static const u16 loft_lut_core1_rev3[] = {
0x0000,
0x0000,
0x0000,
@@ -8831,7 +8832,7 @@ const u16 loft_lut_core1_rev3[] = {
0x0000,
};
-const u16 papd_comp_rfpwr_tbl_core0_rev3[] = {
+static const u16 papd_comp_rfpwr_tbl_core0_rev3[] = {
0x0036,
0x0036,
0x0036,
@@ -8962,7 +8963,7 @@ const u16 papd_comp_rfpwr_tbl_core0_rev3[] = {
0x01d6,
};
-const u16 papd_comp_rfpwr_tbl_core1_rev3[] = {
+static const u16 papd_comp_rfpwr_tbl_core1_rev3[] = {
0x0036,
0x0036,
0x0036,
@@ -9093,7 +9094,7 @@ const u16 papd_comp_rfpwr_tbl_core1_rev3[] = {
0x01d6,
};
-const u32 papd_comp_epsilon_tbl_core0_rev3[] = {
+static const u32 papd_comp_epsilon_tbl_core0_rev3[] = {
0x00000000,
0x00001fa0,
0x00019f78,
@@ -9160,7 +9161,7 @@ const u32 papd_comp_epsilon_tbl_core0_rev3[] = {
0x03e38ffe,
};
-const u32 papd_cal_scalars_tbl_core0_rev3[] = {
+static const u32 papd_cal_scalars_tbl_core0_rev3[] = {
0x05af005a,
0x0571005e,
0x05040066,
@@ -9227,7 +9228,7 @@ const u32 papd_cal_scalars_tbl_core0_rev3[] = {
0x002606a4,
};
-const u32 papd_comp_epsilon_tbl_core1_rev3[] = {
+static const u32 papd_comp_epsilon_tbl_core1_rev3[] = {
0x00000000,
0x00001fa0,
0x00019f78,
@@ -9294,7 +9295,7 @@ const u32 papd_comp_epsilon_tbl_core1_rev3[] = {
0x03e38ffe,
};
-const u32 papd_cal_scalars_tbl_core1_rev3[] = {
+static const u32 papd_cal_scalars_tbl_core1_rev3[] = {
0x05af005a,
0x0571005e,
0x05040066,
@@ -9489,7 +9490,7 @@ const u32 mimophytbl_info_sz_rev3_volatile3 =
sizeof(mimophytbl_info_rev3_volatile3) /
sizeof(mimophytbl_info_rev3_volatile3[0]);
-const u32 tmap_tbl_rev7[] = {
+static const u32 tmap_tbl_rev7[] = {
0x8a88aa80,
0x8aaaaa8a,
0x8a8a8aa8,
@@ -10199,7 +10200,7 @@ const u32 noise_var_tbl_rev7[] = {
0x0000014d,
};
-const u32 papd_comp_epsilon_tbl_core0_rev7[] = {
+static const u32 papd_comp_epsilon_tbl_core0_rev7[] = {
0x00000000,
0x00000000,
0x00016023,
@@ -10266,7 +10267,7 @@ const u32 papd_comp_epsilon_tbl_core0_rev7[] = {
0x0156cfff,
};
-const u32 papd_cal_scalars_tbl_core0_rev7[] = {
+static const u32 papd_cal_scalars_tbl_core0_rev7[] = {
0x0b5e002d,
0x0ae2002f,
0x0a3b0032,
@@ -10333,7 +10334,7 @@ const u32 papd_cal_scalars_tbl_core0_rev7[] = {
0x004e068c,
};
-const u32 papd_comp_epsilon_tbl_core1_rev7[] = {
+static const u32 papd_comp_epsilon_tbl_core1_rev7[] = {
0x00000000,
0x00000000,
0x00016023,
@@ -10400,7 +10401,7 @@ const u32 papd_comp_epsilon_tbl_core1_rev7[] = {
0x0156cfff,
};
-const u32 papd_cal_scalars_tbl_core1_rev7[] = {
+static const u32 papd_cal_scalars_tbl_core1_rev7[] = {
0x0b5e002d,
0x0ae2002f,
0x0a3b0032,
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_n.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_n.h
new file mode 100644
index 00000000000..dc8a84e8511
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_n.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define ANT_SWCTRL_TBL_REV3_IDX (0)
+
+#include <types.h>
+#include "phy_int.h"
+
+extern const struct phytbl_info mimophytbl_info_rev0[],
+ mimophytbl_info_rev0_volatile[];
+
+extern const u32 mimophytbl_info_sz_rev0,
+ mimophytbl_info_sz_rev0_volatile;
+
+extern const struct phytbl_info mimophytbl_info_rev3[],
+ mimophytbl_info_rev3_volatile[],
+ mimophytbl_info_rev3_volatile1[],
+ mimophytbl_info_rev3_volatile2[],
+ mimophytbl_info_rev3_volatile3[];
+
+extern const u32 mimophytbl_info_sz_rev3,
+ mimophytbl_info_sz_rev3_volatile,
+ mimophytbl_info_sz_rev3_volatile1,
+ mimophytbl_info_sz_rev3_volatile2,
+ mimophytbl_info_sz_rev3_volatile3;
+
+extern const u32 noise_var_tbl_rev3[];
+
+extern const struct phytbl_info mimophytbl_info_rev7[];
+
+extern const u32 mimophytbl_info_sz_rev7;
+
+extern const u32 noise_var_tbl_rev7[];
+
+extern const struct phytbl_info mimophytbl_info_rev16[];
+
+extern const u32 mimophytbl_info_sz_rev16;
diff --git a/drivers/staging/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c
index 82ecdcda271..5926854f62e 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy_shim.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c
@@ -15,15 +15,15 @@
*/
/*
- * This is "two-way" interface, acting as the SHIM layer between WL and PHY layer.
- * WL driver can optinally call this translation layer to do some preprocessing, then reach PHY.
- * On the PHY->WL driver direction, all calls go through this layer since PHY doesn't have the
- * access to wlc_hw pointer.
+ * This is "two-way" interface, acting as the SHIM layer between driver
+ * and PHY layer. The driver can optionally call this translation layer
+ * to do some preprocessing, then reach PHY. On the PHY->driver direction,
+ * all calls go through this layer since PHY doesn't have access to the
+ * driver's brcms_hardware pointer.
*/
#include <linux/slab.h>
#include <net/mac80211.h>
-#include "bmac.h"
#include "main.h"
#include "mac80211_if.h"
#include "phy_shim.h"
@@ -31,21 +31,19 @@
/* PHY SHIM module specific state */
struct phy_shim_info {
struct brcms_hardware *wlc_hw; /* pointer to main wlc_hw structure */
- void *wlc; /* pointer to main wlc structure */
- void *wl; /* pointer to os-specific private state */
+ struct brcms_c_info *wlc; /* pointer to main wlc structure */
+ struct brcms_info *wl; /* pointer to os-specific private state */
};
struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
- void *wl, void *wlc) {
+ struct brcms_info *wl,
+ struct brcms_c_info *wlc) {
struct phy_shim_info *physhim = NULL;
physhim = kzalloc(sizeof(struct phy_shim_info), GFP_ATOMIC);
- if (!physhim) {
- wiphy_err(wlc_hw->wlc->wiphy,
- "wl%d: wlc_phy_shim_attach: out of mem\n",
- wlc_hw->unit);
+ if (!physhim)
return NULL;
- }
+
physhim->wlc_hw = wlc_hw;
physhim->wlc = wlc;
physhim->wl = wl;
@@ -59,28 +57,28 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim)
}
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn) (void *arg), void *arg,
- const char *name)
+ void (*fn)(struct brcms_phy *pi),
+ void *arg, const char *name)
{
return (struct wlapi_timer *)
- brcms_init_timer(physhim->wl, fn, arg, name);
+ brcms_init_timer(physhim->wl, (void (*)(void *))fn,
+ arg, name);
}
-void wlapi_free_timer(struct phy_shim_info *physhim, struct wlapi_timer *t)
+void wlapi_free_timer(struct wlapi_timer *t)
{
- brcms_free_timer(physhim->wl, (struct brcms_timer *)t);
+ brcms_free_timer((struct brcms_timer *)t);
}
void
-wlapi_add_timer(struct phy_shim_info *physhim, struct wlapi_timer *t, uint ms,
- int periodic)
+wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic)
{
- brcms_add_timer(physhim->wl, (struct brcms_timer *)t, ms, periodic);
+ brcms_add_timer((struct brcms_timer *)t, ms, periodic);
}
-bool wlapi_del_timer(struct phy_shim_info *physhim, struct wlapi_timer *t)
+bool wlapi_del_timer(struct wlapi_timer *t)
{
- return brcms_del_timer(physhim->wl, (struct brcms_timer *)t);
+ return brcms_del_timer((struct brcms_timer *)t);
}
void wlapi_intrson(struct phy_shim_info *physhim)
@@ -216,3 +214,12 @@ wlapi_copyto_objmem(struct phy_shim_info *physhim, uint offset, const void *buf,
{
brcms_b_copyto_objmem(physhim->wlc_hw, offset, buf, l, sel);
}
+
+char *wlapi_getvar(struct phy_shim_info *physhim, enum brcms_srom_id id)
+{
+ return getvar(physhim->wlc_hw->sih, id);
+}
+int wlapi_getintvar(struct phy_shim_info *physhim, enum brcms_srom_id id)
+{
+ return getintvar(physhim->wlc_hw->sih, id);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
new file mode 100644
index 00000000000..9168c459b18
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * phy_shim.h: stuff defined in phy_shim.c and included only by the phy
+ */
+
+#ifndef _BRCM_PHY_SHIM_H_
+#define _BRCM_PHY_SHIM_H_
+
+#include "types.h"
+
+#define RADAR_TYPE_NONE 0 /* Radar type None */
+#define RADAR_TYPE_ETSI_1 1 /* ETSI 1 Radar type */
+#define RADAR_TYPE_ETSI_2 2 /* ETSI 2 Radar type */
+#define RADAR_TYPE_ETSI_3 3 /* ETSI 3 Radar type */
+#define RADAR_TYPE_ITU_E 4 /* ITU E Radar type */
+#define RADAR_TYPE_ITU_K 5 /* ITU K Radar type */
+#define RADAR_TYPE_UNCLASSIFIED 6 /* Unclassified Radar type */
+#define RADAR_TYPE_BIN5 7 /* long pulse radar type */
+#define RADAR_TYPE_STG2 8 /* staggered-2 radar */
+#define RADAR_TYPE_STG3 9 /* staggered-3 radar */
+#define RADAR_TYPE_FRA 10 /* French radar */
+
+/* French radar pulse widths */
+#define FRA_T1_20MHZ 52770
+#define FRA_T2_20MHZ 61538
+#define FRA_T3_20MHZ 66002
+#define FRA_T1_40MHZ 105541
+#define FRA_T2_40MHZ 123077
+#define FRA_T3_40MHZ 132004
+#define FRA_ERR_20MHZ 60
+#define FRA_ERR_40MHZ 120
+
+#define ANTSEL_NA 0 /* No boardlevel selection available */
+#define ANTSEL_2x4 1 /* 2x4 boardlevel selection available */
+#define ANTSEL_2x3 2 /* 2x3 CB2 boardlevel selection available */
+
+/* Rx Antenna diversity control values */
+#define ANT_RX_DIV_FORCE_0 0 /* Use antenna 0 */
+#define ANT_RX_DIV_FORCE_1 1 /* Use antenna 1 */
+#define ANT_RX_DIV_START_1 2 /* Choose starting with 1 */
+#define ANT_RX_DIV_START_0 3 /* Choose starting with 0 */
+#define ANT_RX_DIV_ENABLE 3 /* APHY bbConfig Enable RX Diversity */
+#define ANT_RX_DIV_DEF ANT_RX_DIV_START_0 /* default antdiv setting */
+
+#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */
+#define WL_ANT_HT_RX_MAX 3 /* max 3 receive antennas/cores */
+#define WL_ANT_IDX_1 0 /* antenna index 1 */
+#define WL_ANT_IDX_2 1 /* antenna index 2 */
+
+/* values for n_preamble_type */
+#define BRCMS_N_PREAMBLE_MIXEDMODE 0
+#define BRCMS_N_PREAMBLE_GF 1
+#define BRCMS_N_PREAMBLE_GF_BRCM 2
+
+#define WL_TX_POWER_RATES_LEGACY 45
+#define WL_TX_POWER_MCS20_FIRST 12
+#define WL_TX_POWER_MCS20_NUM 16
+#define WL_TX_POWER_MCS40_FIRST 28
+#define WL_TX_POWER_MCS40_NUM 17
+
+
+#define WL_TX_POWER_RATES 101
+#define WL_TX_POWER_CCK_FIRST 0
+#define WL_TX_POWER_CCK_NUM 4
+/* Index for first 20MHz OFDM SISO rate */
+#define WL_TX_POWER_OFDM_FIRST 4
+/* Index for first 20MHz OFDM CDD rate */
+#define WL_TX_POWER_OFDM20_CDD_FIRST 12
+/* Index for first 40MHz OFDM SISO rate */
+#define WL_TX_POWER_OFDM40_SISO_FIRST 52
+/* Index for first 40MHz OFDM CDD rate */
+#define WL_TX_POWER_OFDM40_CDD_FIRST 60
+#define WL_TX_POWER_OFDM_NUM 8
+/* Index for first 20MHz MCS SISO rate */
+#define WL_TX_POWER_MCS20_SISO_FIRST 20
+/* Index for first 20MHz MCS CDD rate */
+#define WL_TX_POWER_MCS20_CDD_FIRST 28
+/* Index for first 20MHz MCS STBC rate */
+#define WL_TX_POWER_MCS20_STBC_FIRST 36
+/* Index for first 20MHz MCS SDM rate */
+#define WL_TX_POWER_MCS20_SDM_FIRST 44
+/* Index for first 40MHz MCS SISO rate */
+#define WL_TX_POWER_MCS40_SISO_FIRST 68
+/* Index for first 40MHz MCS CDD rate */
+#define WL_TX_POWER_MCS40_CDD_FIRST 76
+/* Index for first 40MHz MCS STBC rate */
+#define WL_TX_POWER_MCS40_STBC_FIRST 84
+/* Index for first 40MHz MCS SDM rate */
+#define WL_TX_POWER_MCS40_SDM_FIRST 92
+#define WL_TX_POWER_MCS_1_STREAM_NUM 8
+#define WL_TX_POWER_MCS_2_STREAM_NUM 8
+/* Index for 40MHz rate MCS 32 */
+#define WL_TX_POWER_MCS_32 100
+#define WL_TX_POWER_MCS_32_NUM 1
+
+/* sslpnphy specifics */
+/* Index for first 20MHz MCS SISO rate */
+#define WL_TX_POWER_MCS20_SISO_FIRST_SSN 12
+
+/* struct tx_power::flags bits */
+#define WL_TX_POWER_F_ENABLED 1
+#define WL_TX_POWER_F_HW 2
+#define WL_TX_POWER_F_MIMO 4
+#define WL_TX_POWER_F_SISO 8
+
+/* values to force tx/rx chain */
+#define BRCMS_N_TXRX_CHAIN0 0
+#define BRCMS_N_TXRX_CHAIN1 1
+
+struct brcms_phy;
+
+extern struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
+ struct brcms_info *wl,
+ struct brcms_c_info *wlc);
+extern void wlc_phy_shim_detach(struct phy_shim_info *physhim);
+
+/* PHY to WL utility functions */
+extern struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+ void (*fn) (struct brcms_phy *pi),
+ void *arg, const char *name);
+extern void wlapi_free_timer(struct wlapi_timer *t);
+extern void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
+extern bool wlapi_del_timer(struct wlapi_timer *t);
+extern void wlapi_intrson(struct phy_shim_info *physhim);
+extern u32 wlapi_intrsoff(struct phy_shim_info *physhim);
+extern void wlapi_intrsrestore(struct phy_shim_info *physhim,
+ u32 macintmask);
+
+extern void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset,
+ u16 v);
+extern u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
+extern void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx,
+ u16 mask, u16 val, int bands);
+extern void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
+extern void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
+extern void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
+extern void wlapi_enable_mac(struct phy_shim_info *physhim);
+extern void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask,
+ u32 val);
+extern void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
+extern void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
+extern void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
+extern void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
+extern void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
+extern void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
+extern void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *
+ physhim);
+extern void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *
+ physhim);
+extern void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
+ int len, void *buf);
+extern u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim,
+ u8 rate);
+extern void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
+extern void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint,
+ void *buf, int, u32 sel);
+extern void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint,
+ const void *buf, int, u32);
+
+extern void wlapi_high_update_phy_mode(struct phy_shim_info *physhim,
+ u32 phy_mode);
+extern u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
+extern char *wlapi_getvar(struct phy_shim_info *physhim, enum brcms_srom_id id);
+extern int wlapi_getintvar(struct phy_shim_info *physhim,
+ enum brcms_srom_id id);
+
+#endif /* _BRCM_PHY_SHIM_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
index e8b2b81d2d0..3b36e3acfd7 100644
--- a/drivers/staging/brcm80211/brcmsmac/pmu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
@@ -70,11 +70,13 @@
#define PMURES_BIT(bit) (1 << (bit))
/* PMU corerev and chip specific PLL controls.
- * PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary number
- * to differentiate different PLLs controlled by the same PMU rev.
+ * PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary
+ * number to differentiate different PLLs controlled by the same PMU rev.
+ */
+/* pllcontrol registers:
+ * ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>,
+ * p1div, p2div, _bypass_sdmod
*/
-/* pllcontrol registers */
-/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
#define PMU1_PLL0_PLLCTL0 0
#define PMU1_PLL0_PLLCTL1 1
#define PMU1_PLL0_PLLCTL2 2
@@ -137,7 +139,8 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
}
static void
-si_pmu_spuravoid_pllupdate(struct si_pub *sih, chipcregs_t *cc, u8 spuravoid)
+si_pmu_spuravoid_pllupdate(struct si_pub *sih, struct chipcregs __iomem *cc,
+ u8 spuravoid)
{
u32 tmp = 0;
@@ -199,28 +202,6 @@ si_pmu_spuravoid_pllupdate(struct si_pub *sih, chipcregs_t *cc, u8 spuravoid)
W_REG(&cc->pmucontrol, tmp);
}
-u32 si_pmu_ilp_clock(struct si_pub *sih)
-{
- static u32 ilpcycles_per_sec;
-
- if (!PMUCTL_ENAB(sih))
- return ILP_CLOCK;
-
- if (ilpcycles_per_sec == 0) {
- u32 start, end, delta;
- u32 origidx = ai_coreidx(sih);
- chipcregs_t *cc = ai_setcoreidx(sih, SI_CC_IDX);
- start = R_REG(&cc->pmutimer);
- mdelay(ILP_CALC_DUR);
- end = R_REG(&cc->pmutimer);
- delta = end - start;
- ilpcycles_per_sec = delta * (1000 / ILP_CALC_DUR);
- ai_setcoreidx(sih, origidx);
- }
-
- return ilpcycles_per_sec;
-}
-
u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
{
uint delay = PMU_MAX_TRANSITION_DLY;
@@ -240,7 +221,7 @@ u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
void si_pmu_sprom_enable(struct si_pub *sih, bool enable)
{
- chipcregs_t *cc;
+ struct chipcregs __iomem *cc;
uint origidx;
/* Remember original core before switch to chipc */
@@ -254,34 +235,37 @@ void si_pmu_sprom_enable(struct si_pub *sih, bool enable)
/* Read/write a chipcontrol reg */
u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, chipcontrol_addr), ~0,
- reg);
+ ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol_addr),
+ ~0, reg);
return ai_corereg(sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol_data), mask, val);
+ offsetof(struct chipcregs, chipcontrol_data), mask,
+ val);
}
/* Read/write a regcontrol reg */
u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, regcontrol_addr), ~0,
- reg);
+ ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, regcontrol_addr),
+ ~0, reg);
return ai_corereg(sih, SI_CC_IDX,
- offsetof(chipcregs_t, regcontrol_data), mask, val);
+ offsetof(struct chipcregs, regcontrol_data), mask,
+ val);
}
/* Read/write a pllcontrol reg */
u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, pllcontrol_addr), ~0,
- reg);
+ ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pllcontrol_addr),
+ ~0, reg);
return ai_corereg(sih, SI_CC_IDX,
- offsetof(chipcregs_t, pllcontrol_data), mask, val);
+ offsetof(struct chipcregs, pllcontrol_data), mask,
+ val);
}
/* PMU PLL update */
void si_pmu_pllupd(struct si_pub *sih)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, pmucontrol),
+ ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pmucontrol),
PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
}
@@ -291,7 +275,7 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
u32 clock = ALP_CLOCK;
/* bail out with default */
- if (!PMUCTL_ENAB(sih))
+ if (!(sih->cccaps & CC_CAP_PMU))
return clock;
switch (sih->chip) {
@@ -310,12 +294,12 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid)
{
- chipcregs_t *cc;
+ struct chipcregs __iomem *cc;
uint origidx, intr_val;
/* Remember original core before switch to chipc */
- cc = (chipcregs_t *) ai_switch_core(sih, CC_CORE_ID, &origidx,
- &intr_val);
+ cc = (struct chipcregs __iomem *)
+ ai_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
/* update the pll changes */
si_pmu_spuravoid_pllupdate(sih, cc, spuravoid);
@@ -327,7 +311,7 @@ void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid)
/* initialize PMU */
void si_pmu_init(struct si_pub *sih)
{
- chipcregs_t *cc;
+ struct chipcregs __iomem *cc;
uint origidx;
/* Remember original core before switch to chipc */
@@ -366,7 +350,7 @@ void si_pmu_swreg_init(struct si_pub *sih)
/* initialize PLL */
void si_pmu_pll_init(struct si_pub *sih, uint xtalfreq)
{
- chipcregs_t *cc;
+ struct chipcregs __iomem *cc;
uint origidx;
/* Remember original core before switch to chipc */
@@ -390,7 +374,7 @@ void si_pmu_pll_init(struct si_pub *sih, uint xtalfreq)
/* initialize PMU resources */
void si_pmu_res_init(struct si_pub *sih)
{
- chipcregs_t *cc;
+ struct chipcregs __iomem *cc;
uint origidx;
u32 min_mask = 0, max_mask = 0;
@@ -422,7 +406,7 @@ void si_pmu_res_init(struct si_pub *sih)
u32 si_pmu_measure_alpclk(struct si_pub *sih)
{
- chipcregs_t *cc;
+ struct chipcregs __iomem *cc;
uint origidx;
u32 alp_khz;
diff --git a/drivers/staging/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 0c7e48c4bcd..3a08c620640 100644
--- a/drivers/staging/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -19,25 +19,11 @@
#define _BRCM_PMU_H_
#include "types.h"
-/*
- * LDO selections used in si_pmu_set_ldo_voltage
- */
-#define SET_LDO_VOLTAGE_LDO1 1
-#define SET_LDO_VOLTAGE_LDO2 2
-#define SET_LDO_VOLTAGE_LDO3 3
-#define SET_LDO_VOLTAGE_PAREF 4
-#define SET_LDO_VOLTAGE_CLDO_PWM 5
-#define SET_LDO_VOLTAGE_CLDO_BURST 6
-#define SET_LDO_VOLTAGE_CBUCK_PWM 7
-#define SET_LDO_VOLTAGE_CBUCK_BURST 8
-#define SET_LDO_VOLTAGE_LNLDO1 9
-#define SET_LDO_VOLTAGE_LNLDO2_SEL 10
extern u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
extern void si_pmu_sprom_enable(struct si_pub *sih, bool enable);
extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
-extern u32 si_pmu_ilp_clock(struct si_pub *sih);
extern u32 si_pmu_alp_clock(struct si_pub *sih);
extern void si_pmu_pllupd(struct si_pub *sih);
extern void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
new file mode 100644
index 00000000000..37bb2dcc113
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -0,0 +1,634 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_PUB_H_
+#define _BRCM_PUB_H_
+
+#include <brcmu_wifi.h>
+#include "types.h"
+#include "defs.h"
+
+enum brcms_srom_id {
+ BRCMS_SROM_NULL,
+ BRCMS_SROM_CONT,
+ BRCMS_SROM_AA2G,
+ BRCMS_SROM_AA5G,
+ BRCMS_SROM_AG0,
+ BRCMS_SROM_AG1,
+ BRCMS_SROM_AG2,
+ BRCMS_SROM_AG3,
+ BRCMS_SROM_ANTSWCTL2G,
+ BRCMS_SROM_ANTSWCTL5G,
+ BRCMS_SROM_ANTSWITCH,
+ BRCMS_SROM_BOARDFLAGS2,
+ BRCMS_SROM_BOARDFLAGS,
+ BRCMS_SROM_BOARDNUM,
+ BRCMS_SROM_BOARDREV,
+ BRCMS_SROM_BOARDTYPE,
+ BRCMS_SROM_BW40PO,
+ BRCMS_SROM_BWDUPPO,
+ BRCMS_SROM_BXA2G,
+ BRCMS_SROM_BXA5G,
+ BRCMS_SROM_CC,
+ BRCMS_SROM_CCK2GPO,
+ BRCMS_SROM_CCKBW202GPO,
+ BRCMS_SROM_CCKBW20UL2GPO,
+ BRCMS_SROM_CCODE,
+ BRCMS_SROM_CDDPO,
+ BRCMS_SROM_DEVID,
+ BRCMS_SROM_ET1MACADDR,
+ BRCMS_SROM_EXTPAGAIN2G,
+ BRCMS_SROM_EXTPAGAIN5G,
+ BRCMS_SROM_FREQOFFSET_CORR,
+ BRCMS_SROM_HW_IQCAL_EN,
+ BRCMS_SROM_IL0MACADDR,
+ BRCMS_SROM_IQCAL_SWP_DIS,
+ BRCMS_SROM_LEDBH0,
+ BRCMS_SROM_LEDBH1,
+ BRCMS_SROM_LEDBH2,
+ BRCMS_SROM_LEDBH3,
+ BRCMS_SROM_LEDDC,
+ BRCMS_SROM_LEGOFDM40DUPPO,
+ BRCMS_SROM_LEGOFDMBW202GPO,
+ BRCMS_SROM_LEGOFDMBW205GHPO,
+ BRCMS_SROM_LEGOFDMBW205GLPO,
+ BRCMS_SROM_LEGOFDMBW205GMPO,
+ BRCMS_SROM_LEGOFDMBW20UL2GPO,
+ BRCMS_SROM_LEGOFDMBW20UL5GHPO,
+ BRCMS_SROM_LEGOFDMBW20UL5GLPO,
+ BRCMS_SROM_LEGOFDMBW20UL5GMPO,
+ BRCMS_SROM_MACADDR,
+ BRCMS_SROM_MCS2GPO0,
+ BRCMS_SROM_MCS2GPO1,
+ BRCMS_SROM_MCS2GPO2,
+ BRCMS_SROM_MCS2GPO3,
+ BRCMS_SROM_MCS2GPO4,
+ BRCMS_SROM_MCS2GPO5,
+ BRCMS_SROM_MCS2GPO6,
+ BRCMS_SROM_MCS2GPO7,
+ BRCMS_SROM_MCS32PO,
+ BRCMS_SROM_MCS5GHPO0,
+ BRCMS_SROM_MCS5GHPO1,
+ BRCMS_SROM_MCS5GHPO2,
+ BRCMS_SROM_MCS5GHPO3,
+ BRCMS_SROM_MCS5GHPO4,
+ BRCMS_SROM_MCS5GHPO5,
+ BRCMS_SROM_MCS5GHPO6,
+ BRCMS_SROM_MCS5GHPO7,
+ BRCMS_SROM_MCS5GLPO0,
+ BRCMS_SROM_MCS5GLPO1,
+ BRCMS_SROM_MCS5GLPO2,
+ BRCMS_SROM_MCS5GLPO3,
+ BRCMS_SROM_MCS5GLPO4,
+ BRCMS_SROM_MCS5GLPO5,
+ BRCMS_SROM_MCS5GLPO6,
+ BRCMS_SROM_MCS5GLPO7,
+ BRCMS_SROM_MCS5GPO0,
+ BRCMS_SROM_MCS5GPO1,
+ BRCMS_SROM_MCS5GPO2,
+ BRCMS_SROM_MCS5GPO3,
+ BRCMS_SROM_MCS5GPO4,
+ BRCMS_SROM_MCS5GPO5,
+ BRCMS_SROM_MCS5GPO6,
+ BRCMS_SROM_MCS5GPO7,
+ BRCMS_SROM_MCSBW202GPO,
+ BRCMS_SROM_MCSBW205GHPO,
+ BRCMS_SROM_MCSBW205GLPO,
+ BRCMS_SROM_MCSBW205GMPO,
+ BRCMS_SROM_MCSBW20UL2GPO,
+ BRCMS_SROM_MCSBW20UL5GHPO,
+ BRCMS_SROM_MCSBW20UL5GLPO,
+ BRCMS_SROM_MCSBW20UL5GMPO,
+ BRCMS_SROM_MCSBW402GPO,
+ BRCMS_SROM_MCSBW405GHPO,
+ BRCMS_SROM_MCSBW405GLPO,
+ BRCMS_SROM_MCSBW405GMPO,
+ BRCMS_SROM_MEASPOWER,
+ BRCMS_SROM_OFDM2GPO,
+ BRCMS_SROM_OFDM5GHPO,
+ BRCMS_SROM_OFDM5GLPO,
+ BRCMS_SROM_OFDM5GPO,
+ BRCMS_SROM_OPO,
+ BRCMS_SROM_PA0B0,
+ BRCMS_SROM_PA0B1,
+ BRCMS_SROM_PA0B2,
+ BRCMS_SROM_PA0ITSSIT,
+ BRCMS_SROM_PA0MAXPWR,
+ BRCMS_SROM_PA1B0,
+ BRCMS_SROM_PA1B1,
+ BRCMS_SROM_PA1B2,
+ BRCMS_SROM_PA1HIB0,
+ BRCMS_SROM_PA1HIB1,
+ BRCMS_SROM_PA1HIB2,
+ BRCMS_SROM_PA1HIMAXPWR,
+ BRCMS_SROM_PA1ITSSIT,
+ BRCMS_SROM_PA1LOB0,
+ BRCMS_SROM_PA1LOB1,
+ BRCMS_SROM_PA1LOB2,
+ BRCMS_SROM_PA1LOMAXPWR,
+ BRCMS_SROM_PA1MAXPWR,
+ BRCMS_SROM_PDETRANGE2G,
+ BRCMS_SROM_PDETRANGE5G,
+ BRCMS_SROM_PHYCAL_TEMPDELTA,
+ BRCMS_SROM_RAWTEMPSENSE,
+ BRCMS_SROM_REGREV,
+ BRCMS_SROM_REV,
+ BRCMS_SROM_RSSISAV2G,
+ BRCMS_SROM_RSSISAV5G,
+ BRCMS_SROM_RSSISMC2G,
+ BRCMS_SROM_RSSISMC5G,
+ BRCMS_SROM_RSSISMF2G,
+ BRCMS_SROM_RSSISMF5G,
+ BRCMS_SROM_RXCHAIN,
+ BRCMS_SROM_RXPO2G,
+ BRCMS_SROM_RXPO5G,
+ BRCMS_SROM_STBCPO,
+ BRCMS_SROM_TEMPCORRX,
+ BRCMS_SROM_TEMPOFFSET,
+ BRCMS_SROM_TEMPSENSE_OPTION,
+ BRCMS_SROM_TEMPSENSE_SLOPE,
+ BRCMS_SROM_TEMPTHRESH,
+ BRCMS_SROM_TRI2G,
+ BRCMS_SROM_TRI5GH,
+ BRCMS_SROM_TRI5GL,
+ BRCMS_SROM_TRI5G,
+ BRCMS_SROM_TRISO2G,
+ BRCMS_SROM_TRISO5G,
+ BRCMS_SROM_TSSIPOS2G,
+ BRCMS_SROM_TSSIPOS5G,
+ BRCMS_SROM_TXCHAIN,
+ BRCMS_SROM_TXPID2GA0,
+ BRCMS_SROM_TXPID2GA1,
+ BRCMS_SROM_TXPID2GA2,
+ BRCMS_SROM_TXPID2GA3,
+ BRCMS_SROM_TXPID5GA0,
+ BRCMS_SROM_TXPID5GA1,
+ BRCMS_SROM_TXPID5GA2,
+ BRCMS_SROM_TXPID5GA3,
+ BRCMS_SROM_TXPID5GHA0,
+ BRCMS_SROM_TXPID5GHA1,
+ BRCMS_SROM_TXPID5GHA2,
+ BRCMS_SROM_TXPID5GHA3,
+ BRCMS_SROM_TXPID5GLA0,
+ BRCMS_SROM_TXPID5GLA1,
+ BRCMS_SROM_TXPID5GLA2,
+ BRCMS_SROM_TXPID5GLA3,
+ /*
+ * per-path identifiers (see srom.c)
+ */
+ BRCMS_SROM_ITT2GA0,
+ BRCMS_SROM_ITT2GA1,
+ BRCMS_SROM_ITT2GA2,
+ BRCMS_SROM_ITT2GA3,
+ BRCMS_SROM_ITT5GA0,
+ BRCMS_SROM_ITT5GA1,
+ BRCMS_SROM_ITT5GA2,
+ BRCMS_SROM_ITT5GA3,
+ BRCMS_SROM_MAXP2GA0,
+ BRCMS_SROM_MAXP2GA1,
+ BRCMS_SROM_MAXP2GA2,
+ BRCMS_SROM_MAXP2GA3,
+ BRCMS_SROM_MAXP5GA0,
+ BRCMS_SROM_MAXP5GA1,
+ BRCMS_SROM_MAXP5GA2,
+ BRCMS_SROM_MAXP5GA3,
+ BRCMS_SROM_MAXP5GHA0,
+ BRCMS_SROM_MAXP5GHA1,
+ BRCMS_SROM_MAXP5GHA2,
+ BRCMS_SROM_MAXP5GHA3,
+ BRCMS_SROM_MAXP5GLA0,
+ BRCMS_SROM_MAXP5GLA1,
+ BRCMS_SROM_MAXP5GLA2,
+ BRCMS_SROM_MAXP5GLA3,
+ BRCMS_SROM_PA2GW0A0,
+ BRCMS_SROM_PA2GW0A1,
+ BRCMS_SROM_PA2GW0A2,
+ BRCMS_SROM_PA2GW0A3,
+ BRCMS_SROM_PA2GW1A0,
+ BRCMS_SROM_PA2GW1A1,
+ BRCMS_SROM_PA2GW1A2,
+ BRCMS_SROM_PA2GW1A3,
+ BRCMS_SROM_PA2GW2A0,
+ BRCMS_SROM_PA2GW2A1,
+ BRCMS_SROM_PA2GW2A2,
+ BRCMS_SROM_PA2GW2A3,
+ BRCMS_SROM_PA2GW3A0,
+ BRCMS_SROM_PA2GW3A1,
+ BRCMS_SROM_PA2GW3A2,
+ BRCMS_SROM_PA2GW3A3,
+ BRCMS_SROM_PA5GHW0A0,
+ BRCMS_SROM_PA5GHW0A1,
+ BRCMS_SROM_PA5GHW0A2,
+ BRCMS_SROM_PA5GHW0A3,
+ BRCMS_SROM_PA5GHW1A0,
+ BRCMS_SROM_PA5GHW1A1,
+ BRCMS_SROM_PA5GHW1A2,
+ BRCMS_SROM_PA5GHW1A3,
+ BRCMS_SROM_PA5GHW2A0,
+ BRCMS_SROM_PA5GHW2A1,
+ BRCMS_SROM_PA5GHW2A2,
+ BRCMS_SROM_PA5GHW2A3,
+ BRCMS_SROM_PA5GHW3A0,
+ BRCMS_SROM_PA5GHW3A1,
+ BRCMS_SROM_PA5GHW3A2,
+ BRCMS_SROM_PA5GHW3A3,
+ BRCMS_SROM_PA5GLW0A0,
+ BRCMS_SROM_PA5GLW0A1,
+ BRCMS_SROM_PA5GLW0A2,
+ BRCMS_SROM_PA5GLW0A3,
+ BRCMS_SROM_PA5GLW1A0,
+ BRCMS_SROM_PA5GLW1A1,
+ BRCMS_SROM_PA5GLW1A2,
+ BRCMS_SROM_PA5GLW1A3,
+ BRCMS_SROM_PA5GLW2A0,
+ BRCMS_SROM_PA5GLW2A1,
+ BRCMS_SROM_PA5GLW2A2,
+ BRCMS_SROM_PA5GLW2A3,
+ BRCMS_SROM_PA5GLW3A0,
+ BRCMS_SROM_PA5GLW3A1,
+ BRCMS_SROM_PA5GLW3A2,
+ BRCMS_SROM_PA5GLW3A3,
+ BRCMS_SROM_PA5GW0A0,
+ BRCMS_SROM_PA5GW0A1,
+ BRCMS_SROM_PA5GW0A2,
+ BRCMS_SROM_PA5GW0A3,
+ BRCMS_SROM_PA5GW1A0,
+ BRCMS_SROM_PA5GW1A1,
+ BRCMS_SROM_PA5GW1A2,
+ BRCMS_SROM_PA5GW1A3,
+ BRCMS_SROM_PA5GW2A0,
+ BRCMS_SROM_PA5GW2A1,
+ BRCMS_SROM_PA5GW2A2,
+ BRCMS_SROM_PA5GW2A3,
+ BRCMS_SROM_PA5GW3A0,
+ BRCMS_SROM_PA5GW3A1,
+ BRCMS_SROM_PA5GW3A2,
+ BRCMS_SROM_PA5GW3A3,
+};
+
+#define BRCMS_NUMRATES 16 /* max # of rates in a rateset */
+#define D11_PHY_HDR_LEN 6 /* Phy header length - 6 bytes */
+
+/* phy types */
+#define PHY_TYPE_A 0 /* Phy type A */
+#define PHY_TYPE_G 2 /* Phy type G */
+#define PHY_TYPE_N 4 /* Phy type N */
+#define PHY_TYPE_LP 5 /* Phy type Low Power A/B/G */
+#define PHY_TYPE_SSN 6 /* Phy type Single Stream N */
+#define PHY_TYPE_LCN 8 /* Phy type Single Stream N */
+#define PHY_TYPE_LCNXN 9 /* Phy type 2-stream N */
+#define PHY_TYPE_HT 7 /* Phy type 3-Stream N */
+
+/* bw */
+#define BRCMS_10_MHZ 10 /* 10Mhz nphy channel bandwidth */
+#define BRCMS_20_MHZ 20 /* 20Mhz nphy channel bandwidth */
+#define BRCMS_40_MHZ 40 /* 40Mhz nphy channel bandwidth */
+
+#define BRCMS_RSSI_MINVAL -200 /* Low value, e.g. for forcing roam */
+#define BRCMS_RSSI_NO_SIGNAL -91 /* NDIS RSSI link quality cutoffs */
+#define BRCMS_RSSI_VERY_LOW -80 /* Very low quality cutoffs */
+#define BRCMS_RSSI_LOW -70 /* Low quality cutoffs */
+#define BRCMS_RSSI_GOOD -68 /* Good quality cutoffs */
+#define BRCMS_RSSI_VERY_GOOD -58 /* Very good quality cutoffs */
+#define BRCMS_RSSI_EXCELLENT -57 /* Excellent quality cutoffs */
+
+/* a large TX Power as an init value to factor out of min() calculations,
+ * keep low enough to fit in an s8, units are .25 dBm
+ */
+#define BRCMS_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */
+
+/* rate related definitions */
+#define BRCMS_RATE_FLAG 0x80 /* Flag to indicate it is a basic rate */
+#define BRCMS_RATE_MASK 0x7f /* Rate value mask w/o basic rate flag */
+
+/* legacy rx Antenna diversity for SISO rates */
+#define ANT_RX_DIV_FORCE_0 0 /* Use antenna 0 */
+#define ANT_RX_DIV_FORCE_1 1 /* Use antenna 1 */
+#define ANT_RX_DIV_START_1 2 /* Choose starting with 1 */
+#define ANT_RX_DIV_START_0 3 /* Choose starting with 0 */
+#define ANT_RX_DIV_ENABLE 3 /* APHY bbConfig Enable RX Diversity */
+/* default antdiv setting */
+#define ANT_RX_DIV_DEF ANT_RX_DIV_START_0
+
+/* legacy rx Antenna diversity for SISO rates */
+/* Tx on antenna 0, "legacy term Main" */
+#define ANT_TX_FORCE_0 0
+/* Tx on antenna 1, "legacy term Aux" */
+#define ANT_TX_FORCE_1 1
+/* Tx on phy's last good Rx antenna */
+#define ANT_TX_LAST_RX 3
+/* driver's default tx antenna setting */
+#define ANT_TX_DEF 3
+
+/* Tx Chain values */
+/* def bitmap of txchain */
+#define TXCHAIN_DEF 0x1
+/* default bitmap of tx chains for nphy */
+#define TXCHAIN_DEF_NPHY 0x3
+/* default bitmap of tx chains for nphy */
+#define TXCHAIN_DEF_HTPHY 0x7
+/* def bitmap of rxchain */
+#define RXCHAIN_DEF 0x1
+/* default bitmap of rx chains for nphy */
+#define RXCHAIN_DEF_NPHY 0x3
+/* default bitmap of rx chains for nphy */
+#define RXCHAIN_DEF_HTPHY 0x7
+/* no antenna switch */
+#define ANTSWITCH_NONE 0
+/* antenna switch on 4321CB2, 2of3 */
+#define ANTSWITCH_TYPE_1 1
+/* antenna switch on 4321MPCI, 2of3 */
+#define ANTSWITCH_TYPE_2 2
+/* antenna switch on 4322, 2of3 */
+#define ANTSWITCH_TYPE_3 3
+
+#define RXBUFSZ PKTBUFSZ
+
+#define MAX_STREAMS_SUPPORTED 4 /* max number of streams supported */
+
+struct brcm_rateset {
+ /* # rates in this set */
+ u32 count;
+ /* rates in 500kbps units w/hi bit set if basic */
+ u8 rates[WL_NUMRATES];
+};
+
+struct brcms_c_rateset {
+ uint count; /* number of rates in rates[] */
+ /* rates in 500kbps units w/hi bit set if basic */
+ u8 rates[BRCMS_NUMRATES];
+ u8 htphy_membership; /* HT PHY Membership */
+ u8 mcs[MCSSET_LEN]; /* supported mcs index bit map */
+};
+
+/* All the HT-specific default advertised capabilities (including AMPDU)
+ * should be grouped here at one place
+ */
+#define AMPDU_DEF_MPDU_DENSITY 6 /* default mpdu density (110 ==> 4us) */
+
+/* wlc internal bss_info */
+struct brcms_bss_info {
+ u8 BSSID[ETH_ALEN]; /* network BSSID */
+ u16 flags; /* flags for internal attributes */
+ u8 SSID_len; /* the length of SSID */
+ u8 SSID[32]; /* SSID string */
+ s16 RSSI; /* receive signal strength (in dBm) */
+ s16 SNR; /* receive signal SNR in dB */
+ u16 beacon_period; /* units are Kusec */
+ u16 chanspec; /* Channel num, bw, ctrl_sb and band */
+ struct brcms_c_rateset rateset; /* supported rates */
+};
+
+#define MAC80211_PROMISC_BCNS (1 << 0)
+#define MAC80211_SCAN (1 << 1)
+
+/*
+ * Public portion of common driver state structure.
+ * The wlc handle points at this.
+ */
+struct brcms_pub {
+ struct brcms_c_info *wlc;
+ struct ieee80211_hw *ieee_hw;
+ struct scb_ampdu *global_ampdu;
+ uint mac80211_state;
+ uint unit; /* device instance number */
+ uint corerev; /* core revision */
+ struct si_pub *sih; /* SI handle (cookie for siutils calls) */
+ bool up; /* interface up and running */
+ bool hw_off; /* HW is off */
+ bool hw_up; /* one time hw up/down */
+ bool _piomode; /* true if pio mode */
+ uint _nbands; /* # bands supported */
+ uint now; /* # elapsed seconds */
+
+ bool promisc; /* promiscuous destination address */
+ bool delayed_down; /* down delayed */
+ bool associated; /* true:part of [I]BSS, false: not */
+ /* (union of stas_associated, aps_associated) */
+ bool _ampdu; /* ampdu enabled or not */
+ u8 _n_enab; /* bitmap of 11N + HT support */
+
+ u8 cur_etheraddr[ETH_ALEN]; /* our local ethernet address */
+
+ int bcmerror; /* last bcm error */
+
+ u32 radio_disabled; /* bit vector for radio disabled reasons */
+
+ u16 boardrev; /* version # of particular board */
+ u8 sromrev; /* version # of the srom */
+ char srom_ccode[BRCM_CNTRY_BUF_SZ]; /* Country Code in SROM */
+ u32 boardflags; /* Board specific flags from srom */
+ u32 boardflags2; /* More board flags if sromrev >= 4 */
+ bool phy_11ncapable; /* the PHY/HW is capable of 802.11N */
+
+ struct wl_cnt *_cnt; /* low-level counters in driver */
+};
+
+enum wlc_par_id {
+ IOV_MPC = 1,
+ IOV_RTSTHRESH,
+ IOV_QTXPOWER,
+ IOV_BCN_LI_BCN /* Beacon listen interval in # of beacons */
+};
+
+/***********************************************
+ * Feature-related macros to optimize out code *
+ * *********************************************
+ */
+
+#define ENAB_1x1 0x01
+#define ENAB_2x2 0x02
+#define ENAB_3x3 0x04
+#define ENAB_4x4 0x08
+#define SUPPORT_11N (ENAB_1x1|ENAB_2x2)
+#define SUPPORT_HT (ENAB_1x1|ENAB_2x2|ENAB_3x3)
+
+/* WL11N Support */
+#define AMPDU_AGG_HOST 1
+
+/* pri is priority encoded in the packet. This maps the Packet priority to
+ * enqueue precedence as defined in wlc_prec_map
+ */
+extern const u8 wlc_prio2prec_map[];
+#define BRCMS_PRIO_TO_PREC(pri) wlc_prio2prec_map[(pri) & 7]
+
+#define BRCMS_PREC_COUNT 16 /* Max precedence level implemented */
+
+/* Mask to describe all precedence levels */
+#define BRCMS_PREC_BMP_ALL MAXBITVAL(BRCMS_PREC_COUNT)
+
+/*
+ * This maps priority to one precedence higher - Used by PS-Poll response
+ * packets to simulate enqueue-at-head operation, but still maintain the
+ * order on the queue
+ */
+#define BRCMS_PRIO_TO_HI_PREC(pri) min(BRCMS_PRIO_TO_PREC(pri) + 1,\
+ BRCMS_PREC_COUNT - 1)
+
+/* Define a bitmap of precedences comprised by each AC */
+#define BRCMS_PREC_BMP_AC_BE (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_BE)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_BE)) | \
+ NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_EE)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_EE)))
+#define BRCMS_PREC_BMP_AC_BK (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_BK)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_BK)) | \
+ NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_NONE)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_NONE)))
+#define BRCMS_PREC_BMP_AC_VI (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_CL)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_CL)) | \
+ NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_VI)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_VI)))
+#define BRCMS_PREC_BMP_AC_VO (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_VO)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_VO)) | \
+ NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_NC)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_NC)))
+
+/* network protection config */
+#define BRCMS_PROT_G_SPEC 1 /* SPEC g protection */
+#define BRCMS_PROT_G_OVR 2 /* SPEC g prot override */
+#define BRCMS_PROT_G_USER 3 /* gmode specified by user */
+#define BRCMS_PROT_OVERLAP 4 /* overlap */
+#define BRCMS_PROT_N_USER 10 /* nmode specified by user */
+#define BRCMS_PROT_N_CFG 11 /* n protection */
+#define BRCMS_PROT_N_CFG_OVR 12 /* n protection override */
+#define BRCMS_PROT_N_NONGF 13 /* non-GF protection */
+#define BRCMS_PROT_N_NONGF_OVR 14 /* non-GF protection override */
+#define BRCMS_PROT_N_PAM_OVR 15 /* n preamble override */
+#define BRCMS_PROT_N_OBSS 16 /* non-HT OBSS present */
+
+/*
+ * 54g modes (basic bits may still be overridden)
+ *
+ * GMODE_LEGACY_B
+ * Rateset: 1b, 2b, 5.5, 11
+ * Preamble: Long
+ * Shortslot: Off
+ * GMODE_AUTO
+ * Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ * Extended Rateset: 6, 9, 12, 48
+ * Preamble: Long
+ * Shortslot: Auto
+ * GMODE_ONLY
+ * Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54
+ * Extended Rateset: 6b, 9, 12b, 48
+ * Preamble: Short required
+ * Shortslot: Auto
+ * GMODE_B_DEFERRED
+ * Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ * Extended Rateset: 6, 9, 12, 48
+ * Preamble: Long
+ * Shortslot: On
+ * GMODE_PERFORMANCE
+ * Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54
+ * Preamble: Short required
+ * Shortslot: On and required
+ * GMODE_LRS
+ * Rateset: 1b, 2b, 5.5b, 11b
+ * Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54
+ * Preamble: Long
+ * Shortslot: Auto
+ */
+#define GMODE_LEGACY_B 0
+#define GMODE_AUTO 1
+#define GMODE_ONLY 2
+#define GMODE_B_DEFERRED 3
+#define GMODE_PERFORMANCE 4
+#define GMODE_LRS 5
+#define GMODE_MAX 6
+
+/* MCS values greater than this enable multiple streams */
+#define HIGHEST_SINGLE_STREAM_MCS 7
+
+#define MAXBANDS 2 /* Maximum #of bands */
+
+/* max number of antenna configurations */
+#define ANT_SELCFG_MAX 4
+
+struct brcms_antselcfg {
+ u8 ant_config[ANT_SELCFG_MAX]; /* antenna configuration */
+ u8 num_antcfg; /* number of available antenna configurations */
+};
+
+/* common functions for every port */
+extern struct brcms_c_info *
+brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
+ bool piomode, void __iomem *regsva, struct pci_dev *btparam,
+ uint *perr);
+extern uint brcms_c_detach(struct brcms_c_info *wlc);
+extern int brcms_c_up(struct brcms_c_info *wlc);
+extern uint brcms_c_down(struct brcms_c_info *wlc);
+
+extern bool brcms_c_chipmatch(u16 vendor, u16 device);
+extern void brcms_c_init(struct brcms_c_info *wlc);
+extern void brcms_c_reset(struct brcms_c_info *wlc);
+
+extern void brcms_c_intrson(struct brcms_c_info *wlc);
+extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
+extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
+extern bool brcms_c_intrsupd(struct brcms_c_info *wlc);
+extern bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc);
+extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
+extern void brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc,
+ struct sk_buff *sdu,
+ struct ieee80211_hw *hw);
+extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
+extern void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx,
+ int val);
+extern int brcms_c_get_header_len(void);
+extern void brcms_c_set_addrmatch(struct brcms_c_info *wlc,
+ int match_reg_offset,
+ const u8 *addr);
+extern void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
+ const struct ieee80211_tx_queue_params *arg,
+ bool suspend);
+extern struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
+extern void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
+ struct ieee80211_sta *sta, u16 tid);
+extern void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
+ u8 ba_wsize, uint max_rx_ampdu_bytes);
+extern char *getvar(struct si_pub *sih, enum brcms_srom_id id);
+extern int getintvar(struct si_pub *sih, enum brcms_srom_id id);
+extern int brcms_c_module_register(struct brcms_pub *pub,
+ const char *name, struct brcms_info *hdl,
+ int (*down_fn)(void *handle));
+extern int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
+ struct brcms_info *hdl);
+extern void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
+extern void brcms_c_enable_mac(struct brcms_c_info *wlc);
+extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
+extern void brcms_c_scan_start(struct brcms_c_info *wlc);
+extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
+extern int brcms_c_get_curband(struct brcms_c_info *wlc);
+extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
+ bool drop);
+extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
+extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
+extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
+ struct brcm_rateset *currs);
+extern int brcms_c_set_rateset(struct brcms_c_info *wlc,
+ struct brcm_rateset *rs);
+extern int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
+extern u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
+extern void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
+ s8 sslot_override);
+extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc,
+ u8 interval);
+extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
+extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
+extern void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc);
+extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+
+#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.c b/drivers/net/wireless/brcm80211/brcmsmac/rate.c
new file mode 100644
index 00000000000..0a0c0ad4f96
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/rate.c
@@ -0,0 +1,514 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+
+#include "d11.h"
+#include "pub.h"
+#include "rate.h"
+
+/*
+ * Rate info per rate: It tells whether a rate is ofdm or not and its phy_rate
+ * value
+ */
+const u8 rate_info[BRCM_MAXRATE + 1] = {
+ /* 0 1 2 3 4 5 6 7 8 9 */
+/* 0 */ 0x00, 0x00, 0x0a, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* 10 */ 0x00, 0x37, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x00,
+/* 20 */ 0x00, 0x00, 0x6e, 0x00, 0x8a, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* 30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x00, 0x00, 0x00,
+/* 40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x00,
+/* 50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* 60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* 70 */ 0x00, 0x00, 0x8d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* 80 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* 90 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00,
+/* 100 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c
+};
+
+/* rates are in units of Kbps */
+const struct brcms_mcs_info mcs_table[MCS_TABLE_SIZE] = {
+ /* MCS 0: SS 1, MOD: BPSK, CR 1/2 */
+ {6500, 13500, CEIL(6500 * 10, 9), CEIL(13500 * 10, 9), 0x00,
+ BRCM_RATE_6M},
+ /* MCS 1: SS 1, MOD: QPSK, CR 1/2 */
+ {13000, 27000, CEIL(13000 * 10, 9), CEIL(27000 * 10, 9), 0x08,
+ BRCM_RATE_12M},
+ /* MCS 2: SS 1, MOD: QPSK, CR 3/4 */
+ {19500, 40500, CEIL(19500 * 10, 9), CEIL(40500 * 10, 9), 0x0A,
+ BRCM_RATE_18M},
+ /* MCS 3: SS 1, MOD: 16QAM, CR 1/2 */
+ {26000, 54000, CEIL(26000 * 10, 9), CEIL(54000 * 10, 9), 0x10,
+ BRCM_RATE_24M},
+ /* MCS 4: SS 1, MOD: 16QAM, CR 3/4 */
+ {39000, 81000, CEIL(39000 * 10, 9), CEIL(81000 * 10, 9), 0x12,
+ BRCM_RATE_36M},
+ /* MCS 5: SS 1, MOD: 64QAM, CR 2/3 */
+ {52000, 108000, CEIL(52000 * 10, 9), CEIL(108000 * 10, 9), 0x19,
+ BRCM_RATE_48M},
+ /* MCS 6: SS 1, MOD: 64QAM, CR 3/4 */
+ {58500, 121500, CEIL(58500 * 10, 9), CEIL(121500 * 10, 9), 0x1A,
+ BRCM_RATE_54M},
+ /* MCS 7: SS 1, MOD: 64QAM, CR 5/6 */
+ {65000, 135000, CEIL(65000 * 10, 9), CEIL(135000 * 10, 9), 0x1C,
+ BRCM_RATE_54M},
+ /* MCS 8: SS 2, MOD: BPSK, CR 1/2 */
+ {13000, 27000, CEIL(13000 * 10, 9), CEIL(27000 * 10, 9), 0x40,
+ BRCM_RATE_6M},
+ /* MCS 9: SS 2, MOD: QPSK, CR 1/2 */
+ {26000, 54000, CEIL(26000 * 10, 9), CEIL(54000 * 10, 9), 0x48,
+ BRCM_RATE_12M},
+ /* MCS 10: SS 2, MOD: QPSK, CR 3/4 */
+ {39000, 81000, CEIL(39000 * 10, 9), CEIL(81000 * 10, 9), 0x4A,
+ BRCM_RATE_18M},
+ /* MCS 11: SS 2, MOD: 16QAM, CR 1/2 */
+ {52000, 108000, CEIL(52000 * 10, 9), CEIL(108000 * 10, 9), 0x50,
+ BRCM_RATE_24M},
+ /* MCS 12: SS 2, MOD: 16QAM, CR 3/4 */
+ {78000, 162000, CEIL(78000 * 10, 9), CEIL(162000 * 10, 9), 0x52,
+ BRCM_RATE_36M},
+ /* MCS 13: SS 2, MOD: 64QAM, CR 2/3 */
+ {104000, 216000, CEIL(104000 * 10, 9), CEIL(216000 * 10, 9), 0x59,
+ BRCM_RATE_48M},
+ /* MCS 14: SS 2, MOD: 64QAM, CR 3/4 */
+ {117000, 243000, CEIL(117000 * 10, 9), CEIL(243000 * 10, 9), 0x5A,
+ BRCM_RATE_54M},
+ /* MCS 15: SS 2, MOD: 64QAM, CR 5/6 */
+ {130000, 270000, CEIL(130000 * 10, 9), CEIL(270000 * 10, 9), 0x5C,
+ BRCM_RATE_54M},
+ /* MCS 16: SS 3, MOD: BPSK, CR 1/2 */
+ {19500, 40500, CEIL(19500 * 10, 9), CEIL(40500 * 10, 9), 0x80,
+ BRCM_RATE_6M},
+ /* MCS 17: SS 3, MOD: QPSK, CR 1/2 */
+ {39000, 81000, CEIL(39000 * 10, 9), CEIL(81000 * 10, 9), 0x88,
+ BRCM_RATE_12M},
+ /* MCS 18: SS 3, MOD: QPSK, CR 3/4 */
+ {58500, 121500, CEIL(58500 * 10, 9), CEIL(121500 * 10, 9), 0x8A,
+ BRCM_RATE_18M},
+ /* MCS 19: SS 3, MOD: 16QAM, CR 1/2 */
+ {78000, 162000, CEIL(78000 * 10, 9), CEIL(162000 * 10, 9), 0x90,
+ BRCM_RATE_24M},
+ /* MCS 20: SS 3, MOD: 16QAM, CR 3/4 */
+ {117000, 243000, CEIL(117000 * 10, 9), CEIL(243000 * 10, 9), 0x92,
+ BRCM_RATE_36M},
+ /* MCS 21: SS 3, MOD: 64QAM, CR 2/3 */
+ {156000, 324000, CEIL(156000 * 10, 9), CEIL(324000 * 10, 9), 0x99,
+ BRCM_RATE_48M},
+ /* MCS 22: SS 3, MOD: 64QAM, CR 3/4 */
+ {175500, 364500, CEIL(175500 * 10, 9), CEIL(364500 * 10, 9), 0x9A,
+ BRCM_RATE_54M},
+ /* MCS 23: SS 3, MOD: 64QAM, CR 5/6 */
+ {195000, 405000, CEIL(195000 * 10, 9), CEIL(405000 * 10, 9), 0x9B,
+ BRCM_RATE_54M},
+ /* MCS 24: SS 4, MOD: BPSK, CR 1/2 */
+ {26000, 54000, CEIL(26000 * 10, 9), CEIL(54000 * 10, 9), 0xC0,
+ BRCM_RATE_6M},
+ /* MCS 25: SS 4, MOD: QPSK, CR 1/2 */
+ {52000, 108000, CEIL(52000 * 10, 9), CEIL(108000 * 10, 9), 0xC8,
+ BRCM_RATE_12M},
+ /* MCS 26: SS 4, MOD: QPSK, CR 3/4 */
+ {78000, 162000, CEIL(78000 * 10, 9), CEIL(162000 * 10, 9), 0xCA,
+ BRCM_RATE_18M},
+ /* MCS 27: SS 4, MOD: 16QAM, CR 1/2 */
+ {104000, 216000, CEIL(104000 * 10, 9), CEIL(216000 * 10, 9), 0xD0,
+ BRCM_RATE_24M},
+ /* MCS 28: SS 4, MOD: 16QAM, CR 3/4 */
+ {156000, 324000, CEIL(156000 * 10, 9), CEIL(324000 * 10, 9), 0xD2,
+ BRCM_RATE_36M},
+ /* MCS 29: SS 4, MOD: 64QAM, CR 2/3 */
+ {208000, 432000, CEIL(208000 * 10, 9), CEIL(432000 * 10, 9), 0xD9,
+ BRCM_RATE_48M},
+ /* MCS 30: SS 4, MOD: 64QAM, CR 3/4 */
+ {234000, 486000, CEIL(234000 * 10, 9), CEIL(486000 * 10, 9), 0xDA,
+ BRCM_RATE_54M},
+ /* MCS 31: SS 4, MOD: 64QAM, CR 5/6 */
+ {260000, 540000, CEIL(260000 * 10, 9), CEIL(540000 * 10, 9), 0xDB,
+ BRCM_RATE_54M},
+ /* MCS 32: SS 1, MOD: BPSK, CR 1/2 */
+ {0, 6000, 0, CEIL(6000 * 10, 9), 0x00, BRCM_RATE_6M},
+};
+
+/*
+ * phycfg for legacy OFDM frames: code rate, modulation scheme, spatial streams
+ * Number of spatial streams: always 1 other fields: refer to table 78 of
+ * section 17.3.2.2 of the original .11a standard
+ */
+struct legacy_phycfg {
+ u32 rate_ofdm; /* ofdm mac rate */
+ /* phy ctl byte 3, code rate, modulation type, # of streams */
+ u8 tx_phy_ctl3;
+};
+
+/* Number of legacy_rate_cfg entries in the table */
+#define LEGACY_PHYCFG_TABLE_SIZE 12
+
+/*
+ * In CCK mode LPPHY overloads OFDM Modulation bits with CCK Data Rate
+ * Eventually MIMOPHY would also be converted to this format
+ * 0 = 1Mbps; 1 = 2Mbps; 2 = 5.5Mbps; 3 = 11Mbps
+ */
+static const struct
+legacy_phycfg legacy_phycfg_table[LEGACY_PHYCFG_TABLE_SIZE] = {
+ {BRCM_RATE_1M, 0x00}, /* CCK 1Mbps, data rate 0 */
+ {BRCM_RATE_2M, 0x08}, /* CCK 2Mbps, data rate 1 */
+ {BRCM_RATE_5M5, 0x10}, /* CCK 5.5Mbps, data rate 2 */
+ {BRCM_RATE_11M, 0x18}, /* CCK 11Mbps, data rate 3 */
+ /* OFDM 6Mbps, code rate 1/2, BPSK, 1 spatial stream */
+ {BRCM_RATE_6M, 0x00},
+ /* OFDM 9Mbps, code rate 3/4, BPSK, 1 spatial stream */
+ {BRCM_RATE_9M, 0x02},
+ /* OFDM 12Mbps, code rate 1/2, QPSK, 1 spatial stream */
+ {BRCM_RATE_12M, 0x08},
+ /* OFDM 18Mbps, code rate 3/4, QPSK, 1 spatial stream */
+ {BRCM_RATE_18M, 0x0A},
+ /* OFDM 24Mbps, code rate 1/2, 16-QAM, 1 spatial stream */
+ {BRCM_RATE_24M, 0x10},
+ /* OFDM 36Mbps, code rate 3/4, 16-QAM, 1 spatial stream */
+ {BRCM_RATE_36M, 0x12},
+ /* OFDM 48Mbps, code rate 2/3, 64-QAM, 1 spatial stream */
+ {BRCM_RATE_48M, 0x19},
+ /* OFDM 54Mbps, code rate 3/4, 64-QAM, 1 spatial stream */
+ {BRCM_RATE_54M, 0x1A},
+};
+
+/* Hardware rates (also encodes default basic rates) */
+
+const struct brcms_c_rateset cck_ofdm_mimo_rates = {
+ 12,
+ /* 1b, 2b, 5.5b, 6, 9, 11b, 12, 18, 24, 36, 48, */
+ { 0x82, 0x84, 0x8b, 0x0c, 0x12, 0x96, 0x18, 0x24, 0x30, 0x48, 0x60,
+ /* 54 Mbps */
+ 0x6c},
+ 0x00,
+ { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+const struct brcms_c_rateset ofdm_mimo_rates = {
+ 8,
+ /* 6b, 9, 12b, 18, 24b, 36, 48, 54 Mbps */
+ { 0x8c, 0x12, 0x98, 0x24, 0xb0, 0x48, 0x60, 0x6c},
+ 0x00,
+ { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+/* Default ratesets that include MCS32 for 40BW channels */
+static const struct brcms_c_rateset cck_ofdm_40bw_mimo_rates = {
+ 12,
+ /* 1b, 2b, 5.5b, 6, 9, 11b, 12, 18, 24, 36, 48 */
+ { 0x82, 0x84, 0x8b, 0x0c, 0x12, 0x96, 0x18, 0x24, 0x30, 0x48, 0x60,
+ /* 54 Mbps */
+ 0x6c},
+ 0x00,
+ { 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct brcms_c_rateset ofdm_40bw_mimo_rates = {
+ 8,
+ /* 6b, 9, 12b, 18, 24b, 36, 48, 54 Mbps */
+ { 0x8c, 0x12, 0x98, 0x24, 0xb0, 0x48, 0x60, 0x6c},
+ 0x00,
+ { 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+const struct brcms_c_rateset cck_ofdm_rates = {
+ 12,
+ /* 1b, 2b, 5.5b, 6, 9, 11b, 12, 18, 24, 36, 48,*/
+ { 0x82, 0x84, 0x8b, 0x0c, 0x12, 0x96, 0x18, 0x24, 0x30, 0x48, 0x60,
+ /*54 Mbps */
+ 0x6c},
+ 0x00,
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+const struct brcms_c_rateset gphy_legacy_rates = {
+ 4,
+ /* 1b, 2b, 5.5b, 11b Mbps */
+ { 0x82, 0x84, 0x8b, 0x96},
+ 0x00,
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+const struct brcms_c_rateset ofdm_rates = {
+ 8,
+ /* 6b, 9, 12b, 18, 24b, 36, 48, 54 Mbps */
+ { 0x8c, 0x12, 0x98, 0x24, 0xb0, 0x48, 0x60, 0x6c},
+ 0x00,
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+const struct brcms_c_rateset cck_rates = {
+ 4,
+ /* 1b, 2b, 5.5, 11 Mbps */
+ { 0x82, 0x84, 0x0b, 0x16},
+ 0x00,
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+/* check if rateset is valid.
+ * if check_brate is true, rateset without a basic rate is considered NOT valid.
+ */
+static bool brcms_c_rateset_valid(struct brcms_c_rateset *rs, bool check_brate)
+{
+ uint idx;
+
+ if (!rs->count)
+ return false;
+
+ if (!check_brate)
+ return true;
+
+ /* error if no basic rates */
+ for (idx = 0; idx < rs->count; idx++) {
+ if (rs->rates[idx] & BRCMS_RATE_FLAG)
+ return true;
+ }
+ return false;
+}
+
+void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams)
+{
+ int i;
+ for (i = txstreams; i < MAX_STREAMS_SUPPORTED; i++)
+ rs->mcs[i] = 0;
+}
+
+/*
+ * filter based on hardware rateset, and sort filtered rateset with basic
+ * bit(s) preserved, and check if resulting rateset is valid.
+*/
+bool
+brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
+ const struct brcms_c_rateset *hw_rs,
+ bool check_brate, u8 txstreams)
+{
+ u8 rateset[BRCM_MAXRATE + 1];
+ u8 r;
+ uint count;
+ uint i;
+
+ memset(rateset, 0, sizeof(rateset));
+ count = rs->count;
+
+ for (i = 0; i < count; i++) {
+ /* mask off "basic rate" bit, BRCMS_RATE_FLAG */
+ r = (int)rs->rates[i] & BRCMS_RATE_MASK;
+ if ((r > BRCM_MAXRATE) || (rate_info[r] == 0))
+ continue;
+ rateset[r] = rs->rates[i]; /* preserve basic bit! */
+ }
+
+ /* fill out the rates in order, looking at only supported rates */
+ count = 0;
+ for (i = 0; i < hw_rs->count; i++) {
+ r = hw_rs->rates[i] & BRCMS_RATE_MASK;
+ if (rateset[r])
+ rs->rates[count++] = rateset[r];
+ }
+
+ rs->count = count;
+
+ /* only set the mcs rate bit if the equivalent hw mcs bit is set */
+ for (i = 0; i < MCSSET_LEN; i++)
+ rs->mcs[i] = (rs->mcs[i] & hw_rs->mcs[i]);
+
+ if (brcms_c_rateset_valid(rs, check_brate))
+ return true;
+ else
+ return false;
+}
+
+/* calculate the rate of a rx'd frame and return it as a ratespec */
+u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp)
+{
+ int phy_type;
+ u32 rspec = PHY_TXC1_BW_20MHZ << RSPEC_BW_SHIFT;
+
+ phy_type =
+ ((rxh->RxChan & RXS_CHAN_PHYTYPE_MASK) >> RXS_CHAN_PHYTYPE_SHIFT);
+
+ if ((phy_type == PHY_TYPE_N) || (phy_type == PHY_TYPE_SSN) ||
+ (phy_type == PHY_TYPE_LCN) || (phy_type == PHY_TYPE_HT)) {
+ switch (rxh->PhyRxStatus_0 & PRXS0_FT_MASK) {
+ case PRXS0_CCK:
+ rspec =
+ cck_phy2mac_rate(
+ ((struct cck_phy_hdr *) plcp)->signal);
+ break;
+ case PRXS0_OFDM:
+ rspec =
+ ofdm_phy2mac_rate(
+ ((struct ofdm_phy_hdr *) plcp)->rlpt[0]);
+ break;
+ case PRXS0_PREN:
+ rspec = (plcp[0] & MIMO_PLCP_MCS_MASK) | RSPEC_MIMORATE;
+ if (plcp[0] & MIMO_PLCP_40MHZ) {
+ /* indicate rspec is for 40 MHz mode */
+ rspec &= ~RSPEC_BW_MASK;
+ rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);
+ }
+ break;
+ case PRXS0_STDN:
+ /* fallthru */
+ default:
+ /* not supported, error condition */
+ break;
+ }
+ if (plcp3_issgi(plcp[3]))
+ rspec |= RSPEC_SHORT_GI;
+ } else
+ if ((phy_type == PHY_TYPE_A) || (rxh->PhyRxStatus_0 & PRXS0_OFDM))
+ rspec = ofdm_phy2mac_rate(
+ ((struct ofdm_phy_hdr *) plcp)->rlpt[0]);
+ else
+ rspec = cck_phy2mac_rate(
+ ((struct cck_phy_hdr *) plcp)->signal);
+
+ return rspec;
+}
+
+/* copy rateset src to dst as-is (no masking or sorting) */
+void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
+ struct brcms_c_rateset *dst)
+{
+ memcpy(dst, src, sizeof(struct brcms_c_rateset));
+}
+
+/*
+ * Copy and selectively filter one rateset to another.
+ * 'basic_only' means only copy basic rates.
+ * 'rates' indicates cck (11b) and ofdm rates combinations.
+ * - 0: cck and ofdm
+ * - 1: cck only
+ * - 2: ofdm only
+ * 'xmask' is the copy mask (typically 0x7f or 0xff).
+ */
+void
+brcms_c_rateset_filter(struct brcms_c_rateset *src, struct brcms_c_rateset *dst,
+ bool basic_only, u8 rates, uint xmask, bool mcsallow)
+{
+ uint i;
+ uint r;
+ uint count;
+
+ count = 0;
+ for (i = 0; i < src->count; i++) {
+ r = src->rates[i];
+ if (basic_only && !(r & BRCMS_RATE_FLAG))
+ continue;
+ if (rates == BRCMS_RATES_CCK &&
+ is_ofdm_rate((r & BRCMS_RATE_MASK)))
+ continue;
+ if (rates == BRCMS_RATES_OFDM &&
+ is_cck_rate((r & BRCMS_RATE_MASK)))
+ continue;
+ dst->rates[count++] = r & xmask;
+ }
+ dst->count = count;
+ dst->htphy_membership = src->htphy_membership;
+
+ if (mcsallow && rates != BRCMS_RATES_CCK)
+ memcpy(&dst->mcs[0], &src->mcs[0], MCSSET_LEN);
+ else
+ brcms_c_rateset_mcs_clear(dst);
+}
+
+/* select rateset for a given phy_type and bandtype and filter it, sort it
+ * and fill rs_tgt with result
+ */
+void
+brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
+ const struct brcms_c_rateset *rs_hw,
+ uint phy_type, int bandtype, bool cck_only,
+ uint rate_mask, bool mcsallow, u8 bw, u8 txstreams)
+{
+ const struct brcms_c_rateset *rs_dflt;
+ struct brcms_c_rateset rs_sel;
+ if ((PHYTYPE_IS(phy_type, PHY_TYPE_HT)) ||
+ (PHYTYPE_IS(phy_type, PHY_TYPE_N)) ||
+ (PHYTYPE_IS(phy_type, PHY_TYPE_LCN)) ||
+ (PHYTYPE_IS(phy_type, PHY_TYPE_SSN))) {
+ if (bandtype == BRCM_BAND_5G)
+ rs_dflt = (bw == BRCMS_20_MHZ ?
+ &ofdm_mimo_rates : &ofdm_40bw_mimo_rates);
+ else
+ rs_dflt = (bw == BRCMS_20_MHZ ?
+ &cck_ofdm_mimo_rates :
+ &cck_ofdm_40bw_mimo_rates);
+ } else if (PHYTYPE_IS(phy_type, PHY_TYPE_LP)) {
+ rs_dflt = (bandtype == BRCM_BAND_5G) ?
+ &ofdm_rates : &cck_ofdm_rates;
+ } else if (PHYTYPE_IS(phy_type, PHY_TYPE_A)) {
+ rs_dflt = &ofdm_rates;
+ } else if (PHYTYPE_IS(phy_type, PHY_TYPE_G)) {
+ rs_dflt = &cck_ofdm_rates;
+ } else {
+ /* should not happen, error condition */
+ rs_dflt = &cck_rates; /* force cck */
+ }
+
+ /* if hw rateset is not supplied, assign selected rateset to it */
+ if (!rs_hw)
+ rs_hw = rs_dflt;
+
+ brcms_c_rateset_copy(rs_dflt, &rs_sel);
+ brcms_c_rateset_mcs_upd(&rs_sel, txstreams);
+ brcms_c_rateset_filter(&rs_sel, rs_tgt, false,
+ cck_only ? BRCMS_RATES_CCK : BRCMS_RATES_CCK_OFDM,
+ rate_mask, mcsallow);
+ brcms_c_rate_hwrs_filter_sort_validate(rs_tgt, rs_hw, false,
+ mcsallow ? txstreams : 1);
+}
+
+s16 brcms_c_rate_legacy_phyctl(uint rate)
+{
+ uint i;
+ for (i = 0; i < LEGACY_PHYCFG_TABLE_SIZE; i++)
+ if (rate == legacy_phycfg_table[i].rate_ofdm)
+ return legacy_phycfg_table[i].tx_phy_ctl3;
+
+ return -1;
+}
+
+void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset)
+{
+ uint i;
+ for (i = 0; i < MCSSET_LEN; i++)
+ rateset->mcs[i] = 0;
+}
+
+void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset, u8 txstreams)
+{
+ memcpy(&rateset->mcs[0], &cck_ofdm_mimo_rates.mcs[0], MCSSET_LEN);
+ brcms_c_rateset_mcs_upd(rateset, txstreams);
+}
+
+/* Based on bandwidth passed, allow/disallow MCS 32 in the rateset */
+void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset, u8 bw)
+{
+ if (bw == BRCMS_40_MHZ)
+ setbit(rateset->mcs, 32);
+ else
+ clrbit(rateset->mcs, 32);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.h b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
new file mode 100644
index 00000000000..e7b9dc2f273
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_RATE_H_
+#define _BRCM_RATE_H_
+
+#include "types.h"
+#include "d11.h"
+
+extern const u8 rate_info[];
+extern const struct brcms_c_rateset cck_ofdm_mimo_rates;
+extern const struct brcms_c_rateset ofdm_mimo_rates;
+extern const struct brcms_c_rateset cck_ofdm_rates;
+extern const struct brcms_c_rateset ofdm_rates;
+extern const struct brcms_c_rateset cck_rates;
+extern const struct brcms_c_rateset gphy_legacy_rates;
+extern const struct brcms_c_rateset rate_limit_1_2;
+
+struct brcms_mcs_info {
+ /* phy rate in kbps [20Mhz] */
+ u32 phy_rate_20;
+ /* phy rate in kbps [40Mhz] */
+ u32 phy_rate_40;
+ /* phy rate in kbps [20Mhz] with SGI */
+ u32 phy_rate_20_sgi;
+ /* phy rate in kbps [40Mhz] with SGI */
+ u32 phy_rate_40_sgi;
+ /* phy ctl byte 3, code rate, modulation type, # of streams */
+ u8 tx_phy_ctl3;
+ /* matching legacy ofdm rate in 500bkps */
+ u8 leg_ofdm;
+};
+
+#define BRCMS_MAXMCS 32 /* max valid mcs index */
+#define MCS_TABLE_SIZE 33 /* Number of mcs entries in the table */
+extern const struct brcms_mcs_info mcs_table[];
+
+#define MCS_TXS_MASK 0xc0 /* num tx streams - 1 bit mask */
+#define MCS_TXS_SHIFT 6 /* num tx streams - 1 bit shift */
+
+/* returns num tx streams - 1 */
+static inline u8 mcs_2_txstreams(u8 mcs)
+{
+ return (mcs_table[mcs].tx_phy_ctl3 & MCS_TXS_MASK) >> MCS_TXS_SHIFT;
+}
+
+static inline uint mcs_2_rate(u8 mcs, bool is40, bool sgi)
+{
+ if (sgi) {
+ if (is40)
+ return mcs_table[mcs].phy_rate_40_sgi;
+ return mcs_table[mcs].phy_rate_20_sgi;
+ }
+ if (is40)
+ return mcs_table[mcs].phy_rate_40;
+
+ return mcs_table[mcs].phy_rate_20;
+}
+
+/* Macro to use the rate_info table */
+#define BRCMS_RATE_MASK_FULL 0xff /* Rate value mask with basic rate flag */
+
+/*
+ * rate spec : holds rate and mode specific information required to generate a
+ * tx frame. Legacy CCK and OFDM information is held in the same manner as was
+ * done in the past (in the lower byte) the upper 3 bytes primarily hold MIMO
+ * specific information
+ */
+
+/* rate spec bit fields */
+
+/* Either 500Kbps units or MIMO MCS idx */
+#define RSPEC_RATE_MASK 0x0000007F
+/* mimo MCS is stored in RSPEC_RATE_MASK */
+#define RSPEC_MIMORATE 0x08000000
+/* mimo bw mask */
+#define RSPEC_BW_MASK 0x00000700
+/* mimo bw shift */
+#define RSPEC_BW_SHIFT 8
+/* mimo Space/Time/Frequency mode mask */
+#define RSPEC_STF_MASK 0x00003800
+/* mimo Space/Time/Frequency mode shift */
+#define RSPEC_STF_SHIFT 11
+/* mimo coding type mask */
+#define RSPEC_CT_MASK 0x0000C000
+/* mimo coding type shift */
+#define RSPEC_CT_SHIFT 14
+/* mimo num STC streams per PLCP defn. */
+#define RSPEC_STC_MASK 0x00300000
+/* mimo num STC streams per PLCP defn. */
+#define RSPEC_STC_SHIFT 20
+/* mimo bit indicates adv coding in use */
+#define RSPEC_LDPC_CODING 0x00400000
+/* mimo bit indicates short GI in use */
+#define RSPEC_SHORT_GI 0x00800000
+/* bit indicates override both rate & mode */
+#define RSPEC_OVERRIDE 0x80000000
+/* bit indicates override rate only */
+#define RSPEC_OVERRIDE_MCS_ONLY 0x40000000
+
+static inline bool rspec_active(u32 rspec)
+{
+ return rspec & (RSPEC_RATE_MASK | RSPEC_MIMORATE);
+}
+
+static inline u8 rspec_phytxbyte2(u32 rspec)
+{
+ return (rspec & 0xff00) >> 8;
+}
+
+static inline u32 rspec_get_bw(u32 rspec)
+{
+ return (rspec & RSPEC_BW_MASK) >> RSPEC_BW_SHIFT;
+}
+
+static inline bool rspec_issgi(u32 rspec)
+{
+ return (rspec & RSPEC_SHORT_GI) == RSPEC_SHORT_GI;
+}
+
+static inline bool rspec_is40mhz(u32 rspec)
+{
+ u32 bw = rspec_get_bw(rspec);
+
+ return bw == PHY_TXC1_BW_40MHZ || bw == PHY_TXC1_BW_40MHZ_DUP;
+}
+
+static inline uint rspec2rate(u32 rspec)
+{
+ if (rspec & RSPEC_MIMORATE)
+ return mcs_2_rate(rspec & RSPEC_RATE_MASK, rspec_is40mhz(rspec),
+ rspec_issgi(rspec));
+ return rspec & RSPEC_RATE_MASK;
+}
+
+static inline u8 rspec_mimoplcp3(u32 rspec)
+{
+ return (rspec & 0xf00000) >> 16;
+}
+
+static inline bool plcp3_issgi(u8 plcp)
+{
+ return (plcp & (RSPEC_SHORT_GI >> 16)) != 0;
+}
+
+static inline uint rspec_stc(u32 rspec)
+{
+ return (rspec & RSPEC_STC_MASK) >> RSPEC_STC_SHIFT;
+}
+
+static inline uint rspec_stf(u32 rspec)
+{
+ return (rspec & RSPEC_STF_MASK) >> RSPEC_STF_SHIFT;
+}
+
+static inline bool is_mcs_rate(u32 ratespec)
+{
+ return (ratespec & RSPEC_MIMORATE) != 0;
+}
+
+static inline bool is_ofdm_rate(u32 ratespec)
+{
+ return !is_mcs_rate(ratespec) &&
+ (rate_info[ratespec & RSPEC_RATE_MASK] & BRCMS_RATE_FLAG);
+}
+
+static inline bool is_cck_rate(u32 ratespec)
+{
+ u32 rate = (ratespec & BRCMS_RATE_MASK);
+
+ return !is_mcs_rate(ratespec) && (
+ rate == BRCM_RATE_1M || rate == BRCM_RATE_2M ||
+ rate == BRCM_RATE_5M5 || rate == BRCM_RATE_11M);
+}
+
+static inline bool is_single_stream(u8 mcs)
+{
+ return mcs <= HIGHEST_SINGLE_STREAM_MCS || mcs == 32;
+}
+
+static inline u8 cck_rspec(u8 cck)
+{
+ return cck & RSPEC_RATE_MASK;
+}
+
+/* Convert encoded rate value in plcp header to numerical rates in 500 KHz
+ * increments */
+extern const u8 ofdm_rate_lookup[];
+
+static inline u8 ofdm_phy2mac_rate(u8 rlpt)
+{
+ return ofdm_rate_lookup[rlpt & 0x7];
+}
+
+static inline u8 cck_phy2mac_rate(u8 signal)
+{
+ return signal/5;
+}
+
+/* Rates specified in brcms_c_rateset_filter() */
+#define BRCMS_RATES_CCK_OFDM 0
+#define BRCMS_RATES_CCK 1
+#define BRCMS_RATES_OFDM 2
+
+/* sanitize, and sort a rateset with the basic bit(s) preserved, validate
+ * rateset */
+extern bool
+brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
+ const struct brcms_c_rateset *hw_rs,
+ bool check_brate, u8 txstreams);
+/* copy rateset src to dst as-is (no masking or sorting) */
+extern void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
+ struct brcms_c_rateset *dst);
+
+/* would be nice to have these documented ... */
+extern u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
+
+extern void brcms_c_rateset_filter(struct brcms_c_rateset *src,
+ struct brcms_c_rateset *dst, bool basic_only, u8 rates, uint xmask,
+ bool mcsallow);
+
+extern void
+brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
+ const struct brcms_c_rateset *rs_hw, uint phy_type,
+ int bandtype, bool cck_only, uint rate_mask,
+ bool mcsallow, u8 bw, u8 txstreams);
+
+extern s16 brcms_c_rate_legacy_phyctl(uint rate);
+
+extern void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
+extern void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
+extern void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset,
+ u8 txstreams);
+extern void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset,
+ u8 bw);
+
+#endif /* _BRCM_RATE_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/scb.h b/drivers/net/wireless/brcm80211/brcmsmac/scb.h
new file mode 100644
index 00000000000..51c79c7239b
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/scb.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_SCB_H_
+#define _BRCM_SCB_H_
+
+#include <linux/if_ether.h>
+#include <brcmu_utils.h>
+#include <defs.h>
+#include "types.h"
+
+#define AMPDU_TX_BA_MAX_WSIZE 64 /* max Tx ba window size (in pdu) */
+
+#define AMPDU_MAX_SCB_TID NUMPRIO
+
+/* scb flags */
+#define SCB_WMECAP 0x0040
+#define SCB_HTCAP 0x10000 /* HT (MIMO) capable device */
+#define SCB_IS40 0x80000 /* 40MHz capable */
+#define SCB_STBCCAP 0x40000000 /* STBC Capable */
+
+#define SCB_MAGIC 0xbeefcafe
+
+/* structure to store per-tid state for the ampdu initiator */
+struct scb_ampdu_tid_ini {
+ u8 tx_in_transit; /* number of pending mpdus in transit in driver */
+ u8 tid; /* initiator tid for easy lookup */
+ /* tx retry count; indexed by seq modulo */
+ u8 txretry[AMPDU_TX_BA_MAX_WSIZE];
+ struct scb *scb; /* backptr for easy lookup */
+ u8 ba_wsize; /* negotiated ba window size (in pdu) */
+};
+
+struct scb_ampdu {
+ struct scb *scb; /* back pointer for easy reference */
+ u8 mpdu_density; /* mpdu density */
+ u8 max_pdu; /* max pdus allowed in ampdu */
+ u8 release; /* # of mpdus released at a time */
+ u16 min_len; /* min mpdu len to support the density */
+ u32 max_rx_ampdu_bytes; /* max ampdu rcv length; 8k, 16k, 32k, 64k */
+
+ /*
+ * This could easily be a ini[] pointer and we keep this info in wl
+ * itself instead of having mac80211 hold it for us. Also could be made
+ * dynamic per tid instead of static.
+ */
+ /* initiator info - per tid (NUMPRIO): */
+ struct scb_ampdu_tid_ini ini[AMPDU_MAX_SCB_TID];
+};
+
+/* station control block - one per remote MAC address */
+struct scb {
+ u32 magic;
+ u32 flags; /* various bit flags as defined below */
+ u32 flags2; /* various bit flags2 as defined below */
+ u8 state; /* current state bitfield of auth/assoc process */
+ u8 ea[ETH_ALEN]; /* station address */
+ uint fragresid[NUMPRIO];/* #bytes unused in frag buffer per prio */
+
+ u16 seqctl[NUMPRIO]; /* seqctl of last received frame (for dups) */
+ /* seqctl of last received frame (for dups) for non-QoS data and
+ * management */
+ u16 seqctl_nonqos;
+ u16 seqnum[NUMPRIO];/* WME: driver maintained sw seqnum per priority */
+
+ struct scb_ampdu scb_ampdu; /* AMPDU state including per tid info */
+};
+
+#endif /* _BRCM_SCB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.c b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
new file mode 100644
index 00000000000..99f791048e8
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
@@ -0,0 +1,1298 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/etherdevice.h>
+#include <linux/crc8.h>
+#include <stdarg.h>
+
+#include <chipcommon.h>
+#include <brcmu_utils.h>
+#include "pub.h"
+#include "nicpci.h"
+#include "aiutils.h"
+#include "otp.h"
+#include "srom.h"
+
+/*
+ * SROM CRC8 polynomial value:
+ *
+ * x^8 + x^7 +x^6 + x^4 + x^2 + 1
+ */
+#define SROM_CRC8_POLY 0xAB
+
+/* Maximum srom: 6 Kilobits == 768 bytes */
+#define SROM_MAX 768
+
+/* PCI fields */
+#define PCI_F0DEVID 48
+
+#define SROM_WORDS 64
+
+#define SROM_SSID 2
+
+#define SROM_WL1LHMAXP 29
+
+#define SROM_WL1LPAB0 30
+#define SROM_WL1LPAB1 31
+#define SROM_WL1LPAB2 32
+
+#define SROM_WL1HPAB0 33
+#define SROM_WL1HPAB1 34
+#define SROM_WL1HPAB2 35
+
+#define SROM_MACHI_IL0 36
+#define SROM_MACMID_IL0 37
+#define SROM_MACLO_IL0 38
+#define SROM_MACHI_ET1 42
+#define SROM_MACMID_ET1 43
+#define SROM_MACLO_ET1 44
+#define SROM3_MACHI 37
+#define SROM3_MACMID 38
+#define SROM3_MACLO 39
+
+#define SROM_BXARSSI2G 40
+#define SROM_BXARSSI5G 41
+
+#define SROM_TRI52G 42
+#define SROM_TRI5GHL 43
+
+#define SROM_RXPO52G 45
+
+#define SROM_AABREV 46
+/* Fields in AABREV */
+#define SROM_BR_MASK 0x00ff
+#define SROM_CC_MASK 0x0f00
+#define SROM_CC_SHIFT 8
+#define SROM_AA0_MASK 0x3000
+#define SROM_AA0_SHIFT 12
+#define SROM_AA1_MASK 0xc000
+#define SROM_AA1_SHIFT 14
+
+#define SROM_WL0PAB0 47
+#define SROM_WL0PAB1 48
+#define SROM_WL0PAB2 49
+
+#define SROM_LEDBH10 50
+#define SROM_LEDBH32 51
+
+#define SROM_WL10MAXP 52
+
+#define SROM_WL1PAB0 53
+#define SROM_WL1PAB1 54
+#define SROM_WL1PAB2 55
+
+#define SROM_ITT 56
+
+#define SROM_BFL 57
+#define SROM_BFL2 28
+#define SROM3_BFL2 61
+
+#define SROM_AG10 58
+
+#define SROM_CCODE 59
+
+#define SROM_OPO 60
+
+#define SROM3_LEDDC 62
+
+#define SROM_CRCREV 63
+
+/* SROM Rev 4: Reallocate the software part of the srom to accommodate
+ * MIMO features. It assumes up to two PCIE functions and 440 bytes
+ * of usable srom i.e. the usable storage in chips with OTP that
+ * implements hardware redundancy.
+ */
+
+#define SROM4_WORDS 220
+
+#define SROM4_SIGN 32
+#define SROM4_SIGNATURE 0x5372
+
+#define SROM4_BREV 33
+
+#define SROM4_BFL0 34
+#define SROM4_BFL1 35
+#define SROM4_BFL2 36
+#define SROM4_BFL3 37
+#define SROM5_BFL0 37
+#define SROM5_BFL1 38
+#define SROM5_BFL2 39
+#define SROM5_BFL3 40
+
+#define SROM4_MACHI 38
+#define SROM4_MACMID 39
+#define SROM4_MACLO 40
+#define SROM5_MACHI 41
+#define SROM5_MACMID 42
+#define SROM5_MACLO 43
+
+#define SROM4_CCODE 41
+#define SROM4_REGREV 42
+#define SROM5_CCODE 34
+#define SROM5_REGREV 35
+
+#define SROM4_LEDBH10 43
+#define SROM4_LEDBH32 44
+#define SROM5_LEDBH10 59
+#define SROM5_LEDBH32 60
+
+#define SROM4_LEDDC 45
+#define SROM5_LEDDC 45
+
+#define SROM4_AA 46
+
+#define SROM4_AG10 47
+#define SROM4_AG32 48
+
+#define SROM4_TXPID2G 49
+#define SROM4_TXPID5G 51
+#define SROM4_TXPID5GL 53
+#define SROM4_TXPID5GH 55
+
+#define SROM4_TXRXC 61
+#define SROM4_TXCHAIN_MASK 0x000f
+#define SROM4_TXCHAIN_SHIFT 0
+#define SROM4_RXCHAIN_MASK 0x00f0
+#define SROM4_RXCHAIN_SHIFT 4
+#define SROM4_SWITCH_MASK 0xff00
+#define SROM4_SWITCH_SHIFT 8
+
+/* Per-path fields */
+#define MAX_PATH_SROM 4
+#define SROM4_PATH0 64
+#define SROM4_PATH1 87
+#define SROM4_PATH2 110
+#define SROM4_PATH3 133
+
+#define SROM4_2G_ITT_MAXP 0
+#define SROM4_2G_PA 1
+#define SROM4_5G_ITT_MAXP 5
+#define SROM4_5GLH_MAXP 6
+#define SROM4_5G_PA 7
+#define SROM4_5GL_PA 11
+#define SROM4_5GH_PA 15
+
+/* All the miriad power offsets */
+#define SROM4_2G_CCKPO 156
+#define SROM4_2G_OFDMPO 157
+#define SROM4_5G_OFDMPO 159
+#define SROM4_5GL_OFDMPO 161
+#define SROM4_5GH_OFDMPO 163
+#define SROM4_2G_MCSPO 165
+#define SROM4_5G_MCSPO 173
+#define SROM4_5GL_MCSPO 181
+#define SROM4_5GH_MCSPO 189
+#define SROM4_CDDPO 197
+#define SROM4_STBCPO 198
+#define SROM4_BW40PO 199
+#define SROM4_BWDUPPO 200
+
+#define SROM4_CRCREV 219
+
+/* SROM Rev 8: Make space for a 48word hardware header for PCIe rev >= 6.
+ * This is acombined srom for both MIMO and SISO boards, usable in
+ * the .130 4Kilobit OTP with hardware redundancy.
+ */
+#define SROM8_BREV 65
+
+#define SROM8_BFL0 66
+#define SROM8_BFL1 67
+#define SROM8_BFL2 68
+#define SROM8_BFL3 69
+
+#define SROM8_MACHI 70
+#define SROM8_MACMID 71
+#define SROM8_MACLO 72
+
+#define SROM8_CCODE 73
+#define SROM8_REGREV 74
+
+#define SROM8_LEDBH10 75
+#define SROM8_LEDBH32 76
+
+#define SROM8_LEDDC 77
+
+#define SROM8_AA 78
+
+#define SROM8_AG10 79
+#define SROM8_AG32 80
+
+#define SROM8_TXRXC 81
+
+#define SROM8_BXARSSI2G 82
+#define SROM8_BXARSSI5G 83
+#define SROM8_TRI52G 84
+#define SROM8_TRI5GHL 85
+#define SROM8_RXPO52G 86
+
+#define SROM8_FEM2G 87
+#define SROM8_FEM5G 88
+#define SROM8_FEM_ANTSWLUT_MASK 0xf800
+#define SROM8_FEM_ANTSWLUT_SHIFT 11
+#define SROM8_FEM_TR_ISO_MASK 0x0700
+#define SROM8_FEM_TR_ISO_SHIFT 8
+#define SROM8_FEM_PDET_RANGE_MASK 0x00f8
+#define SROM8_FEM_PDET_RANGE_SHIFT 3
+#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006
+#define SROM8_FEM_EXTPA_GAIN_SHIFT 1
+#define SROM8_FEM_TSSIPOS_MASK 0x0001
+#define SROM8_FEM_TSSIPOS_SHIFT 0
+
+#define SROM8_THERMAL 89
+
+/* Temp sense related entries */
+#define SROM8_MPWR_RAWTS 90
+#define SROM8_TS_SLP_OPT_CORRX 91
+/* FOC: freiquency offset correction, HWIQ: H/W IOCAL enable,
+ * IQSWP: IQ CAL swap disable */
+#define SROM8_FOC_HWIQ_IQSWP 92
+
+/* Temperature delta for PHY calibration */
+#define SROM8_PHYCAL_TEMPDELTA 93
+
+/* Per-path offsets & fields */
+#define SROM8_PATH0 96
+#define SROM8_PATH1 112
+#define SROM8_PATH2 128
+#define SROM8_PATH3 144
+
+#define SROM8_2G_ITT_MAXP 0
+#define SROM8_2G_PA 1
+#define SROM8_5G_ITT_MAXP 4
+#define SROM8_5GLH_MAXP 5
+#define SROM8_5G_PA 6
+#define SROM8_5GL_PA 9
+#define SROM8_5GH_PA 12
+
+/* All the miriad power offsets */
+#define SROM8_2G_CCKPO 160
+
+#define SROM8_2G_OFDMPO 161
+#define SROM8_5G_OFDMPO 163
+#define SROM8_5GL_OFDMPO 165
+#define SROM8_5GH_OFDMPO 167
+
+#define SROM8_2G_MCSPO 169
+#define SROM8_5G_MCSPO 177
+#define SROM8_5GL_MCSPO 185
+#define SROM8_5GH_MCSPO 193
+
+#define SROM8_CDDPO 201
+#define SROM8_STBCPO 202
+#define SROM8_BW40PO 203
+#define SROM8_BWDUPPO 204
+
+/* SISO PA parameters are in the path0 spaces */
+#define SROM8_SISO 96
+
+/* Legacy names for SISO PA paramters */
+#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP)
+#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA)
+#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1)
+#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2)
+#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP)
+#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP)
+#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA)
+#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1)
+#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2)
+#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA)
+#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1)
+#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2)
+#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA)
+#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1)
+#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2)
+
+/* SROM REV 9 */
+#define SROM9_2GPO_CCKBW20 160
+#define SROM9_2GPO_CCKBW20UL 161
+#define SROM9_2GPO_LOFDMBW20 162
+#define SROM9_2GPO_LOFDMBW20UL 164
+
+#define SROM9_5GLPO_LOFDMBW20 166
+#define SROM9_5GLPO_LOFDMBW20UL 168
+#define SROM9_5GMPO_LOFDMBW20 170
+#define SROM9_5GMPO_LOFDMBW20UL 172
+#define SROM9_5GHPO_LOFDMBW20 174
+#define SROM9_5GHPO_LOFDMBW20UL 176
+
+#define SROM9_2GPO_MCSBW20 178
+#define SROM9_2GPO_MCSBW20UL 180
+#define SROM9_2GPO_MCSBW40 182
+
+#define SROM9_5GLPO_MCSBW20 184
+#define SROM9_5GLPO_MCSBW20UL 186
+#define SROM9_5GLPO_MCSBW40 188
+#define SROM9_5GMPO_MCSBW20 190
+#define SROM9_5GMPO_MCSBW20UL 192
+#define SROM9_5GMPO_MCSBW40 194
+#define SROM9_5GHPO_MCSBW20 196
+#define SROM9_5GHPO_MCSBW20UL 198
+#define SROM9_5GHPO_MCSBW40 200
+
+#define SROM9_PO_MCS32 202
+#define SROM9_PO_LOFDM40DUP 203
+
+/* SROM flags (see sromvar_t) */
+
+/* value continues as described by the next entry */
+#define SRFL_MORE 1
+#define SRFL_NOFFS 2 /* value bits can't be all one's */
+#define SRFL_PRHEX 4 /* value is in hexdecimal format */
+#define SRFL_PRSIGN 8 /* value is in signed decimal format */
+#define SRFL_CCODE 0x10 /* value is in country code format */
+#define SRFL_ETHADDR 0x20 /* value is an Ethernet address */
+#define SRFL_LEDDC 0x40 /* value is an LED duty cycle */
+/* do not generate a nvram param, entry is for mfgc */
+#define SRFL_NOVAR 0x80
+
+/* Max. nvram variable table size */
+#define MAXSZ_NVRAM_VARS 4096
+
+/*
+ * indicates type of value.
+ */
+enum brcms_srom_var_type {
+ BRCMS_SROM_STRING,
+ BRCMS_SROM_SNUMBER,
+ BRCMS_SROM_UNUMBER
+};
+
+/*
+ * storage type for srom variable.
+ *
+ * var_list: for linked list operations.
+ * varid: identifier of the variable.
+ * var_type: type of variable.
+ * buf: variable value when var_type == BRCMS_SROM_STRING.
+ * uval: unsigned variable value when var_type == BRCMS_SROM_UNUMBER.
+ * sval: signed variable value when var_type == BRCMS_SROM_SNUMBER.
+ */
+struct brcms_srom_list_head {
+ struct list_head var_list;
+ enum brcms_srom_id varid;
+ enum brcms_srom_var_type var_type;
+ union {
+ char buf[0];
+ u32 uval;
+ s32 sval;
+ };
+};
+
+struct brcms_sromvar {
+ enum brcms_srom_id varid;
+ u32 revmask;
+ u32 flags;
+ u16 off;
+ u16 mask;
+};
+
+struct brcms_varbuf {
+ char *base; /* pointer to buffer base */
+ char *buf; /* pointer to current position */
+ unsigned int size; /* current (residual) size in bytes */
+};
+
+/*
+ * Assumptions:
+ * - Ethernet address spans across 3 consecutive words
+ *
+ * Table rules:
+ * - Add multiple entries next to each other if a value spans across multiple
+ * words (even multiple fields in the same word) with each entry except the
+ * last having it's SRFL_MORE bit set.
+ * - Ethernet address entry does not follow above rule and must not have
+ * SRFL_MORE bit set. Its SRFL_ETHADDR bit implies it takes multiple words.
+ * - The last entry's name field must be NULL to indicate the end of the table.
+ * Other entries must have non-NULL name.
+ */
+static const struct brcms_sromvar pci_sromvars[] = {
+ {BRCMS_SROM_DEVID, 0xffffff00, SRFL_PRHEX | SRFL_NOVAR, PCI_F0DEVID,
+ 0xffff},
+ {BRCMS_SROM_BOARDREV, 0x0000000e, SRFL_PRHEX, SROM_AABREV,
+ SROM_BR_MASK},
+ {BRCMS_SROM_BOARDREV, 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
+ {BRCMS_SROM_BOARDREV, 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
+ {BRCMS_SROM_BOARDFLAGS, 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff},
+ {BRCMS_SROM_BOARDFLAGS, 0x00000004, SRFL_PRHEX | SRFL_MORE, SROM_BFL,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM_BFL2, 0xffff},
+ {BRCMS_SROM_BOARDFLAGS, 0x00000008, SRFL_PRHEX | SRFL_MORE, SROM_BFL,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM3_BFL2, 0xffff},
+ {BRCMS_SROM_BOARDFLAGS, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL0,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM4_BFL1, 0xffff},
+ {BRCMS_SROM_BOARDFLAGS, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL0,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM5_BFL1, 0xffff},
+ {BRCMS_SROM_BOARDFLAGS, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL0,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM8_BFL1, 0xffff},
+ {BRCMS_SROM_BOARDFLAGS2, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL2,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM4_BFL3, 0xffff},
+ {BRCMS_SROM_BOARDFLAGS2, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL2,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM5_BFL3, 0xffff},
+ {BRCMS_SROM_BOARDFLAGS2, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL2,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM8_BFL3, 0xffff},
+ {BRCMS_SROM_BOARDTYPE, 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
+ {BRCMS_SROM_BOARDNUM, 0x00000006, 0, SROM_MACLO_IL0, 0xffff},
+ {BRCMS_SROM_BOARDNUM, 0x00000008, 0, SROM3_MACLO, 0xffff},
+ {BRCMS_SROM_BOARDNUM, 0x00000010, 0, SROM4_MACLO, 0xffff},
+ {BRCMS_SROM_BOARDNUM, 0x000000e0, 0, SROM5_MACLO, 0xffff},
+ {BRCMS_SROM_BOARDNUM, 0xffffff00, 0, SROM8_MACLO, 0xffff},
+ {BRCMS_SROM_CC, 0x00000002, 0, SROM_AABREV, SROM_CC_MASK},
+ {BRCMS_SROM_REGREV, 0x00000008, 0, SROM_OPO, 0xff00},
+ {BRCMS_SROM_REGREV, 0x00000010, 0, SROM4_REGREV, 0x00ff},
+ {BRCMS_SROM_REGREV, 0x000000e0, 0, SROM5_REGREV, 0x00ff},
+ {BRCMS_SROM_REGREV, 0xffffff00, 0, SROM8_REGREV, 0x00ff},
+ {BRCMS_SROM_LEDBH0, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff},
+ {BRCMS_SROM_LEDBH1, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00},
+ {BRCMS_SROM_LEDBH2, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff},
+ {BRCMS_SROM_LEDBH3, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00},
+ {BRCMS_SROM_LEDBH0, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff},
+ {BRCMS_SROM_LEDBH1, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00},
+ {BRCMS_SROM_LEDBH2, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff},
+ {BRCMS_SROM_LEDBH3, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00},
+ {BRCMS_SROM_LEDBH0, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff},
+ {BRCMS_SROM_LEDBH1, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00},
+ {BRCMS_SROM_LEDBH2, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff},
+ {BRCMS_SROM_LEDBH3, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00},
+ {BRCMS_SROM_LEDBH0, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff},
+ {BRCMS_SROM_LEDBH1, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0xff00},
+ {BRCMS_SROM_LEDBH2, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff},
+ {BRCMS_SROM_LEDBH3, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0xff00},
+ {BRCMS_SROM_PA0B0, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff},
+ {BRCMS_SROM_PA0B1, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff},
+ {BRCMS_SROM_PA0B2, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff},
+ {BRCMS_SROM_PA0ITSSIT, 0x0000000e, 0, SROM_ITT, 0x00ff},
+ {BRCMS_SROM_PA0MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0x00ff},
+ {BRCMS_SROM_PA0B0, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff},
+ {BRCMS_SROM_PA0B1, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff},
+ {BRCMS_SROM_PA0B2, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff},
+ {BRCMS_SROM_PA0ITSSIT, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0xff00},
+ {BRCMS_SROM_PA0MAXPWR, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0x00ff},
+ {BRCMS_SROM_OPO, 0x0000000c, 0, SROM_OPO, 0x00ff},
+ {BRCMS_SROM_OPO, 0xffffff00, 0, SROM8_2G_OFDMPO, 0x00ff},
+ {BRCMS_SROM_AA2G, 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK},
+ {BRCMS_SROM_AA2G, 0x000000f0, 0, SROM4_AA, 0x00ff},
+ {BRCMS_SROM_AA2G, 0xffffff00, 0, SROM8_AA, 0x00ff},
+ {BRCMS_SROM_AA5G, 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK},
+ {BRCMS_SROM_AA5G, 0x000000f0, 0, SROM4_AA, 0xff00},
+ {BRCMS_SROM_AA5G, 0xffffff00, 0, SROM8_AA, 0xff00},
+ {BRCMS_SROM_AG0, 0x0000000e, 0, SROM_AG10, 0x00ff},
+ {BRCMS_SROM_AG1, 0x0000000e, 0, SROM_AG10, 0xff00},
+ {BRCMS_SROM_AG0, 0x000000f0, 0, SROM4_AG10, 0x00ff},
+ {BRCMS_SROM_AG1, 0x000000f0, 0, SROM4_AG10, 0xff00},
+ {BRCMS_SROM_AG2, 0x000000f0, 0, SROM4_AG32, 0x00ff},
+ {BRCMS_SROM_AG3, 0x000000f0, 0, SROM4_AG32, 0xff00},
+ {BRCMS_SROM_AG0, 0xffffff00, 0, SROM8_AG10, 0x00ff},
+ {BRCMS_SROM_AG1, 0xffffff00, 0, SROM8_AG10, 0xff00},
+ {BRCMS_SROM_AG2, 0xffffff00, 0, SROM8_AG32, 0x00ff},
+ {BRCMS_SROM_AG3, 0xffffff00, 0, SROM8_AG32, 0xff00},
+ {BRCMS_SROM_PA1B0, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff},
+ {BRCMS_SROM_PA1B1, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff},
+ {BRCMS_SROM_PA1B2, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff},
+ {BRCMS_SROM_PA1LOB0, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff},
+ {BRCMS_SROM_PA1LOB1, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff},
+ {BRCMS_SROM_PA1LOB2, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff},
+ {BRCMS_SROM_PA1HIB0, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff},
+ {BRCMS_SROM_PA1HIB1, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff},
+ {BRCMS_SROM_PA1HIB2, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff},
+ {BRCMS_SROM_PA1ITSSIT, 0x0000000e, 0, SROM_ITT, 0xff00},
+ {BRCMS_SROM_PA1MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0xff00},
+ {BRCMS_SROM_PA1LOMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00},
+ {BRCMS_SROM_PA1HIMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff},
+ {BRCMS_SROM_PA1B0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff},
+ {BRCMS_SROM_PA1B1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff},
+ {BRCMS_SROM_PA1B2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff},
+ {BRCMS_SROM_PA1LOB0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0_LC, 0xffff},
+ {BRCMS_SROM_PA1LOB1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1_LC, 0xffff},
+ {BRCMS_SROM_PA1LOB2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2_LC, 0xffff},
+ {BRCMS_SROM_PA1HIB0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0_HC, 0xffff},
+ {BRCMS_SROM_PA1HIB1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1_HC, 0xffff},
+ {BRCMS_SROM_PA1HIB2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2_HC, 0xffff},
+ {BRCMS_SROM_PA1ITSSIT, 0xffffff00, 0, SROM8_W1_ITTMAXP, 0xff00},
+ {BRCMS_SROM_PA1MAXPWR, 0xffffff00, 0, SROM8_W1_ITTMAXP, 0x00ff},
+ {BRCMS_SROM_PA1LOMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0xff00},
+ {BRCMS_SROM_PA1HIMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0x00ff},
+ {BRCMS_SROM_BXA2G, 0x00000008, 0, SROM_BXARSSI2G, 0x1800},
+ {BRCMS_SROM_RSSISAV2G, 0x00000008, 0, SROM_BXARSSI2G, 0x0700},
+ {BRCMS_SROM_RSSISMC2G, 0x00000008, 0, SROM_BXARSSI2G, 0x00f0},
+ {BRCMS_SROM_RSSISMF2G, 0x00000008, 0, SROM_BXARSSI2G, 0x000f},
+ {BRCMS_SROM_BXA2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x1800},
+ {BRCMS_SROM_RSSISAV2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x0700},
+ {BRCMS_SROM_RSSISMC2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x00f0},
+ {BRCMS_SROM_RSSISMF2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x000f},
+ {BRCMS_SROM_BXA5G, 0x00000008, 0, SROM_BXARSSI5G, 0x1800},
+ {BRCMS_SROM_RSSISAV5G, 0x00000008, 0, SROM_BXARSSI5G, 0x0700},
+ {BRCMS_SROM_RSSISMC5G, 0x00000008, 0, SROM_BXARSSI5G, 0x00f0},
+ {BRCMS_SROM_RSSISMF5G, 0x00000008, 0, SROM_BXARSSI5G, 0x000f},
+ {BRCMS_SROM_BXA5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x1800},
+ {BRCMS_SROM_RSSISAV5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x0700},
+ {BRCMS_SROM_RSSISMC5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x00f0},
+ {BRCMS_SROM_RSSISMF5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x000f},
+ {BRCMS_SROM_TRI2G, 0x00000008, 0, SROM_TRI52G, 0x00ff},
+ {BRCMS_SROM_TRI5G, 0x00000008, 0, SROM_TRI52G, 0xff00},
+ {BRCMS_SROM_TRI5GL, 0x00000008, 0, SROM_TRI5GHL, 0x00ff},
+ {BRCMS_SROM_TRI5GH, 0x00000008, 0, SROM_TRI5GHL, 0xff00},
+ {BRCMS_SROM_TRI2G, 0xffffff00, 0, SROM8_TRI52G, 0x00ff},
+ {BRCMS_SROM_TRI5G, 0xffffff00, 0, SROM8_TRI52G, 0xff00},
+ {BRCMS_SROM_TRI5GL, 0xffffff00, 0, SROM8_TRI5GHL, 0x00ff},
+ {BRCMS_SROM_TRI5GH, 0xffffff00, 0, SROM8_TRI5GHL, 0xff00},
+ {BRCMS_SROM_RXPO2G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff},
+ {BRCMS_SROM_RXPO5G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00},
+ {BRCMS_SROM_RXPO2G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff},
+ {BRCMS_SROM_RXPO5G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00},
+ {BRCMS_SROM_TXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
+ SROM4_TXCHAIN_MASK},
+ {BRCMS_SROM_RXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
+ SROM4_RXCHAIN_MASK},
+ {BRCMS_SROM_ANTSWITCH, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
+ SROM4_SWITCH_MASK},
+ {BRCMS_SROM_TXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
+ SROM4_TXCHAIN_MASK},
+ {BRCMS_SROM_RXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
+ SROM4_RXCHAIN_MASK},
+ {BRCMS_SROM_ANTSWITCH, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
+ SROM4_SWITCH_MASK},
+ {BRCMS_SROM_TSSIPOS2G, 0xffffff00, 0, SROM8_FEM2G,
+ SROM8_FEM_TSSIPOS_MASK},
+ {BRCMS_SROM_EXTPAGAIN2G, 0xffffff00, 0, SROM8_FEM2G,
+ SROM8_FEM_EXTPA_GAIN_MASK},
+ {BRCMS_SROM_PDETRANGE2G, 0xffffff00, 0, SROM8_FEM2G,
+ SROM8_FEM_PDET_RANGE_MASK},
+ {BRCMS_SROM_TRISO2G, 0xffffff00, 0, SROM8_FEM2G, SROM8_FEM_TR_ISO_MASK},
+ {BRCMS_SROM_ANTSWCTL2G, 0xffffff00, 0, SROM8_FEM2G,
+ SROM8_FEM_ANTSWLUT_MASK},
+ {BRCMS_SROM_TSSIPOS5G, 0xffffff00, 0, SROM8_FEM5G,
+ SROM8_FEM_TSSIPOS_MASK},
+ {BRCMS_SROM_EXTPAGAIN5G, 0xffffff00, 0, SROM8_FEM5G,
+ SROM8_FEM_EXTPA_GAIN_MASK},
+ {BRCMS_SROM_PDETRANGE5G, 0xffffff00, 0, SROM8_FEM5G,
+ SROM8_FEM_PDET_RANGE_MASK},
+ {BRCMS_SROM_TRISO5G, 0xffffff00, 0, SROM8_FEM5G, SROM8_FEM_TR_ISO_MASK},
+ {BRCMS_SROM_ANTSWCTL5G, 0xffffff00, 0, SROM8_FEM5G,
+ SROM8_FEM_ANTSWLUT_MASK},
+ {BRCMS_SROM_TEMPTHRESH, 0xffffff00, 0, SROM8_THERMAL, 0xff00},
+ {BRCMS_SROM_TEMPOFFSET, 0xffffff00, 0, SROM8_THERMAL, 0x00ff},
+ {BRCMS_SROM_TXPID2GA0, 0x000000f0, 0, SROM4_TXPID2G, 0x00ff},
+ {BRCMS_SROM_TXPID2GA1, 0x000000f0, 0, SROM4_TXPID2G, 0xff00},
+ {BRCMS_SROM_TXPID2GA2, 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff},
+ {BRCMS_SROM_TXPID2GA3, 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00},
+ {BRCMS_SROM_TXPID5GA0, 0x000000f0, 0, SROM4_TXPID5G, 0x00ff},
+ {BRCMS_SROM_TXPID5GA1, 0x000000f0, 0, SROM4_TXPID5G, 0xff00},
+ {BRCMS_SROM_TXPID5GA2, 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff},
+ {BRCMS_SROM_TXPID5GA3, 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00},
+ {BRCMS_SROM_TXPID5GLA0, 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff},
+ {BRCMS_SROM_TXPID5GLA1, 0x000000f0, 0, SROM4_TXPID5GL, 0xff00},
+ {BRCMS_SROM_TXPID5GLA2, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff},
+ {BRCMS_SROM_TXPID5GLA3, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00},
+ {BRCMS_SROM_TXPID5GHA0, 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff},
+ {BRCMS_SROM_TXPID5GHA1, 0x000000f0, 0, SROM4_TXPID5GH, 0xff00},
+ {BRCMS_SROM_TXPID5GHA2, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff},
+ {BRCMS_SROM_TXPID5GHA3, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00},
+
+ {BRCMS_SROM_CCODE, 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff},
+ {BRCMS_SROM_CCODE, 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff},
+ {BRCMS_SROM_CCODE, 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff},
+ {BRCMS_SROM_CCODE, 0xffffff00, SRFL_CCODE, SROM8_CCODE, 0xffff},
+ {BRCMS_SROM_MACADDR, 0xffffff00, SRFL_ETHADDR, SROM8_MACHI, 0xffff},
+ {BRCMS_SROM_MACADDR, 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff},
+ {BRCMS_SROM_MACADDR, 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff},
+ {BRCMS_SROM_MACADDR, 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff},
+ {BRCMS_SROM_IL0MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0,
+ 0xffff},
+ {BRCMS_SROM_ET1MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1,
+ 0xffff},
+ {BRCMS_SROM_LEDDC, 0xffffff00, SRFL_NOFFS | SRFL_LEDDC, SROM8_LEDDC,
+ 0xffff},
+ {BRCMS_SROM_LEDDC, 0x000000e0, SRFL_NOFFS | SRFL_LEDDC, SROM5_LEDDC,
+ 0xffff},
+ {BRCMS_SROM_LEDDC, 0x00000010, SRFL_NOFFS | SRFL_LEDDC, SROM4_LEDDC,
+ 0xffff},
+ {BRCMS_SROM_LEDDC, 0x00000008, SRFL_NOFFS | SRFL_LEDDC, SROM3_LEDDC,
+ 0xffff},
+ {BRCMS_SROM_RAWTEMPSENSE, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
+ 0x01ff},
+ {BRCMS_SROM_MEASPOWER, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
+ 0xfe00},
+ {BRCMS_SROM_TEMPSENSE_SLOPE, 0xffffff00, SRFL_PRHEX,
+ SROM8_TS_SLP_OPT_CORRX, 0x00ff},
+ {BRCMS_SROM_TEMPCORRX, 0xffffff00, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX,
+ 0xfc00},
+ {BRCMS_SROM_TEMPSENSE_OPTION, 0xffffff00, SRFL_PRHEX,
+ SROM8_TS_SLP_OPT_CORRX, 0x0300},
+ {BRCMS_SROM_FREQOFFSET_CORR, 0xffffff00, SRFL_PRHEX,
+ SROM8_FOC_HWIQ_IQSWP, 0x000f},
+ {BRCMS_SROM_IQCAL_SWP_DIS, 0xffffff00, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP,
+ 0x0010},
+ {BRCMS_SROM_HW_IQCAL_EN, 0xffffff00, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP,
+ 0x0020},
+ {BRCMS_SROM_PHYCAL_TEMPDELTA, 0xffffff00, 0, SROM8_PHYCAL_TEMPDELTA,
+ 0x00ff},
+
+ {BRCMS_SROM_CCK2GPO, 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff},
+ {BRCMS_SROM_CCK2GPO, 0x00000100, 0, SROM8_2G_CCKPO, 0xffff},
+ {BRCMS_SROM_OFDM2GPO, 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM4_2G_OFDMPO + 1, 0xffff},
+ {BRCMS_SROM_OFDM5GPO, 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM4_5G_OFDMPO + 1, 0xffff},
+ {BRCMS_SROM_OFDM5GLPO, 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff},
+ {BRCMS_SROM_OFDM5GHPO, 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff},
+ {BRCMS_SROM_OFDM2GPO, 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM8_2G_OFDMPO + 1, 0xffff},
+ {BRCMS_SROM_OFDM5GPO, 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM8_5G_OFDMPO + 1, 0xffff},
+ {BRCMS_SROM_OFDM5GLPO, 0x00000100, SRFL_MORE, SROM8_5GL_OFDMPO, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff},
+ {BRCMS_SROM_OFDM5GHPO, 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff},
+ {BRCMS_SROM_MCS2GPO0, 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff},
+ {BRCMS_SROM_MCS2GPO1, 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff},
+ {BRCMS_SROM_MCS2GPO2, 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff},
+ {BRCMS_SROM_MCS2GPO3, 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff},
+ {BRCMS_SROM_MCS2GPO4, 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff},
+ {BRCMS_SROM_MCS2GPO5, 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff},
+ {BRCMS_SROM_MCS2GPO6, 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff},
+ {BRCMS_SROM_MCS2GPO7, 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff},
+ {BRCMS_SROM_MCS5GPO0, 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff},
+ {BRCMS_SROM_MCS5GPO1, 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff},
+ {BRCMS_SROM_MCS5GPO2, 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff},
+ {BRCMS_SROM_MCS5GPO3, 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff},
+ {BRCMS_SROM_MCS5GPO4, 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff},
+ {BRCMS_SROM_MCS5GPO5, 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff},
+ {BRCMS_SROM_MCS5GPO6, 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff},
+ {BRCMS_SROM_MCS5GPO7, 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff},
+ {BRCMS_SROM_MCS5GLPO0, 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff},
+ {BRCMS_SROM_MCS5GLPO1, 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff},
+ {BRCMS_SROM_MCS5GLPO2, 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff},
+ {BRCMS_SROM_MCS5GLPO3, 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff},
+ {BRCMS_SROM_MCS5GLPO4, 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff},
+ {BRCMS_SROM_MCS5GLPO5, 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff},
+ {BRCMS_SROM_MCS5GLPO6, 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff},
+ {BRCMS_SROM_MCS5GLPO7, 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff},
+ {BRCMS_SROM_MCS5GHPO0, 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff},
+ {BRCMS_SROM_MCS5GHPO1, 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff},
+ {BRCMS_SROM_MCS5GHPO2, 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff},
+ {BRCMS_SROM_MCS5GHPO3, 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff},
+ {BRCMS_SROM_MCS5GHPO4, 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff},
+ {BRCMS_SROM_MCS5GHPO5, 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff},
+ {BRCMS_SROM_MCS5GHPO6, 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff},
+ {BRCMS_SROM_MCS5GHPO7, 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff},
+ {BRCMS_SROM_MCS2GPO0, 0x00000100, 0, SROM8_2G_MCSPO, 0xffff},
+ {BRCMS_SROM_MCS2GPO1, 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff},
+ {BRCMS_SROM_MCS2GPO2, 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff},
+ {BRCMS_SROM_MCS2GPO3, 0x00000100, 0, SROM8_2G_MCSPO + 3, 0xffff},
+ {BRCMS_SROM_MCS2GPO4, 0x00000100, 0, SROM8_2G_MCSPO + 4, 0xffff},
+ {BRCMS_SROM_MCS2GPO5, 0x00000100, 0, SROM8_2G_MCSPO + 5, 0xffff},
+ {BRCMS_SROM_MCS2GPO6, 0x00000100, 0, SROM8_2G_MCSPO + 6, 0xffff},
+ {BRCMS_SROM_MCS2GPO7, 0x00000100, 0, SROM8_2G_MCSPO + 7, 0xffff},
+ {BRCMS_SROM_MCS5GPO0, 0x00000100, 0, SROM8_5G_MCSPO, 0xffff},
+ {BRCMS_SROM_MCS5GPO1, 0x00000100, 0, SROM8_5G_MCSPO + 1, 0xffff},
+ {BRCMS_SROM_MCS5GPO2, 0x00000100, 0, SROM8_5G_MCSPO + 2, 0xffff},
+ {BRCMS_SROM_MCS5GPO3, 0x00000100, 0, SROM8_5G_MCSPO + 3, 0xffff},
+ {BRCMS_SROM_MCS5GPO4, 0x00000100, 0, SROM8_5G_MCSPO + 4, 0xffff},
+ {BRCMS_SROM_MCS5GPO5, 0x00000100, 0, SROM8_5G_MCSPO + 5, 0xffff},
+ {BRCMS_SROM_MCS5GPO6, 0x00000100, 0, SROM8_5G_MCSPO + 6, 0xffff},
+ {BRCMS_SROM_MCS5GPO7, 0x00000100, 0, SROM8_5G_MCSPO + 7, 0xffff},
+ {BRCMS_SROM_MCS5GLPO0, 0x00000100, 0, SROM8_5GL_MCSPO, 0xffff},
+ {BRCMS_SROM_MCS5GLPO1, 0x00000100, 0, SROM8_5GL_MCSPO + 1, 0xffff},
+ {BRCMS_SROM_MCS5GLPO2, 0x00000100, 0, SROM8_5GL_MCSPO + 2, 0xffff},
+ {BRCMS_SROM_MCS5GLPO3, 0x00000100, 0, SROM8_5GL_MCSPO + 3, 0xffff},
+ {BRCMS_SROM_MCS5GLPO4, 0x00000100, 0, SROM8_5GL_MCSPO + 4, 0xffff},
+ {BRCMS_SROM_MCS5GLPO5, 0x00000100, 0, SROM8_5GL_MCSPO + 5, 0xffff},
+ {BRCMS_SROM_MCS5GLPO6, 0x00000100, 0, SROM8_5GL_MCSPO + 6, 0xffff},
+ {BRCMS_SROM_MCS5GLPO7, 0x00000100, 0, SROM8_5GL_MCSPO + 7, 0xffff},
+ {BRCMS_SROM_MCS5GHPO0, 0x00000100, 0, SROM8_5GH_MCSPO, 0xffff},
+ {BRCMS_SROM_MCS5GHPO1, 0x00000100, 0, SROM8_5GH_MCSPO + 1, 0xffff},
+ {BRCMS_SROM_MCS5GHPO2, 0x00000100, 0, SROM8_5GH_MCSPO + 2, 0xffff},
+ {BRCMS_SROM_MCS5GHPO3, 0x00000100, 0, SROM8_5GH_MCSPO + 3, 0xffff},
+ {BRCMS_SROM_MCS5GHPO4, 0x00000100, 0, SROM8_5GH_MCSPO + 4, 0xffff},
+ {BRCMS_SROM_MCS5GHPO5, 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff},
+ {BRCMS_SROM_MCS5GHPO6, 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff},
+ {BRCMS_SROM_MCS5GHPO7, 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff},
+ {BRCMS_SROM_CDDPO, 0x000000f0, 0, SROM4_CDDPO, 0xffff},
+ {BRCMS_SROM_STBCPO, 0x000000f0, 0, SROM4_STBCPO, 0xffff},
+ {BRCMS_SROM_BW40PO, 0x000000f0, 0, SROM4_BW40PO, 0xffff},
+ {BRCMS_SROM_BWDUPPO, 0x000000f0, 0, SROM4_BWDUPPO, 0xffff},
+ {BRCMS_SROM_CDDPO, 0x00000100, 0, SROM8_CDDPO, 0xffff},
+ {BRCMS_SROM_STBCPO, 0x00000100, 0, SROM8_STBCPO, 0xffff},
+ {BRCMS_SROM_BW40PO, 0x00000100, 0, SROM8_BW40PO, 0xffff},
+ {BRCMS_SROM_BWDUPPO, 0x00000100, 0, SROM8_BWDUPPO, 0xffff},
+
+ /* power per rate from sromrev 9 */
+ {BRCMS_SROM_CCKBW202GPO, 0xfffffe00, 0, SROM9_2GPO_CCKBW20, 0xffff},
+ {BRCMS_SROM_CCKBW20UL2GPO, 0xfffffe00, 0, SROM9_2GPO_CCKBW20UL, 0xffff},
+ {BRCMS_SROM_LEGOFDMBW202GPO, 0xfffffe00, SRFL_MORE,
+ SROM9_2GPO_LOFDMBW20, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_LOFDMBW20 + 1, 0xffff},
+ {BRCMS_SROM_LEGOFDMBW20UL2GPO, 0xfffffe00, SRFL_MORE,
+ SROM9_2GPO_LOFDMBW20UL, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_LOFDMBW20UL + 1, 0xffff},
+ {BRCMS_SROM_LEGOFDMBW205GLPO, 0xfffffe00, SRFL_MORE,
+ SROM9_5GLPO_LOFDMBW20, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_LOFDMBW20 + 1, 0xffff},
+ {BRCMS_SROM_LEGOFDMBW20UL5GLPO, 0xfffffe00, SRFL_MORE,
+ SROM9_5GLPO_LOFDMBW20UL, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_LOFDMBW20UL + 1, 0xffff},
+ {BRCMS_SROM_LEGOFDMBW205GMPO, 0xfffffe00, SRFL_MORE,
+ SROM9_5GMPO_LOFDMBW20, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_LOFDMBW20 + 1, 0xffff},
+ {BRCMS_SROM_LEGOFDMBW20UL5GMPO, 0xfffffe00, SRFL_MORE,
+ SROM9_5GMPO_LOFDMBW20UL, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_LOFDMBW20UL + 1, 0xffff},
+ {BRCMS_SROM_LEGOFDMBW205GHPO, 0xfffffe00, SRFL_MORE,
+ SROM9_5GHPO_LOFDMBW20, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_LOFDMBW20 + 1, 0xffff},
+ {BRCMS_SROM_LEGOFDMBW20UL5GHPO, 0xfffffe00, SRFL_MORE,
+ SROM9_5GHPO_LOFDMBW20UL, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_LOFDMBW20UL + 1, 0xffff},
+ {BRCMS_SROM_MCSBW202GPO, 0xfffffe00, SRFL_MORE, SROM9_2GPO_MCSBW20,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_MCSBW20 + 1, 0xffff},
+ {BRCMS_SROM_MCSBW20UL2GPO, 0xfffffe00, SRFL_MORE, SROM9_2GPO_MCSBW20UL,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_MCSBW20UL + 1, 0xffff},
+ {BRCMS_SROM_MCSBW402GPO, 0xfffffe00, SRFL_MORE, SROM9_2GPO_MCSBW40,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_MCSBW40 + 1, 0xffff},
+ {BRCMS_SROM_MCSBW205GLPO, 0xfffffe00, SRFL_MORE, SROM9_5GLPO_MCSBW20,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_MCSBW20 + 1, 0xffff},
+ {BRCMS_SROM_MCSBW20UL5GLPO, 0xfffffe00, SRFL_MORE,
+ SROM9_5GLPO_MCSBW20UL, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_MCSBW20UL + 1, 0xffff},
+ {BRCMS_SROM_MCSBW405GLPO, 0xfffffe00, SRFL_MORE, SROM9_5GLPO_MCSBW40,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_MCSBW40 + 1, 0xffff},
+ {BRCMS_SROM_MCSBW205GMPO, 0xfffffe00, SRFL_MORE, SROM9_5GMPO_MCSBW20,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_MCSBW20 + 1, 0xffff},
+ {BRCMS_SROM_MCSBW20UL5GMPO, 0xfffffe00, SRFL_MORE,
+ SROM9_5GMPO_MCSBW20UL, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_MCSBW20UL + 1, 0xffff},
+ {BRCMS_SROM_MCSBW405GMPO, 0xfffffe00, SRFL_MORE, SROM9_5GMPO_MCSBW40,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_MCSBW40 + 1, 0xffff},
+ {BRCMS_SROM_MCSBW205GHPO, 0xfffffe00, SRFL_MORE, SROM9_5GHPO_MCSBW20,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_MCSBW20 + 1, 0xffff},
+ {BRCMS_SROM_MCSBW20UL5GHPO, 0xfffffe00, SRFL_MORE,
+ SROM9_5GHPO_MCSBW20UL, 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_MCSBW20UL + 1, 0xffff},
+ {BRCMS_SROM_MCSBW405GHPO, 0xfffffe00, SRFL_MORE, SROM9_5GHPO_MCSBW40,
+ 0xffff},
+ {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_MCSBW40 + 1, 0xffff},
+ {BRCMS_SROM_MCS32PO, 0xfffffe00, 0, SROM9_PO_MCS32, 0xffff},
+ {BRCMS_SROM_LEGOFDM40DUPPO, 0xfffffe00, 0, SROM9_PO_LOFDM40DUP, 0xffff},
+
+ {BRCMS_SROM_NULL, 0, 0, 0, 0}
+};
+
+static const struct brcms_sromvar perpath_pci_sromvars[] = {
+ {BRCMS_SROM_MAXP2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff},
+ {BRCMS_SROM_ITT2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00},
+ {BRCMS_SROM_ITT5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00},
+ {BRCMS_SROM_PA2GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff},
+ {BRCMS_SROM_PA2GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff},
+ {BRCMS_SROM_PA2GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff},
+ {BRCMS_SROM_PA2GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff},
+ {BRCMS_SROM_MAXP5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff},
+ {BRCMS_SROM_MAXP5GHA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff},
+ {BRCMS_SROM_MAXP5GLA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00},
+ {BRCMS_SROM_PA5GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff},
+ {BRCMS_SROM_PA5GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff},
+ {BRCMS_SROM_PA5GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff},
+ {BRCMS_SROM_PA5GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff},
+ {BRCMS_SROM_PA5GLW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff},
+ {BRCMS_SROM_PA5GLW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1,
+ 0xffff},
+ {BRCMS_SROM_PA5GLW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2,
+ 0xffff},
+ {BRCMS_SROM_PA5GLW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3,
+ 0xffff},
+ {BRCMS_SROM_PA5GHW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff},
+ {BRCMS_SROM_PA5GHW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1,
+ 0xffff},
+ {BRCMS_SROM_PA5GHW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2,
+ 0xffff},
+ {BRCMS_SROM_PA5GHW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3,
+ 0xffff},
+ {BRCMS_SROM_MAXP2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0x00ff},
+ {BRCMS_SROM_ITT2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0xff00},
+ {BRCMS_SROM_ITT5GA0, 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0xff00},
+ {BRCMS_SROM_PA2GW0A0, 0xffffff00, SRFL_PRHEX, SROM8_2G_PA, 0xffff},
+ {BRCMS_SROM_PA2GW1A0, 0xffffff00, SRFL_PRHEX, SROM8_2G_PA + 1, 0xffff},
+ {BRCMS_SROM_PA2GW2A0, 0xffffff00, SRFL_PRHEX, SROM8_2G_PA + 2, 0xffff},
+ {BRCMS_SROM_MAXP5GA0, 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0x00ff},
+ {BRCMS_SROM_MAXP5GHA0, 0xffffff00, 0, SROM8_5GLH_MAXP, 0x00ff},
+ {BRCMS_SROM_MAXP5GLA0, 0xffffff00, 0, SROM8_5GLH_MAXP, 0xff00},
+ {BRCMS_SROM_PA5GW0A0, 0xffffff00, SRFL_PRHEX, SROM8_5G_PA, 0xffff},
+ {BRCMS_SROM_PA5GW1A0, 0xffffff00, SRFL_PRHEX, SROM8_5G_PA + 1, 0xffff},
+ {BRCMS_SROM_PA5GW2A0, 0xffffff00, SRFL_PRHEX, SROM8_5G_PA + 2, 0xffff},
+ {BRCMS_SROM_PA5GLW0A0, 0xffffff00, SRFL_PRHEX, SROM8_5GL_PA, 0xffff},
+ {BRCMS_SROM_PA5GLW1A0, 0xffffff00, SRFL_PRHEX, SROM8_5GL_PA + 1,
+ 0xffff},
+ {BRCMS_SROM_PA5GLW2A0, 0xffffff00, SRFL_PRHEX, SROM8_5GL_PA + 2,
+ 0xffff},
+ {BRCMS_SROM_PA5GHW0A0, 0xffffff00, SRFL_PRHEX, SROM8_5GH_PA, 0xffff},
+ {BRCMS_SROM_PA5GHW1A0, 0xffffff00, SRFL_PRHEX, SROM8_5GH_PA + 1,
+ 0xffff},
+ {BRCMS_SROM_PA5GHW2A0, 0xffffff00, SRFL_PRHEX, SROM8_5GH_PA + 2,
+ 0xffff},
+ {BRCMS_SROM_NULL, 0, 0, 0, 0}
+};
+
+/* crc table has the same contents for every device instance, so it can be
+ * shared between devices. */
+static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE];
+
+static u16 __iomem *
+srom_window_address(struct si_pub *sih, u8 __iomem *curmap)
+{
+ if (sih->ccrev < 32)
+ return (u16 __iomem *)(curmap + PCI_BAR0_SPROM_OFFSET);
+ if (sih->cccaps & CC_CAP_SROM)
+ return (u16 __iomem *)
+ (curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP);
+
+ return NULL;
+}
+
+/* Parse SROM and create name=value pairs. 'srom' points to
+ * the SROM word array. 'off' specifies the offset of the
+ * first word 'srom' points to, which should be either 0 or
+ * SROM3_SWRG_OFF (full SROM or software region).
+ */
+
+static uint mask_shift(u16 mask)
+{
+ uint i;
+ for (i = 0; i < (sizeof(mask) << 3); i++) {
+ if (mask & (1 << i))
+ return i;
+ }
+ return 0;
+}
+
+static uint mask_width(u16 mask)
+{
+ int i;
+ for (i = (sizeof(mask) << 3) - 1; i >= 0; i--) {
+ if (mask & (1 << i))
+ return (uint) (i - mask_shift(mask) + 1);
+ }
+ return 0;
+}
+
+static inline void ltoh16_buf(u16 *buf, unsigned int size)
+{
+ size /= 2;
+ while (size--)
+ *(buf + size) = le16_to_cpu(*(__le16 *)(buf + size));
+}
+
+static inline void htol16_buf(u16 *buf, unsigned int size)
+{
+ size /= 2;
+ while (size--)
+ *(__le16 *)(buf + size) = cpu_to_le16(*(buf + size));
+}
+
+/*
+ * convert binary srom data into linked list of srom variable items.
+ */
+static void
+_initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
+{
+ struct brcms_srom_list_head *entry;
+ enum brcms_srom_id id;
+ u16 w;
+ u32 val;
+ const struct brcms_sromvar *srv;
+ uint width;
+ uint flags;
+ u32 sr = (1 << sromrev);
+
+ /* first store the srom revision */
+ entry = kzalloc(sizeof(struct brcms_srom_list_head), GFP_KERNEL);
+ entry->varid = BRCMS_SROM_REV;
+ entry->var_type = BRCMS_SROM_UNUMBER;
+ entry->uval = sromrev;
+ list_add(&entry->var_list, var_list);
+
+ for (srv = pci_sromvars; srv->varid != BRCMS_SROM_NULL; srv++) {
+ enum brcms_srom_var_type type;
+ u8 ea[ETH_ALEN];
+ u8 extra_space = 0;
+
+ if ((srv->revmask & sr) == 0)
+ continue;
+
+ flags = srv->flags;
+ id = srv->varid;
+
+ /* This entry is for mfgc only. Don't generate param for it, */
+ if (flags & SRFL_NOVAR)
+ continue;
+
+ if (flags & SRFL_ETHADDR) {
+ /*
+ * stored in string format XX:XX:XX:XX:XX:XX (17 chars)
+ */
+ ea[0] = (srom[srv->off] >> 8) & 0xff;
+ ea[1] = srom[srv->off] & 0xff;
+ ea[2] = (srom[srv->off + 1] >> 8) & 0xff;
+ ea[3] = srom[srv->off + 1] & 0xff;
+ ea[4] = (srom[srv->off + 2] >> 8) & 0xff;
+ ea[5] = srom[srv->off + 2] & 0xff;
+ /* 17 characters + string terminator - union size */
+ extra_space = 18 - sizeof(s32);
+ type = BRCMS_SROM_STRING;
+ } else {
+ w = srom[srv->off];
+ val = (w & srv->mask) >> mask_shift(srv->mask);
+ width = mask_width(srv->mask);
+
+ while (srv->flags & SRFL_MORE) {
+ srv++;
+ if (srv->off == 0)
+ continue;
+
+ w = srom[srv->off];
+ val +=
+ ((w & srv->mask) >> mask_shift(srv->
+ mask)) <<
+ width;
+ width += mask_width(srv->mask);
+ }
+
+ if ((flags & SRFL_NOFFS)
+ && ((int)val == (1 << width) - 1))
+ continue;
+
+ if (flags & SRFL_CCODE) {
+ type = BRCMS_SROM_STRING;
+ } else if (flags & SRFL_LEDDC) {
+ /* LED Powersave duty cycle has to be scaled:
+ *(oncount >> 24) (offcount >> 8)
+ */
+ u32 w32 = /* oncount */
+ (((val >> 8) & 0xff) << 24) |
+ /* offcount */
+ (((val & 0xff)) << 8);
+ type = BRCMS_SROM_UNUMBER;
+ val = w32;
+ } else if ((flags & SRFL_PRSIGN)
+ && (val & (1 << (width - 1)))) {
+ type = BRCMS_SROM_SNUMBER;
+ val |= ~0 << width;
+ } else
+ type = BRCMS_SROM_UNUMBER;
+ }
+
+ entry = kzalloc(sizeof(struct brcms_srom_list_head) +
+ extra_space, GFP_KERNEL);
+ entry->varid = id;
+ entry->var_type = type;
+ if (flags & SRFL_ETHADDR) {
+ snprintf(entry->buf, 18, "%pM", ea);
+ } else if (flags & SRFL_CCODE) {
+ if (val == 0)
+ entry->buf[0] = '\0';
+ else
+ snprintf(entry->buf, 3, "%c%c",
+ (val >> 8), (val & 0xff));
+ } else {
+ entry->uval = val;
+ }
+
+ list_add(&entry->var_list, var_list);
+ }
+
+ if (sromrev >= 4) {
+ /* Do per-path variables */
+ uint p, pb, psz;
+
+ if (sromrev >= 8) {
+ pb = SROM8_PATH0;
+ psz = SROM8_PATH1 - SROM8_PATH0;
+ } else {
+ pb = SROM4_PATH0;
+ psz = SROM4_PATH1 - SROM4_PATH0;
+ }
+
+ for (p = 0; p < MAX_PATH_SROM; p++) {
+ for (srv = perpath_pci_sromvars;
+ srv->varid != BRCMS_SROM_NULL; srv++) {
+ if ((srv->revmask & sr) == 0)
+ continue;
+
+ if (srv->flags & SRFL_NOVAR)
+ continue;
+
+ w = srom[pb + srv->off];
+ val = (w & srv->mask) >> mask_shift(srv->mask);
+ width = mask_width(srv->mask);
+
+ /* Cheating: no per-path var is more than
+ * 1 word */
+ if ((srv->flags & SRFL_NOFFS)
+ && ((int)val == (1 << width) - 1))
+ continue;
+
+ entry =
+ kzalloc(sizeof(struct brcms_srom_list_head),
+ GFP_KERNEL);
+ entry->varid = srv->varid+p;
+ entry->var_type = BRCMS_SROM_UNUMBER;
+ entry->uval = val;
+ list_add(&entry->var_list, var_list);
+ }
+ pb += psz;
+ }
+ }
+}
+
+/*
+ * Read in and validate sprom.
+ * Return 0 on success, nonzero on error.
+ */
+static int
+sprom_read_pci(struct si_pub *sih, u16 __iomem *sprom, uint wordoff,
+ u16 *buf, uint nwords, bool check_crc)
+{
+ int err = 0;
+ uint i;
+
+ /* read the sprom */
+ for (i = 0; i < nwords; i++)
+ buf[i] = R_REG(&sprom[wordoff + i]);
+
+ if (check_crc) {
+
+ if (buf[0] == 0xffff)
+ /*
+ * The hardware thinks that an srom that starts with
+ * 0xffff is blank, regardless of the rest of the
+ * content, so declare it bad.
+ */
+ return -ENODATA;
+
+ /* fixup the endianness so crc8 will pass */
+ htol16_buf(buf, nwords * 2);
+ if (crc8(brcms_srom_crc8_table, (u8 *) buf, nwords * 2,
+ CRC8_INIT_VALUE) !=
+ CRC8_GOOD_VALUE(brcms_srom_crc8_table))
+ /* DBG only pci always read srom4 first, then srom8/9 */
+ err = -EIO;
+
+ /* now correct the endianness of the byte array */
+ ltoh16_buf(buf, nwords * 2);
+ }
+ return err;
+}
+
+static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
+{
+ u8 *otp;
+ uint sz = OTP_SZ_MAX / 2; /* size in words */
+ int err = 0;
+
+ otp = kzalloc(OTP_SZ_MAX, GFP_ATOMIC);
+ if (otp == NULL)
+ return -ENOMEM;
+
+ err = otp_read_region(sih, OTP_HW_RGN, (u16 *) otp, &sz);
+
+ memcpy(buf, otp, bufsz);
+
+ kfree(otp);
+
+ /* Check CRC */
+ if (buf[0] == 0xffff)
+ /* The hardware thinks that an srom that starts with 0xffff
+ * is blank, regardless of the rest of the content, so declare
+ * it bad.
+ */
+ return -ENODATA;
+
+ /* fixup the endianness so crc8 will pass */
+ htol16_buf(buf, bufsz);
+ if (crc8(brcms_srom_crc8_table, (u8 *) buf, SROM4_WORDS * 2,
+ CRC8_INIT_VALUE) != CRC8_GOOD_VALUE(brcms_srom_crc8_table))
+ err = -EIO;
+
+ /* now correct the endianness of the byte array */
+ ltoh16_buf(buf, bufsz);
+
+ return err;
+}
+
+/*
+ * Initialize nonvolatile variable table from sprom.
+ * Return 0 on success, nonzero on error.
+ */
+static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
+{
+ u16 *srom;
+ u16 __iomem *sromwindow;
+ u8 sromrev = 0;
+ u32 sr;
+ int err = 0;
+
+ /*
+ * Apply CRC over SROM content regardless SROM is present or not.
+ */
+ srom = kmalloc(SROM_MAX, GFP_ATOMIC);
+ if (!srom)
+ return -ENOMEM;
+
+ sromwindow = srom_window_address(sih, curmap);
+
+ crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY);
+ if (ai_is_sprom_available(sih)) {
+ err = sprom_read_pci(sih, sromwindow, 0, srom, SROM_WORDS,
+ true);
+
+ if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) ||
+ (((sih->buscoretype == PCIE_CORE_ID)
+ && (sih->buscorerev >= 6))
+ || ((sih->buscoretype == PCI_CORE_ID)
+ && (sih->buscorerev >= 0xe)))) {
+ /* sromrev >= 4, read more */
+ err = sprom_read_pci(sih, sromwindow, 0, srom,
+ SROM4_WORDS, true);
+ sromrev = srom[SROM4_CRCREV] & 0xff;
+ } else if (err == 0) {
+ /* srom is good and is rev < 4 */
+ /* top word of sprom contains version and crc8 */
+ sromrev = srom[SROM_CRCREV] & 0xff;
+ /* bcm4401 sroms misprogrammed */
+ if (sromrev == 0x10)
+ sromrev = 1;
+ }
+ } else {
+ /* Use OTP if SPROM not available */
+ err = otp_read_pci(sih, srom, SROM_MAX);
+ if (err == 0)
+ /* OTP only contain SROM rev8/rev9 for now */
+ sromrev = srom[SROM4_CRCREV] & 0xff;
+ }
+
+ if (!err) {
+ struct si_info *sii = (struct si_info *)sih;
+
+ /* Bitmask for the sromrev */
+ sr = 1 << sromrev;
+
+ /*
+ * srom version check: Current valid versions: 1, 2, 3, 4, 5, 8,
+ * 9
+ */
+ if ((sr & 0x33e) == 0) {
+ err = -EINVAL;
+ goto errout;
+ }
+
+ INIT_LIST_HEAD(&sii->var_list);
+
+ /* parse SROM into name=value pairs. */
+ _initvars_srom_pci(sromrev, srom, &sii->var_list);
+ }
+
+errout:
+ kfree(srom);
+ return err;
+}
+
+void srom_free_vars(struct si_pub *sih)
+{
+ struct si_info *sii;
+ struct brcms_srom_list_head *entry, *next;
+
+ sii = (struct si_info *)sih;
+ list_for_each_entry_safe(entry, next, &sii->var_list, var_list) {
+ list_del(&entry->var_list);
+ kfree(entry);
+ }
+}
+/*
+ * Initialize local vars from the right source for this platform.
+ * Return 0 on success, nonzero on error.
+ */
+int srom_var_init(struct si_pub *sih, void __iomem *curmap)
+{
+ uint len;
+
+ len = 0;
+
+ if (curmap != NULL)
+ return initvars_srom_pci(sih, curmap);
+
+ return -EINVAL;
+}
+
+/*
+ * Search the name=value vars for a specific one and return its value.
+ * Returns NULL if not found.
+ */
+char *getvar(struct si_pub *sih, enum brcms_srom_id id)
+{
+ struct si_info *sii;
+ struct brcms_srom_list_head *entry;
+
+ sii = (struct si_info *)sih;
+
+ list_for_each_entry(entry, &sii->var_list, var_list)
+ if (entry->varid == id)
+ return &entry->buf[0];
+
+ /* nothing found */
+ return NULL;
+}
+
+/*
+ * Search the vars for a specific one and return its value as
+ * an integer. Returns 0 if not found.-
+ */
+int getintvar(struct si_pub *sih, enum brcms_srom_id id)
+{
+ struct si_info *sii;
+ struct brcms_srom_list_head *entry;
+ unsigned long res;
+
+ sii = (struct si_info *)sih;
+
+ list_for_each_entry(entry, &sii->var_list, var_list)
+ if (entry->varid == id) {
+ if (entry->var_type == BRCMS_SROM_SNUMBER ||
+ entry->var_type == BRCMS_SROM_UNUMBER)
+ return (int)entry->sval;
+ else if (!kstrtoul(&entry->buf[0], 0, &res))
+ return (int)res;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/brcm80211/brcmsmac/srom.h b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
index efc4d1edd86..708c43ff51c 100644
--- a/drivers/staging/brcm80211/brcmsmac/srom.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
@@ -20,8 +20,8 @@
#include "types.h"
/* Prototypes */
-extern int srom_var_init(struct si_pub *sih, uint bus, void *curmap,
- char **vars, uint *count);
+extern int srom_var_init(struct si_pub *sih, void __iomem *curmap);
+extern void srom_free_vars(struct si_pub *sih);
extern int srom_read(struct si_pub *sih, uint bus, void *curmap,
uint byteoff, uint nbytes, u16 *buf, bool check_crc);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
new file mode 100644
index 00000000000..d8f528eb180
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <net/mac80211.h>
+
+#include "types.h"
+#include "d11.h"
+#include "rate.h"
+#include "phy/phy_hal.h"
+#include "channel.h"
+#include "main.h"
+#include "stf.h"
+
+#define MIN_SPATIAL_EXPANSION 0
+#define MAX_SPATIAL_EXPANSION 1
+
+#define BRCMS_STF_SS_STBC_RX(wlc) (BRCMS_ISNPHY(wlc->band) && \
+ NREV_GT(wlc->band->phyrev, 3) && NREV_LE(wlc->band->phyrev, 6))
+
+#define NSTS_1 1
+#define NSTS_2 2
+#define NSTS_3 3
+#define NSTS_4 4
+
+static const u8 txcore_default[5] = {
+ (0), /* bitmap of the core enabled */
+ (0x01), /* For Nsts = 1, enable core 1 */
+ (0x03), /* For Nsts = 2, enable core 1 & 2 */
+ (0x07), /* For Nsts = 3, enable core 1, 2 & 3 */
+ (0x0f) /* For Nsts = 4, enable all cores */
+};
+
+static void brcms_c_stf_stbc_rx_ht_update(struct brcms_c_info *wlc, int val)
+{
+ /* MIMOPHYs rev3-6 cannot receive STBC with only one rx core active */
+ if (BRCMS_STF_SS_STBC_RX(wlc)) {
+ if ((wlc->stf->rxstreams == 1) && (val != HT_CAP_RX_STBC_NO))
+ return;
+ }
+
+ if (wlc->pub->up) {
+ brcms_c_update_beacon(wlc);
+ brcms_c_update_probe_resp(wlc, true);
+ }
+}
+
+/*
+ * every WLC_TEMPSENSE_PERIOD seconds temperature check to decide whether to
+ * turn on/off txchain.
+ */
+void brcms_c_tempsense_upd(struct brcms_c_info *wlc)
+{
+ struct brcms_phy_pub *pi = wlc->band->pi;
+ uint active_chains, txchain;
+
+ /* Check if the chip is too hot. Disable one Tx chain, if it is */
+ /* high 4 bits are for Rx chain, low 4 bits are for Tx chain */
+ active_chains = wlc_phy_stf_chain_active_get(pi);
+ txchain = active_chains & 0xf;
+
+ if (wlc->stf->txchain == wlc->stf->hw_txchain) {
+ if (txchain && (txchain < wlc->stf->hw_txchain))
+ /* turn off 1 tx chain */
+ brcms_c_stf_txchain_set(wlc, txchain, true);
+ } else if (wlc->stf->txchain < wlc->stf->hw_txchain) {
+ if (txchain == wlc->stf->hw_txchain)
+ /* turn back on txchain */
+ brcms_c_stf_txchain_set(wlc, txchain, true);
+ }
+}
+
+void
+brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc, u16 *ss_algo_channel,
+ u16 chanspec)
+{
+ struct tx_power power;
+ u8 siso_mcs_id, cdd_mcs_id, stbc_mcs_id;
+
+ /* Clear previous settings */
+ *ss_algo_channel = 0;
+
+ if (!wlc->pub->up) {
+ *ss_algo_channel = (u16) -1;
+ return;
+ }
+
+ wlc_phy_txpower_get_current(wlc->band->pi, &power,
+ CHSPEC_CHANNEL(chanspec));
+
+ siso_mcs_id = (CHSPEC_IS40(chanspec)) ?
+ WL_TX_POWER_MCS40_SISO_FIRST : WL_TX_POWER_MCS20_SISO_FIRST;
+ cdd_mcs_id = (CHSPEC_IS40(chanspec)) ?
+ WL_TX_POWER_MCS40_CDD_FIRST : WL_TX_POWER_MCS20_CDD_FIRST;
+ stbc_mcs_id = (CHSPEC_IS40(chanspec)) ?
+ WL_TX_POWER_MCS40_STBC_FIRST : WL_TX_POWER_MCS20_STBC_FIRST;
+
+ /* criteria to choose stf mode */
+
+ /*
+ * the "+3dbm (12 0.25db units)" is to account for the fact that with
+ * CDD, tx occurs on both chains
+ */
+ if (power.target[siso_mcs_id] > (power.target[cdd_mcs_id] + 12))
+ setbit(ss_algo_channel, PHY_TXC1_MODE_SISO);
+ else
+ setbit(ss_algo_channel, PHY_TXC1_MODE_CDD);
+
+ /*
+ * STBC is ORed into to algo channel as STBC requires per-packet SCB
+ * capability check so cannot be default mode of operation. One of
+ * SISO, CDD have to be set
+ */
+ if (power.target[siso_mcs_id] <= (power.target[stbc_mcs_id] + 12))
+ setbit(ss_algo_channel, PHY_TXC1_MODE_STBC);
+}
+
+static bool brcms_c_stf_stbc_tx_set(struct brcms_c_info *wlc, s32 int_val)
+{
+ if ((int_val != AUTO) && (int_val != OFF) && (int_val != ON))
+ return false;
+
+ if ((int_val == ON) && (wlc->stf->txstreams == 1))
+ return false;
+
+ wlc->bandstate[BAND_2G_INDEX]->band_stf_stbc_tx = (s8) int_val;
+ wlc->bandstate[BAND_5G_INDEX]->band_stf_stbc_tx = (s8) int_val;
+
+ return true;
+}
+
+bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val)
+{
+ if ((int_val != HT_CAP_RX_STBC_NO)
+ && (int_val != HT_CAP_RX_STBC_ONE_STREAM))
+ return false;
+
+ if (BRCMS_STF_SS_STBC_RX(wlc)) {
+ if ((int_val != HT_CAP_RX_STBC_NO)
+ && (wlc->stf->rxstreams == 1))
+ return false;
+ }
+
+ brcms_c_stf_stbc_rx_ht_update(wlc, int_val);
+ return true;
+}
+
+static int brcms_c_stf_txcore_set(struct brcms_c_info *wlc, u8 Nsts,
+ u8 core_mask)
+{
+ BCMMSG(wlc->wiphy, "wl%d: Nsts %d core_mask %x\n",
+ wlc->pub->unit, Nsts, core_mask);
+
+ if (hweight8(core_mask) > wlc->stf->txstreams)
+ core_mask = 0;
+
+ if ((hweight8(core_mask) == wlc->stf->txstreams) &&
+ ((core_mask & ~wlc->stf->txchain)
+ || !(core_mask & wlc->stf->txchain)))
+ core_mask = wlc->stf->txchain;
+
+ wlc->stf->txcore[Nsts] = core_mask;
+ /* Nsts = 1..4, txcore index = 1..4 */
+ if (Nsts == 1) {
+ /* Needs to update beacon and ucode generated response
+ * frames when 1 stream core map changed
+ */
+ wlc->stf->phytxant = core_mask << PHY_TXC_ANT_SHIFT;
+ brcms_b_txant_set(wlc->hw, wlc->stf->phytxant);
+ if (wlc->clk) {
+ brcms_c_suspend_mac_and_wait(wlc);
+ brcms_c_beacon_phytxctl_txant_upd(wlc, wlc->bcn_rspec);
+ brcms_c_enable_mac(wlc);
+ }
+ }
+
+ return 0;
+}
+
+static int brcms_c_stf_spatial_policy_set(struct brcms_c_info *wlc, int val)
+{
+ int i;
+ u8 core_mask = 0;
+
+ BCMMSG(wlc->wiphy, "wl%d: val %x\n", wlc->pub->unit, val);
+
+ wlc->stf->spatial_policy = (s8) val;
+ for (i = 1; i <= MAX_STREAMS_SUPPORTED; i++) {
+ core_mask = (val == MAX_SPATIAL_EXPANSION) ?
+ wlc->stf->txchain : txcore_default[i];
+ brcms_c_stf_txcore_set(wlc, (u8) i, core_mask);
+ }
+ return 0;
+}
+
+/*
+ * Centralized txant update function. call it whenever wlc->stf->txant and/or
+ * wlc->stf->txchain change.
+ *
+ * Antennas are controlled by ucode indirectly, which drives PHY or GPIO to
+ * achieve various tx/rx antenna selection schemes
+ *
+ * legacy phy, bit 6 and bit 7 means antenna 0 and 1 respectively, bit6+bit7
+ * means auto(last rx).
+ * for NREV<3, bit 6 and bit 7 means antenna 0 and 1 respectively, bit6+bit7
+ * means last rx and do tx-antenna selection for SISO transmissions
+ * for NREV=3, bit 6 and bit _8_ means antenna 0 and 1 respectively, bit6+bit7
+ * means last rx and do tx-antenna selection for SISO transmissions
+ * for NREV>=7, bit 6 and bit 7 mean antenna 0 and 1 respectively, nit6+bit7
+ * means both cores active
+*/
+static void _brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc)
+{
+ s8 txant;
+
+ txant = (s8) wlc->stf->txant;
+ if (BRCMS_PHY_11N_CAP(wlc->band)) {
+ if (txant == ANT_TX_FORCE_0) {
+ wlc->stf->phytxant = PHY_TXC_ANT_0;
+ } else if (txant == ANT_TX_FORCE_1) {
+ wlc->stf->phytxant = PHY_TXC_ANT_1;
+
+ if (BRCMS_ISNPHY(wlc->band) &&
+ NREV_GE(wlc->band->phyrev, 3)
+ && NREV_LT(wlc->band->phyrev, 7))
+ wlc->stf->phytxant = PHY_TXC_ANT_2;
+ } else {
+ if (BRCMS_ISLCNPHY(wlc->band) ||
+ BRCMS_ISSSLPNPHY(wlc->band))
+ wlc->stf->phytxant = PHY_TXC_LCNPHY_ANT_LAST;
+ else {
+ /* catch out of sync wlc->stf->txcore */
+ WARN_ON(wlc->stf->txchain <= 0);
+ wlc->stf->phytxant =
+ wlc->stf->txchain << PHY_TXC_ANT_SHIFT;
+ }
+ }
+ } else {
+ if (txant == ANT_TX_FORCE_0)
+ wlc->stf->phytxant = PHY_TXC_OLD_ANT_0;
+ else if (txant == ANT_TX_FORCE_1)
+ wlc->stf->phytxant = PHY_TXC_OLD_ANT_1;
+ else
+ wlc->stf->phytxant = PHY_TXC_OLD_ANT_LAST;
+ }
+
+ brcms_b_txant_set(wlc->hw, wlc->stf->phytxant);
+}
+
+int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force)
+{
+ u8 txchain = (u8) int_val;
+ u8 txstreams;
+ uint i;
+
+ if (wlc->stf->txchain == txchain)
+ return 0;
+
+ if ((txchain & ~wlc->stf->hw_txchain)
+ || !(txchain & wlc->stf->hw_txchain))
+ return -EINVAL;
+
+ /*
+ * if nrate override is configured to be non-SISO STF mode, reject
+ * reducing txchain to 1
+ */
+ txstreams = (u8) hweight8(txchain);
+ if (txstreams > MAX_STREAMS_SUPPORTED)
+ return -EINVAL;
+
+ wlc->stf->txchain = txchain;
+ wlc->stf->txstreams = txstreams;
+ brcms_c_stf_stbc_tx_set(wlc, wlc->band->band_stf_stbc_tx);
+ brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
+ brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
+ wlc->stf->txant =
+ (wlc->stf->txstreams == 1) ? ANT_TX_FORCE_0 : ANT_TX_DEF;
+ _brcms_c_stf_phy_txant_upd(wlc);
+
+ wlc_phy_stf_chain_set(wlc->band->pi, wlc->stf->txchain,
+ wlc->stf->rxchain);
+
+ for (i = 1; i <= MAX_STREAMS_SUPPORTED; i++)
+ brcms_c_stf_txcore_set(wlc, (u8) i, txcore_default[i]);
+
+ return 0;
+}
+
+/*
+ * update wlc->stf->ss_opmode which represents the operational stf_ss mode
+ * we're using
+ */
+int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
+{
+ int ret_code = 0;
+ u8 prev_stf_ss;
+ u8 upd_stf_ss;
+
+ prev_stf_ss = wlc->stf->ss_opmode;
+
+ /*
+ * NOTE: opmode can only be SISO or CDD as STBC is decided on a
+ * per-packet basis
+ */
+ if (BRCMS_STBC_CAP_PHY(wlc) &&
+ wlc->stf->ss_algosel_auto
+ && (wlc->stf->ss_algo_channel != (u16) -1)) {
+ upd_stf_ss = (wlc->stf->txstreams == 1 ||
+ isset(&wlc->stf->ss_algo_channel,
+ PHY_TXC1_MODE_SISO)) ?
+ PHY_TXC1_MODE_SISO : PHY_TXC1_MODE_CDD;
+ } else {
+ if (wlc->band != band)
+ return ret_code;
+ upd_stf_ss = (wlc->stf->txstreams == 1) ?
+ PHY_TXC1_MODE_SISO : band->band_stf_ss_mode;
+ }
+ if (prev_stf_ss != upd_stf_ss) {
+ wlc->stf->ss_opmode = upd_stf_ss;
+ brcms_b_band_stf_ss_set(wlc->hw, upd_stf_ss);
+ }
+
+ return ret_code;
+}
+
+int brcms_c_stf_attach(struct brcms_c_info *wlc)
+{
+ wlc->bandstate[BAND_2G_INDEX]->band_stf_ss_mode = PHY_TXC1_MODE_SISO;
+ wlc->bandstate[BAND_5G_INDEX]->band_stf_ss_mode = PHY_TXC1_MODE_CDD;
+
+ if (BRCMS_ISNPHY(wlc->band) &&
+ (wlc_phy_txpower_hw_ctrl_get(wlc->band->pi) != PHY_TPC_HW_ON))
+ wlc->bandstate[BAND_2G_INDEX]->band_stf_ss_mode =
+ PHY_TXC1_MODE_CDD;
+ brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
+ brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
+
+ brcms_c_stf_stbc_rx_ht_update(wlc, HT_CAP_RX_STBC_NO);
+ wlc->bandstate[BAND_2G_INDEX]->band_stf_stbc_tx = OFF;
+ wlc->bandstate[BAND_5G_INDEX]->band_stf_stbc_tx = OFF;
+
+ if (BRCMS_STBC_CAP_PHY(wlc)) {
+ wlc->stf->ss_algosel_auto = true;
+ /* Init the default value */
+ wlc->stf->ss_algo_channel = (u16) -1;
+ }
+ return 0;
+}
+
+void brcms_c_stf_detach(struct brcms_c_info *wlc)
+{
+}
+
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc)
+{
+ _brcms_c_stf_phy_txant_upd(wlc);
+}
+
+void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc)
+{
+ /* get available rx/tx chains */
+ wlc->stf->hw_txchain = (u8) getintvar(wlc->hw->sih, BRCMS_SROM_TXCHAIN);
+ wlc->stf->hw_rxchain = (u8) getintvar(wlc->hw->sih, BRCMS_SROM_RXCHAIN);
+
+ /* these parameter are intended to be used for all PHY types */
+ if (wlc->stf->hw_txchain == 0 || wlc->stf->hw_txchain == 0xf) {
+ if (BRCMS_ISNPHY(wlc->band))
+ wlc->stf->hw_txchain = TXCHAIN_DEF_NPHY;
+ else
+ wlc->stf->hw_txchain = TXCHAIN_DEF;
+ }
+
+ wlc->stf->txchain = wlc->stf->hw_txchain;
+ wlc->stf->txstreams = (u8) hweight8(wlc->stf->hw_txchain);
+
+ if (wlc->stf->hw_rxchain == 0 || wlc->stf->hw_rxchain == 0xf) {
+ if (BRCMS_ISNPHY(wlc->band))
+ wlc->stf->hw_rxchain = RXCHAIN_DEF_NPHY;
+ else
+ wlc->stf->hw_rxchain = RXCHAIN_DEF;
+ }
+
+ wlc->stf->rxchain = wlc->stf->hw_rxchain;
+ wlc->stf->rxstreams = (u8) hweight8(wlc->stf->hw_rxchain);
+
+ /* initialize the txcore table */
+ memcpy(wlc->stf->txcore, txcore_default, sizeof(wlc->stf->txcore));
+
+ /* default spatial_policy */
+ wlc->stf->spatial_policy = MIN_SPATIAL_EXPANSION;
+ brcms_c_stf_spatial_policy_set(wlc, MIN_SPATIAL_EXPANSION);
+}
+
+static u16 _brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
+ u32 rspec)
+{
+ u16 phytxant = wlc->stf->phytxant;
+
+ if (rspec_stf(rspec) != PHY_TXC1_MODE_SISO)
+ phytxant = wlc->stf->txchain << PHY_TXC_ANT_SHIFT;
+ else if (wlc->stf->txant == ANT_TX_DEF)
+ phytxant = wlc->stf->txchain << PHY_TXC_ANT_SHIFT;
+ phytxant &= PHY_TXC_ANT_MASK;
+ return phytxant;
+}
+
+u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc, u32 rspec)
+{
+ return _brcms_c_stf_phytxchain_sel(wlc, rspec);
+}
+
+u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc, u32 rspec)
+{
+ u16 phytxant = wlc->stf->phytxant;
+ u16 mask = PHY_TXC_ANT_MASK;
+
+ /* for non-siso rates or default setting, use the available chains */
+ if (BRCMS_ISNPHY(wlc->band)) {
+ phytxant = _brcms_c_stf_phytxchain_sel(wlc, rspec);
+ mask = PHY_TXC_HTANT_MASK;
+ }
+ phytxant |= phytxant & mask;
+ return phytxant;
+}
diff --git a/drivers/staging/brcm80211/brcmsmac/stf.h b/drivers/net/wireless/brcm80211/brcmsmac/stf.h
index 06c2a399649..19f6580f69b 100644
--- a/drivers/staging/brcm80211/brcmsmac/stf.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.h
@@ -25,7 +25,7 @@ extern void brcms_c_stf_detach(struct brcms_c_info *wlc);
extern void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
extern void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
u16 *ss_algo_channel,
- chanspec_t chanspec);
+ u16 chanspec);
extern int brcms_c_stf_ss_update(struct brcms_c_info *wlc,
struct brcms_band *band);
extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
@@ -35,8 +35,8 @@ extern bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
extern void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
extern u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
- ratespec_t rspec);
+ u32 rspec);
extern u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc,
- ratespec_t rspec);
+ u32 rspec);
#endif /* _BRCM_STF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/types.h b/drivers/net/wireless/brcm80211/brcmsmac/types.h
new file mode 100644
index 00000000000..27a814b0746
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/types.h
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_TYPES_H_
+#define _BRCM_TYPES_H_
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+#define WL_CHAN_FREQ_RANGE_2G 0
+#define WL_CHAN_FREQ_RANGE_5GL 1
+#define WL_CHAN_FREQ_RANGE_5GM 2
+#define WL_CHAN_FREQ_RANGE_5GH 3
+
+/* boardflags */
+
+/* Board has gpio 9 controlling the PA */
+#define BFL_PACTRL 0x00000002
+/* Not ok to power down the chip pll and oscillator */
+#define BFL_NOPLLDOWN 0x00000020
+/* Board supports the Front End Module */
+#define BFL_FEM 0x00000800
+/* Board has an external LNA in 2.4GHz band */
+#define BFL_EXTLNA 0x00001000
+/* Board has no PA */
+#define BFL_NOPA 0x00010000
+/* Power topology uses BUCKBOOST */
+#define BFL_BUCKBOOST 0x00200000
+/* Board has FEM and switch to share antenna w/ BT */
+#define BFL_FEM_BT 0x00400000
+/* Power topology doesn't use CBUCK */
+#define BFL_NOCBUCK 0x00800000
+/* Power topology uses PALDO */
+#define BFL_PALDO 0x02000000
+/* Board has an external LNA in 5GHz band */
+#define BFL_EXTLNA_5GHz 0x10000000
+
+/* boardflags2 */
+
+/* Board has an external rxbb regulator */
+#define BFL2_RXBB_INT_REG_DIS 0x00000001
+/* Flag to implement alternative A-band PLL settings */
+#define BFL2_APLL_WAR 0x00000002
+/* Board permits enabling TX Power Control */
+#define BFL2_TXPWRCTRL_EN 0x00000004
+/* Board supports the 2X4 diversity switch */
+#define BFL2_2X4_DIV 0x00000008
+/* Board supports 5G band power gain */
+#define BFL2_5G_PWRGAIN 0x00000010
+/* Board overrides ASPM and Clkreq settings */
+#define BFL2_PCIEWAR_OVR 0x00000020
+#define BFL2_LEGACY 0x00000080
+/* 4321mcm93 board uses Skyworks FEM */
+#define BFL2_SKWRKFEM_BRD 0x00000100
+/* Board has a WAR for clock-harmonic spurs */
+#define BFL2_SPUR_WAR 0x00000200
+/* Flag to narrow G-band PLL loop b/w */
+#define BFL2_GPLL_WAR 0x00000400
+/* Tx CCK pkts on Ant 0 only */
+#define BFL2_SINGLEANT_CCK 0x00001000
+/* WAR to reduce and avoid clock-harmonic spurs in 2G */
+#define BFL2_2G_SPUR_WAR 0x00002000
+/* Flag to widen G-band PLL loop b/w */
+#define BFL2_GPLL_WAR2 0x00010000
+#define BFL2_IPALVLSHIFT_3P3 0x00020000
+/* Use internal envelope detector for TX IQCAL */
+#define BFL2_INTERNDET_TXIQCAL 0x00040000
+/* Keep the buffered Xtal output from radio "ON". Most drivers will turn it
+ * off without this flag to save power. */
+#define BFL2_XTALBUFOUTEN 0x00080000
+
+/*
+ * board specific GPIO assignment, gpio 0-3 are also customer-configurable
+ * led
+ */
+
+/* bit 9 controls the PA on new 4306 boards */
+#define BOARD_GPIO_PACTRL 0x200
+#define BOARD_GPIO_12 0x1000
+#define BOARD_GPIO_13 0x2000
+
+/* **** Core type/rev defaults **** */
+#define D11CONF 0x0fffffb0 /* Supported D11 revs: 4, 5, 7-27
+ * also need to update wlc.h MAXCOREREV
+ */
+
+#define NCONF 0x000001ff /* Supported nphy revs:
+ * 0 4321a0
+ * 1 4321a1
+ * 2 4321b0/b1/c0/c1
+ * 3 4322a0
+ * 4 4322a1
+ * 5 4716a0
+ * 6 43222a0, 43224a0
+ * 7 43226a0
+ * 8 5357a0, 43236a0
+ */
+
+#define LCNCONF 0x00000007 /* Supported lcnphy revs:
+ * 0 4313a0, 4336a0, 4330a0
+ * 1
+ * 2 4330a0
+ */
+
+#define SSLPNCONF 0x0000000f /* Supported sslpnphy revs:
+ * 0 4329a0/k0
+ * 1 4329b0/4329C0
+ * 2 4319a0
+ * 3 5356a0
+ */
+
+/********************************************************************
+ * Phy/Core Configuration. Defines macros to to check core phy/rev *
+ * compile-time configuration. Defines default core support. *
+ * ******************************************************************
+ */
+
+/* Basic macros to check a configuration bitmask */
+
+#define CONF_HAS(config, val) ((config) & (1 << (val)))
+#define CONF_MSK(config, mask) ((config) & (mask))
+#define MSK_RANGE(low, hi) ((1 << ((hi)+1)) - (1 << (low)))
+#define CONF_RANGE(config, low, hi) (CONF_MSK(config, MSK_RANGE(low, high)))
+
+#define CONF_IS(config, val) ((config) == (1 << (val)))
+#define CONF_GE(config, val) ((config) & (0-(1 << (val))))
+#define CONF_GT(config, val) ((config) & (0-2*(1 << (val))))
+#define CONF_LT(config, val) ((config) & ((1 << (val))-1))
+#define CONF_LE(config, val) ((config) & (2*(1 << (val))-1))
+
+/* Wrappers for some of the above, specific to config constants */
+
+#define NCONF_HAS(val) CONF_HAS(NCONF, val)
+#define NCONF_MSK(mask) CONF_MSK(NCONF, mask)
+#define NCONF_IS(val) CONF_IS(NCONF, val)
+#define NCONF_GE(val) CONF_GE(NCONF, val)
+#define NCONF_GT(val) CONF_GT(NCONF, val)
+#define NCONF_LT(val) CONF_LT(NCONF, val)
+#define NCONF_LE(val) CONF_LE(NCONF, val)
+
+#define LCNCONF_HAS(val) CONF_HAS(LCNCONF, val)
+#define LCNCONF_MSK(mask) CONF_MSK(LCNCONF, mask)
+#define LCNCONF_IS(val) CONF_IS(LCNCONF, val)
+#define LCNCONF_GE(val) CONF_GE(LCNCONF, val)
+#define LCNCONF_GT(val) CONF_GT(LCNCONF, val)
+#define LCNCONF_LT(val) CONF_LT(LCNCONF, val)
+#define LCNCONF_LE(val) CONF_LE(LCNCONF, val)
+
+#define D11CONF_HAS(val) CONF_HAS(D11CONF, val)
+#define D11CONF_MSK(mask) CONF_MSK(D11CONF, mask)
+#define D11CONF_IS(val) CONF_IS(D11CONF, val)
+#define D11CONF_GE(val) CONF_GE(D11CONF, val)
+#define D11CONF_GT(val) CONF_GT(D11CONF, val)
+#define D11CONF_LT(val) CONF_LT(D11CONF, val)
+#define D11CONF_LE(val) CONF_LE(D11CONF, val)
+
+#define PHYCONF_HAS(val) CONF_HAS(PHYTYPE, val)
+#define PHYCONF_IS(val) CONF_IS(PHYTYPE, val)
+
+#define NREV_IS(var, val) \
+ (NCONF_HAS(val) && (NCONF_IS(val) || ((var) == (val))))
+
+#define NREV_GE(var, val) \
+ (NCONF_GE(val) && (!NCONF_LT(val) || ((var) >= (val))))
+
+#define NREV_GT(var, val) \
+ (NCONF_GT(val) && (!NCONF_LE(val) || ((var) > (val))))
+
+#define NREV_LT(var, val) \
+ (NCONF_LT(val) && (!NCONF_GE(val) || ((var) < (val))))
+
+#define NREV_LE(var, val) \
+ (NCONF_LE(val) && (!NCONF_GT(val) || ((var) <= (val))))
+
+#define LCNREV_IS(var, val) \
+ (LCNCONF_HAS(val) && (LCNCONF_IS(val) || ((var) == (val))))
+
+#define LCNREV_GE(var, val) \
+ (LCNCONF_GE(val) && (!LCNCONF_LT(val) || ((var) >= (val))))
+
+#define LCNREV_GT(var, val) \
+ (LCNCONF_GT(val) && (!LCNCONF_LE(val) || ((var) > (val))))
+
+#define LCNREV_LT(var, val) \
+ (LCNCONF_LT(val) && (!LCNCONF_GE(val) || ((var) < (val))))
+
+#define LCNREV_LE(var, val) \
+ (LCNCONF_LE(val) && (!LCNCONF_GT(val) || ((var) <= (val))))
+
+#define D11REV_IS(var, val) \
+ (D11CONF_HAS(val) && (D11CONF_IS(val) || ((var) == (val))))
+
+#define D11REV_GE(var, val) \
+ (D11CONF_GE(val) && (!D11CONF_LT(val) || ((var) >= (val))))
+
+#define D11REV_GT(var, val) \
+ (D11CONF_GT(val) && (!D11CONF_LE(val) || ((var) > (val))))
+
+#define D11REV_LT(var, val) \
+ (D11CONF_LT(val) && (!D11CONF_GE(val) || ((var) < (val))))
+
+#define D11REV_LE(var, val) \
+ (D11CONF_LE(val) && (!D11CONF_GT(val) || ((var) <= (val))))
+
+#define PHYTYPE_IS(var, val)\
+ (PHYCONF_HAS(val) && (PHYCONF_IS(val) || ((var) == (val))))
+
+/* Set up PHYTYPE automatically: (depends on PHY_TYPE_X, from d11.h) */
+
+#define _PHYCONF_N (1 << PHY_TYPE_N)
+#define _PHYCONF_LCN (1 << PHY_TYPE_LCN)
+#define _PHYCONF_SSLPN (1 << PHY_TYPE_SSN)
+
+#define PHYTYPE (_PHYCONF_N | _PHYCONF_LCN | _PHYCONF_SSLPN)
+
+/* Utility macro to identify 802.11n (HT) capable PHYs */
+#define PHYTYPE_11N_CAP(phytype) \
+ (PHYTYPE_IS(phytype, PHY_TYPE_N) || \
+ PHYTYPE_IS(phytype, PHY_TYPE_LCN) || \
+ PHYTYPE_IS(phytype, PHY_TYPE_SSN))
+
+/* Last but not least: shorter wlc-specific var checks */
+#define BRCMS_ISNPHY(band) PHYTYPE_IS((band)->phytype, PHY_TYPE_N)
+#define BRCMS_ISLCNPHY(band) PHYTYPE_IS((band)->phytype, PHY_TYPE_LCN)
+#define BRCMS_ISSSLPNPHY(band) PHYTYPE_IS((band)->phytype, PHY_TYPE_SSN)
+
+#define BRCMS_PHY_11N_CAP(band) PHYTYPE_11N_CAP((band)->phytype)
+
+/**********************************************************************
+ * ------------- End of Core phy/rev configuration. ----------------- *
+ * ********************************************************************
+ */
+
+#define BCMMSG(dev, fmt, args...) \
+do { \
+ if (brcm_msg_level & LOG_TRACE_VAL) \
+ wiphy_err(dev, "%s: " fmt, __func__, ##args); \
+} while (0)
+
+/*
+ * Register access macros.
+ *
+ * These macro's take a pointer to the address to read as one of their
+ * arguments. The macro itself deduces the size of the IO transaction (u8, u16
+ * or u32). Advantage of this approach in combination with using a struct to
+ * define the registers in a register block, is that access size and access
+ * location are defined in only one spot. This reduces the risk of the
+ * programmer trying to use an unsupported transaction size on a register.
+ *
+ */
+
+#define R_REG(r) \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ switch (sizeof(*(r))) { \
+ case sizeof(u8): \
+ __osl_v = readb((u8 __iomem *)(r)); \
+ break; \
+ case sizeof(u16): \
+ __osl_v = readw((u16 __iomem *)(r)); \
+ break; \
+ case sizeof(u32): \
+ __osl_v = readl((u32 __iomem *)(r)); \
+ break; \
+ } \
+ __osl_v; \
+ })
+
+#define W_REG(r, v) do { \
+ switch (sizeof(*(r))) { \
+ case sizeof(u8): \
+ writeb((u8)((v) & 0xFF), (u8 __iomem *)(r)); \
+ break; \
+ case sizeof(u16): \
+ writew((u16)((v) & 0xFFFF), (u16 __iomem *)(r)); \
+ break; \
+ case sizeof(u32): \
+ writel((u32)(v), (u32 __iomem *)(r)); \
+ break; \
+ } \
+ } while (0)
+
+#ifdef CONFIG_BCM47XX
+/*
+ * bcm4716 (which includes 4717 & 4718), plus 4706 on PCIe can reorder
+ * transactions. As a fix, a read after write is performed on certain places
+ * in the code. Older chips and the newer 5357 family don't require this fix.
+ */
+#define W_REG_FLUSH(r, v) ({ W_REG((r), (v)); (void)R_REG(r); })
+#else
+#define W_REG_FLUSH(r, v) W_REG((r), (v))
+#endif /* CONFIG_BCM47XX */
+
+#define AND_REG(r, v) W_REG((r), R_REG(r) & (v))
+#define OR_REG(r, v) W_REG((r), R_REG(r) | (v))
+
+#define SET_REG(r, mask, val) \
+ W_REG((r), ((R_REG(r) & ~(mask)) | (val)))
+
+/* multi-bool data type: set of bools, mbool is true if any is set */
+
+/* set one bool */
+#define mboolset(mb, bit) ((mb) |= (bit))
+/* clear one bool */
+#define mboolclr(mb, bit) ((mb) &= ~(bit))
+/* true if one bool is set */
+#define mboolisset(mb, bit) (((mb) & (bit)) != 0)
+#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val)))
+
+#define CEIL(x, y) (((x) + ((y)-1)) / (y))
+
+/* forward declarations */
+struct wiphy;
+struct ieee80211_sta;
+struct ieee80211_tx_queue_params;
+struct brcms_info;
+struct brcms_c_info;
+struct brcms_hardware;
+struct brcms_txq_info;
+struct brcms_band;
+struct dma_pub;
+struct si_pub;
+struct tx_status;
+struct d11rxhdr;
+struct txpwr_limits;
+
+/* iovar structure */
+struct brcmu_iovar {
+ const char *name; /* name for lookup and display */
+ u16 varid; /* id for switch */
+ u16 flags; /* driver-specific flag bits */
+ u16 type; /* base type of argument */
+ u16 minlen; /* min length for buffer vars */
+};
+
+/* brcm_msg_level is a bit vector with defs in defs.h */
+extern u32 brcm_msg_level;
+
+#endif /* _BRCM_TYPES_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.c b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.c
new file mode 100644
index 00000000000..80e3ccf865e
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <defs.h>
+#include "types.h"
+#include <ucode_loader.h>
+
+enum {
+ D11UCODE_NAMETAG_START = 0,
+ D11LCN0BSINITVALS24,
+ D11LCN0INITVALS24,
+ D11LCN1BSINITVALS24,
+ D11LCN1INITVALS24,
+ D11LCN2BSINITVALS24,
+ D11LCN2INITVALS24,
+ D11N0ABSINITVALS16,
+ D11N0BSINITVALS16,
+ D11N0INITVALS16,
+ D11UCODE_OVERSIGHT16_MIMO,
+ D11UCODE_OVERSIGHT16_MIMOSZ,
+ D11UCODE_OVERSIGHT24_LCN,
+ D11UCODE_OVERSIGHT24_LCNSZ,
+ D11UCODE_OVERSIGHT_BOMMAJOR,
+ D11UCODE_OVERSIGHT_BOMMINOR
+};
+
+int brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode)
+{
+ int rc;
+
+ rc = brcms_check_firmwares(wl);
+
+ rc = rc < 0 ? rc :
+ brcms_ucode_init_buf(wl, (void **)&ucode->d11lcn0bsinitvals24,
+ D11LCN0BSINITVALS24);
+ rc = rc < 0 ?
+ rc : brcms_ucode_init_buf(wl, (void **)&ucode->d11lcn0initvals24,
+ D11LCN0INITVALS24);
+ rc = rc < 0 ?
+ rc : brcms_ucode_init_buf(wl, (void **)&ucode->d11lcn1bsinitvals24,
+ D11LCN1BSINITVALS24);
+ rc = rc < 0 ?
+ rc : brcms_ucode_init_buf(wl, (void **)&ucode->d11lcn1initvals24,
+ D11LCN1INITVALS24);
+ rc = rc < 0 ? rc :
+ brcms_ucode_init_buf(wl, (void **)&ucode->d11lcn2bsinitvals24,
+ D11LCN2BSINITVALS24);
+ rc = rc < 0 ?
+ rc : brcms_ucode_init_buf(wl, (void **)&ucode->d11lcn2initvals24,
+ D11LCN2INITVALS24);
+ rc = rc < 0 ?
+ rc : brcms_ucode_init_buf(wl, (void **)&ucode->d11n0absinitvals16,
+ D11N0ABSINITVALS16);
+ rc = rc < 0 ?
+ rc : brcms_ucode_init_buf(wl, (void **)&ucode->d11n0bsinitvals16,
+ D11N0BSINITVALS16);
+ rc = rc < 0 ?
+ rc : brcms_ucode_init_buf(wl, (void **)&ucode->d11n0initvals16,
+ D11N0INITVALS16);
+ rc = rc < 0 ?
+ rc : brcms_ucode_init_buf(wl, (void **)&ucode->bcm43xx_16_mimo,
+ D11UCODE_OVERSIGHT16_MIMO);
+ rc = rc < 0 ?
+ rc : brcms_ucode_init_uint(wl, &ucode->bcm43xx_16_mimosz,
+ D11UCODE_OVERSIGHT16_MIMOSZ);
+ rc = rc < 0 ?
+ rc : brcms_ucode_init_buf(wl, (void **)&ucode->bcm43xx_24_lcn,
+ D11UCODE_OVERSIGHT24_LCN);
+ rc = rc < 0 ?
+ rc : brcms_ucode_init_uint(wl, &ucode->bcm43xx_24_lcnsz,
+ D11UCODE_OVERSIGHT24_LCNSZ);
+ rc = rc < 0 ?
+ rc : brcms_ucode_init_buf(wl, (void **)&ucode->bcm43xx_bommajor,
+ D11UCODE_OVERSIGHT_BOMMAJOR);
+ rc = rc < 0 ?
+ rc : brcms_ucode_init_buf(wl, (void **)&ucode->bcm43xx_bomminor,
+ D11UCODE_OVERSIGHT_BOMMINOR);
+ return rc;
+}
+
+void brcms_ucode_data_free(struct brcms_ucode *ucode)
+{
+ brcms_ucode_free_buf((void *)ucode->d11lcn0bsinitvals24);
+ brcms_ucode_free_buf((void *)ucode->d11lcn0initvals24);
+ brcms_ucode_free_buf((void *)ucode->d11lcn1bsinitvals24);
+ brcms_ucode_free_buf((void *)ucode->d11lcn1initvals24);
+ brcms_ucode_free_buf((void *)ucode->d11lcn2bsinitvals24);
+ brcms_ucode_free_buf((void *)ucode->d11lcn2initvals24);
+ brcms_ucode_free_buf((void *)ucode->d11n0absinitvals16);
+ brcms_ucode_free_buf((void *)ucode->d11n0bsinitvals16);
+ brcms_ucode_free_buf((void *)ucode->d11n0initvals16);
+ brcms_ucode_free_buf((void *)ucode->bcm43xx_16_mimo);
+ brcms_ucode_free_buf((void *)ucode->bcm43xx_24_lcn);
+ brcms_ucode_free_buf((void *)ucode->bcm43xx_bommajor);
+ brcms_ucode_free_buf((void *)ucode->bcm43xx_bomminor);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
new file mode 100644
index 00000000000..18750a814b4
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _BRCM_UCODE_H_
+#define _BRCM_UCODE_H_
+
+#include "types.h" /* forward structure declarations */
+
+#define MIN_FW_SIZE 40000 /* minimum firmware file size in bytes */
+#define MAX_FW_SIZE 150000
+
+#define UCODE_LOADER_API_VER 0
+
+struct d11init;
+
+struct brcms_ucode {
+ struct d11init *d11lcn0bsinitvals24;
+ struct d11init *d11lcn0initvals24;
+ struct d11init *d11lcn1bsinitvals24;
+ struct d11init *d11lcn1initvals24;
+ struct d11init *d11lcn2bsinitvals24;
+ struct d11init *d11lcn2initvals24;
+ struct d11init *d11n0absinitvals16;
+ struct d11init *d11n0bsinitvals16;
+ struct d11init *d11n0initvals16;
+ __le32 *bcm43xx_16_mimo;
+ size_t bcm43xx_16_mimosz;
+ __le32 *bcm43xx_24_lcn;
+ size_t bcm43xx_24_lcnsz;
+ u32 *bcm43xx_bommajor;
+ u32 *bcm43xx_bomminor;
+};
+
+extern int
+brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
+
+extern void brcms_ucode_data_free(struct brcms_ucode *ucode);
+
+extern int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf,
+ unsigned int idx);
+extern int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
+ unsigned int idx);
+extern void brcms_ucode_free_buf(void *);
+extern int brcms_check_firmwares(struct brcms_info *wl);
+
+#endif /* _BRCM_UCODE_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmutil/Makefile b/drivers/net/wireless/brcm80211/brcmutil/Makefile
new file mode 100644
index 00000000000..6281c416289
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmutil/Makefile
@@ -0,0 +1,28 @@
+#
+# Makefile fragment for Broadcom 802.11n Networking Device Driver Utilities
+#
+# Copyright (c) 2011 Broadcom Corporation
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ccflags-y := \
+ -Idrivers/net/wireless/brcm80211/brcmutil \
+ -Idrivers/net/wireless/brcm80211/include
+
+BRCMUTIL_OFILES := \
+ utils.o
+
+MODULEPFX := brcmutil
+
+obj-$(CONFIG_BRCMUTIL) += $(MODULEPFX).o
+$(MODULEPFX)-objs = $(BRCMUTIL_OFILES)
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
new file mode 100644
index 00000000000..f27c4891082
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <brcmu_utils.h>
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver utilities.");
+MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+struct sk_buff *brcmu_pkt_buf_get_skb(uint len)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(len);
+ if (skb) {
+ skb_put(skb, len);
+ skb->priority = 0;
+ }
+
+ return skb;
+}
+EXPORT_SYMBOL(brcmu_pkt_buf_get_skb);
+
+/* Free the driver packet. Free the tag if present */
+void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ int nest = 0;
+
+ /* perversion: we use skb->next to chain multi-skb packets */
+ while (skb) {
+ nskb = skb->next;
+ skb->next = NULL;
+
+ if (skb->destructor)
+ /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
+ * destructor exists
+ */
+ dev_kfree_skb_any(skb);
+ else
+ /* can free immediately (even in_irq()) if destructor
+ * does not exist
+ */
+ dev_kfree_skb(skb);
+
+ nest++;
+ skb = nskb;
+ }
+}
+EXPORT_SYMBOL(brcmu_pkt_buf_free_skb);
+
+
+/* copy a buffer into a pkt buffer chain */
+uint brcmu_pktfrombuf(struct sk_buff *p, uint offset, int len,
+ unsigned char *buf)
+{
+ uint n, ret = 0;
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = p->next) {
+ if (offset < (uint) (p->len))
+ break;
+ offset -= p->len;
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = p->next) {
+ n = min((uint) (p->len) - offset, (uint) len);
+ memcpy(p->data + offset, buf, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(brcmu_pktfrombuf);
+
+/* return total length of buffer chain */
+uint brcmu_pkttotlen(struct sk_buff *p)
+{
+ uint total;
+
+ total = 0;
+ for (; p; p = p->next)
+ total += p->len;
+ return total;
+}
+EXPORT_SYMBOL(brcmu_pkttotlen);
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
+struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
+ struct sk_buff *p)
+{
+ struct pktq_prec *q;
+
+ if (pktq_full(pq) || pktq_pfull(pq, prec))
+ return NULL;
+
+ q = &pq->q[prec];
+
+ if (q->head)
+ q->tail->prev = p;
+ else
+ q->head = p;
+
+ q->tail = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (u8) prec;
+
+ return p;
+}
+EXPORT_SYMBOL(brcmu_pktq_penq);
+
+struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
+ struct sk_buff *p)
+{
+ struct pktq_prec *q;
+
+ if (pktq_full(pq) || pktq_pfull(pq, prec))
+ return NULL;
+
+ q = &pq->q[prec];
+
+ if (q->head == NULL)
+ q->tail = p;
+
+ p->prev = q->head;
+ q->head = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (u8) prec;
+
+ return p;
+}
+EXPORT_SYMBOL(brcmu_pktq_penq_head);
+
+struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ struct sk_buff *p;
+
+ q = &pq->q[prec];
+
+ p = q->head;
+ if (p == NULL)
+ return NULL;
+
+ q->head = p->prev;
+ if (q->head == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ p->prev = NULL;
+
+ return p;
+}
+EXPORT_SYMBOL(brcmu_pktq_pdeq);
+
+struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ struct sk_buff *p, *prev;
+
+ q = &pq->q[prec];
+
+ p = q->head;
+ if (p == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = p->prev)
+ prev = p;
+
+ if (prev)
+ prev->prev = NULL;
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ return p;
+}
+EXPORT_SYMBOL(brcmu_pktq_pdeq_tail);
+
+void
+brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
+ bool (*fn)(struct sk_buff *, void *), void *arg)
+{
+ struct pktq_prec *q;
+ struct sk_buff *p, *prev = NULL;
+
+ q = &pq->q[prec];
+ p = q->head;
+ while (p) {
+ if (fn == NULL || (*fn) (p, arg)) {
+ bool head = (p == q->head);
+ if (head)
+ q->head = p->prev;
+ else
+ prev->prev = p->prev;
+ p->prev = NULL;
+ brcmu_pkt_buf_free_skb(p);
+ q->len--;
+ pq->len--;
+ p = (head ? q->head : prev->prev);
+ } else {
+ prev = p;
+ p = p->prev;
+ }
+ }
+
+ if (q->head == NULL)
+ q->tail = NULL;
+}
+EXPORT_SYMBOL(brcmu_pktq_pflush);
+
+void brcmu_pktq_flush(struct pktq *pq, bool dir,
+ bool (*fn)(struct sk_buff *, void *), void *arg)
+{
+ int prec;
+ for (prec = 0; prec < pq->num_prec; prec++)
+ brcmu_pktq_pflush(pq, prec, dir, fn, arg);
+}
+EXPORT_SYMBOL(brcmu_pktq_flush);
+
+void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+ int prec;
+
+ /* pq is variable size; only zero out what's requested */
+ memset(pq, 0,
+ offsetof(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+ pq->num_prec = (u16) num_prec;
+
+ pq->max = (u16) max_len;
+
+ for (prec = 0; prec < num_prec; prec++)
+ pq->q[prec].max = pq->max;
+}
+EXPORT_SYMBOL(brcmu_pktq_init);
+
+struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return pq->q[prec].tail;
+}
+EXPORT_SYMBOL(brcmu_pktq_peek_tail);
+
+/* Return sum of lengths of a specific set of precedences */
+int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+ int prec, len;
+
+ len = 0;
+
+ for (prec = 0; prec <= pq->hi_prec; prec++)
+ if (prec_bmp & (1 << prec))
+ len += pq->q[prec].len;
+
+ return len;
+}
+EXPORT_SYMBOL(brcmu_pktq_mlen);
+
+/* Priority dequeue from a specific set of precedences */
+struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
+ int *prec_out)
+{
+ struct pktq_prec *q;
+ struct sk_buff *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+ if (prec-- == 0)
+ return NULL;
+
+ q = &pq->q[prec];
+
+ p = q->head;
+ if (p == NULL)
+ return NULL;
+
+ q->head = p->prev;
+ if (q->head == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ pq->len--;
+
+ p->prev = NULL;
+
+ return p;
+}
+EXPORT_SYMBOL(brcmu_pktq_mdeq);
+
+#if defined(BCMDBG)
+/* pretty hex print a pkt buffer chain */
+void brcmu_prpkt(const char *msg, struct sk_buff *p0)
+{
+ struct sk_buff *p;
+
+ if (msg && (msg[0] != '\0'))
+ printk(KERN_DEBUG "%s:\n", msg);
+
+ for (p = p0; p; p = p->next)
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, p->data, p->len);
+}
+EXPORT_SYMBOL(brcmu_prpkt);
+#endif /* defined(BCMDBG) */
+
+#if defined(BCMDBG)
+/*
+ * print bytes formatted as hex to a string. return the resulting
+ * string length
+ */
+int brcmu_format_hex(char *str, const void *bytes, int len)
+{
+ int i;
+ char *p = str;
+ const u8 *src = (const u8 *)bytes;
+
+ for (i = 0; i < len; i++) {
+ p += snprintf(p, 3, "%02X", *src);
+ src++;
+ }
+ return (int)(p - str);
+}
+EXPORT_SYMBOL(brcmu_format_hex);
+#endif /* defined(BCMDBG) */
diff --git a/drivers/staging/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 5fb17d53c9b..5fb17d53c9b 100644
--- a/drivers/staging/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
new file mode 100644
index 00000000000..7d0f46e0eb9
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCMU_UTILS_H_
+#define _BRCMU_UTILS_H_
+
+#include <linux/skbuff.h>
+
+/*
+ * Spin at most 'us' microseconds while 'exp' is true.
+ * Caller should explicitly test 'exp' when this completes
+ * and take appropriate error action if 'exp' is still true.
+ */
+#define SPINWAIT(exp, us) { \
+ uint countdown = (us) + 9; \
+ while ((exp) && (countdown >= 10)) {\
+ udelay(10); \
+ countdown -= 10; \
+ } \
+}
+
+/* osl multi-precedence packet queue */
+#define PKTQ_LEN_DEFAULT 128 /* Max 128 packets */
+#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */
+
+#define BCME_STRLEN 64 /* Max string length for BCM errors */
+
+/* the largest reasonable packet buffer driver uses for ethernet MTU in bytes */
+#define PKTBUFSZ 2048
+
+#ifndef setbit
+#ifndef NBBY /* the BSD family defines NBBY */
+#define NBBY 8 /* 8 bits per byte */
+#endif /* #ifndef NBBY */
+#define setbit(a, i) (((u8 *)a)[(i)/NBBY] |= 1<<((i)%NBBY))
+#define clrbit(a, i) (((u8 *)a)[(i)/NBBY] &= ~(1<<((i)%NBBY)))
+#define isset(a, i) (((const u8 *)a)[(i)/NBBY] & (1<<((i)%NBBY)))
+#define isclr(a, i) ((((const u8 *)a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0)
+#endif /* setbit */
+
+#define NBITS(type) (sizeof(type) * 8)
+#define NBITVAL(nbits) (1 << (nbits))
+#define MAXBITVAL(nbits) ((1 << (nbits)) - 1)
+#define NBITMASK(nbits) MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8)
+
+/* crc defines */
+#define CRC16_INIT_VALUE 0xffff /* Initial CRC16 checksum value */
+#define CRC16_GOOD_VALUE 0xf0b8 /* Good final CRC16 checksum value */
+
+/* 18-bytes of Ethernet address buffer length */
+#define ETHER_ADDR_STR_LEN 18
+
+struct pktq_prec {
+ struct sk_buff *head; /* first packet to dequeue */
+ struct sk_buff *tail; /* last packet to dequeue */
+ u16 len; /* number of queued packets */
+ u16 max; /* maximum number of queued packets */
+};
+
+/* multi-priority pkt queue */
+struct pktq {
+ u16 num_prec; /* number of precedences in use */
+ u16 hi_prec; /* rapid dequeue hint (>= highest non-empty prec) */
+ u16 max; /* total max packets */
+ u16 len; /* total number of packets */
+ /*
+ * q array must be last since # of elements can be either
+ * PKTQ_MAX_PREC or 1
+ */
+ struct pktq_prec q[PKTQ_MAX_PREC];
+};
+
+/* operations on a specific precedence in packet queue */
+
+static inline int pktq_plen(struct pktq *pq, int prec)
+{
+ return pq->q[prec].len;
+}
+
+static inline int pktq_pavail(struct pktq *pq, int prec)
+{
+ return pq->q[prec].max - pq->q[prec].len;
+}
+
+static inline bool pktq_pfull(struct pktq *pq, int prec)
+{
+ return pq->q[prec].len >= pq->q[prec].max;
+}
+
+static inline bool pktq_pempty(struct pktq *pq, int prec)
+{
+ return pq->q[prec].len == 0;
+}
+
+static inline struct sk_buff *pktq_ppeek(struct pktq *pq, int prec)
+{
+ return pq->q[prec].head;
+}
+
+static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
+{
+ return pq->q[prec].tail;
+}
+
+extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
+ struct sk_buff *p);
+extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
+ struct sk_buff *p);
+extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
+extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
+
+/* packet primitives */
+extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
+extern void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
+
+/* Empty the queue at particular precedence level */
+/* callback function fn(pkt, arg) returns true if pkt belongs to if */
+extern void brcmu_pktq_pflush(struct pktq *pq, int prec,
+ bool dir, bool (*fn)(struct sk_buff *, void *), void *arg);
+
+/* operations on a set of precedences in packet queue */
+
+extern int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
+ int *prec_out);
+
+/* operations on packet queue as a whole */
+
+static inline int pktq_len(struct pktq *pq)
+{
+ return (int)pq->len;
+}
+
+static inline int pktq_max(struct pktq *pq)
+{
+ return (int)pq->max;
+}
+
+static inline int pktq_avail(struct pktq *pq)
+{
+ return (int)(pq->max - pq->len);
+}
+
+static inline bool pktq_full(struct pktq *pq)
+{
+ return pq->len >= pq->max;
+}
+
+static inline bool pktq_empty(struct pktq *pq)
+{
+ return pq->len == 0;
+}
+
+extern void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
+/* prec_out may be NULL if caller is not interested in return value */
+extern struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
+extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
+ bool (*fn)(struct sk_buff *, void *), void *arg);
+
+/* externs */
+/* packet */
+extern uint brcmu_pktfrombuf(struct sk_buff *p,
+ uint offset, int len, unsigned char *buf);
+extern uint brcmu_pkttotlen(struct sk_buff *p);
+
+/* ip address */
+struct ipv4_addr;
+
+#ifdef BCMDBG
+extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
+#else
+#define brcmu_prpkt(a, b)
+#endif /* BCMDBG */
+
+/* externs */
+/* format/print */
+#if defined(BCMDBG)
+extern int brcmu_format_hex(char *str, const void *bytes, int len);
+#endif
+
+#endif /* _BRCMU_UTILS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
new file mode 100644
index 00000000000..f10d30274c2
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCMU_WIFI_H_
+#define _BRCMU_WIFI_H_
+
+#include <linux/if_ether.h> /* for ETH_ALEN */
+#include <linux/ieee80211.h> /* for WLAN_PMKID_LEN */
+
+/*
+ * A chanspec (u16) holds the channel number, band, bandwidth and control
+ * sideband
+ */
+
+/* channel defines */
+#define CH_UPPER_SB 0x01
+#define CH_LOWER_SB 0x02
+#define CH_EWA_VALID 0x04
+#define CH_20MHZ_APART 4
+#define CH_10MHZ_APART 2
+#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
+#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
+#define BRCM_MAX_2G_CHANNEL CH_MAX_2G_CHANNEL /* legacy define */
+
+/* bandstate array indices */
+#define BAND_2G_INDEX 0 /* wlc->bandstate[x] index */
+#define BAND_5G_INDEX 1 /* wlc->bandstate[x] index */
+
+/*
+ * max # supported channels. The max channel no is 216, this is that + 1
+ * rounded up to a multiple of NBBY (8). DO NOT MAKE it > 255: channels are
+ * u8's all over
+*/
+#define MAXCHANNEL 224
+
+#define WL_CHANSPEC_CHAN_MASK 0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT 0
+
+#define WL_CHANSPEC_CTL_SB_MASK 0x0300
+#define WL_CHANSPEC_CTL_SB_SHIFT 8
+#define WL_CHANSPEC_CTL_SB_LOWER 0x0100
+#define WL_CHANSPEC_CTL_SB_UPPER 0x0200
+#define WL_CHANSPEC_CTL_SB_NONE 0x0300
+
+#define WL_CHANSPEC_BW_MASK 0x0C00
+#define WL_CHANSPEC_BW_SHIFT 10
+#define WL_CHANSPEC_BW_10 0x0400
+#define WL_CHANSPEC_BW_20 0x0800
+#define WL_CHANSPEC_BW_40 0x0C00
+
+#define WL_CHANSPEC_BAND_MASK 0xf000
+#define WL_CHANSPEC_BAND_SHIFT 12
+#define WL_CHANSPEC_BAND_5G 0x1000
+#define WL_CHANSPEC_BAND_2G 0x2000
+#define INVCHANSPEC 255
+
+/* used to calculate the chan_freq = chan_factor * 500Mhz + 5 * chan_number */
+#define WF_CHAN_FACTOR_2_4_G 4814 /* 2.4 GHz band, 2407 MHz */
+#define WF_CHAN_FACTOR_5_G 10000 /* 5 GHz band, 5000 MHz */
+#define WF_CHAN_FACTOR_4_G 8000 /* 4.9 GHz band for Japan */
+
+#define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
+
+#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK)
+
+#define CHSPEC_IS10(chspec) \
+ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+
+#define CHSPEC_IS20(chspec) \
+ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) \
+ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+
+#define CHSPEC_IS5G(chspec) \
+ (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+
+#define CHSPEC_IS2G(chspec) \
+ (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+
+#define CHSPEC_SB_NONE(chspec) \
+ (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_NONE)
+
+#define CHSPEC_SB_UPPER(chspec) \
+ (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER)
+
+#define CHSPEC_SB_LOWER(chspec) \
+ (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER)
+
+#define CHSPEC_CTL_CHAN(chspec) \
+ ((CHSPEC_SB_LOWER(chspec)) ? \
+ (lower_20_sb(((chspec) & WL_CHANSPEC_CHAN_MASK))) : \
+ (upper_20_sb(((chspec) & WL_CHANSPEC_CHAN_MASK))))
+
+#define CHSPEC2BAND(chspec) (CHSPEC_IS5G(chspec) ? BRCM_BAND_5G : BRCM_BAND_2G)
+
+#define CHANSPEC_STR_LEN 8
+
+static inline int lower_20_sb(int channel)
+{
+ return channel > CH_10MHZ_APART ? (channel - CH_10MHZ_APART) : 0;
+}
+
+static inline int upper_20_sb(int channel)
+{
+ return (channel < (MAXCHANNEL - CH_10MHZ_APART)) ?
+ channel + CH_10MHZ_APART : 0;
+}
+
+static inline int chspec_bandunit(u16 chspec)
+{
+ return CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX;
+}
+
+static inline u16 ch20mhz_chspec(int channel)
+{
+ u16 rc = channel <= CH_MAX_2G_CHANNEL ?
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G;
+
+ return (u16)((u16)channel | WL_CHANSPEC_BW_20 |
+ WL_CHANSPEC_CTL_SB_NONE | rc);
+}
+
+static inline int next_20mhz_chan(int channel)
+{
+ return channel < (MAXCHANNEL - CH_20MHZ_APART) ?
+ channel + CH_20MHZ_APART : 0;
+}
+
+/* defined rate in 500kbps */
+#define BRCM_MAXRATE 108 /* in 500kbps units */
+#define BRCM_RATE_1M 2 /* in 500kbps units */
+#define BRCM_RATE_2M 4 /* in 500kbps units */
+#define BRCM_RATE_5M5 11 /* in 500kbps units */
+#define BRCM_RATE_11M 22 /* in 500kbps units */
+#define BRCM_RATE_6M 12 /* in 500kbps units */
+#define BRCM_RATE_9M 18 /* in 500kbps units */
+#define BRCM_RATE_12M 24 /* in 500kbps units */
+#define BRCM_RATE_18M 36 /* in 500kbps units */
+#define BRCM_RATE_24M 48 /* in 500kbps units */
+#define BRCM_RATE_36M 72 /* in 500kbps units */
+#define BRCM_RATE_48M 96 /* in 500kbps units */
+#define BRCM_RATE_54M 108 /* in 500kbps units */
+
+#define BRCM_2G_25MHZ_OFFSET 5 /* 2.4GHz band channel offset */
+
+#define MCSSET_LEN 16
+
+static inline bool ac_bitmap_tst(u8 bitmap, int prec)
+{
+ return (bitmap & (1 << (prec))) != 0;
+}
+
+/* Enumerate crypto algorithms */
+#define CRYPTO_ALGO_OFF 0
+#define CRYPTO_ALGO_WEP1 1
+#define CRYPTO_ALGO_TKIP 2
+#define CRYPTO_ALGO_WEP128 3
+#define CRYPTO_ALGO_AES_CCM 4
+#define CRYPTO_ALGO_AES_RESERVED1 5
+#define CRYPTO_ALGO_AES_RESERVED2 6
+#define CRYPTO_ALGO_NALG 7
+
+/* wireless security bitvec */
+
+#define WEP_ENABLED 0x0001
+#define TKIP_ENABLED 0x0002
+#define AES_ENABLED 0x0004
+#define WSEC_SWFLAG 0x0008
+/* to go into transition mode without setting wep */
+#define SES_OW_ENABLED 0x0040
+
+/* WPA authentication mode bitvec */
+#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
+#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
+#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
+#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
+#define WPA_AUTH_RESERVED1 0x0008
+#define WPA_AUTH_RESERVED2 0x0010
+
+#define WPA2_AUTH_RESERVED1 0x0020
+#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
+#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
+#define WPA2_AUTH_RESERVED3 0x0200
+#define WPA2_AUTH_RESERVED4 0x0400
+#define WPA2_AUTH_RESERVED5 0x0800
+
+/* pmkid */
+#define MAXPMKID 16
+
+#define DOT11_DEFAULT_RTS_LEN 2347
+#define DOT11_DEFAULT_FRAG_LEN 2346
+
+#define DOT11_ICV_AES_LEN 8
+#define DOT11_QOS_LEN 2
+#define DOT11_IV_MAX_LEN 8
+#define DOT11_A4_HDR_LEN 30
+
+#define HT_CAP_RX_STBC_NO 0x0
+#define HT_CAP_RX_STBC_ONE_STREAM 0x1
+
+struct pmkid {
+ u8 BSSID[ETH_ALEN];
+ u8 PMKID[WLAN_PMKID_LEN];
+};
+
+struct pmkid_list {
+ __le32 npmkid;
+ struct pmkid pmkid[1];
+};
+
+struct pmkid_cand {
+ u8 BSSID[ETH_ALEN];
+ u8 preauth;
+};
+
+struct pmkid_cand_list {
+ u32 npmkid_cand;
+ struct pmkid_cand pmkid_cand[1];
+};
+
+#endif /* _BRCMU_WIFI_H_ */
diff --git a/drivers/staging/brcm80211/include/chipcommon.h b/drivers/net/wireless/brcm80211/include/chipcommon.h
index 296582aced6..fefabc39e64 100644
--- a/drivers/staging/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/brcm80211/include/chipcommon.h
@@ -19,7 +19,7 @@
#include "defs.h" /* for PAD macro */
-typedef volatile struct {
+struct chipcregs {
u32 chipid; /* 0x0 */
u32 capabilities;
u32 corecontrol; /* corerev >= 1 */
@@ -214,7 +214,7 @@ typedef volatile struct {
u32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */
u32 PAD[100];
u16 sromotp[768];
-} chipcregs_t;
+};
/* chipid */
#define CID_ID_MASK 0x0000ffff /* Chip Id mask */
@@ -231,7 +231,8 @@ typedef volatile struct {
#define CC_CAP_UARTS_MASK 0x00000003 /* Number of UARTs */
#define CC_CAP_MIPSEB 0x00000004 /* MIPS is in big-endian mode */
#define CC_CAP_UCLKSEL 0x00000018 /* UARTs clock select */
-#define CC_CAP_UINTCLK 0x00000008 /* UARTs are driven by internal divided clock */
+/* UARTs are driven by internal divided clock */
+#define CC_CAP_UINTCLK 0x00000008
#define CC_CAP_UARTGPIO 0x00000020 /* UARTs own GPIOs 15:12 */
#define CC_CAP_EXTBUS_MASK 0x000000c0 /* External bus mask */
#define CC_CAP_EXTBUS_NONE 0x00000000 /* No ExtBus present */
@@ -248,10 +249,12 @@ typedef volatile struct {
#define CC_CAP_BKPLN64 0x08000000 /* 64-bit backplane */
#define CC_CAP_PMU 0x10000000 /* PMU Present, rev >= 20 */
#define CC_CAP_SROM 0x40000000 /* Srom Present, rev >= 32 */
-#define CC_CAP_NFLASH 0x80000000 /* Nand flash present, rev >= 35 */
+/* Nand flash present, rev >= 35 */
+#define CC_CAP_NFLASH 0x80000000
#define CC_CAP2_SECI 0x00000001 /* SECI Present, rev >= 36 */
-#define CC_CAP2_GSIO 0x00000002 /* GSIO (spi/i2c) present, rev >= 37 */
+/* GSIO (spi/i2c) present, rev >= 37 */
+#define CC_CAP2_GSIO 0x00000002
/* pmucapabilities */
#define PCAP_REV_MASK 0x000000ff
diff --git a/drivers/staging/brcm80211/include/defs.h b/drivers/net/wireless/brcm80211/include/defs.h
index 8b3e17dec15..1e5f310af1e 100644
--- a/drivers/staging/brcm80211/include/defs.h
+++ b/drivers/net/wireless/brcm80211/include/defs.h
@@ -27,14 +27,8 @@
#define USB_BUS 5
#define SPI_BUS 6
-#ifndef OFF
#define OFF 0
-#endif
-
-#ifndef ON
#define ON 1 /* ON = 1 */
-#endif
-
#define AUTO (-1) /* Auto = -1 */
/*
@@ -54,27 +48,23 @@
#define WL_NUMRATES 16 /* max # of rates in a rateset */
-typedef struct wl_rateset {
- u32 count; /* # rates in this set */
- u8 rates[WL_NUMRATES]; /* rates in 500kbps units w/hi bit set if basic */
-} wl_rateset_t;
-
#define BRCM_CNTRY_BUF_SZ 4 /* Country string is 3 bytes + NUL */
-#define BRCM_SET_CHANNEL 30
-#define BRCM_SET_SRL 32
-#define BRCM_SET_LRL 34
+#define BRCM_SET_CHANNEL 30
+#define BRCM_SET_SRL 32
+#define BRCM_SET_LRL 34
+#define BRCM_SET_BCNPRD 76
-#define BRCM_SET_RATESET 72
-#define BRCM_SET_BCNPRD 76
-#define BRCM_GET_CURR_RATESET 114 /* current rateset */
-#define BRCM_GET_PHYLIST 180
+#define BRCM_GET_CURR_RATESET 114 /* current rateset */
+#define BRCM_GET_PHYLIST 180
/* Bit masks for radio disabled status - returned by WL_GET_RADIO */
+
#define WL_RADIO_SW_DISABLE (1<<0)
#define WL_RADIO_HW_DISABLE (1<<1)
#define WL_RADIO_MPC_DISABLE (1<<2)
-#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */
+/* some countries don't support any channel */
+#define WL_RADIO_COUNTRY_DISABLE (1<<3)
/* Override bit for SET_TXPWR. if set, ignore other level limits */
#define WL_TXPWR_OVERRIDE (1U<<31)
@@ -100,7 +90,9 @@ typedef struct wl_rateset {
/*
* Sonics Configuration Space Registers.
*/
-#define SBCONFIGOFF 0xf00 /* core sbconfig regs are top 256bytes of regs */
+
+/* core sbconfig regs are top 256bytes of regs */
+#define SBCONFIGOFF 0xf00
/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
diff --git a/drivers/staging/brcm80211/include/soc.h b/drivers/net/wireless/brcm80211/include/soc.h
index 6e5a705c493..4fcb956ad9e 100644
--- a/drivers/staging/brcm80211/include/soc.h
+++ b/drivers/net/wireless/brcm80211/include/soc.h
@@ -17,11 +17,7 @@
#ifndef _BRCM_SOC_H
#define _BRCM_SOC_H
-#ifdef SI_ENUM_BASE_VARIABLE
-#define SI_ENUM_BASE (sii->pub.si_enum_base)
-#else
#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */
-#endif /* SI_ENUM_BASE_VARIABLE */
/* core codes */
#define NODEV_CORE_ID 0x700 /* Invalid coreid */
@@ -81,9 +77,8 @@
#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
-#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all
- * unused address ranges
- */
+/* Default component, in ai chips it maps all unused address ranges */
+#define DEF_AI_COMP 0xfff
/* Common core control flags */
#define SICF_BIST_EN 0x8000
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 89a116fba1d..bfa0d54221e 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -816,7 +816,7 @@ static const struct net_device_ops hostap_netdev_ops = {
.ndo_stop = prism2_close,
.ndo_do_ioctl = hostap_ioctl,
.ndo_set_mac_address = prism2_set_mac_address,
- .ndo_set_multicast_list = hostap_set_multicast_list,
+ .ndo_set_rx_mode = hostap_set_multicast_list,
.ndo_change_mtu = prism2_change_mtu,
.ndo_tx_timeout = prism2_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
@@ -829,7 +829,7 @@ static const struct net_device_ops hostap_mgmt_netdev_ops = {
.ndo_stop = prism2_close,
.ndo_do_ioctl = hostap_ioctl,
.ndo_set_mac_address = prism2_set_mac_address,
- .ndo_set_multicast_list = hostap_set_multicast_list,
+ .ndo_set_rx_mode = hostap_set_multicast_list,
.ndo_change_mtu = prism2_change_mtu,
.ndo_tx_timeout = prism2_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
@@ -842,7 +842,7 @@ static const struct net_device_ops hostap_master_ops = {
.ndo_stop = prism2_close,
.ndo_do_ioctl = hostap_ioctl,
.ndo_set_mac_address = prism2_set_mac_address,
- .ndo_set_multicast_list = hostap_set_multicast_list,
+ .ndo_set_rx_mode = hostap_set_multicast_list,
.ndo_change_mtu = prism2_change_mtu,
.ndo_tx_timeout = prism2_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index ef9ad79d1bf..127e9c63bea 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -161,7 +161,7 @@ that only one external action is invoked at a time.
#include <linux/firmware.h>
#include <linux/acpi.h>
#include <linux/ctype.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <net/lib80211.h>
@@ -174,7 +174,7 @@ that only one external action is invoked at a time.
#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
-static struct pm_qos_request_list ipw2100_pm_qos_req;
+static struct pm_qos_request ipw2100_pm_qos_req;
/* Debugging stuff */
#ifdef CONFIG_IPW2100_DEBUG
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 4ffebede5e0..99a710dfe77 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -32,6 +32,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
+#include <net/cfg80211-wext.h>
#include "ipw2200.h"
@@ -11704,7 +11705,7 @@ static const struct net_device_ops ipw_netdev_ops = {
.ndo_init = ipw_net_init,
.ndo_open = ipw_net_open,
.ndo_stop = ipw_net_stop,
- .ndo_set_multicast_list = ipw_net_set_multicast_list,
+ .ndo_set_rx_mode = ipw_net_set_multicast_list,
.ndo_set_mac_address = ipw_net_set_mac_address,
.ndo_start_xmit = libipw_xmit,
.ndo_change_mtu = libipw_change_mtu,
diff --git a/drivers/net/wireless/ipw2x00/libipw_tx.c b/drivers/net/wireless/ipw2x00/libipw_tx.c
index 01c88a71abe..e8c039879b0 100644
--- a/drivers/net/wireless/ipw2x00/libipw_tx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_tx.c
@@ -395,7 +395,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
(CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
bytes_per_frag -= LIBIPW_FCS_LEN;
- /* Each fragment may need to have room for encryptiong
+ /* Each fragment may need to have room for encryption
* pre/postfix */
if (host_encrypt)
bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
index abd923558d4..7a7f0f38c8a 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
@@ -32,7 +32,6 @@
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 164bcae821f..8faeaf2ddde 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -28,7 +28,6 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
index 73fe3cdf796..f7c0a743847 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
@@ -34,7 +34,6 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
index 26d324e3069..6862fdcaee6 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
@@ -32,7 +32,6 @@
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
index 9b65153bdd0..57ebe214e68 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
@@ -27,7 +27,6 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
index ac4f64de136..7f12e3638ba 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
@@ -335,7 +335,7 @@ int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
sta_priv = (void *)sta->drv_priv;
if (sta_priv && sta_priv->asleep &&
- (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
+ (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
/*
* This sends an asynchronous command to the device,
* but we can rely on it being processed before the
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index ecdc6e55742..86f4fce193e 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -33,7 +33,6 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index e5971fe9d16..2bd5659310d 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -1250,7 +1250,8 @@ void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
}
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct iwl_priv *priv = hw->priv;
@@ -2220,7 +2221,8 @@ out:
}
EXPORT_SYMBOL(iwl_legacy_mac_config);
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
+void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
unsigned long flags;
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
index 84da79376ef..d1271fe07d4 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -286,7 +286,8 @@ struct iwl_cfg {
***************************/
struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params);
int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
@@ -620,7 +621,8 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
/* mac80211 handlers */
int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
+void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
index bda0d61b2c0..dc568a474c5 100644
--- a/drivers/net/wireless/iwlegacy/iwl-led.c
+++ b/drivers/net/wireless/iwlegacy/iwl-led.c
@@ -33,7 +33,6 @@
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index 66ee15629a7..b282d869a54 100644
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -40,7 +40,6 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index aa0c2539761..d2fba9eae15 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -40,7 +40,6 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index ad3bdba6bee..57703d5209d 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,5 +1,5 @@
-config IWLAGN
- tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlagn) "
+config IWLWIFI
+ tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
depends on PCI && MAC80211
select FW_LOADER
select NEW_LEDS
@@ -39,14 +39,14 @@ config IWLAGN
If you want to compile the driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>. The
- module will be called iwlagn.
+ module will be called iwlwifi.
menu "Debugging Options"
- depends on IWLAGN
+ depends on IWLWIFI
config IWLWIFI_DEBUG
- bool "Enable full debugging output in the iwlagn driver"
- depends on IWLAGN
+ bool "Enable full debugging output in the iwlwifi driver"
+ depends on IWLWIFI
---help---
This option will enable debug tracing output for the iwlwifi drivers
@@ -54,13 +54,13 @@ config IWLWIFI_DEBUG
control which debug output is sent to the kernel log by setting the
value in
- /sys/class/net/wlan0/device/debug_level
+ /sys/module/iwlwifi/parameters/debug
This entry will only exist if this option is enabled.
To set a value, simply echo an 8-byte hex value to the same file:
- % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
+ % echo 0x43fff > /sys/module/iwlwifi/parameters/debug
You can find the list of debug mask values in:
drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -70,8 +70,8 @@ config IWLWIFI_DEBUG
any problems you may encounter.
config IWLWIFI_DEBUGFS
- bool "iwlagn debugfs support"
- depends on IWLAGN && MAC80211_DEBUGFS
+ bool "iwlwifi debugfs support"
+ depends on IWLWIFI && MAC80211_DEBUGFS
---help---
Enable creation of debugfs files for the iwlwifi drivers. This
is a low-impact option that allows getting insight into the
@@ -79,13 +79,13 @@ config IWLWIFI_DEBUGFS
config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
bool "Experimental uCode support"
- depends on IWLAGN && IWLWIFI_DEBUG
+ depends on IWLWIFI && IWLWIFI_DEBUG
---help---
Enable use of experimental ucode for testing and debugging.
config IWLWIFI_DEVICE_TRACING
bool "iwlwifi device access tracing"
- depends on IWLAGN
+ depends on IWLWIFI
depends on EVENT_TRACING
help
Say Y here to trace all commands, including TX frames and IO
@@ -104,27 +104,10 @@ endmenu
config IWLWIFI_DEVICE_SVTOOL
bool "iwlwifi device svtool support"
- depends on IWLAGN
+ depends on IWLWIFI
select NL80211_TESTMODE
help
This option enables the svtool support for iwlwifi device through
NL80211_TESTMODE. svtool is a software validation tool that runs in
the user space and interacts with the device in the kernel space
through the generic netlink message via NL80211_TESTMODE channel.
-
-config IWL_P2P
- bool "iwlwifi experimental P2P support"
- depends on IWLAGN
- help
- This option enables experimental P2P support for some devices
- based on microcode support. Since P2P support is still under
- development, this option may even enable it for some devices
- now that turn out to not support it in the future due to
- microcode restrictions.
-
- To determine if your microcode supports the experimental P2P
- offered by this option, check if the driver advertises AP
- support when it is loaded.
-
- Say Y only if you want to experiment with P2P.
-
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 48ab9142af3..c73e5ed8db5 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,24 +1,24 @@
-# AGN
-obj-$(CONFIG_IWLAGN) += iwlagn.o
-iwlagn-objs := iwl-agn.o iwl-agn-rs.o
-iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
-iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
-iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
+# WIFI
+obj-$(CONFIG_IWLWIFI) += iwlwifi.o
+iwlwifi-objs := iwl-agn.o iwl-agn-rs.o
+iwlwifi-objs += iwl-agn-ucode.o iwl-agn-tx.o
+iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
+iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
-iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-power.o
-iwlagn-objs += iwl-rx.o iwl-sta.o
-iwlagn-objs += iwl-scan.o iwl-led.o
-iwlagn-objs += iwl-agn-rxon.o
-iwlagn-objs += iwl-5000.o
-iwlagn-objs += iwl-6000.o
-iwlagn-objs += iwl-1000.o
-iwlagn-objs += iwl-2000.o
-iwlagn-objs += iwl-pci.o
-iwlagn-objs += iwl-trans.o iwl-trans-rx-pcie.o iwl-trans-tx-pcie.o
+iwlwifi-objs += iwl-core.o iwl-eeprom.o iwl-power.o
+iwlwifi-objs += iwl-scan.o iwl-led.o
+iwlwifi-objs += iwl-agn-rxon.o
+iwlwifi-objs += iwl-5000.o
+iwlwifi-objs += iwl-6000.o
+iwlwifi-objs += iwl-1000.o
+iwlwifi-objs += iwl-2000.o
+iwlwifi-objs += iwl-pci.o
+iwlwifi-objs += iwl-trans.o
+iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
-iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
-iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-iwlagn-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o
+iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 01b49eb8c8e..e12b48c2cff 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -30,7 +30,6 @@
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
@@ -40,14 +39,18 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-sta.h"
#include "iwl-agn.h"
-#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
+#include "iwl-shared.h"
+#include "iwl-cfg.h"
/* Highest firmware API version supported */
-#define IWL1000_UCODE_API_MAX 5
-#define IWL100_UCODE_API_MAX 5
+#define IWL1000_UCODE_API_MAX 6
+#define IWL100_UCODE_API_MAX 6
+
+/* Oldest version we won't warn about */
+#define IWL1000_UCODE_API_OK 5
+#define IWL100_UCODE_API_OK 5
/* Lowest firmware API version supported */
#define IWL1000_UCODE_API_MIN 1
@@ -73,21 +76,21 @@
static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
- priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
- priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+ hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
+ hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
}
/* NIC configuration for 1000 series */
static void iwl1000_nic_config(struct iwl_priv *priv)
{
/* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
/* Setting digital SVR for 1000 card to 1.32V */
/* locking is acquired in iwl_set_bits_mask_prph() function */
- iwl_set_bits_mask_prph(priv, APMG_DIGITAL_SVR_REG,
+ iwl_set_bits_mask_prph(bus(priv), APMG_DIGITAL_SVR_REG,
APMG_SVR_DIGITAL_VOLTAGE_1_32,
~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
}
@@ -124,44 +127,36 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
- BIT(IEEE80211_BAND_5GHZ);
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
+ hw_params(priv).rx_chains_num = 1;
else
- priv->hw_params.rx_chains_num =
+ hw_params(priv).rx_chains_num =
num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl1000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl1000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl1000_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_TX_IQ_PERD) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
-
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
return 0;
}
@@ -191,7 +186,6 @@ static struct iwl_base_params iwl1000_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_1000,
.shadow_ram_support = false,
.led_compensation = 51,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
@@ -201,12 +195,13 @@ static struct iwl_base_params iwl1000_base_params = {
static struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
- .smps_mode = IEEE80211_SMPS_STATIC,
+ .smps_mode = IEEE80211_SMPS_DYNAMIC,
};
#define IWL_DEVICE_1000 \
.fw_name_pre = IWL1000_FW_PRE, \
.ucode_api_max = IWL1000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL1000_UCODE_API_OK, \
.ucode_api_min = IWL1000_UCODE_API_MIN, \
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
@@ -228,6 +223,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
#define IWL_DEVICE_100 \
.fw_name_pre = IWL100_FW_PRE, \
.ucode_api_max = IWL100_UCODE_API_MAX, \
+ .ucode_api_ok = IWL100_UCODE_API_OK, \
.ucode_api_min = IWL100_UCODE_API_MIN, \
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 0e13f0bb2e1..79431977a96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -30,7 +30,6 @@
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
@@ -40,17 +39,22 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-sta.h"
#include "iwl-agn.h"
-#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
-#include "iwl-6000-hw.h"
+#include "iwl-shared.h"
+#include "iwl-cfg.h"
/* Highest firmware API version supported */
-#define IWL2030_UCODE_API_MAX 5
-#define IWL2000_UCODE_API_MAX 5
-#define IWL105_UCODE_API_MAX 5
-#define IWL135_UCODE_API_MAX 5
+#define IWL2030_UCODE_API_MAX 6
+#define IWL2000_UCODE_API_MAX 6
+#define IWL105_UCODE_API_MAX 6
+#define IWL135_UCODE_API_MAX 6
+
+/* Oldest version we won't warn about */
+#define IWL2030_UCODE_API_OK 5
+#define IWL2000_UCODE_API_OK 5
+#define IWL105_UCODE_API_OK 5
+#define IWL135_UCODE_API_OK 5
/* Lowest firmware API version supported */
#define IWL2030_UCODE_API_MIN 5
@@ -68,13 +72,13 @@
#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
#define IWL135_FW_PRE "iwlwifi-135-"
-#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE #api ".ucode"
+#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
- priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
- priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+ hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
+ hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
}
/* NIC configuration for 2000 series */
@@ -83,7 +87,7 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
iwl_rf_config(priv);
if (priv->cfg->iq_invert)
- iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
@@ -119,45 +123,37 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
- BIT(IEEE80211_BAND_5GHZ);
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
+ hw_params(priv).rx_chains_num = 1;
else
- priv->hw_params.rx_chains_num =
+ hw_params(priv).rx_chains_num =
num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl2000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl2000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl2000_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
+ hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
if (priv->cfg->need_temp_offset_calib)
- priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
-
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
return 0;
}
@@ -175,7 +171,7 @@ static struct iwl_lib_ops iwl2000_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
- .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
@@ -196,7 +192,7 @@ static struct iwl_lib_ops iwl2030_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
- .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
@@ -209,7 +205,6 @@ static struct iwl_base_params iwl2000_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 51,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -217,6 +212,7 @@ static struct iwl_base_params iwl2000_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = true,
+ .hd_v2 = true,
};
@@ -228,7 +224,6 @@ static struct iwl_base_params iwl2030_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 57,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -236,6 +231,7 @@ static struct iwl_base_params iwl2030_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = true,
+ .hd_v2 = true,
};
static struct iwl_ht_params iwl2000_ht_params = {
@@ -256,6 +252,7 @@ static struct iwl_bt_params iwl2030_bt_params = {
#define IWL_DEVICE_2000 \
.fw_name_pre = IWL2000_FW_PRE, \
.ucode_api_max = IWL2000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL2000_UCODE_API_OK, \
.ucode_api_min = IWL2000_UCODE_API_MIN, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
@@ -263,6 +260,7 @@ static struct iwl_bt_params iwl2030_bt_params = {
.base_params = &iwl2000_base_params, \
.need_dc_calib = true, \
.need_temp_offset_calib = true, \
+ .temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
.iq_invert = true \
@@ -277,9 +275,16 @@ struct iwl_cfg iwl2000_2bg_cfg = {
IWL_DEVICE_2000,
};
+struct iwl_cfg iwl2000_2bgn_d_cfg = {
+ .name = "2000D Series 2x2 BGN",
+ IWL_DEVICE_2000,
+ .ht_params = &iwl2000_ht_params,
+};
+
#define IWL_DEVICE_2030 \
.fw_name_pre = IWL2030_FW_PRE, \
.ucode_api_max = IWL2030_UCODE_API_MAX, \
+ .ucode_api_ok = IWL2030_UCODE_API_OK, \
.ucode_api_min = IWL2030_UCODE_API_MIN, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
@@ -288,6 +293,7 @@ struct iwl_cfg iwl2000_2bg_cfg = {
.bt_params = &iwl2030_bt_params, \
.need_dc_calib = true, \
.need_temp_offset_calib = true, \
+ .temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true, \
.iq_invert = true \
@@ -306,6 +312,7 @@ struct iwl_cfg iwl2030_2bg_cfg = {
#define IWL_DEVICE_105 \
.fw_name_pre = IWL105_FW_PRE, \
.ucode_api_max = IWL105_UCODE_API_MAX, \
+ .ucode_api_ok = IWL105_UCODE_API_OK, \
.ucode_api_min = IWL105_UCODE_API_MIN, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
@@ -313,6 +320,7 @@ struct iwl_cfg iwl2030_2bg_cfg = {
.base_params = &iwl2000_base_params, \
.need_dc_calib = true, \
.need_temp_offset_calib = true, \
+ .temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true, \
.rx_with_siso_diversity = true, \
@@ -329,9 +337,16 @@ struct iwl_cfg iwl105_bgn_cfg = {
.ht_params = &iwl2000_ht_params,
};
+struct iwl_cfg iwl105_bgn_d_cfg = {
+ .name = "105D Series 1x1 BGN",
+ IWL_DEVICE_105,
+ .ht_params = &iwl2000_ht_params,
+};
+
#define IWL_DEVICE_135 \
.fw_name_pre = IWL135_FW_PRE, \
.ucode_api_max = IWL135_UCODE_API_MAX, \
+ .ucode_api_ok = IWL135_UCODE_API_OK, \
.ucode_api_min = IWL135_UCODE_API_MIN, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
@@ -340,6 +355,7 @@ struct iwl_cfg iwl105_bgn_cfg = {
.bt_params = &iwl2030_bt_params, \
.need_dc_calib = true, \
.need_temp_offset_calib = true, \
+ .temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true, \
.rx_with_siso_diversity = true, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
deleted file mode 100644
index f9630a3c79f..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-5000-hw.h) only for hardware-related definitions.
- * Use iwl-commands.h for uCode API definitions.
- */
-
-#ifndef __iwl_5000_hw_h__
-#define __iwl_5000_hw_h__
-
-/* 5150 only */
-#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
-
-static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
-{
- u16 temperature, voltage;
- __le16 *temp_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_TEMPERATURE);
-
- temperature = le16_to_cpu(temp_calib[0]);
- voltage = le16_to_cpu(temp_calib[1]);
-
- /* offset = temp - volt / coeff */
- return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
-}
-
-#endif /* __iwl_5000_hw_h__ */
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index c95cefd529d..c511c98a89a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -31,7 +31,6 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
@@ -41,12 +40,11 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-sta.h"
-#include "iwl-helpers.h"
#include "iwl-agn.h"
#include "iwl-agn-hw.h"
-#include "iwl-5000-hw.h"
#include "iwl-trans.h"
+#include "iwl-shared.h"
+#include "iwl-cfg.h"
/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 5
@@ -69,27 +67,27 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
iwl_rf_config(priv);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
/* W/A : NIC is stuck in a reset state after Early PCIe power off
* (PCIe power is lost before PERST# is asserted),
* causing ME FW to lose ownership and not being able to obtain it back.
*/
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ iwl_set_bits_mask_prph(bus(priv), APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
- .min_nrg_cck = 95,
+ .min_nrg_cck = 100,
.max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 90,
.auto_corr_min_ofdm_mrc = 170,
- .auto_corr_min_ofdm_x1 = 120,
- .auto_corr_min_ofdm_mrc_x1 = 240,
+ .auto_corr_min_ofdm_x1 = 105,
+ .auto_corr_min_ofdm_mrc_x1 = 220,
.auto_corr_max_ofdm = 120,
.auto_corr_max_ofdm_mrc = 210,
@@ -98,10 +96,10 @@ static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.auto_corr_min_cck = 125,
.auto_corr_max_cck = 200,
- .auto_corr_min_cck_mrc = 170,
+ .auto_corr_min_cck_mrc = 200,
.auto_corr_max_cck_mrc = 400,
- .nrg_th_cck = 95,
- .nrg_th_ofdm = 95,
+ .nrg_th_cck = 100,
+ .nrg_th_ofdm = 100,
.barker_corr_th_min = 190,
.barker_corr_th_min_mrc = 390,
@@ -134,19 +132,34 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
.nrg_th_cca = 62,
};
+#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
+
+static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
+{
+ u16 temperature, voltage;
+ __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
+ EEPROM_KELVIN_TEMPERATURE);
+
+ temperature = le16_to_cpu(temp_calib[0]);
+ voltage = le16_to_cpu(temp_calib[1]);
+
+ /* offset = temp - volt / coeff */
+ return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
+}
+
static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
iwl_temp_calib_to_offset(priv);
- priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
+ hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
}
static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
- priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
+ hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
}
static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
@@ -156,39 +169,32 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl5000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl5000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl5000_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_TX_IQ_PERD) |
BIT(IWL_CALIB_BASE_BAND);
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
-
return 0;
}
@@ -199,38 +205,31 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl5150_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl5150_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl5150_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
-
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
return 0;
}
@@ -252,7 +251,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
{
/*
* MULTI-FIXME
- * See iwl_mac_channel_switch.
+ * See iwlagn_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl5000_channel_switch_cmd cmd;
@@ -315,7 +314,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
return -EFAULT;
}
- return trans_send_cmd(&priv->trans, &hcmd);
+ return iwl_trans_send_cmd(trans(priv), &hcmd);
}
static struct iwl_lib_ops iwl5000_lib = {
@@ -360,7 +359,6 @@ static struct iwl_base_params iwl5000_base_params = {
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
.led_compensation = 51,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
deleted file mode 100644
index b27986e57c9..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-6000-hw.h) only for hardware-related definitions.
- * Use iwl-commands.h for uCode API definitions.
- */
-
-#ifndef __iwl_6000_hw_h__
-#define __iwl_6000_hw_h__
-
-#define IWL60_RTC_INST_LOWER_BOUND (0x000000)
-#define IWL60_RTC_INST_UPPER_BOUND (0x040000)
-#define IWL60_RTC_DATA_LOWER_BOUND (0x800000)
-#define IWL60_RTC_DATA_UPPER_BOUND (0x814000)
-#define IWL60_RTC_INST_SIZE \
- (IWL60_RTC_INST_UPPER_BOUND - IWL60_RTC_INST_LOWER_BOUND)
-#define IWL60_RTC_DATA_SIZE \
- (IWL60_RTC_DATA_UPPER_BOUND - IWL60_RTC_DATA_LOWER_BOUND)
-
-#endif /* __iwl_6000_hw_h__ */
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 973d1972e8c..c840c78278d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -30,7 +30,6 @@
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
@@ -40,17 +39,19 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-sta.h"
#include "iwl-agn.h"
-#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
-#include "iwl-6000-hw.h"
#include "iwl-trans.h"
+#include "iwl-shared.h"
+#include "iwl-cfg.h"
/* Highest firmware API version supported */
#define IWL6000_UCODE_API_MAX 4
#define IWL6050_UCODE_API_MAX 5
-#define IWL6000G2_UCODE_API_MAX 5
+#define IWL6000G2_UCODE_API_MAX 6
+
+/* Oldest version we won't warn about */
+#define IWL6000G2_UCODE_API_OK 5
/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
@@ -72,15 +73,15 @@
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
- priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
- priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+ hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
+ hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
}
static void iwl6050_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
if (iwlagn_eeprom_calib_version(priv) >= 6)
- iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
}
@@ -88,9 +89,9 @@ static void iwl6150_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
if (iwlagn_eeprom_calib_version(priv) >= 6)
- iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
- iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_6050_1x2);
}
@@ -102,7 +103,7 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
/* no locking required for register write */
if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
/* 2x2 IPA phy type */
- iwl_write32(priv, CSR_GP_DRIVER_REG,
+ iwl_write32(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
}
/* do additional nic configuration if needed */
@@ -111,7 +112,7 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
}
static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
- .min_nrg_cck = 97,
+ .min_nrg_cck = 110,
.max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 80,
.auto_corr_min_ofdm_mrc = 128,
@@ -127,11 +128,11 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
.auto_corr_max_cck = 175,
.auto_corr_min_cck_mrc = 160,
.auto_corr_max_cck_mrc = 310,
- .nrg_th_cck = 97,
- .nrg_th_ofdm = 100,
+ .nrg_th_cck = 110,
+ .nrg_th_ofdm = 110,
.barker_corr_th_min = 190,
- .barker_corr_th_min_mrc = 390,
+ .barker_corr_th_min_mrc = 336,
.nrg_th_cca = 62,
};
@@ -142,45 +143,38 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
+ hw_params(priv).rx_chains_num = 1;
else
- priv->hw_params.rx_chains_num =
+ hw_params(priv).rx_chains_num =
num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl6000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl6000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl6000_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
+ hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
if (priv->cfg->need_temp_offset_calib)
- priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
-
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
return 0;
}
@@ -190,7 +184,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
{
/*
* MULTI-FIXME
- * See iwl_mac_channel_switch.
+ * See iwlagn_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl6000_channel_switch_cmd cmd;
@@ -253,7 +247,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
return -EFAULT;
}
- return trans_send_cmd(&priv->trans, &hcmd);
+ return iwl_trans_send_cmd(trans(priv), &hcmd);
}
static struct iwl_lib_ops iwl6000_lib = {
@@ -270,7 +264,7 @@ static struct iwl_lib_ops iwl6000_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
@@ -292,7 +286,7 @@ static struct iwl_lib_ops iwl6030_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
@@ -305,7 +299,6 @@ static struct iwl_base_params iwl6000_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 51,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -323,7 +316,6 @@ static struct iwl_base_params iwl6050_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
.led_compensation = 51,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -340,7 +332,6 @@ static struct iwl_base_params iwl6000_g2_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 57,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -365,8 +356,9 @@ static struct iwl_bt_params iwl6000_bt_params = {
};
#define IWL_DEVICE_6005 \
- .fw_name_pre = IWL6005_FW_PRE, \
+ .fw_name_pre = IWL6005_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
.eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
@@ -392,9 +384,22 @@ struct iwl_cfg iwl6005_2bg_cfg = {
IWL_DEVICE_6005,
};
+struct iwl_cfg iwl6005_2agn_sff_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
+ IWL_DEVICE_6005,
+ .ht_params = &iwl6000_ht_params,
+};
+
+struct iwl_cfg iwl6005_2agn_d_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
+ IWL_DEVICE_6005,
+ .ht_params = &iwl6000_ht_params,
+};
+
#define IWL_DEVICE_6030 \
- .fw_name_pre = IWL6030_FW_PRE, \
+ .fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
.eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 72d6297602b..03bac48558b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -93,12 +93,12 @@ int iwl_send_calib_results(struct iwl_priv *priv)
};
for (i = 0; i < IWL_CALIB_MAX; i++) {
- if ((BIT(i) & priv->hw_params.calib_init_cfg) &&
+ if ((BIT(i) & hw_params(priv).calib_init_cfg) &&
priv->calib_results[i].buf) {
hcmd.len[0] = priv->calib_results[i].buf_len;
hcmd.data[0] = priv->calib_results[i].buf;
hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- ret = trans_send_cmd(&priv->trans, &hcmd);
+ ret = iwl_trans_send_cmd(trans(priv), &hcmd);
if (ret) {
IWL_ERR(priv, "Error %d iteration %d\n",
ret, i);
@@ -174,7 +174,7 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv,
u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
data = &(priv->sensitivity_data);
@@ -357,7 +357,7 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
data = &(priv->sensitivity_data);
@@ -484,7 +484,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
sizeof(u16)*HD_TABLE_SIZE);
- return trans_send_cmd(&priv->trans, &cmd_out);
+ return iwl_trans_send_cmd(trans(priv), &cmd_out);
}
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
@@ -505,28 +505,53 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
- cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
- HD_INA_NON_SQUARE_DET_OFDM_DATA;
- cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
- HD_INA_NON_SQUARE_DET_CCK_DATA;
- cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
- HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA;
- cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
- HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA;
- cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
- HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA;
- cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
- HD_OFDM_NON_SQUARE_DET_SLOPE_DATA;
- cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
- HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA;
- cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
- HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA;
- cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
- HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA;
- cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
- HD_CCK_NON_SQUARE_DET_SLOPE_DATA;
- cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
- HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA;
+ if (priv->cfg->base_params->hd_v2) {
+ cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
+ HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
+ cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
+ HD_INA_NON_SQUARE_DET_CCK_DATA_V2;
+ cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
+ HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
+ HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
+ HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
+ HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
+ HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2;
+ } else {
+ cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
+ HD_INA_NON_SQUARE_DET_OFDM_DATA_V1;
+ cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
+ HD_INA_NON_SQUARE_DET_CCK_DATA_V1;
+ cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
+ HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
+ HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
+ HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
+ HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
+ HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1;
+ }
/* Update uCode's "work" table, and copy it to DSP */
cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
@@ -548,7 +573,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
&(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);
- return trans_send_cmd(&priv->trans, &cmd_out);
+ return iwl_trans_send_cmd(trans(priv), &cmd_out);
}
void iwl_init_sensitivity(struct iwl_priv *priv)
@@ -556,7 +581,7 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
int ret = 0;
int i;
struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
if (priv->disable_sens_cal)
return;
@@ -633,13 +658,13 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
rx_info = &priv->statistics.rx_non_phy;
ofdm = &priv->statistics.rx_ofdm;
cck = &priv->statistics.rx_cck;
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
return;
}
@@ -663,7 +688,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
statis.beacon_energy_c =
le32_to_cpu(rx_info->beacon_energy_c);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
@@ -741,12 +766,9 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
u8 first_chain;
u16 i = 0;
- average_sig[0] = data->chain_signal_a /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[1] = data->chain_signal_b /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[2] = data->chain_signal_c /
- priv->cfg->base_params->chain_noise_num_beacons;
+ average_sig[0] = data->chain_signal_a / IWL_CAL_NUM_BEACONS;
+ average_sig[1] = data->chain_signal_b / IWL_CAL_NUM_BEACONS;
+ average_sig[2] = data->chain_signal_c / IWL_CAL_NUM_BEACONS;
if (average_sig[0] >= average_sig[1]) {
max_average_sig = average_sig[0];
@@ -796,21 +818,21 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* To be safe, simply mask out any chains that we know
* are not on the device.
*/
- active_chains &= priv->hw_params.valid_rx_ant;
+ active_chains &= hw_params(priv).valid_rx_ant;
num_tx_chains = 0;
for (i = 0; i < NUM_RX_CHAINS; i++) {
/* loops on all the bits of
* priv->hw_setting.valid_tx_ant */
u8 ant_msk = (1 << i);
- if (!(priv->hw_params.valid_tx_ant & ant_msk))
+ if (!(hw_params(priv).valid_tx_ant & ant_msk))
continue;
num_tx_chains++;
if (data->disconn_array[i] == 0)
/* there is a Tx antenna connected */
break;
- if (num_tx_chains == priv->hw_params.tx_chains_num &&
+ if (num_tx_chains == hw_params(priv).tx_chains_num &&
data->disconn_array[i]) {
/*
* If all chains are disconnected
@@ -827,12 +849,13 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
}
}
- if (active_chains != priv->hw_params.valid_rx_ant &&
+ if (active_chains != hw_params(priv).valid_rx_ant &&
active_chains != priv->chain_noise_data.active_chains)
IWL_DEBUG_CALIB(priv,
"Detected that not all antennas are connected! "
"Connected: %#x, valid: %#x.\n",
- active_chains, priv->hw_params.valid_rx_ant);
+ active_chains,
+ hw_params(priv).valid_rx_ant);
/* Save for use within RXON, TX, SCAN commands, etc. */
data->active_chains = active_chains;
@@ -892,7 +915,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
priv->phy_calib_chain_noise_gain_cmd);
cmd.delta_gain_1 = data->delta_gain_code[1];
cmd.delta_gain_2 = data->delta_gain_code[2];
- trans_send_cmd_pdu(&priv->trans, REPLY_PHY_CALIBRATION_CMD,
+ iwl_trans_send_cmd_pdu(trans(priv), REPLY_PHY_CALIBRATION_CMD,
CMD_ASYNC, sizeof(cmd), &cmd);
data->radio_write = 1;
@@ -950,13 +973,13 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
rx_info = &priv->statistics.rx_non_phy;
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
return;
}
@@ -971,7 +994,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
rxon_chnum, rxon_band24);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
return;
}
@@ -990,7 +1013,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
data->beacon_count++;
@@ -1012,8 +1035,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
/* If this is the "chain_noise_num_beacons", determine:
* 1) Disconnected antennas (using signal strengths)
* 2) Differential gain (using silence noise) to balance receivers */
- if (data->beacon_count !=
- priv->cfg->base_params->chain_noise_num_beacons)
+ if (data->beacon_count != IWL_CAL_NUM_BEACONS)
return;
/* Analyze signal for disconnected antenna */
@@ -1021,7 +1043,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
priv->cfg->bt_params->advanced_bt_coexist) {
/* Disable disconnected antenna algorithm for advanced
bt coex, assuming valid antennas are connected */
- data->active_chains = priv->hw_params.valid_rx_ant;
+ data->active_chains = hw_params(priv).valid_rx_ant;
for (i = 0; i < NUM_RX_CHAINS; i++)
if (!(data->active_chains & (1<<i)))
data->disconn_array[i] = 1;
@@ -1029,12 +1051,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
iwl_find_disconn_antenna(priv, average_sig, data);
/* Analyze noise for rx balance */
- average_noise[0] = data->chain_noise_a /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_noise[1] = data->chain_noise_b /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_noise[2] = data->chain_noise_c /
- priv->cfg->base_params->chain_noise_num_beacons;
+ average_noise[0] = data->chain_noise_a / IWL_CAL_NUM_BEACONS;
+ average_noise[1] = data->chain_noise_b / IWL_CAL_NUM_BEACONS;
+ average_noise[2] = data->chain_noise_c / IWL_CAL_NUM_BEACONS;
for (i = 0; i < NUM_RX_CHAINS; i++) {
if (!(data->disconn_array[i]) &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
deleted file mode 100644
index b8347db850e..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-agn.h"
-#include "iwl-io.h"
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-int iwl_eeprom_check_version(struct iwl_priv *priv)
-{
- u16 eeprom_ver;
- u16 calib_ver;
-
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- calib_ver = iwlagn_eeprom_calib_version(priv);
-
- if (eeprom_ver < priv->cfg->eeprom_ver ||
- calib_ver < priv->cfg->eeprom_calib_ver)
- goto err;
-
- IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
- eeprom_ver, calib_ver);
-
- return 0;
-err:
- IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
- "CALIB=0x%x < 0x%x\n",
- eeprom_ver, priv->cfg->eeprom_ver,
- calib_ver, priv->cfg->eeprom_calib_ver);
- return -EINVAL;
-
-}
-
-int iwl_eeprom_check_sku(struct iwl_priv *priv)
-{
- u16 radio_cfg;
-
- if (!priv->cfg->sku) {
- /* not using sku overwrite */
- priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE &&
- !priv->cfg->ht_params) {
- IWL_ERR(priv, "Invalid 11n configuration\n");
- return -EINVAL;
- }
- }
- if (!priv->cfg->sku) {
- IWL_ERR(priv, "Invalid device sku\n");
- return -EINVAL;
- }
-
- IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku);
-
- if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
- /* not using .cfg overwrite */
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
- priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
- priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
- if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
- IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
- priv->cfg->valid_tx_ant,
- priv->cfg->valid_rx_ant);
- return -EINVAL;
- }
- IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n",
- priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant);
- }
- /*
- * for some special cases,
- * EEPROM did not reflect the correct antenna setting
- * so overwrite the valid tx/rx antenna from .cfg
- */
- return 0;
-}
-
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
-{
- const u8 *addr = iwl_eeprom_query_addr(priv,
- EEPROM_MAC_ADDRESS);
- memcpy(mac, addr, ETH_ALEN);
-}
-
-/**
- * iwl_get_max_txpower_avg - get the highest tx power from all chains.
- * find the highest tx power from all chains for the channel
- */
-static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
- struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
- int element, s8 *max_txpower_in_half_dbm)
-{
- s8 max_txpower_avg = 0; /* (dBm) */
-
- /* Take the highest tx power from any valid chains */
- if ((priv->cfg->valid_tx_ant & ANT_A) &&
- (enhanced_txpower[element].chain_a_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].chain_a_max;
- if ((priv->cfg->valid_tx_ant & ANT_B) &&
- (enhanced_txpower[element].chain_b_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].chain_b_max;
- if ((priv->cfg->valid_tx_ant & ANT_C) &&
- (enhanced_txpower[element].chain_c_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].chain_c_max;
- if (((priv->cfg->valid_tx_ant == ANT_AB) |
- (priv->cfg->valid_tx_ant == ANT_BC) |
- (priv->cfg->valid_tx_ant == ANT_AC)) &&
- (enhanced_txpower[element].mimo2_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].mimo2_max;
- if ((priv->cfg->valid_tx_ant == ANT_ABC) &&
- (enhanced_txpower[element].mimo3_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].mimo3_max;
-
- /*
- * max. tx power in EEPROM is in 1/2 dBm format
- * convert from 1/2 dBm to dBm (round-up convert)
- * but we also do not want to loss 1/2 dBm resolution which
- * will impact performance
- */
- *max_txpower_in_half_dbm = max_txpower_avg;
- return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
-}
-
-static void
-iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv,
- struct iwl_eeprom_enhanced_txpwr *txp,
- s8 max_txpower_avg)
-{
- int ch_idx;
- bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ;
- enum ieee80211_band band;
-
- band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
- IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
-
- for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) {
- struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx];
-
- /* update matching channel or from common data only */
- if (txp->channel != 0 && ch_info->channel != txp->channel)
- continue;
-
- /* update matching band only */
- if (band != ch_info->band)
- continue;
-
- if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) {
- ch_info->max_power_avg = max_txpower_avg;
- ch_info->curr_txpow = max_txpower_avg;
- ch_info->scan_power = max_txpower_avg;
- }
-
- if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg)
- ch_info->ht40_max_power_avg = max_txpower_avg;
- }
-}
-
-#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
-#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
-#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
-
-#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
- ? # x " " : "")
-
-void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
-{
- struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
- int idx, entries;
- __le16 *txp_len;
- s8 max_txp_avg, max_txp_avg_halfdbm;
-
- BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
-
- /* the length is in 16-bit words, but we want entries */
- txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
- entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
-
- txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
-
- for (idx = 0; idx < entries; idx++) {
- txp = &txp_array[idx];
- /* skip invalid entries */
- if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
- continue;
-
- IWL_DEBUG_EEPROM(priv, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
- (txp->channel && (txp->flags &
- IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
- "Common " : (txp->channel) ?
- "Channel" : "Common",
- (txp->channel),
- TXP_CHECK_AND_PRINT(VALID),
- TXP_CHECK_AND_PRINT(BAND_52G),
- TXP_CHECK_AND_PRINT(OFDM),
- TXP_CHECK_AND_PRINT(40MHZ),
- TXP_CHECK_AND_PRINT(HT_AP),
- TXP_CHECK_AND_PRINT(RES1),
- TXP_CHECK_AND_PRINT(RES2),
- TXP_CHECK_AND_PRINT(COMMON_TYPE),
- txp->flags);
- IWL_DEBUG_EEPROM(priv, "\t\t chain_A: 0x%02x "
- "chain_B: 0X%02x chain_C: 0X%02x\n",
- txp->chain_a_max, txp->chain_b_max,
- txp->chain_c_max);
- IWL_DEBUG_EEPROM(priv, "\t\t MIMO2: 0x%02x "
- "MIMO3: 0x%02x High 20_on_40: 0x%02x "
- "Low 20_on_40: 0x%02x\n",
- txp->mimo2_max, txp->mimo3_max,
- ((txp->delta_20_in_40 & 0xf0) >> 4),
- (txp->delta_20_in_40 & 0x0f));
-
- max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
- &max_txp_avg_halfdbm);
-
- /*
- * Update the user limit values values to the highest
- * power supported by any channel
- */
- if (max_txp_avg > priv->tx_power_user_lmt)
- priv->tx_power_user_lmt = max_txp_avg;
- if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
- priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
-
- iwlcore_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
- }
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 0e5b842529c..123ef5e129d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -78,10 +78,23 @@
#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \
IWLAGN_RTC_DATA_LOWER_BOUND)
+#define IWL60_RTC_INST_LOWER_BOUND (0x000000)
+#define IWL60_RTC_INST_UPPER_BOUND (0x040000)
+#define IWL60_RTC_DATA_LOWER_BOUND (0x800000)
+#define IWL60_RTC_DATA_UPPER_BOUND (0x814000)
+#define IWL60_RTC_INST_SIZE \
+ (IWL60_RTC_INST_UPPER_BOUND - IWL60_RTC_INST_LOWER_BOUND)
+#define IWL60_RTC_DATA_SIZE \
+ (IWL60_RTC_DATA_UPPER_BOUND - IWL60_RTC_DATA_LOWER_BOUND)
+
/* RSSI to dBm */
#define IWLAGN_RSSI_OFFSET 44
-#define IWLAGN_DEFAULT_TX_RETRY 15
+#define IWLAGN_DEFAULT_TX_RETRY 15
+#define IWLAGN_MGMT_DFAULT_RETRY_LIMIT 3
+#define IWLAGN_RTS_DFAULT_RETRY_LIMIT 60
+#define IWLAGN_BAR_DFAULT_RETRY_LIMIT 60
+#define IWLAGN_LOW_RETRY_LIMIT 7
/* Limit range of txpower output target to be between these values */
#define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
@@ -92,20 +105,7 @@
#define IWLAGN_CMD_FIFO_NUM 7
#define IWLAGN_NUM_QUEUES 20
-#define IWLAGN_NUM_AMPDU_QUEUES 10
-#define IWLAGN_FIRST_AMPDU_QUEUE 10
-
-/* Fixed (non-configurable) rx data from phy */
-
-/**
- * struct iwlagn_schedq_bc_tbl scheduler byte count table
- * base physical address provided by SCD_DRAM_BASE_ADDR
- * @tfd_offset 0-12 - tx command byte count
- * 12-16 - station index
- */
-struct iwlagn_scd_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
-} __packed;
-
+#define IWLAGN_NUM_AMPDU_QUEUES 9
+#define IWLAGN_FIRST_AMPDU_QUEUE 11
#endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 3bee0f119bc..1a52ed29f2d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -35,454 +35,10 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
-#include "iwl-sta.h"
#include "iwl-trans.h"
-
-static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
-{
- return le32_to_cpup((__le32 *)&tx_resp->status +
- tx_resp->frame_count) & MAX_SN;
-}
-
-static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
-{
- status &= TX_STATUS_MSK;
-
- switch (status) {
- case TX_STATUS_POSTPONE_DELAY:
- priv->reply_tx_stats.pp_delay++;
- break;
- case TX_STATUS_POSTPONE_FEW_BYTES:
- priv->reply_tx_stats.pp_few_bytes++;
- break;
- case TX_STATUS_POSTPONE_BT_PRIO:
- priv->reply_tx_stats.pp_bt_prio++;
- break;
- case TX_STATUS_POSTPONE_QUIET_PERIOD:
- priv->reply_tx_stats.pp_quiet_period++;
- break;
- case TX_STATUS_POSTPONE_CALC_TTAK:
- priv->reply_tx_stats.pp_calc_ttak++;
- break;
- case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
- priv->reply_tx_stats.int_crossed_retry++;
- break;
- case TX_STATUS_FAIL_SHORT_LIMIT:
- priv->reply_tx_stats.short_limit++;
- break;
- case TX_STATUS_FAIL_LONG_LIMIT:
- priv->reply_tx_stats.long_limit++;
- break;
- case TX_STATUS_FAIL_FIFO_UNDERRUN:
- priv->reply_tx_stats.fifo_underrun++;
- break;
- case TX_STATUS_FAIL_DRAIN_FLOW:
- priv->reply_tx_stats.drain_flow++;
- break;
- case TX_STATUS_FAIL_RFKILL_FLUSH:
- priv->reply_tx_stats.rfkill_flush++;
- break;
- case TX_STATUS_FAIL_LIFE_EXPIRE:
- priv->reply_tx_stats.life_expire++;
- break;
- case TX_STATUS_FAIL_DEST_PS:
- priv->reply_tx_stats.dest_ps++;
- break;
- case TX_STATUS_FAIL_HOST_ABORTED:
- priv->reply_tx_stats.host_abort++;
- break;
- case TX_STATUS_FAIL_BT_RETRY:
- priv->reply_tx_stats.bt_retry++;
- break;
- case TX_STATUS_FAIL_STA_INVALID:
- priv->reply_tx_stats.sta_invalid++;
- break;
- case TX_STATUS_FAIL_FRAG_DROPPED:
- priv->reply_tx_stats.frag_drop++;
- break;
- case TX_STATUS_FAIL_TID_DISABLE:
- priv->reply_tx_stats.tid_disable++;
- break;
- case TX_STATUS_FAIL_FIFO_FLUSHED:
- priv->reply_tx_stats.fifo_flush++;
- break;
- case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
- priv->reply_tx_stats.insuff_cf_poll++;
- break;
- case TX_STATUS_FAIL_PASSIVE_NO_RX:
- priv->reply_tx_stats.fail_hw_drop++;
- break;
- case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
- priv->reply_tx_stats.sta_color_mismatch++;
- break;
- default:
- priv->reply_tx_stats.unknown++;
- break;
- }
-}
-
-static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
-{
- status &= AGG_TX_STATUS_MSK;
-
- switch (status) {
- case AGG_TX_STATE_UNDERRUN_MSK:
- priv->reply_agg_tx_stats.underrun++;
- break;
- case AGG_TX_STATE_BT_PRIO_MSK:
- priv->reply_agg_tx_stats.bt_prio++;
- break;
- case AGG_TX_STATE_FEW_BYTES_MSK:
- priv->reply_agg_tx_stats.few_bytes++;
- break;
- case AGG_TX_STATE_ABORT_MSK:
- priv->reply_agg_tx_stats.abort++;
- break;
- case AGG_TX_STATE_LAST_SENT_TTL_MSK:
- priv->reply_agg_tx_stats.last_sent_ttl++;
- break;
- case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
- priv->reply_agg_tx_stats.last_sent_try++;
- break;
- case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
- priv->reply_agg_tx_stats.last_sent_bt_kill++;
- break;
- case AGG_TX_STATE_SCD_QUERY_MSK:
- priv->reply_agg_tx_stats.scd_query++;
- break;
- case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
- priv->reply_agg_tx_stats.bad_crc32++;
- break;
- case AGG_TX_STATE_RESPONSE_MSK:
- priv->reply_agg_tx_stats.response++;
- break;
- case AGG_TX_STATE_DUMP_TX_MSK:
- priv->reply_agg_tx_stats.dump_tx++;
- break;
- case AGG_TX_STATE_DELAY_TX_MSK:
- priv->reply_agg_tx_stats.delay_tx++;
- break;
- default:
- priv->reply_agg_tx_stats.unknown++;
- break;
- }
-}
-
-static void iwlagn_set_tx_status(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_rxon_context *ctx,
- struct iwlagn_tx_resp *tx_resp,
- int txq_id, bool is_agg)
-{
- u16 status = le16_to_cpu(tx_resp->status.status);
-
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- if (is_agg)
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- info->flags |= iwl_tx_status_to_mac80211(status);
- iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
- info);
- if (!iwl_is_tx_success(status))
- iwlagn_count_tx_err_status(priv, status);
-
- if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
- iwl_is_associated_ctx(ctx) && ctx->vif &&
- ctx->vif->type == NL80211_IFTYPE_STATION) {
- ctx->last_tx_rejected = true;
- iwl_stop_queue(priv, &priv->txq[txq_id]);
- }
-
- IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
- "0x%x retries %d\n",
- txq_id,
- iwl_get_tx_fail_reason(status), status,
- le32_to_cpu(tx_resp->rate_n_flags),
- tx_resp->failure_frame);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
-
-const char *iwl_get_agg_tx_fail_reason(u16 status)
-{
- status &= AGG_TX_STATUS_MSK;
- switch (status) {
- case AGG_TX_STATE_TRANSMITTED:
- return "SUCCESS";
- AGG_TX_STATE_FAIL(UNDERRUN_MSK);
- AGG_TX_STATE_FAIL(BT_PRIO_MSK);
- AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
- AGG_TX_STATE_FAIL(ABORT_MSK);
- AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
- AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
- AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
- AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
- AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
- AGG_TX_STATE_FAIL(RESPONSE_MSK);
- AGG_TX_STATE_FAIL(DUMP_TX_MSK);
- AGG_TX_STATE_FAIL(DELAY_TX_MSK);
- }
-
- return "UNKNOWN";
-}
-#endif /* CONFIG_IWLWIFI_DEBUG */
-
-static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwlagn_tx_resp *tx_resp,
- int txq_id, u16 start_idx)
-{
- u16 status;
- struct agg_tx_status *frame_status = &tx_resp->status;
- struct ieee80211_hdr *hdr = NULL;
- int i, sh, idx;
- u16 seq;
-
- if (agg->wait_for_ba)
- IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
-
- agg->frame_count = tx_resp->frame_count;
- agg->start_idx = start_idx;
- agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
- agg->bitmap = 0;
-
- /* # frames attempted by Tx command */
- if (agg->frame_count == 1) {
- struct iwl_tx_info *txb;
-
- /* Only one frame was attempted; no block-ack will arrive */
- idx = start_idx;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
- agg->frame_count, agg->start_idx, idx);
- txb = &priv->txq[txq_id].txb[idx];
- iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(txb->skb),
- txb->ctx, tx_resp, txq_id, true);
- agg->wait_for_ba = 0;
- } else {
- /* Two or more frames were attempted; expect block-ack */
- u64 bitmap = 0;
-
- /*
- * Start is the lowest frame sent. It may not be the first
- * frame in the batch; we figure this out dynamically during
- * the following loop.
- */
- int start = agg->start_idx;
-
- /* Construct bit-map of pending frames within Tx window */
- for (i = 0; i < agg->frame_count; i++) {
- u16 sc;
- status = le16_to_cpu(frame_status[i].status);
- seq = le16_to_cpu(frame_status[i].sequence);
- idx = SEQ_TO_INDEX(seq);
- txq_id = SEQ_TO_QUEUE(seq);
-
- if (status & AGG_TX_STATUS_MSK)
- iwlagn_count_agg_tx_err_status(priv, status);
-
- if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
- AGG_TX_STATE_ABORT_MSK))
- continue;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
- agg->frame_count, txq_id, idx);
- IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
- "try-count (0x%08x)\n",
- iwl_get_agg_tx_fail_reason(status),
- status & AGG_TX_STATUS_MSK,
- status & AGG_TX_TRY_MSK);
-
- hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
- if (!hdr) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't point to valid skb"
- " idx=%d, txq_id=%d\n", idx, txq_id);
- return -1;
- }
-
- sc = le16_to_cpu(hdr->seq_ctrl);
- if (idx != (SEQ_TO_SN(sc) & 0xff)) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't match seq control"
- " idx=%d, seq_idx=%d, seq=%d\n",
- idx, SEQ_TO_SN(sc),
- hdr->seq_ctrl);
- return -1;
- }
-
- IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
- i, idx, SEQ_TO_SN(sc));
-
- /*
- * sh -> how many frames ahead of the starting frame is
- * the current one?
- *
- * Note that all frames sent in the batch must be in a
- * 64-frame window, so this number should be in [0,63].
- * If outside of this window, then we've found a new
- * "first" frame in the batch and need to change start.
- */
- sh = idx - start;
-
- /*
- * If >= 64, out of window. start must be at the front
- * of the circular buffer, idx must be near the end of
- * the buffer, and idx is the new "first" frame. Shift
- * the indices around.
- */
- if (sh >= 64) {
- /* Shift bitmap by start - idx, wrapped */
- sh = 0x100 - idx + start;
- bitmap = bitmap << sh;
- /* Now idx is the new start so sh = 0 */
- sh = 0;
- start = idx;
- /*
- * If <= -64 then wraps the 256-pkt circular buffer
- * (e.g., start = 255 and idx = 0, sh should be 1)
- */
- } else if (sh <= -64) {
- sh = 0x100 - start + idx;
- /*
- * If < 0 but > -64, out of window. idx is before start
- * but not wrapped. Shift the indices around.
- */
- } else if (sh < 0) {
- /* Shift by how far start is ahead of idx */
- sh = start - idx;
- bitmap = bitmap << sh;
- /* Now idx is the new start so sh = 0 */
- start = idx;
- sh = 0;
- }
- /* Sequence number start + sh was sent in this batch */
- bitmap |= 1ULL << sh;
- IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
- start, (unsigned long long)bitmap);
- }
-
- /*
- * Store the bitmap and possibly the new start, if we wrapped
- * the buffer above
- */
- agg->bitmap = bitmap;
- agg->start_idx = start;
- IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
- agg->frame_count, agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- if (bitmap)
- agg->wait_for_ba = 1;
- }
- return 0;
-}
-
-void iwl_check_abort_status(struct iwl_priv *priv,
- u8 frame_count, u32 status)
-{
- if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
- IWL_ERR(priv, "Tx flush command to flush out all frames\n");
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
- queue_work(priv->workqueue, &priv->tx_flush);
- }
-}
-
-void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_tx_info *info;
- struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- struct ieee80211_hdr *hdr;
- struct iwl_tx_info *txb;
- u32 status = le16_to_cpu(tx_resp->status.status);
- int tid;
- int sta_id;
- int freed;
- unsigned long flags;
-
- if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
- "index %d is out of range [0-%d] %d %d\n", __func__,
- txq_id, index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- txq->time_stamp = jiffies;
- txb = &txq->txb[txq->q.read_ptr];
- info = IEEE80211_SKB_CB(txb->skb);
- memset(&info->status, 0, sizeof(info->status));
-
- tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
- IWLAGN_TX_RES_TID_POS;
- sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
- IWLAGN_TX_RES_RA_POS;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- hdr = (void *)txb->skb->data;
- if (!ieee80211_is_data_qos(hdr->frame_control))
- priv->last_seq_ctl = tx_resp->seq_ctl;
-
- if (txq->sched_retry) {
- const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
- struct iwl_ht_agg *agg;
-
- agg = &priv->stations[sta_id].tid[tid].agg;
- /*
- * If the BT kill count is non-zero, we'll get this
- * notification again.
- */
- if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
- IWL_DEBUG_COEX(priv, "receive reply tx with bt_kill\n");
- }
- iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
-
- /* check if BAR is needed */
- if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
- info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
- if (txq->q.read_ptr != (scd_ssn & 0xff)) {
- index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
- IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
- "scd_ssn=%d idx=%d txq=%d swq=%d\n",
- scd_ssn , index, txq_id, txq->swq_id);
-
- freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if (priv->mac80211_registered &&
- (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_wake_queue(priv, txq);
- }
- } else {
- iwlagn_set_tx_status(priv, info, txb->ctx, tx_resp,
- txq_id, false);
- freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if (priv->mac80211_registered &&
- iwl_queue_space(&txq->q) > txq->q.low_mark &&
- status != TX_STATUS_FAIL_PASSIVE_NO_RX)
- iwl_wake_queue(priv, txq);
- }
-
- iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
-
- iwl_check_abort_status(priv, tx_resp->frame_count, status);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
+#include "iwl-shared.h"
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
@@ -495,7 +51,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
u8 tx_ant_cfg_cmd;
- if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
+ if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->shrd->status),
"TX Power requested while scanning!\n"))
return -EAGAIN;
@@ -525,7 +81,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
else
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
- return trans_send_cmd_pdu(&priv->trans, tx_ant_cfg_cmd, CMD_SYNC,
+ return iwl_trans_send_cmd_pdu(trans(priv), tx_ant_cfg_cmd, CMD_SYNC,
sizeof(tx_power_cmd), &tx_power_cmd);
}
@@ -538,11 +94,7 @@ void iwlagn_temperature(struct iwl_priv *priv)
u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
{
- struct iwl_eeprom_calib_hdr {
- u8 version;
- u8 pa_type;
- u16 voltage;
- } *hdr;
+ struct iwl_eeprom_calib_hdr *hdr;
hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
EEPROM_CALIB_ALL);
@@ -609,6 +161,9 @@ struct iwl_mod_params iwlagn_mod_params = {
.bt_coex_active = true,
.no_sleep_autoadjust = true,
.power_level = IWL_POWER_INDEX_1,
+ .bt_ch_announce = true,
+ .wanted_ucode_alternative = 1,
+ .auto_agg = true,
/* the rest are 0 by default */
};
@@ -633,449 +188,6 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
return -1;
}
-static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- enum ieee80211_band band,
- struct iwl_scan_channel *scan_ch)
-{
- const struct ieee80211_supported_band *sband;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added = 0;
- u16 channel = 0;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband) {
- IWL_ERR(priv, "invalid band\n");
- return added;
- }
-
- active_dwell = iwl_get_active_dwell_time(priv, band, 0);
- passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- channel = iwl_get_single_channel_number(priv, band);
- if (channel) {
- scan_ch->channel = cpu_to_le16(channel);
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
- added++;
- } else
- IWL_ERR(priv, "no valid channel found\n");
- return added;
-}
-
-static int iwl_get_channels_for_scan(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- enum ieee80211_band band,
- u8 is_active, u8 n_probes,
- struct iwl_scan_channel *scan_ch)
-{
- struct ieee80211_channel *chan;
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added, i;
- u16 channel;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband)
- return 0;
-
- active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
-
- if (chan->band != band)
- continue;
-
- channel = chan->hw_value;
- scan_ch->channel = cpu_to_le16(channel);
-
- ch_info = iwl_get_channel_info(priv, band, channel);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
- channel);
- continue;
- }
-
- if (!is_active || is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- else
- scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
-
- if (n_probes)
- scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
-
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
-
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
- IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
- channel, le32_to_cpu(scan_ch->type),
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- "ACTIVE" : "PASSIVE",
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- active_dwell : passive_dwell);
-
- scan_ch++;
- added++;
- }
-
- IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
- return added;
-}
-
-static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
-{
- struct sk_buff *skb = priv->offchan_tx_skb;
-
- if (skb->len < maxlen)
- maxlen = skb->len;
-
- memcpy(data, skb->data, maxlen);
-
- return maxlen;
-}
-
-int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_CMD,
- .len = { sizeof(struct iwl_scan_cmd), },
- .flags = CMD_SYNC,
- };
- struct iwl_scan_cmd *scan;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u32 rate_flags = 0;
- u16 cmd_len;
- u16 rx_chain = 0;
- enum ieee80211_band band;
- u8 n_probes = 0;
- u8 rx_ant = priv->hw_params.valid_rx_ant;
- u8 rate;
- bool is_active = false;
- int chan_mod;
- u8 active_chains;
- u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (vif)
- ctx = iwl_rxon_ctx_from_vif(vif);
-
- if (!priv->scan_cmd) {
- priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan_cmd) {
- IWL_DEBUG_SCAN(priv,
- "fail to allocate memory for scan\n");
- return -ENOMEM;
- }
- }
- scan = priv->scan_cmd;
- memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
- scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
- scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
- if (priv->scan_type != IWL_SCAN_OFFCH_TX &&
- iwl_is_any_associated(priv)) {
- u16 interval = 0;
- u32 extra;
- u32 suspend_time = 100;
- u32 scan_suspend_time = 100;
-
- IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- switch (priv->scan_type) {
- case IWL_SCAN_OFFCH_TX:
- WARN_ON(1);
- break;
- case IWL_SCAN_RADIO_RESET:
- interval = 0;
- break;
- case IWL_SCAN_NORMAL:
- interval = vif->bss_conf.beacon_int;
- break;
- }
-
- scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(200 * 1024);
- if (!interval)
- interval = suspend_time;
-
- extra = (suspend_time / interval) << 22;
- scan_suspend_time = (extra |
- ((suspend_time % interval) * 1024));
- scan->suspend_time = cpu_to_le32(scan_suspend_time);
- IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
- scan_suspend_time, interval);
- } else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
- scan->suspend_time = 0;
- scan->max_out_time =
- cpu_to_le32(1024 * priv->offchan_tx_timeout);
- }
-
- switch (priv->scan_type) {
- case IWL_SCAN_RADIO_RESET:
- IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
- break;
- case IWL_SCAN_NORMAL:
- if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
- break;
- case IWL_SCAN_OFFCH_TX:
- IWL_DEBUG_SCAN(priv, "Start offchannel TX scan.\n");
- break;
- }
-
- scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = ctx->bcast_sta_id;
- scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- switch (priv->scan_band) {
- case IEEE80211_BAND_2GHZ:
- scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- chan_mod = le32_to_cpu(
- priv->contexts[IWL_RXON_CTX_BSS].active.flags &
- RXON_FLG_CHANNEL_MODE_MSK)
- >> RXON_FLG_CHANNEL_MODE_POS;
- if (chan_mod == CHANNEL_MODE_PURE_40) {
- rate = IWL_RATE_6M_PLCP;
- } else {
- rate = IWL_RATE_1M_PLCP;
- rate_flags = RATE_MCS_CCK_MSK;
- }
- /*
- * Internal scans are passive, so we can indiscriminately set
- * the BT ignore flag on 2.4 GHz since it applies to TX only.
- */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist)
- scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
- break;
- case IEEE80211_BAND_5GHZ:
- rate = IWL_RATE_6M_PLCP;
- break;
- default:
- IWL_WARN(priv, "Invalid scan band\n");
- return -EIO;
- }
-
- /*
- * If active scanning is requested but a certain channel is
- * marked passive, we can do active scanning if we detect
- * transmissions.
- *
- * There is an issue with some firmware versions that triggers
- * a sysassert on a "good CRC threshold" of zero (== disabled),
- * on a radar channel even though this means that we should NOT
- * send probes.
- *
- * The "good CRC threshold" is the number of frames that we
- * need to receive during our dwell time on a channel before
- * sending out probes -- setting this to a huge value will
- * mean we never reach it, but at the same time work around
- * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
- * here instead of IWL_GOOD_CRC_TH_DISABLED.
- *
- * This was fixed in later versions along with some other
- * scan changes, and the threshold behaves as a flag in those
- * versions.
- */
- if (priv->new_scan_threshold_behaviour)
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_DISABLED;
- else
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_NEVER;
-
- band = priv->scan_band;
-
- if (priv->cfg->scan_rx_antennas[band])
- rx_ant = priv->cfg->scan_rx_antennas[band];
-
- if (band == IEEE80211_BAND_2GHZ &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
- /* transmit 2.4 GHz probes only on first antenna */
- scan_tx_antennas = first_antenna(scan_tx_antennas);
- }
-
- priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
- scan_tx_antennas);
- rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
- scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
-
- /* In power save mode use one chain, otherwise use all chains */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* rx_ant has been set to all valid chains previously */
- active_chains = rx_ant &
- ((u8)(priv->chain_noise_data.active_chains));
- if (!active_chains)
- active_chains = rx_ant;
-
- IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
- priv->chain_noise_data.active_chains);
-
- rx_ant = first_antenna(active_chains);
- }
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
- priv->bt_full_concurrent) {
- /* operated as 1x1 in full concurrency mode */
- rx_ant = first_antenna(rx_ant);
- }
-
- /* MIMO is not used here, but value is required */
- rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
- rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
- scan->rx_chain = cpu_to_le16(rx_chain);
- switch (priv->scan_type) {
- case IWL_SCAN_NORMAL:
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- vif->addr,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- break;
- case IWL_SCAN_RADIO_RESET:
- /* use bcast addr, will not be transmitted but must be valid */
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- iwl_bcast_addr, NULL, 0,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- break;
- case IWL_SCAN_OFFCH_TX:
- cmd_len = iwl_fill_offch_tx(priv, scan->data,
- IWL_MAX_SCAN_SIZE
- - sizeof(*scan)
- - sizeof(struct iwl_scan_channel));
- scan->scan_flags |= IWL_SCAN_FLAGS_ACTION_FRAME_TX;
- break;
- default:
- BUG();
- }
- scan->tx_cmd.len = cpu_to_le16(cmd_len);
-
- scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
- RXON_FILTER_BCON_AWARE_MSK);
-
- switch (priv->scan_type) {
- case IWL_SCAN_RADIO_RESET:
- scan->channel_count =
- iwl_get_single_channel_for_scan(priv, vif, band,
- (void *)&scan->data[cmd_len]);
- break;
- case IWL_SCAN_NORMAL:
- scan->channel_count =
- iwl_get_channels_for_scan(priv, vif, band,
- is_active, n_probes,
- (void *)&scan->data[cmd_len]);
- break;
- case IWL_SCAN_OFFCH_TX: {
- struct iwl_scan_channel *scan_ch;
-
- scan->channel_count = 1;
-
- scan_ch = (void *)&scan->data[cmd_len];
- scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
- scan_ch->channel =
- cpu_to_le16(priv->offchan_tx_chan->hw_value);
- scan_ch->active_dwell =
- cpu_to_le16(priv->offchan_tx_timeout);
- scan_ch->passive_dwell = 0;
-
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
-
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- if (priv->offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
- }
- break;
- }
-
- if (scan->channel_count == 0) {
- IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
- return -EIO;
- }
-
- cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
- scan->channel_count * sizeof(struct iwl_scan_channel);
- cmd.data[0] = scan;
- cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- scan->len = cpu_to_le16(cmd.len[0]);
-
- /* set scan bit here for PAN params */
- set_bit(STATUS_SCAN_HW, &priv->status);
-
- ret = iwlagn_set_pan_params(priv);
- if (ret)
- return ret;
-
- ret = trans_send_cmd(&priv->trans, &cmd);
- if (ret) {
- clear_bit(STATUS_SCAN_HW, &priv->status);
- iwlagn_set_pan_params(priv);
- }
-
- return ret;
-}
-
int iwlagn_manage_ibss_station(struct iwl_priv *priv,
struct ieee80211_vif *vif, bool add)
{
@@ -1089,52 +201,6 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
vif->bss_conf.bssid);
}
-void iwl_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed)
-{
- lockdep_assert_held(&priv->sta_lock);
-
- if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
- else {
- IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
- priv->stations[sta_id].tid[tid].tfds_in_queue,
- freed);
- priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
- }
-}
-
-#define IWL_FLUSH_WAIT_MS 2000
-
-int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
-{
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- int cnt;
- unsigned long now = jiffies;
- int ret = 0;
-
- /* waiting for all the tx frames complete might take a while */
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- if (cnt == priv->cmd_queue)
- continue;
- txq = &priv->txq[cnt];
- q = &txq->q;
- while (q->read_ptr != q->write_ptr && !time_after(jiffies,
- now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
- msleep(1);
-
- if (q->read_ptr != q->write_ptr) {
- IWL_ERR(priv, "fail to flush all tx fifo queues\n");
- ret = -ETIMEDOUT;
- break;
- }
- }
- return ret;
-}
-
-#define IWL_TX_QUEUE_MSK 0xfffff
-
/**
* iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
*
@@ -1160,7 +226,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
IWL_SCD_MGMT_MSK;
if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
- (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
+ (priv->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
@@ -1173,22 +239,22 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
flush_cmd.fifo_control);
flush_cmd.flush_control = cpu_to_le16(flush_control);
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
ieee80211_stop_queues(priv->hw);
if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
IWL_ERR(priv, "flush request fail\n");
goto done;
}
IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
- iwlagn_wait_tx_queue_empty(priv);
+ iwl_trans_wait_tx_queue_empty(trans(priv));
done:
ieee80211_wake_queues(priv->hw);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
/*
@@ -1367,12 +433,12 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
if (priv->cfg->bt_params->bt_session_2) {
memcpy(&bt_cmd_2000.basic, &basic,
sizeof(basic));
- ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
} else {
memcpy(&bt_cmd_6000.basic, &basic,
sizeof(basic));
- ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
}
if (ret)
@@ -1385,7 +451,7 @@ void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
struct iwl_rxon_context *ctx, *found_ctx = NULL;
bool found_ap = false;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
/* Check whether AP or GO mode is active. */
if (rssi_ena) {
@@ -1498,7 +564,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
break;
}
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
/*
* We can not send command to firmware while scanning. When the scan
@@ -1507,7 +573,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
* STATUS_SCANNING to avoid race when queue_work two times from
* different notifications, but quit and not perform any work at all.
*/
- if (test_bit(STATUS_SCAN_HW, &priv->status))
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status))
goto out;
iwl_update_chain_flags(priv);
@@ -1526,7 +592,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
*/
iwlagn_bt_coex_rssi_monitor(priv);
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
/*
@@ -1633,12 +699,13 @@ static void iwlagn_set_kill_msk(struct iwl_priv *priv,
priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
/* schedule to send runtime bt_config */
- queue_work(priv->workqueue, &priv->bt_runtime_config);
+ queue_work(priv->shrd->workqueue, &priv->bt_runtime_config);
}
}
-void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
unsigned long flags;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -1647,7 +714,7 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
/* bt coex disabled */
- return;
+ return 0;
}
IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
@@ -1677,7 +744,7 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
IWL_BT_COEX_TRAFFIC_LOAD_NONE;
}
priv->bt_status = coex->bt_status;
- queue_work(priv->workqueue,
+ queue_work(priv->shrd->workqueue,
&priv->bt_traffic_change_work);
}
}
@@ -1686,9 +753,10 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
/* FIXME: based on notification, adjust the prio_boost */
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
priv->bt_ci_compliance = coex->bt_ci_compliance;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ return 0;
}
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
@@ -1788,7 +856,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
bool is_single = is_single_rx_stream(priv);
- bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
+ bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->shrd->status);
u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
u32 active_chains;
u16 rx_chain;
@@ -1800,7 +868,7 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (priv->chain_noise_data.active_chains)
active_chains = priv->chain_noise_data.active_chains;
else
- active_chains = priv->hw_params.valid_rx_ant;
+ active_chains = hw_params(priv).valid_rx_ant;
if (priv->cfg->bt_params &&
priv->cfg->bt_params->advanced_bt_coexist &&
@@ -1865,136 +933,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
return ant;
}
-static const char *get_csr_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(CSR_HW_IF_CONFIG_REG);
- IWL_CMD(CSR_INT_COALESCING);
- IWL_CMD(CSR_INT);
- IWL_CMD(CSR_INT_MASK);
- IWL_CMD(CSR_FH_INT_STATUS);
- IWL_CMD(CSR_GPIO_IN);
- IWL_CMD(CSR_RESET);
- IWL_CMD(CSR_GP_CNTRL);
- IWL_CMD(CSR_HW_REV);
- IWL_CMD(CSR_EEPROM_REG);
- IWL_CMD(CSR_EEPROM_GP);
- IWL_CMD(CSR_OTP_GP_REG);
- IWL_CMD(CSR_GIO_REG);
- IWL_CMD(CSR_GP_UCODE_REG);
- IWL_CMD(CSR_GP_DRIVER_REG);
- IWL_CMD(CSR_UCODE_DRV_GP1);
- IWL_CMD(CSR_UCODE_DRV_GP2);
- IWL_CMD(CSR_LED_REG);
- IWL_CMD(CSR_DRAM_INT_TBL_REG);
- IWL_CMD(CSR_GIO_CHICKEN_BITS);
- IWL_CMD(CSR_ANA_PLL_CFG);
- IWL_CMD(CSR_HW_REV_WA_REG);
- IWL_CMD(CSR_DBG_HPET_MEM_REG);
- default:
- return "UNKNOWN";
- }
-}
-
-void iwl_dump_csr(struct iwl_priv *priv)
-{
- int i;
- static const u32 csr_tbl[] = {
- CSR_HW_IF_CONFIG_REG,
- CSR_INT_COALESCING,
- CSR_INT,
- CSR_INT_MASK,
- CSR_FH_INT_STATUS,
- CSR_GPIO_IN,
- CSR_RESET,
- CSR_GP_CNTRL,
- CSR_HW_REV,
- CSR_EEPROM_REG,
- CSR_EEPROM_GP,
- CSR_OTP_GP_REG,
- CSR_GIO_REG,
- CSR_GP_UCODE_REG,
- CSR_GP_DRIVER_REG,
- CSR_UCODE_DRV_GP1,
- CSR_UCODE_DRV_GP2,
- CSR_LED_REG,
- CSR_DRAM_INT_TBL_REG,
- CSR_GIO_CHICKEN_BITS,
- CSR_ANA_PLL_CFG,
- CSR_HW_REV_WA_REG,
- CSR_DBG_HPET_MEM_REG
- };
- IWL_ERR(priv, "CSR values:\n");
- IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
- "CSR_INT_PERIODIC_REG)\n");
- for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
- IWL_ERR(priv, " %25s: 0X%08x\n",
- get_csr_string(csr_tbl[i]),
- iwl_read32(priv, csr_tbl[i]));
- }
-}
-
-static const char *get_fh_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
- IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
- IWL_CMD(FH_RSCSR_CHNL0_WPTR);
- IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
- IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
- IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
- IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
- IWL_CMD(FH_TSSR_TX_STATUS_REG);
- IWL_CMD(FH_TSSR_TX_ERROR_REG);
- default:
- return "UNKNOWN";
- }
-}
-
-int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
-{
- int i;
-#ifdef CONFIG_IWLWIFI_DEBUG
- int pos = 0;
- size_t bufsz = 0;
-#endif
- static const u32 fh_tbl[] = {
- FH_RSCSR_CHNL0_STTS_WPTR_REG,
- FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- FH_RSCSR_CHNL0_WPTR,
- FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_MEM_RSSR_SHARED_CTRL_REG,
- FH_MEM_RSSR_RX_STATUS_REG,
- FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
- FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_ERROR_REG
- };
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (display) {
- bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
- pos += scnprintf(*buf + pos, bufsz - pos,
- "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(priv, fh_tbl[i]));
- }
- return pos;
- }
-#endif
- IWL_ERR(priv, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- IWL_ERR(priv, " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(priv, fh_tbl[i]));
- }
- return 0;
-}
-
/* notification wait support */
void iwlagn_init_notification_wait(struct iwl_priv *priv,
struct iwl_notification_wait *wait_entry,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 3789ff4bf53..66118cea2af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -27,7 +27,6 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/netdevice.h>
@@ -37,7 +36,6 @@
#include <linux/workqueue.h>
#include "iwl-dev.h"
-#include "iwl-sta.h"
#include "iwl-core.h"
#include "iwl-agn.h"
@@ -298,10 +296,10 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & 0xf;
} else
- return MAX_TID_COUNT;
+ return IWL_MAX_TID_COUNT;
if (unlikely(tid >= TID_MAX_LOAD_COUNT))
- return MAX_TID_COUNT;
+ return IWL_MAX_TID_COUNT;
tl = &lq_data->load[tid];
@@ -314,7 +312,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
tl->queue_count = 1;
tl->head = 0;
tl->packet_count[0] = 1;
- return MAX_TID_COUNT;
+ return IWL_MAX_TID_COUNT;
}
time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
@@ -347,7 +345,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
{
struct iwl_station_priv *sta_priv =
container_of(lq_sta, struct iwl_station_priv, lq_sta);
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
@@ -421,7 +419,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
load = rs_tl_get_load(lq_data, tid);
- if (load > IWL_AGG_LOAD_THRESHOLD) {
+ if ((iwlagn_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
@@ -711,7 +709,7 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
static bool rs_use_green(struct ieee80211_sta *sta)
{
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
!(ctx->ht.non_gf_sta_present);
@@ -820,7 +818,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
if (num_of_ant(tbl->ant_type) > 1)
tbl->ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
tbl->is_ht40 = 0;
tbl->is_SGI = 0;
@@ -878,12 +876,12 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
* Is there a need to switch between
* full concurrency and 3-wire?
*/
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
full_concurrent = true;
else
full_concurrent = false;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
(priv->bt_full_concurrent != full_concurrent)) {
@@ -894,7 +892,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
- queue_work(priv->workqueue, &priv->bt_full_concurrency);
+ queue_work(priv->shrd->workqueue, &priv->bt_full_concurrency);
}
}
@@ -918,7 +916,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
struct iwl_scale_tbl_info tbl_type;
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
@@ -1284,7 +1282,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
s32 rate;
s8 is_green = lq_sta->is_green;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
@@ -1294,7 +1292,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
return -1;
/* Need both Tx chains/antennas to support MIMO */
- if (priv->hw_params.tx_chains_num < 2)
+ if (hw_params(priv).tx_chains_num < 2)
return -1;
IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
@@ -1340,7 +1338,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
s32 rate;
s8 is_green = lq_sta->is_green;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
@@ -1350,7 +1348,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
return -1;
/* Need both Tx chains/antennas to support MIMO */
- if (priv->hw_params.tx_chains_num < 3)
+ if (hw_params(priv).tx_chains_num < 3)
return -1;
IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
@@ -1397,7 +1395,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
u8 is_green = lq_sta->is_green;
s32 rate;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
@@ -1449,8 +1447,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
int ret = 0;
u8 update_search_tbl_counter = 0;
@@ -1460,14 +1458,16 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
tbl->action != IWL_LEGACY_SWITCH_SISO)
tbl->action = IWL_LEGACY_SWITCH_SISO;
@@ -1490,7 +1490,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_SISO;
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
}
start_action = tbl->action;
@@ -1624,8 +1625,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
@@ -1635,14 +1636,16 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
@@ -1659,7 +1662,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent) {
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
}
@@ -1795,8 +1799,8 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
@@ -1965,8 +1969,8 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
int ret;
u8 update_search_tbl_counter = 0;
@@ -2209,7 +2213,6 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
/*
* setup rate table in uCode
- * return rate_n_flags as used in the table
*/
static void rs_update_rate_tbl(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
@@ -2256,10 +2259,10 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
u8 done_search = 0;
u16 high_low;
s32 sr;
- u8 tid = MAX_TID_COUNT;
+ u8 tid = IWL_MAX_TID_COUNT;
struct iwl_tid_data *tid_data;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
@@ -2269,14 +2272,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
- if (!sta || !lq_sta)
- return;
-
lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
tid = rs_tl_add_packet(lq_sta, hdr);
- if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
- tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
+ if ((tid != IWL_MAX_TID_COUNT) &&
+ (lq_sta->tx_agg_tid_en & (1 << tid))) {
+ tid_data = &priv->shrd->tid_data[lq_sta->lq.sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF)
lq_sta->is_agg = 0;
else
@@ -2646,9 +2647,10 @@ lq_update:
iwl_ht_enabled(priv)) {
if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
- (tid != MAX_TID_COUNT)) {
+ (tid != IWL_MAX_TID_COUNT)) {
+ u8 sta_id = lq_sta->lq.sta_id;
tid_data =
- &priv->stations[lq_sta->lq.sta_id].tid[tid];
+ &priv->shrd->tid_data[sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF) {
IWL_DEBUG_RATE(priv,
"try to aggregate tid %d\n",
@@ -2663,8 +2665,7 @@ lq_update:
out:
tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
- i = index;
- lq_sta->last_txrate_idx = i;
+ lq_sta->last_txrate_idx = index;
}
/**
@@ -2700,11 +2701,11 @@ static void rs_initialize_lq(struct iwl_priv *priv,
return;
sta_priv = (void *)sta->drv_priv;
- ctx = sta_priv->common.ctx;
+ ctx = sta_priv->ctx;
i = lq_sta->last_txrate_idx;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = hw_params(priv).valid_tx_ant;
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
@@ -2887,15 +2888,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
/* These values will be overridden later */
lq_sta->lq.general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~first_antenna(priv->hw_params.valid_tx_ant);
+ hw_params(priv).valid_tx_ant &
+ ~first_antenna(hw_params(priv).valid_tx_ant);
if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
+ hw_params(priv).valid_tx_ant;
}
/* as default allow aggregation for all tids */
@@ -2941,7 +2942,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
}
/* How many times should we repeat the initial rate? */
@@ -2973,7 +2974,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv->bt_full_concurrent)
valid_tx_ant = ANT_A;
else
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = hw_params(priv).valid_tx_ant;
}
/* Fill rest of rate table */
@@ -3007,7 +3008,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
}
/* Indicate to uCode which entries might be MIMO.
@@ -3098,7 +3099,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
u8 ant_sel_tx;
priv = lq_sta->drv;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = hw_params(priv).valid_tx_ant;
if (lq_sta->dbg_fixed_rate) {
ant_sel_tx =
((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
@@ -3169,9 +3170,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
lq_sta->dbg_fixed_rate);
desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
- (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
- (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
- (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ (hw_params(priv).valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (hw_params(priv).valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (hw_params(priv).valid_tx_ant & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
(is_legacy(tbl->lq_type)) ? "legacy" : "HT");
if (is_Ht(tbl->lq_type)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index bdae82e7fa9..f4f6deb829a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -27,6 +27,10 @@
#ifndef __iwl_agn_rs_h__
#define __iwl_agn_rs_h__
+#include <net/mac80211.h>
+
+#include "iwl-commands.h"
+
struct iwl_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
new file mode 100644
index 00000000000..5af9e6258a1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -0,0 +1,1143 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portionhelp of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <net/mac80211.h>
+#include <asm/unaligned.h>
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-agn-calib.h"
+#include "iwl-agn.h"
+#include "iwl-shared.h"
+
+const char *get_cmd_string(u8 cmd)
+{
+ switch (cmd) {
+ IWL_CMD(REPLY_ALIVE);
+ IWL_CMD(REPLY_ERROR);
+ IWL_CMD(REPLY_ECHO);
+ IWL_CMD(REPLY_RXON);
+ IWL_CMD(REPLY_RXON_ASSOC);
+ IWL_CMD(REPLY_QOS_PARAM);
+ IWL_CMD(REPLY_RXON_TIMING);
+ IWL_CMD(REPLY_ADD_STA);
+ IWL_CMD(REPLY_REMOVE_STA);
+ IWL_CMD(REPLY_REMOVE_ALL_STA);
+ IWL_CMD(REPLY_TXFIFO_FLUSH);
+ IWL_CMD(REPLY_WEPKEY);
+ IWL_CMD(REPLY_TX);
+ IWL_CMD(REPLY_LEDS_CMD);
+ IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
+ IWL_CMD(COEX_PRIORITY_TABLE_CMD);
+ IWL_CMD(COEX_MEDIUM_NOTIFICATION);
+ IWL_CMD(COEX_EVENT_CMD);
+ IWL_CMD(REPLY_QUIET_CMD);
+ IWL_CMD(REPLY_CHANNEL_SWITCH);
+ IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
+ IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
+ IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
+ IWL_CMD(POWER_TABLE_CMD);
+ IWL_CMD(PM_SLEEP_NOTIFICATION);
+ IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
+ IWL_CMD(REPLY_SCAN_CMD);
+ IWL_CMD(REPLY_SCAN_ABORT_CMD);
+ IWL_CMD(SCAN_START_NOTIFICATION);
+ IWL_CMD(SCAN_RESULTS_NOTIFICATION);
+ IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
+ IWL_CMD(BEACON_NOTIFICATION);
+ IWL_CMD(REPLY_TX_BEACON);
+ IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
+ IWL_CMD(QUIET_NOTIFICATION);
+ IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
+ IWL_CMD(MEASURE_ABORT_NOTIFICATION);
+ IWL_CMD(REPLY_BT_CONFIG);
+ IWL_CMD(REPLY_STATISTICS_CMD);
+ IWL_CMD(STATISTICS_NOTIFICATION);
+ IWL_CMD(REPLY_CARD_STATE_CMD);
+ IWL_CMD(CARD_STATE_NOTIFICATION);
+ IWL_CMD(MISSED_BEACONS_NOTIFICATION);
+ IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
+ IWL_CMD(SENSITIVITY_CMD);
+ IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
+ IWL_CMD(REPLY_RX_PHY_CMD);
+ IWL_CMD(REPLY_RX_MPDU_CMD);
+ IWL_CMD(REPLY_RX);
+ IWL_CMD(REPLY_COMPRESSED_BA);
+ IWL_CMD(CALIBRATION_CFG_CMD);
+ IWL_CMD(CALIBRATION_RES_NOTIFICATION);
+ IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
+ IWL_CMD(REPLY_TX_POWER_DBM_CMD);
+ IWL_CMD(TEMPERATURE_NOTIFICATION);
+ IWL_CMD(TX_ANT_CONFIGURATION_CMD);
+ IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
+ IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
+ IWL_CMD(REPLY_BT_COEX_PROT_ENV);
+ IWL_CMD(REPLY_WIPAN_PARAMS);
+ IWL_CMD(REPLY_WIPAN_RXON);
+ IWL_CMD(REPLY_WIPAN_RXON_TIMING);
+ IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
+ IWL_CMD(REPLY_WIPAN_QOS_PARAM);
+ IWL_CMD(REPLY_WIPAN_WEPKEY);
+ IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
+ IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
+ IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
+ IWL_CMD(REPLY_WOWLAN_PATTERNS);
+ IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
+ IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
+ IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
+ IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
+ IWL_CMD(REPLY_WOWLAN_GET_STATUS);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+
+static int iwlagn_rx_reply_error(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
+ "seq 0x%04X ser 0x%08X\n",
+ le32_to_cpu(pkt->u.err_resp.error_type),
+ get_cmd_string(pkt->u.err_resp.cmd_id),
+ pkt->u.err_resp.cmd_id,
+ le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+ le32_to_cpu(pkt->u.err_resp.error_info));
+ return 0;
+}
+
+static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
+ /*
+ * MULTI-FIXME
+ * See iwlagn_mac_channel_switch.
+ */
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
+
+ if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
+ return 0;
+
+ if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
+ rxon->channel = csa->channel;
+ ctx->staging.channel = csa->channel;
+ IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+ le16_to_cpu(csa->channel));
+ iwl_chswitch_done(priv, true);
+ } else {
+ IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+ le16_to_cpu(csa->channel));
+ iwl_chswitch_done(priv, false);
+ }
+ return 0;
+}
+
+
+static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+ if (!report->state) {
+ IWL_DEBUG_11H(priv,
+ "Spectrum Measure Notification: Start\n");
+ return 0;
+ }
+
+ memcpy(&priv->measure_report, report, sizeof(*report));
+ priv->measurement_status |= MEASUREMENT_READY;
+ return 0;
+}
+
+static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+#ifdef CONFIG_IWLWIFI_DEBUG
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
+ IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
+ sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+ return 0;
+}
+
+static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u32 __maybe_unused len =
+ le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
+ "notification for %s:\n", len,
+ get_cmd_string(pkt->hdr.cmd));
+ iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
+ return 0;
+}
+
+static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
+ u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+ IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
+ "tsf:0x%.8x%.8x rate:%d\n",
+ status & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf),
+ le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+ priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+
+ return 0;
+}
+
+/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
+#define ACK_CNT_RATIO (50)
+#define BA_TIMEOUT_CNT (5)
+#define BA_TIMEOUT_MAX (16)
+
+/**
+ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
+ *
+ * When the ACK count ratio is low and aggregated BA timeout retries exceeding
+ * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
+ * operation state.
+ */
+static bool iwlagn_good_ack_health(struct iwl_priv *priv,
+ struct statistics_tx *cur)
+{
+ int actual_delta, expected_delta, ba_timeout_delta;
+ struct statistics_tx *old;
+
+ if (priv->agg_tids_count)
+ return true;
+
+ old = &priv->statistics.tx;
+
+ actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
+ le32_to_cpu(old->actual_ack_cnt);
+ expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
+ le32_to_cpu(old->expected_ack_cnt);
+
+ /* Values should not be negative, but we do not trust the firmware */
+ if (actual_delta <= 0 || expected_delta <= 0)
+ return true;
+
+ ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
+ le32_to_cpu(old->agg.ba_timeout);
+
+ if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
+ ba_timeout_delta > BA_TIMEOUT_CNT) {
+ IWL_DEBUG_RADIO(priv,
+ "deltas: actual %d expected %d ba_timeout %d\n",
+ actual_delta, expected_delta, ba_timeout_delta);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /*
+ * This is ifdef'ed on DEBUGFS because otherwise the
+ * statistics aren't available. If DEBUGFS is set but
+ * DEBUG is not, these will just compile out.
+ */
+ IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
+ priv->delta_stats.tx.rx_detected_cnt);
+ IWL_DEBUG_RADIO(priv,
+ "ack_or_ba_timeout_collision delta %d\n",
+ priv->delta_stats.tx.ack_or_ba_timeout_collision);
+#endif
+
+ if (ba_timeout_delta >= BA_TIMEOUT_MAX)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * iwl_good_plcp_health - checks for plcp error.
+ *
+ * When the plcp error is exceeding the thresholds, reset the radio
+ * to improve the throughput.
+ */
+static bool iwlagn_good_plcp_health(struct iwl_priv *priv,
+ struct statistics_rx_phy *cur_ofdm,
+ struct statistics_rx_ht_phy *cur_ofdm_ht,
+ unsigned int msecs)
+{
+ int delta;
+ int threshold = priv->cfg->base_params->plcp_delta_threshold;
+
+ if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
+ IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
+ return true;
+ }
+
+ delta = le32_to_cpu(cur_ofdm->plcp_err) -
+ le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) +
+ le32_to_cpu(cur_ofdm_ht->plcp_err) -
+ le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err);
+
+ /* Can be negative if firmware reset statistics */
+ if (delta <= 0)
+ return true;
+
+ if ((delta * 100 / msecs) > threshold) {
+ IWL_DEBUG_RADIO(priv,
+ "plcp health threshold %u delta %d msecs %u\n",
+ threshold, delta, msecs);
+ return false;
+ }
+
+ return true;
+}
+
+static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
+ struct statistics_rx_phy *cur_ofdm,
+ struct statistics_rx_ht_phy *cur_ofdm_ht,
+ struct statistics_tx *tx,
+ unsigned long stamp)
+{
+ unsigned int msecs;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ return;
+
+ msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
+
+ /* Only gather statistics and update time stamp when not associated */
+ if (!iwl_is_any_associated(priv))
+ return;
+
+ /* Do not check/recover when do not have enough statistics data */
+ if (msecs < 99)
+ return;
+
+ if (iwlagn_mod_params.ack_check && !iwlagn_good_ack_health(priv, tx)) {
+ IWL_ERR(priv, "low ack count detected, restart firmware\n");
+ if (!iwl_force_reset(priv, IWL_FW_RESET, false))
+ return;
+ }
+
+ if (iwlagn_mod_params.plcp_check &&
+ !iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
+ iwl_force_reset(priv, IWL_RF_RESET, false);
+}
+
+/* Calculate noise level, based on measurements during network silence just
+ * before arriving beacon. This measurement can be done only if we know
+ * exactly when to expect beacons, therefore only when we're associated. */
+static void iwlagn_rx_calc_noise(struct iwl_priv *priv)
+{
+ struct statistics_rx_non_phy *rx_info;
+ int num_active_rx = 0;
+ int total_silence = 0;
+ int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+ int last_rx_noise;
+
+ rx_info = &priv->statistics.rx_non_phy;
+
+ bcn_silence_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+ bcn_silence_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+ bcn_silence_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+ if (bcn_silence_a) {
+ total_silence += bcn_silence_a;
+ num_active_rx++;
+ }
+ if (bcn_silence_b) {
+ total_silence += bcn_silence_b;
+ num_active_rx++;
+ }
+ if (bcn_silence_c) {
+ total_silence += bcn_silence_c;
+ num_active_rx++;
+ }
+
+ /* Average among active antennas */
+ if (num_active_rx)
+ last_rx_noise = (total_silence / num_active_rx) - 107;
+ else
+ last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
+
+ IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
+ bcn_silence_a, bcn_silence_b, bcn_silence_c,
+ last_rx_noise);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/*
+ * based on the assumption of all statistics counter are in DWORD
+ * FIXME: This function is for debugging, do not deal with
+ * the case of counters roll-over.
+ */
+static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta,
+ __le32 *max_delta, __le32 *accum, int size)
+{
+ int i;
+
+ for (i = 0;
+ i < size / sizeof(__le32);
+ i++, prev++, cur++, delta++, max_delta++, accum++) {
+ if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) {
+ *delta = cpu_to_le32(
+ le32_to_cpu(*cur) - le32_to_cpu(*prev));
+ le32_add_cpu(accum, le32_to_cpu(*delta));
+ if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta))
+ *max_delta = *delta;
+ }
+ }
+}
+
+static void
+iwlagn_accumulative_statistics(struct iwl_priv *priv,
+ struct statistics_general_common *common,
+ struct statistics_rx_non_phy *rx_non_phy,
+ struct statistics_rx_phy *rx_ofdm,
+ struct statistics_rx_ht_phy *rx_ofdm_ht,
+ struct statistics_rx_phy *rx_cck,
+ struct statistics_tx *tx,
+ struct statistics_bt_activity *bt_activity)
+{
+#define ACCUM(_name) \
+ accum_stats((__le32 *)&priv->statistics._name, \
+ (__le32 *)_name, \
+ (__le32 *)&priv->delta_stats._name, \
+ (__le32 *)&priv->max_delta_stats._name, \
+ (__le32 *)&priv->accum_stats._name, \
+ sizeof(*_name));
+
+ ACCUM(common);
+ ACCUM(rx_non_phy);
+ ACCUM(rx_ofdm);
+ ACCUM(rx_ofdm_ht);
+ ACCUM(rx_cck);
+ ACCUM(tx);
+ if (bt_activity)
+ ACCUM(bt_activity);
+#undef ACCUM
+}
+#else
+static inline void
+iwlagn_accumulative_statistics(struct iwl_priv *priv,
+ struct statistics_general_common *common,
+ struct statistics_rx_non_phy *rx_non_phy,
+ struct statistics_rx_phy *rx_ofdm,
+ struct statistics_rx_ht_phy *rx_ofdm_ht,
+ struct statistics_rx_phy *rx_cck,
+ struct statistics_tx *tx,
+ struct statistics_bt_activity *bt_activity)
+{
+}
+#endif
+
+static int iwlagn_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ unsigned long stamp = jiffies;
+ const int reg_recalib_period = 60;
+ int change;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ __le32 *flag;
+ struct statistics_general_common *common;
+ struct statistics_rx_non_phy *rx_non_phy;
+ struct statistics_rx_phy *rx_ofdm;
+ struct statistics_rx_ht_phy *rx_ofdm_ht;
+ struct statistics_rx_phy *rx_cck;
+ struct statistics_tx *tx;
+ struct statistics_bt_activity *bt_activity;
+
+ len -= sizeof(struct iwl_cmd_header); /* skip header */
+
+ IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
+ len);
+
+ if (len == sizeof(struct iwl_bt_notif_statistics)) {
+ struct iwl_bt_notif_statistics *stats;
+ stats = &pkt->u.stats_bt;
+ flag = &stats->flag;
+ common = &stats->general.common;
+ rx_non_phy = &stats->rx.general.common;
+ rx_ofdm = &stats->rx.ofdm;
+ rx_ofdm_ht = &stats->rx.ofdm_ht;
+ rx_cck = &stats->rx.cck;
+ tx = &stats->tx;
+ bt_activity = &stats->general.activity;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /* handle this exception directly */
+ priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills;
+ le32_add_cpu(&priv->statistics.accum_num_bt_kills,
+ le32_to_cpu(stats->rx.general.num_bt_kills));
+#endif
+ } else if (len == sizeof(struct iwl_notif_statistics)) {
+ struct iwl_notif_statistics *stats;
+ stats = &pkt->u.stats;
+ flag = &stats->flag;
+ common = &stats->general.common;
+ rx_non_phy = &stats->rx.general;
+ rx_ofdm = &stats->rx.ofdm;
+ rx_ofdm_ht = &stats->rx.ofdm_ht;
+ rx_cck = &stats->rx.cck;
+ tx = &stats->tx;
+ bt_activity = NULL;
+ } else {
+ WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
+ len, sizeof(struct iwl_bt_notif_statistics),
+ sizeof(struct iwl_notif_statistics));
+ return 0;
+ }
+
+ change = common->temperature != priv->statistics.common.temperature ||
+ (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+ (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK);
+
+ iwlagn_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm,
+ rx_ofdm_ht, rx_cck, tx, bt_activity);
+
+ iwlagn_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp);
+
+ priv->statistics.flag = *flag;
+ memcpy(&priv->statistics.common, common, sizeof(*common));
+ memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy));
+ memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm));
+ memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht));
+ memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck));
+ memcpy(&priv->statistics.tx, tx, sizeof(*tx));
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (bt_activity)
+ memcpy(&priv->statistics.bt_activity, bt_activity,
+ sizeof(*bt_activity));
+#endif
+
+ priv->rx_statistics_jiffies = stamp;
+
+ set_bit(STATUS_STATISTICS, &priv->shrd->status);
+
+ /* Reschedule the statistics timer to occur in
+ * reg_recalib_period seconds to ensure we get a
+ * thermal update even if the uCode doesn't give
+ * us one */
+ mod_timer(&priv->statistics_periodic, jiffies +
+ msecs_to_jiffies(reg_recalib_period * 1000));
+
+ if (unlikely(!test_bit(STATUS_SCANNING, &priv->shrd->status)) &&
+ (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
+ iwlagn_rx_calc_noise(priv);
+ queue_work(priv->shrd->workqueue, &priv->run_time_calib_work);
+ }
+ if (priv->cfg->lib->temperature && change)
+ priv->cfg->lib->temperature(priv);
+ return 0;
+}
+
+static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ memset(&priv->accum_stats, 0,
+ sizeof(priv->accum_stats));
+ memset(&priv->delta_stats, 0,
+ sizeof(priv->delta_stats));
+ memset(&priv->max_delta_stats, 0,
+ sizeof(priv->max_delta_stats));
+#endif
+ IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+ }
+ iwlagn_rx_statistics(priv, rxb, cmd);
+ return 0;
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+ unsigned long status = priv->shrd->status;
+
+ IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_CARD_DISABLED) ?
+ "Reached" : "Not reached");
+
+ if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
+ CT_CARD_DISABLED)) {
+
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
+ HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+ if (!(flags & RXON_CARD_DISABLED)) {
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+ iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
+ HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+ }
+ if (flags & CT_CARD_DISABLED)
+ iwl_tt_enter_ct_kill(priv);
+ }
+ if (!(flags & CT_CARD_DISABLED))
+ iwl_tt_exit_ct_kill(priv);
+
+ if (flags & HW_CARD_DISABLED)
+ set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
+
+
+ if (!(flags & RXON_CARD_DISABLED))
+ iwl_scan_cancel(priv);
+
+ if ((test_bit(STATUS_RF_KILL_HW, &status) !=
+ test_bit(STATUS_RF_KILL_HW, &priv->shrd->status)))
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+ test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
+ else
+ wake_up(&priv->shrd->wait_command_queue);
+ return 0;
+}
+
+static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_missed_beacon_notif *missed_beacon;
+
+ missed_beacon = &pkt->u.missed_beacon;
+ if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+ priv->missed_beacon_threshold) {
+ IWL_DEBUG_CALIB(priv,
+ "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+ le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+ le32_to_cpu(missed_beacon->total_missed_becons),
+ le32_to_cpu(missed_beacon->num_recvd_beacons),
+ le32_to_cpu(missed_beacon->num_expected_beacons));
+ if (!test_bit(STATUS_SCANNING, &priv->shrd->status))
+ iwl_init_sensitivity(priv);
+ }
+ return 0;
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
+ * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
+static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ priv->last_phy_res_valid = true;
+ memcpy(&priv->last_phy_res, pkt->u.raw,
+ sizeof(struct iwl_rx_phy_res));
+ return 0;
+}
+
+/*
+ * returns non-zero if packet should be dropped
+ */
+static int iwlagn_set_decrypted_flag(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ u32 decrypt_res,
+ struct ieee80211_rx_status *stats)
+{
+ u16 fc = le16_to_cpu(hdr->frame_control);
+
+ /*
+ * All contexts have the same setting here due to it being
+ * a module parameter, so OK to check any context.
+ */
+ if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
+ RXON_FILTER_DIS_DECRYPT_MSK)
+ return 0;
+
+ if (!(fc & IEEE80211_FCTL_PROTECTED))
+ return 0;
+
+ IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
+ switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ /* The uCode has got a bad phase 1 Key, pushes the packet.
+ * Decryption will be done in SW. */
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_KEY_TTAK)
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_WEP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_ICV_MIC) {
+ /* bad ICV, the packet is destroyed since the
+ * decryption is inplace, drop it */
+ IWL_DEBUG_RX(priv, "Packet destroyed\n");
+ return -1;
+ }
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_DECRYPT_OK) {
+ IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
+ stats->flag |= RX_FLAG_DECRYPTED;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ u16 len,
+ u32 ampdu_status,
+ struct iwl_rx_mem_buffer *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+ struct iwl_rxon_context *ctx;
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!priv->is_open)) {
+ IWL_DEBUG_DROP_LIMIT(priv,
+ "Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ /* In case of HW accelerated crypto and bad decryption, drop */
+ if (!iwlagn_mod_params.sw_crypto &&
+ iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats))
+ return;
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IWL_ERR(priv, "dev_alloc_skb failed\n");
+ return;
+ }
+
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+ iwl_update_stats(priv, false, fc, len);
+
+ /*
+ * Wake any queues that were stopped due to a passive channel tx
+ * failure. This can happen because the regulatory enforcement in
+ * the device waits for a beacon before allowing transmission,
+ * sometimes even after already having transmitted frames for the
+ * association because the new RXON may reset the information.
+ */
+ if (unlikely(ieee80211_is_beacon(fc))) {
+ for_each_context(priv, ctx) {
+ if (!ctx->last_tx_rejected)
+ continue;
+ if (compare_ether_addr(hdr->addr3,
+ ctx->active.bssid_addr))
+ continue;
+ ctx->last_tx_rejected = false;
+ iwl_trans_wake_any_queue(trans(priv), ctx->ctxid);
+ }
+ }
+
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(priv->hw, skb);
+ rxb->page = NULL;
+}
+
+static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
+{
+ u32 decrypt_out = 0;
+
+ if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+ RX_RES_STATUS_STATION_FOUND)
+ decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
+ RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+ decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+ /* packet was not encrypted */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_NONE)
+ return decrypt_out;
+
+ /* packet was encrypted with unknown alg */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_ERR)
+ return decrypt_out;
+
+ /* decryption was not done in HW */
+ if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+ RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+ return decrypt_out;
+
+ switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ /* alg is CCM: check MIC only */
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+ /* Bad MIC */
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+ /* Bad TTAK */
+ decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+ break;
+ }
+ /* fall through if TTAK OK */
+ default:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+ break;
+ }
+
+ IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
+ decrypt_in, decrypt_out);
+
+ return decrypt_out;
+}
+
+/* Calc max signal level (dBm) among 3 possible receivers */
+static int iwlagn_calc_rssi(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp)
+{
+ /* data from PHY/DSP regarding signal strength, etc.,
+ * contents are always there, not configurable by host
+ */
+ struct iwlagn_non_cfg_phy *ncphy =
+ (struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+ u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
+ u8 agc;
+
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]);
+ agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the digital signal processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's automatic gain control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info.
+ */
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]);
+ rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >>
+ IWLAGN_OFDM_RSSI_A_BIT_POS;
+ rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >>
+ IWLAGN_OFDM_RSSI_B_BIT_POS;
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]);
+ rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >>
+ IWLAGN_OFDM_RSSI_C_BIT_POS;
+
+ max_rssi = max_t(u32, rssi_a, rssi_b);
+ max_rssi = max_t(u32, max_rssi, rssi_c);
+
+ IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ rssi_a, rssi_b, rssi_c, max_rssi, agc);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
+ return max_rssi - agc - IWLAGN_RSSI_OFFSET;
+}
+
+/* Called for REPLY_RX (legacy ABG frames), or
+ * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
+static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_phy_res *phy_res;
+ __le32 rx_pkt_status;
+ struct iwl_rx_mpdu_res_start *amsdu;
+ u32 len;
+ u32 ampdu_status;
+ u32 rate_n_flags;
+
+ /**
+ * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
+ * REPLY_RX: physical layer info is in this buffer
+ * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
+ * command and cached in priv->last_phy_res
+ *
+ * Here we set up local variables depending on which command is
+ * received.
+ */
+ if (pkt->hdr.cmd == REPLY_RX) {
+ phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ + phy_res->cfg_phy_cnt);
+
+ len = le16_to_cpu(phy_res->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt + len);
+ ampdu_status = le32_to_cpu(rx_pkt_status);
+ } else {
+ if (!priv->last_phy_res_valid) {
+ IWL_ERR(priv, "MPDU frame without cached PHY data\n");
+ return 0;
+ }
+ phy_res = &priv->last_phy_res;
+ amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+ len = le16_to_cpu(amsdu->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
+ ampdu_status = iwlagn_translate_rx_status(priv,
+ le32_to_cpu(rx_pkt_status));
+ }
+
+ if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
+ IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
+ phy_res->cfg_phy_cnt);
+ return 0;
+ }
+
+ if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
+ le32_to_cpu(rx_pkt_status));
+ return 0;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+ rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+ rx_status.band);
+ rx_status.rate_idx =
+ iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+ rx_status.flag = 0;
+
+ /* TSF isn't reliable. In order to allow smooth user experience,
+ * this W/A doesn't propagate it to the mac80211 */
+ /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+
+ priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+ /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+ rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
+
+ iwl_dbg_log_rx_data_frame(priv, len, header);
+ IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
+ rx_status.signal, (unsigned long long)rx_status.mactime);
+
+ /*
+ * "antenna number"
+ *
+ * It seems that the antenna field in the phy flags value
+ * is actually a bit field. This is undefined by radiotap,
+ * it wants an actual antenna number but I always get "7"
+ * for most legacy frames I receive indicating that the
+ * same frame was received on all three RX chains.
+ *
+ * I think this field should be removed in favor of a
+ * new 802.11n radiotap field "RX chains" that is defined
+ * as a bitmask.
+ */
+ rx_status.antenna =
+ (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+ /* set the preamble flag if appropriate */
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ /* Set up the HT phy flags */
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ rx_status.flag |= RX_FLAG_HT;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ rx_status.flag |= RX_FLAG_40MHZ;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
+
+ iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+ rxb, &rx_status);
+ return 0;
+}
+
+/**
+ * iwl_setup_rx_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ */
+void iwl_setup_rx_handlers(struct iwl_priv *priv)
+{
+ int (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
+ handlers = priv->rx_handlers;
+
+ handlers[REPLY_ERROR] = iwlagn_rx_reply_error;
+ handlers[CHANNEL_SWITCH_NOTIFICATION] = iwlagn_rx_csa;
+ handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+ iwlagn_rx_spectrum_measure_notif;
+ handlers[PM_SLEEP_NOTIFICATION] = iwlagn_rx_pm_sleep_notif;
+ handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
+ iwlagn_rx_pm_debug_statistics_notif;
+ handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
+ handlers[REPLY_ADD_STA] = iwl_add_sta_callback;
+
+ /*
+ * The same handler is used for both the REPLY to a discrete
+ * statistics request from the host as well as for the periodic
+ * statistics notifications (after received beacons) from the uCode.
+ */
+ handlers[REPLY_STATISTICS_CMD] = iwlagn_rx_reply_statistics;
+ handlers[STATISTICS_NOTIFICATION] = iwlagn_rx_statistics;
+
+ iwl_setup_rx_scan_handlers(priv);
+
+ handlers[CARD_STATE_NOTIFICATION] = iwlagn_rx_card_state_notif;
+ handlers[MISSED_BEACONS_NOTIFICATION] =
+ iwlagn_rx_missed_beacon_notif;
+
+ /* Rx handlers */
+ handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
+ handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
+
+ /* block ack */
+ handlers[REPLY_COMPRESSED_BA] =
+ iwlagn_rx_reply_compressed_ba;
+
+ /* init calibration handlers */
+ priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
+ iwlagn_rx_calib_result;
+ priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
+
+ /* set up notification wait support */
+ spin_lock_init(&priv->notif_wait_lock);
+ INIT_LIST_HEAD(&priv->notif_waits);
+ init_waitqueue_head(&priv->notif_waitq);
+
+ /* Set up BT Rx handlers */
+ if (priv->cfg->lib->bt_rx_handler_setup)
+ priv->cfg->lib->bt_rx_handler_setup(priv);
+
+}
+
+int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ int err = 0;
+
+ /*
+ * Do the notification wait before RX handlers so
+ * even if the RX handler consumes the RXB we have
+ * access to it in the notification wait entry.
+ */
+ if (!list_empty(&priv->notif_waits)) {
+ struct iwl_notification_wait *w;
+
+ spin_lock(&priv->notif_wait_lock);
+ list_for_each_entry(w, &priv->notif_waits, list) {
+ if (w->cmd != pkt->hdr.cmd)
+ continue;
+ IWL_DEBUG_RX(priv,
+ "Notif: %s, 0x%02x - wake the callers up\n",
+ get_cmd_string(pkt->hdr.cmd),
+ pkt->hdr.cmd);
+ w->triggered = true;
+ if (w->fn)
+ w->fn(priv, pkt, w->fn_data);
+ }
+ spin_unlock(&priv->notif_wait_lock);
+
+ wake_up_all(&priv->notif_waitq);
+ }
+
+ if (priv->pre_rx_handler)
+ priv->pre_rx_handler(priv, rxb);
+
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * rx_handlers table. See iwl_setup_rx_handlers() */
+ if (priv->rx_handlers[pkt->hdr.cmd]) {
+ priv->rx_handlers_stats[pkt->hdr.cmd]++;
+ err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
+ } else {
+ /* No handling needed */
+ IWL_DEBUG_RX(priv,
+ "No handler needed for %s, 0x%02x\n",
+ get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ }
+ return err;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index d42ef1763a7..58a381c01c8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -26,11 +26,10 @@
#include "iwl-dev.h"
#include "iwl-agn.h"
-#include "iwl-sta.h"
#include "iwl-core.h"
#include "iwl-agn-calib.h"
-#include "iwl-helpers.h"
#include "iwl-trans.h"
+#include "iwl-shared.h"
static int iwlagn_disable_bss(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
@@ -40,7 +39,7 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
CMD_SYNC, sizeof(*send), send);
send->filter_flags = old_filter;
@@ -66,7 +65,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
send->dev_type = RXON_DEV_TYPE_P2P;
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
CMD_SYNC, sizeof(*send), send);
send->filter_flags = old_filter;
@@ -92,7 +91,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
sizeof(*send), send);
send->filter_flags = old_filter;
@@ -121,7 +120,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
ctx->qos_data.qos_active,
ctx->qos_data.def_qos_parm.qos_flags);
- ret = trans_send_cmd_pdu(&priv->trans, ctx->qos_cmd, CMD_SYNC,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->qos_cmd, CMD_SYNC,
sizeof(struct iwl_qosparam_cmd),
&ctx->qos_data.def_qos_parm);
if (ret)
@@ -131,7 +130,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
static int iwlagn_update_beacon(struct iwl_priv *priv,
struct ieee80211_vif *vif)
{
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
dev_kfree_skb(priv->beacon_skb);
priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
@@ -180,7 +179,7 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
ctx->staging.ofdm_ht_triple_stream_basic_rates;
rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_assoc_cmd,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_assoc_cmd,
CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
return ret;
}
@@ -266,7 +265,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
* Associated RXON doesn't clear the station table in uCode,
* so we don't need to restore stations etc. after this.
*/
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
sizeof(struct iwl_rxon_cmd), &ctx->staging);
if (ret) {
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
@@ -295,8 +294,8 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
return ret;
}
- if ((ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) &&
- priv->cfg->ht_params->smps_mode)
+ if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
+ priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
ieee80211_request_smps(ctx->vif,
priv->cfg->ht_params->smps_mode);
@@ -310,12 +309,12 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
int slot0 = 300, slot1 = 0;
int ret;
- if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
+ if (priv->shrd->valid_contexts == BIT(IWL_RXON_CTX_BSS))
return 0;
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
@@ -337,10 +336,10 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
cmd.slots[0].type = 0; /* BSS */
cmd.slots[1].type = 1; /* PAN */
- if (priv->hw_roc_channel) {
+ if (priv->hw_roc_setup) {
/* both contexts must be used for this to happen */
- slot1 = priv->hw_roc_duration;
- slot0 = IWL_MIN_SLOT_TIME;
+ slot1 = IWL_MIN_SLOT_TIME;
+ slot0 = 3000;
} else if (ctx_bss->vif && ctx_pan->vif) {
int bcnint = ctx_pan->beacon_int;
int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
@@ -362,14 +361,14 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
slot0 = bcnint / 2;
slot1 = bcnint - slot0;
- if (test_bit(STATUS_SCAN_HW, &priv->status) ||
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
(!ctx_bss->vif->bss_conf.idle &&
!ctx_bss->vif->bss_conf.assoc)) {
slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
slot1 = IWL_MIN_SLOT_TIME;
} else if (!ctx_pan->vif->bss_conf.idle &&
!ctx_pan->vif->bss_conf.assoc) {
- slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
+ slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
slot0 = IWL_MIN_SLOT_TIME;
}
} else if (ctx_pan->vif) {
@@ -378,7 +377,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
ctx_pan->beacon_int;
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
slot1 = IWL_MIN_SLOT_TIME;
}
@@ -387,7 +386,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
cmd.slots[0].width = cpu_to_le16(slot0);
cmd.slots[1].width = cpu_to_le16(slot1);
- ret = trans_send_cmd_pdu(&priv->trans, REPLY_WIPAN_PARAMS, CMD_SYNC,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WIPAN_PARAMS, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
@@ -420,12 +419,12 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return -EINVAL;
- if (!iwl_is_alive(priv))
+ if (!iwl_is_alive(priv->shrd))
return -EBUSY;
/* This function hardcodes a bunch of dual-mode assumptions */
@@ -434,26 +433,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (!ctx->is_active)
return 0;
+ /* override BSSID if necessary due to preauth */
+ if (ctx->preauth_bssid)
+ memcpy(ctx->staging.bssid_addr, ctx->bssid, ETH_ALEN);
+
/* always get timestamp with Rx frame */
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
- if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->hw_roc_channel) {
- struct ieee80211_channel *chan = priv->hw_roc_channel;
-
- iwl_set_rxon_channel(priv, chan, ctx);
- iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
- ctx->staging.filter_flags |=
- RXON_FILTER_ASSOC_MSK |
- RXON_FILTER_PROMISC_MSK |
- RXON_FILTER_CTL2HOST_MSK;
- ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
- new_assoc = true;
-
- if (memcmp(&ctx->staging, &ctx->active,
- sizeof(ctx->staging)) == 0)
- return 0;
- }
-
/*
* force CTS-to-self frames protection if RTS-CTS is not preferred
* one aggregation protection method
@@ -468,7 +454,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
else
ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- iwl_print_rx_config_cmd(priv, ctx);
+ iwl_print_rx_config_cmd(priv, ctx->ctxid);
ret = iwl_check_rxon_cmd(priv, ctx);
if (ret) {
IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
@@ -479,7 +465,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* receive commit_rxon request
* abort any previous channel switch if still in process
*/
- if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
+ if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status) &&
(priv->switch_channel != ctx->staging.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
le16_to_cpu(priv->switch_channel));
@@ -551,16 +537,16 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
const struct iwl_channel_info *ch_info;
int ret = 0;
- IWL_DEBUG_MAC80211(priv, "changed %#x", changed);
+ IWL_DEBUG_MAC80211(priv, "enter: changed %#x", changed);
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
+ if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
goto out;
}
- if (!iwl_is_ready(priv)) {
+ if (!iwl_is_ready(priv->shrd)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
}
@@ -592,7 +578,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
goto out;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
for_each_context(priv, ctx) {
/* Configure HT40 channels */
@@ -636,7 +622,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
ctx->vif);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
iwl_update_bcast_stations(priv);
@@ -668,7 +654,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
iwlagn_commit_rxon(priv, ctx);
}
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
return ret;
}
@@ -683,7 +671,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
struct ieee80211_sta_ht_cap *ht_cap;
bool need_multiple;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@@ -787,7 +775,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr,
priv->phy_calib_chain_noise_reset_cmd);
- ret = trans_send_cmd_pdu(&priv->trans,
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_PHY_CALIBRATION_CMD,
CMD_SYNC, sizeof(cmd), &cmd);
if (ret)
@@ -808,17 +796,17 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
int ret;
bool force = false;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (unlikely(!iwl_is_ready(priv))) {
+ if (unlikely(!iwl_is_ready(priv->shrd))) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return;
}
if (unlikely(!ctx->vif)) {
IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return;
}
@@ -851,7 +839,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
*/
if (ctx->last_tx_rejected) {
ctx->last_tx_rejected = false;
- iwl_wake_any_queue(priv, ctx);
+ iwl_trans_wake_any_queue(trans(priv),
+ ctx->ctxid);
}
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -912,6 +901,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (!priv->disable_chain_noise_cal)
iwlagn_chain_noise_reset(priv);
priv->start_calib = 1;
+ WARN_ON(ctx->preauth_bssid);
}
if (changes & BSS_CHANGED_IBSS) {
@@ -929,7 +919,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
IWL_ERR(priv, "Error sending IBSS beacon\n");
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
void iwlagn_post_scan(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index 37e624095e4..ed628362393 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -31,25 +31,833 @@
#include "iwl-dev.h"
#include "iwl-core.h"
-#include "iwl-sta.h"
#include "iwl-agn.h"
#include "iwl-trans.h"
-static struct iwl_link_quality_cmd *
-iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id)
+/* priv->shrd->sta_lock must be held */
+static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
+{
+
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
+ IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u "
+ "addr %pM\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_ASSOC(priv,
+ "STA id %u addr %pM already present in uCode "
+ "(according to driver)\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ } else {
+ priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ }
+}
+
+static int iwl_process_add_sta_resp(struct iwl_priv *priv,
+ struct iwl_addsta_cmd *addsta,
+ struct iwl_rx_packet *pkt)
+{
+ u8 sta_id = addsta->sta.sta_id;
+ unsigned long flags;
+ int ret = -EIO;
+
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
+ pkt->hdr.flags);
+ return ret;
+ }
+
+ IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
+ sta_id);
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+ switch (pkt->u.add_sta.status) {
+ case ADD_STA_SUCCESS_MSK:
+ IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
+ iwl_sta_ucode_activate(priv, sta_id);
+ ret = 0;
+ break;
+ case ADD_STA_NO_ROOM_IN_TABLE:
+ IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
+ sta_id);
+ break;
+ case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+ IWL_ERR(priv, "Adding station %d failed, no block ack "
+ "resource.\n", sta_id);
+ break;
+ case ADD_STA_MODIFY_NON_EXIST_STA:
+ IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
+ sta_id);
+ break;
+ default:
+ IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+ pkt->u.add_sta.status);
+ break;
+ }
+
+ IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ /*
+ * XXX: The MAC address in the command buffer is often changed from
+ * the original sent to the device. That is, the MAC address
+ * written to the command buffer often is not the same MAC address
+ * read from the command buffer when the command returns. This
+ * issue has not yet been resolved and this debugging is left to
+ * observe the problem.
+ */
+ IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ addsta->sta.addr);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+ return ret;
+}
+
+int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_addsta_cmd *addsta =
+ (struct iwl_addsta_cmd *) cmd->payload;
+
+ return iwl_process_add_sta_resp(priv, addsta, pkt);
+}
+
+static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
+{
+ u16 size = (u16)sizeof(struct iwl_addsta_cmd);
+ struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
+ memcpy(addsta, cmd, size);
+ /* resrved in 5000 */
+ addsta->rate_n_flags = cpu_to_le16(0);
+ return size;
+}
+
+int iwl_send_add_sta(struct iwl_priv *priv,
+ struct iwl_addsta_cmd *sta, u8 flags)
+{
+ int ret = 0;
+ u8 data[sizeof(*sta)];
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_ADD_STA,
+ .flags = flags,
+ .data = { data, },
+ };
+ u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+ IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
+ sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
+
+ if (!(flags & CMD_ASYNC)) {
+ cmd.flags |= CMD_WANT_SKB;
+ might_sleep();
+ }
+
+ cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data);
+ ret = iwl_trans_send_cmd(trans(priv), &cmd);
+
+ if (ret || (flags & CMD_ASYNC))
+ return ret;
+ /*else the command was successfully sent in SYNC mode, need to free
+ * the reply page */
+
+ iwl_free_pages(priv->shrd, cmd.reply_page);
+
+ if (cmd.handler_status)
+ IWL_ERR(priv, "%s - error in the CMD response %d", __func__,
+ cmd.handler_status);
+
+ return cmd.handler_status;
+}
+
+static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
+ struct ieee80211_sta *sta,
+ struct iwl_rxon_context *ctx)
+{
+ struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+ __le32 sta_flags;
+ u8 mimo_ps_mode;
+
+ if (!sta || !sta_ht_inf->ht_supported)
+ goto done;
+
+ mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+ IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
+ "static" :
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
+ "dynamic" : "disabled");
+
+ sta_flags = priv->stations[index].sta.station_flags;
+
+ sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+ switch (mimo_ps_mode) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ sta_flags |= STA_FLG_MIMO_DIS_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ break;
+ default:
+ IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ break;
+ }
+
+ sta_flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+ sta_flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+ if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ sta_flags |= STA_FLG_HT40_EN_MSK;
+ else
+ sta_flags &= ~STA_FLG_HT40_EN_MSK;
+
+ priv->stations[index].sta.station_flags = sta_flags;
+ done:
+ return;
+}
+
+/**
+ * iwl_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
+ */
+u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+ struct iwl_station_entry *station;
+ int i;
+ u8 sta_id = IWL_INVALID_STATION;
+
+ if (is_ap)
+ sta_id = ctx->ap_sta_id;
+ else if (is_broadcast_ether_addr(addr))
+ sta_id = ctx->bcast_sta_id;
+ else
+ for (i = IWL_STA_ID; i < IWLAGN_STATION_COUNT; i++) {
+ if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
+ addr)) {
+ sta_id = i;
+ break;
+ }
+
+ if (!priv->stations[i].used &&
+ sta_id == IWL_INVALID_STATION)
+ sta_id = i;
+ }
+
+ /*
+ * These two conditions have the same outcome, but keep them
+ * separate
+ */
+ if (unlikely(sta_id == IWL_INVALID_STATION))
+ return sta_id;
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv, "STA %d already in process of being "
+ "added.\n", sta_id);
+ return sta_id;
+ }
+
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
+ !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
+ IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
+ "adding again.\n", sta_id, addr);
+ return sta_id;
+ }
+
+ station = &priv->stations[sta_id];
+ station->used = IWL_STA_DRIVER_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
+ sta_id, addr);
+ priv->num_stations++;
+
+ /* Set up the REPLY_ADD_STA command to send to device */
+ memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
+ memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+ station->sta.mode = 0;
+ station->sta.sta.sta_id = sta_id;
+ station->sta.station_flags = ctx->station_flags;
+ station->ctxid = ctx->ctxid;
+
+ if (sta) {
+ struct iwl_station_priv *sta_priv;
+
+ sta_priv = (void *)sta->drv_priv;
+ sta_priv->ctx = ctx;
+ }
+
+ /*
+ * OK to call unconditionally, since local stations (IBSS BSSID
+ * STA and broadcast STA) pass in a NULL sta, and mac80211
+ * doesn't allow HT IBSS.
+ */
+ iwl_set_ht_add_station(priv, sta_id, sta, ctx);
+
+ return sta_id;
+
+}
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * iwl_add_station_common -
+ */
+int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r)
+{
+ unsigned long flags_spin;
+ int ret = 0;
+ u8 sta_id;
+ struct iwl_addsta_cmd sta_cmd;
+
+ *sta_id_r = 0;
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
+ addr);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv, "STA %d already in process of being "
+ "added.\n", sta_id);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
+ "adding again.\n", sta_id, addr);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_addsta_cmd));
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+
+ /* Add station to device's station table */
+ ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ priv->stations[sta_id].sta.sta.addr);
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ }
+ *sta_id_r = sta_id;
+ return ret;
+}
+
+/**
+ * iwl_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * priv->shrd->sta_lock must be held
+ */
+static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
+{
+ /* Ucode must be active and driver must be non active */
+ if ((priv->stations[sta_id].used &
+ (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
+ IWL_STA_UCODE_ACTIVE)
+ IWL_ERR(priv, "removed non active STA %u\n", sta_id);
+
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
+
+ memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
+ IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
+}
+
+static int iwl_send_remove_station(struct iwl_priv *priv,
+ const u8 *addr, int sta_id,
+ bool temporary)
+{
+ struct iwl_rx_packet *pkt;
+ int ret;
+
+ unsigned long flags_spin;
+ struct iwl_rem_sta_cmd rm_sta_cmd;
+
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_REMOVE_STA,
+ .len = { sizeof(struct iwl_rem_sta_cmd), },
+ .flags = CMD_SYNC,
+ .data = { &rm_sta_cmd, },
+ };
+
+ memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+ rm_sta_cmd.num_sta = 1;
+ memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+ cmd.flags |= CMD_WANT_SKB;
+
+ ret = iwl_trans_send_cmd(trans(priv), &cmd);
+
+ if (ret)
+ return ret;
+
+ pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
+ pkt->hdr.flags);
+ ret = -EIO;
+ }
+
+ if (!ret) {
+ switch (pkt->u.rem_sta.status) {
+ case REM_STA_SUCCESS_MSK:
+ if (!temporary) {
+ spin_lock_irqsave(&priv->shrd->sta_lock,
+ flags_spin);
+ iwl_sta_ucode_deactivate(priv, sta_id);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock,
+ flags_spin);
+ }
+ IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+ break;
+ }
+ }
+ iwl_free_pages(priv->shrd, cmd.reply_page);
+
+ return ret;
+}
+
+/**
+ * iwl_remove_station - Remove driver's knowledge of station.
+ */
+int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr)
+{
+ unsigned long flags;
+
+ if (!iwl_is_ready(priv->shrd)) {
+ IWL_DEBUG_INFO(priv,
+ "Unable to remove station %pM, device not ready.\n",
+ addr);
+ /*
+ * It is typical for stations to be removed when we are
+ * going down. Return success since device will be down
+ * soon anyway
+ */
+ return 0;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
+ sta_id, addr);
+
+ if (WARN_ON(sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
+ addr);
+ goto out_err;
+ }
+
+ if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
+ addr);
+ goto out_err;
+ }
+
+ if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
+ kfree(priv->stations[sta_id].lq);
+ priv->stations[sta_id].lq = NULL;
+ }
+
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+
+ priv->num_stations--;
+
+ if (WARN_ON(priv->num_stations < 0))
+ priv->num_stations = 0;
+
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+ return iwl_send_remove_station(priv, addr, sta_id, false);
+out_err:
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return -EINVAL;
+}
+
+/**
+ * iwl_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void iwl_clear_ucode_stations(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ int i;
+ unsigned long flags_spin;
+ bool cleared = false;
+
+ IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+ if (ctx && ctx->ctxid != priv->stations[i].ctxid)
+ continue;
+
+ if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_INFO(priv,
+ "Clearing ucode active for station %d\n", i);
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ cleared = true;
+ }
+ }
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+
+ if (!cleared)
+ IWL_DEBUG_INFO(priv,
+ "No active stations found to be cleared\n");
+}
+
+/**
+ * iwl_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by driver, but not present in ucode, is
+ * restored.
+ *
+ * Function sleeps.
+ */
+void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ struct iwl_addsta_cmd sta_cmd;
+ struct iwl_link_quality_cmd lq;
+ unsigned long flags_spin;
+ int i;
+ bool found = false;
+ int ret;
+ bool send_lq;
+
+ if (!iwl_is_ready(priv->shrd)) {
+ IWL_DEBUG_INFO(priv,
+ "Not ready yet, not restoring any stations.\n");
+ return;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+ if (ctx->ctxid != priv->stations[i].ctxid)
+ continue;
+ if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
+ !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
+ priv->stations[i].sta.sta.addr);
+ priv->stations[i].sta.mode = 0;
+ priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
+ found = true;
+ }
+ }
+
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+ if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
+ memcpy(&sta_cmd, &priv->stations[i].sta,
+ sizeof(struct iwl_addsta_cmd));
+ send_lq = false;
+ if (priv->stations[i].lq) {
+ if (priv->shrd->wowlan)
+ iwl_sta_fill_lq(priv, ctx, i, &lq);
+ else
+ memcpy(&lq, priv->stations[i].lq,
+ sizeof(struct iwl_link_quality_cmd));
+ send_lq = true;
+ }
+ spin_unlock_irqrestore(&priv->shrd->sta_lock,
+ flags_spin);
+ ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&priv->shrd->sta_lock,
+ flags_spin);
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ priv->stations[i].sta.sta.addr);
+ priv->stations[i].used &=
+ ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[i].used &=
+ ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock,
+ flags_spin);
+ }
+ /*
+ * Rate scaling has already been initialized, send
+ * current LQ command
+ */
+ if (send_lq)
+ iwl_send_lq_cmd(priv, ctx, &lq,
+ CMD_SYNC, true);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ if (!found)
+ IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
+ "no stations to be restored.\n");
+ else
+ IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
+ "complete.\n");
+}
+
+void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ unsigned long flags;
+ int sta_id = ctx->ap_sta_id;
+ int ret;
+ struct iwl_addsta_cmd sta_cmd;
+ struct iwl_link_quality_cmd lq;
+ bool active;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return;
+ }
+
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
+ sta_cmd.mode = 0;
+ memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
+
+ active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+ if (active) {
+ ret = iwl_send_remove_station(
+ priv, priv->stations[sta_id].sta.sta.addr,
+ sta_id, true);
+ if (ret)
+ IWL_ERR(priv, "failed to remove STA %pM (%d)\n",
+ priv->stations[sta_id].sta.sta.addr, ret);
+ }
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+ ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ if (ret)
+ IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
+ priv->stations[sta_id].sta.sta.addr, ret);
+ iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
+}
+
+int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->sta_key_max_num; i++)
+ if (!test_and_set_bit(i, &priv->ucode_key_table))
+ return i;
+
+ return WEP_INVALID_OFFSET;
+}
+
+void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+ if (!(priv->stations[i].used & IWL_STA_BCAST))
+ continue;
+
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ priv->num_stations--;
+ if (WARN_ON(priv->num_stations < 0))
+ priv->num_stations = 0;
+ kfree(priv->stations[i].lq);
+ priv->stations[i].lq = NULL;
+ }
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+static void iwl_dump_lq_cmd(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq)
+{
+ int i;
+ IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
+ IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
+ lq->general_params.single_stream_ant_msk,
+ lq->general_params.dual_stream_ant_msk);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
+ i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens when a HT rate has been in use and we
+ * loose connectivity with AP then mac80211 will first tell us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are not
+ * communicating HT anymore, but the LQ command may still contain HT rates.
+ * Test for this to prevent driver from sending LQ command between the time
+ * RXON flags are updated and when LQ command is updated.
+ */
+static bool is_lq_table_valid(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq)
+{
+ int i;
+
+ if (ctx->ht.enabled)
+ return true;
+
+ IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
+ ctx->active.channel);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
+ RATE_MCS_HT_MSK) {
+ IWL_DEBUG_INFO(priv,
+ "index %d of LQ expects HT channel\n",
+ i);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * iwl_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ * after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * This is the special case in which init is set and we call a callback in
+ * this case to clear the state indicating that station creation is in
+ * progress.
+ */
+int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq, u8 flags, bool init)
+{
+ int ret = 0;
+ unsigned long flags_spin;
+
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_TX_LINK_QUALITY_CMD,
+ .len = { sizeof(struct iwl_link_quality_cmd), },
+ .flags = flags,
+ .data = { lq, },
+ };
+
+ if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+
+ iwl_dump_lq_cmd(priv, lq);
+ if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
+ return -EINVAL;
+
+ if (is_lq_table_valid(priv, ctx, lq))
+ ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ else
+ ret = -EINVAL;
+
+ if (cmd.flags & CMD_ASYNC)
+ return ret;
+
+ if (init) {
+ IWL_DEBUG_INFO(priv, "init LQ command complete, "
+ "clearing sta addition status for sta %d\n",
+ lq->sta_id);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ }
+ return ret;
+}
+
+int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
+ "station %pM\n", sta->addr);
+ mutex_lock(&priv->shrd->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+ sta->addr);
+ ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
+ if (ret)
+ IWL_ERR(priv, "Error removing station %pM\n",
+ sta->addr);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
{
int i, r;
- struct iwl_link_quality_cmd *link_cmd;
u32 rate_flags = 0;
__le32 rate_n_flags;
- link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
- if (!link_cmd) {
- IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
- return NULL;
- }
+ lockdep_assert_held(&priv->shrd->mutex);
- lockdep_assert_held(&priv->mutex);
+ memset(link_cmd, 0, sizeof(*link_cmd));
/* Set up the rate scaling to start at selected rate, fall back
* all the way down to 1M in IEEE order, and then spin on 1M */
@@ -63,30 +871,46 @@ iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id)
if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
rate_flags |= RATE_MCS_CCK_MSK;
- rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
+ rate_flags |= first_antenna(hw_params(priv).valid_tx_ant) <<
RATE_MCS_ANT_POS;
rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
link_cmd->general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~first_antenna(priv->hw_params.valid_tx_ant);
+ hw_params(priv).valid_tx_ant &
+ ~first_antenna(hw_params(priv).valid_tx_ant);
if (!link_cmd->general_params.dual_stream_ant_msk) {
link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
+ hw_params(priv).valid_tx_ant;
}
- link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ link_cmd->agg_params.agg_dis_start_th =
+ LINK_QUAL_AGG_DISABLE_START_DEF;
link_cmd->agg_params.agg_time_limit =
cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
link_cmd->sta_id = sta_id;
+}
+
+static struct iwl_link_quality_cmd *
+iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ u8 sta_id)
+{
+ struct iwl_link_quality_cmd *link_cmd;
+
+ link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
+ if (!link_cmd) {
+ IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
+ return NULL;
+ }
+
+ iwl_sta_fill_lq(priv, ctx, sta_id, link_cmd);
return link_cmd;
}
@@ -96,7 +920,8 @@ iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id)
*
* Function sleeps.
*/
-int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+int iwlagn_add_bssid_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
const u8 *addr, u8 *sta_id_r)
{
int ret;
@@ -116,14 +941,15 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx
if (sta_id_r)
*sta_id_r = sta_id;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].used |= IWL_STA_LOCAL;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
/* Set up default rate scaling table in device's station table */
link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
if (!link_cmd) {
- IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n",
+ IWL_ERR(priv,
+ "Unable to initialize rate scaling for station %pM.\n",
addr);
return -ENOMEM;
}
@@ -132,9 +958,9 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx
if (ret)
IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
@@ -189,7 +1015,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
cmd.len[0] = cmd_size;
if (not_empty || send_if_empty)
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
else
return 0;
}
@@ -197,7 +1023,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
int iwl_restore_default_wep_keys(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
return iwl_send_static_wepkey_cmd(priv, ctx, false);
}
@@ -208,14 +1034,15 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
{
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
keyconf->keyidx);
memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
- if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
+ if (iwl_is_rfkill(priv->shrd)) {
+ IWL_DEBUG_WEP(priv,
+ "Not sending REPLY_WEPKEY command due to RFKILL.\n");
/* but keys in device are clear anyway so return success */
return 0;
}
@@ -232,11 +1059,12 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
{
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
if (keyconf->keylen != WEP_KEY_LEN_128 &&
keyconf->keylen != WEP_KEY_LEN_64) {
- IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
+ IWL_DEBUG_WEP(priv,
+ "Bad WEP key length %d\n", keyconf->keylen);
return -EINVAL;
}
@@ -311,9 +1139,9 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
struct iwl_addsta_cmd sta_cmd;
int i;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
key_flags |= STA_KEY_FLG_MAP_KEY_MSK;
@@ -388,16 +1216,16 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
if (sta_id == IWL_INVALID_STATION)
return -ENOENT;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
sta_id = IWL_INVALID_STATION;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
if (sta_id == IWL_INVALID_STATION)
return 0;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
ctx->key_mapping_keys--;
@@ -430,7 +1258,7 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
if (sta_id == IWL_INVALID_STATION)
return -EINVAL;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
@@ -493,18 +1321,18 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
unsigned long flags;
u8 sta_id;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Unable to prepare broadcast station\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return -EINVAL;
}
priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
priv->stations[sta_id].used |= IWL_STA_BCAST;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
if (!link_cmd) {
@@ -513,9 +1341,9 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
return -ENOMEM;
}
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
@@ -539,13 +1367,13 @@ int iwl_update_bcast_station(struct iwl_priv *priv,
return -ENOMEM;
}
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
if (priv->stations[sta_id].lq)
kfree(priv->stations[sta_id].lq);
else
IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
@@ -572,15 +1400,15 @@ int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
unsigned long flags;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
/* Remove "disable" flag, to enable Tx for this TID */
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
@@ -592,20 +1420,20 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
int sta_id;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION)
return -ENXIO;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags_msk = 0;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
@@ -617,7 +1445,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
int sta_id;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
@@ -625,13 +1453,13 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
return -ENXIO;
}
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags_msk = 0;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
@@ -640,14 +1468,14 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
{
unsigned long flags;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.sta.modify_mask = 0;
priv->stations[sta_id].sta.sleep_tx_count = 0;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
}
@@ -655,7 +1483,7 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
{
unsigned long flags;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.sta.modify_mask =
@@ -663,7 +1491,7 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
}
@@ -676,6 +1504,8 @@ void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
int sta_id;
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
switch (cmd) {
case STA_NOTIFY_SLEEP:
WARN_ON(!sta_priv->client);
@@ -695,4 +1525,5 @@ void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
default:
break;
}
+ IWL_DEBUG_MAC80211(priv, "leave\n");
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index f501d742984..c27180a7335 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -114,9 +114,6 @@ static bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
s32 temp = priv->temperature; /* degrees CELSIUS except specified */
bool within_margin = false;
- if (priv->cfg->base_params->temperature_kelvin)
- temp = KELVIN_TO_CELSIUS(priv->temperature);
-
if (!priv->thermal_throttle.advanced_tt)
within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
CT_KILL_THRESHOLD_LEGACY) ? true : false;
@@ -176,24 +173,24 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
unsigned long flags;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
if (tt->state == IWL_TI_CT_KILL) {
if (priv->thermal_throttle.ct_kill_toggle) {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
priv->thermal_throttle.ct_kill_toggle = false;
} else {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
priv->thermal_throttle.ct_kill_toggle = true;
}
- iwl_read32(priv, CSR_UCODE_DRV_GP1);
- spin_lock_irqsave(&priv->reg_lock, flags);
- if (!iwl_grab_nic_access(priv))
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ iwl_read32(bus(priv), CSR_UCODE_DRV_GP1);
+ spin_lock_irqsave(&bus(priv)->reg_lock, flags);
+ if (!iwl_grab_nic_access(bus(priv)))
+ iwl_release_nic_access(bus(priv));
+ spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
/* Reschedule the ct_kill timer to occur in
* CT_KILL_EXIT_DURATION seconds to ensure we get a
@@ -227,7 +224,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data)
struct iwl_priv *priv = (struct iwl_priv *)data;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
/* temperature timer expired, ready to go into CT_KILL state */
@@ -235,7 +232,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data)
IWL_DEBUG_TEMP(priv, "entering CT_KILL state when "
"temperature timer expired\n");
tt->state = IWL_TI_CT_KILL;
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL, &priv->shrd->status);
iwl_perform_ct_kill_task(priv, true);
}
}
@@ -313,23 +310,24 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
tt->tt_power_mode = IWL_POWER_INDEX_5;
break;
}
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if (old_state == IWL_TI_CT_KILL)
- clear_bit(STATUS_CT_KILL, &priv->status);
+ clear_bit(STATUS_CT_KILL, &priv->shrd->status);
if (tt->state != IWL_TI_CT_KILL &&
iwl_power_update_mode(priv, true)) {
/* TT state not updated
* try again during next temperature read
*/
if (old_state == IWL_TI_CT_KILL)
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL, &priv->shrd->status);
tt->state = old_state;
IWL_ERR(priv, "Cannot update power mode, "
"TT state not updated\n");
} else {
if (tt->state == IWL_TI_CT_KILL) {
if (force) {
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL,
+ &priv->shrd->status);
iwl_perform_ct_kill_task(priv, true);
} else {
iwl_prepare_ct_kill_task(priv);
@@ -343,7 +341,7 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
tt->tt_power_mode);
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
}
@@ -453,9 +451,9 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
* in case get disabled before */
iwl_set_rxon_ht(priv, &priv->current_ht_config);
}
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if (old_state == IWL_TI_CT_KILL)
- clear_bit(STATUS_CT_KILL, &priv->status);
+ clear_bit(STATUS_CT_KILL, &priv->shrd->status);
if (tt->state != IWL_TI_CT_KILL &&
iwl_power_update_mode(priv, true)) {
/* TT state not updated
@@ -464,7 +462,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
IWL_ERR(priv, "Cannot update power mode, "
"TT state not updated\n");
if (old_state == IWL_TI_CT_KILL)
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL, &priv->shrd->status);
tt->state = old_state;
} else {
IWL_DEBUG_TEMP(priv,
@@ -475,7 +473,8 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
if (force) {
IWL_DEBUG_TEMP(priv,
"Enter IWL_TI_CT_KILL\n");
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL,
+ &priv->shrd->status);
iwl_perform_ct_kill_task(priv, true);
} else {
iwl_prepare_ct_kill_task(priv);
@@ -487,7 +486,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
iwl_perform_ct_kill_task(priv, false);
}
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
}
@@ -506,10 +505,10 @@ static void iwl_bg_ct_enter(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
- if (!iwl_is_ready(priv))
+ if (!iwl_is_ready(priv->shrd))
return;
if (tt->state != IWL_TI_CT_KILL) {
@@ -535,10 +534,10 @@ static void iwl_bg_ct_exit(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
- if (!iwl_is_ready(priv))
+ if (!iwl_is_ready(priv->shrd))
return;
/* stop ct_kill_exit_tm timer */
@@ -565,20 +564,20 @@ static void iwl_bg_ct_exit(struct work_struct *work)
void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n");
- queue_work(priv->workqueue, &priv->ct_enter);
+ queue_work(priv->shrd->workqueue, &priv->ct_enter);
}
void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n");
- queue_work(priv->workqueue, &priv->ct_exit);
+ queue_work(priv->shrd->workqueue, &priv->ct_exit);
}
static void iwl_bg_tt_work(struct work_struct *work)
@@ -586,12 +585,9 @@ static void iwl_bg_tt_work(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
s32 temp = priv->temperature; /* degrees CELSIUS except specified */
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
- if (priv->cfg->base_params->temperature_kelvin)
- temp = KELVIN_TO_CELSIUS(priv->temperature);
-
if (!priv->thermal_throttle.advanced_tt)
iwl_legacy_tt_handler(priv, temp, false);
else
@@ -600,11 +596,11 @@ static void iwl_bg_tt_work(struct work_struct *work)
void iwl_tt_handler(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n");
- queue_work(priv->workqueue, &priv->tt_work);
+ queue_work(priv->shrd->workqueue, &priv->tt_work);
}
/* Thermal throttling initialization
@@ -639,11 +635,13 @@ void iwl_tt_initialize(struct iwl_priv *priv)
if (priv->cfg->base_params->adv_thermal_throttle) {
IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
- tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
- IWL_TI_STATE_MAX, GFP_KERNEL);
- tt->transaction = kzalloc(sizeof(struct iwl_tt_trans) *
- IWL_TI_STATE_MAX * (IWL_TI_STATE_MAX - 1),
- GFP_KERNEL);
+ tt->restriction = kcalloc(IWL_TI_STATE_MAX,
+ sizeof(struct iwl_tt_restriction),
+ GFP_KERNEL);
+ tt->transaction = kcalloc(IWL_TI_STATE_MAX *
+ (IWL_TI_STATE_MAX - 1),
+ sizeof(struct iwl_tt_trans),
+ GFP_KERNEL);
if (!tt->restriction || !tt->transaction) {
IWL_ERR(priv, "Fallback to Legacy Throttling\n");
priv->thermal_throttle.advanced_tt = false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
index d118ed29bf3..7282a23e8f1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
@@ -117,7 +117,6 @@ struct iwl_tt_mgmt {
u8 iwl_tt_current_power_mode(struct iwl_priv *priv);
bool iwl_tt_is_low_power_state(struct iwl_priv *priv);
bool iwl_ht_enabled(struct iwl_priv *priv);
-bool iwl_check_for_ct_kill(struct iwl_priv *priv);
enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 53bb59ee719..35a6b71f358 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -31,89 +31,15 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/ieee80211.h>
#include "iwl-dev.h"
#include "iwl-core.h"
-#include "iwl-sta.h"
#include "iwl-io.h"
-#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-trans.h"
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get as from it
- * by way of skb_get_queue_mapping(skb):
- *
- * VO 0
- * VI 1
- * BE 2
- * BK 3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific modules like
- * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
- * mapping.
- */
-
-static const u8 tid_to_ac[] = {
- IEEE80211_AC_BE,
- IEEE80211_AC_BK,
- IEEE80211_AC_BK,
- IEEE80211_AC_BE,
- IEEE80211_AC_VI,
- IEEE80211_AC_VI,
- IEEE80211_AC_VO,
- IEEE80211_AC_VO
-};
-
-static inline int get_ac_from_tid(u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return tid_to_ac[tid];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
-
-static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return ctx->ac_to_fifo[tid_to_ac[tid]];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
-
-static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
- int tid)
-{
- if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWLAGN_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
- IWLAGN_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- /* Modify device's station table to Tx this TID */
- return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
-}
-
static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
struct ieee80211_tx_info *info,
__le16 fc, __le32 *tx_flags)
@@ -128,11 +54,10 @@ static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
* handle build REPLY_TX command notification.
*/
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- u8 std_id)
+ struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 sta_id)
{
__le16 fc = hdr->frame_control;
__le32 tx_flags = tx_cmd->tx_flags;
@@ -157,7 +82,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
tx_flags |= TX_CMD_FLG_IGNORE_BT;
- tx_cmd->sta_id = std_id;
+ tx_cmd->sta_id = sta_id;
if (ieee80211_has_morefrags(fc))
tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
@@ -186,12 +111,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
tx_cmd->next_frame_len = 0;
}
-#define RTS_DFAULT_RETRY_LIMIT 60
-
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- __le16 fc)
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ __le16 fc)
{
u32 rate_flags;
int rate_idx;
@@ -199,17 +122,25 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
u8 data_retry_limit;
u8 rate_plcp;
- /* Set retry limit on DATA packets and Probe Responses*/
- if (ieee80211_is_probe_resp(fc))
- data_retry_limit = 3;
- else
- data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
- tx_cmd->data_retry_limit = data_retry_limit;
+ if (priv->shrd->wowlan) {
+ rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
+ data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
+ } else {
+ /* Set retry limit on RTS packets */
+ rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;
+
+ /* Set retry limit on DATA packets and Probe Responses*/
+ if (ieee80211_is_probe_resp(fc)) {
+ data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
+ rts_retry_limit =
+ min(data_retry_limit, rts_retry_limit);
+ } else if (ieee80211_is_back_req(fc))
+ data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
+ else
+ data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
+ }
- /* Set retry limit on RTS packets */
- rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
+ tx_cmd->data_retry_limit = data_retry_limit;
tx_cmd->rts_retry_limit = rts_retry_limit;
/* DATA packets will use the uCode station table for rate/antenna
@@ -261,10 +192,10 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- first_antenna(priv->hw_params.valid_tx_ant));
+ first_antenna(hw_params(priv).valid_tx_ant));
} else
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
+ hw_params(priv).valid_tx_ant);
rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* Set the rate in the TX cmd */
@@ -322,30 +253,21 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_station_priv *sta_priv = NULL;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl_device_cmd *dev_cmd = NULL;
struct iwl_tx_cmd *tx_cmd;
- int txq_id;
- u16 seq_number = 0;
__le16 fc;
u8 hdr_len;
u16 len;
u8 sta_id;
- u8 tid = 0;
unsigned long flags;
bool is_agg = false;
- /*
- * If the frame needs to go out off-channel, then
- * we'll have put the PAN context to that channel,
- * so make the frame go out there.
- */
- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
- ctx = &priv->contexts[IWL_RXON_CTX_PAN];
- else if (info->control.vif)
+ if (info->control.vif)
ctx = iwl_rxon_ctx_from_vif(info->control.vif);
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_is_rfkill(priv)) {
+ spin_lock_irqsave(&priv->shrd->lock, flags);
+ if (iwl_is_rfkill(priv->shrd)) {
IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
goto drop_unlock_priv;
}
@@ -382,7 +304,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
sta_priv = (void *)info->control.sta->drv_priv;
if (sta_priv && sta_priv->asleep &&
- (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
+ (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
/*
* This sends an asynchronous command to the device,
* but we can rely on it being processed before the
@@ -395,52 +317,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
}
- /*
- * Send this frame after DTIM -- there's a special queue
- * reserved for this for contexts that support AP mode.
- */
- if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
- txq_id = ctx->mcast_queue;
- /*
- * The microcode will clear the more data
- * bit in the last frame it transmits.
- */
- hdr->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- } else
- txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ is_agg = true;
- /* irqs already disabled/saved above when locking priv->lock */
- spin_lock(&priv->sta_lock);
+ /* irqs already disabled/saved above when locking priv->shrd->lock */
+ spin_lock(&priv->shrd->sta_lock);
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = NULL;
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-
- if (WARN_ON_ONCE(tid >= MAX_TID_COUNT))
- goto drop_unlock_sta;
-
- seq_number = priv->stations[sta_id].tid[tid].seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
- /* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU &&
- priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
- txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
- is_agg = true;
- }
- }
+ dev_cmd = kmem_cache_alloc(priv->tx_cmd_pool, GFP_ATOMIC);
- tx_cmd = trans_get_tx_cmd(&priv->trans, txq_id);
- if (unlikely(!tx_cmd))
+ if (unlikely(!dev_cmd))
goto drop_unlock_sta;
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
+ memset(dev_cmd, 0, sizeof(*dev_cmd));
+ tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
/* Total # bytes to be transmitted */
len = (u16)skb->len;
@@ -457,17 +346,16 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
iwl_update_stats(priv, true, fc, len);
- if (trans_tx(&priv->trans, skb, tx_cmd, txq_id, fc, is_agg, ctx))
- goto drop_unlock_sta;
+ memset(&info->status, 0, sizeof(info->status));
- if (ieee80211_is_data_qos(fc)) {
- priv->stations[sta_id].tid[tid].tfds_in_queue++;
- if (!ieee80211_has_morefrags(fc))
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
- }
+ info->driver_data[0] = ctx;
+ info->driver_data[1] = dev_cmd;
+
+ if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id))
+ goto drop_unlock_sta;
- spin_unlock(&priv->sta_lock);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->shrd->sta_lock);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
/*
* Avoid atomic ops if it isn't an associated client.
@@ -482,41 +370,20 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
return 0;
drop_unlock_sta:
- spin_unlock(&priv->sta_lock);
+ if (dev_cmd)
+ kmem_cache_free(priv->tx_cmd_pool, dev_cmd);
+ spin_unlock(&priv->shrd->sta_lock);
drop_unlock_priv:
- spin_unlock_irqrestore(&priv->lock, flags);
- return -1;
-}
-
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
- */
-static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
- return txq_id;
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
return -1;
}
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
int sta_id;
- int tx_fifo;
- int txq_id;
int ret;
- unsigned long flags;
- struct iwl_tid_data *tid_data;
-
- tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo < 0))
- return tx_fifo;
IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
sta->addr, tid);
@@ -526,58 +393,29 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
IWL_ERR(priv, "Start AGG on invalid station\n");
return -ENXIO;
}
- if (unlikely(tid >= MAX_TID_COUNT))
+ if (unlikely(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
- if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
+ if (priv->shrd->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
return -ENXIO;
}
- txq_id = iwlagn_txq_ctx_activate_free(priv);
- if (txq_id == -1) {
- IWL_ERR(priv, "No free aggregation queue available\n");
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
- tid_data->agg.tx_fifo = tx_fifo;
- iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ret = iwlagn_txq_agg_enable(priv, txq_id, sta_id, tid);
+ ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
if (ret)
return ret;
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- } else {
- IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
- tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ ret = iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid, sta_id,
+ tid, ssn);
+
return ret;
}
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid)
{
- int tx_fifo_id, txq_id, sta_id, ssn;
- struct iwl_tid_data *tid_data;
- int write_ptr, read_ptr;
- unsigned long flags;
-
- tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo_id < 0))
- return tx_fifo_id;
+ int sta_id;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
sta_id = iwl_sta_id(sta);
@@ -586,101 +424,8 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
return -ENXIO;
}
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- tid_data = &priv->stations[sta_id].tid[tid];
- ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
- txq_id = tid_data->agg.txq_id;
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /*
- * This can happen if the peer stops aggregation
- * again before we've had a chance to drain the
- * queue we selected previously, i.e. before the
- * session was really started completely.
- */
- IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
- goto turn_off;
- case IWL_AGG_ON:
- break;
- default:
- IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
- }
-
- write_ptr = priv->txq[txq_id].q.write_ptr;
- read_ptr = priv->txq[txq_id].q.read_ptr;
-
- /* The queue is not empty */
- if (write_ptr != read_ptr) {
- IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
- priv->stations[sta_id].tid[tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- turn_off:
- priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-
- /* do not restore/save irqs */
- spin_unlock(&priv->sta_lock);
- spin_lock(&priv->lock);
-
- /*
- * the only reason this call can fail is queue number out of range,
- * which can happen if uCode is reloaded and all the station
- * information are lost. if it is outside the range, there is no need
- * to deactivate the uCode queue, just return "success" to allow
- * mac80211 to clean up it own data.
- */
- trans_txq_agg_disable(&priv->trans, txq_id, ssn, tx_fifo_id);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-
- return 0;
-}
-
-int iwlagn_txq_check_empty(struct iwl_priv *priv,
- int sta_id, u8 tid, int txq_id)
-{
- struct iwl_queue *q = &priv->txq[txq_id].q;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
- struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
- struct iwl_rxon_context *ctx;
-
- ctx = &priv->contexts[priv->stations[sta_id].ctxid];
-
- lockdep_assert_held(&priv->sta_lock);
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- /* We are reclaiming the last packet of the */
- /* aggregated HW queue */
- if ((txq_id == tid_data->agg.txq_id) &&
- (q->read_ptr == q->write_ptr)) {
- u16 ssn = SEQ_TO_SN(tid_data->seq_number);
- int tx_fifo = get_fifo_from_tid(ctx, tid);
- IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
- trans_txq_agg_disable(&priv->trans, txq_id,
- ssn, tx_fifo);
- tid_data->agg.state = IWL_AGG_OFF;
- ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
- }
- break;
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /* We are reclaiming the last packet of the queue */
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
- }
- break;
- }
-
- return 0;
+ return iwl_trans_tx_agg_disable(trans(priv), vif_priv->ctx->ctxid,
+ sta_id, tid);
}
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
@@ -702,147 +447,394 @@ static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
rcu_read_unlock();
}
-static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
- bool is_agg)
+/**
+ * translate ucode response to mac80211 tx status control values
+ */
+static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+ info->antenna_sel_tx =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ r->flags |= IEEE80211_TX_RC_MCS;
+ if (rate_n_flags & RATE_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ r->flags |= IEEE80211_TX_RC_DUP_DATA;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_get_tx_fail_reason(u32 status)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(BT_PRIO);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(FIFO_UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+ TX_STATUS_FAIL(PASSIVE_NO_RX);
+ TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+ }
- if (!is_agg)
- iwlagn_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
+ return "UNKNOWN";
- ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
}
+#endif /* CONFIG_IWLWIFI_DEBUG */
-int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_tx_info *tx_info;
- int nfreed = 0;
- struct ieee80211_hdr *hdr;
+ status &= AGG_TX_STATUS_MSK;
- if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
- IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
- "index %d is out of range [0-%d] %d %d.\n", __func__,
- txq_id, index, q->n_bd, q->write_ptr, q->read_ptr);
- return 0;
+ switch (status) {
+ case AGG_TX_STATE_UNDERRUN_MSK:
+ priv->reply_agg_tx_stats.underrun++;
+ break;
+ case AGG_TX_STATE_BT_PRIO_MSK:
+ priv->reply_agg_tx_stats.bt_prio++;
+ break;
+ case AGG_TX_STATE_FEW_BYTES_MSK:
+ priv->reply_agg_tx_stats.few_bytes++;
+ break;
+ case AGG_TX_STATE_ABORT_MSK:
+ priv->reply_agg_tx_stats.abort++;
+ break;
+ case AGG_TX_STATE_LAST_SENT_TTL_MSK:
+ priv->reply_agg_tx_stats.last_sent_ttl++;
+ break;
+ case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
+ priv->reply_agg_tx_stats.last_sent_try++;
+ break;
+ case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
+ priv->reply_agg_tx_stats.last_sent_bt_kill++;
+ break;
+ case AGG_TX_STATE_SCD_QUERY_MSK:
+ priv->reply_agg_tx_stats.scd_query++;
+ break;
+ case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
+ priv->reply_agg_tx_stats.bad_crc32++;
+ break;
+ case AGG_TX_STATE_RESPONSE_MSK:
+ priv->reply_agg_tx_stats.response++;
+ break;
+ case AGG_TX_STATE_DUMP_TX_MSK:
+ priv->reply_agg_tx_stats.dump_tx++;
+ break;
+ case AGG_TX_STATE_DELAY_TX_MSK:
+ priv->reply_agg_tx_stats.delay_tx++;
+ break;
+ default:
+ priv->reply_agg_tx_stats.unknown++;
+ break;
}
+}
+
+static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
+ struct iwlagn_tx_resp *tx_resp)
+{
+ struct agg_tx_status *frame_status = &tx_resp->status;
+ int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
+ IWLAGN_TX_RES_TID_POS;
+ int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
+ IWLAGN_TX_RES_RA_POS;
+ struct iwl_ht_agg *agg = &priv->shrd->tid_data[sta_id][tid].agg;
+ u32 status = le16_to_cpu(tx_resp->status.status);
+ int i;
+
+ if (agg->wait_for_ba)
+ IWL_DEBUG_TX_REPLY(priv,
+ "got tx response w/o block-ack\n");
- for (index = iwl_queue_inc_wrap(index, q->n_bd);
- q->read_ptr != index;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+ agg->wait_for_ba = (tx_resp->frame_count > 1);
- tx_info = &txq->txb[txq->q.read_ptr];
+ /*
+ * If the BT kill count is non-zero, we'll get this
+ * notification again.
+ */
+ if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
+ priv->cfg->bt_params &&
+ priv->cfg->bt_params->advanced_bt_coexist) {
+ IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
+ }
- if (WARN_ON_ONCE(tx_info->skb == NULL))
- continue;
+ if (tx_resp->frame_count == 1)
+ return;
- hdr = (struct ieee80211_hdr *)tx_info->skb->data;
- if (ieee80211_is_data_qos(hdr->frame_control))
- nfreed++;
+ /* Construct bit-map of pending frames within Tx window */
+ for (i = 0; i < tx_resp->frame_count; i++) {
+ u16 fstatus = le16_to_cpu(frame_status[i].status);
- iwlagn_tx_status(priv, tx_info,
- txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
- tx_info->skb = NULL;
+ if (status & AGG_TX_STATUS_MSK)
+ iwlagn_count_agg_tx_err_status(priv, fstatus);
- iwlagn_txq_inval_byte_cnt_tbl(priv, txq);
+ if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
+ AGG_TX_STATE_ABORT_MSK))
+ continue;
- iwlagn_txq_free_tfd(priv, txq, txq->q.read_ptr);
+ IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
+ "try-count (0x%08x)\n",
+ iwl_get_agg_tx_fail_reason(fstatus),
+ fstatus & AGG_TX_STATUS_MSK,
+ fstatus & AGG_TX_TRY_MSK);
}
- return nfreed;
}
-/**
- * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
- *
- * Go through block-ack's bitmap of ACK'd frames, update driver's record of
- * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
- */
-static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl_compressed_ba_resp *ba_resp)
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
+const char *iwl_get_agg_tx_fail_reason(u16 status)
{
- int sh;
- u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
- struct ieee80211_tx_info *info;
- u64 bitmap, sent_bitmap;
-
- if (unlikely(!agg->wait_for_ba)) {
- if (unlikely(ba_resp->bitmap))
- IWL_ERR(priv, "Received BA when not expected\n");
- return -EINVAL;
+ status &= AGG_TX_STATUS_MSK;
+ switch (status) {
+ case AGG_TX_STATE_TRANSMITTED:
+ return "SUCCESS";
+ AGG_TX_STATE_FAIL(UNDERRUN_MSK);
+ AGG_TX_STATE_FAIL(BT_PRIO_MSK);
+ AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
+ AGG_TX_STATE_FAIL(ABORT_MSK);
+ AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
+ AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
+ AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
+ AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
+ AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
+ AGG_TX_STATE_FAIL(RESPONSE_MSK);
+ AGG_TX_STATE_FAIL(DUMP_TX_MSK);
+ AGG_TX_STATE_FAIL(DELAY_TX_MSK);
}
- /* Mark that the expected block-ack response arrived */
- agg->wait_for_ba = 0;
- IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
-
- /* Calculate shift to align block-ack bits with our Tx window bits */
- sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
- if (sh < 0)
- sh += 0x100;
+ return "UNKNOWN";
+}
+#endif /* CONFIG_IWLWIFI_DEBUG */
- /*
- * Check for success or failure according to the
- * transmitted bitmap and block-ack bitmap
- */
- bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
- sent_bitmap = bitmap & agg->bitmap;
+static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
+{
+ return le32_to_cpup((__le32 *)&tx_resp->status +
+ tx_resp->frame_count) & MAX_SN;
+}
- /* Sanity check values reported by uCode */
- if (ba_resp->txed_2_done > ba_resp->txed) {
- IWL_DEBUG_TX_REPLY(priv,
- "bogus sent(%d) and ack(%d) count\n",
- ba_resp->txed, ba_resp->txed_2_done);
- /*
- * set txed_2_done = txed,
- * so it won't impact rate scale
- */
- ba_resp->txed = ba_resp->txed_2_done;
- }
- IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
- ba_resp->txed, ba_resp->txed_2_done);
+static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
+{
+ status &= TX_STATUS_MSK;
- /* Find the first ACKed frame to store the TX status */
- while (sent_bitmap && !(sent_bitmap & 1)) {
- agg->start_idx = (agg->start_idx + 1) & 0xff;
- sent_bitmap >>= 1;
+ switch (status) {
+ case TX_STATUS_POSTPONE_DELAY:
+ priv->reply_tx_stats.pp_delay++;
+ break;
+ case TX_STATUS_POSTPONE_FEW_BYTES:
+ priv->reply_tx_stats.pp_few_bytes++;
+ break;
+ case TX_STATUS_POSTPONE_BT_PRIO:
+ priv->reply_tx_stats.pp_bt_prio++;
+ break;
+ case TX_STATUS_POSTPONE_QUIET_PERIOD:
+ priv->reply_tx_stats.pp_quiet_period++;
+ break;
+ case TX_STATUS_POSTPONE_CALC_TTAK:
+ priv->reply_tx_stats.pp_calc_ttak++;
+ break;
+ case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
+ priv->reply_tx_stats.int_crossed_retry++;
+ break;
+ case TX_STATUS_FAIL_SHORT_LIMIT:
+ priv->reply_tx_stats.short_limit++;
+ break;
+ case TX_STATUS_FAIL_LONG_LIMIT:
+ priv->reply_tx_stats.long_limit++;
+ break;
+ case TX_STATUS_FAIL_FIFO_UNDERRUN:
+ priv->reply_tx_stats.fifo_underrun++;
+ break;
+ case TX_STATUS_FAIL_DRAIN_FLOW:
+ priv->reply_tx_stats.drain_flow++;
+ break;
+ case TX_STATUS_FAIL_RFKILL_FLUSH:
+ priv->reply_tx_stats.rfkill_flush++;
+ break;
+ case TX_STATUS_FAIL_LIFE_EXPIRE:
+ priv->reply_tx_stats.life_expire++;
+ break;
+ case TX_STATUS_FAIL_DEST_PS:
+ priv->reply_tx_stats.dest_ps++;
+ break;
+ case TX_STATUS_FAIL_HOST_ABORTED:
+ priv->reply_tx_stats.host_abort++;
+ break;
+ case TX_STATUS_FAIL_BT_RETRY:
+ priv->reply_tx_stats.bt_retry++;
+ break;
+ case TX_STATUS_FAIL_STA_INVALID:
+ priv->reply_tx_stats.sta_invalid++;
+ break;
+ case TX_STATUS_FAIL_FRAG_DROPPED:
+ priv->reply_tx_stats.frag_drop++;
+ break;
+ case TX_STATUS_FAIL_TID_DISABLE:
+ priv->reply_tx_stats.tid_disable++;
+ break;
+ case TX_STATUS_FAIL_FIFO_FLUSHED:
+ priv->reply_tx_stats.fifo_flush++;
+ break;
+ case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
+ priv->reply_tx_stats.insuff_cf_poll++;
+ break;
+ case TX_STATUS_FAIL_PASSIVE_NO_RX:
+ priv->reply_tx_stats.fail_hw_drop++;
+ break;
+ case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
+ priv->reply_tx_stats.sta_color_mismatch++;
+ break;
+ default:
+ priv->reply_tx_stats.unknown++;
+ break;
}
+}
- info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
- memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->flags |= IEEE80211_TX_STAT_AMPDU;
- info->status.ampdu_ack_len = ba_resp->txed_2_done;
- info->status.ampdu_len = ba_resp->txed;
- iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
+static void iwlagn_set_tx_status(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ struct iwlagn_tx_resp *tx_resp,
+ bool is_agg)
+{
+ u16 status = le16_to_cpu(tx_resp->status.status);
+
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ if (is_agg)
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ info->flags |= iwl_tx_status_to_mac80211(status);
+ iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
+ info);
+ if (!iwl_is_tx_success(status))
+ iwlagn_count_tx_err_status(priv, status);
+}
- return 0;
+static void iwl_check_abort_status(struct iwl_priv *priv,
+ u8 frame_count, u32 status)
+{
+ if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+ IWL_ERR(priv, "Tx flush command to flush out all frames\n");
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ queue_work(priv->shrd->workqueue, &priv->tx_flush);
+ }
}
-/**
- * translate ucode response to mac80211 tx status control values
- */
-void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info)
+int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
- struct ieee80211_tx_rate *r = &info->control.rates[0];
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
+ struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ struct ieee80211_hdr *hdr;
+ u32 status = le16_to_cpu(tx_resp->status.status);
+ u32 ssn = iwlagn_get_scd_ssn(tx_resp);
+ int tid;
+ int sta_id;
+ int freed;
+ struct ieee80211_tx_info *info;
+ unsigned long flags;
+ struct sk_buff_head skbs;
+ struct sk_buff *skb;
+ struct iwl_rxon_context *ctx;
+ bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
+
+ tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
+ IWLAGN_TX_RES_TID_POS;
+ sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
+ IWLAGN_TX_RES_RA_POS;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+ if (is_agg)
+ iwl_rx_reply_tx_agg(priv, tx_resp);
+
+ if (tx_resp->frame_count == 1) {
+ __skb_queue_head_init(&skbs);
+ /*we can free until ssn % q.n_bd not inclusive */
+ iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
+ ssn, status, &skbs);
+ freed = 0;
+ while (!skb_queue_empty(&skbs)) {
+ skb = __skb_dequeue(&skbs);
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ priv->last_seq_ctl = tx_resp->seq_ctl;
+
+ info = IEEE80211_SKB_CB(skb);
+ ctx = info->driver_data[0];
+ kmem_cache_free(priv->tx_cmd_pool,
+ (info->driver_data[1]));
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
+ iwl_is_associated_ctx(ctx) && ctx->vif &&
+ ctx->vif->type == NL80211_IFTYPE_STATION) {
+ ctx->last_tx_rejected = true;
+ iwl_trans_stop_queue(trans(priv), txq_id);
+
+ IWL_DEBUG_TX_REPLY(priv,
+ "TXQ %d status %s (0x%08x) "
+ "rate_n_flags 0x%x retries %d\n",
+ txq_id,
+ iwl_get_tx_fail_reason(status),
+ status,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ tx_resp->failure_frame);
+
+ IWL_DEBUG_TX_REPLY(priv,
+ "FrameCnt = %d, idx=%d\n",
+ tx_resp->frame_count, cmd_index);
+ }
+
+ /* check if BAR is needed */
+ if (is_agg && !iwl_is_tx_success(status))
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
+ tx_resp, is_agg);
+ if (!is_agg)
+ iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
+
+ ieee80211_tx_status_irqsafe(priv->hw, skb);
+
+ freed++;
+ }
- info->antenna_sel_tx =
- ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
- if (rate_n_flags & RATE_MCS_HT_MSK)
- r->flags |= IEEE80211_TX_RC_MCS;
- if (rate_n_flags & RATE_MCS_GF_MSK)
- r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (rate_n_flags & RATE_MCS_DUP_MSK)
- r->flags |= IEEE80211_TX_RC_DUP_DATA;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- r->flags |= IEEE80211_TX_RC_SHORT_GI;
- r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+ WARN_ON(!is_agg && freed != 1);
+ }
+
+ iwl_check_abort_status(priv, tx_resp->frame_count, status);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
}
/**
@@ -851,17 +843,21 @@ void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
* Handles block-acknowledge notification from device, which reports success
* of frames sent via aggregation.
*/
-void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
- struct iwl_tx_queue *txq = NULL;
struct iwl_ht_agg *agg;
- int index;
+ struct sk_buff_head reclaimed_skbs;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ unsigned long flags;
int sta_id;
int tid;
- unsigned long flags;
+ int freed;
/* "flow" corresponds to Tx queue */
u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
@@ -870,16 +866,18 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
* (in Tx queue's circular buffer) of first TFD/frame in window */
u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
- if (scd_flow >= priv->hw_params.max_txq_num) {
+ if (scd_flow >= hw_params(priv).max_txq_num) {
IWL_ERR(priv,
"BUG_ON scd_flow is bigger than number of queues\n");
- return;
+ return 0;
}
- txq = &priv->txq[scd_flow];
sta_id = ba_resp->sta_id;
tid = ba_resp->tid;
- agg = &priv->stations[sta_id].tid[tid].agg;
+ agg = &priv->shrd->tid_data[sta_id][tid].agg;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
if (unlikely(agg->txq_id != scd_flow)) {
/*
* FIXME: this is a uCode bug which need to be addressed,
@@ -890,88 +888,84 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
IWL_DEBUG_TX_REPLY(priv,
"BA scd_flow %d does not match txq_id %d\n",
scd_flow, agg->txq_id);
- return;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
}
- /* Find index just before block-ack window */
- index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
+ if (unlikely(!agg->wait_for_ba)) {
+ if (unlikely(ba_resp->bitmap))
+ IWL_ERR(priv, "Received BA when not expected\n");
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
+ }
IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
"sta_id = %d\n",
agg->wait_for_ba,
(u8 *) &ba_resp->sta_addr_lo32,
ba_resp->sta_id);
- IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
- "%d, scd_ssn = %d\n",
+ IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
+ "scd_flow = %d, scd_ssn = %d\n",
ba_resp->tid,
ba_resp->seq_ctl,
(unsigned long long)le64_to_cpu(ba_resp->bitmap),
ba_resp->scd_flow,
ba_resp->scd_ssn);
- IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
- agg->start_idx,
- (unsigned long long)agg->bitmap);
- /* Update driver's record of ACK vs. not for each frame in window */
- iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);
+ /* Mark that the expected block-ack response arrived */
+ agg->wait_for_ba = false;
+
+ /* Sanity check values reported by uCode */
+ if (ba_resp->txed_2_done > ba_resp->txed) {
+ IWL_DEBUG_TX_REPLY(priv,
+ "bogus sent(%d) and ack(%d) count\n",
+ ba_resp->txed, ba_resp->txed_2_done);
+ /*
+ * set txed_2_done = txed,
+ * so it won't impact rate scale
+ */
+ ba_resp->txed = ba_resp->txed_2_done;
+ }
+ IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
+ ba_resp->txed, ba_resp->txed_2_done);
+
+ __skb_queue_head_init(&reclaimed_skbs);
/* Release all TFDs before the SSN, i.e. all TFDs in front of
* block-ack window (we assume that they've been successfully
* transmitted ... if not, it's too late anyway). */
- if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
- /* calculate mac80211 ampdu sw queue to wake */
- int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- priv->mac80211_registered &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_wake_queue(priv, txq);
-
- iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
- }
+ iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, ba_resp_scd_ssn,
+ 0, &reclaimed_skbs);
+ freed = 0;
+ while (!skb_queue_empty(&reclaimed_skbs)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
+ skb = __skb_dequeue(&reclaimed_skbs);
+ hdr = (struct ieee80211_hdr *)skb->data;
-#ifdef CONFIG_IWLWIFI_DEBUG
-const char *iwl_get_tx_fail_reason(u32 status)
-{
-#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
-#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ freed++;
+ else
+ WARN_ON_ONCE(1);
+
+ info = IEEE80211_SKB_CB(skb);
+ kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));
+
+ if (freed == 1) {
+ /* this is the first skb we deliver in this batch */
+ /* put the rate scaling data there */
+ info = IEEE80211_SKB_CB(skb);
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = ba_resp->txed_2_done;
+ info->status.ampdu_len = ba_resp->txed;
+ iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
+ info);
+ }
- switch (status & TX_STATUS_MSK) {
- case TX_STATUS_SUCCESS:
- return "SUCCESS";
- TX_STATUS_POSTPONE(DELAY);
- TX_STATUS_POSTPONE(FEW_BYTES);
- TX_STATUS_POSTPONE(BT_PRIO);
- TX_STATUS_POSTPONE(QUIET_PERIOD);
- TX_STATUS_POSTPONE(CALC_TTAK);
- TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
- TX_STATUS_FAIL(SHORT_LIMIT);
- TX_STATUS_FAIL(LONG_LIMIT);
- TX_STATUS_FAIL(FIFO_UNDERRUN);
- TX_STATUS_FAIL(DRAIN_FLOW);
- TX_STATUS_FAIL(RFKILL_FLUSH);
- TX_STATUS_FAIL(LIFE_EXPIRE);
- TX_STATUS_FAIL(DEST_PS);
- TX_STATUS_FAIL(HOST_ABORTED);
- TX_STATUS_FAIL(BT_RETRY);
- TX_STATUS_FAIL(STA_INVALID);
- TX_STATUS_FAIL(FRAG_DROPPED);
- TX_STATUS_FAIL(TID_DISABLE);
- TX_STATUS_FAIL(FIFO_FLUSHED);
- TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
- TX_STATUS_FAIL(PASSIVE_NO_RX);
- TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+ ieee80211_tx_status_irqsafe(priv->hw, skb);
}
- return "UNKNOWN";
-
-#undef TX_STATUS_FAIL
-#undef TX_STATUS_POSTPONE
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
}
-#endif /* CONFIG_IWLWIFI_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 56211006a18..8ba0dd54e37 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -35,11 +35,11 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-agn-calib.h"
#include "iwl-trans.h"
+#include "iwl-fh.h"
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
@@ -84,42 +84,37 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
priv->ucode_write_complete = 0;
- iwl_write_direct32(priv,
+ iwl_write_direct32(bus(priv),
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
- iwl_write_direct32(priv,
+ iwl_write_direct32(bus(priv),
FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
- iwl_write_direct32(priv,
+ iwl_write_direct32(bus(priv),
FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
- iwl_write_direct32(priv,
+ iwl_write_direct32(bus(priv),
FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
(iwl_get_dma_hi_addr(phy_addr)
<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
- iwl_write_direct32(priv,
+ iwl_write_direct32(bus(priv),
FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
- iwl_write_direct32(priv,
+ iwl_write_direct32(bus(priv),
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name);
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
- priv->ucode_write_complete, 5 * HZ);
- if (ret == -ERESTARTSYS) {
- IWL_ERR(priv, "Could not load the %s uCode section due "
- "to interrupt\n", name);
- return ret;
- }
+ ret = wait_event_timeout(priv->shrd->wait_command_queue,
+ priv->ucode_write_complete, 5 * HZ);
if (!ret) {
IWL_ERR(priv, "Could not load the %s uCode section\n",
name);
@@ -163,7 +158,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
{
struct iwl_calib_temperature_offset_cmd cmd;
__le16 *offset_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_TEMPERATURE);
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
@@ -177,6 +172,42 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
(u8 *)&cmd, sizeof(cmd));
}
+static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv)
+{
+ struct iwl_calib_temperature_offset_v2_cmd cmd;
+ __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
+ EEPROM_KELVIN_TEMPERATURE);
+ __le16 *offset_calib_low =
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
+ struct iwl_eeprom_calib_hdr *hdr;
+
+ memset(&cmd, 0, sizeof(cmd));
+ iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+ EEPROM_CALIB_ALL);
+ memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
+ sizeof(*offset_calib_high));
+ memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
+ sizeof(*offset_calib_low));
+ if (!(cmd.radio_sensor_offset_low)) {
+ IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
+ cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
+ cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
+ }
+ memcpy(&cmd.burntVoltageRef, &hdr->voltage,
+ sizeof(hdr->voltage));
+
+ IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
+ le16_to_cpu(cmd.radio_sensor_offset_high));
+ IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n",
+ le16_to_cpu(cmd.radio_sensor_offset_low));
+ IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
+ le16_to_cpu(cmd.burntVoltageRef));
+
+ return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
+ (u8 *)&cmd, sizeof(cmd));
+}
+
static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
{
struct iwl_calib_cfg_cmd calib_cfg_cmd;
@@ -193,11 +224,12 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
calib_cfg_cmd.ucd_calib_cfg.flags =
IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
}
-void iwlagn_rx_calib_result(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+int iwlagn_rx_calib_result(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
@@ -230,9 +262,10 @@ void iwlagn_rx_calib_result(struct iwl_priv *priv,
default:
IWL_ERR(priv, "Unknown calibration notification %d\n",
hdr->op_code);
- return;
+ return -1;
}
iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
+ return 0;
}
int iwlagn_init_alive_start(struct iwl_priv *priv)
@@ -262,8 +295,12 @@ int iwlagn_init_alive_start(struct iwl_priv *priv)
* temperature offset calibration is only needed for runtime ucode,
* so prepare the value now.
*/
- if (priv->cfg->need_temp_offset_calib)
- return iwlagn_set_temperature_offset_calib(priv);
+ if (priv->cfg->need_temp_offset_calib) {
+ if (priv->cfg->temp_offset_v2)
+ return iwlagn_set_temperature_offset_calib_v2(priv);
+ else
+ return iwlagn_set_temperature_offset_calib(priv);
+ }
return 0;
}
@@ -291,7 +328,7 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
/* coexistence is disabled */
memset(&coex_cmd, 0, sizeof(coex_cmd));
}
- return trans_send_cmd_pdu(&priv->trans,
+ return iwl_trans_send_cmd_pdu(trans(priv),
COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
sizeof(coex_cmd), &coex_cmd);
}
@@ -324,7 +361,7 @@ void iwlagn_send_prio_tbl(struct iwl_priv *priv)
memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
sizeof(iwlagn_bt_prio_tbl));
- if (trans_send_cmd_pdu(&priv->trans,
+ if (iwl_trans_send_cmd_pdu(trans(priv),
REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(prio_tbl_cmd), &prio_tbl_cmd))
IWL_ERR(priv, "failed to send BT prio tbl command\n");
@@ -337,7 +374,7 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
env_cmd.action = action;
env_cmd.type = type;
- ret = trans_send_cmd_pdu(&priv->trans,
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
sizeof(env_cmd), &env_cmd);
if (ret)
@@ -348,9 +385,21 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
static int iwlagn_alive_notify(struct iwl_priv *priv)
{
+ struct iwl_rxon_context *ctx;
int ret;
- trans_tx_start(&priv->trans);
+ if (!priv->tx_cmd_pool)
+ priv->tx_cmd_pool =
+ kmem_cache_create("iwlagn_dev_cmd",
+ sizeof(struct iwl_device_cmd),
+ sizeof(void *), 0, NULL);
+
+ if (!priv->tx_cmd_pool)
+ return -ENOMEM;
+
+ iwl_trans_tx_start(trans(priv));
+ for_each_context(priv, ctx)
+ ctx->last_tx_rejected = false;
ret = iwlagn_send_wimax_coex(priv);
if (ret)
@@ -369,7 +418,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
* using sample data 100 bytes apart. If these sample points are good,
* it's a pretty good bet that everything between them is good, too.
*/
-static int iwlcore_verify_inst_sparse(struct iwl_priv *priv,
+static int iwl_verify_inst_sparse(struct iwl_priv *priv,
struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
@@ -383,9 +432,9 @@ static int iwlcore_verify_inst_sparse(struct iwl_priv *priv,
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
- iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
i + IWLAGN_RTC_INST_LOWER_BOUND);
- val = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
+ val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image))
return -EIO;
}
@@ -404,14 +453,14 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
- iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
IWLAGN_RTC_INST_LOWER_BOUND);
for (offs = 0;
offs < len && errors < 20;
offs += sizeof(u32), image++) {
/* read data comes through single port, auto-incr addr */
- val = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
+ val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
IWL_ERR(priv, "uCode INST section at "
"offset 0x%x, is 0x%x, s/b 0x%x\n",
@@ -427,7 +476,7 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
*/
static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
{
- if (!iwlcore_verify_inst_sparse(priv, &img->code)) {
+ if (!iwl_verify_inst_sparse(priv, &img->code)) {
IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
return 0;
}
@@ -478,7 +527,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
int ret;
enum iwlagn_ucode_type old_type;
- ret = trans_start_device(&priv->trans);
+ ret = iwl_trans_start_device(trans(priv));
if (ret)
return ret;
@@ -495,7 +544,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
return ret;
}
- trans_kick_nic(&priv->trans);
+ iwl_trans_kick_nic(trans(priv));
/*
* Some things may run in the background now, but we
@@ -545,7 +594,7 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
struct iwl_notification_wait calib_wait;
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
/* No init ucode required? Curious, but maybe ok */
if (!priv->ucode_init.code.len)
@@ -580,6 +629,6 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
iwlagn_remove_notification(priv, &calib_wait);
out:
/* Whatever happened, stop the device */
- trans_stop_device(&priv->trans);
+ iwl_trans_stop_device(trans(priv));
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index f9c3cd95d61..ccba69b7f8a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -35,7 +35,6 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
@@ -48,10 +47,9 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-sta.h"
#include "iwl-agn-calib.h"
#include "iwl-agn.h"
+#include "iwl-shared.h"
#include "iwl-bus.h"
#include "iwl-trans.h"
@@ -79,9 +77,7 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
-
-static int iwlagn_ant_coupling;
-static bool iwlagn_bt_ch_announce = 1;
+MODULE_ALIAS("iwlagn");
void iwl_update_chain_flags(struct iwl_priv *priv)
{
@@ -138,7 +134,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
* beacon contents.
*/
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
if (!priv->beacon_ctx) {
IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
@@ -183,7 +179,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
rate = info->control.rates[0].idx;
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
+ hw_params(priv).valid_tx_ant);
rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* In mac80211, rates for 5 GHz start at 0 */
@@ -203,7 +199,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
cmd.data[1] = priv->beacon_skb->data;
cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
}
static void iwl_bg_beacon_update(struct work_struct *work)
@@ -212,7 +208,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
container_of(work, struct iwl_priv, beacon_update);
struct sk_buff *beacon;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if (!priv->beacon_ctx) {
IWL_ERR(priv, "updating beacon w/o beacon context!\n");
goto out;
@@ -242,7 +238,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
iwlagn_send_beacon_cmd(priv);
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
static void iwl_bg_bt_runtime_config(struct work_struct *work)
@@ -250,11 +246,11 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work)
struct iwl_priv *priv =
container_of(work, struct iwl_priv, bt_runtime_config);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
/* dont send host command if rf-kill is on */
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
return;
iwlagn_send_advance_bt_config(priv);
}
@@ -265,13 +261,13 @@ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
container_of(work, struct iwl_priv, bt_full_concurrency);
struct iwl_rxon_context *ctx;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
goto out;
/* dont send host command if rf-kill is on */
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
goto out;
IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
@@ -289,7 +285,7 @@ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
iwlagn_send_advance_bt_config(priv);
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
/**
@@ -306,11 +302,11 @@ static void iwl_bg_statistics_periodic(unsigned long data)
{
struct iwl_priv *priv = (struct iwl_priv *)data;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
/* dont send host command if rf-kill is on */
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
return;
iwl_send_statistics_request(priv, CMD_ASYNC, false);
@@ -332,14 +328,14 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
/* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (iwl_grab_nic_access(priv)) {
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ spin_lock_irqsave(&bus(priv)->reg_lock, reg_flags);
+ if (iwl_grab_nic_access(bus(priv))) {
+ spin_unlock_irqrestore(&bus(priv)->reg_lock, reg_flags);
return;
}
/* Set starting address; reads will auto-increment */
- iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);
+ iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, ptr);
rmb();
/*
@@ -347,20 +343,20 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
* place event id # at far right for easier visual parsing.
*/
for (i = 0; i < num_events; i++) {
- ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
- time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
+ ev = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ time = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
if (mode == 0) {
trace_iwlwifi_dev_ucode_cont_event(priv,
0, time, ev);
} else {
- data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
+ data = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
trace_iwlwifi_dev_ucode_cont_event(priv,
time, data, ev);
}
}
/* Allow device to power down */
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ iwl_release_nic_access(bus(priv));
+ spin_unlock_irqrestore(&bus(priv)->reg_lock, reg_flags);
}
static void iwl_continuous_event_trace(struct iwl_priv *priv)
@@ -373,10 +369,12 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
base = priv->device_pointers.error_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
- capacity = iwl_read_targ_mem(priv, base);
- num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
- mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
- next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
+ capacity = iwl_read_targ_mem(bus(priv), base);
+ num_wraps = iwl_read_targ_mem(bus(priv),
+ base + (2 * sizeof(u32)));
+ mode = iwl_read_targ_mem(bus(priv), base + (1 * sizeof(u32)));
+ next_entry = iwl_read_targ_mem(bus(priv),
+ base + (3 * sizeof(u32)));
} else
return;
@@ -427,7 +425,7 @@ static void iwl_bg_ucode_trace(unsigned long data)
{
struct iwl_priv *priv = (struct iwl_priv *)data;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
if (priv->event_log.ucode_trace) {
@@ -443,131 +441,17 @@ static void iwl_bg_tx_flush(struct work_struct *work)
struct iwl_priv *priv =
container_of(work, struct iwl_priv, tx_flush);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
/* do nothing if rf-kill is on */
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
return;
IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
}
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level being managed using sysfs below is a per device debug
- * level that is used instead of the global debug level if it (the per
- * device debug level) is set.
- */
-static ssize_t show_debug_level(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv));
-}
-static ssize_t store_debug_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret)
- IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
- else {
- priv->debug_level = val;
- if (iwl_alloc_traffic_mem(priv))
- IWL_ERR(priv,
- "Not enough memory to generate traffic log\n");
- }
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
- show_debug_level, store_debug_level);
-
-
-#endif /* CONFIG_IWLWIFI_DEBUG */
-
-
-static ssize_t show_temperature(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", priv->temperature);
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
-
-static ssize_t show_tx_power(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_is_ready_rf(priv))
- return sprintf(buf, "off\n");
- else
- return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t store_tx_power(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret)
- IWL_INFO(priv, "%s is not in decimal form.\n", buf);
- else {
- ret = iwl_set_tx_power(priv, val, false);
- if (ret)
- IWL_ERR(priv, "failed setting tx power (0x%d).\n",
- ret);
- else
- ret = count;
- }
- return ret;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
-
-static struct attribute *iwl_sysfs_entries[] = {
- &dev_attr_temperature.attr,
- &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_DEBUG
- &dev_attr_debug_level.attr,
-#endif
- NULL
-};
-
-static struct attribute_group iwl_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = iwl_sysfs_entries,
-};
-
/******************************************************************************
*
* uCode download functions
@@ -577,7 +461,7 @@ static struct attribute_group iwl_attribute_group = {
static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc)
{
if (desc->v_addr)
- dma_free_coherent(priv->bus->dev, desc->len,
+ dma_free_coherent(bus(priv)->dev, desc->len,
desc->v_addr, desc->p_addr);
desc->v_addr = NULL;
desc->len = 0;
@@ -604,7 +488,7 @@ static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
return -EINVAL;
}
- desc->v_addr = dma_alloc_coherent(priv->bus->dev, len,
+ desc->v_addr = dma_alloc_coherent(bus(priv)->dev, len,
&desc->p_addr, GFP_KERNEL);
if (!desc->v_addr)
return -ENOMEM;
@@ -614,6 +498,64 @@ static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
return 0;
}
+static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
+{
+ int i;
+
+ /*
+ * The default context is always valid,
+ * the PAN context depends on uCode.
+ */
+ priv->shrd->valid_contexts = BIT(IWL_RXON_CTX_BSS);
+ if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN)
+ priv->shrd->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
+
+ for (i = 0; i < NUM_IWL_RXON_CTX; i++)
+ priv->contexts[i].ctxid = i;
+
+ priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
+ priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
+ priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
+ priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
+ priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
+ priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
+ priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
+ priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
+ priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
+ BIT(NL80211_IFTYPE_ADHOC);
+ priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
+ BIT(NL80211_IFTYPE_STATION);
+ priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
+ priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
+ priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
+ priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
+
+ priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
+ priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
+ REPLY_WIPAN_RXON_TIMING;
+ priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd =
+ REPLY_WIPAN_RXON_ASSOC;
+ priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
+ priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
+ priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
+ priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
+ priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
+ priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
+ BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
+
+ if (ucode_flags & IWL_UCODE_TLV_FLAGS_P2P)
+ priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+
+ priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
+ priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
+ priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
+
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+}
+
+
struct iwlagn_ucode_capabilities {
u32 max_probe_length;
u32 standard_phy_calibration_size;
@@ -621,7 +563,7 @@ struct iwlagn_ucode_capabilities {
};
static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
-static int iwl_mac_setup_register(struct iwl_priv *priv,
+static int iwlagn_mac_setup_register(struct iwl_priv *priv,
struct iwlagn_ucode_capabilities *capa);
#define UCODE_EXPERIMENTAL_INDEX 100
@@ -658,7 +600,7 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
priv->firmware_name);
return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
- priv->bus->dev,
+ bus(priv)->dev,
GFP_KERNEL, priv, iwl_ucode_callback);
}
@@ -738,8 +680,6 @@ static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
return 0;
}
-static int iwlagn_wanted_ucode_alternative = 1;
-
static int iwlagn_load_firmware(struct iwl_priv *priv,
const struct firmware *ucode_raw,
struct iwlagn_firmware_pieces *pieces,
@@ -749,7 +689,8 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
struct iwl_ucode_tlv *tlv;
size_t len = ucode_raw->size;
const u8 *data;
- int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp;
+ int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative;
+ int tmp;
u64 alternatives;
u32 tlv_len;
enum iwl_ucode_tlv_type tlv_type;
@@ -952,6 +893,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
int err;
struct iwlagn_firmware_pieces pieces;
const unsigned int api_max = priv->cfg->ucode_api_max;
+ unsigned int api_ok = priv->cfg->ucode_api_ok;
const unsigned int api_min = priv->cfg->ucode_api_min;
u32 api_ver;
char buildstr[25];
@@ -962,10 +904,13 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE,
};
+ if (!api_ok)
+ api_ok = api_max;
+
memset(&pieces, 0, sizeof(pieces));
if (!ucode_raw) {
- if (priv->fw_index <= priv->cfg->ucode_api_max)
+ if (priv->fw_index <= api_ok)
IWL_ERR(priv,
"request for firmware file '%s' failed.\n",
priv->firmware_name);
@@ -1011,12 +956,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
goto try_again;
}
- if (api_ver != api_max)
- IWL_ERR(priv,
- "Firmware has old API version. Expected v%u, "
- "got v%u. New firmware can be obtained "
- "from http://www.intellinuxwireless.org.\n",
- api_max, api_ver);
+ if (api_ver < api_ok) {
+ if (api_ok != api_max)
+ IWL_ERR(priv, "Firmware has old API version, "
+ "expected v%u through v%u, got v%u.\n",
+ api_ok, api_max, api_ver);
+ else
+ IWL_ERR(priv, "Firmware has old API version, "
+ "expected v%u, got v%u.\n",
+ api_max, api_ver);
+ IWL_ERR(priv, "New firmware can be obtained from "
+ "http://www.intellinuxwireless.org/.\n");
+ }
}
if (build)
@@ -1060,25 +1011,25 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
pieces.init_data_size);
/* Verify that uCode images will fit in card's SRAM */
- if (pieces.inst_size > priv->hw_params.max_inst_size) {
+ if (pieces.inst_size > hw_params(priv).max_inst_size) {
IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
pieces.inst_size);
goto try_again;
}
- if (pieces.data_size > priv->hw_params.max_data_size) {
+ if (pieces.data_size > hw_params(priv).max_data_size) {
IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
pieces.data_size);
goto try_again;
}
- if (pieces.init_size > priv->hw_params.max_inst_size) {
+ if (pieces.init_size > hw_params(priv).max_inst_size) {
IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
pieces.init_size);
goto try_again;
}
- if (pieces.init_data_size > priv->hw_params.max_data_size) {
+ if (pieces.init_data_size > hw_params(priv).max_data_size) {
IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
pieces.init_data_size);
goto try_again;
@@ -1143,17 +1094,23 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
priv->new_scan_threshold_behaviour =
!!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
- if ((priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE) &&
- (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN)) {
- priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
+ if (!(priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
+ ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
+
+ /*
+ * if not PAN, then don't support P2P -- might be a uCode
+ * packaging bug or due to the eeprom check above
+ */
+ if (!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN))
+ ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
+
+ if (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN) {
priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
- } else
+ priv->shrd->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
+ } else {
priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
- if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
- priv->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
- else
- priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+ priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+ }
/*
* figure out the offset of chain noise reset and gain commands
@@ -1169,12 +1126,15 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
priv->phy_calib_chain_noise_gain_cmd =
ucode_capa.standard_phy_calibration_size + 1;
+ /* initialize all valid contexts */
+ iwl_init_context(priv, ucode_capa.flags);
+
/**************************************************
* This is still part of probe() in a sense...
*
* 9. Setup and register with mac80211 and debugfs
**************************************************/
- err = iwl_mac_setup_register(priv, &ucode_capa);
+ err = iwlagn_mac_setup_register(priv, &ucode_capa);
if (err)
goto out_unbind;
@@ -1182,13 +1142,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
if (err)
IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
- err = sysfs_create_group(&(priv->bus->dev->kobj),
- &iwl_attribute_group);
- if (err) {
- IWL_ERR(priv, "failed to create sysfs device attributes\n");
- goto out_unbind;
- }
-
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
complete(&priv->firmware_loading_complete);
@@ -1206,368 +1159,10 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
iwl_dealloc_ucode(priv);
out_unbind:
complete(&priv->firmware_loading_complete);
- device_release_driver(priv->bus->dev);
+ device_release_driver(bus(priv)->dev);
release_firmware(ucode_raw);
}
-static const char * const desc_lookup_text[] = {
- "OK",
- "FAIL",
- "BAD_PARAM",
- "BAD_CHECKSUM",
- "NMI_INTERRUPT_WDG",
- "SYSASSERT",
- "FATAL_ERROR",
- "BAD_COMMAND",
- "HW_ERROR_TUNE_LOCK",
- "HW_ERROR_TEMPERATURE",
- "ILLEGAL_CHAN_FREQ",
- "VCC_NOT_STABLE",
- "FH_ERROR",
- "NMI_INTERRUPT_HOST",
- "NMI_INTERRUPT_ACTION_PT",
- "NMI_INTERRUPT_UNKNOWN",
- "UCODE_VERSION_MISMATCH",
- "HW_ERROR_ABS_LOCK",
- "HW_ERROR_CAL_LOCK_FAIL",
- "NMI_INTERRUPT_INST_ACTION_PT",
- "NMI_INTERRUPT_DATA_ACTION_PT",
- "NMI_TRM_HW_ER",
- "NMI_INTERRUPT_TRM",
- "NMI_INTERRUPT_BREAK_POINT",
- "DEBUG_0",
- "DEBUG_1",
- "DEBUG_2",
- "DEBUG_3",
-};
-
-static struct { char *name; u8 num; } advanced_lookup[] = {
- { "NMI_INTERRUPT_WDG", 0x34 },
- { "SYSASSERT", 0x35 },
- { "UCODE_VERSION_MISMATCH", 0x37 },
- { "BAD_COMMAND", 0x38 },
- { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
- { "FATAL_ERROR", 0x3D },
- { "NMI_TRM_HW_ERR", 0x46 },
- { "NMI_INTERRUPT_TRM", 0x4C },
- { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
- { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
- { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
- { "NMI_INTERRUPT_HOST", 0x66 },
- { "NMI_INTERRUPT_ACTION_PT", 0x7C },
- { "NMI_INTERRUPT_UNKNOWN", 0x84 },
- { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
- { "ADVANCED_SYSASSERT", 0 },
-};
-
-static const char *desc_lookup(u32 num)
-{
- int i;
- int max = ARRAY_SIZE(desc_lookup_text);
-
- if (num < max)
- return desc_lookup_text[num];
-
- max = ARRAY_SIZE(advanced_lookup) - 1;
- for (i = 0; i < max; i++) {
- if (advanced_lookup[i].num == num)
- break;
- }
- return advanced_lookup[i].name;
-}
-
-#define ERROR_START_OFFSET (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-
-void iwl_dump_nic_error_log(struct iwl_priv *priv)
-{
- u32 base;
- struct iwl_error_event_table table;
-
- base = priv->device_pointers.error_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
- if (!base)
- base = priv->init_errlog_ptr;
- } else {
- if (!base)
- base = priv->inst_errlog_ptr;
- }
-
- if (!iwlagn_hw_valid_rtc_data_addr(base)) {
- IWL_ERR(priv,
- "Not valid error log pointer 0x%08X for %s uCode\n",
- base,
- (priv->ucode_type == IWL_UCODE_INIT)
- ? "Init" : "RT");
- return;
- }
-
- iwl_read_targ_mem_words(priv, base, &table, sizeof(table));
-
- if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
- IWL_ERR(priv, "Start IWL Error Log Dump:\n");
- IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
- priv->status, table.valid);
- }
-
- priv->isr_stats.err_code = table.error_id;
-
- trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
- table.data1, table.data2, table.line,
- table.blink1, table.blink2, table.ilink1,
- table.ilink2, table.bcon_time, table.gp1,
- table.gp2, table.gp3, table.ucode_ver,
- table.hw_ver, table.brd_ver);
- IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
- desc_lookup(table.error_id));
- IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
- IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
- IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
- IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
- IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
- IWL_ERR(priv, "0x%08X | data1\n", table.data1);
- IWL_ERR(priv, "0x%08X | data2\n", table.data2);
- IWL_ERR(priv, "0x%08X | line\n", table.line);
- IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
- IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
- IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
- IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
- IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
- IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
- IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
- IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
- IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
- IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
-}
-
-#define EVENT_START_OFFSET (4 * sizeof(u32))
-
-/**
- * iwl_print_event_log - Dump error event log to syslog
- *
- */
-static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
- u32 num_events, u32 mode,
- int pos, char **buf, size_t bufsz)
-{
- u32 i;
- u32 base; /* SRAM byte address of event log header */
- u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
- u32 ptr; /* SRAM byte address of log data */
- u32 ev, time, data; /* event log data */
- unsigned long reg_flags;
-
- if (num_events == 0)
- return pos;
-
- base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
- if (!base)
- base = priv->init_evtlog_ptr;
- } else {
- if (!base)
- base = priv->inst_evtlog_ptr;
- }
-
- if (mode == 0)
- event_size = 2 * sizeof(u32);
- else
- event_size = 3 * sizeof(u32);
-
- ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
-
- /* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
-
- /* Set starting address; reads will auto-increment */
- iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);
- rmb();
-
- /* "time" is actually "data" for mode 0 (no timestamp).
- * place event id # at far right for easier visual parsing. */
- for (i = 0; i < num_events; i++) {
- ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
- time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
- if (mode == 0) {
- /* data, ev */
- if (bufsz) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- "EVT_LOG:0x%08x:%04u\n",
- time, ev);
- } else {
- trace_iwlwifi_dev_ucode_event(priv, 0,
- time, ev);
- IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
- time, ev);
- }
- } else {
- data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
- if (bufsz) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- "EVT_LOGT:%010u:0x%08x:%04u\n",
- time, data, ev);
- } else {
- IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
- time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv, time,
- data, ev);
- }
- }
- }
-
- /* Allow device to power down */
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return pos;
-}
-
-/**
- * iwl_print_last_event_logs - Dump the newest # of event log to syslog
- */
-static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
- u32 num_wraps, u32 next_entry,
- u32 size, u32 mode,
- int pos, char **buf, size_t bufsz)
-{
- /*
- * display the newest DEFAULT_LOG_ENTRIES entries
- * i.e the entries just before the next ont that uCode would fill.
- */
- if (num_wraps) {
- if (next_entry < size) {
- pos = iwl_print_event_log(priv,
- capacity - (size - next_entry),
- size - next_entry, mode,
- pos, buf, bufsz);
- pos = iwl_print_event_log(priv, 0,
- next_entry, mode,
- pos, buf, bufsz);
- } else
- pos = iwl_print_event_log(priv, next_entry - size,
- size, mode, pos, buf, bufsz);
- } else {
- if (next_entry < size) {
- pos = iwl_print_event_log(priv, 0, next_entry,
- mode, pos, buf, bufsz);
- } else {
- pos = iwl_print_event_log(priv, next_entry - size,
- size, mode, pos, buf, bufsz);
- }
- }
- return pos;
-}
-
-#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
-
-int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
- char **buf, bool display)
-{
- u32 base; /* SRAM byte address of event log header */
- u32 capacity; /* event log capacity in # entries */
- u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
- u32 num_wraps; /* # times uCode wrapped to top of log */
- u32 next_entry; /* index of next entry to be written by uCode */
- u32 size; /* # entries that we'll print */
- u32 logsize;
- int pos = 0;
- size_t bufsz = 0;
-
- base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
- logsize = priv->init_evtlog_size;
- if (!base)
- base = priv->init_evtlog_ptr;
- } else {
- logsize = priv->inst_evtlog_size;
- if (!base)
- base = priv->inst_evtlog_ptr;
- }
-
- if (!iwlagn_hw_valid_rtc_data_addr(base)) {
- IWL_ERR(priv,
- "Invalid event log pointer 0x%08X for %s uCode\n",
- base,
- (priv->ucode_type == IWL_UCODE_INIT)
- ? "Init" : "RT");
- return -EINVAL;
- }
-
- /* event log header */
- capacity = iwl_read_targ_mem(priv, base);
- mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
- num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
- next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
-
- if (capacity > logsize) {
- IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
- capacity, logsize);
- capacity = logsize;
- }
-
- if (next_entry > logsize) {
- IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
- next_entry, logsize);
- next_entry = logsize;
- }
-
- size = num_wraps ? capacity : next_entry;
-
- /* bail out if nothing in log */
- if (size == 0) {
- IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- return pos;
- }
-
- /* enable/disable bt channel inhibition */
- priv->bt_ch_announce = iwlagn_bt_ch_announce;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
- size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
- ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
-#else
- size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
- ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
-#endif
- IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
- size);
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (display) {
- if (full_log)
- bufsz = capacity * 48;
- else
- bufsz = size * 48;
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
- }
- if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
- /*
- * if uCode has wrapped back to top of log,
- * start at the oldest entry,
- * i.e the next one that uCode would fill.
- */
- if (num_wraps)
- pos = iwl_print_event_log(priv, next_entry,
- capacity - next_entry, mode,
- pos, buf, bufsz);
- /* (then/else) start at top of log */
- pos = iwl_print_event_log(priv, 0,
- next_entry, mode, pos, buf, bufsz);
- } else
- pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode,
- pos, buf, bufsz);
-#else
- pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode,
- pos, buf, bufsz);
-#endif
- return pos;
-}
-
static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
{
struct iwl_ct_kill_config cmd;
@@ -1575,44 +1170,43 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&priv->lock, flags);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ spin_lock_irqsave(&priv->shrd->lock, flags);
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
priv->thermal_throttle.ct_kill_toggle = false;
if (priv->cfg->base_params->support_ct_kill_exit) {
adv_cmd.critical_temperature_enter =
- cpu_to_le32(priv->hw_params.ct_kill_threshold);
+ cpu_to_le32(hw_params(priv).ct_kill_threshold);
adv_cmd.critical_temperature_exit =
- cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
+ cpu_to_le32(hw_params(priv).ct_kill_exit_threshold);
- ret = trans_send_cmd_pdu(&priv->trans,
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_CT_KILL_CONFIG_CMD,
CMD_SYNC, sizeof(adv_cmd), &adv_cmd);
if (ret)
IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
else
IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
- "succeeded, "
- "critical temperature enter is %d,"
- "exit is %d\n",
- priv->hw_params.ct_kill_threshold,
- priv->hw_params.ct_kill_exit_threshold);
+ "succeeded, critical temperature enter is %d,"
+ "exit is %d\n",
+ hw_params(priv).ct_kill_threshold,
+ hw_params(priv).ct_kill_exit_threshold);
} else {
cmd.critical_temperature_R =
- cpu_to_le32(priv->hw_params.ct_kill_threshold);
+ cpu_to_le32(hw_params(priv).ct_kill_threshold);
- ret = trans_send_cmd_pdu(&priv->trans,
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_CT_KILL_CONFIG_CMD,
CMD_SYNC, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
else
IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
- "succeeded, "
- "critical temperature is %d\n",
- priv->hw_params.ct_kill_threshold);
+ "succeeded, "
+ "critical temperature is %d\n",
+ hw_params(priv).ct_kill_threshold);
}
}
@@ -1626,10 +1220,10 @@ static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
};
memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
- calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
+ calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_RT_CFG_ALL;
calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
}
@@ -1641,7 +1235,7 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
if (IWL_UCODE_API(priv->ucode_ver) > 1) {
IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
- return trans_send_cmd_pdu(&priv->trans,
+ return iwl_trans_send_cmd_pdu(trans(priv),
TX_ANT_CONFIGURATION_CMD,
CMD_SYNC,
sizeof(struct iwl_tx_ant_config_cmd),
@@ -1663,17 +1257,17 @@ int iwl_alive_start(struct iwl_priv *priv)
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
/*TODO: this should go to the transport layer */
- iwl_reset_ict(priv);
+ iwl_reset_ict(trans(priv));
IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
/* After the ALIVE response, we can send host commands to the uCode */
- set_bit(STATUS_ALIVE, &priv->status);
+ set_bit(STATUS_ALIVE, &priv->shrd->status);
/* Enable watchdog to monitor the driver tx queues */
iwl_setup_watchdog(priv);
- if (iwl_is_rfkill(priv))
+ if (iwl_is_rfkill(priv->shrd))
return -ERFKILL;
/* download priority table before any calibration request */
@@ -1710,8 +1304,9 @@ int iwl_alive_start(struct iwl_priv *priv)
iwl_send_bt_config(priv);
}
- if (priv->hw_params.calib_rt_cfg)
- iwlagn_send_calib_cfg_rt(priv, priv->hw_params.calib_rt_cfg);
+ if (hw_params(priv).calib_rt_cfg)
+ iwlagn_send_calib_cfg_rt(priv,
+ hw_params(priv).calib_rt_cfg);
ieee80211_wake_queues(priv->hw);
@@ -1720,7 +1315,7 @@ int iwl_alive_start(struct iwl_priv *priv)
/* Configure Tx antenna selection based on H/W config */
iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant);
- if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
+ if (iwl_is_associated_ctx(ctx) && !priv->shrd->wowlan) {
struct iwl_rxon_cmd *active_rxon =
(struct iwl_rxon_cmd *)&ctx->active;
/* apply any changes in staging */
@@ -1735,12 +1330,12 @@ int iwl_alive_start(struct iwl_priv *priv)
iwlagn_set_rxon_chain(priv, ctx);
}
- if (!priv->wowlan) {
+ if (!priv->shrd->wowlan) {
/* WoWLAN ucode will not reply in the same way, skip it */
iwl_reset_run_time_calib(priv);
}
- set_bit(STATUS_READY, &priv->status);
+ set_bit(STATUS_READY, &priv->shrd->status);
/* Configure the adapter for unassociated operation */
ret = iwlagn_commit_rxon(priv, ctx);
@@ -1765,7 +1360,15 @@ static void __iwl_down(struct iwl_priv *priv)
iwl_scan_cancel_timeout(priv, 200);
- exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
+ /*
+ * If active, scanning won't cancel it, so say it expired.
+ * No race since we hold the mutex here and a new one
+ * can't come in at this time.
+ */
+ ieee80211_remain_on_channel_expired(priv->hw);
+
+ exit_pending =
+ test_and_set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
* to prevent rearm timer */
@@ -1790,32 +1393,33 @@ static void __iwl_down(struct iwl_priv *priv)
/* Wipe out the EXIT_PENDING status bit if we are not actually
* exiting the module */
if (!exit_pending)
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
+ clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
if (priv->mac80211_registered)
ieee80211_stop_queues(priv->hw);
+ iwl_trans_stop_device(trans(priv));
+
/* Clear out all status bits but a few that are stable across reset */
- priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
+ priv->shrd->status &=
+ test_bit(STATUS_RF_KILL_HW, &priv->shrd->status) <<
STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
+ test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status) <<
STATUS_GEO_CONFIGURED |
- test_bit(STATUS_FW_ERROR, &priv->status) <<
+ test_bit(STATUS_FW_ERROR, &priv->shrd->status) <<
STATUS_FW_ERROR |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
+ test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) <<
STATUS_EXIT_PENDING;
- trans_stop_device(&priv->trans);
-
dev_kfree_skb(priv->beacon_skb);
priv->beacon_skb = NULL;
}
static void iwl_down(struct iwl_priv *priv)
{
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
__iwl_down(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
iwl_cancel_deferred_work(priv);
}
@@ -1827,9 +1431,9 @@ static int __iwl_up(struct iwl_priv *priv)
struct iwl_rxon_context *ctx;
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
return -EIO;
}
@@ -1862,9 +1466,9 @@ static int __iwl_up(struct iwl_priv *priv)
return 0;
error:
- set_bit(STATUS_EXIT_PENDING, &priv->status);
+ set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
__iwl_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
+ clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
IWL_ERR(priv, "Unable to initialize device.\n");
return ret;
@@ -1882,11 +1486,11 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv,
run_time_calib_work);
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status)) {
- mutex_unlock(&priv->mutex);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
+ test_bit(STATUS_SCANNING, &priv->shrd->status)) {
+ mutex_unlock(&priv->shrd->mutex);
return;
}
@@ -1895,7 +1499,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
iwl_sensitivity_calibration(priv);
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
static void iwlagn_prepare_restart(struct iwl_priv *priv)
@@ -1907,7 +1511,7 @@ static void iwlagn_prepare_restart(struct iwl_priv *priv)
u8 bt_status;
bool bt_is_sco;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
for_each_context(priv, ctx)
ctx->vif = NULL;
@@ -1941,13 +1545,13 @@ static void iwl_bg_restart(struct work_struct *data)
{
struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
- if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
- mutex_lock(&priv->mutex);
+ if (test_and_clear_bit(STATUS_FW_ERROR, &priv->shrd->status)) {
+ mutex_lock(&priv->shrd->mutex);
iwlagn_prepare_restart(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
iwl_cancel_deferred_work(priv);
ieee80211_restart_hw(priv->hw);
} else {
@@ -1955,94 +1559,6 @@ static void iwl_bg_restart(struct work_struct *data)
}
}
-static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- unsigned int wait)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- /* Not supported if we don't have PAN */
- if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) {
- ret = -EOPNOTSUPP;
- goto free;
- }
-
- /* Not supported on pre-P2P firmware */
- if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
- BIT(NL80211_IFTYPE_P2P_CLIENT))) {
- ret = -EOPNOTSUPP;
- goto free;
- }
-
- mutex_lock(&priv->mutex);
-
- if (!priv->contexts[IWL_RXON_CTX_PAN].is_active) {
- /*
- * If the PAN context is free, use the normal
- * way of doing remain-on-channel offload + TX.
- */
- ret = 1;
- goto out;
- }
-
- /* TODO: queue up if scanning? */
- if (test_bit(STATUS_SCANNING, &priv->status) ||
- priv->offchan_tx_skb) {
- ret = -EBUSY;
- goto out;
- }
-
- /*
- * max_scan_ie_len doesn't include the blank SSID or the header,
- * so need to add that again here.
- */
- if (skb->len > hw->wiphy->max_scan_ie_len + 24 + 2) {
- ret = -ENOBUFS;
- goto out;
- }
-
- priv->offchan_tx_skb = skb;
- priv->offchan_tx_timeout = wait;
- priv->offchan_tx_chan = chan;
-
- ret = iwl_scan_initiate(priv, priv->contexts[IWL_RXON_CTX_PAN].vif,
- IWL_SCAN_OFFCH_TX, chan->band);
- if (ret)
- priv->offchan_tx_skb = NULL;
- out:
- mutex_unlock(&priv->mutex);
- free:
- if (ret < 0)
- kfree_skb(skb);
-
- return ret;
-}
-
-static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- mutex_lock(&priv->mutex);
-
- if (!priv->offchan_tx_skb) {
- ret = -EINVAL;
- goto unlock;
- }
-
- priv->offchan_tx_skb = NULL;
-
- ret = iwl_scan_cancel_timeout(priv, 200);
- if (ret)
- ret = -EIO;
-unlock:
- mutex_unlock(&priv->mutex);
-
- return ret;
-}
-
/*****************************************************************************
*
* mac80211 entry point functions
@@ -2124,7 +1640,7 @@ iwlagn_iface_combinations_p2p[] = {
* Not a mac80211 entry point function, but it fits in with all the
* other mac80211 functions grouped here.
*/
-static int iwl_mac_setup_register(struct iwl_priv *priv,
+static int iwlagn_mac_setup_register(struct iwl_priv *priv,
struct iwlagn_ucode_capabilities *capa)
{
int ret;
@@ -2183,7 +1699,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
- if (priv->ucode_wowlan.code.len && device_can_wakeup(priv->bus->dev)) {
+ if (priv->ucode_wowlan.code.len && device_can_wakeup(bus(priv)->dev)) {
hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_EAP_IDENTITY_REQ |
@@ -2242,16 +1758,16 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
IWL_DEBUG_MAC80211(priv, "enter\n");
/* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
ret = __iwl_up(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
if (ret)
return ret;
IWL_DEBUG_INFO(priv, "Start UP work done.\n");
/* Now we should be done, and the READY bit should be set. */
- if (WARN_ON(!test_bit(STATUS_READY, &priv->status)))
+ if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
ret = -EIO;
iwlagn_led_enable(priv);
@@ -2274,17 +1790,17 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw)
iwl_down(priv);
- flush_workqueue(priv->workqueue);
+ flush_workqueue(priv->shrd->workqueue);
/* User space software may expect getting rfkill changes
* even if interface is down */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+ iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
iwl_enable_rfkill_int(priv);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int iwlagn_send_patterns(struct iwl_priv *priv,
struct cfg80211_wowlan *wowlan)
{
@@ -2322,7 +1838,7 @@ static int iwlagn_send_patterns(struct iwl_priv *priv,
}
cmd.data[0] = pattern_cmd;
- err = trans_send_cmd(&priv->trans, &cmd);
+ err = iwl_trans_send_cmd(trans(priv), &cmd);
kfree(pattern_cmd);
return err;
}
@@ -2337,7 +1853,8 @@ static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
if (iwlagn_mod_params.sw_crypto)
return;
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
goto out;
@@ -2348,7 +1865,8 @@ static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
priv->have_rekey_data = true;
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
}
struct wowlan_key_data {
@@ -2359,7 +1877,7 @@ struct wowlan_key_data {
bool error, use_rsc_tsc, use_tkip;
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
{
int i;
@@ -2386,7 +1904,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
u16 p1k[IWLAGN_P1K_SIZE];
int ret, i;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
@@ -2491,7 +2009,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
break;
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
@@ -2516,7 +2034,8 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
if (WARN_ON(!wowlan))
return -EINVAL;
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
/* Don't attempt WoWLAN when not associated, tear down instead. */
if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
@@ -2545,7 +2064,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
* since the uCode will add 0x10 before using the value.
*/
for (i = 0; i < 8; i++) {
- seq = priv->stations[IWL_AP_ID].tid[i].seq_number;
+ seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number;
seq -= 0x10;
wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
}
@@ -2577,9 +2096,9 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
memcpy(&rxon, &ctx->active, sizeof(rxon));
- trans_stop_device(&priv->trans);
+ iwl_trans_stop_device(trans(priv));
- priv->wowlan = true;
+ priv->shrd->wowlan = true;
ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan,
IWL_UCODE_WOWLAN);
@@ -2610,11 +2129,11 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
* constraints. Since we're in the suspend path
* that isn't really a problem though.
*/
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
ieee80211_iter_keys(priv->hw, ctx->vif,
iwlagn_wowlan_program_keys,
&key_data);
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if (key_data.error) {
ret = -EIO;
goto error;
@@ -2629,13 +2148,13 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
.len[0] = sizeof(*key_data.rsc_tsc),
};
- ret = trans_send_cmd(&priv->trans, &rsc_tsc_cmd);
+ ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
if (ret)
goto error;
}
if (key_data.use_tkip) {
- ret = trans_send_cmd_pdu(&priv->trans,
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_WOWLAN_TKIP_PARAMS,
CMD_SYNC, sizeof(tkip_cmd),
&tkip_cmd);
@@ -2651,7 +2170,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
kek_kck_cmd.replay_ctr = priv->replay_ctr;
- ret = trans_send_cmd_pdu(&priv->trans,
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_WOWLAN_KEK_KCK_MATERIAL,
CMD_SYNC, sizeof(kek_kck_cmd),
&kek_kck_cmd);
@@ -2660,7 +2179,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
}
}
- ret = trans_send_cmd_pdu(&priv->trans, REPLY_WOWLAN_WAKEUP_FILTER,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
CMD_SYNC, sizeof(wakeup_filter_cmd),
&wakeup_filter_cmd);
if (ret)
@@ -2670,21 +2189,23 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
if (ret)
goto error;
- device_set_wakeup_enable(priv->bus->dev, true);
+ device_set_wakeup_enable(bus(priv)->dev, true);
/* Now let the ucode operate on its own */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
goto out;
error:
- priv->wowlan = false;
+ priv->shrd->wowlan = false;
iwlagn_prepare_restart(priv);
ieee80211_restart_hw(priv->hw);
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
kfree(key_data.rsc_tsc);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
return ret;
}
@@ -2697,21 +2218,22 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
u32 base, status = 0xffffffff;
int ret = -EIO;
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
base = priv->device_pointers.error_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
- spin_lock_irqsave(&priv->reg_lock, flags);
- ret = iwl_grab_nic_access_silent(priv);
+ spin_lock_irqsave(&bus(priv)->reg_lock, flags);
+ ret = iwl_grab_nic_access_silent(bus(priv));
if (ret == 0) {
- iwl_write32(priv, HBUS_TARG_MEM_RADDR, base);
- status = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
- iwl_release_nic_access(priv);
+ iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
+ status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ iwl_release_nic_access(bus(priv));
}
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (ret == 0) {
@@ -2722,7 +2244,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
if (priv->wowlan_sram)
_iwl_read_targ_mem_words(
- priv, 0x800000, priv->wowlan_sram,
+ bus(priv), 0x800000, priv->wowlan_sram,
priv->ucode_wowlan.data.len / 4);
}
#endif
@@ -2731,9 +2253,9 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
/* we'll clear ctx->vif during iwlagn_prepare_restart() */
vif = ctx->vif;
- priv->wowlan = false;
+ priv->shrd->wowlan = false;
- device_set_wakeup_enable(priv->bus->dev, false);
+ device_set_wakeup_enable(bus(priv)->dev, false);
iwlagn_prepare_restart(priv);
@@ -2741,7 +2263,8 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
iwl_connection_init_rx_config(priv, ctx);
iwlagn_set_rxon_chain(priv, ctx);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
ieee80211_resume_disconnect(vif);
@@ -2810,7 +2333,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
return 0;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
iwl_scan_cancel_timeout(priv, 100);
BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
@@ -2861,7 +2384,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EINVAL;
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
@@ -2876,6 +2399,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct iwl_priv *priv = hw->priv;
int ret = -EINVAL;
struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
sta->addr, tid);
@@ -2883,7 +2407,8 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
return -EACCES;
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
switch (action) {
case IEEE80211_AMPDU_RX_START:
@@ -2893,17 +2418,12 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
IWL_DEBUG_HT(priv, "stop Rx\n");
ret = iwl_sta_rx_agg_stop(priv, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
ret = 0;
break;
case IEEE80211_AMPDU_TX_START:
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
- if (ret == 0) {
- priv->agg_tids_count++;
- IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
- priv->agg_tids_count);
- }
break;
case IEEE80211_AMPDU_TX_STOP:
IWL_DEBUG_HT(priv, "stop Tx\n");
@@ -2913,9 +2433,9 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
priv->agg_tids_count);
}
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
ret = 0;
- if (priv->cfg->ht_params &&
+ if (!priv->agg_tids_count && priv->cfg->ht_params &&
priv->cfg->ht_params->use_rts_for_aggregation) {
/*
* switch off RTS/CTS if it was previously enabled
@@ -2929,8 +2449,8 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_OPERATIONAL:
buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
- trans_txq_agg_setup(&priv->trans, iwl_sta_id(sta), tid,
- buf_size);
+ iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, iwl_sta_id(sta),
+ tid, buf_size);
/*
* If the limit is 0, then it wasn't initialised yet,
@@ -2962,6 +2482,9 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
sta_priv->lq_sta.lq.general_params.flags |=
LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
}
+ priv->agg_tids_count++;
+ IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+ priv->agg_tids_count);
sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
sta_priv->max_agg_bufsize;
@@ -2974,8 +2497,8 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
ret = 0;
break;
}
- mutex_unlock(&priv->mutex);
-
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
}
@@ -2987,15 +2510,15 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- int ret;
+ int ret = 0;
u8 sta_id;
- IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
+ IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
sta->addr);
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
sta->addr);
- sta_priv->common.sta_id = IWL_INVALID_STATION;
+ sta_priv->sta_id = IWL_INVALID_STATION;
atomic_set(&sta_priv->pending_frames, 0);
if (vif->type == NL80211_IFTYPE_AP)
@@ -3007,19 +2530,20 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
IWL_ERR(priv, "Unable to add station %pM (%d)\n",
sta->addr, ret);
/* Should we return success if return code is EEXIST ? */
- mutex_unlock(&priv->mutex);
- return ret;
+ goto out;
}
- sta_priv->common.sta_id = sta_id;
+ sta_priv->sta_id = sta_id;
/* Initialize rate scaling */
IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
sta->addr);
iwl_rs_rate_init(priv, sta, sta_id);
- mutex_unlock(&priv->mutex);
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
+ return ret;
}
static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
@@ -3043,14 +2567,14 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (iwl_is_rfkill(priv))
+ if (iwl_is_rfkill(priv->shrd))
goto out;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status) ||
- test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
+ test_bit(STATUS_SCANNING, &priv->shrd->status) ||
+ test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
goto out;
if (!iwl_is_associated_ctx(ctx))
@@ -3069,7 +2593,7 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
goto out;
}
- spin_lock_irq(&priv->lock);
+ spin_lock_irq(&priv->shrd->lock);
priv->current_ht_config.smps = conf->smps_mode;
@@ -3099,23 +2623,23 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
iwl_set_rxon_ht(priv, ht_conf);
iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
- spin_unlock_irq(&priv->lock);
+ spin_unlock_irq(&priv->shrd->lock);
iwl_set_rate(priv);
/*
* at this point, staging_rxon has the
* configuration for channel switch
*/
- set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
priv->switch_channel = cpu_to_le16(ch);
if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) {
- clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+ clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
priv->switch_channel = 0;
ieee80211_chswitch_done(ctx->vif, false);
}
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -3145,7 +2669,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
#undef CHK
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
for_each_context(priv, ctx) {
ctx->staging.filter_flags &= ~filter_nand;
@@ -3157,7 +2681,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
*/
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
/*
* Receiving all multicast frames is always enabled by the
@@ -3173,14 +2697,14 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
{
struct iwl_priv *priv = hw->priv;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "enter\n");
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
goto done;
}
- if (iwl_is_rfkill(priv)) {
+ if (iwl_is_rfkill(priv->shrd)) {
IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
goto done;
}
@@ -3197,101 +2721,198 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
}
}
IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
- iwlagn_wait_tx_queue_empty(priv);
+ iwl_trans_wait_tx_queue_empty(trans(priv));
done:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-static void iwlagn_disable_roc(struct iwl_priv *priv)
+void iwlagn_disable_roc(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
- struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel);
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
- if (!ctx->is_active)
+ if (!priv->hw_roc_setup)
return;
- ctx->staging.dev_type = RXON_DEV_TYPE_2STA;
+ ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_set_rxon_channel(priv, chan, ctx);
- iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
priv->hw_roc_channel = NULL;
+ memset(ctx->staging.node_addr, 0, ETH_ALEN);
+
iwlagn_commit_rxon(priv, ctx);
ctx->is_active = false;
+ priv->hw_roc_setup = false;
}
-static void iwlagn_bg_roc_done(struct work_struct *work)
+static void iwlagn_disable_roc_work(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv,
- hw_roc_work.work);
+ hw_roc_disable_work.work);
- mutex_lock(&priv->mutex);
- ieee80211_remain_on_channel_expired(priv->hw);
+ mutex_lock(&priv->shrd->mutex);
iwlagn_disable_roc(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
-static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
+static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_channel *channel,
enum nl80211_channel_type channel_type,
int duration)
{
struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
int err = 0;
- if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
return -EOPNOTSUPP;
- if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
- BIT(NL80211_IFTYPE_P2P_CLIENT)))
+ if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
return -EOPNOTSUPP;
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
- if (priv->contexts[IWL_RXON_CTX_PAN].is_active ||
- test_bit(STATUS_SCAN_HW, &priv->status)) {
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
err = -EBUSY;
goto out;
}
- priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
priv->hw_roc_channel = channel;
priv->hw_roc_chantype = channel_type;
- priv->hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
- iwlagn_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
- queue_delayed_work(priv->workqueue, &priv->hw_roc_work,
- msecs_to_jiffies(duration + 20));
+ priv->hw_roc_duration = duration;
+ priv->hw_roc_start_notified = false;
+ cancel_delayed_work(&priv->hw_roc_disable_work);
+
+ if (!ctx->is_active) {
+ ctx->is_active = true;
+ ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
+ memcpy(ctx->staging.node_addr,
+ priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+ ETH_ALEN);
+ memcpy(ctx->staging.bssid_addr,
+ priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+ ETH_ALEN);
+ err = iwlagn_commit_rxon(priv, ctx);
+ if (err)
+ goto out;
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
+ RXON_FILTER_PROMISC_MSK |
+ RXON_FILTER_CTL2HOST_MSK;
+
+ err = iwlagn_commit_rxon(priv, ctx);
+ if (err) {
+ iwlagn_disable_roc(priv);
+ goto out;
+ }
+ priv->hw_roc_setup = true;
+ }
- msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
- ieee80211_ready_on_channel(priv->hw);
+ err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
+ if (err)
+ iwlagn_disable_roc(priv);
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
}
-static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = hw->priv;
- if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
return -EOPNOTSUPP;
- cancel_delayed_work_sync(&priv->hw_roc_work);
-
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+ iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
iwlagn_disable_roc(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
}
+static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ u8 sta_id;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (iwl_is_associated_ctx(ctx)) {
+ ret = 0;
+ goto out;
+ }
+
+ if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
+ if (ret)
+ goto out;
+
+ if (WARN_ON(sta_id != ctx->ap_sta_id)) {
+ ret = -EIO;
+ goto out_remove_sta;
+ }
+
+ memcpy(ctx->bssid, bssid, ETH_ALEN);
+ ctx->preauth_bssid = true;
+
+ ret = iwlagn_commit_rxon(priv, ctx);
+
+ if (ret == 0)
+ goto out;
+
+ out_remove_sta:
+ iwl_remove_station(priv, sta_id, bssid);
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (iwl_is_associated_ctx(ctx))
+ goto out;
+
+ iwl_remove_station(priv, ctx->ap_sta_id, bssid);
+ ctx->preauth_bssid = false;
+ /* no need to commit */
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
/*****************************************************************************
*
* driver setup and teardown
@@ -3300,9 +2921,9 @@ static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
static void iwl_setup_deferred_work(struct iwl_priv *priv)
{
- priv->workqueue = create_singlethread_workqueue(DRV_NAME);
+ priv->shrd->workqueue = create_singlethread_workqueue(DRV_NAME);
- init_waitqueue_head(&priv->wait_command_queue);
+ init_waitqueue_head(&priv->shrd->wait_command_queue);
INIT_WORK(&priv->restart, iwl_bg_restart);
INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
@@ -3310,7 +2931,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
- INIT_DELAYED_WORK(&priv->hw_roc_work, iwlagn_bg_roc_done);
+ INIT_DELAYED_WORK(&priv->hw_roc_disable_work,
+ iwlagn_disable_roc_work);
iwl_setup_scan_deferred_work(priv);
@@ -3342,6 +2964,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->bt_full_concurrency);
cancel_work_sync(&priv->bt_runtime_config);
+ cancel_delayed_work_sync(&priv->hw_roc_disable_work);
del_timer_sync(&priv->statistics_periodic);
del_timer_sync(&priv->ucode_trace);
@@ -3372,10 +2995,9 @@ static int iwl_init_drv(struct iwl_priv *priv)
{
int ret;
- spin_lock_init(&priv->sta_lock);
- spin_lock_init(&priv->hcmd_lock);
+ spin_lock_init(&priv->shrd->sta_lock);
- mutex_init(&priv->mutex);
+ mutex_init(&priv->shrd->mutex);
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
@@ -3416,7 +3038,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
goto err;
}
- ret = iwlcore_init_geos(priv);
+ ret = iwl_init_geos(priv);
if (ret) {
IWL_ERR(priv, "initializing geos failed: %d\n", ret);
goto err_free_channel_map;
@@ -3434,8 +3056,10 @@ err:
static void iwl_uninit_drv(struct iwl_priv *priv)
{
iwl_calib_free_results(priv);
- iwlcore_free_geos(priv);
+ iwl_free_geos(priv);
iwl_free_channel_map(priv);
+ if (priv->tx_cmd_pool)
+ kmem_cache_destroy(priv->tx_cmd_pool);
kfree(priv->scan_cmd);
kfree(priv->beacon_cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -3443,12 +3067,13 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
#endif
}
-static void iwl_mac_rssi_callback(struct ieee80211_hw *hw,
+static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
enum ieee80211_rssi_event rssi_event)
{
struct iwl_priv *priv = hw->priv;
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
if (priv->cfg->bt_params &&
priv->cfg->bt_params->advanced_bt_coexist) {
@@ -3463,89 +3088,88 @@ static void iwl_mac_rssi_callback(struct ieee80211_hw *hw,
"ignoring RSSI callback\n");
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, bool set)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ queue_work(priv->shrd->workqueue, &priv->beacon_update);
+
+ return 0;
}
struct ieee80211_ops iwlagn_hw_ops = {
.tx = iwlagn_mac_tx,
.start = iwlagn_mac_start,
.stop = iwlagn_mac_stop,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = iwlagn_mac_suspend,
.resume = iwlagn_mac_resume,
#endif
- .add_interface = iwl_mac_add_interface,
- .remove_interface = iwl_mac_remove_interface,
- .change_interface = iwl_mac_change_interface,
+ .add_interface = iwlagn_mac_add_interface,
+ .remove_interface = iwlagn_mac_remove_interface,
+ .change_interface = iwlagn_mac_change_interface,
.config = iwlagn_mac_config,
.configure_filter = iwlagn_configure_filter,
.set_key = iwlagn_mac_set_key,
.update_tkip_key = iwlagn_mac_update_tkip_key,
.set_rekey_data = iwlagn_mac_set_rekey_data,
- .conf_tx = iwl_mac_conf_tx,
+ .conf_tx = iwlagn_mac_conf_tx,
.bss_info_changed = iwlagn_bss_info_changed,
.ampdu_action = iwlagn_mac_ampdu_action,
- .hw_scan = iwl_mac_hw_scan,
+ .hw_scan = iwlagn_mac_hw_scan,
.sta_notify = iwlagn_mac_sta_notify,
.sta_add = iwlagn_mac_sta_add,
- .sta_remove = iwl_mac_sta_remove,
+ .sta_remove = iwlagn_mac_sta_remove,
.channel_switch = iwlagn_mac_channel_switch,
.flush = iwlagn_mac_flush,
- .tx_last_beacon = iwl_mac_tx_last_beacon,
- .remain_on_channel = iwl_mac_remain_on_channel,
- .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
- .offchannel_tx = iwl_mac_offchannel_tx,
- .offchannel_tx_cancel_wait = iwl_mac_offchannel_tx_cancel_wait,
- .rssi_callback = iwl_mac_rssi_callback,
- CFG80211_TESTMODE_CMD(iwl_testmode_cmd)
- CFG80211_TESTMODE_DUMP(iwl_testmode_dump)
+ .tx_last_beacon = iwlagn_mac_tx_last_beacon,
+ .remain_on_channel = iwlagn_mac_remain_on_channel,
+ .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
+ .rssi_callback = iwlagn_mac_rssi_callback,
+ CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
+ CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
+ .tx_sync = iwlagn_mac_tx_sync,
+ .finish_tx_sync = iwlagn_mac_finish_tx_sync,
+ .set_tim = iwlagn_mac_set_tim,
};
static u32 iwl_hw_detect(struct iwl_priv *priv)
{
- return iwl_read32(priv, CSR_HW_REV);
+ return iwl_read32(bus(priv), CSR_HW_REV);
}
+/* Size of one Rx buffer in host DRAM */
+#define IWL_RX_BUF_SIZE_4K (4 * 1024)
+#define IWL_RX_BUF_SIZE_8K (8 * 1024)
+
static int iwl_set_hw_params(struct iwl_priv *priv)
{
- priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
- priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
if (iwlagn_mod_params.amsdu_size_8K)
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
+ hw_params(priv).rx_page_order =
+ get_order(IWL_RX_BUF_SIZE_8K);
else
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
-
- priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
+ hw_params(priv).rx_page_order =
+ get_order(IWL_RX_BUF_SIZE_4K);
if (iwlagn_mod_params.disable_11n)
priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
+ hw_params(priv).num_ampdu_queues =
+ priv->cfg->base_params->num_of_ampdu_queues;
+ hw_params(priv).shadow_reg_enable =
+ priv->cfg->base_params->shadow_reg_enable;
+ hw_params(priv).sku = priv->cfg->sku;
+ hw_params(priv).wd_timeout = priv->cfg->base_params->wd_timeout;
+
/* Device-specific setup */
return priv->cfg->lib->set_hw_params(priv);
}
-static const u8 iwlagn_bss_ac_to_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
-};
-
-static const u8 iwlagn_bss_ac_to_queue[] = {
- 0, 1, 2, 3,
-};
-
-static const u8 iwlagn_pan_ac_to_fifo[] = {
- IWL_TX_FIFO_VO_IPAN,
- IWL_TX_FIFO_VI_IPAN,
- IWL_TX_FIFO_BE_IPAN,
- IWL_TX_FIFO_BK_IPAN,
-};
-
-static const u8 iwlagn_pan_ac_to_queue[] = {
- 7, 6, 5, 4,
-};
-
/* This function both allocates and initializes hw and priv. */
static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
{
@@ -3568,66 +3192,8 @@ out:
return hw;
}
-static void iwl_init_context(struct iwl_priv *priv)
-{
- int i;
-
- /*
- * The default context is always valid,
- * more may be discovered when firmware
- * is loaded.
- */
- priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
- for (i = 0; i < NUM_IWL_RXON_CTX; i++)
- priv->contexts[i].ctxid = i;
-
- priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
- priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
- priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
- priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
- priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
- priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwlagn_bss_ac_to_fifo;
- priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwlagn_bss_ac_to_queue;
- priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
- BIT(NL80211_IFTYPE_ADHOC);
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
- BIT(NL80211_IFTYPE_STATION);
- priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
- priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
- priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
- priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
- priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
- priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
- REPLY_WIPAN_RXON_TIMING;
- priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd =
- REPLY_WIPAN_RXON_ASSOC;
- priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
- priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
- priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
- priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
- priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
- priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo = iwlagn_pan_ac_to_fifo;
- priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue = iwlagn_pan_ac_to_queue;
- priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
- priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
- BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
-#ifdef CONFIG_IWL_P2P
- priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
- BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
-#endif
- priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
- priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
- priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
-
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
-}
-
-int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
+int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
+ struct iwl_cfg *cfg)
{
int err = 0;
struct iwl_priv *priv;
@@ -3645,24 +3211,32 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
}
priv = hw->priv;
- priv->bus = bus;
- bus_set_drv_data(priv->bus, priv);
+ priv->shrd = &priv->_shrd;
+ bus->shrd = priv->shrd;
+ priv->shrd->bus = bus;
+ priv->shrd->priv = priv;
+
+ priv->shrd->trans = trans_ops->alloc(priv->shrd);
+ if (priv->shrd->trans == NULL) {
+ err = -ENOMEM;
+ goto out_free_traffic_mem;
+ }
/* At this point both hw and priv are allocated. */
- SET_IEEE80211_DEV(hw, priv->bus->dev);
+ SET_IEEE80211_DEV(hw, bus(priv)->dev);
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
priv->cfg = cfg;
- priv->inta_mask = CSR_INI_SET_MASK;
/* is antenna coupling more than 35dB ? */
priv->bt_ant_couple_ok =
- (iwlagn_ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
- true : false;
+ (iwlagn_mod_params.ant_coupling >
+ IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
+ true : false;
/* enable/disable bt channel inhibition */
- priv->bt_ch_announce = iwlagn_bt_ch_announce;
+ priv->bt_ch_announce = iwlagn_mod_params.bt_ch_announce;
IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
(priv->bt_ch_announce) ? "On" : "Off");
@@ -3672,15 +3246,15 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
/* these spin locks will be used in apm_ops.init and EEPROM access
* we should init now
*/
- spin_lock_init(&priv->reg_lock);
- spin_lock_init(&priv->lock);
+ spin_lock_init(&bus(priv)->reg_lock);
+ spin_lock_init(&priv->shrd->lock);
/*
* stop and reset the on-board processor just in case it is in a
* strange state ... like being left stranded by a primary kernel
* and this is now the kdump kernel trying to start up
*/
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+ iwl_write32(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
/***********************
* 3. Read REV register
@@ -3689,11 +3263,11 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
priv->cfg->name, hw_rev);
- err = iwl_trans_register(&priv->trans, priv);
+ err = iwl_trans_request_irq(trans(priv));
if (err)
- goto out_free_traffic_mem;
+ goto out_free_trans;
- if (trans_prepare_card_hw(&priv->trans)) {
+ if (iwl_trans_prepare_card_hw(trans(priv))) {
err = -EIO;
IWL_WARN(priv, "Failed, HW not ready\n");
goto out_free_trans;
@@ -3729,9 +3303,6 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
priv->hw->wiphy->n_addresses++;
}
- /* initialize all valid contexts */
- iwl_init_context(priv);
-
/************************
* 5. Setup HW constants
************************/
@@ -3764,13 +3335,14 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
iwl_enable_rfkill_int(priv);
/* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
+ if (iwl_read32(bus(priv),
+ CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
+ set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
+ test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
iwl_power_initialize(priv);
iwl_tt_initialize(priv);
@@ -3784,13 +3356,13 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
return 0;
out_destroy_workqueue:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
+ destroy_workqueue(priv->shrd->workqueue);
+ priv->shrd->workqueue = NULL;
iwl_uninit_drv(priv);
out_free_eeprom:
iwl_eeprom_free(priv);
out_free_trans:
- trans_free(&priv->trans);
+ iwl_trans_free(trans(priv));
out_free_traffic_mem:
iwl_free_traffic_mem(priv);
ieee80211_free_hw(priv->hw);
@@ -3800,21 +3372,17 @@ out:
void __devexit iwl_remove(struct iwl_priv * priv)
{
- unsigned long flags;
-
wait_for_completion(&priv->firmware_loading_complete);
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
iwl_dbgfs_unregister(priv);
- sysfs_remove_group(&priv->bus->dev->kobj,
- &iwl_attribute_group);
- /* ieee80211_unregister_hw call wil cause iwl_mac_stop to
+ /* ieee80211_unregister_hw call wil cause iwlagn_mac_stop to
* to be called and iwl_down since we are removing the device
* we need to set STATUS_EXIT_PENDING bit.
*/
- set_bit(STATUS_EXIT_PENDING, &priv->status);
+ set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
iwl_testmode_cleanup(priv);
iwl_leds_exit(priv);
@@ -3824,40 +3392,26 @@ void __devexit iwl_remove(struct iwl_priv * priv)
priv->mac80211_registered = 0;
}
- /* Reset to low power before unloading driver. */
- iwl_apm_stop(priv);
-
iwl_tt_exit(priv);
- /* make sure we flush any pending irq or
- * tasklet for the driver
- */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- trans_sync_irq(&priv->trans);
+ /*This will stop the queues, move the device to low power state */
+ iwl_trans_stop_device(trans(priv));
iwl_dealloc_ucode(priv);
- trans_rx_free(&priv->trans);
- trans_tx_free(&priv->trans);
-
iwl_eeprom_free(priv);
/*netif_stop_queue(dev); */
- flush_workqueue(priv->workqueue);
+ flush_workqueue(priv->shrd->workqueue);
- /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
- * priv->workqueue... so we can't take down the workqueue
+ /* ieee80211_unregister_hw calls iwlagn_mac_stop, which flushes
+ * priv->shrd->workqueue... so we can't take down the workqueue
* until now... */
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
+ destroy_workqueue(priv->shrd->workqueue);
+ priv->shrd->workqueue = NULL;
iwl_free_traffic_mem(priv);
- trans_free(&priv->trans);
-
- bus_set_drv_data(priv->bus, NULL);
+ iwl_trans_free(trans(priv));
iwl_uninit_drv(priv);
@@ -3906,7 +3460,8 @@ module_exit(iwl_exit);
module_init(iwl_init);
#ifdef CONFIG_IWLWIFI_DEBUG
-module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
+module_param_named(debug, iwlagn_mod_params.debug_level, uint,
+ S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
@@ -3922,18 +3477,21 @@ MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
-module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
- S_IRUGO);
+module_param_named(ucode_alternative,
+ iwlagn_mod_params.wanted_ucode_alternative,
+ int, S_IRUGO);
MODULE_PARM_DESC(ucode_alternative,
"specify ucode alternative to use from ucode file");
-module_param_named(antenna_coupling, iwlagn_ant_coupling, int, S_IRUGO);
+module_param_named(antenna_coupling, iwlagn_mod_params.ant_coupling,
+ int, S_IRUGO);
MODULE_PARM_DESC(antenna_coupling,
"specify antenna coupling in dB (defualt: 0 dB)");
-module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO);
+module_param_named(bt_ch_inhibition, iwlagn_mod_params.bt_ch_announce,
+ bool, S_IRUGO);
MODULE_PARM_DESC(bt_ch_inhibition,
- "Disable BT channel inhibition (default: enable)");
+ "Enable BT channel inhibition (default: enable)");
module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
@@ -3979,6 +3537,11 @@ module_param_named(power_level, iwlagn_mod_params.power_level,
MODULE_PARM_DESC(power_level,
"default power save level (range from 1 - 5, default: 1)");
+module_param_named(auto_agg, iwlagn_mod_params.auto_agg,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(auto_agg,
+ "enable agg w/o check traffic load (default: enable)");
+
/*
* For now, keep using power level 1 instead of automatically
* adjusting ...
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index d941c4c98e4..5b936ec1a54 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -65,54 +65,9 @@
#include "iwl-dev.h"
-/* configuration for the _agn devices */
-extern struct iwl_cfg iwl5300_agn_cfg;
-extern struct iwl_cfg iwl5100_agn_cfg;
-extern struct iwl_cfg iwl5350_agn_cfg;
-extern struct iwl_cfg iwl5100_bgn_cfg;
-extern struct iwl_cfg iwl5100_abg_cfg;
-extern struct iwl_cfg iwl5150_agn_cfg;
-extern struct iwl_cfg iwl5150_abg_cfg;
-extern struct iwl_cfg iwl6005_2agn_cfg;
-extern struct iwl_cfg iwl6005_2abg_cfg;
-extern struct iwl_cfg iwl6005_2bg_cfg;
-extern struct iwl_cfg iwl1030_bgn_cfg;
-extern struct iwl_cfg iwl1030_bg_cfg;
-extern struct iwl_cfg iwl6030_2agn_cfg;
-extern struct iwl_cfg iwl6030_2abg_cfg;
-extern struct iwl_cfg iwl6030_2bgn_cfg;
-extern struct iwl_cfg iwl6030_2bg_cfg;
-extern struct iwl_cfg iwl6000i_2agn_cfg;
-extern struct iwl_cfg iwl6000i_2abg_cfg;
-extern struct iwl_cfg iwl6000i_2bg_cfg;
-extern struct iwl_cfg iwl6000_3agn_cfg;
-extern struct iwl_cfg iwl6050_2agn_cfg;
-extern struct iwl_cfg iwl6050_2abg_cfg;
-extern struct iwl_cfg iwl6150_bgn_cfg;
-extern struct iwl_cfg iwl6150_bg_cfg;
-extern struct iwl_cfg iwl1000_bgn_cfg;
-extern struct iwl_cfg iwl1000_bg_cfg;
-extern struct iwl_cfg iwl100_bgn_cfg;
-extern struct iwl_cfg iwl100_bg_cfg;
-extern struct iwl_cfg iwl130_bgn_cfg;
-extern struct iwl_cfg iwl130_bg_cfg;
-extern struct iwl_cfg iwl2000_2bgn_cfg;
-extern struct iwl_cfg iwl2000_2bg_cfg;
-extern struct iwl_cfg iwl2030_2bgn_cfg;
-extern struct iwl_cfg iwl2030_2bg_cfg;
-extern struct iwl_cfg iwl6035_2agn_cfg;
-extern struct iwl_cfg iwl6035_2abg_cfg;
-extern struct iwl_cfg iwl6035_2bg_cfg;
-extern struct iwl_cfg iwl105_bg_cfg;
-extern struct iwl_cfg iwl105_bgn_cfg;
-extern struct iwl_cfg iwl135_bg_cfg;
-extern struct iwl_cfg iwl135_bgn_cfg;
-
-extern struct iwl_mod_params iwlagn_mod_params;
-
extern struct ieee80211_ops iwlagn_hw_ops;
-int iwl_reset_ict(struct iwl_priv *priv);
+int iwl_reset_ict(struct iwl_trans *trans);
static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
{
@@ -122,10 +77,6 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
hdr->data_valid = 1;
}
-/* tx queue */
-void iwl_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed);
-
/* RXON */
int iwlagn_set_pan_params(struct iwl_priv *priv);
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -137,8 +88,9 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
u32 changes);
/* uCode */
-void iwlagn_rx_calib_result(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
+int iwlagn_rx_calib_result(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
void iwlagn_send_prio_tbl(struct iwl_priv *priv);
int iwlagn_run_init_ucode(struct iwl_priv *priv);
@@ -147,13 +99,9 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
enum iwlagn_ucode_type ucode_type);
/* lib */
-void iwl_check_abort_status(struct iwl_priv *priv,
- u8 frame_count, u32 status);
-int iwlagn_hw_valid_rtc_data_addr(u32 addr);
int iwlagn_send_tx_power(struct iwl_priv *priv);
void iwlagn_temperature(struct iwl_priv *priv);
u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
-int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv);
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
@@ -161,25 +109,19 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
/* rx */
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
void iwl_setup_rx_handlers(struct iwl_priv *priv);
-void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/* tx */
-void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int index);
-void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info);
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
-int iwlagn_txq_check_empty(struct iwl_priv *priv,
- int sta_id, u8 tid, int txq_id);
-void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
+int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
static inline u32 iwl_tx_status_to_mac80211(u32 status)
{
@@ -207,17 +149,14 @@ static inline bool iwl_is_tx_success(u32 status)
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
/* scan */
-int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwlagn_post_scan(struct iwl_priv *priv);
-
-/* station mgmt */
-int iwlagn_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add);
+void iwlagn_disable_roc(struct iwl_priv *priv);
/* bt coex */
void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
-void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
+int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
@@ -232,7 +171,120 @@ static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; }
static inline const char *iwl_get_agg_tx_fail_reason(u16 status) { return ""; }
#endif
+
/* station management */
+int iwlagn_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add);
+#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
+#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
+ being activated */
+#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+ (this is for the IBSS BSSID stations) */
+#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
+
+void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+void iwl_clear_ucode_stations(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
+int iwl_get_free_ucode_key_offset(struct iwl_priv *priv);
+int iwl_send_add_sta(struct iwl_priv *priv,
+ struct iwl_addsta_cmd *sta, u8 flags);
+int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r);
+int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr);
+int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
+
+void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ u8 sta_id, struct iwl_link_quality_cmd *link_cmd);
+int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq, u8 flags, bool init);
+void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
+
+/**
+ * iwl_clear_driver_stations - clear knowledge of all stations from driver
+ * @priv: iwl priv struct
+ *
+ * This is called during iwl_down() to make sure that in the case
+ * we're coming there from a hardware restart mac80211 will be
+ * able to reconfigure stations -- if we're getting there in the
+ * normal down flow then the stations will already be cleared.
+ */
+static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ struct iwl_rxon_context *ctx;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ memset(priv->stations, 0, sizeof(priv->stations));
+ priv->num_stations = 0;
+
+ priv->ucode_key_table = 0;
+
+ for_each_context(priv, ctx) {
+ /*
+ * Remove all key information that is not stored as part
+ * of station information since mac80211 may not have had
+ * a chance to remove all the keys. When device is
+ * reconfigured by mac80211 after an error all keys will
+ * be reconfigured.
+ */
+ memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+ ctx->key_mapping_keys = 0;
+ }
+
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+}
+
+static inline int iwl_sta_id(struct ieee80211_sta *sta)
+{
+ if (WARN_ON(!sta))
+ return IWL_INVALID_STATION;
+
+ return ((struct iwl_station_priv *)sta->drv_priv)->sta_id;
+}
+
+/**
+ * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @priv: iwl priv
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv,
+ struct iwl_rxon_context *context,
+ struct ieee80211_sta *sta)
+{
+ int sta_id;
+
+ if (!sta)
+ return context->bcast_sta_id;
+
+ sta_id = iwl_sta_id(sta);
+
+ /*
+ * mac80211 should not be passing a partially
+ * initialised station!
+ */
+ WARN_ON(sta_id == IWL_INVALID_STATION);
+
+ return sta_id;
+}
+
int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
@@ -286,7 +338,7 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
}
/* eeprom */
-void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv);
+void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
/* notification wait support */
@@ -309,20 +361,22 @@ extern int iwlagn_init_alive_start(struct iwl_priv *priv);
extern int iwl_alive_start(struct iwl_priv *priv);
/* svtool */
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
-extern int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len);
-extern int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct netlink_callback *cb,
- void *data, int len);
+extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
+ int len);
+extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ void *data, int len);
extern void iwl_testmode_init(struct iwl_priv *priv);
extern void iwl_testmode_cleanup(struct iwl_priv *priv);
#else
static inline
-int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
+int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
{
return -ENOSYS;
}
static inline
-int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
+int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb,
void *data, int len)
{
diff --git a/drivers/net/wireless/iwlwifi/iwl-bus.h b/drivers/net/wireless/iwlwifi/iwl-bus.h
index f3ee1c0c004..08b97594e30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-bus.h
+++ b/drivers/net/wireless/iwlwifi/iwl-bus.h
@@ -60,16 +60,68 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-#ifndef __iwl_pci_h__
-#define __iwl_pci_h__
+#ifndef __iwl_bus_h__
+#define __iwl_bus_h__
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+/**
+ * DOC: Bus layer - role and goal
+ *
+ * iwl-bus.h defines the API to the bus layer of the iwlwifi driver.
+ * The bus layer is responsible for doing very basic bus operations that are
+ * listed in the iwl_bus_ops structure.
+ * The bus layer registers to the bus driver, advertises the supported HW and
+ * gets notifications about enumeration, suspend, resume.
+ * For the moment, the bus layer is not a linux kernel module as itself, and
+ * the module_init function of the driver must call the bus specific
+ * registration functions. These functions are listed at the end of this file.
+ * For the moment, there is only one implementation of this interface: PCI-e.
+ * This implementation is iwl-pci.c
+ */
+
+/**
+ * DOC: encapsulation and type safety
+ *
+ * The iwl_bus describes the data that is shared amongst all the bus layer
+ * implementations. This data is visible to other layers. Data in the bus
+ * specific area is not visible outside the bus specific implementation.
+ * iwl_bus holds a pointer to iwl_shared which holds pointer to all the other
+ * layers of the driver (iwl_priv, iwl_trans). In fact, this is the way to go
+ * when the transport layer needs to call a function of another layer.
+ *
+ * In order to achieve encapsulation, iwl_priv cannot be dereferenced from the
+ * bus layer. Type safety is still kept since functions that gets iwl_priv gets
+ * a typed pointer (as opposed to void *).
+ */
+
+/**
+ * DOC: probe flow
+ *
+ * The module_init calls the bus specific registration function. The
+ * registration to the bus layer will trigger an enumeration of the bus which
+ * will call the bus specific probe function.
+ * The first thing this function must do is to allocate the memory needed by
+ * iwl_bus + the bus_specific data.
+ * Once the bus specific probe function has configured the hardware, it
+ * chooses the appropriate transport layer and calls iwl_probe that will run
+ * the bus independent probe flow.
+ *
+ * Note: The bus specific code must set the following data in iwl_bus before it
+ * calls iwl_probe:
+ * * bus->dev
+ * * bus->irq
+ * * bus->ops
+ */
+
+struct iwl_shared;
struct iwl_bus;
/**
* struct iwl_bus_ops - bus specific operations
* @get_pm_support: must returns true if the bus can go to sleep
- * @apm_config: will be called during the config of the APM configuration
- * @set_drv_data: set the drv_data pointer to the bus layer
+ * @apm_config: will be called during the config of the APM
* @get_hw_id: prints the hw_id in the provided buffer
* @write8: write a byte to register at offset ofs
* @write32: write a dword to register at offset ofs
@@ -78,20 +130,32 @@ struct iwl_bus;
struct iwl_bus_ops {
bool (*get_pm_support)(struct iwl_bus *bus);
void (*apm_config)(struct iwl_bus *bus);
- void (*set_drv_data)(struct iwl_bus *bus, void *drv_data);
void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len);
void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
u32 (*read32)(struct iwl_bus *bus, u32 ofs);
};
+/**
+ * struct iwl_bus - bus common data
+ *
+ * This data is common to all bus layer implementations.
+ *
+ * @dev - pointer to struct device * that represents the device
+ * @ops - pointer to iwl_bus_ops
+ * @shrd - pointer to iwl_shared which holds shared data from the upper layer
+ * NB: for the time being this needs to be set by the upper layer since
+ * it allocates the shared data
+ * @irq - the irq number for the device
+ * @reg_lock - protect hw register access
+ */
struct iwl_bus {
- /* Common data to all buses */
- void *drv_data; /* driver's context */
struct device *dev;
- struct iwl_bus_ops *ops;
+ const struct iwl_bus_ops *ops;
+ struct iwl_shared *shrd;
unsigned int irq;
+ spinlock_t reg_lock;
/* pointer to bus specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -108,11 +172,6 @@ static inline void bus_apm_config(struct iwl_bus *bus)
bus->ops->apm_config(bus);
}
-static inline void bus_set_drv_data(struct iwl_bus *bus, void *drv_data)
-{
- bus->ops->set_drv_data(bus, drv_data);
-}
-
static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len)
{
bus->ops->get_hw_id(bus, buf, buf_len);
@@ -133,7 +192,10 @@ static inline u32 bus_read32(struct iwl_bus *bus, u32 ofs)
return bus->ops->read32(bus, ofs);
}
+/*****************************************************
+* Bus layer registration functions
+******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
-#endif
+#endif /* __iwl_bus_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/iwl-cfg.h
new file mode 100644
index 00000000000..2a2dc4597ba
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-cfg.h
@@ -0,0 +1,117 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_pci_h__
+#define __iwl_pci_h__
+
+
+/*
+ * This file declares the config structures for all devices.
+ */
+
+extern struct iwl_cfg iwl5300_agn_cfg;
+extern struct iwl_cfg iwl5100_agn_cfg;
+extern struct iwl_cfg iwl5350_agn_cfg;
+extern struct iwl_cfg iwl5100_bgn_cfg;
+extern struct iwl_cfg iwl5100_abg_cfg;
+extern struct iwl_cfg iwl5150_agn_cfg;
+extern struct iwl_cfg iwl5150_abg_cfg;
+extern struct iwl_cfg iwl6005_2agn_cfg;
+extern struct iwl_cfg iwl6005_2abg_cfg;
+extern struct iwl_cfg iwl6005_2bg_cfg;
+extern struct iwl_cfg iwl6005_2agn_sff_cfg;
+extern struct iwl_cfg iwl6005_2agn_d_cfg;
+extern struct iwl_cfg iwl1030_bgn_cfg;
+extern struct iwl_cfg iwl1030_bg_cfg;
+extern struct iwl_cfg iwl6030_2agn_cfg;
+extern struct iwl_cfg iwl6030_2abg_cfg;
+extern struct iwl_cfg iwl6030_2bgn_cfg;
+extern struct iwl_cfg iwl6030_2bg_cfg;
+extern struct iwl_cfg iwl6000i_2agn_cfg;
+extern struct iwl_cfg iwl6000i_2abg_cfg;
+extern struct iwl_cfg iwl6000i_2bg_cfg;
+extern struct iwl_cfg iwl6000_3agn_cfg;
+extern struct iwl_cfg iwl6050_2agn_cfg;
+extern struct iwl_cfg iwl6050_2abg_cfg;
+extern struct iwl_cfg iwl6150_bgn_cfg;
+extern struct iwl_cfg iwl6150_bg_cfg;
+extern struct iwl_cfg iwl1000_bgn_cfg;
+extern struct iwl_cfg iwl1000_bg_cfg;
+extern struct iwl_cfg iwl100_bgn_cfg;
+extern struct iwl_cfg iwl100_bg_cfg;
+extern struct iwl_cfg iwl130_bgn_cfg;
+extern struct iwl_cfg iwl130_bg_cfg;
+extern struct iwl_cfg iwl2000_2bgn_cfg;
+extern struct iwl_cfg iwl2000_2bg_cfg;
+extern struct iwl_cfg iwl2000_2bgn_d_cfg;
+extern struct iwl_cfg iwl2030_2bgn_cfg;
+extern struct iwl_cfg iwl2030_2bg_cfg;
+extern struct iwl_cfg iwl6035_2agn_cfg;
+extern struct iwl_cfg iwl6035_2abg_cfg;
+extern struct iwl_cfg iwl6035_2bg_cfg;
+extern struct iwl_cfg iwl105_bg_cfg;
+extern struct iwl_cfg iwl105_bgn_cfg;
+extern struct iwl_cfg iwl105_bgn_d_cfg;
+extern struct iwl_cfg iwl135_bg_cfg;
+extern struct iwl_cfg iwl135_bgn_cfg;
+
+#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index e9e9d1d1778..69d5f85d11e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -69,6 +69,9 @@
#ifndef __iwl_commands_h__
#define __iwl_commands_h__
+#include <linux/etherdevice.h>
+#include <linux/ieee80211.h>
+
struct iwl_priv;
/* uCode version contains 4 values: Major/Minor/API/Serial */
@@ -86,6 +89,7 @@ struct iwl_priv;
enum {
REPLY_ALIVE = 0x1,
REPLY_ERROR = 0x2,
+ REPLY_ECHO = 0x3, /* test command */
/* RXON and QOS commands */
REPLY_RXON = 0x10,
@@ -670,7 +674,6 @@ struct iwl_rxon_assoc_cmd {
#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
-#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
/*
* REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
@@ -806,6 +809,7 @@ struct iwl_qosparam_cmd {
#define IWLAGN_STATION_COUNT 16
#define IWL_INVALID_STATION 255
+#define IWL_MAX_TID_COUNT 9
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -3067,17 +3071,29 @@ struct iwl_missed_beacon_notif {
/* number of additional entries for enhanced tbl */
#define ENHANCE_HD_TABLE_ENTRIES (ENHANCE_HD_TABLE_SIZE - HD_TABLE_SIZE)
-#define HD_INA_NON_SQUARE_DET_OFDM_DATA cpu_to_le16(0)
-#define HD_INA_NON_SQUARE_DET_CCK_DATA cpu_to_le16(0)
-#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA cpu_to_le16(0)
-#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA cpu_to_le16(668)
-#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA cpu_to_le16(4)
-#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA cpu_to_le16(486)
-#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA cpu_to_le16(37)
-#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA cpu_to_le16(853)
-#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA cpu_to_le16(4)
-#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA cpu_to_le16(476)
-#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA cpu_to_le16(99)
+#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V1 cpu_to_le16(0)
+#define HD_INA_NON_SQUARE_DET_CCK_DATA_V1 cpu_to_le16(0)
+#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1 cpu_to_le16(0)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1 cpu_to_le16(668)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1 cpu_to_le16(4)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1 cpu_to_le16(486)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1 cpu_to_le16(37)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1 cpu_to_le16(853)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1 cpu_to_le16(4)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1 cpu_to_le16(476)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1 cpu_to_le16(99)
+
+#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V2 cpu_to_le16(1)
+#define HD_INA_NON_SQUARE_DET_CCK_DATA_V2 cpu_to_le16(1)
+#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2 cpu_to_le16(1)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2 cpu_to_le16(600)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2 cpu_to_le16(40)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2 cpu_to_le16(486)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2 cpu_to_le16(45)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2 cpu_to_le16(853)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2 cpu_to_le16(60)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2 cpu_to_le16(476)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2 cpu_to_le16(99)
/* Control field in struct iwl_sensitivity_cmd */
@@ -3198,12 +3214,17 @@ enum iwl_ucode_calib_cfg {
IWL_CALIB_CFG_LO_IDX | \
IWL_CALIB_CFG_TX_IQ_IDX | \
IWL_CALIB_CFG_RX_IQ_IDX | \
- IWL_CALIB_CFG_NOISE_IDX | \
- IWL_CALIB_CFG_CRYSTAL_IDX | \
+ IWL_CALIB_CFG_CRYSTAL_IDX)
+
+#define IWL_CALIB_RT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \
+ IWL_CALIB_CFG_DC_IDX | \
+ IWL_CALIB_CFG_LO_IDX | \
+ IWL_CALIB_CFG_TX_IQ_IDX | \
+ IWL_CALIB_CFG_RX_IQ_IDX | \
IWL_CALIB_CFG_TEMPERATURE_IDX | \
IWL_CALIB_CFG_PAPD_IDX | \
- IWL_CALIB_CFG_SENSITIVITY_IDX | \
- IWL_CALIB_CFG_TX_PWR_IDX)
+ IWL_CALIB_CFG_TX_PWR_IDX | \
+ IWL_CALIB_CFG_CRYSTAL_IDX)
#define IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK cpu_to_le32(BIT(0))
@@ -3253,6 +3274,14 @@ struct iwl_calib_temperature_offset_cmd {
__le16 reserved;
} __packed;
+struct iwl_calib_temperature_offset_v2_cmd {
+ struct iwl_calib_hdr hdr;
+ __le16 radio_sensor_offset_high;
+ __le16 radio_sensor_offset_low;
+ __le16 burntVoltageRef;
+ __le16 reserved;
+} __packed;
+
/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
struct iwl_calib_chain_noise_reset_cmd {
struct iwl_calib_hdr hdr;
@@ -3897,6 +3926,7 @@ struct iwlagn_wowlan_kek_kck_material_cmd {
* Union of all expected notifications/responses:
*
*****************************************************************************/
+#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
struct iwl_rx_packet {
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index cf376f62b2f..b247a56d513 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -34,29 +34,26 @@
#include <net/mac80211.h>
#include "iwl-eeprom.h"
-#include "iwl-dev.h" /* FIXME: remove */
#include "iwl-debug.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-power.h"
-#include "iwl-sta.h"
-#include "iwl-helpers.h"
+#include "iwl-agn.h"
+#include "iwl-shared.h"
#include "iwl-agn.h"
#include "iwl-trans.h"
-u32 iwl_debug_level;
-
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
+static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
struct ieee80211_sta_ht_cap *ht_info,
enum ieee80211_band band)
{
u16 max_bit_rate = 0;
- u8 rx_chains_num = priv->hw_params.rx_chains_num;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 rx_chains_num = hw_params(priv).rx_chains_num;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
ht_info->cap = 0;
memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
@@ -68,7 +65,7 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
max_bit_rate = MAX_BIT_RATE_20_MHZ;
- if (priv->hw_params.ht40_channel & BIT(band)) {
+ if (hw_params(priv).ht40_channel & BIT(band)) {
ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
ht_info->mcs.rx_mask[4] = 0x01;
@@ -106,9 +103,9 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
}
/**
- * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
+ * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
*/
-int iwlcore_init_geos(struct iwl_priv *priv)
+int iwl_init_geos(struct iwl_priv *priv)
{
struct iwl_channel_info *ch;
struct ieee80211_supported_band *sband;
@@ -121,16 +118,16 @@ int iwlcore_init_geos(struct iwl_priv *priv)
if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+ set_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
return 0;
}
- channels = kzalloc(sizeof(struct ieee80211_channel) *
- priv->channel_count, GFP_KERNEL);
+ channels = kcalloc(priv->channel_count,
+ sizeof(struct ieee80211_channel), GFP_KERNEL);
if (!channels)
return -ENOMEM;
- rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
+ rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
GFP_KERNEL);
if (!rates) {
kfree(channels);
@@ -145,7 +142,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
- iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
+ iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_5GHZ);
sband = &priv->bands[IEEE80211_BAND_2GHZ];
@@ -155,7 +152,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
- iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
+ iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_2GHZ);
priv->ieee_channels = channels;
@@ -211,7 +208,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
priv->cfg->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
char buf[32];
- bus_get_hw_id(priv->bus, buf, sizeof(buf));
+ bus_get_hw_id(bus(priv), buf, sizeof(buf));
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
"Please send your %s to maintainer.\n", buf);
priv->cfg->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
@@ -221,19 +218,19 @@ int iwlcore_init_geos(struct iwl_priv *priv)
priv->bands[IEEE80211_BAND_2GHZ].n_channels,
priv->bands[IEEE80211_BAND_5GHZ].n_channels);
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+ set_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
return 0;
}
/*
- * iwlcore_free_geos - undo allocations in iwlcore_init_geos
+ * iwl_free_geos - undo allocations in iwl_init_geos
*/
-void iwlcore_free_geos(struct iwl_priv *priv)
+void iwl_free_geos(struct iwl_priv *priv)
{
kfree(priv->ieee_channels);
kfree(priv->ieee_rates);
- clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
+ clear_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
}
static bool iwl_is_channel_extension(struct iwl_priv *priv,
@@ -323,9 +320,9 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
u16 beacon_int;
struct ieee80211_vif *vif = ctx->vif;
- conf = ieee80211_get_hw_conf(priv->hw);
+ conf = &priv->hw->conf;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
@@ -359,7 +356,7 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
} else {
beacon_int = iwl_adjust_beacon_interval(beacon_int,
- priv->hw_params.max_beacon_itrvl * TIME_UNIT);
+ IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
}
@@ -378,7 +375,7 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
le32_to_cpu(ctx->timing.beacon_init_val),
le16_to_cpu(ctx->timing.atim_window));
- return trans_send_cmd_pdu(&priv->trans, ctx->rxon_timing_cmd,
+ return iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_timing_cmd,
CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
}
@@ -804,21 +801,23 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
{
/*
* MULTI-FIXME
- * See iwl_mac_channel_switch.
+ * See iwlagn_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+ &priv->shrd->status))
ieee80211_chswitch_done(ctx->vif, is_success);
}
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
+ enum iwl_rxon_context_id ctxid)
{
+ struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
struct iwl_rxon_cmd *rxon = &ctx->staging;
IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
@@ -856,18 +855,18 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
unsigned long reload_jiffies;
/* Set the FW error flag -- cleared on iwl_down */
- set_bit(STATUS_FW_ERROR, &priv->status);
+ set_bit(STATUS_FW_ERROR, &priv->shrd->status);
/* Cancel currently queued command. */
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
iwlagn_abort_notification_waits(priv);
/* Keep the restart process from trying to send host
* commands by clearing the ready bit */
- clear_bit(STATUS_READY, &priv->status);
+ clear_bit(STATUS_READY, &priv->shrd->status);
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->shrd->wait_command_queue);
if (!ondemand) {
/*
@@ -890,63 +889,26 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
priv->reload_count = 0;
}
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
if (iwlagn_mod_params.restart_fw) {
- IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
+ IWL_DEBUG_FW_ERRORS(priv,
"Restarting adapter due to uCode error.\n");
- queue_work(priv->workqueue, &priv->restart);
+ queue_work(priv->shrd->workqueue, &priv->restart);
} else
- IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
+ IWL_DEBUG_FW_ERRORS(priv,
"Detected FW error, but not restarting\n");
}
}
-/**
- * iwl_irq_handle_error - called for HW or SW error interrupt from card
- */
-void iwl_irq_handle_error(struct iwl_priv *priv)
-{
- /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
- if (priv->cfg->internal_wimax_coex &&
- (!(iwl_read_prph(priv, APMG_CLK_CTRL_REG) &
- APMS_CLK_VAL_MRB_FUNC_MODE) ||
- (iwl_read_prph(priv, APMG_PS_CTRL_REG) &
- APMG_PS_CTRL_VAL_RESET_REQ))) {
- /*
- * Keep the restart process from trying to send host
- * commands by clearing the ready bit.
- */
- clear_bit(STATUS_READY, &priv->status);
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- wake_up_interruptible(&priv->wait_command_queue);
- IWL_ERR(priv, "RF is used by WiMAX\n");
- return;
- }
-
- IWL_ERR(priv, "Loaded firmware version: %s\n",
- priv->hw->wiphy->fw_version);
-
- iwl_dump_nic_error_log(priv);
- iwl_dump_csr(priv);
- iwl_dump_fh(priv, NULL, false);
- iwl_dump_nic_event_log(priv, false, NULL, false);
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
- iwl_print_rx_config_cmd(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-#endif
-
- iwlagn_fw_error(priv, false);
-}
-
static int iwl_apm_stop_master(struct iwl_priv *priv)
{
int ret = 0;
/* stop device's busmaster DMA activity */
- iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+ iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
- ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ ret = iwl_poll_bit(bus(priv), CSR_RESET,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
if (ret)
IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
@@ -960,13 +922,13 @@ void iwl_apm_stop(struct iwl_priv *priv)
{
IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
- clear_bit(STATUS_DEVICE_ENABLED, &priv->status);
+ clear_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
/* Stop device's DMA activity */
iwl_apm_stop_master(priv);
/* Reset the entire device */
- iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(10);
@@ -974,7 +936,7 @@ void iwl_apm_stop(struct iwl_priv *priv)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ iwl_clear_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
@@ -994,45 +956,45 @@ int iwl_apm_init(struct iwl_priv *priv)
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
- iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+ iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
/*
* Disable L0s without affecting L1;
* don't wait for ICH L0s (ICH bug W/A)
*/
- iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+ iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
/* Set FH wait threshold to maximum (HW error during stress W/A) */
- iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+ iwl_set_bit(bus(priv), CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
/*
* Enable HAP INTA (interrupt from management bus) to
* wake device's PCI Express link L1a -> L0s
*/
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
- bus_apm_config(priv->bus);
+ bus_apm_config(bus(priv));
/* Configure analog phase-lock-loop before activating to D0A */
if (priv->cfg->base_params->pll_cfg_val)
- iwl_set_bit(priv, CSR_ANA_PLL_CFG,
+ iwl_set_bit(bus(priv), CSR_ANA_PLL_CFG,
priv->cfg->base_params->pll_cfg_val);
/*
* Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state.
*/
- iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ iwl_set_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/*
* Wait for clock stabilization; once stabilized, access to
* device-internal resources is supported, e.g. iwl_write_prph()
* and accesses to uCode SRAM.
*/
- ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+ ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
if (ret < 0) {
@@ -1047,14 +1009,14 @@ int iwl_apm_init(struct iwl_priv *priv)
* do not disable clocks. This preserves any hardware bits already
* set by default in "CLK_CTRL_REG" after reset.
*/
- iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ iwl_write_prph(bus(priv), APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
udelay(20);
/* Disable L1-Active */
- iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
+ iwl_set_bits_prph(bus(priv), APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
- set_bit(STATUS_DEVICE_ENABLED, &priv->status);
+ set_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
out:
return ret;
@@ -1068,7 +1030,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
bool defer;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
if (priv->tx_power_user_lmt == tx_power && !force)
return 0;
@@ -1088,7 +1050,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
return -EINVAL;
}
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
return -EIO;
/* scan complete and commit_rxon use tx_power_next value,
@@ -1096,7 +1058,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
priv->tx_power_next = tx_power;
/* do not set tx power when scanning or channel changing */
- defer = test_bit(STATUS_SCANNING, &priv->status) ||
+ defer = test_bit(STATUS_SCANNING, &priv->shrd->status) ||
memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
if (defer && !force) {
IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
@@ -1134,7 +1096,7 @@ void iwl_send_bt_config(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "BT coex %s\n",
(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
- if (trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
+ if (iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
@@ -1147,24 +1109,20 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
};
if (flags & CMD_ASYNC)
- return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD,
+ return iwl_trans_send_cmd_pdu(trans(priv), REPLY_STATISTICS_CMD,
CMD_ASYNC,
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
else
- return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD,
+ return iwl_trans_send_cmd_pdu(trans(priv), REPLY_STATISTICS_CMD,
CMD_SYNC,
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
}
-void iwl_clear_isr_stats(struct iwl_priv *priv)
-{
- memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
-}
-
-int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
- const struct ieee80211_tx_queue_params *params)
+int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
struct iwl_priv *priv = hw->priv;
struct iwl_rxon_context *ctx;
@@ -1173,7 +1131,7 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
IWL_DEBUG_MAC80211(priv, "enter\n");
- if (!iwl_is_ready_rf(priv)) {
+ if (!iwl_is_ready_rf(priv->shrd)) {
IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
return -EIO;
}
@@ -1185,7 +1143,7 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
q = AC_NUM - 1 - queue;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
/*
* MULTI-FIXME
@@ -1203,13 +1161,13 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
}
-int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
+int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = hw->priv;
@@ -1231,7 +1189,7 @@ static int iwl_setup_interface(struct iwl_priv *priv,
struct ieee80211_vif *vif = ctx->vif;
int err;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
/*
* This variable will be correct only when there's just
@@ -1262,7 +1220,8 @@ static int iwl_setup_interface(struct iwl_priv *priv,
return 0;
}
-int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -1273,9 +1232,13 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
viftype, vif->addr);
- mutex_lock(&priv->mutex);
+ cancel_delayed_work_sync(&priv->hw_roc_disable_work);
+
+ mutex_lock(&priv->shrd->mutex);
+
+ iwlagn_disable_roc(priv);
- if (!iwl_is_ready_rf(priv)) {
+ if (!iwl_is_ready_rf(priv->shrd)) {
IWL_WARN(priv, "Try to add interface when device not ready\n");
err = -EINVAL;
goto out;
@@ -1318,7 +1281,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
ctx->vif = NULL;
priv->iw_mode = NL80211_IFTYPE_STATION;
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
@@ -1330,7 +1293,7 @@ static void iwl_teardown_interface(struct iwl_priv *priv,
{
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
if (priv->scan_vif == vif) {
iwl_scan_cancel_timeout(priv, 200);
@@ -1354,7 +1317,7 @@ static void iwl_teardown_interface(struct iwl_priv *priv,
priv->bt_traffic_load = priv->last_bt_traffic_load;
}
-void iwl_mac_remove_interface(struct ieee80211_hw *hw,
+void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
@@ -1362,14 +1325,20 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- WARN_ON(ctx->vif != vif);
+ if (WARN_ON(ctx->vif != vif)) {
+ struct iwl_rxon_context *tmp;
+ IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
+ for_each_context(priv, tmp)
+ IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
+ tmp->ctxid, tmp, tmp->vif);
+ }
ctx->vif = NULL;
iwl_teardown_interface(priv, vif, false);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -1393,7 +1362,7 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
{
u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
- if (iwl_debug_level & IWL_DL_TX) {
+ if (iwl_get_debug_level(priv->shrd) & IWL_DL_TX) {
if (!priv->tx_traffic) {
priv->tx_traffic =
kzalloc(traffic_size, GFP_KERNEL);
@@ -1401,7 +1370,7 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
return -ENOMEM;
}
}
- if (iwl_debug_level & IWL_DL_RX) {
+ if (iwl_get_debug_level(priv->shrd) & IWL_DL_RX) {
if (!priv->rx_traffic) {
priv->rx_traffic =
kzalloc(traffic_size, GFP_KERNEL);
@@ -1428,7 +1397,7 @@ void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
__le16 fc;
u16 len;
- if (likely(!(iwl_debug_level & IWL_DL_TX)))
+ if (likely(!(iwl_get_debug_level(priv->shrd) & IWL_DL_TX)))
return;
if (!priv->tx_traffic)
@@ -1452,7 +1421,7 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
__le16 fc;
u16 len;
- if (likely(!(iwl_debug_level & IWL_DL_RX)))
+ if (likely(!(iwl_get_debug_level(priv->shrd) & IWL_DL_RX)))
return;
if (!priv->rx_traffic)
@@ -1609,7 +1578,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
static void iwl_force_rf_reset(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
if (!iwl_is_any_associated(priv)) {
@@ -1634,7 +1603,7 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
{
struct iwl_force_reset *force_reset;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return -EINVAL;
if (mode >= IWL_MAX_FORCE_RESET) {
@@ -1680,8 +1649,9 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
return 0;
}
-int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p)
+int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
{
struct iwl_priv *priv = hw->priv;
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
@@ -1691,11 +1661,13 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 interface_modes;
int err;
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
newtype = ieee80211_iftype_p2p(newtype, newp2p);
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (!ctx->vif || !iwl_is_ready_rf(priv)) {
+ if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
/*
* Huh? But wait ... this can maybe happen when
* we're in the middle of a firmware restart!
@@ -1757,36 +1729,45 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
err = 0;
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
return err;
}
-/*
- * On every watchdog tick we check (latest) time stamp. If it does not
- * change during timeout period and queue is not empty we reset firmware.
- */
-static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
+int iwl_cmd_echo_test(struct iwl_priv *priv)
{
- struct iwl_tx_queue *txq = &priv->txq[cnt];
- struct iwl_queue *q = &txq->q;
- unsigned long timeout;
int ret;
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_ECHO,
+ .flags = CMD_SYNC,
+ };
- if (q->read_ptr == q->write_ptr) {
- txq->time_stamp = jiffies;
- return 0;
- }
-
- timeout = txq->time_stamp +
- msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
+ ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ if (ret)
+ IWL_ERR(priv, "echo testing fail: 0X%x\n", ret);
+ else
+ IWL_DEBUG_INFO(priv, "echo testing pass\n");
+ return ret;
+}
- if (time_after(jiffies, timeout)) {
- IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
- q->id, priv->cfg->base_params->wd_timeout);
+static inline int iwl_check_stuck_queue(struct iwl_priv *priv, int txq)
+{
+ if (iwl_trans_check_stuck_queue(trans(priv), txq)) {
+ int ret;
+ if (txq == priv->shrd->cmd_queue) {
+ /*
+ * validate command queue still working
+ * by sending "ECHO" command
+ */
+ if (!iwl_cmd_echo_test(priv))
+ return 0;
+ else
+ IWL_DEBUG_HC(priv, "echo testing fail\n");
+ }
ret = iwl_force_reset(priv, IWL_FW_RESET, false);
return (ret == -EAGAIN) ? 0 : 1;
}
-
return 0;
}
@@ -1806,7 +1787,10 @@ void iwl_bg_watchdog(unsigned long data)
int cnt;
unsigned long timeout;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ return;
+
+ if (iwl_is_rfkill(priv->shrd))
return;
timeout = priv->cfg->base_params->wd_timeout;
@@ -1814,14 +1798,14 @@ void iwl_bg_watchdog(unsigned long data)
return;
/* monitor and check for stuck cmd queue */
- if (iwl_check_stuck_queue(priv, priv->cmd_queue))
+ if (iwl_check_stuck_queue(priv, priv->shrd->cmd_queue))
return;
/* monitor and check for other stuck queues */
if (iwl_is_any_associated(priv)) {
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+ for (cnt = 0; cnt < hw_params(priv).max_txq_num; cnt++) {
/* skip as we already checked the command queue */
- if (cnt == priv->cmd_queue)
+ if (cnt == priv->shrd->cmd_queue)
continue;
if (iwl_check_stuck_queue(priv, cnt))
return;
@@ -1843,6 +1827,28 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
del_timer(&priv->watchdog);
}
+/**
+ * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of bits need to shift for masking)
+ */
+static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
+ u16 tsf_bits)
+{
+ return (1 << tsf_bits) - 1;
+}
+
+/**
+ * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of bits need to shift for masking)
+ */
+static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
+ u16 tsf_bits)
+{
+ return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
+
/*
* extended beacon time format
* time in usec will be changed into a 32-bit value in extended:internal format
@@ -1859,13 +1865,12 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
return 0;
quot = (usec / interval) &
- (iwl_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits) >>
- priv->hw_params.beacon_time_tsf_bits);
+ (iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >>
+ IWLAGN_EXT_BEACON_TIME_POS);
rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
+ IWLAGN_EXT_BEACON_TIME_POS);
- return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
+ return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem;
}
/* base is usually what we get from ucode with each received frame,
@@ -1875,64 +1880,80 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
u32 addon, u32 beacon_interval)
{
u32 base_low = base & iwl_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
+ IWLAGN_EXT_BEACON_TIME_POS);
u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
+ IWLAGN_EXT_BEACON_TIME_POS);
u32 interval = beacon_interval * TIME_UNIT;
u32 res = (base & iwl_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits)) +
+ IWLAGN_EXT_BEACON_TIME_POS)) +
(addon & iwl_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits));
+ IWLAGN_EXT_BEACON_TIME_POS));
if (base_low > addon_low)
res += base_low - addon_low;
else if (base_low < addon_low) {
res += interval + base_low - addon_low;
- res += (1 << priv->hw_params.beacon_time_tsf_bits);
+ res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
} else
- res += (1 << priv->hw_params.beacon_time_tsf_bits);
+ res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
return cpu_to_le32(res);
}
-#ifdef CONFIG_PM
+void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctx,
+ u8 sta_id, u8 tid)
+{
+ struct ieee80211_vif *vif;
+ u8 *addr = priv->stations[sta_id].sta.sta.addr;
-int iwl_suspend(struct iwl_priv *priv)
+ if (ctx == NUM_IWL_RXON_CTX)
+ ctx = priv->stations[sta_id].ctxid;
+ vif = priv->contexts[ctx].vif;
+
+ ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
+}
+
+void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctx,
+ u8 sta_id, u8 tid)
{
- /*
- * This function is called when system goes into suspend state
- * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
- * first but since iwl_mac_stop() has no knowledge of who the caller is,
- * it will not call apm_ops.stop() to stop the DMA operation.
- * Calling apm_ops.stop here to make sure we stop the DMA.
- *
- * But of course ... if we have configured WoWLAN then we did other
- * things already :-)
- */
- if (!priv->wowlan)
- iwl_apm_stop(priv);
+ struct ieee80211_vif *vif;
+ u8 *addr = priv->stations[sta_id].sta.sta.addr;
- return 0;
+ if (ctx == NUM_IWL_RXON_CTX)
+ ctx = priv->stations[sta_id].ctxid;
+ vif = priv->contexts[ctx].vif;
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
}
-int iwl_resume(struct iwl_priv *priv)
+void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state)
{
- bool hw_rfkill = false;
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
+}
- iwl_enable_interrupts(priv);
+void iwl_nic_config(struct iwl_priv *priv)
+{
+ priv->cfg->lib->nic_config(priv);
- if (!(iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rfkill = true;
+}
- if (hw_rfkill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
+void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info;
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
+ info = IEEE80211_SKB_CB(skb);
+ kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));
+ dev_kfree_skb_any(skb);
+}
- return 0;
+void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac)
+{
+ ieee80211_stop_queue(priv->hw, ac);
}
-#endif /* CONFIG_PM */
+void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac)
+{
+ ieee80211_wake_queue(priv->hw, ac);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 02817a43855..137da338070 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -64,6 +64,7 @@
#define __iwl_core_h__
#include "iwl-dev.h"
+#include "iwl-io.h"
/************************
* forward declarations *
@@ -71,15 +72,8 @@
struct iwl_host_cmd;
struct iwl_cmd;
-
-#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR "<ilw@linux.intel.com>"
-
#define TIME_UNIT 1024
-#define IWL_CMD(x) case x: return #x
-
struct iwl_lib_ops {
/* set hw dependent parameters */
int (*set_hw_params)(struct iwl_priv *priv);
@@ -101,23 +95,6 @@ struct iwl_lib_ops {
void (*temperature)(struct iwl_priv *priv);
};
-struct iwl_mod_params {
- int sw_crypto; /* def: 0 = using hardware encryption */
- int num_of_queues; /* def: HW dependent */
- int disable_11n; /* def: 0 = 11n capabilities enabled */
- int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
- int antenna; /* def: 0 = both antennas (use diversity) */
- int restart_fw; /* def: 1 = restart firmware */
- bool plcp_check; /* def: true = enable plcp health check */
- bool ack_check; /* def: false = disable ack health check */
- bool wd_disable; /* def: false = enable stuck queue check */
- bool bt_coex_active; /* def: true = enable bt coex */
- int led_mode; /* def: 0 = system default */
- bool no_sleep_autoadjust; /* def: true = disable autoadjust */
- bool power_save; /* def: false = disable power save */
- int power_level; /* def: 1 = power level */
-};
-
/*
* @max_ll_items: max number of OTP blocks
* @shadow_ram_support: shadow support for OTP memory
@@ -132,10 +109,10 @@ struct iwl_mod_params {
* radio tuning when there is a high receiving plcp error rate
* @chain_noise_scale: default chain noise scale used for gain computation
* @wd_timeout: TX queues watchdog timeout
- * @temperature_kelvin: temperature report by uCode in kelvin
* @max_event_log_size: size of event log buffer size for ucode event logging
* @shadow_reg_enable: HW shadhow register bit
* @no_idle_support: do not support idle mode
+ * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
*/
struct iwl_base_params {
int eeprom_size;
@@ -147,17 +124,16 @@ struct iwl_base_params {
const u16 max_ll_items;
const bool shadow_ram_support;
u16 led_compensation;
- int chain_noise_num_beacons;
bool adv_thermal_throttle;
bool support_ct_kill_exit;
const bool support_wimax_coexist;
u8 plcp_delta_threshold;
s32 chain_noise_scale;
unsigned int wd_timeout;
- bool temperature_kelvin;
u32 max_event_log_size;
const bool shadow_reg_enable;
const bool no_idle_support;
+ const bool hd_v2;
};
/*
* @advanced_bt_coexist: support advanced bt coexist
@@ -194,6 +170,8 @@ struct iwl_ht_params {
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
* @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_ok: oldest version of the uCode API that is OK to load
+ * without a warning, for use in transitions
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @valid_tx_ant: valid transmit antenna
* @valid_rx_ant: valid receive antenna
@@ -214,20 +192,12 @@ struct iwl_ht_params {
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
* @internal_wimax_coex: internal wifi/wimax combo device
* @iq_invert: I/Q inversion
+ * @temp_offset_v2: support v2 of temperature offset calibration
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
* highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version. The firmware's API version will be
- * stored in @iwl_priv, enabling the driver to make runtime changes based
- * on firmware version used.
- *
- * For example,
- * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
- * Driver interacts with Firmware API version >= 2.
- * } else {
- * Driver interacts with Firmware API version 1.
- * }
+ * it has a supported API version.
*
* The ideal usage of this infrastructure is to treat a new ucode API
* release as a new hardware revision.
@@ -237,6 +207,7 @@ struct iwl_cfg {
const char *name;
const char *fw_name_pre;
const unsigned int ucode_api_max;
+ const unsigned int ucode_api_ok;
const unsigned int ucode_api_min;
u8 valid_tx_ant;
u8 valid_rx_ant;
@@ -259,15 +230,17 @@ struct iwl_cfg {
const bool rx_with_siso_diversity;
const bool internal_wimax_coex;
const bool iq_invert;
+ const bool temp_offset_v2;
};
/***************************
* L i b *
***************************/
-int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params);
-int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw);
+int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
int hw_decrypt);
int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -287,18 +260,17 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
void iwl_set_rate(struct iwl_priv *priv);
-void iwl_irq_handle_error(struct iwl_priv *priv);
-int iwl_mac_add_interface(struct ieee80211_hw *hw,
+int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
-void iwl_mac_remove_interface(struct ieee80211_hw *hw,
+void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
-int iwl_mac_change_interface(struct ieee80211_hw *hw,
+int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p);
+int iwl_cmd_echo_test(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_alloc_traffic_mem(struct iwl_priv *priv);
void iwl_free_traffic_mem(struct iwl_priv *priv);
-void iwl_reset_traffic_log(struct iwl_priv *priv);
void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
u16 length, struct ieee80211_hdr *header);
void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
@@ -349,9 +321,9 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
******************************************************************************/
void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
-int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
+void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
void iwl_force_scan_end(struct iwl_priv *priv);
-int iwl_mac_hw_scan(struct ieee80211_hw *hw,
+int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
void iwl_internal_short_hw_scan(struct iwl_priv *priv);
@@ -359,12 +331,6 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
const u8 *ta, const u8 *ie, int ie_len, int left);
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
-u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 n_probes);
-u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_vif *vif);
void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
int __must_check iwl_scan_initiate(struct iwl_priv *priv,
@@ -387,117 +353,21 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
* S e n d i n g H o s t C o m m a n d s *
*****************************************************/
-const char *get_cmd_string(u8 cmd);
void iwl_bg_watchdog(unsigned long data);
u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
u32 addon, u32 beacon_interval);
-#ifdef CONFIG_PM
-int iwl_suspend(struct iwl_priv *priv);
-int iwl_resume(struct iwl_priv *priv);
-#endif /* !CONFIG_PM */
-
-int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg);
-void __devexit iwl_remove(struct iwl_priv * priv);
-
-/*****************************************************
-* Error Handling Debugging
-******************************************************/
-void iwl_dump_nic_error_log(struct iwl_priv *priv);
-int iwl_dump_nic_event_log(struct iwl_priv *priv,
- bool full_log, char **buf, bool display);
-void iwl_dump_csr(struct iwl_priv *priv);
-int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
-#ifdef CONFIG_IWLWIFI_DEBUG
-void iwl_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-#else
-static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
-}
-#endif
-
-void iwl_clear_isr_stats(struct iwl_priv *priv);
/*****************************************************
* GEOS
******************************************************/
-int iwlcore_init_geos(struct iwl_priv *priv);
-void iwlcore_free_geos(struct iwl_priv *priv);
-
-/*************** DRIVER STATUS FUNCTIONS *****/
-
-#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
-/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
-#define STATUS_INT_ENABLED 2
-#define STATUS_RF_KILL_HW 3
-#define STATUS_CT_KILL 4
-#define STATUS_INIT 5
-#define STATUS_ALIVE 6
-#define STATUS_READY 7
-#define STATUS_TEMPERATURE 8
-#define STATUS_GEO_CONFIGURED 9
-#define STATUS_EXIT_PENDING 10
-#define STATUS_STATISTICS 12
-#define STATUS_SCANNING 13
-#define STATUS_SCAN_ABORTING 14
-#define STATUS_SCAN_HW 15
-#define STATUS_POWER_PMI 16
-#define STATUS_FW_ERROR 17
-#define STATUS_DEVICE_ENABLED 18
-#define STATUS_CHANNEL_SWITCH_PENDING 19
-
-
-static inline int iwl_is_ready(struct iwl_priv *priv)
-{
- /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
- * set but EXIT_PENDING is not */
- return test_bit(STATUS_READY, &priv->status) &&
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
- !test_bit(STATUS_EXIT_PENDING, &priv->status);
-}
-
-static inline int iwl_is_alive(struct iwl_priv *priv)
-{
- return test_bit(STATUS_ALIVE, &priv->status);
-}
-
-static inline int iwl_is_init(struct iwl_priv *priv)
-{
- return test_bit(STATUS_INIT, &priv->status);
-}
-
-static inline int iwl_is_rfkill_hw(struct iwl_priv *priv)
-{
- return test_bit(STATUS_RF_KILL_HW, &priv->status);
-}
-
-static inline int iwl_is_rfkill(struct iwl_priv *priv)
-{
- return iwl_is_rfkill_hw(priv);
-}
-
-static inline int iwl_is_ctkill(struct iwl_priv *priv)
-{
- return test_bit(STATUS_CT_KILL, &priv->status);
-}
-
-static inline int iwl_is_ready_rf(struct iwl_priv *priv)
-{
-
- if (iwl_is_rfkill(priv))
- return 0;
-
- return iwl_is_ready(priv);
-}
+int iwl_init_geos(struct iwl_priv *priv);
+void iwl_free_geos(struct iwl_priv *priv);
extern void iwl_send_bt_config(struct iwl_priv *priv);
extern int iwl_send_statistics_request(struct iwl_priv *priv,
u8 flags, bool clear);
-void iwl_apm_stop(struct iwl_priv *priv);
-int iwl_apm_init(struct iwl_priv *priv);
int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -513,9 +383,12 @@ static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
priv->cfg->bt_params->advanced_bt_coexist;
}
-extern bool bt_siso_mode;
-
+static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
+{
+ IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
+ iwl_write32(bus(priv), CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+}
-void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand);
+extern bool bt_siso_mode;
#endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index d6dbb042304..b9f3267e720 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -439,4 +439,22 @@
*/
#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
+/**********************************************************
+ * CSR values
+ **********************************************************/
+ /*
+ * host interrupt timeout value
+ * used with setting interrupt coalescing timer
+ * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
+#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
+
#endif /* !__iwl_csr_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index f9a407e40af..69a77e24d22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -29,50 +29,51 @@
#ifndef __iwl_debug_h__
#define __iwl_debug_h__
+#include "iwl-bus.h"
+#include "iwl-shared.h"
+
struct iwl_priv;
-extern u32 iwl_debug_level;
-#define IWL_ERR(p, f, a...) dev_err(p->bus->dev, f, ## a)
-#define IWL_WARN(p, f, a...) dev_warn(p->bus->dev, f, ## a)
-#define IWL_INFO(p, f, a...) dev_info(p->bus->dev, f, ## a)
-#define IWL_CRIT(p, f, a...) dev_crit(p->bus->dev, f, ## a)
+/*No matter what is m (priv, bus, trans), this will work */
+#define IWL_ERR(m, f, a...) dev_err(bus(m)->dev, f, ## a)
+#define IWL_WARN(m, f, a...) dev_warn(bus(m)->dev, f, ## a)
+#define IWL_INFO(m, f, a...) dev_info(bus(m)->dev, f, ## a)
+#define IWL_CRIT(m, f, a...) dev_crit(bus(m)->dev, f, ## a)
-#define iwl_print_hex_error(priv, p, len) \
+#define iwl_print_hex_error(m, p, len) \
do { \
print_hex_dump(KERN_ERR, "iwl data: ", \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
#ifdef CONFIG_IWLWIFI_DEBUG
-#define IWL_DEBUG(__priv, level, fmt, args...) \
+#define IWL_DEBUG(m, level, fmt, args...) \
do { \
- if (iwl_get_debug_level(__priv) & (level)) \
- dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
+ if (iwl_get_debug_level((m)->shrd) & (level)) \
+ dev_printk(KERN_ERR, bus(m)->dev, \
"%c %s " fmt, in_interrupt() ? 'I' : 'U', \
__func__ , ## args); \
} while (0)
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
do { \
- if ((iwl_get_debug_level(__priv) & (level)) && net_ratelimit()) \
- dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
+ if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\
+ dev_printk(KERN_ERR, bus(m)->dev, \
"%c %s " fmt, in_interrupt() ? 'I' : 'U', \
__func__ , ## args); \
} while (0)
-#define iwl_print_hex_dump(priv, level, p, len) \
+#define iwl_print_hex_dump(m, level, p, len) \
do { \
- if (iwl_get_debug_level(priv) & level) \
+ if (iwl_get_debug_level((m)->shrd) & level) \
print_hex_dump(KERN_DEBUG, "iwl data: ", \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
#else
-#define IWL_DEBUG(__priv, level, fmt, args...)
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
-static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
- const void *p, u32 len)
-{}
+#define IWL_DEBUG(m, level, fmt, args...)
+#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
+#define iwl_print_hex_dump(m, level, p, len)
#endif /* CONFIG_IWLWIFI_DEBUG */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -104,10 +105,12 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
*
* The active debug levels can be accessed via files
*
- * /sys/module/iwlagn/parameters/debug{50}
- * /sys/class/net/wlan0/device/debug_level
- *
+ * /sys/module/iwlwifi/parameters/debug
* when CONFIG_IWLWIFI_DEBUG=y.
+ *
+ * /sys/kernel/debug/phy0/iwlwifi/debug/debug_level
+ * when CONFIG_IWLWIFI_DEBUGFS=y.
+ *
*/
/* 0x0000000F - 0x00000001 */
@@ -166,6 +169,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
+#define IWL_DEBUG_FW_ERRORS(p, f, a...) IWL_DEBUG(p, IWL_DL_FW_ERRORS, f, ## a)
#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index ec1485b2d3f..a1670e3f8bf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -64,6 +64,14 @@
goto err; \
} while (0)
+#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_u32(#name, mode, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
+
/* file operation */
#define DEBUGFS_READ_FUNC(name) \
static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
@@ -254,7 +262,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
sram = priv->dbgfs_sram_offset & ~0x3;
/* read the first u32 from sram */
- val = iwl_read_targ_mem(priv, sram);
+ val = iwl_read_targ_mem(bus(priv), sram);
for (; len; len--) {
/* put the address at the start of every line */
@@ -273,7 +281,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
if (++offset == 4) {
sram += 4;
offset = 0;
- val = iwl_read_targ_mem(priv, sram);
+ val = iwl_read_targ_mem(bus(priv), sram);
}
/* put in extra spaces and split lines for human readability */
@@ -340,7 +348,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
{
struct iwl_priv *priv = file->private_data;
struct iwl_station_entry *station;
- int max_sta = priv->hw_params.max_stations;
+ struct iwl_tid_data *tid_data;
char *buf;
int i, j, pos = 0;
ssize_t ret;
@@ -354,7 +362,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
priv->num_stations);
- for (i = 0; i < max_sta; i++) {
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
station = &priv->stations[i];
if (!station->used)
continue;
@@ -363,22 +371,18 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
i, station->sta.sta.addr,
station->sta.station_flags_msk);
pos += scnprintf(buf + pos, bufsz - pos,
- "TID\tseq_num\ttxq_id\tframes\ttfds\t");
- pos += scnprintf(buf + pos, bufsz - pos,
- "start_idx\tbitmap\t\t\trate_n_flags\n");
+ "TID\tseq_num\ttxq_id\ttfds\trate_n_flags\n");
- for (j = 0; j < MAX_TID_COUNT; j++) {
+ for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
+ tid_data = &priv->shrd->tid_data[i][j];
pos += scnprintf(buf + pos, bufsz - pos,
- "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
- j, station->tid[j].seq_number,
- station->tid[j].agg.txq_id,
- station->tid[j].agg.frame_count,
- station->tid[j].tfds_in_queue,
- station->tid[j].agg.start_idx,
- station->tid[j].agg.bitmap,
- station->tid[j].agg.rate_n_flags);
-
- if (station->tid[j].agg.wait_for_ba)
+ "%d:\t%#x\t%#x\t%u\t%#x",
+ j, tid_data->seq_number,
+ tid_data->agg.txq_id,
+ tid_data->tfds_in_queue,
+ tid_data->agg.rate_n_flags);
+
+ if (tid_data->agg.wait_for_ba)
pos += scnprintf(buf + pos, bufsz - pos,
" - waitforba");
pos += scnprintf(buf + pos, bufsz - pos, "\n");
@@ -442,46 +446,6 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
return ret;
}
-static ssize_t iwl_dbgfs_log_event_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
- ssize_t ret = -ENOMEM;
-
- ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true);
- if (buf) {
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- }
- return ret;
-}
-
-static ssize_t iwl_dbgfs_log_event_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- u32 event_log_flag;
- char buf[8];
- int buf_size;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &event_log_flag) != 1)
- return -EFAULT;
- if (event_log_flag == 1)
- iwl_dump_nic_event_log(priv, true, NULL, false);
-
- return count;
-}
-
-
-
static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -492,7 +456,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
char *buf;
ssize_t ret;
- if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
+ if (!test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -562,45 +526,46 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
const size_t bufsz = sizeof(buf);
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
- test_bit(STATUS_HCMD_ACTIVE, &priv->status));
+ test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
- test_bit(STATUS_INT_ENABLED, &priv->status));
+ test_bit(STATUS_INT_ENABLED, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
- test_bit(STATUS_RF_KILL_HW, &priv->status));
+ test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
- test_bit(STATUS_CT_KILL, &priv->status));
+ test_bit(STATUS_CT_KILL, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
- test_bit(STATUS_INIT, &priv->status));
+ test_bit(STATUS_INIT, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
- test_bit(STATUS_ALIVE, &priv->status));
+ test_bit(STATUS_ALIVE, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
- test_bit(STATUS_READY, &priv->status));
+ test_bit(STATUS_READY, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
- test_bit(STATUS_TEMPERATURE, &priv->status));
+ test_bit(STATUS_TEMPERATURE, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
- test_bit(STATUS_GEO_CONFIGURED, &priv->status));
+ test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
- test_bit(STATUS_EXIT_PENDING, &priv->status));
+ test_bit(STATUS_EXIT_PENDING, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
- test_bit(STATUS_STATISTICS, &priv->status));
+ test_bit(STATUS_STATISTICS, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
- test_bit(STATUS_SCANNING, &priv->status));
+ test_bit(STATUS_SCANNING, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
- test_bit(STATUS_SCAN_ABORTING, &priv->status));
+ test_bit(STATUS_SCAN_ABORTING, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
- test_bit(STATUS_SCAN_HW, &priv->status));
+ test_bit(STATUS_SCAN_HW, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
- test_bit(STATUS_POWER_PMI, &priv->status));
+ test_bit(STATUS_POWER_PMI, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
- test_bit(STATUS_FW_ERROR, &priv->status));
+ test_bit(STATUS_FW_ERROR, &priv->shrd->status));
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
+static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
struct iwl_priv *priv = file->private_data;
+
int pos = 0;
int cnt = 0;
char *buf;
@@ -613,61 +578,25 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
return -ENOMEM;
}
- pos += scnprintf(buf + pos, bufsz - pos,
- "Interrupt Statistics Report:\n");
-
- pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
- priv->isr_stats.hw);
- pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
- priv->isr_stats.sw);
- if (priv->isr_stats.sw || priv->isr_stats.hw) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tLast Restarting Code: 0x%X\n",
- priv->isr_stats.err_code);
- }
-#ifdef CONFIG_IWLWIFI_DEBUG
- pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
- priv->isr_stats.sch);
- pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
- priv->isr_stats.alive);
-#endif
- pos += scnprintf(buf + pos, bufsz - pos,
- "HW RF KILL switch toggled:\t %u\n",
- priv->isr_stats.rfkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
- priv->isr_stats.ctkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
- priv->isr_stats.wakeup);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Rx command responses:\t\t %u\n",
- priv->isr_stats.rx);
for (cnt = 0; cnt < REPLY_MAX; cnt++) {
- if (priv->isr_stats.rx_handlers[cnt] > 0)
+ if (priv->rx_handlers_stats[cnt] > 0)
pos += scnprintf(buf + pos, bufsz - pos,
"\tRx handler[%36s]:\t\t %u\n",
get_cmd_string(cnt),
- priv->isr_stats.rx_handlers[cnt]);
+ priv->rx_handlers_stats[cnt]);
}
- pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
- priv->isr_stats.tx);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
- priv->isr_stats.unhandled);
-
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
-static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
+
char buf[8];
int buf_size;
u32 reset_flag;
@@ -679,7 +608,8 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
if (sscanf(buf, "%x", &reset_flag) != 1)
return -EFAULT;
if (reset_flag == 0)
- iwl_clear_isr_stats(priv);
+ memset(&priv->rx_handlers_stats[0], 0,
+ sizeof(priv->rx_handlers_stats));
return count;
}
@@ -784,6 +714,20 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t iwl_dbgfs_temperature_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n", priv->temperature);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+
static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -814,14 +758,14 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
return -EINVAL;
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
return -EAGAIN;
priv->power_data.debug_sleep_level_override = value;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
iwl_power_update_mode(priv, true);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return count;
}
@@ -870,15 +814,15 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
DEBUGFS_READ_WRITE_FILE_OPS(sram);
DEBUGFS_READ_FILE_OPS(wowlan_sram);
-DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
-DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_WRITE_FILE_OPS(rx_handlers);
DEBUGFS_READ_FILE_OPS(qos);
DEBUGFS_READ_FILE_OPS(thermal_throttling);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+DEBUGFS_READ_FILE_OPS(temperature);
DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
DEBUGFS_READ_FILE_OPS(current_sleep_command);
@@ -889,36 +833,23 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
struct iwl_priv *priv = file->private_data;
int pos = 0, ofs = 0;
int cnt = 0, entry;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_rx_queue *rxq = &priv->rxq;
+
char *buf;
int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
- (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
+ (hw_params(priv).max_txq_num * 32 * 8) + 400;
const u8 *ptr;
ssize_t ret;
- if (!priv->txq) {
- IWL_ERR(priv, "txq not ready\n");
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate buffer\n");
return -ENOMEM;
}
- pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
- q = &txq->q;
- pos += scnprintf(buf + pos, bufsz - pos,
- "q[%d]: read_ptr: %u, write_ptr: %u\n",
- cnt, q->read_ptr, q->write_ptr);
- }
- if (priv->tx_traffic && (iwl_debug_level & IWL_DL_TX)) {
+ if (priv->tx_traffic &&
+ (iwl_get_debug_level(priv->shrd) & IWL_DL_TX)) {
ptr = priv->tx_traffic;
pos += scnprintf(buf + pos, bufsz - pos,
- "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
+ "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
entry++, ofs += 16) {
@@ -933,15 +864,11 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
}
}
- pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "read: %u, write: %u\n",
- rxq->read, rxq->write);
-
- if (priv->rx_traffic && (iwl_debug_level & IWL_DL_RX)) {
+ if (priv->rx_traffic &&
+ (iwl_get_debug_level(priv->shrd) & IWL_DL_RX)) {
ptr = priv->rx_traffic;
pos += scnprintf(buf + pos, bufsz - pos,
- "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
+ "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
entry++, ofs += 16) {
@@ -982,76 +909,6 @@ static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
return count;
}
-static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- char *buf;
- int pos = 0;
- int cnt;
- int ret;
- const size_t bufsz = sizeof(char) * 64 *
- priv->cfg->base_params->num_of_queues;
-
- if (!priv->txq) {
- IWL_ERR(priv, "txq not ready\n");
- return -EAGAIN;
- }
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
- q = &txq->q;
- pos += scnprintf(buf + pos, bufsz - pos,
- "hwq %.2d: read=%u write=%u stop=%d"
- " swq_id=%#.2x (ac %d/hwq %d)\n",
- cnt, q->read_ptr, q->write_ptr,
- !!test_bit(cnt, priv->queue_stopped),
- txq->swq_id, txq->swq_id & 3,
- (txq->swq_id >> 2) & 0x1f);
- if (cnt >= 4)
- continue;
- /* for the ACs, display the stop count too */
- pos += scnprintf(buf + pos, bufsz - pos,
- " stop-count: %d\n",
- atomic_read(&priv->queue_stop_count[cnt]));
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- struct iwl_rx_queue *rxq = &priv->rxq;
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
- rxq->read);
- pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
- rxq->write);
- pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
- rxq->free_count);
- if (rxq->rb_stts) {
- pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
- le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
- } else {
- pos += scnprintf(buf + pos, bufsz - pos,
- "closed_rb_num: Not Allocated\n");
- }
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
static const char *fmt_value = " %-30s %10u\n";
static const char *fmt_hex = " %-30s 0x%02X\n";
static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
@@ -1096,7 +953,7 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
struct statistics_rx_non_phy *delta_general, *max_general;
struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
- if (!iwl_is_alive(priv))
+ if (!iwl_is_alive(priv->shrd))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -1522,7 +1379,7 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
ssize_t ret;
struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
- if (!iwl_is_alive(priv))
+ if (!iwl_is_alive(priv->shrd))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -1716,7 +1573,7 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
struct statistics_div *div, *accum_div, *delta_div, *max_div;
- if (!iwl_is_alive(priv))
+ if (!iwl_is_alive(priv->shrd))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -1829,16 +1686,16 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
ssize_t ret;
struct statistics_bt_activity *bt, *accum_bt;
- if (!iwl_is_alive(priv))
+ if (!iwl_is_alive(priv->shrd))
return -EAGAIN;
if (!priv->bt_enable_flag)
return -EINVAL;
/* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
if (ret) {
IWL_ERR(priv,
@@ -1917,7 +1774,7 @@ static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
(sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
ssize_t ret;
- if (!iwl_is_alive(priv))
+ if (!iwl_is_alive(priv->shrd))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -2199,7 +2056,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
const size_t bufsz = sizeof(buf);
u32 pwrsave_status;
- pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
+ pwrsave_status = iwl_read32(bus(priv), CSR_GP_CNTRL) &
CSR_GP_REG_POWER_SAVE_STATUS_MSK;
pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
@@ -2229,30 +2086,9 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
return -EFAULT;
/* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
iwl_send_statistics_request(priv, CMD_SYNC, true);
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static ssize_t iwl_dbgfs_csr_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int csr;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &csr) != 1)
- return -EFAULT;
-
- iwl_dump_csr(priv);
+ mutex_unlock(&priv->shrd->mutex);
return count;
}
@@ -2333,25 +2169,6 @@ static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
-static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
- ssize_t ret = -EFAULT;
-
- ret = pos = iwl_dump_fh(priv, &buf, true);
- if (buf) {
- ret = simple_read_from_buffer(user_buf,
- count, ppos, buf, pos);
- kfree(buf);
- }
-
- return ret;
-}
-
static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
@@ -2434,8 +2251,8 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
static ssize_t iwl_dbgfs_force_reset_read(struct file *file,
char __user *user_buf,
- size_t count, loff_t *ppos) {
-
+ size_t count, loff_t *ppos)
+{
struct iwl_priv *priv = file->private_data;
int i, pos = 0;
char buf[300];
@@ -2504,7 +2321,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
if (sscanf(buf, "%d", &flush) != 1)
return -EINVAL;
- if (iwl_is_rfkill(priv))
+ if (iwl_is_rfkill(priv->shrd))
return -EFAULT;
iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
@@ -2514,8 +2331,8 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
const char __user *user_buf,
- size_t count, loff_t *ppos) {
-
+ size_t count, loff_t *ppos)
+{
struct iwl_priv *priv = file->private_data;
char buf[8];
int buf_size;
@@ -2626,11 +2443,26 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ iwl_cmd_echo_test(priv);
+ return count;
+}
+
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
-DEBUGFS_READ_FILE_OPS(rx_queue);
-DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
@@ -2639,9 +2471,7 @@ DEBUGFS_READ_FILE_OPS(chain_noise);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
-DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
-DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
@@ -2653,6 +2483,53 @@ DEBUGFS_WRITE_FILE_OPS(wd_timeout);
DEBUGFS_READ_FILE_OPS(bt_traffic);
DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
DEBUGFS_READ_FILE_OPS(reply_tx_error);
+DEBUGFS_WRITE_FILE_OPS(echo_test);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+static ssize_t iwl_dbgfs_debug_level_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ struct iwl_shared *shrd = priv->shrd;
+ char buf[11];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "0x%.8x",
+ iwl_get_debug_level(shrd));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_debug_level_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ struct iwl_shared *shrd = priv->shrd;
+ char buf[11];
+ unsigned long val;
+ int ret;
+
+ if (count > sizeof(buf))
+ return -EINVAL;
+
+ memset(buf, 0, sizeof(buf));
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ shrd->dbg_level_dev = val;
+ if (iwl_alloc_traffic_mem(priv))
+ IWL_ERR(priv, "Not enough memory to generate traffic log\n");
+
+ return count;
+}
+DEBUGFS_READ_WRITE_FILE_OPS(debug_level);
+#endif /* CONFIG_IWLWIFI_DEBUG */
/*
* Create the debugfs files and directories
@@ -2682,26 +2559,23 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_handlers, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR);
+
DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
@@ -2710,7 +2584,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR);
-
DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
@@ -2719,12 +2592,20 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
if (iwl_advanced_bt_coexist(priv))
DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
+#ifdef CONFIG_IWLWIFI_DEBUG
+ DEBUGFS_ADD_FILE(debug_level, dir_debug, S_IRUSR | S_IWUSR);
+#endif
+
DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
&priv->disable_sens_cal);
DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
&priv->disable_chain_noise_cal);
+
+ if (iwl_trans_dbgfs_register(trans(priv), dir_debug))
+ goto err;
return 0;
err:
@@ -2745,6 +2626,3 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
debugfs_remove_recursive(priv->debugfs_dir);
priv->debugfs_dir = NULL;
}
-
-
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 6c9790cac8d..6c00a447963 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -32,16 +32,15 @@
#define __iwl_dev_h__
#include <linux/interrupt.h>
-#include <linux/pci.h> /* for struct pci_device_id */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/leds.h>
+#include <linux/slab.h>
#include <net/ieee80211_radiotap.h>
#include "iwl-eeprom.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
-#include "iwl-fh.h"
#include "iwl-debug.h"
#include "iwl-agn-hw.h"
#include "iwl-led.h"
@@ -50,8 +49,7 @@
#include "iwl-agn-tt.h"
#include "iwl-bus.h"
#include "iwl-trans.h"
-
-#define DRV_NAME "iwlagn"
+#include "iwl-shared.h"
struct iwl_tx_queue;
@@ -90,109 +88,6 @@ struct iwl_tx_queue;
#define DEFAULT_SHORT_RETRY_LIMIT 7U
#define DEFAULT_LONG_RETRY_LIMIT 4U
-struct iwl_rx_mem_buffer {
- dma_addr_t page_dma;
- struct page *page;
- struct list_head list;
-};
-
-#define rxb_addr(r) page_address(r->page)
-
-/* defined below */
-struct iwl_device_cmd;
-
-struct iwl_cmd_meta {
- /* only for SYNC commands, iff the reply skb is wanted */
- struct iwl_host_cmd *source;
- /*
- * only for ASYNC commands
- * (which is somewhat stupid -- look at iwl-sta.c for instance
- * which duplicates a bunch of code because the callback isn't
- * invoked for SYNC commands, if it were and its result passed
- * through it would be simpler...)
- */
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt);
-
- u32 flags;
-
- DEFINE_DMA_UNMAP_ADDR(mapping);
- DEFINE_DMA_UNMAP_LEN(len);
-};
-
-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues.
- *
- * Note the difference between n_bd and n_window: the hardware
- * always assumes 256 descriptors, so n_bd is always 256 (unless
- * there might be HW changes in the future). For the normal TX
- * queues, n_window, which is the size of the software queue data
- * is also 256; however, for the command queue, n_window is only
- * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
- * the software buffers (in the variables @meta, @txb in struct
- * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
- * in the same struct) have 256.
- * This means that we end up with the following:
- * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
- * SW entries: | 0 | ... | 31 |
- * where N is a number between 0 and 7. This means that the SW
- * data is a window overlayed over the HW queue.
- */
-struct iwl_queue {
- int n_bd; /* number of BDs in this queue */
- int write_ptr; /* 1-st empty entry (index) host_w*/
- int read_ptr; /* last used entry (index) host_r*/
- /* use for monitoring and recovering the stuck queue */
- dma_addr_t dma_addr; /* physical addr for BD's */
- int n_window; /* safe queue window */
- u32 id;
- int low_mark; /* low watermark, resume queue if free
- * space more than this */
- int high_mark; /* high watermark, stop queue if free
- * space less than this */
-};
-
-/* One for each TFD */
-struct iwl_tx_info {
- struct sk_buff *skb;
- struct iwl_rxon_context *ctx;
-};
-
-/**
- * struct iwl_tx_queue - Tx Queue for DMA
- * @q: generic Rx/Tx queue descriptor
- * @bd: base of circular buffer of TFDs
- * @cmd: array of command/TX buffer pointers
- * @meta: array of meta data for each command/tx buffer
- * @dma_addr_cmd: physical address of cmd/tx buffer array
- * @txb: array of per-TFD driver data
- * @time_stamp: time (in jiffies) of last read_ptr change
- * @need_update: indicates need to update read/write index
- * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
- *
- * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
- * descriptors) and required locking structures.
- */
-#define TFD_TX_CMD_SLOTS 256
-#define TFD_CMD_SLOTS 32
-
-struct iwl_tx_queue {
- struct iwl_queue q;
- struct iwl_tfd *tfds;
- struct iwl_device_cmd **cmd;
- struct iwl_cmd_meta *meta;
- struct iwl_tx_info *txb;
- unsigned long time_stamp;
- u8 need_update;
- u8 sched_retry;
- u8 active;
- u8 swq_id;
-};
-
#define IWL_NUM_SCAN_RATES (2)
/*
@@ -222,20 +117,16 @@ struct iwl_channel_info {
u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
};
-#define IWL_TX_FIFO_BK 0 /* shared */
-#define IWL_TX_FIFO_BE 1
-#define IWL_TX_FIFO_VI 2 /* shared */
-#define IWL_TX_FIFO_VO 3
-#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
-#define IWL_TX_FIFO_BE_IPAN 4
-#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
-#define IWL_TX_FIFO_VO_IPAN 5
-#define IWL_TX_FIFO_UNUSED -1
-
-/* Minimum number of queues. MAX_NUM is defined in hw specific files.
- * Set the minimum to accommodate the 4 standard TX queues, 1 command
- * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
-#define IWL_MIN_NUM_QUEUES 10
+/*
+ * Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate
+ * - 4 standard TX queues
+ * - the command queue
+ * - 4 PAN TX queues
+ * - the PAN multicast queue, and
+ * - the AUX (TX during scan dwell) queue.
+ */
+#define IWL_MIN_NUM_QUEUES 11
/*
* Command queue depends on iPAN support.
@@ -243,161 +134,20 @@ struct iwl_channel_info {
#define IWL_DEFAULT_CMD_QUEUE_NUM 4
#define IWL_IPAN_CMD_QUEUE_NUM 9
-/*
- * This queue number is required for proper operation
- * because the ucode will stop/start the scheduler as
- * required.
- */
-#define IWL_IPAN_MCAST_QUEUE 8
-
#define IEEE80211_DATA_LEN 2304
#define IEEE80211_4ADDR_LEN 30
#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
-
-enum {
- CMD_SYNC = 0,
- CMD_ASYNC = BIT(0),
- CMD_WANT_SKB = BIT(1),
- CMD_ON_DEMAND = BIT(2),
-};
-
-#define DEF_CMD_PAYLOAD_SIZE 320
-
-/**
- * struct iwl_device_cmd
- *
- * For allocation of the command and tx queues, this establishes the overall
- * size of the largest command we send to uCode, except for commands that
- * aren't fully copied and use other TFD space.
- */
-struct iwl_device_cmd {
- struct iwl_cmd_header hdr; /* uCode API */
- union {
- u32 flags;
- u8 val8;
- u16 val16;
- u32 val32;
- struct iwl_tx_cmd tx;
- struct iwl6000_channel_switch_cmd chswitch;
- u8 payload[DEF_CMD_PAYLOAD_SIZE];
- } __packed cmd;
-} __packed;
-
-#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
-
-#define IWL_MAX_CMD_TFDS 2
-
-enum iwl_hcmd_dataflag {
- IWL_HCMD_DFL_NOCOPY = BIT(0),
-};
-
-/**
- * struct iwl_host_cmd - Host command to the uCode
- * @data: array of chunks that composes the data of the host command
- * @reply_page: pointer to the page that holds the response to the host command
- * @callback:
- * @flags: can be CMD_* note CMD_WANT_SKB is incompatible withe CMD_ASYNC
- * @len: array of the lenths of the chunks in data
- * @dataflags:
- * @id: id of the host command
- */
-struct iwl_host_cmd {
- const void *data[IWL_MAX_CMD_TFDS];
- unsigned long reply_page;
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt);
- u32 flags;
- u16 len[IWL_MAX_CMD_TFDS];
- u8 dataflags[IWL_MAX_CMD_TFDS];
- u8 id;
-};
-
#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
-/**
- * struct iwl_rx_queue - Rx queue
- * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
- * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @read: Shared index to newest available Rx buffer
- * @write: Shared index to oldest written Rx packet
- * @free_count: Number of pre-allocated buffers in rx_free
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
- * @need_update: flag to indicate we need to update read/write index
- * @rb_stts: driver's pointer to receive buffer status
- * @rb_stts_dma: bus address of receive buffer status
- *
- * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
- */
-struct iwl_rx_queue {
- __le32 *bd;
- dma_addr_t bd_dma;
- struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
- struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
- u32 read;
- u32 write;
- u32 free_count;
- u32 write_actual;
- struct list_head rx_free;
- struct list_head rx_used;
- int need_update;
- struct iwl_rb_status *rb_stts;
- dma_addr_t rb_stts_dma;
- spinlock_t lock;
-};
-
#define IWL_SUPPORTED_RATES_IE_LEN 8
-#define MAX_TID_COUNT 9
-
#define IWL_INVALID_RATE 0xFF
#define IWL_INVALID_VALUE -1
-/**
- * struct iwl_ht_agg -- aggregation status while waiting for block-ack
- * @txq_id: Tx queue used for Tx attempt
- * @frame_count: # frames attempted by Tx command
- * @wait_for_ba: Expect block-ack before next Tx reply
- * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
- * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx window
- * @bitmap1: High order, one bit for each frame pending ACK in Tx window
- * @rate_n_flags: Rate at which Tx was attempted
- *
- * If REPLY_TX indicates that aggregation was attempted, driver must wait
- * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
- * until block ack arrives.
- */
-struct iwl_ht_agg {
- u16 txq_id;
- u16 frame_count;
- u16 wait_for_ba;
- u16 start_idx;
- u64 bitmap;
- u32 rate_n_flags;
-#define IWL_AGG_OFF 0
-#define IWL_AGG_ON 1
-#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
-#define IWL_EMPTYING_HW_QUEUE_DELBA 3
- u8 state;
- u8 tx_fifo;
-};
-
-
-struct iwl_tid_data {
- u16 seq_number; /* agn only */
- u16 tfds_in_queue;
- struct iwl_ht_agg agg;
-};
-
union iwl_ht_rate_supp {
u16 rates;
struct {
@@ -448,16 +198,10 @@ struct iwl_qos_info {
*/
struct iwl_station_entry {
struct iwl_addsta_cmd sta;
- struct iwl_tid_data tid[MAX_TID_COUNT];
u8 used, ctxid;
struct iwl_link_quality_cmd *lq;
};
-struct iwl_station_priv_common {
- struct iwl_rxon_context *ctx;
- u8 sta_id;
-};
-
/*
* iwl_station_priv: Driver's private station information
*
@@ -466,12 +210,13 @@ struct iwl_station_priv_common {
* space.
*/
struct iwl_station_priv {
- struct iwl_station_priv_common common;
+ struct iwl_rxon_context *ctx;
struct iwl_lq_sta lq_sta;
atomic_t pending_frames;
bool client;
bool asleep;
u8 max_agg_bufsize;
+ u8 sta_id;
};
/**
@@ -564,11 +309,13 @@ enum iwl_ucode_tlv_type {
* @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
* treats good CRC threshold as a boolean
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
+ * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
+ IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
};
struct iwl_ucode_tlv {
@@ -634,54 +381,6 @@ struct iwl_sensitivity_ranges {
#define CELSIUS_TO_KELVIN(x) ((x)+273)
-/**
- * struct iwl_hw_params
- * @max_txq_num: Max # Tx queues supported
- * @scd_bc_tbls_size: size of scheduler byte count tables
- * @tfd_size: TFD size
- * @tx/rx_chains_num: Number of TX/RX chains
- * @valid_tx/rx_ant: usable antennas
- * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
- * @max_rxq_log: Log-base-2 of max_rxq_size
- * @rx_page_order: Rx buffer page order
- * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
- * @max_stations:
- * @ht40_channel: is 40MHz width possible in band 2.4
- * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
- * @sw_crypto: 0 for hw, 1 for sw
- * @max_xxx_size: for ucode uses
- * @ct_kill_threshold: temperature threshold
- * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
- * @calib_init_cfg: setup initial calibrations for the hw
- * @calib_rt_cfg: setup runtime calibrations for the hw
- * @struct iwl_sensitivity_ranges: range of sensitivity values
- */
-struct iwl_hw_params {
- u8 max_txq_num;
- u16 scd_bc_tbls_size;
- u32 tfd_size;
- u8 tx_chains_num;
- u8 rx_chains_num;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- u16 max_rxq_size;
- u16 max_rxq_log;
- u32 rx_page_order;
- u8 max_stations;
- u8 ht40_channel;
- u8 max_beacon_itrvl; /* in 1024 ms */
- u32 max_inst_size;
- u32 max_data_size;
- u32 ct_kill_threshold; /* value in hw-dependent units */
- u32 ct_kill_exit_threshold; /* value in hw-dependent units */
- /* for 1000, 6000 series and up */
- u16 beacon_time_tsf_bits;
- u32 calib_init_cfg;
- u32 calib_rt_cfg;
- const struct iwl_sensitivity_ranges *sens;
-};
-
-
/******************************************************************************
*
* Functions implemented in core module which are forward declared here
@@ -697,35 +396,12 @@ struct iwl_hw_params {
****************************************************************************/
extern void iwl_update_chain_flags(struct iwl_priv *priv);
extern const u8 iwl_bcast_addr[ETH_ALEN];
-extern int iwl_queue_space(const struct iwl_queue *q);
-static inline int iwl_queue_used(const struct iwl_queue *q, int i)
-{
- return q->write_ptr >= q->read_ptr ?
- (i >= q->read_ptr && i < q->write_ptr) :
- !(i < q->read_ptr && i >= q->write_ptr);
-}
-
-
-static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
-{
- return index & (q->n_window - 1);
-}
-
-
-struct iwl_dma_ptr {
- dma_addr_t dma;
- void *addr;
- size_t size;
-};
#define IWL_OPERATION_MODE_AUTO 0
#define IWL_OPERATION_MODE_HT_ONLY 1
#define IWL_OPERATION_MODE_MIXED 2
#define IWL_OPERATION_MODE_20MHZ 3
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
/* Sensitivity and chain noise calibration */
@@ -849,9 +525,6 @@ struct iwl_chain_noise_data {
#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-#define IWL_TRAFFIC_ENTRIES (256)
-#define IWL_TRAFFIC_ENTRY_SIZE (64)
-
enum {
MEASUREMENT_READY = (1 << 0),
MEASUREMENT_ACTIVE = (1 << 1),
@@ -884,22 +557,6 @@ enum iwl_pa_type {
IWL_PA_INTERNAL = 1,
};
-/* interrupt statistics */
-struct isr_statistics {
- u32 hw;
- u32 sw;
- u32 err_code;
- u32 sch;
- u32 alive;
- u32 rfkill;
- u32 ctkill;
- u32 wakeup;
- u32 rx;
- u32 rx_handlers[REPLY_MAX];
- u32 tx;
- u32 unhandled;
-};
-
/* reply_tx_statistics (for _agn devices) */
struct reply_tx_error_statistics {
u32 pp_delay;
@@ -1009,21 +666,6 @@ struct iwl_event_log {
};
/*
- * host interrupt timeout value
- * used with setting interrupt coalescing timer
- * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
- *
- * default interrupt coalescing timer is 64 x 32 = 2048 usecs
- * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
- */
-#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
-#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
-
-/*
* This is the threshold value of plcp error rate per 100mSecs. It is
* used to set and check for the validity of plcp_delta.
*/
@@ -1101,20 +743,9 @@ struct iwl_notification_wait {
bool triggered, aborted;
};
-enum iwl_rxon_context_id {
- IWL_RXON_CTX_BSS,
- IWL_RXON_CTX_PAN,
-
- NUM_IWL_RXON_CTX
-};
-
struct iwl_rxon_context {
struct ieee80211_vif *vif;
- const u8 *ac_to_fifo;
- const u8 *ac_to_queue;
- u8 mcast_queue;
-
/*
* We could use the vif to indicate active, but we
* also need it to be active during disabling when
@@ -1162,13 +793,16 @@ struct iwl_rxon_context {
u8 extension_chan_offset;
} ht;
+ u8 bssid[ETH_ALEN];
+ bool preauth_bssid;
+
bool last_tx_rejected;
};
enum iwl_scan_type {
IWL_SCAN_NORMAL,
IWL_SCAN_RADIO_RESET,
- IWL_SCAN_OFFCH_TX,
+ IWL_SCAN_ROC,
};
enum iwlagn_ucode_type {
@@ -1190,24 +824,26 @@ struct iwl_testmode_trace {
};
#endif
-/* uCode ownership */
-#define IWL_OWNERSHIP_DRIVER 0
-#define IWL_OWNERSHIP_TM 1
-
struct iwl_priv {
+ /*data shared among all the driver's layers */
+ struct iwl_shared _shrd;
+ struct iwl_shared *shrd;
+
/* ieee device used by generic ieee processing code */
struct ieee80211_hw *hw;
struct ieee80211_channel *ieee_channels;
struct ieee80211_rate *ieee_rates;
+ struct kmem_cache *tx_cmd_pool;
struct iwl_cfg *cfg;
enum ieee80211_band band;
void (*pre_rx_handler)(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
- void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
+ int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
@@ -1225,6 +861,9 @@ struct iwl_priv {
/* jiffies when last recovery from statistics was performed */
unsigned long rx_statistics_jiffies;
+ /*counters */
+ u32 rx_handlers_stats[REPLY_MAX];
+
/* force reset */
struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
@@ -1238,7 +877,7 @@ struct iwl_priv {
u8 channel_count; /* # of channels */
/* thermal calibration */
- s32 temperature; /* degrees Kelvin */
+ s32 temperature; /* Celsius */
s32 last_temperature;
/* init calibration results */
@@ -1255,21 +894,6 @@ struct iwl_priv {
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
u8 mgmt_tx_ant;
- /* spinlock */
- spinlock_t lock; /* protect general shared data */
- spinlock_t hcmd_lock; /* protect hcmd */
- spinlock_t reg_lock; /* protect hw register access */
- struct mutex mutex;
-
- struct iwl_bus *bus; /* bus specific data */
- struct iwl_trans trans;
-
- /* microcode/device supports multiple contexts */
- u8 valid_contexts;
-
- /* command queue number */
- u8 cmd_queue;
-
/* max number of station keys */
u8 sta_key_max_num;
@@ -1283,9 +907,6 @@ struct iwl_priv {
u32 ucode_ver; /* version of ucode, copy of
iwl_ucode.ver */
- /* uCode owner: default: IWL_OWNERSHIP_DRIVER */
- u8 ucode_owner;
-
struct fw_img ucode_rt;
struct fw_img ucode_init;
struct fw_img ucode_wowlan;
@@ -1317,52 +938,25 @@ struct iwl_priv {
/* Rate scaling data */
u8 retry_rate;
- wait_queue_head_t wait_command_queue;
-
int activity_timer_active;
- /* Rx and Tx DMA processing queues */
- struct iwl_rx_queue rxq;
- struct iwl_tx_queue *txq;
- unsigned long txq_ctx_active_msk;
- struct iwl_dma_ptr kw; /* keep warm address */
- struct iwl_dma_ptr scd_bc_tbls;
-
- u32 scd_base_addr; /* scheduler sram base address */
-
- unsigned long status;
-
/* counts mgmt, ctl, and data packets */
struct traffic_stats tx_stats;
struct traffic_stats rx_stats;
- /* counts interrupts */
- struct isr_statistics isr_stats;
-
struct iwl_power_mgr power_data;
struct iwl_tt_mgmt thermal_throttle;
/* station table variables */
-
- /* Note: if lock and sta_lock are needed, lock must be acquired first */
- spinlock_t sta_lock;
int num_stations;
struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
unsigned long ucode_key_table;
- /* queue refcounts */
-#define IWL_MAX_HW_QUEUES 32
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
- /* for each AC */
- atomic_t queue_stop_count[4];
+ u8 mac80211_registered;
/* Indication if ieee80211_ops->open has been called */
u8 is_open;
- u8 mac80211_registered;
-
- bool wowlan;
-
/* eeprom -- this is in the card's little endian byte order */
u8 *eeprom;
int nvm_device_type;
@@ -1398,14 +992,6 @@ struct iwl_priv {
} accum_stats, delta_stats, max_delta_stats;
#endif
- /* INT ICT Table */
- __le32 *ict_tbl;
- void *ict_tbl_vir;
- dma_addr_t ict_tbl_dma;
- dma_addr_t aligned_ict_tbl_dma;
- int ict_index;
- u32 inta;
- bool use_ict;
/*
* reporting the number of tids has AGG on. 0 means
* no AGGREGATION
@@ -1438,14 +1024,10 @@ struct iwl_priv {
/* remain-on-channel offload support */
struct ieee80211_channel *hw_roc_channel;
- struct delayed_work hw_roc_work;
+ struct delayed_work hw_roc_disable_work;
enum nl80211_channel_type hw_roc_chantype;
int hw_roc_duration;
- bool hw_roc_setup;
-
- struct sk_buff *offchan_tx_skb;
- int offchan_tx_timeout;
- struct ieee80211_channel *offchan_tx_chan;
+ bool hw_roc_setup, hw_roc_start_notified;
/* bt coex */
u8 bt_enable_flag;
@@ -1466,15 +1048,8 @@ struct iwl_priv {
struct iwl_rxon_context *cur_rssi_ctx;
bool bt_is_sco;
- struct iwl_hw_params hw_params;
-
- u32 inta_mask;
-
- struct workqueue_struct *workqueue;
-
struct work_struct restart;
struct work_struct scan_completed;
- struct work_struct rx_replenish;
struct work_struct abort_scan;
struct work_struct beacon_update;
@@ -1490,8 +1065,6 @@ struct iwl_priv {
struct work_struct bt_full_concurrency;
struct work_struct bt_runtime_config;
- struct tasklet_struct irq_tasklet;
-
struct delayed_work scan_check;
/* TX Power */
@@ -1500,12 +1073,6 @@ struct iwl_priv {
s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
s8 tx_power_next;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- /* debugging info */
- u32 debug_level; /* per device debugging will override global
- iwl_debug_level if set */
-#endif /* CONFIG_IWLWIFI_DEBUG */
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* debugfs */
u16 tx_traffic_idx;
@@ -1543,47 +1110,7 @@ struct iwl_priv {
bool have_rekey_data;
}; /*iwl_priv */
-static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
-{
- set_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
-{
- clear_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-/*
- * iwl_get_debug_level: Return active debug level for device
- *
- * Using sysfs it is possible to set per device debug level. This debug
- * level will be used if set, otherwise the global debug level which can be
- * set via module parameter is used.
- */
-static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
-{
- if (priv->debug_level)
- return priv->debug_level;
- else
- return iwl_debug_level;
-}
-#else
-static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
-{
- return iwl_debug_level;
-}
-#endif
-
-
-static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
- int txq_id, int idx)
-{
- if (priv->txq[txq_id].txb[idx].skb)
- return (struct ieee80211_hdr *)priv->txq[txq_id].
- txb[idx].skb->data;
- return NULL;
-}
+extern struct iwl_mod_params iwlagn_mod_params;
static inline struct iwl_rxon_context *
iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
@@ -1596,7 +1123,7 @@ iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
#define for_each_context(priv, ctx) \
for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
- if (priv->valid_contexts & BIT(ctx->ctxid))
+ if (priv->shrd->valid_contexts & BIT(ctx->ctxid))
static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
{
@@ -1650,13 +1177,4 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
}
-static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page)
-{
- __free_pages(page, priv->hw_params.rx_page_order);
-}
-
-static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page)
-{
- free_pages(page, priv->hw_params.rx_page_order);
-}
#endif /* __iwl_dev_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 2c84ba95afc..8a51c5ccda1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -29,6 +29,8 @@
#include <linux/tracepoint.h>
+struct iwl_priv;
+
#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 19d31a5e32e..a4e43bd4a54 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -72,6 +72,7 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
+#include "iwl-agn.h"
#include "iwl-eeprom.h"
#include "iwl-io.h"
@@ -138,7 +139,7 @@ static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
/******************************************************************************
*
- * EEPROM related functions
+ * generic NVM functions
*
******************************************************************************/
@@ -155,11 +156,11 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
/* See if we got it */
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+ ret = iwl_poll_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
EEPROM_SEM_TIMEOUT);
@@ -176,14 +177,14 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
static void iwl_eeprom_release_semaphore(struct iwl_priv *priv)
{
- iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_clear_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
}
static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
{
- u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+ u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
int ret = 0;
IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
@@ -214,19 +215,106 @@ static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
return ret;
}
+u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+{
+ if (!priv->eeprom)
+ return 0;
+ return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+}
+
+int iwl_eeprom_check_version(struct iwl_priv *priv)
+{
+ u16 eeprom_ver;
+ u16 calib_ver;
+
+ eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
+ calib_ver = iwlagn_eeprom_calib_version(priv);
+
+ if (eeprom_ver < priv->cfg->eeprom_ver ||
+ calib_ver < priv->cfg->eeprom_calib_ver)
+ goto err;
+
+ IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+ eeprom_ver, calib_ver);
+
+ return 0;
+err:
+ IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
+ "CALIB=0x%x < 0x%x\n",
+ eeprom_ver, priv->cfg->eeprom_ver,
+ calib_ver, priv->cfg->eeprom_calib_ver);
+ return -EINVAL;
+
+}
+
+int iwl_eeprom_check_sku(struct iwl_priv *priv)
+{
+ u16 radio_cfg;
+
+ if (!priv->cfg->sku) {
+ /* not using sku overwrite */
+ priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
+ if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE &&
+ !priv->cfg->ht_params) {
+ IWL_ERR(priv, "Invalid 11n configuration\n");
+ return -EINVAL;
+ }
+ }
+ if (!priv->cfg->sku) {
+ IWL_ERR(priv, "Invalid device sku\n");
+ return -EINVAL;
+ }
+
+ IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku);
+
+ if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
+ /* not using .cfg overwrite */
+ radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+ priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+ if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
+ IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
+ priv->cfg->valid_tx_ant,
+ priv->cfg->valid_rx_ant);
+ return -EINVAL;
+ }
+ IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n",
+ priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant);
+ }
+ /*
+ * for some special cases,
+ * EEPROM did not reflect the correct antenna setting
+ * so overwrite the valid tx/rx antenna from .cfg
+ */
+ return 0;
+}
+
+void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
+{
+ const u8 *addr = iwl_eeprom_query_addr(priv,
+ EEPROM_MAC_ADDRESS);
+ memcpy(mac, addr, ETH_ALEN);
+}
+
+/******************************************************************************
+ *
+ * OTP related functions
+ *
+******************************************************************************/
+
static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
{
- iwl_read32(priv, CSR_OTP_GP_REG);
+ iwl_read32(bus(priv), CSR_OTP_GP_REG);
if (mode == IWL_OTP_ACCESS_ABSOLUTE)
- iwl_clear_bit(priv, CSR_OTP_GP_REG,
+ iwl_clear_bit(bus(priv), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
else
- iwl_set_bit(priv, CSR_OTP_GP_REG,
+ iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
}
-static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
+static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
{
u32 otpgp;
int nvm_type;
@@ -243,7 +331,7 @@ static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
nvm_type = NVM_DEVICE_TYPE_EEPROM;
break;
default:
- otpgp = iwl_read32(priv, CSR_OTP_GP_REG);
+ otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
nvm_type = NVM_DEVICE_TYPE_OTP;
else
@@ -258,22 +346,22 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
int ret;
/* Enable 40MHz radio clock */
- iwl_write32(priv, CSR_GP_CNTRL,
- iwl_read32(priv, CSR_GP_CNTRL) |
+ iwl_write32(bus(priv), CSR_GP_CNTRL,
+ iwl_read32(bus(priv), CSR_GP_CNTRL) |
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* wait for clock to be ready */
- ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+ ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000);
if (ret < 0)
IWL_ERR(priv, "Time out access OTP\n");
else {
- iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
+ iwl_set_bits_prph(bus(priv), APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
udelay(5);
- iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
+ iwl_clear_bits_prph(bus(priv), APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
/*
@@ -281,7 +369,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
* this is only applicable for HW with OTP shadow RAM
*/
if (priv->cfg->base_params->shadow_ram_support)
- iwl_set_bit(priv, CSR_DBG_LINK_PWR_MGMT_REG,
+ iwl_set_bit(bus(priv), CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
}
return ret;
@@ -293,9 +381,9 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
u32 r;
u32 otpgp;
- iwl_write32(priv, CSR_EEPROM_REG,
+ iwl_write32(bus(priv), CSR_EEPROM_REG,
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
+ ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
CSR_EEPROM_REG_READ_VALID_MSK,
CSR_EEPROM_REG_READ_VALID_MSK,
IWL_EEPROM_ACCESS_TIMEOUT);
@@ -303,13 +391,13 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
IWL_ERR(priv, "Time out reading OTP[%d]\n", addr);
return ret;
}
- r = iwl_read32(priv, CSR_EEPROM_REG);
+ r = iwl_read32(bus(priv), CSR_EEPROM_REG);
/* check for ECC errors: */
- otpgp = iwl_read32(priv, CSR_OTP_GP_REG);
+ otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
/* stop in this case */
/* set the uncorrectable OTP ECC bit for acknowledgement */
- iwl_set_bit(priv, CSR_OTP_GP_REG,
+ iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n");
return -EINVAL;
@@ -317,7 +405,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
/* continue in this case */
/* set the correctable OTP ECC bit for acknowledgement */
- iwl_set_bit(priv, CSR_OTP_GP_REG,
+ iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
}
@@ -407,11 +495,152 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
return -EINVAL;
}
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+/******************************************************************************
+ *
+ * Tx Power related functions
+ *
+******************************************************************************/
+/**
+ * iwl_get_max_txpower_avg - get the highest tx power from all chains.
+ * find the highest tx power from all chains for the channel
+ */
+static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
+ struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
+ int element, s8 *max_txpower_in_half_dbm)
{
- if (!priv->eeprom)
- return 0;
- return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+ s8 max_txpower_avg = 0; /* (dBm) */
+
+ /* Take the highest tx power from any valid chains */
+ if ((priv->cfg->valid_tx_ant & ANT_A) &&
+ (enhanced_txpower[element].chain_a_max > max_txpower_avg))
+ max_txpower_avg = enhanced_txpower[element].chain_a_max;
+ if ((priv->cfg->valid_tx_ant & ANT_B) &&
+ (enhanced_txpower[element].chain_b_max > max_txpower_avg))
+ max_txpower_avg = enhanced_txpower[element].chain_b_max;
+ if ((priv->cfg->valid_tx_ant & ANT_C) &&
+ (enhanced_txpower[element].chain_c_max > max_txpower_avg))
+ max_txpower_avg = enhanced_txpower[element].chain_c_max;
+ if (((priv->cfg->valid_tx_ant == ANT_AB) |
+ (priv->cfg->valid_tx_ant == ANT_BC) |
+ (priv->cfg->valid_tx_ant == ANT_AC)) &&
+ (enhanced_txpower[element].mimo2_max > max_txpower_avg))
+ max_txpower_avg = enhanced_txpower[element].mimo2_max;
+ if ((priv->cfg->valid_tx_ant == ANT_ABC) &&
+ (enhanced_txpower[element].mimo3_max > max_txpower_avg))
+ max_txpower_avg = enhanced_txpower[element].mimo3_max;
+
+ /*
+ * max. tx power in EEPROM is in 1/2 dBm format
+ * convert from 1/2 dBm to dBm (round-up convert)
+ * but we also do not want to loss 1/2 dBm resolution which
+ * will impact performance
+ */
+ *max_txpower_in_half_dbm = max_txpower_avg;
+ return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
+}
+
+static void
+iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
+ struct iwl_eeprom_enhanced_txpwr *txp,
+ s8 max_txpower_avg)
+{
+ int ch_idx;
+ bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ;
+ enum ieee80211_band band;
+
+ band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
+ IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+
+ for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) {
+ struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx];
+
+ /* update matching channel or from common data only */
+ if (txp->channel != 0 && ch_info->channel != txp->channel)
+ continue;
+
+ /* update matching band only */
+ if (band != ch_info->band)
+ continue;
+
+ if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) {
+ ch_info->max_power_avg = max_txpower_avg;
+ ch_info->curr_txpow = max_txpower_avg;
+ ch_info->scan_power = max_txpower_avg;
+ }
+
+ if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg)
+ ch_info->ht40_max_power_avg = max_txpower_avg;
+ }
+}
+
+#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
+#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
+#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
+
+#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
+ ? # x " " : "")
+
+void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
+{
+ struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
+ int idx, entries;
+ __le16 *txp_len;
+ s8 max_txp_avg, max_txp_avg_halfdbm;
+
+ BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
+
+ /* the length is in 16-bit words, but we want entries */
+ txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
+ entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
+
+ txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
+
+ for (idx = 0; idx < entries; idx++) {
+ txp = &txp_array[idx];
+ /* skip invalid entries */
+ if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
+ continue;
+
+ IWL_DEBUG_EEPROM(priv, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
+ (txp->channel && (txp->flags &
+ IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
+ "Common " : (txp->channel) ?
+ "Channel" : "Common",
+ (txp->channel),
+ TXP_CHECK_AND_PRINT(VALID),
+ TXP_CHECK_AND_PRINT(BAND_52G),
+ TXP_CHECK_AND_PRINT(OFDM),
+ TXP_CHECK_AND_PRINT(40MHZ),
+ TXP_CHECK_AND_PRINT(HT_AP),
+ TXP_CHECK_AND_PRINT(RES1),
+ TXP_CHECK_AND_PRINT(RES2),
+ TXP_CHECK_AND_PRINT(COMMON_TYPE),
+ txp->flags);
+ IWL_DEBUG_EEPROM(priv, "\t\t chain_A: 0x%02x "
+ "chain_B: 0X%02x chain_C: 0X%02x\n",
+ txp->chain_a_max, txp->chain_b_max,
+ txp->chain_c_max);
+ IWL_DEBUG_EEPROM(priv, "\t\t MIMO2: 0x%02x "
+ "MIMO3: 0x%02x High 20_on_40: 0x%02x "
+ "Low 20_on_40: 0x%02x\n",
+ txp->mimo2_max, txp->mimo3_max,
+ ((txp->delta_20_in_40 & 0xf0) >> 4),
+ (txp->delta_20_in_40 & 0x0f));
+
+ max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
+ &max_txp_avg_halfdbm);
+
+ /*
+ * Update the user limit values values to the highest
+ * power supported by any channel
+ */
+ if (max_txp_avg > priv->tx_power_user_lmt)
+ priv->tx_power_user_lmt = max_txp_avg;
+ if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
+ priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
+
+ iwl_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
+ }
}
/**
@@ -424,14 +653,14 @@ u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
{
__le16 *e;
- u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
+ u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
int sz;
int ret;
u16 addr;
u16 validblockaddr = 0;
u16 cache_addr = 0;
- priv->nvm_device_type = iwlcore_get_nvm_type(priv, hw_rev);
+ priv->nvm_device_type = iwl_get_nvm_type(priv, hw_rev);
if (priv->nvm_device_type == -ENOENT)
return -ENOENT;
/* allocate eeprom */
@@ -469,11 +698,11 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
ret = -ENOENT;
goto done;
}
- iwl_write32(priv, CSR_EEPROM_GP,
- iwl_read32(priv, CSR_EEPROM_GP) &
+ iwl_write32(bus(priv), CSR_EEPROM_GP,
+ iwl_read32(bus(priv), CSR_EEPROM_GP) &
~CSR_EEPROM_GP_IF_OWNER_MSK);
- iwl_set_bit(priv, CSR_OTP_GP_REG,
+ iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
/* traversing the linked list if no shadow ram supported */
@@ -498,10 +727,10 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
for (addr = 0; addr < sz; addr += sizeof(u16)) {
u32 r;
- iwl_write32(priv, CSR_EEPROM_REG,
+ iwl_write32(bus(priv), CSR_EEPROM_REG,
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
+ ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
CSR_EEPROM_REG_READ_VALID_MSK,
CSR_EEPROM_REG_READ_VALID_MSK,
IWL_EEPROM_ACCESS_TIMEOUT);
@@ -509,7 +738,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
goto done;
}
- r = iwl_read32(priv, CSR_EEPROM_REG);
+ r = iwl_read32(bus(priv), CSR_EEPROM_REG);
e[addr / 2] = cpu_to_le16(r >> 16);
}
}
@@ -670,8 +899,9 @@ int iwl_init_channel_map(struct iwl_priv *priv)
IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
priv->channel_count);
- priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
- priv->channel_count, GFP_KERNEL);
+ priv->channel_info = kcalloc(priv->channel_count,
+ sizeof(struct iwl_channel_info),
+ GFP_KERNEL);
if (!priv->channel_info) {
IWL_ERR(priv, "Could not allocate channel_info\n");
priv->channel_count = 0;
@@ -838,7 +1068,7 @@ void iwl_rf_config(struct iwl_priv *priv)
/* write radio config values to register */
if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
EEPROM_RF_CFG_DASH_MSK(radio_cfg));
@@ -850,7 +1080,7 @@ void iwl_rf_config(struct iwl_priv *priv)
WARN_ON(1);
/* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index e4bf8ac5e64..c94747e7299 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -163,11 +163,19 @@ struct iwl_eeprom_enhanced_txpwr {
} __packed;
/* calibration */
+struct iwl_eeprom_calib_hdr {
+ u8 version;
+ u8 pa_type;
+ __le16 voltage;
+} __packed;
+
#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
/* temperature */
-#define EEPROM_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
+#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
+#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
+
/* agn links */
#define EEPROM_LINK_HOST (2*0x64)
@@ -301,7 +309,6 @@ void iwl_eeprom_free(struct iwl_priv *priv);
int iwl_eeprom_check_version(struct iwl_priv *priv);
int iwl_eeprom_check_sku(struct iwl_priv *priv);
const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
-int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
int iwl_init_channel_map(struct iwl_priv *priv);
void iwl_free_channel_map(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 0ad60b3c04d..5bede9d7f95 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -63,6 +63,8 @@
#ifndef __iwl_fh_h__
#define __iwl_fh_h__
+#include <linux/types.h>
+
/****************************/
/* Flow Handler Definitions */
/****************************/
@@ -266,8 +268,6 @@
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
-#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
-
/**
* Rx Shared Status Registers (RSSR)
*
@@ -422,10 +422,6 @@
#define RX_FREE_BUFFERS 64
#define RX_LOW_WATERMARK 8
-/* Size of one Rx buffer in host DRAM */
-#define IWL_RX_BUF_SIZE_4K (4 * 1024)
-#define IWL_RX_BUF_SIZE_8K (8 * 1024)
-
/**
* struct iwl_rb_status - reseve buffer status
* host memory mapped FH registers
@@ -508,4 +504,16 @@ struct iwl_tfd {
/* Keep Warm Size */
#define IWL_KW_SIZE 0x1000 /* 4k */
+/* Fixed (non-configurable) rx data from phy */
+
+/**
+ * struct iwlagn_schedq_bc_tbl scheduler byte count table
+ * base physical address provided by SCD_DRAM_BASE_ADDR
+ * @tfd_offset 0-12 - tx command byte count
+ * 12-16 - station index
+ */
+struct iwlagn_scd_bc_tbl {
+ __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+} __packed;
+
#endif /* !__iwl_fh_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
deleted file mode 100644
index 9d91552d13c..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_helpers_h__
-#define __iwl_helpers_h__
-
-#include <linux/ctype.h>
-#include <net/mac80211.h>
-
-#include "iwl-io.h"
-
-#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
-
-
-static inline struct ieee80211_conf *ieee80211_get_hw_conf(
- struct ieee80211_hw *hw)
-{
- return &hw->conf;
-}
-
-/**
- * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_queue_inc_wrap(int index, int n_bd)
-{
- return ++index & (n_bd - 1);
-}
-
-/**
- * iwl_queue_dec_wrap - decrement queue index, wrap back to end
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_queue_dec_wrap(int index, int n_bd)
-{
- return --index & (n_bd - 1);
-}
-
-/*
- * we have 8 bits used like this:
- *
- * 7 6 5 4 3 2 1 0
- * | | | | | | | |
- * | | | | | | +-+-------- AC queue (0-3)
- * | | | | | |
- * | +-+-+-+-+------------ HW queue ID
- * |
- * +---------------------- unused
- */
-static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
-{
- BUG_ON(ac > 3); /* only have 2 bits */
- BUG_ON(hwq > 31); /* only use 5 bits */
-
- txq->swq_id = (hwq << 2) | ac;
-}
-
-static inline void iwl_wake_queue(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- u8 queue = txq->swq_id;
- u8 ac = queue & 3;
- u8 hwq = (queue >> 2) & 0x1f;
-
- if (test_and_clear_bit(hwq, priv->queue_stopped))
- if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
- ieee80211_wake_queue(priv->hw, ac);
-}
-
-static inline void iwl_stop_queue(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- u8 queue = txq->swq_id;
- u8 ac = queue & 3;
- u8 hwq = (queue >> 2) & 0x1f;
-
- if (!test_and_set_bit(hwq, priv->queue_stopped))
- if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
- ieee80211_stop_queue(priv->hw, ac);
-}
-
-static inline void iwl_wake_any_queue(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- u8 ac;
-
- for (ac = 0; ac < AC_NUM; ac++) {
- IWL_DEBUG_INFO(priv, "Queue Status: Q[%d] %s\n",
- ac, (atomic_read(&priv->queue_stop_count[ac]) > 0)
- ? "stopped" : "awake");
- iwl_wake_queue(priv, &priv->txq[ctx->ac_to_queue[ac]]);
- }
-}
-
-#ifdef ieee80211_stop_queue
-#undef ieee80211_stop_queue
-#endif
-
-#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
-
-#ifdef ieee80211_wake_queue
-#undef ieee80211_wake_queue
-#endif
-
-#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
-
-static inline void iwl_disable_interrupts(struct iwl_priv *priv)
-{
- clear_bit(STATUS_INT_ENABLED, &priv->status);
-
- /* disable interrupts from uCode/NIC to host */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* acknowledge/clear/reset any interrupts still pending
- * from uCode or flow handler (Rx/Tx DMA) */
- iwl_write32(priv, CSR_INT, 0xffffffff);
- iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
- IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
-}
-
-static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
-{
- IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
- iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
-}
-
-static inline void iwl_enable_interrupts(struct iwl_priv *priv)
-{
- IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
- set_bit(STATUS_INT_ENABLED, &priv->status);
- iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
-}
-
-/**
- * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits need to shift for masking)
- */
-static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
- u16 tsf_bits)
-{
- return (1 << tsf_bits) - 1;
-}
-
-/**
- * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits need to shift for masking)
- */
-static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
- u16 tsf_bits)
-{
- return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
-}
-
-#endif /* __iwl_helpers_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index aa4a9067445..3ffa8e62b85 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -25,46 +25,50 @@
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
+#include <linux/delay.h>
+#include <linux/device.h>
#include "iwl-io.h"
+#include"iwl-csr.h"
+#include "iwl-debug.h"
#define IWL_POLL_INTERVAL 10 /* microseconds */
-static inline void __iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+static inline void __iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask)
{
- iwl_write32(priv, reg, iwl_read32(priv, reg) | mask);
+ iwl_write32(bus, reg, iwl_read32(bus, reg) | mask);
}
-static inline void __iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+static inline void __iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask)
{
- iwl_write32(priv, reg, iwl_read32(priv, reg) & ~mask);
+ iwl_write32(bus, reg, iwl_read32(bus, reg) & ~mask);
}
-void iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+void iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- __iwl_set_bit(priv, reg, mask);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ __iwl_set_bit(bus, reg, mask);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-void iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+void iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- __iwl_clear_bit(priv, reg, mask);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ __iwl_clear_bit(bus, reg, mask);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
+int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
u32 bits, u32 mask, int timeout)
{
int t = 0;
do {
- if ((iwl_read32(priv, addr) & mask) == (bits & mask))
+ if ((iwl_read32(bus, addr) & mask) == (bits & mask))
return t;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
@@ -73,14 +77,14 @@ int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
return -ETIMEDOUT;
}
-int iwl_grab_nic_access_silent(struct iwl_priv *priv)
+int iwl_grab_nic_access_silent(struct iwl_bus *bus)
{
int ret;
- lockdep_assert_held(&priv->reg_lock);
+ lockdep_assert_held(&bus->reg_lock);
/* this bit wakes up the NIC */
- __iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ __iwl_set_bit(bus, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* These bits say the device is running, and should keep running for
@@ -101,70 +105,70 @@ int iwl_grab_nic_access_silent(struct iwl_priv *priv)
* 5000 series and later (including 1000 series) have non-volatile SRAM,
* and do not save/restore SRAM when power cycling.
*/
- ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+ ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (ret < 0) {
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+ iwl_write32(bus, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
return -EIO;
}
return 0;
}
-int iwl_grab_nic_access(struct iwl_priv *priv)
+int iwl_grab_nic_access(struct iwl_bus *bus)
{
- int ret = iwl_grab_nic_access_silent(priv);
+ int ret = iwl_grab_nic_access_silent(bus);
if (ret) {
- u32 val = iwl_read32(priv, CSR_GP_CNTRL);
- IWL_ERR(priv,
+ u32 val = iwl_read32(bus, CSR_GP_CNTRL);
+ IWL_ERR(bus,
"MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
}
return ret;
}
-void iwl_release_nic_access(struct iwl_priv *priv)
+void iwl_release_nic_access(struct iwl_bus *bus)
{
- lockdep_assert_held(&priv->reg_lock);
- __iwl_clear_bit(priv, CSR_GP_CNTRL,
+ lockdep_assert_held(&bus->reg_lock);
+ __iwl_clear_bit(bus, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
-u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg)
+u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg)
{
u32 value;
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- iwl_grab_nic_access(priv);
- value = iwl_read32(priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ iwl_grab_nic_access(bus);
+ value = iwl_read32(bus(bus), reg);
+ iwl_release_nic_access(bus);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
return value;
}
-void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
+void iwl_write_direct32(struct iwl_bus *bus, u32 reg, u32 value)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- if (!iwl_grab_nic_access(priv)) {
- iwl_write32(priv, reg, value);
- iwl_release_nic_access(priv);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ if (!iwl_grab_nic_access(bus)) {
+ iwl_write32(bus, reg, value);
+ iwl_release_nic_access(bus);
}
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
+int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
int timeout)
{
int t = 0;
do {
- if ((iwl_read_direct32(priv, addr) & mask) == mask)
+ if ((iwl_read_direct32(bus, addr) & mask) == mask)
return t;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
@@ -173,122 +177,122 @@ int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
return -ETIMEDOUT;
}
-static inline u32 __iwl_read_prph(struct iwl_priv *priv, u32 reg)
+static inline u32 __iwl_read_prph(struct iwl_bus *bus, u32 reg)
{
- iwl_write32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+ iwl_write32(bus, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
rmb();
- return iwl_read32(priv, HBUS_TARG_PRPH_RDAT);
+ return iwl_read32(bus, HBUS_TARG_PRPH_RDAT);
}
-static inline void __iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
+static inline void __iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val)
{
- iwl_write32(priv, HBUS_TARG_PRPH_WADDR,
+ iwl_write32(bus, HBUS_TARG_PRPH_WADDR,
((addr & 0x0000FFFF) | (3 << 24)));
wmb();
- iwl_write32(priv, HBUS_TARG_PRPH_WDAT, val);
+ iwl_write32(bus, HBUS_TARG_PRPH_WDAT, val);
}
-u32 iwl_read_prph(struct iwl_priv *priv, u32 reg)
+u32 iwl_read_prph(struct iwl_bus *bus, u32 reg)
{
unsigned long flags;
u32 val;
- spin_lock_irqsave(&priv->reg_lock, flags);
- iwl_grab_nic_access(priv);
- val = __iwl_read_prph(priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ iwl_grab_nic_access(bus);
+ val = __iwl_read_prph(bus, reg);
+ iwl_release_nic_access(bus);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
return val;
}
-void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
+void iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- if (!iwl_grab_nic_access(priv)) {
- __iwl_write_prph(priv, addr, val);
- iwl_release_nic_access(priv);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ if (!iwl_grab_nic_access(bus)) {
+ __iwl_write_prph(bus, addr, val);
+ iwl_release_nic_access(bus);
}
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
+void iwl_set_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- iwl_grab_nic_access(priv);
- __iwl_write_prph(priv, reg, __iwl_read_prph(priv, reg) | mask);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ iwl_grab_nic_access(bus);
+ __iwl_write_prph(bus, reg, __iwl_read_prph(bus, reg) | mask);
+ iwl_release_nic_access(bus);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
+void iwl_set_bits_mask_prph(struct iwl_bus *bus, u32 reg,
u32 bits, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- iwl_grab_nic_access(priv);
- __iwl_write_prph(priv, reg,
- (__iwl_read_prph(priv, reg) & mask) | bits);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ iwl_grab_nic_access(bus);
+ __iwl_write_prph(bus, reg,
+ (__iwl_read_prph(bus, reg) & mask) | bits);
+ iwl_release_nic_access(bus);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-void iwl_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
+void iwl_clear_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask)
{
unsigned long flags;
u32 val;
- spin_lock_irqsave(&priv->reg_lock, flags);
- iwl_grab_nic_access(priv);
- val = __iwl_read_prph(priv, reg);
- __iwl_write_prph(priv, reg, (val & ~mask));
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ iwl_grab_nic_access(bus);
+ val = __iwl_read_prph(bus, reg);
+ __iwl_write_prph(bus, reg, (val & ~mask));
+ iwl_release_nic_access(bus);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-void _iwl_read_targ_mem_words(struct iwl_priv *priv, u32 addr,
+void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
void *buf, int words)
{
unsigned long flags;
int offs;
u32 *vals = buf;
- spin_lock_irqsave(&priv->reg_lock, flags);
- iwl_grab_nic_access(priv);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ iwl_grab_nic_access(bus);
- iwl_write32(priv, HBUS_TARG_MEM_RADDR, addr);
+ iwl_write32(bus, HBUS_TARG_MEM_RADDR, addr);
rmb();
for (offs = 0; offs < words; offs++)
- vals[offs] = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
+ vals[offs] = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ iwl_release_nic_access(bus);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr)
+u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr)
{
u32 value;
- _iwl_read_targ_mem_words(priv, addr, &value, 1);
+ _iwl_read_targ_mem_words(bus, addr, &value, 1);
return value;
}
-void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
+void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- if (!iwl_grab_nic_access(priv)) {
- iwl_write32(priv, HBUS_TARG_MEM_WADDR, addr);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ if (!iwl_grab_nic_access(bus)) {
+ iwl_write32(bus, HBUS_TARG_MEM_WADDR, addr);
wmb();
- iwl_write32(priv, HBUS_TARG_MEM_WDAT, val);
- iwl_release_nic_access(priv);
+ iwl_write32(bus, HBUS_TARG_MEM_WDAT, val);
+ iwl_release_nic_access(bus);
}
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 19a09310112..ced2cbeb6ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -29,65 +29,62 @@
#ifndef __iwl_io_h__
#define __iwl_io_h__
-#include <linux/io.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
#include "iwl-devtrace.h"
+#include "iwl-shared.h"
#include "iwl-bus.h"
-static inline void iwl_write8(struct iwl_priv *priv, u32 ofs, u8 val)
+static inline void iwl_write8(struct iwl_bus *bus, u32 ofs, u8 val)
{
- trace_iwlwifi_dev_iowrite8(priv, ofs, val);
- bus_write8(priv->bus, ofs, val);
+ trace_iwlwifi_dev_iowrite8(priv(bus), ofs, val);
+ bus_write8(bus, ofs, val);
}
-static inline void iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val)
+static inline void iwl_write32(struct iwl_bus *bus, u32 ofs, u32 val)
{
- trace_iwlwifi_dev_iowrite32(priv, ofs, val);
- bus_write32(priv->bus, ofs, val);
+ trace_iwlwifi_dev_iowrite32(priv(bus), ofs, val);
+ bus_write32(bus, ofs, val);
}
-static inline u32 iwl_read32(struct iwl_priv *priv, u32 ofs)
+static inline u32 iwl_read32(struct iwl_bus *bus, u32 ofs)
{
- u32 val = bus_read32(priv->bus, ofs);
- trace_iwlwifi_dev_ioread32(priv, ofs, val);
+ u32 val = bus_read32(bus, ofs);
+ trace_iwlwifi_dev_ioread32(priv(bus), ofs, val);
return val;
}
-void iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask);
-void iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask);
+void iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask);
+void iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask);
-int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
+int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
u32 bits, u32 mask, int timeout);
-int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
+int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
int timeout);
-int iwl_grab_nic_access_silent(struct iwl_priv *priv);
-int iwl_grab_nic_access(struct iwl_priv *priv);
-void iwl_release_nic_access(struct iwl_priv *priv);
+int iwl_grab_nic_access_silent(struct iwl_bus *bus);
+int iwl_grab_nic_access(struct iwl_bus *bus);
+void iwl_release_nic_access(struct iwl_bus *bus);
-u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg);
-void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value);
+u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg);
+void iwl_write_direct32(struct iwl_bus *bus, u32 reg, u32 value);
-u32 iwl_read_prph(struct iwl_priv *priv, u32 reg);
-void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val);
-void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask);
-void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
+u32 iwl_read_prph(struct iwl_bus *bus, u32 reg);
+void iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val);
+void iwl_set_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);
+void iwl_set_bits_mask_prph(struct iwl_bus *bus, u32 reg,
u32 bits, u32 mask);
-void iwl_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask);
+void iwl_clear_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);
-void _iwl_read_targ_mem_words(struct iwl_priv *priv, u32 addr,
+void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
void *buf, int words);
-#define iwl_read_targ_mem_words(priv, addr, buf, bufsize) \
+#define iwl_read_targ_mem_words(bus, addr, buf, bufsize) \
do { \
BUILD_BUG_ON((bufsize) % sizeof(u32)); \
- _iwl_read_targ_mem_words(priv, addr, buf, \
+ _iwl_read_targ_mem_words(bus, addr, buf, \
(bufsize) / sizeof(u32));\
} while (0)
-u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr);
-void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val);
+u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr);
+void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index a67ae56d546..eb541735296 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -31,7 +31,6 @@
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
@@ -41,6 +40,7 @@
#include "iwl-agn.h"
#include "iwl-io.h"
#include "iwl-trans.h"
+#include "iwl-shared.h"
/* Throughput OFF time(ms) ON time (ms)
* >300 25 25
@@ -71,7 +71,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
/* Set led register off */
void iwlagn_led_enable(struct iwl_priv *priv)
{
- iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+ iwl_write32(bus(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
/*
@@ -104,15 +104,14 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
.len = { sizeof(struct iwl_led_cmd), },
.data = { led_cmd, },
.flags = CMD_ASYNC,
- .callback = NULL,
};
u32 reg;
- reg = iwl_read32(priv, CSR_LED_REG);
+ reg = iwl_read32(bus(priv), CSR_LED_REG);
if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
- iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+ iwl_write32(bus(priv), CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
}
/* Set led pattern command */
@@ -126,7 +125,7 @@ static int iwl_led_cmd(struct iwl_priv *priv,
};
int ret;
- if (!test_bit(STATUS_READY, &priv->status))
+ if (!test_bit(STATUS_READY, &priv->shrd->status))
return -EBUSY;
if (priv->blink_on == on && priv->blink_off == off)
@@ -203,8 +202,7 @@ void iwl_leds_init(struct iwl_priv *priv)
break;
}
- ret = led_classdev_register(priv->bus->dev,
- &priv->led);
+ ret = led_classdev_register(bus(priv)->dev, &priv->led);
if (ret) {
kfree(priv->led.name);
return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index 2fdbffa079c..3b6cc66365e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -64,9 +64,11 @@
#include <linux/pci-aspm.h>
#include "iwl-bus.h"
-#include "iwl-agn.h"
-#include "iwl-core.h"
#include "iwl-io.h"
+#include "iwl-shared.h"
+#include "iwl-trans.h"
+#include "iwl-csr.h"
+#include "iwl-cfg.h"
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
@@ -91,6 +93,7 @@ static u16 iwl_pciexp_link_ctrl(struct iwl_bus *bus)
{
int pos;
u16 pci_lnk_ctl;
+
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
pos = pci_pcie_cap(pci_dev);
@@ -120,23 +123,17 @@ static void iwl_pci_apm_config(struct iwl_bus *bus)
if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
PCI_CFG_LINK_CTRL_VAL_L1_EN) {
/* L1-ASPM enabled; disable(!) L0S */
- iwl_set_bit(bus->drv_data, CSR_GIO_REG,
+ iwl_set_bit(bus, CSR_GIO_REG,
CSR_GIO_REG_VAL_L0S_ENABLED);
dev_printk(KERN_INFO, bus->dev, "L1 Enabled; Disabling L0S\n");
} else {
/* L1-ASPM disabled; enable(!) L0S */
- iwl_clear_bit(bus->drv_data, CSR_GIO_REG,
+ iwl_clear_bit(bus, CSR_GIO_REG,
CSR_GIO_REG_VAL_L0S_ENABLED);
dev_printk(KERN_INFO, bus->dev, "L1 Disabled; Enabling L0S\n");
}
}
-static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data)
-{
- bus->drv_data = drv_data;
- pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_data);
-}
-
static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
int buf_len)
{
@@ -162,10 +159,9 @@ static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs)
return val;
}
-static struct iwl_bus_ops pci_ops = {
+static const struct iwl_bus_ops bus_ops_pci = {
.get_pm_support = iwl_pci_is_pm_supported,
.apm_config = iwl_pci_apm_config,
- .set_drv_data = iwl_pci_set_drv_data,
.get_hw_id = iwl_pci_get_hw_id,
.write8 = iwl_pci_write8,
.write32 = iwl_pci_write32,
@@ -256,6 +252,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)},
/* 6x30 Series */
{IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
@@ -328,6 +327,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
{IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
{IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
/* 2x30 Series */
{IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
@@ -355,6 +355,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
{IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
{IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
/* 135 Series */
{IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
@@ -387,6 +388,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_bus = IWL_BUS_GET_PCI_BUS(bus);
pci_bus->pci_dev = pdev;
+ pci_set_drvdata(pdev, bus);
+
/* W/A - seems to solve weird behavior. We need to remove this if we
* don't want to stay in L1 all the time. This wastes a lot of power */
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
@@ -457,9 +460,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bus->dev = &pdev->dev;
bus->irq = pdev->irq;
- bus->ops = &pci_ops;
+ bus->ops = &bus_ops_pci;
- err = iwl_probe(bus, cfg);
+ err = iwl_probe(bus, &trans_ops_pcie, cfg);
if (err)
goto out_disable_msi;
return 0;
@@ -480,12 +483,12 @@ out_no_pci:
static void __devexit iwl_pci_remove(struct pci_dev *pdev)
{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- struct iwl_bus *bus = priv->bus;
+ struct iwl_bus *bus = pci_get_drvdata(pdev);
struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus);
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
+ struct iwl_shared *shrd = bus->shrd;
- iwl_remove(priv);
+ iwl_remove(shrd->priv);
pci_disable_msi(pci_dev);
pci_iounmap(pci_dev, pci_bus->hw_base);
@@ -496,25 +499,27 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
kfree(bus);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int iwl_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
+ struct iwl_bus *bus = pci_get_drvdata(pdev);
+ struct iwl_shared *shrd = bus->shrd;
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
* WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
*/
- return iwl_suspend(priv);
+ return iwl_trans_suspend(shrd->trans);
}
static int iwl_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
+ struct iwl_bus *bus = pci_get_drvdata(pdev);
+ struct iwl_shared *shrd = bus->shrd;
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -527,7 +532,7 @@ static int iwl_pci_resume(struct device *device)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
- return iwl_resume(priv);
+ return iwl_trans_resume(shrd->trans);
}
static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index cd64df05f9e..4eaab204322 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -43,6 +43,7 @@
#include "iwl-debug.h"
#include "iwl-power.h"
#include "iwl-trans.h"
+#include "iwl-shared.h"
/*
* Setting power level allows the card to go to sleep when not busy.
@@ -214,7 +215,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
else
cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
- if (priv->cfg->base_params->shadow_reg_enable)
+ if (hw_params(priv).shadow_reg_enable)
cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
@@ -300,7 +301,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
if (priv->power_data.bus_pm)
cmd->flags |= IWL_POWER_PCI_PM_MSK;
- if (priv->cfg->base_params->shadow_reg_enable)
+ if (hw_params(priv).shadow_reg_enable)
cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
@@ -335,7 +336,7 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
le32_to_cpu(cmd->sleep_interval[3]),
le32_to_cpu(cmd->sleep_interval[4]));
- return trans_send_cmd_pdu(&priv->trans, POWER_TABLE_CMD, CMD_SYNC,
+ return iwl_trans_send_cmd_pdu(trans(priv), POWER_TABLE_CMD, CMD_SYNC,
sizeof(struct iwl_powertable_cmd), cmd);
}
@@ -347,7 +348,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
dtimper = priv->hw->conf.ps_dtim_period ?: 1;
- if (priv->wowlan)
+ if (priv->shrd->wowlan)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
else if (!priv->cfg->base_params->no_idle_support &&
priv->hw->conf.flags & IEEE80211_CONF_IDLE)
@@ -382,7 +383,7 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
int ret;
bool update_chains;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
/* Don't update the RX chain when chain noise calibration is running */
update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
@@ -391,23 +392,23 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
return 0;
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
return -EIO;
/* scan complete use sleep_power_next, need to be updated */
memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
- if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
+ if (test_bit(STATUS_SCANNING, &priv->shrd->status) && !force) {
IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
return 0;
}
if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
- set_bit(STATUS_POWER_PMI, &priv->status);
+ set_bit(STATUS_POWER_PMI, &priv->shrd->status);
ret = iwl_set_power(priv, cmd);
if (!ret) {
if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
- clear_bit(STATUS_POWER_PMI, &priv->status);
+ clear_bit(STATUS_POWER_PMI, &priv->shrd->status);
if (update_chains)
iwl_update_chain_flags(priv);
@@ -435,7 +436,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
- priv->power_data.bus_pm = bus_get_pm_support(priv->bus);
+ priv->power_data.bus_pm = bus_get_pm_support(bus(priv));
priv->power_data.debug_sleep_level_override = -1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 2f267b8aabb..bebdd828f32 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -217,8 +217,8 @@
((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
#define SCD_QUEUECHAIN_SEL_ALL(priv) \
- (((1<<(priv)->hw_params.max_txq_num) - 1) &\
- (~(1<<(priv)->cmd_queue)))
+ (((1<<hw_params(priv).max_txq_num) - 1) &\
+ (~(1<<(priv)->shrd->cmd_queue)))
#define SCD_BASE (PRPH_BASE + 0xa02c00)
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
deleted file mode 100644
index 8e314003b63..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ /dev/null
@@ -1,1029 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <net/mac80211.h>
-#include <asm/unaligned.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-agn-calib.h"
-#include "iwl-agn.h"
-
-
-/******************************************************************************
- *
- * Generic RX handler implementations
- *
- ******************************************************************************/
-
-static void iwl_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
- "seq 0x%04X ser 0x%08X\n",
- le32_to_cpu(pkt->u.err_resp.error_type),
- get_cmd_string(pkt->u.err_resp.cmd_id),
- pkt->u.err_resp.cmd_id,
- le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
- le32_to_cpu(pkt->u.err_resp.error_info));
-}
-
-static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
- /*
- * MULTI-FIXME
- * See iwl_mac_channel_switch.
- */
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
-
- if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- return;
-
- if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
- rxon->channel = csa->channel;
- ctx->staging.channel = csa->channel;
- IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_chswitch_done(priv, true);
- } else {
- IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_chswitch_done(priv, false);
- }
-}
-
-
-static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
-
- if (!report->state) {
- IWL_DEBUG_11H(priv,
- "Spectrum Measure Notification: Start\n");
- return;
- }
-
- memcpy(&priv->measure_report, report, sizeof(*report));
- priv->measurement_status |= MEASUREMENT_READY;
-}
-
-static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
- IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
- sleep->pm_sleep_mode, sleep->pm_wakeup_src);
-#endif
-}
-
-static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
- "notification for %s:\n", len,
- get_cmd_string(pkt->hdr.cmd));
- iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
-}
-
-static void iwl_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
- u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
- IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
- "tsf:0x%.8x%.8x rate:%d\n",
- status & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
- queue_work(priv->workqueue, &priv->beacon_update);
-}
-
-/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
-#define ACK_CNT_RATIO (50)
-#define BA_TIMEOUT_CNT (5)
-#define BA_TIMEOUT_MAX (16)
-
-/**
- * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
- *
- * When the ACK count ratio is low and aggregated BA timeout retries exceeding
- * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
- * operation state.
- */
-static bool iwl_good_ack_health(struct iwl_priv *priv,
- struct statistics_tx *cur)
-{
- int actual_delta, expected_delta, ba_timeout_delta;
- struct statistics_tx *old;
-
- if (priv->agg_tids_count)
- return true;
-
- old = &priv->statistics.tx;
-
- actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
- le32_to_cpu(old->actual_ack_cnt);
- expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
- le32_to_cpu(old->expected_ack_cnt);
-
- /* Values should not be negative, but we do not trust the firmware */
- if (actual_delta <= 0 || expected_delta <= 0)
- return true;
-
- ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
- le32_to_cpu(old->agg.ba_timeout);
-
- if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
- ba_timeout_delta > BA_TIMEOUT_CNT) {
- IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
- actual_delta, expected_delta, ba_timeout_delta);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- /*
- * This is ifdef'ed on DEBUGFS because otherwise the
- * statistics aren't available. If DEBUGFS is set but
- * DEBUG is not, these will just compile out.
- */
- IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
- priv->delta_stats.tx.rx_detected_cnt);
- IWL_DEBUG_RADIO(priv,
- "ack_or_ba_timeout_collision delta %d\n",
- priv->delta_stats.tx.ack_or_ba_timeout_collision);
-#endif
-
- if (ba_timeout_delta >= BA_TIMEOUT_MAX)
- return false;
- }
-
- return true;
-}
-
-/**
- * iwl_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-static bool iwl_good_plcp_health(struct iwl_priv *priv,
- struct statistics_rx_phy *cur_ofdm,
- struct statistics_rx_ht_phy *cur_ofdm_ht,
- unsigned int msecs)
-{
- int delta;
- int threshold = priv->cfg->base_params->plcp_delta_threshold;
-
- if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
- IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
- return true;
- }
-
- delta = le32_to_cpu(cur_ofdm->plcp_err) -
- le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) +
- le32_to_cpu(cur_ofdm_ht->plcp_err) -
- le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err);
-
- /* Can be negative if firmware reset statistics */
- if (delta <= 0)
- return true;
-
- if ((delta * 100 / msecs) > threshold) {
- IWL_DEBUG_RADIO(priv,
- "plcp health threshold %u delta %d msecs %u\n",
- threshold, delta, msecs);
- return false;
- }
-
- return true;
-}
-
-static void iwl_recover_from_statistics(struct iwl_priv *priv,
- struct statistics_rx_phy *cur_ofdm,
- struct statistics_rx_ht_phy *cur_ofdm_ht,
- struct statistics_tx *tx,
- unsigned long stamp)
-{
- unsigned int msecs;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
-
- /* Only gather statistics and update time stamp when not associated */
- if (!iwl_is_any_associated(priv))
- return;
-
- /* Do not check/recover when do not have enough statistics data */
- if (msecs < 99)
- return;
-
- if (iwlagn_mod_params.ack_check && !iwl_good_ack_health(priv, tx)) {
- IWL_ERR(priv, "low ack count detected, restart firmware\n");
- if (!iwl_force_reset(priv, IWL_FW_RESET, false))
- return;
- }
-
- if (iwlagn_mod_params.plcp_check &&
- !iwl_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
- iwl_force_reset(priv, IWL_RF_RESET, false);
-}
-
-/* Calculate noise level, based on measurements during network silence just
- * before arriving beacon. This measurement can be done only if we know
- * exactly when to expect beacons, therefore only when we're associated. */
-static void iwl_rx_calc_noise(struct iwl_priv *priv)
-{
- struct statistics_rx_non_phy *rx_info;
- int num_active_rx = 0;
- int total_silence = 0;
- int bcn_silence_a, bcn_silence_b, bcn_silence_c;
- int last_rx_noise;
-
- rx_info = &priv->statistics.rx_non_phy;
-
- bcn_silence_a =
- le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
- bcn_silence_b =
- le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
- bcn_silence_c =
- le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
-
- if (bcn_silence_a) {
- total_silence += bcn_silence_a;
- num_active_rx++;
- }
- if (bcn_silence_b) {
- total_silence += bcn_silence_b;
- num_active_rx++;
- }
- if (bcn_silence_c) {
- total_silence += bcn_silence_c;
- num_active_rx++;
- }
-
- /* Average among active antennas */
- if (num_active_rx)
- last_rx_noise = (total_silence / num_active_rx) - 107;
- else
- last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
- IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
- bcn_silence_a, bcn_silence_b, bcn_silence_c,
- last_rx_noise);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-/*
- * based on the assumption of all statistics counter are in DWORD
- * FIXME: This function is for debugging, do not deal with
- * the case of counters roll-over.
- */
-static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta,
- __le32 *max_delta, __le32 *accum, int size)
-{
- int i;
-
- for (i = 0;
- i < size / sizeof(__le32);
- i++, prev++, cur++, delta++, max_delta++, accum++) {
- if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) {
- *delta = cpu_to_le32(
- le32_to_cpu(*cur) - le32_to_cpu(*prev));
- le32_add_cpu(accum, le32_to_cpu(*delta));
- if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta))
- *max_delta = *delta;
- }
- }
-}
-
-static void
-iwl_accumulative_statistics(struct iwl_priv *priv,
- struct statistics_general_common *common,
- struct statistics_rx_non_phy *rx_non_phy,
- struct statistics_rx_phy *rx_ofdm,
- struct statistics_rx_ht_phy *rx_ofdm_ht,
- struct statistics_rx_phy *rx_cck,
- struct statistics_tx *tx,
- struct statistics_bt_activity *bt_activity)
-{
-#define ACCUM(_name) \
- accum_stats((__le32 *)&priv->statistics._name, \
- (__le32 *)_name, \
- (__le32 *)&priv->delta_stats._name, \
- (__le32 *)&priv->max_delta_stats._name, \
- (__le32 *)&priv->accum_stats._name, \
- sizeof(*_name));
-
- ACCUM(common);
- ACCUM(rx_non_phy);
- ACCUM(rx_ofdm);
- ACCUM(rx_ofdm_ht);
- ACCUM(rx_cck);
- ACCUM(tx);
- if (bt_activity)
- ACCUM(bt_activity);
-#undef ACCUM
-}
-#else
-static inline void
-iwl_accumulative_statistics(struct iwl_priv *priv,
- struct statistics_general_common *common,
- struct statistics_rx_non_phy *rx_non_phy,
- struct statistics_rx_phy *rx_ofdm,
- struct statistics_rx_ht_phy *rx_ofdm_ht,
- struct statistics_rx_phy *rx_cck,
- struct statistics_tx *tx,
- struct statistics_bt_activity *bt_activity)
-{
-}
-#endif
-
-static void iwl_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- unsigned long stamp = jiffies;
- const int reg_recalib_period = 60;
- int change;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- __le32 *flag;
- struct statistics_general_common *common;
- struct statistics_rx_non_phy *rx_non_phy;
- struct statistics_rx_phy *rx_ofdm;
- struct statistics_rx_ht_phy *rx_ofdm_ht;
- struct statistics_rx_phy *rx_cck;
- struct statistics_tx *tx;
- struct statistics_bt_activity *bt_activity;
-
- len -= sizeof(struct iwl_cmd_header); /* skip header */
-
- IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
- len);
-
- if (len == sizeof(struct iwl_bt_notif_statistics)) {
- struct iwl_bt_notif_statistics *stats;
- stats = &pkt->u.stats_bt;
- flag = &stats->flag;
- common = &stats->general.common;
- rx_non_phy = &stats->rx.general.common;
- rx_ofdm = &stats->rx.ofdm;
- rx_ofdm_ht = &stats->rx.ofdm_ht;
- rx_cck = &stats->rx.cck;
- tx = &stats->tx;
- bt_activity = &stats->general.activity;
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- /* handle this exception directly */
- priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills;
- le32_add_cpu(&priv->statistics.accum_num_bt_kills,
- le32_to_cpu(stats->rx.general.num_bt_kills));
-#endif
- } else if (len == sizeof(struct iwl_notif_statistics)) {
- struct iwl_notif_statistics *stats;
- stats = &pkt->u.stats;
- flag = &stats->flag;
- common = &stats->general.common;
- rx_non_phy = &stats->rx.general;
- rx_ofdm = &stats->rx.ofdm;
- rx_ofdm_ht = &stats->rx.ofdm_ht;
- rx_cck = &stats->rx.cck;
- tx = &stats->tx;
- bt_activity = NULL;
- } else {
- WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
- len, sizeof(struct iwl_bt_notif_statistics),
- sizeof(struct iwl_notif_statistics));
- return;
- }
-
- change = common->temperature != priv->statistics.common.temperature ||
- (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
- (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK);
-
- iwl_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm,
- rx_ofdm_ht, rx_cck, tx, bt_activity);
-
- iwl_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp);
-
- priv->statistics.flag = *flag;
- memcpy(&priv->statistics.common, common, sizeof(*common));
- memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy));
- memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm));
- memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht));
- memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck));
- memcpy(&priv->statistics.tx, tx, sizeof(*tx));
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (bt_activity)
- memcpy(&priv->statistics.bt_activity, bt_activity,
- sizeof(*bt_activity));
-#endif
-
- priv->rx_statistics_jiffies = stamp;
-
- set_bit(STATUS_STATISTICS, &priv->status);
-
- /* Reschedule the statistics timer to occur in
- * reg_recalib_period seconds to ensure we get a
- * thermal update even if the uCode doesn't give
- * us one */
- mod_timer(&priv->statistics_periodic, jiffies +
- msecs_to_jiffies(reg_recalib_period * 1000));
-
- if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
- (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
- iwl_rx_calc_noise(priv);
- queue_work(priv->workqueue, &priv->run_time_calib_work);
- }
- if (priv->cfg->lib->temperature && change)
- priv->cfg->lib->temperature(priv);
-}
-
-static void iwl_rx_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- memset(&priv->accum_stats, 0,
- sizeof(priv->accum_stats));
- memset(&priv->delta_stats, 0,
- sizeof(priv->delta_stats));
- memset(&priv->max_delta_stats, 0,
- sizeof(priv->max_delta_stats));
-#endif
- IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
- }
- iwl_rx_statistics(priv, rxb);
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
- unsigned long status = priv->status;
-
- IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
- (flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On",
- (flags & CT_CARD_DISABLED) ?
- "Reached" : "Not reached");
-
- if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
- CT_CARD_DISABLED)) {
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- iwl_write_direct32(priv, HBUS_TARG_MBX_C,
- HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
-
- if (!(flags & RXON_CARD_DISABLED)) {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
- iwl_write_direct32(priv, HBUS_TARG_MBX_C,
- HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
- }
- if (flags & CT_CARD_DISABLED)
- iwl_tt_enter_ct_kill(priv);
- }
- if (!(flags & CT_CARD_DISABLED))
- iwl_tt_exit_ct_kill(priv);
-
- if (flags & HW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-
- if (!(flags & RXON_CARD_DISABLED))
- iwl_scan_cancel(priv);
-
- if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->status)))
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- else
- wake_up_interruptible(&priv->wait_command_queue);
-}
-
-static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacon_notif *missed_beacon;
-
- missed_beacon = &pkt->u.missed_beacon;
- if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
- priv->missed_beacon_threshold) {
- IWL_DEBUG_CALIB(priv,
- "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
- le32_to_cpu(missed_beacon->consecutive_missed_beacons),
- le32_to_cpu(missed_beacon->total_missed_becons),
- le32_to_cpu(missed_beacon->num_recvd_beacons),
- le32_to_cpu(missed_beacon->num_expected_beacons));
- if (!test_bit(STATUS_SCANNING, &priv->status))
- iwl_init_sensitivity(priv);
- }
-}
-
-/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
- * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- priv->last_phy_res_valid = true;
- memcpy(&priv->last_phy_res, pkt->u.raw,
- sizeof(struct iwl_rx_phy_res));
-}
-
-/*
- * returns non-zero if packet should be dropped
- */
-static int iwl_set_decrypted_flag(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u32 decrypt_res,
- struct ieee80211_rx_status *stats)
-{
- u16 fc = le16_to_cpu(hdr->frame_control);
-
- /*
- * All contexts have the same setting here due to it being
- * a module parameter, so OK to check any context.
- */
- if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
- RXON_FILTER_DIS_DECRYPT_MSK)
- return 0;
-
- if (!(fc & IEEE80211_FCTL_PROTECTED))
- return 0;
-
- IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
- switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
- case RX_RES_STATUS_SEC_TYPE_TKIP:
- /* The uCode has got a bad phase 1 Key, pushes the packet.
- * Decryption will be done in SW. */
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_BAD_KEY_TTAK)
- break;
-
- case RX_RES_STATUS_SEC_TYPE_WEP:
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_BAD_ICV_MIC) {
- /* bad ICV, the packet is destroyed since the
- * decryption is inplace, drop it */
- IWL_DEBUG_RX(priv, "Packet destroyed\n");
- return -1;
- }
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_DECRYPT_OK) {
- IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
- stats->flag |= RX_FLAG_DECRYPTED;
- }
- break;
-
- default:
- break;
- }
- return 0;
-}
-
-static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u16 len,
- u32 ampdu_status,
- struct iwl_rx_mem_buffer *rxb,
- struct ieee80211_rx_status *stats)
-{
- struct sk_buff *skb;
- __le16 fc = hdr->frame_control;
- struct iwl_rxon_context *ctx;
-
- /* We only process data packets if the interface is open */
- if (unlikely(!priv->is_open)) {
- IWL_DEBUG_DROP_LIMIT(priv,
- "Dropping packet while interface is not open.\n");
- return;
- }
-
- /* In case of HW accelerated crypto and bad decryption, drop */
- if (!iwlagn_mod_params.sw_crypto &&
- iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
- return;
-
- skb = dev_alloc_skb(128);
- if (!skb) {
- IWL_ERR(priv, "dev_alloc_skb failed\n");
- return;
- }
-
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
-
- iwl_update_stats(priv, false, fc, len);
-
- /*
- * Wake any queues that were stopped due to a passive channel tx
- * failure. This can happen because the regulatory enforcement in
- * the device waits for a beacon before allowing transmission,
- * sometimes even after already having transmitted frames for the
- * association because the new RXON may reset the information.
- */
- if (unlikely(ieee80211_is_beacon(fc))) {
- for_each_context(priv, ctx) {
- if (!ctx->last_tx_rejected)
- continue;
- if (compare_ether_addr(hdr->addr3,
- ctx->active.bssid_addr))
- continue;
- ctx->last_tx_rejected = false;
- iwl_wake_any_queue(priv, ctx);
- }
- }
-
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
- ieee80211_rx(priv->hw, skb);
- rxb->page = NULL;
-}
-
-static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
-{
- u32 decrypt_out = 0;
-
- if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
- RX_RES_STATUS_STATION_FOUND)
- decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
- RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
-
- decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
-
- /* packet was not encrypted */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_NONE)
- return decrypt_out;
-
- /* packet was encrypted with unknown alg */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_ERR)
- return decrypt_out;
-
- /* decryption was not done in HW */
- if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
- RX_MPDU_RES_STATUS_DEC_DONE_MSK)
- return decrypt_out;
-
- switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
-
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- /* alg is CCM: check MIC only */
- if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
- /* Bad MIC */
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-
- break;
-
- case RX_RES_STATUS_SEC_TYPE_TKIP:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
- /* Bad TTAK */
- decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
- break;
- }
- /* fall through if TTAK OK */
- default:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
- break;
- }
-
- IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
- decrypt_in, decrypt_out);
-
- return decrypt_out;
-}
-
-/* Calc max signal level (dBm) among 3 possible receivers */
-static int iwlagn_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
-{
- /* data from PHY/DSP regarding signal strength, etc.,
- * contents are always there, not configurable by host
- */
- struct iwlagn_non_cfg_phy *ncphy =
- (struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
- u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
- u8 agc;
-
- val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]);
- agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS;
-
- /* Find max rssi among 3 possible receivers.
- * These values are measured by the digital signal processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's automatic gain control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info.
- */
- val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]);
- rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >>
- IWLAGN_OFDM_RSSI_A_BIT_POS;
- rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >>
- IWLAGN_OFDM_RSSI_B_BIT_POS;
- val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]);
- rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >>
- IWLAGN_OFDM_RSSI_C_BIT_POS;
-
- max_rssi = max_t(u32, rssi_a, rssi_b);
- max_rssi = max_t(u32, max_rssi, rssi_c);
-
- IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
- rssi_a, rssi_b, rssi_c, max_rssi, agc);
-
- /* dBm = max_rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWLAGN_RSSI_OFFSET;
-}
-
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
-static void iwl_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_rx_phy_res *phy_res;
- __le32 rx_pkt_status;
- struct iwl_rx_mpdu_res_start *amsdu;
- u32 len;
- u32 ampdu_status;
- u32 rate_n_flags;
-
- /**
- * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
- * REPLY_RX: physical layer info is in this buffer
- * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
- * command and cached in priv->last_phy_res
- *
- * Here we set up local variables depending on which command is
- * received.
- */
- if (pkt->hdr.cmd == REPLY_RX) {
- phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
- + phy_res->cfg_phy_cnt);
-
- len = le16_to_cpu(phy_res->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
- phy_res->cfg_phy_cnt + len);
- ampdu_status = le32_to_cpu(rx_pkt_status);
- } else {
- if (!priv->last_phy_res_valid) {
- IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return;
- }
- phy_res = &priv->last_phy_res;
- amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
- len = le16_to_cpu(amsdu->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
- ampdu_status = iwl_translate_rx_status(priv,
- le32_to_cpu(rx_pkt_status));
- }
-
- if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
- IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
- phy_res->cfg_phy_cnt);
- return;
- }
-
- if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
- !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
- IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
- le32_to_cpu(rx_pkt_status));
- return;
- }
-
- /* This will be used in several places later */
- rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
- /* rx_status carries information about the packet to mac80211 */
- rx_status.mactime = le64_to_cpu(phy_res->timestamp);
- rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
- rx_status.band);
- rx_status.rate_idx =
- iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
- rx_status.flag = 0;
-
- /* TSF isn't reliable. In order to allow smooth user experience,
- * this W/A doesn't propagate it to the mac80211 */
- /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
-
- priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
-
- /* Find max signal strength (dBm) among 3 antenna/receiver chains */
- rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
-
- iwl_dbg_log_rx_data_frame(priv, len, header);
- IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
- rx_status.signal, (unsigned long long)rx_status.mactime);
-
- /*
- * "antenna number"
- *
- * It seems that the antenna field in the phy flags value
- * is actually a bit field. This is undefined by radiotap,
- * it wants an actual antenna number but I always get "7"
- * for most legacy frames I receive indicating that the
- * same frame was received on all three RX chains.
- *
- * I think this field should be removed in favor of a
- * new 802.11n radiotap field "RX chains" that is defined
- * as a bitmask.
- */
- rx_status.antenna =
- (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
- /* set the preamble flag if appropriate */
- if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
-
- /* Set up the HT phy flags */
- if (rate_n_flags & RATE_MCS_HT_MSK)
- rx_status.flag |= RX_FLAG_HT;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.flag |= RX_FLAG_40MHZ;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
-
- iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
- rxb, &rx_status);
-}
-
-/**
- * iwl_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- */
-void iwl_setup_rx_handlers(struct iwl_priv *priv)
-{
- void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-
- handlers = priv->rx_handlers;
-
- handlers[REPLY_ERROR] = iwl_rx_reply_error;
- handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
- handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
- handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
- handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif;
- handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
-
- /*
- * The same handler is used for both the REPLY to a discrete
- * statistics request from the host as well as for the periodic
- * statistics notifications (after received beacons) from the uCode.
- */
- handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics;
- handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
-
- iwl_setup_rx_scan_handlers(priv);
-
- handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
- handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif;
-
- /* Rx handlers */
- handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
- handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
-
- /* block ack */
- handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
-
- /* init calibration handlers */
- priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
- iwlagn_rx_calib_result;
- priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
-
- /* set up notification wait support */
- spin_lock_init(&priv->notif_wait_lock);
- INIT_LIST_HEAD(&priv->notif_waits);
- init_waitqueue_head(&priv->notif_waitq);
-
- /* Set up BT Rx handlers */
- if (priv->cfg->lib->bt_rx_handler_setup)
- priv->cfg->lib->bt_rx_handler_setup(priv);
-
-}
-
-void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- /*
- * Do the notification wait before RX handlers so
- * even if the RX handler consumes the RXB we have
- * access to it in the notification wait entry.
- */
- if (!list_empty(&priv->notif_waits)) {
- struct iwl_notification_wait *w;
-
- spin_lock(&priv->notif_wait_lock);
- list_for_each_entry(w, &priv->notif_waits, list) {
- if (w->cmd != pkt->hdr.cmd)
- continue;
- IWL_DEBUG_RX(priv,
- "Notif: %s, 0x%02x - wake the callers up\n",
- get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- w->triggered = true;
- if (w->fn)
- w->fn(priv, pkt, w->fn_data);
- }
- spin_unlock(&priv->notif_wait_lock);
-
- wake_up_all(&priv->notif_waitq);
- }
-
- if (priv->pre_rx_handler)
- priv->pre_rx_handler(priv, rxb);
-
- /* Based on type of command response or notification,
- * handle those that need handling via function in
- * rx_handlers table. See iwl_setup_rx_handlers() */
- if (priv->rx_handlers[pkt->hdr.cmd]) {
- priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
- priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
- } else {
- /* No handling needed */
- IWL_DEBUG_RX(priv,
- "No handler needed for %s, 0x%02x\n",
- get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
- }
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 77e528f5db8..e5d727f537d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -33,9 +33,7 @@
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
-#include "iwl-sta.h"
#include "iwl-io.h"
-#include "iwl-helpers.h"
#include "iwl-agn.h"
#include "iwl-trans.h"
@@ -68,14 +66,14 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
/* Exit instantly with error when device is not ready
* to receive scan abort command or it does not perform
* hardware scan currently */
- if (!test_bit(STATUS_READY, &priv->status) ||
- !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
- !test_bit(STATUS_SCAN_HW, &priv->status) ||
- test_bit(STATUS_FW_ERROR, &priv->status) ||
- test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (!test_bit(STATUS_READY, &priv->shrd->status) ||
+ !test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status) ||
+ !test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
+ test_bit(STATUS_FW_ERROR, &priv->shrd->status) ||
+ test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return -EIO;
- ret = trans_send_cmd(&priv->trans, &cmd);
+ ret = iwl_trans_send_cmd(trans(priv), &cmd);
if (ret)
return ret;
@@ -91,7 +89,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
ret = -EIO;
}
- iwl_free_pages(priv, cmd.reply_page);
+ iwl_free_pages(priv->shrd, cmd.reply_page);
return ret;
}
@@ -103,24 +101,90 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
ieee80211_scan_completed(priv->hw, aborted);
}
+ if (priv->scan_type == IWL_SCAN_ROC) {
+ ieee80211_remain_on_channel_expired(priv->hw);
+ priv->hw_roc_channel = NULL;
+ schedule_delayed_work(&priv->hw_roc_disable_work, 10 * HZ);
+ }
+
priv->scan_type = IWL_SCAN_NORMAL;
priv->scan_vif = NULL;
priv->scan_request = NULL;
}
+static void iwl_process_scan_complete(struct iwl_priv *priv)
+{
+ bool aborted;
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+ if (!test_and_clear_bit(STATUS_SCAN_COMPLETE, &priv->shrd->status))
+ return;
+
+ IWL_DEBUG_SCAN(priv, "Completed scan.\n");
+
+ cancel_delayed_work(&priv->scan_check);
+
+ aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->shrd->status);
+ if (aborted)
+ IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
+
+ if (!test_and_clear_bit(STATUS_SCANNING, &priv->shrd->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
+ goto out_settings;
+ }
+
+ if (priv->scan_type == IWL_SCAN_ROC) {
+ ieee80211_remain_on_channel_expired(priv->hw);
+ priv->hw_roc_channel = NULL;
+ schedule_delayed_work(&priv->hw_roc_disable_work, 10 * HZ);
+ }
+
+ if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
+ int err;
+
+ /* Check if mac80211 requested scan during our internal scan */
+ if (priv->scan_request == NULL)
+ goto out_complete;
+
+ /* If so request a new scan */
+ err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
+ priv->scan_request->channels[0]->band);
+ if (err) {
+ IWL_DEBUG_SCAN(priv,
+ "failed to initiate pending scan: %d\n", err);
+ aborted = true;
+ goto out_complete;
+ }
+
+ return;
+ }
+
+out_complete:
+ iwl_complete_scan(priv, aborted);
+
+out_settings:
+ /* Can we still talk to firmware ? */
+ if (!iwl_is_ready_rf(priv->shrd))
+ return;
+
+ iwlagn_post_scan(priv);
+}
+
void iwl_force_scan_end(struct iwl_priv *priv)
{
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
- if (!test_bit(STATUS_SCANNING, &priv->status)) {
+ if (!test_bit(STATUS_SCANNING, &priv->shrd->status)) {
IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
return;
}
IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
- clear_bit(STATUS_SCANNING, &priv->status);
- clear_bit(STATUS_SCAN_HW, &priv->status);
- clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+ clear_bit(STATUS_SCANNING, &priv->shrd->status);
+ clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
+ clear_bit(STATUS_SCAN_ABORTING, &priv->shrd->status);
+ clear_bit(STATUS_SCAN_COMPLETE, &priv->shrd->status);
iwl_complete_scan(priv, true);
}
@@ -128,14 +192,14 @@ static void iwl_do_scan_abort(struct iwl_priv *priv)
{
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
- if (!test_bit(STATUS_SCANNING, &priv->status)) {
+ if (!test_bit(STATUS_SCANNING, &priv->shrd->status)) {
IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
return;
}
- if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->shrd->status)) {
IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
return;
}
@@ -154,7 +218,7 @@ static void iwl_do_scan_abort(struct iwl_priv *priv)
int iwl_scan_cancel(struct iwl_priv *priv)
{
IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
- queue_work(priv->workqueue, &priv->abort_scan);
+ queue_work(priv->shrd->workqueue, &priv->abort_scan);
return 0;
}
@@ -163,28 +227,42 @@ int iwl_scan_cancel(struct iwl_priv *priv)
* @ms: amount of time to wait (in milliseconds) for scan to abort
*
*/
-int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
+void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
{
unsigned long timeout = jiffies + msecs_to_jiffies(ms);
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
iwl_do_scan_abort(priv);
while (time_before_eq(jiffies, timeout)) {
- if (!test_bit(STATUS_SCAN_HW, &priv->status))
- break;
+ if (!test_bit(STATUS_SCAN_HW, &priv->shrd->status))
+ goto finished;
msleep(20);
}
- return test_bit(STATUS_SCAN_HW, &priv->status);
+ return;
+
+ finished:
+ /*
+ * Now STATUS_SCAN_HW is clear. This means that the
+ * device finished, but the background work is going
+ * to execute at best as soon as we release the mutex.
+ * Since we need to be able to issue a new scan right
+ * after this function returns, run the complete here.
+ * The STATUS_SCAN_COMPLETE bit will then be cleared
+ * and prevent the background work from "completing"
+ * a possible new scan.
+ */
+ iwl_process_scan_complete(priv);
}
/* Service response to REPLY_SCAN_CMD (0x80) */
-static void iwl_rx_reply_scan(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwl_rx_reply_scan(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -193,11 +271,13 @@ static void iwl_rx_reply_scan(struct iwl_priv *priv,
IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
+ return 0;
}
/* Service SCAN_START_NOTIFICATION (0x82) */
-static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scanstart_notification *notif =
@@ -211,11 +291,20 @@ static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
le32_to_cpu(notif->tsf_high),
le32_to_cpu(notif->tsf_low),
notif->status, notif->beacon_timer);
+
+ if (priv->scan_type == IWL_SCAN_ROC &&
+ !priv->hw_roc_start_notified) {
+ ieee80211_ready_on_channel(priv->hw);
+ priv->hw_roc_start_notified = true;
+ }
+
+ return 0;
}
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -224,20 +313,24 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
IWL_DEBUG_SCAN(priv, "Scan ch.res: "
"%d [802.11%s] "
+ "probe status: %u:%u "
"(TSF: 0x%08X:%08X) - %d "
"elapsed=%lu usec\n",
notif->channel,
notif->band ? "bg" : "a",
+ notif->probe_status, notif->num_probe_not_sent,
le32_to_cpu(notif->tsf_high),
le32_to_cpu(notif->tsf_low),
le32_to_cpu(notif->statistics[0]),
le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
+ return 0;
}
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
@@ -247,14 +340,21 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
scan_notif->tsf_low,
scan_notif->tsf_high, scan_notif->status);
- /* The HW is no longer scanning */
- clear_bit(STATUS_SCAN_HW, &priv->status);
-
IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
(priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
jiffies_to_msecs(jiffies - priv->scan_start));
- queue_work(priv->workqueue, &priv->scan_completed);
+ /*
+ * When aborting, we run the scan completed background work inline
+ * and the background work must then do nothing. The SCAN_COMPLETE
+ * bit helps implement that logic and thus needs to be set before
+ * queueing the work. Also, since the scan abort waits for SCAN_HW
+ * to clear, we need to set SCAN_COMPLETE before clearing SCAN_HW
+ * to avoid a race there.
+ */
+ set_bit(STATUS_SCAN_COMPLETE, &priv->shrd->status);
+ clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
+ queue_work(priv->shrd->workqueue, &priv->scan_completed);
if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
iwl_advanced_bt_coexist(priv) &&
@@ -274,8 +374,10 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
IWL_BT_COEX_TRAFFIC_LOAD_NONE;
}
priv->bt_status = scan_notif->bt_status;
- queue_work(priv->workqueue, &priv->bt_traffic_change_work);
+ queue_work(priv->shrd->workqueue,
+ &priv->bt_traffic_change_work);
}
+ return 0;
}
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
@@ -289,9 +391,8 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
iwl_rx_scan_complete_notif;
}
-inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 n_probes)
+static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
+ enum ieee80211_band band, u8 n_probes)
{
if (band == IEEE80211_BAND_5GHZ)
return IWL_ACTIVE_DWELL_TIME_52 +
@@ -301,40 +402,486 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
}
-u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_vif *vif)
+static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
{
struct iwl_rxon_context *ctx;
+
+ /*
+ * If we're associated, we clamp the dwell time 98%
+ * of the smallest beacon interval (minus 2 * channel
+ * tune time)
+ */
+ for_each_context(priv, ctx) {
+ u16 value;
+
+ if (!iwl_is_associated_ctx(ctx))
+ continue;
+ value = ctx->beacon_int;
+ if (!value)
+ value = IWL_PASSIVE_DWELL_BASE;
+ value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+ dwell_time = min(value, dwell_time);
+ }
+
+ return dwell_time;
+}
+
+static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
+ enum ieee80211_band band)
+{
u16 passive = (band == IEEE80211_BAND_2GHZ) ?
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
- if (iwl_is_any_associated(priv)) {
+ return iwl_limit_dwell(priv, passive);
+}
+
+static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ struct iwl_scan_channel *scan_ch)
+{
+ const struct ieee80211_supported_band *sband;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added = 0;
+ u16 channel = 0;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband) {
+ IWL_ERR(priv, "invalid band\n");
+ return added;
+ }
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, 0);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ channel = iwl_get_single_channel_number(priv, band);
+ if (channel) {
+ scan_ch->channel = cpu_to_le16(channel);
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+ added++;
+ } else
+ IWL_ERR(priv, "no valid channel found\n");
+ return added;
+}
+
+static int iwl_get_channels_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ u8 is_active, u8 n_probes,
+ struct iwl_scan_channel *scan_ch)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+ u16 channel;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
+ chan = priv->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ channel = chan->hw_value;
+ scan_ch->channel = cpu_to_le16(channel);
+
+ ch_info = iwl_get_channel_info(priv, band, channel);
+ if (!is_channel_valid(ch_info)) {
+ IWL_DEBUG_SCAN(priv,
+ "Channel %d is INVALID for this band.\n",
+ channel);
+ continue;
+ }
+
+ if (!is_active || is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ else
+ scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+ if (n_probes)
+ scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+ IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
+ channel, le32_to_cpu(scan_ch->type),
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ "ACTIVE" : "PASSIVE",
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ active_dwell : passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
+ return added;
+}
+
+static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+{
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_SCAN_CMD,
+ .len = { sizeof(struct iwl_scan_cmd), },
+ .flags = CMD_SYNC,
+ };
+ struct iwl_scan_cmd *scan;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ u32 rate_flags = 0;
+ u16 cmd_len;
+ u16 rx_chain = 0;
+ enum ieee80211_band band;
+ u8 n_probes = 0;
+ u8 rx_ant = hw_params(priv).valid_rx_ant;
+ u8 rate;
+ bool is_active = false;
+ int chan_mod;
+ u8 active_chains;
+ u8 scan_tx_antennas = hw_params(priv).valid_tx_ant;
+ int ret;
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+ if (vif)
+ ctx = iwl_rxon_ctx_from_vif(vif);
+
+ if (!priv->scan_cmd) {
+ priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
+ IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+ if (!priv->scan_cmd) {
+ IWL_DEBUG_SCAN(priv,
+ "fail to allocate memory for scan\n");
+ return -ENOMEM;
+ }
+ }
+ scan = priv->scan_cmd;
+ memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
+
+ if (priv->scan_type != IWL_SCAN_ROC &&
+ iwl_is_any_associated(priv)) {
+ u16 interval = 0;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+
+ IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
+ switch (priv->scan_type) {
+ case IWL_SCAN_ROC:
+ WARN_ON(1);
+ break;
+ case IWL_SCAN_RADIO_RESET:
+ interval = 0;
+ break;
+ case IWL_SCAN_NORMAL:
+ interval = vif->bss_conf.beacon_int;
+ break;
+ }
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+
+ extra = (suspend_time / interval) << 22;
+ scan_suspend_time = (extra |
+ ((suspend_time % interval) * 1024));
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ } else if (priv->scan_type == IWL_SCAN_ROC) {
+ scan->suspend_time = 0;
+ scan->max_out_time = 0;
+ scan->quiet_time = 0;
+ scan->quiet_plcp_th = 0;
+ }
+
+ switch (priv->scan_type) {
+ case IWL_SCAN_RADIO_RESET:
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ break;
+ case IWL_SCAN_NORMAL:
+ if (priv->scan_request->n_ssids) {
+ int i, p = 0;
+ IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+ for (i = 0; i < priv->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+ break;
+ case IWL_SCAN_ROC:
+ IWL_DEBUG_SCAN(priv, "Start ROC scan.\n");
+ break;
+ }
+
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ switch (priv->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ chan_mod = le32_to_cpu(
+ priv->contexts[IWL_RXON_CTX_BSS].active.flags &
+ RXON_FLG_CHANNEL_MODE_MSK)
+ >> RXON_FLG_CHANNEL_MODE_POS;
+ if (chan_mod == CHANNEL_MODE_PURE_40) {
+ rate = IWL_RATE_6M_PLCP;
+ } else {
+ rate = IWL_RATE_1M_PLCP;
+ rate_flags = RATE_MCS_CCK_MSK;
+ }
/*
- * If we're associated, we clamp the maximum passive
- * dwell time to be 98% of the smallest beacon interval
- * (minus 2 * channel tune time)
+ * Internal scans are passive, so we can indiscriminately set
+ * the BT ignore flag on 2.4 GHz since it applies to TX only.
*/
- for_each_context(priv, ctx) {
- u16 value;
-
- if (!iwl_is_associated_ctx(ctx))
- continue;
- value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
- if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
- value = IWL_PASSIVE_DWELL_BASE;
- value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
- passive = min(value, passive);
+ if (priv->cfg->bt_params &&
+ priv->cfg->bt_params->advanced_bt_coexist)
+ scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate = IWL_RATE_6M_PLCP;
+ break;
+ default:
+ IWL_WARN(priv, "Invalid scan band\n");
+ return -EIO;
+ }
+
+ /*
+ * If active scanning is requested but a certain channel is
+ * marked passive, we can do active scanning if we detect
+ * transmissions.
+ *
+ * There is an issue with some firmware versions that triggers
+ * a sysassert on a "good CRC threshold" of zero (== disabled),
+ * on a radar channel even though this means that we should NOT
+ * send probes.
+ *
+ * The "good CRC threshold" is the number of frames that we
+ * need to receive during our dwell time on a channel before
+ * sending out probes -- setting this to a huge value will
+ * mean we never reach it, but at the same time work around
+ * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
+ * here instead of IWL_GOOD_CRC_TH_DISABLED.
+ *
+ * This was fixed in later versions along with some other
+ * scan changes, and the threshold behaves as a flag in those
+ * versions.
+ */
+ if (priv->new_scan_threshold_behaviour)
+ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+ IWL_GOOD_CRC_TH_DISABLED;
+ else
+ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+ IWL_GOOD_CRC_TH_NEVER;
+
+ band = priv->scan_band;
+
+ if (priv->cfg->scan_rx_antennas[band])
+ rx_ant = priv->cfg->scan_rx_antennas[band];
+
+ if (band == IEEE80211_BAND_2GHZ &&
+ priv->cfg->bt_params &&
+ priv->cfg->bt_params->advanced_bt_coexist) {
+ /* transmit 2.4 GHz probes only on first antenna */
+ scan_tx_antennas = first_antenna(scan_tx_antennas);
+ }
+
+ priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv,
+ priv->scan_tx_ant[band],
+ scan_tx_antennas);
+ rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
+ scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
+
+ /* In power save mode use one chain, otherwise use all chains */
+ if (test_bit(STATUS_POWER_PMI, &priv->shrd->status)) {
+ /* rx_ant has been set to all valid chains previously */
+ active_chains = rx_ant &
+ ((u8)(priv->chain_noise_data.active_chains));
+ if (!active_chains)
+ active_chains = rx_ant;
+
+ IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
+ priv->chain_noise_data.active_chains);
+
+ rx_ant = first_antenna(active_chains);
+ }
+ if (priv->cfg->bt_params &&
+ priv->cfg->bt_params->advanced_bt_coexist &&
+ priv->bt_full_concurrent) {
+ /* operated as 1x1 in full concurrency mode */
+ rx_ant = first_antenna(rx_ant);
+ }
+
+ /* MIMO is not used here, but value is required */
+ rx_chain |=
+ hw_params(priv).valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ scan->rx_chain = cpu_to_le16(rx_chain);
+ switch (priv->scan_type) {
+ case IWL_SCAN_NORMAL:
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ vif->addr,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ break;
+ case IWL_SCAN_RADIO_RESET:
+ case IWL_SCAN_ROC:
+ /* use bcast addr, will not be transmitted but must be valid */
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ iwl_bcast_addr, NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ break;
+ default:
+ BUG();
+ }
+ scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+ scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
+ RXON_FILTER_BCON_AWARE_MSK);
+
+ switch (priv->scan_type) {
+ case IWL_SCAN_RADIO_RESET:
+ scan->channel_count =
+ iwl_get_single_channel_for_scan(priv, vif, band,
+ (void *)&scan->data[cmd_len]);
+ break;
+ case IWL_SCAN_NORMAL:
+ scan->channel_count =
+ iwl_get_channels_for_scan(priv, vif, band,
+ is_active, n_probes,
+ (void *)&scan->data[cmd_len]);
+ break;
+ case IWL_SCAN_ROC: {
+ struct iwl_scan_channel *scan_ch;
+ int n_chan, i;
+ u16 dwell;
+
+ dwell = iwl_limit_dwell(priv, priv->hw_roc_duration);
+ n_chan = DIV_ROUND_UP(priv->hw_roc_duration, dwell);
+
+ scan->channel_count = n_chan;
+
+ scan_ch = (void *)&scan->data[cmd_len];
+
+ for (i = 0; i < n_chan; i++) {
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ scan_ch->channel =
+ cpu_to_le16(priv->hw_roc_channel->hw_value);
+
+ if (i == n_chan - 1)
+ dwell = priv->hw_roc_duration - i * dwell;
+
+ scan_ch->active_dwell =
+ scan_ch->passive_dwell = cpu_to_le16(dwell);
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (priv->hw_roc_channel->band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+ scan_ch++;
}
+ }
+
+ break;
+ }
+
+ if (scan->channel_count == 0) {
+ IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
+ return -EIO;
+ }
+
+ cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct iwl_scan_channel);
+ cmd.data[0] = scan;
+ cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+ scan->len = cpu_to_le16(cmd.len[0]);
+
+ /* set scan bit here for PAN params */
+ set_bit(STATUS_SCAN_HW, &priv->shrd->status);
+
+ ret = iwlagn_set_pan_params(priv);
+ if (ret)
+ return ret;
+
+ ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ if (ret) {
+ clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
+ iwlagn_set_pan_params(priv);
}
- return passive;
+ return ret;
}
void iwl_init_scan_params(struct iwl_priv *priv)
{
- u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
+ u8 ant_idx = fls(hw_params(priv).valid_tx_ant) - 1;
if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
@@ -348,50 +895,50 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
{
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
cancel_delayed_work(&priv->scan_check);
- if (!iwl_is_ready_rf(priv)) {
+ if (!iwl_is_ready_rf(priv->shrd)) {
IWL_WARN(priv, "Request scan called when driver not ready.\n");
return -EIO;
}
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
IWL_DEBUG_SCAN(priv,
"Multiple concurrent scan requests in parallel.\n");
return -EBUSY;
}
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->shrd->status)) {
IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
return -EBUSY;
}
IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
scan_type == IWL_SCAN_NORMAL ? "" :
- scan_type == IWL_SCAN_OFFCH_TX ? "offchan TX " :
+ scan_type == IWL_SCAN_ROC ? "remain-on-channel " :
"internal short ");
- set_bit(STATUS_SCANNING, &priv->status);
+ set_bit(STATUS_SCANNING, &priv->shrd->status);
priv->scan_type = scan_type;
priv->scan_start = jiffies;
priv->scan_band = band;
ret = iwlagn_request_scan(priv, vif);
if (ret) {
- clear_bit(STATUS_SCANNING, &priv->status);
+ clear_bit(STATUS_SCANNING, &priv->shrd->status);
priv->scan_type = IWL_SCAN_NORMAL;
return ret;
}
- queue_delayed_work(priv->workqueue, &priv->scan_check,
+ queue_delayed_work(priv->shrd->workqueue, &priv->scan_check,
IWL_SCAN_CHECK_WATCHDOG);
return 0;
}
-int iwl_mac_hw_scan(struct ieee80211_hw *hw,
+int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
@@ -403,7 +950,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
if (req->n_channels == 0)
return -EINVAL;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
/*
* If an internal scan is in progress, just set
@@ -432,7 +979,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "leave\n");
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return ret;
}
@@ -443,7 +990,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
*/
void iwl_internal_short_hw_scan(struct iwl_priv *priv)
{
- queue_work(priv->workqueue, &priv->start_internal_scan);
+ queue_work(priv->shrd->workqueue, &priv->start_internal_scan);
}
static void iwl_bg_start_internal_scan(struct work_struct *work)
@@ -453,14 +1000,14 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
IWL_DEBUG_SCAN(priv, "Start internal scan\n");
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
goto unlock;
}
- if (test_bit(STATUS_SCANNING, &priv->status)) {
+ if (test_bit(STATUS_SCANNING, &priv->shrd->status)) {
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
goto unlock;
}
@@ -468,7 +1015,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
unlock:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
static void iwl_bg_scan_check(struct work_struct *data)
@@ -481,9 +1028,9 @@ static void iwl_bg_scan_check(struct work_struct *data)
/* Since we are here firmware does not finish scan and
* most likely is in bad shape, so we don't bother to
* send abort command, just force scan complete to mac80211 */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
iwl_force_scan_end(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
/**
@@ -541,70 +1088,19 @@ static void iwl_bg_abort_scan(struct work_struct *work)
/* We keep scan_check work queued in case when firmware will not
* report back scan completed notification */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
iwl_scan_cancel_timeout(priv, 200);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
static void iwl_bg_scan_completed(struct work_struct *work)
{
struct iwl_priv *priv =
- container_of(work, struct iwl_priv, scan_completed);
- bool aborted;
-
- IWL_DEBUG_SCAN(priv, "Completed scan.\n");
-
- cancel_delayed_work(&priv->scan_check);
-
- mutex_lock(&priv->mutex);
-
- aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
- if (aborted)
- IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
-
- if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
- goto out_settings;
- }
-
- if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->offchan_tx_skb) {
- ieee80211_tx_status_irqsafe(priv->hw,
- priv->offchan_tx_skb);
- priv->offchan_tx_skb = NULL;
- }
-
- if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
- int err;
-
- /* Check if mac80211 requested scan during our internal scan */
- if (priv->scan_request == NULL)
- goto out_complete;
-
- /* If so request a new scan */
- err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
- priv->scan_request->channels[0]->band);
- if (err) {
- IWL_DEBUG_SCAN(priv,
- "failed to initiate pending scan: %d\n", err);
- aborted = true;
- goto out_complete;
- }
-
- goto out;
- }
-
-out_complete:
- iwl_complete_scan(priv, aborted);
-
-out_settings:
- /* Can we still talk to firmware ? */
- if (!iwl_is_ready_rf(priv))
- goto out;
-
- iwlagn_post_scan(priv);
+ container_of(work, struct iwl_priv, scan_completed);
-out:
- mutex_unlock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
+ iwl_process_scan_complete(priv);
+ mutex_unlock(&priv->shrd->mutex);
}
void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
@@ -622,8 +1118,8 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->scan_completed);
if (cancel_delayed_work_sync(&priv->scan_check)) {
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
iwl_force_scan_end(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
new file mode 100644
index 00000000000..1f7a93c67c4
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -0,0 +1,534 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_shared_h__
+#define __iwl_shared_h__
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/gfp.h>
+#include <linux/mm.h> /* for page_address */
+#include <net/mac80211.h>
+
+#include "iwl-commands.h"
+
+/**
+ * DOC: shared area - role and goal
+ *
+ * The shared area contains all the data exported by the upper layer to the
+ * other layers. Since the bus and transport layer shouldn't dereference
+ * iwl_priv, all the data needed by the upper layer and the transport / bus
+ * layer must be here.
+ * The shared area also holds pointer to all the other layers. This allows a
+ * layer to call a function from another layer.
+ *
+ * NOTE: All the layers hold a pointer to the shared area which must be shrd.
+ * A few macros assume that (_m)->shrd points to the shared area no matter
+ * what _m is.
+ *
+ * gets notifications about enumeration, suspend, resume.
+ * For the moment, the bus layer is not a linux kernel module as itself, and
+ * the module_init function of the driver must call the bus specific
+ * registration functions. These functions are listed at the end of this file.
+ * For the moment, there is only one implementation of this interface: PCI-e.
+ * This implementation is iwl-pci.c
+ */
+
+struct iwl_cfg;
+struct iwl_bus;
+struct iwl_priv;
+struct iwl_sensitivity_ranges;
+struct iwl_trans_ops;
+
+#define DRV_NAME "iwlwifi"
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR "<ilw@linux.intel.com>"
+
+extern struct iwl_mod_params iwlagn_mod_params;
+
+/**
+ * struct iwl_mod_params
+ *
+ * Holds the module parameters
+ *
+ * @sw_crypto: using hardware encryption, default = 0
+ * @num_of_queues: number of tx queue, HW dependent
+ * @disable_11n: 11n capabilities enabled, default = 0
+ * @amsdu_size_8K: enable 8K amsdu size, default = 1
+ * @antenna: both antennas (use diversity), default = 0
+ * @restart_fw: restart firmware, default = 1
+ * @plcp_check: enable plcp health check, default = true
+ * @ack_check: disable ack health check, default = false
+ * @wd_disable: enable stuck queue check, default = false
+ * @bt_coex_active: enable bt coex, default = true
+ * @led_mode: system default, default = 0
+ * @no_sleep_autoadjust: disable autoadjust, default = true
+ * @power_save: disable power save, default = false
+ * @power_level: power level, default = 1
+ * @debug_level: levels are IWL_DL_*
+ * @ant_coupling: antenna coupling in dB, default = 0
+ * @bt_ch_announce: BT channel inhibition, default = enable
+ * @wanted_ucode_alternative: ucode alternative to use, default = 1
+ * @auto_agg: enable agg. without check, default = true
+ */
+struct iwl_mod_params {
+ int sw_crypto;
+ int num_of_queues;
+ int disable_11n;
+ int amsdu_size_8K;
+ int antenna;
+ int restart_fw;
+ bool plcp_check;
+ bool ack_check;
+ bool wd_disable;
+ bool bt_coex_active;
+ int led_mode;
+ bool no_sleep_autoadjust;
+ bool power_save;
+ int power_level;
+ u32 debug_level;
+ int ant_coupling;
+ bool bt_ch_announce;
+ int wanted_ucode_alternative;
+ bool auto_agg;
+};
+
+/**
+ * struct iwl_hw_params
+ *
+ * Holds the module parameters
+ *
+ * @max_txq_num: Max # Tx queues supported
+ * @num_ampdu_queues: num of ampdu queues
+ * @tx_chains_num: Number of TX chains
+ * @rx_chains_num: Number of RX chains
+ * @valid_tx_ant: usable antennas for TX
+ * @valid_rx_ant: usable antennas for RX
+ * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
+ * @sku: sku read from EEPROM
+ * @rx_page_order: Rx buffer page order
+ * @max_inst_size: for ucode use
+ * @max_data_size: for ucode use
+ * @ct_kill_threshold: temperature threshold - in hw dependent unit
+ * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit
+ * relevant for 1000, 6000 and up
+ * @wd_timeout: TX queues watchdog timeout
+ * @calib_init_cfg: setup initial calibrations for the hw
+ * @calib_rt_cfg: setup runtime calibrations for the hw
+ * @struct iwl_sensitivity_ranges: range of sensitivity values
+ */
+struct iwl_hw_params {
+ u8 max_txq_num;
+ u8 num_ampdu_queues;
+ u8 tx_chains_num;
+ u8 rx_chains_num;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ u8 ht40_channel;
+ bool shadow_reg_enable;
+ u16 sku;
+ u32 rx_page_order;
+ u32 max_inst_size;
+ u32 max_data_size;
+ u32 ct_kill_threshold;
+ u32 ct_kill_exit_threshold;
+ unsigned int wd_timeout;
+
+ u32 calib_init_cfg;
+ u32 calib_rt_cfg;
+ const struct iwl_sensitivity_ranges *sens;
+};
+
+/**
+ * enum iwl_agg_state
+ *
+ * The state machine of the BA agreement establishment / tear down.
+ * These states relate to a specific RA / TID.
+ *
+ * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_ON: aggregation session is up
+ * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
+ * HW queue to be empty from packets for this RA /TID.
+ * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
+ * HW queue to be empty from packets for this RA /TID.
+ */
+enum iwl_agg_state {
+ IWL_AGG_OFF = 0,
+ IWL_AGG_ON,
+ IWL_EMPTYING_HW_QUEUE_ADDBA,
+ IWL_EMPTYING_HW_QUEUE_DELBA,
+};
+
+/**
+ * struct iwl_ht_agg - aggregation state machine
+
+ * This structs holds the states for the BA agreement establishment and tear
+ * down. It also holds the state during the BA session itself. This struct is
+ * duplicated for each RA / TID.
+
+ * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
+ * Tx response (REPLY_TX), and the block ack notification
+ * (REPLY_COMPRESSED_BA).
+ * @state: state of the BA agreement establishment / tear down.
+ * @txq_id: Tx queue used by the BA session - used by the transport layer.
+ * Needed by the upper layer for debugfs only.
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ */
+struct iwl_ht_agg {
+ u32 rate_n_flags;
+ enum iwl_agg_state state;
+ u16 txq_id;
+ bool wait_for_ba;
+};
+
+/**
+ * struct iwl_tid_data - one for each RA / TID
+
+ * This structs holds the states for each RA / TID.
+
+ * @seq_number: the next WiFi sequence number to use
+ * @tfds_in_queue: number of packets sent to the HW queues.
+ * Exported for debugfs only
+ * @agg: aggregation state machine
+ */
+struct iwl_tid_data {
+ u16 seq_number;
+ u16 tfds_in_queue;
+ struct iwl_ht_agg agg;
+};
+
+/**
+ * struct iwl_shared - shared fields for all the layers of the driver
+ *
+ * @dbg_level_dev: dbg level set per device. Prevails on
+ * iwlagn_mod_params.debug_level if set (!= 0)
+ * @ucode_owner: IWL_OWNERSHIP_*
+ * @cmd_queue: command queue number
+ * @status: STATUS_*
+ * @valid_contexts: microcode/device supports multiple contexts
+ * @bus: pointer to the bus layer data
+ * @priv: pointer to the upper layer data
+ * @hw_params: see struct iwl_hw_params
+ * @workqueue: the workqueue used by all the layers of the driver
+ * @lock: protect general shared data
+ * @sta_lock: protects the station table.
+ * If lock and sta_lock are needed, lock must be acquired first.
+ * @mutex:
+ */
+struct iwl_shared {
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u32 dbg_level_dev;
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+#define IWL_OWNERSHIP_DRIVER 0
+#define IWL_OWNERSHIP_TM 1
+ u8 ucode_owner;
+ u8 cmd_queue;
+ unsigned long status;
+ bool wowlan;
+ u8 valid_contexts;
+
+ struct iwl_bus *bus;
+ struct iwl_priv *priv;
+ struct iwl_trans *trans;
+ struct iwl_hw_params hw_params;
+
+ struct workqueue_struct *workqueue;
+ spinlock_t lock;
+ spinlock_t sta_lock;
+ struct mutex mutex;
+
+ struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
+
+ wait_queue_head_t wait_command_queue;
+};
+
+/*Whatever _m is (iwl_trans, iwl_priv, iwl_bus, these macros will work */
+#define priv(_m) ((_m)->shrd->priv)
+#define bus(_m) ((_m)->shrd->bus)
+#define trans(_m) ((_m)->shrd->trans)
+#define hw_params(_m) ((_m)->shrd->hw_params)
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+/*
+ * iwl_get_debug_level: Return active debug level for device
+ *
+ * Using sysfs it is possible to set per device debug level. This debug
+ * level will be used if set, otherwise the global debug level which can be
+ * set via module parameter is used.
+ */
+static inline u32 iwl_get_debug_level(struct iwl_shared *shrd)
+{
+ if (shrd->dbg_level_dev)
+ return shrd->dbg_level_dev;
+ else
+ return iwlagn_mod_params.debug_level;
+}
+#else
+static inline u32 iwl_get_debug_level(struct iwl_shared *shrd)
+{
+ return iwlagn_mod_params.debug_level;
+}
+#endif
+
+static inline void iwl_free_pages(struct iwl_shared *shrd, unsigned long page)
+{
+ free_pages(page, shrd->hw_params.rx_page_order);
+}
+
+/**
+ * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_queue_inc_wrap(int index, int n_bd)
+{
+ return ++index & (n_bd - 1);
+}
+
+/**
+ * iwl_queue_dec_wrap - decrement queue index, wrap back to end
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_queue_dec_wrap(int index, int n_bd)
+{
+ return --index & (n_bd - 1);
+}
+
+struct iwl_rx_mem_buffer {
+ dma_addr_t page_dma;
+ struct page *page;
+ struct list_head list;
+};
+
+#define rxb_addr(r) page_address(r->page)
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get as from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ * VO 0
+ * VI 1
+ * BE 2
+ * BK 3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
+static const u8 tid_to_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+};
+
+static inline int get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+enum iwl_rxon_context_id {
+ IWL_RXON_CTX_BSS,
+ IWL_RXON_CTX_PAN,
+
+ NUM_IWL_RXON_CTX
+};
+
+int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
+ struct iwl_cfg *cfg);
+void __devexit iwl_remove(struct iwl_priv * priv);
+struct iwl_device_cmd;
+int __must_check iwl_rx_dispatch(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
+int iwlagn_hw_valid_rtc_data_addr(u32 addr);
+void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctx,
+ u8 sta_id, u8 tid);
+void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctx,
+ u8 sta_id, u8 tid);
+void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state);
+void iwl_nic_config(struct iwl_priv *priv);
+void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb);
+void iwl_apm_stop(struct iwl_priv *priv);
+int iwl_apm_init(struct iwl_priv *priv);
+void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand);
+const char *get_cmd_string(u8 cmd);
+bool iwl_check_for_ct_kill(struct iwl_priv *priv);
+
+void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac);
+void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_reset_traffic_log(struct iwl_priv *priv);
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctxid);
+#else
+static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctxid)
+{
+}
+#endif
+
+#define IWL_CMD(x) case x: return #x
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
+#define IWL_TRAFFIC_ENTRIES (256)
+#define IWL_TRAFFIC_ENTRY_SIZE (64)
+
+/*****************************************************
+* DRIVER STATUS FUNCTIONS
+******************************************************/
+#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
+/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
+#define STATUS_INT_ENABLED 2
+#define STATUS_RF_KILL_HW 3
+#define STATUS_CT_KILL 4
+#define STATUS_INIT 5
+#define STATUS_ALIVE 6
+#define STATUS_READY 7
+#define STATUS_TEMPERATURE 8
+#define STATUS_GEO_CONFIGURED 9
+#define STATUS_EXIT_PENDING 10
+#define STATUS_STATISTICS 12
+#define STATUS_SCANNING 13
+#define STATUS_SCAN_ABORTING 14
+#define STATUS_SCAN_HW 15
+#define STATUS_POWER_PMI 16
+#define STATUS_FW_ERROR 17
+#define STATUS_DEVICE_ENABLED 18
+#define STATUS_CHANNEL_SWITCH_PENDING 19
+#define STATUS_SCAN_COMPLETE 20
+
+static inline int iwl_is_ready(struct iwl_shared *shrd)
+{
+ /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+ * set but EXIT_PENDING is not */
+ return test_bit(STATUS_READY, &shrd->status) &&
+ test_bit(STATUS_GEO_CONFIGURED, &shrd->status) &&
+ !test_bit(STATUS_EXIT_PENDING, &shrd->status);
+}
+
+static inline int iwl_is_alive(struct iwl_shared *shrd)
+{
+ return test_bit(STATUS_ALIVE, &shrd->status);
+}
+
+static inline int iwl_is_init(struct iwl_shared *shrd)
+{
+ return test_bit(STATUS_INIT, &shrd->status);
+}
+
+static inline int iwl_is_rfkill_hw(struct iwl_shared *shrd)
+{
+ return test_bit(STATUS_RF_KILL_HW, &shrd->status);
+}
+
+static inline int iwl_is_rfkill(struct iwl_shared *shrd)
+{
+ return iwl_is_rfkill_hw(shrd);
+}
+
+static inline int iwl_is_ctkill(struct iwl_shared *shrd)
+{
+ return test_bit(STATUS_CT_KILL, &shrd->status);
+}
+
+static inline int iwl_is_ready_rf(struct iwl_shared *shrd)
+{
+ if (iwl_is_rfkill(shrd))
+ return 0;
+
+ return iwl_is_ready(shrd);
+}
+
+#endif /* #__iwl_shared_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
deleted file mode 100644
index 1ef3b7106ad..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ /dev/null
@@ -1,832 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/lockdep.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-trans.h"
-#include "iwl-agn.h"
-
-/* priv->sta_lock must be held */
-static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
-{
-
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
- IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u addr %pM\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
-
- if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
- IWL_DEBUG_ASSOC(priv,
- "STA id %u addr %pM already present in uCode (according to driver)\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
- } else {
- priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
- }
-}
-
-static int iwl_process_add_sta_resp(struct iwl_priv *priv,
- struct iwl_addsta_cmd *addsta,
- struct iwl_rx_packet *pkt,
- bool sync)
-{
- u8 sta_id = addsta->sta.sta_id;
- unsigned long flags;
- int ret = -EIO;
-
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
- return ret;
- }
-
- IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
- sta_id);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- switch (pkt->u.add_sta.status) {
- case ADD_STA_SUCCESS_MSK:
- IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
- iwl_sta_ucode_activate(priv, sta_id);
- ret = 0;
- break;
- case ADD_STA_NO_ROOM_IN_TABLE:
- IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
- sta_id);
- break;
- case ADD_STA_NO_BLOCK_ACK_RESOURCE:
- IWL_ERR(priv, "Adding station %d failed, no block ack resource.\n",
- sta_id);
- break;
- case ADD_STA_MODIFY_NON_EXIST_STA:
- IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
- sta_id);
- break;
- default:
- IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
- pkt->u.add_sta.status);
- break;
- }
-
- IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- sta_id, priv->stations[sta_id].sta.sta.addr);
-
- /*
- * XXX: The MAC address in the command buffer is often changed from
- * the original sent to the device. That is, the MAC address
- * written to the command buffer often is not the same MAC address
- * read from the command buffer when the command returns. This
- * issue has not yet been resolved and this debugging is left to
- * observe the problem.
- */
- IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- addsta->sta.addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
-}
-
-static void iwl_add_sta_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
-{
- struct iwl_addsta_cmd *addsta =
- (struct iwl_addsta_cmd *)cmd->cmd.payload;
-
- iwl_process_add_sta_resp(priv, addsta, pkt, false);
-
-}
-
-static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
-{
- u16 size = (u16)sizeof(struct iwl_addsta_cmd);
- struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
- memcpy(addsta, cmd, size);
- /* resrved in 5000 */
- addsta->rate_n_flags = cpu_to_le16(0);
- return size;
-}
-
-int iwl_send_add_sta(struct iwl_priv *priv,
- struct iwl_addsta_cmd *sta, u8 flags)
-{
- struct iwl_rx_packet *pkt = NULL;
- int ret = 0;
- u8 data[sizeof(*sta)];
- struct iwl_host_cmd cmd = {
- .id = REPLY_ADD_STA,
- .flags = flags,
- .data = { data, },
- };
- u8 sta_id __maybe_unused = sta->sta.sta_id;
-
- IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
- sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
-
- if (flags & CMD_ASYNC)
- cmd.callback = iwl_add_sta_callback;
- else {
- cmd.flags |= CMD_WANT_SKB;
- might_sleep();
- }
-
- cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data);
- ret = trans_send_cmd(&priv->trans, &cmd);
-
- if (ret || (flags & CMD_ASYNC))
- return ret;
-
- if (ret == 0) {
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- ret = iwl_process_add_sta_resp(priv, sta, pkt, true);
- }
- iwl_free_pages(priv, cmd.reply_page);
-
- return ret;
-}
-
-static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
- struct ieee80211_sta *sta,
- struct iwl_rxon_context *ctx)
-{
- struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
- __le32 sta_flags;
- u8 mimo_ps_mode;
-
- if (!sta || !sta_ht_inf->ht_supported)
- goto done;
-
- mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
- IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
- "static" :
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
- "dynamic" : "disabled");
-
- sta_flags = priv->stations[index].sta.station_flags;
-
- sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
-
- switch (mimo_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
- sta_flags |= STA_FLG_MIMO_DIS_MSK;
- break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
- sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
- break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
- break;
- default:
- IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
- break;
- }
-
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
-
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
-
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- sta_flags |= STA_FLG_HT40_EN_MSK;
- else
- sta_flags &= ~STA_FLG_HT40_EN_MSK;
-
- priv->stations[index].sta.station_flags = sta_flags;
- done:
- return;
-}
-
-/**
- * iwl_prep_station - Prepare station information for addition
- *
- * should be called with sta_lock held
- */
-u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
-{
- struct iwl_station_entry *station;
- int i;
- u8 sta_id = IWL_INVALID_STATION;
-
- if (is_ap)
- sta_id = ctx->ap_sta_id;
- else if (is_broadcast_ether_addr(addr))
- sta_id = ctx->bcast_sta_id;
- else
- for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
- if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr)) {
- sta_id = i;
- break;
- }
-
- if (!priv->stations[i].used &&
- sta_id == IWL_INVALID_STATION)
- sta_id = i;
- }
-
- /*
- * These two conditions have the same outcome, but keep them
- * separate
- */
- if (unlikely(sta_id == IWL_INVALID_STATION))
- return sta_id;
-
- /*
- * uCode is not able to deal with multiple requests to add a
- * station. Keep track if one is in progress so that we do not send
- * another.
- */
- if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
- IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
- sta_id);
- return sta_id;
- }
-
- if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
- (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
- !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
- IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
- sta_id, addr);
- return sta_id;
- }
-
- station = &priv->stations[sta_id];
- station->used = IWL_STA_DRIVER_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
- sta_id, addr);
- priv->num_stations++;
-
- /* Set up the REPLY_ADD_STA command to send to device */
- memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
- memcpy(station->sta.sta.addr, addr, ETH_ALEN);
- station->sta.mode = 0;
- station->sta.sta.sta_id = sta_id;
- station->sta.station_flags = ctx->station_flags;
- station->ctxid = ctx->ctxid;
-
- if (sta) {
- struct iwl_station_priv_common *sta_priv;
-
- sta_priv = (void *)sta->drv_priv;
- sta_priv->ctx = ctx;
- }
-
- /*
- * OK to call unconditionally, since local stations (IBSS BSSID
- * STA and broadcast STA) pass in a NULL sta, and mac80211
- * doesn't allow HT IBSS.
- */
- iwl_set_ht_add_station(priv, sta_id, sta, ctx);
-
- return sta_id;
-
-}
-
-#define STA_WAIT_TIMEOUT (HZ/2)
-
-/**
- * iwl_add_station_common -
- */
-int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta, u8 *sta_id_r)
-{
- unsigned long flags_spin;
- int ret = 0;
- u8 sta_id;
- struct iwl_addsta_cmd sta_cmd;
-
- *sta_id_r = 0;
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
- addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EINVAL;
- }
-
- /*
- * uCode is not able to deal with multiple requests to add a
- * station. Keep track if one is in progress so that we do not send
- * another.
- */
- if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
- IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
- sta_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EEXIST;
- }
-
- if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
- (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
- sta_id, addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EEXIST;
- }
-
- priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- /* Add station to device's station table */
- ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- IWL_ERR(priv, "Adding station %pM failed.\n",
- priv->stations[sta_id].sta.sta.addr);
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- *sta_id_r = sta_id;
- return ret;
-}
-
-/**
- * iwl_sta_ucode_deactivate - deactivate ucode status for a station
- *
- * priv->sta_lock must be held
- */
-static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
-{
- /* Ucode must be active and driver must be non active */
- if ((priv->stations[sta_id].used &
- (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != IWL_STA_UCODE_ACTIVE)
- IWL_ERR(priv, "removed non active STA %u\n", sta_id);
-
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
-
- memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
- IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
-}
-
-static int iwl_send_remove_station(struct iwl_priv *priv,
- const u8 *addr, int sta_id,
- bool temporary)
-{
- struct iwl_rx_packet *pkt;
- int ret;
-
- unsigned long flags_spin;
- struct iwl_rem_sta_cmd rm_sta_cmd;
-
- struct iwl_host_cmd cmd = {
- .id = REPLY_REMOVE_STA,
- .len = { sizeof(struct iwl_rem_sta_cmd), },
- .flags = CMD_SYNC,
- .data = { &rm_sta_cmd, },
- };
-
- memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
- rm_sta_cmd.num_sta = 1;
- memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
-
- cmd.flags |= CMD_WANT_SKB;
-
- ret = trans_send_cmd(&priv->trans, &cmd);
-
- if (ret)
- return ret;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- }
-
- if (!ret) {
- switch (pkt->u.rem_sta.status) {
- case REM_STA_SUCCESS_MSK:
- if (!temporary) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- iwl_sta_ucode_deactivate(priv, sta_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
- break;
- default:
- ret = -EIO;
- IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
- break;
- }
- }
- iwl_free_pages(priv, cmd.reply_page);
-
- return ret;
-}
-
-/**
- * iwl_remove_station - Remove driver's knowledge of station.
- */
-int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
- const u8 *addr)
-{
- unsigned long flags;
-
- if (!iwl_is_ready(priv)) {
- IWL_DEBUG_INFO(priv,
- "Unable to remove station %pM, device not ready.\n",
- addr);
- /*
- * It is typical for stations to be removed when we are
- * going down. Return success since device will be down
- * soon anyway
- */
- return 0;
- }
-
- IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
- sta_id, addr);
-
- if (WARN_ON(sta_id == IWL_INVALID_STATION))
- return -EINVAL;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
- addr);
- goto out_err;
- }
-
- if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
- addr);
- goto out_err;
- }
-
- if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
- kfree(priv->stations[sta_id].lq);
- priv->stations[sta_id].lq = NULL;
- }
-
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
-
- priv->num_stations--;
-
- if (WARN_ON(priv->num_stations < 0))
- priv->num_stations = 0;
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_send_remove_station(priv, addr, sta_id, false);
-out_err:
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return -EINVAL;
-}
-
-/**
- * iwl_clear_ucode_stations - clear ucode station table bits
- *
- * This function clears all the bits in the driver indicating
- * which stations are active in the ucode. Call when something
- * other than explicit station management would cause this in
- * the ucode, e.g. unassociated RXON.
- */
-void iwl_clear_ucode_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- int i;
- unsigned long flags_spin;
- bool cleared = false;
-
- IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (ctx && ctx->ctxid != priv->stations[i].ctxid)
- continue;
-
- if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
- IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i);
- priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
- cleared = true;
- }
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- if (!cleared)
- IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
-}
-
-/**
- * iwl_restore_stations() - Restore driver known stations to device
- *
- * All stations considered active by driver, but not present in ucode, is
- * restored.
- *
- * Function sleeps.
- */
-void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- struct iwl_addsta_cmd sta_cmd;
- struct iwl_link_quality_cmd lq;
- unsigned long flags_spin;
- int i;
- bool found = false;
- int ret;
- bool send_lq;
-
- if (!iwl_is_ready(priv)) {
- IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n");
- return;
- }
-
- IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (ctx->ctxid != priv->stations[i].ctxid)
- continue;
- if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
- !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
- priv->stations[i].sta.sta.addr);
- priv->stations[i].sta.mode = 0;
- priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
- found = true;
- }
- }
-
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
- memcpy(&sta_cmd, &priv->stations[i].sta,
- sizeof(struct iwl_addsta_cmd));
- send_lq = false;
- if (priv->stations[i].lq) {
- memcpy(&lq, priv->stations[i].lq,
- sizeof(struct iwl_link_quality_cmd));
- send_lq = true;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- IWL_ERR(priv, "Adding station %pM failed.\n",
- priv->stations[i].sta.sta.addr);
- priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE;
- priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- /*
- * Rate scaling has already been initialized, send
- * current LQ command
- */
- if (send_lq)
- iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
- }
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- if (!found)
- IWL_DEBUG_INFO(priv, "Restoring all known stations .... no stations to be restored.\n");
- else
- IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
-}
-
-void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- unsigned long flags;
- int sta_id = ctx->ap_sta_id;
- int ret;
- struct iwl_addsta_cmd sta_cmd;
- struct iwl_link_quality_cmd lq;
- bool active;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return;
- }
-
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
- sta_cmd.mode = 0;
- memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
-
- active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- if (active) {
- ret = iwl_send_remove_station(
- priv, priv->stations[sta_id].sta.sta.addr,
- sta_id, true);
- if (ret)
- IWL_ERR(priv, "failed to remove STA %pM (%d)\n",
- priv->stations[sta_id].sta.sta.addr, ret);
- }
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret)
- IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
- priv->stations[sta_id].sta.sta.addr, ret);
- iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
-}
-
-int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
-{
- int i;
-
- for (i = 0; i < priv->sta_key_max_num; i++)
- if (!test_and_set_bit(i, &priv->ucode_key_table))
- return i;
-
- return WEP_INVALID_OFFSET;
-}
-
-void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (!(priv->stations[i].used & IWL_STA_BCAST))
- continue;
-
- priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
- priv->num_stations--;
- if (WARN_ON(priv->num_stations < 0))
- priv->num_stations = 0;
- kfree(priv->stations[i].lq);
- priv->stations[i].lq = NULL;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-static void iwl_dump_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq)
-{
- int i;
- IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
- IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
- lq->general_params.single_stream_ant_msk,
- lq->general_params.dual_stream_ant_msk);
-
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
- IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
- i, lq->rs_table[i].rate_n_flags);
-}
-#else
-static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq)
-{
-}
-#endif
-
-/**
- * is_lq_table_valid() - Test one aspect of LQ cmd for validity
- *
- * It sometimes happens when a HT rate has been in use and we
- * loose connectivity with AP then mac80211 will first tell us that the
- * current channel is not HT anymore before removing the station. In such a
- * scenario the RXON flags will be updated to indicate we are not
- * communicating HT anymore, but the LQ command may still contain HT rates.
- * Test for this to prevent driver from sending LQ command between the time
- * RXON flags are updated and when LQ command is updated.
- */
-static bool is_lq_table_valid(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq)
-{
- int i;
-
- if (ctx->ht.enabled)
- return true;
-
- IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
- ctx->active.channel);
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
- IWL_DEBUG_INFO(priv,
- "index %d of LQ expects HT channel\n",
- i);
- return false;
- }
- }
- return true;
-}
-
-/**
- * iwl_send_lq_cmd() - Send link quality command
- * @init: This command is sent as part of station initialization right
- * after station has been added.
- *
- * The link quality command is sent as the last step of station creation.
- * This is the special case in which init is set and we call a callback in
- * this case to clear the state indicating that station creation is in
- * progress.
- */
-int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq, u8 flags, bool init)
-{
- int ret = 0;
- unsigned long flags_spin;
-
- struct iwl_host_cmd cmd = {
- .id = REPLY_TX_LINK_QUALITY_CMD,
- .len = { sizeof(struct iwl_link_quality_cmd), },
- .flags = flags,
- .data = { lq, },
- };
-
- if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
- return -EINVAL;
-
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- iwl_dump_lq_cmd(priv, lq);
- if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
- return -EINVAL;
-
- if (is_lq_table_valid(priv, ctx, lq))
- ret = trans_send_cmd(&priv->trans, &cmd);
- else
- ret = -EINVAL;
-
- if (cmd.flags & CMD_ASYNC)
- return ret;
-
- if (init) {
- IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d\n",
- lq->sta_id);
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- return ret;
-}
-
-int iwl_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
- int ret;
-
- IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
- sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
- sta->addr);
- ret = iwl_remove_station(priv, sta_common->sta_id, sta->addr);
- if (ret)
- IWL_ERR(priv, "Error removing station %pM\n",
- sta->addr);
- mutex_unlock(&priv->mutex);
- return ret;
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
deleted file mode 100644
index 9a6768d6685..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#ifndef __iwl_sta_h__
-#define __iwl_sta_h__
-
-#include "iwl-dev.h"
-
-#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
-#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
-#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
- being activated */
-#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
- (this is for the IBSS BSSID stations) */
-#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
-
-
-void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-void iwl_clear_ucode_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
-int iwl_get_free_ucode_key_offset(struct iwl_priv *priv);
-int iwl_send_add_sta(struct iwl_priv *priv,
- struct iwl_addsta_cmd *sta, u8 flags);
-int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta, u8 *sta_id_r);
-int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
- const u8 *addr);
-int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-
-u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
-
-int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq, u8 flags, bool init);
-void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-
-/**
- * iwl_clear_driver_stations - clear knowledge of all stations from driver
- * @priv: iwl priv struct
- *
- * This is called during iwl_down() to make sure that in the case
- * we're coming there from a hardware restart mac80211 will be
- * able to reconfigure stations -- if we're getting there in the
- * normal down flow then the stations will already be cleared.
- */
-static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rxon_context *ctx;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- memset(priv->stations, 0, sizeof(priv->stations));
- priv->num_stations = 0;
-
- priv->ucode_key_table = 0;
-
- for_each_context(priv, ctx) {
- /*
- * Remove all key information that is not stored as part
- * of station information since mac80211 may not have had
- * a chance to remove all the keys. When device is
- * reconfigured by mac80211 after an error all keys will
- * be reconfigured.
- */
- memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
- ctx->key_mapping_keys = 0;
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static inline int iwl_sta_id(struct ieee80211_sta *sta)
-{
- if (WARN_ON(!sta))
- return IWL_INVALID_STATION;
-
- return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
-}
-
-/**
- * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
- * @priv: iwl priv
- * @context: the current context
- * @sta: mac80211 station
- *
- * In certain circumstances mac80211 passes a station pointer
- * that may be %NULL, for example during TX or key setup. In
- * that case, we need to use the broadcast station, so this
- * inline wraps that pattern.
- */
-static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv,
- struct iwl_rxon_context *context,
- struct ieee80211_sta *sta)
-{
- int sta_id;
-
- if (!sta)
- return context->bcast_sta_id;
-
- sta_id = iwl_sta_id(sta);
-
- /*
- * mac80211 should not be passing a partially
- * initialised station!
- */
- WARN_ON(sta_id == IWL_INVALID_STATION);
-
- return sta_id;
-}
-#endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
index b11f60de4f1..5e50d88f302 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
@@ -63,6 +63,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/dma-mapping.h>
#include <net/net_namespace.h>
#include <linux/netdevice.h>
#include <net/cfg80211.h>
@@ -72,7 +73,6 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
-#include "iwl-fh.h"
#include "iwl-io.h"
#include "iwl-agn.h"
#include "iwl-testmode.h"
@@ -184,7 +184,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
if (priv->testmode_trace.trace_enabled) {
if (priv->testmode_trace.cpu_addr &&
priv->testmode_trace.dma_addr)
- dma_free_coherent(priv->bus->dev,
+ dma_free_coherent(bus(priv)->dev,
priv->testmode_trace.total_size,
priv->testmode_trace.cpu_addr,
priv->testmode_trace.dma_addr);
@@ -239,7 +239,7 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
" len %d\n", cmd.id, cmd.flags, cmd.len[0]);
/* ok, let's submit the command to ucode */
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
}
@@ -277,7 +277,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_REG_READ32:
- val32 = iwl_read32(priv, ofs);
+ val32 = iwl_read32(bus(priv), ofs);
IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
@@ -299,7 +299,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
} else {
val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
- iwl_write32(priv, ofs, val32);
+ iwl_write32(bus(priv), ofs, val32);
}
break;
case IWL_TM_CMD_APP2DEV_REG_WRITE8:
@@ -309,7 +309,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
} else {
val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
- iwl_write8(priv, ofs, val8);
+ iwl_write8(bus(priv), ofs, val8);
}
break;
default:
@@ -405,7 +405,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
iwl_testmode_cfg_init_calib(priv);
- trans_stop_device(&priv->trans);
+ iwl_trans_stop_device(trans(priv));
break;
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
@@ -484,7 +484,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
struct iwl_priv *priv = hw->priv;
struct sk_buff *skb;
int status = 0;
- struct device *dev = priv->bus->dev;
+ struct device *dev = bus(priv)->dev;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
@@ -613,7 +613,7 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
if ((owner == IWL_OWNERSHIP_DRIVER) || (owner == IWL_OWNERSHIP_TM))
- priv->ucode_owner = owner;
+ priv->shrd->ucode_owner = owner;
else {
IWL_DEBUG_INFO(priv, "Invalid owner\n");
return -EINVAL;
@@ -641,7 +641,7 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
* @data: pointer to user space message
* @len: length in byte of @data
*/
-int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
+int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
{
struct nlattr *tb[IWL_TM_ATTR_MAX];
struct iwl_priv *priv = hw->priv;
@@ -661,7 +661,7 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
return -ENOMSG;
}
/* in case multiple accesses to the device happens */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_UCODE:
@@ -702,11 +702,11 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
break;
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return result;
}
-int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
+int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb,
void *data, int len)
{
@@ -738,7 +738,7 @@ int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
}
/* in case multiple accesses to the device happens */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
switch (cmd) {
case IWL_TM_CMD_APP2DEV_READ_TRACE:
IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
@@ -749,6 +749,6 @@ int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
break;
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return result;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
deleted file mode 100644
index b79330d8418..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#ifndef __iwl_trans_int_pcie_h__
-#define __iwl_trans_int_pcie_h__
-
-/*This file includes the declaration that are internal to the
- * trans_pcie layer */
-
-/*****************************************************
-* RX
-******************************************************/
-void iwl_bg_rx_replenish(struct work_struct *data);
-void iwl_irq_tasklet(struct iwl_priv *priv);
-void iwlagn_rx_replenish(struct iwl_priv *priv);
-void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
- struct iwl_rx_queue *q);
-
-/*****************************************************
-* ICT
-******************************************************/
-int iwl_reset_ict(struct iwl_priv *priv);
-void iwl_disable_ict(struct iwl_priv *priv);
-int iwl_alloc_isr_ict(struct iwl_priv *priv);
-void iwl_free_isr_ict(struct iwl_priv *priv);
-irqreturn_t iwl_isr_ict(int irq, void *data);
-
-
-/*****************************************************
-* TX / HCMD
-******************************************************/
-void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
-void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int index);
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset);
-int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
- int count, int slots_num, u32 id);
-int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-int __must_check iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags,
- u16 len, const void *data);
-void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt);
-int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo);
-void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
- int txq_id, u32 index);
-void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry);
-void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
- int frame_limit);
-
-#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
new file mode 100644
index 00000000000..2b6756e8b8f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -0,0 +1,436 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __iwl_trans_int_pcie_h__
+#define __iwl_trans_int_pcie_h__
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+
+#include "iwl-fh.h"
+#include "iwl-csr.h"
+#include "iwl-shared.h"
+#include "iwl-trans.h"
+#include "iwl-debug.h"
+#include "iwl-io.h"
+
+struct iwl_tx_queue;
+struct iwl_queue;
+struct iwl_host_cmd;
+
+/*This file includes the declaration that are internal to the
+ * trans_pcie layer */
+
+/**
+ * struct isr_statistics - interrupt statistics
+ *
+ */
+struct isr_statistics {
+ u32 hw;
+ u32 sw;
+ u32 err_code;
+ u32 sch;
+ u32 alive;
+ u32 rfkill;
+ u32 ctkill;
+ u32 wakeup;
+ u32 rx;
+ u32 tx;
+ u32 unhandled;
+};
+
+/**
+ * struct iwl_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool:
+ * @queue:
+ * @read: Shared index to newest available Rx buffer
+ * @write: Shared index to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @write_actual:
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ * @lock:
+ *
+ * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
+ */
+struct iwl_rx_queue {
+ __le32 *bd;
+ dma_addr_t bd_dma;
+ struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+ struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+ u32 read;
+ u32 write;
+ u32 free_count;
+ u32 write_actual;
+ struct list_head rx_free;
+ struct list_head rx_used;
+ int need_update;
+ struct iwl_rb_status *rb_stts;
+ dma_addr_t rb_stts_dma;
+ spinlock_t lock;
+};
+
+struct iwl_dma_ptr {
+ dma_addr_t dma;
+ void *addr;
+ size_t size;
+};
+
+/*
+ * This queue number is required for proper operation
+ * because the ucode will stop/start the scheduler as
+ * required.
+ */
+#define IWL_IPAN_MCAST_QUEUE 8
+
+struct iwl_cmd_meta {
+ /* only for SYNC commands, iff the reply skb is wanted */
+ struct iwl_host_cmd *source;
+
+ u32 flags;
+
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues.
+ *
+ * Note the difference between n_bd and n_window: the hardware
+ * always assumes 256 descriptors, so n_bd is always 256 (unless
+ * there might be HW changes in the future). For the normal TX
+ * queues, n_window, which is the size of the software queue data
+ * is also 256; however, for the command queue, n_window is only
+ * 32 since we don't need so many commands pending. Since the HW
+ * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
+ * the software buffers (in the variables @meta, @txb in struct
+ * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
+ * in the same struct) have 256.
+ * This means that we end up with the following:
+ * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ * SW entries: | 0 | ... | 31 |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlayed over the HW queue.
+ */
+struct iwl_queue {
+ int n_bd; /* number of BDs in this queue */
+ int write_ptr; /* 1-st empty entry (index) host_w*/
+ int read_ptr; /* last used entry (index) host_r*/
+ /* use for monitoring and recovering the stuck queue */
+ dma_addr_t dma_addr; /* physical addr for BD's */
+ int n_window; /* safe queue window */
+ u32 id;
+ int low_mark; /* low watermark, resume queue if free
+ * space more than this */
+ int high_mark; /* high watermark, stop queue if free
+ * space less than this */
+};
+
+/**
+ * struct iwl_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @bd: base of circular buffer of TFDs
+ * @cmd: array of command/TX buffer pointers
+ * @meta: array of meta data for each command/tx buffer
+ * @dma_addr_cmd: physical address of cmd/tx buffer array
+ * @txb: array of per-TFD driver data
+ * @time_stamp: time (in jiffies) of last read_ptr change
+ * @need_update: indicates need to update read/write index
+ * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
+ * @sta_id: valid if sched_retry is set
+ * @tid: valid if sched_retry is set
+ *
+ * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ */
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct iwl_tx_queue {
+ struct iwl_queue q;
+ struct iwl_tfd *tfds;
+ struct iwl_device_cmd **cmd;
+ struct iwl_cmd_meta *meta;
+ struct sk_buff **skbs;
+ unsigned long time_stamp;
+ u8 need_update;
+ u8 sched_retry;
+ u8 active;
+ u8 swq_id;
+
+ u16 sta_id;
+ u16 tid;
+};
+
+/**
+ * struct iwl_trans_pcie - PCIe transport specific data
+ * @rxq: all the RX queue data
+ * @rx_replenish: work that will be called when buffers need to be allocated
+ * @trans: pointer to the generic transport area
+ * @scd_base_addr: scheduler sram base address in SRAM
+ * @scd_bc_tbls: pointer to the byte count table of the scheduler
+ * @kw: keep warm address
+ * @ac_to_fifo: to what fifo is a specifc AC mapped ?
+ * @ac_to_queue: to what tx queue is a specifc AC mapped ?
+ * @mcast_queue:
+ * @txq: Tx DMA processing queues
+ * @txq_ctx_active_msk: what queue is active
+ * queue_stopped: tracks what queue is stopped
+ * queue_stop_count: tracks what SW queue is stopped
+ */
+struct iwl_trans_pcie {
+ struct iwl_rx_queue rxq;
+ struct work_struct rx_replenish;
+ struct iwl_trans *trans;
+
+ /* INT ICT Table */
+ __le32 *ict_tbl;
+ void *ict_tbl_vir;
+ dma_addr_t ict_tbl_dma;
+ dma_addr_t aligned_ict_tbl_dma;
+ int ict_index;
+ u32 inta;
+ bool use_ict;
+ struct tasklet_struct irq_tasklet;
+ struct isr_statistics isr_stats;
+
+ u32 inta_mask;
+ u32 scd_base_addr;
+ struct iwl_dma_ptr scd_bc_tbls;
+ struct iwl_dma_ptr kw;
+
+ const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
+ const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
+ u8 mcast_queue[NUM_IWL_RXON_CTX];
+
+ struct iwl_tx_queue *txq;
+ unsigned long txq_ctx_active_msk;
+#define IWL_MAX_HW_QUEUES 32
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+ atomic_t queue_stop_count[4];
+};
+
+#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
+ ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
+
+/*****************************************************
+* RX
+******************************************************/
+void iwl_bg_rx_replenish(struct work_struct *data);
+void iwl_irq_tasklet(struct iwl_trans *trans);
+void iwlagn_rx_replenish(struct iwl_trans *trans);
+void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
+ struct iwl_rx_queue *q);
+
+/*****************************************************
+* ICT
+******************************************************/
+int iwl_reset_ict(struct iwl_trans *trans);
+void iwl_disable_ict(struct iwl_trans *trans);
+int iwl_alloc_isr_ict(struct iwl_trans *trans);
+void iwl_free_isr_ict(struct iwl_trans *trans);
+irqreturn_t iwl_isr_ict(int irq, void *data);
+
+/*****************************************************
+* TX / HCMD
+******************************************************/
+void iwl_txq_update_write_ptr(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq);
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset);
+int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
+int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+void iwl_tx_cmd_complete(struct iwl_trans *trans,
+ struct iwl_rx_mem_buffer *rxb, int handler_status);
+void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt);
+void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id);
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid);
+void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
+void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry);
+int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid, u16 *ssn);
+void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx,
+ int sta_id, int tid, int frame_limit);
+void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+ int index, enum dma_data_direction dma_dir);
+int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
+ struct sk_buff_head *skbs);
+int iwl_queue_space(const struct iwl_queue *q);
+
+/*****************************************************
+* Error handling
+******************************************************/
+int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
+ char **buf, bool display);
+int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
+void iwl_dump_csr(struct iwl_trans *trans);
+
+/*****************************************************
+* Helpers
+******************************************************/
+static inline void iwl_disable_interrupts(struct iwl_trans *trans)
+{
+ clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);
+
+ /* disable interrupts from uCode/NIC to host */
+ iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
+
+ /* acknowledge/clear/reset any interrupts still pending
+ * from uCode or flow handler (Rx/Tx DMA) */
+ iwl_write32(bus(trans), CSR_INT, 0xffffffff);
+ iwl_write32(bus(trans), CSR_FH_INT_STATUS, 0xffffffff);
+ IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
+}
+
+static inline void iwl_enable_interrupts(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
+ set_bit(STATUS_INT_ENABLED, &trans->shrd->status);
+ iwl_write32(bus(trans), CSR_INT_MASK, trans_pcie->inta_mask);
+}
+
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW queue ID
+ * |
+ * +---------------------- unused
+ */
+static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
+{
+ BUG_ON(ac > 3); /* only have 2 bits */
+ BUG_ON(hwq > 31); /* only use 5 bits */
+
+ txq->swq_id = (hwq << 2) | ac;
+}
+
+static inline void iwl_wake_queue(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (test_and_clear_bit(hwq, trans_pcie->queue_stopped))
+ if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0)
+ iwl_wake_sw_queue(priv(trans), ac);
+}
+
+static inline void iwl_stop_queue(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!test_and_set_bit(hwq, trans_pcie->queue_stopped))
+ if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0)
+ iwl_stop_sw_queue(priv(trans), ac);
+}
+
+#ifdef ieee80211_stop_queue
+#undef ieee80211_stop_queue
+#endif
+
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+
+#ifdef ieee80211_wake_queue
+#undef ieee80211_wake_queue
+#endif
+
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
+static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
+ int txq_id)
+{
+ set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
+}
+
+static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
+ int txq_id)
+{
+ clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
+}
+
+static inline int iwl_queue_used(const struct iwl_queue *q, int i)
+{
+ return q->write_ptr >= q->read_ptr ?
+ (i >= q->read_ptr && i < q->write_ptr) :
+ !(i < q->read_ptr && i >= q->write_ptr);
+}
+
+static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
+{
+ return index & (q->n_window - 1);
+}
+
+#define IWL_TX_FIFO_BK 0 /* shared */
+#define IWL_TX_FIFO_BE 1
+#define IWL_TX_FIFO_VI 2 /* shared */
+#define IWL_TX_FIFO_VO 3
+#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
+#define IWL_TX_FIFO_BE_IPAN 4
+#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
+#define IWL_TX_FIFO_VO_IPAN 5
+/* re-uses the VO FIFO, uCode will properly flush/schedule */
+#define IWL_TX_FIFO_AUX 5
+#define IWL_TX_FIFO_UNUSED -1
+
+/* AUX (TX during scan dwell) queue */
+#define IWL_AUX_QUEUE 10
+
+#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
new file mode 100644
index 00000000000..374c68cc1d7
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -0,0 +1,1435 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/gfp.h>
+
+/*TODO: Remove include to iwl-core.h*/
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-trans-pcie-int.h"
+
+/******************************************************************************
+ *
+ * RX path functions
+ *
+ ******************************************************************************/
+
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which point to Receive Buffers to be filled by the NIC. These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC. The driver and NIC manage the Rx buffers by means
+ * of indexes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ index
+ * and fire the RX interrupt. The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ INDEX is updated (updating the
+ * 'processed' and 'read' driver indexes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the iwl->rxq. The driver 'processed' index is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
+ * were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * iwl_rx_queue_alloc() Allocates rx_free
+ * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * iwl_rx_queue_restock
+ * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE index. If insufficient rx_free buffers
+ * are available, schedules iwl_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
+ * READ INDEX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls iwl_rx_queue_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
+
+/**
+ * iwl_rx_queue_space - Return number of free slots available in queue.
+ */
+static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
+{
+ int s = q->read - q->write;
+ if (s <= 0)
+ s += RX_QUEUE_SIZE;
+ /* keep some buffer to not confuse full and empty queue */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+
+/**
+ * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
+ struct iwl_rx_queue *q)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ if (q->need_update == 0)
+ goto exit_unlock;
+
+ if (hw_params(trans).shadow_reg_enable) {
+ /* shadow register enabled */
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ iwl_write32(bus(trans), FH_RSCSR_CHNL0_WPTR, q->write_actual);
+ } else {
+ /* If power-saving is in use, make sure device is awake */
+ if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
+ reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans,
+ "Rx queue requesting wakeup,"
+ " GP1 = 0x%x\n", reg);
+ iwl_set_bit(bus(trans), CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ goto exit_unlock;
+ }
+
+ q->write_actual = (q->write & ~0x7);
+ iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
+ q->write_actual);
+
+ /* Else device is assumed to be awake */
+ } else {
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
+ q->write_actual);
+ }
+ }
+ q->need_update = 0;
+
+ exit_unlock:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+/**
+ * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32)(dma_addr >> 8));
+}
+
+/**
+ * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(trans->shrd->workqueue, &trans_pcie->rx_replenish);
+
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ iwl_rx_queue_update_write_ptr(trans, rxq);
+ }
+}
+
+/**
+ * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except for during initialization)
+ */
+static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (hw_params(trans).rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask,
+ hw_params(trans).rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(trans, "alloc_pages failed, "
+ "order: %d\n",
+ hw_params(trans).rx_page_order);
+
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(trans, "Failed to alloc_pages with %s."
+ "Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ?
+ "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, hw_params(trans).rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ /* Get physical address of the RB */
+ rxb->page_dma = dma_map_page(bus(trans)->dev, page, 0,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void iwlagn_rx_replenish(struct iwl_trans *trans)
+{
+ unsigned long flags;
+
+ iwlagn_rx_allocate(trans, GFP_KERNEL);
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ iwlagn_rx_queue_restock(trans);
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+}
+
+static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
+{
+ iwlagn_rx_allocate(trans, GFP_ATOMIC);
+
+ iwlagn_rx_queue_restock(trans);
+}
+
+void iwl_bg_rx_replenish(struct work_struct *data)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ container_of(data, struct iwl_trans_pcie, rx_replenish);
+ struct iwl_trans *trans = trans_pcie->trans;
+
+ if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
+ return;
+
+ mutex_lock(&trans->shrd->mutex);
+ iwlagn_rx_replenish(trans);
+ mutex_unlock(&trans->shrd->mutex);
+}
+
+/**
+ * iwl_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the priv->rx_handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+static void iwl_rx_handle(struct iwl_trans *trans)
+{
+ struct iwl_rx_mem_buffer *rxb;
+ struct iwl_rx_packet *pkt;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
+ struct iwl_device_cmd *cmd;
+ u32 r, i;
+ int reclaim;
+ unsigned long flags;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty;
+ int index, cmd_index;
+
+ /* uCode's read index (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ i = rxq->read;
+
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i);
+
+ /* calculate total frames need to be restock after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+
+ while (i != r) {
+ int len, err;
+ u16 sequence;
+
+ rxb = rxq->queue[i];
+
+ /* If an RXB doesn't have a Rx queue slot associated with it,
+ * then a bug has been introduced in the queue refilling
+ * routines -- catch it here */
+ if (WARN_ON(rxb == NULL)) {
+ i = (i + 1) & RX_QUEUE_MASK;
+ continue;
+ }
+
+ rxq->queue[i] = NULL;
+
+ dma_unmap_page(bus(trans)->dev, rxb->page_dma,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+ pkt = rxb_addr(rxb);
+
+ IWL_DEBUG_RX(trans, "r = %d, i = %d, %s, 0x%02x\n", r,
+ i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+
+ len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+ trace_iwlwifi_dev_rx(priv(trans), pkt, len);
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
+ (pkt->hdr.cmd != REPLY_RX) &&
+ (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
+ (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
+ (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
+ (pkt->hdr.cmd != REPLY_TX);
+
+ sequence = le16_to_cpu(pkt->hdr.sequence);
+ index = SEQ_TO_INDEX(sequence);
+ cmd_index = get_cmd_index(&txq->q, index);
+
+ if (reclaim)
+ cmd = txq->cmd[cmd_index];
+ else
+ cmd = NULL;
+
+ /* warn if this is cmd response / notification and the uCode
+ * didn't set the SEQ_RX_FRAME for a frame that is
+ * uCode-originated
+ * If you saw this code after the second half of 2012, then
+ * please remove it
+ */
+ WARN(pkt->hdr.cmd != REPLY_TX && reclaim == false &&
+ (!(pkt->hdr.sequence & SEQ_RX_FRAME)),
+ "reclaim is false, SEQ_RX_FRAME unset: %s\n",
+ get_cmd_string(pkt->hdr.cmd));
+
+ err = iwl_rx_dispatch(priv(trans), rxb, cmd);
+
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some rx_handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking
+ * iwl_trans_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxb->page)
+ iwl_tx_cmd_complete(trans, rxb, err);
+ else
+ IWL_WARN(trans, "Claim null rxb?\n");
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma = dma_map_page(bus(trans)->dev, rxb->page,
+ 0, PAGE_SIZE <<
+ hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so ucode wont assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ iwlagn_rx_replenish_now(trans);
+ count = 0;
+ }
+ }
+ }
+
+ /* Backtrack one entry */
+ rxq->read = i;
+ if (fill_rx)
+ iwlagn_rx_replenish_now(trans);
+ else
+ iwlagn_rx_queue_restock(trans);
+}
+
+static const char * const desc_lookup_text[] = {
+ "OK",
+ "FAIL",
+ "BAD_PARAM",
+ "BAD_CHECKSUM",
+ "NMI_INTERRUPT_WDG",
+ "SYSASSERT",
+ "FATAL_ERROR",
+ "BAD_COMMAND",
+ "HW_ERROR_TUNE_LOCK",
+ "HW_ERROR_TEMPERATURE",
+ "ILLEGAL_CHAN_FREQ",
+ "VCC_NOT_STABLE",
+ "FH_ERROR",
+ "NMI_INTERRUPT_HOST",
+ "NMI_INTERRUPT_ACTION_PT",
+ "NMI_INTERRUPT_UNKNOWN",
+ "UCODE_VERSION_MISMATCH",
+ "HW_ERROR_ABS_LOCK",
+ "HW_ERROR_CAL_LOCK_FAIL",
+ "NMI_INTERRUPT_INST_ACTION_PT",
+ "NMI_INTERRUPT_DATA_ACTION_PT",
+ "NMI_TRM_HW_ER",
+ "NMI_INTERRUPT_TRM",
+ "NMI_INTERRUPT_BREAK_POINT",
+ "DEBUG_0",
+ "DEBUG_1",
+ "DEBUG_2",
+ "DEBUG_3",
+};
+
+static struct { char *name; u8 num; } advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *desc_lookup(u32 num)
+{
+ int i;
+ int max = ARRAY_SIZE(desc_lookup_text);
+
+ if (num < max)
+ return desc_lookup_text[num];
+
+ max = ARRAY_SIZE(advanced_lookup) - 1;
+ for (i = 0; i < max; i++) {
+ if (advanced_lookup[i].num == num)
+ break;
+ }
+ return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
+static void iwl_dump_nic_error_log(struct iwl_trans *trans)
+{
+ u32 base;
+ struct iwl_error_event_table table;
+ struct iwl_priv *priv = priv(trans);
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ base = priv->device_pointers.error_event_table;
+ if (priv->ucode_type == IWL_UCODE_INIT) {
+ if (!base)
+ base = priv->init_errlog_ptr;
+ } else {
+ if (!base)
+ base = priv->inst_errlog_ptr;
+ }
+
+ if (!iwlagn_hw_valid_rtc_data_addr(base)) {
+ IWL_ERR(trans,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base,
+ (priv->ucode_type == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return;
+ }
+
+ iwl_read_targ_mem_words(bus(priv), base, &table, sizeof(table));
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+ trans->shrd->status, table.valid);
+ }
+
+ trans_pcie->isr_stats.err_code = table.error_id;
+
+ trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
+ table.data1, table.data2, table.line,
+ table.blink1, table.blink2, table.ilink1,
+ table.ilink2, table.bcon_time, table.gp1,
+ table.gp2, table.gp3, table.ucode_ver,
+ table.hw_ver, table.brd_ver);
+ IWL_ERR(trans, "0x%08X | %-28s\n", table.error_id,
+ desc_lookup(table.error_id));
+ IWL_ERR(trans, "0x%08X | uPc\n", table.pc);
+ IWL_ERR(trans, "0x%08X | branchlink1\n", table.blink1);
+ IWL_ERR(trans, "0x%08X | branchlink2\n", table.blink2);
+ IWL_ERR(trans, "0x%08X | interruptlink1\n", table.ilink1);
+ IWL_ERR(trans, "0x%08X | interruptlink2\n", table.ilink2);
+ IWL_ERR(trans, "0x%08X | data1\n", table.data1);
+ IWL_ERR(trans, "0x%08X | data2\n", table.data2);
+ IWL_ERR(trans, "0x%08X | line\n", table.line);
+ IWL_ERR(trans, "0x%08X | beacon time\n", table.bcon_time);
+ IWL_ERR(trans, "0x%08X | tsf low\n", table.tsf_low);
+ IWL_ERR(trans, "0x%08X | tsf hi\n", table.tsf_hi);
+ IWL_ERR(trans, "0x%08X | time gp1\n", table.gp1);
+ IWL_ERR(trans, "0x%08X | time gp2\n", table.gp2);
+ IWL_ERR(trans, "0x%08X | time gp3\n", table.gp3);
+ IWL_ERR(trans, "0x%08X | uCode version\n", table.ucode_ver);
+ IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
+ IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
+ IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
+}
+
+/**
+ * iwl_irq_handle_error - called for HW or SW error interrupt from card
+ */
+static void iwl_irq_handle_error(struct iwl_trans *trans)
+{
+ struct iwl_priv *priv = priv(trans);
+ /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
+ if (priv->cfg->internal_wimax_coex &&
+ (!(iwl_read_prph(bus(trans), APMG_CLK_CTRL_REG) &
+ APMS_CLK_VAL_MRB_FUNC_MODE) ||
+ (iwl_read_prph(bus(trans), APMG_PS_CTRL_REG) &
+ APMG_PS_CTRL_VAL_RESET_REQ))) {
+ /*
+ * Keep the restart process from trying to send host
+ * commands by clearing the ready bit.
+ */
+ clear_bit(STATUS_READY, &trans->shrd->status);
+ clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+ wake_up(&priv->shrd->wait_command_queue);
+ IWL_ERR(trans, "RF is used by WiMAX\n");
+ return;
+ }
+
+ IWL_ERR(trans, "Loaded firmware version: %s\n",
+ priv->hw->wiphy->fw_version);
+
+ iwl_dump_nic_error_log(trans);
+ iwl_dump_csr(trans);
+ iwl_dump_fh(trans, NULL, false);
+ iwl_dump_nic_event_log(trans, false, NULL, false);
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS)
+ iwl_print_rx_config_cmd(priv(trans), IWL_RXON_CTX_BSS);
+#endif
+
+ iwlagn_fw_error(priv, false);
+}
+
+#define EVENT_START_OFFSET (4 * sizeof(u32))
+
+/**
+ * iwl_print_event_log - Dump error event log to syslog
+ *
+ */
+static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
+ u32 num_events, u32 mode,
+ int pos, char **buf, size_t bufsz)
+{
+ u32 i;
+ u32 base; /* SRAM byte address of event log header */
+ u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
+ u32 ptr; /* SRAM byte address of log data */
+ u32 ev, time, data; /* event log data */
+ unsigned long reg_flags;
+ struct iwl_priv *priv = priv(trans);
+
+ if (num_events == 0)
+ return pos;
+
+ base = priv->device_pointers.log_event_table;
+ if (priv->ucode_type == IWL_UCODE_INIT) {
+ if (!base)
+ base = priv->init_evtlog_ptr;
+ } else {
+ if (!base)
+ base = priv->inst_evtlog_ptr;
+ }
+
+ if (mode == 0)
+ event_size = 2 * sizeof(u32);
+ else
+ event_size = 3 * sizeof(u32);
+
+ ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
+
+ /* Make sure device is powered up for SRAM reads */
+ spin_lock_irqsave(&bus(trans)->reg_lock, reg_flags);
+ iwl_grab_nic_access(bus(trans));
+
+ /* Set starting address; reads will auto-increment */
+ iwl_write32(bus(trans), HBUS_TARG_MEM_RADDR, ptr);
+ rmb();
+
+ /* "time" is actually "data" for mode 0 (no timestamp).
+ * place event id # at far right for easier visual parsing. */
+ for (i = 0; i < num_events; i++) {
+ ev = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
+ time = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
+ if (mode == 0) {
+ /* data, ev */
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ } else {
+ trace_iwlwifi_dev_ucode_event(priv, 0,
+ time, ev);
+ IWL_ERR(trans, "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ }
+ } else {
+ data = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOGT:%010u:0x%08x:%04u\n",
+ time, data, ev);
+ } else {
+ IWL_ERR(trans, "EVT_LOGT:%010u:0x%08x:%04u\n",
+ time, data, ev);
+ trace_iwlwifi_dev_ucode_event(priv, time,
+ data, ev);
+ }
+ }
+ }
+
+ /* Allow device to power down */
+ iwl_release_nic_access(bus(trans));
+ spin_unlock_irqrestore(&bus(trans)->reg_lock, reg_flags);
+ return pos;
+}
+
+/**
+ * iwl_print_last_event_logs - Dump the newest # of event log to syslog
+ */
+static int iwl_print_last_event_logs(struct iwl_trans *trans, u32 capacity,
+ u32 num_wraps, u32 next_entry,
+ u32 size, u32 mode,
+ int pos, char **buf, size_t bufsz)
+{
+ /*
+ * display the newest DEFAULT_LOG_ENTRIES entries
+ * i.e the entries just before the next ont that uCode would fill.
+ */
+ if (num_wraps) {
+ if (next_entry < size) {
+ pos = iwl_print_event_log(trans,
+ capacity - (size - next_entry),
+ size - next_entry, mode,
+ pos, buf, bufsz);
+ pos = iwl_print_event_log(trans, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
+ } else
+ pos = iwl_print_event_log(trans, next_entry - size,
+ size, mode, pos, buf, bufsz);
+ } else {
+ if (next_entry < size) {
+ pos = iwl_print_event_log(trans, 0, next_entry,
+ mode, pos, buf, bufsz);
+ } else {
+ pos = iwl_print_event_log(trans, next_entry - size,
+ size, mode, pos, buf, bufsz);
+ }
+ }
+ return pos;
+}
+
+#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
+
+int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
+ char **buf, bool display)
+{
+ u32 base; /* SRAM byte address of event log header */
+ u32 capacity; /* event log capacity in # entries */
+ u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
+ u32 num_wraps; /* # times uCode wrapped to top of log */
+ u32 next_entry; /* index of next entry to be written by uCode */
+ u32 size; /* # entries that we'll print */
+ u32 logsize;
+ int pos = 0;
+ size_t bufsz = 0;
+ struct iwl_priv *priv = priv(trans);
+
+ base = priv->device_pointers.log_event_table;
+ if (priv->ucode_type == IWL_UCODE_INIT) {
+ logsize = priv->init_evtlog_size;
+ if (!base)
+ base = priv->init_evtlog_ptr;
+ } else {
+ logsize = priv->inst_evtlog_size;
+ if (!base)
+ base = priv->inst_evtlog_ptr;
+ }
+
+ if (!iwlagn_hw_valid_rtc_data_addr(base)) {
+ IWL_ERR(trans,
+ "Invalid event log pointer 0x%08X for %s uCode\n",
+ base,
+ (priv->ucode_type == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return -EINVAL;
+ }
+
+ /* event log header */
+ capacity = iwl_read_targ_mem(bus(trans), base);
+ mode = iwl_read_targ_mem(bus(trans), base + (1 * sizeof(u32)));
+ num_wraps = iwl_read_targ_mem(bus(trans), base + (2 * sizeof(u32)));
+ next_entry = iwl_read_targ_mem(bus(trans), base + (3 * sizeof(u32)));
+
+ if (capacity > logsize) {
+ IWL_ERR(trans, "Log capacity %d is bogus, limit to %d "
+ "entries\n", capacity, logsize);
+ capacity = logsize;
+ }
+
+ if (next_entry > logsize) {
+ IWL_ERR(trans, "Log write index %d is bogus, limit to %d\n",
+ next_entry, logsize);
+ next_entry = logsize;
+ }
+
+ size = num_wraps ? capacity : next_entry;
+
+ /* bail out if nothing in log */
+ if (size == 0) {
+ IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
+ return pos;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (!(iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) && !full_log)
+ size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+ ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#else
+ size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+ ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#endif
+ IWL_ERR(trans, "Start IWL Event Log Dump: display last %u entries\n",
+ size);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ if (full_log)
+ bufsz = capacity * 48;
+ else
+ bufsz = size * 48;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ }
+ if ((iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) || full_log) {
+ /*
+ * if uCode has wrapped back to top of log,
+ * start at the oldest entry,
+ * i.e the next one that uCode would fill.
+ */
+ if (num_wraps)
+ pos = iwl_print_event_log(trans, next_entry,
+ capacity - next_entry, mode,
+ pos, buf, bufsz);
+ /* (then/else) start at top of log */
+ pos = iwl_print_event_log(trans, 0,
+ next_entry, mode, pos, buf, bufsz);
+ } else
+ pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
+#else
+ pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
+#endif
+ return pos;
+}
+
+/* tasklet for iwlagn interrupt */
+void iwl_irq_tasklet(struct iwl_trans *trans)
+{
+ u32 inta = 0;
+ u32 handled = 0;
+ unsigned long flags;
+ u32 i;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u32 inta_mask;
+#endif
+
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ */
+ /* There is a hardware bug in the interrupt mask function that some
+ * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
+ * they are disabled in the CSR_INT_MASK register. Furthermore the
+ * ICT interrupt handling mechanism has another bug that might cause
+ * these unmasked interrupts fail to be detected. We workaround the
+ * hardware bugs here by ACKing all the possible interrupts so that
+ * interrupt coalescing can still be achieved.
+ */
+ iwl_write32(bus(trans), CSR_INT,
+ trans_pcie->inta | ~trans_pcie->inta_mask);
+
+ inta = trans_pcie->inta;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(trans->shrd) & IWL_DL_ISR) {
+ /* just for debug */
+ inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);
+ IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ",
+ inta, inta_mask);
+ }
+#endif
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ /* saved interrupt in inta variable now we can reset trans_pcie->inta */
+ trans_pcie->inta = 0;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IWL_ERR(trans, "Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ iwl_disable_interrupts(trans);
+
+ isr_stats->hw++;
+ iwl_irq_handle_error(trans);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ return;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
+ "the frame/frames.\n");
+ isr_stats->sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+ isr_stats->alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* HW RF KILL switch toggled */
+ if (inta & CSR_INT_BIT_RF_KILL) {
+ int hw_rf_kill = 0;
+ if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rf_kill = 1;
+
+ IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
+ hw_rf_kill ? "disable radio" : "enable radio");
+
+ isr_stats->rfkill++;
+
+ /* driver only loads ucode once setting the interface up.
+ * the driver allows loading the ucode even if the radio
+ * is killed. Hence update the killswitch state here. The
+ * rfkill handler will care about restarting if needed.
+ */
+ if (!test_bit(STATUS_ALIVE, &trans->shrd->status)) {
+ if (hw_rf_kill)
+ set_bit(STATUS_RF_KILL_HW,
+ &trans->shrd->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW,
+ &trans->shrd->status);
+ iwl_set_hw_rfkill_state(priv(trans), hw_rf_kill);
+ }
+
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta & CSR_INT_BIT_CT_KILL) {
+ IWL_ERR(trans, "Microcode CT kill error detected.\n");
+ isr_stats->ctkill++;
+ handled |= CSR_INT_BIT_CT_KILL;
+ }
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IWL_ERR(trans, "Microcode SW error detected. "
+ " Restarting 0x%X.\n", inta);
+ isr_stats->sw++;
+ iwl_irq_handle_error(trans);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /* uCode wakes up after power-down sleep */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+ iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
+ for (i = 0; i < hw_params(trans).max_txq_num; i++)
+ iwl_txq_update_write_ptr(trans,
+ &trans_pcie->txq[i]);
+
+ isr_stats->wakeup++;
+
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+ * notifications from uCode come through here*/
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
+ CSR_INT_BIT_RX_PERIODIC)) {
+ IWL_DEBUG_ISR(trans, "Rx interrupt\n");
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ iwl_write32(bus(trans), CSR_FH_INT_STATUS,
+ CSR_FH_INT_RX_MASK);
+ }
+ if (inta & CSR_INT_BIT_RX_PERIODIC) {
+ handled |= CSR_INT_BIT_RX_PERIODIC;
+ iwl_write32(bus(trans),
+ CSR_INT, CSR_INT_BIT_RX_PERIODIC);
+ }
+ /* Sending RX interrupt require many steps to be done in the
+ * the device:
+ * 1- write interrupt to current index in ICT table.
+ * 2- dma RX frame.
+ * 3- update RX shared data to indicate last write index.
+ * 4- send interrupt.
+ * This could lead to RX race, driver could receive RX interrupt
+ * but the shared data changes does not reflect this;
+ * periodic interrupt will detect any dangling Rx activity.
+ */
+
+ /* Disable periodic interrupt; we use it as just a one-shot. */
+ iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
+ CSR_INT_PERIODIC_DIS);
+ iwl_rx_handle(trans);
+
+ /*
+ * Enable periodic interrupt in 8 msec only if we received
+ * real RX interrupt (instead of just periodic int), to catch
+ * any dangling Rx interrupt. If it was just the periodic
+ * interrupt, there was no dangling Rx activity, and no need
+ * to extend the periodic interrupt; one-shot is enough.
+ */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
+ iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
+ CSR_INT_PERIODIC_ENA);
+
+ isr_stats->rx++;
+ }
+
+ /* This "Tx" DMA channel is used only for loading uCode */
+ if (inta & CSR_INT_BIT_FH_TX) {
+ iwl_write32(bus(trans), CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
+ IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
+ isr_stats->tx++;
+ handled |= CSR_INT_BIT_FH_TX;
+ /* Wake up uCode load routine, now that load is complete */
+ priv(trans)->ucode_write_complete = 1;
+ wake_up(&trans->shrd->wait_command_queue);
+ }
+
+ if (inta & ~handled) {
+ IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ isr_stats->unhandled++;
+ }
+
+ if (inta & ~(trans_pcie->inta_mask)) {
+ IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
+ inta & ~trans_pcie->inta_mask);
+ }
+
+ /* Re-enable all interrupts */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status))
+ iwl_enable_interrupts(trans);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(priv(trans));
+}
+
+/******************************************************************************
+ *
+ * ICT functions
+ *
+ ******************************************************************************/
+#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
+
+/* Free dram table */
+void iwl_free_isr_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans_pcie->ict_tbl_vir) {
+ dma_free_coherent(bus(trans)->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ trans_pcie->ict_tbl_vir,
+ trans_pcie->ict_tbl_dma);
+ trans_pcie->ict_tbl_vir = NULL;
+ memset(&trans_pcie->ict_tbl_dma, 0,
+ sizeof(trans_pcie->ict_tbl_dma));
+ memset(&trans_pcie->aligned_ict_tbl_dma, 0,
+ sizeof(trans_pcie->aligned_ict_tbl_dma));
+ }
+}
+
+
+/* allocate dram shared table it is a PAGE_SIZE aligned
+ * also reset all data related to ICT table interrupt.
+ */
+int iwl_alloc_isr_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* allocate shrared data table */
+ trans_pcie->ict_tbl_vir =
+ dma_alloc_coherent(bus(trans)->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ &trans_pcie->ict_tbl_dma, GFP_KERNEL);
+ if (!trans_pcie->ict_tbl_vir)
+ return -ENOMEM;
+
+ /* align table to PAGE_SIZE boundary */
+ trans_pcie->aligned_ict_tbl_dma =
+ ALIGN(trans_pcie->ict_tbl_dma, PAGE_SIZE);
+
+ IWL_DEBUG_ISR(trans, "ict dma addr %Lx dma aligned %Lx diff %d\n",
+ (unsigned long long)trans_pcie->ict_tbl_dma,
+ (unsigned long long)trans_pcie->aligned_ict_tbl_dma,
+ (int)(trans_pcie->aligned_ict_tbl_dma -
+ trans_pcie->ict_tbl_dma));
+
+ trans_pcie->ict_tbl = trans_pcie->ict_tbl_vir +
+ (trans_pcie->aligned_ict_tbl_dma -
+ trans_pcie->ict_tbl_dma);
+
+ IWL_DEBUG_ISR(trans, "ict vir addr %p vir aligned %p diff %d\n",
+ trans_pcie->ict_tbl, trans_pcie->ict_tbl_vir,
+ (int)(trans_pcie->aligned_ict_tbl_dma -
+ trans_pcie->ict_tbl_dma));
+
+ /* reset table and index to all 0 */
+ memset(trans_pcie->ict_tbl_vir, 0,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
+ trans_pcie->ict_index = 0;
+
+ /* add periodic RX interrupt */
+ trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
+ return 0;
+}
+
+/* Device is going up inform it about using ICT interrupt table,
+ * also we need to tell the driver to start using ICT interrupt.
+ */
+int iwl_reset_ict(struct iwl_trans *trans)
+{
+ u32 val;
+ unsigned long flags;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans_pcie->ict_tbl_vir)
+ return 0;
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ iwl_disable_interrupts(trans);
+
+ memset(&trans_pcie->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
+
+ val = trans_pcie->aligned_ict_tbl_dma >> PAGE_SHIFT;
+
+ val |= CSR_DRAM_INT_TBL_ENABLE;
+ val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+
+ IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%X "
+ "aligned dma address %Lx\n",
+ val,
+ (unsigned long long)trans_pcie->aligned_ict_tbl_dma);
+
+ iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val);
+ trans_pcie->use_ict = true;
+ trans_pcie->ict_index = 0;
+ iwl_write32(bus(trans), CSR_INT, trans_pcie->inta_mask);
+ iwl_enable_interrupts(trans);
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ return 0;
+}
+
+/* Device is going down disable ict interrupt usage */
+void iwl_disable_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ trans_pcie->use_ict = false;
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+}
+
+static irqreturn_t iwl_isr(int irq, void *data)
+{
+ struct iwl_trans *trans = data;
+ struct iwl_trans_pcie *trans_pcie;
+ u32 inta, inta_mask;
+ unsigned long flags;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u32 inta_fh;
+#endif
+ if (!trans)
+ return IRQ_NONE;
+
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+ inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
+ iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(bus(trans), CSR_INT);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!inta) {
+ IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+ if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
+ IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ goto unplugged;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
+ inta_fh = iwl_read32(bus(trans), CSR_FH_INT_STATUS);
+ IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
+ "fh 0x%08x\n", inta, inta_mask, inta_fh);
+ }
+#endif
+
+ trans_pcie->inta |= inta;
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&trans_pcie->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ !trans_pcie->inta)
+ iwl_enable_interrupts(trans);
+
+ unplugged:
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ return IRQ_HANDLED;
+
+ none:
+ /* re-enable interrupts here since we don't have anything to service. */
+ /* only Re-enable if disabled by irq and no schedules tasklet. */
+ if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ !trans_pcie->inta)
+ iwl_enable_interrupts(trans);
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ return IRQ_NONE;
+}
+
+/* interrupt handler using ict table, with this interrupt driver will
+ * stop using INTA register to get device's interrupt, reading this register
+ * is expensive, device will write interrupts in ICT dram table, increment
+ * index then will fire interrupt to driver, driver will OR all ICT table
+ * entries from current index up to table entry with 0 value. the result is
+ * the interrupt we need to service, driver will set the entries back to 0 and
+ * set index.
+ */
+irqreturn_t iwl_isr_ict(int irq, void *data)
+{
+ struct iwl_trans *trans = data;
+ struct iwl_trans_pcie *trans_pcie;
+ u32 inta, inta_mask;
+ u32 val = 0;
+ unsigned long flags;
+
+ if (!trans)
+ return IRQ_NONE;
+
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* dram interrupt table not set yet,
+ * use legacy interrupt.
+ */
+ if (!trans_pcie->use_ict)
+ return iwl_isr(irq, data);
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here.
+ */
+ inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
+ iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
+
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!trans_pcie->ict_tbl[trans_pcie->ict_index]) {
+ IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+ /* read all entries that not 0 start with ict_index */
+ while (trans_pcie->ict_tbl[trans_pcie->ict_index]) {
+
+ val |= le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+ trans_pcie->ict_index,
+ le32_to_cpu(
+ trans_pcie->ict_tbl[trans_pcie->ict_index]));
+ trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+ trans_pcie->ict_index =
+ iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
+
+ }
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+ /*
+ * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. fortunately, bits 18 and 19 stay set when this happens
+ * so we use them to decide on the real state of the Rx bit.
+ * In order words, bit 15 is set if bit 18 or bit 19 are set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
+ inta = (0xff & val) | ((0xff00 & val) << 16);
+ IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
+ inta, inta_mask, val);
+
+ inta &= trans_pcie->inta_mask;
+ trans_pcie->inta |= inta;
+
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&trans_pcie->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ !trans_pcie->inta) {
+ /* Allow interrupt if was disabled by this handler and
+ * no tasklet was schedules, We should not enable interrupt,
+ * tasklet will enable it.
+ */
+ iwl_enable_interrupts(trans);
+ }
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ return IRQ_HANDLED;
+
+ none:
+ /* re-enable interrupts here since we don't have anything to service.
+ * only Re-enable if disabled by irq.
+ */
+ if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ !trans_pcie->inta)
+ iwl_enable_interrupts(trans);
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ return IRQ_NONE;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
new file mode 100644
index 00000000000..4a0c95302a7
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -0,0 +1,1141 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "iwl-agn-hw.h"
+#include "iwl-trans-pcie-int.h"
+
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+/**
+ * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ int write_ptr = txq->q.write_ptr;
+ int txq_id = txq->q.id;
+ u8 sec_ctl = 0;
+ u8 sta_id = 0;
+ u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+ struct iwl_tx_cmd *tx_cmd =
+ (struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;
+
+ scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+
+ WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ sta_id = tx_cmd->sta_id;
+ sec_ctl = tx_cmd->sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += WEP_IV_LEN + WEP_ICV_LEN;
+ break;
+ }
+
+ bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
+}
+
+/**
+ * iwl_txq_update_write_ptr - Send new write index to hardware
+ */
+void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
+{
+ u32 reg = 0;
+ int txq_id = txq->q.id;
+
+ if (txq->need_update == 0)
+ return;
+
+ if (hw_params(trans).shadow_reg_enable) {
+ /* shadow register enabled */
+ iwl_write32(bus(trans), HBUS_TARG_WRPTR,
+ txq->q.write_ptr | (txq_id << 8));
+ } else {
+ /* if we're trying to save power */
+ if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
+ /* wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part. */
+ reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans,
+ "Tx queue %d requesting wakeup,"
+ " GP1 = 0x%x\n", txq_id, reg);
+ iwl_set_bit(bus(trans), CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ return;
+ }
+
+ iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
+ txq->q.write_ptr | (txq_id << 8));
+
+ /*
+ * else not in power-save mode,
+ * uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ } else
+ iwl_write32(bus(trans), HBUS_TARG_WRPTR,
+ txq->q.write_ptr | (txq_id << 8));
+ }
+ txq->need_update = 0;
+}
+
+static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ dma_addr_t addr = get_unaligned_le32(&tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ addr |=
+ ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+ return addr;
+}
+
+static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+ dma_addr_t addr, u16 len)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ u16 hi_n_len = len << 4;
+
+ put_unaligned_le32(addr, &tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+ return tfd->num_tbs & 0x1f;
+}
+
+static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
+ struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
+{
+ int i;
+ int num_tbs;
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+ if (num_tbs >= IWL_NUM_OF_TBS) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ /* @todo issue fatal error, it is quite serious situation */
+ return;
+ }
+
+ /* Unmap tx_cmd */
+ if (num_tbs)
+ dma_unmap_single(bus(trans)->dev,
+ dma_unmap_addr(meta, mapping),
+ dma_unmap_len(meta, len),
+ DMA_BIDIRECTIONAL);
+
+ /* Unmap chunks, if any. */
+ for (i = 1; i < num_tbs; i++)
+ dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
+ iwl_tfd_tb_get_len(tfd, i), dma_dir);
+}
+
+/**
+ * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @trans - transport private data
+ * @txq - tx queue
+ * @index - the index of the TFD to be freed
+ *@dma_dir - the direction of the DMA mapping
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+ int index, enum dma_data_direction dma_dir)
+{
+ struct iwl_tfd *tfd_tmp = txq->tfds;
+
+ iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);
+
+ /* free SKB */
+ if (txq->skbs) {
+ struct sk_buff *skb;
+
+ skb = txq->skbs[index];
+
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_free_skb(priv(trans), skb);
+ txq->skbs[index] = NULL;
+ }
+ }
+}
+
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr, u16 len,
+ u8 reset)
+{
+ struct iwl_queue *q;
+ struct iwl_tfd *tfd, *tfd_tmp;
+ u32 num_tbs;
+
+ q = &txq->q;
+ tfd_tmp = txq->tfds;
+ tfd = &tfd_tmp[q->write_ptr];
+
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
+
+ num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+ /* Each TFD can point to a maximum 20 Tx buffers */
+ if (num_tbs >= IWL_NUM_OF_TBS) {
+ IWL_ERR(trans, "Error can not send more than %d chunks\n",
+ IWL_NUM_OF_TBS);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
+ return -EINVAL;
+
+ if (unlikely(addr & ~IWL_TX_DMA_MASK))
+ IWL_ERR(trans, "Unaligned address = %llx\n",
+ (unsigned long long)addr);
+
+ iwl_tfd_set_tb(tfd, num_tbs, addr, len);
+
+ return 0;
+}
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill. Driver and device exchange status of each
+ * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
+ *
+ * For Tx queue, there are low mark and high mark limits. If, after queuing
+ * the packet for Tx, free space become < low mark, Tx queue stopped. When
+ * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
+ * Tx queue resumed.
+ *
+ ***************************************************/
+
+int iwl_queue_space(const struct iwl_queue *q)
+{
+ int s = q->read_ptr - q->write_ptr;
+
+ if (q->read_ptr > q->write_ptr)
+ s -= q->n_bd;
+
+ if (s <= 0)
+ s += q->n_window;
+ /* keep some reserve to not confuse empty and full situations */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+
+/**
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+{
+ q->n_bd = count;
+ q->n_window = slots_num;
+ q->id = id;
+
+ /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
+ * and iwl_queue_dec_wrap are broken. */
+ if (WARN_ON(!is_power_of_2(count)))
+ return -EINVAL;
+
+ /* slots_num must be power-of-two size, otherwise
+ * get_cmd_index is broken. */
+ if (WARN_ON(!is_power_of_2(slots_num)))
+ return -EINVAL;
+
+ q->low_mark = q->n_window / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_window / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = q->read_ptr = 0;
+
+ return 0;
+}
+
+static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+ int txq_id = txq->q.id;
+ int read_ptr = txq->q.read_ptr;
+ u8 sta_id = 0;
+ __le16 bc_ent;
+ struct iwl_tx_cmd *tx_cmd =
+ (struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;
+
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != trans->shrd->cmd_queue)
+ sta_id = tx_cmd->sta_id;
+
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+ scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+}
+
+static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
+ u16 txq_id)
+{
+ u32 tbl_dw_addr;
+ u32 tbl_dw;
+ u16 scd_q2ratid;
+
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+ tbl_dw_addr = trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
+
+ tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);
+
+ if (txq_id & 0x1)
+ tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+ else
+ tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+ iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);
+
+ return 0;
+}
+
+static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
+{
+ /* Simply stop the queue, but don't change any configuration;
+ * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+ iwl_write_prph(bus(trans),
+ SCD_QUEUE_STATUS_BITS(txq_id),
+ (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+ (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
+ int txq_id, u32 index)
+{
+ iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
+ (index & 0xff) | (txq_id << 8));
+ iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
+}
+
+void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int txq_id = txq->q.id;
+ int active =
+ test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
+
+ iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
+ (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
+ (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
+ SCD_QUEUE_STTS_REG_MSK);
+
+ txq->sched_retry = scd_retry;
+
+ IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
+ active ? "Activate" : "Deactivate",
+ scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
+}
+
+static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
+ u8 ctx, u16 tid)
+{
+ const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return ac_to_fifo[tid_to_ac[tid]];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid, int frame_limit)
+{
+ int tx_fifo, txq_id, ssn_idx;
+ u16 ra_tid;
+ unsigned long flags;
+ struct iwl_tid_data *tid_data;
+
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (WARN_ON(sta_id == IWL_INVALID_STATION))
+ return;
+ if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
+ return;
+
+ tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
+ if (WARN_ON(tx_fifo < 0)) {
+ IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
+ return;
+ }
+
+ spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+ tid_data = &trans->shrd->tid_data[sta_id][tid];
+ ssn_idx = SEQ_TO_SN(tid_data->seq_number);
+ txq_id = tid_data->agg.txq_id;
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+
+ ra_tid = BUILD_RAxTID(sta_id, tid);
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+
+ /* Stop this Tx queue before configuring it */
+ iwlagn_tx_queue_stop_scheduler(trans, txq_id);
+
+ /* Map receiver-address / traffic-ID to this queue */
+ iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
+
+ /* Set this queue as a chain-building queue */
+ iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));
+
+ /* enable aggregations for the queue */
+ iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));
+
+ /* Place first TFD at index corresponding to start sequence number.
+ * Assumes that ssn_idx is valid (!= 0xFFF) */
+ trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);
+
+ /* Set up Tx window size and frame limit for this queue */
+ iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
+ sizeof(u32),
+ ((frame_limit <<
+ SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((frame_limit <<
+ SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+
+ iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
+
+ /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+ iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
+ tx_fifo, 1);
+
+ trans_pcie->txq[txq_id].sta_id = sta_id;
+ trans_pcie->txq[txq_id].tid = tid;
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int txq_id;
+
+ for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+ if (!test_and_set_bit(txq_id,
+ &trans_pcie->txq_ctx_active_msk))
+ return txq_id;
+ return -1;
+}
+
+int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid, u16 *ssn)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tid_data *tid_data;
+ unsigned long flags;
+ int txq_id;
+
+ txq_id = iwlagn_txq_ctx_activate_free(trans);
+ if (txq_id == -1) {
+ IWL_ERR(trans, "No free aggregation queue available\n");
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+ tid_data = &trans->shrd->tid_data[sta_id][tid];
+ *ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.txq_id = txq_id;
+ iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
+
+ tid_data = &trans->shrd->tid_data[sta_id][tid];
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(trans, "HW queue is empty\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
+ } else {
+ IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
+ "queue\n", tid_data->tfds_in_queue);
+ tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+
+ return 0;
+}
+
+void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ iwlagn_tx_queue_stop_scheduler(trans, txq_id);
+
+ iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
+
+ trans_pcie->txq[txq_id].q.read_ptr = 0;
+ trans_pcie->txq[txq_id].q.write_ptr = 0;
+ /* supposes that ssn_idx is valid (!= 0xFFF) */
+ iwl_trans_set_wr_ptrs(trans, txq_id, 0);
+
+ iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_txq_ctx_deactivate(trans_pcie, txq_id);
+ iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
+}
+
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+ int read_ptr, write_ptr;
+ struct iwl_tid_data *tid_data;
+ int txq_id;
+
+ spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+
+ tid_data = &trans->shrd->tid_data[sta_id][tid];
+ txq_id = tid_data->agg.txq_id;
+
+ if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IWLAGN_FIRST_AMPDU_QUEUE +
+ hw_params(trans).num_ampdu_queues <= txq_id)) {
+ IWL_ERR(trans,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+ IWLAGN_FIRST_AMPDU_QUEUE +
+ hw_params(trans).num_ampdu_queues - 1);
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+ return -EINVAL;
+ }
+
+ switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * This can happen if the peer stops aggregation
+ * again before we've had a chance to drain the
+ * queue we selected previously, i.e. before the
+ * session was really started completely.
+ */
+ IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
+ goto turn_off;
+ case IWL_AGG_ON:
+ break;
+ default:
+ IWL_WARN(trans, "Stopping AGG while state not ON "
+ "or starting for %d on %d (%d)\n", sta_id, tid,
+ trans->shrd->tid_data[sta_id][tid].agg.state);
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+ return 0;
+ }
+
+ write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
+ read_ptr = trans_pcie->txq[txq_id].q.read_ptr;
+
+ /* The queue is not empty */
+ if (write_ptr != read_ptr) {
+ IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
+ trans->shrd->tid_data[sta_id][tid].agg.state =
+ IWL_EMPTYING_HW_QUEUE_DELBA;
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+ return 0;
+ }
+
+ IWL_DEBUG_HT(trans, "HW queue is empty\n");
+turn_off:
+ trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+
+ /* do not restore/save irqs */
+ spin_unlock(&trans->shrd->sta_lock);
+ spin_lock(&trans->shrd->lock);
+
+ iwl_trans_pcie_txq_agg_disable(trans, txq_id);
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
+
+ return 0;
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/**
+ * iwl_enqueue_hcmd - enqueue a uCode command
+ * @priv: device private data point
+ * @cmd: a point to the ucode command structure
+ *
+ * The function returns < 0 values to indicate the operation is
+ * failed. On success, it turns the index (> 0) of command in the
+ * command queue.
+ */
+static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
+ struct iwl_queue *q = &txq->q;
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+ dma_addr_t phys_addr;
+ unsigned long flags;
+ u32 idx;
+ u16 copy_size, cmd_size;
+ bool is_ct_kill = false;
+ bool had_nocopy = false;
+ int i;
+ u8 *cmd_dest;
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+ const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
+ int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
+ int trace_idx;
+#endif
+
+ if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
+ IWL_WARN(trans, "fw recovery, no hcmd send\n");
+ return -EIO;
+ }
+
+ if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
+ !(cmd->flags & CMD_ON_DEMAND)) {
+ IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
+ return -EIO;
+ }
+
+ copy_size = sizeof(out_cmd->hdr);
+ cmd_size = sizeof(out_cmd->hdr);
+
+ /* need one for the header if the first is NOCOPY */
+ BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
+
+ for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+ if (!cmd->len[i])
+ continue;
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+ had_nocopy = true;
+ } else {
+ /* NOCOPY must not be followed by normal! */
+ if (WARN_ON(had_nocopy))
+ return -EINVAL;
+ copy_size += cmd->len[i];
+ }
+ cmd_size += cmd->len[i];
+ }
+
+ /*
+ * If any of the command structures end up being larger than
+ * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
+ * allocated into separate TFDs, then we will need to
+ * increase the size of the buffers.
+ */
+ if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
+ return -EINVAL;
+
+ if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
+ IWL_WARN(trans, "Not sending command - %s KILL\n",
+ iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&trans->hcmd_lock, flags);
+
+ if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+
+ IWL_ERR(trans, "No space in command queue\n");
+ is_ct_kill = iwl_check_for_ct_kill(priv(trans));
+ if (!is_ct_kill) {
+ IWL_ERR(trans, "Restarting adapter queue is full\n");
+ iwlagn_fw_error(priv(trans), false);
+ }
+ return -ENOSPC;
+ }
+
+ idx = get_cmd_index(q, q->write_ptr);
+ out_cmd = txq->cmd[idx];
+ out_meta = &txq->meta[idx];
+
+ memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+
+ /* set up the header */
+
+ out_cmd->hdr.cmd = cmd->id;
+ out_cmd->hdr.flags = 0;
+ out_cmd->hdr.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
+ INDEX_TO_SEQ(q->write_ptr));
+
+ /* and copy the data that needs to be copied */
+
+ cmd_dest = out_cmd->payload;
+ for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+ if (!cmd->len[i])
+ continue;
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
+ break;
+ memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
+ cmd_dest += cmd->len[i];
+ }
+
+ IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
+ "%d bytes at %d[%d]:%d\n",
+ get_cmd_string(out_cmd->hdr.cmd),
+ out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
+ q->write_ptr, idx, trans->shrd->cmd_queue);
+
+ phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
+ idx = -ENOMEM;
+ goto out;
+ }
+
+ dma_unmap_addr_set(out_meta, mapping, phys_addr);
+ dma_unmap_len_set(out_meta, len, copy_size);
+
+ iwlagn_txq_attach_buf_to_tfd(trans, txq,
+ phys_addr, copy_size, 1);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+ trace_bufs[0] = &out_cmd->hdr;
+ trace_lens[0] = copy_size;
+ trace_idx = 1;
+#endif
+
+ for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+ if (!cmd->len[i])
+ continue;
+ if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+ continue;
+ phys_addr = dma_map_single(bus(trans)->dev,
+ (void *)cmd->data[i],
+ cmd->len[i], DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
+ iwlagn_unmap_tfd(trans, out_meta,
+ &txq->tfds[q->write_ptr],
+ DMA_BIDIRECTIONAL);
+ idx = -ENOMEM;
+ goto out;
+ }
+
+ iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
+ cmd->len[i], 0);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+ trace_bufs[trace_idx] = cmd->data[i];
+ trace_lens[trace_idx] = cmd->len[i];
+ trace_idx++;
+#endif
+ }
+
+ out_meta->flags = cmd->flags;
+
+ txq->need_update = 1;
+
+ /* check that tracing gets all possible blocks */
+ BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+ trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
+ trace_bufs[0], trace_lens[0],
+ trace_bufs[1], trace_lens[1],
+ trace_bufs[2], trace_lens[2]);
+#endif
+
+ /* Increment and update queue's write index */
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_txq_update_write_ptr(trans, txq);
+
+ out:
+ spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+ return idx;
+}
+
+/**
+ * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' index, all entries between old and new 'R' index
+ * need to be reclaimed. As result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
+ int idx)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ int nfreed = 0;
+
+ if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
+ IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
+ "index %d is out of range [0-%d] %d %d.\n", __func__,
+ txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
+ return;
+ }
+
+ for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ if (nfreed++ > 0) {
+ IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
+ q->write_ptr, q->read_ptr);
+ iwlagn_fw_error(priv(trans), false);
+ }
+
+ }
+}
+
+/**
+ * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ * @handler_status: return value of the handler of the command
+ * (put in setup_rx_handlers)
+ *
+ * If an Rx buffer has an async callback associated with it the callback
+ * will be executed. The attached skb (if present) will only be freed
+ * if the callback returns 1
+ */
+void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
+ int handler_status)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int index = SEQ_TO_INDEX(sequence);
+ int cmd_index;
+ struct iwl_device_cmd *cmd;
+ struct iwl_cmd_meta *meta;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
+ unsigned long flags;
+
+ /* If a Tx command is being handled and it isn't in the actual
+ * command queue then there a command routing bug has been introduced
+ * in the queue management code. */
+ if (WARN(txq_id != trans->shrd->cmd_queue,
+ "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+ txq_id, trans->shrd->cmd_queue, sequence,
+ trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
+ trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
+ iwl_print_hex_error(trans, pkt, 32);
+ return;
+ }
+
+ cmd_index = get_cmd_index(&txq->q, index);
+ cmd = txq->cmd[cmd_index];
+ meta = &txq->meta[cmd_index];
+
+ txq->time_stamp = jiffies;
+
+ iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
+ DMA_BIDIRECTIONAL);
+
+ /* Input error checking is done when commands are added to queue. */
+ if (meta->flags & CMD_WANT_SKB) {
+ meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+ meta->source->handler_status = handler_status;
+ rxb->page = NULL;
+ }
+
+ spin_lock_irqsave(&trans->hcmd_lock, flags);
+
+ iwl_hcmd_queue_reclaim(trans, txq_id, index);
+
+ if (!(meta->flags & CMD_ASYNC)) {
+ if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
+ IWL_WARN(trans,
+ "HCMD_ACTIVE already clear for command %s\n",
+ get_cmd_string(cmd->hdr.cmd));
+ }
+ clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ get_cmd_string(cmd->hdr.cmd));
+ wake_up(&trans->shrd->wait_command_queue);
+ }
+
+ meta->flags = 0;
+
+ spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+}
+
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+
+static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+{
+ int ret;
+
+ /* An asynchronous command can not expect an SKB to be set. */
+ if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+ return -EINVAL;
+
+
+ if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
+ return -EBUSY;
+
+ ret = iwl_enqueue_hcmd(trans, cmd);
+ if (ret < 0) {
+ IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ get_cmd_string(cmd->id), ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int cmd_idx;
+ int ret;
+
+ lockdep_assert_held(&trans->shrd->mutex);
+
+ IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
+ get_cmd_string(cmd->id));
+
+ set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+ IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
+ get_cmd_string(cmd->id));
+
+ cmd_idx = iwl_enqueue_hcmd(trans, cmd);
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+ IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ get_cmd_string(cmd->id), ret);
+ return ret;
+ }
+
+ ret = wait_event_timeout(trans->shrd->wait_command_queue,
+ !test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
+ struct iwl_tx_queue *txq =
+ &trans_pcie->txq[trans->shrd->cmd_queue];
+ struct iwl_queue *q = &txq->q;
+
+ IWL_ERR(trans,
+ "Error sending %s: time out after %dms.\n",
+ get_cmd_string(cmd->id),
+ jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ IWL_ERR(trans,
+ "Current CMD queue read_ptr %d write_ptr %d\n",
+ q->read_ptr, q->write_ptr);
+
+ clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
+ "%s\n", get_cmd_string(cmd->id));
+ ret = -ETIMEDOUT;
+ goto cancel;
+ }
+ }
+
+ if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
+ IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
+ get_cmd_string(cmd->id));
+ ret = -ECANCELED;
+ goto fail;
+ }
+ if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
+ IWL_ERR(trans, "Command %s failed: FW Error\n",
+ get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto fail;
+ }
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+ IWL_ERR(trans, "Error: Response NULL in '%s'\n",
+ get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto cancel;
+ }
+
+ return 0;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise in case the cmd comes
+ * in later, it will possibly set an invalid
+ * address (cmd->meta.source).
+ */
+ trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
+ ~CMD_WANT_SKB;
+ }
+fail:
+ if (cmd->reply_page) {
+ iwl_free_pages(trans->shrd, cmd->reply_page);
+ cmd->reply_page = 0;
+ }
+
+ return ret;
+}
+
+int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+{
+ if (cmd->flags & CMD_ASYNC)
+ return iwl_send_cmd_async(trans, cmd);
+
+ return iwl_send_cmd_sync(trans, cmd);
+}
+
+/* Frees buffers until index _not_ inclusive */
+int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
+ struct sk_buff_head *skbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ int last_to_free;
+ int freed = 0;
+
+ /* This function is not meant to release cmd queue*/
+ if (WARN_ON(txq_id == trans->shrd->cmd_queue))
+ return 0;
+
+ /*Since we free until index _not_ inclusive, the one before index is
+ * the last we will free. This one must be used */
+ last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
+
+ if ((index >= q->n_bd) ||
+ (iwl_queue_used(q, last_to_free) == 0)) {
+ IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
+ "last_to_free %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, last_to_free, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ return 0;
+ }
+
+ IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
+ q->read_ptr, index);
+
+ if (WARN_ON(!skb_queue_empty(skbs)))
+ return 0;
+
+ for (;
+ q->read_ptr != index;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
+ continue;
+
+ __skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);
+
+ txq->skbs[txq->q.read_ptr] = NULL;
+
+ iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
+
+ iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
+ freed++;
+ }
+ return freed;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
new file mode 100644
index 00000000000..8e8c75c997e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -0,0 +1,1998 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+
+#include "iwl-trans.h"
+#include "iwl-trans-pcie-int.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-shared.h"
+#include "iwl-eeprom.h"
+#include "iwl-agn-hw.h"
+
+static int iwl_trans_rx_alloc(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct device *dev = bus(trans)->dev;
+
+ memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
+
+ spin_lock_init(&rxq->lock);
+
+ if (WARN_ON(rxq->bd || rxq->rb_stts))
+ return -EINVAL;
+
+ /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
+ rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ &rxq->bd_dma, GFP_KERNEL);
+ if (!rxq->bd)
+ goto err_bd;
+ memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
+
+ /*Allocate the driver's pointer to receive buffer status */
+ rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
+ &rxq->rb_stts_dma, GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err_rb_stts;
+ memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+
+ return 0;
+
+err_rb_stts:
+ dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd = NULL;
+err_bd:
+ return -ENOMEM;
+}
+
+static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ int i;
+
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rxq->pool[i].page,
+ hw_params(trans).rx_page_order);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+}
+
+static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
+ struct iwl_rx_queue *rxq)
+{
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+ u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
+
+ if (iwlagn_mod_params.amsdu_size_8K)
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write index */
+ iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->bd_dma >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+ * the credit mechanism in 5000 HW RX FIFO
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+ rb_size|
+ (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+}
+
+static int iwl_rx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+
+ int i, err;
+ unsigned long flags;
+
+ if (!rxq->bd) {
+ err = iwl_trans_rx_alloc(trans);
+ if (err)
+ return err;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+
+ iwl_trans_rxq_free_rx_bufs(trans);
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ iwlagn_rx_replenish(trans);
+
+ iwl_trans_rx_hw_init(trans, rxq);
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ rxq->need_update = 1;
+ iwl_rx_queue_update_write_ptr(trans, rxq);
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ return 0;
+}
+
+static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+
+ unsigned long flags;
+
+ /*if rxq->bd is NULL, it means that nothing has been allocated,
+ * exit now */
+ if (!rxq->bd) {
+ IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ iwl_trans_rxq_free_rx_bufs(trans);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd = NULL;
+
+ if (rxq->rb_stts)
+ dma_free_coherent(bus(trans)->dev,
+ sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ else
+ IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
+ memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
+ rxq->rb_stts = NULL;
+}
+
+static int iwl_trans_rx_stop(struct iwl_trans *trans)
+{
+
+ /* stop Rx DMA */
+ iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+}
+
+static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size)
+{
+ if (WARN_ON(ptr->addr))
+ return -EINVAL;
+
+ ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
+ &ptr->dma, GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+static int iwl_trans_txq_alloc(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq, int slots_num,
+ u32 txq_id)
+{
+ size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+ int i;
+
+ if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
+ return -EINVAL;
+
+ txq->q.n_window = slots_num;
+
+ txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL);
+ txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL);
+
+ if (!txq->meta || !txq->cmd)
+ goto error;
+
+ if (txq_id == trans->shrd->cmd_queue)
+ for (i = 0; i < slots_num; i++) {
+ txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
+ GFP_KERNEL);
+ if (!txq->cmd[i])
+ goto error;
+ }
+
+ /* Alloc driver data array and TFD circular buffer */
+ /* Driver private data, only for Tx (not command) queues,
+ * not shared with device. */
+ if (txq_id != trans->shrd->cmd_queue) {
+ txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
+ GFP_KERNEL);
+ if (!txq->skbs) {
+ IWL_ERR(trans, "kmalloc for auxiliary BD "
+ "structures failed\n");
+ goto error;
+ }
+ } else {
+ txq->skbs = NULL;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
+ &txq->q.dma_addr, GFP_KERNEL);
+ if (!txq->tfds) {
+ IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+ goto error;
+ }
+ txq->q.id = txq_id;
+
+ return 0;
+error:
+ kfree(txq->skbs);
+ txq->skbs = NULL;
+ /* since txq->cmd has been zeroed,
+ * all non allocated cmd[i] will be NULL */
+ if (txq->cmd && txq_id == trans->shrd->cmd_queue)
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->cmd[i]);
+ kfree(txq->meta);
+ kfree(txq->cmd);
+ txq->meta = NULL;
+ txq->cmd = NULL;
+
+ return -ENOMEM;
+
+}
+
+static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id)
+{
+ int ret;
+
+ txq->need_update = 0;
+ memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
+
+ /*
+ * For the default queues 0-3, set up the swq_id
+ * already -- all others need to get one later
+ * (if they need one at all).
+ */
+ if (txq_id < 4)
+ iwl_set_swq_id(txq, txq_id, txq_id);
+
+ /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
+ txq_id);
+ if (ret)
+ return ret;
+
+ /*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ * Circular buffer (TFD queue in DRAM) physical base address */
+ iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+
+ return 0;
+}
+
+/**
+ * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ enum dma_data_direction dma_dir;
+ unsigned long flags;
+
+ if (!q->n_bd)
+ return;
+
+ /* In the command queue, all the TBs are mapped as BIDI
+ * so unmap them as such.
+ */
+ if (txq_id == trans->shrd->cmd_queue)
+ dma_dir = DMA_BIDIRECTIONAL;
+ else
+ dma_dir = DMA_TO_DEVICE;
+
+ spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+ while (q->write_ptr != q->read_ptr) {
+ /* The read_ptr needs to bound by q->n_window */
+ iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
+ dma_dir);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+}
+
+/**
+ * iwl_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+ struct device *dev = bus(trans)->dev;
+ int i;
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_tx_queue_unmap(trans, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+
+ if (txq_id == trans->shrd->cmd_queue)
+ for (i = 0; i < txq->q.n_window; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd) {
+ dma_free_coherent(dev, sizeof(struct iwl_tfd) *
+ txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
+ }
+
+ /* De-alloc array of per-TFD driver data */
+ kfree(txq->skbs);
+ txq->skbs = NULL;
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+
+/**
+ * iwl_trans_tx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
+{
+ int txq_id;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* Tx queues */
+ if (trans_pcie->txq) {
+ for (txq_id = 0;
+ txq_id < hw_params(trans).max_txq_num; txq_id++)
+ iwl_tx_queue_free(trans, txq_id);
+ }
+
+ kfree(trans_pcie->txq);
+ trans_pcie->txq = NULL;
+
+ iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
+
+ iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
+}
+
+/**
+ * iwl_trans_tx_alloc - allocate TX context
+ * Allocate all Tx DMA structures and initialize them
+ *
+ * @param priv
+ * @return error code
+ */
+static int iwl_trans_tx_alloc(struct iwl_trans *trans)
+{
+ int ret;
+ int txq_id, slots_num;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
+ sizeof(struct iwlagn_scd_bc_tbl);
+
+ /*It is not allowed to alloc twice, so warn when this happens.
+ * We cannot rely on the previous allocation, so free and fail */
+ if (WARN_ON(trans_pcie->txq)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
+ scd_bc_tbls_size);
+ if (ret) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ goto error;
+ }
+
+ /* Alloc keep-warm buffer */
+ ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
+ if (ret) {
+ IWL_ERR(trans, "Keep Warm allocation failed\n");
+ goto error;
+ }
+
+ trans_pcie->txq = kcalloc(hw_params(trans).max_txq_num,
+ sizeof(struct iwl_tx_queue), GFP_KERNEL);
+ if (!trans_pcie->txq) {
+ IWL_ERR(trans, "Not enough memory for txq\n");
+ ret = ENOMEM;
+ goto error;
+ }
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
+ slots_num = (txq_id == trans->shrd->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ iwl_trans_pcie_tx_free(trans);
+
+ return ret;
+}
+static int iwl_tx_init(struct iwl_trans *trans)
+{
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+ bool alloc = false;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans_pcie->txq) {
+ ret = iwl_trans_tx_alloc(trans);
+ if (ret)
+ goto error;
+ alloc = true;
+ }
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ iwl_write_prph(bus(trans), SCD_TXFACT, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
+ slots_num = (txq_id == trans->shrd->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+ /*Upon error, free only if we allocated something */
+ if (alloc)
+ iwl_trans_pcie_tx_free(trans);
+ return ret;
+}
+
+static void iwl_set_pwr_vmain(struct iwl_trans *trans)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+ if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
+ iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+ iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+static int iwl_nic_init(struct iwl_trans *trans)
+{
+ unsigned long flags;
+
+ /* nic_init */
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ iwl_apm_init(priv(trans));
+
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ iwl_write8(bus(trans), CSR_INT_COALESCING,
+ IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ iwl_set_pwr_vmain(trans);
+
+ iwl_nic_config(priv(trans));
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ iwl_rx_init(trans);
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (iwl_tx_init(trans))
+ return -ENOMEM;
+
+ if (hw_params(trans).shadow_reg_enable) {
+ /* enable shadow regs in HW */
+ iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
+ 0x800FFFFF);
+ }
+
+ set_bit(STATUS_INIT, &trans->shrd->status);
+
+ return 0;
+}
+
+#define HW_READY_TIMEOUT (50)
+
+/* Note: returns poll_bit return value, which is >= 0 if success */
+static int iwl_set_hw_ready(struct iwl_trans *trans)
+{
+ int ret;
+
+ iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ HW_READY_TIMEOUT);
+
+ IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
+ return ret;
+}
+
+/* Note: returns standard 0/-ERROR code */
+static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
+{
+ int ret;
+
+ IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+
+ ret = iwl_set_hw_ready(trans);
+ if (ret >= 0)
+ return 0;
+
+ /* If HW is not ready, prepare the conditions to check again */
+ iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+ if (ret < 0)
+ return ret;
+
+ /* HW should be ready by now, check again. */
+ ret = iwl_set_hw_ready(trans);
+ if (ret >= 0)
+ return 0;
+ return ret;
+}
+
+#define IWL_AC_UNSET -1
+
+struct queue_to_fifo_ac {
+ s8 fifo, ac;
+};
+
+static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
+ { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
+ { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
+ { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
+ { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
+ { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+};
+
+static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
+ { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
+ { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
+ { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
+ { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
+ { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
+ { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
+ { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
+ { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
+ { IWL_TX_FIFO_BE_IPAN, 2, },
+ { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
+};
+
+static const u8 iwlagn_bss_ac_to_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+};
+static const u8 iwlagn_bss_ac_to_queue[] = {
+ 0, 1, 2, 3,
+};
+static const u8 iwlagn_pan_ac_to_fifo[] = {
+ IWL_TX_FIFO_VO_IPAN,
+ IWL_TX_FIFO_VI_IPAN,
+ IWL_TX_FIFO_BE_IPAN,
+ IWL_TX_FIFO_BK_IPAN,
+};
+static const u8 iwlagn_pan_ac_to_queue[] = {
+ 7, 6, 5, 4,
+};
+
+static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
+{
+ int ret;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
+ trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
+ trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
+
+ trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
+ trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;
+
+ trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
+ trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
+
+ if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
+ iwl_trans_pcie_prepare_card_hw(trans)) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+ return -EIO;
+ }
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+ else
+ set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+
+ if (iwl_is_rfkill(trans->shrd)) {
+ iwl_set_hw_rfkill_state(priv(trans), true);
+ iwl_enable_interrupts(trans);
+ return -ERFKILL;
+ }
+
+ iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
+
+ ret = iwl_nic_init(trans);
+ if (ret) {
+ IWL_ERR(trans, "Unable to init nic\n");
+ return ret;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
+ iwl_enable_interrupts(trans);
+
+ /* really make sure rfkill handshake bits are cleared */
+ iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ return 0;
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
+ * must be called under priv->shrd->lock and mac access
+ */
+static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
+{
+ iwl_write_prph(bus(trans), SCD_TXFACT, mask);
+}
+
+static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
+{
+ const struct queue_to_fifo_ac *queue_to_fifo;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 a;
+ unsigned long flags;
+ int i, chan;
+ u32 reg_val;
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+
+ trans_pcie->scd_base_addr =
+ iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
+ a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
+ /* reset conext data memory */
+ for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
+ a += 4)
+ iwl_write_targ_mem(bus(trans), a, 0);
+ /* reset tx status memory */
+ for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
+ a += 4)
+ iwl_write_targ_mem(bus(trans), a, 0);
+ for (; a < trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
+ a += 4)
+ iwl_write_targ_mem(bus(trans), a, 0);
+
+ iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
+ trans_pcie->scd_bc_tbls.dma >> 10);
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
+ iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
+ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
+ SCD_QUEUECHAIN_SEL_ALL(trans));
+ iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);
+
+ /* initiate the queues */
+ for (i = 0; i < hw_params(trans).max_txq_num; i++) {
+ iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
+ iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
+ iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(i), 0);
+ iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(i) +
+ sizeof(u32),
+ ((SCD_WIN_SIZE <<
+ SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((SCD_FRAME_LIMIT <<
+ SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+ }
+
+ iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
+ IWL_MASK(0, hw_params(trans).max_txq_num));
+
+ /* Activate all Tx DMA/FIFO channels */
+ iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
+
+ /* map queues to FIFOs */
+ if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
+ queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
+ else
+ queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
+
+ iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);
+
+ /* make sure all queue are not stopped */
+ memset(&trans_pcie->queue_stopped[0], 0,
+ sizeof(trans_pcie->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&trans_pcie->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all the queue first */
+ trans_pcie->txq_ctx_active_msk = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
+ IWLAGN_FIRST_AMPDU_QUEUE);
+ BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
+ IWLAGN_FIRST_AMPDU_QUEUE);
+
+ for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
+ int fifo = queue_to_fifo[i].fifo;
+ int ac = queue_to_fifo[i].ac;
+
+ iwl_txq_ctx_activate(trans_pcie, i);
+
+ if (fifo == IWL_TX_FIFO_UNUSED)
+ continue;
+
+ if (ac != IWL_AC_UNSET)
+ iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
+ iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
+ fifo, 0);
+ }
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ /* Enable L1-Active */
+ iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+}
+
+/**
+ * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
+ */
+static int iwl_trans_tx_stop(struct iwl_trans *trans)
+{
+ int ch, txq_id;
+ unsigned long flags;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+
+ iwl_trans_txq_set_sched(trans, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+ iwl_write_direct32(bus(trans),
+ FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+ 1000))
+ IWL_ERR(trans, "Failing on timeout while stopping"
+ " DMA channel %d [0x%08x]", ch,
+ iwl_read_direct32(bus(trans),
+ FH_TSSR_TX_STATUS_REG));
+ }
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ if (!trans_pcie->txq) {
+ IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
+ return 0;
+ }
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+ iwl_tx_queue_unmap(trans, txq_id);
+
+ return 0;
+}
+
+static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
+{
+ unsigned long flags;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ iwl_disable_interrupts(trans);
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ /* wait to make sure we flush pending tasklet*/
+ synchronize_irq(bus(trans)->irq);
+ tasklet_kill(&trans_pcie->irq_tasklet);
+}
+
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+{
+ /* stop and reset the on-board processor */
+ iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* tell the device to stop sending interrupts */
+ iwl_trans_pcie_disable_sync_irq(trans);
+
+ /* device going down, Stop using ICT table */
+ iwl_disable_ict(trans);
+
+ /*
+ * If a HW restart happens during firmware loading,
+ * then the firmware loading might call this function
+ * and later it might be called again due to the
+ * restart. So don't process again if the device is
+ * already dead.
+ */
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
+ iwl_trans_tx_stop(trans);
+ iwl_trans_rx_stop(trans);
+
+ /* Power-down device's busmaster DMA clocks */
+ iwl_write_prph(bus(trans), APMG_CLK_DIS_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+ }
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ iwl_apm_stop(priv(trans));
+}
+
+static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
+ u8 sta_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+
+ dma_addr_t phys_addr = 0;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, firstlen, secondlen;
+ u16 seq_number = 0;
+ u8 wait_write_ptr = 0;
+ u8 txq_id;
+ u8 tid = 0;
+ bool is_agg = false;
+ __le16 fc = hdr->frame_control;
+ u8 hdr_len = ieee80211_hdrlen(fc);
+
+ /*
+ * Send this frame after DTIM -- there's a special queue
+ * reserved for this for contexts that support AP mode.
+ */
+ if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+ txq_id = trans_pcie->mcast_queue[ctx];
+
+ /*
+ * The microcode will clear the more data
+ * bit in the last frame it transmits.
+ */
+ hdr->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+ txq_id = IWL_AUX_QUEUE;
+ else
+ txq_id =
+ trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
+
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+ u8 *qc = NULL;
+ struct iwl_tid_data *tid_data;
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ tid_data = &trans->shrd->tid_data[sta_id][tid];
+
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ return -1;
+
+ seq_number = tid_data->seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl = hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON);
+ txq_id = tid_data->agg.txq_id;
+ is_agg = true;
+ }
+ }
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ txq = &trans_pcie->txq[txq_id];
+ q = &txq->q;
+
+ /* Set up driver data for this TFD */
+ txq->skbs[q->write_ptr] = skb;
+ txq->cmd[q->write_ptr] = dev_cmd;
+
+ dev_cmd->hdr.cmd = REPLY_TX;
+ dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->meta[q->write_ptr];
+
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+ firstlen = (len + 3) & ~3;
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (firstlen != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys = dma_map_single(bus(trans)->dev,
+ &dev_cmd->hdr, firstlen,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(bus(trans)->dev, txcmd_phys)))
+ return -1;
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+
+ if (!ieee80211_has_morefrags(fc)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = skb->len - hdr_len;
+ if (secondlen > 0) {
+ phys_addr = dma_map_single(bus(trans)->dev, skb->data + hdr_len,
+ secondlen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
+ dma_unmap_single(bus(trans)->dev,
+ dma_unmap_addr(out_meta, mapping),
+ dma_unmap_len(out_meta, len),
+ DMA_BIDIRECTIONAL);
+ return -1;
+ }
+ }
+
+ /* Attach buffers to TFD */
+ iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
+ if (secondlen > 0)
+ iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
+ secondlen, 0);
+
+ scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch);
+
+ /* take back ownership of DMA buffer to enable update */
+ dma_sync_single_for_cpu(bus(trans)->dev, txcmd_phys, firstlen,
+ DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
+ IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
+ le16_to_cpu(dev_cmd->hdr.sequence));
+ IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+ iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ if (is_agg)
+ iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
+ le16_to_cpu(tx_cmd->len));
+
+ dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
+ DMA_BIDIRECTIONAL);
+
+ trace_iwlwifi_dev_tx(priv(trans),
+ &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &dev_cmd->hdr, firstlen,
+ skb->data + hdr_len, secondlen);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_txq_update_write_ptr(trans, txq);
+
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+ trans->shrd->tid_data[sta_id][tid].tfds_in_queue++;
+ if (!ieee80211_has_morefrags(fc))
+ trans->shrd->tid_data[sta_id][tid].seq_number =
+ seq_number;
+ }
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually,
+ * regardless of the value of ret. "ret" only indicates
+ * whether or not we should update the write pointer.
+ */
+ if (iwl_queue_space(q) < q->high_mark) {
+ if (wait_write_ptr) {
+ txq->need_update = 1;
+ iwl_txq_update_write_ptr(trans, txq);
+ } else {
+ iwl_stop_queue(trans, txq);
+ }
+ }
+ return 0;
+}
+
+static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
+{
+ /* Remove all resets to allow NIC to operate */
+ iwl_write32(bus(trans), CSR_RESET, 0);
+}
+
+static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ int err;
+
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
+ tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
+ iwl_irq_tasklet, (unsigned long)trans);
+
+ iwl_alloc_isr_ict(trans);
+
+ err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
+ DRV_NAME, trans);
+ if (err) {
+ IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
+ iwl_free_isr_ict(trans);
+ return err;
+ }
+
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
+ return 0;
+}
+
+static int iwlagn_txq_check_empty(struct iwl_trans *trans,
+ int sta_id, u8 tid, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_queue *q = &trans_pcie->txq[txq_id].q;
+ struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];
+
+ lockdep_assert_held(&trans->shrd->sta_lock);
+
+ switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+ /* We are reclaiming the last packet of the */
+ /* aggregated HW queue */
+ if ((txq_id == tid_data->agg.txq_id) &&
+ (q->read_ptr == q->write_ptr)) {
+ IWL_DEBUG_HT(trans,
+ "HW queue empty: continue DELBA flow\n");
+ iwl_trans_pcie_txq_agg_disable(trans, txq_id);
+ tid_data->agg.state = IWL_AGG_OFF;
+ iwl_stop_tx_ba_trans_ready(priv(trans),
+ NUM_IWL_RXON_CTX,
+ sta_id, tid);
+ iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
+ }
+ break;
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /* We are reclaiming the last packet of the queue */
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(trans,
+ "HW queue empty: continue ADDBA flow\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ iwl_start_tx_ba_trans_ready(priv(trans),
+ NUM_IWL_RXON_CTX,
+ sta_id, tid);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void iwl_free_tfds_in_queue(struct iwl_trans *trans,
+ int sta_id, int tid, int freed)
+{
+ lockdep_assert_held(&trans->shrd->sta_lock);
+
+ if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed)
+ trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed;
+ else {
+ IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n",
+ trans->shrd->tid_data[sta_id][tid].tfds_in_queue,
+ freed);
+ trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0;
+ }
+}
+
+static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
+ int txq_id, int ssn, u32 status,
+ struct sk_buff_head *skbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+ enum iwl_agg_state agg_state;
+ /* n_bd is usually 256 => n_bd - 1 = 0xff */
+ int tfd_num = ssn & (txq->q.n_bd - 1);
+ int freed = 0;
+ bool cond;
+
+ txq->time_stamp = jiffies;
+
+ if (txq->sched_retry) {
+ agg_state =
+ trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
+ cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
+ } else {
+ cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
+ }
+
+ if (txq->q.read_ptr != tfd_num) {
+ IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
+ "scd_ssn=%d idx=%d txq=%d swq=%d\n",
+ ssn , tfd_num, txq_id, txq->swq_id);
+ freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
+ iwl_wake_queue(trans, txq);
+ }
+
+ iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
+ iwlagn_txq_check_empty(trans, sta_id, tid, txq_id);
+}
+
+static void iwl_trans_pcie_free(struct iwl_trans *trans)
+{
+ iwl_trans_pcie_tx_free(trans);
+ iwl_trans_pcie_rx_free(trans);
+ free_irq(bus(trans)->irq, trans);
+ iwl_free_isr_ict(trans);
+ trans->shrd->trans = NULL;
+ kfree(trans);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+{
+ /*
+ * This function is called when system goes into suspend state
+ * mac80211 will call iwlagn_mac_stop() from the mac80211 suspend
+ * function first but since iwlagn_mac_stop() has no knowledge of
+ * who the caller is,
+ * it will not call apm_ops.stop() to stop the DMA operation.
+ * Calling apm_ops.stop here to make sure we stop the DMA.
+ *
+ * But of course ... if we have configured WoWLAN then we did other
+ * things already :-)
+ */
+ if (!trans->shrd->wowlan) {
+ iwl_apm_stop(priv(trans));
+ } else {
+ iwl_disable_interrupts(trans);
+ iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ }
+
+ return 0;
+}
+
+static int iwl_trans_pcie_resume(struct iwl_trans *trans)
+{
+ bool hw_rfkill = false;
+
+ iwl_enable_interrupts(trans);
+
+ if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rfkill = true;
+
+ if (hw_rfkill)
+ set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+
+ iwl_set_hw_rfkill_state(priv(trans), hw_rfkill);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx)
+{
+ u8 ac, txq_id;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ for (ac = 0; ac < AC_NUM; ac++) {
+ txq_id = trans_pcie->ac_to_queue[ctx][ac];
+ IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
+ ac,
+ (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
+ ? "stopped" : "awake");
+ iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
+ }
+}
+
+const struct iwl_trans_ops trans_ops_pcie;
+
+static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
+{
+ struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
+ sizeof(struct iwl_trans_pcie),
+ GFP_KERNEL);
+ if (iwl_trans) {
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
+ iwl_trans->ops = &trans_ops_pcie;
+ iwl_trans->shrd = shrd;
+ trans_pcie->trans = iwl_trans;
+ spin_lock_init(&iwl_trans->hcmd_lock);
+ }
+
+ return iwl_trans;
+}
+
+static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
+}
+
+#define IWL_FLUSH_WAIT_MS 2000
+
+static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ int cnt;
+ unsigned long now = jiffies;
+ int ret = 0;
+
+ /* waiting for all the tx frames complete might take a while */
+ for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
+ if (cnt == trans->shrd->cmd_queue)
+ continue;
+ txq = &trans_pcie->txq[cnt];
+ q = &txq->q;
+ while (q->read_ptr != q->write_ptr && !time_after(jiffies,
+ now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+ msleep(1);
+
+ if (q->read_ptr != q->write_ptr) {
+ IWL_ERR(trans, "fail to flush all tx fifo queues\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * On every watchdog tick we check (latest) time stamp. If it does not
+ * change during timeout period and queue is not empty we reset firmware.
+ */
+static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[cnt];
+ struct iwl_queue *q = &txq->q;
+ unsigned long timeout;
+
+ if (q->read_ptr == q->write_ptr) {
+ txq->time_stamp = jiffies;
+ return 0;
+ }
+
+ timeout = txq->time_stamp +
+ msecs_to_jiffies(hw_params(trans).wd_timeout);
+
+ if (time_after(jiffies, timeout)) {
+ IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
+ hw_params(trans).wd_timeout);
+ IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n",
+ q->read_ptr, q->write_ptr);
+ return 1;
+ }
+
+ return 0;
+}
+
+static const char *get_fh_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+ IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+ IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IWL_CMD(FH_TSSR_TX_STATUS_REG);
+ IWL_CMD(FH_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+{
+ int i;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ int pos = 0;
+ size_t bufsz = 0;
+#endif
+ static const u32 fh_tbl[] = {
+ FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH_RSCSR_CHNL0_WPTR,
+ FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_MEM_RSSR_SHARED_CTRL_REG,
+ FH_MEM_RSSR_RX_STATUS_REG,
+ FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG
+ };
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(bus(trans), fh_tbl[i]));
+ }
+ return pos;
+ }
+#endif
+ IWL_ERR(trans, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ IWL_ERR(trans, " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(bus(trans), fh_tbl[i]));
+ }
+ return 0;
+}
+
+static const char *get_csr_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(CSR_HW_IF_CONFIG_REG);
+ IWL_CMD(CSR_INT_COALESCING);
+ IWL_CMD(CSR_INT);
+ IWL_CMD(CSR_INT_MASK);
+ IWL_CMD(CSR_FH_INT_STATUS);
+ IWL_CMD(CSR_GPIO_IN);
+ IWL_CMD(CSR_RESET);
+ IWL_CMD(CSR_GP_CNTRL);
+ IWL_CMD(CSR_HW_REV);
+ IWL_CMD(CSR_EEPROM_REG);
+ IWL_CMD(CSR_EEPROM_GP);
+ IWL_CMD(CSR_OTP_GP_REG);
+ IWL_CMD(CSR_GIO_REG);
+ IWL_CMD(CSR_GP_UCODE_REG);
+ IWL_CMD(CSR_GP_DRIVER_REG);
+ IWL_CMD(CSR_UCODE_DRV_GP1);
+ IWL_CMD(CSR_UCODE_DRV_GP2);
+ IWL_CMD(CSR_LED_REG);
+ IWL_CMD(CSR_DRAM_INT_TBL_REG);
+ IWL_CMD(CSR_GIO_CHICKEN_BITS);
+ IWL_CMD(CSR_ANA_PLL_CFG);
+ IWL_CMD(CSR_HW_REV_WA_REG);
+ IWL_CMD(CSR_DBG_HPET_MEM_REG);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+void iwl_dump_csr(struct iwl_trans *trans)
+{
+ int i;
+ static const u32 csr_tbl[] = {
+ CSR_HW_IF_CONFIG_REG,
+ CSR_INT_COALESCING,
+ CSR_INT,
+ CSR_INT_MASK,
+ CSR_FH_INT_STATUS,
+ CSR_GPIO_IN,
+ CSR_RESET,
+ CSR_GP_CNTRL,
+ CSR_HW_REV,
+ CSR_EEPROM_REG,
+ CSR_EEPROM_GP,
+ CSR_OTP_GP_REG,
+ CSR_GIO_REG,
+ CSR_GP_UCODE_REG,
+ CSR_GP_DRIVER_REG,
+ CSR_UCODE_DRV_GP1,
+ CSR_UCODE_DRV_GP2,
+ CSR_LED_REG,
+ CSR_DRAM_INT_TBL_REG,
+ CSR_GIO_CHICKEN_BITS,
+ CSR_ANA_PLL_CFG,
+ CSR_HW_REV_WA_REG,
+ CSR_DBG_HPET_MEM_REG
+ };
+ IWL_ERR(trans, "CSR values:\n");
+ IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
+ "CSR_INT_PERIODIC_REG)\n");
+ for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
+ IWL_ERR(trans, " %25s: 0X%08x\n",
+ get_csr_string(csr_tbl[i]),
+ iwl_read32(bus(trans), csr_tbl[i]));
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, trans, \
+ &iwl_dbgfs_##name##_ops)) \
+ return -ENOMEM; \
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FUNC(name) \
+static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name) \
+static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+
+static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define DEBUGFS_READ_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = iwl_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name) \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .open = iwl_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = iwl_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ int ret;
+ const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;
+
+ if (!trans_pcie->txq) {
+ IWL_ERR(trans, "txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
+ txq = &trans_pcie->txq[cnt];
+ q = &txq->q;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "hwq %.2d: read=%u write=%u stop=%d"
+ " swq_id=%#.2x (ac %d/hwq %d)\n",
+ cnt, q->read_ptr, q->write_ptr,
+ !!test_bit(cnt, trans_pcie->queue_stopped),
+ txq->swq_id, txq->swq_id & 3,
+ (txq->swq_id >> 2) & 0x1f);
+ if (cnt >= 4)
+ continue;
+ /* for the ACs, display the stop count too */
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " stop-count: %d\n",
+ atomic_read(&trans_pcie->queue_stop_count[cnt]));
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
+ rxq->read);
+ pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
+ rxq->write);
+ pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+ rxq->free_count);
+ if (rxq->rb_stts) {
+ pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+ le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "closed_rb_num: Not Allocated\n");
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_log_event_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -ENOMEM;
+
+ ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ }
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_log_event_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ u32 event_log_flag;
+ char buf[8];
+ int buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &event_log_flag) != 1)
+ return -EFAULT;
+ if (event_log_flag == 1)
+ iwl_dump_nic_event_log(trans, true, NULL, false);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+ int pos = 0;
+ char *buf;
+ int bufsz = 24 * 64; /* 24 items * 64 char per item */
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(trans, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Interrupt Statistics Report:\n");
+
+ pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+ isr_stats->hw);
+ pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+ isr_stats->sw);
+ if (isr_stats->sw || isr_stats->hw) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tLast Restarting Code: 0x%X\n",
+ isr_stats->err_code);
+ }
+#ifdef CONFIG_IWLWIFI_DEBUG
+ pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+ isr_stats->sch);
+ pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+ isr_stats->alive);
+#endif
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+ isr_stats->ctkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+ isr_stats->wakeup);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Rx command responses:\t\t %u\n", isr_stats->rx);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+ isr_stats->tx);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+ isr_stats->unhandled);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+ char buf[8];
+ int buf_size;
+ u32 reset_flag;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &reset_flag) != 1)
+ return -EFAULT;
+ if (reset_flag == 0)
+ memset(isr_stats, 0, sizeof(*isr_stats));
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_csr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ char buf[8];
+ int buf_size;
+ int csr;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &csr) != 1)
+ return -EFAULT;
+
+ iwl_dump_csr(trans);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+ ret = pos = iwl_dump_fh(trans, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf,
+ count, ppos, buf, pos);
+ kfree(buf);
+ }
+
+ return ret;
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_WRITE_FILE_OPS(csr);
+
+/*
+ * Create the debugfs files and directories
+ *
+ */
+static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+ struct dentry *dir)
+{
+ DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
+ DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
+ return 0;
+}
+#else
+static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+ struct dentry *dir)
+{ return 0; }
+
+#endif /*CONFIG_IWLWIFI_DEBUGFS */
+
+const struct iwl_trans_ops trans_ops_pcie = {
+ .alloc = iwl_trans_pcie_alloc,
+ .request_irq = iwl_trans_pcie_request_irq,
+ .start_device = iwl_trans_pcie_start_device,
+ .prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
+ .stop_device = iwl_trans_pcie_stop_device,
+
+ .tx_start = iwl_trans_pcie_tx_start,
+ .wake_any_queue = iwl_trans_pcie_wake_any_queue,
+
+ .send_cmd = iwl_trans_pcie_send_cmd,
+
+ .tx = iwl_trans_pcie_tx,
+ .reclaim = iwl_trans_pcie_reclaim,
+
+ .tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
+ .tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
+ .tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
+
+ .kick_nic = iwl_trans_pcie_kick_nic,
+
+ .free = iwl_trans_pcie_free,
+ .stop_queue = iwl_trans_pcie_stop_queue,
+
+ .dbgfs_register = iwl_trans_pcie_dbgfs_register,
+
+ .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
+ .check_stuck_queue = iwl_trans_pcie_check_stuck_queue,
+
+#ifdef CONFIG_PM_SLEEP
+ .suspend = iwl_trans_pcie_suspend,
+ .resume = iwl_trans_pcie_resume,
+#endif
+};
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
deleted file mode 100644
index 47486029040..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
+++ /dev/null
@@ -1,979 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/gfp.h>
-
-#include "iwl-dev.h"
-#include "iwl-agn.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-trans-int-pcie.h"
-
-/******************************************************************************
- *
- * RX path functions
- *
- ******************************************************************************/
-
-/*
- * Rx theory of operation
- *
- * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
- * each of which point to Receive Buffers to be filled by the NIC. These get
- * used not only for Rx frames, but for any command response or notification
- * from the NIC. The driver and NIC manage the Rx buffers by means
- * of indexes into the circular buffer.
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl_rx_queue_alloc() Allocates rx_free
- * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
- * queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
- * READ INDEX, detaching the SKB from the pool.
- * Moves the packet buffer from queue to rx_used.
- * Calls iwl_rx_queue_restock to refill any empty
- * slots.
- * ...
- *
- */
-
-/**
- * iwl_rx_queue_space - Return number of free slots available in queue.
- */
-static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
-{
- int s = q->read - q->write;
- if (s <= 0)
- s += RX_QUEUE_SIZE;
- /* keep some buffer to not confuse full and empty queue */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
-}
-
-/**
- * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
- */
-void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
- struct iwl_rx_queue *q)
-{
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&q->lock, flags);
-
- if (q->need_update == 0)
- goto exit_unlock;
-
- if (priv->cfg->base_params->shadow_reg_enable) {
- /* shadow register enabled */
- /* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write_actual);
- } else {
- /* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
- "Rx queue requesting wakeup,"
- " GP1 = 0x%x\n", reg);
- iwl_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
- }
-
- q->write_actual = (q->write & ~0x7);
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
- q->write_actual);
-
- /* Else device is assumed to be awake */
- } else {
- /* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
- q->write_actual);
- }
- }
- q->need_update = 0;
-
- exit_unlock:
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-/**
- * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
-
- spin_lock_irqsave(&rxq->lock, flags);
- while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* The overwritten rxb must be a used one */
- rxb = rxq->queue[rxq->write];
- BUG_ON(rxb && rxb->page);
-
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
- rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-
-/**
- * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
- */
-static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "alloc_pages failed, "
- "order: %d\n",
- priv->hw_params.rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv, "Failed to alloc_pages with %s."
- "Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ?
- "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- BUG_ON(rxb->page);
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma = dma_map_page(priv->bus->dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- DMA_FROM_DEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwlagn_rx_replenish(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- iwlagn_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwlagn_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void iwlagn_rx_replenish_now(struct iwl_priv *priv)
-{
- iwlagn_rx_allocate(priv, GFP_ATOMIC);
-
- iwlagn_rx_queue_restock(priv);
-}
-
-void iwl_bg_rx_replenish(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rx_replenish);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- iwlagn_rx_replenish(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-static void iwl_rx_handle(struct iwl_priv *priv)
-{
- struct iwl_rx_mem_buffer *rxb;
- struct iwl_rx_packet *pkt;
- struct iwl_rx_queue *rxq = &priv->rxq;
- u32 r, i;
- int reclaim;
- unsigned long flags;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty;
-
- /* uCode's read index (stored in shared DRAM) indicates the last Rx
- * buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
- i = rxq->read;
-
- /* Rx interrupt, but nothing sent from uCode */
- if (i == r)
- IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
- /* calculate total frames need to be restock after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
-
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
-
- while (i != r) {
- int len;
-
- rxb = rxq->queue[i];
-
- /* If an RXB doesn't have a Rx queue slot associated with it,
- * then a bug has been introduced in the queue refilling
- * routines -- catch it here */
- if (WARN_ON(rxb == NULL)) {
- i = (i + 1) & RX_QUEUE_MASK;
- continue;
- }
-
- rxq->queue[i] = NULL;
-
- dma_unmap_page(priv->bus->dev, rxb->page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- DMA_FROM_DEVICE);
- pkt = rxb_addr(rxb);
-
- IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
- i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
-
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
- trace_iwlwifi_dev_rx(priv, pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
- (pkt->hdr.cmd != REPLY_RX) &&
- (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
- (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
- (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
- (pkt->hdr.cmd != REPLY_TX);
-
- iwl_rx_dispatch(priv, rxb);
-
- /*
- * XXX: After here, we should always check rxb->page
- * against NULL before touching it or its virtual
- * memory (pkt). Because some rx_handler might have
- * already taken or freed the pages.
- */
-
- if (reclaim) {
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking
- * trans_send_cmd()
- * as we reclaim the driver command queue */
- if (rxb->page)
- iwl_tx_cmd_complete(priv, rxb);
- else
- IWL_WARN(priv, "Claim null rxb?\n");
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
- if (rxb->page != NULL) {
- rxb->page_dma = dma_map_page(priv->bus->dev, rxb->page,
- 0, PAGE_SIZE << priv->hw_params.rx_page_order,
- DMA_FROM_DEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
- * restock the Rx queue so ucode wont assert. */
- if (fill_rx) {
- count++;
- if (count >= 8) {
- rxq->read = i;
- iwlagn_rx_replenish_now(priv);
- count = 0;
- }
- }
- }
-
- /* Backtrack one entry */
- rxq->read = i;
- if (fill_rx)
- iwlagn_rx_replenish_now(priv);
- else
- iwlagn_rx_queue_restock(priv);
-}
-
-/* tasklet for iwlagn interrupt */
-void iwl_irq_tasklet(struct iwl_priv *priv)
-{
- u32 inta = 0;
- u32 handled = 0;
- unsigned long flags;
- u32 i;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 inta_mask;
-#endif
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Ack/clear/reset pending uCode interrupts.
- * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
- */
- /* There is a hardware bug in the interrupt mask function that some
- * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
- * they are disabled in the CSR_INT_MASK register. Furthermore the
- * ICT interrupt handling mechanism has another bug that might cause
- * these unmasked interrupts fail to be detected. We workaround the
- * hardware bugs here by ACKing all the possible interrupts so that
- * interrupt coalescing can still be achieved.
- */
- iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask);
-
- inta = priv->inta;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
- /* just for debug */
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ",
- inta, inta_mask);
- }
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* saved interrupt in inta variable now we can reset priv->inta */
- priv->inta = 0;
-
- /* Now service all interrupt bits discovered above. */
- if (inta & CSR_INT_BIT_HW_ERR) {
- IWL_ERR(priv, "Hardware error detected. Restarting.\n");
-
- /* Tell the device to stop sending interrupts */
- iwl_disable_interrupts(priv);
-
- priv->isr_stats.hw++;
- iwl_irq_handle_error(priv);
-
- handled |= CSR_INT_BIT_HW_ERR;
-
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
- IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
- "the frame/frames.\n");
- priv->isr_stats.sch++;
- }
-
- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(priv, "Alive interrupt\n");
- priv->isr_stats.alive++;
- }
- }
-#endif
- /* Safely ignore these bits for debug checks below */
- inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
- /* HW RF KILL switch toggled */
- if (inta & CSR_INT_BIT_RF_KILL) {
- int hw_rf_kill = 0;
- if (!(iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rf_kill = 1;
-
- IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
- hw_rf_kill ? "disable radio" : "enable radio");
-
- priv->isr_stats.rfkill++;
-
- /* driver only loads ucode once setting the interface up.
- * the driver allows loading the ucode even if the radio
- * is killed. Hence update the killswitch state here. The
- * rfkill handler will care about restarting if needed.
- */
- if (!test_bit(STATUS_ALIVE, &priv->status)) {
- if (hw_rf_kill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
- }
-
- handled |= CSR_INT_BIT_RF_KILL;
- }
-
- /* Chip got too hot and stopped itself */
- if (inta & CSR_INT_BIT_CT_KILL) {
- IWL_ERR(priv, "Microcode CT kill error detected.\n");
- priv->isr_stats.ctkill++;
- handled |= CSR_INT_BIT_CT_KILL;
- }
-
- /* Error detected by uCode */
- if (inta & CSR_INT_BIT_SW_ERR) {
- IWL_ERR(priv, "Microcode SW error detected. "
- " Restarting 0x%X.\n", inta);
- priv->isr_stats.sw++;
- iwl_irq_handle_error(priv);
- handled |= CSR_INT_BIT_SW_ERR;
- }
-
- /* uCode wakes up after power-down sleep */
- if (inta & CSR_INT_BIT_WAKEUP) {
- IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
- for (i = 0; i < priv->hw_params.max_txq_num; i++)
- iwl_txq_update_write_ptr(priv, &priv->txq[i]);
-
- priv->isr_stats.wakeup++;
-
- handled |= CSR_INT_BIT_WAKEUP;
- }
-
- /* All uCode command responses, including Tx command responses,
- * Rx "responses" (frame-received notification), and other
- * notifications from uCode come through here*/
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
- CSR_INT_BIT_RX_PERIODIC)) {
- IWL_DEBUG_ISR(priv, "Rx interrupt\n");
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
- handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
- iwl_write32(priv, CSR_FH_INT_STATUS,
- CSR_FH_INT_RX_MASK);
- }
- if (inta & CSR_INT_BIT_RX_PERIODIC) {
- handled |= CSR_INT_BIT_RX_PERIODIC;
- iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
- }
- /* Sending RX interrupt require many steps to be done in the
- * the device:
- * 1- write interrupt to current index in ICT table.
- * 2- dma RX frame.
- * 3- update RX shared data to indicate last write index.
- * 4- send interrupt.
- * This could lead to RX race, driver could receive RX interrupt
- * but the shared data changes does not reflect this;
- * periodic interrupt will detect any dangling Rx activity.
- */
-
- /* Disable periodic interrupt; we use it as just a one-shot. */
- iwl_write8(priv, CSR_INT_PERIODIC_REG,
- CSR_INT_PERIODIC_DIS);
- iwl_rx_handle(priv);
-
- /*
- * Enable periodic interrupt in 8 msec only if we received
- * real RX interrupt (instead of just periodic int), to catch
- * any dangling Rx interrupt. If it was just the periodic
- * interrupt, there was no dangling Rx activity, and no need
- * to extend the periodic interrupt; one-shot is enough.
- */
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
- iwl_write8(priv, CSR_INT_PERIODIC_REG,
- CSR_INT_PERIODIC_ENA);
-
- priv->isr_stats.rx++;
- }
-
- /* This "Tx" DMA channel is used only for loading uCode */
- if (inta & CSR_INT_BIT_FH_TX) {
- iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
- IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
- priv->isr_stats.tx++;
- handled |= CSR_INT_BIT_FH_TX;
- /* Wake up uCode load routine, now that load is complete */
- priv->ucode_write_complete = 1;
- wake_up_interruptible(&priv->wait_command_queue);
- }
-
- if (inta & ~handled) {
- IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
- priv->isr_stats.unhandled++;
- }
-
- if (inta & ~(priv->inta_mask)) {
- IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~priv->inta_mask);
- }
-
- /* Re-enable all interrupts */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_enable_interrupts(priv);
- /* Re-enable RF_KILL if it occurred */
- else if (handled & CSR_INT_BIT_RF_KILL)
- iwl_enable_rfkill_int(priv);
-}
-
-/******************************************************************************
- *
- * ICT functions
- *
- ******************************************************************************/
-#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
-
-/* Free dram table */
-void iwl_free_isr_ict(struct iwl_priv *priv)
-{
- if (priv->ict_tbl_vir) {
- dma_free_coherent(priv->bus->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- priv->ict_tbl_vir,
- priv->ict_tbl_dma);
- priv->ict_tbl_vir = NULL;
- memset(&priv->ict_tbl_dma, 0,
- sizeof(priv->ict_tbl_dma));
- memset(&priv->aligned_ict_tbl_dma, 0,
- sizeof(priv->aligned_ict_tbl_dma));
- }
-}
-
-
-/* allocate dram shared table it is a PAGE_SIZE aligned
- * also reset all data related to ICT table interrupt.
- */
-int iwl_alloc_isr_ict(struct iwl_priv *priv)
-{
-
- /* allocate shrared data table */
- priv->ict_tbl_vir =
- dma_alloc_coherent(priv->bus->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- &priv->ict_tbl_dma, GFP_KERNEL);
- if (!priv->ict_tbl_vir)
- return -ENOMEM;
-
- /* align table to PAGE_SIZE boundary */
- priv->aligned_ict_tbl_dma =
- ALIGN(priv->ict_tbl_dma, PAGE_SIZE);
-
- IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
- (unsigned long long)priv->ict_tbl_dma,
- (unsigned long long)priv->aligned_ict_tbl_dma,
- (int)(priv->aligned_ict_tbl_dma -
- priv->ict_tbl_dma));
-
- priv->ict_tbl = priv->ict_tbl_vir +
- (priv->aligned_ict_tbl_dma -
- priv->ict_tbl_dma);
-
- IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
- priv->ict_tbl, priv->ict_tbl_vir,
- (int)(priv->aligned_ict_tbl_dma -
- priv->ict_tbl_dma));
-
- /* reset table and index to all 0 */
- memset(priv->ict_tbl_vir, 0,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
- priv->ict_index = 0;
-
- /* add periodic RX interrupt */
- priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
- return 0;
-}
-
-/* Device is going up inform it about using ICT interrupt table,
- * also we need to tell the driver to start using ICT interrupt.
- */
-int iwl_reset_ict(struct iwl_priv *priv)
-{
- u32 val;
- unsigned long flags;
-
- if (!priv->ict_tbl_vir)
- return 0;
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
-
- memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
-
- val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;
-
- val |= CSR_DRAM_INT_TBL_ENABLE;
- val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
-
- IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
- "aligned dma address %Lx\n",
- val,
- (unsigned long long)priv->aligned_ict_tbl_dma);
-
- iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
- priv->use_ict = true;
- priv->ict_index = 0;
- iwl_write32(priv, CSR_INT, priv->inta_mask);
- iwl_enable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-/* Device is going down disable ict interrupt usage */
-void iwl_disable_ict(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->use_ict = false;
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static irqreturn_t iwl_isr(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- unsigned long flags;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 inta_fh;
-#endif
- if (!priv)
- return IRQ_NONE;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(priv, CSR_INT);
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta) {
- IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
- "fh 0x%08x\n", inta, inta_mask, inta_fh);
- }
-#endif
-
- priv->inta |= inta;
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&priv->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
- !priv->inta)
- iwl_enable_interrupts(priv);
-
- unplugged:
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if disabled by irq and no schedules tasklet. */
- if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
- iwl_enable_interrupts(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_NONE;
-}
-
-/* interrupt handler using ict table, with this interrupt driver will
- * stop using INTA register to get device's interrupt, reading this register
- * is expensive, device will write interrupts in ICT dram table, increment
- * index then will fire interrupt to driver, driver will OR all ICT table
- * entries from current index up to table entry with 0 value. the result is
- * the interrupt we need to service, driver will set the entries back to 0 and
- * set index.
- */
-irqreturn_t iwl_isr_ict(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- u32 val = 0;
- unsigned long flags;
-
- if (!priv)
- return IRQ_NONE;
-
- /* dram interrupt table not set yet,
- * use legacy interrupt.
- */
- if (!priv->use_ict)
- return iwl_isr(irq, data);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here.
- */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!priv->ict_tbl[priv->ict_index]) {
- IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- /* read all entries that not 0 start with ict_index */
- while (priv->ict_tbl[priv->ict_index]) {
-
- val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
- IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
- priv->ict_index,
- le32_to_cpu(
- priv->ict_tbl[priv->ict_index]));
- priv->ict_tbl[priv->ict_index] = 0;
- priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
- ICT_COUNT);
-
- }
-
- /* We should not get this value, just ignore it. */
- if (val == 0xffffffff)
- val = 0;
-
- /*
- * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
- * (bit 15 before shifting it to 31) to clear when using interrupt
- * coalescing. fortunately, bits 18 and 19 stay set when this happens
- * so we use them to decide on the real state of the Rx bit.
- * In order words, bit 15 is set if bit 18 or bit 19 are set.
- */
- if (val & 0xC0000)
- val |= 0x8000;
-
- inta = (0xff & val) | ((0xff00 & val) << 16);
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
- inta, inta_mask, val);
-
- inta &= priv->inta_mask;
- priv->inta |= inta;
-
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&priv->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
- !priv->inta) {
- /* Allow interrupt if was disabled by this handler and
- * no tasklet was schedules, We should not enable interrupt,
- * tasklet will enable it.
- */
- iwl_enable_interrupts(priv);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service.
- * only Re-enable if disabled by irq.
- */
- if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
- iwl_enable_interrupts(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_NONE;
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
deleted file mode 100644
index 222d410c586..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ /dev/null
@@ -1,1038 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <net/mac80211.h>
-
-#include "iwl-agn.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-trans-int-pcie.h"
-
-/**
- * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
-{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int write_ptr = txq->q.write_ptr;
- int txq_id = txq->q.id;
- u8 sec_ctl = 0;
- u8 sta_id = 0;
- u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
-
- WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
- sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
- sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
-
- switch (sec_ctl & TX_CMD_SEC_MSK) {
- case TX_CMD_SEC_CCM:
- len += CCMP_MIC_LEN;
- break;
- case TX_CMD_SEC_TKIP:
- len += TKIP_ICV_LEN;
- break;
- case TX_CMD_SEC_WEP:
- len += WEP_IV_LEN + WEP_ICV_LEN;
- break;
- }
-
- bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
-
-/**
- * iwl_txq_update_write_ptr - Send new write index to hardware
- */
-void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- u32 reg = 0;
- int txq_id = txq->q.id;
-
- if (txq->need_update == 0)
- return;
-
- if (priv->cfg->base_params->shadow_reg_enable) {
- /* shadow register enabled */
- iwl_write32(priv, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
- } else {
- /* if we're trying to save power */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* wake up nic if it's powered down ...
- * uCode will wake up, and interrupt us again, so next
- * time we'll skip this part. */
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
- "Tx queue %d requesting wakeup,"
- " GP1 = 0x%x\n", txq_id, reg);
- iwl_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- return;
- }
-
- iwl_write_direct32(priv, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
-
- /*
- * else not in power-save mode,
- * uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
- */
- } else
- iwl_write32(priv, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
- }
- txq->need_update = 0;
-}
-
-static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- addr |=
- ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
-
- return addr;
-}
-
-static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
- dma_addr_t addr, u16 len)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
-
- put_unaligned_le32(addr, &tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- hi_n_len |= ((addr >> 16) >> 16) & 0xF;
-
- tb->hi_n_len = cpu_to_le16(hi_n_len);
-
- tfd->num_tbs = idx + 1;
-}
-
-static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
- return tfd->num_tbs & 0x1f;
-}
-
-static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
- struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
-{
- int i;
- int num_tbs;
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_tfd_get_num_tbs(tfd);
-
- if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
- /* @todo issue fatal error, it is quite serious situation */
- return;
- }
-
- /* Unmap tx_cmd */
- if (num_tbs)
- dma_unmap_single(priv->bus->dev,
- dma_unmap_addr(meta, mapping),
- dma_unmap_len(meta, len),
- DMA_BIDIRECTIONAL);
-
- /* Unmap chunks, if any. */
- for (i = 1; i < num_tbs; i++)
- dma_unmap_single(priv->bus->dev, iwl_tfd_tb_get_addr(tfd, i),
- iwl_tfd_tb_get_len(tfd, i), dma_dir);
-}
-
-/**
- * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv - driver private data
- * @txq - tx queue
- * @index - the index of the TFD to be freed
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int index)
-{
- struct iwl_tfd *tfd_tmp = txq->tfds;
-
- iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
- DMA_TO_DEVICE);
-
- /* free SKB */
- if (txq->txb) {
- struct sk_buff *skb;
-
- skb = txq->txb[index].skb;
-
- /* can be called from irqs-disabled context */
- if (skb) {
- dev_kfree_skb_any(skb);
- txq->txb[index].skb = NULL;
- }
- }
-}
-
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len,
- u8 reset)
-{
- struct iwl_queue *q;
- struct iwl_tfd *tfd, *tfd_tmp;
- u32 num_tbs;
-
- q = &txq->q;
- tfd_tmp = txq->tfds;
- tfd = &tfd_tmp[q->write_ptr];
-
- if (reset)
- memset(tfd, 0, sizeof(*tfd));
-
- num_tbs = iwl_tfd_get_num_tbs(tfd);
-
- /* Each TFD can point to a maximum 20 Tx buffers */
- if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERR(priv, "Error can not send more than %d chunks\n",
- IWL_NUM_OF_TBS);
- return -EINVAL;
- }
-
- if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
- return -EINVAL;
-
- if (unlikely(addr & ~IWL_TX_DMA_MASK))
- IWL_ERR(priv, "Unaligned address = %llx\n",
- (unsigned long long)addr);
-
- iwl_tfd_set_tb(tfd, num_tbs, addr, len);
-
- return 0;
-}
-
-/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
- * DMA services
- *
- * Theory of operation
- *
- * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
- * of buffer descriptors, each of which points to one or more data buffers for
- * the device to read from or fill. Driver and device exchange status of each
- * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
- * entries in each circular buffer, to protect against confusing empty and full
- * queue states.
- *
- * The device reads or writes the data in the queues via the device's several
- * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
- *
- * For Tx queue, there are low mark and high mark limits. If, after queuing
- * the packet for Tx, free space become < low mark, Tx queue stopped. When
- * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
- * Tx queue resumed.
- *
- ***************************************************/
-
-int iwl_queue_space(const struct iwl_queue *q)
-{
- int s = q->read_ptr - q->write_ptr;
-
- if (q->read_ptr > q->write_ptr)
- s -= q->n_bd;
-
- if (s <= 0)
- s += q->n_window;
- /* keep some reserve to not confuse empty and full situations */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
-}
-
-/**
- * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
- int count, int slots_num, u32 id)
-{
- q->n_bd = count;
- q->n_window = slots_num;
- q->id = id;
-
- /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
- * and iwl_queue_dec_wrap are broken. */
- if (WARN_ON(!is_power_of_2(count)))
- return -EINVAL;
-
- /* slots_num must be power-of-two size, otherwise
- * get_cmd_index is broken. */
- if (WARN_ON(!is_power_of_2(slots_num)))
- return -EINVAL;
-
- q->low_mark = q->n_window / 4;
- if (q->low_mark < 4)
- q->low_mark = 4;
-
- q->high_mark = q->n_window / 8;
- if (q->high_mark < 2)
- q->high_mark = 2;
-
- q->write_ptr = q->read_ptr = 0;
-
- return 0;
-}
-
-/*TODO: this functions should NOT be exported from trans module - export it
- * until the reclaim flow will be brought to the transport module too.
- * Add a declaration to make sparse happy */
-void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-
-void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int read_ptr = txq->q.read_ptr;
- u8 sta_id = 0;
- __le16 bc_ent;
-
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != priv->cmd_queue)
- sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
-
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
-
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
-}
-
-static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
- u16 txq_id)
-{
- u32 tbl_dw_addr;
- u32 tbl_dw;
- u16 scd_q2ratid;
-
- scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
- tbl_dw_addr = priv->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
-
- tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
-
- if (txq_id & 0x1)
- tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
- else
- tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
- iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
- return 0;
-}
-
-static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
-{
- /* Simply stop the queue, but don't change any configuration;
- * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_write_prph(priv,
- SCD_QUEUE_STATUS_BITS(txq_id),
- (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
- (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
- int txq_id, u32 index)
-{
- iwl_write_direct32(priv, HBUS_TARG_WRPTR,
- (index & 0xff) | (txq_id << 8));
- iwl_write_prph(priv, SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry)
-{
- int txq_id = txq->q.id;
- int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
- iwl_write_prph(priv, SCD_QUEUE_STATUS_BITS(txq_id),
- (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
- (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
- SCD_QUEUE_STTS_REG_MSK);
-
- txq->sched_retry = scd_retry;
-
- IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
- active ? "Activate" : "Deactivate",
- scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
-}
-
-void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
- int frame_limit)
-{
- int tx_fifo, txq_id, ssn_idx;
- u16 ra_tid;
- unsigned long flags;
- struct iwl_tid_data *tid_data;
-
- if (WARN_ON(sta_id == IWL_INVALID_STATION))
- return;
- if (WARN_ON(tid >= MAX_TID_COUNT))
- return;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- ssn_idx = SEQ_TO_SN(tid_data->seq_number);
- txq_id = tid_data->agg.txq_id;
- tx_fifo = tid_data->agg.tx_fifo;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ra_tid = BUILD_RAxTID(sta_id, tid);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Stop this Tx queue before configuring it */
- iwlagn_tx_queue_stop_scheduler(priv, txq_id);
-
- /* Map receiver-address / traffic-ID to this queue */
- iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
- /* Set this queue as a chain-building queue */
- iwl_set_bits_prph(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id));
-
- /* enable aggregations for the queue */
- iwl_set_bits_prph(priv, SCD_AGGR_SEL, (1<<txq_id));
-
- /* Place first TFD at index corresponding to start sequence number.
- * Assumes that ssn_idx is valid (!= 0xFFF) */
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- /* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
- sizeof(u32),
- ((frame_limit <<
- SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((frame_limit <<
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
-
- iwl_set_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
-
- /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
-{
- if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWLAGN_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_ERR(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
- IWLAGN_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- iwlagn_tx_queue_stop_scheduler(priv, txq_id);
-
- iwl_clear_bits_prph(priv, SCD_AGGR_SEL, (1 << txq_id));
-
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- /* supposes that ssn_idx is valid (!= 0xFFF) */
- iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- iwl_clear_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(priv, txq_id);
- iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
- return 0;
-}
-
-/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-
-/**
- * iwl_enqueue_hcmd - enqueue a uCode command
- * @priv: device private data point
- * @cmd: a point to the ucode command structure
- *
- * The function returns < 0 values to indicate the operation is
- * failed. On success, it turns the index (> 0) of command in the
- * command queue.
- */
-static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct iwl_queue *q = &txq->q;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- dma_addr_t phys_addr;
- unsigned long flags;
- u32 idx;
- u16 copy_size, cmd_size;
- bool is_ct_kill = false;
- bool had_nocopy = false;
- int i;
- u8 *cmd_dest;
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
- const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
- int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
- int trace_idx;
-#endif
-
- if (test_bit(STATUS_FW_ERROR, &priv->status)) {
- IWL_WARN(priv, "fw recovery, no hcmd send\n");
- return -EIO;
- }
-
- if ((priv->ucode_owner == IWL_OWNERSHIP_TM) &&
- !(cmd->flags & CMD_ON_DEMAND)) {
- IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
- return -EIO;
- }
-
- copy_size = sizeof(out_cmd->hdr);
- cmd_size = sizeof(out_cmd->hdr);
-
- /* need one for the header if the first is NOCOPY */
- BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
-
- for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
- if (!cmd->len[i])
- continue;
- if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
- had_nocopy = true;
- } else {
- /* NOCOPY must not be followed by normal! */
- if (WARN_ON(had_nocopy))
- return -EINVAL;
- copy_size += cmd->len[i];
- }
- cmd_size += cmd->len[i];
- }
-
- /*
- * If any of the command structures end up being larger than
- * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
- * allocated into separate TFDs, then we will need to
- * increase the size of the buffers.
- */
- if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
- return -EINVAL;
-
- if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
- IWL_WARN(priv, "Not sending command - %s KILL\n",
- iwl_is_rfkill(priv) ? "RF" : "CT");
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->hcmd_lock, flags);
-
- if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-
- IWL_ERR(priv, "No space in command queue\n");
- is_ct_kill = iwl_check_for_ct_kill(priv);
- if (!is_ct_kill) {
- IWL_ERR(priv, "Restarting adapter due to queue full\n");
- iwlagn_fw_error(priv, false);
- }
- return -ENOSPC;
- }
-
- idx = get_cmd_index(q, q->write_ptr);
- out_cmd = txq->cmd[idx];
- out_meta = &txq->meta[idx];
-
- memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
- if (cmd->flags & CMD_WANT_SKB)
- out_meta->source = cmd;
- if (cmd->flags & CMD_ASYNC)
- out_meta->callback = cmd->callback;
-
- /* set up the header */
-
- out_cmd->hdr.cmd = cmd->id;
- out_cmd->hdr.flags = 0;
- out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
-
- /* and copy the data that needs to be copied */
-
- cmd_dest = &out_cmd->cmd.payload[0];
- for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
- if (!cmd->len[i])
- continue;
- if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
- break;
- memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
- cmd_dest += cmd->len[i];
- }
-
- IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
- "%d bytes at %d[%d]:%d\n",
- get_cmd_string(out_cmd->hdr.cmd),
- out_cmd->hdr.cmd,
- le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
- q->write_ptr, idx, priv->cmd_queue);
-
- phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
- idx = -ENOMEM;
- goto out;
- }
-
- dma_unmap_addr_set(out_meta, mapping, phys_addr);
- dma_unmap_len_set(out_meta, len, copy_size);
-
- iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
- trace_bufs[0] = &out_cmd->hdr;
- trace_lens[0] = copy_size;
- trace_idx = 1;
-#endif
-
- for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
- if (!cmd->len[i])
- continue;
- if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
- continue;
- phys_addr = dma_map_single(priv->bus->dev, (void *)cmd->data[i],
- cmd->len[i], DMA_BIDIRECTIONAL);
- if (dma_mapping_error(priv->bus->dev, phys_addr)) {
- iwlagn_unmap_tfd(priv, out_meta,
- &txq->tfds[q->write_ptr],
- DMA_BIDIRECTIONAL);
- idx = -ENOMEM;
- goto out;
- }
-
- iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
- cmd->len[i], 0);
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
- trace_bufs[trace_idx] = cmd->data[i];
- trace_lens[trace_idx] = cmd->len[i];
- trace_idx++;
-#endif
- }
-
- out_meta->flags = cmd->flags;
-
- txq->need_update = 1;
-
- /* check that tracing gets all possible blocks */
- BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
- trace_iwlwifi_dev_hcmd(priv, cmd->flags,
- trace_bufs[0], trace_lens[0],
- trace_bufs[1], trace_lens[1],
- trace_bufs[2], trace_lens[2]);
-#endif
-
- /* Increment and update queue's write index */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(priv, txq);
-
- out:
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
- return idx;
-}
-
-/**
- * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As result, some free space forms. If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- int nfreed = 0;
-
- if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
- IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
- "index %d is out of range [0-%d] %d %d.\n", __func__,
- txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
- return;
- }
-
- for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- if (nfreed++ > 0) {
- IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
- q->write_ptr, q->read_ptr);
- iwlagn_fw_error(priv, false);
- }
-
- }
-}
-
-/**
- * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
- * @rxb: Rx buffer to reclaim
- *
- * If an Rx buffer has an async callback associated with it the callback
- * will be executed. The attached skb (if present) will only be freed
- * if the callback returns 1
- */
-void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- int cmd_index;
- struct iwl_device_cmd *cmd;
- struct iwl_cmd_meta *meta;
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- unsigned long flags;
-
- /* If a Tx command is being handled and it isn't in the actual
- * command queue then there a command routing bug has been introduced
- * in the queue management code. */
- if (WARN(txq_id != priv->cmd_queue,
- "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, priv->cmd_queue, sequence,
- priv->txq[priv->cmd_queue].q.read_ptr,
- priv->txq[priv->cmd_queue].q.write_ptr)) {
- iwl_print_hex_error(priv, pkt, 32);
- return;
- }
-
- cmd_index = get_cmd_index(&txq->q, index);
- cmd = txq->cmd[cmd_index];
- meta = &txq->meta[cmd_index];
-
- txq->time_stamp = jiffies;
-
- iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
-
- /* Input error checking is done when commands are added to queue. */
- if (meta->flags & CMD_WANT_SKB) {
- meta->source->reply_page = (unsigned long)rxb_addr(rxb);
- rxb->page = NULL;
- } else if (meta->callback)
- meta->callback(priv, cmd, pkt);
-
- spin_lock_irqsave(&priv->hcmd_lock, flags);
-
- iwl_hcmd_queue_reclaim(priv, txq_id, index);
-
- if (!(meta->flags & CMD_ASYNC)) {
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
- get_cmd_string(cmd->hdr.cmd));
- wake_up_interruptible(&priv->wait_command_queue);
- }
-
- meta->flags = 0;
-
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-}
-
-const char *get_cmd_string(u8 cmd)
-{
- switch (cmd) {
- IWL_CMD(REPLY_ALIVE);
- IWL_CMD(REPLY_ERROR);
- IWL_CMD(REPLY_RXON);
- IWL_CMD(REPLY_RXON_ASSOC);
- IWL_CMD(REPLY_QOS_PARAM);
- IWL_CMD(REPLY_RXON_TIMING);
- IWL_CMD(REPLY_ADD_STA);
- IWL_CMD(REPLY_REMOVE_STA);
- IWL_CMD(REPLY_REMOVE_ALL_STA);
- IWL_CMD(REPLY_TXFIFO_FLUSH);
- IWL_CMD(REPLY_WEPKEY);
- IWL_CMD(REPLY_TX);
- IWL_CMD(REPLY_LEDS_CMD);
- IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
- IWL_CMD(COEX_PRIORITY_TABLE_CMD);
- IWL_CMD(COEX_MEDIUM_NOTIFICATION);
- IWL_CMD(COEX_EVENT_CMD);
- IWL_CMD(REPLY_QUIET_CMD);
- IWL_CMD(REPLY_CHANNEL_SWITCH);
- IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
- IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
- IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
- IWL_CMD(POWER_TABLE_CMD);
- IWL_CMD(PM_SLEEP_NOTIFICATION);
- IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
- IWL_CMD(REPLY_SCAN_CMD);
- IWL_CMD(REPLY_SCAN_ABORT_CMD);
- IWL_CMD(SCAN_START_NOTIFICATION);
- IWL_CMD(SCAN_RESULTS_NOTIFICATION);
- IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
- IWL_CMD(BEACON_NOTIFICATION);
- IWL_CMD(REPLY_TX_BEACON);
- IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
- IWL_CMD(QUIET_NOTIFICATION);
- IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
- IWL_CMD(MEASURE_ABORT_NOTIFICATION);
- IWL_CMD(REPLY_BT_CONFIG);
- IWL_CMD(REPLY_STATISTICS_CMD);
- IWL_CMD(STATISTICS_NOTIFICATION);
- IWL_CMD(REPLY_CARD_STATE_CMD);
- IWL_CMD(CARD_STATE_NOTIFICATION);
- IWL_CMD(MISSED_BEACONS_NOTIFICATION);
- IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
- IWL_CMD(SENSITIVITY_CMD);
- IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
- IWL_CMD(REPLY_RX_PHY_CMD);
- IWL_CMD(REPLY_RX_MPDU_CMD);
- IWL_CMD(REPLY_RX);
- IWL_CMD(REPLY_COMPRESSED_BA);
- IWL_CMD(CALIBRATION_CFG_CMD);
- IWL_CMD(CALIBRATION_RES_NOTIFICATION);
- IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
- IWL_CMD(REPLY_TX_POWER_DBM_CMD);
- IWL_CMD(TEMPERATURE_NOTIFICATION);
- IWL_CMD(TX_ANT_CONFIGURATION_CMD);
- IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
- IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
- IWL_CMD(REPLY_BT_COEX_PROT_ENV);
- IWL_CMD(REPLY_WIPAN_PARAMS);
- IWL_CMD(REPLY_WIPAN_RXON);
- IWL_CMD(REPLY_WIPAN_RXON_TIMING);
- IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
- IWL_CMD(REPLY_WIPAN_QOS_PARAM);
- IWL_CMD(REPLY_WIPAN_WEPKEY);
- IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
- IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
- IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
- IWL_CMD(REPLY_WOWLAN_PATTERNS);
- IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
- IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
- IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
- IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
- IWL_CMD(REPLY_WOWLAN_GET_STATUS);
- default:
- return "UNKNOWN";
-
- }
-}
-
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
-
-static void iwl_generic_cmd_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
-{
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
- get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- switch (cmd->hdr.cmd) {
- case REPLY_TX_LINK_QUALITY_CMD:
- case SENSITIVITY_CMD:
- IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
- get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- break;
- default:
- IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
- get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- }
-#endif
-}
-
-static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- int ret;
-
- /* An asynchronous command can not expect an SKB to be set. */
- if (WARN_ON(cmd->flags & CMD_WANT_SKB))
- return -EINVAL;
-
- /* Assign a generic callback if one is not provided */
- if (!cmd->callback)
- cmd->callback = iwl_generic_cmd_callback;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EBUSY;
-
- ret = iwl_enqueue_hcmd(priv, cmd);
- if (ret < 0) {
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
- get_cmd_string(cmd->id), ret);
- return ret;
- }
- return 0;
-}
-
-static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- int cmd_idx;
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- /* A synchronous command can not have a callback set. */
- if (WARN_ON(cmd->callback))
- return -EINVAL;
-
- IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
- get_cmd_string(cmd->id));
-
- set_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
- get_cmd_string(cmd->id));
-
- cmd_idx = iwl_enqueue_hcmd(priv, cmd);
- if (cmd_idx < 0) {
- ret = cmd_idx;
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
- get_cmd_string(cmd->id), ret);
- return ret;
- }
-
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
- HOST_COMPLETE_TIMEOUT);
- if (!ret) {
- if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
- IWL_ERR(priv,
- "Error sending %s: time out after %dms.\n",
- get_cmd_string(cmd->id),
- jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command"
- "%s\n", get_cmd_string(cmd->id));
- ret = -ETIMEDOUT;
- goto cancel;
- }
- }
-
- if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
- IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
- get_cmd_string(cmd->id));
- ret = -ECANCELED;
- goto fail;
- }
- if (test_bit(STATUS_FW_ERROR, &priv->status)) {
- IWL_ERR(priv, "Command %s failed: FW Error\n",
- get_cmd_string(cmd->id));
- ret = -EIO;
- goto fail;
- }
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
- IWL_ERR(priv, "Error: Response NULL in '%s'\n",
- get_cmd_string(cmd->id));
- ret = -EIO;
- goto cancel;
- }
-
- return 0;
-
-cancel:
- if (cmd->flags & CMD_WANT_SKB) {
- /*
- * Cancel the CMD_WANT_SKB flag for the cmd in the
- * TX cmd queue. Otherwise in case the cmd comes
- * in later, it will possibly set an invalid
- * address (cmd->meta.source).
- */
- priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
- ~CMD_WANT_SKB;
- }
-fail:
- if (cmd->reply_page) {
- iwl_free_pages(priv, cmd->reply_page);
- cmd->reply_page = 0;
- }
-
- return ret;
-}
-
-int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- if (cmd->flags & CMD_ASYNC)
- return iwl_send_cmd_async(priv, cmd);
-
- return iwl_send_cmd_sync(priv, cmd);
-}
-
-int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
- const void *data)
-{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = { len, },
- .data = { data, },
- .flags = flags,
- };
-
- return iwl_send_cmd(priv, &cmd);
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
index 41f0de91400..1b20c4fb791 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -60,1113 +60,18 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-#include "iwl-dev.h"
-#include "iwl-trans.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-trans-int-pcie.h"
-/*TODO remove uneeded includes when the transport layer tx_free will be here */
-#include "iwl-agn.h"
-#include "iwl-core.h"
-
-static int iwl_trans_rx_alloc(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct device *dev = priv->bus->dev;
-
- memset(&priv->rxq, 0, sizeof(priv->rxq));
-
- spin_lock_init(&rxq->lock);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
-
- if (WARN_ON(rxq->bd || rxq->rb_stts))
- return -EINVAL;
-
- /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- &rxq->bd_dma, GFP_KERNEL);
- if (!rxq->bd)
- goto err_bd;
- memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
-
- /*Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
- &rxq->rb_stts_dma, GFP_KERNEL);
- if (!rxq->rb_stts)
- goto err_rb_stts;
- memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
-
- return 0;
-
-err_rb_stts:
- dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
- rxq->bd = NULL;
-err_bd:
- return -ENOMEM;
-}
-
-static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- int i;
-
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- DMA_FROM_DEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-}
-
-static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
- struct iwl_rx_queue *rxq)
-{
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
-
- rb_timeout = RX_RB_TIMEOUT;
-
- if (iwlagn_mod_params.amsdu_size_8K)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
- /* Stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
- * the credit mechanism in 5000 HW RX FIFO
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
- rb_size|
- (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-}
-
-static int iwl_rx_init(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- int i, err;
- unsigned long flags;
-
- if (!rxq->bd) {
- err = iwl_trans_rx_alloc(priv);
- if (err)
- return err;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
-
- iwl_trans_rxq_free_rx_bufs(priv);
-
- for (i = 0; i < RX_QUEUE_SIZE; i++)
- rxq->queue[i] = NULL;
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- iwlagn_rx_replenish(priv);
-
- iwl_trans_rx_hw_init(priv, rxq);
-
- spin_lock_irqsave(&priv->lock, flags);
- rxq->need_update = 1;
- iwl_rx_queue_update_write_ptr(priv, rxq);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-static void iwl_trans_rx_free(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- unsigned long flags;
-
- /*if rxq->bd is NULL, it means that nothing has been allocated,
- * exit now */
- if (!rxq->bd) {
- IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
- iwl_trans_rxq_free_rx_bufs(priv);
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE,
- rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
- rxq->bd = NULL;
-
- if (rxq->rb_stts)
- dma_free_coherent(priv->bus->dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- else
- IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
- memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
- rxq->rb_stts = NULL;
-}
-
-static int iwl_trans_rx_stop(struct iwl_priv *priv)
-{
-
- /* stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-}
-
-static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr, size_t size)
-{
- if (WARN_ON(ptr->addr))
- return -EINVAL;
-
- ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
- &ptr->dma, GFP_KERNEL);
- if (!ptr->addr)
- return -ENOMEM;
- ptr->size = size;
- return 0;
-}
-
-static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr)
-{
- if (unlikely(!ptr->addr))
- return;
-
- dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
- memset(ptr, 0, sizeof(*ptr));
-}
-
-static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
- int i;
-
- if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
- return -EINVAL;
-
- txq->q.n_window = slots_num;
-
- txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
- GFP_KERNEL);
- txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
- GFP_KERNEL);
-
- if (!txq->meta || !txq->cmd)
- goto error;
-
- for (i = 0; i < slots_num; i++) {
- txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
- GFP_KERNEL);
- if (!txq->cmd[i])
- goto error;
- }
-
- /* Alloc driver data array and TFD circular buffer */
- /* Driver private data, only for Tx (not command) queues,
- * not shared with device. */
- if (txq_id != priv->cmd_queue) {
- txq->txb = kzalloc(sizeof(txq->txb[0]) *
- TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
- if (!txq->txb) {
- IWL_ERR(priv, "kmalloc for auxiliary BD "
- "structures failed\n");
- goto error;
- }
- } else {
- txq->txb = NULL;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
- GFP_KERNEL);
- if (!txq->tfds) {
- IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
- goto error;
- }
- txq->q.id = txq_id;
-
- return 0;
-error:
- kfree(txq->txb);
- txq->txb = NULL;
- /* since txq->cmd has been zeroed,
- * all non allocated cmd[i] will be NULL */
- if (txq->cmd)
- for (i = 0; i < slots_num; i++)
- kfree(txq->cmd[i]);
- kfree(txq->meta);
- kfree(txq->cmd);
- txq->meta = NULL;
- txq->cmd = NULL;
-
- return -ENOMEM;
-
-}
-
-static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- int ret;
-
- txq->need_update = 0;
- memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
-
- /*
- * For the default queues 0-3, set up the swq_id
- * already -- all others need to get one later
- * (if they need one at all).
- */
- if (txq_id < 4)
- iwl_set_swq_id(txq, txq_id, txq_id);
-
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
- * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
- txq_id);
- if (ret)
- return ret;
-
- /*
- * Tell nic where to find circular buffer of Tx Frame Descriptors for
- * given Tx queue, and enable the DMA channel used for that queue.
- * Circular buffer (TFD queue in DRAM) physical base address */
- iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
- txq->q.dma_addr >> 8);
-
- return 0;
-}
-
-/**
- * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
- */
-static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
-
- if (!q->n_bd)
- return;
-
- while (q->write_ptr != q->read_ptr) {
- /* The read_ptr needs to bound by q->n_window */
- iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
-}
-
-/**
- * iwl_tx_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct device *dev = priv->bus->dev;
- int i;
- if (WARN_ON(!txq))
- return;
-
- iwl_tx_queue_unmap(priv, txq_id);
-
- /* De-alloc array of command/tx buffers */
- for (i = 0; i < txq->q.n_window; i++)
- kfree(txq->cmd[i]);
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd) {
- dma_free_coherent(dev, priv->hw_params.tfd_size *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
- memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
- }
-
- /* De-alloc array of per-TFD driver data */
- kfree(txq->txb);
- txq->txb = NULL;
-
- /* deallocate arrays */
- kfree(txq->cmd);
- kfree(txq->meta);
- txq->cmd = NULL;
- txq->meta = NULL;
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-
-/**
- * iwl_trans_tx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-static void iwl_trans_tx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq) {
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- iwl_tx_queue_free(priv, txq_id);
- }
-
- kfree(priv->txq);
- priv->txq = NULL;
-
- iwlagn_free_dma_ptr(priv, &priv->kw);
-
- iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
-}
-
-/**
- * iwl_trans_tx_alloc - allocate TX context
- * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
- */
-static int iwl_trans_tx_alloc(struct iwl_priv *priv)
-{
- int ret;
- int txq_id, slots_num;
-
- /*It is not allowed to alloc twice, so warn when this happens.
- * We cannot rely on the previous allocation, so free and fail */
- if (WARN_ON(priv->txq)) {
- ret = -EINVAL;
- goto error;
- }
-
- ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
- priv->hw_params.scd_bc_tbls_size);
- if (ret) {
- IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
- goto error;
- }
-
- /* Alloc keep-warm buffer */
- ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
- if (ret) {
- IWL_ERR(priv, "Keep Warm allocation failed\n");
- goto error;
- }
-
- priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
- priv->cfg->base_params->num_of_queues, GFP_KERNEL);
- if (!priv->txq) {
- IWL_ERR(priv, "Not enough memory for txq\n");
- ret = ENOMEM;
- goto error;
- }
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == priv->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
- txq_id);
- if (ret) {
- IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
- goto error;
- }
- }
-
- return 0;
-
-error:
- trans_tx_free(&priv->trans);
-
- return ret;
-}
-static int iwl_tx_init(struct iwl_priv *priv)
-{
- int ret;
- int txq_id, slots_num;
- unsigned long flags;
- bool alloc = false;
-
- if (!priv->txq) {
- ret = iwl_trans_tx_alloc(priv);
- if (ret)
- goto error;
- alloc = true;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwl_write_prph(priv, SCD_TXFACT, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == priv->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
- txq_id);
- if (ret) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return 0;
-error:
- /*Upon error, free only if we allocated something */
- if (alloc)
- trans_tx_free(&priv->trans);
- return ret;
-}
-
-static void iwl_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
-
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
-
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-}
-
-static int iwl_nic_init(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- /* nic_init */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_apm_init(priv);
-
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl_set_pwr_vmain(priv);
-
- priv->cfg->lib->nic_config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- iwl_rx_init(priv);
-
- /* Allocate or reset and init all Tx and Command queues */
- if (iwl_tx_init(priv))
- return -ENOMEM;
-
- if (priv->cfg->base_params->shadow_reg_enable) {
- /* enable shadow regs in HW */
- iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
- 0x800FFFFF);
- }
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-
-#define HW_READY_TIMEOUT (50)
-
-/* Note: returns poll_bit return value, which is >= 0 if success */
-static int iwl_set_hw_ready(struct iwl_priv *priv)
-{
- int ret;
-
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
-
- /* See if we got it */
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- HW_READY_TIMEOUT);
-
- IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
- return ret;
-}
-
-/* Note: returns standard 0/-ERROR code */
-static int iwl_trans_prepare_card_hw(struct iwl_priv *priv)
-{
- int ret;
-
- IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");
-
- ret = iwl_set_hw_ready(priv);
- if (ret >= 0)
- return 0;
-
- /* If HW is not ready, prepare the conditions to check again */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
-
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
-
- if (ret < 0)
- return ret;
-
- /* HW should be ready by now, check again. */
- ret = iwl_set_hw_ready(priv);
- if (ret >= 0)
- return 0;
- return ret;
-}
-
-static int iwl_trans_start_device(struct iwl_priv *priv)
-{
- int ret;
-
- priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
-
- if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
- iwl_trans_prepare_card_hw(priv)) {
- IWL_WARN(priv, "Exit HW not ready\n");
- return -EIO;
- }
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- if (iwl_is_rfkill(priv)) {
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
- iwl_enable_interrupts(priv);
- return -ERFKILL;
- }
-
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
- ret = iwl_nic_init(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to init nic\n");
- return ret;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_enable_interrupts(priv);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- return 0;
-}
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under priv->lock and mac access
- */
-static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
- iwl_write_prph(priv, SCD_TXFACT, mask);
-}
-
-#define IWL_AC_UNSET -1
-
-struct queue_to_fifo_ac {
- s8 fifo, ac;
-};
-
-static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
- { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
- { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
- { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
- { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
- { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-};
-
-static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
- { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
- { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
- { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
- { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
- { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
- { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
- { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
- { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
- { IWL_TX_FIFO_BE_IPAN, 2, },
- { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
-};
-static void iwl_trans_tx_start(struct iwl_priv *priv)
-{
- const struct queue_to_fifo_ac *queue_to_fifo;
- struct iwl_rxon_context *ctx;
- u32 a;
- unsigned long flags;
- int i, chan;
- u32 reg_val;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
- a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
- /* reset conext data memory */
- for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(priv, a, 0);
- /* reset tx status memory */
- for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
- iwl_write_targ_mem(priv, a, 0);
-
- iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
- priv->scd_bc_tbls.dma >> 10);
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
- SCD_QUEUECHAIN_SEL_ALL(priv));
- iwl_write_prph(priv, SCD_AGGR_SEL, 0);
-
- /* initiate the queues */
- for (i = 0; i < priv->hw_params.max_txq_num; i++) {
- iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
- iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(i), 0);
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- ((SCD_WIN_SIZE <<
- SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((SCD_FRAME_LIMIT <<
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
- }
-
- iwl_write_prph(priv, SCD_INTERRUPT_MASK,
- IWL_MASK(0, priv->hw_params.max_txq_num));
-
- /* Activate all Tx DMA/FIFO channels */
- iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));
-
- /* map queues to FIFOs */
- if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
- queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
- else
- queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
-
- iwl_trans_set_wr_ptrs(priv, priv->cmd_queue, 0);
-
- /* make sure all queue are not stopped */
- memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
- for (i = 0; i < 4; i++)
- atomic_set(&priv->queue_stop_count[i], 0);
- for_each_context(priv, ctx)
- ctx->last_tx_rejected = false;
-
- /* reset to 0 to enable all the queue first */
- priv->txq_ctx_active_msk = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
- BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);
-
- for (i = 0; i < 10; i++) {
- int fifo = queue_to_fifo[i].fifo;
- int ac = queue_to_fifo[i].ac;
-
- iwl_txq_ctx_activate(priv, i);
-
- if (fifo == IWL_TX_FIFO_UNUSED)
- continue;
-
- if (ac != IWL_AC_UNSET)
- iwl_set_swq_id(&priv->txq[i], ac, i);
- iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Enable L1-Active */
- iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-}
-
-/**
- * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
- */
-static int iwl_trans_tx_stop(struct iwl_priv *priv)
-{
- int ch, txq_id;
- unsigned long flags;
-
- /* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&priv->lock, flags);
-
- iwl_trans_txq_set_sched(priv, 0);
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000))
- IWL_ERR(priv, "Failing on timeout while stopping"
- " DMA channel %d [0x%08x]", ch,
- iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!priv->txq) {
- IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
- return 0;
- }
-
- /* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- iwl_tx_queue_unmap(priv, txq_id);
-
- return 0;
-}
-
-static void iwl_trans_stop_device(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- /* stop and reset the on-board processor */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /* tell the device to stop sending interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- trans_sync_irq(&priv->trans);
-
- /* device going down, Stop using ICT table */
- iwl_disable_ict(priv);
-
- /*
- * If a HW restart happens during firmware loading,
- * then the firmware loading might call this function
- * and later it might be called again due to the
- * restart. So don't process again if the device is
- * already dead.
- */
- if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
- iwl_trans_tx_stop(priv);
- iwl_trans_rx_stop(priv);
-
- /* Power-down device's busmaster DMA clocks */
- iwl_write_prph(priv, APMG_CLK_DIS_REG,
- APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
- }
-
- /* Make sure (redundant) we've released our request to stay awake */
- iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /* Stop the device, and put it in low power state */
- iwl_apm_stop(priv);
-}
-
-static struct iwl_tx_cmd *iwl_trans_get_tx_cmd(struct iwl_priv *priv,
- int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_device_cmd *dev_cmd;
-
- if (unlikely(iwl_queue_space(q) < q->high_mark))
- return NULL;
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- dev_cmd = txq->cmd[q->write_ptr];
- memset(dev_cmd, 0, sizeof(*dev_cmd));
- dev_cmd->hdr.cmd = REPLY_TX;
- dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
- return &dev_cmd->cmd.tx;
-}
-
-static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
- struct iwl_cmd_meta *out_meta;
-
- dma_addr_t phys_addr = 0;
- dma_addr_t txcmd_phys;
- dma_addr_t scratch_phys;
- u16 len, firstlen, secondlen;
- u8 wait_write_ptr = 0;
- u8 hdr_len = ieee80211_hdrlen(fc);
-
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = ctx;
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->meta[q->write_ptr];
-
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- firstlen = (len + 3) & ~3;
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (firstlen != len)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = dma_map_single(priv->bus->dev,
- &dev_cmd->hdr, firstlen,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
- return -1;
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, firstlen);
-
- if (!ieee80211_has_morefrags(fc)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- secondlen = skb->len - hdr_len;
- if (secondlen > 0) {
- phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
- secondlen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
- dma_unmap_single(priv->bus->dev,
- dma_unmap_addr(out_meta, mapping),
- dma_unmap_len(out_meta, len),
- DMA_BIDIRECTIONAL);
- return -1;
- }
- }
-
- /* Attach buffers to TFD */
- iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
- if (secondlen > 0)
- iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
- secondlen, 0);
-
- scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
-
- /* take back ownership of DMA buffer to enable update */
- dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
- le16_to_cpu(dev_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
-
- /* Set up entry for this TFD in Tx byte-count array */
- if (ampdu)
- iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
- le16_to_cpu(tx_cmd->len));
-
- dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
-
- trace_iwlwifi_dev_tx(priv,
- &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
- &dev_cmd->hdr, firstlen,
- skb->data + hdr_len, secondlen);
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(priv, txq);
-
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
- */
- if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
- if (wait_write_ptr) {
- txq->need_update = 1;
- iwl_txq_update_write_ptr(priv, txq);
- } else {
- iwl_stop_queue(priv, txq);
- }
- }
- return 0;
-}
-
-static void iwl_trans_kick_nic(struct iwl_priv *priv)
-{
- /* Remove all resets to allow NIC to operate */
- iwl_write32(priv, CSR_RESET, 0);
-}
-
-static void iwl_trans_sync_irq(struct iwl_priv *priv)
-{
- /* wait to make sure we flush pending tasklet*/
- synchronize_irq(priv->bus->irq);
- tasklet_kill(&priv->irq_tasklet);
-}
-
-static void iwl_trans_free(struct iwl_priv *priv)
-{
- free_irq(priv->bus->irq, priv);
- iwl_free_isr_ict(priv);
-}
-
-static const struct iwl_trans_ops trans_ops = {
- .start_device = iwl_trans_start_device,
- .prepare_card_hw = iwl_trans_prepare_card_hw,
- .stop_device = iwl_trans_stop_device,
-
- .tx_start = iwl_trans_tx_start,
-
- .rx_free = iwl_trans_rx_free,
- .tx_free = iwl_trans_tx_free,
-
- .send_cmd = iwl_send_cmd,
- .send_cmd_pdu = iwl_send_cmd_pdu,
-
- .get_tx_cmd = iwl_trans_get_tx_cmd,
- .tx = iwl_trans_tx,
-
- .txq_agg_disable = iwl_trans_txq_agg_disable,
- .txq_agg_setup = iwl_trans_txq_agg_setup,
-
- .kick_nic = iwl_trans_kick_nic,
-
- .sync_irq = iwl_trans_sync_irq,
- .free = iwl_trans_free,
-};
+#include "iwl-trans.h"
-int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv)
+int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
+ u32 flags, u16 len, const void *data)
{
- int err;
-
- priv->trans.ops = &trans_ops;
- priv->trans.priv = priv;
-
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
- iwl_irq_tasklet, (unsigned long)priv);
-
- iwl_alloc_isr_ict(priv);
-
- err = request_irq(priv->bus->irq, iwl_isr_ict, IRQF_SHARED,
- DRV_NAME, priv);
- if (err) {
- IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq);
- iwl_free_isr_ict(priv);
- return err;
- }
-
- INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ .flags = flags,
+ };
- return 0;
+ return iwl_trans_send_cmd(trans, &cmd);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 7993aa7ae66..c5923125c3f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -63,163 +63,289 @@
#ifndef __iwl_trans_h__
#define __iwl_trans_h__
+#include <linux/debugfs.h>
+#include <linux/skbuff.h>
+
+#include "iwl-shared.h"
+#include "iwl-commands.h"
+
/*This file includes the declaration that are exported from the transport
* layer */
struct iwl_priv;
-struct iwl_rxon_context;
-struct iwl_host_cmd;
+struct iwl_shared;
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
+
+enum {
+ CMD_SYNC = 0,
+ CMD_ASYNC = BIT(0),
+ CMD_WANT_SKB = BIT(1),
+ CMD_ON_DEMAND = BIT(2),
+};
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct iwl_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for commands that
+ * aren't fully copied and use other TFD space.
+ */
+struct iwl_device_cmd {
+ struct iwl_cmd_header hdr; /* uCode API */
+ u8 payload[DEF_CMD_PAYLOAD_SIZE];
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
+
+#define IWL_MAX_CMD_TFDS 2
+
+enum iwl_hcmd_dataflag {
+ IWL_HCMD_DFL_NOCOPY = BIT(0),
+};
+
+/**
+ * struct iwl_host_cmd - Host command to the uCode
+ * @data: array of chunks that composes the data of the host command
+ * @reply_page: pointer to the page that holds the response to the host command
+ * @handler_status: return value of the handler of the command
+ * (put in setup_rx_handlers) - valid for SYNC mode only
+ * @callback:
+ * @flags: can be CMD_* note CMD_WANT_SKB is incompatible withe CMD_ASYNC
+ * @len: array of the lenths of the chunks in data
+ * @dataflags:
+ * @id: id of the host command
+ */
+struct iwl_host_cmd {
+ const void *data[IWL_MAX_CMD_TFDS];
+ unsigned long reply_page;
+ int handler_status;
+
+ u32 flags;
+ u16 len[IWL_MAX_CMD_TFDS];
+ u8 dataflags[IWL_MAX_CMD_TFDS];
+ u8 id;
+};
/**
* struct iwl_trans_ops - transport specific operations
+ * @alloc: allocates the meta data (not the queues themselves)
+ * @request_irq: requests IRQ - will be called before the FW load in probe flow
* @start_device: allocates and inits all the resources for the transport
* layer.
* @prepare_card_hw: claim the ownership on the HW. Will be called during
* probe.
* @tx_start: starts and configures all the Tx fifo - usually done once the fw
* is alive.
+ * @wake_any_queue: wake all the queues of a specfic context IWL_RXON_CTX_*
* @stop_device:stops the whole device (embedded CPU put to reset)
- * @rx_free: frees the rx memory
- * @tx_free: frees the tx memory
* @send_cmd:send a host command
- * @send_cmd_pdu:send a host command: flags can be CMD_*
- * @get_tx_cmd: returns a pointer to a new Tx cmd for the upper layer use
* @tx: send an skb
- * @txq_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
+ * @reclaim: free packet until ssn. Returns a list of freed packets.
+ * @tx_agg_alloc: allocate resources for a TX BA session
+ * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
* ready and a successful ADDBA response has been received.
- * @txq_agg_disable: de-configure a Tx queue to send AMPDUs
+ * @tx_agg_disable: de-configure a Tx queue to send AMPDUs
* @kick_nic: remove the RESET from the embedded CPU and let it run
- * @sync_irq: the upper layer will typically disable interrupt and call this
- * handler. After this handler returns, it is guaranteed that all
- * the ISR / tasklet etc... have finished running and the transport
- * layer shall not pass any Rx.
* @free: release all the ressource for the transport layer itself such as
* irq, tasklet etc...
+ * @stop_queue: stop a specific queue
+ * @check_stuck_queue: check if a specific queue is stuck
+ * @wait_tx_queue_empty: wait until all tx queues are empty
+ * @dbgfs_register: add the dbgfs files under this directory. Files will be
+ * automatically deleted.
+ * @suspend: stop the device unless WoWLAN is configured
+ * @resume: resume activity of the device
*/
struct iwl_trans_ops {
- int (*start_device)(struct iwl_priv *priv);
- int (*prepare_card_hw)(struct iwl_priv *priv);
- void (*stop_device)(struct iwl_priv *priv);
- void (*tx_start)(struct iwl_priv *priv);
- void (*tx_free)(struct iwl_priv *priv);
- void (*rx_free)(struct iwl_priv *priv);
+ struct iwl_trans *(*alloc)(struct iwl_shared *shrd);
+ int (*request_irq)(struct iwl_trans *iwl_trans);
+ int (*start_device)(struct iwl_trans *trans);
+ int (*prepare_card_hw)(struct iwl_trans *trans);
+ void (*stop_device)(struct iwl_trans *trans);
+ void (*tx_start)(struct iwl_trans *trans);
+
+ void (*wake_any_queue)(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx);
- int (*send_cmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+ int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
- int (*send_cmd_pdu)(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
- const void *data);
- struct iwl_tx_cmd * (*get_tx_cmd)(struct iwl_priv *priv, int txq_id);
- int (*tx)(struct iwl_priv *priv, struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
- struct iwl_rxon_context *ctx);
+ int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
+ u8 sta_id);
+ void (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
+ int txq_id, int ssn, u32 status,
+ struct sk_buff_head *skbs);
- int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo);
- void (*txq_agg_setup)(struct iwl_priv *priv, int sta_id, int tid,
- int frame_limit);
+ int (*tx_agg_disable)(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid);
+ int (*tx_agg_alloc)(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id, int tid,
+ u16 *ssn);
+ void (*tx_agg_setup)(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id, int tid,
+ int frame_limit);
- void (*kick_nic)(struct iwl_priv *priv);
+ void (*kick_nic)(struct iwl_trans *trans);
- void (*sync_irq)(struct iwl_priv *priv);
- void (*free)(struct iwl_priv *priv);
+ void (*free)(struct iwl_trans *trans);
+
+ void (*stop_queue)(struct iwl_trans *trans, int q);
+
+ int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
+ int (*check_stuck_queue)(struct iwl_trans *trans, int q);
+ int (*wait_tx_queue_empty)(struct iwl_trans *trans);
+#ifdef CONFIG_PM_SLEEP
+ int (*suspend)(struct iwl_trans *trans);
+ int (*resume)(struct iwl_trans *trans);
+#endif
};
+/**
+ * struct iwl_trans - transport common data
+ * @ops - pointer to iwl_trans_ops
+ * @shrd - pointer to iwl_shared which holds shared data from the upper layer
+ * @hcmd_lock: protects HCMD
+ */
struct iwl_trans {
const struct iwl_trans_ops *ops;
- struct iwl_priv *priv;
+ struct iwl_shared *shrd;
+ spinlock_t hcmd_lock;
+
+ /* pointer to trans specific struct */
+ /*Ensure that this pointer will always be aligned to sizeof pointer */
+ char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
};
-static inline int trans_start_device(struct iwl_trans *trans)
+static inline int iwl_trans_request_irq(struct iwl_trans *trans)
{
- return trans->ops->start_device(trans->priv);
+ return trans->ops->request_irq(trans);
}
-static inline int trans_prepare_card_hw(struct iwl_trans *trans)
+static inline int iwl_trans_start_device(struct iwl_trans *trans)
{
- return trans->ops->prepare_card_hw(trans->priv);
+ return trans->ops->start_device(trans);
}
-static inline void trans_stop_device(struct iwl_trans *trans)
+static inline int iwl_trans_prepare_card_hw(struct iwl_trans *trans)
{
- trans->ops->stop_device(trans->priv);
+ return trans->ops->prepare_card_hw(trans);
}
-static inline void trans_tx_start(struct iwl_trans *trans)
+static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
- trans->ops->tx_start(trans->priv);
+ trans->ops->stop_device(trans);
}
-static inline void trans_rx_free(struct iwl_trans *trans)
+static inline void iwl_trans_tx_start(struct iwl_trans *trans)
{
- trans->ops->rx_free(trans->priv);
+ trans->ops->tx_start(trans);
}
-static inline void trans_tx_free(struct iwl_trans *trans)
+static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx)
{
- trans->ops->tx_free(trans->priv);
+ trans->ops->wake_any_queue(trans, ctx);
}
-static inline int trans_send_cmd(struct iwl_trans *trans,
+
+static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
- return trans->ops->send_cmd(trans->priv, cmd);
+ return trans->ops->send_cmd(trans, cmd);
+}
+
+int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
+ u32 flags, u16 len, const void *data);
+
+static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
+ u8 sta_id)
+{
+ return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id);
}
-static inline int trans_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
- u16 len, const void *data)
+static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
+ int tid, int txq_id, int ssn, u32 status,
+ struct sk_buff_head *skbs)
{
- return trans->ops->send_cmd_pdu(trans->priv, id, flags, len, data);
+ trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs);
}
-static inline struct iwl_tx_cmd *trans_get_tx_cmd(struct iwl_trans *trans,
- int txq_id)
+static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx,
+ int sta_id, int tid)
{
- return trans->ops->get_tx_cmd(trans->priv, txq_id);
+ return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid);
}
-static inline int trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
- struct iwl_rxon_context *ctx)
+static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx,
+ int sta_id, int tid, u16 *ssn)
{
- return trans->ops->tx(trans->priv, skb, tx_cmd, txq_id, fc, ampdu, ctx);
+ return trans->ops->tx_agg_alloc(trans, ctx, sta_id, tid, ssn);
}
-static inline int trans_txq_agg_disable(struct iwl_trans *trans, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
+
+static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx,
+ int sta_id, int tid,
+ int frame_limit)
{
- return trans->ops->txq_agg_disable(trans->priv, txq_id,
- ssn_idx, tx_fifo);
+ trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit);
}
-static inline void trans_txq_agg_setup(struct iwl_trans *trans, int sta_id,
- int tid, int frame_limit)
+static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
{
- trans->ops->txq_agg_setup(trans->priv, sta_id, tid, frame_limit);
+ trans->ops->kick_nic(trans);
}
-static inline void trans_kick_nic(struct iwl_trans *trans)
+static inline void iwl_trans_free(struct iwl_trans *trans)
{
- trans->ops->kick_nic(trans->priv);
+ trans->ops->free(trans);
}
-static inline void trans_sync_irq(struct iwl_trans *trans)
+static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q)
{
- trans->ops->sync_irq(trans->priv);
+ trans->ops->stop_queue(trans, q);
}
-static inline void trans_free(struct iwl_trans *trans)
+static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
+{
+ return trans->ops->wait_tx_queue_empty(trans);
+}
+
+static inline int iwl_trans_check_stuck_queue(struct iwl_trans *trans, int q)
+{
+ return trans->ops->check_stuck_queue(trans, q);
+}
+static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
+ struct dentry *dir)
{
- trans->ops->free(trans->priv);
+ return trans->ops->dbgfs_register(trans, dir);
}
-int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv);
+#ifdef CONFIG_PM_SLEEP
+static inline int iwl_trans_suspend(struct iwl_trans *trans)
+{
+ return trans->ops->suspend(trans);
+}
-/*TODO: this functions should NOT be exported from trans module - export it
- * until the reclaim flow will be brought to the transport module too */
+static inline int iwl_trans_resume(struct iwl_trans *trans)
+{
+ return trans->ops->resume(trans);
+}
+#endif
-struct iwl_tx_queue;
-void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
+/*****************************************************
+* Transport layers implementations
+******************************************************/
+extern const struct iwl_trans_ops trans_ops_pcie;
#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index ed57e440280..c42be81e979 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -187,13 +187,17 @@ static int iwm_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
struct key_params*))
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
- struct iwm_key *key = &iwm->keys[key_index];
+ struct iwm_key *key;
struct key_params params;
IWM_DBG_WEXT(iwm, DBG, "Getting key %d\n", key_index);
+ if (key_index >= IWM_NUM_KEYS)
+ return -ENOENT;
+
memset(&params, 0, sizeof(params));
+ key = &iwm->keys[key_index];
params.cipher = key->cipher;
params.key_len = key->key_len;
params.seq_len = key->seq_len;
diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README
index 1453eec82a9..91f2ca90c70 100644
--- a/drivers/net/wireless/libertas/README
+++ b/drivers/net/wireless/libertas/README
@@ -238,28 +238,3 @@ hostsleep
echo "1" > hostsleep : enable host sleep.
echo "0" > hostsleep : disable host sleep
-========================
-IWCONFIG COMMANDS
-========================
-power period
-
- This command is used to configure the station in deep sleep mode /
- auto deep sleep mode.
-
- The timer is implemented to monitor the activities (command, event,
- etc.). When an activity is detected station will exit from deep
- sleep mode automatically and restart the timer. At timer expiry
- (no activity for defined time period) the deep sleep mode is entered
- automatically.
-
- Note: this command is for SDIO interface only.
-
- Usage:
- To enable deep sleep mode do:
- iwconfig wlan0 power period 0
- To enable auto deep sleep mode with idle time period 5 seconds do:
- iwconfig wlan0 power period 5
- To disable deep sleep/auto deep sleep mode do:
- iwconfig wlan0 power period -1
-
-==============================================================================
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index b456a53b64b..ff6378276ff 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -19,6 +19,7 @@
#include "decl.h"
#include "cfg.h"
#include "cmd.h"
+#include "mesh.h"
#define CHAN2G(_channel, _freq, _flags) { \
@@ -442,13 +443,16 @@ static int lbs_cfg_set_channel(struct wiphy *wiphy,
struct lbs_private *priv = wiphy_priv(wiphy);
int ret = -ENOTSUPP;
- lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d",
- channel->center_freq, channel_type);
+ lbs_deb_enter_args(LBS_DEB_CFG80211, "iface %s freq %d, type %d",
+ netdev_name(netdev), channel->center_freq, channel_type);
if (channel_type != NL80211_CHAN_NO_HT)
goto out;
- ret = lbs_set_channel(priv, channel->hw_value);
+ if (netdev == priv->mesh_dev)
+ ret = lbs_mesh_set_channel(priv, channel->hw_value);
+ else
+ ret = lbs_set_channel(priv, channel->hw_value);
out:
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
@@ -691,7 +695,7 @@ static void lbs_scan_worker(struct work_struct *work)
tlv = scan_cmd->tlvbuffer;
/* add SSID TLV */
- if (priv->scan_req->n_ssids)
+ if (priv->scan_req->n_ssids && priv->scan_req->ssids[0].ssid_len > 0)
tlv += lbs_add_ssid_tlv(tlv,
priv->scan_req->ssids[0].ssid,
priv->scan_req->ssids[0].ssid_len);
@@ -708,7 +712,7 @@ static void lbs_scan_worker(struct work_struct *work)
if (priv->scan_channel < priv->scan_req->n_channels) {
cancel_delayed_work(&priv->scan_work);
- if (!priv->stopping)
+ if (netif_running(priv->dev))
queue_delayed_work(priv->work_thread, &priv->scan_work,
msecs_to_jiffies(300));
}
@@ -732,7 +736,6 @@ static void lbs_scan_worker(struct work_struct *work)
cfg80211_scan_done(priv->scan_req, false);
priv->scan_req = NULL;
- priv->last_scan = jiffies;
}
/* Restart network */
@@ -1292,27 +1295,32 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
int ret = 0;
u8 preamble = RADIO_PREAMBLE_SHORT;
+ if (dev == priv->mesh_dev)
+ return -EOPNOTSUPP;
+
lbs_deb_enter(LBS_DEB_CFG80211);
if (!sme->bssid) {
- /* Run a scan if one isn't in-progress already and if the last
- * scan was done more than 2 seconds ago.
- */
- if (priv->scan_req == NULL &&
- time_after(jiffies, priv->last_scan + (2 * HZ))) {
- struct cfg80211_scan_request *creq;
+ struct cfg80211_scan_request *creq;
- creq = _new_connect_scan_req(wiphy, sme);
- if (!creq) {
- ret = -EINVAL;
- goto done;
- }
+ /*
+ * Scan for the requested network after waiting for existing
+ * scans to finish.
+ */
+ lbs_deb_assoc("assoc: waiting for existing scans\n");
+ wait_event_interruptible_timeout(priv->scan_q,
+ (priv->scan_req == NULL),
+ (15 * HZ));
- lbs_deb_assoc("assoc: scanning for compatible AP\n");
- _internal_start_scan(priv, true, creq);
+ creq = _new_connect_scan_req(wiphy, sme);
+ if (!creq) {
+ ret = -EINVAL;
+ goto done;
}
- /* Wait for any in-progress scan to complete */
+ lbs_deb_assoc("assoc: scanning for compatible AP\n");
+ _internal_start_scan(priv, true, creq);
+
lbs_deb_assoc("assoc: waiting for scan to complete\n");
wait_event_interruptible_timeout(priv->scan_q,
(priv->scan_req == NULL),
@@ -1402,28 +1410,23 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static int lbs_cfg_disconnect(struct wiphy *wiphy, struct net_device *dev,
- u16 reason_code)
+int lbs_disconnect(struct lbs_private *priv, u16 reason)
{
- struct lbs_private *priv = wiphy_priv(wiphy);
struct cmd_ds_802_11_deauthenticate cmd;
-
- lbs_deb_enter_args(LBS_DEB_CFG80211, "reason_code %d", reason_code);
-
- /* store for lbs_cfg_ret_disconnect() */
- priv->disassoc_reason = reason_code;
+ int ret;
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
/* Mildly ugly to use a locally store my own BSSID ... */
memcpy(cmd.macaddr, &priv->assoc_bss, ETH_ALEN);
- cmd.reasoncode = cpu_to_le16(reason_code);
+ cmd.reasoncode = cpu_to_le16(reason);
- if (lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd))
- return -EFAULT;
+ ret = lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd);
+ if (ret)
+ return ret;
cfg80211_disconnected(priv->dev,
- priv->disassoc_reason,
+ reason,
NULL, 0,
GFP_KERNEL);
priv->connect_status = LBS_DISCONNECTED;
@@ -1431,6 +1434,21 @@ static int lbs_cfg_disconnect(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static int lbs_cfg_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+
+ if (dev == priv->mesh_dev)
+ return -EOPNOTSUPP;
+
+ lbs_deb_enter_args(LBS_DEB_CFG80211, "reason_code %d", reason_code);
+
+ /* store for lbs_cfg_ret_disconnect() */
+ priv->disassoc_reason = reason_code;
+
+ return lbs_disconnect(priv, reason_code);
+}
static int lbs_cfg_set_default_key(struct wiphy *wiphy,
struct net_device *netdev,
@@ -1439,6 +1457,9 @@ static int lbs_cfg_set_default_key(struct wiphy *wiphy,
{
struct lbs_private *priv = wiphy_priv(wiphy);
+ if (netdev == priv->mesh_dev)
+ return -EOPNOTSUPP;
+
lbs_deb_enter(LBS_DEB_CFG80211);
if (key_index != priv->wep_tx_key) {
@@ -1460,6 +1481,9 @@ static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev,
u16 key_type;
int ret = 0;
+ if (netdev == priv->mesh_dev)
+ return -EOPNOTSUPP;
+
lbs_deb_enter(LBS_DEB_CFG80211);
lbs_deb_assoc("add_key: cipher 0x%x, mac_addr %pM\n",
@@ -1603,6 +1627,9 @@ static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev,
s8 signal, noise;
int ret;
+ if (dev == priv->mesh_dev)
+ return -EOPNOTSUPP;
+
if (idx != 0)
ret = -ENOENT;
@@ -1636,28 +1663,23 @@ static int lbs_change_intf(struct wiphy *wiphy, struct net_device *dev,
struct lbs_private *priv = wiphy_priv(wiphy);
int ret = 0;
- lbs_deb_enter(LBS_DEB_CFG80211);
+ if (dev == priv->mesh_dev)
+ return -EOPNOTSUPP;
switch (type) {
case NL80211_IFTYPE_MONITOR:
- ret = lbs_set_monitor_mode(priv, 1);
- break;
case NL80211_IFTYPE_STATION:
- if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
- ret = lbs_set_monitor_mode(priv, 0);
- if (!ret)
- ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 1);
- break;
case NL80211_IFTYPE_ADHOC:
- if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
- ret = lbs_set_monitor_mode(priv, 0);
- if (!ret)
- ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 2);
break;
default:
- ret = -ENOTSUPP;
+ return -EOPNOTSUPP;
}
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ if (priv->iface_running)
+ ret = lbs_set_iface_type(priv, type);
+
if (!ret)
priv->wdev->iftype = type;
@@ -1959,6 +1981,9 @@ static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_bss *bss;
DECLARE_SSID_BUF(ssid_buf);
+ if (dev == priv->mesh_dev)
+ return -EOPNOTSUPP;
+
lbs_deb_enter(LBS_DEB_CFG80211);
if (!params->channel) {
@@ -1995,6 +2020,9 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
struct cmd_ds_802_11_ad_hoc_stop cmd;
int ret = 0;
+ if (dev == priv->mesh_dev)
+ return -EOPNOTSUPP;
+
lbs_deb_enter(LBS_DEB_CFG80211);
memset(&cmd, 0, sizeof(cmd));
@@ -2117,6 +2145,8 @@ int lbs_cfg_register(struct lbs_private *priv)
BIT(NL80211_IFTYPE_ADHOC);
if (lbs_rtap_supported(priv))
wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
+ if (lbs_mesh_activated(priv))
+ wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MESH_POINT);
wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz;
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
index 4f46bb744be..a02ee151710 100644
--- a/drivers/net/wireless/libertas/cfg.h
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -17,5 +17,6 @@ void lbs_send_disconnect_notification(struct lbs_private *priv);
void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
void lbs_scan_deinit(struct lbs_private *priv);
+int lbs_disconnect(struct lbs_private *priv, u16 reason);
#endif
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index dbd24a4607e..e08ab1de3d9 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -1088,7 +1088,7 @@ void __lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
if (!cmd->callback || cmd->callback == lbs_cmd_async_callback)
__lbs_cleanup_and_insert_cmd(priv, cmd);
priv->cur_cmd = NULL;
- wake_up_interruptible(&priv->waitq);
+ wake_up(&priv->waitq);
}
void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
@@ -1627,7 +1627,7 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
lbs_deb_host("PREP_CMD: cmdnode is NULL\n");
/* Wake up main thread to execute next command */
- wake_up_interruptible(&priv->waitq);
+ wake_up(&priv->waitq);
cmdnode = ERR_PTR(-ENOBUFS);
goto done;
}
@@ -1647,7 +1647,7 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
cmdnode->cmdwaitqwoken = 0;
lbs_queue_cmd(priv, cmdnode);
- wake_up_interruptible(&priv->waitq);
+ wake_up(&priv->waitq);
done:
lbs_deb_leave_args(LBS_DEB_HOST, "ret %p", cmdnode);
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index da0b05bb89f..bc951ab4b68 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -9,6 +9,7 @@
#include <linux/netdevice.h>
#include <linux/firmware.h>
+#include <linux/nl80211.h>
/* Should be terminated by a NULL entry */
struct lbs_fw_table {
@@ -43,10 +44,15 @@ int lbs_start_card(struct lbs_private *priv);
void lbs_stop_card(struct lbs_private *priv);
void lbs_host_to_card_done(struct lbs_private *priv);
+int lbs_start_iface(struct lbs_private *priv);
+int lbs_stop_iface(struct lbs_private *priv);
+int lbs_set_iface_type(struct lbs_private *priv, enum nl80211_iftype type);
+
int lbs_rtap_supported(struct lbs_private *priv);
int lbs_set_mac_address(struct net_device *dev, void *addr);
void lbs_set_multicast_list(struct net_device *dev);
+void lbs_update_mcast(struct lbs_private *priv);
int lbs_suspend(struct lbs_private *priv);
int lbs_resume(struct lbs_private *priv);
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index adb3490e3cf..f3fd447131c 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -6,7 +6,6 @@
#ifndef _LBS_DEV_H_
#define _LBS_DEV_H_
-#include "mesh.h"
#include "defs.h"
#include "host.h"
@@ -22,6 +21,17 @@ struct sleep_params {
uint16_t sp_reserved;
};
+/* Mesh statistics */
+struct lbs_mesh_stats {
+ u32 fwd_bcast_cnt; /* Fwd: Broadcast counter */
+ u32 fwd_unicast_cnt; /* Fwd: Unicast counter */
+ u32 fwd_drop_ttl; /* Fwd: TTL zero */
+ u32 fwd_drop_rbt; /* Fwd: Recently Broadcasted */
+ u32 fwd_drop_noroute; /* Fwd: No route to Destination */
+ u32 fwd_drop_nobuf; /* Fwd: Run out of internal buffers */
+ u32 drop_blind; /* Rx: Dropped by blinding table */
+ u32 tx_failed_cnt; /* Tx: Failed transmissions */
+};
/* Private structure for the MV device */
struct lbs_private {
@@ -36,7 +46,6 @@ struct lbs_private {
/* CFG80211 */
struct wireless_dev *wdev;
bool wiphy_registered;
- bool stopping;
struct cfg80211_scan_request *scan_req;
u8 assoc_bss[ETH_ALEN];
u8 disassoc_reason;
@@ -86,11 +95,14 @@ struct lbs_private {
/* Hardware access */
void *card;
+ bool iface_running;
u8 fw_ready;
u8 surpriseremoved;
u8 setup_fw_on_resume;
int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
void (*reset_card) (struct lbs_private *priv);
+ int (*power_save) (struct lbs_private *priv);
+ int (*power_restore) (struct lbs_private *priv);
int (*enter_deep_sleep) (struct lbs_private *priv);
int (*exit_deep_sleep) (struct lbs_private *priv);
int (*reset_deep_sleep_wakeup) (struct lbs_private *priv);
@@ -146,6 +158,7 @@ struct lbs_private {
/* protected by hard_start_xmit serialization */
u8 txretrycount;
struct sk_buff *currenttxskb;
+ struct timer_list tx_lockup_timer;
/* Locks */
struct mutex lock;
@@ -167,9 +180,20 @@ struct lbs_private {
wait_queue_head_t scan_q;
/* Whether the scan was initiated internally and not by cfg80211 */
bool internal_scan;
- unsigned long last_scan;
};
extern struct cmd_confirm_sleep confirm_sleep;
+/* Check if there is an interface active. */
+static inline int lbs_iface_active(struct lbs_private *priv)
+{
+ int r;
+
+ r = netif_running(priv->dev);
+ if (priv->mesh_dev)
+ r |= netif_running(priv->mesh_dev);
+
+ return r;
+}
+
#endif
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 4dfb3bfd2cf..885ddc1c4fe 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -5,6 +5,7 @@
#include "decl.h"
#include "cmd.h"
+#include "mesh.h"
static void lbs_ethtool_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 387786e1b39..c962e21762d 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -39,6 +39,7 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/host.h>
+#include <linux/pm_runtime.h>
#include "host.h"
#include "decl.h"
@@ -47,6 +48,8 @@
#include "cmd.h"
#include "if_sdio.h"
+static void if_sdio_interrupt(struct sdio_func *func);
+
/* The if_sdio_remove() callback function is called when
* user removes this module from kernel space or ejects
* the card from the slot. The driver handles these 2 cases
@@ -757,6 +760,136 @@ out:
return ret;
}
+/********************************************************************/
+/* Power management */
+/********************************************************************/
+
+static int if_sdio_power_on(struct if_sdio_card *card)
+{
+ struct sdio_func *func = card->func;
+ struct lbs_private *priv = card->priv;
+ struct mmc_host *host = func->card->host;
+ int ret;
+
+ sdio_claim_host(func);
+
+ ret = sdio_enable_func(func);
+ if (ret)
+ goto release;
+
+ /* For 1-bit transfers to the 8686 model, we need to enable the
+ * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
+ * bit to allow access to non-vendor registers. */
+ if ((card->model == MODEL_8686) &&
+ (host->caps & MMC_CAP_SDIO_IRQ) &&
+ (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
+ u8 reg;
+
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+ reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
+ if (ret)
+ goto disable;
+
+ reg |= SDIO_BUS_ECSI;
+ sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
+ if (ret)
+ goto disable;
+ }
+
+ card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
+ if (ret)
+ goto disable;
+
+ card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 1, &ret) << 8;
+ if (ret)
+ goto disable;
+
+ card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 2, &ret) << 16;
+ if (ret)
+ goto disable;
+
+ sdio_release_host(func);
+ ret = if_sdio_prog_firmware(card);
+ sdio_claim_host(func);
+ if (ret)
+ goto disable;
+
+ /*
+ * Get rx_unit if the chip is SD8688 or newer.
+ * SD8385 & SD8686 do not have rx_unit.
+ */
+ if ((card->model != MODEL_8385)
+ && (card->model != MODEL_8686))
+ card->rx_unit = if_sdio_read_rx_unit(card);
+ else
+ card->rx_unit = 0;
+
+ /*
+ * Set up the interrupt handler late.
+ *
+ * If we set it up earlier, the (buggy) hardware generates a spurious
+ * interrupt, even before the interrupt has been enabled, with
+ * CCCR_INTx = 0.
+ *
+ * We register the interrupt handler late so that we can handle any
+ * spurious interrupts, and also to avoid generation of that known
+ * spurious interrupt in the first place.
+ */
+ ret = sdio_claim_irq(func, if_sdio_interrupt);
+ if (ret)
+ goto disable;
+
+ /*
+ * Enable interrupts now that everything is set up
+ */
+ sdio_writeb(func, 0x0f, IF_SDIO_H_INT_MASK, &ret);
+ if (ret)
+ goto release_irq;
+
+ sdio_release_host(func);
+
+ /*
+ * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
+ */
+ if (card->model == MODEL_8688) {
+ struct cmd_header cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ lbs_deb_sdio("send function INIT command\n");
+ if (__lbs_cmd(priv, CMD_FUNC_INIT, &cmd, sizeof(cmd),
+ lbs_cmd_copyback, (unsigned long) &cmd))
+ netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n");
+ }
+
+ priv->fw_ready = 1;
+
+ return 0;
+
+release_irq:
+ sdio_release_irq(func);
+disable:
+ sdio_disable_func(func);
+release:
+ sdio_release_host(func);
+ return ret;
+}
+
+static int if_sdio_power_off(struct if_sdio_card *card)
+{
+ struct sdio_func *func = card->func;
+ struct lbs_private *priv = card->priv;
+
+ priv->fw_ready = 0;
+
+ sdio_claim_host(func);
+ sdio_release_irq(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+ return 0;
+}
+
+
/*******************************************************************/
/* Libertas callbacks */
/*******************************************************************/
@@ -923,6 +1056,32 @@ static void if_sdio_reset_card(struct lbs_private *priv)
schedule_work(&card_reset_work);
}
+static int if_sdio_power_save(struct lbs_private *priv)
+{
+ struct if_sdio_card *card = priv->card;
+ int ret;
+
+ flush_workqueue(card->workqueue);
+
+ ret = if_sdio_power_off(card);
+
+ /* Let runtime PM know the card is powered off */
+ pm_runtime_put_sync(&card->func->dev);
+
+ return ret;
+}
+
+static int if_sdio_power_restore(struct lbs_private *priv)
+{
+ struct if_sdio_card *card = priv->card;
+
+ /* Make sure the card will not be powered off by runtime PM */
+ pm_runtime_get_sync(&card->func->dev);
+
+ return if_sdio_power_on(card);
+}
+
+
/*******************************************************************/
/* SDIO callbacks */
/*******************************************************************/
@@ -976,7 +1135,6 @@ static int if_sdio_probe(struct sdio_func *func,
int ret, i;
unsigned int model;
struct if_sdio_packet *packet;
- struct mmc_host *host = func->card->host;
lbs_deb_enter(LBS_DEB_SDIO);
@@ -1033,45 +1191,6 @@ static int if_sdio_probe(struct sdio_func *func,
goto free;
}
- sdio_claim_host(func);
-
- ret = sdio_enable_func(func);
- if (ret)
- goto release;
-
- /* For 1-bit transfers to the 8686 model, we need to enable the
- * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
- * bit to allow access to non-vendor registers. */
- if ((card->model == MODEL_8686) &&
- (host->caps & MMC_CAP_SDIO_IRQ) &&
- (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
- u8 reg;
-
- func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
- reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
- if (ret)
- goto release_int;
-
- reg |= SDIO_BUS_ECSI;
- sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
- if (ret)
- goto release_int;
- }
-
- card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
- if (ret)
- goto release_int;
-
- card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 1, &ret) << 8;
- if (ret)
- goto release_int;
-
- card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 2, &ret) << 16;
- if (ret)
- goto release_int;
-
- sdio_release_host(func);
-
sdio_set_drvdata(func, card);
lbs_deb_sdio("class = 0x%X, vendor = 0x%X, "
@@ -1079,14 +1198,11 @@ static int if_sdio_probe(struct sdio_func *func,
func->class, func->vendor, func->device,
model, (unsigned)card->ioport);
- ret = if_sdio_prog_firmware(card);
- if (ret)
- goto reclaim;
priv = lbs_add_card(card, &func->dev);
if (!priv) {
ret = -ENOMEM;
- goto reclaim;
+ goto free;
}
card->priv = priv;
@@ -1097,62 +1213,21 @@ static int if_sdio_probe(struct sdio_func *func,
priv->exit_deep_sleep = if_sdio_exit_deep_sleep;
priv->reset_deep_sleep_wakeup = if_sdio_reset_deep_sleep_wakeup;
priv->reset_card = if_sdio_reset_card;
+ priv->power_save = if_sdio_power_save;
+ priv->power_restore = if_sdio_power_restore;
- sdio_claim_host(func);
-
- /*
- * Get rx_unit if the chip is SD8688 or newer.
- * SD8385 & SD8686 do not have rx_unit.
- */
- if ((card->model != MODEL_8385)
- && (card->model != MODEL_8686))
- card->rx_unit = if_sdio_read_rx_unit(card);
- else
- card->rx_unit = 0;
-
- /*
- * Set up the interrupt handler late.
- *
- * If we set it up earlier, the (buggy) hardware generates a spurious
- * interrupt, even before the interrupt has been enabled, with
- * CCCR_INTx = 0.
- *
- * We register the interrupt handler late so that we can handle any
- * spurious interrupts, and also to avoid generation of that known
- * spurious interrupt in the first place.
- */
- ret = sdio_claim_irq(func, if_sdio_interrupt);
- if (ret)
- goto disable;
-
- /*
- * Enable interrupts now that everything is set up
- */
- sdio_writeb(func, 0x0f, IF_SDIO_H_INT_MASK, &ret);
- sdio_release_host(func);
+ ret = if_sdio_power_on(card);
if (ret)
- goto reclaim;
-
- priv->fw_ready = 1;
-
- /*
- * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
- */
- if (card->model == MODEL_8688) {
- struct cmd_header cmd;
-
- memset(&cmd, 0, sizeof(cmd));
-
- lbs_deb_sdio("send function INIT command\n");
- if (__lbs_cmd(priv, CMD_FUNC_INIT, &cmd, sizeof(cmd),
- lbs_cmd_copyback, (unsigned long) &cmd))
- netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n");
- }
+ goto err_activate_card;
ret = lbs_start_card(priv);
+ if_sdio_power_off(card);
if (ret)
goto err_activate_card;
+ /* Tell PM core that we don't need the card to be powered now */
+ pm_runtime_put_noidle(&func->dev);
+
out:
lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
@@ -1161,14 +1236,6 @@ out:
err_activate_card:
flush_workqueue(card->workqueue);
lbs_remove_card(priv);
-reclaim:
- sdio_claim_host(func);
-release_int:
- sdio_release_irq(func);
-disable:
- sdio_disable_func(func);
-release:
- sdio_release_host(func);
free:
destroy_workqueue(card->workqueue);
while (card->packets) {
@@ -1195,6 +1262,9 @@ static void if_sdio_remove(struct sdio_func *func)
card = sdio_get_drvdata(func);
+ /* Undo decrement done above in if_sdio_probe */
+ pm_runtime_get_noresume(&func->dev);
+
if (user_rmmod && (card->model == MODEL_8688)) {
/*
* FUNC_SHUTDOWN is required for SD8688 WLAN/BT
@@ -1219,11 +1289,6 @@ static void if_sdio_remove(struct sdio_func *func)
flush_workqueue(card->workqueue);
destroy_workqueue(card->workqueue);
- sdio_claim_host(func);
- sdio_release_irq(func);
- sdio_disable_func(func);
- sdio_release_host(func);
-
while (card->packets) {
packet = card->packets;
card->packets = card->packets->next;
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index e0286cfbc91..622ae6de0d8 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -531,10 +531,6 @@ static int if_spi_prog_helper_firmware(struct if_spi_card *card,
goto out;
err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG,
IF_SPI_CIC_CMD_DOWNLOAD_OVER);
- goto out;
-
- lbs_deb_spi("waiting for helper to boot...\n");
-
out:
if (err)
pr_err("failed to load helper firmware (err=%d)\n", err);
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index b5acc393a65..8147f1e2a0b 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -324,7 +324,7 @@ static int if_usb_probe(struct usb_interface *intf,
}
kparam_unblock_sysfs_write(fw_name);
- if (!(priv = lbs_add_card(cardp, &udev->dev)))
+ if (!(priv = lbs_add_card(cardp, &intf->dev)))
goto err_prog_firmware;
cardp->priv = priv;
@@ -956,7 +956,7 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp,
priv->dnld_sent = DNLD_RES_RECEIVED;
spin_unlock_irqrestore(&priv->driver_lock, flags);
- wake_up_interruptible(&priv->waitq);
+ wake_up(&priv->waitq);
return ret;
}
@@ -973,6 +973,23 @@ static const struct {
{ MODEL_8682, "libertas/usb8682.bin" }
};
+#ifdef CONFIG_OLPC
+
+static int try_olpc_fw(struct if_usb_card *cardp)
+{
+ int retval = -ENOENT;
+
+ /* try the OLPC firmware first; fall back to fw_table list */
+ if (machine_is_olpc() && cardp->model == MODEL_8388)
+ retval = request_firmware(&cardp->fw,
+ "libertas/usb8388_olpc.bin", &cardp->udev->dev);
+ return retval;
+}
+
+#else
+static int try_olpc_fw(struct if_usb_card *cardp) { return -ENOENT; }
+#endif /* !CONFIG_OLPC */
+
static int get_fw(struct if_usb_card *cardp, const char *fwname)
{
int i;
@@ -981,6 +998,10 @@ static int get_fw(struct if_usb_card *cardp, const char *fwname)
if (fwname)
return request_firmware(&cardp->fw, fwname, &cardp->udev->dev);
+ /* Handle OLPC firmware */
+ if (try_olpc_fw(cardp) == 0)
+ return 0;
+
/* Otherwise search for firmware to use */
for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
if (fw_table[i].model != cardp->model)
@@ -1112,6 +1133,15 @@ static int if_usb_suspend(struct usb_interface *intf, pm_message_t message)
if (priv->psstate != PS_STATE_FULL_POWER)
return -1;
+#ifdef CONFIG_OLPC
+ if (machine_is_olpc()) {
+ if (priv->wol_criteria == EHS_REMOVE_WAKEUP)
+ olpc_ec_wakeup_clear(EC_SCI_SRC_WLAN);
+ else
+ olpc_ec_wakeup_set(EC_SCI_SRC_WLAN);
+ }
+#endif
+
ret = lbs_suspend(priv);
if (ret)
goto out;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 94652c5a25d..b03779bcd54 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -23,6 +23,7 @@
#include "cfg.h"
#include "debugfs.h"
#include "cmd.h"
+#include "mesh.h"
#define DRIVER_RELEASE_VERSION "323.p0"
const char lbs_driver_version[] = "COMM-USB8388-" DRIVER_RELEASE_VERSION
@@ -98,6 +99,69 @@ u8 lbs_data_rate_to_fw_index(u32 rate)
return 0;
}
+int lbs_set_iface_type(struct lbs_private *priv, enum nl80211_iftype type)
+{
+ int ret = 0;
+
+ switch (type) {
+ case NL80211_IFTYPE_MONITOR:
+ ret = lbs_set_monitor_mode(priv, 1);
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
+ ret = lbs_set_monitor_mode(priv, 0);
+ if (!ret)
+ ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 1);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
+ ret = lbs_set_monitor_mode(priv, 0);
+ if (!ret)
+ ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 2);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ }
+ return ret;
+}
+
+int lbs_start_iface(struct lbs_private *priv)
+{
+ struct cmd_ds_802_11_mac_address cmd;
+ int ret;
+
+ if (priv->power_restore) {
+ ret = priv->power_restore(priv);
+ if (ret)
+ return ret;
+ }
+
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+ cmd.action = cpu_to_le16(CMD_ACT_SET);
+ memcpy(cmd.macadd, priv->current_addr, ETH_ALEN);
+
+ ret = lbs_cmd_with_response(priv, CMD_802_11_MAC_ADDRESS, &cmd);
+ if (ret) {
+ lbs_deb_net("set MAC address failed\n");
+ goto err;
+ }
+
+ ret = lbs_set_iface_type(priv, priv->wdev->iftype);
+ if (ret) {
+ lbs_deb_net("set iface type failed\n");
+ goto err;
+ }
+
+ lbs_update_channel(priv);
+
+ priv->iface_running = true;
+ return 0;
+
+err:
+ if (priv->power_save)
+ priv->power_save(priv);
+ return ret;
+}
/**
* lbs_dev_open - open the ethX interface
@@ -111,23 +175,65 @@ static int lbs_dev_open(struct net_device *dev)
int ret = 0;
lbs_deb_enter(LBS_DEB_NET);
+ if (!priv->iface_running) {
+ ret = lbs_start_iface(priv);
+ if (ret)
+ goto out;
+ }
spin_lock_irq(&priv->driver_lock);
- priv->stopping = false;
- if (priv->connect_status == LBS_CONNECTED)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
+ netif_carrier_off(dev);
if (!priv->tx_pending_len)
netif_wake_queue(dev);
spin_unlock_irq(&priv->driver_lock);
+
+out:
lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret);
return ret;
}
+static bool lbs_command_queue_empty(struct lbs_private *priv)
+{
+ unsigned long flags;
+ bool ret;
+ spin_lock_irqsave(&priv->driver_lock, flags);
+ ret = priv->cur_cmd == NULL && list_empty(&priv->cmdpendingq);
+ spin_unlock_irqrestore(&priv->driver_lock, flags);
+ return ret;
+}
+
+int lbs_stop_iface(struct lbs_private *priv)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ lbs_deb_enter(LBS_DEB_MAIN);
+
+ spin_lock_irqsave(&priv->driver_lock, flags);
+ priv->iface_running = false;
+ kfree_skb(priv->currenttxskb);
+ priv->currenttxskb = NULL;
+ priv->tx_pending_len = 0;
+ spin_unlock_irqrestore(&priv->driver_lock, flags);
+
+ cancel_work_sync(&priv->mcast_work);
+ del_timer_sync(&priv->tx_lockup_timer);
+
+ /* Disable command processing, and wait for all commands to complete */
+ lbs_deb_main("waiting for commands to complete\n");
+ wait_event(priv->waitq, lbs_command_queue_empty(priv));
+ lbs_deb_main("all commands completed\n");
+
+ if (priv->power_save)
+ ret = priv->power_save(priv);
+
+ lbs_deb_leave(LBS_DEB_MAIN);
+ return ret;
+}
+
/**
* lbs_eth_stop - close the ethX interface
*
@@ -140,18 +246,25 @@ static int lbs_eth_stop(struct net_device *dev)
lbs_deb_enter(LBS_DEB_NET);
+ if (priv->connect_status == LBS_CONNECTED)
+ lbs_disconnect(priv, WLAN_REASON_DEAUTH_LEAVING);
+
spin_lock_irq(&priv->driver_lock);
- priv->stopping = true;
netif_stop_queue(dev);
spin_unlock_irq(&priv->driver_lock);
- schedule_work(&priv->mcast_work);
+ lbs_update_mcast(priv);
cancel_delayed_work_sync(&priv->scan_work);
if (priv->scan_req) {
cfg80211_scan_done(priv->scan_req, false);
priv->scan_req = NULL;
}
+ netif_carrier_off(priv->dev);
+
+ if (!lbs_iface_active(priv))
+ lbs_stop_iface(priv);
+
lbs_deb_leave(LBS_DEB_NET);
return 0;
}
@@ -163,13 +276,14 @@ void lbs_host_to_card_done(struct lbs_private *priv)
lbs_deb_enter(LBS_DEB_THREAD);
spin_lock_irqsave(&priv->driver_lock, flags);
+ del_timer(&priv->tx_lockup_timer);
priv->dnld_sent = DNLD_RES_RECEIVED;
/* Wake main thread if commands are pending */
if (!priv->cur_cmd || priv->tx_pending_len > 0) {
if (!priv->wakeup_dev_required)
- wake_up_interruptible(&priv->waitq);
+ wake_up(&priv->waitq);
}
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -182,29 +296,24 @@ int lbs_set_mac_address(struct net_device *dev, void *addr)
int ret = 0;
struct lbs_private *priv = dev->ml_priv;
struct sockaddr *phwaddr = addr;
- struct cmd_ds_802_11_mac_address cmd;
lbs_deb_enter(LBS_DEB_NET);
+ /*
+ * Can only set MAC address when all interfaces are down, to be written
+ * to the hardware when one of them is brought up.
+ */
+ if (lbs_iface_active(priv))
+ return -EBUSY;
+
/* In case it was called from the mesh device */
dev = priv->dev;
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(CMD_ACT_SET);
- memcpy(cmd.macadd, phwaddr->sa_data, ETH_ALEN);
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_MAC_ADDRESS, &cmd);
- if (ret) {
- lbs_deb_net("set MAC address failed\n");
- goto done;
- }
-
memcpy(priv->current_addr, phwaddr->sa_data, ETH_ALEN);
memcpy(dev->dev_addr, phwaddr->sa_data, ETH_ALEN);
if (priv->mesh_dev)
memcpy(priv->mesh_dev->dev_addr, phwaddr->sa_data, ETH_ALEN);
-done:
lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret);
return ret;
}
@@ -258,18 +367,18 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
return i;
}
-static void lbs_set_mcast_worker(struct work_struct *work)
+void lbs_update_mcast(struct lbs_private *priv)
{
- struct lbs_private *priv = container_of(work, struct lbs_private, mcast_work);
struct cmd_ds_mac_multicast_adr mcast_cmd;
- int dev_flags;
+ int dev_flags = 0;
int nr_addrs;
int old_mac_control = priv->mac_control;
lbs_deb_enter(LBS_DEB_NET);
- dev_flags = priv->dev->flags;
- if (priv->mesh_dev)
+ if (netif_running(priv->dev))
+ dev_flags |= priv->dev->flags;
+ if (priv->mesh_dev && netif_running(priv->mesh_dev))
dev_flags |= priv->mesh_dev->flags;
if (dev_flags & IFF_PROMISC) {
@@ -315,6 +424,12 @@ static void lbs_set_mcast_worker(struct work_struct *work)
lbs_deb_leave(LBS_DEB_NET);
}
+static void lbs_set_mcast_worker(struct work_struct *work)
+{
+ struct lbs_private *priv = container_of(work, struct lbs_private, mcast_work);
+ lbs_update_mcast(priv);
+}
+
void lbs_set_multicast_list(struct net_device *dev)
{
struct lbs_private *priv = dev->ml_priv;
@@ -504,6 +619,9 @@ static int lbs_thread(void *data)
if (ret) {
lbs_deb_tx("host_to_card failed %d\n", ret);
priv->dnld_sent = DNLD_RES_RECEIVED;
+ } else {
+ mod_timer(&priv->tx_lockup_timer,
+ jiffies + (HZ * 5));
}
priv->tx_pending_len = 0;
if (!priv->currenttxskb) {
@@ -520,6 +638,7 @@ static int lbs_thread(void *data)
}
del_timer(&priv->command_timer);
+ del_timer(&priv->tx_lockup_timer);
del_timer(&priv->auto_deepsleep_timer);
lbs_deb_leave(LBS_DEB_THREAD);
@@ -647,13 +766,39 @@ static void lbs_cmd_timeout_handler(unsigned long data)
if (priv->dnld_sent == DNLD_CMD_SENT)
priv->dnld_sent = DNLD_RES_RECEIVED;
- wake_up_interruptible(&priv->waitq);
+ wake_up(&priv->waitq);
out:
spin_unlock_irqrestore(&priv->driver_lock, flags);
lbs_deb_leave(LBS_DEB_CMD);
}
/**
+ * lbs_tx_lockup_handler - handles the timeout of the passing of TX frames
+ * to the hardware. This is known to frequently happen with SD8686 when
+ * waking up after a Wake-on-WLAN-triggered resume.
+ *
+ * @data: &struct lbs_private pointer
+ */
+static void lbs_tx_lockup_handler(unsigned long data)
+{
+ struct lbs_private *priv = (struct lbs_private *)data;
+ unsigned long flags;
+
+ lbs_deb_enter(LBS_DEB_TX);
+ spin_lock_irqsave(&priv->driver_lock, flags);
+
+ netdev_info(priv->dev, "TX lockup detected\n");
+ if (priv->reset_card)
+ priv->reset_card(priv);
+
+ priv->dnld_sent = DNLD_RES_RECEIVED;
+ wake_up_interruptible(&priv->waitq);
+
+ spin_unlock_irqrestore(&priv->driver_lock, flags);
+ lbs_deb_leave(LBS_DEB_TX);
+}
+
+/**
* auto_deepsleep_timer_fn - put the device back to deep sleep mode when
* timer expires and no activity (command, event, data etc.) is detected.
* @data: &struct lbs_private pointer
@@ -739,6 +884,8 @@ static int lbs_init_adapter(struct lbs_private *priv)
setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
(unsigned long)priv);
+ setup_timer(&priv->tx_lockup_timer, lbs_tx_lockup_handler,
+ (unsigned long)priv);
setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn,
(unsigned long)priv);
@@ -776,6 +923,7 @@ static void lbs_free_adapter(struct lbs_private *priv)
lbs_free_cmd_buffer(priv);
kfifo_free(&priv->event_fifo);
del_timer(&priv->command_timer);
+ del_timer(&priv->tx_lockup_timer);
del_timer(&priv->auto_deepsleep_timer);
lbs_deb_leave(LBS_DEB_MAIN);
@@ -786,7 +934,7 @@ static const struct net_device_ops lbs_netdev_ops = {
.ndo_stop = lbs_eth_stop,
.ndo_start_xmit = lbs_hard_start_xmit,
.ndo_set_mac_address = lbs_set_mac_address,
- .ndo_set_multicast_list = lbs_set_multicast_list,
+ .ndo_set_rx_mode = lbs_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
@@ -889,10 +1037,6 @@ void lbs_remove_card(struct lbs_private *priv)
lbs_remove_mesh(priv);
lbs_scan_deinit(priv);
- dev = priv->dev;
-
- cancel_work_sync(&priv->mcast_work);
-
/* worker thread destruction blocks on the in-flight command which
* should have been cleared already in lbs_stop_card().
*/
@@ -950,17 +1094,18 @@ int lbs_start_card(struct lbs_private *priv)
if (ret)
goto done;
+ if (!lbs_disablemesh)
+ lbs_init_mesh(priv);
+ else
+ pr_info("%s: mesh disabled\n", dev->name);
+
if (lbs_cfg_register(priv)) {
pr_err("cannot register device\n");
goto done;
}
- lbs_update_channel(priv);
-
- if (!lbs_disablemesh)
- lbs_init_mesh(priv);
- else
- pr_info("%s: mesh disabled\n", dev->name);
+ if (lbs_mesh_activated(priv))
+ lbs_start_mesh(priv);
lbs_debugfs_init_one(priv, dev);
@@ -978,8 +1123,6 @@ EXPORT_SYMBOL_GPL(lbs_start_card);
void lbs_stop_card(struct lbs_private *priv)
{
struct net_device *dev;
- struct cmd_ctrl_node *cmdnode;
- unsigned long flags;
lbs_deb_enter(LBS_DEB_MAIN);
@@ -992,30 +1135,6 @@ void lbs_stop_card(struct lbs_private *priv)
lbs_debugfs_remove_one(priv);
lbs_deinit_mesh(priv);
-
- /* Delete the timeout of the currently processing command */
- del_timer_sync(&priv->command_timer);
- del_timer_sync(&priv->auto_deepsleep_timer);
-
- /* Flush pending command nodes */
- spin_lock_irqsave(&priv->driver_lock, flags);
- lbs_deb_main("clearing pending commands\n");
- list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
- cmdnode->result = -ENOENT;
- cmdnode->cmdwaitqwoken = 1;
- wake_up(&cmdnode->cmdwait_q);
- }
-
- /* Flush the command the card is currently processing */
- if (priv->cur_cmd) {
- lbs_deb_main("clearing current command\n");
- priv->cur_cmd->result = -ENOENT;
- priv->cur_cmd->cmdwaitqwoken = 1;
- wake_up(&priv->cur_cmd->cmdwait_q);
- }
- lbs_deb_main("done clearing commands\n");
- spin_unlock_irqrestore(&priv->driver_lock, flags);
-
unregister_netdev(dev);
out:
@@ -1036,7 +1155,7 @@ void lbs_queue_event(struct lbs_private *priv, u32 event)
kfifo_in(&priv->event_fifo, (unsigned char *) &event, sizeof(u32));
- wake_up_interruptible(&priv->waitq);
+ wake_up(&priv->waitq);
spin_unlock_irqrestore(&priv->driver_lock, flags);
lbs_deb_leave(LBS_DEB_THREAD);
@@ -1054,7 +1173,7 @@ void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx)
BUG_ON(resp_idx > 1);
priv->resp_idx = resp_idx;
- wake_up_interruptible(&priv->waitq);
+ wake_up(&priv->waitq);
lbs_deb_leave(LBS_DEB_THREAD);
}
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index be72c08ea2a..e87c031b298 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -129,6 +129,19 @@ static int lbs_mesh_config(struct lbs_private *priv, uint16_t action,
return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
}
+int lbs_mesh_set_channel(struct lbs_private *priv, u8 channel)
+{
+ return lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, channel);
+}
+
+static uint16_t lbs_mesh_get_channel(struct lbs_private *priv)
+{
+ struct wireless_dev *mesh_wdev = priv->mesh_dev->ieee80211_ptr;
+ if (mesh_wdev->channel)
+ return mesh_wdev->channel->hw_value;
+ else
+ return 1;
+}
/***************************************************************************
* Mesh sysfs support
@@ -812,7 +825,6 @@ static void lbs_persist_config_remove(struct net_device *dev)
*/
int lbs_init_mesh(struct lbs_private *priv)
{
- struct net_device *dev = priv->dev;
int ret = 0;
lbs_deb_enter(LBS_DEB_MESH);
@@ -837,11 +849,9 @@ int lbs_init_mesh(struct lbs_private *priv)
useful */
priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID;
- if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
- priv->channel)) {
+ if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1)) {
priv->mesh_tlv = TLV_TYPE_MESH_ID;
- if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
- priv->channel))
+ if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1))
priv->mesh_tlv = 0;
}
} else
@@ -851,23 +861,16 @@ int lbs_init_mesh(struct lbs_private *priv)
* 0x100+37; Do not invoke command with old TLV.
*/
priv->mesh_tlv = TLV_TYPE_MESH_ID;
- if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
- priv->channel))
+ if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1))
priv->mesh_tlv = 0;
}
/* Stop meshing until interface is brought up */
- lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, priv->channel);
+ lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, 1);
if (priv->mesh_tlv) {
sprintf(priv->mesh_ssid, "mesh");
priv->mesh_ssid_len = 4;
-
- lbs_add_mesh(priv);
-
- if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
- netdev_err(dev, "cannot register lbs_mesh attribute\n");
-
ret = 1;
}
@@ -875,6 +878,13 @@ int lbs_init_mesh(struct lbs_private *priv)
return ret;
}
+void lbs_start_mesh(struct lbs_private *priv)
+{
+ lbs_add_mesh(priv);
+
+ if (device_create_file(&priv->dev->dev, &dev_attr_lbs_mesh))
+ netdev_err(priv->dev, "cannot register lbs_mesh attribute\n");
+}
int lbs_deinit_mesh(struct lbs_private *priv)
{
@@ -904,7 +914,8 @@ static int lbs_mesh_stop(struct net_device *dev)
struct lbs_private *priv = dev->ml_priv;
lbs_deb_enter(LBS_DEB_MESH);
- lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, priv->channel);
+ lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP,
+ lbs_mesh_get_channel(priv));
spin_lock_irq(&priv->driver_lock);
@@ -913,7 +924,9 @@ static int lbs_mesh_stop(struct net_device *dev)
spin_unlock_irq(&priv->driver_lock);
- schedule_work(&priv->mcast_work);
+ lbs_update_mcast(priv);
+ if (!lbs_iface_active(priv))
+ lbs_stop_iface(priv);
lbs_deb_leave(LBS_DEB_MESH);
return 0;
@@ -931,6 +944,11 @@ static int lbs_mesh_dev_open(struct net_device *dev)
int ret = 0;
lbs_deb_enter(LBS_DEB_NET);
+ if (!priv->iface_running) {
+ ret = lbs_start_iface(priv);
+ if (ret)
+ goto out;
+ }
spin_lock_irq(&priv->driver_lock);
@@ -947,7 +965,8 @@ static int lbs_mesh_dev_open(struct net_device *dev)
spin_unlock_irq(&priv->driver_lock);
- ret = lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, priv->channel);
+ ret = lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
+ lbs_mesh_get_channel(priv));
out:
lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret);
@@ -959,7 +978,7 @@ static const struct net_device_ops mesh_netdev_ops = {
.ndo_stop = lbs_mesh_stop,
.ndo_start_xmit = lbs_hard_start_xmit,
.ndo_set_mac_address = lbs_set_mac_address,
- .ndo_set_multicast_list = lbs_set_multicast_list,
+ .ndo_set_rx_mode = lbs_set_multicast_list,
};
/**
@@ -971,18 +990,32 @@ static const struct net_device_ops mesh_netdev_ops = {
static int lbs_add_mesh(struct lbs_private *priv)
{
struct net_device *mesh_dev = NULL;
+ struct wireless_dev *mesh_wdev;
int ret = 0;
lbs_deb_enter(LBS_DEB_MESH);
/* Allocate a virtual mesh device */
+ mesh_wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!mesh_wdev) {
+ lbs_deb_mesh("init mshX wireless device failed\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
mesh_dev = alloc_netdev(0, "msh%d", ether_setup);
if (!mesh_dev) {
lbs_deb_mesh("init mshX device failed\n");
ret = -ENOMEM;
- goto done;
+ goto err_free_wdev;
}
+
+ mesh_wdev->iftype = NL80211_IFTYPE_MESH_POINT;
+ mesh_wdev->wiphy = priv->wdev->wiphy;
+ mesh_wdev->netdev = mesh_dev;
+
mesh_dev->ml_priv = priv;
+ mesh_dev->ieee80211_ptr = mesh_wdev;
priv->mesh_dev = mesh_dev;
mesh_dev->netdev_ops = &mesh_netdev_ops;
@@ -996,7 +1029,7 @@ static int lbs_add_mesh(struct lbs_private *priv)
ret = register_netdev(mesh_dev);
if (ret) {
pr_err("cannot register mshX virtual interface\n");
- goto err_free;
+ goto err_free_netdev;
}
ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
@@ -1012,9 +1045,12 @@ static int lbs_add_mesh(struct lbs_private *priv)
err_unregister:
unregister_netdev(mesh_dev);
-err_free:
+err_free_netdev:
free_netdev(mesh_dev);
+err_free_wdev:
+ kfree(mesh_wdev);
+
done:
lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
return ret;
@@ -1035,6 +1071,7 @@ void lbs_remove_mesh(struct lbs_private *priv)
lbs_persist_config_remove(mesh_dev);
unregister_netdev(mesh_dev);
priv->mesh_dev = NULL;
+ kfree(mesh_dev->ieee80211_ptr);
free_netdev(mesh_dev);
lbs_deb_leave(LBS_DEB_MESH);
}
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
index 50144913f2a..6603f341c87 100644
--- a/drivers/net/wireless/libertas/mesh.h
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -9,30 +9,25 @@
#include <net/lib80211.h>
#include "host.h"
+#include "dev.h"
#ifdef CONFIG_LIBERTAS_MESH
-/* Mesh statistics */
-struct lbs_mesh_stats {
- u32 fwd_bcast_cnt; /* Fwd: Broadcast counter */
- u32 fwd_unicast_cnt; /* Fwd: Unicast counter */
- u32 fwd_drop_ttl; /* Fwd: TTL zero */
- u32 fwd_drop_rbt; /* Fwd: Recently Broadcasted */
- u32 fwd_drop_noroute; /* Fwd: No route to Destination */
- u32 fwd_drop_nobuf; /* Fwd: Run out of internal buffers */
- u32 drop_blind; /* Rx: Dropped by blinding table */
- u32 tx_failed_cnt; /* Tx: Failed transmissions */
-};
-
-
struct net_device;
-struct lbs_private;
int lbs_init_mesh(struct lbs_private *priv);
+void lbs_start_mesh(struct lbs_private *priv);
int lbs_deinit_mesh(struct lbs_private *priv);
void lbs_remove_mesh(struct lbs_private *priv);
+static inline bool lbs_mesh_activated(struct lbs_private *priv)
+{
+ /* Mesh SSID is only programmed after successful init */
+ return priv->mesh_ssid_len != 0;
+}
+
+int lbs_mesh_set_channel(struct lbs_private *priv, u8 channel);
/* Sending / Receiving */
@@ -67,11 +62,13 @@ void lbs_mesh_ethtool_get_strings(struct net_device *dev,
#define lbs_init_mesh(priv)
#define lbs_deinit_mesh(priv)
+#define lbs_start_mesh(priv)
#define lbs_add_mesh(priv)
#define lbs_remove_mesh(priv)
#define lbs_mesh_set_dev(priv, dev, rxpd) (dev)
#define lbs_mesh_set_txpd(priv, dev, txpd)
-#define lbs_mesh_config(priv, enable, chan)
+#define lbs_mesh_set_channel(priv, channel) (0)
+#define lbs_mesh_activated(priv) (false)
#endif
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index bfb8898ae51..62e10eeadd7 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -15,6 +15,7 @@
#include "radiotap.h"
#include "decl.h"
#include "dev.h"
+#include "mesh.h"
struct eth803hdr {
u8 dest_addr[6];
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index a6e85134cfe..8f127520d78 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -12,6 +12,7 @@
#include "decl.h"
#include "defs.h"
#include "dev.h"
+#include "mesh.h"
/**
* convert_radiotap_rate_to_mv - converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE
diff --git a/drivers/net/wireless/libertas_tf/deb_defs.h b/drivers/net/wireless/libertas_tf/deb_defs.h
index ae753962d8b..4bd3dc5adf7 100644
--- a/drivers/net/wireless/libertas_tf/deb_defs.h
+++ b/drivers/net/wireless/libertas_tf/deb_defs.h
@@ -3,7 +3,7 @@
* global variable declaration.
*/
#ifndef _LBS_DEB_DEFS_H_
-#define _LBS_DEB_EFS_H_
+#define _LBS_DEB_DEFS_H_
#ifndef DRV_NAME
#define DRV_NAME "libertas_tf"
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 031cd89b176..68455a2307c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -612,6 +612,12 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
rx_status.freq = data->channel->center_freq;
rx_status.band = data->channel->band;
rx_status.rate_idx = info->control.rates[0].idx;
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
+ rx_status.flag |= RX_FLAG_HT;
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ rx_status.flag |= RX_FLAG_40MHZ;
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
/* TODO: simulate real signal strength (and optional packet loss) */
rx_status.signal = data->power_level - 50;
@@ -964,7 +970,8 @@ static int mac80211_hwsim_set_tim(struct ieee80211_hw *hw,
}
static int mac80211_hwsim_conf_tx(
- struct ieee80211_hw *hw, u16 queue,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
wiphy_debug(hw->wiphy,
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 1a453a605b3..079e5532e68 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -193,7 +193,6 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_src = skb_dequeue(&pra_list->skb_head);
pra_list->total_pkts_size -= skb_src->len;
- pra_list->total_pkts--;
atomic_dec(&priv->wmm.tx_pkts_queued);
@@ -247,8 +246,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
tx_param.next_pkt_len = 0;
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
- skb_aggr->data,
- skb_aggr->len, &tx_param);
+ skb_aggr, &tx_param);
switch (ret) {
case -EBUSY:
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
@@ -269,7 +267,6 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_queue_tail(&pra_list->skb_head, skb_aggr);
pra_list->total_pkts_size += skb_aggr->len;
- pra_list->total_pkts++;
atomic_inc(&priv->wmm.tx_pkts_queued);
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.h b/drivers/net/wireless/mwifiex/11n_aggr.h
index 9c6dca7ab02..900e1c62a0c 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.h
+++ b/drivers/net/wireless/mwifiex/11n_aggr.h
@@ -21,6 +21,7 @@
#define _MWIFIEX_11N_AGGR_H_
#define PKT_TYPE_AMSDU 0xE6
+#define MIN_NUM_AMSDU 2
int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 86962920cef..8f2797aa0c6 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -19,3 +19,14 @@ config MWIFIEX_SDIO
If you choose to build it as a module, it will be called
mwifiex_sdio.
+
+config MWIFIEX_PCIE
+ tristate "Marvell WiFi-Ex Driver for PCIE 8766"
+ depends on MWIFIEX && PCI
+ select FW_LOADER
+ ---help---
+ This adds support for wireless adapters based on Marvell
+ 8766 chipset with PCIe interface.
+
+ If you choose to build it as a module, it will be called
+ mwifiex_pcie.
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index 42cb733ea33..b0257ad1bbe 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -39,3 +39,6 @@ obj-$(CONFIG_MWIFIEX) += mwifiex.o
mwifiex_sdio-y += sdio.o
obj-$(CONFIG_MWIFIEX_SDIO) += mwifiex_sdio.o
+
+mwifiex_pcie-y += pcie.o
+obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 352d2c5da1f..462c71067bf 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -543,12 +543,28 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
ret = -EFAULT;
}
+ /*
+ * Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. Valid
+ * MCS index values for us are 0 to 7.
+ */
+ if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 8)) {
+ sinfo->txrate.mcs = priv->tx_rate;
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ /* 40MHz rate */
+ if (priv->tx_htinfo & BIT(1))
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+ /* SGI enabled */
+ if (priv->tx_htinfo & BIT(2))
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ }
+
sinfo->rx_bytes = priv->stats.rx_bytes;
sinfo->tx_bytes = priv->stats.tx_bytes;
sinfo->rx_packets = priv->stats.rx_packets;
sinfo->tx_packets = priv->stats.tx_packets;
- sinfo->signal = priv->w_stats.qual.level;
- sinfo->txrate.legacy = rate.rate;
+ sinfo->signal = priv->qual_level;
+ /* bit rate is in 500 kb/s units. Convert it to 100kb/s units */
+ sinfo->txrate.legacy = rate.rate * 5;
return ret;
}
@@ -565,8 +581,6 @@ mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- mwifiex_dump_station_info(priv, sinfo);
-
if (!priv->media_connected)
return -ENOENT;
if (memcmp(mac, priv->cfg_bssid, ETH_ALEN))
@@ -768,6 +782,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
struct mwifiex_bss_info bss_info;
int ie_len;
u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
+ enum ieee80211_band band;
if (mwifiex_get_bss_info(priv, &bss_info))
return -1;
@@ -780,9 +795,10 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
bss_info.ssid.ssid_len);
ie_len = ie_buf[1] + sizeof(struct ieee_types_header);
+ band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
chan = __ieee80211_get_channel(priv->wdev->wiphy,
ieee80211_channel_to_frequency(bss_info.bss_chan,
- priv->curr_bss_params.band));
+ band));
cfg80211_inform_bss(priv->wdev->wiphy, chan,
bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
@@ -793,139 +809,6 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
}
/*
- * This function informs the CFG802.11 subsystem of a new BSS connection.
- *
- * The following information are sent to the CFG802.11 subsystem
- * to register the new BSS connection. If we do not register the new BSS,
- * a kernel panic will result.
- * - MAC address
- * - Capabilities
- * - Beacon period
- * - RSSI value
- * - Channel
- * - Supported rates IE
- * - Extended capabilities IE
- * - DS parameter set IE
- * - HT Capability IE
- * - Vendor Specific IE (221)
- * - WPA IE
- * - RSN IE
- */
-static int mwifiex_inform_bss_from_scan_result(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *ssid)
-{
- struct mwifiex_bssdescriptor *scan_table;
- int i, j;
- struct ieee80211_channel *chan;
- u8 *ie, *ie_buf;
- u32 ie_len;
- u8 *beacon;
- int beacon_size;
- u8 element_id, element_len;
-
-#define MAX_IE_BUF 2048
- ie_buf = kzalloc(MAX_IE_BUF, GFP_KERNEL);
- if (!ie_buf) {
- dev_err(priv->adapter->dev, "%s: failed to alloc ie_buf\n",
- __func__);
- return -ENOMEM;
- }
-
- scan_table = priv->adapter->scan_table;
- for (i = 0; i < priv->adapter->num_in_scan_table; i++) {
- if (ssid) {
- /* Inform specific BSS only */
- if (memcmp(ssid->ssid, scan_table[i].ssid.ssid,
- ssid->ssid_len))
- continue;
- }
- memset(ie_buf, 0, MAX_IE_BUF);
- ie_buf[0] = WLAN_EID_SSID;
- ie_buf[1] = scan_table[i].ssid.ssid_len;
- memcpy(&ie_buf[sizeof(struct ieee_types_header)],
- scan_table[i].ssid.ssid, ie_buf[1]);
-
- ie = ie_buf + ie_buf[1] + sizeof(struct ieee_types_header);
- ie_len = ie_buf[1] + sizeof(struct ieee_types_header);
-
- ie[0] = WLAN_EID_SUPP_RATES;
-
- for (j = 0; j < sizeof(scan_table[i].supported_rates); j++) {
- if (!scan_table[i].supported_rates[j])
- break;
- else
- ie[j + sizeof(struct ieee_types_header)] =
- scan_table[i].supported_rates[j];
- }
-
- ie[1] = j;
- ie_len += ie[1] + sizeof(struct ieee_types_header);
-
- beacon = scan_table[i].beacon_buf;
- beacon_size = scan_table[i].beacon_buf_size;
-
- /* Skip time stamp, beacon interval and capability */
-
- if (beacon) {
- beacon += sizeof(scan_table[i].beacon_period)
- + sizeof(scan_table[i].time_stamp) +
- +sizeof(scan_table[i].cap_info_bitmap);
-
- beacon_size -= sizeof(scan_table[i].beacon_period)
- + sizeof(scan_table[i].time_stamp)
- + sizeof(scan_table[i].cap_info_bitmap);
- }
-
- while (beacon_size >= sizeof(struct ieee_types_header)) {
- ie = ie_buf + ie_len;
- element_id = *beacon;
- element_len = *(beacon + 1);
- if (beacon_size < (int) element_len +
- sizeof(struct ieee_types_header)) {
- dev_err(priv->adapter->dev, "%s: in processing"
- " IE, bytes left < IE length\n",
- __func__);
- break;
- }
- switch (element_id) {
- case WLAN_EID_EXT_CAPABILITY:
- case WLAN_EID_DS_PARAMS:
- case WLAN_EID_HT_CAPABILITY:
- case WLAN_EID_VENDOR_SPECIFIC:
- case WLAN_EID_RSN:
- case WLAN_EID_BSS_AC_ACCESS_DELAY:
- ie[0] = element_id;
- ie[1] = element_len;
- memcpy(&ie[sizeof(struct ieee_types_header)],
- (u8 *) beacon
- + sizeof(struct ieee_types_header),
- element_len);
- ie_len += ie[1] +
- sizeof(struct ieee_types_header);
- break;
- default:
- break;
- }
- beacon += element_len +
- sizeof(struct ieee_types_header);
- beacon_size -= element_len +
- sizeof(struct ieee_types_header);
- }
- chan = ieee80211_get_channel(priv->wdev->wiphy,
- scan_table[i].freq);
- cfg80211_inform_bss(priv->wdev->wiphy, chan,
- scan_table[i].mac_address,
- 0, scan_table[i].cap_info_bitmap,
- scan_table[i].beacon_period,
- ie_buf, ie_len,
- scan_table[i].rssi, GFP_KERNEL);
- }
-
- kfree(ie_buf);
- return 0;
-}
-
-/*
* This function connects with a BSS.
*
* This function handles both Infra and Ad-Hoc modes. It also performs
@@ -937,8 +820,7 @@ static int mwifiex_inform_bss_from_scan_result(struct mwifiex_private *priv,
* For Infra mode, the function returns failure if the specified SSID
* is not found in scan table. However, for Ad-Hoc mode, it can create
* the IBSS if it does not exist. On successful completion in either case,
- * the function notifies the CFG802.11 subsystem of the new BSS connection,
- * otherwise the kernel will panic.
+ * the function notifies the CFG802.11 subsystem of the new BSS connection.
*/
static int
mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
@@ -946,11 +828,11 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
struct cfg80211_connect_params *sme, bool privacy)
{
struct mwifiex_802_11_ssid req_ssid;
- struct mwifiex_ssid_bssid ssid_bssid;
int ret, auth_type = 0;
+ struct cfg80211_bss *bss = NULL;
+ u8 is_scanning_required = 0;
memset(&req_ssid, 0, sizeof(struct mwifiex_802_11_ssid));
- memset(&ssid_bssid, 0, sizeof(struct mwifiex_ssid_bssid));
req_ssid.ssid_len = ssid_len;
if (ssid_len > IEEE80211_MAX_SSID_LEN) {
@@ -1028,30 +910,48 @@ done:
return -EFAULT;
}
+ /*
+ * Scan entries are valid for some time (15 sec). So we can save one
+ * active scan time if we just try cfg80211_get_bss first. If it fails
+ * then request scan and cfg80211_get_bss() again for final output.
+ */
+ while (1) {
+ if (is_scanning_required) {
+ /* Do specific SSID scanning */
+ if (mwifiex_request_scan(priv, &req_ssid)) {
+ dev_err(priv->adapter->dev, "scan error\n");
+ return -EFAULT;
+ }
+ }
- memcpy(&ssid_bssid.ssid, &req_ssid, sizeof(struct mwifiex_802_11_ssid));
-
- if (mode != NL80211_IFTYPE_ADHOC) {
- if (mwifiex_find_best_bss(priv, &ssid_bssid))
- return -EFAULT;
- /* Inform the BSS information to kernel, otherwise
- * kernel will give a panic after successful assoc */
- if (mwifiex_inform_bss_from_scan_result(priv, &req_ssid))
- return -EFAULT;
+ /* Find the BSS we want using available scan results */
+ if (mode == NL80211_IFTYPE_ADHOC)
+ bss = cfg80211_get_bss(priv->wdev->wiphy, channel,
+ bssid, ssid, ssid_len,
+ WLAN_CAPABILITY_IBSS,
+ WLAN_CAPABILITY_IBSS);
+ else
+ bss = cfg80211_get_bss(priv->wdev->wiphy, channel,
+ bssid, ssid, ssid_len,
+ WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+
+ if (!bss) {
+ if (is_scanning_required) {
+ dev_warn(priv->adapter->dev, "assoc: requested "
+ "bss not found in scan results\n");
+ break;
+ }
+ is_scanning_required = 1;
+ } else {
+ dev_dbg(priv->adapter->dev, "info: trying to associate to %s and bssid %pM\n",
+ (char *) req_ssid.ssid, bss->bssid);
+ memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
+ break;
+ }
}
- dev_dbg(priv->adapter->dev, "info: trying to associate to %s and bssid %pM\n",
- (char *) req_ssid.ssid, ssid_bssid.bssid);
-
- memcpy(&priv->cfg_bssid, ssid_bssid.bssid, 6);
-
- /* Connect to BSS by ESSID */
- memset(&ssid_bssid.bssid, 0, ETH_ALEN);
-
- if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
-
- if (mwifiex_bss_start(priv, &ssid_bssid))
+ if (mwifiex_bss_start(priv, bss, &req_ssid))
return -EFAULT;
if (mode == NL80211_IFTYPE_ADHOC) {
@@ -1262,8 +1162,150 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
}
+/*
+ * create a new virtual interface with the given name
+ */
+struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
+{
+ struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
+ struct mwifiex_adapter *adapter;
+ struct net_device *dev;
+ void *mdev_priv;
+
+ if (!priv)
+ return NULL;
+
+ adapter = priv->adapter;
+ if (!adapter)
+ return NULL;
+
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ if (priv->bss_mode) {
+ wiphy_err(wiphy, "cannot create multiple"
+ " station/adhoc interfaces\n");
+ return NULL;
+ }
+
+ if (type == NL80211_IFTYPE_UNSPECIFIED)
+ priv->bss_mode = NL80211_IFTYPE_STATION;
+ else
+ priv->bss_mode = type;
+
+ priv->bss_type = MWIFIEX_BSS_TYPE_STA;
+ priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
+ priv->bss_priority = 0;
+ priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_index = 0;
+ priv->bss_num = 0;
+
+ break;
+ default:
+ wiphy_err(wiphy, "type not supported\n");
+ return NULL;
+ }
+
+ dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
+ ether_setup, 1);
+ if (!dev) {
+ wiphy_err(wiphy, "no memory available for netdevice\n");
+ goto error;
+ }
+
+ dev_net_set(dev, wiphy_net(wiphy));
+ dev->ieee80211_ptr = priv->wdev;
+ dev->ieee80211_ptr->iftype = priv->bss_mode;
+ memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN);
+ memcpy(dev->perm_addr, wiphy->perm_addr, ETH_ALEN);
+ SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
+
+ dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+ dev->watchdog_timeo = MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT;
+ dev->hard_header_len += MWIFIEX_MIN_DATA_HEADER_LEN;
+
+ mdev_priv = netdev_priv(dev);
+ *((unsigned long *) mdev_priv) = (unsigned long) priv;
+
+ priv->netdev = dev;
+ mwifiex_init_priv_params(priv, dev);
+
+ SET_NETDEV_DEV(dev, adapter->dev);
+
+ /* Register network device */
+ if (register_netdevice(dev)) {
+ wiphy_err(wiphy, "cannot register virtual network device\n");
+ goto error;
+ }
+
+ sema_init(&priv->async_sem, 1);
+ priv->scan_pending_on_block = false;
+
+ dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
+
+#ifdef CONFIG_DEBUG_FS
+ mwifiex_dev_debugfs_init(priv);
+#endif
+ return dev;
+error:
+ if (dev && (dev->reg_state == NETREG_UNREGISTERED))
+ free_netdev(dev);
+ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
+
+/*
+ * del_virtual_intf: remove the virtual interface determined by dev
+ */
+int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
+
+ if (!priv || !dev)
+ return 0;
+
+#ifdef CONFIG_DEBUG_FS
+ mwifiex_dev_debugfs_remove(priv);
+#endif
+
+ if (!netif_queue_stopped(priv->netdev))
+ netif_stop_queue(priv->netdev);
+
+ if (netif_carrier_ok(priv->netdev))
+ netif_carrier_off(priv->netdev);
+
+ if (dev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(dev);
+
+ if (dev->reg_state == NETREG_UNREGISTERED)
+ free_netdev(dev);
+
+ /* Clear the priv in adapter */
+ priv->netdev = NULL;
+
+ priv->media_connected = false;
+
+ cancel_work_sync(&priv->cfg_workqueue);
+ flush_workqueue(priv->workqueue);
+ destroy_workqueue(priv->workqueue);
+
+ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf);
+
/* station cfg80211 operations */
static struct cfg80211_ops mwifiex_cfg80211_ops = {
+ .add_virtual_intf = mwifiex_add_virtual_intf,
+ .del_virtual_intf = mwifiex_del_virtual_intf,
.change_virtual_intf = mwifiex_cfg80211_change_virtual_intf,
.scan = mwifiex_cfg80211_scan,
.connect = mwifiex_cfg80211_connect,
@@ -1288,8 +1330,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
* default parameters and handler function pointers, and finally
* registers the device.
*/
-int mwifiex_register_cfg80211(struct net_device *dev, u8 *mac,
- struct mwifiex_private *priv)
+int mwifiex_register_cfg80211(struct mwifiex_private *priv)
{
int ret;
void *wdev_priv;
@@ -1329,12 +1370,15 @@ int mwifiex_register_cfg80211(struct net_device *dev, u8 *mac,
wdev->wiphy->cipher_suites = mwifiex_cipher_suites;
wdev->wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
- memcpy(wdev->wiphy->perm_addr, mac, 6);
+ memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
/* We are using custom domains */
wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+ /* Reserve space for bss band information */
+ wdev->wiphy->bss_priv_size = sizeof(u8);
+
wdev->wiphy->reg_notifier = mwifiex_reg_notifier;
/* Set struct mwifiex_private pointer in wiphy_priv */
@@ -1356,17 +1400,8 @@ int mwifiex_register_cfg80211(struct net_device *dev, u8 *mac,
"info: successfully registered wiphy device\n");
}
- dev_net_set(dev, wiphy_net(wdev->wiphy));
- dev->ieee80211_ptr = wdev;
- memcpy(dev->dev_addr, wdev->wiphy->perm_addr, 6);
- memcpy(dev->perm_addr, wdev->wiphy->perm_addr, 6);
- SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
priv->wdev = wdev;
- dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- dev->watchdog_timeo = MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT;
- dev->hard_header_len += MWIFIEX_MIN_DATA_HEADER_LEN;
-
return ret;
}
@@ -1416,13 +1451,8 @@ mwifiex_cfg80211_results(struct work_struct *work)
MWIFIEX_SCAN_TYPE_ACTIVE;
scan_req->chan_list[i].scan_time = 0;
}
- if (mwifiex_set_user_scan_ioctl(priv, scan_req)) {
+ if (mwifiex_set_user_scan_ioctl(priv, scan_req))
ret = -EFAULT;
- goto done;
- }
- if (mwifiex_inform_bss_from_scan_result(priv, NULL))
- ret = -EFAULT;
-done:
priv->scan_result_status = ret;
dev_dbg(priv->adapter->dev, "info: %s: sending scan results\n",
__func__);
diff --git a/drivers/net/wireless/mwifiex/cfg80211.h b/drivers/net/wireless/mwifiex/cfg80211.h
index c4db8f36aa1..8d010f2500c 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/mwifiex/cfg80211.h
@@ -24,8 +24,7 @@
#include "main.h"
-int mwifiex_register_cfg80211(struct net_device *, u8 *,
- struct mwifiex_private *);
+int mwifiex_register_cfg80211(struct mwifiex_private *);
void mwifiex_cfg80211_results(struct work_struct *work);
#endif
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index d0cada5a29a..f2e6de03805 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -48,7 +48,7 @@ static u8 adhoc_rates_bg[BG_SUPPORTED_RATES] = { 0x82, 0x84, 0x8b, 0x96,
static u8 adhoc_rates_a[A_SUPPORTED_RATES] = { 0x8c, 0x12, 0x98, 0x24,
0xb0, 0x48, 0x60, 0x6c, 0 };
-u8 supported_rates_a[A_SUPPORTED_RATES] = { 0x0c, 0x12, 0x18, 0x24,
+static u8 supported_rates_a[A_SUPPORTED_RATES] = { 0x0c, 0x12, 0x18, 0x24,
0xb0, 0x48, 0x60, 0x6c, 0 };
static u16 mwifiex_data_rates[MWIFIEX_SUPPORTED_RATES_EXT] = { 0x02, 0x04,
0x0B, 0x16, 0x00, 0x0C, 0x12, 0x18,
@@ -57,19 +57,19 @@ static u16 mwifiex_data_rates[MWIFIEX_SUPPORTED_RATES_EXT] = { 0x02, 0x04,
0x75, 0x82, 0x0C, 0x1B, 0x36, 0x51,
0x6C, 0xA2, 0xD8, 0xF3, 0x10E, 0x00 };
-u8 supported_rates_b[B_SUPPORTED_RATES] = { 0x02, 0x04, 0x0b, 0x16, 0 };
+static u8 supported_rates_b[B_SUPPORTED_RATES] = { 0x02, 0x04, 0x0b, 0x16, 0 };
-u8 supported_rates_g[G_SUPPORTED_RATES] = { 0x0c, 0x12, 0x18, 0x24,
+static u8 supported_rates_g[G_SUPPORTED_RATES] = { 0x0c, 0x12, 0x18, 0x24,
0x30, 0x48, 0x60, 0x6c, 0 };
-u8 supported_rates_bg[BG_SUPPORTED_RATES] = { 0x02, 0x04, 0x0b, 0x0c,
+static u8 supported_rates_bg[BG_SUPPORTED_RATES] = { 0x02, 0x04, 0x0b, 0x0c,
0x12, 0x16, 0x18, 0x24, 0x30, 0x48,
0x60, 0x6c, 0 };
u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x10, 0x20, 0x30,
0x32, 0x40, 0x41, 0xff };
-u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
+static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
/*
* This function maps an index in supported rates table into
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index b5352afb871..ac278156d39 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -40,8 +40,12 @@ mwifiex_init_cmd_node(struct mwifiex_private *priv,
{
cmd_node->priv = priv;
cmd_node->cmd_oid = cmd_oid;
- cmd_node->wait_q_enabled = priv->adapter->cmd_wait_q_required;
- priv->adapter->cmd_wait_q_required = false;
+ if (priv->adapter->cmd_wait_q_required) {
+ cmd_node->wait_q_enabled = priv->adapter->cmd_wait_q_required;
+ priv->adapter->cmd_wait_q_required = false;
+ cmd_node->cmd_wait_q_woken = false;
+ cmd_node->condition = &cmd_node->cmd_wait_q_woken;
+ }
cmd_node->data_buf = data_buf;
cmd_node->cmd_skb = cmd_node->skb;
}
@@ -90,8 +94,11 @@ mwifiex_clean_cmd_node(struct mwifiex_adapter *adapter,
cmd_node->data_buf = NULL;
cmd_node->wait_q_enabled = false;
+ if (cmd_node->cmd_skb)
+ skb_trim(cmd_node->cmd_skb, 0);
+
if (cmd_node->resp_skb) {
- dev_kfree_skb_any(cmd_node->resp_skb);
+ adapter->if_ops.cmdrsp_complete(adapter, cmd_node->resp_skb);
cmd_node->resp_skb = NULL;
}
}
@@ -173,8 +180,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN);
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
- cmd_node->cmd_skb->data,
- cmd_node->cmd_skb->len, NULL);
+ cmd_node->cmd_skb, NULL);
skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN);
@@ -235,8 +241,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
skb_push(adapter->sleep_cfm, INTF_HEADER_LEN);
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
- adapter->sleep_cfm->data,
- adapter->sleep_cfm->len, NULL);
+ adapter->sleep_cfm, NULL);
skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN);
if (ret == -1) {
@@ -399,8 +404,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
adapter->event_cause = 0;
adapter->event_skb = NULL;
-
- dev_kfree_skb_any(skb);
+ adapter->if_ops.event_complete(adapter, skb);
return ret;
}
@@ -418,7 +422,6 @@ int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
struct mwifiex_adapter *adapter = priv->adapter;
adapter->cmd_wait_q_required = true;
- adapter->cmd_wait_q.condition = false;
ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid,
data_buf);
@@ -511,10 +514,12 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
}
/* Send command */
- if (cmd_no == HostCmd_CMD_802_11_SCAN)
+ if (cmd_no == HostCmd_CMD_802_11_SCAN) {
mwifiex_queue_scan_cmd(priv, cmd_node);
- else
+ } else {
+ adapter->cmd_queued = cmd_node;
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+ }
return ret;
}
@@ -535,7 +540,7 @@ mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
return;
if (cmd_node->wait_q_enabled)
- mwifiex_complete_cmd(adapter);
+ mwifiex_complete_cmd(adapter, cmd_node);
/* Clean the node */
mwifiex_clean_cmd_node(adapter, cmd_node);
@@ -882,7 +887,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
adapter->curr_cmd->wait_q_enabled = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
adapter->cmd_wait_q.status = -1;
- mwifiex_complete_cmd(adapter);
+ mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
/* Cancel all pending command */
spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
@@ -893,7 +898,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
if (cmd_node->wait_q_enabled) {
adapter->cmd_wait_q.status = -1;
- mwifiex_complete_cmd(adapter);
+ mwifiex_complete_cmd(adapter, cmd_node);
cmd_node->wait_q_enabled = false;
}
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
@@ -976,7 +981,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
}
adapter->cmd_wait_q.status = -1;
- mwifiex_complete_cmd(adapter);
+ mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
/*
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 94ddc9038cb..ae17ce02a3d 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -98,7 +98,6 @@ struct mwifiex_802_11_ssid {
struct mwifiex_wait_queue {
wait_queue_head_t wait;
- u16 condition;
int status;
};
@@ -114,14 +113,6 @@ struct mwifiex_txinfo {
u8 bss_index;
};
-struct mwifiex_bss_attr {
- u8 bss_type;
- u8 frame_type;
- u8 active;
- u8 bss_priority;
- u8 bss_num;
-};
-
enum mwifiex_wmm_ac_e {
WMM_AC_BK,
WMM_AC_BE,
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 4fee0993b18..0cc5d73cb0c 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -57,12 +57,6 @@ struct tx_packet_hdr {
#define GET_FW_DEFAULT_BANDS(adapter) \
((adapter->fw_cap_info >> 8) & ALL_802_11_BANDS)
-extern u8 supported_rates_b[B_SUPPORTED_RATES];
-extern u8 supported_rates_g[G_SUPPORTED_RATES];
-extern u8 supported_rates_bg[BG_SUPPORTED_RATES];
-extern u8 supported_rates_a[A_SUPPORTED_RATES];
-extern u8 supported_rates_n[N_SUPPORTED_RATES];
-
#define HostCmd_WEP_KEY_INDEX_MASK 0x3fff
#define KEY_INFO_ENABLED 0x01
@@ -84,7 +78,8 @@ enum KEY_TYPE_ID {
#define MAX_FIRMWARE_POLL_TRIES 100
-#define FIRMWARE_READY 0xfedc
+#define FIRMWARE_READY_SDIO 0xfedc
+#define FIRMWARE_READY_PCIE 0xfedcba00
enum MWIFIEX_802_11_PRIVACY_FILTER {
MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL,
@@ -221,7 +216,7 @@ enum MWIFIEX_802_11_WEP_STATUS {
#define HostCmd_CMD_802_11_HS_CFG_ENH 0x00e5
#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed
#define HostCmd_CMD_SET_BSS_MODE 0x00f7
-
+#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
enum ENH_PS_MODES {
EN_PS = 1,
@@ -821,6 +816,14 @@ struct host_cmd_ds_txpwr_cfg {
__le32 mode;
} __packed;
+struct mwifiex_bcn_param {
+ u8 bssid[ETH_ALEN];
+ u8 rssi;
+ __le32 timestamp[2];
+ __le16 beacon_period;
+ __le16 cap_info_bitmap;
+} __packed;
+
#define MWIFIEX_USER_SCAN_CHAN_MAX 50
#define MWIFIEX_MAX_SSID_LIST_LENGTH 10
@@ -862,13 +865,6 @@ struct mwifiex_user_scan_ssid {
struct mwifiex_user_scan_cfg {
/*
- * Flag set to keep the previous scan table intact
- *
- * If set, the scan results will accumulate, replacing any previous
- * matched entries for a BSS with the new scan data
- */
- u8 keep_previous_scan;
- /*
* BSS mode to be sent in the firmware command
*/
u8 bss_mode;
@@ -1136,6 +1132,30 @@ struct host_cmd_ds_set_bss_mode {
u8 con_type;
} __packed;
+struct host_cmd_ds_pcie_details {
+ /* TX buffer descriptor ring address */
+ u32 txbd_addr_lo;
+ u32 txbd_addr_hi;
+ /* TX buffer descriptor ring count */
+ u32 txbd_count;
+
+ /* RX buffer descriptor ring address */
+ u32 rxbd_addr_lo;
+ u32 rxbd_addr_hi;
+ /* RX buffer descriptor ring count */
+ u32 rxbd_count;
+
+ /* Event buffer descriptor ring address */
+ u32 evtbd_addr_lo;
+ u32 evtbd_addr_hi;
+ /* Event buffer descriptor ring count */
+ u32 evtbd_count;
+
+ /* Sleep cookie buffer physical address */
+ u32 sleep_cookie_addr_lo;
+ u32 sleep_cookie_addr_hi;
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -1183,6 +1203,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_rf_reg_access rf_reg;
struct host_cmd_ds_pmic_reg_access pmic_reg;
struct host_cmd_ds_set_bss_mode bss_mode;
+ struct host_cmd_ds_pcie_details pcie_host_spec;
struct host_cmd_ds_802_11_eeprom_access eeprom;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 3f1559e6132..d792b3fb7c1 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -76,7 +76,7 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
memset(priv->curr_addr, 0xff, ETH_ALEN);
priv->pkt_tx_ctrl = 0;
- priv->bss_mode = NL80211_IFTYPE_STATION;
+ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->data_rate = 0; /* Initially indicate the rate as auto */
priv->is_data_rate_auto = true;
priv->bcn_avg_factor = DEFAULT_BCN_AVG_FACTOR;
@@ -152,19 +152,6 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter)
{
int ret;
- u32 buf_size;
- struct mwifiex_bssdescriptor *temp_scan_table;
-
- /* Allocate buffer to store the BSSID list */
- buf_size = sizeof(struct mwifiex_bssdescriptor) * IW_MAX_AP;
- temp_scan_table = kzalloc(buf_size, GFP_KERNEL);
- if (!temp_scan_table) {
- dev_err(adapter->dev, "%s: failed to alloc temp_scan_table\n",
- __func__);
- return -ENOMEM;
- }
-
- adapter->scan_table = temp_scan_table;
/* Allocate command buffer */
ret = mwifiex_alloc_cmd_buffer(adapter);
@@ -204,7 +191,12 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
(adapter->sleep_cfm->data);
adapter->cmd_sent = false;
- adapter->data_sent = true;
+
+ if (adapter->iface_type == MWIFIEX_PCIE)
+ adapter->data_sent = false;
+ else
+ adapter->data_sent = true;
+
adapter->cmd_resp_received = false;
adapter->event_received = false;
adapter->data_received = false;
@@ -222,14 +214,8 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->active_scan_time = MWIFIEX_ACTIVE_SCAN_CHAN_TIME;
adapter->passive_scan_time = MWIFIEX_PASSIVE_SCAN_CHAN_TIME;
- adapter->num_in_scan_table = 0;
- memset(adapter->scan_table, 0,
- (sizeof(struct mwifiex_bssdescriptor) * IW_MAX_AP));
adapter->scan_probes = 1;
- memset(adapter->bcn_buf, 0, sizeof(adapter->bcn_buf));
- adapter->bcn_buf_end = adapter->bcn_buf;
-
adapter->multiple_dtim = 1;
adapter->local_listen_interval = 0; /* default value in firmware
@@ -297,6 +283,34 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
}
/*
+ * This function releases the lock variables and frees the locks and
+ * associated locks.
+ */
+static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
+{
+ struct mwifiex_private *priv;
+ s32 i, j;
+
+ /* Free lists */
+ list_del(&adapter->cmd_free_q);
+ list_del(&adapter->cmd_pending_q);
+ list_del(&adapter->scan_pending_q);
+
+ for (i = 0; i < adapter->priv_num; i++)
+ list_del(&adapter->bss_prio_tbl[i].bss_prio_head);
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ if (adapter->priv[i]) {
+ priv = adapter->priv[i];
+ for (j = 0; j < MAX_NUM_TID; ++j)
+ list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
+ list_del(&priv->tx_ba_stream_tbl_ptr);
+ list_del(&priv->rx_reorder_tbl_ptr);
+ }
+ }
+}
+
+/*
* This function frees the adapter structure.
*
* The freeing operation is done recursively, by canceling all
@@ -326,8 +340,6 @@ mwifiex_free_adapter(struct mwifiex_adapter *adapter)
del_timer(&adapter->cmd_timer);
dev_dbg(adapter->dev, "info: free scan table\n");
- kfree(adapter->scan_table);
- adapter->scan_table = NULL;
adapter->if_ops.cleanup_if(adapter);
@@ -392,34 +404,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
}
/*
- * This function releases the lock variables and frees the locks and
- * associated locks.
- */
-void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
-{
- struct mwifiex_private *priv;
- s32 i, j;
-
- /* Free lists */
- list_del(&adapter->cmd_free_q);
- list_del(&adapter->cmd_pending_q);
- list_del(&adapter->scan_pending_q);
-
- for (i = 0; i < adapter->priv_num; i++)
- list_del(&adapter->bss_prio_tbl[i].bss_prio_head);
-
- for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- priv = adapter->priv[i];
- for (j = 0; j < MAX_NUM_TID; ++j)
- list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
- list_del(&priv->tx_ba_stream_tbl_ptr);
- list_del(&priv->rx_reorder_tbl_ptr);
- }
- }
-}
-
-/*
* This function initializes the firmware.
*
* The following operations are performed sequentially -
@@ -602,11 +586,13 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
struct mwifiex_fw_image *pmfw)
{
- int ret, winner;
+ int ret;
u32 poll_num = 1;
+ adapter->winner = 0;
+
/* Check if firmware is already running */
- ret = adapter->if_ops.check_fw_status(adapter, poll_num, &winner);
+ ret = adapter->if_ops.check_fw_status(adapter, poll_num);
if (!ret) {
dev_notice(adapter->dev,
"WLAN FW already running! Skip FW download\n");
@@ -615,7 +601,7 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
poll_num = MAX_FIRMWARE_POLL_TRIES;
/* Check if we are the winner for downloading FW */
- if (!winner) {
+ if (!adapter->winner) {
dev_notice(adapter->dev,
"Other interface already running!"
" Skip FW download\n");
@@ -633,7 +619,7 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
poll_fw:
/* Check if the firmware is downloaded successfully or not */
- ret = adapter->if_ops.check_fw_status(adapter, poll_num, NULL);
+ ret = adapter->if_ops.check_fw_status(adapter, poll_num);
if (ret) {
dev_err(adapter->dev, "FW failed to be active in time\n");
return -1;
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index f6bcc868562..e0b68e7c8ca 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -134,7 +134,6 @@ struct mwifiex_ver_ext {
struct mwifiex_bss_info {
u32 bss_mode;
struct mwifiex_802_11_ssid ssid;
- u32 scan_table_idx;
u32 bss_chan;
u32 region_code;
u32 media_connected;
@@ -307,10 +306,12 @@ struct mwifiex_ds_read_eeprom {
u8 value[MAX_EEPROM_DATA];
};
+#define IEEE_MAX_IE_SIZE 256
+
struct mwifiex_ds_misc_gen_ie {
u32 type;
u32 len;
- u8 ie_data[IW_CUSTOM_MAX];
+ u8 ie_data[IEEE_MAX_IE_SIZE];
};
struct mwifiex_ds_misc_cmd {
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 644e2e405cb..62b4c293860 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -147,13 +147,12 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
u8 *ptr = rate1, *tmp;
u32 i, j;
- tmp = kmalloc(rate1_size, GFP_KERNEL);
+ tmp = kmemdup(rate1, rate1_size, GFP_KERNEL);
if (!tmp) {
dev_err(priv->adapter->dev, "failed to alloc tmp buf\n");
return -ENOMEM;
}
- memcpy(tmp, rate1, rate1_size);
memset(rate1, 0, rate1_size);
for (i = 0; rate2[i] && i < rate2_size; i++) {
@@ -224,32 +223,6 @@ mwifiex_setup_rates_from_bssdesc(struct mwifiex_private *priv,
}
/*
- * This function updates the scan entry TSF timestamps to reflect
- * a new association.
- */
-static void
-mwifiex_update_tsf_timestamps(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *new_bss_desc)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- u32 table_idx;
- long long new_tsf_base;
- signed long long tsf_delta;
-
- memcpy(&new_tsf_base, new_bss_desc->time_stamp, sizeof(new_tsf_base));
-
- tsf_delta = new_tsf_base - new_bss_desc->network_tsf;
-
- dev_dbg(adapter->dev, "info: TSF: update TSF timestamps, "
- "0x%016llx -> 0x%016llx\n",
- new_bss_desc->network_tsf, new_tsf_base);
-
- for (table_idx = 0; table_idx < adapter->num_in_scan_table;
- table_idx++)
- adapter->scan_table[table_idx].network_tsf += tsf_delta;
-}
-
-/*
* This function appends a WAPI IE.
*
* This function is called from the network join command preparation routine.
@@ -639,12 +612,6 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
priv->curr_bss_params.band = (u8) bss_desc->bss_band;
- /*
- * Adjust the timestamps in the scan table to be relative to the newly
- * associated AP's TSF
- */
- mwifiex_update_tsf_timestamps(priv, bss_desc);
-
if (bss_desc->wmm_ie.vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC)
priv->curr_bss_params.wmm_enabled = true;
else
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index e5fc53dc688..67e6db7d672 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -26,21 +26,6 @@
const char driver_version[] = "mwifiex " VERSION " (%s) ";
-static struct mwifiex_bss_attr mwifiex_bss_sta[] = {
- {MWIFIEX_BSS_TYPE_STA, MWIFIEX_DATA_FRAME_TYPE_ETH_II, true, 0, 0},
-};
-
-static int drv_mode = DRV_MODE_STA;
-
-/* Supported drv_mode table */
-static struct mwifiex_drv_mode mwifiex_drv_mode_tbl[] = {
- {
- .drv_mode = DRV_MODE_STA,
- .intf_num = ARRAY_SIZE(mwifiex_bss_sta),
- .bss_attr = mwifiex_bss_sta,
- },
-};
-
/*
* This function registers the device and performs all the necessary
* initializations.
@@ -57,7 +42,6 @@ static struct mwifiex_drv_mode mwifiex_drv_mode_tbl[] = {
* proper cleanup before exiting.
*/
static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
- struct mwifiex_drv_mode *drv_mode_ptr,
void **padapter)
{
struct mwifiex_adapter *adapter;
@@ -78,44 +62,20 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
goto error;
adapter->priv_num = 0;
- for (i = 0; i < drv_mode_ptr->intf_num; i++) {
- adapter->priv[i] = NULL;
-
- if (!drv_mode_ptr->bss_attr[i].active)
- continue;
- /* Allocate memory for private structure */
- adapter->priv[i] = kzalloc(sizeof(struct mwifiex_private),
- GFP_KERNEL);
- if (!adapter->priv[i]) {
- dev_err(adapter->dev, "%s: failed to alloc priv[%d]\n",
- __func__, i);
- goto error;
- }
-
- adapter->priv_num++;
- adapter->priv[i]->adapter = adapter;
- /* Save bss_type, frame_type & bss_priority */
- adapter->priv[i]->bss_type = drv_mode_ptr->bss_attr[i].bss_type;
- adapter->priv[i]->frame_type =
- drv_mode_ptr->bss_attr[i].frame_type;
- adapter->priv[i]->bss_priority =
- drv_mode_ptr->bss_attr[i].bss_priority;
-
- if (drv_mode_ptr->bss_attr[i].bss_type == MWIFIEX_BSS_TYPE_STA)
- adapter->priv[i]->bss_role = MWIFIEX_BSS_ROLE_STA;
- else if (drv_mode_ptr->bss_attr[i].bss_type ==
- MWIFIEX_BSS_TYPE_UAP)
- adapter->priv[i]->bss_role = MWIFIEX_BSS_ROLE_UAP;
-
- /* Save bss_index & bss_num */
- adapter->priv[i]->bss_index = i;
- adapter->priv[i]->bss_num = drv_mode_ptr->bss_attr[i].bss_num;
+ /* Allocate memory for private structure */
+ adapter->priv[0] = kzalloc(sizeof(struct mwifiex_private),
+ GFP_KERNEL);
+ if (!adapter->priv[0]) {
+ dev_err(adapter->dev, "%s: failed to alloc priv[0]\n",
+ __func__);
+ goto error;
}
- adapter->drv_mode = drv_mode_ptr;
- if (mwifiex_init_lock_list(adapter))
- goto error;
+ adapter->priv_num++;
+
+ adapter->priv[0]->adapter = adapter;
+ mwifiex_init_lock_list(adapter);
init_timer(&adapter->cmd_timer);
adapter->cmd_timer.function = mwifiex_cmd_timeout_func;
@@ -126,9 +86,9 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
error:
dev_dbg(adapter->dev, "info: leave mwifiex_register with error\n");
- mwifiex_free_lock_list(adapter);
- for (i = 0; i < drv_mode_ptr->intf_num; i++)
+ for (i = 0; i < adapter->priv_num; i++)
kfree(adapter->priv[i]);
+
kfree(adapter);
return -1;
@@ -316,38 +276,6 @@ exit_main_proc:
}
/*
- * This function initializes the software.
- *
- * The main work includes allocating and initializing the adapter structure
- * and initializing the private structures.
- */
-static int
-mwifiex_init_sw(void *card, struct mwifiex_if_ops *if_ops, void **padapter)
-{
- int i;
- struct mwifiex_drv_mode *drv_mode_ptr;
-
- /* find mwifiex_drv_mode entry from mwifiex_drv_mode_tbl */
- drv_mode_ptr = NULL;
- for (i = 0; i < ARRAY_SIZE(mwifiex_drv_mode_tbl); i++) {
- if (mwifiex_drv_mode_tbl[i].drv_mode == drv_mode) {
- drv_mode_ptr = &mwifiex_drv_mode_tbl[i];
- break;
- }
- }
-
- if (!drv_mode_ptr) {
- pr_err("invalid drv_mode=%d\n", drv_mode);
- return -1;
- }
-
- if (mwifiex_register(card, if_ops, drv_mode_ptr, padapter))
- return -1;
-
- return 0;
-}
-
-/*
* This function frees the adapter structure.
*
* Additionally, this closes the netlink socket, frees the timers
@@ -627,7 +555,7 @@ static const struct net_device_ops mwifiex_netdev_ops = {
.ndo_set_mac_address = mwifiex_set_mac_address,
.ndo_tx_timeout = mwifiex_tx_timeout,
.ndo_get_stats = mwifiex_get_stats,
- .ndo_set_multicast_list = mwifiex_set_multicast_list,
+ .ndo_set_rx_mode = mwifiex_set_multicast_list,
};
/*
@@ -649,8 +577,8 @@ static const struct net_device_ops mwifiex_netdev_ops = {
*
* In addition, the CFG80211 work queue is also created.
*/
-static void
-mwifiex_init_priv_params(struct mwifiex_private *priv, struct net_device *dev)
+void mwifiex_init_priv_params(struct mwifiex_private *priv,
+ struct net_device *dev)
{
dev->netdev_ops = &mwifiex_netdev_ops;
/* Initialize private structure */
@@ -664,118 +592,6 @@ mwifiex_init_priv_params(struct mwifiex_private *priv, struct net_device *dev)
}
/*
- * This function adds a new logical interface.
- *
- * It allocates, initializes and registers the interface by performing
- * the following opearations -
- * - Allocate a new net device structure
- * - Assign device name
- * - Register the new device with CFG80211 subsystem
- * - Initialize semaphore and private structure
- * - Register the new device with kernel
- * - Create the complete debug FS structure if configured
- */
-static struct mwifiex_private *mwifiex_add_interface(
- struct mwifiex_adapter *adapter,
- u8 bss_index, u8 bss_type)
-{
- struct net_device *dev;
- struct mwifiex_private *priv;
- void *mdev_priv;
-
- dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), "mlan%d",
- ether_setup, 1);
- if (!dev) {
- dev_err(adapter->dev, "no memory available for netdevice\n");
- goto error;
- }
-
- if (mwifiex_register_cfg80211(dev, adapter->priv[bss_index]->curr_addr,
- adapter->priv[bss_index]) != 0) {
- dev_err(adapter->dev, "cannot register netdevice with cfg80211\n");
- goto error;
- }
- /* Save the priv pointer in netdev */
- priv = adapter->priv[bss_index];
- mdev_priv = netdev_priv(dev);
- *((unsigned long *) mdev_priv) = (unsigned long) priv;
-
- priv->netdev = dev;
-
- sema_init(&priv->async_sem, 1);
- priv->scan_pending_on_block = false;
-
- mwifiex_init_priv_params(priv, dev);
-
- SET_NETDEV_DEV(dev, adapter->dev);
-
- /* Register network device */
- if (register_netdev(dev)) {
- dev_err(adapter->dev, "cannot register virtual network device\n");
- goto error;
- }
-
- dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
-#ifdef CONFIG_DEBUG_FS
- mwifiex_dev_debugfs_init(priv);
-#endif
- return priv;
-error:
- if (dev)
- free_netdev(dev);
- return NULL;
-}
-
-/*
- * This function removes a logical interface.
- *
- * It deregisters, resets and frees the interface by performing
- * the following operations -
- * - Disconnect the device if connected, send wireless event to
- * notify applications.
- * - Remove the debug FS structure if configured
- * - Unregister the device from kernel
- * - Free the net device structure
- * - Cancel all works and destroy work queue
- * - Unregister and free the wireless device from CFG80211 subsystem
- */
-static void
-mwifiex_remove_interface(struct mwifiex_adapter *adapter, u8 bss_index)
-{
- struct net_device *dev;
- struct mwifiex_private *priv = adapter->priv[bss_index];
-
- if (!priv)
- return;
- dev = priv->netdev;
-
- if (priv->media_connected)
- priv->media_connected = false;
-
-#ifdef CONFIG_DEBUG_FS
- mwifiex_dev_debugfs_remove(priv);
-#endif
- /* Last reference is our one */
- dev_dbg(adapter->dev, "info: %s: refcnt = %d\n",
- dev->name, netdev_refcnt_read(dev));
-
- if (dev->reg_state == NETREG_REGISTERED)
- unregister_netdev(dev);
-
- /* Clear the priv in adapter */
- priv->netdev = NULL;
- if (dev)
- free_netdev(dev);
-
- cancel_work_sync(&priv->cfg_workqueue);
- flush_workqueue(priv->workqueue);
- destroy_workqueue(priv->workqueue);
- wiphy_unregister(priv->wdev->wiphy);
- wiphy_free(priv->wdev->wiphy);
- kfree(priv->wdev);
-}
-
-/*
* This function check if command is pending.
*/
int is_command_pending(struct mwifiex_adapter *adapter)
@@ -845,19 +661,22 @@ mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
*/
int
mwifiex_add_card(void *card, struct semaphore *sem,
- struct mwifiex_if_ops *if_ops)
+ struct mwifiex_if_ops *if_ops, u8 iface_type)
{
- int i;
struct mwifiex_adapter *adapter;
+ char fmt[64];
+ struct mwifiex_private *priv;
if (down_interruptible(sem))
goto exit_sem_err;
- if (mwifiex_init_sw(card, if_ops, (void **)&adapter)) {
+ if (mwifiex_register(card, if_ops, (void **)&adapter)) {
pr_err("%s: software init failed\n", __func__);
goto err_init_sw;
}
+ adapter->iface_type = iface_type;
+
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
adapter->surprise_removed = false;
init_waitqueue_head(&adapter->init_wait_q);
@@ -866,8 +685,8 @@ mwifiex_add_card(void *card, struct semaphore *sem,
init_waitqueue_head(&adapter->hs_activate_wait_q);
adapter->cmd_wait_q_required = false;
init_waitqueue_head(&adapter->cmd_wait_q.wait);
- adapter->cmd_wait_q.condition = false;
adapter->cmd_wait_q.status = 0;
+ adapter->scan_wait_q_woken = false;
adapter->workqueue = create_workqueue("MWIFIEX_WORK_QUEUE");
if (!adapter->workqueue)
@@ -887,21 +706,37 @@ mwifiex_add_card(void *card, struct semaphore *sem,
goto err_init_fw;
}
- /* Add interfaces */
- for (i = 0; i < adapter->drv_mode->intf_num; i++) {
- if (!mwifiex_add_interface(adapter, i,
- adapter->drv_mode->bss_attr[i].bss_type)) {
- goto err_add_intf;
- }
+ priv = adapter->priv[0];
+
+ if (mwifiex_register_cfg80211(priv) != 0) {
+ dev_err(adapter->dev, "cannot register netdevice"
+ " with cfg80211\n");
+ goto err_init_fw;
}
+ rtnl_lock();
+ /* Create station interface by default */
+ if (!mwifiex_add_virtual_intf(priv->wdev->wiphy, "mlan%d",
+ NL80211_IFTYPE_STATION, NULL, NULL)) {
+ rtnl_unlock();
+ dev_err(adapter->dev, "cannot create default station"
+ " interface\n");
+ goto err_add_intf;
+ }
+
+ rtnl_unlock();
+
up(sem);
+ mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
+ dev_notice(adapter->dev, "driver_version = %s\n", fmt);
+
return 0;
err_add_intf:
- for (i = 0; i < adapter->priv_num; i++)
- mwifiex_remove_interface(adapter, i);
+ rtnl_lock();
+ mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev);
+ rtnl_unlock();
err_init_fw:
pr_debug("info: %s: unregister device\n", __func__);
adapter->if_ops.unregister_dev(adapter);
@@ -956,7 +791,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
/* Stop data */
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv) {
+ if (priv && priv->netdev) {
if (!netif_queue_stopped(priv->netdev))
netif_stop_queue(priv->netdev);
if (netif_carrier_ok(priv->netdev))
@@ -981,9 +816,24 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
atomic_read(&adapter->cmd_pending));
}
- /* Remove interface */
- for (i = 0; i < adapter->priv_num; i++)
- mwifiex_remove_interface(adapter, i);
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+
+ if (!priv)
+ continue;
+
+ rtnl_lock();
+ mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev);
+ rtnl_unlock();
+ }
+
+ priv = adapter->priv[0];
+ if (!priv)
+ goto exit_remove;
+
+ wiphy_unregister(priv->wdev->wiphy);
+ wiphy_free(priv->wdev->wiphy);
+ kfree(priv->wdev);
mwifiex_terminate_workqueue(adapter);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 2215c3c9735..30f138b6fa4 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -37,6 +37,7 @@
#include "ioctl.h"
#include "util.h"
#include "fw.h"
+#include "pcie.h"
extern const char driver_version[];
@@ -45,14 +46,7 @@ enum {
MWIFIEX_SYNC_CMD
};
-#define DRV_MODE_STA 0x1
-
-struct mwifiex_drv_mode {
- u16 drv_mode;
- u16 intf_num;
- struct mwifiex_bss_attr *bss_attr;
-};
-
+#define MWIFIEX_MAX_AP 64
#define MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT (5 * HZ)
@@ -114,6 +108,8 @@ struct mwifiex_drv_mode {
#define MAX_FREQUENCY_BAND_BG 2484
+#define MWIFIEX_EVENT_HEADER_LEN 4
+
struct mwifiex_dbg {
u32 num_cmd_host_to_card_failure;
u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -163,6 +159,11 @@ enum MWIFIEX_PS_STATE {
PS_STATE_SLEEP
};
+enum mwifiex_iface_type {
+ MWIFIEX_SDIO,
+ MWIFIEX_PCIE,
+};
+
struct mwifiex_add_ba_param {
u32 tx_win_size;
u32 rx_win_size;
@@ -180,7 +181,6 @@ struct mwifiex_ra_list_tbl {
struct sk_buff_head skb_head;
u8 ra[ETH_ALEN];
u32 total_pkts_size;
- u32 total_pkts;
u32 is_11n_enabled;
};
@@ -227,27 +227,10 @@ struct ieee_types_header {
u8 len;
} __packed;
-struct ieee_obss_scan_param {
- u16 obss_scan_passive_dwell;
- u16 obss_scan_active_dwell;
- u16 bss_chan_width_trigger_scan_int;
- u16 obss_scan_passive_total;
- u16 obss_scan_active_total;
- u16 bss_width_chan_trans_delay;
- u16 obss_scan_active_threshold;
-} __packed;
-
-struct ieee_types_obss_scan_param {
- struct ieee_types_header ieee_hdr;
- struct ieee_obss_scan_param obss_scan;
-} __packed;
-
#define MWIFIEX_SUPPORTED_RATES 14
#define MWIFIEX_SUPPORTED_RATES_EXT 32
-#define IEEE_MAX_IE_SIZE 256
-
struct ieee_types_vendor_specific {
struct ieee_types_vendor_header vend_hdr;
u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_vendor_header)];
@@ -291,8 +274,6 @@ struct mwifiex_bssdescriptor {
u16 bss_co_2040_offset;
u8 *bcn_ext_cap;
u16 ext_cap_offset;
- struct ieee_types_obss_scan_param *bcn_obss_scan;
- u16 overlap_bss_offset;
struct ieee_types_vendor_specific *bcn_wpa_ie;
u16 wpa_offset;
struct ieee_types_generic *bcn_rsn_ie;
@@ -301,8 +282,6 @@ struct mwifiex_bssdescriptor {
u16 wapi_offset;
u8 *beacon_buf;
u32 beacon_buf_size;
- u32 beacon_buf_size_max;
-
};
struct mwifiex_current_bss_params {
@@ -468,7 +447,7 @@ struct mwifiex_private {
struct dentry *dfs_dev_dir;
#endif
u8 nick_name[16];
- struct iw_statistics w_stats;
+ u8 qual_level, qual_noise;
u16 current_key_index;
struct semaphore async_sem;
u8 scan_pending_on_block;
@@ -541,33 +520,38 @@ struct cmd_ctrl_node {
void *data_buf;
u32 wait_q_enabled;
struct sk_buff *skb;
+ u8 *condition;
+ u8 cmd_wait_q_woken;
};
struct mwifiex_if_ops {
int (*init_if) (struct mwifiex_adapter *);
void (*cleanup_if) (struct mwifiex_adapter *);
- int (*check_fw_status) (struct mwifiex_adapter *, u32, int *);
+ int (*check_fw_status) (struct mwifiex_adapter *, u32);
int (*prog_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
int (*register_dev) (struct mwifiex_adapter *);
void (*unregister_dev) (struct mwifiex_adapter *);
int (*enable_int) (struct mwifiex_adapter *);
int (*process_int_status) (struct mwifiex_adapter *);
- int (*host_to_card) (struct mwifiex_adapter *, u8,
- u8 *payload, u32 pkt_len,
+ int (*host_to_card) (struct mwifiex_adapter *, u8, struct sk_buff *,
struct mwifiex_tx_param *);
int (*wakeup) (struct mwifiex_adapter *);
int (*wakeup_complete) (struct mwifiex_adapter *);
+ /* Interface specific functions */
void (*update_mp_end_port) (struct mwifiex_adapter *, u16);
void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
+ int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
+ int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
};
struct mwifiex_adapter {
+ u8 iface_type;
struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
u8 priv_num;
- struct mwifiex_drv_mode *drv_mode;
const struct firmware *firmware;
char fw_name[32];
+ int winner;
struct device *dev;
bool surprise_removed;
u32 fw_release_number;
@@ -624,15 +608,11 @@ struct mwifiex_adapter {
u32 scan_processing;
u16 region_code;
struct mwifiex_802_11d_domain_reg domain_reg;
- struct mwifiex_bssdescriptor *scan_table;
- u32 num_in_scan_table;
u16 scan_probes;
u32 scan_mode;
u16 specific_scan_time;
u16 active_scan_time;
u16 passive_scan_time;
- u8 bcn_buf[MAX_SCAN_BEACON_BUFFER];
- u8 *bcn_buf_end;
u8 fw_bands;
u8 adhoc_start_band;
u8 config_bands;
@@ -673,10 +653,11 @@ struct mwifiex_adapter {
u32 arp_filter_size;
u16 cmd_wait_q_required;
struct mwifiex_wait_queue cmd_wait_q;
+ u8 scan_wait_q_woken;
+ struct cmd_ctrl_node *cmd_queued;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
-void mwifiex_free_lock_list(struct mwifiex_adapter *adapter);
int mwifiex_init_fw(struct mwifiex_adapter *adapter);
@@ -692,7 +673,8 @@ int mwifiex_recv_packet(struct mwifiex_adapter *, struct sk_buff *skb);
int mwifiex_process_event(struct mwifiex_adapter *adapter);
-int mwifiex_complete_cmd(struct mwifiex_adapter *adapter);
+int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
+ struct cmd_ctrl_node *cmd_node);
int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
u16 cmd_action, u32 cmd_oid, void *data_buf);
@@ -726,8 +708,6 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags);
int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
struct sk_buff *skb, int status);
-int mwifiex_recv_packet_complete(struct mwifiex_adapter *,
- struct sk_buff *skb, int status);
void mwifiex_clean_txrx(struct mwifiex_private *priv);
u8 mwifiex_check_last_packet_indication(struct mwifiex_private *priv);
void mwifiex_check_ps_cond(struct mwifiex_adapter *adapter);
@@ -757,21 +737,12 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
int mwifiex_process_sta_event(struct mwifiex_private *);
void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
-int mwifiex_scan_networks(struct mwifiex_private *priv,
- const struct mwifiex_user_scan_cfg *user_scan_in);
int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
struct mwifiex_scan_cmd_config *scan_cfg);
void mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
struct cmd_ctrl_node *cmd_node);
int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
-s32 mwifiex_find_ssid_in_list(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *ssid, u8 *bssid,
- u32 mode);
-s32 mwifiex_find_bssid_in_list(struct mwifiex_private *priv, u8 *bssid,
- u32 mode);
-int mwifiex_find_best_network(struct mwifiex_private *priv,
- struct mwifiex_ssid_bssid *req_ssid_bssid);
s32 mwifiex_ssid_cmp(struct mwifiex_802_11_ssid *ssid1,
struct mwifiex_802_11_ssid *ssid2);
int mwifiex_associate(struct mwifiex_private *priv,
@@ -782,7 +753,6 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
void mwifiex_reset_connect_state(struct mwifiex_private *priv);
-void mwifiex_2040_coex_event(struct mwifiex_private *priv);
u8 mwifiex_band_to_radio_type(u8 band);
int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac);
int mwifiex_adhoc_start(struct mwifiex_private *priv,
@@ -823,6 +793,8 @@ int mwifiex_cmd_get_hw_spec(struct mwifiex_private *priv,
int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
int is_command_pending(struct mwifiex_adapter *adapter);
+void mwifiex_init_priv_params(struct mwifiex_private *priv,
+ struct net_device *dev);
/*
* This function checks if the queuing is RA based or not.
@@ -912,7 +884,7 @@ struct mwifiex_private *mwifiex_bss_index_to_priv(struct mwifiex_adapter
*adapter, u8 bss_index);
int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
u32 func_init_shutdown);
-int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *);
+int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
int mwifiex_remove_card(struct mwifiex_adapter *, struct semaphore *);
void mwifiex_get_version(struct mwifiex_adapter *adapter, char *version,
@@ -922,11 +894,8 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
struct net_device *dev);
int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter);
-int mwifiex_bss_start(struct mwifiex_private *priv,
- struct mwifiex_ssid_bssid *ssid_bssid);
-int mwifiex_set_hs_params(struct mwifiex_private *priv,
- u16 action, int cmd_type,
- struct mwifiex_ds_hs_cfg *hscfg);
+int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
+ struct mwifiex_802_11_ssid *req_ssid);
int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
int mwifiex_enable_hs(struct mwifiex_adapter *adapter);
int mwifiex_disable_auto_ds(struct mwifiex_private *priv);
@@ -934,8 +903,6 @@ int mwifiex_get_signal_info(struct mwifiex_private *priv,
struct mwifiex_ds_get_signal *signal);
int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
struct mwifiex_rate_cfg *rate);
-int mwifiex_find_best_bss(struct mwifiex_private *priv,
- struct mwifiex_ssid_bssid *ssid_bssid);
int mwifiex_request_scan(struct mwifiex_private *priv,
struct mwifiex_802_11_ssid *req_ssid);
int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
@@ -984,12 +951,26 @@ int mwifiex_main_process(struct mwifiex_adapter *);
int mwifiex_bss_set_channel(struct mwifiex_private *,
struct mwifiex_chan_freq_power *cfp);
-int mwifiex_bss_ioctl_find_bss(struct mwifiex_private *,
- struct mwifiex_ssid_bssid *);
int mwifiex_set_radio_band_cfg(struct mwifiex_private *,
struct mwifiex_ds_band_cfg *);
int mwifiex_get_bss_info(struct mwifiex_private *,
struct mwifiex_bss_info *);
+int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
+ u8 *bssid, s32 rssi, u8 *ie_buf,
+ size_t ie_len, u16 beacon_period,
+ u16 cap_info_bitmap, u8 band,
+ struct mwifiex_bssdescriptor *bss_desc);
+int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+ struct mwifiex_bssdescriptor *bss_entry,
+ u8 *ie_buf, u32 ie_len);
+int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc);
+
+struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ char *name, enum nl80211_iftype type,
+ u32 *flags, struct vif_params *params);
+int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev);
+
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
new file mode 100644
index 00000000000..d34acf082d3
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -0,0 +1,1948 @@
+/*
+ * Marvell Wireless LAN device driver: PCIE specific handling
+ *
+ * Copyright (C) 2011, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include <linux/firmware.h>
+
+#include "decl.h"
+#include "ioctl.h"
+#include "util.h"
+#include "fw.h"
+#include "main.h"
+#include "wmm.h"
+#include "11n.h"
+#include "pcie.h"
+
+#define PCIE_VERSION "1.0"
+#define DRV_NAME "Marvell mwifiex PCIe"
+
+static u8 user_rmmod;
+
+static struct mwifiex_if_ops pcie_ops;
+
+static struct semaphore add_remove_card_sem;
+static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter);
+static int mwifiex_pcie_resume(struct pci_dev *pdev);
+
+/*
+ * This function is called after skb allocation to update
+ * "skb->cb" with physical address of data pointer.
+ */
+static phys_addr_t *mwifiex_update_sk_buff_pa(struct sk_buff *skb)
+{
+ phys_addr_t *buf_pa = MWIFIEX_SKB_PACB(skb);
+
+ *buf_pa = (phys_addr_t)virt_to_phys(skb->data);
+
+ return buf_pa;
+}
+
+/*
+ * This function reads sleep cookie and checks if FW is ready
+ */
+static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
+{
+ u32 *cookie_addr;
+ struct pcie_service_card *card = adapter->card;
+
+ if (card->sleep_cookie) {
+ cookie_addr = (u32 *)card->sleep_cookie->data;
+ dev_dbg(adapter->dev, "info: ACCESS_HW: sleep cookie=0x%x\n",
+ *cookie_addr);
+ if (*cookie_addr == FW_AWAKE_COOKIE)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * This function probes an mwifiex device and registers it. It allocates
+ * the card structure, enables PCIE function number and initiates the
+ * device registration and initialization procedure by adding a logical
+ * interface.
+ */
+static int mwifiex_pcie_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct pcie_service_card *card;
+
+ pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
+ pdev->vendor, pdev->device, pdev->revision);
+
+ card = kzalloc(sizeof(struct pcie_service_card), GFP_KERNEL);
+ if (!card) {
+ pr_err("%s: failed to alloc memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ card->dev = pdev;
+
+ if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops,
+ MWIFIEX_PCIE)) {
+ pr_err("%s failed\n", __func__);
+ kfree(card);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * This function removes the interface and frees up the card structure.
+ */
+static void mwifiex_pcie_remove(struct pci_dev *pdev)
+{
+ struct pcie_service_card *card;
+ struct mwifiex_adapter *adapter;
+ int i;
+
+ card = pci_get_drvdata(pdev);
+ if (!card)
+ return;
+
+ adapter = card->adapter;
+ if (!adapter || !adapter->priv_num)
+ return;
+
+ if (user_rmmod) {
+#ifdef CONFIG_PM
+ if (adapter->is_suspended)
+ mwifiex_pcie_resume(pdev);
+#endif
+
+ for (i = 0; i < adapter->priv_num; i++)
+ if ((GET_BSS_ROLE(adapter->priv[i]) ==
+ MWIFIEX_BSS_ROLE_STA) &&
+ adapter->priv[i]->media_connected)
+ mwifiex_deauthenticate(adapter->priv[i], NULL);
+
+ mwifiex_disable_auto_ds(mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY));
+
+ mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY),
+ MWIFIEX_FUNC_SHUTDOWN);
+ }
+
+ mwifiex_remove_card(card->adapter, &add_remove_card_sem);
+ kfree(card);
+}
+
+/*
+ * Kernel needs to suspend all functions separately. Therefore all
+ * registered functions must have drivers with suspend and resume
+ * methods. Failing that the kernel simply removes the whole card.
+ *
+ * If already not suspended, this function allocates and sends a host
+ * sleep activate request to the firmware and turns off the traffic.
+ */
+static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mwifiex_adapter *adapter;
+ struct pcie_service_card *card;
+ int hs_actived, i;
+
+ if (pdev) {
+ card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ if (!card || card->adapter) {
+ pr_err("Card or adapter structure is not valid\n");
+ return 0;
+ }
+ } else {
+ pr_err("PCIE device is not specified\n");
+ return 0;
+ }
+
+ adapter = card->adapter;
+
+ hs_actived = mwifiex_enable_hs(adapter);
+
+ /* Indicate device suspended */
+ adapter->is_suspended = true;
+
+ for (i = 0; i < adapter->priv_num; i++)
+ netif_carrier_off(adapter->priv[i]->netdev);
+
+ return 0;
+}
+
+/*
+ * Kernel needs to suspend all functions separately. Therefore all
+ * registered functions must have drivers with suspend and resume
+ * methods. Failing that the kernel simply removes the whole card.
+ *
+ * If already not resumed, this function turns on the traffic and
+ * sends a host sleep cancel request to the firmware.
+ */
+static int mwifiex_pcie_resume(struct pci_dev *pdev)
+{
+ struct mwifiex_adapter *adapter;
+ struct pcie_service_card *card;
+ int i;
+
+ if (pdev) {
+ card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ if (!card || !card->adapter) {
+ pr_err("Card or adapter structure is not valid\n");
+ return 0;
+ }
+ } else {
+ pr_err("PCIE device is not specified\n");
+ return 0;
+ }
+
+ adapter = card->adapter;
+
+ if (!adapter->is_suspended) {
+ dev_warn(adapter->dev, "Device already resumed\n");
+ return 0;
+ }
+
+ adapter->is_suspended = false;
+
+ for (i = 0; i < adapter->priv_num; i++)
+ if (adapter->priv[i]->media_connected)
+ netif_carrier_on(adapter->priv[i]->netdev);
+
+ mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
+ MWIFIEX_ASYNC_CMD);
+
+ return 0;
+}
+
+#define PCIE_VENDOR_ID_MARVELL (0x11ab)
+#define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30)
+
+static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
+ {
+ PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(pci, mwifiex_ids);
+
+/* PCI Device Driver */
+static struct pci_driver __refdata mwifiex_pcie = {
+ .name = "mwifiex_pcie",
+ .id_table = mwifiex_ids,
+ .probe = mwifiex_pcie_probe,
+ .remove = mwifiex_pcie_remove,
+#ifdef CONFIG_PM
+ /* Power Management Hooks */
+ .suspend = mwifiex_pcie_suspend,
+ .resume = mwifiex_pcie_resume,
+#endif
+};
+
+/*
+ * This function writes data into PCIE card register.
+ */
+static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data)
+{
+ struct pcie_service_card *card = adapter->card;
+
+ iowrite32(data, card->pci_mmap1 + reg);
+
+ return 0;
+}
+
+/*
+ * This function reads data from PCIE card register.
+ */
+static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
+{
+ struct pcie_service_card *card = adapter->card;
+
+ *data = ioread32(card->pci_mmap1 + reg);
+
+ return 0;
+}
+
+/*
+ * This function wakes up the card.
+ *
+ * A host power up command is written to the card configuration
+ * register to wake up the card.
+ */
+static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
+{
+ int i = 0;
+
+ while (mwifiex_pcie_ok_to_access_hw(adapter)) {
+ i++;
+ udelay(10);
+ /* 50ms max wait */
+ if (i == 50000)
+ break;
+ }
+
+ dev_dbg(adapter->dev, "event: Wakeup device...\n");
+
+ /* Enable interrupts or any chip access will wakeup device */
+ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, HOST_INTR_MASK)) {
+ dev_warn(adapter->dev, "Enable host interrupt failed\n");
+ return -1;
+ }
+
+ dev_dbg(adapter->dev, "PCIE wakeup: Setting PS_STATE_AWAKE\n");
+ adapter->ps_state = PS_STATE_AWAKE;
+
+ return 0;
+}
+
+/*
+ * This function is called after the card has woken up.
+ *
+ * The card configuration register is reset.
+ */
+static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
+{
+ dev_dbg(adapter->dev, "cmd: Wakeup device completed\n");
+
+ return 0;
+}
+
+/*
+ * This function disables the host interrupt.
+ *
+ * The host interrupt mask is read, the disable bit is reset and
+ * written back to the card host interrupt mask register.
+ */
+static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter)
+{
+ if (mwifiex_pcie_ok_to_access_hw(adapter)) {
+ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK,
+ 0x00000000)) {
+ dev_warn(adapter->dev, "Disable host interrupt failed\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function enables the host interrupt.
+ *
+ * The host interrupt enable mask is written to the card
+ * host interrupt mask register.
+ */
+static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter)
+{
+ if (mwifiex_pcie_ok_to_access_hw(adapter)) {
+ /* Simply write the mask to the register */
+ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK,
+ HOST_INTR_MASK)) {
+ dev_warn(adapter->dev, "Enable host interrupt failed\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function creates buffer descriptor ring for TX
+ */
+static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ struct sk_buff *skb;
+ int i;
+ phys_addr_t *buf_pa;
+
+ /*
+ * driver maintaines the write pointer and firmware maintaines the read
+ * pointer. The write pointer starts at 0 (zero) while the read pointer
+ * starts at zero with rollover bit set
+ */
+ card->txbd_wrptr = 0;
+ card->txbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
+
+ /* allocate shared memory for the BD ring and divide the same in to
+ several descriptors */
+ card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
+ MWIFIEX_MAX_TXRX_BD;
+ dev_dbg(adapter->dev, "info: txbd_ring: Allocating %d bytes\n",
+ card->txbd_ring_size);
+ card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL);
+ if (!card->txbd_ring_vbase) {
+ dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n");
+ kfree(card->txbd_ring_vbase);
+ return -1;
+ }
+ card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase);
+
+ dev_dbg(adapter->dev, "info: txbd_ring - base: %p, pbase: %#x:%x,"
+ "len: %x\n", card->txbd_ring_vbase,
+ (u32)card->txbd_ring_pbase,
+ (u32)((u64)card->txbd_ring_pbase >> 32),
+ card->txbd_ring_size);
+
+ for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+ card->txbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
+ (card->txbd_ring_vbase +
+ (sizeof(struct mwifiex_pcie_buf_desc) * i));
+
+ /* Allocate buffer here so that firmware can DMA data from it */
+ skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
+ if (!skb) {
+ dev_err(adapter->dev, "Unable to allocate skb for TX ring.\n");
+ kfree(card->txbd_ring_vbase);
+ return -ENOMEM;
+ }
+ buf_pa = mwifiex_update_sk_buff_pa(skb);
+
+ skb_put(skb, MWIFIEX_RX_DATA_BUF_SIZE);
+ dev_dbg(adapter->dev, "info: TX ring: add new skb base: %p, "
+ "buf_base: %p, buf_pbase: %#x:%x, "
+ "buf_len: %#x\n", skb, skb->data,
+ (u32)*buf_pa, (u32)(((u64)*buf_pa >> 32)),
+ skb->len);
+
+ card->tx_buf_list[i] = skb;
+ card->txbd_ring[i]->paddr = *buf_pa;
+ card->txbd_ring[i]->len = (u16)skb->len;
+ card->txbd_ring[i]->flags = 0;
+ }
+
+ return 0;
+}
+
+static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ int i;
+
+ for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+ if (card->tx_buf_list[i])
+ dev_kfree_skb_any(card->tx_buf_list[i]);
+ card->tx_buf_list[i] = NULL;
+ card->txbd_ring[i]->paddr = 0;
+ card->txbd_ring[i]->len = 0;
+ card->txbd_ring[i]->flags = 0;
+ card->txbd_ring[i] = NULL;
+ }
+
+ kfree(card->txbd_ring_vbase);
+ card->txbd_ring_size = 0;
+ card->txbd_wrptr = 0;
+ card->txbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND;
+ card->txbd_ring_vbase = NULL;
+
+ return 0;
+}
+
+/*
+ * This function creates buffer descriptor ring for RX
+ */
+static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ struct sk_buff *skb;
+ int i;
+ phys_addr_t *buf_pa;
+
+ /*
+ * driver maintaines the read pointer and firmware maintaines the write
+ * pointer. The write pointer starts at 0 (zero) while the read pointer
+ * starts at zero with rollover bit set
+ */
+ card->rxbd_wrptr = 0;
+ card->rxbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
+
+ card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
+ MWIFIEX_MAX_TXRX_BD;
+ dev_dbg(adapter->dev, "info: rxbd_ring: Allocating %d bytes\n",
+ card->rxbd_ring_size);
+ card->rxbd_ring_vbase = kzalloc(card->rxbd_ring_size, GFP_KERNEL);
+ if (!card->rxbd_ring_vbase) {
+ dev_err(adapter->dev, "Unable to allocate buffer for "
+ "rxbd_ring.\n");
+ return -1;
+ }
+ card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase);
+
+ dev_dbg(adapter->dev, "info: rxbd_ring - base: %p, pbase: %#x:%x,"
+ "len: %#x\n", card->rxbd_ring_vbase,
+ (u32)card->rxbd_ring_pbase,
+ (u32)((u64)card->rxbd_ring_pbase >> 32),
+ card->rxbd_ring_size);
+
+ for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+ card->rxbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
+ (card->rxbd_ring_vbase +
+ (sizeof(struct mwifiex_pcie_buf_desc) * i));
+
+ /* Allocate skb here so that firmware can DMA data from it */
+ skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
+ if (!skb) {
+ dev_err(adapter->dev, "Unable to allocate skb for RX ring.\n");
+ kfree(card->rxbd_ring_vbase);
+ return -ENOMEM;
+ }
+ buf_pa = mwifiex_update_sk_buff_pa(skb);
+ skb_put(skb, MWIFIEX_RX_DATA_BUF_SIZE);
+
+ dev_dbg(adapter->dev, "info: RX ring: add new skb base: %p, "
+ "buf_base: %p, buf_pbase: %#x:%x, "
+ "buf_len: %#x\n", skb, skb->data,
+ (u32)*buf_pa, (u32)((u64)*buf_pa >> 32),
+ skb->len);
+
+ card->rx_buf_list[i] = skb;
+ card->rxbd_ring[i]->paddr = *buf_pa;
+ card->rxbd_ring[i]->len = (u16)skb->len;
+ card->rxbd_ring[i]->flags = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * This function deletes Buffer descriptor ring for RX
+ */
+static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ int i;
+
+ for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+ if (card->rx_buf_list[i])
+ dev_kfree_skb_any(card->rx_buf_list[i]);
+ card->rx_buf_list[i] = NULL;
+ card->rxbd_ring[i]->paddr = 0;
+ card->rxbd_ring[i]->len = 0;
+ card->rxbd_ring[i]->flags = 0;
+ card->rxbd_ring[i] = NULL;
+ }
+
+ kfree(card->rxbd_ring_vbase);
+ card->rxbd_ring_size = 0;
+ card->rxbd_wrptr = 0;
+ card->rxbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND;
+ card->rxbd_ring_vbase = NULL;
+
+ return 0;
+}
+
+/*
+ * This function creates buffer descriptor ring for Events
+ */
+static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ struct sk_buff *skb;
+ int i;
+ phys_addr_t *buf_pa;
+
+ /*
+ * driver maintaines the read pointer and firmware maintaines the write
+ * pointer. The write pointer starts at 0 (zero) while the read pointer
+ * starts at zero with rollover bit set
+ */
+ card->evtbd_wrptr = 0;
+ card->evtbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
+
+ card->evtbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
+ MWIFIEX_MAX_EVT_BD;
+ dev_dbg(adapter->dev, "info: evtbd_ring: Allocating %d bytes\n",
+ card->evtbd_ring_size);
+ card->evtbd_ring_vbase = kzalloc(card->evtbd_ring_size, GFP_KERNEL);
+ if (!card->evtbd_ring_vbase) {
+ dev_err(adapter->dev, "Unable to allocate buffer. "
+ "Terminating download\n");
+ return -1;
+ }
+ card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase);
+
+ dev_dbg(adapter->dev, "info: CMDRSP/EVT bd_ring - base: %p, "
+ "pbase: %#x:%x, len: %#x\n", card->evtbd_ring_vbase,
+ (u32)card->evtbd_ring_pbase,
+ (u32)((u64)card->evtbd_ring_pbase >> 32),
+ card->evtbd_ring_size);
+
+ for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
+ card->evtbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
+ (card->evtbd_ring_vbase +
+ (sizeof(struct mwifiex_pcie_buf_desc) * i));
+
+ /* Allocate skb here so that firmware can DMA data from it */
+ skb = dev_alloc_skb(MAX_EVENT_SIZE);
+ if (!skb) {
+ dev_err(adapter->dev, "Unable to allocate skb for EVENT buf.\n");
+ kfree(card->evtbd_ring_vbase);
+ return -ENOMEM;
+ }
+ buf_pa = mwifiex_update_sk_buff_pa(skb);
+ skb_put(skb, MAX_EVENT_SIZE);
+
+ dev_dbg(adapter->dev, "info: Evt ring: add new skb. base: %p, "
+ "buf_base: %p, buf_pbase: %#x:%x, "
+ "buf_len: %#x\n", skb, skb->data,
+ (u32)*buf_pa, (u32)((u64)*buf_pa >> 32),
+ skb->len);
+
+ card->evt_buf_list[i] = skb;
+ card->evtbd_ring[i]->paddr = *buf_pa;
+ card->evtbd_ring[i]->len = (u16)skb->len;
+ card->evtbd_ring[i]->flags = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * This function deletes Buffer descriptor ring for Events
+ */
+static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ int i;
+
+ for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
+ if (card->evt_buf_list[i])
+ dev_kfree_skb_any(card->evt_buf_list[i]);
+ card->evt_buf_list[i] = NULL;
+ card->evtbd_ring[i]->paddr = 0;
+ card->evtbd_ring[i]->len = 0;
+ card->evtbd_ring[i]->flags = 0;
+ card->evtbd_ring[i] = NULL;
+ }
+
+ kfree(card->evtbd_ring_vbase);
+ card->evtbd_wrptr = 0;
+ card->evtbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND;
+ card->evtbd_ring_size = 0;
+ card->evtbd_ring_vbase = NULL;
+
+ return 0;
+}
+
+/*
+ * This function allocates a buffer for CMDRSP
+ */
+static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ struct sk_buff *skb;
+
+ /* Allocate memory for receiving command response data */
+ skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE);
+ if (!skb) {
+ dev_err(adapter->dev, "Unable to allocate skb for command "
+ "response data.\n");
+ return -ENOMEM;
+ }
+ mwifiex_update_sk_buff_pa(skb);
+ skb_put(skb, MWIFIEX_UPLD_SIZE);
+ card->cmdrsp_buf = skb;
+
+ skb = NULL;
+ /* Allocate memory for sending command to firmware */
+ skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER);
+ if (!skb) {
+ dev_err(adapter->dev, "Unable to allocate skb for command "
+ "data.\n");
+ return -ENOMEM;
+ }
+ mwifiex_update_sk_buff_pa(skb);
+ skb_put(skb, MWIFIEX_SIZE_OF_CMD_BUFFER);
+ card->cmd_buf = skb;
+
+ return 0;
+}
+
+/*
+ * This function deletes a buffer for CMDRSP
+ */
+static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card;
+
+ if (!adapter)
+ return 0;
+
+ card = adapter->card;
+
+ if (card && card->cmdrsp_buf)
+ dev_kfree_skb_any(card->cmdrsp_buf);
+
+ if (card && card->cmd_buf)
+ dev_kfree_skb_any(card->cmd_buf);
+
+ return 0;
+}
+
+/*
+ * This function allocates a buffer for sleep cookie
+ */
+static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
+{
+ struct sk_buff *skb;
+ struct pcie_service_card *card = adapter->card;
+
+ /* Allocate memory for sleep cookie */
+ skb = dev_alloc_skb(sizeof(u32));
+ if (!skb) {
+ dev_err(adapter->dev, "Unable to allocate skb for sleep "
+ "cookie!\n");
+ return -ENOMEM;
+ }
+ mwifiex_update_sk_buff_pa(skb);
+ skb_put(skb, sizeof(u32));
+
+ /* Init val of Sleep Cookie */
+ *(u32 *)skb->data = FW_AWAKE_COOKIE;
+
+ dev_dbg(adapter->dev, "alloc_scook: sleep cookie=0x%x\n",
+ *((u32 *)skb->data));
+
+ /* Save the sleep cookie */
+ card->sleep_cookie = skb;
+
+ return 0;
+}
+
+/*
+ * This function deletes buffer for sleep cookie
+ */
+static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card;
+
+ if (!adapter)
+ return 0;
+
+ card = adapter->card;
+
+ if (card && card->sleep_cookie) {
+ dev_kfree_skb_any(card->sleep_cookie);
+ card->sleep_cookie = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * This function sends data buffer to device
+ */
+static int
+mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb)
+{
+ struct pcie_service_card *card = adapter->card;
+ u32 wrindx, rdptr;
+ phys_addr_t *buf_pa;
+ __le16 *tmp;
+
+ if (!mwifiex_pcie_ok_to_access_hw(adapter))
+ mwifiex_pm_wakeup_card(adapter);
+
+ /* Read the TX ring read pointer set by firmware */
+ if (mwifiex_read_reg(adapter, REG_TXBD_RDPTR, &rdptr)) {
+ dev_err(adapter->dev, "SEND DATA: failed to read "
+ "REG_TXBD_RDPTR\n");
+ return -1;
+ }
+
+ wrindx = card->txbd_wrptr & MWIFIEX_TXBD_MASK;
+
+ dev_dbg(adapter->dev, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n", rdptr,
+ card->txbd_wrptr);
+ if (((card->txbd_wrptr & MWIFIEX_TXBD_MASK) !=
+ (rdptr & MWIFIEX_TXBD_MASK)) ||
+ ((card->txbd_wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) !=
+ (rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) {
+ struct sk_buff *skb_data;
+ u8 *payload;
+
+ adapter->data_sent = true;
+ skb_data = card->tx_buf_list[wrindx];
+ memcpy(skb_data->data, skb->data, skb->len);
+ payload = skb_data->data;
+ tmp = (__le16 *)&payload[0];
+ *tmp = cpu_to_le16((u16)skb->len);
+ tmp = (__le16 *)&payload[2];
+ *tmp = cpu_to_le16(MWIFIEX_TYPE_DATA);
+ skb_put(skb_data, MWIFIEX_RX_DATA_BUF_SIZE - skb_data->len);
+ skb_trim(skb_data, skb->len);
+ buf_pa = MWIFIEX_SKB_PACB(skb_data);
+ card->txbd_ring[wrindx]->paddr = *buf_pa;
+ card->txbd_ring[wrindx]->len = (u16)skb_data->len;
+ card->txbd_ring[wrindx]->flags = MWIFIEX_BD_FLAG_FIRST_DESC |
+ MWIFIEX_BD_FLAG_LAST_DESC;
+
+ if ((++card->txbd_wrptr & MWIFIEX_TXBD_MASK) ==
+ MWIFIEX_MAX_TXRX_BD)
+ card->txbd_wrptr = ((card->txbd_wrptr &
+ MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
+ MWIFIEX_BD_FLAG_ROLLOVER_IND);
+
+ /* Write the TX ring write pointer in to REG_TXBD_WRPTR */
+ if (mwifiex_write_reg(adapter, REG_TXBD_WRPTR,
+ card->txbd_wrptr)) {
+ dev_err(adapter->dev, "SEND DATA: failed to write "
+ "REG_TXBD_WRPTR\n");
+ return 0;
+ }
+
+ /* Send the TX ready interrupt */
+ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
+ CPU_INTR_DNLD_RDY)) {
+ dev_err(adapter->dev, "SEND DATA: failed to assert "
+ "door-bell interrupt.\n");
+ return -1;
+ }
+ dev_dbg(adapter->dev, "info: SEND DATA: Updated <Rd: %#x, Wr: "
+ "%#x> and sent packet to firmware "
+ "successfully\n", rdptr,
+ card->txbd_wrptr);
+ } else {
+ dev_dbg(adapter->dev, "info: TX Ring full, can't send anymore "
+ "packets to firmware\n");
+ adapter->data_sent = true;
+ /* Send the TX ready interrupt */
+ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
+ CPU_INTR_DNLD_RDY))
+ dev_err(adapter->dev, "SEND DATA: failed to assert "
+ "door-bell interrupt\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/*
+ * This function handles received buffer ring and
+ * dispatches packets to upper
+ */
+static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ u32 wrptr, rd_index;
+ int ret = 0;
+ struct sk_buff *skb_tmp = NULL;
+
+ /* Read the RX ring Write pointer set by firmware */
+ if (mwifiex_read_reg(adapter, REG_RXBD_WRPTR, &wrptr)) {
+ dev_err(adapter->dev, "RECV DATA: failed to read "
+ "REG_TXBD_RDPTR\n");
+ ret = -1;
+ goto done;
+ }
+
+ while (((wrptr & MWIFIEX_RXBD_MASK) !=
+ (card->rxbd_rdptr & MWIFIEX_RXBD_MASK)) ||
+ ((wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) ==
+ (card->rxbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) {
+ struct sk_buff *skb_data;
+ u16 rx_len;
+
+ rd_index = card->rxbd_rdptr & MWIFIEX_RXBD_MASK;
+ skb_data = card->rx_buf_list[rd_index];
+
+ /* Get data length from interface header -
+ first byte is len, second byte is type */
+ rx_len = *((u16 *)skb_data->data);
+ dev_dbg(adapter->dev, "info: RECV DATA: Rd=%#x, Wr=%#x, "
+ "Len=%d\n", card->rxbd_rdptr, wrptr, rx_len);
+ skb_tmp = dev_alloc_skb(rx_len);
+ if (!skb_tmp) {
+ dev_dbg(adapter->dev, "info: Failed to alloc skb "
+ "for RX\n");
+ ret = -EBUSY;
+ goto done;
+ }
+
+ skb_put(skb_tmp, rx_len);
+
+ memcpy(skb_tmp->data, skb_data->data + INTF_HEADER_LEN, rx_len);
+ if ((++card->rxbd_rdptr & MWIFIEX_RXBD_MASK) ==
+ MWIFIEX_MAX_TXRX_BD) {
+ card->rxbd_rdptr = ((card->rxbd_rdptr &
+ MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
+ MWIFIEX_BD_FLAG_ROLLOVER_IND);
+ }
+ dev_dbg(adapter->dev, "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
+ card->rxbd_rdptr, wrptr);
+
+ /* Write the RX ring read pointer in to REG_RXBD_RDPTR */
+ if (mwifiex_write_reg(adapter, REG_RXBD_RDPTR,
+ card->rxbd_rdptr)) {
+ dev_err(adapter->dev, "RECV DATA: failed to "
+ "write REG_RXBD_RDPTR\n");
+ ret = -1;
+ goto done;
+ }
+
+ /* Read the RX ring Write pointer set by firmware */
+ if (mwifiex_read_reg(adapter, REG_RXBD_WRPTR, &wrptr)) {
+ dev_err(adapter->dev, "RECV DATA: failed to read "
+ "REG_TXBD_RDPTR\n");
+ ret = -1;
+ goto done;
+ }
+ dev_dbg(adapter->dev, "info: RECV DATA: Received packet from "
+ "firmware successfully\n");
+ mwifiex_handle_rx_packet(adapter, skb_tmp);
+ }
+
+done:
+ if (ret && skb_tmp)
+ dev_kfree_skb_any(skb_tmp);
+ return ret;
+}
+
+/*
+ * This function downloads the boot command to device
+ */
+static int
+mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
+{
+ phys_addr_t *buf_pa = MWIFIEX_SKB_PACB(skb);
+
+ if (!(skb->data && skb->len && *buf_pa)) {
+ dev_err(adapter->dev, "Invalid parameter in %s <%p, %#x:%x, "
+ "%x>\n", __func__, skb->data, skb->len,
+ (u32)*buf_pa, (u32)((u64)*buf_pa >> 32));
+ return -1;
+ }
+
+ /* Write the lower 32bits of the physical address to scratch
+ * register 0 */
+ if (mwifiex_write_reg(adapter, PCIE_SCRATCH_0_REG, (u32)*buf_pa)) {
+ dev_err(adapter->dev, "%s: failed to write download command "
+ "to boot code.\n", __func__);
+ return -1;
+ }
+
+ /* Write the upper 32bits of the physical address to scratch
+ * register 1 */
+ if (mwifiex_write_reg(adapter, PCIE_SCRATCH_1_REG,
+ (u32)((u64)*buf_pa >> 32))) {
+ dev_err(adapter->dev, "%s: failed to write download command "
+ "to boot code.\n", __func__);
+ return -1;
+ }
+
+ /* Write the command length to scratch register 2 */
+ if (mwifiex_write_reg(adapter, PCIE_SCRATCH_2_REG, skb->len)) {
+ dev_err(adapter->dev, "%s: failed to write command length to "
+ "scratch register 2\n", __func__);
+ return -1;
+ }
+
+ /* Ring the door bell */
+ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
+ CPU_INTR_DOOR_BELL)) {
+ dev_err(adapter->dev, "%s: failed to assert door-bell "
+ "interrupt.\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * This function downloads commands to the device
+ */
+static int
+mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
+{
+ struct pcie_service_card *card = adapter->card;
+ int ret = 0;
+ phys_addr_t *cmd_buf_pa;
+ phys_addr_t *cmdrsp_buf_pa;
+
+ if (!(skb->data && skb->len)) {
+ dev_err(adapter->dev, "Invalid parameter in %s <%p, %#x>\n",
+ __func__, skb->data, skb->len);
+ return -1;
+ }
+
+ /* Make sure a command response buffer is available */
+ if (!card->cmdrsp_buf) {
+ dev_err(adapter->dev, "No response buffer available, send "
+ "command failed\n");
+ return -EBUSY;
+ }
+
+ /* Make sure a command buffer is available */
+ if (!card->cmd_buf) {
+ dev_err(adapter->dev, "Command buffer not available\n");
+ return -EBUSY;
+ }
+
+ adapter->cmd_sent = true;
+ /* Copy the given skb in to DMA accessable shared buffer */
+ skb_put(card->cmd_buf, MWIFIEX_SIZE_OF_CMD_BUFFER - card->cmd_buf->len);
+ skb_trim(card->cmd_buf, skb->len);
+ memcpy(card->cmd_buf->data, skb->data, skb->len);
+
+ /* To send a command, the driver will:
+ 1. Write the 64bit physical address of the data buffer to
+ SCRATCH1 + SCRATCH0
+ 2. Ring the door bell (i.e. set the door bell interrupt)
+
+ In response to door bell interrupt, the firmware will perform
+ the DMA of the command packet (first header to obtain the total
+ length and then rest of the command).
+ */
+
+ if (card->cmdrsp_buf) {
+ cmdrsp_buf_pa = MWIFIEX_SKB_PACB(card->cmdrsp_buf);
+ /* Write the lower 32bits of the cmdrsp buffer physical
+ address */
+ if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_LO,
+ (u32)*cmdrsp_buf_pa)) {
+ dev_err(adapter->dev, "Failed to write download command to boot code.\n");
+ ret = -1;
+ goto done;
+ }
+ /* Write the upper 32bits of the cmdrsp buffer physical
+ address */
+ if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_HI,
+ (u32)((u64)*cmdrsp_buf_pa >> 32))) {
+ dev_err(adapter->dev, "Failed to write download command"
+ " to boot code.\n");
+ ret = -1;
+ goto done;
+ }
+ }
+
+ cmd_buf_pa = MWIFIEX_SKB_PACB(card->cmd_buf);
+ /* Write the lower 32bits of the physical address to REG_CMD_ADDR_LO */
+ if (mwifiex_write_reg(adapter, REG_CMD_ADDR_LO,
+ (u32)*cmd_buf_pa)) {
+ dev_err(adapter->dev, "Failed to write download command "
+ "to boot code.\n");
+ ret = -1;
+ goto done;
+ }
+ /* Write the upper 32bits of the physical address to REG_CMD_ADDR_HI */
+ if (mwifiex_write_reg(adapter, REG_CMD_ADDR_HI,
+ (u32)((u64)*cmd_buf_pa >> 32))) {
+ dev_err(adapter->dev, "Failed to write download command "
+ "to boot code.\n");
+ ret = -1;
+ goto done;
+ }
+
+ /* Write the command length to REG_CMD_SIZE */
+ if (mwifiex_write_reg(adapter, REG_CMD_SIZE,
+ card->cmd_buf->len)) {
+ dev_err(adapter->dev, "Failed to write command length to "
+ "REG_CMD_SIZE\n");
+ ret = -1;
+ goto done;
+ }
+
+ /* Ring the door bell */
+ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
+ CPU_INTR_DOOR_BELL)) {
+ dev_err(adapter->dev, "Failed to assert door-bell "
+ "interrupt.\n");
+ ret = -1;
+ goto done;
+ }
+
+done:
+ if (ret)
+ adapter->cmd_sent = false;
+
+ return 0;
+}
+
+/*
+ * This function handles command complete interrupt
+ */
+static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ int count = 0;
+
+ dev_dbg(adapter->dev, "info: Rx CMD Response\n");
+
+ if (!adapter->curr_cmd) {
+ skb_pull(card->cmdrsp_buf, INTF_HEADER_LEN);
+ if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
+ mwifiex_process_sleep_confirm_resp(adapter,
+ card->cmdrsp_buf->data,
+ card->cmdrsp_buf->len);
+ while (mwifiex_pcie_ok_to_access_hw(adapter) &&
+ (count++ < 10))
+ udelay(50);
+ } else {
+ dev_err(adapter->dev, "There is no command but "
+ "got cmdrsp\n");
+ }
+ memcpy(adapter->upld_buf, card->cmdrsp_buf->data,
+ min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER,
+ card->cmdrsp_buf->len));
+ skb_push(card->cmdrsp_buf, INTF_HEADER_LEN);
+ } else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
+ skb_pull(card->cmdrsp_buf, INTF_HEADER_LEN);
+ adapter->curr_cmd->resp_skb = card->cmdrsp_buf;
+ adapter->cmd_resp_received = true;
+ /* Take the pointer and set it to CMD node and will
+ return in the response complete callback */
+ card->cmdrsp_buf = NULL;
+
+ /* Clear the cmd-rsp buffer address in scratch registers. This
+ will prevent firmware from writing to the same response
+ buffer again. */
+ if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_LO, 0)) {
+ dev_err(adapter->dev, "cmd_done: failed to clear "
+ "cmd_rsp address.\n");
+ return -1;
+ }
+ /* Write the upper 32bits of the cmdrsp buffer physical
+ address */
+ if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_HI, 0)) {
+ dev_err(adapter->dev, "cmd_done: failed to clear "
+ "cmd_rsp address.\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Command Response processing complete handler
+ */
+static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb)
+{
+ struct pcie_service_card *card = adapter->card;
+
+ if (skb) {
+ card->cmdrsp_buf = skb;
+ skb_push(card->cmdrsp_buf, INTF_HEADER_LEN);
+ }
+
+ return 0;
+}
+
+/*
+ * This function handles firmware event ready interrupt
+ */
+static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
+ u32 wrptr, event;
+
+ if (adapter->event_received) {
+ dev_dbg(adapter->dev, "info: Event being processed, "\
+ "do not process this interrupt just yet\n");
+ return 0;
+ }
+
+ if (rdptr >= MWIFIEX_MAX_EVT_BD) {
+ dev_dbg(adapter->dev, "info: Invalid read pointer...\n");
+ return -1;
+ }
+
+ /* Read the event ring write pointer set by firmware */
+ if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
+ dev_err(adapter->dev, "EventReady: failed to read REG_EVTBD_WRPTR\n");
+ return -1;
+ }
+
+ dev_dbg(adapter->dev, "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>",
+ card->evtbd_rdptr, wrptr);
+ if (((wrptr & MWIFIEX_EVTBD_MASK) !=
+ (card->evtbd_rdptr & MWIFIEX_EVTBD_MASK)) ||
+ ((wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) ==
+ (card->evtbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) {
+ struct sk_buff *skb_cmd;
+ __le16 data_len = 0;
+ u16 evt_len;
+
+ dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr);
+ skb_cmd = card->evt_buf_list[rdptr];
+ /* Take the pointer and set it to event pointer in adapter
+ and will return back after event handling callback */
+ card->evt_buf_list[rdptr] = NULL;
+ card->evtbd_ring[rdptr]->paddr = 0;
+ card->evtbd_ring[rdptr]->len = 0;
+ card->evtbd_ring[rdptr]->flags = 0;
+
+ event = *(u32 *) &skb_cmd->data[INTF_HEADER_LEN];
+ adapter->event_cause = event;
+ /* The first 4bytes will be the event transfer header
+ len is 2 bytes followed by type which is 2 bytes */
+ memcpy(&data_len, skb_cmd->data, sizeof(__le16));
+ evt_len = le16_to_cpu(data_len);
+
+ skb_pull(skb_cmd, INTF_HEADER_LEN);
+ dev_dbg(adapter->dev, "info: Event length: %d\n", evt_len);
+
+ if ((evt_len > 0) && (evt_len < MAX_EVENT_SIZE))
+ memcpy(adapter->event_body, skb_cmd->data +
+ MWIFIEX_EVENT_HEADER_LEN, evt_len -
+ MWIFIEX_EVENT_HEADER_LEN);
+
+ adapter->event_received = true;
+ adapter->event_skb = skb_cmd;
+
+ /* Do not update the event read pointer here, wait till the
+ buffer is released. This is just to make things simpler,
+ we need to find a better method of managing these buffers.
+ */
+ }
+
+ return 0;
+}
+
+/*
+ * Event processing complete handler
+ */
+static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb)
+{
+ struct pcie_service_card *card = adapter->card;
+ int ret = 0;
+ u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
+ u32 wrptr;
+ phys_addr_t *buf_pa;
+
+ if (!skb)
+ return 0;
+
+ if (rdptr >= MWIFIEX_MAX_EVT_BD)
+ dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
+ rdptr);
+
+ /* Read the event ring write pointer set by firmware */
+ if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
+ dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_WRPTR\n");
+ ret = -1;
+ goto done;
+ }
+
+ if (!card->evt_buf_list[rdptr]) {
+ skb_push(skb, INTF_HEADER_LEN);
+ card->evt_buf_list[rdptr] = skb;
+ buf_pa = MWIFIEX_SKB_PACB(skb);
+ card->evtbd_ring[rdptr]->paddr = *buf_pa;
+ card->evtbd_ring[rdptr]->len = (u16)skb->len;
+ card->evtbd_ring[rdptr]->flags = 0;
+ skb = NULL;
+ } else {
+ dev_dbg(adapter->dev, "info: ERROR: Buffer is still valid at "
+ "index %d, <%p, %p>\n", rdptr,
+ card->evt_buf_list[rdptr], skb);
+ }
+
+ if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) {
+ card->evtbd_rdptr = ((card->evtbd_rdptr &
+ MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
+ MWIFIEX_BD_FLAG_ROLLOVER_IND);
+ }
+
+ dev_dbg(adapter->dev, "info: Updated <Rd: 0x%x, Wr: 0x%x>",
+ card->evtbd_rdptr, wrptr);
+
+ /* Write the event ring read pointer in to REG_EVTBD_RDPTR */
+ if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) {
+ dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_RDPTR\n");
+ ret = -1;
+ goto done;
+ }
+
+done:
+ /* Free the buffer for failure case */
+ if (ret && skb)
+ dev_kfree_skb_any(skb);
+
+ dev_dbg(adapter->dev, "info: Check Events Again\n");
+ ret = mwifiex_pcie_process_event_ready(adapter);
+
+ return ret;
+}
+
+/*
+ * This function downloads the firmware to the card.
+ *
+ * Firmware is downloaded to the card in blocks. Every block download
+ * is tested for CRC errors, and retried a number of times before
+ * returning failure.
+ */
+static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
+ struct mwifiex_fw_image *fw)
+{
+ int ret;
+ u8 *firmware = fw->fw_buf;
+ u32 firmware_len = fw->fw_len;
+ u32 offset = 0;
+ struct sk_buff *skb;
+ u32 txlen, tx_blocks = 0, tries, len;
+ u32 block_retry_cnt = 0;
+
+ if (!adapter) {
+ pr_err("adapter structure is not valid\n");
+ return -1;
+ }
+
+ if (!firmware || !firmware_len) {
+ dev_err(adapter->dev, "No firmware image found! "
+ "Terminating download\n");
+ return -1;
+ }
+
+ dev_dbg(adapter->dev, "info: Downloading FW image (%d bytes)\n",
+ firmware_len);
+
+ if (mwifiex_pcie_disable_host_int(adapter)) {
+ dev_err(adapter->dev, "%s: Disabling interrupts"
+ " failed.\n", __func__);
+ return -1;
+ }
+
+ skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ mwifiex_update_sk_buff_pa(skb);
+
+ /* Perform firmware data transfer */
+ do {
+ u32 ireg_intr = 0;
+
+ /* More data? */
+ if (offset >= firmware_len)
+ break;
+
+ for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
+ ret = mwifiex_read_reg(adapter, PCIE_SCRATCH_2_REG,
+ &len);
+ if (ret) {
+ dev_warn(adapter->dev, "Failed reading length from boot code\n");
+ goto done;
+ }
+ if (len)
+ break;
+ udelay(10);
+ }
+
+ if (!len) {
+ break;
+ } else if (len > MWIFIEX_UPLD_SIZE) {
+ pr_err("FW download failure @ %d, invalid length %d\n",
+ offset, len);
+ ret = -1;
+ goto done;
+ }
+
+ txlen = len;
+
+ if (len & BIT(0)) {
+ block_retry_cnt++;
+ if (block_retry_cnt > MAX_WRITE_IOMEM_RETRY) {
+ pr_err("FW download failure @ %d, over max "
+ "retry count\n", offset);
+ ret = -1;
+ goto done;
+ }
+ dev_err(adapter->dev, "FW CRC error indicated by the "
+ "helper: len = 0x%04X, txlen = "
+ "%d\n", len, txlen);
+ len &= ~BIT(0);
+ /* Setting this to 0 to resend from same offset */
+ txlen = 0;
+ } else {
+ block_retry_cnt = 0;
+ /* Set blocksize to transfer - checking for
+ last block */
+ if (firmware_len - offset < txlen)
+ txlen = firmware_len - offset;
+
+ dev_dbg(adapter->dev, ".");
+
+ tx_blocks =
+ (txlen + MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD - 1) /
+ MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD;
+
+ /* Copy payload to buffer */
+ memmove(skb->data, &firmware[offset], txlen);
+ }
+
+ skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len);
+ skb_trim(skb, tx_blocks * MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD);
+
+ /* Send the boot command to device */
+ if (mwifiex_pcie_send_boot_cmd(adapter, skb)) {
+ dev_err(adapter->dev, "Failed to send firmware download command\n");
+ ret = -1;
+ goto done;
+ }
+ /* Wait for the command done interrupt */
+ do {
+ if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
+ &ireg_intr)) {
+ dev_err(adapter->dev, "%s: Failed to read "
+ "interrupt status during "
+ "fw dnld.\n", __func__);
+ ret = -1;
+ goto done;
+ }
+ } while ((ireg_intr & CPU_INTR_DOOR_BELL) ==
+ CPU_INTR_DOOR_BELL);
+ offset += txlen;
+ } while (true);
+
+ dev_dbg(adapter->dev, "info:\nFW download over, size %d bytes\n",
+ offset);
+
+ ret = 0;
+
+done:
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+
+/*
+ * This function checks the firmware status in card.
+ *
+ * The winner interface is also determined by this function.
+ */
+static int
+mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
+{
+ int ret = 0;
+ u32 firmware_stat, winner_status;
+ u32 tries;
+
+ /* Mask spurios interrupts */
+ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK,
+ HOST_INTR_MASK)) {
+ dev_warn(adapter->dev, "Write register failed\n");
+ return -1;
+ }
+
+ dev_dbg(adapter->dev, "Setting driver ready signature\n");
+ if (mwifiex_write_reg(adapter, REG_DRV_READY, FIRMWARE_READY_PCIE)) {
+ dev_err(adapter->dev, "Failed to write driver ready signature\n");
+ return -1;
+ }
+
+ /* Wait for firmware initialization event */
+ for (tries = 0; tries < poll_num; tries++) {
+ if (mwifiex_read_reg(adapter, PCIE_SCRATCH_3_REG,
+ &firmware_stat))
+ ret = -1;
+ else
+ ret = 0;
+ if (ret)
+ continue;
+ if (firmware_stat == FIRMWARE_READY_PCIE) {
+ ret = 0;
+ break;
+ } else {
+ mdelay(100);
+ ret = -1;
+ }
+ }
+
+ if (ret) {
+ if (mwifiex_read_reg(adapter, PCIE_SCRATCH_3_REG,
+ &winner_status))
+ ret = -1;
+ else if (!winner_status) {
+ dev_err(adapter->dev, "PCI-E is the winner\n");
+ adapter->winner = 1;
+ ret = -1;
+ } else {
+ dev_err(adapter->dev, "PCI-E is not the winner <%#x, %d>, exit download\n",
+ ret, adapter->winner);
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * This function reads the interrupt status from card.
+ */
+static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
+{
+ u32 pcie_ireg;
+ unsigned long flags;
+
+ if (!mwifiex_pcie_ok_to_access_hw(adapter))
+ return;
+
+ if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) {
+ dev_warn(adapter->dev, "Read register failed\n");
+ return;
+ }
+
+ if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) {
+
+ mwifiex_pcie_disable_host_int(adapter);
+
+ /* Clear the pending interrupts */
+ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS,
+ ~pcie_ireg)) {
+ dev_warn(adapter->dev, "Write register failed\n");
+ return;
+ }
+ spin_lock_irqsave(&adapter->int_lock, flags);
+ adapter->int_status |= pcie_ireg;
+ spin_unlock_irqrestore(&adapter->int_lock, flags);
+
+ if (pcie_ireg & HOST_INTR_CMD_DONE) {
+ if ((adapter->ps_state == PS_STATE_SLEEP_CFM) ||
+ (adapter->ps_state == PS_STATE_SLEEP)) {
+ mwifiex_pcie_enable_host_int(adapter);
+ if (mwifiex_write_reg(adapter,
+ PCIE_CPU_INT_EVENT,
+ CPU_INTR_SLEEP_CFM_DONE)) {
+ dev_warn(adapter->dev, "Write register"
+ " failed\n");
+ return;
+
+ }
+ }
+ } else if (!adapter->pps_uapsd_mode &&
+ adapter->ps_state == PS_STATE_SLEEP) {
+ /* Potentially for PCIe we could get other
+ * interrupts like shared. Don't change power
+ * state until cookie is set */
+ if (mwifiex_pcie_ok_to_access_hw(adapter))
+ adapter->ps_state = PS_STATE_AWAKE;
+ }
+ }
+}
+
+/*
+ * Interrupt handler for PCIe root port
+ *
+ * This function reads the interrupt status from firmware and assigns
+ * the main process in workqueue which will handle the interrupt.
+ */
+static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
+{
+ struct pci_dev *pdev = (struct pci_dev *)context;
+ struct pcie_service_card *card;
+ struct mwifiex_adapter *adapter;
+
+ if (!pdev) {
+ pr_debug("info: %s: pdev is NULL\n", (u8 *)pdev);
+ goto exit;
+ }
+
+ card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ if (!card || !card->adapter) {
+ pr_debug("info: %s: card=%p adapter=%p\n", __func__, card,
+ card ? card->adapter : NULL);
+ goto exit;
+ }
+ adapter = card->adapter;
+
+ if (adapter->surprise_removed)
+ goto exit;
+
+ mwifiex_interrupt_status(adapter);
+ queue_work(adapter->workqueue, &adapter->main_work);
+
+exit:
+ return IRQ_HANDLED;
+}
+
+/*
+ * This function checks the current interrupt status.
+ *
+ * The following interrupts are checked and handled by this function -
+ * - Data sent
+ * - Command sent
+ * - Command received
+ * - Packets received
+ * - Events received
+ *
+ * In case of Rx packets received, the packets are uploaded from card to
+ * host and processed accordingly.
+ */
+static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
+{
+ int ret;
+ u32 pcie_ireg = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->int_lock, flags);
+ /* Clear out unused interrupts */
+ adapter->int_status &= HOST_INTR_MASK;
+ spin_unlock_irqrestore(&adapter->int_lock, flags);
+
+ while (adapter->int_status & HOST_INTR_MASK) {
+ if (adapter->int_status & HOST_INTR_DNLD_DONE) {
+ adapter->int_status &= ~HOST_INTR_DNLD_DONE;
+ if (adapter->data_sent) {
+ dev_dbg(adapter->dev, "info: DATA sent Interrupt\n");
+ adapter->data_sent = false;
+ }
+ }
+ if (adapter->int_status & HOST_INTR_UPLD_RDY) {
+ adapter->int_status &= ~HOST_INTR_UPLD_RDY;
+ dev_dbg(adapter->dev, "info: Rx DATA\n");
+ ret = mwifiex_pcie_process_recv_data(adapter);
+ if (ret)
+ return ret;
+ }
+ if (adapter->int_status & HOST_INTR_EVENT_RDY) {
+ adapter->int_status &= ~HOST_INTR_EVENT_RDY;
+ dev_dbg(adapter->dev, "info: Rx EVENT\n");
+ ret = mwifiex_pcie_process_event_ready(adapter);
+ if (ret)
+ return ret;
+ }
+
+ if (adapter->int_status & HOST_INTR_CMD_DONE) {
+ adapter->int_status &= ~HOST_INTR_CMD_DONE;
+ if (adapter->cmd_sent) {
+ dev_dbg(adapter->dev, "info: CMD sent Interrupt\n");
+ adapter->cmd_sent = false;
+ }
+ /* Handle command response */
+ ret = mwifiex_pcie_process_cmd_complete(adapter);
+ if (ret)
+ return ret;
+ }
+
+ if (mwifiex_pcie_ok_to_access_hw(adapter)) {
+ if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
+ &pcie_ireg)) {
+ dev_warn(adapter->dev, "Read register failed\n");
+ return -1;
+ }
+
+ if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) {
+ if (mwifiex_write_reg(adapter,
+ PCIE_HOST_INT_STATUS, ~pcie_ireg)) {
+ dev_warn(adapter->dev, "Write register"
+ " failed\n");
+ return -1;
+ }
+ adapter->int_status |= pcie_ireg;
+ adapter->int_status &= HOST_INTR_MASK;
+ }
+
+ }
+ }
+ dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
+ adapter->cmd_sent, adapter->data_sent);
+ mwifiex_pcie_enable_host_int(adapter);
+
+ return 0;
+}
+
+/*
+ * This function downloads data from driver to card.
+ *
+ * Both commands and data packets are transferred to the card by this
+ * function.
+ *
+ * This function adds the PCIE specific header to the front of the buffer
+ * before transferring. The header contains the length of the packet and
+ * the type. The firmware handles the packets based upon this set type.
+ */
+static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
+ struct sk_buff *skb,
+ struct mwifiex_tx_param *tx_param)
+{
+ if (!adapter || !skb) {
+ dev_err(adapter->dev, "Invalid parameter in %s <%p, %p>\n",
+ __func__, adapter, skb);
+ return -1;
+ }
+
+ if (type == MWIFIEX_TYPE_DATA)
+ return mwifiex_pcie_send_data(adapter, skb);
+ else if (type == MWIFIEX_TYPE_CMD)
+ return mwifiex_pcie_send_cmd(adapter, skb);
+
+ return 0;
+}
+
+/*
+ * This function initializes the PCI-E host memory space, WCB rings, etc.
+ *
+ * The following initializations steps are followed -
+ * - Allocate TXBD ring buffers
+ * - Allocate RXBD ring buffers
+ * - Allocate event BD ring buffers
+ * - Allocate command response ring buffer
+ * - Allocate sleep cookie buffer
+ */
+static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ int ret;
+ struct pci_dev *pdev = card->dev;
+
+ pci_set_drvdata(pdev, card);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_enable_dev;
+
+ pci_set_master(pdev);
+
+ dev_dbg(adapter->dev, "try set_consistent_dma_mask(32)\n");
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(adapter->dev, "set_dma_mask(32) failed\n");
+ goto err_set_dma_mask;
+ }
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(adapter->dev, "set_consistent_dma_mask(64) failed\n");
+ goto err_set_dma_mask;
+ }
+
+ ret = pci_request_region(pdev, 0, DRV_NAME);
+ if (ret) {
+ dev_err(adapter->dev, "req_reg(0) error\n");
+ goto err_req_region0;
+ }
+ card->pci_mmap = pci_iomap(pdev, 0, 0);
+ if (!card->pci_mmap) {
+ dev_err(adapter->dev, "iomap(0) error\n");
+ goto err_iomap0;
+ }
+ ret = pci_request_region(pdev, 2, DRV_NAME);
+ if (ret) {
+ dev_err(adapter->dev, "req_reg(2) error\n");
+ goto err_req_region2;
+ }
+ card->pci_mmap1 = pci_iomap(pdev, 2, 0);
+ if (!card->pci_mmap1) {
+ dev_err(adapter->dev, "iomap(2) error\n");
+ goto err_iomap2;
+ }
+
+ dev_dbg(adapter->dev, "PCI memory map Virt0: %p PCI memory map Virt2: "
+ "%p\n", card->pci_mmap, card->pci_mmap1);
+
+ card->cmdrsp_buf = NULL;
+ ret = mwifiex_pcie_create_txbd_ring(adapter);
+ if (ret)
+ goto err_cre_txbd;
+ ret = mwifiex_pcie_create_rxbd_ring(adapter);
+ if (ret)
+ goto err_cre_rxbd;
+ ret = mwifiex_pcie_create_evtbd_ring(adapter);
+ if (ret)
+ goto err_cre_evtbd;
+ ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter);
+ if (ret)
+ goto err_alloc_cmdbuf;
+ ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
+ if (ret)
+ goto err_alloc_cookie;
+
+ return ret;
+
+err_alloc_cookie:
+ mwifiex_pcie_delete_cmdrsp_buf(adapter);
+err_alloc_cmdbuf:
+ mwifiex_pcie_delete_evtbd_ring(adapter);
+err_cre_evtbd:
+ mwifiex_pcie_delete_rxbd_ring(adapter);
+err_cre_rxbd:
+ mwifiex_pcie_delete_txbd_ring(adapter);
+err_cre_txbd:
+ pci_iounmap(pdev, card->pci_mmap1);
+err_iomap2:
+ pci_release_region(pdev, 2);
+err_req_region2:
+ pci_iounmap(pdev, card->pci_mmap);
+err_iomap0:
+ pci_release_region(pdev, 0);
+err_req_region0:
+err_set_dma_mask:
+ pci_disable_device(pdev);
+err_enable_dev:
+ pci_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+/*
+ * This function cleans up the allocated card buffers.
+ *
+ * The following are freed by this function -
+ * - TXBD ring buffers
+ * - RXBD ring buffers
+ * - Event BD ring buffers
+ * - Command response ring buffer
+ * - Sleep cookie buffer
+ */
+static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ struct pci_dev *pdev = card->dev;
+
+ mwifiex_pcie_delete_sleep_cookie_buf(adapter);
+ mwifiex_pcie_delete_cmdrsp_buf(adapter);
+ mwifiex_pcie_delete_evtbd_ring(adapter);
+ mwifiex_pcie_delete_rxbd_ring(adapter);
+ mwifiex_pcie_delete_txbd_ring(adapter);
+ card->cmdrsp_buf = NULL;
+
+ dev_dbg(adapter->dev, "Clearing driver ready signature\n");
+ if (user_rmmod) {
+ if (mwifiex_write_reg(adapter, REG_DRV_READY, 0x00000000))
+ dev_err(adapter->dev, "Failed to write driver not-ready signature\n");
+ }
+
+ if (pdev) {
+ pci_iounmap(pdev, card->pci_mmap);
+ pci_iounmap(pdev, card->pci_mmap1);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+/*
+ * This function registers the PCIE device.
+ *
+ * PCIE IRQ is claimed, block size is set and driver data is initialized.
+ */
+static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
+{
+ int ret;
+ struct pcie_service_card *card = adapter->card;
+ struct pci_dev *pdev = card->dev;
+
+ /* save adapter pointer in card */
+ card->adapter = adapter;
+
+ ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED,
+ "MRVL_PCIE", pdev);
+ if (ret) {
+ pr_err("request_irq failed: ret=%d\n", ret);
+ adapter->card = NULL;
+ return -1;
+ }
+
+ adapter->dev = &pdev->dev;
+ strcpy(adapter->fw_name, PCIE8766_DEFAULT_FW_NAME);
+
+ return 0;
+}
+
+/*
+ * This function unregisters the PCIE device.
+ *
+ * The PCIE IRQ is released, the function is disabled and driver
+ * data is set to null.
+ */
+static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+
+ if (card) {
+ dev_dbg(adapter->dev, "%s(): calling free_irq()\n", __func__);
+ free_irq(card->dev->irq, card->dev);
+ }
+}
+
+static struct mwifiex_if_ops pcie_ops = {
+ .init_if = mwifiex_pcie_init,
+ .cleanup_if = mwifiex_pcie_cleanup,
+ .check_fw_status = mwifiex_check_fw_status,
+ .prog_fw = mwifiex_prog_fw_w_helper,
+ .register_dev = mwifiex_register_dev,
+ .unregister_dev = mwifiex_unregister_dev,
+ .enable_int = mwifiex_pcie_enable_host_int,
+ .process_int_status = mwifiex_process_int_status,
+ .host_to_card = mwifiex_pcie_host_to_card,
+ .wakeup = mwifiex_pm_wakeup_card,
+ .wakeup_complete = mwifiex_pm_wakeup_card_complete,
+
+ /* PCIE specific */
+ .cmdrsp_complete = mwifiex_pcie_cmdrsp_complete,
+ .event_complete = mwifiex_pcie_event_complete,
+ .update_mp_end_port = NULL,
+ .cleanup_mpa_buf = NULL,
+};
+
+/*
+ * This function initializes the PCIE driver module.
+ *
+ * This initiates the semaphore and registers the device with
+ * PCIE bus.
+ */
+static int mwifiex_pcie_init_module(void)
+{
+ int ret;
+
+ pr_debug("Marvell 8766 PCIe Driver\n");
+
+ sema_init(&add_remove_card_sem, 1);
+
+ /* Clear the flag in case user removes the card. */
+ user_rmmod = 0;
+
+ ret = pci_register_driver(&mwifiex_pcie);
+ if (ret)
+ pr_err("Driver register failed!\n");
+ else
+ pr_debug("info: Driver registered successfully!\n");
+
+ return ret;
+}
+
+/*
+ * This function cleans up the PCIE driver.
+ *
+ * The following major steps are followed for cleanup -
+ * - Resume the device if its suspended
+ * - Disconnect the device if connected
+ * - Shutdown the firmware
+ * - Unregister the device from PCIE bus.
+ */
+static void mwifiex_pcie_cleanup_module(void)
+{
+ if (!down_interruptible(&add_remove_card_sem))
+ up(&add_remove_card_sem);
+
+ /* Set the flag as user is removing this module. */
+ user_rmmod = 1;
+
+ pci_unregister_driver(&mwifiex_pcie);
+}
+
+module_init(mwifiex_pcie_init_module);
+module_exit(mwifiex_pcie_cleanup_module);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION);
+MODULE_VERSION(PCIE_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE("mrvl/pcie8766_uapsta.bin");
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
new file mode 100644
index 00000000000..445ff21772e
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -0,0 +1,148 @@
+/* @file mwifiex_pcie.h
+ *
+ * @brief This file contains definitions for PCI-E interface.
+ * driver.
+ *
+ * Copyright (C) 2011, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#ifndef _MWIFIEX_PCIE_H
+#define _MWIFIEX_PCIE_H
+
+#include <linux/pci.h>
+#include <linux/pcieport_if.h>
+#include <linux/interrupt.h>
+
+#include "main.h"
+
+#define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
+
+/* Constants for Buffer Descriptor (BD) rings */
+#define MWIFIEX_MAX_TXRX_BD 0x20
+#define MWIFIEX_TXBD_MASK 0x3F
+#define MWIFIEX_RXBD_MASK 0x3F
+
+#define MWIFIEX_MAX_EVT_BD 0x04
+#define MWIFIEX_EVTBD_MASK 0x07
+
+/* PCIE INTERNAL REGISTERS */
+#define PCIE_SCRATCH_0_REG 0xC10
+#define PCIE_SCRATCH_1_REG 0xC14
+#define PCIE_CPU_INT_EVENT 0xC18
+#define PCIE_CPU_INT_STATUS 0xC1C
+#define PCIE_HOST_INT_STATUS 0xC30
+#define PCIE_HOST_INT_MASK 0xC34
+#define PCIE_HOST_INT_STATUS_MASK 0xC3C
+#define PCIE_SCRATCH_2_REG 0xC40
+#define PCIE_SCRATCH_3_REG 0xC44
+#define PCIE_SCRATCH_4_REG 0xCC0
+#define PCIE_SCRATCH_5_REG 0xCC4
+#define PCIE_SCRATCH_6_REG 0xCC8
+#define PCIE_SCRATCH_7_REG 0xCCC
+#define PCIE_SCRATCH_8_REG 0xCD0
+#define PCIE_SCRATCH_9_REG 0xCD4
+#define PCIE_SCRATCH_10_REG 0xCD8
+#define PCIE_SCRATCH_11_REG 0xCDC
+#define PCIE_SCRATCH_12_REG 0xCE0
+
+#define CPU_INTR_DNLD_RDY BIT(0)
+#define CPU_INTR_DOOR_BELL BIT(1)
+#define CPU_INTR_SLEEP_CFM_DONE BIT(2)
+#define CPU_INTR_RESET BIT(3)
+
+#define HOST_INTR_DNLD_DONE BIT(0)
+#define HOST_INTR_UPLD_RDY BIT(1)
+#define HOST_INTR_CMD_DONE BIT(2)
+#define HOST_INTR_EVENT_RDY BIT(3)
+#define HOST_INTR_MASK (HOST_INTR_DNLD_DONE | \
+ HOST_INTR_UPLD_RDY | \
+ HOST_INTR_CMD_DONE | \
+ HOST_INTR_EVENT_RDY)
+
+#define MWIFIEX_BD_FLAG_ROLLOVER_IND BIT(7)
+#define MWIFIEX_BD_FLAG_FIRST_DESC BIT(0)
+#define MWIFIEX_BD_FLAG_LAST_DESC BIT(1)
+#define REG_CMD_ADDR_LO PCIE_SCRATCH_0_REG
+#define REG_CMD_ADDR_HI PCIE_SCRATCH_1_REG
+#define REG_CMD_SIZE PCIE_SCRATCH_2_REG
+
+#define REG_CMDRSP_ADDR_LO PCIE_SCRATCH_4_REG
+#define REG_CMDRSP_ADDR_HI PCIE_SCRATCH_5_REG
+
+/* TX buffer description read pointer */
+#define REG_TXBD_RDPTR PCIE_SCRATCH_6_REG
+/* TX buffer description write pointer */
+#define REG_TXBD_WRPTR PCIE_SCRATCH_7_REG
+/* RX buffer description read pointer */
+#define REG_RXBD_RDPTR PCIE_SCRATCH_8_REG
+/* RX buffer description write pointer */
+#define REG_RXBD_WRPTR PCIE_SCRATCH_9_REG
+/* Event buffer description read pointer */
+#define REG_EVTBD_RDPTR PCIE_SCRATCH_10_REG
+/* Event buffer description write pointer */
+#define REG_EVTBD_WRPTR PCIE_SCRATCH_11_REG
+/* Driver ready signature write pointer */
+#define REG_DRV_READY PCIE_SCRATCH_12_REG
+
+/* Max retry number of command write */
+#define MAX_WRITE_IOMEM_RETRY 2
+/* Define PCIE block size for firmware download */
+#define MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD 256
+/* FW awake cookie after FW ready */
+#define FW_AWAKE_COOKIE (0xAA55AA55)
+
+struct mwifiex_pcie_buf_desc {
+ u64 paddr;
+ u16 len;
+ u16 flags;
+} __packed;
+
+struct pcie_service_card {
+ struct pci_dev *dev;
+ struct mwifiex_adapter *adapter;
+
+ u32 txbd_wrptr;
+ u32 txbd_rdptr;
+ u32 txbd_ring_size;
+ u8 *txbd_ring_vbase;
+ phys_addr_t txbd_ring_pbase;
+ struct mwifiex_pcie_buf_desc *txbd_ring[MWIFIEX_MAX_TXRX_BD];
+ struct sk_buff *tx_buf_list[MWIFIEX_MAX_TXRX_BD];
+
+ u32 rxbd_wrptr;
+ u32 rxbd_rdptr;
+ u32 rxbd_ring_size;
+ u8 *rxbd_ring_vbase;
+ phys_addr_t rxbd_ring_pbase;
+ struct mwifiex_pcie_buf_desc *rxbd_ring[MWIFIEX_MAX_TXRX_BD];
+ struct sk_buff *rx_buf_list[MWIFIEX_MAX_TXRX_BD];
+
+ u32 evtbd_wrptr;
+ u32 evtbd_rdptr;
+ u32 evtbd_ring_size;
+ u8 *evtbd_ring_vbase;
+ phys_addr_t evtbd_ring_pbase;
+ struct mwifiex_pcie_buf_desc *evtbd_ring[MWIFIEX_MAX_EVT_BD];
+ struct sk_buff *evt_buf_list[MWIFIEX_MAX_EVT_BD];
+
+ struct sk_buff *cmd_buf;
+ struct sk_buff *cmdrsp_buf;
+ struct sk_buff *sleep_cookie;
+ void __iomem *pci_mmap;
+ void __iomem *pci_mmap1;
+};
+
+#endif /* _MWIFIEX_PCIE_H */
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 6f88c8ab5de..dae8dbb24a0 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -172,59 +172,6 @@ mwifiex_ssid_cmp(struct mwifiex_802_11_ssid *ssid1,
}
/*
- * Sends IOCTL request to get the best BSS.
- *
- * This function allocates the IOCTL request buffer, fills it
- * with requisite parameters and calls the IOCTL handler.
- */
-int mwifiex_find_best_bss(struct mwifiex_private *priv,
- struct mwifiex_ssid_bssid *ssid_bssid)
-{
- struct mwifiex_ssid_bssid tmp_ssid_bssid;
- u8 *mac;
-
- if (!ssid_bssid)
- return -1;
-
- memcpy(&tmp_ssid_bssid, ssid_bssid,
- sizeof(struct mwifiex_ssid_bssid));
-
- if (!mwifiex_bss_ioctl_find_bss(priv, &tmp_ssid_bssid)) {
- memcpy(ssid_bssid, &tmp_ssid_bssid,
- sizeof(struct mwifiex_ssid_bssid));
- mac = (u8 *) &ssid_bssid->bssid;
- dev_dbg(priv->adapter->dev, "cmd: found network: ssid=%s,"
- " %pM\n", ssid_bssid->ssid.ssid, mac);
- return 0;
- }
-
- return -1;
-}
-
-/*
- * Sends IOCTL request to start a scan with user configurations.
- *
- * This function allocates the IOCTL request buffer, fills it
- * with requisite parameters and calls the IOCTL handler.
- *
- * Upon completion, it also generates a wireless event to notify
- * applications.
- */
-int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
- struct mwifiex_user_scan_cfg *scan_req)
-{
- int status;
-
- priv->adapter->cmd_wait_q.condition = false;
-
- status = mwifiex_scan_networks(priv, scan_req);
- if (!status)
- status = mwifiex_wait_queue_complete(priv->adapter);
-
- return status;
-}
-
-/*
* This function checks if wapi is enabled in driver and scanned network is
* compatible with it.
*/
@@ -286,8 +233,7 @@ mwifiex_is_network_compatible_for_static_wep(struct mwifiex_private *priv,
*/
static bool
mwifiex_is_network_compatible_for_wpa(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc,
- int index)
+ struct mwifiex_bssdescriptor *bss_desc)
{
if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
&& priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
@@ -298,9 +244,9 @@ mwifiex_is_network_compatible_for_wpa(struct mwifiex_private *priv,
* LinkSys WRT54G && bss_desc->privacy
*/
) {
- dev_dbg(priv->adapter->dev, "info: %s: WPA: index=%d"
+ dev_dbg(priv->adapter->dev, "info: %s: WPA:"
" wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
- "EncMode=%#x privacy=%#x\n", __func__, index,
+ "EncMode=%#x privacy=%#x\n", __func__,
(bss_desc->bcn_wpa_ie) ?
(*(bss_desc->bcn_wpa_ie)).
vend_hdr.element_id : 0,
@@ -324,8 +270,7 @@ mwifiex_is_network_compatible_for_wpa(struct mwifiex_private *priv,
*/
static bool
mwifiex_is_network_compatible_for_wpa2(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc,
- int index)
+ struct mwifiex_bssdescriptor *bss_desc)
{
if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
&& !priv->sec_info.wpa_enabled && priv->sec_info.wpa2_enabled
@@ -336,9 +281,9 @@ mwifiex_is_network_compatible_for_wpa2(struct mwifiex_private *priv,
* LinkSys WRT54G && bss_desc->privacy
*/
) {
- dev_dbg(priv->adapter->dev, "info: %s: WPA2: index=%d"
+ dev_dbg(priv->adapter->dev, "info: %s: WPA2: "
" wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
- "EncMode=%#x privacy=%#x\n", __func__, index,
+ "EncMode=%#x privacy=%#x\n", __func__,
(bss_desc->bcn_wpa_ie) ?
(*(bss_desc->bcn_wpa_ie)).
vend_hdr.element_id : 0,
@@ -383,8 +328,7 @@ mwifiex_is_network_compatible_for_adhoc_aes(struct mwifiex_private *priv,
*/
static bool
mwifiex_is_network_compatible_for_dynamic_wep(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc,
- int index)
+ struct mwifiex_bssdescriptor *bss_desc)
{
if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
&& !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
@@ -395,9 +339,9 @@ mwifiex_is_network_compatible_for_dynamic_wep(struct mwifiex_private *priv,
&& priv->sec_info.encryption_mode
&& bss_desc->privacy) {
dev_dbg(priv->adapter->dev, "info: %s: dynamic "
- "WEP: index=%d wpa_ie=%#x wpa2_ie=%#x "
+ "WEP: wpa_ie=%#x wpa2_ie=%#x "
"EncMode=%#x privacy=%#x\n",
- __func__, index,
+ __func__,
(bss_desc->bcn_wpa_ie) ?
(*(bss_desc->bcn_wpa_ie)).
vend_hdr.element_id : 0,
@@ -430,42 +374,41 @@ mwifiex_is_network_compatible_for_dynamic_wep(struct mwifiex_private *priv,
* Compatibility is not matched while roaming, except for mode.
*/
static s32
-mwifiex_is_network_compatible(struct mwifiex_private *priv, u32 index, u32 mode)
+mwifiex_is_network_compatible(struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc, u32 mode)
{
struct mwifiex_adapter *adapter = priv->adapter;
- struct mwifiex_bssdescriptor *bss_desc;
- bss_desc = &adapter->scan_table[index];
bss_desc->disable_11n = false;
/* Don't check for compatibility if roaming */
if (priv->media_connected && (priv->bss_mode == NL80211_IFTYPE_STATION)
&& (bss_desc->bss_mode == NL80211_IFTYPE_STATION))
- return index;
+ return 0;
if (priv->wps.session_enable) {
dev_dbg(adapter->dev,
"info: return success directly in WPS period\n");
- return index;
+ return 0;
}
if (mwifiex_is_network_compatible_for_wapi(priv, bss_desc)) {
dev_dbg(adapter->dev, "info: return success for WAPI AP\n");
- return index;
+ return 0;
}
if (bss_desc->bss_mode == mode) {
if (mwifiex_is_network_compatible_for_no_sec(priv, bss_desc)) {
/* No security */
- return index;
+ return 0;
} else if (mwifiex_is_network_compatible_for_static_wep(priv,
bss_desc)) {
/* Static WEP enabled */
dev_dbg(adapter->dev, "info: Disable 11n in WEP mode.\n");
bss_desc->disable_11n = true;
- return index;
- } else if (mwifiex_is_network_compatible_for_wpa(priv, bss_desc,
- index)) {
+ return 0;
+ } else if (mwifiex_is_network_compatible_for_wpa(priv,
+ bss_desc)) {
/* WPA enabled */
if (((priv->adapter->config_bands & BAND_GN
|| priv->adapter->config_bands & BAND_AN)
@@ -483,9 +426,9 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, u32 index, u32 mode)
return -1;
}
}
- return index;
+ return 0;
} else if (mwifiex_is_network_compatible_for_wpa2(priv,
- bss_desc, index)) {
+ bss_desc)) {
/* WPA2 enabled */
if (((priv->adapter->config_bands & BAND_GN
|| priv->adapter->config_bands & BAND_AN)
@@ -503,22 +446,22 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, u32 index, u32 mode)
return -1;
}
}
- return index;
+ return 0;
} else if (mwifiex_is_network_compatible_for_adhoc_aes(priv,
bss_desc)) {
/* Ad-hoc AES enabled */
- return index;
+ return 0;
} else if (mwifiex_is_network_compatible_for_dynamic_wep(priv,
- bss_desc, index)) {
+ bss_desc)) {
/* Dynamic WEP enabled */
- return index;
+ return 0;
}
/* Security doesn't match */
- dev_dbg(adapter->dev, "info: %s: failed: index=%d "
+ dev_dbg(adapter->dev, "info: %s: failed: "
"wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s EncMode"
"=%#x privacy=%#x\n",
- __func__, index,
+ __func__,
(bss_desc->bcn_wpa_ie) ?
(*(bss_desc->bcn_wpa_ie)).vend_hdr.
element_id : 0,
@@ -538,52 +481,6 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, u32 index, u32 mode)
}
/*
- * This function finds the best SSID in the scan list.
- *
- * It searches the scan table for the best SSID that also matches the current
- * adapter network preference (mode, security etc.).
- */
-static s32
-mwifiex_find_best_network_in_list(struct mwifiex_private *priv)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- u32 mode = priv->bss_mode;
- s32 best_net = -1;
- s32 best_rssi = 0;
- u32 i;
-
- dev_dbg(adapter->dev, "info: num of BSSIDs = %d\n",
- adapter->num_in_scan_table);
-
- for (i = 0; i < adapter->num_in_scan_table; i++) {
- switch (mode) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- if (mwifiex_is_network_compatible(priv, i, mode) >= 0) {
- if (SCAN_RSSI(adapter->scan_table[i].rssi) >
- best_rssi) {
- best_rssi = SCAN_RSSI(adapter->
- scan_table[i].rssi);
- best_net = i;
- }
- }
- break;
- case NL80211_IFTYPE_UNSPECIFIED:
- default:
- if (SCAN_RSSI(adapter->scan_table[i].rssi) >
- best_rssi) {
- best_rssi = SCAN_RSSI(adapter->scan_table[i].
- rssi);
- best_net = i;
- }
- break;
- }
- }
-
- return best_net;
-}
-
-/*
* This function creates a channel list for the driver to scan, based
* on region/band information.
*
@@ -612,7 +509,7 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
sband = priv->wdev->wiphy->bands[band];
- for (i = 0; (i < sband->n_channels) ; i++, chan_idx++) {
+ for (i = 0; (i < sband->n_channels) ; i++) {
ch = &sband->channels[i];
if (ch->flags & IEEE80211_CHAN_DISABLED)
continue;
@@ -643,6 +540,7 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
scan_chan_list[chan_idx].chan_scan_mode_bitmap
|= MWIFIEX_DISABLE_CHAN_FILT;
}
+ chan_idx++;
}
}
@@ -1161,34 +1059,13 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
}
/*
- * This function interprets a BSS scan response returned from the firmware.
- *
- * The various fixed fields and IEs are parsed and passed back for a BSS
- * probe response or beacon from scan command. Information is recorded as
- * needed in the scan table for that entry.
- *
- * The following IE types are recognized and parsed -
- * - SSID
- * - Supported rates
- * - FH parameters set
- * - DS parameters set
- * - CF parameters set
- * - IBSS parameters set
- * - ERP information
- * - Extended supported rates
- * - Vendor specific (221)
- * - RSN IE
- * - WAPI IE
- * - HT capability
- * - HT operation
- * - BSS Coexistence 20/40
- * - Extended capability
- * - Overlapping BSS scan parameters
+ * This function parses provided beacon buffer and updates
+ * respective fields in bss descriptor structure.
*/
-static int
-mwifiex_interpret_bss_desc_with_ie(struct mwifiex_adapter *adapter,
- struct mwifiex_bssdescriptor *bss_entry,
- u8 **beacon_info, u32 *bytes_left)
+int
+mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+ struct mwifiex_bssdescriptor *bss_entry,
+ u8 *ie_buf, u32 ie_len)
{
int ret = 0;
u8 element_id;
@@ -1196,135 +1073,43 @@ mwifiex_interpret_bss_desc_with_ie(struct mwifiex_adapter *adapter,
struct ieee_types_ds_param_set *ds_param_set;
struct ieee_types_cf_param_set *cf_param_set;
struct ieee_types_ibss_param_set *ibss_param_set;
- __le16 beacon_interval;
- __le16 capabilities;
u8 *current_ptr;
u8 *rate;
u8 element_len;
u16 total_ie_len;
u8 bytes_to_copy;
u8 rate_size;
- u16 beacon_size;
u8 found_data_rate_ie;
- u32 bytes_left_for_current_beacon;
+ u32 bytes_left;
struct ieee_types_vendor_specific *vendor_ie;
const u8 wpa_oui[4] = { 0x00, 0x50, 0xf2, 0x01 };
const u8 wmm_oui[4] = { 0x00, 0x50, 0xf2, 0x02 };
found_data_rate_ie = false;
rate_size = 0;
- beacon_size = 0;
-
- if (*bytes_left >= sizeof(beacon_size)) {
- /* Extract & convert beacon size from the command buffer */
- memcpy(&beacon_size, *beacon_info, sizeof(beacon_size));
- *bytes_left -= sizeof(beacon_size);
- *beacon_info += sizeof(beacon_size);
- }
-
- if (!beacon_size || beacon_size > *bytes_left) {
- *beacon_info += *bytes_left;
- *bytes_left = 0;
- return -1;
- }
-
- /* Initialize the current working beacon pointer for this BSS
- iteration */
- current_ptr = *beacon_info;
-
- /* Advance the return beacon pointer past the current beacon */
- *beacon_info += beacon_size;
- *bytes_left -= beacon_size;
-
- bytes_left_for_current_beacon = beacon_size;
-
- memcpy(bss_entry->mac_address, current_ptr, ETH_ALEN);
- dev_dbg(adapter->dev, "info: InterpretIE: AP MAC Addr: %pM\n",
- bss_entry->mac_address);
-
- current_ptr += ETH_ALEN;
- bytes_left_for_current_beacon -= ETH_ALEN;
-
- if (bytes_left_for_current_beacon < 12) {
- dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
- return -1;
- }
-
- /*
- * Next 4 fields are RSSI, time stamp, beacon interval,
- * and capability information
- */
-
- /* RSSI is 1 byte long */
- bss_entry->rssi = (s32) (*current_ptr);
- dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%02X\n", *current_ptr);
- current_ptr += 1;
- bytes_left_for_current_beacon -= 1;
-
- /*
- * The RSSI is not part of the beacon/probe response. After we have
- * advanced current_ptr past the RSSI field, save the remaining
- * data for use at the application layer
- */
- bss_entry->beacon_buf = current_ptr;
- bss_entry->beacon_buf_size = bytes_left_for_current_beacon;
-
- /* Time stamp is 8 bytes long */
- memcpy(bss_entry->time_stamp, current_ptr, 8);
- current_ptr += 8;
- bytes_left_for_current_beacon -= 8;
-
- /* Beacon interval is 2 bytes long */
- memcpy(&beacon_interval, current_ptr, 2);
- bss_entry->beacon_period = le16_to_cpu(beacon_interval);
- current_ptr += 2;
- bytes_left_for_current_beacon -= 2;
-
- /* Capability information is 2 bytes long */
- memcpy(&capabilities, current_ptr, 2);
- dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
- capabilities);
- bss_entry->cap_info_bitmap = le16_to_cpu(capabilities);
- current_ptr += 2;
- bytes_left_for_current_beacon -= 2;
-
- /* Rest of the current buffer are IE's */
- dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n",
- bytes_left_for_current_beacon);
-
- if (bss_entry->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
- dev_dbg(adapter->dev, "info: InterpretIE: AP WEP enabled\n");
- bss_entry->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
- } else {
- bss_entry->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
- }
-
- if (bss_entry->cap_info_bitmap & WLAN_CAPABILITY_IBSS)
- bss_entry->bss_mode = NL80211_IFTYPE_ADHOC;
- else
- bss_entry->bss_mode = NL80211_IFTYPE_STATION;
-
+ current_ptr = ie_buf;
+ bytes_left = ie_len;
+ bss_entry->beacon_buf = ie_buf;
+ bss_entry->beacon_buf_size = ie_len;
/* Process variable IE */
- while (bytes_left_for_current_beacon >= 2) {
+ while (bytes_left >= 2) {
element_id = *current_ptr;
element_len = *(current_ptr + 1);
total_ie_len = element_len + sizeof(struct ieee_types_header);
- if (bytes_left_for_current_beacon < total_ie_len) {
+ if (bytes_left < total_ie_len) {
dev_err(adapter->dev, "err: InterpretIE: in processing"
" IE, bytes left < IE length\n");
- bytes_left_for_current_beacon = 0;
- ret = -1;
- continue;
+ return -1;
}
switch (element_id) {
case WLAN_EID_SSID:
bss_entry->ssid.ssid_len = element_len;
memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
element_len);
- dev_dbg(adapter->dev, "info: InterpretIE: ssid: %-32s\n",
- bss_entry->ssid.ssid);
+ dev_dbg(adapter->dev, "info: InterpretIE: ssid: "
+ "%-32s\n", bss_entry->ssid.ssid);
break;
case WLAN_EID_SUPP_RATES:
@@ -1471,13 +1256,6 @@ mwifiex_interpret_bss_desc_with_ie(struct mwifiex_adapter *adapter,
sizeof(struct ieee_types_header) -
bss_entry->beacon_buf);
break;
- case WLAN_EID_OVERLAP_BSS_SCAN_PARAM:
- bss_entry->bcn_obss_scan =
- (struct ieee_types_obss_scan_param *)
- current_ptr;
- bss_entry->overlap_bss_offset = (u16) (current_ptr -
- bss_entry->beacon_buf);
- break;
default:
break;
}
@@ -1485,577 +1263,13 @@ mwifiex_interpret_bss_desc_with_ie(struct mwifiex_adapter *adapter,
current_ptr += element_len + 2;
/* Need to account for IE ID and IE Len */
- bytes_left_for_current_beacon -= (element_len + 2);
+ bytes_left -= (element_len + 2);
- } /* while (bytes_left_for_current_beacon > 2) */
+ } /* while (bytes_left > 2) */
return ret;
}
/*
- * This function adjusts the pointers used in beacon buffers to reflect
- * shifts.
- *
- * The memory allocated for beacon buffers is of fixed sizes where all the
- * saved beacons must be stored. New beacons are added in the free portion
- * of this memory, space permitting; while duplicate beacon buffers are
- * placed at the same start location. However, since duplicate beacon
- * buffers may not match the size of the old one, all the following buffers
- * in the memory must be shifted to either make space, or to fill up freed
- * up space.
- *
- * This function is used to update the beacon buffer pointers that are past
- * an existing beacon buffer that is updated with a new one of different
- * size. The pointers are shifted by a fixed amount, either forward or
- * backward.
- *
- * the following pointers in every affected beacon buffers are changed, if
- * present -
- * - WPA IE pointer
- * - RSN IE pointer
- * - WAPI IE pointer
- * - HT capability IE pointer
- * - HT information IE pointer
- * - BSS coexistence 20/40 IE pointer
- * - Extended capability IE pointer
- * - Overlapping BSS scan parameter IE pointer
- */
-static void
-mwifiex_adjust_beacon_buffer_ptrs(struct mwifiex_private *priv, u8 advance,
- u8 *bcn_store, u32 rem_bcn_size,
- u32 num_of_ent)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- u32 adj_idx;
- for (adj_idx = 0; adj_idx < num_of_ent; adj_idx++) {
- if (adapter->scan_table[adj_idx].beacon_buf > bcn_store) {
-
- if (advance)
- adapter->scan_table[adj_idx].beacon_buf +=
- rem_bcn_size;
- else
- adapter->scan_table[adj_idx].beacon_buf -=
- rem_bcn_size;
-
- if (adapter->scan_table[adj_idx].bcn_wpa_ie)
- adapter->scan_table[adj_idx].bcn_wpa_ie =
- (struct ieee_types_vendor_specific *)
- (adapter->scan_table[adj_idx].beacon_buf +
- adapter->scan_table[adj_idx].wpa_offset);
- if (adapter->scan_table[adj_idx].bcn_rsn_ie)
- adapter->scan_table[adj_idx].bcn_rsn_ie =
- (struct ieee_types_generic *)
- (adapter->scan_table[adj_idx].beacon_buf +
- adapter->scan_table[adj_idx].rsn_offset);
- if (adapter->scan_table[adj_idx].bcn_wapi_ie)
- adapter->scan_table[adj_idx].bcn_wapi_ie =
- (struct ieee_types_generic *)
- (adapter->scan_table[adj_idx].beacon_buf +
- adapter->scan_table[adj_idx].wapi_offset);
- if (adapter->scan_table[adj_idx].bcn_ht_cap)
- adapter->scan_table[adj_idx].bcn_ht_cap =
- (struct ieee80211_ht_cap *)
- (adapter->scan_table[adj_idx].beacon_buf +
- adapter->scan_table[adj_idx].ht_cap_offset);
-
- if (adapter->scan_table[adj_idx].bcn_ht_info)
- adapter->scan_table[adj_idx].bcn_ht_info =
- (struct ieee80211_ht_info *)
- (adapter->scan_table[adj_idx].beacon_buf +
- adapter->scan_table[adj_idx].ht_info_offset);
- if (adapter->scan_table[adj_idx].bcn_bss_co_2040)
- adapter->scan_table[adj_idx].bcn_bss_co_2040 =
- (u8 *)
- (adapter->scan_table[adj_idx].beacon_buf +
- adapter->scan_table[adj_idx].bss_co_2040_offset);
- if (adapter->scan_table[adj_idx].bcn_ext_cap)
- adapter->scan_table[adj_idx].bcn_ext_cap =
- (u8 *)
- (adapter->scan_table[adj_idx].beacon_buf +
- adapter->scan_table[adj_idx].ext_cap_offset);
- if (adapter->scan_table[adj_idx].bcn_obss_scan)
- adapter->scan_table[adj_idx].bcn_obss_scan =
- (struct ieee_types_obss_scan_param *)
- (adapter->scan_table[adj_idx].beacon_buf +
- adapter->scan_table[adj_idx].overlap_bss_offset);
- }
- }
-}
-
-/*
- * This function updates the pointers used in beacon buffer for given bss
- * descriptor to reflect shifts
- *
- * Following pointers are updated
- * - WPA IE pointer
- * - RSN IE pointer
- * - WAPI IE pointer
- * - HT capability IE pointer
- * - HT information IE pointer
- * - BSS coexistence 20/40 IE pointer
- * - Extended capability IE pointer
- * - Overlapping BSS scan parameter IE pointer
- */
-static void
-mwifiex_update_beacon_buffer_ptrs(struct mwifiex_bssdescriptor *beacon)
-{
- if (beacon->bcn_wpa_ie)
- beacon->bcn_wpa_ie = (struct ieee_types_vendor_specific *)
- (beacon->beacon_buf + beacon->wpa_offset);
- if (beacon->bcn_rsn_ie)
- beacon->bcn_rsn_ie = (struct ieee_types_generic *)
- (beacon->beacon_buf + beacon->rsn_offset);
- if (beacon->bcn_wapi_ie)
- beacon->bcn_wapi_ie = (struct ieee_types_generic *)
- (beacon->beacon_buf + beacon->wapi_offset);
- if (beacon->bcn_ht_cap)
- beacon->bcn_ht_cap = (struct ieee80211_ht_cap *)
- (beacon->beacon_buf + beacon->ht_cap_offset);
- if (beacon->bcn_ht_info)
- beacon->bcn_ht_info = (struct ieee80211_ht_info *)
- (beacon->beacon_buf + beacon->ht_info_offset);
- if (beacon->bcn_bss_co_2040)
- beacon->bcn_bss_co_2040 = (u8 *) (beacon->beacon_buf +
- beacon->bss_co_2040_offset);
- if (beacon->bcn_ext_cap)
- beacon->bcn_ext_cap = (u8 *) (beacon->beacon_buf +
- beacon->ext_cap_offset);
- if (beacon->bcn_obss_scan)
- beacon->bcn_obss_scan = (struct ieee_types_obss_scan_param *)
- (beacon->beacon_buf + beacon->overlap_bss_offset);
-}
-
-/*
- * This function stores a beacon or probe response for a BSS returned
- * in the scan.
- *
- * This stores a new scan response or an update for a previous scan response.
- * New entries need to verify that they do not exceed the total amount of
- * memory allocated for the table.
- *
- * Replacement entries need to take into consideration the amount of space
- * currently allocated for the beacon/probe response and adjust the entry
- * as needed.
- *
- * A small amount of extra pad (SCAN_BEACON_ENTRY_PAD) is generally reserved
- * for an entry in case it is a beacon since a probe response for the
- * network will by larger per the standard. This helps to reduce the
- * amount of memory copying to fit a new probe response into an entry
- * already occupied by a network's previously stored beacon.
- */
-static void
-mwifiex_ret_802_11_scan_store_beacon(struct mwifiex_private *priv,
- u32 beacon_idx, u32 num_of_ent,
- struct mwifiex_bssdescriptor *new_beacon)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- u8 *bcn_store;
- u32 new_bcn_size;
- u32 old_bcn_size;
- u32 bcn_space;
-
- if (adapter->scan_table[beacon_idx].beacon_buf) {
-
- new_bcn_size = new_beacon->beacon_buf_size;
- old_bcn_size = adapter->scan_table[beacon_idx].beacon_buf_size;
- bcn_space = adapter->scan_table[beacon_idx].beacon_buf_size_max;
- bcn_store = adapter->scan_table[beacon_idx].beacon_buf;
-
- /* Set the max to be the same as current entry unless changed
- below */
- new_beacon->beacon_buf_size_max = bcn_space;
- if (new_bcn_size == old_bcn_size) {
- /*
- * Beacon is the same size as the previous entry.
- * Replace the previous contents with the scan result
- */
- memcpy(bcn_store, new_beacon->beacon_buf,
- new_beacon->beacon_buf_size);
-
- } else if (new_bcn_size <= bcn_space) {
- /*
- * New beacon size will fit in the amount of space
- * we have previously allocated for it
- */
-
- /* Copy the new beacon buffer entry over the old one */
- memcpy(bcn_store, new_beacon->beacon_buf, new_bcn_size);
-
- /*
- * If the old beacon size was less than the maximum
- * we had alloted for the entry, and the new entry
- * is even smaller, reset the max size to the old
- * beacon entry and compress the storage space
- * (leaving a new pad space of (old_bcn_size -
- * new_bcn_size).
- */
- if (old_bcn_size < bcn_space
- && new_bcn_size <= old_bcn_size) {
- /*
- * Old Beacon size is smaller than the alloted
- * storage size. Shrink the alloted storage
- * space.
- */
- dev_dbg(adapter->dev, "info: AppControl:"
- " smaller duplicate beacon "
- "(%d), old = %d, new = %d, space = %d,"
- "left = %d\n",
- beacon_idx, old_bcn_size, new_bcn_size,
- bcn_space,
- (int)(sizeof(adapter->bcn_buf) -
- (adapter->bcn_buf_end -
- adapter->bcn_buf)));
-
- /*
- * memmove (since the memory overlaps) the
- * data after the beacon we just stored to the
- * end of the current beacon. This cleans up
- * any unused space the old larger beacon was
- * using in the buffer
- */
- memmove(bcn_store + old_bcn_size,
- bcn_store + bcn_space,
- adapter->bcn_buf_end - (bcn_store +
- bcn_space));
-
- /*
- * Decrement the end pointer by the difference
- * between the old larger size and the new
- * smaller size since we are using less space
- * due to the new beacon being smaller
- */
- adapter->bcn_buf_end -=
- (bcn_space - old_bcn_size);
-
- /* Set the maximum storage size to the old
- beacon size */
- new_beacon->beacon_buf_size_max = old_bcn_size;
-
- /* Adjust beacon buffer pointers that are past
- the current */
- mwifiex_adjust_beacon_buffer_ptrs(priv, 0,
- bcn_store, (bcn_space - old_bcn_size),
- num_of_ent);
- }
- } else if (adapter->bcn_buf_end + (new_bcn_size - bcn_space)
- < (adapter->bcn_buf + sizeof(adapter->bcn_buf))) {
- /*
- * Beacon is larger than space previously allocated
- * (bcn_space) and there is enough space left in the
- * beaconBuffer to store the additional data
- */
- dev_dbg(adapter->dev, "info: AppControl:"
- " larger duplicate beacon (%d), "
- "old = %d, new = %d, space = %d, left = %d\n",
- beacon_idx, old_bcn_size, new_bcn_size,
- bcn_space,
- (int)(sizeof(adapter->bcn_buf) -
- (adapter->bcn_buf_end -
- adapter->bcn_buf)));
-
- /*
- * memmove (since the memory overlaps) the data
- * after the beacon we just stored to the end of
- * the current beacon. This moves the data for
- * the beacons after this further in memory to
- * make space for the new larger beacon we are
- * about to copy in.
- */
- memmove(bcn_store + new_bcn_size,
- bcn_store + bcn_space,
- adapter->bcn_buf_end - (bcn_store + bcn_space));
-
- /* Copy the new beacon buffer entry over the old one */
- memcpy(bcn_store, new_beacon->beacon_buf, new_bcn_size);
-
- /* Move the beacon end pointer by the amount of new
- beacon data we are adding */
- adapter->bcn_buf_end += (new_bcn_size - bcn_space);
-
- /*
- * This entry is bigger than the alloted max space
- * previously reserved. Increase the max space to
- * be equal to the new beacon size
- */
- new_beacon->beacon_buf_size_max = new_bcn_size;
-
- /* Adjust beacon buffer pointers that are past the
- current */
- mwifiex_adjust_beacon_buffer_ptrs(priv, 1, bcn_store,
- (new_bcn_size - bcn_space),
- num_of_ent);
- } else {
- /*
- * Beacon is larger than the previously allocated space,
- * but there is not enough free space to store the
- * additional data.
- */
- dev_err(adapter->dev, "AppControl: larger duplicate "
- " beacon (%d), old = %d new = %d, space = %d,"
- " left = %d\n", beacon_idx, old_bcn_size,
- new_bcn_size, bcn_space,
- (int)(sizeof(adapter->bcn_buf) -
- (adapter->bcn_buf_end - adapter->bcn_buf)));
-
- /* Storage failure, keep old beacon intact */
- new_beacon->beacon_buf_size = old_bcn_size;
- if (new_beacon->bcn_wpa_ie)
- new_beacon->wpa_offset =
- adapter->scan_table[beacon_idx].
- wpa_offset;
- if (new_beacon->bcn_rsn_ie)
- new_beacon->rsn_offset =
- adapter->scan_table[beacon_idx].
- rsn_offset;
- if (new_beacon->bcn_wapi_ie)
- new_beacon->wapi_offset =
- adapter->scan_table[beacon_idx].
- wapi_offset;
- if (new_beacon->bcn_ht_cap)
- new_beacon->ht_cap_offset =
- adapter->scan_table[beacon_idx].
- ht_cap_offset;
- if (new_beacon->bcn_ht_info)
- new_beacon->ht_info_offset =
- adapter->scan_table[beacon_idx].
- ht_info_offset;
- if (new_beacon->bcn_bss_co_2040)
- new_beacon->bss_co_2040_offset =
- adapter->scan_table[beacon_idx].
- bss_co_2040_offset;
- if (new_beacon->bcn_ext_cap)
- new_beacon->ext_cap_offset =
- adapter->scan_table[beacon_idx].
- ext_cap_offset;
- if (new_beacon->bcn_obss_scan)
- new_beacon->overlap_bss_offset =
- adapter->scan_table[beacon_idx].
- overlap_bss_offset;
- }
- /* Point the new entry to its permanent storage space */
- new_beacon->beacon_buf = bcn_store;
- mwifiex_update_beacon_buffer_ptrs(new_beacon);
- } else {
- /*
- * No existing beacon data exists for this entry, check to see
- * if we can fit it in the remaining space
- */
- if (adapter->bcn_buf_end + new_beacon->beacon_buf_size +
- SCAN_BEACON_ENTRY_PAD < (adapter->bcn_buf +
- sizeof(adapter->bcn_buf))) {
-
- /*
- * Copy the beacon buffer data from the local entry to
- * the adapter dev struct buffer space used to store
- * the raw beacon data for each entry in the scan table
- */
- memcpy(adapter->bcn_buf_end, new_beacon->beacon_buf,
- new_beacon->beacon_buf_size);
-
- /* Update the beacon ptr to point to the table save
- area */
- new_beacon->beacon_buf = adapter->bcn_buf_end;
- new_beacon->beacon_buf_size_max =
- (new_beacon->beacon_buf_size +
- SCAN_BEACON_ENTRY_PAD);
-
- mwifiex_update_beacon_buffer_ptrs(new_beacon);
-
- /* Increment the end pointer by the size reserved */
- adapter->bcn_buf_end += new_beacon->beacon_buf_size_max;
-
- dev_dbg(adapter->dev, "info: AppControl: beacon[%02d]"
- " sz=%03d, used = %04d, left = %04d\n",
- beacon_idx,
- new_beacon->beacon_buf_size,
- (int)(adapter->bcn_buf_end - adapter->bcn_buf),
- (int)(sizeof(adapter->bcn_buf) -
- (adapter->bcn_buf_end -
- adapter->bcn_buf)));
- } else {
- /* No space for new beacon */
- dev_dbg(adapter->dev, "info: AppControl: no space for"
- " beacon (%d): %pM sz=%03d, left=%03d\n",
- beacon_idx, new_beacon->mac_address,
- new_beacon->beacon_buf_size,
- (int)(sizeof(adapter->bcn_buf) -
- (adapter->bcn_buf_end -
- adapter->bcn_buf)));
-
- /* Storage failure; clear storage records for this
- bcn */
- new_beacon->beacon_buf = NULL;
- new_beacon->beacon_buf_size = 0;
- new_beacon->beacon_buf_size_max = 0;
- new_beacon->bcn_wpa_ie = NULL;
- new_beacon->wpa_offset = 0;
- new_beacon->bcn_rsn_ie = NULL;
- new_beacon->rsn_offset = 0;
- new_beacon->bcn_wapi_ie = NULL;
- new_beacon->wapi_offset = 0;
- new_beacon->bcn_ht_cap = NULL;
- new_beacon->ht_cap_offset = 0;
- new_beacon->bcn_ht_info = NULL;
- new_beacon->ht_info_offset = 0;
- new_beacon->bcn_bss_co_2040 = NULL;
- new_beacon->bss_co_2040_offset = 0;
- new_beacon->bcn_ext_cap = NULL;
- new_beacon->ext_cap_offset = 0;
- new_beacon->bcn_obss_scan = NULL;
- new_beacon->overlap_bss_offset = 0;
- }
- }
-}
-
-/*
- * This function restores a beacon buffer of the current BSS descriptor.
- */
-static void mwifiex_restore_curr_bcn(struct mwifiex_private *priv)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- struct mwifiex_bssdescriptor *curr_bss =
- &priv->curr_bss_params.bss_descriptor;
- unsigned long flags;
-
- if (priv->curr_bcn_buf &&
- ((adapter->bcn_buf_end + priv->curr_bcn_size) <
- (adapter->bcn_buf + sizeof(adapter->bcn_buf)))) {
- spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags);
-
- /* restore the current beacon buffer */
- memcpy(adapter->bcn_buf_end, priv->curr_bcn_buf,
- priv->curr_bcn_size);
- curr_bss->beacon_buf = adapter->bcn_buf_end;
- curr_bss->beacon_buf_size = priv->curr_bcn_size;
- adapter->bcn_buf_end += priv->curr_bcn_size;
-
- /* adjust the pointers in the current BSS descriptor */
- if (curr_bss->bcn_wpa_ie)
- curr_bss->bcn_wpa_ie =
- (struct ieee_types_vendor_specific *)
- (curr_bss->beacon_buf +
- curr_bss->wpa_offset);
-
- if (curr_bss->bcn_rsn_ie)
- curr_bss->bcn_rsn_ie = (struct ieee_types_generic *)
- (curr_bss->beacon_buf +
- curr_bss->rsn_offset);
-
- if (curr_bss->bcn_ht_cap)
- curr_bss->bcn_ht_cap = (struct ieee80211_ht_cap *)
- (curr_bss->beacon_buf +
- curr_bss->ht_cap_offset);
-
- if (curr_bss->bcn_ht_info)
- curr_bss->bcn_ht_info = (struct ieee80211_ht_info *)
- (curr_bss->beacon_buf +
- curr_bss->ht_info_offset);
-
- if (curr_bss->bcn_bss_co_2040)
- curr_bss->bcn_bss_co_2040 =
- (u8 *) (curr_bss->beacon_buf +
- curr_bss->bss_co_2040_offset);
-
- if (curr_bss->bcn_ext_cap)
- curr_bss->bcn_ext_cap = (u8 *) (curr_bss->beacon_buf +
- curr_bss->ext_cap_offset);
-
- if (curr_bss->bcn_obss_scan)
- curr_bss->bcn_obss_scan =
- (struct ieee_types_obss_scan_param *)
- (curr_bss->beacon_buf +
- curr_bss->overlap_bss_offset);
-
- spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags);
-
- dev_dbg(adapter->dev, "info: current beacon restored %d\n",
- priv->curr_bcn_size);
- } else {
- dev_warn(adapter->dev,
- "curr_bcn_buf not saved or bcn_buf has no space\n");
- }
-}
-
-/*
- * This function post processes the scan table after a new scan command has
- * completed.
- *
- * It inspects each entry of the scan table and tries to find an entry that
- * matches with our current associated/joined network from the scan. If
- * one is found, the stored copy of the BSS descriptor of our current network
- * is updated.
- *
- * It also debug dumps the current scan table contents after processing is over.
- */
-static void
-mwifiex_process_scan_results(struct mwifiex_private *priv)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- s32 j;
- u32 i;
- unsigned long flags;
-
- if (priv->media_connected) {
-
- j = mwifiex_find_ssid_in_list(priv, &priv->curr_bss_params.
- bss_descriptor.ssid,
- priv->curr_bss_params.
- bss_descriptor.mac_address,
- priv->bss_mode);
-
- if (j >= 0) {
- spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags);
- priv->curr_bss_params.bss_descriptor.bcn_wpa_ie = NULL;
- priv->curr_bss_params.bss_descriptor.wpa_offset = 0;
- priv->curr_bss_params.bss_descriptor.bcn_rsn_ie = NULL;
- priv->curr_bss_params.bss_descriptor.rsn_offset = 0;
- priv->curr_bss_params.bss_descriptor.bcn_wapi_ie = NULL;
- priv->curr_bss_params.bss_descriptor.wapi_offset = 0;
- priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL;
- priv->curr_bss_params.bss_descriptor.ht_cap_offset =
- 0;
- priv->curr_bss_params.bss_descriptor.bcn_ht_info = NULL;
- priv->curr_bss_params.bss_descriptor.ht_info_offset =
- 0;
- priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 =
- NULL;
- priv->curr_bss_params.bss_descriptor.
- bss_co_2040_offset = 0;
- priv->curr_bss_params.bss_descriptor.bcn_ext_cap = NULL;
- priv->curr_bss_params.bss_descriptor.ext_cap_offset = 0;
- priv->curr_bss_params.bss_descriptor.
- bcn_obss_scan = NULL;
- priv->curr_bss_params.bss_descriptor.
- overlap_bss_offset = 0;
- priv->curr_bss_params.bss_descriptor.beacon_buf = NULL;
- priv->curr_bss_params.bss_descriptor.beacon_buf_size =
- 0;
- priv->curr_bss_params.bss_descriptor.
- beacon_buf_size_max = 0;
-
- dev_dbg(adapter->dev, "info: Found current ssid/bssid"
- " in list @ index #%d\n", j);
- /* Make a copy of current BSSID descriptor */
- memcpy(&priv->curr_bss_params.bss_descriptor,
- &adapter->scan_table[j],
- sizeof(priv->curr_bss_params.bss_descriptor));
-
- mwifiex_save_curr_bcn(priv);
- spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags);
-
- } else {
- mwifiex_restore_curr_bcn(priv);
- }
- }
-
- for (i = 0; i < adapter->num_in_scan_table; i++)
- dev_dbg(adapter->dev, "info: scan:(%02d) %pM "
- "RSSI[%03d], SSID[%s]\n",
- i, adapter->scan_table[i].mac_address,
- (s32) adapter->scan_table[i].rssi,
- adapter->scan_table[i].ssid.ssid);
-}
-
-/*
* This function converts radio type scan parameter to a band configuration
* to be used in join command.
*/
@@ -2072,175 +1286,6 @@ mwifiex_radio_type_to_band(u8 radio_type)
}
/*
- * This function deletes a specific indexed entry from the scan table.
- *
- * This also compacts the remaining entries and adjusts any buffering
- * of beacon/probe response data if needed.
- */
-static void
-mwifiex_scan_delete_table_entry(struct mwifiex_private *priv, s32 table_idx)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- u32 del_idx;
- u32 beacon_buf_adj;
- u8 *beacon_buf;
-
- /*
- * Shift the saved beacon buffer data for the scan table back over the
- * entry being removed. Update the end of buffer pointer. Save the
- * deleted buffer allocation size for pointer adjustments for entries
- * compacted after the deleted index.
- */
- beacon_buf_adj = adapter->scan_table[table_idx].beacon_buf_size_max;
-
- dev_dbg(adapter->dev, "info: Scan: Delete Entry %d, beacon buffer "
- "removal = %d bytes\n", table_idx, beacon_buf_adj);
-
- /* Check if the table entry had storage allocated for its beacon */
- if (beacon_buf_adj) {
- beacon_buf = adapter->scan_table[table_idx].beacon_buf;
-
- /*
- * Remove the entry's buffer space, decrement the table end
- * pointer by the amount we are removing
- */
- adapter->bcn_buf_end -= beacon_buf_adj;
-
- dev_dbg(adapter->dev, "info: scan: delete entry %d,"
- " compact data: %p <- %p (sz = %d)\n",
- table_idx, beacon_buf,
- beacon_buf + beacon_buf_adj,
- (int)(adapter->bcn_buf_end - beacon_buf));
-
- /*
- * Compact data storage. Copy all data after the deleted
- * entry's end address (beacon_buf + beacon_buf_adj) back
- * to the original start address (beacon_buf).
- *
- * Scan table entries affected by the move will have their
- * entry pointer adjusted below.
- *
- * Use memmove since the dest/src memory regions overlap.
- */
- memmove(beacon_buf, beacon_buf + beacon_buf_adj,
- adapter->bcn_buf_end - beacon_buf);
- }
-
- dev_dbg(adapter->dev,
- "info: Scan: Delete Entry %d, num_in_scan_table = %d\n",
- table_idx, adapter->num_in_scan_table);
-
- /* Shift all of the entries after the table_idx back by one, compacting
- the table and removing the requested entry */
- for (del_idx = table_idx; (del_idx + 1) < adapter->num_in_scan_table;
- del_idx++) {
- /* Copy the next entry over this one */
- memcpy(adapter->scan_table + del_idx,
- adapter->scan_table + del_idx + 1,
- sizeof(struct mwifiex_bssdescriptor));
-
- /*
- * Adjust this entry's pointer to its beacon buffer based on
- * the removed/compacted entry from the deleted index. Don't
- * decrement if the buffer pointer is NULL (no data stored for
- * this entry).
- */
- if (adapter->scan_table[del_idx].beacon_buf) {
- adapter->scan_table[del_idx].beacon_buf -=
- beacon_buf_adj;
- if (adapter->scan_table[del_idx].bcn_wpa_ie)
- adapter->scan_table[del_idx].bcn_wpa_ie =
- (struct ieee_types_vendor_specific *)
- (adapter->scan_table[del_idx].
- beacon_buf +
- adapter->scan_table[del_idx].
- wpa_offset);
- if (adapter->scan_table[del_idx].bcn_rsn_ie)
- adapter->scan_table[del_idx].bcn_rsn_ie =
- (struct ieee_types_generic *)
- (adapter->scan_table[del_idx].
- beacon_buf +
- adapter->scan_table[del_idx].
- rsn_offset);
- if (adapter->scan_table[del_idx].bcn_wapi_ie)
- adapter->scan_table[del_idx].bcn_wapi_ie =
- (struct ieee_types_generic *)
- (adapter->scan_table[del_idx].beacon_buf
- + adapter->scan_table[del_idx].
- wapi_offset);
- if (adapter->scan_table[del_idx].bcn_ht_cap)
- adapter->scan_table[del_idx].bcn_ht_cap =
- (struct ieee80211_ht_cap *)
- (adapter->scan_table[del_idx].beacon_buf
- + adapter->scan_table[del_idx].
- ht_cap_offset);
-
- if (adapter->scan_table[del_idx].bcn_ht_info)
- adapter->scan_table[del_idx].bcn_ht_info =
- (struct ieee80211_ht_info *)
- (adapter->scan_table[del_idx].beacon_buf
- + adapter->scan_table[del_idx].
- ht_info_offset);
- if (adapter->scan_table[del_idx].bcn_bss_co_2040)
- adapter->scan_table[del_idx].bcn_bss_co_2040 =
- (u8 *)
- (adapter->scan_table[del_idx].beacon_buf
- + adapter->scan_table[del_idx].
- bss_co_2040_offset);
- if (adapter->scan_table[del_idx].bcn_ext_cap)
- adapter->scan_table[del_idx].bcn_ext_cap =
- (u8 *)
- (adapter->scan_table[del_idx].beacon_buf
- + adapter->scan_table[del_idx].
- ext_cap_offset);
- if (adapter->scan_table[del_idx].bcn_obss_scan)
- adapter->scan_table[del_idx].
- bcn_obss_scan =
- (struct ieee_types_obss_scan_param *)
- (adapter->scan_table[del_idx].beacon_buf
- + adapter->scan_table[del_idx].
- overlap_bss_offset);
- }
- }
-
- /* The last entry is invalid now that it has been deleted or moved
- back */
- memset(adapter->scan_table + adapter->num_in_scan_table - 1,
- 0x00, sizeof(struct mwifiex_bssdescriptor));
-
- adapter->num_in_scan_table--;
-}
-
-/*
- * This function deletes all occurrences of a given SSID from the scan table.
- *
- * This iterates through the scan table and deletes all entries that match
- * the given SSID. It also compacts the remaining scan table entries.
- */
-static int
-mwifiex_scan_delete_ssid_table_entry(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *del_ssid)
-{
- s32 table_idx = -1;
-
- dev_dbg(priv->adapter->dev, "info: scan: delete ssid entry: %-32s\n",
- del_ssid->ssid);
-
- /* If the requested SSID is found in the table, delete it. Then keep
- searching the table for multiple entires for the SSID until no
- more are found */
- while ((table_idx = mwifiex_find_ssid_in_list(priv, del_ssid, NULL,
- NL80211_IFTYPE_UNSPECIFIED)) >= 0) {
- dev_dbg(priv->adapter->dev,
- "info: Scan: Delete SSID Entry: Found Idx = %d\n",
- table_idx);
- mwifiex_scan_delete_table_entry(priv, table_idx);
- }
-
- return table_idx == -1 ? -1 : 0;
-}
-
-/*
* This is an internal function used to start a scan based on an input
* configuration.
*
@@ -2248,8 +1293,8 @@ mwifiex_scan_delete_ssid_table_entry(struct mwifiex_private *priv,
* order to send the appropriate scan commands to firmware to populate or
* update the internal driver scan table.
*/
-int mwifiex_scan_networks(struct mwifiex_private *priv,
- const struct mwifiex_user_scan_cfg *user_scan_in)
+static int mwifiex_scan_networks(struct mwifiex_private *priv,
+ const struct mwifiex_user_scan_cfg *user_scan_in)
{
int ret = 0;
struct mwifiex_adapter *adapter = priv->adapter;
@@ -2258,7 +1303,6 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
struct mwifiex_ie_types_chan_list_param_set *chan_list_out;
u32 buf_size;
struct mwifiex_chan_scan_param_set *scan_chan_list;
- u8 keep_previous_scan;
u8 filtered_scan;
u8 scan_current_chan_only;
u8 max_chan_per_scan;
@@ -2295,24 +1339,11 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
return -ENOMEM;
}
- keep_previous_scan = false;
-
mwifiex_scan_setup_scan_config(priv, user_scan_in,
&scan_cfg_out->config, &chan_list_out,
scan_chan_list, &max_chan_per_scan,
&filtered_scan, &scan_current_chan_only);
- if (user_scan_in)
- keep_previous_scan = user_scan_in->keep_previous_scan;
-
-
- if (!keep_previous_scan) {
- memset(adapter->scan_table, 0x00,
- sizeof(struct mwifiex_bssdescriptor) * IW_MAX_AP);
- adapter->num_in_scan_table = 0;
- adapter->bcn_buf_end = adapter->bcn_buf;
- }
-
ret = mwifiex_scan_channel_list(priv, max_chan_per_scan, filtered_scan,
&scan_cfg_out->config, chan_list_out,
scan_chan_list);
@@ -2326,6 +1357,7 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
+ adapter->cmd_queued = cmd_node;
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
true);
} else {
@@ -2344,6 +1376,29 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
}
/*
+ * Sends IOCTL request to start a scan with user configurations.
+ *
+ * This function allocates the IOCTL request buffer, fills it
+ * with requisite parameters and calls the IOCTL handler.
+ *
+ * Upon completion, it also generates a wireless event to notify
+ * applications.
+ */
+int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
+ struct mwifiex_user_scan_cfg *scan_req)
+{
+ int status;
+
+ priv->adapter->scan_wait_q_woken = false;
+
+ status = mwifiex_scan_networks(priv, scan_req);
+ if (!status)
+ status = mwifiex_wait_queue_complete(priv->adapter);
+
+ return status;
+}
+
+/*
* This function prepares a scan command to be sent to the firmware.
*
* This uses the scan command configuration sent to the command processing
@@ -2379,6 +1434,112 @@ int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
}
/*
+ * This function checks compatibility of requested network with current
+ * driver settings.
+ */
+int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc)
+{
+ int ret = -1;
+
+ if (!bss_desc)
+ return -1;
+
+ if ((mwifiex_get_cfp_by_band_and_channel_from_cfg80211(priv,
+ (u8) bss_desc->bss_band, (u16) bss_desc->channel))) {
+ switch (priv->bss_mode) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ ret = mwifiex_is_network_compatible(priv, bss_desc,
+ priv->bss_mode);
+ if (ret)
+ dev_err(priv->adapter->dev, "cannot find ssid "
+ "%s\n", bss_desc->ssid.ssid);
+ break;
+ default:
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int
+mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
+ s32 rssi, const u8 *ie_buf, size_t ie_len,
+ u16 beacon_period, u16 cap_info_bitmap, u8 band)
+{
+ struct mwifiex_bssdescriptor *bss_desc = NULL;
+ int ret;
+ unsigned long flags;
+ u8 *beacon_ie;
+
+ /* Allocate and fill new bss descriptor */
+ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
+ GFP_KERNEL);
+ if (!bss_desc) {
+ dev_err(priv->adapter->dev, " failed to alloc bss_desc\n");
+ return -ENOMEM;
+ }
+
+ beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL);
+ if (!beacon_ie) {
+ dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
+ return -ENOMEM;
+ }
+
+ ret = mwifiex_fill_new_bss_desc(priv, bssid, rssi, beacon_ie,
+ ie_len, beacon_period,
+ cap_info_bitmap, band, bss_desc);
+ if (ret)
+ goto done;
+
+ ret = mwifiex_check_network_compatibility(priv, bss_desc);
+ if (ret)
+ goto done;
+
+ /* Update current bss descriptor parameters */
+ spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags);
+ priv->curr_bss_params.bss_descriptor.bcn_wpa_ie = NULL;
+ priv->curr_bss_params.bss_descriptor.wpa_offset = 0;
+ priv->curr_bss_params.bss_descriptor.bcn_rsn_ie = NULL;
+ priv->curr_bss_params.bss_descriptor.rsn_offset = 0;
+ priv->curr_bss_params.bss_descriptor.bcn_wapi_ie = NULL;
+ priv->curr_bss_params.bss_descriptor.wapi_offset = 0;
+ priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL;
+ priv->curr_bss_params.bss_descriptor.ht_cap_offset =
+ 0;
+ priv->curr_bss_params.bss_descriptor.bcn_ht_info = NULL;
+ priv->curr_bss_params.bss_descriptor.ht_info_offset =
+ 0;
+ priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 =
+ NULL;
+ priv->curr_bss_params.bss_descriptor.
+ bss_co_2040_offset = 0;
+ priv->curr_bss_params.bss_descriptor.bcn_ext_cap = NULL;
+ priv->curr_bss_params.bss_descriptor.ext_cap_offset = 0;
+ priv->curr_bss_params.bss_descriptor.beacon_buf = NULL;
+ priv->curr_bss_params.bss_descriptor.beacon_buf_size =
+ 0;
+
+ /* Make a copy of current BSSID descriptor */
+ memcpy(&priv->curr_bss_params.bss_descriptor, bss_desc,
+ sizeof(priv->curr_bss_params.bss_descriptor));
+ mwifiex_save_curr_bcn(priv);
+ spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags);
+
+done:
+ kfree(bss_desc);
+ kfree(beacon_ie);
+ return 0;
+}
+
+static void mwifiex_free_bss_priv(struct cfg80211_bss *bss)
+{
+ kfree(bss->priv);
+}
+
+/*
* This function handles the command response of scan.
*
* The response buffer for the scan command has the following
@@ -2404,23 +1565,19 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct cmd_ctrl_node *cmd_node;
struct host_cmd_ds_802_11_scan_rsp *scan_rsp;
- struct mwifiex_bssdescriptor *bss_new_entry = NULL;
struct mwifiex_ie_types_data *tlv_data;
struct mwifiex_ie_types_tsf_timestamp *tsf_tlv;
u8 *bss_info;
u32 scan_resp_size;
u32 bytes_left;
- u32 num_in_table;
- u32 bss_idx;
u32 idx;
u32 tlv_buf_size;
- long long tsf_val;
struct mwifiex_chan_freq_power *cfp;
struct mwifiex_ie_types_chan_band_list_param_set *chan_band_tlv;
struct chan_band_param_set *chan_band;
- u8 band;
u8 is_bgscan_resp;
unsigned long flags;
+ struct cfg80211_bss *bss;
is_bgscan_resp = (le16_to_cpu(resp->command)
== HostCmd_CMD_802_11_BG_SCAN_QUERY);
@@ -2430,7 +1587,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
scan_rsp = &resp->params.scan_resp;
- if (scan_rsp->number_of_sets > IW_MAX_AP) {
+ if (scan_rsp->number_of_sets > MWIFIEX_MAX_AP) {
dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
scan_rsp->number_of_sets);
ret = -1;
@@ -2447,7 +1604,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
"info: SCAN_RESP: returned %d APs before parsing\n",
scan_rsp->number_of_sets);
- num_in_table = adapter->num_in_scan_table;
bss_info = scan_rsp->bss_desc_and_tlv_buffer;
/*
@@ -2479,125 +1635,149 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
(struct mwifiex_ie_types_data **)
&chan_band_tlv);
- /*
- * Process each scan response returned (scan_rsp->number_of_sets).
- * Save the information in the bss_new_entry and then insert into the
- * driver scan table either as an update to an existing entry
- * or as an addition at the end of the table
- */
- bss_new_entry = kzalloc(sizeof(struct mwifiex_bssdescriptor),
- GFP_KERNEL);
- if (!bss_new_entry) {
- dev_err(adapter->dev, " failed to alloc bss_new_entry\n");
- return -ENOMEM;
- }
-
for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
- /* Zero out the bss_new_entry we are about to store info in */
- memset(bss_new_entry, 0x00,
- sizeof(struct mwifiex_bssdescriptor));
-
- if (mwifiex_interpret_bss_desc_with_ie(adapter, bss_new_entry,
- &bss_info,
- &bytes_left)) {
- /* Error parsing/interpreting scan response, skipped */
- dev_err(adapter->dev, "SCAN_RESP: "
- "mwifiex_interpret_bss_desc_with_ie "
- "returned ERROR\n");
- continue;
+ u8 bssid[ETH_ALEN];
+ s32 rssi;
+ const u8 *ie_buf;
+ size_t ie_len;
+ int channel = -1;
+ u64 network_tsf = 0;
+ u16 beacon_size = 0;
+ u32 curr_bcn_bytes;
+ u32 freq;
+ u16 beacon_period;
+ u16 cap_info_bitmap;
+ u8 *current_ptr;
+ struct mwifiex_bcn_param *bcn_param;
+
+ if (bytes_left >= sizeof(beacon_size)) {
+ /* Extract & convert beacon size from command buffer */
+ memcpy(&beacon_size, bss_info, sizeof(beacon_size));
+ bytes_left -= sizeof(beacon_size);
+ bss_info += sizeof(beacon_size);
}
- /* Process the data fields and IEs returned for this BSS */
- dev_dbg(adapter->dev, "info: SCAN_RESP: BSSID = %pM\n",
- bss_new_entry->mac_address);
+ if (!beacon_size || beacon_size > bytes_left) {
+ bss_info += bytes_left;
+ bytes_left = 0;
+ return -1;
+ }
- /* Search the scan table for the same bssid */
- for (bss_idx = 0; bss_idx < num_in_table; bss_idx++) {
- if (memcmp(bss_new_entry->mac_address,
- adapter->scan_table[bss_idx].mac_address,
- sizeof(bss_new_entry->mac_address))) {
- continue;
+ /* Initialize the current working beacon pointer for this BSS
+ * iteration */
+ current_ptr = bss_info;
+
+ /* Advance the return beacon pointer past the current beacon */
+ bss_info += beacon_size;
+ bytes_left -= beacon_size;
+
+ curr_bcn_bytes = beacon_size;
+
+ /*
+ * First 5 fields are bssid, RSSI, time stamp, beacon interval,
+ * and capability information
+ */
+ if (curr_bcn_bytes < sizeof(struct mwifiex_bcn_param)) {
+ dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
+ continue;
+ }
+ bcn_param = (struct mwifiex_bcn_param *)current_ptr;
+ current_ptr += sizeof(*bcn_param);
+ curr_bcn_bytes -= sizeof(*bcn_param);
+
+ memcpy(bssid, bcn_param->bssid, ETH_ALEN);
+
+ rssi = (s32) (bcn_param->rssi);
+ dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%02X\n",
+ rssi);
+
+ beacon_period = le16_to_cpu(bcn_param->beacon_period);
+
+ cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
+ dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
+ cap_info_bitmap);
+
+ /* Rest of the current buffer are IE's */
+ ie_buf = current_ptr;
+ ie_len = curr_bcn_bytes;
+ dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP"
+ " = %d\n", curr_bcn_bytes);
+
+ while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
+ u8 element_id, element_len;
+
+ element_id = *current_ptr;
+ element_len = *(current_ptr + 1);
+ if (curr_bcn_bytes < element_len +
+ sizeof(struct ieee_types_header)) {
+ dev_err(priv->adapter->dev, "%s: in processing"
+ " IE, bytes left < IE length\n",
+ __func__);
+ goto done;
}
- /*
- * If the SSID matches as well, it is a
- * duplicate of this entry. Keep the bss_idx
- * set to this entry so we replace the old
- * contents in the table
- */
- if ((bss_new_entry->ssid.ssid_len
- == adapter->scan_table[bss_idx]. ssid.ssid_len)
- && (!memcmp(bss_new_entry->ssid.ssid,
- adapter->scan_table[bss_idx].ssid.ssid,
- bss_new_entry->ssid.ssid_len))) {
- dev_dbg(adapter->dev, "info: SCAN_RESP:"
- " duplicate of index: %d\n", bss_idx);
+ if (element_id == WLAN_EID_DS_PARAMS) {
+ channel = *(u8 *) (current_ptr +
+ sizeof(struct ieee_types_header));
break;
}
- }
- /*
- * If the bss_idx is equal to the number of entries in
- * the table, the new entry was not a duplicate; append
- * it to the scan table
- */
- if (bss_idx == num_in_table) {
- /* Range check the bss_idx, keep it limited to
- the last entry */
- if (bss_idx == IW_MAX_AP)
- bss_idx--;
- else
- num_in_table++;
+
+ current_ptr += element_len +
+ sizeof(struct ieee_types_header);
+ curr_bcn_bytes -= element_len +
+ sizeof(struct ieee_types_header);
}
/*
- * Save the beacon/probe response returned for later application
- * retrieval. Duplicate beacon/probe responses are updated if
- * possible
- */
- mwifiex_ret_802_11_scan_store_beacon(priv, bss_idx,
- num_in_table, bss_new_entry);
- /*
* If the TSF TLV was appended to the scan results, save this
* entry's TSF value in the networkTSF field.The networkTSF is
* the firmware's TSF value at the time the beacon or probe
* response was received.
*/
- if (tsf_tlv) {
- memcpy(&tsf_val, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE]
- , sizeof(tsf_val));
- memcpy(&bss_new_entry->network_tsf, &tsf_val,
- sizeof(bss_new_entry->network_tsf));
- }
- band = BAND_G;
- if (chan_band_tlv) {
- chan_band = &chan_band_tlv->chan_band_param[idx];
- band = mwifiex_radio_type_to_band(chan_band->radio_type
- & (BIT(0) | BIT(1)));
- }
+ if (tsf_tlv)
+ memcpy(&network_tsf,
+ &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
+ sizeof(network_tsf));
+
+ if (channel != -1) {
+ struct ieee80211_channel *chan;
+ u8 band;
+
+ band = BAND_G;
+ if (chan_band_tlv) {
+ chan_band =
+ &chan_band_tlv->chan_band_param[idx];
+ band = mwifiex_radio_type_to_band(
+ chan_band->radio_type
+ & (BIT(0) | BIT(1)));
+ }
- /* Save the band designation for this entry for use in join */
- bss_new_entry->bss_band = band;
- cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211(priv,
- (u8) bss_new_entry->bss_band,
- (u16)bss_new_entry->channel);
+ cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211(
+ priv, (u8)band, (u16)channel);
- if (cfp)
- bss_new_entry->freq = cfp->freq;
- else
- bss_new_entry->freq = 0;
+ freq = cfp ? cfp->freq : 0;
- /* Copy the locally created bss_new_entry to the scan table */
- memcpy(&adapter->scan_table[bss_idx], bss_new_entry,
- sizeof(adapter->scan_table[bss_idx]));
+ chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
- }
+ if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
+ bss = cfg80211_inform_bss(priv->wdev->wiphy,
+ chan, bssid, network_tsf,
+ cap_info_bitmap, beacon_period,
+ ie_buf, ie_len, rssi, GFP_KERNEL);
+ *(u8 *)bss->priv = band;
+ bss->free_priv = mwifiex_free_bss_priv;
- dev_dbg(adapter->dev,
- "info: SCAN_RESP: Scanned %2d APs, %d valid, %d total\n",
- scan_rsp->number_of_sets,
- num_in_table - adapter->num_in_scan_table, num_in_table);
-
- /* Update the total number of BSSIDs in the scan table */
- adapter->num_in_scan_table = num_in_table;
+ if (priv->media_connected && !memcmp(bssid,
+ priv->curr_bss_params.bss_descriptor
+ .mac_address, ETH_ALEN))
+ mwifiex_update_curr_bss_params(priv,
+ bssid, rssi, ie_buf,
+ ie_len, beacon_period,
+ cap_info_bitmap, band);
+ }
+ } else {
+ dev_dbg(adapter->dev, "missing BSS channel IE\n");
+ }
+ }
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
if (list_empty(&adapter->scan_pending_q)) {
@@ -2605,17 +1785,11 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
- /*
- * Process the resulting scan table:
- * - Remove any bad ssids
- * - Update our current BSS information from scan data
- */
- mwifiex_process_scan_results(priv);
/* Need to indicate IOCTL complete */
if (adapter->curr_cmd->wait_q_enabled) {
adapter->cmd_wait_q.status = 0;
- mwifiex_complete_cmd(adapter);
+ mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
if (priv->report_scan_result)
priv->report_scan_result = false;
@@ -2636,7 +1810,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
}
done:
- kfree((u8 *) bss_new_entry);
return ret;
}
@@ -2663,141 +1836,6 @@ int mwifiex_cmd_802_11_bg_scan_query(struct host_cmd_ds_command *cmd)
}
/*
- * This function finds a SSID in the scan table.
- *
- * A BSSID may optionally be provided to qualify the SSID.
- * For non-Auto mode, further check is made to make sure the
- * BSS found in the scan table is compatible with the current
- * settings of the driver.
- */
-s32
-mwifiex_find_ssid_in_list(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *ssid, u8 *bssid,
- u32 mode)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- s32 net = -1, j;
- u8 best_rssi = 0;
- u32 i;
-
- dev_dbg(adapter->dev, "info: num of entries in table = %d\n",
- adapter->num_in_scan_table);
-
- /*
- * Loop through the table until the maximum is reached or until a match
- * is found based on the bssid field comparison
- */
- for (i = 0;
- i < adapter->num_in_scan_table && (!bssid || (bssid && net < 0));
- i++) {
- if (!mwifiex_ssid_cmp(&adapter->scan_table[i].ssid, ssid) &&
- (!bssid
- || !memcmp(adapter->scan_table[i].mac_address, bssid,
- ETH_ALEN))
- &&
- (mwifiex_get_cfp_by_band_and_channel_from_cfg80211
- (priv, (u8) adapter->scan_table[i].bss_band,
- (u16) adapter->scan_table[i].channel))) {
- switch (mode) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- j = mwifiex_is_network_compatible(priv, i,
- mode);
-
- if (j >= 0) {
- if (SCAN_RSSI
- (adapter->scan_table[i].rssi) >
- best_rssi) {
- best_rssi = SCAN_RSSI(adapter->
- scan_table
- [i].rssi);
- net = i;
- }
- } else {
- if (net == -1)
- net = j;
- }
- break;
- case NL80211_IFTYPE_UNSPECIFIED:
- default:
- /*
- * Do not check compatibility if the mode
- * requested is Auto/Unknown. Allows generic
- * find to work without verifying against the
- * Adapter security settings
- */
- if (SCAN_RSSI(adapter->scan_table[i].rssi) >
- best_rssi) {
- best_rssi = SCAN_RSSI(adapter->
- scan_table[i].rssi);
- net = i;
- }
- break;
- }
- }
- }
-
- return net;
-}
-
-/*
- * This function finds a specific compatible BSSID in the scan list.
- *
- * This function loops through the scan table looking for a compatible
- * match. If a BSSID matches, but the BSS is found to be not compatible
- * the function ignores it and continues to search through the rest of
- * the entries in case there is an AP with multiple SSIDs assigned to
- * the same BSSID.
- */
-s32
-mwifiex_find_bssid_in_list(struct mwifiex_private *priv, u8 *bssid,
- u32 mode)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- s32 net = -1;
- u32 i;
-
- if (!bssid)
- return -1;
-
- dev_dbg(adapter->dev, "info: FindBSSID: Num of BSSIDs = %d\n",
- adapter->num_in_scan_table);
-
- /*
- * Look through the scan table for a compatible match. The ret return
- * variable will be equal to the index in the scan table (greater
- * than zero) if the network is compatible. The loop will continue
- * past a matched bssid that is not compatible in case there is an
- * AP with multiple SSIDs assigned to the same BSSID
- */
- for (i = 0; net < 0 && i < adapter->num_in_scan_table; i++) {
- if (!memcmp
- (adapter->scan_table[i].mac_address, bssid, ETH_ALEN)
- && mwifiex_get_cfp_by_band_and_channel_from_cfg80211
- (priv,
- (u8) adapter->
- scan_table[i].
- bss_band,
- (u16) adapter->
- scan_table[i].
- channel)) {
- switch (mode) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- net = mwifiex_is_network_compatible(priv, i,
- mode);
- break;
- default:
- net = i;
- break;
- }
- }
- }
-
- return net;
-}
-
-/*
* This function inserts scan command node to the scan pending queue.
*/
void
@@ -2808,48 +1846,13 @@ mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
unsigned long flags;
cmd_node->wait_q_enabled = true;
+ cmd_node->condition = &adapter->scan_wait_q_woken;
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
list_add_tail(&cmd_node->list, &adapter->scan_pending_q);
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
}
/*
- * This function finds an AP with specific ssid in the scan list.
- */
-int mwifiex_find_best_network(struct mwifiex_private *priv,
- struct mwifiex_ssid_bssid *req_ssid_bssid)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- struct mwifiex_bssdescriptor *req_bss;
- s32 i;
-
- memset(req_ssid_bssid, 0, sizeof(struct mwifiex_ssid_bssid));
-
- i = mwifiex_find_best_network_in_list(priv);
-
- if (i >= 0) {
- req_bss = &adapter->scan_table[i];
- memcpy(&req_ssid_bssid->ssid, &req_bss->ssid,
- sizeof(struct mwifiex_802_11_ssid));
- memcpy((u8 *) &req_ssid_bssid->bssid,
- (u8 *) &req_bss->mac_address, ETH_ALEN);
-
- /* Make sure we are in the right mode */
- if (priv->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
- priv->bss_mode = req_bss->bss_mode;
- }
-
- if (!req_ssid_bssid->ssid.ssid_len)
- return -1;
-
- dev_dbg(adapter->dev, "info: Best network found = [%s], "
- "[%pM]\n", req_ssid_bssid->ssid.ssid,
- req_ssid_bssid->bssid);
-
- return 0;
-}
-
-/*
* This function sends a scan command for all available channels to the
* firmware, filtered on a specific SSID.
*/
@@ -2874,8 +1877,6 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
return ret;
}
- mwifiex_scan_delete_ssid_table_entry(priv, req_ssid);
-
scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL);
if (!scan_cfg) {
dev_err(adapter->dev, "failed to alloc scan_cfg\n");
@@ -2884,7 +1885,6 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
memcpy(scan_cfg->ssid_list[0].ssid, req_ssid->ssid,
req_ssid->ssid_len);
- scan_cfg->keep_previous_scan = true;
ret = mwifiex_scan_networks(priv, scan_cfg);
@@ -2913,7 +1913,7 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
}
priv->scan_pending_on_block = true;
- priv->adapter->cmd_wait_q.condition = false;
+ priv->adapter->scan_wait_q_woken = false;
if (req_ssid && req_ssid->ssid_len != 0)
/* Specific SSID scan */
@@ -2997,7 +1997,7 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
priv->curr_bcn_size = curr_bss->beacon_buf_size;
kfree(priv->curr_bcn_buf);
- priv->curr_bcn_buf = kzalloc(curr_bss->beacon_buf_size,
+ priv->curr_bcn_buf = kmalloc(curr_bss->beacon_buf_size,
GFP_KERNEL);
if (!priv->curr_bcn_buf) {
dev_err(priv->adapter->dev,
@@ -3010,6 +2010,39 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
curr_bss->beacon_buf_size);
dev_dbg(priv->adapter->dev, "info: current beacon saved %d\n",
priv->curr_bcn_size);
+
+ curr_bss->beacon_buf = priv->curr_bcn_buf;
+
+ /* adjust the pointers in the current BSS descriptor */
+ if (curr_bss->bcn_wpa_ie)
+ curr_bss->bcn_wpa_ie =
+ (struct ieee_types_vendor_specific *)
+ (curr_bss->beacon_buf +
+ curr_bss->wpa_offset);
+
+ if (curr_bss->bcn_rsn_ie)
+ curr_bss->bcn_rsn_ie = (struct ieee_types_generic *)
+ (curr_bss->beacon_buf +
+ curr_bss->rsn_offset);
+
+ if (curr_bss->bcn_ht_cap)
+ curr_bss->bcn_ht_cap = (struct ieee80211_ht_cap *)
+ (curr_bss->beacon_buf +
+ curr_bss->ht_cap_offset);
+
+ if (curr_bss->bcn_ht_info)
+ curr_bss->bcn_ht_info = (struct ieee80211_ht_info *)
+ (curr_bss->beacon_buf +
+ curr_bss->ht_info_offset);
+
+ if (curr_bss->bcn_bss_co_2040)
+ curr_bss->bcn_bss_co_2040 =
+ (u8 *) (curr_bss->beacon_buf +
+ curr_bss->bss_co_2040_offset);
+
+ if (curr_bss->bcn_ext_cap)
+ curr_bss->bcn_ext_cap = (u8 *) (curr_bss->beacon_buf +
+ curr_bss->ext_cap_offset);
}
/*
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 82098ac483b..283171bbced 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -89,7 +89,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
return -EIO;
}
- if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops)) {
+ if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops,
+ MWIFIEX_SDIO)) {
pr_err("%s: add card failed\n", __func__);
kfree(card);
sdio_claim_host(func);
@@ -830,7 +831,7 @@ done:
* The winner interface is also determined by this function.
*/
static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
- u32 poll_num, int *winner)
+ u32 poll_num)
{
int ret = 0;
u16 firmware_stat;
@@ -842,7 +843,7 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
if (ret)
continue;
- if (firmware_stat == FIRMWARE_READY) {
+ if (firmware_stat == FIRMWARE_READY_SDIO) {
ret = 0;
break;
} else {
@@ -851,15 +852,15 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
}
}
- if (winner && ret) {
+ if (ret) {
if (mwifiex_read_reg
(adapter, CARD_FW_STATUS0_REG, &winner_status))
winner_status = 0;
if (winner_status)
- *winner = 0;
+ adapter->winner = 0;
else
- *winner = 1;
+ adapter->winner = 1;
}
return ret;
}
@@ -1413,7 +1414,7 @@ tx_curr_single:
* the type. The firmware handles the packets based upon this set type.
*/
static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
- u8 type, u8 *payload, u32 pkt_len,
+ u8 type, struct sk_buff *skb,
struct mwifiex_tx_param *tx_param)
{
struct sdio_mmc_card *card = adapter->card;
@@ -1421,6 +1422,8 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
u32 buf_block_len;
u32 blk_size;
u8 port = CTRL_PORT;
+ u8 *payload = (u8 *)skb->data;
+ u32 pkt_len = skb->len;
/* Allocate buffer and copy payload */
blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
@@ -1722,6 +1725,8 @@ static struct mwifiex_if_ops sdio_ops = {
/* SDIO specific */
.update_mp_end_port = mwifiex_update_mp_end_port,
.cleanup_mpa_buf = mwifiex_cleanup_mpa_buf,
+ .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
+ .event_complete = mwifiex_sdio_event_complete,
};
/*
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 524f78f4ee6..3f711801e58 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -169,9 +169,6 @@
/* Rx unit register */
#define CARD_RX_UNIT_REG 0x63
-/* Event header len w/o 4 bytes of interface header */
-#define MWIFIEX_EVENT_HEADER_LEN 4
-
/* Max retry number of CMD53 write */
#define MAX_WRITE_IOMEM_RETRY 2
@@ -304,4 +301,25 @@ struct sdio_mmc_card {
struct mwifiex_sdio_mpa_tx mpa_tx;
struct mwifiex_sdio_mpa_rx mpa_rx;
};
+
+/*
+ * .cmdrsp_complete handler
+ */
+static inline int mwifiex_sdio_cmdrsp_complete(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+ return 0;
+}
+
+/*
+ * .event_complete handler
+ */
+static inline int mwifiex_sdio_event_complete(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+ return 0;
+}
+
#endif /* _MWIFIEX_SDIO_H */
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index c54ee287b87..ea6518d1c9e 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -902,6 +902,59 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
}
/*
+ * This function prepares command to set PCI-Express
+ * host buffer configuration
+ *
+ * Preparation includes -
+ * - Setting command ID, action and proper size
+ * - Setting host buffer configuration
+ * - Ensuring correct endian-ness
+ */
+static int
+mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd, u16 action)
+{
+ struct host_cmd_ds_pcie_details *host_spec =
+ &cmd->params.pcie_host_spec;
+ struct pcie_service_card *card = priv->adapter->card;
+ phys_addr_t *buf_pa;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_PCIE_DESC_DETAILS);
+ cmd->size = cpu_to_le16(sizeof(struct
+ host_cmd_ds_pcie_details) + S_DS_GEN);
+ cmd->result = 0;
+
+ memset(host_spec, 0, sizeof(struct host_cmd_ds_pcie_details));
+
+ if (action == HostCmd_ACT_GEN_SET) {
+ /* Send the ring base addresses and count to firmware */
+ host_spec->txbd_addr_lo = (u32)(card->txbd_ring_pbase);
+ host_spec->txbd_addr_hi =
+ (u32)(((u64)card->txbd_ring_pbase)>>32);
+ host_spec->txbd_count = MWIFIEX_MAX_TXRX_BD;
+ host_spec->rxbd_addr_lo = (u32)(card->rxbd_ring_pbase);
+ host_spec->rxbd_addr_hi =
+ (u32)(((u64)card->rxbd_ring_pbase)>>32);
+ host_spec->rxbd_count = MWIFIEX_MAX_TXRX_BD;
+ host_spec->evtbd_addr_lo =
+ (u32)(card->evtbd_ring_pbase);
+ host_spec->evtbd_addr_hi =
+ (u32)(((u64)card->evtbd_ring_pbase)>>32);
+ host_spec->evtbd_count = MWIFIEX_MAX_EVT_BD;
+ if (card->sleep_cookie) {
+ buf_pa = MWIFIEX_SKB_PACB(card->sleep_cookie);
+ host_spec->sleep_cookie_addr_lo = (u32) *buf_pa;
+ host_spec->sleep_cookie_addr_hi =
+ (u32) (((u64)*buf_pa) >> 32);
+ dev_dbg(priv->adapter->dev, "sleep_cook_lo phy addr: "
+ "0x%x\n", host_spec->sleep_cookie_addr_lo);
+ }
+ }
+
+ return 0;
+}
+
+/*
* This function prepares the commands before sending them to the firmware.
*
* This is a generic function which calls specific command preparation
@@ -1079,6 +1132,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
host_cmd_ds_set_bss_mode) + S_DS_GEN);
ret = 0;
break;
+ case HostCmd_CMD_PCIE_DESC_DETAILS:
+ ret = mwifiex_cmd_pcie_host_spec(priv, cmd_ptr, cmd_action);
+ break;
default:
dev_err(priv->adapter->dev,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1095,6 +1151,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
* working state.
*
* The following commands are issued sequentially -
+ * - Set PCI-Express host buffer configuration (PCIE only)
* - Function init (for first interface only)
* - Read MAC address (for first interface only)
* - Reconfigure Tx buffer size (for first interface only)
@@ -1116,6 +1173,13 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
struct mwifiex_ds_11n_tx_cfg tx_cfg;
if (first_sta) {
+ if (priv->adapter->iface_type == MWIFIEX_PCIE) {
+ ret = mwifiex_send_cmd_async(priv,
+ HostCmd_CMD_PCIE_DESC_DETAILS,
+ HostCmd_ACT_GEN_SET, 0, NULL);
+ if (ret)
+ return -1;
+ }
ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_FUNC_INIT,
HostCmd_ACT_GEN_SET, 0, NULL);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 6804239d87b..7a16b0c417a 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -952,6 +952,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_11N_CFG:
ret = mwifiex_ret_11n_cfg(resp, data_buf);
break;
+ case HostCmd_CMD_PCIE_DESC_DETAILS:
+ break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index fc265cab090..f204810e833 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -130,8 +130,8 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
/* Reset wireless stats signal info */
- priv->w_stats.qual.level = 0;
- priv->w_stats.qual.noise = 0;
+ priv->qual_level = 0;
+ priv->qual_noise = 0;
}
/*
@@ -299,11 +299,6 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_BG_SCAN_REPORT:
dev_dbg(adapter->dev, "event: BGS_REPORT\n");
- /* Clear the previous scan result */
- memset(adapter->scan_table, 0x00,
- sizeof(struct mwifiex_bssdescriptor) * IW_MAX_AP);
- adapter->num_in_scan_table = 0;
- adapter->bcn_buf_end = adapter->bcn_buf;
ret = mwifiex_send_cmd_async(priv,
HostCmd_CMD_802_11_BG_SCAN_QUERY,
HostCmd_ACT_GEN_GET, 0, NULL);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index c34ff8c4f4f..ea4a29b7e33 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -55,7 +55,9 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
{
bool cancel_flag = false;
int status = adapter->cmd_wait_q.status;
+ struct cmd_ctrl_node *cmd_queued = adapter->cmd_queued;
+ adapter->cmd_queued = NULL;
dev_dbg(adapter->dev, "cmd pending\n");
atomic_inc(&adapter->cmd_pending);
@@ -64,8 +66,8 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
/* Wait for completion */
wait_event_interruptible(adapter->cmd_wait_q.wait,
- adapter->cmd_wait_q.condition);
- if (!adapter->cmd_wait_q.condition)
+ *(cmd_queued->condition));
+ if (!*(cmd_queued->condition))
cancel_flag = true;
if (cancel_flag) {
@@ -142,90 +144,146 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
}
/*
+ * This function fills bss descriptor structure using provided
+ * information.
+ */
+int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
+ u8 *bssid, s32 rssi, u8 *ie_buf,
+ size_t ie_len, u16 beacon_period,
+ u16 cap_info_bitmap, u8 band,
+ struct mwifiex_bssdescriptor *bss_desc)
+{
+ int ret;
+
+ memcpy(bss_desc->mac_address, bssid, ETH_ALEN);
+ bss_desc->rssi = rssi;
+ bss_desc->beacon_buf = ie_buf;
+ bss_desc->beacon_buf_size = ie_len;
+ bss_desc->beacon_period = beacon_period;
+ bss_desc->cap_info_bitmap = cap_info_bitmap;
+ bss_desc->bss_band = band;
+ if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
+ dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n");
+ bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
+ } else {
+ bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
+ }
+ if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_IBSS)
+ bss_desc->bss_mode = NL80211_IFTYPE_ADHOC;
+ else
+ bss_desc->bss_mode = NL80211_IFTYPE_STATION;
+
+ ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc,
+ ie_buf, ie_len);
+
+ return ret;
+}
+
+/*
* In Ad-Hoc mode, the IBSS is created if not found in scan list.
* In both Ad-Hoc and infra mode, an deauthentication is performed
* first.
*/
-int mwifiex_bss_start(struct mwifiex_private *priv,
- struct mwifiex_ssid_bssid *ssid_bssid)
+int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
+ struct mwifiex_802_11_ssid *req_ssid)
{
int ret;
struct mwifiex_adapter *adapter = priv->adapter;
- s32 i = -1;
+ struct mwifiex_bssdescriptor *bss_desc = NULL;
+ u8 *beacon_ie = NULL;
priv->scan_block = false;
- if (!ssid_bssid)
- return -1;
+
+ if (bss) {
+ /* Allocate and fill new bss descriptor */
+ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
+ GFP_KERNEL);
+ if (!bss_desc) {
+ dev_err(priv->adapter->dev, " failed to alloc bss_desc\n");
+ return -ENOMEM;
+ }
+
+ beacon_ie = kmemdup(bss->information_elements,
+ bss->len_beacon_ies, GFP_KERNEL);
+ if (!beacon_ie) {
+ kfree(bss_desc);
+ dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
+ return -ENOMEM;
+ }
+
+ ret = mwifiex_fill_new_bss_desc(priv, bss->bssid, bss->signal,
+ beacon_ie, bss->len_beacon_ies,
+ bss->beacon_interval,
+ bss->capability,
+ *(u8 *)bss->priv, bss_desc);
+ if (ret)
+ goto done;
+ }
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
/* Infra mode */
ret = mwifiex_deauthenticate(priv, NULL);
if (ret)
- return ret;
+ goto done;
- /* Search for the requested SSID in the scan table */
- if (ssid_bssid->ssid.ssid_len)
- i = mwifiex_find_ssid_in_list(priv, &ssid_bssid->ssid,
- NULL, NL80211_IFTYPE_STATION);
- else
- i = mwifiex_find_bssid_in_list(priv,
- (u8 *) &ssid_bssid->bssid,
- NL80211_IFTYPE_STATION);
- if (i < 0)
- return -1;
+ ret = mwifiex_check_network_compatibility(priv, bss_desc);
+ if (ret)
+ goto done;
- dev_dbg(adapter->dev,
- "info: SSID found in scan list ... associating...\n");
+ dev_dbg(adapter->dev, "info: SSID found in scan list ... "
+ "associating...\n");
+
+ if (!netif_queue_stopped(priv->netdev))
+ netif_stop_queue(priv->netdev);
/* Clear any past association response stored for
* application retrieval */
priv->assoc_rsp_size = 0;
- ret = mwifiex_associate(priv, &adapter->scan_table[i]);
- if (ret)
- return ret;
+ ret = mwifiex_associate(priv, bss_desc);
+ if (bss)
+ cfg80211_put_bss(bss);
} else {
/* Adhoc mode */
/* If the requested SSID matches current SSID, return */
- if (ssid_bssid->ssid.ssid_len &&
+ if (bss_desc && bss_desc->ssid.ssid_len &&
(!mwifiex_ssid_cmp
(&priv->curr_bss_params.bss_descriptor.ssid,
- &ssid_bssid->ssid)))
+ &bss_desc->ssid))) {
+ kfree(bss_desc);
+ kfree(beacon_ie);
return 0;
+ }
/* Exit Adhoc mode first */
dev_dbg(adapter->dev, "info: Sending Adhoc Stop\n");
ret = mwifiex_deauthenticate(priv, NULL);
if (ret)
- return ret;
+ goto done;
priv->adhoc_is_link_sensed = false;
- /* Search for the requested network in the scan table */
- if (ssid_bssid->ssid.ssid_len)
- i = mwifiex_find_ssid_in_list(priv,
- &ssid_bssid->ssid, NULL,
- NL80211_IFTYPE_ADHOC);
- else
- i = mwifiex_find_bssid_in_list(priv,
- (u8 *)&ssid_bssid->bssid,
- NL80211_IFTYPE_ADHOC);
+ ret = mwifiex_check_network_compatibility(priv, bss_desc);
+
+ if (!netif_queue_stopped(priv->netdev))
+ netif_stop_queue(priv->netdev);
- if (i >= 0) {
+ if (!ret) {
dev_dbg(adapter->dev, "info: network found in scan"
" list. Joining...\n");
- ret = mwifiex_adhoc_join(priv, &adapter->scan_table[i]);
- if (ret)
- return ret;
+ ret = mwifiex_adhoc_join(priv, bss_desc);
+ if (bss)
+ cfg80211_put_bss(bss);
} else {
dev_dbg(adapter->dev, "info: Network not found in "
"the list, creating adhoc with ssid = %s\n",
- ssid_bssid->ssid.ssid);
- ret = mwifiex_adhoc_start(priv, &ssid_bssid->ssid);
- if (ret)
- return ret;
+ req_ssid->ssid);
+ ret = mwifiex_adhoc_start(priv, req_ssid);
}
}
+done:
+ kfree(bss_desc);
+ kfree(beacon_ie);
return ret;
}
@@ -235,8 +293,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv,
* This function prepares the correct firmware command and
* issues it.
*/
-int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
- int cmd_type, struct mwifiex_ds_hs_cfg *hs_cfg)
+static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
+ int cmd_type, struct mwifiex_ds_hs_cfg *hs_cfg)
{
struct mwifiex_adapter *adapter = priv->adapter;
@@ -376,7 +434,6 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
{
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_bssdescriptor *bss_desc;
- s32 tbl_idx;
if (!info)
return -1;
@@ -394,17 +451,6 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
info->region_code = adapter->region_code;
- /* Scan table index if connected */
- info->scan_table_idx = 0;
- if (priv->media_connected) {
- tbl_idx =
- mwifiex_find_ssid_in_list(priv, &bss_desc->ssid,
- bss_desc->mac_address,
- priv->bss_mode);
- if (tbl_idx >= 0)
- info->scan_table_idx = tbl_idx;
- }
-
info->media_connected = priv->media_connected;
info->max_power_level = priv->max_tx_power_level;
@@ -586,50 +632,6 @@ static int mwifiex_bss_ioctl_ibss_channel(struct mwifiex_private *priv,
}
/*
- * IOCTL request handler to find a particular BSS.
- *
- * The BSS can be searched with either a BSSID or a SSID. If none of
- * these are provided, just the best BSS (best RSSI) is returned.
- */
-int mwifiex_bss_ioctl_find_bss(struct mwifiex_private *priv,
- struct mwifiex_ssid_bssid *ssid_bssid)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- struct mwifiex_bssdescriptor *bss_desc;
- u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
- u8 mac[ETH_ALEN];
- int i = 0;
-
- if (memcmp(ssid_bssid->bssid, zero_mac, sizeof(zero_mac))) {
- i = mwifiex_find_bssid_in_list(priv,
- (u8 *) ssid_bssid->bssid,
- priv->bss_mode);
- if (i < 0) {
- memcpy(mac, ssid_bssid->bssid, sizeof(mac));
- dev_err(adapter->dev, "cannot find bssid %pM\n", mac);
- return -1;
- }
- bss_desc = &adapter->scan_table[i];
- memcpy(&ssid_bssid->ssid, &bss_desc->ssid,
- sizeof(struct mwifiex_802_11_ssid));
- } else if (ssid_bssid->ssid.ssid_len) {
- i = mwifiex_find_ssid_in_list(priv, &ssid_bssid->ssid, NULL,
- priv->bss_mode);
- if (i < 0) {
- dev_err(adapter->dev, "cannot find ssid %s\n",
- ssid_bssid->ssid.ssid);
- return -1;
- }
- bss_desc = &adapter->scan_table[i];
- memcpy(ssid_bssid->bssid, bss_desc->mac_address, ETH_ALEN);
- } else {
- return mwifiex_find_best_network(priv, ssid_bssid);
- }
-
- return 0;
-}
-
-/*
* IOCTL request handler to change Ad-Hoc channel.
*
* This function allocates the IOCTL request buffer, fills it
@@ -653,6 +655,9 @@ mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel)
struct mwifiex_bss_info bss_info;
struct mwifiex_ssid_bssid ssid_bssid;
u16 curr_chan = 0;
+ struct cfg80211_bss *bss = NULL;
+ struct ieee80211_channel *chan;
+ enum ieee80211_band band;
memset(&bss_info, 0, sizeof(bss_info));
@@ -688,12 +693,20 @@ mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel)
ret = -1;
goto done;
}
- /* Start/Join Adhoc network */
- memset(&ssid_bssid, 0, sizeof(struct mwifiex_ssid_bssid));
- memcpy(&ssid_bssid.ssid, &bss_info.ssid,
- sizeof(struct mwifiex_802_11_ssid));
- ret = mwifiex_bss_start(priv, &ssid_bssid);
+ band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+ chan = __ieee80211_get_channel(priv->wdev->wiphy,
+ ieee80211_channel_to_frequency(channel, band));
+
+ /* Find the BSS we want using available scan results */
+ bss = cfg80211_get_bss(priv->wdev->wiphy, chan, bss_info.bssid,
+ bss_info.ssid.ssid, bss_info.ssid.ssid_len,
+ WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+ if (!bss)
+ wiphy_warn(priv->wdev->wiphy, "assoc: bss %pM not in scan results\n",
+ bss_info.bssid);
+
+ ret = mwifiex_bss_start(priv, bss, &bss_info.ssid);
done:
return ret;
}
@@ -709,51 +722,9 @@ done:
static int mwifiex_rate_ioctl_get_rate_value(struct mwifiex_private *priv,
struct mwifiex_rate_cfg *rate_cfg)
{
- struct mwifiex_adapter *adapter = priv->adapter;
-
rate_cfg->is_rate_auto = priv->is_data_rate_auto;
- if (!priv->media_connected) {
- switch (adapter->config_bands) {
- case BAND_B:
- /* Return the lowest supported rate for B band */
- rate_cfg->rate = supported_rates_b[0] & 0x7f;
- break;
- case BAND_G:
- case BAND_G | BAND_GN:
- /* Return the lowest supported rate for G band */
- rate_cfg->rate = supported_rates_g[0] & 0x7f;
- break;
- case BAND_B | BAND_G:
- case BAND_A | BAND_B | BAND_G:
- case BAND_A | BAND_B:
- case BAND_A | BAND_B | BAND_G | BAND_AN | BAND_GN:
- case BAND_B | BAND_G | BAND_GN:
- /* Return the lowest supported rate for BG band */
- rate_cfg->rate = supported_rates_bg[0] & 0x7f;
- break;
- case BAND_A:
- case BAND_A | BAND_G:
- case BAND_A | BAND_G | BAND_AN | BAND_GN:
- case BAND_A | BAND_AN:
- /* Return the lowest supported rate for A band */
- rate_cfg->rate = supported_rates_a[0] & 0x7f;
- break;
- case BAND_GN:
- /* Return the lowest supported rate for N band */
- rate_cfg->rate = supported_rates_n[0] & 0x7f;
- break;
- default:
- dev_warn(adapter->dev, "invalid band %#x\n",
- adapter->config_bands);
- break;
- }
- } else {
- return mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_TX_RATE_QUERY,
- HostCmd_ACT_GEN_GET, 0, NULL);
- }
-
- return 0;
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
+ HostCmd_ACT_GEN_GET, 0, NULL);
}
/*
@@ -794,7 +765,7 @@ static int mwifiex_rate_ioctl_set_rate_value(struct mwifiex_private *priv,
if ((rate[i] & 0x7f) == (rate_cfg->rate & 0x7f))
break;
}
- if (!rate[i] || (i == MWIFIEX_SUPPORTED_RATES)) {
+ if ((i == MWIFIEX_SUPPORTED_RATES) || !rate[i]) {
dev_err(adapter->dev, "fixed data rate %#x is out "
"of range\n", rate_cfg->rate);
return -1;
@@ -860,10 +831,10 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
ret = mwifiex_rate_ioctl_cfg(priv, rate);
if (!ret) {
- if (rate && rate->is_rate_auto)
+ if (rate->is_rate_auto)
rate->rate = mwifiex_index_to_data_rate(priv->tx_rate,
priv->tx_htinfo);
- else if (rate)
+ else
rate->rate = priv->data_rate;
} else {
ret = -1;
@@ -1280,9 +1251,9 @@ int mwifiex_get_signal_info(struct mwifiex_private *priv,
if (!status) {
if (signal->selector & BCN_RSSI_AVG_MASK)
- priv->w_stats.qual.level = signal->bcn_rssi_avg;
+ priv->qual_level = signal->bcn_rssi_avg;
if (signal->selector & BCN_NF_AVG_MASK)
- priv->w_stats.qual.noise = signal->bcn_nf_avg;
+ priv->qual_noise = signal->bcn_nf_avg;
}
return status;
@@ -1341,18 +1312,8 @@ int
mwifiex_get_stats_info(struct mwifiex_private *priv,
struct mwifiex_ds_get_stats *log)
{
- int ret;
-
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_GET_LOG,
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_GET_LOG,
HostCmd_ACT_GEN_GET, 0, log);
-
- if (!ret) {
- priv->w_stats.discard.fragment = log->fcs_error;
- priv->w_stats.discard.retries = log->retry;
- priv->w_stats.discard.misc = log->ack_failure;
- }
-
- return ret;
}
/*
@@ -1594,7 +1555,7 @@ mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len)
{
struct mwifiex_ds_misc_gen_ie gen_ie;
- if (ie_len > IW_CUSTOM_MAX)
+ if (ie_len > IEEE_MAX_IE_SIZE)
return -EFAULT;
gen_ie.type = MWIFIEX_IE_TYPE_GEN_IE;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 1822bfad889..d97facd70e8 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -151,7 +151,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
skb_push(skb, INTF_HEADER_LEN);
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
- skb->data, skb->len, NULL);
+ skb, NULL);
switch (ret) {
case -EBUSY:
adapter->data_sent = true;
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 6190b2fa57a..a206f412875 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -78,7 +78,7 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
(struct txpd *) (head_ptr + INTF_HEADER_LEN);
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
- skb->data, skb->len, tx_param);
+ skb, tx_param);
}
switch (ret) {
@@ -87,7 +87,8 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
(adapter->pps_uapsd_mode) &&
(adapter->tx_lock_flag)) {
priv->adapter->tx_lock_flag = false;
- local_tx_pd->flags = 0;
+ if (local_tx_pd)
+ local_tx_pd->flags = 0;
}
dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
break;
@@ -160,43 +161,3 @@ done:
return 0;
}
-/*
- * Packet receive completion callback handler.
- *
- * This function calls another completion callback handler which
- * updates the statistics, and optionally updates the parent buffer
- * use count before freeing the received packet.
- */
-int mwifiex_recv_packet_complete(struct mwifiex_adapter *adapter,
- struct sk_buff *skb, int status)
-{
- struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
- struct mwifiex_rxinfo *rx_info_parent;
- struct mwifiex_private *priv;
- struct sk_buff *skb_parent;
- unsigned long flags;
-
- priv = adapter->priv[rx_info->bss_index];
-
- if (priv && (status == -1))
- priv->stats.rx_dropped++;
-
- if (rx_info->parent) {
- skb_parent = rx_info->parent;
- rx_info_parent = MWIFIEX_SKB_RXCB(skb_parent);
-
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
- --rx_info_parent->use_count;
-
- if (!rx_info_parent->use_count) {
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
- dev_kfree_skb_any(skb_parent);
- } else {
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
- }
- } else {
- dev_kfree_skb_any(skb);
- }
-
- return 0;
-}
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index d41291529bc..06976f517f6 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -185,13 +185,14 @@ int mwifiex_recv_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb)
* corresponding waiting function. Otherwise, it processes the
* IOCTL response and frees the response buffer.
*/
-int mwifiex_complete_cmd(struct mwifiex_adapter *adapter)
+int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
+ struct cmd_ctrl_node *cmd_node)
{
atomic_dec(&adapter->cmd_pending);
dev_dbg(adapter->dev, "cmd completed: status=%d\n",
adapter->cmd_wait_q.status);
- adapter->cmd_wait_q.condition = true;
+ *(cmd_node->condition) = true;
if (adapter->cmd_wait_q.status == -ETIMEDOUT)
dev_err(adapter->dev, "cmd timeout\n");
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
index 9506afc6c0e..f6d36b9654a 100644
--- a/drivers/net/wireless/mwifiex/util.h
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -22,11 +22,16 @@
static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb)
{
- return (struct mwifiex_rxinfo *)skb->cb;
+ return (struct mwifiex_rxinfo *)(skb->cb + sizeof(phys_addr_t));
}
static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
{
- return (struct mwifiex_txinfo *)skb->cb;
+ return (struct mwifiex_txinfo *)(skb->cb + sizeof(phys_addr_t));
+}
+
+static inline phys_addr_t *MWIFIEX_SKB_PACB(struct sk_buff *skb)
+{
+ return (phys_addr_t *)skb->cb;
}
#endif /* !_MWIFIEX_UTIL_H_ */
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 69e260b4171..6c239c3c824 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -121,7 +121,6 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
memcpy(ra_list->ra, ra, ETH_ALEN);
ra_list->total_pkts_size = 0;
- ra_list->total_pkts = 0;
dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
@@ -648,7 +647,6 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_adapter *adapter,
skb_queue_tail(&ra_list->skb_head, skb);
ra_list->total_pkts_size += skb->len;
- ra_list->total_pkts++;
atomic_inc(&priv->wmm.tx_pkts_queued);
@@ -975,6 +973,28 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
}
/*
+ * This function checks if 11n aggregation is possible.
+ */
+static int
+mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
+ struct mwifiex_ra_list_tbl *ptr,
+ int max_buf_size)
+{
+ int count = 0, total_size = 0;
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
+ total_size += skb->len;
+ if (total_size >= max_buf_size)
+ break;
+ if (++count >= MIN_NUM_AMSDU)
+ return true;
+ }
+
+ return false;
+}
+
+/*
* This function sends a single packet to firmware for transmission.
*/
static void
@@ -1001,7 +1021,6 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
ptr->total_pkts_size -= skb->len;
- ptr->total_pkts--;
if (!skb_queue_empty(&ptr->skb_head))
skb_next = skb_peek(&ptr->skb_head);
@@ -1027,7 +1046,6 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
skb_queue_tail(&ptr->skb_head, skb);
ptr->total_pkts_size += skb->len;
- ptr->total_pkts++;
tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
@@ -1107,8 +1125,8 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
tx_param.next_pkt_len =
((skb_next) ? skb_next->len +
sizeof(struct txpd) : 0);
- ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
- skb->data, skb->len, &tx_param);
+ ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, skb,
+ &tx_param);
switch (ret) {
case -EBUSY:
dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
@@ -1213,11 +1231,9 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
mwifiex_send_delba(priv, tid_del, ra, 1);
}
}
-/* Minimum number of AMSDU */
-#define MIN_NUM_AMSDU 2
-
if (mwifiex_is_amsdu_allowed(priv, tid) &&
- (ptr->total_pkts >= MIN_NUM_AMSDU))
+ mwifiex_is_11n_aggragation_possible(priv, ptr,
+ adapter->tx_buf_size))
mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN,
ptr_index, flags);
/* ra_list_spinlock has been freed in
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index da36dbf8d87..995695c28d5 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -4097,9 +4097,6 @@ static int mwl8k_set_key(struct ieee80211_hw *hw,
if (rc)
goto out;
-
- mwl8k_vif->is_hw_crypto_enabled = false;
-
}
out:
return rc;
@@ -4918,7 +4915,8 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
return ret;
}
-static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
+static int mwl8k_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mwl8k_priv *priv = hw->priv;
@@ -5465,7 +5463,7 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
goto fail;
for (i = 0; i < MWL8K_TX_WMM_QUEUES; i++) {
- rc = mwl8k_conf_tx(hw, i, &priv->wmm_params[i]);
+ rc = mwl8k_conf_tx(hw, NULL, i, &priv->wmm_params[i]);
if (rc)
goto fail;
}
@@ -5504,6 +5502,14 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
/* Set rssi values to dBm */
hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL;
+
+ /*
+ * Ask mac80211 to not to trigger PS mode
+ * based on PM bit of incoming frames.
+ */
+ if (priv->ap_fw)
+ hw->flags |= IEEE80211_HW_AP_LINK_PS;
+
hw->vif_data_size = sizeof(struct mwl8k_vif);
hw->sta_data_size = sizeof(struct mwl8k_sta);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index ef7efe839bb..b52acc4b408 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -2135,7 +2135,7 @@ static const struct net_device_ops orinoco_netdev_ops = {
.ndo_open = orinoco_open,
.ndo_stop = orinoco_stop,
.ndo_start_xmit = orinoco_xmit,
- .ndo_set_multicast_list = orinoco_set_multicast_list,
+ .ndo_set_rx_mode = orinoco_set_multicast_list,
.ndo_change_mtu = orinoco_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 811e87f8a34..0793e4265b4 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1562,7 +1562,7 @@ static const struct net_device_ops ezusb_netdev_ops = {
.ndo_open = orinoco_open,
.ndo_stop = orinoco_stop,
.ndo_start_xmit = ezusb_xmit,
- .ndo_set_multicast_list = orinoco_set_multicast_list,
+ .ndo_set_rx_mode = orinoco_set_multicast_list,
.ndo_change_mtu = orinoco_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index bbb9beb206b..33747e131a9 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -9,6 +9,7 @@
#include <linux/ieee80211.h>
#include <net/iw_handler.h>
#include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
#include "hermes.h"
#include "hermes_rid.h"
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 54cc0bba66b..8b6f363b3f7 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -145,6 +145,7 @@ static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
static int p54_generate_band(struct ieee80211_hw *dev,
struct p54_channel_list *list,
+ unsigned int *chan_num,
enum ieee80211_band band)
{
struct p54_common *priv = dev->priv;
@@ -190,7 +191,14 @@ static int p54_generate_band(struct ieee80211_hw *dev,
tmp->channels[j].band = chan->band;
tmp->channels[j].center_freq = chan->freq;
+ priv->survey[*chan_num].channel = &tmp->channels[j];
+ priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_CHANNEL_TIME |
+ SURVEY_INFO_CHANNEL_TIME_BUSY |
+ SURVEY_INFO_CHANNEL_TIME_TX;
+ tmp->channels[j].hw_value = (*chan_num);
j++;
+ (*chan_num)++;
}
if (j == 0) {
@@ -263,7 +271,7 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
{
struct p54_common *priv = dev->priv;
struct p54_channel_list *list;
- unsigned int i, j, max_channel_num;
+ unsigned int i, j, k, max_channel_num;
int ret = 0;
u16 freq;
@@ -283,6 +291,13 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
ret = -ENOMEM;
goto free;
}
+ priv->chan_num = max_channel_num;
+ priv->survey = kzalloc(sizeof(struct survey_info) * max_channel_num,
+ GFP_KERNEL);
+ if (!priv->survey) {
+ ret = -ENOMEM;
+ goto free;
+ }
list->max_entries = max_channel_num;
list->channels = kzalloc(sizeof(struct p54_channel_entry) *
@@ -321,8 +336,9 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
sort(list->channels, list->entries, sizeof(struct p54_channel_entry),
p54_compare_channels, NULL);
+ k = 0;
for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) {
- if (p54_generate_band(dev, list, i) == 0)
+ if (p54_generate_band(dev, list, &k, i) == 0)
j++;
}
if (j == 0) {
@@ -335,6 +351,10 @@ free:
kfree(list->channels);
kfree(list);
}
+ if (ret) {
+ kfree(priv->survey);
+ priv->survey = NULL;
+ }
return ret;
}
@@ -853,10 +873,12 @@ err:
kfree(priv->output_limit);
kfree(priv->curve_data);
kfree(priv->rssi_db);
+ kfree(priv->survey);
priv->iq_autocal = NULL;
priv->output_limit = NULL;
priv->curve_data = NULL;
priv->rssi_db = NULL;
+ priv->survey = NULL;
wiphy_err(dev->wiphy, "eeprom parse failed!\n");
return err;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index b6a061cbbde..53a3408931b 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -385,6 +385,7 @@ int p54_setup_mac(struct p54_common *priv)
setup->v2.osc_start_delay = cpu_to_le16(65535);
}
p54_tx(priv, skb);
+ priv->phy_idle = mode == P54_FILTER_TYPE_HIBERNATE;
return 0;
}
@@ -626,6 +627,7 @@ int p54_set_ps(struct p54_common *priv)
psm->exclude[0] = WLAN_EID_TIM;
p54_tx(priv, skb);
+ priv->phy_ps = mode != P54_PSM_CAM;
return 0;
}
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index a5a6d9e647b..ad9ae04d07a 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -204,13 +204,11 @@ static void p54_stop(struct ieee80211_hw *dev)
struct p54_common *priv = dev->priv;
int i;
- mutex_lock(&priv->conf_mutex);
priv->mode = NL80211_IFTYPE_UNSPECIFIED;
priv->softled_state = 0;
- p54_set_leds(priv);
-
cancel_delayed_work_sync(&priv->work);
-
+ mutex_lock(&priv->conf_mutex);
+ p54_set_leds(priv);
priv->stop(dev);
skb_queue_purge(&priv->tx_pending);
skb_queue_purge(&priv->tx_queue);
@@ -278,6 +276,42 @@ static void p54_remove_interface(struct ieee80211_hw *dev,
mutex_unlock(&priv->conf_mutex);
}
+static int p54_wait_for_stats(struct ieee80211_hw *dev)
+{
+ struct p54_common *priv = dev->priv;
+ int ret;
+
+ priv->update_stats = true;
+ ret = p54_fetch_statistics(priv);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_interruptible_timeout(&priv->stat_comp, HZ);
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void p54_reset_stats(struct p54_common *priv)
+{
+ struct ieee80211_channel *chan = priv->curchan;
+
+ if (chan) {
+ struct survey_info *info = &priv->survey[chan->hw_value];
+
+ /* only reset channel statistics, don't touch .filled, etc. */
+ info->channel_time = 0;
+ info->channel_time_busy = 0;
+ info->channel_time_tx = 0;
+ }
+
+ priv->update_stats = true;
+ priv->survey_raw.active = 0;
+ priv->survey_raw.cca = 0;
+ priv->survey_raw.tx = 0;
+}
+
static int p54_config(struct ieee80211_hw *dev, u32 changed)
{
int ret = 0;
@@ -288,19 +322,36 @@ static int p54_config(struct ieee80211_hw *dev, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_POWER)
priv->output_power = conf->power_level << 2;
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ struct ieee80211_channel *oldchan;
+ WARN_ON(p54_wait_for_stats(dev));
+ oldchan = priv->curchan;
+ priv->curchan = NULL;
ret = p54_scan(priv, P54_SCAN_EXIT, 0);
- if (ret)
+ if (ret) {
+ priv->curchan = oldchan;
goto out;
+ }
+ /*
+ * TODO: Use the LM_SCAN_TRAP to determine the current
+ * operating channel.
+ */
+ priv->curchan = priv->hw->conf.channel;
+ p54_reset_stats(priv);
+ WARN_ON(p54_fetch_statistics(priv));
}
if (changed & IEEE80211_CONF_CHANGE_PS) {
+ WARN_ON(p54_wait_for_stats(dev));
ret = p54_set_ps(priv);
if (ret)
goto out;
+ WARN_ON(p54_wait_for_stats(dev));
}
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ WARN_ON(p54_wait_for_stats(dev));
ret = p54_setup_mac(priv);
if (ret)
goto out;
+ WARN_ON(p54_wait_for_stats(dev));
}
out:
@@ -353,7 +404,8 @@ static void p54_configure_filter(struct ieee80211_hw *dev,
p54_set_groupfilter(priv);
}
-static int p54_conf_tx(struct ieee80211_hw *dev, u16 queue,
+static int p54_conf_tx(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct p54_common *priv = dev->priv;
@@ -384,7 +436,9 @@ static void p54_work(struct work_struct *work)
* 2. cancel stuck frames / reset the device if necessary.
*/
- p54_fetch_statistics(priv);
+ mutex_lock(&priv->conf_mutex);
+ WARN_ON_ONCE(p54_fetch_statistics(priv));
+ mutex_unlock(&priv->conf_mutex);
}
static int p54_get_stats(struct ieee80211_hw *dev,
@@ -541,16 +595,47 @@ static int p54_get_survey(struct ieee80211_hw *dev, int idx,
struct survey_info *survey)
{
struct p54_common *priv = dev->priv;
- struct ieee80211_conf *conf = &dev->conf;
+ struct ieee80211_channel *chan;
+ int err, tries;
+ bool in_use = false;
- if (idx != 0)
+ if (idx >= priv->chan_num)
return -ENOENT;
- survey->channel = conf->channel;
- survey->filled = SURVEY_INFO_NOISE_DBM;
- survey->noise = clamp_t(s8, priv->noise, -128, 127);
+#define MAX_TRIES 1
+ for (tries = 0; tries < MAX_TRIES; tries++) {
+ chan = priv->curchan;
+ if (chan && chan->hw_value == idx) {
+ mutex_lock(&priv->conf_mutex);
+ err = p54_wait_for_stats(dev);
+ mutex_unlock(&priv->conf_mutex);
+ if (err)
+ return err;
+
+ in_use = true;
+ }
- return 0;
+ memcpy(survey, &priv->survey[idx], sizeof(*survey));
+
+ if (in_use) {
+ /* test if the reported statistics are valid. */
+ if (survey->channel_time != 0) {
+ survey->filled |= SURVEY_INFO_IN_USE;
+ } else {
+ /*
+ * hw/fw has not accumulated enough sample sets.
+ * Wait for 100ms, this ought to be enough to
+ * to get at least one non-null set of channel
+ * usage statistics.
+ */
+ msleep(100);
+ continue;
+ }
+ }
+ return 0;
+ }
+ return -ETIMEDOUT;
+#undef MAX_TRIES
}
static unsigned int p54_flush_count(struct p54_common *priv)
@@ -686,11 +771,14 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
mutex_init(&priv->conf_mutex);
mutex_init(&priv->eeprom_mutex);
+ init_completion(&priv->stat_comp);
init_completion(&priv->eeprom_comp);
init_completion(&priv->beacon_comp);
INIT_DELAYED_WORK(&priv->work, p54_work);
memset(&priv->mc_maclist[0], ~0, ETH_ALEN);
+ priv->curchan = NULL;
+ p54_reset_stats(priv);
return dev;
}
EXPORT_SYMBOL_GPL(p54_init_common);
@@ -730,11 +818,13 @@ void p54_free_common(struct ieee80211_hw *dev)
kfree(priv->curve_data);
kfree(priv->rssi_db);
kfree(priv->used_rxkeys);
+ kfree(priv->survey);
priv->iq_autocal = NULL;
priv->output_limit = NULL;
priv->curve_data = NULL;
priv->rssi_db = NULL;
priv->used_rxkeys = NULL;
+ priv->survey = NULL;
ieee80211_free_hw(dev);
}
EXPORT_SYMBOL_GPL(p54_free_common);
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 799d05e1259..452fa3a64aa 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -199,6 +199,22 @@ struct p54_common {
u8 tx_diversity_mask;
unsigned int output_power;
struct p54_rssi_db_entry *cur_rssi;
+ struct ieee80211_channel *curchan;
+ struct survey_info *survey;
+ unsigned int chan_num;
+ struct completion stat_comp;
+ bool update_stats;
+ struct {
+ unsigned int timestamp;
+ unsigned int cached_cca;
+ unsigned int cached_tx;
+ unsigned int cached_rssi;
+ u64 active;
+ u64 cca;
+ u64 tx;
+ u64 rssi;
+ } survey_raw;
+
int noise;
/* calibration, output power limit and rssi<->dBm conversation data */
struct pda_iq_autocal_entry *iq_autocal;
@@ -220,6 +236,8 @@ struct p54_common {
u32 basic_rate_mask;
u16 aid;
u8 coverage_class;
+ bool phy_idle;
+ bool phy_ps;
bool powersave_override;
__le32 beacon_req_id;
struct completion beacon_comp;
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 6d9204fef90..f18df82eeb9 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -41,7 +41,6 @@
#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */
MODULE_FIRMWARE("3826.arm");
-MODULE_ALIAS("stlc45xx");
/*
* gpios should be handled in board files and provided via platform data,
@@ -738,3 +737,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
MODULE_ALIAS("spi:cx3110x");
MODULE_ALIAS("spi:p54spi");
+MODULE_ALIAS("spi:stlc45xx");
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 042842e704d..f485784a60a 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
+#include <asm/div64.h>
#include <net/mac80211.h>
@@ -507,6 +508,8 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
struct p54_statistics *stats = (struct p54_statistics *) hdr->data;
struct sk_buff *tmp;
+ struct ieee80211_channel *chan;
+ unsigned int i, rssi, tx, cca, dtime, dtotal, dcca, dtx, drssi, unit;
u32 tsf32;
if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
@@ -523,8 +526,75 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
priv->noise = p54_rssi_to_dbm(priv, le32_to_cpu(stats->noise));
+ /*
+ * STSW450X LMAC API page 26 - 3.8 Statistics
+ * "The exact measurement period can be derived from the
+ * timestamp member".
+ */
+ dtime = tsf32 - priv->survey_raw.timestamp;
+
+ /*
+ * STSW450X LMAC API page 26 - 3.8.1 Noise histogram
+ * The LMAC samples RSSI, CCA and transmit state at regular
+ * periods (typically 8 times per 1k [as in 1024] usec).
+ */
+ cca = le32_to_cpu(stats->sample_cca);
+ tx = le32_to_cpu(stats->sample_tx);
+ rssi = 0;
+ for (i = 0; i < ARRAY_SIZE(stats->sample_noise); i++)
+ rssi += le32_to_cpu(stats->sample_noise[i]);
+
+ dcca = cca - priv->survey_raw.cached_cca;
+ drssi = rssi - priv->survey_raw.cached_rssi;
+ dtx = tx - priv->survey_raw.cached_tx;
+ dtotal = dcca + drssi + dtx;
+
+ /*
+ * update statistics when more than a second is over since the
+ * last call, or when a update is badly needed.
+ */
+ if (dtotal && (priv->update_stats || dtime >= USEC_PER_SEC) &&
+ dtime >= dtotal) {
+ priv->survey_raw.timestamp = tsf32;
+ priv->update_stats = false;
+ unit = dtime / dtotal;
+
+ if (dcca) {
+ priv->survey_raw.cca += dcca * unit;
+ priv->survey_raw.cached_cca = cca;
+ }
+ if (dtx) {
+ priv->survey_raw.tx += dtx * unit;
+ priv->survey_raw.cached_tx = tx;
+ }
+ if (drssi) {
+ priv->survey_raw.rssi += drssi * unit;
+ priv->survey_raw.cached_rssi = rssi;
+ }
+
+ /* 1024 usec / 8 times = 128 usec / time */
+ if (!(priv->phy_ps || priv->phy_idle))
+ priv->survey_raw.active += dtotal * unit;
+ else
+ priv->survey_raw.active += (dcca + dtx) * unit;
+ }
+
+ chan = priv->curchan;
+ if (chan) {
+ struct survey_info *survey = &priv->survey[chan->hw_value];
+ survey->noise = clamp_t(s8, priv->noise, -128, 127);
+ survey->channel_time = priv->survey_raw.active;
+ survey->channel_time_tx = priv->survey_raw.tx;
+ survey->channel_time_busy = priv->survey_raw.tx +
+ priv->survey_raw.cca;
+ do_div(survey->channel_time, 1024);
+ do_div(survey->channel_time_tx, 1024);
+ do_div(survey->channel_time_busy, 1024);
+ }
+
tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
dev_kfree_skb_any(tmp);
+ complete(&priv->stat_comp);
}
static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb)
@@ -619,7 +689,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
- if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)
+ if (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)
*flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL;
if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 2a06ebcd67c..0021e494851 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -273,7 +273,7 @@ static const struct net_device_ops ray_netdev_ops = {
.ndo_start_xmit = ray_dev_start_xmit,
.ndo_set_config = ray_dev_config,
.ndo_get_stats = ray_get_stats,
- .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 29f93893066..0c13840a7de 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -36,13 +36,11 @@
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
-#include <linux/wireless.h>
#include <linux/ieee80211.h>
#include <linux/if_arp.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-#include <net/iw_handler.h>
#include <net/cfg80211.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/rndis_host.h>
@@ -3392,7 +3390,7 @@ static const struct net_device_ops rndis_wlan_netdev_ops = {
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = rndis_wlan_set_multicast_list,
+ .ndo_set_rx_mode = rndis_wlan_set_multicast_list,
};
static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 76bcc354797..3a6b40239bc 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -645,11 +645,6 @@ static void rt2400pci_start_queue(struct data_queue *queue)
rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
break;
case QID_BEACON:
- /*
- * Allow the tbtt tasklet to be scheduled.
- */
- tasklet_enable(&rt2x00dev->tbtt_tasklet);
-
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -715,7 +710,7 @@ static void rt2400pci_stop_queue(struct data_queue *queue)
/*
* Wait for possibly running tbtt tasklets.
*/
- tasklet_disable(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
break;
default:
break;
@@ -982,12 +977,6 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
if (state == STATE_RADIO_IRQ_ON) {
rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
rt2x00pci_register_write(rt2x00dev, CSR7, reg);
-
- /*
- * Enable tasklets.
- */
- tasklet_enable(&rt2x00dev->txstatus_tasklet);
- tasklet_enable(&rt2x00dev->rxdone_tasklet);
}
/*
@@ -1011,8 +1000,9 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
* Ensure that all tasklets are finished before
* disabling the interrupts.
*/
- tasklet_disable(&rt2x00dev->txstatus_tasklet);
- tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ tasklet_kill(&rt2x00dev->txstatus_tasklet);
+ tasklet_kill(&rt2x00dev->rxdone_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
}
}
@@ -1249,7 +1239,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
* call, we must decrease the higher 32bits with 1 to get
* to correct value.
*/
- tsf = rt2x00dev->ops->hw->get_tsf(rt2x00dev->hw);
+ tsf = rt2x00dev->ops->hw->get_tsf(rt2x00dev->hw, NULL);
rx_low = rt2x00_get_field32(word4, RXD_W4_RX_END_TIME);
rx_high = upper_32_bits(tsf);
@@ -1347,22 +1337,25 @@ static void rt2400pci_txstatus_tasklet(unsigned long data)
/*
* Enable all TXDONE interrupts again.
*/
- spin_lock_irq(&rt2x00dev->irqmask_lock);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) {
+ spin_lock_irq(&rt2x00dev->irqmask_lock);
- rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
- rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
- rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
- rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
- rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
- spin_unlock_irq(&rt2x00dev->irqmask_lock);
+ spin_unlock_irq(&rt2x00dev->irqmask_lock);
+ }
}
static void rt2400pci_tbtt_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
rt2x00lib_beacondone(rt2x00dev);
- rt2400pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2400pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
}
static void rt2400pci_rxdone_tasklet(unsigned long data)
@@ -1370,7 +1363,7 @@ static void rt2400pci_rxdone_tasklet(unsigned long data)
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
if (rt2x00pci_rxdone(rt2x00dev))
tasklet_schedule(&rt2x00dev->rxdone_tasklet);
- else
+ else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
}
@@ -1655,7 +1648,8 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* IEEE80211 stack callback functions.
*/
-static int rt2400pci_conf_tx(struct ieee80211_hw *hw, u16 queue,
+static int rt2400pci_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -1668,7 +1662,7 @@ static int rt2400pci_conf_tx(struct ieee80211_hw *hw, u16 queue,
if (queue != 0)
return -EINVAL;
- if (rt2x00mac_conf_tx(hw, queue, params))
+ if (rt2x00mac_conf_tx(hw, vif, queue, params))
return -EINVAL;
/*
@@ -1680,7 +1674,8 @@ static int rt2400pci_conf_tx(struct ieee80211_hw *hw, u16 queue,
return 0;
}
-static u64 rt2400pci_get_tsf(struct ieee80211_hw *hw)
+static u64 rt2400pci_get_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
u64 tsf;
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index c288d951c03..dcc0e1fcca7 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -735,11 +735,6 @@ static void rt2500pci_start_queue(struct data_queue *queue)
rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
break;
case QID_BEACON:
- /*
- * Allow the tbtt tasklet to be scheduled.
- */
- tasklet_enable(&rt2x00dev->tbtt_tasklet);
-
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -805,7 +800,7 @@ static void rt2500pci_stop_queue(struct data_queue *queue)
/*
* Wait for possibly running tbtt tasklets.
*/
- tasklet_disable(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
break;
default:
break;
@@ -1137,12 +1132,6 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
if (state == STATE_RADIO_IRQ_ON) {
rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
rt2x00pci_register_write(rt2x00dev, CSR7, reg);
-
- /*
- * Enable tasklets.
- */
- tasklet_enable(&rt2x00dev->txstatus_tasklet);
- tasklet_enable(&rt2x00dev->rxdone_tasklet);
}
/*
@@ -1165,8 +1154,9 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
/*
* Ensure that all tasklets are finished.
*/
- tasklet_disable(&rt2x00dev->txstatus_tasklet);
- tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ tasklet_kill(&rt2x00dev->txstatus_tasklet);
+ tasklet_kill(&rt2x00dev->rxdone_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
}
}
@@ -1479,22 +1469,25 @@ static void rt2500pci_txstatus_tasklet(unsigned long data)
/*
* Enable all TXDONE interrupts again.
*/
- spin_lock_irq(&rt2x00dev->irqmask_lock);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) {
+ spin_lock_irq(&rt2x00dev->irqmask_lock);
- rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
- rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
- rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
- rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
- rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
- spin_unlock_irq(&rt2x00dev->irqmask_lock);
+ spin_unlock_irq(&rt2x00dev->irqmask_lock);
+ }
}
static void rt2500pci_tbtt_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
rt2x00lib_beacondone(rt2x00dev);
- rt2500pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2500pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
}
static void rt2500pci_rxdone_tasklet(unsigned long data)
@@ -1502,7 +1495,7 @@ static void rt2500pci_rxdone_tasklet(unsigned long data)
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
if (rt2x00pci_rxdone(rt2x00dev))
tasklet_schedule(&rt2x00dev->rxdone_tasklet);
- else
+ else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
}
@@ -1973,7 +1966,8 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* IEEE80211 stack callback functions.
*/
-static u64 rt2500pci_get_tsf(struct ieee80211_hw *hw)
+static u64 rt2500pci_get_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
u64 tsf;
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index c69a7d71f4c..4778620347c 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -664,6 +664,9 @@
/*
* LED_CFG: LED control
+ * ON_PERIOD: LED active time (ms) during TX (only used for LED mode 1)
+ * OFF_PERIOD: LED inactive time (ms) during TX (only used for LED mode 1)
+ * SLOW_BLINK_PERIOD: LED blink interval in seconds (only used for LED mode 2)
* color LED's:
* 0: off
* 1: blinking upon TX2
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 0019dfd8fb0..3f183a15186 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -493,7 +493,7 @@ void rt2800_write_tx_data(struct queue_entry *entry,
rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->u.ht.ba_size);
rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
- txdesc->key_idx : 0xff);
+ txdesc->key_idx : txdesc->u.ht.wcid);
rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
txdesc->length);
rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, entry->queue->qid);
@@ -601,7 +601,7 @@ void rt2800_process_rxwi(struct queue_entry *entry,
}
EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
-void rt2800_txdone_entry(struct queue_entry *entry, u32 status)
+void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
@@ -609,13 +609,11 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status)
u32 word;
u16 mcs, real_mcs;
int aggr, ampdu;
- __le32 *txwi;
/*
* Obtain the status about this packet.
*/
txdesc.flags = 0;
- txwi = rt2800_drv_get_txwi(entry);
rt2x00_desc_read(txwi, 0, &word);
mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
@@ -899,28 +897,12 @@ static void rt2800_brightness_set(struct led_classdev *led_cdev,
}
}
-static int rt2800_blink_set(struct led_classdev *led_cdev,
- unsigned long *delay_on, unsigned long *delay_off)
-{
- struct rt2x00_led *led =
- container_of(led_cdev, struct rt2x00_led, led_dev);
- u32 reg;
-
- rt2800_register_read(led->rt2x00dev, LED_CFG, &reg);
- rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
- rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
- rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
-
- return 0;
-}
-
static void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
struct rt2x00_led *led, enum led_type type)
{
led->rt2x00dev = rt2x00dev;
led->type = type;
led->led_dev.brightness_set = rt2800_brightness_set;
- led->led_dev.blink_set = rt2800_blink_set;
led->flags = LED_INITIALIZED;
}
#endif /* CONFIG_RT2X00_LIB_LEDS */
@@ -928,11 +910,51 @@ static void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
/*
* Configuration handlers.
*/
-static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
- struct rt2x00lib_crypto *crypto,
- struct ieee80211_key_conf *key)
+static void rt2800_config_wcid(struct rt2x00_dev *rt2x00dev,
+ const u8 *address,
+ int wcid)
{
struct mac_wcid_entry wcid_entry;
+ u32 offset;
+
+ offset = MAC_WCID_ENTRY(wcid);
+
+ memset(&wcid_entry, 0xff, sizeof(wcid_entry));
+ if (address)
+ memcpy(wcid_entry.mac, address, ETH_ALEN);
+
+ rt2800_register_multiwrite(rt2x00dev, offset,
+ &wcid_entry, sizeof(wcid_entry));
+}
+
+static void rt2800_delete_wcid_attr(struct rt2x00_dev *rt2x00dev, int wcid)
+{
+ u32 offset;
+ offset = MAC_WCID_ATTR_ENTRY(wcid);
+ rt2800_register_write(rt2x00dev, offset, 0);
+}
+
+static void rt2800_config_wcid_attr_bssidx(struct rt2x00_dev *rt2x00dev,
+ int wcid, u32 bssidx)
+{
+ u32 offset = MAC_WCID_ATTR_ENTRY(wcid);
+ u32 reg;
+
+ /*
+ * The BSS Idx numbers is split in a main value of 3 bits,
+ * and a extended field for adding one additional bit to the value.
+ */
+ rt2800_register_read(rt2x00dev, offset, &reg);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX, (bssidx & 0x7));
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX_EXT,
+ (bssidx & 0x8) >> 3);
+ rt2800_register_write(rt2x00dev, offset, reg);
+}
+
+static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00lib_crypto *crypto,
+ struct ieee80211_key_conf *key)
+{
struct mac_iveiv_entry iveiv_entry;
u32 offset;
u32 reg;
@@ -952,14 +974,16 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
(crypto->cipher & 0x7));
rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER_EXT,
(crypto->cipher & 0x8) >> 3);
- rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
- (crypto->bssidx & 0x7));
- rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX_EXT,
- (crypto->bssidx & 0x8) >> 3);
rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
rt2800_register_write(rt2x00dev, offset, reg);
} else {
- rt2800_register_write(rt2x00dev, offset, 0);
+ /* Delete the cipher without touching the bssidx */
+ rt2800_register_read(rt2x00dev, offset, &reg);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB, 0);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER, 0);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER_EXT, 0);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, 0);
+ rt2800_register_write(rt2x00dev, offset, reg);
}
offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
@@ -972,14 +996,6 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
iveiv_entry.iv[3] |= key->keyidx << 6;
rt2800_register_multiwrite(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
-
- offset = MAC_WCID_ENTRY(key->hw_key_idx);
-
- memset(&wcid_entry, 0, sizeof(wcid_entry));
- if (crypto->cmd == SET_KEY)
- memcpy(wcid_entry.mac, crypto->address, ETH_ALEN);
- rt2800_register_multiwrite(rt2x00dev, offset,
- &wcid_entry, sizeof(wcid_entry));
}
int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
@@ -1026,20 +1042,24 @@ int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
/*
* Update WCID information
*/
- rt2800_config_wcid_attr(rt2x00dev, crypto, key);
+ rt2800_config_wcid(rt2x00dev, crypto->address, key->hw_key_idx);
+ rt2800_config_wcid_attr_bssidx(rt2x00dev, key->hw_key_idx,
+ crypto->bssidx);
+ rt2800_config_wcid_attr_cipher(rt2x00dev, crypto, key);
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_config_shared_key);
-static inline int rt2800_find_pairwise_keyslot(struct rt2x00_dev *rt2x00dev)
+static inline int rt2800_find_wcid(struct rt2x00_dev *rt2x00dev)
{
+ struct mac_wcid_entry wcid_entry;
int idx;
- u32 offset, reg;
+ u32 offset;
/*
- * Search for the first free pairwise key entry and return the
- * corresponding index.
+ * Search for the first free WCID entry and return the corresponding
+ * index.
*
* Make sure the WCID starts _after_ the last possible shared key
* entry (>32).
@@ -1049,11 +1069,17 @@ static inline int rt2800_find_pairwise_keyslot(struct rt2x00_dev *rt2x00dev)
* first 222 entries.
*/
for (idx = 33; idx <= 222; idx++) {
- offset = MAC_WCID_ATTR_ENTRY(idx);
- rt2800_register_read(rt2x00dev, offset, &reg);
- if (!reg)
+ offset = MAC_WCID_ENTRY(idx);
+ rt2800_register_multiread(rt2x00dev, offset, &wcid_entry,
+ sizeof(wcid_entry));
+ if (is_broadcast_ether_addr(wcid_entry.mac))
return idx;
}
+
+ /*
+ * Use -1 to indicate that we don't have any more space in the WCID
+ * table.
+ */
return -1;
}
@@ -1063,13 +1089,15 @@ int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
{
struct hw_key_entry key_entry;
u32 offset;
- int idx;
if (crypto->cmd == SET_KEY) {
- idx = rt2800_find_pairwise_keyslot(rt2x00dev);
- if (idx < 0)
+ /*
+ * Allow key configuration only for STAs that are
+ * known by the hw.
+ */
+ if (crypto->wcid < 0)
return -ENOSPC;
- key->hw_key_idx = idx;
+ key->hw_key_idx = crypto->wcid;
memcpy(key_entry.key, crypto->key,
sizeof(key_entry.key));
@@ -1086,12 +1114,59 @@ int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
/*
* Update WCID information
*/
- rt2800_config_wcid_attr(rt2x00dev, crypto, key);
+ rt2800_config_wcid_attr_cipher(rt2x00dev, crypto, key);
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_config_pairwise_key);
+int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ int wcid;
+ struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
+
+ /*
+ * Find next free WCID.
+ */
+ wcid = rt2800_find_wcid(rt2x00dev);
+
+ /*
+ * Store selected wcid even if it is invalid so that we can
+ * later decide if the STA is uploaded into the hw.
+ */
+ sta_priv->wcid = wcid;
+
+ /*
+ * No space left in the device, however, we can still communicate
+ * with the STA -> No error.
+ */
+ if (wcid < 0)
+ return 0;
+
+ /*
+ * Clean up WCID attributes and write STA address to the device.
+ */
+ rt2800_delete_wcid_attr(rt2x00dev, wcid);
+ rt2800_config_wcid(rt2x00dev, sta->addr, wcid);
+ rt2800_config_wcid_attr_bssidx(rt2x00dev, wcid,
+ rt2x00lib_get_bssidx(rt2x00dev, vif));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800_sta_add);
+
+int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, int wcid)
+{
+ /*
+ * Remove WCID entry, no need to clean the attributes as they will
+ * get renewed when the WCID is reused.
+ */
+ rt2800_config_wcid(rt2x00dev, NULL, wcid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800_sta_remove);
+
void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
const unsigned int filter_flags)
{
@@ -2771,11 +2846,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
SHARED_KEY_MODE_ENTRY(i), 0);
for (i = 0; i < 256; i++) {
- static const u32 wcid[2] = { 0xffffffff, 0x00ffffff };
- rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
- wcid, sizeof(wcid));
-
- rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 0);
+ rt2800_config_wcid(rt2x00dev, NULL, i);
+ rt2800_delete_wcid_attr(rt2x00dev, i);
rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
}
@@ -4326,7 +4398,8 @@ int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
}
EXPORT_SYMBOL_GPL(rt2800_set_rts_threshold);
-int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
+int rt2800_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -4342,7 +4415,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
* we are free to update the registers based on the value
* in the queue parameter.
*/
- retval = rt2x00mac_conf_tx(hw, queue_idx, params);
+ retval = rt2x00mac_conf_tx(hw, vif, queue_idx, params);
if (retval)
return retval;
@@ -4394,7 +4467,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
}
EXPORT_SYMBOL_GPL(rt2800_conf_tx);
-u64 rt2800_get_tsf(struct ieee80211_hw *hw)
+u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
u64 tsf;
@@ -4414,8 +4487,19 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn,
u8 buf_size)
{
+ struct rt2x00_sta *sta_priv = (struct rt2x00_sta *)sta->drv_priv;
int ret = 0;
+ /*
+ * Don't allow aggregation for stations the hardware isn't aware
+ * of because tx status reports for frames to an unknown station
+ * always contain wcid=255 and thus we can't distinguish between
+ * multiple stations which leads to unwanted situations when the
+ * hw reorders frames due to aggregation.
+ */
+ if (sta_priv->wcid < 0)
+ return 1;
+
switch (action) {
case IEEE80211_AMPDU_RX_START:
case IEEE80211_AMPDU_RX_STOP:
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 69deb3148ae..8c3c281904f 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -152,7 +152,7 @@ void rt2800_write_tx_data(struct queue_entry *entry,
struct txentry_desc *txdesc);
void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc);
-void rt2800_txdone_entry(struct queue_entry *entry, u32 status);
+void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32* txwi);
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
void rt2800_clear_beacon(struct queue_entry *entry);
@@ -166,6 +166,9 @@ int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key);
+int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, int wcid);
void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
const unsigned int filter_flags);
void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
@@ -194,9 +197,10 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev);
void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
u16 *iv16);
int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
-int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
+int rt2800_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue_idx,
const struct ieee80211_tx_queue_params *params);
-u64 rt2800_get_tsf(struct ieee80211_hw *hw);
+u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn,
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index ebc17ad61de..da48c8ac27b 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -200,13 +200,6 @@ static void rt2800pci_start_queue(struct data_queue *queue)
rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
break;
case QID_BEACON:
- /*
- * Allow beacon tasklets to be scheduled for periodic
- * beacon updates.
- */
- tasklet_enable(&rt2x00dev->tbtt_tasklet);
- tasklet_enable(&rt2x00dev->pretbtt_tasklet);
-
rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
@@ -269,10 +262,13 @@ static void rt2800pci_stop_queue(struct data_queue *queue)
rt2x00pci_register_write(rt2x00dev, INT_TIMER_EN, reg);
/*
- * Wait for tbtt tasklets to finish.
+ * Wait for current invocation to finish. The tasklet
+ * won't be scheduled anymore afterwards since we disabled
+ * the TBTT and PRE TBTT timer.
*/
- tasklet_disable(&rt2x00dev->tbtt_tasklet);
- tasklet_disable(&rt2x00dev->pretbtt_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+
break;
default:
break;
@@ -437,14 +433,6 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
if (state == STATE_RADIO_IRQ_ON) {
rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
-
- /*
- * Enable tasklets. The beacon related tasklets are
- * enabled when the beacon queue is started.
- */
- tasklet_enable(&rt2x00dev->txstatus_tasklet);
- tasklet_enable(&rt2x00dev->rxdone_tasklet);
- tasklet_enable(&rt2x00dev->autowake_tasklet);
}
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
@@ -472,12 +460,13 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
if (state == STATE_RADIO_IRQ_OFF) {
/*
- * Ensure that all tasklets are finished before
- * disabling the interrupts.
+ * Wait for possibly running tasklets to finish.
*/
- tasklet_disable(&rt2x00dev->txstatus_tasklet);
- tasklet_disable(&rt2x00dev->rxdone_tasklet);
- tasklet_disable(&rt2x00dev->autowake_tasklet);
+ tasklet_kill(&rt2x00dev->txstatus_tasklet);
+ tasklet_kill(&rt2x00dev->rxdone_tasklet);
+ tasklet_kill(&rt2x00dev->autowake_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->pretbtt_tasklet);
}
}
@@ -630,11 +619,11 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
/*
* Initialize TX descriptor
*/
- rt2x00_desc_read(txd, 0, &word);
+ word = 0;
rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
rt2x00_desc_write(txd, 0, word);
- rt2x00_desc_read(txd, 1, &word);
+ word = 0;
rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
!test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
@@ -645,12 +634,12 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
rt2x00_desc_write(txd, 1, word);
- rt2x00_desc_read(txd, 2, &word);
+ word = 0;
rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
skbdesc->skb_dma + TXWI_DESC_SIZE);
rt2x00_desc_write(txd, 2, word);
- rt2x00_desc_read(txd, 3, &word);
+ word = 0;
rt2x00_set_field32(&word, TXD_W3_WIV,
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
@@ -771,7 +760,7 @@ static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
}
entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- rt2800_txdone_entry(entry, status);
+ rt2800_txdone_entry(entry, status, rt2800pci_get_txwi(entry));
if (--max_tx_done == 0)
break;
@@ -813,14 +802,16 @@ static void rt2800pci_pretbtt_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
rt2x00lib_pretbtt(rt2x00dev);
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
}
static void rt2800pci_tbtt_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
rt2x00lib_beacondone(rt2x00dev);
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
}
static void rt2800pci_rxdone_tasklet(unsigned long data)
@@ -828,7 +819,7 @@ static void rt2800pci_rxdone_tasklet(unsigned long data)
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
if (rt2x00pci_rxdone(rt2x00dev))
tasklet_schedule(&rt2x00dev->rxdone_tasklet);
- else
+ else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
}
@@ -836,7 +827,8 @@ static void rt2800pci_autowake_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
rt2800pci_wakeup(rt2x00dev);
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
}
static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
@@ -1023,6 +1015,8 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.get_tkip_seq = rt2800_get_tkip_seq,
.set_rts_threshold = rt2800_set_rts_threshold,
+ .sta_add = rt2x00mac_sta_add,
+ .sta_remove = rt2x00mac_sta_remove,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2800_conf_tx,
.get_tsf = rt2800_get_tsf,
@@ -1084,6 +1078,8 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.config_erp = rt2800_config_erp,
.config_ant = rt2800_config_ant,
.config = rt2800_config,
+ .sta_add = rt2800_sta_add,
+ .sta_remove = rt2800_sta_remove,
};
static const struct data_queue_desc rt2800pci_queue_rx = {
@@ -1161,6 +1157,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
#endif
#ifdef CONFIG_RT2800PCI_RT53XX
{ PCI_DEVICE(0x1814, 0x5390) },
+ { PCI_DEVICE(0x1814, 0x539a) },
{ PCI_DEVICE(0x1814, 0x539f) },
#endif
{ 0, }
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index dbf501ca317..f1565792f27 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -542,7 +542,8 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
}
if (entry)
- rt2800_txdone_entry(entry, reg);
+ rt2800_txdone_entry(entry, reg,
+ rt2800usb_get_txwi(entry));
}
}
@@ -759,6 +760,8 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.get_tkip_seq = rt2800_get_tkip_seq,
.set_rts_threshold = rt2800_set_rts_threshold,
+ .sta_add = rt2x00mac_sta_add,
+ .sta_remove = rt2x00mac_sta_remove,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2800_conf_tx,
.get_tsf = rt2800_get_tsf,
@@ -816,6 +819,8 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.config_erp = rt2800_config_erp,
.config_ant = rt2800_config_ant,
.config = rt2800_config,
+ .sta_add = rt2800_sta_add,
+ .sta_remove = rt2800_sta_remove,
};
static const struct data_queue_desc rt2800usb_queue_rx = {
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index f82bfeb79eb..2ec5c00235e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -478,6 +478,8 @@ struct rt2x00lib_crypto {
u8 key[16];
u8 tx_mic[8];
u8 rx_mic[8];
+
+ int wcid;
};
/*
@@ -512,6 +514,19 @@ struct rt2x00intf_conf {
};
/*
+ * Private structure for storing STA details
+ * wcid: Wireless Client ID
+ */
+struct rt2x00_sta {
+ int wcid;
+};
+
+static inline struct rt2x00_sta* sta_to_rt2x00_sta(struct ieee80211_sta *sta)
+{
+ return (struct rt2x00_sta *)sta->drv_priv;
+}
+
+/*
* rt2x00lib callback functions.
*/
struct rt2x00lib_ops {
@@ -620,6 +635,11 @@ struct rt2x00lib_ops {
void (*config) (struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_conf *libconf,
const unsigned int changed_flags);
+ int (*sta_add) (struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*sta_remove) (struct rt2x00_dev *rt2x00dev,
+ int wcid);
};
/*
@@ -1226,6 +1246,12 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
/*
+ * Utility functions.
+ */
+u32 rt2x00lib_get_bssidx(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif);
+
+/*
* Interrupt context handlers.
*/
void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev);
@@ -1261,6 +1287,10 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
#else
#define rt2x00mac_set_key NULL
#endif /* CONFIG_RT2X00_LIB_CRYPTO */
+int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw);
void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw);
int rt2x00mac_get_stats(struct ieee80211_hw *hw,
@@ -1269,7 +1299,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changes);
-int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params);
void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 0955c941317..e1fb2a8569b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -33,6 +33,22 @@
#include "rt2x00lib.h"
/*
+ * Utility functions.
+ */
+u32 rt2x00lib_get_bssidx(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif)
+{
+ /*
+ * When in STA mode, bssidx is always 0 otherwise local_address[5]
+ * contains the bss number, see BSS_ID_MASK comments for details.
+ */
+ if (rt2x00dev->intf_sta_count)
+ return 0;
+ return vif->addr[5] & (rt2x00dev->ops->max_ap_intf - 1);
+}
+EXPORT_SYMBOL_GPL(rt2x00lib_get_bssidx);
+
+/*
* Radio control handlers.
*/
int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -915,6 +931,11 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;
/*
+ * Tell mac80211 about the size of our private STA structure.
+ */
+ rt2x00dev->hw->sta_data_size = sizeof(struct rt2x00_sta);
+
+ /*
* Allocate tx status FIFO for driver use.
*/
if (test_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags)) {
@@ -946,7 +967,6 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
tasklet_init(&rt2x00dev->taskletname, \
rt2x00dev->ops->lib->taskletname, \
(unsigned long)rt2x00dev); \
- tasklet_disable(&rt2x00dev->taskletname); \
}
RT2X00_TASKLET_INIT(txstatus_tasklet);
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 4ccf2380597..bf0acff0780 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -494,6 +494,7 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct rt2x00lib_crypto crypto;
static const u8 bcast_addr[ETH_ALEN] =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, };
+ struct rt2x00_sta *sta_priv = NULL;
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;
@@ -504,24 +505,18 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
memset(&crypto, 0, sizeof(crypto));
- /*
- * When in STA mode, bssidx is always 0 otherwise local_address[5]
- * contains the bss number, see BSS_ID_MASK comments for details.
- */
- if (rt2x00dev->intf_sta_count)
- crypto.bssidx = 0;
- else
- crypto.bssidx = vif->addr[5] & (rt2x00dev->ops->max_ap_intf - 1);
-
+ crypto.bssidx = rt2x00lib_get_bssidx(rt2x00dev, vif);
crypto.cipher = rt2x00crypto_key_to_cipher(key);
if (crypto.cipher == CIPHER_NONE)
return -EOPNOTSUPP;
crypto.cmd = cmd;
- if (sta)
+ if (sta) {
crypto.address = sta->addr;
- else
+ sta_priv = sta_to_rt2x00_sta(sta);
+ crypto.wcid = sta_priv->wcid;
+ } else
crypto.address = bcast_addr;
if (crypto.cipher == CIPHER_TKIP)
@@ -560,6 +555,39 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
#endif /* CONFIG_RT2X00_LIB_CRYPTO */
+int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
+
+ /*
+ * If there's no space left in the device table store
+ * -1 as wcid but tell mac80211 everything went ok.
+ */
+ if (rt2x00dev->ops->lib->sta_add(rt2x00dev, vif, sta))
+ sta_priv->wcid = -1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2x00mac_sta_add);
+
+int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
+
+ /*
+ * If we never sent the STA to the device no need to clean it up.
+ */
+ if (sta_priv->wcid < 0)
+ return 0;
+
+ return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta_priv->wcid);
+}
+EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove);
+
void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -685,7 +713,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed);
-int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
+int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 29edb9fbe6f..5adfb3eab9c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -310,11 +310,16 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct rt2x00_sta *sta_priv = NULL;
- if (tx_info->control.sta)
+ if (tx_info->control.sta) {
txdesc->u.ht.mpdu_density =
tx_info->control.sta->ht_cap.ampdu_density;
+ sta_priv = sta_to_rt2x00_sta(tx_info->control.sta);
+ txdesc->u.ht.wcid = sta_priv->wcid;
+ }
+
txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index f2100f4ddcf..349008d1fb2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -288,8 +288,8 @@ enum txentry_desc_flags {
* @signal: PLCP signal.
* @service: PLCP service.
* @msc: MCS.
- * @stbc: STBC.
- * @ba_size: BA size.
+ * @stbc: Use Space Time Block Coding (only available for MCS rates < 8).
+ * @ba_size: Size of the recepients RX reorder buffer - 1.
* @rate_mode: Rate mode (See @enum rate_modulation).
* @mpdu_density: MDPU density.
* @retry_limit: Max number of retries.
@@ -321,6 +321,7 @@ struct txentry_desc {
u8 ba_size;
u8 mpdu_density;
enum txop txop;
+ int wcid;
} ht;
} u;
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 53110b83bf6..bf55b4a311e 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1142,11 +1142,6 @@ static void rt61pci_start_queue(struct data_queue *queue)
rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
break;
case QID_BEACON:
- /*
- * Allow the tbtt tasklet to be scheduled.
- */
- tasklet_enable(&rt2x00dev->tbtt_tasklet);
-
rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
@@ -1230,7 +1225,7 @@ static void rt61pci_stop_queue(struct data_queue *queue)
/*
* Wait for possibly running tbtt tasklets.
*/
- tasklet_disable(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
break;
default:
break;
@@ -1731,13 +1726,6 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg);
rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg);
-
- /*
- * Enable tasklets.
- */
- tasklet_enable(&rt2x00dev->txstatus_tasklet);
- tasklet_enable(&rt2x00dev->rxdone_tasklet);
- tasklet_enable(&rt2x00dev->autowake_tasklet);
}
/*
@@ -1772,9 +1760,10 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
/*
* Ensure that all tasklets are finished.
*/
- tasklet_disable(&rt2x00dev->txstatus_tasklet);
- tasklet_disable(&rt2x00dev->rxdone_tasklet);
- tasklet_disable(&rt2x00dev->autowake_tasklet);
+ tasklet_kill(&rt2x00dev->txstatus_tasklet);
+ tasklet_kill(&rt2x00dev->rxdone_tasklet);
+ tasklet_kill(&rt2x00dev->autowake_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
}
}
@@ -2300,22 +2289,24 @@ static void rt61pci_txstatus_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
rt61pci_txdone(rt2x00dev);
- rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TXDONE);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TXDONE);
}
static void rt61pci_tbtt_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
rt2x00lib_beacondone(rt2x00dev);
- rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_BEACON_DONE);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_BEACON_DONE);
}
static void rt61pci_rxdone_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
if (rt2x00pci_rxdone(rt2x00dev))
- rt2x00pci_rxdone(rt2x00dev);
- else
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+ else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE);
}
@@ -2325,7 +2316,8 @@ static void rt61pci_autowake_tasklet(unsigned long data)
rt61pci_wakeup(rt2x00dev);
rt2x00pci_register_write(rt2x00dev,
M2H_CMD_DONE_CSR, 0xffffffff);
- rt61pci_enable_mcu_interrupt(rt2x00dev, MCU_INT_MASK_CSR_TWAKEUP);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt61pci_enable_mcu_interrupt(rt2x00dev, MCU_INT_MASK_CSR_TWAKEUP);
}
static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
@@ -2891,7 +2883,8 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* IEEE80211 stack callback functions.
*/
-static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
+static int rt61pci_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -2907,7 +2900,7 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
* we are free to update the registers based on the value
* in the queue parameter.
*/
- retval = rt2x00mac_conf_tx(hw, queue_idx, params);
+ retval = rt2x00mac_conf_tx(hw, vif, queue_idx, params);
if (retval)
return retval;
@@ -2948,7 +2941,7 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
return 0;
}
-static u64 rt61pci_get_tsf(struct ieee80211_hw *hw)
+static u64 rt61pci_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
u64 tsf;
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 0baeb894f09..cfb19dbb0a6 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2222,7 +2222,8 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* IEEE80211 stack callback functions.
*/
-static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
+static int rt73usb_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -2238,7 +2239,7 @@ static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
* we are free to update the registers based on the value
* in the queue parameter.
*/
- retval = rt2x00mac_conf_tx(hw, queue_idx, params);
+ retval = rt2x00mac_conf_tx(hw, vif, queue_idx, params);
if (retval)
return retval;
@@ -2279,7 +2280,7 @@ static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
return 0;
}
-static u64 rt73usb_get_tsf(struct ieee80211_hw *hw)
+static u64 rt73usb_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
u64 tsf;
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 66b29dc07cc..0082015ff66 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -669,7 +669,8 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
rtl8180_free_tx_ring(dev, i);
}
-static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
+static u64 rtl8180_get_tsf(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif)
{
struct rtl8180_priv *priv = dev->priv;
@@ -701,7 +702,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
* TODO: make hardware update beacon timestamp
*/
mgmt = (struct ieee80211_mgmt *)skb->data;
- mgmt->u.beacon.timestamp = cpu_to_le64(rtl8180_get_tsf(dev));
+ mgmt->u.beacon.timestamp = cpu_to_le64(rtl8180_get_tsf(dev, vif));
/* TODO: use actual beacon queue */
skb_set_queue_mapping(skb, 0);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 1e0be14d10d..24873b55b55 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -1241,7 +1241,8 @@ static void rtl8187_configure_filter(struct ieee80211_hw *dev,
rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf);
}
-static int rtl8187_conf_tx(struct ieee80211_hw *dev, u16 queue,
+static int rtl8187_conf_tx(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct rtl8187_priv *priv = dev->priv;
@@ -1277,7 +1278,7 @@ static int rtl8187_conf_tx(struct ieee80211_hw *dev, u16 queue,
return 0;
}
-static u64 rtl8187_get_tsf(struct ieee80211_hw *dev)
+static u64 rtl8187_get_tsf(struct ieee80211_hw *dev, struct ieee80211_vif *vif)
{
struct rtl8187_priv *priv = dev->priv;
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 0b598db38da..d4fdd2a5a73 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -311,6 +311,8 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_BEACON_FILTER |
IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ /* IEEE80211_HW_SUPPORTS_CQM_RSSI | */
IEEE80211_HW_REPORTS_TX_ACK_STATUS | 0;
/* swlps or hwlps has been set in diff chip in init_sw_vars */
@@ -664,6 +666,167 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw)
return hw_rate;
}
+/* mac80211's rate_idx is like this:
+ *
+ * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
+ *
+ * B/G rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC92_RATE1M-->DESC92_RATE54M ==> idx is 0-->11,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC92_RATEMCS0-->DESC92_RATEMCS15 ==> idx is 0-->15
+ *
+ * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
+ * A rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC92_RATE6M-->DESC92_RATE54M ==> idx is 0-->7,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC92_RATEMCS0-->DESC92_RATEMCS15 ==> idx is 0-->15
+ */
+int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
+ bool isht, u8 desc_rate, bool first_ampdu)
+{
+ int rate_idx;
+
+ if (false == isht) {
+ if (IEEE80211_BAND_2GHZ == hw->conf.channel->band) {
+ switch (desc_rate) {
+ case DESC92_RATE1M:
+ rate_idx = 0;
+ break;
+ case DESC92_RATE2M:
+ rate_idx = 1;
+ break;
+ case DESC92_RATE5_5M:
+ rate_idx = 2;
+ break;
+ case DESC92_RATE11M:
+ rate_idx = 3;
+ break;
+ case DESC92_RATE6M:
+ rate_idx = 4;
+ break;
+ case DESC92_RATE9M:
+ rate_idx = 5;
+ break;
+ case DESC92_RATE12M:
+ rate_idx = 6;
+ break;
+ case DESC92_RATE18M:
+ rate_idx = 7;
+ break;
+ case DESC92_RATE24M:
+ rate_idx = 8;
+ break;
+ case DESC92_RATE36M:
+ rate_idx = 9;
+ break;
+ case DESC92_RATE48M:
+ rate_idx = 10;
+ break;
+ case DESC92_RATE54M:
+ rate_idx = 11;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ } else {
+ switch (desc_rate) {
+ case DESC92_RATE6M:
+ rate_idx = 0;
+ break;
+ case DESC92_RATE9M:
+ rate_idx = 1;
+ break;
+ case DESC92_RATE12M:
+ rate_idx = 2;
+ break;
+ case DESC92_RATE18M:
+ rate_idx = 3;
+ break;
+ case DESC92_RATE24M:
+ rate_idx = 4;
+ break;
+ case DESC92_RATE36M:
+ rate_idx = 5;
+ break;
+ case DESC92_RATE48M:
+ rate_idx = 6;
+ break;
+ case DESC92_RATE54M:
+ rate_idx = 7;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ }
+
+ } else {
+
+ switch (desc_rate) {
+ case DESC92_RATEMCS0:
+ rate_idx = 0;
+ break;
+ case DESC92_RATEMCS1:
+ rate_idx = 1;
+ break;
+ case DESC92_RATEMCS2:
+ rate_idx = 2;
+ break;
+ case DESC92_RATEMCS3:
+ rate_idx = 3;
+ break;
+ case DESC92_RATEMCS4:
+ rate_idx = 4;
+ break;
+ case DESC92_RATEMCS5:
+ rate_idx = 5;
+ break;
+ case DESC92_RATEMCS6:
+ rate_idx = 6;
+ break;
+ case DESC92_RATEMCS7:
+ rate_idx = 7;
+ break;
+ case DESC92_RATEMCS8:
+ rate_idx = 8;
+ break;
+ case DESC92_RATEMCS9:
+ rate_idx = 9;
+ break;
+ case DESC92_RATEMCS10:
+ rate_idx = 10;
+ break;
+ case DESC92_RATEMCS11:
+ rate_idx = 11;
+ break;
+ case DESC92_RATEMCS12:
+ rate_idx = 12;
+ break;
+ case DESC92_RATEMCS13:
+ rate_idx = 13;
+ break;
+ case DESC92_RATEMCS14:
+ rate_idx = 14;
+ break;
+ case DESC92_RATEMCS15:
+ rate_idx = 15;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ }
+ return rate_idx;
+}
+EXPORT_SYMBOL(rtlwifi_rate_mapping);
+
void rtl_get_tcb_desc(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
@@ -689,7 +852,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
*So tcb_desc->hw_rate is just used for
*special data and mgt frames
*/
- if (info->control.rates[0].idx == 0 &&
+ if (info->control.rates[0].idx == 0 ||
ieee80211_is_nullfunc(fc)) {
tcb_desc->use_driver_rate = true;
tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
@@ -977,7 +1140,7 @@ void rtl_watchdog_wq_callback(void *data)
}
/*
- *<3> to check if traffic busy, if
+ *<2> to check if traffic busy, if
* busytraffic we don't change channel
*/
if (mac->link_state >= MAC80211_LINKED) {
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index a91f3eee59c..4ae905983d0 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -140,4 +140,6 @@ u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid);
extern struct attribute_group rtl_attribute_group;
+int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
+ bool isht, u8 desc_rate, bool first_ampdu);
#endif
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 04c4e9eb6ee..3f0f056fae9 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -504,7 +504,8 @@ static int _rtl_get_hal_qnum(u16 queue)
*for mac80211 VO=0, VI=1, BE=2, BK=3
*for rtl819x BE=0, BK=1, VI=2, VO=3
*/
-static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+static int rtl_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *param)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -775,7 +776,7 @@ out:
mutex_unlock(&rtlpriv->locks.conf_mutex);
}
-static u64 rtl_op_get_tsf(struct ieee80211_hw *hw)
+static u64 rtl_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u64 tsf;
@@ -784,7 +785,8 @@ static u64 rtl_op_get_tsf(struct ieee80211_hw *hw)
return tsf;
}
-static void rtl_op_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+static void rtl_op_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 tsf)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -794,7 +796,8 @@ static void rtl_op_set_tsf(struct ieee80211_hw *hw, u64 tsf)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss));
}
-static void rtl_op_reset_tsf(struct ieee80211_hw *hw)
+static void rtl_op_reset_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmp = 0;
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/rtlwifi/debug.c
index 5fa73852cb6..1b5cb7153a5 100644
--- a/drivers/net/wireless/rtlwifi/debug.c
+++ b/drivers/net/wireless/rtlwifi/debug.c
@@ -33,8 +33,6 @@ void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 i;
- rtlpriv->dbg.global_debuglevel = DBG_EMERG;
-
rtlpriv->dbg.global_debugcomponents =
COMP_ERR | COMP_FW | COMP_INIT | COMP_RECV | COMP_SEND |
COMP_MLME | COMP_SCAN | COMP_INTR | COMP_LED | COMP_SEC |
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 56f12358389..177a8e66924 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -218,7 +218,6 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
- u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
/*Retrieve original configuration settings. */
u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
@@ -254,9 +253,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
udelay(50);
/*4 Disable Pci Bridge ASPM */
- rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
- pcicfg_addrport + (num4bytes << 2));
- rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg);
+ pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
+ pcibridge_linkctrlreg);
udelay(50);
}
@@ -277,7 +275,6 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
u8 pcibridge_devnum = pcipriv->ndis_adapter.pcibridge_devnum;
u8 pcibridge_funcnum = pcipriv->ndis_adapter.pcibridge_funcnum;
u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
- u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
u16 aspmlevel;
u8 u_pcibridge_aspmsetting;
@@ -293,8 +290,6 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
}
/*4 Enable Pci Bridge ASPM */
- rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
- pcicfg_addrport + (num4bytes << 2));
u_pcibridge_aspmsetting =
pcipriv->ndis_adapter.pcibridge_linkctrlreg |
@@ -303,7 +298,8 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
u_pcibridge_aspmsetting &= ~BIT(0);
- rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, u_pcibridge_aspmsetting);
+ pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
+ u_pcibridge_aspmsetting);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
("PlatformEnableASPM():PciBridge busnumber[%x], "
@@ -335,25 +331,18 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
bool status = false;
u8 offset_e0;
unsigned offset_e4;
- rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
- pcicfg_addrport + 0xE0);
- rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, 0xA0);
+ pci_write_config_byte(rtlpci->pdev, 0xe0, 0xa0);
- rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
- pcicfg_addrport + 0xE0);
- rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &offset_e0);
+ pci_read_config_byte(rtlpci->pdev, 0xe0, &offset_e0);
if (offset_e0 == 0xA0) {
- rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
- pcicfg_addrport + 0xE4);
- rtl_pci_raw_read_port_ulong(PCI_CONF_DATA, &offset_e4);
+ pci_read_config_dword(rtlpci->pdev, 0xe4, &offset_e4);
if (offset_e4 & BIT(23))
status = true;
}
@@ -364,17 +353,15 @@ static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
- u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
u8 linkctrl_reg;
u8 num4bbytes;
num4bbytes = (capabilityoffset + 0x10) / 4;
/*Read Link Control Register */
- rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
- pcicfg_addrport + (num4bbytes << 2));
- rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg);
+ pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg);
pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
}
@@ -488,7 +475,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct sk_buff *skb = NULL;
struct ieee80211_tx_info *info = NULL;
- int tid; /* should be int */
+ int tid;
if (!rtlpriv->rtlhal.earlymode_enable)
return;
@@ -1538,7 +1525,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
rtl_init_rx_config(hw);
- /*should after adapter start and interrupt enable. */
+ /*should be after adapter start and interrupt enable. */
set_hal_start(rtlhal);
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
@@ -1559,7 +1546,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
u8 RFInProgressTimeOut = 0;
/*
- *should before disable interrrupt&adapter
+ *should be before disable interrupt&adapter
*and will do it immediately.
*/
set_hal_stop(rtlhal);
@@ -1718,10 +1705,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
PCI_SLOT(bridge_pdev->devfn);
pcipriv->ndis_adapter.pcibridge_funcnum =
PCI_FUNC(bridge_pdev->devfn);
- pcipriv->ndis_adapter.pcicfg_addrport =
- (pcipriv->ndis_adapter.pcibridge_busnum << 16) |
- (pcipriv->ndis_adapter.pcibridge_devnum << 11) |
- (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31);
pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
pci_pcie_cap(bridge_pdev);
pcipriv->ndis_adapter.num4bytes =
@@ -2010,36 +1993,25 @@ call rtl_mac_stop() from the mac80211
suspend function first, So there is
no need to call hw_disable here.
****************************************/
-int rtl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+int rtl_pci_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->cfg->ops->hw_suspend(hw);
rtl_deinit_rfkill(hw);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
EXPORT_SYMBOL(rtl_pci_suspend);
-int rtl_pci_resume(struct pci_dev *pdev)
+int rtl_pci_resume(struct device *dev)
{
- int ret;
+ struct pci_dev *pdev = to_pci_dev(dev);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtl_priv *rtlpriv = rtl_priv(hw);
- pci_set_power_state(pdev, PCI_D0);
- ret = pci_enable_device(pdev);
- if (ret) {
- RT_ASSERT(false, ("ERR: <======\n"));
- return ret;
- }
-
- pci_restore_state(pdev);
-
rtlpriv->cfg->ops->hw_resume(hw);
rtl_init_rfkill(hw);
return 0;
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index c53c6204674..ebe0b42c051 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -212,7 +212,6 @@ struct mp_adapter {
u16 pcibridge_vendorid;
u16 pcibridge_deviceid;
- u32 pcicfg_addrport;
u8 num4bytes;
u8 pcibridge_pciehdr_offset;
@@ -238,8 +237,8 @@ extern struct rtl_intf_ops rtl_pci_ops;
int __devinit rtl_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
void rtl_pci_disconnect(struct pci_dev *pdev);
-int rtl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
-int rtl_pci_resume(struct pci_dev *pdev);
+int rtl_pci_suspend(struct device *dev);
+int rtl_pci_resume(struct device *dev);
static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
{
@@ -273,29 +272,4 @@ static inline void pci_write32_async(struct rtl_priv *rtlpriv,
writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
}
-static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val)
-{
- outl(val, port);
-}
-
-static inline void rtl_pci_raw_write_port_uchar(u32 port, u8 val)
-{
- outb(val, port);
-}
-
-static inline void rtl_pci_raw_read_port_uchar(u32 port, u8 *pval)
-{
- *pval = inb(port);
-}
-
-static inline void rtl_pci_raw_read_port_ushort(u32 port, u16 *pval)
-{
- *pval = inw(port);
-}
-
-static inline void rtl_pci_raw_read_port_ulong(u32 port, u32 *pval)
-{
- *pval = inl(port);
-}
-
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 35ff7df41a1..9fc804d89d6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -142,8 +142,22 @@ enum version_8192c {
VERSION_UNKNOWN = 0x88,
};
+#define CUT_VERSION_MASK (BIT(6)|BIT(7))
+#define CHIP_VENDOR_UMC BIT(5)
+#define CHIP_VENDOR_UMC_B_CUT BIT(6) /* Chip version for ECO */
+#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \
+ ((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
#define IS_CHIP_VER_B(version) ((version & CHIP_VER_B) ? true : false)
+#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \
+ ((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
#define IS_92C_SERIAL(version) ((version & CHIP_92C_BITMASK) ? true : false)
+#define IS_CHIP_VENDOR_UMC(version) \
+ ((version & CHIP_VENDOR_UMC) ? true : false)
+#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK)
+#define IS_81xxC_VENDOR_UMC_B_CUT(version) \
+ ((IS_CHIP_VENDOR_UMC(version)) ? \
+ ((GET_CVID_CUT_VERSION(version) == CHIP_VENDOR_UMC_B_CUT) ? \
+ true : false) : false)
enum rtl819x_loopback_e {
RTL819X_NO_LOOPBACK = 0,
@@ -220,41 +234,6 @@ enum rtl_desc_qsel {
QSLT_CMD = 0x13,
};
-enum rtl_desc92c_rate {
- DESC92C_RATE1M = 0x00,
- DESC92C_RATE2M = 0x01,
- DESC92C_RATE5_5M = 0x02,
- DESC92C_RATE11M = 0x03,
-
- DESC92C_RATE6M = 0x04,
- DESC92C_RATE9M = 0x05,
- DESC92C_RATE12M = 0x06,
- DESC92C_RATE18M = 0x07,
- DESC92C_RATE24M = 0x08,
- DESC92C_RATE36M = 0x09,
- DESC92C_RATE48M = 0x0a,
- DESC92C_RATE54M = 0x0b,
-
- DESC92C_RATEMCS0 = 0x0c,
- DESC92C_RATEMCS1 = 0x0d,
- DESC92C_RATEMCS2 = 0x0e,
- DESC92C_RATEMCS3 = 0x0f,
- DESC92C_RATEMCS4 = 0x10,
- DESC92C_RATEMCS5 = 0x11,
- DESC92C_RATEMCS6 = 0x12,
- DESC92C_RATEMCS7 = 0x13,
- DESC92C_RATEMCS8 = 0x14,
- DESC92C_RATEMCS9 = 0x15,
- DESC92C_RATEMCS10 = 0x16,
- DESC92C_RATEMCS11 = 0x17,
- DESC92C_RATEMCS12 = 0x18,
- DESC92C_RATEMCS13 = 0x19,
- DESC92C_RATEMCS14 = 0x1a,
- DESC92C_RATEMCS15 = 0x1b,
- DESC92C_RATEMCS15_SG = 0x1c,
- DESC92C_RATEMCS32 = 0x20,
-};
-
struct phy_sts_cck_8192s_t {
u8 adc_pwdb_X[4];
u8 sq_rpt;
@@ -267,108 +246,4 @@ struct h2c_cmd_8192c {
u8 *p_cmdbuffer;
};
-/* NOTE: reference to rtl8192c_rates struct */
-static inline int _rtl92c_rate_mapping(struct ieee80211_hw *hw, bool isHT,
- u8 desc_rate, bool first_ampdu)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int rate_idx = 0;
-
- if (first_ampdu) {
- if (false == isHT) {
- switch (desc_rate) {
- case DESC92C_RATE1M:
- rate_idx = 0;
- break;
- case DESC92C_RATE2M:
- rate_idx = 1;
- break;
- case DESC92C_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92C_RATE11M:
- rate_idx = 3;
- break;
- case DESC92C_RATE6M:
- rate_idx = 4;
- break;
- case DESC92C_RATE9M:
- rate_idx = 5;
- break;
- case DESC92C_RATE12M:
- rate_idx = 6;
- break;
- case DESC92C_RATE18M:
- rate_idx = 7;
- break;
- case DESC92C_RATE24M:
- rate_idx = 8;
- break;
- case DESC92C_RATE36M:
- rate_idx = 9;
- break;
- case DESC92C_RATE48M:
- rate_idx = 10;
- break;
- case DESC92C_RATE54M:
- rate_idx = 11;
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- ("Rate %d is not support, set to "
- "1M rate.\n", desc_rate));
- rate_idx = 0;
- break;
- }
- } else {
- rate_idx = 11;
- }
- return rate_idx;
- }
- switch (desc_rate) {
- case DESC92C_RATE1M:
- rate_idx = 0;
- break;
- case DESC92C_RATE2M:
- rate_idx = 1;
- break;
- case DESC92C_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92C_RATE11M:
- rate_idx = 3;
- break;
- case DESC92C_RATE6M:
- rate_idx = 4;
- break;
- case DESC92C_RATE9M:
- rate_idx = 5;
- break;
- case DESC92C_RATE12M:
- rate_idx = 6;
- break;
- case DESC92C_RATE18M:
- rate_idx = 7;
- break;
- case DESC92C_RATE24M:
- rate_idx = 8;
- break;
- case DESC92C_RATE36M:
- rate_idx = 9;
- break;
- case DESC92C_RATE48M:
- rate_idx = 10;
- break;
- case DESC92C_RATE54M:
- rate_idx = 11;
- break;
- /* TODO: How to mapping MCS rate? */
- /* NOTE: referenc to __ieee80211_rx */
- default:
- rate_idx = 11;
- break;
- }
- return rate_idx;
-}
-
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 373dc78af1d..a48404cc2b9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -92,6 +92,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
const struct firmware *firmware;
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ char *fw_name = NULL;
rtl8192ce_bt_reg_init(hw);
@@ -129,10 +131,16 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD | 0);
+ /* for debug level */
+ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
/* for LPS & IPS */
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ if (!rtlpriv->psc.inactiveps)
+ pr_info("rtl8192ce: Power Save off (module option)\n");
+ if (!rtlpriv->psc.fwctrl_lps)
+ pr_info("rtl8192ce: FW Power Save off (module option)\n");
rtlpriv->psc.reg_fwctrl_lps = 3;
rtlpriv->psc.reg_max_lps_awakeintvl = 5;
/* for ASPM, you can close aspm through
@@ -155,8 +163,14 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
}
/* request fw */
- err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
- rtlpriv->io.dev);
+ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+ !IS_92C_SERIAL(rtlhal->version))
+ fw_name = "rtlwifi/rtl8192cfwU.bin";
+ else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
+ fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+ else
+ fw_name = rtlpriv->cfg->fw_name;
+ err = request_firmware(&firmware, fw_name, rtlpriv->io.dev);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
("Failed to request firmware!\n"));
@@ -241,6 +255,7 @@ static struct rtl_mod_params rtl92ce_mod_params = {
.inactiveps = true,
.swctrl_lps = false,
.fwctrl_lps = true,
+ .debug = DBG_EMERG,
};
static struct rtl_hal_cfg rtl92ce_hal_cfg = {
@@ -318,21 +333,21 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
.maps[RTL_IMR_ROK] = IMR_ROK,
.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
- .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
- .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
- .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
- .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
- .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
- .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
- .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
- .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
- .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
- .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
- .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
- .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
-
- .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
- .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
+ .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
};
DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = {
@@ -351,27 +366,35 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n PCI wireless");
MODULE_FIRMWARE("rtlwifi/rtl8192cfw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cfwU.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cfwU_B.bin");
module_param_named(swenc, rtl92ce_mod_params.sw_crypto, bool, 0444);
+module_param_named(debug, rtl92ce_mod_params.debug, int, 0444);
module_param_named(ips, rtl92ce_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl92ce_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl92ce_mod_params.fwctrl_lps, bool, 0444);
-MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n");
-MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n");
-MODULE_PARM_DESC(fwlps, "using linked fw control power save "
- "(default 1 is open)\n");
+MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+
+static const struct dev_pm_ops rtlwifi_pm_ops = {
+ .suspend = rtl_pci_suspend,
+ .resume = rtl_pci_resume,
+ .freeze = rtl_pci_suspend,
+ .thaw = rtl_pci_resume,
+ .poweroff = rtl_pci_suspend,
+ .restore = rtl_pci_resume,
+};
static struct pci_driver rtl92ce_driver = {
.name = KBUILD_MODNAME,
.id_table = rtl92ce_pci_ids,
.probe = rtl_pci_probe,
.remove = rtl_pci_disconnect,
-
-#ifdef CONFIG_PM
- .suspend = rtl_pci_suspend,
- .resume = rtl_pci_resume,
-#endif
-
+ .driver.pm = &rtlwifi_pm_ops,
};
static int __init rtl92ce_module_init(void)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 230bbe900d8..4fb5ae24dee 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -48,104 +48,6 @@ static u8 _rtl92ce_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-static int _rtl92ce_rate_mapping(bool isht, u8 desc_rate, bool first_ampdu)
-{
- int rate_idx;
-
- if (first_ampdu) {
- if (false == isht) {
- switch (desc_rate) {
- case DESC92C_RATE1M:
- rate_idx = 0;
- break;
- case DESC92C_RATE2M:
- rate_idx = 1;
- break;
- case DESC92C_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92C_RATE11M:
- rate_idx = 3;
- break;
- case DESC92C_RATE6M:
- rate_idx = 4;
- break;
- case DESC92C_RATE9M:
- rate_idx = 5;
- break;
- case DESC92C_RATE12M:
- rate_idx = 6;
- break;
- case DESC92C_RATE18M:
- rate_idx = 7;
- break;
- case DESC92C_RATE24M:
- rate_idx = 8;
- break;
- case DESC92C_RATE36M:
- rate_idx = 9;
- break;
- case DESC92C_RATE48M:
- rate_idx = 10;
- break;
- case DESC92C_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- } else {
- rate_idx = 11;
- }
-
- return rate_idx;
- }
-
- switch (desc_rate) {
- case DESC92C_RATE1M:
- rate_idx = 0;
- break;
- case DESC92C_RATE2M:
- rate_idx = 1;
- break;
- case DESC92C_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92C_RATE11M:
- rate_idx = 3;
- break;
- case DESC92C_RATE6M:
- rate_idx = 4;
- break;
- case DESC92C_RATE9M:
- rate_idx = 5;
- break;
- case DESC92C_RATE12M:
- rate_idx = 6;
- break;
- case DESC92C_RATE18M:
- rate_idx = 7;
- break;
- case DESC92C_RATE24M:
- rate_idx = 8;
- break;
- case DESC92C_RATE36M:
- rate_idx = 9;
- break;
- case DESC92C_RATE48M:
- rate_idx = 10;
- break;
- case DESC92C_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 11;
- break;
- }
- return rate_idx;
-}
-
static u8 _rtl92c_query_rxpwrpercentage(char antpower)
{
if ((antpower <= -100) || (antpower >= 20))
@@ -336,8 +238,8 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
- if (pdesc->rxht && pdesc->rxmcs >= DESC92C_RATEMCS8 &&
- pdesc->rxmcs <= DESC92C_RATEMCS15)
+ if (pdesc->rxht && pdesc->rxmcs >= DESC92_RATEMCS8 &&
+ pdesc->rxmcs <= DESC92_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
@@ -670,12 +572,10 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
if (stats->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
- rx_status->rate_idx = _rtl92ce_rate_mapping((bool)
- GET_RX_DESC_RXHT(pdesc),
- (u8)
- GET_RX_DESC_RXMCS(pdesc),
- (bool)
- GET_RX_DESC_PAGGR(pdesc));
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw,
+ (bool)GET_RX_DESC_RXHT(pdesc),
+ (u8)GET_RX_DESC_RXMCS(pdesc),
+ (bool)GET_RX_DESC_PAGGR(pdesc));
rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
if (phystatus) {
@@ -768,7 +668,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_BW(pdesc, 0);
SET_TX_DESC_RTS_SC(pdesc, tcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(pdesc,
- ((tcb_desc->rts_rate <= DESC92C_RATE54M) ?
+ ((tcb_desc->rts_rate <= DESC92_RATE54M) ?
(tcb_desc->rts_use_shortpreamble ? 1 : 0)
: (tcb_desc->rts_use_shortgi ? 1 : 0)));
@@ -886,7 +786,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
if (firstseg)
SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
- SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
SET_TX_DESC_SEQ(pdesc, 0);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index 0f117713750..c8977a50ca3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -537,12 +537,6 @@ do { \
memset(__pdesc, 0, _size); \
} while (0);
-#define RX_HAL_IS_CCK_RATE(_pdesc)\
- (_pdesc->rxmcs == DESC92C_RATE1M || \
- _pdesc->rxmcs == DESC92C_RATE2M || \
- _pdesc->rxmcs == DESC92C_RATE5_5M || \
- _pdesc->rxmcs == DESC92C_RATE11M)
-
struct rx_fwinfo_92c {
u8 gain_trsw[4];
u8 pwdb_all;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
index c54940ea72f..d097efb1e71 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
@@ -50,10 +50,6 @@
#define IS_VENDOR_UMC(version) \
(((version) & CHIP_VENDOR_UMC) ? true : false)
-#define IS_VENDOR_UMC_A_CUT(version) \
- (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6) | BIT(7))) ? \
- false : true) : false)
-
#define IS_VENDOR_8723_A_CUT(version) \
(((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \
false : true) : false)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 194fc693c1f..060a06f4a88 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -892,8 +892,8 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
if (GET_RX_DESC_RX_MCS(pdesc) &&
- GET_RX_DESC_RX_MCS(pdesc) >= DESC92C_RATEMCS8 &&
- GET_RX_DESC_RX_MCS(pdesc) <= DESC92C_RATEMCS15)
+ GET_RX_DESC_RX_MCS(pdesc) >= DESC92_RATEMCS8 &&
+ GET_RX_DESC_RX_MCS(pdesc) <= DESC92_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
index 298fdb724aa..626d88e88e2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
@@ -87,12 +87,6 @@ void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter);
u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw);
-#define RX_HAL_IS_CCK_RATE(_pdesc)\
- (GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE1M ||\
- GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE2M ||\
- GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE5_5M ||\
- GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE11M)
-
struct rx_fwinfo_92c {
u8 gain_trsw[4];
u8 pwdb_all;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 17a8e962851..1e851aae58d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -104,7 +104,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
tx_agc[RF90_PATH_A] = 0x10101010;
tx_agc[RF90_PATH_B] = 0x10101010;
} else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
- TXHIGHPWRLEVEL_LEVEL2) {
+ TXHIGHPWRLEVEL_LEVEL1) {
tx_agc[RF90_PATH_A] = 0x00000000;
tx_agc[RF90_PATH_B] = 0x00000000;
} else{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index ef63c0df006..feed1ed8d9b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -60,6 +60,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.dm_flag = 0;
rtlpriv->dm.disable_framebursting = 0;
rtlpriv->dm.thermalvalue = 0;
+ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
if (!rtlpriv->rtlhal.pfirmware) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -149,8 +150,14 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
static struct rtl_mod_params rtl92cu_mod_params = {
.sw_crypto = 0,
+ .debug = DBG_EMERG,
};
+module_param_named(swenc, rtl92cu_mod_params.sw_crypto, bool, 0444);
+module_param_named(debug, rtl92cu_mod_params.debug, int, 0444);
+MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+
static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
/* rx */
.in_ep_num = RTL92C_USB_BULK_IN_NUM,
@@ -241,20 +248,20 @@ static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.maps[RTL_IMR_ROK] = IMR_ROK,
.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
- .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
- .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
- .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
- .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
- .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
- .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
- .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
- .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
- .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
- .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
- .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
- .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
- .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
- .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
+ .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
+ .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
};
#define USB_VENDER_ID_REALTEK 0x0bda
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 3e52a549622..bc33b147f44 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -337,10 +337,10 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
rx_status->flag |= RX_FLAG_MACTIME_MPDU;
if (stats->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
- rx_status->rate_idx = _rtl92c_rate_mapping(hw,
- (bool)GET_RX_DESC_RX_HT(pdesc),
- (u8)GET_RX_DESC_RX_MCS(pdesc),
- (bool)GET_RX_DESC_PAGGR(pdesc));
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw,
+ (bool)GET_RX_DESC_RX_HT(pdesc),
+ (u8)GET_RX_DESC_RX_MCS(pdesc),
+ (bool)GET_RX_DESC_PAGGR(pdesc));
rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
@@ -406,11 +406,10 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
if (GET_RX_DESC_RX_HT(rxdesc))
rx_status->flag |= RX_FLAG_HT;
/* Data rate */
- rx_status->rate_idx = _rtl92c_rate_mapping(hw,
- (bool)GET_RX_DESC_RX_HT(rxdesc),
- (u8)GET_RX_DESC_RX_MCS(rxdesc),
- (bool)GET_RX_DESC_PAGGR(rxdesc)
- );
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw,
+ (bool)GET_RX_DESC_RX_HT(rxdesc),
+ (u8)GET_RX_DESC_RX_MCS(rxdesc),
+ (bool)GET_RX_DESC_PAGGR(rxdesc));
/* There is a phy status after this rx descriptor. */
if (GET_RX_DESC_PHY_STATUS(rxdesc)) {
p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE);
@@ -545,7 +544,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_BW(txdesc, 0);
SET_TX_DESC_RTS_SC(txdesc, tcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(txdesc,
- ((tcb_desc->rts_rate <= DESC92C_RATE54M) ?
+ ((tcb_desc->rts_rate <= DESC92_RATE54M) ?
(tcb_desc->rts_use_shortpreamble ? 1 : 0)
: (tcb_desc->rts_use_shortgi ? 1 : 0)));
if (mac->bw_40) {
@@ -644,7 +643,7 @@ void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
}
SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */
SET_TX_DESC_OWN(pDesc, 1);
- SET_TX_DESC_TX_RATE(pDesc, DESC92C_RATE1M);
+ SET_TX_DESC_TX_RATE(pDesc, DESC92_RATE1M);
_rtl_tx_desc_checksum(pDesc);
}
@@ -660,7 +659,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE);
if (firstseg)
SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE);
- SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
SET_TX_DESC_SEQ(pdesc, 0);
SET_TX_DESC_LINIP(pdesc, 0);
SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
index f0f5f9bfbb7..94630477174 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
@@ -122,60 +122,99 @@
#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \
LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
-/*
- * 92D chip ver:
- * BIT8: IS 92D
- * BIT9: single phy
- * BIT10: C-cut
- * BIT11: D-cut
- */
-
-/* Chip specific */
-#define CHIP_92C BIT(0)
-#define CHIP_92C_1T2R BIT(1)
-#define CHIP_8723 BIT(2) /* RTL8723 With BT feature */
-#define CHIP_8723_DRV_REV BIT(3) /* RTL8723 Driver Revised */
-#define NORMAL_CHIP BIT(4)
-#define CHIP_VENDOR_UMC BIT(5)
-#define CHIP_VENDOR_UMC_B_CUT BIT(6) /* Chip version for ECO */
+enum version_8192d {
+ VERSION_TEST_CHIP_88C = 0x0000,
+ VERSION_TEST_CHIP_92C = 0x0020,
+ VERSION_TEST_UMC_CHIP_8723 = 0x0081,
+ VERSION_NORMAL_TSMC_CHIP_88C = 0x0008,
+ VERSION_NORMAL_TSMC_CHIP_92C = 0x0028,
+ VERSION_NORMAL_TSMC_CHIP_92C_1T2R = 0x0018,
+ VERSION_NORMAL_UMC_CHIP_88C_A_CUT = 0x0088,
+ VERSION_NORMAL_UMC_CHIP_92C_A_CUT = 0x00a8,
+ VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT = 0x0098,
+ VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT = 0x0089,
+ VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT = 0x1089,
+ VERSION_NORMAL_UMC_CHIP_88C_B_CUT = 0x1088,
+ VERSION_NORMAL_UMC_CHIP_92C_B_CUT = 0x10a8,
+ VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT = 0x1090,
+ VERSION_TEST_CHIP_92D_SINGLEPHY = 0x0022,
+ VERSION_TEST_CHIP_92D_DUALPHY = 0x0002,
+ VERSION_NORMAL_CHIP_92D_SINGLEPHY = 0x002a,
+ VERSION_NORMAL_CHIP_92D_DUALPHY = 0x000a,
+ VERSION_NORMAL_CHIP_92D_C_CUT_SINGLEPHY = 0x202a,
+ VERSION_NORMAL_CHIP_92D_C_CUT_DUALPHY = 0x200a,
+ VERSION_NORMAL_CHIP_92D_D_CUT_SINGLEPHY = 0x302a,
+ VERSION_NORMAL_CHIP_92D_D_CUT_DUALPHY = 0x300a,
+ VERSION_NORMAL_CHIP_92D_E_CUT_SINGLEPHY = 0x402a,
+ VERSION_NORMAL_CHIP_92D_E_CUT_DUALPHY = 0x400a,
+};
/* for 92D */
-#define CHIP_92D BIT(8)
#define CHIP_92D_SINGLEPHY BIT(9)
+#define C_CUT_VERSION BIT(13)
+#define D_CUT_VERSION ((BIT(12)|BIT(13)))
+#define E_CUT_VERSION BIT(14)
+
+/* Chip specific */
+#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
+#define CHIP_BONDING_92C_1T2R 0x1
+#define CHIP_BONDING_88C_USB_MCARD 0x2
+#define CHIP_BONDING_88C_USB_HP 0x1
+
+/* [15:12] IC version(CUT): A-cut=0, B-cut=1, C-cut=2, D-cut=3 */
+/* [7] Manufacturer: TSMC=0, UMC=1 */
+/* [6:4] RF type: 1T1R=0, 1T2R=1, 2T2R=2 */
+/* [3] Chip type: TEST=0, NORMAL=1 */
+/* [2:0] IC type: 81xxC=0, 8723=1, 92D=2 */
+#define CHIP_8723 BIT(0)
+#define CHIP_92D BIT(1)
+#define NORMAL_CHIP BIT(3)
+#define RF_TYPE_1T1R (~(BIT(4)|BIT(5)|BIT(6)))
+#define RF_TYPE_1T2R BIT(4)
+#define RF_TYPE_2T2R BIT(5)
+#define CHIP_VENDOR_UMC BIT(7)
+#define B_CUT_VERSION BIT(12)
+
+/* MASK */
+#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2))
+#define CHIP_TYPE_MASK BIT(3)
+#define RF_TYPE_MASK (BIT(4)|BIT(5)|BIT(6))
+#define MANUFACTUER_MASK BIT(7)
+#define ROM_VERSION_MASK (BIT(11)|BIT(10)|BIT(9)|BIT(8))
+#define CUT_VERSION_MASK (BIT(15)|BIT(14)|BIT(13)|BIT(12))
+
+
+/* Get element */
+#define GET_CVID_IC_TYPE(version) ((version) & IC_TYPE_MASK)
+#define GET_CVID_CHIP_TYPE(version) ((version) & CHIP_TYPE_MASK)
+#define GET_CVID_RF_TYPE(version) ((version) & RF_TYPE_MASK)
+#define GET_CVID_MANUFACTUER(version) ((version) & MANUFACTUER_MASK)
+#define GET_CVID_ROM_VERSION(version) ((version) & ROM_VERSION_MASK)
+#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK)
+
+#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version)) ? \
+ false : true)
+#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == \
+ RF_TYPE_1T2R) ? true : false)
+#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == \
+ RF_TYPE_2T2R) ? true : false)
+
+#define IS_92D_SINGLEPHY(version) ((IS_92D(version)) ? \
+ (IS_2T2R(version) ? true : false) : false)
+#define IS_92D(version) ((GET_CVID_IC_TYPE(version) == \
+ CHIP_92D) ? true : false)
+#define IS_92D_C_CUT(version) ((IS_92D(version)) ? \
+ ((GET_CVID_CUT_VERSION(version) == \
+ 0x2000) ? true : false) : false)
+#define IS_92D_D_CUT(version) ((IS_92D(version)) ? \
+ ((GET_CVID_CUT_VERSION(version) == \
+ 0x3000) ? true : false) : false)
+#define IS_92D_E_CUT(version) ((IS_92D(version)) ? \
+ ((GET_CVID_CUT_VERSION(version) == \
+ 0x4000) ? true : false) : false)
#define CHIP_92D_C_CUT BIT(10)
#define CHIP_92D_D_CUT BIT(11)
-enum version_8192d {
- VERSION_TEST_CHIP_88C = 0x00,
- VERSION_TEST_CHIP_92C = 0x01,
- VERSION_NORMAL_TSMC_CHIP_88C = 0x10,
- VERSION_NORMAL_TSMC_CHIP_92C = 0x11,
- VERSION_NORMAL_TSMC_CHIP_92C_1T2R = 0x13,
- VERSION_NORMAL_UMC_CHIP_88C_A_CUT = 0x30,
- VERSION_NORMAL_UMC_CHIP_92C_A_CUT = 0x31,
- VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT = 0x33,
- VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT = 0x34,
- VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT = 0x3c,
- VERSION_NORMAL_UMC_CHIP_88C_B_CUT = 0x70,
- VERSION_NORMAL_UMC_CHIP_92C_B_CUT = 0x71,
- VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT = 0x73,
- VERSION_TEST_CHIP_92D_SINGLEPHY = 0x300,
- VERSION_TEST_CHIP_92D_DUALPHY = 0x100,
- VERSION_NORMAL_CHIP_92D_SINGLEPHY = 0x310,
- VERSION_NORMAL_CHIP_92D_DUALPHY = 0x110,
- VERSION_NORMAL_CHIP_92D_C_CUT_SINGLEPHY = 0x710,
- VERSION_NORMAL_CHIP_92D_C_CUT_DUALPHY = 0x510,
- VERSION_NORMAL_CHIP_92D_D_CUT_SINGLEPHY = 0xB10,
- VERSION_NORMAL_CHIP_92D_D_CUT_DUALPHY = 0x910,
-};
-
-#define IS_92D_SINGLEPHY(version) \
- ((version & CHIP_92D_SINGLEPHY) ? true : false)
-#define IS_92D_C_CUT(version) \
- ((version & CHIP_92D_C_CUT) ? true : false)
-#define IS_92D_D_CUT(version) \
- ((version & CHIP_92D_D_CUT) ? true : false)
-
enum rf_optype {
RF_OP_BY_SW_3WIRE = 0,
RF_OP_BY_FW,
@@ -193,41 +232,6 @@ enum rtl_desc_qsel {
QSLT_CMD = 0x13,
};
-enum rtl_desc92d_rate {
- DESC92D_RATE1M = 0x00,
- DESC92D_RATE2M = 0x01,
- DESC92D_RATE5_5M = 0x02,
- DESC92D_RATE11M = 0x03,
-
- DESC92D_RATE6M = 0x04,
- DESC92D_RATE9M = 0x05,
- DESC92D_RATE12M = 0x06,
- DESC92D_RATE18M = 0x07,
- DESC92D_RATE24M = 0x08,
- DESC92D_RATE36M = 0x09,
- DESC92D_RATE48M = 0x0a,
- DESC92D_RATE54M = 0x0b,
-
- DESC92D_RATEMCS0 = 0x0c,
- DESC92D_RATEMCS1 = 0x0d,
- DESC92D_RATEMCS2 = 0x0e,
- DESC92D_RATEMCS3 = 0x0f,
- DESC92D_RATEMCS4 = 0x10,
- DESC92D_RATEMCS5 = 0x11,
- DESC92D_RATEMCS6 = 0x12,
- DESC92D_RATEMCS7 = 0x13,
- DESC92D_RATEMCS8 = 0x14,
- DESC92D_RATEMCS9 = 0x15,
- DESC92D_RATEMCS10 = 0x16,
- DESC92D_RATEMCS11 = 0x17,
- DESC92D_RATEMCS12 = 0x18,
- DESC92D_RATEMCS13 = 0x19,
- DESC92D_RATEMCS14 = 0x1a,
- DESC92D_RATEMCS15 = 0x1b,
- DESC92D_RATEMCS15_SG = 0x1c,
- DESC92D_RATEMCS32 = 0x20,
-};
-
enum channel_plan {
CHPL_FCC = 0,
CHPL_IC = 1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 0073cf106af..f5bd3a3cd34 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1608,17 +1608,16 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
tempval[0] = hwinfo[EEPROM_IQK_DELTA] & 0x03;
tempval[1] = (hwinfo[EEPROM_LCK_DELTA] & 0x0C) >> 2;
rtlefuse->txpwr_fromeprom = true;
- if (IS_92D_D_CUT(rtlpriv->rtlhal.version)) {
+ if (IS_92D_D_CUT(rtlpriv->rtlhal.version) ||
+ IS_92D_E_CUT(rtlpriv->rtlhal.version)) {
rtlefuse->internal_pa_5g[0] =
- !((hwinfo[EEPROM_TSSI_A_5G] &
- BIT(6)) >> 6);
+ !((hwinfo[EEPROM_TSSI_A_5G] & BIT(6)) >> 6);
rtlefuse->internal_pa_5g[1] =
- !((hwinfo[EEPROM_TSSI_B_5G] &
- BIT(6)) >> 6);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ !((hwinfo[EEPROM_TSSI_B_5G] & BIT(6)) >> 6);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
("Is D cut,Internal PA0 %d Internal PA1 %d\n",
- rtlefuse->internal_pa_5g[0],
- rtlefuse->internal_pa_5g[1]))
+ rtlefuse->internal_pa_5g[0],
+ rtlefuse->internal_pa_5g[1]))
}
rtlefuse->eeprom_c9 = hwinfo[EEPROM_RF_OPT6];
rtlefuse->eeprom_cc = hwinfo[EEPROM_RF_OPT7];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index 351765df517..691f8009218 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -146,10 +146,16 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD);
+ /* for debug level */
+ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
/* for LPS & IPS */
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ if (!rtlpriv->psc.inactiveps)
+ pr_info("rtl8192ce: Power Save off (module option)\n");
+ if (!rtlpriv->psc.fwctrl_lps)
+ pr_info("rtl8192ce: FW Power Save off (module option)\n");
rtlpriv->psc.reg_fwctrl_lps = 3;
rtlpriv->psc.reg_max_lps_awakeintvl = 5;
/* for ASPM, you can close aspm through
@@ -263,6 +269,7 @@ static struct rtl_mod_params rtl92de_mod_params = {
.inactiveps = true,
.swctrl_lps = true,
.fwctrl_lps = false,
+ .debug = DBG_EMERG,
};
static struct rtl_hal_cfg rtl92de_hal_cfg = {
@@ -340,21 +347,21 @@ static struct rtl_hal_cfg rtl92de_hal_cfg = {
.maps[RTL_IMR_ROK] = IMR_ROK,
.maps[RTL_IBSS_INT_MASKS] = (IMR_BcnInt | IMR_TBDOK | IMR_TBDER),
- .maps[RTL_RC_CCK_RATE1M] = DESC92D_RATE1M,
- .maps[RTL_RC_CCK_RATE2M] = DESC92D_RATE2M,
- .maps[RTL_RC_CCK_RATE5_5M] = DESC92D_RATE5_5M,
- .maps[RTL_RC_CCK_RATE11M] = DESC92D_RATE11M,
- .maps[RTL_RC_OFDM_RATE6M] = DESC92D_RATE6M,
- .maps[RTL_RC_OFDM_RATE9M] = DESC92D_RATE9M,
- .maps[RTL_RC_OFDM_RATE12M] = DESC92D_RATE12M,
- .maps[RTL_RC_OFDM_RATE18M] = DESC92D_RATE18M,
- .maps[RTL_RC_OFDM_RATE24M] = DESC92D_RATE24M,
- .maps[RTL_RC_OFDM_RATE36M] = DESC92D_RATE36M,
- .maps[RTL_RC_OFDM_RATE48M] = DESC92D_RATE48M,
- .maps[RTL_RC_OFDM_RATE54M] = DESC92D_RATE54M,
-
- .maps[RTL_RC_HT_RATEMCS7] = DESC92D_RATEMCS7,
- .maps[RTL_RC_HT_RATEMCS15] = DESC92D_RATEMCS15,
+ .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
};
static struct pci_device_id rtl92de_pci_ids[] __devinitdata = {
@@ -373,25 +380,31 @@ MODULE_DESCRIPTION("Realtek 8192DE 802.11n Dual Mac PCI wireless");
MODULE_FIRMWARE("rtlwifi/rtl8192defw.bin");
module_param_named(swenc, rtl92de_mod_params.sw_crypto, bool, 0444);
+module_param_named(debug, rtl92de_mod_params.debug, int, 0444);
module_param_named(ips, rtl92de_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
-MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n");
-MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n");
-MODULE_PARM_DESC(swlps, "using linked sw control power save (default 1"
- " is open)\n");
+MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+
+static const struct dev_pm_ops rtlwifi_pm_ops = {
+ .suspend = rtl_pci_suspend,
+ .resume = rtl_pci_resume,
+ .freeze = rtl_pci_suspend,
+ .thaw = rtl_pci_resume,
+ .poweroff = rtl_pci_suspend,
+ .restore = rtl_pci_resume,
+};
static struct pci_driver rtl92de_driver = {
.name = KBUILD_MODNAME,
.id_table = rtl92de_pci_ids,
.probe = rtl_pci_probe,
.remove = rtl_pci_disconnect,
-
-#ifdef CONFIG_PM
- .suspend = rtl_pci_suspend,
- .resume = rtl_pci_resume,
-#endif
-
+ .driver.pm = &rtlwifi_pm_ops,
};
/* add global spin lock to solve the problem that
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index dc86fcb0b3a..3637c0c3352 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -48,99 +48,6 @@ static u8 _rtl92de_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-static int _rtl92de_rate_mapping(bool isht, u8 desc_rate)
-{
- int rate_idx;
-
- if (false == isht) {
- switch (desc_rate) {
- case DESC92D_RATE1M:
- rate_idx = 0;
- break;
- case DESC92D_RATE2M:
- rate_idx = 1;
- break;
- case DESC92D_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92D_RATE11M:
- rate_idx = 3;
- break;
- case DESC92D_RATE6M:
- rate_idx = 4;
- break;
- case DESC92D_RATE9M:
- rate_idx = 5;
- break;
- case DESC92D_RATE12M:
- rate_idx = 6;
- break;
- case DESC92D_RATE18M:
- rate_idx = 7;
- break;
- case DESC92D_RATE24M:
- rate_idx = 8;
- break;
- case DESC92D_RATE36M:
- rate_idx = 9;
- break;
- case DESC92D_RATE48M:
- rate_idx = 10;
- break;
- case DESC92D_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- return rate_idx;
- } else {
- switch (desc_rate) {
- case DESC92D_RATE1M:
- rate_idx = 0;
- break;
- case DESC92D_RATE2M:
- rate_idx = 1;
- break;
- case DESC92D_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92D_RATE11M:
- rate_idx = 3;
- break;
- case DESC92D_RATE6M:
- rate_idx = 4;
- break;
- case DESC92D_RATE9M:
- rate_idx = 5;
- break;
- case DESC92D_RATE12M:
- rate_idx = 6;
- break;
- case DESC92D_RATE18M:
- rate_idx = 7;
- break;
- case DESC92D_RATE24M:
- rate_idx = 8;
- break;
- case DESC92D_RATE36M:
- rate_idx = 9;
- break;
- case DESC92D_RATE48M:
- rate_idx = 10;
- break;
- case DESC92D_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 11;
- break;
- }
- return rate_idx;
- }
-}
-
static u8 _rtl92d_query_rxpwrpercentage(char antpower)
{
if ((antpower <= -100) || (antpower >= 20))
@@ -328,8 +235,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
- if (pdesc->rxht && pdesc->rxmcs >= DESC92D_RATEMCS8 &&
- pdesc->rxmcs <= DESC92D_RATEMCS15)
+ if (pdesc->rxht && pdesc->rxmcs >= DESC92_RATEMCS8 &&
+ pdesc->rxmcs <= DESC92_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
@@ -609,10 +516,10 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
rx_status->flag |= RX_FLAG_MACTIME_MPDU;
if (stats->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
- rx_status->rate_idx = _rtl92de_rate_mapping((bool)
- GET_RX_DESC_RXHT(pdesc),
- (u8)
- GET_RX_DESC_RXMCS(pdesc));
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw,
+ (bool)GET_RX_DESC_RXHT(pdesc),
+ (u8)GET_RX_DESC_RXMCS(pdesc),
+ (bool)GET_RX_DESC_PAGGR(pdesc));
rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
@@ -705,14 +612,14 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
}
/* 5G have no CCK rate */
if (rtlhal->current_bandtype == BAND_ON_5G)
- if (ptcb_desc->hw_rate < DESC92D_RATE6M)
- ptcb_desc->hw_rate = DESC92D_RATE6M;
+ if (ptcb_desc->hw_rate < DESC92_RATE6M)
+ ptcb_desc->hw_rate = DESC92_RATE6M;
SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
if (rtlhal->macphymode == DUALMAC_DUALPHY &&
- ptcb_desc->hw_rate == DESC92D_RATEMCS7)
+ ptcb_desc->hw_rate == DESC92_RATEMCS7)
SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
@@ -728,13 +635,13 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
/* 5G have no CCK rate */
if (rtlhal->current_bandtype == BAND_ON_5G)
- if (ptcb_desc->rts_rate < DESC92D_RATE6M)
- ptcb_desc->rts_rate = DESC92D_RATE6M;
+ if (ptcb_desc->rts_rate < DESC92_RATE6M)
+ ptcb_desc->rts_rate = DESC92_RATE6M;
SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
SET_TX_DESC_RTS_BW(pdesc, 0);
SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
- DESC92D_RATE54M) ?
+ DESC92_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
(ptcb_desc->rts_use_shortgi ? 1 : 0)));
if (bw_40) {
@@ -844,9 +751,9 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
* The braces are needed no matter what checkpatch says
*/
if (rtlhal->current_bandtype == BAND_ON_5G) {
- SET_TX_DESC_TX_RATE(pdesc, DESC92D_RATE6M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE6M);
} else {
- SET_TX_DESC_TX_RATE(pdesc, DESC92D_RATE1M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
}
SET_TX_DESC_SEQ(pdesc, 0);
SET_TX_DESC_LINIP(pdesc, 0);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
index 992d6766e66..4d55d0b6816 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -537,12 +537,6 @@ do { \
memset((void *)__pdesc, 0, _size); \
} while (0);
-#define RX_HAL_IS_CCK_RATE(_pdesc)\
- (_pdesc->rxmcs == DESC92D_RATE1M || \
- _pdesc->rxmcs == DESC92D_RATE2M || \
- _pdesc->rxmcs == DESC92D_RATE5_5M || \
- _pdesc->rxmcs == DESC92D_RATE11M)
-
/* For 92D early mode */
#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
SET_BITS_OFFSET_LE(__paddr, 0, 3, __value)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
index 69828f2b3fa..c6c044816d3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -33,37 +33,6 @@
#define RX_CMD_QUEUE 1
#define RX_MAX_QUEUE 2
-#define DESC92S_RATE1M 0x00
-#define DESC92S_RATE2M 0x01
-#define DESC92S_RATE5_5M 0x02
-#define DESC92S_RATE11M 0x03
-#define DESC92S_RATE6M 0x04
-#define DESC92S_RATE9M 0x05
-#define DESC92S_RATE12M 0x06
-#define DESC92S_RATE18M 0x07
-#define DESC92S_RATE24M 0x08
-#define DESC92S_RATE36M 0x09
-#define DESC92S_RATE48M 0x0a
-#define DESC92S_RATE54M 0x0b
-#define DESC92S_RATEMCS0 0x0c
-#define DESC92S_RATEMCS1 0x0d
-#define DESC92S_RATEMCS2 0x0e
-#define DESC92S_RATEMCS3 0x0f
-#define DESC92S_RATEMCS4 0x10
-#define DESC92S_RATEMCS5 0x11
-#define DESC92S_RATEMCS6 0x12
-#define DESC92S_RATEMCS7 0x13
-#define DESC92S_RATEMCS8 0x14
-#define DESC92S_RATEMCS9 0x15
-#define DESC92S_RATEMCS10 0x16
-#define DESC92S_RATEMCS11 0x17
-#define DESC92S_RATEMCS12 0x18
-#define DESC92S_RATEMCS13 0x19
-#define DESC92S_RATEMCS14 0x1a
-#define DESC92S_RATEMCS15 0x1b
-#define DESC92S_RATEMCS15_SG 0x1c
-#define DESC92S_RATEMCS32 0x20
-
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
@@ -490,11 +459,11 @@ do { \
#define SET_RX_STATUS__DESC_BUFF_ADDR(__pdesc, __val) \
SET_BITS_OFFSET_LE(__pdesc + 24, 0, 32, __val)
-#define RX_HAL_IS_CCK_RATE(_pdesc)\
- (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92S_RATE1M || \
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92S_RATE2M || \
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92S_RATE5_5M ||\
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92S_RATE11M)
+#define SE_RX_HAL_IS_CCK_RATE(_pdesc)\
+ (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE1M || \
+ GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE2M || \
+ GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE5_5M ||\
+ GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE11M)
enum rf_optype {
RF_OP_BY_SW_3WIRE = 0,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index d59f66cb776..c474486e391 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -1382,7 +1382,7 @@ static void _rtl92se_power_domain_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, LDOA15_CTRL, 0x34);
/* Reset MAC-IO and CPU and Core Digital BIT10/11/15 */
- tmpu1b = rtl_read_byte(rtlpriv, SYS_FUNC_EN + 1);
+ tmpu1b = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
/* If IPS we need to turn LED on. So we not
* not disable BIT 3/7 of reg3. */
@@ -1391,7 +1391,7 @@ static void _rtl92se_power_domain_init(struct ieee80211_hw *hw)
else
tmpu1b &= 0x73;
- rtl_write_byte(rtlpriv, SYS_FUNC_EN + 1, tmpu1b);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmpu1b);
/* wait for BIT 10/11/15 to pull high automatically!! */
mdelay(1);
@@ -1428,15 +1428,15 @@ static void _rtl92se_power_domain_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, LDOA15_CTRL, (tmpu1b | BIT(0)));
/* Set Digital Vdd to Retention isolation Path. */
- tmpu2b = rtl_read_word(rtlpriv, SYS_ISO_CTRL);
- rtl_write_word(rtlpriv, SYS_ISO_CTRL, (tmpu2b | BIT(11)));
+ tmpu2b = rtl_read_word(rtlpriv, REG_SYS_ISO_CTRL);
+ rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, (tmpu2b | BIT(11)));
/* For warm reboot NIC disappera bug. */
- tmpu2b = rtl_read_word(rtlpriv, SYS_FUNC_EN);
- rtl_write_word(rtlpriv, SYS_FUNC_EN, (tmpu2b | BIT(13)));
+ tmpu2b = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (tmpu2b | BIT(13)));
- rtl_write_byte(rtlpriv, SYS_ISO_CTRL + 1, 0x68);
+ rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL + 1, 0x68);
/* Enable AFE PLL Macro Block */
tmpu1b = rtl_read_byte(rtlpriv, AFE_PLL_CTRL);
@@ -1447,17 +1447,17 @@ static void _rtl92se_power_domain_init(struct ieee80211_hw *hw)
mdelay(1);
/* Release isolation AFE PLL & MD */
- rtl_write_byte(rtlpriv, SYS_ISO_CTRL, 0xA6);
+ rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, 0xA6);
/* Enable MAC clock */
tmpu2b = rtl_read_word(rtlpriv, SYS_CLKR);
rtl_write_word(rtlpriv, SYS_CLKR, (tmpu2b | BIT(12) | BIT(11)));
/* Enable Core digital and enable IOREG R/W */
- tmpu2b = rtl_read_word(rtlpriv, SYS_FUNC_EN);
- rtl_write_word(rtlpriv, SYS_FUNC_EN, (tmpu2b | BIT(11)));
+ tmpu2b = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (tmpu2b | BIT(11)));
/* enable REG_EN */
- rtl_write_word(rtlpriv, SYS_FUNC_EN, (tmpu2b | BIT(11) | BIT(15)));
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (tmpu2b | BIT(11) | BIT(15)));
/* Switch the control path. */
tmpu2b = rtl_read_word(rtlpriv, SYS_CLKR);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
index ea32ef2d409..11f125c030c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
@@ -735,6 +735,7 @@
#define HWSET_MAX_SIZE_92S 128
#define EFUSE_MAX_SECTION 16
#define EFUSE_REAL_CONTENT_LEN 512
+#define EFUSE_OOB_PROTECT_BYTES 15
#define RTL8190_EEPROM_ID 0x8129
#define EEPROM_HPON 0x02
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 3876078a63d..3ec9a0d41ba 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -160,10 +160,16 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
rtlpci->first_init = true;
+ /* for debug level */
+ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
/* for LPS & IPS */
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ if (!rtlpriv->psc.inactiveps)
+ pr_info("rtl8192ce: Power Save off (module option)\n");
+ if (!rtlpriv->psc.fwctrl_lps)
+ pr_info("rtl8192ce: FW Power Save off (module option)\n");
rtlpriv->psc.reg_fwctrl_lps = 3;
rtlpriv->psc.reg_max_lps_awakeintvl = 5;
/* for ASPM, you can close aspm through
@@ -268,6 +274,7 @@ static struct rtl_mod_params rtl92se_mod_params = {
.inactiveps = true,
.swctrl_lps = true,
.fwctrl_lps = false,
+ .debug = DBG_EMERG,
};
/* Because memory R/W bursting will cause system hang/crash
@@ -300,6 +307,7 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = {
.maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE_92S,
.maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
.maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+ .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,
.maps[RWCAM] = REG_RWCAM,
.maps[WCAMI] = REG_WCAMI,
@@ -348,21 +356,21 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = {
.maps[RTL_IMR_ROK] = IMR_ROK,
.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
- .maps[RTL_RC_CCK_RATE1M] = DESC92S_RATE1M,
- .maps[RTL_RC_CCK_RATE2M] = DESC92S_RATE2M,
- .maps[RTL_RC_CCK_RATE5_5M] = DESC92S_RATE5_5M,
- .maps[RTL_RC_CCK_RATE11M] = DESC92S_RATE11M,
- .maps[RTL_RC_OFDM_RATE6M] = DESC92S_RATE6M,
- .maps[RTL_RC_OFDM_RATE9M] = DESC92S_RATE9M,
- .maps[RTL_RC_OFDM_RATE12M] = DESC92S_RATE12M,
- .maps[RTL_RC_OFDM_RATE18M] = DESC92S_RATE18M,
- .maps[RTL_RC_OFDM_RATE24M] = DESC92S_RATE24M,
- .maps[RTL_RC_OFDM_RATE36M] = DESC92S_RATE36M,
- .maps[RTL_RC_OFDM_RATE48M] = DESC92S_RATE48M,
- .maps[RTL_RC_OFDM_RATE54M] = DESC92S_RATE54M,
-
- .maps[RTL_RC_HT_RATEMCS7] = DESC92S_RATEMCS7,
- .maps[RTL_RC_HT_RATEMCS15] = DESC92S_RATEMCS15,
+ .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
};
static struct pci_device_id rtl92se_pci_ids[] __devinitdata = {
@@ -378,31 +386,37 @@ MODULE_DEVICE_TABLE(pci, rtl92se_pci_ids);
MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 8192S/8191S 802.11n PCI wireless");
MODULE_FIRMWARE("rtlwifi/rtl8192sefw.bin");
module_param_named(swenc, rtl92se_mod_params.sw_crypto, bool, 0444);
+module_param_named(debug, rtl92se_mod_params.debug, int, 0444);
module_param_named(ips, rtl92se_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
-MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n");
-MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n");
-MODULE_PARM_DESC(swlps, "using linked sw control power save (default 1 is "
- "open)\n");
+MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+static const struct dev_pm_ops rtlwifi_pm_ops = {
+ .suspend = rtl_pci_suspend,
+ .resume = rtl_pci_resume,
+ .freeze = rtl_pci_suspend,
+ .thaw = rtl_pci_resume,
+ .poweroff = rtl_pci_suspend,
+ .restore = rtl_pci_resume,
+};
static struct pci_driver rtl92se_driver = {
.name = KBUILD_MODNAME,
.id_table = rtl92se_pci_ids,
.probe = rtl_pci_probe,
.remove = rtl_pci_disconnect,
-
-#ifdef CONFIG_PM
- .suspend = rtl_pci_suspend,
- .resume = rtl_pci_resume,
-#endif
-
+ .driver.pm = &rtlwifi_pm_ops,
};
static int __init rtl92se_module_init(void)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index cffe30851f7..fbebe3ea0a2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -51,104 +51,6 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
return skb->priority;
}
-static int _rtl92se_rate_mapping(bool isht, u8 desc_rate, bool first_ampdu)
-{
- int rate_idx = 0;
-
- if (first_ampdu) {
- if (false == isht) {
- switch (desc_rate) {
- case DESC92S_RATE1M:
- rate_idx = 0;
- break;
- case DESC92S_RATE2M:
- rate_idx = 1;
- break;
- case DESC92S_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92S_RATE11M:
- rate_idx = 3;
- break;
- case DESC92S_RATE6M:
- rate_idx = 4;
- break;
- case DESC92S_RATE9M:
- rate_idx = 5;
- break;
- case DESC92S_RATE12M:
- rate_idx = 6;
- break;
- case DESC92S_RATE18M:
- rate_idx = 7;
- break;
- case DESC92S_RATE24M:
- rate_idx = 8;
- break;
- case DESC92S_RATE36M:
- rate_idx = 9;
- break;
- case DESC92S_RATE48M:
- rate_idx = 10;
- break;
- case DESC92S_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- } else {
- rate_idx = 11;
- }
-
- return rate_idx;
- }
-
- switch (desc_rate) {
- case DESC92S_RATE1M:
- rate_idx = 0;
- break;
- case DESC92S_RATE2M:
- rate_idx = 1;
- break;
- case DESC92S_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92S_RATE11M:
- rate_idx = 3;
- break;
- case DESC92S_RATE6M:
- rate_idx = 4;
- break;
- case DESC92S_RATE9M:
- rate_idx = 5;
- break;
- case DESC92S_RATE12M:
- rate_idx = 6;
- break;
- case DESC92S_RATE18M:
- rate_idx = 7;
- break;
- case DESC92S_RATE24M:
- rate_idx = 8;
- break;
- case DESC92S_RATE36M:
- rate_idx = 9;
- break;
- case DESC92S_RATE48M:
- rate_idx = 10;
- break;
- case DESC92S_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 11;
- break;
- }
- return rate_idx;
-}
-
static u8 _rtl92s_query_rxpwrpercentage(char antpower)
{
if ((antpower <= -100) || (antpower >= 20))
@@ -222,18 +124,15 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
u8 i, max_spatial_stream;
u32 rssi, total_rssi = 0;
bool in_powersavemode = false;
- bool is_cck_rate;
+ bool is_cck = pstats->is_cck;
- is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
pstats->packet_matchbssid = packet_match_bssid;
pstats->packet_toself = packet_toself;
- pstats->is_cck = is_cck_rate;
pstats->packet_beacon = packet_beacon;
- pstats->is_cck = is_cck_rate;
pstats->rx_mimo_signalquality[0] = -1;
pstats->rx_mimo_signalquality[1] = -1;
- if (is_cck_rate) {
+ if (is_cck) {
u8 report, cck_highpwr;
cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
@@ -344,9 +243,8 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
- if (GET_RX_STATUS_DESC_RX_HT(pdesc) &&
- GET_RX_STATUS_DESC_RX_MCS(pdesc) >= DESC92S_RATEMCS8 &&
- GET_RX_STATUS_DESC_RX_MCS(pdesc) <= DESC92S_RATEMCS15)
+ if (pstats->is_ht && pstats->rate >= DESC92_RATEMCS8 &&
+ pstats->rate <= DESC92_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
@@ -364,7 +262,7 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
}
}
- if (is_cck_rate)
+ if (is_cck)
pstats->signalstrength = (u8)(_rtl92se_signal_scale_mapping(hw,
pwdb_all));
else if (rf_rx_num != 0)
@@ -616,6 +514,8 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
{
struct rx_fwinfo *p_drvinfo;
u32 phystatus = (u32)GET_RX_STATUS_DESC_PHY_STATUS(pdesc);
+ struct ieee80211_hdr *hdr;
+ bool first_ampdu = false;
stats->length = (u16)GET_RX_STATUS_DESC_PKT_LEN(pdesc);
stats->rx_drvinfo_size = (u8)GET_RX_STATUS_DESC_DRVINFO_SIZE(pdesc) * 8;
@@ -628,8 +528,12 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
stats->rate = (u8)GET_RX_STATUS_DESC_RX_MCS(pdesc);
stats->shortpreamble = (u16)GET_RX_STATUS_DESC_SPLCP(pdesc);
stats->isampdu = (bool)(GET_RX_STATUS_DESC_PAGGR(pdesc) == 1);
+ stats->isfirst_ampdu = (bool) ((GET_RX_STATUS_DESC_PAGGR(pdesc) == 1)
+ && (GET_RX_STATUS_DESC_FAGGR(pdesc) == 1));
stats->timestamp_low = GET_RX_STATUS_DESC_TSFL(pdesc);
stats->rx_is40Mhzpacket = (bool)GET_RX_STATUS_DESC_BW(pdesc);
+ stats->is_ht = (bool)GET_RX_STATUS_DESC_RX_HT(pdesc);
+ stats->is_cck = SE_RX_HAL_IS_CCK_RATE(pdesc);
if (stats->hwerror)
return false;
@@ -637,30 +541,39 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
rx_status->freq = hw->conf.channel->center_freq;
rx_status->band = hw->conf.channel->band;
- if (GET_RX_STATUS_DESC_CRC32(pdesc))
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ hdr = (struct ieee80211_hdr *)(skb->data + stats->rx_drvinfo_size
+ + stats->rx_bufshift);
- if (!GET_RX_STATUS_DESC_SWDEC(pdesc))
- rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (stats->crc)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (GET_RX_STATUS_DESC_BW(pdesc))
+ if (stats->rx_is40Mhzpacket)
rx_status->flag |= RX_FLAG_40MHZ;
- if (GET_RX_STATUS_DESC_RX_HT(pdesc))
+ if (stats->is_ht)
rx_status->flag |= RX_FLAG_HT;
rx_status->flag |= RX_FLAG_MACTIME_MPDU;
- if (stats->decrypted)
- rx_status->flag |= RX_FLAG_DECRYPTED;
-
- rx_status->rate_idx = _rtl92se_rate_mapping((bool)
- GET_RX_STATUS_DESC_RX_HT(pdesc),
- (u8)GET_RX_STATUS_DESC_RX_MCS(pdesc),
- (bool)GET_RX_STATUS_DESC_PAGGR(pdesc));
+ /* hw will set stats->decrypted true, if it finds the
+ * frame is open data frame or mgmt frame,
+ * hw will not decrypt robust managment frame
+ * for IEEE80211w but still set stats->decrypted
+ * true, so here we should set it back to undecrypted
+ * for IEEE80211w frame, and mac80211 sw will help
+ * to decrypt it */
+ if (stats->decrypted) {
+ if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ (ieee80211_has_protected(hdr->frame_control)))
+ rx_status->flag &= ~RX_FLAG_DECRYPTED;
+ else
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ }
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw,
+ stats->is_ht, stats->rate, first_ampdu);
- rx_status->mactime = GET_RX_STATUS_DESC_TSFL(pdesc);
+ rx_status->mactime = stats->timestamp_low;
if (phystatus) {
p_drvinfo = (struct rx_fwinfo *)(skb->data +
stats->rx_bufshift);
@@ -723,14 +636,14 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RSVD_MACID(pdesc, reserved_macid);
SET_TX_DESC_TXHT(pdesc, ((ptcb_desc->hw_rate >=
- DESC92S_RATEMCS0) ? 1 : 0));
+ DESC92_RATEMCS0) ? 1 : 0));
if (rtlhal->version == VERSION_8192S_ACUT) {
- if (ptcb_desc->hw_rate == DESC92S_RATE1M ||
- ptcb_desc->hw_rate == DESC92S_RATE2M ||
- ptcb_desc->hw_rate == DESC92S_RATE5_5M ||
- ptcb_desc->hw_rate == DESC92S_RATE11M) {
- ptcb_desc->hw_rate = DESC92S_RATE12M;
+ if (ptcb_desc->hw_rate == DESC92_RATE1M ||
+ ptcb_desc->hw_rate == DESC92_RATE2M ||
+ ptcb_desc->hw_rate == DESC92_RATE5_5M ||
+ ptcb_desc->hw_rate == DESC92_RATE11M) {
+ ptcb_desc->hw_rate = DESC92_RATE12M;
}
}
@@ -759,7 +672,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_BANDWIDTH(pdesc, 0);
SET_TX_DESC_RTS_SUB_CARRIER(pdesc, ptcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
- DESC92S_RATE54M) ?
+ DESC92_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0)
: (ptcb_desc->rts_use_shortgi ? 1 : 0)));
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 4bf3cf457ef..b42c2e2b205 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -191,44 +191,6 @@ static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
_usb_write_async(to_usb_device(dev), addr, val, 4);
}
-static int _usb_nbytes_read_write(struct usb_device *udev, bool read, u32 addr,
- u16 len, u8 *pdata)
-{
- int status;
- u8 request;
- u16 wvalue;
- u16 index;
-
- request = REALTEK_USB_VENQT_CMD_REQ;
- index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
- wvalue = (u16)addr;
- if (read)
- status = _usbctrl_vendorreq_sync_read(udev, request, wvalue,
- index, pdata, len);
- else
- status = _usbctrl_vendorreq_async_write(udev, request, wvalue,
- index, pdata, len);
- return status;
-}
-
-static int _usb_readN_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len,
- u8 *pdata)
-{
- struct device *dev = rtlpriv->io.dev;
-
- return _usb_nbytes_read_write(to_usb_device(dev), true, addr, len,
- pdata);
-}
-
-static int _usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, u16 len,
- u8 *pdata)
-{
- struct device *dev = rtlpriv->io.dev;
-
- return _usb_nbytes_read_write(to_usb_device(dev), false, addr, len,
- pdata);
-}
-
static void _rtl_usb_io_handler_init(struct device *dev,
struct ieee80211_hw *hw)
{
@@ -239,11 +201,9 @@ static void _rtl_usb_io_handler_init(struct device *dev,
rtlpriv->io.write8_async = _usb_write8_async;
rtlpriv->io.write16_async = _usb_write16_async;
rtlpriv->io.write32_async = _usb_write32_async;
- rtlpriv->io.writeN_async = _usb_writeN_async;
rtlpriv->io.read8_sync = _usb_read8_sync;
rtlpriv->io.read16_sync = _usb_read16_sync;
rtlpriv->io.read32_sync = _usb_read32_sync;
- rtlpriv->io.readN_sync = _usb_readN_sync;
}
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index d3c3ffd3898..713c7ddba8e 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -165,6 +165,12 @@ enum hardware_type {
#define IS_HARDWARE_TYPE_8723U(rtlhal) \
(rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
+#define RX_HAL_IS_CCK_RATE(_pdesc)\
+ (_pdesc->rxmcs == DESC92_RATE1M || \
+ _pdesc->rxmcs == DESC92_RATE2M || \
+ _pdesc->rxmcs == DESC92_RATE5_5M || \
+ _pdesc->rxmcs == DESC92_RATE11M)
+
enum scan_operation_backup_opt {
SCAN_OPT_BACKUP = 0,
SCAN_OPT_RESTORE,
@@ -386,6 +392,41 @@ enum rtl_hal_state {
_HAL_STATE_START = 1,
};
+enum rtl_desc92_rate {
+ DESC92_RATE1M = 0x00,
+ DESC92_RATE2M = 0x01,
+ DESC92_RATE5_5M = 0x02,
+ DESC92_RATE11M = 0x03,
+
+ DESC92_RATE6M = 0x04,
+ DESC92_RATE9M = 0x05,
+ DESC92_RATE12M = 0x06,
+ DESC92_RATE18M = 0x07,
+ DESC92_RATE24M = 0x08,
+ DESC92_RATE36M = 0x09,
+ DESC92_RATE48M = 0x0a,
+ DESC92_RATE54M = 0x0b,
+
+ DESC92_RATEMCS0 = 0x0c,
+ DESC92_RATEMCS1 = 0x0d,
+ DESC92_RATEMCS2 = 0x0e,
+ DESC92_RATEMCS3 = 0x0f,
+ DESC92_RATEMCS4 = 0x10,
+ DESC92_RATEMCS5 = 0x11,
+ DESC92_RATEMCS6 = 0x12,
+ DESC92_RATEMCS7 = 0x13,
+ DESC92_RATEMCS8 = 0x14,
+ DESC92_RATEMCS9 = 0x15,
+ DESC92_RATEMCS10 = 0x16,
+ DESC92_RATEMCS11 = 0x17,
+ DESC92_RATEMCS12 = 0x18,
+ DESC92_RATEMCS13 = 0x19,
+ DESC92_RATEMCS14 = 0x1a,
+ DESC92_RATEMCS15 = 0x1b,
+ DESC92_RATEMCS15_SG = 0x1c,
+ DESC92_RATEMCS32 = 0x20,
+};
+
enum rtl_var_map {
/*reg map */
SYS_ISO_CTRL = 0,
@@ -409,6 +450,7 @@ enum rtl_var_map {
EFUSE_HWSET_MAX_SIZE,
EFUSE_MAX_SECTION_MAP,
EFUSE_REAL_CONTENT_SIZE,
+ EFUSE_OOB_PROTECT_BYTES_LEN,
/*CAM map */
RWCAM,
@@ -901,16 +943,12 @@ struct rtl_io {
unsigned long pci_base_addr; /*device I/O address */
void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
- void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
- void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
- int (*writeN_async) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
- u8 *pdata);
+ void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, __le16 val);
+ void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, __le32 val);
u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr);
- int (*readN_sync) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
- u8 *pdata);
};
@@ -1287,6 +1325,7 @@ struct rtl_stats {
s8 rx_mimo_signalquality[2];
bool packet_matchbssid;
bool is_cck;
+ bool is_ht;
bool packet_toself;
bool packet_beacon; /*for rssi */
char cck_adc_pwdb[4]; /*for rx path selection */
@@ -1448,6 +1487,9 @@ struct rtl_mod_params {
/* default: 0 = using hardware encryption */
int sw_crypto;
+ /* default: 0 = DBG_EMERG (0)*/
+ int debug;
+
/* default: 1 = using no linked power save */
bool inactiveps;
diff --git a/drivers/net/wireless/wl1251/cmd.h b/drivers/net/wireless/wl1251/cmd.h
index 79ca5273c9e..ee4f2b39182 100644
--- a/drivers/net/wireless/wl1251/cmd.h
+++ b/drivers/net/wireless/wl1251/cmd.h
@@ -269,7 +269,7 @@ struct cmd_join {
u8 bss_type;
u8 channel;
u8 ssid_len;
- u8 ssid[IW_ESSID_MAX_SIZE];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 ctrl; /* JOIN_CMD_CTRL_* */
u8 tx_mgt_frame_rate; /* OBSOLETE */
u8 tx_mgt_frame_mod; /* OBSOLETE */
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index a14a48c99cd..ba3268ea81f 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -1158,7 +1158,8 @@ static struct ieee80211_channel wl1251_channels[] = {
{ .hw_value = 13, .center_freq = 2472},
};
-static int wl1251_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+static int wl1251_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
enum wl1251_acx_ps_scheme ps_scheme;
diff --git a/drivers/net/wireless/wl1251/wl12xx_80211.h b/drivers/net/wireless/wl1251/wl12xx_80211.h
index 1417b1445c3..04ed5149577 100644
--- a/drivers/net/wireless/wl1251/wl12xx_80211.h
+++ b/drivers/net/wireless/wl1251/wl12xx_80211.h
@@ -76,7 +76,7 @@ struct wl12xx_ie_header {
struct wl12xx_ie_ssid {
struct wl12xx_ie_header header;
- char ssid[IW_ESSID_MAX_SIZE];
+ char ssid[IEEE80211_MAX_SSID_LEN];
} __packed;
struct wl12xx_ie_rates {
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 07bcb1548d8..3fe388b87c2 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -19,16 +19,6 @@ config WL12XX
If you choose to build a module, it will be called wl12xx. Say N if
unsure.
-config WL12XX_HT
- bool "TI wl12xx 802.11 HT support (EXPERIMENTAL)"
- depends on WL12XX && EXPERIMENTAL
- default n
- ---help---
- This will enable 802.11 HT support in the wl12xx module.
-
- That configuration is temporary due to the code incomplete and
- still in testing process.
-
config WL12XX_SPI
tristate "TI wl12xx SPI support"
depends on WL12XX && SPI_MASTER
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 521c0414e52..621b3483ca2 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -1,16 +1,16 @@
wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
boot.o init.o debugfs.o scan.o
-wl12xx_spi-objs = spi.o
+wl12xx_spi-objs = spi.o
wl12xx_sdio-objs = sdio.o
-wl12xx_sdio_test-objs = sdio_test.o
+wl12xx_sdio_test-objs = sdio_test.o
wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
obj-$(CONFIG_WL12XX) += wl12xx.o
obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o
obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o
-obj-$(CONFIG_WL12XX_SDIO_TEST) += wl12xx_sdio_test.o
+obj-$(CONFIG_WL12XX_SDIO_TEST) += wl12xx_sdio_test.o
# small builtin driver bit
obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index 34f6ab53e51..ca044a74319 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -46,6 +46,7 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
goto out;
}
+ wake_up->role_id = wl->role_id;
wake_up->wake_up_event = wl->conf.conn.wake_up_event;
wake_up->listen_interval = wl->conf.conn.listen_interval;
@@ -99,6 +100,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
goto out;
}
+ acx->role_id = wl->role_id;
acx->current_tx_power = power * 10;
ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
@@ -126,6 +128,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl)
}
/* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
+ feature->role_id = wl->role_id;
feature->data_flow_options = 0;
feature->options = 0;
@@ -181,34 +184,6 @@ out:
return ret;
}
-int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter)
-{
- struct acx_rx_config *rx_config;
- int ret;
-
- wl1271_debug(DEBUG_ACX, "acx rx config");
-
- rx_config = kzalloc(sizeof(*rx_config), GFP_KERNEL);
- if (!rx_config) {
- ret = -ENOMEM;
- goto out;
- }
-
- rx_config->config_options = cpu_to_le32(config);
- rx_config->filter_options = cpu_to_le32(filter);
-
- ret = wl1271_cmd_configure(wl, ACX_RX_CFG,
- rx_config, sizeof(*rx_config));
- if (ret < 0) {
- wl1271_warning("failed to set rx config: %d", ret);
- goto out;
- }
-
-out:
- kfree(rx_config);
- return ret;
-}
-
int wl1271_acx_pd_threshold(struct wl1271 *wl)
{
struct acx_packet_detection *pd;
@@ -248,6 +223,7 @@ int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
goto out;
}
+ slot->role_id = wl->role_id;
slot->wone_index = STATION_WONE_INDEX;
slot->slot_time = slot_time;
@@ -277,6 +253,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
}
/* MAC filtering */
+ acx->role_id = wl->role_id;
acx->enabled = enable;
acx->num_groups = mc_list_len;
memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
@@ -306,6 +283,7 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
wl1271_debug(DEBUG_ACX, "acx service period timeout");
+ rx_timeout->role_id = wl->role_id;
rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
@@ -342,6 +320,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
goto out;
}
+ rts->role_id = wl->role_id;
rts->threshold = cpu_to_le16((u16)rts_threshold);
ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
@@ -401,6 +380,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
goto out;
}
+ beacon_filter->role_id = wl->role_id;
beacon_filter->enable = enable_filter;
/*
@@ -437,6 +417,7 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
}
/* configure default beacon pass-through rules */
+ ie_table->role_id = wl->role_id;
ie_table->num_ie = 0;
for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
@@ -498,6 +479,7 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
timeout = wl->conf.conn.bss_lose_timeout;
}
+ acx->role_id = wl->role_id;
acx->synch_fail_thold = cpu_to_le32(threshold);
acx->bss_lose_timeout = cpu_to_le32(timeout);
@@ -544,43 +526,13 @@ out:
return ret;
}
-int wl1271_acx_sta_sg_cfg(struct wl1271 *wl)
-{
- struct acx_sta_bt_wlan_coex_param *param;
- struct conf_sg_settings *c = &wl->conf.sg;
- int i, ret;
-
- wl1271_debug(DEBUG_ACX, "acx sg sta cfg");
-
- param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (!param) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* BT-WLAN coext parameters */
- for (i = 0; i < CONF_SG_STA_PARAMS_MAX; i++)
- param->params[i] = cpu_to_le32(c->sta_params[i]);
- param->param_idx = CONF_SG_PARAMS_ALL;
-
- ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
- if (ret < 0) {
- wl1271_warning("failed to set sg config: %d", ret);
- goto out;
- }
-
-out:
- kfree(param);
- return ret;
-}
-
-int wl1271_acx_ap_sg_cfg(struct wl1271 *wl)
+int wl12xx_acx_sg_cfg(struct wl1271 *wl)
{
- struct acx_ap_bt_wlan_coex_param *param;
+ struct acx_bt_wlan_coex_param *param;
struct conf_sg_settings *c = &wl->conf.sg;
int i, ret;
- wl1271_debug(DEBUG_ACX, "acx sg ap cfg");
+ wl1271_debug(DEBUG_ACX, "acx sg cfg");
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param) {
@@ -589,8 +541,8 @@ int wl1271_acx_ap_sg_cfg(struct wl1271 *wl)
}
/* BT-WLAN coext parameters */
- for (i = 0; i < CONF_SG_AP_PARAMS_MAX; i++)
- param->params[i] = cpu_to_le32(c->ap_params[i]);
+ for (i = 0; i < CONF_SG_PARAMS_MAX; i++)
+ param->params[i] = cpu_to_le32(c->params[i]);
param->param_idx = CONF_SG_PARAMS_ALL;
ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
@@ -643,6 +595,7 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
goto out;
}
+ bb->role_id = wl->role_id;
bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
@@ -672,6 +625,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
goto out;
}
+ acx_aid->role_id = wl->role_id;
acx_aid->aid = cpu_to_le16(aid);
ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
@@ -727,6 +681,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
goto out;
}
+ acx->role_id = wl->role_id;
acx->preamble = preamble;
ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
@@ -754,6 +709,7 @@ int wl1271_acx_cts_protect(struct wl1271 *wl,
goto out;
}
+ acx->role_id = wl->role_id;
acx->ctsprotect = ctsprotect;
ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
@@ -785,9 +741,8 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
{
- struct acx_sta_rate_policy *acx;
+ struct acx_rate_policy *acx;
struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
- int idx = 0;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -799,25 +754,46 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
goto out;
}
+ wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
+ wl->basic_rate, wl->rate_set);
+
/* configure one basic rate class */
- idx = ACX_TX_BASIC_RATE;
- acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate);
- acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
- acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
- acx->rate_class[idx].aflags = c->aflags;
+ acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE);
+ acx->rate_policy.enabled_rates = cpu_to_le32(wl->basic_rate);
+ acx->rate_policy.short_retry_limit = c->short_retry_limit;
+ acx->rate_policy.long_retry_limit = c->long_retry_limit;
+ acx->rate_policy.aflags = c->aflags;
+
+ ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("Setting of rate policies failed: %d", ret);
+ goto out;
+ }
/* configure one AP supported rate class */
- idx = ACX_TX_AP_FULL_RATE;
- acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->rate_set);
- acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
- acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
- acx->rate_class[idx].aflags = c->aflags;
+ acx->rate_policy_idx = cpu_to_le32(ACX_TX_AP_FULL_RATE);
+ acx->rate_policy.enabled_rates = cpu_to_le32(wl->rate_set);
+ acx->rate_policy.short_retry_limit = c->short_retry_limit;
+ acx->rate_policy.long_retry_limit = c->long_retry_limit;
+ acx->rate_policy.aflags = c->aflags;
- acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
+ ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("Setting of rate policies failed: %d", ret);
+ goto out;
+ }
- wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
- acx->rate_class[ACX_TX_BASIC_RATE].enabled_rates,
- acx->rate_class[ACX_TX_AP_FULL_RATE].enabled_rates);
+ /*
+ * configure one rate class for basic p2p operations.
+ * (p2p packets should always go out with OFDM rates, even
+ * if we are currently connected to 11b AP)
+ */
+ acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE_P2P);
+ acx->rate_policy.enabled_rates =
+ cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P);
+ acx->rate_policy.short_retry_limit = c->short_retry_limit;
+ acx->rate_policy.long_retry_limit = c->long_retry_limit;
+ acx->rate_policy.aflags = c->aflags;
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
@@ -833,7 +809,7 @@ out:
int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
u8 idx)
{
- struct acx_ap_rate_policy *acx;
+ struct acx_rate_policy *acx;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx ap rate policy %d rates 0x%x",
@@ -879,6 +855,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
goto out;
}
+ acx->role_id = wl->role_id;
acx->ac = ac;
acx->cw_min = cw_min;
acx->cw_max = cpu_to_le16(cw_max);
@@ -912,6 +889,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
goto out;
}
+ acx->role_id = wl->role_id;
acx->queue_id = queue_id;
acx->channel_type = channel_type;
acx->tsid = tsid;
@@ -991,52 +969,9 @@ out:
return ret;
}
-int wl1271_acx_ap_mem_cfg(struct wl1271 *wl)
+int wl12xx_acx_mem_cfg(struct wl1271 *wl)
{
- struct wl1271_acx_ap_config_memory *mem_conf;
- struct conf_memory_settings *mem;
- int ret;
-
- wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
-
- mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
- if (!mem_conf) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (wl->chip.id == CHIP_ID_1283_PG20)
- /*
- * FIXME: The 128x AP FW does not yet support dynamic memory.
- * Use the base memory configuration for 128x for now. This
- * should be fine tuned in the future.
- */
- mem = &wl->conf.mem_wl128x;
- else
- mem = &wl->conf.mem_wl127x;
-
- /* memory config */
- mem_conf->num_stations = mem->num_stations;
- mem_conf->rx_mem_block_num = mem->rx_block_num;
- mem_conf->tx_min_mem_block_num = mem->tx_min_block_num;
- mem_conf->num_ssid_profiles = mem->ssid_profiles;
- mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
-
- ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
- sizeof(*mem_conf));
- if (ret < 0) {
- wl1271_warning("wl1271 mem config failed: %d", ret);
- goto out;
- }
-
-out:
- kfree(mem_conf);
- return ret;
-}
-
-int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
-{
- struct wl1271_acx_sta_config_memory *mem_conf;
+ struct wl12xx_acx_config_memory *mem_conf;
struct conf_memory_settings *mem;
int ret;
@@ -1179,6 +1114,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
goto out;
}
+ acx->role_id = wl->role_id;
acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
@@ -1206,6 +1142,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
goto out;
}
+ acx->role_id = wl->role_id;
acx->version = ACX_IPV4_VERSION;
acx->enable = enable;
@@ -1265,6 +1202,7 @@ int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
goto out;
}
+ acx->role_id = wl->role_id;
acx->enabled = enable;
ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
@@ -1291,6 +1229,7 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
goto out;
}
+ acx->role_id = wl->role_id;
acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
acx->index = index;
acx->tpl_validation = tpl_valid;
@@ -1324,6 +1263,7 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
wl->last_rssi_event = -1;
+ acx->role_id = wl->role_id;
acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
@@ -1362,6 +1302,7 @@ int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
goto out;
}
+ acx->role_id = wl->role_id;
acx->rssi_beacon = c->avg_weight_rssi_beacon;
acx->rssi_data = c->avg_weight_rssi_data;
acx->snr_beacon = c->avg_weight_snr_beacon;
@@ -1380,14 +1321,15 @@ out:
int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
struct ieee80211_sta_ht_cap *ht_cap,
- bool allow_ht_operation)
+ bool allow_ht_operation, u8 hlid)
{
struct wl1271_acx_ht_capabilities *acx;
- u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
int ret = 0;
u32 ht_capabilites = 0;
- wl1271_debug(DEBUG_ACX, "acx ht capabilities setting");
+ wl1271_debug(DEBUG_ACX, "acx ht capabilities setting "
+ "sta supp: %d sta cap: %d", ht_cap->ht_supported,
+ ht_cap->cap);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
@@ -1395,26 +1337,22 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
goto out;
}
- /* Allow HT Operation ? */
- if (allow_ht_operation) {
- ht_capabilites =
- WL1271_ACX_FW_CAP_HT_OPERATION;
- if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
- ht_capabilites |=
- WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
- if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
- ht_capabilites |=
- WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
- if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
- ht_capabilites |=
- WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
+ if (allow_ht_operation && ht_cap->ht_supported) {
+ /* no need to translate capabilities - use the spec values */
+ ht_capabilites = ht_cap->cap;
+
+ /*
+ * this bit is not employed by the spec but only by FW to
+ * indicate peer HT support
+ */
+ ht_capabilites |= WL12XX_HT_CAP_HT_OPERATION;
/* get data from A-MPDU parameters field */
acx->ampdu_max_length = ht_cap->ampdu_factor;
acx->ampdu_min_spacing = ht_cap->ampdu_density;
}
- memcpy(acx->mac_address, mac_address, ETH_ALEN);
+ acx->hlid = hlid;
acx->ht_capabilites = cpu_to_le32(ht_capabilites);
ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
@@ -1442,6 +1380,7 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
goto out;
}
+ acx->role_id = wl->role_id;
acx->ht_protection =
(u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
acx->rifs_mode = 0;
@@ -1463,14 +1402,12 @@ out:
}
/* Configure BA session initiator/receiver parameters setting in the FW. */
-int wl1271_acx_set_ba_session(struct wl1271 *wl,
- enum ieee80211_back_parties direction,
- u8 tid_index, u8 policy)
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
{
- struct wl1271_acx_ba_session_policy *acx;
+ struct wl1271_acx_ba_initiator_policy *acx;
int ret;
- wl1271_debug(DEBUG_ACX, "acx ba session setting");
+ wl1271_debug(DEBUG_ACX, "acx ba initiator policy");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
@@ -1478,33 +1415,18 @@ int wl1271_acx_set_ba_session(struct wl1271 *wl,
goto out;
}
- /* ANY role */
- acx->role_id = 0xff;
- acx->tid = tid_index;
- acx->enable = policy;
- acx->ba_direction = direction;
-
- switch (direction) {
- case WLAN_BACK_INITIATOR:
- acx->win_size = wl->conf.ht.tx_ba_win_size;
- acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
- break;
- case WLAN_BACK_RECIPIENT:
- acx->win_size = RX_BA_WIN_SIZE;
- acx->inactivity_timeout = 0;
- break;
- default:
- wl1271_error("Incorrect acx command id=%x\n", direction);
- ret = -EINVAL;
- goto out;
- }
+ /* set for the current role */
+ acx->role_id = wl->role_id;
+ acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap;
+ acx->win_size = wl->conf.ht.tx_ba_win_size;
+ acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
ret = wl1271_cmd_configure(wl,
- ACX_BA_SESSION_POLICY_CFG,
+ ACX_BA_SESSION_INIT_POLICY,
acx,
sizeof(*acx));
if (ret < 0) {
- wl1271_warning("acx ba session setting failed: %d", ret);
+ wl1271_warning("acx ba initiator policy failed: %d", ret);
goto out;
}
@@ -1514,8 +1436,8 @@ out:
}
/* setup BA session receiver setting in the FW. */
-int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
- bool enable)
+int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
+ u16 ssn, bool enable, u8 peer_hlid)
{
struct wl1271_acx_ba_receiver_setup *acx;
int ret;
@@ -1528,11 +1450,10 @@ int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
goto out;
}
- /* Single link for now */
- acx->link_id = 1;
+ acx->hlid = peer_hlid;
acx->tid = tid_index;
acx->enable = enable;
- acx->win_size = 0;
+ acx->win_size = wl->conf.ht.rx_ba_win_size;
acx->ssn = ssn;
ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
@@ -1602,6 +1523,7 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
if (!(conf_queues & BIT(i)))
continue;
+ rx_streaming->role_id = wl->role_id;
rx_streaming->tid = i;
rx_streaming->enable = enable_queues & BIT(i);
rx_streaming->period = wl->conf.rx_streaming.interval;
@@ -1631,6 +1553,7 @@ int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
if (!acx)
return -ENOMEM;
+ acx->role_id = wl->role_id;
acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
@@ -1699,31 +1622,6 @@ out:
return ret;
}
-int wl1271_acx_set_ap_beacon_filter(struct wl1271 *wl, bool enable)
-{
- struct acx_ap_beacon_filter *acx = NULL;
- int ret;
-
- wl1271_debug(DEBUG_ACX, "acx set ap beacon filter: %d", enable);
-
- acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx)
- return -ENOMEM;
-
- acx->enable = enable ? 1 : 0;
-
- ret = wl1271_cmd_configure(wl, ACX_AP_BEACON_FILTER_OPT,
- acx, sizeof(*acx));
- if (ret < 0) {
- wl1271_warning("acx set ap beacon filter failed: %d", ret);
- goto out;
- }
-
-out:
- kfree(acx);
- return ret;
-}
-
int wl1271_acx_fm_coex(struct wl1271 *wl)
{
struct wl1271_acx_fm_coex *acx;
@@ -1763,3 +1661,85 @@ out:
kfree(acx);
return ret;
}
+
+int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl)
+{
+ struct wl12xx_acx_set_rate_mgmt_params *acx = NULL;
+ struct conf_rate_policy_settings *conf = &wl->conf.rate;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx set rate mgmt params");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+
+ acx->index = ACX_RATE_MGMT_ALL_PARAMS;
+ acx->rate_retry_score = cpu_to_le16(conf->rate_retry_score);
+ acx->per_add = cpu_to_le16(conf->per_add);
+ acx->per_th1 = cpu_to_le16(conf->per_th1);
+ acx->per_th2 = cpu_to_le16(conf->per_th2);
+ acx->max_per = cpu_to_le16(conf->max_per);
+ acx->inverse_curiosity_factor = conf->inverse_curiosity_factor;
+ acx->tx_fail_low_th = conf->tx_fail_low_th;
+ acx->tx_fail_high_th = conf->tx_fail_high_th;
+ acx->per_alpha_shift = conf->per_alpha_shift;
+ acx->per_add_shift = conf->per_add_shift;
+ acx->per_beta1_shift = conf->per_beta1_shift;
+ acx->per_beta2_shift = conf->per_beta2_shift;
+ acx->rate_check_up = conf->rate_check_up;
+ acx->rate_check_down = conf->rate_check_down;
+ memcpy(acx->rate_retry_policy, conf->rate_retry_policy,
+ sizeof(acx->rate_retry_policy));
+
+ ret = wl1271_cmd_configure(wl, ACX_SET_RATE_MGMT_PARAMS,
+ acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx set rate mgmt params failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl12xx_acx_config_hangover(struct wl1271 *wl)
+{
+ struct wl12xx_acx_config_hangover *acx;
+ struct conf_hangover_settings *conf = &wl->conf.hangover;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx config hangover");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->recover_time = cpu_to_le32(conf->recover_time);
+ acx->hangover_period = conf->hangover_period;
+ acx->dynamic_mode = conf->dynamic_mode;
+ acx->early_termination_mode = conf->early_termination_mode;
+ acx->max_period = conf->max_period;
+ acx->min_period = conf->min_period;
+ acx->increase_delta = conf->increase_delta;
+ acx->decrease_delta = conf->decrease_delta;
+ acx->quiet_time = conf->quiet_time;
+ acx->increase_time = conf->increase_time;
+ acx->window_size = acx->window_size;
+
+ ret = wl1271_cmd_configure(wl, ACX_CONFIG_HANGOVER, acx,
+ sizeof(*acx));
+
+ if (ret < 0) {
+ wl1271_warning("acx config hangover failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+
+}
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index d2eb86eccc0..e3f93b4b342 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -101,6 +101,17 @@ struct acx_error_counter {
__le32 seq_num_miss;
} __packed;
+enum wl12xx_role {
+ WL1271_ROLE_STA = 0,
+ WL1271_ROLE_IBSS,
+ WL1271_ROLE_AP,
+ WL1271_ROLE_DEVICE,
+ WL1271_ROLE_P2P_CL,
+ WL1271_ROLE_P2P_GO,
+
+ WL12XX_INVALID_ROLE_TYPE = 0xff
+};
+
enum wl1271_psm_mode {
/* Active mode */
WL1271_PSM_CAM = 0,
@@ -160,94 +171,6 @@ struct acx_rx_msdu_lifetime {
__le32 lifetime;
} __packed;
-/*
- * RX Config Options Table
- * Bit Definition
- * === ==========
- * 31:14 Reserved
- * 13 Copy RX Status - when set, write three receive status words
- * to top of rx'd MPDUs.
- * When cleared, do not write three status words (added rev 1.5)
- * 12 Reserved
- * 11 RX Complete upon FCS error - when set, give rx complete
- * interrupt for FCS errors, after the rx filtering, e.g. unicast
- * frames not to us with FCS error will not generate an interrupt.
- * 10 SSID Filter Enable - When set, the WiLink discards all beacon,
- * probe request, and probe response frames with an SSID that does
- * not match the SSID specified by the host in the START/JOIN
- * command.
- * When clear, the WiLink receives frames with any SSID.
- * 9 Broadcast Filter Enable - When set, the WiLink discards all
- * broadcast frames. When clear, the WiLink receives all received
- * broadcast frames.
- * 8:6 Reserved
- * 5 BSSID Filter Enable - When set, the WiLink discards any frames
- * with a BSSID that does not match the BSSID specified by the
- * host.
- * When clear, the WiLink receives frames from any BSSID.
- * 4 MAC Addr Filter - When set, the WiLink discards any frames
- * with a destination address that does not match the MAC address
- * of the adaptor.
- * When clear, the WiLink receives frames destined to any MAC
- * address.
- * 3 Promiscuous - When set, the WiLink receives all valid frames
- * (i.e., all frames that pass the FCS check).
- * When clear, only frames that pass the other filters specified
- * are received.
- * 2 FCS - When set, the WiLink includes the FCS with the received
- * frame.
- * When cleared, the FCS is discarded.
- * 1 PLCP header - When set, write all data from baseband to frame
- * buffer including PHY header.
- * 0 Reserved - Always equal to 0.
- *
- * RX Filter Options Table
- * Bit Definition
- * === ==========
- * 31:12 Reserved - Always equal to 0.
- * 11 Association - When set, the WiLink receives all association
- * related frames (association request/response, reassocation
- * request/response, and disassociation). When clear, these frames
- * are discarded.
- * 10 Auth/De auth - When set, the WiLink receives all authentication
- * and de-authentication frames. When clear, these frames are
- * discarded.
- * 9 Beacon - When set, the WiLink receives all beacon frames.
- * When clear, these frames are discarded.
- * 8 Contention Free - When set, the WiLink receives all contention
- * free frames.
- * When clear, these frames are discarded.
- * 7 Control - When set, the WiLink receives all control frames.
- * When clear, these frames are discarded.
- * 6 Data - When set, the WiLink receives all data frames.
- * When clear, these frames are discarded.
- * 5 FCS Error - When set, the WiLink receives frames that have FCS
- * errors.
- * When clear, these frames are discarded.
- * 4 Management - When set, the WiLink receives all management
- * frames.
- * When clear, these frames are discarded.
- * 3 Probe Request - When set, the WiLink receives all probe request
- * frames.
- * When clear, these frames are discarded.
- * 2 Probe Response - When set, the WiLink receives all probe
- * response frames.
- * When clear, these frames are discarded.
- * 1 RTS/CTS/ACK - When set, the WiLink receives all RTS, CTS and ACK
- * frames.
- * When clear, these frames are discarded.
- * 0 Rsvd Type/Sub Type - When set, the WiLink receives all frames
- * that have reserved frame types and sub types as defined by the
- * 802.11 specification.
- * When clear, these frames are discarded.
- */
-struct acx_rx_config {
- struct acx_header header;
-
- __le32 config_options;
- __le32 filter_options;
-} __packed;
-
struct acx_packet_detection {
struct acx_header header;
@@ -267,9 +190,10 @@ enum acx_slot_type {
struct acx_slot {
struct acx_header header;
+ u8 role_id;
u8 wone_index; /* Reserved */
u8 slot_time;
- u8 reserved[6];
+ u8 reserved[5];
} __packed;
@@ -279,29 +203,35 @@ struct acx_slot {
struct acx_dot11_grp_addr_tbl {
struct acx_header header;
+ u8 role_id;
u8 enabled;
u8 num_groups;
- u8 pad[2];
+ u8 pad[1];
u8 mac_table[ADDRESS_GROUP_MAX_LEN];
} __packed;
struct acx_rx_timeout {
struct acx_header header;
+ u8 role_id;
+ u8 reserved;
__le16 ps_poll_timeout;
__le16 upsd_timeout;
+ u8 padding[2];
} __packed;
struct acx_rts_threshold {
struct acx_header header;
+ u8 role_id;
+ u8 reserved;
__le16 threshold;
- u8 pad[2];
} __packed;
struct acx_beacon_filter_option {
struct acx_header header;
+ u8 role_id;
u8 enable;
/*
* The number of beacons without the unicast TIM
@@ -311,7 +241,7 @@ struct acx_beacon_filter_option {
* without the unicast TIM bit set are dropped.
*/
u8 max_num_beacons;
- u8 pad[2];
+ u8 pad[1];
} __packed;
/*
@@ -350,14 +280,17 @@ struct acx_beacon_filter_option {
struct acx_beacon_filter_ie_table {
struct acx_header header;
+ u8 role_id;
u8 num_ie;
- u8 pad[3];
+ u8 pad[2];
u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
} __packed;
struct acx_conn_monit_params {
struct acx_header header;
+ u8 role_id;
+ u8 padding[3];
__le32 synch_fail_thold; /* number of beacons missed */
__le32 bss_lose_timeout; /* number of TU's from synch fail */
} __packed;
@@ -369,23 +302,14 @@ struct acx_bt_wlan_coex {
u8 pad[3];
} __packed;
-struct acx_sta_bt_wlan_coex_param {
- struct acx_header header;
-
- __le32 params[CONF_SG_STA_PARAMS_MAX];
- u8 param_idx;
- u8 padding[3];
-} __packed;
-
-struct acx_ap_bt_wlan_coex_param {
+struct acx_bt_wlan_coex_param {
struct acx_header header;
- __le32 params[CONF_SG_AP_PARAMS_MAX];
+ __le32 params[CONF_SG_PARAMS_MAX];
u8 param_idx;
u8 padding[3];
} __packed;
-
struct acx_dco_itrim_params {
struct acx_header header;
@@ -406,15 +330,16 @@ struct acx_energy_detection {
struct acx_beacon_broadcast {
struct acx_header header;
- __le16 beacon_rx_timeout;
- __le16 broadcast_timeout;
-
+ u8 role_id;
/* Enables receiving of broadcast packets in PS mode */
u8 rx_broadcast_in_ps;
+ __le16 beacon_rx_timeout;
+ __le16 broadcast_timeout;
+
/* Consecutive PS Poll failures before updating the host */
u8 ps_poll_threshold;
- u8 pad[2];
+ u8 pad[1];
} __packed;
struct acx_event_mask {
@@ -424,35 +349,6 @@ struct acx_event_mask {
__le32 high_event_mask; /* Unused */
} __packed;
-#define CFG_RX_FCS BIT(2)
-#define CFG_RX_ALL_GOOD BIT(3)
-#define CFG_UNI_FILTER_EN BIT(4)
-#define CFG_BSSID_FILTER_EN BIT(5)
-#define CFG_MC_FILTER_EN BIT(6)
-#define CFG_MC_ADDR0_EN BIT(7)
-#define CFG_MC_ADDR1_EN BIT(8)
-#define CFG_BC_REJECT_EN BIT(9)
-#define CFG_SSID_FILTER_EN BIT(10)
-#define CFG_RX_INT_FCS_ERROR BIT(11)
-#define CFG_RX_INT_ENCRYPTED BIT(12)
-#define CFG_RX_WR_RX_STATUS BIT(13)
-#define CFG_RX_FILTER_NULTI BIT(14)
-#define CFG_RX_RESERVE BIT(15)
-#define CFG_RX_TIMESTAMP_TSF BIT(16)
-
-#define CFG_RX_RSV_EN BIT(0)
-#define CFG_RX_RCTS_ACK BIT(1)
-#define CFG_RX_PRSP_EN BIT(2)
-#define CFG_RX_PREQ_EN BIT(3)
-#define CFG_RX_MGMT_EN BIT(4)
-#define CFG_RX_FCS_ERROR BIT(5)
-#define CFG_RX_DATA_EN BIT(6)
-#define CFG_RX_CTL_EN BIT(7)
-#define CFG_RX_CF_EN BIT(8)
-#define CFG_RX_BCN_EN BIT(9)
-#define CFG_RX_AUTH_EN BIT(10)
-#define CFG_RX_ASSOC_EN BIT(11)
-
#define SCAN_PASSIVE BIT(0)
#define SCAN_5GHZ_BAND BIT(1)
#define SCAN_TRIGGERED BIT(2)
@@ -465,6 +361,8 @@ struct acx_event_mask {
struct acx_feature_config {
struct acx_header header;
+ u8 role_id;
+ u8 padding[3];
__le32 options;
__le32 data_flow_options;
} __packed;
@@ -472,16 +370,18 @@ struct acx_feature_config {
struct acx_current_tx_power {
struct acx_header header;
+ u8 role_id;
u8 current_tx_power;
- u8 padding[3];
+ u8 padding[2];
} __packed;
struct acx_wake_up_condition {
struct acx_header header;
+ u8 role_id;
u8 wake_up_event; /* Only one bit can be set */
u8 listen_interval;
- u8 pad[2];
+ u8 pad[1];
} __packed;
struct acx_aid {
@@ -490,8 +390,9 @@ struct acx_aid {
/*
* To be set when associated with an AP.
*/
+ u8 role_id;
+ u8 reserved;
__le16 aid;
- u8 pad[2];
} __packed;
enum acx_preamble_type {
@@ -506,8 +407,9 @@ struct acx_preamble {
* When set, the WiLink transmits the frames with a short preamble and
* when cleared, the WiLink transmits the frames with a long preamble.
*/
+ u8 role_id;
u8 preamble;
- u8 padding[3];
+ u8 padding[2];
} __packed;
enum acx_ctsprotect_type {
@@ -517,8 +419,9 @@ enum acx_ctsprotect_type {
struct acx_ctsprotect {
struct acx_header header;
+ u8 role_id;
u8 ctsprotect;
- u8 padding[3];
+ u8 padding[2];
} __packed;
struct acx_tx_statistics {
@@ -753,18 +656,10 @@ struct acx_rate_class {
#define ACX_TX_BASIC_RATE 0
#define ACX_TX_AP_FULL_RATE 1
-#define ACX_TX_RATE_POLICY_CNT 2
-struct acx_sta_rate_policy {
- struct acx_header header;
-
- __le32 rate_class_cnt;
- struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
-} __packed;
-
-
+#define ACX_TX_BASIC_RATE_P2P 2
#define ACX_TX_AP_MODE_MGMT_RATE 4
#define ACX_TX_AP_MODE_BCST_RATE 5
-struct acx_ap_rate_policy {
+struct acx_rate_policy {
struct acx_header header;
__le32 rate_policy_idx;
@@ -773,22 +668,23 @@ struct acx_ap_rate_policy {
struct acx_ac_cfg {
struct acx_header header;
+ u8 role_id;
u8 ac;
+ u8 aifsn;
u8 cw_min;
__le16 cw_max;
- u8 aifsn;
- u8 reserved;
__le16 tx_op_limit;
} __packed;
struct acx_tid_config {
struct acx_header header;
+ u8 role_id;
u8 queue_id;
u8 channel_type;
u8 tsid;
u8 ps_scheme;
u8 ack_policy;
- u8 padding[3];
+ u8 padding[2];
__le32 apsd_conf[2];
} __packed;
@@ -804,19 +700,7 @@ struct acx_tx_config_options {
__le16 tx_compl_threshold; /* number of packets */
} __packed;
-#define ACX_TX_DESCRIPTORS 32
-
-struct wl1271_acx_ap_config_memory {
- struct acx_header header;
-
- u8 rx_mem_block_num;
- u8 tx_min_mem_block_num;
- u8 num_stations;
- u8 num_ssid_profiles;
- __le32 total_tx_descriptors;
-} __packed;
-
-struct wl1271_acx_sta_config_memory {
+struct wl12xx_acx_config_memory {
struct acx_header header;
u8 rx_mem_block_num;
@@ -890,9 +774,10 @@ struct wl1271_acx_rx_config_opt {
struct wl1271_acx_bet_enable {
struct acx_header header;
+ u8 role_id;
u8 enable;
u8 max_consecutive;
- u8 padding[2];
+ u8 padding[1];
} __packed;
#define ACX_IPV4_VERSION 4
@@ -905,9 +790,10 @@ struct wl1271_acx_bet_enable {
struct wl1271_acx_arp_filter {
struct acx_header header;
+ u8 role_id;
u8 version; /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */
u8 enable; /* bitmap of enabled ARP filtering features */
- u8 padding[2];
+ u8 padding[1];
u8 address[16]; /* The configured device IP address - all ARP
requests directed to this IP address will pass
through. For IPv4, the first four bytes are
@@ -925,8 +811,9 @@ struct wl1271_acx_pm_config {
struct wl1271_acx_keep_alive_mode {
struct acx_header header;
+ u8 role_id;
u8 enabled;
- u8 padding[3];
+ u8 padding[2];
} __packed;
enum {
@@ -942,11 +829,11 @@ enum {
struct wl1271_acx_keep_alive_config {
struct acx_header header;
- __le32 period;
+ u8 role_id;
u8 index;
u8 tpl_validation;
u8 trigger;
- u8 padding;
+ __le32 period;
} __packed;
#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0)
@@ -990,26 +877,33 @@ enum {
struct wl1271_acx_rssi_snr_trigger {
struct acx_header header;
- __le16 threshold;
- __le16 pacing; /* 0 - 60000 ms */
+ u8 role_id;
u8 metric;
u8 type;
u8 dir;
+ __le16 threshold;
+ __le16 pacing; /* 0 - 60000 ms */
u8 hysteresis;
u8 index;
u8 enable;
- u8 padding[2];
+ u8 padding[1];
};
struct wl1271_acx_rssi_snr_avg_weights {
struct acx_header header;
+ u8 role_id;
+ u8 padding[3];
u8 rssi_beacon;
u8 rssi_data;
u8 snr_beacon;
u8 snr_data;
};
+
+/* special capability bit (not employed by the 802.11n spec) */
+#define WL12XX_HT_CAP_HT_OPERATION BIT(16)
+
/*
* ACX_PEER_HT_CAP
* Configure HT capabilities - declare the capabilities of the peer
@@ -1018,28 +912,11 @@ struct wl1271_acx_rssi_snr_avg_weights {
struct wl1271_acx_ht_capabilities {
struct acx_header header;
- /*
- * bit 0 - Allow HT Operation
- * bit 1 - Allow Greenfield format in TX
- * bit 2 - Allow Short GI in TX
- * bit 3 - Allow L-SIG TXOP Protection in TX
- * bit 4 - Allow HT Control fields in TX.
- * Note, driver will still leave space for HT control in packets
- * regardless of the value of this field. FW will be responsible
- * to drop the HT field from any frame when this Bit set to 0.
- * bit 5 - Allow RD initiation in TXOP. FW is allowed to initate RD.
- * Exact policy setting for this feature is TBD.
- * Note, this bit can only be set to 1 if bit 3 is set to 1.
- */
+ /* bitmask of capability bits supported by the peer */
__le32 ht_capabilites;
- /*
- * Indicates to which peer these capabilities apply.
- * For infrastructure use ff:ff:ff:ff:ff:ff that indicates relevance
- * for all peers.
- * Only valid for IBSS/DLS operation.
- */
- u8 mac_address[ETH_ALEN];
+ /* Indicates to which link these capabilities apply. */
+ u8 hlid;
/*
* This the maximum A-MPDU length supported by the AP. The FW may not
@@ -1049,16 +926,9 @@ struct wl1271_acx_ht_capabilities {
/* This is the minimal spacing required when sending A-MPDUs to the AP*/
u8 ampdu_min_spacing;
-} __packed;
-
-/* HT Capabilites Fw Bit Mask Mapping */
-#define WL1271_ACX_FW_CAP_HT_OPERATION BIT(0)
-#define WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT BIT(1)
-#define WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS BIT(2)
-#define WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION BIT(3)
-#define WL1271_ACX_FW_CAP_HT_CONTROL_FIELDS BIT(4)
-#define WL1271_ACX_FW_CAP_RD_INITIATION BIT(5)
+ u8 padding;
+} __packed;
/*
* ACX_HT_BSS_OPERATION
@@ -1067,6 +937,8 @@ struct wl1271_acx_ht_capabilities {
struct wl1271_acx_ht_information {
struct acx_header header;
+ u8 role_id;
+
/* Values: 0 - RIFS not allowed, 1 - RIFS allowed */
u8 rifs_mode;
@@ -1088,60 +960,51 @@ struct wl1271_acx_ht_information {
*/
u8 dual_cts_protection;
- u8 padding[3];
+ u8 padding[2];
} __packed;
-#define RX_BA_WIN_SIZE 8
+#define RX_BA_MAX_SESSIONS 2
-struct wl1271_acx_ba_session_policy {
+struct wl1271_acx_ba_initiator_policy {
struct acx_header header;
- /*
- * Specifies role Id, Range 0-7, 0xFF means ANY role.
- * Future use. For now this field is irrelevant
- */
+
+ /* Specifies role Id, Range 0-7, 0xFF means ANY role. */
u8 role_id;
+
/*
- * Specifies Link Id, Range 0-31, 0xFF means ANY Link Id.
- * Not applicable if Role Id is set to ANY.
+ * Per TID setting for allowing TX BA. Set a bit to 1 to allow
+ * TX BA sessions for the corresponding TID.
*/
- u8 link_id;
-
- u8 tid;
-
- u8 enable;
+ u8 tid_bitmap;
/* Windows size in number of packets */
- u16 win_size;
+ u8 win_size;
- /*
- * As initiator inactivity timeout in time units(TU) of 1024us.
- * As receiver reserved
- */
- u16 inactivity_timeout;
+ u8 padding1[1];
- /* Initiator = 1/Receiver = 0 */
- u8 ba_direction;
+ /* As initiator inactivity timeout in time units(TU) of 1024us */
+ u16 inactivity_timeout;
- u8 padding[3];
+ u8 padding[2];
} __packed;
struct wl1271_acx_ba_receiver_setup {
struct acx_header header;
- /* Specifies Link Id, Range 0-31, 0xFF means ANY Link Id */
- u8 link_id;
+ /* Specifies link id, range 0-31 */
+ u8 hlid;
u8 tid;
u8 enable;
- u8 padding[1];
-
/* Windows size in number of packets */
- u16 win_size;
+ u8 win_size;
/* BA session starting sequence number. RANGE 0-FFF */
u16 ssn;
+
+ u8 padding[2];
} __packed;
struct wl1271_acx_fw_tsf_information {
@@ -1158,6 +1021,7 @@ struct wl1271_acx_fw_tsf_information {
struct wl1271_acx_ps_rx_streaming {
struct acx_header header;
+ u8 role_id;
u8 tid;
u8 enable;
@@ -1166,17 +1030,20 @@ struct wl1271_acx_ps_rx_streaming {
/* timeout before first trigger (0-200 msec) */
u8 timeout;
+ u8 padding[3];
} __packed;
struct wl1271_acx_ap_max_tx_retry {
struct acx_header header;
+ u8 role_id;
+ u8 padding_1;
+
/*
* the number of frames transmission failures before
* issuing the aging event.
*/
__le16 max_tx_retry;
- u8 padding_1[2];
} __packed;
struct wl1271_acx_config_ps {
@@ -1195,13 +1062,6 @@ struct wl1271_acx_inconnection_sta {
u8 padding1[2];
} __packed;
-struct acx_ap_beacon_filter {
- struct acx_header header;
-
- u8 enable;
- u8 pad[3];
-} __packed;
-
/*
* ACX_FM_COEX_CFG
* set the FM co-existence parameters.
@@ -1261,6 +1121,47 @@ struct wl1271_acx_fm_coex {
u8 swallow_clk_diff;
} __packed;
+#define ACX_RATE_MGMT_ALL_PARAMS 0xff
+struct wl12xx_acx_set_rate_mgmt_params {
+ struct acx_header header;
+
+ u8 index; /* 0xff to configure all params */
+ u8 padding1;
+ __le16 rate_retry_score;
+ __le16 per_add;
+ __le16 per_th1;
+ __le16 per_th2;
+ __le16 max_per;
+ u8 inverse_curiosity_factor;
+ u8 tx_fail_low_th;
+ u8 tx_fail_high_th;
+ u8 per_alpha_shift;
+ u8 per_add_shift;
+ u8 per_beta1_shift;
+ u8 per_beta2_shift;
+ u8 rate_check_up;
+ u8 rate_check_down;
+ u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES];
+ u8 padding2[2];
+} __packed;
+
+struct wl12xx_acx_config_hangover {
+ struct acx_header header;
+
+ __le32 recover_time;
+ u8 hangover_period;
+ u8 dynamic_mode;
+ u8 early_termination_mode;
+ u8 max_period;
+ u8 min_period;
+ u8 increase_delta;
+ u8 decrease_delta;
+ u8 quiet_time;
+ u8 increase_time;
+ u8 window_size;
+ u8 padding[2];
+} __packed;
+
enum {
ACX_WAKE_UP_CONDITIONS = 0x0002,
ACX_MEM_CFG = 0x0003,
@@ -1268,10 +1169,7 @@ enum {
ACX_AC_CFG = 0x0007,
ACX_MEM_MAP = 0x0008,
ACX_AID = 0x000A,
- /* ACX_FW_REV is missing in the ref driver, but seems to work */
- ACX_FW_REV = 0x000D,
ACX_MEDIUM_USAGE = 0x000F,
- ACX_RX_CFG = 0x0010,
ACX_TX_QUEUE_CFG = 0x0011, /* FIXME: only used by wl1251 */
ACX_STATISTICS = 0x0013, /* Debug API */
ACX_PWR_CONSUMPTION_STATISTICS = 0x0014,
@@ -1279,7 +1177,6 @@ enum {
ACX_TID_CFG = 0x001A,
ACX_PS_RX_STREAMING = 0x001B,
ACX_BEACON_FILTER_OPT = 0x001F,
- ACX_AP_BEACON_FILTER_OPT = 0x0020,
ACX_NOISE_HIST = 0x0021,
ACX_HDK_VERSION = 0x0022, /* ??? */
ACX_PD_THRESHOLD = 0x0023,
@@ -1287,7 +1184,6 @@ enum {
ACX_CCA_THRESHOLD = 0x0025,
ACX_EVENT_MBOX_MASK = 0x0026,
ACX_CONN_MONIT_PARAMS = 0x002D,
- ACX_CONS_TX_FAILURE = 0x002F,
ACX_BCN_DTIM_OPTIONS = 0x0031,
ACX_SG_ENABLE = 0x0032,
ACX_SG_CFG = 0x0033,
@@ -1314,11 +1210,14 @@ enum {
ACX_RSSI_SNR_WEIGHTS = 0x0052,
ACX_KEEP_ALIVE_MODE = 0x0053,
ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
- ACX_BA_SESSION_POLICY_CFG = 0x0055,
+ ACX_BA_SESSION_INIT_POLICY = 0x0055,
ACX_BA_SESSION_RX_SETUP = 0x0056,
ACX_PEER_HT_CAP = 0x0057,
ACX_HT_BSS_OPERATION = 0x0058,
ACX_COEX_ACTIVITY = 0x0059,
+ ACX_BURST_MODE = 0x005C,
+ ACX_SET_RATE_MGMT_PARAMS = 0x005D,
+ ACX_SET_RATE_ADAPT_PARAMS = 0x0060,
ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
ACX_GEN_FW_CMD = 0x0070,
ACX_HOST_IF_CFG_BITMAP = 0x0071,
@@ -1342,7 +1241,6 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl);
int wl1271_acx_mem_map(struct wl1271 *wl,
struct acx_header *mem_map, size_t len);
int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
-int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter);
int wl1271_acx_pd_threshold(struct wl1271 *wl);
int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
@@ -1354,8 +1252,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
-int wl1271_acx_sta_sg_cfg(struct wl1271 *wl);
-int wl1271_acx_ap_sg_cfg(struct wl1271 *wl);
+int wl12xx_acx_sg_cfg(struct wl1271 *wl);
int wl1271_acx_cca_threshold(struct wl1271 *wl);
int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
@@ -1374,8 +1271,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
u32 apsd_conf0, u32 apsd_conf1);
int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
int wl1271_acx_tx_config_options(struct wl1271 *wl);
-int wl1271_acx_ap_mem_cfg(struct wl1271 *wl);
-int wl1271_acx_sta_mem_cfg(struct wl1271 *wl);
+int wl12xx_acx_mem_cfg(struct wl1271 *wl);
int wl1271_acx_init_mem_config(struct wl1271 *wl);
int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
@@ -1390,20 +1286,19 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
struct ieee80211_sta_ht_cap *ht_cap,
- bool allow_ht_operation);
+ bool allow_ht_operation, u8 hlid);
int wl1271_acx_set_ht_information(struct wl1271 *wl,
u16 ht_operation_mode);
-int wl1271_acx_set_ba_session(struct wl1271 *wl,
- enum ieee80211_back_parties direction,
- u8 tid_index, u8 policy);
-int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
- bool enable);
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl);
+int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
+ u16 ssn, bool enable, u8 peer_hlid);
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable);
int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl);
int wl1271_acx_config_ps(struct wl1271 *wl);
int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
-int wl1271_acx_set_ap_beacon_filter(struct wl1271 *wl, bool enable);
int wl1271_acx_fm_coex(struct wl1271 *wl);
+int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
+int wl12xx_acx_config_hangover(struct wl1271 *wl);
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index 5ebc64d8940..d4e628db76b 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -107,16 +107,6 @@ static unsigned int wl12xx_get_fw_ver_quirks(struct wl1271 *wl)
unsigned int quirks = 0;
unsigned int *fw_ver = wl->chip.fw_ver;
- /* Only for wl127x */
- if ((fw_ver[FW_VER_CHIP] == FW_VER_CHIP_WL127X) &&
- /* Check STA version */
- (((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
- (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_STA_MIN)) ||
- /* Check AP version */
- ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) &&
- (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_AP_MIN))))
- quirks |= WL12XX_QUIRK_USE_2_SPARE_BLOCKS;
-
/* Only new station firmwares support routing fw logs to the host */
if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
(fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN))
@@ -304,9 +294,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
*/
if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
- /* for now 11a is unsupported in AP mode */
- if (wl->bss_type != BSS_TYPE_AP_BSS &&
- nvs->general_params.dual_mode_select)
+ if (nvs->general_params.dual_mode_select)
wl->enable_11a = true;
}
@@ -504,21 +492,19 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
wl->event_mask = BSS_LOSE_EVENT_ID |
SCAN_COMPLETE_EVENT_ID |
PS_REPORT_EVENT_ID |
- JOIN_EVENT_COMPLETE_ID |
DISCONNECT_EVENT_COMPLETE_ID |
RSSI_SNR_TRIGGER_0_EVENT_ID |
PSPOLL_DELIVERY_FAILURE_EVENT_ID |
SOFT_GEMINI_SENSE_EVENT_ID |
PERIODIC_SCAN_REPORT_EVENT_ID |
- PERIODIC_SCAN_COMPLETE_EVENT_ID;
-
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID |
- INACTIVE_STA_EVENT_ID |
- MAX_TX_RETRY_EVENT_ID;
- else
- wl->event_mask |= DUMMY_PACKET_EVENT_ID |
- BA_SESSION_RX_CONSTRAINT_EVENT_ID;
+ PERIODIC_SCAN_COMPLETE_EVENT_ID |
+ DUMMY_PACKET_EVENT_ID |
+ PEER_REMOVE_COMPLETE_EVENT_ID |
+ BA_SESSION_RX_CONSTRAINT_EVENT_ID |
+ REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
+ INACTIVE_STA_EVENT_ID |
+ MAX_TX_RETRY_EVENT_ID |
+ CHANNEL_SWITCH_COMPLETE_EVENT_ID;
ret = wl1271_event_unmask(wl);
if (ret < 0) {
@@ -549,13 +535,13 @@ static void wl1271_boot_hw_version(struct wl1271 *wl)
{
u32 fuse;
- fuse = wl1271_top_reg_read(wl, REG_FUSE_DATA_2_1);
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ fuse = wl1271_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1);
+ else
+ fuse = wl1271_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
wl->hw_pg_ver = (s8)fuse;
-
- if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
- wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
}
static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
@@ -696,7 +682,8 @@ static int wl127x_boot_clk(struct wl1271 *wl)
u32 pause;
u32 clk;
- wl1271_boot_hw_version(wl);
+ if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
+ wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
wl->ref_clock == CONF_REF_CLK_38_4_E ||
@@ -750,6 +737,8 @@ int wl1271_load_firmware(struct wl1271 *wl)
u32 tmp, clk;
int selected_clock = -1;
+ wl1271_boot_hw_version(wl);
+
if (wl->chip.id == CHIP_ID_1283_PG20) {
ret = wl128x_boot_clk(wl, &selected_clock);
if (ret < 0)
@@ -781,9 +770,6 @@ int wl1271_load_firmware(struct wl1271 *wl)
clk |= (wl->ref_clock << 1) << 4;
}
- if (wl->quirks & WL12XX_QUIRK_LPD_MODE)
- clk |= SCRATCH_ENABLE_LPD;
-
wl1271_write32(wl, DRPW_SCRATCH_START, clk);
wl1271_set_partition(wl, &part_table[PART_WORK]);
@@ -852,9 +838,6 @@ int wl1271_boot(struct wl1271 *wl)
/* Enable firmware interrupts now */
wl1271_boot_enable_interrupts(wl);
- /* set the wl1271 default filters */
- wl1271_set_default_filters(wl);
-
wl1271_event_mbox_config(wl);
out:
diff --git a/drivers/net/wireless/wl12xx/boot.h b/drivers/net/wireless/wl12xx/boot.h
index e8f8255bbab..06dad9380fa 100644
--- a/drivers/net/wireless/wl12xx/boot.h
+++ b/drivers/net/wireless/wl12xx/boot.h
@@ -55,7 +55,8 @@ struct wl1271_static_data {
#define OCP_REG_CLK_POLARITY 0x0cb2
#define OCP_REG_CLK_PULL 0x0cb4
-#define REG_FUSE_DATA_2_1 0x050a
+#define WL127X_REG_FUSE_DATA_2_1 0x050a
+#define WL128X_REG_FUSE_DATA_2_1 0x2152
#define PG_VER_MASK 0x3c
#define PG_VER_OFFSET 2
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 97dd237a958..a52299e548f 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -134,11 +134,6 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
/* Override the REF CLK from the NVS with the one from platform data */
gen_parms->general_params.ref_clock = wl->ref_clock;
- /* LPD mode enable (bits 6-7) in WL1271 AP mode only */
- if (wl->quirks & WL12XX_QUIRK_LPD_MODE)
- gen_parms->general_params.general_settings |=
- GENERAL_SETTINGS_DRPW_LPD;
-
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
if (ret < 0) {
wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
@@ -363,63 +358,476 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
return 0;
}
-int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
{
- struct wl1271_cmd_join *join;
- int ret, i;
- u8 *bssid;
+ struct wl12xx_cmd_role_enable *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd role enable");
+
+ if (WARN_ON(*role_id != WL12XX_INVALID_ROLE_ID))
+ return -EBUSY;
- join = kzalloc(sizeof(*join), GFP_KERNEL);
- if (!join) {
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
ret = -ENOMEM;
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd join");
+ /* get role id */
+ cmd->role_id = find_first_zero_bit(wl->roles_map, WL12XX_MAX_ROLES);
+ if (cmd->role_id >= WL12XX_MAX_ROLES) {
+ ret = -EBUSY;
+ goto out_free;
+ }
+
+ memcpy(cmd->mac_address, wl->mac_addr, ETH_ALEN);
+ cmd->role_type = role_type;
+
+ ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd role enable");
+ goto out_free;
+ }
+
+ __set_bit(cmd->role_id, wl->roles_map);
+ *role_id = cmd->role_id;
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id)
+{
+ struct wl12xx_cmd_role_disable *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd role disable");
+
+ if (WARN_ON(*role_id == WL12XX_INVALID_ROLE_ID))
+ return -ENOENT;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ cmd->role_id = *role_id;
+
+ ret = wl1271_cmd_send(wl, CMD_ROLE_DISABLE, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd role disable");
+ goto out_free;
+ }
+
+ __clear_bit(*role_id, wl->roles_map);
+ *role_id = WL12XX_INVALID_ROLE_ID;
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid)
+{
+ u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
+ if (link >= WL12XX_MAX_LINKS)
+ return -EBUSY;
+
+ __set_bit(link, wl->links_map);
+ *hlid = link;
+ return 0;
+}
+
+static void wl12xx_free_link(struct wl1271 *wl, u8 *hlid)
+{
+ if (*hlid == WL12XX_INVALID_LINK_ID)
+ return;
+
+ __clear_bit(*hlid, wl->links_map);
+ *hlid = WL12XX_INVALID_LINK_ID;
+}
+
+static int wl12xx_get_new_session_id(struct wl1271 *wl)
+{
+ if (wl->session_counter >= SESSION_COUNTER_MAX)
+ wl->session_counter = 0;
+
+ wl->session_counter++;
+
+ return wl->session_counter;
+}
- /* Reverse order BSSID */
- bssid = (u8 *) &join->bssid_lsb;
- for (i = 0; i < ETH_ALEN; i++)
- bssid[i] = wl->bssid[ETH_ALEN - i - 1];
+int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
+{
+ struct wl12xx_cmd_role_start *cmd;
+ int ret;
- join->rx_config_options = cpu_to_le32(wl->rx_config);
- join->rx_filter_options = cpu_to_le32(wl->rx_filter);
- join->bss_type = bss_type;
- join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- join->supported_rate_set = cpu_to_le32(wl->rate_set);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wl->dev_role_id);
+
+ cmd->role_id = wl->dev_role_id;
if (wl->band == IEEE80211_BAND_5GHZ)
- join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
+ cmd->band = WL12XX_BAND_5GHZ;
+ cmd->channel = wl->channel;
+
+ if (wl->dev_hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, &wl->dev_hlid);
+ if (ret)
+ goto out_free;
+ }
+ cmd->device.hlid = wl->dev_hlid;
+ cmd->device.session = wl->session_counter;
- join->beacon_interval = cpu_to_le16(wl->beacon_int);
- join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD;
+ wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
+ cmd->role_id, cmd->device.hlid, cmd->device.session);
+
+ ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd role enable");
+ goto err_hlid;
+ }
- join->channel = wl->channel;
- join->ssid_len = wl->ssid_len;
- memcpy(join->ssid, wl->ssid, wl->ssid_len);
+ goto out_free;
- join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET;
+err_hlid:
+ /* clear links on error */
+ __clear_bit(wl->dev_hlid, wl->links_map);
+ wl->dev_hlid = WL12XX_INVALID_LINK_ID;
- wl1271_debug(DEBUG_CMD, "cmd join: basic_rate_set=0x%x, rate_set=0x%x",
- join->basic_rate_set, join->supported_rate_set);
- ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
+{
+ struct wl12xx_cmd_role_stop *cmd;
+ int ret;
+
+ if (WARN_ON(wl->dev_hlid == WL12XX_INVALID_LINK_ID))
+ return -EINVAL;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wl1271_debug(DEBUG_CMD, "cmd role stop dev");
+
+ cmd->role_id = wl->dev_role_id;
+ cmd->disc_type = DISCONNECT_IMMEDIATE;
+ cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
+
+ ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd role stop");
+ goto out_free;
+ }
+
+ ret = wl1271_cmd_wait_for_event(wl, DISCONNECT_EVENT_COMPLETE_ID);
if (ret < 0) {
- wl1271_error("failed to initiate cmd join");
+ wl1271_error("cmd role stop dev event completion error");
goto out_free;
}
- ret = wl1271_cmd_wait_for_event(wl, JOIN_EVENT_COMPLETE_ID);
+ wl12xx_free_link(wl, &wl->dev_hlid);
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
+{
+ struct wl12xx_cmd_role_start *cmd;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wl->role_id);
+
+ cmd->role_id = wl->role_id;
+ if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->band = WL12XX_BAND_5GHZ;
+ cmd->channel = wl->channel;
+ cmd->sta.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+ cmd->sta.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY;
+ cmd->sta.ssid_len = wl->ssid_len;
+ memcpy(cmd->sta.ssid, wl->ssid, wl->ssid_len);
+ memcpy(cmd->sta.bssid, wl->bssid, ETH_ALEN);
+ cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+
+ if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+ if (ret)
+ goto out_free;
+ }
+ cmd->sta.hlid = wl->sta_hlid;
+ cmd->sta.session = wl12xx_get_new_session_id(wl);
+ cmd->sta.remote_rates = cpu_to_le32(wl->rate_set);
+
+ wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
+ "basic_rate_set: 0x%x, remote_rates: 0x%x",
+ wl->role_id, cmd->sta.hlid, cmd->sta.session,
+ wl->basic_rate_set, wl->rate_set);
+
+ ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd role start sta");
+ goto err_hlid;
+ }
+
+ goto out_free;
+
+err_hlid:
+ /* clear links on error. */
+ wl12xx_free_link(wl, &wl->sta_hlid);
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+/* use this function to stop ibss as well */
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
+{
+ struct wl12xx_cmd_role_stop *cmd;
+ int ret;
+
+ if (WARN_ON(wl->sta_hlid == WL12XX_INVALID_LINK_ID))
+ return -EINVAL;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wl->role_id);
+
+ cmd->role_id = wl->role_id;
+ cmd->disc_type = DISCONNECT_IMMEDIATE;
+ cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
+
+ ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd role stop sta");
+ goto out_free;
+ }
+
+ wl12xx_free_link(wl, &wl->sta_hlid);
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
+{
+ struct wl12xx_cmd_role_start *cmd;
+ struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wl->role_id);
+
+ /* trying to use hidden SSID with an old hostapd version */
+ if (wl->ssid_len == 0 && !bss_conf->hidden_ssid) {
+ wl1271_error("got a null SSID from beacon/bss");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = wl12xx_allocate_link(wl, &wl->ap_global_hlid);
if (ret < 0)
- wl1271_error("cmd join event completion error");
+ goto out_free;
+
+ ret = wl12xx_allocate_link(wl, &wl->ap_bcast_hlid);
+ if (ret < 0)
+ goto out_free_global;
+
+ cmd->role_id = wl->role_id;
+ cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
+ cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
+ cmd->ap.global_hlid = wl->ap_global_hlid;
+ cmd->ap.broadcast_hlid = wl->ap_bcast_hlid;
+ cmd->ap.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+ cmd->ap.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->ap.dtim_interval = bss_conf->dtim_period;
+ cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
+ cmd->channel = wl->channel;
+
+ if (!bss_conf->hidden_ssid) {
+ /* take the SSID from the beacon for backward compatibility */
+ cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC;
+ cmd->ap.ssid_len = wl->ssid_len;
+ memcpy(cmd->ap.ssid, wl->ssid, wl->ssid_len);
+ } else {
+ cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN;
+ cmd->ap.ssid_len = bss_conf->ssid_len;
+ memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len);
+ }
+
+ cmd->ap.local_rates = cpu_to_le32(0xffffffff);
+
+ switch (wl->band) {
+ case IEEE80211_BAND_2GHZ:
+ cmd->band = RADIO_BAND_2_4GHZ;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ cmd->band = RADIO_BAND_5GHZ;
+ break;
+ default:
+ wl1271_warning("ap start - unknown band: %d", (int)wl->band);
+ cmd->band = RADIO_BAND_2_4GHZ;
+ break;
+ }
+
+ ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd role start ap");
+ goto out_free_bcast;
+ }
+
+ goto out_free;
+
+out_free_bcast:
+ wl12xx_free_link(wl, &wl->ap_bcast_hlid);
+
+out_free_global:
+ wl12xx_free_link(wl, &wl->ap_global_hlid);
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
+{
+ struct wl12xx_cmd_role_stop *cmd;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wl->role_id);
+
+ cmd->role_id = wl->role_id;
+
+ ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd role stop ap");
+ goto out_free;
+ }
+
+ wl12xx_free_link(wl, &wl->ap_bcast_hlid);
+ wl12xx_free_link(wl, &wl->ap_global_hlid);
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
+{
+ struct wl12xx_cmd_role_start *cmd;
+ struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wl->role_id);
+
+ cmd->role_id = wl->role_id;
+ if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->band = WL12XX_BAND_5GHZ;
+ cmd->channel = wl->channel;
+ cmd->ibss.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+ cmd->ibss.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->ibss.dtim_interval = bss_conf->dtim_period;
+ cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY;
+ cmd->ibss.ssid_len = wl->ssid_len;
+ memcpy(cmd->ibss.ssid, wl->ssid, wl->ssid_len);
+ memcpy(cmd->ibss.bssid, wl->bssid, ETH_ALEN);
+ cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+
+ if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+ if (ret)
+ goto out_free;
+ }
+ cmd->ibss.hlid = wl->sta_hlid;
+ cmd->ibss.remote_rates = cpu_to_le32(wl->rate_set);
+
+ wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
+ "basic_rate_set: 0x%x, remote_rates: 0x%x",
+ wl->role_id, cmd->sta.hlid, cmd->sta.session,
+ wl->basic_rate_set, wl->rate_set);
+
+ wl1271_debug(DEBUG_CMD, "wl->bssid = %pM", wl->bssid);
+
+ ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd role enable");
+ goto err_hlid;
+ }
+
+ goto out_free;
+
+err_hlid:
+ /* clear links on error. */
+ wl12xx_free_link(wl, &wl->sta_hlid);
out_free:
- kfree(join);
+ kfree(cmd);
out:
return ret;
}
+
/**
* send test command to firmware
*
@@ -488,7 +896,7 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
struct acx_header *acx = buf;
int ret;
- wl1271_debug(DEBUG_CMD, "cmd configure");
+ wl1271_debug(DEBUG_CMD, "cmd configure (%d)", id);
acx->id = cpu_to_le16(id);
@@ -567,6 +975,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
goto out;
}
+ ps_params->role_id = wl->role_id;
ps_params->ps_mode = ps_mode;
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -698,6 +1107,7 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl,
{
struct sk_buff *skb;
int ret;
+ u32 rate;
skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
ie, ie_len);
@@ -708,14 +1118,13 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl,
wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
+ rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
if (band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
- skb->data, skb->len, 0,
- wl->conf.tx.basic_rate);
+ skb->data, skb->len, 0, rate);
else
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
- skb->data, skb->len, 0,
- wl->conf.tx.basic_rate_5);
+ skb->data, skb->len, 0, rate);
out:
dev_kfree_skb(skb);
@@ -726,6 +1135,7 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
struct sk_buff *skb)
{
int ret;
+ u32 rate;
if (!skb)
skb = ieee80211_ap_probereq_get(wl->hw, wl->vif);
@@ -734,14 +1144,13 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
+ rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[wl->band]);
if (wl->band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
- skb->data, skb->len, 0,
- wl->conf.tx.basic_rate);
+ skb->data, skb->len, 0, rate);
else
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
- skb->data, skb->len, 0,
- wl->conf.tx.basic_rate_5);
+ skb->data, skb->len, 0, rate);
if (ret < 0)
wl1271_error("Unable to set ap probe request template.");
@@ -813,9 +1222,9 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
wl->basic_rate);
}
-int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id)
+int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid)
{
- struct wl1271_cmd_set_sta_keys *cmd;
+ struct wl1271_cmd_set_keys *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id);
@@ -826,36 +1235,7 @@ int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id)
goto out;
}
- cmd->id = id;
- cmd->key_action = cpu_to_le16(KEY_SET_ID);
- cmd->key_type = KEY_WEP;
-
- ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
- if (ret < 0) {
- wl1271_warning("cmd set_default_wep_key failed: %d", ret);
- goto out;
- }
-
-out:
- kfree(cmd);
-
- return ret;
-}
-
-int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id)
-{
- struct wl1271_cmd_set_ap_keys *cmd;
- int ret = 0;
-
- wl1271_debug(DEBUG_CMD, "cmd set_ap_default_wep_key %d", id);
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd) {
- ret = -ENOMEM;
- goto out;
- }
-
- cmd->hlid = WL1271_AP_BROADCAST_HLID;
+ cmd->hlid = hlid;
cmd->key_id = id;
cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
cmd->key_action = cpu_to_le16(KEY_SET_ID);
@@ -863,7 +1243,7 @@ int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id)
ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
if (ret < 0) {
- wl1271_warning("cmd set_ap_default_wep_key failed: %d", ret);
+ wl1271_warning("cmd set_default_wep_key failed: %d", ret);
goto out;
}
@@ -877,17 +1257,27 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16)
{
- struct wl1271_cmd_set_sta_keys *cmd;
+ struct wl1271_cmd_set_keys *cmd;
int ret = 0;
+ /* hlid might have already been deleted */
+ if (wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+ return 0;
+
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
- if (key_type != KEY_WEP)
- memcpy(cmd->addr, addr, ETH_ALEN);
+ cmd->hlid = wl->sta_hlid;
+
+ if (key_type == KEY_WEP)
+ cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
+ else if (is_broadcast_ether_addr(addr))
+ cmd->lid_key_type = BROADCAST_LID_TYPE;
+ else
+ cmd->lid_key_type = UNICAST_LID_TYPE;
cmd->key_action = cpu_to_le16(action);
cmd->key_size = key_size;
@@ -896,10 +1286,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
- /* we have only one SSID profile */
- cmd->ssid_profile = 0;
-
- cmd->id = id;
+ cmd->key_id = id;
if (key_type == KEY_TKIP) {
/*
@@ -930,11 +1317,15 @@ out:
return ret;
}
+/*
+ * TODO: merge with sta/ibss into 1 set_key function.
+ * note there are slight diffs
+ */
int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
u16 tx_seq_16)
{
- struct wl1271_cmd_set_ap_keys *cmd;
+ struct wl1271_cmd_set_keys *cmd;
int ret = 0;
u8 lid_type;
@@ -942,7 +1333,7 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
if (!cmd)
return -ENOMEM;
- if (hlid == WL1271_AP_BROADCAST_HLID) {
+ if (hlid == wl->ap_bcast_hlid) {
if (key_type == KEY_WEP)
lid_type = WEP_DEFAULT_LID_TYPE;
else
@@ -991,12 +1382,12 @@ out:
return ret;
}
-int wl1271_cmd_disconnect(struct wl1271 *wl)
+int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid)
{
- struct wl1271_cmd_disconnect *cmd;
+ struct wl12xx_cmd_set_peer_state *cmd;
int ret = 0;
- wl1271_debug(DEBUG_CMD, "cmd disconnect");
+ wl1271_debug(DEBUG_CMD, "cmd set peer state (hlid=%d)", hlid);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
@@ -1004,21 +1395,15 @@ int wl1271_cmd_disconnect(struct wl1271 *wl)
goto out;
}
- cmd->rx_config_options = cpu_to_le32(wl->rx_config);
- cmd->rx_filter_options = cpu_to_le32(wl->rx_filter);
- /* disconnect reason is not used in immediate disconnections */
- cmd->type = DISCONNECT_IMMEDIATE;
+ cmd->hlid = hlid;
+ cmd->state = WL1271_CMD_STA_STATE_CONNECTED;
- ret = wl1271_cmd_send(wl, CMD_DISCONNECT, cmd, sizeof(*cmd), 0);
+ ret = wl1271_cmd_send(wl, CMD_SET_PEER_STATE, cmd, sizeof(*cmd), 0);
if (ret < 0) {
- wl1271_error("failed to send disconnect command");
+ wl1271_error("failed to send set peer state command");
goto out_free;
}
- ret = wl1271_cmd_wait_for_event(wl, DISCONNECT_EVENT_COMPLETE_ID);
- if (ret < 0)
- wl1271_error("cmd disconnect event completion error");
-
out_free:
kfree(cmd);
@@ -1026,12 +1411,13 @@ out:
return ret;
}
-int wl1271_cmd_set_sta_state(struct wl1271 *wl)
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
{
- struct wl1271_cmd_set_sta_state *cmd;
- int ret = 0;
+ struct wl12xx_cmd_add_peer *cmd;
+ int i, ret;
+ u32 sta_rates;
- wl1271_debug(DEBUG_CMD, "cmd set sta state");
+ wl1271_debug(DEBUG_CMD, "cmd add peer %d", (int)hlid);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
@@ -1039,11 +1425,33 @@ int wl1271_cmd_set_sta_state(struct wl1271 *wl)
goto out;
}
- cmd->state = WL1271_CMD_STA_STATE_CONNECTED;
+ memcpy(cmd->addr, sta->addr, ETH_ALEN);
+ cmd->bss_index = WL1271_AP_BSS_INDEX;
+ cmd->aid = sta->aid;
+ cmd->hlid = hlid;
+ cmd->sp_len = sta->max_sp;
+ cmd->wmm = sta->wme ? 1 : 0;
+
+ for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
+ if (sta->wme && (sta->uapsd_queues & BIT(i)))
+ cmd->psd_type[i] = WL1271_PSD_UPSD_TRIGGER;
+ else
+ cmd->psd_type[i] = WL1271_PSD_LEGACY;
+
+ sta_rates = sta->supp_rates[wl->band];
+ if (sta->ht_cap.ht_supported)
+ sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;
- ret = wl1271_cmd_send(wl, CMD_SET_STA_STATE, cmd, sizeof(*cmd), 0);
+ cmd->supported_rates =
+ cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
+ wl->band));
+
+ wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x",
+ cmd->supported_rates, sta->uapsd_queues);
+
+ ret = wl1271_cmd_send(wl, CMD_ADD_PEER, cmd, sizeof(*cmd), 0);
if (ret < 0) {
- wl1271_error("failed to send set STA state command");
+ wl1271_error("failed to initiate cmd add peer");
goto out_free;
}
@@ -1054,23 +1462,12 @@ out:
return ret;
}
-int wl1271_cmd_start_bss(struct wl1271 *wl)
+int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
{
- struct wl1271_cmd_bss_start *cmd;
- struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ struct wl12xx_cmd_remove_peer *cmd;
int ret;
- wl1271_debug(DEBUG_CMD, "cmd start bss");
-
- /*
- * FIXME: We currently do not support hidden SSID. The real SSID
- * should be fetched from mac80211 first.
- */
- if (wl->ssid_len == 0) {
- wl1271_warning("Hidden SSID currently not supported for AP");
- ret = -EINVAL;
- goto out;
- }
+ wl1271_debug(DEBUG_CMD, "cmd remove peer %d", (int)hlid);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
@@ -1078,40 +1475,24 @@ int wl1271_cmd_start_bss(struct wl1271 *wl)
goto out;
}
- memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN);
-
- cmd->aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
- cmd->bss_index = WL1271_AP_BSS_INDEX;
- cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
- cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;
- cmd->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- cmd->beacon_interval = cpu_to_le16(wl->beacon_int);
- cmd->dtim_interval = bss_conf->dtim_period;
- cmd->beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
- cmd->channel = wl->channel;
- cmd->ssid_len = wl->ssid_len;
- cmd->ssid_type = SSID_TYPE_PUBLIC;
- memcpy(cmd->ssid, wl->ssid, wl->ssid_len);
-
- switch (wl->band) {
- case IEEE80211_BAND_2GHZ:
- cmd->band = RADIO_BAND_2_4GHZ;
- break;
- case IEEE80211_BAND_5GHZ:
- cmd->band = RADIO_BAND_5GHZ;
- break;
- default:
- wl1271_warning("bss start - unknown band: %d", (int)wl->band);
- cmd->band = RADIO_BAND_2_4GHZ;
- break;
- }
+ cmd->hlid = hlid;
+ /* We never send a deauth, mac80211 is in charge of this */
+ cmd->reason_opcode = 0;
+ cmd->send_deauth_flag = 0;
- ret = wl1271_cmd_send(wl, CMD_BSS_START, cmd, sizeof(*cmd), 0);
+ ret = wl1271_cmd_send(wl, CMD_REMOVE_PEER, cmd, sizeof(*cmd), 0);
if (ret < 0) {
- wl1271_error("failed to initiate cmd start bss");
+ wl1271_error("failed to initiate cmd remove peer");
goto out_free;
}
+ /*
+ * We are ok with a timeout here. The event is sometimes not sent
+ * due to a firmware bug.
+ */
+ wl1271_cmd_wait_for_event_or_timeout(wl,
+ PEER_REMOVE_COMPLETE_EVENT_ID);
+
out_free:
kfree(cmd);
@@ -1119,12 +1500,12 @@ out:
return ret;
}
-int wl1271_cmd_stop_bss(struct wl1271 *wl)
+int wl12xx_cmd_config_fwlog(struct wl1271 *wl)
{
- struct wl1271_cmd_bss_start *cmd;
- int ret;
+ struct wl12xx_cmd_config_fwlog *cmd;
+ int ret = 0;
- wl1271_debug(DEBUG_CMD, "cmd stop bss");
+ wl1271_debug(DEBUG_CMD, "cmd config firmware logger");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
@@ -1132,11 +1513,15 @@ int wl1271_cmd_stop_bss(struct wl1271 *wl)
goto out;
}
- cmd->bss_index = WL1271_AP_BSS_INDEX;
+ cmd->logger_mode = wl->conf.fwlog.mode;
+ cmd->log_severity = wl->conf.fwlog.severity;
+ cmd->timestamp = wl->conf.fwlog.timestamp;
+ cmd->output = wl->conf.fwlog.output;
+ cmd->threshold = wl->conf.fwlog.threshold;
- ret = wl1271_cmd_send(wl, CMD_BSS_STOP, cmd, sizeof(*cmd), 0);
+ ret = wl1271_cmd_send(wl, CMD_CONFIG_FWLOGGER, cmd, sizeof(*cmd), 0);
if (ret < 0) {
- wl1271_error("failed to initiate cmd stop bss");
+ wl1271_error("failed to send config firmware logger command");
goto out_free;
}
@@ -1147,12 +1532,12 @@ out:
return ret;
}
-int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+int wl12xx_cmd_start_fwlog(struct wl1271 *wl)
{
- struct wl1271_cmd_add_sta *cmd;
- int ret;
+ struct wl12xx_cmd_start_fwlog *cmd;
+ int ret = 0;
- wl1271_debug(DEBUG_CMD, "cmd add sta %d", (int)hlid);
+ wl1271_debug(DEBUG_CMD, "cmd start firmware logger");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
@@ -1160,23 +1545,35 @@ int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
goto out;
}
- /* currently we don't support UAPSD */
- cmd->sp_len = 0;
+ ret = wl1271_cmd_send(wl, CMD_START_FWLOGGER, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send start firmware logger command");
+ goto out_free;
+ }
- memcpy(cmd->addr, sta->addr, ETH_ALEN);
- cmd->bss_index = WL1271_AP_BSS_INDEX;
- cmd->aid = sta->aid;
- cmd->hlid = hlid;
- cmd->wmm = sta->wme ? 1 : 0;
+out_free:
+ kfree(cmd);
- cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl,
- sta->supp_rates[wl->band]));
+out:
+ return ret;
+}
- wl1271_debug(DEBUG_CMD, "new sta rates: 0x%x", cmd->supported_rates);
+int wl12xx_cmd_stop_fwlog(struct wl1271 *wl)
+{
+ struct wl12xx_cmd_stop_fwlog *cmd;
+ int ret = 0;
- ret = wl1271_cmd_send(wl, CMD_ADD_STA, cmd, sizeof(*cmd), 0);
+ wl1271_debug(DEBUG_CMD, "cmd stop firmware logger");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = wl1271_cmd_send(wl, CMD_STOP_FWLOGGER, cmd, sizeof(*cmd), 0);
if (ret < 0) {
- wl1271_error("failed to initiate cmd add sta");
+ wl1271_error("failed to send stop firmware logger command");
goto out_free;
}
@@ -1187,12 +1584,15 @@ out:
return ret;
}
-int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
+static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
{
- struct wl1271_cmd_remove_sta *cmd;
- int ret;
+ struct wl12xx_cmd_roc *cmd;
+ int ret = 0;
- wl1271_debug(DEBUG_CMD, "cmd remove sta %d", (int)hlid);
+ wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wl->channel, role_id);
+
+ if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
+ return -EINVAL;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
@@ -1200,23 +1600,28 @@ int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
goto out;
}
- cmd->hlid = hlid;
- /* We never send a deauth, mac80211 is in charge of this */
- cmd->reason_opcode = 0;
- cmd->send_deauth_flag = 0;
+ cmd->role_id = role_id;
+ cmd->channel = wl->channel;
+ switch (wl->band) {
+ case IEEE80211_BAND_2GHZ:
+ cmd->band = RADIO_BAND_2_4GHZ;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ cmd->band = RADIO_BAND_5GHZ;
+ break;
+ default:
+ wl1271_error("roc - unknown band: %d", (int)wl->band);
+ ret = -EINVAL;
+ goto out_free;
+ }
- ret = wl1271_cmd_send(wl, CMD_REMOVE_STA, cmd, sizeof(*cmd), 0);
+
+ ret = wl1271_cmd_send(wl, CMD_REMAIN_ON_CHANNEL, cmd, sizeof(*cmd), 0);
if (ret < 0) {
- wl1271_error("failed to initiate cmd remove sta");
+ wl1271_error("failed to send ROC command");
goto out_free;
}
- /*
- * We are ok with a timeout here. The event is sometimes not sent
- * due to a firmware bug.
- */
- wl1271_cmd_wait_for_event_or_timeout(wl, STA_REMOVE_COMPLETE_EVENT_ID);
-
out_free:
kfree(cmd);
@@ -1224,28 +1629,24 @@ out:
return ret;
}
-int wl12xx_cmd_config_fwlog(struct wl1271 *wl)
+static int wl12xx_cmd_croc(struct wl1271 *wl, u8 role_id)
{
- struct wl12xx_cmd_config_fwlog *cmd;
+ struct wl12xx_cmd_croc *cmd;
int ret = 0;
- wl1271_debug(DEBUG_CMD, "cmd config firmware logger");
+ wl1271_debug(DEBUG_CMD, "cmd croc (%d)", role_id);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
+ cmd->role_id = role_id;
- cmd->logger_mode = wl->conf.fwlog.mode;
- cmd->log_severity = wl->conf.fwlog.severity;
- cmd->timestamp = wl->conf.fwlog.timestamp;
- cmd->output = wl->conf.fwlog.output;
- cmd->threshold = wl->conf.fwlog.threshold;
-
- ret = wl1271_cmd_send(wl, CMD_CONFIG_FWLOGGER, cmd, sizeof(*cmd), 0);
+ ret = wl1271_cmd_send(wl, CMD_CANCEL_REMAIN_ON_CHANNEL, cmd,
+ sizeof(*cmd), 0);
if (ret < 0) {
- wl1271_error("failed to send config firmware logger command");
+ wl1271_error("failed to send ROC command");
goto out_free;
}
@@ -1256,12 +1657,52 @@ out:
return ret;
}
-int wl12xx_cmd_start_fwlog(struct wl1271 *wl)
+int wl12xx_roc(struct wl1271 *wl, u8 role_id)
{
- struct wl12xx_cmd_start_fwlog *cmd;
int ret = 0;
- wl1271_debug(DEBUG_CMD, "cmd start firmware logger");
+ if (WARN_ON(test_bit(role_id, wl->roc_map)))
+ return 0;
+
+ ret = wl12xx_cmd_roc(wl, role_id);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_cmd_wait_for_event(wl,
+ REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID);
+ if (ret < 0) {
+ wl1271_error("cmd roc event completion error");
+ goto out;
+ }
+
+ __set_bit(role_id, wl->roc_map);
+out:
+ return ret;
+}
+
+int wl12xx_croc(struct wl1271 *wl, u8 role_id)
+{
+ int ret = 0;
+
+ if (WARN_ON(!test_bit(role_id, wl->roc_map)))
+ return 0;
+
+ ret = wl12xx_cmd_croc(wl, role_id);
+ if (ret < 0)
+ goto out;
+
+ __clear_bit(role_id, wl->roc_map);
+out:
+ return ret;
+}
+
+int wl12xx_cmd_channel_switch(struct wl1271 *wl,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct wl12xx_cmd_channel_switch *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "cmd channel switch");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
@@ -1269,9 +1710,14 @@ int wl12xx_cmd_start_fwlog(struct wl1271 *wl)
goto out;
}
- ret = wl1271_cmd_send(wl, CMD_START_FWLOGGER, cmd, sizeof(*cmd), 0);
+ cmd->channel = ch_switch->channel->hw_value;
+ cmd->switch_time = ch_switch->count;
+ cmd->tx_suspend = ch_switch->block_tx;
+ cmd->flush = 0; /* this value is ignored by the FW */
+
+ ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0);
if (ret < 0) {
- wl1271_error("failed to send start firmware logger command");
+ wl1271_error("failed to send channel switch command");
goto out_free;
}
@@ -1282,12 +1728,12 @@ out:
return ret;
}
-int wl12xx_cmd_stop_fwlog(struct wl1271 *wl)
+int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl)
{
- struct wl12xx_cmd_stop_fwlog *cmd;
- int ret = 0;
+ struct wl12xx_cmd_stop_channel_switch *cmd;
+ int ret;
- wl1271_debug(DEBUG_CMD, "cmd stop firmware logger");
+ wl1271_debug(DEBUG_ACX, "cmd stop channel switch");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
@@ -1295,9 +1741,9 @@ int wl12xx_cmd_stop_fwlog(struct wl1271 *wl)
goto out;
}
- ret = wl1271_cmd_send(wl, CMD_STOP_FWLOGGER, cmd, sizeof(*cmd), 0);
+ ret = wl1271_cmd_send(wl, CMD_STOP_CHANNEL_SWICTH, cmd, sizeof(*cmd), 0);
if (ret < 0) {
- wl1271_error("failed to send stop firmware logger command");
+ wl1271_error("failed to stop channel switch command");
goto out_free;
}
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index 1f7037292c1..b7bd42769aa 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -36,7 +36,15 @@ int wl128x_cmd_general_parms(struct wl1271 *wl);
int wl1271_cmd_radio_parms(struct wl1271 *wl);
int wl128x_cmd_radio_parms(struct wl1271 *wl);
int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
-int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type);
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id);
+int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
+int wl12xx_cmd_role_start_dev(struct wl1271 *wl);
+int wl12xx_cmd_role_stop_dev(struct wl1271 *wl);
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl);
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl);
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl);
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl);
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
@@ -56,23 +64,24 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
int wl1271_build_qos_null_data(struct wl1271 *wl);
int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
-int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id);
-int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id);
+int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid);
int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16);
int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
u16 tx_seq_16);
-int wl1271_cmd_disconnect(struct wl1271 *wl);
-int wl1271_cmd_set_sta_state(struct wl1271 *wl);
-int wl1271_cmd_start_bss(struct wl1271 *wl);
-int wl1271_cmd_stop_bss(struct wl1271 *wl);
-int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
-int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid);
+int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
+int wl12xx_roc(struct wl1271 *wl, u8 role_id);
+int wl12xx_croc(struct wl1271 *wl, u8 role_id);
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
+int wl12xx_cmd_channel_switch(struct wl1271 *wl,
+ struct ieee80211_channel_switch *ch_switch);
+int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl);
enum wl1271_commands {
CMD_INTERROGATE = 1, /*use this to read information elements*/
@@ -83,25 +92,21 @@ enum wl1271_commands {
CMD_DISABLE_TX = 6,
CMD_SCAN = 8,
CMD_STOP_SCAN = 9,
- CMD_START_JOIN = 11,
CMD_SET_KEYS = 12,
CMD_READ_MEMORY = 13,
CMD_WRITE_MEMORY = 14,
CMD_SET_TEMPLATE = 19,
CMD_TEST = 23,
CMD_NOISE_HIST = 28,
- CMD_LNA_CONTROL = 32,
+ CMD_QUIET_ELEMENT_SET_STATE = 29,
CMD_SET_BCN_MODE = 33,
CMD_MEASUREMENT = 34,
CMD_STOP_MEASUREMENT = 35,
- CMD_DISCONNECT = 36,
CMD_SET_PS_MODE = 37,
CMD_CHANNEL_SWITCH = 38,
CMD_STOP_CHANNEL_SWICTH = 39,
CMD_AP_DISCOVERY = 40,
CMD_STOP_AP_DISCOVERY = 41,
- CMD_SPS_SCAN = 42,
- CMD_STOP_SPS_SCAN = 43,
CMD_HEALTH_CHECK = 45,
CMD_DEBUG = 46,
CMD_TRIGGER_SCAN_TO = 47,
@@ -109,16 +114,30 @@ enum wl1271_commands {
CMD_CONNECTION_SCAN_SSID_CFG = 49,
CMD_START_PERIODIC_SCAN = 50,
CMD_STOP_PERIODIC_SCAN = 51,
- CMD_SET_STA_STATE = 52,
- CMD_CONFIG_FWLOGGER = 53,
- CMD_START_FWLOGGER = 54,
- CMD_STOP_FWLOGGER = 55,
+ CMD_SET_PEER_STATE = 52,
+ CMD_REMAIN_ON_CHANNEL = 53,
+ CMD_CANCEL_REMAIN_ON_CHANNEL = 54,
- /* AP mode commands */
- CMD_BSS_START = 60,
- CMD_BSS_STOP = 61,
- CMD_ADD_STA = 62,
- CMD_REMOVE_STA = 63,
+ CMD_CONFIG_FWLOGGER = 55,
+ CMD_START_FWLOGGER = 56,
+ CMD_STOP_FWLOGGER = 57,
+
+ /* AP commands */
+ CMD_ADD_PEER = 62,
+ CMD_REMOVE_PEER = 63,
+
+ /* Role API */
+ CMD_ROLE_ENABLE = 70,
+ CMD_ROLE_DISABLE = 71,
+ CMD_ROLE_START = 72,
+ CMD_ROLE_STOP = 73,
+
+ /* WIFI Direct */
+ CMD_WFD_START_DISCOVERY = 80,
+ CMD_WFD_STOP_DISCOVERY = 81,
+ CMD_WFD_ATTRIBUTE_CONFIG = 82,
+
+ CMD_NOP = 100,
NUM_COMMANDS,
MAX_COMMAND_ID = 0xFFFF,
@@ -147,21 +166,20 @@ enum cmd_templ {
CMD_TEMPL_CTS, /*
* For CTS-to-self (FastCTS) mechanism
* for BT/WLAN coexistence (SoftGemini). */
- CMD_TEMPL_ARP_RSP,
- CMD_TEMPL_LINK_MEASUREMENT_REPORT,
-
- /* AP-mode specific */
- CMD_TEMPL_AP_BEACON = 13,
+ CMD_TEMPL_AP_BEACON,
CMD_TEMPL_AP_PROBE_RESPONSE,
- CMD_TEMPL_AP_ARP_RSP,
+ CMD_TEMPL_ARP_RSP,
CMD_TEMPL_DEAUTH_AP,
+ CMD_TEMPL_TEMPORARY,
+ CMD_TEMPL_LINK_MEASUREMENT_REPORT,
CMD_TEMPL_MAX = 0xff
};
/* unit ms */
#define WL1271_COMMAND_TIMEOUT 2000
-#define WL1271_CMD_TEMPL_MAX_SIZE 252
+#define WL1271_CMD_TEMPL_DFLT_SIZE 252
+#define WL1271_CMD_TEMPL_MAX_SIZE 548
#define WL1271_EVENT_TIMEOUT 750
struct wl1271_cmd_header {
@@ -193,6 +211,8 @@ enum {
CMD_STATUS_WRONG_NESTING = 19,
CMD_STATUS_TIMEOUT = 21, /* Driver internal use.*/
CMD_STATUS_FW_RESET = 22, /* Driver internal use.*/
+ CMD_STATUS_TEMPLATE_OOM = 23,
+ CMD_STATUS_NO_RX_BA_SESSION = 24,
MAX_COMMAND_STATUS = 0xff
};
@@ -210,38 +230,114 @@ enum {
#define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1
#define WL1271_JOIN_CMD_BSS_TYPE_5GHZ 0x10
-struct wl1271_cmd_join {
+struct wl12xx_cmd_role_enable {
struct wl1271_cmd_header header;
- __le32 bssid_lsb;
- __le16 bssid_msb;
- __le16 beacon_interval; /* in TBTTs */
- __le32 rx_config_options;
- __le32 rx_filter_options;
+ u8 role_id;
+ u8 role_type;
+ u8 mac_address[ETH_ALEN];
+} __packed;
- /*
- * The target uses this field to determine the rate at
- * which to transmit control frame responses (such as
- * ACK or CTS frames).
- */
- __le32 basic_rate_set;
- __le32 supported_rate_set;
- u8 dtim_interval;
- /*
- * bits 0-2: This bitwise field specifies the type
- * of BSS to start or join (BSS_TYPE_*).
- * bit 4: Band - The radio band in which to join
- * or start.
- * 0 - 2.4GHz band
- * 1 - 5GHz band
- * bits 3, 5-7: Reserved
- */
- u8 bss_type;
+struct wl12xx_cmd_role_disable {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+ u8 padding[3];
+} __packed;
+
+enum wl12xx_band {
+ WL12XX_BAND_2_4GHZ = 0,
+ WL12XX_BAND_5GHZ = 1,
+ WL12XX_BAND_JAPAN_4_9_GHZ = 2,
+ WL12XX_BAND_DEFAULT = WL12XX_BAND_2_4GHZ,
+ WL12XX_BAND_INVALID = 0x7E,
+ WL12XX_BAND_MAX_RADIO = 0x7F,
+};
+
+struct wl12xx_cmd_role_start {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+ u8 band;
u8 channel;
- u8 ssid_len;
- u8 ssid[IW_ESSID_MAX_SIZE];
- u8 ctrl; /* JOIN_CMD_CTRL_* */
- u8 reserved[3];
+ u8 padding;
+
+ union {
+ struct {
+ u8 hlid;
+ u8 session;
+ u8 padding_1[54];
+ } __packed device;
+ /* sta & p2p_cli use the same struct */
+ struct {
+ u8 bssid[ETH_ALEN];
+ u8 hlid; /* data hlid */
+ u8 session;
+ __le32 remote_rates; /* remote supported rates */
+
+ /*
+ * The target uses this field to determine the rate at
+ * which to transmit control frame responses (such as
+ * ACK or CTS frames).
+ */
+ __le32 basic_rate_set;
+ __le32 local_rates; /* local supported rates */
+
+ u8 ssid_type;
+ u8 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+
+ __le16 beacon_interval; /* in TBTTs */
+ } __packed sta;
+ struct {
+ u8 bssid[ETH_ALEN];
+ u8 hlid; /* data hlid */
+ u8 dtim_interval;
+ __le32 remote_rates; /* remote supported rates */
+
+ __le32 basic_rate_set;
+ __le32 local_rates; /* local supported rates */
+
+ u8 ssid_type;
+ u8 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+
+ __le16 beacon_interval; /* in TBTTs */
+
+ u8 padding_1[4];
+ } __packed ibss;
+ /* ap & p2p_go use the same struct */
+ struct {
+ __le16 aging_period; /* in secs */
+ u8 beacon_expiry; /* in ms */
+ u8 bss_index;
+ /* The host link id for the AP's global queue */
+ u8 global_hlid;
+ /* The host link id for the AP's broadcast queue */
+ u8 broadcast_hlid;
+
+ __le16 beacon_interval; /* in TBTTs */
+
+ __le32 basic_rate_set;
+ __le32 local_rates; /* local supported rates */
+
+ u8 dtim_interval;
+
+ u8 ssid_type;
+ u8 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+
+ u8 padding_1[5];
+ } __packed ap;
+ };
+} __packed;
+
+struct wl12xx_cmd_role_stop {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+ u8 disc_type; /* only STA and P2P_CLI */
+ __le16 reason; /* only STA and P2P_CLI */
} __packed;
struct cmd_enabledisable_path {
@@ -287,8 +383,9 @@ enum wl1271_cmd_ps_mode {
struct wl1271_cmd_ps_params {
struct wl1271_cmd_header header;
+ u8 role_id;
u8 ps_mode; /* STATION_* */
- u8 padding[3];
+ u8 padding[2];
} __packed;
/* HW encryption keys */
@@ -301,6 +398,12 @@ enum wl1271_cmd_key_action {
MAX_KEY_ACTION = 0xffff,
};
+enum wl1271_cmd_lid_key_type {
+ UNICAST_LID_TYPE = 0,
+ BROADCAST_LID_TYPE = 1,
+ WEP_DEFAULT_LID_TYPE = 2
+};
+
enum wl1271_cmd_key_type {
KEY_NONE = 0,
KEY_WEP = 1,
@@ -309,44 +412,7 @@ enum wl1271_cmd_key_type {
KEY_GEM = 4,
};
-/* FIXME: Add description for key-types */
-
-struct wl1271_cmd_set_sta_keys {
- struct wl1271_cmd_header header;
-
- /* Ignored for default WEP key */
- u8 addr[ETH_ALEN];
-
- /* key_action_e */
- __le16 key_action;
-
- __le16 reserved_1;
-
- /* key size in bytes */
- u8 key_size;
-
- /* key_type_e */
- u8 key_type;
- u8 ssid_profile;
-
- /*
- * TKIP, AES: frame's key id field.
- * For WEP default key: key id;
- */
- u8 id;
- u8 reserved_2[6];
- u8 key[MAX_KEY_SIZE];
- __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
- __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
-} __packed;
-
-enum wl1271_cmd_lid_key_type {
- UNICAST_LID_TYPE = 0,
- BROADCAST_LID_TYPE = 1,
- WEP_DEFAULT_LID_TYPE = 2
-};
-
-struct wl1271_cmd_set_ap_keys {
+struct wl1271_cmd_set_keys {
struct wl1271_cmd_header header;
/*
@@ -496,69 +562,46 @@ enum wl1271_disconnect_type {
DISCONNECT_DISASSOC
};
-struct wl1271_cmd_disconnect {
- struct wl1271_cmd_header header;
-
- __le32 rx_config_options;
- __le32 rx_filter_options;
-
- __le16 reason;
- u8 type;
-
- u8 padding;
-} __packed;
-
#define WL1271_CMD_STA_STATE_CONNECTED 1
-struct wl1271_cmd_set_sta_state {
+struct wl12xx_cmd_set_peer_state {
struct wl1271_cmd_header header;
+ u8 hlid;
u8 state;
- u8 padding[3];
+ u8 padding[2];
} __packed;
-enum wl1271_ssid_type {
- SSID_TYPE_PUBLIC = 0,
- SSID_TYPE_HIDDEN = 1
+struct wl12xx_cmd_roc {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+ u8 channel;
+ u8 band;
+ u8 padding;
};
-struct wl1271_cmd_bss_start {
+struct wl12xx_cmd_croc {
struct wl1271_cmd_header header;
- /* wl1271_ssid_type */
- u8 ssid_type;
- u8 ssid_len;
- u8 ssid[IW_ESSID_MAX_SIZE];
- u8 padding_1[2];
+ u8 role_id;
+ u8 padding[3];
+};
- /* Basic rate set */
- __le32 basic_rate_set;
- /* Aging period in seconds*/
- __le16 aging_period;
+enum wl12xx_ssid_type {
+ WL12XX_SSID_TYPE_PUBLIC = 0,
+ WL12XX_SSID_TYPE_HIDDEN = 1,
+ WL12XX_SSID_TYPE_ANY = 2,
+};
- /*
- * This field specifies the time between target beacon
- * transmission times (TBTTs), in time units (TUs).
- * Valid values are 1 to 1024.
- */
- __le16 beacon_interval;
- u8 bssid[ETH_ALEN];
- u8 bss_index;
- /* Radio band */
- u8 band;
- u8 channel;
- /* The host link id for the AP's global queue */
- u8 global_hlid;
- /* The host link id for the AP's broadcast queue */
- u8 broadcast_hlid;
- /* DTIM count */
- u8 dtim_interval;
- /* Beacon expiry time in ms */
- u8 beacon_expiry;
- u8 padding_2[3];
-} __packed;
+enum wl1271_psd_type {
+ WL1271_PSD_LEGACY = 0,
+ WL1271_PSD_UPSD_TRIGGER = 1,
+ WL1271_PSD_LEGACY_PSPOLL = 2,
+ WL1271_PSD_SAPSD = 3
+};
-struct wl1271_cmd_add_sta {
+struct wl12xx_cmd_add_peer {
struct wl1271_cmd_header header;
u8 addr[ETH_ALEN];
@@ -572,7 +615,7 @@ struct wl1271_cmd_add_sta {
u8 padding1;
} __packed;
-struct wl1271_cmd_remove_sta {
+struct wl12xx_cmd_remove_peer {
struct wl1271_cmd_header header;
u8 hlid;
@@ -637,4 +680,21 @@ struct wl12xx_cmd_stop_fwlog {
struct wl1271_cmd_header header;
} __packed;
+struct wl12xx_cmd_channel_switch {
+ struct wl1271_cmd_header header;
+
+ /* The new serving channel */
+ u8 channel;
+ /* Relative time of the serving channel switch in TBTT units */
+ u8 switch_time;
+ /* 1: Suspend TX till switch time; 0: Do not suspend TX */
+ u8 tx_suspend;
+ /* 1: Flush TX at switch time; 0: Do not flush */
+ u8 flush;
+} __packed;
+
+struct wl12xx_cmd_stop_channel_switch {
+ struct wl1271_cmd_header header;
+} __packed;
+
#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 6080e01d92c..04bb8fbf93f 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -99,40 +99,75 @@ enum {
enum {
/*
- * PER threshold in PPM of the BT voice
+ * Configure the min and max time BT gains the antenna
+ * in WLAN / BT master basic rate
*
- * Range: 0 - 10000000
+ * Range: 0 - 255 (ms)
*/
- CONF_SG_BT_PER_THRESHOLD = 0,
+ CONF_SG_ACL_BT_MASTER_MIN_BR = 0,
+ CONF_SG_ACL_BT_MASTER_MAX_BR,
/*
- * Number of consequent RX_ACTIVE activities to override BT voice
- * frames to ensure WLAN connection
+ * Configure the min and max time BT gains the antenna
+ * in WLAN / BT slave basic rate
*
- * Range: 0 - 100
+ * Range: 0 - 255 (ms)
*/
- CONF_SG_HV3_MAX_OVERRIDE,
+ CONF_SG_ACL_BT_SLAVE_MIN_BR,
+ CONF_SG_ACL_BT_SLAVE_MAX_BR,
/*
- * Defines the PER threshold of the BT voice
+ * Configure the min and max time BT gains the antenna
+ * in WLAN / BT master EDR
*
- * Range: 0 - 65000
+ * Range: 0 - 255 (ms)
*/
- CONF_SG_BT_NFS_SAMPLE_INTERVAL,
+ CONF_SG_ACL_BT_MASTER_MIN_EDR,
+ CONF_SG_ACL_BT_MASTER_MAX_EDR,
/*
- * Defines the load ratio of BT
+ * Configure the min and max time BT gains the antenna
+ * in WLAN / BT slave EDR
*
- * Range: 0 - 100 (%)
+ * Range: 0 - 255 (ms)
*/
- CONF_SG_BT_LOAD_RATIO,
+ CONF_SG_ACL_BT_SLAVE_MIN_EDR,
+ CONF_SG_ACL_BT_SLAVE_MAX_EDR,
/*
- * Defines whether the SG will force WLAN host to enter/exit PSM
+ * The maximum time WLAN can gain the antenna
+ * in WLAN PSM / BT master/slave BR
*
- * Range: 1 - SG can force, 0 - host handles PSM
+ * Range: 0 - 255 (ms)
*/
- CONF_SG_AUTO_PS_MODE,
+ CONF_SG_ACL_WLAN_PS_MASTER_BR,
+ CONF_SG_ACL_WLAN_PS_SLAVE_BR,
+
+ /*
+ * The maximum time WLAN can gain the antenna
+ * in WLAN PSM / BT master/slave EDR
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_ACL_WLAN_PS_MASTER_EDR,
+ CONF_SG_ACL_WLAN_PS_SLAVE_EDR,
+
+ /* TODO: explain these values */
+ CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR,
+ CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR,
+ CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR,
+ CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR,
+ CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR,
+ CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR,
+ CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR,
+ CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR,
+
+ CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR,
+ CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR,
+ CONF_SG_ACL_PASSIVE_SCAN_BT_BR,
+ CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR,
+ CONF_SG_ACL_PASSIVE_SCAN_BT_EDR,
+ CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR,
/*
* Compensation percentage of probe requests when scan initiated
@@ -151,102 +186,70 @@ enum {
CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3,
/*
- * Defines antenna configuration (single/dual antenna)
- *
- * Range: 0 - single antenna, 1 - dual antenna
- */
- CONF_SG_ANTENNA_CONFIGURATION,
-
- /*
- * The threshold (percent) of max consequtive beacon misses before
- * increasing priority of beacon reception.
- *
- * Range: 0 - 100 (%)
- */
- CONF_SG_BEACON_MISS_PERCENT,
-
- /*
- * The rate threshold below which receiving a data frame from the AP
- * will increase the priority of the data frame above BT traffic.
- *
- * Range: 0,2, 5(=5.5), 6, 9, 11, 12, 18, 24, 36, 48, 54
- */
- CONF_SG_RATE_ADAPT_THRESH,
-
- /*
- * Not used currently.
+ * Compensation percentage of WLAN active scan window if initiated
+ * during BT A2DP
*
- * Range: 0
+ * Range: 0 - 1000 (%)
*/
- CONF_SG_RATE_ADAPT_SNR,
+ CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP,
/*
- * Configure the min and max time BT gains the antenna
- * in WLAN PSM / BT master basic rate
+ * Compensation percentage of WLAN passive scan window if initiated
+ * during BT A2DP BR
*
- * Range: 0 - 255 (ms)
+ * Range: 0 - 1000 (%)
*/
- CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR,
- CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR,
+ CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR,
/*
- * The time after it expires no new WLAN trigger frame is trasmitted
- * in WLAN PSM / BT master basic rate
+ * Compensation percentage of WLAN passive scan window if initiated
+ * during BT A2DP EDR
*
- * Range: 0 - 255 (ms)
+ * Range: 0 - 1000 (%)
*/
- CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR,
+ CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR,
/*
- * Configure the min and max time BT gains the antenna
- * in WLAN PSM / BT slave basic rate
+ * Compensation percentage of WLAN passive scan window if initiated
+ * during BT voice
*
- * Range: 0 - 255 (ms)
+ * Range: 0 - 1000 (%)
*/
- CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR,
- CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR,
+ CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3,
- /*
- * The time after it expires no new WLAN trigger frame is trasmitted
- * in WLAN PSM / BT slave basic rate
- *
- * Range: 0 - 255 (ms)
- */
- CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR,
+ /* TODO: explain these values */
+ CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN,
+ CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN,
+ CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN,
/*
- * Configure the min and max time BT gains the antenna
- * in WLAN PSM / BT master EDR
+ * Defines whether the SG will force WLAN host to enter/exit PSM
*
- * Range: 0 - 255 (ms)
+ * Range: 1 - SG can force, 0 - host handles PSM
*/
- CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR,
- CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR,
+ CONF_SG_STA_FORCE_PS_IN_BT_SCO,
/*
- * The time after it expires no new WLAN trigger frame is trasmitted
- * in WLAN PSM / BT master EDR
+ * Defines antenna configuration (single/dual antenna)
*
- * Range: 0 - 255 (ms)
+ * Range: 0 - single antenna, 1 - dual antenna
*/
- CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR,
+ CONF_SG_ANTENNA_CONFIGURATION,
/*
- * Configure the min and max time BT gains the antenna
- * in WLAN PSM / BT slave EDR
+ * The threshold (percent) of max consecutive beacon misses before
+ * increasing priority of beacon reception.
*
- * Range: 0 - 255 (ms)
+ * Range: 0 - 100 (%)
*/
- CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR,
- CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR,
+ CONF_SG_BEACON_MISS_PERCENT,
/*
- * The time after it expires no new WLAN trigger frame is trasmitted
- * in WLAN PSM / BT slave EDR
+ * Protection time of the DHCP procedure.
*
- * Range: 0 - 255 (ms)
+ * Range: 0 - 100000 (ms)
*/
- CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR,
+ CONF_SG_DHCP_TIME,
/*
* RX guard time before the beginning of a new BT voice frame during
@@ -273,166 +276,59 @@ enum {
*/
CONF_SG_ADAPTIVE_RXT_TXT,
- /*
- * The used WLAN legacy service period during active BT ACL link
- *
- * Range: 0 - 255 (ms)
- */
- CONF_SG_PS_POLL_TIMEOUT,
+ /* TODO: explain this value */
+ CONF_SG_GENERAL_USAGE_BIT_MAP,
/*
- * The used WLAN UPSD service period during active BT ACL link
+ * Number of consecutive BT voice frames not interrupted by WLAN
*
- * Range: 0 - 255 (ms)
- */
- CONF_SG_UPSD_TIMEOUT,
-
- /*
- * Configure the min and max time BT gains the antenna
- * in WLAN Active / BT master EDR
- *
- * Range: 0 - 255 (ms)
- */
- CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR,
- CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR,
-
- /*
- * The maximum time WLAN can gain the antenna for
- * in WLAN Active / BT master EDR
- *
- * Range: 0 - 255 (ms)
- */
- CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR,
-
- /*
- * Configure the min and max time BT gains the antenna
- * in WLAN Active / BT slave EDR
- *
- * Range: 0 - 255 (ms)
- */
- CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR,
- CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR,
-
- /*
- * The maximum time WLAN can gain the antenna for
- * in WLAN Active / BT slave EDR
- *
- * Range: 0 - 255 (ms)
+ * Range: 0 - 100
*/
- CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR,
+ CONF_SG_HV3_MAX_SERVED,
/*
- * Configure the min and max time BT gains the antenna
- * in WLAN Active / BT basic rate
+ * The used WLAN legacy service period during active BT ACL link
*
* Range: 0 - 255 (ms)
*/
- CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR,
- CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR,
+ CONF_SG_PS_POLL_TIMEOUT,
/*
- * The maximum time WLAN can gain the antenna for
- * in WLAN Active / BT basic rate
+ * The used WLAN UPSD service period during active BT ACL link
*
* Range: 0 - 255 (ms)
*/
- CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR,
-
- /*
- * Compensation percentage of WLAN passive scan window if initiated
- * during BT voice
- *
- * Range: 0 - 1000 (%)
- */
- CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3,
-
- /*
- * Compensation percentage of WLAN passive scan window if initiated
- * during BT A2DP
- *
- * Range: 0 - 1000 (%)
- */
- CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP,
+ CONF_SG_UPSD_TIMEOUT,
- /*
- * Fixed time ensured for BT traffic to gain the antenna during WLAN
- * passive scan.
- *
- * Range: 0 - 1000 ms
- */
- CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME,
+ CONF_SG_CONSECUTIVE_CTS_THRESHOLD,
+ CONF_SG_STA_RX_WINDOW_AFTER_DTIM,
+ CONF_SG_STA_CONNECTION_PROTECTION_TIME,
- /*
- * Fixed time ensured for WLAN traffic to gain the antenna during WLAN
- * passive scan.
- *
- * Range: 0 - 1000 ms
- */
- CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME,
+ /* AP params */
+ CONF_AP_BEACON_MISS_TX,
+ CONF_AP_RX_WINDOW_AFTER_BEACON,
+ CONF_AP_BEACON_WINDOW_INTERVAL,
+ CONF_AP_CONNECTION_PROTECTION_TIME,
+ CONF_AP_BT_ACL_VAL_BT_SERVE_TIME,
+ CONF_AP_BT_ACL_VAL_WL_SERVE_TIME,
- /*
- * Number of consequent BT voice frames not interrupted by WLAN
- *
- * Range: 0 - 100
- */
- CONF_SG_HV3_MAX_SERVED,
-
- /*
- * Protection time of the DHCP procedure.
- *
- * Range: 0 - 100000 (ms)
- */
- CONF_SG_DHCP_TIME,
-
- /*
- * Compensation percentage of WLAN active scan window if initiated
- * during BT A2DP
- *
- * Range: 0 - 1000 (%)
- */
- CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP,
CONF_SG_TEMP_PARAM_1,
CONF_SG_TEMP_PARAM_2,
CONF_SG_TEMP_PARAM_3,
CONF_SG_TEMP_PARAM_4,
CONF_SG_TEMP_PARAM_5,
-
- /*
- * AP beacon miss
- *
- * Range: 0 - 255
- */
- CONF_SG_AP_BEACON_MISS_TX,
-
- /*
- * AP RX window length
- *
- * Range: 0 - 50
- */
- CONF_SG_RX_WINDOW_LENGTH,
-
- /*
- * AP connection protection time
- *
- * Range: 0 - 5000
- */
- CONF_SG_AP_CONNECTION_PROTECTION_TIME,
-
CONF_SG_TEMP_PARAM_6,
CONF_SG_TEMP_PARAM_7,
CONF_SG_TEMP_PARAM_8,
CONF_SG_TEMP_PARAM_9,
CONF_SG_TEMP_PARAM_10,
- CONF_SG_STA_PARAMS_MAX = CONF_SG_TEMP_PARAM_5 + 1,
- CONF_SG_AP_PARAMS_MAX = CONF_SG_TEMP_PARAM_10 + 1,
-
+ CONF_SG_PARAMS_MAX,
CONF_SG_PARAMS_ALL = 0xff
};
struct conf_sg_settings {
- u32 sta_params[CONF_SG_STA_PARAMS_MAX];
- u32 ap_params[CONF_SG_AP_PARAMS_MAX];
+ u32 params[CONF_SG_PARAMS_MAX];
u8 state;
};
@@ -520,13 +416,17 @@ struct conf_rx_settings {
u8 queue_type;
};
-#define CONF_TX_MAX_RATE_CLASSES 8
+#define CONF_TX_MAX_RATE_CLASSES 10
#define CONF_TX_RATE_MASK_UNSPECIFIED 0
#define CONF_TX_RATE_MASK_BASIC (CONF_HW_BIT_RATE_1MBPS | \
CONF_HW_BIT_RATE_2MBPS)
#define CONF_TX_RATE_RETRY_LIMIT 10
+/* basic rates for p2p operations (probe req/resp, etc.) */
+#define CONF_TX_RATE_MASK_BASIC_P2P (CONF_HW_BIT_RATE_6MBPS | \
+ CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS)
+
/*
* Rates supported for data packets when operating as AP. Note the absence
* of the 22Mbps rate. There is a FW limitation on 12 rates so we must drop
@@ -545,6 +445,11 @@ struct conf_rx_settings {
CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
CONF_HW_BIT_RATE_54MBPS)
+#define CONF_TX_MCS_RATES (CONF_HW_BIT_RATE_MCS_0 | \
+ CONF_HW_BIT_RATE_MCS_1 | CONF_HW_BIT_RATE_MCS_2 | \
+ CONF_HW_BIT_RATE_MCS_3 | CONF_HW_BIT_RATE_MCS_4 | \
+ CONF_HW_BIT_RATE_MCS_5 | CONF_HW_BIT_RATE_MCS_6 | \
+ CONF_HW_BIT_RATE_MCS_7)
/*
* Default rates for management traffic when operating in AP mode. This
@@ -553,12 +458,10 @@ struct conf_rx_settings {
#define CONF_TX_AP_DEFAULT_MGMT_RATES (CONF_HW_BIT_RATE_1MBPS | \
CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS)
-/*
- * Default rates for working as IBSS. use 11b rates
- */
+/* default rates for working as IBSS (11b and OFDM) */
#define CONF_TX_IBSS_DEFAULT_RATES (CONF_HW_BIT_RATE_1MBPS | \
CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
- CONF_HW_BIT_RATE_11MBPS);
+ CONF_HW_BIT_RATE_11MBPS | CONF_TX_OFDM_RATES);
struct conf_tx_rate_class {
@@ -661,6 +564,9 @@ struct conf_tx_ac_category {
#define CONF_TX_MAX_TID_COUNT 8
+/* Allow TX BA on all TIDs but 6,7. These are currently reserved in the FW */
+#define CONF_TX_BA_ENABLED_TID_BITMAP 0x3F
+
enum {
CONF_CHANNEL_TYPE_DCF = 0, /* DC/LEGACY*/
CONF_CHANNEL_TYPE_EDCF = 1, /* EDCA*/
@@ -913,7 +819,7 @@ struct conf_conn_settings {
struct conf_bcn_filt_rule bcn_filt_ie[CONF_MAX_BCN_FILT_IE_COUNT];
/*
- * The number of consequtive beacons to lose, before the firmware
+ * The number of consecutive beacons to lose, before the firmware
* becomes out of synch.
*
* Range: u32
@@ -951,7 +857,7 @@ struct conf_conn_settings {
u8 rx_broadcast_in_ps;
/*
- * Consequtive PS Poll failures before sending event to driver
+ * Consecutive PS Poll failures before sending event to driver
*
* Range: u8
*/
@@ -1012,14 +918,6 @@ struct conf_conn_settings {
u8 psm_entry_nullfunc_retries;
/*
- * Specifies the time to linger in active mode after successfully
- * transmitting the PSM entry null-func frame.
- *
- * Range 0 - 255 TU's
- */
- u8 psm_entry_hangover_period;
-
- /*
*
* Specifies the interval of the connection keep-alive null-func
* frame in ms.
@@ -1199,8 +1097,12 @@ struct conf_rf_settings {
};
struct conf_ht_setting {
- u16 tx_ba_win_size;
+ u8 rx_ba_win_size;
+ u8 tx_ba_win_size;
u16 inactivity_timeout;
+
+ /* bitmap of enabled TIDs for TX BA sessions */
+ u8 tx_ba_tid_bitmap;
};
struct conf_memory_settings {
@@ -1309,6 +1211,39 @@ struct conf_fwlog {
u8 threshold;
};
+#define ACX_RATE_MGMT_NUM_OF_RATES 13
+struct conf_rate_policy_settings {
+ u16 rate_retry_score;
+ u16 per_add;
+ u16 per_th1;
+ u16 per_th2;
+ u16 max_per;
+ u8 inverse_curiosity_factor;
+ u8 tx_fail_low_th;
+ u8 tx_fail_high_th;
+ u8 per_alpha_shift;
+ u8 per_add_shift;
+ u8 per_beta1_shift;
+ u8 per_beta2_shift;
+ u8 rate_check_up;
+ u8 rate_check_down;
+ u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES];
+};
+
+struct conf_hangover_settings {
+ u32 recover_time;
+ u8 hangover_period;
+ u8 dynamic_mode;
+ u8 early_termination_mode;
+ u8 max_period;
+ u8 min_period;
+ u8 increase_delta;
+ u8 decrease_delta;
+ u8 quiet_time;
+ u8 increase_time;
+ u8 window_size;
+};
+
struct conf_drv_settings {
struct conf_sg_settings sg;
struct conf_rx_settings rx;
@@ -1326,6 +1261,8 @@ struct conf_drv_settings {
struct conf_fm_coex fm_coex;
struct conf_rx_streaming_settings rx_streaming;
struct conf_fwlog fwlog;
+ struct conf_rate_policy_settings rate;
+ struct conf_hangover_settings hangover;
u8 hci_io_ds;
};
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index 37934b5601c..3999fd52830 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -265,18 +265,10 @@ static ssize_t gpio_power_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
- char buf[10];
- size_t len;
unsigned long value;
int ret;
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len)) {
- return -EFAULT;
- }
- buf[len] = '\0';
-
- ret = kstrtoul(buf, 0, &value);
+ ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value in gpio_power");
return -EINVAL;
@@ -339,10 +331,11 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
#define DRIVER_STATE_PRINT_HEX(x) DRIVER_STATE_PRINT(x, "0x%x")
DRIVER_STATE_PRINT_INT(tx_blocks_available);
- DRIVER_STATE_PRINT_INT(tx_allocated_blocks[0]);
- DRIVER_STATE_PRINT_INT(tx_allocated_blocks[1]);
- DRIVER_STATE_PRINT_INT(tx_allocated_blocks[2]);
- DRIVER_STATE_PRINT_INT(tx_allocated_blocks[3]);
+ DRIVER_STATE_PRINT_INT(tx_allocated_blocks);
+ DRIVER_STATE_PRINT_INT(tx_allocated_pkts[0]);
+ DRIVER_STATE_PRINT_INT(tx_allocated_pkts[1]);
+ DRIVER_STATE_PRINT_INT(tx_allocated_pkts[2]);
+ DRIVER_STATE_PRINT_INT(tx_allocated_pkts[3]);
DRIVER_STATE_PRINT_INT(tx_frames_cnt);
DRIVER_STATE_PRINT_LHEX(tx_frames_map[0]);
DRIVER_STATE_PRINT_INT(tx_queue_count[0]);
@@ -352,10 +345,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_INT(tx_packets_count);
DRIVER_STATE_PRINT_INT(tx_results_count);
DRIVER_STATE_PRINT_LHEX(flags);
- DRIVER_STATE_PRINT_INT(tx_blocks_freed[0]);
- DRIVER_STATE_PRINT_INT(tx_blocks_freed[1]);
- DRIVER_STATE_PRINT_INT(tx_blocks_freed[2]);
- DRIVER_STATE_PRINT_INT(tx_blocks_freed[3]);
+ DRIVER_STATE_PRINT_INT(tx_blocks_freed);
DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb);
DRIVER_STATE_PRINT_INT(rx_counter);
DRIVER_STATE_PRINT_INT(session_counter);
@@ -369,9 +359,6 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_INT(beacon_int);
DRIVER_STATE_PRINT_INT(psm_entry_retry);
DRIVER_STATE_PRINT_INT(ps_poll_failures);
- DRIVER_STATE_PRINT_HEX(filters);
- DRIVER_STATE_PRINT_HEX(rx_config);
- DRIVER_STATE_PRINT_HEX(rx_filter);
DRIVER_STATE_PRINT_INT(power_level);
DRIVER_STATE_PRINT_INT(rssi_thold);
DRIVER_STATE_PRINT_INT(last_rssi_event);
@@ -432,17 +419,10 @@ static ssize_t dtim_interval_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
- char buf[10];
- size_t len;
unsigned long value;
int ret;
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
-
- ret = kstrtoul(buf, 0, &value);
+ ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value for dtim_interval");
return -EINVAL;
@@ -497,17 +477,10 @@ static ssize_t beacon_interval_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
- char buf[10];
- size_t len;
unsigned long value;
int ret;
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
-
- ret = kstrtoul(buf, 0, &value);
+ ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value for beacon_interval");
return -EINVAL;
@@ -547,17 +520,10 @@ static ssize_t rx_streaming_interval_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
- char buf[10];
- size_t len;
unsigned long value;
int ret;
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
-
- ret = kstrtoul(buf, 0, &value);
+ ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value in rx_streaming_interval!");
return -EINVAL;
@@ -606,17 +572,10 @@ static ssize_t rx_streaming_always_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
- char buf[10];
- size_t len;
unsigned long value;
int ret;
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
-
- ret = kstrtoul(buf, 0, &value);
+ ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value in rx_streaming_write!");
return -EINVAL;
@@ -660,6 +619,47 @@ static const struct file_operations rx_streaming_always_ops = {
.llseek = default_llseek,
};
+static ssize_t beacon_filtering_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ char buf[10];
+ size_t len;
+ unsigned long value;
+ int ret;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value for beacon_filtering!");
+ return -EINVAL;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_acx_beacon_filter_opt(wl, !!value);
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations beacon_filtering_ops = {
+ .write = beacon_filtering_write,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
static int wl1271_debugfs_add_files(struct wl1271 *wl,
struct dentry *rootdir)
{
@@ -772,6 +772,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(driver_state, rootdir);
DEBUGFS_ADD(dtim_interval, rootdir);
DEBUGFS_ADD(beacon_interval, rootdir);
+ DEBUGFS_ADD(beacon_filtering, rootdir);
streaming = debugfs_create_dir("rx_streaming", rootdir);
if (!streaming || IS_ERR(streaming))
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index 304aaa2ee01..674ad2a9e40 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -171,19 +171,26 @@ static void wl1271_event_rssi_trigger(struct wl1271 *wl,
wl->last_rssi_event = event;
}
-static void wl1271_stop_ba_event(struct wl1271 *wl, u8 ba_allowed)
+static void wl1271_stop_ba_event(struct wl1271 *wl)
{
- /* Convert the value to bool */
- wl->ba_allowed = !!ba_allowed;
-
- /*
- * Return in case:
- * there are not BA open or the event indication is to allowed BA
- */
- if ((!wl->ba_rx_bitmap) || (wl->ba_allowed))
- return;
+ if (wl->bss_type != BSS_TYPE_AP_BSS) {
+ if (!wl->ba_rx_bitmap)
+ return;
+ ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap,
+ wl->bssid);
+ } else {
+ int i;
+ struct wl1271_link *lnk;
+ for (i = WL1271_AP_STA_HLID_START; i < AP_MAX_LINKS; i++) {
+ lnk = &wl->links[i];
+ if (!wl1271_is_active_sta(wl, i) || !lnk->ba_bitmap)
+ continue;
- ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap, wl->bssid);
+ ieee80211_stop_rx_ba_session(wl->vif,
+ lnk->ba_bitmap,
+ lnk->addr);
+ }
+ }
}
static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
@@ -283,15 +290,32 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
wl1271_event_rssi_trigger(wl, mbox);
}
- if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) && !is_ap) {
+ if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)) {
wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
- "ba_allowed = 0x%x", mbox->ba_allowed);
+ "ba_allowed = 0x%x", mbox->rx_ba_allowed);
- if (wl->vif)
- wl1271_stop_ba_event(wl, mbox->ba_allowed);
+ wl->ba_allowed = !!mbox->rx_ba_allowed;
+
+ if (wl->vif && !wl->ba_allowed)
+ wl1271_stop_ba_event(wl);
+ }
+
+ if ((vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) && !is_ap) {
+ wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. "
+ "status = 0x%x",
+ mbox->channel_switch_status);
+ /*
+ * That event uses for two cases:
+ * 1) channel switch complete with status=0
+ * 2) channel switch failed status=1
+ */
+ if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags) &&
+ (wl->vif))
+ ieee80211_chswitch_done(wl->vif,
+ mbox->channel_switch_status ? false : true);
}
- if ((vector & DUMMY_PACKET_EVENT_ID) && !is_ap) {
+ if ((vector & DUMMY_PACKET_EVENT_ID)) {
wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
if (wl->vif)
wl1271_tx_dummy_packet(wl);
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index e524ad6fe4e..49c1a0ede5b 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -49,32 +49,27 @@ enum {
MEASUREMENT_START_EVENT_ID = BIT(8),
MEASUREMENT_COMPLETE_EVENT_ID = BIT(9),
SCAN_COMPLETE_EVENT_ID = BIT(10),
- SCHEDULED_SCAN_COMPLETE_EVENT_ID = BIT(11),
+ WFD_DISCOVERY_COMPLETE_EVENT_ID = BIT(11),
AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(12),
PS_REPORT_EVENT_ID = BIT(13),
PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(14),
DISCONNECT_EVENT_COMPLETE_ID = BIT(15),
- JOIN_EVENT_COMPLETE_ID = BIT(16),
+ /* BIT(16) is reserved */
CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17),
BSS_LOSE_EVENT_ID = BIT(18),
REGAINED_BSS_EVENT_ID = BIT(19),
MAX_TX_RETRY_EVENT_ID = BIT(20),
- /* STA: dummy paket for dynamic mem blocks */
- DUMMY_PACKET_EVENT_ID = BIT(21),
- /* AP: STA remove complete */
- STA_REMOVE_COMPLETE_EVENT_ID = BIT(21),
+ DUMMY_PACKET_EVENT_ID = BIT(21),
SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
- /* STA: SG prediction */
- SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23),
- /* AP: Inactive STA */
- INACTIVE_STA_EVENT_ID = BIT(23),
+ CHANGE_AUTO_MODE_TIMEOUT_EVENT_ID = BIT(23),
SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25),
- DBG_EVENT_ID = BIT(26),
- HEALTH_CHECK_REPLY_EVENT_ID = BIT(27),
+ INACTIVE_STA_EVENT_ID = BIT(26),
+ PEER_REMOVE_COMPLETE_EVENT_ID = BIT(27),
PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(28),
PERIODIC_SCAN_REPORT_EVENT_ID = BIT(29),
BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(30),
+ REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(31),
EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff,
};
@@ -83,15 +78,6 @@ enum {
EVENT_ENTER_POWER_SAVE_SUCCESS,
};
-struct event_debug_report {
- u8 debug_event_id;
- u8 num_params;
- __le16 pad;
- __le32 report_1;
- __le32 report_2;
- __le32 report_3;
-} __packed;
-
#define NUM_OF_RSSI_SNR_TRIGGERS 8
struct event_mailbox {
@@ -100,49 +86,45 @@ struct event_mailbox {
__le32 reserved_1;
__le32 reserved_2;
- u8 dbg_event_id;
- u8 num_relevant_params;
- __le16 reserved_3;
- __le32 event_report_p1;
- __le32 event_report_p2;
- __le32 event_report_p3;
-
u8 number_of_scan_results;
u8 scan_tag;
- u8 reserved_4[2];
- __le32 compl_scheduled_scan_status;
+ u8 completed_scan_status;
+ u8 reserved_3;
- __le16 scheduled_scan_attended_channels;
u8 soft_gemini_sense_info;
u8 soft_gemini_protective_info;
s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
u8 channel_switch_status;
u8 scheduled_scan_status;
u8 ps_status;
+ /* tuned channel (roc) */
+ u8 roc_channel;
- /* AP FW only */
- u8 hlid_removed;
+ __le16 hlid_removed_bitmap;
- /* a bitmap of hlids for stations that have been inactive too long */
+ /* bitmap of aged stations (by HLID) */
__le16 sta_aging_status;
- /* a bitmap of hlids for stations which didn't respond to TX */
+ /* bitmap of stations (by HLID) which exceeded max tx retries */
__le16 sta_tx_retry_exceeded;
- /*
- * Bitmap, Each bit set represents the Role ID for which this constraint
- * is set. Range: 0 - FF, FF means ANY role
- */
- u8 ba_role_id;
- /*
- * Bitmap, Each bit set represents the Link ID for which this constraint
- * is set. Not applicable if ba_role_id is set to ANY role (FF).
- * Range: 0 - FFFF, FFFF means ANY link in that role
- */
- u8 ba_link_id;
- u8 ba_allowed;
-
- u8 reserved_5[21];
+ /* discovery completed results */
+ u8 discovery_tag;
+ u8 number_of_preq_results;
+ u8 number_of_prsp_results;
+ u8 reserved_5;
+
+ /* rx ba constraint */
+ u8 role_id; /* 0xFF means any role. */
+ u8 rx_ba_allowed;
+ u8 reserved_6[2];
+
+ u8 ps_poll_delivery_failure_role_ids;
+ u8 stopped_role_ids;
+ u8 started_role_ids;
+ u8 change_auto_mode_timeout;
+
+ u8 reserved_7[12];
} __packed;
int wl1271_event_unmask(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index c3e9a2e4410..04db64c94e9 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -39,13 +39,13 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
/* send empty templates for fw memory reservation */
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
- WL1271_CMD_TEMPL_MAX_SIZE,
+ WL1271_CMD_TEMPL_DFLT_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
- NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0,
+ NULL, WL1271_CMD_TEMPL_DFLT_SIZE, 0,
WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
@@ -70,15 +70,13 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL,
- sizeof
- (struct wl12xx_probe_resp_template),
+ WL1271_CMD_TEMPL_DFLT_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL,
- sizeof
- (struct wl12xx_beacon_template),
+ WL1271_CMD_TEMPL_DFLT_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
@@ -92,7 +90,7 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
- WL1271_CMD_TEMPL_MAX_SIZE, i,
+ WL1271_CMD_TEMPL_DFLT_SIZE, i,
WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
@@ -105,6 +103,7 @@ static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
{
struct wl12xx_disconn_template *tmpl;
int ret;
+ u32 rate;
tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
if (!tmpl) {
@@ -115,9 +114,9 @@ static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_DEAUTH);
+ rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
- tmpl, sizeof(*tmpl), 0,
- wl1271_tx_min_rate_get(wl));
+ tmpl, sizeof(*tmpl), 0, rate);
out:
kfree(tmpl);
@@ -128,6 +127,7 @@ static int wl1271_ap_init_null_template(struct wl1271 *wl)
{
struct ieee80211_hdr_3addr *nullfunc;
int ret;
+ u32 rate;
nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL);
if (!nullfunc) {
@@ -144,9 +144,9 @@ static int wl1271_ap_init_null_template(struct wl1271 *wl)
memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
+ rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
- sizeof(*nullfunc), 0,
- wl1271_tx_min_rate_get(wl));
+ sizeof(*nullfunc), 0, rate);
out:
kfree(nullfunc);
@@ -157,6 +157,7 @@ static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
{
struct ieee80211_qos_hdr *qosnull;
int ret;
+ u32 rate;
qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL);
if (!qosnull) {
@@ -173,9 +174,9 @@ static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
+ rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
- sizeof(*qosnull), 0,
- wl1271_tx_min_rate_get(wl));
+ sizeof(*qosnull), 0, rate);
out:
kfree(qosnull);
@@ -191,15 +192,13 @@ static int wl1271_ap_init_templates_config(struct wl1271 *wl)
* reserve memory for later.
*/
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
- sizeof
- (struct wl12xx_probe_resp_template),
+ WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
- sizeof
- (struct wl12xx_beacon_template),
+ WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
@@ -227,7 +226,7 @@ static int wl1271_ap_init_templates_config(struct wl1271 *wl)
return 0;
}
-static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
+static int wl12xx_init_rx_config(struct wl1271 *wl)
{
int ret;
@@ -235,10 +234,6 @@ static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
if (ret < 0)
return ret;
- ret = wl1271_acx_rx_config(wl, config, filter);
- if (ret < 0)
- return ret;
-
return 0;
}
@@ -285,10 +280,7 @@ int wl1271_init_pta(struct wl1271 *wl)
{
int ret;
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- ret = wl1271_acx_ap_sg_cfg(wl);
- else
- ret = wl1271_acx_sta_sg_cfg(wl);
+ ret = wl12xx_acx_sg_cfg(wl);
if (ret < 0)
return ret;
@@ -392,7 +384,7 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_acx_sta_mem_cfg(wl);
+ ret = wl12xx_acx_mem_cfg(wl);
if (ret < 0)
return ret;
@@ -408,12 +400,6 @@ static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
{
int ret, i;
- ret = wl1271_cmd_set_sta_default_wep_key(wl, wl->default_key);
- if (ret < 0) {
- wl1271_warning("couldn't set default key");
- return ret;
- }
-
/* disable all keep-alive templates */
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
ret = wl1271_acx_keep_alive_config(wl, i,
@@ -451,7 +437,7 @@ static int wl1271_ap_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_acx_ap_mem_cfg(wl);
+ ret = wl12xx_acx_mem_cfg(wl);
if (ret < 0)
return ret;
@@ -483,7 +469,7 @@ int wl1271_ap_init_templates(struct wl1271 *wl)
* when operating as AP we want to receive external beacons for
* configuring ERP protection.
*/
- ret = wl1271_acx_set_ap_beacon_filter(wl, false);
+ ret = wl1271_acx_beacon_filter_opt(wl, false);
if (ret < 0)
return ret;
@@ -515,7 +501,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
return ret;
/* use the min basic rate for AP broadcast/multicast */
- rc.enabled_rates = wl1271_tx_min_rate_get(wl);
+ rc.enabled_rates = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
rc.short_retry_limit = 10;
rc.long_retry_limit = 10;
rc.aflags = 0;
@@ -532,6 +518,9 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
else
supported_rates = CONF_TX_AP_ENABLED_RATES;
+ /* unconditionally enable HT rates */
+ supported_rates |= CONF_TX_MCS_RATES;
+
/* configure unicast TX rate classes */
for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
rc.enabled_rates = supported_rates;
@@ -546,41 +535,24 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
return 0;
}
-static void wl1271_check_ba_support(struct wl1271 *wl)
-{
- /* validate FW cose ver x.x.x.50-60.x */
- if ((wl->chip.fw_ver[3] >= WL12XX_BA_SUPPORT_FW_COST_VER2_START) &&
- (wl->chip.fw_ver[3] < WL12XX_BA_SUPPORT_FW_COST_VER2_END)) {
- wl->ba_support = true;
- return;
- }
-
- wl->ba_support = false;
-}
-
static int wl1271_set_ba_policies(struct wl1271 *wl)
{
- u8 tid_index;
- int ret = 0;
-
/* Reset the BA RX indicators */
wl->ba_rx_bitmap = 0;
wl->ba_allowed = true;
+ wl->ba_rx_session_count = 0;
- /* validate that FW support BA */
- wl1271_check_ba_support(wl);
+ /* BA is supported in STA/AP modes */
+ if (wl->bss_type != BSS_TYPE_AP_BSS &&
+ wl->bss_type != BSS_TYPE_STA_BSS) {
+ wl->ba_support = false;
+ return 0;
+ }
- if (wl->ba_support)
- /* 802.11n initiator BA session setting */
- for (tid_index = 0; tid_index < CONF_TX_MAX_TID_COUNT;
- ++tid_index) {
- ret = wl1271_acx_set_ba_session(wl, WLAN_BACK_INITIATOR,
- tid_index, true);
- if (ret < 0)
- break;
- }
+ wl->ba_support = true;
- return ret;
+ /* 802.11n initiator BA session setting */
+ return wl12xx_acx_set_ba_initiator_policy(wl);
}
int wl1271_chip_specific_init(struct wl1271 *wl)
@@ -650,11 +622,7 @@ int wl1271_hw_init(struct wl1271 *wl)
return ret;
/* RX config */
- ret = wl1271_init_rx_config(wl,
- RX_CFG_PROMISCUOUS | RX_CFG_TSF,
- RX_FILTER_OPTION_DEF);
- /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS,
- RX_FILTER_OPTION_FILTER_ALL); */
+ ret = wl12xx_init_rx_config(wl);
if (ret < 0)
goto out_free_memmap;
@@ -733,11 +701,20 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
+ ret = wl12xx_acx_set_rate_mgmt_params(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
/* Configure initiator BA sessions policies */
ret = wl1271_set_ba_policies(wl);
if (ret < 0)
goto out_free_memmap;
+ /* configure hangover */
+ ret = wl12xx_acx_config_hangover(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
return 0;
out_free_memmap:
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index a2fe4f506ad..e839341dfaf 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -186,6 +186,5 @@ int wl1271_free_hw(struct wl1271 *wl);
irqreturn_t wl1271_irq(int irq, void *data);
bool wl1271_set_block_size(struct wl1271 *wl);
int wl1271_tx_dummy_packet(struct wl1271 *wl);
-void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters);
#endif
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index b70ae40ad66..884f82b6321 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -52,110 +52,67 @@
static struct conf_drv_settings default_conf = {
.sg = {
- .sta_params = {
- [CONF_SG_BT_PER_THRESHOLD] = 7500,
- [CONF_SG_HV3_MAX_OVERRIDE] = 0,
- [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400,
- [CONF_SG_BT_LOAD_RATIO] = 200,
- [CONF_SG_AUTO_PS_MODE] = 1,
- [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
- [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
- [CONF_SG_ANTENNA_CONFIGURATION] = 0,
- [CONF_SG_BEACON_MISS_PERCENT] = 60,
- [CONF_SG_RATE_ADAPT_THRESH] = 12,
- [CONF_SG_RATE_ADAPT_SNR] = 0,
- [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR] = 10,
- [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR] = 30,
- [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR] = 8,
- [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR] = 20,
- [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR] = 50,
- /* Note: with UPSD, this should be 4 */
- [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR] = 8,
- [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR] = 7,
- [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR] = 25,
- [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR] = 20,
- /* Note: with UPDS, this should be 15 */
- [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR] = 8,
- /* Note: with UPDS, this should be 50 */
- [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR] = 40,
- /* Note: with UPDS, this should be 10 */
- [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR] = 20,
- [CONF_SG_RXT] = 1200,
- [CONF_SG_TXT] = 1000,
- [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
- [CONF_SG_PS_POLL_TIMEOUT] = 10,
- [CONF_SG_UPSD_TIMEOUT] = 10,
- [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR] = 7,
- [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR] = 15,
- [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR] = 15,
- [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR] = 8,
- [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR] = 20,
- [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR] = 15,
- [CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR] = 20,
- [CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR] = 50,
- [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR] = 10,
- [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
- [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP] = 800,
- [CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME] = 75,
- [CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME] = 15,
- [CONF_SG_HV3_MAX_SERVED] = 6,
- [CONF_SG_DHCP_TIME] = 5000,
- [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
- },
- .ap_params = {
- [CONF_SG_BT_PER_THRESHOLD] = 7500,
- [CONF_SG_HV3_MAX_OVERRIDE] = 0,
- [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400,
- [CONF_SG_BT_LOAD_RATIO] = 50,
- [CONF_SG_AUTO_PS_MODE] = 1,
- [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
- [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
- [CONF_SG_ANTENNA_CONFIGURATION] = 0,
- [CONF_SG_BEACON_MISS_PERCENT] = 60,
- [CONF_SG_RATE_ADAPT_THRESH] = 64,
- [CONF_SG_RATE_ADAPT_SNR] = 1,
- [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR] = 10,
- [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR] = 25,
- [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR] = 25,
- [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR] = 20,
- [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR] = 25,
- [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR] = 25,
- [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR] = 7,
- [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR] = 25,
- [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR] = 25,
- [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR] = 8,
- [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR] = 25,
- [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR] = 25,
- [CONF_SG_RXT] = 1200,
- [CONF_SG_TXT] = 1000,
- [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
- [CONF_SG_PS_POLL_TIMEOUT] = 10,
- [CONF_SG_UPSD_TIMEOUT] = 10,
- [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR] = 7,
- [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR] = 15,
- [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR] = 15,
- [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR] = 8,
- [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR] = 20,
- [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR] = 15,
- [CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR] = 20,
- [CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR] = 50,
- [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR] = 10,
- [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
- [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP] = 800,
- [CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME] = 75,
- [CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME] = 15,
- [CONF_SG_HV3_MAX_SERVED] = 6,
- [CONF_SG_DHCP_TIME] = 5000,
- [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
- [CONF_SG_TEMP_PARAM_1] = 0,
- [CONF_SG_TEMP_PARAM_2] = 0,
- [CONF_SG_TEMP_PARAM_3] = 0,
- [CONF_SG_TEMP_PARAM_4] = 0,
- [CONF_SG_TEMP_PARAM_5] = 0,
- [CONF_SG_AP_BEACON_MISS_TX] = 3,
- [CONF_SG_RX_WINDOW_LENGTH] = 6,
- [CONF_SG_AP_CONNECTION_PROTECTION_TIME] = 50,
- [CONF_SG_TEMP_PARAM_6] = 1,
+ .params = {
+ [CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
+ [CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
+ [CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
+ [CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
+ [CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
+ [CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
+ [CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
+ [CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
+ [CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
+ [CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
+ [CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
+ [CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
+ [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
+ [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
+ [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
+ [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
+ [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
+ [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
+ [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
+ [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
+ [CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
+ [CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
+ [CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
+ [CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
+ [CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
+ [CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
+ /* active scan params */
+ [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
+ [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
+ [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
+ /* passive scan params */
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
+ /* passive scan in dual antenna params */
+ [CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
+ [CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
+ [CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
+ /* general params */
+ [CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
+ [CONF_SG_ANTENNA_CONFIGURATION] = 0,
+ [CONF_SG_BEACON_MISS_PERCENT] = 60,
+ [CONF_SG_DHCP_TIME] = 5000,
+ [CONF_SG_RXT] = 1200,
+ [CONF_SG_TXT] = 1000,
+ [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
+ [CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
+ [CONF_SG_HV3_MAX_SERVED] = 6,
+ [CONF_SG_PS_POLL_TIMEOUT] = 10,
+ [CONF_SG_UPSD_TIMEOUT] = 10,
+ [CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
+ [CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
+ [CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
+ /* AP params */
+ [CONF_AP_BEACON_MISS_TX] = 3,
+ [CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
+ [CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
+ [CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
+ [CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
+ [CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
},
.state = CONF_SG_PROTECTIVE,
},
@@ -279,10 +236,9 @@ static struct conf_drv_settings default_conf = {
.ps_poll_recovery_period = 700,
.bet_enable = CONF_BET_MODE_ENABLE,
.bet_max_consecutive = 50,
- .psm_entry_retries = 5,
+ .psm_entry_retries = 8,
.psm_exit_retries = 16,
.psm_entry_nullfunc_retries = 3,
- .psm_entry_hangover_period = 1,
.keep_alive_interval = 55000,
.max_listen_interval = 20,
},
@@ -310,8 +266,8 @@ static struct conf_drv_settings default_conf = {
},
.sched_scan = {
/* sched_scan requires dwell times in TU instead of TU/1000 */
- .min_dwell_time_active = 8,
- .max_dwell_time_active = 30,
+ .min_dwell_time_active = 30,
+ .max_dwell_time_active = 60,
.dwell_time_passive = 100,
.dwell_time_dfs = 150,
.num_probe_reqs = 2,
@@ -329,8 +285,10 @@ static struct conf_drv_settings default_conf = {
},
},
.ht = {
+ .rx_ba_win_size = 8,
.tx_ba_win_size = 64,
.inactivity_timeout = 10000,
+ .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
},
.mem_wl127x = {
.num_stations = 1,
@@ -379,9 +337,44 @@ static struct conf_drv_settings default_conf = {
.threshold = 0,
},
.hci_io_ds = HCI_IO_DS_6MA,
+ .rate = {
+ .rate_retry_score = 32000,
+ .per_add = 8192,
+ .per_th1 = 2048,
+ .per_th2 = 4096,
+ .max_per = 8100,
+ .inverse_curiosity_factor = 5,
+ .tx_fail_low_th = 4,
+ .tx_fail_high_th = 10,
+ .per_alpha_shift = 4,
+ .per_add_shift = 13,
+ .per_beta1_shift = 10,
+ .per_beta2_shift = 8,
+ .rate_check_up = 2,
+ .rate_check_down = 12,
+ .rate_retry_policy = {
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+ },
+ },
+ .hangover = {
+ .recover_time = 0,
+ .hangover_period = 20,
+ .dynamic_mode = 1,
+ .early_termination_mode = 1,
+ .max_period = 20,
+ .min_period = 1,
+ .increase_delta = 1,
+ .decrease_delta = 2,
+ .quiet_time = 4,
+ .increase_time = 1,
+ .window_size = 16,
+ },
};
static char *fwlog_param;
+static bool bug_on_recovery;
static void __wl1271_op_remove_interface(struct wl1271 *wl,
bool reset_tx_queues);
@@ -415,10 +408,12 @@ static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate)
if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags))
return 0;
- ret = wl1271_cmd_set_sta_state(wl);
+ ret = wl12xx_cmd_set_peer_state(wl, wl->sta_hlid);
if (ret < 0)
return ret;
+ wl12xx_croc(wl, wl->role_id);
+
wl1271_info("Association completed.");
return 0;
}
@@ -718,7 +713,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- ret = wl1271_acx_sta_mem_cfg(wl);
+ ret = wl12xx_acx_mem_cfg(wl);
if (ret < 0)
goto out_free_memmap;
@@ -773,33 +768,52 @@ static int wl1271_plt_init(struct wl1271 *wl)
return ret;
}
-static void wl1271_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_blks)
+static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
{
- bool fw_ps;
+ bool fw_ps, single_sta;
/* only regulate station links */
if (hlid < WL1271_AP_STA_HLID_START)
return;
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+ single_sta = (wl->active_sta_count == 1);
/*
* Wake up from high level PS if the STA is asleep with too little
- * blocks in FW or if the STA is awake.
+ * packets in FW or if the STA is awake.
*/
- if (!fw_ps || tx_blks < WL1271_PS_STA_MAX_BLOCKS)
+ if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
wl1271_ps_link_end(wl, hlid);
- /* Start high-level PS if the STA is asleep with enough blocks in FW */
- else if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
+ /*
+ * Start high-level PS if the STA is asleep with enough blocks in FW.
+ * Make an exception if this is the only connected station. In this
+ * case FW-memory congestion is not a problem.
+ */
+ else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl1271_ps_link_start(wl, hlid, true);
}
-static void wl1271_irq_update_links_status(struct wl1271 *wl,
- struct wl1271_fw_ap_status *status)
+bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
+{
+ int id;
+
+ /* global/broadcast "stations" are always active */
+ if (hlid < WL1271_AP_STA_HLID_START)
+ return true;
+
+ id = hlid - WL1271_AP_STA_HLID_START;
+ return test_bit(id, wl->ap_hlid_map);
+}
+
+static void wl12xx_irq_update_links_status(struct wl1271 *wl,
+ struct wl12xx_fw_status *status)
{
u32 cur_fw_ps_map;
- u8 hlid;
+ u8 hlid, cnt;
+
+ /* TODO: also use link_fast_bitmap here */
cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
if (wl->ap_fw_ps_map != cur_fw_ps_map) {
@@ -812,45 +826,30 @@ static void wl1271_irq_update_links_status(struct wl1271 *wl,
}
for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
- u8 cnt = status->tx_lnk_free_blks[hlid] -
- wl->links[hlid].prev_freed_blks;
-
- wl->links[hlid].prev_freed_blks =
- status->tx_lnk_free_blks[hlid];
- wl->links[hlid].allocated_blks -= cnt;
-
- wl1271_irq_ps_regulate_link(wl, hlid,
- wl->links[hlid].allocated_blks);
- }
-}
+ if (!wl1271_is_active_sta(wl, hlid))
+ continue;
-static u32 wl1271_tx_allocated_blocks(struct wl1271 *wl)
-{
- int i;
- u32 total_alloc_blocks = 0;
+ cnt = status->tx_lnk_free_pkts[hlid] -
+ wl->links[hlid].prev_freed_pkts;
- for (i = 0; i < NUM_TX_QUEUES; i++)
- total_alloc_blocks += wl->tx_allocated_blocks[i];
+ wl->links[hlid].prev_freed_pkts =
+ status->tx_lnk_free_pkts[hlid];
+ wl->links[hlid].allocated_pkts -= cnt;
- return total_alloc_blocks;
+ wl12xx_irq_ps_regulate_link(wl, hlid,
+ wl->links[hlid].allocated_pkts);
+ }
}
-static void wl1271_fw_status(struct wl1271 *wl,
- struct wl1271_fw_full_status *full_status)
+static void wl12xx_fw_status(struct wl1271 *wl,
+ struct wl12xx_fw_status *status)
{
- struct wl1271_fw_common_status *status = &full_status->common;
struct timespec ts;
u32 old_tx_blk_count = wl->tx_blocks_available;
- u32 freed_blocks = 0, ac_freed_blocks;
+ int avail, freed_blocks;
int i;
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
- wl1271_raw_read(wl, FW_STATUS_ADDR, status,
- sizeof(struct wl1271_fw_ap_status), false);
- } else {
- wl1271_raw_read(wl, FW_STATUS_ADDR, status,
- sizeof(struct wl1271_fw_sta_status), false);
- }
+ wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
@@ -859,42 +858,49 @@ static void wl1271_fw_status(struct wl1271 *wl,
status->drv_rx_counter,
status->tx_results_counter);
- /* update number of available TX blocks */
for (i = 0; i < NUM_TX_QUEUES; i++) {
- ac_freed_blocks = le32_to_cpu(status->tx_released_blks[i]) -
- wl->tx_blocks_freed[i];
- freed_blocks += ac_freed_blocks;
-
- wl->tx_allocated_blocks[i] -= ac_freed_blocks;
+ /* prevent wrap-around in freed-packets counter */
+ wl->tx_allocated_pkts[i] -=
+ (status->tx_released_pkts[i] -
+ wl->tx_pkts_freed[i]) & 0xff;
- wl->tx_blocks_freed[i] =
- le32_to_cpu(status->tx_released_blks[i]);
+ wl->tx_pkts_freed[i] = status->tx_released_pkts[i];
}
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
- /* Update num of allocated TX blocks per link and ps status */
- wl1271_irq_update_links_status(wl, &full_status->ap);
- wl->tx_blocks_available += freed_blocks;
- } else {
- int avail = full_status->sta.tx_total -
- wl1271_tx_allocated_blocks(wl);
+ /* prevent wrap-around in total blocks counter */
+ if (likely(wl->tx_blocks_freed <=
+ le32_to_cpu(status->total_released_blks)))
+ freed_blocks = le32_to_cpu(status->total_released_blks) -
+ wl->tx_blocks_freed;
+ else
+ freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
+ le32_to_cpu(status->total_released_blks);
- /*
- * The FW might change the total number of TX memblocks before
- * we get a notification about blocks being released. Thus, the
- * available blocks calculation might yield a temporary result
- * which is lower than the actual available blocks. Keeping in
- * mind that only blocks that were allocated can be moved from
- * TX to RX, tx_blocks_available should never decrease here.
- */
- wl->tx_blocks_available = max((int)wl->tx_blocks_available,
- avail);
- }
+ wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks);
+
+ wl->tx_allocated_blocks -= freed_blocks;
+
+ avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks;
+
+ /*
+ * The FW might change the total number of TX memblocks before
+ * we get a notification about blocks being released. Thus, the
+ * available blocks calculation might yield a temporary result
+ * which is lower than the actual available blocks. Keeping in
+ * mind that only blocks that were allocated can be moved from
+ * TX to RX, tx_blocks_available should never decrease here.
+ */
+ wl->tx_blocks_available = max((int)wl->tx_blocks_available,
+ avail);
/* if more blocks are available now, tx work can be scheduled */
if (wl->tx_blocks_available > old_tx_blk_count)
clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
+ /* for AP update num of allocated TX blocks per link and ps status */
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ wl12xx_irq_update_links_status(wl, status);
+
/* update the host-chipset time offset */
getnstimeofday(&ts);
wl->time_offset = (timespec_to_ns(&ts) >> 10) -
@@ -967,8 +973,8 @@ irqreturn_t wl1271_irq(int irq, void *cookie)
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_clear_bit();
- wl1271_fw_status(wl, wl->fw_status);
- intr = le32_to_cpu(wl->fw_status->common.intr);
+ wl12xx_fw_status(wl, wl->fw_status);
+ intr = le32_to_cpu(wl->fw_status->intr);
intr &= WL1271_INTR_MASK;
if (!intr) {
done = true;
@@ -987,7 +993,7 @@ irqreturn_t wl1271_irq(int irq, void *cookie)
if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
- wl1271_rx(wl, &wl->fw_status->common);
+ wl12xx_rx(wl, wl->fw_status);
/* Check if any tx blocks were freed */
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -1004,7 +1010,7 @@ irqreturn_t wl1271_irq(int irq, void *cookie)
}
/* check for tx results */
- if (wl->fw_status->common.tx_results_counter !=
+ if (wl->fw_status->tx_results_counter !=
(wl->tx_results_count & 0xff))
wl1271_tx_complete(wl);
@@ -1056,25 +1062,10 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
const char *fw_name;
int ret;
- switch (wl->bss_type) {
- case BSS_TYPE_AP_BSS:
- if (wl->chip.id == CHIP_ID_1283_PG20)
- fw_name = WL128X_AP_FW_NAME;
- else
- fw_name = WL127X_AP_FW_NAME;
- break;
- case BSS_TYPE_IBSS:
- case BSS_TYPE_STA_BSS:
- if (wl->chip.id == CHIP_ID_1283_PG20)
- fw_name = WL128X_FW_NAME;
- else
- fw_name = WL1271_FW_NAME;
- break;
- default:
- wl1271_error("no compatible firmware for bss_type %d",
- wl->bss_type);
- return -EINVAL;
- }
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ fw_name = WL128X_FW_NAME;
+ else
+ fw_name = WL127X_FW_NAME;
wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
@@ -1103,7 +1094,6 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
}
memcpy(wl->fw, fw->data, wl->fw_len);
- wl->fw_bss_type = wl->bss_type;
ret = 0;
out:
@@ -1194,8 +1184,8 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
wl12xx_cmd_stop_fwlog(wl);
/* Read the first memory block address */
- wl1271_fw_status(wl, wl->fw_status);
- first_addr = __le32_to_cpu(wl->fw_status->sta.log_start_addr);
+ wl12xx_fw_status(wl, wl->fw_status);
+ first_addr = le32_to_cpu(wl->fw_status->log_start_addr);
if (!first_addr)
goto out;
@@ -1211,7 +1201,7 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
* of each memory block hold the hardware address of the next
* one. The last memory block points to the first one.
*/
- addr = __le32_to_cpup((__le32 *)block);
+ addr = le32_to_cpup((__le32 *)block);
if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
break;
@@ -1241,6 +1231,8 @@ static void wl1271_recovery_work(struct work_struct *work)
wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4));
+ BUG_ON(bug_on_recovery);
+
/*
* Advance security sequence number to overcome potential progress
* in the firmware during recovery. This doens't hurt if the network is
@@ -1250,9 +1242,6 @@ static void wl1271_recovery_work(struct work_struct *work)
test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING;
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
- ieee80211_connection_loss(wl->vif);
-
/* Prevent spurious TX during FW restart */
ieee80211_stop_queues(wl->hw);
@@ -1344,14 +1333,6 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
wl->chip.id);
- /*
- * 'end-of-transaction flag' and 'LPD mode flag'
- * should be set in wl127x AP mode only
- */
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- wl->quirks |= (WL12XX_QUIRK_END_OF_TRANSACTION |
- WL12XX_QUIRK_LPD_MODE);
-
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
@@ -1374,8 +1355,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
goto out;
}
- /* Make sure the firmware type matches the BSS type */
- if (wl->fw == NULL || wl->fw_bss_type != wl->bss_type) {
+ if (wl->fw == NULL) {
ret = wl1271_fetch_firmware(wl);
if (ret < 0)
goto out;
@@ -1395,6 +1375,7 @@ out:
int wl1271_plt_start(struct wl1271 *wl)
{
int retries = WL1271_BOOT_RETRIES;
+ struct wiphy *wiphy = wl->hw->wiphy;
int ret;
mutex_lock(&wl->mutex);
@@ -1428,6 +1409,11 @@ int wl1271_plt_start(struct wl1271 *wl)
wl1271_notice("firmware booted in PLT mode (%s)",
wl->chip.fw_ver_str);
+ /* update hw/fw version info in wiphy struct */
+ wiphy->hw_version = wl->chip.id;
+ strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
+ sizeof(wiphy->fw_version));
+
goto out;
irq_disable:
@@ -1504,10 +1490,25 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
q = wl1271_tx_get_queue(mapping);
if (wl->bss_type == BSS_TYPE_AP_BSS)
- hlid = wl1271_tx_get_hlid(skb);
+ hlid = wl12xx_tx_get_hlid_ap(wl, skb);
spin_lock_irqsave(&wl->wl_lock, flags);
+ /* queue the packet */
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ if (!wl1271_is_active_sta(wl, hlid)) {
+ wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d",
+ hlid, q);
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+ skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+ } else {
+ skb_queue_tail(&wl->tx_queue[q], skb);
+ }
+
wl->tx_queue_count[q]++;
/*
@@ -1520,14 +1521,6 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
set_bit(q, &wl->stopped_queues_map);
}
- /* queue the packet */
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
- wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
- skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
- } else {
- skb_queue_tail(&wl->tx_queue[q], skb);
- }
-
/*
* The chip specific setup must run before the first TX packet -
* before that, the tx_work will not be initialized!
@@ -1537,13 +1530,20 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
!test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->tx_work);
+out:
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
unsigned long flags;
- int q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
+ int q;
+
+ /* no need to queue a new dummy packet if one is already pending */
+ if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
+ return 0;
+
+ q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
spin_lock_irqsave(&wl->wl_lock, flags);
set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
@@ -1673,7 +1673,7 @@ static int wl1271_configure_suspend_ap(struct wl1271 *wl)
if (ret < 0)
goto out_unlock;
- ret = wl1271_acx_set_ap_beacon_filter(wl, true);
+ ret = wl1271_acx_beacon_filter_opt(wl, true);
wl1271_ps_elp_sleep(wl);
out_unlock:
@@ -1711,7 +1711,7 @@ static void wl1271_configure_resume(struct wl1271 *wl)
wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
wl->basic_rate, true);
} else if (is_ap) {
- wl1271_acx_set_ap_beacon_filter(wl, false);
+ wl1271_acx_beacon_filter_opt(wl, false);
}
wl1271_ps_elp_sleep(wl);
@@ -1803,9 +1803,6 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
*
* The MAC address is first known when the corresponding interface
* is added. That is where we will initialize the hardware.
- *
- * In addition, we currently have different firmwares for AP and managed
- * operation. We will know which to boot according to interface type.
*/
return 0;
@@ -1816,6 +1813,30 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
}
+static u8 wl12xx_get_role_type(struct wl1271 *wl)
+{
+ switch (wl->bss_type) {
+ case BSS_TYPE_AP_BSS:
+ if (wl->p2p)
+ return WL1271_ROLE_P2P_GO;
+ else
+ return WL1271_ROLE_AP;
+
+ case BSS_TYPE_STA_BSS:
+ if (wl->p2p)
+ return WL1271_ROLE_P2P_CL;
+ else
+ return WL1271_ROLE_STA;
+
+ case BSS_TYPE_IBSS:
+ return WL1271_ROLE_IBSS;
+
+ default:
+ wl1271_error("invalid bss_type: %d", wl->bss_type);
+ }
+ return WL12XX_INVALID_ROLE_TYPE;
+}
+
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1823,10 +1844,11 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
struct wiphy *wiphy = hw->wiphy;
int retries = WL1271_BOOT_RETRIES;
int ret = 0;
+ u8 role_type;
bool booted = false;
wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- vif->type, vif->addr);
+ ieee80211_vif_type_p2p(vif), vif->addr);
mutex_lock(&wl->mutex);
if (wl->vif) {
@@ -1846,7 +1868,10 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- switch (vif->type) {
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ wl->p2p = 1;
+ /* fall-through */
case NL80211_IFTYPE_STATION:
wl->bss_type = BSS_TYPE_STA_BSS;
wl->set_bss_type = BSS_TYPE_STA_BSS;
@@ -1855,6 +1880,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
wl->bss_type = BSS_TYPE_IBSS;
wl->set_bss_type = BSS_TYPE_STA_BSS;
break;
+ case NL80211_IFTYPE_P2P_GO:
+ wl->p2p = 1;
+ /* fall-through */
case NL80211_IFTYPE_AP:
wl->bss_type = BSS_TYPE_AP_BSS;
break;
@@ -1863,6 +1891,11 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
+ role_type = wl12xx_get_role_type(wl);
+ if (role_type == WL12XX_INVALID_ROLE_TYPE) {
+ ret = -EINVAL;
+ goto out;
+ }
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
if (wl->state != WL1271_STATE_OFF) {
@@ -1882,6 +1915,25 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
if (ret < 0)
goto power_off;
+ if (wl->bss_type == BSS_TYPE_STA_BSS ||
+ wl->bss_type == BSS_TYPE_IBSS) {
+ /*
+ * The device role is a special role used for
+ * rx and tx frames prior to association (as
+ * the STA role can get packets only from
+ * its associated bssid)
+ */
+ ret = wl12xx_cmd_role_enable(wl,
+ WL1271_ROLE_DEVICE,
+ &wl->dev_role_id);
+ if (ret < 0)
+ goto irq_disable;
+ }
+
+ ret = wl12xx_cmd_role_enable(wl, role_type, &wl->role_id);
+ if (ret < 0)
+ goto irq_disable;
+
ret = wl1271_hw_init(wl);
if (ret < 0)
goto irq_disable;
@@ -1946,7 +1998,7 @@ out:
static void __wl1271_op_remove_interface(struct wl1271 *wl,
bool reset_tx_queues)
{
- int i;
+ int ret, i;
wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
@@ -1971,6 +2023,31 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
ieee80211_scan_completed(wl->hw, true);
}
+ if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
+ /* disable active roles */
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto deinit;
+
+ if (wl->bss_type == BSS_TYPE_STA_BSS) {
+ ret = wl12xx_cmd_role_disable(wl, &wl->dev_role_id);
+ if (ret < 0)
+ goto deinit;
+ }
+
+ ret = wl12xx_cmd_role_disable(wl, &wl->role_id);
+ if (ret < 0)
+ goto deinit;
+
+ wl1271_ps_elp_sleep(wl);
+ }
+deinit:
+ /* clear all hlids (except system_hlid) */
+ wl->sta_hlid = WL12XX_INVALID_LINK_ID;
+ wl->dev_hlid = WL12XX_INVALID_LINK_ID;
+ wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
+ wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
+
/*
* this must be before the cancel_work calls below, so that the work
* functions don't perform further work.
@@ -1997,28 +2074,41 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
wl1271_power_off(wl);
memset(wl->bssid, 0, ETH_ALEN);
- memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1);
+ memset(wl->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
wl->ssid_len = 0;
wl->bss_type = MAX_BSS_TYPE;
wl->set_bss_type = MAX_BSS_TYPE;
+ wl->p2p = 0;
wl->band = IEEE80211_BAND_2GHZ;
wl->rx_counter = 0;
wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->tx_blocks_available = 0;
+ wl->tx_allocated_blocks = 0;
wl->tx_results_count = 0;
wl->tx_packets_count = 0;
wl->time_offset = 0;
wl->session_counter = 0;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+ wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
wl->vif = NULL;
- wl->filters = 0;
+ wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
wl1271_free_ap_keys(wl);
memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
wl->ap_fw_ps_map = 0;
wl->ap_ps_map = 0;
wl->sched_scanning = false;
+ wl->role_id = WL12XX_INVALID_ROLE_ID;
+ wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
+ memset(wl->roles_map, 0, sizeof(wl->roles_map));
+ memset(wl->links_map, 0, sizeof(wl->links_map));
+ memset(wl->roc_map, 0, sizeof(wl->roc_map));
+ wl->active_sta_count = 0;
+
+ /* The system link is always allocated */
+ __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
/*
* this is performed after the cancel_work calls and the associated
@@ -2027,9 +2117,11 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
*/
wl->flags = 0;
+ wl->tx_blocks_freed = 0;
+
for (i = 0; i < NUM_TX_QUEUES; i++) {
- wl->tx_blocks_freed[i] = 0;
- wl->tx_allocated_blocks[i] = 0;
+ wl->tx_pkts_freed[i] = 0;
+ wl->tx_allocated_pkts[i] = 0;
}
wl1271_debugfs_reset(wl);
@@ -2061,64 +2153,10 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
cancel_work_sync(&wl->recovery_work);
}
-void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
-{
- wl1271_set_default_filters(wl);
-
- /* combine requested filters with current filter config */
- filters = wl->filters | filters;
-
- wl1271_debug(DEBUG_FILTERS, "RX filters set: ");
-
- if (filters & FIF_PROMISC_IN_BSS) {
- wl1271_debug(DEBUG_FILTERS, " - FIF_PROMISC_IN_BSS");
- wl->rx_config &= ~CFG_UNI_FILTER_EN;
- wl->rx_config |= CFG_BSSID_FILTER_EN;
- }
- if (filters & FIF_BCN_PRBRESP_PROMISC) {
- wl1271_debug(DEBUG_FILTERS, " - FIF_BCN_PRBRESP_PROMISC");
- wl->rx_config &= ~CFG_BSSID_FILTER_EN;
- wl->rx_config &= ~CFG_SSID_FILTER_EN;
- }
- if (filters & FIF_OTHER_BSS) {
- wl1271_debug(DEBUG_FILTERS, " - FIF_OTHER_BSS");
- wl->rx_config &= ~CFG_BSSID_FILTER_EN;
- }
- if (filters & FIF_CONTROL) {
- wl1271_debug(DEBUG_FILTERS, " - FIF_CONTROL");
- wl->rx_filter |= CFG_RX_CTL_EN;
- }
- if (filters & FIF_FCSFAIL) {
- wl1271_debug(DEBUG_FILTERS, " - FIF_FCSFAIL");
- wl->rx_filter |= CFG_RX_FCS_ERROR;
- }
-}
-
-static int wl1271_dummy_join(struct wl1271 *wl)
-{
- int ret = 0;
- /* we need to use a dummy BSSID for now */
- static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
- 0xad, 0xbe, 0xef };
-
- memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
-
- /* pass through frames from all BSS */
- wl1271_configure_filters(wl, FIF_OTHER_BSS);
-
- ret = wl1271_cmd_join(wl, wl->set_bss_type);
- if (ret < 0)
- goto out;
-
- set_bit(WL1271_FLAG_JOINED, &wl->flags);
-
-out:
- return ret;
-}
-
static int wl1271_join(struct wl1271 *wl, bool set_assoc)
{
int ret;
+ bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
/*
* One of the side effects of the JOIN command is that is clears
@@ -2135,12 +2173,13 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
if (set_assoc)
set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
- ret = wl1271_cmd_join(wl, wl->set_bss_type);
+ if (is_ibss)
+ ret = wl12xx_cmd_role_start_ibss(wl);
+ else
+ ret = wl12xx_cmd_role_start_sta(wl);
if (ret < 0)
goto out;
- set_bit(WL1271_FLAG_JOINED, &wl->flags);
-
if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
goto out;
@@ -2175,31 +2214,41 @@ static int wl1271_unjoin(struct wl1271 *wl)
{
int ret;
+ if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags)) {
+ wl12xx_cmd_stop_channel_switch(wl);
+ ieee80211_chswitch_done(wl->vif, false);
+ }
+
/* to stop listening to a channel, we disconnect */
- ret = wl1271_cmd_disconnect(wl);
+ ret = wl12xx_cmd_role_stop_sta(wl);
if (ret < 0)
goto out;
- clear_bit(WL1271_FLAG_JOINED, &wl->flags);
memset(wl->bssid, 0, ETH_ALEN);
/* reset TX security counters on a clean disconnect */
wl->tx_security_last_seq_lsb = 0;
wl->tx_security_seq = 0;
- /* stop filtering packets based on bssid */
- wl1271_configure_filters(wl, FIF_OTHER_BSS);
-
out:
return ret;
}
static void wl1271_set_band_rate(struct wl1271 *wl)
{
- if (wl->band == IEEE80211_BAND_2GHZ)
- wl->basic_rate_set = wl->conf.tx.basic_rate;
- else
- wl->basic_rate_set = wl->conf.tx.basic_rate_5;
+ wl->basic_rate_set = wl->bitrate_masks[wl->band];
+ wl->rate_set = wl->basic_rate_set;
+}
+
+static bool wl12xx_is_roc(struct wl1271 *wl)
+{
+ u8 role_id;
+
+ role_id = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
+ if (role_id >= WL12XX_MAX_ROLES)
+ return false;
+
+ return true;
}
static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
@@ -2207,12 +2256,17 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
int ret;
if (idle) {
- if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
- ret = wl1271_unjoin(wl);
+ /* no need to croc if we weren't busy (e.g. during boot) */
+ if (wl12xx_is_roc(wl)) {
+ ret = wl12xx_croc(wl, wl->dev_role_id);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_cmd_role_stop_dev(wl);
if (ret < 0)
goto out;
}
- wl->rate_set = wl1271_tx_min_rate_get(wl);
+ wl->rate_set = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
goto out;
@@ -2223,18 +2277,17 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
goto out;
set_bit(WL1271_FLAG_IDLE, &wl->flags);
} else {
- /* increment the session counter */
- wl->session_counter++;
- if (wl->session_counter >= SESSION_COUNTER_MAX)
- wl->session_counter = 0;
-
/* The current firmware only supports sched_scan in idle */
if (wl->sched_scanning) {
wl1271_scan_sched_scan_stop(wl);
ieee80211_sched_scan_stopped(wl->hw);
}
- ret = wl1271_dummy_join(wl);
+ ret = wl12xx_cmd_role_start_dev(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_roc(wl, wl->dev_role_id);
if (ret < 0)
goto out;
clear_bit(WL1271_FLAG_IDLE, &wl->flags);
@@ -2295,6 +2348,8 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
((wl->band != conf->channel->band) ||
(wl->channel != channel))) {
+ /* send all pending packets */
+ wl1271_tx_work_locked(wl);
wl->band = conf->channel->band;
wl->channel = channel;
@@ -2308,17 +2363,41 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
wl1271_set_band_rate(wl);
- wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ wl->basic_rate =
+ wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
wl1271_warning("rate policy for channel "
"failed %d", ret);
- if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+ if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (wl12xx_is_roc(wl)) {
+ /* roaming */
+ ret = wl12xx_croc(wl, wl->dev_role_id);
+ if (ret < 0)
+ goto out_sleep;
+ }
ret = wl1271_join(wl, false);
if (ret < 0)
wl1271_warning("cmd join on channel "
"failed %d", ret);
+ } else {
+ /*
+ * change the ROC channel. do it only if we are
+ * not idle. otherwise, CROC will be called
+ * anyway.
+ */
+ if (wl12xx_is_roc(wl) &&
+ !(conf->flags & IEEE80211_CONF_IDLE)) {
+ ret = wl12xx_croc(wl, wl->dev_role_id);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl12xx_roc(wl, wl->dev_role_id);
+ if (ret < 0)
+ wl1271_warning("roc failed %d",
+ ret);
+ }
}
}
}
@@ -2458,18 +2537,11 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
goto out_sleep;
}
- /* determine, whether supported filter values have changed */
- if (changed == 0)
- goto out_sleep;
-
- /* configure filters */
- wl->filters = *total;
- wl1271_configure_filters(wl, 0);
-
- /* apply configured filters */
- ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
- if (ret < 0)
- goto out_sleep;
+ /*
+ * the fw doesn't provide an api to configure the filters. instead,
+ * the filters configuration is based on the active roles / ROC
+ * state.
+ */
out_sleep:
wl1271_ps_elp_sleep(wl);
@@ -2541,14 +2613,19 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
bool wep_key_added = false;
for (i = 0; i < MAX_NUM_KEYS; i++) {
+ u8 hlid;
if (wl->recorded_ap_keys[i] == NULL)
break;
key = wl->recorded_ap_keys[i];
+ hlid = key->hlid;
+ if (hlid == WL12XX_INVALID_LINK_ID)
+ hlid = wl->ap_bcast_hlid;
+
ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
key->id, key->key_type,
key->key_size, key->key,
- key->hlid, key->tx_seq_32,
+ hlid, key->tx_seq_32,
key->tx_seq_16);
if (ret < 0)
goto out;
@@ -2558,7 +2635,8 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
}
if (wep_key_added) {
- ret = wl1271_cmd_set_ap_default_wep_key(wl, wl->default_key);
+ ret = wl12xx_cmd_set_default_wep_key(wl, wl->default_key,
+ wl->ap_bcast_hlid);
if (ret < 0)
goto out;
}
@@ -2583,7 +2661,7 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
wl_sta = (struct wl1271_station *)sta->drv_priv;
hlid = wl_sta->hlid;
} else {
- hlid = WL1271_AP_BROADCAST_HLID;
+ hlid = wl->ap_bcast_hlid;
}
if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
@@ -2613,6 +2691,17 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
+ /*
+ * A STA set to GEM cipher requires 2 tx spare blocks.
+ * Return to default value when GEM cipher key is removed
+ */
+ if (key_type == KEY_GEM) {
+ if (action == KEY_ADD_OR_REPLACE)
+ wl->tx_spare_blocks = 2;
+ else if (action == KEY_REMOVE)
+ wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+ }
+
addr = sta ? sta->addr : bcast_addr;
if (is_zero_ether_addr(addr)) {
@@ -2627,6 +2716,11 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
return 0;
+ /* don't remove key if hlid was already deleted */
+ if (action == KEY_REMOVE &&
+ wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+ return 0;
+
ret = wl1271_cmd_set_sta_key(wl, action,
id, key_type, key_size,
key, addr, tx_seq_32,
@@ -2636,8 +2730,9 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
/* the default WEP key needs to be configured at least once */
if (key_type == KEY_WEP) {
- ret = wl1271_cmd_set_sta_default_wep_key(wl,
- wl->default_key);
+ ret = wl12xx_cmd_set_default_wep_key(wl,
+ wl->default_key,
+ wl->sta_hlid);
if (ret < 0)
return ret;
}
@@ -2779,10 +2874,20 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- ret = wl1271_scan(hw->priv, ssid, len, req);
+ /* cancel ROC before scanning */
+ if (wl12xx_is_roc(wl)) {
+ if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ /* don't allow scanning right now */
+ ret = -EBUSY;
+ goto out_sleep;
+ }
+ wl12xx_croc(wl, wl->dev_role_id);
+ wl12xx_cmd_role_stop_dev(wl);
+ }
+ ret = wl1271_scan(hw->priv, ssid, len, req);
+out_sleep:
wl1271_ps_elp_sleep(wl);
-
out:
mutex_unlock(&wl->mutex);
@@ -2960,6 +3065,93 @@ static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
return 0;
}
+static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
+{
+ int len;
+ const u8 *next, *end = skb->data + skb->len;
+ u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
+ skb->len - ieoffset);
+ if (!ie)
+ return;
+ len = ie[1] + 2;
+ next = ie + len;
+ memmove(ie, next, end - next);
+ skb_trim(skb, skb->len - len);
+}
+
+static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
+ unsigned int oui, u8 oui_type,
+ int ieoffset)
+{
+ int len;
+ const u8 *next, *end = skb->data + skb->len;
+ u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
+ skb->data + ieoffset,
+ skb->len - ieoffset);
+ if (!ie)
+ return;
+ len = ie[1] + 2;
+ next = ie + len;
+ memmove(ie, next, end - next);
+ skb_trim(skb, skb->len - len);
+}
+
+static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
+ u8 *probe_rsp_data,
+ size_t probe_rsp_len,
+ u32 rates)
+{
+ struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
+ int ssid_ie_offset, ie_offset, templ_len;
+ const u8 *ptr;
+
+ /* no need to change probe response if the SSID is set correctly */
+ if (wl->ssid_len > 0)
+ return wl1271_cmd_template_set(wl,
+ CMD_TEMPL_AP_PROBE_RESPONSE,
+ probe_rsp_data,
+ probe_rsp_len, 0,
+ rates);
+
+ if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
+ wl1271_error("probe_rsp template too big");
+ return -EINVAL;
+ }
+
+ /* start searching from IE offset */
+ ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+
+ ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
+ probe_rsp_len - ie_offset);
+ if (!ptr) {
+ wl1271_error("No SSID in beacon!");
+ return -EINVAL;
+ }
+
+ ssid_ie_offset = ptr - probe_rsp_data;
+ ptr += (ptr[1] + 2);
+
+ memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
+
+ /* insert SSID from bss_conf */
+ probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
+ probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
+ memcpy(probe_rsp_templ + ssid_ie_offset + 2,
+ bss_conf->ssid, bss_conf->ssid_len);
+ templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
+
+ memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
+ ptr, probe_rsp_len - (ptr - probe_rsp_data));
+ templ_len += probe_rsp_len - (ptr - probe_rsp_data);
+
+ return wl1271_cmd_template_set(wl,
+ CMD_TEMPL_AP_PROBE_RESPONSE,
+ probe_rsp_templ,
+ templ_len, 0,
+ rates);
+}
+
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
@@ -3016,6 +3208,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
if ((changed & BSS_CHANGED_BEACON)) {
struct ieee80211_hdr *hdr;
+ u32 min_rate;
int ieoffset = offsetof(struct ieee80211_mgmt,
u.beacon.variable);
struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
@@ -3031,28 +3224,46 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
dev_kfree_skb(beacon);
goto out;
}
+ min_rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
CMD_TEMPL_BEACON;
ret = wl1271_cmd_template_set(wl, tmpl_id,
beacon->data,
beacon->len, 0,
- wl1271_tx_min_rate_get(wl));
+ min_rate);
if (ret < 0) {
dev_kfree_skb(beacon);
goto out;
}
+ /* remove TIM ie from probe response */
+ wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
+
+ /*
+ * remove p2p ie from probe response.
+ * the fw reponds to probe requests that don't include
+ * the p2p ie. probe requests with p2p ie will be passed,
+ * and will be responded by the supplicant (the spec
+ * forbids including the p2p ie when responding to probe
+ * requests that didn't include it).
+ */
+ wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
+ WLAN_OUI_TYPE_WFA_P2P, ieoffset);
+
hdr = (struct ieee80211_hdr *) beacon->data;
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
-
- tmpl_id = is_ap ? CMD_TEMPL_AP_PROBE_RESPONSE :
- CMD_TEMPL_PROBE_RESPONSE;
- ret = wl1271_cmd_template_set(wl,
- tmpl_id,
- beacon->data,
- beacon->len, 0,
- wl1271_tx_min_rate_get(wl));
+ if (is_ap)
+ ret = wl1271_ap_set_probe_resp_tmpl(wl,
+ beacon->data,
+ beacon->len,
+ min_rate);
+ else
+ ret = wl1271_cmd_template_set(wl,
+ CMD_TEMPL_PROBE_RESPONSE,
+ beacon->data,
+ beacon->len, 0,
+ min_rate);
dev_kfree_skb(beacon);
if (ret < 0)
goto out;
@@ -3073,8 +3284,10 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
if ((changed & BSS_CHANGED_BASIC_RATES)) {
u32 rates = bss_conf->basic_rates;
- wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates);
- wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
+ wl->band);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl,
+ wl->basic_rate_set);
ret = wl1271_init_ap_rates(wl);
if (ret < 0) {
@@ -3094,20 +3307,20 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
if (bss_conf->enable_beacon) {
if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
- ret = wl1271_cmd_start_bss(wl);
+ ret = wl12xx_cmd_role_start_ap(wl);
if (ret < 0)
goto out;
- set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
- wl1271_debug(DEBUG_AP, "started AP");
-
ret = wl1271_ap_init_hwenc(wl);
if (ret < 0)
goto out;
+
+ set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ wl1271_debug(DEBUG_AP, "started AP");
}
} else {
if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
- ret = wl1271_cmd_stop_bss(wl);
+ ret = wl12xx_cmd_role_stop_ap(wl);
if (ret < 0)
goto out;
@@ -3120,6 +3333,18 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
if (ret < 0)
goto out;
+
+ /* Handle HT information change */
+ if ((changed & BSS_CHANGED_HT) &&
+ (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
+ ret = wl1271_acx_set_ht_information(wl,
+ bss_conf->ht_operation_mode);
+ if (ret < 0) {
+ wl1271_warning("Set ht information failed %d", ret);
+ goto out;
+ }
+ }
+
out:
return;
}
@@ -3132,6 +3357,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
{
bool do_join = false, set_assoc = false;
bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ bool ibss_joined = false;
u32 sta_rate_set = 0;
int ret;
struct ieee80211_sta *sta;
@@ -3145,14 +3371,28 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
goto out;
}
- if ((changed & BSS_CHANGED_BEACON_INT) && is_ibss)
+ if (changed & BSS_CHANGED_IBSS) {
+ if (bss_conf->ibss_joined) {
+ set_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags);
+ ibss_joined = true;
+ } else {
+ if (test_and_clear_bit(WL1271_FLAG_IBSS_JOINED,
+ &wl->flags)) {
+ wl1271_unjoin(wl);
+ wl12xx_cmd_role_start_dev(wl);
+ wl12xx_roc(wl, wl->dev_role_id);
+ }
+ }
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
do_join = true;
/* Need to update the SSID (for filtering etc) */
- if ((changed & BSS_CHANGED_BEACON) && is_ibss)
+ if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
do_join = true;
- if ((changed & BSS_CHANGED_BEACON_ENABLED) && is_ibss) {
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
bss_conf->enable_beacon ? "enabled" : "disabled");
@@ -3192,17 +3432,17 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
if (ret < 0)
goto out;
- /* filter out all packets not from this BSSID */
- wl1271_configure_filters(wl, 0);
-
/* Need to update the BSSID (for filtering etc) */
do_join = true;
}
}
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, bss_conf->bssid);
- if (sta) {
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!sta)
+ goto sta_not_found;
+
/* save the supp_rates of the ap */
sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
if (sta->ht_cap.ht_supported)
@@ -3210,38 +3450,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
(sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
sta_ht_cap = sta->ht_cap;
sta_exists = true;
- }
- rcu_read_unlock();
- if (sta_exists) {
- /* handle new association with HT and HT information change */
- if ((changed & BSS_CHANGED_HT) &&
- (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
- ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
- true);
- if (ret < 0) {
- wl1271_warning("Set ht cap true failed %d",
- ret);
- goto out;
- }
- ret = wl1271_acx_set_ht_information(wl,
- bss_conf->ht_operation_mode);
- if (ret < 0) {
- wl1271_warning("Set ht information failed %d",
- ret);
- goto out;
- }
- }
- /* handle new association without HT and disassociation */
- else if (changed & BSS_CHANGED_ASSOC) {
- ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
- false);
- if (ret < 0) {
- wl1271_warning("Set ht cap false failed %d",
- ret);
- goto out;
- }
- }
+sta_not_found:
+ rcu_read_unlock();
}
if ((changed & BSS_CHANGED_ASSOC)) {
@@ -3258,12 +3469,15 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
* to use with control frames.
*/
rates = bss_conf->basic_rates;
- wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
- rates);
- wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ wl->basic_rate_set =
+ wl1271_tx_enabled_rates_get(wl, rates,
+ wl->band);
+ wl->basic_rate =
+ wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
if (sta_rate_set)
wl->rate_set = wl1271_tx_enabled_rates_get(wl,
- sta_rate_set);
+ sta_rate_set,
+ wl->band);
ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
goto out;
@@ -3291,25 +3505,14 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
ret = wl1271_acx_conn_monit_params(wl, true);
if (ret < 0)
goto out;
-
- /* If we want to go in PSM but we're not there yet */
- if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
- !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
- enum wl1271_cmd_ps_mode mode;
-
- mode = STATION_POWER_SAVE_MODE;
- ret = wl1271_ps_set_mode(wl, mode,
- wl->basic_rate,
- true);
- if (ret < 0)
- goto out;
- }
} else {
/* use defaults when not associated */
bool was_assoc =
!!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED,
&wl->flags);
- clear_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags);
+ bool was_ifup =
+ !!test_and_clear_bit(WL1271_FLAG_STA_STATE_SENT,
+ &wl->flags);
wl->aid = 0;
/* free probe-request template */
@@ -3321,7 +3524,8 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
/* revert back to minimum rates for the current band */
wl1271_set_band_rate(wl);
- wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ wl->basic_rate =
+ wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
goto out;
@@ -3336,8 +3540,32 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
/* restore the bssid filter and go to dummy bssid */
if (was_assoc) {
+ u32 conf_flags = wl->hw->conf.flags;
+ /*
+ * we might have to disable roc, if there was
+ * no IF_OPER_UP notification.
+ */
+ if (!was_ifup) {
+ ret = wl12xx_croc(wl, wl->role_id);
+ if (ret < 0)
+ goto out;
+ }
+ /*
+ * (we also need to disable roc in case of
+ * roaming on the same channel. until we will
+ * have a better flow...)
+ */
+ if (test_bit(wl->dev_role_id, wl->roc_map)) {
+ ret = wl12xx_croc(wl, wl->dev_role_id);
+ if (ret < 0)
+ goto out;
+ }
+
wl1271_unjoin(wl);
- wl1271_dummy_join(wl);
+ if (!(conf_flags & IEEE80211_CONF_IDLE)) {
+ wl12xx_cmd_role_start_dev(wl);
+ wl12xx_roc(wl, wl->dev_role_id);
+ }
}
}
}
@@ -3348,11 +3576,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
if (bss_conf->ibss_joined) {
u32 rates = bss_conf->basic_rates;
- wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
- rates);
- wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ wl->basic_rate_set =
+ wl1271_tx_enabled_rates_get(wl, rates,
+ wl->band);
+ wl->basic_rate =
+ wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
- /* by default, use 11b rates */
+ /* by default, use 11b + OFDM rates */
wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
@@ -3398,7 +3628,81 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
wl1271_warning("cmd join failed %d", ret);
goto out;
}
- wl1271_check_operstate(wl, ieee80211_get_operstate(vif));
+
+ /* ROC until connected (after EAPOL exchange) */
+ if (!is_ibss) {
+ ret = wl12xx_roc(wl, wl->role_id);
+ if (ret < 0)
+ goto out;
+
+ wl1271_check_operstate(wl,
+ ieee80211_get_operstate(vif));
+ }
+ /*
+ * stop device role if started (we might already be in
+ * STA role). TODO: make it better.
+ */
+ if (wl->dev_role_id != WL12XX_INVALID_ROLE_ID) {
+ ret = wl12xx_croc(wl, wl->dev_role_id);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_cmd_role_stop_dev(wl);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* If we want to go in PSM but we're not there yet */
+ if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
+ !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ enum wl1271_cmd_ps_mode mode;
+
+ mode = STATION_POWER_SAVE_MODE;
+ ret = wl1271_ps_set_mode(wl, mode,
+ wl->basic_rate,
+ true);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ /* Handle new association with HT. Do this after join. */
+ if (sta_exists) {
+ if ((changed & BSS_CHANGED_HT) &&
+ (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
+ ret = wl1271_acx_set_ht_capabilities(wl,
+ &sta_ht_cap,
+ true,
+ wl->sta_hlid);
+ if (ret < 0) {
+ wl1271_warning("Set ht cap true failed %d",
+ ret);
+ goto out;
+ }
+ }
+ /* handle new association without HT and disassociation */
+ else if (changed & BSS_CHANGED_ASSOC) {
+ ret = wl1271_acx_set_ht_capabilities(wl,
+ &sta_ht_cap,
+ false,
+ wl->sta_hlid);
+ if (ret < 0) {
+ wl1271_warning("Set ht cap false failed %d",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+ /* Handle HT information change. Done after join. */
+ if ((changed & BSS_CHANGED_HT) &&
+ (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
+ ret = wl1271_acx_set_ht_information(wl,
+ bss_conf->ht_operation_mode);
+ if (ret < 0) {
+ wl1271_warning("Set ht information failed %d", ret);
+ goto out;
+ }
}
out:
@@ -3437,7 +3741,8 @@ out:
mutex_unlock(&wl->mutex);
}
-static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct wl1271 *wl = hw->priv;
@@ -3508,7 +3813,8 @@ out:
return ret;
}
-static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
+static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
@@ -3568,31 +3874,31 @@ static int wl1271_allocate_sta(struct wl1271 *wl,
}
wl_sta = (struct wl1271_station *)sta->drv_priv;
- __set_bit(id, wl->ap_hlid_map);
+ set_bit(id, wl->ap_hlid_map);
wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
*hlid = wl_sta->hlid;
memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
+ wl->active_sta_count++;
return 0;
}
-static void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
+void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
{
int id = hlid - WL1271_AP_STA_HLID_START;
- if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+ if (hlid < WL1271_AP_STA_HLID_START)
+ return;
+
+ if (!test_bit(id, wl->ap_hlid_map))
return;
- __clear_bit(id, wl->ap_hlid_map);
+ clear_bit(id, wl->ap_hlid_map);
memset(wl->links[hlid].addr, 0, ETH_ALEN);
+ wl->links[hlid].ba_bitmap = 0;
wl1271_tx_reset_link_queues(wl, hlid);
__clear_bit(hlid, &wl->ap_ps_map);
__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
-}
-
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
-{
- int id = hlid - WL1271_AP_STA_HLID_START;
- return test_bit(id, wl->ap_hlid_map);
+ wl->active_sta_count--;
}
static int wl1271_op_sta_add(struct ieee80211_hw *hw,
@@ -3621,7 +3927,15 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
if (ret < 0)
goto out_free_sta;
- ret = wl1271_cmd_add_sta(wl, sta, hlid);
+ ret = wl12xx_cmd_add_peer(wl, sta, hlid);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl12xx_cmd_set_peer_state(wl, hlid);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true, hlid);
if (ret < 0)
goto out_sleep;
@@ -3664,7 +3978,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- ret = wl1271_cmd_remove_sta(wl, wl_sta->hlid);
+ ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
if (ret < 0)
goto out_sleep;
@@ -3686,6 +4000,14 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
{
struct wl1271 *wl = hw->priv;
int ret;
+ u8 hlid, *ba_bitmap;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
+ tid);
+
+ /* sanity check - the fields in FW are only 8bits wide */
+ if (WARN_ON(tid > 0xFF))
+ return -ENOTSUPP;
mutex_lock(&wl->mutex);
@@ -3694,6 +4016,20 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
goto out;
}
+ if (wl->bss_type == BSS_TYPE_STA_BSS) {
+ hlid = wl->sta_hlid;
+ ba_bitmap = &wl->ba_rx_bitmap;
+ } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ struct wl1271_station *wl_sta;
+
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ hlid = wl_sta->hlid;
+ ba_bitmap = &wl->links[hlid].ba_bitmap;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -3703,20 +4039,46 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if ((wl->ba_support) && (wl->ba_allowed)) {
- ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn,
- true);
- if (!ret)
- wl->ba_rx_bitmap |= BIT(tid);
- } else {
+ if (!wl->ba_support || !wl->ba_allowed) {
ret = -ENOTSUPP;
+ break;
+ }
+
+ if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
+ ret = -EBUSY;
+ wl1271_error("exceeded max RX BA sessions");
+ break;
+ }
+
+ if (*ba_bitmap & BIT(tid)) {
+ ret = -EINVAL;
+ wl1271_error("cannot enable RX BA session on active "
+ "tid: %d", tid);
+ break;
+ }
+
+ ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
+ hlid);
+ if (!ret) {
+ *ba_bitmap |= BIT(tid);
+ wl->ba_rx_session_count++;
}
break;
case IEEE80211_AMPDU_RX_STOP:
- ret = wl1271_acx_set_ba_receiver_session(wl, tid, 0, false);
- if (!ret)
- wl->ba_rx_bitmap &= ~BIT(tid);
+ if (!(*ba_bitmap & BIT(tid))) {
+ ret = -EINVAL;
+ wl1271_error("no active RX BA session on tid: %d",
+ tid);
+ break;
+ }
+
+ ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
+ hlid);
+ if (!ret) {
+ *ba_bitmap &= ~BIT(tid);
+ wl->ba_rx_session_count--;
+ }
break;
/*
@@ -3742,6 +4104,60 @@ out:
return ret;
}
+static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct wl1271 *wl = hw->priv;
+ int i;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
+ mask->control[NL80211_BAND_2GHZ].legacy,
+ mask->control[NL80211_BAND_5GHZ].legacy);
+
+ mutex_lock(&wl->mutex);
+
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+ wl->bitrate_masks[i] =
+ wl1271_tx_enabled_rates_get(wl,
+ mask->control[i].legacy,
+ i);
+ mutex_unlock(&wl->mutex);
+
+ return 0;
+}
+
+static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct wl1271 *wl = hw->priv;
+ int ret;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ mutex_unlock(&wl->mutex);
+ ieee80211_chswitch_done(wl->vif, false);
+ return;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_cmd_channel_switch(wl, ch_switch);
+
+ if (!ret)
+ set_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags);
+
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
@@ -3858,7 +4274,6 @@ static const u8 wl1271_rate_to_idx_2ghz[] = {
/* 11n STA capabilities */
#define HW_RX_HIGHEST_RATE 72
-#ifdef CONFIG_WL12XX_HT
#define WL12XX_HT_CAP { \
.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | \
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT), \
@@ -3871,11 +4286,6 @@ static const u8 wl1271_rate_to_idx_2ghz[] = {
.tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
}, \
}
-#else
-#define WL12XX_HT_CAP { \
- .ht_supported = false, \
-}
-#endif
/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1271_band_2ghz = {
@@ -4023,6 +4433,8 @@ static const struct ieee80211_ops wl1271_ops = {
.sta_remove = wl1271_op_sta_remove,
.ampdu_action = wl1271_op_ampdu_action,
.tx_frames_pending = wl1271_tx_frames_pending,
+ .set_bitrate_mask = wl12xx_set_bitrate_mask,
+ .channel_switch = wl12xx_op_channel_switch,
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
@@ -4126,7 +4538,7 @@ static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
return len;
}
-static DEVICE_ATTR(hw_pg_ver, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
wl1271_sysfs_show_hw_pg_ver, NULL);
static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
@@ -4275,23 +4687,32 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_SUPPORTS_CQM_RSSI |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_AP_LINK_PS;
+ IEEE80211_HW_AP_LINK_PS |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_TX_AMPDU_SETUP_IN_HW;
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
+ BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
wl->hw->wiphy->max_scan_ssids = 1;
- wl->hw->wiphy->max_sched_scan_ssids = 1;
+ wl->hw->wiphy->max_sched_scan_ssids = 16;
+ wl->hw->wiphy->max_match_sets = 16;
/*
* Maximum length of elements in scanning probe request templates
* should be the maximum length possible for a template, without
* the IEEE80211 header of the template
*/
- wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
+ wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_DFLT_SIZE -
sizeof(struct ieee80211_header);
+ wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_DFLT_SIZE -
+ sizeof(struct ieee80211_header);
+
+ wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+
/* make sure all our channels fit in the scanned_ch bitmask */
BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
ARRAY_SIZE(wl1271_channels_5ghz) >
@@ -4335,6 +4756,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
int i, j, ret;
unsigned int order;
+ BUILD_BUG_ON(AP_MAX_LINKS > WL12XX_MAX_LINKS);
+
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
wl1271_error("could not alloc ieee80211_hw");
@@ -4388,8 +4811,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
wl->default_key = 0;
wl->rx_counter = 0;
- wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
@@ -4402,7 +4823,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->hw_pg_ver = -1;
wl->bss_type = MAX_BSS_TYPE;
wl->set_bss_type = MAX_BSS_TYPE;
- wl->fw_bss_type = MAX_BSS_TYPE;
wl->last_tx_hlid = 0;
wl->ap_ps_map = 0;
wl->ap_fw_ps_map = 0;
@@ -4411,12 +4831,24 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->sched_scanning = false;
wl->tx_security_seq = 0;
wl->tx_security_last_seq_lsb = 0;
-
+ wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+ wl->role_id = WL12XX_INVALID_ROLE_ID;
+ wl->system_hlid = WL12XX_SYSTEM_HLID;
+ wl->sta_hlid = WL12XX_INVALID_LINK_ID;
+ wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
+ wl->dev_hlid = WL12XX_INVALID_LINK_ID;
+ wl->session_counter = 0;
+ wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
+ wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
+ wl->active_sta_count = 0;
setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer,
(unsigned long) wl);
wl->fwlog_size = 0;
init_waitqueue_head(&wl->fwlog_waitq);
+ /* The system link is always allocated */
+ __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
+
memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
wl->tx_frames[i] = NULL;
@@ -4428,6 +4860,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
/* Apply default driver configuration. */
wl1271_conf_init(wl);
+ wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+ wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
order = get_order(WL1271_AGGR_BUFFER_SIZE);
wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
@@ -4523,6 +4957,10 @@ int wl1271_free_hw(struct wl1271 *wl)
mutex_unlock(&wl->mutex);
device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr);
+
+ device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
+
+ device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
platform_device_unregister(wl->plat_dev);
free_page((unsigned long)wl->fwlog);
dev_kfree_skb(wl->dummy_packet);
@@ -4556,6 +4994,9 @@ module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(keymap,
"FW logger options: continuous, ondemand, dbgpins or disable");
+module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index 3548377ab9c..c15ebf2efd4 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -199,15 +199,19 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
unsigned long flags;
int filtered[NUM_TX_QUEUES];
- /* filter all frames currently the low level queus for this hlid */
+ /* filter all frames currently in the low level queues for this hlid */
for (i = 0; i < NUM_TX_QUEUES; i++) {
filtered[i] = 0;
while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+ filtered[i]++;
+
+ if (WARN_ON(wl12xx_is_dummy_packet(wl, skb)))
+ continue;
+
info = IEEE80211_SKB_CB(skb);
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
info->status.rates[0].idx = -1;
ieee80211_tx_status_ni(wl->hw, skb);
- filtered[i]++;
}
}
@@ -226,8 +230,8 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
if (test_bit(hlid, &wl->ap_ps_map))
return;
- wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d blks %d "
- "clean_queues %d", hlid, wl->links[hlid].allocated_blks,
+ wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d "
+ "clean_queues %d", hlid, wl->links[hlid].allocated_pkts,
clean_queues);
rcu_read_lock();
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/wl12xx/reg.h
index 440a4ee9cb4..3f570f39758 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
@@ -296,81 +296,6 @@
===============================================*/
#define REG_EVENT_MAILBOX_PTR (SCR_PAD1)
-
-/* Misc */
-
-#define REG_ENABLE_TX_RX (ENABLE)
-/*
- * Rx configuration (filter) information element
- * ---------------------------------------------
- */
-#define REG_RX_CONFIG (RX_CFG)
-#define REG_RX_FILTER (RX_FILTER_CFG)
-
-
-#define RX_CFG_ENABLE_PHY_HEADER_PLCP 0x0002
-
-/* promiscuous - receives all valid frames */
-#define RX_CFG_PROMISCUOUS 0x0008
-
-/* receives frames from any BSSID */
-#define RX_CFG_BSSID 0x0020
-
-/* receives frames destined to any MAC address */
-#define RX_CFG_MAC 0x0010
-
-#define RX_CFG_ENABLE_ONLY_MY_DEST_MAC 0x0010
-#define RX_CFG_ENABLE_ANY_DEST_MAC 0x0000
-#define RX_CFG_ENABLE_ONLY_MY_BSSID 0x0020
-#define RX_CFG_ENABLE_ANY_BSSID 0x0000
-
-/* discards all broadcast frames */
-#define RX_CFG_DISABLE_BCAST 0x0200
-
-#define RX_CFG_ENABLE_ONLY_MY_SSID 0x0400
-#define RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR 0x0800
-#define RX_CFG_COPY_RX_STATUS 0x2000
-#define RX_CFG_TSF 0x10000
-
-#define RX_CONFIG_OPTION_ANY_DST_MY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \
- RX_CFG_ENABLE_ONLY_MY_BSSID)
-
-#define RX_CONFIG_OPTION_MY_DST_ANY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\
- | RX_CFG_ENABLE_ANY_BSSID)
-
-#define RX_CONFIG_OPTION_ANY_DST_ANY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \
- RX_CFG_ENABLE_ANY_BSSID)
-
-#define RX_CONFIG_OPTION_MY_DST_MY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\
- | RX_CFG_ENABLE_ONLY_MY_BSSID)
-
-#define RX_CONFIG_OPTION_FOR_SCAN (RX_CFG_ENABLE_PHY_HEADER_PLCP \
- | RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR \
- | RX_CFG_COPY_RX_STATUS | RX_CFG_TSF)
-
-#define RX_CONFIG_OPTION_FOR_MEASUREMENT (RX_CFG_ENABLE_ANY_DEST_MAC)
-
-#define RX_CONFIG_OPTION_FOR_JOIN (RX_CFG_ENABLE_ONLY_MY_BSSID | \
- RX_CFG_ENABLE_ONLY_MY_DEST_MAC)
-
-#define RX_CONFIG_OPTION_FOR_IBSS_JOIN (RX_CFG_ENABLE_ONLY_MY_SSID | \
- RX_CFG_ENABLE_ONLY_MY_DEST_MAC)
-
-#define RX_FILTER_OPTION_DEF (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\
- | CFG_RX_CTL_EN | CFG_RX_BCN_EN\
- | CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
-
-#define RX_FILTER_OPTION_FILTER_ALL 0
-
-#define RX_FILTER_OPTION_DEF_PRSP_BCN (CFG_RX_PRSP_EN | CFG_RX_MGMT_EN\
- | CFG_RX_RCTS_ACK | CFG_RX_BCN_EN)
-
-#define RX_FILTER_OPTION_JOIN (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\
- | CFG_RX_BCN_EN | CFG_RX_AUTH_EN\
- | CFG_RX_ASSOC_EN | CFG_RX_RCTS_ACK\
- | CFG_RX_PRSP_EN)
-
-
/*===============================================
EEPROM Read/Write Request 32bit RW
------------------------------------------
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index 0450fb49dbb..dee4cfe9ccc 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -30,20 +30,28 @@
#include "rx.h"
#include "io.h"
-static u8 wl1271_rx_get_mem_block(struct wl1271_fw_common_status *status,
+static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status,
u32 drv_rx_counter)
{
return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
RX_MEM_BLOCK_MASK;
}
-static u32 wl1271_rx_get_buf_size(struct wl1271_fw_common_status *status,
+static u32 wl12xx_rx_get_buf_size(struct wl12xx_fw_status *status,
u32 drv_rx_counter)
{
return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
}
+static bool wl12xx_rx_get_unaligned(struct wl12xx_fw_status *status,
+ u32 drv_rx_counter)
+{
+ /* Convert the value to bool */
+ return !!(le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
+ RX_BUF_UNALIGNED_PAYLOAD);
+}
+
static void wl1271_rx_status(struct wl1271 *wl,
struct wl1271_rx_descriptor *desc,
struct ieee80211_rx_status *status,
@@ -58,11 +66,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
status->rate_idx = wl1271_rate_to_idx(desc->rate, status->band);
-#ifdef CONFIG_WL12XX_HT
/* 11n support */
if (desc->rate <= CONF_HW_RXTX_RATE_MCS0)
status->flag |= RX_FLAG_HT;
-#endif
status->signal = desc->rssi;
@@ -89,7 +95,8 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
}
-static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
+static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
+ bool unaligned)
{
struct wl1271_rx_descriptor *desc;
struct sk_buff *skb;
@@ -97,6 +104,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
u8 *buf;
u8 beacon = 0;
u8 is_data = 0;
+ u8 reserved = unaligned ? NET_IP_ALIGN : 0;
+ u16 seq_num;
/*
* In PLT mode we seem to get frames and mac80211 warns about them,
@@ -131,17 +140,25 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
return -EINVAL;
}
- skb = __dev_alloc_skb(length, GFP_KERNEL);
+ /* skb length not included rx descriptor */
+ skb = __dev_alloc_skb(length + reserved - sizeof(*desc), GFP_KERNEL);
if (!skb) {
wl1271_error("Couldn't allocate RX frame");
return -ENOMEM;
}
- buf = skb_put(skb, length);
- memcpy(buf, data, length);
+ /* reserve the unaligned payload(if any) */
+ skb_reserve(skb, reserved);
- /* now we pull the descriptor out of the buffer */
- skb_pull(skb, sizeof(*desc));
+ buf = skb_put(skb, length - sizeof(*desc));
+
+ /*
+ * Copy packets from aggregation buffer to the skbs without rx
+ * descriptor and with packet payload aligned care. In case of unaligned
+ * packets copy the packets in offset of 2 bytes guarantee IP header
+ * payload aligned to 4 bytes.
+ */
+ memcpy(buf, data + sizeof(*desc), length - sizeof(*desc));
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_beacon(hdr->frame_control))
@@ -151,9 +168,11 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
- wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb,
+ seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+ wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d", skb,
skb->len - desc->pad_len,
- beacon ? "beacon" : "");
+ beacon ? "beacon" : "",
+ seq_num);
skb_trim(skb, skb->len - desc->pad_len);
@@ -163,7 +182,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
return is_data;
}
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
+void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
{
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
u32 buf_size;
@@ -175,12 +194,13 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
u32 pkt_offset;
bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
bool had_data = false;
+ bool unaligned = false;
while (drv_rx_counter != fw_rx_counter) {
buf_size = 0;
rx_counter = drv_rx_counter;
while (rx_counter != fw_rx_counter) {
- pkt_length = wl1271_rx_get_buf_size(status, rx_counter);
+ pkt_length = wl12xx_rx_get_buf_size(status, rx_counter);
if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE)
break;
buf_size += pkt_length;
@@ -199,7 +219,7 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
* For aggregated packets, only the first memory block
* should be retrieved. The FW takes care of the rest.
*/
- mem_block = wl1271_rx_get_mem_block(status,
+ mem_block = wl12xx_rx_get_mem_block(status,
drv_rx_counter);
wl->rx_mem_pool_addr.addr = (mem_block << 8) +
@@ -220,8 +240,12 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
/* Split data into separate packets */
pkt_offset = 0;
while (pkt_offset < buf_size) {
- pkt_length = wl1271_rx_get_buf_size(status,
+ pkt_length = wl12xx_rx_get_buf_size(status,
drv_rx_counter);
+
+ unaligned = wl12xx_rx_get_unaligned(status,
+ drv_rx_counter);
+
/*
* the handle data call can only fail in memory-outage
* conditions, in that case the received frame will just
@@ -229,7 +253,7 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
*/
if (wl1271_rx_handle_data(wl,
wl->aggr_buf + pkt_offset,
- pkt_length) == 1)
+ pkt_length, unaligned) == 1)
had_data = true;
wl->rx_counter++;
@@ -260,14 +284,3 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
jiffies + msecs_to_jiffies(timeout));
}
}
-
-void wl1271_set_default_filters(struct wl1271 *wl)
-{
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
- wl->rx_config = WL1271_DEFAULT_AP_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_AP_RX_FILTER;
- } else {
- wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
- }
-}
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/wl12xx/rx.h
index c88e3fa1d60..86ba6b1d0cd 100644
--- a/drivers/net/wireless/wl12xx/rx.h
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -86,16 +86,18 @@
* Bits 3-5 - process_id tag (AP mode FW)
* Bits 6-7 - reserved
*/
-#define WL1271_RX_DESC_STATUS_MASK 0x07
+#define WL1271_RX_DESC_STATUS_MASK 0x03
#define WL1271_RX_DESC_SUCCESS 0x00
#define WL1271_RX_DESC_DECRYPT_FAIL 0x01
#define WL1271_RX_DESC_MIC_FAIL 0x02
#define WL1271_RX_DESC_DRIVER_RX_Q_FAIL 0x03
-#define RX_MEM_BLOCK_MASK 0xFF
-#define RX_BUF_SIZE_MASK 0xFFF00
-#define RX_BUF_SIZE_SHIFT_DIV 6
+#define RX_MEM_BLOCK_MASK 0xFF
+#define RX_BUF_SIZE_MASK 0xFFF00
+#define RX_BUF_SIZE_SHIFT_DIV 6
+/* If set, the start of IP payload is not 4 bytes aligned */
+#define RX_BUF_UNALIGNED_PAYLOAD BIT(20)
enum {
WL12XX_RX_CLASS_UNKNOWN,
@@ -119,16 +121,12 @@ struct wl1271_rx_descriptor {
u8 snr;
__le32 timestamp;
u8 packet_class;
- union {
- u8 process_id; /* STA FW */
- u8 hlid; /* AP FW */
- } __packed;
+ u8 hlid;
u8 pad_len;
u8 reserved;
} __packed;
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status);
+void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
-void wl1271_set_default_filters(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index edfe01c321c..128ccb79318 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -28,11 +28,14 @@
#include "scan.h"
#include "acx.h"
#include "ps.h"
+#include "tx.h"
void wl1271_scan_complete_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
+ int ret;
+ bool is_sta, is_ibss;
dwork = container_of(work, struct delayed_work, work);
wl = container_of(dwork, struct wl1271, scan_complete_work);
@@ -50,21 +53,35 @@ void wl1271_scan_complete_work(struct work_struct *work)
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan.req = NULL;
- ieee80211_scan_completed(wl->hw, false);
- /* restore hardware connection monitoring template */
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
- if (wl1271_ps_elp_wakeup(wl) == 0) {
- wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
- wl1271_ps_elp_sleep(wl);
- }
+ /* restore hardware connection monitoring template */
+ wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+ }
+
+ /* return to ROC if needed */
+ is_sta = (wl->bss_type == BSS_TYPE_STA_BSS);
+ is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ if (((is_sta && !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) ||
+ (is_ibss && !test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))) &&
+ !test_bit(wl->dev_role_id, wl->roc_map)) {
+ /* restore remain on channel */
+ wl12xx_cmd_role_start_dev(wl);
+ wl12xx_roc(wl, wl->dev_role_id);
}
+ wl1271_ps_elp_sleep(wl);
if (wl->scan.failed) {
wl1271_info("Scan completed due to error.");
wl12xx_queue_recovery_work(wl);
}
+ ieee80211_scan_completed(wl->hw, false);
+
out:
mutex_unlock(&wl->mutex);
@@ -83,14 +100,18 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
for (i = 0, j = 0;
i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS;
i++) {
-
flags = req->channels[i]->flags;
if (!test_bit(i, wl->scan.scanned_ch) &&
!(flags & IEEE80211_CHAN_DISABLED) &&
- ((!!(flags & IEEE80211_CHAN_PASSIVE_SCAN)) == passive) &&
- (req->channels[i]->band == band)) {
-
+ (req->channels[i]->band == band) &&
+ /*
+ * In passive scans, we scan all remaining
+ * channels, even if not marked as such.
+ * In active scans, we only scan channels not
+ * marked as passive.
+ */
+ (passive || !(flags & IEEE80211_CHAN_PASSIVE_SCAN))) {
wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
req->channels[i]->band,
req->channels[i]->center_freq);
@@ -142,6 +163,10 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
int ret;
u16 scan_options = 0;
+ /* skip active scans if we don't have SSIDs */
+ if (!passive && wl->scan.req->n_ssids == 0)
+ return WL1271_NOTHING_TO_SCAN;
+
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
if (!cmd || !trigger) {
@@ -149,13 +174,14 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
goto out;
}
- /* We always use high priority scans */
- scan_options = WL1271_SCAN_OPT_PRIORITY_HIGH;
-
- /* No SSIDs means that we have a forced passive scan */
- if (passive || wl->scan.req->n_ssids == 0)
+ if (passive)
scan_options |= WL1271_SCAN_OPT_PASSIVE;
+ if (WARN_ON(wl->role_id == WL12XX_INVALID_ROLE_ID)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cmd->params.role_id = wl->role_id;
cmd->params.scan_options = cpu_to_le16(scan_options);
cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
@@ -167,10 +193,6 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
}
cmd->params.tx_rate = cpu_to_le32(basic_rate);
- cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
- cmd->params.rx_filter_options =
- cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
-
cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
cmd->params.tx_rate = cpu_to_le32(basic_rate);
cmd->params.tid_trigger = 0;
@@ -186,6 +208,8 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
}
+ memcpy(cmd->addr, wl->mac_addr, ETH_ALEN);
+
ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len,
wl->scan.req->ie, wl->scan.req->ie_len,
band);
@@ -220,14 +244,17 @@ out:
void wl1271_scan_stm(struct wl1271 *wl)
{
int ret = 0;
+ enum ieee80211_band band;
+ u32 rate;
switch (wl->scan.state) {
case WL1271_SCAN_STATE_IDLE:
break;
case WL1271_SCAN_STATE_2GHZ_ACTIVE:
- ret = wl1271_scan_send(wl, IEEE80211_BAND_2GHZ, false,
- wl->conf.tx.basic_rate);
+ band = IEEE80211_BAND_2GHZ;
+ rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
wl1271_scan_stm(wl);
@@ -236,8 +263,9 @@ void wl1271_scan_stm(struct wl1271 *wl)
break;
case WL1271_SCAN_STATE_2GHZ_PASSIVE:
- ret = wl1271_scan_send(wl, IEEE80211_BAND_2GHZ, true,
- wl->conf.tx.basic_rate);
+ band = IEEE80211_BAND_2GHZ;
+ rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
if (wl->enable_11a)
wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
@@ -249,8 +277,9 @@ void wl1271_scan_stm(struct wl1271 *wl)
break;
case WL1271_SCAN_STATE_5GHZ_ACTIVE:
- ret = wl1271_scan_send(wl, IEEE80211_BAND_5GHZ, false,
- wl->conf.tx.basic_rate_5);
+ band = IEEE80211_BAND_5GHZ;
+ rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
wl1271_scan_stm(wl);
@@ -259,8 +288,9 @@ void wl1271_scan_stm(struct wl1271 *wl)
break;
case WL1271_SCAN_STATE_5GHZ_PASSIVE:
- ret = wl1271_scan_send(wl, IEEE80211_BAND_5GHZ, true,
- wl->conf.tx.basic_rate_5);
+ band = IEEE80211_BAND_5GHZ;
+ rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_DONE;
wl1271_scan_stm(wl);
@@ -455,6 +485,105 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
cfg->passive[2] || cfg->active[2];
}
+/* Returns the scan type to be used or a negative value on error */
+static int
+wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct wl1271_cmd_sched_scan_ssid_list *cmd = NULL;
+ struct cfg80211_match_set *sets = req->match_sets;
+ struct cfg80211_ssid *ssids = req->ssids;
+ int ret = 0, type, i, j, n_match_ssids = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd sched scan ssid list");
+
+ /* count the match sets that contain SSIDs */
+ for (i = 0; i < req->n_match_sets; i++)
+ if (sets[i].ssid.ssid_len > 0)
+ n_match_ssids++;
+
+ /* No filter, no ssids or only bcast ssid */
+ if (!n_match_ssids &&
+ (!req->n_ssids ||
+ (req->n_ssids == 1 && req->ssids[0].ssid_len == 0))) {
+ type = SCAN_SSID_FILTER_ANY;
+ goto out;
+ }
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!n_match_ssids) {
+ /* No filter, with ssids */
+ type = SCAN_SSID_FILTER_DISABLED;
+
+ for (i = 0; i < req->n_ssids; i++) {
+ cmd->ssids[cmd->n_ssids].type = (ssids[i].ssid_len) ?
+ SCAN_SSID_TYPE_HIDDEN : SCAN_SSID_TYPE_PUBLIC;
+ cmd->ssids[cmd->n_ssids].len = ssids[i].ssid_len;
+ memcpy(cmd->ssids[cmd->n_ssids].ssid, ssids[i].ssid,
+ ssids[i].ssid_len);
+ cmd->n_ssids++;
+ }
+ } else {
+ type = SCAN_SSID_FILTER_LIST;
+
+ /* Add all SSIDs from the filters */
+ for (i = 0; i < req->n_match_sets; i++) {
+ /* ignore sets without SSIDs */
+ if (!sets[i].ssid.ssid_len)
+ continue;
+
+ cmd->ssids[cmd->n_ssids].type = SCAN_SSID_TYPE_PUBLIC;
+ cmd->ssids[cmd->n_ssids].len = sets[i].ssid.ssid_len;
+ memcpy(cmd->ssids[cmd->n_ssids].ssid,
+ sets[i].ssid.ssid, sets[i].ssid.ssid_len);
+ cmd->n_ssids++;
+ }
+ if ((req->n_ssids > 1) ||
+ (req->n_ssids == 1 && req->ssids[0].ssid_len > 0)) {
+ /*
+ * Mark all the SSIDs passed in the SSID list as HIDDEN,
+ * so they're used in probe requests.
+ */
+ for (i = 0; i < req->n_ssids; i++) {
+ for (j = 0; j < cmd->n_ssids; j++)
+ if (!memcmp(req->ssids[i].ssid,
+ cmd->ssids[j].ssid,
+ req->ssids[i].ssid_len)) {
+ cmd->ssids[j].type =
+ SCAN_SSID_TYPE_HIDDEN;
+ break;
+ }
+ /* Fail if SSID isn't present in the filters */
+ if (j == req->n_ssids) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+ }
+ }
+
+ wl1271_dump(DEBUG_SCAN, "SSID_LIST: ", cmd, sizeof(*cmd));
+
+ ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_SSID_CFG, cmd,
+ sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("cmd sched scan ssid list failed");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+out:
+ if (ret < 0)
+ return ret;
+ return type;
+}
+
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies)
@@ -486,15 +615,14 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
cfg->intervals[i] = cpu_to_le32(req->interval);
- if (!force_passive && req->ssids[0].ssid_len && req->ssids[0].ssid) {
- cfg->filter_type = SCAN_SSID_FILTER_SPECIFIC;
- cfg->ssid_len = req->ssids[0].ssid_len;
- memcpy(cfg->ssid, req->ssids[0].ssid,
- req->ssids[0].ssid_len);
- } else {
- cfg->filter_type = SCAN_SSID_FILTER_ANY;
- cfg->ssid_len = 0;
- }
+ cfg->ssid_len = 0;
+ ret = wl12xx_scan_sched_scan_ssid_list(wl, req);
+ if (ret < 0)
+ goto out;
+
+ cfg->filter_type = ret;
+
+ wl1271_debug(DEBUG_SCAN, "filter_type = %d", cfg->filter_type);
if (!wl1271_scan_sched_scan_channels(wl, req, cfg)) {
wl1271_error("scan channel list is empty");
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index d882e4da71b..92115156522 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -46,7 +46,10 @@ void wl1271_scan_sched_scan_results(struct wl1271 *wl);
#define WL1271_SCAN_CURRENT_TX_PWR 0
#define WL1271_SCAN_OPT_ACTIVE 0
#define WL1271_SCAN_OPT_PASSIVE 1
+#define WL1271_SCAN_OPT_TRIGGERED_SCAN 2
#define WL1271_SCAN_OPT_PRIORITY_HIGH 4
+/* scan even if we fail to enter psm */
+#define WL1271_SCAN_OPT_FORCE 8
#define WL1271_SCAN_BAND_2_4_GHZ 0
#define WL1271_SCAN_BAND_5_GHZ 1
@@ -62,27 +65,27 @@ enum {
};
struct basic_scan_params {
- __le32 rx_config_options;
- __le32 rx_filter_options;
/* Scan option flags (WL1271_SCAN_OPT_*) */
__le16 scan_options;
+ u8 role_id;
/* Number of scan channels in the list (maximum 30) */
u8 n_ch;
/* This field indicates the number of probe requests to send
per channel for an active scan */
u8 n_probe_reqs;
- /* Rate bit field for sending the probes */
- __le32 tx_rate;
u8 tid_trigger;
u8 ssid_len;
- /* in order to align */
- u8 padding1[2];
- u8 ssid[IW_ESSID_MAX_SIZE];
+ u8 use_ssid_list;
+
+ /* Rate bit field for sending the probes */
+ __le32 tx_rate;
+
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
/* Band to scan */
u8 band;
- u8 use_ssid_list;
+
u8 scan_tag;
- u8 padding2;
+ u8 padding2[2];
} __packed;
struct basic_scan_channel_params {
@@ -105,6 +108,10 @@ struct wl1271_cmd_scan {
struct basic_scan_params params;
struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
+
+ /* src mac address */
+ u8 addr[ETH_ALEN];
+ u8 padding[2];
} __packed;
struct wl1271_cmd_trigger_scan_to {
@@ -167,7 +174,7 @@ struct wl1271_cmd_sched_scan_config {
u8 filter_type;
u8 ssid_len; /* For SCAN_SSID_FILTER_SPECIFIC */
- u8 ssid[IW_ESSID_MAX_SIZE];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 n_probe_reqs; /* Number of probes requests per channel */
@@ -184,7 +191,7 @@ struct wl1271_cmd_sched_scan_config {
} __packed;
-#define SCHED_SCAN_MAX_SSIDS 8
+#define SCHED_SCAN_MAX_SSIDS 16
enum {
SCAN_SSID_TYPE_PUBLIC = 0,
@@ -194,7 +201,7 @@ enum {
struct wl1271_ssid {
u8 type;
u8 len;
- u8 ssid[IW_ESSID_MAX_SIZE];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
/* u8 padding[2]; */
} __packed;
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index fb1fd5af75e..516a8980723 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -412,7 +412,5 @@ module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL127X_FW_NAME);
MODULE_FIRMWARE(WL128X_FW_NAME);
-MODULE_FIRMWARE(WL127X_AP_FW_NAME);
-MODULE_FIRMWARE(WL128X_AP_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
index f2891539287..f25d5d9212e 100644
--- a/drivers/net/wireless/wl12xx/sdio_test.c
+++ b/drivers/net/wireless/wl12xx/sdio_test.c
@@ -30,6 +30,7 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/gpio.h>
#include <linux/wl12xx.h>
#include <linux/kthread.h>
@@ -142,14 +143,23 @@ static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
ret = pm_runtime_get_sync(&func->dev);
if (ret < 0)
goto out;
+
+ /* Runtime PM might be disabled, power up the card manually */
+ ret = mmc_power_restore_host(func->card->host);
+ if (ret < 0)
+ goto out;
+
sdio_claim_host(func);
sdio_enable_func(func);
- sdio_release_host(func);
} else {
- sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
+ /* Runtime PM might be disabled, power off the card manually */
+ ret = mmc_power_save_host(func->card->host);
+ if (ret < 0)
+ goto out;
+
/* Power down the card */
ret = pm_runtime_put_sync(&func->dev);
}
@@ -193,7 +203,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
ret = request_firmware(&fw, WL128X_FW_NAME,
wl1271_wl_to_dev(wl));
else
- ret = request_firmware(&fw, WL1271_FW_NAME,
+ ret = request_firmware(&fw, WL127X_FW_NAME,
wl1271_wl_to_dev(wl));
if (ret < 0) {
@@ -433,7 +443,6 @@ static int __devinit wl1271_probe(struct sdio_func *func,
sdio_set_drvdata(func, wl_test);
-
/* power up the device */
ret = wl1271_chip_wakeup(wl);
if (ret) {
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index e0b3736d7e1..0f971867786 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -486,8 +486,6 @@ module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL127X_FW_NAME);
MODULE_FIRMWARE(WL128X_FW_NAME);
-MODULE_FIRMWARE(WL127X_AP_FW_NAME);
-MODULE_FIRMWARE(WL128X_AP_FW_NAME);
MODULE_ALIAS("spi:wl1271");
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index 48fde96ce0d..bad9e29d49b 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -30,6 +30,7 @@
#include "reg.h"
#include "ps.h"
#include "tx.h"
+#include "event.h"
static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
{
@@ -37,9 +38,10 @@ static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
if (is_ap)
- ret = wl1271_cmd_set_ap_default_wep_key(wl, id);
+ ret = wl12xx_cmd_set_default_wep_key(wl, id,
+ wl->ap_bcast_hlid);
else
- ret = wl1271_cmd_set_sta_default_wep_key(wl, id);
+ ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid);
if (ret < 0)
return ret;
@@ -77,9 +79,9 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
+ int ret;
- hdr = (struct ieee80211_hdr *)(skb->data +
- sizeof(struct wl1271_tx_hw_descr));
+ hdr = (struct ieee80211_hdr *)skb->data;
/*
* stop bssid-based filtering before transmitting authentication
@@ -90,9 +92,19 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
if (!ieee80211_is_auth(hdr->frame_control))
return 0;
- wl1271_configure_filters(wl, FIF_OTHER_BSS);
+ if (wl->dev_hlid != WL12XX_INVALID_LINK_ID)
+ goto out;
- return wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
+ wl1271_debug(DEBUG_CMD, "starting device role for roaming");
+ ret = wl12xx_cmd_role_start_dev(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_roc(wl, wl->dev_role_id);
+ if (ret < 0)
+ goto out;
+out:
+ return 0;
}
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
@@ -113,25 +125,36 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
{
- bool fw_ps;
- u8 tx_blks;
+ bool fw_ps, single_sta;
+ u8 tx_pkts;
/* only regulate station links */
if (hlid < WL1271_AP_STA_HLID_START)
return;
+ if (WARN_ON(!wl1271_is_active_sta(wl, hlid)))
+ return;
+
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
- tx_blks = wl->links[hlid].allocated_blks;
+ tx_pkts = wl->links[hlid].allocated_pkts;
+ single_sta = (wl->active_sta_count == 1);
/*
* if in FW PS and there is enough data in FW we can put the link
* into high-level PS and clean out its TX queues.
+ * Make an exception if this is the only connected station. In this
+ * case FW-memory congestion is not a problem.
*/
- if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
+ if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl1271_ps_link_start(wl, hlid, true);
}
-u8 wl1271_tx_get_hlid(struct sk_buff *skb)
+bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
+{
+ return wl->dummy_packet == skb;
+}
+
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
{
struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
@@ -144,14 +167,38 @@ u8 wl1271_tx_get_hlid(struct sk_buff *skb)
} else {
struct ieee80211_hdr *hdr;
+ if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+ return wl->system_hlid;
+
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_mgmt(hdr->frame_control))
- return WL1271_AP_GLOBAL_HLID;
+ return wl->ap_global_hlid;
else
- return WL1271_AP_BROADCAST_HLID;
+ return wl->ap_bcast_hlid;
}
}
+static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (wl12xx_is_dummy_packet(wl, skb))
+ return wl->system_hlid;
+
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ return wl12xx_tx_get_hlid_ap(wl, skb);
+
+ wl1271_tx_update_filters(wl, skb);
+
+ if ((test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
+ test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags)) &&
+ !ieee80211_is_auth(hdr->frame_control) &&
+ !ieee80211_is_assoc_req(hdr->frame_control))
+ return wl->sta_hlid;
+ else
+ return wl->dev_hlid;
+}
+
static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
unsigned int packet_length)
{
@@ -169,12 +216,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
u32 len;
u32 total_blocks;
int id, ret = -EBUSY, ac;
- u32 spare_blocks;
-
- if (unlikely(wl->quirks & WL12XX_QUIRK_USE_2_SPARE_BLOCKS))
- spare_blocks = 2;
- else
- spare_blocks = 1;
+ u32 spare_blocks = wl->tx_spare_blocks;
if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
return -EAGAIN;
@@ -188,6 +230,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
in the firmware */
len = wl12xx_calc_packet_alignment(wl, total_len);
+ /* in case of a dummy packet, use default amount of spare mem blocks */
+ if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
+ spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+
total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
spare_blocks;
@@ -206,12 +252,14 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
desc->id = id;
wl->tx_blocks_available -= total_blocks;
+ wl->tx_allocated_blocks += total_blocks;
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
- wl->tx_allocated_blocks[ac] += total_blocks;
+ wl->tx_allocated_pkts[ac]++;
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- wl->links[hlid].allocated_blks += total_blocks;
+ if (wl->bss_type == BSS_TYPE_AP_BSS &&
+ hlid >= WL1271_AP_STA_HLID_START)
+ wl->links[hlid].allocated_pkts++;
ret = 0;
@@ -225,11 +273,6 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
return ret;
}
-static bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
-{
- return wl->dummy_packet == skb;
-}
-
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
u32 extra, struct ieee80211_tx_info *control,
u8 hlid)
@@ -280,9 +323,9 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
}
- if (wl->bss_type != BSS_TYPE_AP_BSS) {
- desc->aid = hlid;
+ desc->hlid = hlid;
+ if (wl->bss_type != BSS_TYPE_AP_BSS) {
/* if the packets are destined for AP (have a STA entry)
send them with AP rate policies, otherwise use default
basic rates */
@@ -291,18 +334,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
else
rate_idx = ACX_TX_BASIC_RATE;
} else {
- desc->hlid = hlid;
- switch (hlid) {
- case WL1271_AP_GLOBAL_HLID:
+ if (hlid == wl->ap_global_hlid)
rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
- break;
- case WL1271_AP_BROADCAST_HLID:
+ else if (hlid == wl->ap_bcast_hlid)
rate_idx = ACX_TX_AP_MODE_BCST_RATE;
- break;
- default:
+ else
rate_idx = ac;
- break;
- }
}
tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
@@ -376,10 +413,11 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
}
}
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- hlid = wl1271_tx_get_hlid(skb);
- else
- hlid = TX_HW_DEFAULT_AID;
+ hlid = wl1271_tx_get_hlid(wl, skb);
+ if (hlid == WL12XX_INVALID_LINK_ID) {
+ wl1271_error("invalid hlid. dropping skb 0x%p", skb);
+ return -EINVAL;
+ }
ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
if (ret < 0)
@@ -390,8 +428,6 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
if (wl->bss_type == BSS_TYPE_AP_BSS) {
wl1271_tx_ap_update_inconnection_sta(wl, skb);
wl1271_tx_regulate_link(wl, hlid);
- } else {
- wl1271_tx_update_filters(wl, skb);
}
/*
@@ -414,20 +450,20 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
return total_len;
}
-u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
+u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
+ enum ieee80211_band rate_band)
{
struct ieee80211_supported_band *band;
u32 enabled_rates = 0;
int bit;
- band = wl->hw->wiphy->bands[wl->band];
+ band = wl->hw->wiphy->bands[rate_band];
for (bit = 0; bit < band->n_bitrates; bit++) {
if (rate_set & 0x1)
enabled_rates |= band->bitrates[bit].hw_value;
rate_set >>= 1;
}
-#ifdef CONFIG_WL12XX_HT
/* MCS rates indication are on bits 16 - 23 */
rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
@@ -436,7 +472,6 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
rate_set >>= 1;
}
-#endif
return enabled_rates;
}
@@ -462,20 +497,24 @@ void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
struct sk_buff_head *queues)
{
- int i, q = -1;
- u32 min_blks = 0xffffffff;
+ int i, q = -1, ac;
+ u32 min_pkts = 0xffffffff;
/*
* Find a non-empty ac where:
* 1. There are packets to transmit
* 2. The FW has the least allocated blocks
+ *
+ * We prioritize the ACs according to VO>VI>BE>BK
*/
- for (i = 0; i < NUM_TX_QUEUES; i++)
- if (!skb_queue_empty(&queues[i]) &&
- (wl->tx_allocated_blocks[i] < min_blks)) {
- q = i;
- min_blks = wl->tx_allocated_blocks[q];
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ ac = wl1271_tx_get_queue(i);
+ if (!skb_queue_empty(&queues[ac]) &&
+ (wl->tx_allocated_pkts[ac] < min_pkts)) {
+ q = ac;
+ min_pkts = wl->tx_allocated_pkts[q];
}
+ }
if (q == -1)
return NULL;
@@ -579,7 +618,7 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
if (wl12xx_is_dummy_packet(wl, skb)) {
set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
} else if (wl->bss_type == BSS_TYPE_AP_BSS) {
- u8 hlid = wl1271_tx_get_hlid(skb);
+ u8 hlid = wl1271_tx_get_hlid(wl, skb);
skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
/* make sure we dequeue the same packet next time */
@@ -826,10 +865,14 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
total[i] = 0;
while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
- info = IEEE80211_SKB_CB(skb);
- info->status.rates[0].idx = -1;
- info->status.rates[0].count = 0;
- ieee80211_tx_status_ni(wl->hw, skb);
+
+ if (!wl12xx_is_dummy_packet(wl, skb)) {
+ info = IEEE80211_SKB_CB(skb);
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 0;
+ ieee80211_tx_status_ni(wl->hw, skb);
+ }
+
total[i]++;
}
}
@@ -852,9 +895,10 @@ void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
/* TX failure */
if (wl->bss_type == BSS_TYPE_AP_BSS) {
for (i = 0; i < AP_MAX_LINKS; i++) {
+ wl1271_free_sta(wl, i);
wl1271_tx_reset_link_queues(wl, i);
- wl->links[i].allocated_blks = 0;
- wl->links[i].prev_freed_blks = 0;
+ wl->links[i].allocated_pkts = 0;
+ wl->links[i].prev_freed_pkts = 0;
}
wl->last_tx_hlid = 0;
@@ -871,10 +915,14 @@ void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
ieee80211_tx_status_ni(wl->hw, skb);
}
}
- wl->tx_queue_count[i] = 0;
}
+
+ wl->ba_rx_bitmap = 0;
}
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ wl->tx_queue_count[i] = 0;
+
wl->stopped_queues_map = 0;
/*
@@ -942,20 +990,10 @@ void wl1271_tx_flush(struct wl1271 *wl)
wl1271_warning("Unable to flush all TX buffers, timed out.");
}
-u32 wl1271_tx_min_rate_get(struct wl1271 *wl)
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
- int i;
- u32 rate = 0;
-
- if (!wl->basic_rate_set) {
- WARN_ON(1);
- wl->basic_rate_set = wl->conf.tx.basic_rate;
- }
-
- for (i = 0; !rate; i++) {
- if ((wl->basic_rate_set >> i) & 0x1)
- rate = 1 << i;
- }
+ if (WARN_ON(!rate_set))
+ return 0;
- return rate;
+ return BIT(__ffs(rate_set));
}
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index 5d719b5a3d1..dc4f09adf08 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -25,13 +25,11 @@
#ifndef __TX_H__
#define __TX_H__
+#define TX_HW_BLOCK_SPARE_DEFAULT 1
#define TX_HW_BLOCK_SIZE 252
#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000
-/* The chipset reference driver states, that the "aid" value 1
- * is for infra-BSS, but is still always used */
-#define TX_HW_DEFAULT_AID 1
#define TX_HW_ATTR_SAVE_RETRIES BIT(0)
#define TX_HW_ATTR_HEADER_PAD BIT(1)
@@ -116,12 +114,8 @@ struct wl1271_tx_hw_descr {
u8 id;
/* The packet TID value (as User-Priority) */
u8 tid;
- union {
- /* STA - Identifier of the remote STA in IBSS, 1 in infra-BSS */
- u8 aid;
- /* AP - host link ID (HLID) */
- u8 hlid;
- } __packed;
+ /* host link ID (HLID) */
+ u8 hlid;
u8 reserved;
} __packed;
@@ -133,7 +127,8 @@ enum wl1271_tx_hw_res_status {
TX_TIMEOUT = 4,
TX_KEY_NOT_FOUND = 5,
TX_PEER_NOT_FOUND = 6,
- TX_SESSION_MISMATCH = 7
+ TX_SESSION_MISMATCH = 7,
+ TX_LINK_NOT_VALID = 8,
};
struct wl1271_tx_hw_res_descr {
@@ -214,10 +209,15 @@ void wl1271_tx_complete(struct wl1271 *wl);
void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
void wl1271_tx_flush(struct wl1271 *wl);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
-u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
-u32 wl1271_tx_min_rate_get(struct wl1271 *wl);
-u8 wl1271_tx_get_hlid(struct sk_buff *skb);
+u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
+ enum ieee80211_band rate_band);
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb);
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
+bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
+
+/* from main.c */
+void wl1271_free_sta(struct wl1271 *wl, u8 hlid);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 1a8751eb814..1ec90fc7505 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -112,28 +112,8 @@ extern u32 wl12xx_debug_level;
true); \
} while (0)
-#define WL1271_DEFAULT_STA_RX_CONFIG (CFG_UNI_FILTER_EN | \
- CFG_BSSID_FILTER_EN | \
- CFG_MC_FILTER_EN)
-
-#define WL1271_DEFAULT_STA_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
- CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
- CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
- CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
-
-#define WL1271_DEFAULT_AP_RX_CONFIG 0
-
-#define WL1271_DEFAULT_AP_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PREQ_EN | \
- CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
- CFG_RX_CTL_EN | CFG_RX_AUTH_EN | \
- CFG_RX_ASSOC_EN)
-
-
-
-#define WL1271_FW_NAME "ti-connectivity/wl1271-fw-2.bin"
-#define WL128X_FW_NAME "ti-connectivity/wl128x-fw.bin"
-#define WL127X_AP_FW_NAME "ti-connectivity/wl1271-fw-ap.bin"
-#define WL128X_AP_FW_NAME "ti-connectivity/wl128x-fw-ap.bin"
+#define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin"
+#define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin"
/*
* wl127x and wl128x are using the same NVS file name. However, the
@@ -157,25 +137,34 @@ extern u32 wl12xx_debug_level;
#define WL1271_DEFAULT_BEACON_INT 100
#define WL1271_DEFAULT_DTIM_PERIOD 1
-#define WL1271_AP_GLOBAL_HLID 0
-#define WL1271_AP_BROADCAST_HLID 1
-#define WL1271_AP_STA_HLID_START 2
+#define WL12XX_MAX_ROLES 4
+#define WL12XX_MAX_LINKS 12
+#define WL12XX_INVALID_ROLE_ID 0xff
+#define WL12XX_INVALID_LINK_ID 0xff
+
+/* Defined by FW as 0. Will not be freed or allocated. */
+#define WL12XX_SYSTEM_HLID 0
+
+/*
+ * TODO: we currently don't support multirole. remove
+ * this constant from the code when we do.
+ */
+#define WL1271_AP_STA_HLID_START 3
/*
- * When in AP-mode, we allow (at least) this number of mem-blocks
+ * When in AP-mode, we allow (at least) this number of packets
* to be transmitted to FW for a STA in PS-mode. Only when packets are
* present in the FW buffers it will wake the sleeping STA. We want to put
* enough packets for the driver to transmit all of its buffered data before
- * the STA goes to sleep again. But we don't want to take too much mem-blocks
+ * the STA goes to sleep again. But we don't want to take too much memory
* as it might hurt the throughput of active STAs.
- * The number of blocks (18) is enough for 2 large packets.
*/
-#define WL1271_PS_STA_MAX_BLOCKS (2 * 9)
+#define WL1271_PS_STA_MAX_PACKETS 2
#define WL1271_AP_BSS_INDEX 0
#define WL1271_AP_DEF_BEACON_EXP 20
-#define ACX_TX_DESCRIPTORS 32
+#define ACX_TX_DESCRIPTORS 16
#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
@@ -245,28 +234,24 @@ struct wl1271_stats {
#define NUM_TX_QUEUES 4
#define NUM_RX_PKT_DESC 8
-#define AP_MAX_STATIONS 5
+#define AP_MAX_STATIONS 8
-/* Broadcast and Global links + links to stations */
-#define AP_MAX_LINKS (AP_MAX_STATIONS + 2)
+/* Broadcast and Global links + system link + links to stations */
+/*
+ * TODO: when WL1271_AP_STA_HLID_START is no longer constant, change all
+ * the places that use this.
+ */
+#define AP_MAX_LINKS (AP_MAX_STATIONS + WL1271_AP_STA_HLID_START)
-/* FW status registers common for AP/STA */
-struct wl1271_fw_common_status {
+/* FW status registers */
+struct wl12xx_fw_status {
__le32 intr;
u8 fw_rx_counter;
u8 drv_rx_counter;
u8 reserved;
u8 tx_results_counter;
__le32 rx_pkt_descs[NUM_RX_PKT_DESC];
- __le32 tx_released_blks[NUM_TX_QUEUES];
__le32 fw_localtime;
-} __packed;
-
-/* FW status registers for AP */
-struct wl1271_fw_ap_status {
- struct wl1271_fw_common_status common;
-
- /* Next fields valid only in AP FW */
/*
* A bitmap (where each bit represents a single HLID)
@@ -274,29 +259,29 @@ struct wl1271_fw_ap_status {
*/
__le32 link_ps_bitmap;
- /* Number of freed MBs per HLID */
- u8 tx_lnk_free_blks[AP_MAX_LINKS];
- u8 padding_1[1];
-} __packed;
+ /*
+ * A bitmap (where each bit represents a single HLID) to indicate
+ * if the station is in Fast mode
+ */
+ __le32 link_fast_bitmap;
-/* FW status registers for STA */
-struct wl1271_fw_sta_status {
- struct wl1271_fw_common_status common;
+ /* Cumulative counter of total released mem blocks since FW-reset */
+ __le32 total_released_blks;
- u8 tx_total;
- u8 reserved1;
- __le16 reserved2;
- __le32 log_start_addr;
-} __packed;
+ /* Size (in Memory Blocks) of TX pool */
+ __le32 tx_total;
-struct wl1271_fw_full_status {
- union {
- struct wl1271_fw_common_status common;
- struct wl1271_fw_sta_status sta;
- struct wl1271_fw_ap_status ap;
- };
-} __packed;
+ /* Cumulative counter of released packets per AC */
+ u8 tx_released_pkts[NUM_TX_QUEUES];
+ /* Cumulative counter of freed packets per HLID */
+ u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
+ u8 padding_1[3];
+ __le32 log_start_addr;
+} __packed;
struct wl1271_rx_mem_pool_addr {
u32 addr;
@@ -309,7 +294,7 @@ struct wl1271_scan {
unsigned long scanned_ch[BITS_TO_LONGS(WL1271_MAX_CHANNELS)];
bool failed;
u8 state;
- u8 ssid[IW_ESSID_MAX_SIZE+1];
+ u8 ssid[IEEE80211_MAX_SSID_LEN+1];
size_t ssid_len;
};
@@ -342,7 +327,7 @@ struct wl1271_ap_key {
enum wl12xx_flags {
WL1271_FLAG_STA_ASSOCIATED,
- WL1271_FLAG_JOINED,
+ WL1271_FLAG_IBSS_JOINED,
WL1271_FLAG_GPIO_POWER,
WL1271_FLAG_TX_QUEUE_STOPPED,
WL1271_FLAG_TX_PENDING,
@@ -363,17 +348,21 @@ enum wl12xx_flags {
WL1271_FLAG_SOFT_GEMINI,
WL1271_FLAG_RX_STREAMING_STARTED,
WL1271_FLAG_RECOVERY_IN_PROGRESS,
+ WL1271_FLAG_CS_PROGRESS,
};
struct wl1271_link {
/* AP-mode - TX queue per AC in link */
struct sk_buff_head tx_queue[NUM_TX_QUEUES];
- /* accounting for allocated / available TX blocks in FW */
- u8 allocated_blks;
- u8 prev_freed_blks;
+ /* accounting for allocated / freed packets in FW */
+ u8 allocated_pkts;
+ u8 prev_freed_pkts;
u8 addr[ETH_ALEN];
+
+ /* bitmap of TIDs where RX BA sessions are active for this link */
+ u8 ba_bitmap;
};
struct wl1271 {
@@ -405,7 +394,6 @@ struct wl1271 {
u8 *fw;
size_t fw_len;
- u8 fw_bss_type;
void *nvs;
size_t nvs_len;
@@ -415,18 +403,37 @@ struct wl1271 {
u8 mac_addr[ETH_ALEN];
u8 bss_type;
u8 set_bss_type;
- u8 ssid[IW_ESSID_MAX_SIZE + 1];
+ u8 p2p; /* we are using p2p role */
+ u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
u8 ssid_len;
int channel;
+ u8 role_id;
+ u8 dev_role_id;
+ u8 system_hlid;
+ u8 sta_hlid;
+ u8 dev_hlid;
+ u8 ap_global_hlid;
+ u8 ap_bcast_hlid;
+
+ unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+ unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
+ unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
struct wl1271_acx_mem_map *target_mem_map;
/* Accounting for allocated / available TX blocks on HW */
- u32 tx_blocks_freed[NUM_TX_QUEUES];
+ u32 tx_blocks_freed;
u32 tx_blocks_available;
- u32 tx_allocated_blocks[NUM_TX_QUEUES];
+ u32 tx_allocated_blocks;
u32 tx_results_count;
+ /* amount of spare TX blocks to use */
+ u32 tx_spare_blocks;
+
+ /* Accounting for allocated / available Tx packets in HW */
+ u32 tx_pkts_freed[NUM_TX_QUEUES];
+ u32 tx_allocated_pkts[NUM_TX_QUEUES];
+
/* Transmitted TX packets counter for chipset interface */
u32 tx_packets_count;
@@ -520,6 +527,7 @@ struct wl1271 {
u32 basic_rate_set;
u32 basic_rate;
u32 rate_set;
+ u32 bitrate_masks[IEEE80211_NUM_BANDS];
/* The current band */
enum ieee80211_band band;
@@ -535,10 +543,6 @@ struct wl1271 {
struct work_struct rx_streaming_disable_work;
struct timer_list rx_streaming_timer;
- unsigned int filters;
- unsigned int rx_config;
- unsigned int rx_filter;
-
struct completion *elp_compl;
struct completion *ps_compl;
struct delayed_work elp_work;
@@ -562,7 +566,7 @@ struct wl1271 {
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
- struct wl1271_fw_full_status *fw_status;
+ struct wl12xx_fw_status *fw_status;
struct wl1271_tx_hw_res_if *tx_res_if;
struct ieee80211_vif *vif;
@@ -622,6 +626,12 @@ struct wl1271 {
/* Platform limitations */
unsigned int platform_quirks;
+
+ /* number of currently active RX BA sessions */
+ int ba_rx_session_count;
+
+ /* AP-mode - number of currently connected stations */
+ int active_sta_count;
};
struct wl1271_station {
@@ -659,21 +669,9 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
/* Each RX/TX transaction requires an end-of-transaction transfer */
#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
-/*
- * Older firmwares use 2 spare TX blocks
- * (for STA < 6.1.3.50.58 or for AP < 6.2.0.0.47)
- */
-#define WL12XX_QUIRK_USE_2_SPARE_BLOCKS BIT(1)
-
/* WL128X requires aggregated packets to be aligned to the SDIO block size */
#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2)
-/*
- * WL127X AP mode requires Low Power DRPw (LPD) enable to reduce power
- * consumption
- */
-#define WL12XX_QUIRK_LPD_MODE BIT(3)
-
/* Older firmwares did not implement the FW logger over bus feature */
#define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4)
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index 18fe542360f..f7971d3b089 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -77,7 +77,7 @@ struct wl12xx_ie_header {
struct wl12xx_ie_ssid {
struct wl12xx_ie_header header;
- char ssid[IW_ESSID_MAX_SIZE];
+ char ssid[IEEE80211_MAX_SSID_LEN];
} __packed;
struct wl12xx_ie_rates {
@@ -105,18 +105,6 @@ struct wl12xx_ie_country {
/* Templates */
-struct wl12xx_beacon_template {
- struct ieee80211_header header;
- __le32 time_stamp[2];
- __le16 beacon_interval;
- __le16 capability;
- struct wl12xx_ie_ssid ssid;
- struct wl12xx_ie_rates rates;
- struct wl12xx_ie_rates ext_rates;
- struct wl12xx_ie_ds_params ds_params;
- struct wl12xx_ie_country country;
-} __packed;
-
struct wl12xx_null_data_template {
struct ieee80211_header header;
} __packed;
@@ -146,19 +134,6 @@ struct wl12xx_arp_rsp_template {
__be32 target_ip;
} __packed;
-
-struct wl12xx_probe_resp_template {
- struct ieee80211_header header;
- __le32 time_stamp[2];
- __le16 beacon_interval;
- __le16 capability;
- struct wl12xx_ie_ssid ssid;
- struct wl12xx_ie_rates rates;
- struct wl12xx_ie_rates ext_rates;
- struct wl12xx_ie_ds_params ds_params;
- struct wl12xx_ie_country country;
-} __packed;
-
struct wl12xx_disconn_template {
struct ieee80211_header header;
__le16 disconn_reason;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 6bc7c92fbff..98fbf54f600 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1781,7 +1781,7 @@ static int wl3501_get_encode(struct net_device *dev,
keys, len_keys);
if (rc)
goto out;
- tocopy = min_t(u8, len_keys, wrqu->encoding.length);
+ tocopy = min_t(u16, len_keys, wrqu->encoding.length);
tocopy = min_t(u8, tocopy, 100);
wrqu->encoding.length = tocopy;
memcpy(extra, keys, tocopy);
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 415eec401e2..8efa2f2d957 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -1722,7 +1722,7 @@ static const struct net_device_ops zd1201_netdev_ops = {
.ndo_stop = zd1201_net_stop,
.ndo_start_xmit = zd1201_hard_start_xmit,
.ndo_tx_timeout = zd1201_tx_timeout,
- .ndo_set_multicast_list = zd1201_set_multicast,
+ .ndo_set_rx_mode = zd1201_set_multicast,
.ndo_set_mac_address = zd1201_set_mac_address,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index cabfae1e70b..0a70149df3f 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -1332,7 +1332,7 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
-static u64 zd_op_get_tsf(struct ieee80211_hw *hw)
+static u64 zd_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct zd_mac *mac = zd_hw_mac(hw);
return zd_chip_get_tsf(&mac->chip);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index fd00f25d985..d5508957200 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -60,6 +60,9 @@ struct netbk_rx_meta {
#define MAX_PENDING_REQS 256
+/* Discriminate from any valid pending_idx value. */
+#define INVALID_PENDING_IDX 0xFFFF
+
#define MAX_BUFFER_OFFSET PAGE_SIZE
/* extra field used in struct page */
@@ -155,13 +158,13 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
u16 flags);
static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
- unsigned int idx)
+ u16 idx)
{
return page_to_pfn(netbk->mmap_pages[idx]);
}
static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
- unsigned int idx)
+ u16 idx)
{
return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
}
@@ -215,6 +218,16 @@ static int get_page_ext(struct page *pg,
sizeof(struct iphdr) + MAX_IPOPTLEN + \
sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
+static u16 frag_get_pending_idx(skb_frag_t *frag)
+{
+ return (u16)frag->page_offset;
+}
+
+static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
+{
+ frag->page_offset = pending_idx;
+}
+
static inline pending_ring_idx_t pending_index(unsigned i)
{
return i & (MAX_PENDING_REQS-1);
@@ -321,7 +334,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
count++;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned long size = skb_shinfo(skb)->frags[i].size;
+ unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
unsigned long bytes;
while (size > 0) {
BUG_ON(copy_off > MAX_BUFFER_OFFSET);
@@ -512,8 +525,8 @@ static int netbk_gop_skb(struct sk_buff *skb,
for (i = 0; i < nr_frags; i++) {
netbk_gop_frag_copy(vif, skb, npo,
- skb_shinfo(skb)->frags[i].page,
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_page(&skb_shinfo(skb)->frags[i]),
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
&head);
}
@@ -890,7 +903,7 @@ static int netbk_count_requests(struct xenvif *vif,
static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
struct sk_buff *skb,
- unsigned long pending_idx)
+ u16 pending_idx)
{
struct page *page;
page = alloc_page(GFP_KERNEL|__GFP_COLD);
@@ -909,11 +922,11 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
- unsigned long pending_idx = *((u16 *)skb->data);
+ u16 pending_idx = *((u16 *)skb->data);
int i, start;
/* Skip first skb fragment if it is on same page as header fragment. */
- start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
for (i = start; i < shinfo->nr_frags; i++, txp++) {
struct page *page;
@@ -945,7 +958,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
xenvif_get(vif);
pending_tx_info[pending_idx].vif = vif;
- frags[i].page = (void *)pending_idx;
+ frag_set_pending_idx(&frags[i], pending_idx);
}
return gop;
@@ -956,7 +969,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
struct gnttab_copy **gopp)
{
struct gnttab_copy *gop = *gopp;
- int pending_idx = *((u16 *)skb->data);
+ u16 pending_idx = *((u16 *)skb->data);
struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
struct xenvif *vif = pending_tx_info[pending_idx].vif;
struct xen_netif_tx_request *txp;
@@ -976,13 +989,13 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
}
/* Skip first skb fragment if it is on same page as header fragment. */
- start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
for (i = start; i < nr_frags; i++) {
int j, newerr;
pending_ring_idx_t index;
- pending_idx = (unsigned long)shinfo->frags[i].page;
+ pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
/* Check error status: if okay then remember grant handle. */
newerr = (++gop)->status;
@@ -1008,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
pending_idx = *((u16 *)skb->data);
xen_netbk_idx_release(netbk, pending_idx);
for (j = start; j < i; j++) {
- pending_idx = (unsigned long)shinfo->frags[i].page;
+ pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
xen_netbk_idx_release(netbk, pending_idx);
}
@@ -1029,15 +1042,14 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = shinfo->frags + i;
struct xen_netif_tx_request *txp;
- unsigned long pending_idx;
+ struct page *page;
+ u16 pending_idx;
- pending_idx = (unsigned long)frag->page;
+ pending_idx = frag_get_pending_idx(frag);
txp = &netbk->pending_tx_info[pending_idx].req;
- frag->page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
- frag->size = txp->size;
- frag->page_offset = txp->offset;
-
+ page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
+ __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
skb->len += txp->size;
skb->data_len += txp->size;
skb->truesize += txp->size;
@@ -1349,11 +1361,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
skb_shinfo(skb)->nr_frags++;
- skb_shinfo(skb)->frags[0].page =
- (void *)(unsigned long)pending_idx;
+ frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+ pending_idx);
} else {
- /* Discriminate from any valid pending_idx value. */
- skb_shinfo(skb)->frags[0].page = (void *)~0UL;
+ frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+ INVALID_PENDING_IDX);
}
__skb_queue_tail(&netbk->tx_queue, skb);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d7c8a98daff..226faab2360 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -275,7 +275,7 @@ no_skb:
break;
}
- skb_shinfo(skb)->frags[0].page = page;
+ __skb_fill_page_desc(skb, 0, page, 0, 0);
skb_shinfo(skb)->nr_frags = 1;
__skb_queue_tail(&np->rx_batch, skb);
}
@@ -309,8 +309,8 @@ no_skb:
BUG_ON((signed short)ref < 0);
np->grant_rx_ref[id] = ref;
- pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
- vaddr = page_address(skb_shinfo(skb)->frags[0].page);
+ pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
+ vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
req = RING_GET_REQUEST(&np->rx, req_prod + i);
gnttab_grant_foreign_access_ref(ref,
@@ -461,13 +461,13 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
ref = gnttab_claim_grant_reference(&np->gref_tx_head);
BUG_ON((signed short)ref < 0);
- mfn = pfn_to_mfn(page_to_pfn(frag->page));
+ mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
mfn, GNTMAP_readonly);
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = frag->page_offset;
- tx->size = frag->size;
+ tx->size = skb_frag_size(frag);
tx->flags = 0;
}
@@ -762,23 +762,22 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
RING_IDX cons = np->rx.rsp_cons;
- skb_frag_t *frag = shinfo->frags + nr_frags;
struct sk_buff *nskb;
while ((nskb = __skb_dequeue(list))) {
struct xen_netif_rx_response *rx =
RING_GET_RESPONSE(&np->rx, ++cons);
+ skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
- frag->page = skb_shinfo(nskb)->frags[0].page;
- frag->page_offset = rx->offset;
- frag->size = rx->status;
+ __skb_fill_page_desc(skb, nr_frags,
+ skb_frag_page(nfrag),
+ rx->offset, rx->status);
skb->data_len += rx->status;
skb_shinfo(nskb)->nr_frags = 0;
kfree_skb(nskb);
- frag++;
nr_frags++;
}
@@ -873,7 +872,7 @@ static int handle_incoming_queue(struct net_device *dev,
memcpy(skb->data, vaddr + offset,
skb_headlen(skb));
- if (page != skb_shinfo(skb)->frags[0].page)
+ if (page != skb_frag_page(&skb_shinfo(skb)->frags[0]))
__free_page(page);
/* Ethernet work: Delayed to here as it peeks the header. */
@@ -954,7 +953,8 @@ err:
}
}
- NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
+ NETFRONT_SKB_CB(skb)->page =
+ skb_frag_page(&skb_shinfo(skb)->frags[0]);
NETFRONT_SKB_CB(skb)->offset = rx->offset;
len = rx->status;
@@ -965,10 +965,10 @@ err:
if (rx->status > len) {
skb_shinfo(skb)->frags[0].page_offset =
rx->offset + len;
- skb_shinfo(skb)->frags[0].size = rx->status - len;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
skb->data_len = rx->status - len;
} else {
- skb_shinfo(skb)->frags[0].page = NULL;
+ __skb_fill_page_desc(skb, 0, NULL, 0, 0);
skb_shinfo(skb)->nr_frags = 0;
}
@@ -1143,7 +1143,8 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
/* Remap the page. */
- struct page *page = skb_shinfo(skb)->frags[0].page;
+ const struct page *page =
+ skb_frag_page(&skb_shinfo(skb)->frags[0]);
unsigned long pfn = page_to_pfn(page);
void *vaddr = page_address(page);
@@ -1650,6 +1651,8 @@ static int xennet_connect(struct net_device *dev)
/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
+ skb_frag_t *frag;
+ const struct page *page;
if (!np->rx_skbs[i])
continue;
@@ -1657,10 +1660,11 @@ static int xennet_connect(struct net_device *dev)
ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
req = RING_GET_REQUEST(&np->rx, requeue_idx);
+ frag = &skb_shinfo(skb)->frags[0];
+ page = skb_frag_page(frag);
gnttab_grant_foreign_access_ref(
ref, np->xbdev->otherend_id,
- pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
- frags->page)),
+ pfn_to_mfn(page_to_pfn(page)),
0);
req->gref = ref;
req->id = requeue_idx;
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 2acff4307ca..5af959274d4 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -27,4 +27,15 @@ config NFC_PN533
Say Y here to compile support for PN533 devices into the
kernel or say M to compile it as module (pn533).
+config NFC_WILINK
+ tristate "Texas Instruments NFC WiLink driver"
+ depends on TI_ST && NFC_NCI
+ help
+ This enables the NFC driver for Texas Instrument's BT/FM/GPS/NFC
+ combo devices. This makes use of shared transport line discipline
+ core driver to communicate with the NFC core of the combo chip.
+
+ Say Y here to compile support for Texas Instrument's NFC WiLink driver
+ into the kernel or say M to compile it as module.
+
endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 8ef446d2c1b..ab99e8572f0 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -4,5 +4,6 @@
obj-$(CONFIG_PN544_NFC) += pn544.o
obj-$(CONFIG_NFC_PN533) += pn533.o
+obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
new file mode 100644
index 00000000000..5b0f1ff8036
--- /dev/null
+++ b/drivers/nfc/nfcwilink.c
@@ -0,0 +1,342 @@
+/*
+ * Texas Instrument's NFC Driver For Shared Transport.
+ *
+ * NFC Driver acts as interface between NCI core and
+ * TI Shared Transport Layer.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on btwilink.c, which was written
+ * by Raja Mani and Pavan Savoy.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/platform_device.h>
+#include <linux/nfc.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/ti_wilink_st.h>
+
+#define NFCWILINK_CHNL 12
+#define NFCWILINK_OPCODE 7
+#define NFCWILINK_MAX_FRAME_SIZE 300
+#define NFCWILINK_HDR_LEN 4
+#define NFCWILINK_OFFSET_LEN_IN_HDR 1
+#define NFCWILINK_LEN_SIZE 2
+#define NFCWILINK_REGISTER_TIMEOUT 8000 /* 8 sec */
+
+struct nfcwilink_hdr {
+ u8 chnl;
+ u8 opcode;
+ u16 len;
+} __packed;
+
+struct nfcwilink {
+ struct platform_device *pdev;
+ struct nci_dev *ndev;
+ unsigned long flags;
+
+ char st_register_cb_status;
+ long (*st_write) (struct sk_buff *);
+ struct completion st_register_completed;
+};
+
+/* NFCWILINK driver flags */
+enum {
+ NFCWILINK_RUNNING,
+};
+
+/* Called by ST when registration is complete */
+static void nfcwilink_register_complete(void *priv_data, char data)
+{
+ struct nfcwilink *drv = priv_data;
+
+ nfc_dev_dbg(&drv->pdev->dev, "register_complete entry");
+
+ /* store ST registration status */
+ drv->st_register_cb_status = data;
+
+ /* complete the wait in nfc_st_open() */
+ complete(&drv->st_register_completed);
+}
+
+/* Called by ST when receive data is available */
+static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
+{
+ struct nfcwilink *drv = priv_data;
+ int rc;
+
+ nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
+
+ if (!skb)
+ return -EFAULT;
+
+ if (!drv) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ /* strip the ST header
+ (apart for the chnl byte, which is not received in the hdr) */
+ skb_pull(skb, (NFCWILINK_HDR_LEN-1));
+
+ skb->dev = (void *) drv->ndev;
+
+ /* Forward skb to NCI core layer */
+ rc = nci_recv_frame(skb);
+ if (rc < 0) {
+ nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* protocol structure registered with ST */
+static struct st_proto_s nfcwilink_proto = {
+ .chnl_id = NFCWILINK_CHNL,
+ .max_frame_size = NFCWILINK_MAX_FRAME_SIZE,
+ .hdr_len = (NFCWILINK_HDR_LEN-1), /* not including chnl byte */
+ .offset_len_in_hdr = NFCWILINK_OFFSET_LEN_IN_HDR,
+ .len_size = NFCWILINK_LEN_SIZE,
+ .reserve = 0,
+ .recv = nfcwilink_receive,
+ .reg_complete_cb = nfcwilink_register_complete,
+ .write = NULL,
+};
+
+static int nfcwilink_open(struct nci_dev *ndev)
+{
+ struct nfcwilink *drv = nci_get_drvdata(ndev);
+ unsigned long comp_ret;
+ int rc;
+
+ nfc_dev_dbg(&drv->pdev->dev, "open entry");
+
+ if (test_and_set_bit(NFCWILINK_RUNNING, &drv->flags)) {
+ rc = -EBUSY;
+ goto exit;
+ }
+
+ nfcwilink_proto.priv_data = drv;
+
+ init_completion(&drv->st_register_completed);
+ drv->st_register_cb_status = -EINPROGRESS;
+
+ rc = st_register(&nfcwilink_proto);
+ if (rc < 0) {
+ if (rc == -EINPROGRESS) {
+ comp_ret = wait_for_completion_timeout(
+ &drv->st_register_completed,
+ msecs_to_jiffies(NFCWILINK_REGISTER_TIMEOUT));
+
+ nfc_dev_dbg(&drv->pdev->dev,
+ "wait_for_completion_timeout returned %ld",
+ comp_ret);
+
+ if (comp_ret == 0) {
+ /* timeout */
+ rc = -ETIMEDOUT;
+ goto clear_exit;
+ } else if (drv->st_register_cb_status != 0) {
+ rc = drv->st_register_cb_status;
+ nfc_dev_err(&drv->pdev->dev,
+ "st_register_cb failed %d", rc);
+ goto clear_exit;
+ }
+ } else {
+ nfc_dev_err(&drv->pdev->dev,
+ "st_register failed %d", rc);
+ goto clear_exit;
+ }
+ }
+
+ /* st_register MUST fill the write callback */
+ BUG_ON(nfcwilink_proto.write == NULL);
+ drv->st_write = nfcwilink_proto.write;
+
+ goto exit;
+
+clear_exit:
+ clear_bit(NFCWILINK_RUNNING, &drv->flags);
+
+exit:
+ return rc;
+}
+
+static int nfcwilink_close(struct nci_dev *ndev)
+{
+ struct nfcwilink *drv = nci_get_drvdata(ndev);
+ int rc;
+
+ nfc_dev_dbg(&drv->pdev->dev, "close entry");
+
+ if (!test_and_clear_bit(NFCWILINK_RUNNING, &drv->flags))
+ return 0;
+
+ rc = st_unregister(&nfcwilink_proto);
+ if (rc)
+ nfc_dev_err(&drv->pdev->dev, "st_unregister failed %d", rc);
+
+ drv->st_write = NULL;
+
+ return rc;
+}
+
+static int nfcwilink_send(struct sk_buff *skb)
+{
+ struct nci_dev *ndev = (struct nci_dev *)skb->dev;
+ struct nfcwilink *drv = nci_get_drvdata(ndev);
+ struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000};
+ long len;
+
+ nfc_dev_dbg(&drv->pdev->dev, "send entry, len %d", skb->len);
+
+ if (!test_bit(NFCWILINK_RUNNING, &drv->flags))
+ return -EBUSY;
+
+ /* add the ST hdr to the start of the buffer */
+ hdr.len = skb->len;
+ memcpy(skb_push(skb, NFCWILINK_HDR_LEN), &hdr, NFCWILINK_HDR_LEN);
+
+ /* Insert skb to shared transport layer's transmit queue.
+ * Freeing skb memory is taken care in shared transport layer,
+ * so don't free skb memory here.
+ */
+ len = drv->st_write(skb);
+ if (len < 0) {
+ kfree_skb(skb);
+ nfc_dev_err(&drv->pdev->dev, "st_write failed %ld", len);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static struct nci_ops nfcwilink_ops = {
+ .open = nfcwilink_open,
+ .close = nfcwilink_close,
+ .send = nfcwilink_send,
+};
+
+static int nfcwilink_probe(struct platform_device *pdev)
+{
+ static struct nfcwilink *drv;
+ int rc;
+ u32 protocols;
+
+ nfc_dev_dbg(&pdev->dev, "probe entry");
+
+ drv = kzalloc(sizeof(struct nfcwilink), GFP_KERNEL);
+ if (!drv) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ drv->pdev = pdev;
+
+ protocols = NFC_PROTO_JEWEL_MASK
+ | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
+ | NFC_PROTO_ISO14443_MASK
+ | NFC_PROTO_NFC_DEP_MASK;
+
+ drv->ndev = nci_allocate_device(&nfcwilink_ops,
+ protocols,
+ NFCWILINK_HDR_LEN,
+ 0);
+ if (!drv->ndev) {
+ nfc_dev_err(&pdev->dev, "nci_allocate_device failed");
+ rc = -ENOMEM;
+ goto free_exit;
+ }
+
+ nci_set_parent_dev(drv->ndev, &pdev->dev);
+ nci_set_drvdata(drv->ndev, drv);
+
+ rc = nci_register_device(drv->ndev);
+ if (rc < 0) {
+ nfc_dev_err(&pdev->dev, "nci_register_device failed %d", rc);
+ goto free_dev_exit;
+ }
+
+ dev_set_drvdata(&pdev->dev, drv);
+
+ goto exit;
+
+free_dev_exit:
+ nci_free_device(drv->ndev);
+
+free_exit:
+ kfree(drv);
+
+exit:
+ return rc;
+}
+
+static int nfcwilink_remove(struct platform_device *pdev)
+{
+ struct nfcwilink *drv = dev_get_drvdata(&pdev->dev);
+ struct nci_dev *ndev;
+
+ nfc_dev_dbg(&pdev->dev, "remove entry");
+
+ if (!drv)
+ return -EFAULT;
+
+ ndev = drv->ndev;
+
+ nci_unregister_device(ndev);
+ nci_free_device(ndev);
+
+ kfree(drv);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver nfcwilink_driver = {
+ .probe = nfcwilink_probe,
+ .remove = nfcwilink_remove,
+ .driver = {
+ .name = "nfcwilink",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* ------- Module Init/Exit interfaces ------ */
+static int __init nfcwilink_init(void)
+{
+ printk(KERN_INFO "NFC Driver for TI WiLink");
+
+ return platform_driver_register(&nfcwilink_driver);
+}
+
+static void __exit nfcwilink_exit(void)
+{
+ platform_driver_unregister(&nfcwilink_driver);
+}
+
+module_init(nfcwilink_init);
+module_exit(nfcwilink_exit);
+
+/* ------ Module Info ------ */
+
+MODULE_AUTHOR("Ilan Elias <ilane@ti.com>");
+MODULE_DESCRIPTION("NFC Driver for TI Shared Transport");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index c77e0543e50..7bcb1febef0 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -28,7 +28,7 @@
#include <linux/usb.h>
#include <linux/nfc.h>
#include <linux/netdevice.h>
-#include <net/nfc.h>
+#include <net/nfc/nfc.h>
#define VERSION "0.1"
@@ -1246,7 +1246,6 @@ static int pn533_data_exchange_tx_frame(struct pn533 *dev, struct sk_buff *skb)
{
int payload_len = skb->len;
struct pn533_frame *out_frame;
- struct sk_buff *discarded;
u8 tg;
nfc_dev_dbg(&dev->interface->dev, "%s - Sending %d bytes", __func__,
@@ -1260,18 +1259,6 @@ static int pn533_data_exchange_tx_frame(struct pn533 *dev, struct sk_buff *skb)
return -ENOSYS;
}
- /* Reserving header space */
- if (skb_cow_head(skb, PN533_CMD_DATAEXCH_HEAD_LEN)) {
- nfc_dev_err(&dev->interface->dev, "Error to add header data");
- return -ENOMEM;
- }
-
- /* Reserving tail space, see pn533_tx_frame_finish */
- if (skb_cow_data(skb, PN533_FRAME_TAIL_SIZE, &discarded) < 0) {
- nfc_dev_err(&dev->interface->dev, "Error to add tail data");
- return -ENOMEM;
- }
-
skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN);
out_frame = (struct pn533_frame *) skb->data;
@@ -1445,6 +1432,8 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
}
struct nfc_ops pn533_nfc_ops = {
+ .dev_up = NULL,
+ .dev_down = NULL,
.start_poll = pn533_start_poll,
.stop_poll = pn533_stop_poll,
.activate_target = pn533_activate_target,
@@ -1536,7 +1525,9 @@ static int pn533_probe(struct usb_interface *interface,
| NFC_PROTO_ISO14443_MASK
| NFC_PROTO_NFC_DEP_MASK;
- dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols);
+ dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
+ PN533_CMD_DATAEXCH_HEAD_LEN,
+ PN533_FRAME_TAIL_SIZE);
if (!dev->nfc_dev)
goto kill_tasklet;
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index dd87e86048b..c0cc4e7ff02 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -82,10 +82,10 @@ int alloc_event_buffer(void)
{
unsigned long flags;
- spin_lock_irqsave(&oprofilefs_lock, flags);
+ raw_spin_lock_irqsave(&oprofilefs_lock, flags);
buffer_size = oprofile_buffer_size;
buffer_watershed = oprofile_buffer_watershed;
- spin_unlock_irqrestore(&oprofilefs_lock, flags);
+ raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
if (buffer_watershed >= buffer_size)
return -EINVAL;
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index 94796f39bc4..da14432806c 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -160,9 +160,9 @@ static int oprofile_perf_create_files(struct super_block *sb, struct dentry *roo
static int oprofile_perf_setup(void)
{
- spin_lock(&oprofilefs_lock);
+ raw_spin_lock(&oprofilefs_lock);
op_perf_setup();
- spin_unlock(&oprofilefs_lock);
+ raw_spin_unlock(&oprofilefs_lock);
return 0;
}
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index e9ff6f7770b..d0de6cc2d7a 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -21,7 +21,7 @@
#define OPROFILEFS_MAGIC 0x6f70726f
-DEFINE_SPINLOCK(oprofilefs_lock);
+DEFINE_RAW_SPINLOCK(oprofilefs_lock);
static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
{
@@ -76,9 +76,9 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
if (copy_from_user(tmpbuf, buf, count))
return -EFAULT;
- spin_lock_irqsave(&oprofilefs_lock, flags);
+ raw_spin_lock_irqsave(&oprofilefs_lock, flags);
*val = simple_strtoul(tmpbuf, NULL, 0);
- spin_unlock_irqrestore(&oprofilefs_lock, flags);
+ raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
return 0;
}
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 57a6d19eba4..a6f762188bc 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -668,7 +668,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
* @dev: instance of PCI owned by the driver that's asking
* @mask: number of address bits this PCI device can handle
*
- * See Documentation/PCI/PCI-DMA-mapping.txt
+ * See Documentation/DMA-API-HOWTO.txt
*/
static int sba_dma_supported( struct device *dev, u64 mask)
{
@@ -680,7 +680,7 @@ static int sba_dma_supported( struct device *dev, u64 mask)
return(0);
}
- /* Documentation/PCI/PCI-DMA-mapping.txt tells drivers to try 64-bit
+ /* Documentation/DMA-API-HOWTO.txt tells drivers to try 64-bit
* first, then fall back to 32-bit if that fails.
* We are just "encouraging" 32-bit DMA masks here since we can
* never allow IOMMU bypass unless we add special support for ZX1.
@@ -706,7 +706,7 @@ static int sba_dma_supported( struct device *dev, u64 mask)
* @size: number of bytes to map in driver buffer.
* @direction: R/W or both.
*
- * See Documentation/PCI/PCI-DMA-mapping.txt
+ * See Documentation/DMA-API-HOWTO.txt
*/
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
@@ -785,7 +785,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
* @size: number of bytes mapped in driver buffer.
* @direction: R/W or both.
*
- * See Documentation/PCI/PCI-DMA-mapping.txt
+ * See Documentation/DMA-API-HOWTO.txt
*/
static void
sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
@@ -861,7 +861,7 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
* @size: number of bytes mapped in driver buffer.
* @dma_handle: IOVA of new buffer.
*
- * See Documentation/PCI/PCI-DMA-mapping.txt
+ * See Documentation/DMA-API-HOWTO.txt
*/
static void *sba_alloc_consistent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
@@ -892,7 +892,7 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size,
* @vaddr: virtual address IOVA of "consistent" buffer.
* @dma_handler: IO virtual address of "consistent" buffer.
*
- * See Documentation/PCI/PCI-DMA-mapping.txt
+ * See Documentation/DMA-API-HOWTO.txt
*/
static void
sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
@@ -927,7 +927,7 @@ int dump_run_sg = 0;
* @nents: number of entries in list
* @direction: R/W or both.
*
- * See Documentation/PCI/PCI-DMA-mapping.txt
+ * See Documentation/DMA-API-HOWTO.txt
*/
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
@@ -1011,7 +1011,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
* @nents: number of entries in list
* @direction: R/W or both.
*
- * See Documentation/PCI/PCI-DMA-mapping.txt
+ * See Documentation/DMA-API-HOWTO.txt
*/
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index d1cdb9449f8..d0b597b5039 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2595,14 +2595,17 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
break;
case 0x6:
printk(KERN_INFO "parport_pc: ITE8873 found (1S)\n");
+ release_region(inta_addr[i], 32);
return 0;
case 0x8:
printk(KERN_INFO "parport_pc: ITE8874 found (2S)\n");
+ release_region(inta_addr[i], 32);
return 0;
default:
printk(KERN_INFO "parport_pc: unknown ITE887x\n");
printk(KERN_INFO "parport_pc: please mail 'lspci -nvv' "
"output to Rich.Liu@ite.com.tw\n");
+ release_region(inta_addr[i], 32);
return 0;
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 1196f61a4ab..b23856aaf6e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2788,7 +2788,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
#endif /*CONFIG_MMC_RICOH_MMC*/
-#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
+#ifdef CONFIG_DMAR_TABLE
#define VTUNCERRMSK_REG 0x1ac
#define VTD_MSK_SPEC_ERRORS (1 << 31)
/*
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 6fa215a3861..90832a95599 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -400,9 +400,8 @@ static int pcifront_claim_resource(struct pci_dev *dev, void *data)
dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n",
pci_name(dev), i);
if (pci_claim_resource(dev, i)) {
- dev_err(&pdev->xdev->dev, "Could not claim "
- "resource %s/%d! Device offline. Try "
- "giving less than 4GB to domain.\n",
+ dev_err(&pdev->xdev->dev, "Could not claim resource %s/%d! "
+ "Device offline. Try using e820_host=1 in the guest config.\n",
pci_name(dev), i);
}
}
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c
index c998f7aaadb..0fac9658b02 100644
--- a/drivers/pcmcia/sa1100_simpad.c
+++ b/drivers/pcmcia/sa1100_simpad.c
@@ -15,10 +15,6 @@
#include <mach/simpad.h>
#include "sa1100_generic.h"
-extern long get_cs3_shadow(void);
-extern void set_cs3_bit(int value);
-extern void clear_cs3_bit(int value);
-
static struct pcmcia_irqs irqs[] = {
{ 1, IRQ_GPIO_CF_CD, "CF_CD" },
};
@@ -26,7 +22,7 @@ static struct pcmcia_irqs irqs[] = {
static int simpad_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
+ simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
skt->socket.pci_irq = IRQ_GPIO_CF_IRQ;
@@ -38,8 +34,8 @@ static void simpad_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
/* Disable CF bus: */
- //set_cs3_bit(PCMCIA_BUFF_DIS);
- clear_cs3_bit(PCMCIA_RESET);
+ /*simpad_set_cs3_bit(PCMCIA_BUFF_DIS);*/
+ simpad_clear_cs3_bit(PCMCIA_RESET);
}
static void
@@ -47,15 +43,16 @@ simpad_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
struct pcmcia_state *state)
{
unsigned long levels = GPLR;
- long cs3reg = get_cs3_shadow();
+ long cs3reg = simpad_get_cs3_ro();
state->detect=((levels & GPIO_CF_CD)==0)?1:0;
state->ready=(levels & GPIO_CF_IRQ)?1:0;
- state->bvd1=1; /* Not available on Simpad. */
- state->bvd2=1; /* Not available on Simpad. */
+ state->bvd1 = 1; /* Might be cs3reg & PCMCIA_BVD1 */
+ state->bvd2 = 1; /* Might be cs3reg & PCMCIA_BVD2 */
state->wrprot=0; /* Not available on Simpad. */
-
- if((cs3reg & 0x0c) == 0x0c) {
+
+ if ((cs3reg & (PCMCIA_VS1|PCMCIA_VS2)) ==
+ (PCMCIA_VS1|PCMCIA_VS2)) {
state->vs_3v=0;
state->vs_Xv=0;
} else {
@@ -75,23 +72,23 @@ simpad_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
/* Murphy: see table of MIC2562a-1 */
switch (state->Vcc) {
case 0:
- clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
+ simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
break;
case 33:
- clear_cs3_bit(VCC_3V_EN|EN1);
- set_cs3_bit(VCC_5V_EN|EN0);
+ simpad_clear_cs3_bit(VCC_3V_EN|EN1);
+ simpad_set_cs3_bit(VCC_5V_EN|EN0);
break;
case 50:
- clear_cs3_bit(VCC_5V_EN|EN1);
- set_cs3_bit(VCC_3V_EN|EN0);
+ simpad_clear_cs3_bit(VCC_5V_EN|EN1);
+ simpad_set_cs3_bit(VCC_3V_EN|EN0);
break;
default:
printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
__func__, state->Vcc);
- clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
+ simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
local_irq_restore(flags);
return -1;
}
@@ -110,7 +107,7 @@ static void simpad_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
static void simpad_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
{
soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs));
- set_cs3_bit(PCMCIA_RESET);
+ simpad_set_cs3_bit(PCMCIA_RESET);
}
static struct pcmcia_low_level simpad_pcmcia_ops = {
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
new file mode 100644
index 00000000000..ef566443f94
--- /dev/null
+++ b/drivers/pinctrl/Kconfig
@@ -0,0 +1,43 @@
+#
+# PINCTRL infrastructure and drivers
+#
+
+menuconfig PINCTRL
+ bool "PINCTRL Support"
+ depends on EXPERIMENTAL
+ help
+ This enables the PINCTRL subsystem for controlling pins
+ on chip packages, for example multiplexing pins on primarily
+ PGA and BGA packages for systems on chip.
+
+ If unsure, say N.
+
+if PINCTRL
+
+config PINMUX
+ bool "Support pinmux controllers"
+ help
+ Say Y here if you want the pincontrol subsystem to handle pin
+ multiplexing drivers.
+
+config DEBUG_PINCTRL
+ bool "Debug PINCTRL calls"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here to add some extra checks and diagnostics to PINCTRL calls.
+
+config PINMUX_SIRF
+ bool "CSR SiRFprimaII pinmux driver"
+ depends on ARCH_PRIMA2
+ select PINMUX
+ help
+ Say Y here to enable the SiRFprimaII pinmux driver
+
+config PINMUX_U300
+ bool "U300 pinmux driver"
+ depends on ARCH_U300
+ select PINMUX
+ help
+ Say Y here to enable the U300 pinmux driver
+
+endif
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
new file mode 100644
index 00000000000..bdc548a6b7e
--- /dev/null
+++ b/drivers/pinctrl/Makefile
@@ -0,0 +1,8 @@
+# generic pinmux support
+
+ccflags-$(CONFIG_DEBUG_PINMUX) += -DDEBUG
+
+obj-$(CONFIG_PINCTRL) += core.o
+obj-$(CONFIG_PINMUX) += pinmux.o
+obj-$(CONFIG_PINMUX_SIRF) += pinmux-sirf.o
+obj-$(CONFIG_PINMUX_U300) += pinmux-u300.o
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
new file mode 100644
index 00000000000..423522d8731
--- /dev/null
+++ b/drivers/pinctrl/core.c
@@ -0,0 +1,598 @@
+/*
+ * Core driver for the pin control subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#define pr_fmt(fmt) "pinctrl core: " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/radix-tree.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/machine.h>
+#include "core.h"
+#include "pinmux.h"
+
+/* Global list of pin control devices */
+static DEFINE_MUTEX(pinctrldev_list_mutex);
+static LIST_HEAD(pinctrldev_list);
+
+static void pinctrl_dev_release(struct device *dev)
+{
+ struct pinctrl_dev *pctldev = dev_get_drvdata(dev);
+ kfree(pctldev);
+}
+
+const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev)
+{
+ /* We're not allowed to register devices without name */
+ return pctldev->desc->name;
+}
+EXPORT_SYMBOL_GPL(pinctrl_dev_get_name);
+
+void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev)
+{
+ return pctldev->driver_data;
+}
+EXPORT_SYMBOL_GPL(pinctrl_dev_get_drvdata);
+
+/**
+ * get_pinctrl_dev_from_dev() - look up pin controller device
+ * @dev: a device pointer, this may be NULL but then devname needs to be
+ * defined instead
+ * @devname: the name of a device instance, as returned by dev_name(), this
+ * may be NULL but then dev needs to be defined instead
+ *
+ * Looks up a pin control device matching a certain device name or pure device
+ * pointer, the pure device pointer will take precedence.
+ */
+struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
+ const char *devname)
+{
+ struct pinctrl_dev *pctldev = NULL;
+ bool found = false;
+
+ mutex_lock(&pinctrldev_list_mutex);
+ list_for_each_entry(pctldev, &pinctrldev_list, node) {
+ if (dev && &pctldev->dev == dev) {
+ /* Matched on device pointer */
+ found = true;
+ break;
+ }
+
+ if (devname &&
+ !strcmp(dev_name(&pctldev->dev), devname)) {
+ /* Matched on device name */
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&pinctrldev_list_mutex);
+
+ return found ? pctldev : NULL;
+}
+
+struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin)
+{
+ struct pin_desc *pindesc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pctldev->pin_desc_tree_lock, flags);
+ pindesc = radix_tree_lookup(&pctldev->pin_desc_tree, pin);
+ spin_unlock_irqrestore(&pctldev->pin_desc_tree_lock, flags);
+
+ return pindesc;
+}
+
+/**
+ * pin_is_valid() - check if pin exists on controller
+ * @pctldev: the pin control device to check the pin on
+ * @pin: pin to check, use the local pin controller index number
+ *
+ * This tells us whether a certain pin exist on a certain pin controller or
+ * not. Pin lists may be sparse, so some pins may not exist.
+ */
+bool pin_is_valid(struct pinctrl_dev *pctldev, int pin)
+{
+ struct pin_desc *pindesc;
+
+ if (pin < 0)
+ return false;
+
+ pindesc = pin_desc_get(pctldev, pin);
+ if (pindesc == NULL)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(pin_is_valid);
+
+/* Deletes a range of pin descriptors */
+static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev,
+ const struct pinctrl_pin_desc *pins,
+ unsigned num_pins)
+{
+ int i;
+
+ spin_lock(&pctldev->pin_desc_tree_lock);
+ for (i = 0; i < num_pins; i++) {
+ struct pin_desc *pindesc;
+
+ pindesc = radix_tree_lookup(&pctldev->pin_desc_tree,
+ pins[i].number);
+ if (pindesc != NULL) {
+ radix_tree_delete(&pctldev->pin_desc_tree,
+ pins[i].number);
+ }
+ kfree(pindesc);
+ }
+ spin_unlock(&pctldev->pin_desc_tree_lock);
+}
+
+static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
+ unsigned number, const char *name)
+{
+ struct pin_desc *pindesc;
+
+ pindesc = pin_desc_get(pctldev, number);
+ if (pindesc != NULL) {
+ pr_err("pin %d already registered on %s\n", number,
+ pctldev->desc->name);
+ return -EINVAL;
+ }
+
+ pindesc = kzalloc(sizeof(*pindesc), GFP_KERNEL);
+ if (pindesc == NULL)
+ return -ENOMEM;
+ spin_lock_init(&pindesc->lock);
+
+ /* Set owner */
+ pindesc->pctldev = pctldev;
+
+ /* Copy basic pin info */
+ pindesc->name = name;
+
+ spin_lock(&pctldev->pin_desc_tree_lock);
+ radix_tree_insert(&pctldev->pin_desc_tree, number, pindesc);
+ spin_unlock(&pctldev->pin_desc_tree_lock);
+ pr_debug("registered pin %d (%s) on %s\n",
+ number, name ? name : "(unnamed)", pctldev->desc->name);
+ return 0;
+}
+
+static int pinctrl_register_pins(struct pinctrl_dev *pctldev,
+ struct pinctrl_pin_desc const *pins,
+ unsigned num_descs)
+{
+ unsigned i;
+ int ret = 0;
+
+ for (i = 0; i < num_descs; i++) {
+ ret = pinctrl_register_one_pin(pctldev,
+ pins[i].number, pins[i].name);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * pinctrl_match_gpio_range() - check if a certain GPIO pin is in range
+ * @pctldev: pin controller device to check
+ * @gpio: gpio pin to check taken from the global GPIO pin space
+ *
+ * Tries to match a GPIO pin number to the ranges handled by a certain pin
+ * controller, return the range or NULL
+ */
+static struct pinctrl_gpio_range *
+pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, unsigned gpio)
+{
+ struct pinctrl_gpio_range *range = NULL;
+
+ /* Loop over the ranges */
+ mutex_lock(&pctldev->gpio_ranges_lock);
+ list_for_each_entry(range, &pctldev->gpio_ranges, node) {
+ /* Check if we're in the valid range */
+ if (gpio >= range->base &&
+ gpio < range->base + range->npins) {
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+ return range;
+ }
+ }
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+
+ return NULL;
+}
+
+/**
+ * pinctrl_get_device_gpio_range() - find device for GPIO range
+ * @gpio: the pin to locate the pin controller for
+ * @outdev: the pin control device if found
+ * @outrange: the GPIO range if found
+ *
+ * Find the pin controller handling a certain GPIO pin from the pinspace of
+ * the GPIO subsystem, return the device and the matching GPIO range. Returns
+ * negative if the GPIO range could not be found in any device.
+ */
+int pinctrl_get_device_gpio_range(unsigned gpio,
+ struct pinctrl_dev **outdev,
+ struct pinctrl_gpio_range **outrange)
+{
+ struct pinctrl_dev *pctldev = NULL;
+
+ /* Loop over the pin controllers */
+ mutex_lock(&pinctrldev_list_mutex);
+ list_for_each_entry(pctldev, &pinctrldev_list, node) {
+ struct pinctrl_gpio_range *range;
+
+ range = pinctrl_match_gpio_range(pctldev, gpio);
+ if (range != NULL) {
+ *outdev = pctldev;
+ *outrange = range;
+ mutex_unlock(&pinctrldev_list_mutex);
+ return 0;
+ }
+ }
+ mutex_unlock(&pinctrldev_list_mutex);
+
+ return -EINVAL;
+}
+
+/**
+ * pinctrl_add_gpio_range() - register a GPIO range for a controller
+ * @pctldev: pin controller device to add the range to
+ * @range: the GPIO range to add
+ *
+ * This adds a range of GPIOs to be handled by a certain pin controller. Call
+ * this to register handled ranges after registering your pin controller.
+ */
+void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range)
+{
+ mutex_lock(&pctldev->gpio_ranges_lock);
+ list_add(&range->node, &pctldev->gpio_ranges);
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+}
+
+/**
+ * pinctrl_remove_gpio_range() - remove a range of GPIOs fro a pin controller
+ * @pctldev: pin controller device to remove the range from
+ * @range: the GPIO range to remove
+ */
+void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range)
+{
+ mutex_lock(&pctldev->gpio_ranges_lock);
+ list_del(&range->node);
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int pinctrl_pins_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ const struct pinctrl_ops *ops = pctldev->desc->pctlops;
+ unsigned pin;
+
+ seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
+ seq_printf(s, "max pin number: %d\n", pctldev->desc->maxpin);
+
+ /* The highest pin number need to be included in the loop, thus <= */
+ for (pin = 0; pin <= pctldev->desc->maxpin; pin++) {
+ struct pin_desc *desc;
+
+ desc = pin_desc_get(pctldev, pin);
+ /* Pin space may be sparse */
+ if (desc == NULL)
+ continue;
+
+ seq_printf(s, "pin %d (%s) ", pin,
+ desc->name ? desc->name : "unnamed");
+
+ /* Driver-specific info per pin */
+ if (ops->pin_dbg_show)
+ ops->pin_dbg_show(pctldev, s, pin);
+
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+
+static int pinctrl_groups_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ const struct pinctrl_ops *ops = pctldev->desc->pctlops;
+ unsigned selector = 0;
+
+ /* No grouping */
+ if (!ops)
+ return 0;
+
+ seq_puts(s, "registered pin groups:\n");
+ while (ops->list_groups(pctldev, selector) >= 0) {
+ const unsigned *pins;
+ unsigned num_pins;
+ const char *gname = ops->get_group_name(pctldev, selector);
+ int ret;
+ int i;
+
+ ret = ops->get_group_pins(pctldev, selector,
+ &pins, &num_pins);
+ if (ret)
+ seq_printf(s, "%s [ERROR GETTING PINS]\n",
+ gname);
+ else {
+ seq_printf(s, "group: %s, pins = [ ", gname);
+ for (i = 0; i < num_pins; i++)
+ seq_printf(s, "%d ", pins[i]);
+ seq_puts(s, "]\n");
+ }
+ selector++;
+ }
+
+
+ return 0;
+}
+
+static int pinctrl_gpioranges_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ struct pinctrl_gpio_range *range = NULL;
+
+ seq_puts(s, "GPIO ranges handled:\n");
+
+ /* Loop over the ranges */
+ mutex_lock(&pctldev->gpio_ranges_lock);
+ list_for_each_entry(range, &pctldev->gpio_ranges, node) {
+ seq_printf(s, "%u: %s [%u - %u]\n", range->id, range->name,
+ range->base, (range->base + range->npins - 1));
+ }
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+
+ return 0;
+}
+
+static int pinctrl_devices_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev;
+
+ seq_puts(s, "name [pinmux]\n");
+ mutex_lock(&pinctrldev_list_mutex);
+ list_for_each_entry(pctldev, &pinctrldev_list, node) {
+ seq_printf(s, "%s ", pctldev->desc->name);
+ if (pctldev->desc->pmxops)
+ seq_puts(s, "yes");
+ else
+ seq_puts(s, "no");
+ seq_puts(s, "\n");
+ }
+ mutex_unlock(&pinctrldev_list_mutex);
+
+ return 0;
+}
+
+static int pinctrl_pins_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinctrl_pins_show, inode->i_private);
+}
+
+static int pinctrl_groups_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinctrl_groups_show, inode->i_private);
+}
+
+static int pinctrl_gpioranges_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinctrl_gpioranges_show, inode->i_private);
+}
+
+static int pinctrl_devices_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinctrl_devices_show, NULL);
+}
+
+static const struct file_operations pinctrl_pins_ops = {
+ .open = pinctrl_pins_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinctrl_groups_ops = {
+ .open = pinctrl_groups_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinctrl_gpioranges_ops = {
+ .open = pinctrl_gpioranges_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinctrl_devices_ops = {
+ .open = pinctrl_devices_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *debugfs_root;
+
+static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
+{
+ static struct dentry *device_root;
+
+ device_root = debugfs_create_dir(dev_name(&pctldev->dev),
+ debugfs_root);
+ if (IS_ERR(device_root) || !device_root) {
+ pr_warn("failed to create debugfs directory for %s\n",
+ dev_name(&pctldev->dev));
+ return;
+ }
+ debugfs_create_file("pins", S_IFREG | S_IRUGO,
+ device_root, pctldev, &pinctrl_pins_ops);
+ debugfs_create_file("pingroups", S_IFREG | S_IRUGO,
+ device_root, pctldev, &pinctrl_groups_ops);
+ debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
+ device_root, pctldev, &pinctrl_gpioranges_ops);
+ pinmux_init_device_debugfs(device_root, pctldev);
+}
+
+static void pinctrl_init_debugfs(void)
+{
+ debugfs_root = debugfs_create_dir("pinctrl", NULL);
+ if (IS_ERR(debugfs_root) || !debugfs_root) {
+ pr_warn("failed to create debugfs directory\n");
+ debugfs_root = NULL;
+ return;
+ }
+
+ debugfs_create_file("pinctrl-devices", S_IFREG | S_IRUGO,
+ debugfs_root, NULL, &pinctrl_devices_ops);
+ pinmux_init_debugfs(debugfs_root);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
+{
+}
+
+static void pinctrl_init_debugfs(void)
+{
+}
+
+#endif
+
+/**
+ * pinctrl_register() - register a pin controller device
+ * @pctldesc: descriptor for this pin controller
+ * @dev: parent device for this pin controller
+ * @driver_data: private pin controller data for this pin controller
+ */
+struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
+ struct device *dev, void *driver_data)
+{
+ static atomic_t pinmux_no = ATOMIC_INIT(0);
+ struct pinctrl_dev *pctldev;
+ int ret;
+
+ if (pctldesc == NULL)
+ return NULL;
+ if (pctldesc->name == NULL)
+ return NULL;
+
+ /* If we're implementing pinmuxing, check the ops for sanity */
+ if (pctldesc->pmxops) {
+ ret = pinmux_check_ops(pctldesc->pmxops);
+ if (ret) {
+ pr_err("%s pinmux ops lacks necessary functions\n",
+ pctldesc->name);
+ return NULL;
+ }
+ }
+
+ pctldev = kzalloc(sizeof(struct pinctrl_dev), GFP_KERNEL);
+ if (pctldev == NULL)
+ return NULL;
+
+ /* Initialize pin control device struct */
+ pctldev->owner = pctldesc->owner;
+ pctldev->desc = pctldesc;
+ pctldev->driver_data = driver_data;
+ INIT_RADIX_TREE(&pctldev->pin_desc_tree, GFP_KERNEL);
+ spin_lock_init(&pctldev->pin_desc_tree_lock);
+ INIT_LIST_HEAD(&pctldev->gpio_ranges);
+ mutex_init(&pctldev->gpio_ranges_lock);
+
+ /* Register device */
+ pctldev->dev.parent = dev;
+ dev_set_name(&pctldev->dev, "pinctrl.%d",
+ atomic_inc_return(&pinmux_no) - 1);
+ pctldev->dev.release = pinctrl_dev_release;
+ ret = device_register(&pctldev->dev);
+ if (ret != 0) {
+ pr_err("error in device registration\n");
+ goto out_reg_dev_err;
+ }
+ dev_set_drvdata(&pctldev->dev, pctldev);
+
+ /* Register all the pins */
+ pr_debug("try to register %d pins on %s...\n",
+ pctldesc->npins, pctldesc->name);
+ ret = pinctrl_register_pins(pctldev, pctldesc->pins, pctldesc->npins);
+ if (ret) {
+ pr_err("error during pin registration\n");
+ pinctrl_free_pindescs(pctldev, pctldesc->pins,
+ pctldesc->npins);
+ goto out_reg_pins_err;
+ }
+
+ pinctrl_init_device_debugfs(pctldev);
+ mutex_lock(&pinctrldev_list_mutex);
+ list_add(&pctldev->node, &pinctrldev_list);
+ mutex_unlock(&pinctrldev_list_mutex);
+ pinmux_hog_maps(pctldev);
+ return pctldev;
+
+out_reg_pins_err:
+ device_del(&pctldev->dev);
+out_reg_dev_err:
+ put_device(&pctldev->dev);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pinctrl_register);
+
+/**
+ * pinctrl_unregister() - unregister pinmux
+ * @pctldev: pin controller to unregister
+ *
+ * Called by pinmux drivers to unregister a pinmux.
+ */
+void pinctrl_unregister(struct pinctrl_dev *pctldev)
+{
+ if (pctldev == NULL)
+ return;
+
+ pinmux_unhog_maps(pctldev);
+ /* TODO: check that no pinmuxes are still active? */
+ mutex_lock(&pinctrldev_list_mutex);
+ list_del(&pctldev->node);
+ mutex_unlock(&pinctrldev_list_mutex);
+ /* Destroy descriptor tree */
+ pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
+ pctldev->desc->npins);
+ device_unregister(&pctldev->dev);
+}
+EXPORT_SYMBOL_GPL(pinctrl_unregister);
+
+static int __init pinctrl_init(void)
+{
+ pr_info("initialized pinctrl subsystem\n");
+ pinctrl_init_debugfs();
+ return 0;
+}
+
+/* init early since many drivers really need to initialized pinmux early */
+core_initcall(pinctrl_init);
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
new file mode 100644
index 00000000000..472fa1341cc
--- /dev/null
+++ b/drivers/pinctrl/core.h
@@ -0,0 +1,71 @@
+/*
+ * Core private header for the pin control subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+/**
+ * struct pinctrl_dev - pin control class device
+ * @node: node to include this pin controller in the global pin controller list
+ * @desc: the pin controller descriptor supplied when initializing this pin
+ * controller
+ * @pin_desc_tree: each pin descriptor for this pin controller is stored in
+ * this radix tree
+ * @pin_desc_tree_lock: lock for the descriptor tree
+ * @gpio_ranges: a list of GPIO ranges that is handled by this pin controller,
+ * ranges are added to this list at runtime
+ * @gpio_ranges_lock: lock for the GPIO ranges list
+ * @dev: the device entry for this pin controller
+ * @owner: module providing the pin controller, used for refcounting
+ * @driver_data: driver data for drivers registering to the pin controller
+ * subsystem
+ * @pinmux_hogs_lock: lock for the pinmux hog list
+ * @pinmux_hogs: list of pinmux maps hogged by this device
+ */
+struct pinctrl_dev {
+ struct list_head node;
+ struct pinctrl_desc *desc;
+ struct radix_tree_root pin_desc_tree;
+ spinlock_t pin_desc_tree_lock;
+ struct list_head gpio_ranges;
+ struct mutex gpio_ranges_lock;
+ struct device dev;
+ struct module *owner;
+ void *driver_data;
+#ifdef CONFIG_PINMUX
+ struct mutex pinmux_hogs_lock;
+ struct list_head pinmux_hogs;
+#endif
+};
+
+/**
+ * struct pin_desc - pin descriptor for each physical pin in the arch
+ * @pctldev: corresponding pin control device
+ * @name: a name for the pin, e.g. the name of the pin/pad/finger on a
+ * datasheet or such
+ * @lock: a lock to protect the descriptor structure
+ * @mux_requested: whether the pin is already requested by pinmux or not
+ * @mux_function: a named muxing function for the pin that will be passed to
+ * subdrivers and shown in debugfs etc
+ */
+struct pin_desc {
+ struct pinctrl_dev *pctldev;
+ const char *name;
+ spinlock_t lock;
+ /* These fields only added when supporting pinmux drivers */
+#ifdef CONFIG_PINMUX
+ const char *mux_function;
+#endif
+};
+
+struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
+ const char *dev_name);
+struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin);
+int pinctrl_get_device_gpio_range(unsigned gpio,
+ struct pinctrl_dev **outdev,
+ struct pinctrl_gpio_range **outrange);
diff --git a/drivers/pinctrl/pinmux-sirf.c b/drivers/pinctrl/pinmux-sirf.c
new file mode 100644
index 00000000000..d76cae62095
--- /dev/null
+++ b/drivers/pinctrl/pinmux-sirf.c
@@ -0,0 +1,1215 @@
+/*
+ * pinmux driver for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/bitops.h>
+
+#define DRIVER_NAME "pinmux-sirf"
+
+#define SIRFSOC_NUM_PADS 622
+#define SIRFSOC_GPIO_PAD_EN(g) ((g)*0x100 + 0x84)
+#define SIRFSOC_RSC_PIN_MUX 0x4
+
+/*
+ * pad list for the pinmux subsystem
+ * refer to CS-131858-DC-6A.xls
+ */
+static const struct pinctrl_pin_desc sirfsoc_pads[] = {
+ PINCTRL_PIN(4, "pwm0"),
+ PINCTRL_PIN(5, "pwm1"),
+ PINCTRL_PIN(6, "pwm2"),
+ PINCTRL_PIN(7, "pwm3"),
+ PINCTRL_PIN(8, "warm_rst_b"),
+ PINCTRL_PIN(9, "odo_0"),
+ PINCTRL_PIN(10, "odo_1"),
+ PINCTRL_PIN(11, "dr_dir"),
+ PINCTRL_PIN(13, "scl_1"),
+ PINCTRL_PIN(15, "sda_1"),
+ PINCTRL_PIN(16, "x_ldd[16]"),
+ PINCTRL_PIN(17, "x_ldd[17]"),
+ PINCTRL_PIN(18, "x_ldd[18]"),
+ PINCTRL_PIN(19, "x_ldd[19]"),
+ PINCTRL_PIN(20, "x_ldd[20]"),
+ PINCTRL_PIN(21, "x_ldd[21]"),
+ PINCTRL_PIN(22, "x_ldd[22]"),
+ PINCTRL_PIN(23, "x_ldd[23], lcdrom_frdy"),
+ PINCTRL_PIN(24, "gps_sgn"),
+ PINCTRL_PIN(25, "gps_mag"),
+ PINCTRL_PIN(26, "gps_clk"),
+ PINCTRL_PIN(27, "sd_cd_b_1"),
+ PINCTRL_PIN(28, "sd_vcc_on_1"),
+ PINCTRL_PIN(29, "sd_wp_b_1"),
+ PINCTRL_PIN(30, "sd_clk_3"),
+ PINCTRL_PIN(31, "sd_cmd_3"),
+
+ PINCTRL_PIN(32, "x_sd_dat_3[0]"),
+ PINCTRL_PIN(33, "x_sd_dat_3[1]"),
+ PINCTRL_PIN(34, "x_sd_dat_3[2]"),
+ PINCTRL_PIN(35, "x_sd_dat_3[3]"),
+ PINCTRL_PIN(36, "x_sd_clk_4"),
+ PINCTRL_PIN(37, "x_sd_cmd_4"),
+ PINCTRL_PIN(38, "x_sd_dat_4[0]"),
+ PINCTRL_PIN(39, "x_sd_dat_4[1]"),
+ PINCTRL_PIN(40, "x_sd_dat_4[2]"),
+ PINCTRL_PIN(41, "x_sd_dat_4[3]"),
+ PINCTRL_PIN(42, "x_cko_1"),
+ PINCTRL_PIN(43, "x_ac97_bit_clk"),
+ PINCTRL_PIN(44, "x_ac97_dout"),
+ PINCTRL_PIN(45, "x_ac97_din"),
+ PINCTRL_PIN(46, "x_ac97_sync"),
+ PINCTRL_PIN(47, "x_txd_1"),
+ PINCTRL_PIN(48, "x_txd_2"),
+ PINCTRL_PIN(49, "x_rxd_1"),
+ PINCTRL_PIN(50, "x_rxd_2"),
+ PINCTRL_PIN(51, "x_usclk_0"),
+ PINCTRL_PIN(52, "x_utxd_0"),
+ PINCTRL_PIN(53, "x_urxd_0"),
+ PINCTRL_PIN(54, "x_utfs_0"),
+ PINCTRL_PIN(55, "x_urfs_0"),
+ PINCTRL_PIN(56, "x_usclk_1"),
+ PINCTRL_PIN(57, "x_utxd_1"),
+ PINCTRL_PIN(58, "x_urxd_1"),
+ PINCTRL_PIN(59, "x_utfs_1"),
+ PINCTRL_PIN(60, "x_urfs_1"),
+ PINCTRL_PIN(61, "x_usclk_2"),
+ PINCTRL_PIN(62, "x_utxd_2"),
+ PINCTRL_PIN(63, "x_urxd_2"),
+
+ PINCTRL_PIN(64, "x_utfs_2"),
+ PINCTRL_PIN(65, "x_urfs_2"),
+ PINCTRL_PIN(66, "x_df_we_b"),
+ PINCTRL_PIN(67, "x_df_re_b"),
+ PINCTRL_PIN(68, "x_txd_0"),
+ PINCTRL_PIN(69, "x_rxd_0"),
+ PINCTRL_PIN(78, "x_cko_0"),
+ PINCTRL_PIN(79, "x_vip_pxd[7]"),
+ PINCTRL_PIN(80, "x_vip_pxd[6]"),
+ PINCTRL_PIN(81, "x_vip_pxd[5]"),
+ PINCTRL_PIN(82, "x_vip_pxd[4]"),
+ PINCTRL_PIN(83, "x_vip_pxd[3]"),
+ PINCTRL_PIN(84, "x_vip_pxd[2]"),
+ PINCTRL_PIN(85, "x_vip_pxd[1]"),
+ PINCTRL_PIN(86, "x_vip_pxd[0]"),
+ PINCTRL_PIN(87, "x_vip_vsync"),
+ PINCTRL_PIN(88, "x_vip_hsync"),
+ PINCTRL_PIN(89, "x_vip_pxclk"),
+ PINCTRL_PIN(90, "x_sda_0"),
+ PINCTRL_PIN(91, "x_scl_0"),
+ PINCTRL_PIN(92, "x_df_ry_by"),
+ PINCTRL_PIN(93, "x_df_cs_b[1]"),
+ PINCTRL_PIN(94, "x_df_cs_b[0]"),
+ PINCTRL_PIN(95, "x_l_pclk"),
+
+ PINCTRL_PIN(96, "x_l_lck"),
+ PINCTRL_PIN(97, "x_l_fck"),
+ PINCTRL_PIN(98, "x_l_de"),
+ PINCTRL_PIN(99, "x_ldd[0]"),
+ PINCTRL_PIN(100, "x_ldd[1]"),
+ PINCTRL_PIN(101, "x_ldd[2]"),
+ PINCTRL_PIN(102, "x_ldd[3]"),
+ PINCTRL_PIN(103, "x_ldd[4]"),
+ PINCTRL_PIN(104, "x_ldd[5]"),
+ PINCTRL_PIN(105, "x_ldd[6]"),
+ PINCTRL_PIN(106, "x_ldd[7]"),
+ PINCTRL_PIN(107, "x_ldd[8]"),
+ PINCTRL_PIN(108, "x_ldd[9]"),
+ PINCTRL_PIN(109, "x_ldd[10]"),
+ PINCTRL_PIN(110, "x_ldd[11]"),
+ PINCTRL_PIN(111, "x_ldd[12]"),
+ PINCTRL_PIN(112, "x_ldd[13]"),
+ PINCTRL_PIN(113, "x_ldd[14]"),
+ PINCTRL_PIN(114, "x_ldd[15]"),
+};
+
+/**
+ * @dev: a pointer back to containing device
+ * @virtbase: the offset to the controller in virtual memory
+ */
+struct sirfsoc_pmx {
+ struct device *dev;
+ struct pinctrl_dev *pmx;
+ void __iomem *gpio_virtbase;
+ void __iomem *rsc_virtbase;
+};
+
+/* SIRFSOC_GPIO_PAD_EN set */
+struct sirfsoc_muxmask {
+ unsigned long group;
+ unsigned long mask;
+};
+
+struct sirfsoc_padmux {
+ unsigned long muxmask_counts;
+ const struct sirfsoc_muxmask *muxmask;
+ /* RSC_PIN_MUX set */
+ unsigned long funcmask;
+ unsigned long funcval;
+};
+
+ /**
+ * struct sirfsoc_pin_group - describes a SiRFprimaII pin group
+ * @name: the name of this specific pin group
+ * @pins: an array of discrete physical pins used in this group, taken
+ * from the driver-local pin enumeration space
+ * @num_pins: the number of pins in this group array, i.e. the number of
+ * elements in .pins so we can iterate over that array
+ */
+struct sirfsoc_pin_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned num_pins;
+};
+
+static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = {
+ {
+ .group = 3,
+ .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
+ BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+ BIT(17) | BIT(18),
+ }, {
+ .group = 2,
+ .mask = BIT(31),
+ },
+};
+
+static const struct sirfsoc_padmux lcd_16bits_padmux = {
+ .muxmask_counts = ARRAY_SIZE(lcd_16bits_sirfsoc_muxmask),
+ .muxmask = lcd_16bits_sirfsoc_muxmask,
+ .funcmask = BIT(4),
+ .funcval = 0,
+};
+
+static const unsigned lcd_16bits_pins[] = { 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
+
+static const struct sirfsoc_muxmask lcd_18bits_muxmask[] = {
+ {
+ .group = 3,
+ .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
+ BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+ BIT(17) | BIT(18),
+ }, {
+ .group = 2,
+ .mask = BIT(31),
+ }, {
+ .group = 0,
+ .mask = BIT(16) | BIT(17),
+ },
+};
+
+static const struct sirfsoc_padmux lcd_18bits_padmux = {
+ .muxmask_counts = ARRAY_SIZE(lcd_18bits_muxmask),
+ .muxmask = lcd_18bits_muxmask,
+ .funcmask = BIT(4),
+ .funcval = 0,
+};
+
+static const unsigned lcd_18bits_pins[] = { 16, 17, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114};
+
+static const struct sirfsoc_muxmask lcd_24bits_muxmask[] = {
+ {
+ .group = 3,
+ .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
+ BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+ BIT(17) | BIT(18),
+ }, {
+ .group = 2,
+ .mask = BIT(31),
+ }, {
+ .group = 0,
+ .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
+ },
+};
+
+static const struct sirfsoc_padmux lcd_24bits_padmux = {
+ .muxmask_counts = ARRAY_SIZE(lcd_24bits_muxmask),
+ .muxmask = lcd_24bits_muxmask,
+ .funcmask = BIT(4),
+ .funcval = 0,
+};
+
+static const unsigned lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
+
+static const struct sirfsoc_muxmask lcdrom_muxmask[] = {
+ {
+ .group = 3,
+ .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
+ BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+ BIT(17) | BIT(18),
+ }, {
+ .group = 2,
+ .mask = BIT(31),
+ }, {
+ .group = 0,
+ .mask = BIT(23),
+ },
+};
+
+static const struct sirfsoc_padmux lcdrom_padmux = {
+ .muxmask_counts = ARRAY_SIZE(lcdrom_muxmask),
+ .muxmask = lcdrom_muxmask,
+ .funcmask = BIT(4),
+ .funcval = BIT(4),
+};
+
+static const unsigned lcdrom_pins[] = { 23, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
+
+static const struct sirfsoc_muxmask uart0_muxmask[] = {
+ {
+ .group = 2,
+ .mask = BIT(4) | BIT(5),
+ }, {
+ .group = 1,
+ .mask = BIT(23) | BIT(28),
+ },
+};
+
+static const struct sirfsoc_padmux uart0_padmux = {
+ .muxmask_counts = ARRAY_SIZE(uart0_muxmask),
+ .muxmask = uart0_muxmask,
+ .funcmask = BIT(9),
+ .funcval = BIT(9),
+};
+
+static const unsigned uart0_pins[] = { 55, 60, 68, 69 };
+
+static const struct sirfsoc_muxmask uart0_nostreamctrl_muxmask[] = {
+ {
+ .group = 2,
+ .mask = BIT(4) | BIT(5),
+ },
+};
+
+static const struct sirfsoc_padmux uart0_nostreamctrl_padmux = {
+ .muxmask_counts = ARRAY_SIZE(uart0_nostreamctrl_muxmask),
+ .muxmask = uart0_nostreamctrl_muxmask,
+};
+
+static const unsigned uart0_nostreamctrl_pins[] = { 68, 39 };
+
+static const struct sirfsoc_muxmask uart1_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(15) | BIT(17),
+ },
+};
+
+static const struct sirfsoc_padmux uart1_padmux = {
+ .muxmask_counts = ARRAY_SIZE(uart1_muxmask),
+ .muxmask = uart1_muxmask,
+};
+
+static const unsigned uart1_pins[] = { 47, 49 };
+
+static const struct sirfsoc_muxmask uart2_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(16) | BIT(18) | BIT(24) | BIT(27),
+ },
+};
+
+static const struct sirfsoc_padmux uart2_padmux = {
+ .muxmask_counts = ARRAY_SIZE(uart2_muxmask),
+ .muxmask = uart2_muxmask,
+ .funcmask = BIT(10),
+ .funcval = BIT(10),
+};
+
+static const unsigned uart2_pins[] = { 48, 50, 56, 59 };
+
+static const struct sirfsoc_muxmask uart2_nostreamctrl_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(16) | BIT(18),
+ },
+};
+
+static const struct sirfsoc_padmux uart2_nostreamctrl_padmux = {
+ .muxmask_counts = ARRAY_SIZE(uart2_nostreamctrl_muxmask),
+ .muxmask = uart2_nostreamctrl_muxmask,
+};
+
+static const unsigned uart2_nostreamctrl_pins[] = { 48, 50 };
+
+static const struct sirfsoc_muxmask sdmmc3_muxmask[] = {
+ {
+ .group = 0,
+ .mask = BIT(30) | BIT(31),
+ }, {
+ .group = 1,
+ .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
+ },
+};
+
+static const struct sirfsoc_padmux sdmmc3_padmux = {
+ .muxmask_counts = ARRAY_SIZE(sdmmc3_muxmask),
+ .muxmask = sdmmc3_muxmask,
+ .funcmask = BIT(7),
+ .funcval = 0,
+};
+
+static const unsigned sdmmc3_pins[] = { 30, 31, 32, 33, 34, 35 };
+
+static const struct sirfsoc_muxmask spi0_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
+ },
+};
+
+static const struct sirfsoc_padmux spi0_padmux = {
+ .muxmask_counts = ARRAY_SIZE(spi0_muxmask),
+ .muxmask = spi0_muxmask,
+ .funcmask = BIT(7),
+ .funcval = BIT(7),
+};
+
+static const unsigned spi0_pins[] = { 32, 33, 34, 35 };
+
+static const struct sirfsoc_muxmask sdmmc4_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9),
+ },
+};
+
+static const struct sirfsoc_padmux sdmmc4_padmux = {
+ .muxmask_counts = ARRAY_SIZE(sdmmc4_muxmask),
+ .muxmask = sdmmc4_muxmask,
+};
+
+static const unsigned sdmmc4_pins[] = { 36, 37, 38, 39, 40, 41 };
+
+static const struct sirfsoc_muxmask cko1_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(10),
+ },
+};
+
+static const struct sirfsoc_padmux cko1_padmux = {
+ .muxmask_counts = ARRAY_SIZE(cko1_muxmask),
+ .muxmask = cko1_muxmask,
+ .funcmask = BIT(3),
+ .funcval = 0,
+};
+
+static const unsigned cko1_pins[] = { 42 };
+
+static const struct sirfsoc_muxmask i2s_muxmask[] = {
+ {
+ .group = 1,
+ .mask =
+ BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(19)
+ | BIT(23) | BIT(28),
+ },
+};
+
+static const struct sirfsoc_padmux i2s_padmux = {
+ .muxmask_counts = ARRAY_SIZE(i2s_muxmask),
+ .muxmask = i2s_muxmask,
+ .funcmask = BIT(3) | BIT(9),
+ .funcval = BIT(3),
+};
+
+static const unsigned i2s_pins[] = { 42, 43, 44, 45, 46, 51, 55, 60 };
+
+static const struct sirfsoc_muxmask ac97_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
+ },
+};
+
+static const struct sirfsoc_padmux ac97_padmux = {
+ .muxmask_counts = ARRAY_SIZE(ac97_muxmask),
+ .muxmask = ac97_muxmask,
+ .funcmask = BIT(8),
+ .funcval = 0,
+};
+
+static const unsigned ac97_pins[] = { 33, 34, 35, 36 };
+
+static const struct sirfsoc_muxmask spi1_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
+ },
+};
+
+static const struct sirfsoc_padmux spi1_padmux = {
+ .muxmask_counts = ARRAY_SIZE(spi1_muxmask),
+ .muxmask = spi1_muxmask,
+ .funcmask = BIT(8),
+ .funcval = BIT(8),
+};
+
+static const unsigned spi1_pins[] = { 33, 34, 35, 36 };
+
+static const struct sirfsoc_muxmask sdmmc1_muxmask[] = {
+ {
+ .group = 0,
+ .mask = BIT(27) | BIT(28) | BIT(29),
+ },
+};
+
+static const struct sirfsoc_padmux sdmmc1_padmux = {
+ .muxmask_counts = ARRAY_SIZE(sdmmc1_muxmask),
+ .muxmask = sdmmc1_muxmask,
+};
+
+static const unsigned sdmmc1_pins[] = { 27, 28, 29 };
+
+static const struct sirfsoc_muxmask gps_muxmask[] = {
+ {
+ .group = 0,
+ .mask = BIT(24) | BIT(25) | BIT(26),
+ },
+};
+
+static const struct sirfsoc_padmux gps_padmux = {
+ .muxmask_counts = ARRAY_SIZE(gps_muxmask),
+ .muxmask = gps_muxmask,
+ .funcmask = BIT(12) | BIT(13) | BIT(14),
+ .funcval = BIT(12),
+};
+
+static const unsigned gps_pins[] = { 24, 25, 26 };
+
+static const struct sirfsoc_muxmask sdmmc5_muxmask[] = {
+ {
+ .group = 0,
+ .mask = BIT(24) | BIT(25) | BIT(26),
+ }, {
+ .group = 1,
+ .mask = BIT(29),
+ }, {
+ .group = 2,
+ .mask = BIT(0) | BIT(1),
+ },
+};
+
+static const struct sirfsoc_padmux sdmmc5_padmux = {
+ .muxmask_counts = ARRAY_SIZE(sdmmc5_muxmask),
+ .muxmask = sdmmc5_muxmask,
+ .funcmask = BIT(13) | BIT(14),
+ .funcval = BIT(13) | BIT(14),
+};
+
+static const unsigned sdmmc5_pins[] = { 24, 25, 26, 61, 64, 65 };
+
+static const struct sirfsoc_muxmask usp0_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
+ },
+};
+
+static const struct sirfsoc_padmux usp0_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp0_muxmask),
+ .muxmask = usp0_muxmask,
+ .funcmask = BIT(1) | BIT(2) | BIT(6) | BIT(9),
+ .funcval = 0,
+};
+
+static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
+
+static const struct sirfsoc_muxmask usp1_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(24) | BIT(25) | BIT(26) | BIT(27) | BIT(28),
+ },
+};
+
+static const struct sirfsoc_padmux usp1_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp1_muxmask),
+ .muxmask = usp1_muxmask,
+ .funcmask = BIT(1) | BIT(9) | BIT(10) | BIT(11),
+ .funcval = 0,
+};
+
+static const unsigned usp1_pins[] = { 56, 57, 58, 59, 60 };
+
+static const struct sirfsoc_muxmask usp2_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(29) | BIT(30) | BIT(31),
+ }, {
+ .group = 2,
+ .mask = BIT(0) | BIT(1),
+ },
+};
+
+static const struct sirfsoc_padmux usp2_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp2_muxmask),
+ .muxmask = usp2_muxmask,
+ .funcmask = BIT(13) | BIT(14),
+ .funcval = 0,
+};
+
+static const unsigned usp2_pins[] = { 61, 62, 63, 64, 65 };
+
+static const struct sirfsoc_muxmask nand_muxmask[] = {
+ {
+ .group = 2,
+ .mask = BIT(2) | BIT(3) | BIT(28) | BIT(29) | BIT(30),
+ },
+};
+
+static const struct sirfsoc_padmux nand_padmux = {
+ .muxmask_counts = ARRAY_SIZE(nand_muxmask),
+ .muxmask = nand_muxmask,
+ .funcmask = BIT(5),
+ .funcval = 0,
+};
+
+static const unsigned nand_pins[] = { 64, 65, 92, 93, 94 };
+
+static const struct sirfsoc_padmux sdmmc0_padmux = {
+ .muxmask_counts = 0,
+ .funcmask = BIT(5),
+ .funcval = 0,
+};
+
+static const unsigned sdmmc0_pins[] = { };
+
+static const struct sirfsoc_muxmask sdmmc2_muxmask[] = {
+ {
+ .group = 2,
+ .mask = BIT(2) | BIT(3),
+ },
+};
+
+static const struct sirfsoc_padmux sdmmc2_padmux = {
+ .muxmask_counts = ARRAY_SIZE(sdmmc2_muxmask),
+ .muxmask = sdmmc2_muxmask,
+ .funcmask = BIT(5),
+ .funcval = BIT(5),
+};
+
+static const unsigned sdmmc2_pins[] = { 66, 67 };
+
+static const struct sirfsoc_muxmask cko0_muxmask[] = {
+ {
+ .group = 2,
+ .mask = BIT(14),
+ },
+};
+
+static const struct sirfsoc_padmux cko0_padmux = {
+ .muxmask_counts = ARRAY_SIZE(cko0_muxmask),
+ .muxmask = cko0_muxmask,
+};
+
+static const unsigned cko0_pins[] = { 78 };
+
+static const struct sirfsoc_muxmask vip_muxmask[] = {
+ {
+ .group = 2,
+ .mask = BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19)
+ | BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) |
+ BIT(25),
+ },
+};
+
+static const struct sirfsoc_padmux vip_padmux = {
+ .muxmask_counts = ARRAY_SIZE(vip_muxmask),
+ .muxmask = vip_muxmask,
+ .funcmask = BIT(0),
+ .funcval = 0,
+};
+
+static const unsigned vip_pins[] = { 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 };
+
+static const struct sirfsoc_muxmask i2c0_muxmask[] = {
+ {
+ .group = 2,
+ .mask = BIT(26) | BIT(27),
+ },
+};
+
+static const struct sirfsoc_padmux i2c0_padmux = {
+ .muxmask_counts = ARRAY_SIZE(i2c0_muxmask),
+ .muxmask = i2c0_muxmask,
+};
+
+static const unsigned i2c0_pins[] = { 90, 91 };
+
+static const struct sirfsoc_muxmask i2c1_muxmask[] = {
+ {
+ .group = 0,
+ .mask = BIT(13) | BIT(15),
+ },
+};
+
+static const struct sirfsoc_padmux i2c1_padmux = {
+ .muxmask_counts = ARRAY_SIZE(i2c1_muxmask),
+ .muxmask = i2c1_muxmask,
+};
+
+static const unsigned i2c1_pins[] = { 13, 15 };
+
+static const struct sirfsoc_muxmask viprom_muxmask[] = {
+ {
+ .group = 2,
+ .mask = BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19)
+ | BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) |
+ BIT(25),
+ }, {
+ .group = 0,
+ .mask = BIT(12),
+ },
+};
+
+static const struct sirfsoc_padmux viprom_padmux = {
+ .muxmask_counts = ARRAY_SIZE(viprom_muxmask),
+ .muxmask = viprom_muxmask,
+ .funcmask = BIT(0),
+ .funcval = BIT(0),
+};
+
+static const unsigned viprom_pins[] = { 12, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 };
+
+static const struct sirfsoc_muxmask pwm0_muxmask[] = {
+ {
+ .group = 0,
+ .mask = BIT(4),
+ },
+};
+
+static const struct sirfsoc_padmux pwm0_padmux = {
+ .muxmask_counts = ARRAY_SIZE(pwm0_muxmask),
+ .muxmask = pwm0_muxmask,
+ .funcmask = BIT(12),
+ .funcval = 0,
+};
+
+static const unsigned pwm0_pins[] = { 4 };
+
+static const struct sirfsoc_muxmask pwm1_muxmask[] = {
+ {
+ .group = 0,
+ .mask = BIT(5),
+ },
+};
+
+static const struct sirfsoc_padmux pwm1_padmux = {
+ .muxmask_counts = ARRAY_SIZE(pwm1_muxmask),
+ .muxmask = pwm1_muxmask,
+};
+
+static const unsigned pwm1_pins[] = { 5 };
+
+static const struct sirfsoc_muxmask pwm2_muxmask[] = {
+ {
+ .group = 0,
+ .mask = BIT(6),
+ },
+};
+
+static const struct sirfsoc_padmux pwm2_padmux = {
+ .muxmask_counts = ARRAY_SIZE(pwm2_muxmask),
+ .muxmask = pwm2_muxmask,
+};
+
+static const unsigned pwm2_pins[] = { 6 };
+
+static const struct sirfsoc_muxmask pwm3_muxmask[] = {
+ {
+ .group = 0,
+ .mask = BIT(7),
+ },
+};
+
+static const struct sirfsoc_padmux pwm3_padmux = {
+ .muxmask_counts = ARRAY_SIZE(pwm3_muxmask),
+ .muxmask = pwm3_muxmask,
+};
+
+static const unsigned pwm3_pins[] = { 7 };
+
+static const struct sirfsoc_muxmask warm_rst_muxmask[] = {
+ {
+ .group = 0,
+ .mask = BIT(8),
+ },
+};
+
+static const struct sirfsoc_padmux warm_rst_padmux = {
+ .muxmask_counts = ARRAY_SIZE(warm_rst_muxmask),
+ .muxmask = warm_rst_muxmask,
+};
+
+static const unsigned warm_rst_pins[] = { 8 };
+
+static const struct sirfsoc_muxmask usb0_utmi_drvbus_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(22),
+ },
+};
+static const struct sirfsoc_padmux usb0_utmi_drvbus_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usb0_utmi_drvbus_muxmask),
+ .muxmask = usb0_utmi_drvbus_muxmask,
+ .funcmask = BIT(6),
+ .funcval = BIT(6), /* refer to PAD_UTMI_DRVVBUS0_ENABLE */
+};
+
+static const unsigned usb0_utmi_drvbus_pins[] = { 54 };
+
+static const struct sirfsoc_muxmask usb1_utmi_drvbus_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(27),
+ },
+};
+
+static const struct sirfsoc_padmux usb1_utmi_drvbus_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usb1_utmi_drvbus_muxmask),
+ .muxmask = usb1_utmi_drvbus_muxmask,
+ .funcmask = BIT(11),
+ .funcval = BIT(11), /* refer to PAD_UTMI_DRVVBUS1_ENABLE */
+};
+
+static const unsigned usb1_utmi_drvbus_pins[] = { 59 };
+
+static const struct sirfsoc_muxmask pulse_count_muxmask[] = {
+ {
+ .group = 0,
+ .mask = BIT(9) | BIT(10) | BIT(11),
+ },
+};
+
+static const struct sirfsoc_padmux pulse_count_padmux = {
+ .muxmask_counts = ARRAY_SIZE(pulse_count_muxmask),
+ .muxmask = pulse_count_muxmask,
+};
+
+static const unsigned pulse_count_pins[] = { 9, 10, 11 };
+
+#define SIRFSOC_PIN_GROUP(n, p) \
+ { \
+ .name = n, \
+ .pins = p, \
+ .num_pins = ARRAY_SIZE(p), \
+ }
+
+static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
+ SIRFSOC_PIN_GROUP("lcd_16bitsgrp", lcd_16bits_pins),
+ SIRFSOC_PIN_GROUP("lcd_18bitsgrp", lcd_18bits_pins),
+ SIRFSOC_PIN_GROUP("lcd_24bitsgrp", lcd_24bits_pins),
+ SIRFSOC_PIN_GROUP("lcdrom_grp", lcdrom_pins),
+ SIRFSOC_PIN_GROUP("uart0grp", uart0_pins),
+ SIRFSOC_PIN_GROUP("uart1grp", uart1_pins),
+ SIRFSOC_PIN_GROUP("uart2grp", uart2_pins),
+ SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins),
+ SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
+ SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
+ SIRFSOC_PIN_GROUP("usp2grp", usp2_pins),
+ SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins),
+ SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins),
+ SIRFSOC_PIN_GROUP("pwm0grp", pwm0_pins),
+ SIRFSOC_PIN_GROUP("pwm1grp", pwm1_pins),
+ SIRFSOC_PIN_GROUP("pwm2grp", pwm2_pins),
+ SIRFSOC_PIN_GROUP("pwm3grp", pwm3_pins),
+ SIRFSOC_PIN_GROUP("vipgrp", vip_pins),
+ SIRFSOC_PIN_GROUP("vipromgrp", viprom_pins),
+ SIRFSOC_PIN_GROUP("warm_rstgrp", warm_rst_pins),
+ SIRFSOC_PIN_GROUP("cko0_rstgrp", cko0_pins),
+ SIRFSOC_PIN_GROUP("cko1_rstgrp", cko1_pins),
+ SIRFSOC_PIN_GROUP("sdmmc0grp", sdmmc0_pins),
+ SIRFSOC_PIN_GROUP("sdmmc1grp", sdmmc1_pins),
+ SIRFSOC_PIN_GROUP("sdmmc2grp", sdmmc2_pins),
+ SIRFSOC_PIN_GROUP("sdmmc3grp", sdmmc3_pins),
+ SIRFSOC_PIN_GROUP("sdmmc4grp", sdmmc4_pins),
+ SIRFSOC_PIN_GROUP("sdmmc5grp", sdmmc5_pins),
+ SIRFSOC_PIN_GROUP("usb0_utmi_drvbusgrp", usb0_utmi_drvbus_pins),
+ SIRFSOC_PIN_GROUP("usb1_utmi_drvbusgrp", usb1_utmi_drvbus_pins),
+ SIRFSOC_PIN_GROUP("pulse_countgrp", pulse_count_pins),
+ SIRFSOC_PIN_GROUP("i2sgrp", i2s_pins),
+ SIRFSOC_PIN_GROUP("ac97grp", ac97_pins),
+ SIRFSOC_PIN_GROUP("nandgrp", nand_pins),
+ SIRFSOC_PIN_GROUP("spi0grp", spi0_pins),
+ SIRFSOC_PIN_GROUP("spi1grp", spi1_pins),
+ SIRFSOC_PIN_GROUP("gpsgrp", gps_pins),
+};
+
+static int sirfsoc_list_groups(struct pinctrl_dev *pctldev, unsigned selector)
+{
+ if (selector >= ARRAY_SIZE(sirfsoc_pin_groups))
+ return -EINVAL;
+ return 0;
+}
+
+static const char *sirfsoc_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ if (selector >= ARRAY_SIZE(sirfsoc_pin_groups))
+ return NULL;
+ return sirfsoc_pin_groups[selector].name;
+}
+
+static int sirfsoc_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ if (selector >= ARRAY_SIZE(sirfsoc_pin_groups))
+ return -EINVAL;
+ *pins = sirfsoc_pin_groups[selector].pins;
+ *num_pins = sirfsoc_pin_groups[selector].num_pins;
+ return 0;
+}
+
+static void sirfsoc_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned offset)
+{
+ seq_printf(s, " " DRIVER_NAME);
+}
+
+static struct pinctrl_ops sirfsoc_pctrl_ops = {
+ .list_groups = sirfsoc_list_groups,
+ .get_group_name = sirfsoc_get_group_name,
+ .get_group_pins = sirfsoc_get_group_pins,
+ .pin_dbg_show = sirfsoc_pin_dbg_show,
+};
+
+struct sirfsoc_pmx_func {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+ const struct sirfsoc_padmux *padmux;
+};
+
+static const char * const lcd_16bitsgrp[] = { "lcd_16bitsgrp" };
+static const char * const lcd_18bitsgrp[] = { "lcd_18bitsgrp" };
+static const char * const lcd_24bitsgrp[] = { "lcd_24bitsgrp" };
+static const char * const lcdromgrp[] = { "lcdromgrp" };
+static const char * const uart0grp[] = { "uart0grp" };
+static const char * const uart1grp[] = { "uart1grp" };
+static const char * const uart2grp[] = { "uart2grp" };
+static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
+static const char * const usp0grp[] = { "usp0grp" };
+static const char * const usp1grp[] = { "usp1grp" };
+static const char * const usp2grp[] = { "usp2grp" };
+static const char * const i2c0grp[] = { "i2c0grp" };
+static const char * const i2c1grp[] = { "i2c1grp" };
+static const char * const pwm0grp[] = { "pwm0grp" };
+static const char * const pwm1grp[] = { "pwm1grp" };
+static const char * const pwm2grp[] = { "pwm2grp" };
+static const char * const pwm3grp[] = { "pwm3grp" };
+static const char * const vipgrp[] = { "vipgrp" };
+static const char * const vipromgrp[] = { "vipromgrp" };
+static const char * const warm_rstgrp[] = { "warm_rstgrp" };
+static const char * const cko0grp[] = { "cko0grp" };
+static const char * const cko1grp[] = { "cko1grp" };
+static const char * const sdmmc0grp[] = { "sdmmc0grp" };
+static const char * const sdmmc1grp[] = { "sdmmc1grp" };
+static const char * const sdmmc2grp[] = { "sdmmc2grp" };
+static const char * const sdmmc3grp[] = { "sdmmc3grp" };
+static const char * const sdmmc4grp[] = { "sdmmc4grp" };
+static const char * const sdmmc5grp[] = { "sdmmc5grp" };
+static const char * const usb0_utmi_drvbusgrp[] = { "usb0_utmi_drvbusgrp" };
+static const char * const usb1_utmi_drvbusgrp[] = { "usb1_utmi_drvbusgrp" };
+static const char * const pulse_countgrp[] = { "pulse_countgrp" };
+static const char * const i2sgrp[] = { "i2sgrp" };
+static const char * const ac97grp[] = { "ac97grp" };
+static const char * const nandgrp[] = { "nandgrp" };
+static const char * const spi0grp[] = { "spi0grp" };
+static const char * const spi1grp[] = { "spi1grp" };
+static const char * const gpsgrp[] = { "gpsgrp" };
+
+#define SIRFSOC_PMX_FUNCTION(n, g, m) \
+ { \
+ .name = n, \
+ .groups = g, \
+ .num_groups = ARRAY_SIZE(g), \
+ .padmux = &m, \
+ }
+
+static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
+ SIRFSOC_PMX_FUNCTION("lcd_16bits", lcd_16bitsgrp, lcd_16bits_padmux),
+ SIRFSOC_PMX_FUNCTION("lcd_18bits", lcd_18bitsgrp, lcd_18bits_padmux),
+ SIRFSOC_PMX_FUNCTION("lcd_24bits", lcd_24bitsgrp, lcd_24bits_padmux),
+ SIRFSOC_PMX_FUNCTION("lcdrom", lcdromgrp, lcdrom_padmux),
+ SIRFSOC_PMX_FUNCTION("uart0", uart0grp, uart0_padmux),
+ SIRFSOC_PMX_FUNCTION("uart1", uart1grp, uart1_padmux),
+ SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux),
+ SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
+ SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
+ SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
+ SIRFSOC_PMX_FUNCTION("usp2", usp2grp, usp2_padmux),
+ SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux),
+ SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux),
+ SIRFSOC_PMX_FUNCTION("pwm0", pwm0grp, pwm0_padmux),
+ SIRFSOC_PMX_FUNCTION("pwm1", pwm1grp, pwm1_padmux),
+ SIRFSOC_PMX_FUNCTION("pwm2", pwm2grp, pwm2_padmux),
+ SIRFSOC_PMX_FUNCTION("pwm3", pwm3grp, pwm3_padmux),
+ SIRFSOC_PMX_FUNCTION("vip", vipgrp, vip_padmux),
+ SIRFSOC_PMX_FUNCTION("viprom", vipromgrp, viprom_padmux),
+ SIRFSOC_PMX_FUNCTION("warm_rst", warm_rstgrp, warm_rst_padmux),
+ SIRFSOC_PMX_FUNCTION("cko0", cko0grp, cko0_padmux),
+ SIRFSOC_PMX_FUNCTION("cko1", cko1grp, cko1_padmux),
+ SIRFSOC_PMX_FUNCTION("sdmmc0", sdmmc0grp, sdmmc0_padmux),
+ SIRFSOC_PMX_FUNCTION("sdmmc1", sdmmc1grp, sdmmc1_padmux),
+ SIRFSOC_PMX_FUNCTION("sdmmc2", sdmmc2grp, sdmmc2_padmux),
+ SIRFSOC_PMX_FUNCTION("sdmmc3", sdmmc3grp, sdmmc3_padmux),
+ SIRFSOC_PMX_FUNCTION("sdmmc4", sdmmc4grp, sdmmc4_padmux),
+ SIRFSOC_PMX_FUNCTION("sdmmc5", sdmmc5grp, sdmmc5_padmux),
+ SIRFSOC_PMX_FUNCTION("usb0_utmi_drvbus", usb0_utmi_drvbusgrp, usb0_utmi_drvbus_padmux),
+ SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus", usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux),
+ SIRFSOC_PMX_FUNCTION("pulse_count", pulse_countgrp, pulse_count_padmux),
+ SIRFSOC_PMX_FUNCTION("i2s", i2sgrp, i2s_padmux),
+ SIRFSOC_PMX_FUNCTION("ac97", ac97grp, ac97_padmux),
+ SIRFSOC_PMX_FUNCTION("nand", nandgrp, nand_padmux),
+ SIRFSOC_PMX_FUNCTION("spi0", spi0grp, spi0_padmux),
+ SIRFSOC_PMX_FUNCTION("spi1", spi1grp, spi1_padmux),
+ SIRFSOC_PMX_FUNCTION("gps", gpsgrp, gps_padmux),
+};
+
+static void sirfsoc_pinmux_endisable(struct sirfsoc_pmx *spmx, unsigned selector,
+ bool enable)
+{
+ int i;
+ const struct sirfsoc_padmux *mux = sirfsoc_pmx_functions[selector].padmux;
+ const struct sirfsoc_muxmask *mask = mux->muxmask;
+
+ for (i = 0; i < mux->muxmask_counts; i++) {
+ u32 muxval;
+ muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(mask[i].group));
+ if (enable)
+ muxval = muxval & ~mask[i].mask;
+ else
+ muxval = muxval | mask[i].mask;
+ writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(mask[i].group));
+ }
+
+ if (mux->funcmask && enable) {
+ u32 func_en_val;
+ func_en_val =
+ readl(spmx->rsc_virtbase + SIRFSOC_RSC_PIN_MUX);
+ func_en_val =
+ (func_en_val & ~mux->funcmask) | (mux->
+ funcval);
+ writel(func_en_val, spmx->rsc_virtbase + SIRFSOC_RSC_PIN_MUX);
+ }
+}
+
+static int sirfsoc_pinmux_enable(struct pinctrl_dev *pmxdev, unsigned selector,
+ unsigned group)
+{
+ struct sirfsoc_pmx *spmx;
+
+ spmx = pinctrl_dev_get_drvdata(pmxdev);
+ sirfsoc_pinmux_endisable(spmx, selector, true);
+
+ return 0;
+}
+
+static void sirfsoc_pinmux_disable(struct pinctrl_dev *pmxdev, unsigned selector,
+ unsigned group)
+{
+ struct sirfsoc_pmx *spmx;
+
+ spmx = pinctrl_dev_get_drvdata(pmxdev);
+ sirfsoc_pinmux_endisable(spmx, selector, false);
+}
+
+static int sirfsoc_pinmux_list_funcs(struct pinctrl_dev *pmxdev, unsigned selector)
+{
+ if (selector >= ARRAY_SIZE(sirfsoc_pmx_functions))
+ return -EINVAL;
+ return 0;
+}
+
+static const char *sirfsoc_pinmux_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return sirfsoc_pmx_functions[selector].name;
+}
+
+static int sirfsoc_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = sirfsoc_pmx_functions[selector].groups;
+ *num_groups = sirfsoc_pmx_functions[selector].num_groups;
+ return 0;
+}
+
+static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev,
+ struct pinctrl_gpio_range *range, unsigned offset)
+{
+ struct sirfsoc_pmx *spmx;
+
+ int group = range->id;
+
+ u32 muxval;
+
+ spmx = pinctrl_dev_get_drvdata(pmxdev);
+
+ muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
+ muxval = muxval | (1 << offset);
+ writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
+
+ return 0;
+}
+
+static struct pinmux_ops sirfsoc_pinmux_ops = {
+ .list_functions = sirfsoc_pinmux_list_funcs,
+ .enable = sirfsoc_pinmux_enable,
+ .disable = sirfsoc_pinmux_disable,
+ .get_function_name = sirfsoc_pinmux_get_func_name,
+ .get_function_groups = sirfsoc_pinmux_get_groups,
+ .gpio_request_enable = sirfsoc_pinmux_request_gpio,
+};
+
+static struct pinctrl_desc sirfsoc_pinmux_desc = {
+ .name = DRIVER_NAME,
+ .pins = sirfsoc_pads,
+ .npins = ARRAY_SIZE(sirfsoc_pads),
+ .maxpin = SIRFSOC_NUM_PADS - 1,
+ .pctlops = &sirfsoc_pctrl_ops,
+ .pmxops = &sirfsoc_pinmux_ops,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Todo: bind irq_chip to every pinctrl_gpio_range
+ */
+static struct pinctrl_gpio_range sirfsoc_gpio_ranges[] = {
+ {
+ .name = "sirfsoc-gpio*",
+ .id = 0,
+ .base = 0,
+ .npins = 32,
+ }, {
+ .name = "sirfsoc-gpio*",
+ .id = 1,
+ .base = 32,
+ .npins = 32,
+ }, {
+ .name = "sirfsoc-gpio*",
+ .id = 2,
+ .base = 64,
+ .npins = 32,
+ }, {
+ .name = "sirfsoc-gpio*",
+ .id = 3,
+ .base = 96,
+ .npins = 19,
+ },
+};
+
+static void __iomem *sirfsoc_rsc_of_iomap(void)
+{
+ const struct of_device_id rsc_ids[] = {
+ { .compatible = "sirf,prima2-rsc" },
+ {}
+ };
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, rsc_ids);
+ if (!np)
+ panic("unable to find compatible rsc node in dtb\n");
+
+ return of_iomap(np, 0);
+}
+
+static int __devinit sirfsoc_pinmux_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct sirfsoc_pmx *spmx;
+ struct device_node *np = pdev->dev.of_node;
+ int i;
+
+ /* Create state holders etc for this driver */
+ spmx = devm_kzalloc(&pdev->dev, sizeof(*spmx), GFP_KERNEL);
+ if (!spmx)
+ return -ENOMEM;
+
+ spmx->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, spmx);
+
+ spmx->gpio_virtbase = of_iomap(np, 0);
+ if (!spmx->gpio_virtbase) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "can't map gpio registers\n");
+ goto out_no_gpio_remap;
+ }
+
+ spmx->rsc_virtbase = sirfsoc_rsc_of_iomap();
+ if (!spmx->rsc_virtbase) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "can't map rsc registers\n");
+ goto out_no_rsc_remap;
+ }
+
+ /* Now register the pin controller and all pins it handles */
+ spmx->pmx = pinctrl_register(&sirfsoc_pinmux_desc, &pdev->dev, spmx);
+ if (!spmx->pmx) {
+ dev_err(&pdev->dev, "could not register SIRFSOC pinmux driver\n");
+ ret = -EINVAL;
+ goto out_no_pmx;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sirfsoc_gpio_ranges); i++)
+ pinctrl_add_gpio_range(spmx->pmx, &sirfsoc_gpio_ranges[i]);
+
+ dev_info(&pdev->dev, "initialized SIRFSOC pinmux driver\n");
+
+ return 0;
+
+out_no_pmx:
+ iounmap(spmx->rsc_virtbase);
+out_no_rsc_remap:
+ iounmap(spmx->gpio_virtbase);
+out_no_gpio_remap:
+ platform_set_drvdata(pdev, NULL);
+ devm_kfree(&pdev->dev, spmx);
+ return ret;
+}
+
+static const struct of_device_id pinmux_ids[] = {
+ { .compatible = "sirf,prima2-gpio-pinmux" },
+ {}
+};
+
+static struct platform_driver sirfsoc_pinmux_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = pinmux_ids,
+ },
+ .probe = sirfsoc_pinmux_probe,
+};
+
+static int __init sirfsoc_pinmux_init(void)
+{
+ return platform_driver_register(&sirfsoc_pinmux_driver);
+}
+arch_initcall(sirfsoc_pinmux_init);
+
+MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
+ "Barry Song <baohua.song@csr.com>");
+MODULE_DESCRIPTION("SIRFSOC pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinmux-u300.c b/drivers/pinctrl/pinmux-u300.c
new file mode 100644
index 00000000000..4858a64131f
--- /dev/null
+++ b/drivers/pinctrl/pinmux-u300.c
@@ -0,0 +1,1135 @@
+/*
+ * Driver for the U300 pin controller
+ *
+ * Based on the original U300 padmux functions
+ * Copyright (C) 2009-2011 ST-Ericsson AB
+ * Author: Martin Persson <martin.persson@stericsson.com>
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * The DB3350 design and control registers are oriented around pads rather than
+ * pins, so we enumerate the pads we can mux rather than actual pins. The pads
+ * are connected to different pins in different packaging types, so it would
+ * be confusing.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+/*
+ * Register definitions for the U300 Padmux control registers in the
+ * system controller
+ */
+
+/* PAD MUX Control register 1 (LOW) 16bit (R/W) */
+#define U300_SYSCON_PMC1LR 0x007C
+#define U300_SYSCON_PMC1LR_MASK 0xFFFF
+#define U300_SYSCON_PMC1LR_CDI_MASK 0xC000
+#define U300_SYSCON_PMC1LR_CDI_CDI 0x0000
+#define U300_SYSCON_PMC1LR_CDI_EMIF 0x4000
+/* For BS335 */
+#define U300_SYSCON_PMC1LR_CDI_CDI2 0x8000
+#define U300_SYSCON_PMC1LR_CDI_WCDMA_APP_GPIO 0xC000
+/* For BS365 */
+#define U300_SYSCON_PMC1LR_CDI_GPIO 0x8000
+#define U300_SYSCON_PMC1LR_CDI_WCDMA 0xC000
+/* Common defs */
+#define U300_SYSCON_PMC1LR_PDI_MASK 0x3000
+#define U300_SYSCON_PMC1LR_PDI_PDI 0x0000
+#define U300_SYSCON_PMC1LR_PDI_EGG 0x1000
+#define U300_SYSCON_PMC1LR_PDI_WCDMA 0x3000
+#define U300_SYSCON_PMC1LR_MMCSD_MASK 0x0C00
+#define U300_SYSCON_PMC1LR_MMCSD_MMCSD 0x0000
+#define U300_SYSCON_PMC1LR_MMCSD_MSPRO 0x0400
+#define U300_SYSCON_PMC1LR_MMCSD_DSP 0x0800
+#define U300_SYSCON_PMC1LR_MMCSD_WCDMA 0x0C00
+#define U300_SYSCON_PMC1LR_ETM_MASK 0x0300
+#define U300_SYSCON_PMC1LR_ETM_ACC 0x0000
+#define U300_SYSCON_PMC1LR_ETM_APP 0x0100
+#define U300_SYSCON_PMC1LR_EMIF_1_CS2_MASK 0x00C0
+#define U300_SYSCON_PMC1LR_EMIF_1_CS2_STATIC 0x0000
+#define U300_SYSCON_PMC1LR_EMIF_1_CS2_NFIF 0x0040
+#define U300_SYSCON_PMC1LR_EMIF_1_CS2_SDRAM 0x0080
+#define U300_SYSCON_PMC1LR_EMIF_1_CS2_STATIC_2GB 0x00C0
+#define U300_SYSCON_PMC1LR_EMIF_1_CS1_MASK 0x0030
+#define U300_SYSCON_PMC1LR_EMIF_1_CS1_STATIC 0x0000
+#define U300_SYSCON_PMC1LR_EMIF_1_CS1_NFIF 0x0010
+#define U300_SYSCON_PMC1LR_EMIF_1_CS1_SDRAM 0x0020
+#define U300_SYSCON_PMC1LR_EMIF_1_CS1_SEMI 0x0030
+#define U300_SYSCON_PMC1LR_EMIF_1_CS0_MASK 0x000C
+#define U300_SYSCON_PMC1LR_EMIF_1_CS0_STATIC 0x0000
+#define U300_SYSCON_PMC1LR_EMIF_1_CS0_NFIF 0x0004
+#define U300_SYSCON_PMC1LR_EMIF_1_CS0_SDRAM 0x0008
+#define U300_SYSCON_PMC1LR_EMIF_1_CS0_SEMI 0x000C
+#define U300_SYSCON_PMC1LR_EMIF_1_MASK 0x0003
+#define U300_SYSCON_PMC1LR_EMIF_1_STATIC 0x0000
+#define U300_SYSCON_PMC1LR_EMIF_1_SDRAM0 0x0001
+#define U300_SYSCON_PMC1LR_EMIF_1_SDRAM1 0x0002
+#define U300_SYSCON_PMC1LR_EMIF_1 0x0003
+/* PAD MUX Control register 2 (HIGH) 16bit (R/W) */
+#define U300_SYSCON_PMC1HR 0x007E
+#define U300_SYSCON_PMC1HR_MASK 0xFFFF
+#define U300_SYSCON_PMC1HR_MISC_2_MASK 0xC000
+#define U300_SYSCON_PMC1HR_MISC_2_APP_GPIO 0x0000
+#define U300_SYSCON_PMC1HR_MISC_2_MSPRO 0x4000
+#define U300_SYSCON_PMC1HR_MISC_2_DSP 0x8000
+#define U300_SYSCON_PMC1HR_MISC_2_AAIF 0xC000
+#define U300_SYSCON_PMC1HR_APP_GPIO_2_MASK 0x3000
+#define U300_SYSCON_PMC1HR_APP_GPIO_2_APP_GPIO 0x0000
+#define U300_SYSCON_PMC1HR_APP_GPIO_2_NFIF 0x1000
+#define U300_SYSCON_PMC1HR_APP_GPIO_2_DSP 0x2000
+#define U300_SYSCON_PMC1HR_APP_GPIO_2_AAIF 0x3000
+#define U300_SYSCON_PMC1HR_APP_GPIO_1_MASK 0x0C00
+#define U300_SYSCON_PMC1HR_APP_GPIO_1_APP_GPIO 0x0000
+#define U300_SYSCON_PMC1HR_APP_GPIO_1_MMC 0x0400
+#define U300_SYSCON_PMC1HR_APP_GPIO_1_DSP 0x0800
+#define U300_SYSCON_PMC1HR_APP_GPIO_1_AAIF 0x0C00
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK 0x0300
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_APP_GPIO 0x0000
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI 0x0100
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_AAIF 0x0300
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK 0x00C0
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_APP_GPIO 0x0000
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI 0x0040
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_AAIF 0x00C0
+#define U300_SYSCON_PMC1HR_APP_SPI_2_MASK 0x0030
+#define U300_SYSCON_PMC1HR_APP_SPI_2_APP_GPIO 0x0000
+#define U300_SYSCON_PMC1HR_APP_SPI_2_SPI 0x0010
+#define U300_SYSCON_PMC1HR_APP_SPI_2_DSP 0x0020
+#define U300_SYSCON_PMC1HR_APP_SPI_2_AAIF 0x0030
+#define U300_SYSCON_PMC1HR_APP_UART0_2_MASK 0x000C
+#define U300_SYSCON_PMC1HR_APP_UART0_2_APP_GPIO 0x0000
+#define U300_SYSCON_PMC1HR_APP_UART0_2_UART0 0x0004
+#define U300_SYSCON_PMC1HR_APP_UART0_2_NFIF_CS 0x0008
+#define U300_SYSCON_PMC1HR_APP_UART0_2_AAIF 0x000C
+#define U300_SYSCON_PMC1HR_APP_UART0_1_MASK 0x0003
+#define U300_SYSCON_PMC1HR_APP_UART0_1_APP_GPIO 0x0000
+#define U300_SYSCON_PMC1HR_APP_UART0_1_UART0 0x0001
+#define U300_SYSCON_PMC1HR_APP_UART0_1_AAIF 0x0003
+/* Padmux 2 control */
+#define U300_SYSCON_PMC2R 0x100
+#define U300_SYSCON_PMC2R_APP_MISC_0_MASK 0x00C0
+#define U300_SYSCON_PMC2R_APP_MISC_0_APP_GPIO 0x0000
+#define U300_SYSCON_PMC2R_APP_MISC_0_EMIF_SDRAM 0x0040
+#define U300_SYSCON_PMC2R_APP_MISC_0_MMC 0x0080
+#define U300_SYSCON_PMC2R_APP_MISC_0_CDI2 0x00C0
+#define U300_SYSCON_PMC2R_APP_MISC_1_MASK 0x0300
+#define U300_SYSCON_PMC2R_APP_MISC_1_APP_GPIO 0x0000
+#define U300_SYSCON_PMC2R_APP_MISC_1_EMIF_SDRAM 0x0100
+#define U300_SYSCON_PMC2R_APP_MISC_1_MMC 0x0200
+#define U300_SYSCON_PMC2R_APP_MISC_1_CDI2 0x0300
+#define U300_SYSCON_PMC2R_APP_MISC_2_MASK 0x0C00
+#define U300_SYSCON_PMC2R_APP_MISC_2_APP_GPIO 0x0000
+#define U300_SYSCON_PMC2R_APP_MISC_2_EMIF_SDRAM 0x0400
+#define U300_SYSCON_PMC2R_APP_MISC_2_MMC 0x0800
+#define U300_SYSCON_PMC2R_APP_MISC_2_CDI2 0x0C00
+#define U300_SYSCON_PMC2R_APP_MISC_3_MASK 0x3000
+#define U300_SYSCON_PMC2R_APP_MISC_3_APP_GPIO 0x0000
+#define U300_SYSCON_PMC2R_APP_MISC_3_EMIF_SDRAM 0x1000
+#define U300_SYSCON_PMC2R_APP_MISC_3_MMC 0x2000
+#define U300_SYSCON_PMC2R_APP_MISC_3_CDI2 0x3000
+#define U300_SYSCON_PMC2R_APP_MISC_4_MASK 0xC000
+#define U300_SYSCON_PMC2R_APP_MISC_4_APP_GPIO 0x0000
+#define U300_SYSCON_PMC2R_APP_MISC_4_EMIF_SDRAM 0x4000
+#define U300_SYSCON_PMC2R_APP_MISC_4_MMC 0x8000
+#define U300_SYSCON_PMC2R_APP_MISC_4_ACC_GPIO 0xC000
+/* TODO: More SYSCON registers missing */
+#define U300_SYSCON_PMC3R 0x10C
+#define U300_SYSCON_PMC3R_APP_MISC_11_MASK 0xC000
+#define U300_SYSCON_PMC3R_APP_MISC_11_SPI 0x4000
+#define U300_SYSCON_PMC3R_APP_MISC_10_MASK 0x3000
+#define U300_SYSCON_PMC3R_APP_MISC_10_SPI 0x1000
+/* TODO: Missing other configs */
+#define U300_SYSCON_PMC4R 0x168
+#define U300_SYSCON_PMC4R_APP_MISC_12_MASK 0x0003
+#define U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO 0x0000
+#define U300_SYSCON_PMC4R_APP_MISC_13_MASK 0x000C
+#define U300_SYSCON_PMC4R_APP_MISC_13_CDI 0x0000
+#define U300_SYSCON_PMC4R_APP_MISC_13_SMIA 0x0004
+#define U300_SYSCON_PMC4R_APP_MISC_13_SMIA2 0x0008
+#define U300_SYSCON_PMC4R_APP_MISC_13_APP_GPIO 0x000C
+#define U300_SYSCON_PMC4R_APP_MISC_14_MASK 0x0030
+#define U300_SYSCON_PMC4R_APP_MISC_14_CDI 0x0000
+#define U300_SYSCON_PMC4R_APP_MISC_14_SMIA 0x0010
+#define U300_SYSCON_PMC4R_APP_MISC_14_CDI2 0x0020
+#define U300_SYSCON_PMC4R_APP_MISC_14_APP_GPIO 0x0030
+#define U300_SYSCON_PMC4R_APP_MISC_16_MASK 0x0300
+#define U300_SYSCON_PMC4R_APP_MISC_16_APP_GPIO_13 0x0000
+#define U300_SYSCON_PMC4R_APP_MISC_16_APP_UART1_CTS 0x0100
+#define U300_SYSCON_PMC4R_APP_MISC_16_EMIF_1_STATIC_CS5_N 0x0200
+
+#define DRIVER_NAME "pinmux-u300"
+
+/*
+ * The DB3350 has 467 pads, I have enumerated the pads clockwise around the
+ * edges of the silicon, finger by finger. LTCORNER upper left is pad 0.
+ * Data taken from the PadRing chart, arranged like this:
+ *
+ * 0 ..... 104
+ * 466 105
+ * . .
+ * . .
+ * 358 224
+ * 357 .... 225
+ */
+#define U300_NUM_PADS 467
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc u300_pads[] = {
+ /* Pads along the top edge of the chip */
+ PINCTRL_PIN(0, "P PAD VDD 28"),
+ PINCTRL_PIN(1, "P PAD GND 28"),
+ PINCTRL_PIN(2, "PO SIM RST N"),
+ PINCTRL_PIN(3, "VSSIO 25"),
+ PINCTRL_PIN(4, "VSSA ADDA ESDSUB"),
+ PINCTRL_PIN(5, "PWR VSSCOMMON"),
+ PINCTRL_PIN(6, "PI ADC I1 POS"),
+ PINCTRL_PIN(7, "PI ADC I1 NEG"),
+ PINCTRL_PIN(8, "PWR VSSAD0"),
+ PINCTRL_PIN(9, "PWR VCCAD0"),
+ PINCTRL_PIN(10, "PI ADC Q1 NEG"),
+ PINCTRL_PIN(11, "PI ADC Q1 POS"),
+ PINCTRL_PIN(12, "PWR VDDAD"),
+ PINCTRL_PIN(13, "PWR GNDAD"),
+ PINCTRL_PIN(14, "PI ADC I2 POS"),
+ PINCTRL_PIN(15, "PI ADC I2 NEG"),
+ PINCTRL_PIN(16, "PWR VSSAD1"),
+ PINCTRL_PIN(17, "PWR VCCAD1"),
+ PINCTRL_PIN(18, "PI ADC Q2 NEG"),
+ PINCTRL_PIN(19, "PI ADC Q2 POS"),
+ PINCTRL_PIN(20, "VSSA ADDA ESDSUB"),
+ PINCTRL_PIN(21, "PWR VCCGPAD"),
+ PINCTRL_PIN(22, "PI TX POW"),
+ PINCTRL_PIN(23, "PWR VSSGPAD"),
+ PINCTRL_PIN(24, "PO DAC I POS"),
+ PINCTRL_PIN(25, "PO DAC I NEG"),
+ PINCTRL_PIN(26, "PO DAC Q POS"),
+ PINCTRL_PIN(27, "PO DAC Q NEG"),
+ PINCTRL_PIN(28, "PWR VSSDA"),
+ PINCTRL_PIN(29, "PWR VCCDA"),
+ PINCTRL_PIN(30, "VSSA ADDA ESDSUB"),
+ PINCTRL_PIN(31, "P PAD VDDIO 11"),
+ PINCTRL_PIN(32, "PI PLL 26 FILTVDD"),
+ PINCTRL_PIN(33, "PI PLL 26 VCONT"),
+ PINCTRL_PIN(34, "PWR AGNDPLL2V5 32 13"),
+ PINCTRL_PIN(35, "PWR AVDDPLL2V5 32 13"),
+ PINCTRL_PIN(36, "VDDA PLL ESD"),
+ PINCTRL_PIN(37, "VSSA PLL ESD"),
+ PINCTRL_PIN(38, "VSS PLL"),
+ PINCTRL_PIN(39, "VDDC PLL"),
+ PINCTRL_PIN(40, "PWR AGNDPLL2V5 26 60"),
+ PINCTRL_PIN(41, "PWR AVDDPLL2V5 26 60"),
+ PINCTRL_PIN(42, "PWR AVDDPLL2V5 26 208"),
+ PINCTRL_PIN(43, "PWR AGNDPLL2V5 26 208"),
+ PINCTRL_PIN(44, "PWR AVDDPLL2V5 13 208"),
+ PINCTRL_PIN(45, "PWR AGNDPLL2V5 13 208"),
+ PINCTRL_PIN(46, "P PAD VSSIO 11"),
+ PINCTRL_PIN(47, "P PAD VSSIO 12"),
+ PINCTRL_PIN(48, "PI POW RST N"),
+ PINCTRL_PIN(49, "VDDC IO"),
+ PINCTRL_PIN(50, "P PAD VDDIO 16"),
+ PINCTRL_PIN(51, "PO RF WCDMA EN 4"),
+ PINCTRL_PIN(52, "PO RF WCDMA EN 3"),
+ PINCTRL_PIN(53, "PO RF WCDMA EN 2"),
+ PINCTRL_PIN(54, "PO RF WCDMA EN 1"),
+ PINCTRL_PIN(55, "PO RF WCDMA EN 0"),
+ PINCTRL_PIN(56, "PO GSM PA ENABLE"),
+ PINCTRL_PIN(57, "PO RF DATA STRB"),
+ PINCTRL_PIN(58, "PO RF DATA2"),
+ PINCTRL_PIN(59, "PIO RF DATA1"),
+ PINCTRL_PIN(60, "PIO RF DATA0"),
+ PINCTRL_PIN(61, "P PAD VDD 11"),
+ PINCTRL_PIN(62, "P PAD GND 11"),
+ PINCTRL_PIN(63, "P PAD VSSIO 16"),
+ PINCTRL_PIN(64, "P PAD VDDIO 18"),
+ PINCTRL_PIN(65, "PO RF CTRL STRB2"),
+ PINCTRL_PIN(66, "PO RF CTRL STRB1"),
+ PINCTRL_PIN(67, "PO RF CTRL STRB0"),
+ PINCTRL_PIN(68, "PIO RF CTRL DATA"),
+ PINCTRL_PIN(69, "PO RF CTRL CLK"),
+ PINCTRL_PIN(70, "PO TX ADC STRB"),
+ PINCTRL_PIN(71, "PO ANT SW 2"),
+ PINCTRL_PIN(72, "PO ANT SW 3"),
+ PINCTRL_PIN(73, "PO ANT SW 0"),
+ PINCTRL_PIN(74, "PO ANT SW 1"),
+ PINCTRL_PIN(75, "PO M CLKRQ"),
+ PINCTRL_PIN(76, "PI M CLK"),
+ PINCTRL_PIN(77, "PI RTC CLK"),
+ PINCTRL_PIN(78, "P PAD VDD 8"),
+ PINCTRL_PIN(79, "P PAD GND 8"),
+ PINCTRL_PIN(80, "P PAD VSSIO 13"),
+ PINCTRL_PIN(81, "P PAD VDDIO 13"),
+ PINCTRL_PIN(82, "PO SYS 1 CLK"),
+ PINCTRL_PIN(83, "PO SYS 2 CLK"),
+ PINCTRL_PIN(84, "PO SYS 0 CLK"),
+ PINCTRL_PIN(85, "PI SYS 0 CLKRQ"),
+ PINCTRL_PIN(86, "PO PWR MNGT CTRL 1"),
+ PINCTRL_PIN(87, "PO PWR MNGT CTRL 0"),
+ PINCTRL_PIN(88, "PO RESOUT2 RST N"),
+ PINCTRL_PIN(89, "PO RESOUT1 RST N"),
+ PINCTRL_PIN(90, "PO RESOUT0 RST N"),
+ PINCTRL_PIN(91, "PI SERVICE N"),
+ PINCTRL_PIN(92, "P PAD VDD 29"),
+ PINCTRL_PIN(93, "P PAD GND 29"),
+ PINCTRL_PIN(94, "P PAD VSSIO 8"),
+ PINCTRL_PIN(95, "P PAD VDDIO 8"),
+ PINCTRL_PIN(96, "PI EXT IRQ1 N"),
+ PINCTRL_PIN(97, "PI EXT IRQ0 N"),
+ PINCTRL_PIN(98, "PIO DC ON"),
+ PINCTRL_PIN(99, "PIO ACC APP I2C DATA"),
+ PINCTRL_PIN(100, "PIO ACC APP I2C CLK"),
+ PINCTRL_PIN(101, "P PAD VDD 12"),
+ PINCTRL_PIN(102, "P PAD GND 12"),
+ PINCTRL_PIN(103, "P PAD VSSIO 14"),
+ PINCTRL_PIN(104, "P PAD VDDIO 14"),
+ /* Pads along the right edge of the chip */
+ PINCTRL_PIN(105, "PIO APP I2C1 DATA"),
+ PINCTRL_PIN(106, "PIO APP I2C1 CLK"),
+ PINCTRL_PIN(107, "PO KEY OUT0"),
+ PINCTRL_PIN(108, "PO KEY OUT1"),
+ PINCTRL_PIN(109, "PO KEY OUT2"),
+ PINCTRL_PIN(110, "PO KEY OUT3"),
+ PINCTRL_PIN(111, "PO KEY OUT4"),
+ PINCTRL_PIN(112, "PI KEY IN0"),
+ PINCTRL_PIN(113, "PI KEY IN1"),
+ PINCTRL_PIN(114, "PI KEY IN2"),
+ PINCTRL_PIN(115, "P PAD VDDIO 15"),
+ PINCTRL_PIN(116, "P PAD VSSIO 15"),
+ PINCTRL_PIN(117, "P PAD GND 13"),
+ PINCTRL_PIN(118, "P PAD VDD 13"),
+ PINCTRL_PIN(119, "PI KEY IN3"),
+ PINCTRL_PIN(120, "PI KEY IN4"),
+ PINCTRL_PIN(121, "PI KEY IN5"),
+ PINCTRL_PIN(122, "PIO APP PCM I2S1 DATA B"),
+ PINCTRL_PIN(123, "PIO APP PCM I2S1 DATA A"),
+ PINCTRL_PIN(124, "PIO APP PCM I2S1 WS"),
+ PINCTRL_PIN(125, "PIO APP PCM I2S1 CLK"),
+ PINCTRL_PIN(126, "PIO APP PCM I2S0 DATA B"),
+ PINCTRL_PIN(127, "PIO APP PCM I2S0 DATA A"),
+ PINCTRL_PIN(128, "PIO APP PCM I2S0 WS"),
+ PINCTRL_PIN(129, "PIO APP PCM I2S0 CLK"),
+ PINCTRL_PIN(130, "P PAD VDD 17"),
+ PINCTRL_PIN(131, "P PAD GND 17"),
+ PINCTRL_PIN(132, "P PAD VSSIO 19"),
+ PINCTRL_PIN(133, "P PAD VDDIO 19"),
+ PINCTRL_PIN(134, "UART0 RTS"),
+ PINCTRL_PIN(135, "UART0 CTS"),
+ PINCTRL_PIN(136, "UART0 TX"),
+ PINCTRL_PIN(137, "UART0 RX"),
+ PINCTRL_PIN(138, "PIO ACC SPI DO"),
+ PINCTRL_PIN(139, "PIO ACC SPI DI"),
+ PINCTRL_PIN(140, "PIO ACC SPI CS0 N"),
+ PINCTRL_PIN(141, "PIO ACC SPI CS1 N"),
+ PINCTRL_PIN(142, "PIO ACC SPI CS2 N"),
+ PINCTRL_PIN(143, "PIO ACC SPI CLK"),
+ PINCTRL_PIN(144, "PO PDI EXT RST N"),
+ PINCTRL_PIN(145, "P PAD VDDIO 22"),
+ PINCTRL_PIN(146, "P PAD VSSIO 22"),
+ PINCTRL_PIN(147, "P PAD GND 18"),
+ PINCTRL_PIN(148, "P PAD VDD 18"),
+ PINCTRL_PIN(149, "PIO PDI C0"),
+ PINCTRL_PIN(150, "PIO PDI C1"),
+ PINCTRL_PIN(151, "PIO PDI C2"),
+ PINCTRL_PIN(152, "PIO PDI C3"),
+ PINCTRL_PIN(153, "PIO PDI C4"),
+ PINCTRL_PIN(154, "PIO PDI C5"),
+ PINCTRL_PIN(155, "PIO PDI D0"),
+ PINCTRL_PIN(156, "PIO PDI D1"),
+ PINCTRL_PIN(157, "PIO PDI D2"),
+ PINCTRL_PIN(158, "PIO PDI D3"),
+ PINCTRL_PIN(159, "P PAD VDDIO 21"),
+ PINCTRL_PIN(160, "P PAD VSSIO 21"),
+ PINCTRL_PIN(161, "PIO PDI D4"),
+ PINCTRL_PIN(162, "PIO PDI D5"),
+ PINCTRL_PIN(163, "PIO PDI D6"),
+ PINCTRL_PIN(164, "PIO PDI D7"),
+ PINCTRL_PIN(165, "PIO MS INS"),
+ PINCTRL_PIN(166, "MMC DATA DIR LS"),
+ PINCTRL_PIN(167, "MMC DATA 3"),
+ PINCTRL_PIN(168, "MMC DATA 2"),
+ PINCTRL_PIN(169, "MMC DATA 1"),
+ PINCTRL_PIN(170, "MMC DATA 0"),
+ PINCTRL_PIN(171, "MMC CMD DIR LS"),
+ PINCTRL_PIN(172, "P PAD VDD 27"),
+ PINCTRL_PIN(173, "P PAD GND 27"),
+ PINCTRL_PIN(174, "P PAD VSSIO 20"),
+ PINCTRL_PIN(175, "P PAD VDDIO 20"),
+ PINCTRL_PIN(176, "MMC CMD"),
+ PINCTRL_PIN(177, "MMC CLK"),
+ PINCTRL_PIN(178, "PIO APP GPIO 14"),
+ PINCTRL_PIN(179, "PIO APP GPIO 13"),
+ PINCTRL_PIN(180, "PIO APP GPIO 11"),
+ PINCTRL_PIN(181, "PIO APP GPIO 25"),
+ PINCTRL_PIN(182, "PIO APP GPIO 24"),
+ PINCTRL_PIN(183, "PIO APP GPIO 23"),
+ PINCTRL_PIN(184, "PIO APP GPIO 22"),
+ PINCTRL_PIN(185, "PIO APP GPIO 21"),
+ PINCTRL_PIN(186, "PIO APP GPIO 20"),
+ PINCTRL_PIN(187, "P PAD VDD 19"),
+ PINCTRL_PIN(188, "P PAD GND 19"),
+ PINCTRL_PIN(189, "P PAD VSSIO 23"),
+ PINCTRL_PIN(190, "P PAD VDDIO 23"),
+ PINCTRL_PIN(191, "PIO APP GPIO 19"),
+ PINCTRL_PIN(192, "PIO APP GPIO 18"),
+ PINCTRL_PIN(193, "PIO APP GPIO 17"),
+ PINCTRL_PIN(194, "PIO APP GPIO 16"),
+ PINCTRL_PIN(195, "PI CI D1"),
+ PINCTRL_PIN(196, "PI CI D0"),
+ PINCTRL_PIN(197, "PI CI HSYNC"),
+ PINCTRL_PIN(198, "PI CI VSYNC"),
+ PINCTRL_PIN(199, "PI CI EXT CLK"),
+ PINCTRL_PIN(200, "PO CI EXT RST N"),
+ PINCTRL_PIN(201, "P PAD VSSIO 43"),
+ PINCTRL_PIN(202, "P PAD VDDIO 43"),
+ PINCTRL_PIN(203, "PI CI D6"),
+ PINCTRL_PIN(204, "PI CI D7"),
+ PINCTRL_PIN(205, "PI CI D2"),
+ PINCTRL_PIN(206, "PI CI D3"),
+ PINCTRL_PIN(207, "PI CI D4"),
+ PINCTRL_PIN(208, "PI CI D5"),
+ PINCTRL_PIN(209, "PI CI D8"),
+ PINCTRL_PIN(210, "PI CI D9"),
+ PINCTRL_PIN(211, "P PAD VDD 20"),
+ PINCTRL_PIN(212, "P PAD GND 20"),
+ PINCTRL_PIN(213, "P PAD VSSIO 24"),
+ PINCTRL_PIN(214, "P PAD VDDIO 24"),
+ PINCTRL_PIN(215, "P PAD VDDIO 26"),
+ PINCTRL_PIN(216, "PO EMIF 1 A26"),
+ PINCTRL_PIN(217, "PO EMIF 1 A25"),
+ PINCTRL_PIN(218, "P PAD VSSIO 26"),
+ PINCTRL_PIN(219, "PO EMIF 1 A24"),
+ PINCTRL_PIN(220, "PO EMIF 1 A23"),
+ /* Pads along the bottom edge of the chip */
+ PINCTRL_PIN(221, "PO EMIF 1 A22"),
+ PINCTRL_PIN(222, "PO EMIF 1 A21"),
+ PINCTRL_PIN(223, "P PAD VDD 21"),
+ PINCTRL_PIN(224, "P PAD GND 21"),
+ PINCTRL_PIN(225, "P PAD VSSIO 27"),
+ PINCTRL_PIN(226, "P PAD VDDIO 27"),
+ PINCTRL_PIN(227, "PO EMIF 1 A20"),
+ PINCTRL_PIN(228, "PO EMIF 1 A19"),
+ PINCTRL_PIN(229, "PO EMIF 1 A18"),
+ PINCTRL_PIN(230, "PO EMIF 1 A17"),
+ PINCTRL_PIN(231, "P PAD VDDIO 28"),
+ PINCTRL_PIN(232, "P PAD VSSIO 28"),
+ PINCTRL_PIN(233, "PO EMIF 1 A16"),
+ PINCTRL_PIN(234, "PIO EMIF 1 D15"),
+ PINCTRL_PIN(235, "PO EMIF 1 A15"),
+ PINCTRL_PIN(236, "PIO EMIF 1 D14"),
+ PINCTRL_PIN(237, "P PAD VDD 22"),
+ PINCTRL_PIN(238, "P PAD GND 22"),
+ PINCTRL_PIN(239, "P PAD VSSIO 29"),
+ PINCTRL_PIN(240, "P PAD VDDIO 29"),
+ PINCTRL_PIN(241, "PO EMIF 1 A14"),
+ PINCTRL_PIN(242, "PIO EMIF 1 D13"),
+ PINCTRL_PIN(243, "PO EMIF 1 A13"),
+ PINCTRL_PIN(244, "PIO EMIF 1 D12"),
+ PINCTRL_PIN(245, "P PAD VSSIO 30"),
+ PINCTRL_PIN(246, "P PAD VDDIO 30"),
+ PINCTRL_PIN(247, "PO EMIF 1 A12"),
+ PINCTRL_PIN(248, "PIO EMIF 1 D11"),
+ PINCTRL_PIN(249, "PO EMIF 1 A11"),
+ PINCTRL_PIN(250, "PIO EMIF 1 D10"),
+ PINCTRL_PIN(251, "P PAD VSSIO 31"),
+ PINCTRL_PIN(252, "P PAD VDDIO 31"),
+ PINCTRL_PIN(253, "PO EMIF 1 A10"),
+ PINCTRL_PIN(254, "PIO EMIF 1 D09"),
+ PINCTRL_PIN(255, "PO EMIF 1 A09"),
+ PINCTRL_PIN(256, "P PAD VDDIO 32"),
+ PINCTRL_PIN(257, "P PAD VSSIO 32"),
+ PINCTRL_PIN(258, "P PAD GND 24"),
+ PINCTRL_PIN(259, "P PAD VDD 24"),
+ PINCTRL_PIN(260, "PIO EMIF 1 D08"),
+ PINCTRL_PIN(261, "PO EMIF 1 A08"),
+ PINCTRL_PIN(262, "PIO EMIF 1 D07"),
+ PINCTRL_PIN(263, "PO EMIF 1 A07"),
+ PINCTRL_PIN(264, "P PAD VDDIO 33"),
+ PINCTRL_PIN(265, "P PAD VSSIO 33"),
+ PINCTRL_PIN(266, "PIO EMIF 1 D06"),
+ PINCTRL_PIN(267, "PO EMIF 1 A06"),
+ PINCTRL_PIN(268, "PIO EMIF 1 D05"),
+ PINCTRL_PIN(269, "PO EMIF 1 A05"),
+ PINCTRL_PIN(270, "P PAD VDDIO 34"),
+ PINCTRL_PIN(271, "P PAD VSSIO 34"),
+ PINCTRL_PIN(272, "PIO EMIF 1 D04"),
+ PINCTRL_PIN(273, "PO EMIF 1 A04"),
+ PINCTRL_PIN(274, "PIO EMIF 1 D03"),
+ PINCTRL_PIN(275, "PO EMIF 1 A03"),
+ PINCTRL_PIN(276, "P PAD VDDIO 35"),
+ PINCTRL_PIN(277, "P PAD VSSIO 35"),
+ PINCTRL_PIN(278, "P PAD GND 23"),
+ PINCTRL_PIN(279, "P PAD VDD 23"),
+ PINCTRL_PIN(280, "PIO EMIF 1 D02"),
+ PINCTRL_PIN(281, "PO EMIF 1 A02"),
+ PINCTRL_PIN(282, "PIO EMIF 1 D01"),
+ PINCTRL_PIN(283, "PO EMIF 1 A01"),
+ PINCTRL_PIN(284, "P PAD VDDIO 36"),
+ PINCTRL_PIN(285, "P PAD VSSIO 36"),
+ PINCTRL_PIN(286, "PIO EMIF 1 D00"),
+ PINCTRL_PIN(287, "PO EMIF 1 BE1 N"),
+ PINCTRL_PIN(288, "PO EMIF 1 BE0 N"),
+ PINCTRL_PIN(289, "PO EMIF 1 ADV N"),
+ PINCTRL_PIN(290, "P PAD VDDIO 37"),
+ PINCTRL_PIN(291, "P PAD VSSIO 37"),
+ PINCTRL_PIN(292, "PO EMIF 1 SD CKE0"),
+ PINCTRL_PIN(293, "PO EMIF 1 OE N"),
+ PINCTRL_PIN(294, "PO EMIF 1 WE N"),
+ PINCTRL_PIN(295, "P PAD VDDIO 38"),
+ PINCTRL_PIN(296, "P PAD VSSIO 38"),
+ PINCTRL_PIN(297, "PO EMIF 1 CLK"),
+ PINCTRL_PIN(298, "PIO EMIF 1 SD CLK"),
+ PINCTRL_PIN(299, "P PAD VSSIO 45 (not bonded)"),
+ PINCTRL_PIN(300, "P PAD VDDIO 42"),
+ PINCTRL_PIN(301, "P PAD VSSIO 42"),
+ PINCTRL_PIN(302, "P PAD GND 31"),
+ PINCTRL_PIN(303, "P PAD VDD 31"),
+ PINCTRL_PIN(304, "PI EMIF 1 RET CLK"),
+ PINCTRL_PIN(305, "PI EMIF 1 WAIT N"),
+ PINCTRL_PIN(306, "PI EMIF 1 NFIF READY"),
+ PINCTRL_PIN(307, "PO EMIF 1 SD CKE1"),
+ PINCTRL_PIN(308, "PO EMIF 1 CS3 N"),
+ PINCTRL_PIN(309, "P PAD VDD 25"),
+ PINCTRL_PIN(310, "P PAD GND 25"),
+ PINCTRL_PIN(311, "P PAD VSSIO 39"),
+ PINCTRL_PIN(312, "P PAD VDDIO 39"),
+ PINCTRL_PIN(313, "PO EMIF 1 CS2 N"),
+ PINCTRL_PIN(314, "PO EMIF 1 CS1 N"),
+ PINCTRL_PIN(315, "PO EMIF 1 CS0 N"),
+ PINCTRL_PIN(316, "PO ETM TRACE PKT0"),
+ PINCTRL_PIN(317, "PO ETM TRACE PKT1"),
+ PINCTRL_PIN(318, "PO ETM TRACE PKT2"),
+ PINCTRL_PIN(319, "P PAD VDD 30"),
+ PINCTRL_PIN(320, "P PAD GND 30"),
+ PINCTRL_PIN(321, "P PAD VSSIO 44"),
+ PINCTRL_PIN(322, "P PAD VDDIO 44"),
+ PINCTRL_PIN(323, "PO ETM TRACE PKT3"),
+ PINCTRL_PIN(324, "PO ETM TRACE PKT4"),
+ PINCTRL_PIN(325, "PO ETM TRACE PKT5"),
+ PINCTRL_PIN(326, "PO ETM TRACE PKT6"),
+ PINCTRL_PIN(327, "PO ETM TRACE PKT7"),
+ PINCTRL_PIN(328, "PO ETM PIPE STAT0"),
+ PINCTRL_PIN(329, "P PAD VDD 26"),
+ PINCTRL_PIN(330, "P PAD GND 26"),
+ PINCTRL_PIN(331, "P PAD VSSIO 40"),
+ PINCTRL_PIN(332, "P PAD VDDIO 40"),
+ PINCTRL_PIN(333, "PO ETM PIPE STAT1"),
+ PINCTRL_PIN(334, "PO ETM PIPE STAT2"),
+ PINCTRL_PIN(335, "PO ETM TRACE CLK"),
+ PINCTRL_PIN(336, "PO ETM TRACE SYNC"),
+ PINCTRL_PIN(337, "PIO ACC GPIO 33"),
+ PINCTRL_PIN(338, "PIO ACC GPIO 32"),
+ PINCTRL_PIN(339, "PIO ACC GPIO 30"),
+ PINCTRL_PIN(340, "PIO ACC GPIO 29"),
+ PINCTRL_PIN(341, "P PAD VDDIO 17"),
+ PINCTRL_PIN(342, "P PAD VSSIO 17"),
+ PINCTRL_PIN(343, "P PAD GND 15"),
+ PINCTRL_PIN(344, "P PAD VDD 15"),
+ PINCTRL_PIN(345, "PIO ACC GPIO 28"),
+ PINCTRL_PIN(346, "PIO ACC GPIO 27"),
+ PINCTRL_PIN(347, "PIO ACC GPIO 16"),
+ PINCTRL_PIN(348, "PI TAP TMS"),
+ PINCTRL_PIN(349, "PI TAP TDI"),
+ PINCTRL_PIN(350, "PO TAP TDO"),
+ PINCTRL_PIN(351, "PI TAP RST N"),
+ /* Pads along the left edge of the chip */
+ PINCTRL_PIN(352, "PI EMU MODE 0"),
+ PINCTRL_PIN(353, "PO TAP RET CLK"),
+ PINCTRL_PIN(354, "PI TAP CLK"),
+ PINCTRL_PIN(355, "PO EMIF 0 SD CS N"),
+ PINCTRL_PIN(356, "PO EMIF 0 SD CAS N"),
+ PINCTRL_PIN(357, "PO EMIF 0 SD WE N"),
+ PINCTRL_PIN(358, "P PAD VDDIO 1"),
+ PINCTRL_PIN(359, "P PAD VSSIO 1"),
+ PINCTRL_PIN(360, "P PAD GND 1"),
+ PINCTRL_PIN(361, "P PAD VDD 1"),
+ PINCTRL_PIN(362, "PO EMIF 0 SD CKE"),
+ PINCTRL_PIN(363, "PO EMIF 0 SD DQML"),
+ PINCTRL_PIN(364, "PO EMIF 0 SD DQMU"),
+ PINCTRL_PIN(365, "PO EMIF 0 SD RAS N"),
+ PINCTRL_PIN(366, "PIO EMIF 0 D15"),
+ PINCTRL_PIN(367, "PO EMIF 0 A15"),
+ PINCTRL_PIN(368, "PIO EMIF 0 D14"),
+ PINCTRL_PIN(369, "PO EMIF 0 A14"),
+ PINCTRL_PIN(370, "PIO EMIF 0 D13"),
+ PINCTRL_PIN(371, "PO EMIF 0 A13"),
+ PINCTRL_PIN(372, "P PAD VDDIO 2"),
+ PINCTRL_PIN(373, "P PAD VSSIO 2"),
+ PINCTRL_PIN(374, "P PAD GND 2"),
+ PINCTRL_PIN(375, "P PAD VDD 2"),
+ PINCTRL_PIN(376, "PIO EMIF 0 D12"),
+ PINCTRL_PIN(377, "PO EMIF 0 A12"),
+ PINCTRL_PIN(378, "PIO EMIF 0 D11"),
+ PINCTRL_PIN(379, "PO EMIF 0 A11"),
+ PINCTRL_PIN(380, "PIO EMIF 0 D10"),
+ PINCTRL_PIN(381, "PO EMIF 0 A10"),
+ PINCTRL_PIN(382, "PIO EMIF 0 D09"),
+ PINCTRL_PIN(383, "PO EMIF 0 A09"),
+ PINCTRL_PIN(384, "PIO EMIF 0 D08"),
+ PINCTRL_PIN(385, "PO EMIF 0 A08"),
+ PINCTRL_PIN(386, "PIO EMIF 0 D07"),
+ PINCTRL_PIN(387, "PO EMIF 0 A07"),
+ PINCTRL_PIN(388, "P PAD VDDIO 3"),
+ PINCTRL_PIN(389, "P PAD VSSIO 3"),
+ PINCTRL_PIN(390, "P PAD GND 3"),
+ PINCTRL_PIN(391, "P PAD VDD 3"),
+ PINCTRL_PIN(392, "PO EFUSE RDOUT1"),
+ PINCTRL_PIN(393, "PIO EMIF 0 D06"),
+ PINCTRL_PIN(394, "PO EMIF 0 A06"),
+ PINCTRL_PIN(395, "PIO EMIF 0 D05"),
+ PINCTRL_PIN(396, "PO EMIF 0 A05"),
+ PINCTRL_PIN(397, "PIO EMIF 0 D04"),
+ PINCTRL_PIN(398, "PO EMIF 0 A04"),
+ PINCTRL_PIN(399, "A PADS/A VDDCO1v82v5 GND 80U SF LIN VDDCO AF"),
+ PINCTRL_PIN(400, "PWR VDDCO AF"),
+ PINCTRL_PIN(401, "PWR EFUSE HV1"),
+ PINCTRL_PIN(402, "P PAD VSSIO 4"),
+ PINCTRL_PIN(403, "P PAD VDDIO 4"),
+ PINCTRL_PIN(404, "P PAD GND 4"),
+ PINCTRL_PIN(405, "P PAD VDD 4"),
+ PINCTRL_PIN(406, "PIO EMIF 0 D03"),
+ PINCTRL_PIN(407, "PO EMIF 0 A03"),
+ PINCTRL_PIN(408, "PWR EFUSE HV2"),
+ PINCTRL_PIN(409, "PWR EFUSE HV3"),
+ PINCTRL_PIN(410, "PIO EMIF 0 D02"),
+ PINCTRL_PIN(411, "PO EMIF 0 A02"),
+ PINCTRL_PIN(412, "PIO EMIF 0 D01"),
+ PINCTRL_PIN(413, "P PAD VDDIO 5"),
+ PINCTRL_PIN(414, "P PAD VSSIO 5"),
+ PINCTRL_PIN(415, "P PAD GND 5"),
+ PINCTRL_PIN(416, "P PAD VDD 5"),
+ PINCTRL_PIN(417, "PO EMIF 0 A01"),
+ PINCTRL_PIN(418, "PIO EMIF 0 D00"),
+ PINCTRL_PIN(419, "IF 0 SD CLK"),
+ PINCTRL_PIN(420, "APP SPI CLK"),
+ PINCTRL_PIN(421, "APP SPI DO"),
+ PINCTRL_PIN(422, "APP SPI DI"),
+ PINCTRL_PIN(423, "APP SPI CS0"),
+ PINCTRL_PIN(424, "APP SPI CS1"),
+ PINCTRL_PIN(425, "APP SPI CS2"),
+ PINCTRL_PIN(426, "PIO APP GPIO 10"),
+ PINCTRL_PIN(427, "P PAD VDDIO 41"),
+ PINCTRL_PIN(428, "P PAD VSSIO 41"),
+ PINCTRL_PIN(429, "P PAD GND 6"),
+ PINCTRL_PIN(430, "P PAD VDD 6"),
+ PINCTRL_PIN(431, "PIO ACC SDIO0 CMD"),
+ PINCTRL_PIN(432, "PIO ACC SDIO0 CK"),
+ PINCTRL_PIN(433, "PIO ACC SDIO0 D3"),
+ PINCTRL_PIN(434, "PIO ACC SDIO0 D2"),
+ PINCTRL_PIN(435, "PIO ACC SDIO0 D1"),
+ PINCTRL_PIN(436, "PIO ACC SDIO0 D0"),
+ PINCTRL_PIN(437, "PIO USB PU"),
+ PINCTRL_PIN(438, "PIO USB SP"),
+ PINCTRL_PIN(439, "PIO USB DAT VP"),
+ PINCTRL_PIN(440, "PIO USB SE0 VM"),
+ PINCTRL_PIN(441, "PIO USB OE"),
+ PINCTRL_PIN(442, "PIO USB SUSP"),
+ PINCTRL_PIN(443, "P PAD VSSIO 6"),
+ PINCTRL_PIN(444, "P PAD VDDIO 6"),
+ PINCTRL_PIN(445, "PIO USB PUEN"),
+ PINCTRL_PIN(446, "PIO ACC UART0 RX"),
+ PINCTRL_PIN(447, "PIO ACC UART0 TX"),
+ PINCTRL_PIN(448, "PIO ACC UART0 CTS"),
+ PINCTRL_PIN(449, "PIO ACC UART0 RTS"),
+ PINCTRL_PIN(450, "PIO ACC UART3 RX"),
+ PINCTRL_PIN(451, "PIO ACC UART3 TX"),
+ PINCTRL_PIN(452, "PIO ACC UART3 CTS"),
+ PINCTRL_PIN(453, "PIO ACC UART3 RTS"),
+ PINCTRL_PIN(454, "PIO ACC IRDA TX"),
+ PINCTRL_PIN(455, "P PAD VDDIO 7"),
+ PINCTRL_PIN(456, "P PAD VSSIO 7"),
+ PINCTRL_PIN(457, "P PAD GND 7"),
+ PINCTRL_PIN(458, "P PAD VDD 7"),
+ PINCTRL_PIN(459, "PIO ACC IRDA RX"),
+ PINCTRL_PIN(460, "PIO ACC PCM I2S CLK"),
+ PINCTRL_PIN(461, "PIO ACC PCM I2S WS"),
+ PINCTRL_PIN(462, "PIO ACC PCM I2S DATA A"),
+ PINCTRL_PIN(463, "PIO ACC PCM I2S DATA B"),
+ PINCTRL_PIN(464, "PO SIM CLK"),
+ PINCTRL_PIN(465, "PIO ACC IRDA SD"),
+ PINCTRL_PIN(466, "PIO SIM DATA"),
+};
+
+/**
+ * @dev: a pointer back to containing device
+ * @virtbase: the offset to the controller in virtual memory
+ */
+struct u300_pmx {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+ u32 phybase;
+ u32 physize;
+ void __iomem *virtbase;
+};
+
+/**
+ * u300_pmx_registers - the array of registers read/written for each pinmux
+ * shunt setting
+ */
+const u32 u300_pmx_registers[] = {
+ U300_SYSCON_PMC1LR,
+ U300_SYSCON_PMC1HR,
+ U300_SYSCON_PMC2R,
+ U300_SYSCON_PMC3R,
+ U300_SYSCON_PMC4R,
+};
+
+/**
+ * struct u300_pin_group - describes a U300 pin group
+ * @name: the name of this specific pin group
+ * @pins: an array of discrete physical pins used in this group, taken
+ * from the driver-local pin enumeration space
+ * @num_pins: the number of pins in this group array, i.e. the number of
+ * elements in .pins so we can iterate over that array
+ */
+struct u300_pin_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned num_pins;
+};
+
+/**
+ * struct pmx_onmask - mask bits to enable/disable padmux
+ * @mask: mask bits to disable
+ * @val: mask bits to enable
+ *
+ * onmask lazy dog:
+ * onmask = {
+ * {"PMC1LR" mask, "PMC1LR" value},
+ * {"PMC1HR" mask, "PMC1HR" value},
+ * {"PMC2R" mask, "PMC2R" value},
+ * {"PMC3R" mask, "PMC3R" value},
+ * {"PMC4R" mask, "PMC4R" value}
+ * }
+ */
+struct u300_pmx_mask {
+ u16 mask;
+ u16 bits;
+};
+
+/* The chip power pins are VDD, GND, VDDIO and VSSIO */
+static const unsigned power_pins[] = { 0, 1, 3, 31, 46, 47, 49, 50, 61, 62, 63,
+ 64, 78, 79, 80, 81, 92, 93, 94, 95, 101, 102, 103, 104, 115, 116, 117,
+ 118, 130, 131, 132, 133, 145, 146, 147, 148, 159, 160, 172, 173, 174,
+ 175, 187, 188, 189, 190, 201, 202, 211, 212, 213, 214, 215, 218, 223,
+ 224, 225, 226, 231, 232, 237, 238, 239, 240, 245, 246, 251, 252, 256,
+ 257, 258, 259, 264, 265, 270, 271, 276, 277, 278, 279, 284, 285, 290,
+ 291, 295, 296, 299, 300, 301, 302, 303, 309, 310, 311, 312, 319, 320,
+ 321, 322, 329, 330, 331, 332, 341, 342, 343, 344, 358, 359, 360, 361,
+ 372, 373, 374, 375, 388, 389, 390, 391, 402, 403, 404, 405, 413, 414,
+ 415, 416, 427, 428, 429, 430, 443, 444, 455, 456, 457, 458 };
+static const unsigned emif0_pins[] = { 355, 356, 357, 362, 363, 364, 365, 366,
+ 367, 368, 369, 370, 371, 376, 377, 378, 379, 380, 381, 382, 383, 384,
+ 385, 386, 387, 393, 394, 395, 396, 397, 398, 406, 407, 410, 411, 412,
+ 417, 418 };
+static const unsigned emif1_pins[] = { 216, 217, 219, 220, 221, 222, 227, 228,
+ 229, 230, 233, 234, 235, 236, 241, 242, 243, 244, 247, 248, 249, 250,
+ 253, 254, 255, 260, 261, 262, 263, 266, 267, 268, 269, 272, 273, 274,
+ 275, 280, 281, 282, 283, 286, 287, 288, 289, 292, 293, 294, 297, 298,
+ 304, 305, 306, 307, 308, 313, 314, 315 };
+static const unsigned uart0_pins[] = { 134, 135, 136, 137 };
+static const unsigned mmc0_pins[] = { 166, 167, 168, 169, 170, 171, 176, 177 };
+static const unsigned spi0_pins[] = { 420, 421, 422, 423, 424, 425 };
+
+static const struct u300_pmx_mask emif0_mask[] = {
+ {0, 0},
+ {0, 0},
+ {0, 0},
+ {0, 0},
+ {0, 0},
+};
+
+static const struct u300_pmx_mask emif1_mask[] = {
+ /*
+ * This connects the SDRAM to CS2 and a NAND flash to
+ * CS0 on the EMIF.
+ */
+ {
+ U300_SYSCON_PMC1LR_EMIF_1_CS2_MASK |
+ U300_SYSCON_PMC1LR_EMIF_1_CS1_MASK |
+ U300_SYSCON_PMC1LR_EMIF_1_CS0_MASK |
+ U300_SYSCON_PMC1LR_EMIF_1_MASK,
+ U300_SYSCON_PMC1LR_EMIF_1_CS2_SDRAM |
+ U300_SYSCON_PMC1LR_EMIF_1_CS1_STATIC |
+ U300_SYSCON_PMC1LR_EMIF_1_CS0_NFIF |
+ U300_SYSCON_PMC1LR_EMIF_1_SDRAM0
+ },
+ {0, 0},
+ {0, 0},
+ {0, 0},
+ {0, 0},
+};
+
+static const struct u300_pmx_mask uart0_mask[] = {
+ {0, 0},
+ {
+ U300_SYSCON_PMC1HR_APP_UART0_1_MASK |
+ U300_SYSCON_PMC1HR_APP_UART0_2_MASK,
+ U300_SYSCON_PMC1HR_APP_UART0_1_UART0 |
+ U300_SYSCON_PMC1HR_APP_UART0_2_UART0
+ },
+ {0, 0},
+ {0, 0},
+ {0, 0},
+};
+
+static const struct u300_pmx_mask mmc0_mask[] = {
+ { U300_SYSCON_PMC1LR_MMCSD_MASK, U300_SYSCON_PMC1LR_MMCSD_MMCSD},
+ {0, 0},
+ {0, 0},
+ {0, 0},
+ { U300_SYSCON_PMC4R_APP_MISC_12_MASK,
+ U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO }
+};
+
+static const struct u300_pmx_mask spi0_mask[] = {
+ {0, 0},
+ {
+ U300_SYSCON_PMC1HR_APP_SPI_2_MASK |
+ U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK |
+ U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK,
+ U300_SYSCON_PMC1HR_APP_SPI_2_SPI |
+ U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI |
+ U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI
+ },
+ {0, 0},
+ {0, 0},
+ {0, 0}
+};
+
+static const struct u300_pin_group u300_pin_groups[] = {
+ {
+ .name = "powergrp",
+ .pins = power_pins,
+ .num_pins = ARRAY_SIZE(power_pins),
+ },
+ {
+ .name = "emif0grp",
+ .pins = emif0_pins,
+ .num_pins = ARRAY_SIZE(emif0_pins),
+ },
+ {
+ .name = "emif1grp",
+ .pins = emif1_pins,
+ .num_pins = ARRAY_SIZE(emif1_pins),
+ },
+ {
+ .name = "uart0grp",
+ .pins = uart0_pins,
+ .num_pins = ARRAY_SIZE(uart0_pins),
+ },
+ {
+ .name = "mmc0grp",
+ .pins = mmc0_pins,
+ .num_pins = ARRAY_SIZE(mmc0_pins),
+ },
+ {
+ .name = "spi0grp",
+ .pins = spi0_pins,
+ .num_pins = ARRAY_SIZE(spi0_pins),
+ },
+};
+
+static int u300_list_groups(struct pinctrl_dev *pctldev, unsigned selector)
+{
+ if (selector >= ARRAY_SIZE(u300_pin_groups))
+ return -EINVAL;
+ return 0;
+}
+
+static const char *u300_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ if (selector >= ARRAY_SIZE(u300_pin_groups))
+ return NULL;
+ return u300_pin_groups[selector].name;
+}
+
+static int u300_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ if (selector >= ARRAY_SIZE(u300_pin_groups))
+ return -EINVAL;
+ *pins = u300_pin_groups[selector].pins;
+ *num_pins = u300_pin_groups[selector].num_pins;
+ return 0;
+}
+
+static void u300_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned offset)
+{
+ seq_printf(s, " " DRIVER_NAME);
+}
+
+static struct pinctrl_ops u300_pctrl_ops = {
+ .list_groups = u300_list_groups,
+ .get_group_name = u300_get_group_name,
+ .get_group_pins = u300_get_group_pins,
+ .pin_dbg_show = u300_pin_dbg_show,
+};
+
+/*
+ * Here we define the available functions and their corresponding pin groups
+ */
+
+/**
+ * struct u300_pmx_func - describes U300 pinmux functions
+ * @name: the name of this specific function
+ * @groups: corresponding pin groups
+ * @onmask: bits to set to enable this when doing pin muxing
+ */
+struct u300_pmx_func {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+ const struct u300_pmx_mask *mask;
+};
+
+static const char * const powergrps[] = { "powergrp" };
+static const char * const emif0grps[] = { "emif0grp" };
+static const char * const emif1grps[] = { "emif1grp" };
+static const char * const uart0grps[] = { "uart0grp" };
+static const char * const mmc0grps[] = { "mmc0grp" };
+static const char * const spi0grps[] = { "spi0grp" };
+
+static const struct u300_pmx_func u300_pmx_functions[] = {
+ {
+ .name = "power",
+ .groups = powergrps,
+ .num_groups = ARRAY_SIZE(powergrps),
+ /* Mask is N/A */
+ },
+ {
+ .name = "emif0",
+ .groups = emif0grps,
+ .num_groups = ARRAY_SIZE(emif0grps),
+ .mask = emif0_mask,
+ },
+ {
+ .name = "emif1",
+ .groups = emif1grps,
+ .num_groups = ARRAY_SIZE(emif1grps),
+ .mask = emif1_mask,
+ },
+ {
+ .name = "uart0",
+ .groups = uart0grps,
+ .num_groups = ARRAY_SIZE(uart0grps),
+ .mask = uart0_mask,
+ },
+ {
+ .name = "mmc0",
+ .groups = mmc0grps,
+ .num_groups = ARRAY_SIZE(mmc0grps),
+ .mask = mmc0_mask,
+ },
+ {
+ .name = "spi0",
+ .groups = spi0grps,
+ .num_groups = ARRAY_SIZE(spi0grps),
+ .mask = spi0_mask,
+ },
+};
+
+static void u300_pmx_endisable(struct u300_pmx *upmx, unsigned selector,
+ bool enable)
+{
+ u16 regval, val, mask;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(u300_pmx_registers); i++) {
+ if (enable)
+ val = u300_pmx_functions[selector].mask->bits;
+ else
+ val = 0;
+
+ mask = u300_pmx_functions[selector].mask->mask;
+ if (mask != 0) {
+ regval = readw(upmx->virtbase + u300_pmx_registers[i]);
+ regval &= ~mask;
+ regval |= val;
+ writew(regval, upmx->virtbase + u300_pmx_registers[i]);
+ }
+ }
+}
+
+static int u300_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
+{
+ struct u300_pmx *upmx;
+
+ /* There is nothing to do with the power pins */
+ if (selector == 0)
+ return 0;
+
+ upmx = pinctrl_dev_get_drvdata(pctldev);
+ u300_pmx_endisable(upmx, selector, true);
+
+ return 0;
+}
+
+static void u300_pmx_disable(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
+{
+ struct u300_pmx *upmx;
+
+ /* There is nothing to do with the power pins */
+ if (selector == 0)
+ return;
+
+ upmx = pinctrl_dev_get_drvdata(pctldev);
+ u300_pmx_endisable(upmx, selector, false);
+}
+
+static int u300_pmx_list_funcs(struct pinctrl_dev *pctldev, unsigned selector)
+{
+ if (selector >= ARRAY_SIZE(u300_pmx_functions))
+ return -EINVAL;
+ return 0;
+}
+
+static const char *u300_pmx_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return u300_pmx_functions[selector].name;
+}
+
+static int u300_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = u300_pmx_functions[selector].groups;
+ *num_groups = u300_pmx_functions[selector].num_groups;
+ return 0;
+}
+
+static struct pinmux_ops u300_pmx_ops = {
+ .list_functions = u300_pmx_list_funcs,
+ .get_function_name = u300_pmx_get_func_name,
+ .get_function_groups = u300_pmx_get_groups,
+ .enable = u300_pmx_enable,
+ .disable = u300_pmx_disable,
+};
+
+/*
+ * FIXME: this will be set to sane values as this driver engulfs
+ * drivers/gpio/gpio-u300.c and we really know this stuff.
+ */
+static struct pinctrl_gpio_range u300_gpio_range = {
+ .name = "COH901*",
+ .id = 0,
+ .base = 0,
+ .npins = 64,
+};
+
+static struct pinctrl_desc u300_pmx_desc = {
+ .name = DRIVER_NAME,
+ .pins = u300_pads,
+ .npins = ARRAY_SIZE(u300_pads),
+ .maxpin = U300_NUM_PADS-1,
+ .pctlops = &u300_pctrl_ops,
+ .pmxops = &u300_pmx_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init u300_pmx_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct u300_pmx *upmx;
+ struct resource *res;
+
+ /* Create state holders etc for this driver */
+ upmx = devm_kzalloc(&pdev->dev, sizeof(*upmx), GFP_KERNEL);
+ if (!upmx)
+ return -ENOMEM;
+
+ upmx->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENOENT;
+ goto out_no_resource;
+ }
+ upmx->phybase = res->start;
+ upmx->physize = resource_size(res);
+
+ if (request_mem_region(upmx->phybase, upmx->physize,
+ DRIVER_NAME) == NULL) {
+ ret = -ENOMEM;
+ goto out_no_memregion;
+ }
+
+ upmx->virtbase = ioremap(upmx->phybase, upmx->physize);
+ if (!upmx->virtbase) {
+ ret = -ENOMEM;
+ goto out_no_remap;
+ }
+
+ upmx->pctl = pinctrl_register(&u300_pmx_desc, &pdev->dev, upmx);
+ if (!upmx->pctl) {
+ dev_err(&pdev->dev, "could not register U300 pinmux driver\n");
+ ret = -EINVAL;
+ goto out_no_pmx;
+ }
+
+ /* We will handle a range of GPIO pins */
+ pinctrl_add_gpio_range(upmx->pctl, &u300_gpio_range);
+
+ platform_set_drvdata(pdev, upmx);
+
+ dev_info(&pdev->dev, "initialized U300 pinmux driver\n");
+
+ return 0;
+
+out_no_pmx:
+ iounmap(upmx->virtbase);
+out_no_remap:
+ platform_set_drvdata(pdev, NULL);
+out_no_memregion:
+ release_mem_region(upmx->phybase, upmx->physize);
+out_no_resource:
+ devm_kfree(&pdev->dev, upmx);
+ return ret;
+}
+
+static int __exit u300_pmx_remove(struct platform_device *pdev)
+{
+ struct u300_pmx *upmx = platform_get_drvdata(pdev);
+
+ pinctrl_remove_gpio_range(upmx->pctl, &u300_gpio_range);
+ pinctrl_unregister(upmx->pctl);
+ iounmap(upmx->virtbase);
+ release_mem_region(upmx->phybase, upmx->physize);
+ platform_set_drvdata(pdev, NULL);
+ devm_kfree(&pdev->dev, upmx);
+
+ return 0;
+}
+
+static struct platform_driver u300_pmx_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(u300_pmx_remove),
+};
+
+static int __init u300_pmx_init(void)
+{
+ return platform_driver_probe(&u300_pmx_driver, u300_pmx_probe);
+}
+arch_initcall(u300_pmx_init);
+
+static void __exit u300_pmx_exit(void)
+{
+ platform_driver_unregister(&u300_pmx_driver);
+}
+module_exit(u300_pmx_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("U300 pin control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
new file mode 100644
index 00000000000..a5467f8709e
--- /dev/null
+++ b/drivers/pinctrl/pinmux.c
@@ -0,0 +1,1190 @@
+/*
+ * Core driver for the pin muxing portions of the pin control subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#define pr_fmt(fmt) "pinmux core: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/radix-tree.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinmux.h>
+#include "core.h"
+
+/* List of pinmuxes */
+static DEFINE_MUTEX(pinmux_list_mutex);
+static LIST_HEAD(pinmux_list);
+
+/* List of pinmux hogs */
+static DEFINE_MUTEX(pinmux_hoglist_mutex);
+static LIST_HEAD(pinmux_hoglist);
+
+/* Global pinmux maps, we allow one set only */
+static struct pinmux_map const *pinmux_maps;
+static unsigned pinmux_maps_num;
+
+/**
+ * struct pinmux_group - group list item for pinmux groups
+ * @node: pinmux group list node
+ * @group_selector: the group selector for this group
+ */
+struct pinmux_group {
+ struct list_head node;
+ unsigned group_selector;
+};
+
+/**
+ * struct pinmux - per-device pinmux state holder
+ * @node: global list node
+ * @dev: the device using this pinmux
+ * @usecount: the number of active users of this mux setting, used to keep
+ * track of nested use cases
+ * @pins: an array of discrete physical pins used in this mapping, taken
+ * from the global pin enumeration space (copied from pinmux map)
+ * @num_pins: the number of pins in this mapping array, i.e. the number of
+ * elements in .pins so we can iterate over that array (copied from
+ * pinmux map)
+ * @pctldev: pin control device handling this pinmux
+ * @func_selector: the function selector for the pinmux device handling
+ * this pinmux
+ * @groups: the group selectors for the pinmux device and
+ * selector combination handling this pinmux, this is a list that
+ * will be traversed on all pinmux operations such as
+ * get/put/enable/disable
+ * @mutex: a lock for the pinmux state holder
+ */
+struct pinmux {
+ struct list_head node;
+ struct device *dev;
+ unsigned usecount;
+ struct pinctrl_dev *pctldev;
+ unsigned func_selector;
+ struct list_head groups;
+ struct mutex mutex;
+};
+
+/**
+ * struct pinmux_hog - a list item to stash mux hogs
+ * @node: pinmux hog list node
+ * @map: map entry responsible for this hogging
+ * @pmx: the pinmux hogged by this item
+ */
+struct pinmux_hog {
+ struct list_head node;
+ struct pinmux_map const *map;
+ struct pinmux *pmx;
+};
+
+/**
+ * pin_request() - request a single pin to be muxed in, typically for GPIO
+ * @pin: the pin number in the global pin space
+ * @function: a functional name to give to this pin, passed to the driver
+ * so it knows what function to mux in, e.g. the string "gpioNN"
+ * means that you want to mux in the pin for use as GPIO number NN
+ * @gpio: if this request concerns a single GPIO pin
+ * @gpio_range: the range matching the GPIO pin if this is a request for a
+ * single GPIO pin
+ */
+static int pin_request(struct pinctrl_dev *pctldev,
+ int pin, const char *function, bool gpio,
+ struct pinctrl_gpio_range *gpio_range)
+{
+ struct pin_desc *desc;
+ const struct pinmux_ops *ops = pctldev->desc->pmxops;
+ int status = -EINVAL;
+
+ dev_dbg(&pctldev->dev, "request pin %d for %s\n", pin, function);
+
+ if (!pin_is_valid(pctldev, pin)) {
+ dev_err(&pctldev->dev, "pin is invalid\n");
+ return -EINVAL;
+ }
+
+ if (!function) {
+ dev_err(&pctldev->dev, "no function name given\n");
+ return -EINVAL;
+ }
+
+ desc = pin_desc_get(pctldev, pin);
+ if (desc == NULL) {
+ dev_err(&pctldev->dev,
+ "pin is not registered so it cannot be requested\n");
+ goto out;
+ }
+
+ spin_lock(&desc->lock);
+ if (desc->mux_function) {
+ spin_unlock(&desc->lock);
+ dev_err(&pctldev->dev,
+ "pin already requested\n");
+ goto out;
+ }
+ desc->mux_function = function;
+ spin_unlock(&desc->lock);
+
+ /* Let each pin increase references to this module */
+ if (!try_module_get(pctldev->owner)) {
+ dev_err(&pctldev->dev,
+ "could not increase module refcount for pin %d\n",
+ pin);
+ status = -EINVAL;
+ goto out_free_pin;
+ }
+
+ /*
+ * If there is no kind of request function for the pin we just assume
+ * we got it by default and proceed.
+ */
+ if (gpio && ops->gpio_request_enable)
+ /* This requests and enables a single GPIO pin */
+ status = ops->gpio_request_enable(pctldev, gpio_range, pin);
+ else if (ops->request)
+ status = ops->request(pctldev, pin);
+ else
+ status = 0;
+
+ if (status)
+ dev_err(&pctldev->dev, "->request on device %s failed "
+ "for pin %d\n",
+ pctldev->desc->name, pin);
+out_free_pin:
+ if (status) {
+ spin_lock(&desc->lock);
+ desc->mux_function = NULL;
+ spin_unlock(&desc->lock);
+ }
+out:
+ if (status)
+ dev_err(&pctldev->dev, "pin-%d (%s) status %d\n",
+ pin, function ? : "?", status);
+
+ return status;
+}
+
+/**
+ * pin_free() - release a single muxed in pin so something else can be muxed
+ * @pctldev: pin controller device handling this pin
+ * @pin: the pin to free
+ * @free_func: whether to free the pin's assigned function name string
+ */
+static void pin_free(struct pinctrl_dev *pctldev, int pin, int free_func)
+{
+ const struct pinmux_ops *ops = pctldev->desc->pmxops;
+ struct pin_desc *desc;
+
+ desc = pin_desc_get(pctldev, pin);
+ if (desc == NULL) {
+ dev_err(&pctldev->dev,
+ "pin is not registered so it cannot be freed\n");
+ return;
+ }
+
+ if (ops->free)
+ ops->free(pctldev, pin);
+
+ spin_lock(&desc->lock);
+ if (free_func)
+ kfree(desc->mux_function);
+ desc->mux_function = NULL;
+ spin_unlock(&desc->lock);
+ module_put(pctldev->owner);
+}
+
+/**
+ * pinmux_request_gpio() - request a single pin to be muxed in as GPIO
+ * @gpio: the GPIO pin number from the GPIO subsystem number space
+ */
+int pinmux_request_gpio(unsigned gpio)
+{
+ char gpiostr[16];
+ const char *function;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_gpio_range *range;
+ int ret;
+ int pin;
+
+ ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+ if (ret)
+ return -EINVAL;
+
+ /* Convert to the pin controllers number space */
+ pin = gpio - range->base;
+
+ /* Conjure some name stating what chip and pin this is taken by */
+ snprintf(gpiostr, 15, "%s:%d", range->name, gpio);
+
+ function = kstrdup(gpiostr, GFP_KERNEL);
+ if (!function)
+ return -EINVAL;
+
+ ret = pin_request(pctldev, pin, function, true, range);
+ if (ret < 0)
+ kfree(function);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pinmux_request_gpio);
+
+/**
+ * pinmux_free_gpio() - free a single pin, currently used as GPIO
+ * @gpio: the GPIO pin number from the GPIO subsystem number space
+ */
+void pinmux_free_gpio(unsigned gpio)
+{
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_gpio_range *range;
+ int ret;
+ int pin;
+
+ ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+ if (ret)
+ return;
+
+ /* Convert to the pin controllers number space */
+ pin = gpio - range->base;
+
+ pin_free(pctldev, pin, true);
+}
+EXPORT_SYMBOL_GPL(pinmux_free_gpio);
+
+/**
+ * pinmux_register_mappings() - register a set of pinmux mappings
+ * @maps: the pinmux mappings table to register
+ * @num_maps: the number of maps in the mapping table
+ *
+ * Only call this once during initialization of your machine, the function is
+ * tagged as __init and won't be callable after init has completed. The map
+ * passed into this function will be owned by the pinmux core and cannot be
+ * free:d.
+ */
+int __init pinmux_register_mappings(struct pinmux_map const *maps,
+ unsigned num_maps)
+{
+ int i;
+
+ if (pinmux_maps != NULL) {
+ pr_err("pinmux mappings already registered, you can only "
+ "register one set of maps\n");
+ return -EINVAL;
+ }
+
+ pr_debug("add %d pinmux maps\n", num_maps);
+ for (i = 0; i < num_maps; i++) {
+ /* Sanity check the mapping */
+ if (!maps[i].name) {
+ pr_err("failed to register map %d: "
+ "no map name given\n", i);
+ return -EINVAL;
+ }
+ if (!maps[i].ctrl_dev && !maps[i].ctrl_dev_name) {
+ pr_err("failed to register map %s (%d): "
+ "no pin control device given\n",
+ maps[i].name, i);
+ return -EINVAL;
+ }
+ if (!maps[i].function) {
+ pr_err("failed to register map %s (%d): "
+ "no function ID given\n", maps[i].name, i);
+ return -EINVAL;
+ }
+
+ if (!maps[i].dev && !maps[i].dev_name)
+ pr_debug("add system map %s function %s with no device\n",
+ maps[i].name,
+ maps[i].function);
+ else
+ pr_debug("register map %s, function %s\n",
+ maps[i].name,
+ maps[i].function);
+ }
+
+ pinmux_maps = maps;
+ pinmux_maps_num = num_maps;
+
+ return 0;
+}
+
+/**
+ * acquire_pins() - acquire all the pins for a certain funcion on a pinmux
+ * @pctldev: the device to take the pins on
+ * @func_selector: the function selector to acquire the pins for
+ * @group_selector: the group selector containing the pins to acquire
+ */
+static int acquire_pins(struct pinctrl_dev *pctldev,
+ unsigned func_selector,
+ unsigned group_selector)
+{
+ const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+ const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
+ const char *func = pmxops->get_function_name(pctldev,
+ func_selector);
+ const unsigned *pins;
+ unsigned num_pins;
+ int ret;
+ int i;
+
+ ret = pctlops->get_group_pins(pctldev, group_selector,
+ &pins, &num_pins);
+ if (ret)
+ return ret;
+
+ dev_dbg(&pctldev->dev, "requesting the %u pins from group %u\n",
+ num_pins, group_selector);
+
+ /* Try to allocate all pins in this group, one by one */
+ for (i = 0; i < num_pins; i++) {
+ ret = pin_request(pctldev, pins[i], func, false, NULL);
+ if (ret) {
+ dev_err(&pctldev->dev,
+ "could not get pin %d for function %s "
+ "on device %s - conflicting mux mappings?\n",
+ pins[i], func ? : "(undefined)",
+ pinctrl_dev_get_name(pctldev));
+ /* On error release all taken pins */
+ i--; /* this pin just failed */
+ for (; i >= 0; i--)
+ pin_free(pctldev, pins[i], false);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+/**
+ * release_pins() - release pins taken by earlier acquirement
+ * @pctldev: the device to free the pinx on
+ * @group_selector: the group selector containing the pins to free
+ */
+static void release_pins(struct pinctrl_dev *pctldev,
+ unsigned group_selector)
+{
+ const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+ const unsigned *pins;
+ unsigned num_pins;
+ int ret;
+ int i;
+
+ ret = pctlops->get_group_pins(pctldev, group_selector,
+ &pins, &num_pins);
+ if (ret) {
+ dev_err(&pctldev->dev, "could not get pins to release for "
+ "group selector %d\n",
+ group_selector);
+ return;
+ }
+ for (i = 0; i < num_pins; i++)
+ pin_free(pctldev, pins[i], false);
+}
+
+/**
+ * pinmux_get_group_selector() - returns the group selector for a group
+ * @pctldev: the pin controller handling the group
+ * @pin_group: the pin group to look up
+ */
+static int pinmux_get_group_selector(struct pinctrl_dev *pctldev,
+ const char *pin_group)
+{
+ const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+ unsigned group_selector = 0;
+
+ while (pctlops->list_groups(pctldev, group_selector) >= 0) {
+ const char *gname = pctlops->get_group_name(pctldev,
+ group_selector);
+ if (!strcmp(gname, pin_group)) {
+ dev_dbg(&pctldev->dev,
+ "found group selector %u for %s\n",
+ group_selector,
+ pin_group);
+ return group_selector;
+ }
+
+ group_selector++;
+ }
+
+ dev_err(&pctldev->dev, "does not have pin group %s\n",
+ pin_group);
+
+ return -EINVAL;
+}
+
+/**
+ * pinmux_check_pin_group() - check function and pin group combo
+ * @pctldev: device to check the pin group vs function for
+ * @func_selector: the function selector to check the pin group for, we have
+ * already looked this up in the calling function
+ * @pin_group: the pin group to match to the function
+ *
+ * This function will check that the pinmux driver can supply the
+ * selected pin group for a certain function, returns the group selector if
+ * the group and function selector will work fine together, else returns
+ * negative
+ */
+static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
+ unsigned func_selector,
+ const char *pin_group)
+{
+ const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
+ const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+ int ret;
+
+ /*
+ * If the driver does not support different pin groups for the
+ * functions, we only support group 0, and assume this exists.
+ */
+ if (!pctlops || !pctlops->list_groups)
+ return 0;
+
+ /*
+ * Passing NULL (no specific group) will select the first and
+ * hopefully only group of pins available for this function.
+ */
+ if (!pin_group) {
+ char const * const *groups;
+ unsigned num_groups;
+
+ ret = pmxops->get_function_groups(pctldev, func_selector,
+ &groups, &num_groups);
+ if (ret)
+ return ret;
+ if (num_groups < 1)
+ return -EINVAL;
+ ret = pinmux_get_group_selector(pctldev, groups[0]);
+ if (ret < 0) {
+ dev_err(&pctldev->dev,
+ "function %s wants group %s but the pin "
+ "controller does not seem to have that group\n",
+ pmxops->get_function_name(pctldev, func_selector),
+ groups[0]);
+ return ret;
+ }
+
+ if (num_groups > 1)
+ dev_dbg(&pctldev->dev,
+ "function %s support more than one group, "
+ "default-selecting first group %s (%d)\n",
+ pmxops->get_function_name(pctldev, func_selector),
+ groups[0],
+ ret);
+
+ return ret;
+ }
+
+ dev_dbg(&pctldev->dev,
+ "check if we have pin group %s on controller %s\n",
+ pin_group, pinctrl_dev_get_name(pctldev));
+
+ ret = pinmux_get_group_selector(pctldev, pin_group);
+ if (ret < 0) {
+ dev_dbg(&pctldev->dev,
+ "%s does not support pin group %s with function %s\n",
+ pinctrl_dev_get_name(pctldev),
+ pin_group,
+ pmxops->get_function_name(pctldev, func_selector));
+ }
+ return ret;
+}
+
+/**
+ * pinmux_search_function() - check pin control driver for a certain function
+ * @pctldev: device to check for function and position
+ * @map: function map containing the function and position to look for
+ * @func_selector: returns the applicable function selector if found
+ * @group_selector: returns the applicable group selector if found
+ *
+ * This will search the pinmux driver for an applicable
+ * function with a specific pin group, returns 0 if these can be mapped
+ * negative otherwise
+ */
+static int pinmux_search_function(struct pinctrl_dev *pctldev,
+ struct pinmux_map const *map,
+ unsigned *func_selector,
+ unsigned *group_selector)
+{
+ const struct pinmux_ops *ops = pctldev->desc->pmxops;
+ unsigned selector = 0;
+
+ /* See if this pctldev has this function */
+ while (ops->list_functions(pctldev, selector) >= 0) {
+ const char *fname = ops->get_function_name(pctldev,
+ selector);
+ int ret;
+
+ if (!strcmp(map->function, fname)) {
+ /* Found the function, check pin group */
+ ret = pinmux_check_pin_group(pctldev, selector,
+ map->group);
+ if (ret < 0)
+ return ret;
+
+ /* This function and group selector can be used */
+ *func_selector = selector;
+ *group_selector = ret;
+ return 0;
+
+ }
+ selector++;
+ }
+
+ pr_err("%s does not support function %s\n",
+ pinctrl_dev_get_name(pctldev), map->function);
+ return -EINVAL;
+}
+
+/**
+ * pinmux_enable_muxmap() - enable a map entry for a certain pinmux
+ */
+static int pinmux_enable_muxmap(struct pinctrl_dev *pctldev,
+ struct pinmux *pmx,
+ struct device *dev,
+ const char *devname,
+ struct pinmux_map const *map)
+{
+ unsigned func_selector;
+ unsigned group_selector;
+ struct pinmux_group *grp;
+ int ret;
+
+ /*
+ * Note that we're not locking the pinmux mutex here, because
+ * this is only called at pinmux initialization time when it
+ * has not been added to any list and thus is not reachable
+ * by anyone else.
+ */
+
+ if (pmx->pctldev && pmx->pctldev != pctldev) {
+ dev_err(&pctldev->dev,
+ "different pin control devices given for device %s, "
+ "function %s\n",
+ devname,
+ map->function);
+ return -EINVAL;
+ }
+ pmx->dev = dev;
+ pmx->pctldev = pctldev;
+
+ /* Now go into the driver and try to match a function and group */
+ ret = pinmux_search_function(pctldev, map, &func_selector,
+ &group_selector);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * If the function selector is already set, it needs to be identical,
+ * we support several groups with one function but not several
+ * functions with one or several groups in the same pinmux.
+ */
+ if (pmx->func_selector != UINT_MAX &&
+ pmx->func_selector != func_selector) {
+ dev_err(&pctldev->dev,
+ "dual function defines in the map for device %s\n",
+ devname);
+ return -EINVAL;
+ }
+ pmx->func_selector = func_selector;
+
+ /* Now add this group selector, we may have many of them */
+ grp = kmalloc(sizeof(struct pinmux_group), GFP_KERNEL);
+ if (!grp)
+ return -ENOMEM;
+ grp->group_selector = group_selector;
+ ret = acquire_pins(pctldev, func_selector, group_selector);
+ if (ret) {
+ kfree(grp);
+ return ret;
+ }
+ list_add(&grp->node, &pmx->groups);
+
+ return 0;
+}
+
+static void pinmux_free_groups(struct pinmux *pmx)
+{
+ struct list_head *node, *tmp;
+
+ list_for_each_safe(node, tmp, &pmx->groups) {
+ struct pinmux_group *grp =
+ list_entry(node, struct pinmux_group, node);
+ /* Release all pins taken by this group */
+ release_pins(pmx->pctldev, grp->group_selector);
+ list_del(node);
+ kfree(grp);
+ }
+}
+
+/**
+ * pinmux_get() - retrieves the pinmux for a certain device
+ * @dev: the device to get the pinmux for
+ * @name: an optional specific mux mapping name or NULL, the name is only
+ * needed if you want to have more than one mapping per device, or if you
+ * need an anonymous pinmux (not tied to any specific device)
+ */
+struct pinmux *pinmux_get(struct device *dev, const char *name)
+{
+
+ struct pinmux_map const *map = NULL;
+ struct pinctrl_dev *pctldev = NULL;
+ const char *devname = NULL;
+ struct pinmux *pmx;
+ bool found_map;
+ unsigned num_maps = 0;
+ int ret = -ENODEV;
+ int i;
+
+ /* We must have dev or ID or both */
+ if (!dev && !name)
+ return ERR_PTR(-EINVAL);
+
+ if (dev)
+ devname = dev_name(dev);
+
+ pr_debug("get mux %s for device %s\n", name,
+ devname ? devname : "(none)");
+
+ /*
+ * create the state cookie holder struct pinmux for each
+ * mapping, this is what consumers will get when requesting
+ * a pinmux handle with pinmux_get()
+ */
+ pmx = kzalloc(sizeof(struct pinmux), GFP_KERNEL);
+ if (pmx == NULL)
+ return ERR_PTR(-ENOMEM);
+ mutex_init(&pmx->mutex);
+ pmx->func_selector = UINT_MAX;
+ INIT_LIST_HEAD(&pmx->groups);
+
+ /* Iterate over the pinmux maps to locate the right ones */
+ for (i = 0; i < pinmux_maps_num; i++) {
+ map = &pinmux_maps[i];
+ found_map = false;
+
+ /*
+ * First, try to find the pctldev given in the map
+ */
+ pctldev = get_pinctrl_dev_from_dev(map->ctrl_dev,
+ map->ctrl_dev_name);
+ if (!pctldev) {
+ const char *devname = NULL;
+
+ if (map->ctrl_dev)
+ devname = dev_name(map->ctrl_dev);
+ else if (map->ctrl_dev_name)
+ devname = map->ctrl_dev_name;
+
+ pr_warning("could not find a pinctrl device for pinmux "
+ "function %s, fishy, they shall all have one\n",
+ map->function);
+ pr_warning("given pinctrl device name: %s",
+ devname ? devname : "UNDEFINED");
+
+ /* Continue to check the other mappings anyway... */
+ continue;
+ }
+
+ pr_debug("in map, found pctldev %s to handle function %s",
+ dev_name(&pctldev->dev), map->function);
+
+
+ /*
+ * If we're looking for a specific named map, this must match,
+ * else we loop and look for the next.
+ */
+ if (name != NULL) {
+ if (map->name == NULL)
+ continue;
+ if (strcmp(map->name, name))
+ continue;
+ }
+
+ /*
+ * This is for the case where no device name is given, we
+ * already know that the function name matches from above
+ * code.
+ */
+ if (!map->dev_name && (name != NULL))
+ found_map = true;
+
+ /* If the mapping has a device set up it must match */
+ if (map->dev_name &&
+ (!devname || !strcmp(map->dev_name, devname)))
+ /* MATCH! */
+ found_map = true;
+
+ /* If this map is applicable, then apply it */
+ if (found_map) {
+ ret = pinmux_enable_muxmap(pctldev, pmx, dev,
+ devname, map);
+ if (ret) {
+ pinmux_free_groups(pmx);
+ kfree(pmx);
+ return ERR_PTR(ret);
+ }
+ num_maps++;
+ }
+ }
+
+
+ /* We should have atleast one map, right */
+ if (!num_maps) {
+ pr_err("could not find any mux maps for device %s, ID %s\n",
+ devname ? devname : "(anonymous)",
+ name ? name : "(undefined)");
+ kfree(pmx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pr_debug("found %u mux maps for device %s, UD %s\n",
+ num_maps,
+ devname ? devname : "(anonymous)",
+ name ? name : "(undefined)");
+
+ /* Add the pinmux to the global list */
+ mutex_lock(&pinmux_list_mutex);
+ list_add(&pmx->node, &pinmux_list);
+ mutex_unlock(&pinmux_list_mutex);
+
+ return pmx;
+}
+EXPORT_SYMBOL_GPL(pinmux_get);
+
+/**
+ * pinmux_put() - release a previously claimed pinmux
+ * @pmx: a pinmux previously claimed by pinmux_get()
+ */
+void pinmux_put(struct pinmux *pmx)
+{
+ if (pmx == NULL)
+ return;
+
+ mutex_lock(&pmx->mutex);
+ if (pmx->usecount)
+ pr_warn("releasing pinmux with active users!\n");
+ /* Free the groups and all acquired pins */
+ pinmux_free_groups(pmx);
+ mutex_unlock(&pmx->mutex);
+
+ /* Remove from list */
+ mutex_lock(&pinmux_list_mutex);
+ list_del(&pmx->node);
+ mutex_unlock(&pinmux_list_mutex);
+
+ kfree(pmx);
+}
+EXPORT_SYMBOL_GPL(pinmux_put);
+
+/**
+ * pinmux_enable() - enable a certain pinmux setting
+ * @pmx: the pinmux to enable, previously claimed by pinmux_get()
+ */
+int pinmux_enable(struct pinmux *pmx)
+{
+ int ret = 0;
+
+ if (pmx == NULL)
+ return -EINVAL;
+ mutex_lock(&pmx->mutex);
+ if (pmx->usecount++ == 0) {
+ struct pinctrl_dev *pctldev = pmx->pctldev;
+ const struct pinmux_ops *ops = pctldev->desc->pmxops;
+ struct pinmux_group *grp;
+
+ list_for_each_entry(grp, &pmx->groups, node) {
+ ret = ops->enable(pctldev, pmx->func_selector,
+ grp->group_selector);
+ if (ret) {
+ /*
+ * TODO: call disable() on all groups we called
+ * enable() on to this point?
+ */
+ pmx->usecount--;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&pmx->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pinmux_enable);
+
+/**
+ * pinmux_disable() - disable a certain pinmux setting
+ * @pmx: the pinmux to disable, previously claimed by pinmux_get()
+ */
+void pinmux_disable(struct pinmux *pmx)
+{
+ if (pmx == NULL)
+ return;
+
+ mutex_lock(&pmx->mutex);
+ if (--pmx->usecount == 0) {
+ struct pinctrl_dev *pctldev = pmx->pctldev;
+ const struct pinmux_ops *ops = pctldev->desc->pmxops;
+ struct pinmux_group *grp;
+
+ list_for_each_entry(grp, &pmx->groups, node) {
+ ops->disable(pctldev, pmx->func_selector,
+ grp->group_selector);
+ }
+ }
+ mutex_unlock(&pmx->mutex);
+}
+EXPORT_SYMBOL_GPL(pinmux_disable);
+
+int pinmux_check_ops(const struct pinmux_ops *ops)
+{
+ /* Check that we implement required operations */
+ if (!ops->list_functions ||
+ !ops->get_function_name ||
+ !ops->get_function_groups ||
+ !ops->enable ||
+ !ops->disable)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Hog a single map entry and add to the hoglist */
+static int pinmux_hog_map(struct pinctrl_dev *pctldev,
+ struct pinmux_map const *map)
+{
+ struct pinmux_hog *hog;
+ struct pinmux *pmx;
+ int ret;
+
+ if (map->dev || map->dev_name) {
+ /*
+ * TODO: the day we have device tree support, we can
+ * traverse the device tree and hog to specific device nodes
+ * without any problems, so then we can hog pinmuxes for
+ * all devices that just want a static pin mux at this point.
+ */
+ dev_err(&pctldev->dev, "map %s wants to hog a non-system "
+ "pinmux, this is not going to work\n", map->name);
+ return -EINVAL;
+ }
+
+ hog = kzalloc(sizeof(struct pinmux_hog), GFP_KERNEL);
+ if (!hog)
+ return -ENOMEM;
+
+ pmx = pinmux_get(NULL, map->name);
+ if (IS_ERR(pmx)) {
+ kfree(hog);
+ dev_err(&pctldev->dev,
+ "could not get the %s pinmux mapping for hogging\n",
+ map->name);
+ return PTR_ERR(pmx);
+ }
+
+ ret = pinmux_enable(pmx);
+ if (ret) {
+ pinmux_put(pmx);
+ kfree(hog);
+ dev_err(&pctldev->dev,
+ "could not enable the %s pinmux mapping for hogging\n",
+ map->name);
+ return ret;
+ }
+
+ hog->map = map;
+ hog->pmx = pmx;
+
+ dev_info(&pctldev->dev, "hogged map %s, function %s\n", map->name,
+ map->function);
+ mutex_lock(&pctldev->pinmux_hogs_lock);
+ list_add(&hog->node, &pctldev->pinmux_hogs);
+ mutex_unlock(&pctldev->pinmux_hogs_lock);
+
+ return 0;
+}
+
+/**
+ * pinmux_hog_maps() - hog specific map entries on controller device
+ * @pctldev: the pin control device to hog entries on
+ *
+ * When the pin controllers are registered, there may be some specific pinmux
+ * map entries that need to be hogged, i.e. get+enabled until the system shuts
+ * down.
+ */
+int pinmux_hog_maps(struct pinctrl_dev *pctldev)
+{
+ struct device *dev = &pctldev->dev;
+ const char *devname = dev_name(dev);
+ int ret;
+ int i;
+
+ INIT_LIST_HEAD(&pctldev->pinmux_hogs);
+ mutex_init(&pctldev->pinmux_hogs_lock);
+
+ for (i = 0; i < pinmux_maps_num; i++) {
+ struct pinmux_map const *map = &pinmux_maps[i];
+
+ if (((map->ctrl_dev == dev) ||
+ !strcmp(map->ctrl_dev_name, devname)) &&
+ map->hog_on_boot) {
+ /* OK time to hog! */
+ ret = pinmux_hog_map(pctldev, map);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * pinmux_hog_maps() - unhog specific map entries on controller device
+ * @pctldev: the pin control device to unhog entries on
+ */
+void pinmux_unhog_maps(struct pinctrl_dev *pctldev)
+{
+ struct list_head *node, *tmp;
+
+ mutex_lock(&pctldev->pinmux_hogs_lock);
+ list_for_each_safe(node, tmp, &pctldev->pinmux_hogs) {
+ struct pinmux_hog *hog =
+ list_entry(node, struct pinmux_hog, node);
+ pinmux_disable(hog->pmx);
+ pinmux_put(hog->pmx);
+ list_del(node);
+ kfree(hog);
+ }
+ mutex_unlock(&pctldev->pinmux_hogs_lock);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+/* Called from pincontrol core */
+static int pinmux_functions_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
+ unsigned func_selector = 0;
+
+ while (pmxops->list_functions(pctldev, func_selector) >= 0) {
+ const char *func = pmxops->get_function_name(pctldev,
+ func_selector);
+ const char * const *groups;
+ unsigned num_groups;
+ int ret;
+ int i;
+
+ ret = pmxops->get_function_groups(pctldev, func_selector,
+ &groups, &num_groups);
+ if (ret)
+ seq_printf(s, "function %s: COULD NOT GET GROUPS\n",
+ func);
+
+ seq_printf(s, "function: %s, groups = [ ", func);
+ for (i = 0; i < num_groups; i++)
+ seq_printf(s, "%s ", groups[i]);
+ seq_puts(s, "]\n");
+
+ func_selector++;
+
+ }
+
+ return 0;
+}
+
+static int pinmux_pins_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ unsigned pin;
+
+ seq_puts(s, "Pinmux settings per pin\n");
+ seq_puts(s, "Format: pin (name): pinmuxfunction\n");
+
+ /* The highest pin number need to be included in the loop, thus <= */
+ for (pin = 0; pin <= pctldev->desc->maxpin; pin++) {
+
+ struct pin_desc *desc;
+
+ desc = pin_desc_get(pctldev, pin);
+ /* Pin space may be sparse */
+ if (desc == NULL)
+ continue;
+
+ seq_printf(s, "pin %d (%s): %s\n", pin,
+ desc->name ? desc->name : "unnamed",
+ desc->mux_function ? desc->mux_function
+ : "UNCLAIMED");
+ }
+
+ return 0;
+}
+
+static int pinmux_hogs_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ struct pinmux_hog *hog;
+
+ seq_puts(s, "Pinmux map hogs held by device\n");
+
+ list_for_each_entry(hog, &pctldev->pinmux_hogs, node)
+ seq_printf(s, "%s\n", hog->map->name);
+
+ return 0;
+}
+
+static int pinmux_show(struct seq_file *s, void *what)
+{
+ struct pinmux *pmx;
+
+ seq_puts(s, "Requested pinmuxes and their maps:\n");
+ list_for_each_entry(pmx, &pinmux_list, node) {
+ struct pinctrl_dev *pctldev = pmx->pctldev;
+ const struct pinmux_ops *pmxops;
+ const struct pinctrl_ops *pctlops;
+ struct pinmux_group *grp;
+
+ if (!pctldev) {
+ seq_puts(s, "NO PIN CONTROLLER DEVICE\n");
+ continue;
+ }
+
+ pmxops = pctldev->desc->pmxops;
+ pctlops = pctldev->desc->pctlops;
+
+ seq_printf(s, "device: %s function: %s (%u),",
+ pinctrl_dev_get_name(pmx->pctldev),
+ pmxops->get_function_name(pctldev, pmx->func_selector),
+ pmx->func_selector);
+
+ seq_printf(s, " groups: [");
+ list_for_each_entry(grp, &pmx->groups, node) {
+ seq_printf(s, " %s (%u)",
+ pctlops->get_group_name(pctldev, grp->group_selector),
+ grp->group_selector);
+ }
+ seq_printf(s, " ]");
+
+ seq_printf(s, " users: %u map-> %s\n",
+ pmx->usecount,
+ pmx->dev ? dev_name(pmx->dev) : "(system)");
+ }
+
+ return 0;
+}
+
+static int pinmux_maps_show(struct seq_file *s, void *what)
+{
+ int i;
+
+ seq_puts(s, "Pinmux maps:\n");
+
+ for (i = 0; i < pinmux_maps_num; i++) {
+ struct pinmux_map const *map = &pinmux_maps[i];
+
+ seq_printf(s, "%s:\n", map->name);
+ if (map->dev || map->dev_name)
+ seq_printf(s, " device: %s\n",
+ map->dev ? dev_name(map->dev) :
+ map->dev_name);
+ else
+ seq_printf(s, " SYSTEM MUX\n");
+ seq_printf(s, " controlling device %s\n",
+ map->ctrl_dev ? dev_name(map->ctrl_dev) :
+ map->ctrl_dev_name);
+ seq_printf(s, " function: %s\n", map->function);
+ seq_printf(s, " group: %s\n", map->group ? map->group :
+ "(default)");
+ }
+ return 0;
+}
+
+static int pinmux_functions_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinmux_functions_show, inode->i_private);
+}
+
+static int pinmux_pins_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinmux_pins_show, inode->i_private);
+}
+
+static int pinmux_hogs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinmux_hogs_show, inode->i_private);
+}
+
+static int pinmux_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinmux_show, NULL);
+}
+
+static int pinmux_maps_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinmux_maps_show, NULL);
+}
+
+static const struct file_operations pinmux_functions_ops = {
+ .open = pinmux_functions_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinmux_pins_ops = {
+ .open = pinmux_pins_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinmux_hogs_ops = {
+ .open = pinmux_hogs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinmux_ops = {
+ .open = pinmux_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinmux_maps_ops = {
+ .open = pinmux_maps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void pinmux_init_device_debugfs(struct dentry *devroot,
+ struct pinctrl_dev *pctldev)
+{
+ debugfs_create_file("pinmux-functions", S_IFREG | S_IRUGO,
+ devroot, pctldev, &pinmux_functions_ops);
+ debugfs_create_file("pinmux-pins", S_IFREG | S_IRUGO,
+ devroot, pctldev, &pinmux_pins_ops);
+ debugfs_create_file("pinmux-hogs", S_IFREG | S_IRUGO,
+ devroot, pctldev, &pinmux_hogs_ops);
+}
+
+void pinmux_init_debugfs(struct dentry *subsys_root)
+{
+ debugfs_create_file("pinmuxes", S_IFREG | S_IRUGO,
+ subsys_root, NULL, &pinmux_ops);
+ debugfs_create_file("pinmux-maps", S_IFREG | S_IRUGO,
+ subsys_root, NULL, &pinmux_maps_ops);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
new file mode 100644
index 00000000000..844500b3331
--- /dev/null
+++ b/drivers/pinctrl/pinmux.h
@@ -0,0 +1,47 @@
+/*
+ * Internal interface between the core pin control system and the
+ * pinmux portions
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifdef CONFIG_PINMUX
+
+int pinmux_check_ops(const struct pinmux_ops *ops);
+void pinmux_init_device_debugfs(struct dentry *devroot,
+ struct pinctrl_dev *pctldev);
+void pinmux_init_debugfs(struct dentry *subsys_root);
+int pinmux_hog_maps(struct pinctrl_dev *pctldev);
+void pinmux_unhog_maps(struct pinctrl_dev *pctldev);
+
+#else
+
+static inline int pinmux_check_ops(const struct pinmux_ops *ops)
+{
+ return 0;
+}
+
+static inline void pinmux_init_device_debugfs(struct dentry *devroot,
+ struct pinctrl_dev *pctldev)
+{
+}
+
+static inline void pinmux_init_debugfs(struct dentry *subsys_root)
+{
+}
+
+static inline int pinmux_hog_maps(struct pinctrl_dev *pctldev)
+{
+ return 0;
+}
+
+static inline void pinmux_unhog_maps(struct pinctrl_dev *pctldev)
+{
+}
+
+#endif
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 1e88d478532..10cf2500522 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -31,9 +31,6 @@ config ACER_WMI
wireless radio and bluetooth control, and on some laptops,
exposes the mail LED and LCD backlight.
- For more information about this driver see
- <file:Documentation/laptops/acer-wmi.txt>
-
If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M
here.
@@ -164,7 +161,7 @@ config HP_ACCEL
Support for a led indicating disk protection will be provided as
hp::hddprotect. For more information on the feature, refer to
- Documentation/hwmon/lis3lv02d.
+ Documentation/misc-devices/lis3lv02d.
To compile this driver as a module, choose M here: the module will
be called hp_accel.
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 7bd829f247e..7b828680b21 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -4007,7 +4007,7 @@ static void bluetooth_shutdown(void)
pr_notice("failed to save bluetooth state to NVRAM\n");
else
vdbg_printk(TPACPI_DBG_RFKILL,
- "bluestooth state saved to NVRAM\n");
+ "bluetooth state saved to NVRAM\n");
}
static void bluetooth_exit(void)
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 76058a5166e..08c66035dd1 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -335,10 +335,9 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
return -EINVAL;
if (user_len > 65536)
user_len = 65536;
- buf = vmalloc (user_len + 1); /* maybe better use the stack? */
+ buf = vzalloc(user_len + 1); /* maybe better use the stack? */
if (buf == NULL)
return -ENOMEM;
- memset(buf, 0, user_len + 1);
if (strncpy_from_user (buf, user_buf, user_len) < 0) {
rc = -EFAULT;
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index e5c966462c5..3dd86441da3 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -44,6 +44,7 @@ enum qdio_irq_states {
#define SLSB_STATE_NOT_INIT 0x0
#define SLSB_STATE_EMPTY 0x1
#define SLSB_STATE_PRIMED 0x2
+#define SLSB_STATE_PENDING 0x3
#define SLSB_STATE_HALTED 0xe
#define SLSB_STATE_ERROR 0xf
#define SLSB_TYPE_INPUT 0x0
@@ -67,6 +68,8 @@ enum qdio_irq_states {
(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
#define SLSB_P_OUTPUT_EMPTY \
(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */
+#define SLSB_P_OUTPUT_PENDING \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING) /* 0xa3 */
#define SLSB_CU_OUTPUT_PRIMED \
(SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */
#define SLSB_P_OUTPUT_HALTED \
@@ -84,19 +87,11 @@ enum qdio_irq_states {
#define CHSC_FLAG_QDIO_CAPABILITY 0x80
#define CHSC_FLAG_VALIDITY 0x40
-/* qdio adapter-characteristics-1 flag */
-#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */
-#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */
-#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */
-#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */
-#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */
-#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
-#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
-
/* SIGA flags */
#define QDIO_SIGA_WRITE 0x00
#define QDIO_SIGA_READ 0x01
#define QDIO_SIGA_SYNC 0x02
+#define QDIO_SIGA_WRITEQ 0x04
#define QDIO_SIGA_QEBSM_FLAG 0x80
#ifdef CONFIG_64BIT
@@ -253,6 +248,12 @@ struct qdio_input_q {
struct qdio_output_q {
/* PCIs are enabled for the queue */
int pci_out_enabled;
+ /* cq: use asynchronous output buffers */
+ int use_cq;
+ /* cq: aobs used for particual SBAL */
+ struct qaob **aobs;
+ /* cq: sbal state related to asynchronous operation */
+ struct qdio_outbuf_state *sbal_state;
/* timer to check for more outbound work */
struct timer_list timer;
/* used SBALs before tasklet schedule */
@@ -432,9 +433,20 @@ struct indicator_t {
extern struct indicator_t *q_indicators;
-static inline int shared_ind(u32 *dsci)
+static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq)
+{
+ return irq->nr_input_qs > 1;
+}
+
+static inline int references_shared_dsci(struct qdio_irq *irq)
{
- return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+ return irq->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static inline int shared_ind(struct qdio_q *q)
+{
+ struct qdio_irq *i = q->irq_ptr;
+ return references_shared_dsci(i) || has_multiple_inq_on_dsci(i);
}
/* prototypes for thin interrupt */
@@ -449,6 +461,7 @@ void tiqdio_free_memory(void);
int tiqdio_register_thinints(void);
void tiqdio_unregister_thinints(void);
+
/* prototypes for setup */
void qdio_inbound_processing(unsigned long data);
void qdio_outbound_processing(unsigned long data);
@@ -469,6 +482,9 @@ int qdio_setup_create_sysfs(struct ccw_device *cdev);
void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
int qdio_setup_init(void);
void qdio_setup_exit(void);
+int qdio_enable_async_operation(struct qdio_output_q *q);
+void qdio_disable_async_operation(struct qdio_output_q *q);
+struct qaob *qdio_allocate_aob(void);
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 0e615cb912d..aaf7f935bfd 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -76,6 +76,9 @@ static int qstat_show(struct seq_file *m, void *v)
case SLSB_P_OUTPUT_NOT_INIT:
seq_printf(m, "N");
break;
+ case SLSB_P_OUTPUT_PENDING:
+ seq_printf(m, "P");
+ break;
case SLSB_P_INPUT_PRIMED:
case SLSB_CU_OUTPUT_PRIMED:
seq_printf(m, "+");
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 288c9140290..9a122280246 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -14,6 +14,7 @@
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
+#include <linux/io.h>
#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <asm/debug.h>
@@ -77,11 +78,13 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
* Note: For IQDC unicast queues only the highest priority queue is processed.
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
- unsigned int *bb, unsigned int fc)
+ unsigned int *bb, unsigned int fc,
+ unsigned long aob)
{
register unsigned long __fc asm("0") = fc;
register unsigned long __schid asm("1") = schid;
register unsigned long __mask asm("2") = mask;
+ register unsigned long __aob asm("3") = aob;
int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
asm volatile(
@@ -90,7 +93,8 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask,
" srl %0,28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
+ : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask),
+ "+d" (__aob)
: : "cc", "memory");
*bb = ((unsigned int) __fc) >> 31;
return cc;
@@ -212,7 +216,7 @@ again:
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, unsigned int count,
- int auto_ack)
+ int auto_ack, int merge_pending)
{
unsigned char __state = 0;
int i;
@@ -224,9 +228,14 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
for (i = 0; i < count; i++) {
- if (!__state)
+ if (!__state) {
__state = q->slsb.val[bufnr];
- else if (q->slsb.val[bufnr] != __state)
+ if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+ __state = SLSB_P_OUTPUT_EMPTY;
+ } else if (merge_pending) {
+ if ((q->slsb.val[bufnr] & __state) != __state)
+ break;
+ } else if (q->slsb.val[bufnr] != __state)
break;
bufnr = next_buf(bufnr);
}
@@ -237,7 +246,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, int auto_ack)
{
- return get_buf_states(q, bufnr, state, 1, auto_ack);
+ return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
@@ -308,19 +317,28 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
return qdio_siga_sync(q, q->mask, 0);
}
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
+static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
+ unsigned long aob)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_WRITE;
u64 start_time = 0;
int retries = 0, cc;
+ unsigned long laob = 0;
+
+ if (q->u.out.use_cq && aob != 0) {
+ fc = QDIO_SIGA_WRITEQ;
+ laob = aob;
+ }
if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
fc |= QDIO_SIGA_QEBSM_FLAG;
}
again:
- cc = do_siga_output(schid, q->mask, busy_bit, fc);
+ WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
+ (aob && fc != QDIO_SIGA_WRITEQ));
+ cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
@@ -379,7 +397,7 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
{
if (need_siga_sync(q))
qdio_siga_sync_q(q);
- return get_buf_states(q, bufnr, state, 1, 0);
+ return get_buf_states(q, bufnr, state, 1, 0, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
@@ -507,7 +525,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
* No siga sync here, as a PCI or we after a thin interrupt
* already sync'ed the queues.
*/
- count = get_buf_states(q, q->first_to_check, &state, count, 1);
+ count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
if (!count)
goto out;
@@ -590,6 +608,107 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
return 0;
}
+static inline int contains_aobs(struct qdio_q *q)
+{
+ return !q->is_input_q && q->u.out.use_cq;
+}
+
+static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
+ int i, struct qaob *aob)
+{
+ int tmp;
+
+ DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
+ (unsigned long) virt_to_phys(aob));
+ DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
+ (unsigned long) aob->res0[0]);
+ DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
+ (unsigned long) aob->res0[1]);
+ DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
+ (unsigned long) aob->res0[2]);
+ DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
+ (unsigned long) aob->res0[3]);
+ DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
+ (unsigned long) aob->res0[4]);
+ DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
+ (unsigned long) aob->res0[5]);
+ DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
+ DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
+ DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
+ DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
+ DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
+ DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
+ DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
+ for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
+ DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
+ (unsigned long) aob->sba[tmp]);
+ DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
+ (unsigned long) q->sbal[i]->element[tmp].addr);
+ DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
+ DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
+ q->sbal[i]->element[tmp].length);
+ }
+ DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
+ for (tmp = 0; tmp < 2; ++tmp) {
+ DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
+ (unsigned long) aob->res4[tmp]);
+ }
+ DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
+ DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
+}
+
+static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
+{
+ unsigned char state = 0;
+ int j, b = start;
+
+ if (!contains_aobs(q))
+ return;
+
+ for (j = 0; j < count; ++j) {
+ get_buf_state(q, b, &state, 0);
+ if (state == SLSB_P_OUTPUT_PENDING) {
+ struct qaob *aob = q->u.out.aobs[b];
+ if (aob == NULL)
+ continue;
+
+ BUG_ON(q->u.out.sbal_state == NULL);
+ q->u.out.sbal_state[b].flags |=
+ QDIO_OUTBUF_STATE_FLAG_PENDING;
+ q->u.out.aobs[b] = NULL;
+ } else if (state == SLSB_P_OUTPUT_EMPTY) {
+ BUG_ON(q->u.out.sbal_state == NULL);
+ q->u.out.sbal_state[b].aob = NULL;
+ }
+ b = next_buf(b);
+ }
+}
+
+static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
+ int bufnr)
+{
+ unsigned long phys_aob = 0;
+
+ if (!q->use_cq)
+ goto out;
+
+ if (!q->aobs[bufnr]) {
+ struct qaob *aob = qdio_allocate_aob();
+ q->aobs[bufnr] = aob;
+ }
+ if (q->aobs[bufnr]) {
+ BUG_ON(q->sbal_state == NULL);
+ q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
+ q->sbal_state[bufnr].aob = q->aobs[bufnr];
+ q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
+ phys_aob = virt_to_phys(q->aobs[bufnr]);
+ BUG_ON(phys_aob & 0xFF);
+ }
+
+out:
+ return phys_aob;
+}
+
static void qdio_kick_handler(struct qdio_q *q)
{
int start = q->first_to_kick;
@@ -610,6 +729,8 @@ static void qdio_kick_handler(struct qdio_q *q)
start, count);
}
+ qdio_handle_aobs(q, start, count);
+
q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
q->irq_ptr->int_parm);
@@ -672,23 +793,26 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
-
if (q->first_to_check == stop)
- return q->first_to_check;
+ goto out;
- count = get_buf_states(q, q->first_to_check, &state, count, 0);
+ count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
if (!count)
- return q->first_to_check;
+ goto out;
switch (state) {
+ case SLSB_P_OUTPUT_PENDING:
+ BUG();
case SLSB_P_OUTPUT_EMPTY:
/* the adapter got it */
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
+ "out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
+
break;
case SLSB_P_OUTPUT_ERROR:
process_buffer_error(q, count);
@@ -701,7 +825,8 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
/* the adapter has not fetched the output yet */
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
+ q->nr);
break;
case SLSB_P_OUTPUT_NOT_INIT:
case SLSB_P_OUTPUT_HALTED:
@@ -709,6 +834,8 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
default:
BUG();
}
+
+out:
return q->first_to_check;
}
@@ -732,7 +859,7 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
return 0;
}
-static int qdio_kick_outbound_q(struct qdio_q *q)
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
int retries = 0, cc;
unsigned int busy_bit;
@@ -744,7 +871,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
retry:
qperf_inc(q, siga_write);
- cc = qdio_siga_output(q, &busy_bit);
+ cc = qdio_siga_output(q, &busy_bit, aob);
switch (cc) {
case 0:
break;
@@ -921,8 +1048,9 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
}
q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
q->irq_ptr->int_parm);
- } else
+ } else {
tasklet_schedule(&q->tasklet);
+ }
}
if (!pci_out_supported(q))
@@ -1236,6 +1364,26 @@ out_err:
}
EXPORT_SYMBOL_GPL(qdio_allocate);
+static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q = irq_ptr->input_qs[0];
+ int i, use_cq = 0;
+
+ if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
+ use_cq = 1;
+
+ for_each_output_queue(irq_ptr, q, i) {
+ if (use_cq) {
+ if (qdio_enable_async_operation(&q->u.out) < 0) {
+ use_cq = 0;
+ continue;
+ }
+ } else
+ qdio_disable_async_operation(&q->u.out);
+ }
+ DBF_EVENT("use_cq:%d", use_cq);
+}
+
/**
* qdio_establish - establish queues on a qdio subchannel
* @init_data: initialization data
@@ -1301,6 +1449,8 @@ int qdio_establish(struct qdio_initialize *init_data)
qdio_setup_ssqd_info(irq_ptr);
DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
+ qdio_detect_hsicq(irq_ptr);
+
/* qebsm is now setup if available, initialize buffer states */
qdio_init_buf_states(irq_ptr);
@@ -1442,12 +1592,9 @@ set:
used = atomic_add_return(count, &q->nr_buf_used) - count;
BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
- /* no need to signal as long as the adapter had free buffers */
- if (used)
- return 0;
-
if (need_siga_in(q))
return qdio_siga_input(q);
+
return 0;
}
@@ -1480,17 +1627,21 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
q->u.out.pci_out_enabled = 0;
if (queue_type(q) == QDIO_IQDIO_QFMT) {
- /* One SIGA-W per buffer required for unicast HiperSockets. */
+ unsigned long phys_aob = 0;
+
+ /* One SIGA-W per buffer required for unicast HSI */
WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
- rc = qdio_kick_outbound_q(q);
+ phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+
+ rc = qdio_kick_outbound_q(q, phys_aob);
} else if (need_siga_sync(q)) {
rc = qdio_siga_sync_q(q);
} else {
/* try to fast requeue buffers */
get_buf_state(q, prev_buf(bufnr), &state, 0);
if (state != SLSB_CU_OUTPUT_PRIMED)
- rc = qdio_kick_outbound_q(q);
+ rc = qdio_kick_outbound_q(q, 0);
else
qperf_inc(q, fast_requeue);
}
@@ -1518,6 +1669,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
{
struct qdio_irq *irq_ptr;
+
if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
@@ -1562,7 +1714,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
WARN_ON(queue_irqs_enabled(q));
- if (!shared_ind(q->irq_ptr->dsci))
+ if (!shared_ind(q))
xchg(q->irq_ptr->dsci, 0);
qdio_stop_polling(q);
@@ -1572,7 +1724,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
+ if (!shared_ind(q) && *q->irq_ptr->dsci)
goto rescan;
if (!qdio_inbound_q_done(q))
goto rescan;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 89107d0938c..dd8bd670a6b 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -19,6 +19,22 @@
#include "qdio_debug.h"
static struct kmem_cache *qdio_q_cache;
+static struct kmem_cache *qdio_aob_cache;
+
+struct qaob *qdio_allocate_aob()
+{
+ struct qaob *aob;
+
+ aob = kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
+ return aob;
+}
+EXPORT_SYMBOL_GPL(qdio_allocate_aob);
+
+void qdio_release_aob(struct qaob *aob)
+{
+ kmem_cache_free(qdio_aob_cache, aob);
+}
+EXPORT_SYMBOL_GPL(qdio_release_aob);
/*
* qebsm is only available under 64bit but the adapter sets the feature
@@ -154,29 +170,36 @@ static void setup_queues(struct qdio_irq *irq_ptr,
struct qdio_q *q;
void **input_sbal_array = qdio_init->input_sbal_addr_array;
void **output_sbal_array = qdio_init->output_sbal_addr_array;
+ struct qdio_outbuf_state *output_sbal_state_array =
+ qdio_init->output_sbal_state_array;
int i;
for_each_input_queue(irq_ptr, q, i) {
- DBF_EVENT("in-q:%1d", i);
+ DBF_EVENT("inq:%1d", i);
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
- q->u.in.queue_start_poll = qdio_init->queue_start_poll;
+ q->u.in.queue_start_poll = qdio_init->queue_start_poll[i];
+
setup_storage_lists(q, irq_ptr, input_sbal_array, i);
input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
- if (is_thinint_irq(irq_ptr))
+ if (is_thinint_irq(irq_ptr)) {
tasklet_init(&q->tasklet, tiqdio_inbound_processing,
(unsigned long) q);
- else
+ } else {
tasklet_init(&q->tasklet, qdio_inbound_processing,
(unsigned long) q);
+ }
}
for_each_output_queue(irq_ptr, q, i) {
DBF_EVENT("outq:%1d", i);
setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
+ q->u.out.sbal_state = output_sbal_state_array;
+ output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
+
q->is_input_q = 0;
q->u.out.scan_threshold = qdio_init->scan_threshold;
setup_storage_lists(q, irq_ptr, output_sbal_array, i);
@@ -311,6 +334,19 @@ void qdio_release_memory(struct qdio_irq *irq_ptr)
for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
q = irq_ptr->output_qs[i];
if (q) {
+ if (q->u.out.use_cq) {
+ int n;
+
+ for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
+ struct qaob *aob = q->u.out.aobs[n];
+ if (aob) {
+ qdio_release_aob(aob);
+ q->u.out.aobs[n] = NULL;
+ }
+ }
+
+ qdio_disable_async_operation(&q->u.out);
+ }
free_page((unsigned long) q->slib);
kmem_cache_free(qdio_q_cache, q);
}
@@ -465,23 +501,60 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
printk(KERN_INFO "%s", s);
}
+int qdio_enable_async_operation(struct qdio_output_q *outq)
+{
+ outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q,
+ GFP_ATOMIC);
+ if (!outq->aobs) {
+ outq->use_cq = 0;
+ return -ENOMEM;
+ }
+ outq->use_cq = 1;
+ return 0;
+}
+
+void qdio_disable_async_operation(struct qdio_output_q *q)
+{
+ kfree(q->aobs);
+ q->aobs = NULL;
+ q->use_cq = 0;
+}
+
int __init qdio_setup_init(void)
{
+ int rc;
+
qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
256, 0, NULL);
if (!qdio_q_cache)
return -ENOMEM;
+ qdio_aob_cache = kmem_cache_create("qdio_aob",
+ sizeof(struct qaob),
+ sizeof(struct qaob),
+ 0,
+ NULL);
+ if (!qdio_aob_cache) {
+ rc = -ENOMEM;
+ goto free_qdio_q_cache;
+ }
+
/* Check for OSA/FCP thin interrupts (bit 67). */
DBF_EVENT("thinint:%1d",
(css_general_characteristics.aif_osa) ? 1 : 0);
/* Check for QEBSM support in general (bit 58). */
DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
- return 0;
+ rc = 0;
+out:
+ return rc;
+free_qdio_q_cache:
+ kmem_cache_destroy(qdio_q_cache);
+ goto out;
}
void qdio_setup_exit(void)
{
+ kmem_cache_destroy(qdio_aob_cache);
kmem_cache_destroy(qdio_q_cache);
}
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 2a1d4dfaf85..a3e3949d7b6 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -67,12 +67,9 @@ static void put_indicator(u32 *addr)
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
- struct qdio_q *q;
- int i;
-
mutex_lock(&tiq_list_lock);
- for_each_input_queue(irq_ptr, q, i)
- list_add_rcu(&q->entry, &tiq_list);
+ BUG_ON(irq_ptr->nr_input_qs < 1);
+ list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
xchg(irq_ptr->dsci, 1 << 7);
}
@@ -80,19 +77,17 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
- int i;
- for (i = 0; i < irq_ptr->nr_input_qs; i++) {
- q = irq_ptr->input_qs[i];
- /* if establish triggered an error */
- if (!q || !q->entry.prev || !q->entry.next)
- continue;
+ BUG_ON(irq_ptr->nr_input_qs < 1);
+ q = irq_ptr->input_qs[0];
+ /* if establish triggered an error */
+ if (!q || !q->entry.prev || !q->entry.next)
+ return;
- mutex_lock(&tiq_list_lock);
- list_del_rcu(&q->entry);
- mutex_unlock(&tiq_list_lock);
- synchronize_rcu();
- }
+ mutex_lock(&tiq_list_lock);
+ list_del_rcu(&q->entry);
+ mutex_unlock(&tiq_list_lock);
+ synchronize_rcu();
}
static inline u32 clear_shared_ind(void)
@@ -102,6 +97,40 @@ static inline u32 clear_shared_ind(void)
return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}
+static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
+{
+ struct qdio_q *q;
+ int i;
+
+ for_each_input_queue(irq, q, i) {
+ if (!references_shared_dsci(irq) &&
+ has_multiple_inq_on_dsci(irq))
+ xchg(q->irq_ptr->dsci, 0);
+
+ if (q->u.in.queue_start_poll) {
+ /* skip if polling is enabled or already in work */
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state)) {
+ qperf_inc(q, int_discarded);
+ continue;
+ }
+
+ /* avoid dsci clear here, done after processing */
+ q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+ q->irq_ptr->int_parm);
+ } else {
+ if (!shared_ind(q))
+ xchg(q->irq_ptr->dsci, 0);
+
+ /*
+ * Call inbound processing but not directly
+ * since that could starve other thinint queues.
+ */
+ tasklet_schedule(&q->tasklet);
+ }
+ }
+}
+
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @alsi: pointer to adapter local summary indicator
@@ -120,35 +149,18 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
/* check for work on all inbound thinint queues */
list_for_each_entry_rcu(q, &tiq_list, entry) {
+ struct qdio_irq *irq;
/* only process queues from changed sets */
- if (unlikely(shared_ind(q->irq_ptr->dsci))) {
+ irq = q->irq_ptr;
+ if (unlikely(references_shared_dsci(irq))) {
if (!si_used)
continue;
- } else if (!*q->irq_ptr->dsci)
+ } else if (!*irq->dsci)
continue;
- if (q->u.in.queue_start_poll) {
- /* skip if polling is enabled or already in work */
- if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state)) {
- qperf_inc(q, int_discarded);
- continue;
- }
+ tiqdio_call_inq_handlers(irq);
- /* avoid dsci clear here, done after processing */
- q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
- q->irq_ptr->int_parm);
- } else {
- /* only clear it if the indicator is non-shared */
- if (!shared_ind(q->irq_ptr->dsci))
- xchg(q->irq_ptr->dsci, 0);
- /*
- * Call inbound processing but not directly
- * since that could starve other thinint queues.
- */
- tasklet_schedule(&q->tasklet);
- }
qperf_inc(q, adapter_int);
}
rcu_read_unlock();
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c3b8064a102..fb246b944b1 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2122,7 +2122,7 @@ static const struct net_device_ops lcs_mc_netdev_ops = {
.ndo_stop = lcs_stop_device,
.ndo_get_stats = lcs_getstats,
.ndo_start_xmit = lcs_start_xmit,
- .ndo_set_multicast_list = lcs_set_multicast_list,
+ .ndo_set_rx_mode = lcs_set_multicast_list,
};
static int
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 26a4110eeb2..b77c65ed138 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -110,6 +110,10 @@ struct qeth_perf_stats {
unsigned int sc_dp_p;
unsigned int sc_p_dp;
+ /* qdio_cq_handler: number of times called, time spent in */
+ __u64 cq_start_time;
+ unsigned int cq_cnt;
+ unsigned int cq_time;
/* qdio_input_handler: number of times called, time spent in */
__u64 inbound_start_time;
unsigned int inbound_cnt;
@@ -213,6 +217,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
*/
#define QETH_TX_TIMEOUT 100 * HZ
#define QETH_RCD_TIMEOUT 60 * HZ
+#define QETH_RECLAIM_WORK_TIME HZ
#define QETH_HEADER_SIZE 32
#define QETH_MAX_PORTNO 15
@@ -231,7 +236,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
#define QETH_IN_BUF_COUNT_MAX 128
#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
- ((card)->qdio.in_buf_pool.buf_count / 2)
+ ((card)->ssqd.qdioac1 & AC1_SIGA_INPUT_NEEDED ? 1 : \
+ ((card)->qdio.in_buf_pool.buf_count / 2))
/* buffers we have to be behind before we get a PCI */
#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
@@ -260,6 +266,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
/* large receive scatter gather copy break */
#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
+#define QETH_RX_PULL_LEN 256
struct qeth_hdr_layer3 {
__u8 id;
@@ -375,6 +382,21 @@ enum qeth_qdio_buffer_states {
* outbound: filled by driver; owned by hardware in order to be sent
*/
QETH_QDIO_BUF_PRIMED,
+ /*
+ * inbound: not applicable
+ * outbound: identified to be pending in TPQ
+ */
+ QETH_QDIO_BUF_PENDING,
+ /*
+ * inbound: not applicable
+ * outbound: found in completion queue
+ */
+ QETH_QDIO_BUF_IN_CQ,
+ /*
+ * inbound: not applicable
+ * outbound: handled via transfer pending / completion queue
+ */
+ QETH_QDIO_BUF_HANDLED_DELAYED,
};
enum qeth_qdio_info_states {
@@ -399,6 +421,7 @@ struct qeth_qdio_buffer {
struct qdio_buffer *buffer;
/* the buffer pool entry currently associated to this buffer */
struct qeth_buffer_pool_entry *pool_entry;
+ struct sk_buff *rx_skb;
};
struct qeth_qdio_q {
@@ -412,8 +435,11 @@ struct qeth_qdio_out_buffer {
atomic_t state;
int next_element_to_fill;
struct sk_buff_head skb_list;
- struct list_head ctx_list;
int is_header[16];
+
+ struct qaob *aob;
+ struct qeth_qdio_out_q *q;
+ struct qeth_qdio_out_buffer *next_pending;
};
struct qeth_card;
@@ -426,7 +452,8 @@ enum qeth_out_q_states {
struct qeth_qdio_out_q {
struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
- struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
+ struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
+ struct qdio_outbuf_state *bufstates; /* convenience pointer */
int queue_no;
struct qeth_card *card;
atomic_t state;
@@ -447,7 +474,9 @@ struct qeth_qdio_out_q {
struct qeth_qdio_info {
atomic_t state;
/* input */
+ int no_in_queues;
struct qeth_qdio_q *in_q;
+ struct qeth_qdio_q *c_q;
struct qeth_qdio_buffer_pool in_buf_pool;
struct qeth_qdio_buffer_pool init_pool;
int in_buf_size;
@@ -455,6 +484,7 @@ struct qeth_qdio_info {
/* output */
int no_out_queues;
struct qeth_qdio_out_q **out_qs;
+ struct qdio_outbuf_state *out_bufstates;
/* priority queueing */
int do_prio_queueing;
@@ -526,6 +556,12 @@ enum qeth_cmd_buffer_state {
BUF_STATE_PROCESSED,
};
+enum qeth_cq {
+ QETH_CQ_DISABLED = 0,
+ QETH_CQ_ENABLED = 1,
+ QETH_CQ_NOTAVAILABLE = 2,
+};
+
struct qeth_ipato {
int enabled;
int invert4;
@@ -650,6 +686,8 @@ struct qeth_card_options {
int rx_sg_cb;
enum qeth_ipa_isolation_modes isolation;
int sniffer;
+ enum qeth_cq cq;
+ char hsuid[9];
};
/*
@@ -747,6 +785,8 @@ struct qeth_card {
struct mutex discipline_mutex;
struct napi_struct napi;
struct qeth_rx rx;
+ struct delayed_work buffer_reclaim_work;
+ int reclaim_index;
};
struct qeth_card_list_struct {
@@ -812,6 +852,7 @@ int qeth_core_create_device_attributes(struct device *);
void qeth_core_remove_device_attributes(struct device *);
int qeth_core_create_osn_attributes(struct device *);
void qeth_core_remove_osn_attributes(struct device *);
+void qeth_buffer_reclaim_work(struct work_struct *);
/* exports for qeth discipline device drivers */
extern struct qeth_card_list_struct qeth_core_card_list;
@@ -840,7 +881,7 @@ int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
unsigned int, const char *);
void qeth_queue_input_buffer(struct qeth_card *, int);
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
- struct qdio_buffer *, struct qdio_buffer_element **, int *,
+ struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **);
void qeth_schedule_recovery(struct qeth_card *);
void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
@@ -887,6 +928,7 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
int qeth_set_access_ctrl_online(struct qeth_card *card);
int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
+int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 4550573c25e..81534437373 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -21,6 +21,7 @@
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>
+#include <net/iucv/af_iucv.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
@@ -44,6 +45,7 @@ struct qeth_card_list_struct qeth_core_card_list;
EXPORT_SYMBOL_GPL(qeth_core_card_list);
struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
+static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
@@ -56,6 +58,14 @@ static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
+static void qeth_free_qdio_buffers(struct qeth_card *);
+static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf,
+ enum iucv_tx_notify notification);
+static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf,
+ enum qeth_qdio_buffer_states newbufstate);
static inline const char *qeth_get_cardname(struct qeth_card *card)
@@ -199,7 +209,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card)
QETH_CARD_TEXT(card, 5, "alocpool");
for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
- pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
+ pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
if (!pool_entry) {
qeth_free_buffer_pool(card);
return -ENOMEM;
@@ -239,6 +249,196 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
}
EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
+static inline int qeth_cq_init(struct qeth_card *card)
+{
+ int rc;
+
+ if (card->options.cq == QETH_CQ_ENABLED) {
+ QETH_DBF_TEXT(SETUP, 2, "cqinit");
+ memset(card->qdio.c_q->qdio_bufs, 0,
+ QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
+ card->qdio.c_q->next_buf_to_init = 127;
+ rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
+ card->qdio.no_in_queues - 1, 0,
+ 127);
+ if (rc) {
+ QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ goto out;
+ }
+ }
+ rc = 0;
+out:
+ return rc;
+}
+
+static inline int qeth_alloc_cq(struct qeth_card *card)
+{
+ int rc;
+
+ if (card->options.cq == QETH_CQ_ENABLED) {
+ int i;
+ struct qdio_outbuf_state *outbuf_states;
+
+ QETH_DBF_TEXT(SETUP, 2, "cqon");
+ card->qdio.c_q = kzalloc(sizeof(struct qeth_qdio_q),
+ GFP_KERNEL);
+ if (!card->qdio.c_q) {
+ rc = -1;
+ goto kmsg_out;
+ }
+ QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *));
+
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+ card->qdio.c_q->bufs[i].buffer =
+ &card->qdio.c_q->qdio_bufs[i];
+ }
+
+ card->qdio.no_in_queues = 2;
+
+ card->qdio.out_bufstates = (struct qdio_outbuf_state *)
+ kzalloc(card->qdio.no_out_queues *
+ QDIO_MAX_BUFFERS_PER_Q *
+ sizeof(struct qdio_outbuf_state), GFP_KERNEL);
+ outbuf_states = card->qdio.out_bufstates;
+ if (outbuf_states == NULL) {
+ rc = -1;
+ goto free_cq_out;
+ }
+ for (i = 0; i < card->qdio.no_out_queues; ++i) {
+ card->qdio.out_qs[i]->bufstates = outbuf_states;
+ outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
+ }
+ } else {
+ QETH_DBF_TEXT(SETUP, 2, "nocq");
+ card->qdio.c_q = NULL;
+ card->qdio.no_in_queues = 1;
+ }
+ QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
+ rc = 0;
+out:
+ return rc;
+free_cq_out:
+ kfree(card->qdio.c_q);
+ card->qdio.c_q = NULL;
+kmsg_out:
+ dev_err(&card->gdev->dev, "Failed to create completion queue\n");
+ goto out;
+}
+
+static inline void qeth_free_cq(struct qeth_card *card)
+{
+ if (card->qdio.c_q) {
+ --card->qdio.no_in_queues;
+ kfree(card->qdio.c_q);
+ card->qdio.c_q = NULL;
+ }
+ kfree(card->qdio.out_bufstates);
+ card->qdio.out_bufstates = NULL;
+}
+
+static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
+ int delayed) {
+ enum iucv_tx_notify n;
+
+ switch (sbalf15) {
+ case 0:
+ n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
+ break;
+ case 4:
+ case 16:
+ case 17:
+ case 18:
+ n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
+ TX_NOTIFY_UNREACHABLE;
+ break;
+ default:
+ n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
+ TX_NOTIFY_GENERALERROR;
+ break;
+ }
+
+ return n;
+}
+
+static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
+ int bidx, int forced_cleanup)
+{
+ if (q->bufs[bidx]->next_pending != NULL) {
+ struct qeth_qdio_out_buffer *head = q->bufs[bidx];
+ struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
+
+ while (c) {
+ if (forced_cleanup ||
+ atomic_read(&c->state) ==
+ QETH_QDIO_BUF_HANDLED_DELAYED) {
+ struct qeth_qdio_out_buffer *f = c;
+ QETH_CARD_TEXT(f->q->card, 5, "fp");
+ QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
+ /* release here to avoid interleaving between
+ outbound tasklet and inbound tasklet
+ regarding notifications and lifecycle */
+ qeth_release_skbs(c);
+
+ c = f->next_pending;
+ BUG_ON(head->next_pending != f);
+ head->next_pending = c;
+ kmem_cache_free(qeth_qdio_outbuf_cache, f);
+ } else {
+ head = c;
+ c = c->next_pending;
+ }
+
+ }
+ }
+}
+
+
+static inline void qeth_qdio_handle_aob(struct qeth_card *card,
+ unsigned long phys_aob_addr) {
+ struct qaob *aob;
+ struct qeth_qdio_out_buffer *buffer;
+ enum iucv_tx_notify notification;
+
+ aob = (struct qaob *) phys_to_virt(phys_aob_addr);
+ QETH_CARD_TEXT(card, 5, "haob");
+ QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
+ buffer = (struct qeth_qdio_out_buffer *) aob->user1;
+ QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
+
+ BUG_ON(buffer == NULL);
+
+ if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
+ QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
+ notification = TX_NOTIFY_OK;
+ } else {
+ BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING);
+
+ atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
+ notification = TX_NOTIFY_DELAYED_OK;
+ }
+
+ if (aob->aorc != 0) {
+ QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
+ notification = qeth_compute_cq_notification(aob->aorc, 1);
+ }
+ qeth_notify_skbs(buffer->q, buffer, notification);
+
+ buffer->aob = NULL;
+ qeth_clear_output_buffer(buffer->q, buffer,
+ QETH_QDIO_BUF_HANDLED_DELAYED);
+ /* from here on: do not touch buffer anymore */
+ qdio_release_aob(aob);
+}
+
+static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
+{
+ return card->options.cq == QETH_CQ_ENABLED &&
+ card->qdio.c_q != NULL &&
+ queue != 0 &&
+ queue == card->qdio.no_in_queues - 1;
+}
+
+
static int qeth_issue_next_read(struct qeth_card *card)
{
int rc;
@@ -589,7 +789,7 @@ static int qeth_setup_channel(struct qeth_channel *channel)
QETH_DBF_TEXT(SETUP, 2, "setupch");
for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
channel->iob[cnt].data =
- kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
+ kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
if (channel->iob[cnt].data == NULL)
break;
channel->iob[cnt].state = BUF_STATE_FREE;
@@ -681,6 +881,7 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread);
void qeth_schedule_recovery(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "startrec");
+ WARN_ON(1);
if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
schedule_work(&card->kernel_thread_starter);
}
@@ -883,22 +1084,60 @@ out:
return;
}
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf)
+static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
+ struct qeth_qdio_out_buffer *buf,
+ enum iucv_tx_notify notification)
{
- int i;
struct sk_buff *skb;
- /* is PCI flag set on buffer? */
- if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
- atomic_dec(&queue->set_pci_flags_count);
+ if (skb_queue_empty(&buf->skb_list))
+ goto out;
+ skb = skb_peek(&buf->skb_list);
+ while (skb) {
+ QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
+ QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
+ if (skb->protocol == ETH_P_AF_IUCV) {
+ if (skb->sk) {
+ struct iucv_sock *iucv = iucv_sk(skb->sk);
+ iucv->sk_txnotify(skb, notification);
+ }
+ }
+ if (skb_queue_is_last(&buf->skb_list, skb))
+ skb = NULL;
+ else
+ skb = skb_queue_next(&buf->skb_list, skb);
+ }
+out:
+ return;
+}
+
+static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
+{
+ struct sk_buff *skb;
skb = skb_dequeue(&buf->skb_list);
while (skb) {
+ QETH_CARD_TEXT(buf->q->card, 5, "skbr");
+ QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
atomic_dec(&skb->users);
dev_kfree_skb_any(skb);
skb = skb_dequeue(&buf->skb_list);
}
+}
+
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf,
+ enum qeth_qdio_buffer_states newbufstate)
+{
+ int i;
+
+ /* is PCI flag set on buffer? */
+ if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
+ atomic_dec(&queue->set_pci_flags_count);
+
+ if (newbufstate == QETH_QDIO_BUF_EMPTY) {
+ qeth_release_skbs(buf);
+ }
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
if (buf->buffer->element[i].addr && buf->is_header[i])
kmem_cache_free(qeth_core_header_cache,
@@ -912,21 +1151,36 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
buf->buffer->element[15].eflags = 0;
buf->buffer->element[15].sflags = 0;
buf->next_element_to_fill = 0;
- atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
+ atomic_set(&buf->state, newbufstate);
+}
+
+static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
+{
+ int j;
+
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+ if (!q->bufs[j])
+ continue;
+ qeth_cleanup_handled_pending(q, j, free);
+ qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
+ if (free) {
+ kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
+ q->bufs[j] = NULL;
+ }
+ }
}
void qeth_clear_qdio_buffers(struct qeth_card *card)
{
- int i, j;
+ int i;
QETH_CARD_TEXT(card, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
- for (i = 0; i < card->qdio.no_out_queues; ++i)
+ for (i = 0; i < card->qdio.no_out_queues; ++i) {
if (card->qdio.out_qs[i]) {
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
- qeth_clear_output_buffer(card->qdio.out_qs[i],
- &card->qdio.out_qs[i]->bufs[j]);
+ qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
}
+ }
}
EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
@@ -950,6 +1204,11 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
QETH_QDIO_UNINITIALIZED)
return;
+
+ qeth_free_cq(card);
+ cancel_delayed_work_sync(&card->buffer_reclaim_work);
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
+ kfree_skb(card->qdio.in_q->bufs[j].rx_skb);
kfree(card->qdio.in_q);
card->qdio.in_q = NULL;
/* inbound buffer pool */
@@ -957,9 +1216,7 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
/* free outbound qdio_qs */
if (card->qdio.out_qs) {
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
- qeth_clear_output_buffer(card->qdio.out_qs[i],
- &card->qdio.out_qs[i]->bufs[j]);
+ qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
kfree(card->qdio.out_qs[i]);
}
kfree(card->qdio.out_qs);
@@ -995,27 +1252,29 @@ static void qeth_get_channel_path_desc(struct qeth_card *card)
ccwdev = card->data.ccwdev;
chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
if (chp_dsc != NULL) {
- /* CHPP field bit 6 == 1 -> single queue */
- if ((chp_dsc->chpp & 0x02) == 0x02) {
- if ((atomic_read(&card->qdio.state) !=
- QETH_QDIO_UNINITIALIZED) &&
- (card->qdio.no_out_queues == 4))
- /* change from 4 to 1 outbound queues */
- qeth_free_qdio_buffers(card);
- card->qdio.no_out_queues = 1;
- if (card->qdio.default_out_queue != 0)
- dev_info(&card->gdev->dev,
+ if (card->info.type != QETH_CARD_TYPE_IQD) {
+ /* CHPP field bit 6 == 1 -> single queue */
+ if ((chp_dsc->chpp & 0x02) == 0x02) {
+ if ((atomic_read(&card->qdio.state) !=
+ QETH_QDIO_UNINITIALIZED) &&
+ (card->qdio.no_out_queues == 4))
+ /* change from 4 to 1 outbound queues */
+ qeth_free_qdio_buffers(card);
+ card->qdio.no_out_queues = 1;
+ if (card->qdio.default_out_queue != 0)
+ dev_info(&card->gdev->dev,
"Priority Queueing not supported\n");
- card->qdio.default_out_queue = 0;
- } else {
- if ((atomic_read(&card->qdio.state) !=
- QETH_QDIO_UNINITIALIZED) &&
- (card->qdio.no_out_queues == 1)) {
- /* change from 1 to 4 outbound queues */
- qeth_free_qdio_buffers(card);
- card->qdio.default_out_queue = 2;
+ card->qdio.default_out_queue = 0;
+ } else {
+ if ((atomic_read(&card->qdio.state) !=
+ QETH_QDIO_UNINITIALIZED) &&
+ (card->qdio.no_out_queues == 1)) {
+ /* change from 1 to 4 outbound queues */
+ qeth_free_qdio_buffers(card);
+ card->qdio.default_out_queue = 2;
+ }
+ card->qdio.no_out_queues = 4;
}
- card->qdio.no_out_queues = 4;
}
card->info.func_level = 0x4100 + chp_dsc->desc;
kfree(chp_dsc);
@@ -1051,6 +1310,7 @@ static void qeth_set_intial_options(struct qeth_card *card)
card->options.performance_stats = 0;
card->options.rx_sg_cb = QETH_RX_SG_CB;
card->options.isolation = ISOLATION_MODE_NONE;
+ card->options.cq = QETH_CQ_DISABLED;
}
static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
@@ -1119,6 +1379,7 @@ static int qeth_setup_card(struct qeth_card *card)
card->ipato.invert6 = 0;
/* init QDIO stuff */
qeth_init_qdio_info(card);
+ INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
return 0;
}
@@ -1140,7 +1401,7 @@ static struct qeth_card *qeth_alloc_card(void)
if (!card)
goto out;
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
if (!card->ip_tbd_list) {
QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
goto out_card;
@@ -1180,6 +1441,7 @@ static int qeth_determine_card_type(struct qeth_card *card)
card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
card->qdio.no_out_queues =
known_devices[i][QETH_QUEUE_NO_IND];
+ card->qdio.no_in_queues = 1;
card->info.is_multicast_different =
known_devices[i][QETH_MULTICAST_IND];
qeth_get_channel_path_desc(card);
@@ -2027,6 +2289,37 @@ static int qeth_ulp_setup(struct qeth_card *card)
return rc;
}
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
+{
+ int rc;
+ struct qeth_qdio_out_buffer *newbuf;
+
+ rc = 0;
+ newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
+ if (!newbuf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ newbuf->buffer = &q->qdio_bufs[bidx];
+ skb_queue_head_init(&newbuf->skb_list);
+ lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
+ newbuf->q = q;
+ newbuf->aob = NULL;
+ newbuf->next_pending = q->bufs[bidx];
+ atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
+ q->bufs[bidx] = newbuf;
+ if (q->bufstates) {
+ q->bufstates[bidx].user = newbuf;
+ QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx);
+ QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf);
+ QETH_CARD_TEXT_(q->card, 2, "%lx",
+ (long) newbuf->next_pending);
+ }
+out:
+ return rc;
+}
+
+
static int qeth_alloc_qdio_buffers(struct qeth_card *card)
{
int i, j;
@@ -2037,52 +2330,63 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
return 0;
- card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
- GFP_KERNEL);
+ card->qdio.in_q = kzalloc(sizeof(struct qeth_qdio_q),
+ GFP_KERNEL);
if (!card->qdio.in_q)
goto out_nomem;
QETH_DBF_TEXT(SETUP, 2, "inq");
QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *));
memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
/* give inbound qeth_qdio_buffers their qdio_buffers */
- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
card->qdio.in_q->bufs[i].buffer =
&card->qdio.in_q->qdio_bufs[i];
+ card->qdio.in_q->bufs[i].rx_skb = NULL;
+ }
/* inbound buffer pool */
if (qeth_alloc_buffer_pool(card))
goto out_freeinq;
+
/* outbound */
card->qdio.out_qs =
- kmalloc(card->qdio.no_out_queues *
+ kzalloc(card->qdio.no_out_queues *
sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
if (!card->qdio.out_qs)
goto out_freepool;
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
+ card->qdio.out_qs[i] = kzalloc(sizeof(struct qeth_qdio_out_q),
GFP_KERNEL);
if (!card->qdio.out_qs[i])
goto out_freeoutq;
QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
- memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
card->qdio.out_qs[i]->queue_no = i;
/* give outbound qeth_qdio_buffers their qdio_buffers */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
- card->qdio.out_qs[i]->bufs[j].buffer =
- &card->qdio.out_qs[i]->qdio_bufs[j];
- skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
- skb_list);
- lockdep_set_class(
- &card->qdio.out_qs[i]->bufs[j].skb_list.lock,
- &qdio_out_skb_queue_key);
- INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
+ BUG_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
+ if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
+ goto out_freeoutqbufs;
}
}
+
+ /* completion */
+ if (qeth_alloc_cq(card))
+ goto out_freeoutq;
+
return 0;
+out_freeoutqbufs:
+ while (j > 0) {
+ --j;
+ kmem_cache_free(qeth_qdio_outbuf_cache,
+ card->qdio.out_qs[i]->bufs[j]);
+ card->qdio.out_qs[i]->bufs[j] = NULL;
+ }
out_freeoutq:
- while (i > 0)
+ while (i > 0) {
kfree(card->qdio.out_qs[--i]);
+ qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+ }
kfree(card->qdio.out_qs);
card->qdio.out_qs = NULL;
out_freepool:
@@ -2353,6 +2657,12 @@ static int qeth_init_input_buffer(struct qeth_card *card,
struct qeth_buffer_pool_entry *pool_entry;
int i;
+ if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
+ buf->rx_skb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN);
+ if (!buf->rx_skb)
+ return 1;
+ }
+
pool_entry = qeth_find_free_buffer_pool_entry(card);
if (!pool_entry)
return 1;
@@ -2399,13 +2709,21 @@ int qeth_init_qdio_queues(struct qeth_card *card)
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
return rc;
}
+
+ /* completion */
+ rc = qeth_cq_init(card);
+ if (rc) {
+ return rc;
+ }
+
/* outbound queue */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
memset(card->qdio.out_qs[i]->qdio_bufs, 0,
QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
qeth_clear_output_buffer(card->qdio.out_qs[i],
- &card->qdio.out_qs[i]->bufs[j]);
+ card->qdio.out_qs[i]->bufs[j],
+ QETH_QDIO_BUF_EMPTY);
}
card->qdio.out_qs[i]->card = card;
card->qdio.out_qs[i]->next_buf_to_fill = 0;
@@ -2734,9 +3052,19 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
}
EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);
+void qeth_buffer_reclaim_work(struct work_struct *work)
+{
+ struct qeth_card *card = container_of(work, struct qeth_card,
+ buffer_reclaim_work.work);
+
+ QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
+ qeth_queue_input_buffer(card, card->reclaim_index);
+}
+
void qeth_queue_input_buffer(struct qeth_card *card, int index)
{
struct qeth_qdio_q *queue = card->qdio.in_q;
+ struct list_head *lh;
int count;
int i;
int rc;
@@ -2768,6 +3096,20 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
atomic_add_unless(&card->force_alloc_skb, -1, 0);
}
+ if (!count) {
+ i = 0;
+ list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
+ i++;
+ if (i == card->qdio.in_buf_pool.buf_count) {
+ QETH_CARD_TEXT(card, 2, "qsarbw");
+ card->reclaim_index = index;
+ schedule_delayed_work(
+ &card->buffer_reclaim_work,
+ QETH_RECLAIM_WORK_TIME);
+ }
+ return;
+ }
+
/*
* according to old code it should be avoided to requeue all
* 128 buffers in order to benefit from PCI avoidance.
@@ -2787,8 +3129,6 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
qeth_get_micros() -
card->perf_stats.inbound_do_qdio_start_time;
if (rc) {
- dev_warn(&card->gdev->dev,
- "QDIO reported an error, rc=%i\n", rc);
QETH_CARD_TEXT(card, 2, "qinberr");
}
queue->next_buf_to_init = (queue->next_buf_to_init + count) %
@@ -2862,12 +3202,12 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
queue->card->perf_stats.sc_p_dp++;
queue->do_pack = 0;
/* flush packing buffers */
- buffer = &queue->bufs[queue->next_buf_to_fill];
+ buffer = queue->bufs[queue->next_buf_to_fill];
if ((atomic_read(&buffer->state) ==
QETH_QDIO_BUF_EMPTY) &&
(buffer->next_element_to_fill > 0)) {
atomic_set(&buffer->state,
- QETH_QDIO_BUF_PRIMED);
+ QETH_QDIO_BUF_PRIMED);
flush_count++;
queue->next_buf_to_fill =
(queue->next_buf_to_fill + 1) %
@@ -2878,6 +3218,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
return flush_count;
}
+
/*
* Called to flush a packing buffer if no more pci flags are on the queue.
* Checks if there is a packing buffer and prepares it to be flushed.
@@ -2887,7 +3228,7 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
{
struct qeth_qdio_out_buffer *buffer;
- buffer = &queue->bufs[queue->next_buf_to_fill];
+ buffer = queue->bufs[queue->next_buf_to_fill];
if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
(buffer->next_element_to_fill > 0)) {
/* it's a packing buffer */
@@ -2908,10 +3249,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
unsigned int qdio_flags;
for (i = index; i < index + count; ++i) {
- buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
+ int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+ buf = queue->bufs[bidx];
buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
SBAL_EFLAGS_LAST_ENTRY;
+ if (queue->bufstates)
+ queue->bufstates[bidx].user = buf;
+
if (queue->card->info.type == QETH_CARD_TYPE_IQD)
continue;
@@ -2963,6 +3308,9 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (rc == QDIO_ERROR_SIGA_TARGET)
return;
QETH_CARD_TEXT(queue->card, 2, "flushbuf");
+ QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
+ QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
+ QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
/* this must not happen under normal circumstances. if it
@@ -3024,14 +3372,120 @@ void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
}
EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
+int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
+{
+ int rc;
+
+ if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
+ rc = -1;
+ goto out;
+ } else {
+ if (card->options.cq == cq) {
+ rc = 0;
+ goto out;
+ }
+
+ if (card->state != CARD_STATE_DOWN &&
+ card->state != CARD_STATE_RECOVER) {
+ rc = -1;
+ goto out;
+ }
+
+ qeth_free_qdio_buffers(card);
+ card->options.cq = cq;
+ rc = 0;
+ }
+out:
+ return rc;
+
+}
+EXPORT_SYMBOL_GPL(qeth_configure_cq);
+
+
+static void qeth_qdio_cq_handler(struct qeth_card *card,
+ unsigned int qdio_err,
+ unsigned int queue, int first_element, int count) {
+ struct qeth_qdio_q *cq = card->qdio.c_q;
+ int i;
+ int rc;
+
+ if (!qeth_is_cq(card, queue))
+ goto out;
+
+ QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
+ QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
+ QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
+
+ if (qdio_err) {
+ netif_stop_queue(card->dev);
+ qeth_schedule_recovery(card);
+ goto out;
+ }
+
+ if (card->options.performance_stats) {
+ card->perf_stats.cq_cnt++;
+ card->perf_stats.cq_start_time = qeth_get_micros();
+ }
+
+ for (i = first_element; i < first_element + count; ++i) {
+ int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+ struct qdio_buffer *buffer = &cq->qdio_bufs[bidx];
+ int e;
+
+ e = 0;
+ while (buffer->element[e].addr) {
+ unsigned long phys_aob_addr;
+
+ phys_aob_addr = (unsigned long) buffer->element[e].addr;
+ qeth_qdio_handle_aob(card, phys_aob_addr);
+ buffer->element[e].addr = NULL;
+ buffer->element[e].eflags = 0;
+ buffer->element[e].sflags = 0;
+ buffer->element[e].length = 0;
+
+ ++e;
+ }
+
+ buffer->element[15].eflags = 0;
+ buffer->element[15].sflags = 0;
+ }
+ rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
+ card->qdio.c_q->next_buf_to_init,
+ count);
+ if (rc) {
+ dev_warn(&card->gdev->dev,
+ "QDIO reported an error, rc=%i\n", rc);
+ QETH_CARD_TEXT(card, 2, "qcqherr");
+ }
+ card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
+ + count) % QDIO_MAX_BUFFERS_PER_Q;
+
+ netif_wake_queue(card->dev);
+
+ if (card->options.performance_stats) {
+ int delta_t = qeth_get_micros();
+ delta_t -= card->perf_stats.cq_start_time;
+ card->perf_stats.cq_time += delta_t;
+ }
+out:
+ return;
+}
+
void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
- unsigned int queue, int first_element, int count,
+ unsigned int queue, int first_elem, int count,
unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *)card_ptr;
- if (qdio_err)
+ QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
+ QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
+
+ if (qeth_is_cq(card, queue))
+ qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
+ else if (qdio_err)
qeth_schedule_recovery(card);
+
+
}
EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
@@ -3057,9 +3511,45 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
qeth_get_micros();
}
for (i = first_element; i < (first_element + count); ++i) {
- buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
+ int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+ buffer = queue->bufs[bidx];
qeth_handle_send_error(card, buffer, qdio_error);
- qeth_clear_output_buffer(queue, buffer);
+
+ if (queue->bufstates &&
+ (queue->bufstates[bidx].flags &
+ QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
+ BUG_ON(card->options.cq != QETH_CQ_ENABLED);
+
+ if (atomic_cmpxchg(&buffer->state,
+ QETH_QDIO_BUF_PRIMED,
+ QETH_QDIO_BUF_PENDING) ==
+ QETH_QDIO_BUF_PRIMED) {
+ qeth_notify_skbs(queue, buffer,
+ TX_NOTIFY_PENDING);
+ }
+ buffer->aob = queue->bufstates[bidx].aob;
+ QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
+ QETH_CARD_TEXT(queue->card, 5, "aob");
+ QETH_CARD_TEXT_(queue->card, 5, "%lx",
+ virt_to_phys(buffer->aob));
+ BUG_ON(bidx < 0 || bidx >= QDIO_MAX_BUFFERS_PER_Q);
+ if (qeth_init_qdio_out_buf(queue, bidx)) {
+ QETH_CARD_TEXT(card, 2, "outofbuf");
+ qeth_schedule_recovery(card);
+ }
+ } else {
+ if (card->options.cq == QETH_CQ_ENABLED) {
+ enum iucv_tx_notify n;
+
+ n = qeth_compute_cq_notification(
+ buffer->buffer->element[15].sflags, 0);
+ qeth_notify_skbs(queue, buffer, n);
+ }
+
+ qeth_clear_output_buffer(queue, buffer,
+ QETH_QDIO_BUF_EMPTY);
+ }
+ qeth_cleanup_handled_pending(queue, bidx, 0);
}
atomic_sub(count, &queue->used_buffers);
/* check if we need to do something on this outbound queue */
@@ -3204,7 +3694,8 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
frag = &skb_shinfo(skb)->frags[cnt];
- buffer->element[element].addr = (char *)page_to_phys(frag->page)
+ buffer->element[element].addr = (char *)
+ page_to_phys(skb_frag_page(frag))
+ frag->page_offset;
buffer->element[element].length = frag->size;
buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
@@ -3291,7 +3782,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
/* ... now we've got the queue */
index = queue->next_buf_to_fill;
- buffer = &queue->bufs[queue->next_buf_to_fill];
+ buffer = queue->bufs[queue->next_buf_to_fill];
/*
* check if buffer is empty to make sure that we do not 'overtake'
* ourselves and try to fill a buffer that is already primed
@@ -3325,7 +3816,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
start_index = queue->next_buf_to_fill;
- buffer = &queue->bufs[queue->next_buf_to_fill];
+ buffer = queue->bufs[queue->next_buf_to_fill];
/*
* check if buffer is empty to make sure that we do not 'overtake'
* ourselves and try to fill a buffer that is already primed
@@ -3347,7 +3838,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
queue->next_buf_to_fill =
(queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
- buffer = &queue->bufs[queue->next_buf_to_fill];
+ buffer = queue->bufs[queue->next_buf_to_fill];
/* we did a step forward, so check buffer state
* again */
if (atomic_read(&buffer->state) !=
@@ -3925,6 +4416,20 @@ static void qeth_determine_capabilities(struct qeth_card *card)
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
+ QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac1);
+ QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac3);
+ QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
+ if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
+ ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
+ ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
+ dev_info(&card->gdev->dev,
+ "Completion Queueing supported\n");
+ } else {
+ card->options.cq = QETH_CQ_NOTAVAILABLE;
+ }
+
+
out_offline:
if (ddev_offline == 1)
ccw_device_set_offline(ddev);
@@ -3932,11 +4437,30 @@ out:
return;
}
+static inline void qeth_qdio_establish_cq(struct qeth_card *card,
+ struct qdio_buffer **in_sbal_ptrs,
+ void (**queue_start_poll) (struct ccw_device *, int, unsigned long)) {
+ int i;
+
+ if (card->options.cq == QETH_CQ_ENABLED) {
+ int offset = QDIO_MAX_BUFFERS_PER_Q *
+ (card->qdio.no_in_queues - 1);
+ i = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1);
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+ in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
+ virt_to_phys(card->qdio.c_q->bufs[i].buffer);
+ }
+
+ queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
+ }
+}
+
static int qeth_qdio_establish(struct qeth_card *card)
{
struct qdio_initialize init_data;
char *qib_param_field;
struct qdio_buffer **in_sbal_ptrs;
+ void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
struct qdio_buffer **out_sbal_ptrs;
int i, j, k;
int rc = 0;
@@ -3945,34 +4469,48 @@ static int qeth_qdio_establish(struct qeth_card *card)
qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
GFP_KERNEL);
- if (!qib_param_field)
- return -ENOMEM;
+ if (!qib_param_field) {
+ rc = -ENOMEM;
+ goto out_free_nothing;
+ }
qeth_create_qib_param_field(card, qib_param_field);
qeth_create_qib_param_field_blkt(card, qib_param_field);
- in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
+ in_sbal_ptrs = kzalloc(card->qdio.no_in_queues *
+ QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
GFP_KERNEL);
if (!in_sbal_ptrs) {
- kfree(qib_param_field);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_free_qib_param;
}
- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
in_sbal_ptrs[i] = (struct qdio_buffer *)
virt_to_phys(card->qdio.in_q->bufs[i].buffer);
+ }
+
+ queue_start_poll = kzalloc(sizeof(void *) * card->qdio.no_in_queues,
+ GFP_KERNEL);
+ if (!queue_start_poll) {
+ rc = -ENOMEM;
+ goto out_free_in_sbals;
+ }
+ for (i = 0; i < card->qdio.no_in_queues; ++i)
+ queue_start_poll[i] = card->discipline.start_poll;
+
+ qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
out_sbal_ptrs =
- kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
+ kzalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
sizeof(void *), GFP_KERNEL);
if (!out_sbal_ptrs) {
- kfree(in_sbal_ptrs);
- kfree(qib_param_field);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_free_queue_start_poll;
}
for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
- card->qdio.out_qs[i]->bufs[j].buffer);
+ card->qdio.out_qs[i]->bufs[j]->buffer);
}
memset(&init_data, 0, sizeof(struct qdio_initialize));
@@ -3980,14 +4518,15 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.q_format = qeth_get_qdio_q_format(card);
init_data.qib_param_field_format = 0;
init_data.qib_param_field = qib_param_field;
- init_data.no_input_qs = 1;
+ init_data.no_input_qs = card->qdio.no_in_queues;
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = card->discipline.input_handler;
init_data.output_handler = card->discipline.output_handler;
- init_data.queue_start_poll = card->discipline.start_poll;
+ init_data.queue_start_poll = queue_start_poll;
init_data.int_parm = (unsigned long) card;
init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
+ init_data.output_sbal_state_array = card->qdio.out_bufstates;
init_data.scan_threshold =
(card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;
@@ -4004,10 +4543,26 @@ static int qeth_qdio_establish(struct qeth_card *card)
qdio_free(CARD_DDEV(card));
}
}
+
+ switch (card->options.cq) {
+ case QETH_CQ_ENABLED:
+ dev_info(&card->gdev->dev, "Completion Queue support enabled");
+ break;
+ case QETH_CQ_DISABLED:
+ dev_info(&card->gdev->dev, "Completion Queue support disabled");
+ break;
+ default:
+ break;
+ }
out:
kfree(out_sbal_ptrs);
+out_free_queue_start_poll:
+ kfree(queue_start_poll);
+out_free_in_sbals:
kfree(in_sbal_ptrs);
+out_free_qib_param:
kfree(qib_param_field);
+out_free_nothing:
return rc;
}
@@ -4144,29 +4699,36 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
-static inline int qeth_create_skb_frag(struct qdio_buffer_element *element,
+static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer,
+ struct qdio_buffer_element *element,
struct sk_buff **pskb, int offset, int *pfrag, int data_len)
{
struct page *page = virt_to_page(element->addr);
if (*pskb == NULL) {
- /* the upper protocol layers assume that there is data in the
- * skb itself. Copy a small amount (64 bytes) to make them
- * happy. */
- *pskb = dev_alloc_skb(64 + ETH_HLEN);
- if (!(*pskb))
- return -ENOMEM;
+ if (qethbuffer->rx_skb) {
+ /* only if qeth_card.options.cq == QETH_CQ_ENABLED */
+ *pskb = qethbuffer->rx_skb;
+ qethbuffer->rx_skb = NULL;
+ } else {
+ *pskb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN);
+ if (!(*pskb))
+ return -ENOMEM;
+ }
+
skb_reserve(*pskb, ETH_HLEN);
- if (data_len <= 64) {
+ if (data_len <= QETH_RX_PULL_LEN) {
memcpy(skb_put(*pskb, data_len), element->addr + offset,
data_len);
} else {
get_page(page);
- memcpy(skb_put(*pskb, 64), element->addr + offset, 64);
- skb_fill_page_desc(*pskb, *pfrag, page, offset + 64,
- data_len - 64);
- (*pskb)->data_len += data_len - 64;
- (*pskb)->len += data_len - 64;
- (*pskb)->truesize += data_len - 64;
+ memcpy(skb_put(*pskb, QETH_RX_PULL_LEN),
+ element->addr + offset, QETH_RX_PULL_LEN);
+ skb_fill_page_desc(*pskb, *pfrag, page,
+ offset + QETH_RX_PULL_LEN,
+ data_len - QETH_RX_PULL_LEN);
+ (*pskb)->data_len += data_len - QETH_RX_PULL_LEN;
+ (*pskb)->len += data_len - QETH_RX_PULL_LEN;
+ (*pskb)->truesize += data_len - QETH_RX_PULL_LEN;
(*pfrag)++;
}
} else {
@@ -4177,15 +4739,18 @@ static inline int qeth_create_skb_frag(struct qdio_buffer_element *element,
(*pskb)->truesize += data_len;
(*pfrag)++;
}
+
+
return 0;
}
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
- struct qdio_buffer *buffer,
+ struct qeth_qdio_buffer *qethbuffer,
struct qdio_buffer_element **__element, int *__offset,
struct qeth_hdr **hdr)
{
struct qdio_buffer_element *element = *__element;
+ struct qdio_buffer *buffer = qethbuffer->buffer;
int offset = *__offset;
struct sk_buff *skb = NULL;
int skb_len = 0;
@@ -4230,9 +4795,10 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
if (!skb_len)
return NULL;
- if ((skb_len >= card->options.rx_sg_cb) &&
- (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
- (!atomic_read(&card->force_alloc_skb))) {
+ if (((skb_len >= card->options.rx_sg_cb) &&
+ (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
+ (!atomic_read(&card->force_alloc_skb))) ||
+ (card->options.cq == QETH_CQ_ENABLED)) {
use_rx_sg = 1;
} else {
skb = dev_alloc_skb(skb_len + headroom);
@@ -4247,8 +4813,8 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
data_len = min(skb_len, (int)(element->length - offset));
if (data_len) {
if (use_rx_sg) {
- if (qeth_create_skb_frag(element, &skb, offset,
- &frag, data_len))
+ if (qeth_create_skb_frag(qethbuffer, element,
+ &skb, offset, &frag, data_len))
goto no_mem;
} else {
memcpy(skb_put(skb, data_len), data_ptr,
@@ -4650,6 +5216,8 @@ static struct {
{"tx do_QDIO count"},
{"tx csum"},
{"tx lin"},
+ {"cq handler count"},
+ {"cq handler time"}
};
int qeth_core_get_sset_count(struct net_device *dev, int stringset)
@@ -4708,6 +5276,8 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
data[32] = card->perf_stats.outbound_do_qdio_cnt;
data[33] = card->perf_stats.tx_csum;
data[34] = card->perf_stats.tx_lin;
+ data[35] = card->perf_stats.cq_cnt;
+ data[36] = card->perf_stats.cq_time;
}
EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
@@ -4866,7 +5436,16 @@ static int __init qeth_core_init(void)
goto slab_err;
}
+ qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
+ sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
+ if (!qeth_qdio_outbuf_cache) {
+ rc = -ENOMEM;
+ goto cqslab_err;
+ }
+
return 0;
+cqslab_err:
+ kmem_cache_destroy(qeth_core_header_cache);
slab_err:
root_device_unregister(qeth_core_root_dev);
register_err:
@@ -4891,6 +5470,7 @@ static void __exit qeth_core_exit(void)
&driver_attr_group);
ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
ccw_driver_unregister(&qeth_ccw_driver);
+ kmem_cache_destroy(qeth_qdio_outbuf_cache);
kmem_cache_destroy(qeth_core_header_cache);
qeth_unregister_dbf_views();
pr_info("core functions removed\n");
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index b70b47fbd6c..a21ae3d549d 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -409,7 +409,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
BUG_ON(!budget);
while (budget) {
skb = qeth_core_get_next_skb(card,
- card->qdio.in_q->bufs[card->rx.b_index].buffer,
+ &card->qdio.in_q->bufs[card->rx.b_index],
&card->rx.b_element, &card->rx.e_offset, &hdr);
if (!skb) {
*done = 1;
@@ -925,7 +925,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l2_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = qeth_l2_set_multicast_list,
+ .ndo_set_rx_mode = qeth_l2_set_multicast_list,
.ndo_do_ioctl = qeth_l2_do_ioctl,
.ndo_set_mac_address = qeth_l2_set_mac_address,
.ndo_change_mtu = qeth_change_mtu,
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 14a43aeb0c2..e367315a63f 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -63,5 +63,9 @@ int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
const u8 *);
int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
+struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
+int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
+int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
+void qeth_l3_set_ip_addr_list(struct qeth_card *);
#endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fafb8c29954..ce735204d31 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -29,6 +29,7 @@
#include <net/ip.h>
#include <net/arp.h>
#include <net/ip6_checksum.h>
+#include <net/iucv/af_iucv.h>
#include "qeth_l3.h"
@@ -267,7 +268,7 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
}
}
-static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
{
unsigned long flags;
int rc = 0;
@@ -286,7 +287,7 @@ static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
return rc;
}
-static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
{
unsigned long flags;
int rc = 0;
@@ -305,7 +306,7 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
}
-static struct qeth_ipaddr *qeth_l3_get_addr_buffer(
+struct qeth_ipaddr *qeth_l3_get_addr_buffer(
enum qeth_prot_versions prot)
{
struct qeth_ipaddr *addr;
@@ -421,7 +422,7 @@ again:
list_splice(&fail_list, &card->ip_list);
}
-static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
+void qeth_l3_set_ip_addr_list(struct qeth_card *card)
{
struct list_head *tbd_list;
struct qeth_ipaddr *todo, *addr;
@@ -438,7 +439,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
spin_lock_irqsave(&card->ip_lock, flags);
tbd_list = card->ip_tbd_list;
- card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
+ card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
if (!card->ip_tbd_list) {
QETH_CARD_TEXT(card, 0, "silnomem");
card->ip_tbd_list = tbd_list;
@@ -1993,12 +1994,13 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
__u16 vlan_tag = 0;
int is_vlan;
unsigned int len;
+ __u16 magic;
*done = 0;
BUG_ON(!budget);
while (budget) {
skb = qeth_core_get_next_skb(card,
- card->qdio.in_q->bufs[card->rx.b_index].buffer,
+ &card->qdio.in_q->bufs[card->rx.b_index],
&card->rx.b_element, &card->rx.e_offset, &hdr);
if (!skb) {
*done = 1;
@@ -2007,12 +2009,26 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
skb->dev = card->dev;
switch (hdr->hdr.l3.id) {
case QETH_HEADER_TYPE_LAYER3:
- is_vlan = qeth_l3_rebuild_skb(card, skb, hdr,
+ magic = *(__u16 *)skb->data;
+ if ((card->info.type == QETH_CARD_TYPE_IQD) &&
+ (magic == ETH_P_AF_IUCV)) {
+ skb->protocol = ETH_P_AF_IUCV;
+ skb->pkt_type = PACKET_HOST;
+ skb->mac_header = NET_SKB_PAD;
+ skb->dev = card->dev;
+ len = skb->len;
+ card->dev->header_ops->create(skb, card->dev, 0,
+ card->dev->dev_addr, "FAKELL",
+ card->dev->addr_len);
+ netif_receive_skb(skb);
+ } else {
+ is_vlan = qeth_l3_rebuild_skb(card, skb, hdr,
&vlan_tag);
- len = skb->len;
- if (is_vlan && !card->options.sniffer)
- __vlan_hwaccel_put_tag(skb, vlan_tag);
- napi_gro_receive(&card->napi, skb);
+ len = skb->len;
+ if (is_vlan && !card->options.sniffer)
+ __vlan_hwaccel_put_tag(skb, vlan_tag);
+ napi_gro_receive(&card->napi, skb);
+ }
break;
case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
skb->pkt_type = PACKET_HOST;
@@ -2784,6 +2800,30 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
return cast_type;
}
+static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
+ struct qeth_hdr *hdr, struct sk_buff *skb)
+{
+ char daddr[16];
+ struct af_iucv_trans_hdr *iucv_hdr;
+
+ skb_pull(skb, 14);
+ card->dev->header_ops->create(skb, card->dev, 0,
+ card->dev->dev_addr, card->dev->dev_addr,
+ card->dev->addr_len);
+ skb_pull(skb, 14);
+ iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
+ memset(hdr, 0, sizeof(struct qeth_hdr));
+ hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+ hdr->hdr.l3.ext_flags = 0;
+ hdr->hdr.l3.length = skb->len;
+ hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+ memset(daddr, 0, sizeof(daddr));
+ daddr[0] = 0xfe;
+ daddr[1] = 0x80;
+ memcpy(&daddr[8], iucv_hdr->destUserID, 8);
+ memcpy(hdr->hdr.l3.dest_addr, daddr, 16);
+}
+
static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb, int ipv, int cast_type)
{
@@ -2936,8 +2976,11 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
int data_offset = -1;
int nr_frags;
- if (((card->info.type == QETH_CARD_TYPE_IQD) && (!ipv)) ||
- card->options.sniffer)
+ if (((card->info.type == QETH_CARD_TYPE_IQD) &&
+ (((card->options.cq != QETH_CQ_ENABLED) && !ipv) ||
+ ((card->options.cq == QETH_CQ_ENABLED) &&
+ (skb->protocol != ETH_P_AF_IUCV)))) ||
+ card->options.sniffer)
goto tx_drop;
if ((card->state != CARD_STATE_UP) || !card->lan_online) {
@@ -2959,7 +3002,10 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
(skb_shinfo(skb)->nr_frags == 0)) {
new_skb = skb;
- data_offset = ETH_HLEN;
+ if (new_skb->protocol == ETH_P_AF_IUCV)
+ data_offset = 0;
+ else
+ data_offset = ETH_HLEN;
hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
if (!hdr)
goto tx_drop;
@@ -2993,7 +3039,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
tag = (u16 *)(new_skb->data + 12);
*tag = __constant_htons(ETH_P_8021Q);
*(tag + 1) = htons(vlan_tx_tag_get(new_skb));
- new_skb->vlan_tci = 0;
}
}
@@ -3025,9 +3070,13 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
qeth_l3_fill_header(card, hdr, new_skb, ipv,
cast_type);
} else {
- qeth_l3_fill_header(card, hdr, new_skb, ipv,
- cast_type);
- hdr->hdr.l3.length = new_skb->len - data_offset;
+ if (new_skb->protocol == ETH_P_AF_IUCV)
+ qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
+ else {
+ qeth_l3_fill_header(card, hdr, new_skb, ipv,
+ cast_type);
+ hdr->hdr.l3.length = new_skb->len - data_offset;
+ }
}
if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -3226,7 +3275,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = qeth_l3_set_multicast_list,
+ .ndo_set_rx_mode = qeth_l3_set_multicast_list,
.ndo_do_ioctl = qeth_l3_do_ioctl,
.ndo_change_mtu = qeth_change_mtu,
.ndo_fix_features = qeth_l3_fix_features,
@@ -3242,7 +3291,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = qeth_l3_set_multicast_list,
+ .ndo_set_rx_mode = qeth_l3_set_multicast_list,
.ndo_do_ioctl = qeth_l3_do_ioctl,
.ndo_change_mtu = qeth_change_mtu,
.ndo_fix_features = qeth_l3_fix_features,
@@ -3290,6 +3339,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
qeth_l3_iqd_read_initial_mac(card);
+ if (card->options.hsuid[0])
+ memcpy(card->dev->perm_addr, card->options.hsuid, 9);
} else
return -ENODEV;
@@ -3660,7 +3711,6 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
struct qeth_ipaddr *addr;
struct qeth_card *card;
-
card = qeth_l3_get_card_from_dev(dev);
if (!card)
return NOTIFY_DONE;
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index cd99210296e..0ea2fbfe0e9 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -9,7 +9,7 @@
*/
#include <linux/slab.h>
-
+#include <asm/ebcdic.h>
#include "qeth_l3.h"
#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
@@ -308,6 +308,8 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
if (card->info.type != QETH_CARD_TYPE_IQD)
return -EPERM;
+ if (card->options.cq == QETH_CQ_ENABLED)
+ return -EPERM;
mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
@@ -347,6 +349,111 @@ out:
static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
qeth_l3_dev_sniffer_store);
+
+static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ char tmp_hsuid[9];
+
+ if (!card)
+ return -EINVAL;
+
+ if (card->info.type != QETH_CARD_TYPE_IQD)
+ return -EPERM;
+
+ if (card->state == CARD_STATE_DOWN)
+ return -EPERM;
+
+ memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
+ EBCASC(tmp_hsuid, 8);
+ return sprintf(buf, "%s\n", tmp_hsuid);
+}
+
+static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ struct qeth_ipaddr *addr;
+ char *tmp;
+ int i;
+
+ if (!card)
+ return -EINVAL;
+
+ if (card->info.type != QETH_CARD_TYPE_IQD)
+ return -EPERM;
+ if (card->state != CARD_STATE_DOWN &&
+ card->state != CARD_STATE_RECOVER)
+ return -EPERM;
+ if (card->options.sniffer)
+ return -EPERM;
+ if (card->options.cq == QETH_CQ_NOTAVAILABLE)
+ return -EPERM;
+
+ tmp = strsep((char **)&buf, "\n");
+ if (strlen(tmp) > 8)
+ return -EINVAL;
+
+ if (card->options.hsuid[0]) {
+ /* delete old ip address */
+ addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+ if (addr != NULL) {
+ addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
+ addr->u.a6.addr.s6_addr32[1] = 0x00000000;
+ for (i = 8; i < 16; i++)
+ addr->u.a6.addr.s6_addr[i] =
+ card->options.hsuid[i - 8];
+ addr->u.a6.pfxlen = 0;
+ addr->type = QETH_IP_TYPE_NORMAL;
+ } else
+ return -ENOMEM;
+ if (!qeth_l3_delete_ip(card, addr))
+ kfree(addr);
+ qeth_l3_set_ip_addr_list(card);
+ }
+
+ if (strlen(tmp) == 0) {
+ /* delete ip address only */
+ card->options.hsuid[0] = '\0';
+ if (card->dev)
+ memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+ qeth_configure_cq(card, QETH_CQ_DISABLED);
+ return count;
+ }
+
+ if (qeth_configure_cq(card, QETH_CQ_ENABLED))
+ return -EPERM;
+
+ for (i = 0; i < 8; i++)
+ card->options.hsuid[i] = ' ';
+ card->options.hsuid[8] = '\0';
+ strncpy(card->options.hsuid, tmp, strlen(tmp));
+ ASCEBC(card->options.hsuid, 8);
+ if (card->dev)
+ memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+
+ addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+ if (addr != NULL) {
+ addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
+ addr->u.a6.addr.s6_addr32[1] = 0x00000000;
+ for (i = 8; i < 16; i++)
+ addr->u.a6.addr.s6_addr[i] = card->options.hsuid[i - 8];
+ addr->u.a6.pfxlen = 0;
+ addr->type = QETH_IP_TYPE_NORMAL;
+ } else
+ return -ENOMEM;
+ if (!qeth_l3_add_ip(card, addr))
+ kfree(addr);
+ qeth_l3_set_ip_addr_list(card);
+
+ return count;
+}
+
+static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show,
+ qeth_l3_dev_hsuid_store);
+
+
static struct attribute *qeth_l3_device_attrs[] = {
&dev_attr_route4.attr,
&dev_attr_route6.attr,
@@ -354,6 +461,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
&dev_attr_broadcast_mode.attr,
&dev_attr_canonical_macaddr.attr,
&dev_attr_sniffer.attr,
+ &dev_attr_hsuid.attr,
NULL,
};
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index c2049466060..957595a7a45 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -37,7 +37,6 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx
index 5e6620f8dab..6739069477d 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic79xx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx
@@ -31,8 +31,7 @@ config AIC79XX_CMDS_PER_DEVICE
on some devices. The upper bound is 253. 0 disables tagged queueing.
Per device tag depth can be controlled via the kernel command line
- "tag_info" option. See drivers/scsi/aic7xxx/README.aic79xx
- for details.
+ "tag_info" option. See Documentation/scsi/aic79xx.txt for details.
config AIC79XX_RESET_DELAY_MS
int "Initial bus reset delay in milli-seconds"
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
index 88da670a791..55ac55ee606 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -36,8 +36,7 @@ config AIC7XXX_CMDS_PER_DEVICE
on some devices. The upper bound is 253. 0 disables tagged queueing.
Per device tag depth can be controlled via the kernel command line
- "tag_info" option. See drivers/scsi/aic7xxx/README.aic7xxx
- for details.
+ "tag_info" option. See Documentation/scsi/aic7xxx.txt for details.
config AIC7XXX_RESET_DELAY_MS
int "Initial bus reset delay in milli-seconds"
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c
index 67eeba3bdb0..a16a77c8b9c 100644
--- a/drivers/scsi/aic94xx/aic94xx_dump.c
+++ b/drivers/scsi/aic94xx/aic94xx_dump.c
@@ -29,7 +29,7 @@
*
*/
-#include "linux/pci.h"
+#include <linux/pci.h>
#include "aic94xx.h"
#include "aic94xx_reg.h"
#include "aic94xx_reg_def.h"
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index bda999ad9f5..5e19a5f820e 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -27,7 +27,6 @@
#define __BFAD_DRV_H__
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
index 6a38080e35e..cfcad8bde7c 100644
--- a/drivers/scsi/bnx2fc/Kconfig
+++ b/drivers/scsi/bnx2fc/Kconfig
@@ -2,7 +2,8 @@ config SCSI_BNX2X_FCOE
tristate "Broadcom NetXtreme II FCoE support"
depends on PCI
select NETDEVICES
- select NETDEV_1000
+ select ETHERNET
+ select NET_VENDOR_BROADCOM
select LIBFC
select LIBFCOE
select CNIC
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 42228ca5a9d..dd335a2a797 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -58,7 +58,7 @@
#include "57xx_hsi_bnx2fc.h"
#include "bnx2fc_debug.h"
-#include "../../net/cnic_if.h"
+#include "../../net/ethernet/broadcom/cnic_if.h"
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
@@ -81,7 +81,7 @@
#define BNX2FC_RQ_WQES_MAX 16
#define BNX2FC_CQ_WQES_MAX (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX)
-#define BNX2FC_NUM_MAX_SESS 128
+#define BNX2FC_NUM_MAX_SESS 1024
#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 7cb2cd48b17..820a1840c3f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -302,7 +302,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
return -ENOMEM;
}
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
- cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
+ cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ)
+ frag->page_offset;
} else {
cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
@@ -673,7 +673,7 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport)
struct net_device *netdev = interface->netdev;
struct ethtool_cmd ecmd;
- if (!dev_ethtool_get_settings(netdev, &ecmd)) {
+ if (!__ethtool_get_settings(netdev, &ecmd)) {
lport->link_supported_speeds &=
~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
if (ecmd.supported & (SUPPORTED_1000baseT_Half |
@@ -1001,9 +1001,11 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
"this interface\n");
return -EIO;
}
+ rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
vn_port = bnx2fc_if_create(interface, &vport->dev, 1);
mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
if (IS_ERR(vn_port)) {
printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index 45a6154ce97..01cff1894b6 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -4,7 +4,8 @@ config SCSI_BNX2_ISCSI
depends on PCI
select SCSI_ISCSI_ATTRS
select NETDEVICES
- select NETDEV_1000
+ select ETHERNET
+ select NET_VENDOR_BROADCOM
select CNIC
---help---
This driver supports iSCSI offload for the Broadcom NetXtreme II
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index dc5700765db..0bd70e80efe 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -40,7 +40,7 @@
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
-#include "../../net/cnic_if.h"
+#include "../../net/ethernet/broadcom/cnic_if.h"
#include "57xx_iscsi_hsi.h"
#include "57xx_iscsi_constants.h"
diff --git a/drivers/scsi/cxgbi/cxgb3i/Kbuild b/drivers/scsi/cxgbi/cxgb3i/Kbuild
index 09dbf9efc8e..6f095e28a97 100644
--- a/drivers/scsi/cxgbi/cxgb3i/Kbuild
+++ b/drivers/scsi/cxgbi/cxgb3i/Kbuild
@@ -1,3 +1,3 @@
-EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3
+EXTRA_CFLAGS += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb3
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o
diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig
index 11dff23f783..6bbc36fbd6e 100644
--- a/drivers/scsi/cxgbi/cxgb3i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig
@@ -2,7 +2,8 @@ config SCSI_CXGB3_ISCSI
tristate "Chelsio T3 iSCSI support"
depends on PCI && INET
select NETDEVICES
- select NETDEV_10000
+ select ETHERNET
+ select NET_VENDOR_CHELSIO
select CHELSIO_T3
select SCSI_ISCSI_ATTRS
---help---
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index f5864485033..1242c7c04a0 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -14,7 +14,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kbuild b/drivers/scsi/cxgbi/cxgb4i/Kbuild
index b9f4af7454b..8290cdaa465 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kbuild
+++ b/drivers/scsi/cxgbi/cxgb4i/Kbuild
@@ -1,3 +1,3 @@
-EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb4
+EXTRA_CFLAGS += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += cxgb4i.o
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index d5302c27f37..16b2c7d2661 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -2,7 +2,8 @@ config SCSI_CXGB4_ISCSI
tristate "Chelsio T4 iSCSI support"
depends on PCI && INET
select NETDEVICES
- select NETDEV_10000
+ select ETHERNET
+ select NET_VENDOR_CHELSIO
select CHELSIO_T4
select SCSI_ISCSI_ATTRS
---help---
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index ae13c4993aa..31c79bde697 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -13,7 +13,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 77ac217ad5c..1c1329bc77c 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1787,7 +1787,7 @@ static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
}
static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
- unsigned int dlen, skb_frag_t *frags,
+ unsigned int dlen, struct page_frag *frags,
int frag_max)
{
unsigned int datalen = dlen;
@@ -1814,7 +1814,7 @@ static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
copy = min(datalen, sglen);
if (i && page == frags[i - 1].page &&
sgoffset + sg->offset ==
- frags[i - 1].page_offset + frags[i - 1].size) {
+ frags[i - 1].offset + frags[i - 1].size) {
frags[i - 1].size += copy;
} else {
if (i >= frag_max) {
@@ -1824,7 +1824,7 @@ static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
}
frags[i].page = page;
- frags[i].page_offset = sg->offset + sgoffset;
+ frags[i].offset = sg->offset + sgoffset;
frags[i].size = copy;
i++;
}
@@ -1944,14 +1944,14 @@ int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
if (tdata->nr_frags > MAX_SKB_FRAGS ||
(padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
char *dst = skb->data + task->hdr_len;
- skb_frag_t *frag = tdata->frags;
+ struct page_frag *frag = tdata->frags;
/* data fits in the skb's headroom */
for (i = 0; i < tdata->nr_frags; i++, frag++) {
char *src = kmap_atomic(frag->page,
KM_SOFTIRQ0);
- memcpy(dst, src+frag->page_offset, frag->size);
+ memcpy(dst, src+frag->offset, frag->size);
dst += frag->size;
kunmap_atomic(src, KM_SOFTIRQ0);
}
@@ -1962,11 +1962,13 @@ int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
skb_put(skb, count + padlen);
} else {
/* data fit into frag_list */
- for (i = 0; i < tdata->nr_frags; i++)
- get_page(tdata->frags[i].page);
-
- memcpy(skb_shinfo(skb)->frags, tdata->frags,
- sizeof(skb_frag_t) * tdata->nr_frags);
+ for (i = 0; i < tdata->nr_frags; i++) {
+ __skb_fill_page_desc(skb, i,
+ tdata->frags[i].page,
+ tdata->frags[i].offset,
+ tdata->frags[i].size);
+ skb_frag_ref(skb, i);
+ }
skb_shinfo(skb)->nr_frags = tdata->nr_frags;
skb->len += count;
skb->data_len += count;
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 9267844519c..3a25b1187c1 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -574,7 +574,7 @@ struct cxgbi_endpoint {
#define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
struct cxgbi_task_data {
unsigned short nr_frags;
- skb_frag_t frags[MAX_PDU_FRAGS];
+ struct page_frag frags[MAX_PDU_FRAGS];
struct sk_buff *skb;
unsigned int offset;
unsigned int count;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 5d0e9a24ae9..a1c0ddd53aa 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -18,7 +18,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -1518,7 +1517,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
return -ENOMEM;
}
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
- cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
+ cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ)
+ frag->page_offset;
} else {
cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
@@ -2046,7 +2045,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
struct net_device *netdev = fcoe_netdev(lport);
struct ethtool_cmd ecmd;
- if (!dev_ethtool_get_settings(netdev, &ecmd)) {
+ if (!__ethtool_get_settings(netdev, &ecmd)) {
lport->link_supported_speeds &=
~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
if (ecmd.supported & (SUPPORTED_1000baseT_Half |
@@ -2455,7 +2454,9 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
}
mutex_lock(&fcoe_config_mutex);
+ rtnl_lock();
vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
+ rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
if (IS_ERR(vn_port)) {
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 41068e8748e..dac8e39a518 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -105,11 +105,12 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
off = frag->page_offset;
- len = frag->size;
+ len = skb_frag_size(frag);
while (len > 0) {
clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
- data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
- KM_SKB_DATA_SOFTIRQ);
+ data = kmap_atomic(
+ skb_frag_page(frag) + (off >> PAGE_SHIFT),
+ KM_SKB_DATA_SOFTIRQ);
crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
off += clen;
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 430fc8ff014..09e61134037 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -708,7 +708,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
__func__,
event_code);
- return SCI_FAILURE;;
+ return SCI_FAILURE;
}
return SCI_SUCCESS;
case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index c9e3dc024bc..16ad97df5ba 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
sas_disable_routing(parent, phy->attached_sas_addr);
}
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
- sas_port_delete_phy(phy->port, phy->phy);
- if (phy->port->num_phys == 0)
- sas_port_delete(phy->port);
- phy->port = NULL;
+ if (phy->port) {
+ sas_port_delete_phy(phy->port, phy->phy);
+ if (phy->port->num_phys == 0)
+ sas_port_delete(phy->port);
+ phy->port = NULL;
+ }
}
static int sas_discover_bfs_by_root_level(struct domain_device *root,
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 2e6619eff3e..8883ca36f93 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -67,7 +67,7 @@
*
* NEC MegaRAID PCI Express ROMB 1000 0408 1033 8287
*
- * For history of changes, see Documentation/ChangeLog.megaraid
+ * For history of changes, see Documentation/scsi/ChangeLog.megaraid
*/
#include <linux/slab.h>
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 83035bd1c48..6825772cfd6 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -42,7 +42,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 6861244249a..2b1101076cf 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -41,7 +41,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 38ed0260959..246d5fbc6e5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -42,7 +42,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 6abd2fcc43e..5202de3f3d3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -41,7 +41,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 44d7885a4a1..44b47451322 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -43,7 +43,6 @@
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
-#include <linux/version.h>
#include "mv_defs.h"
#define DRV_NAME "mvsas"
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index d079f9a3c6b..b86db84d6f3 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -39,7 +39,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/hdreg.h>
-#include <linux/version.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/irq.h>
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index f920baf3ff2..ca496c7474e 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -24,7 +24,6 @@
#ifndef _PMCRAID_H
#define _PMCRAID_H
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/completion.h>
#include <linux/list.h>
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 4cace3f20c0..1e69527f1e4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1328,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
qla2x00_sp_compl(ha, sp);
} else {
ctx = sp->ctx;
- if (ctx->type == SRB_LOGIN_CMD ||
- ctx->type == SRB_LOGOUT_CMD) {
- ctx->u.iocb_cmd->free(sp);
- } else {
+ if (ctx->type == SRB_ELS_CMD_RPT ||
+ ctx->type == SRB_ELS_CMD_HST ||
+ ctx->type == SRB_CT_CMD) {
struct fc_bsg_job *bsg_job =
ctx->u.bsg_job;
if (bsg_job->request->msgcode
@@ -1343,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
kfree(sp->ctx);
mempool_free(sp,
ha->srb_mempool);
+ } else {
+ ctx->u.iocb_cmd->free(sp);
}
}
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 909ed9ed24c..441a1c5b897 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -50,6 +50,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/mutex.h>
+#include <linux/ratelimit.h>
#include "scsi.h"
#include <scsi/scsi_dbg.h>
@@ -626,14 +627,15 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
*/
if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
static char cmd[TASK_COMM_LEN];
- if (strcmp(current->comm, cmd) && printk_ratelimit()) {
- printk(KERN_WARNING
- "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
- "guessing data in;\n "
- "program %s not setting count and/or reply_len properly\n",
- old_hdr.reply_len - (int)SZ_SG_HEADER,
- input_size, (unsigned int) cmnd[0],
- current->comm);
+ if (strcmp(current->comm, cmd)) {
+ printk_ratelimited(KERN_WARNING
+ "sg_write: data in/out %d/%d bytes "
+ "for SCSI command 0x%x-- guessing "
+ "data in;\n program %s not setting "
+ "count and/or reply_len properly\n",
+ old_hdr.reply_len - (int)SZ_SG_HEADER,
+ input_size, (unsigned int) cmnd[0],
+ current->comm);
strcpy(cmd, current->comm);
}
}
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 29c7d4f9d1a..d0cbdb0cf9d 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -1260,16 +1260,34 @@ void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags)
}
EXPORT_SYMBOL(ssb_device_disable);
+/* Some chipsets need routing known for PCIe and 64-bit DMA */
+static bool ssb_dma_translation_special_bit(struct ssb_device *dev)
+{
+ u16 chip_id = dev->bus->chip_id;
+
+ if (dev->id.coreid == SSB_DEV_80211) {
+ return (chip_id == 0x4322 || chip_id == 43221 ||
+ chip_id == 43231 || chip_id == 43222);
+ }
+
+ return 0;
+}
+
u32 ssb_dma_translation(struct ssb_device *dev)
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_SSB:
return 0;
case SSB_BUSTYPE_PCI:
- if (ssb_read32(dev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64)
+ if (pci_is_pcie(dev->bus->host_pci) &&
+ ssb_read32(dev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64) {
return SSB_PCIE_DMA_H32;
- else
- return SSB_PCI_DMA;
+ } else {
+ if (ssb_dma_translation_special_bit(dev))
+ return SSB_PCIE_DMA_H32;
+ else
+ return SSB_PCI_DMA;
+ }
default:
__ssb_dma_not_implemented(dev);
}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 06c9081d596..89e50398dba 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,6 +24,8 @@ menuconfig STAGING
if STAGING
+source "drivers/staging/serial/Kconfig"
+
source "drivers/staging/et131x/Kconfig"
source "drivers/staging/slicoss/Kconfig"
@@ -44,8 +46,6 @@ source "drivers/staging/wlan-ng/Kconfig"
source "drivers/staging/echo/Kconfig"
-source "drivers/staging/brcm80211/Kconfig"
-
source "drivers/staging/comedi/Kconfig"
source "drivers/staging/olpc_dcon/Kconfig"
@@ -64,6 +64,8 @@ source "drivers/staging/rtl8712/Kconfig"
source "drivers/staging/rts_pstor/Kconfig"
+source "drivers/staging/rts5139/Kconfig"
+
source "drivers/staging/frontier/Kconfig"
source "drivers/staging/pohmelfs/Kconfig"
@@ -126,8 +128,6 @@ source "drivers/staging/quickstart/Kconfig"
source "drivers/staging/sbe-2t3e3/Kconfig"
-source "drivers/staging/ath6kl/Kconfig"
-
source "drivers/staging/keucr/Kconfig"
source "drivers/staging/bcm/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index f3c5e33bb26..d7a5a04d0a2 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -3,6 +3,7 @@
# fix for build system bug...
obj-$(CONFIG_STAGING) += staging.o
+obj-y += serial/
obj-$(CONFIG_ET131X) += et131x/
obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
@@ -14,8 +15,6 @@ obj-$(CONFIG_USBIP_CORE) += usbip/
obj-$(CONFIG_W35UND) += winbond/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_ECHO) += echo/
-obj-$(CONFIG_BRCMSMAC) += brcm80211/
-obj-$(CONFIG_BRCMFMAC) += brcm80211/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
obj-$(CONFIG_ASUS_OLED) += asus_oled/
@@ -25,6 +24,7 @@ obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_RTS_PSTOR) += rts_pstor/
+obj-$(CONFIG_RTS5139) += rts5139/
obj-$(CONFIG_SPECTRA) += spectra/
obj-$(CONFIG_TRANZPORT) += frontier/
obj-$(CONFIG_POHMELFS) += pohmelfs/
@@ -54,7 +54,6 @@ obj-$(CONFIG_SOLO6X10) += solo6x10/
obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge/
obj-$(CONFIG_ACPI_QUICKSTART) += quickstart/
obj-$(CONFIG_SBE_2T3E3) += sbe-2t3e3/
-obj-$(CONFIG_ATH6K_LEGACY) += ath6kl/
obj-$(CONFIG_USB_ENESTORAGE) += keucr/
obj-$(CONFIG_BCM_WIMAX) += bcm/
obj-$(CONFIG_FT1000) += ft1000/
diff --git a/drivers/staging/altera-stapl/altera.c b/drivers/staging/altera-stapl/altera.c
index 8d73a864273..c2eff6a82db 100644
--- a/drivers/staging/altera-stapl/altera.c
+++ b/drivers/staging/altera-stapl/altera.c
@@ -28,6 +28,7 @@
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "altera.h"
#include "altera-exprt.h"
#include "altera-jtag.h"
@@ -2384,7 +2385,7 @@ static int altera_get_act_info(u8 *p,
act_proc_attribute =
(p[proc_table + (13 * act_proc_id) + 8] & 0x03);
- procptr = (struct altera_procinfo *)
+ procptr =
kzalloc(sizeof(struct altera_procinfo),
GFP_KERNEL);
diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig
deleted file mode 100644
index afd6cc16a2b..00000000000
--- a/drivers/staging/ath6kl/Kconfig
+++ /dev/null
@@ -1,158 +0,0 @@
-config ATH6K_LEGACY
- tristate "Atheros AR6003 support (non mac80211)"
- depends on MMC && WLAN
- depends on CFG80211
- select WIRELESS_EXT
- select WEXT_PRIV
- help
- This module adds support for wireless adapters based on Atheros AR6003 chipset running over SDIO. If you choose to build it as a module, it will be called ath6kl. Pls note that AR6002 and AR6001 are not supported by this driver.
-
-choice
- prompt "AR6003 Board Data Configuration"
- depends on ATH6K_LEGACY
- default AR600x_SD31_XXX
- help
- Select the appropriate board data template from the list below that matches your AR6003 based reference design.
-
-config AR600x_SD31_XXX
- bool "SD31-xxx"
- help
- Board Data file for a standard SD31 reference design (File: bdata.SD31.bin)
-
-config AR600x_WB31_XXX
- bool "WB31-xxx"
- help
- Board Data file for a standard WB31 (BT/WiFi) reference design (File: bdata.WB31.bin)
-
-config AR600x_SD32_XXX
- bool "SD32-xxx"
- help
- Board Data file for a standard SD32 (5GHz) reference design (File: bdata.SD32.bin)
-
-config AR600x_CUSTOM_XXX
- bool "CUSTOM-xxx"
- help
- Board Data file for a custom reference design (File: should be named as bdata.CUSTOM.bin)
-endchoice
-
-config ATH6KL_ENABLE_COEXISTENCE
- bool "BT Coexistence support"
- depends on ATH6K_LEGACY
- help
- Enables WLAN/BT coexistence support. Select the apprpriate configuration from below.
-
-choice
- prompt "Front-End Antenna Configuration"
- depends on ATH6KL_ENABLE_COEXISTENCE
- default AR600x_DUAL_ANTENNA
- help
- Indicates the number of antennas being used by BT and WLAN. Select the appropriate configuration from the list below that matches your AR6003 based reference design.
-
-config AR600x_DUAL_ANTENNA
- bool "Dual Antenna"
- help
- Dual Antenna Design
-
-config AR600x_SINGLE_ANTENNA
- bool "Single Antenna"
- help
- Single Antenna Design
-endchoice
-
-choice
- prompt "Collocated Bluetooth Type"
- depends on ATH6KL_ENABLE_COEXISTENCE
- default AR600x_BT_AR3001
- help
- Select the appropriate configuration from the list below that matches your AR6003 based reference design.
-
-config AR600x_BT_QCOM
- bool "Qualcomm BTS4020X"
- help
- Qualcomm BT (3 Wire PTA)
-
-config AR600x_BT_CSR
- bool "CSR BC06"
- help
- CSR BT (3 Wire PTA)
-
-config AR600x_BT_AR3001
- bool "Atheros AR3001"
- help
- Atheros BT (3 Wire PTA)
-endchoice
-
-config ATH6KL_HCI_BRIDGE
- bool "HCI over SDIO support"
- depends on ATH6K_LEGACY
- help
- Enables BT over SDIO. Applicable only for combo designs (eg: WB31)
-
-config ATH6KL_CONFIG_GPIO_BT_RESET
- bool "Configure BT Reset GPIO"
- depends on ATH6KL_HCI_BRIDGE
- help
- Configure a WLAN GPIO for use with BT.
-
-config AR600x_BT_RESET_PIN
- int "GPIO"
- depends on ATH6KL_CONFIG_GPIO_BT_RESET
- default 22
- help
- WLAN GPIO to be used for resetting BT
-
-config ATH6KL_HTC_RAW_INTERFACE
- bool "RAW HTC support"
- depends on ATH6K_LEGACY
- help
- Enables raw HTC interface. Allows application to directly talk to the HTC interface via the ioctl interface
-
-config ATH6KL_VIRTUAL_SCATTER_GATHER
- bool "Virtual Scatter-Gather support"
- depends on ATH6K_LEGACY
- help
- Enables virtual scatter gather support for the hardware that does not support it natively.
-
-config ATH6KL_SKIP_ABI_VERSION_CHECK
- bool "Skip ABI version check support"
- depends on ATH6K_LEGACY
- help
- Forces the driver to disable ABI version check. Caution: Incompatilbity between the host driver and target firmware may lead to unknown side effects.
-
-config ATH6KL_BT_UART_FC_POLARITY
- int "UART Flow Control Polarity"
- depends on ATH6KL_LEGACY
- default 0
- help
- Configures the polarity of UART Flow Control. A value of 0 implies active low and is the default setting. Set it to 1 for active high.
-
-config ATH6KL_DEBUG
- bool "Debug support"
- depends on ATH6K_LEGACY
- help
- Enables debug support
-
-config ATH6KL_ENABLE_HOST_DEBUG
- bool "Host Debug support"
- depends on ATH6KL_DEBUG
- help
- Enables debug support in the driver
-
-config ATH6KL_ENABLE_TARGET_DEBUG_PRINTS
- bool "Target Debug support - Enable UART prints"
- depends on ATH6KL_DEBUG
- help
- Enables uart prints
-
-config AR600x_DEBUG_UART_TX_PIN
- int "GPIO"
- depends on ATH6KL_ENABLE_TARGET_DEBUG_PRINTS
- default 8
- help
- WLAN GPIO to be used for Debug UART (Tx)
-
-config ATH6KL_DISABLE_TARGET_DBGLOGS
- bool "Target Debug support - Disable Debug logs"
- depends on ATH6KL_DEBUG
- help
- Enables debug logs
diff --git a/drivers/staging/ath6kl/Makefile b/drivers/staging/ath6kl/Makefile
deleted file mode 100644
index 1d3f2390a17..00000000000
--- a/drivers/staging/ath6kl/Makefile
+++ /dev/null
@@ -1,122 +0,0 @@
-#------------------------------------------------------------------------------
-# Copyright (c) 2004-2010 Atheros Communications Inc.
-# All rights reserved.
-#
-#
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-#
-#
-#
-# Author(s): ="Atheros"
-#------------------------------------------------------------------------------
-
-ccflags-y += -I$(obj)/include
-ccflags-y += -I$(obj)/include/common
-ccflags-y += -I$(obj)/wlan/include
-ccflags-y += -I$(obj)/os/linux/include
-ccflags-y += -I$(obj)/os
-ccflags-y += -I$(obj)/bmi/include
-ccflags-y += -I$(obj)/include/common/AR6002/hw4.0
-
-ifeq ($(CONFIG_AR600x_DUAL_ANTENNA),y)
-ccflags-y += -DAR600x_DUAL_ANTENNA
-endif
-
-ifeq ($(CONFIG_AR600x_SINGLE_ANTENNA),y)
-ccflags-y += -DAR600x_SINGLE_ANTENNA
-endif
-
-ifeq ($(CONFIG_AR600x_BT_QCOM),y)
-ccflags-y += -DAR600x_BT_QCOM
-endif
-
-ifeq ($(CONFIG_AR600x_BT_CSR),y)
-ccflags-y += -DAR600x_BT_CSR
-endif
-
-ifeq ($(CONFIG_AR600x_BT_AR3001),y)
-ccflags-y += -DAR600x_BT_AR3001
-endif
-
-ifeq ($(CONFIG_ATH6KL_HCI_BRIDGE),y)
-ccflags-y += -DATH_AR6K_ENABLE_GMBOX
-ccflags-y += -DHCI_TRANSPORT_SDIO
-ccflags-y += -DSETUPHCI_ENABLED
-ccflags-y += -DSETUPBTDEV_ENABLED
-ath6kl-y += htc2/AR6000/ar6k_gmbox.o
-ath6kl-y += htc2/AR6000/ar6k_gmbox_hciuart.o
-ath6kl-y += miscdrv/ar3kconfig.o
-ath6kl-y += miscdrv/ar3kps/ar3kpsconfig.o
-ath6kl-y += miscdrv/ar3kps/ar3kpsparser.o
-endif
-
-ifeq ($(CONFIG_ATH6KL_CONFIG_GPIO_BT_RESET),y)
-ccflags-y += -DATH6KL_CONFIG_GPIO_BT_RESET
-endif
-
-ifeq ($(CONFIG_ATH6KL_HTC_RAW_INTERFACE),y)
-ccflags-y += -DHTC_RAW_INTERFACE
-endif
-
-ifeq ($(CONFIG_ATH6KL_ENABLE_HOST_DEBUG),y)
-ccflags-y += -DDEBUG
-ccflags-y += -DATH_DEBUG_MODULE
-endif
-
-ifeq ($(CONFIG_ATH6KL_ENABLE_TARGET_DEBUG_PRINTS),y)
-ccflags-y += -DENABLEUARTPRINT_SET
-endif
-
-ifeq ($(CONFIG_ATH6KL_DISABLE_TARGET_DBGLOGS),y)
-ccflags-y += -DATH6KL_DISABLE_TARGET_DBGLOGS
-endif
-
-ifeq ($(CONFIG_ATH6KL_VIRTUAL_SCATTER_GATHER),y)
-ccflags-y += -DATH6KL_CONFIG_HIF_VIRTUAL_SCATTER
-endif
-
-ifeq ($(CONFIG_ATH6KL_SKIP_ABI_VERSION_CHECK),y)
-ccflags-y += -DATH6KL_SKIP_ABI_VERSION_CHECK
-endif
-
-ccflags-y += -DWAPI_ENABLE
-ccflags-y += -DCHECKSUM_OFFLOAD
-
-obj-$(CONFIG_ATH6K_LEGACY) := ath6kl.o
-ath6kl-y += htc2/AR6000/ar6k.o
-ath6kl-y += htc2/AR6000/ar6k_events.o
-ath6kl-y += htc2/htc_send.o
-ath6kl-y += htc2/htc_recv.o
-ath6kl-y += htc2/htc_services.o
-ath6kl-y += htc2/htc.o
-ath6kl-y += bmi/src/bmi.o
-ath6kl-y += os/linux/cfg80211.o
-ath6kl-y += os/linux/ar6000_drv.o
-ath6kl-y += os/linux/ar6000_raw_if.o
-ath6kl-y += os/linux/ar6000_pm.o
-ath6kl-y += os/linux/netbuf.o
-ath6kl-y += os/linux/hci_bridge.o
-ath6kl-y += miscdrv/common_drv.o
-ath6kl-y += miscdrv/credit_dist.o
-ath6kl-y += wmi/wmi.o
-ath6kl-y += reorder/rcv_aggr.o
-ath6kl-y += wlan/src/wlan_node.o
-ath6kl-y += wlan/src/wlan_recv_beacon.o
-ath6kl-y += wlan/src/wlan_utils.o
-
-# ATH_HIF_TYPE := sdio
-ccflags-y += -I$(obj)/hif/sdio/linux_sdio/include
-ccflags-y += -DSDIO
-ath6kl-y += hif/sdio/linux_sdio/src/hif.o
-ath6kl-y += hif/sdio/linux_sdio/src/hif_scatter.o
diff --git a/drivers/staging/ath6kl/TODO b/drivers/staging/ath6kl/TODO
deleted file mode 100644
index 7be4b46ebb5..00000000000
--- a/drivers/staging/ath6kl/TODO
+++ /dev/null
@@ -1,25 +0,0 @@
-TODO:
-
-We are working hard on cleaning up the driver. There's sooooooooo much todo
-so instead of editing this file please use the wiki:
-
-http://wireless.kernel.org/en/users/Drivers/ath6kl
-
-There's a respective TODO page there. Please also subscribe to the wiki page
-to get e-mail updates on changes.
-
-IRC:
-
-We *really* need to coordinate development for ath6kl as the cleanup
-patches will break pretty much any other patches. Please use IRC to
-help coordinate better:
-
-irc.freenode.net
-#ath6kl
-
-Send patches to:
-
- - Greg Kroah-Hartman <greg@kroah.com>
- - Luis R. Rodriguez <mcgrof@gmail.com>
- - Joe Perches <joe@perches.com>
- - Naveen Singh <nsingh@atheros.com>
diff --git a/drivers/staging/ath6kl/bmi/include/bmi_internal.h b/drivers/staging/ath6kl/bmi/include/bmi_internal.h
deleted file mode 100644
index 8e2577074d6..00000000000
--- a/drivers/staging/ath6kl/bmi/include/bmi_internal.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef BMI_INTERNAL_H
-#define BMI_INTERNAL_H
-
-#include "a_config.h"
-#include "athdefs.h"
-#include "a_osapi.h"
-#define ATH_MODULE_NAME bmi
-#include "a_debug.h"
-#include "hw/mbox_host_reg.h"
-#include "bmi_msg.h"
-
-#define ATH_DEBUG_BMI ATH_DEBUG_MAKE_MODULE_MASK(0)
-
-
-#define BMI_COMMUNICATION_TIMEOUT 100000
-
-/* ------ Global Variable Declarations ------- */
-static bool bmiDone;
-
-int
-bmiBufferSend(struct hif_device *device,
- u8 *buffer,
- u32 length);
-
-int
-bmiBufferReceive(struct hif_device *device,
- u8 *buffer,
- u32 length,
- bool want_timeout);
-
-#endif
diff --git a/drivers/staging/ath6kl/bmi/src/bmi.c b/drivers/staging/ath6kl/bmi/src/bmi.c
deleted file mode 100644
index f1f085eba9c..00000000000
--- a/drivers/staging/ath6kl/bmi/src/bmi.c
+++ /dev/null
@@ -1,1010 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="bmi.c" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-//
-// Author(s): ="Atheros"
-//==============================================================================
-
-
-#ifdef THREAD_X
-#include <string.h>
-#endif
-
-#include "hif.h"
-#include "bmi.h"
-#include "htc_api.h"
-#include "bmi_internal.h"
-
-#ifdef ATH_DEBUG_MODULE
-static struct ath_debug_mask_description bmi_debug_desc[] = {
- { ATH_DEBUG_BMI , "BMI Tracing"},
-};
-
-ATH_DEBUG_INSTANTIATE_MODULE_VAR(bmi,
- "bmi",
- "Boot Manager Interface",
- ATH_DEBUG_MASK_DEFAULTS,
- ATH_DEBUG_DESCRIPTION_COUNT(bmi_debug_desc),
- bmi_debug_desc);
-
-#endif
-
-/*
-Although we had envisioned BMI to run on top of HTC, this is not how the
-final implementation ended up. On the Target side, BMI is a part of the BSP
-and does not use the HTC protocol nor even DMA -- it is intentionally kept
-very simple.
-*/
-
-static bool pendingEventsFuncCheck = false;
-static u32 *pBMICmdCredits;
-static u8 *pBMICmdBuf;
-#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \
- sizeof(u32) /* cmd */ + \
- sizeof(u32) /* addr */ + \
- sizeof(u32))/* length */
-#define BMI_COMMAND_FITS(sz) ((sz) <= MAX_BMI_CMDBUF_SZ)
-
-/* APIs visible to the driver */
-void
-BMIInit(void)
-{
- bmiDone = false;
- pendingEventsFuncCheck = false;
-
- /*
- * On some platforms, it's not possible to DMA to a static variable
- * in a device driver (e.g. Linux loadable driver module).
- * So we need to A_MALLOC space for "command credits" and for commands.
- *
- * Note: implicitly relies on A_MALLOC to provide a buffer that is
- * suitable for DMA (or PIO). This buffer will be passed down the
- * bus stack.
- */
- if (!pBMICmdCredits) {
- pBMICmdCredits = (u32 *)A_MALLOC_NOWAIT(4);
- A_ASSERT(pBMICmdCredits);
- }
-
- if (!pBMICmdBuf) {
- pBMICmdBuf = (u8 *)A_MALLOC_NOWAIT(MAX_BMI_CMDBUF_SZ);
- A_ASSERT(pBMICmdBuf);
- }
-
- A_REGISTER_MODULE_DEBUG_INFO(bmi);
-}
-
-void
-BMICleanup(void)
-{
- if (pBMICmdCredits) {
- kfree(pBMICmdCredits);
- pBMICmdCredits = NULL;
- }
-
- if (pBMICmdBuf) {
- kfree(pBMICmdBuf);
- pBMICmdBuf = NULL;
- }
-}
-
-int
-BMIDone(struct hif_device *device)
-{
- int status;
- u32 cid;
-
- if (bmiDone) {
- AR_DEBUG_PRINTF (ATH_DEBUG_BMI, ("BMIDone skipped\n"));
- return 0;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Done: Enter (device: 0x%p)\n", device));
- bmiDone = true;
- cid = BMI_DONE;
-
- status = bmiBufferSend(device, (u8 *)&cid, sizeof(cid));
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
- return A_ERROR;
- }
-
- if (pBMICmdCredits) {
- kfree(pBMICmdCredits);
- pBMICmdCredits = NULL;
- }
-
- if (pBMICmdBuf) {
- kfree(pBMICmdBuf);
- pBMICmdBuf = NULL;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Done: Exit\n"));
-
- return 0;
-}
-
-int
-BMIGetTargetInfo(struct hif_device *device, struct bmi_target_info *targ_info)
-{
- int status;
- u32 cid;
-
- if (bmiDone) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Get Target Info: Enter (device: 0x%p)\n", device));
- cid = BMI_GET_TARGET_INFO;
-
- status = bmiBufferSend(device, (u8 *)&cid, sizeof(cid));
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
- return A_ERROR;
- }
-
- status = bmiBufferReceive(device, (u8 *)&targ_info->target_ver,
- sizeof(targ_info->target_ver), true);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read Target Version from the device\n"));
- return A_ERROR;
- }
-
- if (targ_info->target_ver == TARGET_VERSION_SENTINAL) {
- /* Determine how many bytes are in the Target's targ_info */
- status = bmiBufferReceive(device, (u8 *)&targ_info->target_info_byte_count,
- sizeof(targ_info->target_info_byte_count), true);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read Target Info Byte Count from the device\n"));
- return A_ERROR;
- }
-
- /*
- * The Target's targ_info doesn't match the Host's targ_info.
- * We need to do some backwards compatibility work to make this OK.
- */
- A_ASSERT(targ_info->target_info_byte_count == sizeof(*targ_info));
-
- /* Read the remainder of the targ_info */
- status = bmiBufferReceive(device,
- ((u8 *)targ_info)+sizeof(targ_info->target_info_byte_count),
- sizeof(*targ_info)-sizeof(targ_info->target_info_byte_count), true);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read Target Info (%d bytes) from the device\n",
- targ_info->target_info_byte_count));
- return A_ERROR;
- }
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Get Target Info: Exit (ver: 0x%x type: 0x%x)\n",
- targ_info->target_ver, targ_info->target_type));
-
- return 0;
-}
-
-int
-BMIReadMemory(struct hif_device *device,
- u32 address,
- u8 *buffer,
- u32 length)
-{
- u32 cid;
- int status;
- u32 offset;
- u32 remaining, rxlen;
-
- A_ASSERT(BMI_COMMAND_FITS(BMI_DATASZ_MAX + sizeof(cid) + sizeof(address) + sizeof(length)));
- memset (pBMICmdBuf, 0, BMI_DATASZ_MAX + sizeof(cid) + sizeof(address) + sizeof(length));
-
- if (bmiDone) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
- ("BMI Read Memory: Enter (device: 0x%p, address: 0x%x, length: %d)\n",
- device, address, length));
-
- cid = BMI_READ_MEMORY;
-
- remaining = length;
-
- while (remaining)
- {
- rxlen = (remaining < BMI_DATASZ_MAX) ? remaining : BMI_DATASZ_MAX;
- offset = 0;
- memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
- offset += sizeof(cid);
- memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address));
- offset += sizeof(address);
- memcpy(&(pBMICmdBuf[offset]), &rxlen, sizeof(rxlen));
- offset += sizeof(length);
-
- status = bmiBufferSend(device, pBMICmdBuf, offset);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
- return A_ERROR;
- }
- status = bmiBufferReceive(device, pBMICmdBuf, rxlen, true);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read from the device\n"));
- return A_ERROR;
- }
- memcpy(&buffer[length - remaining], pBMICmdBuf, rxlen);
- remaining -= rxlen; address += rxlen;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Read Memory: Exit\n"));
- return 0;
-}
-
-int
-BMIWriteMemory(struct hif_device *device,
- u32 address,
- u8 *buffer,
- u32 length)
-{
- u32 cid;
- int status;
- u32 offset;
- u32 remaining, txlen;
- const u32 header = sizeof(cid) + sizeof(address) + sizeof(length);
- u8 alignedBuffer[BMI_DATASZ_MAX];
- u8 *src;
-
- A_ASSERT(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
- memset (pBMICmdBuf, 0, BMI_DATASZ_MAX + header);
-
- if (bmiDone) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
- ("BMI Write Memory: Enter (device: 0x%p, address: 0x%x, length: %d)\n",
- device, address, length));
-
- cid = BMI_WRITE_MEMORY;
-
- remaining = length;
- while (remaining)
- {
- src = &buffer[length - remaining];
- if (remaining < (BMI_DATASZ_MAX - header)) {
- if (remaining & 3) {
- /* align it with 4 bytes */
- remaining = remaining + (4 - (remaining & 3));
- memcpy(alignedBuffer, src, remaining);
- src = alignedBuffer;
- }
- txlen = remaining;
- } else {
- txlen = (BMI_DATASZ_MAX - header);
- }
- offset = 0;
- memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
- offset += sizeof(cid);
- memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address));
- offset += sizeof(address);
- memcpy(&(pBMICmdBuf[offset]), &txlen, sizeof(txlen));
- offset += sizeof(txlen);
- memcpy(&(pBMICmdBuf[offset]), src, txlen);
- offset += txlen;
- status = bmiBufferSend(device, pBMICmdBuf, offset);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
- return A_ERROR;
- }
- remaining -= txlen; address += txlen;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Write Memory: Exit\n"));
-
- return 0;
-}
-
-int
-BMIExecute(struct hif_device *device,
- u32 address,
- u32 *param)
-{
- u32 cid;
- int status;
- u32 offset;
-
- A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address) + sizeof(param)));
- memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address) + sizeof(param));
-
- if (bmiDone) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
- ("BMI Execute: Enter (device: 0x%p, address: 0x%x, param: %d)\n",
- device, address, *param));
-
- cid = BMI_EXECUTE;
-
- offset = 0;
- memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
- offset += sizeof(cid);
- memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address));
- offset += sizeof(address);
- memcpy(&(pBMICmdBuf[offset]), param, sizeof(*param));
- offset += sizeof(*param);
- status = bmiBufferSend(device, pBMICmdBuf, offset);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
- return A_ERROR;
- }
-
- status = bmiBufferReceive(device, pBMICmdBuf, sizeof(*param), false);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read from the device\n"));
- return A_ERROR;
- }
-
- memcpy(param, pBMICmdBuf, sizeof(*param));
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Execute: Exit (param: %d)\n", *param));
- return 0;
-}
-
-int
-BMISetAppStart(struct hif_device *device,
- u32 address)
-{
- u32 cid;
- int status;
- u32 offset;
-
- A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
- memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address));
-
- if (bmiDone) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
- ("BMI Set App Start: Enter (device: 0x%p, address: 0x%x)\n",
- device, address));
-
- cid = BMI_SET_APP_START;
-
- offset = 0;
- memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
- offset += sizeof(cid);
- memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address));
- offset += sizeof(address);
- status = bmiBufferSend(device, pBMICmdBuf, offset);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Set App Start: Exit\n"));
- return 0;
-}
-
-int
-BMIReadSOCRegister(struct hif_device *device,
- u32 address,
- u32 *param)
-{
- u32 cid;
- int status;
- u32 offset;
-
- A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
- memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address));
-
- if (bmiDone) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
- ("BMI Read SOC Register: Enter (device: 0x%p, address: 0x%x)\n",
- device, address));
-
- cid = BMI_READ_SOC_REGISTER;
-
- offset = 0;
- memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
- offset += sizeof(cid);
- memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address));
- offset += sizeof(address);
-
- status = bmiBufferSend(device, pBMICmdBuf, offset);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
- return A_ERROR;
- }
-
- status = bmiBufferReceive(device, pBMICmdBuf, sizeof(*param), true);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read from the device\n"));
- return A_ERROR;
- }
- memcpy(param, pBMICmdBuf, sizeof(*param));
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Read SOC Register: Exit (value: %d)\n", *param));
- return 0;
-}
-
-int
-BMIWriteSOCRegister(struct hif_device *device,
- u32 address,
- u32 param)
-{
- u32 cid;
- int status;
- u32 offset;
-
- A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address) + sizeof(param)));
- memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address) + sizeof(param));
-
- if (bmiDone) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
- ("BMI Write SOC Register: Enter (device: 0x%p, address: 0x%x, param: %d)\n",
- device, address, param));
-
- cid = BMI_WRITE_SOC_REGISTER;
-
- offset = 0;
- memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
- offset += sizeof(cid);
- memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address));
- offset += sizeof(address);
- memcpy(&(pBMICmdBuf[offset]), &param, sizeof(param));
- offset += sizeof(param);
- status = bmiBufferSend(device, pBMICmdBuf, offset);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Read SOC Register: Exit\n"));
- return 0;
-}
-
-int
-BMIrompatchInstall(struct hif_device *device,
- u32 ROM_addr,
- u32 RAM_addr,
- u32 nbytes,
- u32 do_activate,
- u32 *rompatch_id)
-{
- u32 cid;
- int status;
- u32 offset;
-
- A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(ROM_addr) + sizeof(RAM_addr) +
- sizeof(nbytes) + sizeof(do_activate)));
- memset(pBMICmdBuf, 0, sizeof(cid) + sizeof(ROM_addr) + sizeof(RAM_addr) +
- sizeof(nbytes) + sizeof(do_activate));
-
- if (bmiDone) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
- ("BMI rompatch Install: Enter (device: 0x%p, ROMaddr: 0x%x, RAMaddr: 0x%x length: %d activate: %d)\n",
- device, ROM_addr, RAM_addr, nbytes, do_activate));
-
- cid = BMI_ROMPATCH_INSTALL;
-
- offset = 0;
- memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
- offset += sizeof(cid);
- memcpy(&(pBMICmdBuf[offset]), &ROM_addr, sizeof(ROM_addr));
- offset += sizeof(ROM_addr);
- memcpy(&(pBMICmdBuf[offset]), &RAM_addr, sizeof(RAM_addr));
- offset += sizeof(RAM_addr);
- memcpy(&(pBMICmdBuf[offset]), &nbytes, sizeof(nbytes));
- offset += sizeof(nbytes);
- memcpy(&(pBMICmdBuf[offset]), &do_activate, sizeof(do_activate));
- offset += sizeof(do_activate);
- status = bmiBufferSend(device, pBMICmdBuf, offset);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
- return A_ERROR;
- }
-
- status = bmiBufferReceive(device, pBMICmdBuf, sizeof(*rompatch_id), true);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read from the device\n"));
- return A_ERROR;
- }
- memcpy(rompatch_id, pBMICmdBuf, sizeof(*rompatch_id));
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI rompatch Install: (rompatch_id=%d)\n", *rompatch_id));
- return 0;
-}
-
-int
-BMIrompatchUninstall(struct hif_device *device,
- u32 rompatch_id)
-{
- u32 cid;
- int status;
- u32 offset;
-
- A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(rompatch_id)));
- memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(rompatch_id));
-
- if (bmiDone) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
- ("BMI rompatch Uninstall: Enter (device: 0x%p, rompatch_id: %d)\n",
- device, rompatch_id));
-
- cid = BMI_ROMPATCH_UNINSTALL;
-
- offset = 0;
- memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
- offset += sizeof(cid);
- memcpy(&(pBMICmdBuf[offset]), &rompatch_id, sizeof(rompatch_id));
- offset += sizeof(rompatch_id);
- status = bmiBufferSend(device, pBMICmdBuf, offset);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI rompatch UNinstall: (rompatch_id=0x%x)\n", rompatch_id));
- return 0;
-}
-
-static int
-_BMIrompatchChangeActivation(struct hif_device *device,
- u32 rompatch_count,
- u32 *rompatch_list,
- u32 do_activate)
-{
- u32 cid;
- int status;
- u32 offset;
- u32 length;
-
- A_ASSERT(BMI_COMMAND_FITS(BMI_DATASZ_MAX + sizeof(cid) + sizeof(rompatch_count)));
- memset(pBMICmdBuf, 0, BMI_DATASZ_MAX + sizeof(cid) + sizeof(rompatch_count));
-
- if (bmiDone) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
- ("BMI Change rompatch Activation: Enter (device: 0x%p, count: %d)\n",
- device, rompatch_count));
-
- cid = do_activate ? BMI_ROMPATCH_ACTIVATE : BMI_ROMPATCH_DEACTIVATE;
-
- offset = 0;
- memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
- offset += sizeof(cid);
- memcpy(&(pBMICmdBuf[offset]), &rompatch_count, sizeof(rompatch_count));
- offset += sizeof(rompatch_count);
- length = rompatch_count * sizeof(*rompatch_list);
- memcpy(&(pBMICmdBuf[offset]), rompatch_list, length);
- offset += length;
- status = bmiBufferSend(device, pBMICmdBuf, offset);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Change rompatch Activation: Exit\n"));
-
- return 0;
-}
-
-int
-BMIrompatchActivate(struct hif_device *device,
- u32 rompatch_count,
- u32 *rompatch_list)
-{
- return _BMIrompatchChangeActivation(device, rompatch_count, rompatch_list, 1);
-}
-
-int
-BMIrompatchDeactivate(struct hif_device *device,
- u32 rompatch_count,
- u32 *rompatch_list)
-{
- return _BMIrompatchChangeActivation(device, rompatch_count, rompatch_list, 0);
-}
-
-int
-BMILZData(struct hif_device *device,
- u8 *buffer,
- u32 length)
-{
- u32 cid;
- int status;
- u32 offset;
- u32 remaining, txlen;
- const u32 header = sizeof(cid) + sizeof(length);
-
- A_ASSERT(BMI_COMMAND_FITS(BMI_DATASZ_MAX+header));
- memset (pBMICmdBuf, 0, BMI_DATASZ_MAX+header);
-
- if (bmiDone) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
- ("BMI Send LZ Data: Enter (device: 0x%p, length: %d)\n",
- device, length));
-
- cid = BMI_LZ_DATA;
-
- remaining = length;
- while (remaining)
- {
- txlen = (remaining < (BMI_DATASZ_MAX - header)) ?
- remaining : (BMI_DATASZ_MAX - header);
- offset = 0;
- memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
- offset += sizeof(cid);
- memcpy(&(pBMICmdBuf[offset]), &txlen, sizeof(txlen));
- offset += sizeof(txlen);
- memcpy(&(pBMICmdBuf[offset]), &buffer[length - remaining], txlen);
- offset += txlen;
- status = bmiBufferSend(device, pBMICmdBuf, offset);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n"));
- return A_ERROR;
- }
- remaining -= txlen;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI LZ Data: Exit\n"));
-
- return 0;
-}
-
-int
-BMILZStreamStart(struct hif_device *device,
- u32 address)
-{
- u32 cid;
- int status;
- u32 offset;
-
- A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
- memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address));
-
- if (bmiDone) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
- ("BMI LZ Stream Start: Enter (device: 0x%p, address: 0x%x)\n",
- device, address));
-
- cid = BMI_LZ_STREAM_START;
- offset = 0;
- memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid));
- offset += sizeof(cid);
- memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address));
- offset += sizeof(address);
- status = bmiBufferSend(device, pBMICmdBuf, offset);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to Start LZ Stream to the device\n"));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI LZ Stream Start: Exit\n"));
-
- return 0;
-}
-
-/* BMI Access routines */
-int
-bmiBufferSend(struct hif_device *device,
- u8 *buffer,
- u32 length)
-{
- int status;
- u32 timeout;
- u32 address;
- u32 mboxAddress[HTC_MAILBOX_NUM_MAX];
-
- HIFConfigureDevice(device, HIF_DEVICE_GET_MBOX_ADDR,
- &mboxAddress[0], sizeof(mboxAddress));
-
- *pBMICmdCredits = 0;
- timeout = BMI_COMMUNICATION_TIMEOUT;
-
- while(timeout-- && !(*pBMICmdCredits)) {
- /* Read the counter register to get the command credits */
- address = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
- /* hit the credit counter with a 4-byte access, the first byte read will hit the counter and cause
- * a decrement, while the remaining 3 bytes has no effect. The rationale behind this is to
- * make all HIF accesses 4-byte aligned */
- status = HIFReadWrite(device, address, (u8 *)pBMICmdCredits, 4,
- HIF_RD_SYNC_BYTE_INC, NULL);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to decrement the command credit count register\n"));
- return A_ERROR;
- }
- /* the counter is only 8=bits, ignore anything in the upper 3 bytes */
- (*pBMICmdCredits) &= 0xFF;
- }
-
- if (*pBMICmdCredits) {
- address = mboxAddress[ENDPOINT1];
- status = HIFReadWrite(device, address, buffer, length,
- HIF_WR_SYNC_BYTE_INC, NULL);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to send the BMI data to the device\n"));
- return A_ERROR;
- }
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI Communication timeout - bmiBufferSend\n"));
- return A_ERROR;
- }
-
- return status;
-}
-
-int
-bmiBufferReceive(struct hif_device *device,
- u8 *buffer,
- u32 length,
- bool want_timeout)
-{
- int status;
- u32 address;
- u32 mboxAddress[HTC_MAILBOX_NUM_MAX];
- struct hif_pending_events_info hifPendingEvents;
- static HIF_PENDING_EVENTS_FUNC getPendingEventsFunc = NULL;
-
- if (!pendingEventsFuncCheck) {
- /* see if the HIF layer implements an alternative function to get pending events
- * do this only once! */
- HIFConfigureDevice(device,
- HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
- &getPendingEventsFunc,
- sizeof(getPendingEventsFunc));
- pendingEventsFuncCheck = true;
- }
-
- HIFConfigureDevice(device, HIF_DEVICE_GET_MBOX_ADDR,
- &mboxAddress[0], sizeof(mboxAddress));
-
- /*
- * During normal bootup, small reads may be required.
- * Rather than issue an HIF Read and then wait as the Target
- * adds successive bytes to the FIFO, we wait here until
- * we know that response data is available.
- *
- * This allows us to cleanly timeout on an unexpected
- * Target failure rather than risk problems at the HIF level. In
- * particular, this avoids SDIO timeouts and possibly garbage
- * data on some host controllers. And on an interconnect
- * such as Compact Flash (as well as some SDIO masters) which
- * does not provide any indication on data timeout, it avoids
- * a potential hang or garbage response.
- *
- * Synchronization is more difficult for reads larger than the
- * size of the MBOX FIFO (128B), because the Target is unable
- * to push the 129th byte of data until AFTER the Host posts an
- * HIF Read and removes some FIFO data. So for large reads the
- * Host proceeds to post an HIF Read BEFORE all the data is
- * actually available to read. Fortunately, large BMI reads do
- * not occur in practice -- they're supported for debug/development.
- *
- * So Host/Target BMI synchronization is divided into these cases:
- * CASE 1: length < 4
- * Should not happen
- *
- * CASE 2: 4 <= length <= 128
- * Wait for first 4 bytes to be in FIFO
- * If CONSERVATIVE_BMI_READ is enabled, also wait for
- * a BMI command credit, which indicates that the ENTIRE
- * response is available in the the FIFO
- *
- * CASE 3: length > 128
- * Wait for the first 4 bytes to be in FIFO
- *
- * For most uses, a small timeout should be sufficient and we will
- * usually see a response quickly; but there may be some unusual
- * (debug) cases of BMI_EXECUTE where we want an larger timeout.
- * For now, we use an unbounded busy loop while waiting for
- * BMI_EXECUTE.
- *
- * If BMI_EXECUTE ever needs to support longer-latency execution,
- * especially in production, this code needs to be enhanced to sleep
- * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
- * a function of Host processor speed.
- */
- if (length >= 4) { /* NB: Currently, always true */
- /*
- * NB: word_available is declared static for esoteric reasons
- * having to do with protection on some OSes.
- */
- static u32 word_available;
- u32 timeout;
-
- word_available = 0;
- timeout = BMI_COMMUNICATION_TIMEOUT;
- while((!want_timeout || timeout--) && !word_available) {
-
- if (getPendingEventsFunc != NULL) {
- status = getPendingEventsFunc(device,
- &hifPendingEvents,
- NULL);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMI: Failed to get pending events \n"));
- break;
- }
-
- if (hifPendingEvents.AvailableRecvBytes >= sizeof(u32)) {
- word_available = 1;
- }
- continue;
- }
-
- status = HIFReadWrite(device, RX_LOOKAHEAD_VALID_ADDRESS, (u8 *)&word_available,
- sizeof(word_available), HIF_RD_SYNC_BYTE_INC, NULL);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read RX_LOOKAHEAD_VALID register\n"));
- return A_ERROR;
- }
- /* We did a 4-byte read to the same register; all we really want is one bit */
- word_available &= (1 << ENDPOINT1);
- }
-
- if (!word_available) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI Communication timeout - bmiBufferReceive FIFO empty\n"));
- return A_ERROR;
- }
- }
-
-#define CONSERVATIVE_BMI_READ 0
-#if CONSERVATIVE_BMI_READ
- /*
- * This is an extra-conservative CREDIT check. It guarantees
- * that ALL data is available in the FIFO before we start to
- * read from the interconnect.
- *
- * This credit check is useless when firmware chooses to
- * allow multiple outstanding BMI Command Credits, since the next
- * credit will already be present. To restrict the Target to one
- * BMI Command Credit, see HI_OPTION_BMI_CRED_LIMIT.
- *
- * And for large reads (when HI_OPTION_BMI_CRED_LIMIT is set)
- * we cannot wait for the next credit because the Target's FIFO
- * will not hold the entire response. So we need the Host to
- * start to empty the FIFO sooner. (And again, large reads are
- * not used in practice; they are for debug/development only.)
- *
- * For a more conservative Host implementation (which would be
- * safer for a Compact Flash interconnect):
- * Set CONSERVATIVE_BMI_READ (above) to 1
- * Set HI_OPTION_BMI_CRED_LIMIT and
- * reduce BMI_DATASZ_MAX to 32 or 64
- */
- if ((length > 4) && (length < 128)) { /* check against MBOX FIFO size */
- u32 timeout;
-
- *pBMICmdCredits = 0;
- timeout = BMI_COMMUNICATION_TIMEOUT;
- while((!want_timeout || timeout--) && !(*pBMICmdCredits) {
- /* Read the counter register to get the command credits */
- address = COUNT_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 1;
- /* read the counter using a 4-byte read. Since the counter is NOT auto-decrementing,
- * we can read this counter multiple times using a non-incrementing address mode.
- * The rationale here is to make all HIF accesses a multiple of 4 bytes */
- status = HIFReadWrite(device, address, (u8 *)pBMICmdCredits, sizeof(*pBMICmdCredits),
- HIF_RD_SYNC_BYTE_FIX, NULL);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read the command credit count register\n"));
- return A_ERROR;
- }
- /* we did a 4-byte read to the same count register so mask off upper bytes */
- (*pBMICmdCredits) &= 0xFF;
- }
-
- if (!(*pBMICmdCredits)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI Communication timeout- bmiBufferReceive no credit\n"));
- return A_ERROR;
- }
- }
-#endif
-
- address = mboxAddress[ENDPOINT1];
- status = HIFReadWrite(device, address, buffer, length, HIF_RD_SYNC_BYTE_INC, NULL);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read the BMI data from the device\n"));
- return A_ERROR;
- }
-
- return 0;
-}
-
-int
-BMIFastDownload(struct hif_device *device, u32 address, u8 *buffer, u32 length)
-{
- int status = A_ERROR;
- u32 lastWord = 0;
- u32 lastWordOffset = length & ~0x3;
- u32 unalignedBytes = length & 0x3;
-
- status = BMILZStreamStart (device, address);
- if (status) {
- return A_ERROR;
- }
-
- if (unalignedBytes) {
- /* copy the last word into a zero padded buffer */
- memcpy(&lastWord, &buffer[lastWordOffset], unalignedBytes);
- }
-
- status = BMILZData(device, buffer, lastWordOffset);
-
- if (status) {
- return A_ERROR;
- }
-
- if (unalignedBytes) {
- status = BMILZData(device, (u8 *)&lastWord, 4);
- }
-
- if (!status) {
- //
- // Close compressed stream and open a new (fake) one. This serves mainly to flush Target caches.
- //
- status = BMILZStreamStart (device, 0x00);
- if (status) {
- return A_ERROR;
- }
- }
- return status;
-}
-
-int
-BMIRawWrite(struct hif_device *device, u8 *buffer, u32 length)
-{
- return bmiBufferSend(device, buffer, length);
-}
-
-int
-BMIRawRead(struct hif_device *device, u8 *buffer, u32 length, bool want_timeout)
-{
- return bmiBufferReceive(device, buffer, length, want_timeout);
-}
diff --git a/drivers/staging/ath6kl/hif/common/hif_sdio_common.h b/drivers/staging/ath6kl/hif/common/hif_sdio_common.h
deleted file mode 100644
index 93a2adceca3..00000000000
--- a/drivers/staging/ath6kl/hif/common/hif_sdio_common.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// common header file for HIF modules designed for SDIO
-//
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef HIF_SDIO_COMMON_H_
-#define HIF_SDIO_COMMON_H_
-
- /* SDIO manufacturer ID and Codes */
-#define MANUFACTURER_ID_AR6002_BASE 0x200
-#define MANUFACTURER_ID_AR6003_BASE 0x300
-#define MANUFACTURER_ID_AR6K_BASE_MASK 0xFF00
-#define FUNCTION_CLASS 0x0
-#define MANUFACTURER_CODE 0x271 /* Atheros */
-
- /* Mailbox address in SDIO address space */
-#define HIF_MBOX_BASE_ADDR 0x800
-#define HIF_MBOX_WIDTH 0x800
-#define HIF_MBOX_START_ADDR(mbox) \
- ( HIF_MBOX_BASE_ADDR + mbox * HIF_MBOX_WIDTH)
-
-#define HIF_MBOX_END_ADDR(mbox) \
- (HIF_MBOX_START_ADDR(mbox) + HIF_MBOX_WIDTH - 1)
-
- /* extended MBOX address for larger MBOX writes to MBOX 0*/
-#define HIF_MBOX0_EXTENDED_BASE_ADDR 0x2800
-#define HIF_MBOX0_EXTENDED_WIDTH_AR6002 (6*1024)
-#define HIF_MBOX0_EXTENDED_WIDTH_AR6003 (18*1024)
-
- /* version 1 of the chip has only a 12K extended mbox range */
-#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1 0x4000
-#define HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1 (12*1024)
-
- /* GMBOX addresses */
-#define HIF_GMBOX_BASE_ADDR 0x7000
-#define HIF_GMBOX_WIDTH 0x4000
-
- /* for SDIO we recommend a 128-byte block size */
-#define HIF_DEFAULT_IO_BLOCK_SIZE 128
-
- /* set extended MBOX window information for SDIO interconnects */
-static INLINE void SetExtendedMboxWindowInfo(u16 Manfid, struct hif_device_mbox_info *pInfo)
-{
- switch (Manfid & MANUFACTURER_ID_AR6K_BASE_MASK) {
- case MANUFACTURER_ID_AR6002_BASE :
- /* MBOX 0 has an extended range */
- pInfo->MboxProp[0].ExtendedAddress = HIF_MBOX0_EXTENDED_BASE_ADDR;
- pInfo->MboxProp[0].ExtendedSize = HIF_MBOX0_EXTENDED_WIDTH_AR6002;
- break;
- case MANUFACTURER_ID_AR6003_BASE :
- /* MBOX 0 has an extended range */
- pInfo->MboxProp[0].ExtendedAddress = HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
- pInfo->MboxProp[0].ExtendedSize = HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
- pInfo->GMboxAddress = HIF_GMBOX_BASE_ADDR;
- pInfo->GMboxSize = HIF_GMBOX_WIDTH;
- break;
- default:
- A_ASSERT(false);
- break;
- }
-}
-
-/* special CCCR (func 0) registers */
-
-#define CCCR_SDIO_IRQ_MODE_REG 0xF0 /* interrupt mode register */
-#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0) /* mode to enable special 4-bit interrupt assertion without clock*/
-
-#endif /*HIF_SDIO_COMMON_H_*/
diff --git a/drivers/staging/ath6kl/hif/sdio/linux_sdio/include/hif_internal.h b/drivers/staging/ath6kl/hif/sdio/linux_sdio/include/hif_internal.h
deleted file mode 100644
index ed7ad4786f5..00000000000
--- a/drivers/staging/ath6kl/hif/sdio/linux_sdio/include/hif_internal.h
+++ /dev/null
@@ -1,131 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="hif_internal.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// internal header file for hif layer
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _HIF_INTERNAL_H_
-#define _HIF_INTERNAL_H_
-
-#include "a_config.h"
-#include "athdefs.h"
-#include "a_osapi.h"
-#include "hif.h"
-#include "../../../common/hif_sdio_common.h"
-#include <linux/scatterlist.h>
-#define HIF_LINUX_MMC_SCATTER_SUPPORT
-
-#define BUS_REQUEST_MAX_NUM 64
-
-#define SDIO_CLOCK_FREQUENCY_DEFAULT 25000000
-#define SDWLAN_ENABLE_DISABLE_TIMEOUT 20
-#define FLAGS_CARD_ENAB 0x02
-#define FLAGS_CARD_IRQ_UNMSK 0x04
-
-#define HIF_MBOX_BLOCK_SIZE HIF_DEFAULT_IO_BLOCK_SIZE
-#define HIF_MBOX0_BLOCK_SIZE 1
-#define HIF_MBOX1_BLOCK_SIZE HIF_MBOX_BLOCK_SIZE
-#define HIF_MBOX2_BLOCK_SIZE HIF_MBOX_BLOCK_SIZE
-#define HIF_MBOX3_BLOCK_SIZE HIF_MBOX_BLOCK_SIZE
-
-typedef struct bus_request {
- struct bus_request *next; /* link list of available requests */
- struct bus_request *inusenext; /* link list of in use requests */
- struct semaphore sem_req;
- u32 address; /* request data */
- u8 *buffer;
- u32 length;
- u32 request;
- void *context;
- int status;
- struct hif_scatter_req_priv *pScatterReq; /* this request is a scatter request */
-} BUS_REQUEST;
-
-struct hif_device {
- struct sdio_func *func;
- spinlock_t asynclock;
- struct task_struct* async_task; /* task to handle async commands */
- struct semaphore sem_async; /* wake up for async task */
- int async_shutdown; /* stop the async task */
- struct completion async_completion; /* thread completion */
- BUS_REQUEST *asyncreq; /* request for async tasklet */
- BUS_REQUEST *taskreq; /* async tasklet data */
- spinlock_t lock;
- BUS_REQUEST *s_busRequestFreeQueue; /* free list */
- BUS_REQUEST busRequest[BUS_REQUEST_MAX_NUM]; /* available bus requests */
- void *claimedContext;
- HTC_CALLBACKS htcCallbacks;
- u8 *dma_buffer;
- struct dl_list ScatterReqHead; /* scatter request list head */
- bool scatter_enabled; /* scatter enabled flag */
- bool is_suspend;
- bool is_disabled;
- atomic_t irqHandling;
- HIF_DEVICE_POWER_CHANGE_TYPE powerConfig;
- const struct sdio_device_id *id;
-};
-
-#define HIF_DMA_BUFFER_SIZE (32 * 1024)
-#define CMD53_FIXED_ADDRESS 1
-#define CMD53_INCR_ADDRESS 2
-
-BUS_REQUEST *hifAllocateBusRequest(struct hif_device *device);
-void hifFreeBusRequest(struct hif_device *device, BUS_REQUEST *busrequest);
-void AddToAsyncList(struct hif_device *device, BUS_REQUEST *busrequest);
-
-#ifdef HIF_LINUX_MMC_SCATTER_SUPPORT
-
-#define MAX_SCATTER_REQUESTS 4
-#define MAX_SCATTER_ENTRIES_PER_REQ 16
-#define MAX_SCATTER_REQ_TRANSFER_SIZE 32*1024
-
-struct hif_scatter_req_priv {
- struct hif_scatter_req *pHifScatterReq; /* HIF scatter request with allocated entries */
- struct hif_device *device; /* this device */
- BUS_REQUEST *busrequest; /* request associated with request */
- /* scatter list for linux */
- struct scatterlist sgentries[MAX_SCATTER_ENTRIES_PER_REQ];
-};
-
-#define ATH_DEBUG_SCATTER ATH_DEBUG_MAKE_MODULE_MASK(0)
-
-int SetupHIFScatterSupport(struct hif_device *device, struct hif_device_scatter_support_info *pInfo);
-void CleanupHIFScatterResources(struct hif_device *device);
-int DoHifReadWriteScatter(struct hif_device *device, BUS_REQUEST *busrequest);
-
-#else // HIF_LINUX_MMC_SCATTER_SUPPORT
-
-static inline int SetupHIFScatterSupport(struct hif_device *device, struct hif_device_scatter_support_info *pInfo)
-{
- return A_ENOTSUP;
-}
-
-static inline int DoHifReadWriteScatter(struct hif_device *device, BUS_REQUEST *busrequest)
-{
- return A_ENOTSUP;
-}
-
-#define CleanupHIFScatterResources(d) { }
-
-#endif // HIF_LINUX_MMC_SCATTER_SUPPORT
-
-#endif // _HIF_INTERNAL_H_
-
diff --git a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c
deleted file mode 100644
index 5f5d67720fa..00000000000
--- a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c
+++ /dev/null
@@ -1,1273 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="hif.c" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// HIF layer reference implementation for Linux Native MMC stack
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#include <linux/mmc/card.h>
-#include <linux/mmc/mmc.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/sd.h>
-#include <linux/kthread.h>
-
-/* by default setup a bounce buffer for the data packets, if the underlying host controller driver
- does not use DMA you may be able to skip this step and save the memory allocation and transfer time */
-#define HIF_USE_DMA_BOUNCE_BUFFER 1
-#include "hif_internal.h"
-#define ATH_MODULE_NAME hif
-#include "a_debug.h"
-#include "hw/mbox_host_reg.h"
-
-#if HIF_USE_DMA_BOUNCE_BUFFER
-/* macro to check if DMA buffer is WORD-aligned and DMA-able. Most host controllers assume the
- * buffer is DMA'able and will bug-check otherwise (i.e. buffers on the stack).
- * virt_addr_valid check fails on stack memory.
- */
-#define BUFFER_NEEDS_BOUNCE(buffer) (((unsigned long)(buffer) & 0x3) || !virt_addr_valid((buffer)))
-#else
-#define BUFFER_NEEDS_BOUNCE(buffer) (false)
-#endif
-
-/* ATHENV */
-#if defined(CONFIG_PM)
-#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev)
-#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
-#endif /* CONFIG_PM */
-static void delHifDevice(struct hif_device * device);
-static int Func0_CMD52WriteByte(struct mmc_card *card, unsigned int address, unsigned char byte);
-static int Func0_CMD52ReadByte(struct mmc_card *card, unsigned int address, unsigned char *byte);
-
-static int hifEnableFunc(struct hif_device *device, struct sdio_func *func);
-static int hifDisableFunc(struct hif_device *device, struct sdio_func *func);
-OSDRV_CALLBACKS osdrvCallbacks;
-
-int reset_sdio_on_unload = 0;
-module_param(reset_sdio_on_unload, int, 0644);
-
-extern u32 nohifscattersupport;
-
-static struct hif_device *ath6kl_alloc_hifdev(struct sdio_func *func)
-{
- struct hif_device *hifdevice;
-
- hifdevice = kzalloc(sizeof(struct hif_device), GFP_KERNEL);
-
-#if HIF_USE_DMA_BOUNCE_BUFFER
- hifdevice->dma_buffer = kmalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
-#endif
- hifdevice->func = func;
- hifdevice->powerConfig = HIF_DEVICE_POWER_UP;
- sdio_set_drvdata(func, hifdevice);
-
- return hifdevice;
-}
-
-static struct hif_device *ath6kl_get_hifdev(struct sdio_func *func)
-{
- return (struct hif_device *) sdio_get_drvdata(func);
-}
-
-static const struct sdio_device_id ath6kl_hifdev_ids[] = {
- { SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x0)) },
- { SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x1)) },
- { SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0)) },
- { SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1)) },
- { /* null */ },
-};
-
-MODULE_DEVICE_TABLE(sdio, ath6kl_hifdev_ids);
-
-static int ath6kl_hifdev_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- int ret;
- struct hif_device *device;
- int count;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
- ("ath6kl: Function: 0x%X, Vendor ID: 0x%X, "
- "Device ID: 0x%X, block size: 0x%X/0x%X\n",
- func->num, func->vendor, func->device,
- func->max_blksize, func->cur_blksize));
-
- ath6kl_alloc_hifdev(func);
- device = ath6kl_get_hifdev(func);
-
- device->id = id;
- device->is_disabled = true;
-
- spin_lock_init(&device->lock);
- spin_lock_init(&device->asynclock);
-
- DL_LIST_INIT(&device->ScatterReqHead);
-
- /* Try to allow scatter unless globally overridden */
- if (!nohifscattersupport)
- device->scatter_enabled = true;
-
- A_MEMZERO(device->busRequest, sizeof(device->busRequest));
-
- for (count = 0; count < BUS_REQUEST_MAX_NUM; count++) {
- sema_init(&device->busRequest[count].sem_req, 0);
- hifFreeBusRequest(device, &device->busRequest[count]);
- }
-
- sema_init(&device->sem_async, 0);
-
- ret = hifEnableFunc(device, func);
-
- return ret;
-}
-
-static void ath6kl_hifdev_remove(struct sdio_func *func)
-{
- int status = 0;
- struct hif_device *device;
-
- device = ath6kl_get_hifdev(func);
- if (device->claimedContext != NULL)
- status = osdrvCallbacks.
- deviceRemovedHandler(device->claimedContext, device);
-
- if (device->is_disabled)
- device->is_disabled = false;
- else
- status = hifDisableFunc(device, func);
-
- CleanupHIFScatterResources(device);
-
- delHifDevice(device);
-}
-
-#if defined(CONFIG_PM)
-static int ath6kl_hifdev_suspend(struct device *dev)
-{
- struct sdio_func *func = dev_to_sdio_func(dev);
- int status = 0;
- struct hif_device *device;
-
- device = ath6kl_get_hifdev(func);
-
- if (device && device->claimedContext &&
- osdrvCallbacks.deviceSuspendHandler) {
- /* set true first for PowerStateChangeNotify(..) */
- device->is_suspend = true;
- status = osdrvCallbacks.
- deviceSuspendHandler(device->claimedContext);
- if (status)
- device->is_suspend = false;
- }
-
- CleanupHIFScatterResources(device);
-
- switch (status) {
- case 0:
- return 0;
- case A_EBUSY:
- /* Hack for kernel in order to support deep sleep and wow */
- return -EBUSY;
- default:
- return -1;
- }
-}
-
-static int ath6kl_hifdev_resume(struct device *dev)
-{
- struct sdio_func *func = dev_to_sdio_func(dev);
- int status = 0;
- struct hif_device *device;
-
- device = ath6kl_get_hifdev(func);
- if (device && device->claimedContext &&
- osdrvCallbacks.deviceSuspendHandler) {
- status = osdrvCallbacks.
- deviceResumeHandler(device->claimedContext);
- if (status == 0)
- device->is_suspend = false;
- }
-
- return status;
-}
-
-static const struct dev_pm_ops ath6kl_hifdev_pmops = {
- .suspend = ath6kl_hifdev_suspend,
- .resume = ath6kl_hifdev_resume,
-};
-#endif /* CONFIG_PM */
-
-static struct sdio_driver ath6kl_hifdev_driver = {
- .name = "ath6kl_hifdev",
- .id_table = ath6kl_hifdev_ids,
- .probe = ath6kl_hifdev_probe,
- .remove = ath6kl_hifdev_remove,
-#if defined(CONFIG_PM)
- .drv = {
- .pm = &ath6kl_hifdev_pmops,
- },
-#endif
-};
-
-/* make sure we only unregister when registered. */
-static int registered = 0;
-
-extern u32 onebitmode;
-extern u32 busspeedlow;
-extern u32 debughif;
-
-static void ResetAllCards(void);
-
-#ifdef DEBUG
-
-ATH_DEBUG_INSTANTIATE_MODULE_VAR(hif,
- "hif",
- "(Linux MMC) Host Interconnect Framework",
- ATH_DEBUG_MASK_DEFAULTS,
- 0,
- NULL);
-
-#endif
-
-
-/* ------ Functions ------ */
-int HIFInit(OSDRV_CALLBACKS *callbacks)
-{
- int r;
- AR_DEBUG_ASSERT(callbacks != NULL);
-
- A_REGISTER_MODULE_DEBUG_INFO(hif);
-
- /* store the callback handlers */
- osdrvCallbacks = *callbacks;
-
- /* Register with bus driver core */
- registered = 1;
-
- r = sdio_register_driver(&ath6kl_hifdev_driver);
- if (r < 0)
- return r;
-
- return 0;
-}
-
-static int
-__HIFReadWrite(struct hif_device *device,
- u32 address,
- u8 *buffer,
- u32 length,
- u32 request,
- void *context)
-{
- u8 opcode;
- int status = 0;
- int ret;
- u8 *tbuffer;
- bool bounced = false;
-
- AR_DEBUG_ASSERT(device != NULL);
- AR_DEBUG_ASSERT(device->func != NULL);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Device: 0x%p, buffer:0x%p (addr:0x%X)\n",
- device, buffer, address));
-
- do {
- if (request & HIF_EXTENDED_IO) {
- //AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Command type: CMD53\n"));
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
- ("AR6000: Invalid command type: 0x%08x\n", request));
- status = A_EINVAL;
- break;
- }
-
- if (request & HIF_BLOCK_BASIS) {
- /* round to whole block length size */
- length = (length / HIF_MBOX_BLOCK_SIZE) * HIF_MBOX_BLOCK_SIZE;
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
- ("AR6000: Block mode (BlockLen: %d)\n",
- length));
- } else if (request & HIF_BYTE_BASIS) {
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
- ("AR6000: Byte mode (BlockLen: %d)\n",
- length));
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
- ("AR6000: Invalid data mode: 0x%08x\n", request));
- status = A_EINVAL;
- break;
- }
-
-#if 0
- /* useful for checking register accesses */
- if (length & 0x3) {
- A_PRINTF(KERN_ALERT"AR6000: HIF (%s) is not a multiple of 4 bytes, addr:0x%X, len:%d\n",
- request & HIF_WRITE ? "write":"read", address, length);
- }
-#endif
-
- if (request & HIF_WRITE) {
- if ((address >= HIF_MBOX_START_ADDR(0)) &&
- (address <= HIF_MBOX_END_ADDR(3)))
- {
-
- AR_DEBUG_ASSERT(length <= HIF_MBOX_WIDTH);
-
- /*
- * Mailbox write. Adjust the address so that the last byte
- * falls on the EOM address.
- */
- address += (HIF_MBOX_WIDTH - length);
- }
- }
-
- if (request & HIF_FIXED_ADDRESS) {
- opcode = CMD53_FIXED_ADDRESS;
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Address mode: Fixed 0x%X\n", address));
- } else if (request & HIF_INCREMENTAL_ADDRESS) {
- opcode = CMD53_INCR_ADDRESS;
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Address mode: Incremental 0x%X\n", address));
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
- ("AR6000: Invalid address mode: 0x%08x\n", request));
- status = A_EINVAL;
- break;
- }
-
- if (request & HIF_WRITE) {
-#if HIF_USE_DMA_BOUNCE_BUFFER
- if (BUFFER_NEEDS_BOUNCE(buffer)) {
- AR_DEBUG_ASSERT(device->dma_buffer != NULL);
- tbuffer = device->dma_buffer;
- /* copy the write data to the dma buffer */
- AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
- memcpy(tbuffer, buffer, length);
- bounced = true;
- } else {
- tbuffer = buffer;
- }
-#else
- tbuffer = buffer;
-#endif
- if (opcode == CMD53_FIXED_ADDRESS) {
- ret = sdio_writesb(device->func, address, tbuffer, length);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: writesb ret=%d address: 0x%X, len: %d, 0x%X\n",
- ret, address, length, *(int *)tbuffer));
- } else {
- ret = sdio_memcpy_toio(device->func, address, tbuffer, length);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: writeio ret=%d address: 0x%X, len: %d, 0x%X\n",
- ret, address, length, *(int *)tbuffer));
- }
- } else if (request & HIF_READ) {
-#if HIF_USE_DMA_BOUNCE_BUFFER
- if (BUFFER_NEEDS_BOUNCE(buffer)) {
- AR_DEBUG_ASSERT(device->dma_buffer != NULL);
- AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
- tbuffer = device->dma_buffer;
- bounced = true;
- } else {
- tbuffer = buffer;
- }
-#else
- tbuffer = buffer;
-#endif
- if (opcode == CMD53_FIXED_ADDRESS) {
- ret = sdio_readsb(device->func, tbuffer, address, length);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: readsb ret=%d address: 0x%X, len: %d, 0x%X\n",
- ret, address, length, *(int *)tbuffer));
- } else {
- ret = sdio_memcpy_fromio(device->func, tbuffer, address, length);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: readio ret=%d address: 0x%X, len: %d, 0x%X\n",
- ret, address, length, *(int *)tbuffer));
- }
-#if HIF_USE_DMA_BOUNCE_BUFFER
- if (bounced) {
- /* copy the read data from the dma buffer */
- memcpy(buffer, tbuffer, length);
- }
-#endif
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
- ("AR6000: Invalid direction: 0x%08x\n", request));
- status = A_EINVAL;
- break;
- }
-
- if (ret) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
- ("AR6000: SDIO bus operation failed! MMC stack returned : %d \n", ret));
- status = A_ERROR;
- }
- } while (false);
-
- return status;
-}
-
-void AddToAsyncList(struct hif_device *device, BUS_REQUEST *busrequest)
-{
- unsigned long flags;
- BUS_REQUEST *async;
- BUS_REQUEST *active;
-
- spin_lock_irqsave(&device->asynclock, flags);
- active = device->asyncreq;
- if (active == NULL) {
- device->asyncreq = busrequest;
- device->asyncreq->inusenext = NULL;
- } else {
- for (async = device->asyncreq;
- async != NULL;
- async = async->inusenext) {
- active = async;
- }
- active->inusenext = busrequest;
- busrequest->inusenext = NULL;
- }
- spin_unlock_irqrestore(&device->asynclock, flags);
-}
-
-
-/* queue a read/write request */
-int
-HIFReadWrite(struct hif_device *device,
- u32 address,
- u8 *buffer,
- u32 length,
- u32 request,
- void *context)
-{
- int status = 0;
- BUS_REQUEST *busrequest;
-
-
- AR_DEBUG_ASSERT(device != NULL);
- AR_DEBUG_ASSERT(device->func != NULL);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Device: %p addr:0x%X\n", device,address));
-
- do {
- if ((request & HIF_ASYNCHRONOUS) || (request & HIF_SYNCHRONOUS)){
- /* serialize all requests through the async thread */
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Execution mode: %s\n",
- (request & HIF_ASYNCHRONOUS)?"Async":"Synch"));
- busrequest = hifAllocateBusRequest(device);
- if (busrequest == NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
- ("AR6000: no async bus requests available (%s, addr:0x%X, len:%d) \n",
- request & HIF_READ ? "READ":"WRITE", address, length));
- return A_ERROR;
- }
- busrequest->address = address;
- busrequest->buffer = buffer;
- busrequest->length = length;
- busrequest->request = request;
- busrequest->context = context;
-
- AddToAsyncList(device, busrequest);
-
- if (request & HIF_SYNCHRONOUS) {
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: queued sync req: 0x%lX\n", (unsigned long)busrequest));
-
- /* wait for completion */
- up(&device->sem_async);
- if (down_interruptible(&busrequest->sem_req) != 0) {
- /* interrupted, exit */
- return A_ERROR;
- } else {
- int status = busrequest->status;
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: sync return freeing 0x%lX: 0x%X\n",
- (unsigned long)busrequest, busrequest->status));
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: freeing req: 0x%X\n", (unsigned int)request));
- hifFreeBusRequest(device, busrequest);
- return status;
- }
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: queued async req: 0x%lX\n", (unsigned long)busrequest));
- up(&device->sem_async);
- return A_PENDING;
- }
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
- ("AR6000: Invalid execution mode: 0x%08x\n", (unsigned int)request));
- status = A_EINVAL;
- break;
- }
- } while(0);
-
- return status;
-}
-/* thread to serialize all requests, both sync and async */
-static int async_task(void *param)
- {
- struct hif_device *device;
- BUS_REQUEST *request;
- int status;
- unsigned long flags;
-
- device = (struct hif_device *)param;
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async task\n"));
- set_current_state(TASK_INTERRUPTIBLE);
- while(!device->async_shutdown) {
- /* wait for work */
- if (down_interruptible(&device->sem_async) != 0) {
- /* interrupted, exit */
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async task interrupted\n"));
- break;
- }
- if (device->async_shutdown) {
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async task stopping\n"));
- break;
- }
- /* we want to hold the host over multiple cmds if possible, but holding the host blocks card interrupts */
- sdio_claim_host(device->func);
- spin_lock_irqsave(&device->asynclock, flags);
- /* pull the request to work on */
- while (device->asyncreq != NULL) {
- request = device->asyncreq;
- if (request->inusenext != NULL) {
- device->asyncreq = request->inusenext;
- } else {
- device->asyncreq = NULL;
- }
- spin_unlock_irqrestore(&device->asynclock, flags);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task processing req: 0x%lX\n", (unsigned long)request));
-
- if (request->pScatterReq != NULL) {
- A_ASSERT(device->scatter_enabled);
- /* this is a queued scatter request, pass the request to scatter routine which
- * executes it synchronously, note, no need to free the request since scatter requests
- * are maintained on a separate list */
- status = DoHifReadWriteScatter(device,request);
- } else {
- /* call HIFReadWrite in sync mode to do the work */
- status = __HIFReadWrite(device, request->address, request->buffer,
- request->length, request->request & ~HIF_SYNCHRONOUS, NULL);
- if (request->request & HIF_ASYNCHRONOUS) {
- void *context = request->context;
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task freeing req: 0x%lX\n", (unsigned long)request));
- hifFreeBusRequest(device, request);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task completion routine req: 0x%lX\n", (unsigned long)request));
- device->htcCallbacks.rwCompletionHandler(context, status);
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: async_task upping req: 0x%lX\n", (unsigned long)request));
- request->status = status;
- up(&request->sem_req);
- }
- }
- spin_lock_irqsave(&device->asynclock, flags);
- }
- spin_unlock_irqrestore(&device->asynclock, flags);
- sdio_release_host(device->func);
- }
-
- complete_and_exit(&device->async_completion, 0);
- return 0;
-}
-
-static s32 IssueSDCommand(struct hif_device *device, u32 opcode, u32 arg, u32 flags, u32 *resp)
-{
- struct mmc_command cmd;
- s32 err;
- struct mmc_host *host;
- struct sdio_func *func;
-
- func = device->func;
- host = func->card->host;
-
- memset(&cmd, 0, sizeof(struct mmc_command));
- cmd.opcode = opcode;
- cmd.arg = arg;
- cmd.flags = flags;
- err = mmc_wait_for_cmd(host, &cmd, 3);
-
- if ((!err) && (resp)) {
- *resp = cmd.resp[0];
- }
-
- return err;
-}
-
-int ReinitSDIO(struct hif_device *device)
-{
- s32 err;
- struct mmc_host *host;
- struct mmc_card *card;
- struct sdio_func *func;
- u8 cmd52_resp;
- u32 clock;
-
- func = device->func;
- card = func->card;
- host = card->host;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +ReinitSDIO \n"));
- sdio_claim_host(func);
-
- do {
- if (!device->is_suspend) {
- u32 resp;
- u16 rca;
- u32 i;
- int bit = fls(host->ocr_avail) - 1;
- /* emulate the mmc_power_up(...) */
- host->ios.vdd = bit;
- host->ios.chip_select = MMC_CS_DONTCARE;
- host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
- host->ios.power_mode = MMC_POWER_UP;
- host->ios.bus_width = MMC_BUS_WIDTH_1;
- host->ios.timing = MMC_TIMING_LEGACY;
- host->ops->set_ios(host, &host->ios);
- /*
- * This delay should be sufficient to allow the power supply
- * to reach the minimum voltage.
- */
- msleep(2);
-
- host->ios.clock = host->f_min;
- host->ios.power_mode = MMC_POWER_ON;
- host->ops->set_ios(host, &host->ios);
-
- /*
- * This delay must be at least 74 clock sizes, or 1 ms, or the
- * time required to reach a stable voltage.
- */
- msleep(2);
-
- /* Issue CMD0. Goto idle state */
- host->ios.chip_select = MMC_CS_HIGH;
- host->ops->set_ios(host, &host->ios);
- msleep(1);
- err = IssueSDCommand(device, MMC_GO_IDLE_STATE, 0, (MMC_RSP_NONE | MMC_CMD_BC), NULL);
- host->ios.chip_select = MMC_CS_DONTCARE;
- host->ops->set_ios(host, &host->ios);
- msleep(1);
- host->use_spi_crc = 0;
-
- if (err) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD0 failed : %d \n",err));
- break;
- }
-
- if (!host->ocr) {
- /* Issue CMD5, arg = 0 */
- err = IssueSDCommand(device, SD_IO_SEND_OP_COND, 0, (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
- if (err) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD5 failed : %d \n",err));
- break;
- }
- host->ocr = resp;
- }
-
- /* Issue CMD5, arg = ocr. Wait till card is ready */
- for (i=0;i<100;i++) {
- err = IssueSDCommand(device, SD_IO_SEND_OP_COND, host->ocr, (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
- if (err) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD5 failed : %d \n",err));
- break;
- }
- if (resp & MMC_CARD_BUSY) {
- break;
- }
- msleep(10);
- }
-
- if ((i == 100) || (err)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: card in not ready : %d %d \n",i,err));
- break;
- }
-
- /* Issue CMD3, get RCA */
- err = IssueSDCommand(device, SD_SEND_RELATIVE_ADDR, 0, MMC_RSP_R6 | MMC_CMD_BCR, &resp);
- if (err) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD3 failed : %d \n",err));
- break;
- }
- rca = resp >> 16;
- host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
- host->ops->set_ios(host, &host->ios);
-
- /* Issue CMD7, select card */
- err = IssueSDCommand(device, MMC_SELECT_CARD, (rca << 16), MMC_RSP_R1 | MMC_CMD_AC, NULL);
- if (err) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD7 failed : %d \n",err));
- break;
- }
- }
-
- /* Enable high speed */
- if (card->host->caps & MMC_CAP_SD_HIGHSPEED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("ReinitSDIO: Set high speed mode\n"));
- err = Func0_CMD52ReadByte(card, SDIO_CCCR_SPEED, &cmd52_resp);
- if (err) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD52 read to CCCR speed register failed : %d \n",err));
- card->state &= ~MMC_STATE_HIGHSPEED;
- /* no need to break */
- } else {
- err = Func0_CMD52WriteByte(card, SDIO_CCCR_SPEED, (cmd52_resp | SDIO_SPEED_EHS));
- if (err) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD52 write to CCCR speed register failed : %d \n",err));
- break;
- }
- mmc_card_set_highspeed(card);
- host->ios.timing = MMC_TIMING_SD_HS;
- host->ops->set_ios(host, &host->ios);
- }
- }
-
- /* Set clock */
- if (mmc_card_highspeed(card)) {
- clock = 50000000;
- } else {
- clock = card->cis.max_dtr;
- }
-
- if (clock > host->f_max) {
- clock = host->f_max;
- }
- host->ios.clock = clock;
- host->ops->set_ios(host, &host->ios);
-
-
- if (card->host->caps & MMC_CAP_4_BIT_DATA) {
- /* CMD52: Set bus width & disable card detect resistor */
- err = Func0_CMD52WriteByte(card, SDIO_CCCR_IF, SDIO_BUS_CD_DISABLE | SDIO_BUS_WIDTH_4BIT);
- if (err) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("ReinitSDIO: CMD52 to set bus mode failed : %d \n",err));
- break;
- }
- host->ios.bus_width = MMC_BUS_WIDTH_4;
- host->ops->set_ios(host, &host->ios);
- }
- } while (0);
-
- sdio_release_host(func);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -ReinitSDIO \n"));
-
- return (err) ? A_ERROR : 0;
-}
-
-int
-PowerStateChangeNotify(struct hif_device *device, HIF_DEVICE_POWER_CHANGE_TYPE config)
-{
- int status = 0;
-#if defined(CONFIG_PM)
- struct sdio_func *func = device->func;
- int old_reset_val;
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +PowerStateChangeNotify %d\n", config));
- switch (config) {
- case HIF_DEVICE_POWER_DOWN:
- case HIF_DEVICE_POWER_CUT:
- old_reset_val = reset_sdio_on_unload;
- reset_sdio_on_unload = 1;
- status = hifDisableFunc(device, func);
- reset_sdio_on_unload = old_reset_val;
- if (!device->is_suspend) {
- struct mmc_host *host = func->card->host;
- host->ios.clock = 0;
- host->ios.vdd = 0;
- host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
- host->ios.chip_select = MMC_CS_DONTCARE;
- host->ios.power_mode = MMC_POWER_OFF;
- host->ios.bus_width = MMC_BUS_WIDTH_1;
- host->ios.timing = MMC_TIMING_LEGACY;
- host->ops->set_ios(host, &host->ios);
- }
- break;
- case HIF_DEVICE_POWER_UP:
- if (device->powerConfig == HIF_DEVICE_POWER_CUT) {
- status = ReinitSDIO(device);
- }
- if (status == 0) {
- status = hifEnableFunc(device, func);
- }
- break;
- }
- device->powerConfig = config;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -PowerStateChangeNotify\n"));
-#endif
- return status;
-}
-
-int
-HIFConfigureDevice(struct hif_device *device, HIF_DEVICE_CONFIG_OPCODE opcode,
- void *config, u32 configLen)
-{
- u32 count;
- int status = 0;
-
- switch(opcode) {
- case HIF_DEVICE_GET_MBOX_BLOCK_SIZE:
- ((u32 *)config)[0] = HIF_MBOX0_BLOCK_SIZE;
- ((u32 *)config)[1] = HIF_MBOX1_BLOCK_SIZE;
- ((u32 *)config)[2] = HIF_MBOX2_BLOCK_SIZE;
- ((u32 *)config)[3] = HIF_MBOX3_BLOCK_SIZE;
- break;
-
- case HIF_DEVICE_GET_MBOX_ADDR:
- for (count = 0; count < 4; count ++) {
- ((u32 *)config)[count] = HIF_MBOX_START_ADDR(count);
- }
-
- if (configLen >= sizeof(struct hif_device_mbox_info)) {
- SetExtendedMboxWindowInfo((u16)device->func->device,
- (struct hif_device_mbox_info *)config);
- }
-
- break;
- case HIF_DEVICE_GET_IRQ_PROC_MODE:
- *((HIF_DEVICE_IRQ_PROCESSING_MODE *)config) = HIF_DEVICE_IRQ_SYNC_ONLY;
- break;
- case HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT:
- if (!device->scatter_enabled) {
- return A_ENOTSUP;
- }
- status = SetupHIFScatterSupport(device, (struct hif_device_scatter_support_info *)config);
- if (status) {
- device->scatter_enabled = false;
- }
- break;
- case HIF_DEVICE_GET_OS_DEVICE:
- /* pass back a pointer to the SDIO function's "dev" struct */
- ((struct hif_device_os_device_info *)config)->pOSDevice = &device->func->dev;
- break;
- case HIF_DEVICE_POWER_STATE_CHANGE:
- status = PowerStateChangeNotify(device, *(HIF_DEVICE_POWER_CHANGE_TYPE *)config);
- break;
- default:
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
- ("AR6000: Unsupported configuration opcode: %d\n", opcode));
- status = A_ERROR;
- }
-
- return status;
-}
-
-void
-HIFShutDownDevice(struct hif_device *device)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +HIFShutDownDevice\n"));
- if (device != NULL) {
- AR_DEBUG_ASSERT(device->func != NULL);
- } else {
- /* since we are unloading the driver anyways, reset all cards in case the SDIO card
- * is externally powered and we are unloading the SDIO stack. This avoids the problem when
- * the SDIO stack is reloaded and attempts are made to re-enumerate a card that is already
- * enumerated */
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: HIFShutDownDevice, resetting\n"));
- ResetAllCards();
-
- /* Unregister with bus driver core */
- if (registered) {
- registered = 0;
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
- ("AR6000: Unregistering with the bus driver\n"));
- sdio_unregister_driver(&ath6kl_hifdev_driver);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
- ("AR6000: Unregistered\n"));
- }
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -HIFShutDownDevice\n"));
-}
-
-static void
-hifIRQHandler(struct sdio_func *func)
-{
- int status;
- struct hif_device *device;
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +hifIRQHandler\n"));
-
- device = ath6kl_get_hifdev(func);
- atomic_set(&device->irqHandling, 1);
- /* release the host during ints so we can pick it back up when we process cmds */
- sdio_release_host(device->func);
- status = device->htcCallbacks.dsrHandler(device->htcCallbacks.context);
- sdio_claim_host(device->func);
- atomic_set(&device->irqHandling, 0);
- AR_DEBUG_ASSERT(status == 0 || status == A_ECANCELED);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -hifIRQHandler\n"));
-}
-
-/* handle HTC startup via thread*/
-static int startup_task(void *param)
-{
- struct hif_device *device;
-
- device = (struct hif_device *)param;
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: call HTC from startup_task\n"));
- /* start up inform DRV layer */
- if ((osdrvCallbacks.deviceInsertedHandler(osdrvCallbacks.context,device)) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Device rejected\n"));
- }
- return 0;
-}
-
-#if defined(CONFIG_PM)
-static int enable_task(void *param)
-{
- struct hif_device *device;
- device = (struct hif_device *)param;
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: call from resume_task\n"));
-
- /* start up inform DRV layer */
- if (device &&
- device->claimedContext &&
- osdrvCallbacks.devicePowerChangeHandler &&
- osdrvCallbacks.devicePowerChangeHandler(device->claimedContext, HIF_DEVICE_POWER_UP) != 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: Device rejected\n"));
- }
-
- return 0;
-}
-#endif
-
-void
-HIFAckInterrupt(struct hif_device *device)
-{
- AR_DEBUG_ASSERT(device != NULL);
-
- /* Acknowledge our function IRQ */
-}
-
-void
-HIFUnMaskInterrupt(struct hif_device *device)
-{
- int ret;
-
- AR_DEBUG_ASSERT(device != NULL);
- AR_DEBUG_ASSERT(device->func != NULL);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: HIFUnMaskInterrupt\n"));
-
- /* Register the IRQ Handler */
- sdio_claim_host(device->func);
- ret = sdio_claim_irq(device->func, hifIRQHandler);
- sdio_release_host(device->func);
- AR_DEBUG_ASSERT(ret == 0);
-}
-
-void HIFMaskInterrupt(struct hif_device *device)
-{
- int ret;
- AR_DEBUG_ASSERT(device != NULL);
- AR_DEBUG_ASSERT(device->func != NULL);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: HIFMaskInterrupt\n"));
-
- /* Mask our function IRQ */
- sdio_claim_host(device->func);
- while (atomic_read(&device->irqHandling)) {
- sdio_release_host(device->func);
- schedule_timeout(HZ/10);
- sdio_claim_host(device->func);
- }
- ret = sdio_release_irq(device->func);
- sdio_release_host(device->func);
- AR_DEBUG_ASSERT(ret == 0);
-}
-
-BUS_REQUEST *hifAllocateBusRequest(struct hif_device *device)
-{
- BUS_REQUEST *busrequest;
- unsigned long flag;
-
- /* Acquire lock */
- spin_lock_irqsave(&device->lock, flag);
-
- /* Remove first in list */
- if((busrequest = device->s_busRequestFreeQueue) != NULL)
- {
- device->s_busRequestFreeQueue = busrequest->next;
- }
- /* Release lock */
- spin_unlock_irqrestore(&device->lock, flag);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: hifAllocateBusRequest: 0x%p\n", busrequest));
- return busrequest;
-}
-
-void
-hifFreeBusRequest(struct hif_device *device, BUS_REQUEST *busrequest)
-{
- unsigned long flag;
-
- AR_DEBUG_ASSERT(busrequest != NULL);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: hifFreeBusRequest: 0x%p\n", busrequest));
- /* Acquire lock */
- spin_lock_irqsave(&device->lock, flag);
-
-
- /* Insert first in list */
- busrequest->next = device->s_busRequestFreeQueue;
- busrequest->inusenext = NULL;
- device->s_busRequestFreeQueue = busrequest;
-
- /* Release lock */
- spin_unlock_irqrestore(&device->lock, flag);
-}
-
-static int hifDisableFunc(struct hif_device *device, struct sdio_func *func)
-{
- int ret;
- int status = 0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +hifDisableFunc\n"));
- device = ath6kl_get_hifdev(func);
- if (!IS_ERR(device->async_task)) {
- init_completion(&device->async_completion);
- device->async_shutdown = 1;
- up(&device->sem_async);
- wait_for_completion(&device->async_completion);
- device->async_task = NULL;
- }
- /* Disable the card */
- sdio_claim_host(device->func);
- ret = sdio_disable_func(device->func);
- if (ret) {
- status = A_ERROR;
- }
-
- if (reset_sdio_on_unload) {
- /* reset the SDIO interface. This is useful in automated testing where the card
- * does not need to be removed at the end of the test. It is expected that the user will
- * also unload/reload the host controller driver to force the bus driver to re-enumerate the slot */
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6000: reseting SDIO card back to uninitialized state \n"));
-
- /* NOTE : sdio_f0_writeb() cannot be used here, that API only allows access
- * to undefined registers in the range of: 0xF0-0xFF */
-
- ret = Func0_CMD52WriteByte(device->func->card, SDIO_CCCR_ABORT, (1 << 3));
- if (ret) {
- status = A_ERROR;
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("AR6000: reset failed : %d \n",ret));
- }
- }
-
- sdio_release_host(device->func);
-
- if (status == 0) {
- device->is_disabled = true;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -hifDisableFunc\n"));
-
- return status;
-}
-
-static int hifEnableFunc(struct hif_device *device, struct sdio_func *func)
-{
- struct task_struct* pTask;
- const char *taskName = NULL;
- int (*taskFunc)(void *) = NULL;
- int ret = 0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: +hifEnableFunc\n"));
- device = ath6kl_get_hifdev(func);
-
- if (device->is_disabled) {
- /* enable the SDIO function */
- sdio_claim_host(func);
-
- if ((device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK) >= MANUFACTURER_ID_AR6003_BASE) {
- /* enable 4-bit ASYNC interrupt on AR6003 or later devices */
- ret = Func0_CMD52WriteByte(func->card, CCCR_SDIO_IRQ_MODE_REG, SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
- if (ret) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("AR6000: failed to enable 4-bit ASYNC IRQ mode %d \n",ret));
- sdio_release_host(func);
- return ret;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: 4-bit ASYNC IRQ mode enabled\n"));
- }
- /* give us some time to enable, in ms */
- func->enable_timeout = 100;
- ret = sdio_enable_func(func);
- if (ret) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), Unable to enable AR6K: 0x%X\n",
- __FUNCTION__, ret));
- sdio_release_host(func);
- return ret;
- }
- ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
- sdio_release_host(func);
- if (ret) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), Unable to set block size 0x%x AR6K: 0x%X\n",
- __FUNCTION__, HIF_MBOX_BLOCK_SIZE, ret));
- return ret;
- }
- device->is_disabled = false;
- /* create async I/O thread */
- if (!device->async_task) {
- device->async_shutdown = 0;
- device->async_task = kthread_create(async_task,
- (void *)device,
- "AR6K Async");
- if (IS_ERR(device->async_task)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), to create async task\n", __FUNCTION__));
- return -ENOMEM;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: start async task\n"));
- wake_up_process(device->async_task );
- }
- }
-
- if (!device->claimedContext) {
- taskFunc = startup_task;
- taskName = "AR6K startup";
- ret = 0;
-#if defined(CONFIG_PM)
- } else {
- taskFunc = enable_task;
- taskName = "AR6K enable";
- ret = -ENOMEM;
-#endif /* CONFIG_PM */
- }
- /* create resume thread */
- pTask = kthread_create(taskFunc, (void *)device, taskName);
- if (IS_ERR(pTask)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("AR6000: %s(), to create enabel task\n", __FUNCTION__));
- return -ENOMEM;
- }
- wake_up_process(pTask);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: -hifEnableFunc\n"));
-
- /* task will call the enable func, indicate pending */
- return ret;
-}
-
-/*
- * This should be moved to AR6K HTC layer.
- */
-int hifWaitForPendingRecv(struct hif_device *device)
-{
- s32 cnt = 10;
- u8 host_int_status;
- int status = 0;
-
- do {
- while (atomic_read(&device->irqHandling)) {
- /* wait until irq handler finished all the jobs */
- schedule_timeout(HZ/10);
- }
- /* check if there is any pending irq due to force done */
- host_int_status = 0;
- status = HIFReadWrite(device, HOST_INT_STATUS_ADDRESS,
- (u8 *)&host_int_status, sizeof(host_int_status),
- HIF_RD_SYNC_BYTE_INC, NULL);
- host_int_status = !status ? (host_int_status & (1 << 0)) : 0;
- if (host_int_status) {
- schedule(); /* schedule for next dsrHandler */
- }
- } while (host_int_status && --cnt > 0);
-
- if (host_int_status && cnt == 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
- ("AR6000: %s(), Unable clear up pending IRQ before the system suspended\n", __FUNCTION__));
- }
-
- return 0;
-}
-
-static void
-delHifDevice(struct hif_device * device)
-{
- AR_DEBUG_ASSERT(device!= NULL);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: delHifDevice; 0x%p\n", device));
- kfree(device->dma_buffer);
- kfree(device);
-}
-
-static void ResetAllCards(void)
-{
-}
-
-void HIFClaimDevice(struct hif_device *device, void *context)
-{
- device->claimedContext = context;
-}
-
-void HIFReleaseDevice(struct hif_device *device)
-{
- device->claimedContext = NULL;
-}
-
-int HIFAttachHTC(struct hif_device *device, HTC_CALLBACKS *callbacks)
-{
- if (device->htcCallbacks.context != NULL) {
- /* already in use! */
- return A_ERROR;
- }
- device->htcCallbacks = *callbacks;
- return 0;
-}
-
-void HIFDetachHTC(struct hif_device *device)
-{
- A_MEMZERO(&device->htcCallbacks,sizeof(device->htcCallbacks));
-}
-
-#define SDIO_SET_CMD52_ARG(arg,rw,func,raw,address,writedata) \
- (arg) = (((rw) & 1) << 31) | \
- (((func) & 0x7) << 28) | \
- (((raw) & 1) << 27) | \
- (1 << 26) | \
- (((address) & 0x1FFFF) << 9) | \
- (1 << 8) | \
- ((writedata) & 0xFF)
-
-#define SDIO_SET_CMD52_READ_ARG(arg,func,address) \
- SDIO_SET_CMD52_ARG(arg,0,(func),0,address,0x00)
-#define SDIO_SET_CMD52_WRITE_ARG(arg,func,address,value) \
- SDIO_SET_CMD52_ARG(arg,1,(func),0,address,value)
-
-static int Func0_CMD52WriteByte(struct mmc_card *card, unsigned int address, unsigned char byte)
-{
- struct mmc_command ioCmd;
- unsigned long arg;
-
- memset(&ioCmd,0,sizeof(ioCmd));
- SDIO_SET_CMD52_WRITE_ARG(arg,0,address,byte);
- ioCmd.opcode = SD_IO_RW_DIRECT;
- ioCmd.arg = arg;
- ioCmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
-
- return mmc_wait_for_cmd(card->host, &ioCmd, 0);
-}
-
-static int Func0_CMD52ReadByte(struct mmc_card *card, unsigned int address, unsigned char *byte)
-{
- struct mmc_command ioCmd;
- unsigned long arg;
- s32 err;
-
- memset(&ioCmd,0,sizeof(ioCmd));
- SDIO_SET_CMD52_READ_ARG(arg,0,address);
- ioCmd.opcode = SD_IO_RW_DIRECT;
- ioCmd.arg = arg;
- ioCmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
-
- err = mmc_wait_for_cmd(card->host, &ioCmd, 0);
-
- if ((!err) && (byte)) {
- *byte = ioCmd.resp[0] & 0xFF;
- }
-
- return err;
-}
diff --git a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
deleted file mode 100644
index 7516d913dab..00000000000
--- a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
+++ /dev/null
@@ -1,393 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// HIF scatter implementation
-//
-// Author(s): ="Atheros"
-//==============================================================================
-
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/sdio.h>
-#include <linux/kthread.h>
-#include "hif_internal.h"
-#define ATH_MODULE_NAME hif
-#include "a_debug.h"
-
-#ifdef HIF_LINUX_MMC_SCATTER_SUPPORT
-
-#define _CMD53_ARG_READ 0
-#define _CMD53_ARG_WRITE 1
-#define _CMD53_ARG_BLOCK_BASIS 1
-#define _CMD53_ARG_FIXED_ADDRESS 0
-#define _CMD53_ARG_INCR_ADDRESS 1
-
-#define SDIO_SET_CMD53_ARG(arg,rw,func,mode,opcode,address,bytes_blocks) \
- (arg) = (((rw) & 1) << 31) | \
- (((func) & 0x7) << 28) | \
- (((mode) & 1) << 27) | \
- (((opcode) & 1) << 26) | \
- (((address) & 0x1FFFF) << 9) | \
- ((bytes_blocks) & 0x1FF)
-
-static void FreeScatterReq(struct hif_device *device, struct hif_scatter_req *pReq)
-{
- unsigned long flag;
-
- spin_lock_irqsave(&device->lock, flag);
-
- DL_ListInsertTail(&device->ScatterReqHead, &pReq->ListLink);
-
- spin_unlock_irqrestore(&device->lock, flag);
-
-}
-
-static struct hif_scatter_req *AllocScatterReq(struct hif_device *device)
-{
- struct dl_list *pItem;
- unsigned long flag;
-
- spin_lock_irqsave(&device->lock, flag);
-
- pItem = DL_ListRemoveItemFromHead(&device->ScatterReqHead);
-
- spin_unlock_irqrestore(&device->lock, flag);
-
- if (pItem != NULL) {
- return A_CONTAINING_STRUCT(pItem, struct hif_scatter_req, ListLink);
- }
-
- return NULL;
-}
-
- /* called by async task to perform the operation synchronously using direct MMC APIs */
-int DoHifReadWriteScatter(struct hif_device *device, BUS_REQUEST *busrequest)
-{
- int i;
- u8 rw;
- u8 opcode;
- struct mmc_request mmcreq;
- struct mmc_command cmd;
- struct mmc_data data;
- struct hif_scatter_req_priv *pReqPriv;
- struct hif_scatter_req *pReq;
- int status = 0;
- struct scatterlist *pSg;
-
- pReqPriv = busrequest->pScatterReq;
-
- A_ASSERT(pReqPriv != NULL);
-
- pReq = pReqPriv->pHifScatterReq;
-
- memset(&mmcreq, 0, sizeof(struct mmc_request));
- memset(&cmd, 0, sizeof(struct mmc_command));
- memset(&data, 0, sizeof(struct mmc_data));
-
- data.blksz = HIF_MBOX_BLOCK_SIZE;
- data.blocks = pReq->TotalLength / HIF_MBOX_BLOCK_SIZE;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: (%s) Address: 0x%X, (BlockLen: %d, BlockCount: %d) , (tot:%d,sg:%d)\n",
- (pReq->Request & HIF_WRITE) ? "WRITE":"READ", pReq->Address, data.blksz, data.blocks,
- pReq->TotalLength,pReq->ValidScatterEntries));
-
- if (pReq->Request & HIF_WRITE) {
- rw = _CMD53_ARG_WRITE;
- data.flags = MMC_DATA_WRITE;
- } else {
- rw = _CMD53_ARG_READ;
- data.flags = MMC_DATA_READ;
- }
-
- if (pReq->Request & HIF_FIXED_ADDRESS) {
- opcode = _CMD53_ARG_FIXED_ADDRESS;
- } else {
- opcode = _CMD53_ARG_INCR_ADDRESS;
- }
-
- /* fill SG entries */
- pSg = pReqPriv->sgentries;
- sg_init_table(pSg, pReq->ValidScatterEntries);
-
- /* assemble SG list */
- for (i = 0 ; i < pReq->ValidScatterEntries ; i++, pSg++) {
- /* setup each sg entry */
- if ((unsigned long)pReq->ScatterList[i].pBuffer & 0x3) {
- /* note some scatter engines can handle unaligned buffers, print this
- * as informational only */
- AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
- ("HIF: (%s) Scatter Buffer is unaligned 0x%lx\n",
- pReq->Request & HIF_WRITE ? "WRITE":"READ",
- (unsigned long)pReq->ScatterList[i].pBuffer));
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, (" %d: Addr:0x%lX, Len:%d \n",
- i,(unsigned long)pReq->ScatterList[i].pBuffer,pReq->ScatterList[i].Length));
-
- sg_set_buf(pSg, pReq->ScatterList[i].pBuffer, pReq->ScatterList[i].Length);
- }
- /* set scatter-gather table for request */
- data.sg = pReqPriv->sgentries;
- data.sg_len = pReq->ValidScatterEntries;
- /* set command argument */
- SDIO_SET_CMD53_ARG(cmd.arg,
- rw,
- device->func->num,
- _CMD53_ARG_BLOCK_BASIS,
- opcode,
- pReq->Address,
- data.blocks);
-
- cmd.opcode = SD_IO_RW_EXTENDED;
- cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
-
- mmcreq.cmd = &cmd;
- mmcreq.data = &data;
-
- mmc_set_data_timeout(&data, device->func->card);
- /* synchronous call to process request */
- mmc_wait_for_req(device->func->card->host, &mmcreq);
-
- if (cmd.error) {
- status = A_ERROR;
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: cmd error: %d \n",cmd.error));
- }
-
- if (data.error) {
- status = A_ERROR;
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: data error: %d \n",data.error));
- }
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: FAILED!!! (%s) Address: 0x%X, Block mode (BlockLen: %d, BlockCount: %d)\n",
- (pReq->Request & HIF_WRITE) ? "WRITE":"READ",pReq->Address, data.blksz, data.blocks));
- }
-
- /* set completion status, fail or success */
- pReq->CompletionStatus = status;
-
- if (pReq->Request & HIF_ASYNCHRONOUS) {
- AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: async_task completion routine req: 0x%lX (%d)\n",(unsigned long)busrequest, status));
- /* complete the request */
- A_ASSERT(pReq->CompletionRoutine != NULL);
- pReq->CompletionRoutine(pReq);
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER async_task upping busrequest : 0x%lX (%d)\n", (unsigned long)busrequest,status));
- /* signal wait */
- up(&busrequest->sem_req);
- }
-
- return status;
-}
-
- /* callback to issue a read-write scatter request */
-static int HifReadWriteScatter(struct hif_device *device, struct hif_scatter_req *pReq)
-{
- int status = A_EINVAL;
- u32 request = pReq->Request;
- struct hif_scatter_req_priv *pReqPriv = (struct hif_scatter_req_priv *)pReq->HIFPrivate[0];
-
- do {
-
- A_ASSERT(pReqPriv != NULL);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: total len: %d Scatter Entries: %d\n",
- pReq->TotalLength, pReq->ValidScatterEntries));
-
- if (!(request & HIF_EXTENDED_IO)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
- ("HIF-SCATTER: Invalid command type: 0x%08x\n", request));
- break;
- }
-
- if (!(request & (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS))) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
- ("HIF-SCATTER: Invalid execution mode: 0x%08x\n", request));
- break;
- }
-
- if (!(request & HIF_BLOCK_BASIS)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
- ("HIF-SCATTER: Invalid data mode: 0x%08x\n", request));
- break;
- }
-
- if (pReq->TotalLength > MAX_SCATTER_REQ_TRANSFER_SIZE) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
- ("HIF-SCATTER: Invalid length: %d \n", pReq->TotalLength));
- break;
- }
-
- if (pReq->TotalLength == 0) {
- A_ASSERT(false);
- break;
- }
-
- /* add bus request to the async list for the async I/O thread to process */
- AddToAsyncList(device, pReqPriv->busrequest);
-
- if (request & HIF_SYNCHRONOUS) {
- AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: queued sync req: 0x%lX\n", (unsigned long)pReqPriv->busrequest));
- /* signal thread and wait */
- up(&device->sem_async);
- if (down_interruptible(&pReqPriv->busrequest->sem_req) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,("HIF-SCATTER: interrupted! \n"));
- /* interrupted, exit */
- status = A_ERROR;
- break;
- } else {
- status = pReq->CompletionStatus;
- }
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: queued async req: 0x%lX\n", (unsigned long)pReqPriv->busrequest));
- /* wake thread, it will process and then take care of the async callback */
- up(&device->sem_async);
- status = 0;
- }
-
- } while (false);
-
- if (status && (request & HIF_ASYNCHRONOUS)) {
- pReq->CompletionStatus = status;
- pReq->CompletionRoutine(pReq);
- status = 0;
- }
-
- return status;
-}
-
- /* setup of HIF scatter resources */
-int SetupHIFScatterSupport(struct hif_device *device, struct hif_device_scatter_support_info *pInfo)
-{
- int status = A_ERROR;
- int i;
- struct hif_scatter_req_priv *pReqPriv;
- BUS_REQUEST *busrequest;
-
- do {
-
- /* check if host supports scatter requests and it meets our requirements */
- if (device->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HIF-SCATTER : host only supports scatter of : %d entries, need: %d \n",
- device->func->card->host->max_segs, MAX_SCATTER_ENTRIES_PER_REQ));
- status = A_ENOTSUP;
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("HIF-SCATTER Enabled: max scatter req : %d entries: %d \n",
- MAX_SCATTER_REQUESTS, MAX_SCATTER_ENTRIES_PER_REQ));
-
- for (i = 0; i < MAX_SCATTER_REQUESTS; i++) {
- /* allocate the private request blob */
- pReqPriv = (struct hif_scatter_req_priv *)A_MALLOC(sizeof(struct hif_scatter_req_priv));
- if (NULL == pReqPriv) {
- break;
- }
- A_MEMZERO(pReqPriv, sizeof(struct hif_scatter_req_priv));
- /* save the device instance*/
- pReqPriv->device = device;
- /* allocate the scatter request */
- pReqPriv->pHifScatterReq = (struct hif_scatter_req *)A_MALLOC(sizeof(struct hif_scatter_req) +
- (MAX_SCATTER_ENTRIES_PER_REQ - 1) * (sizeof(struct hif_scatter_item)));
-
- if (NULL == pReqPriv->pHifScatterReq) {
- kfree(pReqPriv);
- break;
- }
- /* just zero the main part of the scatter request */
- A_MEMZERO(pReqPriv->pHifScatterReq, sizeof(struct hif_scatter_req));
- /* back pointer to the private struct */
- pReqPriv->pHifScatterReq->HIFPrivate[0] = pReqPriv;
- /* allocate a bus request for this scatter request */
- busrequest = hifAllocateBusRequest(device);
- if (NULL == busrequest) {
- kfree(pReqPriv->pHifScatterReq);
- kfree(pReqPriv);
- break;
- }
- /* assign the scatter request to this bus request */
- busrequest->pScatterReq = pReqPriv;
- /* point back to the request */
- pReqPriv->busrequest = busrequest;
- /* add it to the scatter pool */
- FreeScatterReq(device,pReqPriv->pHifScatterReq);
- }
-
- if (i != MAX_SCATTER_REQUESTS) {
- status = A_NO_MEMORY;
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HIF-SCATTER : failed to alloc scatter resources !\n"));
- break;
- }
-
- /* set scatter function pointers */
- pInfo->pAllocateReqFunc = AllocScatterReq;
- pInfo->pFreeReqFunc = FreeScatterReq;
- pInfo->pReadWriteScatterFunc = HifReadWriteScatter;
- pInfo->MaxScatterEntries = MAX_SCATTER_ENTRIES_PER_REQ;
- pInfo->MaxTransferSizePerScatterReq = MAX_SCATTER_REQ_TRANSFER_SIZE;
-
- status = 0;
-
- } while (false);
-
- if (status) {
- CleanupHIFScatterResources(device);
- }
-
- return status;
-}
-
- /* clean up scatter support */
-void CleanupHIFScatterResources(struct hif_device *device)
-{
- struct hif_scatter_req_priv *pReqPriv;
- struct hif_scatter_req *pReq;
-
- /* empty the free list */
-
- while (1) {
-
- pReq = AllocScatterReq(device);
-
- if (NULL == pReq) {
- break;
- }
-
- pReqPriv = (struct hif_scatter_req_priv *)pReq->HIFPrivate[0];
- A_ASSERT(pReqPriv != NULL);
-
- if (pReqPriv->busrequest != NULL) {
- pReqPriv->busrequest->pScatterReq = NULL;
- /* free bus request */
- hifFreeBusRequest(device, pReqPriv->busrequest);
- pReqPriv->busrequest = NULL;
- }
-
- if (pReqPriv->pHifScatterReq != NULL) {
- kfree(pReqPriv->pHifScatterReq);
- pReqPriv->pHifScatterReq = NULL;
- }
-
- kfree(pReqPriv);
- }
-}
-
-#endif // HIF_LINUX_MMC_SCATTER_SUPPORT
diff --git a/drivers/staging/ath6kl/htc2/AR6000/ar6k.c b/drivers/staging/ath6kl/htc2/AR6000/ar6k.c
deleted file mode 100644
index f8607bc0892..00000000000
--- a/drivers/staging/ath6kl/htc2/AR6000/ar6k.c
+++ /dev/null
@@ -1,1479 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="ar6k.c" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// AR6K device layer that handles register level I/O
-//
-// Author(s): ="Atheros"
-//==============================================================================
-
-#include "a_config.h"
-#include "athdefs.h"
-#include "hw/mbox_host_reg.h"
-#include "a_osapi.h"
-#include "../htc_debug.h"
-#include "hif.h"
-#include "htc_packet.h"
-#include "ar6k.h"
-
-#define MAILBOX_FOR_BLOCK_SIZE 1
-
-int DevEnableInterrupts(struct ar6k_device *pDev);
-int DevDisableInterrupts(struct ar6k_device *pDev);
-
-static void DevCleanupVirtualScatterSupport(struct ar6k_device *pDev);
-
-void AR6KFreeIOPacket(struct ar6k_device *pDev, struct htc_packet *pPacket)
-{
- LOCK_AR6K(pDev);
- HTC_PACKET_ENQUEUE(&pDev->RegisterIOList,pPacket);
- UNLOCK_AR6K(pDev);
-}
-
-struct htc_packet *AR6KAllocIOPacket(struct ar6k_device *pDev)
-{
- struct htc_packet *pPacket;
-
- LOCK_AR6K(pDev);
- pPacket = HTC_PACKET_DEQUEUE(&pDev->RegisterIOList);
- UNLOCK_AR6K(pDev);
-
- return pPacket;
-}
-
-void DevCleanup(struct ar6k_device *pDev)
-{
- DevCleanupGMbox(pDev);
-
- if (pDev->HifAttached) {
- HIFDetachHTC(pDev->HIFDevice);
- pDev->HifAttached = false;
- }
-
- DevCleanupVirtualScatterSupport(pDev);
-
- if (A_IS_MUTEX_VALID(&pDev->Lock)) {
- A_MUTEX_DELETE(&pDev->Lock);
- }
-}
-
-int DevSetup(struct ar6k_device *pDev)
-{
- u32 blocksizes[AR6K_MAILBOXES];
- int status = 0;
- int i;
- HTC_CALLBACKS htcCallbacks;
-
- do {
-
- DL_LIST_INIT(&pDev->ScatterReqHead);
- /* initialize our free list of IO packets */
- INIT_HTC_PACKET_QUEUE(&pDev->RegisterIOList);
- A_MUTEX_INIT(&pDev->Lock);
-
- A_MEMZERO(&htcCallbacks, sizeof(HTC_CALLBACKS));
- /* the device layer handles these */
- htcCallbacks.rwCompletionHandler = DevRWCompletionHandler;
- htcCallbacks.dsrHandler = DevDsrHandler;
- htcCallbacks.context = pDev;
-
- status = HIFAttachHTC(pDev->HIFDevice, &htcCallbacks);
-
- if (status) {
- break;
- }
-
- pDev->HifAttached = true;
-
- /* get the addresses for all 4 mailboxes */
- status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR,
- &pDev->MailBoxInfo, sizeof(pDev->MailBoxInfo));
-
- if (status) {
- A_ASSERT(false);
- break;
- }
-
- /* carve up register I/O packets (these are for ASYNC register I/O ) */
- for (i = 0; i < AR6K_MAX_REG_IO_BUFFERS; i++) {
- struct htc_packet *pIOPacket;
- pIOPacket = &pDev->RegIOBuffers[i].HtcPacket;
- SET_HTC_PACKET_INFO_RX_REFILL(pIOPacket,
- pDev,
- pDev->RegIOBuffers[i].Buffer,
- AR6K_REG_IO_BUFFER_SIZE,
- 0); /* don't care */
- AR6KFreeIOPacket(pDev,pIOPacket);
- }
-
- /* get the block sizes */
- status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
- blocksizes, sizeof(blocksizes));
-
- if (status) {
- A_ASSERT(false);
- break;
- }
-
- /* note: we actually get the block size of a mailbox other than 0, for SDIO the block
- * size on mailbox 0 is artificially set to 1. So we use the block size that is set
- * for the other 3 mailboxes */
- pDev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
- /* must be a power of 2 */
- A_ASSERT((pDev->BlockSize & (pDev->BlockSize - 1)) == 0);
-
- /* assemble mask, used for padding to a block */
- pDev->BlockMask = pDev->BlockSize - 1;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("BlockSize: %d, MailboxAddress:0x%X \n",
- pDev->BlockSize, pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX]));
-
- pDev->GetPendingEventsFunc = NULL;
- /* see if the HIF layer implements the get pending events function */
- HIFConfigureDevice(pDev->HIFDevice,
- HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
- &pDev->GetPendingEventsFunc,
- sizeof(pDev->GetPendingEventsFunc));
-
- /* assume we can process HIF interrupt events asynchronously */
- pDev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC;
-
- /* see if the HIF layer overrides this assumption */
- HIFConfigureDevice(pDev->HIFDevice,
- HIF_DEVICE_GET_IRQ_PROC_MODE,
- &pDev->HifIRQProcessingMode,
- sizeof(pDev->HifIRQProcessingMode));
-
- switch (pDev->HifIRQProcessingMode) {
- case HIF_DEVICE_IRQ_SYNC_ONLY:
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("HIF Interrupt processing is SYNC ONLY\n"));
- /* see if HIF layer wants HTC to yield */
- HIFConfigureDevice(pDev->HIFDevice,
- HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
- &pDev->HifIRQYieldParams,
- sizeof(pDev->HifIRQYieldParams));
-
- if (pDev->HifIRQYieldParams.RecvPacketYieldCount > 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
- ("HIF requests that DSR yield per %d RECV packets \n",
- pDev->HifIRQYieldParams.RecvPacketYieldCount));
- pDev->DSRCanYield = true;
- }
- break;
- case HIF_DEVICE_IRQ_ASYNC_SYNC:
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF Interrupt processing is ASYNC and SYNC\n"));
- break;
- default:
- A_ASSERT(false);
- }
-
- pDev->HifMaskUmaskRecvEvent = NULL;
-
- /* see if the HIF layer implements the mask/unmask recv events function */
- HIFConfigureDevice(pDev->HIFDevice,
- HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
- &pDev->HifMaskUmaskRecvEvent,
- sizeof(pDev->HifMaskUmaskRecvEvent));
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF special overrides : 0x%lX , 0x%lX\n",
- (unsigned long)pDev->GetPendingEventsFunc, (unsigned long)pDev->HifMaskUmaskRecvEvent));
-
- status = DevDisableInterrupts(pDev);
-
- if (status) {
- break;
- }
-
- status = DevSetupGMbox(pDev);
-
- } while (false);
-
- if (status) {
- if (pDev->HifAttached) {
- HIFDetachHTC(pDev->HIFDevice);
- pDev->HifAttached = false;
- }
- }
-
- return status;
-
-}
-
-int DevEnableInterrupts(struct ar6k_device *pDev)
-{
- int status;
- struct ar6k_irq_enable_registers regs;
-
- LOCK_AR6K(pDev);
-
- /* Enable all the interrupts except for the internal AR6000 CPU interrupt */
- pDev->IrqEnableRegisters.int_status_enable = INT_STATUS_ENABLE_ERROR_SET(0x01) |
- INT_STATUS_ENABLE_CPU_SET(0x01) |
- INT_STATUS_ENABLE_COUNTER_SET(0x01);
-
- if (NULL == pDev->GetPendingEventsFunc) {
- pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
- } else {
- /* The HIF layer provided us with a pending events function which means that
- * the detection of pending mbox messages is handled in the HIF layer.
- * This is the case for the SPI2 interface.
- * In the normal case we enable MBOX interrupts, for the case
- * with HIFs that offer this mechanism, we keep these interrupts
- * masked */
- pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
- }
-
-
- /* Set up the CPU Interrupt Status Register */
- pDev->IrqEnableRegisters.cpu_int_status_enable = CPU_INT_STATUS_ENABLE_BIT_SET(0x00);
-
- /* Set up the Error Interrupt Status Register */
- pDev->IrqEnableRegisters.error_status_enable =
- ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01) |
- ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01);
-
- /* Set up the Counter Interrupt Status Register (only for debug interrupt to catch fatal errors) */
- pDev->IrqEnableRegisters.counter_int_status_enable =
- COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK);
-
- /* copy into our temp area */
- memcpy(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
-
- UNLOCK_AR6K(pDev);
-
- /* always synchronous */
- status = HIFReadWrite(pDev->HIFDevice,
- INT_STATUS_ENABLE_ADDRESS,
- &regs.int_status_enable,
- AR6K_IRQ_ENABLE_REGS_SIZE,
- HIF_WR_SYNC_BYTE_INC,
- NULL);
-
- if (status) {
- /* Can't write it for some reason */
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("Failed to update interrupt control registers err: %d\n", status));
-
- }
-
- return status;
-}
-
-int DevDisableInterrupts(struct ar6k_device *pDev)
-{
- struct ar6k_irq_enable_registers regs;
-
- LOCK_AR6K(pDev);
- /* Disable all interrupts */
- pDev->IrqEnableRegisters.int_status_enable = 0;
- pDev->IrqEnableRegisters.cpu_int_status_enable = 0;
- pDev->IrqEnableRegisters.error_status_enable = 0;
- pDev->IrqEnableRegisters.counter_int_status_enable = 0;
- /* copy into our temp area */
- memcpy(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
-
- UNLOCK_AR6K(pDev);
-
- /* always synchronous */
- return HIFReadWrite(pDev->HIFDevice,
- INT_STATUS_ENABLE_ADDRESS,
- &regs.int_status_enable,
- AR6K_IRQ_ENABLE_REGS_SIZE,
- HIF_WR_SYNC_BYTE_INC,
- NULL);
-}
-
-/* enable device interrupts */
-int DevUnmaskInterrupts(struct ar6k_device *pDev)
-{
- /* for good measure, make sure interrupt are disabled before unmasking at the HIF
- * layer.
- * The rationale here is that between device insertion (where we clear the interrupts the first time)
- * and when HTC is finally ready to handle interrupts, other software can perform target "soft" resets.
- * The AR6K interrupt enables reset back to an "enabled" state when this happens.
- * */
- int IntStatus = 0;
- DevDisableInterrupts(pDev);
-
-#ifdef THREAD_X
- // Tobe verified...
- IntStatus = DevEnableInterrupts(pDev);
- /* Unmask the host controller interrupts */
- HIFUnMaskInterrupt(pDev->HIFDevice);
-#else
- /* Unmask the host controller interrupts */
- HIFUnMaskInterrupt(pDev->HIFDevice);
- IntStatus = DevEnableInterrupts(pDev);
-#endif
-
- return IntStatus;
-}
-
-/* disable all device interrupts */
-int DevMaskInterrupts(struct ar6k_device *pDev)
-{
- /* mask the interrupt at the HIF layer, we don't want a stray interrupt taken while
- * we zero out our shadow registers in DevDisableInterrupts()*/
- HIFMaskInterrupt(pDev->HIFDevice);
-
- return DevDisableInterrupts(pDev);
-}
-
-/* callback when our fetch to enable/disable completes */
-static void DevDoEnableDisableRecvAsyncHandler(void *Context, struct htc_packet *pPacket)
-{
- struct ar6k_device *pDev = (struct ar6k_device *)Context;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevDoEnableDisableRecvAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
-
- if (pPacket->Status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- (" Failed to disable receiver, status:%d \n", pPacket->Status));
- }
- /* free this IO packet */
- AR6KFreeIOPacket(pDev,pPacket);
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevDoEnableDisableRecvAsyncHandler \n"));
-}
-
-/* disable packet reception (used in case the host runs out of buffers)
- * this is the "override" method when the HIF reports another methods to
- * disable recv events */
-static int DevDoEnableDisableRecvOverride(struct ar6k_device *pDev, bool EnableRecv, bool AsyncMode)
-{
- int status = 0;
- struct htc_packet *pIOPacket = NULL;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("DevDoEnableDisableRecvOverride: Enable:%d Mode:%d\n",
- EnableRecv,AsyncMode));
-
- do {
-
- if (AsyncMode) {
-
- pIOPacket = AR6KAllocIOPacket(pDev);
-
- if (NULL == pIOPacket) {
- status = A_NO_MEMORY;
- A_ASSERT(false);
- break;
- }
-
- /* stick in our completion routine when the I/O operation completes */
- pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler;
- pIOPacket->pContext = pDev;
-
- /* call the HIF layer override and do this asynchronously */
- status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice,
- EnableRecv ? HIF_UNMASK_RECV : HIF_MASK_RECV,
- pIOPacket);
- break;
- }
-
- /* if we get here we are doing it synchronously */
- status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice,
- EnableRecv ? HIF_UNMASK_RECV : HIF_MASK_RECV,
- NULL);
-
- } while (false);
-
- if (status && (pIOPacket != NULL)) {
- AR6KFreeIOPacket(pDev,pIOPacket);
- }
-
- return status;
-}
-
-/* disable packet reception (used in case the host runs out of buffers)
- * this is the "normal" method using the interrupt enable registers through
- * the host I/F */
-static int DevDoEnableDisableRecvNormal(struct ar6k_device *pDev, bool EnableRecv, bool AsyncMode)
-{
- int status = 0;
- struct htc_packet *pIOPacket = NULL;
- struct ar6k_irq_enable_registers regs;
-
- /* take the lock to protect interrupt enable shadows */
- LOCK_AR6K(pDev);
-
- if (EnableRecv) {
- pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
- } else {
- pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
- }
-
- /* copy into our temp area */
- memcpy(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
- UNLOCK_AR6K(pDev);
-
- do {
-
- if (AsyncMode) {
-
- pIOPacket = AR6KAllocIOPacket(pDev);
-
- if (NULL == pIOPacket) {
- status = A_NO_MEMORY;
- A_ASSERT(false);
- break;
- }
-
- /* copy values to write to our async I/O buffer */
- memcpy(pIOPacket->pBuffer,&regs,AR6K_IRQ_ENABLE_REGS_SIZE);
-
- /* stick in our completion routine when the I/O operation completes */
- pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler;
- pIOPacket->pContext = pDev;
-
- /* write it out asynchronously */
- HIFReadWrite(pDev->HIFDevice,
- INT_STATUS_ENABLE_ADDRESS,
- pIOPacket->pBuffer,
- AR6K_IRQ_ENABLE_REGS_SIZE,
- HIF_WR_ASYNC_BYTE_INC,
- pIOPacket);
- break;
- }
-
- /* if we get here we are doing it synchronously */
-
- status = HIFReadWrite(pDev->HIFDevice,
- INT_STATUS_ENABLE_ADDRESS,
- &regs.int_status_enable,
- AR6K_IRQ_ENABLE_REGS_SIZE,
- HIF_WR_SYNC_BYTE_INC,
- NULL);
-
- } while (false);
-
- if (status && (pIOPacket != NULL)) {
- AR6KFreeIOPacket(pDev,pIOPacket);
- }
-
- return status;
-}
-
-
-int DevStopRecv(struct ar6k_device *pDev, bool AsyncMode)
-{
- if (NULL == pDev->HifMaskUmaskRecvEvent) {
- return DevDoEnableDisableRecvNormal(pDev,false,AsyncMode);
- } else {
- return DevDoEnableDisableRecvOverride(pDev,false,AsyncMode);
- }
-}
-
-int DevEnableRecv(struct ar6k_device *pDev, bool AsyncMode)
-{
- if (NULL == pDev->HifMaskUmaskRecvEvent) {
- return DevDoEnableDisableRecvNormal(pDev,true,AsyncMode);
- } else {
- return DevDoEnableDisableRecvOverride(pDev,true,AsyncMode);
- }
-}
-
-int DevWaitForPendingRecv(struct ar6k_device *pDev,u32 TimeoutInMs,bool *pbIsRecvPending)
-{
- int status = 0;
- u8 host_int_status = 0x0;
- u32 counter = 0x0;
-
- if(TimeoutInMs < 100)
- {
- TimeoutInMs = 100;
- }
-
- counter = TimeoutInMs / 100;
-
- do
- {
- //Read the Host Interrupt Status Register
- status = HIFReadWrite(pDev->HIFDevice,
- HOST_INT_STATUS_ADDRESS,
- &host_int_status,
- sizeof(u8),
- HIF_RD_SYNC_BYTE_INC,
- NULL);
- if (status)
- {
- AR_DEBUG_PRINTF(ATH_LOG_ERR,("DevWaitForPendingRecv:Read HOST_INT_STATUS_ADDRESS Failed 0x%X\n",status));
- break;
- }
-
- host_int_status = !status ? (host_int_status & (1 << 0)):0;
- if(!host_int_status)
- {
- status = 0;
- *pbIsRecvPending = false;
- break;
- }
- else
- {
- *pbIsRecvPending = true;
- }
-
- A_MDELAY(100);
-
- counter--;
-
- }while(counter);
- return status;
-}
-
-void DevDumpRegisters(struct ar6k_device *pDev,
- struct ar6k_irq_proc_registers *pIrqProcRegs,
- struct ar6k_irq_enable_registers *pIrqEnableRegs)
-{
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("\n<------- Register Table -------->\n"));
-
- if (pIrqProcRegs != NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("Host Int Status: 0x%x\n",pIrqProcRegs->host_int_status));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("CPU Int Status: 0x%x\n",pIrqProcRegs->cpu_int_status));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("Error Int Status: 0x%x\n",pIrqProcRegs->error_int_status));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("Counter Int Status: 0x%x\n",pIrqProcRegs->counter_int_status));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("Mbox Frame: 0x%x\n",pIrqProcRegs->mbox_frame));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("Rx Lookahead Valid: 0x%x\n",pIrqProcRegs->rx_lookahead_valid));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("Rx Lookahead 0: 0x%x\n",pIrqProcRegs->rx_lookahead[0]));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("Rx Lookahead 1: 0x%x\n",pIrqProcRegs->rx_lookahead[1]));
-
- if (pDev->MailBoxInfo.GMboxAddress != 0) {
- /* if the target supports GMBOX hardware, dump some additional state */
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("GMBOX Host Int Status 2: 0x%x\n",pIrqProcRegs->host_int_status2));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("GMBOX RX Avail: 0x%x\n",pIrqProcRegs->gmbox_rx_avail));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("GMBOX lookahead alias 0: 0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[0]));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("GMBOX lookahead alias 1: 0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[1]));
- }
-
- }
-
- if (pIrqEnableRegs != NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("Int Status Enable: 0x%x\n",pIrqEnableRegs->int_status_enable));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("Counter Int Status Enable: 0x%x\n",pIrqEnableRegs->counter_int_status_enable));
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("<------------------------------->\n"));
-}
-
-
-#define DEV_GET_VIRT_DMA_INFO(p) ((struct dev_scatter_dma_virtual_info *)((p)->HIFPrivate[0]))
-
-static struct hif_scatter_req *DevAllocScatterReq(struct hif_device *Context)
-{
- struct dl_list *pItem;
- struct ar6k_device *pDev = (struct ar6k_device *)Context;
- LOCK_AR6K(pDev);
- pItem = DL_ListRemoveItemFromHead(&pDev->ScatterReqHead);
- UNLOCK_AR6K(pDev);
- if (pItem != NULL) {
- return A_CONTAINING_STRUCT(pItem, struct hif_scatter_req, ListLink);
- }
- return NULL;
-}
-
-static void DevFreeScatterReq(struct hif_device *Context, struct hif_scatter_req *pReq)
-{
- struct ar6k_device *pDev = (struct ar6k_device *)Context;
- LOCK_AR6K(pDev);
- DL_ListInsertTail(&pDev->ScatterReqHead, &pReq->ListLink);
- UNLOCK_AR6K(pDev);
-}
-
-int DevCopyScatterListToFromDMABuffer(struct hif_scatter_req *pReq, bool FromDMA)
-{
- u8 *pDMABuffer = NULL;
- int i, remaining;
- u32 length;
-
- pDMABuffer = pReq->pScatterBounceBuffer;
-
- if (pDMABuffer == NULL) {
- A_ASSERT(false);
- return A_EINVAL;
- }
-
- remaining = (int)pReq->TotalLength;
-
- for (i = 0; i < pReq->ValidScatterEntries; i++) {
-
- length = min((int)pReq->ScatterList[i].Length, remaining);
-
- if (length != (int)pReq->ScatterList[i].Length) {
- A_ASSERT(false);
- /* there is a problem with the scatter list */
- return A_EINVAL;
- }
-
- if (FromDMA) {
- /* from DMA buffer */
- memcpy(pReq->ScatterList[i].pBuffer, pDMABuffer , length);
- } else {
- /* to DMA buffer */
- memcpy(pDMABuffer, pReq->ScatterList[i].pBuffer, length);
- }
-
- pDMABuffer += length;
- remaining -= length;
- }
-
- return 0;
-}
-
-static void DevReadWriteScatterAsyncHandler(void *Context, struct htc_packet *pPacket)
-{
- struct ar6k_device *pDev = (struct ar6k_device *)Context;
- struct hif_scatter_req *pReq = (struct hif_scatter_req *)pPacket->pPktContext;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+DevReadWriteScatterAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
-
- pReq->CompletionStatus = pPacket->Status;
-
- AR6KFreeIOPacket(pDev,pPacket);
-
- pReq->CompletionRoutine(pReq);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-DevReadWriteScatterAsyncHandler \n"));
-}
-
-static int DevReadWriteScatter(struct hif_device *Context, struct hif_scatter_req *pReq)
-{
- struct ar6k_device *pDev = (struct ar6k_device *)Context;
- int status = 0;
- struct htc_packet *pIOPacket = NULL;
- u32 request = pReq->Request;
-
- do {
-
- if (pReq->TotalLength > AR6K_MAX_TRANSFER_SIZE_PER_SCATTER) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("Invalid length: %d \n", pReq->TotalLength));
- break;
- }
-
- if (pReq->TotalLength == 0) {
- A_ASSERT(false);
- break;
- }
-
- if (request & HIF_ASYNCHRONOUS) {
- /* use an I/O packet to carry this request */
- pIOPacket = AR6KAllocIOPacket(pDev);
- if (NULL == pIOPacket) {
- status = A_NO_MEMORY;
- break;
- }
-
- /* save the request */
- pIOPacket->pPktContext = pReq;
- /* stick in our completion routine when the I/O operation completes */
- pIOPacket->Completion = DevReadWriteScatterAsyncHandler;
- pIOPacket->pContext = pDev;
- }
-
- if (request & HIF_WRITE) {
- /* in virtual DMA, we are issuing the requests through the legacy HIFReadWrite API
- * this API will adjust the address automatically for the last byte to fall on the mailbox
- * EOM. */
-
- /* if the address is an extended address, we can adjust the address here since the extended
- * address will bypass the normal checks in legacy HIF layers */
- if (pReq->Address == pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedAddress) {
- pReq->Address += pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedSize - pReq->TotalLength;
- }
- }
-
- /* use legacy readwrite */
- status = HIFReadWrite(pDev->HIFDevice,
- pReq->Address,
- DEV_GET_VIRT_DMA_INFO(pReq)->pVirtDmaBuffer,
- pReq->TotalLength,
- request,
- (request & HIF_ASYNCHRONOUS) ? pIOPacket : NULL);
-
- } while (false);
-
- if ((status != A_PENDING) && status && (request & HIF_ASYNCHRONOUS)) {
- if (pIOPacket != NULL) {
- AR6KFreeIOPacket(pDev,pIOPacket);
- }
- pReq->CompletionStatus = status;
- pReq->CompletionRoutine(pReq);
- status = 0;
- }
-
- return status;
-}
-
-
-static void DevCleanupVirtualScatterSupport(struct ar6k_device *pDev)
-{
- struct hif_scatter_req *pReq;
-
- while (1) {
- pReq = DevAllocScatterReq((struct hif_device *)pDev);
- if (NULL == pReq) {
- break;
- }
- kfree(pReq);
- }
-
-}
-
- /* function to set up virtual scatter support if HIF layer has not implemented the interface */
-static int DevSetupVirtualScatterSupport(struct ar6k_device *pDev)
-{
- int status = 0;
- int bufferSize, sgreqSize;
- int i;
- struct dev_scatter_dma_virtual_info *pVirtualInfo;
- struct hif_scatter_req *pReq;
-
- bufferSize = sizeof(struct dev_scatter_dma_virtual_info) +
- 2 * (A_GET_CACHE_LINE_BYTES()) + AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;
-
- sgreqSize = sizeof(struct hif_scatter_req) +
- (AR6K_SCATTER_ENTRIES_PER_REQ - 1) * (sizeof(struct hif_scatter_item));
-
- for (i = 0; i < AR6K_SCATTER_REQS; i++) {
- /* allocate the scatter request, buffer info and the actual virtual buffer itself */
- pReq = (struct hif_scatter_req *)A_MALLOC(sgreqSize + bufferSize);
-
- if (NULL == pReq) {
- status = A_NO_MEMORY;
- break;
- }
-
- A_MEMZERO(pReq, sgreqSize);
-
- /* the virtual DMA starts after the scatter request struct */
- pVirtualInfo = (struct dev_scatter_dma_virtual_info *)((u8 *)pReq + sgreqSize);
- A_MEMZERO(pVirtualInfo, sizeof(struct dev_scatter_dma_virtual_info));
-
- pVirtualInfo->pVirtDmaBuffer = &pVirtualInfo->DataArea[0];
- /* align buffer to cache line in case host controller can actually DMA this */
- pVirtualInfo->pVirtDmaBuffer = A_ALIGN_TO_CACHE_LINE(pVirtualInfo->pVirtDmaBuffer);
- /* store the structure in the private area */
- pReq->HIFPrivate[0] = pVirtualInfo;
- /* we emulate a DMA bounce interface */
- pReq->ScatterMethod = HIF_SCATTER_DMA_BOUNCE;
- pReq->pScatterBounceBuffer = pVirtualInfo->pVirtDmaBuffer;
- /* free request to the list */
- DevFreeScatterReq((struct hif_device *)pDev,pReq);
- }
-
- if (status) {
- DevCleanupVirtualScatterSupport(pDev);
- } else {
- pDev->HifScatterInfo.pAllocateReqFunc = DevAllocScatterReq;
- pDev->HifScatterInfo.pFreeReqFunc = DevFreeScatterReq;
- pDev->HifScatterInfo.pReadWriteScatterFunc = DevReadWriteScatter;
- if (pDev->MailBoxInfo.MboxBusIFType == MBOX_BUS_IF_SPI) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6K: SPI bus requires RX scatter limits\n"));
- pDev->HifScatterInfo.MaxScatterEntries = AR6K_MIN_SCATTER_ENTRIES_PER_REQ;
- pDev->HifScatterInfo.MaxTransferSizePerScatterReq = AR6K_MIN_TRANSFER_SIZE_PER_SCATTER;
- } else {
- pDev->HifScatterInfo.MaxScatterEntries = AR6K_SCATTER_ENTRIES_PER_REQ;
- pDev->HifScatterInfo.MaxTransferSizePerScatterReq = AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;
- }
- pDev->ScatterIsVirtual = true;
- }
-
- return status;
-}
-
-int DevCleanupMsgBundling(struct ar6k_device *pDev)
-{
- if(NULL != pDev)
- {
- DevCleanupVirtualScatterSupport(pDev);
- }
-
- return 0;
-}
-
-int DevSetupMsgBundling(struct ar6k_device *pDev, int MaxMsgsPerTransfer)
-{
- int status;
-
- if (pDev->MailBoxInfo.Flags & HIF_MBOX_FLAG_NO_BUNDLING) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("HIF requires bundling disabled\n"));
- return A_ENOTSUP;
- }
-
- status = HIFConfigureDevice(pDev->HIFDevice,
- HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
- &pDev->HifScatterInfo,
- sizeof(pDev->HifScatterInfo));
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
- ("AR6K: ** HIF layer does not support scatter requests (%d) \n",status));
-
- /* we can try to use a virtual DMA scatter mechanism using legacy HIFReadWrite() */
- status = DevSetupVirtualScatterSupport(pDev);
-
- if (!status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("AR6K: virtual scatter transfers enabled (max scatter items:%d: maxlen:%d) \n",
- DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
- }
-
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("AR6K: HIF layer supports scatter requests (max scatter items:%d: maxlen:%d) \n",
- DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
- }
-
- if (!status) {
- /* for the recv path, the maximum number of bytes per recv bundle is just limited
- * by the maximum transfer size at the HIF layer */
- pDev->MaxRecvBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;
-
- if (pDev->MailBoxInfo.MboxBusIFType == MBOX_BUS_IF_SPI) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6K : SPI bus requires TX bundling disabled\n"));
- pDev->MaxSendBundleSize = 0;
- } else {
- /* for the send path, the max transfer size is limited by the existence and size of
- * the extended mailbox address range */
- if (pDev->MailBoxInfo.MboxProp[0].ExtendedAddress != 0) {
- pDev->MaxSendBundleSize = pDev->MailBoxInfo.MboxProp[0].ExtendedSize;
- } else {
- /* legacy */
- pDev->MaxSendBundleSize = AR6K_LEGACY_MAX_WRITE_LENGTH;
- }
-
- if (pDev->MaxSendBundleSize > pDev->HifScatterInfo.MaxTransferSizePerScatterReq) {
- /* limit send bundle size to what the HIF can support for scatter requests */
- pDev->MaxSendBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;
- }
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("AR6K: max recv: %d max send: %d \n",
- DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev), DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev)));
-
- }
- return status;
-}
-
-int DevSubmitScatterRequest(struct ar6k_device *pDev, struct hif_scatter_req *pScatterReq, bool Read, bool Async)
-{
- int status;
-
- if (Read) {
- /* read operation */
- pScatterReq->Request = (Async) ? HIF_RD_ASYNC_BLOCK_FIX : HIF_RD_SYNC_BLOCK_FIX;
- pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
- A_ASSERT(pScatterReq->TotalLength <= (u32)DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev));
- } else {
- u32 mailboxWidth;
-
- /* write operation */
- pScatterReq->Request = (Async) ? HIF_WR_ASYNC_BLOCK_INC : HIF_WR_SYNC_BLOCK_INC;
- A_ASSERT(pScatterReq->TotalLength <= (u32)DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev));
- if (pScatterReq->TotalLength > AR6K_LEGACY_MAX_WRITE_LENGTH) {
- /* for large writes use the extended address */
- pScatterReq->Address = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedAddress;
- mailboxWidth = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedSize;
- } else {
- pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
- mailboxWidth = AR6K_LEGACY_MAX_WRITE_LENGTH;
- }
-
- if (!pDev->ScatterIsVirtual) {
- /* we are passing this scatter list down to the HIF layer' scatter request handler, fixup the address
- * so that the last byte falls on the EOM, we do this for those HIFs that support the
- * scatter API */
- pScatterReq->Address += (mailboxWidth - pScatterReq->TotalLength);
- }
-
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV | ATH_DEBUG_SEND,
- ("DevSubmitScatterRequest, Entries: %d, Total Length: %d Mbox:0x%X (mode: %s : %s)\n",
- pScatterReq->ValidScatterEntries,
- pScatterReq->TotalLength,
- pScatterReq->Address,
- Async ? "ASYNC" : "SYNC",
- (Read) ? "RD" : "WR"));
-
- status = DEV_PREPARE_SCATTER_OPERATION(pScatterReq);
-
- if (status) {
- if (Async) {
- pScatterReq->CompletionStatus = status;
- pScatterReq->CompletionRoutine(pScatterReq);
- return 0;
- }
- return status;
- }
-
- status = pDev->HifScatterInfo.pReadWriteScatterFunc(pDev->ScatterIsVirtual ? pDev : pDev->HIFDevice,
- pScatterReq);
- if (!Async) {
- /* in sync mode, we can touch the scatter request */
- pScatterReq->CompletionStatus = status;
- DEV_FINISH_SCATTER_OPERATION(pScatterReq);
- } else {
- if (status == A_PENDING) {
- status = 0;
- }
- }
-
- return status;
-}
-
-
-#ifdef MBOXHW_UNIT_TEST
-
-
-/* This is a mailbox hardware unit test that must be called in a schedulable context
- * This test is very simple, it will send a list of buffers with a counting pattern
- * and the target will invert the data and send the message back
- *
- * the unit test has the following constraints:
- *
- * The target has at least 8 buffers of 256 bytes each. The host will send
- * the following pattern of buffers in rapid succession :
- *
- * 1 buffer - 128 bytes
- * 1 buffer - 256 bytes
- * 1 buffer - 512 bytes
- * 1 buffer - 1024 bytes
- *
- * The host will send the buffers to one mailbox and wait for buffers to be reflected
- * back from the same mailbox. The target sends the buffers FIFO order.
- * Once the final buffer has been received for a mailbox, the next mailbox is tested.
- *
- *
- * Note: To simplifythe test , we assume that the chosen buffer sizes
- * will fall on a nice block pad
- *
- * It is expected that higher-order tests will be written to stress the mailboxes using
- * a message-based protocol (with some performance timming) that can create more
- * randomness in the packets sent over mailboxes.
- *
- * */
-
-#define A_ROUND_UP_PWR2(x, align) (((int) (x) + ((align)-1)) & ~((align)-1))
-
-#define BUFFER_BLOCK_PAD 128
-
-#if 0
-#define BUFFER1 128
-#define BUFFER2 256
-#define BUFFER3 512
-#define BUFFER4 1024
-#endif
-
-#if 1
-#define BUFFER1 80
-#define BUFFER2 200
-#define BUFFER3 444
-#define BUFFER4 800
-#endif
-
-#define TOTAL_BYTES (A_ROUND_UP_PWR2(BUFFER1,BUFFER_BLOCK_PAD) + \
- A_ROUND_UP_PWR2(BUFFER2,BUFFER_BLOCK_PAD) + \
- A_ROUND_UP_PWR2(BUFFER3,BUFFER_BLOCK_PAD) + \
- A_ROUND_UP_PWR2(BUFFER4,BUFFER_BLOCK_PAD) )
-
-#define TEST_BYTES (BUFFER1 + BUFFER2 + BUFFER3 + BUFFER4)
-
-#define TEST_CREDITS_RECV_TIMEOUT 100
-
-static u8 g_Buffer[TOTAL_BYTES];
-static u32 g_MailboxAddrs[AR6K_MAILBOXES];
-static u32 g_BlockSizes[AR6K_MAILBOXES];
-
-#define BUFFER_PROC_LIST_DEPTH 4
-
-struct buffer_proc_list {
- u8 *pBuffer;
- u32 length;
-};
-
-
-#define PUSH_BUFF_PROC_ENTRY(pList,len,pCurrpos) \
-{ \
- (pList)->pBuffer = (pCurrpos); \
- (pList)->length = (len); \
- (pCurrpos) += (len); \
- (pList)++; \
-}
-
-/* a simple and crude way to send different "message" sizes */
-static void AssembleBufferList(struct buffer_proc_list *pList)
-{
- u8 *pBuffer = g_Buffer;
-
-#if BUFFER_PROC_LIST_DEPTH < 4
-#error "Buffer processing list depth is not deep enough!!"
-#endif
-
- PUSH_BUFF_PROC_ENTRY(pList,BUFFER1,pBuffer);
- PUSH_BUFF_PROC_ENTRY(pList,BUFFER2,pBuffer);
- PUSH_BUFF_PROC_ENTRY(pList,BUFFER3,pBuffer);
- PUSH_BUFF_PROC_ENTRY(pList,BUFFER4,pBuffer);
-
-}
-
-#define FILL_ZERO true
-#define FILL_COUNTING false
-static void InitBuffers(bool Zero)
-{
- u16 *pBuffer16 = (u16 *)g_Buffer;
- int i;
-
- /* fill buffer with 16 bit counting pattern or zeros */
- for (i = 0; i < (TOTAL_BYTES / 2) ; i++) {
- if (!Zero) {
- pBuffer16[i] = (u16)i;
- } else {
- pBuffer16[i] = 0;
- }
- }
-}
-
-
-static bool CheckOneBuffer(u16 *pBuffer16, int Length)
-{
- int i;
- u16 startCount;
- bool success = true;
-
- /* get the starting count */
- startCount = pBuffer16[0];
- /* invert it, this is the expected value */
- startCount = ~startCount;
- /* scan the buffer and verify */
- for (i = 0; i < (Length / 2) ; i++,startCount++) {
- /* target will invert all the data */
- if ((u16)pBuffer16[i] != (u16)~startCount) {
- success = false;
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid Data Got:0x%X, Expecting:0x%X (offset:%d, total:%d) \n",
- pBuffer16[i], ((u16)~startCount), i, Length));
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("0x%X 0x%X 0x%X 0x%X \n",
- pBuffer16[i], pBuffer16[i + 1], pBuffer16[i + 2],pBuffer16[i+3]));
- break;
- }
- }
-
- return success;
-}
-
-static bool CheckBuffers(void)
-{
- int i;
- bool success = true;
- struct buffer_proc_list checkList[BUFFER_PROC_LIST_DEPTH];
-
- /* assemble the list */
- AssembleBufferList(checkList);
-
- /* scan the buffers and verify */
- for (i = 0; i < BUFFER_PROC_LIST_DEPTH ; i++) {
- success = CheckOneBuffer((u16 *)checkList[i].pBuffer, checkList[i].length);
- if (!success) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer : 0x%X, Length:%d failed verify \n",
- (u32)checkList[i].pBuffer, checkList[i].length));
- break;
- }
- }
-
- return success;
-}
-
- /* find the end marker for the last buffer we will be sending */
-static u16 GetEndMarker(void)
-{
- u8 *pBuffer;
- struct buffer_proc_list checkList[BUFFER_PROC_LIST_DEPTH];
-
- /* fill up buffers with the normal counting pattern */
- InitBuffers(FILL_COUNTING);
-
- /* assemble the list we will be sending down */
- AssembleBufferList(checkList);
- /* point to the last 2 bytes of the last buffer */
- pBuffer = &(checkList[BUFFER_PROC_LIST_DEPTH - 1].pBuffer[(checkList[BUFFER_PROC_LIST_DEPTH - 1].length) - 2]);
-
- /* the last count in the last buffer is the marker */
- return (u16)pBuffer[0] | ((u16)pBuffer[1] << 8);
-}
-
-#define ATH_PRINT_OUT_ZONE ATH_DEBUG_ERR
-
-/* send the ordered buffers to the target */
-static int SendBuffers(struct ar6k_device *pDev, int mbox)
-{
- int status = 0;
- u32 request = HIF_WR_SYNC_BLOCK_INC;
- struct buffer_proc_list sendList[BUFFER_PROC_LIST_DEPTH];
- int i;
- int totalBytes = 0;
- int paddedLength;
- int totalwPadding = 0;
-
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sending buffers on mailbox : %d \n",mbox));
-
- /* fill buffer with counting pattern */
- InitBuffers(FILL_COUNTING);
-
- /* assemble the order in which we send */
- AssembleBufferList(sendList);
-
- for (i = 0; i < BUFFER_PROC_LIST_DEPTH; i++) {
-
- /* we are doing block transfers, so we need to pad everything to a block size */
- paddedLength = (sendList[i].length + (g_BlockSizes[mbox] - 1)) &
- (~(g_BlockSizes[mbox] - 1));
-
- /* send each buffer synchronously */
- status = HIFReadWrite(pDev->HIFDevice,
- g_MailboxAddrs[mbox],
- sendList[i].pBuffer,
- paddedLength,
- request,
- NULL);
- if (status) {
- break;
- }
- totalBytes += sendList[i].length;
- totalwPadding += paddedLength;
- }
-
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sent %d bytes (%d padded bytes) to mailbox : %d \n",totalBytes,totalwPadding,mbox));
-
- return status;
-}
-
-/* poll the mailbox credit counter until we get a credit or timeout */
-static int GetCredits(struct ar6k_device *pDev, int mbox, int *pCredits)
-{
- int status = 0;
- int timeout = TEST_CREDITS_RECV_TIMEOUT;
- u8 credits = 0;
- u32 address;
-
- while (true) {
-
- /* Read the counter register to get credits, this auto-decrements */
- address = COUNT_DEC_ADDRESS + (AR6K_MAILBOXES + mbox) * 4;
- status = HIFReadWrite(pDev->HIFDevice, address, &credits, sizeof(credits),
- HIF_RD_SYNC_BYTE_FIX, NULL);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("Unable to decrement the command credit count register (mbox=%d)\n",mbox));
- status = A_ERROR;
- break;
- }
-
- if (credits) {
- break;
- }
-
- timeout--;
-
- if (timeout <= 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- (" Timeout reading credit registers (mbox=%d, address:0x%X) \n",mbox,address));
- status = A_ERROR;
- break;
- }
-
- /* delay a little, target may not be ready */
- A_MDELAY(1000);
-
- }
-
- if (status == 0) {
- *pCredits = credits;
- }
-
- return status;
-}
-
-
-/* wait for the buffers to come back */
-static int RecvBuffers(struct ar6k_device *pDev, int mbox)
-{
- int status = 0;
- u32 request = HIF_RD_SYNC_BLOCK_INC;
- struct buffer_proc_list recvList[BUFFER_PROC_LIST_DEPTH];
- int curBuffer;
- int credits;
- int i;
- int totalBytes = 0;
- int paddedLength;
- int totalwPadding = 0;
-
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for buffers on mailbox : %d \n",mbox));
-
- /* zero the buffers */
- InitBuffers(FILL_ZERO);
-
- /* assemble the order in which we should receive */
- AssembleBufferList(recvList);
-
- curBuffer = 0;
-
- while (curBuffer < BUFFER_PROC_LIST_DEPTH) {
-
- /* get number of buffers that have been completed, this blocks
- * until we get at least 1 credit or it times out */
- status = GetCredits(pDev, mbox, &credits);
-
- if (status) {
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got %d messages on mailbox : %d \n",credits, mbox));
-
- /* get all the buffers that are sitting on the queue */
- for (i = 0; i < credits; i++) {
- A_ASSERT(curBuffer < BUFFER_PROC_LIST_DEPTH);
- /* recv the current buffer synchronously, the buffers should come back in
- * order... with padding applied by the target */
- paddedLength = (recvList[curBuffer].length + (g_BlockSizes[mbox] - 1)) &
- (~(g_BlockSizes[mbox] - 1));
-
- status = HIFReadWrite(pDev->HIFDevice,
- g_MailboxAddrs[mbox],
- recvList[curBuffer].pBuffer,
- paddedLength,
- request,
- NULL);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to read %d bytes on mailbox:%d : address:0x%X \n",
- recvList[curBuffer].length, mbox, g_MailboxAddrs[mbox]));
- break;
- }
-
- totalwPadding += paddedLength;
- totalBytes += recvList[curBuffer].length;
- curBuffer++;
- }
-
- if (status) {
- break;
- }
- /* go back and get some more */
- credits = 0;
- }
-
- if (totalBytes != TEST_BYTES) {
- A_ASSERT(false);
- } else {
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got all buffers on mbox:%d total recv :%d (w/Padding : %d) \n",
- mbox, totalBytes, totalwPadding));
- }
-
- return status;
-
-
-}
-
-static int DoOneMboxHWTest(struct ar6k_device *pDev, int mbox)
-{
- int status;
-
- do {
- /* send out buffers */
- status = SendBuffers(pDev,mbox);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Sending buffers Failed : %d mbox:%d\n",status,mbox));
- break;
- }
-
- /* go get them, this will block */
- status = RecvBuffers(pDev, mbox);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Recv buffers Failed : %d mbox:%d\n",status,mbox));
- break;
- }
-
- /* check the returned data patterns */
- if (!CheckBuffers()) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer Verify Failed : mbox:%d\n",mbox));
- status = A_ERROR;
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" Send/Recv success! mailbox : %d \n",mbox));
-
- } while (false);
-
- return status;
-}
-
-/* here is where the test starts */
-int DoMboxHWTest(struct ar6k_device *pDev)
-{
- int i;
- int status;
- int credits = 0;
- u8 params[4];
- int numBufs;
- int bufferSize;
- u16 temp;
-
-
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest START - \n"));
-
- do {
- /* get the addresses for all 4 mailboxes */
- status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR,
- g_MailboxAddrs, sizeof(g_MailboxAddrs));
-
- if (status) {
- A_ASSERT(false);
- break;
- }
-
- /* get the block sizes */
- status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
- g_BlockSizes, sizeof(g_BlockSizes));
-
- if (status) {
- A_ASSERT(false);
- break;
- }
-
- /* note, the HIF layer usually reports mbox 0 to have a block size of
- * 1, but our test wants to run in block-mode for all mailboxes, so we treat all mailboxes
- * the same. */
- g_BlockSizes[0] = g_BlockSizes[1];
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Block Size to use: %d \n",g_BlockSizes[0]));
-
- if (g_BlockSizes[1] > BUFFER_BLOCK_PAD) {
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("%d Block size is too large for buffer pad %d\n",
- g_BlockSizes[1], BUFFER_BLOCK_PAD));
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for target.... \n"));
-
- /* the target lets us know it is ready by giving us 1 credit on
- * mailbox 0 */
- status = GetCredits(pDev, 0, &credits);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to wait for target ready \n"));
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Target is ready ...\n"));
-
- /* read the first 4 scratch registers */
- status = HIFReadWrite(pDev->HIFDevice,
- SCRATCH_ADDRESS,
- params,
- 4,
- HIF_RD_SYNC_BYTE_INC,
- NULL);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to wait get parameters \n"));
- break;
- }
-
- numBufs = params[0];
- bufferSize = (int)(((u16)params[2] << 8) | (u16)params[1]);
-
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE,
- ("Target parameters: bufs per mailbox:%d, buffer size:%d bytes (total space: %d, minimum required space (w/padding): %d) \n",
- numBufs, bufferSize, (numBufs * bufferSize), TOTAL_BYTES));
-
- if ((numBufs * bufferSize) < TOTAL_BYTES) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Not Enough buffer space to run test! need:%d, got:%d \n",
- TOTAL_BYTES, (numBufs*bufferSize)));
- status = A_ERROR;
- break;
- }
-
- temp = GetEndMarker();
-
- status = HIFReadWrite(pDev->HIFDevice,
- SCRATCH_ADDRESS + 4,
- (u8 *)&temp,
- 2,
- HIF_WR_SYNC_BYTE_INC,
- NULL);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write end marker \n"));
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("End Marker: 0x%X \n",temp));
-
- temp = (u16)g_BlockSizes[1];
- /* convert to a mask */
- temp = temp - 1;
- status = HIFReadWrite(pDev->HIFDevice,
- SCRATCH_ADDRESS + 6,
- (u8 *)&temp,
- 2,
- HIF_WR_SYNC_BYTE_INC,
- NULL);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write block mask \n"));
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Set Block Mask: 0x%X \n",temp));
-
- /* execute the test on each mailbox */
- for (i = 0; i < AR6K_MAILBOXES; i++) {
- status = DoOneMboxHWTest(pDev, i);
- if (status) {
- break;
- }
- }
-
- } while (false);
-
- if (status == 0) {
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - SUCCESS! - \n"));
- } else {
- AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - FAILED! - \n"));
- }
- /* don't let HTC_Start continue, the target is actually not running any HTC code */
- return A_ERROR;
-}
-#endif
-
-
-
diff --git a/drivers/staging/ath6kl/htc2/AR6000/ar6k.h b/drivers/staging/ath6kl/htc2/AR6000/ar6k.h
deleted file mode 100644
index e551dbe674d..00000000000
--- a/drivers/staging/ath6kl/htc2/AR6000/ar6k.h
+++ /dev/null
@@ -1,401 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="ar6k.h" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// AR6K device layer that handles register level I/O
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef AR6K_H_
-#define AR6K_H_
-
-#include "hci_transport_api.h"
-#include "../htc_debug.h"
-
-#define AR6K_MAILBOXES 4
-
-/* HTC runs over mailbox 0 */
-#define HTC_MAILBOX 0
-
-#define AR6K_TARGET_DEBUG_INTR_MASK 0x01
-
-#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \
- INT_STATUS_ENABLE_CPU_MASK | \
- INT_STATUS_ENABLE_COUNTER_MASK)
-
-
-//#define MBOXHW_UNIT_TEST 1
-
-PREPACK struct ar6k_irq_proc_registers {
- u8 host_int_status;
- u8 cpu_int_status;
- u8 error_int_status;
- u8 counter_int_status;
- u8 mbox_frame;
- u8 rx_lookahead_valid;
- u8 host_int_status2;
- u8 gmbox_rx_avail;
- u32 rx_lookahead[2];
- u32 rx_gmbox_lookahead_alias[2];
-} POSTPACK;
-
-#define AR6K_IRQ_PROC_REGS_SIZE sizeof(struct ar6k_irq_proc_registers)
-
-PREPACK struct ar6k_irq_enable_registers {
- u8 int_status_enable;
- u8 cpu_int_status_enable;
- u8 error_status_enable;
- u8 counter_int_status_enable;
-} POSTPACK;
-
-PREPACK struct ar6k_gmbox_ctrl_registers {
- u8 int_status_enable;
-} POSTPACK;
-
-#define AR6K_IRQ_ENABLE_REGS_SIZE sizeof(struct ar6k_irq_enable_registers)
-
-#define AR6K_REG_IO_BUFFER_SIZE 32
-#define AR6K_MAX_REG_IO_BUFFERS 8
-#define FROM_DMA_BUFFER true
-#define TO_DMA_BUFFER false
-#define AR6K_SCATTER_ENTRIES_PER_REQ 16
-#define AR6K_MAX_TRANSFER_SIZE_PER_SCATTER 16*1024
-#define AR6K_SCATTER_REQS 4
-#define AR6K_LEGACY_MAX_WRITE_LENGTH 2048
-
-#ifndef A_CACHE_LINE_PAD
-#define A_CACHE_LINE_PAD 128
-#endif
-#define AR6K_MIN_SCATTER_ENTRIES_PER_REQ 2
-#define AR6K_MIN_TRANSFER_SIZE_PER_SCATTER 4*1024
-
-/* buffers for ASYNC I/O */
-struct ar6k_async_reg_io_buffer {
- struct htc_packet HtcPacket; /* we use an HTC packet as a wrapper for our async register-based I/O */
- u8 _Pad1[A_CACHE_LINE_PAD];
- u8 Buffer[AR6K_REG_IO_BUFFER_SIZE]; /* cache-line safe with pads around */
- u8 _Pad2[A_CACHE_LINE_PAD];
-};
-
-struct ar6k_gmbox_info {
- void *pProtocolContext;
- int (*pMessagePendingCallBack)(void *pContext, u8 LookAheadBytes[], int ValidBytes);
- int (*pCreditsPendingCallback)(void *pContext, int NumCredits, bool CreditIRQEnabled);
- void (*pTargetFailureCallback)(void *pContext, int Status);
- void (*pStateDumpCallback)(void *pContext);
- bool CreditCountIRQEnabled;
-};
-
-struct ar6k_device {
- A_MUTEX_T Lock;
- u8 _Pad1[A_CACHE_LINE_PAD];
- struct ar6k_irq_proc_registers IrqProcRegisters; /* cache-line safe with pads around */
- u8 _Pad2[A_CACHE_LINE_PAD];
- struct ar6k_irq_enable_registers IrqEnableRegisters; /* cache-line safe with pads around */
- u8 _Pad3[A_CACHE_LINE_PAD];
- void *HIFDevice;
- u32 BlockSize;
- u32 BlockMask;
- struct hif_device_mbox_info MailBoxInfo;
- HIF_PENDING_EVENTS_FUNC GetPendingEventsFunc;
- void *HTCContext;
- struct htc_packet_queue RegisterIOList;
- struct ar6k_async_reg_io_buffer RegIOBuffers[AR6K_MAX_REG_IO_BUFFERS];
- void (*TargetFailureCallback)(void *Context);
- int (*MessagePendingCallback)(void *Context,
- u32 LookAheads[],
- int NumLookAheads,
- bool *pAsyncProc,
- int *pNumPktsFetched);
- HIF_DEVICE_IRQ_PROCESSING_MODE HifIRQProcessingMode;
- HIF_MASK_UNMASK_RECV_EVENT HifMaskUmaskRecvEvent;
- bool HifAttached;
- struct hif_device_irq_yield_params HifIRQYieldParams;
- bool DSRCanYield;
- int CurrentDSRRecvCount;
- struct hif_device_scatter_support_info HifScatterInfo;
- struct dl_list ScatterReqHead;
- bool ScatterIsVirtual;
- int MaxRecvBundleSize;
- int MaxSendBundleSize;
- struct ar6k_gmbox_info GMboxInfo;
- bool GMboxEnabled;
- struct ar6k_gmbox_ctrl_registers GMboxControlRegisters;
- int RecheckIRQStatusCnt;
-};
-
-#define LOCK_AR6K(p) A_MUTEX_LOCK(&(p)->Lock);
-#define UNLOCK_AR6K(p) A_MUTEX_UNLOCK(&(p)->Lock);
-#define REF_IRQ_STATUS_RECHECK(p) (p)->RecheckIRQStatusCnt = 1 /* note: no need to lock this, it only gets set */
-
-int DevSetup(struct ar6k_device *pDev);
-void DevCleanup(struct ar6k_device *pDev);
-int DevUnmaskInterrupts(struct ar6k_device *pDev);
-int DevMaskInterrupts(struct ar6k_device *pDev);
-int DevPollMboxMsgRecv(struct ar6k_device *pDev,
- u32 *pLookAhead,
- int TimeoutMS);
-int DevRWCompletionHandler(void *context, int status);
-int DevDsrHandler(void *context);
-int DevCheckPendingRecvMsgsAsync(void *context);
-void DevAsyncIrqProcessComplete(struct ar6k_device *pDev);
-void DevDumpRegisters(struct ar6k_device *pDev,
- struct ar6k_irq_proc_registers *pIrqProcRegs,
- struct ar6k_irq_enable_registers *pIrqEnableRegs);
-
-#define DEV_STOP_RECV_ASYNC true
-#define DEV_STOP_RECV_SYNC false
-#define DEV_ENABLE_RECV_ASYNC true
-#define DEV_ENABLE_RECV_SYNC false
-int DevStopRecv(struct ar6k_device *pDev, bool ASyncMode);
-int DevEnableRecv(struct ar6k_device *pDev, bool ASyncMode);
-int DevEnableInterrupts(struct ar6k_device *pDev);
-int DevDisableInterrupts(struct ar6k_device *pDev);
-int DevWaitForPendingRecv(struct ar6k_device *pDev,u32 TimeoutInMs,bool *pbIsRecvPending);
-
-#define DEV_CALC_RECV_PADDED_LEN(pDev, length) (((length) + (pDev)->BlockMask) & (~((pDev)->BlockMask)))
-#define DEV_CALC_SEND_PADDED_LEN(pDev, length) DEV_CALC_RECV_PADDED_LEN(pDev,length)
-#define DEV_IS_LEN_BLOCK_ALIGNED(pDev, length) (((length) % (pDev)->BlockSize) == 0)
-
-static INLINE int DevSendPacket(struct ar6k_device *pDev, struct htc_packet *pPacket, u32 SendLength) {
- u32 paddedLength;
- bool sync = (pPacket->Completion == NULL) ? true : false;
- int status;
-
- /* adjust the length to be a multiple of block size if appropriate */
- paddedLength = DEV_CALC_SEND_PADDED_LEN(pDev, SendLength);
-
-#if 0
- if (paddedLength > pPacket->BufferLength) {
- A_ASSERT(false);
- if (pPacket->Completion != NULL) {
- COMPLETE_HTC_PACKET(pPacket,A_EINVAL);
- return 0;
- }
- return A_EINVAL;
- }
-#endif
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
- ("DevSendPacket, Padded Length: %d Mbox:0x%X (mode:%s)\n",
- paddedLength,
- pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX],
- sync ? "SYNC" : "ASYNC"));
-
- status = HIFReadWrite(pDev->HIFDevice,
- pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX],
- pPacket->pBuffer,
- paddedLength, /* the padded length */
- sync ? HIF_WR_SYNC_BLOCK_INC : HIF_WR_ASYNC_BLOCK_INC,
- sync ? NULL : pPacket); /* pass the packet as the context to the HIF request */
-
- if (sync) {
- pPacket->Status = status;
- } else {
- if (status == A_PENDING) {
- status = 0;
- }
- }
-
- return status;
-}
-
-static INLINE int DevRecvPacket(struct ar6k_device *pDev, struct htc_packet *pPacket, u32 RecvLength) {
- u32 paddedLength;
- int status;
- bool sync = (pPacket->Completion == NULL) ? true : false;
-
- /* adjust the length to be a multiple of block size if appropriate */
- paddedLength = DEV_CALC_RECV_PADDED_LEN(pDev, RecvLength);
-
- if (paddedLength > pPacket->BufferLength) {
- A_ASSERT(false);
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("DevRecvPacket, Not enough space for padlen:%d recvlen:%d bufferlen:%d \n",
- paddedLength,RecvLength,pPacket->BufferLength));
- if (pPacket->Completion != NULL) {
- COMPLETE_HTC_PACKET(pPacket,A_EINVAL);
- return 0;
- }
- return A_EINVAL;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("DevRecvPacket (0x%lX : hdr:0x%X) Padded Length: %d Mbox:0x%X (mode:%s)\n",
- (unsigned long)pPacket, pPacket->PktInfo.AsRx.ExpectedHdr,
- paddedLength,
- pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX],
- sync ? "SYNC" : "ASYNC"));
-
- status = HIFReadWrite(pDev->HIFDevice,
- pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX],
- pPacket->pBuffer,
- paddedLength,
- sync ? HIF_RD_SYNC_BLOCK_FIX : HIF_RD_ASYNC_BLOCK_FIX,
- sync ? NULL : pPacket); /* pass the packet as the context to the HIF request */
-
- if (sync) {
- pPacket->Status = status;
- }
-
- return status;
-}
-
-#define DEV_CHECK_RECV_YIELD(pDev) \
- ((pDev)->CurrentDSRRecvCount >= (pDev)->HifIRQYieldParams.RecvPacketYieldCount)
-
-#define IS_DEV_IRQ_PROC_SYNC_MODE(pDev) (HIF_DEVICE_IRQ_SYNC_ONLY == (pDev)->HifIRQProcessingMode)
-#define IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pDev) ((pDev)->HifIRQProcessingMode != HIF_DEVICE_IRQ_SYNC_ONLY)
-
-/**************************************************/
-/****** Scatter Function and Definitions
- *
- *
- */
-
-int DevCopyScatterListToFromDMABuffer(struct hif_scatter_req *pReq, bool FromDMA);
-
- /* copy any READ data back into scatter list */
-#define DEV_FINISH_SCATTER_OPERATION(pR) \
-do { \
- if (!((pR)->CompletionStatus) && \
- !((pR)->Request & HIF_WRITE) && \
- ((pR)->ScatterMethod == HIF_SCATTER_DMA_BOUNCE)) { \
- (pR)->CompletionStatus = \
- DevCopyScatterListToFromDMABuffer((pR), \
- FROM_DMA_BUFFER); \
- } \
-} while (0)
-
- /* copy any WRITE data to bounce buffer */
-static INLINE int DEV_PREPARE_SCATTER_OPERATION(struct hif_scatter_req *pReq) {
- if ((pReq->Request & HIF_WRITE) && (pReq->ScatterMethod == HIF_SCATTER_DMA_BOUNCE)) {
- return DevCopyScatterListToFromDMABuffer(pReq,TO_DMA_BUFFER);
- } else {
- return 0;
- }
-}
-
-
-int DevSetupMsgBundling(struct ar6k_device *pDev, int MaxMsgsPerTransfer);
-
-int DevCleanupMsgBundling(struct ar6k_device *pDev);
-
-#define DEV_GET_MAX_MSG_PER_BUNDLE(pDev) (pDev)->HifScatterInfo.MaxScatterEntries
-#define DEV_GET_MAX_BUNDLE_LENGTH(pDev) (pDev)->HifScatterInfo.MaxTransferSizePerScatterReq
-#define DEV_ALLOC_SCATTER_REQ(pDev) \
- (pDev)->HifScatterInfo.pAllocateReqFunc((pDev)->ScatterIsVirtual ? (pDev) : (pDev)->HIFDevice)
-
-#define DEV_FREE_SCATTER_REQ(pDev,pR) \
- (pDev)->HifScatterInfo.pFreeReqFunc((pDev)->ScatterIsVirtual ? (pDev) : (pDev)->HIFDevice,(pR))
-
-#define DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev) (pDev)->MaxRecvBundleSize
-#define DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev) (pDev)->MaxSendBundleSize
-
-#define DEV_SCATTER_READ true
-#define DEV_SCATTER_WRITE false
-#define DEV_SCATTER_ASYNC true
-#define DEV_SCATTER_SYNC false
-int DevSubmitScatterRequest(struct ar6k_device *pDev, struct hif_scatter_req *pScatterReq, bool Read, bool Async);
-
-#ifdef MBOXHW_UNIT_TEST
-int DoMboxHWTest(struct ar6k_device *pDev);
-#endif
-
- /* completely virtual */
-struct dev_scatter_dma_virtual_info {
- u8 *pVirtDmaBuffer; /* dma-able buffer - CPU accessible address */
- u8 DataArea[1]; /* start of data area */
-};
-
-
-
-void DumpAR6KDevState(struct ar6k_device *pDev);
-
-/**************************************************/
-/****** GMBOX functions and definitions
- *
- *
- */
-
-#ifdef ATH_AR6K_ENABLE_GMBOX
-
-void DevCleanupGMbox(struct ar6k_device *pDev);
-int DevSetupGMbox(struct ar6k_device *pDev);
-int DevCheckGMboxInterrupts(struct ar6k_device *pDev);
-void DevNotifyGMboxTargetFailure(struct ar6k_device *pDev);
-
-#else
-
- /* compiled out */
-#define DevCleanupGMbox(p)
-#define DevCheckGMboxInterrupts(p) 0
-#define DevNotifyGMboxTargetFailure(p)
-
-static INLINE int DevSetupGMbox(struct ar6k_device *pDev) {
- pDev->GMboxEnabled = false;
- return 0;
-}
-
-#endif
-
-#ifdef ATH_AR6K_ENABLE_GMBOX
-
- /* GMBOX protocol modules must expose each of these internal APIs */
-HCI_TRANSPORT_HANDLE GMboxAttachProtocol(struct ar6k_device *pDev, struct hci_transport_config_info *pInfo);
-int GMboxProtocolInstall(struct ar6k_device *pDev);
-void GMboxProtocolUninstall(struct ar6k_device *pDev);
-
- /* API used by GMBOX protocol modules */
-struct ar6k_device *HTCGetAR6KDevice(void *HTCHandle);
-#define DEV_GMBOX_SET_PROTOCOL(pDev,recv_callback,credits_pending,failure,statedump,context) \
-{ \
- (pDev)->GMboxInfo.pProtocolContext = (context); \
- (pDev)->GMboxInfo.pMessagePendingCallBack = (recv_callback); \
- (pDev)->GMboxInfo.pCreditsPendingCallback = (credits_pending); \
- (pDev)->GMboxInfo.pTargetFailureCallback = (failure); \
- (pDev)->GMboxInfo.pStateDumpCallback = (statedump); \
-}
-
-#define DEV_GMBOX_GET_PROTOCOL(pDev) (pDev)->GMboxInfo.pProtocolContext
-
-int DevGMboxWrite(struct ar6k_device *pDev, struct htc_packet *pPacket, u32 WriteLength);
-int DevGMboxRead(struct ar6k_device *pDev, struct htc_packet *pPacket, u32 ReadLength);
-
-#define PROC_IO_ASYNC true
-#define PROC_IO_SYNC false
-typedef enum GMBOX_IRQ_ACTION_TYPE {
- GMBOX_ACTION_NONE = 0,
- GMBOX_DISABLE_ALL,
- GMBOX_ERRORS_IRQ_ENABLE,
- GMBOX_RECV_IRQ_ENABLE,
- GMBOX_RECV_IRQ_DISABLE,
- GMBOX_CREDIT_IRQ_ENABLE,
- GMBOX_CREDIT_IRQ_DISABLE,
-} GMBOX_IRQ_ACTION_TYPE;
-
-int DevGMboxIRQAction(struct ar6k_device *pDev, GMBOX_IRQ_ACTION_TYPE, bool AsyncMode);
-int DevGMboxReadCreditCounter(struct ar6k_device *pDev, bool AsyncMode, int *pCredits);
-int DevGMboxReadCreditSize(struct ar6k_device *pDev, int *pCreditSize);
-int DevGMboxRecvLookAheadPeek(struct ar6k_device *pDev, u8 *pLookAheadBuffer, int *pLookAheadBytes);
-int DevGMboxSetTargetInterrupt(struct ar6k_device *pDev, int SignalNumber, int AckTimeoutMS);
-
-#endif
-
-#endif /*AR6K_H_*/
diff --git a/drivers/staging/ath6kl/htc2/AR6000/ar6k_events.c b/drivers/staging/ath6kl/htc2/AR6000/ar6k_events.c
deleted file mode 100644
index d7af68f7056..00000000000
--- a/drivers/staging/ath6kl/htc2/AR6000/ar6k_events.c
+++ /dev/null
@@ -1,783 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="ar6k_events.c" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// AR6K Driver layer event handling (i.e. interrupts, message polling)
-//
-// Author(s): ="Atheros"
-//==============================================================================
-
-#include "a_config.h"
-#include "athdefs.h"
-#include "hw/mbox_host_reg.h"
-#include "a_osapi.h"
-#include "../htc_debug.h"
-#include "hif.h"
-#include "htc_packet.h"
-#include "ar6k.h"
-
-extern void AR6KFreeIOPacket(struct ar6k_device *pDev, struct htc_packet *pPacket);
-extern struct htc_packet *AR6KAllocIOPacket(struct ar6k_device *pDev);
-
-static int DevServiceDebugInterrupt(struct ar6k_device *pDev);
-
-#define DELAY_PER_INTERVAL_MS 10 /* 10 MS delay per polling interval */
-
-/* completion routine for ALL HIF layer async I/O */
-int DevRWCompletionHandler(void *context, int status)
-{
- struct htc_packet *pPacket = (struct htc_packet *)context;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("+DevRWCompletionHandler (Pkt:0x%lX) , Status: %d \n",
- (unsigned long)pPacket,
- status));
-
- COMPLETE_HTC_PACKET(pPacket,status);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("-DevRWCompletionHandler\n"));
-
- return 0;
-}
-
-/* mailbox recv message polling */
-int DevPollMboxMsgRecv(struct ar6k_device *pDev,
- u32 *pLookAhead,
- int TimeoutMS)
-{
- int status = 0;
- int timeout = TimeoutMS/DELAY_PER_INTERVAL_MS;
-
- A_ASSERT(timeout > 0);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+DevPollMboxMsgRecv \n"));
-
- while (true) {
-
- if (pDev->GetPendingEventsFunc != NULL) {
-
- struct hif_pending_events_info events;
-
-#ifdef THREAD_X
- events.Polling =1;
-#endif
-
- /* the HIF layer uses a special mechanism to get events, do this
- * synchronously */
- status = pDev->GetPendingEventsFunc(pDev->HIFDevice,
- &events,
- NULL);
- if (status)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to get pending events \n"));
- break;
- }
-
- if (events.Events & HIF_RECV_MSG_AVAIL)
- {
- /* there is a message available, the lookahead should be valid now */
- *pLookAhead = events.LookAhead;
-
- break;
- }
- } else {
-
- /* this is the standard HIF way.... */
- /* load the register table */
- status = HIFReadWrite(pDev->HIFDevice,
- HOST_INT_STATUS_ADDRESS,
- (u8 *)&pDev->IrqProcRegisters,
- AR6K_IRQ_PROC_REGS_SIZE,
- HIF_RD_SYNC_BYTE_INC,
- NULL);
-
- if (status){
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to read register table \n"));
- break;
- }
-
- /* check for MBOX data and valid lookahead */
- if (pDev->IrqProcRegisters.host_int_status & (1 << HTC_MAILBOX)) {
- if (pDev->IrqProcRegisters.rx_lookahead_valid & (1 << HTC_MAILBOX))
- {
- /* mailbox has a message and the look ahead is valid */
- *pLookAhead = pDev->IrqProcRegisters.rx_lookahead[HTC_MAILBOX];
- break;
- }
- }
-
- }
-
- timeout--;
-
- if (timeout <= 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (" Timeout waiting for recv message \n"));
- status = A_ERROR;
-
- /* check if the target asserted */
- if ( pDev->IrqProcRegisters.counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK) {
- /* target signaled an assert, process this pending interrupt
- * this will call the target failure handler */
- DevServiceDebugInterrupt(pDev);
- }
-
- break;
- }
-
- /* delay a little */
- A_MDELAY(DELAY_PER_INTERVAL_MS);
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" Retry Mbox Poll : %d \n",timeout));
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-DevPollMboxMsgRecv \n"));
-
- return status;
-}
-
-static int DevServiceCPUInterrupt(struct ar6k_device *pDev)
-{
- int status;
- u8 cpu_int_status;
- u8 regBuffer[4];
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("CPU Interrupt\n"));
- cpu_int_status = pDev->IrqProcRegisters.cpu_int_status &
- pDev->IrqEnableRegisters.cpu_int_status_enable;
- A_ASSERT(cpu_int_status);
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
- ("Valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
- cpu_int_status));
-
- /* Clear the interrupt */
- pDev->IrqProcRegisters.cpu_int_status &= ~cpu_int_status; /* W1C */
-
- /* set up the register transfer buffer to hit the register 4 times , this is done
- * to make the access 4-byte aligned to mitigate issues with host bus interconnects that
- * restrict bus transfer lengths to be a multiple of 4-bytes */
-
- /* set W1C value to clear the interrupt, this hits the register first */
- regBuffer[0] = cpu_int_status;
- /* the remaining 4 values are set to zero which have no-effect */
- regBuffer[1] = 0;
- regBuffer[2] = 0;
- regBuffer[3] = 0;
-
- status = HIFReadWrite(pDev->HIFDevice,
- CPU_INT_STATUS_ADDRESS,
- regBuffer,
- 4,
- HIF_WR_SYNC_BYTE_FIX,
- NULL);
-
- A_ASSERT(status == 0);
- return status;
-}
-
-
-static int DevServiceErrorInterrupt(struct ar6k_device *pDev)
-{
- int status;
- u8 error_int_status;
- u8 regBuffer[4];
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Error Interrupt\n"));
- error_int_status = pDev->IrqProcRegisters.error_int_status & 0x0F;
- A_ASSERT(error_int_status);
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
- ("Valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
- error_int_status));
-
- if (ERROR_INT_STATUS_WAKEUP_GET(error_int_status)) {
- /* Wakeup */
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Error : Wakeup\n"));
- }
-
- if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(error_int_status)) {
- /* Rx Underflow */
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Error : Rx Underflow\n"));
- }
-
- if (ERROR_INT_STATUS_TX_OVERFLOW_GET(error_int_status)) {
- /* Tx Overflow */
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Error : Tx Overflow\n"));
- }
-
- /* Clear the interrupt */
- pDev->IrqProcRegisters.error_int_status &= ~error_int_status; /* W1C */
-
- /* set up the register transfer buffer to hit the register 4 times , this is done
- * to make the access 4-byte aligned to mitigate issues with host bus interconnects that
- * restrict bus transfer lengths to be a multiple of 4-bytes */
-
- /* set W1C value to clear the interrupt, this hits the register first */
- regBuffer[0] = error_int_status;
- /* the remaining 4 values are set to zero which have no-effect */
- regBuffer[1] = 0;
- regBuffer[2] = 0;
- regBuffer[3] = 0;
-
- status = HIFReadWrite(pDev->HIFDevice,
- ERROR_INT_STATUS_ADDRESS,
- regBuffer,
- 4,
- HIF_WR_SYNC_BYTE_FIX,
- NULL);
-
- A_ASSERT(status == 0);
- return status;
-}
-
-static int DevServiceDebugInterrupt(struct ar6k_device *pDev)
-{
- u32 dummy;
- int status;
-
- /* Send a target failure event to the application */
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Target debug interrupt\n"));
-
- if (pDev->TargetFailureCallback != NULL) {
- pDev->TargetFailureCallback(pDev->HTCContext);
- }
-
- if (pDev->GMboxEnabled) {
- DevNotifyGMboxTargetFailure(pDev);
- }
-
- /* clear the interrupt , the debug error interrupt is
- * counter 0 */
- /* read counter to clear interrupt */
- status = HIFReadWrite(pDev->HIFDevice,
- COUNT_DEC_ADDRESS,
- (u8 *)&dummy,
- 4,
- HIF_RD_SYNC_BYTE_INC,
- NULL);
-
- A_ASSERT(status == 0);
- return status;
-}
-
-static int DevServiceCounterInterrupt(struct ar6k_device *pDev)
-{
- u8 counter_int_status;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Counter Interrupt\n"));
-
- counter_int_status = pDev->IrqProcRegisters.counter_int_status &
- pDev->IrqEnableRegisters.counter_int_status_enable;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
- ("Valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
- counter_int_status));
-
- /* Check if the debug interrupt is pending
- * NOTE: other modules like GMBOX may use the counter interrupt for
- * credit flow control on other counters, we only need to check for the debug assertion
- * counter interrupt */
- if (counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK) {
- return DevServiceDebugInterrupt(pDev);
- }
-
- return 0;
-}
-
-/* callback when our fetch to get interrupt status registers completes */
-static void DevGetEventAsyncHandler(void *Context, struct htc_packet *pPacket)
-{
- struct ar6k_device *pDev = (struct ar6k_device *)Context;
- u32 lookAhead = 0;
- bool otherInts = false;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevGetEventAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
-
- do {
-
- if (pPacket->Status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- (" GetEvents I/O request failed, status:%d \n", pPacket->Status));
- /* bail out, don't unmask HIF interrupt */
- break;
- }
-
- if (pDev->GetPendingEventsFunc != NULL) {
- /* the HIF layer collected the information for us */
- struct hif_pending_events_info *pEvents = (struct hif_pending_events_info *)pPacket->pBuffer;
- if (pEvents->Events & HIF_RECV_MSG_AVAIL) {
- lookAhead = pEvents->LookAhead;
- if (0 == lookAhead) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" DevGetEventAsyncHandler1, lookAhead is zero! \n"));
- }
- }
- if (pEvents->Events & HIF_OTHER_EVENTS) {
- otherInts = true;
- }
- } else {
- /* standard interrupt table handling.... */
- struct ar6k_irq_proc_registers *pReg = (struct ar6k_irq_proc_registers *)pPacket->pBuffer;
- u8 host_int_status;
-
- host_int_status = pReg->host_int_status & pDev->IrqEnableRegisters.int_status_enable;
-
- if (host_int_status & (1 << HTC_MAILBOX)) {
- host_int_status &= ~(1 << HTC_MAILBOX);
- if (pReg->rx_lookahead_valid & (1 << HTC_MAILBOX)) {
- /* mailbox has a message and the look ahead is valid */
- lookAhead = pReg->rx_lookahead[HTC_MAILBOX];
- if (0 == lookAhead) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" DevGetEventAsyncHandler2, lookAhead is zero! \n"));
- }
- }
- }
-
- if (host_int_status) {
- /* there are other interrupts to handle */
- otherInts = true;
- }
- }
-
- if (otherInts || (lookAhead == 0)) {
- /* if there are other interrupts to process, we cannot do this in the async handler so
- * ack the interrupt which will cause our sync handler to run again
- * if however there are no more messages, we can now ack the interrupt */
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
- (" Acking interrupt from DevGetEventAsyncHandler (otherints:%d, lookahead:0x%X)\n",
- otherInts, lookAhead));
- HIFAckInterrupt(pDev->HIFDevice);
- } else {
- int fetched = 0;
- int status;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
- (" DevGetEventAsyncHandler : detected another message, lookahead :0x%X \n",
- lookAhead));
- /* lookahead is non-zero and there are no other interrupts to service,
- * go get the next message */
- status = pDev->MessagePendingCallback(pDev->HTCContext, &lookAhead, 1, NULL, &fetched);
-
- if (!status && !fetched) {
- /* HTC layer could not pull out messages due to lack of resources, stop IRQ processing */
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("MessagePendingCallback did not pull any messages, force-ack \n"));
- DevAsyncIrqProcessComplete(pDev);
- }
- }
-
- } while (false);
-
- /* free this IO packet */
- AR6KFreeIOPacket(pDev,pPacket);
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevGetEventAsyncHandler \n"));
-}
-
-/* called by the HTC layer when it wants us to check if the device has any more pending
- * recv messages, this starts off a series of async requests to read interrupt registers */
-int DevCheckPendingRecvMsgsAsync(void *context)
-{
- struct ar6k_device *pDev = (struct ar6k_device *)context;
- int status = 0;
- struct htc_packet *pIOPacket;
-
- /* this is called in an ASYNC only context, we may NOT block, sleep or call any apis that can
- * cause us to switch contexts */
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevCheckPendingRecvMsgsAsync: (dev: 0x%lX)\n", (unsigned long)pDev));
-
- do {
-
- if (HIF_DEVICE_IRQ_SYNC_ONLY == pDev->HifIRQProcessingMode) {
- /* break the async processing chain right here, no need to continue.
- * The DevDsrHandler() will handle things in a loop when things are driven
- * synchronously */
- break;
- }
-
- /* an optimization to bypass reading the IRQ status registers unecessarily which can re-wake
- * the target, if upper layers determine that we are in a low-throughput mode, we can
- * rely on taking another interrupt rather than re-checking the status registers which can
- * re-wake the target */
- if (pDev->RecheckIRQStatusCnt == 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("Bypassing IRQ Status re-check, re-acking HIF interrupts\n"));
- /* ack interrupt */
- HIFAckInterrupt(pDev->HIFDevice);
- break;
- }
-
- /* first allocate one of our HTC packets we created for async I/O
- * we reuse HTC packet definitions so that we can use the completion mechanism
- * in DevRWCompletionHandler() */
- pIOPacket = AR6KAllocIOPacket(pDev);
-
- if (NULL == pIOPacket) {
- /* there should be only 1 asynchronous request out at a time to read these registers
- * so this should actually never happen */
- status = A_NO_MEMORY;
- A_ASSERT(false);
- break;
- }
-
- /* stick in our completion routine when the I/O operation completes */
- pIOPacket->Completion = DevGetEventAsyncHandler;
- pIOPacket->pContext = pDev;
-
- if (pDev->GetPendingEventsFunc) {
- /* HIF layer has it's own mechanism, pass the IO to it.. */
- status = pDev->GetPendingEventsFunc(pDev->HIFDevice,
- (struct hif_pending_events_info *)pIOPacket->pBuffer,
- pIOPacket);
-
- } else {
- /* standard way, read the interrupt register table asynchronously again */
- status = HIFReadWrite(pDev->HIFDevice,
- HOST_INT_STATUS_ADDRESS,
- pIOPacket->pBuffer,
- AR6K_IRQ_PROC_REGS_SIZE,
- HIF_RD_ASYNC_BYTE_INC,
- pIOPacket);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,(" Async IO issued to get interrupt status...\n"));
- } while (false);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevCheckPendingRecvMsgsAsync \n"));
-
- return status;
-}
-
-void DevAsyncIrqProcessComplete(struct ar6k_device *pDev)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("DevAsyncIrqProcessComplete - forcing HIF IRQ ACK \n"));
- HIFAckInterrupt(pDev->HIFDevice);
-}
-
-/* process pending interrupts synchronously */
-static int ProcessPendingIRQs(struct ar6k_device *pDev, bool *pDone, bool *pASyncProcessing)
-{
- int status = 0;
- u8 host_int_status = 0;
- u32 lookAhead = 0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+ProcessPendingIRQs: (dev: 0x%lX)\n", (unsigned long)pDev));
-
- /*** NOTE: the HIF implementation guarantees that the context of this call allows
- * us to perform SYNCHRONOUS I/O, that is we can block, sleep or call any API that
- * can block or switch thread/task ontexts.
- * This is a fully schedulable context.
- * */
- do {
-
- if (pDev->IrqEnableRegisters.int_status_enable == 0) {
- /* interrupt enables have been cleared, do not try to process any pending interrupts that
- * may result in more bus transactions. The target may be unresponsive at this
- * point. */
- break;
- }
-
- if (pDev->GetPendingEventsFunc != NULL) {
- struct hif_pending_events_info events;
-
-#ifdef THREAD_X
- events.Polling= 0;
-#endif
- /* the HIF layer uses a special mechanism to get events
- * get this synchronously */
- status = pDev->GetPendingEventsFunc(pDev->HIFDevice,
- &events,
- NULL);
-
- if (status) {
- break;
- }
-
- if (events.Events & HIF_RECV_MSG_AVAIL) {
- lookAhead = events.LookAhead;
- if (0 == lookAhead) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" ProcessPendingIRQs1 lookAhead is zero! \n"));
- }
- }
-
- if (!(events.Events & HIF_OTHER_EVENTS) ||
- !(pDev->IrqEnableRegisters.int_status_enable & OTHER_INTS_ENABLED)) {
- /* no need to read the register table, no other interesting interrupts.
- * Some interfaces (like SPI) can shadow interrupt sources without
- * requiring the host to do a full table read */
- break;
- }
-
- /* otherwise fall through and read the register table */
- }
-
- /*
- * Read the first 28 bytes of the HTC register table. This will yield us
- * the value of different int status registers and the lookahead
- * registers.
- * length = sizeof(int_status) + sizeof(cpu_int_status) +
- * sizeof(error_int_status) + sizeof(counter_int_status) +
- * sizeof(mbox_frame) + sizeof(rx_lookahead_valid) +
- * sizeof(hole) + sizeof(rx_lookahead) +
- * sizeof(int_status_enable) + sizeof(cpu_int_status_enable) +
- * sizeof(error_status_enable) +
- * sizeof(counter_int_status_enable);
- *
- */
-#ifdef CONFIG_MMC_SDHCI_S3C
- pDev->IrqProcRegisters.host_int_status = 0;
- pDev->IrqProcRegisters.rx_lookahead_valid = 0;
- pDev->IrqProcRegisters.host_int_status2 = 0;
- pDev->IrqProcRegisters.rx_lookahead[0] = 0;
- pDev->IrqProcRegisters.rx_lookahead[1] = 0xaaa5555;
-#endif /* CONFIG_MMC_SDHCI_S3C */
- status = HIFReadWrite(pDev->HIFDevice,
- HOST_INT_STATUS_ADDRESS,
- (u8 *)&pDev->IrqProcRegisters,
- AR6K_IRQ_PROC_REGS_SIZE,
- HIF_RD_SYNC_BYTE_INC,
- NULL);
-
- if (status) {
- break;
- }
-
-#ifdef ATH_DEBUG_MODULE
- if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_IRQ)) {
- DevDumpRegisters(pDev,
- &pDev->IrqProcRegisters,
- &pDev->IrqEnableRegisters);
- }
-#endif
-
- /* Update only those registers that are enabled */
- host_int_status = pDev->IrqProcRegisters.host_int_status &
- pDev->IrqEnableRegisters.int_status_enable;
-
- if (NULL == pDev->GetPendingEventsFunc) {
- /* only look at mailbox status if the HIF layer did not provide this function,
- * on some HIF interfaces reading the RX lookahead is not valid to do */
- if (host_int_status & (1 << HTC_MAILBOX)) {
- /* mask out pending mailbox value, we use "lookAhead" as the real flag for
- * mailbox processing below */
- host_int_status &= ~(1 << HTC_MAILBOX);
- if (pDev->IrqProcRegisters.rx_lookahead_valid & (1 << HTC_MAILBOX)) {
- /* mailbox has a message and the look ahead is valid */
- lookAhead = pDev->IrqProcRegisters.rx_lookahead[HTC_MAILBOX];
- if (0 == lookAhead) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" ProcessPendingIRQs2, lookAhead is zero! \n"));
- }
- }
- }
- } else {
- /* not valid to check if the HIF has another mechanism for reading mailbox pending status*/
- host_int_status &= ~(1 << HTC_MAILBOX);
- }
-
- if (pDev->GMboxEnabled) {
- /*call GMBOX layer to process any interrupts of interest */
- status = DevCheckGMboxInterrupts(pDev);
- }
-
- } while (false);
-
-
- do {
-
- /* did the interrupt status fetches succeed? */
- if (status) {
- break;
- }
-
- if ((0 == host_int_status) && (0 == lookAhead)) {
- /* nothing to process, the caller can use this to break out of a loop */
- *pDone = true;
- break;
- }
-
- if (lookAhead != 0) {
- int fetched = 0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("Pending mailbox message, LookAhead: 0x%X\n",lookAhead));
- /* Mailbox Interrupt, the HTC layer may issue async requests to empty the
- * mailbox...
- * When emptying the recv mailbox we use the async handler above called from the
- * completion routine of the callers read request. This can improve performance
- * by reducing context switching when we rapidly pull packets */
- status = pDev->MessagePendingCallback(pDev->HTCContext, &lookAhead, 1, pASyncProcessing, &fetched);
- if (status) {
- break;
- }
-
- if (!fetched) {
- /* HTC could not pull any messages out due to lack of resources */
- /* force DSR handler to ack the interrupt */
- *pASyncProcessing = false;
- pDev->RecheckIRQStatusCnt = 0;
- }
- }
-
- /* now handle the rest of them */
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
- (" Valid interrupt source(s) for OTHER interrupts: 0x%x\n",
- host_int_status));
-
- if (HOST_INT_STATUS_CPU_GET(host_int_status)) {
- /* CPU Interrupt */
- status = DevServiceCPUInterrupt(pDev);
- if (status){
- break;
- }
- }
-
- if (HOST_INT_STATUS_ERROR_GET(host_int_status)) {
- /* Error Interrupt */
- status = DevServiceErrorInterrupt(pDev);
- if (status){
- break;
- }
- }
-
- if (HOST_INT_STATUS_COUNTER_GET(host_int_status)) {
- /* Counter Interrupt */
- status = DevServiceCounterInterrupt(pDev);
- if (status){
- break;
- }
- }
-
- } while (false);
-
- /* an optimization to bypass reading the IRQ status registers unecessarily which can re-wake
- * the target, if upper layers determine that we are in a low-throughput mode, we can
- * rely on taking another interrupt rather than re-checking the status registers which can
- * re-wake the target.
- *
- * NOTE : for host interfaces that use the special GetPendingEventsFunc, this optimization cannot
- * be used due to possible side-effects. For example, SPI requires the host to drain all
- * messages from the mailbox before exiting the ISR routine. */
- if (!(*pASyncProcessing) && (pDev->RecheckIRQStatusCnt == 0) && (pDev->GetPendingEventsFunc == NULL)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("Bypassing IRQ Status re-check, forcing done \n"));
- *pDone = true;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-ProcessPendingIRQs: (done:%d, async:%d) status=%d \n",
- *pDone, *pASyncProcessing, status));
-
- return status;
-}
-
-
-/* Synchronousinterrupt handler, this handler kicks off all interrupt processing.*/
-int DevDsrHandler(void *context)
-{
- struct ar6k_device *pDev = (struct ar6k_device *)context;
- int status = 0;
- bool done = false;
- bool asyncProc = false;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevDsrHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
-
- /* reset the recv counter that tracks when we need to yield from the DSR */
- pDev->CurrentDSRRecvCount = 0;
- /* reset counter used to flag a re-scan of IRQ status registers on the target */
- pDev->RecheckIRQStatusCnt = 0;
-
- while (!done) {
- status = ProcessPendingIRQs(pDev, &done, &asyncProc);
- if (status) {
- break;
- }
-
- if (HIF_DEVICE_IRQ_SYNC_ONLY == pDev->HifIRQProcessingMode) {
- /* the HIF layer does not allow async IRQ processing, override the asyncProc flag */
- asyncProc = false;
- /* this will cause us to re-enter ProcessPendingIRQ() and re-read interrupt status registers.
- * this has a nice side effect of blocking us until all async read requests are completed.
- * This behavior is required on some HIF implementations that do not allow ASYNC
- * processing in interrupt handlers (like Windows CE) */
-
- if (pDev->DSRCanYield && DEV_CHECK_RECV_YIELD(pDev)) {
- /* ProcessPendingIRQs() pulled enough recv messages to satisfy the yield count, stop
- * checking for more messages and return */
- break;
- }
- }
-
- if (asyncProc) {
- /* the function performed some async I/O for performance, we
- need to exit the ISR immediately, the check below will prevent the interrupt from being
- Ack'd while we handle it asynchronously */
- break;
- }
-
- }
-
- if (!status && !asyncProc) {
- /* Ack the interrupt only if :
- * 1. we did not get any errors in processing interrupts
- * 2. there are no outstanding async processing requests */
- if (pDev->DSRCanYield) {
- /* if the DSR can yield do not ACK the interrupt, there could be more pending messages.
- * The HIF layer must ACK the interrupt on behalf of HTC */
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,(" Yield in effect (cur RX count: %d) \n", pDev->CurrentDSRRecvCount));
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,(" Acking interrupt from DevDsrHandler \n"));
- HIFAckInterrupt(pDev->HIFDevice);
- }
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevDsrHandler \n"));
- return status;
-}
-
-#ifdef ATH_DEBUG_MODULE
-void DumpAR6KDevState(struct ar6k_device *pDev)
-{
- int status;
- struct ar6k_irq_enable_registers regs;
- struct ar6k_irq_proc_registers procRegs;
-
- LOCK_AR6K(pDev);
- /* copy into our temp area */
- memcpy(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
- UNLOCK_AR6K(pDev);
-
- /* load the register table from the device */
- status = HIFReadWrite(pDev->HIFDevice,
- HOST_INT_STATUS_ADDRESS,
- (u8 *)&procRegs,
- AR6K_IRQ_PROC_REGS_SIZE,
- HIF_RD_SYNC_BYTE_INC,
- NULL);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("DumpAR6KDevState : Failed to read register table (%d) \n",status));
- return;
- }
-
- DevDumpRegisters(pDev,&procRegs,&regs);
-
- if (pDev->GMboxInfo.pStateDumpCallback != NULL) {
- pDev->GMboxInfo.pStateDumpCallback(pDev->GMboxInfo.pProtocolContext);
- }
-
- /* dump any bus state at the HIF layer */
- HIFConfigureDevice(pDev->HIFDevice,HIF_DEVICE_DEBUG_BUS_STATE,NULL,0);
-
-}
-#endif
-
-
diff --git a/drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox.c b/drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox.c
deleted file mode 100644
index 725540f9add..00000000000
--- a/drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox.c
+++ /dev/null
@@ -1,755 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="ar6k_gmbox.c" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Generic MBOX API implementation
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#include "a_config.h"
-#include "athdefs.h"
-#include "a_osapi.h"
-#include "../htc_debug.h"
-#include "hif.h"
-#include "htc_packet.h"
-#include "ar6k.h"
-#include "hw/mbox_host_reg.h"
-#include "gmboxif.h"
-
-/*
- * This file provides management functions and a toolbox for GMBOX protocol modules.
- * Only one protocol module can be installed at a time. The determination of which protocol
- * module is installed is determined at compile time.
- *
- */
-#ifdef ATH_AR6K_ENABLE_GMBOX
- /* GMBOX definitions */
-#define GMBOX_INT_STATUS_ENABLE_REG 0x488
-#define GMBOX_INT_STATUS_RX_DATA (1 << 0)
-#define GMBOX_INT_STATUS_TX_OVERFLOW (1 << 1)
-#define GMBOX_INT_STATUS_RX_OVERFLOW (1 << 2)
-
-#define GMBOX_LOOKAHEAD_MUX_REG 0x498
-#define GMBOX_LA_MUX_OVERRIDE_2_3 (1 << 0)
-
-#define AR6K_GMBOX_CREDIT_DEC_ADDRESS (COUNT_DEC_ADDRESS + 4 * AR6K_GMBOX_CREDIT_COUNTER)
-#define AR6K_GMBOX_CREDIT_SIZE_ADDRESS (COUNT_ADDRESS + AR6K_GMBOX_CREDIT_SIZE_COUNTER)
-
-
- /* external APIs for allocating and freeing internal I/O packets to handle ASYNC I/O */
-extern void AR6KFreeIOPacket(struct ar6k_device *pDev, struct htc_packet *pPacket);
-extern struct htc_packet *AR6KAllocIOPacket(struct ar6k_device *pDev);
-
-
-/* callback when our fetch to enable/disable completes */
-static void DevGMboxIRQActionAsyncHandler(void *Context, struct htc_packet *pPacket)
-{
- struct ar6k_device *pDev = (struct ar6k_device *)Context;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevGMboxIRQActionAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
-
- if (pPacket->Status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("IRQAction Operation (%d) failed! status:%d \n", pPacket->PktInfo.AsRx.HTCRxFlags,pPacket->Status));
- }
- /* free this IO packet */
- AR6KFreeIOPacket(pDev,pPacket);
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevGMboxIRQActionAsyncHandler \n"));
-}
-
-static int DevGMboxCounterEnableDisable(struct ar6k_device *pDev, GMBOX_IRQ_ACTION_TYPE IrqAction, bool AsyncMode)
-{
- int status = 0;
- struct ar6k_irq_enable_registers regs;
- struct htc_packet *pIOPacket = NULL;
-
- LOCK_AR6K(pDev);
-
- if (GMBOX_CREDIT_IRQ_ENABLE == IrqAction) {
- pDev->GMboxInfo.CreditCountIRQEnabled = true;
- pDev->IrqEnableRegisters.counter_int_status_enable |=
- COUNTER_INT_STATUS_ENABLE_BIT_SET(1 << AR6K_GMBOX_CREDIT_COUNTER);
- pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_COUNTER_SET(0x01);
- } else {
- pDev->GMboxInfo.CreditCountIRQEnabled = false;
- pDev->IrqEnableRegisters.counter_int_status_enable &=
- ~(COUNTER_INT_STATUS_ENABLE_BIT_SET(1 << AR6K_GMBOX_CREDIT_COUNTER));
- }
- /* copy into our temp area */
- memcpy(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
-
- UNLOCK_AR6K(pDev);
-
- do {
-
- if (AsyncMode) {
-
- pIOPacket = AR6KAllocIOPacket(pDev);
-
- if (NULL == pIOPacket) {
- status = A_NO_MEMORY;
- A_ASSERT(false);
- break;
- }
-
- /* copy values to write to our async I/O buffer */
- memcpy(pIOPacket->pBuffer,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
-
- /* stick in our completion routine when the I/O operation completes */
- pIOPacket->Completion = DevGMboxIRQActionAsyncHandler;
- pIOPacket->pContext = pDev;
- pIOPacket->PktInfo.AsRx.HTCRxFlags = IrqAction;
- /* write it out asynchronously */
- HIFReadWrite(pDev->HIFDevice,
- INT_STATUS_ENABLE_ADDRESS,
- pIOPacket->pBuffer,
- AR6K_IRQ_ENABLE_REGS_SIZE,
- HIF_WR_ASYNC_BYTE_INC,
- pIOPacket);
-
- pIOPacket = NULL;
- break;
- }
-
- /* if we get here we are doing it synchronously */
- status = HIFReadWrite(pDev->HIFDevice,
- INT_STATUS_ENABLE_ADDRESS,
- &regs.int_status_enable,
- AR6K_IRQ_ENABLE_REGS_SIZE,
- HIF_WR_SYNC_BYTE_INC,
- NULL);
- } while (false);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- (" IRQAction Operation (%d) failed! status:%d \n", IrqAction, status));
- } else {
- if (!AsyncMode) {
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
- (" IRQAction Operation (%d) success \n", IrqAction));
- }
- }
-
- if (pIOPacket != NULL) {
- AR6KFreeIOPacket(pDev,pIOPacket);
- }
-
- return status;
-}
-
-
-int DevGMboxIRQAction(struct ar6k_device *pDev, GMBOX_IRQ_ACTION_TYPE IrqAction, bool AsyncMode)
-{
- int status = 0;
- struct htc_packet *pIOPacket = NULL;
- u8 GMboxIntControl[4];
-
- if (GMBOX_CREDIT_IRQ_ENABLE == IrqAction) {
- return DevGMboxCounterEnableDisable(pDev, GMBOX_CREDIT_IRQ_ENABLE, AsyncMode);
- } else if(GMBOX_CREDIT_IRQ_DISABLE == IrqAction) {
- return DevGMboxCounterEnableDisable(pDev, GMBOX_CREDIT_IRQ_DISABLE, AsyncMode);
- }
-
- if (GMBOX_DISABLE_ALL == IrqAction) {
- /* disable credit IRQ, those are on a different set of registers */
- DevGMboxCounterEnableDisable(pDev, GMBOX_CREDIT_IRQ_DISABLE, AsyncMode);
- }
-
- /* take the lock to protect interrupt enable shadows */
- LOCK_AR6K(pDev);
-
- switch (IrqAction) {
-
- case GMBOX_DISABLE_ALL:
- pDev->GMboxControlRegisters.int_status_enable = 0;
- break;
- case GMBOX_ERRORS_IRQ_ENABLE:
- pDev->GMboxControlRegisters.int_status_enable |= GMBOX_INT_STATUS_TX_OVERFLOW |
- GMBOX_INT_STATUS_RX_OVERFLOW;
- break;
- case GMBOX_RECV_IRQ_ENABLE:
- pDev->GMboxControlRegisters.int_status_enable |= GMBOX_INT_STATUS_RX_DATA;
- break;
- case GMBOX_RECV_IRQ_DISABLE:
- pDev->GMboxControlRegisters.int_status_enable &= ~GMBOX_INT_STATUS_RX_DATA;
- break;
- case GMBOX_ACTION_NONE:
- default:
- A_ASSERT(false);
- break;
- }
-
- GMboxIntControl[0] = pDev->GMboxControlRegisters.int_status_enable;
- GMboxIntControl[1] = GMboxIntControl[0];
- GMboxIntControl[2] = GMboxIntControl[0];
- GMboxIntControl[3] = GMboxIntControl[0];
-
- UNLOCK_AR6K(pDev);
-
- do {
-
- if (AsyncMode) {
-
- pIOPacket = AR6KAllocIOPacket(pDev);
-
- if (NULL == pIOPacket) {
- status = A_NO_MEMORY;
- A_ASSERT(false);
- break;
- }
-
- /* copy values to write to our async I/O buffer */
- memcpy(pIOPacket->pBuffer,GMboxIntControl,sizeof(GMboxIntControl));
-
- /* stick in our completion routine when the I/O operation completes */
- pIOPacket->Completion = DevGMboxIRQActionAsyncHandler;
- pIOPacket->pContext = pDev;
- pIOPacket->PktInfo.AsRx.HTCRxFlags = IrqAction;
- /* write it out asynchronously */
- HIFReadWrite(pDev->HIFDevice,
- GMBOX_INT_STATUS_ENABLE_REG,
- pIOPacket->pBuffer,
- sizeof(GMboxIntControl),
- HIF_WR_ASYNC_BYTE_FIX,
- pIOPacket);
- pIOPacket = NULL;
- break;
- }
-
- /* if we get here we are doing it synchronously */
-
- status = HIFReadWrite(pDev->HIFDevice,
- GMBOX_INT_STATUS_ENABLE_REG,
- GMboxIntControl,
- sizeof(GMboxIntControl),
- HIF_WR_SYNC_BYTE_FIX,
- NULL);
-
- } while (false);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- (" IRQAction Operation (%d) failed! status:%d \n", IrqAction, status));
- } else {
- if (!AsyncMode) {
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
- (" IRQAction Operation (%d) success \n", IrqAction));
- }
- }
-
- if (pIOPacket != NULL) {
- AR6KFreeIOPacket(pDev,pIOPacket);
- }
-
- return status;
-}
-
-void DevCleanupGMbox(struct ar6k_device *pDev)
-{
- if (pDev->GMboxEnabled) {
- pDev->GMboxEnabled = false;
- GMboxProtocolUninstall(pDev);
- }
-}
-
-int DevSetupGMbox(struct ar6k_device *pDev)
-{
- int status = 0;
- u8 muxControl[4];
-
- do {
-
- if (0 == pDev->MailBoxInfo.GMboxAddress) {
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,(" GMBOX Advertised: Address:0x%X , size:%d \n",
- pDev->MailBoxInfo.GMboxAddress, pDev->MailBoxInfo.GMboxSize));
-
- status = DevGMboxIRQAction(pDev, GMBOX_DISABLE_ALL, PROC_IO_SYNC);
-
- if (status) {
- break;
- }
-
- /* write to mailbox look ahead mux control register, we want the
- * GMBOX lookaheads to appear on lookaheads 2 and 3
- * the register is 1-byte wide so we need to hit it 4 times to align the operation
- * to 4-bytes */
- muxControl[0] = GMBOX_LA_MUX_OVERRIDE_2_3;
- muxControl[1] = GMBOX_LA_MUX_OVERRIDE_2_3;
- muxControl[2] = GMBOX_LA_MUX_OVERRIDE_2_3;
- muxControl[3] = GMBOX_LA_MUX_OVERRIDE_2_3;
-
- status = HIFReadWrite(pDev->HIFDevice,
- GMBOX_LOOKAHEAD_MUX_REG,
- muxControl,
- sizeof(muxControl),
- HIF_WR_SYNC_BYTE_FIX, /* hit this register 4 times */
- NULL);
-
- if (status) {
- break;
- }
-
- status = GMboxProtocolInstall(pDev);
-
- if (status) {
- break;
- }
-
- pDev->GMboxEnabled = true;
-
- } while (false);
-
- return status;
-}
-
-int DevCheckGMboxInterrupts(struct ar6k_device *pDev)
-{
- int status = 0;
- u8 counter_int_status;
- int credits;
- u8 host_int_status2;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("+DevCheckGMboxInterrupts \n"));
-
- /* the caller guarantees that this is a context that allows for blocking I/O */
-
- do {
-
- host_int_status2 = pDev->IrqProcRegisters.host_int_status2 &
- pDev->GMboxControlRegisters.int_status_enable;
-
- if (host_int_status2 & GMBOX_INT_STATUS_TX_OVERFLOW) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("GMBOX : TX Overflow \n"));
- status = A_ECOMM;
- }
-
- if (host_int_status2 & GMBOX_INT_STATUS_RX_OVERFLOW) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("GMBOX : RX Overflow \n"));
- status = A_ECOMM;
- }
-
- if (status) {
- if (pDev->GMboxInfo.pTargetFailureCallback != NULL) {
- pDev->GMboxInfo.pTargetFailureCallback(pDev->GMboxInfo.pProtocolContext, status);
- }
- break;
- }
-
- if (host_int_status2 & GMBOX_INT_STATUS_RX_DATA) {
- if (pDev->IrqProcRegisters.gmbox_rx_avail > 0) {
- A_ASSERT(pDev->GMboxInfo.pMessagePendingCallBack != NULL);
- status = pDev->GMboxInfo.pMessagePendingCallBack(
- pDev->GMboxInfo.pProtocolContext,
- (u8 *)&pDev->IrqProcRegisters.rx_gmbox_lookahead_alias[0],
- pDev->IrqProcRegisters.gmbox_rx_avail);
- }
- }
-
- if (status) {
- break;
- }
-
- counter_int_status = pDev->IrqProcRegisters.counter_int_status &
- pDev->IrqEnableRegisters.counter_int_status_enable;
-
- /* check if credit interrupt is pending */
- if (counter_int_status & (COUNTER_INT_STATUS_ENABLE_BIT_SET(1 << AR6K_GMBOX_CREDIT_COUNTER))) {
-
- /* do synchronous read */
- status = DevGMboxReadCreditCounter(pDev, PROC_IO_SYNC, &credits);
-
- if (status) {
- break;
- }
-
- A_ASSERT(pDev->GMboxInfo.pCreditsPendingCallback != NULL);
- status = pDev->GMboxInfo.pCreditsPendingCallback(pDev->GMboxInfo.pProtocolContext,
- credits,
- pDev->GMboxInfo.CreditCountIRQEnabled);
- }
-
- } while (false);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("-DevCheckGMboxInterrupts (%d) \n",status));
-
- return status;
-}
-
-
-int DevGMboxWrite(struct ar6k_device *pDev, struct htc_packet *pPacket, u32 WriteLength)
-{
- u32 paddedLength;
- bool sync = (pPacket->Completion == NULL) ? true : false;
- int status;
- u32 address;
-
- /* adjust the length to be a multiple of block size if appropriate */
- paddedLength = DEV_CALC_SEND_PADDED_LEN(pDev, WriteLength);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
- ("DevGMboxWrite, Padded Length: %d Mbox:0x%X (mode:%s)\n",
- WriteLength,
- pDev->MailBoxInfo.GMboxAddress,
- sync ? "SYNC" : "ASYNC"));
-
- /* last byte of packet has to hit the EOM marker */
- address = pDev->MailBoxInfo.GMboxAddress + pDev->MailBoxInfo.GMboxSize - paddedLength;
-
- status = HIFReadWrite(pDev->HIFDevice,
- address,
- pPacket->pBuffer,
- paddedLength, /* the padded length */
- sync ? HIF_WR_SYNC_BLOCK_INC : HIF_WR_ASYNC_BLOCK_INC,
- sync ? NULL : pPacket); /* pass the packet as the context to the HIF request */
-
- if (sync) {
- pPacket->Status = status;
- } else {
- if (status == A_PENDING) {
- status = 0;
- }
- }
-
- return status;
-}
-
-int DevGMboxRead(struct ar6k_device *pDev, struct htc_packet *pPacket, u32 ReadLength)
-{
-
- u32 paddedLength;
- int status;
- bool sync = (pPacket->Completion == NULL) ? true : false;
-
- /* adjust the length to be a multiple of block size if appropriate */
- paddedLength = DEV_CALC_RECV_PADDED_LEN(pDev, ReadLength);
-
- if (paddedLength > pPacket->BufferLength) {
- A_ASSERT(false);
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("DevGMboxRead, Not enough space for padlen:%d recvlen:%d bufferlen:%d \n",
- paddedLength,ReadLength,pPacket->BufferLength));
- if (pPacket->Completion != NULL) {
- COMPLETE_HTC_PACKET(pPacket,A_EINVAL);
- return 0;
- }
- return A_EINVAL;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("DevGMboxRead (0x%lX : hdr:0x%X) Padded Length: %d Mbox:0x%X (mode:%s)\n",
- (unsigned long)pPacket, pPacket->PktInfo.AsRx.ExpectedHdr,
- paddedLength,
- pDev->MailBoxInfo.GMboxAddress,
- sync ? "SYNC" : "ASYNC"));
-
- status = HIFReadWrite(pDev->HIFDevice,
- pDev->MailBoxInfo.GMboxAddress,
- pPacket->pBuffer,
- paddedLength,
- sync ? HIF_RD_SYNC_BLOCK_FIX : HIF_RD_ASYNC_BLOCK_FIX,
- sync ? NULL : pPacket); /* pass the packet as the context to the HIF request */
-
- if (sync) {
- pPacket->Status = status;
- }
-
- return status;
-}
-
-
-static int ProcessCreditCounterReadBuffer(u8 *pBuffer, int Length)
-{
- int credits = 0;
-
- /* theory of how this works:
- * We read the credit decrement register multiple times on a byte-wide basis.
- * The number of times (32) aligns the I/O operation to be a multiple of 4 bytes and provides a
- * reasonable chance to acquire "all" pending credits in a single I/O operation.
- *
- * Once we obtain the filled buffer, we can walk through it looking for credit decrement transitions.
- * Each non-zero byte represents a single credit decrement (which is a credit given back to the host)
- * For example if the target provides 3 credits and added 4 more during the 32-byte read operation the following
- * pattern "could" appear:
- *
- * 0x3 0x2 0x1 0x0 0x0 0x0 0x0 0x0 0x1 0x0 0x1 0x0 0x1 0x0 0x1 0x0 ......rest zeros
- * <---------> <----------------------------->
- * \_ credits aleady there \_ target adding 4 more credits
- *
- * The total available credits would be 7, since there are 7 non-zero bytes in the buffer.
- *
- * */
-
- if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
- DebugDumpBytes(pBuffer, Length, "GMBOX Credit read buffer");
- }
-
- while (Length) {
- if (*pBuffer != 0) {
- credits++;
- }
- Length--;
- pBuffer++;
- }
-
- return credits;
-}
-
-
-/* callback when our fetch to enable/disable completes */
-static void DevGMboxReadCreditsAsyncHandler(void *Context, struct htc_packet *pPacket)
-{
- struct ar6k_device *pDev = (struct ar6k_device *)Context;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevGMboxReadCreditsAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
-
- if (pPacket->Status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("Read Credit Operation failed! status:%d \n", pPacket->Status));
- } else {
- int credits = 0;
- credits = ProcessCreditCounterReadBuffer(pPacket->pBuffer, AR6K_REG_IO_BUFFER_SIZE);
- pDev->GMboxInfo.pCreditsPendingCallback(pDev->GMboxInfo.pProtocolContext,
- credits,
- pDev->GMboxInfo.CreditCountIRQEnabled);
-
-
- }
- /* free this IO packet */
- AR6KFreeIOPacket(pDev,pPacket);
- AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevGMboxReadCreditsAsyncHandler \n"));
-}
-
-int DevGMboxReadCreditCounter(struct ar6k_device *pDev, bool AsyncMode, int *pCredits)
-{
- int status = 0;
- struct htc_packet *pIOPacket = NULL;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+DevGMboxReadCreditCounter (%s) \n", AsyncMode ? "ASYNC" : "SYNC"));
-
- do {
-
- pIOPacket = AR6KAllocIOPacket(pDev);
-
- if (NULL == pIOPacket) {
- status = A_NO_MEMORY;
- A_ASSERT(false);
- break;
- }
-
- A_MEMZERO(pIOPacket->pBuffer,AR6K_REG_IO_BUFFER_SIZE);
-
- if (AsyncMode) {
- /* stick in our completion routine when the I/O operation completes */
- pIOPacket->Completion = DevGMboxReadCreditsAsyncHandler;
- pIOPacket->pContext = pDev;
- /* read registers asynchronously */
- HIFReadWrite(pDev->HIFDevice,
- AR6K_GMBOX_CREDIT_DEC_ADDRESS,
- pIOPacket->pBuffer,
- AR6K_REG_IO_BUFFER_SIZE, /* hit the register multiple times */
- HIF_RD_ASYNC_BYTE_FIX,
- pIOPacket);
- pIOPacket = NULL;
- break;
- }
-
- pIOPacket->Completion = NULL;
- /* if we get here we are doing it synchronously */
- status = HIFReadWrite(pDev->HIFDevice,
- AR6K_GMBOX_CREDIT_DEC_ADDRESS,
- pIOPacket->pBuffer,
- AR6K_REG_IO_BUFFER_SIZE,
- HIF_RD_SYNC_BYTE_FIX,
- NULL);
- } while (false);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- (" DevGMboxReadCreditCounter failed! status:%d \n", status));
- }
-
- if (pIOPacket != NULL) {
- if (!status) {
- /* sync mode processing */
- *pCredits = ProcessCreditCounterReadBuffer(pIOPacket->pBuffer, AR6K_REG_IO_BUFFER_SIZE);
- }
- AR6KFreeIOPacket(pDev,pIOPacket);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-DevGMboxReadCreditCounter (%s) (%d) \n",
- AsyncMode ? "ASYNC" : "SYNC", status));
-
- return status;
-}
-
-int DevGMboxReadCreditSize(struct ar6k_device *pDev, int *pCreditSize)
-{
- int status;
- u8 buffer[4];
-
- status = HIFReadWrite(pDev->HIFDevice,
- AR6K_GMBOX_CREDIT_SIZE_ADDRESS,
- buffer,
- sizeof(buffer),
- HIF_RD_SYNC_BYTE_FIX, /* hit the register 4 times to align the I/O */
- NULL);
-
- if (!status) {
- if (buffer[0] == 0) {
- *pCreditSize = 256;
- } else {
- *pCreditSize = buffer[0];
- }
-
- }
-
- return status;
-}
-
-void DevNotifyGMboxTargetFailure(struct ar6k_device *pDev)
-{
- /* Target ASSERTED!!! */
- if (pDev->GMboxInfo.pTargetFailureCallback != NULL) {
- pDev->GMboxInfo.pTargetFailureCallback(pDev->GMboxInfo.pProtocolContext, A_HARDWARE);
- }
-}
-
-int DevGMboxRecvLookAheadPeek(struct ar6k_device *pDev, u8 *pLookAheadBuffer, int *pLookAheadBytes)
-{
-
- int status = 0;
- struct ar6k_irq_proc_registers procRegs;
- int maxCopy;
-
- do {
- /* on entry the caller provides the length of the lookahead buffer */
- if (*pLookAheadBytes > sizeof(procRegs.rx_gmbox_lookahead_alias)) {
- A_ASSERT(false);
- status = A_EINVAL;
- break;
- }
-
- maxCopy = *pLookAheadBytes;
- *pLookAheadBytes = 0;
- /* load the register table from the device */
- status = HIFReadWrite(pDev->HIFDevice,
- HOST_INT_STATUS_ADDRESS,
- (u8 *)&procRegs,
- AR6K_IRQ_PROC_REGS_SIZE,
- HIF_RD_SYNC_BYTE_INC,
- NULL);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("DevGMboxRecvLookAheadPeek : Failed to read register table (%d) \n",status));
- break;
- }
-
- if (procRegs.gmbox_rx_avail > 0) {
- int bytes = procRegs.gmbox_rx_avail > maxCopy ? maxCopy : procRegs.gmbox_rx_avail;
- memcpy(pLookAheadBuffer,&procRegs.rx_gmbox_lookahead_alias[0],bytes);
- *pLookAheadBytes = bytes;
- }
-
- } while (false);
-
- return status;
-}
-
-int DevGMboxSetTargetInterrupt(struct ar6k_device *pDev, int Signal, int AckTimeoutMS)
-{
- int status = 0;
- int i;
- u8 buffer[4];
-
- A_MEMZERO(buffer, sizeof(buffer));
-
- do {
-
- if (Signal >= MBOX_SIG_HCI_BRIDGE_MAX) {
- status = A_EINVAL;
- break;
- }
-
- /* set the last buffer to do the actual signal trigger */
- buffer[3] = (1 << Signal);
-
- status = HIFReadWrite(pDev->HIFDevice,
- INT_WLAN_ADDRESS,
- buffer,
- sizeof(buffer),
- HIF_WR_SYNC_BYTE_FIX, /* hit the register 4 times to align the I/O */
- NULL);
-
- if (status) {
- break;
- }
-
- } while (false);
-
-
- if (!status) {
- /* now read back the register to see if the bit cleared */
- while (AckTimeoutMS) {
- status = HIFReadWrite(pDev->HIFDevice,
- INT_WLAN_ADDRESS,
- buffer,
- sizeof(buffer),
- HIF_RD_SYNC_BYTE_FIX,
- NULL);
-
- if (status) {
- break;
- }
-
- for (i = 0; i < sizeof(buffer); i++) {
- if (buffer[i] & (1 << Signal)) {
- /* bit is still set */
- break;
- }
- }
-
- if (i >= sizeof(buffer)) {
- /* done */
- break;
- }
-
- AckTimeoutMS--;
- A_MDELAY(1);
- }
-
- if (0 == AckTimeoutMS) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("DevGMboxSetTargetInterrupt : Ack Timed-out (sig:%d) \n",Signal));
- status = A_ERROR;
- }
- }
-
- return status;
-
-}
-
-#endif //ATH_AR6K_ENABLE_GMBOX
-
-
-
-
diff --git a/drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox_hciuart.c b/drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox_hciuart.c
deleted file mode 100644
index 56a0d714380..00000000000
--- a/drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox_hciuart.c
+++ /dev/null
@@ -1,1284 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="ar6k_prot_hciUart.c" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Protocol module for use in bridging HCI-UART packets over the GMBOX interface
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#include "a_config.h"
-#include "athdefs.h"
-#include "a_osapi.h"
-#include "../htc_debug.h"
-#include "hif.h"
-#include "htc_packet.h"
-#include "ar6k.h"
-#include "hci_transport_api.h"
-#include "gmboxif.h"
-#include "ar6000_diag.h"
-#include "hw/apb_map.h"
-#include "hw/mbox_reg.h"
-
-#ifdef ATH_AR6K_ENABLE_GMBOX
-#define HCI_UART_COMMAND_PKT 0x01
-#define HCI_UART_ACL_PKT 0x02
-#define HCI_UART_SCO_PKT 0x03
-#define HCI_UART_EVENT_PKT 0x04
-
-#define HCI_RECV_WAIT_BUFFERS (1 << 0)
-
-#define HCI_SEND_WAIT_CREDITS (1 << 0)
-
-#define HCI_UART_BRIDGE_CREDIT_SIZE 128
-
-#define CREDIT_POLL_COUNT 256
-
-#define HCI_DELAY_PER_INTERVAL_MS 10
-#define BTON_TIMEOUT_MS 500
-#define BTOFF_TIMEOUT_MS 500
-#define BAUD_TIMEOUT_MS 1
-#define BTPWRSAV_TIMEOUT_MS 1
-
-struct gmbox_proto_hci_uart {
- struct hci_transport_config_info HCIConfig;
- bool HCIAttached;
- bool HCIStopped;
- u32 RecvStateFlags;
- u32 SendStateFlags;
- HCI_TRANSPORT_PACKET_TYPE WaitBufferType;
- struct htc_packet_queue SendQueue; /* write queue holding HCI Command and ACL packets */
- struct htc_packet_queue HCIACLRecvBuffers; /* recv queue holding buffers for incomming ACL packets */
- struct htc_packet_queue HCIEventBuffers; /* recv queue holding buffers for incomming event packets */
- struct ar6k_device *pDev;
- A_MUTEX_T HCIRxLock;
- A_MUTEX_T HCITxLock;
- int CreditsMax;
- int CreditsConsumed;
- int CreditsAvailable;
- int CreditSize;
- int CreditsCurrentSeek;
- int SendProcessCount;
-};
-
-#define LOCK_HCI_RX(t) A_MUTEX_LOCK(&(t)->HCIRxLock);
-#define UNLOCK_HCI_RX(t) A_MUTEX_UNLOCK(&(t)->HCIRxLock);
-#define LOCK_HCI_TX(t) A_MUTEX_LOCK(&(t)->HCITxLock);
-#define UNLOCK_HCI_TX(t) A_MUTEX_UNLOCK(&(t)->HCITxLock);
-
-#define DO_HCI_RECV_INDICATION(p, pt) \
-do { \
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, \
- ("HCI: Indicate Recv on packet:0x%lX status:%d len:%d type:%d \n", \
- (unsigned long)(pt), \
- (pt)->Status, \
- !(pt)->Status ? (pt)->ActualLength : 0, \
- HCI_GET_PACKET_TYPE(pt))); \
- (p)->HCIConfig.pHCIPktRecv((p)->HCIConfig.pContext, (pt)); \
-} while (0)
-
-#define DO_HCI_SEND_INDICATION(p,pt) \
-{ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: Indicate Send on packet:0x%lX status:%d type:%d \n", \
- (unsigned long)(pt),(pt)->Status,HCI_GET_PACKET_TYPE(pt))); \
- (p)->HCIConfig.pHCISendComplete((p)->HCIConfig.pContext, (pt)); \
-}
-
-static int HCITrySend(struct gmbox_proto_hci_uart *pProt, struct htc_packet *pPacket, bool Synchronous);
-
-static void HCIUartCleanup(struct gmbox_proto_hci_uart *pProtocol)
-{
- A_ASSERT(pProtocol != NULL);
-
- A_MUTEX_DELETE(&pProtocol->HCIRxLock);
- A_MUTEX_DELETE(&pProtocol->HCITxLock);
-
- kfree(pProtocol);
-}
-
-static int InitTxCreditState(struct gmbox_proto_hci_uart *pProt)
-{
- int status;
- int credits;
- int creditPollCount = CREDIT_POLL_COUNT;
- bool gotCredits = false;
-
- pProt->CreditsConsumed = 0;
-
- do {
-
- if (pProt->CreditsMax != 0) {
- /* we can only call this only once per target reset */
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HCI: InitTxCreditState - already called! \n"));
- A_ASSERT(false);
- status = A_EINVAL;
- break;
- }
-
- /* read the credit counter. At startup the target will set the credit counter
- * to the max available, we read this in a loop because it may take
- * multiple credit counter reads to get all credits */
-
- while (creditPollCount) {
-
- credits = 0;
-
- status = DevGMboxReadCreditCounter(pProt->pDev, PROC_IO_SYNC, &credits);
-
- if (status) {
- break;
- }
-
- if (!gotCredits && (0 == credits)) {
- creditPollCount--;
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: credit is 0, retrying (%d) \n",creditPollCount));
- A_MDELAY(HCI_DELAY_PER_INTERVAL_MS);
- continue;
- } else {
- gotCredits = true;
- }
-
- if (0 == credits) {
- break;
- }
-
- pProt->CreditsMax += credits;
- }
-
- if (status) {
- break;
- }
-
- if (0 == creditPollCount) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("** HCI : Failed to get credits! GMBOX Target was not available \n"));
- status = A_ERROR;
- break;
- }
-
- /* now get the size */
- status = DevGMboxReadCreditSize(pProt->pDev, &pProt->CreditSize);
-
- if (status) {
- break;
- }
-
- } while (false);
-
- if (!status) {
- pProt->CreditsAvailable = pProt->CreditsMax;
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("HCI : InitTxCreditState - credits avail: %d, size: %d \n",
- pProt->CreditsAvailable, pProt->CreditSize));
- }
-
- return status;
-}
-
-static int CreditsAvailableCallback(void *pContext, int Credits, bool CreditIRQEnabled)
-{
- struct gmbox_proto_hci_uart *pProt = (struct gmbox_proto_hci_uart *)pContext;
- bool enableCreditIrq = false;
- bool disableCreditIrq = false;
- bool doPendingSends = false;
- int status = 0;
-
- /** this callback is called under 2 conditions:
- * 1. The credit IRQ interrupt was enabled and signaled.
- * 2. A credit counter read completed.
- *
- * The function must not assume that the calling context can block !
- */
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+CreditsAvailableCallback (Credits:%d, IRQ:%s) \n",
- Credits, CreditIRQEnabled ? "ON" : "OFF"));
-
- LOCK_HCI_TX(pProt);
-
- do {
-
- if (0 == Credits) {
- if (!CreditIRQEnabled) {
- /* enable credit IRQ */
- enableCreditIrq = true;
- }
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: current credit state, consumed:%d available:%d max:%d seek:%d\n",
- pProt->CreditsConsumed,
- pProt->CreditsAvailable,
- pProt->CreditsMax,
- pProt->CreditsCurrentSeek));
-
- pProt->CreditsAvailable += Credits;
- A_ASSERT(pProt->CreditsAvailable <= pProt->CreditsMax);
- pProt->CreditsConsumed -= Credits;
- A_ASSERT(pProt->CreditsConsumed >= 0);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: new credit state, consumed:%d available:%d max:%d seek:%d\n",
- pProt->CreditsConsumed,
- pProt->CreditsAvailable,
- pProt->CreditsMax,
- pProt->CreditsCurrentSeek));
-
- if (pProt->CreditsAvailable >= pProt->CreditsCurrentSeek) {
- /* we have enough credits to fulfill at least 1 packet waiting in the queue */
- pProt->CreditsCurrentSeek = 0;
- pProt->SendStateFlags &= ~HCI_SEND_WAIT_CREDITS;
- doPendingSends = true;
- if (CreditIRQEnabled) {
- /* credit IRQ was enabled, we shouldn't need it anymore */
- disableCreditIrq = true;
- }
- } else {
- /* not enough credits yet, enable credit IRQ if we haven't already */
- if (!CreditIRQEnabled) {
- enableCreditIrq = true;
- }
- }
-
- } while (false);
-
- UNLOCK_HCI_TX(pProt);
-
- if (enableCreditIrq) {
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" Enabling credit count IRQ...\n"));
- /* must use async only */
- status = DevGMboxIRQAction(pProt->pDev, GMBOX_CREDIT_IRQ_ENABLE, PROC_IO_ASYNC);
- } else if (disableCreditIrq) {
- /* must use async only */
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" Disabling credit count IRQ...\n"));
- status = DevGMboxIRQAction(pProt->pDev, GMBOX_CREDIT_IRQ_DISABLE, PROC_IO_ASYNC);
- }
-
- if (doPendingSends) {
- HCITrySend(pProt, NULL, false);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+CreditsAvailableCallback \n"));
- return status;
-}
-
-static INLINE void NotifyTransportFailure(struct gmbox_proto_hci_uart *pProt, int status)
-{
- if (pProt->HCIConfig.TransportFailure != NULL) {
- pProt->HCIConfig.TransportFailure(pProt->HCIConfig.pContext, status);
- }
-}
-
-static void FailureCallback(void *pContext, int Status)
-{
- struct gmbox_proto_hci_uart *pProt = (struct gmbox_proto_hci_uart *)pContext;
-
- /* target assertion occurred */
- NotifyTransportFailure(pProt, Status);
-}
-
-static void StateDumpCallback(void *pContext)
-{
- struct gmbox_proto_hci_uart *pProt = (struct gmbox_proto_hci_uart *)pContext;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("============ HCIUart State ======================\n"));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("RecvStateFlags : 0x%X \n",pProt->RecvStateFlags));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("SendStateFlags : 0x%X \n",pProt->SendStateFlags));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("WaitBufferType : %d \n",pProt->WaitBufferType));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("SendQueue Depth : %d \n",HTC_PACKET_QUEUE_DEPTH(&pProt->SendQueue)));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("CreditsMax : %d \n",pProt->CreditsMax));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("CreditsConsumed : %d \n",pProt->CreditsConsumed));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("CreditsAvailable : %d \n",pProt->CreditsAvailable));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("==================================================\n"));
-}
-
-static int HCIUartMessagePending(void *pContext, u8 LookAheadBytes[], int ValidBytes)
-{
- struct gmbox_proto_hci_uart *pProt = (struct gmbox_proto_hci_uart *)pContext;
- int status = 0;
- int totalRecvLength = 0;
- HCI_TRANSPORT_PACKET_TYPE pktType = HCI_PACKET_INVALID;
- bool recvRefillCalled = false;
- bool blockRecv = false;
- struct htc_packet *pPacket = NULL;
-
- /** caller guarantees that this is a fully block-able context (synch I/O is allowed) */
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HCIUartMessagePending Lookahead Bytes:%d \n",ValidBytes));
-
- LOCK_HCI_RX(pProt);
-
- do {
-
- if (ValidBytes < 3) {
- /* not enough for ACL or event header */
- break;
- }
-
- if ((LookAheadBytes[0] == HCI_UART_ACL_PKT) && (ValidBytes < 5)) {
- /* not enough for ACL data header */
- break;
- }
-
- switch (LookAheadBytes[0]) {
- case HCI_UART_EVENT_PKT:
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI Event: %d param length: %d \n",
- LookAheadBytes[1], LookAheadBytes[2]));
- totalRecvLength = LookAheadBytes[2];
- totalRecvLength += 3; /* add type + event code + length field */
- pktType = HCI_EVENT_TYPE;
- break;
- case HCI_UART_ACL_PKT:
- totalRecvLength = (LookAheadBytes[4] << 8) | LookAheadBytes[3];
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI ACL: conn:0x%X length: %d \n",
- ((LookAheadBytes[2] & 0xF0) << 8) | LookAheadBytes[1], totalRecvLength));
- totalRecvLength += 5; /* add type + connection handle + length field */
- pktType = HCI_ACL_TYPE;
- break;
- default:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("**Invalid HCI packet type: %d \n",LookAheadBytes[0]));
- status = A_EPROTO;
- break;
- }
-
- if (status) {
- break;
- }
-
- if (pProt->HCIConfig.pHCIPktRecvAlloc != NULL) {
- UNLOCK_HCI_RX(pProt);
- /* user is using a per-packet allocation callback */
- pPacket = pProt->HCIConfig.pHCIPktRecvAlloc(pProt->HCIConfig.pContext,
- pktType,
- totalRecvLength);
- LOCK_HCI_RX(pProt);
-
- } else {
- struct htc_packet_queue *pQueue;
- /* user is using a refill handler that can refill multiple HTC buffers */
-
- /* select buffer queue */
- if (pktType == HCI_ACL_TYPE) {
- pQueue = &pProt->HCIACLRecvBuffers;
- } else {
- pQueue = &pProt->HCIEventBuffers;
- }
-
- if (HTC_QUEUE_EMPTY(pQueue)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("** HCI pkt type: %d has no buffers available calling allocation handler \n",
- pktType));
- /* check for refill handler */
- if (pProt->HCIConfig.pHCIPktRecvRefill != NULL) {
- recvRefillCalled = true;
- UNLOCK_HCI_RX(pProt);
- /* call the re-fill handler */
- pProt->HCIConfig.pHCIPktRecvRefill(pProt->HCIConfig.pContext,
- pktType,
- 0);
- LOCK_HCI_RX(pProt);
- /* check if we have more buffers */
- pPacket = HTC_PACKET_DEQUEUE(pQueue);
- /* fall through */
- }
- } else {
- pPacket = HTC_PACKET_DEQUEUE(pQueue);
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("HCI pkt type: %d now has %d recv buffers left \n",
- pktType, HTC_PACKET_QUEUE_DEPTH(pQueue)));
- }
- }
-
- if (NULL == pPacket) {
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("** HCI pkt type: %d has no buffers available stopping recv...\n", pktType));
- /* this is not an error, we simply need to mark that we are waiting for buffers.*/
- pProt->RecvStateFlags |= HCI_RECV_WAIT_BUFFERS;
- pProt->WaitBufferType = pktType;
- blockRecv = true;
- break;
- }
-
- if (totalRecvLength > (int)pPacket->BufferLength) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** HCI-UART pkt: %d requires %d bytes (%d buffer bytes avail) ! \n",
- LookAheadBytes[0], totalRecvLength, pPacket->BufferLength));
- status = A_EINVAL;
- break;
- }
-
- } while (false);
-
- UNLOCK_HCI_RX(pProt);
-
- /* locks are released, we can go fetch the packet */
-
- do {
-
- if (status || (NULL == pPacket)) {
- break;
- }
-
- /* do this synchronously, we don't need to be fast here */
- pPacket->Completion = NULL;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI : getting recv packet len:%d hci-uart-type: %s \n",
- totalRecvLength, (LookAheadBytes[0] == HCI_UART_EVENT_PKT) ? "EVENT" : "ACL"));
-
- status = DevGMboxRead(pProt->pDev, pPacket, totalRecvLength);
-
- if (status) {
- break;
- }
-
- if (pPacket->pBuffer[0] != LookAheadBytes[0]) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** HCI buffer does not contain expected packet type: %d ! \n",
- pPacket->pBuffer[0]));
- status = A_EPROTO;
- break;
- }
-
- if (pPacket->pBuffer[0] == HCI_UART_EVENT_PKT) {
- /* validate event header fields */
- if ((pPacket->pBuffer[1] != LookAheadBytes[1]) ||
- (pPacket->pBuffer[2] != LookAheadBytes[2])) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** HCI buffer does not match lookahead! \n"));
- DebugDumpBytes(LookAheadBytes, 3, "Expected HCI-UART Header");
- DebugDumpBytes(pPacket->pBuffer, 3, "** Bad HCI-UART Header");
- status = A_EPROTO;
- break;
- }
- } else if (pPacket->pBuffer[0] == HCI_UART_ACL_PKT) {
- /* validate acl header fields */
- if ((pPacket->pBuffer[1] != LookAheadBytes[1]) ||
- (pPacket->pBuffer[2] != LookAheadBytes[2]) ||
- (pPacket->pBuffer[3] != LookAheadBytes[3]) ||
- (pPacket->pBuffer[4] != LookAheadBytes[4])) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** HCI buffer does not match lookahead! \n"));
- DebugDumpBytes(LookAheadBytes, 5, "Expected HCI-UART Header");
- DebugDumpBytes(pPacket->pBuffer, 5, "** Bad HCI-UART Header");
- status = A_EPROTO;
- break;
- }
- }
-
- /* adjust buffer to move past packet ID */
- pPacket->pBuffer++;
- pPacket->ActualLength = totalRecvLength - 1;
- pPacket->Status = 0;
- /* indicate packet */
- DO_HCI_RECV_INDICATION(pProt,pPacket);
- pPacket = NULL;
-
- /* check if we need to refill recv buffers */
- if ((pProt->HCIConfig.pHCIPktRecvRefill != NULL) && !recvRefillCalled) {
- struct htc_packet_queue *pQueue;
- int watermark;
-
- if (pktType == HCI_ACL_TYPE) {
- watermark = pProt->HCIConfig.ACLRecvBufferWaterMark;
- pQueue = &pProt->HCIACLRecvBuffers;
- } else {
- watermark = pProt->HCIConfig.EventRecvBufferWaterMark;
- pQueue = &pProt->HCIEventBuffers;
- }
-
- if (HTC_PACKET_QUEUE_DEPTH(pQueue) < watermark) {
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("** HCI pkt type: %d watermark hit (%d) current:%d \n",
- pktType, watermark, HTC_PACKET_QUEUE_DEPTH(pQueue)));
- /* call the re-fill handler */
- pProt->HCIConfig.pHCIPktRecvRefill(pProt->HCIConfig.pContext,
- pktType,
- HTC_PACKET_QUEUE_DEPTH(pQueue));
- }
- }
-
- } while (false);
-
- /* check if we need to disable the receiver */
- if (status || blockRecv) {
- DevGMboxIRQAction(pProt->pDev, GMBOX_RECV_IRQ_DISABLE, PROC_IO_SYNC);
- }
-
- /* see if we need to recycle the recv buffer */
- if (status && (pPacket != NULL)) {
- struct htc_packet_queue queue;
-
- if (A_EPROTO == status) {
- DebugDumpBytes(pPacket->pBuffer, totalRecvLength, "Bad HCI-UART Recv packet");
- }
- /* recycle packet */
- HTC_PACKET_RESET_RX(pPacket);
- INIT_HTC_PACKET_QUEUE_AND_ADD(&queue,pPacket);
- HCI_TransportAddReceivePkts(pProt,&queue);
- NotifyTransportFailure(pProt,status);
- }
-
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-HCIUartMessagePending \n"));
-
- return status;
-}
-
-static void HCISendPacketCompletion(void *Context, struct htc_packet *pPacket)
-{
- struct gmbox_proto_hci_uart *pProt = (struct gmbox_proto_hci_uart *)Context;
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HCISendPacketCompletion (pPacket:0x%lX) \n",(unsigned long)pPacket));
-
- if (pPacket->Status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" Send Packet (0x%lX) failed: %d , len:%d \n",
- (unsigned long)pPacket, pPacket->Status, pPacket->ActualLength));
- }
-
- DO_HCI_SEND_INDICATION(pProt,pPacket);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HCISendPacketCompletion \n"));
-}
-
-static int SeekCreditsSynch(struct gmbox_proto_hci_uart *pProt)
-{
- int status = 0;
- int credits;
- int retry = 100;
-
- while (true) {
- credits = 0;
- status = DevGMboxReadCreditCounter(pProt->pDev, PROC_IO_SYNC, &credits);
- if (status) {
- break;
- }
- LOCK_HCI_TX(pProt);
- pProt->CreditsAvailable += credits;
- pProt->CreditsConsumed -= credits;
- if (pProt->CreditsAvailable >= pProt->CreditsCurrentSeek) {
- pProt->CreditsCurrentSeek = 0;
- UNLOCK_HCI_TX(pProt);
- break;
- }
- UNLOCK_HCI_TX(pProt);
- retry--;
- if (0 == retry) {
- status = A_EBUSY;
- break;
- }
- A_MDELAY(20);
- }
-
- return status;
-}
-
-static int HCITrySend(struct gmbox_proto_hci_uart *pProt, struct htc_packet *pPacket, bool Synchronous)
-{
- int status = 0;
- int transferLength;
- int creditsRequired, remainder;
- u8 hciUartType;
- bool synchSendComplete = false;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HCITrySend (pPacket:0x%lX) %s \n",(unsigned long)pPacket,
- Synchronous ? "SYNC" :"ASYNC"));
-
- LOCK_HCI_TX(pProt);
-
- /* increment write processing count on entry */
- pProt->SendProcessCount++;
-
- do {
-
- if (pProt->HCIStopped) {
- status = A_ECANCELED;
- break;
- }
-
- if (pPacket != NULL) {
- /* packet was supplied */
- if (Synchronous) {
- /* in synchronous mode, the send queue can only hold 1 packet */
- if (!HTC_QUEUE_EMPTY(&pProt->SendQueue)) {
- status = A_EBUSY;
- A_ASSERT(false);
- break;
- }
-
- if (pProt->SendProcessCount > 1) {
- /* another thread or task is draining the TX queues */
- status = A_EBUSY;
- A_ASSERT(false);
- break;
- }
-
- HTC_PACKET_ENQUEUE(&pProt->SendQueue,pPacket);
-
- } else {
- /* see if adding this packet hits the max depth (asynchronous mode only) */
- if ((pProt->HCIConfig.MaxSendQueueDepth > 0) &&
- ((HTC_PACKET_QUEUE_DEPTH(&pProt->SendQueue) + 1) >= pProt->HCIConfig.MaxSendQueueDepth)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("HCI Send queue is full, Depth:%d, Max:%d \n",
- HTC_PACKET_QUEUE_DEPTH(&pProt->SendQueue),
- pProt->HCIConfig.MaxSendQueueDepth));
- /* queue will be full, invoke any callbacks to determine what action to take */
- if (pProt->HCIConfig.pHCISendFull != NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
- ("HCI : Calling driver's send full callback.... \n"));
- if (pProt->HCIConfig.pHCISendFull(pProt->HCIConfig.pContext,
- pPacket) == HCI_SEND_FULL_DROP) {
- /* drop it */
- status = A_NO_RESOURCE;
- break;
- }
- }
- }
-
- HTC_PACKET_ENQUEUE(&pProt->SendQueue,pPacket);
- }
-
- }
-
- if (pProt->SendStateFlags & HCI_SEND_WAIT_CREDITS) {
- break;
- }
-
- if (pProt->SendProcessCount > 1) {
- /* another thread or task is draining the TX queues */
- break;
- }
-
- /***** beyond this point only 1 thread may enter ******/
-
- /* now drain the send queue for transmission as long as we have enough
- * credits */
- while (!HTC_QUEUE_EMPTY(&pProt->SendQueue)) {
-
- pPacket = HTC_PACKET_DEQUEUE(&pProt->SendQueue);
-
- switch (HCI_GET_PACKET_TYPE(pPacket)) {
- case HCI_COMMAND_TYPE:
- hciUartType = HCI_UART_COMMAND_PKT;
- break;
- case HCI_ACL_TYPE:
- hciUartType = HCI_UART_ACL_PKT;
- break;
- default:
- status = A_EINVAL;
- A_ASSERT(false);
- break;
- }
-
- if (status) {
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: Got head packet:0x%lX , Type:%d Length: %d Remaining Queue Depth: %d\n",
- (unsigned long)pPacket, HCI_GET_PACKET_TYPE(pPacket), pPacket->ActualLength,
- HTC_PACKET_QUEUE_DEPTH(&pProt->SendQueue)));
-
- transferLength = 1; /* UART type header is 1 byte */
- transferLength += pPacket->ActualLength;
- transferLength = DEV_CALC_SEND_PADDED_LEN(pProt->pDev, transferLength);
-
- /* figure out how many credits this message requires */
- creditsRequired = transferLength / pProt->CreditSize;
- remainder = transferLength % pProt->CreditSize;
-
- if (remainder) {
- creditsRequired++;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: Creds Required:%d Got:%d\n",
- creditsRequired, pProt->CreditsAvailable));
-
- if (creditsRequired > pProt->CreditsAvailable) {
- if (Synchronous) {
- /* in synchronous mode we need to seek credits in synchronously */
- pProt->CreditsCurrentSeek = creditsRequired;
- UNLOCK_HCI_TX(pProt);
- status = SeekCreditsSynch(pProt);
- LOCK_HCI_TX(pProt);
- if (status) {
- break;
- }
- /* fall through and continue processing this send op */
- } else {
- /* not enough credits, queue back to the head */
- HTC_PACKET_ENQUEUE_TO_HEAD(&pProt->SendQueue,pPacket);
- /* waiting for credits */
- pProt->SendStateFlags |= HCI_SEND_WAIT_CREDITS;
- /* provide a hint to reduce attempts to re-send if credits are dribbling back
- * this hint is the short fall of credits */
- pProt->CreditsCurrentSeek = creditsRequired;
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: packet:0x%lX placed back in queue. head packet needs: %d credits \n",
- (unsigned long)pPacket, pProt->CreditsCurrentSeek));
- pPacket = NULL;
- UNLOCK_HCI_TX(pProt);
-
- /* schedule a credit counter read, our CreditsAvailableCallback callback will be called
- * with the result */
- DevGMboxReadCreditCounter(pProt->pDev, PROC_IO_ASYNC, NULL);
-
- LOCK_HCI_TX(pProt);
- break;
- }
- }
-
- /* caller guarantees some head room */
- pPacket->pBuffer--;
- pPacket->pBuffer[0] = hciUartType;
-
- pProt->CreditsAvailable -= creditsRequired;
- pProt->CreditsConsumed += creditsRequired;
- A_ASSERT(pProt->CreditsConsumed <= pProt->CreditsMax);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: new credit state: consumed:%d available:%d max:%d\n",
- pProt->CreditsConsumed, pProt->CreditsAvailable, pProt->CreditsMax));
-
- UNLOCK_HCI_TX(pProt);
-
- /* write it out */
- if (Synchronous) {
- pPacket->Completion = NULL;
- pPacket->pContext = NULL;
- } else {
- pPacket->Completion = HCISendPacketCompletion;
- pPacket->pContext = pProt;
- }
-
- status = DevGMboxWrite(pProt->pDev,pPacket,transferLength);
- if (Synchronous) {
- synchSendComplete = true;
- } else {
- pPacket = NULL;
- }
-
- LOCK_HCI_TX(pProt);
-
- }
-
- } while (false);
-
- pProt->SendProcessCount--;
- A_ASSERT(pProt->SendProcessCount >= 0);
- UNLOCK_HCI_TX(pProt);
-
- if (Synchronous) {
- A_ASSERT(pPacket != NULL);
- if (!status && (!synchSendComplete)) {
- status = A_EBUSY;
- A_ASSERT(false);
- LOCK_HCI_TX(pProt);
- if (pPacket->ListLink.pNext != NULL) {
- /* remove from the queue */
- HTC_PACKET_REMOVE(&pProt->SendQueue,pPacket);
- }
- UNLOCK_HCI_TX(pProt);
- }
- } else {
- if (status && (pPacket != NULL)) {
- pPacket->Status = status;
- DO_HCI_SEND_INDICATION(pProt,pPacket);
- }
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HCITrySend: \n"));
- return status;
-}
-
-static void FlushSendQueue(struct gmbox_proto_hci_uart *pProt)
-{
- struct htc_packet *pPacket;
- struct htc_packet_queue discardQueue;
-
- INIT_HTC_PACKET_QUEUE(&discardQueue);
-
- LOCK_HCI_TX(pProt);
-
- if (!HTC_QUEUE_EMPTY(&pProt->SendQueue)) {
- HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&discardQueue,&pProt->SendQueue);
- }
-
- UNLOCK_HCI_TX(pProt);
-
- /* discard packets */
- while (!HTC_QUEUE_EMPTY(&discardQueue)) {
- pPacket = HTC_PACKET_DEQUEUE(&discardQueue);
- pPacket->Status = A_ECANCELED;
- DO_HCI_SEND_INDICATION(pProt,pPacket);
- }
-
-}
-
-static void FlushRecvBuffers(struct gmbox_proto_hci_uart *pProt)
-{
- struct htc_packet_queue discardQueue;
- struct htc_packet *pPacket;
-
- INIT_HTC_PACKET_QUEUE(&discardQueue);
-
- LOCK_HCI_RX(pProt);
- /*transfer list items from ACL and event buffer queues to the discard queue */
- if (!HTC_QUEUE_EMPTY(&pProt->HCIACLRecvBuffers)) {
- HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&discardQueue,&pProt->HCIACLRecvBuffers);
- }
- if (!HTC_QUEUE_EMPTY(&pProt->HCIEventBuffers)) {
- HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&discardQueue,&pProt->HCIEventBuffers);
- }
- UNLOCK_HCI_RX(pProt);
-
- /* now empty the discard queue */
- while (!HTC_QUEUE_EMPTY(&discardQueue)) {
- pPacket = HTC_PACKET_DEQUEUE(&discardQueue);
- pPacket->Status = A_ECANCELED;
- DO_HCI_RECV_INDICATION(pProt,pPacket);
- }
-
-}
-
-/*** protocol module install entry point ***/
-
-int GMboxProtocolInstall(struct ar6k_device *pDev)
-{
- int status = 0;
- struct gmbox_proto_hci_uart *pProtocol = NULL;
-
- do {
-
- pProtocol = A_MALLOC(sizeof(struct gmbox_proto_hci_uart));
-
- if (NULL == pProtocol) {
- status = A_NO_MEMORY;
- break;
- }
-
- A_MEMZERO(pProtocol, sizeof(*pProtocol));
- pProtocol->pDev = pDev;
- INIT_HTC_PACKET_QUEUE(&pProtocol->SendQueue);
- INIT_HTC_PACKET_QUEUE(&pProtocol->HCIACLRecvBuffers);
- INIT_HTC_PACKET_QUEUE(&pProtocol->HCIEventBuffers);
- A_MUTEX_INIT(&pProtocol->HCIRxLock);
- A_MUTEX_INIT(&pProtocol->HCITxLock);
-
- } while (false);
-
- if (!status) {
- LOCK_AR6K(pDev);
- DEV_GMBOX_SET_PROTOCOL(pDev,
- HCIUartMessagePending,
- CreditsAvailableCallback,
- FailureCallback,
- StateDumpCallback,
- pProtocol);
- UNLOCK_AR6K(pDev);
- } else {
- if (pProtocol != NULL) {
- HCIUartCleanup(pProtocol);
- }
- }
-
- return status;
-}
-
-/*** protocol module uninstall entry point ***/
-void GMboxProtocolUninstall(struct ar6k_device *pDev)
-{
- struct gmbox_proto_hci_uart *pProtocol = (struct gmbox_proto_hci_uart *)DEV_GMBOX_GET_PROTOCOL(pDev);
-
- if (pProtocol != NULL) {
-
- /* notify anyone attached */
- if (pProtocol->HCIAttached) {
- A_ASSERT(pProtocol->HCIConfig.TransportRemoved != NULL);
- pProtocol->HCIConfig.TransportRemoved(pProtocol->HCIConfig.pContext);
- pProtocol->HCIAttached = false;
- }
-
- HCIUartCleanup(pProtocol);
- DEV_GMBOX_SET_PROTOCOL(pDev,NULL,NULL,NULL,NULL,NULL);
- }
-
-}
-
-static int NotifyTransportReady(struct gmbox_proto_hci_uart *pProt)
-{
- struct hci_transport_properties props;
- int status = 0;
-
- do {
-
- A_MEMZERO(&props,sizeof(props));
-
- /* HCI UART only needs one extra byte at the head to indicate the packet TYPE */
- props.HeadRoom = 1;
- props.TailRoom = 0;
- props.IOBlockPad = pProt->pDev->BlockSize;
- if (pProt->HCIAttached) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("HCI: notifying attached client to transport... \n"));
- A_ASSERT(pProt->HCIConfig.TransportReady != NULL);
- status = pProt->HCIConfig.TransportReady(pProt,
- &props,
- pProt->HCIConfig.pContext);
- }
-
- } while (false);
-
- return status;
-}
-
-/*********** HCI UART protocol implementation ************************************************/
-
-HCI_TRANSPORT_HANDLE HCI_TransportAttach(void *HTCHandle, struct hci_transport_config_info *pInfo)
-{
- struct gmbox_proto_hci_uart *pProtocol = NULL;
- struct ar6k_device *pDev;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("+HCI_TransportAttach \n"));
-
- pDev = HTCGetAR6KDevice(HTCHandle);
-
- LOCK_AR6K(pDev);
-
- do {
-
- pProtocol = (struct gmbox_proto_hci_uart *)DEV_GMBOX_GET_PROTOCOL(pDev);
-
- if (NULL == pProtocol) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("GMBOX protocol not installed! \n"));
- break;
- }
-
- if (pProtocol->HCIAttached) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("GMBOX protocol already attached! \n"));
- break;
- }
-
- memcpy(&pProtocol->HCIConfig, pInfo, sizeof(struct hci_transport_config_info));
-
- A_ASSERT(pProtocol->HCIConfig.pHCIPktRecv != NULL);
- A_ASSERT(pProtocol->HCIConfig.pHCISendComplete != NULL);
-
- pProtocol->HCIAttached = true;
-
- } while (false);
-
- UNLOCK_AR6K(pDev);
-
- if (pProtocol != NULL) {
- /* TODO ... should we use a worker? */
- NotifyTransportReady(pProtocol);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportAttach (0x%lX) \n",(unsigned long)pProtocol));
- return (HCI_TRANSPORT_HANDLE)pProtocol;
-}
-
-void HCI_TransportDetach(HCI_TRANSPORT_HANDLE HciTrans)
-{
- struct gmbox_proto_hci_uart *pProtocol = (struct gmbox_proto_hci_uart *)HciTrans;
- struct ar6k_device *pDev = pProtocol->pDev;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("+HCI_TransportDetach \n"));
-
- LOCK_AR6K(pDev);
- if (!pProtocol->HCIAttached) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("GMBOX protocol not attached! \n"));
- UNLOCK_AR6K(pDev);
- return;
- }
- pProtocol->HCIAttached = false;
- UNLOCK_AR6K(pDev);
-
- HCI_TransportStop(HciTrans);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportAttach \n"));
-}
-
-int HCI_TransportAddReceivePkts(HCI_TRANSPORT_HANDLE HciTrans, struct htc_packet_queue *pQueue)
-{
- struct gmbox_proto_hci_uart *pProt = (struct gmbox_proto_hci_uart *)HciTrans;
- int status = 0;
- bool unblockRecv = false;
- struct htc_packet *pPacket;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HCI_TransportAddReceivePkt \n"));
-
- LOCK_HCI_RX(pProt);
-
- do {
-
- if (pProt->HCIStopped) {
- status = A_ECANCELED;
- break;
- }
-
- pPacket = HTC_GET_PKT_AT_HEAD(pQueue);
-
- if (NULL == pPacket) {
- status = A_EINVAL;
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" HCI recv packet added, type :%d, len:%d num:%d \n",
- HCI_GET_PACKET_TYPE(pPacket), pPacket->BufferLength, HTC_PACKET_QUEUE_DEPTH(pQueue)));
-
- if (HCI_GET_PACKET_TYPE(pPacket) == HCI_EVENT_TYPE) {
- HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pProt->HCIEventBuffers, pQueue);
- } else if (HCI_GET_PACKET_TYPE(pPacket) == HCI_ACL_TYPE) {
- HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pProt->HCIACLRecvBuffers, pQueue);
- } else {
- status = A_EINVAL;
- break;
- }
-
- if (pProt->RecvStateFlags & HCI_RECV_WAIT_BUFFERS) {
- if (pProt->WaitBufferType == HCI_GET_PACKET_TYPE(pPacket)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" HCI recv was blocked on packet type :%d, unblocking.. \n",
- pProt->WaitBufferType));
- pProt->RecvStateFlags &= ~HCI_RECV_WAIT_BUFFERS;
- pProt->WaitBufferType = HCI_PACKET_INVALID;
- unblockRecv = true;
- }
- }
-
- } while (false);
-
- UNLOCK_HCI_RX(pProt);
-
- if (status) {
- while (!HTC_QUEUE_EMPTY(pQueue)) {
- pPacket = HTC_PACKET_DEQUEUE(pQueue);
- pPacket->Status = A_ECANCELED;
- DO_HCI_RECV_INDICATION(pProt,pPacket);
- }
- }
-
- if (unblockRecv) {
- DevGMboxIRQAction(pProt->pDev, GMBOX_RECV_IRQ_ENABLE, PROC_IO_ASYNC);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-HCI_TransportAddReceivePkt \n"));
-
- return 0;
-}
-
-int HCI_TransportSendPkt(HCI_TRANSPORT_HANDLE HciTrans, struct htc_packet *pPacket, bool Synchronous)
-{
- struct gmbox_proto_hci_uart *pProt = (struct gmbox_proto_hci_uart *)HciTrans;
-
- return HCITrySend(pProt,pPacket,Synchronous);
-}
-
-void HCI_TransportStop(HCI_TRANSPORT_HANDLE HciTrans)
-{
- struct gmbox_proto_hci_uart *pProt = (struct gmbox_proto_hci_uart *)HciTrans;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("+HCI_TransportStop \n"));
-
- LOCK_AR6K(pProt->pDev);
- if (pProt->HCIStopped) {
- UNLOCK_AR6K(pProt->pDev);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportStop \n"));
- return;
- }
- pProt->HCIStopped = true;
- UNLOCK_AR6K(pProt->pDev);
-
- /* disable interrupts */
- DevGMboxIRQAction(pProt->pDev, GMBOX_DISABLE_ALL, PROC_IO_SYNC);
- FlushSendQueue(pProt);
- FlushRecvBuffers(pProt);
-
- /* signal bridge side to power down BT */
- DevGMboxSetTargetInterrupt(pProt->pDev, MBOX_SIG_HCI_BRIDGE_BT_OFF, BTOFF_TIMEOUT_MS);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportStop \n"));
-}
-
-int HCI_TransportStart(HCI_TRANSPORT_HANDLE HciTrans)
-{
- int status;
- struct gmbox_proto_hci_uart *pProt = (struct gmbox_proto_hci_uart *)HciTrans;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("+HCI_TransportStart \n"));
-
- /* set stopped in case we have a problem in starting */
- pProt->HCIStopped = true;
-
- do {
-
- status = InitTxCreditState(pProt);
-
- if (status) {
- break;
- }
-
- status = DevGMboxIRQAction(pProt->pDev, GMBOX_ERRORS_IRQ_ENABLE, PROC_IO_SYNC);
-
- if (status) {
- break;
- }
- /* enable recv */
- status = DevGMboxIRQAction(pProt->pDev, GMBOX_RECV_IRQ_ENABLE, PROC_IO_SYNC);
-
- if (status) {
- break;
- }
- /* signal bridge side to power up BT */
- status = DevGMboxSetTargetInterrupt(pProt->pDev, MBOX_SIG_HCI_BRIDGE_BT_ON, BTON_TIMEOUT_MS);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HCI_TransportStart : Failed to trigger BT ON \n"));
- break;
- }
-
- /* we made it */
- pProt->HCIStopped = false;
-
- } while (false);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportStart \n"));
-
- return status;
-}
-
-int HCI_TransportEnableDisableAsyncRecv(HCI_TRANSPORT_HANDLE HciTrans, bool Enable)
-{
- struct gmbox_proto_hci_uart *pProt = (struct gmbox_proto_hci_uart *)HciTrans;
- return DevGMboxIRQAction(pProt->pDev,
- Enable ? GMBOX_RECV_IRQ_ENABLE : GMBOX_RECV_IRQ_DISABLE,
- PROC_IO_SYNC);
-
-}
-
-int HCI_TransportRecvHCIEventSync(HCI_TRANSPORT_HANDLE HciTrans,
- struct htc_packet *pPacket,
- int MaxPollMS)
-{
- struct gmbox_proto_hci_uart *pProt = (struct gmbox_proto_hci_uart *)HciTrans;
- int status = 0;
- u8 lookAhead[8];
- int bytes;
- int totalRecvLength;
-
- MaxPollMS = MaxPollMS / 16;
-
- if (MaxPollMS < 2) {
- MaxPollMS = 2;
- }
-
- while (MaxPollMS) {
-
- bytes = sizeof(lookAhead);
- status = DevGMboxRecvLookAheadPeek(pProt->pDev,lookAhead,&bytes);
- if (status) {
- break;
- }
-
- if (bytes < 3) {
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI recv poll got bytes: %d, retry : %d \n",
- bytes, MaxPollMS));
- A_MDELAY(16);
- MaxPollMS--;
- continue;
- }
-
- totalRecvLength = 0;
- switch (lookAhead[0]) {
- case HCI_UART_EVENT_PKT:
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI Event: %d param length: %d \n",
- lookAhead[1], lookAhead[2]));
- totalRecvLength = lookAhead[2];
- totalRecvLength += 3; /* add type + event code + length field */
- break;
- default:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("**Invalid HCI packet type: %d \n",lookAhead[0]));
- status = A_EPROTO;
- break;
- }
-
- if (status) {
- break;
- }
-
- pPacket->Completion = NULL;
- status = DevGMboxRead(pProt->pDev,pPacket,totalRecvLength);
- if (status) {
- break;
- }
-
- pPacket->pBuffer++;
- pPacket->ActualLength = totalRecvLength - 1;
- pPacket->Status = 0;
- break;
- }
-
- if (MaxPollMS == 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HCI recv poll timeout! \n"));
- status = A_ERROR;
- }
-
- return status;
-}
-
-#define LSB_SCRATCH_IDX 4
-#define MSB_SCRATCH_IDX 5
-int HCI_TransportSetBaudRate(HCI_TRANSPORT_HANDLE HciTrans, u32 Baud)
-{
- struct gmbox_proto_hci_uart *pProt = (struct gmbox_proto_hci_uart *)HciTrans;
- struct hif_device *pHIFDevice = (struct hif_device *)(pProt->pDev->HIFDevice);
- u32 scaledBaud, scratchAddr;
- int status = 0;
-
- /* Divide the desired baud rate by 100
- * Store the LSB in the local scratch register 4 and the MSB in the local
- * scratch register 5 for the target to read
- */
- scratchAddr = MBOX_BASE_ADDRESS | (LOCAL_SCRATCH_ADDRESS + 4 * LSB_SCRATCH_IDX);
- scaledBaud = (Baud / 100) & LOCAL_SCRATCH_VALUE_MASK;
- status = ar6000_WriteRegDiag(pHIFDevice, &scratchAddr, &scaledBaud);
- scratchAddr = MBOX_BASE_ADDRESS | (LOCAL_SCRATCH_ADDRESS + 4 * MSB_SCRATCH_IDX);
- scaledBaud = ((Baud / 100) >> (LOCAL_SCRATCH_VALUE_MSB+1)) & LOCAL_SCRATCH_VALUE_MASK;
- status |= ar6000_WriteRegDiag(pHIFDevice, &scratchAddr, &scaledBaud);
- if (0 != status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to set up baud rate in scratch register!"));
- return status;
- }
-
- /* Now interrupt the target to tell it about the baud rate */
- status = DevGMboxSetTargetInterrupt(pProt->pDev, MBOX_SIG_HCI_BRIDGE_BAUD_SET, BAUD_TIMEOUT_MS);
- if (0 != status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to tell target to change baud rate!"));
- }
-
- return status;
-}
-
-int HCI_TransportEnablePowerMgmt(HCI_TRANSPORT_HANDLE HciTrans, bool Enable)
-{
- int status;
- struct gmbox_proto_hci_uart *pProt = (struct gmbox_proto_hci_uart *)HciTrans;
-
- if (Enable) {
- status = DevGMboxSetTargetInterrupt(pProt->pDev, MBOX_SIG_HCI_BRIDGE_PWR_SAV_ON, BTPWRSAV_TIMEOUT_MS);
- } else {
- status = DevGMboxSetTargetInterrupt(pProt->pDev, MBOX_SIG_HCI_BRIDGE_PWR_SAV_OFF, BTPWRSAV_TIMEOUT_MS);
- }
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to enable/disable HCI power management!\n"));
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HCI power management enabled/disabled!\n"));
- }
-
- return status;
-}
-
-#endif //ATH_AR6K_ENABLE_GMBOX
-
diff --git a/drivers/staging/ath6kl/htc2/htc.c b/drivers/staging/ath6kl/htc2/htc.c
deleted file mode 100644
index ae54e64b624..00000000000
--- a/drivers/staging/ath6kl/htc2/htc.c
+++ /dev/null
@@ -1,575 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="htc.c" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#include "htc_internal.h"
-
-#ifdef ATH_DEBUG_MODULE
-static struct ath_debug_mask_description g_HTCDebugDescription[] = {
- { ATH_DEBUG_SEND , "Send"},
- { ATH_DEBUG_RECV , "Recv"},
- { ATH_DEBUG_SYNC , "Sync"},
- { ATH_DEBUG_DUMP , "Dump Data (RX or TX)"},
- { ATH_DEBUG_IRQ , "Interrupt Processing"}
-};
-
-ATH_DEBUG_INSTANTIATE_MODULE_VAR(htc,
- "htc",
- "Host Target Communications",
- ATH_DEBUG_MASK_DEFAULTS,
- ATH_DEBUG_DESCRIPTION_COUNT(g_HTCDebugDescription),
- g_HTCDebugDescription);
-
-#endif
-
-static void HTCReportFailure(void *Context);
-static void ResetEndpointStates(struct htc_target *target);
-
-void HTCFreeControlBuffer(struct htc_target *target, struct htc_packet *pPacket, struct htc_packet_queue *pList)
-{
- LOCK_HTC(target);
- HTC_PACKET_ENQUEUE(pList,pPacket);
- UNLOCK_HTC(target);
-}
-
-struct htc_packet *HTCAllocControlBuffer(struct htc_target *target, struct htc_packet_queue *pList)
-{
- struct htc_packet *pPacket;
-
- LOCK_HTC(target);
- pPacket = HTC_PACKET_DEQUEUE(pList);
- UNLOCK_HTC(target);
-
- return pPacket;
-}
-
-/* cleanup the HTC instance */
-static void HTCCleanup(struct htc_target *target)
-{
- s32 i;
-
- DevCleanup(&target->Device);
-
- for (i = 0;i < NUM_CONTROL_BUFFERS;i++) {
- if (target->HTCControlBuffers[i].Buffer) {
- kfree(target->HTCControlBuffers[i].Buffer);
- }
- }
-
- if (A_IS_MUTEX_VALID(&target->HTCLock)) {
- A_MUTEX_DELETE(&target->HTCLock);
- }
-
- if (A_IS_MUTEX_VALID(&target->HTCRxLock)) {
- A_MUTEX_DELETE(&target->HTCRxLock);
- }
-
- if (A_IS_MUTEX_VALID(&target->HTCTxLock)) {
- A_MUTEX_DELETE(&target->HTCTxLock);
- }
- /* free our instance */
- kfree(target);
-}
-
-/* registered target arrival callback from the HIF layer */
-HTC_HANDLE HTCCreate(void *hif_handle, struct htc_init_info *pInfo)
-{
- struct htc_target *target = NULL;
- int status = 0;
- int i;
- u32 ctrl_bufsz;
- u32 blocksizes[HTC_MAILBOX_NUM_MAX];
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCCreate - Enter\n"));
-
- A_REGISTER_MODULE_DEBUG_INFO(htc);
-
- do {
-
- /* allocate target memory */
- if ((target = (struct htc_target *)A_MALLOC(sizeof(struct htc_target))) == NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to allocate memory\n"));
- status = A_ERROR;
- break;
- }
-
- A_MEMZERO(target, sizeof(struct htc_target));
- A_MUTEX_INIT(&target->HTCLock);
- A_MUTEX_INIT(&target->HTCRxLock);
- A_MUTEX_INIT(&target->HTCTxLock);
- INIT_HTC_PACKET_QUEUE(&target->ControlBufferTXFreeList);
- INIT_HTC_PACKET_QUEUE(&target->ControlBufferRXFreeList);
-
- /* give device layer the hif device handle */
- target->Device.HIFDevice = hif_handle;
- /* give the device layer our context (for event processing)
- * the device layer will register it's own context with HIF
- * so we need to set this so we can fetch it in the target remove handler */
- target->Device.HTCContext = target;
- /* set device layer target failure callback */
- target->Device.TargetFailureCallback = HTCReportFailure;
- /* set device layer recv message pending callback */
- target->Device.MessagePendingCallback = HTCRecvMessagePendingHandler;
- target->EpWaitingForBuffers = ENDPOINT_MAX;
-
- memcpy(&target->HTCInitInfo,pInfo,sizeof(struct htc_init_info));
-
- ResetEndpointStates(target);
-
- /* setup device layer */
- status = DevSetup(&target->Device);
-
- if (status) {
- break;
- }
-
-
- /* get the block sizes */
- status = HIFConfigureDevice(hif_handle, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
- blocksizes, sizeof(blocksizes));
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to get block size info from HIF layer...\n"));
- break;
- }
-
- /* Set the control buffer size based on the block size */
- if (blocksizes[1] > HTC_MAX_CONTROL_MESSAGE_LENGTH) {
- ctrl_bufsz = blocksizes[1] + HTC_HDR_LENGTH;
- } else {
- ctrl_bufsz = HTC_MAX_CONTROL_MESSAGE_LENGTH + HTC_HDR_LENGTH;
- }
- for (i = 0;i < NUM_CONTROL_BUFFERS;i++) {
- target->HTCControlBuffers[i].Buffer = A_MALLOC(ctrl_bufsz);
- if (target->HTCControlBuffers[i].Buffer == NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to allocate memory\n"));
- status = A_ERROR;
- break;
- }
- }
-
- if (status) {
- break;
- }
-
- /* carve up buffers/packets for control messages */
- for (i = 0; i < NUM_CONTROL_RX_BUFFERS; i++) {
- struct htc_packet *pControlPacket;
- pControlPacket = &target->HTCControlBuffers[i].HtcPacket;
- SET_HTC_PACKET_INFO_RX_REFILL(pControlPacket,
- target,
- target->HTCControlBuffers[i].Buffer,
- ctrl_bufsz,
- ENDPOINT_0);
- HTC_FREE_CONTROL_RX(target,pControlPacket);
- }
-
- for (;i < NUM_CONTROL_BUFFERS;i++) {
- struct htc_packet *pControlPacket;
- pControlPacket = &target->HTCControlBuffers[i].HtcPacket;
- INIT_HTC_PACKET_INFO(pControlPacket,
- target->HTCControlBuffers[i].Buffer,
- ctrl_bufsz);
- HTC_FREE_CONTROL_TX(target,pControlPacket);
- }
-
- } while (false);
-
- if (status) {
- if (target != NULL) {
- HTCCleanup(target);
- target = NULL;
- }
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCCreate - Exit\n"));
-
- return target;
-}
-
-void HTCDestroy(HTC_HANDLE HTCHandle)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+HTCDestroy .. Destroying :0x%lX \n",(unsigned long)target));
- HTCCleanup(target);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-HTCDestroy \n"));
-}
-
-/* get the low level HIF device for the caller , the caller may wish to do low level
- * HIF requests */
-void *HTCGetHifDevice(HTC_HANDLE HTCHandle)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- return target->Device.HIFDevice;
-}
-
-/* wait for the target to arrive (sends HTC Ready message)
- * this operation is fully synchronous and the message is polled for */
-int HTCWaitTarget(HTC_HANDLE HTCHandle)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- int status;
- struct htc_packet *pPacket = NULL;
- HTC_READY_EX_MSG *pRdyMsg;
-
- struct htc_service_connect_req connect;
- struct htc_service_connect_resp resp;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCWaitTarget - Enter (target:0x%lX) \n", (unsigned long)target));
-
- do {
-
-#ifdef MBOXHW_UNIT_TEST
-
- status = DoMboxHWTest(&target->Device);
-
- if (status) {
- break;
- }
-
-#endif
-
- /* we should be getting 1 control message that the target is ready */
- status = HTCWaitforControlMessage(target, &pPacket);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (" Target Not Available!!\n"));
- break;
- }
-
- /* we controlled the buffer creation so it has to be properly aligned */
- pRdyMsg = (HTC_READY_EX_MSG *)pPacket->pBuffer;
-
- if ((pRdyMsg->Version2_0_Info.MessageID != HTC_MSG_READY_ID) ||
- (pPacket->ActualLength < sizeof(HTC_READY_MSG))) {
- /* this message is not valid */
- AR_DEBUG_ASSERT(false);
- status = A_EPROTO;
- break;
- }
-
-
- if (pRdyMsg->Version2_0_Info.CreditCount == 0 || pRdyMsg->Version2_0_Info.CreditSize == 0) {
- /* this message is not valid */
- AR_DEBUG_ASSERT(false);
- status = A_EPROTO;
- break;
- }
-
- target->TargetCredits = pRdyMsg->Version2_0_Info.CreditCount;
- target->TargetCreditSize = pRdyMsg->Version2_0_Info.CreditSize;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, (" Target Ready: credits: %d credit size: %d\n",
- target->TargetCredits, target->TargetCreditSize));
-
- /* check if this is an extended ready message */
- if (pPacket->ActualLength >= sizeof(HTC_READY_EX_MSG)) {
- /* this is an extended message */
- target->HTCTargetVersion = pRdyMsg->HTCVersion;
- target->MaxMsgPerBundle = pRdyMsg->MaxMsgsPerHTCBundle;
- } else {
- /* legacy */
- target->HTCTargetVersion = HTC_VERSION_2P0;
- target->MaxMsgPerBundle = 0;
- }
-
-#ifdef HTC_FORCE_LEGACY_2P0
- /* for testing and comparison...*/
- target->HTCTargetVersion = HTC_VERSION_2P0;
- target->MaxMsgPerBundle = 0;
-#endif
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
- ("Using HTC Protocol Version : %s (%d)\n ",
- (target->HTCTargetVersion == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
- target->HTCTargetVersion));
-
- if (target->MaxMsgPerBundle > 0) {
- /* limit what HTC can handle */
- target->MaxMsgPerBundle = min(HTC_HOST_MAX_MSG_PER_BUNDLE, target->MaxMsgPerBundle);
- /* target supports message bundling, setup device layer */
- if (DevSetupMsgBundling(&target->Device,target->MaxMsgPerBundle)) {
- /* device layer can't handle bundling */
- target->MaxMsgPerBundle = 0;
- } else {
- /* limit bundle what the device layer can handle */
- target->MaxMsgPerBundle = min(DEV_GET_MAX_MSG_PER_BUNDLE(&target->Device),
- target->MaxMsgPerBundle);
- }
- }
-
- if (target->MaxMsgPerBundle > 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
- (" HTC bundling allowed. Max Msg Per HTC Bundle: %d\n", target->MaxMsgPerBundle));
-
- if (DEV_GET_MAX_BUNDLE_SEND_LENGTH(&target->Device) != 0) {
- target->SendBundlingEnabled = true;
- }
- if (DEV_GET_MAX_BUNDLE_RECV_LENGTH(&target->Device) != 0) {
- target->RecvBundlingEnabled = true;
- }
-
- if (!DEV_IS_LEN_BLOCK_ALIGNED(&target->Device,target->TargetCreditSize)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("*** Credit size: %d is not block aligned! Disabling send bundling \n",
- target->TargetCreditSize));
- /* disallow send bundling since the credit size is not aligned to a block size
- * the I/O block padding will spill into the next credit buffer which is fatal */
- target->SendBundlingEnabled = false;
- }
- }
-
- /* setup our pseudo HTC control endpoint connection */
- A_MEMZERO(&connect,sizeof(connect));
- A_MEMZERO(&resp,sizeof(resp));
- connect.EpCallbacks.pContext = target;
- connect.EpCallbacks.EpTxComplete = HTCControlTxComplete;
- connect.EpCallbacks.EpRecv = HTCControlRecv;
- connect.EpCallbacks.EpRecvRefill = NULL; /* not needed */
- connect.EpCallbacks.EpSendFull = NULL; /* not nedded */
- connect.MaxSendQueueDepth = NUM_CONTROL_BUFFERS;
- connect.ServiceID = HTC_CTRL_RSVD_SVC;
-
- /* connect fake service */
- status = HTCConnectService((HTC_HANDLE)target,
- &connect,
- &resp);
-
- if (!status) {
- break;
- }
-
- } while (false);
-
- if (pPacket != NULL) {
- HTC_FREE_CONTROL_RX(target,pPacket);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCWaitTarget - Exit\n"));
-
- return status;
-}
-
-
-
-/* Start HTC, enable interrupts and let the target know host has finished setup */
-int HTCStart(HTC_HANDLE HTCHandle)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- struct htc_packet *pPacket;
- int status;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCStart Enter\n"));
-
- /* make sure interrupts are disabled at the chip level,
- * this function can be called again from a reboot of the target without shutting down HTC */
- DevDisableInterrupts(&target->Device);
- /* make sure state is cleared again */
- target->OpStateFlags = 0;
- target->RecvStateFlags = 0;
-
- /* now that we are starting, push control receive buffers into the
- * HTC control endpoint */
-
- while (1) {
- pPacket = HTC_ALLOC_CONTROL_RX(target);
- if (NULL == pPacket) {
- break;
- }
- HTCAddReceivePkt((HTC_HANDLE)target,pPacket);
- }
-
- do {
-
- AR_DEBUG_ASSERT(target->InitCredits != NULL);
- AR_DEBUG_ASSERT(target->EpCreditDistributionListHead != NULL);
- AR_DEBUG_ASSERT(target->EpCreditDistributionListHead->pNext != NULL);
-
- /* call init credits callback to do the distribution ,
- * NOTE: the first entry in the distribution list is ENDPOINT_0, so
- * we pass the start of the list after this one. */
- target->InitCredits(target->pCredDistContext,
- target->EpCreditDistributionListHead->pNext,
- target->TargetCredits);
-
-#ifdef ATH_DEBUG_MODULE
-
- if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_TRC)) {
- DumpCreditDistStates(target);
- }
-#endif
-
- /* the caller is done connecting to services, so we can indicate to the
- * target that the setup phase is complete */
- status = HTCSendSetupComplete(target);
-
- if (status) {
- break;
- }
-
- /* unmask interrupts */
- status = DevUnmaskInterrupts(&target->Device);
-
- if (status) {
- HTCStop(target);
- }
-
- } while (false);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCStart Exit\n"));
- return status;
-}
-
-static void ResetEndpointStates(struct htc_target *target)
-{
- struct htc_endpoint *pEndpoint;
- int i;
-
- for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
- pEndpoint = &target->EndPoint[i];
-
- A_MEMZERO(&pEndpoint->CreditDist, sizeof(pEndpoint->CreditDist));
- pEndpoint->ServiceID = 0;
- pEndpoint->MaxMsgLength = 0;
- pEndpoint->MaxTxQueueDepth = 0;
- A_MEMZERO(&pEndpoint->EndPointStats,sizeof(pEndpoint->EndPointStats));
- INIT_HTC_PACKET_QUEUE(&pEndpoint->RxBuffers);
- INIT_HTC_PACKET_QUEUE(&pEndpoint->TxQueue);
- INIT_HTC_PACKET_QUEUE(&pEndpoint->RecvIndicationQueue);
- pEndpoint->target = target;
- }
- /* reset distribution list */
- target->EpCreditDistributionListHead = NULL;
-}
-
-/* stop HTC communications, i.e. stop interrupt reception, and flush all queued buffers */
-void HTCStop(HTC_HANDLE HTCHandle)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+HTCStop \n"));
-
- LOCK_HTC(target);
- /* mark that we are shutting down .. */
- target->OpStateFlags |= HTC_OP_STATE_STOPPING;
- UNLOCK_HTC(target);
-
- /* Masking interrupts is a synchronous operation, when this function returns
- * all pending HIF I/O has completed, we can safely flush the queues */
- DevMaskInterrupts(&target->Device);
-
-#ifdef THREAD_X
- //
- // Is this delay required
- //
- A_MDELAY(200); // wait for IRQ process done
-#endif
- /* flush all send packets */
- HTCFlushSendPkts(target);
- /* flush all recv buffers */
- HTCFlushRecvBuffers(target);
-
- DevCleanupMsgBundling(&target->Device);
-
- ResetEndpointStates(target);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-HTCStop \n"));
-}
-
-#ifdef ATH_DEBUG_MODULE
-void HTCDumpCreditStates(HTC_HANDLE HTCHandle)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
-
- LOCK_HTC_TX(target);
-
- DumpCreditDistStates(target);
-
- UNLOCK_HTC_TX(target);
-
- DumpAR6KDevState(&target->Device);
-}
-#endif
-/* report a target failure from the device, this is a callback from the device layer
- * which uses a mechanism to report errors from the target (i.e. special interrupts) */
-static void HTCReportFailure(void *Context)
-{
- struct htc_target *target = (struct htc_target *)Context;
-
- target->TargetFailure = true;
-
- if (target->HTCInitInfo.TargetFailure != NULL) {
- /* let upper layer know, it needs to call HTCStop() */
- target->HTCInitInfo.TargetFailure(target->HTCInitInfo.pContext, A_ERROR);
- }
-}
-
-bool HTCGetEndpointStatistics(HTC_HANDLE HTCHandle,
- HTC_ENDPOINT_ID Endpoint,
- HTC_ENDPOINT_STAT_ACTION Action,
- struct htc_endpoint_stats *pStats)
-{
-
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- bool clearStats = false;
- bool sample = false;
-
- switch (Action) {
- case HTC_EP_STAT_SAMPLE :
- sample = true;
- break;
- case HTC_EP_STAT_SAMPLE_AND_CLEAR :
- sample = true;
- clearStats = true;
- break;
- case HTC_EP_STAT_CLEAR :
- clearStats = true;
- break;
- default:
- break;
- }
-
- A_ASSERT(Endpoint < ENDPOINT_MAX);
-
- /* lock out TX and RX while we sample and/or clear */
- LOCK_HTC_TX(target);
- LOCK_HTC_RX(target);
-
- if (sample) {
- A_ASSERT(pStats != NULL);
- /* return the stats to the caller */
- memcpy(pStats, &target->EndPoint[Endpoint].EndPointStats, sizeof(struct htc_endpoint_stats));
- }
-
- if (clearStats) {
- /* reset stats */
- A_MEMZERO(&target->EndPoint[Endpoint].EndPointStats, sizeof(struct htc_endpoint_stats));
- }
-
- UNLOCK_HTC_RX(target);
- UNLOCK_HTC_TX(target);
-
- return true;
-}
-
-struct ar6k_device *HTCGetAR6KDevice(void *HTCHandle)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- return &target->Device;
-}
-
diff --git a/drivers/staging/ath6kl/htc2/htc_debug.h b/drivers/staging/ath6kl/htc2/htc_debug.h
deleted file mode 100644
index 8455703e221..00000000000
--- a/drivers/staging/ath6kl/htc2/htc_debug.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="htc_debug.h" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef HTC_DEBUG_H_
-#define HTC_DEBUG_H_
-
-#define ATH_MODULE_NAME htc
-#include "a_debug.h"
-
-/* ------- Debug related stuff ------- */
-
-#define ATH_DEBUG_SEND ATH_DEBUG_MAKE_MODULE_MASK(0)
-#define ATH_DEBUG_RECV ATH_DEBUG_MAKE_MODULE_MASK(1)
-#define ATH_DEBUG_SYNC ATH_DEBUG_MAKE_MODULE_MASK(2)
-#define ATH_DEBUG_DUMP ATH_DEBUG_MAKE_MODULE_MASK(3)
-#define ATH_DEBUG_IRQ ATH_DEBUG_MAKE_MODULE_MASK(4)
-
-
-#endif /*HTC_DEBUG_H_*/
diff --git a/drivers/staging/ath6kl/htc2/htc_internal.h b/drivers/staging/ath6kl/htc2/htc_internal.h
deleted file mode 100644
index cac97351769..00000000000
--- a/drivers/staging/ath6kl/htc2/htc_internal.h
+++ /dev/null
@@ -1,211 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="htc_internal.h" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _HTC_INTERNAL_H_
-#define _HTC_INTERNAL_H_
-
-/* for debugging, uncomment this to capture the last frame header, on frame header
- * processing errors, the last frame header is dump for comparison */
-//#define HTC_CAPTURE_LAST_FRAME
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/* Header files */
-
-#include "a_config.h"
-#include "athdefs.h"
-#include "a_osapi.h"
-#include "htc_debug.h"
-#include "htc.h"
-#include "htc_api.h"
-#include "bmi_msg.h"
-#include "hif.h"
-#include "AR6000/ar6k.h"
-
-/* HTC operational parameters */
-#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */
-#define HTC_TARGET_DEBUG_INTR_MASK 0x01
-#define HTC_TARGET_CREDIT_INTR_MASK 0xF0
-
-#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
-#define HTC_MIN_HTC_MSGS_TO_BUNDLE 2
-
-/* packet flags */
-
-#define HTC_RX_PKT_IGNORE_LOOKAHEAD (1 << 0)
-#define HTC_RX_PKT_REFRESH_HDR (1 << 1)
-#define HTC_RX_PKT_PART_OF_BUNDLE (1 << 2)
-#define HTC_RX_PKT_NO_RECYCLE (1 << 3)
-
-/* scatter request flags */
-
-#define HTC_SCATTER_REQ_FLAGS_PARTIAL_BUNDLE (1 << 0)
-
-struct htc_endpoint {
- HTC_ENDPOINT_ID Id;
- HTC_SERVICE_ID ServiceID; /* service ID this endpoint is bound to
- non-zero value means this endpoint is in use */
- struct htc_packet_queue TxQueue; /* HTC frame buffer TX queue */
- struct htc_packet_queue RxBuffers; /* HTC frame buffer RX list */
- struct htc_endpoint_credit_dist CreditDist; /* credit distribution structure (exposed to driver layer) */
- struct htc_ep_callbacks EpCallBacks; /* callbacks associated with this endpoint */
- int MaxTxQueueDepth; /* max depth of the TX queue before we need to
- call driver's full handler */
- int MaxMsgLength; /* max length of endpoint message */
- int TxProcessCount; /* reference count to continue tx processing */
- struct htc_packet_queue RecvIndicationQueue; /* recv packets ready to be indicated */
- int RxProcessCount; /* reference count to allow single processing context */
- struct htc_target *target; /* back pointer to target */
- u8 SeqNo; /* TX seq no (helpful) for debugging */
- u32 LocalConnectionFlags; /* local connection flags */
- struct htc_endpoint_stats EndPointStats; /* endpoint statistics */
-};
-
-#define INC_HTC_EP_STAT(p,stat,count) (p)->EndPointStats.stat += (count);
-#define HTC_SERVICE_TX_PACKET_TAG HTC_TX_PACKET_TAG_INTERNAL
-
-#define NUM_CONTROL_BUFFERS 8
-#define NUM_CONTROL_TX_BUFFERS 2
-#define NUM_CONTROL_RX_BUFFERS (NUM_CONTROL_BUFFERS - NUM_CONTROL_TX_BUFFERS)
-
-struct htc_control_buffer {
- struct htc_packet HtcPacket;
- u8 *Buffer;
-};
-
-#define HTC_RECV_WAIT_BUFFERS (1 << 0)
-#define HTC_OP_STATE_STOPPING (1 << 0)
-
-/* our HTC target state */
-struct htc_target {
- struct htc_endpoint EndPoint[ENDPOINT_MAX];
- struct htc_control_buffer HTCControlBuffers[NUM_CONTROL_BUFFERS];
- struct htc_endpoint_credit_dist *EpCreditDistributionListHead;
- struct htc_packet_queue ControlBufferTXFreeList;
- struct htc_packet_queue ControlBufferRXFreeList;
- HTC_CREDIT_DIST_CALLBACK DistributeCredits;
- HTC_CREDIT_INIT_CALLBACK InitCredits;
- void *pCredDistContext;
- int TargetCredits;
- unsigned int TargetCreditSize;
- A_MUTEX_T HTCLock;
- A_MUTEX_T HTCRxLock;
- A_MUTEX_T HTCTxLock;
- struct ar6k_device Device; /* AR6K - specific state */
- u32 OpStateFlags;
- u32 RecvStateFlags;
- HTC_ENDPOINT_ID EpWaitingForBuffers;
- bool TargetFailure;
-#ifdef HTC_CAPTURE_LAST_FRAME
- struct htc_frame_hdr LastFrameHdr; /* useful for debugging */
- u8 LastTrailer[256];
- u8 LastTrailerLength;
-#endif
- struct htc_init_info HTCInitInfo;
- u8 HTCTargetVersion;
- int MaxMsgPerBundle; /* max messages per bundle for HTC */
- bool SendBundlingEnabled; /* run time enable for send bundling (dynamic) */
- int RecvBundlingEnabled; /* run time enable for recv bundling (dynamic) */
-};
-
-#define HTC_STOPPING(t) ((t)->OpStateFlags & HTC_OP_STATE_STOPPING)
-#define LOCK_HTC(t) A_MUTEX_LOCK(&(t)->HTCLock);
-#define UNLOCK_HTC(t) A_MUTEX_UNLOCK(&(t)->HTCLock);
-#define LOCK_HTC_RX(t) A_MUTEX_LOCK(&(t)->HTCRxLock);
-#define UNLOCK_HTC_RX(t) A_MUTEX_UNLOCK(&(t)->HTCRxLock);
-#define LOCK_HTC_TX(t) A_MUTEX_LOCK(&(t)->HTCTxLock);
-#define UNLOCK_HTC_TX(t) A_MUTEX_UNLOCK(&(t)->HTCTxLock);
-
-#define GET_HTC_TARGET_FROM_HANDLE(hnd) ((struct htc_target *)(hnd))
-#define HTC_RECYCLE_RX_PKT(target,p,e) \
-{ \
- if ((p)->PktInfo.AsRx.HTCRxFlags & HTC_RX_PKT_NO_RECYCLE) { \
- HTC_PACKET_RESET_RX(pPacket); \
- pPacket->Status = A_ECANCELED; \
- (e)->EpCallBacks.EpRecv((e)->EpCallBacks.pContext, \
- (p)); \
- } else { \
- HTC_PACKET_RESET_RX(pPacket); \
- HTCAddReceivePkt((HTC_HANDLE)(target),(p)); \
- } \
-}
-
-/* internal HTC functions */
-void HTCControlTxComplete(void *Context, struct htc_packet *pPacket);
-void HTCControlRecv(void *Context, struct htc_packet *pPacket);
-int HTCWaitforControlMessage(struct htc_target *target, struct htc_packet **ppControlPacket);
-struct htc_packet *HTCAllocControlBuffer(struct htc_target *target, struct htc_packet_queue *pList);
-void HTCFreeControlBuffer(struct htc_target *target, struct htc_packet *pPacket, struct htc_packet_queue *pList);
-int HTCIssueSend(struct htc_target *target, struct htc_packet *pPacket);
-void HTCRecvCompleteHandler(void *Context, struct htc_packet *pPacket);
-int HTCRecvMessagePendingHandler(void *Context, u32 MsgLookAheads[], int NumLookAheads, bool *pAsyncProc, int *pNumPktsFetched);
-void HTCProcessCreditRpt(struct htc_target *target, HTC_CREDIT_REPORT *pRpt, int NumEntries, HTC_ENDPOINT_ID FromEndpoint);
-int HTCSendSetupComplete(struct htc_target *target);
-void HTCFlushRecvBuffers(struct htc_target *target);
-void HTCFlushSendPkts(struct htc_target *target);
-
-#ifdef ATH_DEBUG_MODULE
-void DumpCreditDist(struct htc_endpoint_credit_dist *pEPDist);
-void DumpCreditDistStates(struct htc_target *target);
-void DebugDumpBytes(u8 *buffer, u16 length, char *pDescription);
-#endif
-
-static INLINE struct htc_packet *HTC_ALLOC_CONTROL_TX(struct htc_target *target) {
- struct htc_packet *pPacket = HTCAllocControlBuffer(target,&target->ControlBufferTXFreeList);
- if (pPacket != NULL) {
- /* set payload pointer area with some headroom */
- pPacket->pBuffer = pPacket->pBufferStart + HTC_HDR_LENGTH;
- }
- return pPacket;
-}
-
-#define HTC_FREE_CONTROL_TX(t,p) HTCFreeControlBuffer((t),(p),&(t)->ControlBufferTXFreeList)
-#define HTC_ALLOC_CONTROL_RX(t) HTCAllocControlBuffer((t),&(t)->ControlBufferRXFreeList)
-#define HTC_FREE_CONTROL_RX(t,p) \
-{ \
- HTC_PACKET_RESET_RX(p); \
- HTCFreeControlBuffer((t),(p),&(t)->ControlBufferRXFreeList); \
-}
-
-#define HTC_PREPARE_SEND_PKT(pP,sendflags,ctrl0,ctrl1) \
-{ \
- u8 *pHdrBuf; \
- (pP)->pBuffer -= HTC_HDR_LENGTH; \
- pHdrBuf = (pP)->pBuffer; \
- A_SET_UINT16_FIELD(pHdrBuf,struct htc_frame_hdr,PayloadLen,(u16)(pP)->ActualLength); \
- A_SET_UINT8_FIELD(pHdrBuf,struct htc_frame_hdr,Flags,(sendflags)); \
- A_SET_UINT8_FIELD(pHdrBuf,struct htc_frame_hdr,EndpointID, (u8)(pP)->Endpoint); \
- A_SET_UINT8_FIELD(pHdrBuf,struct htc_frame_hdr,ControlBytes[0], (u8)(ctrl0)); \
- A_SET_UINT8_FIELD(pHdrBuf,struct htc_frame_hdr,ControlBytes[1], (u8)(ctrl1)); \
-}
-
-#define HTC_UNPREPARE_SEND_PKT(pP) \
- (pP)->pBuffer += HTC_HDR_LENGTH; \
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _HTC_INTERNAL_H_ */
diff --git a/drivers/staging/ath6kl/htc2/htc_recv.c b/drivers/staging/ath6kl/htc2/htc_recv.c
deleted file mode 100644
index 974cc8cd693..00000000000
--- a/drivers/staging/ath6kl/htc2/htc_recv.c
+++ /dev/null
@@ -1,1572 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="htc_recv.c" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#include "htc_internal.h"
-
-#define HTCIssueRecv(t, p) \
- DevRecvPacket(&(t)->Device, \
- (p), \
- (p)->ActualLength)
-
-#define DO_RCV_COMPLETION(e,q) DoRecvCompletion(e,q)
-
-#define DUMP_RECV_PKT_INFO(pP) \
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, (" HTC RECV packet 0x%lX (%d bytes) (hdr:0x%X) on ep : %d \n", \
- (unsigned long)(pP), \
- (pP)->ActualLength, \
- (pP)->PktInfo.AsRx.ExpectedHdr, \
- (pP)->Endpoint))
-
-#define HTC_RX_STAT_PROFILE(t,ep,numLookAheads) \
-{ \
- INC_HTC_EP_STAT((ep), RxReceived, 1); \
- if ((numLookAheads) == 1) { \
- INC_HTC_EP_STAT((ep), RxLookAheads, 1); \
- } else if ((numLookAheads) > 1) { \
- INC_HTC_EP_STAT((ep), RxBundleLookAheads, 1); \
- } \
-}
-
-static void DoRecvCompletion(struct htc_endpoint *pEndpoint,
- struct htc_packet_queue *pQueueToIndicate)
-{
-
- do {
-
- if (HTC_QUEUE_EMPTY(pQueueToIndicate)) {
- /* nothing to indicate */
- break;
- }
-
- if (pEndpoint->EpCallBacks.EpRecvPktMultiple != NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, (" HTC calling ep %d, recv multiple callback (%d pkts) \n",
- pEndpoint->Id, HTC_PACKET_QUEUE_DEPTH(pQueueToIndicate)));
- /* a recv multiple handler is being used, pass the queue to the handler */
- pEndpoint->EpCallBacks.EpRecvPktMultiple(pEndpoint->EpCallBacks.pContext,
- pQueueToIndicate);
- INIT_HTC_PACKET_QUEUE(pQueueToIndicate);
- } else {
- struct htc_packet *pPacket;
- /* using legacy EpRecv */
- do {
- pPacket = HTC_PACKET_DEQUEUE(pQueueToIndicate);
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, (" HTC calling ep %d recv callback on packet 0x%lX \n", \
- pEndpoint->Id, (unsigned long)(pPacket)));
- pEndpoint->EpCallBacks.EpRecv(pEndpoint->EpCallBacks.pContext, pPacket);
- } while (!HTC_QUEUE_EMPTY(pQueueToIndicate));
- }
-
- } while (false);
-
-}
-
-static INLINE int HTCProcessTrailer(struct htc_target *target,
- u8 *pBuffer,
- int Length,
- u32 *pNextLookAheads,
- int *pNumLookAheads,
- HTC_ENDPOINT_ID FromEndpoint)
-{
- HTC_RECORD_HDR *pRecord;
- u8 *pRecordBuf;
- HTC_LOOKAHEAD_REPORT *pLookAhead;
- u8 *pOrigBuffer;
- int origLength;
- int status;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("+HTCProcessTrailer (length:%d) \n", Length));
-
- if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
- AR_DEBUG_PRINTBUF(pBuffer,Length,"Recv Trailer");
- }
-
- pOrigBuffer = pBuffer;
- origLength = Length;
- status = 0;
-
- while (Length > 0) {
-
- if (Length < sizeof(HTC_RECORD_HDR)) {
- status = A_EPROTO;
- break;
- }
- /* these are byte aligned structs */
- pRecord = (HTC_RECORD_HDR *)pBuffer;
- Length -= sizeof(HTC_RECORD_HDR);
- pBuffer += sizeof(HTC_RECORD_HDR);
-
- if (pRecord->Length > Length) {
- /* no room left in buffer for record */
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- (" invalid record length: %d (id:%d) buffer has: %d bytes left \n",
- pRecord->Length, pRecord->RecordID, Length));
- status = A_EPROTO;
- break;
- }
- /* start of record follows the header */
- pRecordBuf = pBuffer;
-
- switch (pRecord->RecordID) {
- case HTC_RECORD_CREDITS:
- AR_DEBUG_ASSERT(pRecord->Length >= sizeof(HTC_CREDIT_REPORT));
- HTCProcessCreditRpt(target,
- (HTC_CREDIT_REPORT *)pRecordBuf,
- pRecord->Length / (sizeof(HTC_CREDIT_REPORT)),
- FromEndpoint);
- break;
- case HTC_RECORD_LOOKAHEAD:
- AR_DEBUG_ASSERT(pRecord->Length >= sizeof(HTC_LOOKAHEAD_REPORT));
- pLookAhead = (HTC_LOOKAHEAD_REPORT *)pRecordBuf;
- if ((pLookAhead->PreValid == ((~pLookAhead->PostValid) & 0xFF)) &&
- (pNextLookAheads != NULL)) {
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- (" LookAhead Report Found (pre valid:0x%X, post valid:0x%X) \n",
- pLookAhead->PreValid,
- pLookAhead->PostValid));
-
- /* look ahead bytes are valid, copy them over */
- ((u8 *)(&pNextLookAheads[0]))[0] = pLookAhead->LookAhead[0];
- ((u8 *)(&pNextLookAheads[0]))[1] = pLookAhead->LookAhead[1];
- ((u8 *)(&pNextLookAheads[0]))[2] = pLookAhead->LookAhead[2];
- ((u8 *)(&pNextLookAheads[0]))[3] = pLookAhead->LookAhead[3];
-
-#ifdef ATH_DEBUG_MODULE
- if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
- DebugDumpBytes((u8 *)pNextLookAheads,4,"Next Look Ahead");
- }
-#endif
- /* just one normal lookahead */
- *pNumLookAheads = 1;
- }
- break;
- case HTC_RECORD_LOOKAHEAD_BUNDLE:
- AR_DEBUG_ASSERT(pRecord->Length >= sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT));
- if (pRecord->Length >= sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT) &&
- (pNextLookAheads != NULL)) {
- HTC_BUNDLED_LOOKAHEAD_REPORT *pBundledLookAheadRpt;
- int i;
-
- pBundledLookAheadRpt = (HTC_BUNDLED_LOOKAHEAD_REPORT *)pRecordBuf;
-
-#ifdef ATH_DEBUG_MODULE
- if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
- DebugDumpBytes(pRecordBuf,pRecord->Length,"Bundle LookAhead");
- }
-#endif
-
- if ((pRecord->Length / (sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT))) >
- HTC_HOST_MAX_MSG_PER_BUNDLE) {
- /* this should never happen, the target restricts the number
- * of messages per bundle configured by the host */
- A_ASSERT(false);
- status = A_EPROTO;
- break;
- }
-
- for (i = 0; i < (int)(pRecord->Length / (sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT))); i++) {
- ((u8 *)(&pNextLookAheads[i]))[0] = pBundledLookAheadRpt->LookAhead[0];
- ((u8 *)(&pNextLookAheads[i]))[1] = pBundledLookAheadRpt->LookAhead[1];
- ((u8 *)(&pNextLookAheads[i]))[2] = pBundledLookAheadRpt->LookAhead[2];
- ((u8 *)(&pNextLookAheads[i]))[3] = pBundledLookAheadRpt->LookAhead[3];
- pBundledLookAheadRpt++;
- }
-
- *pNumLookAheads = i;
- }
- break;
- default:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (" unhandled record: id:%d length:%d \n",
- pRecord->RecordID, pRecord->Length));
- break;
- }
-
- if (status) {
- break;
- }
-
- /* advance buffer past this record for next time around */
- pBuffer += pRecord->Length;
- Length -= pRecord->Length;
- }
-
-#ifdef ATH_DEBUG_MODULE
- if (status) {
- DebugDumpBytes(pOrigBuffer,origLength,"BAD Recv Trailer");
- }
-#endif
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCProcessTrailer \n"));
- return status;
-
-}
-
-/* process a received message (i.e. strip off header, process any trailer data)
- * note : locks must be released when this function is called */
-static int HTCProcessRecvHeader(struct htc_target *target,
- struct htc_packet *pPacket,
- u32 *pNextLookAheads,
- int *pNumLookAheads)
-{
- u8 temp;
- u8 *pBuf;
- int status = 0;
- u16 payloadLen;
- u32 lookAhead;
-
- pBuf = pPacket->pBuffer;
-
- if (pNumLookAheads != NULL) {
- *pNumLookAheads = 0;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("+HTCProcessRecvHeader \n"));
-
- if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
- AR_DEBUG_PRINTBUF(pBuf,pPacket->ActualLength,"HTC Recv PKT");
- }
-
- do {
- /* note, we cannot assume the alignment of pBuffer, so we use the safe macros to
- * retrieve 16 bit fields */
- payloadLen = A_GET_UINT16_FIELD(pBuf, struct htc_frame_hdr, PayloadLen);
-
- ((u8 *)&lookAhead)[0] = pBuf[0];
- ((u8 *)&lookAhead)[1] = pBuf[1];
- ((u8 *)&lookAhead)[2] = pBuf[2];
- ((u8 *)&lookAhead)[3] = pBuf[3];
-
- if (pPacket->PktInfo.AsRx.HTCRxFlags & HTC_RX_PKT_REFRESH_HDR) {
- /* refresh expected hdr, since this was unknown at the time we grabbed the packets
- * as part of a bundle */
- pPacket->PktInfo.AsRx.ExpectedHdr = lookAhead;
- /* refresh actual length since we now have the real header */
- pPacket->ActualLength = payloadLen + HTC_HDR_LENGTH;
-
- /* validate the actual header that was refreshed */
- if (pPacket->ActualLength > pPacket->BufferLength) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("Refreshed HDR payload length (%d) in bundled RECV is invalid (hdr: 0x%X) \n",
- payloadLen, lookAhead));
- /* limit this to max buffer just to print out some of the buffer */
- pPacket->ActualLength = min(pPacket->ActualLength, pPacket->BufferLength);
- status = A_EPROTO;
- break;
- }
-
- if (pPacket->Endpoint != A_GET_UINT8_FIELD(pBuf, struct htc_frame_hdr, EndpointID)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("Refreshed HDR endpoint (%d) does not match expected endpoint (%d) \n",
- A_GET_UINT8_FIELD(pBuf, struct htc_frame_hdr, EndpointID), pPacket->Endpoint));
- status = A_EPROTO;
- break;
- }
- }
-
- if (lookAhead != pPacket->PktInfo.AsRx.ExpectedHdr) {
- /* somehow the lookahead that gave us the full read length did not
- * reflect the actual header in the pending message */
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("HTCProcessRecvHeader, lookahead mismatch! (pPkt:0x%lX flags:0x%X) \n",
- (unsigned long)pPacket, pPacket->PktInfo.AsRx.HTCRxFlags));
-#ifdef ATH_DEBUG_MODULE
- DebugDumpBytes((u8 *)&pPacket->PktInfo.AsRx.ExpectedHdr,4,"Expected Message LookAhead");
- DebugDumpBytes(pBuf,sizeof(struct htc_frame_hdr),"Current Frame Header");
-#ifdef HTC_CAPTURE_LAST_FRAME
- DebugDumpBytes((u8 *)&target->LastFrameHdr,sizeof(struct htc_frame_hdr),"Last Frame Header");
- if (target->LastTrailerLength != 0) {
- DebugDumpBytes(target->LastTrailer,
- target->LastTrailerLength,
- "Last trailer");
- }
-#endif
-#endif
- status = A_EPROTO;
- break;
- }
-
- /* get flags */
- temp = A_GET_UINT8_FIELD(pBuf, struct htc_frame_hdr, Flags);
-
- if (temp & HTC_FLAGS_RECV_TRAILER) {
- /* this packet has a trailer */
-
- /* extract the trailer length in control byte 0 */
- temp = A_GET_UINT8_FIELD(pBuf, struct htc_frame_hdr, ControlBytes[0]);
-
- if ((temp < sizeof(HTC_RECORD_HDR)) || (temp > payloadLen)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("HTCProcessRecvHeader, invalid header (payloadlength should be :%d, CB[0] is:%d) \n",
- payloadLen, temp));
- status = A_EPROTO;
- break;
- }
-
- if (pPacket->PktInfo.AsRx.HTCRxFlags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
- /* this packet was fetched as part of an HTC bundle, the embedded lookahead is
- * not valid since the next packet may have already been fetched as part of the
- * bundle */
- pNextLookAheads = NULL;
- pNumLookAheads = NULL;
- }
-
- /* process trailer data that follows HDR + application payload */
- status = HTCProcessTrailer(target,
- (pBuf + HTC_HDR_LENGTH + payloadLen - temp),
- temp,
- pNextLookAheads,
- pNumLookAheads,
- pPacket->Endpoint);
-
- if (status) {
- break;
- }
-
-#ifdef HTC_CAPTURE_LAST_FRAME
- memcpy(target->LastTrailer, (pBuf + HTC_HDR_LENGTH + payloadLen - temp), temp);
- target->LastTrailerLength = temp;
-#endif
- /* trim length by trailer bytes */
- pPacket->ActualLength -= temp;
- }
-#ifdef HTC_CAPTURE_LAST_FRAME
- else {
- target->LastTrailerLength = 0;
- }
-#endif
-
- /* if we get to this point, the packet is good */
- /* remove header and adjust length */
- pPacket->pBuffer += HTC_HDR_LENGTH;
- pPacket->ActualLength -= HTC_HDR_LENGTH;
-
- } while (false);
-
- if (status) {
- /* dump the whole packet */
-#ifdef ATH_DEBUG_MODULE
- DebugDumpBytes(pBuf,pPacket->ActualLength < 256 ? pPacket->ActualLength : 256 ,"BAD HTC Recv PKT");
-#endif
- } else {
-#ifdef HTC_CAPTURE_LAST_FRAME
- memcpy(&target->LastFrameHdr,pBuf,sizeof(struct htc_frame_hdr));
-#endif
- if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
- if (pPacket->ActualLength > 0) {
- AR_DEBUG_PRINTBUF(pPacket->pBuffer,pPacket->ActualLength,"HTC - Application Msg");
- }
- }
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCProcessRecvHeader \n"));
- return status;
-}
-
-static INLINE void HTCAsyncRecvCheckMorePackets(struct htc_target *target,
- u32 NextLookAheads[],
- int NumLookAheads,
- bool CheckMoreMsgs)
-{
- /* was there a lookahead for the next packet? */
- if (NumLookAheads > 0) {
- int nextStatus;
- int fetched = 0;
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("HTCAsyncRecvCheckMorePackets - num lookaheads were non-zero : %d \n",
- NumLookAheads));
- /* force status re-check */
- REF_IRQ_STATUS_RECHECK(&target->Device);
- /* we have more packets, get the next packet fetch started */
- nextStatus = HTCRecvMessagePendingHandler(target, NextLookAheads, NumLookAheads, NULL, &fetched);
- if (A_EPROTO == nextStatus) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("Next look ahead from recv header was INVALID\n"));
-#ifdef ATH_DEBUG_MODULE
- DebugDumpBytes((u8 *)NextLookAheads,
- NumLookAheads * (sizeof(u32)),
- "BAD lookaheads from lookahead report");
-#endif
- }
- if (!nextStatus && !fetched) {
- /* we could not fetch any more packets due to resources */
- DevAsyncIrqProcessComplete(&target->Device);
- }
- } else {
- if (CheckMoreMsgs) {
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("HTCAsyncRecvCheckMorePackets - rechecking for more messages...\n"));
- /* if we did not get anything on the look-ahead,
- * call device layer to asynchronously re-check for messages. If we can keep the async
- * processing going we get better performance. If there is a pending message we will keep processing
- * messages asynchronously which should pipeline things nicely */
- DevCheckPendingRecvMsgsAsync(&target->Device);
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("HTCAsyncRecvCheckMorePackets - no check \n"));
- }
- }
-
-
-}
-
- /* unload the recv completion queue */
-static INLINE void DrainRecvIndicationQueue(struct htc_target *target, struct htc_endpoint *pEndpoint)
-{
- struct htc_packet_queue recvCompletions;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("+DrainRecvIndicationQueue \n"));
-
- INIT_HTC_PACKET_QUEUE(&recvCompletions);
-
- LOCK_HTC_RX(target);
-
- /* increment rx processing count on entry */
- pEndpoint->RxProcessCount++;
- if (pEndpoint->RxProcessCount > 1) {
- pEndpoint->RxProcessCount--;
- /* another thread or task is draining the RX completion queue on this endpoint
- * that thread will reset the rx processing count when the queue is drained */
- UNLOCK_HTC_RX(target);
- return;
- }
-
- /******* at this point only 1 thread may enter ******/
-
- while (true) {
-
- /* transfer items from main recv queue to the local one so we can release the lock */
- HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&recvCompletions, &pEndpoint->RecvIndicationQueue);
-
- if (HTC_QUEUE_EMPTY(&recvCompletions)) {
- /* all drained */
- break;
- }
-
- /* release lock while we do the recv completions
- * other threads can now queue more recv completions */
- UNLOCK_HTC_RX(target);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("DrainRecvIndicationQueue : completing %d RECV packets \n",
- HTC_PACKET_QUEUE_DEPTH(&recvCompletions)));
- /* do completion */
- DO_RCV_COMPLETION(pEndpoint,&recvCompletions);
-
- /* re-acquire lock to grab some more completions */
- LOCK_HTC_RX(target);
- }
-
- /* reset count */
- pEndpoint->RxProcessCount = 0;
- UNLOCK_HTC_RX(target);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-DrainRecvIndicationQueue \n"));
-
-}
-
- /* optimization for recv packets, we can indicate a "hint" that there are more
- * single-packets to fetch on this endpoint */
-#define SET_MORE_RX_PACKET_INDICATION_FLAG(L,N,E,P) \
- if ((N) > 0) { SetRxPacketIndicationFlags((L)[0],(E),(P)); }
-
- /* for bundled frames, we can force the flag to indicate there are more packets */
-#define FORCE_MORE_RX_PACKET_INDICATION_FLAG(P) \
- (P)->PktInfo.AsRx.IndicationFlags |= HTC_RX_FLAGS_INDICATE_MORE_PKTS;
-
- /* note: this function can be called with the RX lock held */
-static INLINE void SetRxPacketIndicationFlags(u32 LookAhead,
- struct htc_endpoint *pEndpoint,
- struct htc_packet *pPacket)
-{
- struct htc_frame_hdr *pHdr = (struct htc_frame_hdr *)&LookAhead;
- /* check to see if the "next" packet is from the same endpoint of the
- completing packet */
- if (pHdr->EndpointID == pPacket->Endpoint) {
- /* check that there is a buffer available to actually fetch it */
- if (!HTC_QUEUE_EMPTY(&pEndpoint->RxBuffers)) {
- /* provide a hint that there are more RX packets to fetch */
- FORCE_MORE_RX_PACKET_INDICATION_FLAG(pPacket);
- }
- }
-}
-
-
-/* asynchronous completion handler for recv packet fetching, when the device layer
- * completes a read request, it will call this completion handler */
-void HTCRecvCompleteHandler(void *Context, struct htc_packet *pPacket)
-{
- struct htc_target *target = (struct htc_target *)Context;
- struct htc_endpoint *pEndpoint;
- u32 nextLookAheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
- int numLookAheads = 0;
- int status;
- bool checkMorePkts = true;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("+HTCRecvCompleteHandler (pkt:0x%lX, status:%d, ep:%d) \n",
- (unsigned long)pPacket, pPacket->Status, pPacket->Endpoint));
-
- A_ASSERT(!IS_DEV_IRQ_PROC_SYNC_MODE(&target->Device));
- AR_DEBUG_ASSERT(pPacket->Endpoint < ENDPOINT_MAX);
- pEndpoint = &target->EndPoint[pPacket->Endpoint];
- pPacket->Completion = NULL;
-
- /* get completion status */
- status = pPacket->Status;
-
- do {
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HTCRecvCompleteHandler: request failed (status:%d, ep:%d) \n",
- pPacket->Status, pPacket->Endpoint));
- break;
- }
- /* process the header for any trailer data */
- status = HTCProcessRecvHeader(target,pPacket,nextLookAheads,&numLookAheads);
-
- if (status) {
- break;
- }
-
- if (pPacket->PktInfo.AsRx.HTCRxFlags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
- /* this packet was part of a bundle that had to be broken up.
- * It was fetched one message at a time. There may be other asynchronous reads queued behind this one.
- * Do no issue another check for more packets since the last one in the series of requests
- * will handle it */
- checkMorePkts = false;
- }
-
- DUMP_RECV_PKT_INFO(pPacket);
- LOCK_HTC_RX(target);
- SET_MORE_RX_PACKET_INDICATION_FLAG(nextLookAheads,numLookAheads,pEndpoint,pPacket);
- /* we have a good packet, queue it to the completion queue */
- HTC_PACKET_ENQUEUE(&pEndpoint->RecvIndicationQueue,pPacket);
- HTC_RX_STAT_PROFILE(target,pEndpoint,numLookAheads);
- UNLOCK_HTC_RX(target);
-
- /* check for more recv packets before indicating */
- HTCAsyncRecvCheckMorePackets(target,nextLookAheads,numLookAheads,checkMorePkts);
-
- } while (false);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("HTCRecvCompleteHandler , message fetch failed (status = %d) \n",
- status));
- /* recycle this packet */
- HTC_RECYCLE_RX_PKT(target, pPacket, pEndpoint);
- } else {
- /* a good packet was queued, drain the queue */
- DrainRecvIndicationQueue(target,pEndpoint);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCRecvCompleteHandler\n"));
-}
-
-/* synchronously wait for a control message from the target,
- * This function is used at initialization time ONLY. At init messages
- * on ENDPOINT 0 are expected. */
-int HTCWaitforControlMessage(struct htc_target *target, struct htc_packet **ppControlPacket)
-{
- int status;
- u32 lookAhead;
- struct htc_packet *pPacket = NULL;
- struct htc_frame_hdr *pHdr;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HTCWaitforControlMessage \n"));
-
- do {
-
- *ppControlPacket = NULL;
-
- /* call the polling function to see if we have a message */
- status = DevPollMboxMsgRecv(&target->Device,
- &lookAhead,
- HTC_TARGET_RESPONSE_TIMEOUT);
-
- if (status) {
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("HTCWaitforControlMessage : lookAhead : 0x%X \n", lookAhead));
-
- /* check the lookahead */
- pHdr = (struct htc_frame_hdr *)&lookAhead;
-
- if (pHdr->EndpointID != ENDPOINT_0) {
- /* unexpected endpoint number, should be zero */
- AR_DEBUG_ASSERT(false);
- status = A_EPROTO;
- break;
- }
-
- if (status) {
- /* bad message */
- AR_DEBUG_ASSERT(false);
- status = A_EPROTO;
- break;
- }
-
- pPacket = HTC_ALLOC_CONTROL_RX(target);
-
- if (pPacket == NULL) {
- AR_DEBUG_ASSERT(false);
- status = A_NO_MEMORY;
- break;
- }
-
- pPacket->PktInfo.AsRx.HTCRxFlags = 0;
- pPacket->PktInfo.AsRx.ExpectedHdr = lookAhead;
- pPacket->ActualLength = pHdr->PayloadLen + HTC_HDR_LENGTH;
-
- if (pPacket->ActualLength > pPacket->BufferLength) {
- AR_DEBUG_ASSERT(false);
- status = A_EPROTO;
- break;
- }
-
- /* we want synchronous operation */
- pPacket->Completion = NULL;
-
- /* get the message from the device, this will block */
- status = HTCIssueRecv(target, pPacket);
-
- if (status) {
- break;
- }
-
- /* process receive header */
- status = HTCProcessRecvHeader(target,pPacket,NULL,NULL);
-
- pPacket->Status = status;
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("HTCWaitforControlMessage, HTCProcessRecvHeader failed (status = %d) \n",
- status));
- break;
- }
-
- /* give the caller this control message packet, they are responsible to free */
- *ppControlPacket = pPacket;
-
- } while (false);
-
- if (status) {
- if (pPacket != NULL) {
- /* cleanup buffer on error */
- HTC_FREE_CONTROL_RX(target,pPacket);
- }
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-HTCWaitforControlMessage \n"));
-
- return status;
-}
-
-static int AllocAndPrepareRxPackets(struct htc_target *target,
- u32 LookAheads[],
- int Messages,
- struct htc_endpoint *pEndpoint,
- struct htc_packet_queue *pQueue)
-{
- int status = 0;
- struct htc_packet *pPacket;
- struct htc_frame_hdr *pHdr;
- int i,j;
- int numMessages;
- int fullLength;
- bool noRecycle;
-
- /* lock RX while we assemble the packet buffers */
- LOCK_HTC_RX(target);
-
- for (i = 0; i < Messages; i++) {
-
- pHdr = (struct htc_frame_hdr *)&LookAheads[i];
-
- if (pHdr->EndpointID >= ENDPOINT_MAX) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Invalid Endpoint in look-ahead: %d \n",pHdr->EndpointID));
- /* invalid endpoint */
- status = A_EPROTO;
- break;
- }
-
- if (pHdr->EndpointID != pEndpoint->Id) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Invalid Endpoint in look-ahead: %d should be : %d (index:%d)\n",
- pHdr->EndpointID, pEndpoint->Id, i));
- /* invalid endpoint */
- status = A_EPROTO;
- break;
- }
-
- if (pHdr->PayloadLen > HTC_MAX_PAYLOAD_LENGTH) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Payload length %d exceeds max HTC : %d !\n",
- pHdr->PayloadLen, (u32)HTC_MAX_PAYLOAD_LENGTH));
- status = A_EPROTO;
- break;
- }
-
- if (0 == pEndpoint->ServiceID) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Endpoint %d is not connected !\n",pHdr->EndpointID));
- /* endpoint isn't even connected */
- status = A_EPROTO;
- break;
- }
-
- if ((pHdr->Flags & HTC_FLAGS_RECV_BUNDLE_CNT_MASK) == 0) {
- /* HTC header only indicates 1 message to fetch */
- numMessages = 1;
- } else {
- /* HTC header indicates that every packet to follow has the same padded length so that it can
- * be optimally fetched as a full bundle */
- numMessages = (pHdr->Flags & HTC_FLAGS_RECV_BUNDLE_CNT_MASK) >> HTC_FLAGS_RECV_BUNDLE_CNT_SHIFT;
- /* the count doesn't include the starter frame, just a count of frames to follow */
- numMessages++;
- A_ASSERT(numMessages <= target->MaxMsgPerBundle);
- INC_HTC_EP_STAT(pEndpoint, RxBundleIndFromHdr, 1);
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("HTC header indicates :%d messages can be fetched as a bundle \n",numMessages));
- }
-
- fullLength = DEV_CALC_RECV_PADDED_LEN(&target->Device,pHdr->PayloadLen + sizeof(struct htc_frame_hdr));
-
- /* get packet buffers for each message, if there was a bundle detected in the header,
- * use pHdr as a template to fetch all packets in the bundle */
- for (j = 0; j < numMessages; j++) {
-
- /* reset flag, any packets allocated using the RecvAlloc() API cannot be recycled on cleanup,
- * they must be explicitly returned */
- noRecycle = false;
-
- if (pEndpoint->EpCallBacks.EpRecvAlloc != NULL) {
- UNLOCK_HTC_RX(target);
- noRecycle = true;
- /* user is using a per-packet allocation callback */
- pPacket = pEndpoint->EpCallBacks.EpRecvAlloc(pEndpoint->EpCallBacks.pContext,
- pEndpoint->Id,
- fullLength);
- LOCK_HTC_RX(target);
-
- } else if ((pEndpoint->EpCallBacks.EpRecvAllocThresh != NULL) &&
- (fullLength > pEndpoint->EpCallBacks.RecvAllocThreshold)) {
- INC_HTC_EP_STAT(pEndpoint,RxAllocThreshHit,1);
- INC_HTC_EP_STAT(pEndpoint,RxAllocThreshBytes,pHdr->PayloadLen);
- /* threshold was hit, call the special recv allocation callback */
- UNLOCK_HTC_RX(target);
- noRecycle = true;
- /* user wants to allocate packets above a certain threshold */
- pPacket = pEndpoint->EpCallBacks.EpRecvAllocThresh(pEndpoint->EpCallBacks.pContext,
- pEndpoint->Id,
- fullLength);
- LOCK_HTC_RX(target);
-
- } else {
- /* user is using a refill handler that can refill multiple HTC buffers */
-
- /* get a packet from the endpoint recv queue */
- pPacket = HTC_PACKET_DEQUEUE(&pEndpoint->RxBuffers);
-
- if (NULL == pPacket) {
- /* check for refill handler */
- if (pEndpoint->EpCallBacks.EpRecvRefill != NULL) {
- UNLOCK_HTC_RX(target);
- /* call the re-fill handler */
- pEndpoint->EpCallBacks.EpRecvRefill(pEndpoint->EpCallBacks.pContext,
- pEndpoint->Id);
- LOCK_HTC_RX(target);
- /* check if we have more buffers */
- pPacket = HTC_PACKET_DEQUEUE(&pEndpoint->RxBuffers);
- /* fall through */
- }
- }
- }
-
- if (NULL == pPacket) {
- /* this is not an error, we simply need to mark that we are waiting for buffers.*/
- target->RecvStateFlags |= HTC_RECV_WAIT_BUFFERS;
- target->EpWaitingForBuffers = pEndpoint->Id;
- status = A_NO_RESOURCE;
- break;
- }
-
- AR_DEBUG_ASSERT(pPacket->Endpoint == pEndpoint->Id);
- /* clear flags */
- pPacket->PktInfo.AsRx.HTCRxFlags = 0;
- pPacket->PktInfo.AsRx.IndicationFlags = 0;
- pPacket->Status = 0;
-
- if (noRecycle) {
- /* flag that these packets cannot be recycled, they have to be returned to the
- * user */
- pPacket->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_NO_RECYCLE;
- }
- /* add packet to queue (also incase we need to cleanup down below) */
- HTC_PACKET_ENQUEUE(pQueue,pPacket);
-
- if (HTC_STOPPING(target)) {
- status = A_ECANCELED;
- break;
- }
-
- /* make sure this message can fit in the endpoint buffer */
- if ((u32)fullLength > pPacket->BufferLength) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("Payload Length Error : header reports payload of: %d (%d) endpoint buffer size: %d \n",
- pHdr->PayloadLen, fullLength, pPacket->BufferLength));
- status = A_EPROTO;
- break;
- }
-
- if (j > 0) {
- /* for messages fetched in a bundle the expected lookahead is unknown since we
- * are only using the lookahead of the first packet as a template of what to
- * expect for lengths */
- /* flag that once we get the real HTC header we need to refesh the information */
- pPacket->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_REFRESH_HDR;
- /* set it to something invalid */
- pPacket->PktInfo.AsRx.ExpectedHdr = 0xFFFFFFFF;
- } else {
-
- pPacket->PktInfo.AsRx.ExpectedHdr = LookAheads[i]; /* set expected look ahead */
- }
- /* set the amount of data to fetch */
- pPacket->ActualLength = pHdr->PayloadLen + HTC_HDR_LENGTH;
- }
-
- if (status) {
- if (A_NO_RESOURCE == status) {
- /* this is actually okay */
- status = 0;
- }
- break;
- }
-
- }
-
- UNLOCK_HTC_RX(target);
-
- if (status) {
- while (!HTC_QUEUE_EMPTY(pQueue)) {
- pPacket = HTC_PACKET_DEQUEUE(pQueue);
- /* recycle all allocated packets */
- HTC_RECYCLE_RX_PKT(target,pPacket,&target->EndPoint[pPacket->Endpoint]);
- }
- }
-
- return status;
-}
-
-static void HTCAsyncRecvScatterCompletion(struct hif_scatter_req *pScatterReq)
-{
- int i;
- struct htc_packet *pPacket;
- struct htc_endpoint *pEndpoint;
- u32 lookAheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
- int numLookAheads = 0;
- struct htc_target *target = (struct htc_target *)pScatterReq->Context;
- int status;
- bool partialBundle = false;
- struct htc_packet_queue localRecvQueue;
- bool procError = false;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HTCAsyncRecvScatterCompletion TotLen: %d Entries: %d\n",
- pScatterReq->TotalLength, pScatterReq->ValidScatterEntries));
-
- A_ASSERT(!IS_DEV_IRQ_PROC_SYNC_MODE(&target->Device));
-
- if (pScatterReq->CompletionStatus) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** Recv Scatter Request Failed: %d \n",pScatterReq->CompletionStatus));
- }
-
- if (pScatterReq->CallerFlags & HTC_SCATTER_REQ_FLAGS_PARTIAL_BUNDLE) {
- partialBundle = true;
- }
-
- DEV_FINISH_SCATTER_OPERATION(pScatterReq);
-
- INIT_HTC_PACKET_QUEUE(&localRecvQueue);
-
- pPacket = (struct htc_packet *)pScatterReq->ScatterList[0].pCallerContexts[0];
- /* note: all packets in a scatter req are for the same endpoint ! */
- pEndpoint = &target->EndPoint[pPacket->Endpoint];
-
- /* walk through the scatter list and process */
- /* **** NOTE: DO NOT HOLD ANY LOCKS here, HTCProcessRecvHeader can take the TX lock
- * as it processes credit reports */
- for (i = 0; i < pScatterReq->ValidScatterEntries; i++) {
- pPacket = (struct htc_packet *)pScatterReq->ScatterList[i].pCallerContexts[0];
- A_ASSERT(pPacket != NULL);
- /* reset count, we are only interested in the look ahead in the last packet when we
- * break out of this loop */
- numLookAheads = 0;
-
- if (!pScatterReq->CompletionStatus) {
- /* process header for each of the recv packets */
- status = HTCProcessRecvHeader(target,pPacket,lookAheads,&numLookAheads);
- } else {
- status = A_ERROR;
- }
-
- if (!status) {
- LOCK_HTC_RX(target);
- HTC_RX_STAT_PROFILE(target,pEndpoint,numLookAheads);
- INC_HTC_EP_STAT(pEndpoint, RxPacketsBundled, 1);
- UNLOCK_HTC_RX(target);
- if (i == (pScatterReq->ValidScatterEntries - 1)) {
- /* last packet's more packets flag is set based on the lookahead */
- SET_MORE_RX_PACKET_INDICATION_FLAG(lookAheads,numLookAheads,pEndpoint,pPacket);
- } else {
- /* packets in a bundle automatically have this flag set */
- FORCE_MORE_RX_PACKET_INDICATION_FLAG(pPacket);
- }
-
- DUMP_RECV_PKT_INFO(pPacket);
- /* since we can't hold a lock in this loop, we insert into our local recv queue for
- * storage until we can transfer them to the recv completion queue */
- HTC_PACKET_ENQUEUE(&localRecvQueue,pPacket);
-
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" Recv packet scatter entry %d failed (out of %d) \n",
- i, pScatterReq->ValidScatterEntries));
- /* recycle failed recv */
- HTC_RECYCLE_RX_PKT(target, pPacket, pEndpoint);
- /* set flag and continue processing the remaining scatter entries */
- procError = true;
- }
-
- }
-
- /* free scatter request */
- DEV_FREE_SCATTER_REQ(&target->Device,pScatterReq);
-
- LOCK_HTC_RX(target);
- /* transfer the packets in the local recv queue to the recv completion queue */
- HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pEndpoint->RecvIndicationQueue, &localRecvQueue);
-
- UNLOCK_HTC_RX(target);
-
- if (!procError) {
- /* pipeline the next check (asynchronously) for more packets */
- HTCAsyncRecvCheckMorePackets(target,
- lookAheads,
- numLookAheads,
- partialBundle ? false : true);
- }
-
- /* now drain the indication queue */
- DrainRecvIndicationQueue(target,pEndpoint);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-HTCAsyncRecvScatterCompletion \n"));
-}
-
-static int HTCIssueRecvPacketBundle(struct htc_target *target,
- struct htc_packet_queue *pRecvPktQueue,
- struct htc_packet_queue *pSyncCompletionQueue,
- int *pNumPacketsFetched,
- bool PartialBundle)
-{
- int status = 0;
- struct hif_scatter_req *pScatterReq;
- int i, totalLength;
- int pktsToScatter;
- struct htc_packet *pPacket;
- bool asyncMode = (pSyncCompletionQueue == NULL) ? true : false;
- int scatterSpaceRemaining = DEV_GET_MAX_BUNDLE_RECV_LENGTH(&target->Device);
-
- pktsToScatter = HTC_PACKET_QUEUE_DEPTH(pRecvPktQueue);
- pktsToScatter = min(pktsToScatter, target->MaxMsgPerBundle);
-
- if ((HTC_PACKET_QUEUE_DEPTH(pRecvPktQueue) - pktsToScatter) > 0) {
- /* we were forced to split this bundle receive operation
- * all packets in this partial bundle must have their lookaheads ignored */
- PartialBundle = true;
- /* this would only happen if the target ignored our max bundle limit */
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
- ("HTCIssueRecvPacketBundle : partial bundle detected num:%d , %d \n",
- HTC_PACKET_QUEUE_DEPTH(pRecvPktQueue), pktsToScatter));
- }
-
- totalLength = 0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HTCIssueRecvPacketBundle (Numpackets: %d , actual : %d) \n",
- HTC_PACKET_QUEUE_DEPTH(pRecvPktQueue), pktsToScatter));
-
- do {
-
- pScatterReq = DEV_ALLOC_SCATTER_REQ(&target->Device);
-
- if (pScatterReq == NULL) {
- /* no scatter resources left, just let caller handle it the legacy way */
- break;
- }
-
- pScatterReq->CallerFlags = 0;
-
- if (PartialBundle) {
- /* mark that this is a partial bundle, this has special ramifications to the
- * scatter completion routine */
- pScatterReq->CallerFlags |= HTC_SCATTER_REQ_FLAGS_PARTIAL_BUNDLE;
- }
-
- /* convert HTC packets to scatter list */
- for (i = 0; i < pktsToScatter; i++) {
- int paddedLength;
-
- pPacket = HTC_PACKET_DEQUEUE(pRecvPktQueue);
- A_ASSERT(pPacket != NULL);
-
- paddedLength = DEV_CALC_RECV_PADDED_LEN(&target->Device, pPacket->ActualLength);
-
- if ((scatterSpaceRemaining - paddedLength) < 0) {
- /* exceeds what we can transfer, put the packet back */
- HTC_PACKET_ENQUEUE_TO_HEAD(pRecvPktQueue,pPacket);
- break;
- }
-
- scatterSpaceRemaining -= paddedLength;
-
- if (PartialBundle || (i < (pktsToScatter - 1))) {
- /* packet 0..n-1 cannot be checked for look-aheads since we are fetching a bundle
- * the last packet however can have it's lookahead used */
- pPacket->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_IGNORE_LOOKAHEAD;
- }
-
- /* note: 1 HTC packet per scatter entry */
- /* setup packet into */
- pScatterReq->ScatterList[i].pBuffer = pPacket->pBuffer;
- pScatterReq->ScatterList[i].Length = paddedLength;
-
- pPacket->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_PART_OF_BUNDLE;
-
- if (asyncMode) {
- /* save HTC packet for async completion routine */
- pScatterReq->ScatterList[i].pCallerContexts[0] = pPacket;
- } else {
- /* queue to caller's sync completion queue, caller will unload this when we return */
- HTC_PACKET_ENQUEUE(pSyncCompletionQueue,pPacket);
- }
-
- A_ASSERT(pScatterReq->ScatterList[i].Length);
- totalLength += pScatterReq->ScatterList[i].Length;
- }
-
- pScatterReq->TotalLength = totalLength;
- pScatterReq->ValidScatterEntries = i;
-
- if (asyncMode) {
- pScatterReq->CompletionRoutine = HTCAsyncRecvScatterCompletion;
- pScatterReq->Context = target;
- }
-
- status = DevSubmitScatterRequest(&target->Device, pScatterReq, DEV_SCATTER_READ, asyncMode);
-
- if (!status) {
- *pNumPacketsFetched = i;
- }
-
- if (!asyncMode) {
- /* free scatter request */
- DEV_FREE_SCATTER_REQ(&target->Device, pScatterReq);
- }
-
- } while (false);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-HTCIssueRecvPacketBundle (status:%d) (fetched:%d) \n",
- status,*pNumPacketsFetched));
-
- return status;
-}
-
-static INLINE void CheckRecvWaterMark(struct htc_endpoint *pEndpoint)
-{
- /* see if endpoint is using a refill watermark
- * ** no need to use a lock here, since we are only inspecting...
- * caller may must not hold locks when calling this function */
- if (pEndpoint->EpCallBacks.RecvRefillWaterMark > 0) {
- if (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->RxBuffers) < pEndpoint->EpCallBacks.RecvRefillWaterMark) {
- /* call the re-fill handler before we continue */
- pEndpoint->EpCallBacks.EpRecvRefill(pEndpoint->EpCallBacks.pContext,
- pEndpoint->Id);
- }
- }
-}
-
-/* callback when device layer or lookahead report parsing detects a pending message */
-int HTCRecvMessagePendingHandler(void *Context, u32 MsgLookAheads[], int NumLookAheads, bool *pAsyncProc, int *pNumPktsFetched)
-{
- struct htc_target *target = (struct htc_target *)Context;
- int status = 0;
- struct htc_packet *pPacket;
- struct htc_endpoint *pEndpoint;
- bool asyncProc = false;
- u32 lookAheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
- int pktsFetched;
- struct htc_packet_queue recvPktQueue, syncCompletedPktsQueue;
- bool partialBundle;
- HTC_ENDPOINT_ID id;
- int totalFetched = 0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HTCRecvMessagePendingHandler NumLookAheads: %d \n",NumLookAheads));
-
- if (pNumPktsFetched != NULL) {
- *pNumPktsFetched = 0;
- }
-
- if (IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(&target->Device)) {
- /* We use async mode to get the packets if the device layer supports it.
- * The device layer interfaces with HIF in which HIF may have restrictions on
- * how interrupts are processed */
- asyncProc = true;
- }
-
- if (pAsyncProc != NULL) {
- /* indicate to caller how we decided to process this */
- *pAsyncProc = asyncProc;
- }
-
- if (NumLookAheads > HTC_HOST_MAX_MSG_PER_BUNDLE) {
- A_ASSERT(false);
- return A_EPROTO;
- }
-
- /* on first entry copy the lookaheads into our temp array for processing */
- memcpy(lookAheads, MsgLookAheads, (sizeof(u32)) * NumLookAheads);
-
- while (true) {
-
- /* reset packets queues */
- INIT_HTC_PACKET_QUEUE(&recvPktQueue);
- INIT_HTC_PACKET_QUEUE(&syncCompletedPktsQueue);
-
- if (NumLookAheads > HTC_HOST_MAX_MSG_PER_BUNDLE) {
- status = A_EPROTO;
- A_ASSERT(false);
- break;
- }
-
- /* first lookahead sets the expected endpoint IDs for all packets in a bundle */
- id = ((struct htc_frame_hdr *)&lookAheads[0])->EndpointID;
- pEndpoint = &target->EndPoint[id];
-
- if (id >= ENDPOINT_MAX) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("MsgPend, Invalid Endpoint in look-ahead: %d \n",id));
- status = A_EPROTO;
- break;
- }
-
- /* try to allocate as many HTC RX packets indicated by the lookaheads
- * these packets are stored in the recvPkt queue */
- status = AllocAndPrepareRxPackets(target,
- lookAheads,
- NumLookAheads,
- pEndpoint,
- &recvPktQueue);
- if (status) {
- break;
- }
-
- if (HTC_PACKET_QUEUE_DEPTH(&recvPktQueue) >= 2) {
- /* a recv bundle was detected, force IRQ status re-check again */
- REF_IRQ_STATUS_RECHECK(&target->Device);
- }
-
- totalFetched += HTC_PACKET_QUEUE_DEPTH(&recvPktQueue);
-
- /* we've got packet buffers for all we can currently fetch,
- * this count is not valid anymore */
- NumLookAheads = 0;
- partialBundle = false;
-
- /* now go fetch the list of HTC packets */
- while (!HTC_QUEUE_EMPTY(&recvPktQueue)) {
-
- pktsFetched = 0;
-
- if (target->RecvBundlingEnabled && (HTC_PACKET_QUEUE_DEPTH(&recvPktQueue) > 1)) {
- /* there are enough packets to attempt a bundle transfer and recv bundling is allowed */
- status = HTCIssueRecvPacketBundle(target,
- &recvPktQueue,
- asyncProc ? NULL : &syncCompletedPktsQueue,
- &pktsFetched,
- partialBundle);
- if (status) {
- break;
- }
-
- if (HTC_PACKET_QUEUE_DEPTH(&recvPktQueue) != 0) {
- /* we couldn't fetch all packets at one time, this creates a broken
- * bundle */
- partialBundle = true;
- }
- }
-
- /* see if the previous operation fetched any packets using bundling */
- if (0 == pktsFetched) {
- /* dequeue one packet */
- pPacket = HTC_PACKET_DEQUEUE(&recvPktQueue);
- A_ASSERT(pPacket != NULL);
-
- if (asyncProc) {
- /* we use async mode to get the packet if the device layer supports it
- * set our callback and context */
- pPacket->Completion = HTCRecvCompleteHandler;
- pPacket->pContext = target;
- } else {
- /* fully synchronous */
- pPacket->Completion = NULL;
- }
-
- if (HTC_PACKET_QUEUE_DEPTH(&recvPktQueue) > 0) {
- /* lookaheads in all packets except the last one in the bundle must be ignored */
- pPacket->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_IGNORE_LOOKAHEAD;
- }
-
- /* go fetch the packet */
- status = HTCIssueRecv(target, pPacket);
- if (status) {
- break;
- }
-
- if (!asyncProc) {
- /* sent synchronously, queue this packet for synchronous completion */
- HTC_PACKET_ENQUEUE(&syncCompletedPktsQueue,pPacket);
- }
-
- }
-
- }
-
- if (!status) {
- CheckRecvWaterMark(pEndpoint);
- }
-
- if (asyncProc) {
- /* we did this asynchronously so we can get out of the loop, the asynch processing
- * creates a chain of requests to continue processing pending messages in the
- * context of callbacks */
- break;
- }
-
- /* synchronous handling */
- if (target->Device.DSRCanYield) {
- /* for the SYNC case, increment count that tracks when the DSR should yield */
- target->Device.CurrentDSRRecvCount++;
- }
-
- /* in the sync case, all packet buffers are now filled,
- * we can process each packet, check lookaheads and then repeat */
-
- /* unload sync completion queue */
- while (!HTC_QUEUE_EMPTY(&syncCompletedPktsQueue)) {
- struct htc_packet_queue container;
-
- pPacket = HTC_PACKET_DEQUEUE(&syncCompletedPktsQueue);
- A_ASSERT(pPacket != NULL);
-
- pEndpoint = &target->EndPoint[pPacket->Endpoint];
- /* reset count on each iteration, we are only interested in the last packet's lookahead
- * information when we break out of this loop */
- NumLookAheads = 0;
- /* process header for each of the recv packets
- * note: the lookahead of the last packet is useful for us to continue in this loop */
- status = HTCProcessRecvHeader(target,pPacket,lookAheads,&NumLookAheads);
- if (status) {
- break;
- }
-
- if (HTC_QUEUE_EMPTY(&syncCompletedPktsQueue)) {
- /* last packet's more packets flag is set based on the lookahead */
- SET_MORE_RX_PACKET_INDICATION_FLAG(lookAheads,NumLookAheads,pEndpoint,pPacket);
- } else {
- /* packets in a bundle automatically have this flag set */
- FORCE_MORE_RX_PACKET_INDICATION_FLAG(pPacket);
- }
- /* good packet, indicate it */
- HTC_RX_STAT_PROFILE(target,pEndpoint,NumLookAheads);
-
- if (pPacket->PktInfo.AsRx.HTCRxFlags & HTC_RX_PKT_PART_OF_BUNDLE) {
- INC_HTC_EP_STAT(pEndpoint, RxPacketsBundled, 1);
- }
-
- INIT_HTC_PACKET_QUEUE_AND_ADD(&container,pPacket);
- DO_RCV_COMPLETION(pEndpoint,&container);
- }
-
- if (status) {
- break;
- }
-
- if (NumLookAheads == 0) {
- /* no more look aheads */
- break;
- }
-
- /* when we process recv synchronously we need to check if we should yield and stop
- * fetching more packets indicated by the embedded lookaheads */
- if (target->Device.DSRCanYield) {
- if (DEV_CHECK_RECV_YIELD(&target->Device)) {
- /* break out, don't fetch any more packets */
- break;
- }
- }
-
-
- /* check whether other OS contexts have queued any WMI command/data for WLAN.
- * This check is needed only if WLAN Tx and Rx happens in same thread context */
- A_CHECK_DRV_TX();
-
- /* for SYNCH processing, if we get here, we are running through the loop again due to a detected lookahead.
- * Set flag that we should re-check IRQ status registers again before leaving IRQ processing,
- * this can net better performance in high throughput situations */
- REF_IRQ_STATUS_RECHECK(&target->Device);
- }
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("Failed to get pending recv messages (%d) \n",status));
- /* cleanup any packets we allocated but didn't use to actually fetch any packets */
- while (!HTC_QUEUE_EMPTY(&recvPktQueue)) {
- pPacket = HTC_PACKET_DEQUEUE(&recvPktQueue);
- /* clean up packets */
- HTC_RECYCLE_RX_PKT(target, pPacket, &target->EndPoint[pPacket->Endpoint]);
- }
- /* cleanup any packets in sync completion queue */
- while (!HTC_QUEUE_EMPTY(&syncCompletedPktsQueue)) {
- pPacket = HTC_PACKET_DEQUEUE(&syncCompletedPktsQueue);
- /* clean up packets */
- HTC_RECYCLE_RX_PKT(target, pPacket, &target->EndPoint[pPacket->Endpoint]);
- }
- if (HTC_STOPPING(target)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
- (" Host is going to stop. blocking receiver for HTCStop.. \n"));
- DevStopRecv(&target->Device, asyncProc ? DEV_STOP_RECV_ASYNC : DEV_STOP_RECV_SYNC);
- }
- }
- /* before leaving, check to see if host ran out of buffers and needs to stop the
- * receiver */
- if (target->RecvStateFlags & HTC_RECV_WAIT_BUFFERS) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
- (" Host has no RX buffers, blocking receiver to prevent overrun.. \n"));
- /* try to stop receive at the device layer */
- DevStopRecv(&target->Device, asyncProc ? DEV_STOP_RECV_ASYNC : DEV_STOP_RECV_SYNC);
- }
-
- if (pNumPktsFetched != NULL) {
- *pNumPktsFetched = totalFetched;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-HTCRecvMessagePendingHandler \n"));
-
- return status;
-}
-
-int HTCAddReceivePktMultiple(HTC_HANDLE HTCHandle, struct htc_packet_queue *pPktQueue)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- struct htc_endpoint *pEndpoint;
- bool unblockRecv = false;
- int status = 0;
- struct htc_packet *pFirstPacket;
-
- pFirstPacket = HTC_GET_PKT_AT_HEAD(pPktQueue);
-
- if (NULL == pFirstPacket) {
- A_ASSERT(false);
- return A_EINVAL;
- }
-
- AR_DEBUG_ASSERT(pFirstPacket->Endpoint < ENDPOINT_MAX);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
- ("+- HTCAddReceivePktMultiple : endPointId: %d, cnt:%d, length: %d\n",
- pFirstPacket->Endpoint,
- HTC_PACKET_QUEUE_DEPTH(pPktQueue),
- pFirstPacket->BufferLength));
-
- do {
-
- pEndpoint = &target->EndPoint[pFirstPacket->Endpoint];
-
- LOCK_HTC_RX(target);
-
- if (HTC_STOPPING(target)) {
- struct htc_packet *pPacket;
-
- UNLOCK_HTC_RX(target);
-
- /* walk through queue and mark each one canceled */
- HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pPktQueue,pPacket) {
- pPacket->Status = A_ECANCELED;
- } HTC_PACKET_QUEUE_ITERATE_END;
-
- DO_RCV_COMPLETION(pEndpoint,pPktQueue);
- break;
- }
-
- /* store receive packets */
- HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pEndpoint->RxBuffers, pPktQueue);
-
- /* check if we are blocked waiting for a new buffer */
- if (target->RecvStateFlags & HTC_RECV_WAIT_BUFFERS) {
- if (target->EpWaitingForBuffers == pFirstPacket->Endpoint) {
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" receiver was blocked on ep:%d, unblocking.. \n",
- target->EpWaitingForBuffers));
- target->RecvStateFlags &= ~HTC_RECV_WAIT_BUFFERS;
- target->EpWaitingForBuffers = ENDPOINT_MAX;
- unblockRecv = true;
- }
- }
-
- UNLOCK_HTC_RX(target);
-
- if (unblockRecv && !HTC_STOPPING(target)) {
- /* TODO : implement a buffer threshold count? */
- DevEnableRecv(&target->Device,DEV_ENABLE_RECV_SYNC);
- }
-
- } while (false);
-
- return status;
-}
-
-/* Makes a buffer available to the HTC module */
-int HTCAddReceivePkt(HTC_HANDLE HTCHandle, struct htc_packet *pPacket)
-{
- struct htc_packet_queue queue;
- INIT_HTC_PACKET_QUEUE_AND_ADD(&queue,pPacket);
- return HTCAddReceivePktMultiple(HTCHandle, &queue);
-}
-
-void HTCUnblockRecv(HTC_HANDLE HTCHandle)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- bool unblockRecv = false;
-
- LOCK_HTC_RX(target);
-
- /* check if we are blocked waiting for a new buffer */
- if (target->RecvStateFlags & HTC_RECV_WAIT_BUFFERS) {
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HTCUnblockRx : receiver was blocked on ep:%d, unblocking.. \n",
- target->EpWaitingForBuffers));
- target->RecvStateFlags &= ~HTC_RECV_WAIT_BUFFERS;
- target->EpWaitingForBuffers = ENDPOINT_MAX;
- unblockRecv = true;
- }
-
- UNLOCK_HTC_RX(target);
-
- if (unblockRecv && !HTC_STOPPING(target)) {
- /* re-enable */
- DevEnableRecv(&target->Device,DEV_ENABLE_RECV_ASYNC);
- }
-}
-
-static void HTCFlushRxQueue(struct htc_target *target, struct htc_endpoint *pEndpoint, struct htc_packet_queue *pQueue)
-{
- struct htc_packet *pPacket;
- struct htc_packet_queue container;
-
- LOCK_HTC_RX(target);
-
- while (1) {
- pPacket = HTC_PACKET_DEQUEUE(pQueue);
- if (NULL == pPacket) {
- break;
- }
- UNLOCK_HTC_RX(target);
- pPacket->Status = A_ECANCELED;
- pPacket->ActualLength = 0;
- AR_DEBUG_PRINTF(ATH_DEBUG_RECV, (" Flushing RX packet:0x%lX, length:%d, ep:%d \n",
- (unsigned long)pPacket, pPacket->BufferLength, pPacket->Endpoint));
- INIT_HTC_PACKET_QUEUE_AND_ADD(&container,pPacket);
- /* give the packet back */
- DO_RCV_COMPLETION(pEndpoint,&container);
- LOCK_HTC_RX(target);
- }
-
- UNLOCK_HTC_RX(target);
-}
-
-static void HTCFlushEndpointRX(struct htc_target *target, struct htc_endpoint *pEndpoint)
-{
- /* flush any recv indications not already made */
- HTCFlushRxQueue(target,pEndpoint,&pEndpoint->RecvIndicationQueue);
- /* flush any rx buffers */
- HTCFlushRxQueue(target,pEndpoint,&pEndpoint->RxBuffers);
-}
-
-void HTCFlushRecvBuffers(struct htc_target *target)
-{
- struct htc_endpoint *pEndpoint;
- int i;
-
- for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
- pEndpoint = &target->EndPoint[i];
- if (pEndpoint->ServiceID == 0) {
- /* not in use.. */
- continue;
- }
- HTCFlushEndpointRX(target,pEndpoint);
- }
-}
-
-
-void HTCEnableRecv(HTC_HANDLE HTCHandle)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
-
- if (!HTC_STOPPING(target)) {
- /* re-enable */
- DevEnableRecv(&target->Device,DEV_ENABLE_RECV_SYNC);
- }
-}
-
-void HTCDisableRecv(HTC_HANDLE HTCHandle)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
-
- if (!HTC_STOPPING(target)) {
- /* disable */
- DevStopRecv(&target->Device,DEV_ENABLE_RECV_SYNC);
- }
-}
-
-int HTCGetNumRecvBuffers(HTC_HANDLE HTCHandle,
- HTC_ENDPOINT_ID Endpoint)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- return HTC_PACKET_QUEUE_DEPTH(&(target->EndPoint[Endpoint].RxBuffers));
-}
-
-int HTCWaitForPendingRecv(HTC_HANDLE HTCHandle,
- u32 TimeoutInMs,
- bool *pbIsRecvPending)
-{
- int status = 0;
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
-
- status = DevWaitForPendingRecv(&target->Device,
- TimeoutInMs,
- pbIsRecvPending);
-
- return status;
-}
diff --git a/drivers/staging/ath6kl/htc2/htc_send.c b/drivers/staging/ath6kl/htc2/htc_send.c
deleted file mode 100644
index 9310d4d5c99..00000000000
--- a/drivers/staging/ath6kl/htc2/htc_send.c
+++ /dev/null
@@ -1,1018 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="htc_send.c" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#include "htc_internal.h"
-
-typedef enum _HTC_SEND_QUEUE_RESULT {
- HTC_SEND_QUEUE_OK = 0, /* packet was queued */
- HTC_SEND_QUEUE_DROP = 1, /* this packet should be dropped */
-} HTC_SEND_QUEUE_RESULT;
-
-#define DO_EP_TX_COMPLETION(ep,q) DoSendCompletion(ep,q)
-
-/* call the distribute credits callback with the distribution */
-#define DO_DISTRIBUTION(t,reason,description,pList) \
-{ \
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, \
- (" calling distribute function (%s) (dfn:0x%lX, ctxt:0x%lX, dist:0x%lX) \n", \
- (description), \
- (unsigned long)(t)->DistributeCredits, \
- (unsigned long)(t)->pCredDistContext, \
- (unsigned long)pList)); \
- (t)->DistributeCredits((t)->pCredDistContext, \
- (pList), \
- (reason)); \
-}
-
-static void DoSendCompletion(struct htc_endpoint *pEndpoint,
- struct htc_packet_queue *pQueueToIndicate)
-{
- do {
-
- if (HTC_QUEUE_EMPTY(pQueueToIndicate)) {
- /* nothing to indicate */
- break;
- }
-
- if (pEndpoint->EpCallBacks.EpTxCompleteMultiple != NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, (" HTC calling ep %d, send complete multiple callback (%d pkts) \n",
- pEndpoint->Id, HTC_PACKET_QUEUE_DEPTH(pQueueToIndicate)));
- /* a multiple send complete handler is being used, pass the queue to the handler */
- pEndpoint->EpCallBacks.EpTxCompleteMultiple(pEndpoint->EpCallBacks.pContext,
- pQueueToIndicate);
- /* all packets are now owned by the callback, reset queue to be safe */
- INIT_HTC_PACKET_QUEUE(pQueueToIndicate);
- } else {
- struct htc_packet *pPacket;
- /* using legacy EpTxComplete */
- do {
- pPacket = HTC_PACKET_DEQUEUE(pQueueToIndicate);
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, (" HTC calling ep %d send complete callback on packet 0x%lX \n", \
- pEndpoint->Id, (unsigned long)(pPacket)));
- pEndpoint->EpCallBacks.EpTxComplete(pEndpoint->EpCallBacks.pContext, pPacket);
- } while (!HTC_QUEUE_EMPTY(pQueueToIndicate));
- }
-
- } while (false);
-
-}
-
-/* do final completion on sent packet */
-static INLINE void CompleteSentPacket(struct htc_target *target, struct htc_endpoint *pEndpoint, struct htc_packet *pPacket)
-{
- pPacket->Completion = NULL;
-
- if (pPacket->Status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("CompleteSentPacket: request failed (status:%d, ep:%d, length:%d creds:%d) \n",
- pPacket->Status, pPacket->Endpoint, pPacket->ActualLength, pPacket->PktInfo.AsTx.CreditsUsed));
- /* on failure to submit, reclaim credits for this packet */
- LOCK_HTC_TX(target);
- pEndpoint->CreditDist.TxCreditsToDist += pPacket->PktInfo.AsTx.CreditsUsed;
- pEndpoint->CreditDist.TxQueueDepth = HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue);
- DO_DISTRIBUTION(target,
- HTC_CREDIT_DIST_SEND_COMPLETE,
- "Send Complete",
- target->EpCreditDistributionListHead->pNext);
- UNLOCK_HTC_TX(target);
- }
- /* first, fixup the head room we allocated */
- pPacket->pBuffer += HTC_HDR_LENGTH;
-}
-
-/* our internal send packet completion handler when packets are submited to the AR6K device
- * layer */
-static void HTCSendPktCompletionHandler(void *Context, struct htc_packet *pPacket)
-{
- struct htc_target *target = (struct htc_target *)Context;
- struct htc_endpoint *pEndpoint = &target->EndPoint[pPacket->Endpoint];
- struct htc_packet_queue container;
-
- CompleteSentPacket(target,pEndpoint,pPacket);
- INIT_HTC_PACKET_QUEUE_AND_ADD(&container,pPacket);
- /* do completion */
- DO_EP_TX_COMPLETION(pEndpoint,&container);
-}
-
-int HTCIssueSend(struct htc_target *target, struct htc_packet *pPacket)
-{
- int status;
- bool sync = false;
-
- if (pPacket->Completion == NULL) {
- /* mark that this request was synchronously issued */
- sync = true;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
- ("+-HTCIssueSend: transmit length : %d (%s) \n",
- pPacket->ActualLength + (u32)HTC_HDR_LENGTH,
- sync ? "SYNC" : "ASYNC" ));
-
- /* send message to device */
- status = DevSendPacket(&target->Device,
- pPacket,
- pPacket->ActualLength + HTC_HDR_LENGTH);
-
- if (sync) {
- /* use local sync variable. If this was issued asynchronously, pPacket is no longer
- * safe to access. */
- pPacket->pBuffer += HTC_HDR_LENGTH;
- }
-
- /* if this request was asynchronous, the packet completion routine will be invoked by
- * the device layer when the HIF layer completes the request */
-
- return status;
-}
-
- /* get HTC send packets from the TX queue on an endpoint */
-static INLINE void GetHTCSendPackets(struct htc_target *target,
- struct htc_endpoint *pEndpoint,
- struct htc_packet_queue *pQueue)
-{
- int creditsRequired;
- int remainder;
- u8 sendFlags;
- struct htc_packet *pPacket;
- unsigned int transferLength;
-
- /****** NOTE : the TX lock is held when this function is called *****************/
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+GetHTCSendPackets \n"));
-
- /* loop until we can grab as many packets out of the queue as we can */
- while (true) {
-
- sendFlags = 0;
- /* get packet at head, but don't remove it */
- pPacket = HTC_GET_PKT_AT_HEAD(&pEndpoint->TxQueue);
- if (pPacket == NULL) {
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" Got head packet:0x%lX , Queue Depth: %d\n",
- (unsigned long)pPacket, HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue)));
-
- transferLength = DEV_CALC_SEND_PADDED_LEN(&target->Device, pPacket->ActualLength + HTC_HDR_LENGTH);
-
- if (transferLength <= target->TargetCreditSize) {
- creditsRequired = 1;
- } else {
- /* figure out how many credits this message requires */
- creditsRequired = transferLength / target->TargetCreditSize;
- remainder = transferLength % target->TargetCreditSize;
-
- if (remainder) {
- creditsRequired++;
- }
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" Creds Required:%d Got:%d\n",
- creditsRequired, pEndpoint->CreditDist.TxCredits));
-
- if (pEndpoint->CreditDist.TxCredits < creditsRequired) {
-
- /* not enough credits */
- if (pPacket->Endpoint == ENDPOINT_0) {
- /* leave it in the queue */
- break;
- }
- /* invoke the registered distribution function only if this is not
- * endpoint 0, we let the driver layer provide more credits if it can.
- * We pass the credit distribution list starting at the endpoint in question
- * */
-
- /* set how many credits we need */
- pEndpoint->CreditDist.TxCreditsSeek =
- creditsRequired - pEndpoint->CreditDist.TxCredits;
- DO_DISTRIBUTION(target,
- HTC_CREDIT_DIST_SEEK_CREDITS,
- "Seek Credits",
- &pEndpoint->CreditDist);
- pEndpoint->CreditDist.TxCreditsSeek = 0;
-
- if (pEndpoint->CreditDist.TxCredits < creditsRequired) {
- /* still not enough credits to send, leave packet in the queue */
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
- (" Not enough credits for ep %d leaving packet in queue..\n",
- pPacket->Endpoint));
- break;
- }
-
- }
-
- pEndpoint->CreditDist.TxCredits -= creditsRequired;
- INC_HTC_EP_STAT(pEndpoint, TxCreditsConsummed, creditsRequired);
-
- /* check if we need credits back from the target */
- if (pEndpoint->CreditDist.TxCredits < pEndpoint->CreditDist.TxCreditsPerMaxMsg) {
- /* we are getting low on credits, see if we can ask for more from the distribution function */
- pEndpoint->CreditDist.TxCreditsSeek =
- pEndpoint->CreditDist.TxCreditsPerMaxMsg - pEndpoint->CreditDist.TxCredits;
-
- DO_DISTRIBUTION(target,
- HTC_CREDIT_DIST_SEEK_CREDITS,
- "Seek Credits",
- &pEndpoint->CreditDist);
-
- pEndpoint->CreditDist.TxCreditsSeek = 0;
- /* see if we were successful in getting more */
- if (pEndpoint->CreditDist.TxCredits < pEndpoint->CreditDist.TxCreditsPerMaxMsg) {
- /* tell the target we need credits ASAP! */
- sendFlags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
- INC_HTC_EP_STAT(pEndpoint, TxCreditLowIndications, 1);
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" Host Needs Credits \n"));
- }
- }
-
- /* now we can fully dequeue */
- pPacket = HTC_PACKET_DEQUEUE(&pEndpoint->TxQueue);
- /* save the number of credits this packet consumed */
- pPacket->PktInfo.AsTx.CreditsUsed = creditsRequired;
- /* all TX packets are handled asynchronously */
- pPacket->Completion = HTCSendPktCompletionHandler;
- pPacket->pContext = target;
- INC_HTC_EP_STAT(pEndpoint, TxIssued, 1);
- /* save send flags */
- pPacket->PktInfo.AsTx.SendFlags = sendFlags;
- pPacket->PktInfo.AsTx.SeqNo = pEndpoint->SeqNo;
- pEndpoint->SeqNo++;
- /* queue this packet into the caller's queue */
- HTC_PACKET_ENQUEUE(pQueue,pPacket);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-GetHTCSendPackets \n"));
-
-}
-
-static void HTCAsyncSendScatterCompletion(struct hif_scatter_req *pScatterReq)
-{
- int i;
- struct htc_packet *pPacket;
- struct htc_endpoint *pEndpoint = (struct htc_endpoint *)pScatterReq->Context;
- struct htc_target *target = (struct htc_target *)pEndpoint->target;
- int status = 0;
- struct htc_packet_queue sendCompletes;
-
- INIT_HTC_PACKET_QUEUE(&sendCompletes);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HTCAsyncSendScatterCompletion TotLen: %d Entries: %d\n",
- pScatterReq->TotalLength, pScatterReq->ValidScatterEntries));
-
- DEV_FINISH_SCATTER_OPERATION(pScatterReq);
-
- if (pScatterReq->CompletionStatus) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** Send Scatter Request Failed: %d \n",pScatterReq->CompletionStatus));
- status = A_ERROR;
- }
-
- /* walk through the scatter list and process */
- for (i = 0; i < pScatterReq->ValidScatterEntries; i++) {
- pPacket = (struct htc_packet *)(pScatterReq->ScatterList[i].pCallerContexts[0]);
- A_ASSERT(pPacket != NULL);
- pPacket->Status = status;
- CompleteSentPacket(target,pEndpoint,pPacket);
- /* add it to the completion queue */
- HTC_PACKET_ENQUEUE(&sendCompletes, pPacket);
- }
-
- /* free scatter request */
- DEV_FREE_SCATTER_REQ(&target->Device,pScatterReq);
- /* complete all packets */
- DO_EP_TX_COMPLETION(pEndpoint,&sendCompletes);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HTCAsyncSendScatterCompletion \n"));
-}
-
- /* drain a queue and send as bundles
- * this function may return without fully draining the queue under the following conditions :
- * - scatter resources are exhausted
- * - a message that will consume a partial credit will stop the bundling process early
- * - we drop below the minimum number of messages for a bundle
- * */
-static void HTCIssueSendBundle(struct htc_endpoint *pEndpoint,
- struct htc_packet_queue *pQueue,
- int *pBundlesSent,
- int *pTotalBundlesPkts)
-{
- int pktsToScatter;
- unsigned int scatterSpaceRemaining;
- struct hif_scatter_req *pScatterReq = NULL;
- int i, packetsInScatterReq;
- unsigned int transferLength;
- struct htc_packet *pPacket;
- bool done = false;
- int bundlesSent = 0;
- int totalPktsInBundle = 0;
- struct htc_target *target = pEndpoint->target;
- int creditRemainder = 0;
- int creditPad;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HTCIssueSendBundle \n"));
-
- while (!done) {
-
- pktsToScatter = HTC_PACKET_QUEUE_DEPTH(pQueue);
- pktsToScatter = min(pktsToScatter, target->MaxMsgPerBundle);
-
- if (pktsToScatter < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
- /* not enough to bundle */
- break;
- }
-
- pScatterReq = DEV_ALLOC_SCATTER_REQ(&target->Device);
-
- if (pScatterReq == NULL) {
- /* no scatter resources */
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" No more scatter resources \n"));
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" pkts to scatter: %d \n", pktsToScatter));
-
- pScatterReq->TotalLength = 0;
- pScatterReq->ValidScatterEntries = 0;
-
- packetsInScatterReq = 0;
- scatterSpaceRemaining = DEV_GET_MAX_BUNDLE_SEND_LENGTH(&target->Device);
-
- for (i = 0; i < pktsToScatter; i++) {
-
- pScatterReq->ScatterList[i].pCallerContexts[0] = NULL;
-
- pPacket = HTC_GET_PKT_AT_HEAD(pQueue);
- if (pPacket == NULL) {
- A_ASSERT(false);
- break;
- }
-
- creditPad = 0;
- transferLength = DEV_CALC_SEND_PADDED_LEN(&target->Device,
- pPacket->ActualLength + HTC_HDR_LENGTH);
- /* see if the padded transfer length falls on a credit boundary */
- creditRemainder = transferLength % target->TargetCreditSize;
-
- if (creditRemainder != 0) {
- /* the transfer consumes a "partial" credit, this packet cannot be bundled unless
- * we add additional "dummy" padding (max 255 bytes) to consume the entire credit
- *** NOTE: only allow the send padding if the endpoint is allowed to */
- if (pEndpoint->LocalConnectionFlags & HTC_LOCAL_CONN_FLAGS_ENABLE_SEND_BUNDLE_PADDING) {
- if (transferLength < target->TargetCreditSize) {
- /* special case where the transfer is less than a credit */
- creditPad = target->TargetCreditSize - transferLength;
- } else {
- creditPad = creditRemainder;
- }
-
- /* now check to see if we can indicate padding in the HTC header */
- if ((creditPad > 0) && (creditPad <= 255)) {
- /* adjust the transferlength of this packet with the new credit padding */
- transferLength += creditPad;
- } else {
- /* the amount to pad is too large, bail on this packet, we have to
- * send it using the non-bundled method */
- pPacket = NULL;
- }
- } else {
- /* bail on this packet, user does not want padding applied */
- pPacket = NULL;
- }
- }
-
- if (NULL == pPacket) {
- /* can't bundle */
- done = true;
- break;
- }
-
- if (scatterSpaceRemaining < transferLength) {
- /* exceeds what we can transfer */
- break;
- }
-
- scatterSpaceRemaining -= transferLength;
- /* now remove it from the queue */
- pPacket = HTC_PACKET_DEQUEUE(pQueue);
- /* save it in the scatter list */
- pScatterReq->ScatterList[i].pCallerContexts[0] = pPacket;
- /* prepare packet and flag message as part of a send bundle */
- HTC_PREPARE_SEND_PKT(pPacket,
- pPacket->PktInfo.AsTx.SendFlags | HTC_FLAGS_SEND_BUNDLE,
- creditPad,
- pPacket->PktInfo.AsTx.SeqNo);
- pScatterReq->ScatterList[i].pBuffer = pPacket->pBuffer;
- pScatterReq->ScatterList[i].Length = transferLength;
- A_ASSERT(transferLength);
- pScatterReq->TotalLength += transferLength;
- pScatterReq->ValidScatterEntries++;
- packetsInScatterReq++;
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" %d, Adding packet : 0x%lX, len:%d (remaining space:%d) \n",
- i, (unsigned long)pPacket,transferLength,scatterSpaceRemaining));
- }
-
- if (packetsInScatterReq >= HTC_MIN_HTC_MSGS_TO_BUNDLE) {
- /* send path is always asynchronous */
- pScatterReq->CompletionRoutine = HTCAsyncSendScatterCompletion;
- pScatterReq->Context = pEndpoint;
- bundlesSent++;
- totalPktsInBundle += packetsInScatterReq;
- packetsInScatterReq = 0;
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,(" Send Scatter total bytes: %d , entries: %d\n",
- pScatterReq->TotalLength,pScatterReq->ValidScatterEntries));
- DevSubmitScatterRequest(&target->Device, pScatterReq, DEV_SCATTER_WRITE, DEV_SCATTER_ASYNC);
- /* we don't own this anymore */
- pScatterReq = NULL;
- /* try to send some more */
- continue;
- }
-
- /* not enough packets to use the scatter request, cleanup */
- if (pScatterReq != NULL) {
- if (packetsInScatterReq > 0) {
- /* work backwards to requeue requests */
- for (i = (packetsInScatterReq - 1); i >= 0; i--) {
- pPacket = (struct htc_packet *)(pScatterReq->ScatterList[i].pCallerContexts[0]);
- if (pPacket != NULL) {
- /* undo any prep */
- HTC_UNPREPARE_SEND_PKT(pPacket);
- /* queue back to the head */
- HTC_PACKET_ENQUEUE_TO_HEAD(pQueue,pPacket);
- }
- }
- }
- DEV_FREE_SCATTER_REQ(&target->Device,pScatterReq);
- }
-
- /* if we get here, we sent all that we could, get out */
- break;
-
- }
-
- *pBundlesSent = bundlesSent;
- *pTotalBundlesPkts = totalPktsInBundle;
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HTCIssueSendBundle (sent:%d) \n",bundlesSent));
-
- return;
-}
-
-/*
- * if there are no credits, the packet(s) remains in the queue.
- * this function returns the result of the attempt to send a queue of HTC packets */
-static HTC_SEND_QUEUE_RESULT HTCTrySend(struct htc_target *target,
- struct htc_endpoint *pEndpoint,
- struct htc_packet_queue *pCallersSendQueue)
-{
- struct htc_packet_queue sendQueue; /* temp queue to hold packets at various stages */
- struct htc_packet *pPacket;
- int bundlesSent;
- int pktsInBundles;
- int overflow;
- HTC_SEND_QUEUE_RESULT result = HTC_SEND_QUEUE_OK;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HTCTrySend (Queue:0x%lX Depth:%d)\n",
- (unsigned long)pCallersSendQueue,
- (pCallersSendQueue == NULL) ? 0 : HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue)));
-
- /* init the local send queue */
- INIT_HTC_PACKET_QUEUE(&sendQueue);
-
- do {
-
- if (NULL == pCallersSendQueue) {
- /* caller didn't provide a queue, just wants us to check queues and send */
- break;
- }
-
- if (HTC_QUEUE_EMPTY(pCallersSendQueue)) {
- /* empty queue */
- result = HTC_SEND_QUEUE_DROP;
- break;
- }
-
- if (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue) >= pEndpoint->MaxTxQueueDepth) {
- /* we've already overflowed */
- overflow = HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue);
- } else {
- /* figure out how much we will overflow by */
- overflow = HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue);
- overflow += HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue);
- /* figure out how much we will overflow the TX queue by */
- overflow -= pEndpoint->MaxTxQueueDepth;
- }
-
- /* if overflow is negative or zero, we are okay */
- if (overflow > 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
- (" Endpoint %d, TX queue will overflow :%d , Tx Depth:%d, Max:%d \n",
- pEndpoint->Id, overflow, HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue), pEndpoint->MaxTxQueueDepth));
- }
- if ((overflow <= 0) || (pEndpoint->EpCallBacks.EpSendFull == NULL)) {
- /* all packets will fit or caller did not provide send full indication handler
- * -- just move all of them to the local sendQueue object */
- HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&sendQueue, pCallersSendQueue);
- } else {
- int i;
- int goodPkts = HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue) - overflow;
-
- A_ASSERT(goodPkts >= 0);
- /* we have overflowed, and a callback is provided */
- /* dequeue all non-overflow packets into the sendqueue */
- for (i = 0; i < goodPkts; i++) {
- /* pop off caller's queue*/
- pPacket = HTC_PACKET_DEQUEUE(pCallersSendQueue);
- A_ASSERT(pPacket != NULL);
- /* insert into local queue */
- HTC_PACKET_ENQUEUE(&sendQueue,pPacket);
- }
-
- /* the caller's queue has all the packets that won't fit*/
- /* walk through the caller's queue and indicate each one to the send full handler */
- ITERATE_OVER_LIST_ALLOW_REMOVE(&pCallersSendQueue->QueueHead, pPacket, struct htc_packet, ListLink) {
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, (" Indicating overflowed TX packet: 0x%lX \n",
- (unsigned long)pPacket));
- if (pEndpoint->EpCallBacks.EpSendFull(pEndpoint->EpCallBacks.pContext,
- pPacket) == HTC_SEND_FULL_DROP) {
- /* callback wants the packet dropped */
- INC_HTC_EP_STAT(pEndpoint, TxDropped, 1);
- /* leave this one in the caller's queue for cleanup */
- } else {
- /* callback wants to keep this packet, remove from caller's queue */
- HTC_PACKET_REMOVE(pCallersSendQueue, pPacket);
- /* put it in the send queue */
- HTC_PACKET_ENQUEUE(&sendQueue,pPacket);
- }
-
- } ITERATE_END;
-
- if (HTC_QUEUE_EMPTY(&sendQueue)) {
- /* no packets made it in, caller will cleanup */
- result = HTC_SEND_QUEUE_DROP;
- break;
- }
- }
-
- } while (false);
-
- if (result != HTC_SEND_QUEUE_OK) {
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HTCTrySend: \n"));
- return result;
- }
-
- LOCK_HTC_TX(target);
-
- if (!HTC_QUEUE_EMPTY(&sendQueue)) {
- /* transfer packets */
- HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pEndpoint->TxQueue,&sendQueue);
- A_ASSERT(HTC_QUEUE_EMPTY(&sendQueue));
- INIT_HTC_PACKET_QUEUE(&sendQueue);
- }
-
- /* increment tx processing count on entry */
- pEndpoint->TxProcessCount++;
- if (pEndpoint->TxProcessCount > 1) {
- /* another thread or task is draining the TX queues on this endpoint
- * that thread will reset the tx processing count when the queue is drained */
- pEndpoint->TxProcessCount--;
- UNLOCK_HTC_TX(target);
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HTCTrySend (busy) \n"));
- return HTC_SEND_QUEUE_OK;
- }
-
- /***** beyond this point only 1 thread may enter ******/
-
- /* now drain the endpoint TX queue for transmission as long as we have enough
- * credits */
- while (true) {
-
- if (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue) == 0) {
- break;
- }
-
- /* get all the packets for this endpoint that we can for this pass */
- GetHTCSendPackets(target, pEndpoint, &sendQueue);
-
- if (HTC_PACKET_QUEUE_DEPTH(&sendQueue) == 0) {
- /* didn't get any packets due to a lack of credits */
- break;
- }
-
- UNLOCK_HTC_TX(target);
-
- /* any packets to send are now in our local send queue */
-
- bundlesSent = 0;
- pktsInBundles = 0;
-
- while (true) {
-
- /* try to send a bundle on each pass */
- if ((target->SendBundlingEnabled) &&
- (HTC_PACKET_QUEUE_DEPTH(&sendQueue) >= HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
- int temp1,temp2;
- /* bundling is enabled and there is at least a minimum number of packets in the send queue
- * send what we can in this pass */
- HTCIssueSendBundle(pEndpoint, &sendQueue, &temp1, &temp2);
- bundlesSent += temp1;
- pktsInBundles += temp2;
- }
-
- /* if not bundling or there was a packet that could not be placed in a bundle, pull it out
- * and send it the normal way */
- pPacket = HTC_PACKET_DEQUEUE(&sendQueue);
- if (NULL == pPacket) {
- /* local queue is fully drained */
- break;
- }
- HTC_PREPARE_SEND_PKT(pPacket,
- pPacket->PktInfo.AsTx.SendFlags,
- 0,
- pPacket->PktInfo.AsTx.SeqNo);
- HTCIssueSend(target, pPacket);
-
- /* go back and see if we can bundle some more */
- }
-
- LOCK_HTC_TX(target);
-
- INC_HTC_EP_STAT(pEndpoint, TxBundles, bundlesSent);
- INC_HTC_EP_STAT(pEndpoint, TxPacketsBundled, pktsInBundles);
-
- }
-
- /* done with this endpoint, we can clear the count */
- pEndpoint->TxProcessCount = 0;
- UNLOCK_HTC_TX(target);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HTCTrySend: \n"));
-
- return HTC_SEND_QUEUE_OK;
-}
-
-int HTCSendPktsMultiple(HTC_HANDLE HTCHandle, struct htc_packet_queue *pPktQueue)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- struct htc_endpoint *pEndpoint;
- struct htc_packet *pPacket;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("+HTCSendPktsMultiple: Queue: 0x%lX, Pkts %d \n",
- (unsigned long)pPktQueue, HTC_PACKET_QUEUE_DEPTH(pPktQueue)));
-
- /* get packet at head to figure out which endpoint these packets will go into */
- pPacket = HTC_GET_PKT_AT_HEAD(pPktQueue);
- if (NULL == pPacket) {
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-HTCSendPktsMultiple \n"));
- return A_EINVAL;
- }
-
- AR_DEBUG_ASSERT(pPacket->Endpoint < ENDPOINT_MAX);
- pEndpoint = &target->EndPoint[pPacket->Endpoint];
-
- HTCTrySend(target, pEndpoint, pPktQueue);
-
- /* do completion on any packets that couldn't get in */
- if (!HTC_QUEUE_EMPTY(pPktQueue)) {
-
- HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pPktQueue,pPacket) {
- if (HTC_STOPPING(target)) {
- pPacket->Status = A_ECANCELED;
- } else {
- pPacket->Status = A_NO_RESOURCE;
- }
- } HTC_PACKET_QUEUE_ITERATE_END;
-
- DO_EP_TX_COMPLETION(pEndpoint,pPktQueue);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-HTCSendPktsMultiple \n"));
-
- return 0;
-}
-
-/* HTC API - HTCSendPkt */
-int HTCSendPkt(HTC_HANDLE HTCHandle, struct htc_packet *pPacket)
-{
- struct htc_packet_queue queue;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
- ("+-HTCSendPkt: Enter endPointId: %d, buffer: 0x%lX, length: %d \n",
- pPacket->Endpoint, (unsigned long)pPacket->pBuffer, pPacket->ActualLength));
- INIT_HTC_PACKET_QUEUE_AND_ADD(&queue,pPacket);
- return HTCSendPktsMultiple(HTCHandle, &queue);
-}
-
-/* check TX queues to drain because of credit distribution update */
-static INLINE void HTCCheckEndpointTxQueues(struct htc_target *target)
-{
- struct htc_endpoint *pEndpoint;
- struct htc_endpoint_credit_dist *pDistItem;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("+HTCCheckEndpointTxQueues \n"));
- pDistItem = target->EpCreditDistributionListHead;
-
- /* run through the credit distribution list to see
- * if there are packets queued
- * NOTE: no locks need to be taken since the distribution list
- * is not dynamic (cannot be re-ordered) and we are not modifying any state */
- while (pDistItem != NULL) {
- pEndpoint = (struct htc_endpoint *)pDistItem->pHTCReserved;
-
- if (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue) > 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, (" Ep %d has %d credits and %d Packets in TX Queue \n",
- pDistItem->Endpoint, pEndpoint->CreditDist.TxCredits, HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue)));
- /* try to start the stalled queue, this list is ordered by priority.
- * Highest priority queue get's processed first, if there are credits available the
- * highest priority queue will get a chance to reclaim credits from lower priority
- * ones */
- HTCTrySend(target, pEndpoint, NULL);
- }
-
- pDistItem = pDistItem->pNext;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-HTCCheckEndpointTxQueues \n"));
-}
-
-/* process credit reports and call distribution function */
-void HTCProcessCreditRpt(struct htc_target *target, HTC_CREDIT_REPORT *pRpt, int NumEntries, HTC_ENDPOINT_ID FromEndpoint)
-{
- int i;
- struct htc_endpoint *pEndpoint;
- int totalCredits = 0;
- bool doDist = false;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("+HTCProcessCreditRpt, Credit Report Entries:%d \n", NumEntries));
-
- /* lock out TX while we update credits */
- LOCK_HTC_TX(target);
-
- for (i = 0; i < NumEntries; i++, pRpt++) {
- if (pRpt->EndpointID >= ENDPOINT_MAX) {
- AR_DEBUG_ASSERT(false);
- break;
- }
-
- pEndpoint = &target->EndPoint[pRpt->EndpointID];
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, (" Endpoint %d got %d credits \n",
- pRpt->EndpointID, pRpt->Credits));
-
- INC_HTC_EP_STAT(pEndpoint, TxCreditRpts, 1);
- INC_HTC_EP_STAT(pEndpoint, TxCreditsReturned, pRpt->Credits);
-
- if (FromEndpoint == pRpt->EndpointID) {
- /* this credit report arrived on the same endpoint indicating it arrived in an RX
- * packet */
- INC_HTC_EP_STAT(pEndpoint, TxCreditsFromRx, pRpt->Credits);
- INC_HTC_EP_STAT(pEndpoint, TxCreditRptsFromRx, 1);
- } else if (FromEndpoint == ENDPOINT_0) {
- /* this credit arrived on endpoint 0 as a NULL message */
- INC_HTC_EP_STAT(pEndpoint, TxCreditsFromEp0, pRpt->Credits);
- INC_HTC_EP_STAT(pEndpoint, TxCreditRptsFromEp0, 1);
- } else {
- /* arrived on another endpoint */
- INC_HTC_EP_STAT(pEndpoint, TxCreditsFromOther, pRpt->Credits);
- INC_HTC_EP_STAT(pEndpoint, TxCreditRptsFromOther, 1);
- }
-
- if (ENDPOINT_0 == pRpt->EndpointID) {
- /* always give endpoint 0 credits back */
- pEndpoint->CreditDist.TxCredits += pRpt->Credits;
- } else {
- /* for all other endpoints, update credits to distribute, the distribution function
- * will handle giving out credits back to the endpoints */
- pEndpoint->CreditDist.TxCreditsToDist += pRpt->Credits;
- /* flag that we have to do the distribution */
- doDist = true;
- }
-
- /* refresh tx depth for distribution function that will recover these credits
- * NOTE: this is only valid when there are credits to recover! */
- pEndpoint->CreditDist.TxQueueDepth = HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue);
-
- totalCredits += pRpt->Credits;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, (" Report indicated %d credits to distribute \n", totalCredits));
-
- if (doDist) {
- /* this was a credit return based on a completed send operations
- * note, this is done with the lock held */
- DO_DISTRIBUTION(target,
- HTC_CREDIT_DIST_SEND_COMPLETE,
- "Send Complete",
- target->EpCreditDistributionListHead->pNext);
- }
-
- UNLOCK_HTC_TX(target);
-
- if (totalCredits) {
- HTCCheckEndpointTxQueues(target);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-HTCProcessCreditRpt \n"));
-}
-
-/* flush endpoint TX queue */
-static void HTCFlushEndpointTX(struct htc_target *target, struct htc_endpoint *pEndpoint, HTC_TX_TAG Tag)
-{
- struct htc_packet *pPacket;
- struct htc_packet_queue discardQueue;
- struct htc_packet_queue container;
-
- /* initialize the discard queue */
- INIT_HTC_PACKET_QUEUE(&discardQueue);
-
- LOCK_HTC_TX(target);
-
- /* interate from the front of the TX queue and flush out packets */
- ITERATE_OVER_LIST_ALLOW_REMOVE(&pEndpoint->TxQueue.QueueHead, pPacket, struct htc_packet, ListLink) {
-
- /* check for removal */
- if ((HTC_TX_PACKET_TAG_ALL == Tag) || (Tag == pPacket->PktInfo.AsTx.Tag)) {
- /* remove from queue */
- HTC_PACKET_REMOVE(&pEndpoint->TxQueue, pPacket);
- /* add it to the discard pile */
- HTC_PACKET_ENQUEUE(&discardQueue, pPacket);
- }
-
- } ITERATE_END;
-
- UNLOCK_HTC_TX(target);
-
- /* empty the discard queue */
- while (1) {
- pPacket = HTC_PACKET_DEQUEUE(&discardQueue);
- if (NULL == pPacket) {
- break;
- }
- pPacket->Status = A_ECANCELED;
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, (" Flushing TX packet:0x%lX, length:%d, ep:%d tag:0x%X \n",
- (unsigned long)pPacket, pPacket->ActualLength, pPacket->Endpoint, pPacket->PktInfo.AsTx.Tag));
- INIT_HTC_PACKET_QUEUE_AND_ADD(&container,pPacket);
- DO_EP_TX_COMPLETION(pEndpoint,&container);
- }
-
-}
-
-void DumpCreditDist(struct htc_endpoint_credit_dist *pEPDist)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("--- EP : %d ServiceID: 0x%X --------------\n",
- pEPDist->Endpoint, pEPDist->ServiceID));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" this:0x%lX next:0x%lX prev:0x%lX\n",
- (unsigned long)pEPDist, (unsigned long)pEPDist->pNext, (unsigned long)pEPDist->pPrev));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" DistFlags : 0x%X \n", pEPDist->DistFlags));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditsNorm : %d \n", pEPDist->TxCreditsNorm));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditsMin : %d \n", pEPDist->TxCreditsMin));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCredits : %d \n", pEPDist->TxCredits));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditsAssigned : %d \n", pEPDist->TxCreditsAssigned));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditsSeek : %d \n", pEPDist->TxCreditsSeek));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditSize : %d \n", pEPDist->TxCreditSize));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditsPerMaxMsg : %d \n", pEPDist->TxCreditsPerMaxMsg));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxCreditsToDist : %d \n", pEPDist->TxCreditsToDist));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, (" TxQueueDepth : %d \n",
- HTC_PACKET_QUEUE_DEPTH(&((struct htc_endpoint *)pEPDist->pHTCReserved)->TxQueue)));
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("----------------------------------------------------\n"));
-}
-
-void DumpCreditDistStates(struct htc_target *target)
-{
- struct htc_endpoint_credit_dist *pEPList = target->EpCreditDistributionListHead;
-
- while (pEPList != NULL) {
- DumpCreditDist(pEPList);
- pEPList = pEPList->pNext;
- }
-
- if (target->DistributeCredits != NULL) {
- DO_DISTRIBUTION(target,
- HTC_DUMP_CREDIT_STATE,
- "Dump State",
- NULL);
- }
-}
-
-/* flush all send packets from all endpoint queues */
-void HTCFlushSendPkts(struct htc_target *target)
-{
- struct htc_endpoint *pEndpoint;
- int i;
-
- if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_TRC)) {
- DumpCreditDistStates(target);
- }
-
- for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
- pEndpoint = &target->EndPoint[i];
- if (pEndpoint->ServiceID == 0) {
- /* not in use.. */
- continue;
- }
- HTCFlushEndpointTX(target,pEndpoint,HTC_TX_PACKET_TAG_ALL);
- }
-
-
-}
-
-/* HTC API to flush an endpoint's TX queue*/
-void HTCFlushEndpoint(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint, HTC_TX_TAG Tag)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- struct htc_endpoint *pEndpoint = &target->EndPoint[Endpoint];
-
- if (pEndpoint->ServiceID == 0) {
- AR_DEBUG_ASSERT(false);
- /* not in use.. */
- return;
- }
-
- HTCFlushEndpointTX(target, pEndpoint, Tag);
-}
-
-/* HTC API to indicate activity to the credit distribution function */
-void HTCIndicateActivityChange(HTC_HANDLE HTCHandle,
- HTC_ENDPOINT_ID Endpoint,
- bool Active)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- struct htc_endpoint *pEndpoint = &target->EndPoint[Endpoint];
- bool doDist = false;
-
- if (pEndpoint->ServiceID == 0) {
- AR_DEBUG_ASSERT(false);
- /* not in use.. */
- return;
- }
-
- LOCK_HTC_TX(target);
-
- if (Active) {
- if (!(pEndpoint->CreditDist.DistFlags & HTC_EP_ACTIVE)) {
- /* mark active now */
- pEndpoint->CreditDist.DistFlags |= HTC_EP_ACTIVE;
- doDist = true;
- }
- } else {
- if (pEndpoint->CreditDist.DistFlags & HTC_EP_ACTIVE) {
- /* mark inactive now */
- pEndpoint->CreditDist.DistFlags &= ~HTC_EP_ACTIVE;
- doDist = true;
- }
- }
-
- if (doDist) {
- /* indicate current Tx Queue depth to the credit distribution function */
- pEndpoint->CreditDist.TxQueueDepth = HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue);
- /* do distribution again based on activity change
- * note, this is done with the lock held */
- DO_DISTRIBUTION(target,
- HTC_CREDIT_DIST_ACTIVITY_CHANGE,
- "Activity Change",
- target->EpCreditDistributionListHead->pNext);
- }
-
- UNLOCK_HTC_TX(target);
-
- if (doDist && !Active) {
- /* if a stream went inactive and this resulted in a credit distribution change,
- * some credits may now be available for HTC packets that are stuck in
- * HTC queues */
- HTCCheckEndpointTxQueues(target);
- }
-}
-
-bool HTCIsEndpointActive(HTC_HANDLE HTCHandle,
- HTC_ENDPOINT_ID Endpoint)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- struct htc_endpoint *pEndpoint = &target->EndPoint[Endpoint];
-
- if (pEndpoint->ServiceID == 0) {
- return false;
- }
-
- if (pEndpoint->CreditDist.DistFlags & HTC_EP_ACTIVE) {
- return true;
- }
-
- return false;
-}
diff --git a/drivers/staging/ath6kl/htc2/htc_services.c b/drivers/staging/ath6kl/htc2/htc_services.c
deleted file mode 100644
index c48070cbd54..00000000000
--- a/drivers/staging/ath6kl/htc2/htc_services.c
+++ /dev/null
@@ -1,450 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="htc_services.c" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#include "htc_internal.h"
-
-void HTCControlTxComplete(void *Context, struct htc_packet *pPacket)
-{
- /* not implemented
- * we do not send control TX frames during normal runtime, only during setup */
- AR_DEBUG_ASSERT(false);
-}
-
- /* callback when a control message arrives on this endpoint */
-void HTCControlRecv(void *Context, struct htc_packet *pPacket)
-{
- AR_DEBUG_ASSERT(pPacket->Endpoint == ENDPOINT_0);
-
- if (pPacket->Status == A_ECANCELED) {
- /* this is a flush operation, return the control packet back to the pool */
- HTC_FREE_CONTROL_RX((struct htc_target*)Context,pPacket);
- return;
- }
-
- /* the only control messages we are expecting are NULL messages (credit resports) */
- if (pPacket->ActualLength > 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("HTCControlRecv, got message with length:%d \n",
- pPacket->ActualLength + (u32)HTC_HDR_LENGTH));
-
-#ifdef ATH_DEBUG_MODULE
- /* dump header and message */
- DebugDumpBytes(pPacket->pBuffer - HTC_HDR_LENGTH,
- pPacket->ActualLength + HTC_HDR_LENGTH,
- "Unexpected ENDPOINT 0 Message");
-#endif
- }
-
- HTC_RECYCLE_RX_PKT((struct htc_target*)Context,pPacket,&((struct htc_target*)Context)->EndPoint[0]);
-}
-
-int HTCSendSetupComplete(struct htc_target *target)
-{
- struct htc_packet *pSendPacket = NULL;
- int status;
-
- do {
- /* allocate a packet to send to the target */
- pSendPacket = HTC_ALLOC_CONTROL_TX(target);
-
- if (NULL == pSendPacket) {
- status = A_NO_MEMORY;
- break;
- }
-
- if (target->HTCTargetVersion >= HTC_VERSION_2P1) {
- HTC_SETUP_COMPLETE_EX_MSG *pSetupCompleteEx;
- u32 setupFlags = 0;
-
- pSetupCompleteEx = (HTC_SETUP_COMPLETE_EX_MSG *)pSendPacket->pBuffer;
- A_MEMZERO(pSetupCompleteEx, sizeof(HTC_SETUP_COMPLETE_EX_MSG));
- pSetupCompleteEx->MessageID = HTC_MSG_SETUP_COMPLETE_EX_ID;
- if (target->MaxMsgPerBundle > 0) {
- /* host can do HTC bundling, indicate this to the target */
- setupFlags |= HTC_SETUP_COMPLETE_FLAGS_ENABLE_BUNDLE_RECV;
- pSetupCompleteEx->MaxMsgsPerBundledRecv = target->MaxMsgPerBundle;
- }
- memcpy(&pSetupCompleteEx->SetupFlags, &setupFlags, sizeof(pSetupCompleteEx->SetupFlags));
- SET_HTC_PACKET_INFO_TX(pSendPacket,
- NULL,
- (u8 *)pSetupCompleteEx,
- sizeof(HTC_SETUP_COMPLETE_EX_MSG),
- ENDPOINT_0,
- HTC_SERVICE_TX_PACKET_TAG);
-
- } else {
- HTC_SETUP_COMPLETE_MSG *pSetupComplete;
- /* assemble setup complete message */
- pSetupComplete = (HTC_SETUP_COMPLETE_MSG *)pSendPacket->pBuffer;
- A_MEMZERO(pSetupComplete, sizeof(HTC_SETUP_COMPLETE_MSG));
- pSetupComplete->MessageID = HTC_MSG_SETUP_COMPLETE_ID;
- SET_HTC_PACKET_INFO_TX(pSendPacket,
- NULL,
- (u8 *)pSetupComplete,
- sizeof(HTC_SETUP_COMPLETE_MSG),
- ENDPOINT_0,
- HTC_SERVICE_TX_PACKET_TAG);
- }
-
- /* we want synchronous operation */
- pSendPacket->Completion = NULL;
- HTC_PREPARE_SEND_PKT(pSendPacket,0,0,0);
- /* send the message */
- status = HTCIssueSend(target,pSendPacket);
-
- } while (false);
-
- if (pSendPacket != NULL) {
- HTC_FREE_CONTROL_TX(target,pSendPacket);
- }
-
- return status;
-}
-
-
-int HTCConnectService(HTC_HANDLE HTCHandle,
- struct htc_service_connect_req *pConnectReq,
- struct htc_service_connect_resp *pConnectResp)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- int status = 0;
- struct htc_packet *pRecvPacket = NULL;
- struct htc_packet *pSendPacket = NULL;
- HTC_CONNECT_SERVICE_RESPONSE_MSG *pResponseMsg;
- HTC_CONNECT_SERVICE_MSG *pConnectMsg;
- HTC_ENDPOINT_ID assignedEndpoint = ENDPOINT_MAX;
- struct htc_endpoint *pEndpoint;
- unsigned int maxMsgSize = 0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+HTCConnectService, target:0x%lX SvcID:0x%X \n",
- (unsigned long)target, pConnectReq->ServiceID));
-
- do {
-
- AR_DEBUG_ASSERT(pConnectReq->ServiceID != 0);
-
- if (HTC_CTRL_RSVD_SVC == pConnectReq->ServiceID) {
- /* special case for pseudo control service */
- assignedEndpoint = ENDPOINT_0;
- maxMsgSize = HTC_MAX_CONTROL_MESSAGE_LENGTH;
- } else {
- /* allocate a packet to send to the target */
- pSendPacket = HTC_ALLOC_CONTROL_TX(target);
-
- if (NULL == pSendPacket) {
- AR_DEBUG_ASSERT(false);
- status = A_NO_MEMORY;
- break;
- }
- /* assemble connect service message */
- pConnectMsg = (HTC_CONNECT_SERVICE_MSG *)pSendPacket->pBuffer;
- AR_DEBUG_ASSERT(pConnectMsg != NULL);
- A_MEMZERO(pConnectMsg,sizeof(HTC_CONNECT_SERVICE_MSG));
- pConnectMsg->MessageID = HTC_MSG_CONNECT_SERVICE_ID;
- pConnectMsg->ServiceID = pConnectReq->ServiceID;
- pConnectMsg->ConnectionFlags = pConnectReq->ConnectionFlags;
- /* check caller if it wants to transfer meta data */
- if ((pConnectReq->pMetaData != NULL) &&
- (pConnectReq->MetaDataLength <= HTC_SERVICE_META_DATA_MAX_LENGTH)) {
- /* copy meta data into message buffer (after header ) */
- memcpy((u8 *)pConnectMsg + sizeof(HTC_CONNECT_SERVICE_MSG),
- pConnectReq->pMetaData,
- pConnectReq->MetaDataLength);
- pConnectMsg->ServiceMetaLength = pConnectReq->MetaDataLength;
- }
-
- SET_HTC_PACKET_INFO_TX(pSendPacket,
- NULL,
- (u8 *)pConnectMsg,
- sizeof(HTC_CONNECT_SERVICE_MSG) + pConnectMsg->ServiceMetaLength,
- ENDPOINT_0,
- HTC_SERVICE_TX_PACKET_TAG);
-
- /* we want synchronous operation */
- pSendPacket->Completion = NULL;
- HTC_PREPARE_SEND_PKT(pSendPacket,0,0,0);
- status = HTCIssueSend(target,pSendPacket);
-
- if (status) {
- break;
- }
-
- /* wait for response */
- status = HTCWaitforControlMessage(target, &pRecvPacket);
-
- if (status) {
- break;
- }
- /* we controlled the buffer creation so it has to be properly aligned */
- pResponseMsg = (HTC_CONNECT_SERVICE_RESPONSE_MSG *)pRecvPacket->pBuffer;
-
- if ((pResponseMsg->MessageID != HTC_MSG_CONNECT_SERVICE_RESPONSE_ID) ||
- (pRecvPacket->ActualLength < sizeof(HTC_CONNECT_SERVICE_RESPONSE_MSG))) {
- /* this message is not valid */
- AR_DEBUG_ASSERT(false);
- status = A_EPROTO;
- break;
- }
-
- pConnectResp->ConnectRespCode = pResponseMsg->Status;
- /* check response status */
- if (pResponseMsg->Status != HTC_SERVICE_SUCCESS) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- (" Target failed service 0x%X connect request (status:%d)\n",
- pResponseMsg->ServiceID, pResponseMsg->Status));
- status = A_EPROTO;
- break;
- }
-
- assignedEndpoint = (HTC_ENDPOINT_ID) pResponseMsg->EndpointID;
- maxMsgSize = pResponseMsg->MaxMsgSize;
-
- if ((pConnectResp->pMetaData != NULL) &&
- (pResponseMsg->ServiceMetaLength > 0) &&
- (pResponseMsg->ServiceMetaLength <= HTC_SERVICE_META_DATA_MAX_LENGTH)) {
- /* caller supplied a buffer and the target responded with data */
- int copyLength = min((int)pConnectResp->BufferLength, (int)pResponseMsg->ServiceMetaLength);
- /* copy the meta data */
- memcpy(pConnectResp->pMetaData,
- ((u8 *)pResponseMsg) + sizeof(HTC_CONNECT_SERVICE_RESPONSE_MSG),
- copyLength);
- pConnectResp->ActualLength = copyLength;
- }
-
- }
-
- /* the rest of these are parameter checks so set the error status */
- status = A_EPROTO;
-
- if (assignedEndpoint >= ENDPOINT_MAX) {
- AR_DEBUG_ASSERT(false);
- break;
- }
-
- if (0 == maxMsgSize) {
- AR_DEBUG_ASSERT(false);
- break;
- }
-
- pEndpoint = &target->EndPoint[assignedEndpoint];
- pEndpoint->Id = assignedEndpoint;
- if (pEndpoint->ServiceID != 0) {
- /* endpoint already in use! */
- AR_DEBUG_ASSERT(false);
- break;
- }
-
- /* return assigned endpoint to caller */
- pConnectResp->Endpoint = assignedEndpoint;
- pConnectResp->MaxMsgLength = maxMsgSize;
-
- /* setup the endpoint */
- pEndpoint->ServiceID = pConnectReq->ServiceID; /* this marks the endpoint in use */
- pEndpoint->MaxTxQueueDepth = pConnectReq->MaxSendQueueDepth;
- pEndpoint->MaxMsgLength = maxMsgSize;
- /* copy all the callbacks */
- pEndpoint->EpCallBacks = pConnectReq->EpCallbacks;
- /* set the credit distribution info for this endpoint, this information is
- * passed back to the credit distribution callback function */
- pEndpoint->CreditDist.ServiceID = pConnectReq->ServiceID;
- pEndpoint->CreditDist.pHTCReserved = pEndpoint;
- pEndpoint->CreditDist.Endpoint = assignedEndpoint;
- pEndpoint->CreditDist.TxCreditSize = target->TargetCreditSize;
-
- if (pConnectReq->MaxSendMsgSize != 0) {
- /* override TxCreditsPerMaxMsg calculation, this optimizes the credit-low indications
- * since the host will actually issue smaller messages in the Send path */
- if (pConnectReq->MaxSendMsgSize > maxMsgSize) {
- /* can't be larger than the maximum the target can support */
- AR_DEBUG_ASSERT(false);
- break;
- }
- pEndpoint->CreditDist.TxCreditsPerMaxMsg = pConnectReq->MaxSendMsgSize / target->TargetCreditSize;
- } else {
- pEndpoint->CreditDist.TxCreditsPerMaxMsg = maxMsgSize / target->TargetCreditSize;
- }
-
- if (0 == pEndpoint->CreditDist.TxCreditsPerMaxMsg) {
- pEndpoint->CreditDist.TxCreditsPerMaxMsg = 1;
- }
-
- /* save local connection flags */
- pEndpoint->LocalConnectionFlags = pConnectReq->LocalConnectionFlags;
-
- status = 0;
-
- } while (false);
-
- if (pSendPacket != NULL) {
- HTC_FREE_CONTROL_TX(target,pSendPacket);
- }
-
- if (pRecvPacket != NULL) {
- HTC_FREE_CONTROL_RX(target,pRecvPacket);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-HTCConnectService \n"));
-
- return status;
-}
-
-static void AddToEndpointDistList(struct htc_target *target, struct htc_endpoint_credit_dist *pEpDist)
-{
- struct htc_endpoint_credit_dist *pCurEntry,*pLastEntry;
-
- if (NULL == target->EpCreditDistributionListHead) {
- target->EpCreditDistributionListHead = pEpDist;
- pEpDist->pNext = NULL;
- pEpDist->pPrev = NULL;
- return;
- }
-
- /* queue to the end of the list, this does not have to be very
- * fast since this list is built at startup time */
- pCurEntry = target->EpCreditDistributionListHead;
-
- while (pCurEntry) {
- pLastEntry = pCurEntry;
- pCurEntry = pCurEntry->pNext;
- }
-
- pLastEntry->pNext = pEpDist;
- pEpDist->pPrev = pLastEntry;
- pEpDist->pNext = NULL;
-}
-
-
-
-/* default credit init callback */
-static void HTCDefaultCreditInit(void *Context,
- struct htc_endpoint_credit_dist *pEPList,
- int TotalCredits)
-{
- struct htc_endpoint_credit_dist *pCurEpDist;
- int totalEps = 0;
- int creditsPerEndpoint;
-
- pCurEpDist = pEPList;
- /* first run through the list and figure out how many endpoints we are dealing with */
- while (pCurEpDist != NULL) {
- pCurEpDist = pCurEpDist->pNext;
- totalEps++;
- }
-
- /* even distribution */
- creditsPerEndpoint = TotalCredits/totalEps;
-
- pCurEpDist = pEPList;
- /* run through the list and set minimum and normal credits and
- * provide the endpoint with some credits to start */
- while (pCurEpDist != NULL) {
-
- if (creditsPerEndpoint < pCurEpDist->TxCreditsPerMaxMsg) {
- /* too many endpoints and not enough credits */
- AR_DEBUG_ASSERT(false);
- break;
- }
- /* our minimum is set for at least 1 max message */
- pCurEpDist->TxCreditsMin = pCurEpDist->TxCreditsPerMaxMsg;
- /* this value is ignored by our credit alg, since we do
- * not dynamically adjust credits, this is the policy of
- * the "default" credit distribution, something simple and easy */
- pCurEpDist->TxCreditsNorm = 0xFFFF;
- /* give the endpoint minimum credits */
- pCurEpDist->TxCredits = creditsPerEndpoint;
- pCurEpDist->TxCreditsAssigned = creditsPerEndpoint;
- pCurEpDist = pCurEpDist->pNext;
- }
-
-}
-
-/* default credit distribution callback, NOTE, this callback holds the TX lock */
-void HTCDefaultCreditDist(void *Context,
- struct htc_endpoint_credit_dist *pEPDistList,
- HTC_CREDIT_DIST_REASON Reason)
-{
- struct htc_endpoint_credit_dist *pCurEpDist;
-
- if (Reason == HTC_CREDIT_DIST_SEND_COMPLETE) {
- pCurEpDist = pEPDistList;
- /* simple distribution */
- while (pCurEpDist != NULL) {
- if (pCurEpDist->TxCreditsToDist > 0) {
- /* just give the endpoint back the credits */
- pCurEpDist->TxCredits += pCurEpDist->TxCreditsToDist;
- pCurEpDist->TxCreditsToDist = 0;
- }
- pCurEpDist = pCurEpDist->pNext;
- }
- }
-
- /* note we do not need to handle the other reason codes as this is a very
- * simple distribution scheme, no need to seek for more credits or handle inactivity */
-}
-
-void HTCSetCreditDistribution(HTC_HANDLE HTCHandle,
- void *pCreditDistContext,
- HTC_CREDIT_DIST_CALLBACK CreditDistFunc,
- HTC_CREDIT_INIT_CALLBACK CreditInitFunc,
- HTC_SERVICE_ID ServicePriorityOrder[],
- int ListLength)
-{
- struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
- int i;
- int ep;
-
- if (CreditInitFunc != NULL) {
- /* caller has supplied their own distribution functions */
- target->InitCredits = CreditInitFunc;
- AR_DEBUG_ASSERT(CreditDistFunc != NULL);
- target->DistributeCredits = CreditDistFunc;
- target->pCredDistContext = pCreditDistContext;
- } else {
- /* caller wants HTC to do distribution */
- /* if caller wants service to handle distributions then
- * it must set both of these to NULL! */
- AR_DEBUG_ASSERT(CreditDistFunc == NULL);
- target->InitCredits = HTCDefaultCreditInit;
- target->DistributeCredits = HTCDefaultCreditDist;
- target->pCredDistContext = target;
- }
-
- /* always add HTC control endpoint first, we only expose the list after the
- * first one, this is added for TX queue checking */
- AddToEndpointDistList(target, &target->EndPoint[ENDPOINT_0].CreditDist);
-
- /* build the list of credit distribution structures in priority order
- * supplied by the caller, these will follow endpoint 0 */
- for (i = 0; i < ListLength; i++) {
- /* match services with endpoints and add the endpoints to the distribution list
- * in FIFO order */
- for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
- if (target->EndPoint[ep].ServiceID == ServicePriorityOrder[i]) {
- /* queue this one to the list */
- AddToEndpointDistList(target, &target->EndPoint[ep].CreditDist);
- break;
- }
- }
- AR_DEBUG_ASSERT(ep < ENDPOINT_MAX);
- }
-
-}
diff --git a/drivers/staging/ath6kl/include/a_config.h b/drivers/staging/ath6kl/include/a_config.h
deleted file mode 100644
index f7c09319433..00000000000
--- a/drivers/staging/ath6kl/include/a_config.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="a_config.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// This file contains software configuration options that enables
-// specific software "features"
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _A_CONFIG_H_
-#define _A_CONFIG_H_
-
-#include "../os/linux/include/config_linux.h"
-
-#endif
diff --git a/drivers/staging/ath6kl/include/a_debug.h b/drivers/staging/ath6kl/include/a_debug.h
deleted file mode 100644
index 5154fcb1ca6..00000000000
--- a/drivers/staging/ath6kl/include/a_debug.h
+++ /dev/null
@@ -1,195 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="a_debug.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _A_DEBUG_H_
-#define _A_DEBUG_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-#include <a_osapi.h>
-
- /* standard debug print masks bits 0..7 */
-#define ATH_DEBUG_ERR (1 << 0) /* errors */
-#define ATH_DEBUG_WARN (1 << 1) /* warnings */
-#define ATH_DEBUG_INFO (1 << 2) /* informational (module startup info) */
-#define ATH_DEBUG_TRC (1 << 3) /* generic function call tracing */
-#define ATH_DEBUG_RSVD1 (1 << 4)
-#define ATH_DEBUG_RSVD2 (1 << 5)
-#define ATH_DEBUG_RSVD3 (1 << 6)
-#define ATH_DEBUG_RSVD4 (1 << 7)
-
-#define ATH_DEBUG_MASK_DEFAULTS (ATH_DEBUG_ERR | ATH_DEBUG_WARN)
-#define ATH_DEBUG_ANY 0xFFFF
-
- /* other aliases used throughout */
-#define ATH_DEBUG_ERROR ATH_DEBUG_ERR
-#define ATH_LOG_ERR ATH_DEBUG_ERR
-#define ATH_LOG_INF ATH_DEBUG_INFO
-#define ATH_LOG_TRC ATH_DEBUG_TRC
-#define ATH_DEBUG_TRACE ATH_DEBUG_TRC
-#define ATH_DEBUG_INIT ATH_DEBUG_INFO
-
- /* bits 8..31 are module-specific masks */
-#define ATH_DEBUG_MODULE_MASK_SHIFT 8
-
- /* macro to make a module-specific masks */
-#define ATH_DEBUG_MAKE_MODULE_MASK(index) (1 << (ATH_DEBUG_MODULE_MASK_SHIFT + (index)))
-
-void DebugDumpBytes(u8 *buffer, u16 length, char *pDescription);
-
-/* Debug support on a per-module basis
- *
- * Usage:
- *
- * Each module can utilize it's own debug mask variable. A set of commonly used
- * masks are provided (ERRORS, WARNINGS, TRACE etc..). It is up to each module
- * to define module-specific masks using the macros above.
- *
- * Each module defines a single debug mask variable debug_XXX where the "name" of the module is
- * common to all C-files within that module. This requires every C-file that includes a_debug.h
- * to define the module name in that file.
- *
- * Example:
- *
- * #define ATH_MODULE_NAME htc
- * #include "a_debug.h"
- *
- * This will define a debug mask structure called debug_htc and all debug macros will reference this
- * variable.
- *
- * A module can define module-specific bit masks using the ATH_DEBUG_MAKE_MODULE_MASK() macro:
- *
- * #define ATH_DEBUG_MY_MASK1 ATH_DEBUG_MAKE_MODULE_MASK(0)
- * #define ATH_DEBUG_MY_MASK2 ATH_DEBUG_MAKE_MODULE_MASK(1)
- *
- * The instantiation of the debug structure should be made by the module. When a module is
- * instantiated, the module can set a description string, a default mask and an array of description
- * entries containing information on each module-defined debug mask.
- * NOTE: The instantiation is statically allocated, only one instance can exist per module.
- *
- * Example:
- *
- *
- * #define ATH_DEBUG_BMI ATH_DEBUG_MAKE_MODULE_MASK(0)
- *
- * #ifdef DEBUG
- * static struct ath_debug_mask_description bmi_debug_desc[] = {
- * { ATH_DEBUG_BMI , "BMI Tracing"}, <== description of the module specific mask
- * };
- *
- * ATH_DEBUG_INSTANTIATE_MODULE_VAR(bmi,
- * "bmi" <== module name
- * "Boot Manager Interface", <== description of module
- * ATH_DEBUG_MASK_DEFAULTS, <== defaults
- * ATH_DEBUG_DESCRIPTION_COUNT(bmi_debug_desc),
- * bmi_debug_desc);
- *
- * #endif
- *
- * A module can optionally register it's debug module information in order for other tools to change the
- * bit mask at runtime. A module can call A_REGISTER_MODULE_DEBUG_INFO() in it's module
- * init code. This macro can be called multiple times without consequence. The debug info maintains
- * state to indicate whether the information was previously registered.
- *
- * */
-
-#define ATH_DEBUG_MAX_MASK_DESC_LENGTH 32
-#define ATH_DEBUG_MAX_MOD_DESC_LENGTH 64
-
-struct ath_debug_mask_description {
- u32 Mask;
- char Description[ATH_DEBUG_MAX_MASK_DESC_LENGTH];
-};
-
-#define ATH_DEBUG_INFO_FLAGS_REGISTERED (1 << 0)
-
-typedef struct _ATH_DEBUG_MODULE_DBG_INFO{
- struct _ATH_DEBUG_MODULE_DBG_INFO *pNext;
- char ModuleName[16];
- char ModuleDescription[ATH_DEBUG_MAX_MOD_DESC_LENGTH];
- u32 Flags;
- u32 CurrentMask;
- int MaxDescriptions;
- struct ath_debug_mask_description *pMaskDescriptions; /* pointer to array of descriptions */
-} ATH_DEBUG_MODULE_DBG_INFO;
-
-#define ATH_DEBUG_DESCRIPTION_COUNT(d) (int)((sizeof((d))) / (sizeof(struct ath_debug_mask_description)))
-
-#define GET_ATH_MODULE_DEBUG_VAR_NAME(s) _XGET_ATH_MODULE_NAME_DEBUG_(s)
-#define GET_ATH_MODULE_DEBUG_VAR_MASK(s) _XGET_ATH_MODULE_NAME_DEBUG_(s).CurrentMask
-#define _XGET_ATH_MODULE_NAME_DEBUG_(s) debug_ ## s
-
-#ifdef ATH_DEBUG_MODULE
-
- /* for source files that will instantiate the debug variables */
-#define ATH_DEBUG_INSTANTIATE_MODULE_VAR(s,name,moddesc,initmask,count,descriptions) \
-ATH_DEBUG_MODULE_DBG_INFO GET_ATH_MODULE_DEBUG_VAR_NAME(s) = \
- {NULL,(name),(moddesc),0,(initmask),count,(descriptions)}
-
-#ifdef ATH_MODULE_NAME
-extern ATH_DEBUG_MODULE_DBG_INFO GET_ATH_MODULE_DEBUG_VAR_NAME(ATH_MODULE_NAME);
-#define AR_DEBUG_LVL_CHECK(lvl) (GET_ATH_MODULE_DEBUG_VAR_MASK(ATH_MODULE_NAME) & (lvl))
-#endif /* ATH_MODULE_NAME */
-
-#define ATH_DEBUG_SET_DEBUG_MASK(s,lvl) GET_ATH_MODULE_DEBUG_VAR_MASK(s) = (lvl)
-
-#define ATH_DEBUG_DECLARE_EXTERN(s) \
- extern ATH_DEBUG_MODULE_DBG_INFO GET_ATH_MODULE_DEBUG_VAR_NAME(s)
-
-#define AR_DEBUG_PRINTBUF(buffer, length, desc) DebugDumpBytes(buffer,length,desc)
-
-
-#define AR_DEBUG_ASSERT A_ASSERT
-
-void a_dump_module_debug_info(ATH_DEBUG_MODULE_DBG_INFO *pInfo);
-void a_register_module_debug_info(ATH_DEBUG_MODULE_DBG_INFO *pInfo);
-#define A_DUMP_MODULE_DEBUG_INFO(s) a_dump_module_debug_info(&(GET_ATH_MODULE_DEBUG_VAR_NAME(s)))
-#define A_REGISTER_MODULE_DEBUG_INFO(s) a_register_module_debug_info(&(GET_ATH_MODULE_DEBUG_VAR_NAME(s)))
-
-#else /* !ATH_DEBUG_MODULE */
- /* NON ATH_DEBUG_MODULE */
-#define ATH_DEBUG_INSTANTIATE_MODULE_VAR(s,name,moddesc,initmask,count,descriptions)
-#define AR_DEBUG_LVL_CHECK(lvl) 0
-#define AR_DEBUG_PRINTBUF(buffer, length, desc)
-#define AR_DEBUG_ASSERT(test)
-#define ATH_DEBUG_DECLARE_EXTERN(s)
-#define ATH_DEBUG_SET_DEBUG_MASK(s,lvl)
-#define A_DUMP_MODULE_DEBUG_INFO(s)
-#define A_REGISTER_MODULE_DEBUG_INFO(s)
-
-#endif
-
-int a_get_module_mask(char *module_name, u32 *pMask);
-int a_set_module_mask(char *module_name, u32 Mask);
-void a_dump_module_debug_info_by_name(char *module_name);
-void a_module_debug_support_init(void);
-void a_module_debug_support_cleanup(void);
-
-#include "../os/linux/include/debug_linux.h"
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif
diff --git a/drivers/staging/ath6kl/include/a_drv.h b/drivers/staging/ath6kl/include/a_drv.h
deleted file mode 100644
index 1548604e846..00000000000
--- a/drivers/staging/ath6kl/include/a_drv.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="a_drv.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// This file contains the definitions of the basic atheros data types.
-// It is used to map the data types in atheros files to a platform specific
-// type.
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _A_DRV_H_
-#define _A_DRV_H_
-
-#include "../os/linux/include/athdrv_linux.h"
-
-#endif /* _ADRV_H_ */
diff --git a/drivers/staging/ath6kl/include/a_drv_api.h b/drivers/staging/ath6kl/include/a_drv_api.h
deleted file mode 100644
index a40d97a84ff..00000000000
--- a/drivers/staging/ath6kl/include/a_drv_api.h
+++ /dev/null
@@ -1,204 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="a_drv_api.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _A_DRV_API_H_
-#define _A_DRV_API_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/****************************************************************************/
-/****************************************************************************/
-/** **/
-/** WMI related hooks **/
-/** **/
-/****************************************************************************/
-/****************************************************************************/
-
-#include <ar6000_api.h>
-
-#define A_WMI_CHANNELLIST_RX(devt, numChan, chanList) \
- ar6000_channelList_rx((devt), (numChan), (chanList))
-
-#define A_WMI_SET_NUMDATAENDPTS(devt, num) \
- ar6000_set_numdataendpts((devt), (num))
-
-#define A_WMI_CONTROL_TX(devt, osbuf, streamID) \
- ar6000_control_tx((devt), (osbuf), (streamID))
-
-#define A_WMI_TARGETSTATS_EVENT(devt, pStats, len) \
- ar6000_targetStats_event((devt), (pStats), (len))
-
-#define A_WMI_SCANCOMPLETE_EVENT(devt, status) \
- ar6000_scanComplete_event((devt), (status))
-
-#ifdef CONFIG_HOST_DSET_SUPPORT
-
-#define A_WMI_DSET_DATA_REQ(devt, access_cookie, offset, length, targ_buf, targ_reply_fn, targ_reply_arg) \
- ar6000_dset_data_req((devt), (access_cookie), (offset), (length), (targ_buf), (targ_reply_fn), (targ_reply_arg))
-
-#define A_WMI_DSET_CLOSE(devt, access_cookie) \
- ar6000_dset_close((devt), (access_cookie))
-
-#endif
-
-#define A_WMI_DSET_OPEN_REQ(devt, id, targ_handle, targ_reply_fn, targ_reply_arg) \
- ar6000_dset_open_req((devt), (id), (targ_handle), (targ_reply_fn), (targ_reply_arg))
-
-#define A_WMI_CONNECT_EVENT(devt, channel, bssid, listenInterval, beaconInterval, networkType, beaconIeLen, assocReqLen, assocRespLen, assocInfo) \
- ar6000_connect_event((devt), (channel), (bssid), (listenInterval), (beaconInterval), (networkType), (beaconIeLen), (assocReqLen), (assocRespLen), (assocInfo))
-
-#define A_WMI_PSPOLL_EVENT(devt, aid)\
- ar6000_pspoll_event((devt),(aid))
-
-#define A_WMI_DTIMEXPIRY_EVENT(devt)\
- ar6000_dtimexpiry_event((devt))
-
-#ifdef WAPI_ENABLE
-#define A_WMI_WAPI_REKEY_EVENT(devt, type, mac)\
- ap_wapi_rekey_event((devt),(type),(mac))
-#endif
-
-#define A_WMI_REGDOMAIN_EVENT(devt, regCode) \
- ar6000_regDomain_event((devt), (regCode))
-
-#define A_WMI_NEIGHBORREPORT_EVENT(devt, numAps, info) \
- ar6000_neighborReport_event((devt), (numAps), (info))
-
-#define A_WMI_DISCONNECT_EVENT(devt, reason, bssid, assocRespLen, assocInfo, protocolReasonStatus) \
- ar6000_disconnect_event((devt), (reason), (bssid), (assocRespLen), (assocInfo), (protocolReasonStatus))
-
-#define A_WMI_TKIP_MICERR_EVENT(devt, keyid, ismcast) \
- ar6000_tkip_micerr_event((devt), (keyid), (ismcast))
-
-#define A_WMI_BITRATE_RX(devt, rateKbps) \
- ar6000_bitrate_rx((devt), (rateKbps))
-
-#define A_WMI_TXPWR_RX(devt, txPwr) \
- ar6000_txPwr_rx((devt), (txPwr))
-
-#define A_WMI_READY_EVENT(devt, datap, phyCap, sw_ver, abi_ver) \
- ar6000_ready_event((devt), (datap), (phyCap), (sw_ver), (abi_ver))
-
-#define A_WMI_DBGLOG_INIT_DONE(ar) \
- ar6000_dbglog_init_done(ar);
-
-#define A_WMI_RSSI_THRESHOLD_EVENT(devt, newThreshold, rssi) \
- ar6000_rssiThreshold_event((devt), (newThreshold), (rssi))
-
-#define A_WMI_REPORT_ERROR_EVENT(devt, errorVal) \
- ar6000_reportError_event((devt), (errorVal))
-
-#define A_WMI_ROAM_TABLE_EVENT(devt, pTbl) \
- ar6000_roam_tbl_event((devt), (pTbl))
-
-#define A_WMI_ROAM_DATA_EVENT(devt, p) \
- ar6000_roam_data_event((devt), (p))
-
-#define A_WMI_WOW_LIST_EVENT(devt, num_filters, wow_filters) \
- ar6000_wow_list_event((devt), (num_filters), (wow_filters))
-
-#define A_WMI_CAC_EVENT(devt, ac, cac_indication, statusCode, tspecSuggestion) \
- ar6000_cac_event((devt), (ac), (cac_indication), (statusCode), (tspecSuggestion))
-
-#define A_WMI_CHANNEL_CHANGE_EVENT(devt, oldChannel, newChannel) \
- ar6000_channel_change_event((devt), (oldChannel), (newChannel))
-
-#define A_WMI_PMKID_LIST_EVENT(devt, num_pmkid, pmkid_list, bssid_list) \
- ar6000_pmkid_list_event((devt), (num_pmkid), (pmkid_list), (bssid_list))
-
-#define A_WMI_PEER_EVENT(devt, eventCode, bssid) \
- ar6000_peer_event ((devt), (eventCode), (bssid))
-
-#ifdef CONFIG_HOST_TCMD_SUPPORT
-#define A_WMI_TCMD_RX_REPORT_EVENT(devt, results, len) \
- ar6000_tcmd_rx_report_event((devt), (results), (len))
-#endif
-
-#define A_WMI_HBCHALLENGERESP_EVENT(devt, cookie, source) \
- ar6000_hbChallengeResp_event((devt), (cookie), (source))
-
-#define A_WMI_TX_RETRY_ERR_EVENT(devt) \
- ar6000_tx_retry_err_event((devt))
-
-#define A_WMI_SNR_THRESHOLD_EVENT_RX(devt, newThreshold, snr) \
- ar6000_snrThresholdEvent_rx((devt), (newThreshold), (snr))
-
-#define A_WMI_LQ_THRESHOLD_EVENT_RX(devt, range, lqVal) \
- ar6000_lqThresholdEvent_rx((devt), (range), (lqVal))
-
-#define A_WMI_RATEMASK_RX(devt, ratemask) \
- ar6000_ratemask_rx((devt), (ratemask))
-
-#define A_WMI_KEEPALIVE_RX(devt, configured) \
- ar6000_keepalive_rx((devt), (configured))
-
-#define A_WMI_BSSINFO_EVENT_RX(ar, datp, len) \
- ar6000_bssInfo_event_rx((ar), (datap), (len))
-
-#define A_WMI_DBGLOG_EVENT(ar, dropped, buffer, length) \
- ar6000_dbglog_event((ar), (dropped), (buffer), (length));
-
-#define A_WMI_STREAM_TX_ACTIVE(devt,trafficClass) \
- ar6000_indicate_tx_activity((devt),(trafficClass), true)
-
-#define A_WMI_STREAM_TX_INACTIVE(devt,trafficClass) \
- ar6000_indicate_tx_activity((devt),(trafficClass), false)
-#define A_WMI_Ac2EndpointID(devht, ac)\
- ar6000_ac2_endpoint_id((devht), (ac))
-
-#define A_WMI_AGGR_RECV_ADDBA_REQ_EVT(devt, cmd)\
- ar6000_aggr_rcv_addba_req_evt((devt), (cmd))
-#define A_WMI_AGGR_RECV_ADDBA_RESP_EVT(devt, cmd)\
- ar6000_aggr_rcv_addba_resp_evt((devt), (cmd))
-#define A_WMI_AGGR_RECV_DELBA_REQ_EVT(devt, cmd)\
- ar6000_aggr_rcv_delba_req_evt((devt), (cmd))
-#define A_WMI_HCI_EVENT_EVT(devt, cmd)\
- ar6000_hci_event_rcv_evt((devt), (cmd))
-
-#define A_WMI_Endpoint2Ac(devt, ep) \
- ar6000_endpoint_id2_ac((devt), (ep))
-
-#define A_WMI_BTCOEX_CONFIG_EVENT(devt, evt, len)\
- ar6000_btcoex_config_event((devt), (evt), (len))
-
-#define A_WMI_BTCOEX_STATS_EVENT(devt, datap, len)\
- ar6000_btcoex_stats_event((devt), (datap), (len))
-
-/****************************************************************************/
-/****************************************************************************/
-/** **/
-/** HTC related hooks **/
-/** **/
-/****************************************************************************/
-/****************************************************************************/
-
-#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
-#define A_WMI_PROF_COUNT_RX(addr, count) prof_count_rx((addr), (count))
-#endif /* CONFIG_TARGET_PROFILE_SUPPORT */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/drivers/staging/ath6kl/include/a_osapi.h b/drivers/staging/ath6kl/include/a_osapi.h
deleted file mode 100644
index fd7ae0d612c..00000000000
--- a/drivers/staging/ath6kl/include/a_osapi.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="a_osapi.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// This file contains the definitions of the basic atheros data types.
-// It is used to map the data types in atheros files to a platform specific
-// type.
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _A_OSAPI_H_
-#define _A_OSAPI_H_
-
-#include "../os/linux/include/osapi_linux.h"
-
-#endif /* _OSAPI_H_ */
diff --git a/drivers/staging/ath6kl/include/aggr_recv_api.h b/drivers/staging/ath6kl/include/aggr_recv_api.h
deleted file mode 100644
index 5ead58d5feb..00000000000
--- a/drivers/staging/ath6kl/include/aggr_recv_api.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- *
- * Copyright (c) 2004-2010 Atheros Communications Inc.
- * All rights reserved.
- *
- *
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
- *
- */
-
-#ifndef __AGGR_RECV_API_H__
-#define __AGGR_RECV_API_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef void (* RX_CALLBACK)(void * dev, void *osbuf);
-
-typedef void (* ALLOC_NETBUFS)(A_NETBUF_QUEUE_T *q, u16 num);
-
-/*
- * aggr_init:
- * Initialises the data structures, allocates data queues and
- * os buffers. Netbuf allocator is the input param, used by the
- * aggr module for allocation of NETBUFs from driver context.
- * These NETBUFs are used for AMSDU processing.
- * Returns the context for the aggr module.
- */
-void *
-aggr_init(ALLOC_NETBUFS netbuf_allocator);
-
-
-/*
- * aggr_register_rx_dispatcher:
- * Registers OS call back function to deliver the
- * frames to OS. This is generally the topmost layer of
- * the driver context, after which the frames go to
- * IP stack via the call back function.
- * This dispatcher is active only when aggregation is ON.
- */
-void
-aggr_register_rx_dispatcher(void *cntxt, void * dev, RX_CALLBACK fn);
-
-
-/*
- * aggr_process_bar:
- * When target receives BAR, it communicates to host driver
- * for modifying window parameters. Target indicates this via the
- * event: WMI_ADDBA_REQ_EVENTID. Host will dequeue all frames
- * up to the indicated sequence number.
- */
-void
-aggr_process_bar(void *cntxt, u8 tid, u16 seq_no);
-
-
-/*
- * aggr_recv_addba_req_evt:
- * This event is to initiate/modify the receive side window.
- * Target will send WMI_ADDBA_REQ_EVENTID event to host - to setup
- * recv re-ordering queues. Target will negotiate ADDBA with peer,
- * and indicate via this event after successfully completing the
- * negotiation. This happens in two situations:
- * 1. Initial setup of aggregation
- * 2. Renegotiation of current recv window.
- * Window size for re-ordering is limited by target buffer
- * space, which is reflected in win_sz.
- * (Re)Start the periodic timer to deliver long standing frames,
- * in hold_q to OS.
- */
-void
-aggr_recv_addba_req_evt(void * cntxt, u8 tid, u16 seq_no, u8 win_sz);
-
-
-/*
- * aggr_recv_delba_req_evt:
- * Target indicates deletion of a BA window for a tid via the
- * WMI_DELBA_EVENTID. Host would deliver all the frames in the
- * hold_q, reset tid config and disable the periodic timer, if
- * aggr is not enabled on any tid.
- */
-void
-aggr_recv_delba_req_evt(void * cntxt, u8 tid);
-
-
-
-/*
- * aggr_process_recv_frm:
- * Called only for data frames. When aggr is ON for a tid, the buffer
- * is always consumed, and osbuf would be NULL. For a non-aggr case,
- * osbuf is not modified.
- * AMSDU frames are consumed and are later freed. They are sliced and
- * diced to individual frames and dispatched to stack.
- * After consuming a osbuf(when aggr is ON), a previously registered
- * callback may be called to deliver frames in order.
- */
-void
-aggr_process_recv_frm(void *cntxt, u8 tid, u16 seq_no, bool is_amsdu, void **osbuf);
-
-
-/*
- * aggr_module_destroy:
- * Frees up all the queues and frames in them. Releases the cntxt to OS.
- */
-void
-aggr_module_destroy(void *cntxt);
-
-/*
- * Dumps the aggregation stats
- */
-void
-aggr_dump_stats(void *cntxt, PACKET_LOG **log_buf);
-
-/*
- * aggr_reset_state -- Called when it is deemed necessary to clear the aggregate
- * hold Q state. Examples include when a Connect event or disconnect event is
- * received.
- */
-void
-aggr_reset_state(void *cntxt);
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /*__AGGR_RECV_API_H__ */
diff --git a/drivers/staging/ath6kl/include/ar3kconfig.h b/drivers/staging/ath6kl/include/ar3kconfig.h
deleted file mode 100644
index 91bc4ee3512..00000000000
--- a/drivers/staging/ath6kl/include/ar3kconfig.h
+++ /dev/null
@@ -1,65 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-/* AR3K module configuration APIs for HCI-bridge operation */
-
-#ifndef AR3KCONFIG_H_
-#define AR3KCONFIG_H_
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define AR3K_CONFIG_FLAG_FORCE_MINBOOT_EXIT (1 << 0)
-#define AR3K_CONFIG_FLAG_SET_AR3K_BAUD (1 << 1)
-#define AR3K_CONFIG_FLAG_AR3K_BAUD_CHANGE_DELAY (1 << 2)
-#define AR3K_CONFIG_FLAG_SET_AR6K_SCALE_STEP (1 << 3)
-
-
-struct ar3k_config_info {
- u32 Flags; /* config flags */
- void *pHCIDev; /* HCI bridge device */
- struct hci_transport_properties *pHCIProps; /* HCI bridge props */
- struct hif_device *pHIFDevice; /* HIF layer device */
-
- u32 AR3KBaudRate; /* AR3K operational baud rate */
- u16 AR6KScale; /* AR6K UART scale value */
- u16 AR6KStep; /* AR6K UART step value */
- struct hci_dev *pBtStackHCIDev; /* BT Stack HCI dev */
- u32 PwrMgmtEnabled; /* TLPM enabled? */
- u16 IdleTimeout; /* TLPM idle timeout */
- u16 WakeupTimeout; /* TLPM wakeup timeout */
- u8 bdaddr[6]; /* Bluetooth device address */
-};
-
-int AR3KConfigure(struct ar3k_config_info *pConfigInfo);
-
-int AR3KConfigureExit(void *config);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /*AR3KCONFIG_H_*/
diff --git a/drivers/staging/ath6kl/include/ar6000_api.h b/drivers/staging/ath6kl/include/ar6000_api.h
deleted file mode 100644
index e9460800272..00000000000
--- a/drivers/staging/ath6kl/include/ar6000_api.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="ar6000_api.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// This file contains the API to access the OS dependent atheros host driver
-// by the WMI or WLAN generic modules.
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _AR6000_API_H_
-#define _AR6000_API_H_
-
-#include "../os/linux/include/ar6xapi_linux.h"
-
-#endif /* _AR6000_API_H */
-
diff --git a/drivers/staging/ath6kl/include/ar6000_diag.h b/drivers/staging/ath6kl/include/ar6000_diag.h
deleted file mode 100644
index 739c01c53f0..00000000000
--- a/drivers/staging/ath6kl/include/ar6000_diag.h
+++ /dev/null
@@ -1,48 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="ar6000_diag.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef AR6000_DIAG_H_
-#define AR6000_DIAG_H_
-
-
-int
-ar6000_ReadRegDiag(struct hif_device *hifDevice, u32 *address, u32 *data);
-
-int
-ar6000_WriteRegDiag(struct hif_device *hifDevice, u32 *address, u32 *data);
-
-int
-ar6000_ReadDataDiag(struct hif_device *hifDevice, u32 address,
- u8 *data, u32 length);
-
-int
-ar6000_WriteDataDiag(struct hif_device *hifDevice, u32 address,
- u8 *data, u32 length);
-
-int
-ar6k_ReadTargetRegister(struct hif_device *hifDevice, int regsel, u32 *regval);
-
-void
-ar6k_FetchTargetRegs(struct hif_device *hifDevice, u32 *targregs);
-
-#endif /*AR6000_DIAG_H_*/
diff --git a/drivers/staging/ath6kl/include/ar6kap_common.h b/drivers/staging/ath6kl/include/ar6kap_common.h
deleted file mode 100644
index 532d8eba932..00000000000
--- a/drivers/staging/ath6kl/include/ar6kap_common.h
+++ /dev/null
@@ -1,44 +0,0 @@
-//------------------------------------------------------------------------------
-
-// <copyright file="ar6kap_common.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-
-//==============================================================================
-
-// This file contains the definitions of common AP mode data structures.
-//
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef _AR6KAP_COMMON_H_
-#define _AR6KAP_COMMON_H_
-/*
- * Used with AR6000_XIOCTL_AP_GET_STA_LIST
- */
-typedef struct {
- u8 mac[ATH_MAC_LEN];
- u8 aid;
- u8 keymgmt;
- u8 ucipher;
- u8 auth;
-} station_t;
-typedef struct {
- station_t sta[AP_MAX_NUM_STA];
-} ap_get_sta_t;
-#endif /* _AR6KAP_COMMON_H_ */
diff --git a/drivers/staging/ath6kl/include/athbtfilter.h b/drivers/staging/ath6kl/include/athbtfilter.h
deleted file mode 100644
index 81456eea3b0..00000000000
--- a/drivers/staging/ath6kl/include/athbtfilter.h
+++ /dev/null
@@ -1,135 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="athbtfilter.h" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Public Bluetooth filter APIs
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef ATHBTFILTER_H_
-#define ATHBTFILTER_H_
-
-#define ATH_DEBUG_INFO (1 << 2)
-#define ATH_DEBUG_INF ATH_DEBUG_INFO
-
-typedef enum _ATHBT_HCI_CTRL_TYPE {
- ATHBT_HCI_COMMAND = 0,
- ATHBT_HCI_EVENT = 1,
-} ATHBT_HCI_CTRL_TYPE;
-
-typedef enum _ATHBT_STATE_INDICATION {
- ATH_BT_NOOP = 0,
- ATH_BT_INQUIRY = 1,
- ATH_BT_CONNECT = 2,
- ATH_BT_SCO = 3,
- ATH_BT_ACL = 4,
- ATH_BT_A2DP = 5,
- ATH_BT_ESCO = 6,
- /* new states go here.. */
-
- ATH_BT_MAX_STATE_INDICATION
-} ATHBT_STATE_INDICATION;
-
- /* filter function for OUTGOING commands and INCOMMING events */
-typedef void (*ATHBT_FILTER_CMD_EVENTS_FN)(void *pContext, ATHBT_HCI_CTRL_TYPE Type, unsigned char *pBuffer, int Length);
-
- /* filter function for OUTGOING data HCI packets */
-typedef void (*ATHBT_FILTER_DATA_FN)(void *pContext, unsigned char *pBuffer, int Length);
-
-typedef enum _ATHBT_STATE {
- STATE_OFF = 0,
- STATE_ON = 1,
- STATE_MAX
-} ATHBT_STATE;
-
- /* BT state indication (when filter functions are not used) */
-
-typedef void (*ATHBT_INDICATE_STATE_FN)(void *pContext, ATHBT_STATE_INDICATION Indication, ATHBT_STATE State, unsigned char LMPVersion);
-
-struct athbt_filter_instance {
-#ifdef UNDER_CE
- WCHAR *pWlanAdapterName; /* filled in by user */
-#else
- char *pWlanAdapterName; /* filled in by user */
-#endif /* UNDER_CE */
- int FilterEnabled; /* filtering is enabled */
- int Attached; /* filter library is attached */
- void *pContext; /* private context for filter library */
- ATHBT_FILTER_CMD_EVENTS_FN pFilterCmdEvents; /* function ptr to filter a command or event */
- ATHBT_FILTER_DATA_FN pFilterAclDataOut; /* function ptr to filter ACL data out (to radio) */
- ATHBT_FILTER_DATA_FN pFilterAclDataIn; /* function ptr to filter ACL data in (from radio) */
- ATHBT_INDICATE_STATE_FN pIndicateState; /* function ptr to indicate a state */
-}; /* XXX: unused ? */
-
-
-/* API MACROS */
-
-#define AthBtFilterHciCommand(instance,packet,length) \
- if ((instance)->FilterEnabled) { \
- (instance)->pFilterCmdEvents((instance)->pContext, \
- ATHBT_HCI_COMMAND, \
- (unsigned char *)(packet), \
- (length)); \
- }
-
-#define AthBtFilterHciEvent(instance,packet,length) \
- if ((instance)->FilterEnabled) { \
- (instance)->pFilterCmdEvents((instance)->pContext, \
- ATHBT_HCI_EVENT, \
- (unsigned char *)(packet), \
- (length)); \
- }
-
-#define AthBtFilterHciAclDataOut(instance,packet,length) \
- if ((instance)->FilterEnabled) { \
- (instance)->pFilterAclDataOut((instance)->pContext, \
- (unsigned char *)(packet), \
- (length)); \
- }
-
-#define AthBtFilterHciAclDataIn(instance,packet,length) \
- if ((instance)->FilterEnabled) { \
- (instance)->pFilterAclDataIn((instance)->pContext, \
- (unsigned char *)(packet), \
- (length)); \
- }
-
-/* if filtering is not desired, the application can indicate the state directly using this
- * macro:
- */
-#define AthBtIndicateState(instance,indication,state) \
- if ((instance)->FilterEnabled) { \
- (instance)->pIndicateState((instance)->pContext, \
- (indication), \
- (state), \
- 0); \
- }
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* API prototypes */
-int AthBtFilter_Attach(ATH_BT_FILTER_INSTANCE *pInstance, unsigned int flags);
-void AthBtFilter_Detach(ATH_BT_FILTER_INSTANCE *pInstance);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /*ATHBTFILTER_H_*/
diff --git a/drivers/staging/ath6kl/include/bmi.h b/drivers/staging/ath6kl/include/bmi.h
deleted file mode 100644
index d3227f77fa5..00000000000
--- a/drivers/staging/ath6kl/include/bmi.h
+++ /dev/null
@@ -1,134 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="bmi.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// BMI declarations and prototypes
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _BMI_H_
-#define _BMI_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/* Header files */
-#include "a_config.h"
-#include "athdefs.h"
-#include "hif.h"
-#include "a_osapi.h"
-#include "bmi_msg.h"
-
-void
-BMIInit(void);
-
-void
-BMICleanup(void);
-
-int
-BMIDone(struct hif_device *device);
-
-int
-BMIGetTargetInfo(struct hif_device *device, struct bmi_target_info *targ_info);
-
-int
-BMIReadMemory(struct hif_device *device,
- u32 address,
- u8 *buffer,
- u32 length);
-
-int
-BMIWriteMemory(struct hif_device *device,
- u32 address,
- u8 *buffer,
- u32 length);
-
-int
-BMIExecute(struct hif_device *device,
- u32 address,
- u32 *param);
-
-int
-BMISetAppStart(struct hif_device *device,
- u32 address);
-
-int
-BMIReadSOCRegister(struct hif_device *device,
- u32 address,
- u32 *param);
-
-int
-BMIWriteSOCRegister(struct hif_device *device,
- u32 address,
- u32 param);
-
-int
-BMIrompatchInstall(struct hif_device *device,
- u32 ROM_addr,
- u32 RAM_addr,
- u32 nbytes,
- u32 do_activate,
- u32 *patch_id);
-
-int
-BMIrompatchUninstall(struct hif_device *device,
- u32 rompatch_id);
-
-int
-BMIrompatchActivate(struct hif_device *device,
- u32 rompatch_count,
- u32 *rompatch_list);
-
-int
-BMIrompatchDeactivate(struct hif_device *device,
- u32 rompatch_count,
- u32 *rompatch_list);
-
-int
-BMILZStreamStart(struct hif_device *device,
- u32 address);
-
-int
-BMILZData(struct hif_device *device,
- u8 *buffer,
- u32 length);
-
-int
-BMIFastDownload(struct hif_device *device,
- u32 address,
- u8 *buffer,
- u32 length);
-
-int
-BMIRawWrite(struct hif_device *device,
- u8 *buffer,
- u32 length);
-
-int
-BMIRawRead(struct hif_device *device,
- u8 *buffer,
- u32 length,
- bool want_timeout);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _BMI_H_ */
diff --git a/drivers/staging/ath6kl/include/common/AR6002/AR6K_version.h b/drivers/staging/ath6kl/include/common/AR6002/AR6K_version.h
deleted file mode 100644
index 5407e05d9b0..00000000000
--- a/drivers/staging/ath6kl/include/common/AR6002/AR6K_version.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="AR6K_version.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#define __VER_MAJOR_ 3
-#define __VER_MINOR_ 0
-#define __VER_PATCH_ 0
-
-/* The makear6ksdk script (used for release builds) modifies the following line. */
-#define __BUILD_NUMBER_ 233
-
-
-/* Format of the version number. */
-#define VER_MAJOR_BIT_OFFSET 28
-#define VER_MINOR_BIT_OFFSET 24
-#define VER_PATCH_BIT_OFFSET 16
-#define VER_BUILD_NUM_BIT_OFFSET 0
-
-
-/*
- * The version has the following format:
- * Bits 28-31: Major version
- * Bits 24-27: Minor version
- * Bits 16-23: Patch version
- * Bits 0-15: Build number (automatically generated during build process )
- * E.g. Build 1.1.3.7 would be represented as 0x11030007.
- *
- * DO NOT split the following macro into multiple lines as this may confuse the build scripts.
- */
-#define AR6K_SW_VERSION ( ( __VER_MAJOR_ << VER_MAJOR_BIT_OFFSET ) + ( __VER_MINOR_ << VER_MINOR_BIT_OFFSET ) + ( __VER_PATCH_ << VER_PATCH_BIT_OFFSET ) + ( __BUILD_NUMBER_ << VER_BUILD_NUM_BIT_OFFSET ) )
-
-/* ABI Version. Reflects the version of binary interface exposed by AR6K target firmware. Needs to be incremented by 1 for any change in the firmware that requires upgrade of the driver on the host side for the change to work correctly */
-#define AR6K_ABI_VERSION 1
diff --git a/drivers/staging/ath6kl/include/common/AR6002/addrs.h b/drivers/staging/ath6kl/include/common/AR6002/addrs.h
deleted file mode 100644
index bbf8d42828c..00000000000
--- a/drivers/staging/ath6kl/include/common/AR6002/addrs.h
+++ /dev/null
@@ -1,90 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#ifndef __ADDRS_H__
-#define __ADDRS_H__
-
-/*
- * Special AR6002 Addresses that may be needed by special
- * applications (e.g. ART) on the Host as well as Target.
- */
-
-#if defined(AR6002_REV2)
-#define AR6K_RAM_START 0x00500000
-#define TARG_RAM_OFFSET(vaddr) ((u32)(vaddr) & 0xfffff)
-#define TARG_RAM_SZ (184*1024)
-#define TARG_ROM_SZ (80*1024)
-#endif
-#if defined(AR6002_REV4) || defined(AR6003)
-#define AR6K_RAM_START 0x00540000
-#define TARG_RAM_OFFSET(vaddr) (((u32)(vaddr) & 0xfffff) - 0x40000)
-#define TARG_RAM_SZ (256*1024)
-#define TARG_ROM_SZ (256*1024)
-#endif
-
-#define AR6002_BOARD_DATA_SZ 768
-#define AR6002_BOARD_EXT_DATA_SZ 0
-#define AR6003_BOARD_DATA_SZ 1024
-#define AR6003_BOARD_EXT_DATA_SZ 768
-
-#define AR6K_RAM_ADDR(byte_offset) (AR6K_RAM_START+(byte_offset))
-#define TARG_RAM_ADDRS(byte_offset) AR6K_RAM_ADDR(byte_offset)
-
-#define AR6K_ROM_START 0x004e0000
-#define TARG_ROM_OFFSET(vaddr) (((u32)(vaddr) & 0x1fffff) - 0xe0000)
-#define AR6K_ROM_ADDR(byte_offset) (AR6K_ROM_START+(byte_offset))
-#define TARG_ROM_ADDRS(byte_offset) AR6K_ROM_ADDR(byte_offset)
-
-/*
- * At this ROM address is a pointer to the start of the ROM DataSet Index.
- * If there are no ROM DataSets, there's a 0 at this address.
- */
-#define ROM_DATASET_INDEX_ADDR (TARG_ROM_ADDRS(TARG_ROM_SZ)-8)
-#define ROM_MBIST_CKSUM_ADDR (TARG_ROM_ADDRS(TARG_ROM_SZ)-4)
-
-/*
- * The API A_BOARD_DATA_ADDR() is the proper way to get a read pointer to
- * board data.
- */
-
-/* Size of Board Data, in bytes */
-#if defined(AR6002_REV4) || defined(AR6003)
-#define BOARD_DATA_SZ AR6003_BOARD_DATA_SZ
-#else
-#define BOARD_DATA_SZ AR6002_BOARD_DATA_SZ
-#endif
-
-
-/*
- * Constants used by ASM code to access fields of host_interest_s,
- * which is at a fixed location in RAM.
- */
-#if defined(AR6002_REV4) || defined(AR6003)
-#define HOST_INTEREST_FLASH_IS_PRESENT_ADDR (AR6K_RAM_START + 0x60c)
-#else
-#define HOST_INTEREST_FLASH_IS_PRESENT_ADDR (AR6K_RAM_START + 0x40c)
-#endif
-#define FLASH_IS_PRESENT_TARGADDR HOST_INTEREST_FLASH_IS_PRESENT_ADDR
-
-#endif /* __ADDRS_H__ */
-
-
-
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_athr_wlan_map.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_athr_wlan_map.h
deleted file mode 100644
index 609eb9841f5..00000000000
--- a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_athr_wlan_map.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// ------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-// ------------------------------------------------------------------
-//===================================================================
-// Author(s): ="Atheros"
-//===================================================================
-
-
-#ifndef _APB_ATHR_WLAN_MAP_H_
-#define _APB_ATHR_WLAN_MAP_H_
-
-#define WLAN_RTC_BASE_ADDRESS 0x00004000
-#define WLAN_VMC_BASE_ADDRESS 0x00008000
-#define WLAN_UART_BASE_ADDRESS 0x0000c000
-#define WLAN_DBG_UART_BASE_ADDRESS 0x0000d000
-#define WLAN_UMBOX_BASE_ADDRESS 0x0000e000
-#define WLAN_SI_BASE_ADDRESS 0x00010000
-#define WLAN_GPIO_BASE_ADDRESS 0x00014000
-#define WLAN_MBOX_BASE_ADDRESS 0x00018000
-#define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000
-#define WLAN_MAC_BASE_ADDRESS 0x00020000
-#define WLAN_RDMA_BASE_ADDRESS 0x00030100
-#define EFUSE_BASE_ADDRESS 0x00031000
-
-#endif /* _APB_ATHR_WLAN_MAP_REG_H_ */
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_map.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_map.h
deleted file mode 100644
index 0068ca31b05..00000000000
--- a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/apb_map.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// ------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-// ------------------------------------------------------------------
-//===================================================================
-// Author(s): ="Atheros"
-//===================================================================
-
-
-#include "apb_athr_wlan_map.h"
-
-#ifndef BT_HEADERS
-
-#define RTC_BASE_ADDRESS WLAN_RTC_BASE_ADDRESS
-#define VMC_BASE_ADDRESS WLAN_VMC_BASE_ADDRESS
-#define UART_BASE_ADDRESS WLAN_UART_BASE_ADDRESS
-#define DBG_UART_BASE_ADDRESS WLAN_DBG_UART_BASE_ADDRESS
-#define UMBOX_BASE_ADDRESS WLAN_UMBOX_BASE_ADDRESS
-#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
-#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
-#define MBOX_BASE_ADDRESS WLAN_MBOX_BASE_ADDRESS
-#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS
-#define MAC_BASE_ADDRESS WLAN_MAC_BASE_ADDRESS
-#define RDMA_BASE_ADDRESS WLAN_RDMA_BASE_ADDRESS
-
-#endif
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_host_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_host_reg.h
deleted file mode 100644
index 109f24e10a6..00000000000
--- a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_host_reg.h
+++ /dev/null
@@ -1,24 +0,0 @@
-// ------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-// ------------------------------------------------------------------
-//===================================================================
-// Author(s): ="Atheros"
-//===================================================================
-
-
-#include "mbox_wlan_host_reg.h"
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_reg.h
deleted file mode 100644
index 72fa483450d..00000000000
--- a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_reg.h
+++ /dev/null
@@ -1,552 +0,0 @@
-// ------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-// ------------------------------------------------------------------
-//===================================================================
-// Author(s): ="Atheros"
-//===================================================================
-
-
-#include "mbox_wlan_reg.h"
-
-#ifndef BT_HEADERS
-
-#define MBOX_FIFO_ADDRESS WLAN_MBOX_FIFO_ADDRESS
-#define MBOX_FIFO_OFFSET WLAN_MBOX_FIFO_OFFSET
-#define MBOX_FIFO_DATA_MSB WLAN_MBOX_FIFO_DATA_MSB
-#define MBOX_FIFO_DATA_LSB WLAN_MBOX_FIFO_DATA_LSB
-#define MBOX_FIFO_DATA_MASK WLAN_MBOX_FIFO_DATA_MASK
-#define MBOX_FIFO_DATA_GET(x) WLAN_MBOX_FIFO_DATA_GET(x)
-#define MBOX_FIFO_DATA_SET(x) WLAN_MBOX_FIFO_DATA_SET(x)
-#define MBOX_FIFO_STATUS_ADDRESS WLAN_MBOX_FIFO_STATUS_ADDRESS
-#define MBOX_FIFO_STATUS_OFFSET WLAN_MBOX_FIFO_STATUS_OFFSET
-#define MBOX_FIFO_STATUS_EMPTY_MSB WLAN_MBOX_FIFO_STATUS_EMPTY_MSB
-#define MBOX_FIFO_STATUS_EMPTY_LSB WLAN_MBOX_FIFO_STATUS_EMPTY_LSB
-#define MBOX_FIFO_STATUS_EMPTY_MASK WLAN_MBOX_FIFO_STATUS_EMPTY_MASK
-#define MBOX_FIFO_STATUS_EMPTY_GET(x) WLAN_MBOX_FIFO_STATUS_EMPTY_GET(x)
-#define MBOX_FIFO_STATUS_EMPTY_SET(x) WLAN_MBOX_FIFO_STATUS_EMPTY_SET(x)
-#define MBOX_FIFO_STATUS_FULL_MSB WLAN_MBOX_FIFO_STATUS_FULL_MSB
-#define MBOX_FIFO_STATUS_FULL_LSB WLAN_MBOX_FIFO_STATUS_FULL_LSB
-#define MBOX_FIFO_STATUS_FULL_MASK WLAN_MBOX_FIFO_STATUS_FULL_MASK
-#define MBOX_FIFO_STATUS_FULL_GET(x) WLAN_MBOX_FIFO_STATUS_FULL_GET(x)
-#define MBOX_FIFO_STATUS_FULL_SET(x) WLAN_MBOX_FIFO_STATUS_FULL_SET(x)
-#define MBOX_DMA_POLICY_ADDRESS WLAN_MBOX_DMA_POLICY_ADDRESS
-#define MBOX_DMA_POLICY_OFFSET WLAN_MBOX_DMA_POLICY_OFFSET
-#define MBOX_DMA_POLICY_TX_QUANTUM_MSB WLAN_MBOX_DMA_POLICY_TX_QUANTUM_MSB
-#define MBOX_DMA_POLICY_TX_QUANTUM_LSB WLAN_MBOX_DMA_POLICY_TX_QUANTUM_LSB
-#define MBOX_DMA_POLICY_TX_QUANTUM_MASK WLAN_MBOX_DMA_POLICY_TX_QUANTUM_MASK
-#define MBOX_DMA_POLICY_TX_QUANTUM_GET(x) WLAN_MBOX_DMA_POLICY_TX_QUANTUM_GET(x)
-#define MBOX_DMA_POLICY_TX_QUANTUM_SET(x) WLAN_MBOX_DMA_POLICY_TX_QUANTUM_SET(x)
-#define MBOX_DMA_POLICY_TX_ORDER_MSB WLAN_MBOX_DMA_POLICY_TX_ORDER_MSB
-#define MBOX_DMA_POLICY_TX_ORDER_LSB WLAN_MBOX_DMA_POLICY_TX_ORDER_LSB
-#define MBOX_DMA_POLICY_TX_ORDER_MASK WLAN_MBOX_DMA_POLICY_TX_ORDER_MASK
-#define MBOX_DMA_POLICY_TX_ORDER_GET(x) WLAN_MBOX_DMA_POLICY_TX_ORDER_GET(x)
-#define MBOX_DMA_POLICY_TX_ORDER_SET(x) WLAN_MBOX_DMA_POLICY_TX_ORDER_SET(x)
-#define MBOX_DMA_POLICY_RX_QUANTUM_MSB WLAN_MBOX_DMA_POLICY_RX_QUANTUM_MSB
-#define MBOX_DMA_POLICY_RX_QUANTUM_LSB WLAN_MBOX_DMA_POLICY_RX_QUANTUM_LSB
-#define MBOX_DMA_POLICY_RX_QUANTUM_MASK WLAN_MBOX_DMA_POLICY_RX_QUANTUM_MASK
-#define MBOX_DMA_POLICY_RX_QUANTUM_GET(x) WLAN_MBOX_DMA_POLICY_RX_QUANTUM_GET(x)
-#define MBOX_DMA_POLICY_RX_QUANTUM_SET(x) WLAN_MBOX_DMA_POLICY_RX_QUANTUM_SET(x)
-#define MBOX_DMA_POLICY_RX_ORDER_MSB WLAN_MBOX_DMA_POLICY_RX_ORDER_MSB
-#define MBOX_DMA_POLICY_RX_ORDER_LSB WLAN_MBOX_DMA_POLICY_RX_ORDER_LSB
-#define MBOX_DMA_POLICY_RX_ORDER_MASK WLAN_MBOX_DMA_POLICY_RX_ORDER_MASK
-#define MBOX_DMA_POLICY_RX_ORDER_GET(x) WLAN_MBOX_DMA_POLICY_RX_ORDER_GET(x)
-#define MBOX_DMA_POLICY_RX_ORDER_SET(x) WLAN_MBOX_DMA_POLICY_RX_ORDER_SET(x)
-#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS
-#define MBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET
-#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB
-#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB
-#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK
-#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x)
-#define MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x)
-#define MBOX0_DMA_RX_CONTROL_ADDRESS WLAN_MBOX0_DMA_RX_CONTROL_ADDRESS
-#define MBOX0_DMA_RX_CONTROL_OFFSET WLAN_MBOX0_DMA_RX_CONTROL_OFFSET
-#define MBOX0_DMA_RX_CONTROL_RESUME_MSB WLAN_MBOX0_DMA_RX_CONTROL_RESUME_MSB
-#define MBOX0_DMA_RX_CONTROL_RESUME_LSB WLAN_MBOX0_DMA_RX_CONTROL_RESUME_LSB
-#define MBOX0_DMA_RX_CONTROL_RESUME_MASK WLAN_MBOX0_DMA_RX_CONTROL_RESUME_MASK
-#define MBOX0_DMA_RX_CONTROL_RESUME_GET(x) WLAN_MBOX0_DMA_RX_CONTROL_RESUME_GET(x)
-#define MBOX0_DMA_RX_CONTROL_RESUME_SET(x) WLAN_MBOX0_DMA_RX_CONTROL_RESUME_SET(x)
-#define MBOX0_DMA_RX_CONTROL_START_MSB WLAN_MBOX0_DMA_RX_CONTROL_START_MSB
-#define MBOX0_DMA_RX_CONTROL_START_LSB WLAN_MBOX0_DMA_RX_CONTROL_START_LSB
-#define MBOX0_DMA_RX_CONTROL_START_MASK WLAN_MBOX0_DMA_RX_CONTROL_START_MASK
-#define MBOX0_DMA_RX_CONTROL_START_GET(x) WLAN_MBOX0_DMA_RX_CONTROL_START_GET(x)
-#define MBOX0_DMA_RX_CONTROL_START_SET(x) WLAN_MBOX0_DMA_RX_CONTROL_START_SET(x)
-#define MBOX0_DMA_RX_CONTROL_STOP_MSB WLAN_MBOX0_DMA_RX_CONTROL_STOP_MSB
-#define MBOX0_DMA_RX_CONTROL_STOP_LSB WLAN_MBOX0_DMA_RX_CONTROL_STOP_LSB
-#define MBOX0_DMA_RX_CONTROL_STOP_MASK WLAN_MBOX0_DMA_RX_CONTROL_STOP_MASK
-#define MBOX0_DMA_RX_CONTROL_STOP_GET(x) WLAN_MBOX0_DMA_RX_CONTROL_STOP_GET(x)
-#define MBOX0_DMA_RX_CONTROL_STOP_SET(x) WLAN_MBOX0_DMA_RX_CONTROL_STOP_SET(x)
-#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS
-#define MBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET
-#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB
-#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB
-#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK
-#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x)
-#define MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x)
-#define MBOX0_DMA_TX_CONTROL_ADDRESS WLAN_MBOX0_DMA_TX_CONTROL_ADDRESS
-#define MBOX0_DMA_TX_CONTROL_OFFSET WLAN_MBOX0_DMA_TX_CONTROL_OFFSET
-#define MBOX0_DMA_TX_CONTROL_RESUME_MSB WLAN_MBOX0_DMA_TX_CONTROL_RESUME_MSB
-#define MBOX0_DMA_TX_CONTROL_RESUME_LSB WLAN_MBOX0_DMA_TX_CONTROL_RESUME_LSB
-#define MBOX0_DMA_TX_CONTROL_RESUME_MASK WLAN_MBOX0_DMA_TX_CONTROL_RESUME_MASK
-#define MBOX0_DMA_TX_CONTROL_RESUME_GET(x) WLAN_MBOX0_DMA_TX_CONTROL_RESUME_GET(x)
-#define MBOX0_DMA_TX_CONTROL_RESUME_SET(x) WLAN_MBOX0_DMA_TX_CONTROL_RESUME_SET(x)
-#define MBOX0_DMA_TX_CONTROL_START_MSB WLAN_MBOX0_DMA_TX_CONTROL_START_MSB
-#define MBOX0_DMA_TX_CONTROL_START_LSB WLAN_MBOX0_DMA_TX_CONTROL_START_LSB
-#define MBOX0_DMA_TX_CONTROL_START_MASK WLAN_MBOX0_DMA_TX_CONTROL_START_MASK
-#define MBOX0_DMA_TX_CONTROL_START_GET(x) WLAN_MBOX0_DMA_TX_CONTROL_START_GET(x)
-#define MBOX0_DMA_TX_CONTROL_START_SET(x) WLAN_MBOX0_DMA_TX_CONTROL_START_SET(x)
-#define MBOX0_DMA_TX_CONTROL_STOP_MSB WLAN_MBOX0_DMA_TX_CONTROL_STOP_MSB
-#define MBOX0_DMA_TX_CONTROL_STOP_LSB WLAN_MBOX0_DMA_TX_CONTROL_STOP_LSB
-#define MBOX0_DMA_TX_CONTROL_STOP_MASK WLAN_MBOX0_DMA_TX_CONTROL_STOP_MASK
-#define MBOX0_DMA_TX_CONTROL_STOP_GET(x) WLAN_MBOX0_DMA_TX_CONTROL_STOP_GET(x)
-#define MBOX0_DMA_TX_CONTROL_STOP_SET(x) WLAN_MBOX0_DMA_TX_CONTROL_STOP_SET(x)
-#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS
-#define MBOX1_DMA_RX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_OFFSET
-#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB
-#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB
-#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK
-#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x)
-#define MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x)
-#define MBOX1_DMA_RX_CONTROL_ADDRESS WLAN_MBOX1_DMA_RX_CONTROL_ADDRESS
-#define MBOX1_DMA_RX_CONTROL_OFFSET WLAN_MBOX1_DMA_RX_CONTROL_OFFSET
-#define MBOX1_DMA_RX_CONTROL_RESUME_MSB WLAN_MBOX1_DMA_RX_CONTROL_RESUME_MSB
-#define MBOX1_DMA_RX_CONTROL_RESUME_LSB WLAN_MBOX1_DMA_RX_CONTROL_RESUME_LSB
-#define MBOX1_DMA_RX_CONTROL_RESUME_MASK WLAN_MBOX1_DMA_RX_CONTROL_RESUME_MASK
-#define MBOX1_DMA_RX_CONTROL_RESUME_GET(x) WLAN_MBOX1_DMA_RX_CONTROL_RESUME_GET(x)
-#define MBOX1_DMA_RX_CONTROL_RESUME_SET(x) WLAN_MBOX1_DMA_RX_CONTROL_RESUME_SET(x)
-#define MBOX1_DMA_RX_CONTROL_START_MSB WLAN_MBOX1_DMA_RX_CONTROL_START_MSB
-#define MBOX1_DMA_RX_CONTROL_START_LSB WLAN_MBOX1_DMA_RX_CONTROL_START_LSB
-#define MBOX1_DMA_RX_CONTROL_START_MASK WLAN_MBOX1_DMA_RX_CONTROL_START_MASK
-#define MBOX1_DMA_RX_CONTROL_START_GET(x) WLAN_MBOX1_DMA_RX_CONTROL_START_GET(x)
-#define MBOX1_DMA_RX_CONTROL_START_SET(x) WLAN_MBOX1_DMA_RX_CONTROL_START_SET(x)
-#define MBOX1_DMA_RX_CONTROL_STOP_MSB WLAN_MBOX1_DMA_RX_CONTROL_STOP_MSB
-#define MBOX1_DMA_RX_CONTROL_STOP_LSB WLAN_MBOX1_DMA_RX_CONTROL_STOP_LSB
-#define MBOX1_DMA_RX_CONTROL_STOP_MASK WLAN_MBOX1_DMA_RX_CONTROL_STOP_MASK
-#define MBOX1_DMA_RX_CONTROL_STOP_GET(x) WLAN_MBOX1_DMA_RX_CONTROL_STOP_GET(x)
-#define MBOX1_DMA_RX_CONTROL_STOP_SET(x) WLAN_MBOX1_DMA_RX_CONTROL_STOP_SET(x)
-#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS
-#define MBOX1_DMA_TX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_OFFSET
-#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB
-#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB
-#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK
-#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x)
-#define MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x)
-#define MBOX1_DMA_TX_CONTROL_ADDRESS WLAN_MBOX1_DMA_TX_CONTROL_ADDRESS
-#define MBOX1_DMA_TX_CONTROL_OFFSET WLAN_MBOX1_DMA_TX_CONTROL_OFFSET
-#define MBOX1_DMA_TX_CONTROL_RESUME_MSB WLAN_MBOX1_DMA_TX_CONTROL_RESUME_MSB
-#define MBOX1_DMA_TX_CONTROL_RESUME_LSB WLAN_MBOX1_DMA_TX_CONTROL_RESUME_LSB
-#define MBOX1_DMA_TX_CONTROL_RESUME_MASK WLAN_MBOX1_DMA_TX_CONTROL_RESUME_MASK
-#define MBOX1_DMA_TX_CONTROL_RESUME_GET(x) WLAN_MBOX1_DMA_TX_CONTROL_RESUME_GET(x)
-#define MBOX1_DMA_TX_CONTROL_RESUME_SET(x) WLAN_MBOX1_DMA_TX_CONTROL_RESUME_SET(x)
-#define MBOX1_DMA_TX_CONTROL_START_MSB WLAN_MBOX1_DMA_TX_CONTROL_START_MSB
-#define MBOX1_DMA_TX_CONTROL_START_LSB WLAN_MBOX1_DMA_TX_CONTROL_START_LSB
-#define MBOX1_DMA_TX_CONTROL_START_MASK WLAN_MBOX1_DMA_TX_CONTROL_START_MASK
-#define MBOX1_DMA_TX_CONTROL_START_GET(x) WLAN_MBOX1_DMA_TX_CONTROL_START_GET(x)
-#define MBOX1_DMA_TX_CONTROL_START_SET(x) WLAN_MBOX1_DMA_TX_CONTROL_START_SET(x)
-#define MBOX1_DMA_TX_CONTROL_STOP_MSB WLAN_MBOX1_DMA_TX_CONTROL_STOP_MSB
-#define MBOX1_DMA_TX_CONTROL_STOP_LSB WLAN_MBOX1_DMA_TX_CONTROL_STOP_LSB
-#define MBOX1_DMA_TX_CONTROL_STOP_MASK WLAN_MBOX1_DMA_TX_CONTROL_STOP_MASK
-#define MBOX1_DMA_TX_CONTROL_STOP_GET(x) WLAN_MBOX1_DMA_TX_CONTROL_STOP_GET(x)
-#define MBOX1_DMA_TX_CONTROL_STOP_SET(x) WLAN_MBOX1_DMA_TX_CONTROL_STOP_SET(x)
-#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS
-#define MBOX2_DMA_RX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_OFFSET
-#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB
-#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB
-#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK
-#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x)
-#define MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x)
-#define MBOX2_DMA_RX_CONTROL_ADDRESS WLAN_MBOX2_DMA_RX_CONTROL_ADDRESS
-#define MBOX2_DMA_RX_CONTROL_OFFSET WLAN_MBOX2_DMA_RX_CONTROL_OFFSET
-#define MBOX2_DMA_RX_CONTROL_RESUME_MSB WLAN_MBOX2_DMA_RX_CONTROL_RESUME_MSB
-#define MBOX2_DMA_RX_CONTROL_RESUME_LSB WLAN_MBOX2_DMA_RX_CONTROL_RESUME_LSB
-#define MBOX2_DMA_RX_CONTROL_RESUME_MASK WLAN_MBOX2_DMA_RX_CONTROL_RESUME_MASK
-#define MBOX2_DMA_RX_CONTROL_RESUME_GET(x) WLAN_MBOX2_DMA_RX_CONTROL_RESUME_GET(x)
-#define MBOX2_DMA_RX_CONTROL_RESUME_SET(x) WLAN_MBOX2_DMA_RX_CONTROL_RESUME_SET(x)
-#define MBOX2_DMA_RX_CONTROL_START_MSB WLAN_MBOX2_DMA_RX_CONTROL_START_MSB
-#define MBOX2_DMA_RX_CONTROL_START_LSB WLAN_MBOX2_DMA_RX_CONTROL_START_LSB
-#define MBOX2_DMA_RX_CONTROL_START_MASK WLAN_MBOX2_DMA_RX_CONTROL_START_MASK
-#define MBOX2_DMA_RX_CONTROL_START_GET(x) WLAN_MBOX2_DMA_RX_CONTROL_START_GET(x)
-#define MBOX2_DMA_RX_CONTROL_START_SET(x) WLAN_MBOX2_DMA_RX_CONTROL_START_SET(x)
-#define MBOX2_DMA_RX_CONTROL_STOP_MSB WLAN_MBOX2_DMA_RX_CONTROL_STOP_MSB
-#define MBOX2_DMA_RX_CONTROL_STOP_LSB WLAN_MBOX2_DMA_RX_CONTROL_STOP_LSB
-#define MBOX2_DMA_RX_CONTROL_STOP_MASK WLAN_MBOX2_DMA_RX_CONTROL_STOP_MASK
-#define MBOX2_DMA_RX_CONTROL_STOP_GET(x) WLAN_MBOX2_DMA_RX_CONTROL_STOP_GET(x)
-#define MBOX2_DMA_RX_CONTROL_STOP_SET(x) WLAN_MBOX2_DMA_RX_CONTROL_STOP_SET(x)
-#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS
-#define MBOX2_DMA_TX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_OFFSET
-#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB
-#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB
-#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK
-#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x)
-#define MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x)
-#define MBOX2_DMA_TX_CONTROL_ADDRESS WLAN_MBOX2_DMA_TX_CONTROL_ADDRESS
-#define MBOX2_DMA_TX_CONTROL_OFFSET WLAN_MBOX2_DMA_TX_CONTROL_OFFSET
-#define MBOX2_DMA_TX_CONTROL_RESUME_MSB WLAN_MBOX2_DMA_TX_CONTROL_RESUME_MSB
-#define MBOX2_DMA_TX_CONTROL_RESUME_LSB WLAN_MBOX2_DMA_TX_CONTROL_RESUME_LSB
-#define MBOX2_DMA_TX_CONTROL_RESUME_MASK WLAN_MBOX2_DMA_TX_CONTROL_RESUME_MASK
-#define MBOX2_DMA_TX_CONTROL_RESUME_GET(x) WLAN_MBOX2_DMA_TX_CONTROL_RESUME_GET(x)
-#define MBOX2_DMA_TX_CONTROL_RESUME_SET(x) WLAN_MBOX2_DMA_TX_CONTROL_RESUME_SET(x)
-#define MBOX2_DMA_TX_CONTROL_START_MSB WLAN_MBOX2_DMA_TX_CONTROL_START_MSB
-#define MBOX2_DMA_TX_CONTROL_START_LSB WLAN_MBOX2_DMA_TX_CONTROL_START_LSB
-#define MBOX2_DMA_TX_CONTROL_START_MASK WLAN_MBOX2_DMA_TX_CONTROL_START_MASK
-#define MBOX2_DMA_TX_CONTROL_START_GET(x) WLAN_MBOX2_DMA_TX_CONTROL_START_GET(x)
-#define MBOX2_DMA_TX_CONTROL_START_SET(x) WLAN_MBOX2_DMA_TX_CONTROL_START_SET(x)
-#define MBOX2_DMA_TX_CONTROL_STOP_MSB WLAN_MBOX2_DMA_TX_CONTROL_STOP_MSB
-#define MBOX2_DMA_TX_CONTROL_STOP_LSB WLAN_MBOX2_DMA_TX_CONTROL_STOP_LSB
-#define MBOX2_DMA_TX_CONTROL_STOP_MASK WLAN_MBOX2_DMA_TX_CONTROL_STOP_MASK
-#define MBOX2_DMA_TX_CONTROL_STOP_GET(x) WLAN_MBOX2_DMA_TX_CONTROL_STOP_GET(x)
-#define MBOX2_DMA_TX_CONTROL_STOP_SET(x) WLAN_MBOX2_DMA_TX_CONTROL_STOP_SET(x)
-#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS
-#define MBOX3_DMA_RX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_OFFSET
-#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB
-#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB
-#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK
-#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x)
-#define MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x)
-#define MBOX3_DMA_RX_CONTROL_ADDRESS WLAN_MBOX3_DMA_RX_CONTROL_ADDRESS
-#define MBOX3_DMA_RX_CONTROL_OFFSET WLAN_MBOX3_DMA_RX_CONTROL_OFFSET
-#define MBOX3_DMA_RX_CONTROL_RESUME_MSB WLAN_MBOX3_DMA_RX_CONTROL_RESUME_MSB
-#define MBOX3_DMA_RX_CONTROL_RESUME_LSB WLAN_MBOX3_DMA_RX_CONTROL_RESUME_LSB
-#define MBOX3_DMA_RX_CONTROL_RESUME_MASK WLAN_MBOX3_DMA_RX_CONTROL_RESUME_MASK
-#define MBOX3_DMA_RX_CONTROL_RESUME_GET(x) WLAN_MBOX3_DMA_RX_CONTROL_RESUME_GET(x)
-#define MBOX3_DMA_RX_CONTROL_RESUME_SET(x) WLAN_MBOX3_DMA_RX_CONTROL_RESUME_SET(x)
-#define MBOX3_DMA_RX_CONTROL_START_MSB WLAN_MBOX3_DMA_RX_CONTROL_START_MSB
-#define MBOX3_DMA_RX_CONTROL_START_LSB WLAN_MBOX3_DMA_RX_CONTROL_START_LSB
-#define MBOX3_DMA_RX_CONTROL_START_MASK WLAN_MBOX3_DMA_RX_CONTROL_START_MASK
-#define MBOX3_DMA_RX_CONTROL_START_GET(x) WLAN_MBOX3_DMA_RX_CONTROL_START_GET(x)
-#define MBOX3_DMA_RX_CONTROL_START_SET(x) WLAN_MBOX3_DMA_RX_CONTROL_START_SET(x)
-#define MBOX3_DMA_RX_CONTROL_STOP_MSB WLAN_MBOX3_DMA_RX_CONTROL_STOP_MSB
-#define MBOX3_DMA_RX_CONTROL_STOP_LSB WLAN_MBOX3_DMA_RX_CONTROL_STOP_LSB
-#define MBOX3_DMA_RX_CONTROL_STOP_MASK WLAN_MBOX3_DMA_RX_CONTROL_STOP_MASK
-#define MBOX3_DMA_RX_CONTROL_STOP_GET(x) WLAN_MBOX3_DMA_RX_CONTROL_STOP_GET(x)
-#define MBOX3_DMA_RX_CONTROL_STOP_SET(x) WLAN_MBOX3_DMA_RX_CONTROL_STOP_SET(x)
-#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS
-#define MBOX3_DMA_TX_DESCRIPTOR_BASE_OFFSET WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_OFFSET
-#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB
-#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB
-#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK
-#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x)
-#define MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x)
-#define MBOX3_DMA_TX_CONTROL_ADDRESS WLAN_MBOX3_DMA_TX_CONTROL_ADDRESS
-#define MBOX3_DMA_TX_CONTROL_OFFSET WLAN_MBOX3_DMA_TX_CONTROL_OFFSET
-#define MBOX3_DMA_TX_CONTROL_RESUME_MSB WLAN_MBOX3_DMA_TX_CONTROL_RESUME_MSB
-#define MBOX3_DMA_TX_CONTROL_RESUME_LSB WLAN_MBOX3_DMA_TX_CONTROL_RESUME_LSB
-#define MBOX3_DMA_TX_CONTROL_RESUME_MASK WLAN_MBOX3_DMA_TX_CONTROL_RESUME_MASK
-#define MBOX3_DMA_TX_CONTROL_RESUME_GET(x) WLAN_MBOX3_DMA_TX_CONTROL_RESUME_GET(x)
-#define MBOX3_DMA_TX_CONTROL_RESUME_SET(x) WLAN_MBOX3_DMA_TX_CONTROL_RESUME_SET(x)
-#define MBOX3_DMA_TX_CONTROL_START_MSB WLAN_MBOX3_DMA_TX_CONTROL_START_MSB
-#define MBOX3_DMA_TX_CONTROL_START_LSB WLAN_MBOX3_DMA_TX_CONTROL_START_LSB
-#define MBOX3_DMA_TX_CONTROL_START_MASK WLAN_MBOX3_DMA_TX_CONTROL_START_MASK
-#define MBOX3_DMA_TX_CONTROL_START_GET(x) WLAN_MBOX3_DMA_TX_CONTROL_START_GET(x)
-#define MBOX3_DMA_TX_CONTROL_START_SET(x) WLAN_MBOX3_DMA_TX_CONTROL_START_SET(x)
-#define MBOX3_DMA_TX_CONTROL_STOP_MSB WLAN_MBOX3_DMA_TX_CONTROL_STOP_MSB
-#define MBOX3_DMA_TX_CONTROL_STOP_LSB WLAN_MBOX3_DMA_TX_CONTROL_STOP_LSB
-#define MBOX3_DMA_TX_CONTROL_STOP_MASK WLAN_MBOX3_DMA_TX_CONTROL_STOP_MASK
-#define MBOX3_DMA_TX_CONTROL_STOP_GET(x) WLAN_MBOX3_DMA_TX_CONTROL_STOP_GET(x)
-#define MBOX3_DMA_TX_CONTROL_STOP_SET(x) WLAN_MBOX3_DMA_TX_CONTROL_STOP_SET(x)
-#define MBOX_INT_STATUS_ADDRESS WLAN_MBOX_INT_STATUS_ADDRESS
-#define MBOX_INT_STATUS_OFFSET WLAN_MBOX_INT_STATUS_OFFSET
-#define MBOX_INT_STATUS_RX_DMA_COMPLETE_MSB WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_MSB
-#define MBOX_INT_STATUS_RX_DMA_COMPLETE_LSB WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_LSB
-#define MBOX_INT_STATUS_RX_DMA_COMPLETE_MASK WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_MASK
-#define MBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x) WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x)
-#define MBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x) WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x)
-#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB
-#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB
-#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK
-#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x) WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x)
-#define MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x) WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x)
-#define MBOX_INT_STATUS_TX_DMA_COMPLETE_MSB WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_MSB
-#define MBOX_INT_STATUS_TX_DMA_COMPLETE_LSB WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_LSB
-#define MBOX_INT_STATUS_TX_DMA_COMPLETE_MASK WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_MASK
-#define MBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x) WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x)
-#define MBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x) WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x)
-#define MBOX_INT_STATUS_TX_OVERFLOW_MSB WLAN_MBOX_INT_STATUS_TX_OVERFLOW_MSB
-#define MBOX_INT_STATUS_TX_OVERFLOW_LSB WLAN_MBOX_INT_STATUS_TX_OVERFLOW_LSB
-#define MBOX_INT_STATUS_TX_OVERFLOW_MASK WLAN_MBOX_INT_STATUS_TX_OVERFLOW_MASK
-#define MBOX_INT_STATUS_TX_OVERFLOW_GET(x) WLAN_MBOX_INT_STATUS_TX_OVERFLOW_GET(x)
-#define MBOX_INT_STATUS_TX_OVERFLOW_SET(x) WLAN_MBOX_INT_STATUS_TX_OVERFLOW_SET(x)
-#define MBOX_INT_STATUS_RX_UNDERFLOW_MSB WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_MSB
-#define MBOX_INT_STATUS_RX_UNDERFLOW_LSB WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_LSB
-#define MBOX_INT_STATUS_RX_UNDERFLOW_MASK WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_MASK
-#define MBOX_INT_STATUS_RX_UNDERFLOW_GET(x) WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_GET(x)
-#define MBOX_INT_STATUS_RX_UNDERFLOW_SET(x) WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_SET(x)
-#define MBOX_INT_STATUS_TX_NOT_EMPTY_MSB WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_MSB
-#define MBOX_INT_STATUS_TX_NOT_EMPTY_LSB WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_LSB
-#define MBOX_INT_STATUS_TX_NOT_EMPTY_MASK WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_MASK
-#define MBOX_INT_STATUS_TX_NOT_EMPTY_GET(x) WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_GET(x)
-#define MBOX_INT_STATUS_TX_NOT_EMPTY_SET(x) WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_SET(x)
-#define MBOX_INT_STATUS_RX_NOT_FULL_MSB WLAN_MBOX_INT_STATUS_RX_NOT_FULL_MSB
-#define MBOX_INT_STATUS_RX_NOT_FULL_LSB WLAN_MBOX_INT_STATUS_RX_NOT_FULL_LSB
-#define MBOX_INT_STATUS_RX_NOT_FULL_MASK WLAN_MBOX_INT_STATUS_RX_NOT_FULL_MASK
-#define MBOX_INT_STATUS_RX_NOT_FULL_GET(x) WLAN_MBOX_INT_STATUS_RX_NOT_FULL_GET(x)
-#define MBOX_INT_STATUS_RX_NOT_FULL_SET(x) WLAN_MBOX_INT_STATUS_RX_NOT_FULL_SET(x)
-#define MBOX_INT_STATUS_HOST_MSB WLAN_MBOX_INT_STATUS_HOST_MSB
-#define MBOX_INT_STATUS_HOST_LSB WLAN_MBOX_INT_STATUS_HOST_LSB
-#define MBOX_INT_STATUS_HOST_MASK WLAN_MBOX_INT_STATUS_HOST_MASK
-#define MBOX_INT_STATUS_HOST_GET(x) WLAN_MBOX_INT_STATUS_HOST_GET(x)
-#define MBOX_INT_STATUS_HOST_SET(x) WLAN_MBOX_INT_STATUS_HOST_SET(x)
-#define MBOX_INT_ENABLE_ADDRESS WLAN_MBOX_INT_ENABLE_ADDRESS
-#define MBOX_INT_ENABLE_OFFSET WLAN_MBOX_INT_ENABLE_OFFSET
-#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB
-#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB
-#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK
-#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x) WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x)
-#define MBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x) WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x)
-#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB
-#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB
-#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK
-#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x) WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x)
-#define MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x) WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x)
-#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB
-#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB
-#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK
-#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x) WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x)
-#define MBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x) WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x)
-#define MBOX_INT_ENABLE_TX_OVERFLOW_MSB WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_MSB
-#define MBOX_INT_ENABLE_TX_OVERFLOW_LSB WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_LSB
-#define MBOX_INT_ENABLE_TX_OVERFLOW_MASK WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_MASK
-#define MBOX_INT_ENABLE_TX_OVERFLOW_GET(x) WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_GET(x)
-#define MBOX_INT_ENABLE_TX_OVERFLOW_SET(x) WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_SET(x)
-#define MBOX_INT_ENABLE_RX_UNDERFLOW_MSB WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_MSB
-#define MBOX_INT_ENABLE_RX_UNDERFLOW_LSB WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_LSB
-#define MBOX_INT_ENABLE_RX_UNDERFLOW_MASK WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_MASK
-#define MBOX_INT_ENABLE_RX_UNDERFLOW_GET(x) WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_GET(x)
-#define MBOX_INT_ENABLE_RX_UNDERFLOW_SET(x) WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_SET(x)
-#define MBOX_INT_ENABLE_TX_NOT_EMPTY_MSB WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_MSB
-#define MBOX_INT_ENABLE_TX_NOT_EMPTY_LSB WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_LSB
-#define MBOX_INT_ENABLE_TX_NOT_EMPTY_MASK WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_MASK
-#define MBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x) WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x)
-#define MBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x) WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x)
-#define MBOX_INT_ENABLE_RX_NOT_FULL_MSB WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_MSB
-#define MBOX_INT_ENABLE_RX_NOT_FULL_LSB WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_LSB
-#define MBOX_INT_ENABLE_RX_NOT_FULL_MASK WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_MASK
-#define MBOX_INT_ENABLE_RX_NOT_FULL_GET(x) WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_GET(x)
-#define MBOX_INT_ENABLE_RX_NOT_FULL_SET(x) WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_SET(x)
-#define MBOX_INT_ENABLE_HOST_MSB WLAN_MBOX_INT_ENABLE_HOST_MSB
-#define MBOX_INT_ENABLE_HOST_LSB WLAN_MBOX_INT_ENABLE_HOST_LSB
-#define MBOX_INT_ENABLE_HOST_MASK WLAN_MBOX_INT_ENABLE_HOST_MASK
-#define MBOX_INT_ENABLE_HOST_GET(x) WLAN_MBOX_INT_ENABLE_HOST_GET(x)
-#define MBOX_INT_ENABLE_HOST_SET(x) WLAN_MBOX_INT_ENABLE_HOST_SET(x)
-#define INT_HOST_ADDRESS WLAN_INT_HOST_ADDRESS
-#define INT_HOST_OFFSET WLAN_INT_HOST_OFFSET
-#define INT_HOST_VECTOR_MSB WLAN_INT_HOST_VECTOR_MSB
-#define INT_HOST_VECTOR_LSB WLAN_INT_HOST_VECTOR_LSB
-#define INT_HOST_VECTOR_MASK WLAN_INT_HOST_VECTOR_MASK
-#define INT_HOST_VECTOR_GET(x) WLAN_INT_HOST_VECTOR_GET(x)
-#define INT_HOST_VECTOR_SET(x) WLAN_INT_HOST_VECTOR_SET(x)
-#define LOCAL_COUNT_ADDRESS WLAN_LOCAL_COUNT_ADDRESS
-#define LOCAL_COUNT_OFFSET WLAN_LOCAL_COUNT_OFFSET
-#define LOCAL_COUNT_VALUE_MSB WLAN_LOCAL_COUNT_VALUE_MSB
-#define LOCAL_COUNT_VALUE_LSB WLAN_LOCAL_COUNT_VALUE_LSB
-#define LOCAL_COUNT_VALUE_MASK WLAN_LOCAL_COUNT_VALUE_MASK
-#define LOCAL_COUNT_VALUE_GET(x) WLAN_LOCAL_COUNT_VALUE_GET(x)
-#define LOCAL_COUNT_VALUE_SET(x) WLAN_LOCAL_COUNT_VALUE_SET(x)
-#define COUNT_INC_ADDRESS WLAN_COUNT_INC_ADDRESS
-#define COUNT_INC_OFFSET WLAN_COUNT_INC_OFFSET
-#define COUNT_INC_VALUE_MSB WLAN_COUNT_INC_VALUE_MSB
-#define COUNT_INC_VALUE_LSB WLAN_COUNT_INC_VALUE_LSB
-#define COUNT_INC_VALUE_MASK WLAN_COUNT_INC_VALUE_MASK
-#define COUNT_INC_VALUE_GET(x) WLAN_COUNT_INC_VALUE_GET(x)
-#define COUNT_INC_VALUE_SET(x) WLAN_COUNT_INC_VALUE_SET(x)
-#define LOCAL_SCRATCH_ADDRESS WLAN_LOCAL_SCRATCH_ADDRESS
-#define LOCAL_SCRATCH_OFFSET WLAN_LOCAL_SCRATCH_OFFSET
-#define LOCAL_SCRATCH_VALUE_MSB WLAN_LOCAL_SCRATCH_VALUE_MSB
-#define LOCAL_SCRATCH_VALUE_LSB WLAN_LOCAL_SCRATCH_VALUE_LSB
-#define LOCAL_SCRATCH_VALUE_MASK WLAN_LOCAL_SCRATCH_VALUE_MASK
-#define LOCAL_SCRATCH_VALUE_GET(x) WLAN_LOCAL_SCRATCH_VALUE_GET(x)
-#define LOCAL_SCRATCH_VALUE_SET(x) WLAN_LOCAL_SCRATCH_VALUE_SET(x)
-#define USE_LOCAL_BUS_ADDRESS WLAN_USE_LOCAL_BUS_ADDRESS
-#define USE_LOCAL_BUS_OFFSET WLAN_USE_LOCAL_BUS_OFFSET
-#define USE_LOCAL_BUS_PIN_INIT_MSB WLAN_USE_LOCAL_BUS_PIN_INIT_MSB
-#define USE_LOCAL_BUS_PIN_INIT_LSB WLAN_USE_LOCAL_BUS_PIN_INIT_LSB
-#define USE_LOCAL_BUS_PIN_INIT_MASK WLAN_USE_LOCAL_BUS_PIN_INIT_MASK
-#define USE_LOCAL_BUS_PIN_INIT_GET(x) WLAN_USE_LOCAL_BUS_PIN_INIT_GET(x)
-#define USE_LOCAL_BUS_PIN_INIT_SET(x) WLAN_USE_LOCAL_BUS_PIN_INIT_SET(x)
-#define SDIO_CONFIG_ADDRESS WLAN_SDIO_CONFIG_ADDRESS
-#define SDIO_CONFIG_OFFSET WLAN_SDIO_CONFIG_OFFSET
-#define SDIO_CONFIG_CCCR_IOR1_MSB WLAN_SDIO_CONFIG_CCCR_IOR1_MSB
-#define SDIO_CONFIG_CCCR_IOR1_LSB WLAN_SDIO_CONFIG_CCCR_IOR1_LSB
-#define SDIO_CONFIG_CCCR_IOR1_MASK WLAN_SDIO_CONFIG_CCCR_IOR1_MASK
-#define SDIO_CONFIG_CCCR_IOR1_GET(x) WLAN_SDIO_CONFIG_CCCR_IOR1_GET(x)
-#define SDIO_CONFIG_CCCR_IOR1_SET(x) WLAN_SDIO_CONFIG_CCCR_IOR1_SET(x)
-#define MBOX_DEBUG_ADDRESS WLAN_MBOX_DEBUG_ADDRESS
-#define MBOX_DEBUG_OFFSET WLAN_MBOX_DEBUG_OFFSET
-#define MBOX_DEBUG_SEL_MSB WLAN_MBOX_DEBUG_SEL_MSB
-#define MBOX_DEBUG_SEL_LSB WLAN_MBOX_DEBUG_SEL_LSB
-#define MBOX_DEBUG_SEL_MASK WLAN_MBOX_DEBUG_SEL_MASK
-#define MBOX_DEBUG_SEL_GET(x) WLAN_MBOX_DEBUG_SEL_GET(x)
-#define MBOX_DEBUG_SEL_SET(x) WLAN_MBOX_DEBUG_SEL_SET(x)
-#define MBOX_FIFO_RESET_ADDRESS WLAN_MBOX_FIFO_RESET_ADDRESS
-#define MBOX_FIFO_RESET_OFFSET WLAN_MBOX_FIFO_RESET_OFFSET
-#define MBOX_FIFO_RESET_INIT_MSB WLAN_MBOX_FIFO_RESET_INIT_MSB
-#define MBOX_FIFO_RESET_INIT_LSB WLAN_MBOX_FIFO_RESET_INIT_LSB
-#define MBOX_FIFO_RESET_INIT_MASK WLAN_MBOX_FIFO_RESET_INIT_MASK
-#define MBOX_FIFO_RESET_INIT_GET(x) WLAN_MBOX_FIFO_RESET_INIT_GET(x)
-#define MBOX_FIFO_RESET_INIT_SET(x) WLAN_MBOX_FIFO_RESET_INIT_SET(x)
-#define MBOX_TXFIFO_POP_ADDRESS WLAN_MBOX_TXFIFO_POP_ADDRESS
-#define MBOX_TXFIFO_POP_OFFSET WLAN_MBOX_TXFIFO_POP_OFFSET
-#define MBOX_TXFIFO_POP_DATA_MSB WLAN_MBOX_TXFIFO_POP_DATA_MSB
-#define MBOX_TXFIFO_POP_DATA_LSB WLAN_MBOX_TXFIFO_POP_DATA_LSB
-#define MBOX_TXFIFO_POP_DATA_MASK WLAN_MBOX_TXFIFO_POP_DATA_MASK
-#define MBOX_TXFIFO_POP_DATA_GET(x) WLAN_MBOX_TXFIFO_POP_DATA_GET(x)
-#define MBOX_TXFIFO_POP_DATA_SET(x) WLAN_MBOX_TXFIFO_POP_DATA_SET(x)
-#define MBOX_RXFIFO_POP_ADDRESS WLAN_MBOX_RXFIFO_POP_ADDRESS
-#define MBOX_RXFIFO_POP_OFFSET WLAN_MBOX_RXFIFO_POP_OFFSET
-#define MBOX_RXFIFO_POP_DATA_MSB WLAN_MBOX_RXFIFO_POP_DATA_MSB
-#define MBOX_RXFIFO_POP_DATA_LSB WLAN_MBOX_RXFIFO_POP_DATA_LSB
-#define MBOX_RXFIFO_POP_DATA_MASK WLAN_MBOX_RXFIFO_POP_DATA_MASK
-#define MBOX_RXFIFO_POP_DATA_GET(x) WLAN_MBOX_RXFIFO_POP_DATA_GET(x)
-#define MBOX_RXFIFO_POP_DATA_SET(x) WLAN_MBOX_RXFIFO_POP_DATA_SET(x)
-#define SDIO_DEBUG_ADDRESS WLAN_SDIO_DEBUG_ADDRESS
-#define SDIO_DEBUG_OFFSET WLAN_SDIO_DEBUG_OFFSET
-#define SDIO_DEBUG_SEL_MSB WLAN_SDIO_DEBUG_SEL_MSB
-#define SDIO_DEBUG_SEL_LSB WLAN_SDIO_DEBUG_SEL_LSB
-#define SDIO_DEBUG_SEL_MASK WLAN_SDIO_DEBUG_SEL_MASK
-#define SDIO_DEBUG_SEL_GET(x) WLAN_SDIO_DEBUG_SEL_GET(x)
-#define SDIO_DEBUG_SEL_SET(x) WLAN_SDIO_DEBUG_SEL_SET(x)
-#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS
-#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET
-#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB
-#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB
-#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK
-#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x)
-#define GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x)
-#define GMBOX0_DMA_RX_CONTROL_ADDRESS WLAN_GMBOX0_DMA_RX_CONTROL_ADDRESS
-#define GMBOX0_DMA_RX_CONTROL_OFFSET WLAN_GMBOX0_DMA_RX_CONTROL_OFFSET
-#define GMBOX0_DMA_RX_CONTROL_RESUME_MSB WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_MSB
-#define GMBOX0_DMA_RX_CONTROL_RESUME_LSB WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_LSB
-#define GMBOX0_DMA_RX_CONTROL_RESUME_MASK WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_MASK
-#define GMBOX0_DMA_RX_CONTROL_RESUME_GET(x) WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_GET(x)
-#define GMBOX0_DMA_RX_CONTROL_RESUME_SET(x) WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_SET(x)
-#define GMBOX0_DMA_RX_CONTROL_START_MSB WLAN_GMBOX0_DMA_RX_CONTROL_START_MSB
-#define GMBOX0_DMA_RX_CONTROL_START_LSB WLAN_GMBOX0_DMA_RX_CONTROL_START_LSB
-#define GMBOX0_DMA_RX_CONTROL_START_MASK WLAN_GMBOX0_DMA_RX_CONTROL_START_MASK
-#define GMBOX0_DMA_RX_CONTROL_START_GET(x) WLAN_GMBOX0_DMA_RX_CONTROL_START_GET(x)
-#define GMBOX0_DMA_RX_CONTROL_START_SET(x) WLAN_GMBOX0_DMA_RX_CONTROL_START_SET(x)
-#define GMBOX0_DMA_RX_CONTROL_STOP_MSB WLAN_GMBOX0_DMA_RX_CONTROL_STOP_MSB
-#define GMBOX0_DMA_RX_CONTROL_STOP_LSB WLAN_GMBOX0_DMA_RX_CONTROL_STOP_LSB
-#define GMBOX0_DMA_RX_CONTROL_STOP_MASK WLAN_GMBOX0_DMA_RX_CONTROL_STOP_MASK
-#define GMBOX0_DMA_RX_CONTROL_STOP_GET(x) WLAN_GMBOX0_DMA_RX_CONTROL_STOP_GET(x)
-#define GMBOX0_DMA_RX_CONTROL_STOP_SET(x) WLAN_GMBOX0_DMA_RX_CONTROL_STOP_SET(x)
-#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS
-#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET
-#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB
-#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB
-#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK
-#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x)
-#define GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x)
-#define GMBOX0_DMA_TX_CONTROL_ADDRESS WLAN_GMBOX0_DMA_TX_CONTROL_ADDRESS
-#define GMBOX0_DMA_TX_CONTROL_OFFSET WLAN_GMBOX0_DMA_TX_CONTROL_OFFSET
-#define GMBOX0_DMA_TX_CONTROL_RESUME_MSB WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_MSB
-#define GMBOX0_DMA_TX_CONTROL_RESUME_LSB WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_LSB
-#define GMBOX0_DMA_TX_CONTROL_RESUME_MASK WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_MASK
-#define GMBOX0_DMA_TX_CONTROL_RESUME_GET(x) WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_GET(x)
-#define GMBOX0_DMA_TX_CONTROL_RESUME_SET(x) WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_SET(x)
-#define GMBOX0_DMA_TX_CONTROL_START_MSB WLAN_GMBOX0_DMA_TX_CONTROL_START_MSB
-#define GMBOX0_DMA_TX_CONTROL_START_LSB WLAN_GMBOX0_DMA_TX_CONTROL_START_LSB
-#define GMBOX0_DMA_TX_CONTROL_START_MASK WLAN_GMBOX0_DMA_TX_CONTROL_START_MASK
-#define GMBOX0_DMA_TX_CONTROL_START_GET(x) WLAN_GMBOX0_DMA_TX_CONTROL_START_GET(x)
-#define GMBOX0_DMA_TX_CONTROL_START_SET(x) WLAN_GMBOX0_DMA_TX_CONTROL_START_SET(x)
-#define GMBOX0_DMA_TX_CONTROL_STOP_MSB WLAN_GMBOX0_DMA_TX_CONTROL_STOP_MSB
-#define GMBOX0_DMA_TX_CONTROL_STOP_LSB WLAN_GMBOX0_DMA_TX_CONTROL_STOP_LSB
-#define GMBOX0_DMA_TX_CONTROL_STOP_MASK WLAN_GMBOX0_DMA_TX_CONTROL_STOP_MASK
-#define GMBOX0_DMA_TX_CONTROL_STOP_GET(x) WLAN_GMBOX0_DMA_TX_CONTROL_STOP_GET(x)
-#define GMBOX0_DMA_TX_CONTROL_STOP_SET(x) WLAN_GMBOX0_DMA_TX_CONTROL_STOP_SET(x)
-#define GMBOX_INT_STATUS_ADDRESS WLAN_GMBOX_INT_STATUS_ADDRESS
-#define GMBOX_INT_STATUS_OFFSET WLAN_GMBOX_INT_STATUS_OFFSET
-#define GMBOX_INT_STATUS_TX_OVERFLOW_MSB WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_MSB
-#define GMBOX_INT_STATUS_TX_OVERFLOW_LSB WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_LSB
-#define GMBOX_INT_STATUS_TX_OVERFLOW_MASK WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_MASK
-#define GMBOX_INT_STATUS_TX_OVERFLOW_GET(x) WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_GET(x)
-#define GMBOX_INT_STATUS_TX_OVERFLOW_SET(x) WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_SET(x)
-#define GMBOX_INT_STATUS_RX_UNDERFLOW_MSB WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_MSB
-#define GMBOX_INT_STATUS_RX_UNDERFLOW_LSB WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_LSB
-#define GMBOX_INT_STATUS_RX_UNDERFLOW_MASK WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_MASK
-#define GMBOX_INT_STATUS_RX_UNDERFLOW_GET(x) WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_GET(x)
-#define GMBOX_INT_STATUS_RX_UNDERFLOW_SET(x) WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_SET(x)
-#define GMBOX_INT_STATUS_RX_DMA_COMPLETE_MSB WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_MSB
-#define GMBOX_INT_STATUS_RX_DMA_COMPLETE_LSB WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_LSB
-#define GMBOX_INT_STATUS_RX_DMA_COMPLETE_MASK WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_MASK
-#define GMBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x) WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x)
-#define GMBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x) WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x)
-#define GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB
-#define GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB
-#define GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK
-#define GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x) WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x)
-#define GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x) WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x)
-#define GMBOX_INT_STATUS_TX_DMA_COMPLETE_MSB WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_MSB
-#define GMBOX_INT_STATUS_TX_DMA_COMPLETE_LSB WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_LSB
-#define GMBOX_INT_STATUS_TX_DMA_COMPLETE_MASK WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_MASK
-#define GMBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x) WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x)
-#define GMBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x) WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x)
-#define GMBOX_INT_STATUS_TX_NOT_EMPTY_MSB WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_MSB
-#define GMBOX_INT_STATUS_TX_NOT_EMPTY_LSB WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_LSB
-#define GMBOX_INT_STATUS_TX_NOT_EMPTY_MASK WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_MASK
-#define GMBOX_INT_STATUS_TX_NOT_EMPTY_GET(x) WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_GET(x)
-#define GMBOX_INT_STATUS_TX_NOT_EMPTY_SET(x) WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_SET(x)
-#define GMBOX_INT_STATUS_RX_NOT_FULL_MSB WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_MSB
-#define GMBOX_INT_STATUS_RX_NOT_FULL_LSB WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_LSB
-#define GMBOX_INT_STATUS_RX_NOT_FULL_MASK WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_MASK
-#define GMBOX_INT_STATUS_RX_NOT_FULL_GET(x) WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_GET(x)
-#define GMBOX_INT_STATUS_RX_NOT_FULL_SET(x) WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_SET(x)
-#define GMBOX_INT_ENABLE_ADDRESS WLAN_GMBOX_INT_ENABLE_ADDRESS
-#define GMBOX_INT_ENABLE_OFFSET WLAN_GMBOX_INT_ENABLE_OFFSET
-#define GMBOX_INT_ENABLE_TX_OVERFLOW_MSB WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_MSB
-#define GMBOX_INT_ENABLE_TX_OVERFLOW_LSB WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_LSB
-#define GMBOX_INT_ENABLE_TX_OVERFLOW_MASK WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_MASK
-#define GMBOX_INT_ENABLE_TX_OVERFLOW_GET(x) WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_GET(x)
-#define GMBOX_INT_ENABLE_TX_OVERFLOW_SET(x) WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_SET(x)
-#define GMBOX_INT_ENABLE_RX_UNDERFLOW_MSB WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_MSB
-#define GMBOX_INT_ENABLE_RX_UNDERFLOW_LSB WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_LSB
-#define GMBOX_INT_ENABLE_RX_UNDERFLOW_MASK WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_MASK
-#define GMBOX_INT_ENABLE_RX_UNDERFLOW_GET(x) WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_GET(x)
-#define GMBOX_INT_ENABLE_RX_UNDERFLOW_SET(x) WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_SET(x)
-#define GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB
-#define GMBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB
-#define GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK
-#define GMBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x) WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x)
-#define GMBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x) WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x)
-#define GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB
-#define GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB
-#define GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK
-#define GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x) WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x)
-#define GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x) WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x)
-#define GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB
-#define GMBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB
-#define GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK
-#define GMBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x) WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x)
-#define GMBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x) WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x)
-#define GMBOX_INT_ENABLE_TX_NOT_EMPTY_MSB WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_MSB
-#define GMBOX_INT_ENABLE_TX_NOT_EMPTY_LSB WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_LSB
-#define GMBOX_INT_ENABLE_TX_NOT_EMPTY_MASK WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_MASK
-#define GMBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x) WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x)
-#define GMBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x) WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x)
-#define GMBOX_INT_ENABLE_RX_NOT_FULL_MSB WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_MSB
-#define GMBOX_INT_ENABLE_RX_NOT_FULL_LSB WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_LSB
-#define GMBOX_INT_ENABLE_RX_NOT_FULL_MASK WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_MASK
-#define GMBOX_INT_ENABLE_RX_NOT_FULL_GET(x) WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_GET(x)
-#define GMBOX_INT_ENABLE_RX_NOT_FULL_SET(x) WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_SET(x)
-#define HOST_IF_WINDOW_ADDRESS WLAN_HOST_IF_WINDOW_ADDRESS
-#define HOST_IF_WINDOW_OFFSET WLAN_HOST_IF_WINDOW_OFFSET
-#define HOST_IF_WINDOW_DATA_MSB WLAN_HOST_IF_WINDOW_DATA_MSB
-#define HOST_IF_WINDOW_DATA_LSB WLAN_HOST_IF_WINDOW_DATA_LSB
-#define HOST_IF_WINDOW_DATA_MASK WLAN_HOST_IF_WINDOW_DATA_MASK
-#define HOST_IF_WINDOW_DATA_GET(x) WLAN_HOST_IF_WINDOW_DATA_GET(x)
-#define HOST_IF_WINDOW_DATA_SET(x) WLAN_HOST_IF_WINDOW_DATA_SET(x)
-
-#endif
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_host_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_host_reg.h
deleted file mode 100644
index 038d0d01927..00000000000
--- a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_host_reg.h
+++ /dev/null
@@ -1,471 +0,0 @@
-// ------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-// ------------------------------------------------------------------
-//===================================================================
-// Author(s): ="Atheros"
-//===================================================================
-
-
-#ifndef _MBOX_WLAN_HOST_REG_REG_H_
-#define _MBOX_WLAN_HOST_REG_REG_H_
-
-#define HOST_INT_STATUS_ADDRESS 0x00000400
-#define HOST_INT_STATUS_OFFSET 0x00000400
-#define HOST_INT_STATUS_ERROR_MSB 7
-#define HOST_INT_STATUS_ERROR_LSB 7
-#define HOST_INT_STATUS_ERROR_MASK 0x00000080
-#define HOST_INT_STATUS_ERROR_GET(x) (((x) & HOST_INT_STATUS_ERROR_MASK) >> HOST_INT_STATUS_ERROR_LSB)
-#define HOST_INT_STATUS_ERROR_SET(x) (((x) << HOST_INT_STATUS_ERROR_LSB) & HOST_INT_STATUS_ERROR_MASK)
-#define HOST_INT_STATUS_CPU_MSB 6
-#define HOST_INT_STATUS_CPU_LSB 6
-#define HOST_INT_STATUS_CPU_MASK 0x00000040
-#define HOST_INT_STATUS_CPU_GET(x) (((x) & HOST_INT_STATUS_CPU_MASK) >> HOST_INT_STATUS_CPU_LSB)
-#define HOST_INT_STATUS_CPU_SET(x) (((x) << HOST_INT_STATUS_CPU_LSB) & HOST_INT_STATUS_CPU_MASK)
-#define HOST_INT_STATUS_INT_MSB 5
-#define HOST_INT_STATUS_INT_LSB 5
-#define HOST_INT_STATUS_INT_MASK 0x00000020
-#define HOST_INT_STATUS_INT_GET(x) (((x) & HOST_INT_STATUS_INT_MASK) >> HOST_INT_STATUS_INT_LSB)
-#define HOST_INT_STATUS_INT_SET(x) (((x) << HOST_INT_STATUS_INT_LSB) & HOST_INT_STATUS_INT_MASK)
-#define HOST_INT_STATUS_COUNTER_MSB 4
-#define HOST_INT_STATUS_COUNTER_LSB 4
-#define HOST_INT_STATUS_COUNTER_MASK 0x00000010
-#define HOST_INT_STATUS_COUNTER_GET(x) (((x) & HOST_INT_STATUS_COUNTER_MASK) >> HOST_INT_STATUS_COUNTER_LSB)
-#define HOST_INT_STATUS_COUNTER_SET(x) (((x) << HOST_INT_STATUS_COUNTER_LSB) & HOST_INT_STATUS_COUNTER_MASK)
-#define HOST_INT_STATUS_MBOX_DATA_MSB 3
-#define HOST_INT_STATUS_MBOX_DATA_LSB 0
-#define HOST_INT_STATUS_MBOX_DATA_MASK 0x0000000f
-#define HOST_INT_STATUS_MBOX_DATA_GET(x) (((x) & HOST_INT_STATUS_MBOX_DATA_MASK) >> HOST_INT_STATUS_MBOX_DATA_LSB)
-#define HOST_INT_STATUS_MBOX_DATA_SET(x) (((x) << HOST_INT_STATUS_MBOX_DATA_LSB) & HOST_INT_STATUS_MBOX_DATA_MASK)
-
-#define CPU_INT_STATUS_ADDRESS 0x00000401
-#define CPU_INT_STATUS_OFFSET 0x00000401
-#define CPU_INT_STATUS_BIT_MSB 7
-#define CPU_INT_STATUS_BIT_LSB 0
-#define CPU_INT_STATUS_BIT_MASK 0x000000ff
-#define CPU_INT_STATUS_BIT_GET(x) (((x) & CPU_INT_STATUS_BIT_MASK) >> CPU_INT_STATUS_BIT_LSB)
-#define CPU_INT_STATUS_BIT_SET(x) (((x) << CPU_INT_STATUS_BIT_LSB) & CPU_INT_STATUS_BIT_MASK)
-
-#define ERROR_INT_STATUS_ADDRESS 0x00000402
-#define ERROR_INT_STATUS_OFFSET 0x00000402
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_MSB 6
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_LSB 6
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_MASK 0x00000040
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_GET(x) (((x) & ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_MASK) >> ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_LSB)
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_SET(x) (((x) << ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_LSB) & ERROR_INT_STATUS_UART_HCI_FRAMER_SYNC_ERROR_MASK)
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_MSB 5
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_LSB 5
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_MASK 0x00000020
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_GET(x) (((x) & ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_MASK) >> ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_LSB)
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_SET(x) (((x) << ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_LSB) & ERROR_INT_STATUS_UART_HCI_FRAMER_OVERFLOW_MASK)
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_MSB 4
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_LSB 4
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_MASK 0x00000010
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_GET(x) (((x) & ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_MASK) >> ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_LSB)
-#define ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_SET(x) (((x) << ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_LSB) & ERROR_INT_STATUS_UART_HCI_FRAMER_UNDERFLOW_MASK)
-#define ERROR_INT_STATUS_SPI_MSB 3
-#define ERROR_INT_STATUS_SPI_LSB 3
-#define ERROR_INT_STATUS_SPI_MASK 0x00000008
-#define ERROR_INT_STATUS_SPI_GET(x) (((x) & ERROR_INT_STATUS_SPI_MASK) >> ERROR_INT_STATUS_SPI_LSB)
-#define ERROR_INT_STATUS_SPI_SET(x) (((x) << ERROR_INT_STATUS_SPI_LSB) & ERROR_INT_STATUS_SPI_MASK)
-#define ERROR_INT_STATUS_WAKEUP_MSB 2
-#define ERROR_INT_STATUS_WAKEUP_LSB 2
-#define ERROR_INT_STATUS_WAKEUP_MASK 0x00000004
-#define ERROR_INT_STATUS_WAKEUP_GET(x) (((x) & ERROR_INT_STATUS_WAKEUP_MASK) >> ERROR_INT_STATUS_WAKEUP_LSB)
-#define ERROR_INT_STATUS_WAKEUP_SET(x) (((x) << ERROR_INT_STATUS_WAKEUP_LSB) & ERROR_INT_STATUS_WAKEUP_MASK)
-#define ERROR_INT_STATUS_RX_UNDERFLOW_MSB 1
-#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB 1
-#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00000002
-#define ERROR_INT_STATUS_RX_UNDERFLOW_GET(x) (((x) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK) >> ERROR_INT_STATUS_RX_UNDERFLOW_LSB)
-#define ERROR_INT_STATUS_RX_UNDERFLOW_SET(x) (((x) << ERROR_INT_STATUS_RX_UNDERFLOW_LSB) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK)
-#define ERROR_INT_STATUS_TX_OVERFLOW_MSB 0
-#define ERROR_INT_STATUS_TX_OVERFLOW_LSB 0
-#define ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00000001
-#define ERROR_INT_STATUS_TX_OVERFLOW_GET(x) (((x) & ERROR_INT_STATUS_TX_OVERFLOW_MASK) >> ERROR_INT_STATUS_TX_OVERFLOW_LSB)
-#define ERROR_INT_STATUS_TX_OVERFLOW_SET(x) (((x) << ERROR_INT_STATUS_TX_OVERFLOW_LSB) & ERROR_INT_STATUS_TX_OVERFLOW_MASK)
-
-#define COUNTER_INT_STATUS_ADDRESS 0x00000403
-#define COUNTER_INT_STATUS_OFFSET 0x00000403
-#define COUNTER_INT_STATUS_COUNTER_MSB 7
-#define COUNTER_INT_STATUS_COUNTER_LSB 0
-#define COUNTER_INT_STATUS_COUNTER_MASK 0x000000ff
-#define COUNTER_INT_STATUS_COUNTER_GET(x) (((x) & COUNTER_INT_STATUS_COUNTER_MASK) >> COUNTER_INT_STATUS_COUNTER_LSB)
-#define COUNTER_INT_STATUS_COUNTER_SET(x) (((x) << COUNTER_INT_STATUS_COUNTER_LSB) & COUNTER_INT_STATUS_COUNTER_MASK)
-
-#define MBOX_FRAME_ADDRESS 0x00000404
-#define MBOX_FRAME_OFFSET 0x00000404
-#define MBOX_FRAME_RX_EOM_MSB 7
-#define MBOX_FRAME_RX_EOM_LSB 4
-#define MBOX_FRAME_RX_EOM_MASK 0x000000f0
-#define MBOX_FRAME_RX_EOM_GET(x) (((x) & MBOX_FRAME_RX_EOM_MASK) >> MBOX_FRAME_RX_EOM_LSB)
-#define MBOX_FRAME_RX_EOM_SET(x) (((x) << MBOX_FRAME_RX_EOM_LSB) & MBOX_FRAME_RX_EOM_MASK)
-#define MBOX_FRAME_RX_SOM_MSB 3
-#define MBOX_FRAME_RX_SOM_LSB 0
-#define MBOX_FRAME_RX_SOM_MASK 0x0000000f
-#define MBOX_FRAME_RX_SOM_GET(x) (((x) & MBOX_FRAME_RX_SOM_MASK) >> MBOX_FRAME_RX_SOM_LSB)
-#define MBOX_FRAME_RX_SOM_SET(x) (((x) << MBOX_FRAME_RX_SOM_LSB) & MBOX_FRAME_RX_SOM_MASK)
-
-#define RX_LOOKAHEAD_VALID_ADDRESS 0x00000405
-#define RX_LOOKAHEAD_VALID_OFFSET 0x00000405
-#define RX_LOOKAHEAD_VALID_MBOX_MSB 3
-#define RX_LOOKAHEAD_VALID_MBOX_LSB 0
-#define RX_LOOKAHEAD_VALID_MBOX_MASK 0x0000000f
-#define RX_LOOKAHEAD_VALID_MBOX_GET(x) (((x) & RX_LOOKAHEAD_VALID_MBOX_MASK) >> RX_LOOKAHEAD_VALID_MBOX_LSB)
-#define RX_LOOKAHEAD_VALID_MBOX_SET(x) (((x) << RX_LOOKAHEAD_VALID_MBOX_LSB) & RX_LOOKAHEAD_VALID_MBOX_MASK)
-
-#define HOST_INT_STATUS2_ADDRESS 0x00000406
-#define HOST_INT_STATUS2_OFFSET 0x00000406
-#define HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_MSB 2
-#define HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_LSB 2
-#define HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_MASK 0x00000004
-#define HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_GET(x) (((x) & HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_MASK) >> HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_LSB)
-#define HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_SET(x) (((x) << HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_LSB) & HOST_INT_STATUS2_GMBOX_RX_UNDERFLOW_MASK)
-#define HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_MSB 1
-#define HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_LSB 1
-#define HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_MASK 0x00000002
-#define HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_GET(x) (((x) & HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_MASK) >> HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_LSB)
-#define HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_SET(x) (((x) << HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_LSB) & HOST_INT_STATUS2_GMBOX_TX_OVERFLOW_MASK)
-#define HOST_INT_STATUS2_GMBOX_DATA_MSB 0
-#define HOST_INT_STATUS2_GMBOX_DATA_LSB 0
-#define HOST_INT_STATUS2_GMBOX_DATA_MASK 0x00000001
-#define HOST_INT_STATUS2_GMBOX_DATA_GET(x) (((x) & HOST_INT_STATUS2_GMBOX_DATA_MASK) >> HOST_INT_STATUS2_GMBOX_DATA_LSB)
-#define HOST_INT_STATUS2_GMBOX_DATA_SET(x) (((x) << HOST_INT_STATUS2_GMBOX_DATA_LSB) & HOST_INT_STATUS2_GMBOX_DATA_MASK)
-
-#define GMBOX_RX_AVAIL_ADDRESS 0x00000407
-#define GMBOX_RX_AVAIL_OFFSET 0x00000407
-#define GMBOX_RX_AVAIL_BYTE_MSB 6
-#define GMBOX_RX_AVAIL_BYTE_LSB 0
-#define GMBOX_RX_AVAIL_BYTE_MASK 0x0000007f
-#define GMBOX_RX_AVAIL_BYTE_GET(x) (((x) & GMBOX_RX_AVAIL_BYTE_MASK) >> GMBOX_RX_AVAIL_BYTE_LSB)
-#define GMBOX_RX_AVAIL_BYTE_SET(x) (((x) << GMBOX_RX_AVAIL_BYTE_LSB) & GMBOX_RX_AVAIL_BYTE_MASK)
-
-#define RX_LOOKAHEAD0_ADDRESS 0x00000408
-#define RX_LOOKAHEAD0_OFFSET 0x00000408
-#define RX_LOOKAHEAD0_DATA_MSB 7
-#define RX_LOOKAHEAD0_DATA_LSB 0
-#define RX_LOOKAHEAD0_DATA_MASK 0x000000ff
-#define RX_LOOKAHEAD0_DATA_GET(x) (((x) & RX_LOOKAHEAD0_DATA_MASK) >> RX_LOOKAHEAD0_DATA_LSB)
-#define RX_LOOKAHEAD0_DATA_SET(x) (((x) << RX_LOOKAHEAD0_DATA_LSB) & RX_LOOKAHEAD0_DATA_MASK)
-
-#define RX_LOOKAHEAD1_ADDRESS 0x0000040c
-#define RX_LOOKAHEAD1_OFFSET 0x0000040c
-#define RX_LOOKAHEAD1_DATA_MSB 7
-#define RX_LOOKAHEAD1_DATA_LSB 0
-#define RX_LOOKAHEAD1_DATA_MASK 0x000000ff
-#define RX_LOOKAHEAD1_DATA_GET(x) (((x) & RX_LOOKAHEAD1_DATA_MASK) >> RX_LOOKAHEAD1_DATA_LSB)
-#define RX_LOOKAHEAD1_DATA_SET(x) (((x) << RX_LOOKAHEAD1_DATA_LSB) & RX_LOOKAHEAD1_DATA_MASK)
-
-#define RX_LOOKAHEAD2_ADDRESS 0x00000410
-#define RX_LOOKAHEAD2_OFFSET 0x00000410
-#define RX_LOOKAHEAD2_DATA_MSB 7
-#define RX_LOOKAHEAD2_DATA_LSB 0
-#define RX_LOOKAHEAD2_DATA_MASK 0x000000ff
-#define RX_LOOKAHEAD2_DATA_GET(x) (((x) & RX_LOOKAHEAD2_DATA_MASK) >> RX_LOOKAHEAD2_DATA_LSB)
-#define RX_LOOKAHEAD2_DATA_SET(x) (((x) << RX_LOOKAHEAD2_DATA_LSB) & RX_LOOKAHEAD2_DATA_MASK)
-
-#define RX_LOOKAHEAD3_ADDRESS 0x00000414
-#define RX_LOOKAHEAD3_OFFSET 0x00000414
-#define RX_LOOKAHEAD3_DATA_MSB 7
-#define RX_LOOKAHEAD3_DATA_LSB 0
-#define RX_LOOKAHEAD3_DATA_MASK 0x000000ff
-#define RX_LOOKAHEAD3_DATA_GET(x) (((x) & RX_LOOKAHEAD3_DATA_MASK) >> RX_LOOKAHEAD3_DATA_LSB)
-#define RX_LOOKAHEAD3_DATA_SET(x) (((x) << RX_LOOKAHEAD3_DATA_LSB) & RX_LOOKAHEAD3_DATA_MASK)
-
-#define INT_STATUS_ENABLE_ADDRESS 0x00000418
-#define INT_STATUS_ENABLE_OFFSET 0x00000418
-#define INT_STATUS_ENABLE_ERROR_MSB 7
-#define INT_STATUS_ENABLE_ERROR_LSB 7
-#define INT_STATUS_ENABLE_ERROR_MASK 0x00000080
-#define INT_STATUS_ENABLE_ERROR_GET(x) (((x) & INT_STATUS_ENABLE_ERROR_MASK) >> INT_STATUS_ENABLE_ERROR_LSB)
-#define INT_STATUS_ENABLE_ERROR_SET(x) (((x) << INT_STATUS_ENABLE_ERROR_LSB) & INT_STATUS_ENABLE_ERROR_MASK)
-#define INT_STATUS_ENABLE_CPU_MSB 6
-#define INT_STATUS_ENABLE_CPU_LSB 6
-#define INT_STATUS_ENABLE_CPU_MASK 0x00000040
-#define INT_STATUS_ENABLE_CPU_GET(x) (((x) & INT_STATUS_ENABLE_CPU_MASK) >> INT_STATUS_ENABLE_CPU_LSB)
-#define INT_STATUS_ENABLE_CPU_SET(x) (((x) << INT_STATUS_ENABLE_CPU_LSB) & INT_STATUS_ENABLE_CPU_MASK)
-#define INT_STATUS_ENABLE_INT_MSB 5
-#define INT_STATUS_ENABLE_INT_LSB 5
-#define INT_STATUS_ENABLE_INT_MASK 0x00000020
-#define INT_STATUS_ENABLE_INT_GET(x) (((x) & INT_STATUS_ENABLE_INT_MASK) >> INT_STATUS_ENABLE_INT_LSB)
-#define INT_STATUS_ENABLE_INT_SET(x) (((x) << INT_STATUS_ENABLE_INT_LSB) & INT_STATUS_ENABLE_INT_MASK)
-#define INT_STATUS_ENABLE_COUNTER_MSB 4
-#define INT_STATUS_ENABLE_COUNTER_LSB 4
-#define INT_STATUS_ENABLE_COUNTER_MASK 0x00000010
-#define INT_STATUS_ENABLE_COUNTER_GET(x) (((x) & INT_STATUS_ENABLE_COUNTER_MASK) >> INT_STATUS_ENABLE_COUNTER_LSB)
-#define INT_STATUS_ENABLE_COUNTER_SET(x) (((x) << INT_STATUS_ENABLE_COUNTER_LSB) & INT_STATUS_ENABLE_COUNTER_MASK)
-#define INT_STATUS_ENABLE_MBOX_DATA_MSB 3
-#define INT_STATUS_ENABLE_MBOX_DATA_LSB 0
-#define INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f
-#define INT_STATUS_ENABLE_MBOX_DATA_GET(x) (((x) & INT_STATUS_ENABLE_MBOX_DATA_MASK) >> INT_STATUS_ENABLE_MBOX_DATA_LSB)
-#define INT_STATUS_ENABLE_MBOX_DATA_SET(x) (((x) << INT_STATUS_ENABLE_MBOX_DATA_LSB) & INT_STATUS_ENABLE_MBOX_DATA_MASK)
-
-#define CPU_INT_STATUS_ENABLE_ADDRESS 0x00000419
-#define CPU_INT_STATUS_ENABLE_OFFSET 0x00000419
-#define CPU_INT_STATUS_ENABLE_BIT_MSB 7
-#define CPU_INT_STATUS_ENABLE_BIT_LSB 0
-#define CPU_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
-#define CPU_INT_STATUS_ENABLE_BIT_GET(x) (((x) & CPU_INT_STATUS_ENABLE_BIT_MASK) >> CPU_INT_STATUS_ENABLE_BIT_LSB)
-#define CPU_INT_STATUS_ENABLE_BIT_SET(x) (((x) << CPU_INT_STATUS_ENABLE_BIT_LSB) & CPU_INT_STATUS_ENABLE_BIT_MASK)
-
-#define ERROR_STATUS_ENABLE_ADDRESS 0x0000041a
-#define ERROR_STATUS_ENABLE_OFFSET 0x0000041a
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_MSB 6
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_LSB 6
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_MASK 0x00000040
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_GET(x) (((x) & ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_MASK) >> ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_LSB)
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_SET(x) (((x) << ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_LSB) & ERROR_STATUS_ENABLE_UART_HCI_FRAMER_SYNC_ERROR_MASK)
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_MSB 5
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_LSB 5
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_MASK 0x00000020
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_GET(x) (((x) & ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_MASK) >> ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_LSB)
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_SET(x) (((x) << ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_LSB) & ERROR_STATUS_ENABLE_UART_HCI_FRAMER_OVERFLOW_MASK)
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_MSB 4
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_LSB 4
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_MASK 0x00000010
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_GET(x) (((x) & ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_MASK) >> ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_LSB)
-#define ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_SET(x) (((x) << ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_LSB) & ERROR_STATUS_ENABLE_UART_HCI_FRAMER_UNDERFLOW_MASK)
-#define ERROR_STATUS_ENABLE_WAKEUP_MSB 2
-#define ERROR_STATUS_ENABLE_WAKEUP_LSB 2
-#define ERROR_STATUS_ENABLE_WAKEUP_MASK 0x00000004
-#define ERROR_STATUS_ENABLE_WAKEUP_GET(x) (((x) & ERROR_STATUS_ENABLE_WAKEUP_MASK) >> ERROR_STATUS_ENABLE_WAKEUP_LSB)
-#define ERROR_STATUS_ENABLE_WAKEUP_SET(x) (((x) << ERROR_STATUS_ENABLE_WAKEUP_LSB) & ERROR_STATUS_ENABLE_WAKEUP_MASK)
-#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MSB 1
-#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 1
-#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00000002
-#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_GET(x) (((x) & ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) >> ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB)
-#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(x) (((x) << ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) & ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK)
-#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MSB 0
-#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 0
-#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00000001
-#define ERROR_STATUS_ENABLE_TX_OVERFLOW_GET(x) (((x) & ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) >> ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB)
-#define ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(x) (((x) << ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) & ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK)
-
-#define COUNTER_INT_STATUS_ENABLE_ADDRESS 0x0000041b
-#define COUNTER_INT_STATUS_ENABLE_OFFSET 0x0000041b
-#define COUNTER_INT_STATUS_ENABLE_BIT_MSB 7
-#define COUNTER_INT_STATUS_ENABLE_BIT_LSB 0
-#define COUNTER_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
-#define COUNTER_INT_STATUS_ENABLE_BIT_GET(x) (((x) & COUNTER_INT_STATUS_ENABLE_BIT_MASK) >> COUNTER_INT_STATUS_ENABLE_BIT_LSB)
-#define COUNTER_INT_STATUS_ENABLE_BIT_SET(x) (((x) << COUNTER_INT_STATUS_ENABLE_BIT_LSB) & COUNTER_INT_STATUS_ENABLE_BIT_MASK)
-
-#define COUNT_ADDRESS 0x00000420
-#define COUNT_OFFSET 0x00000420
-#define COUNT_VALUE_MSB 7
-#define COUNT_VALUE_LSB 0
-#define COUNT_VALUE_MASK 0x000000ff
-#define COUNT_VALUE_GET(x) (((x) & COUNT_VALUE_MASK) >> COUNT_VALUE_LSB)
-#define COUNT_VALUE_SET(x) (((x) << COUNT_VALUE_LSB) & COUNT_VALUE_MASK)
-
-#define COUNT_DEC_ADDRESS 0x00000440
-#define COUNT_DEC_OFFSET 0x00000440
-#define COUNT_DEC_VALUE_MSB 7
-#define COUNT_DEC_VALUE_LSB 0
-#define COUNT_DEC_VALUE_MASK 0x000000ff
-#define COUNT_DEC_VALUE_GET(x) (((x) & COUNT_DEC_VALUE_MASK) >> COUNT_DEC_VALUE_LSB)
-#define COUNT_DEC_VALUE_SET(x) (((x) << COUNT_DEC_VALUE_LSB) & COUNT_DEC_VALUE_MASK)
-
-#define SCRATCH_ADDRESS 0x00000460
-#define SCRATCH_OFFSET 0x00000460
-#define SCRATCH_VALUE_MSB 7
-#define SCRATCH_VALUE_LSB 0
-#define SCRATCH_VALUE_MASK 0x000000ff
-#define SCRATCH_VALUE_GET(x) (((x) & SCRATCH_VALUE_MASK) >> SCRATCH_VALUE_LSB)
-#define SCRATCH_VALUE_SET(x) (((x) << SCRATCH_VALUE_LSB) & SCRATCH_VALUE_MASK)
-
-#define FIFO_TIMEOUT_ADDRESS 0x00000468
-#define FIFO_TIMEOUT_OFFSET 0x00000468
-#define FIFO_TIMEOUT_VALUE_MSB 7
-#define FIFO_TIMEOUT_VALUE_LSB 0
-#define FIFO_TIMEOUT_VALUE_MASK 0x000000ff
-#define FIFO_TIMEOUT_VALUE_GET(x) (((x) & FIFO_TIMEOUT_VALUE_MASK) >> FIFO_TIMEOUT_VALUE_LSB)
-#define FIFO_TIMEOUT_VALUE_SET(x) (((x) << FIFO_TIMEOUT_VALUE_LSB) & FIFO_TIMEOUT_VALUE_MASK)
-
-#define FIFO_TIMEOUT_ENABLE_ADDRESS 0x00000469
-#define FIFO_TIMEOUT_ENABLE_OFFSET 0x00000469
-#define FIFO_TIMEOUT_ENABLE_SET_MSB 0
-#define FIFO_TIMEOUT_ENABLE_SET_LSB 0
-#define FIFO_TIMEOUT_ENABLE_SET_MASK 0x00000001
-#define FIFO_TIMEOUT_ENABLE_SET_GET(x) (((x) & FIFO_TIMEOUT_ENABLE_SET_MASK) >> FIFO_TIMEOUT_ENABLE_SET_LSB)
-#define FIFO_TIMEOUT_ENABLE_SET_SET(x) (((x) << FIFO_TIMEOUT_ENABLE_SET_LSB) & FIFO_TIMEOUT_ENABLE_SET_MASK)
-
-#define DISABLE_SLEEP_ADDRESS 0x0000046a
-#define DISABLE_SLEEP_OFFSET 0x0000046a
-#define DISABLE_SLEEP_FOR_INT_MSB 1
-#define DISABLE_SLEEP_FOR_INT_LSB 1
-#define DISABLE_SLEEP_FOR_INT_MASK 0x00000002
-#define DISABLE_SLEEP_FOR_INT_GET(x) (((x) & DISABLE_SLEEP_FOR_INT_MASK) >> DISABLE_SLEEP_FOR_INT_LSB)
-#define DISABLE_SLEEP_FOR_INT_SET(x) (((x) << DISABLE_SLEEP_FOR_INT_LSB) & DISABLE_SLEEP_FOR_INT_MASK)
-#define DISABLE_SLEEP_ON_MSB 0
-#define DISABLE_SLEEP_ON_LSB 0
-#define DISABLE_SLEEP_ON_MASK 0x00000001
-#define DISABLE_SLEEP_ON_GET(x) (((x) & DISABLE_SLEEP_ON_MASK) >> DISABLE_SLEEP_ON_LSB)
-#define DISABLE_SLEEP_ON_SET(x) (((x) << DISABLE_SLEEP_ON_LSB) & DISABLE_SLEEP_ON_MASK)
-
-#define LOCAL_BUS_ADDRESS 0x00000470
-#define LOCAL_BUS_OFFSET 0x00000470
-#define LOCAL_BUS_STATE_MSB 1
-#define LOCAL_BUS_STATE_LSB 0
-#define LOCAL_BUS_STATE_MASK 0x00000003
-#define LOCAL_BUS_STATE_GET(x) (((x) & LOCAL_BUS_STATE_MASK) >> LOCAL_BUS_STATE_LSB)
-#define LOCAL_BUS_STATE_SET(x) (((x) << LOCAL_BUS_STATE_LSB) & LOCAL_BUS_STATE_MASK)
-
-#define INT_WLAN_ADDRESS 0x00000472
-#define INT_WLAN_OFFSET 0x00000472
-#define INT_WLAN_VECTOR_MSB 7
-#define INT_WLAN_VECTOR_LSB 0
-#define INT_WLAN_VECTOR_MASK 0x000000ff
-#define INT_WLAN_VECTOR_GET(x) (((x) & INT_WLAN_VECTOR_MASK) >> INT_WLAN_VECTOR_LSB)
-#define INT_WLAN_VECTOR_SET(x) (((x) << INT_WLAN_VECTOR_LSB) & INT_WLAN_VECTOR_MASK)
-
-#define WINDOW_DATA_ADDRESS 0x00000474
-#define WINDOW_DATA_OFFSET 0x00000474
-#define WINDOW_DATA_DATA_MSB 7
-#define WINDOW_DATA_DATA_LSB 0
-#define WINDOW_DATA_DATA_MASK 0x000000ff
-#define WINDOW_DATA_DATA_GET(x) (((x) & WINDOW_DATA_DATA_MASK) >> WINDOW_DATA_DATA_LSB)
-#define WINDOW_DATA_DATA_SET(x) (((x) << WINDOW_DATA_DATA_LSB) & WINDOW_DATA_DATA_MASK)
-
-#define WINDOW_WRITE_ADDR_ADDRESS 0x00000478
-#define WINDOW_WRITE_ADDR_OFFSET 0x00000478
-#define WINDOW_WRITE_ADDR_ADDR_MSB 7
-#define WINDOW_WRITE_ADDR_ADDR_LSB 0
-#define WINDOW_WRITE_ADDR_ADDR_MASK 0x000000ff
-#define WINDOW_WRITE_ADDR_ADDR_GET(x) (((x) & WINDOW_WRITE_ADDR_ADDR_MASK) >> WINDOW_WRITE_ADDR_ADDR_LSB)
-#define WINDOW_WRITE_ADDR_ADDR_SET(x) (((x) << WINDOW_WRITE_ADDR_ADDR_LSB) & WINDOW_WRITE_ADDR_ADDR_MASK)
-
-#define WINDOW_READ_ADDR_ADDRESS 0x0000047c
-#define WINDOW_READ_ADDR_OFFSET 0x0000047c
-#define WINDOW_READ_ADDR_ADDR_MSB 7
-#define WINDOW_READ_ADDR_ADDR_LSB 0
-#define WINDOW_READ_ADDR_ADDR_MASK 0x000000ff
-#define WINDOW_READ_ADDR_ADDR_GET(x) (((x) & WINDOW_READ_ADDR_ADDR_MASK) >> WINDOW_READ_ADDR_ADDR_LSB)
-#define WINDOW_READ_ADDR_ADDR_SET(x) (((x) << WINDOW_READ_ADDR_ADDR_LSB) & WINDOW_READ_ADDR_ADDR_MASK)
-
-#define HOST_CTRL_SPI_CONFIG_ADDRESS 0x00000480
-#define HOST_CTRL_SPI_CONFIG_OFFSET 0x00000480
-#define HOST_CTRL_SPI_CONFIG_SPI_RESET_MSB 4
-#define HOST_CTRL_SPI_CONFIG_SPI_RESET_LSB 4
-#define HOST_CTRL_SPI_CONFIG_SPI_RESET_MASK 0x00000010
-#define HOST_CTRL_SPI_CONFIG_SPI_RESET_GET(x) (((x) & HOST_CTRL_SPI_CONFIG_SPI_RESET_MASK) >> HOST_CTRL_SPI_CONFIG_SPI_RESET_LSB)
-#define HOST_CTRL_SPI_CONFIG_SPI_RESET_SET(x) (((x) << HOST_CTRL_SPI_CONFIG_SPI_RESET_LSB) & HOST_CTRL_SPI_CONFIG_SPI_RESET_MASK)
-#define HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_MSB 3
-#define HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_LSB 3
-#define HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_MASK 0x00000008
-#define HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_GET(x) (((x) & HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_MASK) >> HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_LSB)
-#define HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_SET(x) (((x) << HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_LSB) & HOST_CTRL_SPI_CONFIG_INTERRUPT_ENABLE_MASK)
-#define HOST_CTRL_SPI_CONFIG_TEST_MODE_MSB 2
-#define HOST_CTRL_SPI_CONFIG_TEST_MODE_LSB 2
-#define HOST_CTRL_SPI_CONFIG_TEST_MODE_MASK 0x00000004
-#define HOST_CTRL_SPI_CONFIG_TEST_MODE_GET(x) (((x) & HOST_CTRL_SPI_CONFIG_TEST_MODE_MASK) >> HOST_CTRL_SPI_CONFIG_TEST_MODE_LSB)
-#define HOST_CTRL_SPI_CONFIG_TEST_MODE_SET(x) (((x) << HOST_CTRL_SPI_CONFIG_TEST_MODE_LSB) & HOST_CTRL_SPI_CONFIG_TEST_MODE_MASK)
-#define HOST_CTRL_SPI_CONFIG_DATA_SIZE_MSB 1
-#define HOST_CTRL_SPI_CONFIG_DATA_SIZE_LSB 0
-#define HOST_CTRL_SPI_CONFIG_DATA_SIZE_MASK 0x00000003
-#define HOST_CTRL_SPI_CONFIG_DATA_SIZE_GET(x) (((x) & HOST_CTRL_SPI_CONFIG_DATA_SIZE_MASK) >> HOST_CTRL_SPI_CONFIG_DATA_SIZE_LSB)
-#define HOST_CTRL_SPI_CONFIG_DATA_SIZE_SET(x) (((x) << HOST_CTRL_SPI_CONFIG_DATA_SIZE_LSB) & HOST_CTRL_SPI_CONFIG_DATA_SIZE_MASK)
-
-#define HOST_CTRL_SPI_STATUS_ADDRESS 0x00000481
-#define HOST_CTRL_SPI_STATUS_OFFSET 0x00000481
-#define HOST_CTRL_SPI_STATUS_ADDR_ERR_MSB 3
-#define HOST_CTRL_SPI_STATUS_ADDR_ERR_LSB 3
-#define HOST_CTRL_SPI_STATUS_ADDR_ERR_MASK 0x00000008
-#define HOST_CTRL_SPI_STATUS_ADDR_ERR_GET(x) (((x) & HOST_CTRL_SPI_STATUS_ADDR_ERR_MASK) >> HOST_CTRL_SPI_STATUS_ADDR_ERR_LSB)
-#define HOST_CTRL_SPI_STATUS_ADDR_ERR_SET(x) (((x) << HOST_CTRL_SPI_STATUS_ADDR_ERR_LSB) & HOST_CTRL_SPI_STATUS_ADDR_ERR_MASK)
-#define HOST_CTRL_SPI_STATUS_RD_ERR_MSB 2
-#define HOST_CTRL_SPI_STATUS_RD_ERR_LSB 2
-#define HOST_CTRL_SPI_STATUS_RD_ERR_MASK 0x00000004
-#define HOST_CTRL_SPI_STATUS_RD_ERR_GET(x) (((x) & HOST_CTRL_SPI_STATUS_RD_ERR_MASK) >> HOST_CTRL_SPI_STATUS_RD_ERR_LSB)
-#define HOST_CTRL_SPI_STATUS_RD_ERR_SET(x) (((x) << HOST_CTRL_SPI_STATUS_RD_ERR_LSB) & HOST_CTRL_SPI_STATUS_RD_ERR_MASK)
-#define HOST_CTRL_SPI_STATUS_WR_ERR_MSB 1
-#define HOST_CTRL_SPI_STATUS_WR_ERR_LSB 1
-#define HOST_CTRL_SPI_STATUS_WR_ERR_MASK 0x00000002
-#define HOST_CTRL_SPI_STATUS_WR_ERR_GET(x) (((x) & HOST_CTRL_SPI_STATUS_WR_ERR_MASK) >> HOST_CTRL_SPI_STATUS_WR_ERR_LSB)
-#define HOST_CTRL_SPI_STATUS_WR_ERR_SET(x) (((x) << HOST_CTRL_SPI_STATUS_WR_ERR_LSB) & HOST_CTRL_SPI_STATUS_WR_ERR_MASK)
-#define HOST_CTRL_SPI_STATUS_READY_MSB 0
-#define HOST_CTRL_SPI_STATUS_READY_LSB 0
-#define HOST_CTRL_SPI_STATUS_READY_MASK 0x00000001
-#define HOST_CTRL_SPI_STATUS_READY_GET(x) (((x) & HOST_CTRL_SPI_STATUS_READY_MASK) >> HOST_CTRL_SPI_STATUS_READY_LSB)
-#define HOST_CTRL_SPI_STATUS_READY_SET(x) (((x) << HOST_CTRL_SPI_STATUS_READY_LSB) & HOST_CTRL_SPI_STATUS_READY_MASK)
-
-#define NON_ASSOC_SLEEP_EN_ADDRESS 0x00000482
-#define NON_ASSOC_SLEEP_EN_OFFSET 0x00000482
-#define NON_ASSOC_SLEEP_EN_BIT_MSB 0
-#define NON_ASSOC_SLEEP_EN_BIT_LSB 0
-#define NON_ASSOC_SLEEP_EN_BIT_MASK 0x00000001
-#define NON_ASSOC_SLEEP_EN_BIT_GET(x) (((x) & NON_ASSOC_SLEEP_EN_BIT_MASK) >> NON_ASSOC_SLEEP_EN_BIT_LSB)
-#define NON_ASSOC_SLEEP_EN_BIT_SET(x) (((x) << NON_ASSOC_SLEEP_EN_BIT_LSB) & NON_ASSOC_SLEEP_EN_BIT_MASK)
-
-#define CPU_DBG_SEL_ADDRESS 0x00000483
-#define CPU_DBG_SEL_OFFSET 0x00000483
-#define CPU_DBG_SEL_BIT_MSB 5
-#define CPU_DBG_SEL_BIT_LSB 0
-#define CPU_DBG_SEL_BIT_MASK 0x0000003f
-#define CPU_DBG_SEL_BIT_GET(x) (((x) & CPU_DBG_SEL_BIT_MASK) >> CPU_DBG_SEL_BIT_LSB)
-#define CPU_DBG_SEL_BIT_SET(x) (((x) << CPU_DBG_SEL_BIT_LSB) & CPU_DBG_SEL_BIT_MASK)
-
-#define CPU_DBG_ADDRESS 0x00000484
-#define CPU_DBG_OFFSET 0x00000484
-#define CPU_DBG_DATA_MSB 7
-#define CPU_DBG_DATA_LSB 0
-#define CPU_DBG_DATA_MASK 0x000000ff
-#define CPU_DBG_DATA_GET(x) (((x) & CPU_DBG_DATA_MASK) >> CPU_DBG_DATA_LSB)
-#define CPU_DBG_DATA_SET(x) (((x) << CPU_DBG_DATA_LSB) & CPU_DBG_DATA_MASK)
-
-#define INT_STATUS2_ENABLE_ADDRESS 0x00000488
-#define INT_STATUS2_ENABLE_OFFSET 0x00000488
-#define INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_MSB 2
-#define INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_LSB 2
-#define INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_MASK 0x00000004
-#define INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_GET(x) (((x) & INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_MASK) >> INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_LSB)
-#define INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_SET(x) (((x) << INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_LSB) & INT_STATUS2_ENABLE_GMBOX_RX_UNDERFLOW_MASK)
-#define INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_MSB 1
-#define INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_LSB 1
-#define INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_MASK 0x00000002
-#define INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_GET(x) (((x) & INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_MASK) >> INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_LSB)
-#define INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_SET(x) (((x) << INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_LSB) & INT_STATUS2_ENABLE_GMBOX_TX_OVERFLOW_MASK)
-#define INT_STATUS2_ENABLE_GMBOX_DATA_MSB 0
-#define INT_STATUS2_ENABLE_GMBOX_DATA_LSB 0
-#define INT_STATUS2_ENABLE_GMBOX_DATA_MASK 0x00000001
-#define INT_STATUS2_ENABLE_GMBOX_DATA_GET(x) (((x) & INT_STATUS2_ENABLE_GMBOX_DATA_MASK) >> INT_STATUS2_ENABLE_GMBOX_DATA_LSB)
-#define INT_STATUS2_ENABLE_GMBOX_DATA_SET(x) (((x) << INT_STATUS2_ENABLE_GMBOX_DATA_LSB) & INT_STATUS2_ENABLE_GMBOX_DATA_MASK)
-
-#define GMBOX_RX_LOOKAHEAD_ADDRESS 0x00000490
-#define GMBOX_RX_LOOKAHEAD_OFFSET 0x00000490
-#define GMBOX_RX_LOOKAHEAD_DATA_MSB 7
-#define GMBOX_RX_LOOKAHEAD_DATA_LSB 0
-#define GMBOX_RX_LOOKAHEAD_DATA_MASK 0x000000ff
-#define GMBOX_RX_LOOKAHEAD_DATA_GET(x) (((x) & GMBOX_RX_LOOKAHEAD_DATA_MASK) >> GMBOX_RX_LOOKAHEAD_DATA_LSB)
-#define GMBOX_RX_LOOKAHEAD_DATA_SET(x) (((x) << GMBOX_RX_LOOKAHEAD_DATA_LSB) & GMBOX_RX_LOOKAHEAD_DATA_MASK)
-
-#define GMBOX_RX_LOOKAHEAD_MUX_ADDRESS 0x00000498
-#define GMBOX_RX_LOOKAHEAD_MUX_OFFSET 0x00000498
-#define GMBOX_RX_LOOKAHEAD_MUX_SEL_MSB 0
-#define GMBOX_RX_LOOKAHEAD_MUX_SEL_LSB 0
-#define GMBOX_RX_LOOKAHEAD_MUX_SEL_MASK 0x00000001
-#define GMBOX_RX_LOOKAHEAD_MUX_SEL_GET(x) (((x) & GMBOX_RX_LOOKAHEAD_MUX_SEL_MASK) >> GMBOX_RX_LOOKAHEAD_MUX_SEL_LSB)
-#define GMBOX_RX_LOOKAHEAD_MUX_SEL_SET(x) (((x) << GMBOX_RX_LOOKAHEAD_MUX_SEL_LSB) & GMBOX_RX_LOOKAHEAD_MUX_SEL_MASK)
-
-#define CIS_WINDOW_ADDRESS 0x00000600
-#define CIS_WINDOW_OFFSET 0x00000600
-#define CIS_WINDOW_DATA_MSB 7
-#define CIS_WINDOW_DATA_LSB 0
-#define CIS_WINDOW_DATA_MASK 0x000000ff
-#define CIS_WINDOW_DATA_GET(x) (((x) & CIS_WINDOW_DATA_MASK) >> CIS_WINDOW_DATA_LSB)
-#define CIS_WINDOW_DATA_SET(x) (((x) << CIS_WINDOW_DATA_LSB) & CIS_WINDOW_DATA_MASK)
-
-
-#endif /* _MBOX_WLAN_HOST_REG_H_ */
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_reg.h
deleted file mode 100644
index f5167b9ae8d..00000000000
--- a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/mbox_wlan_reg.h
+++ /dev/null
@@ -1,589 +0,0 @@
-// ------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-// ------------------------------------------------------------------
-//===================================================================
-// Author(s): ="Atheros"
-//===================================================================
-
-
-#ifndef _MBOX_WLAN_REG_REG_H_
-#define _MBOX_WLAN_REG_REG_H_
-
-#define WLAN_MBOX_FIFO_ADDRESS 0x00000000
-#define WLAN_MBOX_FIFO_OFFSET 0x00000000
-#define WLAN_MBOX_FIFO_DATA_MSB 19
-#define WLAN_MBOX_FIFO_DATA_LSB 0
-#define WLAN_MBOX_FIFO_DATA_MASK 0x000fffff
-#define WLAN_MBOX_FIFO_DATA_GET(x) (((x) & WLAN_MBOX_FIFO_DATA_MASK) >> WLAN_MBOX_FIFO_DATA_LSB)
-#define WLAN_MBOX_FIFO_DATA_SET(x) (((x) << WLAN_MBOX_FIFO_DATA_LSB) & WLAN_MBOX_FIFO_DATA_MASK)
-
-#define WLAN_MBOX_FIFO_STATUS_ADDRESS 0x00000010
-#define WLAN_MBOX_FIFO_STATUS_OFFSET 0x00000010
-#define WLAN_MBOX_FIFO_STATUS_EMPTY_MSB 19
-#define WLAN_MBOX_FIFO_STATUS_EMPTY_LSB 16
-#define WLAN_MBOX_FIFO_STATUS_EMPTY_MASK 0x000f0000
-#define WLAN_MBOX_FIFO_STATUS_EMPTY_GET(x) (((x) & WLAN_MBOX_FIFO_STATUS_EMPTY_MASK) >> WLAN_MBOX_FIFO_STATUS_EMPTY_LSB)
-#define WLAN_MBOX_FIFO_STATUS_EMPTY_SET(x) (((x) << WLAN_MBOX_FIFO_STATUS_EMPTY_LSB) & WLAN_MBOX_FIFO_STATUS_EMPTY_MASK)
-#define WLAN_MBOX_FIFO_STATUS_FULL_MSB 15
-#define WLAN_MBOX_FIFO_STATUS_FULL_LSB 12
-#define WLAN_MBOX_FIFO_STATUS_FULL_MASK 0x0000f000
-#define WLAN_MBOX_FIFO_STATUS_FULL_GET(x) (((x) & WLAN_MBOX_FIFO_STATUS_FULL_MASK) >> WLAN_MBOX_FIFO_STATUS_FULL_LSB)
-#define WLAN_MBOX_FIFO_STATUS_FULL_SET(x) (((x) << WLAN_MBOX_FIFO_STATUS_FULL_LSB) & WLAN_MBOX_FIFO_STATUS_FULL_MASK)
-
-#define WLAN_MBOX_DMA_POLICY_ADDRESS 0x00000014
-#define WLAN_MBOX_DMA_POLICY_OFFSET 0x00000014
-#define WLAN_MBOX_DMA_POLICY_TX_QUANTUM_MSB 3
-#define WLAN_MBOX_DMA_POLICY_TX_QUANTUM_LSB 3
-#define WLAN_MBOX_DMA_POLICY_TX_QUANTUM_MASK 0x00000008
-#define WLAN_MBOX_DMA_POLICY_TX_QUANTUM_GET(x) (((x) & WLAN_MBOX_DMA_POLICY_TX_QUANTUM_MASK) >> WLAN_MBOX_DMA_POLICY_TX_QUANTUM_LSB)
-#define WLAN_MBOX_DMA_POLICY_TX_QUANTUM_SET(x) (((x) << WLAN_MBOX_DMA_POLICY_TX_QUANTUM_LSB) & WLAN_MBOX_DMA_POLICY_TX_QUANTUM_MASK)
-#define WLAN_MBOX_DMA_POLICY_TX_ORDER_MSB 2
-#define WLAN_MBOX_DMA_POLICY_TX_ORDER_LSB 2
-#define WLAN_MBOX_DMA_POLICY_TX_ORDER_MASK 0x00000004
-#define WLAN_MBOX_DMA_POLICY_TX_ORDER_GET(x) (((x) & WLAN_MBOX_DMA_POLICY_TX_ORDER_MASK) >> WLAN_MBOX_DMA_POLICY_TX_ORDER_LSB)
-#define WLAN_MBOX_DMA_POLICY_TX_ORDER_SET(x) (((x) << WLAN_MBOX_DMA_POLICY_TX_ORDER_LSB) & WLAN_MBOX_DMA_POLICY_TX_ORDER_MASK)
-#define WLAN_MBOX_DMA_POLICY_RX_QUANTUM_MSB 1
-#define WLAN_MBOX_DMA_POLICY_RX_QUANTUM_LSB 1
-#define WLAN_MBOX_DMA_POLICY_RX_QUANTUM_MASK 0x00000002
-#define WLAN_MBOX_DMA_POLICY_RX_QUANTUM_GET(x) (((x) & WLAN_MBOX_DMA_POLICY_RX_QUANTUM_MASK) >> WLAN_MBOX_DMA_POLICY_RX_QUANTUM_LSB)
-#define WLAN_MBOX_DMA_POLICY_RX_QUANTUM_SET(x) (((x) << WLAN_MBOX_DMA_POLICY_RX_QUANTUM_LSB) & WLAN_MBOX_DMA_POLICY_RX_QUANTUM_MASK)
-#define WLAN_MBOX_DMA_POLICY_RX_ORDER_MSB 0
-#define WLAN_MBOX_DMA_POLICY_RX_ORDER_LSB 0
-#define WLAN_MBOX_DMA_POLICY_RX_ORDER_MASK 0x00000001
-#define WLAN_MBOX_DMA_POLICY_RX_ORDER_GET(x) (((x) & WLAN_MBOX_DMA_POLICY_RX_ORDER_MASK) >> WLAN_MBOX_DMA_POLICY_RX_ORDER_LSB)
-#define WLAN_MBOX_DMA_POLICY_RX_ORDER_SET(x) (((x) << WLAN_MBOX_DMA_POLICY_RX_ORDER_LSB) & WLAN_MBOX_DMA_POLICY_RX_ORDER_MASK)
-
-#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000018
-#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000018
-#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
-#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
-#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
-#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
-#define WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
-
-#define WLAN_MBOX0_DMA_RX_CONTROL_ADDRESS 0x0000001c
-#define WLAN_MBOX0_DMA_RX_CONTROL_OFFSET 0x0000001c
-#define WLAN_MBOX0_DMA_RX_CONTROL_RESUME_MSB 2
-#define WLAN_MBOX0_DMA_RX_CONTROL_RESUME_LSB 2
-#define WLAN_MBOX0_DMA_RX_CONTROL_RESUME_MASK 0x00000004
-#define WLAN_MBOX0_DMA_RX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX0_DMA_RX_CONTROL_RESUME_MASK) >> WLAN_MBOX0_DMA_RX_CONTROL_RESUME_LSB)
-#define WLAN_MBOX0_DMA_RX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX0_DMA_RX_CONTROL_RESUME_LSB) & WLAN_MBOX0_DMA_RX_CONTROL_RESUME_MASK)
-#define WLAN_MBOX0_DMA_RX_CONTROL_START_MSB 1
-#define WLAN_MBOX0_DMA_RX_CONTROL_START_LSB 1
-#define WLAN_MBOX0_DMA_RX_CONTROL_START_MASK 0x00000002
-#define WLAN_MBOX0_DMA_RX_CONTROL_START_GET(x) (((x) & WLAN_MBOX0_DMA_RX_CONTROL_START_MASK) >> WLAN_MBOX0_DMA_RX_CONTROL_START_LSB)
-#define WLAN_MBOX0_DMA_RX_CONTROL_START_SET(x) (((x) << WLAN_MBOX0_DMA_RX_CONTROL_START_LSB) & WLAN_MBOX0_DMA_RX_CONTROL_START_MASK)
-#define WLAN_MBOX0_DMA_RX_CONTROL_STOP_MSB 0
-#define WLAN_MBOX0_DMA_RX_CONTROL_STOP_LSB 0
-#define WLAN_MBOX0_DMA_RX_CONTROL_STOP_MASK 0x00000001
-#define WLAN_MBOX0_DMA_RX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX0_DMA_RX_CONTROL_STOP_MASK) >> WLAN_MBOX0_DMA_RX_CONTROL_STOP_LSB)
-#define WLAN_MBOX0_DMA_RX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX0_DMA_RX_CONTROL_STOP_LSB) & WLAN_MBOX0_DMA_RX_CONTROL_STOP_MASK)
-
-#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x00000020
-#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x00000020
-#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
-#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
-#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
-#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
-#define WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
-
-#define WLAN_MBOX0_DMA_TX_CONTROL_ADDRESS 0x00000024
-#define WLAN_MBOX0_DMA_TX_CONTROL_OFFSET 0x00000024
-#define WLAN_MBOX0_DMA_TX_CONTROL_RESUME_MSB 2
-#define WLAN_MBOX0_DMA_TX_CONTROL_RESUME_LSB 2
-#define WLAN_MBOX0_DMA_TX_CONTROL_RESUME_MASK 0x00000004
-#define WLAN_MBOX0_DMA_TX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX0_DMA_TX_CONTROL_RESUME_MASK) >> WLAN_MBOX0_DMA_TX_CONTROL_RESUME_LSB)
-#define WLAN_MBOX0_DMA_TX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX0_DMA_TX_CONTROL_RESUME_LSB) & WLAN_MBOX0_DMA_TX_CONTROL_RESUME_MASK)
-#define WLAN_MBOX0_DMA_TX_CONTROL_START_MSB 1
-#define WLAN_MBOX0_DMA_TX_CONTROL_START_LSB 1
-#define WLAN_MBOX0_DMA_TX_CONTROL_START_MASK 0x00000002
-#define WLAN_MBOX0_DMA_TX_CONTROL_START_GET(x) (((x) & WLAN_MBOX0_DMA_TX_CONTROL_START_MASK) >> WLAN_MBOX0_DMA_TX_CONTROL_START_LSB)
-#define WLAN_MBOX0_DMA_TX_CONTROL_START_SET(x) (((x) << WLAN_MBOX0_DMA_TX_CONTROL_START_LSB) & WLAN_MBOX0_DMA_TX_CONTROL_START_MASK)
-#define WLAN_MBOX0_DMA_TX_CONTROL_STOP_MSB 0
-#define WLAN_MBOX0_DMA_TX_CONTROL_STOP_LSB 0
-#define WLAN_MBOX0_DMA_TX_CONTROL_STOP_MASK 0x00000001
-#define WLAN_MBOX0_DMA_TX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX0_DMA_TX_CONTROL_STOP_MASK) >> WLAN_MBOX0_DMA_TX_CONTROL_STOP_LSB)
-#define WLAN_MBOX0_DMA_TX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX0_DMA_TX_CONTROL_STOP_LSB) & WLAN_MBOX0_DMA_TX_CONTROL_STOP_MASK)
-
-#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000028
-#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000028
-#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
-#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
-#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
-#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
-#define WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX1_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
-
-#define WLAN_MBOX1_DMA_RX_CONTROL_ADDRESS 0x0000002c
-#define WLAN_MBOX1_DMA_RX_CONTROL_OFFSET 0x0000002c
-#define WLAN_MBOX1_DMA_RX_CONTROL_RESUME_MSB 2
-#define WLAN_MBOX1_DMA_RX_CONTROL_RESUME_LSB 2
-#define WLAN_MBOX1_DMA_RX_CONTROL_RESUME_MASK 0x00000004
-#define WLAN_MBOX1_DMA_RX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX1_DMA_RX_CONTROL_RESUME_MASK) >> WLAN_MBOX1_DMA_RX_CONTROL_RESUME_LSB)
-#define WLAN_MBOX1_DMA_RX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX1_DMA_RX_CONTROL_RESUME_LSB) & WLAN_MBOX1_DMA_RX_CONTROL_RESUME_MASK)
-#define WLAN_MBOX1_DMA_RX_CONTROL_START_MSB 1
-#define WLAN_MBOX1_DMA_RX_CONTROL_START_LSB 1
-#define WLAN_MBOX1_DMA_RX_CONTROL_START_MASK 0x00000002
-#define WLAN_MBOX1_DMA_RX_CONTROL_START_GET(x) (((x) & WLAN_MBOX1_DMA_RX_CONTROL_START_MASK) >> WLAN_MBOX1_DMA_RX_CONTROL_START_LSB)
-#define WLAN_MBOX1_DMA_RX_CONTROL_START_SET(x) (((x) << WLAN_MBOX1_DMA_RX_CONTROL_START_LSB) & WLAN_MBOX1_DMA_RX_CONTROL_START_MASK)
-#define WLAN_MBOX1_DMA_RX_CONTROL_STOP_MSB 0
-#define WLAN_MBOX1_DMA_RX_CONTROL_STOP_LSB 0
-#define WLAN_MBOX1_DMA_RX_CONTROL_STOP_MASK 0x00000001
-#define WLAN_MBOX1_DMA_RX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX1_DMA_RX_CONTROL_STOP_MASK) >> WLAN_MBOX1_DMA_RX_CONTROL_STOP_LSB)
-#define WLAN_MBOX1_DMA_RX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX1_DMA_RX_CONTROL_STOP_LSB) & WLAN_MBOX1_DMA_RX_CONTROL_STOP_MASK)
-
-#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x00000030
-#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x00000030
-#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
-#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
-#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
-#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
-#define WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX1_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
-
-#define WLAN_MBOX1_DMA_TX_CONTROL_ADDRESS 0x00000034
-#define WLAN_MBOX1_DMA_TX_CONTROL_OFFSET 0x00000034
-#define WLAN_MBOX1_DMA_TX_CONTROL_RESUME_MSB 2
-#define WLAN_MBOX1_DMA_TX_CONTROL_RESUME_LSB 2
-#define WLAN_MBOX1_DMA_TX_CONTROL_RESUME_MASK 0x00000004
-#define WLAN_MBOX1_DMA_TX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX1_DMA_TX_CONTROL_RESUME_MASK) >> WLAN_MBOX1_DMA_TX_CONTROL_RESUME_LSB)
-#define WLAN_MBOX1_DMA_TX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX1_DMA_TX_CONTROL_RESUME_LSB) & WLAN_MBOX1_DMA_TX_CONTROL_RESUME_MASK)
-#define WLAN_MBOX1_DMA_TX_CONTROL_START_MSB 1
-#define WLAN_MBOX1_DMA_TX_CONTROL_START_LSB 1
-#define WLAN_MBOX1_DMA_TX_CONTROL_START_MASK 0x00000002
-#define WLAN_MBOX1_DMA_TX_CONTROL_START_GET(x) (((x) & WLAN_MBOX1_DMA_TX_CONTROL_START_MASK) >> WLAN_MBOX1_DMA_TX_CONTROL_START_LSB)
-#define WLAN_MBOX1_DMA_TX_CONTROL_START_SET(x) (((x) << WLAN_MBOX1_DMA_TX_CONTROL_START_LSB) & WLAN_MBOX1_DMA_TX_CONTROL_START_MASK)
-#define WLAN_MBOX1_DMA_TX_CONTROL_STOP_MSB 0
-#define WLAN_MBOX1_DMA_TX_CONTROL_STOP_LSB 0
-#define WLAN_MBOX1_DMA_TX_CONTROL_STOP_MASK 0x00000001
-#define WLAN_MBOX1_DMA_TX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX1_DMA_TX_CONTROL_STOP_MASK) >> WLAN_MBOX1_DMA_TX_CONTROL_STOP_LSB)
-#define WLAN_MBOX1_DMA_TX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX1_DMA_TX_CONTROL_STOP_LSB) & WLAN_MBOX1_DMA_TX_CONTROL_STOP_MASK)
-
-#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000038
-#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000038
-#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
-#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
-#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
-#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
-#define WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX2_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
-
-#define WLAN_MBOX2_DMA_RX_CONTROL_ADDRESS 0x0000003c
-#define WLAN_MBOX2_DMA_RX_CONTROL_OFFSET 0x0000003c
-#define WLAN_MBOX2_DMA_RX_CONTROL_RESUME_MSB 2
-#define WLAN_MBOX2_DMA_RX_CONTROL_RESUME_LSB 2
-#define WLAN_MBOX2_DMA_RX_CONTROL_RESUME_MASK 0x00000004
-#define WLAN_MBOX2_DMA_RX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX2_DMA_RX_CONTROL_RESUME_MASK) >> WLAN_MBOX2_DMA_RX_CONTROL_RESUME_LSB)
-#define WLAN_MBOX2_DMA_RX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX2_DMA_RX_CONTROL_RESUME_LSB) & WLAN_MBOX2_DMA_RX_CONTROL_RESUME_MASK)
-#define WLAN_MBOX2_DMA_RX_CONTROL_START_MSB 1
-#define WLAN_MBOX2_DMA_RX_CONTROL_START_LSB 1
-#define WLAN_MBOX2_DMA_RX_CONTROL_START_MASK 0x00000002
-#define WLAN_MBOX2_DMA_RX_CONTROL_START_GET(x) (((x) & WLAN_MBOX2_DMA_RX_CONTROL_START_MASK) >> WLAN_MBOX2_DMA_RX_CONTROL_START_LSB)
-#define WLAN_MBOX2_DMA_RX_CONTROL_START_SET(x) (((x) << WLAN_MBOX2_DMA_RX_CONTROL_START_LSB) & WLAN_MBOX2_DMA_RX_CONTROL_START_MASK)
-#define WLAN_MBOX2_DMA_RX_CONTROL_STOP_MSB 0
-#define WLAN_MBOX2_DMA_RX_CONTROL_STOP_LSB 0
-#define WLAN_MBOX2_DMA_RX_CONTROL_STOP_MASK 0x00000001
-#define WLAN_MBOX2_DMA_RX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX2_DMA_RX_CONTROL_STOP_MASK) >> WLAN_MBOX2_DMA_RX_CONTROL_STOP_LSB)
-#define WLAN_MBOX2_DMA_RX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX2_DMA_RX_CONTROL_STOP_LSB) & WLAN_MBOX2_DMA_RX_CONTROL_STOP_MASK)
-
-#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x00000040
-#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x00000040
-#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
-#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
-#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
-#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
-#define WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX2_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
-
-#define WLAN_MBOX2_DMA_TX_CONTROL_ADDRESS 0x00000044
-#define WLAN_MBOX2_DMA_TX_CONTROL_OFFSET 0x00000044
-#define WLAN_MBOX2_DMA_TX_CONTROL_RESUME_MSB 2
-#define WLAN_MBOX2_DMA_TX_CONTROL_RESUME_LSB 2
-#define WLAN_MBOX2_DMA_TX_CONTROL_RESUME_MASK 0x00000004
-#define WLAN_MBOX2_DMA_TX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX2_DMA_TX_CONTROL_RESUME_MASK) >> WLAN_MBOX2_DMA_TX_CONTROL_RESUME_LSB)
-#define WLAN_MBOX2_DMA_TX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX2_DMA_TX_CONTROL_RESUME_LSB) & WLAN_MBOX2_DMA_TX_CONTROL_RESUME_MASK)
-#define WLAN_MBOX2_DMA_TX_CONTROL_START_MSB 1
-#define WLAN_MBOX2_DMA_TX_CONTROL_START_LSB 1
-#define WLAN_MBOX2_DMA_TX_CONTROL_START_MASK 0x00000002
-#define WLAN_MBOX2_DMA_TX_CONTROL_START_GET(x) (((x) & WLAN_MBOX2_DMA_TX_CONTROL_START_MASK) >> WLAN_MBOX2_DMA_TX_CONTROL_START_LSB)
-#define WLAN_MBOX2_DMA_TX_CONTROL_START_SET(x) (((x) << WLAN_MBOX2_DMA_TX_CONTROL_START_LSB) & WLAN_MBOX2_DMA_TX_CONTROL_START_MASK)
-#define WLAN_MBOX2_DMA_TX_CONTROL_STOP_MSB 0
-#define WLAN_MBOX2_DMA_TX_CONTROL_STOP_LSB 0
-#define WLAN_MBOX2_DMA_TX_CONTROL_STOP_MASK 0x00000001
-#define WLAN_MBOX2_DMA_TX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX2_DMA_TX_CONTROL_STOP_MASK) >> WLAN_MBOX2_DMA_TX_CONTROL_STOP_LSB)
-#define WLAN_MBOX2_DMA_TX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX2_DMA_TX_CONTROL_STOP_LSB) & WLAN_MBOX2_DMA_TX_CONTROL_STOP_MASK)
-
-#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000048
-#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000048
-#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
-#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
-#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
-#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
-#define WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX3_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
-
-#define WLAN_MBOX3_DMA_RX_CONTROL_ADDRESS 0x0000004c
-#define WLAN_MBOX3_DMA_RX_CONTROL_OFFSET 0x0000004c
-#define WLAN_MBOX3_DMA_RX_CONTROL_RESUME_MSB 2
-#define WLAN_MBOX3_DMA_RX_CONTROL_RESUME_LSB 2
-#define WLAN_MBOX3_DMA_RX_CONTROL_RESUME_MASK 0x00000004
-#define WLAN_MBOX3_DMA_RX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX3_DMA_RX_CONTROL_RESUME_MASK) >> WLAN_MBOX3_DMA_RX_CONTROL_RESUME_LSB)
-#define WLAN_MBOX3_DMA_RX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX3_DMA_RX_CONTROL_RESUME_LSB) & WLAN_MBOX3_DMA_RX_CONTROL_RESUME_MASK)
-#define WLAN_MBOX3_DMA_RX_CONTROL_START_MSB 1
-#define WLAN_MBOX3_DMA_RX_CONTROL_START_LSB 1
-#define WLAN_MBOX3_DMA_RX_CONTROL_START_MASK 0x00000002
-#define WLAN_MBOX3_DMA_RX_CONTROL_START_GET(x) (((x) & WLAN_MBOX3_DMA_RX_CONTROL_START_MASK) >> WLAN_MBOX3_DMA_RX_CONTROL_START_LSB)
-#define WLAN_MBOX3_DMA_RX_CONTROL_START_SET(x) (((x) << WLAN_MBOX3_DMA_RX_CONTROL_START_LSB) & WLAN_MBOX3_DMA_RX_CONTROL_START_MASK)
-#define WLAN_MBOX3_DMA_RX_CONTROL_STOP_MSB 0
-#define WLAN_MBOX3_DMA_RX_CONTROL_STOP_LSB 0
-#define WLAN_MBOX3_DMA_RX_CONTROL_STOP_MASK 0x00000001
-#define WLAN_MBOX3_DMA_RX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX3_DMA_RX_CONTROL_STOP_MASK) >> WLAN_MBOX3_DMA_RX_CONTROL_STOP_LSB)
-#define WLAN_MBOX3_DMA_RX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX3_DMA_RX_CONTROL_STOP_LSB) & WLAN_MBOX3_DMA_RX_CONTROL_STOP_MASK)
-
-#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x00000050
-#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x00000050
-#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
-#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
-#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
-#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
-#define WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_MBOX3_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
-
-#define WLAN_MBOX3_DMA_TX_CONTROL_ADDRESS 0x00000054
-#define WLAN_MBOX3_DMA_TX_CONTROL_OFFSET 0x00000054
-#define WLAN_MBOX3_DMA_TX_CONTROL_RESUME_MSB 2
-#define WLAN_MBOX3_DMA_TX_CONTROL_RESUME_LSB 2
-#define WLAN_MBOX3_DMA_TX_CONTROL_RESUME_MASK 0x00000004
-#define WLAN_MBOX3_DMA_TX_CONTROL_RESUME_GET(x) (((x) & WLAN_MBOX3_DMA_TX_CONTROL_RESUME_MASK) >> WLAN_MBOX3_DMA_TX_CONTROL_RESUME_LSB)
-#define WLAN_MBOX3_DMA_TX_CONTROL_RESUME_SET(x) (((x) << WLAN_MBOX3_DMA_TX_CONTROL_RESUME_LSB) & WLAN_MBOX3_DMA_TX_CONTROL_RESUME_MASK)
-#define WLAN_MBOX3_DMA_TX_CONTROL_START_MSB 1
-#define WLAN_MBOX3_DMA_TX_CONTROL_START_LSB 1
-#define WLAN_MBOX3_DMA_TX_CONTROL_START_MASK 0x00000002
-#define WLAN_MBOX3_DMA_TX_CONTROL_START_GET(x) (((x) & WLAN_MBOX3_DMA_TX_CONTROL_START_MASK) >> WLAN_MBOX3_DMA_TX_CONTROL_START_LSB)
-#define WLAN_MBOX3_DMA_TX_CONTROL_START_SET(x) (((x) << WLAN_MBOX3_DMA_TX_CONTROL_START_LSB) & WLAN_MBOX3_DMA_TX_CONTROL_START_MASK)
-#define WLAN_MBOX3_DMA_TX_CONTROL_STOP_MSB 0
-#define WLAN_MBOX3_DMA_TX_CONTROL_STOP_LSB 0
-#define WLAN_MBOX3_DMA_TX_CONTROL_STOP_MASK 0x00000001
-#define WLAN_MBOX3_DMA_TX_CONTROL_STOP_GET(x) (((x) & WLAN_MBOX3_DMA_TX_CONTROL_STOP_MASK) >> WLAN_MBOX3_DMA_TX_CONTROL_STOP_LSB)
-#define WLAN_MBOX3_DMA_TX_CONTROL_STOP_SET(x) (((x) << WLAN_MBOX3_DMA_TX_CONTROL_STOP_LSB) & WLAN_MBOX3_DMA_TX_CONTROL_STOP_MASK)
-
-#define WLAN_MBOX_INT_STATUS_ADDRESS 0x00000058
-#define WLAN_MBOX_INT_STATUS_OFFSET 0x00000058
-#define WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_MSB 31
-#define WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_LSB 28
-#define WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_MASK 0xf0000000
-#define WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x) (((x) & WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_MASK) >> WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_LSB)
-#define WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x) (((x) << WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_LSB) & WLAN_MBOX_INT_STATUS_RX_DMA_COMPLETE_MASK)
-#define WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB 27
-#define WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB 24
-#define WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK 0x0f000000
-#define WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x) (((x) & WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK) >> WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB)
-#define WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x) (((x) << WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB) & WLAN_MBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK)
-#define WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_MSB 23
-#define WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_LSB 20
-#define WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_MASK 0x00f00000
-#define WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x) (((x) & WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_MASK) >> WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_LSB)
-#define WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x) (((x) << WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_LSB) & WLAN_MBOX_INT_STATUS_TX_DMA_COMPLETE_MASK)
-#define WLAN_MBOX_INT_STATUS_TX_OVERFLOW_MSB 17
-#define WLAN_MBOX_INT_STATUS_TX_OVERFLOW_LSB 17
-#define WLAN_MBOX_INT_STATUS_TX_OVERFLOW_MASK 0x00020000
-#define WLAN_MBOX_INT_STATUS_TX_OVERFLOW_GET(x) (((x) & WLAN_MBOX_INT_STATUS_TX_OVERFLOW_MASK) >> WLAN_MBOX_INT_STATUS_TX_OVERFLOW_LSB)
-#define WLAN_MBOX_INT_STATUS_TX_OVERFLOW_SET(x) (((x) << WLAN_MBOX_INT_STATUS_TX_OVERFLOW_LSB) & WLAN_MBOX_INT_STATUS_TX_OVERFLOW_MASK)
-#define WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_MSB 16
-#define WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_LSB 16
-#define WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_MASK 0x00010000
-#define WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_GET(x) (((x) & WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_MASK) >> WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_LSB)
-#define WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_SET(x) (((x) << WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_LSB) & WLAN_MBOX_INT_STATUS_RX_UNDERFLOW_MASK)
-#define WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_MSB 15
-#define WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_LSB 12
-#define WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_MASK 0x0000f000
-#define WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_GET(x) (((x) & WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_MASK) >> WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_LSB)
-#define WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_SET(x) (((x) << WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_LSB) & WLAN_MBOX_INT_STATUS_TX_NOT_EMPTY_MASK)
-#define WLAN_MBOX_INT_STATUS_RX_NOT_FULL_MSB 11
-#define WLAN_MBOX_INT_STATUS_RX_NOT_FULL_LSB 8
-#define WLAN_MBOX_INT_STATUS_RX_NOT_FULL_MASK 0x00000f00
-#define WLAN_MBOX_INT_STATUS_RX_NOT_FULL_GET(x) (((x) & WLAN_MBOX_INT_STATUS_RX_NOT_FULL_MASK) >> WLAN_MBOX_INT_STATUS_RX_NOT_FULL_LSB)
-#define WLAN_MBOX_INT_STATUS_RX_NOT_FULL_SET(x) (((x) << WLAN_MBOX_INT_STATUS_RX_NOT_FULL_LSB) & WLAN_MBOX_INT_STATUS_RX_NOT_FULL_MASK)
-#define WLAN_MBOX_INT_STATUS_HOST_MSB 7
-#define WLAN_MBOX_INT_STATUS_HOST_LSB 0
-#define WLAN_MBOX_INT_STATUS_HOST_MASK 0x000000ff
-#define WLAN_MBOX_INT_STATUS_HOST_GET(x) (((x) & WLAN_MBOX_INT_STATUS_HOST_MASK) >> WLAN_MBOX_INT_STATUS_HOST_LSB)
-#define WLAN_MBOX_INT_STATUS_HOST_SET(x) (((x) << WLAN_MBOX_INT_STATUS_HOST_LSB) & WLAN_MBOX_INT_STATUS_HOST_MASK)
-
-#define WLAN_MBOX_INT_ENABLE_ADDRESS 0x0000005c
-#define WLAN_MBOX_INT_ENABLE_OFFSET 0x0000005c
-#define WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB 31
-#define WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB 28
-#define WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK 0xf0000000
-#define WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK) >> WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB)
-#define WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB) & WLAN_MBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK)
-#define WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB 27
-#define WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB 24
-#define WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK 0x0f000000
-#define WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK) >> WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB)
-#define WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB) & WLAN_MBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK)
-#define WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB 23
-#define WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB 20
-#define WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK 0x00f00000
-#define WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK) >> WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB)
-#define WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB) & WLAN_MBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK)
-#define WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_MSB 17
-#define WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_LSB 17
-#define WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_MASK 0x00020000
-#define WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_MASK) >> WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_LSB)
-#define WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_LSB) & WLAN_MBOX_INT_ENABLE_TX_OVERFLOW_MASK)
-#define WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_MSB 16
-#define WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_LSB 16
-#define WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_MASK 0x00010000
-#define WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_MASK) >> WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_LSB)
-#define WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_LSB) & WLAN_MBOX_INT_ENABLE_RX_UNDERFLOW_MASK)
-#define WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_MSB 15
-#define WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_LSB 12
-#define WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_MASK 0x0000f000
-#define WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_MASK) >> WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_LSB)
-#define WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_LSB) & WLAN_MBOX_INT_ENABLE_TX_NOT_EMPTY_MASK)
-#define WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_MSB 11
-#define WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_LSB 8
-#define WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_MASK 0x00000f00
-#define WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_MASK) >> WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_LSB)
-#define WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_LSB) & WLAN_MBOX_INT_ENABLE_RX_NOT_FULL_MASK)
-#define WLAN_MBOX_INT_ENABLE_HOST_MSB 7
-#define WLAN_MBOX_INT_ENABLE_HOST_LSB 0
-#define WLAN_MBOX_INT_ENABLE_HOST_MASK 0x000000ff
-#define WLAN_MBOX_INT_ENABLE_HOST_GET(x) (((x) & WLAN_MBOX_INT_ENABLE_HOST_MASK) >> WLAN_MBOX_INT_ENABLE_HOST_LSB)
-#define WLAN_MBOX_INT_ENABLE_HOST_SET(x) (((x) << WLAN_MBOX_INT_ENABLE_HOST_LSB) & WLAN_MBOX_INT_ENABLE_HOST_MASK)
-
-#define WLAN_INT_HOST_ADDRESS 0x00000060
-#define WLAN_INT_HOST_OFFSET 0x00000060
-#define WLAN_INT_HOST_VECTOR_MSB 7
-#define WLAN_INT_HOST_VECTOR_LSB 0
-#define WLAN_INT_HOST_VECTOR_MASK 0x000000ff
-#define WLAN_INT_HOST_VECTOR_GET(x) (((x) & WLAN_INT_HOST_VECTOR_MASK) >> WLAN_INT_HOST_VECTOR_LSB)
-#define WLAN_INT_HOST_VECTOR_SET(x) (((x) << WLAN_INT_HOST_VECTOR_LSB) & WLAN_INT_HOST_VECTOR_MASK)
-
-#define WLAN_LOCAL_COUNT_ADDRESS 0x00000080
-#define WLAN_LOCAL_COUNT_OFFSET 0x00000080
-#define WLAN_LOCAL_COUNT_VALUE_MSB 7
-#define WLAN_LOCAL_COUNT_VALUE_LSB 0
-#define WLAN_LOCAL_COUNT_VALUE_MASK 0x000000ff
-#define WLAN_LOCAL_COUNT_VALUE_GET(x) (((x) & WLAN_LOCAL_COUNT_VALUE_MASK) >> WLAN_LOCAL_COUNT_VALUE_LSB)
-#define WLAN_LOCAL_COUNT_VALUE_SET(x) (((x) << WLAN_LOCAL_COUNT_VALUE_LSB) & WLAN_LOCAL_COUNT_VALUE_MASK)
-
-#define WLAN_COUNT_INC_ADDRESS 0x000000a0
-#define WLAN_COUNT_INC_OFFSET 0x000000a0
-#define WLAN_COUNT_INC_VALUE_MSB 7
-#define WLAN_COUNT_INC_VALUE_LSB 0
-#define WLAN_COUNT_INC_VALUE_MASK 0x000000ff
-#define WLAN_COUNT_INC_VALUE_GET(x) (((x) & WLAN_COUNT_INC_VALUE_MASK) >> WLAN_COUNT_INC_VALUE_LSB)
-#define WLAN_COUNT_INC_VALUE_SET(x) (((x) << WLAN_COUNT_INC_VALUE_LSB) & WLAN_COUNT_INC_VALUE_MASK)
-
-#define WLAN_LOCAL_SCRATCH_ADDRESS 0x000000c0
-#define WLAN_LOCAL_SCRATCH_OFFSET 0x000000c0
-#define WLAN_LOCAL_SCRATCH_VALUE_MSB 7
-#define WLAN_LOCAL_SCRATCH_VALUE_LSB 0
-#define WLAN_LOCAL_SCRATCH_VALUE_MASK 0x000000ff
-#define WLAN_LOCAL_SCRATCH_VALUE_GET(x) (((x) & WLAN_LOCAL_SCRATCH_VALUE_MASK) >> WLAN_LOCAL_SCRATCH_VALUE_LSB)
-#define WLAN_LOCAL_SCRATCH_VALUE_SET(x) (((x) << WLAN_LOCAL_SCRATCH_VALUE_LSB) & WLAN_LOCAL_SCRATCH_VALUE_MASK)
-
-#define WLAN_USE_LOCAL_BUS_ADDRESS 0x000000e0
-#define WLAN_USE_LOCAL_BUS_OFFSET 0x000000e0
-#define WLAN_USE_LOCAL_BUS_PIN_INIT_MSB 0
-#define WLAN_USE_LOCAL_BUS_PIN_INIT_LSB 0
-#define WLAN_USE_LOCAL_BUS_PIN_INIT_MASK 0x00000001
-#define WLAN_USE_LOCAL_BUS_PIN_INIT_GET(x) (((x) & WLAN_USE_LOCAL_BUS_PIN_INIT_MASK) >> WLAN_USE_LOCAL_BUS_PIN_INIT_LSB)
-#define WLAN_USE_LOCAL_BUS_PIN_INIT_SET(x) (((x) << WLAN_USE_LOCAL_BUS_PIN_INIT_LSB) & WLAN_USE_LOCAL_BUS_PIN_INIT_MASK)
-
-#define WLAN_SDIO_CONFIG_ADDRESS 0x000000e4
-#define WLAN_SDIO_CONFIG_OFFSET 0x000000e4
-#define WLAN_SDIO_CONFIG_CCCR_IOR1_MSB 0
-#define WLAN_SDIO_CONFIG_CCCR_IOR1_LSB 0
-#define WLAN_SDIO_CONFIG_CCCR_IOR1_MASK 0x00000001
-#define WLAN_SDIO_CONFIG_CCCR_IOR1_GET(x) (((x) & WLAN_SDIO_CONFIG_CCCR_IOR1_MASK) >> WLAN_SDIO_CONFIG_CCCR_IOR1_LSB)
-#define WLAN_SDIO_CONFIG_CCCR_IOR1_SET(x) (((x) << WLAN_SDIO_CONFIG_CCCR_IOR1_LSB) & WLAN_SDIO_CONFIG_CCCR_IOR1_MASK)
-
-#define WLAN_MBOX_DEBUG_ADDRESS 0x000000e8
-#define WLAN_MBOX_DEBUG_OFFSET 0x000000e8
-#define WLAN_MBOX_DEBUG_SEL_MSB 2
-#define WLAN_MBOX_DEBUG_SEL_LSB 0
-#define WLAN_MBOX_DEBUG_SEL_MASK 0x00000007
-#define WLAN_MBOX_DEBUG_SEL_GET(x) (((x) & WLAN_MBOX_DEBUG_SEL_MASK) >> WLAN_MBOX_DEBUG_SEL_LSB)
-#define WLAN_MBOX_DEBUG_SEL_SET(x) (((x) << WLAN_MBOX_DEBUG_SEL_LSB) & WLAN_MBOX_DEBUG_SEL_MASK)
-
-#define WLAN_MBOX_FIFO_RESET_ADDRESS 0x000000ec
-#define WLAN_MBOX_FIFO_RESET_OFFSET 0x000000ec
-#define WLAN_MBOX_FIFO_RESET_INIT_MSB 0
-#define WLAN_MBOX_FIFO_RESET_INIT_LSB 0
-#define WLAN_MBOX_FIFO_RESET_INIT_MASK 0x00000001
-#define WLAN_MBOX_FIFO_RESET_INIT_GET(x) (((x) & WLAN_MBOX_FIFO_RESET_INIT_MASK) >> WLAN_MBOX_FIFO_RESET_INIT_LSB)
-#define WLAN_MBOX_FIFO_RESET_INIT_SET(x) (((x) << WLAN_MBOX_FIFO_RESET_INIT_LSB) & WLAN_MBOX_FIFO_RESET_INIT_MASK)
-
-#define WLAN_MBOX_TXFIFO_POP_ADDRESS 0x000000f0
-#define WLAN_MBOX_TXFIFO_POP_OFFSET 0x000000f0
-#define WLAN_MBOX_TXFIFO_POP_DATA_MSB 0
-#define WLAN_MBOX_TXFIFO_POP_DATA_LSB 0
-#define WLAN_MBOX_TXFIFO_POP_DATA_MASK 0x00000001
-#define WLAN_MBOX_TXFIFO_POP_DATA_GET(x) (((x) & WLAN_MBOX_TXFIFO_POP_DATA_MASK) >> WLAN_MBOX_TXFIFO_POP_DATA_LSB)
-#define WLAN_MBOX_TXFIFO_POP_DATA_SET(x) (((x) << WLAN_MBOX_TXFIFO_POP_DATA_LSB) & WLAN_MBOX_TXFIFO_POP_DATA_MASK)
-
-#define WLAN_MBOX_RXFIFO_POP_ADDRESS 0x00000100
-#define WLAN_MBOX_RXFIFO_POP_OFFSET 0x00000100
-#define WLAN_MBOX_RXFIFO_POP_DATA_MSB 0
-#define WLAN_MBOX_RXFIFO_POP_DATA_LSB 0
-#define WLAN_MBOX_RXFIFO_POP_DATA_MASK 0x00000001
-#define WLAN_MBOX_RXFIFO_POP_DATA_GET(x) (((x) & WLAN_MBOX_RXFIFO_POP_DATA_MASK) >> WLAN_MBOX_RXFIFO_POP_DATA_LSB)
-#define WLAN_MBOX_RXFIFO_POP_DATA_SET(x) (((x) << WLAN_MBOX_RXFIFO_POP_DATA_LSB) & WLAN_MBOX_RXFIFO_POP_DATA_MASK)
-
-#define WLAN_SDIO_DEBUG_ADDRESS 0x00000110
-#define WLAN_SDIO_DEBUG_OFFSET 0x00000110
-#define WLAN_SDIO_DEBUG_SEL_MSB 3
-#define WLAN_SDIO_DEBUG_SEL_LSB 0
-#define WLAN_SDIO_DEBUG_SEL_MASK 0x0000000f
-#define WLAN_SDIO_DEBUG_SEL_GET(x) (((x) & WLAN_SDIO_DEBUG_SEL_MASK) >> WLAN_SDIO_DEBUG_SEL_LSB)
-#define WLAN_SDIO_DEBUG_SEL_SET(x) (((x) << WLAN_SDIO_DEBUG_SEL_LSB) & WLAN_SDIO_DEBUG_SEL_MASK)
-
-#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS 0x00000114
-#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_OFFSET 0x00000114
-#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MSB 27
-#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB 2
-#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
-#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB)
-#define WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_GMBOX0_DMA_RX_DESCRIPTOR_BASE_ADDRESS_MASK)
-
-#define WLAN_GMBOX0_DMA_RX_CONTROL_ADDRESS 0x00000118
-#define WLAN_GMBOX0_DMA_RX_CONTROL_OFFSET 0x00000118
-#define WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_MSB 2
-#define WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_LSB 2
-#define WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_MASK 0x00000004
-#define WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_GET(x) (((x) & WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_MASK) >> WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_LSB)
-#define WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_SET(x) (((x) << WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_LSB) & WLAN_GMBOX0_DMA_RX_CONTROL_RESUME_MASK)
-#define WLAN_GMBOX0_DMA_RX_CONTROL_START_MSB 1
-#define WLAN_GMBOX0_DMA_RX_CONTROL_START_LSB 1
-#define WLAN_GMBOX0_DMA_RX_CONTROL_START_MASK 0x00000002
-#define WLAN_GMBOX0_DMA_RX_CONTROL_START_GET(x) (((x) & WLAN_GMBOX0_DMA_RX_CONTROL_START_MASK) >> WLAN_GMBOX0_DMA_RX_CONTROL_START_LSB)
-#define WLAN_GMBOX0_DMA_RX_CONTROL_START_SET(x) (((x) << WLAN_GMBOX0_DMA_RX_CONTROL_START_LSB) & WLAN_GMBOX0_DMA_RX_CONTROL_START_MASK)
-#define WLAN_GMBOX0_DMA_RX_CONTROL_STOP_MSB 0
-#define WLAN_GMBOX0_DMA_RX_CONTROL_STOP_LSB 0
-#define WLAN_GMBOX0_DMA_RX_CONTROL_STOP_MASK 0x00000001
-#define WLAN_GMBOX0_DMA_RX_CONTROL_STOP_GET(x) (((x) & WLAN_GMBOX0_DMA_RX_CONTROL_STOP_MASK) >> WLAN_GMBOX0_DMA_RX_CONTROL_STOP_LSB)
-#define WLAN_GMBOX0_DMA_RX_CONTROL_STOP_SET(x) (((x) << WLAN_GMBOX0_DMA_RX_CONTROL_STOP_LSB) & WLAN_GMBOX0_DMA_RX_CONTROL_STOP_MASK)
-
-#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS 0x0000011c
-#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_OFFSET 0x0000011c
-#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MSB 27
-#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB 2
-#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK 0x0ffffffc
-#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_GET(x) (((x) & WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK) >> WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB)
-#define WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_SET(x) (((x) << WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_LSB) & WLAN_GMBOX0_DMA_TX_DESCRIPTOR_BASE_ADDRESS_MASK)
-
-#define WLAN_GMBOX0_DMA_TX_CONTROL_ADDRESS 0x00000120
-#define WLAN_GMBOX0_DMA_TX_CONTROL_OFFSET 0x00000120
-#define WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_MSB 2
-#define WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_LSB 2
-#define WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_MASK 0x00000004
-#define WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_GET(x) (((x) & WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_MASK) >> WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_LSB)
-#define WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_SET(x) (((x) << WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_LSB) & WLAN_GMBOX0_DMA_TX_CONTROL_RESUME_MASK)
-#define WLAN_GMBOX0_DMA_TX_CONTROL_START_MSB 1
-#define WLAN_GMBOX0_DMA_TX_CONTROL_START_LSB 1
-#define WLAN_GMBOX0_DMA_TX_CONTROL_START_MASK 0x00000002
-#define WLAN_GMBOX0_DMA_TX_CONTROL_START_GET(x) (((x) & WLAN_GMBOX0_DMA_TX_CONTROL_START_MASK) >> WLAN_GMBOX0_DMA_TX_CONTROL_START_LSB)
-#define WLAN_GMBOX0_DMA_TX_CONTROL_START_SET(x) (((x) << WLAN_GMBOX0_DMA_TX_CONTROL_START_LSB) & WLAN_GMBOX0_DMA_TX_CONTROL_START_MASK)
-#define WLAN_GMBOX0_DMA_TX_CONTROL_STOP_MSB 0
-#define WLAN_GMBOX0_DMA_TX_CONTROL_STOP_LSB 0
-#define WLAN_GMBOX0_DMA_TX_CONTROL_STOP_MASK 0x00000001
-#define WLAN_GMBOX0_DMA_TX_CONTROL_STOP_GET(x) (((x) & WLAN_GMBOX0_DMA_TX_CONTROL_STOP_MASK) >> WLAN_GMBOX0_DMA_TX_CONTROL_STOP_LSB)
-#define WLAN_GMBOX0_DMA_TX_CONTROL_STOP_SET(x) (((x) << WLAN_GMBOX0_DMA_TX_CONTROL_STOP_LSB) & WLAN_GMBOX0_DMA_TX_CONTROL_STOP_MASK)
-
-#define WLAN_GMBOX_INT_STATUS_ADDRESS 0x00000124
-#define WLAN_GMBOX_INT_STATUS_OFFSET 0x00000124
-#define WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_MSB 6
-#define WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_LSB 6
-#define WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_MASK 0x00000040
-#define WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_MASK) >> WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_LSB)
-#define WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_LSB) & WLAN_GMBOX_INT_STATUS_TX_OVERFLOW_MASK)
-#define WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_MSB 5
-#define WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_LSB 5
-#define WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_MASK 0x00000020
-#define WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_MASK) >> WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_LSB)
-#define WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_LSB) & WLAN_GMBOX_INT_STATUS_RX_UNDERFLOW_MASK)
-#define WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_MSB 4
-#define WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_LSB 4
-#define WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_MASK 0x00000010
-#define WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_MASK) >> WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_LSB)
-#define WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_LSB) & WLAN_GMBOX_INT_STATUS_RX_DMA_COMPLETE_MASK)
-#define WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MSB 3
-#define WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB 3
-#define WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK 0x00000008
-#define WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK) >> WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB)
-#define WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_LSB) & WLAN_GMBOX_INT_STATUS_TX_DMA_EOM_COMPLETE_MASK)
-#define WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_MSB 2
-#define WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_LSB 2
-#define WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_MASK 0x00000004
-#define WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_MASK) >> WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_LSB)
-#define WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_LSB) & WLAN_GMBOX_INT_STATUS_TX_DMA_COMPLETE_MASK)
-#define WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_MSB 1
-#define WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_LSB 1
-#define WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_MASK 0x00000002
-#define WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_MASK) >> WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_LSB)
-#define WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_LSB) & WLAN_GMBOX_INT_STATUS_TX_NOT_EMPTY_MASK)
-#define WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_MSB 0
-#define WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_LSB 0
-#define WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_MASK 0x00000001
-#define WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_GET(x) (((x) & WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_MASK) >> WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_LSB)
-#define WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_SET(x) (((x) << WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_LSB) & WLAN_GMBOX_INT_STATUS_RX_NOT_FULL_MASK)
-
-#define WLAN_GMBOX_INT_ENABLE_ADDRESS 0x00000128
-#define WLAN_GMBOX_INT_ENABLE_OFFSET 0x00000128
-#define WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_MSB 6
-#define WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_LSB 6
-#define WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_MASK 0x00000040
-#define WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_MASK) >> WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_LSB)
-#define WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_LSB) & WLAN_GMBOX_INT_ENABLE_TX_OVERFLOW_MASK)
-#define WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_MSB 5
-#define WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_LSB 5
-#define WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_MASK 0x00000020
-#define WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_MASK) >> WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_LSB)
-#define WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_LSB) & WLAN_GMBOX_INT_ENABLE_RX_UNDERFLOW_MASK)
-#define WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MSB 4
-#define WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB 4
-#define WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK 0x00000010
-#define WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK) >> WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB)
-#define WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_LSB) & WLAN_GMBOX_INT_ENABLE_RX_DMA_COMPLETE_MASK)
-#define WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MSB 3
-#define WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB 3
-#define WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK 0x00000008
-#define WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK) >> WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB)
-#define WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_LSB) & WLAN_GMBOX_INT_ENABLE_TX_DMA_EOM_COMPLETE_MASK)
-#define WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MSB 2
-#define WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB 2
-#define WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK 0x00000004
-#define WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK) >> WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB)
-#define WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_LSB) & WLAN_GMBOX_INT_ENABLE_TX_DMA_COMPLETE_MASK)
-#define WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_MSB 1
-#define WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_LSB 1
-#define WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_MASK 0x00000002
-#define WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_MASK) >> WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_LSB)
-#define WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_LSB) & WLAN_GMBOX_INT_ENABLE_TX_NOT_EMPTY_MASK)
-#define WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_MSB 0
-#define WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_LSB 0
-#define WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_MASK 0x00000001
-#define WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_GET(x) (((x) & WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_MASK) >> WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_LSB)
-#define WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_SET(x) (((x) << WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_LSB) & WLAN_GMBOX_INT_ENABLE_RX_NOT_FULL_MASK)
-
-#define WLAN_HOST_IF_WINDOW_ADDRESS 0x00002000
-#define WLAN_HOST_IF_WINDOW_OFFSET 0x00002000
-#define WLAN_HOST_IF_WINDOW_DATA_MSB 7
-#define WLAN_HOST_IF_WINDOW_DATA_LSB 0
-#define WLAN_HOST_IF_WINDOW_DATA_MASK 0x000000ff
-#define WLAN_HOST_IF_WINDOW_DATA_GET(x) (((x) & WLAN_HOST_IF_WINDOW_DATA_MASK) >> WLAN_HOST_IF_WINDOW_DATA_LSB)
-#define WLAN_HOST_IF_WINDOW_DATA_SET(x) (((x) << WLAN_HOST_IF_WINDOW_DATA_LSB) & WLAN_HOST_IF_WINDOW_DATA_MASK)
-
-#endif /* _MBOX_WLAN_REG_H_ */
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_reg.h
deleted file mode 100644
index fcafec88a6b..00000000000
--- a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_reg.h
+++ /dev/null
@@ -1,187 +0,0 @@
-// ------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-// ------------------------------------------------------------------
-//===================================================================
-// Author(s): ="Atheros"
-//===================================================================
-
-
-#include "rtc_wlan_reg.h"
-
-#ifndef BT_HEADERS
-
-#define RESET_CONTROL_ADDRESS WLAN_RESET_CONTROL_ADDRESS
-#define RESET_CONTROL_OFFSET WLAN_RESET_CONTROL_OFFSET
-#define RESET_CONTROL_DEBUG_UART_RST_MSB WLAN_RESET_CONTROL_DEBUG_UART_RST_MSB
-#define RESET_CONTROL_DEBUG_UART_RST_LSB WLAN_RESET_CONTROL_DEBUG_UART_RST_LSB
-#define RESET_CONTROL_DEBUG_UART_RST_MASK WLAN_RESET_CONTROL_DEBUG_UART_RST_MASK
-#define RESET_CONTROL_DEBUG_UART_RST_GET(x) WLAN_RESET_CONTROL_DEBUG_UART_RST_GET(x)
-#define RESET_CONTROL_DEBUG_UART_RST_SET(x) WLAN_RESET_CONTROL_DEBUG_UART_RST_SET(x)
-#define RESET_CONTROL_BB_COLD_RST_MSB WLAN_RESET_CONTROL_BB_COLD_RST_MSB
-#define RESET_CONTROL_BB_COLD_RST_LSB WLAN_RESET_CONTROL_BB_COLD_RST_LSB
-#define RESET_CONTROL_BB_COLD_RST_MASK WLAN_RESET_CONTROL_BB_COLD_RST_MASK
-#define RESET_CONTROL_BB_COLD_RST_GET(x) WLAN_RESET_CONTROL_BB_COLD_RST_GET(x)
-#define RESET_CONTROL_BB_COLD_RST_SET(x) WLAN_RESET_CONTROL_BB_COLD_RST_SET(x)
-#define RESET_CONTROL_BB_WARM_RST_MSB WLAN_RESET_CONTROL_BB_WARM_RST_MSB
-#define RESET_CONTROL_BB_WARM_RST_LSB WLAN_RESET_CONTROL_BB_WARM_RST_LSB
-#define RESET_CONTROL_BB_WARM_RST_MASK WLAN_RESET_CONTROL_BB_WARM_RST_MASK
-#define RESET_CONTROL_BB_WARM_RST_GET(x) WLAN_RESET_CONTROL_BB_WARM_RST_GET(x)
-#define RESET_CONTROL_BB_WARM_RST_SET(x) WLAN_RESET_CONTROL_BB_WARM_RST_SET(x)
-#define RESET_CONTROL_CPU_INIT_RESET_MSB WLAN_RESET_CONTROL_CPU_INIT_RESET_MSB
-#define RESET_CONTROL_CPU_INIT_RESET_LSB WLAN_RESET_CONTROL_CPU_INIT_RESET_LSB
-#define RESET_CONTROL_CPU_INIT_RESET_MASK WLAN_RESET_CONTROL_CPU_INIT_RESET_MASK
-#define RESET_CONTROL_CPU_INIT_RESET_GET(x) WLAN_RESET_CONTROL_CPU_INIT_RESET_GET(x)
-#define RESET_CONTROL_CPU_INIT_RESET_SET(x) WLAN_RESET_CONTROL_CPU_INIT_RESET_SET(x)
-#define RESET_CONTROL_VMC_REMAP_RESET_MSB WLAN_RESET_CONTROL_VMC_REMAP_RESET_MSB
-#define RESET_CONTROL_VMC_REMAP_RESET_LSB WLAN_RESET_CONTROL_VMC_REMAP_RESET_LSB
-#define RESET_CONTROL_VMC_REMAP_RESET_MASK WLAN_RESET_CONTROL_VMC_REMAP_RESET_MASK
-#define RESET_CONTROL_VMC_REMAP_RESET_GET(x) WLAN_RESET_CONTROL_VMC_REMAP_RESET_GET(x)
-#define RESET_CONTROL_VMC_REMAP_RESET_SET(x) WLAN_RESET_CONTROL_VMC_REMAP_RESET_SET(x)
-#define RESET_CONTROL_RST_OUT_MSB WLAN_RESET_CONTROL_RST_OUT_MSB
-#define RESET_CONTROL_RST_OUT_LSB WLAN_RESET_CONTROL_RST_OUT_LSB
-#define RESET_CONTROL_RST_OUT_MASK WLAN_RESET_CONTROL_RST_OUT_MASK
-#define RESET_CONTROL_RST_OUT_GET(x) WLAN_RESET_CONTROL_RST_OUT_GET(x)
-#define RESET_CONTROL_RST_OUT_SET(x) WLAN_RESET_CONTROL_RST_OUT_SET(x)
-#define RESET_CONTROL_COLD_RST_MSB WLAN_RESET_CONTROL_COLD_RST_MSB
-#define RESET_CONTROL_COLD_RST_LSB WLAN_RESET_CONTROL_COLD_RST_LSB
-#define RESET_CONTROL_COLD_RST_MASK WLAN_RESET_CONTROL_COLD_RST_MASK
-#define RESET_CONTROL_COLD_RST_GET(x) WLAN_RESET_CONTROL_COLD_RST_GET(x)
-#define RESET_CONTROL_COLD_RST_SET(x) WLAN_RESET_CONTROL_COLD_RST_SET(x)
-#define RESET_CONTROL_WARM_RST_MSB WLAN_RESET_CONTROL_WARM_RST_MSB
-#define RESET_CONTROL_WARM_RST_LSB WLAN_RESET_CONTROL_WARM_RST_LSB
-#define RESET_CONTROL_WARM_RST_MASK WLAN_RESET_CONTROL_WARM_RST_MASK
-#define RESET_CONTROL_WARM_RST_GET(x) WLAN_RESET_CONTROL_WARM_RST_GET(x)
-#define RESET_CONTROL_WARM_RST_SET(x) WLAN_RESET_CONTROL_WARM_RST_SET(x)
-#define RESET_CONTROL_CPU_WARM_RST_MSB WLAN_RESET_CONTROL_CPU_WARM_RST_MSB
-#define RESET_CONTROL_CPU_WARM_RST_LSB WLAN_RESET_CONTROL_CPU_WARM_RST_LSB
-#define RESET_CONTROL_CPU_WARM_RST_MASK WLAN_RESET_CONTROL_CPU_WARM_RST_MASK
-#define RESET_CONTROL_CPU_WARM_RST_GET(x) WLAN_RESET_CONTROL_CPU_WARM_RST_GET(x)
-#define RESET_CONTROL_CPU_WARM_RST_SET(x) WLAN_RESET_CONTROL_CPU_WARM_RST_SET(x)
-#define RESET_CONTROL_MAC_COLD_RST_MSB WLAN_RESET_CONTROL_MAC_COLD_RST_MSB
-#define RESET_CONTROL_MAC_COLD_RST_LSB WLAN_RESET_CONTROL_MAC_COLD_RST_LSB
-#define RESET_CONTROL_MAC_COLD_RST_MASK WLAN_RESET_CONTROL_MAC_COLD_RST_MASK
-#define RESET_CONTROL_MAC_COLD_RST_GET(x) WLAN_RESET_CONTROL_MAC_COLD_RST_GET(x)
-#define RESET_CONTROL_MAC_COLD_RST_SET(x) WLAN_RESET_CONTROL_MAC_COLD_RST_SET(x)
-#define RESET_CONTROL_MAC_WARM_RST_MSB WLAN_RESET_CONTROL_MAC_WARM_RST_MSB
-#define RESET_CONTROL_MAC_WARM_RST_LSB WLAN_RESET_CONTROL_MAC_WARM_RST_LSB
-#define RESET_CONTROL_MAC_WARM_RST_MASK WLAN_RESET_CONTROL_MAC_WARM_RST_MASK
-#define RESET_CONTROL_MAC_WARM_RST_GET(x) WLAN_RESET_CONTROL_MAC_WARM_RST_GET(x)
-#define RESET_CONTROL_MAC_WARM_RST_SET(x) WLAN_RESET_CONTROL_MAC_WARM_RST_SET(x)
-#define RESET_CONTROL_MBOX_RST_MSB WLAN_RESET_CONTROL_MBOX_RST_MSB
-#define RESET_CONTROL_MBOX_RST_LSB WLAN_RESET_CONTROL_MBOX_RST_LSB
-#define RESET_CONTROL_MBOX_RST_MASK WLAN_RESET_CONTROL_MBOX_RST_MASK
-#define RESET_CONTROL_MBOX_RST_GET(x) WLAN_RESET_CONTROL_MBOX_RST_GET(x)
-#define RESET_CONTROL_MBOX_RST_SET(x) WLAN_RESET_CONTROL_MBOX_RST_SET(x)
-#define RESET_CONTROL_UART_RST_MSB WLAN_RESET_CONTROL_UART_RST_MSB
-#define RESET_CONTROL_UART_RST_LSB WLAN_RESET_CONTROL_UART_RST_LSB
-#define RESET_CONTROL_UART_RST_MASK WLAN_RESET_CONTROL_UART_RST_MASK
-#define RESET_CONTROL_UART_RST_GET(x) WLAN_RESET_CONTROL_UART_RST_GET(x)
-#define RESET_CONTROL_UART_RST_SET(x) WLAN_RESET_CONTROL_UART_RST_SET(x)
-#define RESET_CONTROL_SI0_RST_MSB WLAN_RESET_CONTROL_SI0_RST_MSB
-#define RESET_CONTROL_SI0_RST_LSB WLAN_RESET_CONTROL_SI0_RST_LSB
-#define RESET_CONTROL_SI0_RST_MASK WLAN_RESET_CONTROL_SI0_RST_MASK
-#define RESET_CONTROL_SI0_RST_GET(x) WLAN_RESET_CONTROL_SI0_RST_GET(x)
-#define RESET_CONTROL_SI0_RST_SET(x) WLAN_RESET_CONTROL_SI0_RST_SET(x)
-#define CPU_CLOCK_ADDRESS WLAN_CPU_CLOCK_ADDRESS
-#define CPU_CLOCK_OFFSET WLAN_CPU_CLOCK_OFFSET
-#define CPU_CLOCK_STANDARD_MSB WLAN_CPU_CLOCK_STANDARD_MSB
-#define CPU_CLOCK_STANDARD_LSB WLAN_CPU_CLOCK_STANDARD_LSB
-#define CPU_CLOCK_STANDARD_MASK WLAN_CPU_CLOCK_STANDARD_MASK
-#define CPU_CLOCK_STANDARD_GET(x) WLAN_CPU_CLOCK_STANDARD_GET(x)
-#define CPU_CLOCK_STANDARD_SET(x) WLAN_CPU_CLOCK_STANDARD_SET(x)
-#define CLOCK_OUT_ADDRESS WLAN_CLOCK_OUT_ADDRESS
-#define CLOCK_OUT_OFFSET WLAN_CLOCK_OUT_OFFSET
-#define CLOCK_OUT_SELECT_MSB WLAN_CLOCK_OUT_SELECT_MSB
-#define CLOCK_OUT_SELECT_LSB WLAN_CLOCK_OUT_SELECT_LSB
-#define CLOCK_OUT_SELECT_MASK WLAN_CLOCK_OUT_SELECT_MASK
-#define CLOCK_OUT_SELECT_GET(x) WLAN_CLOCK_OUT_SELECT_GET(x)
-#define CLOCK_OUT_SELECT_SET(x) WLAN_CLOCK_OUT_SELECT_SET(x)
-#define CLOCK_CONTROL_ADDRESS WLAN_CLOCK_CONTROL_ADDRESS
-#define CLOCK_CONTROL_OFFSET WLAN_CLOCK_CONTROL_OFFSET
-#define CLOCK_CONTROL_LF_CLK32_MSB WLAN_CLOCK_CONTROL_LF_CLK32_MSB
-#define CLOCK_CONTROL_LF_CLK32_LSB WLAN_CLOCK_CONTROL_LF_CLK32_LSB
-#define CLOCK_CONTROL_LF_CLK32_MASK WLAN_CLOCK_CONTROL_LF_CLK32_MASK
-#define CLOCK_CONTROL_LF_CLK32_GET(x) WLAN_CLOCK_CONTROL_LF_CLK32_GET(x)
-#define CLOCK_CONTROL_LF_CLK32_SET(x) WLAN_CLOCK_CONTROL_LF_CLK32_SET(x)
-#define CLOCK_CONTROL_SI0_CLK_MSB WLAN_CLOCK_CONTROL_SI0_CLK_MSB
-#define CLOCK_CONTROL_SI0_CLK_LSB WLAN_CLOCK_CONTROL_SI0_CLK_LSB
-#define CLOCK_CONTROL_SI0_CLK_MASK WLAN_CLOCK_CONTROL_SI0_CLK_MASK
-#define CLOCK_CONTROL_SI0_CLK_GET(x) WLAN_CLOCK_CONTROL_SI0_CLK_GET(x)
-#define CLOCK_CONTROL_SI0_CLK_SET(x) WLAN_CLOCK_CONTROL_SI0_CLK_SET(x)
-#define RESET_CAUSE_ADDRESS WLAN_RESET_CAUSE_ADDRESS
-#define RESET_CAUSE_OFFSET WLAN_RESET_CAUSE_OFFSET
-#define RESET_CAUSE_LAST_MSB WLAN_RESET_CAUSE_LAST_MSB
-#define RESET_CAUSE_LAST_LSB WLAN_RESET_CAUSE_LAST_LSB
-#define RESET_CAUSE_LAST_MASK WLAN_RESET_CAUSE_LAST_MASK
-#define RESET_CAUSE_LAST_GET(x) WLAN_RESET_CAUSE_LAST_GET(x)
-#define RESET_CAUSE_LAST_SET(x) WLAN_RESET_CAUSE_LAST_SET(x)
-#define SYSTEM_SLEEP_ADDRESS WLAN_SYSTEM_SLEEP_ADDRESS
-#define SYSTEM_SLEEP_OFFSET WLAN_SYSTEM_SLEEP_OFFSET
-#define SYSTEM_SLEEP_HOST_IF_MSB WLAN_SYSTEM_SLEEP_HOST_IF_MSB
-#define SYSTEM_SLEEP_HOST_IF_LSB WLAN_SYSTEM_SLEEP_HOST_IF_LSB
-#define SYSTEM_SLEEP_HOST_IF_MASK WLAN_SYSTEM_SLEEP_HOST_IF_MASK
-#define SYSTEM_SLEEP_HOST_IF_GET(x) WLAN_SYSTEM_SLEEP_HOST_IF_GET(x)
-#define SYSTEM_SLEEP_HOST_IF_SET(x) WLAN_SYSTEM_SLEEP_HOST_IF_SET(x)
-#define SYSTEM_SLEEP_MBOX_MSB WLAN_SYSTEM_SLEEP_MBOX_MSB
-#define SYSTEM_SLEEP_MBOX_LSB WLAN_SYSTEM_SLEEP_MBOX_LSB
-#define SYSTEM_SLEEP_MBOX_MASK WLAN_SYSTEM_SLEEP_MBOX_MASK
-#define SYSTEM_SLEEP_MBOX_GET(x) WLAN_SYSTEM_SLEEP_MBOX_GET(x)
-#define SYSTEM_SLEEP_MBOX_SET(x) WLAN_SYSTEM_SLEEP_MBOX_SET(x)
-#define SYSTEM_SLEEP_MAC_IF_MSB WLAN_SYSTEM_SLEEP_MAC_IF_MSB
-#define SYSTEM_SLEEP_MAC_IF_LSB WLAN_SYSTEM_SLEEP_MAC_IF_LSB
-#define SYSTEM_SLEEP_MAC_IF_MASK WLAN_SYSTEM_SLEEP_MAC_IF_MASK
-#define SYSTEM_SLEEP_MAC_IF_GET(x) WLAN_SYSTEM_SLEEP_MAC_IF_GET(x)
-#define SYSTEM_SLEEP_MAC_IF_SET(x) WLAN_SYSTEM_SLEEP_MAC_IF_SET(x)
-#define SYSTEM_SLEEP_LIGHT_MSB WLAN_SYSTEM_SLEEP_LIGHT_MSB
-#define SYSTEM_SLEEP_LIGHT_LSB WLAN_SYSTEM_SLEEP_LIGHT_LSB
-#define SYSTEM_SLEEP_LIGHT_MASK WLAN_SYSTEM_SLEEP_LIGHT_MASK
-#define SYSTEM_SLEEP_LIGHT_GET(x) WLAN_SYSTEM_SLEEP_LIGHT_GET(x)
-#define SYSTEM_SLEEP_LIGHT_SET(x) WLAN_SYSTEM_SLEEP_LIGHT_SET(x)
-#define SYSTEM_SLEEP_DISABLE_MSB WLAN_SYSTEM_SLEEP_DISABLE_MSB
-#define SYSTEM_SLEEP_DISABLE_LSB WLAN_SYSTEM_SLEEP_DISABLE_LSB
-#define SYSTEM_SLEEP_DISABLE_MASK WLAN_SYSTEM_SLEEP_DISABLE_MASK
-#define SYSTEM_SLEEP_DISABLE_GET(x) WLAN_SYSTEM_SLEEP_DISABLE_GET(x)
-#define SYSTEM_SLEEP_DISABLE_SET(x) WLAN_SYSTEM_SLEEP_DISABLE_SET(x)
-#define LPO_INIT_DIVIDEND_INT_ADDRESS WLAN_LPO_INIT_DIVIDEND_INT_ADDRESS
-#define LPO_INIT_DIVIDEND_INT_OFFSET WLAN_LPO_INIT_DIVIDEND_INT_OFFSET
-#define LPO_INIT_DIVIDEND_INT_VALUE_MSB WLAN_LPO_INIT_DIVIDEND_INT_VALUE_MSB
-#define LPO_INIT_DIVIDEND_INT_VALUE_LSB WLAN_LPO_INIT_DIVIDEND_INT_VALUE_LSB
-#define LPO_INIT_DIVIDEND_INT_VALUE_MASK WLAN_LPO_INIT_DIVIDEND_INT_VALUE_MASK
-#define LPO_INIT_DIVIDEND_INT_VALUE_GET(x) WLAN_LPO_INIT_DIVIDEND_INT_VALUE_GET(x)
-#define LPO_INIT_DIVIDEND_INT_VALUE_SET(x) WLAN_LPO_INIT_DIVIDEND_INT_VALUE_SET(x)
-#define LPO_INIT_DIVIDEND_FRACTION_ADDRESS WLAN_LPO_INIT_DIVIDEND_FRACTION_ADDRESS
-#define LPO_INIT_DIVIDEND_FRACTION_OFFSET WLAN_LPO_INIT_DIVIDEND_FRACTION_OFFSET
-#define LPO_INIT_DIVIDEND_FRACTION_VALUE_MSB WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_MSB
-#define LPO_INIT_DIVIDEND_FRACTION_VALUE_LSB WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_LSB
-#define LPO_INIT_DIVIDEND_FRACTION_VALUE_MASK WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_MASK
-#define LPO_INIT_DIVIDEND_FRACTION_VALUE_GET(x) WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_GET(x)
-#define LPO_INIT_DIVIDEND_FRACTION_VALUE_SET(x) WLAN_LPO_INIT_DIVIDEND_FRACTION_VALUE_SET(x)
-#define LPO_CAL_ADDRESS WLAN_LPO_CAL_ADDRESS
-#define LPO_CAL_OFFSET WLAN_LPO_CAL_OFFSET
-#define LPO_CAL_ENABLE_MSB WLAN_LPO_CAL_ENABLE_MSB
-#define LPO_CAL_ENABLE_LSB WLAN_LPO_CAL_ENABLE_LSB
-#define LPO_CAL_ENABLE_MASK WLAN_LPO_CAL_ENABLE_MASK
-#define LPO_CAL_ENABLE_GET(x) WLAN_LPO_CAL_ENABLE_GET(x)
-#define LPO_CAL_ENABLE_SET(x) WLAN_LPO_CAL_ENABLE_SET(x)
-#define LPO_CAL_COUNT_MSB WLAN_LPO_CAL_COUNT_MSB
-#define LPO_CAL_COUNT_LSB WLAN_LPO_CAL_COUNT_LSB
-#define LPO_CAL_COUNT_MASK WLAN_LPO_CAL_COUNT_MASK
-#define LPO_CAL_COUNT_GET(x) WLAN_LPO_CAL_COUNT_GET(x)
-#define LPO_CAL_COUNT_SET(x) WLAN_LPO_CAL_COUNT_SET(x)
-
-#endif
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_wlan_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_wlan_reg.h
deleted file mode 100644
index 5c048ff51b0..00000000000
--- a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/rtc_wlan_reg.h
+++ /dev/null
@@ -1,162 +0,0 @@
-// ------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-// ------------------------------------------------------------------
-//===================================================================
-// Author(s): ="Atheros"
-//===================================================================
-
-
-#ifndef _RTC_WLAN_REG_REG_H_
-#define _RTC_WLAN_REG_REG_H_
-
-#define WLAN_RESET_CONTROL_ADDRESS 0x00000000
-#define WLAN_RESET_CONTROL_OFFSET 0x00000000
-#define WLAN_RESET_CONTROL_DEBUG_UART_RST_MSB 14
-#define WLAN_RESET_CONTROL_DEBUG_UART_RST_LSB 14
-#define WLAN_RESET_CONTROL_DEBUG_UART_RST_MASK 0x00004000
-#define WLAN_RESET_CONTROL_DEBUG_UART_RST_GET(x) (((x) & WLAN_RESET_CONTROL_DEBUG_UART_RST_MASK) >> WLAN_RESET_CONTROL_DEBUG_UART_RST_LSB)
-#define WLAN_RESET_CONTROL_DEBUG_UART_RST_SET(x) (((x) << WLAN_RESET_CONTROL_DEBUG_UART_RST_LSB) & WLAN_RESET_CONTROL_DEBUG_UART_RST_MASK)
-#define WLAN_RESET_CONTROL_BB_COLD_RST_MSB 13
-#define WLAN_RESET_CONTROL_BB_COLD_RST_LSB 13
-#define WLAN_RESET_CONTROL_BB_COLD_RST_MASK 0x00002000
-#define WLAN_RESET_CONTROL_BB_COLD_RST_GET(x) (((x) & WLAN_RESET_CONTROL_BB_COLD_RST_MASK) >> WLAN_RESET_CONTROL_BB_COLD_RST_LSB)
-#define WLAN_RESET_CONTROL_BB_COLD_RST_SET(x) (((x) << WLAN_RESET_CONTROL_BB_COLD_RST_LSB) & WLAN_RESET_CONTROL_BB_COLD_RST_MASK)
-#define WLAN_RESET_CONTROL_BB_WARM_RST_MSB 12
-#define WLAN_RESET_CONTROL_BB_WARM_RST_LSB 12
-#define WLAN_RESET_CONTROL_BB_WARM_RST_MASK 0x00001000
-#define WLAN_RESET_CONTROL_BB_WARM_RST_GET(x) (((x) & WLAN_RESET_CONTROL_BB_WARM_RST_MASK) >> WLAN_RESET_CONTROL_BB_WARM_RST_LSB)
-#define WLAN_RESET_CONTROL_BB_WARM_RST_SET(x) (((x) << WLAN_RESET_CONTROL_BB_WARM_RST_LSB) & WLAN_RESET_CONTROL_BB_WARM_RST_MASK)
-#define WLAN_RESET_CONTROL_CPU_INIT_RESET_MSB 11
-#define WLAN_RESET_CONTROL_CPU_INIT_RESET_LSB 11
-#define WLAN_RESET_CONTROL_CPU_INIT_RESET_MASK 0x00000800
-#define WLAN_RESET_CONTROL_CPU_INIT_RESET_GET(x) (((x) & WLAN_RESET_CONTROL_CPU_INIT_RESET_MASK) >> WLAN_RESET_CONTROL_CPU_INIT_RESET_LSB)
-#define WLAN_RESET_CONTROL_CPU_INIT_RESET_SET(x) (((x) << WLAN_RESET_CONTROL_CPU_INIT_RESET_LSB) & WLAN_RESET_CONTROL_CPU_INIT_RESET_MASK)
-#define WLAN_RESET_CONTROL_VMC_REMAP_RESET_MSB 10
-#define WLAN_RESET_CONTROL_VMC_REMAP_RESET_LSB 10
-#define WLAN_RESET_CONTROL_VMC_REMAP_RESET_MASK 0x00000400
-#define WLAN_RESET_CONTROL_VMC_REMAP_RESET_GET(x) (((x) & WLAN_RESET_CONTROL_VMC_REMAP_RESET_MASK) >> WLAN_RESET_CONTROL_VMC_REMAP_RESET_LSB)
-#define WLAN_RESET_CONTROL_VMC_REMAP_RESET_SET(x) (((x) << WLAN_RESET_CONTROL_VMC_REMAP_RESET_LSB) & WLAN_RESET_CONTROL_VMC_REMAP_RESET_MASK)
-#define WLAN_RESET_CONTROL_RST_OUT_MSB 9
-#define WLAN_RESET_CONTROL_RST_OUT_LSB 9
-#define WLAN_RESET_CONTROL_RST_OUT_MASK 0x00000200
-#define WLAN_RESET_CONTROL_RST_OUT_GET(x) (((x) & WLAN_RESET_CONTROL_RST_OUT_MASK) >> WLAN_RESET_CONTROL_RST_OUT_LSB)
-#define WLAN_RESET_CONTROL_RST_OUT_SET(x) (((x) << WLAN_RESET_CONTROL_RST_OUT_LSB) & WLAN_RESET_CONTROL_RST_OUT_MASK)
-#define WLAN_RESET_CONTROL_COLD_RST_MSB 8
-#define WLAN_RESET_CONTROL_COLD_RST_LSB 8
-#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000100
-#define WLAN_RESET_CONTROL_COLD_RST_GET(x) (((x) & WLAN_RESET_CONTROL_COLD_RST_MASK) >> WLAN_RESET_CONTROL_COLD_RST_LSB)
-#define WLAN_RESET_CONTROL_COLD_RST_SET(x) (((x) << WLAN_RESET_CONTROL_COLD_RST_LSB) & WLAN_RESET_CONTROL_COLD_RST_MASK)
-#define WLAN_RESET_CONTROL_WARM_RST_MSB 7
-#define WLAN_RESET_CONTROL_WARM_RST_LSB 7
-#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000080
-#define WLAN_RESET_CONTROL_WARM_RST_GET(x) (((x) & WLAN_RESET_CONTROL_WARM_RST_MASK) >> WLAN_RESET_CONTROL_WARM_RST_LSB)
-#define WLAN_RESET_CONTROL_WARM_RST_SET(x) (((x) << WLAN_RESET_CONTROL_WARM_RST_LSB) & WLAN_RESET_CONTROL_WARM_RST_MASK)
-#define WLAN_RESET_CONTROL_CPU_WARM_RST_MSB 6
-#define WLAN_RESET_CONTROL_CPU_WARM_RST_LSB 6
-#define WLAN_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
-#define WLAN_RESET_CONTROL_CPU_WARM_RST_GET(x) (((x) & WLAN_RESET_CONTROL_CPU_WARM_RST_MASK) >> WLAN_RESET_CONTROL_CPU_WARM_RST_LSB)
-#define WLAN_RESET_CONTROL_CPU_WARM_RST_SET(x) (((x) << WLAN_RESET_CONTROL_CPU_WARM_RST_LSB) & WLAN_RESET_CONTROL_CPU_WARM_RST_MASK)
-#define WLAN_RESET_CONTROL_MAC_COLD_RST_MSB 5
-#define WLAN_RESET_CONTROL_MAC_COLD_RST_LSB 5
-#define WLAN_RESET_CONTROL_MAC_COLD_RST_MASK 0x00000020
-#define WLAN_RESET_CONTROL_MAC_COLD_RST_GET(x) (((x) & WLAN_RESET_CONTROL_MAC_COLD_RST_MASK) >> WLAN_RESET_CONTROL_MAC_COLD_RST_LSB)
-#define WLAN_RESET_CONTROL_MAC_COLD_RST_SET(x) (((x) << WLAN_RESET_CONTROL_MAC_COLD_RST_LSB) & WLAN_RESET_CONTROL_MAC_COLD_RST_MASK)
-#define WLAN_RESET_CONTROL_MAC_WARM_RST_MSB 4
-#define WLAN_RESET_CONTROL_MAC_WARM_RST_LSB 4
-#define WLAN_RESET_CONTROL_MAC_WARM_RST_MASK 0x00000010
-#define WLAN_RESET_CONTROL_MAC_WARM_RST_GET(x) (((x) & WLAN_RESET_CONTROL_MAC_WARM_RST_MASK) >> WLAN_RESET_CONTROL_MAC_WARM_RST_LSB)
-#define WLAN_RESET_CONTROL_MAC_WARM_RST_SET(x) (((x) << WLAN_RESET_CONTROL_MAC_WARM_RST_LSB) & WLAN_RESET_CONTROL_MAC_WARM_RST_MASK)
-#define WLAN_RESET_CONTROL_MBOX_RST_MSB 2
-#define WLAN_RESET_CONTROL_MBOX_RST_LSB 2
-#define WLAN_RESET_CONTROL_MBOX_RST_MASK 0x00000004
-#define WLAN_RESET_CONTROL_MBOX_RST_GET(x) (((x) & WLAN_RESET_CONTROL_MBOX_RST_MASK) >> WLAN_RESET_CONTROL_MBOX_RST_LSB)
-#define WLAN_RESET_CONTROL_MBOX_RST_SET(x) (((x) << WLAN_RESET_CONTROL_MBOX_RST_LSB) & WLAN_RESET_CONTROL_MBOX_RST_MASK)
-#define WLAN_RESET_CONTROL_UART_RST_MSB 1
-#define WLAN_RESET_CONTROL_UART_RST_LSB 1
-#define WLAN_RESET_CONTROL_UART_RST_MASK 0x00000002
-#define WLAN_RESET_CONTROL_UART_RST_GET(x) (((x) & WLAN_RESET_CONTROL_UART_RST_MASK) >> WLAN_RESET_CONTROL_UART_RST_LSB)
-#define WLAN_RESET_CONTROL_UART_RST_SET(x) (((x) << WLAN_RESET_CONTROL_UART_RST_LSB) & WLAN_RESET_CONTROL_UART_RST_MASK)
-#define WLAN_RESET_CONTROL_SI0_RST_MSB 0
-#define WLAN_RESET_CONTROL_SI0_RST_LSB 0
-#define WLAN_RESET_CONTROL_SI0_RST_MASK 0x00000001
-#define WLAN_RESET_CONTROL_SI0_RST_GET(x) (((x) & WLAN_RESET_CONTROL_SI0_RST_MASK) >> WLAN_RESET_CONTROL_SI0_RST_LSB)
-#define WLAN_RESET_CONTROL_SI0_RST_SET(x) (((x) << WLAN_RESET_CONTROL_SI0_RST_LSB) & WLAN_RESET_CONTROL_SI0_RST_MASK)
-
-#define WLAN_CPU_CLOCK_ADDRESS 0x00000020
-#define WLAN_CPU_CLOCK_OFFSET 0x00000020
-#define WLAN_CPU_CLOCK_STANDARD_MSB 1
-#define WLAN_CPU_CLOCK_STANDARD_LSB 0
-#define WLAN_CPU_CLOCK_STANDARD_MASK 0x00000003
-#define WLAN_CPU_CLOCK_STANDARD_GET(x) (((x) & WLAN_CPU_CLOCK_STANDARD_MASK) >> WLAN_CPU_CLOCK_STANDARD_LSB)
-#define WLAN_CPU_CLOCK_STANDARD_SET(x) (((x) << WLAN_CPU_CLOCK_STANDARD_LSB) & WLAN_CPU_CLOCK_STANDARD_MASK)
-
-#define WLAN_CLOCK_CONTROL_ADDRESS 0x00000028
-#define WLAN_CLOCK_CONTROL_OFFSET 0x00000028
-#define WLAN_CLOCK_CONTROL_LF_CLK32_MSB 2
-#define WLAN_CLOCK_CONTROL_LF_CLK32_LSB 2
-#define WLAN_CLOCK_CONTROL_LF_CLK32_MASK 0x00000004
-#define WLAN_CLOCK_CONTROL_LF_CLK32_GET(x) (((x) & WLAN_CLOCK_CONTROL_LF_CLK32_MASK) >> WLAN_CLOCK_CONTROL_LF_CLK32_LSB)
-#define WLAN_CLOCK_CONTROL_LF_CLK32_SET(x) (((x) << WLAN_CLOCK_CONTROL_LF_CLK32_LSB) & WLAN_CLOCK_CONTROL_LF_CLK32_MASK)
-#define WLAN_CLOCK_CONTROL_SI0_CLK_MSB 0
-#define WLAN_CLOCK_CONTROL_SI0_CLK_LSB 0
-#define WLAN_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
-#define WLAN_CLOCK_CONTROL_SI0_CLK_GET(x) (((x) & WLAN_CLOCK_CONTROL_SI0_CLK_MASK) >> WLAN_CLOCK_CONTROL_SI0_CLK_LSB)
-#define WLAN_CLOCK_CONTROL_SI0_CLK_SET(x) (((x) << WLAN_CLOCK_CONTROL_SI0_CLK_LSB) & WLAN_CLOCK_CONTROL_SI0_CLK_MASK)
-
-#define WLAN_SYSTEM_SLEEP_ADDRESS 0x000000c4
-#define WLAN_SYSTEM_SLEEP_OFFSET 0x000000c4
-#define WLAN_SYSTEM_SLEEP_HOST_IF_MSB 4
-#define WLAN_SYSTEM_SLEEP_HOST_IF_LSB 4
-#define WLAN_SYSTEM_SLEEP_HOST_IF_MASK 0x00000010
-#define WLAN_SYSTEM_SLEEP_HOST_IF_GET(x) (((x) & WLAN_SYSTEM_SLEEP_HOST_IF_MASK) >> WLAN_SYSTEM_SLEEP_HOST_IF_LSB)
-#define WLAN_SYSTEM_SLEEP_HOST_IF_SET(x) (((x) << WLAN_SYSTEM_SLEEP_HOST_IF_LSB) & WLAN_SYSTEM_SLEEP_HOST_IF_MASK)
-#define WLAN_SYSTEM_SLEEP_MBOX_MSB 3
-#define WLAN_SYSTEM_SLEEP_MBOX_LSB 3
-#define WLAN_SYSTEM_SLEEP_MBOX_MASK 0x00000008
-#define WLAN_SYSTEM_SLEEP_MBOX_GET(x) (((x) & WLAN_SYSTEM_SLEEP_MBOX_MASK) >> WLAN_SYSTEM_SLEEP_MBOX_LSB)
-#define WLAN_SYSTEM_SLEEP_MBOX_SET(x) (((x) << WLAN_SYSTEM_SLEEP_MBOX_LSB) & WLAN_SYSTEM_SLEEP_MBOX_MASK)
-#define WLAN_SYSTEM_SLEEP_MAC_IF_MSB 2
-#define WLAN_SYSTEM_SLEEP_MAC_IF_LSB 2
-#define WLAN_SYSTEM_SLEEP_MAC_IF_MASK 0x00000004
-#define WLAN_SYSTEM_SLEEP_MAC_IF_GET(x) (((x) & WLAN_SYSTEM_SLEEP_MAC_IF_MASK) >> WLAN_SYSTEM_SLEEP_MAC_IF_LSB)
-#define WLAN_SYSTEM_SLEEP_MAC_IF_SET(x) (((x) << WLAN_SYSTEM_SLEEP_MAC_IF_LSB) & WLAN_SYSTEM_SLEEP_MAC_IF_MASK)
-#define WLAN_SYSTEM_SLEEP_LIGHT_MSB 1
-#define WLAN_SYSTEM_SLEEP_LIGHT_LSB 1
-#define WLAN_SYSTEM_SLEEP_LIGHT_MASK 0x00000002
-#define WLAN_SYSTEM_SLEEP_LIGHT_GET(x) (((x) & WLAN_SYSTEM_SLEEP_LIGHT_MASK) >> WLAN_SYSTEM_SLEEP_LIGHT_LSB)
-#define WLAN_SYSTEM_SLEEP_LIGHT_SET(x) (((x) << WLAN_SYSTEM_SLEEP_LIGHT_LSB) & WLAN_SYSTEM_SLEEP_LIGHT_MASK)
-#define WLAN_SYSTEM_SLEEP_DISABLE_MSB 0
-#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
-#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
-#define WLAN_SYSTEM_SLEEP_DISABLE_GET(x) (((x) & WLAN_SYSTEM_SLEEP_DISABLE_MASK) >> WLAN_SYSTEM_SLEEP_DISABLE_LSB)
-#define WLAN_SYSTEM_SLEEP_DISABLE_SET(x) (((x) << WLAN_SYSTEM_SLEEP_DISABLE_LSB) & WLAN_SYSTEM_SLEEP_DISABLE_MASK)
-
-#define WLAN_LPO_CAL_ADDRESS 0x000000e0
-#define WLAN_LPO_CAL_OFFSET 0x000000e0
-#define WLAN_LPO_CAL_ENABLE_MSB 20
-#define WLAN_LPO_CAL_ENABLE_LSB 20
-#define WLAN_LPO_CAL_ENABLE_MASK 0x00100000
-#define WLAN_LPO_CAL_ENABLE_GET(x) (((x) & WLAN_LPO_CAL_ENABLE_MASK) >> WLAN_LPO_CAL_ENABLE_LSB)
-#define WLAN_LPO_CAL_ENABLE_SET(x) (((x) << WLAN_LPO_CAL_ENABLE_LSB) & WLAN_LPO_CAL_ENABLE_MASK)
-#define WLAN_LPO_CAL_COUNT_MSB 19
-#define WLAN_LPO_CAL_COUNT_LSB 0
-#define WLAN_LPO_CAL_COUNT_MASK 0x000fffff
-#define WLAN_LPO_CAL_COUNT_GET(x) (((x) & WLAN_LPO_CAL_COUNT_MASK) >> WLAN_LPO_CAL_COUNT_LSB)
-#define WLAN_LPO_CAL_COUNT_SET(x) (((x) << WLAN_LPO_CAL_COUNT_LSB) & WLAN_LPO_CAL_COUNT_MASK)
-
-#endif /* _RTC_WLAN_REG_H_ */
diff --git a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/uart_reg.h b/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/uart_reg.h
deleted file mode 100644
index 302b20bc1ba..00000000000
--- a/drivers/staging/ath6kl/include/common/AR6002/hw4.0/hw/uart_reg.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// ------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-// ------------------------------------------------------------------
-//===================================================================
-// Author(s): ="Atheros"
-//===================================================================
-
-
-#ifndef _UART_REG_REG_H_
-#define _UART_REG_REG_H_
-
-#define UART_CLKDIV_ADDRESS 0x00000008
-#define UART_CLKDIV_OFFSET 0x00000008
-#define UART_CLKDIV_CLK_SCALE_MSB 23
-#define UART_CLKDIV_CLK_SCALE_LSB 16
-#define UART_CLKDIV_CLK_SCALE_MASK 0x00ff0000
-#define UART_CLKDIV_CLK_SCALE_GET(x) (((x) & UART_CLKDIV_CLK_SCALE_MASK) >> UART_CLKDIV_CLK_SCALE_LSB)
-#define UART_CLKDIV_CLK_SCALE_SET(x) (((x) << UART_CLKDIV_CLK_SCALE_LSB) & UART_CLKDIV_CLK_SCALE_MASK)
-#define UART_CLKDIV_CLK_STEP_MSB 15
-#define UART_CLKDIV_CLK_STEP_LSB 0
-#define UART_CLKDIV_CLK_STEP_MASK 0x0000ffff
-#define UART_CLKDIV_CLK_STEP_GET(x) (((x) & UART_CLKDIV_CLK_STEP_MASK) >> UART_CLKDIV_CLK_STEP_LSB)
-#define UART_CLKDIV_CLK_STEP_SET(x) (((x) << UART_CLKDIV_CLK_STEP_LSB) & UART_CLKDIV_CLK_STEP_MASK)
-
-#endif /* _UART_REG_H_ */
diff --git a/drivers/staging/ath6kl/include/common/athdefs.h b/drivers/staging/ath6kl/include/common/athdefs.h
deleted file mode 100644
index 74922481e06..00000000000
--- a/drivers/staging/ath6kl/include/common/athdefs.h
+++ /dev/null
@@ -1,75 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="athdefs.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef __ATHDEFS_H__
-#define __ATHDEFS_H__
-
-/*
- * This file contains definitions that may be used across both
- * Host and Target software. Nothing here is module-dependent
- * or platform-dependent.
- */
-
-/*
- * Generic error codes that can be used by hw, sta, ap, sim, dk
- * and any other environments.
- * Feel free to add any more non-zero codes that you need.
- */
-
-#define A_ERROR (-1) /* Generic error return */
-#define A_DEVICE_NOT_FOUND 1 /* not able to find PCI device */
-#define A_NO_MEMORY 2 /* not able to allocate memory,
- * not avail#defineable */
-#define A_MEMORY_NOT_AVAIL 3 /* memory region is not free for
- * mapping */
-#define A_NO_FREE_DESC 4 /* no free descriptors available */
-#define A_BAD_ADDRESS 5 /* address does not match descriptor */
-#define A_WIN_DRIVER_ERROR 6 /* used in NT_HW version,
- * if problem at init */
-#define A_REGS_NOT_MAPPED 7 /* registers not correctly mapped */
-#define A_EPERM 8 /* Not superuser */
-#define A_EACCES 0 /* Access denied */
-#define A_ENOENT 10 /* No such entry, search failed, etc. */
-#define A_EEXIST 11 /* The object already exists
- * (can't create) */
-#define A_EFAULT 12 /* Bad address fault */
-#define A_EBUSY 13 /* Object is busy */
-#define A_EINVAL 14 /* Invalid parameter */
-#define A_EMSGSIZE 15 /* Bad message buffer length */
-#define A_ECANCELED 16 /* Operation canceled */
-#define A_ENOTSUP 17 /* Operation not supported */
-#define A_ECOMM 18 /* Communication error on send */
-#define A_EPROTO 19 /* Protocol error */
-#define A_ENODEV 20 /* No such device */
-#define A_EDEVNOTUP 21 /* device is not UP */
-#define A_NO_RESOURCE 22 /* No resources for
- * requested operation */
-#define A_HARDWARE 23 /* Hardware failure */
-#define A_PENDING 24 /* Asynchronous routine; will send up
- * results later
- * (typically in callback) */
-#define A_EBADCHANNEL 25 /* The channel cannot be used */
-#define A_DECRYPT_ERROR 26 /* Decryption error */
-#define A_PHY_ERROR 27 /* RX PHY error */
-#define A_CONSUMED 28 /* Object was consumed */
-
-#endif /* __ATHDEFS_H__ */
diff --git a/drivers/staging/ath6kl/include/common/bmi_msg.h b/drivers/staging/ath6kl/include/common/bmi_msg.h
deleted file mode 100644
index 84e8db569a9..00000000000
--- a/drivers/staging/ath6kl/include/common/bmi_msg.h
+++ /dev/null
@@ -1,233 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#ifndef __BMI_MSG_H__
-#define __BMI_MSG_H__
-
-/*
- * Bootloader Messaging Interface (BMI)
- *
- * BMI is a very simple messaging interface used during initialization
- * to read memory, write memory, execute code, and to define an
- * application entry PC.
- *
- * It is used to download an application to AR6K, to provide
- * patches to code that is already resident on AR6K, and generally
- * to examine and modify state. The Host has an opportunity to use
- * BMI only once during bootup. Once the Host issues a BMI_DONE
- * command, this opportunity ends.
- *
- * The Host writes BMI requests to mailbox0, and reads BMI responses
- * from mailbox0. BMI requests all begin with a command
- * (see below for specific commands), and are followed by
- * command-specific data.
- *
- * Flow control:
- * The Host can only issue a command once the Target gives it a
- * "BMI Command Credit", using AR6K Counter #4. As soon as the
- * Target has completed a command, it issues another BMI Command
- * Credit (so the Host can issue the next command).
- *
- * BMI handles all required Target-side cache flushing.
- */
-
-
-/* Maximum data size used for BMI transfers */
-#define BMI_DATASZ_MAX 256
-
-/* BMI Commands */
-
-#define BMI_NO_COMMAND 0
-
-#define BMI_DONE 1
- /*
- * Semantics: Host is done using BMI
- * Request format:
- * u32 command (BMI_DONE)
- * Response format: none
- */
-
-#define BMI_READ_MEMORY 2
- /*
- * Semantics: Host reads AR6K memory
- * Request format:
- * u32 command (BMI_READ_MEMORY)
- * u32 address
- * u32 length, at most BMI_DATASZ_MAX
- * Response format:
- * u8 data[length]
- */
-
-#define BMI_WRITE_MEMORY 3
- /*
- * Semantics: Host writes AR6K memory
- * Request format:
- * u32 command (BMI_WRITE_MEMORY)
- * u32 address
- * u32 length, at most BMI_DATASZ_MAX
- * u8 data[length]
- * Response format: none
- */
-
-#define BMI_EXECUTE 4
- /*
- * Semantics: Causes AR6K to execute code
- * Request format:
- * u32 command (BMI_EXECUTE)
- * u32 address
- * u32 parameter
- * Response format:
- * u32 return value
- */
-
-#define BMI_SET_APP_START 5
- /*
- * Semantics: Set Target application starting address
- * Request format:
- * u32 command (BMI_SET_APP_START)
- * u32 address
- * Response format: none
- */
-
-#define BMI_READ_SOC_REGISTER 6
- /*
- * Semantics: Read a 32-bit Target SOC register.
- * Request format:
- * u32 command (BMI_READ_REGISTER)
- * u32 address
- * Response format:
- * u32 value
- */
-
-#define BMI_WRITE_SOC_REGISTER 7
- /*
- * Semantics: Write a 32-bit Target SOC register.
- * Request format:
- * u32 command (BMI_WRITE_REGISTER)
- * u32 address
- * u32 value
- *
- * Response format: none
- */
-
-#define BMI_GET_TARGET_ID 8
-#define BMI_GET_TARGET_INFO 8
- /*
- * Semantics: Fetch the 4-byte Target information
- * Request format:
- * u32 command (BMI_GET_TARGET_ID/INFO)
- * Response format1 (old firmware):
- * u32 TargetVersionID
- * Response format2 (newer firmware):
- * u32 TARGET_VERSION_SENTINAL
- * struct bmi_target_info;
- */
-
-PREPACK struct bmi_target_info {
- u32 target_info_byte_count; /* size of this structure */
- u32 target_ver; /* Target Version ID */
- u32 target_type; /* Target type */
-} POSTPACK;
-#define TARGET_VERSION_SENTINAL 0xffffffff
-#define TARGET_TYPE_AR6001 1
-#define TARGET_TYPE_AR6002 2
-#define TARGET_TYPE_AR6003 3
-
-
-#define BMI_ROMPATCH_INSTALL 9
- /*
- * Semantics: Install a ROM Patch.
- * Request format:
- * u32 command (BMI_ROMPATCH_INSTALL)
- * u32 Target ROM Address
- * u32 Target RAM Address or Value (depending on Target Type)
- * u32 Size, in bytes
- * u32 Activate? 1-->activate;
- * 0-->install but do not activate
- * Response format:
- * u32 PatchID
- */
-
-#define BMI_ROMPATCH_UNINSTALL 10
- /*
- * Semantics: Uninstall a previously-installed ROM Patch,
- * automatically deactivating, if necessary.
- * Request format:
- * u32 command (BMI_ROMPATCH_UNINSTALL)
- * u32 PatchID
- *
- * Response format: none
- */
-
-#define BMI_ROMPATCH_ACTIVATE 11
- /*
- * Semantics: Activate a list of previously-installed ROM Patches.
- * Request format:
- * u32 command (BMI_ROMPATCH_ACTIVATE)
- * u32 rompatch_count
- * u32 PatchID[rompatch_count]
- *
- * Response format: none
- */
-
-#define BMI_ROMPATCH_DEACTIVATE 12
- /*
- * Semantics: Deactivate a list of active ROM Patches.
- * Request format:
- * u32 command (BMI_ROMPATCH_DEACTIVATE)
- * u32 rompatch_count
- * u32 PatchID[rompatch_count]
- *
- * Response format: none
- */
-
-
-#define BMI_LZ_STREAM_START 13
- /*
- * Semantics: Begin an LZ-compressed stream of input
- * which is to be uncompressed by the Target to an
- * output buffer at address. The output buffer must
- * be sufficiently large to hold the uncompressed
- * output from the compressed input stream. This BMI
- * command should be followed by a series of 1 or more
- * BMI_LZ_DATA commands.
- * u32 command (BMI_LZ_STREAM_START)
- * u32 address
- * Note: Not supported on all versions of ROM firmware.
- */
-
-#define BMI_LZ_DATA 14
- /*
- * Semantics: Host writes AR6K memory with LZ-compressed
- * data which is uncompressed by the Target. This command
- * must be preceded by a BMI_LZ_STREAM_START command. A series
- * of BMI_LZ_DATA commands are considered part of a single
- * input stream until another BMI_LZ_STREAM_START is issued.
- * Request format:
- * u32 command (BMI_LZ_DATA)
- * u32 length (of compressed data),
- * at most BMI_DATASZ_MAX
- * u8 CompressedData[length]
- * Response format: none
- * Note: Not supported on all versions of ROM firmware.
- */
-
-#endif /* __BMI_MSG_H__ */
diff --git a/drivers/staging/ath6kl/include/common/cnxmgmt.h b/drivers/staging/ath6kl/include/common/cnxmgmt.h
deleted file mode 100644
index 7a902cb5483..00000000000
--- a/drivers/staging/ath6kl/include/common/cnxmgmt.h
+++ /dev/null
@@ -1,36 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="cnxmgmt.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef _CNXMGMT_H_
-#define _CNXMGMT_H_
-
-typedef enum {
- CM_CONNECT_WITHOUT_SCAN = 0x0001,
- CM_CONNECT_ASSOC_POLICY_USER = 0x0002,
- CM_CONNECT_SEND_REASSOC = 0x0004,
- CM_CONNECT_WITHOUT_ROAMTABLE_UPDATE = 0x0008,
- CM_CONNECT_DO_WPA_OFFLOAD = 0x0010,
- CM_CONNECT_DO_NOT_DEAUTH = 0x0020,
-} CM_CONNECT_TYPE;
-
-#endif /* _CNXMGMT_H_ */
diff --git a/drivers/staging/ath6kl/include/common/dbglog.h b/drivers/staging/ath6kl/include/common/dbglog.h
deleted file mode 100644
index 5566e568b83..00000000000
--- a/drivers/staging/ath6kl/include/common/dbglog.h
+++ /dev/null
@@ -1,126 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="dbglog.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef _DBGLOG_H_
-#define _DBGLOG_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define DBGLOG_TIMESTAMP_OFFSET 0
-#define DBGLOG_TIMESTAMP_MASK 0x0000FFFF /* Bit 0-15. Contains bit
- 8-23 of the LF0 timer */
-#define DBGLOG_DBGID_OFFSET 16
-#define DBGLOG_DBGID_MASK 0x03FF0000 /* Bit 16-25 */
-#define DBGLOG_DBGID_NUM_MAX 256 /* Upper limit is width of mask */
-
-#define DBGLOG_MODULEID_OFFSET 26
-#define DBGLOG_MODULEID_MASK 0x3C000000 /* Bit 26-29 */
-#define DBGLOG_MODULEID_NUM_MAX 16 /* Upper limit is width of mask */
-
-/*
- * Please ensure that the definition of any new module introduced is captured
- * between the DBGLOG_MODULEID_START and DBGLOG_MODULEID_END defines. The
- * structure is required for the parser to correctly pick up the values for
- * different modules.
- */
-#define DBGLOG_MODULEID_START
-#define DBGLOG_MODULEID_INF 0
-#define DBGLOG_MODULEID_WMI 1
-#define DBGLOG_MODULEID_MISC 2
-#define DBGLOG_MODULEID_PM 3
-#define DBGLOG_MODULEID_TXRX_MGMTBUF 4
-#define DBGLOG_MODULEID_TXRX_TXBUF 5
-#define DBGLOG_MODULEID_TXRX_RXBUF 6
-#define DBGLOG_MODULEID_WOW 7
-#define DBGLOG_MODULEID_WHAL 8
-#define DBGLOG_MODULEID_DC 9
-#define DBGLOG_MODULEID_CO 10
-#define DBGLOG_MODULEID_RO 11
-#define DBGLOG_MODULEID_CM 12
-#define DBGLOG_MODULEID_MGMT 13
-#define DBGLOG_MODULEID_TMR 14
-#define DBGLOG_MODULEID_BTCOEX 15
-#define DBGLOG_MODULEID_END
-
-#define DBGLOG_NUM_ARGS_OFFSET 30
-#define DBGLOG_NUM_ARGS_MASK 0xC0000000 /* Bit 30-31 */
-#define DBGLOG_NUM_ARGS_MAX 2 /* Upper limit is width of mask */
-
-#define DBGLOG_MODULE_LOG_ENABLE_OFFSET 0
-#define DBGLOG_MODULE_LOG_ENABLE_MASK 0x0000FFFF
-
-#define DBGLOG_REPORTING_ENABLED_OFFSET 16
-#define DBGLOG_REPORTING_ENABLED_MASK 0x00010000
-
-#define DBGLOG_TIMESTAMP_RESOLUTION_OFFSET 17
-#define DBGLOG_TIMESTAMP_RESOLUTION_MASK 0x000E0000
-
-#define DBGLOG_REPORT_SIZE_OFFSET 20
-#define DBGLOG_REPORT_SIZE_MASK 0x3FF00000
-
-#define DBGLOG_LOG_BUFFER_SIZE 1500
-#define DBGLOG_DBGID_DEFINITION_LEN_MAX 90
-
-PREPACK struct dbglog_buf_s {
- struct dbglog_buf_s *next;
- u8 *buffer;
- u32 bufsize;
- u32 length;
- u32 count;
- u32 free;
-} POSTPACK;
-
-PREPACK struct dbglog_hdr_s {
- struct dbglog_buf_s *dbuf;
- u32 dropped;
-} POSTPACK;
-
-PREPACK struct dbglog_config_s {
- u32 cfgvalid; /* Mask with valid config bits */
- union {
- /* TODO: Take care of endianness */
- struct {
- u32 mmask:16; /* Mask of modules with logging on */
- u32 rep:1; /* Reporting enabled or not */
- u32 tsr:3; /* Time stamp resolution. Def: 1 ms */
- u32 size:10; /* Report size in number of messages */
- u32 reserved:2;
- } dbglog_config;
-
- u32 value;
- } u;
-} POSTPACK;
-
-#define cfgmmask u.dbglog_config.mmask
-#define cfgrep u.dbglog_config.rep
-#define cfgtsr u.dbglog_config.tsr
-#define cfgsize u.dbglog_config.size
-#define cfgvalue u.value
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _DBGLOG_H_ */
diff --git a/drivers/staging/ath6kl/include/common/dbglog_id.h b/drivers/staging/ath6kl/include/common/dbglog_id.h
deleted file mode 100644
index 15ef829cab2..00000000000
--- a/drivers/staging/ath6kl/include/common/dbglog_id.h
+++ /dev/null
@@ -1,558 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="dbglog_id.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef _DBGLOG_ID_H_
-#define _DBGLOG_ID_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * The nomenclature for the debug identifiers is MODULE_DESCRIPTION.
- * Please ensure that the definition of any new debugid introduced is captured
- * between the <MODULE>_DBGID_DEFINITION_START and
- * <MODULE>_DBGID_DEFINITION_END defines. The structure is required for the
- * parser to correctly pick up the values for different debug identifiers.
- */
-
-/* INF debug identifier definitions */
-#define INF_DBGID_DEFINITION_START
-#define INF_ASSERTION_FAILED 1
-#define INF_TARGET_ID 2
-#define INF_DBGID_DEFINITION_END
-
-/* WMI debug identifier definitions */
-#define WMI_DBGID_DEFINITION_START
-#define WMI_CMD_RX_XTND_PKT_TOO_SHORT 1
-#define WMI_EXTENDED_CMD_NOT_HANDLED 2
-#define WMI_CMD_RX_PKT_TOO_SHORT 3
-#define WMI_CALLING_WMI_EXTENSION_FN 4
-#define WMI_CMD_NOT_HANDLED 5
-#define WMI_IN_SYNC 6
-#define WMI_TARGET_WMI_SYNC_CMD 7
-#define WMI_SET_SNR_THRESHOLD_PARAMS 8
-#define WMI_SET_RSSI_THRESHOLD_PARAMS 9
-#define WMI_SET_LQ_TRESHOLD_PARAMS 10
-#define WMI_TARGET_CREATE_PSTREAM_CMD 11
-#define WMI_WI_DTM_INUSE 12
-#define WMI_TARGET_DELETE_PSTREAM_CMD 13
-#define WMI_TARGET_IMPLICIT_DELETE_PSTREAM_CMD 14
-#define WMI_TARGET_GET_BIT_RATE_CMD 15
-#define WMI_GET_RATE_MASK_CMD_FIX_RATE_MASK_IS 16
-#define WMI_TARGET_GET_AVAILABLE_CHANNELS_CMD 17
-#define WMI_TARGET_GET_TX_PWR_CMD 18
-#define WMI_FREE_EVBUF_WMIBUF 19
-#define WMI_FREE_EVBUF_DATABUF 20
-#define WMI_FREE_EVBUF_BADFLAG 21
-#define WMI_HTC_RX_ERROR_DATA_PACKET 22
-#define WMI_HTC_RX_SYNC_PAUSING_FOR_MBOX 23
-#define WMI_INCORRECT_WMI_DATA_HDR_DROPPING_PKT 24
-#define WMI_SENDING_READY_EVENT 25
-#define WMI_SETPOWER_MDOE_TO_MAXPERF 26
-#define WMI_SETPOWER_MDOE_TO_REC 27
-#define WMI_BSSINFO_EVENT_FROM 28
-#define WMI_TARGET_GET_STATS_CMD 29
-#define WMI_SENDING_SCAN_COMPLETE_EVENT 30
-#define WMI_SENDING_RSSI_INDB_THRESHOLD_EVENT 31
-#define WMI_SENDING_RSSI_INDBM_THRESHOLD_EVENT 32
-#define WMI_SENDING_LINK_QUALITY_THRESHOLD_EVENT 33
-#define WMI_SENDING_ERROR_REPORT_EVENT 34
-#define WMI_SENDING_CAC_EVENT 35
-#define WMI_TARGET_GET_ROAM_TABLE_CMD 36
-#define WMI_TARGET_GET_ROAM_DATA_CMD 37
-#define WMI_SENDING_GPIO_INTR_EVENT 38
-#define WMI_SENDING_GPIO_ACK_EVENT 39
-#define WMI_SENDING_GPIO_DATA_EVENT 40
-#define WMI_CMD_RX 41
-#define WMI_CMD_RX_XTND 42
-#define WMI_EVENT_SEND 43
-#define WMI_EVENT_SEND_XTND 44
-#define WMI_CMD_PARAMS_DUMP_START 45
-#define WMI_CMD_PARAMS_DUMP_END 46
-#define WMI_CMD_PARAMS 47
-#define WMI_DBGID_DEFINITION_END
-
-/* MISC debug identifier definitions */
-#define MISC_DBGID_DEFINITION_START
-#define MISC_WLAN_SCHEDULER_EVENT_REGISTER_ERROR 1
-#define TLPM_INIT 2
-#define TLPM_FILTER_POWER_STATE 3
-#define TLPM_NOTIFY_NOT_IDLE 4
-#define TLPM_TIMEOUT_IDLE_HANDLER 5
-#define TLPM_TIMEOUT_WAKEUP_HANDLER 6
-#define TLPM_WAKEUP_SIGNAL_HANDLER 7
-#define TLPM_UNEXPECTED_GPIO_INTR_ERROR 8
-#define TLPM_BREAK_ON_NOT_RECEIVED_ERROR 9
-#define TLPM_BREAK_OFF_NOT_RECIVED_ERROR 10
-#define TLPM_ACK_GPIO_INTR 11
-#define TLPM_ON 12
-#define TLPM_OFF 13
-#define TLPM_WAKEUP_FROM_HOST 14
-#define TLPM_WAKEUP_FROM_BT 15
-#define TLPM_TX_BREAK_RECIVED 16
-#define TLPM_IDLE_TIMER_NOT_RUNNING 17
-#define MISC_DBGID_DEFINITION_END
-
-/* TXRX debug identifier definitions */
-#define TXRX_TXBUF_DBGID_DEFINITION_START
-#define TXRX_TXBUF_ALLOCATE_BUF 1
-#define TXRX_TXBUF_QUEUE_BUF_TO_MBOX 2
-#define TXRX_TXBUF_QUEUE_BUF_TO_TXQ 3
-#define TXRX_TXBUF_TXQ_DEPTH 4
-#define TXRX_TXBUF_IBSS_QUEUE_TO_SFQ 5
-#define TXRX_TXBUF_IBSS_QUEUE_TO_TXQ_FRM_SFQ 6
-#define TXRX_TXBUF_INITIALIZE_TIMER 7
-#define TXRX_TXBUF_ARM_TIMER 8
-#define TXRX_TXBUF_DISARM_TIMER 9
-#define TXRX_TXBUF_UNINITIALIZE_TIMER 10
-#define TXRX_TXBUF_DBGID_DEFINITION_END
-
-#define TXRX_RXBUF_DBGID_DEFINITION_START
-#define TXRX_RXBUF_ALLOCATE_BUF 1
-#define TXRX_RXBUF_QUEUE_TO_HOST 2
-#define TXRX_RXBUF_QUEUE_TO_WLAN 3
-#define TXRX_RXBUF_ZERO_LEN_BUF 4
-#define TXRX_RXBUF_QUEUE_TO_HOST_LASTBUF_IN_RXCHAIN 5
-#define TXRX_RXBUF_LASTBUF_IN_RXCHAIN_ZEROBUF 6
-#define TXRX_RXBUF_QUEUE_EMPTY_QUEUE_TO_WLAN 7
-#define TXRX_RXBUF_SEND_TO_RECV_MGMT 8
-#define TXRX_RXBUF_SEND_TO_IEEE_LAYER 9
-#define TXRX_RXBUF_REQUEUE_ERROR 10
-#define TXRX_RXBUF_DBGID_DEFINITION_END
-
-#define TXRX_MGMTBUF_DBGID_DEFINITION_START
-#define TXRX_MGMTBUF_ALLOCATE_BUF 1
-#define TXRX_MGMTBUF_ALLOCATE_SM_BUF 2
-#define TXRX_MGMTBUF_ALLOCATE_RMBUF 3
-#define TXRX_MGMTBUF_GET_BUF 4
-#define TXRX_MGMTBUF_GET_SM_BUF 5
-#define TXRX_MGMTBUF_QUEUE_BUF_TO_TXQ 6
-#define TXRX_MGMTBUF_REAPED_BUF 7
-#define TXRX_MGMTBUF_REAPED_SM_BUF 8
-#define TXRX_MGMTBUF_WAIT_FOR_TXQ_DRAIN 9
-#define TXRX_MGMTBUF_WAIT_FOR_TXQ_SFQ_DRAIN 10
-#define TXRX_MGMTBUF_ENQUEUE_INTO_DATA_SFQ 11
-#define TXRX_MGMTBUF_DEQUEUE_FROM_DATA_SFQ 12
-#define TXRX_MGMTBUF_PAUSE_DATA_TXQ 13
-#define TXRX_MGMTBUF_RESUME_DATA_TXQ 14
-#define TXRX_MGMTBUF_WAIT_FORTXQ_DRAIN_TIMEOUT 15
-#define TXRX_MGMTBUF_DRAINQ 16
-#define TXRX_MGMTBUF_INDICATE_Q_DRAINED 17
-#define TXRX_MGMTBUF_ENQUEUE_INTO_HW_SFQ 18
-#define TXRX_MGMTBUF_DEQUEUE_FROM_HW_SFQ 19
-#define TXRX_MGMTBUF_PAUSE_HW_TXQ 20
-#define TXRX_MGMTBUF_RESUME_HW_TXQ 21
-#define TXRX_MGMTBUF_TEAR_DOWN_BA 22
-#define TXRX_MGMTBUF_PROCESS_ADDBA_REQ 23
-#define TXRX_MGMTBUF_PROCESS_DELBA 24
-#define TXRX_MGMTBUF_PERFORM_BA 25
-#define TXRX_MGMTBUF_WLAN_RESET_ON_ERROR 26
-#define TXRX_MGMTBUF_DBGID_DEFINITION_END
-
-/* PM (Power Module) debug identifier definitions */
-#define PM_DBGID_DEFINITION_START
-#define PM_INIT 1
-#define PM_ENABLE 2
-#define PM_SET_STATE 3
-#define PM_SET_POWERMODE 4
-#define PM_CONN_NOTIFY 5
-#define PM_REF_COUNT_NEGATIVE 6
-#define PM_INFRA_STA_APSD_ENABLE 7
-#define PM_INFRA_STA_UPDATE_APSD_STATE 8
-#define PM_CHAN_OP_REQ 9
-#define PM_SET_MY_BEACON_POLICY 10
-#define PM_SET_ALL_BEACON_POLICY 11
-#define PM_INFRA_STA_SET_PM_PARAMS1 12
-#define PM_INFRA_STA_SET_PM_PARAMS2 13
-#define PM_ADHOC_SET_PM_CAPS_FAIL 14
-#define PM_ADHOC_UNKNOWN_IBSS_ATTRIB_ID 15
-#define PM_ADHOC_SET_PM_PARAMS 16
-#define PM_ADHOC_STATE1 18
-#define PM_ADHOC_STATE2 19
-#define PM_ADHOC_CONN_MAP 20
-#define PM_FAKE_SLEEP 21
-#define PM_AP_STATE1 22
-#define PM_AP_SET_PM_PARAMS 23
-#define PM_DBGID_DEFINITION_END
-
-/* Wake on Wireless debug identifier definitions */
-#define WOW_DBGID_DEFINITION_START
-#define WOW_INIT 1
-#define WOW_GET_CONFIG_DSET 2
-#define WOW_NO_CONFIG_DSET 3
-#define WOW_INVALID_CONFIG_DSET 4
-#define WOW_USE_DEFAULT_CONFIG 5
-#define WOW_SETUP_GPIO 6
-#define WOW_INIT_DONE 7
-#define WOW_SET_GPIO_PIN 8
-#define WOW_CLEAR_GPIO_PIN 9
-#define WOW_SET_WOW_MODE_CMD 10
-#define WOW_SET_HOST_MODE_CMD 11
-#define WOW_ADD_WOW_PATTERN_CMD 12
-#define WOW_NEW_WOW_PATTERN_AT_INDEX 13
-#define WOW_DEL_WOW_PATTERN_CMD 14
-#define WOW_LIST_CONTAINS_PATTERNS 15
-#define WOW_GET_WOW_LIST_CMD 16
-#define WOW_INVALID_FILTER_ID 17
-#define WOW_INVALID_FILTER_LISTID 18
-#define WOW_NO_VALID_FILTER_AT_ID 19
-#define WOW_NO_VALID_LIST_AT_ID 20
-#define WOW_NUM_PATTERNS_EXCEEDED 21
-#define WOW_NUM_LISTS_EXCEEDED 22
-#define WOW_GET_WOW_STATS 23
-#define WOW_CLEAR_WOW_STATS 24
-#define WOW_WAKEUP_HOST 25
-#define WOW_EVENT_WAKEUP_HOST 26
-#define WOW_EVENT_DISCARD 27
-#define WOW_PATTERN_MATCH 28
-#define WOW_PATTERN_NOT_MATCH 29
-#define WOW_PATTERN_NOT_MATCH_OFFSET 30
-#define WOW_DISABLED_HOST_ASLEEP 31
-#define WOW_ENABLED_HOST_ASLEEP_NO_PATTERNS 32
-#define WOW_ENABLED_HOST_ASLEEP_NO_MATCH_FOUND 33
-#define WOW_DBGID_DEFINITION_END
-
-/* WHAL debug identifier definitions */
-#define WHAL_DBGID_DEFINITION_START
-#define WHAL_ERROR_ANI_CONTROL 1
-#define WHAL_ERROR_CHIP_TEST1 2
-#define WHAL_ERROR_CHIP_TEST2 3
-#define WHAL_ERROR_EEPROM_CHECKSUM 4
-#define WHAL_ERROR_EEPROM_MACADDR 5
-#define WHAL_ERROR_INTERRUPT_HIU 6
-#define WHAL_ERROR_KEYCACHE_RESET 7
-#define WHAL_ERROR_KEYCACHE_SET 8
-#define WHAL_ERROR_KEYCACHE_TYPE 9
-#define WHAL_ERROR_KEYCACHE_TKIPENTRY 10
-#define WHAL_ERROR_KEYCACHE_WEPLENGTH 11
-#define WHAL_ERROR_PHY_INVALID_CHANNEL 12
-#define WHAL_ERROR_POWER_AWAKE 13
-#define WHAL_ERROR_POWER_SET 14
-#define WHAL_ERROR_RECV_STOPDMA 15
-#define WHAL_ERROR_RECV_STOPPCU 16
-#define WHAL_ERROR_RESET_CHANNF1 17
-#define WHAL_ERROR_RESET_CHANNF2 18
-#define WHAL_ERROR_RESET_PM 19
-#define WHAL_ERROR_RESET_OFFSETCAL 20
-#define WHAL_ERROR_RESET_RFGRANT 21
-#define WHAL_ERROR_RESET_RXFRAME 22
-#define WHAL_ERROR_RESET_STOPDMA 23
-#define WHAL_ERROR_RESET_RECOVER 24
-#define WHAL_ERROR_XMIT_COMPUTE 25
-#define WHAL_ERROR_XMIT_NOQUEUE 26
-#define WHAL_ERROR_XMIT_ACTIVEQUEUE 27
-#define WHAL_ERROR_XMIT_BADTYPE 28
-#define WHAL_ERROR_XMIT_STOPDMA 29
-#define WHAL_ERROR_INTERRUPT_BB_PANIC 30
-#define WHAL_ERROR_RESET_TXIQCAL 31
-#define WHAL_ERROR_PAPRD_MAXGAIN_ABOVE_WINDOW 32
-#define WHAL_DBGID_DEFINITION_END
-
-/* DC debug identifier definitions */
-#define DC_DBGID_DEFINITION_START
-#define DC_SCAN_CHAN_START 1
-#define DC_SCAN_CHAN_FINISH 2
-#define DC_BEACON_RECEIVE7 3
-#define DC_SSID_PROBE_CB 4
-#define DC_SEND_NEXT_SSID_PROBE 5
-#define DC_START_SEARCH 6
-#define DC_CANCEL_SEARCH_CB 7
-#define DC_STOP_SEARCH 8
-#define DC_END_SEARCH 9
-#define DC_MIN_CHDWELL_TIMEOUT 10
-#define DC_START_SEARCH_CANCELED 11
-#define DC_SET_POWER_MODE 12
-#define DC_INIT 13
-#define DC_SEARCH_OPPORTUNITY 14
-#define DC_RECEIVED_ANY_BEACON 15
-#define DC_RECEIVED_MY_BEACON 16
-#define DC_PROFILE_IS_ADHOC_BUT_BSS_IS_INFRA 17
-#define DC_PS_ENABLED_BUT_ATHEROS_IE_ABSENT 18
-#define DC_BSS_ADHOC_CHANNEL_NOT_ALLOWED 19
-#define DC_SET_BEACON_UPDATE 20
-#define DC_BEACON_UPDATE_COMPLETE 21
-#define DC_END_SEARCH_BEACON_UPDATE_COMP_CB 22
-#define DC_BSSINFO_EVENT_DROPPED 23
-#define DC_IEEEPS_ENABLED_BUT_ATIM_ABSENT 24
-#define DC_DBGID_DEFINITION_END
-
-/* CO debug identifier definitions */
-#define CO_DBGID_DEFINITION_START
-#define CO_INIT 1
-#define CO_ACQUIRE_LOCK 2
-#define CO_START_OP1 3
-#define CO_START_OP2 4
-#define CO_DRAIN_TX_COMPLETE_CB 5
-#define CO_CHANGE_CHANNEL_CB 6
-#define CO_RETURN_TO_HOME_CHANNEL 7
-#define CO_FINISH_OP_TIMEOUT 8
-#define CO_OP_END 9
-#define CO_CANCEL_OP 10
-#define CO_CHANGE_CHANNEL 11
-#define CO_RELEASE_LOCK 12
-#define CO_CHANGE_STATE 13
-#define CO_DBGID_DEFINITION_END
-
-/* RO debug identifier definitions */
-#define RO_DBGID_DEFINITION_START
-#define RO_REFRESH_ROAM_TABLE 1
-#define RO_UPDATE_ROAM_CANDIDATE 2
-#define RO_UPDATE_ROAM_CANDIDATE_CB 3
-#define RO_UPDATE_ROAM_CANDIDATE_FINISH 4
-#define RO_REFRESH_ROAM_TABLE_DONE 5
-#define RO_PERIODIC_SEARCH_CB 6
-#define RO_PERIODIC_SEARCH_TIMEOUT 7
-#define RO_INIT 8
-#define RO_BMISS_STATE1 9
-#define RO_BMISS_STATE2 10
-#define RO_SET_PERIODIC_SEARCH_ENABLE 11
-#define RO_SET_PERIODIC_SEARCH_DISABLE 12
-#define RO_ENABLE_SQ_THRESHOLD 13
-#define RO_DISABLE_SQ_THRESHOLD 14
-#define RO_ADD_BSS_TO_ROAM_TABLE 15
-#define RO_SET_PERIODIC_SEARCH_MODE 16
-#define RO_CONFIGURE_SQ_THRESHOLD1 17
-#define RO_CONFIGURE_SQ_THRESHOLD2 18
-#define RO_CONFIGURE_SQ_PARAMS 19
-#define RO_LOW_SIGNAL_QUALITY_EVENT 20
-#define RO_HIGH_SIGNAL_QUALITY_EVENT 21
-#define RO_REMOVE_BSS_FROM_ROAM_TABLE 22
-#define RO_UPDATE_CONNECTION_STATE_METRIC 23
-#define RO_DBGID_DEFINITION_END
-
-/* CM debug identifier definitions */
-#define CM_DBGID_DEFINITION_START
-#define CM_INITIATE_HANDOFF 1
-#define CM_INITIATE_HANDOFF_CB 2
-#define CM_CONNECT_EVENT 3
-#define CM_DISCONNECT_EVENT 4
-#define CM_INIT 5
-#define CM_HANDOFF_SOURCE 6
-#define CM_SET_HANDOFF_TRIGGERS 7
-#define CM_CONNECT_REQUEST 8
-#define CM_CONNECT_REQUEST_CB 9
-#define CM_CONTINUE_SCAN_CB 10
-#define CM_DBGID_DEFINITION_END
-
-
-/* mgmt debug identifier definitions */
-#define MGMT_DBGID_DEFINITION_START
-#define KEYMGMT_CONNECTION_INIT 1
-#define KEYMGMT_CONNECTION_COMPLETE 2
-#define KEYMGMT_CONNECTION_CLOSE 3
-#define KEYMGMT_ADD_KEY 4
-#define MLME_NEW_STATE 5
-#define MLME_CONN_INIT 6
-#define MLME_CONN_COMPLETE 7
-#define MLME_CONN_CLOSE 8
-#define MGMT_DBGID_DEFINITION_END
-
-/* TMR debug identifier definitions */
-#define TMR_DBGID_DEFINITION_START
-#define TMR_HANG_DETECTED 1
-#define TMR_WDT_TRIGGERED 2
-#define TMR_WDT_RESET 3
-#define TMR_HANDLER_ENTRY 4
-#define TMR_HANDLER_EXIT 5
-#define TMR_SAVED_START 6
-#define TMR_SAVED_END 7
-#define TMR_DBGID_DEFINITION_END
-
-/* BTCOEX debug identifier definitions */
-#define BTCOEX_DBGID_DEFINITION_START
-#define BTCOEX_STATUS_CMD 1
-#define BTCOEX_PARAMS_CMD 2
-#define BTCOEX_ANT_CONFIG 3
-#define BTCOEX_COLOCATED_BT_DEVICE 4
-#define BTCOEX_CLOSE_RANGE_SCO_ON 5
-#define BTCOEX_CLOSE_RANGE_SCO_OFF 6
-#define BTCOEX_CLOSE_RANGE_A2DP_ON 7
-#define BTCOEX_CLOSE_RANGE_A2DP_OFF 8
-#define BTCOEX_A2DP_PROTECT_ON 9
-#define BTCOEX_A2DP_PROTECT_OFF 10
-#define BTCOEX_SCO_PROTECT_ON 11
-#define BTCOEX_SCO_PROTECT_OFF 12
-#define BTCOEX_CLOSE_RANGE_DETECTOR_START 13
-#define BTCOEX_CLOSE_RANGE_DETECTOR_STOP 14
-#define BTCOEX_CLOSE_RANGE_TOGGLE 15
-#define BTCOEX_CLOSE_RANGE_TOGGLE_RSSI_LRCNT 16
-#define BTCOEX_CLOSE_RANGE_RSSI_THRESH 17
-#define BTCOEX_CLOSE_RANGE_LOW_RATE_THRESH 18
-#define BTCOEX_PTA_PRI_INTR_HANDLER 19
-#define BTCOEX_PSPOLL_QUEUED 20
-#define BTCOEX_PSPOLL_COMPLETE 21
-#define BTCOEX_DBG_PM_AWAKE 22
-#define BTCOEX_DBG_PM_SLEEP 23
-#define BTCOEX_DBG_SCO_COEX_ON 24
-#define BTCOEX_SCO_DATARECEIVE 25
-#define BTCOEX_INTR_INIT 26
-#define BTCOEX_PTA_PRI_DIFF 27
-#define BTCOEX_TIM_NOTIFICATION 28
-#define BTCOEX_SCO_WAKEUP_ON_DATA 29
-#define BTCOEX_SCO_SLEEP 30
-#define BTCOEX_SET_WEIGHTS 31
-#define BTCOEX_SCO_DATARECEIVE_LATENCY_VAL 32
-#define BTCOEX_SCO_MEASURE_TIME_DIFF 33
-#define BTCOEX_SET_EOL_VAL 34
-#define BTCOEX_OPT_DETECT_HANDLER 35
-#define BTCOEX_SCO_TOGGLE_STATE 36
-#define BTCOEX_SCO_STOMP 37
-#define BTCOEX_NULL_COMP_CALLBACK 38
-#define BTCOEX_RX_INCOMING 39
-#define BTCOEX_RX_INCOMING_CTL 40
-#define BTCOEX_RX_INCOMING_MGMT 41
-#define BTCOEX_RX_INCOMING_DATA 42
-#define BTCOEX_RTS_RECEPTION 43
-#define BTCOEX_FRAME_PRI_LOW_RATE_THRES 44
-#define BTCOEX_PM_FAKE_SLEEP 45
-#define BTCOEX_ACL_COEX_STATUS 46
-#define BTCOEX_ACL_COEX_DETECTION 47
-#define BTCOEX_A2DP_COEX_STATUS 48
-#define BTCOEX_SCO_STATUS 49
-#define BTCOEX_WAKEUP_ON_DATA 50
-#define BTCOEX_DATARECEIVE 51
-#define BTCOEX_GET_MAX_AGGR_SIZE 53
-#define BTCOEX_MAX_AGGR_AVAIL_TIME 54
-#define BTCOEX_DBG_WBTIMER_INTR 55
-#define BTCOEX_DBG_SCO_SYNC 57
-#define BTCOEX_UPLINK_QUEUED_RATE 59
-#define BTCOEX_DBG_UPLINK_ENABLE_EOL 60
-#define BTCOEX_UPLINK_FRAME_DURATION 61
-#define BTCOEX_UPLINK_SET_EOL 62
-#define BTCOEX_DBG_EOL_EXPIRED 63
-#define BTCOEX_DBG_DATA_COMPLETE 64
-#define BTCOEX_UPLINK_QUEUED_TIMESTAMP 65
-#define BTCOEX_DBG_DATA_COMPLETE_TIME 66
-#define BTCOEX_DBG_A2DP_ROLE_IS_SLAVE 67
-#define BTCOEX_DBG_A2DP_ROLE_IS_MASTER 68
-#define BTCOEX_DBG_UPLINK_SEQ_NUM 69
-#define BTCOEX_UPLINK_AGGR_SEQ 70
-#define BTCOEX_DBG_TX_COMP_SEQ_NO 71
-#define BTCOEX_DBG_MAX_AGGR_PAUSE_STATE 72
-#define BTCOEX_DBG_ACL_TRAFFIC 73
-#define BTCOEX_CURR_AGGR_PROP 74
-#define BTCOEX_DBG_SCO_GET_PER_TIME_DIFF 75
-#define BTCOEX_PSPOLL_PROCESS 76
-#define BTCOEX_RETURN_FROM_MAC 77
-#define BTCOEX_FREED_REQUEUED_CNT 78
-#define BTCOEX_DBG_TOGGLE_LOW_RATES 79
-#define BTCOEX_MAC_GOES_TO_SLEEP 80
-#define BTCOEX_DBG_A2DP_NO_SYNC 81
-#define BTCOEX_RETURN_FROM_MAC_HOLD_Q_INFO 82
-#define BTCOEX_RETURN_FROM_MAC_AC 83
-#define BTCOEX_DBG_DTIM_RECV 84
-#define BTCOEX_IS_PRE_UPDATE 86
-#define BTCOEX_ENQUEUED_BIT_MAP 87
-#define BTCOEX_TX_COMPLETE_FIRST_DESC_STATS 88
-#define BTCOEX_UPLINK_DESC 89
-#define BTCOEX_SCO_GET_PER_FIRST_FRM_TIMESTAMP 90
-#define BTCOEX_DBG_RECV_ACK 94
-#define BTCOEX_DBG_ADDBA_INDICATION 95
-#define BTCOEX_TX_COMPLETE_EOL_FAILED 96
-#define BTCOEX_DBG_A2DP_USAGE_COMPLETE 97
-#define BTCOEX_DBG_A2DP_STOMP_FOR_BCN_HANDLER 98
-#define BTCOEX_DBG_A2DP_SYNC_INTR 99
-#define BTCOEX_DBG_A2DP_STOMP_FOR_BCN_RECEPTION 100
-#define BTCOEX_FORM_AGGR_CURR_AGGR 101
-#define BTCOEX_DBG_TOGGLE_A2DP_BURST_CNT 102
-#define BTCOEX_DBG_BT_TRAFFIC 103
-#define BTCOEX_DBG_STOMP_BT_TRAFFIC 104
-#define BTCOEX_RECV_NULL 105
-#define BTCOEX_DBG_A2DP_MASTER_BT_END 106
-#define BTCOEX_DBG_A2DP_BT_START 107
-#define BTCOEX_DBG_A2DP_SLAVE_BT_END 108
-#define BTCOEX_DBG_A2DP_STOMP_BT 109
-#define BTCOEX_DBG_GO_TO_SLEEP 110
-#define BTCOEX_DBG_A2DP_PKT 111
-#define BTCOEX_DBG_A2DP_PSPOLL_DATA_RECV 112
-#define BTCOEX_DBG_A2DP_NULL 113
-#define BTCOEX_DBG_UPLINK_DATA 114
-#define BTCOEX_DBG_A2DP_STOMP_LOW_PRIO_NULL 115
-#define BTCOEX_DBG_ADD_BA_RESP_TIMEOUT 116
-#define BTCOEX_DBG_TXQ_STATE 117
-#define BTCOEX_DBG_ALLOW_SCAN 118
-#define BTCOEX_DBG_SCAN_REQUEST 119
-#define BTCOEX_A2DP_SLEEP 127
-#define BTCOEX_DBG_DATA_ACTIV_TIMEOUT 128
-#define BTCOEX_DBG_SWITCH_TO_PSPOLL_ON_MODE 129
-#define BTCOEX_DBG_SWITCH_TO_PSPOLL_OFF_MODE 130
-#define BTCOEX_DATARECEIVE_AGGR 131
-#define BTCOEX_DBG_DATA_RECV_SLEEPING_PENDING 132
-#define BTCOEX_DBG_DATARESP_TIMEOUT 133
-#define BTCOEX_BDG_BMISS 134
-#define BTCOEX_DBG_DATA_RECV_WAKEUP_TIM 135
-#define BTCOEX_DBG_SECOND_BMISS 136
-#define BTCOEX_DBG_SET_WLAN_STATE 138
-#define BTCOEX_BDG_FIRST_BMISS 139
-#define BTCOEX_DBG_A2DP_CHAN_OP 140
-#define BTCOEX_DBG_A2DP_INTR 141
-#define BTCOEX_DBG_BT_INQUIRY 142
-#define BTCOEX_DBG_BT_INQUIRY_DATA_FETCH 143
-#define BTCOEX_DBG_POST_INQUIRY_FINISH 144
-#define BTCOEX_DBG_SCO_OPT_MODE_TIMER_HANDLER 145
-#define BTCOEX_DBG_NULL_FRAME_SLEEP 146
-#define BTCOEX_DBG_NULL_FRAME_AWAKE 147
-#define BTCOEX_DBG_SET_AGGR_SIZE 152
-#define BTCOEX_DBG_TEAR_BA_TIMEOUT 153
-#define BTCOEX_DBG_MGMT_FRAME_SEQ_NO 154
-#define BTCOEX_DBG_SCO_STOMP_HIGH_PRI 155
-#define BTCOEX_DBG_COLOCATED_BT_DEV 156
-#define BTCOEX_DBG_FE_ANT_TYPE 157
-#define BTCOEX_DBG_BT_INQUIRY_CMD 158
-#define BTCOEX_DBG_SCO_CONFIG 159
-#define BTCOEX_DBG_SCO_PSPOLL_CONFIG 160
-#define BTCOEX_DBG_SCO_OPTMODE_CONFIG 161
-#define BTCOEX_DBG_A2DP_CONFIG 162
-#define BTCOEX_DBG_A2DP_PSPOLL_CONFIG 163
-#define BTCOEX_DBG_A2DP_OPTMODE_CONFIG 164
-#define BTCOEX_DBG_ACLCOEX_CONFIG 165
-#define BTCOEX_DBG_ACLCOEX_PSPOLL_CONFIG 166
-#define BTCOEX_DBG_ACLCOEX_OPTMODE_CONFIG 167
-#define BTCOEX_DBG_DEBUG_CMD 168
-#define BTCOEX_DBG_SET_BT_OPERATING_STATUS 169
-#define BTCOEX_DBG_GET_CONFIG 170
-#define BTCOEX_DBG_GET_STATS 171
-#define BTCOEX_DBG_BT_OPERATING_STATUS 172
-#define BTCOEX_DBG_PERFORM_RECONNECT 173
-#define BTCOEX_DBG_ACL_WLAN_MED 175
-#define BTCOEX_DBG_ACL_BT_MED 176
-#define BTCOEX_DBG_WLAN_CONNECT 177
-#define BTCOEX_DBG_A2DP_DUAL_START 178
-#define BTCOEX_DBG_PMAWAKE_NOTIFY 179
-#define BTCOEX_DBG_BEACON_SCAN_ENABLE 180
-#define BTCOEX_DBG_BEACON_SCAN_DISABLE 181
-#define BTCOEX_DBG_RX_NOTIFY 182
-#define BTCOEX_SCO_GET_PER_SECOND_FRM_TIMESTAMP 183
-#define BTCOEX_DBG_TXQ_DETAILS 184
-#define BTCOEX_DBG_SCO_STOMP_LOW_PRI 185
-#define BTCOEX_DBG_A2DP_FORCE_SCAN 186
-#define BTCOEX_DBG_DTIM_STOMP_COMP 187
-#define BTCOEX_ACL_PRESENCE_TIMER 188
-#define BTCOEX_DBGID_DEFINITION_END
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _DBGLOG_ID_H_ */
diff --git a/drivers/staging/ath6kl/include/common/discovery.h b/drivers/staging/ath6kl/include/common/discovery.h
deleted file mode 100644
index da1b3324506..00000000000
--- a/drivers/staging/ath6kl/include/common/discovery.h
+++ /dev/null
@@ -1,75 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="discovery.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef _DISCOVERY_H_
-#define _DISCOVERY_H_
-
-/*
- * DC_SCAN_PRIORITY is an 8-bit bitmap of the scan priority of a channel
- */
-typedef enum {
- DEFAULT_SCPRI = 0x01,
- POPULAR_SCPRI = 0x02,
- SSIDS_SCPRI = 0x04,
- PROF_SCPRI = 0x08,
-} DC_SCAN_PRIORITY;
-
-/* The following search type construct can be used to manipulate the behavior of the search module based on different bits set */
-typedef enum {
- SCAN_RESET = 0,
- SCAN_ALL = (DEFAULT_SCPRI | POPULAR_SCPRI | \
- SSIDS_SCPRI | PROF_SCPRI),
-
- SCAN_POPULAR = (POPULAR_SCPRI | SSIDS_SCPRI | PROF_SCPRI),
- SCAN_SSIDS = (SSIDS_SCPRI | PROF_SCPRI),
- SCAN_PROF_MASK = (PROF_SCPRI),
- SCAN_MULTI_CHANNEL = 0x000100,
- SCAN_DETERMINISTIC = 0x000200,
- SCAN_PROFILE_MATCH_TERMINATED = 0x000400,
- SCAN_HOME_CHANNEL_SKIP = 0x000800,
- SCAN_CHANNEL_LIST_CONTINUE = 0x001000,
- SCAN_CURRENT_SSID_SKIP = 0x002000,
- SCAN_ACTIVE_PROBE_DISABLE = 0x004000,
- SCAN_CHANNEL_HINT_ONLY = 0x008000,
- SCAN_ACTIVE_CHANNELS_ONLY = 0x010000,
- SCAN_UNUSED1 = 0x020000, /* unused */
- SCAN_PERIODIC = 0x040000,
- SCAN_FIXED_DURATION = 0x080000,
- SCAN_AP_ASSISTED = 0x100000,
-} DC_SCAN_TYPE;
-
-typedef enum {
- BSS_REPORTING_DEFAULT = 0x0,
- EXCLUDE_NON_SCAN_RESULTS = 0x1, /* Exclude results outside of scan */
-} DC_BSS_REPORTING_POLICY;
-
-typedef enum {
- DC_IGNORE_WPAx_GROUP_CIPHER = 0x01,
- DC_PROFILE_MATCH_DONE = 0x02,
- DC_IGNORE_AAC_BEACON = 0x04,
- DC_CSA_FOLLOW_BSS = 0x08,
-} DC_PROFILE_FILTER;
-
-#define DEFAULT_DC_PROFILE_FILTER (DC_CSA_FOLLOW_BSS)
-
-#endif /* _DISCOVERY_H_ */
diff --git a/drivers/staging/ath6kl/include/common/epping_test.h b/drivers/staging/ath6kl/include/common/epping_test.h
deleted file mode 100644
index 9eb5fdfa746..00000000000
--- a/drivers/staging/ath6kl/include/common/epping_test.h
+++ /dev/null
@@ -1,111 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//
-
-/* This file contains shared definitions for the host/target endpoint ping test */
-
-#ifndef EPPING_TEST_H_
-#define EPPING_TEST_H_
-
- /* alignment to 4-bytes */
-#define EPPING_ALIGNMENT_PAD (((sizeof(struct htc_frame_hdr) + 3) & (~0x3)) - sizeof(struct htc_frame_hdr))
-
-#ifndef A_OFFSETOF
-#define A_OFFSETOF(type,field) (int)(&(((type *)NULL)->field))
-#endif
-
-#define EPPING_RSVD_FILL 0xCC
-
-#define HCI_RSVD_EXPECTED_PKT_TYPE_RECV_OFFSET 7
-
-typedef PREPACK struct {
- u8 _HCIRsvd[8]; /* reserved for HCI packet header (GMBOX) testing */
- u8 StreamEcho_h; /* stream no. to echo this packet on (filled by host) */
- u8 StreamEchoSent_t; /* stream no. packet was echoed to (filled by target)
- When echoed: StreamEchoSent_t == StreamEcho_h */
- u8 StreamRecv_t; /* stream no. that target received this packet on (filled by target) */
- u8 StreamNo_h; /* stream number to send on (filled by host) */
- u8 Magic_h[4]; /* magic number to filter for this packet on the host*/
- u8 _rsvd[6]; /* reserved fields that must be set to a "reserved" value
- since this packet maps to a 14-byte ethernet frame we want
- to make sure ethertype field is set to something unknown */
-
- u8 _pad[2]; /* padding for alignment */
- u8 TimeStamp[8]; /* timestamp of packet (host or target) */
- u32 HostContext_h; /* 4 byte host context, target echos this back */
- u32 SeqNo; /* sequence number (set by host or target) */
- u16 Cmd_h; /* ping command (filled by host) */
- u16 CmdFlags_h; /* optional flags */
- u8 CmdBuffer_h[8]; /* buffer for command (host -> target) */
- u8 CmdBuffer_t[8]; /* buffer for command (target -> host) */
- u16 DataLength; /* length of data */
- u16 DataCRC; /* 16 bit CRC of data */
- u16 HeaderCRC; /* header CRC (fields : StreamNo_h to end, minus HeaderCRC) */
-} POSTPACK EPPING_HEADER;
-
-#define EPPING_PING_MAGIC_0 0xAA
-#define EPPING_PING_MAGIC_1 0x55
-#define EPPING_PING_MAGIC_2 0xCE
-#define EPPING_PING_MAGIC_3 0xEC
-
-
-
-#define IS_EPPING_PACKET(pPkt) (((pPkt)->Magic_h[0] == EPPING_PING_MAGIC_0) && \
- ((pPkt)->Magic_h[1] == EPPING_PING_MAGIC_1) && \
- ((pPkt)->Magic_h[2] == EPPING_PING_MAGIC_2) && \
- ((pPkt)->Magic_h[3] == EPPING_PING_MAGIC_3))
-
-#define SET_EPPING_PACKET_MAGIC(pPkt) { (pPkt)->Magic_h[0] = EPPING_PING_MAGIC_0; \
- (pPkt)->Magic_h[1] = EPPING_PING_MAGIC_1; \
- (pPkt)->Magic_h[2] = EPPING_PING_MAGIC_2; \
- (pPkt)->Magic_h[3] = EPPING_PING_MAGIC_3;}
-
-#define CMD_FLAGS_DATA_CRC (1 << 0) /* DataCRC field is valid */
-#define CMD_FLAGS_DELAY_ECHO (1 << 1) /* delay the echo of the packet */
-#define CMD_FLAGS_NO_DROP (1 << 2) /* do not drop at HTC layer no matter what the stream is */
-
-#define IS_EPING_PACKET_NO_DROP(pPkt) ((pPkt)->CmdFlags_h & CMD_FLAGS_NO_DROP)
-
-#define EPPING_CMD_ECHO_PACKET 1 /* echo packet test */
-#define EPPING_CMD_RESET_RECV_CNT 2 /* reset recv count */
-#define EPPING_CMD_CAPTURE_RECV_CNT 3 /* fetch recv count, 4-byte count returned in CmdBuffer_t */
-#define EPPING_CMD_NO_ECHO 4 /* non-echo packet test (tx-only) */
-#define EPPING_CMD_CONT_RX_START 5 /* continuous RX packets, parameters are in CmdBuffer_h */
-#define EPPING_CMD_CONT_RX_STOP 6 /* stop continuous RX packet transmission */
-
- /* test command parameters may be no more than 8 bytes */
-typedef PREPACK struct {
- u16 BurstCnt; /* number of packets to burst together (for HTC 2.1 testing) */
- u16 PacketLength; /* length of packet to generate including header */
- u16 Flags; /* flags */
-
-#define EPPING_CONT_RX_DATA_CRC (1 << 0) /* Add CRC to all data */
-#define EPPING_CONT_RX_RANDOM_DATA (1 << 1) /* randomize the data pattern */
-#define EPPING_CONT_RX_RANDOM_LEN (1 << 2) /* randomize the packet lengths */
-} POSTPACK EPPING_CONT_RX_PARAMS;
-
-#define EPPING_HDR_CRC_OFFSET A_OFFSETOF(EPPING_HEADER,StreamNo_h)
-#define EPPING_HDR_BYTES_CRC (sizeof(EPPING_HEADER) - EPPING_HDR_CRC_OFFSET - (sizeof(u16)))
-
-#define HCI_TRANSPORT_STREAM_NUM 16 /* this number is higher than the define WMM AC classes so we
- can use this to distinguish packets */
-
-#endif /*EPPING_TEST_H_*/
diff --git a/drivers/staging/ath6kl/include/common/gmboxif.h b/drivers/staging/ath6kl/include/common/gmboxif.h
deleted file mode 100644
index ea11c14def4..00000000000
--- a/drivers/staging/ath6kl/include/common/gmboxif.h
+++ /dev/null
@@ -1,70 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef __GMBOXIF_H__
-#define __GMBOXIF_H__
-
-/* GMBOX interface definitions */
-
-#define AR6K_GMBOX_CREDIT_COUNTER 1 /* we use credit counter 1 to track credits */
-#define AR6K_GMBOX_CREDIT_SIZE_COUNTER 2 /* credit counter 2 is used to pass the size of each credit */
-
-
- /* HCI UART transport definitions when used over GMBOX interface */
-#define HCI_UART_COMMAND_PKT 0x01
-#define HCI_UART_ACL_PKT 0x02
-#define HCI_UART_SCO_PKT 0x03
-#define HCI_UART_EVENT_PKT 0x04
-
- /* definitions for BT HCI packets */
-typedef PREPACK struct {
- u16 Flags_ConnHandle;
- u16 Length;
-} POSTPACK BT_HCI_ACL_HEADER;
-
-typedef PREPACK struct {
- u16 Flags_ConnHandle;
- u8 Length;
-} POSTPACK BT_HCI_SCO_HEADER;
-
-typedef PREPACK struct {
- u16 OpCode;
- u8 ParamLength;
-} POSTPACK BT_HCI_COMMAND_HEADER;
-
-typedef PREPACK struct {
- u8 EventCode;
- u8 ParamLength;
-} POSTPACK BT_HCI_EVENT_HEADER;
-
-/* MBOX host interrupt signal assignments */
-
-#define MBOX_SIG_HCI_BRIDGE_MAX 8
-#define MBOX_SIG_HCI_BRIDGE_BT_ON 0
-#define MBOX_SIG_HCI_BRIDGE_BT_OFF 1
-#define MBOX_SIG_HCI_BRIDGE_BAUD_SET 2
-#define MBOX_SIG_HCI_BRIDGE_PWR_SAV_ON 3
-#define MBOX_SIG_HCI_BRIDGE_PWR_SAV_OFF 4
-
-
-#endif /* __GMBOXIF_H__ */
-
diff --git a/drivers/staging/ath6kl/include/common/gpio_reg.h b/drivers/staging/ath6kl/include/common/gpio_reg.h
deleted file mode 100644
index f9d425d48dc..00000000000
--- a/drivers/staging/ath6kl/include/common/gpio_reg.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _GPIO_REG_REG_H_
-#define _GPIO_REG_REG_H_
-
-#define GPIO_PIN10_ADDRESS 0x00000050
-#define GPIO_PIN11_ADDRESS 0x00000054
-#define GPIO_PIN12_ADDRESS 0x00000058
-#define GPIO_PIN13_ADDRESS 0x0000005c
-
-#endif /* _GPIO_REG_H_ */
diff --git a/drivers/staging/ath6kl/include/common/htc.h b/drivers/staging/ath6kl/include/common/htc.h
deleted file mode 100644
index 85cbfa89d67..00000000000
--- a/drivers/staging/ath6kl/include/common/htc.h
+++ /dev/null
@@ -1,227 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="htc.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef __HTC_H__
-#define __HTC_H__
-
-#define A_OFFSETOF(type,field) (unsigned long)(&(((type *)NULL)->field))
-
-#define ASSEMBLE_UNALIGNED_UINT16(p,highbyte,lowbyte) \
- (((u16)(((u8 *)(p))[(highbyte)])) << 8 | (u16)(((u8 *)(p))[(lowbyte)]))
-
-/* alignment independent macros (little-endian) to fetch UINT16s or UINT8s from a
- * structure using only the type and field name.
- * Use these macros if there is the potential for unaligned buffer accesses. */
-#define A_GET_UINT16_FIELD(p,type,field) \
- ASSEMBLE_UNALIGNED_UINT16(p,\
- A_OFFSETOF(type,field) + 1, \
- A_OFFSETOF(type,field))
-
-#define A_SET_UINT16_FIELD(p,type,field,value) \
-{ \
- ((u8 *)(p))[A_OFFSETOF(type,field)] = (u8)(value); \
- ((u8 *)(p))[A_OFFSETOF(type,field) + 1] = (u8)((value) >> 8); \
-}
-
-#define A_GET_UINT8_FIELD(p,type,field) \
- ((u8 *)(p))[A_OFFSETOF(type,field)]
-
-#define A_SET_UINT8_FIELD(p,type,field,value) \
- ((u8 *)(p))[A_OFFSETOF(type,field)] = (value)
-
-/****** DANGER DANGER ***************
- *
- * The frame header length and message formats defined herein were
- * selected to accommodate optimal alignment for target processing. This reduces code
- * size and improves performance.
- *
- * Any changes to the header length may alter the alignment and cause exceptions
- * on the target. When adding to the message structures insure that fields are
- * properly aligned.
- *
- */
-
-/* HTC frame header */
-PREPACK struct htc_frame_hdr {
- /* do not remove or re-arrange these fields, these are minimally required
- * to take advantage of 4-byte lookaheads in some hardware implementations */
- u8 EndpointID;
- u8 Flags;
- u16 PayloadLen; /* length of data (including trailer) that follows the header */
-
- /***** end of 4-byte lookahead ****/
-
- u8 ControlBytes[2];
-
- /* message payload starts after the header */
-
-} POSTPACK;
-
-/* frame header flags */
-
- /* send direction */
-#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0)
-#define HTC_FLAGS_SEND_BUNDLE (1 << 1) /* start or part of bundle */
- /* receive direction */
-#define HTC_FLAGS_RECV_UNUSED_0 (1 << 0) /* bit 0 unused */
-#define HTC_FLAGS_RECV_TRAILER (1 << 1) /* bit 1 trailer data present */
-#define HTC_FLAGS_RECV_UNUSED_2 (1 << 0) /* bit 2 unused */
-#define HTC_FLAGS_RECV_UNUSED_3 (1 << 0) /* bit 3 unused */
-#define HTC_FLAGS_RECV_BUNDLE_CNT_MASK (0xF0) /* bits 7..4 */
-#define HTC_FLAGS_RECV_BUNDLE_CNT_SHIFT 4
-
-#define HTC_HDR_LENGTH (sizeof(struct htc_frame_hdr))
-#define HTC_MAX_TRAILER_LENGTH 255
-#define HTC_MAX_PAYLOAD_LENGTH (4096 - sizeof(struct htc_frame_hdr))
-
-/* HTC control message IDs */
-
-#define HTC_MSG_READY_ID 1
-#define HTC_MSG_CONNECT_SERVICE_ID 2
-#define HTC_MSG_CONNECT_SERVICE_RESPONSE_ID 3
-#define HTC_MSG_SETUP_COMPLETE_ID 4
-#define HTC_MSG_SETUP_COMPLETE_EX_ID 5
-
-#define HTC_MAX_CONTROL_MESSAGE_LENGTH 256
-
-/* base message ID header */
-typedef PREPACK struct {
- u16 MessageID;
-} POSTPACK HTC_UNKNOWN_MSG;
-
-/* HTC ready message
- * direction : target-to-host */
-typedef PREPACK struct {
- u16 MessageID; /* ID */
- u16 CreditCount; /* number of credits the target can offer */
- u16 CreditSize; /* size of each credit */
- u8 MaxEndpoints; /* maximum number of endpoints the target has resources for */
- u8 _Pad1;
-} POSTPACK HTC_READY_MSG;
-
- /* extended HTC ready message */
-typedef PREPACK struct {
- HTC_READY_MSG Version2_0_Info; /* legacy version 2.0 information at the front... */
- /* extended information */
- u8 HTCVersion;
- u8 MaxMsgsPerHTCBundle;
-} POSTPACK HTC_READY_EX_MSG;
-
-#define HTC_VERSION_2P0 0x00
-#define HTC_VERSION_2P1 0x01 /* HTC 2.1 */
-
-#define HTC_SERVICE_META_DATA_MAX_LENGTH 128
-
-/* connect service
- * direction : host-to-target */
-typedef PREPACK struct {
- u16 MessageID;
- u16 ServiceID; /* service ID of the service to connect to */
- u16 ConnectionFlags; /* connection flags */
-
-#define HTC_CONNECT_FLAGS_REDUCE_CREDIT_DRIBBLE (1 << 2) /* reduce credit dribbling when
- the host needs credits */
-#define HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_MASK (0x3)
-#define HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH 0x0
-#define HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_ONE_HALF 0x1
-#define HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS 0x2
-#define HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_UNITY 0x3
-
- u8 ServiceMetaLength; /* length of meta data that follows */
- u8 _Pad1;
-
- /* service-specific meta data starts after the header */
-
-} POSTPACK HTC_CONNECT_SERVICE_MSG;
-
-/* connect response
- * direction : target-to-host */
-typedef PREPACK struct {
- u16 MessageID;
- u16 ServiceID; /* service ID that the connection request was made */
- u8 Status; /* service connection status */
- u8 EndpointID; /* assigned endpoint ID */
- u16 MaxMsgSize; /* maximum expected message size on this endpoint */
- u8 ServiceMetaLength; /* length of meta data that follows */
- u8 _Pad1;
-
- /* service-specific meta data starts after the header */
-
-} POSTPACK HTC_CONNECT_SERVICE_RESPONSE_MSG;
-
-typedef PREPACK struct {
- u16 MessageID;
- /* currently, no other fields */
-} POSTPACK HTC_SETUP_COMPLETE_MSG;
-
- /* extended setup completion message */
-typedef PREPACK struct {
- u16 MessageID;
- u32 SetupFlags;
- u8 MaxMsgsPerBundledRecv;
- u8 Rsvd[3];
-} POSTPACK HTC_SETUP_COMPLETE_EX_MSG;
-
-#define HTC_SETUP_COMPLETE_FLAGS_ENABLE_BUNDLE_RECV (1 << 0)
-
-/* connect response status codes */
-#define HTC_SERVICE_SUCCESS 0 /* success */
-#define HTC_SERVICE_NOT_FOUND 1 /* service could not be found */
-#define HTC_SERVICE_FAILED 2 /* specific service failed the connect */
-#define HTC_SERVICE_NO_RESOURCES 3 /* no resources (i.e. no more endpoints) */
-#define HTC_SERVICE_NO_MORE_EP 4 /* specific service is not allowing any more
- endpoints */
-
-/* report record IDs */
-
-#define HTC_RECORD_NULL 0
-#define HTC_RECORD_CREDITS 1
-#define HTC_RECORD_LOOKAHEAD 2
-#define HTC_RECORD_LOOKAHEAD_BUNDLE 3
-
-typedef PREPACK struct {
- u8 RecordID; /* Record ID */
- u8 Length; /* Length of record */
-} POSTPACK HTC_RECORD_HDR;
-
-typedef PREPACK struct {
- u8 EndpointID; /* Endpoint that owns these credits */
- u8 Credits; /* credits to report since last report */
-} POSTPACK HTC_CREDIT_REPORT;
-
-typedef PREPACK struct {
- u8 PreValid; /* pre valid guard */
- u8 LookAhead[4]; /* 4 byte lookahead */
- u8 PostValid; /* post valid guard */
-
- /* NOTE: the LookAhead array is guarded by a PreValid and Post Valid guard bytes.
- * The PreValid bytes must equal the inverse of the PostValid byte */
-
-} POSTPACK HTC_LOOKAHEAD_REPORT;
-
-typedef PREPACK struct {
- u8 LookAhead[4]; /* 4 byte lookahead */
-} POSTPACK HTC_BUNDLED_LOOKAHEAD_REPORT;
-
-#endif /* __HTC_H__ */
-
diff --git a/drivers/staging/ath6kl/include/common/htc_services.h b/drivers/staging/ath6kl/include/common/htc_services.h
deleted file mode 100644
index fb22268a8d8..00000000000
--- a/drivers/staging/ath6kl/include/common/htc_services.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="htc_services.h" company="Atheros">
-// Copyright (c) 2007 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef __HTC_SERVICES_H__
-#define __HTC_SERVICES_H__
-
-/* Current service IDs */
-
-typedef enum {
- RSVD_SERVICE_GROUP = 0,
- WMI_SERVICE_GROUP = 1,
-
- HTC_TEST_GROUP = 254,
- HTC_SERVICE_GROUP_LAST = 255
-}HTC_SERVICE_GROUP_IDS;
-
-#define MAKE_SERVICE_ID(group,index) \
- (int)(((int)group << 8) | (int)(index))
-
-/* NOTE: service ID of 0x0000 is reserved and should never be used */
-#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP,1)
-#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP,0)
-#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP,1)
-#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP,2)
-#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP,3)
-#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP,4)
-#define WMI_MAX_SERVICES 5
-
-/* raw stream service (i.e. flash, tcmd, calibration apps) */
-#define HTC_RAW_STREAMS_SVC MAKE_SERVICE_ID(HTC_TEST_GROUP,0)
-
-#endif /*HTC_SERVICES_H_*/
diff --git a/drivers/staging/ath6kl/include/common/pkt_log.h b/drivers/staging/ath6kl/include/common/pkt_log.h
deleted file mode 100644
index a3719adf54c..00000000000
--- a/drivers/staging/ath6kl/include/common/pkt_log.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2005-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef __PKT_LOG_H__
-#define __PKT_LOG_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/* Pkt log info */
-typedef PREPACK struct pkt_log_t {
- struct info_t {
- u16 st;
- u16 end;
- u16 cur;
- }info[4096];
- u16 last_idx;
-}POSTPACK PACKET_LOG;
-
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* __PKT_LOG_H__ */
diff --git a/drivers/staging/ath6kl/include/common/roaming.h b/drivers/staging/ath6kl/include/common/roaming.h
deleted file mode 100644
index 8019850a057..00000000000
--- a/drivers/staging/ath6kl/include/common/roaming.h
+++ /dev/null
@@ -1,41 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="roaming.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef _ROAMING_H_
-#define _ROAMING_H_
-
-/*
- * The signal quality could be in terms of either snr or rssi. We should
- * have an enum for both of them. For the time being, we are going to move
- * it to wmi.h that is shared by both host and the target, since we are
- * repartitioning the code to the host
- */
-#define SIGNAL_QUALITY_NOISE_FLOOR -96
-#define SIGNAL_QUALITY_METRICS_NUM_MAX 2
-typedef enum {
- SIGNAL_QUALITY_METRICS_SNR = 0,
- SIGNAL_QUALITY_METRICS_RSSI,
- SIGNAL_QUALITY_METRICS_ALL,
-} SIGNAL_QUALITY_METRICS_TYPE;
-
-#endif /* _ROAMING_H_ */
diff --git a/drivers/staging/ath6kl/include/common/targaddrs.h b/drivers/staging/ath6kl/include/common/targaddrs.h
deleted file mode 100644
index c866cefbd8f..00000000000
--- a/drivers/staging/ath6kl/include/common/targaddrs.h
+++ /dev/null
@@ -1,395 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#ifndef __TARGADDRS_H__
-#define __TARGADDRS_H__
-
-#if defined(AR6002)
-#include "AR6002/addrs.h"
-#endif
-
-/*
- * AR6K option bits, to enable/disable various features.
- * By default, all option bits are 0.
- * These bits can be set in LOCAL_SCRATCH register 0.
- */
-#define AR6K_OPTION_BMI_DISABLE 0x01 /* Disable BMI comm with Host */
-#define AR6K_OPTION_SERIAL_ENABLE 0x02 /* Enable serial port msgs */
-#define AR6K_OPTION_WDT_DISABLE 0x04 /* WatchDog Timer override */
-#define AR6K_OPTION_SLEEP_DISABLE 0x08 /* Disable system sleep */
-#define AR6K_OPTION_STOP_BOOT 0x10 /* Stop boot processes (for ATE) */
-#define AR6K_OPTION_ENABLE_NOANI 0x20 /* Operate without ANI */
-#define AR6K_OPTION_DSET_DISABLE 0x40 /* Ignore DataSets */
-#define AR6K_OPTION_IGNORE_FLASH 0x80 /* Ignore flash during bootup */
-
-/*
- * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
- * host_interest structure. It must match the address of the _host_interest
- * symbol (see linker script).
- *
- * Host Interest is shared between Host and Target in order to coordinate
- * between the two, and is intended to remain constant (with additions only
- * at the end) across software releases.
- *
- * All addresses are available here so that it's possible to
- * write a single binary that works with all Target Types.
- * May be used in assembler code as well as C.
- */
-#define AR6002_HOST_INTEREST_ADDRESS 0x00500400
-#define AR6003_HOST_INTEREST_ADDRESS 0x00540600
-
-
-#define HOST_INTEREST_MAX_SIZE 0x100
-
-#if !defined(__ASSEMBLER__)
-struct register_dump_s;
-struct dbglog_hdr_s;
-
-/*
- * These are items that the Host may need to access
- * via BMI or via the Diagnostic Window. The position
- * of items in this structure must remain constant
- * across firmware revisions!
- *
- * Types for each item must be fixed size across
- * target and host platforms.
- *
- * More items may be added at the end.
- */
-PREPACK struct host_interest_s {
- /*
- * Pointer to application-defined area, if any.
- * Set by Target application during startup.
- */
- u32 hi_app_host_interest; /* 0x00 */
-
- /* Pointer to register dump area, valid after Target crash. */
- u32 hi_failure_state; /* 0x04 */
-
- /* Pointer to debug logging header */
- u32 hi_dbglog_hdr; /* 0x08 */
-
- u32 hi_unused1; /* 0x0c */
-
- /*
- * General-purpose flag bits, similar to AR6000_OPTION_* flags.
- * Can be used by application rather than by OS.
- */
- u32 hi_option_flag; /* 0x10 */
-
- /*
- * Boolean that determines whether or not to
- * display messages on the serial port.
- */
- u32 hi_serial_enable; /* 0x14 */
-
- /* Start address of DataSet index, if any */
- u32 hi_dset_list_head; /* 0x18 */
-
- /* Override Target application start address */
- u32 hi_app_start; /* 0x1c */
-
- /* Clock and voltage tuning */
- u32 hi_skip_clock_init; /* 0x20 */
- u32 hi_core_clock_setting; /* 0x24 */
- u32 hi_cpu_clock_setting; /* 0x28 */
- u32 hi_system_sleep_setting; /* 0x2c */
- u32 hi_xtal_control_setting; /* 0x30 */
- u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
- u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
- u32 hi_ref_voltage_trim_setting; /* 0x3c */
- u32 hi_clock_info; /* 0x40 */
-
- /*
- * Flash configuration overrides, used only
- * when firmware is not executing from flash.
- * (When using flash, modify the global variables
- * with equivalent names.)
- */
- u32 hi_bank0_addr_value; /* 0x44 */
- u32 hi_bank0_read_value; /* 0x48 */
- u32 hi_bank0_write_value; /* 0x4c */
- u32 hi_bank0_config_value; /* 0x50 */
-
- /* Pointer to Board Data */
- u32 hi_board_data; /* 0x54 */
- u32 hi_board_data_initialized; /* 0x58 */
-
- u32 hi_dset_RAM_index_table; /* 0x5c */
-
- u32 hi_desired_baud_rate; /* 0x60 */
- u32 hi_dbglog_config; /* 0x64 */
- u32 hi_end_RAM_reserve_sz; /* 0x68 */
- u32 hi_mbox_io_block_sz; /* 0x6c */
-
- u32 hi_num_bpatch_streams; /* 0x70 -- unused */
- u32 hi_mbox_isr_yield_limit; /* 0x74 */
-
- u32 hi_refclk_hz; /* 0x78 */
- u32 hi_ext_clk_detected; /* 0x7c */
- u32 hi_dbg_uart_txpin; /* 0x80 */
- u32 hi_dbg_uart_rxpin; /* 0x84 */
- u32 hi_hci_uart_baud; /* 0x88 */
- u32 hi_hci_uart_pin_assignments; /* 0x8C */
- /* NOTE: byte [0] = tx pin, [1] = rx pin, [2] = rts pin, [3] = cts pin */
- u32 hi_hci_uart_baud_scale_val; /* 0x90 */
- u32 hi_hci_uart_baud_step_val; /* 0x94 */
-
- u32 hi_allocram_start; /* 0x98 */
- u32 hi_allocram_sz; /* 0x9c */
- u32 hi_hci_bridge_flags; /* 0xa0 */
- u32 hi_hci_uart_support_pins; /* 0xa4 */
- /* NOTE: byte [0] = RESET pin (bit 7 is polarity), bytes[1]..bytes[3] are for future use */
- u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
- /*
- * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
- * [31:16]: wakeup timeout in ms
- */
-
- /* Pointer to extended board data */
- u32 hi_board_ext_data; /* 0xac */
- u32 hi_board_ext_data_config; /* 0xb0 */
-
- /*
- * Bit [0] : valid
- * Bit[31:16: size
- */
- /*
- * hi_reset_flag is used to do some stuff when target reset.
- * such as restore app_start after warm reset or
- * preserve host Interest area, or preserve ROM data, literals etc.
- */
- u32 hi_reset_flag; /* 0xb4 */
- /* indicate hi_reset_flag is valid */
- u32 hi_reset_flag_valid; /* 0xb8 */
- u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
- /*
- * 0xbc - [31:0]: idle timeout in ms
- */
- /* ACS flags */
- u32 hi_acs_flags; /* 0xc0 */
- u32 hi_console_flags; /* 0xc4 */
- u32 hi_nvram_state; /* 0xc8 */
- u32 hi_option_flag2; /* 0xcc */
-
- /* If non-zero, override values sent to Host in WMI_READY event. */
- u32 hi_sw_version_override; /* 0xd0 */
- u32 hi_abi_version_override; /* 0xd4 */
-
- /*
- * Percentage of high priority RX traffic to total expected RX traffic -
- * applicable only to ar6004
- */
- u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
-
- /* test applications flags */
- u32 hi_test_apps_related ; /* 0xdc */
- /* location of test script */
- u32 hi_ota_testscript; /* 0xe0 */
- /* location of CAL data */
- u32 hi_cal_data; /* 0xe4 */
- /* Number of packet log buffers */
- u32 hi_pktlog_num_buffers; /* 0xe8 */
-
-} POSTPACK;
-
-/* Bits defined in hi_option_flag */
-#define HI_OPTION_TIMER_WAR 0x01 /* Enable timer workaround */
-#define HI_OPTION_BMI_CRED_LIMIT 0x02 /* Limit BMI command credits */
-#define HI_OPTION_RELAY_DOT11_HDR 0x04 /* Relay Dot11 hdr to/from host */
-/* MAC addr method 0-locally administred 1-globally unique addrs */
-#define HI_OPTION_MAC_ADDR_METHOD 0x08
-#define HI_OPTION_FW_BRIDGE 0x10 /* Firmware Bridging */
-#define HI_OPTION_ENABLE_PROFILE 0x20 /* Enable CPU profiling */
-#define HI_OPTION_DISABLE_DBGLOG 0x40 /* Disable debug logging */
-#define HI_OPTION_SKIP_ERA_TRACKING 0x80 /* Skip Era Tracking */
-#define HI_OPTION_PAPRD_DISABLE 0x100 /* Disable PAPRD (debug) */
-#define HI_OPTION_NUM_DEV_LSB 0x200
-#define HI_OPTION_NUM_DEV_MSB 0x800
-#define HI_OPTION_DEV_MODE_LSB 0x1000
-#define HI_OPTION_DEV_MODE_MSB 0x8000000
-/* Disable LowFreq Timer Stabilization */
-#define HI_OPTION_NO_LFT_STBL 0x10000000
-#define HI_OPTION_SKIP_REG_SCAN 0x20000000 /* Skip regulatory scan */
-/* Do regulatory scan during init beforesending WMI ready event to host */
-#define HI_OPTION_INIT_REG_SCAN 0x40000000
-#define HI_OPTION_SKIP_MEMMAP 0x80000000 /* REV6: Do not adjust memory
- map */
-
-/* hi_option_flag2 options */
-#define HI_OPTION_OFFLOAD_AMSDU 0x01
-#define HI_OPTION_DFS_SUPPORT 0x02 /* Enable DFS support */
-
-#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
-
-/* 2 bits of hi_option_flag are used to represent 3 modes */
-#define HI_OPTION_FW_MODE_IBSS 0x0 /* IBSS Mode */
-#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
-#define HI_OPTION_FW_MODE_AP 0x2 /* AP Mode */
-
-/* 2 bits of hi_option flag are usedto represent 4 submodes */
-#define HI_OPTION_FW_SUBMODE_NONE 0x0 /* Normal mode */
-#define HI_OPTION_FW_SUBMODE_P2PDEV 0x1 /* p2p device mode */
-#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */
-#define HI_OPTION_FW_SUBMODE_P2PGO 0x3 /* p2p go mode */
-
-/* Num dev Mask */
-#define HI_OPTION_NUM_DEV_MASK 0x7
-#define HI_OPTION_NUM_DEV_SHIFT 0x9
-
-/* firmware bridging */
-#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
-
-/* Fw Mode/SubMode Mask
-|------------------------------------------------------------------------------|
-| SUB | SUB | SUB | SUB | | | |
-| MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0|
-| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2)
-|------------------------------------------------------------------------------|
-*/
-#define HI_OPTION_FW_MODE_BITS 0x2
-#define HI_OPTION_FW_MODE_MASK 0x3
-#define HI_OPTION_FW_MODE_SHIFT 0xC
-#define HI_OPTION_ALL_FW_MODE_MASK 0xFF
-
-#define HI_OPTION_FW_SUBMODE_BITS 0x2
-#define HI_OPTION_FW_SUBMODE_MASK 0x3
-#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
-#define HI_OPTION_ALL_FW_SUBMODE_MASK 0xFF00
-#define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
-
-/* hi_reset_flag */
-
-/* preserve App Start address */
-#define HI_RESET_FLAG_PRESERVE_APP_START 0x01
-/* preserve host interest */
-#define HI_RESET_FLAG_PRESERVE_HOST_INTEREST 0x02
-#define HI_RESET_FLAG_PRESERVE_ROMDATA 0x04 /* preserve ROM data */
-#define HI_RESET_FLAG_PRESERVE_NVRAM_STATE 0x08
-#define HI_RESET_FLAG_PRESERVE_BOOT_INFO 0x10
-
-#define HI_RESET_FLAG_IS_VALID 0x12345678 /* indicate the reset flag is
-valid */
-
-#define ON_RESET_FLAGS_VALID() \
- (HOST_INTEREST->hi_reset_flag_valid == HI_RESET_FLAG_IS_VALID)
-
-#define RESET_FLAGS_VALIDATE() \
- (HOST_INTEREST->hi_reset_flag_valid = HI_RESET_FLAG_IS_VALID)
-
-#define RESET_FLAGS_INVALIDATE() \
- (HOST_INTEREST->hi_reset_flag_valid = 0)
-
-#define ON_RESET_PRESERVE_APP_START() \
- (HOST_INTEREST->hi_reset_flag & HI_RESET_FLAG_PRESERVE_APP_START)
-
-#define ON_RESET_PRESERVE_NVRAM_STATE() \
- (HOST_INTEREST->hi_reset_flag & HI_RESET_FLAG_PRESERVE_NVRAM_STATE)
-
-#define ON_RESET_PRESERVE_HOST_INTEREST() \
- (HOST_INTEREST->hi_reset_flag & HI_RESET_FLAG_PRESERVE_HOST_INTEREST)
-
-#define ON_RESET_PRESERVE_ROMDATA() \
- (HOST_INTEREST->hi_reset_flag & HI_RESET_FLAG_PRESERVE_ROMDATA)
-
-#define ON_RESET_PRESERVE_BOOT_INFO() \
- (HOST_INTEREST->hi_reset_flag & HI_RESET_FLAG_PRESERVE_BOOT_INFO)
-
-#define HI_ACS_FLAGS_ENABLED (1 << 0) /* ACS is enabled */
-#define HI_ACS_FLAGS_USE_WWAN (1 << 1) /* Use physical WWAN device */
-#define HI_ACS_FLAGS_TEST_VAP (1 << 2) /* Use test VAP */
-
-/* CONSOLE FLAGS
- *
- * Bit Range Meaning
- * --------- --------------------------------
- * 2..0 UART ID (0 = Default)
- * 3 Baud Select (0 = 9600, 1 = 115200)
- * 30..4 Reserved
- * 31 Enable Console
- *
- */
-
-#define HI_CONSOLE_FLAGS_ENABLE (1 << 31)
-#define HI_CONSOLE_FLAGS_UART_MASK (0x7)
-#define HI_CONSOLE_FLAGS_UART_SHIFT 0
-#define HI_CONSOLE_FLAGS_BAUD_SELECT (1 << 3)
-
-/*
- * Intended for use by Host software, this macro returns the Target RAM
- * address of any item in the host_interest structure.
- * Example: target_addr = AR6002_HOST_INTEREST_ITEM_ADDRESS(hi_board_data);
- */
-#define AR6002_HOST_INTEREST_ITEM_ADDRESS(item) \
- (u32)((unsigned long)&((((struct host_interest_s *)(AR6002_HOST_INTEREST_ADDRESS))->item)))
-
-#define AR6003_HOST_INTEREST_ITEM_ADDRESS(item) \
- (u32)((unsigned long)&((((struct host_interest_s *)(AR6003_HOST_INTEREST_ADDRESS))->item)))
-
-#define AR6004_HOST_INTEREST_ITEM_ADDRESS(item) \
- ((u32)&((((struct host_interest_s *)(AR6004_HOST_INTEREST_ADDRESS))->item)))
-
-
-#define HOST_INTEREST_DBGLOG_IS_ENABLED() \
- (!(HOST_INTEREST->hi_option_flag & HI_OPTION_DISABLE_DBGLOG))
-
-#define HOST_INTEREST_PKTLOG_IS_ENABLED() \
- ((HOST_INTEREST->hi_pktlog_num_buffers))
-
-
-#define HOST_INTEREST_PROFILE_IS_ENABLED() \
- (HOST_INTEREST->hi_option_flag & HI_OPTION_ENABLE_PROFILE)
-
-#define LF_TIMER_STABILIZATION_IS_ENABLED() \
- (!(HOST_INTEREST->hi_option_flag & HI_OPTION_NO_LFT_STBL))
-
-#define IS_AMSDU_OFFLAOD_ENABLED() \
- ((HOST_INTEREST->hi_option_flag2 & HI_OPTION_OFFLOAD_AMSDU))
-
-#define HOST_INTEREST_DFS_IS_ENABLED() \
- ((HOST_INTEREST->hi_option_flag2 & HI_OPTION_DFS_SUPPORT))
-
-/* Convert a Target virtual address into a Target physical address */
-#define AR6002_VTOP(vaddr) ((vaddr) & 0x001fffff)
-#define AR6003_VTOP(vaddr) ((vaddr) & 0x001fffff)
-#define TARG_VTOP(TargetType, vaddr) \
- (((TargetType) == TARGET_TYPE_AR6002) ? AR6002_VTOP(vaddr) : AR6003_VTOP(vaddr))
-
-#define AR6003_REV2_APP_START_OVERRIDE 0x944C00
-#define AR6003_REV2_APP_LOAD_ADDRESS 0x543180
-#define AR6003_REV2_BOARD_EXT_DATA_ADDRESS 0x57E500
-#define AR6003_REV2_DATASET_PATCH_ADDRESS 0x57e884
-#define AR6003_REV2_RAM_RESERVE_SIZE 6912
-
-#define AR6003_REV3_APP_START_OVERRIDE 0x945d00
-#define AR6003_REV3_APP_LOAD_ADDRESS 0x545000
-#define AR6003_REV3_BOARD_EXT_DATA_ADDRESS 0x542330
-#define AR6003_REV3_DATASET_PATCH_ADDRESS 0x57FF74
-#define AR6003_REV3_RAM_RESERVE_SIZE 512
-
-#define AR6003_BOARD_EXT_DATA_ADDRESS 0x57E600
-
-/* # of u32 entries in targregs, used by DIAG_FETCH_TARG_REGS */
-#define AR6003_FETCH_TARG_REGS_COUNT 64
-
-#endif /* !__ASSEMBLER__ */
-
-#endif /* __TARGADDRS_H__ */
diff --git a/drivers/staging/ath6kl/include/common/testcmd.h b/drivers/staging/ath6kl/include/common/testcmd.h
deleted file mode 100644
index 7d94aee508b..00000000000
--- a/drivers/staging/ath6kl/include/common/testcmd.h
+++ /dev/null
@@ -1,185 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="testcmd.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef TESTCMD_H_
-#define TESTCMD_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef AR6002_REV2
-#define TCMD_MAX_RATES 12
-#else
-#define TCMD_MAX_RATES 28
-#endif
-
-typedef enum {
- ZEROES_PATTERN = 0,
- ONES_PATTERN,
- REPEATING_10,
- PN7_PATTERN,
- PN9_PATTERN,
- PN15_PATTERN
-}TX_DATA_PATTERN;
-
-/* Continuous tx
- mode : TCMD_CONT_TX_OFF - Disabling continuous tx
- TCMD_CONT_TX_SINE - Enable continuous unmodulated tx
- TCMD_CONT_TX_FRAME- Enable continuous modulated tx
- freq : Channel freq in Mhz. (e.g 2412 for channel 1 in 11 g)
-dataRate: 0 - 1 Mbps
- 1 - 2 Mbps
- 2 - 5.5 Mbps
- 3 - 11 Mbps
- 4 - 6 Mbps
- 5 - 9 Mbps
- 6 - 12 Mbps
- 7 - 18 Mbps
- 8 - 24 Mbps
- 9 - 36 Mbps
- 10 - 28 Mbps
- 11 - 54 Mbps
- txPwr: Tx power in dBm[5 -11] for unmod Tx, [5-14] for mod Tx
-antenna: 1 - one antenna
- 2 - two antenna
-Note : Enable/disable continuous tx test cmd works only when target is awake.
-*/
-
-typedef enum {
- TCMD_CONT_TX_OFF = 0,
- TCMD_CONT_TX_SINE,
- TCMD_CONT_TX_FRAME,
- TCMD_CONT_TX_TX99,
- TCMD_CONT_TX_TX100
-} TCMD_CONT_TX_MODE;
-
-typedef enum {
- TCMD_WLAN_MODE_NOHT = 0,
- TCMD_WLAN_MODE_HT20 = 1,
- TCMD_WLAN_MODE_HT40PLUS = 2,
- TCMD_WLAN_MODE_HT40MINUS = 3,
-} TCMD_WLAN_MODE;
-
-typedef PREPACK struct {
- u32 testCmdId;
- u32 mode;
- u32 freq;
- u32 dataRate;
- s32 txPwr;
- u32 antenna;
- u32 enANI;
- u32 scramblerOff;
- u32 aifsn;
- u16 pktSz;
- u16 txPattern;
- u32 shortGuard;
- u32 numPackets;
- u32 wlanMode;
-} POSTPACK TCMD_CONT_TX;
-
-#define TCMD_TXPATTERN_ZERONE 0x1
-#define TCMD_TXPATTERN_ZERONE_DIS_SCRAMBLE 0x2
-
-/* Continuous Rx
- act: TCMD_CONT_RX_PROMIS - promiscuous mode (accept all incoming frames)
- TCMD_CONT_RX_FILTER - filter mode (accept only frames with dest
- address equal specified
- mac address (set via act =3)
- TCMD_CONT_RX_REPORT off mode (disable cont rx mode and get the
- report from the last cont
- Rx test)
-
- TCMD_CONT_RX_SETMAC - set MacAddr mode (sets the MAC address for the
- target. This Overrides
- the default MAC address.)
-
-*/
-typedef enum {
- TCMD_CONT_RX_PROMIS =0,
- TCMD_CONT_RX_FILTER,
- TCMD_CONT_RX_REPORT,
- TCMD_CONT_RX_SETMAC,
- TCMD_CONT_RX_SET_ANT_SWITCH_TABLE
-} TCMD_CONT_RX_ACT;
-
-typedef PREPACK struct {
- u32 testCmdId;
- u32 act;
- u32 enANI;
- PREPACK union {
- struct PREPACK TCMD_CONT_RX_PARA {
- u32 freq;
- u32 antenna;
- u32 wlanMode;
- } POSTPACK para;
- struct PREPACK TCMD_CONT_RX_REPORT {
- u32 totalPkt;
- s32 rssiInDBm;
- u32 crcErrPkt;
- u32 secErrPkt;
- u16 rateCnt[TCMD_MAX_RATES];
- u16 rateCntShortGuard[TCMD_MAX_RATES];
- } POSTPACK report;
- struct PREPACK TCMD_CONT_RX_MAC {
- u8 addr[ATH_MAC_LEN];
- } POSTPACK mac;
- struct PREPACK TCMD_CONT_RX_ANT_SWITCH_TABLE {
- u32 antswitch1;
- u32 antswitch2;
- }POSTPACK antswitchtable;
- } POSTPACK u;
-} POSTPACK TCMD_CONT_RX;
-
-/* Force sleep/wake test cmd
- mode: TCMD_PM_WAKEUP - Wakeup the target
- TCMD_PM_SLEEP - Force the target to sleep.
- */
-typedef enum {
- TCMD_PM_WAKEUP = 1, /* be consistent with target */
- TCMD_PM_SLEEP,
- TCMD_PM_DEEPSLEEP
-} TCMD_PM_MODE;
-
-typedef PREPACK struct {
- u32 testCmdId;
- u32 mode;
-} POSTPACK TCMD_PM;
-
-typedef enum {
- TCMD_CONT_TX_ID,
- TCMD_CONT_RX_ID,
- TCMD_PM_ID
-} TCMD_ID;
-
-typedef PREPACK union {
- TCMD_CONT_TX contTx;
- TCMD_CONT_RX contRx;
- TCMD_PM pm;
-} POSTPACK TEST_CMD;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* TESTCMD_H_ */
diff --git a/drivers/staging/ath6kl/include/common/tlpm.h b/drivers/staging/ath6kl/include/common/tlpm.h
deleted file mode 100644
index 659b1c07ba9..00000000000
--- a/drivers/staging/ath6kl/include/common/tlpm.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifndef __TLPM_H__
-#define __TLPM_H__
-
-/* idle timeout in 16-bit value as in HOST_INTEREST hi_hci_uart_pwr_mgmt_params */
-#define TLPM_DEFAULT_IDLE_TIMEOUT_MS 1000
-/* hex in LSB and MSB for HCI command */
-#define TLPM_DEFAULT_IDLE_TIMEOUT_LSB 0xE8
-#define TLPM_DEFAULT_IDLE_TIMEOUT_MSB 0x3
-
-/* wakeup timeout in 8-bit value as in HOST_INTEREST hi_hci_uart_pwr_mgmt_params */
-#define TLPM_DEFAULT_WAKEUP_TIMEOUT_MS 10
-
-/* default UART FC polarity is low */
-#define TLPM_DEFAULT_UART_FC_POLARITY 0
-
-#endif
diff --git a/drivers/staging/ath6kl/include/common/wlan_defs.h b/drivers/staging/ath6kl/include/common/wlan_defs.h
deleted file mode 100644
index 03e4d23788c..00000000000
--- a/drivers/staging/ath6kl/include/common/wlan_defs.h
+++ /dev/null
@@ -1,79 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="wlan_defs.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef __WLAN_DEFS_H__
-#define __WLAN_DEFS_H__
-
-/*
- * This file contains WLAN definitions that may be used across both
- * Host and Target software.
- */
-
-typedef enum {
- MODE_11A = 0, /* 11a Mode */
- MODE_11G = 1, /* 11b/g Mode */
- MODE_11B = 2, /* 11b Mode */
- MODE_11GONLY = 3, /* 11g only Mode */
-#ifdef SUPPORT_11N
- MODE_11NA_HT20 = 4, /* 11a HT20 mode */
- MODE_11NG_HT20 = 5, /* 11g HT20 mode */
- MODE_11NA_HT40 = 6, /* 11a HT40 mode */
- MODE_11NG_HT40 = 7, /* 11g HT40 mode */
- MODE_UNKNOWN = 8,
- MODE_MAX = 8
-#else
- MODE_UNKNOWN = 4,
- MODE_MAX = 4
-#endif
-} WLAN_PHY_MODE;
-
-typedef enum {
- WLAN_11A_CAPABILITY = 1,
- WLAN_11G_CAPABILITY = 2,
- WLAN_11AG_CAPABILITY = 3,
-}WLAN_CAPABILITY;
-
-#ifdef SUPPORT_11N
-typedef unsigned long A_RATEMASK;
-#else
-typedef unsigned short A_RATEMASK;
-#endif
-
-#ifdef SUPPORT_11N
-#define IS_MODE_11A(mode) (((mode) == MODE_11A) || \
- ((mode) == MODE_11NA_HT20) || \
- ((mode) == MODE_11NA_HT40))
-#define IS_MODE_11B(mode) ((mode) == MODE_11B)
-#define IS_MODE_11G(mode) (((mode) == MODE_11G) || \
- ((mode) == MODE_11GONLY) || \
- ((mode) == MODE_11NG_HT20) || \
- ((mode) == MODE_11NG_HT40))
-#define IS_MODE_11GONLY(mode) ((mode) == MODE_11GONLY)
-#else
-#define IS_MODE_11A(mode) ((mode) == MODE_11A)
-#define IS_MODE_11B(mode) ((mode) == MODE_11B)
-#define IS_MODE_11G(mode) (((mode) == MODE_11G) || \
- ((mode) == MODE_11GONLY))
-#define IS_MODE_11GONLY(mode) ((mode) == MODE_11GONLY)
-#endif /* SUPPORT_11N */
-
-#endif /* __WLANDEFS_H__ */
diff --git a/drivers/staging/ath6kl/include/common/wmi.h b/drivers/staging/ath6kl/include/common/wmi.h
deleted file mode 100644
index d9687443d32..00000000000
--- a/drivers/staging/ath6kl/include/common/wmi.h
+++ /dev/null
@@ -1,3220 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-/*
- * This file contains the definitions of the WMI protocol specified in the
- * Wireless Module Interface (WMI). It includes definitions of all the
- * commands and events. Commands are messages from the host to the WM.
- * Events and Replies are messages from the WM to the host.
- *
- * Ownership of correctness in regards to commands
- * belongs to the host driver and the WMI is not required to validate
- * parameters for value, proper range, or any other checking.
- *
- */
-
-#ifndef _WMI_H_
-#define _WMI_H_
-
-#include "wmix.h"
-#include "wlan_defs.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define HTC_PROTOCOL_VERSION 0x0002
-#define HTC_PROTOCOL_REVISION 0x0000
-
-#define WMI_PROTOCOL_VERSION 0x0002
-#define WMI_PROTOCOL_REVISION 0x0000
-
-#define ATH_MAC_LEN 6 /* length of mac in bytes */
-#define WMI_CMD_MAX_LEN 100
-#define WMI_CONTROL_MSG_MAX_LEN 256
-#define WMI_OPT_CONTROL_MSG_MAX_LEN 1536
-#define IS_ETHERTYPE(_typeOrLen) ((_typeOrLen) >= 0x0600)
-#define RFC1042OUI {0x00, 0x00, 0x00}
-
-#define IP_ETHERTYPE 0x0800
-
-#define WMI_IMPLICIT_PSTREAM 0xFF
-#define WMI_MAX_THINSTREAM 15
-
-#ifdef AR6002_REV2
-#define IBSS_MAX_NUM_STA 4
-#else
-#define IBSS_MAX_NUM_STA 8
-#endif
-
-PREPACK struct host_app_area_s {
- u32 wmi_protocol_ver;
-} POSTPACK;
-
-/*
- * Data Path
- */
-typedef PREPACK struct {
- u8 dstMac[ATH_MAC_LEN];
- u8 srcMac[ATH_MAC_LEN];
- u16 typeOrLen;
-} POSTPACK ATH_MAC_HDR;
-
-typedef PREPACK struct {
- u8 dsap;
- u8 ssap;
- u8 cntl;
- u8 orgCode[3];
- u16 etherType;
-} POSTPACK ATH_LLC_SNAP_HDR;
-
-typedef enum {
- DATA_MSGTYPE = 0x0,
- CNTL_MSGTYPE,
- SYNC_MSGTYPE,
- OPT_MSGTYPE,
-} WMI_MSG_TYPE;
-
-
-/*
- * Macros for operating on WMI_DATA_HDR (info) field
- */
-
-#define WMI_DATA_HDR_MSG_TYPE_MASK 0x03
-#define WMI_DATA_HDR_MSG_TYPE_SHIFT 0
-#define WMI_DATA_HDR_UP_MASK 0x07
-#define WMI_DATA_HDR_UP_SHIFT 2
-/* In AP mode, the same bit (b5) is used to indicate Power save state in
- * the Rx dir and More data bit state in the tx direction.
- */
-#define WMI_DATA_HDR_PS_MASK 0x1
-#define WMI_DATA_HDR_PS_SHIFT 5
-
-#define WMI_DATA_HDR_MORE_MASK 0x1
-#define WMI_DATA_HDR_MORE_SHIFT 5
-
-typedef enum {
- WMI_DATA_HDR_DATA_TYPE_802_3 = 0,
- WMI_DATA_HDR_DATA_TYPE_802_11,
- WMI_DATA_HDR_DATA_TYPE_ACL, /* used to be used for the PAL */
-} WMI_DATA_HDR_DATA_TYPE;
-
-#define WMI_DATA_HDR_DATA_TYPE_MASK 0x3
-#define WMI_DATA_HDR_DATA_TYPE_SHIFT 6
-
-#define WMI_DATA_HDR_SET_MORE_BIT(h) ((h)->info |= (WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT))
-
-#define WMI_DATA_HDR_IS_MSG_TYPE(h, t) (((h)->info & (WMI_DATA_HDR_MSG_TYPE_MASK)) == (t))
-#define WMI_DATA_HDR_SET_MSG_TYPE(h, t) (h)->info = (((h)->info & ~(WMI_DATA_HDR_MSG_TYPE_MASK << WMI_DATA_HDR_MSG_TYPE_SHIFT)) | (t << WMI_DATA_HDR_MSG_TYPE_SHIFT))
-#define WMI_DATA_HDR_GET_UP(h) (((h)->info >> WMI_DATA_HDR_UP_SHIFT) & WMI_DATA_HDR_UP_MASK)
-#define WMI_DATA_HDR_SET_UP(h, p) (h)->info = (((h)->info & ~(WMI_DATA_HDR_UP_MASK << WMI_DATA_HDR_UP_SHIFT)) | (p << WMI_DATA_HDR_UP_SHIFT))
-
-#define WMI_DATA_HDR_GET_DATA_TYPE(h) (((h)->info >> WMI_DATA_HDR_DATA_TYPE_SHIFT) & WMI_DATA_HDR_DATA_TYPE_MASK)
-#define WMI_DATA_HDR_SET_DATA_TYPE(h, p) (h)->info = (((h)->info & ~(WMI_DATA_HDR_DATA_TYPE_MASK << WMI_DATA_HDR_DATA_TYPE_SHIFT)) | ((p) << WMI_DATA_HDR_DATA_TYPE_SHIFT))
-
-#define WMI_DATA_HDR_GET_DOT11(h) (WMI_DATA_HDR_GET_DATA_TYPE((h)) == WMI_DATA_HDR_DATA_TYPE_802_11)
-#define WMI_DATA_HDR_SET_DOT11(h, p) WMI_DATA_HDR_SET_DATA_TYPE((h), (p))
-
-/* Macros for operating on WMI_DATA_HDR (info2) field */
-#define WMI_DATA_HDR_SEQNO_MASK 0xFFF
-#define WMI_DATA_HDR_SEQNO_SHIFT 0
-
-#define WMI_DATA_HDR_AMSDU_MASK 0x1
-#define WMI_DATA_HDR_AMSDU_SHIFT 12
-
-#define WMI_DATA_HDR_META_MASK 0x7
-#define WMI_DATA_HDR_META_SHIFT 13
-
-#define GET_SEQ_NO(_v) ((_v) & WMI_DATA_HDR_SEQNO_MASK)
-#define GET_ISMSDU(_v) ((_v) & WMI_DATA_HDR_AMSDU_MASK)
-
-#define WMI_DATA_HDR_GET_SEQNO(h) GET_SEQ_NO((h)->info2 >> WMI_DATA_HDR_SEQNO_SHIFT)
-#define WMI_DATA_HDR_SET_SEQNO(h, _v) ((h)->info2 = ((h)->info2 & ~(WMI_DATA_HDR_SEQNO_MASK << WMI_DATA_HDR_SEQNO_SHIFT)) | (GET_SEQ_NO(_v) << WMI_DATA_HDR_SEQNO_SHIFT))
-
-#define WMI_DATA_HDR_IS_AMSDU(h) GET_ISMSDU((h)->info2 >> WMI_DATA_HDR_AMSDU_SHIFT)
-#define WMI_DATA_HDR_SET_AMSDU(h, _v) ((h)->info2 = ((h)->info2 & ~(WMI_DATA_HDR_AMSDU_MASK << WMI_DATA_HDR_AMSDU_SHIFT)) | (GET_ISMSDU(_v) << WMI_DATA_HDR_AMSDU_SHIFT))
-
-#define WMI_DATA_HDR_GET_META(h) (((h)->info2 >> WMI_DATA_HDR_META_SHIFT) & WMI_DATA_HDR_META_MASK)
-#define WMI_DATA_HDR_SET_META(h, _v) ((h)->info2 = ((h)->info2 & ~(WMI_DATA_HDR_META_MASK << WMI_DATA_HDR_META_SHIFT)) | ((_v) << WMI_DATA_HDR_META_SHIFT))
-
-/* Macros for operating on WMI_DATA_HDR (info3) field */
-#define WMI_DATA_HDR_DEVID_MASK 0xF
-#define WMI_DATA_HDR_DEVID_SHIFT 0
-#define GET_DEVID(_v) ((_v) & WMI_DATA_HDR_DEVID_MASK)
-
-#define WMI_DATA_HDR_GET_DEVID(h) \
- (((h)->info3 >> WMI_DATA_HDR_DEVID_SHIFT) & WMI_DATA_HDR_DEVID_MASK)
-#define WMI_DATA_HDR_SET_DEVID(h, _v) \
- ((h)->info3 = ((h)->info3 & ~(WMI_DATA_HDR_DEVID_MASK << WMI_DATA_HDR_DEVID_SHIFT)) | (GET_DEVID(_v) << WMI_DATA_HDR_DEVID_SHIFT))
-
-typedef PREPACK struct {
- s8 rssi;
- u8 info; /* usage of 'info' field(8-bit):
- * b1:b0 - WMI_MSG_TYPE
- * b4:b3:b2 - UP(tid)
- * b5 - Used in AP mode. More-data in tx dir, PS in rx.
- * b7:b6 - Dot3 header(0),
- * Dot11 Header(1),
- * ACL data(2)
- */
-
- u16 info2; /* usage of 'info2' field(16-bit):
- * b11:b0 - seq_no
- * b12 - A-MSDU?
- * b15:b13 - META_DATA_VERSION 0 - 7
- */
- u16 info3;
-} POSTPACK WMI_DATA_HDR;
-
-/*
- * TX META VERSION DEFINITIONS
- */
-#define WMI_MAX_TX_META_SZ (12)
-#define WMI_MAX_TX_META_VERSION (7)
-#define WMI_META_VERSION_1 (0x01)
-#define WMI_META_VERSION_2 (0X02)
-
-#define WMI_ACL_TO_DOT11_HEADROOM 36
-
-#if 0 /* removed to prevent compile errors for WM.. */
-typedef PREPACK struct {
-/* intentionally empty. Default version is no meta data. */
-} POSTPACK WMI_TX_META_V0;
-#endif
-
-typedef PREPACK struct {
- u8 pktID; /* The packet ID to identify the tx request */
- u8 ratePolicyID; /* The rate policy to be used for the tx of this frame */
-} POSTPACK WMI_TX_META_V1;
-
-
-#define WMI_CSUM_DIR_TX (0x1)
-#define TX_CSUM_CALC_FILL (0x1)
-typedef PREPACK struct {
- u8 csumStart; /*Offset from start of the WMI header for csum calculation to begin */
- u8 csumDest; /*Offset from start of WMI header where final csum goes*/
- u8 csumFlags; /*number of bytes over which csum is calculated*/
-} POSTPACK WMI_TX_META_V2;
-
-
-/*
- * RX META VERSION DEFINITIONS
- */
-/* if RX meta data is present at all then the meta data field
- * will consume WMI_MAX_RX_META_SZ bytes of space between the
- * WMI_DATA_HDR and the payload. How much of the available
- * Meta data is actually used depends on which meta data
- * version is active. */
-#define WMI_MAX_RX_META_SZ (12)
-#define WMI_MAX_RX_META_VERSION (7)
-
-#define WMI_RX_STATUS_OK 0 /* success */
-#define WMI_RX_STATUS_DECRYPT_ERR 1 /* decrypt error */
-#define WMI_RX_STATUS_MIC_ERR 2 /* tkip MIC error */
-#define WMI_RX_STATUS_ERR 3 /* undefined error */
-
-#define WMI_RX_FLAGS_AGGR 0x0001 /* part of AGGR */
-#define WMI_RX_FlAGS_STBC 0x0002 /* used STBC */
-#define WMI_RX_FLAGS_SGI 0x0004 /* used SGI */
-#define WMI_RX_FLAGS_HT 0x0008 /* is HT packet */
-/* the flags field is also used to store the CRYPTO_TYPE of the frame
- * that value is shifted by WMI_RX_FLAGS_CRYPTO_SHIFT */
-#define WMI_RX_FLAGS_CRYPTO_SHIFT 4
-#define WMI_RX_FLAGS_CRYPTO_MASK 0x1f
-#define WMI_RX_META_GET_CRYPTO(flags) (((flags) >> WMI_RX_FLAGS_CRYPTO_SHIFT) & WMI_RX_FLAGS_CRYPTO_MASK)
-
-#if 0 /* removed to prevent compile errors for WM.. */
-typedef PREPACK struct {
-/* intentionally empty. Default version is no meta data. */
-} POSTPACK WMI_RX_META_VERSION_0;
-#endif
-
-typedef PREPACK struct {
- u8 status; /* one of WMI_RX_STATUS_... */
- u8 rix; /* rate index mapped to rate at which this packet was received. */
- u8 rssi; /* rssi of packet */
- u8 channel;/* rf channel during packet reception */
- u16 flags; /* a combination of WMI_RX_FLAGS_... */
-} POSTPACK WMI_RX_META_V1;
-
-#define RX_CSUM_VALID_FLAG (0x1)
-typedef PREPACK struct {
- u16 csum;
- u8 csumFlags;/* bit 0 set -partial csum valid
- bit 1 set -test mode */
-} POSTPACK WMI_RX_META_V2;
-
-
-
-#define WMI_GET_DEVICE_ID(info1) ((info1) & 0xF)
-/* Macros for operating on WMI_CMD_HDR (info1) field */
-#define WMI_CMD_HDR_DEVID_MASK 0xF
-#define WMI_CMD_HDR_DEVID_SHIFT 0
-#define GET_CMD_DEVID(_v) ((_v) & WMI_CMD_HDR_DEVID_MASK)
-
-#define WMI_CMD_HDR_GET_DEVID(h) \
- (((h)->info1 >> WMI_CMD_HDR_DEVID_SHIFT) & WMI_CMD_HDR_DEVID_MASK)
-#define WMI_CMD_HDR_SET_DEVID(h, _v) \
- ((h)->info1 = ((h)->info1 & \
- ~(WMI_CMD_HDR_DEVID_MASK << WMI_CMD_HDR_DEVID_SHIFT)) | \
- (GET_CMD_DEVID(_v) << WMI_CMD_HDR_DEVID_SHIFT))
-
-/*
- * Control Path
- */
-typedef PREPACK struct {
- u16 commandId;
-/*
- * info1 - 16 bits
- * b03:b00 - id
- * b15:b04 - unused
- */
- u16 info1;
-
- u16 reserved; /* For alignment */
-} POSTPACK WMI_CMD_HDR; /* used for commands and events */
-
-/*
- * List of Commnands
- */
-typedef enum {
- WMI_CONNECT_CMDID = 0x0001,
- WMI_RECONNECT_CMDID,
- WMI_DISCONNECT_CMDID,
- WMI_SYNCHRONIZE_CMDID,
- WMI_CREATE_PSTREAM_CMDID,
- WMI_DELETE_PSTREAM_CMDID,
- WMI_START_SCAN_CMDID,
- WMI_SET_SCAN_PARAMS_CMDID,
- WMI_SET_BSS_FILTER_CMDID,
- WMI_SET_PROBED_SSID_CMDID, /* 10 */
- WMI_SET_LISTEN_INT_CMDID,
- WMI_SET_BMISS_TIME_CMDID,
- WMI_SET_DISC_TIMEOUT_CMDID,
- WMI_GET_CHANNEL_LIST_CMDID,
- WMI_SET_BEACON_INT_CMDID,
- WMI_GET_STATISTICS_CMDID,
- WMI_SET_CHANNEL_PARAMS_CMDID,
- WMI_SET_POWER_MODE_CMDID,
- WMI_SET_IBSS_PM_CAPS_CMDID,
- WMI_SET_POWER_PARAMS_CMDID, /* 20 */
- WMI_SET_POWERSAVE_TIMERS_POLICY_CMDID,
- WMI_ADD_CIPHER_KEY_CMDID,
- WMI_DELETE_CIPHER_KEY_CMDID,
- WMI_ADD_KRK_CMDID,
- WMI_DELETE_KRK_CMDID,
- WMI_SET_PMKID_CMDID,
- WMI_SET_TX_PWR_CMDID,
- WMI_GET_TX_PWR_CMDID,
- WMI_SET_ASSOC_INFO_CMDID,
- WMI_ADD_BAD_AP_CMDID, /* 30 */
- WMI_DELETE_BAD_AP_CMDID,
- WMI_SET_TKIP_COUNTERMEASURES_CMDID,
- WMI_RSSI_THRESHOLD_PARAMS_CMDID,
- WMI_TARGET_ERROR_REPORT_BITMASK_CMDID,
- WMI_SET_ACCESS_PARAMS_CMDID,
- WMI_SET_RETRY_LIMITS_CMDID,
- WMI_SET_OPT_MODE_CMDID,
- WMI_OPT_TX_FRAME_CMDID,
- WMI_SET_VOICE_PKT_SIZE_CMDID,
- WMI_SET_MAX_SP_LEN_CMDID, /* 40 */
- WMI_SET_ROAM_CTRL_CMDID,
- WMI_GET_ROAM_TBL_CMDID,
- WMI_GET_ROAM_DATA_CMDID,
- WMI_ENABLE_RM_CMDID,
- WMI_SET_MAX_OFFHOME_DURATION_CMDID,
- WMI_EXTENSION_CMDID, /* Non-wireless extensions */
- WMI_SNR_THRESHOLD_PARAMS_CMDID,
- WMI_LQ_THRESHOLD_PARAMS_CMDID,
- WMI_SET_LPREAMBLE_CMDID,
- WMI_SET_RTS_CMDID, /* 50 */
- WMI_CLR_RSSI_SNR_CMDID,
- WMI_SET_FIXRATES_CMDID,
- WMI_GET_FIXRATES_CMDID,
- WMI_SET_AUTH_MODE_CMDID,
- WMI_SET_REASSOC_MODE_CMDID,
- WMI_SET_WMM_CMDID,
- WMI_SET_WMM_TXOP_CMDID,
- WMI_TEST_CMDID,
- /* COEX AR6002 only*/
- WMI_SET_BT_STATUS_CMDID,
- WMI_SET_BT_PARAMS_CMDID, /* 60 */
-
- WMI_SET_KEEPALIVE_CMDID,
- WMI_GET_KEEPALIVE_CMDID,
- WMI_SET_APPIE_CMDID,
- WMI_GET_APPIE_CMDID,
- WMI_SET_WSC_STATUS_CMDID,
-
- /* Wake on Wireless */
- WMI_SET_HOST_SLEEP_MODE_CMDID,
- WMI_SET_WOW_MODE_CMDID,
- WMI_GET_WOW_LIST_CMDID,
- WMI_ADD_WOW_PATTERN_CMDID,
- WMI_DEL_WOW_PATTERN_CMDID, /* 70 */
-
- WMI_SET_FRAMERATES_CMDID,
- WMI_SET_AP_PS_CMDID,
- WMI_SET_QOS_SUPP_CMDID,
- /* WMI_THIN_RESERVED_... mark the start and end
- * values for WMI_THIN_RESERVED command IDs. These
- * command IDs can be found in wmi_thin.h */
- WMI_THIN_RESERVED_START = 0x8000,
- WMI_THIN_RESERVED_END = 0x8fff,
- /*
- * Developer commands starts at 0xF000
- */
- WMI_SET_BITRATE_CMDID = 0xF000,
- WMI_GET_BITRATE_CMDID,
- WMI_SET_WHALPARAM_CMDID,
-
-
- /*Should add the new command to the tail for compatible with
- * etna.
- */
- WMI_SET_MAC_ADDRESS_CMDID,
- WMI_SET_AKMP_PARAMS_CMDID,
- WMI_SET_PMKID_LIST_CMDID,
- WMI_GET_PMKID_LIST_CMDID,
- WMI_ABORT_SCAN_CMDID,
- WMI_SET_TARGET_EVENT_REPORT_CMDID,
-
- // Unused
- WMI_UNUSED1,
- WMI_UNUSED2,
-
- /*
- * AP mode commands
- */
- WMI_AP_HIDDEN_SSID_CMDID,
- WMI_AP_SET_NUM_STA_CMDID,
- WMI_AP_ACL_POLICY_CMDID,
- WMI_AP_ACL_MAC_LIST_CMDID,
- WMI_AP_CONFIG_COMMIT_CMDID,
- WMI_AP_SET_MLME_CMDID,
- WMI_AP_SET_PVB_CMDID,
- WMI_AP_CONN_INACT_CMDID,
- WMI_AP_PROT_SCAN_TIME_CMDID,
- WMI_AP_SET_COUNTRY_CMDID,
- WMI_AP_SET_DTIM_CMDID,
- WMI_AP_MODE_STAT_CMDID,
-
- WMI_SET_IP_CMDID,
- WMI_SET_PARAMS_CMDID,
- WMI_SET_MCAST_FILTER_CMDID,
- WMI_DEL_MCAST_FILTER_CMDID,
-
- WMI_ALLOW_AGGR_CMDID,
- WMI_ADDBA_REQ_CMDID,
- WMI_DELBA_REQ_CMDID,
- WMI_SET_HT_CAP_CMDID,
- WMI_SET_HT_OP_CMDID,
- WMI_SET_TX_SELECT_RATES_CMDID,
- WMI_SET_TX_SGI_PARAM_CMDID,
- WMI_SET_RATE_POLICY_CMDID,
-
- WMI_HCI_CMD_CMDID,
- WMI_RX_FRAME_FORMAT_CMDID,
- WMI_SET_THIN_MODE_CMDID,
- WMI_SET_BT_WLAN_CONN_PRECEDENCE_CMDID,
-
- WMI_AP_SET_11BG_RATESET_CMDID,
- WMI_SET_PMK_CMDID,
- WMI_MCAST_FILTER_CMDID,
- /* COEX CMDID AR6003*/
- WMI_SET_BTCOEX_FE_ANT_CMDID,
- WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID,
- WMI_SET_BTCOEX_SCO_CONFIG_CMDID,
- WMI_SET_BTCOEX_A2DP_CONFIG_CMDID,
- WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMDID,
- WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMDID,
- WMI_SET_BTCOEX_DEBUG_CMDID,
- WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID,
- WMI_GET_BTCOEX_STATS_CMDID,
- WMI_GET_BTCOEX_CONFIG_CMDID,
-
- WMI_SET_DFS_ENABLE_CMDID, /* F034 */
- WMI_SET_DFS_MINRSSITHRESH_CMDID,
- WMI_SET_DFS_MAXPULSEDUR_CMDID,
- WMI_DFS_RADAR_DETECTED_CMDID,
-
- /* P2P CMDS */
- WMI_P2P_SET_CONFIG_CMDID, /* F038 */
- WMI_WPS_SET_CONFIG_CMDID,
- WMI_SET_REQ_DEV_ATTR_CMDID,
- WMI_P2P_FIND_CMDID,
- WMI_P2P_STOP_FIND_CMDID,
- WMI_P2P_GO_NEG_START_CMDID,
- WMI_P2P_LISTEN_CMDID,
-
- WMI_CONFIG_TX_MAC_RULES_CMDID, /* F040 */
- WMI_SET_PROMISCUOUS_MODE_CMDID,
- WMI_RX_FRAME_FILTER_CMDID,
- WMI_SET_CHANNEL_CMDID,
-
- /* WAC commands */
- WMI_ENABLE_WAC_CMDID,
- WMI_WAC_SCAN_REPLY_CMDID,
- WMI_WAC_CTRL_REQ_CMDID,
- WMI_SET_DIV_PARAMS_CMDID,
-
- WMI_GET_PMK_CMDID,
- WMI_SET_PASSPHRASE_CMDID,
- WMI_SEND_ASSOC_RES_CMDID,
- WMI_SET_ASSOC_REQ_RELAY_CMDID,
- WMI_GET_RFKILL_MODE_CMDID,
-
- /* ACS command, consists of sub-commands */
- WMI_ACS_CTRL_CMDID,
-
- /* Ultra low power store / recall commands */
- WMI_STORERECALL_CONFIGURE_CMDID,
- WMI_STORERECALL_RECALL_CMDID,
- WMI_STORERECALL_HOST_READY_CMDID,
- WMI_FORCE_TARGET_ASSERT_CMDID,
- WMI_SET_EXCESS_TX_RETRY_THRES_CMDID,
-} WMI_COMMAND_ID;
-
-/*
- * Frame Types
- */
-typedef enum {
- WMI_FRAME_BEACON = 0,
- WMI_FRAME_PROBE_REQ,
- WMI_FRAME_PROBE_RESP,
- WMI_FRAME_ASSOC_REQ,
- WMI_FRAME_ASSOC_RESP,
- WMI_NUM_MGMT_FRAME
-} WMI_MGMT_FRAME_TYPE;
-
-/*
- * Connect Command
- */
-typedef enum {
- INFRA_NETWORK = 0x01,
- ADHOC_NETWORK = 0x02,
- ADHOC_CREATOR = 0x04,
- AP_NETWORK = 0x10,
-} NETWORK_TYPE;
-
-typedef enum {
- OPEN_AUTH = 0x01,
- SHARED_AUTH = 0x02,
- LEAP_AUTH = 0x04, /* different from IEEE_AUTH_MODE definitions */
-} DOT11_AUTH_MODE;
-
-enum {
- AUTH_IDLE,
- AUTH_OPEN_IN_PROGRESS,
-};
-
-typedef enum {
- NONE_AUTH = 0x01,
- WPA_AUTH = 0x02,
- WPA2_AUTH = 0x04,
- WPA_PSK_AUTH = 0x08,
- WPA2_PSK_AUTH = 0x10,
- WPA_AUTH_CCKM = 0x20,
- WPA2_AUTH_CCKM = 0x40,
-} AUTH_MODE;
-
-typedef enum {
- NONE_CRYPT = 0x01,
- WEP_CRYPT = 0x02,
- TKIP_CRYPT = 0x04,
- AES_CRYPT = 0x08,
-#ifdef WAPI_ENABLE
- WAPI_CRYPT = 0x10,
-#endif /*WAPI_ENABLE*/
-} CRYPTO_TYPE;
-
-#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT
-#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1)
-
-#ifdef WAPI_ENABLE
-#undef WMI_MAX_CRYPTO_TYPE
-#define WMI_MAX_CRYPTO_TYPE (WAPI_CRYPT + 1)
-#endif /* WAPI_ENABLE */
-
-#ifdef WAPI_ENABLE
-#define IW_ENCODE_ALG_SM4 0x20
-#define IW_AUTH_WAPI_ENABLED 0x20
-#endif
-
-#define WMI_MIN_KEY_INDEX 0
-#define WMI_MAX_KEY_INDEX 3
-
-#ifdef WAPI_ENABLE
-#undef WMI_MAX_KEY_INDEX
-#define WMI_MAX_KEY_INDEX 7 /* wapi grpKey 0-3, prwKey 4-7 */
-#endif /* WAPI_ENABLE */
-
-#define WMI_MAX_KEY_LEN 32
-
-#define WMI_MAX_SSID_LEN 32
-
-typedef enum {
- CONNECT_ASSOC_POLICY_USER = 0x0001,
- CONNECT_SEND_REASSOC = 0x0002,
- CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004,
- CONNECT_PROFILE_MATCH_DONE = 0x0008,
- CONNECT_IGNORE_AAC_BEACON = 0x0010,
- CONNECT_CSA_FOLLOW_BSS = 0x0020,
- CONNECT_DO_WPA_OFFLOAD = 0x0040,
- CONNECT_DO_NOT_DEAUTH = 0x0080,
-} WMI_CONNECT_CTRL_FLAGS_BITS;
-
-#define DEFAULT_CONNECT_CTRL_FLAGS (CONNECT_CSA_FOLLOW_BSS)
-
-typedef PREPACK struct {
- u8 networkType;
- u8 dot11AuthMode;
- u8 authMode;
- u8 pairwiseCryptoType;
- u8 pairwiseCryptoLen;
- u8 groupCryptoType;
- u8 groupCryptoLen;
- u8 ssidLength;
- u8 ssid[WMI_MAX_SSID_LEN];
- u16 channel;
- u8 bssid[ATH_MAC_LEN];
- u32 ctrl_flags;
-} POSTPACK WMI_CONNECT_CMD;
-
-/*
- * WMI_RECONNECT_CMDID
- */
-typedef PREPACK struct {
- u16 channel; /* hint */
- u8 bssid[ATH_MAC_LEN]; /* mandatory if set */
-} POSTPACK WMI_RECONNECT_CMD;
-
-#define WMI_PMK_LEN 32
-typedef PREPACK struct {
- u8 pmk[WMI_PMK_LEN];
-} POSTPACK WMI_SET_PMK_CMD;
-
-/*
- * WMI_SET_EXCESS_TX_RETRY_THRES_CMDID
- */
-typedef PREPACK struct {
- u32 threshold;
-} POSTPACK WMI_SET_EXCESS_TX_RETRY_THRES_CMD;
-
-/*
- * WMI_ADD_CIPHER_KEY_CMDID
- */
-typedef enum {
- PAIRWISE_USAGE = 0x00,
- GROUP_USAGE = 0x01,
- TX_USAGE = 0x02, /* default Tx Key - Static WEP only */
-} KEY_USAGE;
-
-/*
- * Bit Flag
- * Bit 0 - Initialise TSC - default is Initialize
- */
-#define KEY_OP_INIT_TSC 0x01
-#define KEY_OP_INIT_RSC 0x02
-#ifdef WAPI_ENABLE
-#define KEY_OP_INIT_WAPIPN 0x10
-#endif /* WAPI_ENABLE */
-
-#define KEY_OP_INIT_VAL 0x03 /* Default Initialise the TSC & RSC */
-#define KEY_OP_VALID_MASK 0x03
-
-typedef PREPACK struct {
- u8 keyIndex;
- u8 keyType;
- u8 keyUsage; /* KEY_USAGE */
- u8 keyLength;
- u8 keyRSC[8]; /* key replay sequence counter */
- u8 key[WMI_MAX_KEY_LEN];
- u8 key_op_ctrl; /* Additional Key Control information */
- u8 key_macaddr[ATH_MAC_LEN];
-} POSTPACK WMI_ADD_CIPHER_KEY_CMD;
-
-/*
- * WMI_DELETE_CIPHER_KEY_CMDID
- */
-typedef PREPACK struct {
- u8 keyIndex;
-} POSTPACK WMI_DELETE_CIPHER_KEY_CMD;
-
-#define WMI_KRK_LEN 16
-/*
- * WMI_ADD_KRK_CMDID
- */
-typedef PREPACK struct {
- u8 krk[WMI_KRK_LEN];
-} POSTPACK WMI_ADD_KRK_CMD;
-
-/*
- * WMI_SET_TKIP_COUNTERMEASURES_CMDID
- */
-typedef enum {
- WMI_TKIP_CM_DISABLE = 0x0,
- WMI_TKIP_CM_ENABLE = 0x1,
-} WMI_TKIP_CM_CONTROL;
-
-typedef PREPACK struct {
- u8 cm_en; /* WMI_TKIP_CM_CONTROL */
-} POSTPACK WMI_SET_TKIP_COUNTERMEASURES_CMD;
-
-/*
- * WMI_SET_PMKID_CMDID
- */
-
-#define WMI_PMKID_LEN 16
-
-typedef enum {
- PMKID_DISABLE = 0,
- PMKID_ENABLE = 1,
-} PMKID_ENABLE_FLG;
-
-typedef PREPACK struct {
- u8 bssid[ATH_MAC_LEN];
- u8 enable; /* PMKID_ENABLE_FLG */
- u8 pmkid[WMI_PMKID_LEN];
-} POSTPACK WMI_SET_PMKID_CMD;
-
-/*
- * WMI_START_SCAN_CMD
- */
-typedef enum {
- WMI_LONG_SCAN = 0,
- WMI_SHORT_SCAN = 1,
-} WMI_SCAN_TYPE;
-
-typedef PREPACK struct {
- u32 forceFgScan;
- u32 isLegacy; /* For Legacy Cisco AP compatibility */
- u32 homeDwellTime; /* Maximum duration in the home channel(milliseconds) */
- u32 forceScanInterval; /* Time interval between scans (milliseconds)*/
- u8 scanType; /* WMI_SCAN_TYPE */
- u8 numChannels; /* how many channels follow */
- u16 channelList[1]; /* channels in Mhz */
-} POSTPACK WMI_START_SCAN_CMD;
-
-/*
- * WMI_SET_SCAN_PARAMS_CMDID
- */
-#define WMI_SHORTSCANRATIO_DEFAULT 3
-/*
- * Warning: ScanCtrlFlag value of 0xFF is used to disable all flags in WMI_SCAN_PARAMS_CMD
- * Do not add any more flags to WMI_SCAN_CTRL_FLAG_BITS
- */
-typedef enum {
- CONNECT_SCAN_CTRL_FLAGS = 0x01, /* set if can scan in the Connect cmd */
- SCAN_CONNECTED_CTRL_FLAGS = 0x02, /* set if scan for the SSID it is */
- /* already connected to */
- ACTIVE_SCAN_CTRL_FLAGS = 0x04, /* set if enable active scan */
- ROAM_SCAN_CTRL_FLAGS = 0x08, /* set if enable roam scan when bmiss and lowrssi */
- REPORT_BSSINFO_CTRL_FLAGS = 0x10, /* set if follows customer BSSINFO reporting rule */
- ENABLE_AUTO_CTRL_FLAGS = 0x20, /* if disabled, target doesn't
- scan after a disconnect event */
- ENABLE_SCAN_ABORT_EVENT = 0x40 /* Scan complete event with canceled status will be generated when a scan is prempted before it gets completed */
-} WMI_SCAN_CTRL_FLAGS_BITS;
-
-#define CAN_SCAN_IN_CONNECT(flags) (flags & CONNECT_SCAN_CTRL_FLAGS)
-#define CAN_SCAN_CONNECTED(flags) (flags & SCAN_CONNECTED_CTRL_FLAGS)
-#define ENABLE_ACTIVE_SCAN(flags) (flags & ACTIVE_SCAN_CTRL_FLAGS)
-#define ENABLE_ROAM_SCAN(flags) (flags & ROAM_SCAN_CTRL_FLAGS)
-#define CONFIG_REPORT_BSSINFO(flags) (flags & REPORT_BSSINFO_CTRL_FLAGS)
-#define IS_AUTO_SCAN_ENABLED(flags) (flags & ENABLE_AUTO_CTRL_FLAGS)
-#define SCAN_ABORT_EVENT_ENABLED(flags) (flags & ENABLE_SCAN_ABORT_EVENT)
-
-#define DEFAULT_SCAN_CTRL_FLAGS (CONNECT_SCAN_CTRL_FLAGS| SCAN_CONNECTED_CTRL_FLAGS| ACTIVE_SCAN_CTRL_FLAGS| ROAM_SCAN_CTRL_FLAGS | ENABLE_AUTO_CTRL_FLAGS)
-
-
-typedef PREPACK struct {
- u16 fg_start_period; /* seconds */
- u16 fg_end_period; /* seconds */
- u16 bg_period; /* seconds */
- u16 maxact_chdwell_time; /* msec */
- u16 pas_chdwell_time; /* msec */
- u8 shortScanRatio; /* how many shorts scan for one long */
- u8 scanCtrlFlags;
- u16 minact_chdwell_time; /* msec */
- u16 maxact_scan_per_ssid; /* max active scans per ssid */
- u32 max_dfsch_act_time; /* msecs */
-} POSTPACK WMI_SCAN_PARAMS_CMD;
-
-/*
- * WMI_SET_BSS_FILTER_CMDID
- */
-typedef enum {
- NONE_BSS_FILTER = 0x0, /* no beacons forwarded */
- ALL_BSS_FILTER, /* all beacons forwarded */
- PROFILE_FILTER, /* only beacons matching profile */
- ALL_BUT_PROFILE_FILTER, /* all but beacons matching profile */
- CURRENT_BSS_FILTER, /* only beacons matching current BSS */
- ALL_BUT_BSS_FILTER, /* all but beacons matching BSS */
- PROBED_SSID_FILTER, /* beacons matching probed ssid */
- LAST_BSS_FILTER, /* marker only */
-} WMI_BSS_FILTER;
-
-typedef PREPACK struct {
- u8 bssFilter; /* see WMI_BSS_FILTER */
- u8 reserved1; /* For alignment */
- u16 reserved2; /* For alignment */
- u32 ieMask;
-} POSTPACK WMI_BSS_FILTER_CMD;
-
-/*
- * WMI_SET_PROBED_SSID_CMDID
- */
-#define MAX_PROBED_SSID_INDEX 9
-
-typedef enum {
- DISABLE_SSID_FLAG = 0, /* disables entry */
- SPECIFIC_SSID_FLAG = 0x01, /* probes specified ssid */
- ANY_SSID_FLAG = 0x02, /* probes for any ssid */
-} WMI_SSID_FLAG;
-
-typedef PREPACK struct {
- u8 entryIndex; /* 0 to MAX_PROBED_SSID_INDEX */
- u8 flag; /* WMI_SSID_FLG */
- u8 ssidLength;
- u8 ssid[32];
-} POSTPACK WMI_PROBED_SSID_CMD;
-
-/*
- * WMI_SET_LISTEN_INT_CMDID
- * The Listen interval is between 15 and 3000 TUs
- */
-#define MIN_LISTEN_INTERVAL 15
-#define MAX_LISTEN_INTERVAL 5000
-#define MIN_LISTEN_BEACONS 1
-#define MAX_LISTEN_BEACONS 50
-
-typedef PREPACK struct {
- u16 listenInterval;
- u16 numBeacons;
-} POSTPACK WMI_LISTEN_INT_CMD;
-
-/*
- * WMI_SET_BEACON_INT_CMDID
- */
-typedef PREPACK struct {
- u16 beaconInterval;
-} POSTPACK WMI_BEACON_INT_CMD;
-
-/*
- * WMI_SET_BMISS_TIME_CMDID
- * valid values are between 1000 and 5000 TUs
- */
-
-#define MIN_BMISS_TIME 1000
-#define MAX_BMISS_TIME 5000
-#define MIN_BMISS_BEACONS 1
-#define MAX_BMISS_BEACONS 50
-
-typedef PREPACK struct {
- u16 bmissTime;
- u16 numBeacons;
-} POSTPACK WMI_BMISS_TIME_CMD;
-
-/*
- * WMI_SET_POWER_MODE_CMDID
- */
-typedef enum {
- REC_POWER = 0x01,
- MAX_PERF_POWER,
-} WMI_POWER_MODE;
-
-typedef PREPACK struct {
- u8 powerMode; /* WMI_POWER_MODE */
-} POSTPACK WMI_POWER_MODE_CMD;
-
-typedef PREPACK struct {
- s8 status; /* WMI_SET_PARAMS_REPLY */
-} POSTPACK WMI_SET_PARAMS_REPLY;
-
-typedef PREPACK struct {
- u32 opcode;
- u32 length;
- char buffer[1]; /* WMI_SET_PARAMS */
-} POSTPACK WMI_SET_PARAMS_CMD;
-
-typedef PREPACK struct {
- u8 multicast_mac[ATH_MAC_LEN]; /* WMI_SET_MCAST_FILTER */
-} POSTPACK WMI_SET_MCAST_FILTER_CMD;
-
-typedef PREPACK struct {
- u8 enable; /* WMI_MCAST_FILTER */
-} POSTPACK WMI_MCAST_FILTER_CMD;
-
-/*
- * WMI_SET_POWER_PARAMS_CMDID
- */
-typedef enum {
- IGNORE_DTIM = 0x01,
- NORMAL_DTIM = 0x02,
- STICK_DTIM = 0x03,
- AUTO_DTIM = 0x04,
-} WMI_DTIM_POLICY;
-
-/* Policy to determnine whether TX should wakeup WLAN if sleeping */
-typedef enum {
- TX_WAKEUP_UPON_SLEEP = 1,
- TX_DONT_WAKEUP_UPON_SLEEP = 2
-} WMI_TX_WAKEUP_POLICY_UPON_SLEEP;
-
-/*
- * Policy to determnine whether power save failure event should be sent to
- * host during scanning
- */
-typedef enum {
- SEND_POWER_SAVE_FAIL_EVENT_ALWAYS = 1,
- IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN = 2,
-} POWER_SAVE_FAIL_EVENT_POLICY;
-
-typedef PREPACK struct {
- u16 idle_period; /* msec */
- u16 pspoll_number;
- u16 dtim_policy;
- u16 tx_wakeup_policy;
- u16 num_tx_to_wakeup;
- u16 ps_fail_event_policy;
-} POSTPACK WMI_POWER_PARAMS_CMD;
-
-/* Adhoc power save types */
-typedef enum {
- ADHOC_PS_DISABLE=1,
- ADHOC_PS_ATH=2,
- ADHOC_PS_IEEE=3,
- ADHOC_PS_OTHER=4,
-} WMI_ADHOC_PS_TYPE;
-
-typedef PREPACK struct {
- u8 power_saving;
- u8 ttl; /* number of beacon periods */
- u16 atim_windows; /* msec */
- u16 timeout_value; /* msec */
-} POSTPACK WMI_IBSS_PM_CAPS_CMD;
-
-/* AP power save types */
-typedef enum {
- AP_PS_DISABLE=1,
- AP_PS_ATH=2,
-} WMI_AP_PS_TYPE;
-
-typedef PREPACK struct {
- u32 idle_time; /* in msec */
- u32 ps_period; /* in usec */
- u8 sleep_period; /* in ps periods */
- u8 psType;
-} POSTPACK WMI_AP_PS_CMD;
-
-/*
- * WMI_SET_POWERSAVE_TIMERS_POLICY_CMDID
- */
-typedef enum {
- IGNORE_TIM_ALL_QUEUES_APSD = 0,
- PROCESS_TIM_ALL_QUEUES_APSD = 1,
- IGNORE_TIM_SIMULATED_APSD = 2,
- PROCESS_TIM_SIMULATED_APSD = 3,
-} APSD_TIM_POLICY;
-
-typedef PREPACK struct {
- u16 psPollTimeout; /* msec */
- u16 triggerTimeout; /* msec */
- u32 apsdTimPolicy; /* TIM behavior with ques APSD enabled. Default is IGNORE_TIM_ALL_QUEUES_APSD */
- u32 simulatedAPSDTimPolicy; /* TIM behavior with simulated APSD enabled. Default is PROCESS_TIM_SIMULATED_APSD */
-} POSTPACK WMI_POWERSAVE_TIMERS_POLICY_CMD;
-
-/*
- * WMI_SET_VOICE_PKT_SIZE_CMDID
- */
-typedef PREPACK struct {
- u16 voicePktSize;
-} POSTPACK WMI_SET_VOICE_PKT_SIZE_CMD;
-
-/*
- * WMI_SET_MAX_SP_LEN_CMDID
- */
-typedef enum {
- DELIVER_ALL_PKT = 0x0,
- DELIVER_2_PKT = 0x1,
- DELIVER_4_PKT = 0x2,
- DELIVER_6_PKT = 0x3,
-} APSD_SP_LEN_TYPE;
-
-typedef PREPACK struct {
- u8 maxSPLen;
-} POSTPACK WMI_SET_MAX_SP_LEN_CMD;
-
-/*
- * WMI_SET_DISC_TIMEOUT_CMDID
- */
-typedef PREPACK struct {
- u8 disconnectTimeout; /* seconds */
-} POSTPACK WMI_DISC_TIMEOUT_CMD;
-
-typedef enum {
- UPLINK_TRAFFIC = 0,
- DNLINK_TRAFFIC = 1,
- BIDIR_TRAFFIC = 2,
-} DIR_TYPE;
-
-typedef enum {
- DISABLE_FOR_THIS_AC = 0,
- ENABLE_FOR_THIS_AC = 1,
- ENABLE_FOR_ALL_AC = 2,
-} VOICEPS_CAP_TYPE;
-
-typedef enum {
- TRAFFIC_TYPE_APERIODIC = 0,
- TRAFFIC_TYPE_PERIODIC = 1,
-}TRAFFIC_TYPE;
-
-/*
- * WMI_SYNCHRONIZE_CMDID
- */
-typedef PREPACK struct {
- u8 dataSyncMap;
-} POSTPACK WMI_SYNC_CMD;
-
-/*
- * WMI_CREATE_PSTREAM_CMDID
- */
-typedef PREPACK struct {
- u32 minServiceInt; /* in milli-sec */
- u32 maxServiceInt; /* in milli-sec */
- u32 inactivityInt; /* in milli-sec */
- u32 suspensionInt; /* in milli-sec */
- u32 serviceStartTime;
- u32 minDataRate; /* in bps */
- u32 meanDataRate; /* in bps */
- u32 peakDataRate; /* in bps */
- u32 maxBurstSize;
- u32 delayBound;
- u32 minPhyRate; /* in bps */
- u32 sba;
- u32 mediumTime;
- u16 nominalMSDU; /* in octects */
- u16 maxMSDU; /* in octects */
- u8 trafficClass;
- u8 trafficDirection; /* DIR_TYPE */
- u8 rxQueueNum;
- u8 trafficType; /* TRAFFIC_TYPE */
- u8 voicePSCapability; /* VOICEPS_CAP_TYPE */
- u8 tsid;
- u8 userPriority; /* 802.1D user priority */
- u8 nominalPHY; /* nominal phy rate */
-} POSTPACK WMI_CREATE_PSTREAM_CMD;
-
-/*
- * WMI_DELETE_PSTREAM_CMDID
- */
-typedef PREPACK struct {
- u8 txQueueNumber;
- u8 rxQueueNumber;
- u8 trafficDirection;
- u8 trafficClass;
- u8 tsid;
-} POSTPACK WMI_DELETE_PSTREAM_CMD;
-
-/*
- * WMI_SET_CHANNEL_PARAMS_CMDID
- */
-typedef enum {
- WMI_11A_MODE = 0x1,
- WMI_11G_MODE = 0x2,
- WMI_11AG_MODE = 0x3,
- WMI_11B_MODE = 0x4,
- WMI_11GONLY_MODE = 0x5,
-} WMI_PHY_MODE;
-
-#define WMI_MAX_CHANNELS 32
-
-typedef PREPACK struct {
- u8 reserved1;
- u8 scanParam; /* set if enable scan */
- u8 phyMode; /* see WMI_PHY_MODE */
- u8 numChannels; /* how many channels follow */
- u16 channelList[1]; /* channels in Mhz */
-} POSTPACK WMI_CHANNEL_PARAMS_CMD;
-
-
-/*
- * WMI_RSSI_THRESHOLD_PARAMS_CMDID
- * Setting the polltime to 0 would disable polling.
- * Threshold values are in the ascending order, and should agree to:
- * (lowThreshold_lowerVal < lowThreshold_upperVal < highThreshold_lowerVal
- * < highThreshold_upperVal)
- */
-
-typedef PREPACK struct WMI_RSSI_THRESHOLD_PARAMS{
- u32 pollTime; /* Polling time as a factor of LI */
- s16 thresholdAbove1_Val; /* lowest of upper */
- s16 thresholdAbove2_Val;
- s16 thresholdAbove3_Val;
- s16 thresholdAbove4_Val;
- s16 thresholdAbove5_Val;
- s16 thresholdAbove6_Val; /* highest of upper */
- s16 thresholdBelow1_Val; /* lowest of bellow */
- s16 thresholdBelow2_Val;
- s16 thresholdBelow3_Val;
- s16 thresholdBelow4_Val;
- s16 thresholdBelow5_Val;
- s16 thresholdBelow6_Val; /* highest of bellow */
- u8 weight; /* "alpha" */
- u8 reserved[3];
-} POSTPACK WMI_RSSI_THRESHOLD_PARAMS_CMD;
-
-/*
- * WMI_SNR_THRESHOLD_PARAMS_CMDID
- * Setting the polltime to 0 would disable polling.
- */
-
-typedef PREPACK struct WMI_SNR_THRESHOLD_PARAMS{
- u32 pollTime; /* Polling time as a factor of LI */
- u8 weight; /* "alpha" */
- u8 thresholdAbove1_Val; /* lowest of uppper*/
- u8 thresholdAbove2_Val;
- u8 thresholdAbove3_Val;
- u8 thresholdAbove4_Val; /* highest of upper */
- u8 thresholdBelow1_Val; /* lowest of bellow */
- u8 thresholdBelow2_Val;
- u8 thresholdBelow3_Val;
- u8 thresholdBelow4_Val; /* highest of bellow */
- u8 reserved[3];
-} POSTPACK WMI_SNR_THRESHOLD_PARAMS_CMD;
-
-/*
- * WMI_LQ_THRESHOLD_PARAMS_CMDID
- */
-typedef PREPACK struct WMI_LQ_THRESHOLD_PARAMS {
- u8 enable;
- u8 thresholdAbove1_Val;
- u8 thresholdAbove2_Val;
- u8 thresholdAbove3_Val;
- u8 thresholdAbove4_Val;
- u8 thresholdBelow1_Val;
- u8 thresholdBelow2_Val;
- u8 thresholdBelow3_Val;
- u8 thresholdBelow4_Val;
- u8 reserved[3];
-} POSTPACK WMI_LQ_THRESHOLD_PARAMS_CMD;
-
-typedef enum {
- WMI_LPREAMBLE_DISABLED = 0,
- WMI_LPREAMBLE_ENABLED
-} WMI_LPREAMBLE_STATUS;
-
-typedef enum {
- WMI_IGNORE_BARKER_IN_ERP = 0,
- WMI_DONOT_IGNORE_BARKER_IN_ERP
-} WMI_PREAMBLE_POLICY;
-
-typedef PREPACK struct {
- u8 status;
- u8 preamblePolicy;
-}POSTPACK WMI_SET_LPREAMBLE_CMD;
-
-typedef PREPACK struct {
- u16 threshold;
-}POSTPACK WMI_SET_RTS_CMD;
-
-/*
- * WMI_TARGET_ERROR_REPORT_BITMASK_CMDID
- * Sets the error reporting event bitmask in target. Target clears it
- * upon an error. Subsequent errors are counted, but not reported
- * via event, unless the bitmask is set again.
- */
-typedef PREPACK struct {
- u32 bitmask;
-} POSTPACK WMI_TARGET_ERROR_REPORT_BITMASK;
-
-/*
- * WMI_SET_TX_PWR_CMDID
- */
-typedef PREPACK struct {
- u8 dbM; /* in dbM units */
-} POSTPACK WMI_SET_TX_PWR_CMD, WMI_TX_PWR_REPLY;
-
-/*
- * WMI_SET_ASSOC_INFO_CMDID
- *
- * A maximum of 2 private IEs can be sent in the [Re]Assoc request.
- * A 3rd one, the CCX version IE can also be set from the host.
- */
-#define WMI_MAX_ASSOC_INFO_TYPE 2
-#define WMI_CCX_VER_IE 2 /* ieType to set CCX Version IE */
-
-#define WMI_MAX_ASSOC_INFO_LEN 240
-
-typedef PREPACK struct {
- u8 ieType;
- u8 bufferSize;
- u8 assocInfo[1]; /* up to WMI_MAX_ASSOC_INFO_LEN */
-} POSTPACK WMI_SET_ASSOC_INFO_CMD;
-
-
-/*
- * WMI_GET_TX_PWR_CMDID does not take any parameters
- */
-
-/*
- * WMI_ADD_BAD_AP_CMDID
- */
-#define WMI_MAX_BAD_AP_INDEX 1
-
-typedef PREPACK struct {
- u8 badApIndex; /* 0 to WMI_MAX_BAD_AP_INDEX */
- u8 bssid[ATH_MAC_LEN];
-} POSTPACK WMI_ADD_BAD_AP_CMD;
-
-/*
- * WMI_DELETE_BAD_AP_CMDID
- */
-typedef PREPACK struct {
- u8 badApIndex; /* 0 to WMI_MAX_BAD_AP_INDEX */
-} POSTPACK WMI_DELETE_BAD_AP_CMD;
-
-/*
- * WMI_SET_ACCESS_PARAMS_CMDID
- */
-#define WMI_DEFAULT_TXOP_ACPARAM 0 /* implies one MSDU */
-#define WMI_DEFAULT_ECWMIN_ACPARAM 4 /* corresponds to CWmin of 15 */
-#define WMI_DEFAULT_ECWMAX_ACPARAM 10 /* corresponds to CWmax of 1023 */
-#define WMI_MAX_CW_ACPARAM 15 /* maximum eCWmin or eCWmax */
-#define WMI_DEFAULT_AIFSN_ACPARAM 2
-#define WMI_MAX_AIFSN_ACPARAM 15
-typedef PREPACK struct {
- u16 txop; /* in units of 32 usec */
- u8 eCWmin;
- u8 eCWmax;
- u8 aifsn;
- u8 ac;
-} POSTPACK WMI_SET_ACCESS_PARAMS_CMD;
-
-
-/*
- * WMI_SET_RETRY_LIMITS_CMDID
- *
- * This command is used to customize the number of retries the
- * wlan device will perform on a given frame.
- */
-#define WMI_MIN_RETRIES 2
-#define WMI_MAX_RETRIES 13
-typedef enum {
- MGMT_FRAMETYPE = 0,
- CONTROL_FRAMETYPE = 1,
- DATA_FRAMETYPE = 2
-} WMI_FRAMETYPE;
-
-typedef PREPACK struct {
- u8 frameType; /* WMI_FRAMETYPE */
- u8 trafficClass; /* applies only to DATA_FRAMETYPE */
- u8 maxRetries;
- u8 enableNotify;
-} POSTPACK WMI_SET_RETRY_LIMITS_CMD;
-
-/*
- * WMI_SET_ROAM_CTRL_CMDID
- *
- * This command is used to influence the Roaming behaviour
- * Set the host biases of the BSSs before setting the roam mode as bias
- * based.
- */
-
-/*
- * Different types of Roam Control
- */
-
-typedef enum {
- WMI_FORCE_ROAM = 1, /* Roam to the specified BSSID */
- WMI_SET_ROAM_MODE = 2, /* default ,progd bias, no roam */
- WMI_SET_HOST_BIAS = 3, /* Set the Host Bias */
- WMI_SET_LOWRSSI_SCAN_PARAMS = 4, /* Set lowrssi Scan parameters */
-} WMI_ROAM_CTRL_TYPE;
-
-#define WMI_MIN_ROAM_CTRL_TYPE WMI_FORCE_ROAM
-#define WMI_MAX_ROAM_CTRL_TYPE WMI_SET_LOWRSSI_SCAN_PARAMS
-
-/*
- * ROAM MODES
- */
-
-typedef enum {
- WMI_DEFAULT_ROAM_MODE = 1, /* RSSI based ROAM */
- WMI_HOST_BIAS_ROAM_MODE = 2, /* HOST BIAS based ROAM */
- WMI_LOCK_BSS_MODE = 3 /* Lock to the Current BSS - no Roam */
-} WMI_ROAM_MODE;
-
-/*
- * BSS HOST BIAS INFO
- */
-
-typedef PREPACK struct {
- u8 bssid[ATH_MAC_LEN];
- s8 bias;
-} POSTPACK WMI_BSS_BIAS;
-
-typedef PREPACK struct {
- u8 numBss;
- WMI_BSS_BIAS bssBias[1];
-} POSTPACK WMI_BSS_BIAS_INFO;
-
-typedef PREPACK struct WMI_LOWRSSI_SCAN_PARAMS {
- u16 lowrssi_scan_period;
- s16 lowrssi_scan_threshold;
- s16 lowrssi_roam_threshold;
- u8 roam_rssi_floor;
- u8 reserved[1]; /* For alignment */
-} POSTPACK WMI_LOWRSSI_SCAN_PARAMS;
-
-typedef PREPACK struct {
- PREPACK union {
- u8 bssid[ATH_MAC_LEN]; /* WMI_FORCE_ROAM */
- u8 roamMode; /* WMI_SET_ROAM_MODE */
- WMI_BSS_BIAS_INFO bssBiasInfo; /* WMI_SET_HOST_BIAS */
- WMI_LOWRSSI_SCAN_PARAMS lrScanParams;
- } POSTPACK info;
- u8 roamCtrlType ;
-} POSTPACK WMI_SET_ROAM_CTRL_CMD;
-
-/*
- * WMI_SET_BT_WLAN_CONN_PRECEDENCE_CMDID
- */
-typedef enum {
- BT_WLAN_CONN_PRECDENCE_WLAN=0, /* Default */
- BT_WLAN_CONN_PRECDENCE_PAL,
-} BT_WLAN_CONN_PRECEDENCE;
-
-typedef PREPACK struct {
- u8 precedence;
-} POSTPACK WMI_SET_BT_WLAN_CONN_PRECEDENCE;
-
-/*
- * WMI_ENABLE_RM_CMDID
- */
-typedef PREPACK struct {
- u32 enable_radio_measurements;
-} POSTPACK WMI_ENABLE_RM_CMD;
-
-/*
- * WMI_SET_MAX_OFFHOME_DURATION_CMDID
- */
-typedef PREPACK struct {
- u8 max_offhome_duration;
-} POSTPACK WMI_SET_MAX_OFFHOME_DURATION_CMD;
-
-typedef PREPACK struct {
- u32 frequency;
- u8 threshold;
-} POSTPACK WMI_SET_HB_CHALLENGE_RESP_PARAMS_CMD;
-/*---------------------- BTCOEX RELATED -------------------------------------*/
-/*----------------------COMMON to AR6002 and AR6003 -------------------------*/
-typedef enum {
- BT_STREAM_UNDEF = 0,
- BT_STREAM_SCO, /* SCO stream */
- BT_STREAM_A2DP, /* A2DP stream */
- BT_STREAM_SCAN, /* BT Discovery or Page */
- BT_STREAM_ESCO,
- BT_STREAM_MAX
-} BT_STREAM_TYPE;
-
-typedef enum {
- BT_PARAM_SCO_PSPOLL_LATENCY_ONE_FOURTH =1,
- BT_PARAM_SCO_PSPOLL_LATENCY_HALF,
- BT_PARAM_SCO_PSPOLL_LATENCY_THREE_FOURTH,
-} BT_PARAMS_SCO_PSPOLL_LATENCY;
-
-typedef enum {
- BT_PARAMS_SCO_STOMP_SCO_NEVER =1,
- BT_PARAMS_SCO_STOMP_SCO_ALWAYS,
- BT_PARAMS_SCO_STOMP_SCO_IN_LOWRSSI,
-} BT_PARAMS_SCO_STOMP_RULES;
-
-typedef enum {
- BT_STATUS_UNDEF = 0,
- BT_STATUS_ON,
- BT_STATUS_OFF,
- BT_STATUS_MAX
-} BT_STREAM_STATUS;
-
-typedef PREPACK struct {
- u8 streamType;
- u8 status;
-} POSTPACK WMI_SET_BT_STATUS_CMD;
-
-typedef enum {
- BT_ANT_TYPE_UNDEF=0,
- BT_ANT_TYPE_DUAL,
- BT_ANT_TYPE_SPLITTER,
- BT_ANT_TYPE_SWITCH,
- BT_ANT_TYPE_HIGH_ISO_DUAL
-} BT_ANT_FRONTEND_CONFIG;
-
-typedef enum {
- BT_COLOCATED_DEV_BTS4020=0,
- BT_COLCATED_DEV_CSR ,
- BT_COLOCATED_DEV_VALKYRIE
-} BT_COLOCATED_DEV_TYPE;
-
-/*********************** Applicable to AR6002 ONLY ******************************/
-
-typedef enum {
- BT_PARAM_SCO = 1, /* SCO stream parameters */
- BT_PARAM_A2DP ,
- BT_PARAM_ANTENNA_CONFIG,
- BT_PARAM_COLOCATED_BT_DEVICE,
- BT_PARAM_ACLCOEX,
- BT_PARAM_11A_SEPARATE_ANT,
- BT_PARAM_MAX
-} BT_PARAM_TYPE;
-
-
-#define BT_SCO_ALLOW_CLOSE_RANGE_OPT (1 << 0)
-#define BT_SCO_FORCE_AWAKE_OPT (1 << 1)
-#define BT_SCO_SET_RSSI_OVERRIDE(flags) ((flags) |= (1 << 2))
-#define BT_SCO_GET_RSSI_OVERRIDE(flags) (((flags) >> 2) & 0x1)
-#define BT_SCO_SET_RTS_OVERRIDE(flags) ((flags) |= (1 << 3))
-#define BT_SCO_GET_RTS_OVERRIDE(flags) (((flags) >> 3) & 0x1)
-#define BT_SCO_GET_MIN_LOW_RATE_CNT(flags) (((flags) >> 8) & 0xFF)
-#define BT_SCO_GET_MAX_LOW_RATE_CNT(flags) (((flags) >> 16) & 0xFF)
-#define BT_SCO_SET_MIN_LOW_RATE_CNT(flags,val) (flags) |= (((val) & 0xFF) << 8)
-#define BT_SCO_SET_MAX_LOW_RATE_CNT(flags,val) (flags) |= (((val) & 0xFF) << 16)
-
-typedef PREPACK struct {
- u32 numScoCyclesForceTrigger; /* Number SCO cycles after which
- force a pspoll. default = 10 */
- u32 dataResponseTimeout; /* Timeout Waiting for Downlink pkt
- in response for ps-poll,
- default = 10 msecs */
- u32 stompScoRules;
- u32 scoOptFlags; /* SCO Options Flags :
- bits: meaning:
- 0 Allow Close Range Optimization
- 1 Force awake during close range
- 2 If set use host supplied RSSI for OPT
- 3 If set use host supplied RTS COUNT for OPT
- 4..7 Unused
- 8..15 Low Data Rate Min Cnt
- 16..23 Low Data Rate Max Cnt
- */
-
- u8 stompDutyCyleVal; /* Sco cycles to limit ps-poll queuing
- if stomped */
- u8 stompDutyCyleMaxVal; /*firm ware increases stomp duty cycle
- gradually uptill this value on need basis*/
- u8 psPollLatencyFraction; /* Fraction of idle
- period, within which
- additional ps-polls
- can be queued */
- u8 noSCOSlots; /* Number of SCO Tx/Rx slots.
- HVx, EV3, 2EV3 = 2 */
- u8 noIdleSlots; /* Number of Bluetooth idle slots between
- consecutive SCO Tx/Rx slots
- HVx, EV3 = 4
- 2EV3 = 10 */
- u8 scoOptOffRssi;/*RSSI value below which we go to ps poll*/
- u8 scoOptOnRssi; /*RSSI value above which we reenter opt mode*/
- u8 scoOptRtsCount;
-} POSTPACK BT_PARAMS_SCO;
-
-#define BT_A2DP_ALLOW_CLOSE_RANGE_OPT (1 << 0)
-#define BT_A2DP_FORCE_AWAKE_OPT (1 << 1)
-#define BT_A2DP_SET_RSSI_OVERRIDE(flags) ((flags) |= (1 << 2))
-#define BT_A2DP_GET_RSSI_OVERRIDE(flags) (((flags) >> 2) & 0x1)
-#define BT_A2DP_SET_RTS_OVERRIDE(flags) ((flags) |= (1 << 3))
-#define BT_A2DP_GET_RTS_OVERRIDE(flags) (((flags) >> 3) & 0x1)
-#define BT_A2DP_GET_MIN_LOW_RATE_CNT(flags) (((flags) >> 8) & 0xFF)
-#define BT_A2DP_GET_MAX_LOW_RATE_CNT(flags) (((flags) >> 16) & 0xFF)
-#define BT_A2DP_SET_MIN_LOW_RATE_CNT(flags,val) (flags) |= (((val) & 0xFF) << 8)
-#define BT_A2DP_SET_MAX_LOW_RATE_CNT(flags,val) (flags) |= (((val) & 0xFF) << 16)
-
-typedef PREPACK struct {
- u32 a2dpWlanUsageLimit; /* MAX time firmware uses the medium for
- wlan, after it identifies the idle time
- default (30 msecs) */
- u32 a2dpBurstCntMin; /* Minimum number of bluetooth data frames
- to replenish Wlan Usage limit (default 3) */
- u32 a2dpDataRespTimeout;
- u32 a2dpOptFlags; /* A2DP Option flags:
- bits: meaning:
- 0 Allow Close Range Optimization
- 1 Force awake during close range
- 2 If set use host supplied RSSI for OPT
- 3 If set use host supplied RTS COUNT for OPT
- 4..7 Unused
- 8..15 Low Data Rate Min Cnt
- 16..23 Low Data Rate Max Cnt
- */
- u8 isCoLocatedBtRoleMaster;
- u8 a2dpOptOffRssi;/*RSSI value below which we go to ps poll*/
- u8 a2dpOptOnRssi; /*RSSI value above which we reenter opt mode*/
- u8 a2dpOptRtsCount;
-}POSTPACK BT_PARAMS_A2DP;
-
-/* During BT ftp/ BT OPP or any another data based acl profile on bluetooth
- (non a2dp).*/
-typedef PREPACK struct {
- u32 aclWlanMediumUsageTime; /* Wlan usage time during Acl (non-a2dp)
- coexistence (default 30 msecs) */
- u32 aclBtMediumUsageTime; /* Bt usage time during acl coexistence
- (default 30 msecs)*/
- u32 aclDataRespTimeout;
- u32 aclDetectTimeout; /* ACL coexistence enabled if we get
- 10 Pkts in X msec(default 100 msecs) */
- u32 aclmaxPktCnt; /* No of ACL pkts to receive before
- enabling ACL coex */
-
-}POSTPACK BT_PARAMS_ACLCOEX;
-
-typedef PREPACK struct {
- PREPACK union {
- BT_PARAMS_SCO scoParams;
- BT_PARAMS_A2DP a2dpParams;
- BT_PARAMS_ACLCOEX aclCoexParams;
- u8 antType; /* 0 -Disabled (default)
- 1 - BT_ANT_TYPE_DUAL
- 2 - BT_ANT_TYPE_SPLITTER
- 3 - BT_ANT_TYPE_SWITCH */
- u8 coLocatedBtDev; /* 0 - BT_COLOCATED_DEV_BTS4020 (default)
- 1 - BT_COLCATED_DEV_CSR
- 2 - BT_COLOCATED_DEV_VALKYRIe
- */
- } POSTPACK info;
- u8 paramType ;
-} POSTPACK WMI_SET_BT_PARAMS_CMD;
-
-/************************ END AR6002 BTCOEX *******************************/
-/*-----------------------AR6003 BTCOEX -----------------------------------*/
-
-/* ---------------WMI_SET_BTCOEX_FE_ANT_CMDID --------------------------*/
-/* Indicates front end antenna configuration. This command needs to be issued
- * right after initialization and after WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID.
- * AR6003 enables coexistence and antenna switching based on the configuration.
- */
-typedef enum {
- WMI_BTCOEX_NOT_ENABLED = 0,
- WMI_BTCOEX_FE_ANT_SINGLE =1,
- WMI_BTCOEX_FE_ANT_DUAL=2,
- WMI_BTCOEX_FE_ANT_DUAL_HIGH_ISO=3,
- WMI_BTCOEX_FE_ANT_TYPE_MAX
-}WMI_BTCOEX_FE_ANT_TYPE;
-
-typedef PREPACK struct {
- u8 btcoexFeAntType; /* 1 - WMI_BTCOEX_FE_ANT_SINGLE for single antenna front end
- 2 - WMI_BTCOEX_FE_ANT_DUAL for dual antenna front end
- (for isolations less 35dB, for higher isolation there
- is not need to pass this command).
- (not implemented)
- */
-}POSTPACK WMI_SET_BTCOEX_FE_ANT_CMD;
-
-/* -------------WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID ----------------*/
-/* Indicate the bluetooth chip to the firmware. Firmware can have different algorithm based
- * bluetooth chip type.Based on bluetooth device, different coexistence protocol would be used.
- */
-typedef PREPACK struct {
- u8 btcoexCoLocatedBTdev; /*1 - Qcom BT (3 -wire PTA)
- 2 - CSR BT (3 wire PTA)
- 3 - Atheros 3001 BT (3 wire PTA)
- 4 - STE bluetooth (4-wire ePTA)
- 5 - Atheros 3002 BT (4-wire MCI)
- defaults= 3 (Atheros 3001 BT )
- */
-}POSTPACK WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD;
-
-/* -------------WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMDID ------------*/
-/* Configuration parameters during bluetooth inquiry and page. Page configuration
- * is applicable only on interfaces which can distinguish page (applicable only for ePTA -
- * STE bluetooth).
- * Bluetooth inquiry start and end is indicated via WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID.
- * During this the station will be power-save mode.
- */
-typedef PREPACK struct {
- u32 btInquiryDataFetchFrequency;/* The frequency of querying the AP for data
- (via pspoll) is configured by this parameter.
- "default = 10 ms" */
-
- u32 protectBmissDurPostBtInquiry;/* The firmware will continue to be in inquiry state
- for configured duration, after inquiry completion
- . This is to ensure other bluetooth transactions
- (RDP, SDP profiles, link key exchange ...etc)
- goes through smoothly without wifi stomping.
- default = 10 secs*/
-
- u32 maxpageStomp; /*Applicable only for STE-BT interface. Currently not
- used */
- u32 btInquiryPageFlag; /* Not used */
-}POSTPACK WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD;
-
-/*---------------------WMI_SET_BTCOEX_SCO_CONFIG_CMDID ---------------*/
-/* Configure SCO parameters. These parameters would be used whenever firmware is indicated
- * of (e)SCO profile on bluetooth ( via WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID).
- * Configration of BTCOEX_SCO_CONFIG data structure are common configuration and applies
- * ps-poll mode and opt mode.
- * Ps-poll Mode - Station is in power-save and retrieves downlink data between sco gaps.
- * Opt Mode - station is in awake state and access point can send data to station any time.
- * BTCOEX_PSPOLLMODE_SCO_CONFIG - Configuration applied only during ps-poll mode.
- * BTCOEX_OPTMODE_SCO_CONFIG - Configuration applied only during opt mode.
- */
-#define WMI_SCO_CONFIG_FLAG_ALLOW_OPTIMIZATION (1 << 0)
-#define WMI_SCO_CONFIG_FLAG_IS_EDR_CAPABLE (1 << 1)
-#define WMI_SCO_CONFIG_FLAG_IS_BT_MASTER (1 << 2)
-#define WMI_SCO_CONFIG_FLAG_FW_DETECT_OF_PER (1 << 3)
-typedef PREPACK struct {
- u32 scoSlots; /* Number of SCO Tx/Rx slots.
- HVx, EV3, 2EV3 = 2 */
- u32 scoIdleSlots; /* Number of Bluetooth idle slots between
- consecutive SCO Tx/Rx slots
- HVx, EV3 = 4
- 2EV3 = 10
- */
- u32 scoFlags; /* SCO Options Flags :
- bits: meaning:
- 0 Allow Close Range Optimization
- 1 Is EDR capable or Not
- 2 IS Co-located Bt role Master
- 3 Firmware determines the periodicity of SCO.
- */
-
- u32 linkId; /* applicable to STE-BT - not used */
-}POSTPACK BTCOEX_SCO_CONFIG;
-
-typedef PREPACK struct {
- u32 scoCyclesForceTrigger; /* Number SCO cycles after which
- force a pspoll. default = 10 */
- u32 scoDataResponseTimeout; /* Timeout Waiting for Downlink pkt
- in response for ps-poll,
- default = 20 msecs */
-
- u32 scoStompDutyCyleVal; /* not implemented */
-
- u32 scoStompDutyCyleMaxVal; /*Not implemented */
-
- u32 scoPsPollLatencyFraction; /* Fraction of idle
- period, within which
- additional ps-polls can be queued
- 1 - 1/4 of idle duration
- 2 - 1/2 of idle duration
- 3 - 3/4 of idle duration
- default =2 (1/2)
- */
-}POSTPACK BTCOEX_PSPOLLMODE_SCO_CONFIG;
-
-typedef PREPACK struct {
- u32 scoStompCntIn100ms;/*max number of SCO stomp in 100ms allowed in
- opt mode. If exceeds the configured value,
- switch to ps-poll mode
- default = 3 */
-
- u32 scoContStompMax; /* max number of continuous stomp allowed in opt mode.
- if exceeded switch to pspoll mode
- default = 3 */
-
- u32 scoMinlowRateMbps; /* Low rate threshold */
-
- u32 scoLowRateCnt; /* number of low rate pkts (< scoMinlowRateMbps) allowed in 100 ms.
- If exceeded switch/stay to ps-poll mode, lower stay in opt mode.
- default = 36
- */
-
- u32 scoHighPktRatio; /*(Total Rx pkts in 100 ms + 1)/
- ((Total tx pkts in 100 ms - No of high rate pkts in 100 ms) + 1) in 100 ms,
- if exceeded switch/stay in opt mode and if lower switch/stay in pspoll mode.
- default = 5 (80% of high rates)
- */
-
- u32 scoMaxAggrSize; /* Max number of Rx subframes allowed in this mode. (Firmware re-negogiates
- max number of aggregates if it was negogiated to higher value
- default = 1
- Recommended value Basic rate headsets = 1, EDR (2-EV3) =4.
- */
-}POSTPACK BTCOEX_OPTMODE_SCO_CONFIG;
-
-typedef PREPACK struct {
- u32 scanInterval;
- u32 maxScanStompCnt;
-}POSTPACK BTCOEX_WLANSCAN_SCO_CONFIG;
-
-typedef PREPACK struct {
- BTCOEX_SCO_CONFIG scoConfig;
- BTCOEX_PSPOLLMODE_SCO_CONFIG scoPspollConfig;
- BTCOEX_OPTMODE_SCO_CONFIG scoOptModeConfig;
- BTCOEX_WLANSCAN_SCO_CONFIG scoWlanScanConfig;
-}POSTPACK WMI_SET_BTCOEX_SCO_CONFIG_CMD;
-
-/* ------------------WMI_SET_BTCOEX_A2DP_CONFIG_CMDID -------------------*/
-/* Configure A2DP profile parameters. These parameters would be used whenver firmware is indicated
- * of A2DP profile on bluetooth ( via WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID).
- * Configuration of BTCOEX_A2DP_CONFIG data structure are common configuration and applies to
- * ps-poll mode and opt mode.
- * Ps-poll Mode - Station is in power-save and retrieves downlink data between a2dp data bursts.
- * Opt Mode - station is in power save during a2dp bursts and awake in the gaps.
- * BTCOEX_PSPOLLMODE_A2DP_CONFIG - Configuration applied only during ps-poll mode.
- * BTCOEX_OPTMODE_A2DP_CONFIG - Configuration applied only during opt mode.
- */
-
-#define WMI_A2DP_CONFIG_FLAG_ALLOW_OPTIMIZATION (1 << 0)
-#define WMI_A2DP_CONFIG_FLAG_IS_EDR_CAPABLE (1 << 1)
-#define WMI_A2DP_CONFIG_FLAG_IS_BT_ROLE_MASTER (1 << 2)
-#define WMI_A2DP_CONFIG_FLAG_IS_A2DP_HIGH_PRI (1 << 3)
-#define WMI_A2DP_CONFIG_FLAG_FIND_BT_ROLE (1 << 4)
-
-typedef PREPACK struct {
- u32 a2dpFlags; /* A2DP Option flags:
- bits: meaning:
- 0 Allow Close Range Optimization
- 1 IS EDR capable
- 2 IS Co-located Bt role Master
- 3 a2dp traffic is high priority
- 4 Fw detect the role of bluetooth.
- */
- u32 linkId; /* Applicable only to STE-BT - not used */
-
-}POSTPACK BTCOEX_A2DP_CONFIG;
-
-typedef PREPACK struct {
- u32 a2dpWlanMaxDur; /* MAX time firmware uses the medium for
- wlan, after it identifies the idle time
- default (30 msecs) */
-
- u32 a2dpMinBurstCnt; /* Minimum number of bluetooth data frames
- to replenish Wlan Usage limit (default 3) */
-
- u32 a2dpDataRespTimeout; /* Max duration firmware waits for downlink
- by stomping on bluetooth
- after ps-poll is acknowledged.
- default = 20 ms
- */
-}POSTPACK BTCOEX_PSPOLLMODE_A2DP_CONFIG;
-
-typedef PREPACK struct {
- u32 a2dpMinlowRateMbps; /* Low rate threshold */
-
- u32 a2dpLowRateCnt; /* number of low rate pkts (< a2dpMinlowRateMbps) allowed in 100 ms.
- If exceeded switch/stay to ps-poll mode, lower stay in opt mode.
- default = 36
- */
-
- u32 a2dpHighPktRatio; /*(Total Rx pkts in 100 ms + 1)/
- ((Total tx pkts in 100 ms - No of high rate pkts in 100 ms) + 1) in 100 ms,
- if exceeded switch/stay in opt mode and if lower switch/stay in pspoll mode.
- default = 5 (80% of high rates)
- */
-
- u32 a2dpMaxAggrSize; /* Max number of Rx subframes allowed in this mode. (Firmware re-negogiates
- max number of aggregates if it was negogiated to higher value
- default = 1
- Recommended value Basic rate headsets = 1, EDR (2-EV3) =8.
- */
- u32 a2dpPktStompCnt; /*number of a2dp pkts that can be stomped per burst.
- default = 6*/
-
-}POSTPACK BTCOEX_OPTMODE_A2DP_CONFIG;
-
-typedef PREPACK struct {
- BTCOEX_A2DP_CONFIG a2dpConfig;
- BTCOEX_PSPOLLMODE_A2DP_CONFIG a2dppspollConfig;
- BTCOEX_OPTMODE_A2DP_CONFIG a2dpOptConfig;
-}POSTPACK WMI_SET_BTCOEX_A2DP_CONFIG_CMD;
-
-/*------------ WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMDID---------------------*/
-/* Configure non-A2dp ACL profile parameters.The starts of ACL profile can either be
- * indicated via WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID orenabled via firmware detection
- * which is configured via "aclCoexFlags".
- * Configration of BTCOEX_ACLCOEX_CONFIG data structure are common configuration and applies
- * ps-poll mode and opt mode.
- * Ps-poll Mode - Station is in power-save and retrieves downlink data during wlan medium.
- * Opt Mode - station is in power save during bluetooth medium time and awake during wlan duration.
- * (Not implemented yet)
- *
- * BTCOEX_PSPOLLMODE_ACLCOEX_CONFIG - Configuration applied only during ps-poll mode.
- * BTCOEX_OPTMODE_ACLCOEX_CONFIG - Configuration applied only during opt mode.
- */
-
-#define WMI_ACLCOEX_FLAGS_ALLOW_OPTIMIZATION (1 << 0)
-#define WMI_ACLCOEX_FLAGS_DISABLE_FW_DETECTION (1 << 1)
-
-typedef PREPACK struct {
- u32 aclWlanMediumDur; /* Wlan usage time during Acl (non-a2dp)
- coexistence (default 30 msecs)
- */
-
- u32 aclBtMediumDur; /* Bt usage time during acl coexistence
- (default 30 msecs)
- */
-
- u32 aclDetectTimeout; /* BT activity observation time limit.
- In this time duration, number of bt pkts are counted.
- If the Cnt reaches "aclPktCntLowerLimit" value
- for "aclIterToEnableCoex" iteration continuously,
- firmware gets into ACL coexistence mode.
- Similarly, if bt traffic count during ACL coexistence
- has not reached "aclPktCntLowerLimit" continuously
- for "aclIterToEnableCoex", then ACL coexistence is
- disabled.
- -default 100 msecs
- */
-
- u32 aclPktCntLowerLimit; /* Acl Pkt Cnt to be received in duration of
- "aclDetectTimeout" for
- "aclIterForEnDis" times to enabling ACL coex.
- Similar logic is used to disable acl coexistence.
- (If "aclPktCntLowerLimit" cnt of acl pkts
- are not seen by the for "aclIterForEnDis"
- then acl coexistence is disabled).
- default = 10
- */
-
- u32 aclIterForEnDis; /* number of Iteration of "aclPktCntLowerLimit" for Enabling and
- Disabling Acl Coexistence.
- default = 3
- */
-
- u32 aclPktCntUpperLimit; /* This is upperBound limit, if there is more than
- "aclPktCntUpperLimit" seen in "aclDetectTimeout",
- ACL coexistence is enabled right away.
- - default 15*/
-
- u32 aclCoexFlags; /* A2DP Option flags:
- bits: meaning:
- 0 Allow Close Range Optimization
- 1 disable Firmware detection
- (Currently supported configuration is aclCoexFlags =0)
- */
- u32 linkId; /* Applicable only for STE-BT - not used */
-
-}POSTPACK BTCOEX_ACLCOEX_CONFIG;
-
-typedef PREPACK struct {
- u32 aclDataRespTimeout; /* Max duration firmware waits for downlink
- by stomping on bluetooth
- after ps-poll is acknowledged.
- default = 20 ms */
-
-}POSTPACK BTCOEX_PSPOLLMODE_ACLCOEX_CONFIG;
-
-
-/* Not implemented yet*/
-typedef PREPACK struct {
- u32 aclCoexMinlowRateMbps;
- u32 aclCoexLowRateCnt;
- u32 aclCoexHighPktRatio;
- u32 aclCoexMaxAggrSize;
- u32 aclPktStompCnt;
-}POSTPACK BTCOEX_OPTMODE_ACLCOEX_CONFIG;
-
-typedef PREPACK struct {
- BTCOEX_ACLCOEX_CONFIG aclCoexConfig;
- BTCOEX_PSPOLLMODE_ACLCOEX_CONFIG aclCoexPspollConfig;
- BTCOEX_OPTMODE_ACLCOEX_CONFIG aclCoexOptConfig;
-}POSTPACK WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD;
-
-/* -----------WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID ------------------*/
-typedef enum {
- WMI_BTCOEX_BT_PROFILE_SCO =1,
- WMI_BTCOEX_BT_PROFILE_A2DP,
- WMI_BTCOEX_BT_PROFILE_INQUIRY_PAGE,
- WMI_BTCOEX_BT_PROFILE_ACLCOEX,
-}WMI_BTCOEX_BT_PROFILE;
-
-typedef PREPACK struct {
- u32 btProfileType;
- u32 btOperatingStatus;
- u32 btLinkId;
-}WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMD;
-
-/*--------------------- WMI_SET_BTCOEX_DEBUG_CMDID ---------------------*/
-/* Used for firmware development and debugging */
-typedef PREPACK struct {
- u32 btcoexDbgParam1;
- u32 btcoexDbgParam2;
- u32 btcoexDbgParam3;
- u32 btcoexDbgParam4;
- u32 btcoexDbgParam5;
-}WMI_SET_BTCOEX_DEBUG_CMD;
-
-/*---------------------WMI_GET_BTCOEX_CONFIG_CMDID --------------------- */
-/* Command to firmware to get configuration parameters of the bt profile
- * reported via WMI_BTCOEX_CONFIG_EVENTID */
-typedef PREPACK struct {
- u32 btProfileType; /* 1 - SCO
- 2 - A2DP
- 3 - INQUIRY_PAGE
- 4 - ACLCOEX
- */
- u32 linkId; /* not used */
-}WMI_GET_BTCOEX_CONFIG_CMD;
-
-/*------------------WMI_REPORT_BTCOEX_CONFIG_EVENTID------------------- */
-/* Event from firmware to host, sent in response to WMI_GET_BTCOEX_CONFIG_CMDID
- * */
-typedef PREPACK struct {
- u32 btProfileType;
- u32 linkId; /* not used */
- PREPACK union {
- WMI_SET_BTCOEX_SCO_CONFIG_CMD scoConfigCmd;
- WMI_SET_BTCOEX_A2DP_CONFIG_CMD a2dpConfigCmd;
- WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD aclcoexConfig;
- WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD btinquiryPageConfigCmd;
- } POSTPACK info;
-} POSTPACK WMI_BTCOEX_CONFIG_EVENT;
-
-/*------------- WMI_REPORT_BTCOEX_BTCOEX_STATS_EVENTID--------------------*/
-/* Used for firmware development and debugging*/
-typedef PREPACK struct {
- u32 highRatePktCnt;
- u32 firstBmissCnt;
- u32 psPollFailureCnt;
- u32 nullFrameFailureCnt;
- u32 optModeTransitionCnt;
-}BTCOEX_GENERAL_STATS;
-
-typedef PREPACK struct {
- u32 scoStompCntAvg;
- u32 scoStompIn100ms;
- u32 scoMaxContStomp;
- u32 scoAvgNoRetries;
- u32 scoMaxNoRetriesIn100ms;
-}BTCOEX_SCO_STATS;
-
-typedef PREPACK struct {
- u32 a2dpBurstCnt;
- u32 a2dpMaxBurstCnt;
- u32 a2dpAvgIdletimeIn100ms;
- u32 a2dpAvgStompCnt;
-}BTCOEX_A2DP_STATS;
-
-typedef PREPACK struct {
- u32 aclPktCntInBtTime;
- u32 aclStompCntInWlanTime;
- u32 aclPktCntIn100ms;
-}BTCOEX_ACLCOEX_STATS;
-
-typedef PREPACK struct {
- BTCOEX_GENERAL_STATS coexStats;
- BTCOEX_SCO_STATS scoStats;
- BTCOEX_A2DP_STATS a2dpStats;
- BTCOEX_ACLCOEX_STATS aclCoexStats;
-}WMI_BTCOEX_STATS_EVENT;
-
-
-/*--------------------------END OF BTCOEX -------------------------------------*/
-typedef PREPACK struct {
- u32 sleepState;
-}WMI_REPORT_SLEEP_STATE_EVENT;
-
-typedef enum {
- WMI_REPORT_SLEEP_STATUS_IS_DEEP_SLEEP =0,
- WMI_REPORT_SLEEP_STATUS_IS_AWAKE
-} WMI_REPORT_SLEEP_STATUS;
-typedef enum {
- DISCONN_EVT_IN_RECONN = 0, /* default */
- NO_DISCONN_EVT_IN_RECONN
-} TARGET_EVENT_REPORT_CONFIG;
-
-typedef PREPACK struct {
- u32 evtConfig;
-} POSTPACK WMI_SET_TARGET_EVENT_REPORT_CMD;
-
-
-typedef PREPACK struct {
- u16 cmd_buf_sz; /* HCI cmd buffer size */
- u8 buf[1]; /* Absolute HCI cmd */
-} POSTPACK WMI_HCI_CMD;
-
-/*
- * Command Replies
- */
-
-/*
- * WMI_GET_CHANNEL_LIST_CMDID reply
- */
-typedef PREPACK struct {
- u8 reserved1;
- u8 numChannels; /* number of channels in reply */
- u16 channelList[1]; /* channel in Mhz */
-} POSTPACK WMI_CHANNEL_LIST_REPLY;
-
-typedef enum {
- A_SUCCEEDED = 0,
- A_FAILED_DELETE_STREAM_DOESNOT_EXIST=250,
- A_SUCCEEDED_MODIFY_STREAM=251,
- A_FAILED_INVALID_STREAM = 252,
- A_FAILED_MAX_THINSTREAMS = 253,
- A_FAILED_CREATE_REMOVE_PSTREAM_FIRST = 254,
-} PSTREAM_REPLY_STATUS;
-
-typedef PREPACK struct {
- u8 status; /* PSTREAM_REPLY_STATUS */
- u8 txQueueNumber;
- u8 rxQueueNumber;
- u8 trafficClass;
- u8 trafficDirection; /* DIR_TYPE */
-} POSTPACK WMI_CRE_PRIORITY_STREAM_REPLY;
-
-typedef PREPACK struct {
- u8 status; /* PSTREAM_REPLY_STATUS */
- u8 txQueueNumber;
- u8 rxQueueNumber;
- u8 trafficDirection; /* DIR_TYPE */
- u8 trafficClass;
-} POSTPACK WMI_DEL_PRIORITY_STREAM_REPLY;
-
-/*
- * List of Events (target to host)
- */
-typedef enum {
- WMI_READY_EVENTID = 0x1001,
- WMI_CONNECT_EVENTID,
- WMI_DISCONNECT_EVENTID,
- WMI_BSSINFO_EVENTID,
- WMI_CMDERROR_EVENTID,
- WMI_REGDOMAIN_EVENTID,
- WMI_PSTREAM_TIMEOUT_EVENTID,
- WMI_NEIGHBOR_REPORT_EVENTID,
- WMI_TKIP_MICERR_EVENTID,
- WMI_SCAN_COMPLETE_EVENTID, /* 0x100a */
- WMI_REPORT_STATISTICS_EVENTID,
- WMI_RSSI_THRESHOLD_EVENTID,
- WMI_ERROR_REPORT_EVENTID,
- WMI_OPT_RX_FRAME_EVENTID,
- WMI_REPORT_ROAM_TBL_EVENTID,
- WMI_EXTENSION_EVENTID,
- WMI_CAC_EVENTID,
- WMI_SNR_THRESHOLD_EVENTID,
- WMI_LQ_THRESHOLD_EVENTID,
- WMI_TX_RETRY_ERR_EVENTID, /* 0x1014 */
- WMI_REPORT_ROAM_DATA_EVENTID,
- WMI_TEST_EVENTID,
- WMI_APLIST_EVENTID,
- WMI_GET_WOW_LIST_EVENTID,
- WMI_GET_PMKID_LIST_EVENTID,
- WMI_CHANNEL_CHANGE_EVENTID,
- WMI_PEER_NODE_EVENTID,
- WMI_PSPOLL_EVENTID,
- WMI_DTIMEXPIRY_EVENTID,
- WMI_WLAN_VERSION_EVENTID,
- WMI_SET_PARAMS_REPLY_EVENTID,
- WMI_ADDBA_REQ_EVENTID, /*0x1020 */
- WMI_ADDBA_RESP_EVENTID,
- WMI_DELBA_REQ_EVENTID,
- WMI_TX_COMPLETE_EVENTID,
- WMI_HCI_EVENT_EVENTID,
- WMI_ACL_DATA_EVENTID,
- WMI_REPORT_SLEEP_STATE_EVENTID,
-#ifdef WAPI_ENABLE
- WMI_WAPI_REKEY_EVENTID,
-#endif
- WMI_REPORT_BTCOEX_STATS_EVENTID,
- WMI_REPORT_BTCOEX_CONFIG_EVENTID,
- WMI_GET_PMK_EVENTID,
-
- /* DFS Events */
- WMI_DFS_HOST_ATTACH_EVENTID,
- WMI_DFS_HOST_INIT_EVENTID,
- WMI_DFS_RESET_DELAYLINES_EVENTID,
- WMI_DFS_RESET_RADARQ_EVENTID,
- WMI_DFS_RESET_AR_EVENTID,
- WMI_DFS_RESET_ARQ_EVENTID,
- WMI_DFS_SET_DUR_MULTIPLIER_EVENTID,
- WMI_DFS_SET_BANGRADAR_EVENTID,
- WMI_DFS_SET_DEBUGLEVEL_EVENTID,
- WMI_DFS_PHYERR_EVENTID,
- /* CCX Evants */
- WMI_CCX_RM_STATUS_EVENTID,
-
- /* P2P Events */
- WMI_P2P_GO_NEG_RESULT_EVENTID,
-
- WMI_WAC_SCAN_DONE_EVENTID,
- WMI_WAC_REPORT_BSS_EVENTID,
- WMI_WAC_START_WPS_EVENTID,
- WMI_WAC_CTRL_REQ_REPLY_EVENTID,
-
- /* RFKILL Events */
- WMI_RFKILL_STATE_CHANGE_EVENTID,
- WMI_RFKILL_GET_MODE_CMD_EVENTID,
- WMI_THIN_RESERVED_START_EVENTID = 0x8000,
-
- /*
- * Events in this range are reserved for thinmode
- * See wmi_thin.h for actual definitions
- */
- WMI_THIN_RESERVED_END_EVENTID = 0x8fff,
-
- WMI_SET_CHANNEL_EVENTID,
- WMI_ASSOC_REQ_EVENTID,
-
- /* generic ACS event */
- WMI_ACS_EVENTID,
- WMI_REPORT_WMM_PARAMS_EVENTID
-} WMI_EVENT_ID;
-
-
-typedef enum {
- WMI_11A_CAPABILITY = 1,
- WMI_11G_CAPABILITY = 2,
- WMI_11AG_CAPABILITY = 3,
- WMI_11NA_CAPABILITY = 4,
- WMI_11NG_CAPABILITY = 5,
- WMI_11NAG_CAPABILITY = 6,
- // END CAPABILITY
- WMI_11N_CAPABILITY_OFFSET = (WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY),
-} WMI_PHY_CAPABILITY;
-
-typedef PREPACK struct {
- u8 macaddr[ATH_MAC_LEN];
- u8 phyCapability; /* WMI_PHY_CAPABILITY */
-} POSTPACK WMI_READY_EVENT_1;
-
-typedef PREPACK struct {
- u32 sw_version;
- u32 abi_version;
- u8 macaddr[ATH_MAC_LEN];
- u8 phyCapability; /* WMI_PHY_CAPABILITY */
-} POSTPACK WMI_READY_EVENT_2;
-
-#if defined(ATH_TARGET)
-#ifdef AR6002_REV2
-#define WMI_READY_EVENT WMI_READY_EVENT_1 /* AR6002_REV2 target code */
-#else
-#define WMI_READY_EVENT WMI_READY_EVENT_2 /* AR6001, AR6002_REV4, AR6002_REV5 */
-#endif
-#else
-#define WMI_READY_EVENT WMI_READY_EVENT_2 /* host code */
-#endif
-
-
-/*
- * Connect Event
- */
-typedef PREPACK struct {
- u16 channel;
- u8 bssid[ATH_MAC_LEN];
- u16 listenInterval;
- u16 beaconInterval;
- u32 networkType;
- u8 beaconIeLen;
- u8 assocReqLen;
- u8 assocRespLen;
- u8 assocInfo[1];
-} POSTPACK WMI_CONNECT_EVENT;
-
-/*
- * Disconnect Event
- */
-typedef enum {
- NO_NETWORK_AVAIL = 0x01,
- LOST_LINK = 0x02, /* bmiss */
- DISCONNECT_CMD = 0x03,
- BSS_DISCONNECTED = 0x04,
- AUTH_FAILED = 0x05,
- ASSOC_FAILED = 0x06,
- NO_RESOURCES_AVAIL = 0x07,
- CSERV_DISCONNECT = 0x08,
- INVALID_PROFILE = 0x0a,
- DOT11H_CHANNEL_SWITCH = 0x0b,
- PROFILE_MISMATCH = 0x0c,
- CONNECTION_EVICTED = 0x0d,
- IBSS_MERGE = 0xe,
-} WMI_DISCONNECT_REASON;
-
-typedef PREPACK struct {
- u16 protocolReasonStatus; /* reason code, see 802.11 spec. */
- u8 bssid[ATH_MAC_LEN]; /* set if known */
- u8 disconnectReason ; /* see WMI_DISCONNECT_REASON */
- u8 assocRespLen;
- u8 assocInfo[1];
-} POSTPACK WMI_DISCONNECT_EVENT;
-
-/*
- * BSS Info Event.
- * Mechanism used to inform host of the presence and characteristic of
- * wireless networks present. Consists of bss info header followed by
- * the beacon or probe-response frame body. The 802.11 header is not included.
- */
-typedef enum {
- BEACON_FTYPE = 0x1,
- PROBERESP_FTYPE,
- ACTION_MGMT_FTYPE,
- PROBEREQ_FTYPE,
-} WMI_BI_FTYPE;
-
-enum {
- BSS_ELEMID_CHANSWITCH = 0x01,
- BSS_ELEMID_ATHEROS = 0x02,
-};
-
-typedef PREPACK struct {
- u16 channel;
- u8 frameType; /* see WMI_BI_FTYPE */
- u8 snr;
- s16 rssi;
- u8 bssid[ATH_MAC_LEN];
- u32 ieMask;
-} POSTPACK WMI_BSS_INFO_HDR;
-
-/*
- * BSS INFO HDR version 2.0
- * With 6 bytes HTC header and 6 bytes of WMI header
- * WMI_BSS_INFO_HDR cannot be accommodated in the removed 802.11 management
- * header space.
- * - Reduce the ieMask to 2 bytes as only two bit flags are used
- * - Remove rssi and compute it on the host. rssi = snr - 95
- */
-typedef PREPACK struct {
- u16 channel;
- u8 frameType; /* see WMI_BI_FTYPE */
- u8 snr;
- u8 bssid[ATH_MAC_LEN];
- u16 ieMask;
-} POSTPACK WMI_BSS_INFO_HDR2;
-
-/*
- * Command Error Event
- */
-typedef enum {
- INVALID_PARAM = 0x01,
- ILLEGAL_STATE = 0x02,
- INTERNAL_ERROR = 0x03,
-} WMI_ERROR_CODE;
-
-typedef PREPACK struct {
- u16 commandId;
- u8 errorCode;
-} POSTPACK WMI_CMD_ERROR_EVENT;
-
-/*
- * New Regulatory Domain Event
- */
-typedef PREPACK struct {
- u32 regDomain;
-} POSTPACK WMI_REG_DOMAIN_EVENT;
-
-typedef PREPACK struct {
- u8 txQueueNumber;
- u8 rxQueueNumber;
- u8 trafficDirection;
- u8 trafficClass;
-} POSTPACK WMI_PSTREAM_TIMEOUT_EVENT;
-
-typedef PREPACK struct {
- u8 reserve1;
- u8 reserve2;
- u8 reserve3;
- u8 trafficClass;
-} POSTPACK WMI_ACM_REJECT_EVENT;
-
-/*
- * The WMI_NEIGHBOR_REPORT Event is generated by the target to inform
- * the host of BSS's it has found that matches the current profile.
- * It can be used by the host to cache PMKs and/to initiate pre-authentication
- * if the BSS supports it. The first bssid is always the current associated
- * BSS.
- * The bssid and bssFlags information repeats according to the number
- * or APs reported.
- */
-typedef enum {
- WMI_DEFAULT_BSS_FLAGS = 0x00,
- WMI_PREAUTH_CAPABLE_BSS = 0x01,
- WMI_PMKID_VALID_BSS = 0x02,
-} WMI_BSS_FLAGS;
-
-typedef PREPACK struct {
- u8 bssid[ATH_MAC_LEN];
- u8 bssFlags; /* see WMI_BSS_FLAGS */
-} POSTPACK WMI_NEIGHBOR_INFO;
-
-typedef PREPACK struct {
- s8 numberOfAps;
- WMI_NEIGHBOR_INFO neighbor[1];
-} POSTPACK WMI_NEIGHBOR_REPORT_EVENT;
-
-/*
- * TKIP MIC Error Event
- */
-typedef PREPACK struct {
- u8 keyid;
- u8 ismcast;
-} POSTPACK WMI_TKIP_MICERR_EVENT;
-
-/*
- * WMI_SCAN_COMPLETE_EVENTID - no parameters (old), staus parameter (new)
- */
-typedef PREPACK struct {
- s32 status;
-} POSTPACK WMI_SCAN_COMPLETE_EVENT;
-
-#define MAX_OPT_DATA_LEN 1400
-
-/*
- * WMI_SET_ADHOC_BSSID_CMDID
- */
-typedef PREPACK struct {
- u8 bssid[ATH_MAC_LEN];
-} POSTPACK WMI_SET_ADHOC_BSSID_CMD;
-
-/*
- * WMI_SET_OPT_MODE_CMDID
- */
-typedef enum {
- SPECIAL_OFF,
- SPECIAL_ON,
-} OPT_MODE_TYPE;
-
-typedef PREPACK struct {
- u8 optMode;
-} POSTPACK WMI_SET_OPT_MODE_CMD;
-
-/*
- * WMI_TX_OPT_FRAME_CMDID
- */
-typedef enum {
- OPT_PROBE_REQ = 0x01,
- OPT_PROBE_RESP = 0x02,
- OPT_CPPP_START = 0x03,
- OPT_CPPP_STOP = 0x04,
-} WMI_OPT_FTYPE;
-
-typedef PREPACK struct {
- u16 optIEDataLen;
- u8 frmType;
- u8 dstAddr[ATH_MAC_LEN];
- u8 bssid[ATH_MAC_LEN];
- u8 reserved; /* For alignment */
- u8 optIEData[1];
-} POSTPACK WMI_OPT_TX_FRAME_CMD;
-
-/*
- * Special frame receive Event.
- * Mechanism used to inform host of the receiption of the special frames.
- * Consists of special frame info header followed by special frame body.
- * The 802.11 header is not included.
- */
-typedef PREPACK struct {
- u16 channel;
- u8 frameType; /* see WMI_OPT_FTYPE */
- s8 snr;
- u8 srcAddr[ATH_MAC_LEN];
- u8 bssid[ATH_MAC_LEN];
-} POSTPACK WMI_OPT_RX_INFO_HDR;
-
-/*
- * Reporting statistics.
- */
-typedef PREPACK struct {
- u32 tx_packets;
- u32 tx_bytes;
- u32 tx_unicast_pkts;
- u32 tx_unicast_bytes;
- u32 tx_multicast_pkts;
- u32 tx_multicast_bytes;
- u32 tx_broadcast_pkts;
- u32 tx_broadcast_bytes;
- u32 tx_rts_success_cnt;
- u32 tx_packet_per_ac[4];
- u32 tx_errors_per_ac[4];
-
- u32 tx_errors;
- u32 tx_failed_cnt;
- u32 tx_retry_cnt;
- u32 tx_mult_retry_cnt;
- u32 tx_rts_fail_cnt;
- s32 tx_unicast_rate;
-}POSTPACK tx_stats_t;
-
-typedef PREPACK struct {
- u32 rx_packets;
- u32 rx_bytes;
- u32 rx_unicast_pkts;
- u32 rx_unicast_bytes;
- u32 rx_multicast_pkts;
- u32 rx_multicast_bytes;
- u32 rx_broadcast_pkts;
- u32 rx_broadcast_bytes;
- u32 rx_fragment_pkt;
-
- u32 rx_errors;
- u32 rx_crcerr;
- u32 rx_key_cache_miss;
- u32 rx_decrypt_err;
- u32 rx_duplicate_frames;
- s32 rx_unicast_rate;
-}POSTPACK rx_stats_t;
-
-typedef PREPACK struct {
- u32 tkip_local_mic_failure;
- u32 tkip_counter_measures_invoked;
- u32 tkip_replays;
- u32 tkip_format_errors;
- u32 ccmp_format_errors;
- u32 ccmp_replays;
-}POSTPACK tkip_ccmp_stats_t;
-
-typedef PREPACK struct {
- u32 power_save_failure_cnt;
- u16 stop_tx_failure_cnt;
- u16 atim_tx_failure_cnt;
- u16 atim_rx_failure_cnt;
- u16 bcn_rx_failure_cnt;
-}POSTPACK pm_stats_t;
-
-typedef PREPACK struct {
- u32 cs_bmiss_cnt;
- u32 cs_lowRssi_cnt;
- u16 cs_connect_cnt;
- u16 cs_disconnect_cnt;
- s16 cs_aveBeacon_rssi;
- u16 cs_roam_count;
- s16 cs_rssi;
- u8 cs_snr;
- u8 cs_aveBeacon_snr;
- u8 cs_lastRoam_msec;
-} POSTPACK cserv_stats_t;
-
-typedef PREPACK struct {
- tx_stats_t tx_stats;
- rx_stats_t rx_stats;
- tkip_ccmp_stats_t tkipCcmpStats;
-}POSTPACK wlan_net_stats_t;
-
-typedef PREPACK struct {
- u32 arp_received;
- u32 arp_matched;
- u32 arp_replied;
-} POSTPACK arp_stats_t;
-
-typedef PREPACK struct {
- u32 wow_num_pkts_dropped;
- u16 wow_num_events_discarded;
- u8 wow_num_host_pkt_wakeups;
- u8 wow_num_host_event_wakeups;
-} POSTPACK wlan_wow_stats_t;
-
-typedef PREPACK struct {
- u32 lqVal;
- s32 noise_floor_calibation;
- pm_stats_t pmStats;
- wlan_net_stats_t txrxStats;
- wlan_wow_stats_t wowStats;
- arp_stats_t arpStats;
- cserv_stats_t cservStats;
-} POSTPACK WMI_TARGET_STATS;
-
-/*
- * WMI_RSSI_THRESHOLD_EVENTID.
- * Indicate the RSSI events to host. Events are indicated when we breach a
- * thresold value.
- */
-typedef enum{
- WMI_RSSI_THRESHOLD1_ABOVE = 0,
- WMI_RSSI_THRESHOLD2_ABOVE,
- WMI_RSSI_THRESHOLD3_ABOVE,
- WMI_RSSI_THRESHOLD4_ABOVE,
- WMI_RSSI_THRESHOLD5_ABOVE,
- WMI_RSSI_THRESHOLD6_ABOVE,
- WMI_RSSI_THRESHOLD1_BELOW,
- WMI_RSSI_THRESHOLD2_BELOW,
- WMI_RSSI_THRESHOLD3_BELOW,
- WMI_RSSI_THRESHOLD4_BELOW,
- WMI_RSSI_THRESHOLD5_BELOW,
- WMI_RSSI_THRESHOLD6_BELOW
-}WMI_RSSI_THRESHOLD_VAL;
-
-typedef PREPACK struct {
- s16 rssi;
- u8 range;
-}POSTPACK WMI_RSSI_THRESHOLD_EVENT;
-
-/*
- * WMI_ERROR_REPORT_EVENTID
- */
-typedef enum{
- WMI_TARGET_PM_ERR_FAIL = 0x00000001,
- WMI_TARGET_KEY_NOT_FOUND = 0x00000002,
- WMI_TARGET_DECRYPTION_ERR = 0x00000004,
- WMI_TARGET_BMISS = 0x00000008,
- WMI_PSDISABLE_NODE_JOIN = 0x00000010,
- WMI_TARGET_COM_ERR = 0x00000020,
- WMI_TARGET_FATAL_ERR = 0x00000040
-} WMI_TARGET_ERROR_VAL;
-
-typedef PREPACK struct {
- u32 errorVal;
-}POSTPACK WMI_TARGET_ERROR_REPORT_EVENT;
-
-typedef PREPACK struct {
- u8 retrys;
-}POSTPACK WMI_TX_RETRY_ERR_EVENT;
-
-typedef enum{
- WMI_SNR_THRESHOLD1_ABOVE = 1,
- WMI_SNR_THRESHOLD1_BELOW,
- WMI_SNR_THRESHOLD2_ABOVE,
- WMI_SNR_THRESHOLD2_BELOW,
- WMI_SNR_THRESHOLD3_ABOVE,
- WMI_SNR_THRESHOLD3_BELOW,
- WMI_SNR_THRESHOLD4_ABOVE,
- WMI_SNR_THRESHOLD4_BELOW
-} WMI_SNR_THRESHOLD_VAL;
-
-typedef PREPACK struct {
- u8 range; /* WMI_SNR_THRESHOLD_VAL */
- u8 snr;
-}POSTPACK WMI_SNR_THRESHOLD_EVENT;
-
-typedef enum{
- WMI_LQ_THRESHOLD1_ABOVE = 1,
- WMI_LQ_THRESHOLD1_BELOW,
- WMI_LQ_THRESHOLD2_ABOVE,
- WMI_LQ_THRESHOLD2_BELOW,
- WMI_LQ_THRESHOLD3_ABOVE,
- WMI_LQ_THRESHOLD3_BELOW,
- WMI_LQ_THRESHOLD4_ABOVE,
- WMI_LQ_THRESHOLD4_BELOW
-} WMI_LQ_THRESHOLD_VAL;
-
-typedef PREPACK struct {
- s32 lq;
- u8 range; /* WMI_LQ_THRESHOLD_VAL */
-}POSTPACK WMI_LQ_THRESHOLD_EVENT;
-/*
- * WMI_REPORT_ROAM_TBL_EVENTID
- */
-#define MAX_ROAM_TBL_CAND 5
-
-typedef PREPACK struct {
- s32 roam_util;
- u8 bssid[ATH_MAC_LEN];
- s8 rssi;
- s8 rssidt;
- s8 last_rssi;
- s8 util;
- s8 bias;
- u8 reserved; /* For alignment */
-} POSTPACK WMI_BSS_ROAM_INFO;
-
-
-typedef PREPACK struct {
- u16 roamMode;
- u16 numEntries;
- WMI_BSS_ROAM_INFO bssRoamInfo[1];
-} POSTPACK WMI_TARGET_ROAM_TBL;
-
-/*
- * WMI_HCI_EVENT_EVENTID
- */
-typedef PREPACK struct {
- u16 evt_buf_sz; /* HCI event buffer size */
- u8 buf[1]; /* HCI event */
-} POSTPACK WMI_HCI_EVENT;
-
-/*
- * WMI_CAC_EVENTID
- */
-typedef enum {
- CAC_INDICATION_ADMISSION = 0x00,
- CAC_INDICATION_ADMISSION_RESP = 0x01,
- CAC_INDICATION_DELETE = 0x02,
- CAC_INDICATION_NO_RESP = 0x03,
-}CAC_INDICATION;
-
-#define WMM_TSPEC_IE_LEN 63
-
-typedef PREPACK struct {
- u8 ac;
- u8 cac_indication;
- u8 statusCode;
- u8 tspecSuggestion[WMM_TSPEC_IE_LEN];
-}POSTPACK WMI_CAC_EVENT;
-
-/*
- * WMI_APLIST_EVENTID
- */
-
-typedef enum {
- APLIST_VER1 = 1,
-} APLIST_VER;
-
-typedef PREPACK struct {
- u8 bssid[ATH_MAC_LEN];
- u16 channel;
-} POSTPACK WMI_AP_INFO_V1;
-
-typedef PREPACK union {
- WMI_AP_INFO_V1 apInfoV1;
-} POSTPACK WMI_AP_INFO;
-
-typedef PREPACK struct {
- u8 apListVer;
- u8 numAP;
- WMI_AP_INFO apList[1];
-} POSTPACK WMI_APLIST_EVENT;
-
-/*
- * developer commands
- */
-
-/*
- * WMI_SET_BITRATE_CMDID
- *
- * Get bit rate cmd uses same definition as set bit rate cmd
- */
-typedef enum {
- RATE_AUTO = -1,
- RATE_1Mb = 0,
- RATE_2Mb = 1,
- RATE_5_5Mb = 2,
- RATE_11Mb = 3,
- RATE_6Mb = 4,
- RATE_9Mb = 5,
- RATE_12Mb = 6,
- RATE_18Mb = 7,
- RATE_24Mb = 8,
- RATE_36Mb = 9,
- RATE_48Mb = 10,
- RATE_54Mb = 11,
- RATE_MCS_0_20 = 12,
- RATE_MCS_1_20 = 13,
- RATE_MCS_2_20 = 14,
- RATE_MCS_3_20 = 15,
- RATE_MCS_4_20 = 16,
- RATE_MCS_5_20 = 17,
- RATE_MCS_6_20 = 18,
- RATE_MCS_7_20 = 19,
- RATE_MCS_0_40 = 20,
- RATE_MCS_1_40 = 21,
- RATE_MCS_2_40 = 22,
- RATE_MCS_3_40 = 23,
- RATE_MCS_4_40 = 24,
- RATE_MCS_5_40 = 25,
- RATE_MCS_6_40 = 26,
- RATE_MCS_7_40 = 27,
-} WMI_BIT_RATE;
-
-typedef PREPACK struct {
- s8 rateIndex; /* see WMI_BIT_RATE */
- s8 mgmtRateIndex;
- s8 ctlRateIndex;
-} POSTPACK WMI_BIT_RATE_CMD;
-
-
-typedef PREPACK struct {
- s8 rateIndex; /* see WMI_BIT_RATE */
-} POSTPACK WMI_BIT_RATE_REPLY;
-
-
-/*
- * WMI_SET_FIXRATES_CMDID
- *
- * Get fix rates cmd uses same definition as set fix rates cmd
- */
-#define FIX_RATE_1Mb ((u32)0x1)
-#define FIX_RATE_2Mb ((u32)0x2)
-#define FIX_RATE_5_5Mb ((u32)0x4)
-#define FIX_RATE_11Mb ((u32)0x8)
-#define FIX_RATE_6Mb ((u32)0x10)
-#define FIX_RATE_9Mb ((u32)0x20)
-#define FIX_RATE_12Mb ((u32)0x40)
-#define FIX_RATE_18Mb ((u32)0x80)
-#define FIX_RATE_24Mb ((u32)0x100)
-#define FIX_RATE_36Mb ((u32)0x200)
-#define FIX_RATE_48Mb ((u32)0x400)
-#define FIX_RATE_54Mb ((u32)0x800)
-#define FIX_RATE_MCS_0_20 ((u32)0x1000)
-#define FIX_RATE_MCS_1_20 ((u32)0x2000)
-#define FIX_RATE_MCS_2_20 ((u32)0x4000)
-#define FIX_RATE_MCS_3_20 ((u32)0x8000)
-#define FIX_RATE_MCS_4_20 ((u32)0x10000)
-#define FIX_RATE_MCS_5_20 ((u32)0x20000)
-#define FIX_RATE_MCS_6_20 ((u32)0x40000)
-#define FIX_RATE_MCS_7_20 ((u32)0x80000)
-#define FIX_RATE_MCS_0_40 ((u32)0x100000)
-#define FIX_RATE_MCS_1_40 ((u32)0x200000)
-#define FIX_RATE_MCS_2_40 ((u32)0x400000)
-#define FIX_RATE_MCS_3_40 ((u32)0x800000)
-#define FIX_RATE_MCS_4_40 ((u32)0x1000000)
-#define FIX_RATE_MCS_5_40 ((u32)0x2000000)
-#define FIX_RATE_MCS_6_40 ((u32)0x4000000)
-#define FIX_RATE_MCS_7_40 ((u32)0x8000000)
-
-typedef PREPACK struct {
- u32 fixRateMask; /* see WMI_BIT_RATE */
-} POSTPACK WMI_FIX_RATES_CMD, WMI_FIX_RATES_REPLY;
-
-typedef PREPACK struct {
- u8 bEnableMask;
- u8 frameType; /*type and subtype*/
- u32 frameRateMask; /* see WMI_BIT_RATE */
-} POSTPACK WMI_FRAME_RATES_CMD, WMI_FRAME_RATES_REPLY;
-
-/*
- * WMI_SET_RECONNECT_AUTH_MODE_CMDID
- *
- * Set authentication mode
- */
-typedef enum {
- RECONN_DO_AUTH = 0x00,
- RECONN_NOT_AUTH = 0x01
-} WMI_AUTH_MODE;
-
-typedef PREPACK struct {
- u8 mode;
-} POSTPACK WMI_SET_AUTH_MODE_CMD;
-
-/*
- * WMI_SET_REASSOC_MODE_CMDID
- *
- * Set authentication mode
- */
-typedef enum {
- REASSOC_DO_DISASSOC = 0x00,
- REASSOC_DONOT_DISASSOC = 0x01
-} WMI_REASSOC_MODE;
-
-typedef PREPACK struct {
- u8 mode;
-}POSTPACK WMI_SET_REASSOC_MODE_CMD;
-
-typedef enum {
- ROAM_DATA_TIME = 1, /* Get The Roam Time Data */
-} ROAM_DATA_TYPE;
-
-typedef PREPACK struct {
- u32 disassoc_time;
- u32 no_txrx_time;
- u32 assoc_time;
- u32 allow_txrx_time;
- u8 disassoc_bssid[ATH_MAC_LEN];
- s8 disassoc_bss_rssi;
- u8 assoc_bssid[ATH_MAC_LEN];
- s8 assoc_bss_rssi;
-} POSTPACK WMI_TARGET_ROAM_TIME;
-
-typedef PREPACK struct {
- PREPACK union {
- WMI_TARGET_ROAM_TIME roamTime;
- } POSTPACK u;
- u8 roamDataType ;
-} POSTPACK WMI_TARGET_ROAM_DATA;
-
-typedef enum {
- WMI_WMM_DISABLED = 0,
- WMI_WMM_ENABLED
-} WMI_WMM_STATUS;
-
-typedef PREPACK struct {
- u8 status;
-}POSTPACK WMI_SET_WMM_CMD;
-
-typedef PREPACK struct {
- u8 status;
-}POSTPACK WMI_SET_QOS_SUPP_CMD;
-
-typedef enum {
- WMI_TXOP_DISABLED = 0,
- WMI_TXOP_ENABLED
-} WMI_TXOP_CFG;
-
-typedef PREPACK struct {
- u8 txopEnable;
-}POSTPACK WMI_SET_WMM_TXOP_CMD;
-
-typedef PREPACK struct {
- u8 keepaliveInterval;
-} POSTPACK WMI_SET_KEEPALIVE_CMD;
-
-typedef PREPACK struct {
- u32 configured;
- u8 keepaliveInterval;
-} POSTPACK WMI_GET_KEEPALIVE_CMD;
-
-/*
- * Add Application specified IE to a management frame
- */
-#define WMI_MAX_IE_LEN 255
-
-typedef PREPACK struct {
- u8 mgmtFrmType; /* one of WMI_MGMT_FRAME_TYPE */
- u8 ieLen; /* Length of the IE that should be added to the MGMT frame */
- u8 ieInfo[1];
-} POSTPACK WMI_SET_APPIE_CMD;
-
-/*
- * Notify the WSC registration status to the target
- */
-#define WSC_REG_ACTIVE 1
-#define WSC_REG_INACTIVE 0
-/* Generic Hal Interface for setting hal paramters. */
-/* Add new Set HAL Param cmdIds here for newer params */
-typedef enum {
- WHAL_SETCABTO_CMDID = 1,
-}WHAL_CMDID;
-
-typedef PREPACK struct {
- u8 cabTimeOut;
-} POSTPACK WHAL_SETCABTO_PARAM;
-
-typedef PREPACK struct {
- u8 whalCmdId;
- u8 data[1];
-} POSTPACK WHAL_PARAMCMD;
-
-
-#define WOW_MAX_FILTER_LISTS 1 /*4*/
-#define WOW_MAX_FILTERS_PER_LIST 4
-#define WOW_PATTERN_SIZE 64
-#define WOW_MASK_SIZE 64
-
-#define MAC_MAX_FILTERS_PER_LIST 4
-
-typedef PREPACK struct {
- u8 wow_valid_filter;
- u8 wow_filter_id;
- u8 wow_filter_size;
- u8 wow_filter_offset;
- u8 wow_filter_mask[WOW_MASK_SIZE];
- u8 wow_filter_pattern[WOW_PATTERN_SIZE];
-} POSTPACK WOW_FILTER;
-
-
-typedef PREPACK struct {
- u8 wow_valid_list;
- u8 wow_list_id;
- u8 wow_num_filters;
- u8 wow_total_list_size;
- WOW_FILTER list[WOW_MAX_FILTERS_PER_LIST];
-} POSTPACK WOW_FILTER_LIST;
-
-typedef PREPACK struct {
- u8 valid_filter;
- u8 mac_addr[ATH_MAC_LEN];
-} POSTPACK MAC_FILTER;
-
-
-typedef PREPACK struct {
- u8 total_list_size;
- u8 enable;
- MAC_FILTER list[MAC_MAX_FILTERS_PER_LIST];
-} POSTPACK MAC_FILTER_LIST;
-
-#define MAX_IP_ADDRS 2
-typedef PREPACK struct {
- u32 ips[MAX_IP_ADDRS]; /* IP in Network Byte Order */
-} POSTPACK WMI_SET_IP_CMD;
-
-typedef PREPACK struct {
- u32 awake;
- u32 asleep;
-} POSTPACK WMI_SET_HOST_SLEEP_MODE_CMD;
-
-typedef enum {
- WOW_FILTER_SSID = 0x1
-} WMI_WOW_FILTER;
-
-typedef PREPACK struct {
- u32 enable_wow;
- WMI_WOW_FILTER filter;
- u16 hostReqDelay;
-} POSTPACK WMI_SET_WOW_MODE_CMD;
-
-typedef PREPACK struct {
- u8 filter_list_id;
-} POSTPACK WMI_GET_WOW_LIST_CMD;
-
-/*
- * WMI_GET_WOW_LIST_CMD reply
- */
-typedef PREPACK struct {
- u8 num_filters; /* number of patterns in reply */
- u8 this_filter_num; /* this is filter # x of total num_filters */
- u8 wow_mode;
- u8 host_mode;
- WOW_FILTER wow_filters[1];
-} POSTPACK WMI_GET_WOW_LIST_REPLY;
-
-typedef PREPACK struct {
- u8 filter_list_id;
- u8 filter_size;
- u8 filter_offset;
- u8 filter[1];
-} POSTPACK WMI_ADD_WOW_PATTERN_CMD;
-
-typedef PREPACK struct {
- u16 filter_list_id;
- u16 filter_id;
-} POSTPACK WMI_DEL_WOW_PATTERN_CMD;
-
-typedef PREPACK struct {
- u8 macaddr[ATH_MAC_LEN];
-} POSTPACK WMI_SET_MAC_ADDRESS_CMD;
-
-/*
- * WMI_SET_AKMP_PARAMS_CMD
- */
-
-#define WMI_AKMP_MULTI_PMKID_EN 0x000001
-
-typedef PREPACK struct {
- u32 akmpInfo;
-} POSTPACK WMI_SET_AKMP_PARAMS_CMD;
-
-typedef PREPACK struct {
- u8 pmkid[WMI_PMKID_LEN];
-} POSTPACK WMI_PMKID;
-
-/*
- * WMI_SET_PMKID_LIST_CMD
- */
-#define WMI_MAX_PMKID_CACHE 8
-
-typedef PREPACK struct {
- u32 numPMKID;
- WMI_PMKID pmkidList[WMI_MAX_PMKID_CACHE];
-} POSTPACK WMI_SET_PMKID_LIST_CMD;
-
-/*
- * WMI_GET_PMKID_LIST_CMD Reply
- * Following the Number of PMKIDs is the list of PMKIDs
- */
-typedef PREPACK struct {
- u32 numPMKID;
- u8 bssidList[ATH_MAC_LEN][1];
- WMI_PMKID pmkidList[1];
-} POSTPACK WMI_PMKID_LIST_REPLY;
-
-typedef PREPACK struct {
- u16 oldChannel;
- u32 newChannel;
-} POSTPACK WMI_CHANNEL_CHANGE_EVENT;
-
-typedef PREPACK struct {
- u32 version;
-} POSTPACK WMI_WLAN_VERSION_EVENT;
-
-
-/* WMI_ADDBA_REQ_EVENTID */
-typedef PREPACK struct {
- u8 tid;
- u8 win_sz;
- u16 st_seq_no;
- u8 status; /* f/w response for ADDBA Req; OK(0) or failure(!=0) */
-} POSTPACK WMI_ADDBA_REQ_EVENT;
-
-/* WMI_ADDBA_RESP_EVENTID */
-typedef PREPACK struct {
- u8 tid;
- u8 status; /* OK(0), failure (!=0) */
- u16 amsdu_sz; /* Three values: Not supported(0), 3839, 8k */
-} POSTPACK WMI_ADDBA_RESP_EVENT;
-
-/* WMI_DELBA_EVENTID
- * f/w received a DELBA for peer and processed it.
- * Host is notified of this
- */
-typedef PREPACK struct {
- u8 tid;
- u8 is_peer_initiator;
- u16 reason_code;
-} POSTPACK WMI_DELBA_EVENT;
-
-
-#ifdef WAPI_ENABLE
-#define WAPI_REKEY_UCAST 1
-#define WAPI_REKEY_MCAST 2
-typedef PREPACK struct {
- u8 type;
- u8 macAddr[ATH_MAC_LEN];
-} POSTPACK WMI_WAPIREKEY_EVENT;
-#endif
-
-
-/* WMI_ALLOW_AGGR_CMDID
- * Configures tid's to allow ADDBA negotiations
- * on each tid, in each direction
- */
-typedef PREPACK struct {
- u16 tx_allow_aggr; /* 16-bit mask to allow uplink ADDBA negotiation - bit position indicates tid*/
- u16 rx_allow_aggr; /* 16-bit mask to allow donwlink ADDBA negotiation - bit position indicates tid*/
-} POSTPACK WMI_ALLOW_AGGR_CMD;
-
-/* WMI_ADDBA_REQ_CMDID
- * f/w starts performing ADDBA negotiations with peer
- * on the given tid
- */
-typedef PREPACK struct {
- u8 tid;
-} POSTPACK WMI_ADDBA_REQ_CMD;
-
-/* WMI_DELBA_REQ_CMDID
- * f/w would teardown BA with peer.
- * is_send_initiator indicates if it's or tx or rx side
- */
-typedef PREPACK struct {
- u8 tid;
- u8 is_sender_initiator;
-
-} POSTPACK WMI_DELBA_REQ_CMD;
-
-#define PEER_NODE_JOIN_EVENT 0x00
-#define PEER_NODE_LEAVE_EVENT 0x01
-#define PEER_FIRST_NODE_JOIN_EVENT 0x10
-#define PEER_LAST_NODE_LEAVE_EVENT 0x11
-typedef PREPACK struct {
- u8 eventCode;
- u8 peerMacAddr[ATH_MAC_LEN];
-} POSTPACK WMI_PEER_NODE_EVENT;
-
-#define IEEE80211_FRAME_TYPE_MGT 0x00
-#define IEEE80211_FRAME_TYPE_CTL 0x04
-
-/*
- * Transmit complete event data structure(s)
- */
-
-
-typedef PREPACK struct {
-#define TX_COMPLETE_STATUS_SUCCESS 0
-#define TX_COMPLETE_STATUS_RETRIES 1
-#define TX_COMPLETE_STATUS_NOLINK 2
-#define TX_COMPLETE_STATUS_TIMEOUT 3
-#define TX_COMPLETE_STATUS_OTHER 4
-
- u8 status; /* one of TX_COMPLETE_STATUS_... */
- u8 pktID; /* packet ID to identify parent packet */
- u8 rateIdx; /* rate index on successful transmission */
- u8 ackFailures; /* number of ACK failures in tx attempt */
-#if 0 /* optional params currently omitted. */
- u32 queueDelay; // usec delay measured Tx Start time - host delivery time
- u32 mediaDelay; // usec delay measured ACK rx time - host delivery time
-#endif
-} POSTPACK TX_COMPLETE_MSG_V1; /* version 1 of tx complete msg */
-
-typedef PREPACK struct {
- u8 numMessages; /* number of tx comp msgs following this struct */
- u8 msgLen; /* length in bytes for each individual msg following this struct */
- u8 msgType; /* version of tx complete msg data following this struct */
- u8 reserved; /* individual messages follow this header */
-} POSTPACK WMI_TX_COMPLETE_EVENT;
-
-#define WMI_TXCOMPLETE_VERSION_1 (0x01)
-
-
-/*
- * ------- AP Mode definitions --------------
- */
-
-/*
- * !!! Warning !!!
- * -Changing the following values needs compilation of both driver and firmware
- */
-#ifdef AR6002_REV2
-#define AP_MAX_NUM_STA 4
-#else
-#define AP_MAX_NUM_STA 8
-#endif
-#define AP_ACL_SIZE 10
-#define IEEE80211_MAX_IE 256
-#define MCAST_AID 0xFF /* Spl. AID used to set DTIM flag in the beacons */
-#define DEF_AP_COUNTRY_CODE "US "
-#define DEF_AP_WMODE_G WMI_11G_MODE
-#define DEF_AP_WMODE_AG WMI_11AG_MODE
-#define DEF_AP_DTIM 5
-#define DEF_BEACON_INTERVAL 100
-
-/* AP mode disconnect reasons */
-#define AP_DISCONNECT_STA_LEFT 101
-#define AP_DISCONNECT_FROM_HOST 102
-#define AP_DISCONNECT_COMM_TIMEOUT 103
-
-/*
- * Used with WMI_AP_HIDDEN_SSID_CMDID
- */
-#define HIDDEN_SSID_FALSE 0
-#define HIDDEN_SSID_TRUE 1
-typedef PREPACK struct {
- u8 hidden_ssid;
-} POSTPACK WMI_AP_HIDDEN_SSID_CMD;
-
-/*
- * Used with WMI_AP_ACL_POLICY_CMDID
- */
-#define AP_ACL_DISABLE 0x00
-#define AP_ACL_ALLOW_MAC 0x01
-#define AP_ACL_DENY_MAC 0x02
-#define AP_ACL_RETAIN_LIST_MASK 0x80
-typedef PREPACK struct {
- u8 policy;
-} POSTPACK WMI_AP_ACL_POLICY_CMD;
-
-/*
- * Used with WMI_AP_ACL_MAC_LIST_CMDID
- */
-#define ADD_MAC_ADDR 1
-#define DEL_MAC_ADDR 2
-typedef PREPACK struct {
- u8 action;
- u8 index;
- u8 mac[ATH_MAC_LEN];
- u8 wildcard;
-} POSTPACK WMI_AP_ACL_MAC_CMD;
-
-typedef PREPACK struct {
- u16 index;
- u8 acl_mac[AP_ACL_SIZE][ATH_MAC_LEN];
- u8 wildcard[AP_ACL_SIZE];
- u8 policy;
-} POSTPACK WMI_AP_ACL;
-
-/*
- * Used with WMI_AP_SET_NUM_STA_CMDID
- */
-typedef PREPACK struct {
- u8 num_sta;
-} POSTPACK WMI_AP_SET_NUM_STA_CMD;
-
-/*
- * Used with WMI_AP_SET_MLME_CMDID
- */
-typedef PREPACK struct {
- u8 mac[ATH_MAC_LEN];
- u16 reason; /* 802.11 reason code */
- u8 cmd; /* operation to perform */
-#define WMI_AP_MLME_ASSOC 1 /* associate station */
-#define WMI_AP_DISASSOC 2 /* disassociate station */
-#define WMI_AP_DEAUTH 3 /* deauthenticate station */
-#define WMI_AP_MLME_AUTHORIZE 4 /* authorize station */
-#define WMI_AP_MLME_UNAUTHORIZE 5 /* unauthorize station */
-} POSTPACK WMI_AP_SET_MLME_CMD;
-
-typedef PREPACK struct {
- u32 period;
-} POSTPACK WMI_AP_CONN_INACT_CMD;
-
-typedef PREPACK struct {
- u32 period_min;
- u32 dwell_ms;
-} POSTPACK WMI_AP_PROT_SCAN_TIME_CMD;
-
-typedef PREPACK struct {
- u32 flag;
- u16 aid;
-} POSTPACK WMI_AP_SET_PVB_CMD;
-
-#define WMI_DISABLE_REGULATORY_CODE "FF"
-
-typedef PREPACK struct {
- u8 countryCode[3];
-} POSTPACK WMI_AP_SET_COUNTRY_CMD;
-
-typedef PREPACK struct {
- u8 dtim;
-} POSTPACK WMI_AP_SET_DTIM_CMD;
-
-typedef PREPACK struct {
- u8 band; /* specifies which band to apply these values */
- u8 enable; /* allows 11n to be disabled on a per band basis */
- u8 chan_width_40M_supported;
- u8 short_GI_20MHz;
- u8 short_GI_40MHz;
- u8 intolerance_40MHz;
- u8 max_ampdu_len_exp;
-} POSTPACK WMI_SET_HT_CAP_CMD;
-
-typedef PREPACK struct {
- u8 sta_chan_width;
-} POSTPACK WMI_SET_HT_OP_CMD;
-
-typedef PREPACK struct {
- u32 rateMasks[8];
-} POSTPACK WMI_SET_TX_SELECT_RATES_CMD;
-
-typedef PREPACK struct {
- u32 sgiMask;
- u8 sgiPERThreshold;
-} POSTPACK WMI_SET_TX_SGI_PARAM_CMD;
-
-#define DEFAULT_SGI_MASK 0x08080000
-#define DEFAULT_SGI_PER 10
-
-typedef PREPACK struct {
- u32 rateField; /* 1 bit per rate corresponding to index */
- u8 id;
- u8 shortTrys;
- u8 longTrys;
- u8 reserved; /* padding */
-} POSTPACK WMI_SET_RATE_POLICY_CMD;
-
-typedef PREPACK struct {
- u8 metaVersion; /* version of meta data for rx packets <0 = default> (0-7 = valid) */
- u8 dot11Hdr; /* 1 == leave .11 header intact , 0 == replace .11 header with .3 <default> */
- u8 defragOnHost; /* 1 == defragmentation is performed by host, 0 == performed by target <default> */
- u8 reserved[1]; /* alignment */
-} POSTPACK WMI_RX_FRAME_FORMAT_CMD;
-
-
-typedef PREPACK struct {
- u8 enable; /* 1 == device operates in thin mode , 0 == normal mode <default> */
- u8 reserved[3];
-} POSTPACK WMI_SET_THIN_MODE_CMD;
-
-/* AP mode events */
-/* WMI_PS_POLL_EVENT */
-typedef PREPACK struct {
- u16 aid;
-} POSTPACK WMI_PSPOLL_EVENT;
-
-typedef PREPACK struct {
- u32 tx_bytes;
- u32 tx_pkts;
- u32 tx_error;
- u32 tx_discard;
- u32 rx_bytes;
- u32 rx_pkts;
- u32 rx_error;
- u32 rx_discard;
- u32 aid;
-} POSTPACK WMI_PER_STA_STAT;
-
-#define AP_GET_STATS 0
-#define AP_CLEAR_STATS 1
-
-typedef PREPACK struct {
- u32 action;
- WMI_PER_STA_STAT sta[AP_MAX_NUM_STA+1];
-} POSTPACK WMI_AP_MODE_STAT;
-#define WMI_AP_MODE_STAT_SIZE(numSta) (sizeof(u32) + ((numSta + 1) * sizeof(WMI_PER_STA_STAT)))
-
-#define AP_11BG_RATESET1 1
-#define AP_11BG_RATESET2 2
-#define DEF_AP_11BG_RATESET AP_11BG_RATESET1
-typedef PREPACK struct {
- u8 rateset;
-} POSTPACK WMI_AP_SET_11BG_RATESET_CMD;
-/*
- * End of AP mode definitions
- */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _WMI_H_ */
diff --git a/drivers/staging/ath6kl/include/common/wmix.h b/drivers/staging/ath6kl/include/common/wmix.h
deleted file mode 100644
index 9435eab1b7f..00000000000
--- a/drivers/staging/ath6kl/include/common/wmix.h
+++ /dev/null
@@ -1,271 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="wmix.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-/*
- * This file contains extensions of the WMI protocol specified in the
- * Wireless Module Interface (WMI). It includes definitions of all
- * extended commands and events. Extensions include useful commands
- * that are not directly related to wireless activities. They may
- * be hardware-specific, and they might not be supported on all
- * implementations.
- *
- * Extended WMIX commands are encapsulated in a WMI message with
- * cmd=WMI_EXTENSION_CMD.
- */
-
-#ifndef _WMIX_H_
-#define _WMIX_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "dbglog.h"
-
-/*
- * Extended WMI commands are those that are needed during wireless
- * operation, but which are not really wireless commands. This allows,
- * for instance, platform-specific commands. Extended WMI commands are
- * embedded in a WMI command message with WMI_COMMAND_ID=WMI_EXTENSION_CMDID.
- * Extended WMI events are similarly embedded in a WMI event message with
- * WMI_EVENT_ID=WMI_EXTENSION_EVENTID.
- */
-typedef PREPACK struct {
- u32 commandId;
-} POSTPACK WMIX_CMD_HDR;
-
-typedef enum {
- WMIX_DSETOPEN_REPLY_CMDID = 0x2001,
- WMIX_DSETDATA_REPLY_CMDID,
- WMIX_GPIO_OUTPUT_SET_CMDID,
- WMIX_GPIO_INPUT_GET_CMDID,
- WMIX_GPIO_REGISTER_SET_CMDID,
- WMIX_GPIO_REGISTER_GET_CMDID,
- WMIX_GPIO_INTR_ACK_CMDID,
- WMIX_HB_CHALLENGE_RESP_CMDID,
- WMIX_DBGLOG_CFG_MODULE_CMDID,
- WMIX_PROF_CFG_CMDID, /* 0x200a */
- WMIX_PROF_ADDR_SET_CMDID,
- WMIX_PROF_START_CMDID,
- WMIX_PROF_STOP_CMDID,
- WMIX_PROF_COUNT_GET_CMDID,
-} WMIX_COMMAND_ID;
-
-typedef enum {
- WMIX_DSETOPENREQ_EVENTID = 0x3001,
- WMIX_DSETCLOSE_EVENTID,
- WMIX_DSETDATAREQ_EVENTID,
- WMIX_GPIO_INTR_EVENTID,
- WMIX_GPIO_DATA_EVENTID,
- WMIX_GPIO_ACK_EVENTID,
- WMIX_HB_CHALLENGE_RESP_EVENTID,
- WMIX_DBGLOG_EVENTID,
- WMIX_PROF_COUNT_EVENTID,
-} WMIX_EVENT_ID;
-
-/*
- * =============DataSet support=================
- */
-
-/*
- * WMIX_DSETOPENREQ_EVENTID
- * DataSet Open Request Event
- */
-typedef PREPACK struct {
- u32 dset_id;
- u32 targ_dset_handle; /* echo'ed, not used by Host, */
- u32 targ_reply_fn; /* echo'ed, not used by Host, */
- u32 targ_reply_arg; /* echo'ed, not used by Host, */
-} POSTPACK WMIX_DSETOPENREQ_EVENT;
-
-/*
- * WMIX_DSETCLOSE_EVENTID
- * DataSet Close Event
- */
-typedef PREPACK struct {
- u32 access_cookie;
-} POSTPACK WMIX_DSETCLOSE_EVENT;
-
-/*
- * WMIX_DSETDATAREQ_EVENTID
- * DataSet Data Request Event
- */
-typedef PREPACK struct {
- u32 access_cookie;
- u32 offset;
- u32 length;
- u32 targ_buf; /* echo'ed, not used by Host, */
- u32 targ_reply_fn; /* echo'ed, not used by Host, */
- u32 targ_reply_arg; /* echo'ed, not used by Host, */
-} POSTPACK WMIX_DSETDATAREQ_EVENT;
-
-typedef PREPACK struct {
- u32 status;
- u32 targ_dset_handle;
- u32 targ_reply_fn;
- u32 targ_reply_arg;
- u32 access_cookie;
- u32 size;
- u32 version;
-} POSTPACK WMIX_DSETOPEN_REPLY_CMD;
-
-typedef PREPACK struct {
- u32 status;
- u32 targ_buf;
- u32 targ_reply_fn;
- u32 targ_reply_arg;
- u32 length;
- u8 buf[1];
-} POSTPACK WMIX_DSETDATA_REPLY_CMD;
-
-
-/*
- * =============GPIO support=================
- * All masks are 18-bit masks with bit N operating on GPIO pin N.
- */
-
-
-/*
- * Set GPIO pin output state.
- * In order for output to be driven, a pin must be enabled for output.
- * This can be done during initialization through the GPIO Configuration
- * DataSet, or during operation with the enable_mask.
- *
- * If a request is made to simultaneously set/clear or set/disable or
- * clear/disable or disable/enable, results are undefined.
- */
-typedef PREPACK struct {
- u32 set_mask; /* pins to set */
- u32 clear_mask; /* pins to clear */
- u32 enable_mask; /* pins to enable for output */
- u32 disable_mask; /* pins to disable/tristate */
-} POSTPACK WMIX_GPIO_OUTPUT_SET_CMD;
-
-/*
- * Set a GPIO register. For debug/exceptional cases.
- * Values for gpioreg_id are GPIO_REGISTER_IDs, defined in a
- * platform-dependent header.
- */
-typedef PREPACK struct {
- u32 gpioreg_id; /* GPIO register ID */
- u32 value; /* value to write */
-} POSTPACK WMIX_GPIO_REGISTER_SET_CMD;
-
-/* Get a GPIO register. For debug/exceptional cases. */
-typedef PREPACK struct {
- u32 gpioreg_id; /* GPIO register to read */
-} POSTPACK WMIX_GPIO_REGISTER_GET_CMD;
-
-/*
- * Host acknowledges and re-arms GPIO interrupts. A single
- * message should be used to acknowledge all interrupts that
- * were delivered in an earlier WMIX_GPIO_INTR_EVENT message.
- */
-typedef PREPACK struct {
- u32 ack_mask; /* interrupts to acknowledge */
-} POSTPACK WMIX_GPIO_INTR_ACK_CMD;
-
-/*
- * Target informs Host of GPIO interrupts that have occurred since the
- * last WMIX_GIPO_INTR_ACK_CMD was received. Additional information --
- * the current GPIO input values is provided -- in order to support
- * use of a GPIO interrupt as a Data Valid signal for other GPIO pins.
- */
-typedef PREPACK struct {
- u32 intr_mask; /* pending GPIO interrupts */
- u32 input_values; /* recent GPIO input values */
-} POSTPACK WMIX_GPIO_INTR_EVENT;
-
-/*
- * Target responds to Host's earlier WMIX_GPIO_INPUT_GET_CMDID request
- * using a GPIO_DATA_EVENT with
- * value set to the mask of GPIO pin inputs and
- * reg_id set to GPIO_ID_NONE
- *
- *
- * Target responds to Hosts's earlier WMIX_GPIO_REGISTER_GET_CMDID request
- * using a GPIO_DATA_EVENT with
- * value set to the value of the requested register and
- * reg_id identifying the register (reflects the original request)
- * NB: reg_id supports the future possibility of unsolicited
- * WMIX_GPIO_DATA_EVENTs (for polling GPIO input), and it may
- * simplify Host GPIO support.
- */
-typedef PREPACK struct {
- u32 value;
- u32 reg_id;
-} POSTPACK WMIX_GPIO_DATA_EVENT;
-
-/*
- * =============Error Detection support=================
- */
-
-/*
- * WMIX_HB_CHALLENGE_RESP_CMDID
- * Heartbeat Challenge Response command
- */
-typedef PREPACK struct {
- u32 cookie;
- u32 source;
-} POSTPACK WMIX_HB_CHALLENGE_RESP_CMD;
-
-/*
- * WMIX_HB_CHALLENGE_RESP_EVENTID
- * Heartbeat Challenge Response Event
- */
-#define WMIX_HB_CHALLENGE_RESP_EVENT WMIX_HB_CHALLENGE_RESP_CMD
-
-typedef PREPACK struct {
- struct dbglog_config_s config;
-} POSTPACK WMIX_DBGLOG_CFG_MODULE_CMD;
-
-/*
- * =============Target Profiling support=================
- */
-
-typedef PREPACK struct {
- u32 period; /* Time (in 30.5us ticks) between samples */
- u32 nbins;
-} POSTPACK WMIX_PROF_CFG_CMD;
-
-typedef PREPACK struct {
- u32 addr;
-} POSTPACK WMIX_PROF_ADDR_SET_CMD;
-
-/*
- * Target responds to Hosts's earlier WMIX_PROF_COUNT_GET_CMDID request
- * using a WMIX_PROF_COUNT_EVENT with
- * addr set to the next address
- * count set to the corresponding count
- */
-typedef PREPACK struct {
- u32 addr;
- u32 count;
-} POSTPACK WMIX_PROF_COUNT_EVENT;
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _WMIX_H_ */
diff --git a/drivers/staging/ath6kl/include/common_drv.h b/drivers/staging/ath6kl/include/common_drv.h
deleted file mode 100644
index 34db29958bc..00000000000
--- a/drivers/staging/ath6kl/include/common_drv.h
+++ /dev/null
@@ -1,104 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef COMMON_DRV_H_
-#define COMMON_DRV_H_
-
-#include "hif.h"
-#include "htc_packet.h"
-#include "htc_api.h"
-
-/* structure that is the state information for the default credit distribution callback
- * drivers should instantiate (zero-init as well) this structure in their driver instance
- * and pass it as a context to the HTC credit distribution functions */
-struct common_credit_state_info {
- int TotalAvailableCredits; /* total credits in the system at startup */
- int CurrentFreeCredits; /* credits available in the pool that have not been
- given out to endpoints */
- struct htc_endpoint_credit_dist *pLowestPriEpDist; /* pointer to the lowest priority endpoint dist struct */
-};
-
-struct hci_transport_callbacks {
- s32 (*setupTransport)(void *ar);
- void (*cleanupTransport)(void *ar);
-};
-
-struct hci_transport_misc_handles {
- void *netDevice;
- void *hifDevice;
- void *htcHandle;
-};
-
-/* HTC TX packet tagging definitions */
-#define AR6K_CONTROL_PKT_TAG HTC_TX_PACKET_TAG_USER_DEFINED
-#define AR6K_DATA_PKT_TAG (AR6K_CONTROL_PKT_TAG + 1)
-
-#define AR6002_VERSION_REV1 0x20000086
-#define AR6002_VERSION_REV2 0x20000188
-#define AR6003_VERSION_REV1 0x300002ba
-#define AR6003_VERSION_REV2 0x30000384
-
-#define AR6002_CUST_DATA_SIZE 112
-#define AR6003_CUST_DATA_SIZE 16
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* OS-independent APIs */
-int ar6000_setup_credit_dist(HTC_HANDLE HTCHandle, struct common_credit_state_info *pCredInfo);
-
-int ar6000_ReadRegDiag(struct hif_device *hifDevice, u32 *address, u32 *data);
-
-int ar6000_WriteRegDiag(struct hif_device *hifDevice, u32 *address, u32 *data);
-
-int ar6000_ReadDataDiag(struct hif_device *hifDevice, u32 address, u8 *data, u32 length);
-
-int ar6000_reset_device(struct hif_device *hifDevice, u32 TargetType, bool waitForCompletion, bool coldReset);
-
-void ar6000_dump_target_assert_info(struct hif_device *hifDevice, u32 TargetType);
-
-int ar6000_set_htc_params(struct hif_device *hifDevice,
- u32 TargetType,
- u32 MboxIsrYieldValue,
- u8 HtcControlBuffers);
-
-int ar6000_set_hci_bridge_flags(struct hif_device *hifDevice,
- u32 TargetType,
- u32 Flags);
-
-void ar6000_copy_cust_data_from_target(struct hif_device *hifDevice, u32 TargetType);
-
-u8 *ar6000_get_cust_data_buffer(u32 TargetType);
-
-int ar6000_setBTState(void *context, u8 *pInBuf, u32 InBufSize);
-
-int ar6000_setDevicePowerState(void *context, u8 *pInBuf, u32 InBufSize);
-
-int ar6000_setWowMode(void *context, u8 *pInBuf, u32 InBufSize);
-
-int ar6000_setHostMode(void *context, u8 *pInBuf, u32 InBufSize);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /*COMMON_DRV_H_*/
diff --git a/drivers/staging/ath6kl/include/dbglog_api.h b/drivers/staging/ath6kl/include/dbglog_api.h
deleted file mode 100644
index a53aed316e3..00000000000
--- a/drivers/staging/ath6kl/include/dbglog_api.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="dbglog_api.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// This file contains host side debug primitives.
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _DBGLOG_API_H_
-#define _DBGLOG_API_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "dbglog.h"
-
-#define DBGLOG_HOST_LOG_BUFFER_SIZE DBGLOG_LOG_BUFFER_SIZE
-
-#define DBGLOG_GET_DBGID(arg) \
- ((arg & DBGLOG_DBGID_MASK) >> DBGLOG_DBGID_OFFSET)
-
-#define DBGLOG_GET_MODULEID(arg) \
- ((arg & DBGLOG_MODULEID_MASK) >> DBGLOG_MODULEID_OFFSET)
-
-#define DBGLOG_GET_NUMARGS(arg) \
- ((arg & DBGLOG_NUM_ARGS_MASK) >> DBGLOG_NUM_ARGS_OFFSET)
-
-#define DBGLOG_GET_TIMESTAMP(arg) \
- ((arg & DBGLOG_TIMESTAMP_MASK) >> DBGLOG_TIMESTAMP_OFFSET)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _DBGLOG_API_H_ */
diff --git a/drivers/staging/ath6kl/include/dl_list.h b/drivers/staging/ath6kl/include/dl_list.h
deleted file mode 100644
index 13b1e6956c2..00000000000
--- a/drivers/staging/ath6kl/include/dl_list.h
+++ /dev/null
@@ -1,153 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="dl_list.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Double-link list definitions (adapted from Atheros SDIO stack)
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef __DL_LIST_H___
-#define __DL_LIST_H___
-
-#include "a_osapi.h"
-
-#define A_CONTAINING_STRUCT(address, struct_type, field_name)\
- ((struct_type *)((unsigned long)(address) - (unsigned long)(&((struct_type *)0)->field_name)))
-
-/* list functions */
-/* pointers for the list */
-struct dl_list {
- struct dl_list *pPrev;
- struct dl_list *pNext;
-};
-/*
- * DL_LIST_INIT , initialize doubly linked list
-*/
-#define DL_LIST_INIT(pList)\
- {(pList)->pPrev = pList; (pList)->pNext = pList;}
-
-/* faster macro to init list and add a single item */
-#define DL_LIST_INIT_AND_ADD(pList,pItem) \
-{ (pList)->pPrev = (pItem); \
- (pList)->pNext = (pItem); \
- (pItem)->pNext = (pList); \
- (pItem)->pPrev = (pList); \
-}
-
-#define DL_LIST_IS_EMPTY(pList) (((pList)->pPrev == (pList)) && ((pList)->pNext == (pList)))
-#define DL_LIST_GET_ITEM_AT_HEAD(pList) (pList)->pNext
-#define DL_LIST_GET_ITEM_AT_TAIL(pList) (pList)->pPrev
-/*
- * ITERATE_OVER_LIST pStart is the list, pTemp is a temp list member
- * NOT: do not use this function if the items in the list are deleted inside the
- * iteration loop
-*/
-#define ITERATE_OVER_LIST(pStart, pTemp) \
- for((pTemp) =(pStart)->pNext; pTemp != (pStart); (pTemp) = (pTemp)->pNext)
-
-
-/* safe iterate macro that allows the item to be removed from the list
- * the iteration continues to the next item in the list
- */
-#define ITERATE_OVER_LIST_ALLOW_REMOVE(pStart,pItem,st,offset) \
-{ \
- struct dl_list * pTemp; \
- pTemp = (pStart)->pNext; \
- while (pTemp != (pStart)) { \
- (pItem) = A_CONTAINING_STRUCT(pTemp,st,offset); \
- pTemp = pTemp->pNext; \
-
-#define ITERATE_END }}
-
-/*
- * DL_ListInsertTail - insert pAdd to the end of the list
-*/
-static INLINE struct dl_list *DL_ListInsertTail(struct dl_list *pList, struct dl_list *pAdd) {
- /* insert at tail */
- pAdd->pPrev = pList->pPrev;
- pAdd->pNext = pList;
- pList->pPrev->pNext = pAdd;
- pList->pPrev = pAdd;
- return pAdd;
-}
-
-/*
- * DL_ListInsertHead - insert pAdd into the head of the list
-*/
-static INLINE struct dl_list * DL_ListInsertHead(struct dl_list * pList, struct dl_list * pAdd) {
- /* insert at head */
- pAdd->pPrev = pList;
- pAdd->pNext = pList->pNext;
- pList->pNext->pPrev = pAdd;
- pList->pNext = pAdd;
- return pAdd;
-}
-
-#define DL_ListAdd(pList,pItem) DL_ListInsertHead((pList),(pItem))
-/*
- * DL_ListRemove - remove pDel from list
-*/
-static INLINE struct dl_list * DL_ListRemove(struct dl_list * pDel) {
- pDel->pNext->pPrev = pDel->pPrev;
- pDel->pPrev->pNext = pDel->pNext;
- /* point back to itself just to be safe, incase remove is called again */
- pDel->pNext = pDel;
- pDel->pPrev = pDel;
- return pDel;
-}
-
-/*
- * DL_ListRemoveItemFromHead - get a list item from the head
-*/
-static INLINE struct dl_list * DL_ListRemoveItemFromHead(struct dl_list * pList) {
- struct dl_list * pItem = NULL;
- if (pList->pNext != pList) {
- pItem = pList->pNext;
- /* remove the first item from head */
- DL_ListRemove(pItem);
- }
- return pItem;
-}
-
-static INLINE struct dl_list * DL_ListRemoveItemFromTail(struct dl_list * pList) {
- struct dl_list * pItem = NULL;
- if (pList->pPrev != pList) {
- pItem = pList->pPrev;
- /* remove the item from tail */
- DL_ListRemove(pItem);
- }
- return pItem;
-}
-
-/* transfer src list items to the tail of the destination list */
-static INLINE void DL_ListTransferItemsToTail(struct dl_list * pDest, struct dl_list * pSrc) {
- /* only concatenate if src is not empty */
- if (!DL_LIST_IS_EMPTY(pSrc)) {
- /* cut out circular list in src and re-attach to end of dest */
- pSrc->pPrev->pNext = pDest;
- pSrc->pNext->pPrev = pDest->pPrev;
- pDest->pPrev->pNext = pSrc->pNext;
- pDest->pPrev = pSrc->pPrev;
- /* terminate src list, it is now empty */
- pSrc->pPrev = pSrc;
- pSrc->pNext = pSrc;
- }
-}
-
-#endif /* __DL_LIST_H___ */
diff --git a/drivers/staging/ath6kl/include/dset_api.h b/drivers/staging/ath6kl/include/dset_api.h
deleted file mode 100644
index fe901ba40ec..00000000000
--- a/drivers/staging/ath6kl/include/dset_api.h
+++ /dev/null
@@ -1,65 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="dset_api.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Host-side DataSet API.
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _DSET_API_H_
-#define _DSET_API_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/*
- * Host-side DataSet support is optional, and is not
- * currently required for correct operation. To disable
- * Host-side DataSet support, set this to 0.
- */
-#ifndef CONFIG_HOST_DSET_SUPPORT
-#define CONFIG_HOST_DSET_SUPPORT 1
-#endif
-
-/* Called to send a DataSet Open Reply back to the Target. */
-int wmi_dset_open_reply(struct wmi_t *wmip,
- u32 status,
- u32 access_cookie,
- u32 size,
- u32 version,
- u32 targ_handle,
- u32 targ_reply_fn,
- u32 targ_reply_arg);
-
-/* Called to send a DataSet Data Reply back to the Target. */
-int wmi_dset_data_reply(struct wmi_t *wmip,
- u32 status,
- u8 *host_buf,
- u32 length,
- u32 targ_buf,
- u32 targ_reply_fn,
- u32 targ_reply_arg);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-
-#endif /* _DSET_API_H_ */
diff --git a/drivers/staging/ath6kl/include/hci_transport_api.h b/drivers/staging/ath6kl/include/hci_transport_api.h
deleted file mode 100644
index 5e903fad23f..00000000000
--- a/drivers/staging/ath6kl/include/hci_transport_api.h
+++ /dev/null
@@ -1,259 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _HCI_TRANSPORT_API_H_
-#define _HCI_TRANSPORT_API_H_
-
- /* Bluetooth HCI packets are stored in HTC packet containers */
-#include "htc_packet.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-typedef void *HCI_TRANSPORT_HANDLE;
-
-typedef HTC_ENDPOINT_ID HCI_TRANSPORT_PACKET_TYPE;
-
- /* we map each HCI packet class to a static Endpoint ID */
-#define HCI_COMMAND_TYPE ENDPOINT_1
-#define HCI_EVENT_TYPE ENDPOINT_2
-#define HCI_ACL_TYPE ENDPOINT_3
-#define HCI_PACKET_INVALID ENDPOINT_MAX
-
-#define HCI_GET_PACKET_TYPE(pP) (pP)->Endpoint
-#define HCI_SET_PACKET_TYPE(pP,s) (pP)->Endpoint = (s)
-
-/* callback when an HCI packet was completely sent */
-typedef void (*HCI_TRANSPORT_SEND_PKT_COMPLETE)(void *, struct htc_packet *);
-/* callback when an HCI packet is received */
-typedef void (*HCI_TRANSPORT_RECV_PKT)(void *, struct htc_packet *);
-/* Optional receive buffer re-fill callback,
- * On some OSes (like Linux) packets are allocated from a global pool and indicated up
- * to the network stack. The driver never gets the packets back from the OS. For these OSes
- * a refill callback can be used to allocate and re-queue buffers into HTC.
- * A refill callback is used for the reception of ACL and EVENT packets. The caller must
- * set the watermark trigger point to cause a refill.
- */
-typedef void (*HCI_TRANSPORT_RECV_REFILL)(void *, HCI_TRANSPORT_PACKET_TYPE Type, int BuffersAvailable);
-/* Optional receive packet refill
- * On some systems packet buffers are an extremely limited resource. Rather than
- * queue largest-possible-sized buffers to the HCI bridge, some systems would rather
- * allocate a specific size as the packet is received. The trade off is
- * slightly more processing (callback invoked for each RX packet)
- * for the benefit of committing fewer buffer resources into the bridge.
- *
- * The callback is provided the length of the pending packet to fetch. This includes the
- * full transport header, HCI header, plus the length of payload. The callback can return a pointer to
- * the allocated HTC packet for immediate use.
- *
- * NOTE*** This callback is mutually exclusive with the the refill callback above.
- *
- * */
-typedef struct htc_packet *(*HCI_TRANSPORT_RECV_ALLOC)(void *, HCI_TRANSPORT_PACKET_TYPE Type, int Length);
-
-typedef enum _HCI_SEND_FULL_ACTION {
- HCI_SEND_FULL_KEEP = 0, /* packet that overflowed should be kept in the queue */
- HCI_SEND_FULL_DROP = 1, /* packet that overflowed should be dropped */
-} HCI_SEND_FULL_ACTION;
-
-/* callback when an HCI send queue exceeds the caller's MaxSendQueueDepth threshold,
- * the callback must return the send full action to take (either DROP or KEEP) */
-typedef HCI_SEND_FULL_ACTION (*HCI_TRANSPORT_SEND_FULL)(void *, struct htc_packet *);
-
-struct hci_transport_properties {
- int HeadRoom; /* number of bytes in front of HCI packet for header space */
- int TailRoom; /* number of bytes at the end of the HCI packet for tail space */
- int IOBlockPad; /* I/O block padding required (always a power of 2) */
-};
-
-struct hci_transport_config_info {
- int ACLRecvBufferWaterMark; /* low watermark to trigger recv refill */
- int EventRecvBufferWaterMark; /* low watermark to trigger recv refill */
- int MaxSendQueueDepth; /* max number of packets in the single send queue */
- void *pContext; /* context for all callbacks */
- void (*TransportFailure)(void *pContext, int Status); /* transport failure callback */
- int (*TransportReady)(HCI_TRANSPORT_HANDLE, struct hci_transport_properties *,void *pContext); /* transport is ready */
- void (*TransportRemoved)(void *pContext); /* transport was removed */
- /* packet processing callbacks */
- HCI_TRANSPORT_SEND_PKT_COMPLETE pHCISendComplete;
- HCI_TRANSPORT_RECV_PKT pHCIPktRecv;
- HCI_TRANSPORT_RECV_REFILL pHCIPktRecvRefill;
- HCI_TRANSPORT_RECV_ALLOC pHCIPktRecvAlloc;
- HCI_TRANSPORT_SEND_FULL pHCISendFull;
-};
-
-/* ------ Function Prototypes ------ */
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Attach to the HCI transport module
- @function name: HCI_TransportAttach
- @input: HTCHandle - HTC handle (see HTC apis)
- pInfo - initialization information
- @output:
- @return: HCI_TRANSPORT_HANDLE on success, NULL on failure
- @notes: The HTC module provides HCI transport services.
- @example:
- @see also: HCI_TransportDetach
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-HCI_TRANSPORT_HANDLE HCI_TransportAttach(void *HTCHandle, struct hci_transport_config_info *pInfo);
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Detach from the HCI transport module
- @function name: HCI_TransportDetach
- @input: HciTrans - HCI transport handle
- pInfo - initialization information
- @output:
- @return:
- @notes:
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-void HCI_TransportDetach(HCI_TRANSPORT_HANDLE HciTrans);
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Add receive packets to the HCI transport
- @function name: HCI_TransportAddReceivePkts
- @input: HciTrans - HCI transport handle
- pQueue - a queue holding one or more packets
- @output:
- @return: 0 on success
- @notes: user must supply HTC packets for capturing incomming HCI packets. The caller
- must initialize each HTC packet using the SET_HTC_PACKET_INFO_RX_REFILL()
- macro. Each packet in the queue must be of the same type and length
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HCI_TransportAddReceivePkts(HCI_TRANSPORT_HANDLE HciTrans, struct htc_packet_queue *pQueue);
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Send an HCI packet packet
- @function name: HCI_TransportSendPkt
- @input: HciTrans - HCI transport handle
- pPacket - packet to send
- Synchronous - send the packet synchronously (blocking)
- @output:
- @return: 0
- @notes: Caller must initialize packet using SET_HTC_PACKET_INFO_TX() and
- HCI_SET_PACKET_TYPE() macros to prepare the packet.
- If Synchronous is set to false the call is fully asynchronous. On error or completion,
- the registered send complete callback will be called.
- If Synchronous is set to true, the call will block until the packet is sent, if the
- interface cannot send the packet within a 2 second timeout, the function will return
- the failure code : A_EBUSY.
-
- Synchronous Mode should only be used at start-up to initialize the HCI device using
- custom HCI commands. It should NOT be mixed with Asynchronous operations. Mixed synchronous
- and asynchronous operation behavior is undefined.
-
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HCI_TransportSendPkt(HCI_TRANSPORT_HANDLE HciTrans, struct htc_packet *pPacket, bool Synchronous);
-
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Stop HCI transport
- @function name: HCI_TransportStop
- @input: HciTrans - hci transport handle
- @output:
- @return:
- @notes: HCI transport communication will be halted. All receive and pending TX packets will
- be flushed.
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-void HCI_TransportStop(HCI_TRANSPORT_HANDLE HciTrans);
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Start the HCI transport
- @function name: HCI_TransportStart
- @input: HciTrans - hci transport handle
- @output:
- @return: 0 on success
- @notes: HCI transport communication will begin, the caller can expect the arrival
- of HCI recv packets as soon as this call returns.
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HCI_TransportStart(HCI_TRANSPORT_HANDLE HciTrans);
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Enable or Disable Asynchronous Recv
- @function name: HCI_TransportEnableDisableAsyncRecv
- @input: HciTrans - hci transport handle
- Enable - enable or disable asynchronous recv
- @output:
- @return: 0 on success
- @notes: This API must be called when HCI recv is handled synchronously
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HCI_TransportEnableDisableAsyncRecv(HCI_TRANSPORT_HANDLE HciTrans, bool Enable);
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Receive an event packet from the HCI transport synchronously using polling
- @function name: HCI_TransportRecvHCIEventSync
- @input: HciTrans - hci transport handle
- pPacket - HTC packet to hold the recv data
- MaxPollMS - maximum polling duration in Milliseconds;
- @output:
- @return: 0 on success
- @notes: This API should be used only during HCI device initialization, the caller must call
- HCI_TransportEnableDisableAsyncRecv with Enable=false prior to using this API.
- This API will only capture HCI Event packets.
- @example:
- @see also: HCI_TransportEnableDisableAsyncRecv
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HCI_TransportRecvHCIEventSync(HCI_TRANSPORT_HANDLE HciTrans,
- struct htc_packet *pPacket,
- int MaxPollMS);
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Set the desired baud rate for the underlying transport layer
- @function name: HCI_TransportSetBaudRate
- @input: HciTrans - hci transport handle
- Baud - baud rate in bps
- @output:
- @return: 0 on success
- @notes: This API should be used only after HCI device initialization
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HCI_TransportSetBaudRate(HCI_TRANSPORT_HANDLE HciTrans, u32 Baud);
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Enable/Disable HCI Transport Power Management
- @function name: HCI_TransportEnablePowerMgmt
- @input: HciTrans - hci transport handle
- Enable - 1 = Enable, 0 = Disable
- @output:
- @return: 0 on success
- @notes:
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HCI_TransportEnablePowerMgmt(HCI_TRANSPORT_HANDLE HciTrans, bool Enable);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _HCI_TRANSPORT_API_H_ */
diff --git a/drivers/staging/ath6kl/include/hif.h b/drivers/staging/ath6kl/include/hif.h
deleted file mode 100644
index 24200e778c3..00000000000
--- a/drivers/staging/ath6kl/include/hif.h
+++ /dev/null
@@ -1,456 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="hif.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// HIF specific declarations and prototypes
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _HIF_H_
-#define _HIF_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/* Header files */
-#include "a_config.h"
-#include "athdefs.h"
-#include "a_osapi.h"
-#include "dl_list.h"
-
-
-typedef struct htc_callbacks HTC_CALLBACKS;
-struct hif_device;
-
-/*
- * direction - Direction of transfer (HIF_READ/HIF_WRITE).
- */
-#define HIF_READ 0x00000001
-#define HIF_WRITE 0x00000002
-#define HIF_DIR_MASK (HIF_READ | HIF_WRITE)
-
-/*
- * type - An interface may support different kind of read/write commands.
- * For example: SDIO supports CMD52/CMD53s. In case of MSIO it
- * translates to using different kinds of TPCs. The command type
- * is thus divided into a basic and an extended command and can
- * be specified using HIF_BASIC_IO/HIF_EXTENDED_IO.
- */
-#define HIF_BASIC_IO 0x00000004
-#define HIF_EXTENDED_IO 0x00000008
-#define HIF_TYPE_MASK (HIF_BASIC_IO | HIF_EXTENDED_IO)
-
-/*
- * emode - This indicates the whether the command is to be executed in a
- * blocking or non-blocking fashion (HIF_SYNCHRONOUS/
- * HIF_ASYNCHRONOUS). The read/write data paths in HTC have been
- * implemented using the asynchronous mode allowing the the bus
- * driver to indicate the completion of operation through the
- * registered callback routine. The requirement primarily comes
- * from the contexts these operations get called from (a driver's
- * transmit context or the ISR context in case of receive).
- * Support for both of these modes is essential.
- */
-#define HIF_SYNCHRONOUS 0x00000010
-#define HIF_ASYNCHRONOUS 0x00000020
-#define HIF_EMODE_MASK (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS)
-
-/*
- * dmode - An interface may support different kinds of commands based on
- * the tradeoff between the amount of data it can carry and the
- * setup time. Byte and Block modes are supported (HIF_BYTE_BASIS/
- * HIF_BLOCK_BASIS). In case of latter, the data is rounded off
- * to the nearest block size by padding. The size of the block is
- * configurable at compile time using the HIF_BLOCK_SIZE and is
- * negotiated with the target during initialization after the
- * AR6000 interrupts are enabled.
- */
-#define HIF_BYTE_BASIS 0x00000040
-#define HIF_BLOCK_BASIS 0x00000080
-#define HIF_DMODE_MASK (HIF_BYTE_BASIS | HIF_BLOCK_BASIS)
-
-/*
- * amode - This indicates if the address has to be incremented on AR6000
- * after every read/write operation (HIF?FIXED_ADDRESS/
- * HIF_INCREMENTAL_ADDRESS).
- */
-#define HIF_FIXED_ADDRESS 0x00000100
-#define HIF_INCREMENTAL_ADDRESS 0x00000200
-#define HIF_AMODE_MASK (HIF_FIXED_ADDRESS | HIF_INCREMENTAL_ADDRESS)
-
-#define HIF_WR_ASYNC_BYTE_FIX \
- (HIF_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
-#define HIF_WR_ASYNC_BYTE_INC \
- (HIF_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
-#define HIF_WR_ASYNC_BLOCK_INC \
- (HIF_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
-#define HIF_WR_SYNC_BYTE_FIX \
- (HIF_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
-#define HIF_WR_SYNC_BYTE_INC \
- (HIF_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
-#define HIF_WR_SYNC_BLOCK_INC \
- (HIF_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
-#define HIF_WR_ASYNC_BLOCK_FIX \
- (HIF_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
-#define HIF_WR_SYNC_BLOCK_FIX \
- (HIF_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
-#define HIF_RD_SYNC_BYTE_INC \
- (HIF_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
-#define HIF_RD_SYNC_BYTE_FIX \
- (HIF_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
-#define HIF_RD_ASYNC_BYTE_FIX \
- (HIF_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
-#define HIF_RD_ASYNC_BLOCK_FIX \
- (HIF_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
-#define HIF_RD_ASYNC_BYTE_INC \
- (HIF_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
-#define HIF_RD_ASYNC_BLOCK_INC \
- (HIF_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
-#define HIF_RD_SYNC_BLOCK_INC \
- (HIF_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
-#define HIF_RD_SYNC_BLOCK_FIX \
- (HIF_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
-
-typedef enum {
- HIF_DEVICE_POWER_STATE = 0,
- HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
- HIF_DEVICE_GET_MBOX_ADDR,
- HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
- HIF_DEVICE_GET_IRQ_PROC_MODE,
- HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
- HIF_DEVICE_POWER_STATE_CHANGE,
- HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
- HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
- HIF_DEVICE_GET_OS_DEVICE,
- HIF_DEVICE_DEBUG_BUS_STATE,
-} HIF_DEVICE_CONFIG_OPCODE;
-
-/*
- * HIF CONFIGURE definitions:
- *
- * HIF_DEVICE_GET_MBOX_BLOCK_SIZE
- * input : none
- * output : array of 4 u32s
- * notes: block size is returned for each mailbox (4)
- *
- * HIF_DEVICE_GET_MBOX_ADDR
- * input : none
- * output : struct hif_device_mbox_info
- * notes:
- *
- * HIF_DEVICE_GET_PENDING_EVENTS_FUNC
- * input : none
- * output: HIF_PENDING_EVENTS_FUNC function pointer
- * notes: this is optional for the HIF layer, if the request is
- * not handled then it indicates that the upper layer can use
- * the standard device methods to get pending events (IRQs, mailbox messages etc..)
- * otherwise it can call the function pointer to check pending events.
- *
- * HIF_DEVICE_GET_IRQ_PROC_MODE
- * input : none
- * output : HIF_DEVICE_IRQ_PROCESSING_MODE (interrupt processing mode)
- * note: the hif layer interfaces with the underlying OS-specific bus driver. The HIF
- * layer can report whether IRQ processing is requires synchronous behavior or
- * can be processed using asynchronous bus requests (typically faster).
- *
- * HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC
- * input :
- * output : HIF_MASK_UNMASK_RECV_EVENT function pointer
- * notes: this is optional for the HIF layer. The HIF layer may require a special mechanism
- * to mask receive message events. The upper layer can call this pointer when it needs
- * to mask/unmask receive events (in case it runs out of buffers).
- *
- * HIF_DEVICE_POWER_STATE_CHANGE
- *
- * input : HIF_DEVICE_POWER_CHANGE_TYPE
- * output : none
- * note: this is optional for the HIF layer. The HIF layer can handle power on/off state change
- * requests in an interconnect specific way. This is highly OS and bus driver dependent.
- * The caller must guarantee that no HIF read/write requests will be made after the device
- * is powered down.
- *
- * HIF_DEVICE_GET_IRQ_YIELD_PARAMS
- *
- * input : none
- * output : struct hif_device_irq_yield_params
- * note: This query checks if the HIF layer wishes to impose a processing yield count for the DSR handler.
- * The DSR callback handler will exit after a fixed number of RX packets or events are processed.
- * This query is only made if the device reports an IRQ processing mode of HIF_DEVICE_IRQ_SYNC_ONLY.
- * The HIF implementation can ignore this command if it does not desire the DSR callback to yield.
- * The HIF layer can indicate the maximum number of IRQ processing units (RX packets) before the
- * DSR handler callback must yield and return control back to the HIF layer. When a yield limit is
- * used the DSR callback will not call HIFAckInterrupts() as it would normally do before returning.
- * The HIF implementation that requires a yield count must call HIFAckInterrupt() when it is prepared
- * to process interrupts again.
- *
- * HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT
- * input : none
- * output : struct hif_device_scatter_support_info
- * note: This query checks if the HIF layer implements the SCATTER request interface. Scatter requests
- * allows upper layers to submit mailbox I/O operations using a list of buffers. This is useful for
- * multi-message transfers that can better utilize the bus interconnect.
- *
- *
- * HIF_DEVICE_GET_OS_DEVICE
- * intput : none
- * output : struct hif_device_os_device_info;
- * note: On some operating systems, the HIF layer has a parent device object for the bus. This object
- * may be required to register certain types of logical devices.
- *
- * HIF_DEVICE_DEBUG_BUS_STATE
- * input : none
- * output : none
- * note: This configure option triggers the HIF interface to dump as much bus interface state. This
- * configuration request is optional (No-OP on some HIF implementations)
- *
- */
-
-struct hif_mbox_properties {
- u32 ExtendedAddress; /* extended address for larger writes */
- u32 ExtendedSize;
-};
-
-#define HIF_MBOX_FLAG_NO_BUNDLING (1 << 0) /* do not allow bundling over the mailbox */
-
-typedef enum _MBOX_BUF_IF_TYPE {
- MBOX_BUS_IF_SDIO = 0,
- MBOX_BUS_IF_SPI = 1,
-} MBOX_BUF_IF_TYPE;
-
-struct hif_device_mbox_info {
- u32 MboxAddresses[4]; /* must be first element for legacy HIFs that return the address in
- and ARRAY of 32-bit words */
-
- /* the following describe extended mailbox properties */
- struct hif_mbox_properties MboxProp[4];
- /* if the HIF supports the GMbox extended address region it can report it
- * here, some interfaces cannot support the GMBOX address range and not set this */
- u32 GMboxAddress;
- u32 GMboxSize;
- u32 Flags; /* flags to describe mbox behavior or usage */
- MBOX_BUF_IF_TYPE MboxBusIFType; /* mailbox bus interface type */
-};
-
-typedef enum {
- HIF_DEVICE_IRQ_SYNC_ONLY, /* for HIF implementations that require the DSR to process all
- interrupts before returning */
- HIF_DEVICE_IRQ_ASYNC_SYNC, /* for HIF implementations that allow DSR to process interrupts
- using ASYNC I/O (that is HIFAckInterrupt can be called at a
- later time */
-} HIF_DEVICE_IRQ_PROCESSING_MODE;
-
-typedef enum {
- HIF_DEVICE_POWER_UP, /* HIF layer should power up interface and/or module */
- HIF_DEVICE_POWER_DOWN, /* HIF layer should initiate bus-specific measures to minimize power */
- HIF_DEVICE_POWER_CUT /* HIF layer should initiate bus-specific AND/OR platform-specific measures
- to completely power-off the module and associated hardware (i.e. cut power supplies)
- */
-} HIF_DEVICE_POWER_CHANGE_TYPE;
-
-struct hif_device_irq_yield_params {
- int RecvPacketYieldCount; /* max number of packets to force DSR to return */
-};
-
-
-struct hif_scatter_item {
- u8 *pBuffer; /* CPU accessible address of buffer */
- int Length; /* length of transfer to/from this buffer */
- void *pCallerContexts[2]; /* space for caller to insert a context associated with this item */
-};
-
-struct hif_scatter_req;
-typedef void ( *HIF_SCATTER_COMP_CB)(struct hif_scatter_req *);
-
-typedef enum _HIF_SCATTER_METHOD {
- HIF_SCATTER_NONE = 0,
- HIF_SCATTER_DMA_REAL, /* Real SG support no restrictions */
- HIF_SCATTER_DMA_BOUNCE, /* Uses SG DMA but HIF layer uses an internal bounce buffer */
-} HIF_SCATTER_METHOD;
-
-struct hif_scatter_req {
- struct dl_list ListLink; /* link management */
- u32 Address; /* address for the read/write operation */
- u32 Request; /* request flags */
- u32 TotalLength; /* total length of entire transfer */
- u32 CallerFlags; /* caller specific flags can be stored here */
- HIF_SCATTER_COMP_CB CompletionRoutine; /* completion routine set by caller */
- int CompletionStatus; /* status of completion */
- void *Context; /* caller context for this request */
- int ValidScatterEntries; /* number of valid entries set by caller */
- HIF_SCATTER_METHOD ScatterMethod; /* scatter method handled by HIF */
- void *HIFPrivate[4]; /* HIF private area */
- u8 *pScatterBounceBuffer; /* bounce buffer for upper layers to copy to/from */
- struct hif_scatter_item ScatterList[1]; /* start of scatter list */
-};
-
-typedef struct hif_scatter_req * ( *HIF_ALLOCATE_SCATTER_REQUEST)(struct hif_device *device);
-typedef void ( *HIF_FREE_SCATTER_REQUEST)(struct hif_device *device, struct hif_scatter_req *request);
-typedef int ( *HIF_READWRITE_SCATTER)(struct hif_device *device, struct hif_scatter_req *request);
-
-struct hif_device_scatter_support_info {
- /* information returned from HIF layer */
- HIF_ALLOCATE_SCATTER_REQUEST pAllocateReqFunc;
- HIF_FREE_SCATTER_REQUEST pFreeReqFunc;
- HIF_READWRITE_SCATTER pReadWriteScatterFunc;
- int MaxScatterEntries;
- int MaxTransferSizePerScatterReq;
-};
-
-struct hif_device_os_device_info {
- void *pOSDevice;
-};
-
-#define HIF_MAX_DEVICES 1
-
-struct htc_callbacks {
- void *context; /* context to pass to the dsrhandler
- note : rwCompletionHandler is provided the context passed to HIFReadWrite */
- int (* rwCompletionHandler)(void *rwContext, int status);
- int (* dsrHandler)(void *context);
-};
-
-typedef struct osdrv_callbacks {
- void *context; /* context to pass for all callbacks except deviceRemovedHandler
- the deviceRemovedHandler is only called if the device is claimed */
- int (* deviceInsertedHandler)(void *context, void *hif_handle);
- int (* deviceRemovedHandler)(void *claimedContext, void *hif_handle);
- int (* deviceSuspendHandler)(void *context);
- int (* deviceResumeHandler)(void *context);
- int (* deviceWakeupHandler)(void *context);
- int (* devicePowerChangeHandler)(void *context, HIF_DEVICE_POWER_CHANGE_TYPE config);
-} OSDRV_CALLBACKS;
-
-#define HIF_OTHER_EVENTS (1 << 0) /* other interrupts (non-Recv) are pending, host
- needs to read the register table to figure out what */
-#define HIF_RECV_MSG_AVAIL (1 << 1) /* pending recv packet */
-
-struct hif_pending_events_info {
- u32 Events;
- u32 LookAhead;
- u32 AvailableRecvBytes;
-#ifdef THREAD_X
- u32 Polling;
- u32 INT_CAUSE_REG;
-#endif
-};
-
- /* function to get pending events , some HIF modules use special mechanisms
- * to detect packet available and other interrupts */
-typedef int ( *HIF_PENDING_EVENTS_FUNC)(struct hif_device *device,
- struct hif_pending_events_info *pEvents,
- void *AsyncContext);
-
-#define HIF_MASK_RECV true
-#define HIF_UNMASK_RECV false
- /* function to mask recv events */
-typedef int ( *HIF_MASK_UNMASK_RECV_EVENT)(struct hif_device *device,
- bool Mask,
- void *AsyncContext);
-
-
-/*
- * This API is used to perform any global initialization of the HIF layer
- * and to set OS driver callbacks (i.e. insertion/removal) to the HIF layer
- *
- */
-int HIFInit(OSDRV_CALLBACKS *callbacks);
-
-/* This API claims the HIF device and provides a context for handling removal.
- * The device removal callback is only called when the OSDRV layer claims
- * a device. The claimed context must be non-NULL */
-void HIFClaimDevice(struct hif_device *device, void *claimedContext);
-/* release the claimed device */
-void HIFReleaseDevice(struct hif_device *device);
-
-/* This API allows the HTC layer to attach to the HIF device */
-int HIFAttachHTC(struct hif_device *device, HTC_CALLBACKS *callbacks);
-/* This API detaches the HTC layer from the HIF device */
-void HIFDetachHTC(struct hif_device *device);
-
-/*
- * This API is used to provide the read/write interface over the specific bus
- * interface.
- * address - Starting address in the AR6000's address space. For mailbox
- * writes, it refers to the start of the mbox boundary. It should
- * be ensured that the last byte falls on the mailbox's EOM. For
- * mailbox reads, it refers to the end of the mbox boundary.
- * buffer - Pointer to the buffer containg the data to be transmitted or
- * received.
- * length - Amount of data to be transmitted or received.
- * request - Characterizes the attributes of the command.
- */
-int
-HIFReadWrite(struct hif_device *device,
- u32 address,
- u8 *buffer,
- u32 length,
- u32 request,
- void *context);
-
-/*
- * This can be initiated from the unload driver context when the OSDRV layer has no more use for
- * the device.
- */
-void HIFShutDownDevice(struct hif_device *device);
-
-/*
- * This should translate to an acknowledgment to the bus driver indicating that
- * the previous interrupt request has been serviced and the all the relevant
- * sources have been cleared. HTC is ready to process more interrupts.
- * This should prevent the bus driver from raising an interrupt unless the
- * previous one has been serviced and acknowledged using the previous API.
- */
-void HIFAckInterrupt(struct hif_device *device);
-
-void HIFMaskInterrupt(struct hif_device *device);
-
-void HIFUnMaskInterrupt(struct hif_device *device);
-
-#ifdef THREAD_X
-/*
- * This set of functions are to be used by the bus driver to notify
- * the HIF module about various events.
- * These are not implemented if the bus driver provides an alternative
- * way for this notification though callbacks for instance.
- */
-int HIFInsertEventNotify(void);
-
-int HIFRemoveEventNotify(void);
-
-int HIFIRQEventNotify(void);
-
-int HIFRWCompleteEventNotify(void);
-#endif
-
-int
-HIFConfigureDevice(struct hif_device *device, HIF_DEVICE_CONFIG_OPCODE opcode,
- void *config, u32 configLen);
-
-/*
- * This API wait for the remaining MBOX messages to be drained
- * This should be moved to HTC AR6K layer
- */
-int hifWaitForPendingRecv(struct hif_device *device);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _HIF_H_ */
diff --git a/drivers/staging/ath6kl/include/host_version.h b/drivers/staging/ath6kl/include/host_version.h
deleted file mode 100644
index 74f1982c681..00000000000
--- a/drivers/staging/ath6kl/include/host_version.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="host_version.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// This file contains version information for the sample host driver for the
-// AR6000 chip
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _HOST_VERSION_H_
-#define _HOST_VERSION_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <AR6002/AR6K_version.h>
-
-/*
- * The version number is made up of major, minor, patch and build
- * numbers. These are 16 bit numbers. The build and release script will
- * set the build number using a Perforce counter. Here the build number is
- * set to 9999 so that builds done without the build-release script are easily
- * identifiable.
- */
-
-#define ATH_SW_VER_MAJOR __VER_MAJOR_
-#define ATH_SW_VER_MINOR __VER_MINOR_
-#define ATH_SW_VER_PATCH __VER_PATCH_
-#define ATH_SW_VER_BUILD __BUILD_NUMBER_
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _HOST_VERSION_H_ */
diff --git a/drivers/staging/ath6kl/include/htc_api.h b/drivers/staging/ath6kl/include/htc_api.h
deleted file mode 100644
index 4fb767559f8..00000000000
--- a/drivers/staging/ath6kl/include/htc_api.h
+++ /dev/null
@@ -1,575 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="htc_api.h" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _HTC_API_H_
-#define _HTC_API_H_
-
-#include "htc_packet.h"
-#include <htc.h>
-#include <htc_services.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/* TODO.. for BMI */
-#define ENDPOINT1 0
-// TODO -remove me, but we have to fix BMI first
-#define HTC_MAILBOX_NUM_MAX 4
-
-/* this is the amount of header room required by users of HTC */
-#define HTC_HEADER_LEN HTC_HDR_LENGTH
-
-typedef void *HTC_HANDLE;
-
-typedef u16 HTC_SERVICE_ID;
-
-struct htc_init_info {
- void *pContext; /* context for target failure notification */
- void (*TargetFailure)(void *Instance, int Status);
-};
-
-/* per service connection send completion */
-typedef void (*HTC_EP_SEND_PKT_COMPLETE)(void *,struct htc_packet *);
-/* per service connection callback when a plurality of packets have been sent
- * The struct htc_packet_queue is a temporary queue object (e.g. freed on return from the callback)
- * to hold a list of completed send packets.
- * If the handler cannot fully traverse the packet queue before returning, it should
- * transfer the items of the queue into the caller's private queue using:
- * HTC_PACKET_ENQUEUE() */
-typedef void (*HTC_EP_SEND_PKT_COMP_MULTIPLE)(void *,struct htc_packet_queue *);
-/* per service connection pkt received */
-typedef void (*HTC_EP_RECV_PKT)(void *,struct htc_packet *);
-/* per service connection callback when a plurality of packets are received
- * The struct htc_packet_queue is a temporary queue object (e.g. freed on return from the callback)
- * to hold a list of recv packets.
- * If the handler cannot fully traverse the packet queue before returning, it should
- * transfer the items of the queue into the caller's private queue using:
- * HTC_PACKET_ENQUEUE() */
-typedef void (*HTC_EP_RECV_PKT_MULTIPLE)(void *,struct htc_packet_queue *);
-
-/* Optional per service connection receive buffer re-fill callback,
- * On some OSes (like Linux) packets are allocated from a global pool and indicated up
- * to the network stack. The driver never gets the packets back from the OS. For these OSes
- * a refill callback can be used to allocate and re-queue buffers into HTC.
- *
- * On other OSes, the network stack can call into the driver's OS-specifc "return_packet" handler and
- * the driver can re-queue these buffers into HTC. In this regard a refill callback is
- * unnecessary */
-typedef void (*HTC_EP_RECV_REFILL)(void *, HTC_ENDPOINT_ID Endpoint);
-
-/* Optional per service connection receive buffer allocation callback.
- * On some systems packet buffers are an extremely limited resource. Rather than
- * queue largest-possible-sized buffers to HTC, some systems would rather
- * allocate a specific size as the packet is received. The trade off is
- * slightly more processing (callback invoked for each RX packet)
- * for the benefit of committing fewer buffer resources into HTC.
- *
- * The callback is provided the length of the pending packet to fetch. This includes the
- * HTC header length plus the length of payload. The callback can return a pointer to
- * the allocated HTC packet for immediate use.
- *
- * Alternatively a variant of this handler can be used to allocate large receive packets as needed.
- * For example an application can use the refill mechanism for normal packets and the recv-alloc mechanism to
- * handle the case where a large packet buffer is required. This can significantly reduce the
- * amount of "committed" memory used to receive packets.
- *
- * */
-typedef struct htc_packet *(*HTC_EP_RECV_ALLOC)(void *, HTC_ENDPOINT_ID Endpoint, int Length);
-
-typedef enum _HTC_SEND_FULL_ACTION {
- HTC_SEND_FULL_KEEP = 0, /* packet that overflowed should be kept in the queue */
- HTC_SEND_FULL_DROP = 1, /* packet that overflowed should be dropped */
-} HTC_SEND_FULL_ACTION;
-
-/* Optional per service connection callback when a send queue is full. This can occur if the
- * host continues queueing up TX packets faster than credits can arrive
- * To prevent the host (on some Oses like Linux) from continuously queueing packets
- * and consuming resources, this callback is provided so that that the host
- * can disable TX in the subsystem (i.e. network stack).
- * This callback is invoked for each packet that "overflows" the HTC queue. The callback can
- * determine whether the new packet that overflowed the queue can be kept (HTC_SEND_FULL_KEEP) or
- * dropped (HTC_SEND_FULL_DROP). If a packet is dropped, the EpTxComplete handler will be called
- * and the packet's status field will be set to A_NO_RESOURCE.
- * Other OSes require a "per-packet" indication for each completed TX packet, this
- * closed loop mechanism will prevent the network stack from overunning the NIC
- * The packet to keep or drop is passed for inspection to the registered handler the handler
- * must ONLY inspect the packet, it may not free or reclaim the packet. */
-typedef HTC_SEND_FULL_ACTION (*HTC_EP_SEND_QUEUE_FULL)(void *, struct htc_packet *pPacket);
-
-struct htc_ep_callbacks {
- void *pContext; /* context for each callback */
- HTC_EP_SEND_PKT_COMPLETE EpTxComplete; /* tx completion callback for connected endpoint */
- HTC_EP_RECV_PKT EpRecv; /* receive callback for connected endpoint */
- HTC_EP_RECV_REFILL EpRecvRefill; /* OPTIONAL receive re-fill callback for connected endpoint */
- HTC_EP_SEND_QUEUE_FULL EpSendFull; /* OPTIONAL send full callback */
- HTC_EP_RECV_ALLOC EpRecvAlloc; /* OPTIONAL recv allocation callback */
- HTC_EP_RECV_ALLOC EpRecvAllocThresh; /* OPTIONAL recv allocation callback based on a threshold */
- HTC_EP_SEND_PKT_COMP_MULTIPLE EpTxCompleteMultiple; /* OPTIONAL completion handler for multiple complete
- indications (EpTxComplete must be NULL) */
- HTC_EP_RECV_PKT_MULTIPLE EpRecvPktMultiple; /* OPTIONAL completion handler for multiple
- recv packet indications (EpRecv must be NULL) */
- int RecvAllocThreshold; /* if EpRecvAllocThresh is non-NULL, HTC will compare the
- threshold value to the current recv packet length and invoke
- the EpRecvAllocThresh callback to acquire a packet buffer */
- int RecvRefillWaterMark; /* if a EpRecvRefill handler is provided, this value
- can be used to set a trigger refill callback
- when the recv queue drops below this value
- if set to 0, the refill is only called when packets
- are empty */
-};
-
-/* service connection information */
-struct htc_service_connect_req {
- HTC_SERVICE_ID ServiceID; /* service ID to connect to */
- u16 ConnectionFlags; /* connection flags, see htc protocol definition */
- u8 *pMetaData; /* ptr to optional service-specific meta-data */
- u8 MetaDataLength; /* optional meta data length */
- struct htc_ep_callbacks EpCallbacks; /* endpoint callbacks */
- int MaxSendQueueDepth; /* maximum depth of any send queue */
- u32 LocalConnectionFlags; /* HTC flags for the host-side (local) connection */
- unsigned int MaxSendMsgSize; /* override max message size in send direction */
-};
-
-#define HTC_LOCAL_CONN_FLAGS_ENABLE_SEND_BUNDLE_PADDING (1 << 0) /* enable send bundle padding for this endpoint */
-
-/* service connection response information */
-struct htc_service_connect_resp {
- u8 *pMetaData; /* caller supplied buffer to optional meta-data */
- u8 BufferLength; /* length of caller supplied buffer */
- u8 ActualLength; /* actual length of meta data */
- HTC_ENDPOINT_ID Endpoint; /* endpoint to communicate over */
- unsigned int MaxMsgLength; /* max length of all messages over this endpoint */
- u8 ConnectRespCode; /* connect response code from target */
-};
-
-/* endpoint distribution structure */
-struct htc_endpoint_credit_dist {
- struct htc_endpoint_credit_dist *pNext;
- struct htc_endpoint_credit_dist *pPrev;
- HTC_SERVICE_ID ServiceID; /* Service ID (set by HTC) */
- HTC_ENDPOINT_ID Endpoint; /* endpoint for this distribution struct (set by HTC) */
- u32 DistFlags; /* distribution flags, distribution function can
- set default activity using SET_EP_ACTIVE() macro */
- int TxCreditsNorm; /* credits for normal operation, anything above this
- indicates the endpoint is over-subscribed, this field
- is only relevant to the credit distribution function */
- int TxCreditsMin; /* floor for credit distribution, this field is
- only relevant to the credit distribution function */
- int TxCreditsAssigned; /* number of credits assigned to this EP, this field
- is only relevant to the credit dist function */
- int TxCredits; /* current credits available, this field is used by
- HTC to determine whether a message can be sent or
- must be queued */
- int TxCreditsToDist; /* pending credits to distribute on this endpoint, this
- is set by HTC when credit reports arrive.
- The credit distribution functions sets this to zero
- when it distributes the credits */
- int TxCreditsSeek; /* this is the number of credits that the current pending TX
- packet needs to transmit. This is set by HTC when
- and endpoint needs credits in order to transmit */
- int TxCreditSize; /* size in bytes of each credit (set by HTC) */
- int TxCreditsPerMaxMsg; /* credits required for a maximum sized messages (set by HTC) */
- void *pHTCReserved; /* reserved for HTC use */
- int TxQueueDepth; /* current depth of TX queue , i.e. messages waiting for credits
- This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE
- or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint
- that has non-zero credits to recover
- */
-};
-
-#define HTC_EP_ACTIVE ((u32) (1u << 31))
-
-/* macro to check if an endpoint has gone active, useful for credit
- * distributions */
-#define IS_EP_ACTIVE(epDist) ((epDist)->DistFlags & HTC_EP_ACTIVE)
-#define SET_EP_ACTIVE(epDist) (epDist)->DistFlags |= HTC_EP_ACTIVE
-
- /* credit distibution code that is passed into the distrbution function,
- * there are mandatory and optional codes that must be handled */
-typedef enum _HTC_CREDIT_DIST_REASON {
- HTC_CREDIT_DIST_SEND_COMPLETE = 0, /* credits available as a result of completed
- send operations (MANDATORY) resulting in credit reports */
- HTC_CREDIT_DIST_ACTIVITY_CHANGE = 1, /* a change in endpoint activity occurred (OPTIONAL) */
- HTC_CREDIT_DIST_SEEK_CREDITS, /* an endpoint needs to "seek" credits (OPTIONAL) */
- HTC_DUMP_CREDIT_STATE /* for debugging, dump any state information that is kept by
- the distribution function */
-} HTC_CREDIT_DIST_REASON;
-
-typedef void (*HTC_CREDIT_DIST_CALLBACK)(void *Context,
- struct htc_endpoint_credit_dist *pEPList,
- HTC_CREDIT_DIST_REASON Reason);
-
-typedef void (*HTC_CREDIT_INIT_CALLBACK)(void *Context,
- struct htc_endpoint_credit_dist *pEPList,
- int TotalCredits);
-
- /* endpoint statistics action */
-typedef enum _HTC_ENDPOINT_STAT_ACTION {
- HTC_EP_STAT_SAMPLE = 0, /* only read statistics */
- HTC_EP_STAT_SAMPLE_AND_CLEAR = 1, /* sample and immediately clear statistics */
- HTC_EP_STAT_CLEAR /* clear only */
-} HTC_ENDPOINT_STAT_ACTION;
-
- /* endpoint statistics */
-struct htc_endpoint_stats {
- u32 TxCreditLowIndications; /* number of times the host set the credit-low flag in a send message on
- this endpoint */
- u32 TxIssued; /* running count of total TX packets issued */
- u32 TxPacketsBundled; /* running count of TX packets that were issued in bundles */
- u32 TxBundles; /* running count of TX bundles that were issued */
- u32 TxDropped; /* tx packets that were dropped */
- u32 TxCreditRpts; /* running count of total credit reports received for this endpoint */
- u32 TxCreditRptsFromRx; /* credit reports received from this endpoint's RX packets */
- u32 TxCreditRptsFromOther; /* credit reports received from RX packets of other endpoints */
- u32 TxCreditRptsFromEp0; /* credit reports received from endpoint 0 RX packets */
- u32 TxCreditsFromRx; /* count of credits received via Rx packets on this endpoint */
- u32 TxCreditsFromOther; /* count of credits received via another endpoint */
- u32 TxCreditsFromEp0; /* count of credits received via another endpoint */
- u32 TxCreditsConsummed; /* count of consummed credits */
- u32 TxCreditsReturned; /* count of credits returned */
- u32 RxReceived; /* count of RX packets received */
- u32 RxLookAheads; /* count of lookahead records
- found in messages received on this endpoint */
- u32 RxPacketsBundled; /* count of recv packets received in a bundle */
- u32 RxBundleLookAheads; /* count of number of bundled lookaheads */
- u32 RxBundleIndFromHdr; /* count of the number of bundle indications from the HTC header */
- u32 RxAllocThreshHit; /* count of the number of times the recv allocation threshold was hit */
- u32 RxAllocThreshBytes; /* total number of bytes */
-};
-
-/* ------ Function Prototypes ------ */
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Create an instance of HTC over the underlying HIF device
- @function name: HTCCreate
- @input: HifDevice - hif device handle,
- pInfo - initialization information
- @output:
- @return: HTC_HANDLE on success, NULL on failure
- @notes:
- @example:
- @see also: HTCDestroy
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-HTC_HANDLE HTCCreate(void *HifDevice, struct htc_init_info *pInfo);
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Get the underlying HIF device handle
- @function name: HTCGetHifDevice
- @input: HTCHandle - handle passed into the AddInstance callback
- @output:
- @return: opaque HIF device handle usable in HIF API calls.
- @notes:
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-void *HTCGetHifDevice(HTC_HANDLE HTCHandle);
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Set credit distribution parameters
- @function name: HTCSetCreditDistribution
- @input: HTCHandle - HTC handle
- pCreditDistCont - caller supplied context to pass into distribution functions
- CreditDistFunc - Distribution function callback
- CreditDistInit - Credit Distribution initialization callback
- ServicePriorityOrder - Array containing list of service IDs, lowest index is highest
- priority
- ListLength - number of elements in ServicePriorityOrder
- @output:
- @return:
- @notes: The user can set a custom credit distribution function to handle special requirements
- for each endpoint. A default credit distribution routine can be used by setting
- CreditInitFunc to NULL. The default credit distribution is only provided for simple
- "fair" credit distribution without regard to any prioritization.
-
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-void HTCSetCreditDistribution(HTC_HANDLE HTCHandle,
- void *pCreditDistContext,
- HTC_CREDIT_DIST_CALLBACK CreditDistFunc,
- HTC_CREDIT_INIT_CALLBACK CreditInitFunc,
- HTC_SERVICE_ID ServicePriorityOrder[],
- int ListLength);
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Wait for the target to indicate the HTC layer is ready
- @function name: HTCWaitTarget
- @input: HTCHandle - HTC handle
- @output:
- @return:
- @notes: This API blocks until the target responds with an HTC ready message.
- The caller should not connect services until the target has indicated it is
- ready.
- @example:
- @see also: HTCConnectService
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HTCWaitTarget(HTC_HANDLE HTCHandle);
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Start target service communications
- @function name: HTCStart
- @input: HTCHandle - HTC handle
- @output:
- @return:
- @notes: This API indicates to the target that the service connection phase is complete
- and the target can freely start all connected services. This API should only be
- called AFTER all service connections have been made. TCStart will issue a
- SETUP_COMPLETE message to the target to indicate that all service connections
- have been made and the target can start communicating over the endpoints.
- @example:
- @see also: HTCConnectService
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HTCStart(HTC_HANDLE HTCHandle);
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Add receive packet to HTC
- @function name: HTCAddReceivePkt
- @input: HTCHandle - HTC handle
- pPacket - HTC receive packet to add
- @output:
- @return: 0 on success
- @notes: user must supply HTC packets for capturing incomming HTC frames. The caller
- must initialize each HTC packet using the SET_HTC_PACKET_INFO_RX_REFILL()
- macro.
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HTCAddReceivePkt(HTC_HANDLE HTCHandle, struct htc_packet *pPacket);
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Connect to an HTC service
- @function name: HTCConnectService
- @input: HTCHandle - HTC handle
- pReq - connection details
- @output: pResp - connection response
- @return:
- @notes: Service connections must be performed before HTCStart. User provides callback handlers
- for various endpoint events.
- @example:
- @see also: HTCStart
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HTCConnectService(HTC_HANDLE HTCHandle,
- struct htc_service_connect_req *pReq,
- struct htc_service_connect_resp *pResp);
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Send an HTC packet
- @function name: HTCSendPkt
- @input: HTCHandle - HTC handle
- pPacket - packet to send
- @output:
- @return: 0
- @notes: Caller must initialize packet using SET_HTC_PACKET_INFO_TX() macro.
- This interface is fully asynchronous. On error, HTC SendPkt will
- call the registered Endpoint callback to cleanup the packet.
- @example:
- @see also: HTCFlushEndpoint
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HTCSendPkt(HTC_HANDLE HTCHandle, struct htc_packet *pPacket);
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Stop HTC service communications
- @function name: HTCStop
- @input: HTCHandle - HTC handle
- @output:
- @return:
- @notes: HTC communications is halted. All receive and pending TX packets will
- be flushed.
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-void HTCStop(HTC_HANDLE HTCHandle);
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Destroy HTC service
- @function name: HTCDestroy
- @input: HTCHandle
- @output:
- @return:
- @notes: This cleans up all resources allocated by HTCCreate().
- @example:
- @see also: HTCCreate
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-void HTCDestroy(HTC_HANDLE HTCHandle);
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Flush pending TX packets
- @function name: HTCFlushEndpoint
- @input: HTCHandle - HTC handle
- Endpoint - Endpoint to flush
- Tag - flush tag
- @output:
- @return:
- @notes: The Tag parameter is used to selectively flush packets with matching tags.
- The value of 0 forces all packets to be flush regardless of tag.
- @example:
- @see also: HTCSendPkt
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-void HTCFlushEndpoint(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint, HTC_TX_TAG Tag);
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Dump credit distribution state
- @function name: HTCDumpCreditStates
- @input: HTCHandle - HTC handle
- @output:
- @return:
- @notes: This dumps all credit distribution information to the debugger
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-void HTCDumpCreditStates(HTC_HANDLE HTCHandle);
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Indicate a traffic activity change on an endpoint
- @function name: HTCIndicateActivityChange
- @input: HTCHandle - HTC handle
- Endpoint - endpoint in which activity has changed
- Active - true if active, false if it has become inactive
- @output:
- @return:
- @notes: This triggers the registered credit distribution function to
- re-adjust credits for active/inactive endpoints.
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-void HTCIndicateActivityChange(HTC_HANDLE HTCHandle,
- HTC_ENDPOINT_ID Endpoint,
- bool Active);
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Get endpoint statistics
- @function name: HTCGetEndpointStatistics
- @input: HTCHandle - HTC handle
- Endpoint - Endpoint identifier
- Action - action to take with statistics
- @output:
- pStats - statistics that were sampled (can be NULL if Action is HTC_EP_STAT_CLEAR)
-
- @return: true if statistics profiling is enabled, otherwise false.
-
- @notes: Statistics is a compile-time option and this function may return false
- if HTC is not compiled with profiling.
-
- The caller can specify the statistic "action" to take when sampling
- the statistics. This includes:
-
- HTC_EP_STAT_SAMPLE: The pStats structure is filled with the current values.
- HTC_EP_STAT_SAMPLE_AND_CLEAR: The structure is filled and the current statistics
- are cleared.
- HTC_EP_STAT_CLEA : the statistics are cleared, the called can pass a NULL value for
- pStats
-
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-bool HTCGetEndpointStatistics(HTC_HANDLE HTCHandle,
- HTC_ENDPOINT_ID Endpoint,
- HTC_ENDPOINT_STAT_ACTION Action,
- struct htc_endpoint_stats *pStats);
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Unblock HTC message reception
- @function name: HTCUnblockRecv
- @input: HTCHandle - HTC handle
- @output:
- @return:
- @notes:
- HTC will block the receiver if the EpRecvAlloc callback fails to provide a packet.
- The caller can use this API to indicate to HTC when resources (buffers) are available
- such that the receiver can be unblocked and HTC may re-attempt fetching the pending message.
-
- This API is not required if the user uses the EpRecvRefill callback or uses the HTCAddReceivePacket()
- API to recycle or provide receive packets to HTC.
-
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-void HTCUnblockRecv(HTC_HANDLE HTCHandle);
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: send a series of HTC packets
- @function name: HTCSendPktsMultiple
- @input: HTCHandle - HTC handle
- pPktQueue - local queue holding packets to send
- @output:
- @return: 0
- @notes: Caller must initialize each packet using SET_HTC_PACKET_INFO_TX() macro.
- The queue must only contain packets directed at the same endpoint.
- Caller supplies a pointer to an struct htc_packet_queue structure holding the TX packets in FIFO order.
- This API will remove the packets from the pkt queue and place them into the HTC Tx Queue
- and bundle messages where possible.
- The caller may allocate the pkt queue on the stack to hold the packets.
- This interface is fully asynchronous. On error, HTCSendPkts will
- call the registered Endpoint callback to cleanup the packet.
- @example:
- @see also: HTCFlushEndpoint
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HTCSendPktsMultiple(HTC_HANDLE HTCHandle, struct htc_packet_queue *pPktQueue);
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Add multiple receive packets to HTC
- @function name: HTCAddReceivePktMultiple
- @input: HTCHandle - HTC handle
- pPktQueue - HTC receive packet queue holding packets to add
- @output:
- @return: 0 on success
- @notes: user must supply HTC packets for capturing incomming HTC frames. The caller
- must initialize each HTC packet using the SET_HTC_PACKET_INFO_RX_REFILL()
- macro. The queue must only contain recv packets for the same endpoint.
- Caller supplies a pointer to an struct htc_packet_queue structure holding the recv packet.
- This API will remove the packets from the pkt queue and place them into internal
- recv packet list.
- The caller may allocate the pkt queue on the stack to hold the packets.
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HTCAddReceivePktMultiple(HTC_HANDLE HTCHandle, struct htc_packet_queue *pPktQueue);
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Check if an endpoint is marked active
- @function name: HTCIsEndpointActive
- @input: HTCHandle - HTC handle
- Endpoint - endpoint to check for active state
- @output:
- @return: returns true if Endpoint is Active
- @notes:
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-bool HTCIsEndpointActive(HTC_HANDLE HTCHandle,
- HTC_ENDPOINT_ID Endpoint);
-
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- @desc: Get the number of recv buffers currently queued into an HTC endpoint
- @function name: HTCGetNumRecvBuffers
- @input: HTCHandle - HTC handle
- Endpoint - endpoint to check
- @output:
- @return: returns number of buffers in queue
- @notes:
- @example:
- @see also:
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-int HTCGetNumRecvBuffers(HTC_HANDLE HTCHandle,
- HTC_ENDPOINT_ID Endpoint);
-
-/* internally used functions for testing... */
-void HTCEnableRecv(HTC_HANDLE HTCHandle);
-void HTCDisableRecv(HTC_HANDLE HTCHandle);
-int HTCWaitForPendingRecv(HTC_HANDLE HTCHandle,
- u32 TimeoutInMs,
- bool *pbIsRecvPending);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _HTC_API_H_ */
diff --git a/drivers/staging/ath6kl/include/htc_packet.h b/drivers/staging/ath6kl/include/htc_packet.h
deleted file mode 100644
index ba65c34ebc9..00000000000
--- a/drivers/staging/ath6kl/include/htc_packet.h
+++ /dev/null
@@ -1,227 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="htc_packet.h" company="Atheros">
-// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef HTC_PACKET_H_
-#define HTC_PACKET_H_
-
-
-#include "dl_list.h"
-
-/* ------ Endpoint IDS ------ */
-typedef enum
-{
- ENDPOINT_UNUSED = -1,
- ENDPOINT_0 = 0,
- ENDPOINT_1 = 1,
- ENDPOINT_2 = 2,
- ENDPOINT_3,
- ENDPOINT_4,
- ENDPOINT_5,
- ENDPOINT_6,
- ENDPOINT_7,
- ENDPOINT_8,
- ENDPOINT_MAX,
-} HTC_ENDPOINT_ID;
-
-struct htc_packet;
-
-typedef void (* HTC_PACKET_COMPLETION)(void *,struct htc_packet *);
-
-typedef u16 HTC_TX_TAG;
-
-struct htc_tx_packet_info {
- HTC_TX_TAG Tag; /* tag used to selective flush packets */
- int CreditsUsed; /* number of credits used for this TX packet (HTC internal) */
- u8 SendFlags; /* send flags (HTC internal) */
- int SeqNo; /* internal seq no for debugging (HTC internal) */
-};
-
-#define HTC_TX_PACKET_TAG_ALL 0 /* a tag of zero is reserved and used to flush ALL packets */
-#define HTC_TX_PACKET_TAG_INTERNAL 1 /* internal tags start here */
-#define HTC_TX_PACKET_TAG_USER_DEFINED (HTC_TX_PACKET_TAG_INTERNAL + 9) /* user-defined tags start here */
-
-struct htc_rx_packet_info {
- u32 ExpectedHdr; /* HTC internal use */
- u32 HTCRxFlags; /* HTC internal use */
- u32 IndicationFlags; /* indication flags set on each RX packet indication */
-};
-
-#define HTC_RX_FLAGS_INDICATE_MORE_PKTS (1 << 0) /* more packets on this endpoint are being fetched */
-
-/* wrapper around endpoint-specific packets */
-struct htc_packet {
- struct dl_list ListLink; /* double link */
- void *pPktContext; /* caller's per packet specific context */
-
- u8 *pBufferStart; /* the true buffer start , the caller can
- store the real buffer start here. In
- receive callbacks, the HTC layer sets pBuffer
- to the start of the payload past the header. This
- field allows the caller to reset pBuffer when it
- recycles receive packets back to HTC */
- /*
- * Pointer to the start of the buffer. In the transmit
- * direction this points to the start of the payload. In the
- * receive direction, however, the buffer when queued up
- * points to the start of the HTC header but when returned
- * to the caller points to the start of the payload
- */
- u8 *pBuffer; /* payload start (RX/TX) */
- u32 BufferLength; /* length of buffer */
- u32 ActualLength; /* actual length of payload */
- HTC_ENDPOINT_ID Endpoint; /* endpoint that this packet was sent/recv'd from */
- int Status; /* completion status */
- union {
- struct htc_tx_packet_info AsTx; /* Tx Packet specific info */
- struct htc_rx_packet_info AsRx; /* Rx Packet specific info */
- } PktInfo;
-
- /* the following fields are for internal HTC use */
- HTC_PACKET_COMPLETION Completion; /* completion */
- void *pContext; /* HTC private completion context */
-};
-
-
-
-#define COMPLETE_HTC_PACKET(p,status) \
-{ \
- (p)->Status = (status); \
- (p)->Completion((p)->pContext,(p)); \
-}
-
-#define INIT_HTC_PACKET_INFO(p,b,len) \
-{ \
- (p)->pBufferStart = (b); \
- (p)->BufferLength = (len); \
-}
-
-/* macro to set an initial RX packet for refilling HTC */
-#define SET_HTC_PACKET_INFO_RX_REFILL(p,c,b,len,ep) \
-{ \
- (p)->pPktContext = (c); \
- (p)->pBuffer = (b); \
- (p)->pBufferStart = (b); \
- (p)->BufferLength = (len); \
- (p)->Endpoint = (ep); \
-}
-
-/* fast macro to recycle an RX packet that will be re-queued to HTC */
-#define HTC_PACKET_RESET_RX(p) \
- { (p)->pBuffer = (p)->pBufferStart; (p)->ActualLength = 0; }
-
-/* macro to set packet parameters for TX */
-#define SET_HTC_PACKET_INFO_TX(p,c,b,len,ep,tag) \
-{ \
- (p)->pPktContext = (c); \
- (p)->pBuffer = (b); \
- (p)->ActualLength = (len); \
- (p)->Endpoint = (ep); \
- (p)->PktInfo.AsTx.Tag = (tag); \
-}
-
-/* HTC Packet Queueing Macros */
-struct htc_packet_queue {
- struct dl_list QueueHead;
- int Depth;
-};
-
-/* initialize queue */
-#define INIT_HTC_PACKET_QUEUE(pQ) \
-{ \
- DL_LIST_INIT(&(pQ)->QueueHead); \
- (pQ)->Depth = 0; \
-}
-
-/* enqueue HTC packet to the tail of the queue */
-#define HTC_PACKET_ENQUEUE(pQ,p) \
-{ DL_ListInsertTail(&(pQ)->QueueHead,&(p)->ListLink); \
- (pQ)->Depth++; \
-}
-
-/* enqueue HTC packet to the tail of the queue */
-#define HTC_PACKET_ENQUEUE_TO_HEAD(pQ,p) \
-{ DL_ListInsertHead(&(pQ)->QueueHead,&(p)->ListLink); \
- (pQ)->Depth++; \
-}
-/* test if a queue is empty */
-#define HTC_QUEUE_EMPTY(pQ) ((pQ)->Depth == 0)
-/* get packet at head without removing it */
-static INLINE struct htc_packet *HTC_GET_PKT_AT_HEAD(struct htc_packet_queue *queue) {
- if (queue->Depth == 0) {
- return NULL;
- }
- return A_CONTAINING_STRUCT((DL_LIST_GET_ITEM_AT_HEAD(&queue->QueueHead)),struct htc_packet,ListLink);
-}
-/* remove a packet from a queue, where-ever it is in the queue */
-#define HTC_PACKET_REMOVE(pQ,p) \
-{ \
- DL_ListRemove(&(p)->ListLink); \
- (pQ)->Depth--; \
-}
-
-/* dequeue an HTC packet from the head of the queue */
-static INLINE struct htc_packet *HTC_PACKET_DEQUEUE(struct htc_packet_queue *queue) {
- struct dl_list *pItem = DL_ListRemoveItemFromHead(&queue->QueueHead);
- if (pItem != NULL) {
- queue->Depth--;
- return A_CONTAINING_STRUCT(pItem, struct htc_packet, ListLink);
- }
- return NULL;
-}
-
-/* dequeue an HTC packet from the tail of the queue */
-static INLINE struct htc_packet *HTC_PACKET_DEQUEUE_TAIL(struct htc_packet_queue *queue) {
- struct dl_list *pItem = DL_ListRemoveItemFromTail(&queue->QueueHead);
- if (pItem != NULL) {
- queue->Depth--;
- return A_CONTAINING_STRUCT(pItem, struct htc_packet, ListLink);
- }
- return NULL;
-}
-
-#define HTC_PACKET_QUEUE_DEPTH(pQ) (pQ)->Depth
-
-
-#define HTC_GET_ENDPOINT_FROM_PKT(p) (p)->Endpoint
-#define HTC_GET_TAG_FROM_PKT(p) (p)->PktInfo.AsTx.Tag
-
- /* transfer the packets from one queue to the tail of another queue */
-#define HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(pQDest,pQSrc) \
-{ \
- DL_ListTransferItemsToTail(&(pQDest)->QueueHead,&(pQSrc)->QueueHead); \
- (pQDest)->Depth += (pQSrc)->Depth; \
- (pQSrc)->Depth = 0; \
-}
-
- /* fast version to init and add a single packet to a queue */
-#define INIT_HTC_PACKET_QUEUE_AND_ADD(pQ,pP) \
-{ \
- DL_LIST_INIT_AND_ADD(&(pQ)->QueueHead,&(pP)->ListLink) \
- (pQ)->Depth = 1; \
-}
-
-#define HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pQ, pPTemp) \
- ITERATE_OVER_LIST_ALLOW_REMOVE(&(pQ)->QueueHead,(pPTemp), struct htc_packet, ListLink)
-
-#define HTC_PACKET_QUEUE_ITERATE_END ITERATE_END
-
-#endif /*HTC_PACKET_H_*/
diff --git a/drivers/staging/ath6kl/include/wlan_api.h b/drivers/staging/ath6kl/include/wlan_api.h
deleted file mode 100644
index 9eea5875dd3..00000000000
--- a/drivers/staging/ath6kl/include/wlan_api.h
+++ /dev/null
@@ -1,128 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// This file contains the API for the host wlan module
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _HOST_WLAN_API_H_
-#define _HOST_WLAN_API_H_
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <a_osapi.h>
-
-struct ieee80211_node_table;
-struct ieee80211_frame;
-
-struct ieee80211_common_ie {
- u16 ie_chan;
- u8 *ie_tstamp;
- u8 *ie_ssid;
- u8 *ie_rates;
- u8 *ie_xrates;
- u8 *ie_country;
- u8 *ie_wpa;
- u8 *ie_rsn;
- u8 *ie_wmm;
- u8 *ie_ath;
- u16 ie_capInfo;
- u16 ie_beaconInt;
- u8 *ie_tim;
- u8 *ie_chswitch;
- u8 ie_erp;
- u8 *ie_wsc;
- u8 *ie_htcap;
- u8 *ie_htop;
-#ifdef WAPI_ENABLE
- u8 *ie_wapi;
-#endif
-};
-
-typedef struct bss {
- u8 ni_macaddr[6];
- u8 ni_snr;
- s16 ni_rssi;
- struct bss *ni_list_next;
- struct bss *ni_list_prev;
- struct bss *ni_hash_next;
- struct bss *ni_hash_prev;
- struct ieee80211_common_ie ni_cie;
- u8 *ni_buf;
- u16 ni_framelen;
- struct ieee80211_node_table *ni_table;
- u32 ni_refcnt;
- int ni_scangen;
-
- u32 ni_tstamp;
- u32 ni_actcnt;
-#ifdef OS_ROAM_MANAGEMENT
- u32 ni_si_gen;
-#endif
-} bss_t;
-
-typedef void wlan_node_iter_func(void *arg, bss_t *);
-
-bss_t *wlan_node_alloc(struct ieee80211_node_table *nt, int wh_size);
-void wlan_node_free(bss_t *ni);
-void wlan_setup_node(struct ieee80211_node_table *nt, bss_t *ni,
- const u8 *macaddr);
-bss_t *wlan_find_node(struct ieee80211_node_table *nt, const u8 *macaddr);
-void wlan_node_reclaim(struct ieee80211_node_table *nt, bss_t *ni);
-void wlan_free_allnodes(struct ieee80211_node_table *nt);
-void wlan_iterate_nodes(struct ieee80211_node_table *nt, wlan_node_iter_func *f,
- void *arg);
-
-void wlan_node_table_init(void *wmip, struct ieee80211_node_table *nt);
-void wlan_node_table_reset(struct ieee80211_node_table *nt);
-void wlan_node_table_cleanup(struct ieee80211_node_table *nt);
-
-int wlan_parse_beacon(u8 *buf, int framelen,
- struct ieee80211_common_ie *cie);
-
-u16 wlan_ieee2freq(int chan);
-u32 wlan_freq2ieee(u16 freq);
-
-void wlan_set_nodeage(struct ieee80211_node_table *nt, u32 nodeAge);
-
-void
-wlan_refresh_inactive_nodes (struct ieee80211_node_table *nt);
-
-bss_t *
-wlan_find_Ssidnode (struct ieee80211_node_table *nt, u8 *pSsid,
- u32 ssidLength, bool bIsWPA2, bool bMatchSSID);
-
-void
-wlan_node_return (struct ieee80211_node_table *nt, bss_t *ni);
-
-bss_t *wlan_node_remove(struct ieee80211_node_table *nt, u8 *bssid);
-
-bss_t *
-wlan_find_matching_Ssidnode (struct ieee80211_node_table *nt, u8 *pSsid,
- u32 ssidLength, u32 dot11AuthMode, u32 authMode,
- u32 pairwiseCryptoType, u32 grpwiseCryptoTyp);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _HOST_WLAN_API_H_ */
diff --git a/drivers/staging/ath6kl/include/wmi_api.h b/drivers/staging/ath6kl/include/wmi_api.h
deleted file mode 100644
index c8583e0c4a9..00000000000
--- a/drivers/staging/ath6kl/include/wmi_api.h
+++ /dev/null
@@ -1,441 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="wmi_api.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// This file contains the definitions for the Wireless Module Interface (WMI).
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _WMI_API_H_
-#define _WMI_API_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
- /* WMI converts a dix frame with an ethernet payload (up to 1500 bytes)
- * to an 802.3 frame (adds SNAP header) and adds on a WMI data header */
-#define WMI_MAX_TX_DATA_FRAME_LENGTH (1500 + sizeof(WMI_DATA_HDR) + sizeof(ATH_MAC_HDR) + sizeof(ATH_LLC_SNAP_HDR))
-
- /* A normal WMI data frame */
-#define WMI_MAX_NORMAL_RX_DATA_FRAME_LENGTH (1500 + sizeof(WMI_DATA_HDR) + sizeof(ATH_MAC_HDR) + sizeof(ATH_LLC_SNAP_HDR))
-
- /* An AMSDU frame */ /* The MAX AMSDU length of AR6003 is 3839 */
-#define WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH (3840 + sizeof(WMI_DATA_HDR) + sizeof(ATH_MAC_HDR) + sizeof(ATH_LLC_SNAP_HDR))
-
-/*
- * IP QoS Field definitions according to 802.1p
- */
-#define BEST_EFFORT_PRI 0
-#define BACKGROUND_PRI 1
-#define EXCELLENT_EFFORT_PRI 3
-#define CONTROLLED_LOAD_PRI 4
-#define VIDEO_PRI 5
-#define VOICE_PRI 6
-#define NETWORK_CONTROL_PRI 7
-#define MAX_NUM_PRI 8
-
-#define UNDEFINED_PRI (0xff)
-
-#define WMI_IMPLICIT_PSTREAM_INACTIVITY_INT 5000 /* 5 seconds */
-
-#define A_ROUND_UP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
-
-typedef enum {
- ATHEROS_COMPLIANCE = 0x1,
-}TSPEC_PARAM_COMPLIANCE;
-
-struct wmi_t;
-
-void *wmi_init(void *devt);
-
-void wmi_qos_state_init(struct wmi_t *wmip);
-void wmi_shutdown(struct wmi_t *wmip);
-HTC_ENDPOINT_ID wmi_get_control_ep(struct wmi_t * wmip);
-void wmi_set_control_ep(struct wmi_t * wmip, HTC_ENDPOINT_ID eid);
-u16 wmi_get_mapped_qos_queue(struct wmi_t *, u8 );
-int wmi_dix_2_dot3(struct wmi_t *wmip, void *osbuf);
-int wmi_data_hdr_add(struct wmi_t *wmip, void *osbuf, u8 msgType, bool bMoreData, WMI_DATA_HDR_DATA_TYPE data_type,u8 metaVersion, void *pTxMetaS);
-int wmi_dot3_2_dix(void *osbuf);
-
-int wmi_dot11_hdr_remove (struct wmi_t *wmip, void *osbuf);
-int wmi_dot11_hdr_add(struct wmi_t *wmip, void *osbuf, NETWORK_TYPE mode);
-
-int wmi_data_hdr_remove(struct wmi_t *wmip, void *osbuf);
-int wmi_syncpoint(struct wmi_t *wmip);
-int wmi_syncpoint_reset(struct wmi_t *wmip);
-u8 wmi_implicit_create_pstream(struct wmi_t *wmip, void *osbuf, u32 layer2Priority, bool wmmEnabled);
-
-u8 wmi_determine_userPriority (u8 *pkt, u32 layer2Pri);
-
-int wmi_control_rx(struct wmi_t *wmip, void *osbuf);
-void wmi_iterate_nodes(struct wmi_t *wmip, wlan_node_iter_func *f, void *arg);
-void wmi_free_allnodes(struct wmi_t *wmip);
-bss_t *wmi_find_node(struct wmi_t *wmip, const u8 *macaddr);
-void wmi_free_node(struct wmi_t *wmip, const u8 *macaddr);
-
-
-typedef enum {
- NO_SYNC_WMIFLAG = 0,
- SYNC_BEFORE_WMIFLAG, /* transmit all queued data before cmd */
- SYNC_AFTER_WMIFLAG, /* any new data waits until cmd execs */
- SYNC_BOTH_WMIFLAG,
- END_WMIFLAG /* end marker */
-} WMI_SYNC_FLAG;
-
-int wmi_cmd_send(struct wmi_t *wmip, void *osbuf, WMI_COMMAND_ID cmdId,
- WMI_SYNC_FLAG flag);
-
-int wmi_connect_cmd(struct wmi_t *wmip,
- NETWORK_TYPE netType,
- DOT11_AUTH_MODE dot11AuthMode,
- AUTH_MODE authMode,
- CRYPTO_TYPE pairwiseCrypto,
- u8 pairwiseCryptoLen,
- CRYPTO_TYPE groupCrypto,
- u8 groupCryptoLen,
- int ssidLength,
- u8 *ssid,
- u8 *bssid,
- u16 channel,
- u32 ctrl_flags);
-
-int wmi_reconnect_cmd(struct wmi_t *wmip,
- u8 *bssid,
- u16 channel);
-int wmi_disconnect_cmd(struct wmi_t *wmip);
-int wmi_getrev_cmd(struct wmi_t *wmip);
-int wmi_startscan_cmd(struct wmi_t *wmip, WMI_SCAN_TYPE scanType,
- u32 forceFgScan, u32 isLegacy,
- u32 homeDwellTime, u32 forceScanInterval,
- s8 numChan, u16 *channelList);
-int wmi_scanparams_cmd(struct wmi_t *wmip, u16 fg_start_sec,
- u16 fg_end_sec, u16 bg_sec,
- u16 minact_chdw_msec,
- u16 maxact_chdw_msec, u16 pas_chdw_msec,
- u8 shScanRatio, u8 scanCtrlFlags,
- u32 max_dfsch_act_time,
- u16 maxact_scan_per_ssid);
-int wmi_bssfilter_cmd(struct wmi_t *wmip, u8 filter, u32 ieMask);
-int wmi_probedSsid_cmd(struct wmi_t *wmip, u8 index, u8 flag,
- u8 ssidLength, u8 *ssid);
-int wmi_listeninterval_cmd(struct wmi_t *wmip, u16 listenInterval, u16 listenBeacons);
-int wmi_bmisstime_cmd(struct wmi_t *wmip, u16 bmisstime, u16 bmissbeacons);
-int wmi_associnfo_cmd(struct wmi_t *wmip, u8 ieType,
- u8 ieLen, u8 *ieInfo);
-int wmi_powermode_cmd(struct wmi_t *wmip, u8 powerMode);
-int wmi_ibsspmcaps_cmd(struct wmi_t *wmip, u8 pmEnable, u8 ttl,
- u16 atim_windows, u16 timeout_value);
-int wmi_apps_cmd(struct wmi_t *wmip, u8 psType, u32 idle_time,
- u32 ps_period, u8 sleep_period);
-int wmi_pmparams_cmd(struct wmi_t *wmip, u16 idlePeriod,
- u16 psPollNum, u16 dtimPolicy,
- u16 wakup_tx_policy, u16 num_tx_to_wakeup,
- u16 ps_fail_event_policy);
-int wmi_disctimeout_cmd(struct wmi_t *wmip, u8 timeout);
-int wmi_sync_cmd(struct wmi_t *wmip, u8 syncNumber);
-int wmi_create_pstream_cmd(struct wmi_t *wmip, WMI_CREATE_PSTREAM_CMD *pstream);
-int wmi_delete_pstream_cmd(struct wmi_t *wmip, u8 trafficClass, u8 streamID);
-int wmi_set_framerate_cmd(struct wmi_t *wmip, u8 bEnable, u8 type, u8 subType, u16 rateMask);
-int wmi_set_bitrate_cmd(struct wmi_t *wmip, s32 dataRate, s32 mgmtRate, s32 ctlRate);
-int wmi_get_bitrate_cmd(struct wmi_t *wmip);
-s8 wmi_validate_bitrate(struct wmi_t *wmip, s32 rate, s8 *rate_idx);
-int wmi_get_regDomain_cmd(struct wmi_t *wmip);
-int wmi_get_channelList_cmd(struct wmi_t *wmip);
-int wmi_set_channelParams_cmd(struct wmi_t *wmip, u8 scanParam,
- WMI_PHY_MODE mode, s8 numChan,
- u16 *channelList);
-
-int wmi_set_snr_threshold_params(struct wmi_t *wmip,
- WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd);
-int wmi_set_rssi_threshold_params(struct wmi_t *wmip,
- WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd);
-int wmi_clr_rssi_snr(struct wmi_t *wmip);
-int wmi_set_lq_threshold_params(struct wmi_t *wmip,
- WMI_LQ_THRESHOLD_PARAMS_CMD *lqCmd);
-int wmi_set_rts_cmd(struct wmi_t *wmip, u16 threshold);
-int wmi_set_lpreamble_cmd(struct wmi_t *wmip, u8 status, u8 preamblePolicy);
-
-int wmi_set_error_report_bitmask(struct wmi_t *wmip, u32 bitmask);
-
-int wmi_get_challenge_resp_cmd(struct wmi_t *wmip, u32 cookie,
- u32 source);
-
-int wmi_config_debug_module_cmd(struct wmi_t *wmip, u16 mmask,
- u16 tsr, bool rep, u16 size,
- u32 valid);
-
-int wmi_get_stats_cmd(struct wmi_t *wmip);
-
-int wmi_addKey_cmd(struct wmi_t *wmip, u8 keyIndex,
- CRYPTO_TYPE keyType, u8 keyUsage,
- u8 keyLength,u8 *keyRSC,
- u8 *keyMaterial, u8 key_op_ctrl, u8 *mac,
- WMI_SYNC_FLAG sync_flag);
-int wmi_add_krk_cmd(struct wmi_t *wmip, u8 *krk);
-int wmi_delete_krk_cmd(struct wmi_t *wmip);
-int wmi_deleteKey_cmd(struct wmi_t *wmip, u8 keyIndex);
-int wmi_set_akmp_params_cmd(struct wmi_t *wmip,
- WMI_SET_AKMP_PARAMS_CMD *akmpParams);
-int wmi_get_pmkid_list_cmd(struct wmi_t *wmip);
-int wmi_set_pmkid_list_cmd(struct wmi_t *wmip,
- WMI_SET_PMKID_LIST_CMD *pmkInfo);
-int wmi_abort_scan_cmd(struct wmi_t *wmip);
-int wmi_set_txPwr_cmd(struct wmi_t *wmip, u8 dbM);
-int wmi_get_txPwr_cmd(struct wmi_t *wmip);
-int wmi_addBadAp_cmd(struct wmi_t *wmip, u8 apIndex, u8 *bssid);
-int wmi_deleteBadAp_cmd(struct wmi_t *wmip, u8 apIndex);
-int wmi_set_tkip_countermeasures_cmd(struct wmi_t *wmip, bool en);
-int wmi_setPmkid_cmd(struct wmi_t *wmip, u8 *bssid, u8 *pmkId,
- bool set);
-int wmi_set_access_params_cmd(struct wmi_t *wmip, u8 ac, u16 txop,
- u8 eCWmin, u8 eCWmax,
- u8 aifsn);
-int wmi_set_retry_limits_cmd(struct wmi_t *wmip, u8 frameType,
- u8 trafficClass, u8 maxRetries,
- u8 enableNotify);
-
-void wmi_get_current_bssid(struct wmi_t *wmip, u8 *bssid);
-
-int wmi_get_roam_tbl_cmd(struct wmi_t *wmip);
-int wmi_get_roam_data_cmd(struct wmi_t *wmip, u8 roamDataType);
-int wmi_set_roam_ctrl_cmd(struct wmi_t *wmip, WMI_SET_ROAM_CTRL_CMD *p,
- u8 size);
-int wmi_set_powersave_timers_cmd(struct wmi_t *wmip,
- WMI_POWERSAVE_TIMERS_POLICY_CMD *pCmd,
- u8 size);
-
-int wmi_set_opt_mode_cmd(struct wmi_t *wmip, u8 optMode);
-int wmi_opt_tx_frame_cmd(struct wmi_t *wmip,
- u8 frmType,
- u8 *dstMacAddr,
- u8 *bssid,
- u16 optIEDataLen,
- u8 *optIEData);
-
-int wmi_set_adhoc_bconIntvl_cmd(struct wmi_t *wmip, u16 intvl);
-int wmi_set_voice_pkt_size_cmd(struct wmi_t *wmip, u16 voicePktSize);
-int wmi_set_max_sp_len_cmd(struct wmi_t *wmip, u8 maxSpLen);
-u8 convert_userPriority_to_trafficClass(u8 userPriority);
-u8 wmi_get_power_mode_cmd(struct wmi_t *wmip);
-int wmi_verify_tspec_params(WMI_CREATE_PSTREAM_CMD *pCmd, int tspecCompliance);
-
-#ifdef CONFIG_HOST_TCMD_SUPPORT
-int wmi_test_cmd(struct wmi_t *wmip, u8 *buf, u32 len);
-#endif
-
-int wmi_set_bt_status_cmd(struct wmi_t *wmip, u8 streamType, u8 status);
-int wmi_set_bt_params_cmd(struct wmi_t *wmip, WMI_SET_BT_PARAMS_CMD* cmd);
-
-int wmi_set_btcoex_fe_ant_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_FE_ANT_CMD * cmd);
-
-int wmi_set_btcoex_colocated_bt_dev_cmd(struct wmi_t *wmip,
- WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD * cmd);
-
-int wmi_set_btcoex_btinquiry_page_config_cmd(struct wmi_t *wmip,
- WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD *cmd);
-
-int wmi_set_btcoex_sco_config_cmd(struct wmi_t *wmip,
- WMI_SET_BTCOEX_SCO_CONFIG_CMD * cmd);
-
-int wmi_set_btcoex_a2dp_config_cmd(struct wmi_t *wmip,
- WMI_SET_BTCOEX_A2DP_CONFIG_CMD* cmd);
-
-
-int wmi_set_btcoex_aclcoex_config_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD* cmd);
-
-int wmi_set_btcoex_debug_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_DEBUG_CMD * cmd);
-
-int wmi_set_btcoex_bt_operating_status_cmd(struct wmi_t * wmip,
- WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMD * cmd);
-
-int wmi_get_btcoex_config_cmd(struct wmi_t * wmip, WMI_GET_BTCOEX_CONFIG_CMD * cmd);
-
-int wmi_get_btcoex_stats_cmd(struct wmi_t * wmip);
-
-int wmi_SGI_cmd(struct wmi_t *wmip, u32 sgiMask, u8 sgiPERThreshold);
-
-/*
- * This function is used to configure the fix rates mask to the target.
- */
-int wmi_set_fixrates_cmd(struct wmi_t *wmip, u32 fixRatesMask);
-int wmi_get_ratemask_cmd(struct wmi_t *wmip);
-
-int wmi_set_authmode_cmd(struct wmi_t *wmip, u8 mode);
-
-int wmi_set_reassocmode_cmd(struct wmi_t *wmip, u8 mode);
-
-int wmi_set_qos_supp_cmd(struct wmi_t *wmip,u8 status);
-int wmi_set_wmm_cmd(struct wmi_t *wmip, WMI_WMM_STATUS status);
-int wmi_set_wmm_txop(struct wmi_t *wmip, WMI_TXOP_CFG txEnable);
-int wmi_set_country(struct wmi_t *wmip, u8 *countryCode);
-
-int wmi_get_keepalive_configured(struct wmi_t *wmip);
-u8 wmi_get_keepalive_cmd(struct wmi_t *wmip);
-int wmi_set_keepalive_cmd(struct wmi_t *wmip, u8 keepaliveInterval);
-
-int wmi_set_appie_cmd(struct wmi_t *wmip, u8 mgmtFrmType,
- u8 ieLen,u8 *ieInfo);
-
-int wmi_set_halparam_cmd(struct wmi_t *wmip, u8 *cmd, u16 dataLen);
-
-s32 wmi_get_rate(s8 rateindex);
-
-int wmi_set_ip_cmd(struct wmi_t *wmip, WMI_SET_IP_CMD *cmd);
-
-/*Wake on Wireless WMI commands*/
-int wmi_set_host_sleep_mode_cmd(struct wmi_t *wmip, WMI_SET_HOST_SLEEP_MODE_CMD *cmd);
-int wmi_set_wow_mode_cmd(struct wmi_t *wmip, WMI_SET_WOW_MODE_CMD *cmd);
-int wmi_get_wow_list_cmd(struct wmi_t *wmip, WMI_GET_WOW_LIST_CMD *cmd);
-int wmi_add_wow_pattern_cmd(struct wmi_t *wmip,
- WMI_ADD_WOW_PATTERN_CMD *cmd, u8 *pattern, u8 *mask, u8 pattern_size);
-int wmi_del_wow_pattern_cmd(struct wmi_t *wmip,
- WMI_DEL_WOW_PATTERN_CMD *cmd);
-int wmi_set_wsc_status_cmd(struct wmi_t *wmip, u32 status);
-
-int
-wmi_set_params_cmd(struct wmi_t *wmip, u32 opcode, u32 length, char *buffer);
-
-int
-wmi_set_mcast_filter_cmd(struct wmi_t *wmip, u8 dot1, u8 dot2, u8 dot3, u8 dot4);
-
-int
-wmi_del_mcast_filter_cmd(struct wmi_t *wmip, u8 dot1, u8 dot2, u8 dot3, u8 dot4);
-
-int
-wmi_mcast_filter_cmd(struct wmi_t *wmip, u8 enable);
-
-bss_t *
-wmi_find_Ssidnode (struct wmi_t *wmip, u8 *pSsid,
- u32 ssidLength, bool bIsWPA2, bool bMatchSSID);
-
-
-void
-wmi_node_return (struct wmi_t *wmip, bss_t *bss);
-
-void
-wmi_set_nodeage(struct wmi_t *wmip, u32 nodeAge);
-
-#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
-int wmi_prof_cfg_cmd(struct wmi_t *wmip, u32 period, u32 nbins);
-int wmi_prof_addr_set_cmd(struct wmi_t *wmip, u32 addr);
-int wmi_prof_start_cmd(struct wmi_t *wmip);
-int wmi_prof_stop_cmd(struct wmi_t *wmip);
-int wmi_prof_count_get_cmd(struct wmi_t *wmip);
-#endif /* CONFIG_TARGET_PROFILE_SUPPORT */
-#ifdef OS_ROAM_MANAGEMENT
-void wmi_scan_indication (struct wmi_t *wmip);
-#endif
-
-int
-wmi_set_target_event_report_cmd(struct wmi_t *wmip, WMI_SET_TARGET_EVENT_REPORT_CMD* cmd);
-
-bss_t *wmi_rm_current_bss (struct wmi_t *wmip, u8 *id);
-int wmi_add_current_bss (struct wmi_t *wmip, u8 *id, bss_t *bss);
-
-
-/*
- * AP mode
- */
-int
-wmi_ap_profile_commit(struct wmi_t *wmip, WMI_CONNECT_CMD *p);
-
-int
-wmi_ap_set_hidden_ssid(struct wmi_t *wmip, u8 hidden_ssid);
-
-int
-wmi_ap_set_num_sta(struct wmi_t *wmip, u8 num_sta);
-
-int
-wmi_ap_set_acl_policy(struct wmi_t *wmip, u8 policy);
-
-int
-wmi_ap_acl_mac_list(struct wmi_t *wmip, WMI_AP_ACL_MAC_CMD *a);
-
-u8 acl_add_del_mac(WMI_AP_ACL *a, WMI_AP_ACL_MAC_CMD *acl);
-
-int
-wmi_ap_set_mlme(struct wmi_t *wmip, u8 cmd, u8 *mac, u16 reason);
-
-int
-wmi_set_pvb_cmd(struct wmi_t *wmip, u16 aid, bool flag);
-
-int
-wmi_ap_conn_inact_time(struct wmi_t *wmip, u32 period);
-
-int
-wmi_ap_bgscan_time(struct wmi_t *wmip, u32 period, u32 dwell);
-
-int
-wmi_ap_set_dtim(struct wmi_t *wmip, u8 dtim);
-
-int
-wmi_ap_set_rateset(struct wmi_t *wmip, u8 rateset);
-
-int
-wmi_set_ht_cap_cmd(struct wmi_t *wmip, WMI_SET_HT_CAP_CMD *cmd);
-
-int
-wmi_set_ht_op_cmd(struct wmi_t *wmip, u8 sta_chan_width);
-
-int
-wmi_send_hci_cmd(struct wmi_t *wmip, u8 *buf, u16 sz);
-
-int
-wmi_set_tx_select_rates_cmd(struct wmi_t *wmip, u32 *pMaskArray);
-
-int
-wmi_setup_aggr_cmd(struct wmi_t *wmip, u8 tid);
-
-int
-wmi_delete_aggr_cmd(struct wmi_t *wmip, u8 tid, bool uplink);
-
-int
-wmi_allow_aggr_cmd(struct wmi_t *wmip, u16 tx_tidmask, u16 rx_tidmask);
-
-int
-wmi_set_rx_frame_format_cmd(struct wmi_t *wmip, u8 rxMetaVersion, bool rxDot11Hdr, bool defragOnHost);
-
-int
-wmi_set_thin_mode_cmd(struct wmi_t *wmip, bool bThinMode);
-
-int
-wmi_set_wlan_conn_precedence_cmd(struct wmi_t *wmip, BT_WLAN_CONN_PRECEDENCE precedence);
-
-int
-wmi_set_pmk_cmd(struct wmi_t *wmip, u8 *pmk);
-
-int
-wmi_set_excess_tx_retry_thres_cmd(struct wmi_t *wmip, WMI_SET_EXCESS_TX_RETRY_THRES_CMD *cmd);
-
-u16 wmi_ieee2freq (int chan);
-
-u32 wmi_freq2ieee (u16 freq);
-
-bss_t *
-wmi_find_matching_Ssidnode (struct wmi_t *wmip, u8 *pSsid,
- u32 ssidLength,
- u32 dot11AuthMode, u32 authMode,
- u32 pairwiseCryptoType, u32 grpwiseCryptoTyp);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _WMI_API_H_ */
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kconfig.c b/drivers/staging/ath6kl/miscdrv/ar3kconfig.c
deleted file mode 100644
index e0ea2183019..00000000000
--- a/drivers/staging/ath6kl/miscdrv/ar3kconfig.c
+++ /dev/null
@@ -1,565 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// AR3K configuration implementation
-//
-// Author(s): ="Atheros"
-//==============================================================================
-
-#include "a_config.h"
-#include "athdefs.h"
-#include "a_osapi.h"
-#define ATH_MODULE_NAME misc
-#include "a_debug.h"
-#include "common_drv.h"
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-#include "export_hci_transport.h"
-#else
-#include "hci_transport_api.h"
-#endif
-#include "ar3kconfig.h"
-#include "tlpm.h"
-
-#define BAUD_CHANGE_COMMAND_STATUS_OFFSET 5
-#define HCI_EVENT_RESP_TIMEOUTMS 3000
-#define HCI_CMD_OPCODE_BYTE_LOW_OFFSET 0
-#define HCI_CMD_OPCODE_BYTE_HI_OFFSET 1
-#define HCI_EVENT_OPCODE_BYTE_LOW 3
-#define HCI_EVENT_OPCODE_BYTE_HI 4
-#define HCI_CMD_COMPLETE_EVENT_CODE 0xE
-#define HCI_MAX_EVT_RECV_LENGTH 257
-#define EXIT_MIN_BOOT_COMMAND_STATUS_OFFSET 5
-
-int AthPSInitialize(struct ar3k_config_info *hdev);
-
-static int SendHCICommand(struct ar3k_config_info *pConfig,
- u8 *pBuffer,
- int Length)
-{
- struct htc_packet *pPacket = NULL;
- int status = 0;
-
- do {
-
- pPacket = (struct htc_packet *)A_MALLOC(sizeof(struct htc_packet));
- if (NULL == pPacket) {
- status = A_NO_MEMORY;
- break;
- }
-
- A_MEMZERO(pPacket,sizeof(struct htc_packet));
- SET_HTC_PACKET_INFO_TX(pPacket,
- NULL,
- pBuffer,
- Length,
- HCI_COMMAND_TYPE,
- AR6K_CONTROL_PKT_TAG);
-
- /* issue synchronously */
- status = HCI_TransportSendPkt(pConfig->pHCIDev,pPacket,true);
-
- } while (false);
-
- if (pPacket != NULL) {
- kfree(pPacket);
- }
-
- return status;
-}
-
-static int RecvHCIEvent(struct ar3k_config_info *pConfig,
- u8 *pBuffer,
- int *pLength)
-{
- int status = 0;
- struct htc_packet *pRecvPacket = NULL;
-
- do {
-
- pRecvPacket = (struct htc_packet *)A_MALLOC(sizeof(struct htc_packet));
- if (NULL == pRecvPacket) {
- status = A_NO_MEMORY;
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to alloc HTC struct \n"));
- break;
- }
-
- A_MEMZERO(pRecvPacket,sizeof(struct htc_packet));
-
- SET_HTC_PACKET_INFO_RX_REFILL(pRecvPacket,NULL,pBuffer,*pLength,HCI_EVENT_TYPE);
-
- status = HCI_TransportRecvHCIEventSync(pConfig->pHCIDev,
- pRecvPacket,
- HCI_EVENT_RESP_TIMEOUTMS);
- if (status) {
- break;
- }
-
- *pLength = pRecvPacket->ActualLength;
-
- } while (false);
-
- if (pRecvPacket != NULL) {
- kfree(pRecvPacket);
- }
-
- return status;
-}
-
-int SendHCICommandWaitCommandComplete(struct ar3k_config_info *pConfig,
- u8 *pHCICommand,
- int CmdLength,
- u8 **ppEventBuffer,
- u8 **ppBufferToFree)
-{
- int status = 0;
- u8 *pBuffer = NULL;
- u8 *pTemp;
- int length;
- bool commandComplete = false;
- u8 opCodeBytes[2];
-
- do {
-
- length = max(HCI_MAX_EVT_RECV_LENGTH,CmdLength);
- length += pConfig->pHCIProps->HeadRoom + pConfig->pHCIProps->TailRoom;
- length += pConfig->pHCIProps->IOBlockPad;
-
- pBuffer = (u8 *)A_MALLOC(length);
- if (NULL == pBuffer) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: Failed to allocate bt buffer \n"));
- status = A_NO_MEMORY;
- break;
- }
-
- /* get the opcodes to check the command complete event */
- opCodeBytes[0] = pHCICommand[HCI_CMD_OPCODE_BYTE_LOW_OFFSET];
- opCodeBytes[1] = pHCICommand[HCI_CMD_OPCODE_BYTE_HI_OFFSET];
-
- /* copy HCI command */
- memcpy(pBuffer + pConfig->pHCIProps->HeadRoom,pHCICommand,CmdLength);
- /* send command */
- status = SendHCICommand(pConfig,
- pBuffer + pConfig->pHCIProps->HeadRoom,
- CmdLength);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: Failed to send HCI Command (%d) \n", status));
- AR_DEBUG_PRINTBUF(pHCICommand,CmdLength,"HCI Bridge Failed HCI Command");
- break;
- }
-
- /* reuse buffer to capture command complete event */
- A_MEMZERO(pBuffer,length);
- status = RecvHCIEvent(pConfig,pBuffer,&length);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: HCI event recv failed \n"));
- AR_DEBUG_PRINTBUF(pHCICommand,CmdLength,"HCI Bridge Failed HCI Command");
- break;
- }
-
- pTemp = pBuffer + pConfig->pHCIProps->HeadRoom;
- if (pTemp[0] == HCI_CMD_COMPLETE_EVENT_CODE) {
- if ((pTemp[HCI_EVENT_OPCODE_BYTE_LOW] == opCodeBytes[0]) &&
- (pTemp[HCI_EVENT_OPCODE_BYTE_HI] == opCodeBytes[1])) {
- commandComplete = true;
- }
- }
-
- if (!commandComplete) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: Unexpected HCI event : %d \n",pTemp[0]));
- AR_DEBUG_PRINTBUF(pTemp,pTemp[1],"Unexpected HCI event");
- status = A_ECOMM;
- break;
- }
-
- if (ppEventBuffer != NULL) {
- /* caller wants to look at the event */
- *ppEventBuffer = pTemp;
- if (ppBufferToFree == NULL) {
- status = A_EINVAL;
- break;
- }
- /* caller must free the buffer */
- *ppBufferToFree = pBuffer;
- pBuffer = NULL;
- }
-
- } while (false);
-
- if (pBuffer != NULL) {
- kfree(pBuffer);
- }
-
- return status;
-}
-
-static int AR3KConfigureHCIBaud(struct ar3k_config_info *pConfig)
-{
- int status = 0;
- u8 hciBaudChangeCommand[] = {0x0c,0xfc,0x2,0,0};
- u16 baudVal;
- u8 *pEvent = NULL;
- u8 *pBufferToFree = NULL;
-
- do {
-
- if (pConfig->Flags & AR3K_CONFIG_FLAG_SET_AR3K_BAUD) {
- baudVal = (u16)(pConfig->AR3KBaudRate / 100);
- hciBaudChangeCommand[3] = (u8)baudVal;
- hciBaudChangeCommand[4] = (u8)(baudVal >> 8);
-
- status = SendHCICommandWaitCommandComplete(pConfig,
- hciBaudChangeCommand,
- sizeof(hciBaudChangeCommand),
- &pEvent,
- &pBufferToFree);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: Baud rate change failed! \n"));
- break;
- }
-
- if (pEvent[BAUD_CHANGE_COMMAND_STATUS_OFFSET] != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("AR3K Config: Baud change command event status failed: %d \n",
- pEvent[BAUD_CHANGE_COMMAND_STATUS_OFFSET]));
- status = A_ECOMM;
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("AR3K Config: Baud Changed to %d \n",pConfig->AR3KBaudRate));
- }
-
- if (pConfig->Flags & AR3K_CONFIG_FLAG_AR3K_BAUD_CHANGE_DELAY) {
- /* some versions of AR3K do not switch baud immediately, up to 300MS */
- A_MDELAY(325);
- }
-
- if (pConfig->Flags & AR3K_CONFIG_FLAG_SET_AR6K_SCALE_STEP) {
- /* Tell target to change UART baud rate for AR6K */
- status = HCI_TransportSetBaudRate(pConfig->pHCIDev, pConfig->AR3KBaudRate);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("AR3K Config: failed to set scale and step values: %d \n", status));
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
- ("AR3K Config: Baud changed to %d for AR6K\n", pConfig->AR3KBaudRate));
- }
-
- } while (false);
-
- if (pBufferToFree != NULL) {
- kfree(pBufferToFree);
- }
-
- return status;
-}
-
-static int AR3KExitMinBoot(struct ar3k_config_info *pConfig)
-{
- int status;
- char exitMinBootCmd[] = {0x25,0xFC,0x0c,0x03,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00};
- u8 *pEvent = NULL;
- u8 *pBufferToFree = NULL;
-
- status = SendHCICommandWaitCommandComplete(pConfig,
- exitMinBootCmd,
- sizeof(exitMinBootCmd),
- &pEvent,
- &pBufferToFree);
-
- if (!status) {
- if (pEvent[EXIT_MIN_BOOT_COMMAND_STATUS_OFFSET] != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("AR3K Config: MinBoot exit command event status failed: %d \n",
- pEvent[EXIT_MIN_BOOT_COMMAND_STATUS_OFFSET]));
- status = A_ECOMM;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("AR3K Config: MinBoot Exit Command Complete (Success) \n"));
- A_MDELAY(1);
- }
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: MinBoot Exit Failed! \n"));
- }
-
- if (pBufferToFree != NULL) {
- kfree(pBufferToFree);
- }
-
- return status;
-}
-
-static int AR3KConfigureSendHCIReset(struct ar3k_config_info *pConfig)
-{
- int status = 0;
- u8 hciResetCommand[] = {0x03,0x0c,0x0};
- u8 *pEvent = NULL;
- u8 *pBufferToFree = NULL;
-
- status = SendHCICommandWaitCommandComplete( pConfig,
- hciResetCommand,
- sizeof(hciResetCommand),
- &pEvent,
- &pBufferToFree );
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: HCI reset failed! \n"));
- }
-
- if (pBufferToFree != NULL) {
- kfree(pBufferToFree);
- }
-
- return status;
-}
-
-static int AR3KEnableTLPM(struct ar3k_config_info *pConfig)
-{
- int status;
- /* AR3K vendor specific command for Host Wakeup Config */
- char hostWakeupConfig[] = {0x31,0xFC,0x18,
- 0x02,0x00,0x00,0x00,
- 0x01,0x00,0x00,0x00,
- TLPM_DEFAULT_IDLE_TIMEOUT_LSB,TLPM_DEFAULT_IDLE_TIMEOUT_MSB,0x00,0x00, //idle timeout in ms
- 0x00,0x00,0x00,0x00,
- TLPM_DEFAULT_WAKEUP_TIMEOUT_MS,0x00,0x00,0x00, //wakeup timeout in ms
- 0x00,0x00,0x00,0x00};
- /* AR3K vendor specific command for Target Wakeup Config */
- char targetWakeupConfig[] = {0x31,0xFC,0x18,
- 0x04,0x00,0x00,0x00,
- 0x01,0x00,0x00,0x00,
- TLPM_DEFAULT_IDLE_TIMEOUT_LSB,TLPM_DEFAULT_IDLE_TIMEOUT_MSB,0x00,0x00, //idle timeout in ms
- 0x00,0x00,0x00,0x00,
- TLPM_DEFAULT_WAKEUP_TIMEOUT_MS,0x00,0x00,0x00, //wakeup timeout in ms
- 0x00,0x00,0x00,0x00};
- /* AR3K vendor specific command for Host Wakeup Enable */
- char hostWakeupEnable[] = {0x31,0xFC,0x4,
- 0x01,0x00,0x00,0x00};
- /* AR3K vendor specific command for Target Wakeup Enable */
- char targetWakeupEnable[] = {0x31,0xFC,0x4,
- 0x06,0x00,0x00,0x00};
- /* AR3K vendor specific command for Sleep Enable */
- char sleepEnable[] = {0x4,0xFC,0x1,
- 0x1};
- u8 *pEvent = NULL;
- u8 *pBufferToFree = NULL;
-
- if (0 != pConfig->IdleTimeout) {
- u8 idle_lsb = pConfig->IdleTimeout & 0xFF;
- u8 idle_msb = (pConfig->IdleTimeout & 0xFF00) >> 8;
- hostWakeupConfig[11] = targetWakeupConfig[11] = idle_lsb;
- hostWakeupConfig[12] = targetWakeupConfig[12] = idle_msb;
- }
-
- if (0 != pConfig->WakeupTimeout) {
- hostWakeupConfig[19] = targetWakeupConfig[19] = (pConfig->WakeupTimeout & 0xFF);
- }
-
- status = SendHCICommandWaitCommandComplete(pConfig,
- hostWakeupConfig,
- sizeof(hostWakeupConfig),
- &pEvent,
- &pBufferToFree);
- if (pBufferToFree != NULL) {
- kfree(pBufferToFree);
- }
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HostWakeup Config Failed! \n"));
- return status;
- }
-
- pEvent = NULL;
- pBufferToFree = NULL;
- status = SendHCICommandWaitCommandComplete(pConfig,
- targetWakeupConfig,
- sizeof(targetWakeupConfig),
- &pEvent,
- &pBufferToFree);
- if (pBufferToFree != NULL) {
- kfree(pBufferToFree);
- }
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Target Wakeup Config Failed! \n"));
- return status;
- }
-
- pEvent = NULL;
- pBufferToFree = NULL;
- status = SendHCICommandWaitCommandComplete(pConfig,
- hostWakeupEnable,
- sizeof(hostWakeupEnable),
- &pEvent,
- &pBufferToFree);
- if (pBufferToFree != NULL) {
- kfree(pBufferToFree);
- }
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HostWakeup Enable Failed! \n"));
- return status;
- }
-
- pEvent = NULL;
- pBufferToFree = NULL;
- status = SendHCICommandWaitCommandComplete(pConfig,
- targetWakeupEnable,
- sizeof(targetWakeupEnable),
- &pEvent,
- &pBufferToFree);
- if (pBufferToFree != NULL) {
- kfree(pBufferToFree);
- }
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Target Wakeup Enable Failed! \n"));
- return status;
- }
-
- pEvent = NULL;
- pBufferToFree = NULL;
- status = SendHCICommandWaitCommandComplete(pConfig,
- sleepEnable,
- sizeof(sleepEnable),
- &pEvent,
- &pBufferToFree);
- if (pBufferToFree != NULL) {
- kfree(pBufferToFree);
- }
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Sleep Enable Failed! \n"));
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR3K Config: Enable TLPM Completed (status = %d) \n",status));
-
- return status;
-}
-
-int AR3KConfigure(struct ar3k_config_info *pConfig)
-{
- int status = 0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR3K Config: Configuring AR3K ...\n"));
-
- do {
-
- if ((pConfig->pHCIDev == NULL) || (pConfig->pHCIProps == NULL) || (pConfig->pHIFDevice == NULL)) {
- status = A_EINVAL;
- break;
- }
-
- /* disable asynchronous recv while we issue commands and receive events synchronously */
- status = HCI_TransportEnableDisableAsyncRecv(pConfig->pHCIDev,false);
- if (status) {
- break;
- }
-
- if (pConfig->Flags & AR3K_CONFIG_FLAG_FORCE_MINBOOT_EXIT) {
- status = AR3KExitMinBoot(pConfig);
- if (status) {
- break;
- }
- }
-
-
- /* Load patching and PST file if available*/
- if (0 != AthPSInitialize(pConfig)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Patch Download Failed!\n"));
- }
-
- /* Send HCI reset to make PS tags take effect*/
- AR3KConfigureSendHCIReset(pConfig);
-
- if (pConfig->Flags &
- (AR3K_CONFIG_FLAG_SET_AR3K_BAUD | AR3K_CONFIG_FLAG_SET_AR6K_SCALE_STEP)) {
- status = AR3KConfigureHCIBaud(pConfig);
- if (status) {
- break;
- }
- }
-
-
-
- if (pConfig->PwrMgmtEnabled) {
- /* the delay is required after the previous HCI reset before further
- * HCI commands can be issued
- */
- A_MDELAY(200);
- AR3KEnableTLPM(pConfig);
- }
-
- /* re-enable asynchronous recv */
- status = HCI_TransportEnableDisableAsyncRecv(pConfig->pHCIDev,true);
- if (status) {
- break;
- }
-
-
- } while (false);
-
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR3K Config: Configuration Complete (status = %d) \n",status));
-
- return status;
-}
-
-int AR3KConfigureExit(void *config)
-{
- int status = 0;
- struct ar3k_config_info *pConfig = (struct ar3k_config_info *)config;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR3K Config: Cleaning up AR3K ...\n"));
-
- do {
-
- if ((pConfig->pHCIDev == NULL) || (pConfig->pHCIProps == NULL) || (pConfig->pHIFDevice == NULL)) {
- status = A_EINVAL;
- break;
- }
-
- /* disable asynchronous recv while we issue commands and receive events synchronously */
- status = HCI_TransportEnableDisableAsyncRecv(pConfig->pHCIDev,false);
- if (status) {
- break;
- }
-
- if (pConfig->Flags &
- (AR3K_CONFIG_FLAG_SET_AR3K_BAUD | AR3K_CONFIG_FLAG_SET_AR6K_SCALE_STEP)) {
- status = AR3KConfigureHCIBaud(pConfig);
- if (status) {
- break;
- }
- }
-
- /* re-enable asynchronous recv */
- status = HCI_TransportEnableDisableAsyncRecv(pConfig->pHCIDev,true);
- if (status) {
- break;
- }
-
-
- } while (false);
-
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR3K Config: Cleanup Complete (status = %d) \n",status));
-
- return status;
-}
-
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
deleted file mode 100644
index 282ceac597b..00000000000
--- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
+++ /dev/null
@@ -1,572 +0,0 @@
-/*
- * Copyright (c) 2004-2010 Atheros Communications Inc.
- * All rights reserved.
- *
- * This file implements the Atheros PS and patch downloaded for HCI UART Transport driver.
- * This file can be used for HCI SDIO transport implementation for AR6002 with HCI_TRANSPORT_SDIO
- * defined.
- *
- *
- * ar3kcpsconfig.c
- *
- *
- *
- * The software source and binaries included in this development package are
- * licensed, not sold. You, or your company, received the package under one
- * or more license agreements. The rights granted to you are specifically
- * listed in these license agreement(s). All other rights remain with Atheros
- * Communications, Inc., its subsidiaries, or the respective owner including
- * those listed on the included copyright notices.. Distribution of any
- * portion of this package must be in strict compliance with the license
- * agreement(s) terms.
- *
- *
- *
- */
-
-
-
-#include "ar3kpsconfig.h"
-#ifndef HCI_TRANSPORT_SDIO
-#include "hci_ath.h"
-#include "hci_uart.h"
-#endif /* #ifndef HCI_TRANSPORT_SDIO */
-
-#define MAX_FW_PATH_LEN 50
-#define MAX_BDADDR_FORMAT_LENGTH 30
-
-/*
- * Structure used to send HCI packet, hci packet length and device info
- * together as parameter to PSThread.
- */
-typedef struct {
-
- struct ps_cmd_packet *HciCmdList;
- u32 num_packets;
- struct ar3k_config_info *dev;
-}HciCommandListParam;
-
-int SendHCICommandWaitCommandComplete(struct ar3k_config_info *pConfig,
- u8 *pHCICommand,
- int CmdLength,
- u8 **ppEventBuffer,
- u8 **ppBufferToFree);
-
-u32 Rom_Version;
-u32 Build_Version;
-extern bool BDADDR;
-
-int getDeviceType(struct ar3k_config_info *pConfig, u32 *code);
-int ReadVersionInfo(struct ar3k_config_info *pConfig);
-#ifndef HCI_TRANSPORT_SDIO
-
-DECLARE_WAIT_QUEUE_HEAD(PsCompleteEvent);
-DECLARE_WAIT_QUEUE_HEAD(HciEvent);
-u8 *HciEventpacket;
-rwlock_t syncLock;
-wait_queue_t Eventwait;
-
-int PSHciWritepacket(struct hci_dev*,u8* Data, u32 len);
-extern char *bdaddr;
-#endif /* HCI_TRANSPORT_SDIO */
-
-int write_bdaddr(struct ar3k_config_info *pConfig,u8 *bdaddr,int type);
-
-int PSSendOps(void *arg);
-
-#ifdef BT_PS_DEBUG
-void Hci_log(u8 * log_string,u8 *data,u32 len)
-{
- int i;
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s : ",log_string));
- for (i = 0; i < len; i++) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("0x%02x ", data[i]));
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("\n...................................\n"));
-}
-#else
-#define Hci_log(string,data,len)
-#endif /* BT_PS_DEBUG */
-
-
-
-
-int AthPSInitialize(struct ar3k_config_info *hdev)
-{
- int status = 0;
- if(hdev == NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Invalid Device handle received\n"));
- return A_ERROR;
- }
-
-#ifndef HCI_TRANSPORT_SDIO
- DECLARE_WAITQUEUE(wait, current);
-#endif /* HCI_TRANSPORT_SDIO */
-
-
-#ifdef HCI_TRANSPORT_SDIO
- status = PSSendOps((void*)hdev);
-#else
- if(InitPSState(hdev) == -1) {
- return A_ERROR;
- }
- allow_signal(SIGKILL);
- add_wait_queue(&PsCompleteEvent,&wait);
- set_current_state(TASK_INTERRUPTIBLE);
- if(!kernel_thread(PSSendOps,(void*)hdev,CLONE_FS|CLONE_FILES|CLONE_SIGHAND|SIGCHLD)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Kthread Failed\n"));
- remove_wait_queue(&PsCompleteEvent,&wait);
- return A_ERROR;
- }
- wait_event_interruptible(PsCompleteEvent,(PSTagMode == false));
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&PsCompleteEvent,&wait);
-
-#endif /* HCI_TRANSPORT_SDIO */
-
-
- return status;
-
-}
-
-int PSSendOps(void *arg)
-{
- int i;
- int status = 0;
- struct ps_cmd_packet *HciCmdList; /* List storing the commands */
- const struct firmware* firmware;
- u32 numCmds;
- u8 *event;
- u8 *bufferToFree;
- struct hci_dev *device;
- u8 *buffer;
- u32 len;
- u32 DevType;
- u8 *PsFileName;
- u8 *patchFileName;
- u8 *path = NULL;
- u8 *config_path = NULL;
- u8 config_bdaddr[MAX_BDADDR_FORMAT_LENGTH];
- struct ar3k_config_info *hdev = (struct ar3k_config_info*)arg;
- struct device *firmwareDev = NULL;
- status = 0;
- HciCmdList = NULL;
-#ifdef HCI_TRANSPORT_SDIO
- device = hdev->pBtStackHCIDev;
- firmwareDev = device->parent;
-#else
- device = hdev;
- firmwareDev = &device->dev;
- AthEnableSyncCommandOp(true);
-#endif /* HCI_TRANSPORT_SDIO */
- /* First verify if the controller is an FPGA or ASIC, so depending on the device type the PS file to be written will be different.
- */
-
- path =(u8 *)A_MALLOC(MAX_FW_PATH_LEN);
- if(path == NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Malloc failed to allocate %d bytes for path\n", MAX_FW_PATH_LEN));
- goto complete;
- }
- config_path = (u8 *) A_MALLOC(MAX_FW_PATH_LEN);
- if(config_path == NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Malloc failed to allocate %d bytes for config_path\n", MAX_FW_PATH_LEN));
- goto complete;
- }
-
- if(A_ERROR == getDeviceType(hdev,&DevType)) {
- status = 1;
- goto complete;
- }
- if(A_ERROR == ReadVersionInfo(hdev)) {
- status = 1;
- goto complete;
- }
-
- patchFileName = PATCH_FILE;
- snprintf(path, MAX_FW_PATH_LEN, "%s/%xcoex/",CONFIG_PATH,Rom_Version);
- if(DevType){
- if(DevType == 0xdeadc0de){
- PsFileName = PS_ASIC_FILE;
- } else{
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" FPGA Test Image : %x %x \n",Rom_Version,Build_Version));
- if((Rom_Version == 0x99999999) && (Build_Version == 1)){
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("FPGA Test Image : Skipping Patch File load\n"));
- patchFileName = NULL;
- }
- PsFileName = PS_FPGA_FILE;
- }
- }
- else{
- PsFileName = PS_ASIC_FILE;
- }
-
- snprintf(config_path, MAX_FW_PATH_LEN, "%s%s",path,PsFileName);
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%x: FPGA/ASIC PS File Name %s\n", DevType,config_path));
- /* Read the PS file to a dynamically allocated buffer */
- if(A_REQUEST_FIRMWARE(&firmware,config_path,firmwareDev) < 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: firmware file open error\n", __FUNCTION__ ));
- status = 1;
- goto complete;
-
- }
- if(NULL == firmware || firmware->size == 0) {
- status = 1;
- goto complete;
- }
- buffer = (u8 *)A_MALLOC(firmware->size);
- if(buffer != NULL) {
- /* Copy the read file to a local Dynamic buffer */
- memcpy(buffer,firmware->data,firmware->size);
- len = firmware->size;
- A_RELEASE_FIRMWARE(firmware);
- /* Parse the PS buffer to a global variable */
- status = AthDoParsePS(buffer,len);
- kfree(buffer);
- } else {
- A_RELEASE_FIRMWARE(firmware);
- }
-
-
- /* Read the patch file to a dynamically allocated buffer */
- if(patchFileName != NULL)
- snprintf(config_path,
- MAX_FW_PATH_LEN, "%s%s",path,patchFileName);
- else {
- status = 0;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Patch File Name %s\n", config_path));
- if((patchFileName == NULL) || (A_REQUEST_FIRMWARE(&firmware,config_path,firmwareDev) < 0)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: firmware file open error\n", __FUNCTION__ ));
- /*
- * It is not necessary that Patch file be available, continue with PS Operations if.
- * failed.
- */
- status = 0;
-
- } else {
- if(NULL == firmware || firmware->size == 0) {
- status = 0;
- } else {
- buffer = (u8 *)A_MALLOC(firmware->size);
- if(buffer != NULL) {
- /* Copy the read file to a local Dynamic buffer */
- memcpy(buffer,firmware->data,firmware->size);
- len = firmware->size;
- A_RELEASE_FIRMWARE(firmware);
- /* parse and store the Patch file contents to a global variables */
- status = AthDoParsePatch(buffer,len);
- kfree(buffer);
- } else {
- A_RELEASE_FIRMWARE(firmware);
- }
- }
- }
-
- /* Create an HCI command list from the parsed PS and patch information */
- AthCreateCommandList(&HciCmdList,&numCmds);
-
- /* Form the parameter for PSSendOps() API */
-
-
- /*
- * First Send the CRC packet,
- * We have to continue with the PS operations only if the CRC packet has been replied with
- * a Command complete event with status Error.
- */
-
- if(SendHCICommandWaitCommandComplete
- (hdev,
- HciCmdList[0].Hcipacket,
- HciCmdList[0].packetLen,
- &event,
- &bufferToFree) == 0) {
- if(ReadPSEvent(event) == 0) { /* Exit if the status is success */
- if(bufferToFree != NULL) {
- kfree(bufferToFree);
- }
-
-#ifndef HCI_TRANSPORT_SDIO
- if(bdaddr && bdaddr[0] !='\0') {
- write_bdaddr(hdev,bdaddr,BDADDR_TYPE_STRING);
- }
-#endif
- status = 1;
- goto complete;
- }
- if(bufferToFree != NULL) {
- kfree(bufferToFree);
- }
- } else {
- status = 0;
- goto complete;
- }
-
- for(i = 1; i <numCmds; i++) {
-
- if(SendHCICommandWaitCommandComplete
- (hdev,
- HciCmdList[i].Hcipacket,
- HciCmdList[i].packetLen,
- &event,
- &bufferToFree) == 0) {
- if(ReadPSEvent(event) != 0) { /* Exit if the status is success */
- if(bufferToFree != NULL) {
- kfree(bufferToFree);
- }
- status = 1;
- goto complete;
- }
- if(bufferToFree != NULL) {
- kfree(bufferToFree);
- }
- } else {
- status = 0;
- goto complete;
- }
- }
-#ifdef HCI_TRANSPORT_SDIO
- if(BDADDR == false)
- if(hdev->bdaddr[0] !=0x00 ||
- hdev->bdaddr[1] !=0x00 ||
- hdev->bdaddr[2] !=0x00 ||
- hdev->bdaddr[3] !=0x00 ||
- hdev->bdaddr[4] !=0x00 ||
- hdev->bdaddr[5] !=0x00)
- write_bdaddr(hdev,hdev->bdaddr,BDADDR_TYPE_HEX);
-
-#ifndef HCI_TRANSPORT_SDIO
-
- if(bdaddr && bdaddr[0] != '\0') {
- write_bdaddr(hdev,bdaddr,BDADDR_TYPE_STRING);
- } else
-#endif /* HCI_TRANSPORT_SDIO */
- /* Write BDADDR Read from OTP here */
-
-
-
-#endif
-
- {
- /* Read Contents of BDADDR file if user has not provided any option */
- snprintf(config_path,MAX_FW_PATH_LEN, "%s%s",path,BDADDR_FILE);
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Patch File Name %s\n", config_path));
- if(A_REQUEST_FIRMWARE(&firmware,config_path,firmwareDev) < 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: firmware file open error\n", __FUNCTION__ ));
- status = 1;
- goto complete;
- }
- if(NULL == firmware || firmware->size == 0) {
- status = 1;
- goto complete;
- }
- len = min_t(size_t, firmware->size, MAX_BDADDR_FORMAT_LENGTH - 1);
- memcpy(config_bdaddr, firmware->data, len);
- config_bdaddr[len] = '\0';
- write_bdaddr(hdev,config_bdaddr,BDADDR_TYPE_STRING);
- A_RELEASE_FIRMWARE(firmware);
- }
-complete:
-#ifndef HCI_TRANSPORT_SDIO
- AthEnableSyncCommandOp(false);
- PSTagMode = false;
- wake_up_interruptible(&PsCompleteEvent);
-#endif /* HCI_TRANSPORT_SDIO */
- if(NULL != HciCmdList) {
- AthFreeCommandList(&HciCmdList,numCmds);
- }
- if(path) {
- kfree(path);
- }
- if(config_path) {
- kfree(config_path);
- }
- return status;
-}
-#ifndef HCI_TRANSPORT_SDIO
-/*
- * This API is used to send the HCI command to controller and return
- * with a HCI Command Complete event.
- * For HCI SDIO transport, this will be internally defined.
- */
-int SendHCICommandWaitCommandComplete(struct ar3k_config_info *pConfig,
- u8 *pHCICommand,
- int CmdLength,
- u8 **ppEventBuffer,
- u8 **ppBufferToFree)
-{
- if(CmdLength == 0) {
- return A_ERROR;
- }
- Hci_log("COM Write -->",pHCICommand,CmdLength);
- PSAcked = false;
- if(PSHciWritepacket(pConfig,pHCICommand,CmdLength) == 0) {
- /* If the controller is not available, return Error */
- return A_ERROR;
- }
- //add_timer(&psCmdTimer);
- wait_event_interruptible(HciEvent,(PSAcked == true));
- if(NULL != HciEventpacket) {
- *ppEventBuffer = HciEventpacket;
- *ppBufferToFree = HciEventpacket;
- } else {
- /* Did not get an event from controller. return error */
- *ppBufferToFree = NULL;
- return A_ERROR;
- }
-
- return 0;
-}
-#endif /* HCI_TRANSPORT_SDIO */
-
-int ReadPSEvent(u8* Data){
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" PS Event %x %x %x\n",Data[4],Data[5],Data[3]));
-
- if(Data[4] == 0xFC && Data[5] == 0x00)
- {
- switch(Data[3]){
- case 0x0B:
- return 0;
- break;
- case 0x0C:
- /* Change Baudrate */
- return 0;
- break;
- case 0x04:
- return 0;
- break;
- case 0x1E:
- Rom_Version = Data[9];
- Rom_Version = ((Rom_Version << 8) |Data[8]);
- Rom_Version = ((Rom_Version << 8) |Data[7]);
- Rom_Version = ((Rom_Version << 8) |Data[6]);
-
- Build_Version = Data[13];
- Build_Version = ((Build_Version << 8) |Data[12]);
- Build_Version = ((Build_Version << 8) |Data[11]);
- Build_Version = ((Build_Version << 8) |Data[10]);
- return 0;
- break;
-
-
- }
- }
-
- return A_ERROR;
-}
-int str2ba(unsigned char *str_bdaddr,unsigned char *bdaddr)
-{
- unsigned char bdbyte[3];
- unsigned char *str_byte = str_bdaddr;
- int i,j;
- unsigned char colon_present = 0;
-
- if(NULL != strstr(str_bdaddr,":")) {
- colon_present = 1;
- }
-
-
- bdbyte[2] = '\0';
-
- for( i = 0,j = 5; i < 6; i++, j--) {
- bdbyte[0] = str_byte[0];
- bdbyte[1] = str_byte[1];
- bdaddr[j] = A_STRTOL(bdbyte,NULL,16);
- if(colon_present == 1) {
- str_byte+=3;
- } else {
- str_byte+=2;
- }
- }
- return 0;
-}
-
-int write_bdaddr(struct ar3k_config_info *pConfig,u8 *bdaddr,int type)
-{
- u8 bdaddr_cmd[] = { 0x0B, 0xFC, 0x0A, 0x01, 0x01,
- 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
-
- u8 *event;
- u8 *bufferToFree = NULL;
- int result = A_ERROR;
- int inc,outc;
-
- if (type == BDADDR_TYPE_STRING)
- str2ba(bdaddr,&bdaddr_cmd[7]);
- else {
- /* Bdaddr has to be sent as LAP first */
- for(inc = 5 ,outc = 7; inc >=0; inc--, outc++)
- bdaddr_cmd[outc] = bdaddr[inc];
- }
-
- if(0 == SendHCICommandWaitCommandComplete(pConfig,bdaddr_cmd,
- sizeof(bdaddr_cmd),
- &event,&bufferToFree)) {
-
- if(event[4] == 0xFC && event[5] == 0x00){
- if(event[3] == 0x0B){
- result = 0;
- }
- }
-
- }
- if(bufferToFree != NULL) {
- kfree(bufferToFree);
- }
- return result;
-
-}
-int ReadVersionInfo(struct ar3k_config_info *pConfig)
-{
- u8 hciCommand[] = {0x1E,0xfc,0x00};
- u8 *event;
- u8 *bufferToFree = NULL;
- int result = A_ERROR;
- if(0 == SendHCICommandWaitCommandComplete(pConfig,hciCommand,sizeof(hciCommand),&event,&bufferToFree)) {
- result = ReadPSEvent(event);
-
- }
- if(bufferToFree != NULL) {
- kfree(bufferToFree);
- }
- return result;
-}
-int getDeviceType(struct ar3k_config_info *pConfig, u32 *code)
-{
- u8 hciCommand[] = {0x05,0xfc,0x05,0x00,0x00,0x00,0x00,0x04};
- u8 *event;
- u8 *bufferToFree = NULL;
- u32 reg;
- int result = A_ERROR;
- *code = 0;
- hciCommand[3] = (u8)(FPGA_REGISTER & 0xFF);
- hciCommand[4] = (u8)((FPGA_REGISTER >> 8) & 0xFF);
- hciCommand[5] = (u8)((FPGA_REGISTER >> 16) & 0xFF);
- hciCommand[6] = (u8)((FPGA_REGISTER >> 24) & 0xFF);
- if(0 == SendHCICommandWaitCommandComplete(pConfig,hciCommand,sizeof(hciCommand),&event,&bufferToFree)) {
-
- if(event[4] == 0xFC && event[5] == 0x00){
- switch(event[3]){
- case 0x05:
- reg = event[9];
- reg = ((reg << 8) |event[8]);
- reg = ((reg << 8) |event[7]);
- reg = ((reg << 8) |event[6]);
- *code = reg;
- result = 0;
-
- break;
- case 0x06:
- //Sleep(500);
- break;
- }
- }
-
- }
- if(bufferToFree != NULL) {
- kfree(bufferToFree);
- }
- return result;
-}
-
-
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.h b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.h
deleted file mode 100644
index d4435130780..00000000000
--- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2004-2010 Atheros Communications Inc.
- * All rights reserved.
- *
- * This file defines the symbols exported by Atheros PS and patch download module.
- * define the constant HCI_TRANSPORT_SDIO if the module is being used for HCI SDIO transport.
- * defined.
- *
- *
- * ar3kcpsconfig.h
- *
- *
- *
- * The software source and binaries included in this development package are
- * licensed, not sold. You, or your company, received the package under one
- * or more license agreements. The rights granted to you are specifically
- * listed in these license agreement(s). All other rights remain with Atheros
- * Communications, Inc., its subsidiaries, or the respective owner including
- * those listed on the included copyright notices.. Distribution of any
- * portion of this package must be in strict compliance with the license
- * agreement(s) terms.
- *
- *
- *
- */
-
-
-
-#ifndef __AR3KPSCONFIG_H
-#define __AR3KPSCONFIG_H
-
-/*
- * Define the flag HCI_TRANSPORT_SDIO and undefine HCI_TRANSPORT_UART if the transport being used is SDIO.
- */
-#undef HCI_TRANSPORT_UART
-
-#include <linux/fs.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-
-
-#include <linux/ioctl.h>
-#include <linux/firmware.h>
-
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-
-#include "ar3kpsparser.h"
-
-#define FPGA_REGISTER 0x4FFC
-#define BDADDR_TYPE_STRING 0
-#define BDADDR_TYPE_HEX 1
-#define CONFIG_PATH "ar3k"
-
-#define PS_ASIC_FILE "PS_ASIC.pst"
-#define PS_FPGA_FILE "PS_FPGA.pst"
-
-#define PATCH_FILE "RamPatch.txt"
-#define BDADDR_FILE "ar3kbdaddr.pst"
-
-#define ROM_VER_AR3001_3_1_0 30000
-#define ROM_VER_AR3001_3_1_1 30101
-
-
-#ifndef HCI_TRANSPORT_SDIO
-#define struct ar3k_config_info struct hci_dev
-extern wait_queue_head_t HciEvent;
-extern wait_queue_t Eventwait;
-extern u8 *HciEventpacket;
-#endif /* #ifndef HCI_TRANSPORT_SDIO */
-
-int AthPSInitialize(struct ar3k_config_info *hdev);
-int ReadPSEvent(u8* Data);
-#endif /* __AR3KPSCONFIG_H */
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
deleted file mode 100644
index b99a11a9dd6..00000000000
--- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
+++ /dev/null
@@ -1,969 +0,0 @@
-/*
- * Copyright (c) 2004-2010 Atheros Communications Inc.
- * All rights reserved.
- *
- * This file implements the Atheros PS and patch parser.
- * It implements APIs to parse data buffer with patch and PS information and convert it to HCI commands.
- *
- *
- *
- * ar3kpsparser.c
- *
- *
- *
- * The software source and binaries included in this development package are
- * licensed, not sold. You, or your company, received the package under one
- * or more license agreements. The rights granted to you are specifically
- * listed in these license agreement(s). All other rights remain with Atheros
- * Communications, Inc., its subsidiaries, or the respective owner including
- * those listed on the included copyright notices.. Distribution of any
- * portion of this package must be in strict compliance with the license
- * agreement(s) terms.
- *
- *
- *
- */
-
-
-#include "ar3kpsparser.h"
-
-#include <linux/ctype.h>
-#include <linux/kernel.h>
-
-#define BD_ADDR_SIZE 6
-#define WRITE_PATCH 8
-#define ENABLE_PATCH 11
-#define PS_RESET 2
-#define PS_WRITE 1
-#define PS_VERIFY_CRC 9
-#define CHANGE_BDADDR 15
-
-#define HCI_COMMAND_HEADER 7
-
-#define HCI_EVENT_SIZE 7
-
-#define WRITE_PATCH_COMMAND_STATUS_OFFSET 5
-
-#define PS_RAM_SIZE 2048
-
-#define RAM_PS_REGION (1<<0)
-#define RAM_PATCH_REGION (1<<1)
-#define RAMPS_MAX_PS_DATA_PER_TAG 20000
-#define MAX_RADIO_CFG_TABLE_SIZE 244
-#define RAMPS_MAX_PS_TAGS_PER_FILE 50
-
-#define PS_MAX_LEN 500
-#define LINE_SIZE_MAX (PS_MAX_LEN *2)
-
-/* Constant values used by parser */
-#define BYTES_OF_PS_DATA_PER_LINE 16
-#define RAMPS_MAX_PS_DATA_PER_TAG 20000
-
-
-/* Number pf PS/Patch entries in an HCI packet */
-#define MAX_BYTE_LENGTH 244
-
-#define SKIP_BLANKS(str) while (*str == ' ') str++
-
-enum MinBootFileFormatE
-{
- MB_FILEFORMAT_RADIOTBL,
- MB_FILEFORMAT_PATCH,
- MB_FILEFORMAT_COEXCONFIG
-};
-
-enum RamPsSection
-{
- RAM_PS_SECTION,
- RAM_PATCH_SECTION,
- RAM_DYN_MEM_SECTION
-};
-
-enum eType {
- eHex,
- edecimal
-};
-
-
-typedef struct tPsTagEntry
-{
- u32 TagId;
- u32 TagLen;
- u8 *TagData;
-} tPsTagEntry, *tpPsTagEntry;
-
-typedef struct tRamPatch
-{
- u16 Len;
- u8 *Data;
-} tRamPatch, *ptRamPatch;
-
-
-
-struct st_ps_data_format {
- enum eType eDataType;
- bool bIsArray;
-};
-
-struct st_read_status {
- unsigned uTagID;
- unsigned uSection;
- unsigned uLineCount;
- unsigned uCharCount;
- unsigned uByteCount;
-};
-
-
-/* Stores the number of PS Tags */
-static u32 Tag_Count = 0;
-
-/* Stores the number of patch commands */
-static u32 Patch_Count = 0;
-static u32 Total_tag_lenght = 0;
-bool BDADDR = false;
-u32 StartTagId;
-
-tPsTagEntry PsTagEntry[RAMPS_MAX_PS_TAGS_PER_FILE];
-tRamPatch RamPatch[MAX_NUM_PATCH_ENTRY];
-
-
-int AthParseFilesUnified(u8 *srcbuffer,u32 srclen, int FileFormat);
-char AthReadChar(u8 *buffer, u32 len,u32 *pos);
-char *AthGetLine(char *buffer, int maxlen, u8 *srcbuffer,u32 len,u32 *pos);
-static int AthPSCreateHCICommand(u8 Opcode, u32 Param1,struct ps_cmd_packet *PSPatchPacket,u32 *index);
-
-/* Function to reads the next character from the input buffer */
-char AthReadChar(u8 *buffer, u32 len,u32 *pos)
-{
- char Ch;
- if(buffer == NULL || *pos >=len )
- {
- return '\0';
- } else {
- Ch = buffer[*pos];
- (*pos)++;
- return Ch;
- }
-}
-/* PS parser helper function */
-unsigned int uGetInputDataFormat(char *pCharLine, struct st_ps_data_format *pstFormat)
-{
- if(pCharLine[0] != '[') {
- pstFormat->eDataType = eHex;
- pstFormat->bIsArray = true;
- return 0;
- }
- switch(pCharLine[1]) {
- case 'H':
- case 'h':
- if(pCharLine[2]==':') {
- if((pCharLine[3]== 'a') || (pCharLine[3]== 'A')) {
- if(pCharLine[4] == ']') {
- pstFormat->eDataType = eHex;
- pstFormat->bIsArray = true;
- pCharLine += 5;
- return 0;
- }
- else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format\n")); //[H:A
- return 1;
- }
- }
- if((pCharLine[3]== 'S') || (pCharLine[3]== 's')) {
- if(pCharLine[4] == ']') {
- pstFormat->eDataType = eHex;
- pstFormat->bIsArray = false;
- pCharLine += 5;
- return 0;
- }
- else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format\n")); //[H:A
- return 1;
- }
- }
- else if(pCharLine[3] == ']') { //[H:]
- pstFormat->eDataType = eHex;
- pstFormat->bIsArray = true;
- pCharLine += 4;
- return 0;
- }
- else { //[H:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format\n"));
- return 1;
- }
- }
- else if(pCharLine[2]==']') { //[H]
- pstFormat->eDataType = eHex;
- pstFormat->bIsArray = true;
- pCharLine += 3;
- return 0;
- }
- else { //[H
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format\n"));
- return 1;
- }
- break;
-
- case 'A':
- case 'a':
- if(pCharLine[2]==':') {
- if((pCharLine[3]== 'h') || (pCharLine[3]== 'H')) {
- if(pCharLine[4] == ']') {
- pstFormat->eDataType = eHex;
- pstFormat->bIsArray = true;
- pCharLine += 5;
- return 0;
- }
- else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 1\n")); //[A:H
- return 1;
- }
- }
- else if(pCharLine[3]== ']') { //[A:]
- pstFormat->eDataType = eHex;
- pstFormat->bIsArray = true;
- pCharLine += 4;
- return 0;
- }
- else { //[A:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 2\n"));
- return 1;
- }
- }
- else if(pCharLine[2]==']') { //[H]
- pstFormat->eDataType = eHex;
- pstFormat->bIsArray = true;
- pCharLine += 3;
- return 0;
- }
- else { //[H
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 3\n"));
- return 1;
- }
- break;
-
- case 'S':
- case 's':
- if(pCharLine[2]==':') {
- if((pCharLine[3]== 'h') || (pCharLine[3]== 'H')) {
- if(pCharLine[4] == ']') {
- pstFormat->eDataType = eHex;
- pstFormat->bIsArray = true;
- pCharLine += 5;
- return 0;
- }
- else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 5\n")); //[A:H
- return 1;
- }
- }
- else if(pCharLine[3]== ']') { //[A:]
- pstFormat->eDataType = eHex;
- pstFormat->bIsArray = true;
- pCharLine += 4;
- return 0;
- }
- else { //[A:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 6\n"));
- return 1;
- }
- }
- else if(pCharLine[2]==']') { //[H]
- pstFormat->eDataType = eHex;
- pstFormat->bIsArray = true;
- pCharLine += 3;
- return 0;
- }
- else { //[H
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 7\n"));
- return 1;
- }
- break;
-
- default:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Illegal Data format 8\n"));
- return 1;
- }
-}
-
-unsigned int uReadDataInSection(char *pCharLine, struct st_ps_data_format stPS_DataFormat)
-{
- char *pTokenPtr = pCharLine;
-
- if(pTokenPtr[0] == '[') {
- while(pTokenPtr[0] != ']' && pTokenPtr[0] != '\0') {
- pTokenPtr++;
- }
- if(pTokenPtr[0] == '\0') {
- return (0x0FFF);
- }
- pTokenPtr++;
-
-
- }
- if(stPS_DataFormat.eDataType == eHex) {
- if(stPS_DataFormat.bIsArray == true) {
- //Not implemented
- return (0x0FFF);
- }
- else {
- return (A_STRTOL(pTokenPtr, NULL, 16));
- }
- }
- else {
- //Not implemented
- return (0x0FFF);
- }
-}
-int AthParseFilesUnified(u8 *srcbuffer,u32 srclen, int FileFormat)
-{
- char *Buffer;
- char *pCharLine;
- u8 TagCount;
- u16 ByteCount;
- u8 ParseSection=RAM_PS_SECTION;
- u32 pos;
-
-
-
- int uReadCount;
- struct st_ps_data_format stPS_DataFormat;
- struct st_read_status stReadStatus = {0, 0, 0,0};
- pos = 0;
- Buffer = NULL;
-
- if (srcbuffer == NULL || srclen == 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Could not open .\n"));
- return A_ERROR;
- }
- TagCount = 0;
- ByteCount = 0;
- Buffer = A_MALLOC(LINE_SIZE_MAX + 1);
- if(NULL == Buffer) {
- return A_ERROR;
- }
- if (FileFormat == MB_FILEFORMAT_PATCH)
- {
- int LineRead = 0;
- while((pCharLine = AthGetLine(Buffer, LINE_SIZE_MAX, srcbuffer,srclen,&pos)) != NULL)
- {
-
- SKIP_BLANKS(pCharLine);
-
- // Comment line or empty line
- if ((pCharLine[0] == '/') && (pCharLine[1] == '/'))
- {
- continue;
- }
-
- if ((pCharLine[0] == '#')) {
- if (stReadStatus.uSection != 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("error\n"));
- if(Buffer != NULL) {
- kfree(Buffer);
- }
- return A_ERROR;
- }
- else {
- stReadStatus.uSection = 1;
- continue;
- }
- }
- if ((pCharLine[0] == '/') && (pCharLine[1] == '*'))
- {
- pCharLine+=2;
- SKIP_BLANKS(pCharLine);
-
- if(!strncmp(pCharLine,"PA",2)||!strncmp(pCharLine,"Pa",2)||!strncmp(pCharLine,"pa",2))
- ParseSection=RAM_PATCH_SECTION;
-
- if(!strncmp(pCharLine,"DY",2)||!strncmp(pCharLine,"Dy",2)||!strncmp(pCharLine,"dy",2))
- ParseSection=RAM_DYN_MEM_SECTION;
-
- if(!strncmp(pCharLine,"PS",2)||!strncmp(pCharLine,"Ps",2)||!strncmp(pCharLine,"ps",2))
- ParseSection=RAM_PS_SECTION;
-
- LineRead = 0;
- stReadStatus.uSection = 0;
-
- continue;
- }
-
- switch(ParseSection)
- {
- case RAM_PS_SECTION:
- {
- if (stReadStatus.uSection == 1) //TagID
- {
- SKIP_BLANKS(pCharLine);
- if(uGetInputDataFormat(pCharLine, &stPS_DataFormat)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("uGetInputDataFormat fail\n"));
- if(Buffer != NULL) {
- kfree(Buffer);
- }
- return A_ERROR;
- }
- //pCharLine +=5;
- PsTagEntry[TagCount].TagId = uReadDataInSection(pCharLine, stPS_DataFormat);
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" TAG ID %d \n",PsTagEntry[TagCount].TagId));
-
- //AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("tag # %x\n", PsTagEntry[TagCount].TagId);
- if (TagCount == 0)
- {
- StartTagId = PsTagEntry[TagCount].TagId;
- }
- stReadStatus.uSection = 2;
- }
- else if (stReadStatus.uSection == 2) //TagLength
- {
-
- if(uGetInputDataFormat(pCharLine, &stPS_DataFormat)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("uGetInputDataFormat fail \n"));
- if(Buffer != NULL) {
- kfree(Buffer);
- }
- return A_ERROR;
- }
- //pCharLine +=5;
- ByteCount = uReadDataInSection(pCharLine, stPS_DataFormat);
-
- //AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("tag length %x\n", ByteCount));
- if (ByteCount > LINE_SIZE_MAX/2)
- {
- if(Buffer != NULL) {
- kfree(Buffer);
- }
- return A_ERROR;
- }
- PsTagEntry[TagCount].TagLen = ByteCount;
- PsTagEntry[TagCount].TagData = (u8 *)A_MALLOC(ByteCount);
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" TAG Length %d Tag Index %d \n",PsTagEntry[TagCount].TagLen,TagCount));
- stReadStatus.uSection = 3;
- stReadStatus.uLineCount = 0;
- }
- else if( stReadStatus.uSection == 3) { //Data
-
- if(stReadStatus.uLineCount == 0) {
- if(uGetInputDataFormat(pCharLine,&stPS_DataFormat)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("uGetInputDataFormat Fail\n"));
- if(Buffer != NULL) {
- kfree(Buffer);
- }
- return A_ERROR;
- }
- //pCharLine +=5;
- }
- SKIP_BLANKS(pCharLine);
- stReadStatus.uCharCount = 0;
- if(pCharLine[stReadStatus.uCharCount] == '[') {
- while(pCharLine[stReadStatus.uCharCount] != ']' && pCharLine[stReadStatus.uCharCount] != '\0' ) {
- stReadStatus.uCharCount++;
- }
- if(pCharLine[stReadStatus.uCharCount] == ']' ) {
- stReadStatus.uCharCount++;
- } else {
- stReadStatus.uCharCount = 0;
- }
- }
- uReadCount = (ByteCount > BYTES_OF_PS_DATA_PER_LINE)? BYTES_OF_PS_DATA_PER_LINE: ByteCount;
- //AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" "));
- if((stPS_DataFormat.eDataType == eHex) && stPS_DataFormat.bIsArray == true) {
- while(uReadCount > 0) {
- PsTagEntry[TagCount].TagData[stReadStatus.uByteCount] =
- (u8)(hex_to_bin(pCharLine[stReadStatus.uCharCount]) << 4)
- | (u8)(hex_to_bin(pCharLine[stReadStatus.uCharCount + 1]));
-
- PsTagEntry[TagCount].TagData[stReadStatus.uByteCount+1] =
- (u8)(hex_to_bin(pCharLine[stReadStatus.uCharCount + 3]) << 4)
- | (u8)(hex_to_bin(pCharLine[stReadStatus.uCharCount + 4]));
-
- stReadStatus.uCharCount += 6; // read two bytes, plus a space;
- stReadStatus.uByteCount += 2;
- uReadCount -= 2;
- }
- if(ByteCount > BYTES_OF_PS_DATA_PER_LINE) {
- ByteCount -= BYTES_OF_PS_DATA_PER_LINE;
- }
- else {
- ByteCount = 0;
- }
- }
- else {
- //to be implemented
- }
-
- stReadStatus.uLineCount++;
-
- if(ByteCount == 0) {
- stReadStatus.uSection = 0;
- stReadStatus.uCharCount = 0;
- stReadStatus.uLineCount = 0;
- stReadStatus.uByteCount = 0;
- }
- else {
- stReadStatus.uCharCount = 0;
- }
-
- if((stReadStatus.uSection == 0)&&(++TagCount == RAMPS_MAX_PS_TAGS_PER_FILE))
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("\n Buffer over flow PS File too big!!!"));
- if(Buffer != NULL) {
- kfree(Buffer);
- }
- return A_ERROR;
- //Sleep (3000);
- //exit(1);
- }
-
- }
- }
-
- break;
- default:
- {
- if(Buffer != NULL) {
- kfree(Buffer);
- }
- return A_ERROR;
- }
- break;
- }
- LineRead++;
- }
- Tag_Count = TagCount;
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Number of Tags %d\n", Tag_Count));
- }
-
-
- if (TagCount > RAMPS_MAX_PS_TAGS_PER_FILE)
- {
-
- if(Buffer != NULL) {
- kfree(Buffer);
- }
- return A_ERROR;
- }
-
- if(Buffer != NULL) {
- kfree(Buffer);
- }
- return 0;
-
-}
-
-
-
-/********************/
-
-
-int GetNextTwoChar(u8 *srcbuffer,u32 len, u32 *pos, char *buffer)
-{
- unsigned char ch;
-
- ch = AthReadChar(srcbuffer,len,pos);
- if(ch != '\0' && isxdigit(ch)) {
- buffer[0] = ch;
- } else
- {
- return A_ERROR;
- }
- ch = AthReadChar(srcbuffer,len,pos);
- if(ch != '\0' && isxdigit(ch)) {
- buffer[1] = ch;
- } else
- {
- return A_ERROR;
- }
- return 0;
-}
-
-int AthDoParsePatch(u8 *patchbuffer, u32 patchlen)
-{
-
- char Byte[3];
- char Line[MAX_BYTE_LENGTH + 1];
- int ByteCount,ByteCount_Org;
- int count;
- int i,j,k;
- int data;
- u32 filepos;
- Byte[2] = '\0';
- j = 0;
- filepos = 0;
- Patch_Count = 0;
-
- while(NULL != AthGetLine(Line,MAX_BYTE_LENGTH,patchbuffer,patchlen,&filepos)) {
- if(strlen(Line) <= 1 || !isxdigit(Line[0])) {
- continue;
- } else {
- break;
- }
- }
- ByteCount = A_STRTOL(Line, NULL, 16);
- ByteCount_Org = ByteCount;
-
- while(ByteCount > MAX_BYTE_LENGTH){
-
- /* Handle case when the number of patch buffer is more than the 20K */
- if(MAX_NUM_PATCH_ENTRY == Patch_Count) {
- for(i = 0; i < Patch_Count; i++) {
- kfree(RamPatch[i].Data);
- }
- return A_ERROR;
- }
- RamPatch[Patch_Count].Len= MAX_BYTE_LENGTH;
- RamPatch[Patch_Count].Data = (u8 *)A_MALLOC(MAX_BYTE_LENGTH);
- Patch_Count ++;
-
-
- ByteCount= ByteCount - MAX_BYTE_LENGTH;
- }
-
- RamPatch[Patch_Count].Len= (ByteCount & 0xFF);
- if(ByteCount != 0) {
- RamPatch[Patch_Count].Data = (u8 *)A_MALLOC(ByteCount);
- Patch_Count ++;
- }
- count = 0;
- while(ByteCount_Org > MAX_BYTE_LENGTH){
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" Index [%d]\n",j));
- for (i = 0,k=0; i < MAX_BYTE_LENGTH*2; i += 2,k++,count +=2) {
- if(GetNextTwoChar(patchbuffer,patchlen,&filepos,Byte) == A_ERROR) {
- return A_ERROR;
- }
- data = A_STRTOUL(&Byte[0], NULL, 16);
- RamPatch[j].Data[k] = (data & 0xFF);
-
-
- }
- j++;
- ByteCount_Org = ByteCount_Org - MAX_BYTE_LENGTH;
- }
- if(j == 0){
- j++;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" Index [%d]\n",j));
- for (k=0; k < ByteCount_Org; i += 2,k++,count+=2) {
- if(GetNextTwoChar(patchbuffer,patchlen,&filepos,Byte) == A_ERROR) {
- return A_ERROR;
- }
- data = A_STRTOUL(Byte, NULL, 16);
- RamPatch[j].Data[k] = (data & 0xFF);
-
-
- }
- return 0;
-}
-
-
-/********************/
-int AthDoParsePS(u8 *srcbuffer, u32 srclen)
-{
- int status;
- int i;
- bool BDADDR_Present = false;
-
- Tag_Count = 0;
-
- Total_tag_lenght = 0;
- BDADDR = false;
-
-
- status = A_ERROR;
-
- if(NULL != srcbuffer && srclen != 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("File Open Operation Successful\n"));
-
- status = AthParseFilesUnified(srcbuffer,srclen,MB_FILEFORMAT_PATCH);
- }
-
-
-
- if(Tag_Count == 0){
- Total_tag_lenght = 10;
-
- }
- else{
- for(i=0; i<Tag_Count; i++){
- if(PsTagEntry[i].TagId == 1){
- BDADDR_Present = true;
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BD ADDR is present in Patch File \r\n"));
-
- }
- if(PsTagEntry[i].TagLen % 2 == 1){
- Total_tag_lenght = Total_tag_lenght + PsTagEntry[i].TagLen + 1;
- }
- else{
- Total_tag_lenght = Total_tag_lenght + PsTagEntry[i].TagLen;
- }
-
- }
- }
-
- if(Tag_Count > 0 && !BDADDR_Present){
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BD ADDR is not present adding 10 extra bytes \r\n"));
- Total_tag_lenght=Total_tag_lenght + 10;
- }
- Total_tag_lenght = Total_tag_lenght+ 10 + (Tag_Count*4);
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** Total Length %d\n",Total_tag_lenght));
-
-
- return status;
-}
-char *AthGetLine(char *buffer, int maxlen, u8 *srcbuffer,u32 len,u32 *pos)
-{
-
- int count;
- static short flag;
- char CharRead;
- count = 0;
- flag = A_ERROR;
-
- do
- {
- CharRead = AthReadChar(srcbuffer,len,pos);
- if( CharRead == '\0' ) {
- buffer[count+1] = '\0';
- if(count == 0) {
- return NULL;
- }
- else {
- return buffer;
- }
- }
-
- if(CharRead == 13) {
- } else if(CharRead == 10) {
- buffer[count] ='\0';
- flag = A_ERROR;
- return buffer;
- }else {
- buffer[count++] = CharRead;
- }
-
- }
- while(count < maxlen-1 && CharRead != '\0');
- buffer[count] = '\0';
-
- return buffer;
-}
-
-static void LoadHeader(u8 *HCI_PS_Command,u8 opcode,int length,int index){
-
- HCI_PS_Command[0]= 0x0B;
- HCI_PS_Command[1]= 0xFC;
- HCI_PS_Command[2]= length + 4;
- HCI_PS_Command[3]= opcode;
- HCI_PS_Command[4]= (index & 0xFF);
- HCI_PS_Command[5]= ((index>>8) & 0xFF);
- HCI_PS_Command[6]= length;
-}
-
-/////////////////////////
-//
-int AthCreateCommandList(struct ps_cmd_packet **HciPacketList, u32 *numPackets)
-{
-
- u8 count;
- u32 NumcmdEntry = 0;
-
- u32 Crc = 0;
- *numPackets = 0;
-
-
- if(Patch_Count > 0)
- Crc |= RAM_PATCH_REGION;
- if(Tag_Count > 0)
- Crc |= RAM_PS_REGION;
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("PS Thread Started CRC %x Patch Count %d Tag Count %d \n",Crc,Patch_Count,Tag_Count));
-
- if(Patch_Count || Tag_Count ){
- NumcmdEntry+=(2 + Patch_Count + Tag_Count); /* CRC Packet + PS Reset Packet + Patch List + PS List*/
- if(Patch_Count > 0) {
- NumcmdEntry++; /* Patch Enable Command */
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Num Cmd Entries %d Size %d \r\n",NumcmdEntry,(u32)sizeof(struct ps_cmd_packet) * NumcmdEntry));
- (*HciPacketList) = A_MALLOC(sizeof(struct ps_cmd_packet) * NumcmdEntry);
- if(NULL == *HciPacketList) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("memory allocation failed \r\n"));
- }
- AthPSCreateHCICommand(PS_VERIFY_CRC,Crc,*HciPacketList,numPackets);
- if(Patch_Count > 0){
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("*** Write Patch**** \r\n"));
- AthPSCreateHCICommand(WRITE_PATCH,Patch_Count,*HciPacketList,numPackets);
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("*** Enable Patch**** \r\n"));
- AthPSCreateHCICommand(ENABLE_PATCH,0,*HciPacketList,numPackets);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("*** PS Reset**** %d[0x%x] \r\n",PS_RAM_SIZE,PS_RAM_SIZE));
- AthPSCreateHCICommand(PS_RESET,PS_RAM_SIZE,*HciPacketList,numPackets);
- if(Tag_Count > 0){
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("*** PS Write**** \r\n"));
- AthPSCreateHCICommand(PS_WRITE,Tag_Count,*HciPacketList,numPackets);
- }
- }
- if(!BDADDR){
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BD ADDR not present \r\n"));
-
- }
- for(count = 0; count < Patch_Count; count++) {
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Freeing Patch Buffer %d \r\n",count));
- kfree(RamPatch[count].Data);
- }
-
- for(count = 0; count < Tag_Count; count++) {
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Freeing PS Buffer %d \r\n",count));
- kfree(PsTagEntry[count].TagData);
- }
-
-/*
- * SDIO Transport uses synchronous mode of data transfer
- * So, AthPSOperations() call returns only after receiving the
- * command complete event.
- */
- return *numPackets;
-}
-
-
-////////////////////////
-
-/////////////
-static int AthPSCreateHCICommand(u8 Opcode, u32 Param1,struct ps_cmd_packet *PSPatchPacket,u32 *index)
-{
- u8 *HCI_PS_Command;
- u32 Length;
- int i,j;
-
- switch(Opcode)
- {
- case WRITE_PATCH:
-
-
- for(i=0;i< Param1;i++){
-
- HCI_PS_Command = (u8 *) A_MALLOC(RamPatch[i].Len+HCI_COMMAND_HEADER);
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Allocated Buffer Size %d\n",RamPatch[i].Len+HCI_COMMAND_HEADER));
- if(HCI_PS_Command == NULL){
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("MALLOC Failed\r\n"));
- return A_ERROR;
- }
- memset (HCI_PS_Command, 0, RamPatch[i].Len+HCI_COMMAND_HEADER);
- LoadHeader(HCI_PS_Command,Opcode,RamPatch[i].Len,i);
- for(j=0;j<RamPatch[i].Len;j++){
- HCI_PS_Command[HCI_COMMAND_HEADER+j]=RamPatch[i].Data[j];
- }
- PSPatchPacket[*index].Hcipacket = HCI_PS_Command;
- PSPatchPacket[*index].packetLen = RamPatch[i].Len+HCI_COMMAND_HEADER;
- (*index)++;
-
-
- }
-
- break;
-
- case ENABLE_PATCH:
-
-
- Length = 0;
- i= 0;
- HCI_PS_Command = (u8 *) A_MALLOC(Length+HCI_COMMAND_HEADER);
- if(HCI_PS_Command == NULL){
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("MALLOC Failed\r\n"));
- return A_ERROR;
- }
-
- memset (HCI_PS_Command, 0, Length+HCI_COMMAND_HEADER);
- LoadHeader(HCI_PS_Command,Opcode,Length,i);
- PSPatchPacket[*index].Hcipacket = HCI_PS_Command;
- PSPatchPacket[*index].packetLen = Length+HCI_COMMAND_HEADER;
- (*index)++;
-
- break;
-
- case PS_RESET:
- Length = 0x06;
- i=0;
- HCI_PS_Command = (u8 *) A_MALLOC(Length+HCI_COMMAND_HEADER);
- if(HCI_PS_Command == NULL){
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("MALLOC Failed\r\n"));
- return A_ERROR;
- }
- memset (HCI_PS_Command, 0, Length+HCI_COMMAND_HEADER);
- LoadHeader(HCI_PS_Command,Opcode,Length,i);
- HCI_PS_Command[7]= 0x00;
- HCI_PS_Command[Length+HCI_COMMAND_HEADER -2]= (Param1 & 0xFF);
- HCI_PS_Command[Length+HCI_COMMAND_HEADER -1]= ((Param1 >> 8) & 0xFF);
- PSPatchPacket[*index].Hcipacket = HCI_PS_Command;
- PSPatchPacket[*index].packetLen = Length+HCI_COMMAND_HEADER;
- (*index)++;
-
- break;
-
- case PS_WRITE:
- for(i=0;i< Param1;i++){
- if(PsTagEntry[i].TagId ==1)
- BDADDR = true;
-
- HCI_PS_Command = (u8 *) A_MALLOC(PsTagEntry[i].TagLen+HCI_COMMAND_HEADER);
- if(HCI_PS_Command == NULL){
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("MALLOC Failed\r\n"));
- return A_ERROR;
- }
-
- memset (HCI_PS_Command, 0, PsTagEntry[i].TagLen+HCI_COMMAND_HEADER);
- LoadHeader(HCI_PS_Command,Opcode,PsTagEntry[i].TagLen,PsTagEntry[i].TagId);
-
- for(j=0;j<PsTagEntry[i].TagLen;j++){
- HCI_PS_Command[HCI_COMMAND_HEADER+j]=PsTagEntry[i].TagData[j];
- }
-
- PSPatchPacket[*index].Hcipacket = HCI_PS_Command;
- PSPatchPacket[*index].packetLen = PsTagEntry[i].TagLen+HCI_COMMAND_HEADER;
- (*index)++;
-
- }
-
- break;
-
-
- case PS_VERIFY_CRC:
- Length = 0x0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("VALUE of CRC:%d At index %d\r\n",Param1,*index));
-
- HCI_PS_Command = (u8 *) A_MALLOC(Length+HCI_COMMAND_HEADER);
- if(HCI_PS_Command == NULL){
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("MALLOC Failed\r\n"));
- return A_ERROR;
- }
- memset (HCI_PS_Command, 0, Length+HCI_COMMAND_HEADER);
- LoadHeader(HCI_PS_Command,Opcode,Length,Param1);
-
- PSPatchPacket[*index].Hcipacket = HCI_PS_Command;
- PSPatchPacket[*index].packetLen = Length+HCI_COMMAND_HEADER;
- (*index)++;
-
- break;
-
- case CHANGE_BDADDR:
- break;
- }
- return 0;
-}
-int AthFreeCommandList(struct ps_cmd_packet **HciPacketList, u32 numPackets)
-{
- int i;
- if(*HciPacketList == NULL) {
- return A_ERROR;
- }
- for(i = 0; i < numPackets;i++) {
- kfree((*HciPacketList)[i].Hcipacket);
- }
- kfree(*HciPacketList);
- return 0;
-}
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.h b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.h
deleted file mode 100644
index 4e0f2f713a4..00000000000
--- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.h
+++ /dev/null
@@ -1,113 +0,0 @@
-//------------------------------------------------------------------------------
-//
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//
-// This file is the include file for Atheros PS and patch parser.
-// It implements APIs to parse data buffer with patch and PS information and convert it to HCI commands.
-//
-
-#ifndef __AR3KPSPARSER_H
-#define __AR3KPSPARSER_H
-
-
-
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include "athdefs.h"
-#ifdef HCI_TRANSPORT_SDIO
-#include "a_config.h"
-#include "a_osapi.h"
-#define ATH_MODULE_NAME misc
-#include "a_debug.h"
-#include "common_drv.h"
-#include "hci_transport_api.h"
-#include "ar3kconfig.h"
-#else
-#ifndef A_PRINTF
-#define A_PRINTF(args...) printk(KERN_ALERT args)
-#endif /* A_PRINTF */
-#include "debug_linux.h"
-
-/* Helper data type declaration */
-
-#define ATH_DEBUG_ERR (1 << 0)
-#define ATH_DEBUG_WARN (1 << 1)
-#define ATH_DEBUG_INFO (1 << 2)
-
-
-
-#define false 0
-#define true 1
-
-#ifndef A_MALLOC
-#define A_MALLOC(size) kmalloc((size),GFP_KERNEL)
-#endif /* A_MALLOC */
-#endif /* HCI_TRANSPORT_UART */
-
-/* String manipulation APIs */
-#ifndef A_STRTOUL
-#define A_STRTOUL simple_strtoul
-#endif /* A_STRTOL */
-
-#ifndef A_STRTOL
-#define A_STRTOL simple_strtol
-#endif /* A_STRTOL */
-
-
-/* The maximum number of bytes possible in a patch entry */
-#define MAX_PATCH_SIZE 20000
-
-/* Maximum HCI packets that will be formed from the Patch file */
-#define MAX_NUM_PATCH_ENTRY (MAX_PATCH_SIZE/MAX_BYTE_LENGTH) + 1
-
-
-
-
-
-
-
-struct ps_cmd_packet
-{
- u8 *Hcipacket;
- int packetLen;
-};
-
-/* Parses a Patch information buffer and store it in global structure */
-int AthDoParsePatch(u8 *, u32 );
-
-/* parses a PS information buffer and stores it in a global structure */
-int AthDoParsePS(u8 *, u32 );
-
-/*
- * Uses the output of Both AthDoParsePS and AthDoParsePatch APIs to form HCI command array with
- * all the PS and patch commands.
- * The list will have the below mentioned commands in order.
- * CRC command packet
- * Download patch command(s)
- * Enable patch Command
- * PS Reset Command
- * PS Tag Command(s)
- *
- */
-int AthCreateCommandList(struct ps_cmd_packet **, u32 *);
-
-/* Cleanup the dynamically allicated HCI command list */
-int AthFreeCommandList(struct ps_cmd_packet **HciPacketList, u32 numPackets);
-#endif /* __AR3KPSPARSER_H */
diff --git a/drivers/staging/ath6kl/miscdrv/common_drv.c b/drivers/staging/ath6kl/miscdrv/common_drv.c
deleted file mode 100644
index 1ce539aa019..00000000000
--- a/drivers/staging/ath6kl/miscdrv/common_drv.c
+++ /dev/null
@@ -1,910 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="common_drv.c" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#include "a_config.h"
-#include "athdefs.h"
-
-#include "hw/mbox_host_reg.h"
-#include "gpio_reg.h"
-#include "hw/rtc_reg.h"
-#include "hw/mbox_reg.h"
-#include "hw/apb_map.h"
-
-#include "a_osapi.h"
-#include "targaddrs.h"
-#include "hif.h"
-#include "htc_api.h"
-#include "wmi.h"
-#include "bmi.h"
-#include "bmi_msg.h"
-#include "common_drv.h"
-#define ATH_MODULE_NAME misc
-#include "a_debug.h"
-#include "ar6000_diag.h"
-
-static ATH_DEBUG_MODULE_DBG_INFO *g_pModuleInfoHead = NULL;
-static A_MUTEX_T g_ModuleListLock;
-static bool g_ModuleDebugInit = false;
-
-#ifdef ATH_DEBUG_MODULE
-
-ATH_DEBUG_INSTANTIATE_MODULE_VAR(misc,
- "misc",
- "Common and misc APIs",
- ATH_DEBUG_MASK_DEFAULTS,
- 0,
- NULL);
-
-#endif
-
-#define HOST_INTEREST_ITEM_ADDRESS(target, item) \
- ((((target) == TARGET_TYPE_AR6002) ? AR6002_HOST_INTEREST_ITEM_ADDRESS(item) : \
- (((target) == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0)))
-
-
-#define AR6001_LOCAL_COUNT_ADDRESS 0x0c014080
-#define AR6002_LOCAL_COUNT_ADDRESS 0x00018080
-#define AR6003_LOCAL_COUNT_ADDRESS 0x00018080
-#define CPU_DBG_SEL_ADDRESS 0x00000483
-#define CPU_DBG_ADDRESS 0x00000484
-
-static u8 custDataAR6002[AR6002_CUST_DATA_SIZE];
-static u8 custDataAR6003[AR6003_CUST_DATA_SIZE];
-
-/* Compile the 4BYTE version of the window register setup routine,
- * This mitigates host interconnect issues with non-4byte aligned bus requests, some
- * interconnects use bus adapters that impose strict limitations.
- * Since diag window access is not intended for performance critical operations, the 4byte mode should
- * be satisfactory even though it generates 4X the bus activity. */
-
-#ifdef USE_4BYTE_REGISTER_ACCESS
-
- /* set the window address register (using 4-byte register access ). */
-int ar6000_SetAddressWindowRegister(struct hif_device *hifDevice, u32 RegisterAddr, u32 Address)
-{
- int status;
- u8 addrValue[4];
- s32 i;
-
- /* write bytes 1,2,3 of the register to set the upper address bytes, the LSB is written
- * last to initiate the access cycle */
-
- for (i = 1; i <= 3; i++) {
- /* fill the buffer with the address byte value we want to hit 4 times*/
- addrValue[0] = ((u8 *)&Address)[i];
- addrValue[1] = addrValue[0];
- addrValue[2] = addrValue[0];
- addrValue[3] = addrValue[0];
-
- /* hit each byte of the register address with a 4-byte write operation to the same address,
- * this is a harmless operation */
- status = HIFReadWrite(hifDevice,
- RegisterAddr+i,
- addrValue,
- 4,
- HIF_WR_SYNC_BYTE_FIX,
- NULL);
- if (status) {
- break;
- }
- }
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot write initial bytes of 0x%x to window reg: 0x%X \n",
- Address, RegisterAddr));
- return status;
- }
-
- /* write the address register again, this time write the whole 4-byte value.
- * The effect here is that the LSB write causes the cycle to start, the extra
- * 3 byte write to bytes 1,2,3 has no effect since we are writing the same values again */
- status = HIFReadWrite(hifDevice,
- RegisterAddr,
- (u8 *)(&Address),
- 4,
- HIF_WR_SYNC_BYTE_INC,
- NULL);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot write 0x%x to window reg: 0x%X \n",
- Address, RegisterAddr));
- return status;
- }
-
- return 0;
-
-
-
-}
-
-
-#else
-
- /* set the window address register */
-int ar6000_SetAddressWindowRegister(struct hif_device *hifDevice, u32 RegisterAddr, u32 Address)
-{
- int status;
-
- /* write bytes 1,2,3 of the register to set the upper address bytes, the LSB is written
- * last to initiate the access cycle */
- status = HIFReadWrite(hifDevice,
- RegisterAddr+1, /* write upper 3 bytes */
- ((u8 *)(&Address))+1,
- sizeof(u32)-1,
- HIF_WR_SYNC_BYTE_INC,
- NULL);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot write initial bytes of 0x%x to window reg: 0x%X \n",
- RegisterAddr, Address));
- return status;
- }
-
- /* write the LSB of the register, this initiates the operation */
- status = HIFReadWrite(hifDevice,
- RegisterAddr,
- (u8 *)(&Address),
- sizeof(u8),
- HIF_WR_SYNC_BYTE_INC,
- NULL);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot write 0x%x to window reg: 0x%X \n",
- RegisterAddr, Address));
- return status;
- }
-
- return 0;
-}
-
-#endif
-
-/*
- * Read from the AR6000 through its diagnostic window.
- * No cooperation from the Target is required for this.
- */
-int
-ar6000_ReadRegDiag(struct hif_device *hifDevice, u32 *address, u32 *data)
-{
- int status;
-
- /* set window register to start read cycle */
- status = ar6000_SetAddressWindowRegister(hifDevice,
- WINDOW_READ_ADDR_ADDRESS,
- *address);
-
- if (status) {
- return status;
- }
-
- /* read the data */
- status = HIFReadWrite(hifDevice,
- WINDOW_DATA_ADDRESS,
- (u8 *)data,
- sizeof(u32),
- HIF_RD_SYNC_BYTE_INC,
- NULL);
- if (status) {
- AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot read from WINDOW_DATA_ADDRESS\n"));
- return status;
- }
-
- return status;
-}
-
-
-/*
- * Write to the AR6000 through its diagnostic window.
- * No cooperation from the Target is required for this.
- */
-int
-ar6000_WriteRegDiag(struct hif_device *hifDevice, u32 *address, u32 *data)
-{
- int status;
-
- /* set write data */
- status = HIFReadWrite(hifDevice,
- WINDOW_DATA_ADDRESS,
- (u8 *)data,
- sizeof(u32),
- HIF_WR_SYNC_BYTE_INC,
- NULL);
- if (status) {
- AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot write 0x%x to WINDOW_DATA_ADDRESS\n", *data));
- return status;
- }
-
- /* set window register, which starts the write cycle */
- return ar6000_SetAddressWindowRegister(hifDevice,
- WINDOW_WRITE_ADDR_ADDRESS,
- *address);
- }
-
-int
-ar6000_ReadDataDiag(struct hif_device *hifDevice, u32 address,
- u8 *data, u32 length)
-{
- u32 count;
- int status = 0;
-
- for (count = 0; count < length; count += 4, address += 4) {
- if ((status = ar6000_ReadRegDiag(hifDevice, &address,
- (u32 *)&data[count])) != 0)
- {
- break;
- }
- }
-
- return status;
-}
-
-int
-ar6000_WriteDataDiag(struct hif_device *hifDevice, u32 address,
- u8 *data, u32 length)
-{
- u32 count;
- int status = 0;
-
- for (count = 0; count < length; count += 4, address += 4) {
- if ((status = ar6000_WriteRegDiag(hifDevice, &address,
- (u32 *)&data[count])) != 0)
- {
- break;
- }
- }
-
- return status;
-}
-
-int
-ar6k_ReadTargetRegister(struct hif_device *hifDevice, int regsel, u32 *regval)
-{
- int status;
- u8 vals[4];
- u8 register_selection[4];
-
- register_selection[0] = register_selection[1] = register_selection[2] = register_selection[3] = (regsel & 0xff);
- status = HIFReadWrite(hifDevice,
- CPU_DBG_SEL_ADDRESS,
- register_selection,
- 4,
- HIF_WR_SYNC_BYTE_FIX,
- NULL);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot write CPU_DBG_SEL (%d)\n", regsel));
- return status;
- }
-
- status = HIFReadWrite(hifDevice,
- CPU_DBG_ADDRESS,
- (u8 *)vals,
- sizeof(vals),
- HIF_RD_SYNC_BYTE_INC,
- NULL);
- if (status) {
- AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot read from CPU_DBG_ADDRESS\n"));
- return status;
- }
-
- *regval = vals[0]<<0 | vals[1]<<8 | vals[2]<<16 | vals[3]<<24;
-
- return status;
-}
-
-void
-ar6k_FetchTargetRegs(struct hif_device *hifDevice, u32 *targregs)
-{
- int i;
- u32 val;
-
- for (i=0; i<AR6003_FETCH_TARG_REGS_COUNT; i++) {
- val=0xffffffff;
- (void)ar6k_ReadTargetRegister(hifDevice, i, &val);
- targregs[i] = val;
- }
-}
-
-#if 0
-static int
-_do_write_diag(struct hif_device *hifDevice, u32 addr, u32 value)
-{
- int status;
-
- status = ar6000_WriteRegDiag(hifDevice, &addr, &value);
- if (status)
- {
- AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Cannot force Target to execute ROM!\n"));
- }
-
- return status;
-}
-#endif
-
-
-/*
- * Delay up to wait_msecs millisecs to allow Target to enter BMI phase,
- * which is a good sign that it's alive and well. This is used after
- * explicitly forcing the Target to reset.
- *
- * The wait_msecs time should be sufficiently long to cover any reasonable
- * boot-time delay. For instance, AR6001 firmware allow one second for a
- * low frequency crystal to settle before it calibrates the refclk frequency.
- *
- * TBD: Might want to add special handling for AR6K_OPTION_BMI_DISABLE.
- */
-#if 0
-static int
-_delay_until_target_alive(struct hif_device *hifDevice, s32 wait_msecs, u32 TargetType)
-{
- s32 actual_wait;
- s32 i;
- u32 address;
-
- actual_wait = 0;
-
- /* Hardcode the address of LOCAL_COUNT_ADDRESS based on the target type */
- if (TargetType == TARGET_TYPE_AR6002) {
- address = AR6002_LOCAL_COUNT_ADDRESS;
- } else if (TargetType == TARGET_TYPE_AR6003) {
- address = AR6003_LOCAL_COUNT_ADDRESS;
- } else {
- A_ASSERT(0);
- }
- address += 0x10;
- for (i=0; actual_wait < wait_msecs; i++) {
- u32 data;
-
- A_MDELAY(100);
- actual_wait += 100;
-
- data = 0;
- if (ar6000_ReadRegDiag(hifDevice, &address, &data) != 0) {
- return A_ERROR;
- }
-
- if (data != 0) {
- /* No need to wait longer -- we have a BMI credit */
- return 0;
- }
- }
- return A_ERROR; /* timed out */
-}
-#endif
-
-#define AR6001_RESET_CONTROL_ADDRESS 0x0C000000
-#define AR6002_RESET_CONTROL_ADDRESS 0x00004000
-#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
-/* reset device */
-int ar6000_reset_device(struct hif_device *hifDevice, u32 TargetType, bool waitForCompletion, bool coldReset)
-{
- int status = 0;
- u32 address;
- u32 data;
-
- do {
-// Workaround BEGIN
- // address = RESET_CONTROL_ADDRESS;
-
- if (coldReset) {
- data = RESET_CONTROL_COLD_RST_MASK;
- }
- else {
- data = RESET_CONTROL_MBOX_RST_MASK;
- }
-
- /* Hardcode the address of RESET_CONTROL_ADDRESS based on the target type */
- if (TargetType == TARGET_TYPE_AR6002) {
- address = AR6002_RESET_CONTROL_ADDRESS;
- } else if (TargetType == TARGET_TYPE_AR6003) {
- address = AR6003_RESET_CONTROL_ADDRESS;
- } else {
- A_ASSERT(0);
- }
-
-
- status = ar6000_WriteRegDiag(hifDevice, &address, &data);
-
- if (status) {
- break;
- }
-
- if (!waitForCompletion) {
- break;
- }
-
-#if 0
- /* Up to 2 second delay to allow things to settle down */
- (void)_delay_until_target_alive(hifDevice, 2000, TargetType);
-
- /*
- * Read back the RESET CAUSE register to ensure that the cold reset
- * went through.
- */
-
- // address = RESET_CAUSE_ADDRESS;
- /* Hardcode the address of RESET_CAUSE_ADDRESS based on the target type */
- if (TargetType == TARGET_TYPE_AR6002) {
- address = 0x000040C0;
- } else if (TargetType == TARGET_TYPE_AR6003) {
- address = 0x000040C0;
- } else {
- A_ASSERT(0);
- }
-
- data = 0;
- status = ar6000_ReadRegDiag(hifDevice, &address, &data);
-
- if (status) {
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Reset Cause readback: 0x%X \n",data));
- data &= RESET_CAUSE_LAST_MASK;
- if (data != 2) {
- AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Unable to cold reset the target \n"));
- }
-#endif
-// Workaroud END
-
- } while (false);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_LOG_ERR, ("Failed to reset target \n"));
- }
-
- return 0;
-}
-
-/* This should be called in BMI phase after firmware is downloaded */
-void
-ar6000_copy_cust_data_from_target(struct hif_device *hifDevice, u32 TargetType)
-{
- u32 eepHeaderAddr;
- u8 AR6003CustDataShadow[AR6003_CUST_DATA_SIZE+4];
- s32 i;
-
- if (BMIReadMemory(hifDevice,
- HOST_INTEREST_ITEM_ADDRESS(TargetType, hi_board_data),
- (u8 *)&eepHeaderAddr,
- 4)!= 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMIReadMemory for reading board data address failed \n"));
- return;
- }
-
- if (TargetType == TARGET_TYPE_AR6003) {
- eepHeaderAddr += 36; /* AR6003 customer data section offset is 37 */
-
- for (i=0; i<AR6003_CUST_DATA_SIZE+4; i+=4){
- if (BMIReadSOCRegister(hifDevice, eepHeaderAddr, (u32 *)&AR6003CustDataShadow[i])!= 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMIReadSOCRegister () failed \n"));
- return ;
- }
- eepHeaderAddr +=4;
- }
-
- memcpy(custDataAR6003, AR6003CustDataShadow+1, AR6003_CUST_DATA_SIZE);
- }
-
- if (TargetType == TARGET_TYPE_AR6002) {
- eepHeaderAddr += 64; /* AR6002 customer data sectioin offset is 64 */
-
- for (i=0; i<AR6002_CUST_DATA_SIZE; i+=4){
- if (BMIReadSOCRegister(hifDevice, eepHeaderAddr, (u32 *)&custDataAR6002[i])!= 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMIReadSOCRegister () failed \n"));
- return ;
- }
- eepHeaderAddr +=4;
- }
- }
-
- return;
-}
-
-/* This is the function to call when need to use the cust data */
-u8 *ar6000_get_cust_data_buffer(u32 TargetType)
-{
- if (TargetType == TARGET_TYPE_AR6003)
- return custDataAR6003;
-
- if (TargetType == TARGET_TYPE_AR6002)
- return custDataAR6002;
-
- return NULL;
-}
-
-#define REG_DUMP_COUNT_AR6001 38 /* WORDs, derived from AR600x_regdump.h */
-#define REG_DUMP_COUNT_AR6002 60
-#define REG_DUMP_COUNT_AR6003 60
-#define REGISTER_DUMP_LEN_MAX 60
-#if REG_DUMP_COUNT_AR6001 > REGISTER_DUMP_LEN_MAX
-#error "REG_DUMP_COUNT_AR6001 too large"
-#endif
-#if REG_DUMP_COUNT_AR6002 > REGISTER_DUMP_LEN_MAX
-#error "REG_DUMP_COUNT_AR6002 too large"
-#endif
-#if REG_DUMP_COUNT_AR6003 > REGISTER_DUMP_LEN_MAX
-#error "REG_DUMP_COUNT_AR6003 too large"
-#endif
-
-
-void ar6000_dump_target_assert_info(struct hif_device *hifDevice, u32 TargetType)
-{
- u32 address;
- u32 regDumpArea = 0;
- int status;
- u32 regDumpValues[REGISTER_DUMP_LEN_MAX];
- u32 regDumpCount = 0;
- u32 i;
-
- do {
-
- /* the reg dump pointer is copied to the host interest area */
- address = HOST_INTEREST_ITEM_ADDRESS(TargetType, hi_failure_state);
- address = TARG_VTOP(TargetType, address);
-
- if (TargetType == TARGET_TYPE_AR6002) {
- regDumpCount = REG_DUMP_COUNT_AR6002;
- } else if (TargetType == TARGET_TYPE_AR6003) {
- regDumpCount = REG_DUMP_COUNT_AR6003;
- } else {
- A_ASSERT(0);
- }
-
- /* read RAM location through diagnostic window */
- status = ar6000_ReadRegDiag(hifDevice, &address, &regDumpArea);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR6K: Failed to get ptr to register dump area \n"));
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR6K: Location of register dump data: 0x%X \n",regDumpArea));
-
- if (regDumpArea == 0) {
- /* no reg dump */
- break;
- }
-
- regDumpArea = TARG_VTOP(TargetType, regDumpArea);
-
- /* fetch register dump data */
- status = ar6000_ReadDataDiag(hifDevice,
- regDumpArea,
- (u8 *)&regDumpValues[0],
- regDumpCount * (sizeof(u32)));
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR6K: Failed to get register dump \n"));
- break;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("AR6K: Register Dump: \n"));
-
- for (i = 0; i < regDumpCount; i++) {
- //ATHR_DISPLAY_MSG (_T(" %d : 0x%8.8X \n"), i, regDumpValues[i]);
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" %d : 0x%8.8X \n",i, regDumpValues[i]));
-
-#ifdef UNDER_CE
- /*
- * For Every logPrintf() Open the File so that in case of Crashes
- * We will have until the Last Message Flushed on to the File
- * So use logPrintf Sparingly..!!
- */
- tgtassertPrintf (ATH_DEBUG_TRC," %d: 0x%8.8X \n",i, regDumpValues[i]);
-#endif
- }
-
- } while (false);
-
-}
-
-/* set HTC/Mbox operational parameters, this can only be called when the target is in the
- * BMI phase */
-int ar6000_set_htc_params(struct hif_device *hifDevice,
- u32 TargetType,
- u32 MboxIsrYieldValue,
- u8 HtcControlBuffers)
-{
- int status;
- u32 blocksizes[HTC_MAILBOX_NUM_MAX];
-
- do {
- /* get the block sizes */
- status = HIFConfigureDevice(hifDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
- blocksizes, sizeof(blocksizes));
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_LOG_ERR,("Failed to get block size info from HIF layer...\n"));
- break;
- }
- /* note: we actually get the block size for mailbox 1, for SDIO the block
- * size on mailbox 0 is artificially set to 1 */
- /* must be a power of 2 */
- A_ASSERT((blocksizes[1] & (blocksizes[1] - 1)) == 0);
-
- if (HtcControlBuffers != 0) {
- /* set override for number of control buffers to use */
- blocksizes[1] |= ((u32)HtcControlBuffers) << 16;
- }
-
- /* set the host interest area for the block size */
- status = BMIWriteMemory(hifDevice,
- HOST_INTEREST_ITEM_ADDRESS(TargetType, hi_mbox_io_block_sz),
- (u8 *)&blocksizes[1],
- 4);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_LOG_ERR,("BMIWriteMemory for IO block size failed \n"));
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_LOG_INF,("Block Size Set: %d (target address:0x%X)\n",
- blocksizes[1], HOST_INTEREST_ITEM_ADDRESS(TargetType, hi_mbox_io_block_sz)));
-
- if (MboxIsrYieldValue != 0) {
- /* set the host interest area for the mbox ISR yield limit */
- status = BMIWriteMemory(hifDevice,
- HOST_INTEREST_ITEM_ADDRESS(TargetType, hi_mbox_isr_yield_limit),
- (u8 *)&MboxIsrYieldValue,
- 4);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_LOG_ERR,("BMIWriteMemory for yield limit failed \n"));
- break;
- }
- }
-
- } while (false);
-
- return status;
-}
-
-void DebugDumpBytes(u8 *buffer, u16 length, char *pDescription)
-{
- char stream[60];
- char byteOffsetStr[10];
- u32 i;
- u16 offset, count, byteOffset;
-
- A_PRINTF("<---------Dumping %d Bytes : %s ------>\n", length, pDescription);
-
- count = 0;
- offset = 0;
- byteOffset = 0;
- for(i = 0; i < length; i++) {
- A_SPRINTF(stream + offset, "%2.2X ", buffer[i]);
- count ++;
- offset += 3;
-
- if(count == 16) {
- count = 0;
- offset = 0;
- A_SPRINTF(byteOffsetStr,"%4.4X",byteOffset);
- A_PRINTF("[%s]: %s\n", byteOffsetStr, stream);
- A_MEMZERO(stream, 60);
- byteOffset += 16;
- }
- }
-
- if(offset != 0) {
- A_SPRINTF(byteOffsetStr,"%4.4X",byteOffset);
- A_PRINTF("[%s]: %s\n", byteOffsetStr, stream);
- }
-
- A_PRINTF("<------------------------------------------------->\n");
-}
-
-void a_dump_module_debug_info(ATH_DEBUG_MODULE_DBG_INFO *pInfo)
-{
- int i;
- struct ath_debug_mask_description *pDesc;
-
- if (pInfo == NULL) {
- return;
- }
-
- pDesc = pInfo->pMaskDescriptions;
-
- A_PRINTF("========================================================\n\n");
- A_PRINTF("Module Debug Info => Name : %s \n", pInfo->ModuleName);
- A_PRINTF(" => Descr. : %s \n", pInfo->ModuleDescription);
- A_PRINTF("\n Current mask => 0x%8.8X \n", pInfo->CurrentMask);
- A_PRINTF("\n Avail. Debug Masks :\n\n");
-
- for (i = 0; i < pInfo->MaxDescriptions; i++,pDesc++) {
- A_PRINTF(" => 0x%8.8X -- %s \n", pDesc->Mask, pDesc->Description);
- }
-
- if (0 == i) {
- A_PRINTF(" => * none defined * \n");
- }
-
- A_PRINTF("\n Standard Debug Masks :\n\n");
- /* print standard masks */
- A_PRINTF(" => 0x%8.8X -- Errors \n", ATH_DEBUG_ERR);
- A_PRINTF(" => 0x%8.8X -- Warnings \n", ATH_DEBUG_WARN);
- A_PRINTF(" => 0x%8.8X -- Informational \n", ATH_DEBUG_INFO);
- A_PRINTF(" => 0x%8.8X -- Tracing \n", ATH_DEBUG_TRC);
- A_PRINTF("\n========================================================\n");
-
-}
-
-
-static ATH_DEBUG_MODULE_DBG_INFO *FindModule(char *module_name)
-{
- ATH_DEBUG_MODULE_DBG_INFO *pInfo = g_pModuleInfoHead;
-
- if (!g_ModuleDebugInit) {
- return NULL;
- }
-
- while (pInfo != NULL) {
- /* TODO: need to use something other than strlen */
- if (memcmp(pInfo->ModuleName,module_name,strlen(module_name)) == 0) {
- break;
- }
- pInfo = pInfo->pNext;
- }
-
- return pInfo;
-}
-
-
-void a_register_module_debug_info(ATH_DEBUG_MODULE_DBG_INFO *pInfo)
-{
- if (!g_ModuleDebugInit) {
- return;
- }
-
- A_MUTEX_LOCK(&g_ModuleListLock);
-
- if (!(pInfo->Flags & ATH_DEBUG_INFO_FLAGS_REGISTERED)) {
- if (g_pModuleInfoHead == NULL) {
- g_pModuleInfoHead = pInfo;
- } else {
- pInfo->pNext = g_pModuleInfoHead;
- g_pModuleInfoHead = pInfo;
- }
- pInfo->Flags |= ATH_DEBUG_INFO_FLAGS_REGISTERED;
- }
-
- A_MUTEX_UNLOCK(&g_ModuleListLock);
-}
-
-void a_dump_module_debug_info_by_name(char *module_name)
-{
- ATH_DEBUG_MODULE_DBG_INFO *pInfo = g_pModuleInfoHead;
-
- if (!g_ModuleDebugInit) {
- return;
- }
-
- if (memcmp(module_name,"all",3) == 0) {
- /* dump all */
- while (pInfo != NULL) {
- a_dump_module_debug_info(pInfo);
- pInfo = pInfo->pNext;
- }
- return;
- }
-
- pInfo = FindModule(module_name);
-
- if (pInfo != NULL) {
- a_dump_module_debug_info(pInfo);
- }
-
-}
-
-int a_get_module_mask(char *module_name, u32 *pMask)
-{
- ATH_DEBUG_MODULE_DBG_INFO *pInfo = FindModule(module_name);
-
- if (NULL == pInfo) {
- return A_ERROR;
- }
-
- *pMask = pInfo->CurrentMask;
- return 0;
-}
-
-int a_set_module_mask(char *module_name, u32 Mask)
-{
- ATH_DEBUG_MODULE_DBG_INFO *pInfo = FindModule(module_name);
-
- if (NULL == pInfo) {
- return A_ERROR;
- }
-
- pInfo->CurrentMask = Mask;
- A_PRINTF("Module %s, new mask: 0x%8.8X \n",module_name,pInfo->CurrentMask);
- return 0;
-}
-
-
-void a_module_debug_support_init(void)
-{
- if (g_ModuleDebugInit) {
- return;
- }
- A_MUTEX_INIT(&g_ModuleListLock);
- g_pModuleInfoHead = NULL;
- g_ModuleDebugInit = true;
- A_REGISTER_MODULE_DEBUG_INFO(misc);
-}
-
-void a_module_debug_support_cleanup(void)
-{
- ATH_DEBUG_MODULE_DBG_INFO *pInfo = g_pModuleInfoHead;
- ATH_DEBUG_MODULE_DBG_INFO *pCur;
-
- if (!g_ModuleDebugInit) {
- return;
- }
-
- g_ModuleDebugInit = false;
-
- A_MUTEX_LOCK(&g_ModuleListLock);
-
- while (pInfo != NULL) {
- pCur = pInfo;
- pInfo = pInfo->pNext;
- pCur->pNext = NULL;
- /* clear registered flag */
- pCur->Flags &= ~ATH_DEBUG_INFO_FLAGS_REGISTERED;
- }
-
- A_MUTEX_UNLOCK(&g_ModuleListLock);
-
- A_MUTEX_DELETE(&g_ModuleListLock);
- g_pModuleInfoHead = NULL;
-}
-
- /* can only be called during bmi init stage */
-int ar6000_set_hci_bridge_flags(struct hif_device *hifDevice,
- u32 TargetType,
- u32 Flags)
-{
- int status = 0;
-
- do {
-
- if (TargetType != TARGET_TYPE_AR6003) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("Target Type:%d, does not support HCI bridging! \n",
- TargetType));
- break;
- }
-
- /* set hci bridge flags */
- status = BMIWriteMemory(hifDevice,
- HOST_INTEREST_ITEM_ADDRESS(TargetType, hi_hci_bridge_flags),
- (u8 *)&Flags,
- 4);
-
-
- } while (false);
-
- return status;
-}
-
diff --git a/drivers/staging/ath6kl/miscdrv/credit_dist.c b/drivers/staging/ath6kl/miscdrv/credit_dist.c
deleted file mode 100644
index c777e98a756..00000000000
--- a/drivers/staging/ath6kl/miscdrv/credit_dist.c
+++ /dev/null
@@ -1,417 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="credit_dist.c" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-
-#include "a_config.h"
-#include "athdefs.h"
-#include "a_osapi.h"
-#define ATH_MODULE_NAME misc
-#include "a_debug.h"
-#include "htc_api.h"
-#include "common_drv.h"
-
-/********* CREDIT DISTRIBUTION FUNCTIONS ******************************************/
-
-#define NO_VO_SERVICE 1 /* currently WMI only uses 3 data streams, so we leave VO service inactive */
-#define CONFIG_GIVE_LOW_PRIORITY_STREAMS_MIN_CREDITS 1
-
-#ifdef NO_VO_SERVICE
-#define DATA_SVCS_USED 3
-#else
-#define DATA_SVCS_USED 4
-#endif
-
-static void RedistributeCredits(struct common_credit_state_info *pCredInfo,
- struct htc_endpoint_credit_dist *pEPDistList);
-
-static void SeekCredits(struct common_credit_state_info *pCredInfo,
- struct htc_endpoint_credit_dist *pEPDistList);
-
-/* reduce an ep's credits back to a set limit */
-static INLINE void ReduceCredits(struct common_credit_state_info *pCredInfo,
- struct htc_endpoint_credit_dist *pEpDist,
- int Limit)
-{
- int credits;
-
- /* set the new limit */
- pEpDist->TxCreditsAssigned = Limit;
-
- if (pEpDist->TxCredits <= Limit) {
- return;
- }
-
- /* figure out how much to take away */
- credits = pEpDist->TxCredits - Limit;
- /* take them away */
- pEpDist->TxCredits -= credits;
- pCredInfo->CurrentFreeCredits += credits;
-}
-
-/* give an endpoint some credits from the free credit pool */
-#define GiveCredits(pCredInfo,pEpDist,credits) \
-{ \
- (pEpDist)->TxCredits += (credits); \
- (pEpDist)->TxCreditsAssigned += (credits); \
- (pCredInfo)->CurrentFreeCredits -= (credits); \
-}
-
-
-/* default credit init callback.
- * This function is called in the context of HTCStart() to setup initial (application-specific)
- * credit distributions */
-static void ar6000_credit_init(void *Context,
- struct htc_endpoint_credit_dist *pEPList,
- int TotalCredits)
-{
- struct htc_endpoint_credit_dist *pCurEpDist;
- int count;
- struct common_credit_state_info *pCredInfo = (struct common_credit_state_info *)Context;
-
- pCredInfo->CurrentFreeCredits = TotalCredits;
- pCredInfo->TotalAvailableCredits = TotalCredits;
-
- pCurEpDist = pEPList;
-
- /* run through the list and initialize */
- while (pCurEpDist != NULL) {
-
- /* set minimums for each endpoint */
- pCurEpDist->TxCreditsMin = pCurEpDist->TxCreditsPerMaxMsg;
-
-#ifdef CONFIG_GIVE_LOW_PRIORITY_STREAMS_MIN_CREDITS
-
- if (TotalCredits > 4)
- {
- if ((pCurEpDist->ServiceID == WMI_DATA_BK_SVC) || (pCurEpDist->ServiceID == WMI_DATA_BE_SVC)){
- /* assign at least min credits to lower than VO priority services */
- GiveCredits(pCredInfo,pCurEpDist,pCurEpDist->TxCreditsMin);
- /* force active */
- SET_EP_ACTIVE(pCurEpDist);
- }
- }
-
-#endif
-
- if (pCurEpDist->ServiceID == WMI_CONTROL_SVC) {
- /* give control service some credits */
- GiveCredits(pCredInfo,pCurEpDist,pCurEpDist->TxCreditsMin);
- /* control service is always marked active, it never goes inactive EVER */
- SET_EP_ACTIVE(pCurEpDist);
- } else if (pCurEpDist->ServiceID == WMI_DATA_BK_SVC) {
- /* this is the lowest priority data endpoint, save this off for easy access */
- pCredInfo->pLowestPriEpDist = pCurEpDist;
- }
-
- /* Streams have to be created (explicit | implicit)for all kinds
- * of traffic. BE endpoints are also inactive in the beginning.
- * When BE traffic starts it creates implicit streams that
- * redistributes credits.
- */
-
- /* note, all other endpoints have minimums set but are initially given NO credits.
- * Credits will be distributed as traffic activity demands */
- pCurEpDist = pCurEpDist->pNext;
- }
-
- if (pCredInfo->CurrentFreeCredits <= 0) {
- AR_DEBUG_PRINTF(ATH_LOG_INF, ("Not enough credits (%d) to do credit distributions \n", TotalCredits));
- A_ASSERT(false);
- return;
- }
-
- /* reset list */
- pCurEpDist = pEPList;
- /* now run through the list and set max operating credit limits for everyone */
- while (pCurEpDist != NULL) {
- if (pCurEpDist->ServiceID == WMI_CONTROL_SVC) {
- /* control service max is just 1 max message */
- pCurEpDist->TxCreditsNorm = pCurEpDist->TxCreditsPerMaxMsg;
- } else {
- /* for the remaining data endpoints, we assume that each TxCreditsPerMaxMsg are
- * the same.
- * We use a simple calculation here, we take the remaining credits and
- * determine how many max messages this can cover and then set each endpoint's
- * normal value equal to 3/4 this amount.
- * */
- count = (pCredInfo->CurrentFreeCredits/pCurEpDist->TxCreditsPerMaxMsg) * pCurEpDist->TxCreditsPerMaxMsg;
- count = (count * 3) >> 2;
- count = max(count,pCurEpDist->TxCreditsPerMaxMsg);
- /* set normal */
- pCurEpDist->TxCreditsNorm = count;
-
- }
- pCurEpDist = pCurEpDist->pNext;
- }
-
-}
-
-
-/* default credit distribution callback
- * This callback is invoked whenever endpoints require credit distributions.
- * A lock is held while this function is invoked, this function shall NOT block.
- * The pEPDistList is a list of distribution structures in prioritized order as
- * defined by the call to the HTCSetCreditDistribution() api.
- *
- */
-static void ar6000_credit_distribute(void *Context,
- struct htc_endpoint_credit_dist *pEPDistList,
- HTC_CREDIT_DIST_REASON Reason)
-{
- struct htc_endpoint_credit_dist *pCurEpDist;
- struct common_credit_state_info *pCredInfo = (struct common_credit_state_info *)Context;
-
- switch (Reason) {
- case HTC_CREDIT_DIST_SEND_COMPLETE :
- pCurEpDist = pEPDistList;
- /* we are given the start of the endpoint distribution list.
- * There may be one or more endpoints to service.
- * Run through the list and distribute credits */
- while (pCurEpDist != NULL) {
-
- if (pCurEpDist->TxCreditsToDist > 0) {
- /* return the credits back to the endpoint */
- pCurEpDist->TxCredits += pCurEpDist->TxCreditsToDist;
- /* always zero out when we are done */
- pCurEpDist->TxCreditsToDist = 0;
-
- if (pCurEpDist->TxCredits > pCurEpDist->TxCreditsAssigned) {
- /* reduce to the assigned limit, previous credit reductions
- * could have caused the limit to change */
- ReduceCredits(pCredInfo, pCurEpDist, pCurEpDist->TxCreditsAssigned);
- }
-
- if (pCurEpDist->TxCredits > pCurEpDist->TxCreditsNorm) {
- /* oversubscribed endpoints need to reduce back to normal */
- ReduceCredits(pCredInfo, pCurEpDist, pCurEpDist->TxCreditsNorm);
- }
-
- if (!IS_EP_ACTIVE(pCurEpDist)) {
- /* endpoint is inactive, now check for messages waiting for credits */
- if (pCurEpDist->TxQueueDepth == 0) {
- /* EP is inactive and there are no pending messages,
- * reduce credits back to zero to recover credits */
- ReduceCredits(pCredInfo, pCurEpDist, 0);
- }
- }
- }
-
- pCurEpDist = pCurEpDist->pNext;
- }
-
- break;
-
- case HTC_CREDIT_DIST_ACTIVITY_CHANGE :
- RedistributeCredits(pCredInfo,pEPDistList);
- break;
- case HTC_CREDIT_DIST_SEEK_CREDITS :
- SeekCredits(pCredInfo,pEPDistList);
- break;
- case HTC_DUMP_CREDIT_STATE :
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Credit Distribution, total : %d, free : %d\n",
- pCredInfo->TotalAvailableCredits, pCredInfo->CurrentFreeCredits));
- break;
- default:
- break;
-
- }
-
- /* sanity checks done after each distribution action */
- A_ASSERT(pCredInfo->CurrentFreeCredits <= pCredInfo->TotalAvailableCredits);
- A_ASSERT(pCredInfo->CurrentFreeCredits >= 0);
-
-}
-
-/* redistribute credits based on activity change */
-static void RedistributeCredits(struct common_credit_state_info *pCredInfo,
- struct htc_endpoint_credit_dist *pEPDistList)
-{
- struct htc_endpoint_credit_dist *pCurEpDist = pEPDistList;
-
- /* walk through the list and remove credits from inactive endpoints */
- while (pCurEpDist != NULL) {
-
-#ifdef CONFIG_GIVE_LOW_PRIORITY_STREAMS_MIN_CREDITS
-
- if ((pCurEpDist->ServiceID == WMI_DATA_BK_SVC) || (pCurEpDist->ServiceID == WMI_DATA_BE_SVC)) {
- /* force low priority streams to always be active to retain their minimum credit distribution */
- SET_EP_ACTIVE(pCurEpDist);
- }
-#endif
-
- if (pCurEpDist->ServiceID != WMI_CONTROL_SVC) {
- if (!IS_EP_ACTIVE(pCurEpDist)) {
- if (pCurEpDist->TxQueueDepth == 0) {
- /* EP is inactive and there are no pending messages, reduce credits back to zero */
- ReduceCredits(pCredInfo, pCurEpDist, 0);
- } else {
- /* we cannot zero the credits assigned to this EP, but to keep
- * the credits available for these leftover packets, reduce to
- * a minimum */
- ReduceCredits(pCredInfo, pCurEpDist, pCurEpDist->TxCreditsMin);
- }
- }
- }
-
- /* NOTE in the active case, we do not need to do anything further,
- * when an EP goes active and needs credits, HTC will call into
- * our distribution function using a reason code of HTC_CREDIT_DIST_SEEK_CREDITS */
-
- pCurEpDist = pCurEpDist->pNext;
- }
-
-}
-
-/* HTC has an endpoint that needs credits, pEPDist is the endpoint in question */
-static void SeekCredits(struct common_credit_state_info *pCredInfo,
- struct htc_endpoint_credit_dist *pEPDist)
-{
- struct htc_endpoint_credit_dist *pCurEpDist;
- int credits = 0;
- int need;
-
- do {
-
- if (pEPDist->ServiceID == WMI_CONTROL_SVC) {
- /* we never oversubscribe on the control service, this is not
- * a high performance path and the target never holds onto control
- * credits for too long */
- break;
- }
-
-#ifdef CONFIG_GIVE_LOW_PRIORITY_STREAMS_MIN_CREDITS
- if (pEPDist->ServiceID == WMI_DATA_VI_SVC) {
- if ((pEPDist->TxCreditsAssigned >= pEPDist->TxCreditsNorm)) {
- /* limit VI service from oversubscribing */
- break;
- }
- }
-
- if (pEPDist->ServiceID == WMI_DATA_VO_SVC) {
- if ((pEPDist->TxCreditsAssigned >= pEPDist->TxCreditsNorm)) {
- /* limit VO service from oversubscribing */
- break;
- }
- }
-#else
- if (pEPDist->ServiceID == WMI_DATA_VI_SVC) {
- if ((pEPDist->TxCreditsAssigned >= pEPDist->TxCreditsNorm) ||
- (pCredInfo->CurrentFreeCredits <= pEPDist->TxCreditsPerMaxMsg)) {
- /* limit VI service from oversubscribing */
- /* at least one free credit will not be used by VI */
- break;
- }
- }
-
- if (pEPDist->ServiceID == WMI_DATA_VO_SVC) {
- if ((pEPDist->TxCreditsAssigned >= pEPDist->TxCreditsNorm) ||
- (pCredInfo->CurrentFreeCredits <= pEPDist->TxCreditsPerMaxMsg)) {
- /* limit VO service from oversubscribing */
- /* at least one free credit will not be used by VO */
- break;
- }
- }
-#endif
-
- /* for all other services, we follow a simple algorithm of
- * 1. checking the free pool for credits
- * 2. checking lower priority endpoints for credits to take */
-
- /* give what we can */
- credits = min(pCredInfo->CurrentFreeCredits,pEPDist->TxCreditsSeek);
-
- if (credits >= pEPDist->TxCreditsSeek) {
- /* we found some to fulfill the seek request */
- break;
- }
-
- /* we don't have enough in the free pool, try taking away from lower priority services
- *
- * The rule for taking away credits:
- * 1. Only take from lower priority endpoints
- * 2. Only take what is allocated above the minimum (never starve an endpoint completely)
- * 3. Only take what you need.
- *
- * */
-
- /* starting at the lowest priority */
- pCurEpDist = pCredInfo->pLowestPriEpDist;
-
- /* work backwards until we hit the endpoint again */
- while (pCurEpDist != pEPDist) {
- /* calculate how many we need so far */
- need = pEPDist->TxCreditsSeek - pCredInfo->CurrentFreeCredits;
-
- if ((pCurEpDist->TxCreditsAssigned - need) >= pCurEpDist->TxCreditsMin) {
- /* the current one has been allocated more than it's minimum and it
- * has enough credits assigned above it's minimum to fulfill our need
- * try to take away just enough to fulfill our need */
- ReduceCredits(pCredInfo,
- pCurEpDist,
- pCurEpDist->TxCreditsAssigned - need);
-
- if (pCredInfo->CurrentFreeCredits >= pEPDist->TxCreditsSeek) {
- /* we have enough */
- break;
- }
- }
-
- pCurEpDist = pCurEpDist->pPrev;
- }
-
- /* return what we can get */
- credits = min(pCredInfo->CurrentFreeCredits,pEPDist->TxCreditsSeek);
-
- } while (false);
-
- /* did we find some credits? */
- if (credits) {
- /* give what we can */
- GiveCredits(pCredInfo, pEPDist, credits);
- }
-
-}
-
-/* initialize and setup credit distribution */
-int ar6000_setup_credit_dist(HTC_HANDLE HTCHandle, struct common_credit_state_info *pCredInfo)
-{
- HTC_SERVICE_ID servicepriority[5];
-
- A_MEMZERO(pCredInfo,sizeof(struct common_credit_state_info));
-
- servicepriority[0] = WMI_CONTROL_SVC; /* highest */
- servicepriority[1] = WMI_DATA_VO_SVC;
- servicepriority[2] = WMI_DATA_VI_SVC;
- servicepriority[3] = WMI_DATA_BE_SVC;
- servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
-
- /* set callbacks and priority list */
- HTCSetCreditDistribution(HTCHandle,
- pCredInfo,
- ar6000_credit_distribute,
- ar6000_credit_init,
- servicepriority,
- 5);
-
- return 0;
-}
-
diff --git a/drivers/staging/ath6kl/miscdrv/miscdrv.h b/drivers/staging/ath6kl/miscdrv/miscdrv.h
deleted file mode 100644
index 41be5670db4..00000000000
--- a/drivers/staging/ath6kl/miscdrv/miscdrv.h
+++ /dev/null
@@ -1,42 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="miscdrv.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _MISCDRV_H
-#define _MISCDRV_H
-
-
-#define HOST_INTEREST_ITEM_ADDRESS(target, item) \
- AR6002_HOST_INTEREST_ITEM_ADDRESS(item)
-
-u32 ar6kRev2Array[][128] = {
- {0xFFFF, 0xFFFF}, // No Patches
- };
-
-#define CFG_REV2_ITEMS 0 // no patches so far
-#define AR6K_RESET_ADDR 0x4000
-#define AR6K_RESET_VAL 0x100
-
-#define EEPROM_SZ 768
-#define EEPROM_WAIT_LIMIT 4
-
-#endif
-
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
deleted file mode 100644
index 32ee39ad00d..00000000000
--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
+++ /dev/null
@@ -1,6267 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-/*
- * This driver is a pseudo ethernet driver to access the Atheros AR6000
- * WLAN Device
- */
-
-#include "ar6000_drv.h"
-#include "cfg80211.h"
-#include "htc.h"
-#include "wmi_filter_linux.h"
-#include "epping_test.h"
-#include "wlan_config.h"
-#include "ar3kconfig.h"
-#include "ar6k_pal.h"
-#include "AR6002/addrs.h"
-
-
-/* LINUX_HACK_FUDGE_FACTOR -- this is used to provide a workaround for linux behavior. When
- * the meta data was added to the header it was found that linux did not correctly provide
- * enough headroom. However when more headroom was requested beyond what was truly needed
- * Linux gave the requested headroom. Therefore to get the necessary headroom from Linux
- * the driver requests more than is needed by the amount = LINUX_HACK_FUDGE_FACTOR */
-#define LINUX_HACK_FUDGE_FACTOR 16
-#define BDATA_BDADDR_OFFSET 28
-
-u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-u8 null_mac[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
-
-#ifdef DEBUG
-
-#define ATH_DEBUG_DBG_LOG ATH_DEBUG_MAKE_MODULE_MASK(0)
-#define ATH_DEBUG_WLAN_CONNECT ATH_DEBUG_MAKE_MODULE_MASK(1)
-#define ATH_DEBUG_WLAN_SCAN ATH_DEBUG_MAKE_MODULE_MASK(2)
-#define ATH_DEBUG_WLAN_TX ATH_DEBUG_MAKE_MODULE_MASK(3)
-#define ATH_DEBUG_WLAN_RX ATH_DEBUG_MAKE_MODULE_MASK(4)
-#define ATH_DEBUG_HTC_RAW ATH_DEBUG_MAKE_MODULE_MASK(5)
-#define ATH_DEBUG_HCI_BRIDGE ATH_DEBUG_MAKE_MODULE_MASK(6)
-
-static struct ath_debug_mask_description driver_debug_desc[] = {
- { ATH_DEBUG_DBG_LOG , "Target Debug Logs"},
- { ATH_DEBUG_WLAN_CONNECT , "WLAN connect"},
- { ATH_DEBUG_WLAN_SCAN , "WLAN scan"},
- { ATH_DEBUG_WLAN_TX , "WLAN Tx"},
- { ATH_DEBUG_WLAN_RX , "WLAN Rx"},
- { ATH_DEBUG_HTC_RAW , "HTC Raw IF tracing"},
- { ATH_DEBUG_HCI_BRIDGE , "HCI Bridge Setup"},
- { ATH_DEBUG_HCI_RECV , "HCI Recv tracing"},
- { ATH_DEBUG_HCI_DUMP , "HCI Packet dumps"},
-};
-
-ATH_DEBUG_INSTANTIATE_MODULE_VAR(driver,
- "driver",
- "Linux Driver Interface",
- ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_WLAN_SCAN |
- ATH_DEBUG_HCI_BRIDGE,
- ATH_DEBUG_DESCRIPTION_COUNT(driver_debug_desc),
- driver_debug_desc);
-
-#endif
-
-
-#define IS_MAC_NULL(mac) (mac[0]==0 && mac[1]==0 && mac[2]==0 && mac[3]==0 && mac[4]==0 && mac[5]==0)
-#define IS_MAC_BCAST(mac) (*mac==0xff)
-
-#define DESCRIPTION "Driver to access the Atheros AR600x Device, version " __stringify(__VER_MAJOR_) "." __stringify(__VER_MINOR_) "." __stringify(__VER_PATCH_) "." __stringify(__BUILD_NUMBER_)
-
-MODULE_AUTHOR("Atheros Communications, Inc.");
-MODULE_DESCRIPTION(DESCRIPTION);
-MODULE_LICENSE("Dual BSD/GPL");
-
-#ifndef REORG_APTC_HEURISTICS
-#undef ADAPTIVE_POWER_THROUGHPUT_CONTROL
-#endif /* REORG_APTC_HEURISTICS */
-
-#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
-#define APTC_TRAFFIC_SAMPLING_INTERVAL 100 /* msec */
-#define APTC_UPPER_THROUGHPUT_THRESHOLD 3000 /* Kbps */
-#define APTC_LOWER_THROUGHPUT_THRESHOLD 2000 /* Kbps */
-
-typedef struct aptc_traffic_record {
- bool timerScheduled;
- struct timeval samplingTS;
- unsigned long bytesReceived;
- unsigned long bytesTransmitted;
-} APTC_TRAFFIC_RECORD;
-
-A_TIMER aptcTimer;
-APTC_TRAFFIC_RECORD aptcTR;
-#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-// callbacks registered by HCI transport driver
-struct hci_transport_callbacks ar6kHciTransCallbacks = { NULL };
-#endif
-
-unsigned int processDot11Hdr = 0;
-
-char ifname[IFNAMSIZ] = {0,};
-
-int wlaninitmode = WLAN_INIT_MODE_DEFAULT;
-static bool bypasswmi;
-unsigned int debuglevel = 0;
-int tspecCompliance = ATHEROS_COMPLIANCE;
-unsigned int busspeedlow = 0;
-unsigned int onebitmode = 0;
-unsigned int skipflash = 0;
-unsigned int wmitimeout = 2;
-unsigned int wlanNodeCaching = 1;
-unsigned int enableuartprint = ENABLEUARTPRINT_DEFAULT;
-unsigned int logWmiRawMsgs = 0;
-unsigned int enabletimerwar = 0;
-unsigned int num_device = 1;
-unsigned int regscanmode;
-unsigned int fwmode = 1;
-unsigned int mbox_yield_limit = 99;
-unsigned int enablerssicompensation = 0;
-int reduce_credit_dribble = 1 + HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_ONE_HALF;
-int allow_trace_signal = 0;
-#ifdef CONFIG_HOST_TCMD_SUPPORT
-unsigned int testmode =0;
-#endif
-
-unsigned int irqprocmode = HIF_DEVICE_IRQ_SYNC_ONLY;//HIF_DEVICE_IRQ_ASYNC_SYNC;
-unsigned int panic_on_assert = 1;
-unsigned int nohifscattersupport = NOHIFSCATTERSUPPORT_DEFAULT;
-
-unsigned int setuphci = SETUPHCI_DEFAULT;
-unsigned int loghci = 0;
-unsigned int setupbtdev = SETUPBTDEV_DEFAULT;
-#ifndef EXPORT_HCI_BRIDGE_INTERFACE
-unsigned int ar3khcibaud = AR3KHCIBAUD_DEFAULT;
-unsigned int hciuartscale = HCIUARTSCALE_DEFAULT;
-unsigned int hciuartstep = HCIUARTSTEP_DEFAULT;
-#endif
-unsigned int csumOffload=0;
-unsigned int csumOffloadTest=0;
-unsigned int eppingtest=0;
-unsigned int mac_addr_method;
-unsigned int firmware_bridge;
-
-module_param_string(ifname, ifname, sizeof(ifname), 0644);
-module_param(wlaninitmode, int, 0644);
-module_param(bypasswmi, bool, 0644);
-module_param(debuglevel, uint, 0644);
-module_param(tspecCompliance, int, 0644);
-module_param(onebitmode, uint, 0644);
-module_param(busspeedlow, uint, 0644);
-module_param(skipflash, uint, 0644);
-module_param(wmitimeout, uint, 0644);
-module_param(wlanNodeCaching, uint, 0644);
-module_param(logWmiRawMsgs, uint, 0644);
-module_param(enableuartprint, uint, 0644);
-module_param(enabletimerwar, uint, 0644);
-module_param(fwmode, uint, 0644);
-module_param(mbox_yield_limit, uint, 0644);
-module_param(reduce_credit_dribble, int, 0644);
-module_param(allow_trace_signal, int, 0644);
-module_param(enablerssicompensation, uint, 0644);
-module_param(processDot11Hdr, uint, 0644);
-module_param(csumOffload, uint, 0644);
-#ifdef CONFIG_HOST_TCMD_SUPPORT
-module_param(testmode, uint, 0644);
-#endif
-module_param(irqprocmode, uint, 0644);
-module_param(nohifscattersupport, uint, 0644);
-module_param(panic_on_assert, uint, 0644);
-module_param(setuphci, uint, 0644);
-module_param(loghci, uint, 0644);
-module_param(setupbtdev, uint, 0644);
-#ifndef EXPORT_HCI_BRIDGE_INTERFACE
-module_param(ar3khcibaud, uint, 0644);
-module_param(hciuartscale, uint, 0644);
-module_param(hciuartstep, uint, 0644);
-#endif
-module_param(eppingtest, uint, 0644);
-
-/* in 2.6.10 and later this is now a pointer to a uint */
-unsigned int _mboxnum = HTC_MAILBOX_NUM_MAX;
-#define mboxnum &_mboxnum
-
-#ifdef DEBUG
-u32 g_dbg_flags = DBG_DEFAULTS;
-unsigned int debugflags = 0;
-int debugdriver = 0;
-unsigned int debughtc = 0;
-unsigned int debugbmi = 0;
-unsigned int debughif = 0;
-unsigned int txcreditsavailable[HTC_MAILBOX_NUM_MAX] = {0};
-unsigned int txcreditsconsumed[HTC_MAILBOX_NUM_MAX] = {0};
-unsigned int txcreditintrenable[HTC_MAILBOX_NUM_MAX] = {0};
-unsigned int txcreditintrenableaggregate[HTC_MAILBOX_NUM_MAX] = {0};
-module_param(debugflags, uint, 0644);
-module_param(debugdriver, int, 0644);
-module_param(debughtc, uint, 0644);
-module_param(debugbmi, uint, 0644);
-module_param(debughif, uint, 0644);
-module_param_array(txcreditsavailable, uint, mboxnum, 0644);
-module_param_array(txcreditsconsumed, uint, mboxnum, 0644);
-module_param_array(txcreditintrenable, uint, mboxnum, 0644);
-module_param_array(txcreditintrenableaggregate, uint, mboxnum, 0644);
-
-#endif /* DEBUG */
-
-unsigned int resetok = 1;
-unsigned int tx_attempt[HTC_MAILBOX_NUM_MAX] = {0};
-unsigned int tx_post[HTC_MAILBOX_NUM_MAX] = {0};
-unsigned int tx_complete[HTC_MAILBOX_NUM_MAX] = {0};
-unsigned int hifBusRequestNumMax = 40;
-unsigned int war23838_disabled = 0;
-#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
-unsigned int enableAPTCHeuristics = 1;
-#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
-module_param_array(tx_attempt, uint, mboxnum, 0644);
-module_param_array(tx_post, uint, mboxnum, 0644);
-module_param_array(tx_complete, uint, mboxnum, 0644);
-module_param(hifBusRequestNumMax, uint, 0644);
-module_param(war23838_disabled, uint, 0644);
-module_param(resetok, uint, 0644);
-#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
-module_param(enableAPTCHeuristics, uint, 0644);
-#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
-
-#ifdef BLOCK_TX_PATH_FLAG
-int blocktx = 0;
-module_param(blocktx, int, 0644);
-#endif /* BLOCK_TX_PATH_FLAG */
-
-typedef struct user_rssi_compensation_t {
- u16 customerID;
- union {
- u16 a_enable;
- u16 bg_enable;
- u16 enable;
- };
- s16 bg_param_a;
- s16 bg_param_b;
- s16 a_param_a;
- s16 a_param_b;
- u32 reserved;
-} USER_RSSI_CPENSATION;
-
-static USER_RSSI_CPENSATION rssi_compensation_param;
-
-static s16 rssi_compensation_table[96];
-
-int reconnect_flag = 0;
-static ar6k_pal_config_t ar6k_pal_config_g;
-
-/* Function declarations */
-static int ar6000_init_module(void);
-static void ar6000_cleanup_module(void);
-
-int ar6000_init(struct net_device *dev);
-static int ar6000_open(struct net_device *dev);
-static int ar6000_close(struct net_device *dev);
-static void ar6000_init_control_info(struct ar6_softc *ar);
-static int ar6000_data_tx(struct sk_buff *skb, struct net_device *dev);
-
-void ar6000_destroy(struct net_device *dev, unsigned int unregister);
-static void ar6000_detect_error(unsigned long ptr);
-static void ar6000_set_multicast_list(struct net_device *dev);
-static struct net_device_stats *ar6000_get_stats(struct net_device *dev);
-
-static void disconnect_timer_handler(unsigned long ptr);
-
-void read_rssi_compensation_param(struct ar6_softc *ar);
-
-/*
- * HTC service connection handlers
- */
-static int ar6000_avail_ev(void *context, void *hif_handle);
-
-static int ar6000_unavail_ev(void *context, void *hif_handle);
-
-int ar6000_configure_target(struct ar6_softc *ar);
-
-static void ar6000_target_failure(void *Instance, int Status);
-
-static void ar6000_rx(void *Context, struct htc_packet *pPacket);
-
-static void ar6000_rx_refill(void *Context,HTC_ENDPOINT_ID Endpoint);
-
-static void ar6000_tx_complete(void *Context, struct htc_packet_queue *pPackets);
-
-static HTC_SEND_FULL_ACTION ar6000_tx_queue_full(void *Context, struct htc_packet *pPacket);
-
-static void ar6000_alloc_netbufs(A_NETBUF_QUEUE_T *q, u16 num);
-static void ar6000_deliver_frames_to_nw_stack(void * dev, void *osbuf);
-//static void ar6000_deliver_frames_to_bt_stack(void * dev, void *osbuf);
-
-static struct htc_packet *ar6000_alloc_amsdu_rxbuf(void *Context, HTC_ENDPOINT_ID Endpoint, int Length);
-
-static void ar6000_refill_amsdu_rxbufs(struct ar6_softc *ar, int Count);
-
-static void ar6000_cleanup_amsdu_rxbufs(struct ar6_softc *ar);
-
-static ssize_t
-ar6000_sysfs_bmi_read(struct file *fp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count);
-
-static ssize_t
-ar6000_sysfs_bmi_write(struct file *fp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count);
-
-static int
-ar6000_sysfs_bmi_init(struct ar6_softc *ar);
-
-void ar6k_cleanup_hci_pal(struct ar6_softc *ar);
-
-static void
-ar6000_sysfs_bmi_deinit(struct ar6_softc *ar);
-
-int
-ar6000_sysfs_bmi_get_config(struct ar6_softc *ar, u32 mode);
-
-/*
- * Static variables
- */
-
-struct net_device *ar6000_devices[MAX_AR6000];
-static int is_netdev_registered;
-DECLARE_WAIT_QUEUE_HEAD(arEvent);
-static void ar6000_cookie_init(struct ar6_softc *ar);
-static void ar6000_cookie_cleanup(struct ar6_softc *ar);
-static void ar6000_free_cookie(struct ar6_softc *ar, struct ar_cookie * cookie);
-static struct ar_cookie *ar6000_alloc_cookie(struct ar6_softc *ar);
-
-static int ar6000_reinstall_keys(struct ar6_softc *ar,u8 key_op_ctrl);
-
-#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
-struct net_device *arApNetDev;
-#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
-
-static struct ar_cookie s_ar_cookie_mem[MAX_COOKIE_NUM];
-
-#define HOST_INTEREST_ITEM_ADDRESS(ar, item) \
- (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_HOST_INTEREST_ITEM_ADDRESS(item) : \
- (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
-
-
-static struct net_device_ops ar6000_netdev_ops = {
- .ndo_init = NULL,
- .ndo_open = ar6000_open,
- .ndo_stop = ar6000_close,
- .ndo_get_stats = ar6000_get_stats,
- .ndo_start_xmit = ar6000_data_tx,
- .ndo_set_multicast_list = ar6000_set_multicast_list,
-};
-
-/* Debug log support */
-
-/*
- * Flag to govern whether the debug logs should be parsed in the kernel
- * or reported to the application.
- */
-#define REPORT_DEBUG_LOGS_TO_APP
-
-int
-ar6000_set_host_app_area(struct ar6_softc *ar)
-{
- u32 address, data;
- struct host_app_area_s host_app_area;
-
- /* Fetch the address of the host_app_area_s instance in the host interest area */
- address = TARG_VTOP(ar->arTargetType, HOST_INTEREST_ITEM_ADDRESS(ar, hi_app_host_interest));
- if (ar6000_ReadRegDiag(ar->arHifDevice, &address, &data) != 0) {
- return A_ERROR;
- }
- address = TARG_VTOP(ar->arTargetType, data);
- host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION;
- if (ar6000_WriteDataDiag(ar->arHifDevice, address,
- (u8 *)&host_app_area,
- sizeof(struct host_app_area_s)) != 0)
- {
- return A_ERROR;
- }
-
- return 0;
-}
-
-u32 dbglog_get_debug_hdr_ptr(struct ar6_softc *ar)
-{
- u32 param;
- u32 address;
- int status;
-
- address = TARG_VTOP(ar->arTargetType, HOST_INTEREST_ITEM_ADDRESS(ar, hi_dbglog_hdr));
- if ((status = ar6000_ReadDataDiag(ar->arHifDevice, address,
- (u8 *)&param, 4)) != 0)
- {
- param = 0;
- }
-
- return param;
-}
-
-/*
- * The dbglog module has been initialized. Its ok to access the relevant
- * data stuctures over the diagnostic window.
- */
-void
-ar6000_dbglog_init_done(struct ar6_softc *ar)
-{
- ar->dbglog_init_done = true;
-}
-
-u32 dbglog_get_debug_fragment(s8 *datap, u32 len, u32 limit)
-{
- s32 *buffer;
- u32 count;
- u32 numargs;
- u32 length;
- u32 fraglen;
-
- count = fraglen = 0;
- buffer = (s32 *)datap;
- length = (limit >> 2);
-
- if (len <= limit) {
- fraglen = len;
- } else {
- while (count < length) {
- numargs = DBGLOG_GET_NUMARGS(buffer[count]);
- fraglen = (count << 2);
- count += numargs + 1;
- }
- }
-
- return fraglen;
-}
-
-void
-dbglog_parse_debug_logs(s8 *datap, u32 len)
-{
- s32 *buffer;
- u32 count;
- u32 timestamp;
- u32 debugid;
- u32 moduleid;
- u32 numargs;
- u32 length;
-
- count = 0;
- buffer = (s32 *)datap;
- length = (len >> 2);
- while (count < length) {
- debugid = DBGLOG_GET_DBGID(buffer[count]);
- moduleid = DBGLOG_GET_MODULEID(buffer[count]);
- numargs = DBGLOG_GET_NUMARGS(buffer[count]);
- timestamp = DBGLOG_GET_TIMESTAMP(buffer[count]);
- switch (numargs) {
- case 0:
- AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("%d %d (%d)\n", moduleid, debugid, timestamp));
- break;
-
- case 1:
- AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("%d %d (%d): 0x%x\n", moduleid, debugid,
- timestamp, buffer[count+1]));
- break;
-
- case 2:
- AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("%d %d (%d): 0x%x, 0x%x\n", moduleid, debugid,
- timestamp, buffer[count+1], buffer[count+2]));
- break;
-
- default:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Invalid args: %d\n", numargs));
- }
- count += numargs + 1;
- }
-}
-
-int
-ar6000_dbglog_get_debug_logs(struct ar6_softc *ar)
-{
- u32 data[8]; /* Should be able to accommodate struct dbglog_buf_s */
- u32 address;
- u32 length;
- u32 dropped;
- u32 firstbuf;
- u32 debug_hdr_ptr;
-
- if (!ar->dbglog_init_done) return A_ERROR;
-
-
- AR6000_SPIN_LOCK(&ar->arLock, 0);
-
- if (ar->dbgLogFetchInProgress) {
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
- return A_EBUSY;
- }
-
- /* block out others */
- ar->dbgLogFetchInProgress = true;
-
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
-
- debug_hdr_ptr = dbglog_get_debug_hdr_ptr(ar);
- printk("debug_hdr_ptr: 0x%x\n", debug_hdr_ptr);
-
- /* Get the contents of the ring buffer */
- if (debug_hdr_ptr) {
- address = TARG_VTOP(ar->arTargetType, debug_hdr_ptr);
- length = 4 /* sizeof(dbuf) */ + 4 /* sizeof(dropped) */;
- A_MEMZERO(data, sizeof(data));
- ar6000_ReadDataDiag(ar->arHifDevice, address, (u8 *)data, length);
- address = TARG_VTOP(ar->arTargetType, data[0] /* dbuf */);
- firstbuf = address;
- dropped = data[1]; /* dropped */
- length = 4 /* sizeof(next) */ + 4 /* sizeof(buffer) */ + 4 /* sizeof(bufsize) */ + 4 /* sizeof(length) */ + 4 /* sizeof(count) */ + 4 /* sizeof(free) */;
- A_MEMZERO(data, sizeof(data));
- ar6000_ReadDataDiag(ar->arHifDevice, address, (u8 *)&data, length);
-
- do {
- address = TARG_VTOP(ar->arTargetType, data[1] /* buffer*/);
- length = data[3]; /* length */
- if ((length) && (length <= data[2] /* bufsize*/)) {
- /* Rewind the index if it is about to overrun the buffer */
- if (ar->log_cnt > (DBGLOG_HOST_LOG_BUFFER_SIZE - length)) {
- ar->log_cnt = 0;
- }
- if(0 != ar6000_ReadDataDiag(ar->arHifDevice, address,
- (u8 *)&ar->log_buffer[ar->log_cnt], length))
- {
- break;
- }
- ar6000_dbglog_event(ar, dropped, (s8 *)&ar->log_buffer[ar->log_cnt], length);
- ar->log_cnt += length;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_DBG_LOG,("Length: %d (Total size: %d)\n",
- data[3], data[2]));
- }
-
- address = TARG_VTOP(ar->arTargetType, data[0] /* next */);
- length = 4 /* sizeof(next) */ + 4 /* sizeof(buffer) */ + 4 /* sizeof(bufsize) */ + 4 /* sizeof(length) */ + 4 /* sizeof(count) */ + 4 /* sizeof(free) */;
- A_MEMZERO(data, sizeof(data));
- if(0 != ar6000_ReadDataDiag(ar->arHifDevice, address,
- (u8 *)&data, length))
- {
- break;
- }
-
- } while (address != firstbuf);
- }
-
- ar->dbgLogFetchInProgress = false;
-
- return 0;
-}
-
-void
-ar6000_dbglog_event(struct ar6_softc *ar, u32 dropped,
- s8 *buffer, u32 length)
-{
-#ifdef REPORT_DEBUG_LOGS_TO_APP
- #define MAX_WIRELESS_EVENT_SIZE 252
- /*
- * Break it up into chunks of MAX_WIRELESS_EVENT_SIZE bytes of messages.
- * There seems to be a limitation on the length of message that could be
- * transmitted to the user app via this mechanism.
- */
- u32 send, sent;
-
- sent = 0;
- send = dbglog_get_debug_fragment(&buffer[sent], length - sent,
- MAX_WIRELESS_EVENT_SIZE);
- while (send) {
- sent += send;
- send = dbglog_get_debug_fragment(&buffer[sent], length - sent,
- MAX_WIRELESS_EVENT_SIZE);
- }
-#else
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Dropped logs: 0x%x\nDebug info length: %d\n",
- dropped, length));
-
- /* Interpret the debug logs */
- dbglog_parse_debug_logs((s8 *)buffer, length);
-#endif /* REPORT_DEBUG_LOGS_TO_APP */
-}
-
-
-static int __init
-ar6000_init_module(void)
-{
- static int probed = 0;
- int r;
- OSDRV_CALLBACKS osdrvCallbacks;
-
- a_module_debug_support_init();
-
-#ifdef DEBUG
- /* check for debug mask overrides */
- if (debughtc != 0) {
- ATH_DEBUG_SET_DEBUG_MASK(htc,debughtc);
- }
- if (debugbmi != 0) {
- ATH_DEBUG_SET_DEBUG_MASK(bmi,debugbmi);
- }
- if (debughif != 0) {
- ATH_DEBUG_SET_DEBUG_MASK(hif,debughif);
- }
- if (debugdriver != 0) {
- ATH_DEBUG_SET_DEBUG_MASK(driver,debugdriver);
- }
-
-#endif
-
- A_REGISTER_MODULE_DEBUG_INFO(driver);
-
- A_MEMZERO(&osdrvCallbacks,sizeof(osdrvCallbacks));
- osdrvCallbacks.deviceInsertedHandler = ar6000_avail_ev;
- osdrvCallbacks.deviceRemovedHandler = ar6000_unavail_ev;
-#ifdef CONFIG_PM
- osdrvCallbacks.deviceSuspendHandler = ar6000_suspend_ev;
- osdrvCallbacks.deviceResumeHandler = ar6000_resume_ev;
- osdrvCallbacks.devicePowerChangeHandler = ar6000_power_change_ev;
-#endif
-
-#ifdef DEBUG
- /* Set the debug flags if specified at load time */
- if(debugflags != 0)
- {
- g_dbg_flags = debugflags;
- }
-#endif
-
- if (probed) {
- return -ENODEV;
- }
- probed++;
-
-#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
- memset(&aptcTR, 0, sizeof(APTC_TRAFFIC_RECORD));
-#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
-
- r = HIFInit(&osdrvCallbacks);
- if (r)
- return r;
-
- return 0;
-}
-
-static void __exit
-ar6000_cleanup_module(void)
-{
- int i = 0;
- struct net_device *ar6000_netdev;
-
-#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
- /* Delete the Adaptive Power Control timer */
- if (timer_pending(&aptcTimer)) {
- del_timer_sync(&aptcTimer);
- }
-#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
-
- for (i=0; i < MAX_AR6000; i++) {
- if (ar6000_devices[i] != NULL) {
- ar6000_netdev = ar6000_devices[i];
- ar6000_devices[i] = NULL;
- ar6000_destroy(ar6000_netdev, 1);
- }
- }
-
- HIFShutDownDevice(NULL);
-
- a_module_debug_support_cleanup();
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_cleanup: success\n"));
-}
-
-#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
-void
-aptcTimerHandler(unsigned long arg)
-{
- u32 numbytes;
- u32 throughput;
- struct ar6_softc *ar;
- int status;
-
- ar = (struct ar6_softc *)arg;
- A_ASSERT(ar != NULL);
- A_ASSERT(!timer_pending(&aptcTimer));
-
- AR6000_SPIN_LOCK(&ar->arLock, 0);
-
- /* Get the number of bytes transferred */
- numbytes = aptcTR.bytesTransmitted + aptcTR.bytesReceived;
- aptcTR.bytesTransmitted = aptcTR.bytesReceived = 0;
-
- /* Calculate and decide based on throughput thresholds */
- throughput = ((numbytes * 8)/APTC_TRAFFIC_SAMPLING_INTERVAL); /* Kbps */
- if (throughput < APTC_LOWER_THROUGHPUT_THRESHOLD) {
- /* Enable Sleep and delete the timer */
- A_ASSERT(ar->arWmiReady == true);
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
- status = wmi_powermode_cmd(ar->arWmi, REC_POWER);
- AR6000_SPIN_LOCK(&ar->arLock, 0);
- A_ASSERT(status == 0);
- aptcTR.timerScheduled = false;
- } else {
- A_TIMEOUT_MS(&aptcTimer, APTC_TRAFFIC_SAMPLING_INTERVAL, 0);
- }
-
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
-}
-#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
-
-static void
-ar6000_alloc_netbufs(A_NETBUF_QUEUE_T *q, u16 num)
-{
- void * osbuf;
-
- while(num) {
- if((osbuf = A_NETBUF_ALLOC(AR6000_BUFFER_SIZE))) {
- A_NETBUF_ENQUEUE(q, osbuf);
- } else {
- break;
- }
- num--;
- }
-
- if(num) {
- A_PRINTF("%s(), allocation of netbuf failed", __func__);
- }
-}
-
-static struct bin_attribute bmi_attr = {
- .attr = {.name = "bmi", .mode = 0600},
- .read = ar6000_sysfs_bmi_read,
- .write = ar6000_sysfs_bmi_write,
-};
-
-static ssize_t
-ar6000_sysfs_bmi_read(struct file *fp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- int index;
- struct ar6_softc *ar;
- struct hif_device_os_device_info *osDevInfo;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Read %d bytes\n", (u32)count));
- for (index=0; index < MAX_AR6000; index++) {
- ar = (struct ar6_softc *)ar6k_priv(ar6000_devices[index]);
- osDevInfo = &ar->osDevInfo;
- if (kobj == (&(((struct device *)osDevInfo->pOSDevice)->kobj))) {
- break;
- }
- }
-
- if (index == MAX_AR6000) return 0;
-
- if ((BMIRawRead(ar->arHifDevice, (u8*)buf, count, true)) != 0) {
- return 0;
- }
-
- return count;
-}
-
-static ssize_t
-ar6000_sysfs_bmi_write(struct file *fp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- int index;
- struct ar6_softc *ar;
- struct hif_device_os_device_info *osDevInfo;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Write %d bytes\n", (u32)count));
- for (index=0; index < MAX_AR6000; index++) {
- ar = (struct ar6_softc *)ar6k_priv(ar6000_devices[index]);
- osDevInfo = &ar->osDevInfo;
- if (kobj == (&(((struct device *)osDevInfo->pOSDevice)->kobj))) {
- break;
- }
- }
-
- if (index == MAX_AR6000) return 0;
-
- if ((BMIRawWrite(ar->arHifDevice, (u8*)buf, count)) != 0) {
- return 0;
- }
-
- return count;
-}
-
-static int
-ar6000_sysfs_bmi_init(struct ar6_softc *ar)
-{
- int status;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Creating sysfs entry\n"));
- A_MEMZERO(&ar->osDevInfo, sizeof(struct hif_device_os_device_info));
-
- /* Get the underlying OS device */
- status = HIFConfigureDevice(ar->arHifDevice,
- HIF_DEVICE_GET_OS_DEVICE,
- &ar->osDevInfo,
- sizeof(struct hif_device_os_device_info));
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI: Failed to get OS device info from HIF\n"));
- return A_ERROR;
- }
-
- /* Create a bmi entry in the sysfs filesystem */
- if ((sysfs_create_bin_file(&(((struct device *)ar->osDevInfo.pOSDevice)->kobj), &bmi_attr)) < 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMI: Failed to create entry for bmi in sysfs filesystem\n"));
- return A_ERROR;
- }
-
- return 0;
-}
-
-static void
-ar6000_sysfs_bmi_deinit(struct ar6_softc *ar)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Deleting sysfs entry\n"));
-
- sysfs_remove_bin_file(&(((struct device *)ar->osDevInfo.pOSDevice)->kobj), &bmi_attr);
-}
-
-#define bmifn(fn) do { \
- if ((fn) < 0) { \
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); \
- return A_ERROR; \
- } \
-} while(0)
-
-#ifdef SOFTMAC_FILE_USED
-#define AR6002_MAC_ADDRESS_OFFSET 0x0A
-#define AR6003_MAC_ADDRESS_OFFSET 0x16
-static
-void calculate_crc(u32 TargetType, u8 *eeprom_data)
-{
- u16 *ptr_crc;
- u16 *ptr16_eeprom;
- u16 checksum;
- u32 i;
- u32 eeprom_size;
-
- if (TargetType == TARGET_TYPE_AR6001)
- {
- eeprom_size = 512;
- ptr_crc = (u16 *)eeprom_data;
- }
- else if (TargetType == TARGET_TYPE_AR6003)
- {
- eeprom_size = 1024;
- ptr_crc = (u16 *)((u8 *)eeprom_data + 0x04);
- }
- else
- {
- eeprom_size = 768;
- ptr_crc = (u16 *)((u8 *)eeprom_data + 0x04);
- }
-
-
- // Clear the crc
- *ptr_crc = 0;
-
- // Recalculate new CRC
- checksum = 0;
- ptr16_eeprom = (u16 *)eeprom_data;
- for (i = 0;i < eeprom_size; i += 2)
- {
- checksum = checksum ^ (*ptr16_eeprom);
- ptr16_eeprom++;
- }
- checksum = 0xFFFF ^ checksum;
- *ptr_crc = checksum;
-}
-
-static void
-ar6000_softmac_update(struct ar6_softc *ar, u8 *eeprom_data, size_t size)
-{
- const char *source = "random generated";
- const struct firmware *softmac_entry;
- u8 *ptr_mac;
- switch (ar->arTargetType) {
- case TARGET_TYPE_AR6002:
- ptr_mac = (u8 *)((u8 *)eeprom_data + AR6002_MAC_ADDRESS_OFFSET);
- break;
- case TARGET_TYPE_AR6003:
- ptr_mac = (u8 *)((u8 *)eeprom_data + AR6003_MAC_ADDRESS_OFFSET);
- break;
- default:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid Target Type\n"));
- return;
- }
- printk(KERN_DEBUG "MAC from EEPROM %pM\n", ptr_mac);
-
- /* create a random MAC in case we cannot read file from system */
- ptr_mac[0] = 0;
- ptr_mac[1] = 0x03;
- ptr_mac[2] = 0x7F;
- ptr_mac[3] = random32() & 0xff;
- ptr_mac[4] = random32() & 0xff;
- ptr_mac[5] = random32() & 0xff;
- if ((A_REQUEST_FIRMWARE(&softmac_entry, "softmac", ((struct device *)ar->osDevInfo.pOSDevice))) == 0)
- {
- char *macbuf = A_MALLOC_NOWAIT(softmac_entry->size+1);
- if (macbuf) {
- unsigned int softmac[6];
- memcpy(macbuf, softmac_entry->data, softmac_entry->size);
- macbuf[softmac_entry->size] = '\0';
- if (sscanf(macbuf, "%02x:%02x:%02x:%02x:%02x:%02x",
- &softmac[0], &softmac[1], &softmac[2],
- &softmac[3], &softmac[4], &softmac[5])==6) {
- int i;
- for (i=0; i<6; ++i) {
- ptr_mac[i] = softmac[i] & 0xff;
- }
- source = "softmac file";
- }
- kfree(macbuf);
- }
- A_RELEASE_FIRMWARE(softmac_entry);
- }
- printk(KERN_DEBUG "MAC from %s %pM\n", source, ptr_mac);
- calculate_crc(ar->arTargetType, eeprom_data);
-}
-#endif /* SOFTMAC_FILE_USED */
-
-static int
-ar6000_transfer_bin_file(struct ar6_softc *ar, AR6K_BIN_FILE file, u32 address, bool compressed)
-{
- int status;
- const char *filename;
- const struct firmware *fw_entry;
- u32 fw_entry_size;
- u8 **buf;
- size_t *buf_len;
-
- switch (file) {
- case AR6K_OTP_FILE:
- buf = &ar->fw_otp;
- buf_len = &ar->fw_otp_len;
- if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
- filename = AR6003_REV1_OTP_FILE;
- } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
- filename = AR6003_REV2_OTP_FILE;
- } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) {
- filename = AR6003_REV3_OTP_FILE;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver));
- return A_ERROR;
- }
- break;
-
- case AR6K_FIRMWARE_FILE:
- buf = &ar->fw;
- buf_len = &ar->fw_len;
- if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
- filename = AR6003_REV1_FIRMWARE_FILE;
- } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
- filename = AR6003_REV2_FIRMWARE_FILE;
- } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) {
- filename = AR6003_REV3_FIRMWARE_FILE;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver));
- return A_ERROR;
- }
-
- if (eppingtest) {
- bypasswmi = true;
- if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
- filename = AR6003_REV1_EPPING_FIRMWARE_FILE;
- } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
- filename = AR6003_REV2_EPPING_FIRMWARE_FILE;
- } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) {
- filename = AR6003_REV3_EPPING_FIRMWARE_FILE;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("eppingtest : unsupported firmware revision: %d\n",
- ar->arVersion.target_ver));
- return A_ERROR;
- }
- compressed = false;
- }
-
-#ifdef CONFIG_HOST_TCMD_SUPPORT
- if(testmode) {
- if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
- filename = AR6003_REV1_TCMD_FIRMWARE_FILE;
- } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
- filename = AR6003_REV2_TCMD_FIRMWARE_FILE;
- } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) {
- filename = AR6003_REV3_TCMD_FIRMWARE_FILE;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver));
- return A_ERROR;
- }
- compressed = false;
- }
-#endif
-#ifdef HTC_RAW_INTERFACE
- if (!eppingtest && bypasswmi) {
- if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
- filename = AR6003_REV1_ART_FIRMWARE_FILE;
- } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
- filename = AR6003_REV2_ART_FIRMWARE_FILE;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver));
- return A_ERROR;
- }
- compressed = false;
- }
-#endif
- break;
-
- case AR6K_PATCH_FILE:
- buf = &ar->fw_patch;
- buf_len = &ar->fw_patch_len;
- if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
- filename = AR6003_REV1_PATCH_FILE;
- } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
- filename = AR6003_REV2_PATCH_FILE;
- } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) {
- filename = AR6003_REV3_PATCH_FILE;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver));
- return A_ERROR;
- }
- break;
-
- case AR6K_BOARD_DATA_FILE:
- buf = &ar->fw_data;
- buf_len = &ar->fw_data_len;
- if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
- filename = AR6003_REV1_BOARD_DATA_FILE;
- } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
- filename = AR6003_REV2_BOARD_DATA_FILE;
- } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) {
- filename = AR6003_REV3_BOARD_DATA_FILE;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver));
- return A_ERROR;
- }
- break;
-
- default:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown file type: %d\n", file));
- return A_ERROR;
- }
-
- if (*buf == NULL) {
- if ((A_REQUEST_FIRMWARE(&fw_entry, filename, ((struct device *)ar->osDevInfo.pOSDevice))) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to get %s\n", filename));
- return A_ENOENT;
- }
-
- *buf = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
- *buf_len = fw_entry->size;
- A_RELEASE_FIRMWARE(fw_entry);
- }
-
-#ifdef SOFTMAC_FILE_USED
- if (file==AR6K_BOARD_DATA_FILE && *buf_len) {
- ar6000_softmac_update(ar, *buf, *buf_len);
- }
-#endif
-
-
- fw_entry_size = *buf_len;
-
- /* Load extended board data for AR6003 */
- if ((file==AR6K_BOARD_DATA_FILE) && *buf) {
- u32 board_ext_address;
- u32 board_ext_data_size;
- u32 board_data_size;
-
- board_ext_data_size = (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_BOARD_EXT_DATA_SZ : \
- (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_BOARD_EXT_DATA_SZ : 0));
-
- board_data_size = (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_BOARD_DATA_SZ : \
- (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_BOARD_DATA_SZ : 0));
-
- /* Determine where in Target RAM to write Board Data */
- bmifn(BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data), (u8 *)&board_ext_address, 4));
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("Board extended Data download address: 0x%x\n", board_ext_address));
-
- /* check whether the target has allocated memory for extended board data and file contains extended board data */
- if ((board_ext_address) && (*buf_len == (board_data_size + board_ext_data_size))) {
- u32 param;
-
- status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (u8 *)(*buf + board_data_size), board_ext_data_size);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__));
- return A_ERROR;
- }
-
- /* Record the fact that extended board Data IS initialized */
- param = (board_ext_data_size << 16) | 1;
- bmifn(BMIWriteMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data_config),
- (unsigned char *)&param, 4));
- }
- fw_entry_size = board_data_size;
- }
-
- if (compressed) {
- status = BMIFastDownload(ar->arHifDevice, address, *buf, fw_entry_size);
- } else {
- status = BMIWriteMemory(ar->arHifDevice, address, *buf, fw_entry_size);
- }
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__));
- return A_ERROR;
- }
-
- return 0;
-}
-
-int
-ar6000_update_bdaddr(struct ar6_softc *ar)
-{
-
- if (setupbtdev != 0) {
- u32 address;
-
- if (BMIReadMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_data), (u8 *)&address, 4) != 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for hi_board_data failed\n"));
- return A_ERROR;
- }
-
- if (BMIReadMemory(ar->arHifDevice, address + BDATA_BDADDR_OFFSET, (u8 *)ar->bdaddr, 6) != 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for BD address failed\n"));
- return A_ERROR;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BDADDR 0x%x:0x%x:0x%x:0x%x:0x%x:0x%x\n", ar->bdaddr[0],
- ar->bdaddr[1], ar->bdaddr[2], ar->bdaddr[3],
- ar->bdaddr[4], ar->bdaddr[5]));
- }
-
-return 0;
-}
-
-int
-ar6000_sysfs_bmi_get_config(struct ar6_softc *ar, u32 mode)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("BMI: Requesting device specific configuration\n"));
-
- if (mode == WLAN_INIT_MODE_UDEV) {
- char version[16];
- const struct firmware *fw_entry;
-
- /* Get config using udev through a script in user space */
- sprintf(version, "%2.2x", ar->arVersion.target_ver);
- if ((A_REQUEST_FIRMWARE(&fw_entry, version, ((struct device *)ar->osDevInfo.pOSDevice))) != 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI: Failure to get configuration for target version: %s\n", version));
- return A_ERROR;
- }
-
- A_RELEASE_FIRMWARE(fw_entry);
- } else {
- /* The config is contained within the driver itself */
- int status;
- u32 param, options, sleep, address;
-
- /* Temporarily disable system sleep */
- address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
- bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param));
- options = param;
- param |= AR6K_OPTION_SLEEP_DISABLE;
- bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
-
- address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
- bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param));
- sleep = param;
- param |= WLAN_SYSTEM_SLEEP_DISABLE_SET(1);
- bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("old options: %d, old sleep: %d\n", options, sleep));
-
- if (ar->arTargetType == TARGET_TYPE_AR6003) {
- /* Program analog PLL register */
- bmifn(BMIWriteSOCRegister(ar->arHifDevice, ANALOG_INTF_BASE_ADDRESS + 0x284, 0xF9104001));
- /* Run at 80/88MHz by default */
- param = CPU_CLOCK_STANDARD_SET(1);
- } else {
- /* Run at 40/44MHz by default */
- param = CPU_CLOCK_STANDARD_SET(0);
- }
- address = RTC_BASE_ADDRESS + CPU_CLOCK_ADDRESS;
- bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
-
- param = 0;
- if (ar->arTargetType == TARGET_TYPE_AR6002) {
- bmifn(BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_ext_clk_detected), (u8 *)&param, 4));
- }
-
- /* LPO_CAL.ENABLE = 1 if no external clk is detected */
- if (param != 1) {
- address = RTC_BASE_ADDRESS + LPO_CAL_ADDRESS;
- param = LPO_CAL_ENABLE_SET(1);
- bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
- }
-
- /* Venus2.0: Lower SDIO pad drive strength,
- * temporary WAR to avoid SDIO CRC error */
- if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("AR6K: Temporary WAR to avoid SDIO CRC error\n"));
- param = 0x20;
- address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS;
- bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
-
- address = GPIO_BASE_ADDRESS + GPIO_PIN11_ADDRESS;
- bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
-
- address = GPIO_BASE_ADDRESS + GPIO_PIN12_ADDRESS;
- bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
-
- address = GPIO_BASE_ADDRESS + GPIO_PIN13_ADDRESS;
- bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
- }
-
-#ifdef FORCE_INTERNAL_CLOCK
- /* Ignore external clock, if any, and force use of internal clock */
- if (ar->arTargetType == TARGET_TYPE_AR6003) {
- /* hi_ext_clk_detected = 0 */
- param = 0;
- bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_ext_clk_detected), (u8 *)&param, 4));
-
- /* CLOCK_CONTROL &= ~LF_CLK32 */
- address = RTC_BASE_ADDRESS + CLOCK_CONTROL_ADDRESS;
- bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param));
- param &= (~CLOCK_CONTROL_LF_CLK32_SET(1));
- bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
- }
-#endif /* FORCE_INTERNAL_CLOCK */
-
- /* Transfer Board Data from Target EEPROM to Target RAM */
- if (ar->arTargetType == TARGET_TYPE_AR6003) {
- /* Determine where in Target RAM to write Board Data */
- bmifn(BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_data), (u8 *)&address, 4));
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("Board Data download address: 0x%x\n", address));
-
- /* Write EEPROM data to Target RAM */
- if ((ar6000_transfer_bin_file(ar, AR6K_BOARD_DATA_FILE, address, false)) != 0) {
- return A_ERROR;
- }
-
- /* Record the fact that Board Data IS initialized */
- param = 1;
- bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_data_initialized), (u8 *)&param, 4));
-
- /* Transfer One time Programmable data */
- AR6K_APP_LOAD_ADDRESS(address, ar->arVersion.target_ver);
- if (ar->arVersion.target_ver == AR6003_REV3_VERSION)
- address = 0x1234;
- status = ar6000_transfer_bin_file(ar, AR6K_OTP_FILE, address, true);
- if (status == 0) {
- /* Execute the OTP code */
- param = 0;
- AR6K_APP_START_OVERRIDE_ADDRESS(address, ar->arVersion.target_ver);
- bmifn(BMIExecute(ar->arHifDevice, address, &param));
- } else if (status != A_ENOENT) {
- return A_ERROR;
- }
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Programming of board data for chip %d not supported\n", ar->arTargetType));
- return A_ERROR;
- }
-
- /* Download Target firmware */
- AR6K_APP_LOAD_ADDRESS(address, ar->arVersion.target_ver);
- if (ar->arVersion.target_ver == AR6003_REV3_VERSION)
- address = 0x1234;
- if ((ar6000_transfer_bin_file(ar, AR6K_FIRMWARE_FILE, address, true)) != 0) {
- return A_ERROR;
- }
-
- /* Set starting address for firmware */
- AR6K_APP_START_OVERRIDE_ADDRESS(address, ar->arVersion.target_ver);
- bmifn(BMISetAppStart(ar->arHifDevice, address));
-
- if(ar->arTargetType == TARGET_TYPE_AR6003) {
- AR6K_DATASET_PATCH_ADDRESS(address, ar->arVersion.target_ver);
- if ((ar6000_transfer_bin_file(ar, AR6K_PATCH_FILE,
- address, false)) != 0)
- return A_ERROR;
- param = address;
- bmifn(BMIWriteMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar, hi_dset_list_head),
- (unsigned char *)&param, 4));
- }
-
- /* Restore system sleep */
- address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
- bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, sleep));
-
- address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
- param = options | 0x20;
- bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
-
- if (ar->arTargetType == TARGET_TYPE_AR6003) {
- /* Configure GPIO AR6003 UART */
-#ifndef CONFIG_AR600x_DEBUG_UART_TX_PIN
-#define CONFIG_AR600x_DEBUG_UART_TX_PIN 8
-#endif
- param = CONFIG_AR600x_DEBUG_UART_TX_PIN;
- bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_dbg_uart_txpin), (u8 *)&param, 4));
-
-#if (CONFIG_AR600x_DEBUG_UART_TX_PIN == 23)
- {
- address = GPIO_BASE_ADDRESS + CLOCK_GPIO_ADDRESS;
- bmifn(BMIReadSOCRegister(ar->arHifDevice, address, &param));
- param |= CLOCK_GPIO_BT_CLK_OUT_EN_SET(1);
- bmifn(BMIWriteSOCRegister(ar->arHifDevice, address, param));
- }
-#endif
-
- /* Configure GPIO for BT Reset */
-#ifdef ATH6KL_CONFIG_GPIO_BT_RESET
-#define CONFIG_AR600x_BT_RESET_PIN 0x16
- param = CONFIG_AR600x_BT_RESET_PIN;
- bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_hci_uart_support_pins), (u8 *)&param, 4));
-#endif /* ATH6KL_CONFIG_GPIO_BT_RESET */
-
- /* Configure UART flow control polarity */
-#ifndef CONFIG_ATH6KL_BT_UART_FC_POLARITY
-#define CONFIG_ATH6KL_BT_UART_FC_POLARITY 0
-#endif
-
-#if (CONFIG_ATH6KL_BT_UART_FC_POLARITY == 1)
- if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
- param = ((CONFIG_ATH6KL_BT_UART_FC_POLARITY << 1) & 0x2);
- bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_hci_uart_pwr_mgmt_params), (u8 *)&param, 4));
- }
-#endif /* CONFIG_ATH6KL_BT_UART_FC_POLARITY */
- }
-
-#ifdef HTC_RAW_INTERFACE
- if (!eppingtest && bypasswmi) {
- /* Don't run BMIDone for ART mode and force resetok=0 */
- resetok = 0;
- msleep(1000);
- }
-#endif /* HTC_RAW_INTERFACE */
- }
-
- return 0;
-}
-
-int
-ar6000_configure_target(struct ar6_softc *ar)
-{
- u32 param;
- if (enableuartprint) {
- param = 1;
- if (BMIWriteMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar, hi_serial_enable),
- (u8 *)&param,
- 4)!= 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for enableuartprint failed \n"));
- return A_ERROR;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Serial console prints enabled\n"));
- }
-
- /* Tell target which HTC version it is used*/
- param = HTC_PROTOCOL_VERSION;
- if (BMIWriteMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar, hi_app_host_interest),
- (u8 *)&param,
- 4)!= 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for htc version failed \n"));
- return A_ERROR;
- }
-
-#ifdef CONFIG_HOST_TCMD_SUPPORT
- if(testmode) {
- ar->arTargetMode = AR6000_TCMD_MODE;
- }else {
- ar->arTargetMode = AR6000_WLAN_MODE;
- }
-#endif
- if (enabletimerwar) {
- u32 param;
-
- if (BMIReadMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
- (u8 *)&param,
- 4)!= 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for enabletimerwar failed \n"));
- return A_ERROR;
- }
-
- param |= HI_OPTION_TIMER_WAR;
-
- if (BMIWriteMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
- (u8 *)&param,
- 4) != 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for enabletimerwar failed \n"));
- return A_ERROR;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Timer WAR enabled\n"));
- }
-
- /* set the firmware mode to STA/IBSS/AP */
- {
- u32 param;
-
- if (BMIReadMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
- (u8 *)&param,
- 4)!= 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for setting fwmode failed \n"));
- return A_ERROR;
- }
-
- param |= (num_device << HI_OPTION_NUM_DEV_SHIFT);
- param |= (fwmode << HI_OPTION_FW_MODE_SHIFT);
- param |= (mac_addr_method << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
- param |= (firmware_bridge << HI_OPTION_FW_BRIDGE_SHIFT);
-
-
- if (BMIWriteMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
- (u8 *)&param,
- 4) != 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for setting fwmode failed \n"));
- return A_ERROR;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Firmware mode set\n"));
- }
-
-#ifdef ATH6KL_DISABLE_TARGET_DBGLOGS
- {
- u32 param;
-
- if (BMIReadMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
- (u8 *)&param,
- 4)!= 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for disabling debug logs failed\n"));
- return A_ERROR;
- }
-
- param |= HI_OPTION_DISABLE_DBGLOG;
-
- if (BMIWriteMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
- (u8 *)&param,
- 4) != 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for HI_OPTION_DISABLE_DBGLOG\n"));
- return A_ERROR;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Firmware mode set\n"));
- }
-#endif /* ATH6KL_DISABLE_TARGET_DBGLOGS */
-
- /*
- * Hardcode the address use for the extended board data
- * Ideally this should be pre-allocate by the OS at boot time
- * But since it is a new feature and board data is loaded
- * at init time, we have to workaround this from host.
- * It is difficult to patch the firmware boot code,
- * but possible in theory.
- */
-
- if (ar->arTargetType == TARGET_TYPE_AR6003) {
- u32 ramReservedSz;
- if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
- param = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
- ramReservedSz = AR6003_REV2_RAM_RESERVE_SIZE;
- } else {
- param = AR6003_REV3_BOARD_EXT_DATA_ADDRESS;
- ramReservedSz = AR6003_REV3_RAM_RESERVE_SIZE;
- }
- if (BMIWriteMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data),
- (u8 *)&param, 4) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("BMIWriteMemory for "
- "hi_board_ext_data failed\n"));
- return A_ERROR;
- }
- if (BMIWriteMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar,
- hi_end_RAM_reserve_sz),
- (u8 *)&ramReservedSz, 4) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR ,
- ("BMIWriteMemory for "
- "hi_end_RAM_reserve_sz failed\n"));
- return A_ERROR;
- }
- }
-
- /* since BMIInit is called in the driver layer, we have to set the block
- * size here for the target */
-
- if (ar6000_set_htc_params(ar->arHifDevice, ar->arTargetType,
- mbox_yield_limit, 0)) {
- /* use default number of control buffers */
- return A_ERROR;
- }
-
- if (setupbtdev != 0) {
- if (ar6000_set_hci_bridge_flags(ar->arHifDevice,
- ar->arTargetType,
- setupbtdev)) {
- return A_ERROR;
- }
- }
- return 0;
-}
-
-static void
-init_netdev(struct net_device *dev, char *name)
-{
- dev->netdev_ops = &ar6000_netdev_ops;
- dev->watchdog_timeo = AR6000_TX_TIMEOUT;
-
- /*
- * We need the OS to provide us with more headroom in order to
- * perform dix to 802.3, WMI header encap, and the HTC header
- */
- if (processDot11Hdr) {
- dev->hard_header_len = sizeof(struct ieee80211_qosframe) + sizeof(ATH_LLC_SNAP_HDR) + sizeof(WMI_DATA_HDR) + HTC_HEADER_LEN + WMI_MAX_TX_META_SZ + LINUX_HACK_FUDGE_FACTOR;
- } else {
- dev->hard_header_len = ETH_HLEN + sizeof(ATH_LLC_SNAP_HDR) +
- sizeof(WMI_DATA_HDR) + HTC_HEADER_LEN + WMI_MAX_TX_META_SZ + LINUX_HACK_FUDGE_FACTOR;
- }
-
- if (name[0])
- {
- strcpy(dev->name, name);
- }
-
-#ifdef CONFIG_CHECKSUM_OFFLOAD
- if(csumOffload){
- dev->features |= NETIF_F_IP_CSUM; /*advertise kernel capability to do TCP/UDP CSUM offload for IPV4*/
- }
-#endif
-
- return;
-}
-
-static int __ath6kl_init_netdev(struct net_device *dev)
-{
- int r;
-
- rtnl_lock();
- r = ar6000_init(dev);
- rtnl_unlock();
-
- if (r) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: ar6000_init\n"));
- return r;
- }
-
- return 0;
-}
-
-#ifdef HTC_RAW_INTERFACE
-static int ath6kl_init_netdev_wmi(struct net_device *dev)
-{
- if (!eppingtest && bypasswmi)
- return 0;
-
- return __ath6kl_init_netdev(dev);
-}
-#else
-static int ath6kl_init_netdev_wmi(struct net_device *dev)
-{
- return __ath6kl_init_netdev(dev);
-}
-#endif
-
-static int ath6kl_init_netdev(struct ar6_softc *ar)
-{
- int r;
-
- r = ar6000_sysfs_bmi_get_config(ar, wlaninitmode);
- if (r) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("ar6000_avail: "
- "ar6000_sysfs_bmi_get_config failed\n"));
- return r;
- }
-
- return ath6kl_init_netdev_wmi(ar->arNetDev);
-}
-
-/*
- * HTC Event handlers
- */
-static int
-ar6000_avail_ev(void *context, void *hif_handle)
-{
- int i;
- struct net_device *dev;
- void *ar_netif;
- struct ar6_softc *ar;
- int device_index = 0;
- struct htc_init_info htcInfo;
- struct wireless_dev *wdev;
- int r = 0;
- struct hif_device_os_device_info osDevInfo;
-
- memset(&osDevInfo, 0, sizeof(osDevInfo));
- if (HIFConfigureDevice(hif_handle, HIF_DEVICE_GET_OS_DEVICE,
- &osDevInfo, sizeof(osDevInfo))) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: Failed to get OS device instance\n", __func__));
- return A_ERROR;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_available\n"));
-
- for (i=0; i < MAX_AR6000; i++) {
- if (ar6000_devices[i] == NULL) {
- break;
- }
- }
-
- if (i == MAX_AR6000) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_available: max devices reached\n"));
- return A_ERROR;
- }
-
- /* Save this. It gives a bit better readability especially since */
- /* we use another local "i" variable below. */
- device_index = i;
-
- wdev = ar6k_cfg80211_init(osDevInfo.pOSDevice);
- if (IS_ERR(wdev)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ar6k_cfg80211_init failed\n", __func__));
- return A_ERROR;
- }
- ar_netif = wdev_priv(wdev);
-
- if (ar_netif == NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Can't allocate ar6k priv memory\n", __func__));
- return A_ERROR;
- }
-
- A_MEMZERO(ar_netif, sizeof(struct ar6_softc));
- ar = (struct ar6_softc *)ar_netif;
-
- ar->wdev = wdev;
- wdev->iftype = NL80211_IFTYPE_STATION;
-
- dev = alloc_netdev_mq(0, "wlan%d", ether_setup, 1);
- if (!dev) {
- printk(KERN_CRIT "AR6K: no memory for network device instance\n");
- ar6k_cfg80211_deinit(ar);
- return A_ERROR;
- }
-
- dev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
- wdev->netdev = dev;
- ar->arNetworkType = INFRA_NETWORK;
- ar->smeState = SME_DISCONNECTED;
- ar->arAutoAuthStage = AUTH_IDLE;
-
- init_netdev(dev, ifname);
-
-
- ar->arNetDev = dev;
- ar->arHifDevice = hif_handle;
- ar->arWlanState = WLAN_ENABLED;
- ar->arDeviceIndex = device_index;
-
- ar->arWlanPowerState = WLAN_POWER_STATE_ON;
- ar->arWlanOff = false; /* We are in ON state */
-#ifdef CONFIG_PM
- ar->arWowState = WLAN_WOW_STATE_NONE;
- ar->arBTOff = true; /* BT chip assumed to be OFF */
- ar->arBTSharing = WLAN_CONFIG_BT_SHARING;
- ar->arWlanOffConfig = WLAN_CONFIG_WLAN_OFF;
- ar->arSuspendConfig = WLAN_CONFIG_PM_SUSPEND;
- ar->arWow2Config = WLAN_CONFIG_PM_WOW2;
-#endif /* CONFIG_PM */
-
- A_INIT_TIMER(&ar->arHBChallengeResp.timer, ar6000_detect_error, dev);
- ar->arHBChallengeResp.seqNum = 0;
- ar->arHBChallengeResp.outstanding = false;
- ar->arHBChallengeResp.missCnt = 0;
- ar->arHBChallengeResp.frequency = AR6000_HB_CHALLENGE_RESP_FREQ_DEFAULT;
- ar->arHBChallengeResp.missThres = AR6000_HB_CHALLENGE_RESP_MISS_THRES_DEFAULT;
-
- ar6000_init_control_info(ar);
- init_waitqueue_head(&arEvent);
- sema_init(&ar->arSem, 1);
- ar->bIsDestroyProgress = false;
-
- INIT_HTC_PACKET_QUEUE(&ar->amsdu_rx_buffer_queue);
-
-#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
- A_INIT_TIMER(&aptcTimer, aptcTimerHandler, ar);
-#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
-
- A_INIT_TIMER(&ar->disconnect_timer, disconnect_timer_handler, dev);
-
- BMIInit();
-
- ar6000_sysfs_bmi_init(ar);
-
- {
- struct bmi_target_info targ_info;
-
- r = BMIGetTargetInfo(ar->arHifDevice, &targ_info);
- if (r)
- goto avail_ev_failed;
-
- ar->arVersion.target_ver = targ_info.target_ver;
- ar->arTargetType = targ_info.target_type;
- wdev->wiphy->hw_version = targ_info.target_ver;
- }
-
- r = ar6000_configure_target(ar);
- if (r)
- goto avail_ev_failed;
-
- A_MEMZERO(&htcInfo,sizeof(htcInfo));
- htcInfo.pContext = ar;
- htcInfo.TargetFailure = ar6000_target_failure;
-
- ar->arHtcTarget = HTCCreate(ar->arHifDevice,&htcInfo);
-
- if (!ar->arHtcTarget) {
- r = -ENOMEM;
- goto avail_ev_failed;
- }
-
- spin_lock_init(&ar->arLock);
-
-#ifdef WAPI_ENABLE
- ar->arWapiEnable = 0;
-#endif
-
-
- if(csumOffload){
- /*if external frame work is also needed, change and use an extended rxMetaVerion*/
- ar->rxMetaVersion=WMI_META_VERSION_2;
- }
-
- ar->aggr_cntxt = aggr_init(ar6000_alloc_netbufs);
- if (!ar->aggr_cntxt) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() Failed to initialize aggr.\n", __func__));
- r = -ENOMEM;
- goto avail_ev_failed;
- }
-
- aggr_register_rx_dispatcher(ar->aggr_cntxt, (void *)dev, ar6000_deliver_frames_to_nw_stack);
-
- HIFClaimDevice(ar->arHifDevice, ar);
-
- /* We only register the device in the global list if we succeed. */
- /* If the device is in the global list, it will be destroyed */
- /* when the module is unloaded. */
- ar6000_devices[device_index] = dev;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("BMI enabled: %d\n", wlaninitmode));
- if ((wlaninitmode == WLAN_INIT_MODE_UDEV) ||
- (wlaninitmode == WLAN_INIT_MODE_DRV)) {
- r = ath6kl_init_netdev(ar);
- if (r)
- goto avail_ev_failed;
- }
-
- /* This runs the init function if registered */
- r = register_netdev(dev);
- if (r) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: register_netdev failed\n"));
- ar6000_destroy(dev, 0);
- return r;
- }
-
- is_netdev_registered = 1;
-
-#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
- arApNetDev = NULL;
-#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_avail: name=%s hifdevice=0x%lx, dev=0x%lx (%d), ar=0x%lx\n",
- dev->name, (unsigned long)ar->arHifDevice, (unsigned long)dev, device_index,
- (unsigned long)ar));
-
-avail_ev_failed :
- if (r)
- ar6000_sysfs_bmi_deinit(ar);
-
- return r;
-}
-
-static void ar6000_target_failure(void *Instance, int Status)
-{
- struct ar6_softc *ar = (struct ar6_softc *)Instance;
- WMI_TARGET_ERROR_REPORT_EVENT errEvent;
- static bool sip = false;
-
- if (Status != 0) {
-
- printk(KERN_ERR "ar6000_target_failure: target asserted \n");
-
- if (timer_pending(&ar->arHBChallengeResp.timer)) {
- A_UNTIMEOUT(&ar->arHBChallengeResp.timer);
- }
-
- /* try dumping target assertion information (if any) */
- ar6000_dump_target_assert_info(ar->arHifDevice,ar->arTargetType);
-
- /*
- * Fetch the logs from the target via the diagnostic
- * window.
- */
- ar6000_dbglog_get_debug_logs(ar);
-
- /* Report the error only once */
- if (!sip) {
- sip = true;
- errEvent.errorVal = WMI_TARGET_COM_ERR |
- WMI_TARGET_FATAL_ERR;
- }
- }
-}
-
-static int
-ar6000_unavail_ev(void *context, void *hif_handle)
-{
- struct ar6_softc *ar = (struct ar6_softc *)context;
- /* NULL out it's entry in the global list */
- ar6000_devices[ar->arDeviceIndex] = NULL;
- ar6000_destroy(ar->arNetDev, 1);
-
- return 0;
-}
-
-void
-ar6000_restart_endpoint(struct net_device *dev)
-{
- int status = 0;
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
-
- BMIInit();
- do {
- if ( (status=ar6000_configure_target(ar))!= 0)
- break;
- if ( (status=ar6000_sysfs_bmi_get_config(ar, wlaninitmode)) != 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: ar6000_sysfs_bmi_get_config failed\n"));
- break;
- }
- rtnl_lock();
- status = (ar6000_init(dev)==0) ? 0 : A_ERROR;
- rtnl_unlock();
-
- if (status) {
- break;
- }
- if (ar->arSsidLen && ar->arWlanState == WLAN_ENABLED) {
- ar6000_connect_to_ap(ar);
- }
- } while (0);
-
- if (status== 0) {
- return;
- }
-
- ar6000_devices[ar->arDeviceIndex] = NULL;
- ar6000_destroy(ar->arNetDev, 1);
-}
-
-void
-ar6000_stop_endpoint(struct net_device *dev, bool keepprofile, bool getdbglogs)
-{
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
-
- /* Stop the transmit queues */
- netif_stop_queue(dev);
-
- /* Disable the target and the interrupts associated with it */
- if (ar->arWmiReady == true)
- {
- if (!bypasswmi)
- {
- bool disconnectIssued;
-
- disconnectIssued = (ar->arConnected) || (ar->arConnectPending);
- ar6000_disconnect(ar);
- if (!keepprofile) {
- ar6000_init_profile_info(ar);
- }
-
- A_UNTIMEOUT(&ar->disconnect_timer);
-
- if (getdbglogs) {
- ar6000_dbglog_get_debug_logs(ar);
- }
-
- ar->arWmiReady = false;
- wmi_shutdown(ar->arWmi);
- ar->arWmiEnabled = false;
- ar->arWmi = NULL;
- /*
- * After wmi_shudown all WMI events will be dropped.
- * We need to cleanup the buffers allocated in AP mode
- * and give disconnect notification to stack, which usually
- * happens in the disconnect_event.
- * Simulate the disconnect_event by calling the function directly.
- * Sometimes disconnect_event will be received when the debug logs
- * are collected.
- */
- if (disconnectIssued) {
- if(ar->arNetworkType & AP_NETWORK) {
- ar6000_disconnect_event(ar, DISCONNECT_CMD, bcast_mac, 0, NULL, 0);
- } else {
- ar6000_disconnect_event(ar, DISCONNECT_CMD, ar->arBssid, 0, NULL, 0);
- }
- }
- ar->user_savedkeys_stat = USER_SAVEDKEYS_STAT_INIT;
- ar->user_key_ctrl = 0;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("%s(): WMI stopped\n", __func__));
- }
- else
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("%s(): WMI not ready 0x%lx 0x%lx\n",
- __func__, (unsigned long) ar, (unsigned long) ar->arWmi));
-
- /* Shut down WMI if we have started it */
- if(ar->arWmiEnabled == true)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("%s(): Shut down WMI\n", __func__));
- wmi_shutdown(ar->arWmi);
- ar->arWmiEnabled = false;
- ar->arWmi = NULL;
- }
- }
-
- if (ar->arHtcTarget != NULL) {
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
- if (NULL != ar6kHciTransCallbacks.cleanupTransport) {
- ar6kHciTransCallbacks.cleanupTransport(NULL);
- }
-#else
- // FIXME: workaround to reset BT's UART baud rate to default
- if (NULL != ar->exitCallback) {
- struct ar3k_config_info ar3kconfig;
- int status;
-
- A_MEMZERO(&ar3kconfig,sizeof(ar3kconfig));
- ar6000_set_default_ar3kconfig(ar, (void *)&ar3kconfig);
- status = ar->exitCallback(&ar3kconfig);
- if (0 != status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to reset AR3K baud rate! \n"));
- }
- }
- // END workaround
- if (setuphci)
- ar6000_cleanup_hci(ar);
-#endif
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,(" Shutting down HTC .... \n"));
- /* stop HTC */
- HTCStop(ar->arHtcTarget);
- }
-
- if (resetok) {
- /* try to reset the device if we can
- * The driver may have been configure NOT to reset the target during
- * a debug session */
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,(" Attempting to reset target on instance destroy.... \n"));
- if (ar->arHifDevice != NULL) {
- bool coldReset = (ar->arTargetType == TARGET_TYPE_AR6003) ? true: false;
- ar6000_reset_device(ar->arHifDevice, ar->arTargetType, true, coldReset);
- }
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,(" Host does not want target reset. \n"));
- }
- /* Done with cookies */
- ar6000_cookie_cleanup(ar);
-
- /* cleanup any allocated AMSDU buffers */
- ar6000_cleanup_amsdu_rxbufs(ar);
-}
-/*
- * We need to differentiate between the surprise and planned removal of the
- * device because of the following consideration:
- * - In case of surprise removal, the hcd already frees up the pending
- * for the device and hence there is no need to unregister the function
- * driver inorder to get these requests. For planned removal, the function
- * driver has to explicitly unregister itself to have the hcd return all the
- * pending requests before the data structures for the devices are freed up.
- * Note that as per the current implementation, the function driver will
- * end up releasing all the devices since there is no API to selectively
- * release a particular device.
- * - Certain commands issued to the target can be skipped for surprise
- * removal since they will anyway not go through.
- */
-void
-ar6000_destroy(struct net_device *dev, unsigned int unregister)
-{
- struct ar6_softc *ar;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("+ar6000_destroy \n"));
-
- if((dev == NULL) || ((ar = ar6k_priv(dev)) == NULL))
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s(): Failed to get device structure.\n", __func__));
- return;
- }
-
- ar->bIsDestroyProgress = true;
-
- if (down_interruptible(&ar->arSem)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s(): down_interruptible failed \n", __func__));
- return;
- }
-
- if (ar->arWlanPowerState != WLAN_POWER_STATE_CUT_PWR) {
- /* only stop endpoint if we are not stop it in suspend_ev */
- ar6000_stop_endpoint(dev, false, true);
- }
-
- ar->arWlanState = WLAN_DISABLED;
- if (ar->arHtcTarget != NULL) {
- /* destroy HTC */
- HTCDestroy(ar->arHtcTarget);
- }
- if (ar->arHifDevice != NULL) {
- /*release the device so we do not get called back on remove incase we
- * we're explicity destroyed by module unload */
- HIFReleaseDevice(ar->arHifDevice);
- HIFShutDownDevice(ar->arHifDevice);
- }
- aggr_module_destroy(ar->aggr_cntxt);
-
- /* Done with cookies */
- ar6000_cookie_cleanup(ar);
-
- /* cleanup any allocated AMSDU buffers */
- ar6000_cleanup_amsdu_rxbufs(ar);
-
- ar6000_sysfs_bmi_deinit(ar);
-
- /* Cleanup BMI */
- BMICleanup();
-
- /* Clear the tx counters */
- memset(tx_attempt, 0, sizeof(tx_attempt));
- memset(tx_post, 0, sizeof(tx_post));
- memset(tx_complete, 0, sizeof(tx_complete));
-
-#ifdef HTC_RAW_INTERFACE
- if (ar->arRawHtc) {
- kfree(ar->arRawHtc);
- ar->arRawHtc = NULL;
- }
-#endif
- /* Free up the device data structure */
- if (unregister && is_netdev_registered) {
- unregister_netdev(dev);
- is_netdev_registered = 0;
- }
- free_netdev(dev);
-
- ar6k_cfg80211_deinit(ar);
-
-#ifdef CONFIG_AP_VIRTUL_ADAPTER_SUPPORT
- ar6000_remove_ap_interface();
-#endif /*CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
-
- kfree(ar->fw_otp);
- kfree(ar->fw);
- kfree(ar->fw_patch);
- kfree(ar->fw_data);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("-ar6000_destroy \n"));
-}
-
-static void disconnect_timer_handler(unsigned long ptr)
-{
- struct net_device *dev = (struct net_device *)ptr;
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
-
- A_UNTIMEOUT(&ar->disconnect_timer);
-
- ar6000_init_profile_info(ar);
- ar6000_disconnect(ar);
-}
-
-static void ar6000_detect_error(unsigned long ptr)
-{
- struct net_device *dev = (struct net_device *)ptr;
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
- WMI_TARGET_ERROR_REPORT_EVENT errEvent;
-
- AR6000_SPIN_LOCK(&ar->arLock, 0);
-
- if (ar->arHBChallengeResp.outstanding) {
- ar->arHBChallengeResp.missCnt++;
- } else {
- ar->arHBChallengeResp.missCnt = 0;
- }
-
- if (ar->arHBChallengeResp.missCnt > ar->arHBChallengeResp.missThres) {
- /* Send Error Detect event to the application layer and do not reschedule the error detection module timer */
- ar->arHBChallengeResp.missCnt = 0;
- ar->arHBChallengeResp.seqNum = 0;
- errEvent.errorVal = WMI_TARGET_COM_ERR | WMI_TARGET_FATAL_ERR;
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
- return;
- }
-
- /* Generate the sequence number for the next challenge */
- ar->arHBChallengeResp.seqNum++;
- ar->arHBChallengeResp.outstanding = true;
-
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
-
- /* Send the challenge on the control channel */
- if (wmi_get_challenge_resp_cmd(ar->arWmi, ar->arHBChallengeResp.seqNum, DRV_HB_CHALLENGE) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to send heart beat challenge\n"));
- }
-
-
- /* Reschedule the timer for the next challenge */
- A_TIMEOUT_MS(&ar->arHBChallengeResp.timer, ar->arHBChallengeResp.frequency * 1000, 0);
-}
-
-void ar6000_init_profile_info(struct ar6_softc *ar)
-{
- ar->arSsidLen = 0;
- A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
-
- switch(fwmode) {
- case HI_OPTION_FW_MODE_IBSS:
- ar->arNetworkType = ar->arNextMode = ADHOC_NETWORK;
- break;
- case HI_OPTION_FW_MODE_BSS_STA:
- ar->arNetworkType = ar->arNextMode = INFRA_NETWORK;
- break;
- case HI_OPTION_FW_MODE_AP:
- ar->arNetworkType = ar->arNextMode = AP_NETWORK;
- break;
- }
-
- ar->arDot11AuthMode = OPEN_AUTH;
- ar->arAuthMode = NONE_AUTH;
- ar->arPairwiseCrypto = NONE_CRYPT;
- ar->arPairwiseCryptoLen = 0;
- ar->arGroupCrypto = NONE_CRYPT;
- ar->arGroupCryptoLen = 0;
- A_MEMZERO(ar->arWepKeyList, sizeof(ar->arWepKeyList));
- A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid));
- A_MEMZERO(ar->arBssid, sizeof(ar->arBssid));
- ar->arBssChannel = 0;
-}
-
-static void
-ar6000_init_control_info(struct ar6_softc *ar)
-{
- ar->arWmiEnabled = false;
- ar6000_init_profile_info(ar);
- ar->arDefTxKeyIndex = 0;
- A_MEMZERO(ar->arWepKeyList, sizeof(ar->arWepKeyList));
- ar->arChannelHint = 0;
- ar->arListenIntervalT = A_DEFAULT_LISTEN_INTERVAL;
- ar->arListenIntervalB = 0;
- ar->arVersion.host_ver = AR6K_SW_VERSION;
- ar->arRssi = 0;
- ar->arTxPwr = 0;
- ar->arTxPwrSet = false;
- ar->arSkipScan = 0;
- ar->arBeaconInterval = 0;
- ar->arBitRate = 0;
- ar->arMaxRetries = 0;
- ar->arWmmEnabled = true;
- ar->intra_bss = 1;
- ar->scan_triggered = 0;
- A_MEMZERO(&ar->scParams, sizeof(ar->scParams));
- ar->scParams.shortScanRatio = WMI_SHORTSCANRATIO_DEFAULT;
- ar->scParams.scanCtrlFlags = DEFAULT_SCAN_CTRL_FLAGS;
-
- /* Initialize the AP mode state info */
- {
- u8 ctr;
- A_MEMZERO((u8 *)ar->sta_list, AP_MAX_NUM_STA * sizeof(sta_t));
-
- /* init the Mutexes */
- A_MUTEX_INIT(&ar->mcastpsqLock);
-
- /* Init the PS queues */
- for (ctr=0; ctr < AP_MAX_NUM_STA ; ctr++) {
- A_MUTEX_INIT(&ar->sta_list[ctr].psqLock);
- A_NETBUF_QUEUE_INIT(&ar->sta_list[ctr].psq);
- }
-
- ar->ap_profile_flag = 0;
- A_NETBUF_QUEUE_INIT(&ar->mcastpsq);
-
- memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
- ar->ap_wmode = DEF_AP_WMODE_G;
- ar->ap_dtim_period = DEF_AP_DTIM;
- ar->ap_beacon_interval = DEF_BEACON_INTERVAL;
- }
-}
-
-static int
-ar6000_open(struct net_device *dev)
-{
- unsigned long flags;
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
-
- spin_lock_irqsave(&ar->arLock, flags);
-
- if(ar->arWlanState == WLAN_DISABLED) {
- ar->arWlanState = WLAN_ENABLED;
- }
-
- if( ar->arConnected || bypasswmi) {
- netif_carrier_on(dev);
- /* Wake up the queues */
- netif_wake_queue(dev);
- }
- else
- netif_carrier_off(dev);
-
- spin_unlock_irqrestore(&ar->arLock, flags);
- return 0;
-}
-
-static int
-ar6000_close(struct net_device *dev)
-{
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
- netif_stop_queue(dev);
-
- ar6000_disconnect(ar);
-
- if(ar->arWmiReady == true) {
- if (wmi_scanparams_cmd(ar->arWmi, 0xFFFF, 0,
- 0, 0, 0, 0, 0, 0, 0, 0) != 0) {
- return -EIO;
- }
- ar->arWlanState = WLAN_DISABLED;
- }
- ar6k_cfg80211_scanComplete_event(ar, A_ECANCELED);
-
- return 0;
-}
-
-/* connect to a service */
-static int ar6000_connectservice(struct ar6_softc *ar,
- struct htc_service_connect_req *pConnect,
- char *pDesc)
-{
- int status;
- struct htc_service_connect_resp response;
-
- do {
-
- A_MEMZERO(&response,sizeof(response));
-
- status = HTCConnectService(ar->arHtcTarget,
- pConnect,
- &response);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" Failed to connect to %s service status:%d \n",
- pDesc, status));
- break;
- }
- switch (pConnect->ServiceID) {
- case WMI_CONTROL_SVC :
- if (ar->arWmiEnabled) {
- /* set control endpoint for WMI use */
- wmi_set_control_ep(ar->arWmi, response.Endpoint);
- }
- /* save EP for fast lookup */
- ar->arControlEp = response.Endpoint;
- break;
- case WMI_DATA_BE_SVC :
- arSetAc2EndpointIDMap(ar, WMM_AC_BE, response.Endpoint);
- break;
- case WMI_DATA_BK_SVC :
- arSetAc2EndpointIDMap(ar, WMM_AC_BK, response.Endpoint);
- break;
- case WMI_DATA_VI_SVC :
- arSetAc2EndpointIDMap(ar, WMM_AC_VI, response.Endpoint);
- break;
- case WMI_DATA_VO_SVC :
- arSetAc2EndpointIDMap(ar, WMM_AC_VO, response.Endpoint);
- break;
- default:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ServiceID not mapped %d\n", pConnect->ServiceID));
- status = A_EINVAL;
- break;
- }
-
- } while (false);
-
- return status;
-}
-
-void ar6000_TxDataCleanup(struct ar6_softc *ar)
-{
- /* flush all the data (non-control) streams
- * we only flush packets that are tagged as data, we leave any control packets that
- * were in the TX queues alone */
- HTCFlushEndpoint(ar->arHtcTarget,
- arAc2EndpointID(ar, WMM_AC_BE),
- AR6K_DATA_PKT_TAG);
- HTCFlushEndpoint(ar->arHtcTarget,
- arAc2EndpointID(ar, WMM_AC_BK),
- AR6K_DATA_PKT_TAG);
- HTCFlushEndpoint(ar->arHtcTarget,
- arAc2EndpointID(ar, WMM_AC_VI),
- AR6K_DATA_PKT_TAG);
- HTCFlushEndpoint(ar->arHtcTarget,
- arAc2EndpointID(ar, WMM_AC_VO),
- AR6K_DATA_PKT_TAG);
-}
-
-HTC_ENDPOINT_ID
-ar6000_ac2_endpoint_id ( void * devt, u8 ac)
-{
- struct ar6_softc *ar = (struct ar6_softc *) devt;
- return(arAc2EndpointID(ar, ac));
-}
-
-u8 ar6000_endpoint_id2_ac(void * devt, HTC_ENDPOINT_ID ep )
-{
- struct ar6_softc *ar = (struct ar6_softc *) devt;
- return(arEndpoint2Ac(ar, ep ));
-}
-
-#if defined(CONFIG_ATH6KL_ENABLE_COEXISTENCE)
-static int ath6kl_config_btcoex_params(struct ar6_softc *ar)
-{
- int r;
- WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD sbcb_cmd;
- WMI_SET_BTCOEX_FE_ANT_CMD sbfa_cmd;
-
- /* Configure the type of BT collocated with WLAN */
- memset(&sbcb_cmd, 0, sizeof(WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD));
- sbcb_cmd.btcoexCoLocatedBTdev = ATH6KL_BT_DEV;
-
- r = wmi_set_btcoex_colocated_bt_dev_cmd(ar->arWmi, &sbcb_cmd);
-
- if (r) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("Unable to set collocated BT type\n"));
- return r;
- }
-
- /* Configure the type of BT collocated with WLAN */
- memset(&sbfa_cmd, 0, sizeof(WMI_SET_BTCOEX_FE_ANT_CMD));
-
- sbfa_cmd.btcoexFeAntType = ATH6KL_BT_ANTENNA;
-
- r = wmi_set_btcoex_fe_ant_cmd(ar->arWmi, &sbfa_cmd);
- if (r) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("Unable to set fornt end antenna configuration\n"));
- return r;
- }
-
- return 0;
-}
-#else
-static int ath6kl_config_btcoex_params(struct ar6_softc *ar)
-{
- return 0;
-}
-#endif /* CONFIG_ATH6KL_ENABLE_COEXISTENCE */
-
-/*
- * This function applies WLAN specific configuration defined in wlan_config.h
- */
-int ar6000_target_config_wlan_params(struct ar6_softc *ar)
-{
- int status = 0;
-
-#ifdef CONFIG_HOST_TCMD_SUPPORT
- if (ar->arTargetMode != AR6000_WLAN_MODE) {
- return 0;
- }
-#endif /* CONFIG_HOST_TCMD_SUPPORT */
-
- /*
- * configure the device for rx dot11 header rules 0,0 are the default values
- * therefore this command can be skipped if the inputs are 0,FALSE,FALSE.Required
- * if checksum offload is needed. Set RxMetaVersion to 2
- */
- if ((wmi_set_rx_frame_format_cmd(ar->arWmi,ar->rxMetaVersion, processDot11Hdr, processDot11Hdr)) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set the rx frame format.\n"));
- status = A_ERROR;
- }
-
- status = ath6kl_config_btcoex_params(ar);
- if (status)
- return status;
-
-#if WLAN_CONFIG_IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN
- if ((wmi_pmparams_cmd(ar->arWmi, 0, 1, 0, 0, 1, IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set power save fail event policy\n"));
- status = A_ERROR;
- }
-#endif
-
-#if WLAN_CONFIG_DONOT_IGNORE_BARKER_IN_ERP
- if ((wmi_set_lpreamble_cmd(ar->arWmi, 0, WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set barker preamble policy\n"));
- status = A_ERROR;
- }
-#endif
-
- if ((wmi_set_keepalive_cmd(ar->arWmi, WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set keep alive interval\n"));
- status = A_ERROR;
- }
-
-#if WLAN_CONFIG_DISABLE_11N
- {
- WMI_SET_HT_CAP_CMD htCap;
-
- memset(&htCap, 0, sizeof(WMI_SET_HT_CAP_CMD));
- htCap.band = 0;
- if ((wmi_set_ht_cap_cmd(ar->arWmi, &htCap)) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set ht capabilities \n"));
- status = A_ERROR;
- }
-
- htCap.band = 1;
- if ((wmi_set_ht_cap_cmd(ar->arWmi, &htCap)) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set ht capabilities \n"));
- status = A_ERROR;
- }
- }
-#endif /* WLAN_CONFIG_DISABLE_11N */
-
-#ifdef ATH6K_CONFIG_OTA_MODE
- if ((wmi_powermode_cmd(ar->arWmi, MAX_PERF_POWER)) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set power mode \n"));
- status = A_ERROR;
- }
-#endif
-
- if ((wmi_disctimeout_cmd(ar->arWmi, WLAN_CONFIG_DISCONNECT_TIMEOUT)) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set disconnect timeout \n"));
- status = A_ERROR;
- }
-
-#if WLAN_CONFIG_DISABLE_TX_BURSTING
- if ((wmi_set_wmm_txop(ar->arWmi, WMI_TXOP_DISABLED)) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set txop bursting \n"));
- status = A_ERROR;
- }
-#endif
-
- return status;
-}
-
-/* This function does one time initialization for the lifetime of the device */
-int ar6000_init(struct net_device *dev)
-{
- struct ar6_softc *ar;
- int status;
- s32 timeleft;
- s16 i;
- int ret = 0;
-
- if((ar = ar6k_priv(dev)) == NULL)
- {
- return -EIO;
- }
-
- if (wlaninitmode == WLAN_INIT_MODE_USR || wlaninitmode == WLAN_INIT_MODE_DRV) {
-
- ar6000_update_bdaddr(ar);
-
- if (enablerssicompensation) {
- ar6000_copy_cust_data_from_target(ar->arHifDevice, ar->arTargetType);
- read_rssi_compensation_param(ar);
- for (i=-95; i<=0; i++) {
- rssi_compensation_table[0-i] = rssi_compensation_calc(ar,i);
- }
- }
- }
-
- dev_hold(dev);
- rtnl_unlock();
-
- /* Do we need to finish the BMI phase */
- if ((wlaninitmode == WLAN_INIT_MODE_USR || wlaninitmode == WLAN_INIT_MODE_DRV) &&
- (BMIDone(ar->arHifDevice) != 0))
- {
- ret = -EIO;
- goto ar6000_init_done;
- }
-
- if (!bypasswmi)
- {
-#if 0 /* TBDXXX */
- if (ar->arVersion.host_ver != ar->arVersion.target_ver) {
- A_PRINTF("WARNING: Host version 0x%x does not match Target "
- " version 0x%x!\n",
- ar->arVersion.host_ver, ar->arVersion.target_ver);
- }
-#endif
-
- /* Indicate that WMI is enabled (although not ready yet) */
- ar->arWmiEnabled = true;
- if ((ar->arWmi = wmi_init((void *) ar)) == NULL)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() Failed to initialize WMI.\n", __func__));
- ret = -EIO;
- goto ar6000_init_done;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() Got WMI @ 0x%lx.\n", __func__,
- (unsigned long) ar->arWmi));
- }
-
- do {
- struct htc_service_connect_req connect;
-
- /* the reason we have to wait for the target here is that the driver layer
- * has to init BMI in order to set the host block size,
- */
- status = HTCWaitTarget(ar->arHtcTarget);
-
- if (status) {
- break;
- }
-
- A_MEMZERO(&connect,sizeof(connect));
- /* meta data is unused for now */
- connect.pMetaData = NULL;
- connect.MetaDataLength = 0;
- /* these fields are the same for all service endpoints */
- connect.EpCallbacks.pContext = ar;
- connect.EpCallbacks.EpTxCompleteMultiple = ar6000_tx_complete;
- connect.EpCallbacks.EpRecv = ar6000_rx;
- connect.EpCallbacks.EpRecvRefill = ar6000_rx_refill;
- connect.EpCallbacks.EpSendFull = ar6000_tx_queue_full;
- /* set the max queue depth so that our ar6000_tx_queue_full handler gets called.
- * Linux has the peculiarity of not providing flow control between the
- * NIC and the network stack. There is no API to indicate that a TX packet
- * was sent which could provide some back pressure to the network stack.
- * Under linux you would have to wait till the network stack consumed all sk_buffs
- * before any back-flow kicked in. Which isn't very friendly.
- * So we have to manage this ourselves */
- connect.MaxSendQueueDepth = MAX_DEFAULT_SEND_QUEUE_DEPTH;
- connect.EpCallbacks.RecvRefillWaterMark = AR6000_MAX_RX_BUFFERS / 4; /* set to 25 % */
- if (0 == connect.EpCallbacks.RecvRefillWaterMark) {
- connect.EpCallbacks.RecvRefillWaterMark++;
- }
- /* connect to control service */
- connect.ServiceID = WMI_CONTROL_SVC;
- status = ar6000_connectservice(ar,
- &connect,
- "WMI CONTROL");
- if (status) {
- break;
- }
-
- connect.LocalConnectionFlags |= HTC_LOCAL_CONN_FLAGS_ENABLE_SEND_BUNDLE_PADDING;
- /* limit the HTC message size on the send path, although we can receive A-MSDU frames of
- * 4K, we will only send ethernet-sized (802.3) frames on the send path. */
- connect.MaxSendMsgSize = WMI_MAX_TX_DATA_FRAME_LENGTH;
-
- /* to reduce the amount of committed memory for larger A_MSDU frames, use the recv-alloc threshold
- * mechanism for larger packets */
- connect.EpCallbacks.RecvAllocThreshold = AR6000_BUFFER_SIZE;
- connect.EpCallbacks.EpRecvAllocThresh = ar6000_alloc_amsdu_rxbuf;
-
- /* for the remaining data services set the connection flag to reduce dribbling,
- * if configured to do so */
- if (reduce_credit_dribble) {
- connect.ConnectionFlags |= HTC_CONNECT_FLAGS_REDUCE_CREDIT_DRIBBLE;
- /* the credit dribble trigger threshold is (reduce_credit_dribble - 1) for a value
- * of 0-3 */
- connect.ConnectionFlags &= ~HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_MASK;
- connect.ConnectionFlags |=
- ((u16)reduce_credit_dribble - 1) & HTC_CONNECT_FLAGS_THRESHOLD_LEVEL_MASK;
- }
- /* connect to best-effort service */
- connect.ServiceID = WMI_DATA_BE_SVC;
-
- status = ar6000_connectservice(ar,
- &connect,
- "WMI DATA BE");
- if (status) {
- break;
- }
-
- /* connect to back-ground
- * map this to WMI LOW_PRI */
- connect.ServiceID = WMI_DATA_BK_SVC;
- status = ar6000_connectservice(ar,
- &connect,
- "WMI DATA BK");
- if (status) {
- break;
- }
-
- /* connect to Video service, map this to
- * to HI PRI */
- connect.ServiceID = WMI_DATA_VI_SVC;
- status = ar6000_connectservice(ar,
- &connect,
- "WMI DATA VI");
- if (status) {
- break;
- }
-
- /* connect to VO service, this is currently not
- * mapped to a WMI priority stream due to historical reasons.
- * WMI originally defined 3 priorities over 3 mailboxes
- * We can change this when WMI is reworked so that priorities are not
- * dependent on mailboxes */
- connect.ServiceID = WMI_DATA_VO_SVC;
- status = ar6000_connectservice(ar,
- &connect,
- "WMI DATA VO");
- if (status) {
- break;
- }
-
- A_ASSERT(arAc2EndpointID(ar,WMM_AC_BE) != 0);
- A_ASSERT(arAc2EndpointID(ar,WMM_AC_BK) != 0);
- A_ASSERT(arAc2EndpointID(ar,WMM_AC_VI) != 0);
- A_ASSERT(arAc2EndpointID(ar,WMM_AC_VO) != 0);
-
- /* setup access class priority mappings */
- ar->arAcStreamPriMap[WMM_AC_BK] = 0; /* lowest */
- ar->arAcStreamPriMap[WMM_AC_BE] = 1; /* */
- ar->arAcStreamPriMap[WMM_AC_VI] = 2; /* */
- ar->arAcStreamPriMap[WMM_AC_VO] = 3; /* highest */
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
- if (setuphci && (NULL != ar6kHciTransCallbacks.setupTransport)) {
- struct hci_transport_misc_handles hciHandles;
-
- hciHandles.netDevice = ar->arNetDev;
- hciHandles.hifDevice = ar->arHifDevice;
- hciHandles.htcHandle = ar->arHtcTarget;
- status = (int)(ar6kHciTransCallbacks.setupTransport(&hciHandles));
- }
-#else
- if (setuphci) {
- /* setup HCI */
- status = ar6000_setup_hci(ar);
- }
-#endif
-
- } while (false);
-
- if (status) {
- ret = -EIO;
- goto ar6000_init_done;
- }
-
- if (regscanmode) {
- u32 param;
-
- if (BMIReadMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar,
- hi_option_flag),
- (u8 *)&param,
- 4) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("BMIReadMemory forsetting "
- "regscanmode failed\n"));
- return A_ERROR;
- }
-
- if (regscanmode == 1)
- param |= HI_OPTION_SKIP_REG_SCAN;
- else if (regscanmode == 2)
- param |= HI_OPTION_INIT_REG_SCAN;
-
- if (BMIWriteMemory(ar->arHifDevice,
- HOST_INTEREST_ITEM_ADDRESS(ar,
- hi_option_flag),
- (u8 *)&param,
- 4) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("BMIWriteMemory forsetting "
- "regscanmode failed\n"));
- return A_ERROR;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("Regulatory scan mode set\n"));
- }
-
- /*
- * give our connected endpoints some buffers
- */
-
- ar6000_rx_refill(ar, ar->arControlEp);
- ar6000_rx_refill(ar, arAc2EndpointID(ar,WMM_AC_BE));
-
- /*
- * We will post the receive buffers only for SPE or endpoint ping testing so we are
- * making it conditional on the 'bypasswmi' flag.
- */
- if (bypasswmi) {
- ar6000_rx_refill(ar,arAc2EndpointID(ar,WMM_AC_BK));
- ar6000_rx_refill(ar,arAc2EndpointID(ar,WMM_AC_VI));
- ar6000_rx_refill(ar,arAc2EndpointID(ar,WMM_AC_VO));
- }
-
- /* allocate some buffers that handle larger AMSDU frames */
- ar6000_refill_amsdu_rxbufs(ar,AR6000_MAX_AMSDU_RX_BUFFERS);
-
- /* setup credit distribution */
- ar6000_setup_credit_dist(ar->arHtcTarget, &ar->arCreditStateInfo);
-
- /* Since cookies are used for HTC transports, they should be */
- /* initialized prior to enabling HTC. */
- ar6000_cookie_init(ar);
-
- /* start HTC */
- status = HTCStart(ar->arHtcTarget);
-
- if (status) {
- if (ar->arWmiEnabled == true) {
- wmi_shutdown(ar->arWmi);
- ar->arWmiEnabled = false;
- ar->arWmi = NULL;
- }
- ar6000_cookie_cleanup(ar);
- ret = -EIO;
- goto ar6000_init_done;
- }
-
- if (!bypasswmi) {
- /* Wait for Wmi event to be ready */
- timeleft = wait_event_interruptible_timeout(arEvent,
- (ar->arWmiReady == true), wmitimeout * HZ);
-
- if (ar->arVersion.abi_ver != AR6K_ABI_VERSION) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ABI Version mismatch: Host(0x%x), Target(0x%x)\n", AR6K_ABI_VERSION, ar->arVersion.abi_ver));
-#ifndef ATH6K_SKIP_ABI_VERSION_CHECK
- ret = -EIO;
- goto ar6000_init_done;
-#endif /* ATH6K_SKIP_ABI_VERSION_CHECK */
- }
-
- if(!timeleft || signal_pending(current))
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("WMI is not ready or wait was interrupted\n"));
- ret = -EIO;
- goto ar6000_init_done;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() WMI is ready\n", __func__));
-
- /* Communicate the wmi protocol verision to the target */
- if ((ar6000_set_host_app_area(ar)) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to set the host app area\n"));
- }
- ar6000_target_config_wlan_params(ar);
- }
-
- ar->arNumDataEndPts = 1;
-
- if (bypasswmi) {
- /* for tests like endpoint ping, the MAC address needs to be non-zero otherwise
- * the data path through a raw socket is disabled */
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x01;
- dev->dev_addr[2] = 0x02;
- dev->dev_addr[3] = 0xAA;
- dev->dev_addr[4] = 0xBB;
- dev->dev_addr[5] = 0xCC;
- }
-
-ar6000_init_done:
- rtnl_lock();
- dev_put(dev);
-
- return ret;
-}
-
-
-void
-ar6000_bitrate_rx(void *devt, s32 rateKbps)
-{
- struct ar6_softc *ar = (struct ar6_softc *)devt;
-
- ar->arBitRate = rateKbps;
- wake_up(&arEvent);
-}
-
-void
-ar6000_ratemask_rx(void *devt, u32 ratemask)
-{
- struct ar6_softc *ar = (struct ar6_softc *)devt;
-
- ar->arRateMask = ratemask;
- wake_up(&arEvent);
-}
-
-void
-ar6000_txPwr_rx(void *devt, u8 txPwr)
-{
- struct ar6_softc *ar = (struct ar6_softc *)devt;
-
- ar->arTxPwr = txPwr;
- wake_up(&arEvent);
-}
-
-
-void
-ar6000_channelList_rx(void *devt, s8 numChan, u16 *chanList)
-{
- struct ar6_softc *ar = (struct ar6_softc *)devt;
-
- memcpy(ar->arChannelList, chanList, numChan * sizeof (u16));
- ar->arNumChannels = numChan;
-
- wake_up(&arEvent);
-}
-
-u8 ar6000_ibss_map_epid(struct sk_buff *skb, struct net_device *dev, u32 *mapNo)
-{
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
- u8 *datap;
- ATH_MAC_HDR *macHdr;
- u32 i, eptMap;
-
- (*mapNo) = 0;
- datap = A_NETBUF_DATA(skb);
- macHdr = (ATH_MAC_HDR *)(datap + sizeof(WMI_DATA_HDR));
- if (IEEE80211_IS_MULTICAST(macHdr->dstMac)) {
- return ENDPOINT_2;
- }
-
- eptMap = -1;
- for (i = 0; i < ar->arNodeNum; i ++) {
- if (IEEE80211_ADDR_EQ(macHdr->dstMac, ar->arNodeMap[i].macAddress)) {
- (*mapNo) = i + 1;
- ar->arNodeMap[i].txPending ++;
- return ar->arNodeMap[i].epId;
- }
-
- if ((eptMap == -1) && !ar->arNodeMap[i].txPending) {
- eptMap = i;
- }
- }
-
- if (eptMap == -1) {
- eptMap = ar->arNodeNum;
- ar->arNodeNum ++;
- A_ASSERT(ar->arNodeNum <= MAX_NODE_NUM);
- }
-
- memcpy(ar->arNodeMap[eptMap].macAddress, macHdr->dstMac, IEEE80211_ADDR_LEN);
-
- for (i = ENDPOINT_2; i <= ENDPOINT_5; i ++) {
- if (!ar->arTxPending[i]) {
- ar->arNodeMap[eptMap].epId = i;
- break;
- }
- // No free endpoint is available, start redistribution on the inuse endpoints.
- if (i == ENDPOINT_5) {
- ar->arNodeMap[eptMap].epId = ar->arNexEpId;
- ar->arNexEpId ++;
- if (ar->arNexEpId > ENDPOINT_5) {
- ar->arNexEpId = ENDPOINT_2;
- }
- }
- }
-
- (*mapNo) = eptMap + 1;
- ar->arNodeMap[eptMap].txPending ++;
-
- return ar->arNodeMap[eptMap].epId;
-}
-
-#ifdef DEBUG
-static void ar6000_dump_skb(struct sk_buff *skb)
-{
- u_char *ch;
- for (ch = A_NETBUF_DATA(skb);
- (unsigned long)ch < ((unsigned long)A_NETBUF_DATA(skb) +
- A_NETBUF_LEN(skb)); ch++)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("%2.2x ", *ch));
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("\n"));
-}
-#endif
-
-#ifdef HTC_TEST_SEND_PKTS
-static void DoHTCSendPktsTest(struct ar6_softc *ar, int MapNo, HTC_ENDPOINT_ID eid, struct sk_buff *skb);
-#endif
-
-static int
-ar6000_data_tx(struct sk_buff *skb, struct net_device *dev)
-{
-#define AC_NOT_MAPPED 99
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
- u8 ac = AC_NOT_MAPPED;
- HTC_ENDPOINT_ID eid = ENDPOINT_UNUSED;
- u32 mapNo = 0;
- int len;
- struct ar_cookie *cookie;
- bool checkAdHocPsMapping = false,bMoreData = false;
- HTC_TX_TAG htc_tag = AR6K_DATA_PKT_TAG;
- u8 dot11Hdr = processDot11Hdr;
-#ifdef CONFIG_PM
- if (ar->arWowState != WLAN_WOW_STATE_NONE) {
- A_NETBUF_FREE(skb);
- return 0;
- }
-#endif /* CONFIG_PM */
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("ar6000_data_tx start - skb=0x%lx, data=0x%lx, len=0x%x\n",
- (unsigned long)skb, (unsigned long)A_NETBUF_DATA(skb),
- A_NETBUF_LEN(skb)));
-
- /* If target is not associated */
- if( (!ar->arConnected && !bypasswmi)
-#ifdef CONFIG_HOST_TCMD_SUPPORT
- /* TCMD doesn't support any data, free the buf and return */
- || (ar->arTargetMode == AR6000_TCMD_MODE)
-#endif
- ) {
- A_NETBUF_FREE(skb);
- return 0;
- }
-
- do {
-
- if (ar->arWmiReady == false && bypasswmi == 0) {
- break;
- }
-
-#ifdef BLOCK_TX_PATH_FLAG
- if (blocktx) {
- break;
- }
-#endif /* BLOCK_TX_PATH_FLAG */
-
- /* AP mode Power save processing */
- /* If the dst STA is in sleep state, queue the pkt in its PS queue */
-
- if (ar->arNetworkType == AP_NETWORK) {
- ATH_MAC_HDR *datap = (ATH_MAC_HDR *)A_NETBUF_DATA(skb);
- sta_t *conn = NULL;
-
- /* If the dstMac is a Multicast address & atleast one of the
- * associated STA is in PS mode, then queue the pkt to the
- * mcastq
- */
- if (IEEE80211_IS_MULTICAST(datap->dstMac)) {
- u8 ctr=0;
- bool qMcast=false;
-
-
- for (ctr=0; ctr<AP_MAX_NUM_STA; ctr++) {
- if (STA_IS_PWR_SLEEP((&ar->sta_list[ctr]))) {
- qMcast = true;
- }
- }
- if(qMcast) {
-
- /* If this transmit is not because of a Dtim Expiry q it */
- if (ar->DTIMExpired == false) {
- bool isMcastqEmpty = false;
-
- A_MUTEX_LOCK(&ar->mcastpsqLock);
- isMcastqEmpty = A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq);
- A_NETBUF_ENQUEUE(&ar->mcastpsq, skb);
- A_MUTEX_UNLOCK(&ar->mcastpsqLock);
-
- /* If this is the first Mcast pkt getting queued
- * indicate to the target to set the BitmapControl LSB
- * of the TIM IE.
- */
- if (isMcastqEmpty) {
- wmi_set_pvb_cmd(ar->arWmi, MCAST_AID, 1);
- }
- return 0;
- } else {
- /* This transmit is because of Dtim expiry. Determine if
- * MoreData bit has to be set.
- */
- A_MUTEX_LOCK(&ar->mcastpsqLock);
- if(!A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq)) {
- bMoreData = true;
- }
- A_MUTEX_UNLOCK(&ar->mcastpsqLock);
- }
- }
- } else {
- conn = ieee80211_find_conn(ar, datap->dstMac);
- if (conn) {
- if (STA_IS_PWR_SLEEP(conn)) {
- /* If this transmit is not because of a PsPoll q it*/
- if (!STA_IS_PS_POLLED(conn)) {
- bool isPsqEmpty = false;
- /* Queue the frames if the STA is sleeping */
- A_MUTEX_LOCK(&conn->psqLock);
- isPsqEmpty = A_NETBUF_QUEUE_EMPTY(&conn->psq);
- A_NETBUF_ENQUEUE(&conn->psq, skb);
- A_MUTEX_UNLOCK(&conn->psqLock);
-
- /* If this is the first pkt getting queued
- * for this STA, update the PVB for this STA
- */
- if (isPsqEmpty) {
- wmi_set_pvb_cmd(ar->arWmi, conn->aid, 1);
- }
-
- return 0;
- } else {
- /* This tx is because of a PsPoll. Determine if
- * MoreData bit has to be set
- */
- A_MUTEX_LOCK(&conn->psqLock);
- if (!A_NETBUF_QUEUE_EMPTY(&conn->psq)) {
- bMoreData = true;
- }
- A_MUTEX_UNLOCK(&conn->psqLock);
- }
- }
- } else {
-
- /* non existent STA. drop the frame */
- A_NETBUF_FREE(skb);
- return 0;
- }
- }
- }
-
- if (ar->arWmiEnabled) {
- u8 csumStart=0;
- u8 csumDest=0;
- u8 csum=skb->ip_summed;
- if(csumOffload && (csum==CHECKSUM_PARTIAL)){
- csumStart = (skb->head + skb->csum_start - skb_network_header(skb) +
- sizeof(ATH_LLC_SNAP_HDR));
- csumDest=skb->csum_offset+csumStart;
- }
- if (A_NETBUF_HEADROOM(skb) < dev->hard_header_len - LINUX_HACK_FUDGE_FACTOR) {
- struct sk_buff *newbuf;
-
- /*
- * We really should have gotten enough headroom but sometimes
- * we still get packets with not enough headroom. Copy the packet.
- */
- len = A_NETBUF_LEN(skb);
- newbuf = A_NETBUF_ALLOC(len);
- if (newbuf == NULL) {
- break;
- }
- A_NETBUF_PUT(newbuf, len);
- memcpy(A_NETBUF_DATA(newbuf), A_NETBUF_DATA(skb), len);
- A_NETBUF_FREE(skb);
- skb = newbuf;
- /* fall through and assemble header */
- }
-
- if (dot11Hdr) {
- if (wmi_dot11_hdr_add(ar->arWmi,skb,ar->arNetworkType) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx-wmi_dot11_hdr_add failed\n"));
- break;
- }
- } else {
- if (wmi_dix_2_dot3(ar->arWmi, skb) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx - wmi_dix_2_dot3 failed\n"));
- break;
- }
- }
- if(csumOffload && (csum ==CHECKSUM_PARTIAL)){
- WMI_TX_META_V2 metaV2;
- metaV2.csumStart =csumStart;
- metaV2.csumDest = csumDest;
- metaV2.csumFlags = 0x1;/*instruct target to calculate checksum*/
- if (wmi_data_hdr_add(ar->arWmi, skb, DATA_MSGTYPE, bMoreData, dot11Hdr,
- WMI_META_VERSION_2,&metaV2) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx - wmi_data_hdr_add failed\n"));
- break;
- }
-
- }
- else
- {
- if (wmi_data_hdr_add(ar->arWmi, skb, DATA_MSGTYPE, bMoreData, dot11Hdr,0,NULL) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_data_tx - wmi_data_hdr_add failed\n"));
- break;
- }
- }
-
-
- if ((ar->arNetworkType == ADHOC_NETWORK) &&
- ar->arIbssPsEnable && ar->arConnected) {
- /* flag to check adhoc mapping once we take the lock below: */
- checkAdHocPsMapping = true;
-
- } else {
- /* get the stream mapping */
- ac = wmi_implicit_create_pstream(ar->arWmi, skb, 0, ar->arWmmEnabled);
- }
-
- } else {
- EPPING_HEADER *eppingHdr;
-
- eppingHdr = A_NETBUF_DATA(skb);
-
- if (IS_EPPING_PACKET(eppingHdr)) {
- /* the stream ID is mapped to an access class */
- ac = eppingHdr->StreamNo_h;
- /* some EPPING packets cannot be dropped no matter what access class it was
- * sent on. We can change the packet tag to guarantee it will not get dropped */
- if (IS_EPING_PACKET_NO_DROP(eppingHdr)) {
- htc_tag = AR6K_CONTROL_PKT_TAG;
- }
-
- if (ac == HCI_TRANSPORT_STREAM_NUM) {
- /* pass this to HCI */
-#ifndef EXPORT_HCI_BRIDGE_INTERFACE
- if (!hci_test_send(ar,skb)) {
- return 0;
- }
-#endif
- /* set AC to discard this skb */
- ac = AC_NOT_MAPPED;
- } else {
- /* a quirk of linux, the payload of the frame is 32-bit aligned and thus the addition
- * of the HTC header will mis-align the start of the HTC frame, so we add some
- * padding which will be stripped off in the target */
- if (EPPING_ALIGNMENT_PAD > 0) {
- A_NETBUF_PUSH(skb, EPPING_ALIGNMENT_PAD);
- }
- }
-
- } else {
- /* not a ping packet, drop it */
- ac = AC_NOT_MAPPED;
- }
- }
-
- } while (false);
-
- /* did we succeed ? */
- if ((ac == AC_NOT_MAPPED) && !checkAdHocPsMapping) {
- /* cleanup and exit */
- A_NETBUF_FREE(skb);
- AR6000_STAT_INC(ar, tx_dropped);
- AR6000_STAT_INC(ar, tx_aborted_errors);
- return 0;
- }
-
- cookie = NULL;
-
- /* take the lock to protect driver data */
- AR6000_SPIN_LOCK(&ar->arLock, 0);
-
- do {
-
- if (checkAdHocPsMapping) {
- eid = ar6000_ibss_map_epid(skb, dev, &mapNo);
- }else {
- eid = arAc2EndpointID (ar, ac);
- }
- /* validate that the endpoint is connected */
- if (eid == 0 || eid == ENDPOINT_UNUSED ) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" eid %d is NOT mapped!\n", eid));
- break;
- }
- /* allocate resource for this packet */
- cookie = ar6000_alloc_cookie(ar);
-
- if (cookie != NULL) {
- /* update counts while the lock is held */
- ar->arTxPending[eid]++;
- ar->arTotalTxDataPending++;
- }
-
- } while (false);
-
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
-
- if (cookie != NULL) {
- cookie->arc_bp[0] = (unsigned long)skb;
- cookie->arc_bp[1] = mapNo;
- SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
- cookie,
- A_NETBUF_DATA(skb),
- A_NETBUF_LEN(skb),
- eid,
- htc_tag);
-
-#ifdef DEBUG
- if (debugdriver >= 3) {
- ar6000_dump_skb(skb);
- }
-#endif
-#ifdef HTC_TEST_SEND_PKTS
- DoHTCSendPktsTest(ar,mapNo,eid,skb);
-#endif
- /* HTC interface is asynchronous, if this fails, cleanup will happen in
- * the ar6000_tx_complete callback */
- HTCSendPkt(ar->arHtcTarget, &cookie->HtcPkt);
- } else {
- /* no packet to send, cleanup */
- A_NETBUF_FREE(skb);
- AR6000_STAT_INC(ar, tx_dropped);
- AR6000_STAT_INC(ar, tx_aborted_errors);
- }
-
- return 0;
-}
-
-int
-ar6000_acl_data_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
- struct ar_cookie *cookie;
- HTC_ENDPOINT_ID eid = ENDPOINT_UNUSED;
-
- cookie = NULL;
- AR6000_SPIN_LOCK(&ar->arLock, 0);
-
- /* For now we send ACL on BE endpoint: We can also have a dedicated EP */
- eid = arAc2EndpointID (ar, 0);
- /* allocate resource for this packet */
- cookie = ar6000_alloc_cookie(ar);
-
- if (cookie != NULL) {
- /* update counts while the lock is held */
- ar->arTxPending[eid]++;
- ar->arTotalTxDataPending++;
- }
-
-
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
-
- if (cookie != NULL) {
- cookie->arc_bp[0] = (unsigned long)skb;
- cookie->arc_bp[1] = 0;
- SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
- cookie,
- A_NETBUF_DATA(skb),
- A_NETBUF_LEN(skb),
- eid,
- AR6K_DATA_PKT_TAG);
-
- /* HTC interface is asynchronous, if this fails, cleanup will happen in
- * the ar6000_tx_complete callback */
- HTCSendPkt(ar->arHtcTarget, &cookie->HtcPkt);
- } else {
- /* no packet to send, cleanup */
- A_NETBUF_FREE(skb);
- AR6000_STAT_INC(ar, tx_dropped);
- AR6000_STAT_INC(ar, tx_aborted_errors);
- }
- return 0;
-}
-
-
-#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
-static void
-tvsub(register struct timeval *out, register struct timeval *in)
-{
- if((out->tv_usec -= in->tv_usec) < 0) {
- out->tv_sec--;
- out->tv_usec += 1000000;
- }
- out->tv_sec -= in->tv_sec;
-}
-
-void
-applyAPTCHeuristics(struct ar6_softc *ar)
-{
- u32 duration;
- u32 numbytes;
- u32 throughput;
- struct timeval ts;
- int status;
-
- AR6000_SPIN_LOCK(&ar->arLock, 0);
-
- if ((enableAPTCHeuristics) && (!aptcTR.timerScheduled)) {
- do_gettimeofday(&ts);
- tvsub(&ts, &aptcTR.samplingTS);
- duration = ts.tv_sec * 1000 + ts.tv_usec / 1000; /* ms */
- numbytes = aptcTR.bytesTransmitted + aptcTR.bytesReceived;
-
- if (duration > APTC_TRAFFIC_SAMPLING_INTERVAL) {
- /* Initialize the time stamp and byte count */
- aptcTR.bytesTransmitted = aptcTR.bytesReceived = 0;
- do_gettimeofday(&aptcTR.samplingTS);
-
- /* Calculate and decide based on throughput thresholds */
- throughput = ((numbytes * 8) / duration);
- if (throughput > APTC_UPPER_THROUGHPUT_THRESHOLD) {
- /* Disable Sleep and schedule a timer */
- A_ASSERT(ar->arWmiReady == true);
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
- status = wmi_powermode_cmd(ar->arWmi, MAX_PERF_POWER);
- AR6000_SPIN_LOCK(&ar->arLock, 0);
- A_TIMEOUT_MS(&aptcTimer, APTC_TRAFFIC_SAMPLING_INTERVAL, 0);
- aptcTR.timerScheduled = true;
- }
- }
- }
-
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
-}
-#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
-
-static HTC_SEND_FULL_ACTION ar6000_tx_queue_full(void *Context, struct htc_packet *pPacket)
-{
- struct ar6_softc *ar = (struct ar6_softc *)Context;
- HTC_SEND_FULL_ACTION action = HTC_SEND_FULL_KEEP;
- bool stopNet = false;
- HTC_ENDPOINT_ID Endpoint = HTC_GET_ENDPOINT_FROM_PKT(pPacket);
-
- do {
-
- if (bypasswmi) {
- int accessClass;
-
- if (HTC_GET_TAG_FROM_PKT(pPacket) == AR6K_CONTROL_PKT_TAG) {
- /* don't drop special control packets */
- break;
- }
-
- accessClass = arEndpoint2Ac(ar,Endpoint);
- /* for endpoint ping testing drop Best Effort and Background */
- if ((accessClass == WMM_AC_BE) || (accessClass == WMM_AC_BK)) {
- action = HTC_SEND_FULL_DROP;
- stopNet = false;
- } else {
- /* keep but stop the netqueues */
- stopNet = true;
- }
- break;
- }
-
- if (Endpoint == ar->arControlEp) {
- /* under normal WMI if this is getting full, then something is running rampant
- * the host should not be exhausting the WMI queue with too many commands
- * the only exception to this is during testing using endpointping */
- AR6000_SPIN_LOCK(&ar->arLock, 0);
- /* set flag to handle subsequent messages */
- ar->arWMIControlEpFull = true;
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("WMI Control Endpoint is FULL!!! \n"));
- /* no need to stop the network */
- stopNet = false;
- break;
- }
-
- /* if we get here, we are dealing with data endpoints getting full */
-
- if (HTC_GET_TAG_FROM_PKT(pPacket) == AR6K_CONTROL_PKT_TAG) {
- /* don't drop control packets issued on ANY data endpoint */
- break;
- }
-
- if (ar->arNetworkType == ADHOC_NETWORK) {
- /* in adhoc mode, we cannot differentiate traffic priorities so there is no need to
- * continue, however we should stop the network */
- stopNet = true;
- break;
- }
- /* the last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for the highest
- * active stream */
- if (ar->arAcStreamPriMap[arEndpoint2Ac(ar,Endpoint)] < ar->arHiAcStreamActivePri &&
- ar->arCookieCount <= MAX_HI_COOKIE_NUM) {
- /* this stream's priority is less than the highest active priority, we
- * give preference to the highest priority stream by directing
- * HTC to drop the packet that overflowed */
- action = HTC_SEND_FULL_DROP;
- /* since we are dropping packets, no need to stop the network */
- stopNet = false;
- break;
- }
-
- } while (false);
-
- if (stopNet) {
- AR6000_SPIN_LOCK(&ar->arLock, 0);
- ar->arNetQueueStopped = true;
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
- /* one of the data endpoints queues is getting full..need to stop network stack
- * the queue will resume in ar6000_tx_complete() */
- netif_stop_queue(ar->arNetDev);
- }
-
- return action;
-}
-
-
-static void
-ar6000_tx_complete(void *Context, struct htc_packet_queue *pPacketQueue)
-{
- struct ar6_softc *ar = (struct ar6_softc *)Context;
- u32 mapNo = 0;
- int status;
- struct ar_cookie * ar_cookie;
- HTC_ENDPOINT_ID eid;
- bool wakeEvent = false;
- struct sk_buff_head skb_queue;
- struct htc_packet *pPacket;
- struct sk_buff *pktSkb;
- bool flushing = false;
-
- skb_queue_head_init(&skb_queue);
-
- /* lock the driver as we update internal state */
- AR6000_SPIN_LOCK(&ar->arLock, 0);
-
- /* reap completed packets */
- while (!HTC_QUEUE_EMPTY(pPacketQueue)) {
-
- pPacket = HTC_PACKET_DEQUEUE(pPacketQueue);
-
- ar_cookie = (struct ar_cookie *)pPacket->pPktContext;
- A_ASSERT(ar_cookie);
-
- status = pPacket->Status;
- pktSkb = (struct sk_buff *)ar_cookie->arc_bp[0];
- eid = pPacket->Endpoint;
- mapNo = ar_cookie->arc_bp[1];
-
- A_ASSERT(pktSkb);
- A_ASSERT(pPacket->pBuffer == A_NETBUF_DATA(pktSkb));
-
- /* add this to the list, use faster non-lock API */
- __skb_queue_tail(&skb_queue,pktSkb);
-
- if (!status) {
- A_ASSERT(pPacket->ActualLength == A_NETBUF_LEN(pktSkb));
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("ar6000_tx_complete skb=0x%lx data=0x%lx len=0x%x eid=%d ",
- (unsigned long)pktSkb, (unsigned long)pPacket->pBuffer,
- pPacket->ActualLength,
- eid));
-
- ar->arTxPending[eid]--;
-
- if ((eid != ar->arControlEp) || bypasswmi) {
- ar->arTotalTxDataPending--;
- }
-
- if (eid == ar->arControlEp)
- {
- if (ar->arWMIControlEpFull) {
- /* since this packet completed, the WMI EP is no longer full */
- ar->arWMIControlEpFull = false;
- }
-
- if (ar->arTxPending[eid] == 0) {
- wakeEvent = true;
- }
- }
-
- if (status) {
- if (status == A_ECANCELED) {
- /* a packet was flushed */
- flushing = true;
- }
- AR6000_STAT_INC(ar, tx_errors);
- if (status != A_NO_RESOURCE) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() -TX ERROR, status: 0x%x\n", __func__,
- status));
- }
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("OK\n"));
- flushing = false;
- AR6000_STAT_INC(ar, tx_packets);
- ar->arNetStats.tx_bytes += A_NETBUF_LEN(pktSkb);
-#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
- aptcTR.bytesTransmitted += a_netbuf_to_len(pktSkb);
- applyAPTCHeuristics(ar);
-#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
- }
-
- // TODO this needs to be looked at
- if ((ar->arNetworkType == ADHOC_NETWORK) && ar->arIbssPsEnable
- && (eid != ar->arControlEp) && mapNo)
- {
- mapNo --;
- ar->arNodeMap[mapNo].txPending --;
-
- if (!ar->arNodeMap[mapNo].txPending && (mapNo == (ar->arNodeNum - 1))) {
- u32 i;
- for (i = ar->arNodeNum; i > 0; i --) {
- if (!ar->arNodeMap[i - 1].txPending) {
- A_MEMZERO(&ar->arNodeMap[i - 1], sizeof(struct ar_node_mapping));
- ar->arNodeNum --;
- } else {
- break;
- }
- }
- }
- }
-
- ar6000_free_cookie(ar, ar_cookie);
-
- if (ar->arNetQueueStopped) {
- ar->arNetQueueStopped = false;
- }
- }
-
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
-
- /* lock is released, we can freely call other kernel APIs */
-
- /* free all skbs in our local list */
- while (!skb_queue_empty(&skb_queue)) {
- /* use non-lock version */
- pktSkb = __skb_dequeue(&skb_queue);
- A_NETBUF_FREE(pktSkb);
- }
-
- if ((ar->arConnected == true) || bypasswmi) {
- if (!flushing) {
- /* don't wake the queue if we are flushing, other wise it will just
- * keep queueing packets, which will keep failing */
- netif_wake_queue(ar->arNetDev);
- }
- }
-
- if (wakeEvent) {
- wake_up(&arEvent);
- }
-
-}
-
-sta_t *
-ieee80211_find_conn(struct ar6_softc *ar, u8 *node_addr)
-{
- sta_t *conn = NULL;
- u8 i, max_conn;
-
- switch(ar->arNetworkType) {
- case AP_NETWORK:
- max_conn = AP_MAX_NUM_STA;
- break;
- default:
- max_conn=0;
- break;
- }
-
- for (i = 0; i < max_conn; i++) {
- if (IEEE80211_ADDR_EQ(node_addr, ar->sta_list[i].mac)) {
- conn = &ar->sta_list[i];
- break;
- }
- }
-
- return conn;
-}
-
-sta_t *ieee80211_find_conn_for_aid(struct ar6_softc *ar, u8 aid)
-{
- sta_t *conn = NULL;
- u8 ctr;
-
- for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
- if (ar->sta_list[ctr].aid == aid) {
- conn = &ar->sta_list[ctr];
- break;
- }
- }
- return conn;
-}
-
-/*
- * Receive event handler. This is called by HTC when a packet is received
- */
-int pktcount;
-static void
-ar6000_rx(void *Context, struct htc_packet *pPacket)
-{
- struct ar6_softc *ar = (struct ar6_softc *)Context;
- struct sk_buff *skb = (struct sk_buff *)pPacket->pPktContext;
- int minHdrLen;
- u8 containsDot11Hdr = 0;
- int status = pPacket->Status;
- HTC_ENDPOINT_ID ept = pPacket->Endpoint;
-
- A_ASSERT((status) ||
- (pPacket->pBuffer == (A_NETBUF_DATA(skb) + HTC_HEADER_LEN)));
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_RX,("ar6000_rx ar=0x%lx eid=%d, skb=0x%lx, data=0x%lx, len=0x%x status:%d",
- (unsigned long)ar, ept, (unsigned long)skb, (unsigned long)pPacket->pBuffer,
- pPacket->ActualLength, status));
- if (status) {
- if (status != A_ECANCELED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("RX ERR (%d) \n",status));
- }
- }
-
- /* take lock to protect buffer counts
- * and adaptive power throughput state */
- AR6000_SPIN_LOCK(&ar->arLock, 0);
-
- if (!status) {
- AR6000_STAT_INC(ar, rx_packets);
- ar->arNetStats.rx_bytes += pPacket->ActualLength;
-#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
- aptcTR.bytesReceived += a_netbuf_to_len(skb);
- applyAPTCHeuristics(ar);
-#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
-
- A_NETBUF_PUT(skb, pPacket->ActualLength + HTC_HEADER_LEN);
- A_NETBUF_PULL(skb, HTC_HEADER_LEN);
-
-#ifdef DEBUG
- if (debugdriver >= 2) {
- ar6000_dump_skb(skb);
- }
-#endif /* DEBUG */
- }
-
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
-
- skb->dev = ar->arNetDev;
- if (status) {
- AR6000_STAT_INC(ar, rx_errors);
- A_NETBUF_FREE(skb);
- } else if (ar->arWmiEnabled == true) {
- if (ept == ar->arControlEp) {
- /*
- * this is a wmi control msg
- */
-#ifdef CONFIG_PM
- ar6000_check_wow_status(ar, skb, true);
-#endif /* CONFIG_PM */
- wmi_control_rx(ar->arWmi, skb);
- } else {
- WMI_DATA_HDR *dhdr = (WMI_DATA_HDR *)A_NETBUF_DATA(skb);
- bool is_amsdu;
- u8 tid;
-
- /*
- * This check can be removed if after a while we do not
- * see the warning. For now we leave it to ensure
- * we drop these frames accordingly in case the
- * target generates them for some reason. These
- * were used for an internal PAL but that's not
- * used or supported anymore. These frames should
- * not come up from the target.
- */
- if (WARN_ON(WMI_DATA_HDR_GET_DATA_TYPE(dhdr) ==
- WMI_DATA_HDR_DATA_TYPE_ACL)) {
- AR6000_STAT_INC(ar, rx_errors);
- A_NETBUF_FREE(skb);
- return;
- }
-
-#ifdef CONFIG_PM
- ar6000_check_wow_status(ar, NULL, false);
-#endif /* CONFIG_PM */
- /*
- * this is a wmi data packet
- */
- // NWF
-
- if (processDot11Hdr) {
- minHdrLen = sizeof(WMI_DATA_HDR) + sizeof(struct ieee80211_frame) + sizeof(ATH_LLC_SNAP_HDR);
- } else {
- minHdrLen = sizeof (WMI_DATA_HDR) + sizeof(ATH_MAC_HDR) +
- sizeof(ATH_LLC_SNAP_HDR);
- }
-
- /* In the case of AP mode we may receive NULL data frames
- * that do not have LLC hdr. They are 16 bytes in size.
- * Allow these frames in the AP mode.
- * ACL data frames don't follow ethernet frame bounds for
- * min length
- */
- if (ar->arNetworkType != AP_NETWORK &&
- ((pPacket->ActualLength < minHdrLen) ||
- (pPacket->ActualLength > AR6000_MAX_RX_MESSAGE_SIZE)))
- {
- /*
- * packet is too short or too long
- */
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("TOO SHORT or TOO LONG\n"));
- AR6000_STAT_INC(ar, rx_errors);
- AR6000_STAT_INC(ar, rx_length_errors);
- A_NETBUF_FREE(skb);
- } else {
- u16 seq_no;
- u8 meta_type;
-
-#if 0
- /* Access RSSI values here */
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("RSSI %d\n",
- ((WMI_DATA_HDR *) A_NETBUF_DATA(skb))->rssi));
-#endif
- /* Get the Power save state of the STA */
- if (ar->arNetworkType == AP_NETWORK) {
- sta_t *conn = NULL;
- u8 psState=0,prevPsState;
- ATH_MAC_HDR *datap=NULL;
- u16 offset;
-
- meta_type = WMI_DATA_HDR_GET_META(dhdr);
-
- psState = (((WMI_DATA_HDR *)A_NETBUF_DATA(skb))->info
- >> WMI_DATA_HDR_PS_SHIFT) & WMI_DATA_HDR_PS_MASK;
-
- offset = sizeof(WMI_DATA_HDR);
-
- switch (meta_type) {
- case 0:
- break;
- case WMI_META_VERSION_1:
- offset += sizeof(WMI_RX_META_V1);
- break;
- case WMI_META_VERSION_2:
- offset += sizeof(WMI_RX_META_V2);
- break;
- default:
- break;
- }
-
- datap = (ATH_MAC_HDR *)(A_NETBUF_DATA(skb)+offset);
- conn = ieee80211_find_conn(ar, datap->srcMac);
-
- if (conn) {
- /* if there is a change in PS state of the STA,
- * take appropriate steps.
- * 1. If Sleep-->Awake, flush the psq for the STA
- * Clear the PVB for the STA.
- * 2. If Awake-->Sleep, Starting queueing frames
- * the STA.
- */
- prevPsState = STA_IS_PWR_SLEEP(conn);
- if (psState) {
- STA_SET_PWR_SLEEP(conn);
- } else {
- STA_CLR_PWR_SLEEP(conn);
- }
-
- if (prevPsState ^ STA_IS_PWR_SLEEP(conn)) {
-
- if (!STA_IS_PWR_SLEEP(conn)) {
-
- A_MUTEX_LOCK(&conn->psqLock);
- while (!A_NETBUF_QUEUE_EMPTY(&conn->psq)) {
- struct sk_buff *skb=NULL;
-
- skb = A_NETBUF_DEQUEUE(&conn->psq);
- A_MUTEX_UNLOCK(&conn->psqLock);
- ar6000_data_tx(skb,ar->arNetDev);
- A_MUTEX_LOCK(&conn->psqLock);
- }
- A_MUTEX_UNLOCK(&conn->psqLock);
- /* Clear the PVB for this STA */
- wmi_set_pvb_cmd(ar->arWmi, conn->aid, 0);
- }
- }
- } else {
- /* This frame is from a STA that is not associated*/
- A_ASSERT(false);
- }
-
- /* Drop NULL data frames here */
- if((pPacket->ActualLength < minHdrLen) ||
- (pPacket->ActualLength > AR6000_MAX_RX_MESSAGE_SIZE)) {
- A_NETBUF_FREE(skb);
- goto rx_done;
- }
- }
-
- is_amsdu = WMI_DATA_HDR_IS_AMSDU(dhdr) ? true : false;
- tid = WMI_DATA_HDR_GET_UP(dhdr);
- seq_no = WMI_DATA_HDR_GET_SEQNO(dhdr);
- meta_type = WMI_DATA_HDR_GET_META(dhdr);
- containsDot11Hdr = WMI_DATA_HDR_GET_DOT11(dhdr);
-
- wmi_data_hdr_remove(ar->arWmi, skb);
-
- switch (meta_type) {
- case WMI_META_VERSION_1:
- {
- WMI_RX_META_V1 *pMeta = (WMI_RX_META_V1 *)A_NETBUF_DATA(skb);
- A_PRINTF("META %d %d %d %d %x\n", pMeta->status, pMeta->rix, pMeta->rssi, pMeta->channel, pMeta->flags);
- A_NETBUF_PULL((void*)skb, sizeof(WMI_RX_META_V1));
- break;
- }
- case WMI_META_VERSION_2:
- {
- WMI_RX_META_V2 *pMeta = (WMI_RX_META_V2 *)A_NETBUF_DATA(skb);
- if(pMeta->csumFlags & 0x1){
- skb->ip_summed=CHECKSUM_COMPLETE;
- skb->csum=(pMeta->csum);
- }
- A_NETBUF_PULL((void*)skb, sizeof(WMI_RX_META_V2));
- break;
- }
- default:
- break;
- }
-
- A_ASSERT(status == 0);
-
- /* NWF: print the 802.11 hdr bytes */
- if(containsDot11Hdr) {
- status = wmi_dot11_hdr_remove(ar->arWmi,skb);
- } else if(!is_amsdu) {
- status = wmi_dot3_2_dix(skb);
- }
-
- if (status) {
- /* Drop frames that could not be processed (lack of memory, etc.) */
- A_NETBUF_FREE(skb);
- goto rx_done;
- }
-
- if ((ar->arNetDev->flags & IFF_UP) == IFF_UP) {
- if (ar->arNetworkType == AP_NETWORK) {
- struct sk_buff *skb1 = NULL;
- ATH_MAC_HDR *datap;
-
- datap = (ATH_MAC_HDR *)A_NETBUF_DATA(skb);
- if (IEEE80211_IS_MULTICAST(datap->dstMac)) {
- /* Bcast/Mcast frames should be sent to the OS
- * stack as well as on the air.
- */
- skb1 = skb_copy(skb,GFP_ATOMIC);
- } else {
- /* Search for a connected STA with dstMac as
- * the Mac address. If found send the frame to
- * it on the air else send the frame up the
- * stack
- */
- sta_t *conn = NULL;
- conn = ieee80211_find_conn(ar, datap->dstMac);
-
- if (conn && ar->intra_bss) {
- skb1 = skb;
- skb = NULL;
- } else if(conn && !ar->intra_bss) {
- A_NETBUF_FREE(skb);
- skb = NULL;
- }
- }
- if (skb1) {
- ar6000_data_tx(skb1, ar->arNetDev);
- }
- }
- }
- aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no, is_amsdu, (void **)&skb);
- ar6000_deliver_frames_to_nw_stack((void *) ar->arNetDev, (void *)skb);
- }
- }
- } else {
- if (EPPING_ALIGNMENT_PAD > 0) {
- A_NETBUF_PULL(skb, EPPING_ALIGNMENT_PAD);
- }
- ar6000_deliver_frames_to_nw_stack((void *)ar->arNetDev, (void *)skb);
- }
-
-rx_done:
-
- return;
-}
-
-static void
-ar6000_deliver_frames_to_nw_stack(void *dev, void *osbuf)
-{
- struct sk_buff *skb = (struct sk_buff *)osbuf;
-
- if(skb) {
- skb->dev = dev;
- if ((skb->dev->flags & IFF_UP) == IFF_UP) {
-#ifdef CONFIG_PM
- ar6000_check_wow_status((struct ar6_softc *)ar6k_priv(dev), skb, false);
-#endif /* CONFIG_PM */
- skb->protocol = eth_type_trans(skb, skb->dev);
- /*
- * If this routine is called on a ISR (Hard IRQ) or DSR (Soft IRQ)
- * or tasklet use the netif_rx to deliver the packet to the stack
- * netif_rx will queue the packet onto the receive queue and mark
- * the softirq thread has a pending action to complete. Kernel will
- * schedule the softIrq kernel thread after processing the DSR.
- *
- * If this routine is called on a process context, use netif_rx_ni
- * which will schedle the softIrq kernel thread after queuing the packet.
- */
- if (in_interrupt()) {
- netif_rx(skb);
- } else {
- netif_rx_ni(skb);
- }
- } else {
- A_NETBUF_FREE(skb);
- }
- }
-}
-
-#if 0
-static void
-ar6000_deliver_frames_to_bt_stack(void *dev, void *osbuf)
-{
- struct sk_buff *skb = (struct sk_buff *)osbuf;
-
- if(skb) {
- skb->dev = dev;
- if ((skb->dev->flags & IFF_UP) == IFF_UP) {
- skb->protocol = htons(ETH_P_CONTROL);
- netif_rx(skb);
- } else {
- A_NETBUF_FREE(skb);
- }
- }
-}
-#endif
-
-static void
-ar6000_rx_refill(void *Context, HTC_ENDPOINT_ID Endpoint)
-{
- struct ar6_softc *ar = (struct ar6_softc *)Context;
- void *osBuf;
- int RxBuffers;
- int buffersToRefill;
- struct htc_packet *pPacket;
- struct htc_packet_queue queue;
-
- buffersToRefill = (int)AR6000_MAX_RX_BUFFERS -
- HTCGetNumRecvBuffers(ar->arHtcTarget, Endpoint);
-
- if (buffersToRefill <= 0) {
- /* fast return, nothing to fill */
- return;
- }
-
- INIT_HTC_PACKET_QUEUE(&queue);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_RX,("ar6000_rx_refill: providing htc with %d buffers at eid=%d\n",
- buffersToRefill, Endpoint));
-
- for (RxBuffers = 0; RxBuffers < buffersToRefill; RxBuffers++) {
- osBuf = A_NETBUF_ALLOC(AR6000_BUFFER_SIZE);
- if (NULL == osBuf) {
- break;
- }
- /* the HTC packet wrapper is at the head of the reserved area
- * in the skb */
- pPacket = (struct htc_packet *)(A_NETBUF_HEAD(osBuf));
- /* set re-fill info */
- SET_HTC_PACKET_INFO_RX_REFILL(pPacket,osBuf,A_NETBUF_DATA(osBuf),AR6000_BUFFER_SIZE,Endpoint);
- /* add to queue */
- HTC_PACKET_ENQUEUE(&queue,pPacket);
- }
-
- if (!HTC_QUEUE_EMPTY(&queue)) {
- /* add packets */
- HTCAddReceivePktMultiple(ar->arHtcTarget, &queue);
- }
-
-}
-
- /* clean up our amsdu buffer list */
-static void ar6000_cleanup_amsdu_rxbufs(struct ar6_softc *ar)
-{
- struct htc_packet *pPacket;
- void *osBuf;
-
- /* empty AMSDU buffer queue and free OS bufs */
- while (true) {
-
- AR6000_SPIN_LOCK(&ar->arLock, 0);
- pPacket = HTC_PACKET_DEQUEUE(&ar->amsdu_rx_buffer_queue);
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
-
- if (NULL == pPacket) {
- break;
- }
-
- osBuf = pPacket->pPktContext;
- if (NULL == osBuf) {
- A_ASSERT(false);
- break;
- }
-
- A_NETBUF_FREE(osBuf);
- }
-
-}
-
-
- /* refill the amsdu buffer list */
-static void ar6000_refill_amsdu_rxbufs(struct ar6_softc *ar, int Count)
-{
- struct htc_packet *pPacket;
- void *osBuf;
-
- while (Count > 0) {
- osBuf = A_NETBUF_ALLOC(AR6000_AMSDU_BUFFER_SIZE);
- if (NULL == osBuf) {
- break;
- }
- /* the HTC packet wrapper is at the head of the reserved area
- * in the skb */
- pPacket = (struct htc_packet *)(A_NETBUF_HEAD(osBuf));
- /* set re-fill info */
- SET_HTC_PACKET_INFO_RX_REFILL(pPacket,osBuf,A_NETBUF_DATA(osBuf),AR6000_AMSDU_BUFFER_SIZE,0);
-
- AR6000_SPIN_LOCK(&ar->arLock, 0);
- /* put it in the list */
- HTC_PACKET_ENQUEUE(&ar->amsdu_rx_buffer_queue,pPacket);
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
- Count--;
- }
-
-}
-
- /* callback to allocate a large receive buffer for a pending packet. This function is called when
- * an HTC packet arrives whose length exceeds a threshold value
- *
- * We use a pre-allocated list of buffers of maximum AMSDU size (4K). Under linux it is more optimal to
- * keep the allocation size the same to optimize cached-slab allocations.
- *
- * */
-static struct htc_packet *ar6000_alloc_amsdu_rxbuf(void *Context, HTC_ENDPOINT_ID Endpoint, int Length)
-{
- struct htc_packet *pPacket = NULL;
- struct ar6_softc *ar = (struct ar6_softc *)Context;
- int refillCount = 0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_RX,("ar6000_alloc_amsdu_rxbuf: eid=%d, Length:%d\n",Endpoint,Length));
-
- do {
-
- if (Length <= AR6000_BUFFER_SIZE) {
- /* shouldn't be getting called on normal sized packets */
- A_ASSERT(false);
- break;
- }
-
- if (Length > AR6000_AMSDU_BUFFER_SIZE) {
- A_ASSERT(false);
- break;
- }
-
- AR6000_SPIN_LOCK(&ar->arLock, 0);
- /* allocate a packet from the list */
- pPacket = HTC_PACKET_DEQUEUE(&ar->amsdu_rx_buffer_queue);
- /* see if we need to refill again */
- refillCount = AR6000_MAX_AMSDU_RX_BUFFERS - HTC_PACKET_QUEUE_DEPTH(&ar->amsdu_rx_buffer_queue);
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
-
- if (NULL == pPacket) {
- break;
- }
- /* set actual endpoint ID */
- pPacket->Endpoint = Endpoint;
-
- } while (false);
-
- if (refillCount >= AR6000_AMSDU_REFILL_THRESHOLD) {
- ar6000_refill_amsdu_rxbufs(ar,refillCount);
- }
-
- return pPacket;
-}
-
-static void
-ar6000_set_multicast_list(struct net_device *dev)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000: Multicast filter not supported\n"));
-}
-
-static struct net_device_stats *
-ar6000_get_stats(struct net_device *dev)
-{
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
- return &ar->arNetStats;
-}
-
-void
-ar6000_ready_event(void *devt, u8 *datap, u8 phyCap, u32 sw_ver, u32 abi_ver)
-{
- struct ar6_softc *ar = (struct ar6_softc *)devt;
- struct net_device *dev = ar->arNetDev;
-
- memcpy(dev->dev_addr, datap, AR6000_ETH_ADDR_LEN);
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("mac address = %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
- dev->dev_addr[0], dev->dev_addr[1],
- dev->dev_addr[2], dev->dev_addr[3],
- dev->dev_addr[4], dev->dev_addr[5]));
-
- ar->arPhyCapability = phyCap;
- ar->arVersion.wlan_ver = sw_ver;
- ar->arVersion.abi_ver = abi_ver;
-
- snprintf(ar->wdev->wiphy->fw_version, sizeof(ar->wdev->wiphy->fw_version),
- "%u:%u:%u:%u",
- (ar->arVersion.wlan_ver & 0xf0000000) >> 28,
- (ar->arVersion.wlan_ver & 0x0f000000) >> 24,
- (ar->arVersion.wlan_ver & 0x00ff0000) >> 16,
- (ar->arVersion.wlan_ver & 0x0000ffff));
-
- /* Indicate to the waiting thread that the ready event was received */
- ar->arWmiReady = true;
- wake_up(&arEvent);
-}
-
-void ar6000_install_static_wep_keys(struct ar6_softc *ar)
-{
- u8 index;
- u8 keyUsage;
-
- for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
- if (ar->arWepKeyList[index].arKeyLen) {
- keyUsage = GROUP_USAGE;
- if (index == ar->arDefTxKeyIndex) {
- keyUsage |= TX_USAGE;
- }
- wmi_addKey_cmd(ar->arWmi,
- index,
- WEP_CRYPT,
- keyUsage,
- ar->arWepKeyList[index].arKeyLen,
- NULL,
- ar->arWepKeyList[index].arKey, KEY_OP_INIT_VAL, NULL,
- NO_SYNC_WMIFLAG);
- }
- }
-}
-
-void
-add_new_sta(struct ar6_softc *ar, u8 *mac, u16 aid, u8 *wpaie,
- u8 ielen, u8 keymgmt, u8 ucipher, u8 auth)
-{
- u8 free_slot=aid-1;
-
- memcpy(ar->sta_list[free_slot].mac, mac, ATH_MAC_LEN);
- memcpy(ar->sta_list[free_slot].wpa_ie, wpaie, ielen);
- ar->sta_list[free_slot].aid = aid;
- ar->sta_list[free_slot].keymgmt = keymgmt;
- ar->sta_list[free_slot].ucipher = ucipher;
- ar->sta_list[free_slot].auth = auth;
- ar->sta_list_index = ar->sta_list_index | (1 << free_slot);
- ar->arAPStats.sta[free_slot].aid = aid;
-}
-
-void
-ar6000_connect_event(struct ar6_softc *ar, u16 channel, u8 *bssid,
- u16 listenInterval, u16 beaconInterval,
- NETWORK_TYPE networkType, u8 beaconIeLen,
- u8 assocReqLen, u8 assocRespLen,
- u8 *assocInfo)
-{
- union iwreq_data wrqu;
- int i, beacon_ie_pos, assoc_resp_ie_pos, assoc_req_ie_pos;
- static const char *tag1 = "ASSOCINFO(ReqIEs=";
- static const char *tag2 = "ASSOCRESPIE=";
- static const char *beaconIetag = "BEACONIE=";
- char buf[WMI_CONTROL_MSG_MAX_LEN * 2 + strlen(tag1) + 1];
- char *pos;
- u8 key_op_ctrl;
- unsigned long flags;
- struct ieee80211req_key *ik;
- CRYPTO_TYPE keyType = NONE_CRYPT;
-
- if(ar->arNetworkType & AP_NETWORK) {
- struct net_device *dev = ar->arNetDev;
- if(memcmp(dev->dev_addr, bssid, ATH_MAC_LEN)==0) {
- ar->arACS = channel;
- ik = &ar->ap_mode_bkey;
-
- switch(ar->arAuthMode) {
- case NONE_AUTH:
- if(ar->arPairwiseCrypto == WEP_CRYPT) {
- ar6000_install_static_wep_keys(ar);
- }
-#ifdef WAPI_ENABLE
- else if(ar->arPairwiseCrypto == WAPI_CRYPT) {
- ap_set_wapi_key(ar, ik);
- }
-#endif
- break;
- case WPA_PSK_AUTH:
- case WPA2_PSK_AUTH:
- case (WPA_PSK_AUTH|WPA2_PSK_AUTH):
- switch (ik->ik_type) {
- case IEEE80211_CIPHER_TKIP:
- keyType = TKIP_CRYPT;
- break;
- case IEEE80211_CIPHER_AES_CCM:
- keyType = AES_CRYPT;
- break;
- default:
- goto skip_key;
- }
- wmi_addKey_cmd(ar->arWmi, ik->ik_keyix, keyType, GROUP_USAGE,
- ik->ik_keylen, (u8 *)&ik->ik_keyrsc,
- ik->ik_keydata, KEY_OP_INIT_VAL, ik->ik_macaddr,
- SYNC_BOTH_WMIFLAG);
-
- break;
- }
-skip_key:
- ar->arConnected = true;
- return;
- }
-
- A_PRINTF("NEW STA %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x \n "
- " AID=%d \n", bssid[0], bssid[1], bssid[2],
- bssid[3], bssid[4], bssid[5], channel);
- switch ((listenInterval>>8)&0xFF) {
- case OPEN_AUTH:
- A_PRINTF("AUTH: OPEN\n");
- break;
- case SHARED_AUTH:
- A_PRINTF("AUTH: SHARED\n");
- break;
- default:
- A_PRINTF("AUTH: Unknown\n");
- break;
- }
- switch (listenInterval&0xFF) {
- case WPA_PSK_AUTH:
- A_PRINTF("KeyMgmt: WPA-PSK\n");
- break;
- case WPA2_PSK_AUTH:
- A_PRINTF("KeyMgmt: WPA2-PSK\n");
- break;
- default:
- A_PRINTF("KeyMgmt: NONE\n");
- break;
- }
- switch (beaconInterval) {
- case AES_CRYPT:
- A_PRINTF("Cipher: AES\n");
- break;
- case TKIP_CRYPT:
- A_PRINTF("Cipher: TKIP\n");
- break;
- case WEP_CRYPT:
- A_PRINTF("Cipher: WEP\n");
- break;
-#ifdef WAPI_ENABLE
- case WAPI_CRYPT:
- A_PRINTF("Cipher: WAPI\n");
- break;
-#endif
- default:
- A_PRINTF("Cipher: NONE\n");
- break;
- }
-
- add_new_sta(ar, bssid, channel /*aid*/,
- assocInfo /* WPA IE */, assocRespLen /* IE len */,
- listenInterval&0xFF /* Keymgmt */, beaconInterval /* cipher */,
- (listenInterval>>8)&0xFF /* auth alg */);
-
- /* Send event to application */
- A_MEMZERO(&wrqu, sizeof(wrqu));
- memcpy(wrqu.addr.sa_data, bssid, ATH_MAC_LEN);
- wireless_send_event(ar->arNetDev, IWEVREGISTERED, &wrqu, NULL);
- /* In case the queue is stopped when we switch modes, this will
- * wake it up
- */
- netif_wake_queue(ar->arNetDev);
- return;
- }
-
- ar6k_cfg80211_connect_event(ar, channel, bssid,
- listenInterval, beaconInterval,
- networkType, beaconIeLen,
- assocReqLen, assocRespLen,
- assocInfo);
-
- memcpy(ar->arBssid, bssid, sizeof(ar->arBssid));
- ar->arBssChannel = channel;
-
- A_PRINTF("AR6000 connected event on freq %d ", channel);
- A_PRINTF("with bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- " listenInterval=%d, beaconInterval = %d, beaconIeLen = %d assocReqLen=%d"
- " assocRespLen =%d\n",
- bssid[0], bssid[1], bssid[2],
- bssid[3], bssid[4], bssid[5],
- listenInterval, beaconInterval,
- beaconIeLen, assocReqLen, assocRespLen);
- if (networkType & ADHOC_NETWORK) {
- if (networkType & ADHOC_CREATOR) {
- A_PRINTF("Network: Adhoc (Creator)\n");
- } else {
- A_PRINTF("Network: Adhoc (Joiner)\n");
- }
- } else {
- A_PRINTF("Network: Infrastructure\n");
- }
-
- if ((ar->arNetworkType == INFRA_NETWORK)) {
- wmi_listeninterval_cmd(ar->arWmi, ar->arListenIntervalT, ar->arListenIntervalB);
- }
-
- if (beaconIeLen && (sizeof(buf) > (9 + beaconIeLen * 2))) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nBeaconIEs= "));
-
- beacon_ie_pos = 0;
- A_MEMZERO(buf, sizeof(buf));
- sprintf(buf, "%s", beaconIetag);
- pos = buf + 9;
- for (i = beacon_ie_pos; i < beacon_ie_pos + beaconIeLen; i++) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i]));
- sprintf(pos, "%2.2x", assocInfo[i]);
- pos += 2;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));
-
- A_MEMZERO(&wrqu, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
- wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
- }
-
- if (assocRespLen && (sizeof(buf) > (12 + (assocRespLen * 2))))
- {
- assoc_resp_ie_pos = beaconIeLen + assocReqLen +
- sizeof(u16) + /* capinfo*/
- sizeof(u16) + /* status Code */
- sizeof(u16) ; /* associd */
- A_MEMZERO(buf, sizeof(buf));
- sprintf(buf, "%s", tag2);
- pos = buf + 12;
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nAssocRespIEs= "));
- /*
- * The Association Response Frame w.o. the WLAN header is delivered to
- * the host, so skip over to the IEs
- */
- for (i = assoc_resp_ie_pos; i < assoc_resp_ie_pos + assocRespLen - 6; i++)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i]));
- sprintf(pos, "%2.2x", assocInfo[i]);
- pos += 2;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));
-
- A_MEMZERO(&wrqu, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
- wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
- }
-
- if (assocReqLen && (sizeof(buf) > (17 + (assocReqLen * 2)))) {
- /*
- * assoc Request includes capability and listen interval. Skip these.
- */
- assoc_req_ie_pos = beaconIeLen +
- sizeof(u16) + /* capinfo*/
- sizeof(u16); /* listen interval */
-
- A_MEMZERO(buf, sizeof(buf));
- sprintf(buf, "%s", tag1);
- pos = buf + 17;
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("AssocReqIEs= "));
- for (i = assoc_req_ie_pos; i < assoc_req_ie_pos + assocReqLen - 4; i++) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i]));
- sprintf(pos, "%2.2x", assocInfo[i]);
- pos += 2;
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));
-
- A_MEMZERO(&wrqu, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
- wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
- }
-
- if (ar->user_savedkeys_stat == USER_SAVEDKEYS_STAT_RUN &&
- ar->user_saved_keys.keyOk == true)
- {
- key_op_ctrl = KEY_OP_VALID_MASK & ~KEY_OP_INIT_TSC;
-
- if (ar->user_key_ctrl & AR6000_USER_SETKEYS_RSC_UNCHANGED) {
- key_op_ctrl &= ~KEY_OP_INIT_RSC;
- } else {
- key_op_ctrl |= KEY_OP_INIT_RSC;
- }
- ar6000_reinstall_keys(ar, key_op_ctrl);
- }
-
- netif_wake_queue(ar->arNetDev);
-
- /* Update connect & link status atomically */
- spin_lock_irqsave(&ar->arLock, flags);
- ar->arConnected = true;
- ar->arConnectPending = false;
- netif_carrier_on(ar->arNetDev);
- spin_unlock_irqrestore(&ar->arLock, flags);
- /* reset the rx aggr state */
- aggr_reset_state(ar->aggr_cntxt);
- reconnect_flag = 0;
-
- A_MEMZERO(&wrqu, sizeof(wrqu));
- memcpy(wrqu.addr.sa_data, bssid, IEEE80211_ADDR_LEN);
- wrqu.addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(ar->arNetDev, SIOCGIWAP, &wrqu, NULL);
- if ((ar->arNetworkType == ADHOC_NETWORK) && ar->arIbssPsEnable) {
- A_MEMZERO(ar->arNodeMap, sizeof(ar->arNodeMap));
- ar->arNodeNum = 0;
- ar->arNexEpId = ENDPOINT_2;
- }
- if (!ar->arUserBssFilter) {
- wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0);
- }
-
-}
-
-void ar6000_set_numdataendpts(struct ar6_softc *ar, u32 num)
-{
- A_ASSERT(num <= (HTC_MAILBOX_NUM_MAX - 1));
- ar->arNumDataEndPts = num;
-}
-
-void
-sta_cleanup(struct ar6_softc *ar, u8 i)
-{
- struct sk_buff *skb;
-
- /* empty the queued pkts in the PS queue if any */
- A_MUTEX_LOCK(&ar->sta_list[i].psqLock);
- while (!A_NETBUF_QUEUE_EMPTY(&ar->sta_list[i].psq)) {
- skb = A_NETBUF_DEQUEUE(&ar->sta_list[i].psq);
- A_NETBUF_FREE(skb);
- }
- A_MUTEX_UNLOCK(&ar->sta_list[i].psqLock);
-
- /* Zero out the state fields */
- A_MEMZERO(&ar->arAPStats.sta[ar->sta_list[i].aid-1], sizeof(WMI_PER_STA_STAT));
- A_MEMZERO(&ar->sta_list[i].mac, ATH_MAC_LEN);
- A_MEMZERO(&ar->sta_list[i].wpa_ie, IEEE80211_MAX_IE);
- ar->sta_list[i].aid = 0;
- ar->sta_list[i].flags = 0;
-
- ar->sta_list_index = ar->sta_list_index & ~(1 << i);
-
-}
-
-u8 remove_sta(struct ar6_softc *ar, u8 *mac, u16 reason)
-{
- u8 i, removed=0;
-
- if(IS_MAC_NULL(mac)) {
- return removed;
- }
-
- if(IS_MAC_BCAST(mac)) {
- A_PRINTF("DEL ALL STA\n");
- for(i=0; i < AP_MAX_NUM_STA; i++) {
- if(!IS_MAC_NULL(ar->sta_list[i].mac)) {
- sta_cleanup(ar, i);
- removed = 1;
- }
- }
- } else {
- for(i=0; i < AP_MAX_NUM_STA; i++) {
- if(memcmp(ar->sta_list[i].mac, mac, ATH_MAC_LEN)==0) {
- A_PRINTF("DEL STA %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- " aid=%d REASON=%d\n", mac[0], mac[1], mac[2],
- mac[3], mac[4], mac[5], ar->sta_list[i].aid, reason);
-
- sta_cleanup(ar, i);
- removed = 1;
- break;
- }
- }
- }
- return removed;
-}
-
-void
-ar6000_disconnect_event(struct ar6_softc *ar, u8 reason, u8 *bssid,
- u8 assocRespLen, u8 *assocInfo, u16 protocolReasonStatus)
-{
- u8 i;
- unsigned long flags;
- union iwreq_data wrqu;
-
- if(ar->arNetworkType & AP_NETWORK) {
- union iwreq_data wrqu;
- struct sk_buff *skb;
-
- if(!remove_sta(ar, bssid, protocolReasonStatus)) {
- return;
- }
-
- /* If there are no more associated STAs, empty the mcast PS q */
- if (ar->sta_list_index == 0) {
- A_MUTEX_LOCK(&ar->mcastpsqLock);
- while (!A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq)) {
- skb = A_NETBUF_DEQUEUE(&ar->mcastpsq);
- A_NETBUF_FREE(skb);
- }
- A_MUTEX_UNLOCK(&ar->mcastpsqLock);
-
- /* Clear the LSB of the BitMapCtl field of the TIM IE */
- if (ar->arWmiReady) {
- wmi_set_pvb_cmd(ar->arWmi, MCAST_AID, 0);
- }
- }
-
- if(!IS_MAC_BCAST(bssid)) {
- /* Send event to application */
- A_MEMZERO(&wrqu, sizeof(wrqu));
- memcpy(wrqu.addr.sa_data, bssid, ATH_MAC_LEN);
- wireless_send_event(ar->arNetDev, IWEVEXPIRED, &wrqu, NULL);
- }
-
- ar->arConnected = false;
- return;
- }
-
- ar6k_cfg80211_disconnect_event(ar, reason, bssid,
- assocRespLen, assocInfo,
- protocolReasonStatus);
-
- /* Send disconnect event to supplicant */
- A_MEMZERO(&wrqu, sizeof(wrqu));
- wrqu.addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(ar->arNetDev, SIOCGIWAP, &wrqu, NULL);
-
- /* it is necessary to clear the host-side rx aggregation state */
- aggr_reset_state(ar->aggr_cntxt);
-
- A_UNTIMEOUT(&ar->disconnect_timer);
-
- A_PRINTF("AR6000 disconnected");
- if (bssid[0] || bssid[1] || bssid[2] || bssid[3] || bssid[4] || bssid[5]) {
- A_PRINTF(" from %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ",
- bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nDisconnect Reason is %d", reason));
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nProtocol Reason/Status Code is %d", protocolReasonStatus));
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\nAssocResp Frame = %s",
- assocRespLen ? " " : "NULL"));
- for (i = 0; i < assocRespLen; i++) {
- if (!(i % 0x10)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i]));
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));
- /*
- * If the event is due to disconnect cmd from the host, only they the target
- * would stop trying to connect. Under any other condition, target would
- * keep trying to connect.
- *
- */
- if( reason == DISCONNECT_CMD)
- {
- if ((!ar->arUserBssFilter) && (ar->arWmiReady)) {
- wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0);
- }
- } else {
- ar->arConnectPending = true;
- if (((reason == ASSOC_FAILED) && (protocolReasonStatus == 0x11)) ||
- ((reason == ASSOC_FAILED) && (protocolReasonStatus == 0x0) && (reconnect_flag == 1))) {
- ar->arConnected = true;
- return;
- }
- }
-
- if ((reason == NO_NETWORK_AVAIL) && (ar->arWmiReady))
- {
- bss_t *pWmiSsidnode = NULL;
-
- /* remove the current associated bssid node */
- wmi_free_node (ar->arWmi, bssid);
-
- /*
- * In case any other same SSID nodes are present
- * remove it, since those nodes also not available now
- */
- do
- {
- /*
- * Find the nodes based on SSID and remove it
- * NOTE :: This case will not work out for Hidden-SSID
- */
- pWmiSsidnode = wmi_find_Ssidnode (ar->arWmi, ar->arSsid, ar->arSsidLen, false, true);
-
- if (pWmiSsidnode)
- {
- wmi_free_node (ar->arWmi, pWmiSsidnode->ni_macaddr);
- }
-
- } while (pWmiSsidnode);
- }
-
- /* Update connect & link status atomically */
- spin_lock_irqsave(&ar->arLock, flags);
- ar->arConnected = false;
- netif_carrier_off(ar->arNetDev);
- spin_unlock_irqrestore(&ar->arLock, flags);
-
- if( (reason != CSERV_DISCONNECT) || (reconnect_flag != 1) ) {
- reconnect_flag = 0;
- }
-
- if (reason != CSERV_DISCONNECT)
- {
- ar->user_savedkeys_stat = USER_SAVEDKEYS_STAT_INIT;
- ar->user_key_ctrl = 0;
- }
-
- netif_stop_queue(ar->arNetDev);
- A_MEMZERO(ar->arBssid, sizeof(ar->arBssid));
- ar->arBssChannel = 0;
- ar->arBeaconInterval = 0;
-
- ar6000_TxDataCleanup(ar);
-}
-
-void
-ar6000_regDomain_event(struct ar6_softc *ar, u32 regCode)
-{
- A_PRINTF("AR6000 Reg Code = 0x%x\n", regCode);
- ar->arRegCode = regCode;
-}
-
-void
-ar6000_aggr_rcv_addba_req_evt(struct ar6_softc *ar, WMI_ADDBA_REQ_EVENT *evt)
-{
- if(evt->status == 0) {
- aggr_recv_addba_req_evt(ar->aggr_cntxt, evt->tid, evt->st_seq_no, evt->win_sz);
- }
-}
-
-void
-ar6000_aggr_rcv_addba_resp_evt(struct ar6_softc *ar, WMI_ADDBA_RESP_EVENT *evt)
-{
- A_PRINTF("ADDBA RESP. tid %d status %d, sz %d\n", evt->tid, evt->status, evt->amsdu_sz);
- if(evt->status == 0) {
- }
-}
-
-void
-ar6000_aggr_rcv_delba_req_evt(struct ar6_softc *ar, WMI_DELBA_EVENT *evt)
-{
- aggr_recv_delba_req_evt(ar->aggr_cntxt, evt->tid);
-}
-
-void register_pal_cb(ar6k_pal_config_t *palConfig_p)
-{
- ar6k_pal_config_g = *palConfig_p;
-}
-
-void
-ar6000_hci_event_rcv_evt(struct ar6_softc *ar, WMI_HCI_EVENT *cmd)
-{
- void *osbuf = NULL;
- s8 i;
- u8 size, *buf;
- int ret = 0;
-
- size = cmd->evt_buf_sz + 4;
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- ret = A_NO_MEMORY;
- A_PRINTF("Error in allocating netbuf \n");
- return;
- }
-
- A_NETBUF_PUT(osbuf, size);
- buf = (u8 *)A_NETBUF_DATA(osbuf);
- /* First 2-bytes carry HCI event/ACL data type
- * the next 2 are free
- */
- *((short *)buf) = WMI_HCI_EVENT_EVENTID;
- buf += sizeof(int);
- memcpy(buf, cmd->buf, cmd->evt_buf_sz);
-
- ar6000_deliver_frames_to_nw_stack(ar->arNetDev, osbuf);
- if(loghci) {
- A_PRINTF_LOG("HCI Event From PAL <-- \n");
- for(i = 0; i < cmd->evt_buf_sz; i++) {
- A_PRINTF_LOG("0x%02x ", cmd->buf[i]);
- if((i % 10) == 0) {
- A_PRINTF_LOG("\n");
- }
- }
- A_PRINTF_LOG("\n");
- A_PRINTF_LOG("==================================\n");
- }
-}
-
-void
-ar6000_neighborReport_event(struct ar6_softc *ar, int numAps, WMI_NEIGHBOR_INFO *info)
-{
-#if WIRELESS_EXT >= 18
- struct iw_pmkid_cand *pmkcand;
-#else /* WIRELESS_EXT >= 18 */
- static const char *tag = "PRE-AUTH";
- char buf[128];
-#endif /* WIRELESS_EXT >= 18 */
-
- union iwreq_data wrqu;
- int i;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("AR6000 Neighbor Report Event\n"));
- for (i=0; i < numAps; info++, i++) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ",
- info->bssid[0], info->bssid[1], info->bssid[2],
- info->bssid[3], info->bssid[4], info->bssid[5]));
- if (info->bssFlags & WMI_PREAUTH_CAPABLE_BSS) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("preauth-cap"));
- }
- if (info->bssFlags & WMI_PMKID_VALID_BSS) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,(" pmkid-valid\n"));
- continue; /* we skip bss if the pmkid is already valid */
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,("\n"));
- A_MEMZERO(&wrqu, sizeof(wrqu));
-#if WIRELESS_EXT >= 18
- pmkcand = A_MALLOC_NOWAIT(sizeof(struct iw_pmkid_cand));
- A_MEMZERO(pmkcand, sizeof(struct iw_pmkid_cand));
- pmkcand->index = i;
- pmkcand->flags = info->bssFlags;
- memcpy(pmkcand->bssid.sa_data, info->bssid, ATH_MAC_LEN);
- wrqu.data.length = sizeof(struct iw_pmkid_cand);
- wireless_send_event(ar->arNetDev, IWEVPMKIDCAND, &wrqu, (char *)pmkcand);
- kfree(pmkcand);
-#else /* WIRELESS_EXT >= 18 */
- snprintf(buf, sizeof(buf), "%s%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x",
- tag,
- info->bssid[0], info->bssid[1], info->bssid[2],
- info->bssid[3], info->bssid[4], info->bssid[5],
- i, info->bssFlags);
- wrqu.data.length = strlen(buf);
- wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
-#endif /* WIRELESS_EXT >= 18 */
- }
-}
-
-void
-ar6000_tkip_micerr_event(struct ar6_softc *ar, u8 keyid, bool ismcast)
-{
- static const char *tag = "MLME-MICHAELMICFAILURE.indication";
- char buf[128];
- union iwreq_data wrqu;
-
- /*
- * For AP case, keyid will have aid of STA which sent pkt with
- * MIC error. Use this aid to get MAC & send it to hostapd.
- */
- if (ar->arNetworkType == AP_NETWORK) {
- sta_t *s = ieee80211_find_conn_for_aid(ar, (keyid >> 2));
- if(!s){
- A_PRINTF("AP TKIP MIC error received from Invalid aid / STA not found =%d\n", keyid);
- return;
- }
- A_PRINTF("AP TKIP MIC error received from aid=%d\n", keyid);
- snprintf(buf,sizeof(buf), "%s addr=%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
- tag, s->mac[0],s->mac[1],s->mac[2],s->mac[3],s->mac[4],s->mac[5]);
- } else {
-
- ar6k_cfg80211_tkip_micerr_event(ar, keyid, ismcast);
-
- A_PRINTF("AR6000 TKIP MIC error received for keyid %d %scast\n",
- keyid & 0x3, ismcast ? "multi": "uni");
- snprintf(buf, sizeof(buf), "%s(keyid=%d %sicast)", tag, keyid & 0x3,
- ismcast ? "mult" : "un");
- }
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
- wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
-}
-
-void
-ar6000_scanComplete_event(struct ar6_softc *ar, int status)
-{
-
- ar6k_cfg80211_scanComplete_event(ar, status);
-
- if (!ar->arUserBssFilter) {
- wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0);
- }
- if (ar->scan_triggered) {
- if (status== 0) {
- union iwreq_data wrqu;
- A_MEMZERO(&wrqu, sizeof(wrqu));
- wireless_send_event(ar->arNetDev, SIOCGIWSCAN, &wrqu, NULL);
- }
- ar->scan_triggered = 0;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_SCAN,( "AR6000 scan complete: %d\n", status));
-}
-
-void
-ar6000_targetStats_event(struct ar6_softc *ar, u8 *ptr, u32 len)
-{
- u8 ac;
-
- if(ar->arNetworkType == AP_NETWORK) {
- WMI_AP_MODE_STAT *p = (WMI_AP_MODE_STAT *)ptr;
- WMI_AP_MODE_STAT *ap = &ar->arAPStats;
-
- if (len < sizeof(*p)) {
- return;
- }
-
- for(ac=0;ac<AP_MAX_NUM_STA;ac++) {
- ap->sta[ac].tx_bytes += p->sta[ac].tx_bytes;
- ap->sta[ac].tx_pkts += p->sta[ac].tx_pkts;
- ap->sta[ac].tx_error += p->sta[ac].tx_error;
- ap->sta[ac].tx_discard += p->sta[ac].tx_discard;
- ap->sta[ac].rx_bytes += p->sta[ac].rx_bytes;
- ap->sta[ac].rx_pkts += p->sta[ac].rx_pkts;
- ap->sta[ac].rx_error += p->sta[ac].rx_error;
- ap->sta[ac].rx_discard += p->sta[ac].rx_discard;
- }
-
- } else {
- WMI_TARGET_STATS *pTarget = (WMI_TARGET_STATS *)ptr;
- TARGET_STATS *pStats = &ar->arTargetStats;
-
- if (len < sizeof(*pTarget)) {
- return;
- }
-
- // Update the RSSI of the connected bss.
- if (ar->arConnected) {
- bss_t *pConnBss = NULL;
-
- pConnBss = wmi_find_node(ar->arWmi,ar->arBssid);
- if (pConnBss)
- {
- pConnBss->ni_rssi = pTarget->cservStats.cs_aveBeacon_rssi;
- pConnBss->ni_snr = pTarget->cservStats.cs_aveBeacon_snr;
- wmi_node_return(ar->arWmi, pConnBss);
- }
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR6000 updating target stats\n"));
- pStats->tx_packets += pTarget->txrxStats.tx_stats.tx_packets;
- pStats->tx_bytes += pTarget->txrxStats.tx_stats.tx_bytes;
- pStats->tx_unicast_pkts += pTarget->txrxStats.tx_stats.tx_unicast_pkts;
- pStats->tx_unicast_bytes += pTarget->txrxStats.tx_stats.tx_unicast_bytes;
- pStats->tx_multicast_pkts += pTarget->txrxStats.tx_stats.tx_multicast_pkts;
- pStats->tx_multicast_bytes += pTarget->txrxStats.tx_stats.tx_multicast_bytes;
- pStats->tx_broadcast_pkts += pTarget->txrxStats.tx_stats.tx_broadcast_pkts;
- pStats->tx_broadcast_bytes += pTarget->txrxStats.tx_stats.tx_broadcast_bytes;
- pStats->tx_rts_success_cnt += pTarget->txrxStats.tx_stats.tx_rts_success_cnt;
- for(ac = 0; ac < WMM_NUM_AC; ac++)
- pStats->tx_packet_per_ac[ac] += pTarget->txrxStats.tx_stats.tx_packet_per_ac[ac];
- pStats->tx_errors += pTarget->txrxStats.tx_stats.tx_errors;
- pStats->tx_failed_cnt += pTarget->txrxStats.tx_stats.tx_failed_cnt;
- pStats->tx_retry_cnt += pTarget->txrxStats.tx_stats.tx_retry_cnt;
- pStats->tx_mult_retry_cnt += pTarget->txrxStats.tx_stats.tx_mult_retry_cnt;
- pStats->tx_rts_fail_cnt += pTarget->txrxStats.tx_stats.tx_rts_fail_cnt;
- pStats->tx_unicast_rate = wmi_get_rate(pTarget->txrxStats.tx_stats.tx_unicast_rate);
-
- pStats->rx_packets += pTarget->txrxStats.rx_stats.rx_packets;
- pStats->rx_bytes += pTarget->txrxStats.rx_stats.rx_bytes;
- pStats->rx_unicast_pkts += pTarget->txrxStats.rx_stats.rx_unicast_pkts;
- pStats->rx_unicast_bytes += pTarget->txrxStats.rx_stats.rx_unicast_bytes;
- pStats->rx_multicast_pkts += pTarget->txrxStats.rx_stats.rx_multicast_pkts;
- pStats->rx_multicast_bytes += pTarget->txrxStats.rx_stats.rx_multicast_bytes;
- pStats->rx_broadcast_pkts += pTarget->txrxStats.rx_stats.rx_broadcast_pkts;
- pStats->rx_broadcast_bytes += pTarget->txrxStats.rx_stats.rx_broadcast_bytes;
- pStats->rx_fragment_pkt += pTarget->txrxStats.rx_stats.rx_fragment_pkt;
- pStats->rx_errors += pTarget->txrxStats.rx_stats.rx_errors;
- pStats->rx_crcerr += pTarget->txrxStats.rx_stats.rx_crcerr;
- pStats->rx_key_cache_miss += pTarget->txrxStats.rx_stats.rx_key_cache_miss;
- pStats->rx_decrypt_err += pTarget->txrxStats.rx_stats.rx_decrypt_err;
- pStats->rx_duplicate_frames += pTarget->txrxStats.rx_stats.rx_duplicate_frames;
- pStats->rx_unicast_rate = wmi_get_rate(pTarget->txrxStats.rx_stats.rx_unicast_rate);
-
-
- pStats->tkip_local_mic_failure
- += pTarget->txrxStats.tkipCcmpStats.tkip_local_mic_failure;
- pStats->tkip_counter_measures_invoked
- += pTarget->txrxStats.tkipCcmpStats.tkip_counter_measures_invoked;
- pStats->tkip_replays += pTarget->txrxStats.tkipCcmpStats.tkip_replays;
- pStats->tkip_format_errors += pTarget->txrxStats.tkipCcmpStats.tkip_format_errors;
- pStats->ccmp_format_errors += pTarget->txrxStats.tkipCcmpStats.ccmp_format_errors;
- pStats->ccmp_replays += pTarget->txrxStats.tkipCcmpStats.ccmp_replays;
-
- pStats->power_save_failure_cnt += pTarget->pmStats.power_save_failure_cnt;
- pStats->noise_floor_calibation = pTarget->noise_floor_calibation;
-
- pStats->cs_bmiss_cnt += pTarget->cservStats.cs_bmiss_cnt;
- pStats->cs_lowRssi_cnt += pTarget->cservStats.cs_lowRssi_cnt;
- pStats->cs_connect_cnt += pTarget->cservStats.cs_connect_cnt;
- pStats->cs_disconnect_cnt += pTarget->cservStats.cs_disconnect_cnt;
- pStats->cs_aveBeacon_snr = pTarget->cservStats.cs_aveBeacon_snr;
- pStats->cs_aveBeacon_rssi = pTarget->cservStats.cs_aveBeacon_rssi;
-
- if (enablerssicompensation) {
- pStats->cs_aveBeacon_rssi =
- rssi_compensation_calc(ar, pStats->cs_aveBeacon_rssi);
- }
- pStats->cs_lastRoam_msec = pTarget->cservStats.cs_lastRoam_msec;
- pStats->cs_snr = pTarget->cservStats.cs_snr;
- pStats->cs_rssi = pTarget->cservStats.cs_rssi;
-
- pStats->lq_val = pTarget->lqVal;
-
- pStats->wow_num_pkts_dropped += pTarget->wowStats.wow_num_pkts_dropped;
- pStats->wow_num_host_pkt_wakeups += pTarget->wowStats.wow_num_host_pkt_wakeups;
- pStats->wow_num_host_event_wakeups += pTarget->wowStats.wow_num_host_event_wakeups;
- pStats->wow_num_events_discarded += pTarget->wowStats.wow_num_events_discarded;
- pStats->arp_received += pTarget->arpStats.arp_received;
- pStats->arp_matched += pTarget->arpStats.arp_matched;
- pStats->arp_replied += pTarget->arpStats.arp_replied;
-
- if (ar->statsUpdatePending) {
- ar->statsUpdatePending = false;
- wake_up(&arEvent);
- }
- }
-}
-
-void
-ar6000_rssiThreshold_event(struct ar6_softc *ar, WMI_RSSI_THRESHOLD_VAL newThreshold, s16 rssi)
-{
- USER_RSSI_THOLD userRssiThold;
-
- rssi = rssi + SIGNAL_QUALITY_NOISE_FLOOR;
-
- if (enablerssicompensation) {
- rssi = rssi_compensation_calc(ar, rssi);
- }
-
- /* Send an event to the app */
- userRssiThold.tag = ar->rssi_map[newThreshold].tag;
- userRssiThold.rssi = rssi;
- A_PRINTF("rssi Threshold range = %d tag = %d rssi = %d\n", newThreshold,
- userRssiThold.tag, userRssiThold.rssi);
-}
-
-
-void
-ar6000_hbChallengeResp_event(struct ar6_softc *ar, u32 cookie, u32 source)
-{
- if (source != APP_HB_CHALLENGE) {
- /* This would ignore the replys that come in after their due time */
- if (cookie == ar->arHBChallengeResp.seqNum) {
- ar->arHBChallengeResp.outstanding = false;
- }
- }
-}
-
-
-void
-ar6000_reportError_event(struct ar6_softc *ar, WMI_TARGET_ERROR_VAL errorVal)
-{
- static const char * const errString[] = {
- [WMI_TARGET_PM_ERR_FAIL] "WMI_TARGET_PM_ERR_FAIL",
- [WMI_TARGET_KEY_NOT_FOUND] "WMI_TARGET_KEY_NOT_FOUND",
- [WMI_TARGET_DECRYPTION_ERR] "WMI_TARGET_DECRYPTION_ERR",
- [WMI_TARGET_BMISS] "WMI_TARGET_BMISS",
- [WMI_PSDISABLE_NODE_JOIN] "WMI_PSDISABLE_NODE_JOIN"
- };
-
- A_PRINTF("AR6000 Error on Target. Error = 0x%x\n", errorVal);
-
- /* One error is reported at a time, and errorval is a bitmask */
- if(errorVal & (errorVal - 1))
- return;
-
- A_PRINTF("AR6000 Error type = ");
- switch(errorVal)
- {
- case WMI_TARGET_PM_ERR_FAIL:
- case WMI_TARGET_KEY_NOT_FOUND:
- case WMI_TARGET_DECRYPTION_ERR:
- case WMI_TARGET_BMISS:
- case WMI_PSDISABLE_NODE_JOIN:
- A_PRINTF("%s\n", errString[errorVal]);
- break;
- default:
- A_PRINTF("INVALID\n");
- break;
- }
-
-}
-
-
-void
-ar6000_cac_event(struct ar6_softc *ar, u8 ac, u8 cacIndication,
- u8 statusCode, u8 *tspecSuggestion)
-{
- WMM_TSPEC_IE *tspecIe;
-
- /*
- * This is the TSPEC IE suggestion from AP.
- * Suggestion provided by AP under some error
- * cases, could be helpful for the host app.
- * Check documentation.
- */
- tspecIe = (WMM_TSPEC_IE *)tspecSuggestion;
-
- /*
- * What do we do, if we get TSPEC rejection? One thought
- * that comes to mind is implictly delete the pstream...
- */
- A_PRINTF("AR6000 CAC notification. "
- "AC = %d, cacIndication = 0x%x, statusCode = 0x%x\n",
- ac, cacIndication, statusCode);
-}
-
-void
-ar6000_channel_change_event(struct ar6_softc *ar, u16 oldChannel,
- u16 newChannel)
-{
- A_PRINTF("Channel Change notification\nOld Channel: %d, New Channel: %d\n",
- oldChannel, newChannel);
-}
-
-#define AR6000_PRINT_BSSID(_pBss) do { \
- A_PRINTF("%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ",\
- (_pBss)[0],(_pBss)[1],(_pBss)[2],(_pBss)[3],\
- (_pBss)[4],(_pBss)[5]); \
-} while(0)
-
-void
-ar6000_roam_tbl_event(struct ar6_softc *ar, WMI_TARGET_ROAM_TBL *pTbl)
-{
- u8 i;
-
- A_PRINTF("ROAM TABLE NO OF ENTRIES is %d ROAM MODE is %d\n",
- pTbl->numEntries, pTbl->roamMode);
- for (i= 0; i < pTbl->numEntries; i++) {
- A_PRINTF("[%d]bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x ", i,
- pTbl->bssRoamInfo[i].bssid[0], pTbl->bssRoamInfo[i].bssid[1],
- pTbl->bssRoamInfo[i].bssid[2],
- pTbl->bssRoamInfo[i].bssid[3],
- pTbl->bssRoamInfo[i].bssid[4],
- pTbl->bssRoamInfo[i].bssid[5]);
- A_PRINTF("RSSI %d RSSIDT %d LAST RSSI %d UTIL %d ROAM_UTIL %d"
- " BIAS %d\n",
- pTbl->bssRoamInfo[i].rssi,
- pTbl->bssRoamInfo[i].rssidt,
- pTbl->bssRoamInfo[i].last_rssi,
- pTbl->bssRoamInfo[i].util,
- pTbl->bssRoamInfo[i].roam_util,
- pTbl->bssRoamInfo[i].bias);
- }
-}
-
-void
-ar6000_wow_list_event(struct ar6_softc *ar, u8 num_filters, WMI_GET_WOW_LIST_REPLY *wow_reply)
-{
- u8 i,j;
-
- /*Each event now contains exactly one filter, see bug 26613*/
- A_PRINTF("WOW pattern %d of %d patterns\n", wow_reply->this_filter_num, wow_reply->num_filters);
- A_PRINTF("wow mode = %s host mode = %s\n",
- (wow_reply->wow_mode == 0? "disabled":"enabled"),
- (wow_reply->host_mode == 1 ? "awake":"asleep"));
-
-
- /*If there are no patterns, the reply will only contain generic
- WoW information. Pattern information will exist only if there are
- patterns present. Bug 26716*/
-
- /* If this event contains pattern information, display it*/
- if (wow_reply->this_filter_num) {
- i=0;
- A_PRINTF("id=%d size=%d offset=%d\n",
- wow_reply->wow_filters[i].wow_filter_id,
- wow_reply->wow_filters[i].wow_filter_size,
- wow_reply->wow_filters[i].wow_filter_offset);
- A_PRINTF("wow pattern = ");
- for (j=0; j< wow_reply->wow_filters[i].wow_filter_size; j++) {
- A_PRINTF("%2.2x",wow_reply->wow_filters[i].wow_filter_pattern[j]);
- }
-
- A_PRINTF("\nwow mask = ");
- for (j=0; j< wow_reply->wow_filters[i].wow_filter_size; j++) {
- A_PRINTF("%2.2x",wow_reply->wow_filters[i].wow_filter_mask[j]);
- }
- A_PRINTF("\n");
- }
-}
-
-/*
- * Report the Roaming related data collected on the target
- */
-void
-ar6000_display_roam_time(WMI_TARGET_ROAM_TIME *p)
-{
- A_PRINTF("Disconnect Data : BSSID: ");
- AR6000_PRINT_BSSID(p->disassoc_bssid);
- A_PRINTF(" RSSI %d DISASSOC Time %d NO_TXRX_TIME %d\n",
- p->disassoc_bss_rssi,p->disassoc_time,
- p->no_txrx_time);
- A_PRINTF("Connect Data: BSSID: ");
- AR6000_PRINT_BSSID(p->assoc_bssid);
- A_PRINTF(" RSSI %d ASSOC Time %d TXRX_TIME %d\n",
- p->assoc_bss_rssi,p->assoc_time,
- p->allow_txrx_time);
-}
-
-void
-ar6000_roam_data_event(struct ar6_softc *ar, WMI_TARGET_ROAM_DATA *p)
-{
- switch (p->roamDataType) {
- case ROAM_DATA_TIME:
- ar6000_display_roam_time(&p->u.roamTime);
- break;
- default:
- break;
- }
-}
-
-void
-ar6000_bssInfo_event_rx(struct ar6_softc *ar, u8 *datap, int len)
-{
- struct sk_buff *skb;
- WMI_BSS_INFO_HDR *bih = (WMI_BSS_INFO_HDR *)datap;
-
-
- if (!ar->arMgmtFilter) {
- return;
- }
- if (((ar->arMgmtFilter & IEEE80211_FILTER_TYPE_BEACON) &&
- (bih->frameType != BEACON_FTYPE)) ||
- ((ar->arMgmtFilter & IEEE80211_FILTER_TYPE_PROBE_RESP) &&
- (bih->frameType != PROBERESP_FTYPE)))
- {
- return;
- }
-
- if ((skb = A_NETBUF_ALLOC_RAW(len)) != NULL) {
-
- A_NETBUF_PUT(skb, len);
- memcpy(A_NETBUF_DATA(skb), datap, len);
- skb->dev = ar->arNetDev;
- memcpy(skb_mac_header(skb), A_NETBUF_DATA(skb), 6);
- skb->ip_summed = CHECKSUM_NONE;
- skb->pkt_type = PACKET_OTHERHOST;
- skb->protocol = __constant_htons(0x0019);
- netif_rx(skb);
- }
-}
-
-u32 wmiSendCmdNum;
-
-int
-ar6000_control_tx(void *devt, void *osbuf, HTC_ENDPOINT_ID eid)
-{
- struct ar6_softc *ar = (struct ar6_softc *)devt;
- int status = 0;
- struct ar_cookie *cookie = NULL;
- int i;
-#ifdef CONFIG_PM
- if (ar->arWowState != WLAN_WOW_STATE_NONE) {
- A_NETBUF_FREE(osbuf);
- return A_EACCES;
- }
-#endif /* CONFIG_PM */
- /* take lock to protect ar6000_alloc_cookie() */
- AR6000_SPIN_LOCK(&ar->arLock, 0);
-
- do {
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_TX,("ar_contrstatus = ol_tx: skb=0x%lx, len=0x%x eid =%d\n",
- (unsigned long)osbuf, A_NETBUF_LEN(osbuf), eid));
-
- if (ar->arWMIControlEpFull && (eid == ar->arControlEp)) {
- /* control endpoint is full, don't allocate resources, we
- * are just going to drop this packet */
- cookie = NULL;
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" WMI Control EP full, dropping packet : 0x%lX, len:%d \n",
- (unsigned long)osbuf, A_NETBUF_LEN(osbuf)));
- } else {
- cookie = ar6000_alloc_cookie(ar);
- }
-
- if (cookie == NULL) {
- status = A_NO_MEMORY;
- break;
- }
-
- if(logWmiRawMsgs) {
- A_PRINTF("WMI cmd send, msgNo %d :", wmiSendCmdNum);
- for(i = 0; i < a_netbuf_to_len(osbuf); i++)
- A_PRINTF("%x ", ((u8 *)a_netbuf_to_data(osbuf))[i]);
- A_PRINTF("\n");
- }
-
- wmiSendCmdNum++;
-
- } while (false);
-
- if (cookie != NULL) {
- /* got a structure to send it out on */
- ar->arTxPending[eid]++;
-
- if (eid != ar->arControlEp) {
- ar->arTotalTxDataPending++;
- }
- }
-
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
-
- if (cookie != NULL) {
- cookie->arc_bp[0] = (unsigned long)osbuf;
- cookie->arc_bp[1] = 0;
- SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
- cookie,
- A_NETBUF_DATA(osbuf),
- A_NETBUF_LEN(osbuf),
- eid,
- AR6K_CONTROL_PKT_TAG);
- /* this interface is asynchronous, if there is an error, cleanup will happen in the
- * TX completion callback */
- HTCSendPkt(ar->arHtcTarget, &cookie->HtcPkt);
- status = 0;
- }
-
- if (status) {
- A_NETBUF_FREE(osbuf);
- }
- return status;
-}
-
-/* indicate tx activity or inactivity on a WMI stream */
-void ar6000_indicate_tx_activity(void *devt, u8 TrafficClass, bool Active)
-{
- struct ar6_softc *ar = (struct ar6_softc *)devt;
- HTC_ENDPOINT_ID eid ;
- int i;
-
- if (ar->arWmiEnabled) {
- eid = arAc2EndpointID(ar, TrafficClass);
-
- AR6000_SPIN_LOCK(&ar->arLock, 0);
-
- ar->arAcStreamActive[TrafficClass] = Active;
-
- if (Active) {
- /* when a stream goes active, keep track of the active stream with the highest priority */
-
- if (ar->arAcStreamPriMap[TrafficClass] > ar->arHiAcStreamActivePri) {
- /* set the new highest active priority */
- ar->arHiAcStreamActivePri = ar->arAcStreamPriMap[TrafficClass];
- }
-
- } else {
- /* when a stream goes inactive, we may have to search for the next active stream
- * that is the highest priority */
-
- if (ar->arHiAcStreamActivePri == ar->arAcStreamPriMap[TrafficClass]) {
-
- /* the highest priority stream just went inactive */
-
- /* reset and search for the "next" highest "active" priority stream */
- ar->arHiAcStreamActivePri = 0;
- for (i = 0; i < WMM_NUM_AC; i++) {
- if (ar->arAcStreamActive[i]) {
- if (ar->arAcStreamPriMap[i] > ar->arHiAcStreamActivePri) {
- /* set the new highest active priority */
- ar->arHiAcStreamActivePri = ar->arAcStreamPriMap[i];
- }
- }
- }
- }
- }
-
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
-
- } else {
- /* for mbox ping testing, the traffic class is mapped directly as a stream ID,
- * see handling of AR6000_XIOCTL_TRAFFIC_ACTIVITY_CHANGE in ioctl.c
- * convert the stream ID to a endpoint */
- eid = arAc2EndpointID(ar, TrafficClass);
- }
-
- /* notify HTC, this may cause credit distribution changes */
-
- HTCIndicateActivityChange(ar->arHtcTarget,
- eid,
- Active);
-
-}
-
-void
-ar6000_btcoex_config_event(struct ar6_softc *ar, u8 *ptr, u32 len)
-{
-
- WMI_BTCOEX_CONFIG_EVENT *pBtcoexConfig = (WMI_BTCOEX_CONFIG_EVENT *)ptr;
- WMI_BTCOEX_CONFIG_EVENT *pArbtcoexConfig =&ar->arBtcoexConfig;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR6000 BTCOEX CONFIG EVENT \n"));
-
- A_PRINTF("received config event\n");
- pArbtcoexConfig->btProfileType = pBtcoexConfig->btProfileType;
- pArbtcoexConfig->linkId = pBtcoexConfig->linkId;
-
- switch (pBtcoexConfig->btProfileType) {
- case WMI_BTCOEX_BT_PROFILE_SCO:
- memcpy(&pArbtcoexConfig->info.scoConfigCmd, &pBtcoexConfig->info.scoConfigCmd,
- sizeof(WMI_SET_BTCOEX_SCO_CONFIG_CMD));
- break;
- case WMI_BTCOEX_BT_PROFILE_A2DP:
- memcpy(&pArbtcoexConfig->info.a2dpConfigCmd, &pBtcoexConfig->info.a2dpConfigCmd,
- sizeof(WMI_SET_BTCOEX_A2DP_CONFIG_CMD));
- break;
- case WMI_BTCOEX_BT_PROFILE_ACLCOEX:
- memcpy(&pArbtcoexConfig->info.aclcoexConfig, &pBtcoexConfig->info.aclcoexConfig,
- sizeof(WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD));
- break;
- case WMI_BTCOEX_BT_PROFILE_INQUIRY_PAGE:
- memcpy(&pArbtcoexConfig->info.btinquiryPageConfigCmd, &pBtcoexConfig->info.btinquiryPageConfigCmd,
- sizeof(WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD));
- break;
- }
- if (ar->statsUpdatePending) {
- ar->statsUpdatePending = false;
- wake_up(&arEvent);
- }
-}
-
-void
-ar6000_btcoex_stats_event(struct ar6_softc *ar, u8 *ptr, u32 len)
-{
- WMI_BTCOEX_STATS_EVENT *pBtcoexStats = (WMI_BTCOEX_STATS_EVENT *)ptr;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("AR6000 BTCOEX CONFIG EVENT \n"));
-
- memcpy(&ar->arBtcoexStats, pBtcoexStats, sizeof(WMI_BTCOEX_STATS_EVENT));
-
- if (ar->statsUpdatePending) {
- ar->statsUpdatePending = false;
- wake_up(&arEvent);
- }
-
-}
-module_init(ar6000_init_module);
-module_exit(ar6000_cleanup_module);
-
-/* Init cookie queue */
-static void
-ar6000_cookie_init(struct ar6_softc *ar)
-{
- u32 i;
-
- ar->arCookieList = NULL;
- ar->arCookieCount = 0;
-
- A_MEMZERO(s_ar_cookie_mem, sizeof(s_ar_cookie_mem));
-
- for (i = 0; i < MAX_COOKIE_NUM; i++) {
- ar6000_free_cookie(ar, &s_ar_cookie_mem[i]);
- }
-}
-
-/* cleanup cookie queue */
-static void
-ar6000_cookie_cleanup(struct ar6_softc *ar)
-{
- /* It is gone .... */
- ar->arCookieList = NULL;
- ar->arCookieCount = 0;
-}
-
-/* Init cookie queue */
-static void
-ar6000_free_cookie(struct ar6_softc *ar, struct ar_cookie * cookie)
-{
- /* Insert first */
- A_ASSERT(ar != NULL);
- A_ASSERT(cookie != NULL);
-
- cookie->arc_list_next = ar->arCookieList;
- ar->arCookieList = cookie;
- ar->arCookieCount++;
-}
-
-/* cleanup cookie queue */
-static struct ar_cookie *
-ar6000_alloc_cookie(struct ar6_softc *ar)
-{
- struct ar_cookie *cookie;
-
- cookie = ar->arCookieList;
- if(cookie != NULL)
- {
- ar->arCookieList = cookie->arc_list_next;
- ar->arCookieCount--;
- }
-
- return cookie;
-}
-
-void
-ar6000_tx_retry_err_event(void *devt)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Tx retries reach maximum!\n"));
-}
-
-void
-ar6000_snrThresholdEvent_rx(void *devt, WMI_SNR_THRESHOLD_VAL newThreshold, u8 snr)
-{
- WMI_SNR_THRESHOLD_EVENT event;
-
- event.range = newThreshold;
- event.snr = snr;
-}
-
-void
-ar6000_lqThresholdEvent_rx(void *devt, WMI_LQ_THRESHOLD_VAL newThreshold, u8 lq)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("lq threshold range %d, lq %d\n", newThreshold, lq));
-}
-
-
-
-u32 a_copy_to_user(void *to, const void *from, u32 n)
-{
- return(copy_to_user(to, from, n));
-}
-
-u32 a_copy_from_user(void *to, const void *from, u32 n)
-{
- return(copy_from_user(to, from, n));
-}
-
-
-int
-ar6000_get_driver_cfg(struct net_device *dev,
- u16 cfgParam,
- void *result)
-{
-
- int ret = 0;
-
- switch(cfgParam)
- {
- case AR6000_DRIVER_CFG_GET_WLANNODECACHING:
- *((u32 *)result) = wlanNodeCaching;
- break;
- case AR6000_DRIVER_CFG_LOG_RAW_WMI_MSGS:
- *((u32 *)result) = logWmiRawMsgs;
- break;
- default:
- ret = EINVAL;
- break;
- }
-
- return ret;
-}
-
-void
-ar6000_keepalive_rx(void *devt, u8 configured)
-{
- struct ar6_softc *ar = (struct ar6_softc *)devt;
-
- ar->arKeepaliveConfigured = configured;
- wake_up(&arEvent);
-}
-
-void
-ar6000_pmkid_list_event(void *devt, u8 numPMKID, WMI_PMKID *pmkidList,
- u8 *bssidList)
-{
- u8 i, j;
-
- A_PRINTF("Number of Cached PMKIDs is %d\n", numPMKID);
-
- for (i = 0; i < numPMKID; i++) {
- A_PRINTF("\nBSSID %d ", i);
- for (j = 0; j < ATH_MAC_LEN; j++) {
- A_PRINTF("%2.2x", bssidList[j]);
- }
- bssidList += (ATH_MAC_LEN + WMI_PMKID_LEN);
- A_PRINTF("\nPMKID %d ", i);
- for (j = 0; j < WMI_PMKID_LEN; j++) {
- A_PRINTF("%2.2x", pmkidList->pmkid[j]);
- }
- pmkidList = (WMI_PMKID *)((u8 *)pmkidList + ATH_MAC_LEN +
- WMI_PMKID_LEN);
- }
-}
-
-void ar6000_pspoll_event(struct ar6_softc *ar,u8 aid)
-{
- sta_t *conn=NULL;
- bool isPsqEmpty = false;
-
- conn = ieee80211_find_conn_for_aid(ar, aid);
-
- /* If the PS q for this STA is not empty, dequeue and send a pkt from
- * the head of the q. Also update the More data bit in the WMI_DATA_HDR
- * if there are more pkts for this STA in the PS q. If there are no more
- * pkts for this STA, update the PVB for this STA.
- */
- A_MUTEX_LOCK(&conn->psqLock);
- isPsqEmpty = A_NETBUF_QUEUE_EMPTY(&conn->psq);
- A_MUTEX_UNLOCK(&conn->psqLock);
-
- if (isPsqEmpty) {
- /* TODO:No buffered pkts for this STA. Send out a NULL data frame */
- } else {
- struct sk_buff *skb = NULL;
-
- A_MUTEX_LOCK(&conn->psqLock);
- skb = A_NETBUF_DEQUEUE(&conn->psq);
- A_MUTEX_UNLOCK(&conn->psqLock);
- /* Set the STA flag to PSPolled, so that the frame will go out */
- STA_SET_PS_POLLED(conn);
- ar6000_data_tx(skb, ar->arNetDev);
- STA_CLR_PS_POLLED(conn);
-
- /* Clear the PVB for this STA if the queue has become empty */
- A_MUTEX_LOCK(&conn->psqLock);
- isPsqEmpty = A_NETBUF_QUEUE_EMPTY(&conn->psq);
- A_MUTEX_UNLOCK(&conn->psqLock);
-
- if (isPsqEmpty) {
- wmi_set_pvb_cmd(ar->arWmi, conn->aid, 0);
- }
- }
-}
-
-void ar6000_dtimexpiry_event(struct ar6_softc *ar)
-{
- bool isMcastQueued = false;
- struct sk_buff *skb = NULL;
-
- /* If there are no associated STAs, ignore the DTIM expiry event.
- * There can be potential race conditions where the last associated
- * STA may disconnect & before the host could clear the 'Indicate DTIM'
- * request to the firmware, the firmware would have just indicated a DTIM
- * expiry event. The race is between 'clear DTIM expiry cmd' going
- * from the host to the firmware & the DTIM expiry event happening from
- * the firmware to the host.
- */
- if (ar->sta_list_index == 0) {
- return;
- }
-
- A_MUTEX_LOCK(&ar->mcastpsqLock);
- isMcastQueued = A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq);
- A_MUTEX_UNLOCK(&ar->mcastpsqLock);
-
- A_ASSERT(isMcastQueued == false);
-
- /* Flush the mcast psq to the target */
- /* Set the STA flag to DTIMExpired, so that the frame will go out */
- ar->DTIMExpired = true;
-
- A_MUTEX_LOCK(&ar->mcastpsqLock);
- while (!A_NETBUF_QUEUE_EMPTY(&ar->mcastpsq)) {
- skb = A_NETBUF_DEQUEUE(&ar->mcastpsq);
- A_MUTEX_UNLOCK(&ar->mcastpsqLock);
-
- ar6000_data_tx(skb, ar->arNetDev);
-
- A_MUTEX_LOCK(&ar->mcastpsqLock);
- }
- A_MUTEX_UNLOCK(&ar->mcastpsqLock);
-
- /* Reset the DTIMExpired flag back to 0 */
- ar->DTIMExpired = false;
-
- /* Clear the LSB of the BitMapCtl field of the TIM IE */
- wmi_set_pvb_cmd(ar->arWmi, MCAST_AID, 0);
-}
-
-void
-read_rssi_compensation_param(struct ar6_softc *ar)
-{
- u8 *cust_data_ptr;
-
-//#define RSSICOMPENSATION_PRINT
-
-#ifdef RSSICOMPENSATION_PRINT
- s16 i;
- cust_data_ptr = ar6000_get_cust_data_buffer(ar->arTargetType);
- for (i=0; i<16; i++) {
- A_PRINTF("cust_data_%d = %x \n", i, *(u8 *)cust_data_ptr);
- cust_data_ptr += 1;
- }
-#endif
-
- cust_data_ptr = ar6000_get_cust_data_buffer(ar->arTargetType);
-
- rssi_compensation_param.customerID = *(u16 *)cust_data_ptr & 0xffff;
- rssi_compensation_param.enable = *(u16 *)(cust_data_ptr+2) & 0xffff;
- rssi_compensation_param.bg_param_a = *(u16 *)(cust_data_ptr+4) & 0xffff;
- rssi_compensation_param.bg_param_b = *(u16 *)(cust_data_ptr+6) & 0xffff;
- rssi_compensation_param.a_param_a = *(u16 *)(cust_data_ptr+8) & 0xffff;
- rssi_compensation_param.a_param_b = *(u16 *)(cust_data_ptr+10) &0xffff;
- rssi_compensation_param.reserved = *(u32 *)(cust_data_ptr+12);
-
-#ifdef RSSICOMPENSATION_PRINT
- A_PRINTF("customerID = 0x%x \n", rssi_compensation_param.customerID);
- A_PRINTF("enable = 0x%x \n", rssi_compensation_param.enable);
- A_PRINTF("bg_param_a = 0x%x and %d \n", rssi_compensation_param.bg_param_a, rssi_compensation_param.bg_param_a);
- A_PRINTF("bg_param_b = 0x%x and %d \n", rssi_compensation_param.bg_param_b, rssi_compensation_param.bg_param_b);
- A_PRINTF("a_param_a = 0x%x and %d \n", rssi_compensation_param.a_param_a, rssi_compensation_param.a_param_a);
- A_PRINTF("a_param_b = 0x%x and %d \n", rssi_compensation_param.a_param_b, rssi_compensation_param.a_param_b);
- A_PRINTF("Last 4 bytes = 0x%x \n", rssi_compensation_param.reserved);
-#endif
-
- if (rssi_compensation_param.enable != 0x1) {
- rssi_compensation_param.enable = 0;
- }
-
- return;
-}
-
-s32 rssi_compensation_calc_tcmd(u32 freq, s32 rssi, u32 totalPkt)
-{
-
- if (freq > 5000)
- {
- if (rssi_compensation_param.enable)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11a\n"));
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d, totalPkt = %d\n", rssi,totalPkt));
- rssi = rssi * rssi_compensation_param.a_param_a + totalPkt * rssi_compensation_param.a_param_b;
- rssi = (rssi-50) /100;
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi));
- }
- }
- else
- {
- if (rssi_compensation_param.enable)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11bg\n"));
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d, totalPkt = %d\n", rssi,totalPkt));
- rssi = rssi * rssi_compensation_param.bg_param_a + totalPkt * rssi_compensation_param.bg_param_b;
- rssi = (rssi-50) /100;
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi));
- }
- }
-
- return rssi;
-}
-
-s16 rssi_compensation_calc(struct ar6_softc *ar, s16 rssi)
-{
- if (ar->arBssChannel > 5000)
- {
- if (rssi_compensation_param.enable)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11a\n"));
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d\n", rssi));
- rssi = rssi * rssi_compensation_param.a_param_a + rssi_compensation_param.a_param_b;
- rssi = (rssi-50) /100;
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi));
- }
- }
- else
- {
- if (rssi_compensation_param.enable)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11bg\n"));
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before compensation = %d\n", rssi));
- rssi = rssi * rssi_compensation_param.bg_param_a + rssi_compensation_param.bg_param_b;
- rssi = (rssi-50) /100;
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after compensation = %d\n", rssi));
- }
- }
-
- return rssi;
-}
-
-s16 rssi_compensation_reverse_calc(struct ar6_softc *ar, s16 rssi, bool Above)
-{
- s16 i;
-
- if (ar->arBssChannel > 5000)
- {
- if (rssi_compensation_param.enable)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11a\n"));
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before rev compensation = %d\n", rssi));
- rssi = rssi * 100;
- rssi = (rssi - rssi_compensation_param.a_param_b) / rssi_compensation_param.a_param_a;
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after rev compensation = %d\n", rssi));
- }
- }
- else
- {
- if (rssi_compensation_param.enable)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (">>> 11bg\n"));
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi before rev compensation = %d\n", rssi));
-
- if (Above) {
- for (i=95; i>=0; i--) {
- if (rssi <= rssi_compensation_table[i]) {
- rssi = 0 - i;
- break;
- }
- }
- } else {
- for (i=0; i<=95; i++) {
- if (rssi >= rssi_compensation_table[i]) {
- rssi = 0 - i;
- break;
- }
- }
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("rssi after rev compensation = %d\n", rssi));
- }
- }
-
- return rssi;
-}
-
-#ifdef WAPI_ENABLE
-void ap_wapi_rekey_event(struct ar6_softc *ar, u8 type, u8 *mac)
-{
- union iwreq_data wrqu;
- char buf[20];
-
- A_MEMZERO(buf, sizeof(buf));
-
- strcpy(buf, "WAPI_REKEY");
- buf[10] = type;
- memcpy(&buf[11], mac, ATH_MAC_LEN);
-
- A_MEMZERO(&wrqu, sizeof(wrqu));
- wrqu.data.length = 10+1+ATH_MAC_LEN;
- wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
-
- A_PRINTF("WAPI REKEY - %d - %02x:%02x\n", type, mac[4], mac[5]);
-}
-#endif
-
-static int
-ar6000_reinstall_keys(struct ar6_softc *ar, u8 key_op_ctrl)
-{
- int status = 0;
- struct ieee80211req_key *uik = &ar->user_saved_keys.ucast_ik;
- struct ieee80211req_key *bik = &ar->user_saved_keys.bcast_ik;
- CRYPTO_TYPE keyType = ar->user_saved_keys.keyType;
-
- if (IEEE80211_CIPHER_CCKM_KRK != uik->ik_type) {
- if (NONE_CRYPT == keyType) {
- goto _reinstall_keys_out;
- }
-
- if (uik->ik_keylen) {
- status = wmi_addKey_cmd(ar->arWmi, uik->ik_keyix,
- ar->user_saved_keys.keyType, PAIRWISE_USAGE,
- uik->ik_keylen, (u8 *)&uik->ik_keyrsc,
- uik->ik_keydata, key_op_ctrl, uik->ik_macaddr, SYNC_BEFORE_WMIFLAG);
- }
-
- } else {
- status = wmi_add_krk_cmd(ar->arWmi, uik->ik_keydata);
- }
-
- if (IEEE80211_CIPHER_CCKM_KRK != bik->ik_type) {
- if (NONE_CRYPT == keyType) {
- goto _reinstall_keys_out;
- }
-
- if (bik->ik_keylen) {
- status = wmi_addKey_cmd(ar->arWmi, bik->ik_keyix,
- ar->user_saved_keys.keyType, GROUP_USAGE,
- bik->ik_keylen, (u8 *)&bik->ik_keyrsc,
- bik->ik_keydata, key_op_ctrl, bik->ik_macaddr, NO_SYNC_WMIFLAG);
- }
- } else {
- status = wmi_add_krk_cmd(ar->arWmi, bik->ik_keydata);
- }
-
-_reinstall_keys_out:
- ar->user_savedkeys_stat = USER_SAVEDKEYS_STAT_INIT;
- ar->user_key_ctrl = 0;
-
- return status;
-}
-
-
-void
-ar6000_dset_open_req(
- void *context,
- u32 id,
- u32 targHandle,
- u32 targReplyFn,
- u32 targReplyArg)
-{
-}
-
-void
-ar6000_dset_close(
- void *context,
- u32 access_cookie)
-{
- return;
-}
-
-void
-ar6000_dset_data_req(
- void *context,
- u32 accessCookie,
- u32 offset,
- u32 length,
- u32 targBuf,
- u32 targReplyFn,
- u32 targReplyArg)
-{
-}
-
-int
-ar6000_ap_mode_profile_commit(struct ar6_softc *ar)
-{
- WMI_CONNECT_CMD p;
- unsigned long flags;
-
- /* No change in AP's profile configuration */
- if(ar->ap_profile_flag==0) {
- A_PRINTF("COMMIT: No change in profile!!!\n");
- return -ENODATA;
- }
-
- if(!ar->arSsidLen) {
- A_PRINTF("SSID not set!!!\n");
- return -ECHRNG;
- }
-
- switch(ar->arAuthMode) {
- case NONE_AUTH:
- if((ar->arPairwiseCrypto != NONE_CRYPT) &&
-#ifdef WAPI_ENABLE
- (ar->arPairwiseCrypto != WAPI_CRYPT) &&
-#endif
- (ar->arPairwiseCrypto != WEP_CRYPT)) {
- A_PRINTF("Cipher not supported in AP mode Open auth\n");
- return -EOPNOTSUPP;
- }
- break;
- case WPA_PSK_AUTH:
- case WPA2_PSK_AUTH:
- case (WPA_PSK_AUTH|WPA2_PSK_AUTH):
- break;
- default:
- A_PRINTF("This key mgmt type not supported in AP mode\n");
- return -EOPNOTSUPP;
- }
-
- /* Update the arNetworkType */
- ar->arNetworkType = ar->arNextMode;
-
- A_MEMZERO(&p,sizeof(p));
- p.ssidLength = ar->arSsidLen;
- memcpy(p.ssid,ar->arSsid,p.ssidLength);
- p.channel = ar->arChannelHint;
- p.networkType = ar->arNetworkType;
-
- p.dot11AuthMode = ar->arDot11AuthMode;
- p.authMode = ar->arAuthMode;
- p.pairwiseCryptoType = ar->arPairwiseCrypto;
- p.pairwiseCryptoLen = ar->arPairwiseCryptoLen;
- p.groupCryptoType = ar->arGroupCrypto;
- p.groupCryptoLen = ar->arGroupCryptoLen;
- p.ctrl_flags = ar->arConnectCtrlFlags;
-
- wmi_ap_profile_commit(ar->arWmi, &p);
- spin_lock_irqsave(&ar->arLock, flags);
- ar->arConnected = true;
- netif_carrier_on(ar->arNetDev);
- spin_unlock_irqrestore(&ar->arLock, flags);
- ar->ap_profile_flag = 0;
- return 0;
-}
-
-int
-ar6000_connect_to_ap(struct ar6_softc *ar)
-{
- /* The ssid length check prevents second "essid off" from the user,
- to be treated as a connect cmd. The second "essid off" is ignored.
- */
- if((ar->arWmiReady == true) && (ar->arSsidLen > 0) && ar->arNetworkType!=AP_NETWORK)
- {
- int status;
- if((ADHOC_NETWORK != ar->arNetworkType) &&
- (NONE_AUTH==ar->arAuthMode) &&
- (WEP_CRYPT==ar->arPairwiseCrypto)) {
- ar6000_install_static_wep_keys(ar);
- }
-
- if (!ar->arUserBssFilter) {
- if (wmi_bssfilter_cmd(ar->arWmi, ALL_BSS_FILTER, 0) != 0) {
- return -EIO;
- }
- }
-#ifdef WAPI_ENABLE
- if (ar->arWapiEnable) {
- ar->arPairwiseCrypto = WAPI_CRYPT;
- ar->arPairwiseCryptoLen = 0;
- ar->arGroupCrypto = WAPI_CRYPT;
- ar->arGroupCryptoLen = 0;
- ar->arAuthMode = NONE_AUTH;
- ar->arConnectCtrlFlags |= CONNECT_IGNORE_WPAx_GROUP_CIPHER;
- }
-#endif
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("Connect called with authmode %d dot11 auth %d"\
- " PW crypto %d PW crypto Len %d GRP crypto %d"\
- " GRP crypto Len %d\n",
- ar->arAuthMode, ar->arDot11AuthMode,
- ar->arPairwiseCrypto, ar->arPairwiseCryptoLen,
- ar->arGroupCrypto, ar->arGroupCryptoLen));
- reconnect_flag = 0;
- /* Set the listen interval into 1000TUs or more. This value will be indicated to Ap in the conn.
- later set it back locally at the STA to 100/1000 TUs depending on the power mode */
- if ((ar->arNetworkType == INFRA_NETWORK)) {
- wmi_listeninterval_cmd(ar->arWmi, max(ar->arListenIntervalT, (u16)A_MAX_WOW_LISTEN_INTERVAL), 0);
- }
- status = wmi_connect_cmd(ar->arWmi, ar->arNetworkType,
- ar->arDot11AuthMode, ar->arAuthMode,
- ar->arPairwiseCrypto, ar->arPairwiseCryptoLen,
- ar->arGroupCrypto,ar->arGroupCryptoLen,
- ar->arSsidLen, ar->arSsid,
- ar->arReqBssid, ar->arChannelHint,
- ar->arConnectCtrlFlags);
- if (status) {
- wmi_listeninterval_cmd(ar->arWmi, ar->arListenIntervalT, ar->arListenIntervalB);
- if (!ar->arUserBssFilter) {
- wmi_bssfilter_cmd(ar->arWmi, NONE_BSS_FILTER, 0);
- }
- return status;
- }
-
- if ((!(ar->arConnectCtrlFlags & CONNECT_DO_WPA_OFFLOAD)) &&
- ((WPA_PSK_AUTH == ar->arAuthMode) || (WPA2_PSK_AUTH == ar->arAuthMode)))
- {
- A_TIMEOUT_MS(&ar->disconnect_timer, A_DISCONNECT_TIMER_INTERVAL, 0);
- }
-
- ar->arConnectCtrlFlags &= ~CONNECT_DO_WPA_OFFLOAD;
-
- ar->arConnectPending = true;
- return status;
- }
- return A_ERROR;
-}
-
-int
-ar6000_disconnect(struct ar6_softc *ar)
-{
- if ((ar->arConnected == true) || (ar->arConnectPending == true)) {
- wmi_disconnect_cmd(ar->arWmi);
- /*
- * Disconnect cmd is issued, clear connectPending.
- * arConnected will be cleard in disconnect_event notification.
- */
- ar->arConnectPending = false;
- }
-
- return 0;
-}
-
-int
-ar6000_ap_mode_get_wpa_ie(struct ar6_softc *ar, struct ieee80211req_wpaie *wpaie)
-{
- sta_t *conn = NULL;
- conn = ieee80211_find_conn(ar, wpaie->wpa_macaddr);
-
- A_MEMZERO(wpaie->wpa_ie, IEEE80211_MAX_IE);
- A_MEMZERO(wpaie->rsn_ie, IEEE80211_MAX_IE);
-
- if(conn) {
- memcpy(wpaie->wpa_ie, conn->wpa_ie, IEEE80211_MAX_IE);
- }
-
- return 0;
-}
-
-int
-is_iwioctl_allowed(u8 mode, u16 cmd)
-{
- if(cmd >= SIOCSIWCOMMIT && cmd <= SIOCGIWPOWER) {
- cmd -= SIOCSIWCOMMIT;
- if(sioctl_filter[cmd] == 0xFF) return 0;
- if(sioctl_filter[cmd] & mode) return 0;
- } else if(cmd >= SIOCIWFIRSTPRIV && cmd <= (SIOCIWFIRSTPRIV+30)) {
- cmd -= SIOCIWFIRSTPRIV;
- if(pioctl_filter[cmd] == 0xFF) return 0;
- if(pioctl_filter[cmd] & mode) return 0;
- } else {
- return A_ERROR;
- }
- return A_ENOTSUP;
-}
-
-int
-is_xioctl_allowed(u8 mode, int cmd)
-{
- if(sizeof(xioctl_filter)-1 < cmd) {
- A_PRINTF("Filter for this cmd=%d not defined\n",cmd);
- return 0;
- }
- if(xioctl_filter[cmd] == 0xFF) return 0;
- if(xioctl_filter[cmd] & mode) return 0;
- return A_ERROR;
-}
-
-#ifdef WAPI_ENABLE
-int
-ap_set_wapi_key(struct ar6_softc *ar, void *ikey)
-{
- struct ieee80211req_key *ik = (struct ieee80211req_key *)ikey;
- KEY_USAGE keyUsage = 0;
- int status;
-
- if (memcmp(ik->ik_macaddr, bcast_mac, IEEE80211_ADDR_LEN) == 0) {
- keyUsage = GROUP_USAGE;
- } else {
- keyUsage = PAIRWISE_USAGE;
- }
- A_PRINTF("WAPI_KEY: Type:%d ix:%d mac:%02x:%02x len:%d\n",
- keyUsage, ik->ik_keyix, ik->ik_macaddr[4], ik->ik_macaddr[5],
- ik->ik_keylen);
-
- status = wmi_addKey_cmd(ar->arWmi, ik->ik_keyix, WAPI_CRYPT, keyUsage,
- ik->ik_keylen, (u8 *)&ik->ik_keyrsc,
- ik->ik_keydata, KEY_OP_INIT_VAL, ik->ik_macaddr,
- SYNC_BOTH_WMIFLAG);
-
- if (0 != status) {
- return -EIO;
- }
- return 0;
-}
-#endif
-
-void ar6000_peer_event(
- void *context,
- u8 eventCode,
- u8 *macAddr)
-{
- u8 pos;
-
- for (pos=0;pos<6;pos++)
- printk("%02x: ",*(macAddr+pos));
- printk("\n");
-}
-
-#ifdef HTC_TEST_SEND_PKTS
-#define HTC_TEST_DUPLICATE 8
-static void DoHTCSendPktsTest(struct ar6_softc *ar, int MapNo, HTC_ENDPOINT_ID eid, struct sk_buff *dupskb)
-{
- struct ar_cookie *cookie;
- struct ar_cookie *cookieArray[HTC_TEST_DUPLICATE];
- struct sk_buff *new_skb;
- int i;
- int pkts = 0;
- struct htc_packet_queue pktQueue;
- EPPING_HEADER *eppingHdr;
-
- eppingHdr = A_NETBUF_DATA(dupskb);
-
- if (eppingHdr->Cmd_h == EPPING_CMD_NO_ECHO) {
- /* skip test if this is already a tx perf test */
- return;
- }
-
- for (i = 0; i < HTC_TEST_DUPLICATE; i++,pkts++) {
- AR6000_SPIN_LOCK(&ar->arLock, 0);
- cookie = ar6000_alloc_cookie(ar);
- if (cookie != NULL) {
- ar->arTxPending[eid]++;
- ar->arTotalTxDataPending++;
- }
-
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
-
- if (NULL == cookie) {
- break;
- }
-
- new_skb = A_NETBUF_ALLOC(A_NETBUF_LEN(dupskb));
-
- if (new_skb == NULL) {
- AR6000_SPIN_LOCK(&ar->arLock, 0);
- ar6000_free_cookie(ar,cookie);
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
- break;
- }
-
- A_NETBUF_PUT_DATA(new_skb, A_NETBUF_DATA(dupskb), A_NETBUF_LEN(dupskb));
- cookie->arc_bp[0] = (unsigned long)new_skb;
- cookie->arc_bp[1] = MapNo;
- SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
- cookie,
- A_NETBUF_DATA(new_skb),
- A_NETBUF_LEN(new_skb),
- eid,
- AR6K_DATA_PKT_TAG);
-
- cookieArray[i] = cookie;
-
- {
- EPPING_HEADER *pHdr = (EPPING_HEADER *)A_NETBUF_DATA(new_skb);
- pHdr->Cmd_h = EPPING_CMD_NO_ECHO; /* do not echo the packet */
- }
- }
-
- if (pkts == 0) {
- return;
- }
-
- INIT_HTC_PACKET_QUEUE(&pktQueue);
-
- for (i = 0; i < pkts; i++) {
- HTC_PACKET_ENQUEUE(&pktQueue,&cookieArray[i]->HtcPkt);
- }
-
- HTCSendPktsMultiple(ar->arHtcTarget, &pktQueue);
-
-}
-#endif
-
-#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
-/*
- * Add support for adding and removing a virtual adapter for soft AP.
- * Some OS requires different adapters names for station and soft AP mode.
- * To support these requirement, create and destroy a netdevice instance
- * when the AP mode is operational. A full fledged support for virual device
- * is not implemented. Rather a virtual interface is created and is linked
- * with the existing physical device instance during the operation of the
- * AP mode.
- */
-
-int ar6000_start_ap_interface(struct ar6_softc *ar)
-{
- struct ar_virtual_interface *arApDev;
-
- /* Change net_device to point to AP instance */
- arApDev = (struct ar_virtual_interface *)ar->arApDev;
- ar->arNetDev = arApDev->arNetDev;
-
- return 0;
-}
-
-int ar6000_stop_ap_interface(struct ar6_softc *ar)
-{
- struct ar_virtual_interface *arApDev;
-
- /* Change net_device to point to sta instance */
- arApDev = (struct ar_virtual_interface *)ar->arApDev;
- if (arApDev) {
- ar->arNetDev = arApDev->arStaNetDev;
- }
-
- return 0;
-}
-
-
-int ar6000_create_ap_interface(struct ar6_softc *ar, char *ap_ifname)
-{
- struct net_device *dev;
- struct ar_virtual_interface *arApDev;
-
- dev = alloc_etherdev(sizeof(struct ar_virtual_interface));
- if (dev == NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_create_ap_interface: can't alloc etherdev\n"));
- return A_ERROR;
- }
-
- ether_setup(dev);
- init_netdev(dev, ap_ifname);
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
-
- if (register_netdev(dev)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_create_ap_interface: register_netdev failed\n"));
- return A_ERROR;
- }
-
- arApDev = netdev_priv(dev);
- arApDev->arDev = ar;
- arApDev->arNetDev = dev;
- arApDev->arStaNetDev = ar->arNetDev;
-
- ar->arApDev = arApDev;
- arApNetDev = dev;
-
- /* Copy the MAC address */
- memcpy(dev->dev_addr, ar->arNetDev->dev_addr, AR6000_ETH_ADDR_LEN);
-
- return 0;
-}
-
-int ar6000_add_ap_interface(struct ar6_softc *ar, char *ap_ifname)
-{
- /* Interface already added, need not proceed further */
- if (ar->arApDev != NULL) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_add_ap_interface: interface already present \n"));
- return 0;
- }
-
- if (ar6000_create_ap_interface(ar, ap_ifname) != 0) {
- return A_ERROR;
- }
-
- A_PRINTF("Add AP interface %s \n",ap_ifname);
-
- return ar6000_start_ap_interface(ar);
-}
-
-int ar6000_remove_ap_interface(struct ar6_softc *ar)
-{
- if (arApNetDev) {
- ar6000_stop_ap_interface(ar);
-
- unregister_netdev(arApNetDev);
- free_netdev(apApNetDev);
-
- A_PRINTF("Remove AP interface\n");
- }
- ar->arApDev = NULL;
- arApNetDev = NULL;
-
-
- return 0;
-}
-#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
-
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-EXPORT_SYMBOL(setupbtdev);
-#endif
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_pm.c b/drivers/staging/ath6kl/os/linux/ar6000_pm.c
deleted file mode 100644
index 1e0ace8b6d1..00000000000
--- a/drivers/staging/ath6kl/os/linux/ar6000_pm.c
+++ /dev/null
@@ -1,626 +0,0 @@
-/*
- *
- * Copyright (c) 2004-2010 Atheros Communications Inc.
- * All rights reserved.
- *
- *
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
- *
- */
-
-/*
- * Implementation of system power management
- */
-
-#include "ar6000_drv.h"
-#include <linux/inetdevice.h>
-#include <linux/platform_device.h>
-#include "wlan_config.h"
-
-#define WOW_ENABLE_MAX_INTERVAL 0
-#define WOW_SET_SCAN_PARAMS 0
-
-extern unsigned int wmitimeout;
-extern wait_queue_head_t arEvent;
-
-#undef ATH_MODULE_NAME
-#define ATH_MODULE_NAME pm
-#define ATH_DEBUG_PM ATH_DEBUG_MAKE_MODULE_MASK(0)
-
-#ifdef DEBUG
-static struct ath_debug_mask_description pm_debug_desc[] = {
- { ATH_DEBUG_PM , "System power management"},
-};
-
-ATH_DEBUG_INSTANTIATE_MODULE_VAR(pm,
- "pm",
- "System Power Management",
- ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_PM,
- ATH_DEBUG_DESCRIPTION_COUNT(pm_debug_desc),
- pm_debug_desc);
-
-#endif /* DEBUG */
-
-int ar6000_exit_cut_power_state(struct ar6_softc *ar);
-
-#ifdef CONFIG_PM
-static void ar6k_send_asleep_event_to_app(struct ar6_softc *ar, bool asleep)
-{
- char buf[128];
- union iwreq_data wrqu;
-
- snprintf(buf, sizeof(buf), "HOST_ASLEEP=%s", asleep ? "asleep" : "awake");
- A_MEMZERO(&wrqu, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
- wireless_send_event(ar->arNetDev, IWEVCUSTOM, &wrqu, buf);
-}
-
-static void ar6000_wow_resume(struct ar6_softc *ar)
-{
- if (ar->arWowState!= WLAN_WOW_STATE_NONE) {
- u16 fg_start_period = (ar->scParams.fg_start_period==0) ? 1 : ar->scParams.fg_start_period;
- u16 bg_period = (ar->scParams.bg_period==0) ? 60 : ar->scParams.bg_period;
- WMI_SET_HOST_SLEEP_MODE_CMD hostSleepMode = {true, false};
- ar->arWowState = WLAN_WOW_STATE_NONE;
- if (wmi_set_host_sleep_mode_cmd(ar->arWmi, &hostSleepMode)!= 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to setup restore host awake\n"));
- }
-#if WOW_SET_SCAN_PARAMS
- wmi_scanparams_cmd(ar->arWmi, fg_start_period,
- ar->scParams.fg_end_period,
- bg_period,
- ar->scParams.minact_chdwell_time,
- ar->scParams.maxact_chdwell_time,
- ar->scParams.pas_chdwell_time,
- ar->scParams.shortScanRatio,
- ar->scParams.scanCtrlFlags,
- ar->scParams.max_dfsch_act_time,
- ar->scParams.maxact_scan_per_ssid);
-#else
- (void)fg_start_period;
- (void)bg_period;
-#endif
-
-
-#if WOW_ENABLE_MAX_INTERVAL /* we don't do it if the power consumption is already good enough. */
- if (wmi_listeninterval_cmd(ar->arWmi, ar->arListenIntervalT, ar->arListenIntervalB) == 0) {
- }
-#endif
- ar6k_send_asleep_event_to_app(ar, false);
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("Resume WoW successfully\n"));
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("WoW does not invoked. skip resume"));
- }
- ar->arWlanPowerState = WLAN_POWER_STATE_ON;
-}
-
-static void ar6000_wow_suspend(struct ar6_softc *ar)
-{
-#define WOW_LIST_ID 1
- if (ar->arNetworkType != AP_NETWORK) {
- /* Setup WoW for unicast & Arp request for our own IP
- disable background scan. Set listen interval into 1000 TUs
- Enable keepliave for 110 seconds
- */
- struct in_ifaddr **ifap = NULL;
- struct in_ifaddr *ifa = NULL;
- struct in_device *in_dev;
- u8 macMask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
- int status;
- WMI_ADD_WOW_PATTERN_CMD addWowCmd = { .filter = { 0 } };
- WMI_DEL_WOW_PATTERN_CMD delWowCmd;
- WMI_SET_HOST_SLEEP_MODE_CMD hostSleepMode = {false, true};
- WMI_SET_WOW_MODE_CMD wowMode = { .enable_wow = true,
- .hostReqDelay = 500 };/*500 ms delay*/
-
- if (ar->arWowState!= WLAN_WOW_STATE_NONE) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("System already go into wow mode!\n"));
- return;
- }
-
- ar6000_TxDataCleanup(ar); /* IMPORTANT, otherwise there will be 11mA after listen interval as 1000*/
-
-#if WOW_ENABLE_MAX_INTERVAL /* we don't do it if the power consumption is already good enough. */
- if (wmi_listeninterval_cmd(ar->arWmi, A_MAX_WOW_LISTEN_INTERVAL, 0) == 0) {
- }
-#endif
-
-#if WOW_SET_SCAN_PARAMS
- status = wmi_scanparams_cmd(ar->arWmi, 0xFFFF, 0, 0xFFFF, 0, 0, 0, 0, 0, 0, 0);
-#endif
- /* clear up our WoW pattern first */
- delWowCmd.filter_list_id = WOW_LIST_ID;
- delWowCmd.filter_id = 0;
- wmi_del_wow_pattern_cmd(ar->arWmi, &delWowCmd);
-
- /* setup unicast packet pattern for WoW */
- if (ar->arNetDev->dev_addr[1]) {
- addWowCmd.filter_list_id = WOW_LIST_ID;
- addWowCmd.filter_size = 6; /* MAC address */
- addWowCmd.filter_offset = 0;
- status = wmi_add_wow_pattern_cmd(ar->arWmi, &addWowCmd, ar->arNetDev->dev_addr, macMask, addWowCmd.filter_size);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to add WoW pattern\n"));
- }
- }
- /* setup ARP request for our own IP */
- if ((in_dev = __in_dev_get_rtnl(ar->arNetDev)) != NULL) {
- for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; ifap = &ifa->ifa_next) {
- if (!strcmp(ar->arNetDev->name, ifa->ifa_label)) {
- break; /* found */
- }
- }
- }
- if (ifa && ifa->ifa_local) {
- WMI_SET_IP_CMD ipCmd;
- memset(&ipCmd, 0, sizeof(ipCmd));
- ipCmd.ips[0] = ifa->ifa_local;
- status = wmi_set_ip_cmd(ar->arWmi, &ipCmd);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to setup IP for ARP agent\n"));
- }
- }
-
-#ifndef ATH6K_CONFIG_OTA_MODE
- wmi_powermode_cmd(ar->arWmi, REC_POWER);
-#endif
-
- status = wmi_set_wow_mode_cmd(ar->arWmi, &wowMode);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to enable wow mode\n"));
- }
- ar6k_send_asleep_event_to_app(ar, true);
-
- status = wmi_set_host_sleep_mode_cmd(ar->arWmi, &hostSleepMode);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to set host asleep\n"));
- }
-
- ar->arWowState = WLAN_WOW_STATE_SUSPENDING;
- if (ar->arTxPending[ar->arControlEp]) {
- u32 timeleft = wait_event_interruptible_timeout(arEvent,
- ar->arTxPending[ar->arControlEp] == 0, wmitimeout * HZ);
- if (!timeleft || signal_pending(current)) {
- /* what can I do? wow resume at once */
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to setup WoW. Pending wmi control data %d\n", ar->arTxPending[ar->arControlEp]));
- }
- }
-
- status = hifWaitForPendingRecv(ar->arHifDevice);
-
- ar->arWowState = WLAN_WOW_STATE_SUSPENDED;
- ar->arWlanPowerState = WLAN_POWER_STATE_WOW;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Not allowed to go to WOW at this moment.\n"));
- }
-}
-
-int ar6000_suspend_ev(void *context)
-{
- int status = 0;
- struct ar6_softc *ar = (struct ar6_softc *)context;
- s16 pmmode = ar->arSuspendConfig;
-wow_not_connected:
- switch (pmmode) {
- case WLAN_SUSPEND_WOW:
- if (ar->arWmiReady && ar->arWlanState==WLAN_ENABLED && ar->arConnected) {
- ar6000_wow_suspend(ar);
- AR_DEBUG_PRINTF(ATH_DEBUG_PM,("%s:Suspend for wow mode %d\n", __func__, ar->arWlanPowerState));
- } else {
- pmmode = ar->arWow2Config;
- goto wow_not_connected;
- }
- break;
- case WLAN_SUSPEND_CUT_PWR:
- /* fall through */
- case WLAN_SUSPEND_CUT_PWR_IF_BT_OFF:
- /* fall through */
- case WLAN_SUSPEND_DEEP_SLEEP:
- /* fall through */
- default:
- status = ar6000_update_wlan_pwr_state(ar, WLAN_DISABLED, true);
- if (ar->arWlanPowerState==WLAN_POWER_STATE_ON ||
- ar->arWlanPowerState==WLAN_POWER_STATE_WOW) {
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("Strange suspend state for not wow mode %d", ar->arWlanPowerState));
- }
- AR_DEBUG_PRINTF(ATH_DEBUG_PM,("%s:Suspend for %d mode pwr %d status %d\n", __func__, pmmode, ar->arWlanPowerState, status));
- status = (ar->arWlanPowerState == WLAN_POWER_STATE_CUT_PWR) ? 0 : A_EBUSY;
- break;
- }
-
- ar->scan_triggered = 0;
- return status;
-}
-
-int ar6000_resume_ev(void *context)
-{
- struct ar6_softc *ar = (struct ar6_softc *)context;
- u16 powerState = ar->arWlanPowerState;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("%s: enter previous state %d wowState %d\n", __func__, powerState, ar->arWowState));
- switch (powerState) {
- case WLAN_POWER_STATE_WOW:
- ar6000_wow_resume(ar);
- break;
- case WLAN_POWER_STATE_CUT_PWR:
- /* fall through */
- case WLAN_POWER_STATE_DEEP_SLEEP:
- ar6000_update_wlan_pwr_state(ar, WLAN_ENABLED, true);
- AR_DEBUG_PRINTF(ATH_DEBUG_PM,("%s:Resume for %d mode pwr %d\n", __func__, powerState, ar->arWlanPowerState));
- break;
- case WLAN_POWER_STATE_ON:
- break;
- default:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Strange SDIO bus power mode!!\n"));
- break;
- }
- return 0;
-}
-
-void ar6000_check_wow_status(struct ar6_softc *ar, struct sk_buff *skb, bool isEvent)
-{
- if (ar->arWowState!=WLAN_WOW_STATE_NONE) {
- if (ar->arWowState==WLAN_WOW_STATE_SUSPENDING) {
- AR_DEBUG_PRINTF(ATH_DEBUG_PM,("\n%s: Received IRQ while we are wow suspending!!!\n\n", __func__));
- return;
- }
- /* Wow resume from irq interrupt */
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("%s: WoW resume from irq thread status %d\n", __func__, ar->arWlanPowerState));
- ar6000_wow_resume(ar);
- }
-}
-
-int ar6000_power_change_ev(void *context, u32 config)
-{
- struct ar6_softc *ar = (struct ar6_softc *)context;
- int status = 0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("%s: power change event callback %d \n", __func__, config));
- switch (config) {
- case HIF_DEVICE_POWER_UP:
- ar6000_restart_endpoint(ar->arNetDev);
- status = 0;
- break;
- case HIF_DEVICE_POWER_DOWN:
- case HIF_DEVICE_POWER_CUT:
- status = 0;
- break;
- }
- return status;
-}
-
-#endif /* CONFIG_PM */
-
-int
-ar6000_setup_cut_power_state(struct ar6_softc *ar, AR6000_WLAN_STATE state)
-{
- int status = 0;
- HIF_DEVICE_POWER_CHANGE_TYPE config;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("%s: Cut power %d %d \n", __func__,state, ar->arWlanPowerState));
-#ifdef CONFIG_PM
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("Wlan OFF %d BT OFf %d \n", ar->arWlanOff, ar->arBTOff));
-#endif
- do {
- if (state == WLAN_ENABLED) {
- /* Not in cut power state.. exit */
- if (ar->arWlanPowerState != WLAN_POWER_STATE_CUT_PWR) {
- break;
- }
-
- /* Change the state to ON */
- ar->arWlanPowerState = WLAN_POWER_STATE_ON;
-
-
- /* Indicate POWER_UP to HIF */
- config = HIF_DEVICE_POWER_UP;
- status = HIFConfigureDevice(ar->arHifDevice,
- HIF_DEVICE_POWER_STATE_CHANGE,
- &config,
- sizeof(HIF_DEVICE_POWER_CHANGE_TYPE));
-
- if (status == A_PENDING) {
- } else if (status == 0) {
- ar6000_restart_endpoint(ar->arNetDev);
- status = 0;
- }
- } else if (state == WLAN_DISABLED) {
-
-
- /* Already in cut power state.. exit */
- if (ar->arWlanPowerState == WLAN_POWER_STATE_CUT_PWR) {
- break;
- }
- ar6000_stop_endpoint(ar->arNetDev, true, false);
-
- config = HIF_DEVICE_POWER_CUT;
- status = HIFConfigureDevice(ar->arHifDevice,
- HIF_DEVICE_POWER_STATE_CHANGE,
- &config,
- sizeof(HIF_DEVICE_POWER_CHANGE_TYPE));
-
- ar->arWlanPowerState = WLAN_POWER_STATE_CUT_PWR;
- }
- } while (0);
-
- return status;
-}
-
-int
-ar6000_setup_deep_sleep_state(struct ar6_softc *ar, AR6000_WLAN_STATE state)
-{
- int status = 0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("%s: Deep sleep %d %d \n", __func__,state, ar->arWlanPowerState));
-#ifdef CONFIG_PM
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("Wlan OFF %d BT OFf %d \n", ar->arWlanOff, ar->arBTOff));
-#endif
- do {
- WMI_SET_HOST_SLEEP_MODE_CMD hostSleepMode;
-
- if (state == WLAN_ENABLED) {
- u16 fg_start_period;
-
- /* Not in deep sleep state.. exit */
- if (ar->arWlanPowerState != WLAN_POWER_STATE_DEEP_SLEEP) {
- if (ar->arWlanPowerState != WLAN_POWER_STATE_ON) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Strange state when we resume from deep sleep %d\n", ar->arWlanPowerState));
- }
- break;
- }
-
- fg_start_period = (ar->scParams.fg_start_period==0) ? 1 : ar->scParams.fg_start_period;
- hostSleepMode.awake = true;
- hostSleepMode.asleep = false;
-
- if ((status=wmi_set_host_sleep_mode_cmd(ar->arWmi, &hostSleepMode)) != 0) {
- break;
- }
-
- /* Change the state to ON */
- ar->arWlanPowerState = WLAN_POWER_STATE_ON;
-
- /* Enable foreground scanning */
- if ((status=wmi_scanparams_cmd(ar->arWmi, fg_start_period,
- ar->scParams.fg_end_period,
- ar->scParams.bg_period,
- ar->scParams.minact_chdwell_time,
- ar->scParams.maxact_chdwell_time,
- ar->scParams.pas_chdwell_time,
- ar->scParams.shortScanRatio,
- ar->scParams.scanCtrlFlags,
- ar->scParams.max_dfsch_act_time,
- ar->scParams.maxact_scan_per_ssid)) != 0)
- {
- break;
- }
-
- if (ar->arNetworkType != AP_NETWORK)
- {
- if (ar->arSsidLen) {
- if (ar6000_connect_to_ap(ar) != 0) {
- /* no need to report error if connection failed */
- break;
- }
- }
- }
- } else if (state == WLAN_DISABLED){
- WMI_SET_WOW_MODE_CMD wowMode = { .enable_wow = false };
-
- /* Already in deep sleep state.. exit */
- if (ar->arWlanPowerState != WLAN_POWER_STATE_ON) {
- if (ar->arWlanPowerState != WLAN_POWER_STATE_DEEP_SLEEP) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Strange state when we suspend for deep sleep %d\n", ar->arWlanPowerState));
- }
- break;
- }
-
- if (ar->arNetworkType != AP_NETWORK)
- {
- /* Disconnect from the AP and disable foreground scanning */
- AR6000_SPIN_LOCK(&ar->arLock, 0);
- if (ar->arConnected == true || ar->arConnectPending == true) {
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
- wmi_disconnect_cmd(ar->arWmi);
- } else {
- AR6000_SPIN_UNLOCK(&ar->arLock, 0);
- }
- }
-
- ar->scan_triggered = 0;
-
- if ((status=wmi_scanparams_cmd(ar->arWmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0, 0, 0)) != 0) {
- break;
- }
-
- /* make sure we disable wow for deep sleep */
- if ((status=wmi_set_wow_mode_cmd(ar->arWmi, &wowMode))!= 0)
- {
- break;
- }
-
- ar6000_TxDataCleanup(ar);
-#ifndef ATH6K_CONFIG_OTA_MODE
- wmi_powermode_cmd(ar->arWmi, REC_POWER);
-#endif
-
- hostSleepMode.awake = false;
- hostSleepMode.asleep = true;
- if ((status=wmi_set_host_sleep_mode_cmd(ar->arWmi, &hostSleepMode))!= 0) {
- break;
- }
- if (ar->arTxPending[ar->arControlEp]) {
- u32 timeleft = wait_event_interruptible_timeout(arEvent,
- ar->arTxPending[ar->arControlEp] == 0, wmitimeout * HZ);
- if (!timeleft || signal_pending(current)) {
- status = A_ERROR;
- break;
- }
- }
- status = hifWaitForPendingRecv(ar->arHifDevice);
-
- ar->arWlanPowerState = WLAN_POWER_STATE_DEEP_SLEEP;
- }
- } while (0);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to enter/exit deep sleep %d\n", state));
- }
-
- return status;
-}
-
-int
-ar6000_update_wlan_pwr_state(struct ar6_softc *ar, AR6000_WLAN_STATE state, bool pmEvent)
-{
- int status = 0;
- u16 powerState, oldPowerState;
- AR6000_WLAN_STATE oldstate = ar->arWlanState;
- bool wlanOff = ar->arWlanOff;
-#ifdef CONFIG_PM
- bool btOff = ar->arBTOff;
-#endif /* CONFIG_PM */
-
- if ((state!=WLAN_DISABLED && state!=WLAN_ENABLED)) {
- return A_ERROR;
- }
-
- if (ar->bIsDestroyProgress) {
- return A_EBUSY;
- }
-
- if (down_interruptible(&ar->arSem)) {
- return A_ERROR;
- }
-
- if (ar->bIsDestroyProgress) {
- up(&ar->arSem);
- return A_EBUSY;
- }
-
- ar->arWlanState = wlanOff ? WLAN_DISABLED : state;
- oldPowerState = ar->arWlanPowerState;
- if (state == WLAN_ENABLED) {
- powerState = ar->arWlanPowerState;
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("WLAN PWR set to ENABLE^^\n"));
- if (!wlanOff) {
- if (powerState == WLAN_POWER_STATE_DEEP_SLEEP) {
- status = ar6000_setup_deep_sleep_state(ar, WLAN_ENABLED);
- } else if (powerState == WLAN_POWER_STATE_CUT_PWR) {
- status = ar6000_setup_cut_power_state(ar, WLAN_ENABLED);
- }
- }
-#ifdef CONFIG_PM
- else if (pmEvent && wlanOff) {
- bool allowCutPwr = ((!ar->arBTSharing) || btOff);
- if ((powerState==WLAN_POWER_STATE_CUT_PWR) && (!allowCutPwr)) {
- /* Come out of cut power */
- ar6000_setup_cut_power_state(ar, WLAN_ENABLED);
- status = ar6000_setup_deep_sleep_state(ar, WLAN_DISABLED);
- }
- }
-#endif /* CONFIG_PM */
- } else if (state == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("WLAN PWR set to DISABLED~\n"));
- powerState = WLAN_POWER_STATE_DEEP_SLEEP;
-#ifdef CONFIG_PM
- if (pmEvent) { /* disable due to suspend */
- bool suspendCutPwr = (ar->arSuspendConfig == WLAN_SUSPEND_CUT_PWR ||
- (ar->arSuspendConfig == WLAN_SUSPEND_WOW &&
- ar->arWow2Config==WLAN_SUSPEND_CUT_PWR));
- bool suspendCutIfBtOff = ((ar->arSuspendConfig ==
- WLAN_SUSPEND_CUT_PWR_IF_BT_OFF ||
- (ar->arSuspendConfig == WLAN_SUSPEND_WOW &&
- ar->arWow2Config==WLAN_SUSPEND_CUT_PWR_IF_BT_OFF)) &&
- (!ar->arBTSharing || btOff));
- if ((suspendCutPwr) ||
- (suspendCutIfBtOff) ||
- (ar->arWlanState==WLAN_POWER_STATE_CUT_PWR))
- {
- powerState = WLAN_POWER_STATE_CUT_PWR;
- }
- } else {
- if ((wlanOff) &&
- (ar->arWlanOffConfig == WLAN_OFF_CUT_PWR) &&
- (!ar->arBTSharing || btOff))
- {
- /* For BT clock sharing designs, CUT_POWER depend on BT state */
- powerState = WLAN_POWER_STATE_CUT_PWR;
- }
- }
-#endif /* CONFIG_PM */
-
- if (powerState == WLAN_POWER_STATE_DEEP_SLEEP) {
- if (ar->arWlanPowerState == WLAN_POWER_STATE_CUT_PWR) {
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("Load firmware before set to deep sleep\n"));
- ar6000_setup_cut_power_state(ar, WLAN_ENABLED);
- }
- status = ar6000_setup_deep_sleep_state(ar, WLAN_DISABLED);
- } else if (powerState == WLAN_POWER_STATE_CUT_PWR) {
- status = ar6000_setup_cut_power_state(ar, WLAN_DISABLED);
- }
-
- }
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Fail to setup WLAN state %d\n", ar->arWlanState));
- ar->arWlanState = oldstate;
- } else if (status == 0) {
- WMI_REPORT_SLEEP_STATE_EVENT wmiSleepEvent, *pSleepEvent = NULL;
- if ((ar->arWlanPowerState == WLAN_POWER_STATE_ON) && (oldPowerState != WLAN_POWER_STATE_ON)) {
- wmiSleepEvent.sleepState = WMI_REPORT_SLEEP_STATUS_IS_AWAKE;
- pSleepEvent = &wmiSleepEvent;
- } else if ((ar->arWlanPowerState != WLAN_POWER_STATE_ON) && (oldPowerState == WLAN_POWER_STATE_ON)) {
- wmiSleepEvent.sleepState = WMI_REPORT_SLEEP_STATUS_IS_DEEP_SLEEP;
- pSleepEvent = &wmiSleepEvent;
- }
- if (pSleepEvent) {
- AR_DEBUG_PRINTF(ATH_DEBUG_PM, ("SENT WLAN Sleep Event %d\n", wmiSleepEvent.sleepState));
- }
- }
- up(&ar->arSem);
- return status;
-}
-
-int
-ar6000_set_bt_hw_state(struct ar6_softc *ar, u32 enable)
-{
-#ifdef CONFIG_PM
- bool off = (enable == 0);
- int status;
- if (ar->arBTOff == off) {
- return 0;
- }
- ar->arBTOff = off;
- status = ar6000_update_wlan_pwr_state(ar, ar->arWlanOff ? WLAN_DISABLED : WLAN_ENABLED, false);
- return status;
-#else
- return 0;
-#endif
-}
-
-int
-ar6000_set_wlan_state(struct ar6_softc *ar, AR6000_WLAN_STATE state)
-{
- int status;
- bool off = (state == WLAN_DISABLED);
- if (ar->arWlanOff == off) {
- return 0;
- }
- ar->arWlanOff = off;
- status = ar6000_update_wlan_pwr_state(ar, state, false);
- return status;
-}
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c b/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c
deleted file mode 100644
index ae7c1dd96d8..00000000000
--- a/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c
+++ /dev/null
@@ -1,455 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#include "ar6000_drv.h"
-
-#ifdef HTC_RAW_INTERFACE
-
-static void
-ar6000_htc_raw_read_cb(void *Context, struct htc_packet *pPacket)
-{
- struct ar6_softc *ar = (struct ar6_softc *)Context;
- raw_htc_buffer *busy;
- HTC_RAW_STREAM_ID streamID;
- AR_RAW_HTC_T *arRaw = ar->arRawHtc;
-
- busy = (raw_htc_buffer *)pPacket->pPktContext;
- A_ASSERT(busy != NULL);
-
- if (pPacket->Status == A_ECANCELED) {
- /*
- * HTC provides A_ECANCELED status when it doesn't want to be refilled
- * (probably due to a shutdown)
- */
- return;
- }
-
- streamID = arEndpoint2RawStreamID(ar,pPacket->Endpoint);
- A_ASSERT(streamID != HTC_RAW_STREAM_NOT_MAPPED);
-
-#ifdef CF
- if (down_trylock(&arRaw->raw_htc_read_sem[streamID])) {
-#else
- if (down_interruptible(&arRaw->raw_htc_read_sem[streamID])) {
-#endif /* CF */
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to down the semaphore\n"));
- }
-
- A_ASSERT((pPacket->Status != 0) ||
- (pPacket->pBuffer == (busy->data + HTC_HEADER_LEN)));
-
- busy->length = pPacket->ActualLength + HTC_HEADER_LEN;
- busy->currPtr = HTC_HEADER_LEN;
- arRaw->read_buffer_available[streamID] = true;
- //AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("raw read cb: 0x%X 0x%X \n", busy->currPtr,busy->length);
- up(&arRaw->raw_htc_read_sem[streamID]);
-
- /* Signal the waiting process */
- AR_DEBUG_PRINTF(ATH_DEBUG_HTC_RAW,("Waking up the StreamID(%d) read process\n", streamID));
- wake_up_interruptible(&arRaw->raw_htc_read_queue[streamID]);
-}
-
-static void
-ar6000_htc_raw_write_cb(void *Context, struct htc_packet *pPacket)
-{
- struct ar6_softc *ar = (struct ar6_softc *)Context;
- raw_htc_buffer *free;
- HTC_RAW_STREAM_ID streamID;
- AR_RAW_HTC_T *arRaw = ar->arRawHtc;
-
- free = (raw_htc_buffer *)pPacket->pPktContext;
- A_ASSERT(free != NULL);
-
- if (pPacket->Status == A_ECANCELED) {
- /*
- * HTC provides A_ECANCELED status when it doesn't want to be refilled
- * (probably due to a shutdown)
- */
- return;
- }
-
- streamID = arEndpoint2RawStreamID(ar,pPacket->Endpoint);
- A_ASSERT(streamID != HTC_RAW_STREAM_NOT_MAPPED);
-
-#ifdef CF
- if (down_trylock(&arRaw->raw_htc_write_sem[streamID])) {
-#else
- if (down_interruptible(&arRaw->raw_htc_write_sem[streamID])) {
-#endif
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Unable to down the semaphore\n"));
- }
-
- A_ASSERT(pPacket->pBuffer == (free->data + HTC_HEADER_LEN));
-
- free->length = 0;
- arRaw->write_buffer_available[streamID] = true;
- up(&arRaw->raw_htc_write_sem[streamID]);
-
- /* Signal the waiting process */
- AR_DEBUG_PRINTF(ATH_DEBUG_HTC_RAW,("Waking up the StreamID(%d) write process\n", streamID));
- wake_up_interruptible(&arRaw->raw_htc_write_queue[streamID]);
-}
-
-/* connect to a service */
-static int ar6000_connect_raw_service(struct ar6_softc *ar,
- HTC_RAW_STREAM_ID StreamID)
-{
- int status;
- struct htc_service_connect_resp response;
- u8 streamNo;
- struct htc_service_connect_req connect;
-
- do {
-
- A_MEMZERO(&connect,sizeof(connect));
- /* pass the stream ID as meta data to the RAW streams service */
- streamNo = (u8)StreamID;
- connect.pMetaData = &streamNo;
- connect.MetaDataLength = sizeof(u8);
- /* these fields are the same for all endpoints */
- connect.EpCallbacks.pContext = ar;
- connect.EpCallbacks.EpTxComplete = ar6000_htc_raw_write_cb;
- connect.EpCallbacks.EpRecv = ar6000_htc_raw_read_cb;
- /* simple interface, we don't need these optional callbacks */
- connect.EpCallbacks.EpRecvRefill = NULL;
- connect.EpCallbacks.EpSendFull = NULL;
- connect.MaxSendQueueDepth = RAW_HTC_WRITE_BUFFERS_NUM;
-
- /* connect to the raw streams service, we may be able to get 1 or more
- * connections, depending on WHAT is running on the target */
- connect.ServiceID = HTC_RAW_STREAMS_SVC;
-
- A_MEMZERO(&response,sizeof(response));
-
- /* try to connect to the raw stream, it is okay if this fails with
- * status HTC_SERVICE_NO_MORE_EP */
- status = HTCConnectService(ar->arHtcTarget,
- &connect,
- &response);
-
- if (status) {
- if (response.ConnectRespCode == HTC_SERVICE_NO_MORE_EP) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HTC RAW , No more streams allowed \n"));
- status = 0;
- }
- break;
- }
-
- /* set endpoint mapping for the RAW HTC streams */
- arSetRawStream2EndpointIDMap(ar,StreamID,response.Endpoint);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_HTC_RAW,("HTC RAW : stream ID: %d, endpoint: %d\n",
- StreamID, arRawStream2EndpointID(ar,StreamID)));
-
- } while (false);
-
- return status;
-}
-
-int ar6000_htc_raw_open(struct ar6_softc *ar)
-{
- int status;
- int streamID, endPt, count2;
- raw_htc_buffer *buffer;
- HTC_SERVICE_ID servicepriority;
- AR_RAW_HTC_T *arRaw = ar->arRawHtc;
- if (!arRaw) {
- arRaw = ar->arRawHtc = A_MALLOC(sizeof(AR_RAW_HTC_T));
- if (arRaw) {
- A_MEMZERO(arRaw, sizeof(AR_RAW_HTC_T));
- }
- }
- A_ASSERT(ar->arHtcTarget != NULL);
- if (!arRaw) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Faile to allocate memory for HTC RAW interface\n"));
- return -ENOMEM;
- }
- /* wait for target */
- status = HTCWaitTarget(ar->arHtcTarget);
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HTCWaitTarget failed (%d)\n", status));
- return -ENODEV;
- }
-
- for (endPt = 0; endPt < ENDPOINT_MAX; endPt++) {
- arRaw->arEp2RawMapping[endPt] = HTC_RAW_STREAM_NOT_MAPPED;
- }
-
- for (streamID = HTC_RAW_STREAM_0; streamID < HTC_RAW_STREAM_NUM_MAX; streamID++) {
- /* Initialize the data structures */
- sema_init(&arRaw->raw_htc_read_sem[streamID], 1);
- sema_init(&arRaw->raw_htc_write_sem[streamID], 1);
- init_waitqueue_head(&arRaw->raw_htc_read_queue[streamID]);
- init_waitqueue_head(&arRaw->raw_htc_write_queue[streamID]);
-
- /* try to connect to the raw service */
- status = ar6000_connect_raw_service(ar,streamID);
-
- if (status) {
- break;
- }
-
- if (arRawStream2EndpointID(ar,streamID) == 0) {
- break;
- }
-
- for (count2 = 0; count2 < RAW_HTC_READ_BUFFERS_NUM; count2 ++) {
- /* Initialize the receive buffers */
- buffer = &arRaw->raw_htc_write_buffer[streamID][count2];
- memset(buffer, 0, sizeof(raw_htc_buffer));
- buffer = &arRaw->raw_htc_read_buffer[streamID][count2];
- memset(buffer, 0, sizeof(raw_htc_buffer));
-
- SET_HTC_PACKET_INFO_RX_REFILL(&buffer->HTCPacket,
- buffer,
- buffer->data,
- HTC_RAW_BUFFER_SIZE,
- arRawStream2EndpointID(ar,streamID));
-
- /* Queue buffers to HTC for receive */
- if ((status = HTCAddReceivePkt(ar->arHtcTarget, &buffer->HTCPacket)) != 0)
- {
- BMIInit();
- return -EIO;
- }
- }
-
- for (count2 = 0; count2 < RAW_HTC_WRITE_BUFFERS_NUM; count2 ++) {
- /* Initialize the receive buffers */
- buffer = &arRaw->raw_htc_write_buffer[streamID][count2];
- memset(buffer, 0, sizeof(raw_htc_buffer));
- }
-
- arRaw->read_buffer_available[streamID] = false;
- arRaw->write_buffer_available[streamID] = true;
- }
-
- if (status) {
- return -EIO;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("HTC RAW, number of streams the target supports: %d \n", streamID));
-
- servicepriority = HTC_RAW_STREAMS_SVC; /* only 1 */
-
- /* set callbacks and priority list */
- HTCSetCreditDistribution(ar->arHtcTarget,
- ar,
- NULL, /* use default */
- NULL, /* use default */
- &servicepriority,
- 1);
-
- /* Start the HTC component */
- if ((status = HTCStart(ar->arHtcTarget)) != 0) {
- BMIInit();
- return -EIO;
- }
-
- (ar)->arRawIfInit = true;
-
- return 0;
-}
-
-int ar6000_htc_raw_close(struct ar6_softc *ar)
-{
- A_PRINTF("ar6000_htc_raw_close called \n");
- HTCStop(ar->arHtcTarget);
-
- /* reset the device */
- ar6000_reset_device(ar->arHifDevice, ar->arTargetType, true, false);
- /* Initialize the BMI component */
- BMIInit();
-
- return 0;
-}
-
-raw_htc_buffer *
-get_filled_buffer(struct ar6_softc *ar, HTC_RAW_STREAM_ID StreamID)
-{
- int count;
- raw_htc_buffer *busy;
- AR_RAW_HTC_T *arRaw = ar->arRawHtc;
-
- /* Check for data */
- for (count = 0; count < RAW_HTC_READ_BUFFERS_NUM; count ++) {
- busy = &arRaw->raw_htc_read_buffer[StreamID][count];
- if (busy->length) {
- break;
- }
- }
- if (busy->length) {
- arRaw->read_buffer_available[StreamID] = true;
- } else {
- arRaw->read_buffer_available[StreamID] = false;
- }
-
- return busy;
-}
-
-ssize_t ar6000_htc_raw_read(struct ar6_softc *ar, HTC_RAW_STREAM_ID StreamID,
- char __user *buffer, size_t length)
-{
- int readPtr;
- raw_htc_buffer *busy;
- AR_RAW_HTC_T *arRaw = ar->arRawHtc;
-
- if (arRawStream2EndpointID(ar,StreamID) == 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("StreamID(%d) not connected! \n", StreamID));
- return -EFAULT;
- }
-
- if (down_interruptible(&arRaw->raw_htc_read_sem[StreamID])) {
- return -ERESTARTSYS;
- }
-
- busy = get_filled_buffer(ar,StreamID);
- while (!arRaw->read_buffer_available[StreamID]) {
- up(&arRaw->raw_htc_read_sem[StreamID]);
-
- /* Wait for the data */
- AR_DEBUG_PRINTF(ATH_DEBUG_HTC_RAW,("Sleeping StreamID(%d) read process\n", StreamID));
- if (wait_event_interruptible(arRaw->raw_htc_read_queue[StreamID],
- arRaw->read_buffer_available[StreamID]))
- {
- return -EINTR;
- }
- if (down_interruptible(&arRaw->raw_htc_read_sem[StreamID])) {
- return -ERESTARTSYS;
- }
- busy = get_filled_buffer(ar,StreamID);
- }
-
- /* Read the data */
- readPtr = busy->currPtr;
- if (length > busy->length - HTC_HEADER_LEN) {
- length = busy->length - HTC_HEADER_LEN;
- }
- if (copy_to_user(buffer, &busy->data[readPtr], length)) {
- up(&arRaw->raw_htc_read_sem[StreamID]);
- return -EFAULT;
- }
-
- busy->currPtr += length;
-
- if (busy->currPtr == busy->length)
- {
- busy->currPtr = 0;
- busy->length = 0;
- HTC_PACKET_RESET_RX(&busy->HTCPacket);
- //AR_DEBUG_PRINTF(ATH_DEBUG_HTC_RAW,("raw read ioctl: ep for packet:%d \n", busy->HTCPacket.Endpoint));
- HTCAddReceivePkt(ar->arHtcTarget, &busy->HTCPacket);
- }
- arRaw->read_buffer_available[StreamID] = false;
- up(&arRaw->raw_htc_read_sem[StreamID]);
-
- return length;
-}
-
-static raw_htc_buffer *
-get_free_buffer(struct ar6_softc *ar, HTC_ENDPOINT_ID StreamID)
-{
- int count;
- raw_htc_buffer *free;
- AR_RAW_HTC_T *arRaw = ar->arRawHtc;
-
- free = NULL;
- for (count = 0; count < RAW_HTC_WRITE_BUFFERS_NUM; count ++) {
- free = &arRaw->raw_htc_write_buffer[StreamID][count];
- if (free->length == 0) {
- break;
- }
- }
- if (!free->length) {
- arRaw->write_buffer_available[StreamID] = true;
- } else {
- arRaw->write_buffer_available[StreamID] = false;
- }
-
- return free;
-}
-
-ssize_t ar6000_htc_raw_write(struct ar6_softc *ar, HTC_RAW_STREAM_ID StreamID,
- char __user *buffer, size_t length)
-{
- int writePtr;
- raw_htc_buffer *free;
- AR_RAW_HTC_T *arRaw = ar->arRawHtc;
- if (arRawStream2EndpointID(ar,StreamID) == 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("StreamID(%d) not connected! \n", StreamID));
- return -EFAULT;
- }
-
- if (down_interruptible(&arRaw->raw_htc_write_sem[StreamID])) {
- return -ERESTARTSYS;
- }
-
- /* Search for a free buffer */
- free = get_free_buffer(ar,StreamID);
-
- /* Check if there is space to write else wait */
- while (!arRaw->write_buffer_available[StreamID]) {
- up(&arRaw->raw_htc_write_sem[StreamID]);
-
- /* Wait for buffer to become free */
- AR_DEBUG_PRINTF(ATH_DEBUG_HTC_RAW,("Sleeping StreamID(%d) write process\n", StreamID));
- if (wait_event_interruptible(arRaw->raw_htc_write_queue[StreamID],
- arRaw->write_buffer_available[StreamID]))
- {
- return -EINTR;
- }
- if (down_interruptible(&arRaw->raw_htc_write_sem[StreamID])) {
- return -ERESTARTSYS;
- }
- free = get_free_buffer(ar,StreamID);
- }
-
- /* Send the data */
- writePtr = HTC_HEADER_LEN;
- if (length > (HTC_RAW_BUFFER_SIZE - HTC_HEADER_LEN)) {
- length = HTC_RAW_BUFFER_SIZE - HTC_HEADER_LEN;
- }
-
- if (copy_from_user(&free->data[writePtr], buffer, length)) {
- up(&arRaw->raw_htc_read_sem[StreamID]);
- return -EFAULT;
- }
-
- free->length = length;
-
- SET_HTC_PACKET_INFO_TX(&free->HTCPacket,
- free,
- &free->data[writePtr],
- length,
- arRawStream2EndpointID(ar,StreamID),
- AR6K_DATA_PKT_TAG);
-
- HTCSendPkt(ar->arHtcTarget,&free->HTCPacket);
-
- arRaw->write_buffer_available[StreamID] = false;
- up(&arRaw->raw_htc_write_sem[StreamID]);
-
- return length;
-}
-#endif /* HTC_RAW_INTERFACE */
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
deleted file mode 100644
index 5fdda4aa2fe..00000000000
--- a/drivers/staging/ath6kl/os/linux/cfg80211.c
+++ /dev/null
@@ -1,1892 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <net/cfg80211.h>
-#include <net/netlink.h>
-
-#include "ar6000_drv.h"
-
-
-extern A_WAITQUEUE_HEAD arEvent;
-extern unsigned int wmitimeout;
-extern int reconnect_flag;
-
-
-#define RATETAB_ENT(_rate, _rateid, _flags) { \
- .bitrate = (_rate), \
- .flags = (_flags), \
- .hw_value = (_rateid), \
-}
-
-#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
- .hw_value = (_channel), \
- .center_freq = (_freq), \
- .flags = (_flags), \
- .max_antenna_gain = 0, \
- .max_power = 30, \
-}
-
-#define CHAN5G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
- .hw_value = (_channel), \
- .center_freq = 5000 + (5 * (_channel)), \
- .flags = (_flags), \
- .max_antenna_gain = 0, \
- .max_power = 30, \
-}
-
-static struct
-ieee80211_rate ar6k_rates[] = {
- RATETAB_ENT(10, 0x1, 0),
- RATETAB_ENT(20, 0x2, 0),
- RATETAB_ENT(55, 0x4, 0),
- RATETAB_ENT(110, 0x8, 0),
- RATETAB_ENT(60, 0x10, 0),
- RATETAB_ENT(90, 0x20, 0),
- RATETAB_ENT(120, 0x40, 0),
- RATETAB_ENT(180, 0x80, 0),
- RATETAB_ENT(240, 0x100, 0),
- RATETAB_ENT(360, 0x200, 0),
- RATETAB_ENT(480, 0x400, 0),
- RATETAB_ENT(540, 0x800, 0),
-};
-
-#define ar6k_a_rates (ar6k_rates + 4)
-#define ar6k_a_rates_size 8
-#define ar6k_g_rates (ar6k_rates + 0)
-#define ar6k_g_rates_size 12
-
-static struct
-ieee80211_channel ar6k_2ghz_channels[] = {
- CHAN2G(1, 2412, 0),
- CHAN2G(2, 2417, 0),
- CHAN2G(3, 2422, 0),
- CHAN2G(4, 2427, 0),
- CHAN2G(5, 2432, 0),
- CHAN2G(6, 2437, 0),
- CHAN2G(7, 2442, 0),
- CHAN2G(8, 2447, 0),
- CHAN2G(9, 2452, 0),
- CHAN2G(10, 2457, 0),
- CHAN2G(11, 2462, 0),
- CHAN2G(12, 2467, 0),
- CHAN2G(13, 2472, 0),
- CHAN2G(14, 2484, 0),
-};
-
-static struct
-ieee80211_channel ar6k_5ghz_a_channels[] = {
- CHAN5G(34, 0), CHAN5G(36, 0),
- CHAN5G(38, 0), CHAN5G(40, 0),
- CHAN5G(42, 0), CHAN5G(44, 0),
- CHAN5G(46, 0), CHAN5G(48, 0),
- CHAN5G(52, 0), CHAN5G(56, 0),
- CHAN5G(60, 0), CHAN5G(64, 0),
- CHAN5G(100, 0), CHAN5G(104, 0),
- CHAN5G(108, 0), CHAN5G(112, 0),
- CHAN5G(116, 0), CHAN5G(120, 0),
- CHAN5G(124, 0), CHAN5G(128, 0),
- CHAN5G(132, 0), CHAN5G(136, 0),
- CHAN5G(140, 0), CHAN5G(149, 0),
- CHAN5G(153, 0), CHAN5G(157, 0),
- CHAN5G(161, 0), CHAN5G(165, 0),
- CHAN5G(184, 0), CHAN5G(188, 0),
- CHAN5G(192, 0), CHAN5G(196, 0),
- CHAN5G(200, 0), CHAN5G(204, 0),
- CHAN5G(208, 0), CHAN5G(212, 0),
- CHAN5G(216, 0),
-};
-
-static struct
-ieee80211_supported_band ar6k_band_2ghz = {
- .n_channels = ARRAY_SIZE(ar6k_2ghz_channels),
- .channels = ar6k_2ghz_channels,
- .n_bitrates = ar6k_g_rates_size,
- .bitrates = ar6k_g_rates,
-};
-
-static struct
-ieee80211_supported_band ar6k_band_5ghz = {
- .n_channels = ARRAY_SIZE(ar6k_5ghz_a_channels),
- .channels = ar6k_5ghz_a_channels,
- .n_bitrates = ar6k_a_rates_size,
- .bitrates = ar6k_a_rates,
-};
-
-static int
-ar6k_set_wpa_version(struct ar6_softc *ar, enum nl80211_wpa_versions wpa_version)
-{
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: %u\n", __func__, wpa_version));
-
- if (!wpa_version) {
- ar->arAuthMode = NONE_AUTH;
- } else if (wpa_version & NL80211_WPA_VERSION_1) {
- ar->arAuthMode = WPA_AUTH;
- } else if (wpa_version & NL80211_WPA_VERSION_2) {
- ar->arAuthMode = WPA2_AUTH;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("%s: %u not spported\n", __func__, wpa_version));
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-static int
-ar6k_set_auth_type(struct ar6_softc *ar, enum nl80211_auth_type auth_type)
-{
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: 0x%x\n", __func__, auth_type));
-
- switch (auth_type) {
- case NL80211_AUTHTYPE_OPEN_SYSTEM:
- ar->arDot11AuthMode = OPEN_AUTH;
- break;
- case NL80211_AUTHTYPE_SHARED_KEY:
- ar->arDot11AuthMode = SHARED_AUTH;
- break;
- case NL80211_AUTHTYPE_NETWORK_EAP:
- ar->arDot11AuthMode = LEAP_AUTH;
- break;
-
- case NL80211_AUTHTYPE_AUTOMATIC:
- ar->arDot11AuthMode = OPEN_AUTH;
- ar->arAutoAuthStage = AUTH_OPEN_IN_PROGRESS;
- break;
-
- default:
- ar->arDot11AuthMode = OPEN_AUTH;
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: 0x%x not spported\n", __func__, auth_type));
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-static int
-ar6k_set_cipher(struct ar6_softc *ar, u32 cipher, bool ucast)
-{
- u8 *ar_cipher = ucast ? &ar->arPairwiseCrypto :
- &ar->arGroupCrypto;
- u8 *ar_cipher_len = ucast ? &ar->arPairwiseCryptoLen :
- &ar->arGroupCryptoLen;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: cipher 0x%x, ucast %u\n", __func__, cipher, ucast));
-
- switch (cipher) {
- case 0:
- case IW_AUTH_CIPHER_NONE:
- *ar_cipher = NONE_CRYPT;
- *ar_cipher_len = 0;
- break;
- case WLAN_CIPHER_SUITE_WEP40:
- *ar_cipher = WEP_CRYPT;
- *ar_cipher_len = 5;
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- *ar_cipher = WEP_CRYPT;
- *ar_cipher_len = 13;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- *ar_cipher = TKIP_CRYPT;
- *ar_cipher_len = 0;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- *ar_cipher = AES_CRYPT;
- *ar_cipher_len = 0;
- break;
- default:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("%s: cipher 0x%x not supported\n", __func__, cipher));
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-static void
-ar6k_set_key_mgmt(struct ar6_softc *ar, u32 key_mgmt)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: 0x%x\n", __func__, key_mgmt));
-
- if (WLAN_AKM_SUITE_PSK == key_mgmt) {
- if (WPA_AUTH == ar->arAuthMode) {
- ar->arAuthMode = WPA_PSK_AUTH;
- } else if (WPA2_AUTH == ar->arAuthMode) {
- ar->arAuthMode = WPA2_PSK_AUTH;
- }
- } else if (WLAN_AKM_SUITE_8021X != key_mgmt) {
- ar->arAuthMode = NONE_AUTH;
- }
-}
-
-static int
-ar6k_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_connect_params *sme)
-{
- struct ar6_softc *ar = ar6k_priv(dev);
- int status;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
- ar->smeState = SME_CONNECTING;
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready yet\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- if(ar->bIsDestroyProgress) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: destroy in progress\n", __func__));
- return -EBUSY;
- }
-
- if(!sme->ssid_len || IEEE80211_MAX_SSID_LEN < sme->ssid_len) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ssid invalid\n", __func__));
- return -EINVAL;
- }
-
- if(ar->arSkipScan == true &&
- ((sme->channel && sme->channel->center_freq == 0) ||
- (sme->bssid && !sme->bssid[0] && !sme->bssid[1] && !sme->bssid[2] &&
- !sme->bssid[3] && !sme->bssid[4] && !sme->bssid[5])))
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s:SkipScan: channel or bssid invalid\n", __func__));
- return -EINVAL;
- }
-
- if(down_interruptible(&ar->arSem)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, couldn't get access\n", __func__));
- return -ERESTARTSYS;
- }
-
- if(ar->bIsDestroyProgress) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, destroy in progress\n", __func__));
- up(&ar->arSem);
- return -EBUSY;
- }
-
- if(ar->arTxPending[wmi_get_control_ep(ar->arWmi)]) {
- /*
- * sleep until the command queue drains
- */
- wait_event_interruptible_timeout(arEvent,
- ar->arTxPending[wmi_get_control_ep(ar->arWmi)] == 0, wmitimeout * HZ);
- if (signal_pending(current)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: cmd queue drain timeout\n", __func__));
- up(&ar->arSem);
- return -EINTR;
- }
- }
-
- if(ar->arConnected == true &&
- ar->arSsidLen == sme->ssid_len &&
- !memcmp(ar->arSsid, sme->ssid, ar->arSsidLen)) {
- reconnect_flag = true;
- status = wmi_reconnect_cmd(ar->arWmi,
- ar->arReqBssid,
- ar->arChannelHint);
-
- up(&ar->arSem);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_reconnect_cmd failed\n", __func__));
- return -EIO;
- }
- return 0;
- } else if(ar->arSsidLen == sme->ssid_len &&
- !memcmp(ar->arSsid, sme->ssid, ar->arSsidLen)) {
- ar6000_disconnect(ar);
- }
-
- A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
- ar->arSsidLen = sme->ssid_len;
- memcpy(ar->arSsid, sme->ssid, sme->ssid_len);
-
- if(sme->channel){
- ar->arChannelHint = sme->channel->center_freq;
- }
-
- A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid));
- if(sme->bssid){
- if(memcmp(&sme->bssid, bcast_mac, AR6000_ETH_ADDR_LEN)) {
- memcpy(ar->arReqBssid, sme->bssid, sizeof(ar->arReqBssid));
- }
- }
-
- ar6k_set_wpa_version(ar, sme->crypto.wpa_versions);
- ar6k_set_auth_type(ar, sme->auth_type);
-
- if(sme->crypto.n_ciphers_pairwise) {
- ar6k_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
- } else {
- ar6k_set_cipher(ar, IW_AUTH_CIPHER_NONE, true);
- }
- ar6k_set_cipher(ar, sme->crypto.cipher_group, false);
-
- if(sme->crypto.n_akm_suites) {
- ar6k_set_key_mgmt(ar, sme->crypto.akm_suites[0]);
- }
-
- if((sme->key_len) &&
- (NONE_AUTH == ar->arAuthMode) &&
- (WEP_CRYPT == ar->arPairwiseCrypto)) {
- struct ar_key *key = NULL;
-
- if(sme->key_idx < WMI_MIN_KEY_INDEX || sme->key_idx > WMI_MAX_KEY_INDEX) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("%s: key index %d out of bounds\n", __func__, sme->key_idx));
- up(&ar->arSem);
- return -ENOENT;
- }
-
- key = &ar->keys[sme->key_idx];
- key->key_len = sme->key_len;
- memcpy(key->key, sme->key, key->key_len);
- key->cipher = ar->arPairwiseCrypto;
- ar->arDefTxKeyIndex = sme->key_idx;
-
- wmi_addKey_cmd(ar->arWmi, sme->key_idx,
- ar->arPairwiseCrypto,
- GROUP_USAGE | TX_USAGE,
- key->key_len,
- NULL,
- key->key, KEY_OP_INIT_VAL, NULL,
- NO_SYNC_WMIFLAG);
- }
-
- if (!ar->arUserBssFilter) {
- if (wmi_bssfilter_cmd(ar->arWmi, ALL_BSS_FILTER, 0) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Couldn't set bss filtering\n", __func__));
- up(&ar->arSem);
- return -EIO;
- }
- }
-
- ar->arNetworkType = ar->arNextMode;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: Connect called with authmode %d dot11 auth %d"\
- " PW crypto %d PW crypto Len %d GRP crypto %d"\
- " GRP crypto Len %d channel hint %u\n",
- __func__, ar->arAuthMode, ar->arDot11AuthMode,
- ar->arPairwiseCrypto, ar->arPairwiseCryptoLen,
- ar->arGroupCrypto, ar->arGroupCryptoLen, ar->arChannelHint));
-
- reconnect_flag = 0;
- status = wmi_connect_cmd(ar->arWmi, ar->arNetworkType,
- ar->arDot11AuthMode, ar->arAuthMode,
- ar->arPairwiseCrypto, ar->arPairwiseCryptoLen,
- ar->arGroupCrypto,ar->arGroupCryptoLen,
- ar->arSsidLen, ar->arSsid,
- ar->arReqBssid, ar->arChannelHint,
- ar->arConnectCtrlFlags);
-
- up(&ar->arSem);
-
- if (A_EINVAL == status) {
- A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
- ar->arSsidLen = 0;
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Invalid request\n", __func__));
- return -ENOENT;
- } else if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_connect_cmd failed\n", __func__));
- return -EIO;
- }
-
- if ((!(ar->arConnectCtrlFlags & CONNECT_DO_WPA_OFFLOAD)) &&
- ((WPA_PSK_AUTH == ar->arAuthMode) || (WPA2_PSK_AUTH == ar->arAuthMode)))
- {
- A_TIMEOUT_MS(&ar->disconnect_timer, A_DISCONNECT_TIMER_INTERVAL, 0);
- }
-
- ar->arConnectCtrlFlags &= ~CONNECT_DO_WPA_OFFLOAD;
- ar->arConnectPending = true;
-
- return 0;
-}
-
-void
-ar6k_cfg80211_connect_event(struct ar6_softc *ar, u16 channel,
- u8 *bssid, u16 listenInterval,
- u16 beaconInterval,NETWORK_TYPE networkType,
- u8 beaconIeLen, u8 assocReqLen,
- u8 assocRespLen, u8 *assocInfo)
-{
- u16 size = 0;
- u16 capability = 0;
- struct cfg80211_bss *bss = NULL;
- struct ieee80211_mgmt *mgmt = NULL;
- struct ieee80211_channel *ibss_channel = NULL;
- s32 signal = 50 * 100;
- u8 ie_buf_len = 0;
- unsigned char ie_buf[256];
- unsigned char *ptr_ie_buf = ie_buf;
- unsigned char *ieeemgmtbuf = NULL;
- u8 source_mac[ATH_MAC_LEN];
-
- u8 assocReqIeOffset = sizeof(u16) + /* capinfo*/
- sizeof(u16); /* listen interval */
- u8 assocRespIeOffset = sizeof(u16) + /* capinfo*/
- sizeof(u16) + /* status Code */
- sizeof(u16); /* associd */
- u8 *assocReqIe = assocInfo + beaconIeLen + assocReqIeOffset;
- u8 *assocRespIe = assocInfo + beaconIeLen + assocReqLen + assocRespIeOffset;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
-
- assocReqLen -= assocReqIeOffset;
- assocRespLen -= assocRespIeOffset;
-
- ar->arAutoAuthStage = AUTH_IDLE;
-
- if((ADHOC_NETWORK & networkType)) {
- if(NL80211_IFTYPE_ADHOC != ar->wdev->iftype) {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: ath6k not in ibss mode\n", __func__));
- return;
- }
- }
-
- if((INFRA_NETWORK & networkType)) {
- if(NL80211_IFTYPE_STATION != ar->wdev->iftype) {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: ath6k not in station mode\n", __func__));
- return;
- }
- }
-
- /* Before informing the join/connect event, make sure that
- * bss entry is present in scan list, if it not present
- * construct and insert into scan list, otherwise that
- * event will be dropped on the way by cfg80211, due to
- * this keys will not be plumbed in case of WEP and
- * application will not be aware of join/connect status. */
- bss = cfg80211_get_bss(ar->wdev->wiphy, NULL, bssid,
- ar->wdev->ssid, ar->wdev->ssid_len,
- ((ADHOC_NETWORK & networkType) ? WLAN_CAPABILITY_IBSS : WLAN_CAPABILITY_ESS),
- ((ADHOC_NETWORK & networkType) ? WLAN_CAPABILITY_IBSS : WLAN_CAPABILITY_ESS));
-
- /*
- * Earlier we were updating the cfg about bss by making a beacon frame
- * only if the entry for bss is not there. This can have some issue if
- * ROAM event is generated and a heavy traffic is ongoing. The ROAM
- * event is handled through a work queue and by the time it really gets
- * handled, BSS would have been aged out. So it is better to update the
- * cfg about BSS irrespective of its entry being present right now or
- * not.
- */
-
- if (ADHOC_NETWORK & networkType) {
- /* construct 802.11 mgmt beacon */
- if(ptr_ie_buf) {
- *ptr_ie_buf++ = WLAN_EID_SSID;
- *ptr_ie_buf++ = ar->arSsidLen;
- memcpy(ptr_ie_buf, ar->arSsid, ar->arSsidLen);
- ptr_ie_buf +=ar->arSsidLen;
-
- *ptr_ie_buf++ = WLAN_EID_IBSS_PARAMS;
- *ptr_ie_buf++ = 2; /* length */
- *ptr_ie_buf++ = 0; /* ATIM window */
- *ptr_ie_buf++ = 0; /* ATIM window */
-
- /* TODO: update ibss params and include supported rates,
- * DS param set, extened support rates, wmm. */
-
- ie_buf_len = ptr_ie_buf - ie_buf;
- }
-
- capability |= IEEE80211_CAPINFO_IBSS;
- if(WEP_CRYPT == ar->arPairwiseCrypto) {
- capability |= IEEE80211_CAPINFO_PRIVACY;
- }
- memcpy(source_mac, ar->arNetDev->dev_addr, ATH_MAC_LEN);
- ptr_ie_buf = ie_buf;
- } else {
- capability = *(u16 *)(&assocInfo[beaconIeLen]);
- memcpy(source_mac, bssid, ATH_MAC_LEN);
- ptr_ie_buf = assocReqIe;
- ie_buf_len = assocReqLen;
- }
-
- size = offsetof(struct ieee80211_mgmt, u)
- + sizeof(mgmt->u.beacon)
- + ie_buf_len;
-
- ieeemgmtbuf = A_MALLOC_NOWAIT(size);
- if(!ieeemgmtbuf) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("%s: ieeeMgmtbuf alloc error\n", __func__));
- cfg80211_put_bss(bss);
- return;
- }
-
- A_MEMZERO(ieeemgmtbuf, size);
- mgmt = (struct ieee80211_mgmt *)ieeemgmtbuf;
- mgmt->frame_control = (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
- memcpy(mgmt->da, bcast_mac, ATH_MAC_LEN);
- memcpy(mgmt->sa, source_mac, ATH_MAC_LEN);
- memcpy(mgmt->bssid, bssid, ATH_MAC_LEN);
- mgmt->u.beacon.beacon_int = beaconInterval;
- mgmt->u.beacon.capab_info = capability;
- memcpy(mgmt->u.beacon.variable, ptr_ie_buf, ie_buf_len);
-
- ibss_channel = ieee80211_get_channel(ar->wdev->wiphy, (int)channel);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: inform bss with bssid %pM channel %d beaconInterval %d "
- "capability 0x%x\n", __func__, mgmt->bssid,
- ibss_channel->hw_value, beaconInterval, capability));
-
- bss = cfg80211_inform_bss_frame(ar->wdev->wiphy,
- ibss_channel, mgmt,
- le16_to_cpu(size),
- signal, GFP_KERNEL);
- kfree(ieeemgmtbuf);
- cfg80211_put_bss(bss);
-
- if((ADHOC_NETWORK & networkType)) {
- cfg80211_ibss_joined(ar->arNetDev, bssid, GFP_KERNEL);
- return;
- }
-
- if (false == ar->arConnected) {
- /* inform connect result to cfg80211 */
- ar->smeState = SME_DISCONNECTED;
- cfg80211_connect_result(ar->arNetDev, bssid,
- assocReqIe, assocReqLen,
- assocRespIe, assocRespLen,
- WLAN_STATUS_SUCCESS, GFP_KERNEL);
- } else {
- /* inform roam event to cfg80211 */
- cfg80211_roamed(ar->arNetDev, ibss_channel, bssid,
- assocReqIe, assocReqLen,
- assocRespIe, assocRespLen,
- GFP_KERNEL);
- }
-}
-
-static int
-ar6k_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
- u16 reason_code)
-{
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: reason=%u\n", __func__, reason_code));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- if(ar->bIsDestroyProgress) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, destroy in progress\n", __func__));
- return -EBUSY;
- }
-
- if(down_interruptible(&ar->arSem)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, couldn't get access\n", __func__));
- return -ERESTARTSYS;
- }
-
- reconnect_flag = 0;
- ar6000_disconnect(ar);
- A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
- ar->arSsidLen = 0;
-
- if (ar->arSkipScan == false) {
- A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid));
- }
-
- up(&ar->arSem);
-
- return 0;
-}
-
-void
-ar6k_cfg80211_disconnect_event(struct ar6_softc *ar, u8 reason,
- u8 *bssid, u8 assocRespLen,
- u8 *assocInfo, u16 protocolReasonStatus)
-{
-
- u16 status;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: reason=%u\n", __func__, reason));
-
- if (ar->scan_request) {
- cfg80211_scan_done(ar->scan_request, true);
- ar->scan_request = NULL;
- }
- if((ADHOC_NETWORK & ar->arNetworkType)) {
- if(NL80211_IFTYPE_ADHOC != ar->wdev->iftype) {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: ath6k not in ibss mode\n", __func__));
- return;
- }
- A_MEMZERO(bssid, ETH_ALEN);
- cfg80211_ibss_joined(ar->arNetDev, bssid, GFP_KERNEL);
- return;
- }
-
- if((INFRA_NETWORK & ar->arNetworkType)) {
- if(NL80211_IFTYPE_STATION != ar->wdev->iftype) {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: ath6k not in station mode\n", __func__));
- return;
- }
- }
-
- if(true == ar->arConnectPending) {
- if(NO_NETWORK_AVAIL == reason) {
- /* connect cmd failed */
- wmi_disconnect_cmd(ar->arWmi);
- } else if (reason == DISCONNECT_CMD) {
- if (ar->arAutoAuthStage) {
- /*
- * If the current auth algorithm is open try shared
- * and make autoAuthStage idle. We do not make it
- * leap for now being.
- */
- if (ar->arDot11AuthMode == OPEN_AUTH) {
- struct ar_key *key = NULL;
- key = &ar->keys[ar->arDefTxKeyIndex];
- if (down_interruptible(&ar->arSem)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, couldn't get access\n", __func__));
- return;
- }
-
-
- ar->arDot11AuthMode = SHARED_AUTH;
- ar->arAutoAuthStage = AUTH_IDLE;
-
- wmi_addKey_cmd(ar->arWmi, ar->arDefTxKeyIndex,
- ar->arPairwiseCrypto,
- GROUP_USAGE | TX_USAGE,
- key->key_len,
- NULL,
- key->key, KEY_OP_INIT_VAL, NULL,
- NO_SYNC_WMIFLAG);
-
- status = wmi_connect_cmd(ar->arWmi,
- ar->arNetworkType,
- ar->arDot11AuthMode,
- ar->arAuthMode,
- ar->arPairwiseCrypto,
- ar->arPairwiseCryptoLen,
- ar->arGroupCrypto,
- ar->arGroupCryptoLen,
- ar->arSsidLen,
- ar->arSsid,
- ar->arReqBssid,
- ar->arChannelHint,
- ar->arConnectCtrlFlags);
- up(&ar->arSem);
-
- } else if (ar->arDot11AuthMode == SHARED_AUTH) {
- /* should not reach here */
- }
- } else {
- ar->arConnectPending = false;
- if (ar->smeState == SME_CONNECTING) {
- cfg80211_connect_result(ar->arNetDev, bssid,
- NULL, 0,
- NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
- } else {
- cfg80211_disconnected(ar->arNetDev,
- reason,
- NULL, 0,
- GFP_KERNEL);
- }
- ar->smeState = SME_DISCONNECTED;
- }
- }
- } else {
- if (reason != DISCONNECT_CMD)
- wmi_disconnect_cmd(ar->arWmi);
- }
-}
-
-void
-ar6k_cfg80211_scan_node(void *arg, bss_t *ni)
-{
- struct wiphy *wiphy = (struct wiphy *)arg;
- u16 size;
- unsigned char *ieeemgmtbuf = NULL;
- struct ieee80211_mgmt *mgmt;
- struct ieee80211_channel *channel;
- struct ieee80211_supported_band *band;
- struct ieee80211_common_ie *cie;
- s32 signal;
- int freq;
-
- cie = &ni->ni_cie;
-
-#define CHAN_IS_11A(x) (!((x >= 2412) && (x <= 2484)))
- if(CHAN_IS_11A(cie->ie_chan)) {
- /* 11a */
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
- } else if((cie->ie_erp) || (cie->ie_xrates)) {
- /* 11g */
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
- } else {
- /* 11b */
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
- }
-
- size = ni->ni_framelen + offsetof(struct ieee80211_mgmt, u);
- ieeemgmtbuf = A_MALLOC_NOWAIT(size);
- if(!ieeemgmtbuf)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ieeeMgmtbuf alloc error\n", __func__));
- return;
- }
-
- /* Note:
- TODO: Update target to include 802.11 mac header while sending bss info.
- Target removes 802.11 mac header while sending the bss info to host,
- cfg80211 needs it, for time being just filling the da, sa and bssid fields alone.
- */
- mgmt = (struct ieee80211_mgmt *)ieeemgmtbuf;
- memcpy(mgmt->da, bcast_mac, ATH_MAC_LEN);
- memcpy(mgmt->sa, ni->ni_macaddr, ATH_MAC_LEN);
- memcpy(mgmt->bssid, ni->ni_macaddr, ATH_MAC_LEN);
- memcpy(ieeemgmtbuf + offsetof(struct ieee80211_mgmt, u),
- ni->ni_buf, ni->ni_framelen);
-
- freq = cie->ie_chan;
- channel = ieee80211_get_channel(wiphy, freq);
- signal = ni->ni_snr * 100;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: bssid %pM channel %d freq %d size %d\n", __func__,
- mgmt->bssid, channel->hw_value, freq, size));
- cfg80211_inform_bss_frame(wiphy, channel, mgmt,
- le16_to_cpu(size),
- signal, GFP_KERNEL);
-
- kfree (ieeemgmtbuf);
-}
-
-static int
-ar6k_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request)
-{
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(ndev);
- int ret = 0;
- u32 forceFgScan = 0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- if (!ar->arUserBssFilter) {
- if (wmi_bssfilter_cmd(ar->arWmi,
- (ar->arConnected ? ALL_BUT_BSS_FILTER : ALL_BSS_FILTER),
- 0) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Couldn't set bss filtering\n", __func__));
- return -EIO;
- }
- }
-
- if(request->n_ssids &&
- request->ssids[0].ssid_len) {
- u8 i;
-
- if(request->n_ssids > (MAX_PROBED_SSID_INDEX - 1)) {
- request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
- }
-
- for (i = 0; i < request->n_ssids; i++) {
- wmi_probedSsid_cmd(ar->arWmi, i+1, SPECIFIC_SSID_FLAG,
- request->ssids[i].ssid_len,
- request->ssids[i].ssid);
- }
- }
-
- if(ar->arConnected) {
- forceFgScan = 1;
- }
-
- if(wmi_startscan_cmd(ar->arWmi, WMI_LONG_SCAN, forceFgScan, false, \
- 0, 0, 0, NULL) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_startscan_cmd failed\n", __func__));
- ret = -EIO;
- }
-
- ar->scan_request = request;
-
- return ret;
-}
-
-void
-ar6k_cfg80211_scanComplete_event(struct ar6_softc *ar, int status)
-{
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: status %d\n", __func__, status));
-
- if (!ar->scan_request)
- return;
-
- if ((status == A_ECANCELED) || (status == A_EBUSY)) {
- cfg80211_scan_done(ar->scan_request, true);
- goto out;
- }
-
- /* Translate data to cfg80211 mgmt format */
- wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy);
-
- cfg80211_scan_done(ar->scan_request, false);
-
- if(ar->scan_request->n_ssids &&
- ar->scan_request->ssids[0].ssid_len) {
- u8 i;
-
- for (i = 0; i < ar->scan_request->n_ssids; i++) {
- wmi_probedSsid_cmd(ar->arWmi, i+1, DISABLE_SSID_FLAG,
- 0, NULL);
- }
- }
-
-out:
- ar->scan_request = NULL;
-}
-
-static int
-ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
-{
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(ndev);
- struct ar_key *key = NULL;
- u8 key_usage;
- u8 key_type;
- int status = 0;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s:\n", __func__));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- if(key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: key index %d out of bounds\n", __func__, key_index));
- return -ENOENT;
- }
-
- key = &ar->keys[key_index];
- A_MEMZERO(key, sizeof(struct ar_key));
-
- if(!mac_addr || is_broadcast_ether_addr(mac_addr)) {
- key_usage = GROUP_USAGE;
- } else {
- key_usage = PAIRWISE_USAGE;
- }
-
- if(params) {
- if(params->key_len > WLAN_MAX_KEY_LEN ||
- params->seq_len > IW_ENCODE_SEQ_MAX_SIZE)
- return -EINVAL;
-
- key->key_len = params->key_len;
- memcpy(key->key, params->key, key->key_len);
- key->seq_len = params->seq_len;
- memcpy(key->seq, params->seq, key->seq_len);
- key->cipher = params->cipher;
- }
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- key_type = WEP_CRYPT;
- break;
-
- case WLAN_CIPHER_SUITE_TKIP:
- key_type = TKIP_CRYPT;
- break;
-
- case WLAN_CIPHER_SUITE_CCMP:
- key_type = AES_CRYPT;
- break;
-
- default:
- return -ENOTSUPP;
- }
-
- if (((WPA_PSK_AUTH == ar->arAuthMode) || (WPA2_PSK_AUTH == ar->arAuthMode)) &&
- (GROUP_USAGE & key_usage))
- {
- A_UNTIMEOUT(&ar->disconnect_timer);
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: index %d, key_len %d, key_type 0x%x,"\
- " key_usage 0x%x, seq_len %d\n",
- __func__, key_index, key->key_len, key_type,
- key_usage, key->seq_len));
-
- ar->arDefTxKeyIndex = key_index;
- status = wmi_addKey_cmd(ar->arWmi, ar->arDefTxKeyIndex, key_type, key_usage,
- key->key_len, key->seq, key->key, KEY_OP_INIT_VAL,
- (u8 *)mac_addr, SYNC_BOTH_WMIFLAG);
-
-
- if (status) {
- return -EIO;
- }
-
- return 0;
-}
-
-static int
-ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
-{
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(ndev);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d\n", __func__, key_index));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- if(key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: key index %d out of bounds\n", __func__, key_index));
- return -ENOENT;
- }
-
- if(!ar->keys[key_index].key_len) {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d is empty\n", __func__, key_index));
- return 0;
- }
-
- ar->keys[key_index].key_len = 0;
-
- return wmi_deleteKey_cmd(ar->arWmi, key_index);
-}
-
-
-static int
-ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- void *cookie,
- void (*callback)(void *cookie, struct key_params*))
-{
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(ndev);
- struct ar_key *key = NULL;
- struct key_params params;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d\n", __func__, key_index));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- if(key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: key index %d out of bounds\n", __func__, key_index));
- return -ENOENT;
- }
-
- key = &ar->keys[key_index];
- A_MEMZERO(&params, sizeof(params));
- params.cipher = key->cipher;
- params.key_len = key->key_len;
- params.seq_len = key->seq_len;
- params.seq = key->seq;
- params.key = key->key;
-
- callback(cookie, &params);
-
- return key->key_len ? 0 : -ENOENT;
-}
-
-
-static int
-ar6k_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool unicast, bool multicast)
-{
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(ndev);
- struct ar_key *key = NULL;
- int status = 0;
- u8 key_usage;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d\n", __func__, key_index));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- if(key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: key index %d out of bounds\n",
- __func__, key_index));
- return -ENOENT;
- }
-
- if(!ar->keys[key_index].key_len) {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: invalid key index %d\n",
- __func__, key_index));
- return -EINVAL;
- }
-
- ar->arDefTxKeyIndex = key_index;
- key = &ar->keys[ar->arDefTxKeyIndex];
- key_usage = GROUP_USAGE;
- if (WEP_CRYPT == ar->arPairwiseCrypto) {
- key_usage |= TX_USAGE;
- }
-
- status = wmi_addKey_cmd(ar->arWmi, ar->arDefTxKeyIndex,
- ar->arPairwiseCrypto, key_usage,
- key->key_len, key->seq, key->key, KEY_OP_INIT_VAL,
- NULL, SYNC_BOTH_WMIFLAG);
- if (status) {
- return -EIO;
- }
-
- return 0;
-}
-
-static int
-ar6k_cfg80211_set_default_mgmt_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index)
-{
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(ndev);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d\n", __func__, key_index));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: not supported\n", __func__));
- return -ENOTSUPP;
-}
-
-void
-ar6k_cfg80211_tkip_micerr_event(struct ar6_softc *ar, u8 keyid, bool ismcast)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast));
-
- cfg80211_michael_mic_failure(ar->arNetDev, ar->arBssid,
- (ismcast ? NL80211_KEYTYPE_GROUP : NL80211_KEYTYPE_PAIRWISE),
- keyid, NULL, GFP_KERNEL);
-}
-
-static int
-ar6k_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
-{
- struct ar6_softc *ar = (struct ar6_softc *)wiphy_priv(wiphy);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: changed 0x%x\n", __func__, changed));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
- if (wmi_set_rts_cmd(ar->arWmi,wiphy->rts_threshold) != 0){
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_set_rts_cmd failed\n", __func__));
- return -EIO;
- }
- }
-
- return 0;
-}
-
-static int
-ar6k_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev,
- const u8 *peer,
- const struct cfg80211_bitrate_mask *mask)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Setting rates: Not supported\n"));
- return -EIO;
-}
-
-/* The type nl80211_tx_power_setting replaces the following data type from 2.6.36 onwards */
-static int
-ar6k_cfg80211_set_txpower(struct wiphy *wiphy, enum nl80211_tx_power_setting type, int dbm)
-{
- struct ar6_softc *ar = (struct ar6_softc *)wiphy_priv(wiphy);
- u8 ar_dbm;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: type 0x%x, dbm %d\n", __func__, type, dbm));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- ar->arTxPwrSet = false;
- switch(type) {
- case NL80211_TX_POWER_AUTOMATIC:
- return 0;
- case NL80211_TX_POWER_LIMITED:
- ar->arTxPwr = ar_dbm = dbm;
- ar->arTxPwrSet = true;
- break;
- default:
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: type 0x%x not supported\n", __func__, type));
- return -EOPNOTSUPP;
- }
-
- wmi_set_txPwr_cmd(ar->arWmi, ar_dbm);
-
- return 0;
-}
-
-static int
-ar6k_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
-{
- struct ar6_softc *ar = (struct ar6_softc *)wiphy_priv(wiphy);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- if((ar->arConnected == true)) {
- ar->arTxPwr = 0;
-
- if(wmi_get_txPwr_cmd(ar->arWmi) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_get_txPwr_cmd failed\n", __func__));
- return -EIO;
- }
-
- wait_event_interruptible_timeout(arEvent, ar->arTxPwr != 0, 5 * HZ);
-
- if(signal_pending(current)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Target did not respond\n", __func__));
- return -EINTR;
- }
- }
-
- *dbm = ar->arTxPwr;
- return 0;
-}
-
-static int
-ar6k_cfg80211_set_power_mgmt(struct wiphy *wiphy,
- struct net_device *dev,
- bool pmgmt, int timeout)
-{
- struct ar6_softc *ar = ar6k_priv(dev);
- WMI_POWER_MODE_CMD pwrMode;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: pmgmt %d, timeout %d\n", __func__, pmgmt, timeout));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- if(pmgmt) {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: Max Perf\n", __func__));
- pwrMode.powerMode = REC_POWER;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: Rec Power\n", __func__));
- pwrMode.powerMode = MAX_PERF_POWER;
- }
-
- if(wmi_powermode_cmd(ar->arWmi, pwrMode.powerMode) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_powermode_cmd failed\n", __func__));
- return -EIO;
- }
-
- return 0;
-}
-
-static struct net_device *
-ar6k_cfg80211_add_virtual_intf(struct wiphy *wiphy, char *name,
- enum nl80211_iftype type, u32 *flags,
- struct vif_params *params)
-{
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: not supported\n", __func__));
-
- /* Multiple virtual interface is not supported.
- * The default interface supports STA and IBSS type
- */
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static int
-ar6k_cfg80211_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
-{
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: not supported\n", __func__));
-
- /* Multiple virtual interface is not supported.
- * The default interface supports STA and IBSS type
- */
- return -EOPNOTSUPP;
-}
-
-static int
-ar6k_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
- enum nl80211_iftype type, u32 *flags,
- struct vif_params *params)
-{
- struct ar6_softc *ar = ar6k_priv(ndev);
- struct wireless_dev *wdev = ar->wdev;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: type %u\n", __func__, type));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- switch (type) {
- case NL80211_IFTYPE_STATION:
- ar->arNextMode = INFRA_NETWORK;
- break;
- case NL80211_IFTYPE_ADHOC:
- ar->arNextMode = ADHOC_NETWORK;
- break;
- default:
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: type %u\n", __func__, type));
- return -EOPNOTSUPP;
- }
-
- wdev->iftype = type;
-
- return 0;
-}
-
-static int
-ar6k_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_ibss_params *ibss_param)
-{
- struct ar6_softc *ar = ar6k_priv(dev);
- int status;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- if(!ibss_param->ssid_len || IEEE80211_MAX_SSID_LEN < ibss_param->ssid_len) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ssid invalid\n", __func__));
- return -EINVAL;
- }
-
- ar->arSsidLen = ibss_param->ssid_len;
- memcpy(ar->arSsid, ibss_param->ssid, ar->arSsidLen);
-
- if(ibss_param->channel) {
- ar->arChannelHint = ibss_param->channel->center_freq;
- }
-
- if(ibss_param->channel_fixed) {
- /* TODO: channel_fixed: The channel should be fixed, do not search for
- * IBSSs to join on other channels. Target firmware does not support this
- * feature, needs to be updated.*/
- }
-
- A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid));
- if(ibss_param->bssid) {
- if(memcmp(&ibss_param->bssid, bcast_mac, AR6000_ETH_ADDR_LEN)) {
- memcpy(ar->arReqBssid, ibss_param->bssid, sizeof(ar->arReqBssid));
- }
- }
-
- ar6k_set_wpa_version(ar, 0);
- ar6k_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM);
-
- if(ibss_param->privacy) {
- ar6k_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true);
- ar6k_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false);
- } else {
- ar6k_set_cipher(ar, IW_AUTH_CIPHER_NONE, true);
- ar6k_set_cipher(ar, IW_AUTH_CIPHER_NONE, false);
- }
-
- ar->arNetworkType = ar->arNextMode;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: Connect called with authmode %d dot11 auth %d"\
- " PW crypto %d PW crypto Len %d GRP crypto %d"\
- " GRP crypto Len %d channel hint %u\n",
- __func__, ar->arAuthMode, ar->arDot11AuthMode,
- ar->arPairwiseCrypto, ar->arPairwiseCryptoLen,
- ar->arGroupCrypto, ar->arGroupCryptoLen, ar->arChannelHint));
-
- status = wmi_connect_cmd(ar->arWmi, ar->arNetworkType,
- ar->arDot11AuthMode, ar->arAuthMode,
- ar->arPairwiseCrypto, ar->arPairwiseCryptoLen,
- ar->arGroupCrypto,ar->arGroupCryptoLen,
- ar->arSsidLen, ar->arSsid,
- ar->arReqBssid, ar->arChannelHint,
- ar->arConnectCtrlFlags);
- ar->arConnectPending = true;
-
- return 0;
-}
-
-static int
-ar6k_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
-{
- struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
-
- if(ar->arWmiReady == false) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__));
- return -EIO;
- }
-
- if(ar->arWlanState == WLAN_DISABLED) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__));
- return -EIO;
- }
-
- ar6000_disconnect(ar);
- A_MEMZERO(ar->arSsid, sizeof(ar->arSsid));
- ar->arSsidLen = 0;
-
- return 0;
-}
-
-#ifdef CONFIG_NL80211_TESTMODE
-enum ar6k_testmode_attr {
- __AR6K_TM_ATTR_INVALID = 0,
- AR6K_TM_ATTR_CMD = 1,
- AR6K_TM_ATTR_DATA = 2,
-
- /* keep last */
- __AR6K_TM_ATTR_AFTER_LAST,
- AR6K_TM_ATTR_MAX = __AR6K_TM_ATTR_AFTER_LAST - 1
-};
-
-enum ar6k_testmode_cmd {
- AR6K_TM_CMD_TCMD = 0,
- AR6K_TM_CMD_RX_REPORT = 1,
-};
-
-#define AR6K_TM_DATA_MAX_LEN 5000
-
-static const struct nla_policy ar6k_testmode_policy[AR6K_TM_ATTR_MAX + 1] = {
- [AR6K_TM_ATTR_CMD] = { .type = NLA_U32 },
- [AR6K_TM_ATTR_DATA] = { .type = NLA_BINARY,
- .len = AR6K_TM_DATA_MAX_LEN },
-};
-
-void ar6000_testmode_rx_report_event(struct ar6_softc *ar, void *buf,
- int buf_len)
-{
- if (down_interruptible(&ar->arSem))
- return;
-
- kfree(ar->tcmd_rx_report);
-
- ar->tcmd_rx_report = kmemdup(buf, buf_len, GFP_KERNEL);
- ar->tcmd_rx_report_len = buf_len;
-
- up(&ar->arSem);
-
- wake_up(&arEvent);
-}
-
-static int ar6000_testmode_rx_report(struct ar6_softc *ar, void *buf,
- int buf_len, struct sk_buff *skb)
-{
- int ret = 0;
- long left;
-
- if (down_interruptible(&ar->arSem))
- return -ERESTARTSYS;
-
- if (ar->arWmiReady == false) {
- ret = -EIO;
- goto out;
- }
-
- if (ar->bIsDestroyProgress) {
- ret = -EBUSY;
- goto out;
- }
-
- WARN_ON(ar->tcmd_rx_report != NULL);
- WARN_ON(ar->tcmd_rx_report_len > 0);
-
- if (wmi_test_cmd(ar->arWmi, buf, buf_len) < 0) {
- up(&ar->arSem);
- return -EIO;
- }
-
- left = wait_event_interruptible_timeout(arEvent,
- ar->tcmd_rx_report != NULL,
- wmitimeout * HZ);
-
- if (left == 0) {
- ret = -ETIMEDOUT;
- goto out;
- } else if (left < 0) {
- ret = left;
- goto out;
- }
-
- if (ar->tcmd_rx_report == NULL || ar->tcmd_rx_report_len == 0) {
- ret = -EINVAL;
- goto out;
- }
-
- NLA_PUT(skb, AR6K_TM_ATTR_DATA, ar->tcmd_rx_report_len,
- ar->tcmd_rx_report);
-
- kfree(ar->tcmd_rx_report);
- ar->tcmd_rx_report = NULL;
-
-out:
- up(&ar->arSem);
-
- return ret;
-
-nla_put_failure:
- ret = -ENOBUFS;
- goto out;
-}
-
-static int ar6k_testmode_cmd(struct wiphy *wiphy, void *data, int len)
-{
- struct ar6_softc *ar = wiphy_priv(wiphy);
- struct nlattr *tb[AR6K_TM_ATTR_MAX + 1];
- int err, buf_len, reply_len;
- struct sk_buff *skb;
- void *buf;
-
- err = nla_parse(tb, AR6K_TM_ATTR_MAX, data, len,
- ar6k_testmode_policy);
- if (err)
- return err;
-
- if (!tb[AR6K_TM_ATTR_CMD])
- return -EINVAL;
-
- switch (nla_get_u32(tb[AR6K_TM_ATTR_CMD])) {
- case AR6K_TM_CMD_TCMD:
- if (!tb[AR6K_TM_ATTR_DATA])
- return -EINVAL;
-
- buf = nla_data(tb[AR6K_TM_ATTR_DATA]);
- buf_len = nla_len(tb[AR6K_TM_ATTR_DATA]);
-
- wmi_test_cmd(ar->arWmi, buf, buf_len);
-
- return 0;
-
- break;
- case AR6K_TM_CMD_RX_REPORT:
- if (!tb[AR6K_TM_ATTR_DATA])
- return -EINVAL;
-
- buf = nla_data(tb[AR6K_TM_ATTR_DATA]);
- buf_len = nla_len(tb[AR6K_TM_ATTR_DATA]);
-
- reply_len = nla_total_size(AR6K_TM_DATA_MAX_LEN);
- skb = cfg80211_testmode_alloc_reply_skb(wiphy, reply_len);
- if (!skb)
- return -ENOMEM;
-
- err = ar6000_testmode_rx_report(ar, buf, buf_len, skb);
- if (err < 0) {
- kfree_skb(skb);
- return err;
- }
-
- return cfg80211_testmode_reply(skb);
- default:
- return -EOPNOTSUPP;
- }
-}
-#endif
-
-static const
-u32 cipher_suites[] = {
- WLAN_CIPHER_SUITE_WEP40,
- WLAN_CIPHER_SUITE_WEP104,
- WLAN_CIPHER_SUITE_TKIP,
- WLAN_CIPHER_SUITE_CCMP,
-};
-
-bool is_rate_legacy(s32 rate)
-{
- static const s32 legacy[] = { 1000, 2000, 5500, 11000,
- 6000, 9000, 12000, 18000, 24000,
- 36000, 48000, 54000 };
- u8 i;
-
- for (i = 0; i < ARRAY_SIZE(legacy); i++) {
- if (rate == legacy[i])
- return true;
- }
-
- return false;
-}
-
-bool is_rate_ht20(s32 rate, u8 *mcs, bool *sgi)
-{
- static const s32 ht20[] = { 6500, 13000, 19500, 26000, 39000,
- 52000, 58500, 65000, 72200 };
- u8 i;
-
- for (i = 0; i < ARRAY_SIZE(ht20); i++) {
- if (rate == ht20[i]) {
- if (i == ARRAY_SIZE(ht20) - 1)
- /* last rate uses sgi */
- *sgi = true;
- else
- *sgi = false;
-
- *mcs = i;
- return true;
- }
- }
- return false;
-}
-
-bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
-{
- static const s32 ht40[] = { 13500, 27000, 40500, 54000,
- 81000, 108000, 121500, 135000,
- 150000 };
- u8 i;
-
- for (i = 0; i < ARRAY_SIZE(ht40); i++) {
- if (rate == ht40[i]) {
- if (i == ARRAY_SIZE(ht40) - 1)
- /* last rate uses sgi */
- *sgi = true;
- else
- *sgi = false;
-
- *mcs = i;
- return true;
- }
- }
-
- return false;
-}
-
-static int ar6k_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
-{
- struct ar6_softc *ar = ar6k_priv(dev);
- long left;
- bool sgi;
- s32 rate;
- int ret;
- u8 mcs;
-
- if (memcmp(mac, ar->arBssid, ETH_ALEN) != 0)
- return -ENOENT;
-
- if (down_interruptible(&ar->arSem))
- return -EBUSY;
-
- ar->statsUpdatePending = true;
-
- ret = wmi_get_stats_cmd(ar->arWmi);
-
- if (ret != 0) {
- up(&ar->arSem);
- return -EIO;
- }
-
- left = wait_event_interruptible_timeout(arEvent,
- ar->statsUpdatePending == false,
- wmitimeout * HZ);
-
- up(&ar->arSem);
-
- if (left == 0)
- return -ETIMEDOUT;
- else if (left < 0)
- return left;
-
- if (ar->arTargetStats.rx_bytes) {
- sinfo->rx_bytes = ar->arTargetStats.rx_bytes;
- sinfo->filled |= STATION_INFO_RX_BYTES;
- sinfo->rx_packets = ar->arTargetStats.rx_packets;
- sinfo->filled |= STATION_INFO_RX_PACKETS;
- }
-
- if (ar->arTargetStats.tx_bytes) {
- sinfo->tx_bytes = ar->arTargetStats.tx_bytes;
- sinfo->filled |= STATION_INFO_TX_BYTES;
- sinfo->tx_packets = ar->arTargetStats.tx_packets;
- sinfo->filled |= STATION_INFO_TX_PACKETS;
- }
-
- sinfo->signal = ar->arTargetStats.cs_rssi;
- sinfo->filled |= STATION_INFO_SIGNAL;
-
- rate = ar->arTargetStats.tx_unicast_rate;
-
- if (is_rate_legacy(rate)) {
- sinfo->txrate.legacy = rate / 100;
- } else if (is_rate_ht20(rate, &mcs, &sgi)) {
- if (sgi) {
- sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- sinfo->txrate.mcs = mcs - 1;
- } else {
- sinfo->txrate.mcs = mcs;
- }
-
- sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
- } else if (is_rate_ht40(rate, &mcs, &sgi)) {
- if (sgi) {
- sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- sinfo->txrate.mcs = mcs - 1;
- } else {
- sinfo->txrate.mcs = mcs;
- }
-
- sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
- sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
- } else {
- WARN(1, "invalid rate: %d", rate);
- return 0;
- }
-
- sinfo->filled |= STATION_INFO_TX_BITRATE;
-
- return 0;
-}
-
-static int ar6k_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
-{
- struct ar6_softc *ar = ar6k_priv(netdev);
- return wmi_setPmkid_cmd(ar->arWmi, pmksa->bssid, pmksa->pmkid, true);
-}
-
-static int ar6k_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
-{
- struct ar6_softc *ar = ar6k_priv(netdev);
- return wmi_setPmkid_cmd(ar->arWmi, pmksa->bssid, pmksa->pmkid, false);
-}
-
-static int ar6k_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
-{
- struct ar6_softc *ar = ar6k_priv(netdev);
- if (ar->arConnected)
- return wmi_setPmkid_cmd(ar->arWmi, ar->arBssid, NULL, false);
- return 0;
-}
-
-static struct
-cfg80211_ops ar6k_cfg80211_ops = {
- .change_virtual_intf = ar6k_cfg80211_change_iface,
- .add_virtual_intf = ar6k_cfg80211_add_virtual_intf,
- .del_virtual_intf = ar6k_cfg80211_del_virtual_intf,
- .scan = ar6k_cfg80211_scan,
- .connect = ar6k_cfg80211_connect,
- .disconnect = ar6k_cfg80211_disconnect,
- .add_key = ar6k_cfg80211_add_key,
- .get_key = ar6k_cfg80211_get_key,
- .del_key = ar6k_cfg80211_del_key,
- .set_default_key = ar6k_cfg80211_set_default_key,
- .set_default_mgmt_key = ar6k_cfg80211_set_default_mgmt_key,
- .set_wiphy_params = ar6k_cfg80211_set_wiphy_params,
- .set_bitrate_mask = ar6k_cfg80211_set_bitrate_mask,
- .set_tx_power = ar6k_cfg80211_set_txpower,
- .get_tx_power = ar6k_cfg80211_get_txpower,
- .set_power_mgmt = ar6k_cfg80211_set_power_mgmt,
- .join_ibss = ar6k_cfg80211_join_ibss,
- .leave_ibss = ar6k_cfg80211_leave_ibss,
- .get_station = ar6k_get_station,
- .set_pmksa = ar6k_set_pmksa,
- .del_pmksa = ar6k_del_pmksa,
- .flush_pmksa = ar6k_flush_pmksa,
- CFG80211_TESTMODE_CMD(ar6k_testmode_cmd)
-};
-
-struct wireless_dev *
-ar6k_cfg80211_init(struct device *dev)
-{
- int ret = 0;
- struct wireless_dev *wdev;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
-
- wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if(!wdev) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("%s: Couldn't allocate wireless device\n", __func__));
- return ERR_PTR(-ENOMEM);
- }
-
- /* create a new wiphy for use with cfg80211 */
- wdev->wiphy = wiphy_new(&ar6k_cfg80211_ops, sizeof(struct ar6_softc));
- if(!wdev->wiphy) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("%s: Couldn't allocate wiphy device\n", __func__));
- kfree(wdev);
- return ERR_PTR(-ENOMEM);
- }
-
- /* set device pointer for wiphy */
- set_wiphy_dev(wdev->wiphy, dev);
-
- wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
- /* max num of ssids that can be probed during scanning */
- wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar6k_band_2ghz;
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar6k_band_5ghz;
- wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-
- wdev->wiphy->cipher_suites = cipher_suites;
- wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
-
- ret = wiphy_register(wdev->wiphy);
- if(ret < 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
- ("%s: Couldn't register wiphy device\n", __func__));
- wiphy_free(wdev->wiphy);
- return ERR_PTR(ret);
- }
-
- return wdev;
-}
-
-void
-ar6k_cfg80211_deinit(struct ar6_softc *ar)
-{
- struct wireless_dev *wdev = ar->wdev;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__));
-
- if(ar->scan_request) {
- cfg80211_scan_done(ar->scan_request, true);
- ar->scan_request = NULL;
- }
-
- if(!wdev)
- return;
-
- wiphy_unregister(wdev->wiphy);
- wiphy_free(wdev->wiphy);
- kfree(wdev);
-}
-
-
-
-
-
-
-
diff --git a/drivers/staging/ath6kl/os/linux/export_hci_transport.c b/drivers/staging/ath6kl/os/linux/export_hci_transport.c
deleted file mode 100644
index 430998edacc..00000000000
--- a/drivers/staging/ath6kl/os/linux/export_hci_transport.c
+++ /dev/null
@@ -1,124 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// HCI bridge implementation
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#include <a_config.h>
-#include <athdefs.h>
-#include "a_osapi.h"
-#include "htc_api.h"
-#include "a_drv.h"
-#include "hif.h"
-#include "common_drv.h"
-#include "a_debug.h"
-#include "hci_transport_api.h"
-
-#include "AR6002/hw4.0/hw/apb_athr_wlan_map.h"
-#include "AR6002/hw4.0/hw/uart_reg.h"
-#include "AR6002/hw4.0/hw/rtc_wlan_reg.h"
-
-HCI_TRANSPORT_HANDLE (*_HCI_TransportAttach)(void *HTCHandle, struct hci_transport_config_info *pInfo);
-void (*_HCI_TransportDetach)(HCI_TRANSPORT_HANDLE HciTrans);
-int (*_HCI_TransportAddReceivePkts)(HCI_TRANSPORT_HANDLE HciTrans, struct htc_packet_queue *pQueue);
-int (*_HCI_TransportSendPkt)(HCI_TRANSPORT_HANDLE HciTrans, struct htc_packet *pPacket, bool Synchronous);
-void (*_HCI_TransportStop)(HCI_TRANSPORT_HANDLE HciTrans);
-int (*_HCI_TransportStart)(HCI_TRANSPORT_HANDLE HciTrans);
-int (*_HCI_TransportEnableDisableAsyncRecv)(HCI_TRANSPORT_HANDLE HciTrans, bool Enable);
-int (*_HCI_TransportRecvHCIEventSync)(HCI_TRANSPORT_HANDLE HciTrans,
- struct htc_packet *pPacket,
- int MaxPollMS);
-int (*_HCI_TransportSetBaudRate)(HCI_TRANSPORT_HANDLE HciTrans, u32 Baud);
-int (*_HCI_TransportEnablePowerMgmt)(HCI_TRANSPORT_HANDLE HciTrans, bool Enable);
-
-extern struct hci_transport_callbacks ar6kHciTransCallbacks;
-
-int ar6000_register_hci_transport(struct hci_transport_callbacks *hciTransCallbacks)
-{
- ar6kHciTransCallbacks = *hciTransCallbacks;
-
- _HCI_TransportAttach = HCI_TransportAttach;
- _HCI_TransportDetach = HCI_TransportDetach;
- _HCI_TransportAddReceivePkts = HCI_TransportAddReceivePkts;
- _HCI_TransportSendPkt = HCI_TransportSendPkt;
- _HCI_TransportStop = HCI_TransportStop;
- _HCI_TransportStart = HCI_TransportStart;
- _HCI_TransportEnableDisableAsyncRecv = HCI_TransportEnableDisableAsyncRecv;
- _HCI_TransportRecvHCIEventSync = HCI_TransportRecvHCIEventSync;
- _HCI_TransportSetBaudRate = HCI_TransportSetBaudRate;
- _HCI_TransportEnablePowerMgmt = HCI_TransportEnablePowerMgmt;
-
- return 0;
-}
-
-int
-ar6000_get_hif_dev(struct hif_device *device, void *config)
-{
- int status;
-
- status = HIFConfigureDevice(device,
- HIF_DEVICE_GET_OS_DEVICE,
- (struct hif_device_os_device_info *)config,
- sizeof(struct hif_device_os_device_info));
- return status;
-}
-
-int ar6000_set_uart_config(struct hif_device *hifDevice,
- u32 scale,
- u32 step)
-{
- u32 regAddress;
- u32 regVal;
- int status;
-
- regAddress = WLAN_UART_BASE_ADDRESS | UART_CLKDIV_ADDRESS;
- regVal = ((u32)scale << 16) | step;
- /* change the HCI UART scale/step values through the diagnostic window */
- status = ar6000_WriteRegDiag(hifDevice, &regAddress, &regVal);
-
- return status;
-}
-
-int ar6000_get_core_clock_config(struct hif_device *hifDevice, u32 *data)
-{
- u32 regAddress;
- int status;
-
- regAddress = WLAN_RTC_BASE_ADDRESS | WLAN_CPU_CLOCK_ADDRESS;
- /* read CPU clock settings*/
- status = ar6000_ReadRegDiag(hifDevice, &regAddress, data);
-
- return status;
-}
-
-EXPORT_SYMBOL(ar6000_register_hci_transport);
-EXPORT_SYMBOL(ar6000_get_hif_dev);
-EXPORT_SYMBOL(ar6000_set_uart_config);
-EXPORT_SYMBOL(ar6000_get_core_clock_config);
-EXPORT_SYMBOL(_HCI_TransportAttach);
-EXPORT_SYMBOL(_HCI_TransportDetach);
-EXPORT_SYMBOL(_HCI_TransportAddReceivePkts);
-EXPORT_SYMBOL(_HCI_TransportSendPkt);
-EXPORT_SYMBOL(_HCI_TransportStop);
-EXPORT_SYMBOL(_HCI_TransportStart);
-EXPORT_SYMBOL(_HCI_TransportEnableDisableAsyncRecv);
-EXPORT_SYMBOL(_HCI_TransportRecvHCIEventSync);
-EXPORT_SYMBOL(_HCI_TransportSetBaudRate);
-EXPORT_SYMBOL(_HCI_TransportEnablePowerMgmt);
diff --git a/drivers/staging/ath6kl/os/linux/hci_bridge.c b/drivers/staging/ath6kl/os/linux/hci_bridge.c
deleted file mode 100644
index 6087edcb1d6..00000000000
--- a/drivers/staging/ath6kl/os/linux/hci_bridge.c
+++ /dev/null
@@ -1,1141 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// HCI bridge implementation
-//
-// Author(s): ="Atheros"
-//==============================================================================
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-#include <linux/etherdevice.h>
-#include <a_config.h>
-#include <athdefs.h>
-#include "a_osapi.h"
-#include "htc_api.h"
-#include "wmi.h"
-#include "a_drv.h"
-#include "hif.h"
-#include "common_drv.h"
-#include "a_debug.h"
-#define ATH_DEBUG_HCI_BRIDGE ATH_DEBUG_MAKE_MODULE_MASK(6)
-#define ATH_DEBUG_HCI_RECV ATH_DEBUG_MAKE_MODULE_MASK(7)
-#define ATH_DEBUG_HCI_SEND ATH_DEBUG_MAKE_MODULE_MASK(8)
-#define ATH_DEBUG_HCI_DUMP ATH_DEBUG_MAKE_MODULE_MASK(9)
-#else
-#include "ar6000_drv.h"
-#endif /* EXPORT_HCI_BRIDGE_INTERFACE */
-
-#ifdef ATH_AR6K_ENABLE_GMBOX
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-#include "export_hci_transport.h"
-#else
-#include "hci_transport_api.h"
-#endif
-#include "epping_test.h"
-#include "gmboxif.h"
-#include "ar3kconfig.h"
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-
- /* only build on newer kernels which have BT configured */
-#if defined(CONFIG_BT_MODULE) || defined(CONFIG_BT)
-#define CONFIG_BLUEZ_HCI_BRIDGE
-#endif
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-unsigned int ar3khcibaud = 0;
-unsigned int hciuartscale = 0;
-unsigned int hciuartstep = 0;
-
-module_param(ar3khcibaud, int, 0644);
-module_param(hciuartscale, int, 0644);
-module_param(hciuartstep, int, 0644);
-#else
-extern unsigned int ar3khcibaud;
-extern unsigned int hciuartscale;
-extern unsigned int hciuartstep;
-#endif /* EXPORT_HCI_BRIDGE_INTERFACE */
-
-struct ar6k_hci_bridge_info {
- void *pHCIDev; /* HCI bridge device */
- struct hci_transport_properties HCIProps; /* HCI bridge props */
- struct hci_dev *pBtStackHCIDev; /* BT Stack HCI dev */
- bool HciNormalMode; /* Actual HCI mode enabled (non-TEST)*/
- bool HciRegistered; /* HCI device registered with stack */
- struct htc_packet_queue HTCPacketStructHead;
- u8 *pHTCStructAlloc;
- spinlock_t BridgeLock;
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
- struct hci_transport_misc_handles HCITransHdl;
-#else
- struct ar6_softc *ar;
-#endif /* EXPORT_HCI_BRIDGE_INTERFACE */
-};
-
-#define MAX_ACL_RECV_BUFS 16
-#define MAX_EVT_RECV_BUFS 8
-#define MAX_HCI_WRITE_QUEUE_DEPTH 32
-#define MAX_ACL_RECV_LENGTH 1200
-#define MAX_EVT_RECV_LENGTH 257
-#define TX_PACKET_RSV_OFFSET 32
-#define NUM_HTC_PACKET_STRUCTS ((MAX_ACL_RECV_BUFS + MAX_EVT_RECV_BUFS + MAX_HCI_WRITE_QUEUE_DEPTH) * 2)
-
-#define HCI_GET_OP_CODE(p) (((u16)((p)[1])) << 8) | ((u16)((p)[0]))
-
-extern unsigned int setupbtdev;
-struct ar3k_config_info ar3kconfig;
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-struct ar6k_hci_bridge_info *g_pHcidevInfo;
-#endif
-
-static int bt_setup_hci(struct ar6k_hci_bridge_info *pHcidevInfo);
-static void bt_cleanup_hci(struct ar6k_hci_bridge_info *pHcidevInfo);
-static int bt_register_hci(struct ar6k_hci_bridge_info *pHcidevInfo);
-static bool bt_indicate_recv(struct ar6k_hci_bridge_info *pHcidevInfo,
- HCI_TRANSPORT_PACKET_TYPE Type,
- struct sk_buff *skb);
-static struct sk_buff *bt_alloc_buffer(struct ar6k_hci_bridge_info *pHcidevInfo, int Length);
-static void bt_free_buffer(struct ar6k_hci_bridge_info *pHcidevInfo, struct sk_buff *skb);
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-int ar6000_setup_hci(void *ar);
-void ar6000_cleanup_hci(void *ar);
-int hci_test_send(void *ar, struct sk_buff *skb);
-#else
-int ar6000_setup_hci(struct ar6_softc *ar);
-void ar6000_cleanup_hci(struct ar6_softc *ar);
-/* HCI bridge testing */
-int hci_test_send(struct ar6_softc *ar, struct sk_buff *skb);
-#endif /* EXPORT_HCI_BRIDGE_INTERFACE */
-
-#define LOCK_BRIDGE(dev) spin_lock_bh(&(dev)->BridgeLock)
-#define UNLOCK_BRIDGE(dev) spin_unlock_bh(&(dev)->BridgeLock)
-
-static inline void FreeBtOsBuf(struct ar6k_hci_bridge_info *pHcidevInfo, void *osbuf)
-{
- if (pHcidevInfo->HciNormalMode) {
- bt_free_buffer(pHcidevInfo, (struct sk_buff *)osbuf);
- } else {
- /* in test mode, these are just ordinary netbuf allocations */
- A_NETBUF_FREE(osbuf);
- }
-}
-
-static void FreeHTCStruct(struct ar6k_hci_bridge_info *pHcidevInfo, struct htc_packet *pPacket)
-{
- LOCK_BRIDGE(pHcidevInfo);
- HTC_PACKET_ENQUEUE(&pHcidevInfo->HTCPacketStructHead,pPacket);
- UNLOCK_BRIDGE(pHcidevInfo);
-}
-
-static struct htc_packet * AllocHTCStruct(struct ar6k_hci_bridge_info *pHcidevInfo)
-{
- struct htc_packet *pPacket = NULL;
- LOCK_BRIDGE(pHcidevInfo);
- pPacket = HTC_PACKET_DEQUEUE(&pHcidevInfo->HTCPacketStructHead);
- UNLOCK_BRIDGE(pHcidevInfo);
- return pPacket;
-}
-
-#define BLOCK_ROUND_UP_PWR2(x, align) (((int) (x) + ((align)-1)) & ~((align)-1))
-
-static void RefillRecvBuffers(struct ar6k_hci_bridge_info *pHcidevInfo,
- HCI_TRANSPORT_PACKET_TYPE Type,
- int NumBuffers)
-{
- int length, i;
- void *osBuf = NULL;
- struct htc_packet_queue queue;
- struct htc_packet *pPacket;
-
- INIT_HTC_PACKET_QUEUE(&queue);
-
- if (Type == HCI_ACL_TYPE) {
- if (pHcidevInfo->HciNormalMode) {
- length = HCI_MAX_FRAME_SIZE;
- } else {
- length = MAX_ACL_RECV_LENGTH;
- }
- } else {
- length = MAX_EVT_RECV_LENGTH;
- }
-
- /* add on transport head and tail room */
- length += pHcidevInfo->HCIProps.HeadRoom + pHcidevInfo->HCIProps.TailRoom;
- /* round up to the required I/O padding */
- length = BLOCK_ROUND_UP_PWR2(length,pHcidevInfo->HCIProps.IOBlockPad);
-
- for (i = 0; i < NumBuffers; i++) {
-
- if (pHcidevInfo->HciNormalMode) {
- osBuf = bt_alloc_buffer(pHcidevInfo,length);
- } else {
- osBuf = A_NETBUF_ALLOC(length);
- }
-
- if (NULL == osBuf) {
- break;
- }
-
- pPacket = AllocHTCStruct(pHcidevInfo);
- if (NULL == pPacket) {
- FreeBtOsBuf(pHcidevInfo,osBuf);
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to alloc HTC struct \n"));
- break;
- }
-
- SET_HTC_PACKET_INFO_RX_REFILL(pPacket,osBuf,A_NETBUF_DATA(osBuf),length,Type);
- /* add to queue */
- HTC_PACKET_ENQUEUE(&queue,pPacket);
- }
-
- if (i > 0) {
- HCI_TransportAddReceivePkts(pHcidevInfo->pHCIDev, &queue);
- }
-}
-
-#define HOST_INTEREST_ITEM_ADDRESS(ar, item) \
- (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_HOST_INTEREST_ITEM_ADDRESS(item) : \
- (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
-static int ar6000_hci_transport_ready(HCI_TRANSPORT_HANDLE HCIHandle,
- struct hci_transport_properties *pProps,
- void *pContext)
-{
- struct ar6k_hci_bridge_info *pHcidevInfo = (struct ar6k_hci_bridge_info *)pContext;
- int status;
- u32 address, hci_uart_pwr_mgmt_params;
-// struct ar3k_config_info ar3kconfig;
-
- pHcidevInfo->pHCIDev = HCIHandle;
-
- memcpy(&pHcidevInfo->HCIProps,pProps,sizeof(*pProps));
-
- AR_DEBUG_PRINTF(ATH_DEBUG_HCI_BRIDGE,("HCI ready (hci:0x%lX, headroom:%d, tailroom:%d blockpad:%d) \n",
- (unsigned long)HCIHandle,
- pHcidevInfo->HCIProps.HeadRoom,
- pHcidevInfo->HCIProps.TailRoom,
- pHcidevInfo->HCIProps.IOBlockPad));
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
- A_ASSERT((pProps->HeadRoom + pProps->TailRoom) <= (struct net_device *)(pHcidevInfo->HCITransHdl.netDevice)->hard_header_len);
-#else
- A_ASSERT((pProps->HeadRoom + pProps->TailRoom) <= pHcidevInfo->ar->arNetDev->hard_header_len);
-#endif
-
- /* provide buffers */
- RefillRecvBuffers(pHcidevInfo, HCI_ACL_TYPE, MAX_ACL_RECV_BUFS);
- RefillRecvBuffers(pHcidevInfo, HCI_EVENT_TYPE, MAX_EVT_RECV_BUFS);
-
- do {
- /* start transport */
- status = HCI_TransportStart(pHcidevInfo->pHCIDev);
-
- if (status) {
- break;
- }
-
- if (!pHcidevInfo->HciNormalMode) {
- /* in test mode, no need to go any further */
- break;
- }
-
- // The delay is required when AR6K is driving the BT reset line
- // where time is needed after the BT chip is out of reset (HCI_TransportStart)
- // and before the first HCI command is issued (AR3KConfigure)
- // FIXME
- // The delay should be configurable and be only applied when AR6K driving the BT
- // reset line. This could be done by some module parameter or based on some HW config
- // info. For now apply 100ms delay blindly
- A_MDELAY(100);
-
- A_MEMZERO(&ar3kconfig,sizeof(ar3kconfig));
- ar3kconfig.pHCIDev = pHcidevInfo->pHCIDev;
- ar3kconfig.pHCIProps = &pHcidevInfo->HCIProps;
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
- ar3kconfig.pHIFDevice = (struct hif_device *)(pHcidevInfo->HCITransHdl.hifDevice);
-#else
- ar3kconfig.pHIFDevice = pHcidevInfo->ar->arHifDevice;
-#endif
- ar3kconfig.pBtStackHCIDev = pHcidevInfo->pBtStackHCIDev;
-
- if (ar3khcibaud != 0) {
- /* user wants ar3k baud rate change */
- ar3kconfig.Flags |= AR3K_CONFIG_FLAG_SET_AR3K_BAUD;
- ar3kconfig.Flags |= AR3K_CONFIG_FLAG_AR3K_BAUD_CHANGE_DELAY;
- ar3kconfig.AR3KBaudRate = ar3khcibaud;
- }
-
- if ((hciuartscale != 0) || (hciuartstep != 0)) {
- /* user wants to tune HCI bridge UART scale/step values */
- ar3kconfig.AR6KScale = (u16)hciuartscale;
- ar3kconfig.AR6KStep = (u16)hciuartstep;
- ar3kconfig.Flags |= AR3K_CONFIG_FLAG_SET_AR6K_SCALE_STEP;
- }
-
- /* Fetch the address of the hi_hci_uart_pwr_mgmt_params instance in the host interest area */
- address = TARG_VTOP(pHcidevInfo->ar->arTargetType,
- HOST_INTEREST_ITEM_ADDRESS(pHcidevInfo->ar, hi_hci_uart_pwr_mgmt_params));
- status = ar6000_ReadRegDiag(pHcidevInfo->ar->arHifDevice, &address, &hci_uart_pwr_mgmt_params);
- if (0 == status) {
- ar3kconfig.PwrMgmtEnabled = (hci_uart_pwr_mgmt_params & 0x1);
- ar3kconfig.IdleTimeout = (hci_uart_pwr_mgmt_params & 0xFFFF0000) >> 16;
- ar3kconfig.WakeupTimeout = (hci_uart_pwr_mgmt_params & 0xFF00) >> 8;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: failed to read hci_uart_pwr_mgmt_params! \n"));
- }
- /* configure the AR3K device */
- memcpy(ar3kconfig.bdaddr,pHcidevInfo->ar->bdaddr,6);
- status = AR3KConfigure(&ar3kconfig);
- if (status) {
- break;
- }
-
- /* Make sure both AR6K and AR3K have power management enabled */
- if (ar3kconfig.PwrMgmtEnabled) {
- status = HCI_TransportEnablePowerMgmt(pHcidevInfo->pHCIDev, true);
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: failed to enable TLPM for AR6K! \n"));
- }
- }
-
- status = bt_register_hci(pHcidevInfo);
-
- } while (false);
-
- return status;
-}
-
-static void ar6000_hci_transport_failure(void *pContext, int Status)
-{
- struct ar6k_hci_bridge_info *pHcidevInfo = (struct ar6k_hci_bridge_info *)pContext;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: transport failure! \n"));
-
- if (pHcidevInfo->HciNormalMode) {
- /* TODO .. */
- }
-}
-
-static void ar6000_hci_transport_removed(void *pContext)
-{
- struct ar6k_hci_bridge_info *pHcidevInfo = (struct ar6k_hci_bridge_info *)pContext;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_HCI_BRIDGE, ("HCI Bridge: transport removed. \n"));
-
- A_ASSERT(pHcidevInfo->pHCIDev != NULL);
-
- HCI_TransportDetach(pHcidevInfo->pHCIDev);
- bt_cleanup_hci(pHcidevInfo);
- pHcidevInfo->pHCIDev = NULL;
-}
-
-static void ar6000_hci_send_complete(void *pContext, struct htc_packet *pPacket)
-{
- struct ar6k_hci_bridge_info *pHcidevInfo = (struct ar6k_hci_bridge_info *)pContext;
- void *osbuf = pPacket->pPktContext;
- A_ASSERT(osbuf != NULL);
- A_ASSERT(pHcidevInfo != NULL);
-
- if (pPacket->Status) {
- if ((pPacket->Status != A_ECANCELED) && (pPacket->Status != A_NO_RESOURCE)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: Send Packet Failed: %d \n",pPacket->Status));
- }
- }
-
- FreeHTCStruct(pHcidevInfo,pPacket);
- FreeBtOsBuf(pHcidevInfo,osbuf);
-
-}
-
-static void ar6000_hci_pkt_recv(void *pContext, struct htc_packet *pPacket)
-{
- struct ar6k_hci_bridge_info *pHcidevInfo = (struct ar6k_hci_bridge_info *)pContext;
- struct sk_buff *skb;
-
- A_ASSERT(pHcidevInfo != NULL);
- skb = (struct sk_buff *)pPacket->pPktContext;
- A_ASSERT(skb != NULL);
-
- do {
-
- if (pPacket->Status) {
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_HCI_RECV,
- ("HCI Bridge, packet received type : %d len:%d \n",
- HCI_GET_PACKET_TYPE(pPacket),pPacket->ActualLength));
-
- /* set the actual buffer position in the os buffer, HTC recv buffers posted to HCI are set
- * to fill the front of the buffer */
- A_NETBUF_PUT(skb,pPacket->ActualLength + pHcidevInfo->HCIProps.HeadRoom);
- A_NETBUF_PULL(skb,pHcidevInfo->HCIProps.HeadRoom);
-
- if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_HCI_DUMP)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("<<< Recv HCI %s packet len:%d \n",
- (HCI_GET_PACKET_TYPE(pPacket) == HCI_EVENT_TYPE) ? "EVENT" : "ACL",
- skb->len));
- AR_DEBUG_PRINTBUF(skb->data, skb->len,"BT HCI RECV Packet Dump");
- }
-
- if (pHcidevInfo->HciNormalMode) {
- /* indicate the packet */
- if (bt_indicate_recv(pHcidevInfo,HCI_GET_PACKET_TYPE(pPacket),skb)) {
- /* bt stack accepted the packet */
- skb = NULL;
- }
- break;
- }
-
- /* for testing, indicate packet to the network stack */
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
- skb->dev = (struct net_device *)(pHcidevInfo->HCITransHdl.netDevice);
- if ((((struct net_device *)pHcidevInfo->HCITransHdl.netDevice)->flags & IFF_UP) == IFF_UP) {
- skb->protocol = eth_type_trans(skb, (struct net_device *)(pHcidevInfo->HCITransHdl.netDevice));
-#else
- skb->dev = pHcidevInfo->ar->arNetDev;
- if ((pHcidevInfo->ar->arNetDev->flags & IFF_UP) == IFF_UP) {
- skb->protocol = eth_type_trans(skb, pHcidevInfo->ar->arNetDev);
-#endif
- netif_rx(skb);
- skb = NULL;
- }
-
- } while (false);
-
- FreeHTCStruct(pHcidevInfo,pPacket);
-
- if (skb != NULL) {
- /* packet was not accepted, free it */
- FreeBtOsBuf(pHcidevInfo,skb);
- }
-
-}
-
-static void ar6000_hci_pkt_refill(void *pContext, HCI_TRANSPORT_PACKET_TYPE Type, int BuffersAvailable)
-{
- struct ar6k_hci_bridge_info *pHcidevInfo = (struct ar6k_hci_bridge_info *)pContext;
- int refillCount;
-
- if (Type == HCI_ACL_TYPE) {
- refillCount = MAX_ACL_RECV_BUFS - BuffersAvailable;
- } else {
- refillCount = MAX_EVT_RECV_BUFS - BuffersAvailable;
- }
-
- if (refillCount > 0) {
- RefillRecvBuffers(pHcidevInfo,Type,refillCount);
- }
-
-}
-
-static HCI_SEND_FULL_ACTION ar6000_hci_pkt_send_full(void *pContext, struct htc_packet *pPacket)
-{
- struct ar6k_hci_bridge_info *pHcidevInfo = (struct ar6k_hci_bridge_info *)pContext;
- HCI_SEND_FULL_ACTION action = HCI_SEND_FULL_KEEP;
-
- if (!pHcidevInfo->HciNormalMode) {
- /* for epping testing, check packet tag, some epping packets are
- * special and cannot be dropped */
- if (HTC_GET_TAG_FROM_PKT(pPacket) == AR6K_DATA_PKT_TAG) {
- action = HCI_SEND_FULL_DROP;
- }
- }
-
- return action;
-}
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-int ar6000_setup_hci(void *ar)
-#else
-int ar6000_setup_hci(struct ar6_softc *ar)
-#endif
-{
- struct hci_transport_config_info config;
- int status = 0;
- int i;
- struct htc_packet *pPacket;
- struct ar6k_hci_bridge_info *pHcidevInfo;
-
-
- do {
-
- pHcidevInfo = (struct ar6k_hci_bridge_info *)A_MALLOC(sizeof(struct ar6k_hci_bridge_info));
-
- if (NULL == pHcidevInfo) {
- status = A_NO_MEMORY;
- break;
- }
-
- A_MEMZERO(pHcidevInfo, sizeof(struct ar6k_hci_bridge_info));
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
- g_pHcidevInfo = pHcidevInfo;
- pHcidevInfo->HCITransHdl = *(struct hci_transport_misc_handles *)ar;
-#else
- ar->hcidev_info = pHcidevInfo;
- pHcidevInfo->ar = ar;
-#endif
- spin_lock_init(&pHcidevInfo->BridgeLock);
- INIT_HTC_PACKET_QUEUE(&pHcidevInfo->HTCPacketStructHead);
-
- ar->exitCallback = AR3KConfigureExit;
-
- status = bt_setup_hci(pHcidevInfo);
- if (status) {
- break;
- }
-
- if (pHcidevInfo->HciNormalMode) {
- AR_DEBUG_PRINTF(ATH_DEBUG_HCI_BRIDGE, ("HCI Bridge: running in normal mode... \n"));
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_HCI_BRIDGE, ("HCI Bridge: running in test mode... \n"));
- }
-
- pHcidevInfo->pHTCStructAlloc = (u8 *)A_MALLOC((sizeof(struct htc_packet)) * NUM_HTC_PACKET_STRUCTS);
-
- if (NULL == pHcidevInfo->pHTCStructAlloc) {
- status = A_NO_MEMORY;
- break;
- }
-
- pPacket = (struct htc_packet *)pHcidevInfo->pHTCStructAlloc;
- for (i = 0; i < NUM_HTC_PACKET_STRUCTS; i++,pPacket++) {
- FreeHTCStruct(pHcidevInfo,pPacket);
- }
-
- A_MEMZERO(&config,sizeof(struct hci_transport_config_info));
- config.ACLRecvBufferWaterMark = MAX_ACL_RECV_BUFS / 2;
- config.EventRecvBufferWaterMark = MAX_EVT_RECV_BUFS / 2;
- config.MaxSendQueueDepth = MAX_HCI_WRITE_QUEUE_DEPTH;
- config.pContext = pHcidevInfo;
- config.TransportFailure = ar6000_hci_transport_failure;
- config.TransportReady = ar6000_hci_transport_ready;
- config.TransportRemoved = ar6000_hci_transport_removed;
- config.pHCISendComplete = ar6000_hci_send_complete;
- config.pHCIPktRecv = ar6000_hci_pkt_recv;
- config.pHCIPktRecvRefill = ar6000_hci_pkt_refill;
- config.pHCISendFull = ar6000_hci_pkt_send_full;
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
- pHcidevInfo->pHCIDev = HCI_TransportAttach(pHcidevInfo->HCITransHdl.htcHandle, &config);
-#else
- pHcidevInfo->pHCIDev = HCI_TransportAttach(ar->arHtcTarget, &config);
-#endif
-
- if (NULL == pHcidevInfo->pHCIDev) {
- status = A_ERROR;
- }
-
- } while (false);
-
- if (status) {
- if (pHcidevInfo != NULL) {
- if (NULL == pHcidevInfo->pHCIDev) {
- /* GMBOX may not be present in older chips */
- /* just return success */
- status = 0;
- }
- }
- ar6000_cleanup_hci(ar);
- }
-
- return status;
-}
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-void ar6000_cleanup_hci(void *ar)
-#else
-void ar6000_cleanup_hci(struct ar6_softc *ar)
-#endif
-{
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
- struct ar6k_hci_bridge_info *pHcidevInfo = g_pHcidevInfo;
-#else
- struct ar6k_hci_bridge_info *pHcidevInfo = (struct ar6k_hci_bridge_info *)ar->hcidev_info;
-#endif
-
- if (pHcidevInfo != NULL) {
- bt_cleanup_hci(pHcidevInfo);
-
- if (pHcidevInfo->pHCIDev != NULL) {
- HCI_TransportStop(pHcidevInfo->pHCIDev);
- HCI_TransportDetach(pHcidevInfo->pHCIDev);
- pHcidevInfo->pHCIDev = NULL;
- }
-
- if (pHcidevInfo->pHTCStructAlloc != NULL) {
- kfree(pHcidevInfo->pHTCStructAlloc);
- pHcidevInfo->pHTCStructAlloc = NULL;
- }
-
- kfree(pHcidevInfo);
-#ifndef EXPORT_HCI_BRIDGE_INTERFACE
- ar->hcidev_info = NULL;
-#endif
- }
-
-
-}
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-int hci_test_send(void *ar, struct sk_buff *skb)
-#else
-int hci_test_send(struct ar6_softc *ar, struct sk_buff *skb)
-#endif
-{
- int status = 0;
- int length;
- EPPING_HEADER *pHeader;
- struct htc_packet *pPacket;
- HTC_TX_TAG htc_tag = AR6K_DATA_PKT_TAG;
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
- struct ar6k_hci_bridge_info *pHcidevInfo = g_pHcidevInfo;
-#else
- struct ar6k_hci_bridge_info *pHcidevInfo = (struct ar6k_hci_bridge_info *)ar->hcidev_info;
-#endif
-
- do {
-
- if (NULL == pHcidevInfo) {
- status = A_ERROR;
- break;
- }
-
- if (NULL == pHcidevInfo->pHCIDev) {
- status = A_ERROR;
- break;
- }
-
- if (pHcidevInfo->HciNormalMode) {
- /* this interface cannot run when normal WMI is running */
- status = A_ERROR;
- break;
- }
-
- pHeader = (EPPING_HEADER *)A_NETBUF_DATA(skb);
-
- if (!IS_EPPING_PACKET(pHeader)) {
- status = A_EINVAL;
- break;
- }
-
- if (IS_EPING_PACKET_NO_DROP(pHeader)) {
- htc_tag = AR6K_CONTROL_PKT_TAG;
- }
-
- length = sizeof(EPPING_HEADER) + pHeader->DataLength;
-
- pPacket = AllocHTCStruct(pHcidevInfo);
- if (NULL == pPacket) {
- status = A_NO_MEMORY;
- break;
- }
-
- SET_HTC_PACKET_INFO_TX(pPacket,
- skb,
- A_NETBUF_DATA(skb),
- length,
- HCI_ACL_TYPE, /* send every thing out as ACL */
- htc_tag);
-
- HCI_TransportSendPkt(pHcidevInfo->pHCIDev,pPacket,false);
- pPacket = NULL;
-
- } while (false);
-
- return status;
-}
-
-void ar6000_set_default_ar3kconfig(struct ar6_softc *ar, void *ar3kconfig)
-{
- struct ar6k_hci_bridge_info *pHcidevInfo = (struct ar6k_hci_bridge_info *)ar->hcidev_info;
- struct ar3k_config_info *config = (struct ar3k_config_info *)ar3kconfig;
-
- config->pHCIDev = pHcidevInfo->pHCIDev;
- config->pHCIProps = &pHcidevInfo->HCIProps;
- config->pHIFDevice = ar->arHifDevice;
- config->pBtStackHCIDev = pHcidevInfo->pBtStackHCIDev;
- config->Flags |= AR3K_CONFIG_FLAG_SET_AR3K_BAUD;
- config->AR3KBaudRate = 115200;
-}
-
-#ifdef CONFIG_BLUEZ_HCI_BRIDGE
-/*** BT Stack Entrypoints *******/
-
-/*
- * bt_open - open a handle to the device
-*/
-static int bt_open(struct hci_dev *hdev)
-{
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HCI Bridge: bt_open - enter - x\n"));
- set_bit(HCI_RUNNING, &hdev->flags);
- set_bit(HCI_UP, &hdev->flags);
- set_bit(HCI_INIT, &hdev->flags);
- return 0;
-}
-
-/*
- * bt_close - close handle to the device
-*/
-static int bt_close(struct hci_dev *hdev)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HCI Bridge: bt_close - enter\n"));
- clear_bit(HCI_RUNNING, &hdev->flags);
- return 0;
-}
-
-/*
- * bt_send_frame - send data frames
-*/
-static int bt_send_frame(struct sk_buff *skb)
-{
- struct hci_dev *hdev = (struct hci_dev *)skb->dev;
- HCI_TRANSPORT_PACKET_TYPE type;
- struct ar6k_hci_bridge_info *pHcidevInfo;
- struct htc_packet *pPacket;
- int status = 0;
- struct sk_buff *txSkb = NULL;
-
- if (!hdev) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("HCI Bridge: bt_send_frame - no device\n"));
- return -ENODEV;
- }
-
- if (!test_bit(HCI_RUNNING, &hdev->flags)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HCI Bridge: bt_send_frame - not open\n"));
- return -EBUSY;
- }
-
- pHcidevInfo = (struct ar6k_hci_bridge_info *)hdev->driver_data;
- A_ASSERT(pHcidevInfo != NULL);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_HCI_SEND, ("+bt_send_frame type: %d \n",bt_cb(skb)->pkt_type));
- type = HCI_COMMAND_TYPE;
-
- switch (bt_cb(skb)->pkt_type) {
- case HCI_COMMAND_PKT:
- type = HCI_COMMAND_TYPE;
- hdev->stat.cmd_tx++;
- break;
-
- case HCI_ACLDATA_PKT:
- type = HCI_ACL_TYPE;
- hdev->stat.acl_tx++;
- break;
-
- case HCI_SCODATA_PKT:
- /* we don't support SCO over the bridge */
- kfree_skb(skb);
- return 0;
- default:
- A_ASSERT(false);
- kfree_skb(skb);
- return 0;
- }
-
- if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_HCI_DUMP)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,(">>> Send HCI %s packet len: %d\n",
- (type == HCI_COMMAND_TYPE) ? "COMMAND" : "ACL",
- skb->len));
- if (type == HCI_COMMAND_TYPE) {
- u16 opcode = HCI_GET_OP_CODE(skb->data);
- AR_DEBUG_PRINTF(ATH_DEBUG_ANY,(" HCI Command: OGF:0x%X OCF:0x%X \r\n",
- opcode >> 10, opcode & 0x3FF));
- }
- AR_DEBUG_PRINTBUF(skb->data,skb->len,"BT HCI SEND Packet Dump");
- }
-
- do {
-
- txSkb = bt_skb_alloc(TX_PACKET_RSV_OFFSET + pHcidevInfo->HCIProps.HeadRoom +
- pHcidevInfo->HCIProps.TailRoom + skb->len,
- GFP_ATOMIC);
-
- if (txSkb == NULL) {
- status = A_NO_MEMORY;
- break;
- }
-
- bt_cb(txSkb)->pkt_type = bt_cb(skb)->pkt_type;
- txSkb->dev = (void *)pHcidevInfo->pBtStackHCIDev;
- skb_reserve(txSkb, TX_PACKET_RSV_OFFSET + pHcidevInfo->HCIProps.HeadRoom);
- memcpy(txSkb->data, skb->data, skb->len);
- skb_put(txSkb,skb->len);
-
- pPacket = AllocHTCStruct(pHcidevInfo);
- if (NULL == pPacket) {
- status = A_NO_MEMORY;
- break;
- }
-
- /* HCI packet length here doesn't include the 1-byte transport header which
- * will be handled by the HCI transport layer. Enough headroom has already
- * been reserved above for the transport header
- */
- SET_HTC_PACKET_INFO_TX(pPacket,
- txSkb,
- txSkb->data,
- txSkb->len,
- type,
- AR6K_CONTROL_PKT_TAG); /* HCI packets cannot be dropped */
-
- AR_DEBUG_PRINTF(ATH_DEBUG_HCI_SEND, ("HCI Bridge: bt_send_frame skb:0x%lX \n",(unsigned long)txSkb));
- AR_DEBUG_PRINTF(ATH_DEBUG_HCI_SEND, ("HCI Bridge: type:%d, Total Length:%d Bytes \n",
- type, txSkb->len));
-
- status = HCI_TransportSendPkt(pHcidevInfo->pHCIDev,pPacket,false);
- pPacket = NULL;
- txSkb = NULL;
-
- } while (false);
-
- if (txSkb != NULL) {
- kfree_skb(txSkb);
- }
-
- kfree_skb(skb);
-
- AR_DEBUG_PRINTF(ATH_DEBUG_HCI_SEND, ("-bt_send_frame \n"));
- return 0;
-}
-
-/*
- * bt_ioctl - ioctl processing
-*/
-static int bt_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HCI Bridge: bt_ioctl - enter\n"));
- return -ENOIOCTLCMD;
-}
-
-/*
- * bt_flush - flush outstandingbpackets
-*/
-static int bt_flush(struct hci_dev *hdev)
-{
- struct ar6k_hci_bridge_info *pHcidevInfo;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HCI Bridge: bt_flush - enter\n"));
-
- pHcidevInfo = (struct ar6k_hci_bridge_info *)hdev->driver_data;
-
- /* TODO??? */
-
- return 0;
-}
-
-
-/*
- * bt_destruct -
-*/
-static void bt_destruct(struct hci_dev *hdev)
-{
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HCI Bridge: bt_destruct - enter\n"));
- /* nothing to do here */
-}
-
-static int bt_setup_hci(struct ar6k_hci_bridge_info *pHcidevInfo)
-{
- int status = 0;
- struct hci_dev *pHciDev = NULL;
- struct hif_device_os_device_info osDevInfo;
-
- if (!setupbtdev) {
- return 0;
- }
-
- do {
-
- A_MEMZERO(&osDevInfo,sizeof(osDevInfo));
- /* get the underlying OS device */
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
- status = ar6000_get_hif_dev((struct hif_device *)(pHcidevInfo->HCITransHdl.hifDevice),
- &osDevInfo);
-#else
- status = HIFConfigureDevice(pHcidevInfo->ar->arHifDevice,
- HIF_DEVICE_GET_OS_DEVICE,
- &osDevInfo,
- sizeof(osDevInfo));
-#endif
-
- if (status) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to OS device info from HIF\n"));
- break;
- }
-
- /* allocate a BT HCI struct for this device */
- pHciDev = hci_alloc_dev();
- if (NULL == pHciDev) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge - failed to allocate bt struct \n"));
- status = A_NO_MEMORY;
- break;
- }
- /* save the device, we'll register this later */
- pHcidevInfo->pBtStackHCIDev = pHciDev;
- SET_HCIDEV_DEV(pHciDev,osDevInfo.pOSDevice);
- SET_HCI_BUS_TYPE(pHciDev, HCI_VIRTUAL, HCI_BREDR);
- pHciDev->driver_data = pHcidevInfo;
- pHciDev->open = bt_open;
- pHciDev->close = bt_close;
- pHciDev->send = bt_send_frame;
- pHciDev->ioctl = bt_ioctl;
- pHciDev->flush = bt_flush;
- pHciDev->destruct = bt_destruct;
- pHciDev->owner = THIS_MODULE;
- /* driver is running in normal BT mode */
- pHcidevInfo->HciNormalMode = true;
-
- } while (false);
-
- if (status) {
- bt_cleanup_hci(pHcidevInfo);
- }
-
- return status;
-}
-
-static void bt_cleanup_hci(struct ar6k_hci_bridge_info *pHcidevInfo)
-{
- int err;
-
- if (pHcidevInfo->HciRegistered) {
- pHcidevInfo->HciRegistered = false;
- clear_bit(HCI_RUNNING, &pHcidevInfo->pBtStackHCIDev->flags);
- clear_bit(HCI_UP, &pHcidevInfo->pBtStackHCIDev->flags);
- clear_bit(HCI_INIT, &pHcidevInfo->pBtStackHCIDev->flags);
- A_ASSERT(pHcidevInfo->pBtStackHCIDev != NULL);
- /* unregister */
- if ((err = hci_unregister_dev(pHcidevInfo->pBtStackHCIDev)) < 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: failed to unregister with bluetooth %d\n",err));
- }
- }
-
- kfree(pHcidevInfo->pBtStackHCIDev);
- pHcidevInfo->pBtStackHCIDev = NULL;
-}
-
-static int bt_register_hci(struct ar6k_hci_bridge_info *pHcidevInfo)
-{
- int err;
- int status = 0;
-
- do {
- AR_DEBUG_PRINTF(ATH_DEBUG_HCI_BRIDGE, ("HCI Bridge: registering HCI... \n"));
- A_ASSERT(pHcidevInfo->pBtStackHCIDev != NULL);
- /* mark that we are registered */
- pHcidevInfo->HciRegistered = true;
- if ((err = hci_register_dev(pHcidevInfo->pBtStackHCIDev)) < 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: failed to register with bluetooth %d\n",err));
- pHcidevInfo->HciRegistered = false;
- status = A_ERROR;
- break;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_HCI_BRIDGE, ("HCI Bridge: HCI registered \n"));
-
- } while (false);
-
- return status;
-}
-
-static bool bt_indicate_recv(struct ar6k_hci_bridge_info *pHcidevInfo,
- HCI_TRANSPORT_PACKET_TYPE Type,
- struct sk_buff *skb)
-{
- u8 btType;
- int len;
- bool success = false;
- BT_HCI_EVENT_HEADER *pEvent;
-
- do {
-
- if (!test_bit(HCI_RUNNING, &pHcidevInfo->pBtStackHCIDev->flags)) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("HCI Bridge: bt_indicate_recv - not running\n"));
- break;
- }
-
- switch (Type) {
- case HCI_ACL_TYPE:
- btType = HCI_ACLDATA_PKT;
- break;
- case HCI_EVENT_TYPE:
- btType = HCI_EVENT_PKT;
- break;
- default:
- btType = 0;
- A_ASSERT(false);
- break;
- }
-
- if (0 == btType) {
- break;
- }
-
- /* set the final type */
- bt_cb(skb)->pkt_type = btType;
- /* set dev */
- skb->dev = (void *)pHcidevInfo->pBtStackHCIDev;
- len = skb->len;
-
- if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_HCI_RECV)) {
- if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
- pEvent = (BT_HCI_EVENT_HEADER *)skb->data;
- AR_DEBUG_PRINTF(ATH_DEBUG_HCI_RECV, ("BT HCI EventCode: %d, len:%d \n",
- pEvent->EventCode, pEvent->ParamLength));
- }
- }
-
- /* pass receive packet up the stack */
- if (hci_recv_frame(skb) != 0) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HCI Bridge: hci_recv_frame failed \n"));
- break;
- } else {
- AR_DEBUG_PRINTF(ATH_DEBUG_HCI_RECV,
- ("HCI Bridge: Indicated RCV of type:%d, Length:%d \n",btType,len));
- }
-
- success = true;
-
- } while (false);
-
- return success;
-}
-
-static struct sk_buff* bt_alloc_buffer(struct ar6k_hci_bridge_info *pHcidevInfo, int Length)
-{
- struct sk_buff *skb;
- /* in normal HCI mode we need to alloc from the bt core APIs */
- skb = bt_skb_alloc(Length, GFP_ATOMIC);
- if (NULL == skb) {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to alloc bt sk_buff \n"));
- }
- return skb;
-}
-
-static void bt_free_buffer(struct ar6k_hci_bridge_info *pHcidevInfo, struct sk_buff *skb)
-{
- kfree_skb(skb);
-}
-
-#else // { CONFIG_BLUEZ_HCI_BRIDGE
-
- /* stubs when we only want to test the HCI bridging Interface without the HT stack */
-static int bt_setup_hci(struct ar6k_hci_bridge_info *pHcidevInfo)
-{
- return 0;
-}
-static void bt_cleanup_hci(struct ar6k_hci_bridge_info *pHcidevInfo)
-{
-
-}
-static int bt_register_hci(struct ar6k_hci_bridge_info *pHcidevInfo)
-{
- A_ASSERT(false);
- return A_ERROR;
-}
-
-static bool bt_indicate_recv(struct ar6k_hci_bridge_info *pHcidevInfo,
- HCI_TRANSPORT_PACKET_TYPE Type,
- struct sk_buff *skb)
-{
- A_ASSERT(false);
- return false;
-}
-
-static struct sk_buff* bt_alloc_buffer(struct ar6k_hci_bridge_info *pHcidevInfo, int Length)
-{
- A_ASSERT(false);
- return NULL;
-}
-static void bt_free_buffer(struct ar6k_hci_bridge_info *pHcidevInfo, struct sk_buff *skb)
-{
- A_ASSERT(false);
-}
-
-#endif // } CONFIG_BLUEZ_HCI_BRIDGE
-
-#else // { ATH_AR6K_ENABLE_GMBOX
-
- /* stubs when GMBOX support is not needed */
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-int ar6000_setup_hci(void *ar)
-#else
-int ar6000_setup_hci(struct ar6_softc *ar)
-#endif
-{
- return 0;
-}
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-void ar6000_cleanup_hci(void *ar)
-#else
-void ar6000_cleanup_hci(struct ar6_softc *ar)
-#endif
-{
- return;
-}
-
-#ifndef EXPORT_HCI_BRIDGE_INTERFACE
-void ar6000_set_default_ar3kconfig(struct ar6_softc *ar, void *ar3kconfig)
-{
- return;
-}
-#endif
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-int hci_test_send(void *ar, struct sk_buff *skb)
-#else
-int hci_test_send(struct ar6_softc *ar, struct sk_buff *skb)
-#endif
-{
- return -EOPNOTSUPP;
-}
-
-#endif // } ATH_AR6K_ENABLE_GMBOX
-
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-static int __init
-hcibridge_init_module(void)
-{
- int status;
- struct hci_transport_callbacks hciTransCallbacks;
-
- hciTransCallbacks.setupTransport = ar6000_setup_hci;
- hciTransCallbacks.cleanupTransport = ar6000_cleanup_hci;
-
- status = ar6000_register_hci_transport(&hciTransCallbacks);
- if (status)
- return -ENODEV;
-
- return 0;
-}
-
-static void __exit
-hcibridge_cleanup_module(void)
-{
-}
-
-module_init(hcibridge_init_module);
-module_exit(hcibridge_cleanup_module);
-MODULE_LICENSE("Dual BSD/GPL");
-#endif
diff --git a/drivers/staging/ath6kl/os/linux/include/ar6000_drv.h b/drivers/staging/ath6kl/os/linux/include/ar6000_drv.h
deleted file mode 100644
index 80cef77738f..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/ar6000_drv.h
+++ /dev/null
@@ -1,776 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#ifndef _AR6000_H_
-#define _AR6000_H_
-
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/if_ether.h>
-#include <linux/etherdevice.h>
-#include <net/iw_handler.h>
-#include <linux/if_arp.h>
-#include <linux/ip.h>
-#include <linux/wireless.h>
-#include <net/cfg80211.h>
-#include <linux/module.h>
-#include <asm/io.h>
-
-#include <a_config.h>
-#include <athdefs.h>
-#include "a_osapi.h"
-#include "htc_api.h"
-#include "wmi.h"
-#include "a_drv.h"
-#include "bmi.h"
-#include <ieee80211.h>
-#include <ieee80211_ioctl.h>
-#include <wlan_api.h>
-#include <wmi_api.h>
-#include "pkt_log.h"
-#include "aggr_recv_api.h"
-#include <host_version.h>
-#include <linux/rtnetlink.h>
-#include <linux/moduleparam.h>
-#include "ar6000_api.h"
-#ifdef CONFIG_HOST_TCMD_SUPPORT
-#include <testcmd.h>
-#endif
-#include <linux/firmware.h>
-
-#include "targaddrs.h"
-#include "dbglog_api.h"
-#include "ar6000_diag.h"
-#include "common_drv.h"
-#include "roaming.h"
-#include "hci_transport_api.h"
-#define ATH_MODULE_NAME driver
-#include "a_debug.h"
-#include "hw/apb_map.h"
-#include "hw/rtc_reg.h"
-#include "hw/mbox_reg.h"
-#include "gpio_reg.h"
-
-#define ATH_DEBUG_DBG_LOG ATH_DEBUG_MAKE_MODULE_MASK(0)
-#define ATH_DEBUG_WLAN_CONNECT ATH_DEBUG_MAKE_MODULE_MASK(1)
-#define ATH_DEBUG_WLAN_SCAN ATH_DEBUG_MAKE_MODULE_MASK(2)
-#define ATH_DEBUG_WLAN_TX ATH_DEBUG_MAKE_MODULE_MASK(3)
-#define ATH_DEBUG_WLAN_RX ATH_DEBUG_MAKE_MODULE_MASK(4)
-#define ATH_DEBUG_HTC_RAW ATH_DEBUG_MAKE_MODULE_MASK(5)
-#define ATH_DEBUG_HCI_BRIDGE ATH_DEBUG_MAKE_MODULE_MASK(6)
-#define ATH_DEBUG_HCI_RECV ATH_DEBUG_MAKE_MODULE_MASK(7)
-#define ATH_DEBUG_HCI_SEND ATH_DEBUG_MAKE_MODULE_MASK(8)
-#define ATH_DEBUG_HCI_DUMP ATH_DEBUG_MAKE_MODULE_MASK(9)
-
-#ifndef __dev_put
-#define __dev_put(dev) dev_put(dev)
-#endif
-
-
-#define USER_SAVEDKEYS_STAT_INIT 0
-#define USER_SAVEDKEYS_STAT_RUN 1
-
-// TODO this needs to move into the AR_SOFTC struct
-struct USER_SAVEDKEYS {
- struct ieee80211req_key ucast_ik;
- struct ieee80211req_key bcast_ik;
- CRYPTO_TYPE keyType;
- bool keyOk;
-};
-
-#define DBG_INFO 0x00000001
-#define DBG_ERROR 0x00000002
-#define DBG_WARNING 0x00000004
-#define DBG_SDIO 0x00000008
-#define DBG_HIF 0x00000010
-#define DBG_HTC 0x00000020
-#define DBG_WMI 0x00000040
-#define DBG_WMI2 0x00000080
-#define DBG_DRIVER 0x00000100
-
-#define DBG_DEFAULTS (DBG_ERROR|DBG_WARNING)
-
-
-int ar6000_ReadRegDiag(struct hif_device *hifDevice, u32 *address, u32 *data);
-int ar6000_WriteRegDiag(struct hif_device *hifDevice, u32 *address, u32 *data);
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define MAX_AR6000 1
-#define AR6000_MAX_RX_BUFFERS 16
-#define AR6000_BUFFER_SIZE 1664
-#define AR6000_MAX_AMSDU_RX_BUFFERS 4
-#define AR6000_AMSDU_REFILL_THRESHOLD 3
-#define AR6000_AMSDU_BUFFER_SIZE (WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH + 128)
-#define AR6000_MAX_RX_MESSAGE_SIZE (max(WMI_MAX_NORMAL_RX_DATA_FRAME_LENGTH,WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))
-
-#define AR6000_TX_TIMEOUT 10
-#define AR6000_ETH_ADDR_LEN 6
-#define AR6000_MAX_ENDPOINTS 4
-#define MAX_NODE_NUM 15
-/* MAX_HI_COOKIE_NUM are reserved for high priority traffic */
-#define MAX_DEF_COOKIE_NUM 180
-#define MAX_HI_COOKIE_NUM 18 /* 10% of MAX_COOKIE_NUM */
-#define MAX_COOKIE_NUM (MAX_DEF_COOKIE_NUM + MAX_HI_COOKIE_NUM)
-
-/* MAX_DEFAULT_SEND_QUEUE_DEPTH is used to set the default queue depth for the
- * WMM send queues. If a queue exceeds this depth htc will query back to the
- * OS specific layer by calling EpSendFull(). This gives the OS layer the
- * opportunity to drop the packet if desired. Therefore changing
- * MAX_DEFAULT_SEND_QUEUE_DEPTH does not affect resource utilization but
- * does impact the threshold used to identify if a packet should be
- * dropped. */
-#define MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC)
-
-#define AR6000_HB_CHALLENGE_RESP_FREQ_DEFAULT 1
-#define AR6000_HB_CHALLENGE_RESP_MISS_THRES_DEFAULT 1
-#define A_DISCONNECT_TIMER_INTERVAL 10 * 1000
-#define A_DEFAULT_LISTEN_INTERVAL 100
-#define A_MAX_WOW_LISTEN_INTERVAL 1000
-
-enum {
- DRV_HB_CHALLENGE = 0,
- APP_HB_CHALLENGE
-};
-
-enum {
- WLAN_INIT_MODE_NONE = 0,
- WLAN_INIT_MODE_USR,
- WLAN_INIT_MODE_UDEV,
- WLAN_INIT_MODE_DRV
-};
-
-/* Suspend - configuration */
-enum {
- WLAN_SUSPEND_CUT_PWR = 0,
- WLAN_SUSPEND_DEEP_SLEEP,
- WLAN_SUSPEND_WOW,
- WLAN_SUSPEND_CUT_PWR_IF_BT_OFF
-};
-
-/* WiFi OFF - configuration */
-enum {
- WLAN_OFF_CUT_PWR = 0,
- WLAN_OFF_DEEP_SLEEP,
-};
-
-/* WLAN low power state */
-enum {
- WLAN_POWER_STATE_ON = 0,
- WLAN_POWER_STATE_CUT_PWR = 1,
- WLAN_POWER_STATE_DEEP_SLEEP,
- WLAN_POWER_STATE_WOW
-};
-
-/* WLAN WoW State */
-enum {
- WLAN_WOW_STATE_NONE = 0,
- WLAN_WOW_STATE_SUSPENDED,
- WLAN_WOW_STATE_SUSPENDING
-};
-
-
-typedef enum _AR6K_BIN_FILE {
- AR6K_OTP_FILE,
- AR6K_FIRMWARE_FILE,
- AR6K_PATCH_FILE,
- AR6K_BOARD_DATA_FILE,
-} AR6K_BIN_FILE;
-
-#ifdef SETUPHCI_ENABLED
-#define SETUPHCI_DEFAULT 1
-#else
-#define SETUPHCI_DEFAULT 0
-#endif /* SETUPHCI_ENABLED */
-
-#ifdef SETUPBTDEV_ENABLED
-#define SETUPBTDEV_DEFAULT 1
-#else
-#define SETUPBTDEV_DEFAULT 0
-#endif /* SETUPBTDEV_ENABLED */
-
-#ifdef ENABLEUARTPRINT_SET
-#define ENABLEUARTPRINT_DEFAULT 1
-#else
-#define ENABLEUARTPRINT_DEFAULT 0
-#endif /* ENABLEARTPRINT_SET */
-
-#ifdef ATH6KL_CONFIG_HIF_VIRTUAL_SCATTER
-#define NOHIFSCATTERSUPPORT_DEFAULT 1
-#else /* ATH6KL_CONFIG_HIF_VIRTUAL_SCATTER */
-#define NOHIFSCATTERSUPPORT_DEFAULT 0
-#endif /* ATH6KL_CONFIG_HIF_VIRTUAL_SCATTER */
-
-
-#if defined(CONFIG_ATH6KL_ENABLE_COEXISTENCE)
-
-#ifdef CONFIG_AR600x_BT_QCOM
-#define ATH6KL_BT_DEV 1
-#elif defined(CONFIG_AR600x_BT_CSR)
-#define ATH6KL_BT_DEV 2
-#else
-#define ATH6KL_BT_DEV 3
-#endif
-
-#ifdef CONFIG_AR600x_DUAL_ANTENNA
-#define ATH6KL_BT_ANTENNA 2
-#else
-#define ATH6KL_BT_ANTENNA 1
-#endif
-
-#endif /* CONFIG_ATH6KL_ENABLE_COEXISTENCE */
-
-#ifdef AR600x_BT_AR3001
-#define AR3KHCIBAUD_DEFAULT 3000000
-#define HCIUARTSCALE_DEFAULT 1
-#define HCIUARTSTEP_DEFAULT 8937
-#else
-#define AR3KHCIBAUD_DEFAULT 0
-#define HCIUARTSCALE_DEFAULT 0
-#define HCIUARTSTEP_DEFAULT 0
-#endif /* AR600x_BT_AR3001 */
-
-#define WLAN_INIT_MODE_DEFAULT WLAN_INIT_MODE_DRV
-
-#define AR6K_PATCH_DOWNLOAD_ADDRESS(_param, _ver) do { \
- if ((_ver) == AR6003_REV1_VERSION) { \
- (_param) = AR6003_REV1_PATCH_DOWNLOAD_ADDRESS; \
- } else if ((_ver) == AR6003_REV2_VERSION) { \
- (_param) = AR6003_REV2_PATCH_DOWNLOAD_ADDRESS; \
- } else { \
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown Version: %d\n", _ver)); \
- A_ASSERT(0); \
- } \
-} while (0)
-
-#define AR6K_DATA_DOWNLOAD_ADDRESS(_param, _ver) do { \
- if ((_ver) == AR6003_REV1_VERSION) { \
- (_param) = AR6003_REV1_DATA_DOWNLOAD_ADDRESS; \
- } else if ((_ver) == AR6003_REV2_VERSION) { \
- (_param) = AR6003_REV2_DATA_DOWNLOAD_ADDRESS; \
- } else { \
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown Version: %d\n", _ver)); \
- A_ASSERT(0); \
- } \
-} while (0)
-
-#define AR6K_DATASET_PATCH_ADDRESS(_param, _ver) do { \
- if ((_ver) == AR6003_REV2_VERSION) { \
- (_param) = AR6003_REV2_DATASET_PATCH_ADDRESS; \
- } else if ((_ver) == AR6003_REV3_VERSION) { \
- (_param) = AR6003_REV3_DATASET_PATCH_ADDRESS; \
- } else { \
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown Version: %d\n", _ver)); \
- A_ASSERT(0); \
- } \
-} while (0)
-
-#define AR6K_APP_LOAD_ADDRESS(_param, _ver) do { \
- if ((_ver) == AR6003_REV2_VERSION) { \
- (_param) = AR6003_REV2_APP_LOAD_ADDRESS; \
- } else if ((_ver) == AR6003_REV3_VERSION) { \
- (_param) = AR6003_REV3_APP_LOAD_ADDRESS; \
- } else { \
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown Version: %d\n", _ver)); \
- A_ASSERT(0); \
- } \
-} while (0)
-
-#define AR6K_APP_START_OVERRIDE_ADDRESS(_param, _ver) do { \
- if ((_ver) == AR6003_REV2_VERSION) { \
- (_param) = AR6003_REV2_APP_START_OVERRIDE; \
- } else if ((_ver) == AR6003_REV3_VERSION) { \
- (_param) = AR6003_REV3_APP_START_OVERRIDE; \
- } else { \
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown Version: %d\n", _ver)); \
- A_ASSERT(0); \
- } \
-} while (0)
-
-/* AR6003 1.0 definitions */
-#define AR6003_REV1_VERSION 0x300002ba
-#define AR6003_REV1_DATA_DOWNLOAD_ADDRESS AR6003_REV1_OTP_DATA_ADDRESS
-#define AR6003_REV1_PATCH_DOWNLOAD_ADDRESS 0x57ea6c
-#define AR6003_REV1_OTP_FILE "ath6k/AR6003/hw1.0/otp.bin.z77"
-#define AR6003_REV1_FIRMWARE_FILE "ath6k/AR6003/hw1.0/athwlan.bin.z77"
-#define AR6003_REV1_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw1.0/athtcmd_ram.bin"
-#define AR6003_REV1_ART_FIRMWARE_FILE "ath6k/AR6003/hw1.0/device.bin"
-#define AR6003_REV1_PATCH_FILE "ath6k/AR6003/hw1.0/data.patch.bin"
-#define AR6003_REV1_EPPING_FIRMWARE_FILE "ath6k/AR6003/hw1.0/endpointping.bin"
-#ifdef CONFIG_AR600x_SD31_XXX
-#define AR6003_REV1_BOARD_DATA_FILE "ath6k/AR6003/hw1.0/bdata.SD31.bin"
-#elif defined(CONFIG_AR600x_SD32_XXX)
-#define AR6003_REV1_BOARD_DATA_FILE "ath6k/AR6003/hw1.0/bdata.SD32.bin"
-#elif defined(CONFIG_AR600x_WB31_XXX)
-#define AR6003_REV1_BOARD_DATA_FILE "ath6k/AR6003/hw1.0/bdata.WB31.bin"
-#else
-#define AR6003_REV1_BOARD_DATA_FILE "ath6k/AR6003/hw1.0/bdata.CUSTOM.bin"
-#endif /* Board Data File */
-
-/* AR6003 2.0 definitions */
-#define AR6003_REV2_VERSION 0x30000384
-#define AR6003_REV2_DATA_DOWNLOAD_ADDRESS AR6003_REV2_OTP_DATA_ADDRESS
-#define AR6003_REV2_PATCH_DOWNLOAD_ADDRESS 0x57e910
-#define AR6003_REV2_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
-#define AR6003_REV2_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
-#define AR6003_REV2_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin"
-#define AR6003_REV2_ART_FIRMWARE_FILE "ath6k/AR6003/hw2.0/device.bin"
-#define AR6003_REV2_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
-#define AR6003_REV2_EPPING_FIRMWARE_FILE "ath6k/AR6003/hw2.0/endpointping.bin"
-#ifdef CONFIG_AR600x_SD31_XXX
-#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD31.bin"
-#elif defined(CONFIG_AR600x_SD32_XXX)
-#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD32.bin"
-#elif defined(CONFIG_AR600x_WB31_XXX)
-#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.WB31.bin"
-#else
-#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.CUSTOM.bin"
-#endif /* Board Data File */
-
-/* AR6003 3.0 definitions */
-#define AR6003_REV3_VERSION 0x30000582
-#define AR6003_REV3_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
-#define AR6003_REV3_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
-#define AR6003_REV3_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athtcmd_ram.bin"
-#define AR6003_REV3_ART_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/device.bin"
-#define AR6003_REV3_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
-#define AR6003_REV3_EPPING_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/endpointping.bin"
-#ifdef CONFIG_AR600x_SD31_XXX
-#define AR6003_REV3_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
-#elif defined(CONFIG_AR600x_SD32_XXX)
-#define AR6003_REV3_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.SD32.bin"
-#elif defined(CONFIG_AR600x_WB31_XXX)
-#define AR6003_REV3_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.WB31.bin"
-#else
-#define AR6003_REV3_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.CUSTOM.bin"
-#endif /* Board Data File */
-
-
-/* Power states */
-enum {
- WLAN_PWR_CTRL_UP = 0,
- WLAN_PWR_CTRL_CUT_PWR,
- WLAN_PWR_CTRL_DEEP_SLEEP,
- WLAN_PWR_CTRL_WOW,
- WLAN_PWR_CTRL_DEEP_SLEEP_DISABLED
-};
-
-/* HTC RAW streams */
-typedef enum _HTC_RAW_STREAM_ID {
- HTC_RAW_STREAM_NOT_MAPPED = -1,
- HTC_RAW_STREAM_0 = 0,
- HTC_RAW_STREAM_1 = 1,
- HTC_RAW_STREAM_2 = 2,
- HTC_RAW_STREAM_3 = 3,
- HTC_RAW_STREAM_NUM_MAX
-} HTC_RAW_STREAM_ID;
-
-#define RAW_HTC_READ_BUFFERS_NUM 4
-#define RAW_HTC_WRITE_BUFFERS_NUM 4
-
-#define HTC_RAW_BUFFER_SIZE 1664
-
-typedef struct {
- int currPtr;
- int length;
- unsigned char data[HTC_RAW_BUFFER_SIZE];
- struct htc_packet HTCPacket;
-} raw_htc_buffer;
-
-#ifdef CONFIG_HOST_TCMD_SUPPORT
-/*
- * add TCMD_MODE besides wmi and bypasswmi
- * in TCMD_MODE, only few TCMD releated wmi commands
- * counld be hanlder
- */
-enum {
- AR6000_WMI_MODE = 0,
- AR6000_BYPASS_MODE,
- AR6000_TCMD_MODE,
- AR6000_WLAN_MODE
-};
-#endif /* CONFIG_HOST_TCMD_SUPPORT */
-
-struct ar_wep_key {
- u8 arKeyIndex;
- u8 arKeyLen;
- u8 arKey[64];
-} ;
-
-struct ar_key {
- u8 key[WLAN_MAX_KEY_LEN];
- u8 key_len;
- u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
- u8 seq_len;
- u32 cipher;
-};
-
-enum {
- SME_DISCONNECTED,
- SME_CONNECTING,
- SME_CONNECTED
-};
-
-struct ar_node_mapping {
- u8 macAddress[6];
- u8 epId;
- u8 txPending;
-};
-
-struct ar_cookie {
- unsigned long arc_bp[2]; /* Must be first field */
- struct htc_packet HtcPkt; /* HTC packet wrapper */
- struct ar_cookie *arc_list_next;
-};
-
-struct ar_hb_chlng_resp {
- A_TIMER timer;
- u32 frequency;
- u32 seqNum;
- bool outstanding;
- u8 missCnt;
- u8 missThres;
-};
-
-/* Per STA data, used in AP mode */
-/*TODO: All this should move to OS independent dir */
-
-#define STA_PWR_MGMT_MASK 0x1
-#define STA_PWR_MGMT_SHIFT 0x0
-#define STA_PWR_MGMT_AWAKE 0x0
-#define STA_PWR_MGMT_SLEEP 0x1
-
-#define STA_SET_PWR_SLEEP(sta) (sta->flags |= (STA_PWR_MGMT_MASK << STA_PWR_MGMT_SHIFT))
-#define STA_CLR_PWR_SLEEP(sta) (sta->flags &= ~(STA_PWR_MGMT_MASK << STA_PWR_MGMT_SHIFT))
-#define STA_IS_PWR_SLEEP(sta) ((sta->flags >> STA_PWR_MGMT_SHIFT) & STA_PWR_MGMT_MASK)
-
-#define STA_PS_POLLED_MASK 0x1
-#define STA_PS_POLLED_SHIFT 0x1
-#define STA_SET_PS_POLLED(sta) (sta->flags |= (STA_PS_POLLED_MASK << STA_PS_POLLED_SHIFT))
-#define STA_CLR_PS_POLLED(sta) (sta->flags &= ~(STA_PS_POLLED_MASK << STA_PS_POLLED_SHIFT))
-#define STA_IS_PS_POLLED(sta) (sta->flags & (STA_PS_POLLED_MASK << STA_PS_POLLED_SHIFT))
-
-typedef struct {
- u16 flags;
- u8 mac[ATH_MAC_LEN];
- u8 aid;
- u8 keymgmt;
- u8 ucipher;
- u8 auth;
- u8 wpa_ie[IEEE80211_MAX_IE];
- A_NETBUF_QUEUE_T psq; /* power save q */
- A_MUTEX_T psqLock;
-} sta_t;
-
-typedef struct ar6_raw_htc {
- HTC_ENDPOINT_ID arRaw2EpMapping[HTC_RAW_STREAM_NUM_MAX];
- HTC_RAW_STREAM_ID arEp2RawMapping[ENDPOINT_MAX];
- struct semaphore raw_htc_read_sem[HTC_RAW_STREAM_NUM_MAX];
- struct semaphore raw_htc_write_sem[HTC_RAW_STREAM_NUM_MAX];
- wait_queue_head_t raw_htc_read_queue[HTC_RAW_STREAM_NUM_MAX];
- wait_queue_head_t raw_htc_write_queue[HTC_RAW_STREAM_NUM_MAX];
- raw_htc_buffer raw_htc_read_buffer[HTC_RAW_STREAM_NUM_MAX][RAW_HTC_READ_BUFFERS_NUM];
- raw_htc_buffer raw_htc_write_buffer[HTC_RAW_STREAM_NUM_MAX][RAW_HTC_WRITE_BUFFERS_NUM];
- bool write_buffer_available[HTC_RAW_STREAM_NUM_MAX];
- bool read_buffer_available[HTC_RAW_STREAM_NUM_MAX];
-} AR_RAW_HTC_T;
-
-struct ar6_softc {
- struct net_device *arNetDev; /* net_device pointer */
- void *arWmi;
- int arTxPending[ENDPOINT_MAX];
- int arTotalTxDataPending;
- u8 arNumDataEndPts;
- bool arWmiEnabled;
- bool arWmiReady;
- bool arConnected;
- HTC_HANDLE arHtcTarget;
- void *arHifDevice;
- spinlock_t arLock;
- struct semaphore arSem;
- int arSsidLen;
- u_char arSsid[32];
- u8 arNextMode;
- u8 arNetworkType;
- u8 arDot11AuthMode;
- u8 arAuthMode;
- u8 arPairwiseCrypto;
- u8 arPairwiseCryptoLen;
- u8 arGroupCrypto;
- u8 arGroupCryptoLen;
- u8 arDefTxKeyIndex;
- struct ar_wep_key arWepKeyList[WMI_MAX_KEY_INDEX + 1];
- u8 arBssid[6];
- u8 arReqBssid[6];
- u16 arChannelHint;
- u16 arBssChannel;
- u16 arListenIntervalB;
- u16 arListenIntervalT;
- struct ar6000_version arVersion;
- u32 arTargetType;
- s8 arRssi;
- u8 arTxPwr;
- bool arTxPwrSet;
- s32 arBitRate;
- struct net_device_stats arNetStats;
- struct iw_statistics arIwStats;
- s8 arNumChannels;
- u16 arChannelList[32];
- u32 arRegCode;
- bool statsUpdatePending;
- TARGET_STATS arTargetStats;
- s8 arMaxRetries;
- u8 arPhyCapability;
-#ifdef CONFIG_HOST_TCMD_SUPPORT
- u32 arTargetMode;
- void *tcmd_rx_report;
- int tcmd_rx_report_len;
-#endif
- AR6000_WLAN_STATE arWlanState;
- struct ar_node_mapping arNodeMap[MAX_NODE_NUM];
- u8 arIbssPsEnable;
- u8 arNodeNum;
- u8 arNexEpId;
- struct ar_cookie *arCookieList;
- u32 arCookieCount;
- u32 arRateMask;
- u8 arSkipScan;
- u16 arBeaconInterval;
- bool arConnectPending;
- bool arWmmEnabled;
- struct ar_hb_chlng_resp arHBChallengeResp;
- u8 arKeepaliveConfigured;
- u32 arMgmtFilter;
- HTC_ENDPOINT_ID arAc2EpMapping[WMM_NUM_AC];
- bool arAcStreamActive[WMM_NUM_AC];
- u8 arAcStreamPriMap[WMM_NUM_AC];
- u8 arHiAcStreamActivePri;
- u8 arEp2AcMapping[ENDPOINT_MAX];
- HTC_ENDPOINT_ID arControlEp;
-#ifdef HTC_RAW_INTERFACE
- AR_RAW_HTC_T *arRawHtc;
-#endif
- bool arNetQueueStopped;
- bool arRawIfInit;
- int arDeviceIndex;
- struct common_credit_state_info arCreditStateInfo;
- bool arWMIControlEpFull;
- bool dbgLogFetchInProgress;
- u8 log_buffer[DBGLOG_HOST_LOG_BUFFER_SIZE];
- u32 log_cnt;
- u32 dbglog_init_done;
- u32 arConnectCtrlFlags;
- s32 user_savedkeys_stat;
- u32 user_key_ctrl;
- struct USER_SAVEDKEYS user_saved_keys;
- USER_RSSI_THOLD rssi_map[12];
- u8 arUserBssFilter;
- u16 ap_profile_flag; /* AP mode */
- WMI_AP_ACL g_acl; /* AP mode */
- sta_t sta_list[AP_MAX_NUM_STA]; /* AP mode */
- u8 sta_list_index; /* AP mode */
- struct ieee80211req_key ap_mode_bkey; /* AP mode */
- A_NETBUF_QUEUE_T mcastpsq; /* power save q for Mcast frames */
- A_MUTEX_T mcastpsqLock;
- bool DTIMExpired; /* flag to indicate DTIM expired */
- u8 intra_bss; /* enable/disable intra bss data forward */
- void *aggr_cntxt;
-#ifndef EXPORT_HCI_BRIDGE_INTERFACE
- void *hcidev_info;
-#endif
- WMI_AP_MODE_STAT arAPStats;
- u8 ap_hidden_ssid;
- u8 ap_country_code[3];
- u8 ap_wmode;
- u8 ap_dtim_period;
- u16 ap_beacon_interval;
- u16 arRTS;
- u16 arACS; /* AP mode - Auto Channel Selection */
- struct htc_packet_queue amsdu_rx_buffer_queue;
- bool bIsDestroyProgress; /* flag to indicate ar6k destroy is in progress */
- A_TIMER disconnect_timer;
- u8 rxMetaVersion;
-#ifdef WAPI_ENABLE
- u8 arWapiEnable;
-#endif
- WMI_BTCOEX_CONFIG_EVENT arBtcoexConfig;
- WMI_BTCOEX_STATS_EVENT arBtcoexStats;
- s32 (*exitCallback)(void *config); /* generic callback at AR6K exit */
- struct hif_device_os_device_info osDevInfo;
- struct wireless_dev *wdev;
- struct cfg80211_scan_request *scan_request;
- struct ar_key keys[WMI_MAX_KEY_INDEX + 1];
- u32 smeState;
- u16 arWlanPowerState;
- bool arWlanOff;
-#ifdef CONFIG_PM
- u16 arWowState;
- bool arBTOff;
- bool arBTSharing;
- u16 arSuspendConfig;
- u16 arWlanOffConfig;
- u16 arWow2Config;
-#endif
- u8 scan_triggered;
- WMI_SCAN_PARAMS_CMD scParams;
-#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4
- u8 mcast_filters[MAC_MAX_FILTERS_PER_LIST][AR_MCAST_FILTER_MAC_ADDR_SIZE];
- u8 bdaddr[6];
- bool scanSpecificSsid;
-#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
- void *arApDev;
-#endif
- u8 arAutoAuthStage;
-
- u8 *fw_otp;
- size_t fw_otp_len;
- u8 *fw;
- size_t fw_len;
- u8 *fw_patch;
- size_t fw_patch_len;
- u8 *fw_data;
- size_t fw_data_len;
-};
-
-#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
-struct ar_virtual_interface {
- struct net_device *arNetDev; /* net_device pointer */
- struct ar6_softc *arDev; /* ar device pointer */
- struct net_device *arStaNetDev; /* net_device pointer */
-};
-#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
-
-static inline void *ar6k_priv(struct net_device *dev)
-{
- return (wdev_priv(dev->ieee80211_ptr));
-}
-
-#define SET_HCI_BUS_TYPE(pHciDev, __bus, __type) do { \
- (pHciDev)->bus = (__bus); \
- (pHciDev)->dev_type = (__type); \
-} while(0)
-
-#define GET_INODE_FROM_FILEP(filp) \
- (filp)->f_path.dentry->d_inode
-
-#define arAc2EndpointID(ar,ac) (ar)->arAc2EpMapping[(ac)]
-#define arSetAc2EndpointIDMap(ar,ac,ep) \
-{ (ar)->arAc2EpMapping[(ac)] = (ep); \
- (ar)->arEp2AcMapping[(ep)] = (ac); }
-#define arEndpoint2Ac(ar,ep) (ar)->arEp2AcMapping[(ep)]
-
-#define arRawIfEnabled(ar) (ar)->arRawIfInit
-#define arRawStream2EndpointID(ar,raw) (ar)->arRawHtc->arRaw2EpMapping[(raw)]
-#define arSetRawStream2EndpointIDMap(ar,raw,ep) \
-{ (ar)->arRawHtc->arRaw2EpMapping[(raw)] = (ep); \
- (ar)->arRawHtc->arEp2RawMapping[(ep)] = (raw); }
-#define arEndpoint2RawStreamID(ar,ep) (ar)->arRawHtc->arEp2RawMapping[(ep)]
-
-struct ar_giwscan_param {
- char *current_ev;
- char *end_buf;
- u32 bytes_needed;
- struct iw_request_info *info;
-};
-
-#define AR6000_STAT_INC(ar, stat) (ar->arNetStats.stat++)
-
-#define AR6000_SPIN_LOCK(lock, param) do { \
- if (irqs_disabled()) { \
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("IRQs disabled:AR6000_LOCK\n")); \
- } \
- spin_lock_bh(lock); \
-} while (0)
-
-#define AR6000_SPIN_UNLOCK(lock, param) do { \
- if (irqs_disabled()) { \
- AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("IRQs disabled: AR6000_UNLOCK\n")); \
- } \
- spin_unlock_bh(lock); \
-} while (0)
-
-void ar6000_init_profile_info(struct ar6_softc *ar);
-void ar6000_install_static_wep_keys(struct ar6_softc *ar);
-int ar6000_init(struct net_device *dev);
-int ar6000_dbglog_get_debug_logs(struct ar6_softc *ar);
-void ar6000_TxDataCleanup(struct ar6_softc *ar);
-int ar6000_acl_data_tx(struct sk_buff *skb, struct net_device *dev);
-void ar6000_restart_endpoint(struct net_device *dev);
-void ar6000_stop_endpoint(struct net_device *dev, bool keepprofile, bool getdbglogs);
-
-#ifdef HTC_RAW_INTERFACE
-
-#ifndef __user
-#define __user
-#endif
-
-int ar6000_htc_raw_open(struct ar6_softc *ar);
-int ar6000_htc_raw_close(struct ar6_softc *ar);
-ssize_t ar6000_htc_raw_read(struct ar6_softc *ar,
- HTC_RAW_STREAM_ID StreamID,
- char __user *buffer, size_t count);
-ssize_t ar6000_htc_raw_write(struct ar6_softc *ar,
- HTC_RAW_STREAM_ID StreamID,
- char __user *buffer, size_t count);
-
-#endif /* HTC_RAW_INTERFACE */
-
-/* AP mode */
-/*TODO: These routines should be moved to a file that is common across OS */
-sta_t *
-ieee80211_find_conn(struct ar6_softc *ar, u8 *node_addr);
-
-sta_t *
-ieee80211_find_conn_for_aid(struct ar6_softc *ar, u8 aid);
-
-u8 remove_sta(struct ar6_softc *ar, u8 *mac, u16 reason);
-
-/* HCI support */
-
-#ifndef EXPORT_HCI_BRIDGE_INTERFACE
-int ar6000_setup_hci(struct ar6_softc *ar);
-void ar6000_cleanup_hci(struct ar6_softc *ar);
-void ar6000_set_default_ar3kconfig(struct ar6_softc *ar, void *ar3kconfig);
-
-/* HCI bridge testing */
-int hci_test_send(struct ar6_softc *ar, struct sk_buff *skb);
-#endif
-
-ATH_DEBUG_DECLARE_EXTERN(htc);
-ATH_DEBUG_DECLARE_EXTERN(wmi);
-ATH_DEBUG_DECLARE_EXTERN(bmi);
-ATH_DEBUG_DECLARE_EXTERN(hif);
-ATH_DEBUG_DECLARE_EXTERN(wlan);
-ATH_DEBUG_DECLARE_EXTERN(misc);
-
-extern u8 bcast_mac[];
-extern u8 null_mac[];
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _AR6000_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
deleted file mode 100644
index 39e0873aff2..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
+++ /dev/null
@@ -1,36 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
-//
-// The software source and binaries included in this development package are
-// licensed, not sold. You, or your company, received the package under one
-// or more license agreements. The rights granted to you are specifically
-// listed in these license agreement(s). All other rights remain with Atheros
-// Communications, Inc., its subsidiaries, or the respective owner including
-// those listed on the included copyright notices. Distribution of any
-// portion of this package must be in strict compliance with the license
-// agreement(s) terms.
-// </copyright>
-//
-// <summary>
-// PAL driver for AR6003
-// </summary>
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _AR6K_PAL_H_
-#define _AR6K_PAL_H_
-#define HCI_GET_OP_CODE(p) (((u16)((p)[1])) << 8) | ((u16)((p)[0]))
-
-/* transmit packet reserve offset */
-#define TX_PACKET_RSV_OFFSET 32
-/* pal specific config structure */
-typedef bool (*ar6k_pal_recv_pkt_t)(void *pHciPalInfo, void *skb);
-typedef struct ar6k_pal_config_s
-{
- ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
-}ar6k_pal_config_t;
-
-void register_pal_cb(ar6k_pal_config_t *palConfig_p);
-#endif /* _AR6K_PAL_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/ar6xapi_linux.h b/drivers/staging/ath6kl/os/linux/include/ar6xapi_linux.h
deleted file mode 100644
index 184dbdb5049..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/ar6xapi_linux.h
+++ /dev/null
@@ -1,190 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#ifndef _AR6XAPI_LINUX_H
-#define _AR6XAPI_LINUX_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct ar6_softc;
-
-void ar6000_ready_event(void *devt, u8 *datap, u8 phyCap,
- u32 sw_ver, u32 abi_ver);
-int ar6000_control_tx(void *devt, void *osbuf, HTC_ENDPOINT_ID eid);
-void ar6000_connect_event(struct ar6_softc *ar, u16 channel,
- u8 *bssid, u16 listenInterval,
- u16 beaconInterval, NETWORK_TYPE networkType,
- u8 beaconIeLen, u8 assocReqLen,
- u8 assocRespLen,u8 *assocInfo);
-void ar6000_disconnect_event(struct ar6_softc *ar, u8 reason,
- u8 *bssid, u8 assocRespLen,
- u8 *assocInfo, u16 protocolReasonStatus);
-void ar6000_tkip_micerr_event(struct ar6_softc *ar, u8 keyid,
- bool ismcast);
-void ar6000_bitrate_rx(void *devt, s32 rateKbps);
-void ar6000_channelList_rx(void *devt, s8 numChan, u16 *chanList);
-void ar6000_regDomain_event(struct ar6_softc *ar, u32 regCode);
-void ar6000_txPwr_rx(void *devt, u8 txPwr);
-void ar6000_keepalive_rx(void *devt, u8 configured);
-void ar6000_neighborReport_event(struct ar6_softc *ar, int numAps,
- WMI_NEIGHBOR_INFO *info);
-void ar6000_set_numdataendpts(struct ar6_softc *ar, u32 num);
-void ar6000_scanComplete_event(struct ar6_softc *ar, int status);
-void ar6000_targetStats_event(struct ar6_softc *ar, u8 *ptr, u32 len);
-void ar6000_rssiThreshold_event(struct ar6_softc *ar,
- WMI_RSSI_THRESHOLD_VAL newThreshold,
- s16 rssi);
-void ar6000_reportError_event(struct ar6_softc *, WMI_TARGET_ERROR_VAL errorVal);
-void ar6000_cac_event(struct ar6_softc *ar, u8 ac, u8 cac_indication,
- u8 statusCode, u8 *tspecSuggestion);
-void ar6000_channel_change_event(struct ar6_softc *ar, u16 oldChannel, u16 newChannel);
-void ar6000_hbChallengeResp_event(struct ar6_softc *, u32 cookie, u32 source);
-void
-ar6000_roam_tbl_event(struct ar6_softc *ar, WMI_TARGET_ROAM_TBL *pTbl);
-
-void
-ar6000_roam_data_event(struct ar6_softc *ar, WMI_TARGET_ROAM_DATA *p);
-
-void
-ar6000_wow_list_event(struct ar6_softc *ar, u8 num_filters,
- WMI_GET_WOW_LIST_REPLY *wow_reply);
-
-void ar6000_pmkid_list_event(void *devt, u8 numPMKID,
- WMI_PMKID *pmkidList, u8 *bssidList);
-
-void ar6000_gpio_intr_rx(u32 intr_mask, u32 input_values);
-void ar6000_gpio_data_rx(u32 reg_id, u32 value);
-void ar6000_gpio_ack_rx(void);
-
-s32 rssi_compensation_calc_tcmd(u32 freq, s32 rssi, u32 totalPkt);
-s16 rssi_compensation_calc(struct ar6_softc *ar, s16 rssi);
-s16 rssi_compensation_reverse_calc(struct ar6_softc *ar, s16 rssi, bool Above);
-
-void ar6000_dbglog_init_done(struct ar6_softc *ar);
-
-#ifdef CONFIG_HOST_TCMD_SUPPORT
-void ar6000_tcmd_rx_report_event(void *devt, u8 *results, int len);
-#endif
-
-void ar6000_tx_retry_err_event(void *devt);
-
-void ar6000_snrThresholdEvent_rx(void *devt,
- WMI_SNR_THRESHOLD_VAL newThreshold,
- u8 snr);
-
-void ar6000_lqThresholdEvent_rx(void *devt, WMI_LQ_THRESHOLD_VAL range, u8 lqVal);
-
-
-void ar6000_ratemask_rx(void *devt, u32 ratemask);
-
-int ar6000_get_driver_cfg(struct net_device *dev,
- u16 cfgParam,
- void *result);
-void ar6000_bssInfo_event_rx(struct ar6_softc *ar, u8 *data, int len);
-
-void ar6000_dbglog_event(struct ar6_softc *ar, u32 dropped,
- s8 *buffer, u32 length);
-
-int ar6000_dbglog_get_debug_logs(struct ar6_softc *ar);
-
-void ar6000_peer_event(void *devt, u8 eventCode, u8 *bssid);
-
-void ar6000_indicate_tx_activity(void *devt, u8 trafficClass, bool Active);
-HTC_ENDPOINT_ID ar6000_ac2_endpoint_id ( void * devt, u8 ac);
-u8 ar6000_endpoint_id2_ac (void * devt, HTC_ENDPOINT_ID ep );
-
-void ar6000_btcoex_config_event(struct ar6_softc *ar, u8 *ptr, u32 len);
-
-void ar6000_btcoex_stats_event(struct ar6_softc *ar, u8 *ptr, u32 len) ;
-
-void ar6000_dset_open_req(void *devt,
- u32 id,
- u32 targ_handle,
- u32 targ_reply_fn,
- u32 targ_reply_arg);
-void ar6000_dset_close(void *devt, u32 access_cookie);
-void ar6000_dset_data_req(void *devt,
- u32 access_cookie,
- u32 offset,
- u32 length,
- u32 targ_buf,
- u32 targ_reply_fn,
- u32 targ_reply_arg);
-
-
-#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
-void prof_count_rx(unsigned int addr, unsigned int count);
-#endif
-
-u32 ar6000_getnodeAge (void);
-
-u32 ar6000_getclkfreq (void);
-
-int ar6000_ap_mode_profile_commit(struct ar6_softc *ar);
-
-struct ieee80211req_wpaie;
-int
-ar6000_ap_mode_get_wpa_ie(struct ar6_softc *ar, struct ieee80211req_wpaie *wpaie);
-
-int is_iwioctl_allowed(u8 mode, u16 cmd);
-
-int is_xioctl_allowed(u8 mode, int cmd);
-
-void ar6000_pspoll_event(struct ar6_softc *ar,u8 aid);
-
-void ar6000_dtimexpiry_event(struct ar6_softc *ar);
-
-void ar6000_aggr_rcv_addba_req_evt(struct ar6_softc *ar, WMI_ADDBA_REQ_EVENT *cmd);
-void ar6000_aggr_rcv_addba_resp_evt(struct ar6_softc *ar, WMI_ADDBA_RESP_EVENT *cmd);
-void ar6000_aggr_rcv_delba_req_evt(struct ar6_softc *ar, WMI_DELBA_EVENT *cmd);
-void ar6000_hci_event_rcv_evt(struct ar6_softc *ar, WMI_HCI_EVENT *cmd);
-
-#ifdef WAPI_ENABLE
-int ap_set_wapi_key(struct ar6_softc *ar, void *ik);
-void ap_wapi_rekey_event(struct ar6_softc *ar, u8 type, u8 *mac);
-#endif
-
-int ar6000_connect_to_ap(struct ar6_softc *ar);
-int ar6000_disconnect(struct ar6_softc *ar);
-int ar6000_update_wlan_pwr_state(struct ar6_softc *ar, AR6000_WLAN_STATE state, bool suspending);
-int ar6000_set_wlan_state(struct ar6_softc *ar, AR6000_WLAN_STATE state);
-int ar6000_set_bt_hw_state(struct ar6_softc *ar, u32 state);
-
-#ifdef CONFIG_PM
-int ar6000_suspend_ev(void *context);
-int ar6000_resume_ev(void *context);
-int ar6000_power_change_ev(void *context, u32 config);
-void ar6000_check_wow_status(struct ar6_softc *ar, struct sk_buff *skb, bool isEvent);
-#endif
-
-#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
-int ar6000_add_ap_interface(struct ar6_softc *ar, char *ifname);
-int ar6000_remove_ap_interface(struct ar6_softc *ar);
-#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/drivers/staging/ath6kl/os/linux/include/athdrv_linux.h b/drivers/staging/ath6kl/os/linux/include/athdrv_linux.h
deleted file mode 100644
index 3d5f01da543..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/athdrv_linux.h
+++ /dev/null
@@ -1,1217 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#ifndef _ATHDRV_LINUX_H
-#define _ATHDRV_LINUX_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/*
- * There are two types of ioctl's here: Standard ioctls and
- * eXtended ioctls. All extended ioctls (XIOCTL) are multiplexed
- * off of the single ioctl command, AR6000_IOCTL_EXTENDED. The
- * arguments for every XIOCTL starts with a 32-bit command word
- * that is used to select which extended ioctl is in use. After
- * the command word are command-specific arguments.
- */
-
-/* Linux standard Wireless Extensions, private ioctl interfaces */
-#define IEEE80211_IOCTL_SETPARAM (SIOCIWFIRSTPRIV+0)
-#define IEEE80211_IOCTL_SETKEY (SIOCIWFIRSTPRIV+1)
-#define IEEE80211_IOCTL_DELKEY (SIOCIWFIRSTPRIV+2)
-#define IEEE80211_IOCTL_SETMLME (SIOCIWFIRSTPRIV+3)
-#define IEEE80211_IOCTL_ADDPMKID (SIOCIWFIRSTPRIV+4)
-#define IEEE80211_IOCTL_SETOPTIE (SIOCIWFIRSTPRIV+5)
-//#define IEEE80211_IOCTL_GETPARAM (SIOCIWFIRSTPRIV+6)
-//#define IEEE80211_IOCTL_SETWMMPARAMS (SIOCIWFIRSTPRIV+7)
-//#define IEEE80211_IOCTL_GETWMMPARAMS (SIOCIWFIRSTPRIV+8)
-//#define IEEE80211_IOCTL_GETOPTIE (SIOCIWFIRSTPRIV+9)
-//#define IEEE80211_IOCTL_SETAUTHALG (SIOCIWFIRSTPRIV+10)
-#define IEEE80211_IOCTL_LASTONE (SIOCIWFIRSTPRIV+10)
-
-
-
-/* ====WMI Ioctls==== */
-/*
- *
- * Many ioctls simply provide WMI services to application code:
- * an application makes such an ioctl call with a set of arguments
- * that are packaged into the corresponding WMI message, and sent
- * to the Target.
- */
-
-#define AR6000_IOCTL_WMI_GETREV (SIOCIWFIRSTPRIV+11)
-/*
- * arguments:
- * ar6000_version *revision
- */
-
-#define AR6000_IOCTL_WMI_SETPWR (SIOCIWFIRSTPRIV+12)
-/*
- * arguments:
- * WMI_POWER_MODE_CMD pwrModeCmd (see include/wmi.h)
- * uses: WMI_SET_POWER_MODE_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SETSCAN (SIOCIWFIRSTPRIV+13)
-/*
- * arguments:
- * WMI_SCAN_PARAMS_CMD scanParams (see include/wmi.h)
- * uses: WMI_SET_SCAN_PARAMS_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SETLISTENINT (SIOCIWFIRSTPRIV+14)
-/*
- * arguments:
- * UINT32 listenInterval
- * uses: WMI_SET_LISTEN_INT_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SETBSSFILTER (SIOCIWFIRSTPRIV+15)
-/*
- * arguments:
- * WMI_BSS_FILTER filter (see include/wmi.h)
- * uses: WMI_SET_BSS_FILTER_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SET_CHANNELPARAMS (SIOCIWFIRSTPRIV+16)
-/*
- * arguments:
- * WMI_CHANNEL_PARAMS_CMD chParams
- * uses: WMI_SET_CHANNEL_PARAMS_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SET_PROBEDSSID (SIOCIWFIRSTPRIV+17)
-/*
- * arguments:
- * WMI_PROBED_SSID_CMD probedSsids (see include/wmi.h)
- * uses: WMI_SETPROBED_SSID_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SET_PMPARAMS (SIOCIWFIRSTPRIV+18)
-/*
- * arguments:
- * WMI_POWER_PARAMS_CMD powerParams (see include/wmi.h)
- * uses: WMI_SET_POWER_PARAMS_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SET_BADAP (SIOCIWFIRSTPRIV+19)
-/*
- * arguments:
- * WMI_ADD_BAD_AP_CMD badAPs (see include/wmi.h)
- * uses: WMI_ADD_BAD_AP_CMDID
- */
-
-#define AR6000_IOCTL_WMI_GET_QOS_QUEUE (SIOCIWFIRSTPRIV+20)
-/*
- * arguments:
- * ar6000_queuereq queueRequest (see below)
- */
-
-#define AR6000_IOCTL_WMI_CREATE_QOS (SIOCIWFIRSTPRIV+21)
-/*
- * arguments:
- * WMI_CREATE_PSTREAM createPstreamCmd (see include/wmi.h)
- * uses: WMI_CREATE_PSTREAM_CMDID
- */
-
-#define AR6000_IOCTL_WMI_DELETE_QOS (SIOCIWFIRSTPRIV+22)
-/*
- * arguments:
- * WMI_DELETE_PSTREAM_CMD deletePstreamCmd (see include/wmi.h)
- * uses: WMI_DELETE_PSTREAM_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SET_SNRTHRESHOLD (SIOCIWFIRSTPRIV+23)
-/*
- * arguments:
- * WMI_SNR_THRESHOLD_PARAMS_CMD thresholdParams (see include/wmi.h)
- * uses: WMI_SNR_THRESHOLD_PARAMS_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SET_ERROR_REPORT_BITMASK (SIOCIWFIRSTPRIV+24)
-/*
- * arguments:
- * WMI_TARGET_ERROR_REPORT_BITMASK errorReportBitMask (see include/wmi.h)
- * uses: WMI_TARGET_ERROR_REPORT_BITMASK_CMDID
- */
-
-#define AR6000_IOCTL_WMI_GET_TARGET_STATS (SIOCIWFIRSTPRIV+25)
-/*
- * arguments:
- * TARGET_STATS *targetStats (see below)
- * uses: WMI_GET_STATISTICS_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SET_ASSOC_INFO (SIOCIWFIRSTPRIV+26)
-/*
- * arguments:
- * WMI_SET_ASSOC_INFO_CMD setAssocInfoCmd
- * uses: WMI_SET_ASSOC_INFO_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SET_ACCESS_PARAMS (SIOCIWFIRSTPRIV+27)
-/*
- * arguments:
- * WMI_SET_ACCESS_PARAMS_CMD setAccessParams (see include/wmi.h)
- * uses: WMI_SET_ACCESS_PARAMS_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SET_BMISS_TIME (SIOCIWFIRSTPRIV+28)
-/*
- * arguments:
- * UINT32 beaconMissTime
- * uses: WMI_SET_BMISS_TIME_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SET_DISC_TIMEOUT (SIOCIWFIRSTPRIV+29)
-/*
- * arguments:
- * WMI_DISC_TIMEOUT_CMD disconnectTimeoutCmd (see include/wmi.h)
- * uses: WMI_SET_DISC_TIMEOUT_CMDID
- */
-
-#define AR6000_IOCTL_WMI_SET_IBSS_PM_CAPS (SIOCIWFIRSTPRIV+30)
-/*
- * arguments:
- * WMI_IBSS_PM_CAPS_CMD ibssPowerMgmtCapsCmd
- * uses: WMI_SET_IBSS_PM_CAPS_CMDID
- */
-
-/*
- * There is a very small space available for driver-private
- * wireless ioctls. In order to circumvent this limitation,
- * we multiplex a bunch of ioctls (XIOCTLs) on top of a
- * single AR6000_IOCTL_EXTENDED ioctl.
- */
-#define AR6000_IOCTL_EXTENDED (SIOCIWFIRSTPRIV+31)
-
-
-/* ====BMI Extended Ioctls==== */
-
-#define AR6000_XIOCTL_BMI_DONE 1
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_BMI_DONE)
- * uses: BMI_DONE
- */
-
-#define AR6000_XIOCTL_BMI_READ_MEMORY 2
-/*
- * arguments:
- * union {
- * struct {
- * UINT32 cmd (AR6000_XIOCTL_BMI_READ_MEMORY)
- * UINT32 address
- * UINT32 length
- * }
- * char results[length]
- * }
- * uses: BMI_READ_MEMORY
- */
-
-#define AR6000_XIOCTL_BMI_WRITE_MEMORY 3
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_BMI_WRITE_MEMORY)
- * UINT32 address
- * UINT32 length
- * char data[length]
- * uses: BMI_WRITE_MEMORY
- */
-
-#define AR6000_XIOCTL_BMI_EXECUTE 4
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_BMI_EXECUTE)
- * UINT32 TargetAddress
- * UINT32 parameter
- * uses: BMI_EXECUTE
- */
-
-#define AR6000_XIOCTL_BMI_SET_APP_START 5
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_BMI_SET_APP_START)
- * UINT32 TargetAddress
- * uses: BMI_SET_APP_START
- */
-
-#define AR6000_XIOCTL_BMI_READ_SOC_REGISTER 6
-/*
- * arguments:
- * union {
- * struct {
- * UINT32 cmd (AR6000_XIOCTL_BMI_READ_SOC_REGISTER)
- * UINT32 TargetAddress, 32-bit aligned
- * }
- * UINT32 result
- * }
- * uses: BMI_READ_SOC_REGISTER
- */
-
-#define AR6000_XIOCTL_BMI_WRITE_SOC_REGISTER 7
-/*
- * arguments:
- * struct {
- * UINT32 cmd (AR6000_XIOCTL_BMI_WRITE_SOC_REGISTER)
- * UINT32 TargetAddress, 32-bit aligned
- * UINT32 newValue
- * }
- * uses: BMI_WRITE_SOC_REGISTER
- */
-
-#define AR6000_XIOCTL_BMI_TEST 8
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_BMI_TEST)
- * UINT32 address
- * UINT32 length
- * UINT32 count
- */
-
-
-
-/* Historical Host-side DataSet support */
-#define AR6000_XIOCTL_UNUSED9 9
-#define AR6000_XIOCTL_UNUSED10 10
-#define AR6000_XIOCTL_UNUSED11 11
-
-/* ====Misc Extended Ioctls==== */
-
-#define AR6000_XIOCTL_FORCE_TARGET_RESET 12
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_FORCE_TARGET_RESET)
- */
-
-
-#ifdef HTC_RAW_INTERFACE
-/* HTC Raw Interface Ioctls */
-#define AR6000_XIOCTL_HTC_RAW_OPEN 13
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_HTC_RAW_OPEN)
- */
-
-#define AR6000_XIOCTL_HTC_RAW_CLOSE 14
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_HTC_RAW_CLOSE)
- */
-
-#define AR6000_XIOCTL_HTC_RAW_READ 15
-/*
- * arguments:
- * union {
- * struct {
- * UINT32 cmd (AR6000_XIOCTL_HTC_RAW_READ)
- * UINT32 mailboxID
- * UINT32 length
- * }
- * results[length]
- * }
- */
-
-#define AR6000_XIOCTL_HTC_RAW_WRITE 16
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_HTC_RAW_WRITE)
- * UINT32 mailboxID
- * UINT32 length
- * char buffer[length]
- */
-#endif /* HTC_RAW_INTERFACE */
-
-#define AR6000_XIOCTL_CHECK_TARGET_READY 17
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_CHECK_TARGET_READY)
- */
-
-
-
-/* ====GPIO (General Purpose I/O) Extended Ioctls==== */
-
-#define AR6000_XIOCTL_GPIO_OUTPUT_SET 18
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_GPIO_OUTPUT_SET)
- * ar6000_gpio_output_set_cmd_s (see below)
- * uses: WMIX_GPIO_OUTPUT_SET_CMDID
- */
-
-#define AR6000_XIOCTL_GPIO_INPUT_GET 19
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_GPIO_INPUT_GET)
- * uses: WMIX_GPIO_INPUT_GET_CMDID
- */
-
-#define AR6000_XIOCTL_GPIO_REGISTER_SET 20
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_GPIO_REGISTER_SET)
- * ar6000_gpio_register_cmd_s (see below)
- * uses: WMIX_GPIO_REGISTER_SET_CMDID
- */
-
-#define AR6000_XIOCTL_GPIO_REGISTER_GET 21
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_GPIO_REGISTER_GET)
- * ar6000_gpio_register_cmd_s (see below)
- * uses: WMIX_GPIO_REGISTER_GET_CMDID
- */
-
-#define AR6000_XIOCTL_GPIO_INTR_ACK 22
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_GPIO_INTR_ACK)
- * ar6000_cpio_intr_ack_cmd_s (see below)
- * uses: WMIX_GPIO_INTR_ACK_CMDID
- */
-
-#define AR6000_XIOCTL_GPIO_INTR_WAIT 23
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_GPIO_INTR_WAIT)
- */
-
-
-
-/* ====more wireless commands==== */
-
-#define AR6000_XIOCTL_SET_ADHOC_BSSID 24
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_SET_ADHOC_BSSID)
- * WMI_SET_ADHOC_BSSID_CMD setAdHocBssidCmd (see include/wmi.h)
- */
-
-#define AR6000_XIOCTL_SET_OPT_MODE 25
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_SET_OPT_MODE)
- * WMI_SET_OPT_MODE_CMD setOptModeCmd (see include/wmi.h)
- * uses: WMI_SET_OPT_MODE_CMDID
- */
-
-#define AR6000_XIOCTL_OPT_SEND_FRAME 26
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_OPT_SEND_FRAME)
- * WMI_OPT_TX_FRAME_CMD optTxFrameCmd (see include/wmi.h)
- * uses: WMI_OPT_TX_FRAME_CMDID
- */
-
-#define AR6000_XIOCTL_SET_BEACON_INTVAL 27
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_SET_BEACON_INTVAL)
- * WMI_BEACON_INT_CMD beaconIntCmd (see include/wmi.h)
- * uses: WMI_SET_BEACON_INT_CMDID
- */
-
-
-#define IEEE80211_IOCTL_SETAUTHALG 28
-
-
-#define AR6000_XIOCTL_SET_VOICE_PKT_SIZE 29
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_SET_VOICE_PKT_SIZE)
- * WMI_SET_VOICE_PKT_SIZE_CMD setVoicePktSizeCmd (see include/wmi.h)
- * uses: WMI_SET_VOICE_PKT_SIZE_CMDID
- */
-
-
-#define AR6000_XIOCTL_SET_MAX_SP 30
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_SET_MAX_SP)
- * WMI_SET_MAX_SP_LEN_CMD maxSPLen(see include/wmi.h)
- * uses: WMI_SET_MAX_SP_LEN_CMDID
- */
-
-#define AR6000_XIOCTL_WMI_GET_ROAM_TBL 31
-
-#define AR6000_XIOCTL_WMI_SET_ROAM_CTRL 32
-
-#define AR6000_XIOCTRL_WMI_SET_POWERSAVE_TIMERS 33
-
-
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTRL_WMI_SET_POWERSAVE_TIMERS)
- * WMI_SET_POWERSAVE_TIMERS_CMD powerSaveTimers(see include/wmi.h)
- * WMI_SET_POWERSAVE_TIMERS_CMDID
- */
-
-#define AR6000_XIOCTRL_WMI_GET_POWER_MODE 34
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTRL_WMI_GET_POWER_MODE)
- */
-
-#define AR6000_XIOCTRL_WMI_SET_WLAN_STATE 35
-typedef enum {
- WLAN_DISABLED,
- WLAN_ENABLED
-} AR6000_WLAN_STATE;
-/*
- * arguments:
- * enable/disable
- */
-
-#define AR6000_XIOCTL_WMI_GET_ROAM_DATA 36
-
-#define AR6000_XIOCTL_WMI_SETRETRYLIMITS 37
-/*
- * arguments:
- * WMI_SET_RETRY_LIMITS_CMD ibssSetRetryLimitsCmd
- * uses: WMI_SET_RETRY_LIMITS_CMDID
- */
-
-#ifdef CONFIG_HOST_TCMD_SUPPORT
-/* ====extended commands for radio test ==== */
-
-#define AR6000_XIOCTL_TCMD_CONT_TX 38
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_TCMD_CONT_TX)
- * WMI_TCMD_CONT_TX_CMD contTxCmd (see include/wmi.h)
- * uses: WMI_TCMD_CONT_TX_CMDID
- */
-
-#define AR6000_XIOCTL_TCMD_CONT_RX 39
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_TCMD_CONT_RX)
- * WMI_TCMD_CONT_RX_CMD rxCmd (see include/wmi.h)
- * uses: WMI_TCMD_CONT_RX_CMDID
- */
-
-#define AR6000_XIOCTL_TCMD_PM 40
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_TCMD_PM)
- * WMI_TCMD_PM_CMD pmCmd (see include/wmi.h)
- * uses: WMI_TCMD_PM_CMDID
- */
-
-#endif /* CONFIG_HOST_TCMD_SUPPORT */
-
-#define AR6000_XIOCTL_WMI_STARTSCAN 41
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_WMI_STARTSCAN)
- * UINT8 scanType
- * UINT8 scanConnected
- * u32 forceFgScan
- * uses: WMI_START_SCAN_CMDID
- */
-
-#define AR6000_XIOCTL_WMI_SETFIXRATES 42
-
-#define AR6000_XIOCTL_WMI_GETFIXRATES 43
-
-
-#define AR6000_XIOCTL_WMI_SET_RSSITHRESHOLD 44
-/*
- * arguments:
- * WMI_RSSI_THRESHOLD_PARAMS_CMD thresholdParams (see include/wmi.h)
- * uses: WMI_RSSI_THRESHOLD_PARAMS_CMDID
- */
-
-#define AR6000_XIOCTL_WMI_CLR_RSSISNR 45
-/*
- * arguments:
- * WMI_CLR_RSSISNR_CMD thresholdParams (see include/wmi.h)
- * uses: WMI_CLR_RSSISNR_CMDID
- */
-
-#define AR6000_XIOCTL_WMI_SET_LQTHRESHOLD 46
-/*
- * arguments:
- * WMI_LQ_THRESHOLD_PARAMS_CMD thresholdParams (see include/wmi.h)
- * uses: WMI_LQ_THRESHOLD_PARAMS_CMDID
- */
-
-#define AR6000_XIOCTL_WMI_SET_RTS 47
-/*
- * arguments:
- * WMI_SET_RTS_MODE_CMD (see include/wmi.h)
- * uses: WMI_SET_RTS_MODE_CMDID
- */
-
-#define AR6000_XIOCTL_WMI_SET_LPREAMBLE 48
-
-#define AR6000_XIOCTL_WMI_SET_AUTHMODE 49
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_WMI_SET_AUTHMODE)
- * UINT8 mode
- * uses: WMI_SET_RECONNECT_AUTH_MODE_CMDID
- */
-
-#define AR6000_XIOCTL_WMI_SET_REASSOCMODE 50
-
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_WMI_SET_WMM)
- * UINT8 mode
- * uses: WMI_SET_WMM_CMDID
- */
-#define AR6000_XIOCTL_WMI_SET_WMM 51
-
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_WMI_SET_HB_CHALLENGE_RESP_PARAMS)
- * UINT32 frequency
- * UINT8 threshold
- */
-#define AR6000_XIOCTL_WMI_SET_HB_CHALLENGE_RESP_PARAMS 52
-
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_WMI_GET_HB_CHALLENGE_RESP)
- * UINT32 cookie
- */
-#define AR6000_XIOCTL_WMI_GET_HB_CHALLENGE_RESP 53
-
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_WMI_GET_RD)
- * UINT32 regDomain
- */
-#define AR6000_XIOCTL_WMI_GET_RD 54
-
-#define AR6000_XIOCTL_DIAG_READ 55
-
-#define AR6000_XIOCTL_DIAG_WRITE 56
-
-/*
- * arguments cmd (AR6000_XIOCTL_SET_TXOP)
- * WMI_TXOP_CFG txopEnable
- */
-#define AR6000_XIOCTL_WMI_SET_TXOP 57
-
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_USER_SETKEYS)
- * UINT32 keyOpCtrl
- * uses struct ar6000_user_setkeys_info
- */
-#define AR6000_XIOCTL_USER_SETKEYS 58
-
-#define AR6000_XIOCTL_WMI_SET_KEEPALIVE 59
-/*
- * arguments:
- * UINT8 cmd (AR6000_XIOCTL_WMI_SET_KEEPALIVE)
- * UINT8 keepaliveInterval
- * uses: WMI_SET_KEEPALIVE_CMDID
- */
-
-#define AR6000_XIOCTL_WMI_GET_KEEPALIVE 60
-/*
- * arguments:
- * UINT8 cmd (AR6000_XIOCTL_WMI_GET_KEEPALIVE)
- * UINT8 keepaliveInterval
- * u32 configured
- * uses: WMI_GET_KEEPALIVE_CMDID
- */
-
-/* ====ROM Patching Extended Ioctls==== */
-
-#define AR6000_XIOCTL_BMI_ROMPATCH_INSTALL 61
-/*
- * arguments:
- * union {
- * struct {
- * UINT32 cmd (AR6000_XIOCTL_BMI_ROMPATCH_INSTALL)
- * UINT32 ROM Address
- * UINT32 RAM Address
- * UINT32 number of bytes
- * UINT32 activate? (0 or 1)
- * }
- * u32 resulting rompatch ID
- * }
- * uses: BMI_ROMPATCH_INSTALL
- */
-
-#define AR6000_XIOCTL_BMI_ROMPATCH_UNINSTALL 62
-/*
- * arguments:
- * struct {
- * UINT32 cmd (AR6000_XIOCTL_BMI_ROMPATCH_UNINSTALL)
- * UINT32 rompatch ID
- * }
- * uses: BMI_ROMPATCH_UNINSTALL
- */
-
-#define AR6000_XIOCTL_BMI_ROMPATCH_ACTIVATE 63
-/*
- * arguments:
- * struct {
- * UINT32 cmd (AR6000_XIOCTL_BMI_ROMPATCH_ACTIVATE)
- * UINT32 rompatch count
- * UINT32 rompatch IDs[rompatch count]
- * }
- * uses: BMI_ROMPATCH_ACTIVATE
- */
-
-#define AR6000_XIOCTL_BMI_ROMPATCH_DEACTIVATE 64
-/*
- * arguments:
- * struct {
- * UINT32 cmd (AR6000_XIOCTL_BMI_ROMPATCH_DEACTIVATE)
- * UINT32 rompatch count
- * UINT32 rompatch IDs[rompatch count]
- * }
- * uses: BMI_ROMPATCH_DEACTIVATE
- */
-
-#define AR6000_XIOCTL_WMI_SET_APPIE 65
-/*
- * arguments:
- * struct {
- * UINT32 cmd (AR6000_XIOCTL_WMI_SET_APPIE)
- * UINT32 app_frmtype;
- * UINT32 app_buflen;
- * UINT8 app_buf[];
- * }
- */
-#define AR6000_XIOCTL_WMI_SET_MGMT_FRM_RX_FILTER 66
-/*
- * arguments:
- * u32 filter_type;
- */
-
-#define AR6000_XIOCTL_DBGLOG_CFG_MODULE 67
-
-#define AR6000_XIOCTL_DBGLOG_GET_DEBUG_LOGS 68
-
-#define AR6000_XIOCTL_WMI_SET_WSC_STATUS 70
-/*
- * arguments:
- * u32 wsc_status;
- * (WSC_REG_INACTIVE or WSC_REG_ACTIVE)
- */
-
-/*
- * arguments:
- * struct {
- * u8 streamType;
- * u8 status;
- * }
- * uses: WMI_SET_BT_STATUS_CMDID
- */
-#define AR6000_XIOCTL_WMI_SET_BT_STATUS 71
-
-/*
- * arguments:
- * struct {
- * u8 paramType;
- * union {
- * u8 noSCOPkts;
- * BT_PARAMS_A2DP a2dpParams;
- * BT_COEX_REGS regs;
- * };
- * }
- * uses: WMI_SET_BT_PARAM_CMDID
- */
-#define AR6000_XIOCTL_WMI_SET_BT_PARAMS 72
-
-#define AR6000_XIOCTL_WMI_SET_HOST_SLEEP_MODE 73
-#define AR6000_XIOCTL_WMI_SET_WOW_MODE 74
-#define AR6000_XIOCTL_WMI_GET_WOW_LIST 75
-#define AR6000_XIOCTL_WMI_ADD_WOW_PATTERN 76
-#define AR6000_XIOCTL_WMI_DEL_WOW_PATTERN 77
-
-
-
-#define AR6000_XIOCTL_TARGET_INFO 78
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_TARGET_INFO)
- * u32 TargetVersion (returned)
- * u32 TargetType (returned)
- * (See also bmi_msg.h target_ver and target_type)
- */
-
-#define AR6000_XIOCTL_DUMP_HTC_CREDIT_STATE 79
-/*
- * arguments:
- * none
- */
-
-#define AR6000_XIOCTL_TRAFFIC_ACTIVITY_CHANGE 80
-/*
- * This ioctl is used to emulate traffic activity
- * timeouts. Activity/inactivity will trigger the driver
- * to re-balance credits.
- *
- * arguments:
- * ar6000_traffic_activity_change
- */
-
-#define AR6000_XIOCTL_WMI_SET_CONNECT_CTRL_FLAGS 81
-/*
- * This ioctl is used to set the connect control flags
- *
- * arguments:
- * u32 connectCtrlFlags
- */
-
-#define AR6000_XIOCTL_WMI_SET_AKMP_PARAMS 82
-/*
- * This IOCTL sets any Authentication,Key Management and Protection
- * related parameters. This is used along with the information set in
- * Connect Command.
- * Currently this enables Multiple PMKIDs to an AP.
- *
- * arguments:
- * struct {
- * u32 akmpInfo;
- * }
- * uses: WMI_SET_AKMP_PARAMS_CMD
- */
-
-#define AR6000_XIOCTL_WMI_GET_PMKID_LIST 83
-
-#define AR6000_XIOCTL_WMI_SET_PMKID_LIST 84
-/*
- * This IOCTL is used to set a list of PMKIDs. This list of
- * PMKIDs is used in the [Re]AssocReq Frame. This list is used
- * only if the MultiPMKID option is enabled via the
- * AR6000_XIOCTL_WMI_SET_AKMP_PARAMS IOCTL.
- *
- * arguments:
- * struct {
- * u32 numPMKID;
- * WMI_PMKID pmkidList[WMI_MAX_PMKID_CACHE];
- * }
- * uses: WMI_SET_PMKIDLIST_CMD
- */
-
-#define AR6000_XIOCTL_WMI_SET_PARAMS 85
-#define AR6000_XIOCTL_WMI_SET_MCAST_FILTER 86
-#define AR6000_XIOCTL_WMI_DEL_MCAST_FILTER 87
-
-
-/* Historical DSETPATCH support for INI patches */
-#define AR6000_XIOCTL_UNUSED90 90
-
-
-/* Support LZ-compressed firmware download */
-#define AR6000_XIOCTL_BMI_LZ_STREAM_START 91
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_BMI_LZ_STREAM_START)
- * UINT32 address
- * uses: BMI_LZ_STREAM_START
- */
-
-#define AR6000_XIOCTL_BMI_LZ_DATA 92
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_BMI_LZ_DATA)
- * UINT32 length
- * char data[length]
- * uses: BMI_LZ_DATA
- */
-
-#define AR6000_XIOCTL_PROF_CFG 93
-/*
- * arguments:
- * u32 period
- * u32 nbins
- */
-
-#define AR6000_XIOCTL_PROF_ADDR_SET 94
-/*
- * arguments:
- * u32 Target address
- */
-
-#define AR6000_XIOCTL_PROF_START 95
-
-#define AR6000_XIOCTL_PROF_STOP 96
-
-#define AR6000_XIOCTL_PROF_COUNT_GET 97
-
-#define AR6000_XIOCTL_WMI_ABORT_SCAN 98
-
-/*
- * AP mode
- */
-#define AR6000_XIOCTL_AP_GET_STA_LIST 99
-
-#define AR6000_XIOCTL_AP_HIDDEN_SSID 100
-
-#define AR6000_XIOCTL_AP_SET_NUM_STA 101
-
-#define AR6000_XIOCTL_AP_SET_ACL_MAC 102
-
-#define AR6000_XIOCTL_AP_GET_ACL_LIST 103
-
-#define AR6000_XIOCTL_AP_COMMIT_CONFIG 104
-
-#define IEEE80211_IOCTL_GETWPAIE 105
-
-#define AR6000_XIOCTL_AP_CONN_INACT_TIME 106
-
-#define AR6000_XIOCTL_AP_PROT_SCAN_TIME 107
-
-#define AR6000_XIOCTL_AP_SET_COUNTRY 108
-
-#define AR6000_XIOCTL_AP_SET_DTIM 109
-
-
-
-
-#define AR6000_XIOCTL_WMI_TARGET_EVENT_REPORT 110
-
-#define AR6000_XIOCTL_SET_IP 111
-
-#define AR6000_XIOCTL_AP_SET_ACL_POLICY 112
-
-#define AR6000_XIOCTL_AP_INTRA_BSS_COMM 113
-
-#define AR6000_XIOCTL_DUMP_MODULE_DEBUG_INFO 114
-
-#define AR6000_XIOCTL_MODULE_DEBUG_SET_MASK 115
-
-#define AR6000_XIOCTL_MODULE_DEBUG_GET_MASK 116
-
-#define AR6000_XIOCTL_DUMP_RCV_AGGR_STATS 117
-
-#define AR6000_XIOCTL_SET_HT_CAP 118
-
-#define AR6000_XIOCTL_SET_HT_OP 119
-
-#define AR6000_XIOCTL_AP_GET_STAT 120
-
-#define AR6000_XIOCTL_SET_TX_SELECT_RATES 121
-
-#define AR6000_XIOCTL_SETUP_AGGR 122
-
-#define AR6000_XIOCTL_ALLOW_AGGR 123
-
-#define AR6000_XIOCTL_AP_GET_HIDDEN_SSID 124
-
-#define AR6000_XIOCTL_AP_GET_COUNTRY 125
-
-#define AR6000_XIOCTL_AP_GET_WMODE 126
-
-#define AR6000_XIOCTL_AP_GET_DTIM 127
-
-#define AR6000_XIOCTL_AP_GET_BINTVL 128
-
-#define AR6000_XIOCTL_AP_GET_RTS 129
-
-#define AR6000_XIOCTL_DELE_AGGR 130
-
-#define AR6000_XIOCTL_FETCH_TARGET_REGS 131
-
-#define AR6000_XIOCTL_HCI_CMD 132
-
-#define AR6000_XIOCTL_ACL_DATA 133 /* used to be used for PAL */
-
-#define AR6000_XIOCTL_WLAN_CONN_PRECEDENCE 134
-
-#define AR6000_XIOCTL_AP_SET_11BG_RATESET 135
-
-/*
- * arguments:
- * WMI_AP_PS_CMD apPsCmd
- * uses: WMI_AP_PS_CMDID
- */
-
-#define AR6000_XIOCTL_WMI_SET_AP_PS 136
-
-#define AR6000_XIOCTL_WMI_MCAST_FILTER 137
-
-#define AR6000_XIOCTL_WMI_SET_BTCOEX_FE_ANT 138
-
-#define AR6000_XIOCTL_WMI_SET_BTCOEX_COLOCATED_BT_DEV 139
-
-#define AR6000_XIOCTL_WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG 140
-
-#define AR6000_XIOCTL_WMI_SET_BTCOEX_SCO_CONFIG 141
-
-#define AR6000_XIOCTL_WMI_SET_BTCOEX_A2DP_CONFIG 142
-
-#define AR6000_XIOCTL_WMI_SET_BTCOEX_ACLCOEX_CONFIG 143
-
-#define AR6000_XIOCTL_WMI_SET_BTCOEX_DEBUG 144
-
-#define AR6000_XIOCTL_WMI_SET_BT_OPERATING_STATUS 145
-
-#define AR6000_XIOCTL_WMI_GET_BTCOEX_CONFIG 146
-
-#define AR6000_XIOCTL_WMI_GET_BTCOEX_STATS 147
-/*
- * arguments:
- * UINT32 cmd (AR6000_XIOCTL_WMI_SET_QOS_SUPP)
- * UINT8 mode
- * uses: WMI_SET_QOS_SUPP_CMDID
- */
-#define AR6000_XIOCTL_WMI_SET_QOS_SUPP 148
-
-#define AR6000_XIOCTL_GET_WLAN_SLEEP_STATE 149
-
-#define AR6000_XIOCTL_SET_BT_HW_POWER_STATE 150
-
-#define AR6000_XIOCTL_GET_BT_HW_POWER_STATE 151
-
-#define AR6000_XIOCTL_ADD_AP_INTERFACE 152
-
-#define AR6000_XIOCTL_REMOVE_AP_INTERFACE 153
-
-#define AR6000_XIOCTL_WMI_SET_TX_SGI_PARAM 154
-
-#define AR6000_XIOCTL_WMI_SET_EXCESS_TX_RETRY_THRES 161
-
-/* used by AR6000_IOCTL_WMI_GETREV */
-struct ar6000_version {
- u32 host_ver;
- u32 target_ver;
- u32 wlan_ver;
- u32 abi_ver;
-};
-
-/* used by AR6000_IOCTL_WMI_GET_QOS_QUEUE */
-struct ar6000_queuereq {
- u8 trafficClass;
- u16 activeTsids;
-};
-
-/* used by AR6000_IOCTL_WMI_GET_TARGET_STATS */
-typedef struct targetStats_t {
- u64 tx_packets;
- u64 tx_bytes;
- u64 tx_unicast_pkts;
- u64 tx_unicast_bytes;
- u64 tx_multicast_pkts;
- u64 tx_multicast_bytes;
- u64 tx_broadcast_pkts;
- u64 tx_broadcast_bytes;
- u64 tx_rts_success_cnt;
- u64 tx_packet_per_ac[4];
-
- u64 tx_errors;
- u64 tx_failed_cnt;
- u64 tx_retry_cnt;
- u64 tx_mult_retry_cnt;
- u64 tx_rts_fail_cnt;
-
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_unicast_pkts;
- u64 rx_unicast_bytes;
- u64 rx_multicast_pkts;
- u64 rx_multicast_bytes;
- u64 rx_broadcast_pkts;
- u64 rx_broadcast_bytes;
- u64 rx_fragment_pkt;
-
- u64 rx_errors;
- u64 rx_crcerr;
- u64 rx_key_cache_miss;
- u64 rx_decrypt_err;
- u64 rx_duplicate_frames;
-
- u64 tkip_local_mic_failure;
- u64 tkip_counter_measures_invoked;
- u64 tkip_replays;
- u64 tkip_format_errors;
- u64 ccmp_format_errors;
- u64 ccmp_replays;
-
- u64 power_save_failure_cnt;
-
- u64 cs_bmiss_cnt;
- u64 cs_lowRssi_cnt;
- u64 cs_connect_cnt;
- u64 cs_disconnect_cnt;
-
- s32 tx_unicast_rate;
- s32 rx_unicast_rate;
-
- u32 lq_val;
-
- u32 wow_num_pkts_dropped;
- u16 wow_num_events_discarded;
-
- s16 noise_floor_calibation;
- s16 cs_rssi;
- s16 cs_aveBeacon_rssi;
- u8 cs_aveBeacon_snr;
- u8 cs_lastRoam_msec;
- u8 cs_snr;
-
- u8 wow_num_host_pkt_wakeups;
- u8 wow_num_host_event_wakeups;
-
- u32 arp_received;
- u32 arp_matched;
- u32 arp_replied;
-}TARGET_STATS;
-
-typedef struct targetStats_cmd_t {
- TARGET_STATS targetStats;
- int clearStats;
-} TARGET_STATS_CMD;
-
-/* used by AR6000_XIOCTL_USER_SETKEYS */
-
-/*
- * Setting this bit to 1 doesnot initialize the RSC on the firmware
- */
-#define AR6000_XIOCTL_USER_SETKEYS_RSC_CTRL 1
-#define AR6000_USER_SETKEYS_RSC_UNCHANGED 0x00000002
-
-struct ar6000_user_setkeys_info {
- u32 keyOpCtrl; /* Bit Map of Key Mgmt Ctrl Flags */
-}; /* XXX: unused !? */
-
-/* used by AR6000_XIOCTL_GPIO_OUTPUT_SET */
-struct ar6000_gpio_output_set_cmd_s {
- u32 set_mask;
- u32 clear_mask;
- u32 enable_mask;
- u32 disable_mask;
-};
-
-/*
- * used by AR6000_XIOCTL_GPIO_REGISTER_GET and AR6000_XIOCTL_GPIO_REGISTER_SET
- */
-struct ar6000_gpio_register_cmd_s {
- u32 gpioreg_id;
- u32 value;
-};
-
-/* used by AR6000_XIOCTL_GPIO_INTR_ACK */
-struct ar6000_gpio_intr_ack_cmd_s {
- u32 ack_mask;
-};
-
-/* used by AR6000_XIOCTL_GPIO_INTR_WAIT */
-struct ar6000_gpio_intr_wait_cmd_s {
- u32 intr_mask;
- u32 input_values;
-};
-
-/* used by the AR6000_XIOCTL_DBGLOG_CFG_MODULE */
-typedef struct ar6000_dbglog_module_config_s {
- u32 valid;
- u16 mmask;
- u16 tsr;
- u32 rep;
- u16 size;
-} DBGLOG_MODULE_CONFIG;
-
-typedef struct user_rssi_thold_t {
- s16 tag;
- s16 rssi;
-} USER_RSSI_THOLD;
-
-typedef struct user_rssi_params_t {
- u8 weight;
- u32 pollTime;
- USER_RSSI_THOLD tholds[12];
-} USER_RSSI_PARAMS;
-
-typedef struct ar6000_get_btcoex_config_cmd_t{
- u32 btProfileType;
- u32 linkId;
- }AR6000_GET_BTCOEX_CONFIG_CMD;
-
-typedef struct ar6000_btcoex_config_t {
- AR6000_GET_BTCOEX_CONFIG_CMD configCmd;
- u32 *configEvent;
-} AR6000_BTCOEX_CONFIG;
-
-typedef struct ar6000_btcoex_stats_t {
- u32 *statsEvent;
- }AR6000_BTCOEX_STATS;
-/*
- * Host driver may have some config parameters. Typically, these
- * config params are one time config parameters. These could
- * correspond to any of the underlying modules. Host driver exposes
- * an api for the underlying modules to get this config.
- */
-#define AR6000_DRIVER_CFG_BASE 0x8000
-
-/* Should driver perform wlan node caching? */
-#define AR6000_DRIVER_CFG_GET_WLANNODECACHING 0x8001
-/*Should we log raw WMI msgs */
-#define AR6000_DRIVER_CFG_LOG_RAW_WMI_MSGS 0x8002
-
-/* used by AR6000_XIOCTL_DIAG_READ & AR6000_XIOCTL_DIAG_WRITE */
-struct ar6000_diag_window_cmd_s {
- unsigned int addr;
- unsigned int value;
-};
-
-
-struct ar6000_traffic_activity_change {
- u32 StreamID; /* stream ID to indicate activity change */
- u32 Active; /* active (1) or inactive (0) */
-};
-
-/* Used with AR6000_XIOCTL_PROF_COUNT_GET */
-struct prof_count_s {
- u32 addr; /* bin start address */
- u32 count; /* hit count */
-};
-
-
-/* used by AR6000_XIOCTL_MODULE_DEBUG_SET_MASK */
-/* AR6000_XIOCTL_MODULE_DEBUG_GET_MASK */
-/* AR6000_XIOCTL_DUMP_MODULE_DEBUG_INFO */
-struct drv_debug_module_s {
- char modulename[128]; /* name of module */
- u32 mask; /* new mask to set .. or .. current mask */
-};
-
-
-/* All HCI related rx events are sent up to the host app
- * via a wmi event id. It can contain ACL data or HCI event,
- * based on which it will be de-multiplexed.
- */
-typedef enum {
- PAL_HCI_EVENT = 0,
- PAL_HCI_RX_DATA,
-} WMI_PAL_EVENT_INFO;
-
-
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/drivers/staging/ath6kl/os/linux/include/cfg80211.h b/drivers/staging/ath6kl/os/linux/include/cfg80211.h
deleted file mode 100644
index d5253207b19..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/cfg80211.h
+++ /dev/null
@@ -1,61 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#ifndef _AR6K_CFG80211_H_
-#define _AR6K_CFG80211_H_
-
-struct wireless_dev *ar6k_cfg80211_init(struct device *dev);
-void ar6k_cfg80211_deinit(struct ar6_softc *ar);
-
-void ar6k_cfg80211_scanComplete_event(struct ar6_softc *ar, int status);
-
-void ar6k_cfg80211_connect_event(struct ar6_softc *ar, u16 channel,
- u8 *bssid, u16 listenInterval,
- u16 beaconInterval,NETWORK_TYPE networkType,
- u8 beaconIeLen, u8 assocReqLen,
- u8 assocRespLen, u8 *assocInfo);
-
-void ar6k_cfg80211_disconnect_event(struct ar6_softc *ar, u8 reason,
- u8 *bssid, u8 assocRespLen,
- u8 *assocInfo, u16 protocolReasonStatus);
-
-void ar6k_cfg80211_tkip_micerr_event(struct ar6_softc *ar, u8 keyid, bool ismcast);
-
-#ifdef CONFIG_NL80211_TESTMODE
-void ar6000_testmode_rx_report_event(struct ar6_softc *ar, void *buf,
- int buf_len);
-#else
-static inline void ar6000_testmode_rx_report_event(struct ar6_softc *ar,
- void *buf, int buf_len)
-{
-}
-#endif
-
-
-#endif /* _AR6K_CFG80211_H_ */
-
-
-
-
-
-
diff --git a/drivers/staging/ath6kl/os/linux/include/config_linux.h b/drivers/staging/ath6kl/os/linux/include/config_linux.h
deleted file mode 100644
index dbbe1a00b92..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/config_linux.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#ifndef _CONFIG_LINUX_H_
-#define _CONFIG_LINUX_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Host side Test Command support
- */
-#define CONFIG_HOST_TCMD_SUPPORT
-
-#define USE_4BYTE_REGISTER_ACCESS
-
-/* Host-side support for Target-side profiling */
-#undef CONFIG_TARGET_PROFILE_SUPPORT
-
-/* IP/TCP checksum offload */
-/* Checksum offload is currently not supported for 64 bit platforms */
-#ifndef __LP64__
-#define CONFIG_CHECKSUM_OFFLOAD
-#endif /* __LP64__ */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/drivers/staging/ath6kl/os/linux/include/debug_linux.h b/drivers/staging/ath6kl/os/linux/include/debug_linux.h
deleted file mode 100644
index b8dba52badc..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/debug_linux.h
+++ /dev/null
@@ -1,50 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#ifndef _DEBUG_LINUX_H_
-#define _DEBUG_LINUX_H_
-
- /* macro to remove parens */
-#define ATH_PRINTX_ARG(arg...) arg
-
-#ifdef DEBUG
- /* NOTE: the AR_DEBUG_PRINTF macro is defined here to handle special handling of variable arg macros
- * which may be compiler dependent. */
-#define AR_DEBUG_PRINTF(mask, args) do { \
- if (GET_ATH_MODULE_DEBUG_VAR_MASK(ATH_MODULE_NAME) & (mask)) { \
- A_LOGGER(mask, ATH_MODULE_NAME, ATH_PRINTX_ARG args); \
- } \
-} while (0)
-#else
- /* on non-debug builds, keep in error and warning messages in the driver, all other
- * message tracing will get compiled out */
-#define AR_DEBUG_PRINTF(mask, args) \
- if ((mask) & (ATH_DEBUG_ERR | ATH_DEBUG_WARN)) { A_PRINTF(ATH_PRINTX_ARG args); }
-
-#endif
-
- /* compile specific macro to get the function name string */
-#define _A_FUNCNAME_ __func__
-
-
-#endif /* _DEBUG_LINUX_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/export_hci_transport.h b/drivers/staging/ath6kl/os/linux/include/export_hci_transport.h
deleted file mode 100644
index 74f98618334..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/export_hci_transport.h
+++ /dev/null
@@ -1,76 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2009-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// HCI bridge implementation
-//
-// Author(s): ="Atheros"
-//==============================================================================
-
-#include "hci_transport_api.h"
-#include "common_drv.h"
-
-extern HCI_TRANSPORT_HANDLE (*_HCI_TransportAttach)(void *HTCHandle, struct hci_transport_config_info *pInfo);
-extern void (*_HCI_TransportDetach)(HCI_TRANSPORT_HANDLE HciTrans);
-extern int (*_HCI_TransportAddReceivePkts)(HCI_TRANSPORT_HANDLE HciTrans, struct htc_packet_queue *pQueue);
-extern int (*_HCI_TransportSendPkt)(HCI_TRANSPORT_HANDLE HciTrans, struct htc_packet *pPacket, bool Synchronous);
-extern void (*_HCI_TransportStop)(HCI_TRANSPORT_HANDLE HciTrans);
-extern int (*_HCI_TransportStart)(HCI_TRANSPORT_HANDLE HciTrans);
-extern int (*_HCI_TransportEnableDisableAsyncRecv)(HCI_TRANSPORT_HANDLE HciTrans, bool Enable);
-extern int (*_HCI_TransportRecvHCIEventSync)(HCI_TRANSPORT_HANDLE HciTrans,
- struct htc_packet *pPacket,
- int MaxPollMS);
-extern int (*_HCI_TransportSetBaudRate)(HCI_TRANSPORT_HANDLE HciTrans, u32 Baud);
-extern int (*_HCI_TransportEnablePowerMgmt)(HCI_TRANSPORT_HANDLE HciTrans, bool Enable);
-
-
-#define HCI_TransportAttach(HTCHandle, pInfo) \
- _HCI_TransportAttach((HTCHandle), (pInfo))
-#define HCI_TransportDetach(HciTrans) \
- _HCI_TransportDetach(HciTrans)
-#define HCI_TransportAddReceivePkts(HciTrans, pQueue) \
- _HCI_TransportAddReceivePkts((HciTrans), (pQueue))
-#define HCI_TransportSendPkt(HciTrans, pPacket, Synchronous) \
- _HCI_TransportSendPkt((HciTrans), (pPacket), (Synchronous))
-#define HCI_TransportStop(HciTrans) \
- _HCI_TransportStop((HciTrans))
-#define HCI_TransportStart(HciTrans) \
- _HCI_TransportStart((HciTrans))
-#define HCI_TransportEnableDisableAsyncRecv(HciTrans, Enable) \
- _HCI_TransportEnableDisableAsyncRecv((HciTrans), (Enable))
-#define HCI_TransportRecvHCIEventSync(HciTrans, pPacket, MaxPollMS) \
- _HCI_TransportRecvHCIEventSync((HciTrans), (pPacket), (MaxPollMS))
-#define HCI_TransportSetBaudRate(HciTrans, Baud) \
- _HCI_TransportSetBaudRate((HciTrans), (Baud))
-#define HCI_TransportEnablePowerMgmt(HciTrans, Enable) \
- _HCI_TransportEnablePowerMgmt((HciTrans), (Enable))
-
-
-extern int ar6000_register_hci_transport(struct hci_transport_callbacks *hciTransCallbacks);
-
-extern int ar6000_get_hif_dev(struct hif_device *device, void *config);
-
-extern int ar6000_set_uart_config(struct hif_device *hifDevice, u32 scale, u32 step);
-
-/* get core clock register settings
- * data: 0 - 40/44MHz
- * 1 - 80/88MHz
- * where (5G band/2.4G band)
- * assume 2.4G band for now
- */
-extern int ar6000_get_core_clock_config(struct hif_device *hifDevice, u32 *data);
diff --git a/drivers/staging/ath6kl/os/linux/include/ieee80211_ioctl.h b/drivers/staging/ath6kl/os/linux/include/ieee80211_ioctl.h
deleted file mode 100644
index e6e96de3fc6..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/ieee80211_ioctl.h
+++ /dev/null
@@ -1,177 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#ifndef _IEEE80211_IOCTL_H_
-#define _IEEE80211_IOCTL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Extracted from the MADWIFI net80211/ieee80211_ioctl.h
- */
-
-/*
- * WPA/RSN get/set key request. Specify the key/cipher
- * type and whether the key is to be used for sending and/or
- * receiving. The key index should be set only when working
- * with global keys (use IEEE80211_KEYIX_NONE for ``no index'').
- * Otherwise a unicast/pairwise key is specified by the bssid
- * (on a station) or mac address (on an ap). They key length
- * must include any MIC key data; otherwise it should be no
- more than IEEE80211_KEYBUF_SIZE.
- */
-struct ieee80211req_key {
- u_int8_t ik_type; /* key/cipher type */
- u_int8_t ik_pad;
- u_int16_t ik_keyix; /* key index */
- u_int8_t ik_keylen; /* key length in bytes */
- u_int8_t ik_flags;
-#define IEEE80211_KEY_XMIT 0x01
-#define IEEE80211_KEY_RECV 0x02
-#define IEEE80211_KEY_DEFAULT 0x80 /* default xmit key */
- u_int8_t ik_macaddr[IEEE80211_ADDR_LEN];
- u_int64_t ik_keyrsc; /* key receive sequence counter */
- u_int64_t ik_keytsc; /* key transmit sequence counter */
- u_int8_t ik_keydata[IEEE80211_KEYBUF_SIZE+IEEE80211_MICBUF_SIZE];
-};
-/*
- * Delete a key either by index or address. Set the index
- * to IEEE80211_KEYIX_NONE when deleting a unicast key.
- */
-struct ieee80211req_del_key {
- u_int8_t idk_keyix; /* key index */
- u_int8_t idk_macaddr[IEEE80211_ADDR_LEN];
-};
-/*
- * MLME state manipulation request. IEEE80211_MLME_ASSOC
- * only makes sense when operating as a station. The other
- * requests can be used when operating as a station or an
- * ap (to effect a station).
- */
-struct ieee80211req_mlme {
- u_int8_t im_op; /* operation to perform */
-#define IEEE80211_MLME_ASSOC 1 /* associate station */
-#define IEEE80211_MLME_DISASSOC 2 /* disassociate station */
-#define IEEE80211_MLME_DEAUTH 3 /* deauthenticate station */
-#define IEEE80211_MLME_AUTHORIZE 4 /* authorize station */
-#define IEEE80211_MLME_UNAUTHORIZE 5 /* unauthorize station */
- u_int16_t im_reason; /* 802.11 reason code */
- u_int8_t im_macaddr[IEEE80211_ADDR_LEN];
-};
-
-struct ieee80211req_addpmkid {
- u_int8_t pi_bssid[IEEE80211_ADDR_LEN];
- u_int8_t pi_enable;
- u_int8_t pi_pmkid[16];
-};
-
-#define AUTH_ALG_OPEN_SYSTEM 0x01
-#define AUTH_ALG_SHARED_KEY 0x02
-#define AUTH_ALG_LEAP 0x04
-
-struct ieee80211req_authalg {
- u_int8_t auth_alg;
-};
-
-/*
- * Request to add an IE to a Management Frame
- */
-enum{
- IEEE80211_APPIE_FRAME_BEACON = 0,
- IEEE80211_APPIE_FRAME_PROBE_REQ = 1,
- IEEE80211_APPIE_FRAME_PROBE_RESP = 2,
- IEEE80211_APPIE_FRAME_ASSOC_REQ = 3,
- IEEE80211_APPIE_FRAME_ASSOC_RESP = 4,
- IEEE80211_APPIE_NUM_OF_FRAME = 5
-};
-
-/*
- * The Maximum length of the IE that can be added to a Management frame
- */
-#define IEEE80211_APPIE_FRAME_MAX_LEN 200
-
-struct ieee80211req_getset_appiebuf {
- u_int32_t app_frmtype; /* management frame type for which buffer is added */
- u_int32_t app_buflen; /*application supplied buffer length */
- u_int8_t app_buf[];
-};
-
-/*
- * The following definitions are used by an application to set filter
- * for receiving management frames
- */
-enum {
- IEEE80211_FILTER_TYPE_BEACON = 0x1,
- IEEE80211_FILTER_TYPE_PROBE_REQ = 0x2,
- IEEE80211_FILTER_TYPE_PROBE_RESP = 0x4,
- IEEE80211_FILTER_TYPE_ASSOC_REQ = 0x8,
- IEEE80211_FILTER_TYPE_ASSOC_RESP = 0x10,
- IEEE80211_FILTER_TYPE_AUTH = 0x20,
- IEEE80211_FILTER_TYPE_DEAUTH = 0x40,
- IEEE80211_FILTER_TYPE_DISASSOC = 0x80,
- IEEE80211_FILTER_TYPE_ALL = 0xFF /* used to check the valid filter bits */
-};
-
-struct ieee80211req_set_filter {
- u_int32_t app_filterype; /* management frame filter type */
-};
-
-enum {
- IEEE80211_PARAM_AUTHMODE = 3, /* Authentication Mode */
- IEEE80211_PARAM_MCASTCIPHER = 5,
- IEEE80211_PARAM_MCASTKEYLEN = 6, /* multicast key length */
- IEEE80211_PARAM_UCASTCIPHER = 8,
- IEEE80211_PARAM_UCASTKEYLEN = 9, /* unicast key length */
- IEEE80211_PARAM_WPA = 10, /* WPA mode (0,1,2) */
- IEEE80211_PARAM_ROAMING = 12, /* roaming mode */
- IEEE80211_PARAM_PRIVACY = 13, /* privacy invoked */
- IEEE80211_PARAM_COUNTERMEASURES = 14, /* WPA/TKIP countermeasures */
- IEEE80211_PARAM_DROPUNENCRYPTED = 15, /* discard unencrypted frames */
- IEEE80211_PARAM_WAPI = 16, /* WAPI policy from wapid */
-};
-
-/*
- * Values for IEEE80211_PARAM_WPA
- */
-#define WPA_MODE_WPA1 1
-#define WPA_MODE_WPA2 2
-#define WPA_MODE_AUTO 3
-#define WPA_MODE_NONE 4
-
-struct ieee80211req_wpaie {
- u_int8_t wpa_macaddr[IEEE80211_ADDR_LEN];
- u_int8_t wpa_ie[IEEE80211_MAX_IE];
- u_int8_t rsn_ie[IEEE80211_MAX_IE];
-};
-
-#ifndef IW_ENCODE_ALG_PMK
-#define IW_ENCODE_ALG_PMK 4
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _IEEE80211_IOCTL_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/osapi_linux.h b/drivers/staging/ath6kl/os/linux/include/osapi_linux.h
deleted file mode 100644
index 41f43730772..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/osapi_linux.h
+++ /dev/null
@@ -1,339 +0,0 @@
-//------------------------------------------------------------------------------
-// This file contains the definitions of the basic atheros data types.
-// It is used to map the data types in atheros files to a platform specific
-// type.
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#ifndef _OSAPI_LINUX_H_
-#define _OSAPI_LINUX_H_
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/jiffies.h>
-#include <linux/timer.h>
-#include <linux/delay.h>
-#include <linux/wait.h>
-#include <linux/semaphore.h>
-#include <linux/cache.h>
-
-#ifdef __GNUC__
-#define __ATTRIB_PACK __attribute__ ((packed))
-#define __ATTRIB_PRINTF __attribute__ ((format (printf, 1, 2)))
-#define __ATTRIB_NORETURN __attribute__ ((noreturn))
-#ifndef INLINE
-#define INLINE __inline__
-#endif
-#else /* Not GCC */
-#define __ATTRIB_PACK
-#define __ATTRIB_PRINTF
-#define __ATTRIB_NORETURN
-#ifndef INLINE
-#define INLINE __inline
-#endif
-#endif /* End __GNUC__ */
-
-#define PREPACK
-#define POSTPACK __ATTRIB_PACK
-
-/*
- * Endianes macros
- */
-#define A_BE2CPU8(x) ntohb(x)
-#define A_BE2CPU16(x) ntohs(x)
-#define A_BE2CPU32(x) ntohl(x)
-
-#define A_LE2CPU8(x) (x)
-#define A_LE2CPU16(x) (x)
-#define A_LE2CPU32(x) (x)
-
-#define A_CPU2BE8(x) htonb(x)
-#define A_CPU2BE16(x) htons(x)
-#define A_CPU2BE32(x) htonl(x)
-
-#define A_MEMZERO(addr, len) memset(addr, 0, len)
-#define A_MALLOC(size) kmalloc((size), GFP_KERNEL)
-#define A_MALLOC_NOWAIT(size) kmalloc((size), GFP_ATOMIC)
-
-#define A_LOGGER(mask, mod, args...) printk(KERN_ALERT args)
-#define A_PRINTF(args...) printk(KERN_ALERT args)
-
-#define A_PRINTF_LOG(args...) printk(args)
-#define A_SPRINTF(buf, args...) sprintf (buf, args)
-
-/* Mutual Exclusion */
-typedef spinlock_t A_MUTEX_T;
-#define A_MUTEX_INIT(mutex) spin_lock_init(mutex)
-#define A_MUTEX_LOCK(mutex) spin_lock_bh(mutex)
-#define A_MUTEX_UNLOCK(mutex) spin_unlock_bh(mutex)
-#define A_IS_MUTEX_VALID(mutex) true /* okay to return true, since A_MUTEX_DELETE does nothing */
-#define A_MUTEX_DELETE(mutex) /* spin locks are not kernel resources so nothing to free.. */
-
-/* Get current time in ms adding a constant offset (in ms) */
-#define A_GET_MS(offset) \
- (((jiffies / HZ) * 1000) + (offset))
-
-/*
- * Timer Functions
- */
-#define A_MDELAY(msecs) mdelay(msecs)
-typedef struct timer_list A_TIMER;
-
-#define A_INIT_TIMER(pTimer, pFunction, pArg) do { \
- init_timer(pTimer); \
- (pTimer)->function = (pFunction); \
- (pTimer)->data = (unsigned long)(pArg); \
-} while (0)
-
-/*
- * Start a Timer that elapses after 'periodMSec' milli-seconds
- * Support is provided for a one-shot timer. The 'repeatFlag' is
- * ignored.
- */
-#define A_TIMEOUT_MS(pTimer, periodMSec, repeatFlag) do { \
- if (repeatFlag) { \
- printk("\n" __FILE__ ":%d: Timer Repeat requested\n",__LINE__); \
- panic("Timer Repeat"); \
- } \
- mod_timer((pTimer), jiffies + HZ * (periodMSec) / 1000); \
-} while (0)
-
-/*
- * Cancel the Timer.
- */
-#define A_UNTIMEOUT(pTimer) do { \
- del_timer((pTimer)); \
-} while (0)
-
-#define A_DELETE_TIMER(pTimer) do { \
-} while (0)
-
-/*
- * Wait Queue related functions
- */
-typedef wait_queue_head_t A_WAITQUEUE_HEAD;
-#define A_INIT_WAITQUEUE_HEAD(head) init_waitqueue_head(head)
-#ifndef wait_event_interruptible_timeout
-#define __wait_event_interruptible_timeout(wq, condition, ret) \
-do { \
- wait_queue_t __wait; \
- init_waitqueue_entry(&__wait, current); \
- \
- add_wait_queue(&wq, &__wait); \
- for (;;) { \
- set_current_state(TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (!signal_pending(current)) { \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- break; \
- } \
- current->state = TASK_RUNNING; \
- remove_wait_queue(&wq, &__wait); \
-} while (0)
-
-#define wait_event_interruptible_timeout(wq, condition, timeout) \
-({ \
- long __ret = timeout; \
- if (!(condition)) \
- __wait_event_interruptible_timeout(wq, condition, __ret); \
- __ret; \
-})
-#endif /* wait_event_interruptible_timeout */
-
-#define A_WAIT_EVENT_INTERRUPTIBLE_TIMEOUT(head, condition, timeout) do { \
- wait_event_interruptible_timeout(head, condition, timeout); \
-} while (0)
-
-#define A_WAKE_UP(head) wake_up(head)
-
-#ifdef DEBUG
-extern unsigned int panic_on_assert;
-#define A_ASSERT(expr) \
- if (!(expr)) { \
- printk(KERN_ALERT"Debug Assert Caught, File %s, Line: %d, Test:%s \n",__FILE__, __LINE__,#expr); \
- if (panic_on_assert) panic(#expr); \
- }
-#else
-#define A_ASSERT(expr)
-#endif /* DEBUG */
-
-#define A_REQUEST_FIRMWARE(_ppf, _pfile, _dev) request_firmware(_ppf, _pfile, _dev)
-#define A_RELEASE_FIRMWARE(_pf) release_firmware(_pf)
-
-/*
- * Initialization of the network buffer subsystem
- */
-#define A_NETBUF_INIT()
-
-/*
- * Network buffer queue support
- */
-typedef struct sk_buff_head A_NETBUF_QUEUE_T;
-
-#define A_NETBUF_QUEUE_INIT(q) \
- a_netbuf_queue_init(q)
-
-#define A_NETBUF_ENQUEUE(q, pkt) \
- a_netbuf_enqueue((q), (pkt))
-#define A_NETBUF_PREQUEUE(q, pkt) \
- a_netbuf_prequeue((q), (pkt))
-#define A_NETBUF_DEQUEUE(q) \
- (a_netbuf_dequeue(q))
-#define A_NETBUF_QUEUE_SIZE(q) \
- a_netbuf_queue_size(q)
-#define A_NETBUF_QUEUE_EMPTY(q) \
- (a_netbuf_queue_empty(q) ? true : false)
-
-/*
- * Network buffer support
- */
-#define A_NETBUF_ALLOC(size) \
- a_netbuf_alloc(size)
-#define A_NETBUF_ALLOC_RAW(size) \
- a_netbuf_alloc_raw(size)
-#define A_NETBUF_FREE(bufPtr) \
- a_netbuf_free(bufPtr)
-#define A_NETBUF_DATA(bufPtr) \
- a_netbuf_to_data(bufPtr)
-#define A_NETBUF_LEN(bufPtr) \
- a_netbuf_to_len(bufPtr)
-#define A_NETBUF_PUSH(bufPtr, len) \
- a_netbuf_push(bufPtr, len)
-#define A_NETBUF_PUT(bufPtr, len) \
- a_netbuf_put(bufPtr, len)
-#define A_NETBUF_TRIM(bufPtr,len) \
- a_netbuf_trim(bufPtr, len)
-#define A_NETBUF_PULL(bufPtr, len) \
- a_netbuf_pull(bufPtr, len)
-#define A_NETBUF_HEADROOM(bufPtr)\
- a_netbuf_headroom(bufPtr)
-#define A_NETBUF_SETLEN(bufPtr,len) \
- a_netbuf_setlen(bufPtr, len)
-
-/* Add data to end of a buffer */
-#define A_NETBUF_PUT_DATA(bufPtr, srcPtr, len) \
- a_netbuf_put_data(bufPtr, srcPtr, len)
-
-/* Add data to start of the buffer */
-#define A_NETBUF_PUSH_DATA(bufPtr, srcPtr, len) \
- a_netbuf_push_data(bufPtr, srcPtr, len)
-
-/* Remove data at start of the buffer */
-#define A_NETBUF_PULL_DATA(bufPtr, dstPtr, len) \
- a_netbuf_pull_data(bufPtr, dstPtr, len)
-
-/* Remove data from the end of the buffer */
-#define A_NETBUF_TRIM_DATA(bufPtr, dstPtr, len) \
- a_netbuf_trim_data(bufPtr, dstPtr, len)
-
-/* View data as "size" contiguous bytes of type "t" */
-#define A_NETBUF_VIEW_DATA(bufPtr, t, size) \
- (t )( ((struct skbuf *)(bufPtr))->data)
-
-/* return the beginning of the headroom for the buffer */
-#define A_NETBUF_HEAD(bufPtr) \
- ((((struct sk_buff *)(bufPtr))->head))
-
-/*
- * OS specific network buffer access routines
- */
-void *a_netbuf_alloc(int size);
-void *a_netbuf_alloc_raw(int size);
-void a_netbuf_free(void *bufPtr);
-void *a_netbuf_to_data(void *bufPtr);
-u32 a_netbuf_to_len(void *bufPtr);
-int a_netbuf_push(void *bufPtr, s32 len);
-int a_netbuf_push_data(void *bufPtr, char *srcPtr, s32 len);
-int a_netbuf_put(void *bufPtr, s32 len);
-int a_netbuf_put_data(void *bufPtr, char *srcPtr, s32 len);
-int a_netbuf_pull(void *bufPtr, s32 len);
-int a_netbuf_pull_data(void *bufPtr, char *dstPtr, s32 len);
-int a_netbuf_trim(void *bufPtr, s32 len);
-int a_netbuf_trim_data(void *bufPtr, char *dstPtr, s32 len);
-int a_netbuf_setlen(void *bufPtr, s32 len);
-s32 a_netbuf_headroom(void *bufPtr);
-void a_netbuf_enqueue(A_NETBUF_QUEUE_T *q, void *pkt);
-void a_netbuf_prequeue(A_NETBUF_QUEUE_T *q, void *pkt);
-void *a_netbuf_dequeue(A_NETBUF_QUEUE_T *q);
-int a_netbuf_queue_size(A_NETBUF_QUEUE_T *q);
-int a_netbuf_queue_empty(A_NETBUF_QUEUE_T *q);
-int a_netbuf_queue_empty(A_NETBUF_QUEUE_T *q);
-void a_netbuf_queue_init(A_NETBUF_QUEUE_T *q);
-
-/*
- * Kernel v.s User space functions
- */
-u32 a_copy_to_user(void *to, const void *from, u32 n);
-u32 a_copy_from_user(void *to, const void *from, u32 n);
-
-/* In linux, WLAN Rx and Tx run in different contexts, so no need to check
- * for any commands/data queued for WLAN */
-#define A_CHECK_DRV_TX()
-
-#define A_GET_CACHE_LINE_BYTES() L1_CACHE_BYTES
-
-#define A_CACHE_LINE_PAD 128
-
-static inline void *A_ALIGN_TO_CACHE_LINE(void *ptr) {
- return (void *)L1_CACHE_ALIGN((unsigned long)ptr);
-}
-
-#else /* __KERNEL__ */
-
-#ifdef __GNUC__
-#define __ATTRIB_PACK __attribute__ ((packed))
-#define __ATTRIB_PRINTF __attribute__ ((format (printf, 1, 2)))
-#define __ATTRIB_NORETURN __attribute__ ((noreturn))
-#ifndef INLINE
-#define INLINE __inline__
-#endif
-#else /* Not GCC */
-#define __ATTRIB_PACK
-#define __ATTRIB_PRINTF
-#define __ATTRIB_NORETURN
-#ifndef INLINE
-#define INLINE __inline
-#endif
-#endif /* End __GNUC__ */
-
-#define PREPACK
-#define POSTPACK __ATTRIB_PACK
-
-#define A_MEMZERO(addr, len) memset((addr), 0, (len))
-#define A_MALLOC(size) malloc(size)
-
-#include <err.h>
-
-#endif /* __KERNEL__ */
-
-#endif /* _OSAPI_LINUX_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/wlan_config.h b/drivers/staging/ath6kl/os/linux/include/wlan_config.h
deleted file mode 100644
index c1fe0c6e4fa..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/wlan_config.h
+++ /dev/null
@@ -1,108 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// This file contains the tunable configuration items for the WLAN module
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _HOST_WLAN_CONFIG_H_
-#define _HOST_WLAN_CONFIG_H_
-
-/* Include definitions here that can be used to tune the WLAN module behavior.
- * Different customers can tune the behavior as per their needs, here.
- */
-
-/* This configuration item when defined will consider the barker preamble
- * mentioned in the ERP IE of the beacons from the AP to determine the short
- * preamble support sent in the (Re)Assoc request frames.
- */
-#define WLAN_CONFIG_DONOT_IGNORE_BARKER_IN_ERP 0
-
-/* This config item when defined will not send the power module state transition
- * failure events that happen during scan, to the host.
- */
-#define WLAN_CONFIG_IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN 0
-
-/*
- * This configuration item enable/disable keepalive support.
- * Keepalive support: In the absence of any data traffic to AP, null
- * frames will be sent to the AP at periodic interval, to keep the association
- * active. This configuration item defines the periodic interval.
- * Use value of zero to disable keepalive support
- * Default: 60 seconds
- */
-#define WLAN_CONFIG_KEEP_ALIVE_INTERVAL 60
-
-/*
- * This configuration item sets the value of disconnect timeout
- * Firmware delays sending the disconnec event to the host for this
- * timeout after is gets disconnected from the current AP.
- * If the firmware successly roams within the disconnect timeout
- * it sends a new connect event
- */
-#define WLAN_CONFIG_DISCONNECT_TIMEOUT 10
-
-/*
- * This configuration item disables 11n support.
- * 0 - Enable
- * 1 - Disable
- */
-#define WLAN_CONFIG_DISABLE_11N 0
-
-/*
- * This configuration item enable BT clock sharing support
- * 1 - Enable
- * 0 - Disable (Default)
- */
-#define WLAN_CONFIG_BT_SHARING 0
-
-/*
- * This configuration item sets WIFI OFF policy
- * 0 - CUT_POWER
- * 1 - DEEP_SLEEP (Default)
- */
-#define WLAN_CONFIG_WLAN_OFF 1
-
-/*
- * This configuration item sets suspend policy
- * 0 - CUT_POWER (Default)
- * 1 - DEEP_SLEEP
- * 2 - WoW
- * 3 - CUT_POWER if BT OFF (clock sharing designs only)
- */
-#define WLAN_CONFIG_PM_SUSPEND 0
-
-/*
- * This configuration item sets suspend policy to use if PM_SUSPEND is
- * set to WoW and device is not connected at the time of suspend
- * 0 - CUT_POWER (Default)
- * 1 - DEEP_SLEEP
- * 2 - WoW
- * 3 - CUT_POWER if BT OFF (clock sharing designs only)
- */
-#define WLAN_CONFIG_PM_WOW2 0
-
-/*
- * This configuration item enables/disables transmit bursting
- * 0 - Enable tx Bursting (default)
- * 1 - Disable tx bursting
- */
-#define WLAN_CONFIG_DISABLE_TX_BURSTING 0
-
-#endif /* _HOST_WLAN_CONFIG_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/wmi_filter_linux.h b/drivers/staging/ath6kl/os/linux/include/wmi_filter_linux.h
deleted file mode 100644
index 1eb6f822d64..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/wmi_filter_linux.h
+++ /dev/null
@@ -1,300 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-
-#ifndef _WMI_FILTER_LINUX_H_
-#define _WMI_FILTER_LINUX_H_
-
-/*
- * sioctl_filter - Standard ioctl
- * pioctl_filter - Priv ioctl
- * xioctl_filter - eXtended ioctl
- *
- * ---- Possible values for the WMI filter ---------------
- * (0) - Block this cmd always (or) not implemented
- * (INFRA_NETWORK) - Allow this cmd only in STA mode
- * (ADHOC_NETWORK) - Allow this cmd only in IBSS mode
- * (AP_NETWORK) - Allow this cmd only in AP mode
- * (INFRA_NETWORK | ADHOC_NETWORK) - Block this cmd in AP mode
- * (ADHOC_NETWORK | AP_NETWORK) - Block this cmd in STA mode
- * (INFRA_NETWORK | AP_NETWORK) - Block this cmd in IBSS mode
- * (INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK)- allow only when mode is set
- * (0xFF) - Allow this cmd always irrespective of mode
- */
-
-u8 sioctl_filter[] = {
-(AP_NETWORK), /* SIOCSIWCOMMIT 0x8B00 */
-(0xFF), /* SIOCGIWNAME 0x8B01 */
-(0), /* SIOCSIWNWID 0x8B02 */
-(0), /* SIOCGIWNWID 0x8B03 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWFREQ 0x8B04 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWFREQ 0x8B05 */
-(0xFF), /* SIOCSIWMODE 0x8B06 */
-(0xFF), /* SIOCGIWMODE 0x8B07 */
-(0), /* SIOCSIWSENS 0x8B08 */
-(0), /* SIOCGIWSENS 0x8B09 */
-(0), /* SIOCSIWRANGE 0x8B0A */
-(0xFF), /* SIOCGIWRANGE 0x8B0B */
-(0), /* SIOCSIWPRIV 0x8B0C */
-(0), /* SIOCGIWPRIV 0x8B0D */
-(0), /* SIOCSIWSTATS 0x8B0E */
-(0), /* SIOCGIWSTATS 0x8B0F */
-(0), /* SIOCSIWSPY 0x8B10 */
-(0), /* SIOCGIWSPY 0x8B11 */
-(0), /* SIOCSIWTHRSPY 0x8B12 */
-(0), /* SIOCGIWTHRSPY 0x8B13 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWAP 0x8B14 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWAP 0x8B15 */
-#if (WIRELESS_EXT >= 18)
-(INFRA_NETWORK | ADHOC_NETWORK), /* SIOCSIWMLME 0X8B16 */
-#else
-(0), /* Dummy 0 */
-#endif /* WIRELESS_EXT */
-(0), /* SIOCGIWAPLIST 0x8B17 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* SIOCSIWSCAN 0x8B18 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* SIOCGIWSCAN 0x8B19 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWESSID 0x8B1A */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWESSID 0x8B1B */
-(0), /* SIOCSIWNICKN 0x8B1C */
-(0), /* SIOCGIWNICKN 0x8B1D */
-(0), /* Dummy 0 */
-(0), /* Dummy 0 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWRATE 0x8B20 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWRATE 0x8B21 */
-(0), /* SIOCSIWRTS 0x8B22 */
-(0), /* SIOCGIWRTS 0x8B23 */
-(0), /* SIOCSIWFRAG 0x8B24 */
-(0), /* SIOCGIWFRAG 0x8B25 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWTXPOW 0x8B26 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWTXPOW 0x8B27 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* SIOCSIWRETRY 0x8B28 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* SIOCGIWRETRY 0x8B29 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWENCODE 0x8B2A */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWENCODE 0x8B2B */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCSIWPOWER 0x8B2C */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* SIOCGIWPOWER 0x8B2D */
-};
-
-
-
-u8 pioctl_filter[] = {
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* IEEE80211_IOCTL_SETPARAM (SIOCIWFIRSTPRIV+0) */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* IEEE80211_IOCTL_SETKEY (SIOCIWFIRSTPRIV+1) */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* IEEE80211_IOCTL_DELKEY (SIOCIWFIRSTPRIV+2) */
-(AP_NETWORK), /* IEEE80211_IOCTL_SETMLME (SIOCIWFIRSTPRIV+3) */
-(INFRA_NETWORK), /* IEEE80211_IOCTL_ADDPMKID (SIOCIWFIRSTPRIV+4) */
-(0), /* IEEE80211_IOCTL_SETOPTIE (SIOCIWFIRSTPRIV+5) */
-(0), /* (SIOCIWFIRSTPRIV+6) */
-(0), /* (SIOCIWFIRSTPRIV+7) */
-(0), /* (SIOCIWFIRSTPRIV+8) */
-(0), /* (SIOCIWFIRSTPRIV+9) */
-(0), /* IEEE80211_IOCTL_LASTONE (SIOCIWFIRSTPRIV+10) */
-(0xFF), /* AR6000_IOCTL_WMI_GETREV (SIOCIWFIRSTPRIV+11) */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_IOCTL_WMI_SETPWR (SIOCIWFIRSTPRIV+12) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SETSCAN (SIOCIWFIRSTPRIV+13) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SETLISTENINT (SIOCIWFIRSTPRIV+14) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SETBSSFILTER (SIOCIWFIRSTPRIV+15) */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_IOCTL_WMI_SET_CHANNELPARAMS (SIOCIWFIRSTPRIV+16) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_PROBEDSSID (SIOCIWFIRSTPRIV+17) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_PMPARAMS (SIOCIWFIRSTPRIV+18) */
-(INFRA_NETWORK), /* AR6000_IOCTL_WMI_SET_BADAP (SIOCIWFIRSTPRIV+19) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_GET_QOS_QUEUE (SIOCIWFIRSTPRIV+20) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_CREATE_QOS (SIOCIWFIRSTPRIV+21) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_DELETE_QOS (SIOCIWFIRSTPRIV+22) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_SNRTHRESHOLD (SIOCIWFIRSTPRIV+23) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_ERROR_REPORT_BITMASK (SIOCIWFIRSTPRIV+24)*/
-(0xFF), /* AR6000_IOCTL_WMI_GET_TARGET_STATS (SIOCIWFIRSTPRIV+25) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_ASSOC_INFO (SIOCIWFIRSTPRIV+26) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_ACCESS_PARAMS (SIOCIWFIRSTPRIV+27) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_BMISS_TIME (SIOCIWFIRSTPRIV+28) */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_DISC_TIMEOUT (SIOCIWFIRSTPRIV+29) */
-(ADHOC_NETWORK), /* AR6000_IOCTL_WMI_SET_IBSS_PM_CAPS (SIOCIWFIRSTPRIV+30) */
-};
-
-
-
-u8 xioctl_filter[] = {
-(0xFF), /* Dummy 0 */
-(0xFF), /* AR6000_XIOCTL_BMI_DONE 1 */
-(0xFF), /* AR6000_XIOCTL_BMI_READ_MEMORY 2 */
-(0xFF), /* AR6000_XIOCTL_BMI_WRITE_MEMORY 3 */
-(0xFF), /* AR6000_XIOCTL_BMI_EXECUTE 4 */
-(0xFF), /* AR6000_XIOCTL_BMI_SET_APP_START 5 */
-(0xFF), /* AR6000_XIOCTL_BMI_READ_SOC_REGISTER 6 */
-(0xFF), /* AR6000_XIOCTL_BMI_WRITE_SOC_REGISTER 7 */
-(0xFF), /* AR6000_XIOCTL_BMI_TEST 8 */
-(0xFF), /* AR6000_XIOCTL_UNUSED9 9 */
-(0xFF), /* AR6000_XIOCTL_UNUSED10 10 */
-(0xFF), /* AR6000_XIOCTL_UNUSED11 11 */
-(0xFF), /* AR6000_XIOCTL_FORCE_TARGET_RESET 12 */
-(0xFF), /* AR6000_XIOCTL_HTC_RAW_OPEN 13 */
-(0xFF), /* AR6000_XIOCTL_HTC_RAW_CLOSE 14 */
-(0xFF), /* AR6000_XIOCTL_HTC_RAW_READ 15 */
-(0xFF), /* AR6000_XIOCTL_HTC_RAW_WRITE 16 */
-(0xFF), /* AR6000_XIOCTL_CHECK_TARGET_READY 17 */
-(0xFF), /* AR6000_XIOCTL_GPIO_OUTPUT_SET 18 */
-(0xFF), /* AR6000_XIOCTL_GPIO_INPUT_GET 19 */
-(0xFF), /* AR6000_XIOCTL_GPIO_REGISTER_SET 20 */
-(0xFF), /* AR6000_XIOCTL_GPIO_REGISTER_GET 21 */
-(0xFF), /* AR6000_XIOCTL_GPIO_INTR_ACK 22 */
-(0xFF), /* AR6000_XIOCTL_GPIO_INTR_WAIT 23 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_SET_ADHOC_BSSID 24 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_SET_OPT_MODE 25 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_OPT_SEND_FRAME 26 */
-(ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_SET_BEACON_INTVAL 27 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* IEEE80211_IOCTL_SETAUTHALG 28 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_SET_VOICE_PKT_SIZE 29 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_SET_MAX_SP 30 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_ROAM_TBL 31 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_ROAM_CTRL 32 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTRL_WMI_SET_POWERSAVE_TIMERS 33 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTRL_WMI_GET_POWER_MODE 34 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTRL_WMI_SET_WLAN_STATE 35 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_ROAM_DATA 36 */
-(0xFF), /* AR6000_XIOCTL_WMI_SETRETRYLIMITS 37 */
-(0xFF), /* AR6000_XIOCTL_TCMD_CONT_TX 38 */
-(0xFF), /* AR6000_XIOCTL_TCMD_CONT_RX 39 */
-(0xFF), /* AR6000_XIOCTL_TCMD_PM 40 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_STARTSCAN 41 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_WMI_SETFIXRATES 42 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_WMI_GETFIXRATES 43 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_RSSITHRESHOLD 44 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_CLR_RSSISNR 45 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_LQTHRESHOLD 46 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_WMI_SET_RTS 47 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_WMI_SET_LPREAMBLE 48 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_WMI_SET_AUTHMODE 49 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_REASSOCMODE 50 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_WMM 51 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_HB_CHALLENGE_RESP_PARAMS 52 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_HB_CHALLENGE_RESP 53 */
-(INFRA_NETWORK | ADHOC_NETWORK | AP_NETWORK), /* AR6000_XIOCTL_WMI_GET_RD 54 */
-(0xFF), /* AR6000_XIOCTL_DIAG_READ 55 */
-(0xFF), /* AR6000_XIOCTL_DIAG_WRITE 56 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_TXOP 57 */
-(INFRA_NETWORK), /* AR6000_XIOCTL_USER_SETKEYS 58 */
-(INFRA_NETWORK), /* AR6000_XIOCTL_WMI_SET_KEEPALIVE 59 */
-(INFRA_NETWORK), /* AR6000_XIOCTL_WMI_GET_KEEPALIVE 60 */
-(0xFF), /* AR6000_XIOCTL_BMI_ROMPATCH_INSTALL 61 */
-(0xFF), /* AR6000_XIOCTL_BMI_ROMPATCH_UNINSTALL 62 */
-(0xFF), /* AR6000_XIOCTL_BMI_ROMPATCH_ACTIVATE 63 */
-(0xFF), /* AR6000_XIOCTL_BMI_ROMPATCH_DEACTIVATE 64 */
-(0xFF), /* AR6000_XIOCTL_WMI_SET_APPIE 65 */
-(0xFF), /* AR6000_XIOCTL_WMI_SET_MGMT_FRM_RX_FILTER 66 */
-(0xFF), /* AR6000_XIOCTL_DBGLOG_CFG_MODULE 67 */
-(0xFF), /* AR6000_XIOCTL_DBGLOG_GET_DEBUG_LOGS 68 */
-(0xFF), /* Dummy 69 */
-(0xFF), /* AR6000_XIOCTL_WMI_SET_WSC_STATUS 70 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BT_STATUS 71 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BT_PARAMS 72 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_HOST_SLEEP_MODE 73 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_WOW_MODE 74 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_WOW_LIST 75 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_ADD_WOW_PATTERN 76 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_DEL_WOW_PATTERN 77 */
-(0xFF), /* AR6000_XIOCTL_TARGET_INFO 78 */
-(0xFF), /* AR6000_XIOCTL_DUMP_HTC_CREDIT_STATE 79 */
-(0xFF), /* AR6000_XIOCTL_TRAFFIC_ACTIVITY_CHANGE 80 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_CONNECT_CTRL_FLAGS 81 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_AKMP_PARAMS 82 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_PMKID_LIST 83 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_PMKID_LIST 84 */
-(0xFF), /* Dummy 85 */
-(0xFF), /* Dummy 86 */
-(0xFF), /* Dummy 87 */
-(0xFF), /* Dummy 88 */
-(0xFF), /* Dummy 89 */
-(0xFF), /* AR6000_XIOCTL_UNUSED90 90 */
-(0xFF), /* AR6000_XIOCTL_BMI_LZ_STREAM_START 91 */
-(0xFF), /* AR6000_XIOCTL_BMI_LZ_DATA 92 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_PROF_CFG 93 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_PROF_ADDR_SET 94 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_PROF_START 95 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_PROF_STOP 96 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_PROF_COUNT_GET 97 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_ABORT_SCAN 98 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_STA_LIST 99 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_HIDDEN_SSID 100 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_SET_NUM_STA 101 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_SET_ACL_MAC 102 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_ACL_LIST 103 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_COMMIT_CONFIG 104 */
-(AP_NETWORK), /* IEEE80211_IOCTL_GETWPAIE 105 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_CONN_INACT_TIME 106 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_PROT_SCAN_TIME 107 */
-(AP_NETWORK), /* AR6000_XIOCTL_WMI_SET_COUNTRY 108 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_SET_DTIM 109 */
-(0xFF), /* AR6000_XIOCTL_WMI_TARGET_EVENT_REPORT 110 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_SET_IP 111 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_SET_ACL_POLICY 112 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_INTRA_BSS_COMM 113 */
-(0xFF), /* AR6000_XIOCTL_DUMP_MODULE_DEBUG_INFO 114 */
-(0xFF), /* AR6000_XIOCTL_MODULE_DEBUG_SET_MASK 115 */
-(0xFF), /* AR6000_XIOCTL_MODULE_DEBUG_GET_MASK 116 */
-(0xFF), /* AR6000_XIOCTL_DUMP_RCV_AGGR_STATS 117 */
-(0xFF), /* AR6000_XIOCTL_SET_HT_CAP 118 */
-(0xFF), /* AR6000_XIOCTL_SET_HT_OP 119 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_STAT 120 */
-(0xFF), /* AR6000_XIOCTL_SET_TX_SELECT_RATES 121 */
-(0xFF), /* AR6000_XIOCTL_SETUP_AGGR 122 */
-(0xFF), /* AR6000_XIOCTL_ALLOW_AGGR 123 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_HIDDEN_SSID 124 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_COUNTRY 125 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_WMODE 126 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_GET_DTIM 127 */
-(AP_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_AP_GET_BINTVL 128 */
-(0xFF), /* AR6000_XIOCTL_AP_GET_RTS 129 */
-(0xFF), /* AR6000_XIOCTL_DELE_AGGR 130 */
-(0xFF), /* AR6000_XIOCTL_FETCH_TARGET_REGS 131 */
-(0xFF), /* AR6000_XIOCTL_HCI_CMD 132 */
-(0xFF), /* AR6000_XIOCTL_ACL_DATA(used to be used for PAL) 133 */
-(0xFF), /* AR6000_XIOCTL_WLAN_CONN_PRECEDENCE 134 */
-(AP_NETWORK), /* AR6000_XIOCTL_AP_SET_11BG_RATESET 135 */
-(0xFF),
-(0xFF),
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_FE_ANT 138 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_COLOCATED_BT_DEV 139 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG 140 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_SCO_CONFIG 141 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_A2DP_CONFIG 142 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_ACLCOEX_CONFIG 143 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BTCOEX_DEBUG 144 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_BT_OPERATING_STATUS 145 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_BTCOEX_CONFIG 146 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_GET_BTCOEX_GET_STATS 147 */
-(0xFF), /* AR6000_XIOCTL_WMI_SET_QOS_SUPP 148 */
-(0xFF), /* AR6000_XIOCTL_GET_WLAN_SLEEP_STATE 149 */
-(0xFF), /* AR6000_XIOCTL_SET_BT_HW_POWER_STATE 150 */
-(0xFF), /* AR6000_XIOCTL_GET_BT_HW_POWER_STATE 151 */
-(0xFF), /* AR6000_XIOCTL_ADD_AP_INTERFACE 152 */
-(0xFF), /* AR6000_XIOCTL_REMOVE_AP_INTERFACE 153 */
-(0xFF), /* AR6000_XIOCTL_WMI_SET_TX_SGI_PARAM 154 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_WPA_OFFLOAD_STATE 155 */
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_PASSPHRASE 156 */
-(0xFF),
-(0xFF),
-(0xFF),
-(0xFF),
-(INFRA_NETWORK | ADHOC_NETWORK), /* AR6000_XIOCTL_WMI_SET_EXCESS_TX_RETRY_THRES 161 */
-};
-
-#endif /*_WMI_FILTER_LINUX_H_*/
diff --git a/drivers/staging/ath6kl/os/linux/netbuf.c b/drivers/staging/ath6kl/os/linux/netbuf.c
deleted file mode 100644
index 963a2fb76a9..00000000000
--- a/drivers/staging/ath6kl/os/linux/netbuf.c
+++ /dev/null
@@ -1,231 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Communications Inc.
-// All rights reserved.
-//
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//
-// Author(s): ="Atheros"
-//------------------------------------------------------------------------------
-#include <a_config.h>
-#include "athdefs.h"
-#include "a_osapi.h"
-#include "htc_packet.h"
-
-#define AR6000_DATA_OFFSET 64
-
-void a_netbuf_enqueue(A_NETBUF_QUEUE_T *q, void *pkt)
-{
- skb_queue_tail((struct sk_buff_head *) q, (struct sk_buff *) pkt);
-}
-
-void a_netbuf_prequeue(A_NETBUF_QUEUE_T *q, void *pkt)
-{
- skb_queue_head((struct sk_buff_head *) q, (struct sk_buff *) pkt);
-}
-
-void *a_netbuf_dequeue(A_NETBUF_QUEUE_T *q)
-{
- return((void *) skb_dequeue((struct sk_buff_head *) q));
-}
-
-int a_netbuf_queue_size(A_NETBUF_QUEUE_T *q)
-{
- return(skb_queue_len((struct sk_buff_head *) q));
-}
-
-int a_netbuf_queue_empty(A_NETBUF_QUEUE_T *q)
-{
- return(skb_queue_empty((struct sk_buff_head *) q));
-}
-
-void a_netbuf_queue_init(A_NETBUF_QUEUE_T *q)
-{
- skb_queue_head_init((struct sk_buff_head *) q);
-}
-
-void *
-a_netbuf_alloc(int size)
-{
- struct sk_buff *skb;
- size += 2 * (A_GET_CACHE_LINE_BYTES()); /* add some cacheline space at front and back of buffer */
- skb = dev_alloc_skb(AR6000_DATA_OFFSET + sizeof(struct htc_packet) + size);
- skb_reserve(skb, AR6000_DATA_OFFSET + sizeof(struct htc_packet) + A_GET_CACHE_LINE_BYTES());
- return ((void *)skb);
-}
-
-/*
- * Allocate an SKB w.o. any encapsulation requirement.
- */
-void *
-a_netbuf_alloc_raw(int size)
-{
- struct sk_buff *skb;
-
- skb = dev_alloc_skb(size);
-
- return ((void *)skb);
-}
-
-void
-a_netbuf_free(void *bufPtr)
-{
- struct sk_buff *skb = (struct sk_buff *)bufPtr;
-
- dev_kfree_skb(skb);
-}
-
-u32 a_netbuf_to_len(void *bufPtr)
-{
- return (((struct sk_buff *)bufPtr)->len);
-}
-
-void *
-a_netbuf_to_data(void *bufPtr)
-{
- return (((struct sk_buff *)bufPtr)->data);
-}
-
-/*
- * Add len # of bytes to the beginning of the network buffer
- * pointed to by bufPtr
- */
-int
-a_netbuf_push(void *bufPtr, s32 len)
-{
- skb_push((struct sk_buff *)bufPtr, len);
-
- return 0;
-}
-
-/*
- * Add len # of bytes to the beginning of the network buffer
- * pointed to by bufPtr and also fill with data
- */
-int
-a_netbuf_push_data(void *bufPtr, char *srcPtr, s32 len)
-{
- skb_push((struct sk_buff *) bufPtr, len);
- memcpy(((struct sk_buff *)bufPtr)->data, srcPtr, len);
-
- return 0;
-}
-
-/*
- * Add len # of bytes to the end of the network buffer
- * pointed to by bufPtr
- */
-int
-a_netbuf_put(void *bufPtr, s32 len)
-{
- skb_put((struct sk_buff *)bufPtr, len);
-
- return 0;
-}
-
-/*
- * Add len # of bytes to the end of the network buffer
- * pointed to by bufPtr and also fill with data
- */
-int
-a_netbuf_put_data(void *bufPtr, char *srcPtr, s32 len)
-{
- char *start = (char*)(((struct sk_buff *)bufPtr)->data +
- ((struct sk_buff *)bufPtr)->len);
- skb_put((struct sk_buff *)bufPtr, len);
- memcpy(start, srcPtr, len);
-
- return 0;
-}
-
-
-/*
- * Trim the network buffer pointed to by bufPtr to len # of bytes
- */
-int
-a_netbuf_setlen(void *bufPtr, s32 len)
-{
- skb_trim((struct sk_buff *)bufPtr, len);
-
- return 0;
-}
-
-/*
- * Chop of len # of bytes from the end of the buffer.
- */
-int
-a_netbuf_trim(void *bufPtr, s32 len)
-{
- skb_trim((struct sk_buff *)bufPtr, ((struct sk_buff *)bufPtr)->len - len);
-
- return 0;
-}
-
-/*
- * Chop of len # of bytes from the end of the buffer and return the data.
- */
-int
-a_netbuf_trim_data(void *bufPtr, char *dstPtr, s32 len)
-{
- char *start = (char*)(((struct sk_buff *)bufPtr)->data +
- (((struct sk_buff *)bufPtr)->len - len));
-
- memcpy(dstPtr, start, len);
- skb_trim((struct sk_buff *)bufPtr, ((struct sk_buff *)bufPtr)->len - len);
-
- return 0;
-}
-
-
-/*
- * Returns the number of bytes available to a a_netbuf_push()
- */
-s32 a_netbuf_headroom(void *bufPtr)
-{
- return (skb_headroom((struct sk_buff *)bufPtr));
-}
-
-/*
- * Removes specified number of bytes from the beginning of the buffer
- */
-int
-a_netbuf_pull(void *bufPtr, s32 len)
-{
- skb_pull((struct sk_buff *)bufPtr, len);
-
- return 0;
-}
-
-/*
- * Removes specified number of bytes from the beginning of the buffer
- * and return the data
- */
-int
-a_netbuf_pull_data(void *bufPtr, char *dstPtr, s32 len)
-{
- memcpy(dstPtr, ((struct sk_buff *)bufPtr)->data, len);
- skb_pull((struct sk_buff *)bufPtr, len);
-
- return 0;
-}
-
-#ifdef EXPORT_HCI_BRIDGE_INTERFACE
-EXPORT_SYMBOL(a_netbuf_to_data);
-EXPORT_SYMBOL(a_netbuf_put);
-EXPORT_SYMBOL(a_netbuf_pull);
-EXPORT_SYMBOL(a_netbuf_alloc);
-EXPORT_SYMBOL(a_netbuf_free);
-#endif
diff --git a/drivers/staging/ath6kl/reorder/aggr_rx_internal.h b/drivers/staging/ath6kl/reorder/aggr_rx_internal.h
deleted file mode 100644
index 11125967d53..00000000000
--- a/drivers/staging/ath6kl/reorder/aggr_rx_internal.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- *
- * Copyright (c) 2004-2010 Atheros Communications Inc.
- * All rights reserved.
- *
- *
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
- *
- */
-
-#ifndef __AGGR_RX_INTERNAL_H__
-#define __AGGR_RX_INTERNAL_H__
-
-#include "a_osapi.h"
-#include "aggr_recv_api.h"
-
-#define AGGR_WIN_IDX(x, y) ((x) % (y))
-#define AGGR_INCR_IDX(x, y) AGGR_WIN_IDX(((x)+1), (y))
-#define AGGR_DCRM_IDX(x, y) AGGR_WIN_IDX(((x)-1), (y))
-#define IEEE80211_MAX_SEQ_NO 0xFFF
-#define IEEE80211_NEXT_SEQ_NO(x) (((x) + 1) & IEEE80211_MAX_SEQ_NO)
-
-
-#define NUM_OF_TIDS 8
-#define AGGR_SZ_DEFAULT 8
-
-#define AGGR_WIN_SZ_MIN 2
-#define AGGR_WIN_SZ_MAX 8
-/* TID Window sz is double of what is negotiated. Derive TID_WINDOW_SZ from win_sz, per tid */
-#define TID_WINDOW_SZ(_x) ((_x) << 1)
-
-#define AGGR_NUM_OF_FREE_NETBUFS 16
-
-#define AGGR_GET_RXTID_STATS(_p, _x) (&(_p->stat[(_x)]))
-#define AGGR_GET_RXTID(_p, _x) (&(_p->RxTid[(_x)]))
-
-/* Hold q is a function of win_sz, which is negotiated per tid */
-#define HOLD_Q_SZ(_x) (TID_WINDOW_SZ((_x))*sizeof(struct osbuf_hold_q))
-/* AGGR_RX_TIMEOUT value is important as a (too) small value can cause frames to be
- * delivered out of order and a (too) large value can cause undesirable latency in
- * certain situations. */
-#define AGGR_RX_TIMEOUT 400 /* Timeout(in ms) for delivery of frames, if they are stuck */
-
-typedef enum {
- ALL_SEQNO = 0,
- CONTIGUOUS_SEQNO = 1,
-}DELIVERY_ORDER;
-
-struct osbuf_hold_q {
- void *osbuf;
- bool is_amsdu;
- u16 seq_no;
-};
-
-
-#if 0
-/* XXX: unused ? */
-struct window_snapshot {
- u16 seqno_st;
- u16 seqno_end;
-};
-#endif
-
-struct rxtid {
- bool aggr; /* is it ON or OFF */
- bool progress; /* true when frames have arrived after a timer start */
- bool timerMon; /* true if the timer started for the sake of this TID */
- u16 win_sz; /* negotiated window size */
- u16 seq_next; /* Next seq no, in current window */
- u32 hold_q_sz; /* Num of frames that can be held in hold q */
- struct osbuf_hold_q *hold_q; /* Hold q for re-order */
-#if 0
- struct window_snapshot old_win; /* Sliding window snapshot - for timeout */
-#endif
- A_NETBUF_QUEUE_T q; /* q head for enqueuing frames for dispatch */
- A_MUTEX_T lock;
-};
-
-struct rxtid_stats {
- u32 num_into_aggr; /* hitting at the input of this module */
- u32 num_dups; /* duplicate */
- u32 num_oow; /* out of window */
- u32 num_mpdu; /* single payload 802.3/802.11 frame */
- u32 num_amsdu; /* AMSDU */
- u32 num_delivered; /* frames delivered to IP stack */
- u32 num_timeouts; /* num of timeouts, during which frames delivered */
- u32 num_hole; /* frame not present, when window moved over */
- u32 num_bar; /* num of resets of seq_num, via BAR */
-};
-
-struct aggr_info {
- u8 aggr_sz; /* config value of aggregation size */
- u8 timerScheduled;
- A_TIMER timer; /* timer for returning held up pkts in re-order que */
- void *dev; /* dev handle */
- RX_CALLBACK rx_fn; /* callback function to return frames; to upper layer */
- struct rxtid RxTid[NUM_OF_TIDS]; /* Per tid window */
- ALLOC_NETBUFS netbuf_allocator; /* OS netbuf alloc fn */
- A_NETBUF_QUEUE_T freeQ; /* pre-allocated buffers - for A_MSDU slicing */
- struct rxtid_stats stat[NUM_OF_TIDS]; /* Tid based statistics */
- PACKET_LOG pkt_log; /* Log info of the packets */
-};
-
-#endif /* __AGGR_RX_INTERNAL_H__ */
diff --git a/drivers/staging/ath6kl/reorder/rcv_aggr.c b/drivers/staging/ath6kl/reorder/rcv_aggr.c
deleted file mode 100644
index 9b1509ec5a7..00000000000
--- a/drivers/staging/ath6kl/reorder/rcv_aggr.c
+++ /dev/null
@@ -1,661 +0,0 @@
-/*
- *
- * Copyright (c) 2010 Atheros Communications Inc.
- * All rights reserved.
- *
- *
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
- *
- */
-
-#include <a_config.h>
-#include <athdefs.h>
-#include <a_osapi.h>
-#include <a_debug.h>
-#include "pkt_log.h"
-#include "aggr_recv_api.h"
-#include "aggr_rx_internal.h"
-#include "wmi.h"
-
-extern int
-wmi_dot3_2_dix(void *osbuf);
-
-static void
-aggr_slice_amsdu(struct aggr_info *p_aggr, struct rxtid *rxtid, void **osbuf);
-
-static void
-aggr_timeout(unsigned long arg);
-
-static void
-aggr_deque_frms(struct aggr_info *p_aggr, u8 tid, u16 seq_no, u8 order);
-
-static void
-aggr_dispatch_frames(struct aggr_info *p_aggr, A_NETBUF_QUEUE_T *q);
-
-static void *
-aggr_get_osbuf(struct aggr_info *p_aggr);
-
-void *
-aggr_init(ALLOC_NETBUFS netbuf_allocator)
-{
- struct aggr_info *p_aggr = NULL;
- struct rxtid *rxtid;
- u8 i;
- int status = 0;
-
- A_PRINTF("In aggr_init..\n");
-
- do {
- p_aggr = A_MALLOC(sizeof(struct aggr_info));
- if(!p_aggr) {
- A_PRINTF("Failed to allocate memory for aggr_node\n");
- status = A_ERROR;
- break;
- }
-
- /* Init timer and data structures */
- A_MEMZERO(p_aggr, sizeof(struct aggr_info));
- p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
- A_INIT_TIMER(&p_aggr->timer, aggr_timeout, p_aggr);
- p_aggr->timerScheduled = false;
- A_NETBUF_QUEUE_INIT(&p_aggr->freeQ);
-
- p_aggr->netbuf_allocator = netbuf_allocator;
- p_aggr->netbuf_allocator(&p_aggr->freeQ, AGGR_NUM_OF_FREE_NETBUFS);
-
- for(i = 0; i < NUM_OF_TIDS; i++) {
- rxtid = AGGR_GET_RXTID(p_aggr, i);
- rxtid->aggr = false;
- rxtid->progress = false;
- rxtid->timerMon = false;
- A_NETBUF_QUEUE_INIT(&rxtid->q);
- A_MUTEX_INIT(&rxtid->lock);
- }
- }while(false);
-
- A_PRINTF("going out of aggr_init..status %s\n",
- (status == 0) ? "OK":"Error");
-
- if (status) {
- /* Cleanup */
- aggr_module_destroy(p_aggr);
- }
- return ((status == 0) ? p_aggr : NULL);
-}
-
-/* utility function to clear rx hold_q for a tid */
-static void
-aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
-{
- struct rxtid *rxtid;
- struct rxtid_stats *stats;
-
- A_ASSERT(tid < NUM_OF_TIDS && p_aggr);
-
- rxtid = AGGR_GET_RXTID(p_aggr, tid);
- stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
-
- if(rxtid->aggr) {
- aggr_deque_frms(p_aggr, tid, 0, ALL_SEQNO);
- }
-
- rxtid->aggr = false;
- rxtid->progress = false;
- rxtid->timerMon = false;
- rxtid->win_sz = 0;
- rxtid->seq_next = 0;
- rxtid->hold_q_sz = 0;
-
- if(rxtid->hold_q) {
- kfree(rxtid->hold_q);
- rxtid->hold_q = NULL;
- }
-
- A_MEMZERO(stats, sizeof(struct rxtid_stats));
-}
-
-void
-aggr_module_destroy(void *cntxt)
-{
- struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
- struct rxtid *rxtid;
- u8 i, k;
- A_PRINTF("%s(): aggr = %p\n",_A_FUNCNAME_, p_aggr);
- A_ASSERT(p_aggr);
-
- if(p_aggr) {
- if(p_aggr->timerScheduled) {
- A_UNTIMEOUT(&p_aggr->timer);
- p_aggr->timerScheduled = false;
- }
-
- for(i = 0; i < NUM_OF_TIDS; i++) {
- rxtid = AGGR_GET_RXTID(p_aggr, i);
- /* Free the hold q contents and hold_q*/
- if(rxtid->hold_q) {
- for(k = 0; k< rxtid->hold_q_sz; k++) {
- if(rxtid->hold_q[k].osbuf) {
- A_NETBUF_FREE(rxtid->hold_q[k].osbuf);
- }
- }
- kfree(rxtid->hold_q);
- }
- /* Free the dispatch q contents*/
- while(A_NETBUF_QUEUE_SIZE(&rxtid->q)) {
- A_NETBUF_FREE(A_NETBUF_DEQUEUE(&rxtid->q));
- }
- if (A_IS_MUTEX_VALID(&rxtid->lock)) {
- A_MUTEX_DELETE(&rxtid->lock);
- }
- }
- /* free the freeQ and its contents*/
- while(A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ)) {
- A_NETBUF_FREE(A_NETBUF_DEQUEUE(&p_aggr->freeQ));
- }
- kfree(p_aggr);
- }
- A_PRINTF("out aggr_module_destroy\n");
-}
-
-
-void
-aggr_register_rx_dispatcher(void *cntxt, void * dev, RX_CALLBACK fn)
-{
- struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
-
- A_ASSERT(p_aggr && fn && dev);
-
- p_aggr->rx_fn = fn;
- p_aggr->dev = dev;
-}
-
-
-void
-aggr_process_bar(void *cntxt, u8 tid, u16 seq_no)
-{
- struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
- struct rxtid_stats *stats;
-
- A_ASSERT(p_aggr);
- stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
- stats->num_bar++;
-
- aggr_deque_frms(p_aggr, tid, seq_no, ALL_SEQNO);
-}
-
-
-void
-aggr_recv_addba_req_evt(void *cntxt, u8 tid, u16 seq_no, u8 win_sz)
-{
- struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
- struct rxtid *rxtid;
- struct rxtid_stats *stats;
-
- A_ASSERT(p_aggr);
- rxtid = AGGR_GET_RXTID(p_aggr, tid);
- stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
-
- A_PRINTF("%s(): win_sz = %d aggr %d\n", _A_FUNCNAME_, win_sz, rxtid->aggr);
- if(win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX) {
- A_PRINTF("win_sz %d, tid %d\n", win_sz, tid);
- }
-
- if(rxtid->aggr) {
- /* Just go and deliver all the frames up from this
- * queue, as if we got DELBA and re-initialize the queue
- */
- aggr_delete_tid_state(p_aggr, tid);
- }
-
- rxtid->seq_next = seq_no;
- /* create these queues, only upon receiving of ADDBA for a
- * tid, reducing memory requirement
- */
- rxtid->hold_q = A_MALLOC(HOLD_Q_SZ(win_sz));
- if((rxtid->hold_q == NULL)) {
- A_PRINTF("Failed to allocate memory, tid = %d\n", tid);
- A_ASSERT(0);
- }
- A_MEMZERO(rxtid->hold_q, HOLD_Q_SZ(win_sz));
-
- /* Update rxtid for the window sz */
- rxtid->win_sz = win_sz;
- /* hold_q_sz inicates the depth of holding q - which is
- * a factor of win_sz. Compute once, as it will be used often
- */
- rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
- /* There should be no frames on q - even when second ADDBA comes in.
- * If aggr was previously ON on this tid, we would have cleaned up
- * the q
- */
- if(A_NETBUF_QUEUE_SIZE(&rxtid->q) != 0) {
- A_PRINTF("ERROR: Frames still on queue ?\n");
- A_ASSERT(0);
- }
-
- rxtid->aggr = true;
-}
-
-void
-aggr_recv_delba_req_evt(void *cntxt, u8 tid)
-{
- struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
- struct rxtid *rxtid;
-
- A_ASSERT(p_aggr);
- A_PRINTF("%s(): tid %d\n", _A_FUNCNAME_, tid);
-
- rxtid = AGGR_GET_RXTID(p_aggr, tid);
-
- if(rxtid->aggr) {
- aggr_delete_tid_state(p_aggr, tid);
- }
-}
-
-static void
-aggr_deque_frms(struct aggr_info *p_aggr, u8 tid, u16 seq_no, u8 order)
-{
- struct rxtid *rxtid;
- struct osbuf_hold_q *node;
- u16 idx, idx_end, seq_end;
- struct rxtid_stats *stats;
-
- A_ASSERT(p_aggr);
- rxtid = AGGR_GET_RXTID(p_aggr, tid);
- stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
-
- /* idx is absolute location for first frame */
- idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
-
- /* idx_end is typically the last possible frame in the window,
- * but changes to 'the' seq_no, when BAR comes. If seq_no
- * is non-zero, we will go up to that and stop.
- * Note: last seq no in current window will occupy the same
- * index position as index that is just previous to start.
- * An imp point : if win_sz is 7, for seq_no space of 4095,
- * then, there would be holes when sequence wrap around occurs.
- * Target should judiciously choose the win_sz, based on
- * this condition. For 4095, (TID_WINDOW_SZ = 2 x win_sz
- * 2, 4, 8, 16 win_sz works fine).
- * We must deque from "idx" to "idx_end", including both.
- */
- seq_end = (seq_no) ? seq_no : rxtid->seq_next;
- idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
-
- /* Critical section begins */
- A_MUTEX_LOCK(&rxtid->lock);
- do {
-
- node = &rxtid->hold_q[idx];
-
- if((order == CONTIGUOUS_SEQNO) && (!node->osbuf))
- break;
-
- /* chain frames and deliver frames bcos:
- * 1. either the frames are in order and window is contiguous, OR
- * 2. we need to deque frames, irrespective of holes
- */
- if(node->osbuf) {
- if(node->is_amsdu) {
- aggr_slice_amsdu(p_aggr, rxtid, &node->osbuf);
- } else {
- A_NETBUF_ENQUEUE(&rxtid->q, node->osbuf);
- }
- node->osbuf = NULL;
- } else {
- stats->num_hole++;
- }
-
- /* window is moving */
- rxtid->seq_next = IEEE80211_NEXT_SEQ_NO(rxtid->seq_next);
- idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
- } while(idx != idx_end);
- /* Critical section ends */
- A_MUTEX_UNLOCK(&rxtid->lock);
-
- stats->num_delivered += A_NETBUF_QUEUE_SIZE(&rxtid->q);
- aggr_dispatch_frames(p_aggr, &rxtid->q);
-}
-
-static void *
-aggr_get_osbuf(struct aggr_info *p_aggr)
-{
- void *buf = NULL;
-
- /* Starving for buffers? get more from OS
- * check for low netbuffers( < 1/4 AGGR_NUM_OF_FREE_NETBUFS) :
- * re-allocate bufs if so
- * allocate a free buf from freeQ
- */
- if (A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ) < (AGGR_NUM_OF_FREE_NETBUFS >> 2)) {
- p_aggr->netbuf_allocator(&p_aggr->freeQ, AGGR_NUM_OF_FREE_NETBUFS);
- }
-
- if (A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ)) {
- buf = A_NETBUF_DEQUEUE(&p_aggr->freeQ);
- }
-
- return buf;
-}
-
-
-static void
-aggr_slice_amsdu(struct aggr_info *p_aggr, struct rxtid *rxtid, void **osbuf)
-{
- void *new_buf;
- u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
- u8 *framep;
-
- /* Frame format at this point:
- * [DIX hdr | 802.3 | 802.3 | ... | 802.3]
- *
- * Strip the DIX header.
- * Iterate through the osbuf and do:
- * grab a free netbuf from freeQ
- * find the start and end of a frame
- * copy it to netbuf(Vista can do better here)
- * convert all msdu's(802.3) frames to upper layer format - os routine
- * -for now lets convert from 802.3 to dix
- * enque this to dispatch q of tid
- * repeat
- * free the osbuf - to OS. It's been sliced.
- */
-
- mac_hdr_len = sizeof(ATH_MAC_HDR);
- framep = A_NETBUF_DATA(*osbuf) + mac_hdr_len;
- amsdu_len = A_NETBUF_LEN(*osbuf) - mac_hdr_len;
-
- while(amsdu_len > mac_hdr_len) {
- /* Begin of a 802.3 frame */
- payload_8023_len = A_BE2CPU16(((ATH_MAC_HDR *)framep)->typeOrLen);
-#define MAX_MSDU_SUBFRAME_PAYLOAD_LEN 1508
-#define MIN_MSDU_SUBFRAME_PAYLOAD_LEN 46
- if(payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN || payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
- A_PRINTF("802.3 AMSDU frame bound check failed. len %d\n", payload_8023_len);
- break;
- }
- frame_8023_len = payload_8023_len + mac_hdr_len;
- new_buf = aggr_get_osbuf(p_aggr);
- if(new_buf == NULL) {
- A_PRINTF("No buffer available \n");
- break;
- }
-
- memcpy(A_NETBUF_DATA(new_buf), framep, frame_8023_len);
- A_NETBUF_PUT(new_buf, frame_8023_len);
- if (wmi_dot3_2_dix(new_buf) != 0) {
- A_PRINTF("dot3_2_dix err..\n");
- A_NETBUF_FREE(new_buf);
- break;
- }
-
- A_NETBUF_ENQUEUE(&rxtid->q, new_buf);
-
- /* Is this the last subframe within this aggregate ? */
- if ((amsdu_len - frame_8023_len) == 0) {
- break;
- }
-
- /* Add the length of A-MSDU subframe padding bytes -
- * Round to nearest word.
- */
- frame_8023_len = ((frame_8023_len + 3) & ~3);
-
- framep += frame_8023_len;
- amsdu_len -= frame_8023_len;
- }
-
- A_NETBUF_FREE(*osbuf);
- *osbuf = NULL;
-}
-
-void
-aggr_process_recv_frm(void *cntxt, u8 tid, u16 seq_no, bool is_amsdu, void **osbuf)
-{
- struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
- struct rxtid *rxtid;
- struct rxtid_stats *stats;
- u16 idx, st, cur, end;
- u16 *log_idx;
- struct osbuf_hold_q *node;
- PACKET_LOG *log;
-
- A_ASSERT(p_aggr);
- A_ASSERT(tid < NUM_OF_TIDS);
-
- rxtid = AGGR_GET_RXTID(p_aggr, tid);
- stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
-
- stats->num_into_aggr++;
-
- if(!rxtid->aggr) {
- if(is_amsdu) {
- aggr_slice_amsdu(p_aggr, rxtid, osbuf);
- stats->num_amsdu++;
- aggr_dispatch_frames(p_aggr, &rxtid->q);
- }
- return;
- }
-
- /* Check the incoming sequence no, if it's in the window */
- st = rxtid->seq_next;
- cur = seq_no;
- end = (st + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO;
- /* Log the pkt info for future analysis */
- log = &p_aggr->pkt_log;
- log_idx = &log->last_idx;
- log->info[*log_idx].cur = cur;
- log->info[*log_idx].st = st;
- log->info[*log_idx].end = end;
- *log_idx = IEEE80211_NEXT_SEQ_NO(*log_idx);
-
- if(((st < end) && (cur < st || cur > end)) ||
- ((st > end) && (cur > end) && (cur < st))) {
- /* the cur frame is outside the window. Since we know
- * our target would not do this without reason it must
- * be assumed that the window has moved for some valid reason.
- * Therefore, we dequeue all frames and start fresh.
- */
- u16 extended_end;
-
- extended_end = (end + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO;
-
- if(((end < extended_end) && (cur < end || cur > extended_end)) ||
- ((end > extended_end) && (cur > extended_end) && (cur < end))) {
- // dequeue all frames in queue and shift window to new frame
- aggr_deque_frms(p_aggr, tid, 0, ALL_SEQNO);
- //set window start so that new frame is last frame in window
- if(cur >= rxtid->hold_q_sz-1) {
- rxtid->seq_next = cur - (rxtid->hold_q_sz-1);
- }else{
- rxtid->seq_next = IEEE80211_MAX_SEQ_NO - (rxtid->hold_q_sz-2 - cur);
- }
- } else {
- // dequeue only those frames that are outside the new shifted window
- if(cur >= rxtid->hold_q_sz-1) {
- st = cur - (rxtid->hold_q_sz-1);
- }else{
- st = IEEE80211_MAX_SEQ_NO - (rxtid->hold_q_sz-2 - cur);
- }
-
- aggr_deque_frms(p_aggr, tid, st, ALL_SEQNO);
- }
-
- stats->num_oow++;
- }
-
- idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
-
- /*enque the frame, in hold_q */
- node = &rxtid->hold_q[idx];
-
- A_MUTEX_LOCK(&rxtid->lock);
- if(node->osbuf) {
- /* Is the cur frame duplicate or something beyond our
- * window(hold_q -> which is 2x, already)?
- * 1. Duplicate is easy - drop incoming frame.
- * 2. Not falling in current sliding window.
- * 2a. is the frame_seq_no preceding current tid_seq_no?
- * -> drop the frame. perhaps sender did not get our ACK.
- * this is taken care of above.
- * 2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
- * -> Taken care of it above, by moving window forward.
- *
- */
- A_NETBUF_FREE(node->osbuf);
- stats->num_dups++;
- }
-
- node->osbuf = *osbuf;
- node->is_amsdu = is_amsdu;
- node->seq_no = seq_no;
- if(node->is_amsdu) {
- stats->num_amsdu++;
- } else {
- stats->num_mpdu++;
- }
- A_MUTEX_UNLOCK(&rxtid->lock);
-
- *osbuf = NULL;
- aggr_deque_frms(p_aggr, tid, 0, CONTIGUOUS_SEQNO);
-
- if(p_aggr->timerScheduled) {
- rxtid->progress = true;
- }else{
- for(idx=0 ; idx<rxtid->hold_q_sz ; idx++) {
- if(rxtid->hold_q[idx].osbuf) {
- /* there is a frame in the queue and no timer so
- * start a timer to ensure that the frame doesn't remain
- * stuck forever. */
- p_aggr->timerScheduled = true;
- A_TIMEOUT_MS(&p_aggr->timer, AGGR_RX_TIMEOUT, 0);
- rxtid->progress = false;
- rxtid->timerMon = true;
- break;
- }
- }
- }
-}
-
-/*
- * aggr_reset_state -- Called when it is deemed necessary to clear the aggregate
- * hold Q state. Examples include when a Connect event or disconnect event is
- * received.
- */
-void
-aggr_reset_state(void *cntxt)
-{
- u8 tid;
- struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
-
- A_ASSERT(p_aggr);
-
- for(tid=0 ; tid<NUM_OF_TIDS ; tid++) {
- aggr_delete_tid_state(p_aggr, tid);
- }
-}
-
-
-static void
-aggr_timeout(unsigned long arg)
-{
- u8 i,j;
- struct aggr_info *p_aggr = (struct aggr_info *)arg;
- struct rxtid *rxtid;
- struct rxtid_stats *stats;
- /*
- * If the q for which the timer was originally started has
- * not progressed then it is necessary to dequeue all the
- * contained frames so that they are not held forever.
- */
- for(i = 0; i < NUM_OF_TIDS; i++) {
- rxtid = AGGR_GET_RXTID(p_aggr, i);
- stats = AGGR_GET_RXTID_STATS(p_aggr, i);
-
- if(rxtid->aggr == false ||
- rxtid->timerMon == false ||
- rxtid->progress == true) {
- continue;
- }
- // dequeue all frames in for this tid
- stats->num_timeouts++;
- A_PRINTF("TO: st %d end %d\n", rxtid->seq_next, ((rxtid->seq_next + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO));
- aggr_deque_frms(p_aggr, i, 0, ALL_SEQNO);
- }
-
- p_aggr->timerScheduled = false;
- // determine whether a new timer should be started.
- for(i = 0; i < NUM_OF_TIDS; i++) {
- rxtid = AGGR_GET_RXTID(p_aggr, i);
-
- if(rxtid->aggr == true && rxtid->hold_q) {
- for(j = 0 ; j < rxtid->hold_q_sz ; j++)
- {
- if(rxtid->hold_q[j].osbuf)
- {
- p_aggr->timerScheduled = true;
- rxtid->timerMon = true;
- rxtid->progress = false;
- break;
- }
- }
-
- if(j >= rxtid->hold_q_sz) {
- rxtid->timerMon = false;
- }
- }
- }
-
- if(p_aggr->timerScheduled) {
- /* Rearm the timer*/
- A_TIMEOUT_MS(&p_aggr->timer, AGGR_RX_TIMEOUT, 0);
- }
-
-}
-
-static void
-aggr_dispatch_frames(struct aggr_info *p_aggr, A_NETBUF_QUEUE_T *q)
-{
- void *osbuf;
-
- while((osbuf = A_NETBUF_DEQUEUE(q))) {
- p_aggr->rx_fn(p_aggr->dev, osbuf);
- }
-}
-
-void
-aggr_dump_stats(void *cntxt, PACKET_LOG **log_buf)
-{
- struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
- struct rxtid *rxtid;
- struct rxtid_stats *stats;
- u8 i;
-
- *log_buf = &p_aggr->pkt_log;
- A_PRINTF("\n\n================================================\n");
- A_PRINTF("tid: num_into_aggr, dups, oow, mpdu, amsdu, delivered, timeouts, holes, bar, seq_next\n");
- for(i = 0; i < NUM_OF_TIDS; i++) {
- stats = AGGR_GET_RXTID_STATS(p_aggr, i);
- rxtid = AGGR_GET_RXTID(p_aggr, i);
- A_PRINTF("%d: %d %d %d %d %d %d %d %d %d : %d\n", i, stats->num_into_aggr, stats->num_dups,
- stats->num_oow, stats->num_mpdu,
- stats->num_amsdu, stats->num_delivered, stats->num_timeouts,
- stats->num_hole, stats->num_bar,
- rxtid->seq_next);
- }
- A_PRINTF("================================================\n\n");
-
-}
diff --git a/drivers/staging/ath6kl/wlan/include/ieee80211.h b/drivers/staging/ath6kl/wlan/include/ieee80211.h
deleted file mode 100644
index cf47d0657e7..00000000000
--- a/drivers/staging/ath6kl/wlan/include/ieee80211.h
+++ /dev/null
@@ -1,397 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="ieee80211.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _NET80211_IEEE80211_H_
-#define _NET80211_IEEE80211_H_
-
-/*
- * 802.11 protocol definitions.
- */
-#define IEEE80211_WEP_KEYLEN 5 /* 40bit */
-#define IEEE80211_WEP_IVLEN 3 /* 24bit */
-#define IEEE80211_WEP_KIDLEN 1 /* 1 octet */
-#define IEEE80211_WEP_CRCLEN 4 /* CRC-32 */
-#define IEEE80211_WEP_NKID 4 /* number of key ids */
-
-/*
- * 802.11i defines an extended IV for use with non-WEP ciphers.
- * When the EXTIV bit is set in the key id byte an additional
- * 4 bytes immediately follow the IV for TKIP. For CCMP the
- * EXTIV bit is likewise set but the 8 bytes represent the
- * CCMP header rather than IV+extended-IV.
- */
-#define IEEE80211_WEP_EXTIV 0x20
-#define IEEE80211_WEP_EXTIVLEN 4 /* extended IV length */
-#define IEEE80211_WEP_MICLEN 8 /* trailing MIC */
-
-#define IEEE80211_CRC_LEN 4
-
-#ifdef WAPI_ENABLE
-#define IEEE80211_WAPI_EXTIVLEN 10 /* extended IV length */
-#endif /* WAPI ENABLE */
-
-
-#define IEEE80211_ADDR_LEN 6 /* size of 802.11 address */
-/* is 802.11 address multicast/broadcast? */
-#define IEEE80211_IS_MULTICAST(_a) (*(_a) & 0x01)
-#define IEEE80211_IS_BROADCAST(_a) (*(_a) == 0xFF)
-#define WEP_HEADER (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN)
-#define WEP_TRAILER IEEE80211_WEP_CRCLEN
-#define CCMP_HEADER (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + \
- IEEE80211_WEP_EXTIVLEN)
-#define CCMP_TRAILER IEEE80211_WEP_MICLEN
-#define TKIP_HEADER (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + \
- IEEE80211_WEP_EXTIVLEN)
-#define TKIP_TRAILER IEEE80211_WEP_CRCLEN
-#define TKIP_MICLEN IEEE80211_WEP_MICLEN
-
-
-#define IEEE80211_ADDR_EQ(addr1, addr2) \
- (memcmp(addr1, addr2, IEEE80211_ADDR_LEN) == 0)
-
-#define IEEE80211_ADDR_COPY(dst,src) memcpy(dst,src,IEEE80211_ADDR_LEN)
-
-#define IEEE80211_KEYBUF_SIZE 16
-#define IEEE80211_MICBUF_SIZE (8+8) /* space for both tx and rx */
-
-/*
- * NB: these values are ordered carefully; there are lots of
- * of implications in any reordering. In particular beware
- * that 4 is not used to avoid conflicting with IEEE80211_F_PRIVACY.
- */
-#define IEEE80211_CIPHER_WEP 0
-#define IEEE80211_CIPHER_TKIP 1
-#define IEEE80211_CIPHER_AES_OCB 2
-#define IEEE80211_CIPHER_AES_CCM 3
-#define IEEE80211_CIPHER_CKIP 5
-#define IEEE80211_CIPHER_CCKM_KRK 6
-#define IEEE80211_CIPHER_NONE 7 /* pseudo value */
-
-#define IEEE80211_CIPHER_MAX (IEEE80211_CIPHER_NONE+1)
-
-#define IEEE80211_IS_VALID_WEP_CIPHER_LEN(len) \
- (((len) == 5) || ((len) == 13) || ((len) == 16))
-
-
-
-/*
- * generic definitions for IEEE 802.11 frames
- */
-PREPACK struct ieee80211_frame {
- u8 i_fc[2];
- u8 i_dur[2];
- u8 i_addr1[IEEE80211_ADDR_LEN];
- u8 i_addr2[IEEE80211_ADDR_LEN];
- u8 i_addr3[IEEE80211_ADDR_LEN];
- u8 i_seq[2];
- /* possibly followed by addr4[IEEE80211_ADDR_LEN]; */
- /* see below */
-} POSTPACK;
-
-PREPACK struct ieee80211_qosframe {
- u8 i_fc[2];
- u8 i_dur[2];
- u8 i_addr1[IEEE80211_ADDR_LEN];
- u8 i_addr2[IEEE80211_ADDR_LEN];
- u8 i_addr3[IEEE80211_ADDR_LEN];
- u8 i_seq[2];
- u8 i_qos[2];
-} POSTPACK;
-
-#define IEEE80211_FC0_VERSION_MASK 0x03
-#define IEEE80211_FC0_VERSION_SHIFT 0
-#define IEEE80211_FC0_VERSION_0 0x00
-#define IEEE80211_FC0_TYPE_MASK 0x0c
-#define IEEE80211_FC0_TYPE_SHIFT 2
-#define IEEE80211_FC0_TYPE_MGT 0x00
-#define IEEE80211_FC0_TYPE_CTL 0x04
-#define IEEE80211_FC0_TYPE_DATA 0x08
-
-#define IEEE80211_FC0_SUBTYPE_MASK 0xf0
-#define IEEE80211_FC0_SUBTYPE_SHIFT 4
-/* for TYPE_MGT */
-#define IEEE80211_FC0_SUBTYPE_ASSOC_REQ 0x00
-#define IEEE80211_FC0_SUBTYPE_ASSOC_RESP 0x10
-#define IEEE80211_FC0_SUBTYPE_REASSOC_REQ 0x20
-#define IEEE80211_FC0_SUBTYPE_REASSOC_RESP 0x30
-#define IEEE80211_FC0_SUBTYPE_PROBE_REQ 0x40
-#define IEEE80211_FC0_SUBTYPE_PROBE_RESP 0x50
-#define IEEE80211_FC0_SUBTYPE_BEACON 0x80
-#define IEEE80211_FC0_SUBTYPE_ATIM 0x90
-#define IEEE80211_FC0_SUBTYPE_DISASSOC 0xa0
-#define IEEE80211_FC0_SUBTYPE_AUTH 0xb0
-#define IEEE80211_FC0_SUBTYPE_DEAUTH 0xc0
-/* for TYPE_CTL */
-#define IEEE80211_FC0_SUBTYPE_PS_POLL 0xa0
-#define IEEE80211_FC0_SUBTYPE_RTS 0xb0
-#define IEEE80211_FC0_SUBTYPE_CTS 0xc0
-#define IEEE80211_FC0_SUBTYPE_ACK 0xd0
-#define IEEE80211_FC0_SUBTYPE_CF_END 0xe0
-#define IEEE80211_FC0_SUBTYPE_CF_END_ACK 0xf0
-/* for TYPE_DATA (bit combination) */
-#define IEEE80211_FC0_SUBTYPE_DATA 0x00
-#define IEEE80211_FC0_SUBTYPE_CF_ACK 0x10
-#define IEEE80211_FC0_SUBTYPE_CF_POLL 0x20
-#define IEEE80211_FC0_SUBTYPE_CF_ACPL 0x30
-#define IEEE80211_FC0_SUBTYPE_NODATA 0x40
-#define IEEE80211_FC0_SUBTYPE_CFACK 0x50
-#define IEEE80211_FC0_SUBTYPE_CFPOLL 0x60
-#define IEEE80211_FC0_SUBTYPE_CF_ACK_CF_ACK 0x70
-#define IEEE80211_FC0_SUBTYPE_QOS 0x80
-#define IEEE80211_FC0_SUBTYPE_QOS_NULL 0xc0
-
-#define IEEE80211_FC1_DIR_MASK 0x03
-#define IEEE80211_FC1_DIR_NODS 0x00 /* STA->STA */
-#define IEEE80211_FC1_DIR_TODS 0x01 /* STA->AP */
-#define IEEE80211_FC1_DIR_FROMDS 0x02 /* AP ->STA */
-#define IEEE80211_FC1_DIR_DSTODS 0x03 /* AP ->AP */
-
-#define IEEE80211_FC1_MORE_FRAG 0x04
-#define IEEE80211_FC1_RETRY 0x08
-#define IEEE80211_FC1_PWR_MGT 0x10
-#define IEEE80211_FC1_MORE_DATA 0x20
-#define IEEE80211_FC1_WEP 0x40
-#define IEEE80211_FC1_ORDER 0x80
-
-#define IEEE80211_SEQ_FRAG_MASK 0x000f
-#define IEEE80211_SEQ_FRAG_SHIFT 0
-#define IEEE80211_SEQ_SEQ_MASK 0xfff0
-#define IEEE80211_SEQ_SEQ_SHIFT 4
-
-#define IEEE80211_NWID_LEN 32
-
-/*
- * 802.11 rate set.
- */
-#define IEEE80211_RATE_SIZE 8 /* 802.11 standard */
-#define IEEE80211_RATE_MAXSIZE 15 /* max rates we'll handle */
-
-#define WMM_NUM_AC 4 /* 4 AC categories */
-
-#define WMM_PARAM_ACI_M 0x60 /* Mask for ACI field */
-#define WMM_PARAM_ACI_S 5 /* Shift for ACI field */
-#define WMM_PARAM_ACM_M 0x10 /* Mask for ACM bit */
-#define WMM_PARAM_ACM_S 4 /* Shift for ACM bit */
-#define WMM_PARAM_AIFSN_M 0x0f /* Mask for aifsn field */
-#define WMM_PARAM_LOGCWMIN_M 0x0f /* Mask for CwMin field (in log) */
-#define WMM_PARAM_LOGCWMAX_M 0xf0 /* Mask for CwMax field (in log) */
-#define WMM_PARAM_LOGCWMAX_S 4 /* Shift for CwMax field */
-
-#define WMM_AC_TO_TID(_ac) ( \
- ((_ac) == WMM_AC_VO) ? 6 : \
- ((_ac) == WMM_AC_VI) ? 5 : \
- ((_ac) == WMM_AC_BK) ? 1 : \
- 0)
-
-#define TID_TO_WMM_AC(_tid) ( \
- ((_tid) < 1) ? WMM_AC_BE : \
- ((_tid) < 3) ? WMM_AC_BK : \
- ((_tid) < 6) ? WMM_AC_VI : \
- WMM_AC_VO)
-/*
- * Management information element payloads.
- */
-
-enum {
- IEEE80211_ELEMID_SSID = 0,
- IEEE80211_ELEMID_RATES = 1,
- IEEE80211_ELEMID_FHPARMS = 2,
- IEEE80211_ELEMID_DSPARMS = 3,
- IEEE80211_ELEMID_CFPARMS = 4,
- IEEE80211_ELEMID_TIM = 5,
- IEEE80211_ELEMID_IBSSPARMS = 6,
- IEEE80211_ELEMID_COUNTRY = 7,
- IEEE80211_ELEMID_CHALLENGE = 16,
- /* 17-31 reserved for challenge text extension */
- IEEE80211_ELEMID_PWRCNSTR = 32,
- IEEE80211_ELEMID_PWRCAP = 33,
- IEEE80211_ELEMID_TPCREQ = 34,
- IEEE80211_ELEMID_TPCREP = 35,
- IEEE80211_ELEMID_SUPPCHAN = 36,
- IEEE80211_ELEMID_CHANSWITCH = 37,
- IEEE80211_ELEMID_MEASREQ = 38,
- IEEE80211_ELEMID_MEASREP = 39,
- IEEE80211_ELEMID_QUIET = 40,
- IEEE80211_ELEMID_IBSSDFS = 41,
- IEEE80211_ELEMID_ERP = 42,
- IEEE80211_ELEMID_HTCAP_ANA = 45, /* Address ANA, and non-ANA story, for interop. CL#171733 */
- IEEE80211_ELEMID_RSN = 48,
- IEEE80211_ELEMID_XRATES = 50,
- IEEE80211_ELEMID_HTINFO_ANA = 61,
-#ifdef WAPI_ENABLE
- IEEE80211_ELEMID_WAPI = 68,
-#endif
- IEEE80211_ELEMID_TPC = 150,
- IEEE80211_ELEMID_CCKM = 156,
- IEEE80211_ELEMID_VENDOR = 221, /* vendor private */
-};
-
-#define ATH_OUI 0x7f0300 /* Atheros OUI */
-#define ATH_OUI_TYPE 0x01
-#define ATH_OUI_SUBTYPE 0x01
-#define ATH_OUI_VERSION 0x00
-
-#define WPA_OUI 0xf25000
-#define WPA_OUI_TYPE 0x01
-#define WPA_VERSION 1 /* current supported version */
-
-#define WPA_CSE_NULL 0x00
-#define WPA_CSE_WEP40 0x01
-#define WPA_CSE_TKIP 0x02
-#define WPA_CSE_CCMP 0x04
-#define WPA_CSE_WEP104 0x05
-
-#define WPA_ASE_NONE 0x00
-#define WPA_ASE_8021X_UNSPEC 0x01
-#define WPA_ASE_8021X_PSK 0x02
-
-#define RSN_OUI 0xac0f00
-#define RSN_VERSION 1 /* current supported version */
-
-#define RSN_CSE_NULL 0x00
-#define RSN_CSE_WEP40 0x01
-#define RSN_CSE_TKIP 0x02
-#define RSN_CSE_WRAP 0x03
-#define RSN_CSE_CCMP 0x04
-#define RSN_CSE_WEP104 0x05
-
-#define RSN_ASE_NONE 0x00
-#define RSN_ASE_8021X_UNSPEC 0x01
-#define RSN_ASE_8021X_PSK 0x02
-
-#define RSN_CAP_PREAUTH 0x01
-
-#define WMM_OUI 0xf25000
-#define WMM_OUI_TYPE 0x02
-#define WMM_INFO_OUI_SUBTYPE 0x00
-#define WMM_PARAM_OUI_SUBTYPE 0x01
-#define WMM_VERSION 1
-
-/* WMM stream classes */
-#define WMM_NUM_AC 4
-#define WMM_AC_BE 0 /* best effort */
-#define WMM_AC_BK 1 /* background */
-#define WMM_AC_VI 2 /* video */
-#define WMM_AC_VO 3 /* voice */
-
-/* TSPEC related */
-#define ACTION_CATEGORY_CODE_TSPEC 17
-#define ACTION_CODE_TSPEC_ADDTS 0
-#define ACTION_CODE_TSPEC_ADDTS_RESP 1
-#define ACTION_CODE_TSPEC_DELTS 2
-
-typedef enum {
- TSPEC_STATUS_CODE_ADMISSION_ACCEPTED = 0,
- TSPEC_STATUS_CODE_ADDTS_INVALID_PARAMS = 0x1,
- TSPEC_STATUS_CODE_ADDTS_REQUEST_REFUSED = 0x3,
- TSPEC_STATUS_CODE_UNSPECIFIED_QOS_RELATED_FAILURE = 0xC8,
- TSPEC_STATUS_CODE_REQUESTED_REFUSED_POLICY_CONFIGURATION = 0xC9,
- TSPEC_STATUS_CODE_INSUFFCIENT_BANDWIDTH = 0xCA,
- TSPEC_STATUS_CODE_INVALID_PARAMS = 0xCB,
- TSPEC_STATUS_CODE_DELTS_SENT = 0x30,
- TSPEC_STATUS_CODE_DELTS_RECV = 0x31,
-} TSPEC_STATUS_CODE;
-
-#define TSPEC_TSID_MASK 0xF
-#define TSPEC_TSID_S 1
-
-/*
- * WMM/802.11e Tspec Element
- */
-typedef PREPACK struct wmm_tspec_ie_t {
- u8 elementId;
- u8 len;
- u8 oui[3];
- u8 ouiType;
- u8 ouiSubType;
- u8 version;
- u16 tsInfo_info;
- u8 tsInfo_reserved;
- u16 nominalMSDU;
- u16 maxMSDU;
- u32 minServiceInt;
- u32 maxServiceInt;
- u32 inactivityInt;
- u32 suspensionInt;
- u32 serviceStartTime;
- u32 minDataRate;
- u32 meanDataRate;
- u32 peakDataRate;
- u32 maxBurstSize;
- u32 delayBound;
- u32 minPhyRate;
- u16 sba;
- u16 mediumTime;
-} POSTPACK WMM_TSPEC_IE;
-
-
-/*
- * BEACON management packets
- *
- * octet timestamp[8]
- * octet beacon interval[2]
- * octet capability information[2]
- * information element
- * octet elemid
- * octet length
- * octet information[length]
- */
-
-#define IEEE80211_BEACON_INTERVAL(beacon) \
- ((beacon)[8] | ((beacon)[9] << 8))
-#define IEEE80211_BEACON_CAPABILITY(beacon) \
- ((beacon)[10] | ((beacon)[11] << 8))
-
-#define IEEE80211_CAPINFO_ESS 0x0001
-#define IEEE80211_CAPINFO_IBSS 0x0002
-#define IEEE80211_CAPINFO_CF_POLLABLE 0x0004
-#define IEEE80211_CAPINFO_CF_POLLREQ 0x0008
-#define IEEE80211_CAPINFO_PRIVACY 0x0010
-#define IEEE80211_CAPINFO_SHORT_PREAMBLE 0x0020
-#define IEEE80211_CAPINFO_PBCC 0x0040
-#define IEEE80211_CAPINFO_CHNL_AGILITY 0x0080
-/* bits 8-9 are reserved */
-#define IEEE80211_CAPINFO_SHORT_SLOTTIME 0x0400
-#define IEEE80211_CAPINFO_APSD 0x0800
-/* bit 12 is reserved */
-#define IEEE80211_CAPINFO_DSSSOFDM 0x2000
-/* bits 14-15 are reserved */
-
-/*
- * Authentication Modes
- */
-
-enum ieee80211_authmode {
- IEEE80211_AUTH_NONE = 0,
- IEEE80211_AUTH_OPEN = 1,
- IEEE80211_AUTH_SHARED = 2,
- IEEE80211_AUTH_8021X = 3,
- IEEE80211_AUTH_AUTO = 4, /* auto-select/accept */
- /* NB: these are used only for ioctls */
- IEEE80211_AUTH_WPA = 5, /* WPA/RSN w/ 802.1x */
- IEEE80211_AUTH_WPA_PSK = 6, /* WPA/RSN w/ PSK */
- IEEE80211_AUTH_WPA_CCKM = 7, /* WPA/RSN IE w/ CCKM */
-};
-
-#define IEEE80211_PS_MAX_QUEUE 50 /*Maximum no of buffers that can be queues for PS*/
-
-#endif /* _NET80211_IEEE80211_H_ */
diff --git a/drivers/staging/ath6kl/wlan/include/ieee80211_node.h b/drivers/staging/ath6kl/wlan/include/ieee80211_node.h
deleted file mode 100644
index 1cb01671c0d..00000000000
--- a/drivers/staging/ath6kl/wlan/include/ieee80211_node.h
+++ /dev/null
@@ -1,93 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="ieee80211_node.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _IEEE80211_NODE_H_
-#define _IEEE80211_NODE_H_
-
-/*
- * Node locking definitions.
- */
-#define IEEE80211_NODE_LOCK_INIT(_nt) A_MUTEX_INIT(&(_nt)->nt_nodelock)
-#define IEEE80211_NODE_LOCK_DESTROY(_nt) if (A_IS_MUTEX_VALID(&(_nt)->nt_nodelock)) { \
- A_MUTEX_DELETE(&(_nt)->nt_nodelock); }
-
-#define IEEE80211_NODE_LOCK(_nt) A_MUTEX_LOCK(&(_nt)->nt_nodelock)
-#define IEEE80211_NODE_UNLOCK(_nt) A_MUTEX_UNLOCK(&(_nt)->nt_nodelock)
-#define IEEE80211_NODE_LOCK_BH(_nt) A_MUTEX_LOCK(&(_nt)->nt_nodelock)
-#define IEEE80211_NODE_UNLOCK_BH(_nt) A_MUTEX_UNLOCK(&(_nt)->nt_nodelock)
-#define IEEE80211_NODE_LOCK_ASSERT(_nt)
-
-/*
- * Node reference counting definitions.
- *
- * ieee80211_node_initref initialize the reference count to 1
- * ieee80211_node_incref add a reference
- * ieee80211_node_decref remove a reference
- * ieee80211_node_dectestref remove a reference and return 1 if this
- * is the last reference, otherwise 0
- * ieee80211_node_refcnt reference count for printing (only)
- */
-#define ieee80211_node_initref(_ni) ((_ni)->ni_refcnt = 1)
-#define ieee80211_node_incref(_ni) ((_ni)->ni_refcnt++)
-#define ieee80211_node_decref(_ni) ((_ni)->ni_refcnt--)
-#define ieee80211_node_dectestref(_ni) (((_ni)->ni_refcnt--) == 1)
-#define ieee80211_node_refcnt(_ni) ((_ni)->ni_refcnt)
-
-#define IEEE80211_NODE_HASHSIZE 32
-/* simple hash is enough for variation of macaddr */
-#define IEEE80211_NODE_HASH(addr) \
- (((const u8 *)(addr))[IEEE80211_ADDR_LEN - 1] % \
- IEEE80211_NODE_HASHSIZE)
-
-/*
- * Table of ieee80211_node instances. Each ieee80211com
- * has at least one for holding the scan candidates.
- * When operating as an access point or in ibss mode there
- * is a second table for associated stations or neighbors.
- */
-struct ieee80211_node_table {
- void *nt_wmip; /* back reference */
- A_MUTEX_T nt_nodelock; /* on node table */
- struct bss *nt_node_first; /* information of all nodes */
- struct bss *nt_node_last; /* information of all nodes */
- struct bss *nt_hash[IEEE80211_NODE_HASHSIZE];
- const char *nt_name; /* for debugging */
- u32 nt_scangen; /* gen# for timeout scan */
-#ifdef THREAD_X
- A_TIMER nt_inact_timer;
- u8 isTimerArmed; /* is the node timer armed */
-#endif
- u32 nt_nodeAge; /* node aging time */
-#ifdef OS_ROAM_MANAGEMENT
- u32 nt_si_gen; /* gen# for scan indication*/
-#endif
-};
-
-#ifdef THREAD_X
-#define WLAN_NODE_INACT_TIMEOUT_MSEC 20000
-#else
-#define WLAN_NODE_INACT_TIMEOUT_MSEC 120000
-#endif
-
-#define WLAN_NODE_INACT_CNT 4
-
-#endif /* _IEEE80211_NODE_H_ */
diff --git a/drivers/staging/ath6kl/wlan/src/wlan_node.c b/drivers/staging/ath6kl/wlan/src/wlan_node.c
deleted file mode 100644
index 0fe5f4b1346..00000000000
--- a/drivers/staging/ath6kl/wlan/src/wlan_node.c
+++ /dev/null
@@ -1,636 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="wlan_node.c" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// IEEE 802.11 node handling support.
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#include <a_config.h>
-#include <athdefs.h>
-#include <a_osapi.h>
-#define ATH_MODULE_NAME wlan
-#include <a_debug.h>
-#include "htc.h"
-#include "htc_api.h"
-#include <wmi.h>
-#include <ieee80211.h>
-#include <wlan_api.h>
-#include <wmi_api.h>
-#include <ieee80211_node.h>
-
-#define ATH_DEBUG_WLAN ATH_DEBUG_MAKE_MODULE_MASK(0)
-
-#ifdef ATH_DEBUG_MODULE
-
-static struct ath_debug_mask_description wlan_debug_desc[] = {
- { ATH_DEBUG_WLAN , "General WLAN Node Tracing"},
-};
-
-ATH_DEBUG_INSTANTIATE_MODULE_VAR(wlan,
- "wlan",
- "WLAN Node Management",
- ATH_DEBUG_MASK_DEFAULTS,
- ATH_DEBUG_DESCRIPTION_COUNT(wlan_debug_desc),
- wlan_debug_desc);
-
-#endif
-
-#ifdef THREAD_X
-static void wlan_node_timeout(unsigned long arg);
-#endif
-
-static bss_t * _ieee80211_find_node (struct ieee80211_node_table *nt,
- const u8 *macaddr);
-
-bss_t *
-wlan_node_alloc(struct ieee80211_node_table *nt, int wh_size)
-{
- bss_t *ni;
-
- ni = A_MALLOC_NOWAIT(sizeof(bss_t));
-
- if (ni != NULL) {
- if (wh_size)
- {
- ni->ni_buf = A_MALLOC_NOWAIT(wh_size);
- if (ni->ni_buf == NULL) {
- kfree(ni);
- ni = NULL;
- return ni;
- }
- }
- } else {
- return ni;
- }
-
- /* Make sure our lists are clean */
- ni->ni_list_next = NULL;
- ni->ni_list_prev = NULL;
- ni->ni_hash_next = NULL;
- ni->ni_hash_prev = NULL;
-
- //
- // ni_scangen never initialized before and during suspend/resume of winmobile,
- // that some junk has been stored in this, due to this scan list didn't properly updated
- //
- ni->ni_scangen = 0;
-
-#ifdef OS_ROAM_MANAGEMENT
- ni->ni_si_gen = 0;
-#endif
-
- return ni;
-}
-
-void
-wlan_node_free(bss_t *ni)
-{
- if (ni->ni_buf != NULL) {
- kfree(ni->ni_buf);
- }
- kfree(ni);
-}
-
-void
-wlan_setup_node(struct ieee80211_node_table *nt, bss_t *ni,
- const u8 *macaddr)
-{
- int hash;
- u32 timeoutValue = 0;
-
- memcpy(ni->ni_macaddr, macaddr, IEEE80211_ADDR_LEN);
- hash = IEEE80211_NODE_HASH (macaddr);
- ieee80211_node_initref (ni); /* mark referenced */
-
- timeoutValue = nt->nt_nodeAge;
-
- ni->ni_tstamp = A_GET_MS (0);
- ni->ni_actcnt = WLAN_NODE_INACT_CNT;
-
- IEEE80211_NODE_LOCK_BH(nt);
-
- /* Insert at the end of the node list */
- ni->ni_list_next = NULL;
- ni->ni_list_prev = nt->nt_node_last;
- if(nt->nt_node_last != NULL)
- {
- nt->nt_node_last->ni_list_next = ni;
- }
- nt->nt_node_last = ni;
- if(nt->nt_node_first == NULL)
- {
- nt->nt_node_first = ni;
- }
-
- /* Insert into the hash list i.e. the bucket */
- if((ni->ni_hash_next = nt->nt_hash[hash]) != NULL)
- {
- nt->nt_hash[hash]->ni_hash_prev = ni;
- }
- ni->ni_hash_prev = NULL;
- nt->nt_hash[hash] = ni;
-
-#ifdef THREAD_X
- if (!nt->isTimerArmed) {
- A_TIMEOUT_MS(&nt->nt_inact_timer, timeoutValue, 0);
- nt->isTimerArmed = true;
- }
-#endif
-
- IEEE80211_NODE_UNLOCK_BH(nt);
-}
-
-static bss_t *
-_ieee80211_find_node(struct ieee80211_node_table *nt,
- const u8 *macaddr)
-{
- bss_t *ni;
- int hash;
-
- IEEE80211_NODE_LOCK_ASSERT(nt);
-
- hash = IEEE80211_NODE_HASH(macaddr);
- for(ni = nt->nt_hash[hash]; ni; ni = ni->ni_hash_next) {
- if (IEEE80211_ADDR_EQ(ni->ni_macaddr, macaddr)) {
- ieee80211_node_incref(ni); /* mark referenced */
- return ni;
- }
- }
- return NULL;
-}
-
-bss_t *
-wlan_find_node(struct ieee80211_node_table *nt, const u8 *macaddr)
-{
- bss_t *ni;
-
- IEEE80211_NODE_LOCK(nt);
- ni = _ieee80211_find_node(nt, macaddr);
- IEEE80211_NODE_UNLOCK(nt);
- return ni;
-}
-
-/*
- * Reclaim a node. If this is the last reference count then
- * do the normal free work. Otherwise remove it from the node
- * table and mark it gone by clearing the back-reference.
- */
-void
-wlan_node_reclaim(struct ieee80211_node_table *nt, bss_t *ni)
-{
- IEEE80211_NODE_LOCK(nt);
-
- if(ni->ni_list_prev == NULL)
- {
- /* First in list so fix the list head */
- nt->nt_node_first = ni->ni_list_next;
- }
- else
- {
- ni->ni_list_prev->ni_list_next = ni->ni_list_next;
- }
-
- if(ni->ni_list_next == NULL)
- {
- /* Last in list so fix list tail */
- nt->nt_node_last = ni->ni_list_prev;
- }
- else
- {
- ni->ni_list_next->ni_list_prev = ni->ni_list_prev;
- }
-
- if(ni->ni_hash_prev == NULL)
- {
- /* First in list so fix the list head */
- int hash;
- hash = IEEE80211_NODE_HASH(ni->ni_macaddr);
- nt->nt_hash[hash] = ni->ni_hash_next;
- }
- else
- {
- ni->ni_hash_prev->ni_hash_next = ni->ni_hash_next;
- }
-
- if(ni->ni_hash_next != NULL)
- {
- ni->ni_hash_next->ni_hash_prev = ni->ni_hash_prev;
- }
- wlan_node_free(ni);
-
- IEEE80211_NODE_UNLOCK(nt);
-}
-
-static void
-wlan_node_dec_free(bss_t *ni)
-{
- if (ieee80211_node_dectestref(ni)) {
- wlan_node_free(ni);
- }
-}
-
-void
-wlan_free_allnodes(struct ieee80211_node_table *nt)
-{
- bss_t *ni;
-
- while ((ni = nt->nt_node_first) != NULL) {
- wlan_node_reclaim(nt, ni);
- }
-}
-
-void
-wlan_iterate_nodes(struct ieee80211_node_table *nt, wlan_node_iter_func *f,
- void *arg)
-{
- bss_t *ni;
- u32 gen;
-
- gen = ++nt->nt_scangen;
-
- IEEE80211_NODE_LOCK(nt);
- for (ni = nt->nt_node_first; ni; ni = ni->ni_list_next) {
- if (ni->ni_scangen != gen) {
- ni->ni_scangen = gen;
- (void) ieee80211_node_incref(ni);
- (*f)(arg, ni);
- wlan_node_dec_free(ni);
- }
- }
- IEEE80211_NODE_UNLOCK(nt);
-}
-
-/*
- * Node table support.
- */
-void
-wlan_node_table_init(void *wmip, struct ieee80211_node_table *nt)
-{
- int i;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WLAN, ("node table = 0x%lx\n", (unsigned long)nt));
- IEEE80211_NODE_LOCK_INIT(nt);
-
- A_REGISTER_MODULE_DEBUG_INFO(wlan);
-
- nt->nt_node_first = nt->nt_node_last = NULL;
- for(i = 0; i < IEEE80211_NODE_HASHSIZE; i++)
- {
- nt->nt_hash[i] = NULL;
- }
-
-#ifdef THREAD_X
- A_INIT_TIMER(&nt->nt_inact_timer, wlan_node_timeout, nt);
- nt->isTimerArmed = false;
-#endif
- nt->nt_wmip = wmip;
- nt->nt_nodeAge = WLAN_NODE_INACT_TIMEOUT_MSEC;
-
- //
- // nt_scangen never initialized before and during suspend/resume of winmobile,
- // that some junk has been stored in this, due to this scan list didn't properly updated
- //
- nt->nt_scangen = 0;
-
-#ifdef OS_ROAM_MANAGEMENT
- nt->nt_si_gen = 0;
-#endif
-}
-
-void
-wlan_set_nodeage(struct ieee80211_node_table *nt, u32 nodeAge)
-{
- nt->nt_nodeAge = nodeAge;
- return;
-}
-void
-wlan_refresh_inactive_nodes (struct ieee80211_node_table *nt)
-{
-#ifdef THREAD_X
- bss_t *bss, *nextBss;
- u8 myBssid[IEEE80211_ADDR_LEN], reArmTimer = false;
-
- wmi_get_current_bssid(nt->nt_wmip, myBssid);
-
- bss = nt->nt_node_first;
- while (bss != NULL)
- {
- nextBss = bss->ni_list_next;
- if (memcmp(myBssid, bss->ni_macaddr, sizeof(myBssid)) != 0)
- {
- /*
- * free up all but the current bss - if set
- */
- wlan_node_reclaim(nt, bss);
-
- }
- bss = nextBss;
- }
-#else
- bss_t *bss, *nextBss;
- u8 myBssid[IEEE80211_ADDR_LEN];
- u32 timeoutValue = 0;
- u32 now = A_GET_MS(0);
- timeoutValue = nt->nt_nodeAge;
-
- wmi_get_current_bssid(nt->nt_wmip, myBssid);
-
- bss = nt->nt_node_first;
- while (bss != NULL)
- {
- nextBss = bss->ni_list_next;
- if (memcmp(myBssid, bss->ni_macaddr, sizeof(myBssid)) != 0)
- {
-
- if (((now - bss->ni_tstamp) > timeoutValue) || --bss->ni_actcnt == 0)
- {
- /*
- * free up all but the current bss - if set
- */
- wlan_node_reclaim(nt, bss);
- }
- }
- bss = nextBss;
- }
-#endif
-}
-
-#ifdef THREAD_X
-static void
-wlan_node_timeout (unsigned long arg)
-{
- struct ieee80211_node_table *nt = (struct ieee80211_node_table *)arg;
- bss_t *bss, *nextBss;
- u8 myBssid[IEEE80211_ADDR_LEN], reArmTimer = false;
- u32 timeoutValue = 0;
- u32 now = A_GET_MS(0);
-
- timeoutValue = nt->nt_nodeAge;
-
- wmi_get_current_bssid(nt->nt_wmip, myBssid);
-
- bss = nt->nt_node_first;
- while (bss != NULL)
- {
- nextBss = bss->ni_list_next;
- if (memcmp(myBssid, bss->ni_macaddr, sizeof(myBssid)) != 0)
- {
-
- if ((now - bss->ni_tstamp) > timeoutValue)
- {
- /*
- * free up all but the current bss - if set
- */
- wlan_node_reclaim(nt, bss);
- }
- else
- {
- /*
- * Re-arm timer, only when we have a bss other than
- * current bss AND it is not aged-out.
- */
- reArmTimer = true;
- }
- }
- bss = nextBss;
- }
-
- if (reArmTimer)
- A_TIMEOUT_MS (&nt->nt_inact_timer, timeoutValue, 0);
-
- nt->isTimerArmed = reArmTimer;
-}
-#endif
-
-void
-wlan_node_table_cleanup(struct ieee80211_node_table *nt)
-{
-#ifdef THREAD_X
- A_UNTIMEOUT(&nt->nt_inact_timer);
- A_DELETE_TIMER(&nt->nt_inact_timer);
-#endif
- wlan_free_allnodes(nt);
- IEEE80211_NODE_LOCK_DESTROY(nt);
-}
-
-bss_t *
-wlan_find_Ssidnode (struct ieee80211_node_table *nt, u8 *pSsid,
- u32 ssidLength, bool bIsWPA2, bool bMatchSSID)
-{
- bss_t *ni = NULL;
- u8 *pIESsid = NULL;
-
- IEEE80211_NODE_LOCK (nt);
-
- for (ni = nt->nt_node_first; ni; ni = ni->ni_list_next) {
- pIESsid = ni->ni_cie.ie_ssid;
- if (pIESsid[1] <= 32) {
-
- // Step 1 : Check SSID
- if (0x00 == memcmp (pSsid, &pIESsid[2], ssidLength)) {
-
- //
- // Step 2.1 : Check MatchSSID is true, if so, return Matched SSID
- // Profile, otherwise check whether WPA2 or WPA
- //
- if (true == bMatchSSID) {
- ieee80211_node_incref (ni); /* mark referenced */
- IEEE80211_NODE_UNLOCK (nt);
- return ni;
- }
-
- // Step 2 : if SSID matches, check WPA or WPA2
- if (true == bIsWPA2 && NULL != ni->ni_cie.ie_rsn) {
- ieee80211_node_incref (ni); /* mark referenced */
- IEEE80211_NODE_UNLOCK (nt);
- return ni;
- }
- if (false == bIsWPA2 && NULL != ni->ni_cie.ie_wpa) {
- ieee80211_node_incref(ni); /* mark referenced */
- IEEE80211_NODE_UNLOCK (nt);
- return ni;
- }
- }
- }
- }
-
- IEEE80211_NODE_UNLOCK (nt);
-
- return NULL;
-}
-
-void
-wlan_node_return (struct ieee80211_node_table *nt, bss_t *ni)
-{
- IEEE80211_NODE_LOCK (nt);
- wlan_node_dec_free (ni);
- IEEE80211_NODE_UNLOCK (nt);
-}
-
-void
-wlan_node_remove_core (struct ieee80211_node_table *nt, bss_t *ni)
-{
- if(ni->ni_list_prev == NULL)
- {
- /* First in list so fix the list head */
- nt->nt_node_first = ni->ni_list_next;
- }
- else
- {
- ni->ni_list_prev->ni_list_next = ni->ni_list_next;
- }
-
- if(ni->ni_list_next == NULL)
- {
- /* Last in list so fix list tail */
- nt->nt_node_last = ni->ni_list_prev;
- }
- else
- {
- ni->ni_list_next->ni_list_prev = ni->ni_list_prev;
- }
-
- if(ni->ni_hash_prev == NULL)
- {
- /* First in list so fix the list head */
- int hash;
- hash = IEEE80211_NODE_HASH(ni->ni_macaddr);
- nt->nt_hash[hash] = ni->ni_hash_next;
- }
- else
- {
- ni->ni_hash_prev->ni_hash_next = ni->ni_hash_next;
- }
-
- if(ni->ni_hash_next != NULL)
- {
- ni->ni_hash_next->ni_hash_prev = ni->ni_hash_prev;
- }
-}
-
-bss_t *
-wlan_node_remove(struct ieee80211_node_table *nt, u8 *bssid)
-{
- bss_t *bss, *nextBss;
-
- IEEE80211_NODE_LOCK(nt);
-
- bss = nt->nt_node_first;
-
- while (bss != NULL)
- {
- nextBss = bss->ni_list_next;
-
- if (memcmp(bssid, bss->ni_macaddr, 6) == 0)
- {
- wlan_node_remove_core (nt, bss);
- IEEE80211_NODE_UNLOCK(nt);
- return bss;
- }
-
- bss = nextBss;
- }
-
- IEEE80211_NODE_UNLOCK(nt);
- return NULL;
-}
-
-bss_t *
-wlan_find_matching_Ssidnode (struct ieee80211_node_table *nt, u8 *pSsid,
- u32 ssidLength, u32 dot11AuthMode, u32 authMode,
- u32 pairwiseCryptoType, u32 grpwiseCryptoTyp)
-{
- bss_t *ni = NULL;
- bss_t *best_ni = NULL;
- u8 *pIESsid = NULL;
-
- IEEE80211_NODE_LOCK (nt);
-
- for (ni = nt->nt_node_first; ni; ni = ni->ni_list_next) {
- pIESsid = ni->ni_cie.ie_ssid;
- if (pIESsid[1] <= 32) {
-
- // Step 1 : Check SSID
- if (0x00 == memcmp (pSsid, &pIESsid[2], ssidLength)) {
-
- if (ni->ni_cie.ie_capInfo & 0x10)
- {
-
- if ((NULL != ni->ni_cie.ie_rsn) && (WPA2_PSK_AUTH == authMode))
- {
- /* WPA2 */
- if (NULL == best_ni)
- {
- best_ni = ni;
- }
- else if (ni->ni_rssi > best_ni->ni_rssi)
- {
- best_ni = ni;
- }
- }
- else if ((NULL != ni->ni_cie.ie_wpa) && (WPA_PSK_AUTH == authMode))
- {
- /* WPA */
- if (NULL == best_ni)
- {
- best_ni = ni;
- }
- else if (ni->ni_rssi > best_ni->ni_rssi)
- {
- best_ni = ni;
- }
- }
- else if (WEP_CRYPT == pairwiseCryptoType)
- {
- /* WEP */
- if (NULL == best_ni)
- {
- best_ni = ni;
- }
- else if (ni->ni_rssi > best_ni->ni_rssi)
- {
- best_ni = ni;
- }
- }
- }
- else
- {
- /* open AP */
- if ((OPEN_AUTH == authMode) && (NONE_CRYPT == pairwiseCryptoType))
- {
- if (NULL == best_ni)
- {
- best_ni = ni;
- }
- else if (ni->ni_rssi > best_ni->ni_rssi)
- {
- best_ni = ni;
- }
- }
- }
- }
- }
- }
-
- IEEE80211_NODE_UNLOCK (nt);
-
- return best_ni;
-}
-
diff --git a/drivers/staging/ath6kl/wlan/src/wlan_recv_beacon.c b/drivers/staging/ath6kl/wlan/src/wlan_recv_beacon.c
deleted file mode 100644
index 07b8313b16e..00000000000
--- a/drivers/staging/ath6kl/wlan/src/wlan_recv_beacon.c
+++ /dev/null
@@ -1,199 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="wlan_recv_beacon.c" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// IEEE 802.11 input handling.
-//
-// Author(s): ="Atheros"
-//==============================================================================
-
-#include "a_config.h"
-#include "athdefs.h"
-#include "a_osapi.h"
-#include <wmi.h>
-#include <ieee80211.h>
-#include <wlan_api.h>
-
-#define IEEE80211_VERIFY_LENGTH(_len, _minlen) do { \
- if ((_len) < (_minlen)) { \
- return A_EINVAL; \
- } \
-} while (0)
-
-#define IEEE80211_VERIFY_ELEMENT(__elem, __maxlen) do { \
- if ((__elem) == NULL) { \
- return A_EINVAL; \
- } \
- if ((__elem)[1] > (__maxlen)) { \
- return A_EINVAL; \
- } \
-} while (0)
-
-
-/* unaligned little endian access */
-#define LE_READ_2(p) \
- ((u16) \
- ((((u8 *)(p))[0] ) | (((u8 *)(p))[1] << 8)))
-
-#define LE_READ_4(p) \
- ((u32) \
- ((((u8 *)(p))[0] ) | (((u8 *)(p))[1] << 8) | \
- (((u8 *)(p))[2] << 16) | (((u8 *)(p))[3] << 24)))
-
-
-static int __inline
-iswpaoui(const u8 *frm)
-{
- return frm[1] > 3 && LE_READ_4(frm+2) == ((WPA_OUI_TYPE<<24)|WPA_OUI);
-}
-
-static int __inline
-iswmmoui(const u8 *frm)
-{
- return frm[1] > 3 && LE_READ_4(frm+2) == ((WMM_OUI_TYPE<<24)|WMM_OUI);
-}
-
-/* unused functions for now */
-#if 0
-static int __inline
-iswmmparam(const u8 *frm)
-{
- return frm[1] > 5 && frm[6] == WMM_PARAM_OUI_SUBTYPE;
-}
-
-static int __inline
-iswmminfo(const u8 *frm)
-{
- return frm[1] > 5 && frm[6] == WMM_INFO_OUI_SUBTYPE;
-}
-#endif
-
-static int __inline
-isatherosoui(const u8 *frm)
-{
- return frm[1] > 3 && LE_READ_4(frm+2) == ((ATH_OUI_TYPE<<24)|ATH_OUI);
-}
-
-static int __inline
-iswscoui(const u8 *frm)
-{
- return frm[1] > 3 && LE_READ_4(frm+2) == ((0x04<<24)|WPA_OUI);
-}
-
-int
-wlan_parse_beacon(u8 *buf, int framelen, struct ieee80211_common_ie *cie)
-{
- u8 *frm, *efrm;
- u8 elemid_ssid = false;
-
- frm = buf;
- efrm = (u8 *) (frm + framelen);
-
- /*
- * beacon/probe response frame format
- * [8] time stamp
- * [2] beacon interval
- * [2] capability information
- * [tlv] ssid
- * [tlv] supported rates
- * [tlv] country information
- * [tlv] parameter set (FH/DS)
- * [tlv] erp information
- * [tlv] extended supported rates
- * [tlv] WMM
- * [tlv] WPA or RSN
- * [tlv] Atheros Advanced Capabilities
- */
- IEEE80211_VERIFY_LENGTH(efrm - frm, 12);
- A_MEMZERO(cie, sizeof(*cie));
-
- cie->ie_tstamp = frm; frm += 8;
- cie->ie_beaconInt = A_LE2CPU16(*(u16 *)frm); frm += 2;
- cie->ie_capInfo = A_LE2CPU16(*(u16 *)frm); frm += 2;
- cie->ie_chan = 0;
-
- while (frm < efrm) {
- switch (*frm) {
- case IEEE80211_ELEMID_SSID:
- if (!elemid_ssid) {
- cie->ie_ssid = frm;
- elemid_ssid = true;
- }
- break;
- case IEEE80211_ELEMID_RATES:
- cie->ie_rates = frm;
- break;
- case IEEE80211_ELEMID_COUNTRY:
- cie->ie_country = frm;
- break;
- case IEEE80211_ELEMID_FHPARMS:
- break;
- case IEEE80211_ELEMID_DSPARMS:
- cie->ie_chan = frm[2];
- break;
- case IEEE80211_ELEMID_TIM:
- cie->ie_tim = frm;
- break;
- case IEEE80211_ELEMID_IBSSPARMS:
- break;
- case IEEE80211_ELEMID_XRATES:
- cie->ie_xrates = frm;
- break;
- case IEEE80211_ELEMID_ERP:
- if (frm[1] != 1) {
- //A_PRINTF("Discarding ERP Element - Bad Len\n");
- return A_EINVAL;
- }
- cie->ie_erp = frm[2];
- break;
- case IEEE80211_ELEMID_RSN:
- cie->ie_rsn = frm;
- break;
- case IEEE80211_ELEMID_HTCAP_ANA:
- cie->ie_htcap = frm;
- break;
- case IEEE80211_ELEMID_HTINFO_ANA:
- cie->ie_htop = frm;
- break;
-#ifdef WAPI_ENABLE
- case IEEE80211_ELEMID_WAPI:
- cie->ie_wapi = frm;
- break;
-#endif
- case IEEE80211_ELEMID_VENDOR:
- if (iswpaoui(frm)) {
- cie->ie_wpa = frm;
- } else if (iswmmoui(frm)) {
- cie->ie_wmm = frm;
- } else if (isatherosoui(frm)) {
- cie->ie_ath = frm;
- } else if(iswscoui(frm)) {
- cie->ie_wsc = frm;
- }
- break;
- default:
- break;
- }
- frm += frm[1] + 2;
- }
- IEEE80211_VERIFY_ELEMENT(cie->ie_rates, IEEE80211_RATE_MAXSIZE);
- IEEE80211_VERIFY_ELEMENT(cie->ie_ssid, IEEE80211_NWID_LEN);
-
- return 0;
-}
diff --git a/drivers/staging/ath6kl/wlan/src/wlan_utils.c b/drivers/staging/ath6kl/wlan/src/wlan_utils.c
deleted file mode 100644
index bc91599d9bf..00000000000
--- a/drivers/staging/ath6kl/wlan/src/wlan_utils.c
+++ /dev/null
@@ -1,58 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="wlan_utils.c" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// This module implements frequently used wlan utilies
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#include <a_config.h>
-#include <athdefs.h>
-#include <a_osapi.h>
-
-/*
- * converts ieee channel number to frequency
- */
-u16 wlan_ieee2freq(int chan)
-{
- if (chan == 14) {
- return 2484;
- }
- if (chan < 14) { /* 0-13 */
- return (2407 + (chan*5));
- }
- if (chan < 27) { /* 15-26 */
- return (2512 + ((chan-15)*20));
- }
- return (5000 + (chan*5));
-}
-
-/*
- * Converts MHz frequency to IEEE channel number.
- */
-u32 wlan_freq2ieee(u16 freq)
-{
- if (freq == 2484)
- return 14;
- if (freq < 2484)
- return (freq - 2407) / 5;
- if (freq < 5000)
- return 15 + ((freq - 2512) / 20);
- return (freq - 5000) / 5;
-}
diff --git a/drivers/staging/ath6kl/wmi/wmi.c b/drivers/staging/ath6kl/wmi/wmi.c
deleted file mode 100644
index c7b5e5cf9df..00000000000
--- a/drivers/staging/ath6kl/wmi/wmi.c
+++ /dev/null
@@ -1,6444 +0,0 @@
-//------------------------------------------------------------------------------
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// This module implements the hardware independent layer of the
-// Wireless Module Interface (WMI) protocol.
-//
-// Author(s): ="Atheros"
-//==============================================================================
-
-#include <a_config.h>
-#include <athdefs.h>
-#include <a_osapi.h>
-#include "htc.h"
-#include "htc_api.h"
-#include "wmi.h"
-#include <wlan_api.h>
-#include <wmi_api.h>
-#include <ieee80211.h>
-#include <ieee80211_node.h>
-#include "dset_api.h"
-#include "wmi_host.h"
-#include "a_drv.h"
-#include "a_drv_api.h"
-#define ATH_MODULE_NAME wmi
-#include "a_debug.h"
-#include "dbglog_api.h"
-#include "roaming.h"
-#include "cfg80211.h"
-
-#define ATH_DEBUG_WMI ATH_DEBUG_MAKE_MODULE_MASK(0)
-
-#ifdef ATH_DEBUG_MODULE
-
-static struct ath_debug_mask_description wmi_debug_desc[] = {
- { ATH_DEBUG_WMI , "General WMI Tracing"},
-};
-
-ATH_DEBUG_INSTANTIATE_MODULE_VAR(wmi,
- "wmi",
- "Wireless Module Interface",
- ATH_DEBUG_MASK_DEFAULTS,
- ATH_DEBUG_DESCRIPTION_COUNT(wmi_debug_desc),
- wmi_debug_desc);
-
-#endif
-
-#ifndef REXOS
-#define DBGARG _A_FUNCNAME_
-#define DBGFMT "%s() : "
-#define DBG_WMI ATH_DEBUG_WMI
-#define DBG_ERROR ATH_DEBUG_ERR
-#define DBG_WMI2 ATH_DEBUG_WMI
-#define A_DPRINTF AR_DEBUG_PRINTF
-#endif
-
-static int wmi_ready_event_rx(struct wmi_t *wmip, u8 *datap, int len);
-
-static int wmi_connect_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_disconnect_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-
-static int wmi_tkip_micerr_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_bssInfo_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_opt_frame_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_pstream_timeout_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_sync_point(struct wmi_t *wmip);
-
-static int wmi_bitrate_reply_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_ratemask_reply_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_channelList_reply_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_regDomain_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_txPwr_reply_rx(struct wmi_t *wmip, u8 *datap, int len);
-static int wmi_neighborReport_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-
-static int wmi_dset_open_req_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-#ifdef CONFIG_HOST_DSET_SUPPORT
-static int wmi_dset_close_rx(struct wmi_t *wmip, u8 *datap, int len);
-static int wmi_dset_data_req_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-#endif /* CONFIG_HOST_DSET_SUPPORT */
-
-
-static int wmi_scanComplete_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_errorEvent_rx(struct wmi_t *wmip, u8 *datap, int len);
-static int wmi_statsEvent_rx(struct wmi_t *wmip, u8 *datap, int len);
-static int wmi_rssiThresholdEvent_rx(struct wmi_t *wmip, u8 *datap, int len);
-static int wmi_hbChallengeResp_rx(struct wmi_t *wmip, u8 *datap, int len);
-static int wmi_reportErrorEvent_rx(struct wmi_t *wmip, u8 *datap, int len);
-static int wmi_cac_event_rx(struct wmi_t *wmip, u8 *datap, int len);
-static int wmi_channel_change_event_rx(struct wmi_t *wmip, u8 *datap, int len);
-static int wmi_roam_tbl_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_roam_data_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_get_wow_list_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int
-wmi_get_pmkid_list_event_rx(struct wmi_t *wmip, u8 *datap, u32 len);
-
-static int
-wmi_set_params_event_rx(struct wmi_t *wmip, u8 *datap, u32 len);
-
-
-#ifdef CONFIG_HOST_TCMD_SUPPORT
-static int
-wmi_tcmd_test_report_rx(struct wmi_t *wmip, u8 *datap, int len);
-#endif
-
-static int
-wmi_txRetryErrEvent_rx(struct wmi_t *wmip, u8 *datap, int len);
-
-static int
-wmi_snrThresholdEvent_rx(struct wmi_t *wmip, u8 *datap, int len);
-
-static int
-wmi_lqThresholdEvent_rx(struct wmi_t *wmip, u8 *datap, int len);
-
-static bool
-wmi_is_bitrate_index_valid(struct wmi_t *wmip, s32 rateIndex);
-
-static int
-wmi_aplistEvent_rx(struct wmi_t *wmip, u8 *datap, int len);
-
-static int
-wmi_dbglog_event_rx(struct wmi_t *wmip, u8 *datap, int len);
-
-static int wmi_keepalive_reply_rx(struct wmi_t *wmip, u8 *datap, int len);
-
-int wmi_cmd_send_xtnd(struct wmi_t *wmip, void *osbuf, WMIX_COMMAND_ID cmdId,
- WMI_SYNC_FLAG syncflag);
-
-u8 ar6000_get_upper_threshold(s16 rssi, SQ_THRESHOLD_PARAMS *sq_thresh, u32 size);
-u8 ar6000_get_lower_threshold(s16 rssi, SQ_THRESHOLD_PARAMS *sq_thresh, u32 size);
-
-void wmi_cache_configure_rssithreshold(struct wmi_t *wmip, WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd);
-void wmi_cache_configure_snrthreshold(struct wmi_t *wmip, WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd);
-static int wmi_send_rssi_threshold_params(struct wmi_t *wmip,
- WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd);
-static int wmi_send_snr_threshold_params(struct wmi_t *wmip,
- WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd);
-#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
-static int
-wmi_prof_count_rx(struct wmi_t *wmip, u8 *datap, int len);
-#endif /* CONFIG_TARGET_PROFILE_SUPPORT */
-
-static int wmi_pspoll_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_dtimexpiry_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-
-static int wmi_peer_node_event_rx (struct wmi_t *wmip, u8 *datap,
- int len);
-static int wmi_addba_req_event_rx(struct wmi_t *, u8 *, int);
-static int wmi_addba_resp_event_rx(struct wmi_t *, u8 *, int);
-static int wmi_delba_req_event_rx(struct wmi_t *, u8 *, int);
-static int wmi_btcoex_config_event_rx(struct wmi_t *wmip, u8 *datap, int len);
-static int wmi_btcoex_stats_event_rx(struct wmi_t *wmip, u8 *datap, int len);
-static int wmi_hci_event_rx(struct wmi_t *, u8 *, int);
-
-#ifdef WAPI_ENABLE
-static int wmi_wapi_rekey_event_rx(struct wmi_t *wmip, u8 *datap,
- int len);
-#endif
-
-#if defined(UNDER_CE)
-#if defined(NDIS51_MINIPORT)
-unsigned int processDot11Hdr = 0;
-#else
-unsigned int processDot11Hdr = 1;
-#endif
-#else
-extern unsigned int processDot11Hdr;
-#endif
-
-int wps_enable;
-static const s32 wmi_rateTable[][2] = {
- //{W/O SGI, with SGI}
- {1000, 1000},
- {2000, 2000},
- {5500, 5500},
- {11000, 11000},
- {6000, 6000},
- {9000, 9000},
- {12000, 12000},
- {18000, 18000},
- {24000, 24000},
- {36000, 36000},
- {48000, 48000},
- {54000, 54000},
- {6500, 7200},
- {13000, 14400},
- {19500, 21700},
- {26000, 28900},
- {39000, 43300},
- {52000, 57800},
- {58500, 65000},
- {65000, 72200},
- {13500, 15000},
- {27000, 30000},
- {40500, 45000},
- {54000, 60000},
- {81000, 90000},
- {108000, 120000},
- {121500, 135000},
- {135000, 150000},
- {0, 0}};
-
-#define MODE_A_SUPPORT_RATE_START ((s32) 4)
-#define MODE_A_SUPPORT_RATE_STOP ((s32) 11)
-
-#define MODE_GONLY_SUPPORT_RATE_START MODE_A_SUPPORT_RATE_START
-#define MODE_GONLY_SUPPORT_RATE_STOP MODE_A_SUPPORT_RATE_STOP
-
-#define MODE_B_SUPPORT_RATE_START ((s32) 0)
-#define MODE_B_SUPPORT_RATE_STOP ((s32) 3)
-
-#define MODE_G_SUPPORT_RATE_START ((s32) 0)
-#define MODE_G_SUPPORT_RATE_STOP ((s32) 11)
-
-#define MODE_GHT20_SUPPORT_RATE_START ((s32) 0)
-#define MODE_GHT20_SUPPORT_RATE_STOP ((s32) 19)
-
-#define MAX_NUMBER_OF_SUPPORT_RATES (MODE_GHT20_SUPPORT_RATE_STOP + 1)
-
-/* 802.1d to AC mapping. Refer pg 57 of WMM-test-plan-v1.2 */
-const u8 up_to_ac[]= {
- WMM_AC_BE,
- WMM_AC_BK,
- WMM_AC_BK,
- WMM_AC_BE,
- WMM_AC_VI,
- WMM_AC_VI,
- WMM_AC_VO,
- WMM_AC_VO,
- };
-
-/* This stuff is used when we want a simple layer-3 visibility */
-typedef PREPACK struct _iphdr {
- u8 ip_ver_hdrlen; /* version and hdr length */
- u8 ip_tos; /* type of service */
- u16 ip_len; /* total length */
- u16 ip_id; /* identification */
- s16 ip_off; /* fragment offset field */
-#define IP_DF 0x4000 /* dont fragment flag */
-#define IP_MF 0x2000 /* more fragments flag */
-#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */
- u8 ip_ttl; /* time to live */
- u8 ip_p; /* protocol */
- u16 ip_sum; /* checksum */
- u8 ip_src[4]; /* source and dest address */
- u8 ip_dst[4];
-} POSTPACK iphdr;
-
-static s16 rssi_event_value = 0;
-static s16 snr_event_value = 0;
-
-bool is_probe_ssid = false;
-
-void *
-wmi_init(void *devt)
-{
- struct wmi_t *wmip;
-
- A_REGISTER_MODULE_DEBUG_INFO(wmi);
-
- wmip = A_MALLOC (sizeof(struct wmi_t));
- if (wmip == NULL) {
- return (NULL);
- }
- A_MEMZERO(wmip, sizeof(struct wmi_t ));
-#ifdef THREAD_X
- INIT_WMI_LOCK(wmip);
-#else
- A_MUTEX_INIT(&wmip->wmi_lock);
-#endif
- wmip->wmi_devt = devt;
- wlan_node_table_init(wmip, &wmip->wmi_scan_table);
- wmi_qos_state_init(wmip);
-
- wmip->wmi_powerMode = REC_POWER;
- wmip->wmi_phyMode = WMI_11G_MODE;
-
- wmip->wmi_pair_crypto_type = NONE_CRYPT;
- wmip->wmi_grp_crypto_type = NONE_CRYPT;
-
- wmip->wmi_ht_allowed[A_BAND_24GHZ] = 1;
- wmip->wmi_ht_allowed[A_BAND_5GHZ] = 1;
-
- return (wmip);
-}
-
-void
-wmi_qos_state_init(struct wmi_t *wmip)
-{
- u8 i;
-
- if (wmip == NULL) {
- return;
- }
- LOCK_WMI(wmip);
-
- /* Initialize QoS States */
- wmip->wmi_numQoSStream = 0;
-
- wmip->wmi_fatPipeExists = 0;
-
- for (i=0; i < WMM_NUM_AC; i++) {
- wmip->wmi_streamExistsForAC[i]=0;
- }
-
- UNLOCK_WMI(wmip);
-
- A_WMI_SET_NUMDATAENDPTS(wmip->wmi_devt, 1);
-}
-
-void
-wmi_set_control_ep(struct wmi_t * wmip, HTC_ENDPOINT_ID eid)
-{
- A_ASSERT( eid != ENDPOINT_UNUSED);
- wmip->wmi_endpoint_id = eid;
-}
-
-HTC_ENDPOINT_ID
-wmi_get_control_ep(struct wmi_t * wmip)
-{
- return(wmip->wmi_endpoint_id);
-}
-
-void
-wmi_shutdown(struct wmi_t *wmip)
-{
- if (wmip != NULL) {
- wlan_node_table_cleanup(&wmip->wmi_scan_table);
- if (A_IS_MUTEX_VALID(&wmip->wmi_lock)) {
-#ifdef THREAD_X
- DELETE_WMI_LOCK(&wmip);
-#else
- A_MUTEX_DELETE(&wmip->wmi_lock);
-#endif
- }
- kfree(wmip);
- }
-}
-
-/*
- * performs DIX to 802.3 encapsulation for transmit packets.
- * uses passed in buffer. Returns buffer or NULL if failed.
- * Assumes the entire DIX header is contigous and that there is
- * enough room in the buffer for a 802.3 mac header and LLC+SNAP headers.
- */
-int
-wmi_dix_2_dot3(struct wmi_t *wmip, void *osbuf)
-{
- u8 *datap;
- u16 typeorlen;
- ATH_MAC_HDR macHdr;
- ATH_LLC_SNAP_HDR *llcHdr;
-
- A_ASSERT(osbuf != NULL);
-
- if (A_NETBUF_HEADROOM(osbuf) <
- (sizeof(ATH_LLC_SNAP_HDR) + sizeof(WMI_DATA_HDR)))
- {
- return A_NO_MEMORY;
- }
-
- datap = A_NETBUF_DATA(osbuf);
-
- typeorlen = *(u16 *)(datap + ATH_MAC_LEN + ATH_MAC_LEN);
-
- if (!IS_ETHERTYPE(A_BE2CPU16(typeorlen))) {
- /*
- * packet is already in 802.3 format - return success
- */
- A_DPRINTF(DBG_WMI, (DBGFMT "packet already 802.3\n", DBGARG));
- return (0);
- }
-
- /*
- * Save mac fields and length to be inserted later
- */
- memcpy(macHdr.dstMac, datap, ATH_MAC_LEN);
- memcpy(macHdr.srcMac, datap + ATH_MAC_LEN, ATH_MAC_LEN);
- macHdr.typeOrLen = A_CPU2BE16(A_NETBUF_LEN(osbuf) - sizeof(ATH_MAC_HDR) +
- sizeof(ATH_LLC_SNAP_HDR));
-
- /*
- * Make room for LLC+SNAP headers
- */
- if (A_NETBUF_PUSH(osbuf, sizeof(ATH_LLC_SNAP_HDR)) != 0) {
- return A_NO_MEMORY;
- }
- datap = A_NETBUF_DATA(osbuf);
-
- memcpy(datap, &macHdr, sizeof (ATH_MAC_HDR));
-
- llcHdr = (ATH_LLC_SNAP_HDR *)(datap + sizeof(ATH_MAC_HDR));
- llcHdr->dsap = 0xAA;
- llcHdr->ssap = 0xAA;
- llcHdr->cntl = 0x03;
- llcHdr->orgCode[0] = 0x0;
- llcHdr->orgCode[1] = 0x0;
- llcHdr->orgCode[2] = 0x0;
- llcHdr->etherType = typeorlen;
-
- return (0);
-}
-
-int wmi_meta_add(struct wmi_t *wmip, void *osbuf, u8 *pVersion,void *pTxMetaS)
-{
- switch(*pVersion){
- case 0:
- return (0);
- case WMI_META_VERSION_1:
- {
- WMI_TX_META_V1 *pV1= NULL;
- A_ASSERT(osbuf != NULL);
- if (A_NETBUF_PUSH(osbuf, WMI_MAX_TX_META_SZ) != 0) {
- return A_NO_MEMORY;
- }
-
- pV1 = (WMI_TX_META_V1 *)A_NETBUF_DATA(osbuf);
- /* the pktID is used in conjunction with txComplete messages
- * allowing the target to notify which tx requests have been
- * completed and how. */
- pV1->pktID = 0;
- /* the ratePolicyID allows the host to specify which rate policy
- * to use for transmitting this packet. 0 means use default behavior. */
- pV1->ratePolicyID = 0;
- A_ASSERT(pVersion != NULL);
- /* the version must be used to populate the meta field of the WMI_DATA_HDR */
- *pVersion = WMI_META_VERSION_1;
- return (0);
- }
- case WMI_META_VERSION_2:
- {
- WMI_TX_META_V2 *pV2 ;
- A_ASSERT(osbuf != NULL);
- if (A_NETBUF_PUSH(osbuf, WMI_MAX_TX_META_SZ) != 0) {
- return A_NO_MEMORY;
- }
- pV2 = (WMI_TX_META_V2 *)A_NETBUF_DATA(osbuf);
- memcpy(pV2,(WMI_TX_META_V2 *)pTxMetaS,sizeof(WMI_TX_META_V2));
- return (0);
- }
- default:
- return (0);
- }
-}
-
-/* Adds a WMI data header */
-int
-wmi_data_hdr_add(struct wmi_t *wmip, void *osbuf, u8 msgType, bool bMoreData,
- WMI_DATA_HDR_DATA_TYPE data_type,u8 metaVersion, void *pTxMetaS)
-{
- WMI_DATA_HDR *dtHdr;
-// u8 metaVersion = 0;
- int status;
-
- A_ASSERT(osbuf != NULL);
-
- /* adds the meta data field after the wmi data hdr. If metaVersion
- * is returns 0 then no meta field was added. */
- if ((status = wmi_meta_add(wmip, osbuf, &metaVersion,pTxMetaS)) != 0) {
- return status;
- }
-
- if (A_NETBUF_PUSH(osbuf, sizeof(WMI_DATA_HDR)) != 0) {
- return A_NO_MEMORY;
- }
-
- dtHdr = (WMI_DATA_HDR *)A_NETBUF_DATA(osbuf);
- A_MEMZERO(dtHdr, sizeof(WMI_DATA_HDR));
-
- WMI_DATA_HDR_SET_MSG_TYPE(dtHdr, msgType);
- WMI_DATA_HDR_SET_DATA_TYPE(dtHdr, data_type);
-
- if (bMoreData) {
- WMI_DATA_HDR_SET_MORE_BIT(dtHdr);
- }
-
- WMI_DATA_HDR_SET_META(dtHdr, metaVersion);
-
- dtHdr->info3 = 0;
-
- return (0);
-}
-
-
-u8 wmi_implicit_create_pstream(struct wmi_t *wmip, void *osbuf, u32 layer2Priority, bool wmmEnabled)
-{
- u8 *datap;
- u8 trafficClass = WMM_AC_BE;
- u16 ipType = IP_ETHERTYPE;
- WMI_DATA_HDR *dtHdr;
- u8 streamExists = 0;
- u8 userPriority;
- u32 hdrsize, metasize;
- ATH_LLC_SNAP_HDR *llcHdr;
-
- WMI_CREATE_PSTREAM_CMD cmd;
-
- A_ASSERT(osbuf != NULL);
-
- //
- // Initialize header size
- //
- hdrsize = 0;
-
- datap = A_NETBUF_DATA(osbuf);
- dtHdr = (WMI_DATA_HDR *)datap;
- metasize = (WMI_DATA_HDR_GET_META(dtHdr))? WMI_MAX_TX_META_SZ : 0;
-
- if (!wmmEnabled)
- {
- /* If WMM is disabled all traffic goes as BE traffic */
- userPriority = 0;
- }
- else
- {
- if (processDot11Hdr)
- {
- hdrsize = A_ROUND_UP(sizeof(struct ieee80211_qosframe),sizeof(u32));
- llcHdr = (ATH_LLC_SNAP_HDR *)(datap + sizeof(WMI_DATA_HDR) + metasize +
- hdrsize);
-
-
- }
- else
- {
- llcHdr = (ATH_LLC_SNAP_HDR *)(datap + sizeof(WMI_DATA_HDR) + metasize +
- sizeof(ATH_MAC_HDR));
- }
-
- if (llcHdr->etherType == A_CPU2BE16(ipType))
- {
- /* Extract the endpoint info from the TOS field in the IP header */
-
- userPriority = wmi_determine_userPriority (((u8 *)llcHdr) + sizeof(ATH_LLC_SNAP_HDR),layer2Priority);
- }
- else
- {
- userPriority = layer2Priority & 0x7;
- }
- }
-
-
- /* workaround for WMM S5 */
- if ((WMM_AC_VI == wmip->wmi_traffic_class) && ((5 == userPriority) || (4 == userPriority)))
- {
- userPriority = 1;
- }
-
- trafficClass = convert_userPriority_to_trafficClass(userPriority);
-
- WMI_DATA_HDR_SET_UP(dtHdr, userPriority);
- /* lower 3-bits are 802.1d priority */
- //dtHdr->info |= (userPriority & WMI_DATA_HDR_UP_MASK) << WMI_DATA_HDR_UP_SHIFT;
-
- LOCK_WMI(wmip);
- streamExists = wmip->wmi_fatPipeExists;
- UNLOCK_WMI(wmip);
-
- if (!(streamExists & (1 << trafficClass)))
- {
-
- A_MEMZERO(&cmd, sizeof(cmd));
- cmd.trafficClass = trafficClass;
- cmd.userPriority = userPriority;
- cmd.inactivityInt = WMI_IMPLICIT_PSTREAM_INACTIVITY_INT;
- /* Implicit streams are created with TSID 0xFF */
-
- cmd.tsid = WMI_IMPLICIT_PSTREAM;
- wmi_create_pstream_cmd(wmip, &cmd);
- }
-
- return trafficClass;
-}
-
-int
-wmi_dot11_hdr_add (struct wmi_t *wmip, void *osbuf, NETWORK_TYPE mode)
-{
- u8 *datap;
- u16 typeorlen;
- ATH_MAC_HDR macHdr;
- ATH_LLC_SNAP_HDR *llcHdr;
- struct ieee80211_frame *wh;
- u32 hdrsize;
-
- A_ASSERT(osbuf != NULL);
-
- if (A_NETBUF_HEADROOM(osbuf) <
- (sizeof(struct ieee80211_qosframe) + sizeof(ATH_LLC_SNAP_HDR) + sizeof(WMI_DATA_HDR)))
- {
- return A_NO_MEMORY;
- }
-
- datap = A_NETBUF_DATA(osbuf);
-
- typeorlen = *(u16 *)(datap + ATH_MAC_LEN + ATH_MAC_LEN);
-
- if (!IS_ETHERTYPE(A_BE2CPU16(typeorlen))) {
-/*
- * packet is already in 802.3 format - return success
- */
- A_DPRINTF(DBG_WMI, (DBGFMT "packet already 802.3\n", DBGARG));
- goto AddDot11Hdr;
- }
-
- /*
- * Save mac fields and length to be inserted later
- */
- memcpy(macHdr.dstMac, datap, ATH_MAC_LEN);
- memcpy(macHdr.srcMac, datap + ATH_MAC_LEN, ATH_MAC_LEN);
- macHdr.typeOrLen = A_CPU2BE16(A_NETBUF_LEN(osbuf) - sizeof(ATH_MAC_HDR) +
- sizeof(ATH_LLC_SNAP_HDR));
-
- // Remove the Ethernet hdr
- A_NETBUF_PULL(osbuf, sizeof(ATH_MAC_HDR));
- /*
- * Make room for LLC+SNAP headers
- */
- if (A_NETBUF_PUSH(osbuf, sizeof(ATH_LLC_SNAP_HDR)) != 0) {
- return A_NO_MEMORY;
- }
- datap = A_NETBUF_DATA(osbuf);
-
- llcHdr = (ATH_LLC_SNAP_HDR *)(datap);
- llcHdr->dsap = 0xAA;
- llcHdr->ssap = 0xAA;
- llcHdr->cntl = 0x03;
- llcHdr->orgCode[0] = 0x0;
- llcHdr->orgCode[1] = 0x0;
- llcHdr->orgCode[2] = 0x0;
- llcHdr->etherType = typeorlen;
-
-AddDot11Hdr:
- /* Make room for 802.11 hdr */
- if (wmip->wmi_is_wmm_enabled)
- {
- hdrsize = A_ROUND_UP(sizeof(struct ieee80211_qosframe),sizeof(u32));
- if (A_NETBUF_PUSH(osbuf, hdrsize) != 0)
- {
- return A_NO_MEMORY;
- }
- wh = (struct ieee80211_frame *) A_NETBUF_DATA(osbuf);
- wh->i_fc[0] = IEEE80211_FC0_SUBTYPE_QOS;
- }
- else
- {
- hdrsize = A_ROUND_UP(sizeof(struct ieee80211_frame),sizeof(u32));
- if (A_NETBUF_PUSH(osbuf, hdrsize) != 0)
- {
- return A_NO_MEMORY;
- }
- wh = (struct ieee80211_frame *) A_NETBUF_DATA(osbuf);
- wh->i_fc[0] = IEEE80211_FC0_SUBTYPE_DATA;
- }
- /* Setup the SA & DA */
- IEEE80211_ADDR_COPY(wh->i_addr2, macHdr.srcMac);
-
- if (mode == INFRA_NETWORK) {
- IEEE80211_ADDR_COPY(wh->i_addr3, macHdr.dstMac);
- }
- else if (mode == ADHOC_NETWORK) {
- IEEE80211_ADDR_COPY(wh->i_addr1, macHdr.dstMac);
- }
-
- return (0);
-}
-
-int
-wmi_dot11_hdr_remove(struct wmi_t *wmip, void *osbuf)
-{
- u8 *datap;
- struct ieee80211_frame *pwh,wh;
- u8 type,subtype;
- ATH_LLC_SNAP_HDR *llcHdr;
- ATH_MAC_HDR macHdr;
- u32 hdrsize;
-
- A_ASSERT(osbuf != NULL);
- datap = A_NETBUF_DATA(osbuf);
-
- pwh = (struct ieee80211_frame *)datap;
- type = pwh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
- subtype = pwh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
-
- memcpy((u8 *)&wh, datap, sizeof(struct ieee80211_frame));
-
- /* strip off the 802.11 hdr*/
- if (subtype == IEEE80211_FC0_SUBTYPE_QOS) {
- hdrsize = A_ROUND_UP(sizeof(struct ieee80211_qosframe),sizeof(u32));
- A_NETBUF_PULL(osbuf, hdrsize);
- } else if (subtype == IEEE80211_FC0_SUBTYPE_DATA) {
- A_NETBUF_PULL(osbuf, sizeof(struct ieee80211_frame));
- }
-
- datap = A_NETBUF_DATA(osbuf);
- llcHdr = (ATH_LLC_SNAP_HDR *)(datap);
-
- macHdr.typeOrLen = llcHdr->etherType;
- A_MEMZERO(macHdr.dstMac, sizeof(macHdr.dstMac));
- A_MEMZERO(macHdr.srcMac, sizeof(macHdr.srcMac));
-
- switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
- case IEEE80211_FC1_DIR_NODS:
- IEEE80211_ADDR_COPY(macHdr.dstMac, wh.i_addr1);
- IEEE80211_ADDR_COPY(macHdr.srcMac, wh.i_addr2);
- break;
- case IEEE80211_FC1_DIR_TODS:
- IEEE80211_ADDR_COPY(macHdr.dstMac, wh.i_addr3);
- IEEE80211_ADDR_COPY(macHdr.srcMac, wh.i_addr2);
- break;
- case IEEE80211_FC1_DIR_FROMDS:
- IEEE80211_ADDR_COPY(macHdr.dstMac, wh.i_addr1);
- IEEE80211_ADDR_COPY(macHdr.srcMac, wh.i_addr3);
- break;
- case IEEE80211_FC1_DIR_DSTODS:
- break;
- }
-
- // Remove the LLC Hdr.
- A_NETBUF_PULL(osbuf, sizeof(ATH_LLC_SNAP_HDR));
-
- // Insert the ATH MAC hdr.
-
- A_NETBUF_PUSH(osbuf, sizeof(ATH_MAC_HDR));
- datap = A_NETBUF_DATA(osbuf);
-
- memcpy (datap, &macHdr, sizeof(ATH_MAC_HDR));
-
- return 0;
-}
-
-/*
- * performs 802.3 to DIX encapsulation for received packets.
- * Assumes the entire 802.3 header is contigous.
- */
-int
-wmi_dot3_2_dix(void *osbuf)
-{
- u8 *datap;
- ATH_MAC_HDR macHdr;
- ATH_LLC_SNAP_HDR *llcHdr;
-
- A_ASSERT(osbuf != NULL);
- datap = A_NETBUF_DATA(osbuf);
-
- memcpy(&macHdr, datap, sizeof(ATH_MAC_HDR));
- llcHdr = (ATH_LLC_SNAP_HDR *)(datap + sizeof(ATH_MAC_HDR));
- macHdr.typeOrLen = llcHdr->etherType;
-
- if (A_NETBUF_PULL(osbuf, sizeof(ATH_LLC_SNAP_HDR)) != 0) {
- return A_NO_MEMORY;
- }
-
- datap = A_NETBUF_DATA(osbuf);
-
- memcpy(datap, &macHdr, sizeof (ATH_MAC_HDR));
-
- return (0);
-}
-
-/*
- * Removes a WMI data header
- */
-int
-wmi_data_hdr_remove(struct wmi_t *wmip, void *osbuf)
-{
- A_ASSERT(osbuf != NULL);
-
- return (A_NETBUF_PULL(osbuf, sizeof(WMI_DATA_HDR)));
-}
-
-void
-wmi_iterate_nodes(struct wmi_t *wmip, wlan_node_iter_func *f, void *arg)
-{
- wlan_iterate_nodes(&wmip->wmi_scan_table, f, arg);
-}
-
-/*
- * WMI Extended Event received from Target.
- */
-int
-wmi_control_rx_xtnd(struct wmi_t *wmip, void *osbuf)
-{
- WMIX_CMD_HDR *cmd;
- u16 id;
- u8 *datap;
- u32 len;
- int status = 0;
-
- if (A_NETBUF_LEN(osbuf) < sizeof(WMIX_CMD_HDR)) {
- A_DPRINTF(DBG_WMI, (DBGFMT "bad packet 1\n", DBGARG));
- wmip->wmi_stats.cmd_len_err++;
- return A_ERROR;
- }
-
- cmd = (WMIX_CMD_HDR *)A_NETBUF_DATA(osbuf);
- id = cmd->commandId;
-
- if (A_NETBUF_PULL(osbuf, sizeof(WMIX_CMD_HDR)) != 0) {
- A_DPRINTF(DBG_WMI, (DBGFMT "bad packet 2\n", DBGARG));
- wmip->wmi_stats.cmd_len_err++;
- return A_ERROR;
- }
-
- datap = A_NETBUF_DATA(osbuf);
- len = A_NETBUF_LEN(osbuf);
-
- switch (id) {
- case (WMIX_DSETOPENREQ_EVENTID):
- status = wmi_dset_open_req_rx(wmip, datap, len);
- break;
-#ifdef CONFIG_HOST_DSET_SUPPORT
- case (WMIX_DSETCLOSE_EVENTID):
- status = wmi_dset_close_rx(wmip, datap, len);
- break;
- case (WMIX_DSETDATAREQ_EVENTID):
- status = wmi_dset_data_req_rx(wmip, datap, len);
- break;
-#endif /* CONFIG_HOST_DSET_SUPPORT */
- case (WMIX_HB_CHALLENGE_RESP_EVENTID):
- wmi_hbChallengeResp_rx(wmip, datap, len);
- break;
- case (WMIX_DBGLOG_EVENTID):
- wmi_dbglog_event_rx(wmip, datap, len);
- break;
-#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
- case (WMIX_PROF_COUNT_EVENTID):
- wmi_prof_count_rx(wmip, datap, len);
- break;
-#endif /* CONFIG_TARGET_PROFILE_SUPPORT */
- default:
- A_DPRINTF(DBG_WMI|DBG_ERROR,
- (DBGFMT "Unknown id 0x%x\n", DBGARG, id));
- wmip->wmi_stats.cmd_id_err++;
- status = A_ERROR;
- break;
- }
-
- return status;
-}
-
-/*
- * Control Path
- */
-u32 cmdRecvNum;
-
-int
-wmi_control_rx(struct wmi_t *wmip, void *osbuf)
-{
- WMI_CMD_HDR *cmd;
- u16 id;
- u8 *datap;
- u32 len, i, loggingReq;
- int status = 0;
-
- A_ASSERT(osbuf != NULL);
- if (A_NETBUF_LEN(osbuf) < sizeof(WMI_CMD_HDR)) {
- A_NETBUF_FREE(osbuf);
- A_DPRINTF(DBG_WMI, (DBGFMT "bad packet 1\n", DBGARG));
- wmip->wmi_stats.cmd_len_err++;
- return A_ERROR;
- }
-
- cmd = (WMI_CMD_HDR *)A_NETBUF_DATA(osbuf);
- id = cmd->commandId;
-
- if (A_NETBUF_PULL(osbuf, sizeof(WMI_CMD_HDR)) != 0) {
- A_NETBUF_FREE(osbuf);
- A_DPRINTF(DBG_WMI, (DBGFMT "bad packet 2\n", DBGARG));
- wmip->wmi_stats.cmd_len_err++;
- return A_ERROR;
- }
-
- datap = A_NETBUF_DATA(osbuf);
- len = A_NETBUF_LEN(osbuf);
-
- loggingReq = 0;
-
- ar6000_get_driver_cfg(wmip->wmi_devt,
- AR6000_DRIVER_CFG_LOG_RAW_WMI_MSGS,
- &loggingReq);
-
- if(loggingReq) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("WMI %d \n",id));
- AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("WMI recv, MsgNo %d : ", cmdRecvNum));
- for(i = 0; i < len; i++)
- AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("%x ", datap[i]));
- AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("\n"));
- }
-
- LOCK_WMI(wmip);
- cmdRecvNum++;
- UNLOCK_WMI(wmip);
-
- switch (id) {
- case (WMI_GET_BITRATE_CMDID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_BITRATE_CMDID\n", DBGARG));
- status = wmi_bitrate_reply_rx(wmip, datap, len);
- break;
- case (WMI_GET_CHANNEL_LIST_CMDID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_CHANNEL_LIST_CMDID\n", DBGARG));
- status = wmi_channelList_reply_rx(wmip, datap, len);
- break;
- case (WMI_GET_TX_PWR_CMDID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_TX_PWR_CMDID\n", DBGARG));
- status = wmi_txPwr_reply_rx(wmip, datap, len);
- break;
- case (WMI_READY_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_READY_EVENTID\n", DBGARG));
- status = wmi_ready_event_rx(wmip, datap, len);
- A_WMI_DBGLOG_INIT_DONE(wmip->wmi_devt);
- break;
- case (WMI_CONNECT_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_CONNECT_EVENTID\n", DBGARG));
- status = wmi_connect_event_rx(wmip, datap, len);
- break;
- case (WMI_DISCONNECT_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_DISCONNECT_EVENTID\n", DBGARG));
- status = wmi_disconnect_event_rx(wmip, datap, len);
- break;
- case (WMI_PEER_NODE_EVENTID):
- A_DPRINTF (DBG_WMI, (DBGFMT "WMI_PEER_NODE_EVENTID\n", DBGARG));
- status = wmi_peer_node_event_rx(wmip, datap, len);
- break;
- case (WMI_TKIP_MICERR_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_TKIP_MICERR_EVENTID\n", DBGARG));
- status = wmi_tkip_micerr_event_rx(wmip, datap, len);
- break;
- case (WMI_BSSINFO_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_BSSINFO_EVENTID\n", DBGARG));
- {
- /*
- * convert WMI_BSS_INFO_HDR2 to WMI_BSS_INFO_HDR
- * Take a local copy of the WMI_BSS_INFO_HDR2 from the wmi buffer
- * and reconstruct the WMI_BSS_INFO_HDR in its place
- */
- WMI_BSS_INFO_HDR2 bih2;
- WMI_BSS_INFO_HDR *bih;
- memcpy(&bih2, datap, sizeof(WMI_BSS_INFO_HDR2));
-
- A_NETBUF_PUSH(osbuf, 4);
- datap = A_NETBUF_DATA(osbuf);
- len = A_NETBUF_LEN(osbuf);
- bih = (WMI_BSS_INFO_HDR *)datap;
-
- bih->channel = bih2.channel;
- bih->frameType = bih2.frameType;
- bih->snr = bih2.snr;
- bih->rssi = bih2.snr - 95;
- bih->ieMask = bih2.ieMask;
- memcpy(bih->bssid, bih2.bssid, ATH_MAC_LEN);
-
- status = wmi_bssInfo_event_rx(wmip, datap, len);
- }
- break;
- case (WMI_REGDOMAIN_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_REGDOMAIN_EVENTID\n", DBGARG));
- status = wmi_regDomain_event_rx(wmip, datap, len);
- break;
- case (WMI_PSTREAM_TIMEOUT_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_PSTREAM_TIMEOUT_EVENTID\n", DBGARG));
- status = wmi_pstream_timeout_event_rx(wmip, datap, len);
- break;
- case (WMI_NEIGHBOR_REPORT_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_NEIGHBOR_REPORT_EVENTID\n", DBGARG));
- status = wmi_neighborReport_event_rx(wmip, datap, len);
- break;
- case (WMI_SCAN_COMPLETE_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_SCAN_COMPLETE_EVENTID\n", DBGARG));
- status = wmi_scanComplete_rx(wmip, datap, len);
- break;
- case (WMI_CMDERROR_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_CMDERROR_EVENTID\n", DBGARG));
- status = wmi_errorEvent_rx(wmip, datap, len);
- break;
- case (WMI_REPORT_STATISTICS_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_REPORT_STATISTICS_EVENTID\n", DBGARG));
- status = wmi_statsEvent_rx(wmip, datap, len);
- break;
- case (WMI_RSSI_THRESHOLD_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_RSSI_THRESHOLD_EVENTID\n", DBGARG));
- status = wmi_rssiThresholdEvent_rx(wmip, datap, len);
- break;
- case (WMI_ERROR_REPORT_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_ERROR_REPORT_EVENTID\n", DBGARG));
- status = wmi_reportErrorEvent_rx(wmip, datap, len);
- break;
- case (WMI_OPT_RX_FRAME_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_OPT_RX_FRAME_EVENTID\n", DBGARG));
- status = wmi_opt_frame_event_rx(wmip, datap, len);
- break;
- case (WMI_REPORT_ROAM_TBL_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_REPORT_ROAM_TBL_EVENTID\n", DBGARG));
- status = wmi_roam_tbl_event_rx(wmip, datap, len);
- break;
- case (WMI_EXTENSION_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_EXTENSION_EVENTID\n", DBGARG));
- status = wmi_control_rx_xtnd(wmip, osbuf);
- break;
- case (WMI_CAC_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_CAC_EVENTID\n", DBGARG));
- status = wmi_cac_event_rx(wmip, datap, len);
- break;
- case (WMI_CHANNEL_CHANGE_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_CHANNEL_CHANGE_EVENTID\n", DBGARG));
- status = wmi_channel_change_event_rx(wmip, datap, len);
- break;
- case (WMI_REPORT_ROAM_DATA_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_REPORT_ROAM_DATA_EVENTID\n", DBGARG));
- status = wmi_roam_data_event_rx(wmip, datap, len);
- break;
-#ifdef CONFIG_HOST_TCMD_SUPPORT
- case (WMI_TEST_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_TEST_EVENTID\n", DBGARG));
- status = wmi_tcmd_test_report_rx(wmip, datap, len);
- break;
-#endif
- case (WMI_GET_FIXRATES_CMDID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_FIXRATES_CMDID\n", DBGARG));
- status = wmi_ratemask_reply_rx(wmip, datap, len);
- break;
- case (WMI_TX_RETRY_ERR_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_TX_RETRY_ERR_EVENTID\n", DBGARG));
- status = wmi_txRetryErrEvent_rx(wmip, datap, len);
- break;
- case (WMI_SNR_THRESHOLD_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_SNR_THRESHOLD_EVENTID\n", DBGARG));
- status = wmi_snrThresholdEvent_rx(wmip, datap, len);
- break;
- case (WMI_LQ_THRESHOLD_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_LQ_THRESHOLD_EVENTID\n", DBGARG));
- status = wmi_lqThresholdEvent_rx(wmip, datap, len);
- break;
- case (WMI_APLIST_EVENTID):
- AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Received APLIST Event\n"));
- status = wmi_aplistEvent_rx(wmip, datap, len);
- break;
- case (WMI_GET_KEEPALIVE_CMDID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_KEEPALIVE_CMDID\n", DBGARG));
- status = wmi_keepalive_reply_rx(wmip, datap, len);
- break;
- case (WMI_GET_WOW_LIST_EVENTID):
- status = wmi_get_wow_list_event_rx(wmip, datap, len);
- break;
- case (WMI_GET_PMKID_LIST_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_PMKID_LIST Event\n", DBGARG));
- status = wmi_get_pmkid_list_event_rx(wmip, datap, len);
- break;
- case (WMI_PSPOLL_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_PSPOLL_EVENT\n", DBGARG));
- status = wmi_pspoll_event_rx(wmip, datap, len);
- break;
- case (WMI_DTIMEXPIRY_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_DTIMEXPIRY_EVENT\n", DBGARG));
- status = wmi_dtimexpiry_event_rx(wmip, datap, len);
- break;
- case (WMI_SET_PARAMS_REPLY_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_SET_PARAMS_REPLY Event\n", DBGARG));
- status = wmi_set_params_event_rx(wmip, datap, len);
- break;
- case (WMI_ADDBA_REQ_EVENTID):
- status = wmi_addba_req_event_rx(wmip, datap, len);
- break;
- case (WMI_ADDBA_RESP_EVENTID):
- status = wmi_addba_resp_event_rx(wmip, datap, len);
- break;
- case (WMI_DELBA_REQ_EVENTID):
- status = wmi_delba_req_event_rx(wmip, datap, len);
- break;
- case (WMI_REPORT_BTCOEX_CONFIG_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_BTCOEX_CONFIG_EVENTID", DBGARG));
- status = wmi_btcoex_config_event_rx(wmip, datap, len);
- break;
- case (WMI_REPORT_BTCOEX_STATS_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_BTCOEX_STATS_EVENTID", DBGARG));
- status = wmi_btcoex_stats_event_rx(wmip, datap, len);
- break;
- case (WMI_TX_COMPLETE_EVENTID):
- {
- int index;
- TX_COMPLETE_MSG_V1 *pV1;
- WMI_TX_COMPLETE_EVENT *pEv = (WMI_TX_COMPLETE_EVENT *)datap;
- A_PRINTF("comp: %d %d %d\n", pEv->numMessages, pEv->msgLen, pEv->msgType);
-
- for(index = 0 ; index < pEv->numMessages ; index++) {
- pV1 = (TX_COMPLETE_MSG_V1 *)(datap + sizeof(WMI_TX_COMPLETE_EVENT) + index*sizeof(TX_COMPLETE_MSG_V1));
- A_PRINTF("msg: %d %d %d %d\n", pV1->status, pV1->pktID, pV1->rateIdx, pV1->ackFailures);
- }
- }
- break;
- case (WMI_HCI_EVENT_EVENTID):
- status = wmi_hci_event_rx(wmip, datap, len);
- break;
-#ifdef WAPI_ENABLE
- case (WMI_WAPI_REKEY_EVENTID):
- A_DPRINTF(DBG_WMI, (DBGFMT "WMI_WAPI_REKEY_EVENTID", DBGARG));
- status = wmi_wapi_rekey_event_rx(wmip, datap, len);
- break;
-#endif
- default:
- A_DPRINTF(DBG_WMI|DBG_ERROR,
- (DBGFMT "Unknown id 0x%x\n", DBGARG, id));
- wmip->wmi_stats.cmd_id_err++;
- status = A_ERROR;
- break;
- }
-
- A_NETBUF_FREE(osbuf);
-
- return status;
-}
-
-/* Send a "simple" wmi command -- one with no arguments */
-static int
-wmi_simple_cmd(struct wmi_t *wmip, WMI_COMMAND_ID cmdid)
-{
- void *osbuf;
-
- osbuf = A_NETBUF_ALLOC(0);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- return (wmi_cmd_send(wmip, osbuf, cmdid, NO_SYNC_WMIFLAG));
-}
-
-/* Send a "simple" extended wmi command -- one with no arguments.
- Enabling this command only if GPIO or profiling support is enabled.
- This is to suppress warnings on some platforms */
-#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
-static int
-wmi_simple_cmd_xtnd(struct wmi_t *wmip, WMIX_COMMAND_ID cmdid)
-{
- void *osbuf;
-
- osbuf = A_NETBUF_ALLOC(0);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- return (wmi_cmd_send_xtnd(wmip, osbuf, cmdid, NO_SYNC_WMIFLAG));
-}
-#endif
-
-static int
-wmi_ready_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_READY_EVENT *ev = (WMI_READY_EVENT *)datap;
-
- if (len < sizeof(WMI_READY_EVENT)) {
- return A_EINVAL;
- }
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
- wmip->wmi_ready = true;
- A_WMI_READY_EVENT(wmip->wmi_devt, ev->macaddr, ev->phyCapability,
- ev->sw_version, ev->abi_version);
-
- return 0;
-}
-
-#define LE_READ_4(p) \
- ((u32) \
- ((((u8 *)(p))[0] ) | (((u8 *)(p))[1] << 8) | \
- (((u8 *)(p))[2] << 16) | (((u8 *)(p))[3] << 24)))
-
-static int __inline
-iswmmoui(const u8 *frm)
-{
- return frm[1] > 3 && LE_READ_4(frm+2) == ((WMM_OUI_TYPE<<24)|WMM_OUI);
-}
-
-static int __inline
-iswmmparam(const u8 *frm)
-{
- return frm[1] > 5 && frm[6] == WMM_PARAM_OUI_SUBTYPE;
-}
-
-
-static int
-wmi_connect_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_CONNECT_EVENT *ev;
- u8 *pie,*peie;
-
- if (len < sizeof(WMI_CONNECT_EVENT))
- {
- return A_EINVAL;
- }
- ev = (WMI_CONNECT_EVENT *)datap;
-
- A_DPRINTF(DBG_WMI,
- (DBGFMT "freq %d bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
- DBGARG, ev->channel,
- ev->bssid[0], ev->bssid[1], ev->bssid[2],
- ev->bssid[3], ev->bssid[4], ev->bssid[5]));
-
- memcpy(wmip->wmi_bssid, ev->bssid, ATH_MAC_LEN);
-
- /* initialize pointer to start of assoc rsp IEs */
- pie = ev->assocInfo + ev->beaconIeLen + ev->assocReqLen +
- sizeof(u16) + /* capinfo*/
- sizeof(u16) + /* status Code */
- sizeof(u16) ; /* associd */
-
- /* initialize pointer to end of assoc rsp IEs */
- peie = ev->assocInfo + ev->beaconIeLen + ev->assocReqLen + ev->assocRespLen;
-
- while (pie < peie)
- {
- switch (*pie)
- {
- case IEEE80211_ELEMID_VENDOR:
- if (iswmmoui(pie))
- {
- if(iswmmparam (pie))
- {
- wmip->wmi_is_wmm_enabled = true;
- }
- }
- break;
- }
-
- if (wmip->wmi_is_wmm_enabled)
- {
- break;
- }
- pie += pie[1] + 2;
- }
-
- A_WMI_CONNECT_EVENT(wmip->wmi_devt, ev->channel, ev->bssid,
- ev->listenInterval, ev->beaconInterval,
- (NETWORK_TYPE) ev->networkType, ev->beaconIeLen,
- ev->assocReqLen, ev->assocRespLen,
- ev->assocInfo);
-
- return 0;
-}
-
-static int
-wmi_regDomain_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_REG_DOMAIN_EVENT *ev;
-
- if (len < sizeof(*ev)) {
- return A_EINVAL;
- }
- ev = (WMI_REG_DOMAIN_EVENT *)datap;
-
- A_WMI_REGDOMAIN_EVENT(wmip->wmi_devt, ev->regDomain);
-
- return 0;
-}
-
-static int
-wmi_neighborReport_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_NEIGHBOR_REPORT_EVENT *ev;
- int numAps;
-
- if (len < sizeof(*ev)) {
- return A_EINVAL;
- }
- ev = (WMI_NEIGHBOR_REPORT_EVENT *)datap;
- numAps = ev->numberOfAps;
-
- if (len < (int)(sizeof(*ev) + ((numAps - 1) * sizeof(WMI_NEIGHBOR_INFO)))) {
- return A_EINVAL;
- }
-
- A_WMI_NEIGHBORREPORT_EVENT(wmip->wmi_devt, numAps, ev->neighbor);
-
- return 0;
-}
-
-static int
-wmi_disconnect_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_DISCONNECT_EVENT *ev;
- wmip->wmi_traffic_class = 100;
-
- if (len < sizeof(WMI_DISCONNECT_EVENT)) {
- return A_EINVAL;
- }
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- ev = (WMI_DISCONNECT_EVENT *)datap;
-
- A_MEMZERO(wmip->wmi_bssid, sizeof(wmip->wmi_bssid));
-
- wmip->wmi_is_wmm_enabled = false;
- wmip->wmi_pair_crypto_type = NONE_CRYPT;
- wmip->wmi_grp_crypto_type = NONE_CRYPT;
-
- A_WMI_DISCONNECT_EVENT(wmip->wmi_devt, ev->disconnectReason, ev->bssid,
- ev->assocRespLen, ev->assocInfo, ev->protocolReasonStatus);
-
- return 0;
-}
-
-static int
-wmi_peer_node_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_PEER_NODE_EVENT *ev;
-
- if (len < sizeof(WMI_PEER_NODE_EVENT)) {
- return A_EINVAL;
- }
- ev = (WMI_PEER_NODE_EVENT *)datap;
- if (ev->eventCode == PEER_NODE_JOIN_EVENT) {
- A_DPRINTF (DBG_WMI, (DBGFMT "Joined node with Macaddr: ", DBGARG));
- } else if(ev->eventCode == PEER_NODE_LEAVE_EVENT) {
- A_DPRINTF (DBG_WMI, (DBGFMT "left node with Macaddr: ", DBGARG));
- }
-
- A_WMI_PEER_EVENT (wmip->wmi_devt, ev->eventCode, ev->peerMacAddr);
-
- return 0;
-}
-
-static int
-wmi_tkip_micerr_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_TKIP_MICERR_EVENT *ev;
-
- if (len < sizeof(*ev)) {
- return A_EINVAL;
- }
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- ev = (WMI_TKIP_MICERR_EVENT *)datap;
- A_WMI_TKIP_MICERR_EVENT(wmip->wmi_devt, ev->keyid, ev->ismcast);
-
- return 0;
-}
-
-static int
-wmi_bssInfo_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- bss_t *bss = NULL;
- WMI_BSS_INFO_HDR *bih;
- u8 *buf;
- u32 nodeCachingAllowed = 1;
- u8 cached_ssid_len = 0;
- u8 cached_ssid_buf[IEEE80211_NWID_LEN] = {0};
- u8 beacon_ssid_len = 0;
-
- if (len <= sizeof(WMI_BSS_INFO_HDR)) {
- return A_EINVAL;
- }
-
- bih = (WMI_BSS_INFO_HDR *)datap;
- bss = wlan_find_node(&wmip->wmi_scan_table, bih->bssid);
-
- if (bih->rssi > 0) {
- if (NULL == bss)
- return 0; //no node found in the table, just drop the node with incorrect RSSI
- else
- bih->rssi = bss->ni_rssi; //Adjust RSSI in datap in case it is used in A_WMI_BSSINFO_EVENT_RX
- }
-
- A_WMI_BSSINFO_EVENT_RX(wmip->wmi_devt, datap, len);
- /* What is driver config for wlan node caching? */
- if(ar6000_get_driver_cfg(wmip->wmi_devt,
- AR6000_DRIVER_CFG_GET_WLANNODECACHING,
- &nodeCachingAllowed) != 0) {
- wmi_node_return(wmip, bss);
- return A_EINVAL;
- }
-
- if(!nodeCachingAllowed) {
- wmi_node_return(wmip, bss);
- return 0;
- }
-
- buf = datap + sizeof(WMI_BSS_INFO_HDR);
- len -= sizeof(WMI_BSS_INFO_HDR);
-
- A_DPRINTF(DBG_WMI2, (DBGFMT "bssInfo event - ch %u, rssi %02x, "
- "bssid \"%pM\"\n", DBGARG, bih->channel,
- (unsigned char) bih->rssi, bih->bssid));
-
- if(wps_enable && (bih->frameType == PROBERESP_FTYPE) ) {
- wmi_node_return(wmip, bss);
- return 0;
- }
-
- if (bss != NULL) {
- /*
- * Free up the node. Not the most efficient process given
- * we are about to allocate a new node but it is simple and should be
- * adequate.
- */
-
- /* In case of hidden AP, beacon will not have ssid,
- * but a directed probe response will have it,
- * so cache the probe-resp-ssid if already present. */
- if ((true == is_probe_ssid) && (BEACON_FTYPE == bih->frameType))
- {
- u8 *ie_ssid;
-
- ie_ssid = bss->ni_cie.ie_ssid;
- if(ie_ssid && (ie_ssid[1] <= IEEE80211_NWID_LEN) && (ie_ssid[2] != 0))
- {
- cached_ssid_len = ie_ssid[1];
- memcpy(cached_ssid_buf, ie_ssid + 2, cached_ssid_len);
- }
- }
-
- /*
- * Use the current average rssi of associated AP base on assumpiton
- * 1. Most os with GUI will update RSSI by wmi_get_stats_cmd() periodically
- * 2. wmi_get_stats_cmd(..) will be called when calling wmi_startscan_cmd(...)
- * The average value of RSSI give end-user better feeling for instance value of scan result
- * It also sync up RSSI info in GUI between scan result and RSSI signal icon
- */
- if (IEEE80211_ADDR_EQ(wmip->wmi_bssid, bih->bssid)) {
- bih->rssi = bss->ni_rssi;
- bih->snr = bss->ni_snr;
- }
-
- wlan_node_reclaim(&wmip->wmi_scan_table, bss);
- }
-
- /* beacon/probe response frame format
- * [8] time stamp
- * [2] beacon interval
- * [2] capability information
- * [tlv] ssid */
- beacon_ssid_len = buf[SSID_IE_LEN_INDEX];
-
- /* If ssid is cached for this hidden AP, then change buffer len accordingly. */
- if ((true == is_probe_ssid) && (BEACON_FTYPE == bih->frameType) &&
- (0 != cached_ssid_len) &&
- (0 == beacon_ssid_len || (cached_ssid_len > beacon_ssid_len && 0 == buf[SSID_IE_LEN_INDEX + 1])))
- {
- len += (cached_ssid_len - beacon_ssid_len);
- }
-
- bss = wlan_node_alloc(&wmip->wmi_scan_table, len);
- if (bss == NULL) {
- return A_NO_MEMORY;
- }
-
- bss->ni_snr = bih->snr;
- bss->ni_rssi = bih->rssi;
- A_ASSERT(bss->ni_buf != NULL);
-
- /* In case of hidden AP, beacon will not have ssid,
- * but a directed probe response will have it,
- * so place the cached-ssid(probe-resp) in the bssinfo. */
- if ((true == is_probe_ssid) && (BEACON_FTYPE == bih->frameType) &&
- (0 != cached_ssid_len) &&
- (0 == beacon_ssid_len || (beacon_ssid_len && 0 == buf[SSID_IE_LEN_INDEX + 1])))
- {
- u8 *ni_buf = bss->ni_buf;
- int buf_len = len;
-
- /* copy the first 14 bytes such as
- * time-stamp(8), beacon-interval(2), cap-info(2), ssid-id(1), ssid-len(1). */
- memcpy(ni_buf, buf, SSID_IE_LEN_INDEX + 1);
-
- ni_buf[SSID_IE_LEN_INDEX] = cached_ssid_len;
- ni_buf += (SSID_IE_LEN_INDEX + 1);
-
- buf += (SSID_IE_LEN_INDEX + 1);
- buf_len -= (SSID_IE_LEN_INDEX + 1);
-
- /* copy the cached ssid */
- memcpy(ni_buf, cached_ssid_buf, cached_ssid_len);
- ni_buf += cached_ssid_len;
-
- buf += beacon_ssid_len;
- buf_len -= beacon_ssid_len;
-
- if (cached_ssid_len > beacon_ssid_len)
- buf_len -= (cached_ssid_len - beacon_ssid_len);
-
- /* now copy the rest of bytes */
- memcpy(ni_buf, buf, buf_len);
- }
- else
- memcpy(bss->ni_buf, buf, len);
-
- bss->ni_framelen = len;
- if (wlan_parse_beacon(bss->ni_buf, len, &bss->ni_cie) != 0) {
- wlan_node_free(bss);
- return A_EINVAL;
- }
-
- /*
- * Update the frequency in ie_chan, overwriting of channel number
- * which is done in wlan_parse_beacon
- */
- bss->ni_cie.ie_chan = bih->channel;
- wlan_setup_node(&wmip->wmi_scan_table, bss, bih->bssid);
-
- return 0;
-}
-
-static int
-wmi_opt_frame_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- bss_t *bss;
- WMI_OPT_RX_INFO_HDR *bih;
- u8 *buf;
-
- if (len <= sizeof(WMI_OPT_RX_INFO_HDR)) {
- return A_EINVAL;
- }
-
- bih = (WMI_OPT_RX_INFO_HDR *)datap;
- buf = datap + sizeof(WMI_OPT_RX_INFO_HDR);
- len -= sizeof(WMI_OPT_RX_INFO_HDR);
-
- A_DPRINTF(DBG_WMI2, (DBGFMT "opt frame event %2.2x:%2.2x\n", DBGARG,
- bih->bssid[4], bih->bssid[5]));
-
- bss = wlan_find_node(&wmip->wmi_scan_table, bih->bssid);
- if (bss != NULL) {
- /*
- * Free up the node. Not the most efficient process given
- * we are about to allocate a new node but it is simple and should be
- * adequate.
- */
- wlan_node_reclaim(&wmip->wmi_scan_table, bss);
- }
-
- bss = wlan_node_alloc(&wmip->wmi_scan_table, len);
- if (bss == NULL) {
- return A_NO_MEMORY;
- }
-
- bss->ni_snr = bih->snr;
- bss->ni_cie.ie_chan = bih->channel;
- A_ASSERT(bss->ni_buf != NULL);
- memcpy(bss->ni_buf, buf, len);
- wlan_setup_node(&wmip->wmi_scan_table, bss, bih->bssid);
-
- return 0;
-}
-
- /* This event indicates inactivity timeout of a fatpipe(pstream)
- * at the target
- */
-static int
-wmi_pstream_timeout_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_PSTREAM_TIMEOUT_EVENT *ev;
-
- if (len < sizeof(WMI_PSTREAM_TIMEOUT_EVENT)) {
- return A_EINVAL;
- }
-
- A_DPRINTF(DBG_WMI, (DBGFMT "wmi_pstream_timeout_event_rx\n", DBGARG));
-
- ev = (WMI_PSTREAM_TIMEOUT_EVENT *)datap;
-
- /* When the pstream (fat pipe == AC) timesout, it means there were no
- * thinStreams within this pstream & it got implicitly created due to
- * data flow on this AC. We start the inactivity timer only for
- * implicitly created pstream. Just reset the host state.
- */
- /* Set the activeTsids for this AC to 0 */
- LOCK_WMI(wmip);
- wmip->wmi_streamExistsForAC[ev->trafficClass]=0;
- wmip->wmi_fatPipeExists &= ~(1 << ev->trafficClass);
- UNLOCK_WMI(wmip);
-
- /*Indicate inactivity to driver layer for this fatpipe (pstream)*/
- A_WMI_STREAM_TX_INACTIVE(wmip->wmi_devt, ev->trafficClass);
-
- return 0;
-}
-
-static int
-wmi_bitrate_reply_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_BIT_RATE_REPLY *reply;
- s32 rate;
- u32 sgi,index;
- /* 54149:
- * WMI_BIT_RATE_CMD structure is changed to WMI_BIT_RATE_REPLY.
- * since there is difference in the length and to avoid returning
- * error value.
- */
- if (len < sizeof(WMI_BIT_RATE_REPLY)) {
- return A_EINVAL;
- }
- reply = (WMI_BIT_RATE_REPLY *)datap;
- A_DPRINTF(DBG_WMI,
- (DBGFMT "Enter - rateindex %d\n", DBGARG, reply->rateIndex));
-
- if (reply->rateIndex == (s8) RATE_AUTO) {
- rate = RATE_AUTO;
- } else {
- // the SGI state is stored as the MSb of the rateIndex
- index = reply->rateIndex & 0x7f;
- sgi = (reply->rateIndex & 0x80)? 1:0;
- rate = wmi_rateTable[index][sgi];
- }
-
- A_WMI_BITRATE_RX(wmip->wmi_devt, rate);
- return 0;
-}
-
-static int
-wmi_ratemask_reply_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_FIX_RATES_REPLY *reply;
-
- if (len < sizeof(WMI_FIX_RATES_REPLY)) {
- return A_EINVAL;
- }
- reply = (WMI_FIX_RATES_REPLY *)datap;
- A_DPRINTF(DBG_WMI,
- (DBGFMT "Enter - fixed rate mask %x\n", DBGARG, reply->fixRateMask));
-
- A_WMI_RATEMASK_RX(wmip->wmi_devt, reply->fixRateMask);
-
- return 0;
-}
-
-static int
-wmi_channelList_reply_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_CHANNEL_LIST_REPLY *reply;
-
- if (len < sizeof(WMI_CHANNEL_LIST_REPLY)) {
- return A_EINVAL;
- }
- reply = (WMI_CHANNEL_LIST_REPLY *)datap;
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- A_WMI_CHANNELLIST_RX(wmip->wmi_devt, reply->numChannels,
- reply->channelList);
-
- return 0;
-}
-
-static int
-wmi_txPwr_reply_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_TX_PWR_REPLY *reply;
-
- if (len < sizeof(*reply)) {
- return A_EINVAL;
- }
- reply = (WMI_TX_PWR_REPLY *)datap;
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- A_WMI_TXPWR_RX(wmip->wmi_devt, reply->dbM);
-
- return 0;
-}
-static int
-wmi_keepalive_reply_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_GET_KEEPALIVE_CMD *reply;
-
- if (len < sizeof(*reply)) {
- return A_EINVAL;
- }
- reply = (WMI_GET_KEEPALIVE_CMD *)datap;
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- A_WMI_KEEPALIVE_RX(wmip->wmi_devt, reply->configured);
-
- return 0;
-}
-
-
-static int
-wmi_dset_open_req_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMIX_DSETOPENREQ_EVENT *dsetopenreq;
-
- if (len < sizeof(WMIX_DSETOPENREQ_EVENT)) {
- return A_EINVAL;
- }
- dsetopenreq = (WMIX_DSETOPENREQ_EVENT *)datap;
- A_DPRINTF(DBG_WMI,
- (DBGFMT "Enter - dset_id=0x%x\n", DBGARG, dsetopenreq->dset_id));
- A_WMI_DSET_OPEN_REQ(wmip->wmi_devt,
- dsetopenreq->dset_id,
- dsetopenreq->targ_dset_handle,
- dsetopenreq->targ_reply_fn,
- dsetopenreq->targ_reply_arg);
-
- return 0;
-}
-
-#ifdef CONFIG_HOST_DSET_SUPPORT
-static int
-wmi_dset_close_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMIX_DSETCLOSE_EVENT *dsetclose;
-
- if (len < sizeof(WMIX_DSETCLOSE_EVENT)) {
- return A_EINVAL;
- }
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- dsetclose = (WMIX_DSETCLOSE_EVENT *)datap;
- A_WMI_DSET_CLOSE(wmip->wmi_devt, dsetclose->access_cookie);
-
- return 0;
-}
-
-static int
-wmi_dset_data_req_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMIX_DSETDATAREQ_EVENT *dsetdatareq;
-
- if (len < sizeof(WMIX_DSETDATAREQ_EVENT)) {
- return A_EINVAL;
- }
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- dsetdatareq = (WMIX_DSETDATAREQ_EVENT *)datap;
- A_WMI_DSET_DATA_REQ(wmip->wmi_devt,
- dsetdatareq->access_cookie,
- dsetdatareq->offset,
- dsetdatareq->length,
- dsetdatareq->targ_buf,
- dsetdatareq->targ_reply_fn,
- dsetdatareq->targ_reply_arg);
-
- return 0;
-}
-#endif /* CONFIG_HOST_DSET_SUPPORT */
-
-static int
-wmi_scanComplete_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_SCAN_COMPLETE_EVENT *ev;
-
- ev = (WMI_SCAN_COMPLETE_EVENT *)datap;
- if ((int)ev->status == 0) {
- wlan_refresh_inactive_nodes(&wmip->wmi_scan_table);
- }
- A_WMI_SCANCOMPLETE_EVENT(wmip->wmi_devt, (int) ev->status);
- is_probe_ssid = false;
-
- return 0;
-}
-
-/*
- * Target is reporting a programming error. This is for
- * developer aid only. Target only checks a few common violations
- * and it is responsibility of host to do all error checking.
- * Behavior of target after wmi error event is undefined.
- * A reset is recommended.
- */
-static int
-wmi_errorEvent_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_CMD_ERROR_EVENT *ev;
-
- ev = (WMI_CMD_ERROR_EVENT *)datap;
- AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Programming Error: cmd=%d ", ev->commandId));
- switch (ev->errorCode) {
- case (INVALID_PARAM):
- AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Illegal Parameter\n"));
- break;
- case (ILLEGAL_STATE):
- AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Illegal State\n"));
- break;
- case (INTERNAL_ERROR):
- AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Internal Error\n"));
- break;
- }
-
- return 0;
-}
-
-
-static int
-wmi_statsEvent_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- A_WMI_TARGETSTATS_EVENT(wmip->wmi_devt, datap, len);
-
- return 0;
-}
-
-static int
-wmi_rssiThresholdEvent_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_RSSI_THRESHOLD_EVENT *reply;
- WMI_RSSI_THRESHOLD_VAL newThreshold;
- WMI_RSSI_THRESHOLD_PARAMS_CMD cmd;
- SQ_THRESHOLD_PARAMS *sq_thresh =
- &wmip->wmi_SqThresholdParams[SIGNAL_QUALITY_METRICS_RSSI];
- u8 upper_rssi_threshold, lower_rssi_threshold;
- s16 rssi;
-
- if (len < sizeof(*reply)) {
- return A_EINVAL;
- }
- reply = (WMI_RSSI_THRESHOLD_EVENT *)datap;
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
- newThreshold = (WMI_RSSI_THRESHOLD_VAL) reply->range;
- rssi = reply->rssi;
-
- /*
- * Identify the threshold breached and communicate that to the app. After
- * that install a new set of thresholds based on the signal quality
- * reported by the target
- */
- if (newThreshold) {
- /* Upper threshold breached */
- if (rssi < sq_thresh->upper_threshold[0]) {
- A_DPRINTF(DBG_WMI, (DBGFMT "Spurious upper RSSI threshold event: "
- " %d\n", DBGARG, rssi));
- } else if ((rssi < sq_thresh->upper_threshold[1]) &&
- (rssi >= sq_thresh->upper_threshold[0]))
- {
- newThreshold = WMI_RSSI_THRESHOLD1_ABOVE;
- } else if ((rssi < sq_thresh->upper_threshold[2]) &&
- (rssi >= sq_thresh->upper_threshold[1]))
- {
- newThreshold = WMI_RSSI_THRESHOLD2_ABOVE;
- } else if ((rssi < sq_thresh->upper_threshold[3]) &&
- (rssi >= sq_thresh->upper_threshold[2]))
- {
- newThreshold = WMI_RSSI_THRESHOLD3_ABOVE;
- } else if ((rssi < sq_thresh->upper_threshold[4]) &&
- (rssi >= sq_thresh->upper_threshold[3]))
- {
- newThreshold = WMI_RSSI_THRESHOLD4_ABOVE;
- } else if ((rssi < sq_thresh->upper_threshold[5]) &&
- (rssi >= sq_thresh->upper_threshold[4]))
- {
- newThreshold = WMI_RSSI_THRESHOLD5_ABOVE;
- } else if (rssi >= sq_thresh->upper_threshold[5]) {
- newThreshold = WMI_RSSI_THRESHOLD6_ABOVE;
- }
- } else {
- /* Lower threshold breached */
- if (rssi > sq_thresh->lower_threshold[0]) {
- A_DPRINTF(DBG_WMI, (DBGFMT "Spurious lower RSSI threshold event: "
- "%d %d\n", DBGARG, rssi, sq_thresh->lower_threshold[0]));
- } else if ((rssi > sq_thresh->lower_threshold[1]) &&
- (rssi <= sq_thresh->lower_threshold[0]))
- {
- newThreshold = WMI_RSSI_THRESHOLD6_BELOW;
- } else if ((rssi > sq_thresh->lower_threshold[2]) &&
- (rssi <= sq_thresh->lower_threshold[1]))
- {
- newThreshold = WMI_RSSI_THRESHOLD5_BELOW;
- } else if ((rssi > sq_thresh->lower_threshold[3]) &&
- (rssi <= sq_thresh->lower_threshold[2]))
- {
- newThreshold = WMI_RSSI_THRESHOLD4_BELOW;
- } else if ((rssi > sq_thresh->lower_threshold[4]) &&
- (rssi <= sq_thresh->lower_threshold[3]))
- {
- newThreshold = WMI_RSSI_THRESHOLD3_BELOW;
- } else if ((rssi > sq_thresh->lower_threshold[5]) &&
- (rssi <= sq_thresh->lower_threshold[4]))
- {
- newThreshold = WMI_RSSI_THRESHOLD2_BELOW;
- } else if (rssi <= sq_thresh->lower_threshold[5]) {
- newThreshold = WMI_RSSI_THRESHOLD1_BELOW;
- }
- }
- /* Calculate and install the next set of thresholds */
- lower_rssi_threshold = ar6000_get_lower_threshold(rssi, sq_thresh,
- sq_thresh->lower_threshold_valid_count);
- upper_rssi_threshold = ar6000_get_upper_threshold(rssi, sq_thresh,
- sq_thresh->upper_threshold_valid_count);
- /* Issue a wmi command to install the thresholds */
- cmd.thresholdAbove1_Val = upper_rssi_threshold;
- cmd.thresholdBelow1_Val = lower_rssi_threshold;
- cmd.weight = sq_thresh->weight;
- cmd.pollTime = sq_thresh->polling_interval;
-
- rssi_event_value = rssi;
-
- if (wmi_send_rssi_threshold_params(wmip, &cmd) != 0) {
- A_DPRINTF(DBG_WMI, (DBGFMT "Unable to configure the RSSI thresholds\n",
- DBGARG));
- }
-
- A_WMI_RSSI_THRESHOLD_EVENT(wmip->wmi_devt, newThreshold, reply->rssi);
-
- return 0;
-}
-
-
-static int
-wmi_reportErrorEvent_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_TARGET_ERROR_REPORT_EVENT *reply;
-
- if (len < sizeof(*reply)) {
- return A_EINVAL;
- }
- reply = (WMI_TARGET_ERROR_REPORT_EVENT *)datap;
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- A_WMI_REPORT_ERROR_EVENT(wmip->wmi_devt, (WMI_TARGET_ERROR_VAL) reply->errorVal);
-
- return 0;
-}
-
-static int
-wmi_cac_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_CAC_EVENT *reply;
- WMM_TSPEC_IE *tspec_ie;
- u16 activeTsids;
-
- if (len < sizeof(*reply)) {
- return A_EINVAL;
- }
- reply = (WMI_CAC_EVENT *)datap;
-
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
- (reply->statusCode != TSPEC_STATUS_CODE_ADMISSION_ACCEPTED)) {
- tspec_ie = (WMM_TSPEC_IE *) &(reply->tspecSuggestion);
-
- wmi_delete_pstream_cmd(wmip, reply->ac,
- (tspec_ie->tsInfo_info >> TSPEC_TSID_S) & TSPEC_TSID_MASK);
- }
- else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
- u8 i;
-
- /* following assumes that there is only one outstanding ADDTS request
- when this event is received */
- LOCK_WMI(wmip);
- activeTsids = wmip->wmi_streamExistsForAC[reply->ac];
- UNLOCK_WMI(wmip);
-
- for (i = 0; i < sizeof(activeTsids) * 8; i++) {
- if ((activeTsids >> i) & 1) {
- break;
- }
- }
- if (i < (sizeof(activeTsids) * 8)) {
- wmi_delete_pstream_cmd(wmip, reply->ac, i);
- }
- }
- /*
- * Ev#72990: Clear active tsids and Add missing handling
- * for delete qos stream from AP
- */
- else if (reply->cac_indication == CAC_INDICATION_DELETE) {
- u8 tsid = 0;
-
- tspec_ie = (WMM_TSPEC_IE *) &(reply->tspecSuggestion);
- tsid= ((tspec_ie->tsInfo_info >> TSPEC_TSID_S) & TSPEC_TSID_MASK);
- LOCK_WMI(wmip);
- wmip->wmi_streamExistsForAC[reply->ac] &= ~(1<<tsid);
- activeTsids = wmip->wmi_streamExistsForAC[reply->ac];
- UNLOCK_WMI(wmip);
-
-
- /* Indicate stream inactivity to driver layer only if all tsids
- * within this AC are deleted.
- */
- if (!activeTsids) {
- A_WMI_STREAM_TX_INACTIVE(wmip->wmi_devt, reply->ac);
- wmip->wmi_fatPipeExists &= ~(1 << reply->ac);
- }
- }
-
- A_WMI_CAC_EVENT(wmip->wmi_devt, reply->ac,
- reply->cac_indication, reply->statusCode,
- reply->tspecSuggestion);
-
- return 0;
-}
-
-static int
-wmi_channel_change_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_CHANNEL_CHANGE_EVENT *reply;
-
- if (len < sizeof(*reply)) {
- return A_EINVAL;
- }
- reply = (WMI_CHANNEL_CHANGE_EVENT *)datap;
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- A_WMI_CHANNEL_CHANGE_EVENT(wmip->wmi_devt, reply->oldChannel,
- reply->newChannel);
-
- return 0;
-}
-
-static int
-wmi_hbChallengeResp_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMIX_HB_CHALLENGE_RESP_EVENT *reply;
-
- if (len < sizeof(*reply)) {
- return A_EINVAL;
- }
- reply = (WMIX_HB_CHALLENGE_RESP_EVENT *)datap;
- A_DPRINTF(DBG_WMI, (DBGFMT "wmi: challenge response event\n", DBGARG));
-
- A_WMI_HBCHALLENGERESP_EVENT(wmip->wmi_devt, reply->cookie, reply->source);
-
- return 0;
-}
-
-static int
-wmi_roam_tbl_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_TARGET_ROAM_TBL *reply;
-
- if (len < sizeof(*reply)) {
- return A_EINVAL;
- }
- reply = (WMI_TARGET_ROAM_TBL *)datap;
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- A_WMI_ROAM_TABLE_EVENT(wmip->wmi_devt, reply);
-
- return 0;
-}
-
-static int
-wmi_roam_data_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_TARGET_ROAM_DATA *reply;
-
- if (len < sizeof(*reply)) {
- return A_EINVAL;
- }
- reply = (WMI_TARGET_ROAM_DATA *)datap;
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- A_WMI_ROAM_DATA_EVENT(wmip->wmi_devt, reply);
-
- return 0;
-}
-
-static int
-wmi_txRetryErrEvent_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- if (len < sizeof(WMI_TX_RETRY_ERR_EVENT)) {
- return A_EINVAL;
- }
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- A_WMI_TX_RETRY_ERR_EVENT(wmip->wmi_devt);
-
- return 0;
-}
-
-static int
-wmi_snrThresholdEvent_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_SNR_THRESHOLD_EVENT *reply;
- SQ_THRESHOLD_PARAMS *sq_thresh =
- &wmip->wmi_SqThresholdParams[SIGNAL_QUALITY_METRICS_SNR];
- WMI_SNR_THRESHOLD_VAL newThreshold;
- WMI_SNR_THRESHOLD_PARAMS_CMD cmd;
- u8 upper_snr_threshold, lower_snr_threshold;
- s16 snr;
-
- if (len < sizeof(*reply)) {
- return A_EINVAL;
- }
- reply = (WMI_SNR_THRESHOLD_EVENT *)datap;
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- newThreshold = (WMI_SNR_THRESHOLD_VAL) reply->range;
- snr = reply->snr;
- /*
- * Identify the threshold breached and communicate that to the app. After
- * that install a new set of thresholds based on the signal quality
- * reported by the target
- */
- if (newThreshold) {
- /* Upper threshold breached */
- if (snr < sq_thresh->upper_threshold[0]) {
- A_DPRINTF(DBG_WMI, (DBGFMT "Spurious upper SNR threshold event: "
- "%d\n", DBGARG, snr));
- } else if ((snr < sq_thresh->upper_threshold[1]) &&
- (snr >= sq_thresh->upper_threshold[0]))
- {
- newThreshold = WMI_SNR_THRESHOLD1_ABOVE;
- } else if ((snr < sq_thresh->upper_threshold[2]) &&
- (snr >= sq_thresh->upper_threshold[1]))
- {
- newThreshold = WMI_SNR_THRESHOLD2_ABOVE;
- } else if ((snr < sq_thresh->upper_threshold[3]) &&
- (snr >= sq_thresh->upper_threshold[2]))
- {
- newThreshold = WMI_SNR_THRESHOLD3_ABOVE;
- } else if (snr >= sq_thresh->upper_threshold[3]) {
- newThreshold = WMI_SNR_THRESHOLD4_ABOVE;
- }
- } else {
- /* Lower threshold breached */
- if (snr > sq_thresh->lower_threshold[0]) {
- A_DPRINTF(DBG_WMI, (DBGFMT "Spurious lower SNR threshold event: "
- "%d %d\n", DBGARG, snr, sq_thresh->lower_threshold[0]));
- } else if ((snr > sq_thresh->lower_threshold[1]) &&
- (snr <= sq_thresh->lower_threshold[0]))
- {
- newThreshold = WMI_SNR_THRESHOLD4_BELOW;
- } else if ((snr > sq_thresh->lower_threshold[2]) &&
- (snr <= sq_thresh->lower_threshold[1]))
- {
- newThreshold = WMI_SNR_THRESHOLD3_BELOW;
- } else if ((snr > sq_thresh->lower_threshold[3]) &&
- (snr <= sq_thresh->lower_threshold[2]))
- {
- newThreshold = WMI_SNR_THRESHOLD2_BELOW;
- } else if (snr <= sq_thresh->lower_threshold[3]) {
- newThreshold = WMI_SNR_THRESHOLD1_BELOW;
- }
- }
-
- /* Calculate and install the next set of thresholds */
- lower_snr_threshold = ar6000_get_lower_threshold(snr, sq_thresh,
- sq_thresh->lower_threshold_valid_count);
- upper_snr_threshold = ar6000_get_upper_threshold(snr, sq_thresh,
- sq_thresh->upper_threshold_valid_count);
-
- /* Issue a wmi command to install the thresholds */
- cmd.thresholdAbove1_Val = upper_snr_threshold;
- cmd.thresholdBelow1_Val = lower_snr_threshold;
- cmd.weight = sq_thresh->weight;
- cmd.pollTime = sq_thresh->polling_interval;
-
- A_DPRINTF(DBG_WMI, (DBGFMT "snr: %d, threshold: %d, lower: %d, upper: %d\n"
- ,DBGARG, snr, newThreshold, lower_snr_threshold,
- upper_snr_threshold));
-
- snr_event_value = snr;
-
- if (wmi_send_snr_threshold_params(wmip, &cmd) != 0) {
- A_DPRINTF(DBG_WMI, (DBGFMT "Unable to configure the SNR thresholds\n",
- DBGARG));
- }
- A_WMI_SNR_THRESHOLD_EVENT_RX(wmip->wmi_devt, newThreshold, reply->snr);
-
- return 0;
-}
-
-static int
-wmi_lqThresholdEvent_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_LQ_THRESHOLD_EVENT *reply;
-
- if (len < sizeof(*reply)) {
- return A_EINVAL;
- }
- reply = (WMI_LQ_THRESHOLD_EVENT *)datap;
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- A_WMI_LQ_THRESHOLD_EVENT_RX(wmip->wmi_devt,
- (WMI_LQ_THRESHOLD_VAL) reply->range,
- reply->lq);
-
- return 0;
-}
-
-static int
-wmi_aplistEvent_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- u16 ap_info_entry_size;
- WMI_APLIST_EVENT *ev = (WMI_APLIST_EVENT *)datap;
- WMI_AP_INFO_V1 *ap_info_v1;
- u8 i;
-
- if (len < sizeof(WMI_APLIST_EVENT)) {
- return A_EINVAL;
- }
-
- if (ev->apListVer == APLIST_VER1) {
- ap_info_entry_size = sizeof(WMI_AP_INFO_V1);
- ap_info_v1 = (WMI_AP_INFO_V1 *)ev->apList;
- } else {
- return A_EINVAL;
- }
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Number of APs in APLIST Event is %d\n", ev->numAP));
- if (len < (int)(sizeof(WMI_APLIST_EVENT) +
- (ev->numAP - 1) * ap_info_entry_size))
- {
- return A_EINVAL;
- }
-
- /*
- * AP List Ver1 Contents
- */
- for (i = 0; i < ev->numAP; i++) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("AP#%d BSSID %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x "\
- "Channel %d\n", i,
- ap_info_v1->bssid[0], ap_info_v1->bssid[1],
- ap_info_v1->bssid[2], ap_info_v1->bssid[3],
- ap_info_v1->bssid[4], ap_info_v1->bssid[5],
- ap_info_v1->channel));
- ap_info_v1++;
- }
- return 0;
-}
-
-static int
-wmi_dbglog_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- u32 dropped;
-
- dropped = *((u32 *)datap);
- datap += sizeof(dropped);
- len -= sizeof(dropped);
- A_WMI_DBGLOG_EVENT(wmip->wmi_devt, dropped, (s8 *)datap, len);
- return 0;
-}
-
-/*
- * Called to send a wmi command. Command specific data is already built
- * on osbuf and current osbuf->data points to it.
- */
-int
-wmi_cmd_send(struct wmi_t *wmip, void *osbuf, WMI_COMMAND_ID cmdId,
- WMI_SYNC_FLAG syncflag)
-{
- int status;
-#define IS_OPT_TX_CMD(cmdId) ((cmdId == WMI_OPT_TX_FRAME_CMDID))
- WMI_CMD_HDR *cHdr;
- HTC_ENDPOINT_ID eid = wmip->wmi_endpoint_id;
-
- A_ASSERT(osbuf != NULL);
-
- if (syncflag >= END_WMIFLAG) {
- A_NETBUF_FREE(osbuf);
- return A_EINVAL;
- }
-
- if ((syncflag == SYNC_BEFORE_WMIFLAG) || (syncflag == SYNC_BOTH_WMIFLAG)) {
- /*
- * We want to make sure all data currently queued is transmitted before
- * the cmd execution. Establish a new sync point.
- */
- wmi_sync_point(wmip);
- }
-
- if (A_NETBUF_PUSH(osbuf, sizeof(WMI_CMD_HDR)) != 0) {
- A_NETBUF_FREE(osbuf);
- return A_NO_MEMORY;
- }
-
- cHdr = (WMI_CMD_HDR *)A_NETBUF_DATA(osbuf);
- cHdr->commandId = (u16) cmdId;
- cHdr->info1 = 0; // added for virtual interface
-
- /*
- * Only for OPT_TX_CMD, use BE endpoint.
- */
- if (IS_OPT_TX_CMD(cmdId)) {
- if ((status=wmi_data_hdr_add(wmip, osbuf, OPT_MSGTYPE, false, false,0,NULL)) != 0) {
- A_NETBUF_FREE(osbuf);
- return status;
- }
- eid = A_WMI_Ac2EndpointID(wmip->wmi_devt, WMM_AC_BE);
- }
- A_WMI_CONTROL_TX(wmip->wmi_devt, osbuf, eid);
-
- if ((syncflag == SYNC_AFTER_WMIFLAG) || (syncflag == SYNC_BOTH_WMIFLAG)) {
- /*
- * We want to make sure all new data queued waits for the command to
- * execute. Establish a new sync point.
- */
- wmi_sync_point(wmip);
- }
- return (0);
-#undef IS_OPT_TX_CMD
-}
-
-int
-wmi_cmd_send_xtnd(struct wmi_t *wmip, void *osbuf, WMIX_COMMAND_ID cmdId,
- WMI_SYNC_FLAG syncflag)
-{
- WMIX_CMD_HDR *cHdr;
-
- if (A_NETBUF_PUSH(osbuf, sizeof(WMIX_CMD_HDR)) != 0) {
- A_NETBUF_FREE(osbuf);
- return A_NO_MEMORY;
- }
-
- cHdr = (WMIX_CMD_HDR *)A_NETBUF_DATA(osbuf);
- cHdr->commandId = (u32) cmdId;
-
- return wmi_cmd_send(wmip, osbuf, WMI_EXTENSION_CMDID, syncflag);
-}
-
-int
-wmi_connect_cmd(struct wmi_t *wmip, NETWORK_TYPE netType,
- DOT11_AUTH_MODE dot11AuthMode, AUTH_MODE authMode,
- CRYPTO_TYPE pairwiseCrypto, u8 pairwiseCryptoLen,
- CRYPTO_TYPE groupCrypto, u8 groupCryptoLen,
- int ssidLength, u8 *ssid,
- u8 *bssid, u16 channel, u32 ctrl_flags)
-{
- void *osbuf;
- WMI_CONNECT_CMD *cc;
- wmip->wmi_traffic_class = 100;
-
- if ((pairwiseCrypto == NONE_CRYPT) && (groupCrypto != NONE_CRYPT)) {
- return A_EINVAL;
- }
- if ((pairwiseCrypto != NONE_CRYPT) && (groupCrypto == NONE_CRYPT)) {
- return A_EINVAL;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_CONNECT_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_CONNECT_CMD));
-
- cc = (WMI_CONNECT_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cc, sizeof(*cc));
-
- if (ssidLength)
- {
- memcpy(cc->ssid, ssid, ssidLength);
- }
-
- cc->ssidLength = ssidLength;
- cc->networkType = netType;
- cc->dot11AuthMode = dot11AuthMode;
- cc->authMode = authMode;
- cc->pairwiseCryptoType = pairwiseCrypto;
- cc->pairwiseCryptoLen = pairwiseCryptoLen;
- cc->groupCryptoType = groupCrypto;
- cc->groupCryptoLen = groupCryptoLen;
- cc->channel = channel;
- cc->ctrl_flags = ctrl_flags;
-
- if (bssid != NULL) {
- memcpy(cc->bssid, bssid, ATH_MAC_LEN);
- }
-
- wmip->wmi_pair_crypto_type = pairwiseCrypto;
- wmip->wmi_grp_crypto_type = groupCrypto;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_reconnect_cmd(struct wmi_t *wmip, u8 *bssid, u16 channel)
-{
- void *osbuf;
- WMI_RECONNECT_CMD *cc;
- wmip->wmi_traffic_class = 100;
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_RECONNECT_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_RECONNECT_CMD));
-
- cc = (WMI_RECONNECT_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cc, sizeof(*cc));
-
- cc->channel = channel;
-
- if (bssid != NULL) {
- memcpy(cc->bssid, bssid, ATH_MAC_LEN);
- }
-
- return (wmi_cmd_send(wmip, osbuf, WMI_RECONNECT_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_disconnect_cmd(struct wmi_t *wmip)
-{
- int status;
- wmip->wmi_traffic_class = 100;
-
- /* Bug fix for 24817(elevator bug) - the disconnect command does not
- need to do a SYNC before.*/
- status = wmi_simple_cmd(wmip, WMI_DISCONNECT_CMDID);
-
- return status;
-}
-
-int
-wmi_startscan_cmd(struct wmi_t *wmip, WMI_SCAN_TYPE scanType,
- u32 forceFgScan, u32 isLegacy,
- u32 homeDwellTime, u32 forceScanInterval,
- s8 numChan, u16 *channelList)
-{
- void *osbuf;
- WMI_START_SCAN_CMD *sc;
- s8 size;
-
- size = sizeof (*sc);
-
- if ((scanType != WMI_LONG_SCAN) && (scanType != WMI_SHORT_SCAN)) {
- return A_EINVAL;
- }
-
- if (numChan) {
- if (numChan > WMI_MAX_CHANNELS) {
- return A_EINVAL;
- }
- size += sizeof(u16) * (numChan - 1);
- }
-
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
-
- sc = (WMI_START_SCAN_CMD *)(A_NETBUF_DATA(osbuf));
- sc->scanType = scanType;
- sc->forceFgScan = forceFgScan;
- sc->isLegacy = isLegacy;
- sc->homeDwellTime = homeDwellTime;
- sc->forceScanInterval = forceScanInterval;
- sc->numChannels = numChan;
- if (numChan) {
- memcpy(sc->channelList, channelList, numChan * sizeof(u16));
- }
-
- return (wmi_cmd_send(wmip, osbuf, WMI_START_SCAN_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_scanparams_cmd(struct wmi_t *wmip, u16 fg_start_sec,
- u16 fg_end_sec, u16 bg_sec,
- u16 minact_chdw_msec, u16 maxact_chdw_msec,
- u16 pas_chdw_msec,
- u8 shScanRatio, u8 scanCtrlFlags,
- u32 max_dfsch_act_time, u16 maxact_scan_per_ssid)
-{
- void *osbuf;
- WMI_SCAN_PARAMS_CMD *sc;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*sc));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*sc));
-
- sc = (WMI_SCAN_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(sc, sizeof(*sc));
- sc->fg_start_period = fg_start_sec;
- sc->fg_end_period = fg_end_sec;
- sc->bg_period = bg_sec;
- sc->minact_chdwell_time = minact_chdw_msec;
- sc->maxact_chdwell_time = maxact_chdw_msec;
- sc->pas_chdwell_time = pas_chdw_msec;
- sc->shortScanRatio = shScanRatio;
- sc->scanCtrlFlags = scanCtrlFlags;
- sc->max_dfsch_act_time = max_dfsch_act_time;
- sc->maxact_scan_per_ssid = maxact_scan_per_ssid;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_SCAN_PARAMS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_bssfilter_cmd(struct wmi_t *wmip, u8 filter, u32 ieMask)
-{
- void *osbuf;
- WMI_BSS_FILTER_CMD *cmd;
-
- if (filter >= LAST_BSS_FILTER) {
- return A_EINVAL;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_BSS_FILTER_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->bssFilter = filter;
- cmd->ieMask = ieMask;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BSS_FILTER_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_probedSsid_cmd(struct wmi_t *wmip, u8 index, u8 flag,
- u8 ssidLength, u8 *ssid)
-{
- void *osbuf;
- WMI_PROBED_SSID_CMD *cmd;
-
- if (index > MAX_PROBED_SSID_INDEX) {
- return A_EINVAL;
- }
- if (ssidLength > sizeof(cmd->ssid)) {
- return A_EINVAL;
- }
- if ((flag & (DISABLE_SSID_FLAG | ANY_SSID_FLAG)) && (ssidLength > 0)) {
- return A_EINVAL;
- }
- if ((flag & SPECIFIC_SSID_FLAG) && !ssidLength) {
- return A_EINVAL;
- }
-
- if (flag & SPECIFIC_SSID_FLAG) {
- is_probe_ssid = true;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_PROBED_SSID_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->entryIndex = index;
- cmd->flag = flag;
- cmd->ssidLength = ssidLength;
- memcpy(cmd->ssid, ssid, ssidLength);
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_PROBED_SSID_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_listeninterval_cmd(struct wmi_t *wmip, u16 listenInterval, u16 listenBeacons)
-{
- void *osbuf;
- WMI_LISTEN_INT_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_LISTEN_INT_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->listenInterval = listenInterval;
- cmd->numBeacons = listenBeacons;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_LISTEN_INT_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_bmisstime_cmd(struct wmi_t *wmip, u16 bmissTime, u16 bmissBeacons)
-{
- void *osbuf;
- WMI_BMISS_TIME_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_BMISS_TIME_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->bmissTime = bmissTime;
- cmd->numBeacons = bmissBeacons;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BMISS_TIME_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_associnfo_cmd(struct wmi_t *wmip, u8 ieType,
- u8 ieLen, u8 *ieInfo)
-{
- void *osbuf;
- WMI_SET_ASSOC_INFO_CMD *cmd;
- u16 cmdLen;
-
- cmdLen = sizeof(*cmd) + ieLen - 1;
- osbuf = A_NETBUF_ALLOC(cmdLen);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, cmdLen);
-
- cmd = (WMI_SET_ASSOC_INFO_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, cmdLen);
- cmd->ieType = ieType;
- cmd->bufferSize = ieLen;
- memcpy(cmd->assocInfo, ieInfo, ieLen);
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_ASSOC_INFO_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_powermode_cmd(struct wmi_t *wmip, u8 powerMode)
-{
- void *osbuf;
- WMI_POWER_MODE_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_POWER_MODE_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->powerMode = powerMode;
- wmip->wmi_powerMode = powerMode;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_POWER_MODE_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_ibsspmcaps_cmd(struct wmi_t *wmip, u8 pmEnable, u8 ttl,
- u16 atim_windows, u16 timeout_value)
-{
- void *osbuf;
- WMI_IBSS_PM_CAPS_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_IBSS_PM_CAPS_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->power_saving = pmEnable;
- cmd->ttl = ttl;
- cmd->atim_windows = atim_windows;
- cmd->timeout_value = timeout_value;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_IBSS_PM_CAPS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_apps_cmd(struct wmi_t *wmip, u8 psType, u32 idle_time,
- u32 ps_period, u8 sleep_period)
-{
- void *osbuf;
- WMI_AP_PS_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_AP_PS_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->psType = psType;
- cmd->idle_time = idle_time;
- cmd->ps_period = ps_period;
- cmd->sleep_period = sleep_period;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_AP_PS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_pmparams_cmd(struct wmi_t *wmip, u16 idlePeriod,
- u16 psPollNum, u16 dtimPolicy,
- u16 tx_wakeup_policy, u16 num_tx_to_wakeup,
- u16 ps_fail_event_policy)
-{
- void *osbuf;
- WMI_POWER_PARAMS_CMD *pm;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*pm));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*pm));
-
- pm = (WMI_POWER_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(pm, sizeof(*pm));
- pm->idle_period = idlePeriod;
- pm->pspoll_number = psPollNum;
- pm->dtim_policy = dtimPolicy;
- pm->tx_wakeup_policy = tx_wakeup_policy;
- pm->num_tx_to_wakeup = num_tx_to_wakeup;
- pm->ps_fail_event_policy = ps_fail_event_policy;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_POWER_PARAMS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_disctimeout_cmd(struct wmi_t *wmip, u8 timeout)
-{
- void *osbuf;
- WMI_DISC_TIMEOUT_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_DISC_TIMEOUT_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->disconnectTimeout = timeout;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_DISC_TIMEOUT_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_addKey_cmd(struct wmi_t *wmip, u8 keyIndex, CRYPTO_TYPE keyType,
- u8 keyUsage, u8 keyLength, u8 *keyRSC,
- u8 *keyMaterial, u8 key_op_ctrl, u8 *macAddr,
- WMI_SYNC_FLAG sync_flag)
-{
- void *osbuf;
- WMI_ADD_CIPHER_KEY_CMD *cmd;
-
- if ((keyIndex > WMI_MAX_KEY_INDEX) || (keyLength > WMI_MAX_KEY_LEN) ||
- (keyMaterial == NULL))
- {
- return A_EINVAL;
- }
-
- if ((WEP_CRYPT != keyType) && (NULL == keyRSC)) {
- return A_EINVAL;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_ADD_CIPHER_KEY_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->keyIndex = keyIndex;
- cmd->keyType = keyType;
- cmd->keyUsage = keyUsage;
- cmd->keyLength = keyLength;
- memcpy(cmd->key, keyMaterial, keyLength);
-#ifdef WAPI_ENABLE
- if (NULL != keyRSC && key_op_ctrl != KEY_OP_INIT_WAPIPN) {
-#else
- if (NULL != keyRSC) {
-#endif // WAPI_ENABLE
- memcpy(cmd->keyRSC, keyRSC, sizeof(cmd->keyRSC));
- }
- cmd->key_op_ctrl = key_op_ctrl;
-
- if(macAddr) {
- memcpy(cmd->key_macaddr,macAddr,IEEE80211_ADDR_LEN);
- }
-
- return (wmi_cmd_send(wmip, osbuf, WMI_ADD_CIPHER_KEY_CMDID, sync_flag));
-}
-
-int
-wmi_add_krk_cmd(struct wmi_t *wmip, u8 *krk)
-{
- void *osbuf;
- WMI_ADD_KRK_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_ADD_KRK_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- memcpy(cmd->krk, krk, WMI_KRK_LEN);
-
- return (wmi_cmd_send(wmip, osbuf, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_delete_krk_cmd(struct wmi_t *wmip)
-{
- return wmi_simple_cmd(wmip, WMI_DELETE_KRK_CMDID);
-}
-
-int
-wmi_deleteKey_cmd(struct wmi_t *wmip, u8 keyIndex)
-{
- void *osbuf;
- WMI_DELETE_CIPHER_KEY_CMD *cmd;
-
- if (keyIndex > WMI_MAX_KEY_INDEX) {
- return A_EINVAL;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_DELETE_CIPHER_KEY_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->keyIndex = keyIndex;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_DELETE_CIPHER_KEY_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_setPmkid_cmd(struct wmi_t *wmip, u8 *bssid, u8 *pmkId,
- bool set)
-{
- void *osbuf;
- WMI_SET_PMKID_CMD *cmd;
-
- if (bssid == NULL) {
- return A_EINVAL;
- }
-
- if ((set == true) && (pmkId == NULL)) {
- return A_EINVAL;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_PMKID_CMD *)(A_NETBUF_DATA(osbuf));
- memcpy(cmd->bssid, bssid, sizeof(cmd->bssid));
- if (set == true) {
- memcpy(cmd->pmkid, pmkId, sizeof(cmd->pmkid));
- cmd->enable = PMKID_ENABLE;
- } else {
- A_MEMZERO(cmd->pmkid, sizeof(cmd->pmkid));
- cmd->enable = PMKID_DISABLE;
- }
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_PMKID_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_tkip_countermeasures_cmd(struct wmi_t *wmip, bool en)
-{
- void *osbuf;
- WMI_SET_TKIP_COUNTERMEASURES_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_TKIP_COUNTERMEASURES_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->cm_en = (en == true)? WMI_TKIP_CM_ENABLE : WMI_TKIP_CM_DISABLE;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_TKIP_COUNTERMEASURES_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_akmp_params_cmd(struct wmi_t *wmip,
- WMI_SET_AKMP_PARAMS_CMD *akmpParams)
-{
- void *osbuf;
- WMI_SET_AKMP_PARAMS_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
- cmd = (WMI_SET_AKMP_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->akmpInfo = akmpParams->akmpInfo;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_AKMP_PARAMS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_pmkid_list_cmd(struct wmi_t *wmip,
- WMI_SET_PMKID_LIST_CMD *pmkInfo)
-{
- void *osbuf;
- WMI_SET_PMKID_LIST_CMD *cmd;
- u16 cmdLen;
- u8 i;
-
- cmdLen = sizeof(pmkInfo->numPMKID) +
- pmkInfo->numPMKID * sizeof(WMI_PMKID);
-
- osbuf = A_NETBUF_ALLOC(cmdLen);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, cmdLen);
- cmd = (WMI_SET_PMKID_LIST_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->numPMKID = pmkInfo->numPMKID;
-
- for (i = 0; i < cmd->numPMKID; i++) {
- memcpy(&cmd->pmkidList[i], &pmkInfo->pmkidList[i],
- WMI_PMKID_LEN);
- }
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_PMKID_LIST_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_get_pmkid_list_cmd(struct wmi_t *wmip)
-{
- return wmi_simple_cmd(wmip, WMI_GET_PMKID_LIST_CMDID);
-}
-
-int
-wmi_dataSync_send(struct wmi_t *wmip, void *osbuf, HTC_ENDPOINT_ID eid)
-{
- WMI_DATA_HDR *dtHdr;
-
- A_ASSERT( eid != wmip->wmi_endpoint_id);
- A_ASSERT(osbuf != NULL);
-
- if (A_NETBUF_PUSH(osbuf, sizeof(WMI_DATA_HDR)) != 0) {
- return A_NO_MEMORY;
- }
-
- dtHdr = (WMI_DATA_HDR *)A_NETBUF_DATA(osbuf);
- dtHdr->info =
- (SYNC_MSGTYPE & WMI_DATA_HDR_MSG_TYPE_MASK) << WMI_DATA_HDR_MSG_TYPE_SHIFT;
-
- dtHdr->info3 = 0;
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter - eid %d\n", DBGARG, eid));
-
- return (A_WMI_CONTROL_TX(wmip->wmi_devt, osbuf, eid));
-}
-
-typedef struct _WMI_DATA_SYNC_BUFS {
- u8 trafficClass;
- void *osbuf;
-}WMI_DATA_SYNC_BUFS;
-
-static int
-wmi_sync_point(struct wmi_t *wmip)
-{
- void *cmd_osbuf;
- WMI_SYNC_CMD *cmd;
- WMI_DATA_SYNC_BUFS dataSyncBufs[WMM_NUM_AC];
- u8 i,numPriStreams=0;
- int status = 0;
-
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- memset(dataSyncBufs,0,sizeof(dataSyncBufs));
-
- /* lock out while we walk through the priority list and assemble our local array */
- LOCK_WMI(wmip);
-
- for (i=0; i < WMM_NUM_AC ; i++) {
- if (wmip->wmi_fatPipeExists & (1 << i)) {
- numPriStreams++;
- dataSyncBufs[numPriStreams-1].trafficClass = i;
- }
- }
-
- UNLOCK_WMI(wmip);
-
- /* dataSyncBufs is now filled with entries (starting at index 0) containing valid streamIDs */
-
- do {
- /*
- * We allocate all network buffers needed so we will be able to
- * send all required frames.
- */
- cmd_osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (cmd_osbuf == NULL) {
- status = A_NO_MEMORY;
- break;
- }
-
- A_NETBUF_PUT(cmd_osbuf, sizeof(*cmd));
-
- cmd = (WMI_SYNC_CMD *)(A_NETBUF_DATA(cmd_osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
-
- /* In the SYNC cmd sent on the control Ep, send a bitmap of the data
- * eps on which the Data Sync will be sent
- */
- cmd->dataSyncMap = wmip->wmi_fatPipeExists;
-
- for (i=0; i < numPriStreams ; i++) {
- dataSyncBufs[i].osbuf = A_NETBUF_ALLOC(0);
- if (dataSyncBufs[i].osbuf == NULL) {
- status = A_NO_MEMORY;
- break;
- }
- } //end for
-
- /* if Buffer allocation for any of the dataSync fails, then do not
- * send the Synchronize cmd on the control ep
- */
- if (status) {
- break;
- }
-
- /*
- * Send sync cmd followed by sync data messages on all endpoints being
- * used
- */
- status = wmi_cmd_send(wmip, cmd_osbuf, WMI_SYNCHRONIZE_CMDID,
- NO_SYNC_WMIFLAG);
-
- if (status) {
- break;
- }
- /* cmd buffer sent, we no longer own it */
- cmd_osbuf = NULL;
-
- for(i=0; i < numPriStreams; i++) {
- A_ASSERT(dataSyncBufs[i].osbuf != NULL);
- status = wmi_dataSync_send(wmip,
- dataSyncBufs[i].osbuf,
- A_WMI_Ac2EndpointID(wmip->wmi_devt,
- dataSyncBufs[i].
- trafficClass)
- );
-
- if (status) {
- break;
- }
- /* we don't own this buffer anymore, NULL it out of the array so it
- * won't get cleaned up */
- dataSyncBufs[i].osbuf = NULL;
- } //end for
-
- } while(false);
-
- /* free up any resources left over (possibly due to an error) */
-
- if (cmd_osbuf != NULL) {
- A_NETBUF_FREE(cmd_osbuf);
- }
-
- for (i = 0; i < numPriStreams; i++) {
- if (dataSyncBufs[i].osbuf != NULL) {
- A_NETBUF_FREE(dataSyncBufs[i].osbuf);
- }
- }
-
- return (status);
-}
-
-int
-wmi_create_pstream_cmd(struct wmi_t *wmip, WMI_CREATE_PSTREAM_CMD *params)
-{
- void *osbuf;
- WMI_CREATE_PSTREAM_CMD *cmd;
- u8 fatPipeExistsForAC=0;
- s32 minimalPHY = 0;
- s32 nominalPHY = 0;
-
- /* Validate all the parameters. */
- if( !((params->userPriority < 8) &&
- (params->userPriority <= 0x7) &&
- (convert_userPriority_to_trafficClass(params->userPriority) == params->trafficClass) &&
- (params->trafficDirection == UPLINK_TRAFFIC ||
- params->trafficDirection == DNLINK_TRAFFIC ||
- params->trafficDirection == BIDIR_TRAFFIC) &&
- (params->trafficType == TRAFFIC_TYPE_APERIODIC ||
- params->trafficType == TRAFFIC_TYPE_PERIODIC ) &&
- (params->voicePSCapability == DISABLE_FOR_THIS_AC ||
- params->voicePSCapability == ENABLE_FOR_THIS_AC ||
- params->voicePSCapability == ENABLE_FOR_ALL_AC) &&
- (params->tsid == WMI_IMPLICIT_PSTREAM || params->tsid <= WMI_MAX_THINSTREAM)) )
- {
- return A_EINVAL;
- }
-
- //
- // check nominal PHY rate is >= minimalPHY, so that DUT
- // can allow TSRS IE
- //
-
- // get the physical rate
- minimalPHY = ((params->minPhyRate / 1000)/1000); // unit of bps
-
- // check minimal phy < nominal phy rate
- //
- if (params->nominalPHY >= minimalPHY)
- {
- nominalPHY = (params->nominalPHY * 1000)/500; // unit of 500 kbps
- A_DPRINTF(DBG_WMI,
- (DBGFMT "TSRS IE Enabled::MinPhy %x->NominalPhy ===> %x\n", DBGARG,
- minimalPHY, nominalPHY));
-
- params->nominalPHY = nominalPHY;
- }
- else
- {
- params->nominalPHY = 0;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- A_DPRINTF(DBG_WMI,
- (DBGFMT "Sending create_pstream_cmd: ac=%d tsid:%d\n", DBGARG,
- params->trafficClass, params->tsid));
-
- cmd = (WMI_CREATE_PSTREAM_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- memcpy(cmd, params, sizeof(*cmd));
-
- /* this is an implicitly created Fat pipe */
- if ((u32)params->tsid == (u32)WMI_IMPLICIT_PSTREAM) {
- LOCK_WMI(wmip);
- fatPipeExistsForAC = (wmip->wmi_fatPipeExists & (1 << params->trafficClass));
- wmip->wmi_fatPipeExists |= (1<<params->trafficClass);
- UNLOCK_WMI(wmip);
- } else {
- /* this is an explicitly created thin stream within a fat pipe */
- LOCK_WMI(wmip);
- fatPipeExistsForAC = (wmip->wmi_fatPipeExists & (1 << params->trafficClass));
- wmip->wmi_streamExistsForAC[params->trafficClass] |= (1<<params->tsid);
- /* if a thinstream becomes active, the fat pipe automatically
- * becomes active
- */
- wmip->wmi_fatPipeExists |= (1<<params->trafficClass);
- UNLOCK_WMI(wmip);
- }
-
- /* Indicate activty change to driver layer only if this is the
- * first TSID to get created in this AC explicitly or an implicit
- * fat pipe is getting created.
- */
- if (!fatPipeExistsForAC) {
- A_WMI_STREAM_TX_ACTIVE(wmip->wmi_devt, params->trafficClass);
- }
-
- /* mike: should be SYNC_BEFORE_WMIFLAG */
- return (wmi_cmd_send(wmip, osbuf, WMI_CREATE_PSTREAM_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_delete_pstream_cmd(struct wmi_t *wmip, u8 trafficClass, u8 tsid)
-{
- void *osbuf;
- WMI_DELETE_PSTREAM_CMD *cmd;
- int status;
- u16 activeTsids=0;
-
- /* validate the parameters */
- if (trafficClass > 3) {
- A_DPRINTF(DBG_WMI, (DBGFMT "Invalid trafficClass: %d\n", DBGARG, trafficClass));
- return A_EINVAL;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_DELETE_PSTREAM_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
-
- cmd->trafficClass = trafficClass;
- cmd->tsid = tsid;
-
- LOCK_WMI(wmip);
- activeTsids = wmip->wmi_streamExistsForAC[trafficClass];
- UNLOCK_WMI(wmip);
-
- /* Check if the tsid was created & exists */
- if (!(activeTsids & (1<<tsid))) {
-
- A_NETBUF_FREE(osbuf);
- A_DPRINTF(DBG_WMI,
- (DBGFMT "TSID %d does'nt exist for trafficClass: %d\n", DBGARG, tsid, trafficClass));
- /* TODO: return a more appropriate err code */
- return A_ERROR;
- }
-
- A_DPRINTF(DBG_WMI,
- (DBGFMT "Sending delete_pstream_cmd: trafficClass: %d tsid=%d\n", DBGARG, trafficClass, tsid));
-
- status = (wmi_cmd_send(wmip, osbuf, WMI_DELETE_PSTREAM_CMDID,
- SYNC_BEFORE_WMIFLAG));
-
- LOCK_WMI(wmip);
- wmip->wmi_streamExistsForAC[trafficClass] &= ~(1<<tsid);
- activeTsids = wmip->wmi_streamExistsForAC[trafficClass];
- UNLOCK_WMI(wmip);
-
-
- /* Indicate stream inactivity to driver layer only if all tsids
- * within this AC are deleted.
- */
- if(!activeTsids) {
- A_WMI_STREAM_TX_INACTIVE(wmip->wmi_devt, trafficClass);
- wmip->wmi_fatPipeExists &= ~(1<<trafficClass);
- }
-
- return status;
-}
-
-int
-wmi_set_framerate_cmd(struct wmi_t *wmip, u8 bEnable, u8 type, u8 subType, u16 rateMask)
-{
- void *osbuf;
- WMI_FRAME_RATES_CMD *cmd;
- u8 frameType;
-
- A_DPRINTF(DBG_WMI,
- (DBGFMT " type %02X, subType %02X, rateMask %04x\n", DBGARG, type, subType, rateMask));
-
- if((type != IEEE80211_FRAME_TYPE_MGT && type != IEEE80211_FRAME_TYPE_CTL) ||
- (subType > 15)){
-
- return A_EINVAL;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_FRAME_RATES_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
-
- frameType = (u8)((subType << 4) | type);
-
- cmd->bEnableMask = bEnable;
- cmd->frameType = frameType;
- cmd->frameRateMask = rateMask;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_FRAMERATES_CMDID, NO_SYNC_WMIFLAG));
-}
-
-/*
- * used to set the bit rate. rate is in Kbps. If rate == -1
- * then auto selection is used.
- */
-int
-wmi_set_bitrate_cmd(struct wmi_t *wmip, s32 dataRate, s32 mgmtRate, s32 ctlRate)
-{
- void *osbuf;
- WMI_BIT_RATE_CMD *cmd;
- s8 drix, mrix, crix, ret_val;
-
- if (dataRate != -1) {
- ret_val = wmi_validate_bitrate(wmip, dataRate, &drix);
- if(ret_val == A_EINVAL){
- return A_EINVAL;
- }
- } else {
- drix = -1;
- }
-
- if (mgmtRate != -1) {
- ret_val = wmi_validate_bitrate(wmip, mgmtRate, &mrix);
- if(ret_val == A_EINVAL){
- return A_EINVAL;
- }
- } else {
- mrix = -1;
- }
- if (ctlRate != -1) {
- ret_val = wmi_validate_bitrate(wmip, ctlRate, &crix);
- if(ret_val == A_EINVAL){
- return A_EINVAL;
- }
- } else {
- crix = -1;
- }
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_BIT_RATE_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
-
- cmd->rateIndex = drix;
- cmd->mgmtRateIndex = mrix;
- cmd->ctlRateIndex = crix;
-
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BITRATE_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_get_bitrate_cmd(struct wmi_t *wmip)
-{
- return wmi_simple_cmd(wmip, WMI_GET_BITRATE_CMDID);
-}
-
-/*
- * Returns true iff the given rate index is legal in the current PHY mode.
- */
-bool
-wmi_is_bitrate_index_valid(struct wmi_t *wmip, s32 rateIndex)
-{
- WMI_PHY_MODE phyMode = (WMI_PHY_MODE) wmip->wmi_phyMode;
- bool isValid = true;
- switch(phyMode) {
- case WMI_11A_MODE:
- if (wmip->wmi_ht_allowed[A_BAND_5GHZ]){
- if ((rateIndex < MODE_A_SUPPORT_RATE_START) || (rateIndex > MODE_GHT20_SUPPORT_RATE_STOP)) {
- isValid = false;
- }
- } else {
- if ((rateIndex < MODE_A_SUPPORT_RATE_START) || (rateIndex > MODE_A_SUPPORT_RATE_STOP)) {
- isValid = false;
- }
- }
- break;
-
- case WMI_11B_MODE:
- if ((rateIndex < MODE_B_SUPPORT_RATE_START) || (rateIndex > MODE_B_SUPPORT_RATE_STOP)) {
- isValid = false;
- }
- break;
-
- case WMI_11GONLY_MODE:
- if (wmip->wmi_ht_allowed[A_BAND_24GHZ]){
- if ((rateIndex < MODE_GONLY_SUPPORT_RATE_START) || (rateIndex > MODE_GHT20_SUPPORT_RATE_STOP)) {
- isValid = false;
- }
- } else {
- if ((rateIndex < MODE_GONLY_SUPPORT_RATE_START) || (rateIndex > MODE_GONLY_SUPPORT_RATE_STOP)) {
- isValid = false;
- }
- }
- break;
-
- case WMI_11G_MODE:
- case WMI_11AG_MODE:
- if (wmip->wmi_ht_allowed[A_BAND_24GHZ]){
- if ((rateIndex < MODE_G_SUPPORT_RATE_START) || (rateIndex > MODE_GHT20_SUPPORT_RATE_STOP)) {
- isValid = false;
- }
- } else {
- if ((rateIndex < MODE_G_SUPPORT_RATE_START) || (rateIndex > MODE_G_SUPPORT_RATE_STOP)) {
- isValid = false;
- }
- }
- break;
- default:
- A_ASSERT(false);
- break;
- }
-
- return isValid;
-}
-
-s8 wmi_validate_bitrate(struct wmi_t *wmip, s32 rate, s8 *rate_idx)
-{
- s8 i;
-
- for (i=0;;i++)
- {
- if (wmi_rateTable[(u32) i][0] == 0) {
- return A_EINVAL;
- }
- if (wmi_rateTable[(u32) i][0] == rate) {
- break;
- }
- }
-
- if(wmi_is_bitrate_index_valid(wmip, (s32) i) != true) {
- return A_EINVAL;
- }
-
- *rate_idx = i;
- return 0;
-}
-
-int
-wmi_set_fixrates_cmd(struct wmi_t *wmip, u32 fixRatesMask)
-{
- void *osbuf;
- WMI_FIX_RATES_CMD *cmd;
-#if 0
- s32 rateIndex;
-/* This check does not work for AR6003 as the HT modes are enabled only when
- * the STA is connected to a HT_BSS and is not based only on channel. It is
- * safe to skip this check however because rate control will only use rates
- * that are permitted by the valid rate mask and the fix rate mask. Meaning
- * the fix rate mask is not sufficient by itself to cause an invalid rate
- * to be used. */
- /* Make sure all rates in the mask are valid in the current PHY mode */
- for(rateIndex = 0; rateIndex < MAX_NUMBER_OF_SUPPORT_RATES; rateIndex++) {
- if((1 << rateIndex) & (u32)fixRatesMask) {
- if(wmi_is_bitrate_index_valid(wmip, rateIndex) != true) {
- A_DPRINTF(DBG_WMI, (DBGFMT "Set Fix Rates command failed: Given rate is illegal in current PHY mode\n", DBGARG));
- return A_EINVAL;
- }
- }
- }
-#endif
-
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_FIX_RATES_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
-
- cmd->fixRateMask = fixRatesMask;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_FIXRATES_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_get_ratemask_cmd(struct wmi_t *wmip)
-{
- return wmi_simple_cmd(wmip, WMI_GET_FIXRATES_CMDID);
-}
-
-int
-wmi_get_channelList_cmd(struct wmi_t *wmip)
-{
- return wmi_simple_cmd(wmip, WMI_GET_CHANNEL_LIST_CMDID);
-}
-
-/*
- * used to generate a wmi sey channel Parameters cmd.
- * mode should always be specified and corresponds to the phy mode of the
- * wlan.
- * numChan should alway sbe specified. If zero indicates that all available
- * channels should be used.
- * channelList is an array of channel frequencies (in Mhz) which the radio
- * should limit its operation to. It should be NULL if numChan == 0. Size of
- * array should correspond to numChan entries.
- */
-int
-wmi_set_channelParams_cmd(struct wmi_t *wmip, u8 scanParam,
- WMI_PHY_MODE mode, s8 numChan,
- u16 *channelList)
-{
- void *osbuf;
- WMI_CHANNEL_PARAMS_CMD *cmd;
- s8 size;
-
- size = sizeof (*cmd);
-
- if (numChan) {
- if (numChan > WMI_MAX_CHANNELS) {
- return A_EINVAL;
- }
- size += sizeof(u16) * (numChan - 1);
- }
-
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
-
- cmd = (WMI_CHANNEL_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, size);
-
- wmip->wmi_phyMode = mode;
- cmd->scanParam = scanParam;
- cmd->phyMode = mode;
- cmd->numChannels = numChan;
- memcpy(cmd->channelList, channelList, numChan * sizeof(u16));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_CHANNEL_PARAMS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-void
-wmi_cache_configure_rssithreshold(struct wmi_t *wmip, WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd)
-{
- SQ_THRESHOLD_PARAMS *sq_thresh =
- &wmip->wmi_SqThresholdParams[SIGNAL_QUALITY_METRICS_RSSI];
- /*
- * Parse the command and store the threshold values here. The checks
- * for valid values can be put here
- */
- sq_thresh->weight = rssiCmd->weight;
- sq_thresh->polling_interval = rssiCmd->pollTime;
-
- sq_thresh->upper_threshold[0] = rssiCmd->thresholdAbove1_Val - SIGNAL_QUALITY_NOISE_FLOOR;
- sq_thresh->upper_threshold[1] = rssiCmd->thresholdAbove2_Val - SIGNAL_QUALITY_NOISE_FLOOR;
- sq_thresh->upper_threshold[2] = rssiCmd->thresholdAbove3_Val - SIGNAL_QUALITY_NOISE_FLOOR;
- sq_thresh->upper_threshold[3] = rssiCmd->thresholdAbove4_Val - SIGNAL_QUALITY_NOISE_FLOOR;
- sq_thresh->upper_threshold[4] = rssiCmd->thresholdAbove5_Val - SIGNAL_QUALITY_NOISE_FLOOR;
- sq_thresh->upper_threshold[5] = rssiCmd->thresholdAbove6_Val - SIGNAL_QUALITY_NOISE_FLOOR;
- sq_thresh->upper_threshold_valid_count = 6;
-
- /* List sorted in descending order */
- sq_thresh->lower_threshold[0] = rssiCmd->thresholdBelow6_Val - SIGNAL_QUALITY_NOISE_FLOOR;
- sq_thresh->lower_threshold[1] = rssiCmd->thresholdBelow5_Val - SIGNAL_QUALITY_NOISE_FLOOR;
- sq_thresh->lower_threshold[2] = rssiCmd->thresholdBelow4_Val - SIGNAL_QUALITY_NOISE_FLOOR;
- sq_thresh->lower_threshold[3] = rssiCmd->thresholdBelow3_Val - SIGNAL_QUALITY_NOISE_FLOOR;
- sq_thresh->lower_threshold[4] = rssiCmd->thresholdBelow2_Val - SIGNAL_QUALITY_NOISE_FLOOR;
- sq_thresh->lower_threshold[5] = rssiCmd->thresholdBelow1_Val - SIGNAL_QUALITY_NOISE_FLOOR;
- sq_thresh->lower_threshold_valid_count = 6;
-
- if (!rssi_event_value) {
- /*
- * Configuring the thresholds to their extremes allows the host to get an
- * event from the target which is used for the configuring the correct
- * thresholds
- */
- rssiCmd->thresholdAbove1_Val = sq_thresh->upper_threshold[0];
- rssiCmd->thresholdBelow1_Val = sq_thresh->lower_threshold[0];
- } else {
- /*
- * In case the user issues multiple times of rssi_threshold_setting,
- * we should not use the extreames anymore, the target does not expect that.
- */
- rssiCmd->thresholdAbove1_Val = ar6000_get_upper_threshold(rssi_event_value, sq_thresh,
- sq_thresh->upper_threshold_valid_count);
- rssiCmd->thresholdBelow1_Val = ar6000_get_lower_threshold(rssi_event_value, sq_thresh,
- sq_thresh->lower_threshold_valid_count);
-}
-}
-
-int
-wmi_set_rssi_threshold_params(struct wmi_t *wmip,
- WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd)
-{
-
- /* Check these values are in ascending order */
- if( rssiCmd->thresholdAbove6_Val <= rssiCmd->thresholdAbove5_Val ||
- rssiCmd->thresholdAbove5_Val <= rssiCmd->thresholdAbove4_Val ||
- rssiCmd->thresholdAbove4_Val <= rssiCmd->thresholdAbove3_Val ||
- rssiCmd->thresholdAbove3_Val <= rssiCmd->thresholdAbove2_Val ||
- rssiCmd->thresholdAbove2_Val <= rssiCmd->thresholdAbove1_Val ||
- rssiCmd->thresholdBelow6_Val <= rssiCmd->thresholdBelow5_Val ||
- rssiCmd->thresholdBelow5_Val <= rssiCmd->thresholdBelow4_Val ||
- rssiCmd->thresholdBelow4_Val <= rssiCmd->thresholdBelow3_Val ||
- rssiCmd->thresholdBelow3_Val <= rssiCmd->thresholdBelow2_Val ||
- rssiCmd->thresholdBelow2_Val <= rssiCmd->thresholdBelow1_Val)
- {
- return A_EINVAL;
- }
-
- wmi_cache_configure_rssithreshold(wmip, rssiCmd);
-
- return (wmi_send_rssi_threshold_params(wmip, rssiCmd));
-}
-
-int
-wmi_set_ip_cmd(struct wmi_t *wmip, WMI_SET_IP_CMD *ipCmd)
-{
- void *osbuf;
- WMI_SET_IP_CMD *cmd;
-
- /* Multicast address are not valid */
- if((*((u8 *)&ipCmd->ips[0]) >= 0xE0) ||
- (*((u8 *)&ipCmd->ips[1]) >= 0xE0)) {
- return A_EINVAL;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_SET_IP_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_SET_IP_CMD));
- cmd = (WMI_SET_IP_CMD *)(A_NETBUF_DATA(osbuf));
- memcpy(cmd, ipCmd, sizeof(WMI_SET_IP_CMD));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_IP_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_host_sleep_mode_cmd(struct wmi_t *wmip,
- WMI_SET_HOST_SLEEP_MODE_CMD *hostModeCmd)
-{
- void *osbuf;
- s8 size;
- WMI_SET_HOST_SLEEP_MODE_CMD *cmd;
- u16 activeTsids=0;
- u8 streamExists=0;
- u8 i;
-
- if( hostModeCmd->awake == hostModeCmd->asleep) {
- return A_EINVAL;
- }
-
- size = sizeof (*cmd);
-
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
-
- cmd = (WMI_SET_HOST_SLEEP_MODE_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, size);
- memcpy(cmd, hostModeCmd, sizeof(WMI_SET_HOST_SLEEP_MODE_CMD));
-
- if(hostModeCmd->asleep) {
- /*
- * Relinquish credits from all implicitly created pstreams since when we
- * go to sleep. If user created explicit thinstreams exists with in a
- * fatpipe leave them intact for the user to delete
- */
- LOCK_WMI(wmip);
- streamExists = wmip->wmi_fatPipeExists;
- UNLOCK_WMI(wmip);
-
- for(i=0;i< WMM_NUM_AC;i++) {
- if (streamExists & (1<<i)) {
- LOCK_WMI(wmip);
- activeTsids = wmip->wmi_streamExistsForAC[i];
- UNLOCK_WMI(wmip);
- /* If there are no user created thin streams delete the fatpipe */
- if(!activeTsids) {
- streamExists &= ~(1<<i);
- /*Indicate inactivity to drv layer for this fatpipe(pstream)*/
- A_WMI_STREAM_TX_INACTIVE(wmip->wmi_devt,i);
- }
- }
- }
-
- /* Update the fatpipes that exists*/
- LOCK_WMI(wmip);
- wmip->wmi_fatPipeExists = streamExists;
- UNLOCK_WMI(wmip);
- }
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_HOST_SLEEP_MODE_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_wow_mode_cmd(struct wmi_t *wmip,
- WMI_SET_WOW_MODE_CMD *wowModeCmd)
-{
- void *osbuf;
- s8 size;
- WMI_SET_WOW_MODE_CMD *cmd;
-
- size = sizeof (*cmd);
-
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
-
- cmd = (WMI_SET_WOW_MODE_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, size);
- memcpy(cmd, wowModeCmd, sizeof(WMI_SET_WOW_MODE_CMD));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_WOW_MODE_CMDID,
- NO_SYNC_WMIFLAG));
-
-}
-
-int
-wmi_get_wow_list_cmd(struct wmi_t *wmip,
- WMI_GET_WOW_LIST_CMD *wowListCmd)
-{
- void *osbuf;
- s8 size;
- WMI_GET_WOW_LIST_CMD *cmd;
-
- size = sizeof (*cmd);
-
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
-
- cmd = (WMI_GET_WOW_LIST_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, size);
- memcpy(cmd, wowListCmd, sizeof(WMI_GET_WOW_LIST_CMD));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_GET_WOW_LIST_CMDID,
- NO_SYNC_WMIFLAG));
-
-}
-
-static int
-wmi_get_wow_list_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_GET_WOW_LIST_REPLY *reply;
-
- if (len < sizeof(WMI_GET_WOW_LIST_REPLY)) {
- return A_EINVAL;
- }
- reply = (WMI_GET_WOW_LIST_REPLY *)datap;
-
- A_WMI_WOW_LIST_EVENT(wmip->wmi_devt, reply->num_filters,
- reply);
-
- return 0;
-}
-
-int wmi_add_wow_pattern_cmd(struct wmi_t *wmip,
- WMI_ADD_WOW_PATTERN_CMD *addWowCmd,
- u8 *pattern, u8 *mask,
- u8 pattern_size)
-{
- void *osbuf;
- s8 size;
- WMI_ADD_WOW_PATTERN_CMD *cmd;
- u8 *filter_mask = NULL;
-
- size = sizeof (*cmd);
-
- size += ((2 * addWowCmd->filter_size)* sizeof(u8));
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
-
- cmd = (WMI_ADD_WOW_PATTERN_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->filter_list_id = addWowCmd->filter_list_id;
- cmd->filter_offset = addWowCmd->filter_offset;
- cmd->filter_size = addWowCmd->filter_size;
-
- memcpy(cmd->filter, pattern, addWowCmd->filter_size);
-
- filter_mask = (u8 *)(cmd->filter + cmd->filter_size);
- memcpy(filter_mask, mask, addWowCmd->filter_size);
-
-
- return (wmi_cmd_send(wmip, osbuf, WMI_ADD_WOW_PATTERN_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_del_wow_pattern_cmd(struct wmi_t *wmip,
- WMI_DEL_WOW_PATTERN_CMD *delWowCmd)
-{
- void *osbuf;
- s8 size;
- WMI_DEL_WOW_PATTERN_CMD *cmd;
-
- size = sizeof (*cmd);
-
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
-
- cmd = (WMI_DEL_WOW_PATTERN_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, size);
- memcpy(cmd, delWowCmd, sizeof(WMI_DEL_WOW_PATTERN_CMD));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_DEL_WOW_PATTERN_CMDID,
- NO_SYNC_WMIFLAG));
-
-}
-
-void
-wmi_cache_configure_snrthreshold(struct wmi_t *wmip, WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd)
-{
- SQ_THRESHOLD_PARAMS *sq_thresh =
- &wmip->wmi_SqThresholdParams[SIGNAL_QUALITY_METRICS_SNR];
- /*
- * Parse the command and store the threshold values here. The checks
- * for valid values can be put here
- */
- sq_thresh->weight = snrCmd->weight;
- sq_thresh->polling_interval = snrCmd->pollTime;
-
- sq_thresh->upper_threshold[0] = snrCmd->thresholdAbove1_Val;
- sq_thresh->upper_threshold[1] = snrCmd->thresholdAbove2_Val;
- sq_thresh->upper_threshold[2] = snrCmd->thresholdAbove3_Val;
- sq_thresh->upper_threshold[3] = snrCmd->thresholdAbove4_Val;
- sq_thresh->upper_threshold_valid_count = 4;
-
- /* List sorted in descending order */
- sq_thresh->lower_threshold[0] = snrCmd->thresholdBelow4_Val;
- sq_thresh->lower_threshold[1] = snrCmd->thresholdBelow3_Val;
- sq_thresh->lower_threshold[2] = snrCmd->thresholdBelow2_Val;
- sq_thresh->lower_threshold[3] = snrCmd->thresholdBelow1_Val;
- sq_thresh->lower_threshold_valid_count = 4;
-
- if (!snr_event_value) {
- /*
- * Configuring the thresholds to their extremes allows the host to get an
- * event from the target which is used for the configuring the correct
- * thresholds
- */
- snrCmd->thresholdAbove1_Val = (u8)sq_thresh->upper_threshold[0];
- snrCmd->thresholdBelow1_Val = (u8)sq_thresh->lower_threshold[0];
- } else {
- /*
- * In case the user issues multiple times of snr_threshold_setting,
- * we should not use the extreames anymore, the target does not expect that.
- */
- snrCmd->thresholdAbove1_Val = ar6000_get_upper_threshold(snr_event_value, sq_thresh,
- sq_thresh->upper_threshold_valid_count);
- snrCmd->thresholdBelow1_Val = ar6000_get_lower_threshold(snr_event_value, sq_thresh,
- sq_thresh->lower_threshold_valid_count);
- }
-
-}
-int
-wmi_set_snr_threshold_params(struct wmi_t *wmip,
- WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd)
-{
- if( snrCmd->thresholdAbove4_Val <= snrCmd->thresholdAbove3_Val ||
- snrCmd->thresholdAbove3_Val <= snrCmd->thresholdAbove2_Val ||
- snrCmd->thresholdAbove2_Val <= snrCmd->thresholdAbove1_Val ||
- snrCmd->thresholdBelow4_Val <= snrCmd->thresholdBelow3_Val ||
- snrCmd->thresholdBelow3_Val <= snrCmd->thresholdBelow2_Val ||
- snrCmd->thresholdBelow2_Val <= snrCmd->thresholdBelow1_Val)
- {
- return A_EINVAL;
- }
- wmi_cache_configure_snrthreshold(wmip, snrCmd);
- return (wmi_send_snr_threshold_params(wmip, snrCmd));
-}
-
-int
-wmi_clr_rssi_snr(struct wmi_t *wmip)
-{
- void *osbuf;
-
- osbuf = A_NETBUF_ALLOC(sizeof(int));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- return (wmi_cmd_send(wmip, osbuf, WMI_CLR_RSSI_SNR_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_lq_threshold_params(struct wmi_t *wmip,
- WMI_LQ_THRESHOLD_PARAMS_CMD *lqCmd)
-{
- void *osbuf;
- s8 size;
- WMI_LQ_THRESHOLD_PARAMS_CMD *cmd;
- /* These values are in ascending order */
- if( lqCmd->thresholdAbove4_Val <= lqCmd->thresholdAbove3_Val ||
- lqCmd->thresholdAbove3_Val <= lqCmd->thresholdAbove2_Val ||
- lqCmd->thresholdAbove2_Val <= lqCmd->thresholdAbove1_Val ||
- lqCmd->thresholdBelow4_Val <= lqCmd->thresholdBelow3_Val ||
- lqCmd->thresholdBelow3_Val <= lqCmd->thresholdBelow2_Val ||
- lqCmd->thresholdBelow2_Val <= lqCmd->thresholdBelow1_Val ) {
-
- return A_EINVAL;
- }
-
- size = sizeof (*cmd);
-
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
-
- cmd = (WMI_LQ_THRESHOLD_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, size);
- memcpy(cmd, lqCmd, sizeof(WMI_LQ_THRESHOLD_PARAMS_CMD));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_LQ_THRESHOLD_PARAMS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_error_report_bitmask(struct wmi_t *wmip, u32 mask)
-{
- void *osbuf;
- s8 size;
- WMI_TARGET_ERROR_REPORT_BITMASK *cmd;
-
- size = sizeof (*cmd);
-
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
-
- cmd = (WMI_TARGET_ERROR_REPORT_BITMASK *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, size);
-
- cmd->bitmask = mask;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_TARGET_ERROR_REPORT_BITMASK_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_get_challenge_resp_cmd(struct wmi_t *wmip, u32 cookie, u32 source)
-{
- void *osbuf;
- WMIX_HB_CHALLENGE_RESP_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMIX_HB_CHALLENGE_RESP_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->cookie = cookie;
- cmd->source = source;
-
- return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_HB_CHALLENGE_RESP_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_config_debug_module_cmd(struct wmi_t *wmip, u16 mmask,
- u16 tsr, bool rep, u16 size,
- u32 valid)
-{
- void *osbuf;
- WMIX_DBGLOG_CFG_MODULE_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMIX_DBGLOG_CFG_MODULE_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->config.cfgmmask = mmask;
- cmd->config.cfgtsr = tsr;
- cmd->config.cfgrep = rep;
- cmd->config.cfgsize = size;
- cmd->config.cfgvalid = valid;
-
- return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_DBGLOG_CFG_MODULE_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_get_stats_cmd(struct wmi_t *wmip)
-{
- return wmi_simple_cmd(wmip, WMI_GET_STATISTICS_CMDID);
-}
-
-int
-wmi_addBadAp_cmd(struct wmi_t *wmip, u8 apIndex, u8 *bssid)
-{
- void *osbuf;
- WMI_ADD_BAD_AP_CMD *cmd;
-
- if ((bssid == NULL) || (apIndex > WMI_MAX_BAD_AP_INDEX)) {
- return A_EINVAL;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_ADD_BAD_AP_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->badApIndex = apIndex;
- memcpy(cmd->bssid, bssid, sizeof(cmd->bssid));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_ADD_BAD_AP_CMDID, SYNC_BEFORE_WMIFLAG));
-}
-
-int
-wmi_deleteBadAp_cmd(struct wmi_t *wmip, u8 apIndex)
-{
- void *osbuf;
- WMI_DELETE_BAD_AP_CMD *cmd;
-
- if (apIndex > WMI_MAX_BAD_AP_INDEX) {
- return A_EINVAL;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_DELETE_BAD_AP_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->badApIndex = apIndex;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_DELETE_BAD_AP_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_abort_scan_cmd(struct wmi_t *wmip)
-{
- return wmi_simple_cmd(wmip, WMI_ABORT_SCAN_CMDID);
-}
-
-int
-wmi_set_txPwr_cmd(struct wmi_t *wmip, u8 dbM)
-{
- void *osbuf;
- WMI_SET_TX_PWR_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_TX_PWR_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->dbM = dbM;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_TX_PWR_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_get_txPwr_cmd(struct wmi_t *wmip)
-{
- return wmi_simple_cmd(wmip, WMI_GET_TX_PWR_CMDID);
-}
-
-u16 wmi_get_mapped_qos_queue(struct wmi_t *wmip, u8 trafficClass)
-{
- u16 activeTsids=0;
-
- LOCK_WMI(wmip);
- activeTsids = wmip->wmi_streamExistsForAC[trafficClass];
- UNLOCK_WMI(wmip);
-
- return activeTsids;
-}
-
-int
-wmi_get_roam_tbl_cmd(struct wmi_t *wmip)
-{
- return wmi_simple_cmd(wmip, WMI_GET_ROAM_TBL_CMDID);
-}
-
-int
-wmi_get_roam_data_cmd(struct wmi_t *wmip, u8 roamDataType)
-{
- void *osbuf;
- u32 size = sizeof(u8);
- WMI_TARGET_ROAM_DATA *cmd;
-
- osbuf = A_NETBUF_ALLOC(size); /* no payload */
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
-
- cmd = (WMI_TARGET_ROAM_DATA *)(A_NETBUF_DATA(osbuf));
- cmd->roamDataType = roamDataType;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_GET_ROAM_DATA_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_roam_ctrl_cmd(struct wmi_t *wmip, WMI_SET_ROAM_CTRL_CMD *p,
- u8 size)
-{
- void *osbuf;
- WMI_SET_ROAM_CTRL_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
-
- cmd = (WMI_SET_ROAM_CTRL_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, size);
-
- memcpy(cmd, p, size);
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_ROAM_CTRL_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_powersave_timers_cmd(struct wmi_t *wmip,
- WMI_POWERSAVE_TIMERS_POLICY_CMD *pCmd,
- u8 size)
-{
- void *osbuf;
- WMI_POWERSAVE_TIMERS_POLICY_CMD *cmd;
-
- /* These timers can't be zero */
- if(!pCmd->psPollTimeout || !pCmd->triggerTimeout ||
- !(pCmd->apsdTimPolicy == IGNORE_TIM_ALL_QUEUES_APSD ||
- pCmd->apsdTimPolicy == PROCESS_TIM_ALL_QUEUES_APSD) ||
- !(pCmd->simulatedAPSDTimPolicy == IGNORE_TIM_SIMULATED_APSD ||
- pCmd->simulatedAPSDTimPolicy == PROCESS_TIM_SIMULATED_APSD))
- return A_EINVAL;
-
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
-
- cmd = (WMI_POWERSAVE_TIMERS_POLICY_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, size);
-
- memcpy(cmd, pCmd, size);
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_POWERSAVE_TIMERS_POLICY_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_access_params_cmd(struct wmi_t *wmip, u8 ac, u16 txop, u8 eCWmin,
- u8 eCWmax, u8 aifsn)
-{
- void *osbuf;
- WMI_SET_ACCESS_PARAMS_CMD *cmd;
-
- if ((eCWmin > WMI_MAX_CW_ACPARAM) || (eCWmax > WMI_MAX_CW_ACPARAM) ||
- (aifsn > WMI_MAX_AIFSN_ACPARAM) || (ac >= WMM_NUM_AC))
- {
- return A_EINVAL;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_ACCESS_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->txop = txop;
- cmd->eCWmin = eCWmin;
- cmd->eCWmax = eCWmax;
- cmd->aifsn = aifsn;
- cmd->ac = ac;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_ACCESS_PARAMS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_retry_limits_cmd(struct wmi_t *wmip, u8 frameType,
- u8 trafficClass, u8 maxRetries,
- u8 enableNotify)
-{
- void *osbuf;
- WMI_SET_RETRY_LIMITS_CMD *cmd;
-
- if ((frameType != MGMT_FRAMETYPE) && (frameType != CONTROL_FRAMETYPE) &&
- (frameType != DATA_FRAMETYPE))
- {
- return A_EINVAL;
- }
-
- if (maxRetries > WMI_MAX_RETRIES) {
- return A_EINVAL;
- }
-
- if (frameType != DATA_FRAMETYPE) {
- trafficClass = 0;
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_RETRY_LIMITS_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->frameType = frameType;
- cmd->trafficClass = trafficClass;
- cmd->maxRetries = maxRetries;
- cmd->enableNotify = enableNotify;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_RETRY_LIMITS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-void
-wmi_get_current_bssid(struct wmi_t *wmip, u8 *bssid)
-{
- if (bssid != NULL) {
- memcpy(bssid, wmip->wmi_bssid, ATH_MAC_LEN);
- }
-}
-
-int
-wmi_set_opt_mode_cmd(struct wmi_t *wmip, u8 optMode)
-{
- void *osbuf;
- WMI_SET_OPT_MODE_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_OPT_MODE_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->optMode = optMode;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_OPT_MODE_CMDID,
- SYNC_BOTH_WMIFLAG));
-}
-
-int
-wmi_opt_tx_frame_cmd(struct wmi_t *wmip,
- u8 frmType,
- u8 *dstMacAddr,
- u8 *bssid,
- u16 optIEDataLen,
- u8 *optIEData)
-{
- void *osbuf;
- WMI_OPT_TX_FRAME_CMD *cmd;
- osbuf = A_NETBUF_ALLOC(optIEDataLen + sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, (optIEDataLen + sizeof(*cmd)));
-
- cmd = (WMI_OPT_TX_FRAME_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, (optIEDataLen + sizeof(*cmd)-1));
-
- cmd->frmType = frmType;
- cmd->optIEDataLen = optIEDataLen;
- //cmd->optIEData = (u8 *)((int)cmd + sizeof(*cmd));
- memcpy(cmd->bssid, bssid, sizeof(cmd->bssid));
- memcpy(cmd->dstAddr, dstMacAddr, sizeof(cmd->dstAddr));
- memcpy(&cmd->optIEData[0], optIEData, optIEDataLen);
-
- return (wmi_cmd_send(wmip, osbuf, WMI_OPT_TX_FRAME_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_adhoc_bconIntvl_cmd(struct wmi_t *wmip, u16 intvl)
-{
- void *osbuf;
- WMI_BEACON_INT_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_BEACON_INT_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->beaconInterval = intvl;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BEACON_INT_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-
-int
-wmi_set_voice_pkt_size_cmd(struct wmi_t *wmip, u16 voicePktSize)
-{
- void *osbuf;
- WMI_SET_VOICE_PKT_SIZE_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_VOICE_PKT_SIZE_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->voicePktSize = voicePktSize;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_VOICE_PKT_SIZE_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-
-int
-wmi_set_max_sp_len_cmd(struct wmi_t *wmip, u8 maxSPLen)
-{
- void *osbuf;
- WMI_SET_MAX_SP_LEN_CMD *cmd;
-
- /* maxSPLen is a two-bit value. If user trys to set anything
- * other than this, then its invalid
- */
- if(maxSPLen & ~0x03)
- return A_EINVAL;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_MAX_SP_LEN_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->maxSPLen = maxSPLen;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_MAX_SP_LEN_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-u8 wmi_determine_userPriority(
- u8 *pkt,
- u32 layer2Pri)
-{
- u8 ipPri;
- iphdr *ipHdr = (iphdr *)pkt;
-
- /* Determine IPTOS priority */
- /*
- * IP Tos format :
- * (Refer Pg 57 WMM-test-plan-v1.2)
- * IP-TOS - 8bits
- * : DSCP(6-bits) ECN(2-bits)
- * : DSCP - P2 P1 P0 X X X
- * where (P2 P1 P0) form 802.1D
- */
- ipPri = ipHdr->ip_tos >> 5;
- ipPri &= 0x7;
-
- if ((layer2Pri & 0x7) > ipPri)
- return ((u8)layer2Pri & 0x7);
- else
- return ipPri;
-}
-
-u8 convert_userPriority_to_trafficClass(u8 userPriority)
-{
- return (up_to_ac[userPriority & 0x7]);
-}
-
-u8 wmi_get_power_mode_cmd(struct wmi_t *wmip)
-{
- return wmip->wmi_powerMode;
-}
-
-int
-wmi_verify_tspec_params(WMI_CREATE_PSTREAM_CMD *pCmd, int tspecCompliance)
-{
- int ret = 0;
-
-#define TSPEC_SUSPENSION_INTERVAL_ATHEROS_DEF (~0)
-#define TSPEC_SERVICE_START_TIME_ATHEROS_DEF 0
-#define TSPEC_MAX_BURST_SIZE_ATHEROS_DEF 0
-#define TSPEC_DELAY_BOUND_ATHEROS_DEF 0
-#define TSPEC_MEDIUM_TIME_ATHEROS_DEF 0
-#define TSPEC_SBA_ATHEROS_DEF 0x2000 /* factor is 1 */
-
- /* Verify TSPEC params for ATHEROS compliance */
- if(tspecCompliance == ATHEROS_COMPLIANCE) {
- if ((pCmd->suspensionInt != TSPEC_SUSPENSION_INTERVAL_ATHEROS_DEF) ||
- (pCmd->serviceStartTime != TSPEC_SERVICE_START_TIME_ATHEROS_DEF) ||
- (pCmd->minDataRate != pCmd->meanDataRate) ||
- (pCmd->minDataRate != pCmd->peakDataRate) ||
- (pCmd->maxBurstSize != TSPEC_MAX_BURST_SIZE_ATHEROS_DEF) ||
- (pCmd->delayBound != TSPEC_DELAY_BOUND_ATHEROS_DEF) ||
- (pCmd->sba != TSPEC_SBA_ATHEROS_DEF) ||
- (pCmd->mediumTime != TSPEC_MEDIUM_TIME_ATHEROS_DEF)) {
-
- A_DPRINTF(DBG_WMI, (DBGFMT "Invalid TSPEC params\n", DBGARG));
- //A_PRINTF("%s: Invalid TSPEC params\n", __func__);
- ret = A_EINVAL;
- }
- }
-
- return ret;
-}
-
-#ifdef CONFIG_HOST_TCMD_SUPPORT
-static int
-wmi_tcmd_test_report_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- ar6000_testmode_rx_report_event(wmip->wmi_devt, datap, len);
-
- return 0;
-}
-
-#endif /* CONFIG_HOST_TCMD_SUPPORT*/
-
-int
-wmi_set_authmode_cmd(struct wmi_t *wmip, u8 mode)
-{
- void *osbuf;
- WMI_SET_AUTH_MODE_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_AUTH_MODE_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->mode = mode;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_AUTH_MODE_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_reassocmode_cmd(struct wmi_t *wmip, u8 mode)
-{
- void *osbuf;
- WMI_SET_REASSOC_MODE_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_REASSOC_MODE_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->mode = mode;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_REASSOC_MODE_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_lpreamble_cmd(struct wmi_t *wmip, u8 status, u8 preamblePolicy)
-{
- void *osbuf;
- WMI_SET_LPREAMBLE_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_LPREAMBLE_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->status = status;
- cmd->preamblePolicy = preamblePolicy;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_LPREAMBLE_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_rts_cmd(struct wmi_t *wmip, u16 threshold)
-{
- void *osbuf;
- WMI_SET_RTS_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_RTS_CMD*)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->threshold = threshold;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_RTS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_wmm_cmd(struct wmi_t *wmip, WMI_WMM_STATUS status)
-{
- void *osbuf;
- WMI_SET_WMM_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_WMM_CMD*)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->status = status;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_WMM_CMDID,
- NO_SYNC_WMIFLAG));
-
-}
-
-int
-wmi_set_qos_supp_cmd(struct wmi_t *wmip, u8 status)
-{
- void *osbuf;
- WMI_SET_QOS_SUPP_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_QOS_SUPP_CMD*)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->status = status;
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_QOS_SUPP_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-
-int
-wmi_set_wmm_txop(struct wmi_t *wmip, WMI_TXOP_CFG cfg)
-{
- void *osbuf;
- WMI_SET_WMM_TXOP_CMD *cmd;
-
- if( !((cfg == WMI_TXOP_DISABLED) || (cfg == WMI_TXOP_ENABLED)) )
- return A_EINVAL;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_WMM_TXOP_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->txopEnable = cfg;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_WMM_TXOP_CMDID,
- NO_SYNC_WMIFLAG));
-
-}
-
-int
-wmi_set_country(struct wmi_t *wmip, u8 *countryCode)
-{
- void *osbuf;
- WMI_AP_SET_COUNTRY_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_AP_SET_COUNTRY_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- memcpy(cmd->countryCode,countryCode,3);
-
- return (wmi_cmd_send(wmip, osbuf, WMI_AP_SET_COUNTRY_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-#ifdef CONFIG_HOST_TCMD_SUPPORT
-/* WMI layer doesn't need to know the data type of the test cmd.
- This would be beneficial for customers like Qualcomm, who might
- have different test command requirements from different manufacturers
- */
-int
-wmi_test_cmd(struct wmi_t *wmip, u8 *buf, u32 len)
-{
- void *osbuf;
- char *data;
-
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- osbuf= A_NETBUF_ALLOC(len);
- if(osbuf == NULL)
- {
- return A_NO_MEMORY;
- }
- A_NETBUF_PUT(osbuf, len);
- data = A_NETBUF_DATA(osbuf);
- memcpy(data, buf, len);
-
- return(wmi_cmd_send(wmip, osbuf, WMI_TEST_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-#endif
-
-int
-wmi_set_bt_status_cmd(struct wmi_t *wmip, u8 streamType, u8 status)
-{
- void *osbuf;
- WMI_SET_BT_STATUS_CMD *cmd;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("Enter - streamType=%d, status=%d\n", streamType, status));
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_BT_STATUS_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->streamType = streamType;
- cmd->status = status;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BT_STATUS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_bt_params_cmd(struct wmi_t *wmip, WMI_SET_BT_PARAMS_CMD* cmd)
-{
- void *osbuf;
- WMI_SET_BT_PARAMS_CMD* alloc_cmd;
-
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("cmd params is %d\n", cmd->paramType));
-
- if (cmd->paramType == BT_PARAM_SCO) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("sco params %d %d %d %d %d %d %d %d %d %d %d %d\n", cmd->info.scoParams.numScoCyclesForceTrigger,
- cmd->info.scoParams.dataResponseTimeout,
- cmd->info.scoParams.stompScoRules,
- cmd->info.scoParams.scoOptFlags,
- cmd->info.scoParams.stompDutyCyleVal,
- cmd->info.scoParams.stompDutyCyleMaxVal,
- cmd->info.scoParams.psPollLatencyFraction,
- cmd->info.scoParams.noSCOSlots,
- cmd->info.scoParams.noIdleSlots,
- cmd->info.scoParams.scoOptOffRssi,
- cmd->info.scoParams.scoOptOnRssi,
- cmd->info.scoParams.scoOptRtsCount));
- }
- else if (cmd->paramType == BT_PARAM_A2DP) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("A2DP params %d %d %d %d %d %d %d %d\n", cmd->info.a2dpParams.a2dpWlanUsageLimit,
- cmd->info.a2dpParams.a2dpBurstCntMin,
- cmd->info.a2dpParams.a2dpDataRespTimeout,
- cmd->info.a2dpParams.a2dpOptFlags,
- cmd->info.a2dpParams.isCoLocatedBtRoleMaster,
- cmd->info.a2dpParams.a2dpOptOffRssi,
- cmd->info.a2dpParams.a2dpOptOnRssi,
- cmd->info.a2dpParams.a2dpOptRtsCount));
- }
- else if (cmd->paramType == BT_PARAM_ANTENNA_CONFIG) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("Ant config %d\n", cmd->info.antType));
- }
- else if (cmd->paramType == BT_PARAM_COLOCATED_BT_DEVICE) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("co-located BT %d\n", cmd->info.coLocatedBtDev));
- }
- else if (cmd->paramType == BT_PARAM_ACLCOEX) {
- AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("ACL params %d %d %d\n", cmd->info.aclCoexParams.aclWlanMediumUsageTime,
- cmd->info.aclCoexParams.aclBtMediumUsageTime,
- cmd->info.aclCoexParams.aclDataRespTimeout));
- }
- else if (cmd->paramType == BT_PARAM_11A_SEPARATE_ANT) {
- A_DPRINTF(DBG_WMI, (DBGFMT "11A ant\n", DBGARG));
- }
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- alloc_cmd = (WMI_SET_BT_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(alloc_cmd, sizeof(*cmd));
- memcpy(alloc_cmd, cmd, sizeof(*cmd));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BT_PARAMS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_btcoex_fe_ant_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_FE_ANT_CMD * cmd)
-{
- void *osbuf;
- WMI_SET_BTCOEX_FE_ANT_CMD *alloc_cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
- alloc_cmd = (WMI_SET_BTCOEX_FE_ANT_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(alloc_cmd, sizeof(*cmd));
- memcpy(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_FE_ANT_CMD));
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_FE_ANT_CMDID,
- NO_SYNC_WMIFLAG));
-
-}
-
-
-int
-wmi_set_btcoex_colocated_bt_dev_cmd(struct wmi_t *wmip,
- WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD * cmd)
-{
- void *osbuf;
- WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD *alloc_cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
- alloc_cmd = (WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(alloc_cmd, sizeof(*cmd));
- memcpy(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD));
- A_PRINTF("colocated bt = %d\n", alloc_cmd->btcoexCoLocatedBTdev);
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID,
- NO_SYNC_WMIFLAG));
-
-}
-
-int
-wmi_set_btcoex_btinquiry_page_config_cmd(struct wmi_t *wmip,
- WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD* cmd)
-{
- void *osbuf;
- WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD *alloc_cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
- alloc_cmd = (WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(alloc_cmd, sizeof(*cmd));
- memcpy(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD));
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMDID,
- NO_SYNC_WMIFLAG));
-
-}
-
-int
-wmi_set_btcoex_sco_config_cmd(struct wmi_t *wmip,
- WMI_SET_BTCOEX_SCO_CONFIG_CMD * cmd)
-{
- void *osbuf;
- WMI_SET_BTCOEX_SCO_CONFIG_CMD *alloc_cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
- alloc_cmd = (WMI_SET_BTCOEX_SCO_CONFIG_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(alloc_cmd, sizeof(*cmd));
- memcpy(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_SCO_CONFIG_CMD));
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_SCO_CONFIG_CMDID ,
- NO_SYNC_WMIFLAG));
-
-}
-
-int
-wmi_set_btcoex_a2dp_config_cmd(struct wmi_t *wmip,
- WMI_SET_BTCOEX_A2DP_CONFIG_CMD * cmd)
-{
- void *osbuf;
- WMI_SET_BTCOEX_A2DP_CONFIG_CMD *alloc_cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
- alloc_cmd = (WMI_SET_BTCOEX_A2DP_CONFIG_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(alloc_cmd, sizeof(*cmd));
- memcpy(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_A2DP_CONFIG_CMD));
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_A2DP_CONFIG_CMDID ,
- NO_SYNC_WMIFLAG));
-
-}
-
-int
-wmi_set_btcoex_aclcoex_config_cmd(struct wmi_t *wmip,
- WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD * cmd)
-{
- void *osbuf;
- WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD *alloc_cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
- alloc_cmd = (WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(alloc_cmd, sizeof(*cmd));
- memcpy(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD));
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMDID ,
- NO_SYNC_WMIFLAG));
-
-}
-
-int
-wmi_set_btcoex_debug_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_DEBUG_CMD * cmd)
-{
- void *osbuf;
- WMI_SET_BTCOEX_DEBUG_CMD *alloc_cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
- alloc_cmd = (WMI_SET_BTCOEX_DEBUG_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(alloc_cmd, sizeof(*cmd));
- memcpy(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_DEBUG_CMD));
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_DEBUG_CMDID ,
- NO_SYNC_WMIFLAG));
-
-}
-
-int
-wmi_set_btcoex_bt_operating_status_cmd(struct wmi_t * wmip,
- WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMD * cmd)
-{
- void *osbuf;
- WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMD *alloc_cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
- alloc_cmd = (WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(alloc_cmd, sizeof(*cmd));
- memcpy(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMD));
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID ,
- NO_SYNC_WMIFLAG));
-
-}
-
-int
-wmi_get_btcoex_config_cmd(struct wmi_t * wmip, WMI_GET_BTCOEX_CONFIG_CMD * cmd)
-{
- void *osbuf;
- WMI_GET_BTCOEX_CONFIG_CMD *alloc_cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
- alloc_cmd = (WMI_GET_BTCOEX_CONFIG_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(alloc_cmd, sizeof(*cmd));
- memcpy(alloc_cmd,cmd,sizeof(WMI_GET_BTCOEX_CONFIG_CMD));
- return (wmi_cmd_send(wmip, osbuf, WMI_GET_BTCOEX_CONFIG_CMDID ,
- NO_SYNC_WMIFLAG));
-
-}
-
-int
-wmi_get_btcoex_stats_cmd(struct wmi_t *wmip)
-{
-
- return wmi_simple_cmd(wmip, WMI_GET_BTCOEX_STATS_CMDID);
-
-}
-
-int
-wmi_get_keepalive_configured(struct wmi_t *wmip)
-{
- void *osbuf;
- WMI_GET_KEEPALIVE_CMD *cmd;
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
- cmd = (WMI_GET_KEEPALIVE_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- return (wmi_cmd_send(wmip, osbuf, WMI_GET_KEEPALIVE_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-u8 wmi_get_keepalive_cmd(struct wmi_t *wmip)
-{
- return wmip->wmi_keepaliveInterval;
-}
-
-int
-wmi_set_keepalive_cmd(struct wmi_t *wmip, u8 keepaliveInterval)
-{
- void *osbuf;
- WMI_SET_KEEPALIVE_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_KEEPALIVE_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->keepaliveInterval = keepaliveInterval;
- wmip->wmi_keepaliveInterval = keepaliveInterval;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_KEEPALIVE_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_params_cmd(struct wmi_t *wmip, u32 opcode, u32 length, char *buffer)
-{
- void *osbuf;
- WMI_SET_PARAMS_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd) + length);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd) + length);
-
- cmd = (WMI_SET_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->opcode = opcode;
- cmd->length = length;
- memcpy(cmd->buffer, buffer, length);
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_PARAMS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-
-int
-wmi_set_mcast_filter_cmd(struct wmi_t *wmip, u8 dot1, u8 dot2, u8 dot3, u8 dot4)
-{
- void *osbuf;
- WMI_SET_MCAST_FILTER_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_MCAST_FILTER_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->multicast_mac[0] = 0x01;
- cmd->multicast_mac[1] = 0x00;
- cmd->multicast_mac[2] = 0x5e;
- cmd->multicast_mac[3] = dot2&0x7F;
- cmd->multicast_mac[4] = dot3;
- cmd->multicast_mac[5] = dot4;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_MCAST_FILTER_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-
-int
-wmi_del_mcast_filter_cmd(struct wmi_t *wmip, u8 dot1, u8 dot2, u8 dot3, u8 dot4)
-{
- void *osbuf;
- WMI_SET_MCAST_FILTER_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_MCAST_FILTER_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->multicast_mac[0] = 0x01;
- cmd->multicast_mac[1] = 0x00;
- cmd->multicast_mac[2] = 0x5e;
- cmd->multicast_mac[3] = dot2&0x7F;
- cmd->multicast_mac[4] = dot3;
- cmd->multicast_mac[5] = dot4;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_DEL_MCAST_FILTER_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_mcast_filter_cmd(struct wmi_t *wmip, u8 enable)
-{
- void *osbuf;
- WMI_MCAST_FILTER_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_MCAST_FILTER_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->enable = enable;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_MCAST_FILTER_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_appie_cmd(struct wmi_t *wmip, u8 mgmtFrmType, u8 ieLen,
- u8 *ieInfo)
-{
- void *osbuf;
- WMI_SET_APPIE_CMD *cmd;
- u16 cmdLen;
-
- cmdLen = sizeof(*cmd) + ieLen - 1;
- osbuf = A_NETBUF_ALLOC(cmdLen);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, cmdLen);
-
- cmd = (WMI_SET_APPIE_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, cmdLen);
-
- cmd->mgmtFrmType = mgmtFrmType;
- cmd->ieLen = ieLen;
- memcpy(cmd->ieInfo, ieInfo, ieLen);
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_APPIE_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_halparam_cmd(struct wmi_t *wmip, u8 *cmd, u16 dataLen)
-{
- void *osbuf;
- u8 *data;
-
- osbuf = A_NETBUF_ALLOC(dataLen);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, dataLen);
-
- data = A_NETBUF_DATA(osbuf);
-
- memcpy(data, cmd, dataLen);
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_WHALPARAM_CMDID, NO_SYNC_WMIFLAG));
-}
-
-s32 wmi_get_rate(s8 rateindex)
-{
- if (rateindex == RATE_AUTO) {
- return 0;
- } else {
- return(wmi_rateTable[(u32) rateindex][0]);
- }
-}
-
-void
-wmi_node_return (struct wmi_t *wmip, bss_t *bss)
-{
- if (NULL != bss)
- {
- wlan_node_return (&wmip->wmi_scan_table, bss);
- }
-}
-
-void
-wmi_set_nodeage(struct wmi_t *wmip, u32 nodeAge)
-{
- wlan_set_nodeage(&wmip->wmi_scan_table,nodeAge);
-}
-
-bss_t *
-wmi_find_Ssidnode (struct wmi_t *wmip, u8 *pSsid,
- u32 ssidLength, bool bIsWPA2, bool bMatchSSID)
-{
- bss_t *node = NULL;
- node = wlan_find_Ssidnode (&wmip->wmi_scan_table, pSsid,
- ssidLength, bIsWPA2, bMatchSSID);
- return node;
-}
-
-
-#ifdef THREAD_X
-void
-wmi_refresh_scan_table (struct wmi_t *wmip)
-{
- wlan_refresh_inactive_nodes (&wmip->wmi_scan_table);
-}
-#endif
-
-void
-wmi_free_allnodes(struct wmi_t *wmip)
-{
- wlan_free_allnodes(&wmip->wmi_scan_table);
-}
-
-bss_t *
-wmi_find_node(struct wmi_t *wmip, const u8 *macaddr)
-{
- bss_t *ni=NULL;
- ni=wlan_find_node(&wmip->wmi_scan_table,macaddr);
- return ni;
-}
-
-void
-wmi_free_node(struct wmi_t *wmip, const u8 *macaddr)
-{
- bss_t *ni=NULL;
-
- ni=wlan_find_node(&wmip->wmi_scan_table,macaddr);
- if (ni != NULL) {
- wlan_node_reclaim(&wmip->wmi_scan_table, ni);
- }
-
- return;
-}
-
-int
-wmi_dset_open_reply(struct wmi_t *wmip,
- u32 status,
- u32 access_cookie,
- u32 dset_size,
- u32 dset_version,
- u32 targ_handle,
- u32 targ_reply_fn,
- u32 targ_reply_arg)
-{
- void *osbuf;
- WMIX_DSETOPEN_REPLY_CMD *open_reply;
-
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter - wmip=0x%lx\n", DBGARG, (unsigned long)wmip));
-
- osbuf = A_NETBUF_ALLOC(sizeof(*open_reply));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*open_reply));
- open_reply = (WMIX_DSETOPEN_REPLY_CMD *)(A_NETBUF_DATA(osbuf));
-
- open_reply->status = status;
- open_reply->targ_dset_handle = targ_handle;
- open_reply->targ_reply_fn = targ_reply_fn;
- open_reply->targ_reply_arg = targ_reply_arg;
- open_reply->access_cookie = access_cookie;
- open_reply->size = dset_size;
- open_reply->version = dset_version;
-
- return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_DSETOPEN_REPLY_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-static int
-wmi_get_pmkid_list_event_rx(struct wmi_t *wmip, u8 *datap, u32 len)
-{
- WMI_PMKID_LIST_REPLY *reply;
- u32 expected_len;
-
- if (len < sizeof(WMI_PMKID_LIST_REPLY)) {
- return A_EINVAL;
- }
- reply = (WMI_PMKID_LIST_REPLY *)datap;
- expected_len = sizeof(reply->numPMKID) + reply->numPMKID * WMI_PMKID_LEN;
-
- if (len < expected_len) {
- return A_EINVAL;
- }
-
- A_WMI_PMKID_LIST_EVENT(wmip->wmi_devt, reply->numPMKID,
- reply->pmkidList, reply->bssidList[0]);
-
- return 0;
-}
-
-
-static int
-wmi_set_params_event_rx(struct wmi_t *wmip, u8 *datap, u32 len)
-{
- WMI_SET_PARAMS_REPLY *reply;
-
- if (len < sizeof(WMI_SET_PARAMS_REPLY)) {
- return A_EINVAL;
- }
- reply = (WMI_SET_PARAMS_REPLY *)datap;
-
- if (0 == reply->status)
- {
-
- }
- else
- {
-
- }
-
- return 0;
-}
-
-
-#ifdef CONFIG_HOST_DSET_SUPPORT
-int
-wmi_dset_data_reply(struct wmi_t *wmip,
- u32 status,
- u8 *user_buf,
- u32 length,
- u32 targ_buf,
- u32 targ_reply_fn,
- u32 targ_reply_arg)
-{
- void *osbuf;
- WMIX_DSETDATA_REPLY_CMD *data_reply;
- u32 size;
-
- size = sizeof(*data_reply) + length;
-
- if (size <= length) {
- return A_ERROR;
- }
-
- A_DPRINTF(DBG_WMI,
- (DBGFMT "Enter - length=%d status=%d\n", DBGARG, length, status));
-
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
- A_NETBUF_PUT(osbuf, size);
- data_reply = (WMIX_DSETDATA_REPLY_CMD *)(A_NETBUF_DATA(osbuf));
-
- data_reply->status = status;
- data_reply->targ_buf = targ_buf;
- data_reply->targ_reply_fn = targ_reply_fn;
- data_reply->targ_reply_arg = targ_reply_arg;
- data_reply->length = length;
-
- if (status == 0) {
- if (a_copy_from_user(data_reply->buf, user_buf, length)) {
- A_NETBUF_FREE(osbuf);
- return A_ERROR;
- }
- }
-
- return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_DSETDATA_REPLY_CMDID,
- NO_SYNC_WMIFLAG));
-}
-#endif /* CONFIG_HOST_DSET_SUPPORT */
-
-int
-wmi_set_wsc_status_cmd(struct wmi_t *wmip, u32 status)
-{
- void *osbuf;
- char *cmd;
-
- wps_enable = status;
-
- osbuf = a_netbuf_alloc(sizeof(1));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- a_netbuf_put(osbuf, sizeof(1));
-
- cmd = (char *)(a_netbuf_to_data(osbuf));
-
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd[0] = (status?1:0);
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_WSC_STATUS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-#if defined(CONFIG_TARGET_PROFILE_SUPPORT)
-int
-wmi_prof_cfg_cmd(struct wmi_t *wmip,
- u32 period,
- u32 nbins)
-{
- void *osbuf;
- WMIX_PROF_CFG_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMIX_PROF_CFG_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->period = period;
- cmd->nbins = nbins;
-
- return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_PROF_CFG_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_prof_addr_set_cmd(struct wmi_t *wmip, u32 addr)
-{
- void *osbuf;
- WMIX_PROF_ADDR_SET_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMIX_PROF_ADDR_SET_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->addr = addr;
-
- return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_PROF_ADDR_SET_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_prof_start_cmd(struct wmi_t *wmip)
-{
- return wmi_simple_cmd_xtnd(wmip, WMIX_PROF_START_CMDID);
-}
-
-int
-wmi_prof_stop_cmd(struct wmi_t *wmip)
-{
- return wmi_simple_cmd_xtnd(wmip, WMIX_PROF_STOP_CMDID);
-}
-
-int
-wmi_prof_count_get_cmd(struct wmi_t *wmip)
-{
- return wmi_simple_cmd_xtnd(wmip, WMIX_PROF_COUNT_GET_CMDID);
-}
-
-/* Called to handle WMIX_PROF_CONT_EVENTID */
-static int
-wmi_prof_count_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMIX_PROF_COUNT_EVENT *prof_data = (WMIX_PROF_COUNT_EVENT *)datap;
-
- A_DPRINTF(DBG_WMI,
- (DBGFMT "Enter - addr=0x%x count=%d\n", DBGARG,
- prof_data->addr, prof_data->count));
-
- A_WMI_PROF_COUNT_RX(prof_data->addr, prof_data->count);
-
- return 0;
-}
-#endif /* CONFIG_TARGET_PROFILE_SUPPORT */
-
-#ifdef OS_ROAM_MANAGEMENT
-
-#define ETHERNET_MAC_ADDRESS_LENGTH 6
-
-void
-wmi_scan_indication (struct wmi_t *wmip)
-{
- struct ieee80211_node_table *nt;
- u32 gen;
- u32 size;
- u32 bsssize;
- bss_t *bss;
- u32 numbss;
- PNDIS_802_11_BSSID_SCAN_INFO psi;
- PBYTE pie;
- NDIS_802_11_FIXED_IEs *pFixed;
- NDIS_802_11_VARIABLE_IEs *pVar;
- u32 RateSize;
-
- struct ar6kScanIndication
- {
- NDIS_802_11_STATUS_INDICATION ind;
- NDIS_802_11_BSSID_SCAN_INFO_LIST slist;
- } *pAr6kScanIndEvent;
-
- nt = &wmip->wmi_scan_table;
-
- ++nt->nt_si_gen;
-
-
- gen = nt->nt_si_gen;
-
- size = offsetof(struct ar6kScanIndication, slist) +
- offsetof(NDIS_802_11_BSSID_SCAN_INFO_LIST, BssidScanInfo);
-
- numbss = 0;
-
- IEEE80211_NODE_LOCK(nt);
-
- //calc size
- for (bss = nt->nt_node_first; bss; bss = bss->ni_list_next) {
- if (bss->ni_si_gen != gen) {
- bsssize = offsetof(NDIS_802_11_BSSID_SCAN_INFO, Bssid) + offsetof(NDIS_WLAN_BSSID_EX, IEs);
- bsssize = bsssize + sizeof(NDIS_802_11_FIXED_IEs);
-
-#ifdef SUPPORT_WPA2
- if (bss->ni_cie.ie_rsn) {
- bsssize = bsssize + bss->ni_cie.ie_rsn[1] + 2;
- }
-#endif
- if (bss->ni_cie.ie_wpa) {
- bsssize = bsssize + bss->ni_cie.ie_wpa[1] + 2;
- }
-
- // bsssize must be a multiple of 4 to maintain alignment.
- bsssize = (bsssize + 3) & ~3;
-
- size += bsssize;
-
- numbss++;
- }
- }
-
- if (0 == numbss)
- {
-// RETAILMSG(1, (L"AR6K: scan indication: 0 bss\n"));
- ar6000_scan_indication (wmip->wmi_devt, NULL, 0);
- IEEE80211_NODE_UNLOCK (nt);
- return;
- }
-
- pAr6kScanIndEvent = A_MALLOC(size);
-
- if (NULL == pAr6kScanIndEvent)
- {
- IEEE80211_NODE_UNLOCK(nt);
- return;
- }
-
- A_MEMZERO(pAr6kScanIndEvent, size);
-
- //copy data
- pAr6kScanIndEvent->ind.StatusType = Ndis802_11StatusType_BssidScanInfoList;
- pAr6kScanIndEvent->slist.Version = 1;
- pAr6kScanIndEvent->slist.NumItems = numbss;
-
- psi = &pAr6kScanIndEvent->slist.BssidScanInfo[0];
-
- for (bss = nt->nt_node_first; bss; bss = bss->ni_list_next) {
- if (bss->ni_si_gen != gen) {
-
- bss->ni_si_gen = gen;
-
- //Set scan time
- psi->ScanTime = bss->ni_tstamp - WLAN_NODE_INACT_TIMEOUT_MSEC;
-
- // Copy data to bssid_ex
- bsssize = offsetof(NDIS_WLAN_BSSID_EX, IEs);
- bsssize = bsssize + sizeof(NDIS_802_11_FIXED_IEs);
-
-#ifdef SUPPORT_WPA2
- if (bss->ni_cie.ie_rsn) {
- bsssize = bsssize + bss->ni_cie.ie_rsn[1] + 2;
- }
-#endif
- if (bss->ni_cie.ie_wpa) {
- bsssize = bsssize + bss->ni_cie.ie_wpa[1] + 2;
- }
-
- // bsssize must be a multiple of 4 to maintain alignment.
- bsssize = (bsssize + 3) & ~3;
-
- psi->Bssid.Length = bsssize;
-
- memcpy (psi->Bssid.MacAddress, bss->ni_macaddr, ETHERNET_MAC_ADDRESS_LENGTH);
-
-
-//if (((bss->ni_macaddr[3] == 0xCE) && (bss->ni_macaddr[4] == 0xF0) && (bss->ni_macaddr[5] == 0xE7)) ||
-// ((bss->ni_macaddr[3] == 0x03) && (bss->ni_macaddr[4] == 0xE2) && (bss->ni_macaddr[5] == 0x70)))
-// RETAILMSG (1, (L"%x\n",bss->ni_macaddr[5]));
-
- psi->Bssid.Ssid.SsidLength = 0;
- pie = bss->ni_cie.ie_ssid;
-
- if (pie) {
- // Format of SSID IE is:
- // Type (1 octet)
- // Length (1 octet)
- // SSID (Length octets)
- //
- // Validation of the IE should have occurred within WMI.
- //
- if (pie[1] <= 32) {
- psi->Bssid.Ssid.SsidLength = pie[1];
- memcpy(psi->Bssid.Ssid.Ssid, &pie[2], psi->Bssid.Ssid.SsidLength);
- }
- }
- psi->Bssid.Privacy = (bss->ni_cie.ie_capInfo & 0x10) ? 1 : 0;
-
- //Post the RSSI value relative to the Standard Noise floor value.
- psi->Bssid.Rssi = bss->ni_rssi;
-
- if (bss->ni_cie.ie_chan >= 2412 && bss->ni_cie.ie_chan <= 2484) {
-
- if (bss->ni_cie.ie_rates && bss->ni_cie.ie_xrates) {
- psi->Bssid.NetworkTypeInUse = Ndis802_11OFDM24;
- }
- else {
- psi->Bssid.NetworkTypeInUse = Ndis802_11DS;
- }
- }
- else {
- psi->Bssid.NetworkTypeInUse = Ndis802_11OFDM5;
- }
-
- psi->Bssid.Configuration.Length = sizeof(psi->Bssid.Configuration);
- psi->Bssid.Configuration.BeaconPeriod = bss->ni_cie.ie_beaconInt; // Units are Kmicroseconds (1024 us)
- psi->Bssid.Configuration.ATIMWindow = 0;
- psi->Bssid.Configuration.DSConfig = bss->ni_cie.ie_chan * 1000;
- psi->Bssid.InfrastructureMode = ((bss->ni_cie.ie_capInfo & 0x03) == 0x01 ) ? Ndis802_11Infrastructure : Ndis802_11IBSS;
-
- RateSize = 0;
- pie = bss->ni_cie.ie_rates;
- if (pie) {
- RateSize = (pie[1] < NDIS_802_11_LENGTH_RATES_EX) ? pie[1] : NDIS_802_11_LENGTH_RATES_EX;
- memcpy(psi->Bssid.SupportedRates, &pie[2], RateSize);
- }
- pie = bss->ni_cie.ie_xrates;
- if (pie && RateSize < NDIS_802_11_LENGTH_RATES_EX) {
- memcpy(psi->Bssid.SupportedRates + RateSize, &pie[2],
- (pie[1] < (NDIS_802_11_LENGTH_RATES_EX - RateSize)) ? pie[1] : (NDIS_802_11_LENGTH_RATES_EX - RateSize));
- }
-
- // Copy the fixed IEs
- psi->Bssid.IELength = sizeof(NDIS_802_11_FIXED_IEs);
-
- pFixed = (NDIS_802_11_FIXED_IEs *)psi->Bssid.IEs;
- memcpy(pFixed->Timestamp, bss->ni_cie.ie_tstamp, sizeof(pFixed->Timestamp));
- pFixed->BeaconInterval = bss->ni_cie.ie_beaconInt;
- pFixed->Capabilities = bss->ni_cie.ie_capInfo;
-
- // Copy selected variable IEs
-
- pVar = (NDIS_802_11_VARIABLE_IEs *)((PBYTE)pFixed + sizeof(NDIS_802_11_FIXED_IEs));
-
-#ifdef SUPPORT_WPA2
- // Copy the WPAv2 IE
- if (bss->ni_cie.ie_rsn) {
- pie = bss->ni_cie.ie_rsn;
- psi->Bssid.IELength += pie[1] + 2;
- memcpy(pVar, pie, pie[1] + 2);
- pVar = (NDIS_802_11_VARIABLE_IEs *)((PBYTE)pVar + pie[1] + 2);
- }
-#endif
- // Copy the WPAv1 IE
- if (bss->ni_cie.ie_wpa) {
- pie = bss->ni_cie.ie_wpa;
- psi->Bssid.IELength += pie[1] + 2;
- memcpy(pVar, pie, pie[1] + 2);
- pVar = (NDIS_802_11_VARIABLE_IEs *)((PBYTE)pVar + pie[1] + 2);
- }
-
- // Advance buffer pointer
- psi = (PNDIS_802_11_BSSID_SCAN_INFO)((BYTE*)psi + bsssize + FIELD_OFFSET(NDIS_802_11_BSSID_SCAN_INFO, Bssid));
- }
- }
-
- IEEE80211_NODE_UNLOCK(nt);
-
-// wmi_free_allnodes(wmip);
-
-// RETAILMSG(1, (L"AR6K: scan indication: %u bss\n", numbss));
-
- ar6000_scan_indication (wmip->wmi_devt, pAr6kScanIndEvent, size);
-
- kfree(pAr6kScanIndEvent);
-}
-#endif
-
-u8 ar6000_get_upper_threshold(s16 rssi, SQ_THRESHOLD_PARAMS *sq_thresh,
- u32 size)
-{
- u32 index;
- u8 threshold = (u8)sq_thresh->upper_threshold[size - 1];
-
- /* The list is already in sorted order. Get the next lower value */
- for (index = 0; index < size; index ++) {
- if (rssi < sq_thresh->upper_threshold[index]) {
- threshold = (u8)sq_thresh->upper_threshold[index];
- break;
- }
- }
-
- return threshold;
-}
-
-u8 ar6000_get_lower_threshold(s16 rssi, SQ_THRESHOLD_PARAMS *sq_thresh,
- u32 size)
-{
- u32 index;
- u8 threshold = (u8)sq_thresh->lower_threshold[size - 1];
-
- /* The list is already in sorted order. Get the next lower value */
- for (index = 0; index < size; index ++) {
- if (rssi > sq_thresh->lower_threshold[index]) {
- threshold = (u8)sq_thresh->lower_threshold[index];
- break;
- }
- }
-
- return threshold;
-}
-static int
-wmi_send_rssi_threshold_params(struct wmi_t *wmip,
- WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd)
-{
- void *osbuf;
- s8 size;
- WMI_RSSI_THRESHOLD_PARAMS_CMD *cmd;
-
- size = sizeof (*cmd);
-
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
-
- cmd = (WMI_RSSI_THRESHOLD_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, size);
- memcpy(cmd, rssiCmd, sizeof(WMI_RSSI_THRESHOLD_PARAMS_CMD));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-static int
-wmi_send_snr_threshold_params(struct wmi_t *wmip,
- WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd)
-{
- void *osbuf;
- s8 size;
- WMI_SNR_THRESHOLD_PARAMS_CMD *cmd;
-
- size = sizeof (*cmd);
-
- osbuf = A_NETBUF_ALLOC(size);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, size);
- cmd = (WMI_SNR_THRESHOLD_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, size);
- memcpy(cmd, snrCmd, sizeof(WMI_SNR_THRESHOLD_PARAMS_CMD));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SNR_THRESHOLD_PARAMS_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_target_event_report_cmd(struct wmi_t *wmip, WMI_SET_TARGET_EVENT_REPORT_CMD* cmd)
-{
- void *osbuf;
- WMI_SET_TARGET_EVENT_REPORT_CMD* alloc_cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- alloc_cmd = (WMI_SET_TARGET_EVENT_REPORT_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(alloc_cmd, sizeof(*cmd));
- memcpy(alloc_cmd, cmd, sizeof(*cmd));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_TARGET_EVENT_REPORT_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-bss_t *wmi_rm_current_bss (struct wmi_t *wmip, u8 *id)
-{
- wmi_get_current_bssid (wmip, id);
- return wlan_node_remove (&wmip->wmi_scan_table, id);
-}
-
-int wmi_add_current_bss (struct wmi_t *wmip, u8 *id, bss_t *bss)
-{
- wlan_setup_node (&wmip->wmi_scan_table, bss, id);
- return 0;
-}
-
-static int
-wmi_addba_req_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_ADDBA_REQ_EVENT *cmd = (WMI_ADDBA_REQ_EVENT *)datap;
-
- A_WMI_AGGR_RECV_ADDBA_REQ_EVT(wmip->wmi_devt, cmd);
-
- return 0;
-}
-
-
-static int
-wmi_addba_resp_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_ADDBA_RESP_EVENT *cmd = (WMI_ADDBA_RESP_EVENT *)datap;
-
- A_WMI_AGGR_RECV_ADDBA_RESP_EVT(wmip->wmi_devt, cmd);
-
- return 0;
-}
-
-static int
-wmi_delba_req_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_DELBA_EVENT *cmd = (WMI_DELBA_EVENT *)datap;
-
- A_WMI_AGGR_RECV_DELBA_REQ_EVT(wmip->wmi_devt, cmd);
-
- return 0;
-}
-
-int
-wmi_btcoex_config_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- A_WMI_BTCOEX_CONFIG_EVENT(wmip->wmi_devt, datap, len);
-
- return 0;
-}
-
-
-int
-wmi_btcoex_stats_event_rx(struct wmi_t * wmip,u8 *datap,int len)
-{
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- A_WMI_BTCOEX_STATS_EVENT(wmip->wmi_devt, datap, len);
-
- return 0;
-
-}
-
-static int
-wmi_hci_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_HCI_EVENT *cmd = (WMI_HCI_EVENT *)datap;
- A_WMI_HCI_EVENT_EVT(wmip->wmi_devt, cmd);
-
- return 0;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-//// ////
-//// AP mode functions ////
-//// ////
-////////////////////////////////////////////////////////////////////////////////
-/*
- * IOCTL: AR6000_XIOCTL_AP_COMMIT_CONFIG
- *
- * When AR6K in AP mode, This command will be called after
- * changing ssid, channel etc. It will pass the profile to
- * target with a flag which will indicate which parameter changed,
- * also if this flag is 0, there was no change in parametes, so
- * commit cmd will not be sent to target. Without calling this IOCTL
- * the changes will not take effect.
- */
-int
-wmi_ap_profile_commit(struct wmi_t *wmip, WMI_CONNECT_CMD *p)
-{
- void *osbuf;
- WMI_CONNECT_CMD *cm;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cm));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cm));
- cm = (WMI_CONNECT_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cm, sizeof(*cm));
-
- memcpy(cm,p,sizeof(*cm));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_AP_CONFIG_COMMIT_CMDID, NO_SYNC_WMIFLAG));
-}
-
-/*
- * IOCTL: AR6000_XIOCTL_AP_HIDDEN_SSID
- *
- * This command will be used to enable/disable hidden ssid functioanlity of
- * beacon. If it is enabled, ssid will be NULL in beacon.
- */
-int
-wmi_ap_set_hidden_ssid(struct wmi_t *wmip, u8 hidden_ssid)
-{
- void *osbuf;
- WMI_AP_HIDDEN_SSID_CMD *hs;
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_HIDDEN_SSID_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_AP_HIDDEN_SSID_CMD));
- hs = (WMI_AP_HIDDEN_SSID_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(hs, sizeof(*hs));
-
- hs->hidden_ssid = hidden_ssid;
-
- A_DPRINTF(DBG_WMI, (DBGFMT "AR6000_XIOCTL_AP_HIDDEN_SSID %d\n", DBGARG , hidden_ssid));
- return (wmi_cmd_send(wmip, osbuf, WMI_AP_HIDDEN_SSID_CMDID, NO_SYNC_WMIFLAG));
-}
-
-/*
- * IOCTL: AR6000_XIOCTL_AP_SET_MAX_NUM_STA
- *
- * This command is used to limit max num of STA that can connect
- * with this AP. This value should not exceed AP_MAX_NUM_STA (this
- * is max num of STA supported by AP). Value was already validated
- * in ioctl.c
- */
-int
-wmi_ap_set_num_sta(struct wmi_t *wmip, u8 num_sta)
-{
- void *osbuf;
- WMI_AP_SET_NUM_STA_CMD *ns;
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_SET_NUM_STA_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_AP_SET_NUM_STA_CMD));
- ns = (WMI_AP_SET_NUM_STA_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(ns, sizeof(*ns));
-
- ns->num_sta = num_sta;
-
- A_DPRINTF(DBG_WMI, (DBGFMT "AR6000_XIOCTL_AP_SET_MAX_NUM_STA %d\n", DBGARG , num_sta));
- return (wmi_cmd_send(wmip, osbuf, WMI_AP_SET_NUM_STA_CMDID, NO_SYNC_WMIFLAG));
-}
-
-/*
- * IOCTL: AR6000_XIOCTL_AP_SET_ACL_MAC
- *
- * This command is used to send list of mac of STAs which will
- * be allowed to connect with this AP. When this list is empty
- * firware will allow all STAs till the count reaches AP_MAX_NUM_STA.
- */
-int
-wmi_ap_acl_mac_list(struct wmi_t *wmip, WMI_AP_ACL_MAC_CMD *acl)
-{
- void *osbuf;
- WMI_AP_ACL_MAC_CMD *a;
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_ACL_MAC_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_AP_ACL_MAC_CMD));
- a = (WMI_AP_ACL_MAC_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(a, sizeof(*a));
- memcpy(a,acl,sizeof(*acl));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_AP_ACL_MAC_LIST_CMDID, NO_SYNC_WMIFLAG));
-}
-
-/*
- * IOCTL: AR6000_XIOCTL_AP_SET_MLME
- *
- * This command is used to send list of mac of STAs which will
- * be allowed to connect with this AP. When this list is empty
- * firware will allow all STAs till the count reaches AP_MAX_NUM_STA.
- */
-int
-wmi_ap_set_mlme(struct wmi_t *wmip, u8 cmd, u8 *mac, u16 reason)
-{
- void *osbuf;
- WMI_AP_SET_MLME_CMD *mlme;
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_SET_MLME_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_AP_SET_MLME_CMD));
- mlme = (WMI_AP_SET_MLME_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(mlme, sizeof(*mlme));
-
- mlme->cmd = cmd;
- memcpy(mlme->mac, mac, ATH_MAC_LEN);
- mlme->reason = reason;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_AP_SET_MLME_CMDID, NO_SYNC_WMIFLAG));
-}
-
-static int
-wmi_pspoll_event_rx(struct wmi_t *wmip, u8 *datap, int len)
-{
- WMI_PSPOLL_EVENT *ev;
-
- if (len < sizeof(WMI_PSPOLL_EVENT)) {
- return A_EINVAL;
- }
- ev = (WMI_PSPOLL_EVENT *)datap;
-
- A_WMI_PSPOLL_EVENT(wmip->wmi_devt, ev->aid);
- return 0;
-}
-
-static int
-wmi_dtimexpiry_event_rx(struct wmi_t *wmip, u8 *datap,int len)
-{
- A_WMI_DTIMEXPIRY_EVENT(wmip->wmi_devt);
- return 0;
-}
-
-#ifdef WAPI_ENABLE
-static int
-wmi_wapi_rekey_event_rx(struct wmi_t *wmip, u8 *datap,int len)
-{
- u8 *ev;
-
- if (len < 7) {
- return A_EINVAL;
- }
- ev = (u8 *)datap;
-
- A_WMI_WAPI_REKEY_EVENT(wmip->wmi_devt, *ev, &ev[1]);
- return 0;
-}
-#endif
-
-int
-wmi_set_pvb_cmd(struct wmi_t *wmip, u16 aid, bool flag)
-{
- WMI_AP_SET_PVB_CMD *cmd;
- void *osbuf = NULL;
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_SET_PVB_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_AP_SET_PVB_CMD));
- cmd = (WMI_AP_SET_PVB_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
-
- cmd->aid = aid;
- cmd->flag = flag;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_AP_SET_PVB_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_ap_conn_inact_time(struct wmi_t *wmip, u32 period)
-{
- WMI_AP_CONN_INACT_CMD *cmd;
- void *osbuf = NULL;
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_CONN_INACT_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_AP_CONN_INACT_CMD));
- cmd = (WMI_AP_CONN_INACT_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
-
- cmd->period = period;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_AP_CONN_INACT_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_ap_bgscan_time(struct wmi_t *wmip, u32 period, u32 dwell)
-{
- WMI_AP_PROT_SCAN_TIME_CMD *cmd;
- void *osbuf = NULL;
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_PROT_SCAN_TIME_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_AP_PROT_SCAN_TIME_CMD));
- cmd = (WMI_AP_PROT_SCAN_TIME_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
-
- cmd->period_min = period;
- cmd->dwell_ms = dwell;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_AP_PROT_SCAN_TIME_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_ap_set_dtim(struct wmi_t *wmip, u8 dtim)
-{
- WMI_AP_SET_DTIM_CMD *cmd;
- void *osbuf = NULL;
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_SET_DTIM_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_AP_SET_DTIM_CMD));
- cmd = (WMI_AP_SET_DTIM_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
-
- cmd->dtim = dtim;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_AP_SET_DTIM_CMDID, NO_SYNC_WMIFLAG));
-}
-
-/*
- * IOCTL: AR6000_XIOCTL_AP_SET_ACL_POLICY
- *
- * This command is used to set ACL policay. While changing policy, if you
- * want to retain the existing MAC addresses in the ACL list, policy should be
- * OR with AP_ACL_RETAIN_LIST_MASK, else the existing list will be cleared.
- * If there is no chage in policy, the list will be intact.
- */
-int
-wmi_ap_set_acl_policy(struct wmi_t *wmip, u8 policy)
-{
- void *osbuf;
- WMI_AP_ACL_POLICY_CMD *po;
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_ACL_POLICY_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
-}
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_AP_ACL_POLICY_CMD));
- po = (WMI_AP_ACL_POLICY_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(po, sizeof(*po));
-
- po->policy = policy;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_AP_ACL_POLICY_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_ap_set_rateset(struct wmi_t *wmip, u8 rateset)
-{
- void *osbuf;
- WMI_AP_SET_11BG_RATESET_CMD *rs;
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_SET_11BG_RATESET_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_AP_SET_11BG_RATESET_CMD));
- rs = (WMI_AP_SET_11BG_RATESET_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(rs, sizeof(*rs));
-
- rs->rateset = rateset;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_AP_SET_11BG_RATESET_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_ht_cap_cmd(struct wmi_t *wmip, WMI_SET_HT_CAP_CMD *cmd)
-{
- void *osbuf;
- WMI_SET_HT_CAP_CMD *htCap;
- u8 band;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*htCap));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*htCap));
-
- band = (cmd->band)? A_BAND_5GHZ : A_BAND_24GHZ;
- wmip->wmi_ht_allowed[band] = (cmd->enable)? 1:0;
-
- htCap = (WMI_SET_HT_CAP_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(htCap, sizeof(*htCap));
- memcpy(htCap, cmd, sizeof(*htCap));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_HT_CAP_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_ht_op_cmd(struct wmi_t *wmip, u8 sta_chan_width)
-{
- void *osbuf;
- WMI_SET_HT_OP_CMD *htInfo;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*htInfo));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*htInfo));
-
- htInfo = (WMI_SET_HT_OP_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(htInfo, sizeof(*htInfo));
- htInfo->sta_chan_width = sta_chan_width;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_HT_OP_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_tx_select_rates_cmd(struct wmi_t *wmip, u32 *pMaskArray)
-{
- void *osbuf;
- WMI_SET_TX_SELECT_RATES_CMD *pData;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*pData));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*pData));
-
- pData = (WMI_SET_TX_SELECT_RATES_CMD *)(A_NETBUF_DATA(osbuf));
- memcpy(pData, pMaskArray, sizeof(*pData));
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_TX_SELECT_RATES_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-
-int
-wmi_send_hci_cmd(struct wmi_t *wmip, u8 *buf, u16 sz)
-{
- void *osbuf;
- WMI_HCI_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd) + sz);
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd) + sz);
- cmd = (WMI_HCI_CMD *)(A_NETBUF_DATA(osbuf));
-
- cmd->cmd_buf_sz = sz;
- memcpy(cmd->buf, buf, sz);
- return (wmi_cmd_send(wmip, osbuf, WMI_HCI_CMD_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_allow_aggr_cmd(struct wmi_t *wmip, u16 tx_tidmask, u16 rx_tidmask)
-{
- void *osbuf;
- WMI_ALLOW_AGGR_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_ALLOW_AGGR_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->tx_allow_aggr = tx_tidmask;
- cmd->rx_allow_aggr = rx_tidmask;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_ALLOW_AGGR_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_setup_aggr_cmd(struct wmi_t *wmip, u8 tid)
-{
- void *osbuf;
- WMI_ADDBA_REQ_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_ADDBA_REQ_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->tid = tid;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_ADDBA_REQ_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_delete_aggr_cmd(struct wmi_t *wmip, u8 tid, bool uplink)
-{
- void *osbuf;
- WMI_DELBA_REQ_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_DELBA_REQ_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->tid = tid;
- cmd->is_sender_initiator = uplink; /* uplink =1 - uplink direction, 0=downlink direction */
-
- /* Delete the local aggr state, on host */
- return (wmi_cmd_send(wmip, osbuf, WMI_DELBA_REQ_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_rx_frame_format_cmd(struct wmi_t *wmip, u8 rxMetaVersion,
- bool rxDot11Hdr, bool defragOnHost)
-{
- void *osbuf;
- WMI_RX_FRAME_FORMAT_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_RX_FRAME_FORMAT_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->dot11Hdr = (rxDot11Hdr==true)? 1:0;
- cmd->defragOnHost = (defragOnHost==true)? 1:0;
- cmd->metaVersion = rxMetaVersion; /* */
-
- /* Delete the local aggr state, on host */
- return (wmi_cmd_send(wmip, osbuf, WMI_RX_FRAME_FORMAT_CMDID, NO_SYNC_WMIFLAG));
-}
-
-
-int
-wmi_set_thin_mode_cmd(struct wmi_t *wmip, bool bThinMode)
-{
- void *osbuf;
- WMI_SET_THIN_MODE_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_THIN_MODE_CMD *)(A_NETBUF_DATA(osbuf));
- cmd->enable = (bThinMode==true)? 1:0;
-
- /* Delete the local aggr state, on host */
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_THIN_MODE_CMDID, NO_SYNC_WMIFLAG));
-}
-
-
-int
-wmi_set_wlan_conn_precedence_cmd(struct wmi_t *wmip, BT_WLAN_CONN_PRECEDENCE precedence)
-{
- void *osbuf;
- WMI_SET_BT_WLAN_CONN_PRECEDENCE *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_BT_WLAN_CONN_PRECEDENCE *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->precedence = precedence;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_BT_WLAN_CONN_PRECEDENCE_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_pmk_cmd(struct wmi_t *wmip, u8 *pmk)
-{
- void *osbuf;
- WMI_SET_PMK_CMD *p;
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_SET_PMK_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_SET_PMK_CMD));
-
- p = (WMI_SET_PMK_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(p, sizeof(*p));
-
- memcpy(p->pmk, pmk, WMI_PMK_LEN);
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_PMK_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_set_excess_tx_retry_thres_cmd(struct wmi_t *wmip, WMI_SET_EXCESS_TX_RETRY_THRES_CMD *cmd)
-{
- void *osbuf;
- WMI_SET_EXCESS_TX_RETRY_THRES_CMD *p;
-
- osbuf = A_NETBUF_ALLOC(sizeof(WMI_SET_EXCESS_TX_RETRY_THRES_CMD));
- if (osbuf == NULL) {
- return A_NO_MEMORY;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(WMI_SET_EXCESS_TX_RETRY_THRES_CMD));
-
- p = (WMI_SET_EXCESS_TX_RETRY_THRES_CMD *)(A_NETBUF_DATA(osbuf));
- memset(p, 0, sizeof(*p));
-
- p->threshold = cmd->threshold;
-
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_EXCESS_TX_RETRY_THRES_CMDID, NO_SYNC_WMIFLAG));
-}
-
-int
-wmi_SGI_cmd(struct wmi_t *wmip, u32 sgiMask, u8 sgiPERThreshold)
-{
- void *osbuf;
- WMI_SET_TX_SGI_PARAM_CMD *cmd;
-
- osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
- if (osbuf == NULL) {
- return A_NO_MEMORY ;
- }
-
- A_NETBUF_PUT(osbuf, sizeof(*cmd));
-
- cmd = (WMI_SET_TX_SGI_PARAM_CMD *)(A_NETBUF_DATA(osbuf));
- A_MEMZERO(cmd, sizeof(*cmd));
- cmd->sgiMask = sgiMask;
- cmd->sgiPERThreshold = sgiPERThreshold;
- return (wmi_cmd_send(wmip, osbuf, WMI_SET_TX_SGI_PARAM_CMDID,
- NO_SYNC_WMIFLAG));
-}
-
-bss_t *
-wmi_find_matching_Ssidnode (struct wmi_t *wmip, u8 *pSsid,
- u32 ssidLength,
- u32 dot11AuthMode, u32 authMode,
- u32 pairwiseCryptoType, u32 grpwiseCryptoTyp)
-{
- bss_t *node = NULL;
- node = wlan_find_matching_Ssidnode (&wmip->wmi_scan_table, pSsid,
- ssidLength, dot11AuthMode, authMode, pairwiseCryptoType, grpwiseCryptoTyp);
-
- return node;
-}
-
-u16 wmi_ieee2freq (int chan)
-{
- u16 freq = 0;
- freq = wlan_ieee2freq (chan);
- return freq;
-
-}
-
-u32 wmi_freq2ieee (u16 freq)
-{
- u16 chan = 0;
- chan = wlan_freq2ieee (freq);
- return chan;
-}
diff --git a/drivers/staging/ath6kl/wmi/wmi_host.h b/drivers/staging/ath6kl/wmi/wmi_host.h
deleted file mode 100644
index 53e4f085dfe..00000000000
--- a/drivers/staging/ath6kl/wmi/wmi_host.h
+++ /dev/null
@@ -1,102 +0,0 @@
-//------------------------------------------------------------------------------
-// <copyright file="wmi_host.h" company="Atheros">
-// Copyright (c) 2004-2010 Atheros Corporation. All rights reserved.
-//
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-//
-//
-//------------------------------------------------------------------------------
-//==============================================================================
-// This file contains local definitios for the wmi host module.
-//
-// Author(s): ="Atheros"
-//==============================================================================
-#ifndef _WMI_HOST_H_
-#define _WMI_HOST_H_
-
-#include "roaming.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct wmi_stats {
- u32 cmd_len_err;
- u32 cmd_id_err;
-};
-
-#define SSID_IE_LEN_INDEX 13
-
-/* Host side link management data structures */
-#define SIGNAL_QUALITY_THRESHOLD_LEVELS 6
-#define SIGNAL_QUALITY_UPPER_THRESHOLD_LEVELS SIGNAL_QUALITY_THRESHOLD_LEVELS
-#define SIGNAL_QUALITY_LOWER_THRESHOLD_LEVELS SIGNAL_QUALITY_THRESHOLD_LEVELS
-typedef struct sq_threshold_params_s {
- s16 upper_threshold[SIGNAL_QUALITY_UPPER_THRESHOLD_LEVELS];
- s16 lower_threshold[SIGNAL_QUALITY_LOWER_THRESHOLD_LEVELS];
- u32 upper_threshold_valid_count;
- u32 lower_threshold_valid_count;
- u32 polling_interval;
- u8 weight;
- u8 last_rssi; //normally you would expect this to be bss specific but we keep only one instance because its only valid when the device is in a connected state. Not sure if it belongs to host or target.
- u8 last_rssi_poll_event; //Not sure if it belongs to host or target
-} SQ_THRESHOLD_PARAMS;
-
-/*
- * These constants are used with A_WLAN_BAND_SET.
- */
-#define A_BAND_24GHZ 0
-#define A_BAND_5GHZ 1
-#define A_NUM_BANDS 2
-
-struct wmi_t {
- bool wmi_ready;
- bool wmi_numQoSStream;
- u16 wmi_streamExistsForAC[WMM_NUM_AC];
- u8 wmi_fatPipeExists;
- void *wmi_devt;
- struct wmi_stats wmi_stats;
- struct ieee80211_node_table wmi_scan_table;
- u8 wmi_bssid[ATH_MAC_LEN];
- u8 wmi_powerMode;
- u8 wmi_phyMode;
- u8 wmi_keepaliveInterval;
-#ifdef THREAD_X
- A_CSECT_T wmi_lock;
-#else
- A_MUTEX_T wmi_lock;
-#endif
- HTC_ENDPOINT_ID wmi_endpoint_id;
- SQ_THRESHOLD_PARAMS wmi_SqThresholdParams[SIGNAL_QUALITY_METRICS_NUM_MAX];
- CRYPTO_TYPE wmi_pair_crypto_type;
- CRYPTO_TYPE wmi_grp_crypto_type;
- bool wmi_is_wmm_enabled;
- u8 wmi_ht_allowed[A_NUM_BANDS];
- u8 wmi_traffic_class;
-};
-
-#ifdef THREAD_X
-#define INIT_WMI_LOCK(w) A_CSECT_INIT(&(w)->wmi_lock)
-#define LOCK_WMI(w) A_CSECT_ENTER(&(w)->wmi_lock);
-#define UNLOCK_WMI(w) A_CSECT_LEAVE(&(w)->wmi_lock);
-#define DELETE_WMI_LOCK(w) A_CSECT_DELETE(&(w)->wmi_lock);
-#else
-#define LOCK_WMI(w) A_MUTEX_LOCK(&(w)->wmi_lock);
-#define UNLOCK_WMI(w) A_MUTEX_UNLOCK(&(w)->wmi_lock);
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _WMI_HOST_H_ */
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 867dbf1c992..2fa658eb74d 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -34,7 +34,7 @@ static int bcm_char_open(struct inode *inode, struct file * filp)
/* Store the Adapter structure */
filp->private_data = pTarang;
- /*Start Queuing the control response Packets*/
+ /* Start Queuing the control response Packets */
atomic_inc(&Adapter->ApplicationRunning);
nonseekable_open(inode, filp);
@@ -84,7 +84,7 @@ static int bcm_char_release(struct inode *inode, struct file *filp)
up(&Adapter->RxAppControlQueuelock);
- /*Stop Queuing the control response Packets*/
+ /* Stop Queuing the control response Packets */
atomic_dec(&Adapter->ApplicationRunning);
kfree(pTarang);
@@ -100,8 +100,8 @@ static ssize_t bcm_char_read(struct file *filp, char __user *buf, size_t size,
PPER_TARANG_DATA pTarang = filp->private_data;
PMINI_ADAPTER Adapter = pTarang->Adapter;
struct sk_buff *Packet = NULL;
- ssize_t PktLen = 0;
- int wait_ret_val = 0;
+ ssize_t PktLen = 0;
+ int wait_ret_val = 0;
unsigned long ret = 0;
wait_ret_val = wait_event_interruptible(Adapter->process_read_wait_queue,
@@ -157,1942 +157,1824 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
{
PPER_TARANG_DATA pTarang = filp->private_data;
void __user *argp = (void __user *)arg;
- PMINI_ADAPTER Adapter = pTarang->Adapter;
- INT Status = STATUS_FAILURE;
+ PMINI_ADAPTER Adapter = pTarang->Adapter;
+ INT Status = STATUS_FAILURE;
int timeout = 0;
- IOCTL_BUFFER IoBuffer;
+ IOCTL_BUFFER IoBuffer;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX", cmd, arg);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX", cmd, arg);
- if(_IOC_TYPE(cmd) != BCM_IOCTL)
+ if (_IOC_TYPE(cmd) != BCM_IOCTL)
return -EFAULT;
- if(_IOC_DIR(cmd) & _IOC_READ)
+ if (_IOC_DIR(cmd) & _IOC_READ)
Status = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
else if (_IOC_DIR(cmd) & _IOC_WRITE)
- Status = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
+ Status = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
else if (_IOC_NONE == (_IOC_DIR(cmd) & _IOC_NONE))
- Status = STATUS_SUCCESS;
+ Status = STATUS_SUCCESS;
- if(Status)
+ if (Status)
return -EFAULT;
- if(Adapter->device_removed)
- {
+ if (Adapter->device_removed)
return -EFAULT;
- }
- if(FALSE == Adapter->fw_download_done)
- {
- switch (cmd)
- {
- case IOCTL_MAC_ADDR_REQ:
- case IOCTL_LINK_REQ:
- case IOCTL_CM_REQUEST:
- case IOCTL_SS_INFO_REQ:
- case IOCTL_SEND_CONTROL_MESSAGE:
- case IOCTL_IDLE_REQ:
- case IOCTL_BCM_GPIO_SET_REQUEST:
- case IOCTL_BCM_GPIO_STATUS_REQUEST:
- return -EACCES;
- default:
- break;
+ if (FALSE == Adapter->fw_download_done) {
+ switch (cmd) {
+ case IOCTL_MAC_ADDR_REQ:
+ case IOCTL_LINK_REQ:
+ case IOCTL_CM_REQUEST:
+ case IOCTL_SS_INFO_REQ:
+ case IOCTL_SEND_CONTROL_MESSAGE:
+ case IOCTL_IDLE_REQ:
+ case IOCTL_BCM_GPIO_SET_REQUEST:
+ case IOCTL_BCM_GPIO_STATUS_REQUEST:
+ return -EACCES;
+ default:
+ break;
}
}
Status = vendorextnIoctl(Adapter, cmd, arg);
- if(Status != CONTINUE_COMMON_PATH )
- return Status;
+ if (Status != CONTINUE_COMMON_PATH)
+ return Status;
+
+ switch (cmd) {
+ /* Rdms for Swin Idle... */
+ case IOCTL_BCM_REGISTER_READ_PRIVATE: {
+ RDM_BUFFER sRdmBuffer = {0};
+ PCHAR temp_buff;
+ UINT Bufflen;
+ u16 temp_value;
+
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- switch(cmd){
- // Rdms for Swin Idle...
- case IOCTL_BCM_REGISTER_READ_PRIVATE:
- {
- RDM_BUFFER sRdmBuffer = {0};
- PCHAR temp_buff;
- UINT Bufflen;
+ if (IoBuffer.InputLength > sizeof(sRdmBuffer))
+ return -EINVAL;
- /* Copy Ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
- if (IoBuffer.InputLength > sizeof(sRdmBuffer))
- return -EINVAL;
+ if (IoBuffer.OutputLength > USHRT_MAX ||
+ IoBuffer.OutputLength == 0) {
+ return -EINVAL;
+ }
- if(copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ Bufflen = IoBuffer.OutputLength;
+ temp_value = 4 - (Bufflen % 4);
+ Bufflen += temp_value % 4;
- /* FIXME: need to restrict BuffLen */
- Bufflen = IoBuffer.OutputLength + (4 - IoBuffer.OutputLength%4)%4;
- temp_buff = kmalloc(Bufflen, GFP_KERNEL);
- if(!temp_buff)
- return -ENOMEM;
+ temp_buff = kmalloc(Bufflen, GFP_KERNEL);
+ if (!temp_buff)
+ return -ENOMEM;
- Status = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
- (PUINT)temp_buff, Bufflen);
- if(Status == STATUS_SUCCESS)
- {
- if(copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
- Status = -EFAULT;
- }
-
- kfree(temp_buff);
- break;
+ Status = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
+ (PUINT)temp_buff, Bufflen);
+ if (Status == STATUS_SUCCESS) {
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
+ Status = -EFAULT;
}
- case IOCTL_BCM_REGISTER_WRITE_PRIVATE:
- {
- WRM_BUFFER sWrmBuffer = {0};
- UINT uiTempVar=0;
- /* Copy Ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ kfree(temp_buff);
+ break;
+ }
- if (IoBuffer.InputLength > sizeof(sWrmBuffer))
- return -EINVAL;
+ case IOCTL_BCM_REGISTER_WRITE_PRIVATE: {
+ WRM_BUFFER sWrmBuffer = {0};
+ UINT uiTempVar = 0;
+ /* Copy Ioctl Buffer structure */
- /* Get WrmBuffer structure */
- if(copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.InputLength > sizeof(sWrmBuffer))
+ return -EINVAL;
- uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
- if(!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
- ((uiTempVar == EEPROM_REJECT_REG_1)||
+ /* Get WrmBuffer structure */
+ if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
+
+ uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
+ if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
+ ((uiTempVar == EEPROM_REJECT_REG_1) ||
(uiTempVar == EEPROM_REJECT_REG_2) ||
(uiTempVar == EEPROM_REJECT_REG_3) ||
- (uiTempVar == EEPROM_REJECT_REG_4)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
- return -EFAULT;
- }
- Status = wrmalt(Adapter, (UINT)sWrmBuffer.Register,
- (PUINT)sWrmBuffer.Data, sizeof(ULONG));
- if(Status == STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"WRM Done\n");
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n");
- Status = -EFAULT;
- }
- break;
+ (uiTempVar == EEPROM_REJECT_REG_4))) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
+ return -EFAULT;
}
- case IOCTL_BCM_REGISTER_READ:
- case IOCTL_BCM_EEPROM_REGISTER_READ:
- {
- RDM_BUFFER sRdmBuffer = {0};
- PCHAR temp_buff = NULL;
- UINT uiTempVar = 0;
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Rdms\n");
- return -EACCES;
- }
- /* Copy Ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ Status = wrmalt(Adapter, (UINT)sWrmBuffer.Register,
+ (PUINT)sWrmBuffer.Data, sizeof(ULONG));
- if (IoBuffer.InputLength > sizeof(sRdmBuffer))
- return -EINVAL;
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Done\n");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n");
+ Status = -EFAULT;
+ }
+ break;
+ }
- if(copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ case IOCTL_BCM_REGISTER_READ:
+ case IOCTL_BCM_EEPROM_REGISTER_READ: {
+ RDM_BUFFER sRdmBuffer = {0};
+ PCHAR temp_buff = NULL;
+ UINT uiTempVar = 0;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Rdms\n");
+ return -EACCES;
+ }
- /* FIXME: don't trust user supplied length */
- temp_buff = kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
- if(!temp_buff)
- return STATUS_FAILURE;
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- if((((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) ||
- ((ULONG)sRdmBuffer.Register & 0x3))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "RDM Done On invalid Address : %x Access Denied.\n",
- (int)sRdmBuffer.Register);
- return -EINVAL;
- }
+ if (IoBuffer.InputLength > sizeof(sRdmBuffer))
+ return -EINVAL;
+
+ if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
+
+ /* FIXME: don't trust user supplied length */
+ temp_buff = kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
+ if (!temp_buff)
+ return STATUS_FAILURE;
- uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
- Status = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register,
- (PUINT)temp_buff, IoBuffer.OutputLength);
- if(Status == STATUS_SUCCESS)
- if(copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
- Status = -EFAULT;
+ if ((((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) ||
+ ((ULONG)sRdmBuffer.Register & 0x3)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Done On invalid Address : %x Access Denied.\n",
+ (int)sRdmBuffer.Register);
kfree(temp_buff);
- break;
+ return -EINVAL;
}
- case IOCTL_BCM_REGISTER_WRITE:
- case IOCTL_BCM_EEPROM_REGISTER_WRITE:
- {
- WRM_BUFFER sWrmBuffer = {0};
- UINT uiTempVar=0;
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Wrms\n");
- return -EACCES;
- }
- /* Copy Ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
+ Status = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register, (PUINT)temp_buff, IoBuffer.OutputLength);
- if (IoBuffer.InputLength > sizeof(sWrmBuffer))
- return -EINVAL;
+ if (Status == STATUS_SUCCESS)
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
+ Status = -EFAULT;
- /* Get WrmBuffer structure */
- if(copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ kfree(temp_buff);
+ break;
+ }
+ case IOCTL_BCM_REGISTER_WRITE:
+ case IOCTL_BCM_EEPROM_REGISTER_WRITE: {
+ WRM_BUFFER sWrmBuffer = {0};
+ UINT uiTempVar = 0;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Wrms\n");
+ return -EACCES;
+ }
- if( (((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) ||
- ((ULONG)sWrmBuffer.Register & 0x3) )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "WRM Done On invalid Address : %x Access Denied.\n",
- (int)sWrmBuffer.Register);
- return -EINVAL;
- }
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.InputLength > sizeof(sWrmBuffer))
+ return -EINVAL;
- uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
- if(!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
- ((uiTempVar == EEPROM_REJECT_REG_1)||
+ /* Get WrmBuffer structure */
+ if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
+
+ if ((((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) ||
+ ((ULONG)sWrmBuffer.Register & 0x3)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Done On invalid Address : %x Access Denied.\n", (int)sWrmBuffer.Register);
+ return -EINVAL;
+ }
+
+ uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
+ if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
+ ((uiTempVar == EEPROM_REJECT_REG_1) ||
(uiTempVar == EEPROM_REJECT_REG_2) ||
(uiTempVar == EEPROM_REJECT_REG_3) ||
(uiTempVar == EEPROM_REJECT_REG_4)) &&
- (cmd == IOCTL_BCM_REGISTER_WRITE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
+ (cmd == IOCTL_BCM_REGISTER_WRITE)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
return -EFAULT;
- }
+ }
- Status = wrmaltWithLock(Adapter, (UINT)sWrmBuffer.Register,
- (PUINT)sWrmBuffer.Data, sWrmBuffer.Length);
- if(Status == STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "WRM Done\n");
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n");
- Status = -EFAULT;
- }
+ Status = wrmaltWithLock(Adapter, (UINT)sWrmBuffer.Register,
+ (PUINT)sWrmBuffer.Data, sWrmBuffer.Length);
+
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "WRM Done\n");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n");
+ Status = -EFAULT;
+ }
+ break;
+ }
+ case IOCTL_BCM_GPIO_SET_REQUEST: {
+ UCHAR ucResetValue[4];
+ UINT value = 0;
+ UINT uiBit = 0;
+ UINT uiOperation = 0;
+
+ GPIO_INFO gpio_info = {0};
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "GPIO Can't be set/clear in Low power Mode");
+ return -EACCES;
+ }
+
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.InputLength > sizeof(gpio_info))
+ return -EINVAL;
+
+ if (copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
+
+ uiBit = gpio_info.uiGpioNumber;
+ uiOperation = gpio_info.uiGpioValue;
+ value = (1<<uiBit);
+
+ if (IsReqGpioIsLedInNVM(Adapter, value) == FALSE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!", value);
+ Status = -EINVAL;
break;
}
- case IOCTL_BCM_GPIO_SET_REQUEST:
- {
- UCHAR ucResetValue[4];
- UINT value =0;
- UINT uiBit = 0;
- UINT uiOperation = 0;
-
- GPIO_INFO gpio_info = {0};
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"GPIO Can't be set/clear in Low power Mode");
- return -EACCES;
- }
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
- if (IoBuffer.InputLength > sizeof(gpio_info))
- return -EINVAL;
- if(copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
- uiBit = gpio_info.uiGpioNumber;
- uiOperation = gpio_info.uiGpioValue;
- value= (1<<uiBit);
+ /* Set - setting 1 */
+ if (uiOperation) {
+ /* Set the gpio output register */
+ Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, (PUINT)(&value), sizeof(UINT));
- if(IsReqGpioIsLedInNVM(Adapter,value) ==FALSE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!",value);
- Status = -EINVAL;
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO bit\n");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to set the %dth GPIO\n", uiBit);
+ break;
+ }
+ } else {
+ /* Set the gpio output register */
+ Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)(&value), sizeof(UINT));
+
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO bit\n");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to clear the %dth GPIO\n", uiBit);
break;
}
+ }
+ Status = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
- if(uiOperation)//Set - setting 1
- {
- //Set the gpio output register
- Status = wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_SET_REG ,
- (PUINT)(&value), sizeof(UINT));
- if(Status == STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO bit\n");
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Failed to set the %dth GPIO \n",uiBit);
- break;
- }
- }
- else//Unset - setting 0
- {
- //Set the gpio output register
- Status = wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_CLR_REG ,
- (PUINT)(&value), sizeof(UINT));
- if(Status == STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Set the GPIO bit\n");
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Failed to clear the %dth GPIO \n",uiBit);
- break;
- }
- }
+ if (STATUS_SUCCESS != Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "GPIO_MODE_REGISTER read failed");
+ break;
+ }
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER,
- (PUINT)ucResetValue, sizeof(UINT));
- if (STATUS_SUCCESS != Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"GPIO_MODE_REGISTER read failed");
- break;
- }
- //Set the gpio mode register to output
- *(UINT*)ucResetValue |= (1<<uiBit);
- Status = wrmaltWithLock(Adapter,GPIO_MODE_REGISTER ,
+ /* Set the gpio mode register to output */
+ *(UINT *)ucResetValue |= (1<<uiBit);
+ Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER,
(PUINT)ucResetValue, sizeof(UINT));
- if(Status == STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Set the GPIO to output Mode\n");
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to put GPIO in Output Mode\n");
- break;
- }
+
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO to output Mode\n");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to put GPIO in Output Mode\n");
+ break;
}
- break;
- case BCM_LED_THREAD_STATE_CHANGE_REQ:
- {
- USER_THREAD_REQ threadReq = { 0 };
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"User made LED thread InActive");
-
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"GPIO Can't be set/clear in Low power Mode");
- Status = -EACCES;
- break;
- }
+ }
+ break;
- if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ case BCM_LED_THREAD_STATE_CHANGE_REQ: {
+ USER_THREAD_REQ threadReq = {0};
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "User made LED thread InActive");
- if (IoBuffer.InputLength > sizeof(threadReq))
- return -EINVAL;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- if (copy_from_user(&threadReq, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "GPIO Can't be set/clear in Low power Mode");
+ Status = -EACCES;
+ break;
+ }
- //if LED thread is running(Actively or Inactively) set it state to make inactive
- if(Adapter->LEDInfo.led_thread_running)
- {
- if(threadReq.ThreadState == LED_THREAD_ACTIVATION_REQ)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Activating thread req");
- Adapter->DriverState = LED_THREAD_ACTIVE;
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"DeActivating Thread req.....");
- Adapter->DriverState = LED_THREAD_INACTIVE;
- }
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- //signal thread.
- wake_up(&Adapter->LEDInfo.notify_led_event);
+ if (IoBuffer.InputLength > sizeof(threadReq))
+ return -EINVAL;
+
+ if (copy_from_user(&threadReq, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
+ /* if LED thread is running(Actively or Inactively) set it state to make inactive */
+ if (Adapter->LEDInfo.led_thread_running) {
+ if (threadReq.ThreadState == LED_THREAD_ACTIVATION_REQ) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Activating thread req");
+ Adapter->DriverState = LED_THREAD_ACTIVE;
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DeActivating Thread req.....");
+ Adapter->DriverState = LED_THREAD_INACTIVE;
}
+
+ /* signal thread. */
+ wake_up(&Adapter->LEDInfo.notify_led_event);
}
- break;
- case IOCTL_BCM_GPIO_STATUS_REQUEST:
- {
- ULONG uiBit = 0;
- UCHAR ucRead[4];
- GPIO_INFO gpio_info = {0};
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- return -EACCES;
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
- if (IoBuffer.InputLength > sizeof(gpio_info))
- return -EINVAL;
- if(copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
- uiBit = gpio_info.uiGpioNumber;
- //Set the gpio output register
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
- (PUINT)ucRead, sizeof(UINT));
- if(Status != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "RDM Failed\n");
- return Status;
- }
+ }
+ break;
- }
- break;
- case IOCTL_BCM_GPIO_MULTI_REQUEST:
- {
- UCHAR ucResetValue[4];
- GPIO_MULTI_INFO gpio_multi_info[MAX_IDX];
- PGPIO_MULTI_INFO pgpio_multi_info = (PGPIO_MULTI_INFO)gpio_multi_info;
-
- memset( pgpio_multi_info, 0, MAX_IDX * sizeof( GPIO_MULTI_INFO));
-
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- return -EINVAL;
- if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
- if (IoBuffer.InputLength > sizeof(gpio_multi_info))
- return -EINVAL;
- if (copy_from_user(&gpio_multi_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ case IOCTL_BCM_GPIO_STATUS_REQUEST: {
+ ULONG uiBit = 0;
+ UCHAR ucRead[4];
+ GPIO_INFO gpio_info = {0};
- if(IsReqGpioIsLedInNVM(Adapter,pgpio_multi_info[WIMAX_IDX].uiGPIOMask)== FALSE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",pgpio_multi_info[WIMAX_IDX].uiGPIOMask,Adapter->gpioBitMap);
- Status = -EINVAL;
- break;
- }
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE))
+ return -EACCES;
- /* Set the gpio output register */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- if( ( pgpio_multi_info[WIMAX_IDX].uiGPIOMask) &
- ( pgpio_multi_info[WIMAX_IDX].uiGPIOCommand))
- {
- /* Set 1's in GPIO OUTPUT REGISTER */
- *(UINT*) ucResetValue = pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
- pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
- pgpio_multi_info[WIMAX_IDX].uiGPIOValue;
+ if (IoBuffer.InputLength > sizeof(gpio_info))
+ return -EINVAL;
- if( *(UINT*) ucResetValue)
- Status = wrmaltWithLock( Adapter, BCM_GPIO_OUTPUT_SET_REG , (PUINT) ucResetValue, sizeof(ULONG));
+ if (copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
- if( Status != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0,"WRM to BCM_GPIO_OUTPUT_SET_REG Failed.");
- return Status;
- }
+ uiBit = gpio_info.uiGpioNumber;
+
+ /* Set the gpio output register */
+ Status = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
+ (PUINT)ucRead, sizeof(UINT));
+
+ if (Status != STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Failed\n");
+ return Status;
+ }
+ }
+ break;
- /* Clear to 0's in GPIO OUTPUT REGISTER */
- *(UINT*) ucResetValue = (pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
- pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
- ( ~( pgpio_multi_info[WIMAX_IDX].uiGPIOValue)));
+ case IOCTL_BCM_GPIO_MULTI_REQUEST: {
+ UCHAR ucResetValue[4];
+ GPIO_MULTI_INFO gpio_multi_info[MAX_IDX];
+ PGPIO_MULTI_INFO pgpio_multi_info = (PGPIO_MULTI_INFO)gpio_multi_info;
- if( *(UINT*) ucResetValue)
- Status = wrmaltWithLock( Adapter, BCM_GPIO_OUTPUT_CLR_REG , (PUINT) ucResetValue, sizeof(ULONG));
+ memset(pgpio_multi_info, 0, MAX_IDX * sizeof(GPIO_MULTI_INFO));
- if( Status != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0,"WRM to BCM_GPIO_OUTPUT_CLR_REG Failed." );
- return Status;
- }
- }
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE))
+ return -EINVAL;
- if( pgpio_multi_info[WIMAX_IDX].uiGPIOMask)
- {
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- if(Status != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0,"RDM to GPIO_PIN_STATE_REGISTER Failed.");
- return Status;
- }
+ if (IoBuffer.InputLength > sizeof(gpio_multi_info))
+ return -EINVAL;
- pgpio_multi_info[WIMAX_IDX].uiGPIOValue = ( *(UINT*)ucResetValue &
- pgpio_multi_info[WIMAX_IDX].uiGPIOMask);
- }
+ if (copy_from_user(&gpio_multi_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
- Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_info, IoBuffer.OutputLength);
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying Content to IOBufer for user space err:%d",Status);
- break;
- }
- }
+ if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_info[WIMAX_IDX].uiGPIOMask) == FALSE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
+ pgpio_multi_info[WIMAX_IDX].uiGPIOMask, Adapter->gpioBitMap);
+ Status = -EINVAL;
break;
- case IOCTL_BCM_GPIO_MODE_REQUEST:
- {
- UCHAR ucResetValue[4];
- GPIO_MULTI_MODE gpio_multi_mode[MAX_IDX];
- PGPIO_MULTI_MODE pgpio_multi_mode = ( PGPIO_MULTI_MODE) gpio_multi_mode;
+ }
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- return -EINVAL;
+ /* Set the gpio output register */
+ if ((pgpio_multi_info[WIMAX_IDX].uiGPIOMask) &
+ (pgpio_multi_info[WIMAX_IDX].uiGPIOCommand)) {
+ /* Set 1's in GPIO OUTPUT REGISTER */
+ *(UINT *)ucResetValue = pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
+ pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
+ pgpio_multi_info[WIMAX_IDX].uiGPIOValue;
- if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
- if (IoBuffer.InputLength > sizeof(gpio_multi_mode))
- return -EINVAL;
- if (copy_from_user(&gpio_multi_mode, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ if (*(UINT *) ucResetValue)
+ Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG,
+ (PUINT)ucResetValue, sizeof(ULONG));
- Status = rdmaltWithLock( Adapter, ( UINT) GPIO_MODE_REGISTER, ( PUINT) ucResetValue, sizeof( UINT));
- if( STATUS_SUCCESS != Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Read of GPIO_MODE_REGISTER failed");
+ if (Status != STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to BCM_GPIO_OUTPUT_SET_REG Failed.");
return Status;
}
- //Validating the request
- if(IsReqGpioIsLedInNVM(Adapter,pgpio_multi_mode[WIMAX_IDX].uiGPIOMask)== FALSE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",pgpio_multi_mode[WIMAX_IDX].uiGPIOMask,Adapter->gpioBitMap);
- Status = -EINVAL;
- break;
- }
+ /* Clear to 0's in GPIO OUTPUT REGISTER */
+ *(UINT *)ucResetValue = (pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
+ pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
+ (~(pgpio_multi_info[WIMAX_IDX].uiGPIOValue)));
- if( pgpio_multi_mode[WIMAX_IDX].uiGPIOMask)
- {
- /* write all OUT's (1's) */
- *( UINT*) ucResetValue |= ( pgpio_multi_mode[WIMAX_IDX].uiGPIOMode &
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
- /* write all IN's (0's) */
- *( UINT*) ucResetValue &= ~( ( ~pgpio_multi_mode[WIMAX_IDX].uiGPIOMode) &
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
-
- /* Currently implemented return the modes of all GPIO's
- * else needs to bit AND with mask
- * */
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT*)ucResetValue;
-
- Status = wrmaltWithLock( Adapter, GPIO_MODE_REGISTER , ( PUINT) ucResetValue, sizeof( ULONG));
- if( Status == STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM to GPIO_MODE_REGISTER Done");
- }
- else
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0,"WRM to GPIO_MODE_REGISTER Failed");
- Status = -EFAULT;
- break;
- }
- }
- else /* if uiGPIOMask is 0 then return mode register configuration */
- {
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *( UINT*) ucResetValue;
+ if (*(UINT *) ucResetValue)
+ Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)ucResetValue, sizeof(ULONG));
+
+ if (Status != STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed.");
+ return Status;
}
- Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_mode, IoBuffer.OutputLength);
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying Content to IOBufer for user space err:%d",Status);
- break;
+ }
+
+ if (pgpio_multi_info[WIMAX_IDX].uiGPIOMask) {
+ Status = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+
+ if (Status != STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM to GPIO_PIN_STATE_REGISTER Failed.");
+ return Status;
}
+
+ pgpio_multi_info[WIMAX_IDX].uiGPIOValue = (*(UINT *)ucResetValue &
+ pgpio_multi_info[WIMAX_IDX].uiGPIOMask);
}
- break;
- case IOCTL_MAC_ADDR_REQ:
- case IOCTL_LINK_REQ:
- case IOCTL_CM_REQUEST:
- case IOCTL_SS_INFO_REQ:
- case IOCTL_SEND_CONTROL_MESSAGE:
- case IOCTL_IDLE_REQ:
- {
- PVOID pvBuffer=NULL;
+ Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_info, IoBuffer.OutputLength);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Failed while copying Content to IOBufer for user space err:%d", Status);
+ break;
+ }
+ }
+ break;
- /* Copy Ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ case IOCTL_BCM_GPIO_MODE_REQUEST: {
+ UCHAR ucResetValue[4];
+ GPIO_MULTI_MODE gpio_multi_mode[MAX_IDX];
+ PGPIO_MULTI_MODE pgpio_multi_mode = (PGPIO_MULTI_MODE)gpio_multi_mode;
- /* FIXME: don't accept any length from user */
- pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
- if(!pvBuffer)
- return -ENOMEM;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE))
+ return -EINVAL;
- if(copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
- {
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.InputLength > sizeof(gpio_multi_mode))
+ return -EINVAL;
+
+ if (copy_from_user(&gpio_multi_mode, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
+
+ Status = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+
+ if (STATUS_SUCCESS != Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read of GPIO_MODE_REGISTER failed");
+ return Status;
+ }
+
+ /* Validating the request */
+ if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) == FALSE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMask, Adapter->gpioBitMap);
+ Status = -EINVAL;
+ break;
+ }
+
+ if (pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) {
+ /* write all OUT's (1's) */
+ *(UINT *) ucResetValue |= (pgpio_multi_mode[WIMAX_IDX].uiGPIOMode &
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
+
+ /* write all IN's (0's) */
+ *(UINT *) ucResetValue &= ~((~pgpio_multi_mode[WIMAX_IDX].uiGPIOMode) &
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
+
+ /* Currently implemented return the modes of all GPIO's
+ * else needs to bit AND with mask
+ */
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue;
+
+ Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(ULONG));
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "WRM to GPIO_MODE_REGISTER Done");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "WRM to GPIO_MODE_REGISTER Failed");
Status = -EFAULT;
- kfree(pvBuffer);
break;
}
+ } else {
+/* if uiGPIOMask is 0 then return mode register configuration */
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue;
+ }
- down(&Adapter->LowPowerModeSync);
- Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue,
- !Adapter->bPreparingForLowPowerMode,
- (1 * HZ));
- if(Status == -ERESTARTSYS)
- goto cntrlEnd;
-
- if(Adapter->bPreparingForLowPowerMode)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Preparing Idle Mode is still True - Hence Rejecting control message\n");
- Status = STATUS_FAILURE ;
- goto cntrlEnd ;
- }
- Status = CopyBufferToControlPacket(Adapter, (PVOID)pvBuffer);
- cntrlEnd:
- up(&Adapter->LowPowerModeSync);
+ Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_mode, IoBuffer.OutputLength);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Failed while copying Content to IOBufer for user space err:%d", Status);
+ break;
+ }
+ }
+ break;
+
+ case IOCTL_MAC_ADDR_REQ:
+ case IOCTL_LINK_REQ:
+ case IOCTL_CM_REQUEST:
+ case IOCTL_SS_INFO_REQ:
+ case IOCTL_SEND_CONTROL_MESSAGE:
+ case IOCTL_IDLE_REQ: {
+ PVOID pvBuffer = NULL;
+
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.InputLength < sizeof(struct link_request))
+ return -EINVAL;
+
+ if (IoBuffer.InputLength > MAX_CNTL_PKT_SIZE)
+ return -EINVAL;
+
+ pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
+ if (!pvBuffer)
+ return -ENOMEM;
+
+ if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
+ Status = -EFAULT;
kfree(pvBuffer);
break;
}
- case IOCTL_BCM_BUFFER_DOWNLOAD_START:
- {
- INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock) ;
- if(NVMAccess)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
- return -EACCES;
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Starting the firmware download PID =0x%x!!!!\n", current->pid);
- if(!down_trylock(&Adapter->fw_download_sema))
- {
- Adapter->bBinDownloaded=FALSE;
- Adapter->fw_download_process_pid=current->pid;
- Adapter->bCfgDownloaded=FALSE;
- Adapter->fw_download_done=FALSE;
- netif_carrier_off(Adapter->dev);
- netif_stop_queue(Adapter->dev);
- Status = reset_card_proc(Adapter);
- if(Status)
- {
- pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
- up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- break;
- }
- mdelay(10);
- }
- else
- {
- Status = -EBUSY;
+ down(&Adapter->LowPowerModeSync);
+ Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue,
+ !Adapter->bPreparingForLowPowerMode,
+ (1 * HZ));
+ if (Status == -ERESTARTSYS)
+ goto cntrlEnd;
+
+ if (Adapter->bPreparingForLowPowerMode) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Preparing Idle Mode is still True - Hence Rejecting control message\n");
+ Status = STATUS_FAILURE;
+ goto cntrlEnd;
+ }
+ Status = CopyBufferToControlPacket(Adapter, (PVOID)pvBuffer);
+
+cntrlEnd:
+ up(&Adapter->LowPowerModeSync);
+ kfree(pvBuffer);
+ break;
+ }
+
+ case IOCTL_BCM_BUFFER_DOWNLOAD_START: {
+ INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
+ if (NVMAccess) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
+ return -EACCES;
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Starting the firmware download PID =0x%x!!!!\n", current->pid);
+
+ if (!down_trylock(&Adapter->fw_download_sema)) {
+ Adapter->bBinDownloaded = FALSE;
+ Adapter->fw_download_process_pid = current->pid;
+ Adapter->bCfgDownloaded = FALSE;
+ Adapter->fw_download_done = FALSE;
+ netif_carrier_off(Adapter->dev);
+ netif_stop_queue(Adapter->dev);
+ Status = reset_card_proc(Adapter);
+ if (Status) {
+ pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
+ up(&Adapter->fw_download_sema);
+ up(&Adapter->NVMRdmWrmLock);
+ break;
}
- up(&Adapter->NVMRdmWrmLock);
- break;
+ mdelay(10);
+ } else {
+ Status = -EBUSY;
}
- case IOCTL_BCM_BUFFER_DOWNLOAD:
- {
- FIRMWARE_INFO *psFwInfo = NULL;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Starting the firmware download PID =0x%x!!!!\n", current->pid);
- do{
- if(!down_trylock(&Adapter->fw_download_sema))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Invalid way to download buffer. Use Start and then call this!!!\n");
- Status=-EINVAL;
- break;
- }
- /* Copy Ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ up(&Adapter->NVMRdmWrmLock);
+ break;
+ }
+
+ case IOCTL_BCM_BUFFER_DOWNLOAD: {
+ FIRMWARE_INFO *psFwInfo = NULL;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Starting the firmware download PID =0x%x!!!!\n", current->pid);
+ do {
+ if (!down_trylock(&Adapter->fw_download_sema)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Invalid way to download buffer. Use Start and then call this!!!\n");
+ Status = -EINVAL;
+ break;
+ }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Length for FW DLD is : %lx\n",
- IoBuffer.InputLength);
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- if (IoBuffer.InputLength > sizeof(FIRMWARE_INFO))
- return -EINVAL;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Length for FW DLD is : %lx\n", IoBuffer.InputLength);
- psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
- if(!psFwInfo)
- return -ENOMEM;
+ if (IoBuffer.InputLength > sizeof(FIRMWARE_INFO))
+ return -EINVAL;
- if(copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
+ if (!psFwInfo)
+ return -ENOMEM;
- if(!psFwInfo->pvMappedFirmwareAddress ||
- (psFwInfo->u32FirmwareLength == 0))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Something else is wrong %lu\n",
- psFwInfo->u32FirmwareLength);
- Status = -EINVAL;
- break;
- }
- Status = bcm_ioctl_fw_download(Adapter, psFwInfo);
- if(Status != STATUS_SUCCESS)
- {
- if(psFwInfo->u32StartingAddress==CONFIG_BEGIN_ADDR)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "IOCTL: Configuration File Upload Failed\n");
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "IOCTL: Firmware File Upload Failed\n");
- }
- //up(&Adapter->fw_download_sema);
+ if (copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
- if(Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
- Adapter->DriverState = DRIVER_INIT;
- Adapter->LEDInfo.bLedInitDone = FALSE;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
- }
- break ;
- }while(0);
+ if (!psFwInfo->pvMappedFirmwareAddress ||
+ (psFwInfo->u32FirmwareLength == 0)) {
- if(Status != STATUS_SUCCESS)
- up(&Adapter->fw_download_sema);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "IOCTL: Firmware File Uploaded\n");
- kfree(psFwInfo);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Something else is wrong %lu\n",
+ psFwInfo->u32FirmwareLength);
+ Status = -EINVAL;
break;
}
- case IOCTL_BCM_BUFFER_DOWNLOAD_STOP:
- {
- INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
- if(NVMAccess)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, " FW download blocked as EEPROM Read/Write is in progress\n");
- up(&Adapter->fw_download_sema);
- return -EACCES;
- }
- if(down_trylock(&Adapter->fw_download_sema))
- {
- Adapter->bBinDownloaded=TRUE;
- Adapter->bCfgDownloaded=TRUE;
- atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
-
- Adapter->CurrNumRecvDescs=0;
- Adapter->downloadDDR = 0;
-
- //setting the Mips to Run
- Status = run_card_proc(Adapter);
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Firm Download Failed\n");
- up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- break;
- }
+
+ Status = bcm_ioctl_fw_download(Adapter, psFwInfo);
+
+ if (Status != STATUS_SUCCESS) {
+ if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Configuration File Upload Failed\n");
else
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Firm Download Over...\n");
- mdelay(10);
- /* Wait for MailBox Interrupt */
- if(StartInterruptUrb((PS_INTERFACE_ADAPTER)Adapter->pvInterfaceAdapter))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Unable to send interrupt...\n");
- }
- timeout = 5*HZ;
- Adapter->waiting_to_fw_download_done = FALSE;
- wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
- Adapter->waiting_to_fw_download_done, timeout);
- Adapter->fw_download_process_pid=INVALID_PID;
- Adapter->fw_download_done=TRUE;
- atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
- Adapter->CurrNumRecvDescs = 0;
- Adapter->PrevNumRecvDescs = 0;
- atomic_set(&Adapter->cntrlpktCnt,0);
- Adapter->LinkUpStatus = 0;
- Adapter->LinkStatus = 0;
-
- if(Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
- Adapter->DriverState = FW_DOWNLOAD_DONE;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Firmware File Upload Failed\n");
+
+ /* up(&Adapter->fw_download_sema); */
+
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ Adapter->DriverState = DRIVER_INIT;
+ Adapter->LEDInfo.bLedInitDone = FALSE;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
+ }
+ break;
- if(!timeout)
- {
- Status = -ENODEV;
- }
+ } while (0);
+
+ if (Status != STATUS_SUCCESS)
+ up(&Adapter->fw_download_sema);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "IOCTL: Firmware File Uploaded\n");
+ kfree(psFwInfo);
+ break;
+ }
+
+ case IOCTL_BCM_BUFFER_DOWNLOAD_STOP: {
+ INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
+
+ if (NVMAccess) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "FW download blocked as EEPROM Read/Write is in progress\n");
+ up(&Adapter->fw_download_sema);
+ return -EACCES;
+ }
+
+ if (down_trylock(&Adapter->fw_download_sema)) {
+ Adapter->bBinDownloaded = TRUE;
+ Adapter->bCfgDownloaded = TRUE;
+ atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
+ Adapter->CurrNumRecvDescs = 0;
+ Adapter->downloadDDR = 0;
+
+ /* setting the Mips to Run */
+ Status = run_card_proc(Adapter);
+
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Firm Download Failed\n");
+ up(&Adapter->fw_download_sema);
+ up(&Adapter->NVMRdmWrmLock);
+ break;
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "Firm Download Over...\n");
}
- else
- {
- Status = -EINVAL;
+
+ mdelay(10);
+
+ /* Wait for MailBox Interrupt */
+ if (StartInterruptUrb((PS_INTERFACE_ADAPTER)Adapter->pvInterfaceAdapter))
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to send interrupt...\n");
+
+ timeout = 5*HZ;
+ Adapter->waiting_to_fw_download_done = FALSE;
+ wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
+ Adapter->waiting_to_fw_download_done, timeout);
+ Adapter->fw_download_process_pid = INVALID_PID;
+ Adapter->fw_download_done = TRUE;
+ atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
+ Adapter->CurrNumRecvDescs = 0;
+ Adapter->PrevNumRecvDescs = 0;
+ atomic_set(&Adapter->cntrlpktCnt, 0);
+ Adapter->LinkUpStatus = 0;
+ Adapter->LinkStatus = 0;
+
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ Adapter->DriverState = FW_DOWNLOAD_DONE;
+ wake_up(&Adapter->LEDInfo.notify_led_event);
}
- up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- break;
+
+ if (!timeout)
+ Status = -ENODEV;
+ } else {
+ Status = -EINVAL;
}
- case IOCTL_BE_BUCKET_SIZE:
- Status = 0;
- if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
- Status = -EFAULT;
- break;
- case IOCTL_RTPS_BUCKET_SIZE:
- Status = 0;
- if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg))
+ up(&Adapter->fw_download_sema);
+ up(&Adapter->NVMRdmWrmLock);
+ break;
+ }
+
+ case IOCTL_BE_BUCKET_SIZE:
+ Status = 0;
+ if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
+ Status = -EFAULT;
+ break;
+
+ case IOCTL_RTPS_BUCKET_SIZE:
+ Status = 0;
+ if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg))
+ Status = -EFAULT;
+ break;
+
+ case IOCTL_CHIP_RESET: {
+ INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
+ if (NVMAccess) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, " IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
+ return -EACCES;
+ }
+
+ down(&Adapter->RxAppControlQueuelock);
+ Status = reset_card_proc(Adapter);
+ flushAllAppQ();
+ up(&Adapter->RxAppControlQueuelock);
+ up(&Adapter->NVMRdmWrmLock);
+ ResetCounters(Adapter);
+ break;
+ }
+
+ case IOCTL_QOS_THRESHOLD: {
+ USHORT uiLoopIndex;
+
+ Status = 0;
+ for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
+ if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold,
+ (unsigned long __user *)arg)) {
Status = -EFAULT;
- break;
- case IOCTL_CHIP_RESET:
- {
- INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
- if(NVMAccess)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, " IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
- return -EACCES;
+ break;
}
- down(&Adapter->RxAppControlQueuelock);
- Status = reset_card_proc(Adapter);
- flushAllAppQ();
- up(&Adapter->RxAppControlQueuelock);
- up(&Adapter->NVMRdmWrmLock);
- ResetCounters(Adapter);
- break;
}
- case IOCTL_QOS_THRESHOLD:
- {
- USHORT uiLoopIndex;
+ break;
+ }
- Status = 0;
- for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
- if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold,
- (unsigned long __user *)arg)) {
- Status = -EFAULT;
- break;
- }
- }
- break;
+ case IOCTL_DUMP_PACKET_INFO:
+ DumpPackInfo(Adapter);
+ DumpPhsRules(&Adapter->stBCMPhsContext);
+ Status = STATUS_SUCCESS;
+ break;
+
+ case IOCTL_GET_PACK_INFO:
+ if (copy_to_user(argp, &Adapter->PackInfo, sizeof(PacketInfo)*NO_OF_QUEUES))
+ return -EFAULT;
+ Status = STATUS_SUCCESS;
+ break;
+
+ case IOCTL_BCM_SWITCH_TRANSFER_MODE: {
+ UINT uiData = 0;
+ if (copy_from_user(&uiData, argp, sizeof(UINT)))
+ return -EFAULT;
+
+ if (uiData) {
+ /* Allow All Packets */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: ETH_PACKET_TUNNELING_MODE\n");
+ Adapter->TransferMode = ETH_PACKET_TUNNELING_MODE;
+ } else {
+ /* Allow IP only Packets */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: IP_PACKET_ONLY_MODE\n");
+ Adapter->TransferMode = IP_PACKET_ONLY_MODE;
}
+ Status = STATUS_SUCCESS;
+ break;
+ }
- case IOCTL_DUMP_PACKET_INFO:
+ case IOCTL_BCM_GET_DRIVER_VERSION: {
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- DumpPackInfo(Adapter);
- DumpPhsRules(&Adapter->stBCMPhsContext);
- Status = STATUS_SUCCESS;
- break;
+ if (copy_to_user(IoBuffer.OutputBuffer, VER_FILEVERSION_STR, IoBuffer.OutputLength))
+ return -EFAULT;
+ Status = STATUS_SUCCESS;
+ break;
+ }
- case IOCTL_GET_PACK_INFO:
- if(copy_to_user(argp, &Adapter->PackInfo, sizeof(PacketInfo)*NO_OF_QUEUES))
- return -EFAULT;
- Status = STATUS_SUCCESS;
+ case IOCTL_BCM_GET_CURRENT_STATUS: {
+ LINK_STATE link_state;
+
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user failed..\n");
+ Status = -EFAULT;
break;
- case IOCTL_BCM_SWITCH_TRANSFER_MODE:
- {
- UINT uiData = 0;
- if(copy_from_user(&uiData, argp, sizeof(UINT)))
- return -EFAULT;
+ }
- if(uiData) /* Allow All Packets */
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: ETH_PACKET_TUNNELING_MODE\n");
- Adapter->TransferMode = ETH_PACKET_TUNNELING_MODE;
- }
- else /* Allow IP only Packets */
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: IP_PACKET_ONLY_MODE\n");
- Adapter->TransferMode = IP_PACKET_ONLY_MODE;
- }
- Status = STATUS_SUCCESS;
+ if (IoBuffer.OutputLength != sizeof(link_state)) {
+ Status = -EINVAL;
break;
}
- case IOCTL_BCM_GET_DRIVER_VERSION:
- {
- /* Copy Ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ memset(&link_state, 0, sizeof(link_state));
+ link_state.bIdleMode = Adapter->IdleMode;
+ link_state.bShutdownMode = Adapter->bShutStatus;
+ link_state.ucLinkStatus = Adapter->LinkStatus;
- if(copy_to_user(IoBuffer.OutputBuffer, VER_FILEVERSION_STR, IoBuffer.OutputLength))
- return -EFAULT;
- Status = STATUS_SUCCESS;
+ if (copy_to_user(IoBuffer.OutputBuffer, &link_state, min_t(size_t, sizeof(link_state), IoBuffer.OutputLength))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
+ Status = -EFAULT;
break;
}
- case IOCTL_BCM_GET_CURRENT_STATUS:
- {
- LINK_STATE link_state;
+ Status = STATUS_SUCCESS;
+ break;
+ }
- /* Copy Ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copy_from_user failed..\n");
- Status = -EFAULT;
- break;
- }
- if (IoBuffer.OutputLength != sizeof(link_state)) {
- Status = -EINVAL;
- break;
- }
+ case IOCTL_BCM_SET_MAC_TRACING: {
+ UINT tracing_flag;
- memset(&link_state, 0, sizeof(link_state));
- link_state.bIdleMode = Adapter->IdleMode;
- link_state.bShutdownMode = Adapter->bShutStatus;
- link_state.ucLinkStatus = Adapter->LinkStatus;
+ /* copy ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- if (copy_to_user(IoBuffer.OutputBuffer, &link_state,
- min_t(size_t, sizeof(link_state), IoBuffer.OutputLength)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
- Status = -EFAULT;
- break;
- }
- Status = STATUS_SUCCESS;
- break;
+ if (copy_from_user(&tracing_flag, IoBuffer.InputBuffer, sizeof(UINT)))
+ return -EFAULT;
+
+ if (tracing_flag)
+ Adapter->pTarangs->MacTracingEnabled = TRUE;
+ else
+ Adapter->pTarangs->MacTracingEnabled = FALSE;
+ break;
+ }
+
+ case IOCTL_BCM_GET_DSX_INDICATION: {
+ ULONG ulSFId = 0;
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.OutputLength < sizeof(stLocalSFAddIndicationAlt)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Mismatch req: %lx needed is =0x%zx!!!",
+ IoBuffer.OutputLength, sizeof(stLocalSFAddIndicationAlt));
+ return -EINVAL;
}
- case IOCTL_BCM_SET_MAC_TRACING:
- {
- UINT tracing_flag;
-
- /* copy ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
-
- if(copy_from_user(&tracing_flag,IoBuffer.InputBuffer,sizeof(UINT)))
- return -EFAULT;
-
- if (tracing_flag)
- Adapter->pTarangs->MacTracingEnabled = TRUE;
- else
- Adapter->pTarangs->MacTracingEnabled = FALSE;
- break;
- }
- case IOCTL_BCM_GET_DSX_INDICATION:
- {
- ULONG ulSFId=0;
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
- if(IoBuffer.OutputLength < sizeof(stLocalSFAddIndicationAlt))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,
- "Mismatch req: %lx needed is =0x%zx!!!",
- IoBuffer.OutputLength, sizeof(stLocalSFAddIndicationAlt));
- return -EINVAL;
- }
+ if (copy_from_user(&ulSFId, IoBuffer.InputBuffer, sizeof(ulSFId)))
+ return -EFAULT;
- if(copy_from_user(&ulSFId, IoBuffer.InputBuffer, sizeof(ulSFId)))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Get DSX Data SF ID is =%lx\n", ulSFId);
+ get_dsx_sf_data_to_application(Adapter, ulSFId, IoBuffer.OutputBuffer);
+ Status = STATUS_SUCCESS;
+ }
+ break;
+
+ case IOCTL_BCM_GET_HOST_MIBS: {
+ PVOID temp_buff;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Get DSX Data SF ID is =%lx\n", ulSFId );
- get_dsx_sf_data_to_application(Adapter, ulSFId, IoBuffer.OutputBuffer);
- Status=STATUS_SUCCESS;
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.OutputLength != sizeof(S_MIBS_HOST_STATS_MIBS)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Length Check failed %lu %zd\n",
+ IoBuffer.OutputLength, sizeof(S_MIBS_HOST_STATS_MIBS));
+ return -EINVAL;
}
- break;
- case IOCTL_BCM_GET_HOST_MIBS:
- {
- PVOID temp_buff;
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ /* FIXME: HOST_STATS are too big for kmalloc (122048)! */
+ temp_buff = kzalloc(sizeof(S_MIBS_HOST_STATS_MIBS), GFP_KERNEL);
+ if (!temp_buff)
+ return STATUS_FAILURE;
- if(IoBuffer.OutputLength != sizeof(S_MIBS_HOST_STATS_MIBS))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,
- "Length Check failed %lu %zd\n",
- IoBuffer.OutputLength, sizeof(S_MIBS_HOST_STATS_MIBS));
- return -EINVAL;
- }
+ Status = ProcessGetHostMibs(Adapter, temp_buff);
+ GetDroppedAppCntrlPktMibs(temp_buff, pTarang);
- /* FIXME: HOST_STATS are too big for kmalloc (122048)! */
- temp_buff = kzalloc(sizeof(S_MIBS_HOST_STATS_MIBS), GFP_KERNEL);
- if(!temp_buff)
- return STATUS_FAILURE;
+ if (Status != STATUS_FAILURE)
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(S_MIBS_HOST_STATS_MIBS)))
+ Status = -EFAULT;
- Status = ProcessGetHostMibs(Adapter, temp_buff);
- GetDroppedAppCntrlPktMibs(temp_buff, pTarang);
+ kfree(temp_buff);
+ break;
+ }
- if (Status != STATUS_FAILURE)
- if(copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(S_MIBS_HOST_STATS_MIBS)))
- Status = -EFAULT;
+ case IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE:
+ if ((FALSE == Adapter->bTriedToWakeUpFromlowPowerMode) && (TRUE == Adapter->IdleMode)) {
+ Adapter->usIdleModePattern = ABORT_IDLE_MODE;
+ Adapter->bWakeUpDevice = TRUE;
+ wake_up(&Adapter->process_rx_cntrlpkt);
+ }
- kfree(temp_buff);
+ Status = STATUS_SUCCESS;
+ break;
+
+ case IOCTL_BCM_BULK_WRM: {
+ PBULKWRM_BUFFER pBulkBuffer;
+ UINT uiTempVar = 0;
+ PCHAR pvBuffer = NULL;
+
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
+ BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle/Shutdown Mode, Blocking Wrms\n");
+ Status = -EACCES;
break;
}
- case IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE:
- if((FALSE == Adapter->bTriedToWakeUpFromlowPowerMode) && (TRUE==Adapter->IdleMode))
- {
- Adapter->usIdleModePattern = ABORT_IDLE_MODE;
- Adapter->bWakeUpDevice = TRUE;
- wake_up(&Adapter->process_rx_cntrlpkt);
- }
- Status = STATUS_SUCCESS;
- break;
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- case IOCTL_BCM_BULK_WRM:
- {
- PBULKWRM_BUFFER pBulkBuffer;
- UINT uiTempVar=0;
- PCHAR pvBuffer = NULL;
-
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle/Shutdown Mode, Blocking Wrms\n");
- Status = -EACCES;
- break;
- }
+ /* FIXME: restrict length */
+ pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
+ if (!pvBuffer)
+ return -ENOMEM;
- /* Copy Ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ /* Get WrmBuffer structure */
+ if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
+ kfree(pvBuffer);
+ Status = -EFAULT;
+ break;
+ }
- /* FIXME: restrict length */
- pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
- if(!pvBuffer)
- return -ENOMEM;
-
- /* Get WrmBuffer structure */
- if(copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
- {
- kfree(pvBuffer);
- Status = -EFAULT;
- break;
- }
+ pBulkBuffer = (PBULKWRM_BUFFER)pvBuffer;
- pBulkBuffer = (PBULKWRM_BUFFER)pvBuffer;
+ if (((ULONG)pBulkBuffer->Register & 0x0F000000) != 0x0F000000 ||
+ ((ULONG)pBulkBuffer->Register & 0x3)) {
+ kfree(pvBuffer);
+ BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Done On invalid Address : %x Access Denied.\n", (int)pBulkBuffer->Register);
+ Status = -EINVAL;
+ break;
+ }
- if(((ULONG)pBulkBuffer->Register & 0x0F000000) != 0x0F000000 ||
- ((ULONG)pBulkBuffer->Register & 0x3))
- {
- kfree(pvBuffer);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0,"WRM Done On invalid Address : %x Access Denied.\n",(int)pBulkBuffer->Register);
- Status = -EINVAL;
- break;
- }
+ uiTempVar = pBulkBuffer->Register & EEPROM_REJECT_MASK;
+ if (!((Adapter->pstargetparams->m_u32Customize)&VSG_MODE) &&
+ ((uiTempVar == EEPROM_REJECT_REG_1) ||
+ (uiTempVar == EEPROM_REJECT_REG_2) ||
+ (uiTempVar == EEPROM_REJECT_REG_3) ||
+ (uiTempVar == EEPROM_REJECT_REG_4)) &&
+ (cmd == IOCTL_BCM_REGISTER_WRITE)) {
+ kfree(pvBuffer);
+ BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
+ Status = -EFAULT;
+ break;
+ }
- uiTempVar = pBulkBuffer->Register & EEPROM_REJECT_MASK;
- if(!((Adapter->pstargetparams->m_u32Customize)&VSG_MODE)
- && ((uiTempVar == EEPROM_REJECT_REG_1)||
- (uiTempVar == EEPROM_REJECT_REG_2) ||
- (uiTempVar == EEPROM_REJECT_REG_3) ||
- (uiTempVar == EEPROM_REJECT_REG_4)) &&
- (cmd == IOCTL_BCM_REGISTER_WRITE))
- {
- kfree(pvBuffer);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0,"EEPROM Access Denied, not in VSG Mode\n");
- Status = -EFAULT;
- break;
- }
+ if (pBulkBuffer->SwapEndian == FALSE)
+ Status = wrmWithLock(Adapter, (UINT)pBulkBuffer->Register, (PCHAR)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG));
+ else
+ Status = wrmaltWithLock(Adapter, (UINT)pBulkBuffer->Register, (PUINT)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG));
- if(pBulkBuffer->SwapEndian == FALSE)
- Status = wrmWithLock(Adapter, (UINT)pBulkBuffer->Register, (PCHAR)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG));
- else
- Status = wrmaltWithLock(Adapter, (UINT)pBulkBuffer->Register, (PUINT)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG));
+ if (Status != STATUS_SUCCESS)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Failed\n");
- if(Status != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "WRM Failed\n");
- }
+ kfree(pvBuffer);
+ break;
+ }
- kfree(pvBuffer);
- break;
- }
+ case IOCTL_BCM_GET_NVM_SIZE:
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- case IOCTL_BCM_GET_NVM_SIZE:
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ if (Adapter->eNVMType == NVM_EEPROM || Adapter->eNVMType == NVM_FLASH) {
+ if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiNVMDSDSize, sizeof(UINT)))
return -EFAULT;
+ }
- if(Adapter->eNVMType == NVM_EEPROM || Adapter->eNVMType == NVM_FLASH ) {
- if(copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiNVMDSDSize, sizeof(UINT)))
- return -EFAULT;
- }
- Status = STATUS_SUCCESS ;
- break;
+ Status = STATUS_SUCCESS;
+ break;
- case IOCTL_BCM_CAL_INIT :
+ case IOCTL_BCM_CAL_INIT: {
+ UINT uiSectorSize = 0 ;
+ if (Adapter->eNVMType == NVM_FLASH) {
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- {
- UINT uiSectorSize = 0 ;
- if(Adapter->eNVMType == NVM_FLASH)
- {
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ if (copy_from_user(&uiSectorSize, IoBuffer.InputBuffer, sizeof(UINT)))
+ return -EFAULT;
- if (copy_from_user(&uiSectorSize, IoBuffer.InputBuffer, sizeof(UINT)))
+ if ((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE)) {
+ if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiSectorSize,
+ sizeof(UINT)))
+ return -EFAULT;
+ } else {
+ if (IsFlash2x(Adapter)) {
+ if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiSectorSize, sizeof(UINT)))
return -EFAULT;
-
- if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE))
- {
- if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiSectorSize,
- sizeof(UINT)))
- return -EFAULT;
- }
- else
- {
- if(IsFlash2x(Adapter))
- {
- if (copy_to_user(IoBuffer.OutputBuffer,
- &Adapter->uiSectorSize ,
- sizeof(UINT)))
- return -EFAULT;
- }
- else
- {
- if((TRUE == Adapter->bShutStatus) ||
- (TRUE == Adapter->IdleMode))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Device is in Idle/Shutdown Mode\n");
- return -EACCES;
- }
-
- Adapter->uiSectorSize = uiSectorSize ;
- BcmUpdateSectorSize(Adapter,Adapter->uiSectorSize);
- }
+ } else {
+ if ((TRUE == Adapter->bShutStatus) || (TRUE == Adapter->IdleMode)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device is in Idle/Shutdown Mode\n");
+ return -EACCES;
}
- Status = STATUS_SUCCESS ;
- }
- else
- {
- Status = STATUS_FAILURE;
+
+ Adapter->uiSectorSize = uiSectorSize;
+ BcmUpdateSectorSize(Adapter, Adapter->uiSectorSize);
}
}
- break;
- case IOCTL_BCM_SET_DEBUG :
-#ifdef DEBUG
- {
- USER_BCM_DBG_STATE sUserDebugState;
-
-// BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "Entered the ioctl %x \n", IOCTL_BCM_SET_DEBUG );
+ Status = STATUS_SUCCESS;
+ } else {
+ Status = STATUS_FAILURE;
+ }
+ }
+ break;
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "In SET_DEBUG ioctl\n");
- if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ case IOCTL_BCM_SET_DEBUG:
+#ifdef DEBUG
+ {
+ USER_BCM_DBG_STATE sUserDebugState;
- if (copy_from_user(&sUserDebugState, IoBuffer.InputBuffer, sizeof(USER_BCM_DBG_STATE)))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "In SET_DEBUG ioctl\n");
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+ if (copy_from_user(&sUserDebugState, IoBuffer.InputBuffer, sizeof(USER_BCM_DBG_STATE)))
+ return -EFAULT;
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ",
+ BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ",
sUserDebugState.OnOff, sUserDebugState.Type);
- //sUserDebugState.Subtype <<= 1;
- sUserDebugState.Subtype = 1 << sUserDebugState.Subtype;
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "actual Subtype=0x%x\n", sUserDebugState.Subtype);
-
- // Update new 'DebugState' in the Adapter
- Adapter->stDebugState.type |= sUserDebugState.Type;
- /* Subtype: A bitmap of 32 bits for Subtype per Type.
- * Valid indexes in 'subtype' array: 1,2,4,8
- * corresponding to valid Type values. Hence we can use the 'Type' field
- * as the index value, ignoring the array entries 0,3,5,6,7 !
- */
- if (sUserDebugState.OnOff)
- Adapter->stDebugState.subtype[sUserDebugState.Type] |= sUserDebugState.Subtype;
- else
- Adapter->stDebugState.subtype[sUserDebugState.Type] &= ~sUserDebugState.Subtype;
-
- BCM_SHOW_DEBUG_BITMAP(Adapter);
+ /* sUserDebugState.Subtype <<= 1; */
+ sUserDebugState.Subtype = 1 << sUserDebugState.Subtype;
+ BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "actual Subtype=0x%x\n", sUserDebugState.Subtype);
+
+ /* Update new 'DebugState' in the Adapter */
+ Adapter->stDebugState.type |= sUserDebugState.Type;
+ /* Subtype: A bitmap of 32 bits for Subtype per Type.
+ * Valid indexes in 'subtype' array: 1,2,4,8
+ * corresponding to valid Type values. Hence we can use the 'Type' field
+ * as the index value, ignoring the array entries 0,3,5,6,7 !
+ */
+ if (sUserDebugState.OnOff)
+ Adapter->stDebugState.subtype[sUserDebugState.Type] |= sUserDebugState.Subtype;
+ else
+ Adapter->stDebugState.subtype[sUserDebugState.Type] &= ~sUserDebugState.Subtype;
- }
+ BCM_SHOW_DEBUG_BITMAP(Adapter);
+ }
#endif
+ break;
+
+ case IOCTL_BCM_NVM_READ:
+ case IOCTL_BCM_NVM_WRITE: {
+ NVM_READWRITE stNVMReadWrite;
+ PUCHAR pReadData = NULL;
+ ULONG ulDSDMagicNumInUsrBuff = 0;
+ struct timeval tv0, tv1;
+ memset(&tv0, 0, sizeof(struct timeval));
+ memset(&tv1, 0, sizeof(struct timeval));
+ if ((Adapter->eNVMType == NVM_FLASH) && (Adapter->uiFlashLayoutMajorVersion == 0)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "The Flash Control Section is Corrupted. Hence Rejection on NVM Read/Write\n");
+ Status = -EFAULT;
break;
- case IOCTL_BCM_NVM_READ:
- case IOCTL_BCM_NVM_WRITE:
- {
- NVM_READWRITE stNVMReadWrite;
- PUCHAR pReadData = NULL;
- ULONG ulDSDMagicNumInUsrBuff = 0;
- struct timeval tv0, tv1;
- memset(&tv0,0,sizeof(struct timeval));
- memset(&tv1,0,sizeof(struct timeval));
- if((Adapter->eNVMType == NVM_FLASH) && (Adapter->uiFlashLayoutMajorVersion == 0))
- {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,"The Flash Control Section is Corrupted. Hence Rejection on NVM Read/Write\n");
- Status = -EFAULT;
- break;
- }
+ }
- if(IsFlash2x(Adapter))
- {
- if((Adapter->eActiveDSD != DSD0) &&
- (Adapter->eActiveDSD != DSD1) &&
- (Adapter->eActiveDSD != DSD2))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"No DSD is active..hence NVM Command is blocked");
- return STATUS_FAILURE ;
- }
- }
+ if (IsFlash2x(Adapter)) {
+ if ((Adapter->eActiveDSD != DSD0) &&
+ (Adapter->eActiveDSD != DSD1) &&
+ (Adapter->eActiveDSD != DSD2)) {
- /* Copy Ioctl Buffer structure */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No DSD is active..hence NVM Command is blocked");
+ return STATUS_FAILURE ;
+ }
+ }
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- if(copy_from_user(&stNVMReadWrite,
- (IOCTL_BCM_NVM_READ == cmd) ? IoBuffer.OutputBuffer : IoBuffer.InputBuffer,
- sizeof(NVM_READWRITE)))
- return -EFAULT;
+ if (copy_from_user(&stNVMReadWrite,
+ (IOCTL_BCM_NVM_READ == cmd) ? IoBuffer.OutputBuffer : IoBuffer.InputBuffer,
+ sizeof(NVM_READWRITE)))
+ return -EFAULT;
- //
- // Deny the access if the offset crosses the cal area limit.
- //
- if((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) > Adapter->uiNVMDSDSize)
- {
- //BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Can't allow access beyond NVM Size: 0x%x 0x%x\n", stNVMReadWrite.uiOffset ,
-// stNVMReadWrite.uiNumBytes);
- Status = STATUS_FAILURE;
- break;
- }
+ /*
+ * Deny the access if the offset crosses the cal area limit.
+ */
- pReadData = kzalloc(stNVMReadWrite.uiNumBytes, GFP_KERNEL);
- if(!pReadData)
- return -ENOMEM;
+ if ((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) > Adapter->uiNVMDSDSize) {
+ /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Can't allow access beyond NVM Size: 0x%x 0x%x\n", stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes); */
+ Status = STATUS_FAILURE;
+ break;
+ }
- if(copy_from_user(pReadData, stNVMReadWrite.pBuffer,
- stNVMReadWrite.uiNumBytes))
- {
- Status = -EFAULT;
- kfree(pReadData);
- break;
- }
+ pReadData = kzalloc(stNVMReadWrite.uiNumBytes, GFP_KERNEL);
+ if (!pReadData)
+ return -ENOMEM;
- do_gettimeofday(&tv0);
- if(IOCTL_BCM_NVM_READ == cmd)
- {
- down(&Adapter->NVMRdmWrmLock);
+ if (copy_from_user(pReadData, stNVMReadWrite.pBuffer, stNVMReadWrite.uiNumBytes)) {
+ Status = -EFAULT;
+ kfree(pReadData);
+ break;
+ }
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadData);
- return -EACCES;
- }
+ do_gettimeofday(&tv0);
+ if (IOCTL_BCM_NVM_READ == cmd) {
+ down(&Adapter->NVMRdmWrmLock);
- Status = BeceemNVMRead(Adapter, (PUINT)pReadData,
- stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes);
-
- up(&Adapter->NVMRdmWrmLock);
-
- if(Status != STATUS_SUCCESS)
- {
- kfree(pReadData);
- return Status;
- }
- if(copy_to_user(stNVMReadWrite.pBuffer,pReadData, stNVMReadWrite.uiNumBytes))
- {
- kfree(pReadData);
- Status = -EFAULT;
- }
- }
- else
- {
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- down(&Adapter->NVMRdmWrmLock);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadData);
+ return -EACCES;
+ }
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadData);
- return -EACCES;
- }
+ Status = BeceemNVMRead(Adapter, (PUINT)pReadData, stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes);
+ up(&Adapter->NVMRdmWrmLock);
- Adapter->bHeaderChangeAllowed = TRUE ;
- if(IsFlash2x(Adapter))
- {
- /*
- New Requirement:-
- DSD section updation will be allowed in two case:-
- 1. if DSD sig is present in DSD header means dongle is ok and updation is fruitfull
- 2. if point 1 failes then user buff should have DSD sig. this point ensures that if dongle is
- corrupted then user space program first modify the DSD header with valid DSD sig so
- that this as well as further write may be worthwhile.
-
- This restriction has been put assuming that if DSD sig is corrupted, DSD
- data won't be considered valid.
-
-
- */
- Status = BcmFlash2xCorruptSig(Adapter,Adapter->eActiveDSD);
- if(Status != STATUS_SUCCESS)
- {
- if(( (stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) != Adapter->uiNVMDSDSize ) ||
- (stNVMReadWrite.uiNumBytes < SIGNATURE_SIZE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"DSD Sig is present neither in Flash nor User provided Input..");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadData);
- return Status;
- }
-
- ulDSDMagicNumInUsrBuff = ntohl(*(PUINT)(pReadData + stNVMReadWrite.uiNumBytes - SIGNATURE_SIZE));
- if(ulDSDMagicNumInUsrBuff != DSD_IMAGE_MAGIC_NUMBER)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"DSD Sig is present neither in Flash nor User provided Input..");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadData);
- return Status;
- }
- }
- }
- Status = BeceemNVMWrite(Adapter, (PUINT )pReadData,
- stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes, stNVMReadWrite.bVerify);
- if(IsFlash2x(Adapter))
- BcmFlash2xWriteSig(Adapter,Adapter->eActiveDSD);
+ if (Status != STATUS_SUCCESS) {
+ kfree(pReadData);
+ return Status;
+ }
- Adapter->bHeaderChangeAllowed = FALSE ;
+ if (copy_to_user(stNVMReadWrite.pBuffer, pReadData, stNVMReadWrite.uiNumBytes)) {
+ kfree(pReadData);
+ Status = -EFAULT;
+ }
+ } else {
+ down(&Adapter->NVMRdmWrmLock);
- up(&Adapter->NVMRdmWrmLock);
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadData);
+ return -EACCES;
+ }
- if(Status != STATUS_SUCCESS)
- {
+ Adapter->bHeaderChangeAllowed = TRUE;
+ if (IsFlash2x(Adapter)) {
+ /*
+ * New Requirement:-
+ * DSD section updation will be allowed in two case:-
+ * 1. if DSD sig is present in DSD header means dongle is ok and updation is fruitfull
+ * 2. if point 1 failes then user buff should have DSD sig. this point ensures that if dongle is
+ * corrupted then user space program first modify the DSD header with valid DSD sig so
+ * that this as well as further write may be worthwhile.
+ *
+ * This restriction has been put assuming that if DSD sig is corrupted, DSD
+ * data won't be considered valid.
+ */
+
+ Status = BcmFlash2xCorruptSig(Adapter, Adapter->eActiveDSD);
+ if (Status != STATUS_SUCCESS) {
+ if (((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) != Adapter->uiNVMDSDSize) ||
+ (stNVMReadWrite.uiNumBytes < SIGNATURE_SIZE)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DSD Sig is present neither in Flash nor User provided Input..");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadData);
+ return Status;
+ }
+
+ ulDSDMagicNumInUsrBuff = ntohl(*(PUINT)(pReadData + stNVMReadWrite.uiNumBytes - SIGNATURE_SIZE));
+ if (ulDSDMagicNumInUsrBuff != DSD_IMAGE_MAGIC_NUMBER) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DSD Sig is present neither in Flash nor User provided Input..");
+ up(&Adapter->NVMRdmWrmLock);
kfree(pReadData);
return Status;
}
}
- do_gettimeofday(&tv1);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " timetaken by Write/read :%ld msec\n",(tv1.tv_sec - tv0.tv_sec)*1000 +(tv1.tv_usec - tv0.tv_usec)/1000);
+ }
+
+ Status = BeceemNVMWrite(Adapter, (PUINT)pReadData, stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes, stNVMReadWrite.bVerify);
+ if (IsFlash2x(Adapter))
+ BcmFlash2xWriteSig(Adapter, Adapter->eActiveDSD);
+
+ Adapter->bHeaderChangeAllowed = FALSE;
+ up(&Adapter->NVMRdmWrmLock);
+ if (Status != STATUS_SUCCESS) {
kfree(pReadData);
- Status = STATUS_SUCCESS;
+ return Status;
}
- break;
- case IOCTL_BCM_FLASH2X_SECTION_READ :
- {
-
- FLASH2X_READWRITE sFlash2xRead = {0};
- PUCHAR pReadBuff = NULL ;
- UINT NOB = 0;
- UINT BuffSize = 0;
- UINT ReadBytes = 0;
- UINT ReadOffset = 0;
- void __user *OutPutBuff;
-
- if(IsFlash2x(Adapter) != TRUE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Flash Does not have 2.x map");
- return -EINVAL;
- }
+ }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_READ Called");
- if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ do_gettimeofday(&tv1);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " timetaken by Write/read :%ld msec\n", (tv1.tv_sec - tv0.tv_sec)*1000 + (tv1.tv_usec - tv0.tv_usec)/1000);
- //Reading FLASH 2.x READ structure
- if (copy_from_user(&sFlash2xRead, IoBuffer.InputBuffer,sizeof(FLASH2X_READWRITE)))
- return -EFAULT;
+ kfree(pReadData);
+ Status = STATUS_SUCCESS;
+ }
+ break;
+
+ case IOCTL_BCM_FLASH2X_SECTION_READ: {
+ FLASH2X_READWRITE sFlash2xRead = {0};
+ PUCHAR pReadBuff = NULL ;
+ UINT NOB = 0;
+ UINT BuffSize = 0;
+ UINT ReadBytes = 0;
+ UINT ReadOffset = 0;
+ void __user *OutPutBuff;
+
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
+ return -EINVAL;
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_READ Called");
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.Section :%x" ,sFlash2xRead.Section);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.offset :%x" ,sFlash2xRead.offset);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.numOfBytes :%x" ,sFlash2xRead.numOfBytes);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.bVerify :%x\n" ,sFlash2xRead.bVerify);
+ /* Reading FLASH 2.x READ structure */
+ if (copy_from_user(&sFlash2xRead, IoBuffer.InputBuffer, sizeof(FLASH2X_READWRITE)))
+ return -EFAULT;
- //This was internal to driver for raw read. now it has ben exposed to user space app.
- if(validateFlash2xReadWrite(Adapter,&sFlash2xRead) == FALSE)
- return STATUS_FAILURE ;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.Section :%x", sFlash2xRead.Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.offset :%x", sFlash2xRead.offset);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.numOfBytes :%x", sFlash2xRead.numOfBytes);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.bVerify :%x\n", sFlash2xRead.bVerify);
- NOB = sFlash2xRead.numOfBytes;
- if(NOB > Adapter->uiSectorSize )
- BuffSize = Adapter->uiSectorSize;
- else
- BuffSize = NOB ;
+ /* This was internal to driver for raw read. now it has ben exposed to user space app. */
+ if (validateFlash2xReadWrite(Adapter, &sFlash2xRead) == FALSE)
+ return STATUS_FAILURE;
- ReadOffset = sFlash2xRead.offset ;
- OutPutBuff = IoBuffer.OutputBuffer;
+ NOB = sFlash2xRead.numOfBytes;
+ if (NOB > Adapter->uiSectorSize)
+ BuffSize = Adapter->uiSectorSize;
+ else
+ BuffSize = NOB;
+ ReadOffset = sFlash2xRead.offset ;
+ OutPutBuff = IoBuffer.OutputBuffer;
+ pReadBuff = (PCHAR)kzalloc(BuffSize , GFP_KERNEL);
- pReadBuff = (PCHAR)kzalloc(BuffSize , GFP_KERNEL);
- if(pReadBuff == NULL)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Memory allocation failed for Flash 2.x Read Structure");
- return -ENOMEM;
- }
- down(&Adapter->NVMRdmWrmLock);
-
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadBuff);
- return -EACCES;
- }
+ if (pReadBuff == NULL) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed for Flash 2.x Read Structure");
+ return -ENOMEM;
+ }
+ down(&Adapter->NVMRdmWrmLock);
- while(NOB)
- {
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- if(NOB > Adapter->uiSectorSize )
- ReadBytes = Adapter->uiSectorSize;
- else
- ReadBytes = NOB;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ return -EACCES;
+ }
+ while (NOB) {
+ if (NOB > Adapter->uiSectorSize)
+ ReadBytes = Adapter->uiSectorSize;
+ else
+ ReadBytes = NOB;
- //Reading the data from Flash 2.x
+ /* Reading the data from Flash 2.x */
+ Status = BcmFlash2xBulkRead(Adapter, (PUINT)pReadBuff, sFlash2xRead.Section, ReadOffset, ReadBytes);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Flash 2x read err with Status :%d", Status);
+ break;
+ }
- Status = BcmFlash2xBulkRead(Adapter,(PUINT)pReadBuff,sFlash2xRead.Section,ReadOffset,ReadBytes);
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Flash 2x read err with Status :%d", Status);
- break ;
- }
+ BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pReadBuff, ReadBytes);
- BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,pReadBuff, ReadBytes);
-
- Status = copy_to_user(OutPutBuff, pReadBuff,ReadBytes);
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Copy to use failed with status :%d", Status);
- break;
- }
- NOB = NOB - ReadBytes;
- if(NOB)
- {
- ReadOffset = ReadOffset + ReadBytes ;
- OutPutBuff = OutPutBuff + ReadBytes ;
- }
+ Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Copy to use failed with status :%d", Status);
+ break;
+ }
+ NOB = NOB - ReadBytes;
+ if (NOB) {
+ ReadOffset = ReadOffset + ReadBytes;
+ OutPutBuff = OutPutBuff + ReadBytes ;
+ }
+ }
- }
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadBuff);
-
- }
- break ;
- case IOCTL_BCM_FLASH2X_SECTION_WRITE :
- {
- FLASH2X_READWRITE sFlash2xWrite = {0};
- PUCHAR pWriteBuff;
- void __user *InputAddr;
- UINT NOB = 0;
- UINT BuffSize = 0;
- UINT WriteOffset = 0;
- UINT WriteBytes = 0;
-
- if(IsFlash2x(Adapter) != TRUE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Flash Does not have 2.x map");
- return -EINVAL;
- }
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ }
+ break;
+
+ case IOCTL_BCM_FLASH2X_SECTION_WRITE: {
+ FLASH2X_READWRITE sFlash2xWrite = {0};
+ PUCHAR pWriteBuff;
+ void __user *InputAddr;
+ UINT NOB = 0;
+ UINT BuffSize = 0;
+ UINT WriteOffset = 0;
+ UINT WriteBytes = 0;
+
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
+ return -EINVAL;
+ }
- //First make this False so that we can enable the Sector Permission Check in BeceemFlashBulkWrite
- Adapter->bAllDSDWriteAllow = FALSE;
+ /* First make this False so that we can enable the Sector Permission Check in BeceemFlashBulkWrite */
+ Adapter->bAllDSDWriteAllow = FALSE;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_WRITE Called");
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " IOCTL_BCM_FLASH2X_SECTION_WRITE Called");
- if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- //Reading FLASH 2.x READ structure
- if (copy_from_user(&sFlash2xWrite, IoBuffer.InputBuffer, sizeof(FLASH2X_READWRITE)))
- return -EFAULT;
+ /* Reading FLASH 2.x READ structure */
+ if (copy_from_user(&sFlash2xWrite, IoBuffer.InputBuffer, sizeof(FLASH2X_READWRITE)))
+ return -EFAULT;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.Section :%x" ,sFlash2xWrite.Section);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.offset :%d" ,sFlash2xWrite.offset);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.numOfBytes :%x" ,sFlash2xWrite.numOfBytes);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.bVerify :%x\n" ,sFlash2xWrite.bVerify);
- if((sFlash2xWrite.Section != VSA0) && (sFlash2xWrite.Section != VSA1) &&
- (sFlash2xWrite.Section != VSA2) )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Only VSA write is allowed");
- return -EINVAL;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.Section :%x", sFlash2xWrite.Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.offset :%d", sFlash2xWrite.offset);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.numOfBytes :%x", sFlash2xWrite.numOfBytes);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.bVerify :%x\n", sFlash2xWrite.bVerify);
+
+ if ((sFlash2xWrite.Section != VSA0) && (sFlash2xWrite.Section != VSA1) && (sFlash2xWrite.Section != VSA2)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Only VSA write is allowed");
+ return -EINVAL;
+ }
- if(validateFlash2xReadWrite(Adapter,&sFlash2xWrite) == FALSE)
- return STATUS_FAILURE ;
+ if (validateFlash2xReadWrite(Adapter, &sFlash2xWrite) == FALSE)
+ return STATUS_FAILURE;
- InputAddr = sFlash2xWrite.pDataBuff;
- WriteOffset = sFlash2xWrite.offset ;
- NOB = sFlash2xWrite.numOfBytes;
+ InputAddr = sFlash2xWrite.pDataBuff;
+ WriteOffset = sFlash2xWrite.offset;
+ NOB = sFlash2xWrite.numOfBytes;
- if(NOB > Adapter->uiSectorSize )
- BuffSize = Adapter->uiSectorSize;
- else
- BuffSize = NOB ;
+ if (NOB > Adapter->uiSectorSize)
+ BuffSize = Adapter->uiSectorSize;
+ else
+ BuffSize = NOB ;
- pWriteBuff = kmalloc(BuffSize, GFP_KERNEL);
- if(pWriteBuff == NULL)
- return -ENOMEM;
+ pWriteBuff = kmalloc(BuffSize, GFP_KERNEL);
+ if (pWriteBuff == NULL)
+ return -ENOMEM;
- //extracting the remainder of the given offset.
- WriteBytes = Adapter->uiSectorSize ;
- if(WriteOffset % Adapter->uiSectorSize)
- WriteBytes =Adapter->uiSectorSize - (WriteOffset % Adapter->uiSectorSize);
- if(NOB < WriteBytes)
+ /* extracting the remainder of the given offset. */
+ WriteBytes = Adapter->uiSectorSize;
+ if (WriteOffset % Adapter->uiSectorSize)
+ WriteBytes = Adapter->uiSectorSize - (WriteOffset % Adapter->uiSectorSize);
+
+ if (NOB < WriteBytes)
+ WriteBytes = NOB;
+
+ down(&Adapter->NVMRdmWrmLock);
+
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pWriteBuff);
+ return -EACCES;
+ }
+
+ BcmFlash2xCorruptSig(Adapter, sFlash2xWrite.Section);
+ do {
+ Status = copy_from_user(pWriteBuff, InputAddr, WriteBytes);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to user failed with status :%d", Status);
+ break;
+ }
+ BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pWriteBuff, WriteBytes);
+
+ /* Writing the data from Flash 2.x */
+ Status = BcmFlash2xBulkWrite(Adapter, (PUINT)pWriteBuff, sFlash2xWrite.Section, WriteOffset, WriteBytes, sFlash2xWrite.bVerify);
+
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash 2x read err with Status :%d", Status);
+ break;
+ }
+
+ NOB = NOB - WriteBytes;
+ if (NOB) {
+ WriteOffset = WriteOffset + WriteBytes;
+ InputAddr = InputAddr + WriteBytes;
+ if (NOB > Adapter->uiSectorSize)
+ WriteBytes = Adapter->uiSectorSize;
+ else
WriteBytes = NOB;
+ }
+ } while (NOB > 0);
- down(&Adapter->NVMRdmWrmLock);
+ BcmFlash2xWriteSig(Adapter, sFlash2xWrite.Section);
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pWriteBuff);
+ }
+ break;
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pWriteBuff);
- return -EACCES;
- }
+ case IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP: {
+ PFLASH2X_BITMAP psFlash2xBitMap;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP Called");
- BcmFlash2xCorruptSig(Adapter,sFlash2xWrite.Section);
- do
- {
- Status = copy_from_user(pWriteBuff,InputAddr,WriteBytes);
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy to user failed with status :%d", Status);
- break ;
- }
- BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,pWriteBuff,WriteBytes);
- //Writing the data from Flash 2.x
- Status = BcmFlash2xBulkWrite(Adapter,(PUINT)pWriteBuff,sFlash2xWrite.Section,WriteOffset,WriteBytes,sFlash2xWrite.bVerify);
-
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Flash 2x read err with Status :%d", Status);
- break ;
- }
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- NOB = NOB - WriteBytes;
- if(NOB)
- {
- WriteOffset = WriteOffset + WriteBytes ;
- InputAddr = InputAddr + WriteBytes ;
- if(NOB > Adapter->uiSectorSize )
- WriteBytes = Adapter->uiSectorSize;
- else
- WriteBytes = NOB;
- }
+ if (IoBuffer.OutputLength != sizeof(FLASH2X_BITMAP))
+ return -EINVAL;
+ psFlash2xBitMap = kzalloc(sizeof(FLASH2X_BITMAP), GFP_KERNEL);
+ if (psFlash2xBitMap == NULL) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory is not available");
+ return -ENOMEM;
+ }
- } while(NOB > 0);
- BcmFlash2xWriteSig(Adapter,sFlash2xWrite.Section);
- up(&Adapter->NVMRdmWrmLock);
- kfree(pWriteBuff);
- }
- break ;
- case IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP :
- {
+ /* Reading the Flash Sectio Bit map */
+ down(&Adapter->NVMRdmWrmLock);
- PFLASH2X_BITMAP psFlash2xBitMap;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP Called");
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(psFlash2xBitMap);
+ return -EACCES;
+ }
- if(IoBuffer.OutputLength != sizeof(FLASH2X_BITMAP))
- return -EINVAL;
+ BcmGetFlash2xSectionalBitMap(Adapter, psFlash2xBitMap);
+ up(&Adapter->NVMRdmWrmLock);
+ if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(FLASH2X_BITMAP)))
+ Status = -EFAULT;
- psFlash2xBitMap = kzalloc(sizeof(FLASH2X_BITMAP), GFP_KERNEL);
- if(psFlash2xBitMap == NULL)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Memory is not available");
- return -ENOMEM ;
- }
- //Reading the Flash Sectio Bit map
- down(&Adapter->NVMRdmWrmLock);
-
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- kfree(psFlash2xBitMap);
- return -EACCES;
- }
+ kfree(psFlash2xBitMap);
+ }
+ break;
- BcmGetFlash2xSectionalBitMap(Adapter, psFlash2xBitMap);
- up(&Adapter->NVMRdmWrmLock);
- if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(FLASH2X_BITMAP)))
- Status = -EFAULT;
-
- kfree(psFlash2xBitMap);
- }
- break ;
- case IOCTL_BCM_SET_ACTIVE_SECTION :
- {
- FLASH2X_SECTION_VAL eFlash2xSectionVal = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SET_ACTIVE_SECTION Called");
-
- if(IsFlash2x(Adapter) != TRUE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Flash Does not have 2.x map");
- return -EINVAL;
- }
+ case IOCTL_BCM_SET_ACTIVE_SECTION: {
+ FLASH2X_SECTION_VAL eFlash2xSectionVal = 0;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SET_ACTIVE_SECTION Called");
- Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return Status;
- }
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
+ return -EINVAL;
+ }
- Status = copy_from_user(&eFlash2xSectionVal,IoBuffer.InputBuffer, sizeof(INT));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
- return Status;
- }
+ Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
+ return Status;
+ }
- down(&Adapter->NVMRdmWrmLock);
+ Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
+ return Status;
+ }
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- return -EACCES;
- }
+ down(&Adapter->NVMRdmWrmLock);
- Status = BcmSetActiveSection(Adapter,eFlash2xSectionVal);
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed to make it's priority Highest. Status %d", Status);
- }
- up(&Adapter->NVMRdmWrmLock);
- }
- break ;
- case IOCTL_BCM_IDENTIFY_ACTIVE_SECTION :
- {
- //Right Now we are taking care of only DSD
- Adapter->bAllDSDWriteAllow = FALSE ;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called");
-
- Status = STATUS_SUCCESS ;
- }
- break ;
- case IOCTL_BCM_COPY_SECTION :
- {
- FLASH2X_COPY_SECTION sCopySectStrut = {0};
- Status = STATUS_SUCCESS;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_COPY_SECTION Called");
-
- Adapter->bAllDSDWriteAllow = FALSE ;
- if(IsFlash2x(Adapter) != TRUE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Flash Does not have 2.x map");
- return -EINVAL;
- }
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed Status :%d", Status);
- return Status;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ up(&Adapter->NVMRdmWrmLock);
+ return -EACCES;
+ }
- Status = copy_from_user(&sCopySectStrut, IoBuffer.InputBuffer, sizeof(FLASH2X_COPY_SECTION));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of Copy_Section_Struct failed with Status :%d", Status);
- return Status;
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source SEction :%x", sCopySectStrut.SrcSection);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Destination SEction :%x", sCopySectStrut.DstSection);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "offset :%x", sCopySectStrut.offset);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "NOB :%x", sCopySectStrut.numOfBytes);
+ Status = BcmSetActiveSection(Adapter, eFlash2xSectionVal);
+ if (Status)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Failed to make it's priority Highest. Status %d", Status);
+ up(&Adapter->NVMRdmWrmLock);
+ }
+ break;
- if(IsSectionExistInFlash(Adapter,sCopySectStrut.SrcSection) == FALSE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Source Section<%x> does not exixt in Flash ", sCopySectStrut.SrcSection);
- return -EINVAL;
- }
+ case IOCTL_BCM_IDENTIFY_ACTIVE_SECTION: {
+ /* Right Now we are taking care of only DSD */
+ Adapter->bAllDSDWriteAllow = FALSE;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called");
+ Status = STATUS_SUCCESS;
+ }
+ break;
- if(IsSectionExistInFlash(Adapter,sCopySectStrut.DstSection) == FALSE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Destinatio Section<%x> does not exixt in Flash ", sCopySectStrut.DstSection);
- return -EINVAL;
- }
+ case IOCTL_BCM_COPY_SECTION: {
+ FLASH2X_COPY_SECTION sCopySectStrut = {0};
+ Status = STATUS_SUCCESS;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_COPY_SECTION Called");
- if(sCopySectStrut.SrcSection == sCopySectStrut.DstSection)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Source and Destination section should be different");
- return -EINVAL;
- }
+ Adapter->bAllDSDWriteAllow = FALSE;
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
+ return -EINVAL;
+ }
- down(&Adapter->NVMRdmWrmLock);
+ Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed Status :%d", Status);
+ return Status;
+ }
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- return -EACCES;
- }
+ Status = copy_from_user(&sCopySectStrut, IoBuffer.InputBuffer, sizeof(FLASH2X_COPY_SECTION));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of Copy_Section_Struct failed with Status :%d", Status);
+ return Status;
+ }
- if(sCopySectStrut.SrcSection == ISO_IMAGE1 || sCopySectStrut.SrcSection == ISO_IMAGE2)
- {
- if(IsNonCDLessDevice(Adapter))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Device is Non-CDLess hence won't have ISO !!");
- Status = -EINVAL ;
- }
- else if(sCopySectStrut.numOfBytes == 0)
- {
- Status = BcmCopyISO(Adapter,sCopySectStrut);
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Partial Copy of ISO section is not Allowed..");
- Status = STATUS_FAILURE ;
- }
- up(&Adapter->NVMRdmWrmLock);
- return Status;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source SEction :%x", sCopySectStrut.SrcSection);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Destination SEction :%x", sCopySectStrut.DstSection);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "offset :%x", sCopySectStrut.offset);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "NOB :%x", sCopySectStrut.numOfBytes);
- Status = BcmCopySection(Adapter, sCopySectStrut.SrcSection,
- sCopySectStrut.DstSection,sCopySectStrut.offset,sCopySectStrut.numOfBytes);
- up(&Adapter->NVMRdmWrmLock);
- }
- break ;
- case IOCTL_BCM_GET_FLASH_CS_INFO :
- {
- Status = STATUS_SUCCESS;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " IOCTL_BCM_GET_FLASH_CS_INFO Called");
-
- Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- break;
- }
- if(Adapter->eNVMType != NVM_FLASH)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Connected device does not have flash");
- Status = -EINVAL;
- break;
- }
- if(IsFlash2x(Adapter) == TRUE)
- {
+ if (IsSectionExistInFlash(Adapter, sCopySectStrut.SrcSection) == FALSE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source Section<%x> does not exixt in Flash ", sCopySectStrut.SrcSection);
+ return -EINVAL;
+ }
- if(IoBuffer.OutputLength < sizeof(FLASH2X_CS_INFO))
- return -EINVAL;
+ if (IsSectionExistInFlash(Adapter, sCopySectStrut.DstSection) == FALSE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destinatio Section<%x> does not exixt in Flash ", sCopySectStrut.DstSection);
+ return -EINVAL;
+ }
- if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlash2xCSInfo, sizeof(FLASH2X_CS_INFO)))
- return -EFAULT;
- }
- else
- {
- if(IoBuffer.OutputLength < sizeof(FLASH_CS_INFO))
- return -EINVAL;
+ if (sCopySectStrut.SrcSection == sCopySectStrut.DstSection) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source and Destination section should be different");
+ return -EINVAL;
+ }
- if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo, sizeof(FLASH_CS_INFO)))
- return -EFAULT;
+ down(&Adapter->NVMRdmWrmLock);
- }
- }
- break ;
- case IOCTL_BCM_SELECT_DSD :
- {
- UINT SectOfset = 0;
- FLASH2X_SECTION_VAL eFlash2xSectionVal;
- eFlash2xSectionVal = NO_SECTION_VAL ;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " IOCTL_BCM_SELECT_DSD Called");
-
- if(IsFlash2x(Adapter) != TRUE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Flash Does not have 2.x map");
- return -EINVAL;
- }
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return Status;
- }
- Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
- return Status;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ up(&Adapter->NVMRdmWrmLock);
+ return -EACCES;
+ }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Read Section :%d", eFlash2xSectionVal);
- if((eFlash2xSectionVal != DSD0) &&
- (eFlash2xSectionVal != DSD1) &&
- (eFlash2xSectionVal != DSD2) )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Passed section<%x> is not DSD section", eFlash2xSectionVal);
- return STATUS_FAILURE ;
- }
+ if (sCopySectStrut.SrcSection == ISO_IMAGE1 || sCopySectStrut.SrcSection == ISO_IMAGE2) {
+ if (IsNonCDLessDevice(Adapter)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device is Non-CDLess hence won't have ISO !!");
+ Status = -EINVAL;
+ } else if (sCopySectStrut.numOfBytes == 0) {
+ Status = BcmCopyISO(Adapter, sCopySectStrut);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Partial Copy of ISO section is not Allowed..");
+ Status = STATUS_FAILURE;
+ }
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
+ }
- SectOfset= BcmGetSectionValStartOffset(Adapter,eFlash2xSectionVal);
- if(SectOfset == INVALID_OFFSET)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Provided Section val <%d> does not exixt in Flash 2.x", eFlash2xSectionVal);
- return -EINVAL;
- }
+ Status = BcmCopySection(Adapter, sCopySectStrut.SrcSection,
+ sCopySectStrut.DstSection, sCopySectStrut.offset, sCopySectStrut.numOfBytes);
+ up(&Adapter->NVMRdmWrmLock);
+ }
+ break;
- Adapter->bAllDSDWriteAllow = TRUE ;
-
- Adapter->ulFlashCalStart = SectOfset ;
- Adapter->eActiveDSD = eFlash2xSectionVal;
- }
- Status = STATUS_SUCCESS ;
- break;
-
- case IOCTL_BCM_NVM_RAW_READ :
- {
-
- NVM_READWRITE stNVMRead;
- INT NOB ;
- INT BuffSize ;
- INT ReadOffset = 0;
- UINT ReadBytes = 0 ;
- PUCHAR pReadBuff;
- void __user *OutPutBuff;
-
- if(Adapter->eNVMType != NVM_FLASH)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"NVM TYPE is not Flash ");
- return -EINVAL ;
- }
+ case IOCTL_BCM_GET_FLASH_CS_INFO: {
+ Status = STATUS_SUCCESS;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " IOCTL_BCM_GET_FLASH_CS_INFO Called");
- /* Copy Ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copy_from_user 1 failed\n");
- Status = -EFAULT;
- break;
- }
+ Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
+ break;
+ }
- if(copy_from_user(&stNVMRead, IoBuffer.OutputBuffer,sizeof(NVM_READWRITE)))
- return -EFAULT;
+ if (Adapter->eNVMType != NVM_FLASH) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Connected device does not have flash");
+ Status = -EINVAL;
+ break;
+ }
- NOB = stNVMRead.uiNumBytes;
- //In Raw-Read max Buff size : 64MB
+ if (IsFlash2x(Adapter) == TRUE) {
+ if (IoBuffer.OutputLength < sizeof(FLASH2X_CS_INFO))
+ return -EINVAL;
- if(NOB > DEFAULT_BUFF_SIZE)
- BuffSize = DEFAULT_BUFF_SIZE;
- else
- BuffSize = NOB ;
+ if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlash2xCSInfo, sizeof(FLASH2X_CS_INFO)))
+ return -EFAULT;
+ } else {
+ if (IoBuffer.OutputLength < sizeof(FLASH_CS_INFO))
+ return -EINVAL;
- ReadOffset = stNVMRead.uiOffset;
- OutPutBuff = stNVMRead.pBuffer;
+ if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo, sizeof(FLASH_CS_INFO)))
+ return -EFAULT;
+ }
+ }
+ break;
- pReadBuff = kzalloc(BuffSize , GFP_KERNEL);
- if(pReadBuff == NULL)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Memory allocation failed for Flash 2.x Read Structure");
- Status = -ENOMEM;
- break;
- }
- down(&Adapter->NVMRdmWrmLock);
-
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
- kfree(pReadBuff);
- up(&Adapter->NVMRdmWrmLock);
- return -EACCES;
- }
+ case IOCTL_BCM_SELECT_DSD: {
+ UINT SectOfset = 0;
+ FLASH2X_SECTION_VAL eFlash2xSectionVal;
+ eFlash2xSectionVal = NO_SECTION_VAL;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SELECT_DSD Called");
- Adapter->bFlashRawRead = TRUE ;
- while(NOB)
- {
- if(NOB > DEFAULT_BUFF_SIZE )
- ReadBytes = DEFAULT_BUFF_SIZE;
- else
- ReadBytes = NOB;
-
- //Reading the data from Flash 2.x
- Status = BeceemNVMRead(Adapter,(PUINT)pReadBuff,ReadOffset,ReadBytes);
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Flash 2x read err with Status :%d", Status);
- break;
- }
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
+ return -EINVAL;
+ }
- BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,pReadBuff,ReadBytes);
-
- Status = copy_to_user(OutPutBuff, pReadBuff,ReadBytes);
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy to use failed with status :%d", Status);
- break;
- }
- NOB = NOB - ReadBytes;
- if(NOB)
- {
- ReadOffset = ReadOffset + ReadBytes ;
- OutPutBuff = OutPutBuff + ReadBytes ;
- }
+ Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
+ return Status;
+ }
+ Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
+ return Status;
+ }
- }
- Adapter->bFlashRawRead = FALSE ;
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadBuff);
- break ;
- }
-
- case IOCTL_BCM_CNTRLMSG_MASK:
- {
- ULONG RxCntrlMsgBitMask = 0 ;
-
- /* Copy Ioctl Buffer structure */
- Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"copy of Ioctl buffer is failed from user space");
- Status = -EFAULT;
- break;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Read Section :%d", eFlash2xSectionVal);
+ if ((eFlash2xSectionVal != DSD0) &&
+ (eFlash2xSectionVal != DSD1) &&
+ (eFlash2xSectionVal != DSD2)) {
- if (IoBuffer.InputLength != sizeof(unsigned long)) {
- Status = -EINVAL;
- break;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Passed section<%x> is not DSD section", eFlash2xSectionVal);
+ return STATUS_FAILURE;
+ }
- Status = copy_from_user(&RxCntrlMsgBitMask, IoBuffer.InputBuffer, IoBuffer.InputLength);
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"copy of control bit mask failed from user space");
- Status = -EFAULT;
- break;
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\n Got user defined cntrl msg bit mask :%lx", RxCntrlMsgBitMask);
- pTarang->RxCntrlMsgBitMask = RxCntrlMsgBitMask ;
- }
- break;
- case IOCTL_BCM_GET_DEVICE_DRIVER_INFO:
- {
- DEVICE_DRIVER_INFO DevInfo;
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
-
- DevInfo.MaxRDMBufferSize = BUFFER_4K;
- DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
- DevInfo.u32RxAlignmentCorrection = 0;
- DevInfo.u32NVMType = Adapter->eNVMType;
- DevInfo.u32InterfaceType = BCM_USB;
-
- if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ SectOfset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
+ if (SectOfset == INVALID_OFFSET) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section val <%d> does not exixt in Flash 2.x", eFlash2xSectionVal);
+ return -EINVAL;
+ }
- if(IoBuffer.OutputLength < sizeof(DevInfo))
- return -EINVAL;
+ Adapter->bAllDSDWriteAllow = TRUE;
+ Adapter->ulFlashCalStart = SectOfset;
+ Adapter->eActiveDSD = eFlash2xSectionVal;
+ }
+ Status = STATUS_SUCCESS;
+ break;
+
+ case IOCTL_BCM_NVM_RAW_READ: {
+ NVM_READWRITE stNVMRead;
+ INT NOB ;
+ INT BuffSize ;
+ INT ReadOffset = 0;
+ UINT ReadBytes = 0 ;
+ PUCHAR pReadBuff;
+ void __user *OutPutBuff;
+
+ if (Adapter->eNVMType != NVM_FLASH) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "NVM TYPE is not Flash");
+ return -EINVAL;
+ }
- if (copy_to_user(IoBuffer.OutputBuffer, &DevInfo, sizeof(DevInfo)))
- return -EFAULT;
- }
- break ;
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user 1 failed\n");
+ Status = -EFAULT;
+ break;
+ }
- case IOCTL_BCM_TIME_SINCE_NET_ENTRY:
- {
- ST_TIME_ELAPSED stTimeElapsedSinceNetEntry = {0};
+ if (copy_from_user(&stNVMRead, IoBuffer.OutputBuffer, sizeof(NVM_READWRITE)))
+ return -EFAULT;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"IOCTL_BCM_TIME_SINCE_NET_ENTRY called");
+ NOB = stNVMRead.uiNumBytes;
+ /* In Raw-Read max Buff size : 64MB */
- if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ if (NOB > DEFAULT_BUFF_SIZE)
+ BuffSize = DEFAULT_BUFF_SIZE;
+ else
+ BuffSize = NOB;
- if(IoBuffer.OutputLength < sizeof(ST_TIME_ELAPSED))
- return -EINVAL;
+ ReadOffset = stNVMRead.uiOffset;
+ OutPutBuff = stNVMRead.pBuffer;
- stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry = get_seconds() - Adapter->liTimeSinceLastNetEntry;
+ pReadBuff = kzalloc(BuffSize , GFP_KERNEL);
+ if (pReadBuff == NULL) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed for Flash 2.x Read Structure");
+ Status = -ENOMEM;
+ break;
+ }
+ down(&Adapter->NVMRdmWrmLock);
- if (copy_to_user(IoBuffer.OutputBuffer, &stTimeElapsedSinceNetEntry, sizeof(ST_TIME_ELAPSED)))
- return -EFAULT;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ kfree(pReadBuff);
+ up(&Adapter->NVMRdmWrmLock);
+ return -EACCES;
+ }
+
+ Adapter->bFlashRawRead = TRUE;
+
+ while (NOB) {
+ if (NOB > DEFAULT_BUFF_SIZE)
+ ReadBytes = DEFAULT_BUFF_SIZE;
+ else
+ ReadBytes = NOB;
+
+ /* Reading the data from Flash 2.x */
+ Status = BeceemNVMRead(Adapter, (PUINT)pReadBuff, ReadOffset, ReadBytes);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash 2x read err with Status :%d", Status);
+ break;
}
+
+ BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pReadBuff, ReadBytes);
+
+ Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to use failed with status :%d", Status);
+ break;
+ }
+ NOB = NOB - ReadBytes;
+ if (NOB) {
+ ReadOffset = ReadOffset + ReadBytes;
+ OutPutBuff = OutPutBuff + ReadBytes;
+ }
+ }
+ Adapter->bFlashRawRead = FALSE;
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ break;
+ }
+
+ case IOCTL_BCM_CNTRLMSG_MASK: {
+ ULONG RxCntrlMsgBitMask = 0;
+
+ /* Copy Ioctl Buffer structure */
+ Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of Ioctl buffer is failed from user space");
+ Status = -EFAULT;
break;
+ }
- case IOCTL_CLOSE_NOTIFICATION:
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"IOCTL_CLOSE_NOTIFICATION");
+ if (IoBuffer.InputLength != sizeof(unsigned long)) {
+ Status = -EINVAL;
break;
+ }
- default:
- pr_info(DRV_NAME ": unknown ioctl cmd=%#x\n", cmd);
- Status = STATUS_FAILURE;
+ Status = copy_from_user(&RxCntrlMsgBitMask, IoBuffer.InputBuffer, IoBuffer.InputLength);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of control bit mask failed from user space");
+ Status = -EFAULT;
break;
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\n Got user defined cntrl msg bit mask :%lx", RxCntrlMsgBitMask);
+ pTarang->RxCntrlMsgBitMask = RxCntrlMsgBitMask;
+ }
+ break;
+
+ case IOCTL_BCM_GET_DEVICE_DRIVER_INFO: {
+ DEVICE_DRIVER_INFO DevInfo;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
+
+ DevInfo.MaxRDMBufferSize = BUFFER_4K;
+ DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
+ DevInfo.u32RxAlignmentCorrection = 0;
+ DevInfo.u32NVMType = Adapter->eNVMType;
+ DevInfo.u32InterfaceType = BCM_USB;
+
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.OutputLength < sizeof(DevInfo))
+ return -EINVAL;
+
+ if (copy_to_user(IoBuffer.OutputBuffer, &DevInfo, sizeof(DevInfo)))
+ return -EFAULT;
+ }
+ break;
+
+ case IOCTL_BCM_TIME_SINCE_NET_ENTRY: {
+ ST_TIME_ELAPSED stTimeElapsedSinceNetEntry = {0};
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_TIME_SINCE_NET_ENTRY called");
+
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.OutputLength < sizeof(ST_TIME_ELAPSED))
+ return -EINVAL;
+
+ stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry = get_seconds() - Adapter->liTimeSinceLastNetEntry;
+
+ if (copy_to_user(IoBuffer.OutputBuffer, &stTimeElapsedSinceNetEntry, sizeof(ST_TIME_ELAPSED)))
+ return -EFAULT;
+ }
+ break;
+
+ case IOCTL_CLOSE_NOTIFICATION:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_CLOSE_NOTIFICATION");
+ break;
+
+ default:
+ pr_info(DRV_NAME ": unknown ioctl cmd=%#x\n", cmd);
+ Status = STATUS_FAILURE;
+ break;
}
return Status;
}
@@ -2107,8 +1989,6 @@ static const struct file_operations bcm_fops = {
.llseek = no_llseek,
};
-extern struct class *bcm_class;
-
int register_control_device_interface(PMINI_ADAPTER Adapter)
{
@@ -2122,8 +2002,8 @@ int register_control_device_interface(PMINI_ADAPTER Adapter)
}
Adapter->pstCreatedClassDevice = device_create(bcm_class, NULL,
- MKDEV(Adapter->major, 0),
- Adapter, DEV_NAME);
+ MKDEV(Adapter->major, 0),
+ Adapter, DEV_NAME);
if (IS_ERR(Adapter->pstCreatedClassDevice)) {
pr_err(DRV_NAME ": class device create failed\n");
@@ -2141,3 +2021,4 @@ void unregister_control_device_interface(PMINI_ADAPTER Adapter)
unregister_chrdev(Adapter->major, DEV_NAME);
}
}
+
diff --git a/drivers/staging/bcm/InterfaceDld.c b/drivers/staging/bcm/InterfaceDld.c
index df64acb0612..bcd86bbef2f 100644
--- a/drivers/staging/bcm/InterfaceDld.c
+++ b/drivers/staging/bcm/InterfaceDld.c
@@ -1,204 +1,183 @@
#include "headers.h"
-
-int InterfaceFileDownload( PVOID arg,
- struct file *flp,
- unsigned int on_chip_loc)
+int InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc)
{
- // unsigned int reg=0;
- mm_segment_t oldfs={0};
- int errno=0, len=0 /*,is_config_file = 0*/;
- loff_t pos=0;
+ /* unsigned int reg = 0; */
+ mm_segment_t oldfs = {0};
+ int errno = 0, len = 0; /* ,is_config_file = 0 */
+ loff_t pos = 0;
PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)arg;
- //PMINI_ADAPTER Adapter = psIntfAdapter->psAdapter;
- char *buff=kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
-
- if(!buff)
- {
- return -ENOMEM;
- }
- while(1)
- {
- oldfs=get_fs(); set_fs(get_ds());
- len=vfs_read(flp, (void __force __user *)buff, MAX_TRANSFER_CTRL_BYTE_USB, &pos);
- set_fs(oldfs);
- if(len<=0)
- {
- if(len<0)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len < 0");
- errno=len;
- }
- else
- {
- errno = 0;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Got end of file!");
- }
- break;
- }
- //BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, buff, MAX_TRANSFER_CTRL_BYTE_USB);
- errno = InterfaceWRM(psIntfAdapter, on_chip_loc, buff, len) ;
- if(errno)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "WRM Failed! status: %d", errno);
- break;
+ /* PMINI_ADAPTER Adapter = psIntfAdapter->psAdapter; */
+ char *buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
+
+ if (!buff)
+ return -ENOMEM;
+ while (1) {
+ oldfs = get_fs();
+ set_fs(get_ds());
+ len = vfs_read(flp, (void __force __user *)buff,
+ MAX_TRANSFER_CTRL_BYTE_USB, &pos);
+ set_fs(oldfs);
+ if (len <= 0) {
+ if (len < 0) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, MP_INIT,
+ DBG_LVL_ALL, "len < 0");
+ errno = len;
+ } else {
+ errno = 0;
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, MP_INIT,
+ DBG_LVL_ALL,
+ "Got end of file!");
+ }
+ break;
}
- on_chip_loc+=MAX_TRANSFER_CTRL_BYTE_USB;
- }/* End of for(;;)*/
+ /* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_INITEXIT, MP_INIT,
+ * DBG_LVL_ALL, buff,
+ * MAX_TRANSFER_CTRL_BYTE_USB);
+ */
+ errno = InterfaceWRM(psIntfAdapter, on_chip_loc, buff, len);
+ if (errno) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_PRINTK, 0, 0,
+ "WRM Failed! status: %d", errno);
+ break;
+ }
+ on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB;
+ }
kfree(buff);
- return errno;
+ return errno;
}
-int InterfaceFileReadbackFromChip( PVOID arg,
- struct file *flp,
- unsigned int on_chip_loc)
+int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_chip_loc)
{
- char *buff, *buff_readback;
- unsigned int reg=0;
- mm_segment_t oldfs={0};
- int errno=0, len=0, is_config_file = 0;
- loff_t pos=0;
- static int fw_down = 0;
- INT Status = STATUS_SUCCESS;
+ char *buff, *buff_readback;
+ unsigned int reg = 0;
+ mm_segment_t oldfs = {0};
+ int errno = 0, len = 0, is_config_file = 0;
+ loff_t pos = 0;
+ static int fw_down;
+ INT Status = STATUS_SUCCESS;
PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)arg;
- buff=kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
- buff_readback=kmalloc(MAX_TRANSFER_CTRL_BYTE_USB , GFP_DMA);
- if(!buff || !buff_readback)
- {
- kfree(buff);
- kfree(buff_readback);
+ buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
+ buff_readback = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB , GFP_DMA);
+ if (!buff || !buff_readback) {
+ kfree(buff);
+ kfree(buff_readback);
- return -ENOMEM;
- }
+ return -ENOMEM;
+ }
- is_config_file = (on_chip_loc == CONFIG_BEGIN_ADDR)? 1:0;
+ is_config_file = (on_chip_loc == CONFIG_BEGIN_ADDR) ? 1 : 0;
memset(buff_readback, 0, MAX_TRANSFER_CTRL_BYTE_USB);
memset(buff, 0, MAX_TRANSFER_CTRL_BYTE_USB);
- while(1)
- {
- oldfs=get_fs(); set_fs(get_ds());
- len=vfs_read(flp, (void __force __user *)buff, MAX_TRANSFER_CTRL_BYTE_USB, &pos);
- set_fs(oldfs);
- fw_down++;
- if(len<=0)
- {
- if(len<0)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len < 0");
- errno=len;
- }
- else
- {
- errno = 0;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Got end of file!");
- }
- break;
- }
-
+ while (1) {
+ oldfs = get_fs();
+ set_fs(get_ds());
+ len = vfs_read(flp, (void __force __user *)buff, MAX_TRANSFER_CTRL_BYTE_USB, &pos);
+ set_fs(oldfs);
+ fw_down++;
+
+ if (len <= 0) {
+ if (len < 0) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len < 0");
+ errno = len;
+ } else {
+ errno = 0;
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Got end of file!");
+ }
+ break;
+ }
Status = InterfaceRDM(psIntfAdapter, on_chip_loc, buff_readback, len);
- if(Status)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "RDM of len %d Failed! %d", len, reg);
+ if (Status) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "RDM of len %d Failed! %d", len, reg);
goto exit;
}
reg++;
- if((len-sizeof(unsigned int))<4)
- {
- if(memcmp(buff_readback, buff, len))
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper %d", fw_down);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, MP_INIT,DBG_LVL_ALL,"Length is: %d",len);
+ if ((len-sizeof(unsigned int)) < 4) {
+ if (memcmp(buff_readback, buff, len)) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper %d", fw_down);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Length is: %d", len);
Status = -EIO;
goto exit;
- }
- }
- else
- {
- len-=4;
- while(len)
- {
- if(*(unsigned int*)&buff_readback[len] != *(unsigned int *)&buff[len])
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper %d", fw_down);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Val from Binary %x, Val From Read Back %x ", *(unsigned int *)&buff[len], *(unsigned int*)&buff_readback[len]);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len =%x!!!", len);
+ }
+ } else {
+ len -= 4;
+
+ while (len) {
+ if (*(unsigned int *)&buff_readback[len] != *(unsigned int *)&buff[len]) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper %d", fw_down);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Val from Binary %x, Val From Read Back %x ", *(unsigned int *)&buff[len], *(unsigned int*)&buff_readback[len]);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len =%x!!!", len);
Status = -EIO;
goto exit;
- }
- len-=4;
- }
- }
- on_chip_loc+=MAX_TRANSFER_CTRL_BYTE_USB;
- }/* End of while(1)*/
+ }
+ len -= 4;
+ }
+ }
+ on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB;
+ } /* End of while(1) */
+
exit:
- kfree(buff);
- kfree(buff_readback);
+ kfree(buff);
+ kfree(buff_readback);
return Status;
}
-static int bcm_download_config_file(PMINI_ADAPTER Adapter,
- FIRMWARE_INFO *psFwInfo)
+static int bcm_download_config_file(PMINI_ADAPTER Adapter, FIRMWARE_INFO *psFwInfo)
{
int retval = STATUS_SUCCESS;
B_UINT32 value = 0;
- if(Adapter->pstargetparams == NULL)
- {
- if((Adapter->pstargetparams =
- kmalloc(sizeof(STARGETPARAMS), GFP_KERNEL)) == NULL)
- {
- return -ENOMEM;
- }
- }
- if(psFwInfo->u32FirmwareLength != sizeof(STARGETPARAMS))
- {
- return -EIO;
+ if (Adapter->pstargetparams == NULL) {
+ Adapter->pstargetparams = kmalloc(sizeof(STARGETPARAMS), GFP_KERNEL);
+ if (Adapter->pstargetparams == NULL)
+ return -ENOMEM;
}
- retval = copy_from_user(Adapter->pstargetparams,
- psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength);
- if(retval)
- {
+
+ if (psFwInfo->u32FirmwareLength != sizeof(STARGETPARAMS))
+ return -EIO;
+
+ retval = copy_from_user(Adapter->pstargetparams, psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength);
+ if (retval) {
kfree(Adapter->pstargetparams);
Adapter->pstargetparams = NULL;
return -EFAULT;
}
+
/* Parse the structure and then Download the Firmware */
beceem_parse_target_struct(Adapter);
- //Initializing the NVM.
+ /* Initializing the NVM. */
BcmInitNVM(Adapter);
+ retval = InitLedSettings(Adapter);
- retval = InitLedSettings (Adapter);
-
- if(retval)
- {
- BCM_DEBUG_PRINT (Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "INIT LED Failed\n");
+ if (retval) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "INIT LED Failed\n");
return retval;
}
- if(Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
Adapter->LEDInfo.bLedInitDone = FALSE;
Adapter->DriverState = DRIVER_INIT;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
- if(Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
Adapter->DriverState = FW_DOWNLOAD;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
/* Initialize the DDR Controller */
retval = ddr_init(Adapter);
- if(retval)
- {
- BCM_DEBUG_PRINT (Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "DDR Init Failed\n");
+ if (retval) {
+ BCM_DEBUG_PRINT (Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "DDR Init Failed\n");
return retval;
}
@@ -206,197 +185,166 @@ static int bcm_download_config_file(PMINI_ADAPTER Adapter,
wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value));
wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value));
- if(Adapter->eNVMType == NVM_FLASH)
- {
+ if (Adapter->eNVMType == NVM_FLASH) {
retval = PropagateCalParamsFromFlashToMemory(Adapter);
- if(retval)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"propagaion of cal param failed with status :%d", retval);
+ if (retval) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "propagaion of cal param failed with status :%d", retval);
return retval;
}
}
+ retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams, sizeof(STARGETPARAMS), CONFIG_BEGIN_ADDR);
- retval =buffDnldVerify(Adapter,(PUCHAR)Adapter->pstargetparams,sizeof(STARGETPARAMS),CONFIG_BEGIN_ADDR);
-
- if(retval)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "configuration file not downloaded properly");
- }
+ if (retval)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "configuration file not downloaded properly");
else
Adapter->bCfgDownloaded = TRUE;
-
return retval;
}
-static int bcm_compare_buff_contents(unsigned char *readbackbuff,
- unsigned char *buff,unsigned int len)
+
+static int bcm_compare_buff_contents(unsigned char *readbackbuff, unsigned char *buff, unsigned int len)
{
int retval = STATUS_SUCCESS;
- PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if((len-sizeof(unsigned int))<4)
- {
- if(memcmp(readbackbuff , buff, len))
- {
- retval=-EINVAL;
- }
- }
- else
- {
- len-=4;
- while(len)
- {
- if(*(unsigned int*)&readbackbuff[len] !=
- *(unsigned int *)&buff[len])
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper");
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Val from Binary %x, Val From Read Back %x ", *(unsigned int *)&buff[len], *(unsigned int*)&readbackbuff[len]);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len =%x!!!", len);
- retval=-EINVAL;
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
+ if ((len-sizeof(unsigned int)) < 4) {
+ if (memcmp(readbackbuff , buff, len))
+ retval = -EINVAL;
+ } else {
+ len -= 4;
+
+ while (len) {
+ if (*(unsigned int *)&readbackbuff[len] != *(unsigned int *)&buff[len]) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Val from Binary %x, Val From Read Back %x ", *(unsigned int *)&buff[len], *(unsigned int*)&readbackbuff[len]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len =%x!!!", len);
+ retval = -EINVAL;
break;
}
- len-=4;
+ len -= 4;
}
}
return retval;
}
+
int bcm_ioctl_fw_download(PMINI_ADAPTER Adapter, FIRMWARE_INFO *psFwInfo)
{
int retval = STATUS_SUCCESS;
PUCHAR buff = NULL;
- /* Config File is needed for the Driver to download the Config file and
- Firmware. Check for the Config file to be first to be sent from the
- Application
- */
- atomic_set (&Adapter->uiMBupdate, FALSE);
- if(!Adapter->bCfgDownloaded &&
- psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR)
- {
- /*Can't Download Firmware.*/
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"Download the config File first\n");
+ /* Config File is needed for the Driver to download the Config file and
+ * Firmware. Check for the Config file to be first to be sent from the
+ * Application
+ */
+ atomic_set(&Adapter->uiMBupdate, FALSE);
+ if (!Adapter->bCfgDownloaded && psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR) {
+ /* Can't Download Firmware. */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Download the config File first\n");
return -EINVAL;
}
/* If Config File, Finish the DDR Settings and then Download CFG File */
- if(psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR)
- {
- retval = bcm_download_config_file (Adapter, psFwInfo);
- }
- else
- {
-
- buff = kzalloc(psFwInfo->u32FirmwareLength,GFP_KERNEL);
- if(buff==NULL)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"Failed in allocation memory");
+ if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR) {
+ retval = bcm_download_config_file(Adapter, psFwInfo);
+ } else {
+ buff = kzalloc(psFwInfo->u32FirmwareLength, GFP_KERNEL);
+ if (buff == NULL) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed in allocation memory");
return -ENOMEM;
}
- retval = copy_from_user(buff,psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength);
- if(retval != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "copying buffer from user space failed");
+
+ retval = copy_from_user(buff, psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength);
+ if (retval != STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "copying buffer from user space failed");
retval = -EFAULT;
- goto error ;
+ goto error;
}
retval = buffDnldVerify(Adapter,
buff,
psFwInfo->u32FirmwareLength,
psFwInfo->u32StartingAddress);
- if(retval != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"f/w download failed status :%d", retval);
+
+ if (retval != STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "f/w download failed status :%d", retval);
goto error;
}
}
+
error:
kfree(buff);
return retval;
}
-static INT buffDnld(PMINI_ADAPTER Adapter, PUCHAR mappedbuffer, UINT u32FirmwareLength,
- ULONG u32StartingAddress)
+static INT buffDnld(PMINI_ADAPTER Adapter, PUCHAR mappedbuffer, UINT u32FirmwareLength, ULONG u32StartingAddress)
{
-
- unsigned int len = 0;
+ unsigned int len = 0;
int retval = STATUS_SUCCESS;
len = u32FirmwareLength;
- while(u32FirmwareLength)
- {
- len = MIN_VAL (u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
- retval = wrm (Adapter, u32StartingAddress, mappedbuffer, len);
- if(retval)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "wrm failed with status :%d", retval);
+ while (u32FirmwareLength) {
+ len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
+ retval = wrm(Adapter, u32StartingAddress, mappedbuffer, len);
+
+ if (retval) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "wrm failed with status :%d", retval);
break;
}
- u32StartingAddress += len;
- u32FirmwareLength -= len;
- mappedbuffer +=len;
+ u32StartingAddress += len;
+ u32FirmwareLength -= len;
+ mappedbuffer += len;
}
return retval;
-
}
-static INT buffRdbkVerify(PMINI_ADAPTER Adapter,
- PUCHAR mappedbuffer, UINT u32FirmwareLength,
- ULONG u32StartingAddress)
+static INT buffRdbkVerify(PMINI_ADAPTER Adapter, PUCHAR mappedbuffer, UINT u32FirmwareLength, ULONG u32StartingAddress)
{
UINT len = u32FirmwareLength;
INT retval = STATUS_SUCCESS;
- PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB,GFP_KERNEL);
+ PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
- if(NULL == readbackbuff)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "MEMORY ALLOCATION FAILED");
+ if (NULL == readbackbuff) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "MEMORY ALLOCATION FAILED");
return -ENOMEM;
}
- while (u32FirmwareLength && !retval)
- {
- len = MIN_VAL (u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
+ while (u32FirmwareLength && !retval) {
+ len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
+ retval = rdm(Adapter, u32StartingAddress, readbackbuff, len);
- retval = rdm (Adapter, u32StartingAddress, readbackbuff, len);
- if(retval)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "rdm failed with status %d" ,retval);
+ if (retval) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "rdm failed with status %d", retval);
break;
}
- if (STATUS_SUCCESS != (retval = bcm_compare_buff_contents (readbackbuff, mappedbuffer, len)))
- {
+ retval = bcm_compare_buff_contents(readbackbuff, mappedbuffer, len);
+ if (STATUS_SUCCESS != retval)
break;
- }
- u32StartingAddress += len;
- u32FirmwareLength -= len;
- mappedbuffer +=len;
- }/* end of while (u32FirmwareLength && !retval) */
+
+ u32StartingAddress += len;
+ u32FirmwareLength -= len;
+ mappedbuffer += len;
+
+ } /* end of while (u32FirmwareLength && !retval) */
kfree(readbackbuff);
return retval;
}
-INT buffDnldVerify(PMINI_ADAPTER Adapter, unsigned char *mappedbuffer, unsigned int u32FirmwareLength,
- unsigned long u32StartingAddress)
+INT buffDnldVerify(PMINI_ADAPTER Adapter, unsigned char *mappedbuffer, unsigned int u32FirmwareLength, unsigned long u32StartingAddress)
{
INT status = STATUS_SUCCESS;
- status = buffDnld(Adapter,mappedbuffer,u32FirmwareLength,u32StartingAddress);
- if(status != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"Buffer download failed");
+ status = buffDnld(Adapter, mappedbuffer, u32FirmwareLength, u32StartingAddress);
+ if (status != STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Buffer download failed");
goto error;
}
- status= buffRdbkVerify(Adapter,mappedbuffer,u32FirmwareLength,u32StartingAddress);
- if(status != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"Buffer readback verifier failed");
+ status = buffRdbkVerify(Adapter, mappedbuffer, u32FirmwareLength, u32StartingAddress);
+ if (status != STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Buffer readback verifier failed");
goto error;
}
error:
return status;
}
-
-
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
index d78d5ef1f29..a09d35108f0 100644
--- a/drivers/staging/bcm/InterfaceInit.c
+++ b/drivers/staging/bcm/InterfaceInit.c
@@ -139,8 +139,7 @@ static void ConfigureEndPointTypesThroughEEPROM(PMINI_ADAPTER Adapter)
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1C2, 4, TRUE);
}
-static int
-usbbcm_device_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
int retval;
@@ -281,8 +280,9 @@ static int AllocUsbCb(PS_INTERFACE_ADAPTER psIntfAdapter)
int i = 0;
for (i = 0; i < MAXIMUM_USB_TCB; i++) {
- if ((psIntfAdapter->asUsbTcb[i].urb =
- usb_alloc_urb(0, GFP_KERNEL)) == NULL) {
+ psIntfAdapter->asUsbTcb[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+
+ if (psIntfAdapter->asUsbTcb[i].urb == NULL) {
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
"Can't allocate Tx urb for index %d\n", i);
return -ENOMEM;
@@ -290,14 +290,17 @@ static int AllocUsbCb(PS_INTERFACE_ADAPTER psIntfAdapter)
}
for (i = 0; i < MAXIMUM_USB_RCB; i++) {
- if ((psIntfAdapter->asUsbRcb[i].urb =
- usb_alloc_urb(0, GFP_KERNEL)) == NULL) {
+ psIntfAdapter->asUsbRcb[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+
+ if (psIntfAdapter->asUsbRcb[i].urb == NULL) {
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
"Can't allocate Rx urb for index %d\n", i);
return -ENOMEM;
}
- if ((psIntfAdapter->asUsbRcb[i].urb->transfer_buffer =
- kmalloc(MAX_DATA_BUFFER_SIZE, GFP_KERNEL)) == NULL) {
+
+ psIntfAdapter->asUsbRcb[i].urb->transfer_buffer = kmalloc(MAX_DATA_BUFFER_SIZE, GFP_KERNEL);
+
+ if (psIntfAdapter->asUsbRcb[i].urb->transfer_buffer == NULL) {
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
"Can't allocate Rx buffer for index %d\n", i);
return -ENOMEM;
@@ -389,32 +392,32 @@ static inline int bcm_usb_endpoint_xfer_isoc(const struct usb_endpoint_descripto
static inline int bcm_usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
{
- return (bcm_usb_endpoint_xfer_bulk(epd) && bcm_usb_endpoint_dir_in(epd));
+ return bcm_usb_endpoint_xfer_bulk(epd) && bcm_usb_endpoint_dir_in(epd);
}
static inline int bcm_usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
{
- return (bcm_usb_endpoint_xfer_bulk(epd) && bcm_usb_endpoint_dir_out(epd));
+ return bcm_usb_endpoint_xfer_bulk(epd) && bcm_usb_endpoint_dir_out(epd);
}
static inline int bcm_usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd)
{
- return (bcm_usb_endpoint_xfer_int(epd) && bcm_usb_endpoint_dir_in(epd));
+ return bcm_usb_endpoint_xfer_int(epd) && bcm_usb_endpoint_dir_in(epd);
}
static inline int bcm_usb_endpoint_is_int_out(const struct usb_endpoint_descriptor *epd)
{
- return (bcm_usb_endpoint_xfer_int(epd) && bcm_usb_endpoint_dir_out(epd));
+ return bcm_usb_endpoint_xfer_int(epd) && bcm_usb_endpoint_dir_out(epd);
}
static inline int bcm_usb_endpoint_is_isoc_in(const struct usb_endpoint_descriptor *epd)
{
- return (bcm_usb_endpoint_xfer_isoc(epd) && bcm_usb_endpoint_dir_in(epd));
+ return bcm_usb_endpoint_xfer_isoc(epd) && bcm_usb_endpoint_dir_in(epd);
}
static inline int bcm_usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor *epd)
{
- return (bcm_usb_endpoint_xfer_isoc(epd) && bcm_usb_endpoint_dir_out(epd));
+ return bcm_usb_endpoint_xfer_isoc(epd) && bcm_usb_endpoint_dir_out(epd);
}
static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
@@ -462,7 +465,7 @@ static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
if (bBcm16 == TRUE) {
/* selecting alternate setting one as a default setting for High Speed modem. */
if (psIntfAdapter->bHighSpeedDevice)
- retval= usb_set_interface(psIntfAdapter->udev, DEFAULT_SETTING_0, ALTERNATE_SETTING_1);
+ retval = usb_set_interface(psIntfAdapter->udev, DEFAULT_SETTING_0, ALTERNATE_SETTING_1);
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
"BCM16 is applicable on this dongle\n");
if (retval || (psIntfAdapter->bHighSpeedDevice == FALSE)) {
@@ -497,7 +500,7 @@ static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
if ((psIntfAdapter->bHighSpeedDevice == FALSE) && bcm_usb_endpoint_is_bulk_out(endpoint)) {
/* Once BULK is selected in FS mode. Revert it back to INT. Else USB_IF will fail. */
UINT _uiData = ntohl(EP2_CFG_INT);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
"Reverting Bulk to INT as it is in Full Speed mode.\n");
BeceemEEPROMBulkWrite(psIntfAdapter->psAdapter, (PUCHAR)&_uiData, 0x136, 4, TRUE);
}
@@ -579,7 +582,7 @@ static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
psIntfAdapter->sIntrOut.int_out_size = buffer_size;
psIntfAdapter->sIntrOut.int_out_endpointAddr = endpoint->bEndpointAddress;
psIntfAdapter->sIntrOut.int_out_interval = endpoint->bInterval;
- psIntfAdapter->sIntrOut.int_out_buffer= kmalloc(buffer_size, GFP_KERNEL);
+ psIntfAdapter->sIntrOut.int_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!psIntfAdapter->sIntrOut.int_out_buffer) {
dev_err(&psIntfAdapter->udev->dev,
"could not allocate interrupt_out_buffer\n");
@@ -641,8 +644,8 @@ static int InterfaceSuspend(struct usb_interface *intf, pm_message_t message)
static int InterfaceResume(struct usb_interface *intf)
{
PS_INTERFACE_ADAPTER psIntfAdapter = usb_get_intfdata(intf);
- mdelay(100);
+ mdelay(100);
psIntfAdapter->bSuspended = FALSE;
StartInterruptUrb(psIntfAdapter);
diff --git a/drivers/staging/bcm/InterfaceMisc.c b/drivers/staging/bcm/InterfaceMisc.c
index a51185b522c..61f878b4f56 100644
--- a/drivers/staging/bcm/InterfaceMisc.c
+++ b/drivers/staging/bcm/InterfaceMisc.c
@@ -1,246 +1,219 @@
#include "headers.h"
-INT
-InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
- UINT addr,
- PVOID buff,
- INT len)
+INT InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
+ UINT addr,
+ PVOID buff,
+ INT len)
{
int retval = 0;
- USHORT usRetries = 0 ;
- if(psIntfAdapter == NULL )
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0,"Interface Adapter is NULL");
- return -EINVAL ;
+ USHORT usRetries = 0;
+
+ if (psIntfAdapter == NULL) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "Interface Adapter is NULL");
+ return -EINVAL;
}
- if(psIntfAdapter->psAdapter->device_removed == TRUE)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0,"Device got removed");
+ if (psIntfAdapter->psAdapter->device_removed == TRUE) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "Device got removed");
return -ENODEV;
}
- if((psIntfAdapter->psAdapter->StopAllXaction == TRUE) && (psIntfAdapter->psAdapter->chip_id >= T3LPB))
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL,"Currently Xaction is not allowed on the bus");
+ if ((psIntfAdapter->psAdapter->StopAllXaction == TRUE) && (psIntfAdapter->psAdapter->chip_id >= T3LPB)) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "Currently Xaction is not allowed on the bus");
return -EACCES;
}
- if(psIntfAdapter->bSuspended ==TRUE || psIntfAdapter->bPreparingForBusSuspend == TRUE)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL,"Bus is in suspended states hence RDM not allowed..");
+ if (psIntfAdapter->bSuspended == TRUE || psIntfAdapter->bPreparingForBusSuspend == TRUE) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "Bus is in suspended states hence RDM not allowed..");
return -EACCES;
}
- psIntfAdapter->psAdapter->DeviceAccess = TRUE ;
+ psIntfAdapter->psAdapter->DeviceAccess = TRUE;
+
do {
retval = usb_control_msg(psIntfAdapter->udev,
- usb_rcvctrlpipe(psIntfAdapter->udev,0),
- 0x02,
- 0xC2,
- (addr & 0xFFFF),
- ((addr >> 16) & 0xFFFF),
- buff,
- len,
- 5000);
-
- usRetries++ ;
- if(-ENODEV == retval)
- {
- psIntfAdapter->psAdapter->device_removed =TRUE;
+ usb_rcvctrlpipe(psIntfAdapter->udev, 0),
+ 0x02,
+ 0xC2,
+ (addr & 0xFFFF),
+ ((addr >> 16) & 0xFFFF),
+ buff,
+ len,
+ 5000);
+
+ usRetries++;
+ if (-ENODEV == retval) {
+ psIntfAdapter->psAdapter->device_removed = TRUE;
break;
}
- }while((retval < 0) && (usRetries < MAX_RDM_WRM_RETIRES ) );
+ } while ((retval < 0) && (usRetries < MAX_RDM_WRM_RETIRES));
- if(retval < 0)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM failed status :%d, retires :%d", retval,usRetries);
- psIntfAdapter->psAdapter->DeviceAccess = FALSE ;
- return retval;
- }
- else
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM sent %d", retval);
- psIntfAdapter->psAdapter->DeviceAccess = FALSE ;
- return STATUS_SUCCESS;
+ if (retval < 0) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM failed status :%d, retires :%d", retval, usRetries);
+ psIntfAdapter->psAdapter->DeviceAccess = FALSE;
+ return retval;
+ } else {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM sent %d", retval);
+ psIntfAdapter->psAdapter->DeviceAccess = FALSE;
+ return STATUS_SUCCESS;
}
}
-INT
-InterfaceWRM(PS_INTERFACE_ADAPTER psIntfAdapter,
- UINT addr,
- PVOID buff,
- INT len)
+INT InterfaceWRM(PS_INTERFACE_ADAPTER psIntfAdapter,
+ UINT addr,
+ PVOID buff,
+ INT len)
{
int retval = 0;
- USHORT usRetries = 0 ;
+ USHORT usRetries = 0;
- if(psIntfAdapter == NULL )
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Interface Adapter is NULL");
+ if (psIntfAdapter == NULL) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "Interface Adapter is NULL");
return -EINVAL;
}
- if(psIntfAdapter->psAdapter->device_removed == TRUE)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0,"Device got removed");
+ if (psIntfAdapter->psAdapter->device_removed == TRUE) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "Device got removed");
return -ENODEV;
}
- if((psIntfAdapter->psAdapter->StopAllXaction == TRUE) && (psIntfAdapter->psAdapter->chip_id >= T3LPB))
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, WRM, DBG_LVL_ALL,"Currently Xaction is not allowed on the bus...");
+ if ((psIntfAdapter->psAdapter->StopAllXaction == TRUE) && (psIntfAdapter->psAdapter->chip_id >= T3LPB)) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, WRM, DBG_LVL_ALL, "Currently Xaction is not allowed on the bus...");
return -EACCES;
}
- if(psIntfAdapter->bSuspended ==TRUE || psIntfAdapter->bPreparingForBusSuspend == TRUE)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, WRM, DBG_LVL_ALL,"Bus is in suspended states hence RDM not allowed..");
+ if (psIntfAdapter->bSuspended == TRUE || psIntfAdapter->bPreparingForBusSuspend == TRUE) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, WRM, DBG_LVL_ALL, "Bus is in suspended states hence RDM not allowed..");
return -EACCES;
}
- psIntfAdapter->psAdapter->DeviceAccess = TRUE ;
- do{
+
+ psIntfAdapter->psAdapter->DeviceAccess = TRUE;
+
+ do {
retval = usb_control_msg(psIntfAdapter->udev,
- usb_sndctrlpipe(psIntfAdapter->udev,0),
- 0x01,
- 0x42,
- (addr & 0xFFFF),
- ((addr >> 16) & 0xFFFF),
- buff,
- len,
- 5000);
-
- usRetries++ ;
- if(-ENODEV == retval)
- {
- psIntfAdapter->psAdapter->device_removed = TRUE ;
+ usb_sndctrlpipe(psIntfAdapter->udev, 0),
+ 0x01,
+ 0x42,
+ (addr & 0xFFFF),
+ ((addr >> 16) & 0xFFFF),
+ buff,
+ len,
+ 5000);
+
+ usRetries++;
+ if (-ENODEV == retval) {
+ psIntfAdapter->psAdapter->device_removed = TRUE;
break;
}
- }while((retval < 0) && ( usRetries < MAX_RDM_WRM_RETIRES));
+ } while ((retval < 0) && (usRetries < MAX_RDM_WRM_RETIRES));
- if(retval < 0)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, WRM, DBG_LVL_ALL, "WRM failed status :%d, retires :%d", retval, usRetries);
- psIntfAdapter->psAdapter->DeviceAccess = FALSE ;
+ if (retval < 0) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, WRM, DBG_LVL_ALL, "WRM failed status :%d, retires :%d", retval, usRetries);
+ psIntfAdapter->psAdapter->DeviceAccess = FALSE;
return retval;
- }
- else
- {
- psIntfAdapter->psAdapter->DeviceAccess = FALSE ;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, WRM, DBG_LVL_ALL, "WRM sent %d", retval);
+ } else {
+ psIntfAdapter->psAdapter->DeviceAccess = FALSE;
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, WRM, DBG_LVL_ALL, "WRM sent %d", retval);
return STATUS_SUCCESS;
-
}
-
}
-INT
-BcmRDM(PVOID arg,
- UINT addr,
- PVOID buff,
- INT len)
+INT BcmRDM(PVOID arg,
+ UINT addr,
+ PVOID buff,
+ INT len)
{
return InterfaceRDM((PS_INTERFACE_ADAPTER)arg, addr, buff, len);
}
-INT
-BcmWRM(PVOID arg,
- UINT addr,
- PVOID buff,
- INT len)
+INT BcmWRM(PVOID arg,
+ UINT addr,
+ PVOID buff,
+ INT len)
{
return InterfaceWRM((PS_INTERFACE_ADAPTER)arg, addr, buff, len);
}
-
-
INT Bcm_clear_halt_of_endpoints(PMINI_ADAPTER Adapter)
{
PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter);
- INT status = STATUS_SUCCESS ;
+ INT status = STATUS_SUCCESS;
/*
- usb_clear_halt - tells device to clear endpoint halt/stall condition
- @dev: device whose endpoint is halted
- @pipe: endpoint "pipe" being cleared
- @ Context: !in_interrupt ()
-
- usb_clear_halt is the synchrnous call and returns 0 on success else returns with error code.
- This is used to clear halt conditions for bulk and interrupt endpoints only.
- Control and isochronous endpoints never halts.
-
- Any URBs queued for such an endpoint should normally be unlinked by the driver
- before clearing the halt condition.
-
- */
-
- //Killing all the submitted urbs to different end points.
+ * usb_clear_halt - tells device to clear endpoint halt/stall condition
+ * @dev: device whose endpoint is halted
+ * @pipe: endpoint "pipe" being cleared
+ * @ Context: !in_interrupt ()
+ *
+ * usb_clear_halt is the synchrnous call and returns 0 on success else returns with error code.
+ * This is used to clear halt conditions for bulk and interrupt endpoints only.
+ * Control and isochronous endpoints never halts.
+ *
+ * Any URBs queued for such an endpoint should normally be unlinked by the driver
+ * before clearing the halt condition.
+ *
+ */
+
+ /* Killing all the submitted urbs to different end points. */
Bcm_kill_all_URBs(psIntfAdapter);
+ /* clear the halted/stalled state for every end point */
+ status = usb_clear_halt(psIntfAdapter->udev, psIntfAdapter->sIntrIn.int_in_pipe);
+ if (status != STATUS_SUCCESS)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL, "Unable to Clear Halt of Interrupt IN end point. :%d ", status);
- //clear the halted/stalled state for every end point
- status = usb_clear_halt(psIntfAdapter->udev,psIntfAdapter->sIntrIn.int_in_pipe);
- if(status != STATUS_SUCCESS)
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL, "Unable to Clear Halt of Interrupt IN end point. :%d ", status);
+ status = usb_clear_halt(psIntfAdapter->udev, psIntfAdapter->sBulkIn.bulk_in_pipe);
+ if (status != STATUS_SUCCESS)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL, "Unable to Clear Halt of Bulk IN end point. :%d ", status);
- status = usb_clear_halt(psIntfAdapter->udev,psIntfAdapter->sBulkIn.bulk_in_pipe);
- if(status != STATUS_SUCCESS)
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL, "Unable to Clear Halt of Bulk IN end point. :%d ", status);
+ status = usb_clear_halt(psIntfAdapter->udev, psIntfAdapter->sBulkOut.bulk_out_pipe);
+ if (status != STATUS_SUCCESS)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL, "Unable to Clear Halt of Bulk OUT end point. :%d ", status);
- status = usb_clear_halt(psIntfAdapter->udev,psIntfAdapter->sBulkOut.bulk_out_pipe);
- if(status != STATUS_SUCCESS)
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL, "Unable to Clear Halt of Bulk OUT end point. :%d ", status);
-
- return status ;
+ return status;
}
-
VOID Bcm_kill_all_URBs(PS_INTERFACE_ADAPTER psIntfAdapter)
{
struct urb *tempUrb = NULL;
UINT i;
- /**
- * usb_kill_urb - cancel a transfer request and wait for it to finish
- * @urb: pointer to URB describing a previously submitted request,
- * returns nothing as it is void returned API.
- *
- * This routine cancels an in-progress request. It is guaranteed that
- * upon return all completion handlers will have finished and the URB
- * will be totally idle and available for reuse
-
- * This routine may not be used in an interrupt context (such as a bottom
- * half or a completion handler), or when holding a spinlock, or in other
- * situations where the caller can't schedule().
- *
- **/
+ /*
+ * usb_kill_urb - cancel a transfer request and wait for it to finish
+ * @urb: pointer to URB describing a previously submitted request,
+ * returns nothing as it is void returned API.
+ *
+ * This routine cancels an in-progress request. It is guaranteed that
+ * upon return all completion handlers will have finished and the URB
+ * will be totally idle and available for reuse
+ *
+ * This routine may not be used in an interrupt context (such as a bottom
+ * half or a completion handler), or when holding a spinlock, or in other
+ * situations where the caller can't schedule().
+ *
+ */
/* Cancel submitted Interrupt-URB's */
- if(psIntfAdapter->psInterruptUrb != NULL)
- {
- if(psIntfAdapter->psInterruptUrb->status == -EINPROGRESS)
- usb_kill_urb(psIntfAdapter->psInterruptUrb);
+ if (psIntfAdapter->psInterruptUrb != NULL) {
+ if (psIntfAdapter->psInterruptUrb->status == -EINPROGRESS)
+ usb_kill_urb(psIntfAdapter->psInterruptUrb);
}
/* Cancel All submitted TX URB's */
- for(i = 0; i < MAXIMUM_USB_TCB; i++)
- {
+ for (i = 0; i < MAXIMUM_USB_TCB; i++) {
tempUrb = psIntfAdapter->asUsbTcb[i].urb;
- if(tempUrb)
- {
- if(tempUrb->status == -EINPROGRESS)
+ if (tempUrb) {
+ if (tempUrb->status == -EINPROGRESS)
usb_kill_urb(tempUrb);
}
}
- for(i = 0; i < MAXIMUM_USB_RCB; i++)
- {
+ for (i = 0; i < MAXIMUM_USB_RCB; i++) {
tempUrb = psIntfAdapter->asUsbRcb[i].urb;
- if(tempUrb)
- {
- if(tempUrb->status == -EINPROGRESS)
- usb_kill_urb(tempUrb);
+ if (tempUrb) {
+ if (tempUrb->status == -EINPROGRESS)
+ usb_kill_urb(tempUrb);
}
}
@@ -253,13 +226,12 @@ VOID Bcm_kill_all_URBs(PS_INTERFACE_ADAPTER psIntfAdapter)
VOID putUsbSuspend(struct work_struct *work)
{
- PS_INTERFACE_ADAPTER psIntfAdapter = NULL ;
- struct usb_interface *intf = NULL ;
- psIntfAdapter = container_of(work, S_INTERFACE_ADAPTER,usbSuspendWork);
- intf=psIntfAdapter->interface ;
+ PS_INTERFACE_ADAPTER psIntfAdapter = NULL;
+ struct usb_interface *intf = NULL;
+ psIntfAdapter = container_of(work, S_INTERFACE_ADAPTER, usbSuspendWork);
+ intf = psIntfAdapter->interface;
- if(psIntfAdapter->bSuspended == FALSE)
+ if (psIntfAdapter->bSuspended == FALSE)
usb_autopm_put_interface(intf);
-
}
diff --git a/drivers/staging/bcm/Macros.h b/drivers/staging/bcm/Macros.h
index 916bebb7ed5..46ed99c5376 100644
--- a/drivers/staging/bcm/Macros.h
+++ b/drivers/staging/bcm/Macros.h
@@ -1,17 +1,17 @@
/*************************************
-* Macros.h
+* Macros.h
**************************************/
-#ifndef __MACROS_H__
+#ifndef __MACROS_H__
#define __MACROS_H__
-#define TX_TIMER_PERIOD 10 //10 msec
+#define TX_TIMER_PERIOD 10 /*10 msec*/
#define MAX_CLASSIFIERS 100
-//#define MAX_CLASSIFIERS_PER_SF 20
+/* #define MAX_CLASSIFIERS_PER_SF 20 */
#define MAX_TARGET_DSX_BUFFERS 24
-#define MAX_CNTRL_PKTS 100
-#define MAX_DATA_PKTS 200
-#define MAX_ETH_SIZE 1536
+#define MAX_CNTRL_PKTS 100
+#define MAX_DATA_PKTS 200
+#define MAX_ETH_SIZE 1536
#define MAX_CNTL_PKT_SIZE 2048
#define MTU_SIZE 1400
@@ -20,102 +20,102 @@
#define MAC_ADDR_REGISTER 0xbf60d000
-///////////Quality of Service///////////////////////////
-#define NO_OF_QUEUES 17
-#define HiPriority NO_OF_QUEUES-1
-#define LowPriority 0
-#define BE 2
-#define rtPS 4
-#define ERTPS 5
-#define UGS 6
-
-#define BE_BUCKET_SIZE 1024*1024*100 //32kb
-#define rtPS_BUCKET_SIZE 1024*1024*100 //8kb
-#define MAX_ALLOWED_RATE 1024*1024*100
-#define TX_PACKET_THRESHOLD 10
-#define XSECONDS 1*HZ
-#define DSC_ACTIVATE_REQUEST 248
-#define QUEUE_DEPTH_OFFSET 0x1fc01000
-#define MAX_DEVICE_DESC_SIZE 2040
-#define MAX_CTRL_QUEUE_LEN 100
-#define MAX_APP_QUEUE_LEN 200
-#define MAX_LATENCY_ALLOWED 0xFFFFFFFF
-#define DEFAULT_UG_INTERVAL 250
-#define DEFAULT_UGI_FACTOR 4
-
-#define DEFAULT_PERSFCOUNT 60
-#define MAX_CONNECTIONS 10
-#define MAX_CLASS_NAME_LENGTH 32
-
-#define ETH_LENGTH_OF_ADDRESS 6
-#define MAX_MULTICAST_ADDRESSES 32
-#define IP_LENGTH_OF_ADDRESS 4
-
-#define IP_PACKET_ONLY_MODE 0
-#define ETH_PACKET_TUNNELING_MODE 1
-
-////////////Link Request//////////////
-#define SET_MAC_ADDRESS_REQUEST 0
-#define SYNC_UP_REQUEST 1
-#define SYNCED_UP 2
-#define LINK_UP_REQUEST 3
-#define LINK_CONNECTED 4
-#define SYNC_UP_NOTIFICATION 2
-#define LINK_UP_NOTIFICATION 4
+/* Quality of Service */
+#define NO_OF_QUEUES 17
+#define HiPriority (NO_OF_QUEUES-1)
+#define LowPriority 0
+#define BE 2
+#define rtPS 4
+#define ERTPS 5
+#define UGS 6
+
+#define BE_BUCKET_SIZE (1024*1024*100) /* 32kb */
+#define rtPS_BUCKET_SIZE (1024*1024*100) /* 8kb */
+#define MAX_ALLOWED_RATE (1024*1024*100)
+#define TX_PACKET_THRESHOLD 10
+#define XSECONDS (1*HZ)
+#define DSC_ACTIVATE_REQUEST 248
+#define QUEUE_DEPTH_OFFSET 0x1fc01000
+#define MAX_DEVICE_DESC_SIZE 2040
+#define MAX_CTRL_QUEUE_LEN 100
+#define MAX_APP_QUEUE_LEN 200
+#define MAX_LATENCY_ALLOWED 0xFFFFFFFF
+#define DEFAULT_UG_INTERVAL 250
+#define DEFAULT_UGI_FACTOR 4
+
+#define DEFAULT_PERSFCOUNT 60
+#define MAX_CONNECTIONS 10
+#define MAX_CLASS_NAME_LENGTH 32
+
+#define ETH_LENGTH_OF_ADDRESS 6
+#define MAX_MULTICAST_ADDRESSES 32
+#define IP_LENGTH_OF_ADDRESS 4
+
+#define IP_PACKET_ONLY_MODE 0
+#define ETH_PACKET_TUNNELING_MODE 1
+
+/* Link Request */
+#define SET_MAC_ADDRESS_REQUEST 0
+#define SYNC_UP_REQUEST 1
+#define SYNCED_UP 2
+#define LINK_UP_REQUEST 3
+#define LINK_CONNECTED 4
+#define SYNC_UP_NOTIFICATION 2
+#define LINK_UP_NOTIFICATION 4
#define LINK_NET_ENTRY 0x0002
-#define HMC_STATUS 0x0004
+#define HMC_STATUS 0x0004
#define LINK_UP_CONTROL_REQ 0x83
#define STATS_POINTER_REQ_STATUS 0x86
#define NETWORK_ENTRY_REQ_PAYLOAD 198
-#define LINK_DOWN_REQ_PAYLOAD 226
+#define LINK_DOWN_REQ_PAYLOAD 226
#define SYNC_UP_REQ_PAYLOAD 228
-#define STATISTICS_POINTER_REQ 237
+#define STATISTICS_POINTER_REQ 237
#define LINK_UP_REQ_PAYLOAD 245
#define LINK_UP_ACK 246
#define STATS_MSG_SIZE 4
#define INDEX_TO_DATA 4
-#define GO_TO_IDLE_MODE_PAYLOAD 210
-#define COME_UP_FROM_IDLE_MODE_PAYLOAD 211
-#define IDLE_MODE_SF_UPDATE_MSG 187
+#define GO_TO_IDLE_MODE_PAYLOAD 210
+#define COME_UP_FROM_IDLE_MODE_PAYLOAD 211
+#define IDLE_MODE_SF_UPDATE_MSG 187
-#define SKB_RESERVE_ETHERNET_HEADER 16
-#define SKB_RESERVE_PHS_BYTES 32
+#define SKB_RESERVE_ETHERNET_HEADER 16
+#define SKB_RESERVE_PHS_BYTES 32
-#define IP_PACKET_ONLY_MODE 0
-#define ETH_PACKET_TUNNELING_MODE 1
+#define IP_PACKET_ONLY_MODE 0
+#define ETH_PACKET_TUNNELING_MODE 1
-#define ETH_CS_802_3 1
-#define ETH_CS_802_1Q_VLAN 3
-#define IPV4_CS 1
-#define IPV6_CS 2
-#define ETH_CS_MASK 0x3f
+#define ETH_CS_802_3 1
+#define ETH_CS_802_1Q_VLAN 3
+#define IPV4_CS 1
+#define IPV6_CS 2
+#define ETH_CS_MASK 0x3f
/** \brief Validity bit maps for TLVs in packet classification rule */
-#define PKT_CLASSIFICATION_USER_PRIORITY_VALID 0
-#define PKT_CLASSIFICATION_VLANID_VALID 1
+#define PKT_CLASSIFICATION_USER_PRIORITY_VALID 0
+#define PKT_CLASSIFICATION_VLANID_VALID 1
#ifndef MIN
-#define MIN(_a, _b) ((_a) < (_b)? (_a): (_b))
+#define MIN(_a, _b) ((_a) < (_b) ? (_a) : (_b))
#endif
/*Leader related terms */
-#define LEADER_STATUS 0x00
-#define LEADER_STATUS_TCP_ACK 0x1
-#define LEADER_SIZE sizeof(LEADER)
-#define MAC_ADDR_REQ_SIZE sizeof(PACKETTOSEND)
-#define SS_INFO_REQ_SIZE sizeof(PACKETTOSEND)
-#define CM_REQUEST_SIZE LEADER_SIZE + sizeof(stLocalSFChangeRequest)
-#define IDLE_REQ_SIZE sizeof(PACKETTOSEND)
+#define LEADER_STATUS 0x00
+#define LEADER_STATUS_TCP_ACK 0x1
+#define LEADER_SIZE sizeof(LEADER)
+#define MAC_ADDR_REQ_SIZE sizeof(PACKETTOSEND)
+#define SS_INFO_REQ_SIZE sizeof(PACKETTOSEND)
+#define CM_REQUEST_SIZE (LEADER_SIZE + sizeof(stLocalSFChangeRequest))
+#define IDLE_REQ_SIZE sizeof(PACKETTOSEND)
-#define MAX_TRANSFER_CTRL_BYTE_USB 2 * 1024
+#define MAX_TRANSFER_CTRL_BYTE_USB (2*1024)
#define GET_MAILBOX1_REG_REQUEST 0x87
#define GET_MAILBOX1_REG_RESPONSE 0x67
@@ -124,40 +124,40 @@
#define TRANSMIT_NETWORK_DATA 0x00
#define RECEIVED_NETWORK_DATA 0x20
-#define CM_RESPONSES 0xA0
-#define STATUS_RSP 0xA1
-#define LINK_CONTROL_RESP 0xA2
-#define IDLE_MODE_STATUS 0xA3
-#define STATS_POINTER_RESP 0xA6
-#define MGMT_MSG_INFO_SW_STATUS 0xA7
-#define AUTH_SS_HOST_MSG 0xA8
-
-#define CM_DSA_ACK_PAYLOAD 247
-#define CM_DSC_ACK_PAYLOAD 248
-#define CM_DSD_ACK_PAYLOAD 249
-#define CM_DSDEACTVATE 250
-#define TOTAL_MASKED_ADDRESS_IN_BYTES 32
-
-#define MAC_REQ 0
-#define LINK_RESP 1
-#define RSSI_INDICATION 2
-
-#define SS_INFO 4
-#define STATISTICS_INFO 5
-#define CM_INDICATION 6
-#define PARAM_RESP 7
-#define BUFFER_1K 1024
-#define BUFFER_2K BUFFER_1K*2
-#define BUFFER_4K BUFFER_2K*2
-#define BUFFER_8K BUFFER_4K*2
-#define BUFFER_16K BUFFER_8K*2
-#define DOWNLINK_DIR 0
-#define UPLINK_DIR 1
-
-#define BCM_SIGNATURE "BECEEM"
-
-
-#define GPIO_OUTPUT_REGISTER 0x0F00003C
+#define CM_RESPONSES 0xA0
+#define STATUS_RSP 0xA1
+#define LINK_CONTROL_RESP 0xA2
+#define IDLE_MODE_STATUS 0xA3
+#define STATS_POINTER_RESP 0xA6
+#define MGMT_MSG_INFO_SW_STATUS 0xA7
+#define AUTH_SS_HOST_MSG 0xA8
+
+#define CM_DSA_ACK_PAYLOAD 247
+#define CM_DSC_ACK_PAYLOAD 248
+#define CM_DSD_ACK_PAYLOAD 249
+#define CM_DSDEACTVATE 250
+#define TOTAL_MASKED_ADDRESS_IN_BYTES 32
+
+#define MAC_REQ 0
+#define LINK_RESP 1
+#define RSSI_INDICATION 2
+
+#define SS_INFO 4
+#define STATISTICS_INFO 5
+#define CM_INDICATION 6
+#define PARAM_RESP 7
+#define BUFFER_1K 1024
+#define BUFFER_2K (BUFFER_1K*2)
+#define BUFFER_4K (BUFFER_2K*2)
+#define BUFFER_8K (BUFFER_4K*2)
+#define BUFFER_16K (BUFFER_8K*2)
+#define DOWNLINK_DIR 0
+#define UPLINK_DIR 1
+
+#define BCM_SIGNATURE "BECEEM"
+
+
+#define GPIO_OUTPUT_REGISTER 0x0F00003C
#define BCM_GPIO_OUTPUT_SET_REG 0x0F000040
#define BCM_GPIO_OUTPUT_CLR_REG 0x0F000044
#define GPIO_MODE_REGISTER 0x0F000034
@@ -165,44 +165,43 @@
typedef struct _LINK_STATE {
- UCHAR ucLinkStatus;
- UCHAR bIdleMode;
- UCHAR bShutdownMode;
-}LINK_STATE, *PLINK_STATE;
+ UCHAR ucLinkStatus;
+ UCHAR bIdleMode;
+ UCHAR bShutdownMode;
+} LINK_STATE, *PLINK_STATE;
enum enLinkStatus {
- WAIT_FOR_SYNC = 1,
- PHY_SYNC_ACHIVED = 2,
- LINKUP_IN_PROGRESS = 3,
- LINKUP_DONE = 4,
- DREG_RECEIVED = 5,
- LINK_STATUS_RESET_RECEIVED = 6,
- PERIODIC_WAKE_UP_NOTIFICATION_FRM_FW = 7,
- LINK_SHUTDOWN_REQ_FROM_FIRMWARE = 8,
- COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW =9
+ WAIT_FOR_SYNC = 1,
+ PHY_SYNC_ACHIVED = 2,
+ LINKUP_IN_PROGRESS = 3,
+ LINKUP_DONE = 4,
+ DREG_RECEIVED = 5,
+ LINK_STATUS_RESET_RECEIVED = 6,
+ PERIODIC_WAKE_UP_NOTIFICATION_FRM_FW = 7,
+ LINK_SHUTDOWN_REQ_FROM_FIRMWARE = 8,
+ COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW = 9
};
-typedef enum _E_PHS_DSC_ACTION
-{
- eAddPHSRule=0,
+typedef enum _E_PHS_DSC_ACTION {
+ eAddPHSRule = 0,
eSetPHSRule,
eDeletePHSRule,
eDeleteAllPHSRules
-}E_PHS_DSC_ACTION;
+} E_PHS_DSC_ACTION;
-#define CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ 0x89 // Host to Mac
-#define CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP 0xA9 // Mac to Host
-#define MASK_DISABLE_HEADER_SUPPRESSION 0x10 //0b000010000
-#define MINIMUM_PENDING_DESCRIPTORS 5
+#define CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ 0x89 /* Host to Mac */
+#define CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP 0xA9 /* Mac to Host */
+#define MASK_DISABLE_HEADER_SUPPRESSION 0x10 /* 0b000010000 */
+#define MINIMUM_PENDING_DESCRIPTORS 5
#define SHUTDOWN_HOSTINITIATED_REQUESTPAYLOAD 0xCC
#define SHUTDOWN_ACK_FROM_DRIVER 0x1
#define SHUTDOWN_NACK_FROM_DRIVER 0x2
-#define LINK_SYNC_UP_SUBTYPE 0x0001
-#define LINK_SYNC_DOWN_SUBTYPE 0x0001
+#define LINK_SYNC_UP_SUBTYPE 0x0001
+#define LINK_SYNC_DOWN_SUBTYPE 0x0001
@@ -226,9 +225,9 @@ typedef enum _E_PHS_DSC_ACTION
#define MAX_PENDING_CTRL_PACKET (MAX_CTRL_QUEUE_LEN-10)
-#define WIMAX_MAX_MTU (MTU_SIZE + ETH_HLEN)
+#define WIMAX_MAX_MTU (MTU_SIZE + ETH_HLEN)
#define AUTO_LINKUP_ENABLE 0x2
-#define AUTO_SYNC_DISABLE 0x1
+#define AUTO_SYNC_DISABLE 0x1
#define AUTO_FIRM_DOWNLOAD 0x1
#define SETTLE_DOWN_TIME 50
@@ -242,9 +241,9 @@ typedef enum _E_PHS_DSC_ACTION
#define TARGET_CAN_NOT_GO_TO_IDLE_MODE 3
#define IDLE_MODE_PAYLOAD_LENGTH 8
-#define IP_HEADER(Buffer) ((IPHeaderFormat*)(Buffer))
-#define IPV4 4
-#define IP_VERSION(byte) (((byte&0xF0)>>4))
+#define IP_HEADER(Buffer) ((IPHeaderFormat *)(Buffer))
+#define IPV4 4
+#define IP_VERSION(byte) (((byte&0xF0)>>4))
#define SET_MAC_ADDRESS 193
#define SET_MAC_ADDRESS_RESPONSE 236
@@ -263,15 +262,15 @@ typedef enum _E_PHS_DSC_ACTION
#define INVALID_QUEUE_INDEX NO_OF_QUEUES
-#define INVALID_PID (pid_t)-1
-#define DDR_80_MHZ 0
-#define DDR_100_MHZ 1
-#define DDR_120_MHZ 2 // Additional Frequency for T3LP
-#define DDR_133_MHZ 3
-#define DDR_140_MHZ 4 // Not Used (Reserved for future)
-#define DDR_160_MHZ 5 // Additional Frequency for T3LP
-#define DDR_180_MHZ 6 // Not Used (Reserved for future)
-#define DDR_200_MHZ 7 // Not Used (Reserved for future)
+#define INVALID_PID ((pid_t)-1)
+#define DDR_80_MHZ 0
+#define DDR_100_MHZ 1
+#define DDR_120_MHZ 2 /* Additional Frequency for T3LP */
+#define DDR_133_MHZ 3
+#define DDR_140_MHZ 4 /* Not Used (Reserved for future) */
+#define DDR_160_MHZ 5 /* Additional Frequency for T3LP */
+#define DDR_180_MHZ 6 /* Not Used (Reserved for future) */
+#define DDR_200_MHZ 7 /* Not Used (Reserved for future) */
#define MIPS_200_MHZ 0
#define MIPS_160_MHZ 1
@@ -291,27 +290,27 @@ typedef enum _E_PHS_DSC_ACTION
#define EEPROM_REJECT_REG_3 0x0f003008
#define EEPROM_REJECT_REG_4 0x0f003020
#define EEPROM_REJECT_MASK 0x0fffffff
-#define VSG_MODE 0x3
+#define VSG_MODE 0x3
/* Idle Mode Related Registers */
#define DEBUG_INTERRUPT_GENERATOR_REGISTOR 0x0F00007C
-#define SW_ABORT_IDLEMODE_LOC 0x0FF01FFC
+#define SW_ABORT_IDLEMODE_LOC 0x0FF01FFC
-#define SW_ABORT_IDLEMODE_PATTERN 0xd0ea1d1e
-#define DEVICE_INT_OUT_EP_REG0 0x0F011870
-#define DEVICE_INT_OUT_EP_REG1 0x0F011874
+#define SW_ABORT_IDLEMODE_PATTERN 0xd0ea1d1e
+#define DEVICE_INT_OUT_EP_REG0 0x0F011870
+#define DEVICE_INT_OUT_EP_REG1 0x0F011874
#define BIN_FILE "/lib/firmware/macxvi200.bin"
#define CFG_FILE "/lib/firmware/macxvi.cfg"
#define SF_MAX_ALLOWED_PACKETS_TO_BACKUP 128
-#define MIN_VAL(x,y) ((x)<(y)?(x):(y))
+#define MIN_VAL(x, y) ((x) < (y) ? (x) : (y))
#define MAC_ADDRESS_SIZE 6
#define EEPROM_COMMAND_Q_REG 0x0F003018
#define EEPROM_READ_DATA_Q_REG 0x0F003020
-#define CHIP_ID_REG 0x0F000000
-#define GPIO_MODE_REG 0x0F000034
-#define GPIO_OUTPUT_REG 0x0F00003C
-#define WIMAX_MAX_ALLOWED_RATE 1024*1024*50
+#define CHIP_ID_REG 0x0F000000
+#define GPIO_MODE_REG 0x0F000034
+#define GPIO_OUTPUT_REG 0x0F00003C
+#define WIMAX_MAX_ALLOWED_RATE (1024*1024*50)
#define T3 0xbece0300
#define TARGET_SFID_TXDESC_MAP_LOC 0xBFFFF400
@@ -319,31 +318,29 @@ typedef enum _E_PHS_DSC_ACTION
#define RWM_READ 0
#define RWM_WRITE 1
-#define T3LPB 0xbece3300
-#define BCS220_2 0xbece3311
-#define BCS220_2BC 0xBECE3310
-#define BCS250_BC 0xbece3301
-#define BCS220_3 0xbece3321
+#define T3LPB 0xbece3300
+#define BCS220_2 0xbece3311
+#define BCS220_2BC 0xBECE3310
+#define BCS250_BC 0xbece3301
+#define BCS220_3 0xbece3321
-#define HPM_CONFIG_LDO145 0x0F000D54
-#define HPM_CONFIG_MSW 0x0F000D58
+#define HPM_CONFIG_LDO145 0x0F000D54
+#define HPM_CONFIG_MSW 0x0F000D58
#define T3B 0xbece0310
-typedef enum eNVM_TYPE
-{
+typedef enum eNVM_TYPE {
NVM_AUTODETECT = 0,
NVM_EEPROM,
NVM_FLASH,
NVM_UNKNOWN
-}NVM_TYPE;
+} NVM_TYPE;
-typedef enum ePMU_MODES
-{
+typedef enum ePMU_MODES {
HYBRID_MODE_7C = 0,
INTERNAL_MODE_6 = 1,
HYBRID_MODE_6 = 2
-}PMU_MODE;
+} PMU_MODE;
#define MAX_RDM_WRM_RETIRES 1
@@ -360,4 +357,4 @@ enum eAbortPattern {
#define SKB_CB_LATENCY_OFFSET 1
#define SKB_CB_TCPACK_OFFSET 2
-#endif //__MACROS_H__
+#endif /* __MACROS_H__ */
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index c5b3a3666bc..e9f29d59751 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -1,113 +1,94 @@
#include "headers.h"
-static int BcmFileDownload(PMINI_ADAPTER Adapter, const char *path,
- unsigned int loc);
+static int BcmFileDownload(PMINI_ADAPTER Adapter, const char *path, unsigned int loc);
static VOID doPowerAutoCorrection(PMINI_ADAPTER psAdapter);
-static void HandleShutDownModeRequest(PMINI_ADAPTER Adapter,PUCHAR pucBuffer);
+static void HandleShutDownModeRequest(PMINI_ADAPTER Adapter, PUCHAR pucBuffer);
static int bcm_parse_target_params(PMINI_ADAPTER Adapter);
-static void beceem_protocol_reset (PMINI_ADAPTER Adapter);
+static void beceem_protocol_reset(PMINI_ADAPTER Adapter);
static VOID default_wimax_protocol_initialize(PMINI_ADAPTER Adapter)
{
+ UINT uiLoopIndex;
- UINT uiLoopIndex;
-
- for(uiLoopIndex=0; uiLoopIndex < NO_OF_QUEUES-1; uiLoopIndex++)
- {
- Adapter->PackInfo[uiLoopIndex].uiThreshold=TX_PACKET_THRESHOLD;
- Adapter->PackInfo[uiLoopIndex].uiMaxAllowedRate=MAX_ALLOWED_RATE;
- Adapter->PackInfo[uiLoopIndex].uiMaxBucketSize=20*1024*1024;
- }
-
- Adapter->BEBucketSize=BE_BUCKET_SIZE;
- Adapter->rtPSBucketSize=rtPS_BUCKET_SIZE;
- Adapter->LinkStatus=SYNC_UP_REQUEST;
- Adapter->TransferMode=IP_PACKET_ONLY_MODE;
- Adapter->usBestEffortQueueIndex=-1;
- return;
-}
+ for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES-1; uiLoopIndex++) {
+ Adapter->PackInfo[uiLoopIndex].uiThreshold = TX_PACKET_THRESHOLD;
+ Adapter->PackInfo[uiLoopIndex].uiMaxAllowedRate = MAX_ALLOWED_RATE;
+ Adapter->PackInfo[uiLoopIndex].uiMaxBucketSize = 20*1024*1024;
+ }
+ Adapter->BEBucketSize = BE_BUCKET_SIZE;
+ Adapter->rtPSBucketSize = rtPS_BUCKET_SIZE;
+ Adapter->LinkStatus = SYNC_UP_REQUEST;
+ Adapter->TransferMode = IP_PACKET_ONLY_MODE;
+ Adapter->usBestEffortQueueIndex = -1;
+ return;
+}
-INT
-InitAdapter(PMINI_ADAPTER psAdapter)
+INT InitAdapter(PMINI_ADAPTER psAdapter)
{
- int i = 0;
- INT Status = STATUS_SUCCESS ;
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Initialising Adapter = %p", psAdapter);
+ int i = 0;
+ INT Status = STATUS_SUCCESS;
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Initialising Adapter = %p", psAdapter);
- if(psAdapter == NULL)
- {
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Adapter is NULL");
+ if (psAdapter == NULL) {
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Adapter is NULL");
return -EINVAL;
}
- sema_init(&psAdapter->NVMRdmWrmLock,1);
-// psAdapter->ulFlashCalStart = FLASH_AUTO_INIT_BASE_ADDR;
-
+ sema_init(&psAdapter->NVMRdmWrmLock, 1);
sema_init(&psAdapter->rdmwrmsync, 1);
spin_lock_init(&psAdapter->control_queue_lock);
spin_lock_init(&psAdapter->txtransmitlock);
- sema_init(&psAdapter->RxAppControlQueuelock, 1);
-// sema_init(&psAdapter->data_packet_queue_lock, 1);
- sema_init(&psAdapter->fw_download_sema, 1);
- sema_init(&psAdapter->LowPowerModeSync,1);
-
- // spin_lock_init(&psAdapter->sleeper_lock);
-
- for(i=0;i<NO_OF_QUEUES; i++)
- spin_lock_init(&psAdapter->PackInfo[i].SFQueueLock);
- i=0;
-
- init_waitqueue_head(&psAdapter->process_rx_cntrlpkt);
- init_waitqueue_head(&psAdapter->tx_packet_wait_queue);
- init_waitqueue_head(&psAdapter->process_read_wait_queue);
- init_waitqueue_head(&psAdapter->ioctl_fw_dnld_wait_queue);
- init_waitqueue_head(&psAdapter->lowpower_mode_wait_queue);
+ sema_init(&psAdapter->RxAppControlQueuelock, 1);
+ sema_init(&psAdapter->fw_download_sema, 1);
+ sema_init(&psAdapter->LowPowerModeSync, 1);
+
+ for (i = 0; i < NO_OF_QUEUES; i++)
+ spin_lock_init(&psAdapter->PackInfo[i].SFQueueLock);
+ i = 0;
+
+ init_waitqueue_head(&psAdapter->process_rx_cntrlpkt);
+ init_waitqueue_head(&psAdapter->tx_packet_wait_queue);
+ init_waitqueue_head(&psAdapter->process_read_wait_queue);
+ init_waitqueue_head(&psAdapter->ioctl_fw_dnld_wait_queue);
+ init_waitqueue_head(&psAdapter->lowpower_mode_wait_queue);
psAdapter->waiting_to_fw_download_done = TRUE;
- //init_waitqueue_head(&psAdapter->device_wake_queue);
- psAdapter->fw_download_done=FALSE;
-
+ psAdapter->fw_download_done = FALSE;
default_wimax_protocol_initialize(psAdapter);
- for (i=0;i<MAX_CNTRL_PKTS;i++)
- {
+ for (i = 0; i < MAX_CNTRL_PKTS; i++) {
psAdapter->txctlpacket[i] = kmalloc(MAX_CNTL_PKT_SIZE, GFP_KERNEL);
- if(!psAdapter->txctlpacket[i])
- {
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No More Cntl pkts got, max got is %d", i);
+ if (!psAdapter->txctlpacket[i]) {
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No More Cntl pkts got, max got is %d", i);
return -ENOMEM;
}
}
- if(AllocAdapterDsxBuffer(psAdapter))
- {
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to allocate DSX buffers");
+
+ if (AllocAdapterDsxBuffer(psAdapter)) {
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to allocate DSX buffers");
return -EINVAL;
}
- //Initialize PHS interface
- if(phs_init(&psAdapter->stBCMPhsContext,psAdapter)!=0)
- {
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"%s:%s:%d:Error PHS Init Failed=====>\n", __FILE__, __FUNCTION__, __LINE__);
+ /* Initialize PHS interface */
+ if (phs_init(&psAdapter->stBCMPhsContext, psAdapter) != 0) {
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%s:%d:Error PHS Init Failed=====>\n", __FILE__, __func__, __LINE__);
return -ENOMEM;
}
Status = BcmAllocFlashCSStructure(psAdapter);
- if(Status)
- {
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"Memory Allocation for Flash structure failed");
- return Status ;
+ if (Status) {
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Memory Allocation for Flash structure failed");
+ return Status;
}
Status = vendorextnInit(psAdapter);
- if(STATUS_SUCCESS != Status)
- {
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"Vendor Init Failed");
- return Status ;
+ if (STATUS_SUCCESS != Status) {
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Vendor Init Failed");
+ return Status;
}
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Adapter initialised");
-
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Adapter initialised");
return STATUS_SUCCESS;
}
@@ -115,42 +96,37 @@ InitAdapter(PMINI_ADAPTER psAdapter)
VOID AdapterFree(PMINI_ADAPTER Adapter)
{
int count;
-
beceem_protocol_reset(Adapter);
-
vendorextnExit(Adapter);
- if(Adapter->control_packet_handler && !IS_ERR(Adapter->control_packet_handler))
- kthread_stop (Adapter->control_packet_handler);
+ if (Adapter->control_packet_handler && !IS_ERR(Adapter->control_packet_handler))
+ kthread_stop(Adapter->control_packet_handler);
- if(Adapter->transmit_packet_thread && !IS_ERR(Adapter->transmit_packet_thread))
- kthread_stop (Adapter->transmit_packet_thread);
+ if (Adapter->transmit_packet_thread && !IS_ERR(Adapter->transmit_packet_thread))
+ kthread_stop(Adapter->transmit_packet_thread);
wake_up(&Adapter->process_read_wait_queue);
- if(Adapter->LEDInfo.led_thread_running & (BCM_LED_THREAD_RUNNING_ACTIVELY | BCM_LED_THREAD_RUNNING_INACTIVELY))
- kthread_stop (Adapter->LEDInfo.led_cntrl_threadid);
+ if (Adapter->LEDInfo.led_thread_running & (BCM_LED_THREAD_RUNNING_ACTIVELY | BCM_LED_THREAD_RUNNING_INACTIVELY))
+ kthread_stop(Adapter->LEDInfo.led_cntrl_threadid);
unregister_networkdev(Adapter);
/* FIXME: use proper wait_event and refcounting */
- while(atomic_read(&Adapter->ApplicationRunning))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Waiting for Application to close.. %d\n",atomic_read(&Adapter->ApplicationRunning));
+ while (atomic_read(&Adapter->ApplicationRunning)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Waiting for Application to close.. %d\n", atomic_read(&Adapter->ApplicationRunning));
msleep(100);
}
unregister_control_device_interface(Adapter);
-
kfree(Adapter->pstargetparams);
- for (count =0;count < MAX_CNTRL_PKTS;count++)
+ for (count = 0; count < MAX_CNTRL_PKTS; count++)
kfree(Adapter->txctlpacket[count]);
FreeAdapterDsxBuffer(Adapter);
-
kfree(Adapter->pvInterfaceAdapter);
- //Free the PHS Interface
+ /* Free the PHS Interface */
PhsCleanup(&Adapter->stBCMPhsContext);
BcmDeAllocFlashCSStructure(Adapter);
@@ -160,20 +136,18 @@ VOID AdapterFree(PMINI_ADAPTER Adapter)
static int create_worker_threads(PMINI_ADAPTER psAdapter)
{
- // Rx Control Packets Processing
+ /* Rx Control Packets Processing */
psAdapter->control_packet_handler = kthread_run((int (*)(void *))
control_packet_handler, psAdapter, "%s-rx", DRV_NAME);
- if(IS_ERR(psAdapter->control_packet_handler))
- {
+ if (IS_ERR(psAdapter->control_packet_handler)) {
pr_notice(DRV_NAME ": could not create control thread\n");
return PTR_ERR(psAdapter->control_packet_handler);
}
- // Tx Thread
+ /* Tx Thread */
psAdapter->transmit_packet_thread = kthread_run((int (*)(void *))
tx_pkt_handler, psAdapter, "%s-tx", DRV_NAME);
- if(IS_ERR (psAdapter->transmit_packet_thread))
- {
+ if (IS_ERR(psAdapter->transmit_packet_thread)) {
pr_notice(DRV_NAME ": could not creat transmit thread\n");
kthread_stop(psAdapter->control_packet_handler);
return PTR_ERR(psAdapter->transmit_packet_thread);
@@ -183,275 +157,248 @@ static int create_worker_threads(PMINI_ADAPTER psAdapter)
static struct file *open_firmware_file(PMINI_ADAPTER Adapter, const char *path)
{
- struct file *flp=NULL;
- mm_segment_t oldfs;
- oldfs=get_fs();
+ struct file *flp = NULL;
+ mm_segment_t oldfs;
+ oldfs = get_fs();
set_fs(get_ds());
- flp=filp_open(path, O_RDONLY, S_IRWXU);
- set_fs(oldfs);
- if(IS_ERR(flp))
- {
- pr_err(DRV_NAME "Unable To Open File %s, err %ld",
- path, PTR_ERR(flp));
- flp = NULL;
- }
-
- if(Adapter->device_removed)
- flp = NULL;
-
- return flp;
-}
+ flp = filp_open(path, O_RDONLY, S_IRWXU);
+ set_fs(oldfs);
+ if (IS_ERR(flp)) {
+ pr_err(DRV_NAME "Unable To Open File %s, err %ld", path, PTR_ERR(flp));
+ flp = NULL;
+ }
+ if (Adapter->device_removed)
+ flp = NULL;
-static int BcmFileDownload(PMINI_ADAPTER Adapter,/**< Logical Adapter */
- const char *path, /**< path to image file */
- unsigned int loc /**< Download Address on the chip*/
- )
+ return flp;
+}
+
+/* Arguments:
+ * Logical Adapter
+ * Path to image file
+ * Download Address on the chip
+ */
+static int BcmFileDownload(PMINI_ADAPTER Adapter, const char *path, unsigned int loc)
{
- int errorno=0;
- struct file *flp=NULL;
- mm_segment_t oldfs;
- struct timeval tv={0};
-
- flp=open_firmware_file(Adapter, path);
- if(!flp)
- {
- errorno = -ENOENT;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable to Open %s\n", path);
- goto exit_download;
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path,(unsigned long)flp->f_dentry->d_inode->i_size, loc);
- do_gettimeofday(&tv);
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "download start %lx", ((tv.tv_sec * 1000) +
- (tv.tv_usec/1000)));
- if(Adapter->bcm_file_download(Adapter->pvInterfaceAdapter, flp, loc))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to download the firmware with error\
- %x!!!", -EIO);
- errorno=-EIO;
- goto exit_download;
- }
- oldfs=get_fs();set_fs(get_ds());
- vfs_llseek(flp, 0, 0);
- set_fs(oldfs);
- if(Adapter->bcm_file_readback_from_chip(Adapter->pvInterfaceAdapter,
- flp, loc))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to read back firmware!");
- errorno=-EIO;
- goto exit_download;
- }
+ int errorno = 0;
+ struct file *flp = NULL;
+ mm_segment_t oldfs;
+ struct timeval tv = {0};
+
+ flp = open_firmware_file(Adapter, path);
+ if (!flp) {
+ errorno = -ENOENT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable to Open %s\n", path);
+ goto exit_download;
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)flp->f_dentry->d_inode->i_size, loc);
+ do_gettimeofday(&tv);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "download start %lx", ((tv.tv_sec * 1000) + (tv.tv_usec / 1000)));
+ if (Adapter->bcm_file_download(Adapter->pvInterfaceAdapter, flp, loc)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to download the firmware with error %x!!!", -EIO);
+ errorno = -EIO;
+ goto exit_download;
+ }
+ oldfs = get_fs();
+ set_fs(get_ds());
+ vfs_llseek(flp, 0, 0);
+ set_fs(oldfs);
+ if (Adapter->bcm_file_readback_from_chip(Adapter->pvInterfaceAdapter, flp, loc)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to read back firmware!");
+ errorno = -EIO;
+ goto exit_download;
+ }
exit_download:
- oldfs=get_fs();set_fs(get_ds());
- if(flp && !(IS_ERR(flp)))
- filp_close(flp, current->files);
- set_fs(oldfs);
+ oldfs = get_fs();
+ set_fs(get_ds());
+ if (flp && !(IS_ERR(flp)))
+ filp_close(flp, current->files);
+ set_fs(oldfs);
- return errorno;
+ return errorno;
}
/**
-@ingroup ctrl_pkt_functions
-This function copies the contents of given buffer
-to the control packet and queues it for transmission.
-@note Do not acquire the spinock, as it it already acquired.
-@return SUCCESS/FAILURE.
-*/
-INT CopyBufferToControlPacket(PMINI_ADAPTER Adapter,/**<Logical Adapter*/
- PVOID ioBuffer/**<Control Packet Buffer*/
- )
+ * @ingroup ctrl_pkt_functions
+ * This function copies the contents of given buffer
+ * to the control packet and queues it for transmission.
+ * @note Do not acquire the spinock, as it it already acquired.
+ * @return SUCCESS/FAILURE.
+ * Arguments:
+ * Logical Adapter
+ * Control Packet Buffer
+ */
+INT CopyBufferToControlPacket(PMINI_ADAPTER Adapter, PVOID ioBuffer)
{
- PLEADER pLeader=NULL;
- INT Status=0;
- unsigned char *ctrl_buff=NULL;
- UINT pktlen=0;
- PLINK_REQUEST pLinkReq = NULL;
- PUCHAR pucAddIndication = NULL;
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "======>");
- if(!ioBuffer)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL, "Got Null Buffer\n");
+ PLEADER pLeader = NULL;
+ INT Status = 0;
+ unsigned char *ctrl_buff = NULL;
+ UINT pktlen = 0;
+ PLINK_REQUEST pLinkReq = NULL;
+ PUCHAR pucAddIndication = NULL;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "======>");
+ if (!ioBuffer) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Got Null Buffer\n");
return -EINVAL;
}
pLinkReq = (PLINK_REQUEST)ioBuffer;
- pLeader=(PLEADER)ioBuffer; //ioBuffer Contains sw_Status and Payload
+ pLeader = (PLEADER)ioBuffer; /* ioBuffer Contains sw_Status and Payload */
- if(Adapter->bShutStatus == TRUE &&
+ if (Adapter->bShutStatus == TRUE &&
pLinkReq->szData[0] == LINK_DOWN_REQ_PAYLOAD &&
- pLinkReq->szData[1] == LINK_SYNC_UP_SUBTYPE)
- {
- //Got sync down in SHUTDOWN..we could not process this.
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL, "SYNC DOWN Request in Shut Down Mode..\n");
+ pLinkReq->szData[1] == LINK_SYNC_UP_SUBTYPE) {
+
+ /* Got sync down in SHUTDOWN..we could not process this. */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "SYNC DOWN Request in Shut Down Mode..\n");
return STATUS_FAILURE;
}
- if((pLeader->Status == LINK_UP_CONTROL_REQ) &&
+ if ((pLeader->Status == LINK_UP_CONTROL_REQ) &&
((pLinkReq->szData[0] == LINK_UP_REQ_PAYLOAD &&
- (pLinkReq->szData[1] == LINK_SYNC_UP_SUBTYPE)) ||//Sync Up Command
- pLinkReq->szData[0] == NETWORK_ENTRY_REQ_PAYLOAD)) //Net Entry Command
- {
- if(Adapter->LinkStatus > PHY_SYNC_ACHIVED)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL,"LinkStatus is Greater than PHY_SYN_ACHIEVED");
+ (pLinkReq->szData[1] == LINK_SYNC_UP_SUBTYPE)) || /* Sync Up Command */
+ pLinkReq->szData[0] == NETWORK_ENTRY_REQ_PAYLOAD)) /* Net Entry Command */ {
+
+ if (Adapter->LinkStatus > PHY_SYNC_ACHIVED) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "LinkStatus is Greater than PHY_SYN_ACHIEVED");
return STATUS_FAILURE;
}
- if(TRUE == Adapter->bShutStatus)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL, "SYNC UP IN SHUTDOWN..Device WakeUp\n");
- if(Adapter->bTriedToWakeUpFromlowPowerMode == FALSE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL, "Waking up for the First Time..\n");
- Adapter->usIdleModePattern = ABORT_SHUTDOWN_MODE; // change it to 1 for current support.
+
+ if (TRUE == Adapter->bShutStatus) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "SYNC UP IN SHUTDOWN..Device WakeUp\n");
+ if (Adapter->bTriedToWakeUpFromlowPowerMode == FALSE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Waking up for the First Time..\n");
+ Adapter->usIdleModePattern = ABORT_SHUTDOWN_MODE; /* change it to 1 for current support. */
Adapter->bWakeUpDevice = TRUE;
wake_up(&Adapter->process_rx_cntrlpkt);
+ Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue, !Adapter->bShutStatus, (5 * HZ));
- Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue,
- !Adapter->bShutStatus, (5 * HZ));
-
- if(Status == -ERESTARTSYS)
+ if (Status == -ERESTARTSYS)
return Status;
- if(Adapter->bShutStatus)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL, "Shutdown Mode Wake up Failed - No Wake Up Received\n");
+ if (Adapter->bShutStatus) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Shutdown Mode Wake up Failed - No Wake Up Received\n");
return STATUS_FAILURE;
}
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL, "Wakeup has been tried already...\n");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Wakeup has been tried already...\n");
}
}
-
}
- if(TRUE == Adapter->IdleMode)
- {
- //BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Device is in Idle mode ... hence \n");
- if(pLeader->Status == LINK_UP_CONTROL_REQ || pLeader->Status == 0x80 ||
- pLeader->Status == CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ )
- {
- if((pLeader->Status == LINK_UP_CONTROL_REQ) && (pLinkReq->szData[0]==LINK_DOWN_REQ_PAYLOAD))
- {
- if((pLinkReq->szData[1] == LINK_SYNC_DOWN_SUBTYPE))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL, "Link Down Sent in Idle Mode\n");
- Adapter->usIdleModePattern = ABORT_IDLE_SYNCDOWN;//LINK DOWN sent in Idle Mode
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL,"ABORT_IDLE_MODE pattern is being written\n");
+ if (TRUE == Adapter->IdleMode) {
+ /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Device is in Idle mode ... hence\n"); */
+ if (pLeader->Status == LINK_UP_CONTROL_REQ || pLeader->Status == 0x80 ||
+ pLeader->Status == CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ) {
+
+ if ((pLeader->Status == LINK_UP_CONTROL_REQ) && (pLinkReq->szData[0] == LINK_DOWN_REQ_PAYLOAD)) {
+ if ((pLinkReq->szData[1] == LINK_SYNC_DOWN_SUBTYPE)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Link Down Sent in Idle Mode\n");
+ Adapter->usIdleModePattern = ABORT_IDLE_SYNCDOWN; /* LINK DOWN sent in Idle Mode */
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "ABORT_IDLE_MODE pattern is being written\n");
Adapter->usIdleModePattern = ABORT_IDLE_REG;
}
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL,"ABORT_IDLE_MODE pattern is being written\n");
- Adapter->usIdleModePattern = ABORT_IDLE_MODE;
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "ABORT_IDLE_MODE pattern is being written\n");
+ Adapter->usIdleModePattern = ABORT_IDLE_MODE;
}
/*Setting bIdleMode_tx_from_host to TRUE to indicate LED control thread to represent
- the wake up from idlemode is from host*/
- //Adapter->LEDInfo.bIdleMode_tx_from_host = TRUE;
+ * the wake up from idlemode is from host
+ */
+ /* Adapter->LEDInfo.bIdleMode_tx_from_host = TRUE; */
Adapter->bWakeUpDevice = TRUE;
wake_up(&Adapter->process_rx_cntrlpkt);
-
-
- if(LINK_DOWN_REQ_PAYLOAD == pLinkReq->szData[0])
- {
- // We should not send DREG message down while in idlemode.
+ /* We should not send DREG message down while in idlemode. */
+ if (LINK_DOWN_REQ_PAYLOAD == pLinkReq->szData[0])
return STATUS_SUCCESS;
- }
- Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue,
- !Adapter->IdleMode, (5 * HZ));
+ Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue, !Adapter->IdleMode, (5 * HZ));
- if(Status == -ERESTARTSYS)
+ if (Status == -ERESTARTSYS)
return Status;
- if(Adapter->IdleMode)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL, "Idle Mode Wake up Failed - No Wake Up Received\n");
+ if (Adapter->IdleMode) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Idle Mode Wake up Failed - No Wake Up Received\n");
return STATUS_FAILURE;
}
- }
- else
+ } else {
return STATUS_SUCCESS;
+ }
}
- //The Driver has to send control messages with a particular VCID
- pLeader->Vcid = VCID_CONTROL_PACKET;//VCID for control packet.
+
+ /* The Driver has to send control messages with a particular VCID */
+ pLeader->Vcid = VCID_CONTROL_PACKET; /* VCID for control packet. */
/* Allocate skb for Control Packet */
pktlen = pLeader->PLength;
ctrl_buff = (char *)Adapter->txctlpacket[atomic_read(&Adapter->index_wr_txcntrlpkt)%MAX_CNTRL_PKTS];
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL, "Control packet to be taken =%d and address is =%pincoming address is =%p and packet len=%x",
- atomic_read(&Adapter->index_wr_txcntrlpkt), ctrl_buff, ioBuffer, pktlen);
- if(ctrl_buff)
- {
- if(pLeader)
- {
- if((pLeader->Status == 0x80) ||
- (pLeader->Status == CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ))
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Control packet to be taken =%d and address is =%pincoming address is =%p and packet len=%x",
+ atomic_read(&Adapter->index_wr_txcntrlpkt), ctrl_buff, ioBuffer, pktlen);
+ if (ctrl_buff) {
+ if (pLeader) {
+ if ((pLeader->Status == 0x80) ||
+ (pLeader->Status == CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ)) {
/*
- //Restructure the DSX message to handle Multiple classifier Support
- // Write the Service Flow param Structures directly to the target
- //and embed the pointers in the DSX messages sent to target.
- */
- //Lets store the current length of the control packet we are transmitting
+ * Restructure the DSX message to handle Multiple classifier Support
+ * Write the Service Flow param Structures directly to the target
+ * and embed the pointers in the DSX messages sent to target.
+ */
+ /* Lets store the current length of the control packet we are transmitting */
pucAddIndication = (PUCHAR)ioBuffer + LEADER_SIZE;
pktlen = pLeader->PLength;
- Status = StoreCmControlResponseMessage(Adapter,pucAddIndication, &pktlen);
- if(Status != 1)
- {
- ClearTargetDSXBuffer(Adapter,((stLocalSFAddIndicationAlt *)pucAddIndication)->u16TID, FALSE);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, " Error Restoring The DSX Control Packet. Dsx Buffers on Target may not be Setup Properly ");
+ Status = StoreCmControlResponseMessage(Adapter, pucAddIndication, &pktlen);
+ if (Status != 1) {
+ ClearTargetDSXBuffer(Adapter, ((stLocalSFAddIndicationAlt *)pucAddIndication)->u16TID, FALSE);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, " Error Restoring The DSX Control Packet. Dsx Buffers on Target may not be Setup Properly ");
return STATUS_FAILURE;
}
/*
- //update the leader to use the new length
- //The length of the control packet is length of message being sent + Leader length
- */
+ * update the leader to use the new length
+ * The length of the control packet is length of message being sent + Leader length
+ */
pLeader->PLength = pktlen;
}
}
+
+ if (pktlen + LEADER_SIZE > MAX_CNTL_PKT_SIZE)
+ return -EINVAL;
+
memset(ctrl_buff, 0, pktlen+LEADER_SIZE);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Copying the Control Packet Buffer with length=%d\n", pLeader->PLength);
- *(PLEADER)ctrl_buff=*pLeader;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Copying the Control Packet Buffer with length=%d\n", pLeader->PLength);
+ *(PLEADER)ctrl_buff = *pLeader;
memcpy(ctrl_buff + LEADER_SIZE, ((PUCHAR)ioBuffer + LEADER_SIZE), pLeader->PLength);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Enqueuing the Control Packet");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Enqueuing the Control Packet");
- /*Update the statistics counters */
+ /* Update the statistics counters */
spin_lock_bh(&Adapter->PackInfo[HiPriority].SFQueueLock);
- Adapter->PackInfo[HiPriority].uiCurrentBytesOnHost+=pLeader->PLength;
+ Adapter->PackInfo[HiPriority].uiCurrentBytesOnHost += pLeader->PLength;
Adapter->PackInfo[HiPriority].uiCurrentPacketsOnHost++;
atomic_inc(&Adapter->TotalPacketCount);
spin_unlock_bh(&Adapter->PackInfo[HiPriority].SFQueueLock);
-
Adapter->PackInfo[HiPriority].bValid = TRUE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "CurrBytesOnHost: %x bValid: %x",
- Adapter->PackInfo[HiPriority].uiCurrentBytesOnHost,
- Adapter->PackInfo[HiPriority].bValid);
- Status=STATUS_SUCCESS;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "CurrBytesOnHost: %x bValid: %x",
+ Adapter->PackInfo[HiPriority].uiCurrentBytesOnHost,
+ Adapter->PackInfo[HiPriority].bValid);
+ Status = STATUS_SUCCESS;
/*Queue the packet for transmission */
atomic_inc(&Adapter->index_wr_txcntrlpkt);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL, "Calling transmit_packets");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Calling transmit_packets");
atomic_set(&Adapter->TxPktAvail, 1);
wake_up(&Adapter->tx_packet_wait_queue);
+ } else {
+ Status = -ENOMEM;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "mem allocation Failed");
}
- else
- {
- Status=-ENOMEM;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "mem allocation Failed");
- }
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "<====");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "<====");
return Status;
}
@@ -460,33 +407,30 @@ INT CopyBufferToControlPacket(PMINI_ADAPTER Adapter,/**<Logical Adapter*/
* Function - SendStatisticsPointerRequest()
*
* Description - This function builds and forwards the Statistics
-* Pointer Request control Packet.
+* Pointer Request control Packet.
*
* Parameters - Adapter : Pointer to Adapter structure.
-* - pstStatisticsPtrRequest : Pointer to link request.
+* - pstStatisticsPtrRequest : Pointer to link request.
*
* Returns - None.
*****************************************************************/
-static VOID SendStatisticsPointerRequest(PMINI_ADAPTER Adapter,
- PLINK_REQUEST pstStatisticsPtrRequest)
+static VOID SendStatisticsPointerRequest(PMINI_ADAPTER Adapter, PLINK_REQUEST pstStatisticsPtrRequest)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "======>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "======>");
pstStatisticsPtrRequest->Leader.Status = STATS_POINTER_REQ_STATUS;
- pstStatisticsPtrRequest->Leader.PLength = sizeof(ULONG);//minimum 4 bytes
+ pstStatisticsPtrRequest->Leader.PLength = sizeof(ULONG); /* minimum 4 bytes */
pstStatisticsPtrRequest->szData[0] = STATISTICS_POINTER_REQ;
-
- CopyBufferToControlPacket(Adapter,pstStatisticsPtrRequest);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "<=====");
+ CopyBufferToControlPacket(Adapter, pstStatisticsPtrRequest);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "<=====");
return;
}
#endif
-
/******************************************************************
* Function - LinkMessage()
*
* Description - This function builds the Sync-up and Link-up request
-* packet messages depending on the device Link status.
+* packet messages depending on the device Link status.
*
* Parameters - Adapter: Pointer to the Adapter structure.
*
@@ -494,102 +438,90 @@ static VOID SendStatisticsPointerRequest(PMINI_ADAPTER Adapter,
*******************************************************************/
VOID LinkMessage(PMINI_ADAPTER Adapter)
{
- PLINK_REQUEST pstLinkRequest=NULL;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "=====>");
- if(Adapter->LinkStatus == SYNC_UP_REQUEST && Adapter->AutoSyncup)
- {
+ PLINK_REQUEST pstLinkRequest = NULL;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "=====>");
+ if (Adapter->LinkStatus == SYNC_UP_REQUEST && Adapter->AutoSyncup) {
pstLinkRequest = kzalloc(sizeof(LINK_REQUEST), GFP_ATOMIC);
- if(!pstLinkRequest)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Can not allocate memory for Link request!");
+ if (!pstLinkRequest) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Can not allocate memory for Link request!");
return;
}
- //sync up request...
- Adapter->LinkStatus = WAIT_FOR_SYNC;// current link status
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Requesting For SyncUp...");
- pstLinkRequest->szData[0]=LINK_UP_REQ_PAYLOAD;
- pstLinkRequest->szData[1]=LINK_SYNC_UP_SUBTYPE;
- pstLinkRequest->Leader.Status=LINK_UP_CONTROL_REQ;
- pstLinkRequest->Leader.PLength=sizeof(ULONG);
+ /* sync up request... */
+ Adapter->LinkStatus = WAIT_FOR_SYNC; /* current link status */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Requesting For SyncUp...");
+ pstLinkRequest->szData[0] = LINK_UP_REQ_PAYLOAD;
+ pstLinkRequest->szData[1] = LINK_SYNC_UP_SUBTYPE;
+ pstLinkRequest->Leader.Status = LINK_UP_CONTROL_REQ;
+ pstLinkRequest->Leader.PLength = sizeof(ULONG);
Adapter->bSyncUpRequestSent = TRUE;
- }
- else if(Adapter->LinkStatus == PHY_SYNC_ACHIVED && Adapter->AutoLinkUp)
- {
+
+ } else if (Adapter->LinkStatus == PHY_SYNC_ACHIVED && Adapter->AutoLinkUp) {
pstLinkRequest = kzalloc(sizeof(LINK_REQUEST), GFP_ATOMIC);
- if(!pstLinkRequest)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Can not allocate memory for Link request!");
+ if (!pstLinkRequest) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Can not allocate memory for Link request!");
return;
}
- //LINK_UP_REQUEST
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Requesting For LinkUp...");
- pstLinkRequest->szData[0]=LINK_UP_REQ_PAYLOAD;
- pstLinkRequest->szData[1]=LINK_NET_ENTRY;
- pstLinkRequest->Leader.Status=LINK_UP_CONTROL_REQ;
- pstLinkRequest->Leader.PLength=sizeof(ULONG);
- }
- if(pstLinkRequest)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Calling CopyBufferToControlPacket");
+ /* LINK_UP_REQUEST */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Requesting For LinkUp...");
+ pstLinkRequest->szData[0] = LINK_UP_REQ_PAYLOAD;
+ pstLinkRequest->szData[1] = LINK_NET_ENTRY;
+ pstLinkRequest->Leader.Status = LINK_UP_CONTROL_REQ;
+ pstLinkRequest->Leader.PLength = sizeof(ULONG);
+ }
+ if (pstLinkRequest) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Calling CopyBufferToControlPacket");
CopyBufferToControlPacket(Adapter, pstLinkRequest);
kfree(pstLinkRequest);
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "LinkMessage <=====");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "LinkMessage <=====");
return;
}
-
/**********************************************************************
* Function - StatisticsResponse()
*
* Description - This function handles the Statistics response packet.
*
* Parameters - Adapter : Pointer to the Adapter structure.
-* - pvBuffer: Starting address of Statistic response data.
+* - pvBuffer: Starting address of Statistic response data.
*
* Returns - None.
************************************************************************/
-VOID StatisticsResponse(PMINI_ADAPTER Adapter,PVOID pvBuffer)
+VOID StatisticsResponse(PMINI_ADAPTER Adapter, PVOID pvBuffer)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s====>",__FUNCTION__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s====>", __func__);
Adapter->StatisticsPointer = ntohl(*(__be32 *)pvBuffer);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Stats at %x", (UINT)Adapter->StatisticsPointer);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s <====",__FUNCTION__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Stats at %x", (UINT)Adapter->StatisticsPointer);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s <====", __func__);
return;
}
-
/**********************************************************************
* Function - LinkControlResponseMessage()
*
* Description - This function handles the Link response packets.
*
* Parameters - Adapter : Pointer to the Adapter structure.
-* - pucBuffer: Starting address of Link response data.
+* - pucBuffer: Starting address of Link response data.
*
* Returns - None.
***********************************************************************/
-VOID LinkControlResponseMessage(PMINI_ADAPTER Adapter,PUCHAR pucBuffer)
+VOID LinkControlResponseMessage(PMINI_ADAPTER Adapter, PUCHAR pucBuffer)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "=====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "=====>");
- if(*pucBuffer==LINK_UP_ACK)
- {
- switch(*(pucBuffer+1))
- {
- case PHY_SYNC_ACHIVED: //SYNCed UP
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "PHY_SYNC_ACHIVED");
+ if (*pucBuffer == LINK_UP_ACK) {
+ switch (*(pucBuffer+1)) {
+ case PHY_SYNC_ACHIVED: /* SYNCed UP */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "PHY_SYNC_ACHIVED");
- if(Adapter->LinkStatus == LINKUP_DONE)
- {
+ if (Adapter->LinkStatus == LINKUP_DONE)
beceem_protocol_reset(Adapter);
- }
- Adapter->usBestEffortQueueIndex=INVALID_QUEUE_INDEX ;
- Adapter->LinkStatus=PHY_SYNC_ACHIVED;
+ Adapter->usBestEffortQueueIndex = INVALID_QUEUE_INDEX;
+ Adapter->LinkStatus = PHY_SYNC_ACHIVED;
- if(Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
Adapter->DriverState = NO_NETWORK_ENTRY;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
@@ -597,161 +529,145 @@ VOID LinkControlResponseMessage(PMINI_ADAPTER Adapter,PUCHAR pucBuffer)
LinkMessage(Adapter);
break;
- case LINKUP_DONE:
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "LINKUP_DONE");
- Adapter->LinkStatus=LINKUP_DONE;
- Adapter->bPHSEnabled = *(pucBuffer+3);
- Adapter->bETHCSEnabled = *(pucBuffer+4) & ETH_CS_MASK;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "PHS Support Status Received In LinkUp Ack : %x \n",Adapter->bPHSEnabled);
- if((FALSE == Adapter->bShutStatus)&&
- (FALSE == Adapter->IdleMode))
- {
- if(Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
- Adapter->DriverState = NORMAL_OPERATION;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
+ case LINKUP_DONE:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "LINKUP_DONE");
+ Adapter->LinkStatus = LINKUP_DONE;
+ Adapter->bPHSEnabled = *(pucBuffer+3);
+ Adapter->bETHCSEnabled = *(pucBuffer+4) & ETH_CS_MASK;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "PHS Support Status Received In LinkUp Ack : %x\n", Adapter->bPHSEnabled);
+
+ if ((FALSE == Adapter->bShutStatus) && (FALSE == Adapter->IdleMode)) {
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ Adapter->DriverState = NORMAL_OPERATION;
+ wake_up(&Adapter->LEDInfo.notify_led_event);
}
- LinkMessage(Adapter);
- break;
- case WAIT_FOR_SYNC:
+ }
+ LinkMessage(Adapter);
+ break;
- /*
- * Driver to ignore the DREG_RECEIVED
- * WiMAX Application should handle this Message
- */
- //Adapter->liTimeSinceLastNetEntry = 0;
- Adapter->LinkUpStatus = 0;
- Adapter->LinkStatus = 0;
- Adapter->usBestEffortQueueIndex=INVALID_QUEUE_INDEX ;
- Adapter->bTriedToWakeUpFromlowPowerMode = FALSE;
- Adapter->IdleMode = FALSE;
- beceem_protocol_reset(Adapter);
+ case WAIT_FOR_SYNC:
+ /*
+ * Driver to ignore the DREG_RECEIVED
+ * WiMAX Application should handle this Message
+ */
+ /* Adapter->liTimeSinceLastNetEntry = 0; */
+ Adapter->LinkUpStatus = 0;
+ Adapter->LinkStatus = 0;
+ Adapter->usBestEffortQueueIndex = INVALID_QUEUE_INDEX;
+ Adapter->bTriedToWakeUpFromlowPowerMode = FALSE;
+ Adapter->IdleMode = FALSE;
+ beceem_protocol_reset(Adapter);
- break;
- case LINK_SHUTDOWN_REQ_FROM_FIRMWARE:
- case COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW:
- {
- HandleShutDownModeRequest(Adapter, pucBuffer);
- }
- break;
- default:
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "default case:LinkResponse %x",*(pucBuffer+1));
- break;
+ break;
+ case LINK_SHUTDOWN_REQ_FROM_FIRMWARE:
+ case COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW:
+ {
+ HandleShutDownModeRequest(Adapter, pucBuffer);
}
- }
- else if(SET_MAC_ADDRESS_RESPONSE==*pucBuffer)
- {
+ break;
+ default:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "default case:LinkResponse %x", *(pucBuffer + 1));
+ break;
+ }
+ } else if (SET_MAC_ADDRESS_RESPONSE == *pucBuffer) {
PUCHAR puMacAddr = (pucBuffer + 1);
- Adapter->LinkStatus=SYNC_UP_REQUEST;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "MAC address response, sending SYNC_UP");
+ Adapter->LinkStatus = SYNC_UP_REQUEST;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "MAC address response, sending SYNC_UP");
LinkMessage(Adapter);
memcpy(Adapter->dev->dev_addr, puMacAddr, MAC_ADDRESS_SIZE);
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "%s <=====",__FUNCTION__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "%s <=====", __func__);
return;
}
void SendIdleModeResponse(PMINI_ADAPTER Adapter)
{
- INT status = 0, NVMAccess = 0,lowPwrAbortMsg = 0;
+ INT status = 0, NVMAccess = 0, lowPwrAbortMsg = 0;
struct timeval tv;
- CONTROL_MESSAGE stIdleResponse = {{0}};
+ CONTROL_MESSAGE stIdleResponse = {{0} };
memset(&tv, 0, sizeof(tv));
- stIdleResponse.Leader.Status = IDLE_MESSAGE;
+ stIdleResponse.Leader.Status = IDLE_MESSAGE;
stIdleResponse.Leader.PLength = IDLE_MODE_PAYLOAD_LENGTH;
stIdleResponse.szData[0] = GO_TO_IDLE_MODE_PAYLOAD;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL," ============>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, " ============>");
/*********************************
- **down_trylock -
- ** if [ semaphore is available ]
- ** acquire semaphone and return value 0 ;
- ** else
- ** return non-zero value ;
- **
- ***********************************/
+ *down_trylock -
+ * if [ semaphore is available ]
+ * acquire semaphone and return value 0 ;
+ * else
+ * return non-zero value ;
+ *
+ ***********************************/
NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
+ lowPwrAbortMsg = down_trylock(&Adapter->LowPowerModeSync);
- lowPwrAbortMsg= down_trylock(&Adapter->LowPowerModeSync);
+ if ((NVMAccess || lowPwrAbortMsg || atomic_read(&Adapter->TotalPacketCount)) &&
+ (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) {
- if((NVMAccess || lowPwrAbortMsg || atomic_read(&Adapter->TotalPacketCount)) &&
- (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) )
- {
- if(!NVMAccess)
+ if (!NVMAccess)
up(&Adapter->NVMRdmWrmLock);
- if(!lowPwrAbortMsg)
+ if (!lowPwrAbortMsg)
up(&Adapter->LowPowerModeSync);
- stIdleResponse.szData[1] = TARGET_CAN_NOT_GO_TO_IDLE_MODE;//NACK- device access is going on.
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "HOST IS NACKING Idle mode To F/W!!!!!!!!");
+ stIdleResponse.szData[1] = TARGET_CAN_NOT_GO_TO_IDLE_MODE; /* NACK- device access is going on. */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "HOST IS NACKING Idle mode To F/W!!!!!!!!");
Adapter->bPreparingForLowPowerMode = FALSE;
- }
- else
- {
- stIdleResponse.szData[1] = TARGET_CAN_GO_TO_IDLE_MODE; //2;//Idle ACK
+ } else {
+ stIdleResponse.szData[1] = TARGET_CAN_GO_TO_IDLE_MODE; /* 2; Idle ACK */
Adapter->StatisticsPointer = 0;
/* Wait for the LED to TURN OFF before sending ACK response */
- if(Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
INT iRetVal = 0;
/* Wake the LED Thread with IDLEMODE_ENTER State */
Adapter->DriverState = LOWPOWER_MODE_ENTER;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"LED Thread is Running..Hence Setting LED Event as IDLEMODE_ENTER jiffies:%ld",jiffies);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "LED Thread is Running..Hence Setting LED Event as IDLEMODE_ENTER jiffies:%ld", jiffies);
wake_up(&Adapter->LEDInfo.notify_led_event);
/* Wait for 1 SEC for LED to OFF */
- iRetVal = wait_event_timeout(Adapter->LEDInfo.idleModeSyncEvent, \
- Adapter->LEDInfo.bIdle_led_off, msecs_to_jiffies(1000));
-
+ iRetVal = wait_event_timeout(Adapter->LEDInfo.idleModeSyncEvent, Adapter->LEDInfo.bIdle_led_off, msecs_to_jiffies(1000));
/* If Timed Out to Sync IDLE MODE Enter, do IDLE mode Exit and Send NACK to device */
- if(iRetVal <= 0)
- {
- stIdleResponse.szData[1] = TARGET_CAN_NOT_GO_TO_IDLE_MODE;//NACK- device access is going on.
+ if (iRetVal <= 0) {
+ stIdleResponse.szData[1] = TARGET_CAN_NOT_GO_TO_IDLE_MODE; /* NACK- device access is going on. */
Adapter->DriverState = NORMAL_OPERATION;
wake_up(&Adapter->LEDInfo.notify_led_event);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "NACKING Idle mode as time out happen from LED side!!!!!!!!");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "NACKING Idle mode as time out happen from LED side!!!!!!!!");
}
}
- if(stIdleResponse.szData[1] == TARGET_CAN_GO_TO_IDLE_MODE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"ACKING IDLE MODE !!!!!!!!!");
+
+ if (stIdleResponse.szData[1] == TARGET_CAN_GO_TO_IDLE_MODE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "ACKING IDLE MODE !!!!!!!!!");
down(&Adapter->rdmwrmsync);
Adapter->bPreparingForLowPowerMode = TRUE;
up(&Adapter->rdmwrmsync);
- //Killing all URBS.
- if(Adapter->bDoSuspend == TRUE)
+ /* Killing all URBS. */
+ if (Adapter->bDoSuspend == TRUE)
Bcm_kill_all_URBs((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter));
-
- }
- else
- {
+ } else {
Adapter->bPreparingForLowPowerMode = FALSE;
}
- if(!NVMAccess)
+ if (!NVMAccess)
up(&Adapter->NVMRdmWrmLock);
- if(!lowPwrAbortMsg)
+ if (!lowPwrAbortMsg)
up(&Adapter->LowPowerModeSync);
-
}
- status = CopyBufferToControlPacket(Adapter,&stIdleResponse);
- if((status != STATUS_SUCCESS))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"fail to send the Idle mode Request \n");
+
+ status = CopyBufferToControlPacket(Adapter, &stIdleResponse);
+ if ((status != STATUS_SUCCESS)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "fail to send the Idle mode Request\n");
Adapter->bPreparingForLowPowerMode = FALSE;
StartInterruptUrb((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter));
}
do_gettimeofday(&tv);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "IdleMode Msg submitter to Q :%ld ms", tv.tv_sec *1000 + tv.tv_usec /1000);
-
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "IdleMode Msg submitter to Q :%ld ms", tv.tv_sec * 1000 + tv.tv_usec / 1000);
}
/******************************************************************
@@ -765,307 +681,264 @@ void SendIdleModeResponse(PMINI_ADAPTER Adapter)
*******************************************************************/
VOID DumpPackInfo(PMINI_ADAPTER Adapter)
{
-
- UINT uiLoopIndex = 0;
+ UINT uiLoopIndex = 0;
UINT uiIndex = 0;
UINT uiClsfrIndex = 0;
S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
- for(uiLoopIndex=0;uiLoopIndex<NO_OF_QUEUES;uiLoopIndex++)
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"*********** Showing Details Of Queue %d***** ******",uiLoopIndex);
- if(FALSE == Adapter->PackInfo[uiLoopIndex].bValid)
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"bValid is FALSE for %X index\n",uiLoopIndex);
+ for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "*********** Showing Details Of Queue %d***** ******", uiLoopIndex);
+ if (FALSE == Adapter->PackInfo[uiLoopIndex].bValid) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bValid is FALSE for %X index\n", uiLoopIndex);
continue;
}
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL," Dumping SF Rule Entry For SFID %lX \n",Adapter->PackInfo[uiLoopIndex].ulSFID);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL," ucDirection %X \n",Adapter->PackInfo[uiLoopIndex].ucDirection);
- if(Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6)
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"Ipv6 Service Flow \n");
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, " Dumping SF Rule Entry For SFID %lX\n", Adapter->PackInfo[uiLoopIndex].ulSFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, " ucDirection %X\n", Adapter->PackInfo[uiLoopIndex].ucDirection);
+
+ if (Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Ipv6 Service Flow\n");
else
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"Ipv4 Service Flow \n");
- }
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL," SF Traffic Priority %X \n",Adapter->PackInfo[uiLoopIndex].u8TrafficPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Ipv4 Service Flow\n");
- for(uiClsfrIndex=0;uiClsfrIndex<MAX_CLASSIFIERS;uiClsfrIndex++)
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "SF Traffic Priority %X\n", Adapter->PackInfo[uiLoopIndex].u8TrafficPriority);
+
+ for (uiClsfrIndex = 0; uiClsfrIndex < MAX_CLASSIFIERS; uiClsfrIndex++) {
pstClassifierEntry = &Adapter->astClassifierTable[uiClsfrIndex];
- if(!pstClassifierEntry->bUsed)
+ if (!pstClassifierEntry->bUsed)
continue;
- if(pstClassifierEntry->ulSFID != Adapter->PackInfo[uiLoopIndex].ulSFID)
+ if (pstClassifierEntry->ulSFID != Adapter->PackInfo[uiLoopIndex].ulSFID)
continue;
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tDumping Classifier Rule Entry For Index: %X Classifier Rule ID : %X\n",uiClsfrIndex,pstClassifierEntry->uiClassifierRuleIndex);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tDumping Classifier Rule Entry For Index: %X usVCID_Value : %X\n",uiClsfrIndex,pstClassifierEntry->usVCID_Value);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tDumping Classifier Rule Entry For Index: %X bProtocolValid : %X\n",uiClsfrIndex,pstClassifierEntry->bProtocolValid);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tDumping Classifier Rule Entry For Index: %X bTOSValid : %X\n",uiClsfrIndex,pstClassifierEntry->bTOSValid);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tDumping Classifier Rule Entry For Index: %X bDestIpValid : %X\n",uiClsfrIndex,pstClassifierEntry->bDestIpValid);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tDumping Classifier Rule Entry For Index: %X bSrcIpValid : %X\n",uiClsfrIndex,pstClassifierEntry->bSrcIpValid);
-
-
- for(uiIndex=0;uiIndex<MAX_PORT_RANGE;uiIndex++)
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tusSrcPortRangeLo:%X\n",pstClassifierEntry->usSrcPortRangeLo[uiIndex]);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tusSrcPortRangeHi:%X\n",pstClassifierEntry->usSrcPortRangeHi[uiIndex]);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tusDestPortRangeLo:%X\n",pstClassifierEntry->usDestPortRangeLo[uiIndex]);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tusDestPortRangeHi:%X\n",pstClassifierEntry->usDestPortRangeHi[uiIndex]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X Classifier Rule ID : %X\n", uiClsfrIndex, pstClassifierEntry->uiClassifierRuleIndex);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X usVCID_Value : %X\n", uiClsfrIndex, pstClassifierEntry->usVCID_Value);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bProtocolValid : %X\n", uiClsfrIndex, pstClassifierEntry->bProtocolValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bTOSValid : %X\n", uiClsfrIndex, pstClassifierEntry->bTOSValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bDestIpValid : %X\n", uiClsfrIndex, pstClassifierEntry->bDestIpValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bSrcIpValid : %X\n", uiClsfrIndex, pstClassifierEntry->bSrcIpValid);
+
+ for (uiIndex = 0; uiIndex < MAX_PORT_RANGE; uiIndex++) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusSrcPortRangeLo:%X\n", pstClassifierEntry->usSrcPortRangeLo[uiIndex]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusSrcPortRangeHi:%X\n", pstClassifierEntry->usSrcPortRangeHi[uiIndex]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusDestPortRangeLo:%X\n", pstClassifierEntry->usDestPortRangeLo[uiIndex]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusDestPortRangeHi:%X\n", pstClassifierEntry->usDestPortRangeHi[uiIndex]);
}
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL," \tucIPSourceAddressLength : 0x%x\n",pstClassifierEntry->ucIPSourceAddressLength);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tucIPDestinationAddressLength : 0x%x\n",pstClassifierEntry->ucIPDestinationAddressLength);
- for(uiIndex=0;uiIndex<pstClassifierEntry->ucIPSourceAddressLength;uiIndex++)
- {
- if(Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6)
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tIpv6 ulSrcIpAddr :\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tucIPSourceAddressLength : 0x%x\n", pstClassifierEntry->ucIPSourceAddressLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tucIPDestinationAddressLength : 0x%x\n", pstClassifierEntry->ucIPDestinationAddressLength);
+ for (uiIndex = 0; uiIndex < pstClassifierEntry->ucIPSourceAddressLength; uiIndex++) {
+ if (Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulSrcIpAddr :\n");
DumpIpv6Address(pstClassifierEntry->stSrcIpAddress.ulIpv6Addr);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tIpv6 ulSrcIpMask :\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulSrcIpMask :\n");
DumpIpv6Address(pstClassifierEntry->stSrcIpAddress.ulIpv6Mask);
- }
- else
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tulSrcIpAddr:%lX\n",pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[uiIndex]);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tulSrcIpMask:%lX\n",pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[uiIndex]);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulSrcIpAddr:%lX\n", pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[uiIndex]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulSrcIpMask:%lX\n", pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[uiIndex]);
}
}
- for(uiIndex=0;uiIndex<pstClassifierEntry->ucIPDestinationAddressLength;uiIndex++)
- {
- if(Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6)
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tIpv6 ulDestIpAddr :\n");
+
+ for (uiIndex = 0; uiIndex < pstClassifierEntry->ucIPDestinationAddressLength; uiIndex++) {
+ if (Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulDestIpAddr :\n");
DumpIpv6Address(pstClassifierEntry->stDestIpAddress.ulIpv6Addr);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tIpv6 ulDestIpMask :\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulDestIpMask :\n");
DumpIpv6Address(pstClassifierEntry->stDestIpAddress.ulIpv6Mask);
-
- }
- else
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tulDestIpAddr:%lX\n",pstClassifierEntry->stDestIpAddress.ulIpv4Addr[uiIndex]);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tulDestIpMask:%lX\n",pstClassifierEntry->stDestIpAddress.ulIpv4Mask[uiIndex]);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulDestIpAddr:%lX\n", pstClassifierEntry->stDestIpAddress.ulIpv4Addr[uiIndex]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulDestIpMask:%lX\n", pstClassifierEntry->stDestIpAddress.ulIpv4Mask[uiIndex]);
}
}
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tucProtocol:0x%X\n",pstClassifierEntry->ucProtocol[0]);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"\tu8ClassifierRulePriority:%X\n",pstClassifierEntry->u8ClassifierRulePriority);
-
-
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tucProtocol:0x%X\n", pstClassifierEntry->ucProtocol[0]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tu8ClassifierRulePriority:%X\n", pstClassifierEntry->u8ClassifierRulePriority);
}
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"ulSFID:%lX\n",Adapter->PackInfo[uiLoopIndex].ulSFID);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"usVCID_Value:%X\n",Adapter->PackInfo[uiLoopIndex].usVCID_Value);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"PhsEnabled: 0x%X\n",Adapter->PackInfo[uiLoopIndex].bHeaderSuppressionEnabled);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiThreshold:%X\n",Adapter->PackInfo[uiLoopIndex].uiThreshold);
-
-
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"bValid:%X\n",Adapter->PackInfo[uiLoopIndex].bValid);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"bActive:%X\n",Adapter->PackInfo[uiLoopIndex].bActive);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"ActivateReqSent: %x", Adapter->PackInfo[uiLoopIndex].bActivateRequestSent);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"u8QueueType:%X\n",Adapter->PackInfo[uiLoopIndex].u8QueueType);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiMaxBucketSize:%X\n",Adapter->PackInfo[uiLoopIndex].uiMaxBucketSize);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiPerSFTxResourceCount:%X\n",atomic_read(&Adapter->PackInfo[uiLoopIndex].uiPerSFTxResourceCount));
- //DumpDebug(DUMP_INFO,(" bCSSupport:%X\n",Adapter->PackInfo[uiLoopIndex].bCSSupport));
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"CurrQueueDepthOnTarget: %x\n", Adapter->PackInfo[uiLoopIndex].uiCurrentQueueDepthOnTarget);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiCurrentBytesOnHost:%X\n",Adapter->PackInfo[uiLoopIndex].uiCurrentBytesOnHost);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiCurrentPacketsOnHost:%X\n",Adapter->PackInfo[uiLoopIndex].uiCurrentPacketsOnHost);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiDroppedCountBytes:%X\n",Adapter->PackInfo[uiLoopIndex].uiDroppedCountBytes);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiDroppedCountPackets:%X\n",Adapter->PackInfo[uiLoopIndex].uiDroppedCountPackets);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiSentBytes:%X\n",Adapter->PackInfo[uiLoopIndex].uiSentBytes);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiSentPackets:%X\n",Adapter->PackInfo[uiLoopIndex].uiSentPackets);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiCurrentDrainRate:%X\n",Adapter->PackInfo[uiLoopIndex].uiCurrentDrainRate);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiThisPeriodSentBytes:%X\n",Adapter->PackInfo[uiLoopIndex].uiThisPeriodSentBytes);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"liDrainCalculated:%llX\n",Adapter->PackInfo[uiLoopIndex].liDrainCalculated);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiCurrentTokenCount:%X\n",Adapter->PackInfo[uiLoopIndex].uiCurrentTokenCount);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"liLastUpdateTokenAt:%llX\n",Adapter->PackInfo[uiLoopIndex].liLastUpdateTokenAt);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiMaxAllowedRate:%X\n",Adapter->PackInfo[uiLoopIndex].uiMaxAllowedRate);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"uiPendedLast:%X\n",Adapter->PackInfo[uiLoopIndex].uiPendedLast);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"NumOfPacketsSent:%X\n",Adapter->PackInfo[uiLoopIndex].NumOfPacketsSent);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Direction: %x\n", Adapter->PackInfo[uiLoopIndex].ucDirection);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "CID: %x\n", Adapter->PackInfo[uiLoopIndex].usCID);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ProtocolValid: %x\n", Adapter->PackInfo[uiLoopIndex].bProtocolValid);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "TOSValid: %x\n", Adapter->PackInfo[uiLoopIndex].bTOSValid);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "DestIpValid: %x\n", Adapter->PackInfo[uiLoopIndex].bDestIpValid);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "SrcIpValid: %x\n", Adapter->PackInfo[uiLoopIndex].bSrcIpValid);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ActiveSet: %x\n", Adapter->PackInfo[uiLoopIndex].bActiveSet);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "AdmittedSet: %x\n", Adapter->PackInfo[uiLoopIndex].bAdmittedSet);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "AuthzSet: %x\n", Adapter->PackInfo[uiLoopIndex].bAuthorizedSet);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ClassifyPrority: %x\n", Adapter->PackInfo[uiLoopIndex].bClassifierPriority);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiMaxLatency: %x\n",Adapter->PackInfo[uiLoopIndex].uiMaxLatency);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ServiceClassName: %x %x %x %x\n",Adapter->PackInfo[uiLoopIndex].ucServiceClassName[0],Adapter->PackInfo[uiLoopIndex].ucServiceClassName[1],Adapter->PackInfo[uiLoopIndex].ucServiceClassName[2],Adapter->PackInfo[uiLoopIndex].ucServiceClassName[3]);
-// BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bHeaderSuppressionEnabled :%X\n", Adapter->PackInfo[uiLoopIndex].bHeaderSuppressionEnabled);
-// BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiTotalTxBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiTotalTxBytes);
-// BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiTotalRxBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiTotalRxBytes);
-// DumpDebug(DUMP_INFO,(" uiRanOutOfResCount:%X\n",Adapter->PackInfo[uiLoopIndex].uiRanOutOfResCount));
- }
-
- for(uiLoopIndex = 0 ; uiLoopIndex < MIBS_MAX_HIST_ENTRIES ; uiLoopIndex++)
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"Adapter->aRxPktSizeHist[%x] = %x\n",uiLoopIndex,Adapter->aRxPktSizeHist[uiLoopIndex]);
-
- for(uiLoopIndex = 0 ; uiLoopIndex < MIBS_MAX_HIST_ENTRIES ; uiLoopIndex++)
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,"Adapter->aTxPktSizeHist[%x] = %x\n",uiLoopIndex,Adapter->aTxPktSizeHist[uiLoopIndex]);
-
-
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ulSFID:%lX\n", Adapter->PackInfo[uiLoopIndex].ulSFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "usVCID_Value:%X\n", Adapter->PackInfo[uiLoopIndex].usVCID_Value);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "PhsEnabled: 0x%X\n", Adapter->PackInfo[uiLoopIndex].bHeaderSuppressionEnabled);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiThreshold:%X\n", Adapter->PackInfo[uiLoopIndex].uiThreshold);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bValid:%X\n", Adapter->PackInfo[uiLoopIndex].bValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bActive:%X\n", Adapter->PackInfo[uiLoopIndex].bActive);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ActivateReqSent: %x", Adapter->PackInfo[uiLoopIndex].bActivateRequestSent);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "u8QueueType:%X\n", Adapter->PackInfo[uiLoopIndex].u8QueueType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiMaxBucketSize:%X\n", Adapter->PackInfo[uiLoopIndex].uiMaxBucketSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiPerSFTxResourceCount:%X\n", atomic_read(&Adapter->PackInfo[uiLoopIndex].uiPerSFTxResourceCount));
+ /* DumpDebug(DUMP_INFO,("bCSSupport:%X\n",Adapter->PackInfo[uiLoopIndex].bCSSupport)); */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "CurrQueueDepthOnTarget: %x\n", Adapter->PackInfo[uiLoopIndex].uiCurrentQueueDepthOnTarget);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentBytesOnHost:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentBytesOnHost);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentPacketsOnHost:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentPacketsOnHost);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiDroppedCountBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiDroppedCountBytes);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiDroppedCountPackets:%X\n", Adapter->PackInfo[uiLoopIndex].uiDroppedCountPackets);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiSentBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiSentBytes);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiSentPackets:%X\n", Adapter->PackInfo[uiLoopIndex].uiSentPackets);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentDrainRate:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentDrainRate);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiThisPeriodSentBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiThisPeriodSentBytes);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "liDrainCalculated:%llX\n", Adapter->PackInfo[uiLoopIndex].liDrainCalculated);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentTokenCount:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentTokenCount);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "liLastUpdateTokenAt:%llX\n", Adapter->PackInfo[uiLoopIndex].liLastUpdateTokenAt);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiMaxAllowedRate:%X\n", Adapter->PackInfo[uiLoopIndex].uiMaxAllowedRate);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiPendedLast:%X\n", Adapter->PackInfo[uiLoopIndex].uiPendedLast);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "NumOfPacketsSent:%X\n", Adapter->PackInfo[uiLoopIndex].NumOfPacketsSent);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Direction: %x\n", Adapter->PackInfo[uiLoopIndex].ucDirection);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "CID: %x\n", Adapter->PackInfo[uiLoopIndex].usCID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ProtocolValid: %x\n", Adapter->PackInfo[uiLoopIndex].bProtocolValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "TOSValid: %x\n", Adapter->PackInfo[uiLoopIndex].bTOSValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "DestIpValid: %x\n", Adapter->PackInfo[uiLoopIndex].bDestIpValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "SrcIpValid: %x\n", Adapter->PackInfo[uiLoopIndex].bSrcIpValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ActiveSet: %x\n", Adapter->PackInfo[uiLoopIndex].bActiveSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "AdmittedSet: %x\n", Adapter->PackInfo[uiLoopIndex].bAdmittedSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "AuthzSet: %x\n", Adapter->PackInfo[uiLoopIndex].bAuthorizedSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ClassifyPrority: %x\n", Adapter->PackInfo[uiLoopIndex].bClassifierPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiMaxLatency: %x\n", Adapter->PackInfo[uiLoopIndex].uiMaxLatency);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ServiceClassName: %x %x %x %x\n", Adapter->PackInfo[uiLoopIndex].ucServiceClassName[0], Adapter->PackInfo[uiLoopIndex].ucServiceClassName[1], Adapter->PackInfo[uiLoopIndex].ucServiceClassName[2], Adapter->PackInfo[uiLoopIndex].ucServiceClassName[3]);
+/* BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bHeaderSuppressionEnabled :%X\n", Adapter->PackInfo[uiLoopIndex].bHeaderSuppressionEnabled);
+ * BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiTotalTxBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiTotalTxBytes);
+ * BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiTotalRxBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiTotalRxBytes);
+ * DumpDebug(DUMP_INFO,(" uiRanOutOfResCount:%X\n",Adapter->PackInfo[uiLoopIndex].uiRanOutOfResCount));
+ */
+ }
+
+ for (uiLoopIndex = 0; uiLoopIndex < MIBS_MAX_HIST_ENTRIES; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Adapter->aRxPktSizeHist[%x] = %x\n", uiLoopIndex, Adapter->aRxPktSizeHist[uiLoopIndex]);
+
+ for (uiLoopIndex = 0; uiLoopIndex < MIBS_MAX_HIST_ENTRIES; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Adapter->aTxPktSizeHist[%x] = %x\n", uiLoopIndex, Adapter->aTxPktSizeHist[uiLoopIndex]);
return;
-
-
}
int reset_card_proc(PMINI_ADAPTER ps_adapter)
{
int retval = STATUS_SUCCESS;
-
- PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
PS_INTERFACE_ADAPTER psIntfAdapter = NULL;
unsigned int value = 0, uiResetValue = 0;
- psIntfAdapter = ((PS_INTERFACE_ADAPTER)(ps_adapter->pvInterfaceAdapter)) ;
-
+ psIntfAdapter = ((PS_INTERFACE_ADAPTER)(ps_adapter->pvInterfaceAdapter));
ps_adapter->bDDRInitDone = FALSE;
- if(ps_adapter->chip_id >= T3LPB)
- {
- //SYS_CFG register is write protected hence for modifying this reg value, it should be read twice before
- rdmalt(ps_adapter,SYS_CFG, &value, sizeof(value));
- rdmalt(ps_adapter,SYS_CFG, &value, sizeof(value));
+ if (ps_adapter->chip_id >= T3LPB) {
+ /* SYS_CFG register is write protected hence for modifying this reg value, it should be read twice before */
+ rdmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
+ rdmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
- //making bit[6...5] same as was before f/w download. this setting force the h/w to
- //re-populated the SP RAM area with the string descriptor .
- value = value | (ps_adapter->syscfgBefFwDld & 0x00000060) ;
+ /* making bit[6...5] same as was before f/w download. this setting force the h/w to */
+ /* re-populated the SP RAM area with the string descriptor. */
+ value = value | (ps_adapter->syscfgBefFwDld & 0x00000060);
wrmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
}
- //killing all submitted URBs.
- psIntfAdapter->psAdapter->StopAllXaction = TRUE ;
+ /* killing all submitted URBs. */
+ psIntfAdapter->psAdapter->StopAllXaction = TRUE;
Bcm_kill_all_URBs(psIntfAdapter);
/* Reset the UMA-B Device */
- if(ps_adapter->chip_id >= T3LPB)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Reseting UMA-B \n");
+ if (ps_adapter->chip_id >= T3LPB) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reseting UMA-B\n");
retval = usb_reset_device(psIntfAdapter->udev);
+ psIntfAdapter->psAdapter->StopAllXaction = FALSE;
- psIntfAdapter->psAdapter->StopAllXaction = FALSE ;
-
- if(retval != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Reset failed with ret value :%d", retval);
+ if (retval != STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reset failed with ret value :%d", retval);
goto err_exit;
}
+
if (ps_adapter->chip_id == BCS220_2 ||
ps_adapter->chip_id == BCS220_2BC ||
ps_adapter->chip_id == BCS250_BC ||
- ps_adapter->chip_id == BCS220_3)
- {
- retval = rdmalt(ps_adapter,HPM_CONFIG_LDO145, &value, sizeof(value));
- if( retval < 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"read failed with status :%d",retval);
+ ps_adapter->chip_id == BCS220_3) {
+
+ retval = rdmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value));
+ if (retval < 0) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval);
goto err_exit;
}
- //setting 0th bit
+ /* setting 0th bit */
value |= (1<<0);
retval = wrmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value));
- if( retval < 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"write failed with status :%d",retval);
+ if (retval < 0) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval);
goto err_exit;
}
}
-
- }
- else
- {
- retval = rdmalt(ps_adapter,0x0f007018, &value, sizeof(value));
- if( retval < 0) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"read failed with status :%d",retval);
+ } else {
+ retval = rdmalt(ps_adapter, 0x0f007018, &value, sizeof(value));
+ if (retval < 0) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval);
goto err_exit;
}
- value&=(~(1<<16));
- retval= wrmalt(ps_adapter, 0x0f007018, &value, sizeof(value)) ;
- if( retval < 0) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"write failed with status :%d",retval);
+ value &= (~(1<<16));
+ retval = wrmalt(ps_adapter, 0x0f007018, &value, sizeof(value));
+ if (retval < 0) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval);
goto err_exit;
}
- // Toggling the GPIO 8, 9
+ /* Toggling the GPIO 8, 9 */
value = 0;
retval = wrmalt(ps_adapter, GPIO_OUTPUT_REGISTER, &value, sizeof(value));
- if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"write failed with status :%d",retval);
+ if (retval < 0) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval);
goto err_exit;
}
value = 0x300;
- retval = wrmalt(ps_adapter, GPIO_MODE_REGISTER, &value, sizeof(value)) ;
- if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"write failed with status :%d",retval);
+ retval = wrmalt(ps_adapter, GPIO_MODE_REGISTER, &value, sizeof(value));
+ if (retval < 0) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval);
goto err_exit;
}
mdelay(50);
}
- //ps_adapter->downloadDDR = false;
-
- if(ps_adapter->bFlashBoot)
- {
- //In flash boot mode MIPS state register has reverse polarity.
- // So just or with setting bit 30.
- //Make the MIPS in Reset state.
+ /* ps_adapter->downloadDDR = false; */
+ if (ps_adapter->bFlashBoot) {
+ /* In flash boot mode MIPS state register has reverse polarity.
+ * So just or with setting bit 30.
+ * Make the MIPS in Reset state.
+ */
rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &uiResetValue, sizeof(uiResetValue));
-
- uiResetValue |=(1<<30);
+ uiResetValue |= (1<<30);
wrmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &uiResetValue, sizeof(uiResetValue));
}
- if(ps_adapter->chip_id >= T3LPB)
- {
+ if (ps_adapter->chip_id >= T3LPB) {
uiResetValue = 0;
- //
- // WA for SYSConfig Issue.
- // Read SYSCFG Twice to make it writable.
- //
+ /*
+ * WA for SYSConfig Issue.
+ * Read SYSCFG Twice to make it writable.
+ */
rdmalt(ps_adapter, SYS_CFG, &uiResetValue, sizeof(uiResetValue));
- if(uiResetValue & (1<<4))
- {
+ if (uiResetValue & (1<<4)) {
uiResetValue = 0;
- rdmalt(ps_adapter, SYS_CFG, &uiResetValue, sizeof(uiResetValue));//2nd read to make it writable.
+ rdmalt(ps_adapter, SYS_CFG, &uiResetValue, sizeof(uiResetValue)); /* 2nd read to make it writable. */
uiResetValue &= (~(1<<4));
- wrmalt(ps_adapter,SYS_CFG, &uiResetValue, sizeof(uiResetValue));
+ wrmalt(ps_adapter, SYS_CFG, &uiResetValue, sizeof(uiResetValue));
}
-
}
uiResetValue = 0;
wrmalt(ps_adapter, 0x0f01186c, &uiResetValue, sizeof(uiResetValue));
-err_exit :
- psIntfAdapter->psAdapter->StopAllXaction = FALSE ;
+err_exit:
+ psIntfAdapter->psAdapter->StopAllXaction = FALSE;
return retval;
}
-int run_card_proc(PMINI_ADAPTER ps_adapter )
+int run_card_proc(PMINI_ADAPTER ps_adapter)
{
- unsigned int value=0;
+ unsigned int value = 0;
{
-
- if(rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"%s:%d\n", __FUNCTION__, __LINE__);
+ if (rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value)) < 0) {
+ BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%d\n", __func__, __LINE__);
return STATUS_FAILURE;
}
- if(ps_adapter->bFlashBoot)
- {
-
- value&=(~(1<<30));
- }
+ if (ps_adapter->bFlashBoot)
+ value &= (~(1<<30));
else
- {
- value |=(1<<30);
- }
+ value |= (1<<30);
- if(wrmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"%s:%d\n", __FUNCTION__, __LINE__);
+ if (wrmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value)) < 0) {
+ BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%d\n", __func__, __LINE__);
return STATUS_FAILURE;
}
}
@@ -1074,104 +947,87 @@ int run_card_proc(PMINI_ADAPTER ps_adapter )
int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter)
{
-
int status;
UINT value = 0;
/*
- * Create the threads first and then download the
- * Firm/DDR Settings..
- */
-
+ * Create the threads first and then download the
+ * Firm/DDR Settings..
+ */
status = create_worker_threads(ps_adapter);
- if (status<0)
+ if (status < 0)
return status;
- /*
- * For Downloading the Firm, parse the cfg file first.
- */
- status = bcm_parse_target_params (ps_adapter);
- if(status){
+ status = bcm_parse_target_params(ps_adapter);
+ if (status)
return status;
- }
- if(ps_adapter->chip_id >= T3LPB)
- {
- rdmalt(ps_adapter, SYS_CFG, &value, sizeof (value));
- ps_adapter->syscfgBefFwDld = value ;
- if((value & 0x60)== 0)
- {
+ if (ps_adapter->chip_id >= T3LPB) {
+ rdmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
+ ps_adapter->syscfgBefFwDld = value;
+
+ if ((value & 0x60) == 0)
ps_adapter->bFlashBoot = TRUE;
- }
}
reset_card_proc(ps_adapter);
- //Initializing the NVM.
+ /* Initializing the NVM. */
BcmInitNVM(ps_adapter);
status = ddr_init(ps_adapter);
- if(status)
- {
+ if (status) {
pr_err(DRV_NAME "ddr_init Failed\n");
return status;
}
/* Download cfg file */
status = buffDnldVerify(ps_adapter,
- (PUCHAR)ps_adapter->pstargetparams,
- sizeof(STARGETPARAMS),
- CONFIG_BEGIN_ADDR);
- if(status)
- {
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Error downloading CFG file");
+ (PUCHAR)ps_adapter->pstargetparams,
+ sizeof(STARGETPARAMS),
+ CONFIG_BEGIN_ADDR);
+ if (status) {
+ BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Error downloading CFG file");
goto OUT;
}
- if(register_networkdev(ps_adapter))
- {
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Netdevice failed. Cleanup needs to be performed.");
+ if (register_networkdev(ps_adapter)) {
+ BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Netdevice failed. Cleanup needs to be performed.");
return -EIO;
}
- if(FALSE == ps_adapter->AutoFirmDld)
- {
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "AutoFirmDld Disabled in CFG File..\n");
- //If Auto f/w download is disable, register the control interface,
- //register the control interface after the mailbox.
- if(register_control_device_interface(ps_adapter) < 0)
- {
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Control Device failed. Cleanup needs to be performed.");
+ if (FALSE == ps_adapter->AutoFirmDld) {
+ BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "AutoFirmDld Disabled in CFG File..\n");
+ /* If Auto f/w download is disable, register the control interface, */
+ /* register the control interface after the mailbox. */
+ if (register_control_device_interface(ps_adapter) < 0) {
+ BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Control Device failed. Cleanup needs to be performed.");
return -EIO;
}
-
return STATUS_SUCCESS;
}
/*
- * Do the LED Settings here. It will be used by the Firmware Download
- * Thread.
- */
+ * Do the LED Settings here. It will be used by the Firmware Download
+ * Thread.
+ */
/*
- * 1. If the LED Settings fails, do not stop and do the Firmware download.
- * 2. This init would happened only if the cfg file is present, else
- * call from the ioctl context.
- */
-
- status = InitLedSettings (ps_adapter);
+ * 1. If the LED Settings fails, do not stop and do the Firmware download.
+ * 2. This init would happened only if the cfg file is present, else
+ * call from the ioctl context.
+ */
- if(status)
- {
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_PRINTK, 0, 0,"INIT LED FAILED\n");
+ status = InitLedSettings(ps_adapter);
+ if (status) {
+ BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_PRINTK, 0, 0, "INIT LED FAILED\n");
return status;
}
- if(ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
+
+ if (ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
ps_adapter->DriverState = DRIVER_INIT;
wake_up(&ps_adapter->LEDInfo.notify_led_event);
}
- if(ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
+ if (ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
ps_adapter->DriverState = FW_DOWNLOAD;
wake_up(&ps_adapter->LEDInfo.notify_led_event);
}
@@ -1180,37 +1036,32 @@ int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter)
wrmalt(ps_adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value));
wrmalt(ps_adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value));
- if(ps_adapter->eNVMType == NVM_FLASH)
- {
+ if (ps_adapter->eNVMType == NVM_FLASH) {
status = PropagateCalParamsFromFlashToMemory(ps_adapter);
- if(status)
- {
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL," Propagation of Cal param failed .." );
+ if (status) {
+ BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Propagation of Cal param failed ..");
goto OUT;
}
}
/* Download Firmare */
- if ((status = BcmFileDownload( ps_adapter, BIN_FILE, FIRMWARE_BEGIN_ADDR)))
- {
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No Firmware File is present... \n");
+ status = BcmFileDownload(ps_adapter, BIN_FILE, FIRMWARE_BEGIN_ADDR);
+ if (status != 0) {
+ BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No Firmware File is present...\n");
goto OUT;
}
status = run_card_proc(ps_adapter);
- if(status)
- {
- BCM_DEBUG_PRINT (ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "run_card_proc Failed\n");
+ if (status) {
+ BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "run_card_proc Failed\n");
goto OUT;
}
-
ps_adapter->fw_download_done = TRUE;
mdelay(10);
OUT:
- if(ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
+ if (ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
ps_adapter->DriverState = FW_DOWNLOAD_DONE;
wake_up(&ps_adapter->LEDInfo.notify_led_event);
}
@@ -1218,41 +1069,39 @@ OUT:
return status;
}
-
static int bcm_parse_target_params(PMINI_ADAPTER Adapter)
{
- struct file *flp=NULL;
- mm_segment_t oldfs={0};
+ struct file *flp = NULL;
+ mm_segment_t oldfs = {0};
char *buff;
int len = 0;
- loff_t pos = 0;
+ loff_t pos = 0;
- buff=kmalloc(BUFFER_1K, GFP_KERNEL);
- if(!buff)
- {
+ buff = kmalloc(BUFFER_1K, GFP_KERNEL);
+ if (!buff)
return -ENOMEM;
- }
- if((Adapter->pstargetparams =
- kmalloc(sizeof(STARGETPARAMS), GFP_KERNEL)) == NULL)
- {
+
+ Adapter->pstargetparams = kmalloc(sizeof(STARGETPARAMS), GFP_KERNEL);
+ if (Adapter->pstargetparams == NULL) {
kfree(buff);
return -ENOMEM;
}
- flp=open_firmware_file(Adapter, CFG_FILE);
- if(!flp) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "NOT ABLE TO OPEN THE %s FILE \n", CFG_FILE);
+
+ flp = open_firmware_file(Adapter, CFG_FILE);
+ if (!flp) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "NOT ABLE TO OPEN THE %s FILE\n", CFG_FILE);
kfree(buff);
kfree(Adapter->pstargetparams);
Adapter->pstargetparams = NULL;
return -ENOENT;
}
- oldfs=get_fs(); set_fs(get_ds());
- len=vfs_read(flp, (void __user __force *)buff, BUFFER_1K, &pos);
+ oldfs = get_fs();
+ set_fs(get_ds());
+ len = vfs_read(flp, (void __user __force *)buff, BUFFER_1K, &pos);
set_fs(oldfs);
- if(len != sizeof(STARGETPARAMS))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"Mismatch in Target Param Structure!\n");
+ if (len != sizeof(STARGETPARAMS)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Mismatch in Target Param Structure!\n");
kfree(buff);
kfree(Adapter->pstargetparams);
Adapter->pstargetparams = NULL;
@@ -1266,122 +1115,97 @@ static int bcm_parse_target_params(PMINI_ADAPTER Adapter)
* Values in Adapter->pstargetparams are in network byte order
*/
memcpy(Adapter->pstargetparams, buff, sizeof(STARGETPARAMS));
- kfree (buff);
+ kfree(buff);
beceem_parse_target_struct(Adapter);
return STATUS_SUCCESS;
}
void beceem_parse_target_struct(PMINI_ADAPTER Adapter)
{
- UINT uiHostDrvrCfg6 =0, uiEEPROMFlag = 0;
+ UINT uiHostDrvrCfg6 = 0, uiEEPROMFlag = 0;
- if(ntohl(Adapter->pstargetparams->m_u32PhyParameter2) & AUTO_SYNC_DISABLE)
- {
+ if (ntohl(Adapter->pstargetparams->m_u32PhyParameter2) & AUTO_SYNC_DISABLE) {
pr_info(DRV_NAME ": AutoSyncup is Disabled\n");
Adapter->AutoSyncup = FALSE;
- }
- else
- {
+ } else {
pr_info(DRV_NAME ": AutoSyncup is Enabled\n");
- Adapter->AutoSyncup = TRUE;
+ Adapter->AutoSyncup = TRUE;
}
- if(ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_LINKUP_ENABLE)
- {
+ if (ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_LINKUP_ENABLE) {
pr_info(DRV_NAME ": Enabling autolink up");
Adapter->AutoLinkUp = TRUE;
- }
- else
- {
+ } else {
pr_info(DRV_NAME ": Disabling autolink up");
Adapter->AutoLinkUp = FALSE;
}
- // Setting the DDR Setting..
- Adapter->DDRSetting =
- (ntohl(Adapter->pstargetparams->HostDrvrConfig6) >>8)&0x0F;
- Adapter->ulPowerSaveMode =
- (ntohl(Adapter->pstargetparams->HostDrvrConfig6)>>12)&0x0F;
-
+ /* Setting the DDR Setting.. */
+ Adapter->DDRSetting = (ntohl(Adapter->pstargetparams->HostDrvrConfig6) >> 8)&0x0F;
+ Adapter->ulPowerSaveMode = (ntohl(Adapter->pstargetparams->HostDrvrConfig6)>>12)&0x0F;
pr_info(DRV_NAME ": DDR Setting: %x\n", Adapter->DDRSetting);
pr_info(DRV_NAME ": Power Save Mode: %lx\n", Adapter->ulPowerSaveMode);
- if(ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_FIRM_DOWNLOAD)
- {
- pr_info(DRV_NAME ": Enabling Auto Firmware Download\n");
- Adapter->AutoFirmDld = TRUE;
- }
- else
- {
- pr_info(DRV_NAME ": Disabling Auto Firmware Download\n");
- Adapter->AutoFirmDld = FALSE;
- }
+ if (ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_FIRM_DOWNLOAD) {
+ pr_info(DRV_NAME ": Enabling Auto Firmware Download\n");
+ Adapter->AutoFirmDld = TRUE;
+ } else {
+ pr_info(DRV_NAME ": Disabling Auto Firmware Download\n");
+ Adapter->AutoFirmDld = FALSE;
+ }
uiHostDrvrCfg6 = ntohl(Adapter->pstargetparams->HostDrvrConfig6);
Adapter->bMipsConfig = (uiHostDrvrCfg6>>20)&0x01;
- pr_info(DRV_NAME ": MIPSConfig : 0x%X\n",Adapter->bMipsConfig);
- //used for backward compatibility.
+ pr_info(DRV_NAME ": MIPSConfig : 0x%X\n", Adapter->bMipsConfig);
+ /* used for backward compatibility. */
Adapter->bDPLLConfig = (uiHostDrvrCfg6>>19)&0x01;
-
- Adapter->PmuMode= (uiHostDrvrCfg6 >> 24 ) & 0x03;
+ Adapter->PmuMode = (uiHostDrvrCfg6 >> 24) & 0x03;
pr_info(DRV_NAME ": PMU MODE: %x", Adapter->PmuMode);
- if((uiHostDrvrCfg6 >> HOST_BUS_SUSPEND_BIT ) & (0x01))
- {
- Adapter->bDoSuspend = TRUE;
- pr_info(DRV_NAME ": Making DoSuspend TRUE as per configFile");
- }
+ if ((uiHostDrvrCfg6 >> HOST_BUS_SUSPEND_BIT) & (0x01)) {
+ Adapter->bDoSuspend = TRUE;
+ pr_info(DRV_NAME ": Making DoSuspend TRUE as per configFile");
+ }
uiEEPROMFlag = ntohl(Adapter->pstargetparams->m_u32EEPROMFlag);
- pr_info(DRV_NAME ": uiEEPROMFlag : 0x%X\n",uiEEPROMFlag);
+ pr_info(DRV_NAME ": uiEEPROMFlag : 0x%X\n", uiEEPROMFlag);
Adapter->eNVMType = (NVM_TYPE)((uiEEPROMFlag>>4)&0x3);
-
Adapter->bStatusWrite = (uiEEPROMFlag>>6)&0x1;
-
Adapter->uiSectorSizeInCFG = 1024*(0xFFFF & ntohl(Adapter->pstargetparams->HostDrvrConfig4));
+ Adapter->bSectorSizeOverride = (bool) ((ntohl(Adapter->pstargetparams->HostDrvrConfig4))>>16)&0x1;
- Adapter->bSectorSizeOverride =(bool) ((ntohl(Adapter->pstargetparams->HostDrvrConfig4))>>16)&0x1;
-
- if(ntohl(Adapter->pstargetparams->m_u32PowerSavingModeOptions) &0x01)
+ if (ntohl(Adapter->pstargetparams->m_u32PowerSavingModeOptions) & 0x01)
Adapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE;
- if(Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)
+ if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)
doPowerAutoCorrection(Adapter);
-
}
static VOID doPowerAutoCorrection(PMINI_ADAPTER psAdapter)
{
UINT reporting_mode;
- reporting_mode = ntohl(psAdapter->pstargetparams->m_u32PowerSavingModeOptions) &0x02 ;
+ reporting_mode = ntohl(psAdapter->pstargetparams->m_u32PowerSavingModeOptions) & 0x02;
psAdapter->bIsAutoCorrectEnabled = !((char)(psAdapter->ulPowerSaveMode >> 3) & 0x1);
- if(reporting_mode == TRUE)
- {
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"can't do suspen/resume as reporting mode is enable");
+ if (reporting_mode == TRUE) {
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "can't do suspen/resume as reporting mode is enable");
psAdapter->bDoSuspend = FALSE;
}
- if (psAdapter->bIsAutoCorrectEnabled && (psAdapter->chip_id >= T3LPB))
- {
- //If reporting mode is enable, switch PMU to PMC
+ if (psAdapter->bIsAutoCorrectEnabled && (psAdapter->chip_id >= T3LPB)) {
+ /* If reporting mode is enable, switch PMU to PMC */
{
psAdapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PMU_CLOCK_GATING;
- psAdapter->bDoSuspend =FALSE;
-
+ psAdapter->bDoSuspend = FALSE;
}
- //clearing space bit[15..12]
+ /* clearing space bit[15..12] */
psAdapter->pstargetparams->HostDrvrConfig6 &= ~(htonl((0xF << 12)));
- //placing the power save mode option
+ /* placing the power save mode option */
psAdapter->pstargetparams->HostDrvrConfig6 |= htonl((psAdapter->ulPowerSaveMode << 12));
-
- }
- else if (psAdapter->bIsAutoCorrectEnabled == FALSE)
- {
-
- // remove the autocorrect disable bit set before dumping.
+ } else if (psAdapter->bIsAutoCorrectEnabled == FALSE) {
+ /* remove the autocorrect disable bit set before dumping. */
psAdapter->ulPowerSaveMode &= ~(1 << 3);
psAdapter->pstargetparams->HostDrvrConfig6 &= ~(htonl(1 << 15));
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"Using Forced User Choice: %lx\n", psAdapter->ulPowerSaveMode);
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Using Forced User Choice: %lx\n", psAdapter->ulPowerSaveMode);
}
}
@@ -1392,63 +1216,56 @@ static unsigned char *ReadMacAddrEEPROM(PMINI_ADAPTER Adapter, ulong dwAddress)
unsigned int temp = 0;
unsigned char *pucmacaddr = kmalloc(MAC_ADDRESS_SIZE, GFP_KERNEL);
- if(!pucmacaddr)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "No Buffers to Read the EEPROM Address\n");
+ if (!pucmacaddr) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No Buffers to Read the EEPROM Address\n");
return NULL;
}
dwAddress |= 0x5b000000;
- status = wrmalt(Adapter, EEPROM_COMMAND_Q_REG,
- (PUINT)&dwAddress, sizeof(UINT));
- if(status != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "wrm Failed..\n");
+ status = wrmalt(Adapter, EEPROM_COMMAND_Q_REG, (PUINT)&dwAddress, sizeof(UINT));
+ if (status != STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm Failed..\n");
kfree(pucmacaddr);
pucmacaddr = NULL;
goto OUT;
}
- for(i=0;i<MAC_ADDRESS_SIZE;i++)
- {
- status = rdmalt(Adapter, EEPROM_READ_DATA_Q_REG, &temp,sizeof(temp));
- if(status != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm Failed..\n");
+
+ for (i = 0; i < MAC_ADDRESS_SIZE; i++) {
+ status = rdmalt(Adapter, EEPROM_READ_DATA_Q_REG, &temp, sizeof(temp));
+ if (status != STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm Failed..\n");
kfree(pucmacaddr);
pucmacaddr = NULL;
goto OUT;
}
pucmacaddr[i] = temp & 0xff;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"%x \n", pucmacaddr[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "%x\n", pucmacaddr[i]);
}
OUT:
return pucmacaddr;
}
#endif
-
static void convertEndian(B_UINT8 rwFlag, PUINT puiBuffer, UINT uiByteCount)
{
UINT uiIndex = 0;
- if(RWM_WRITE == rwFlag) {
- for(uiIndex =0; uiIndex < (uiByteCount/sizeof(UINT)); uiIndex++) {
+ if (RWM_WRITE == rwFlag) {
+ for (uiIndex = 0; uiIndex < (uiByteCount/sizeof(UINT)); uiIndex++)
puiBuffer[uiIndex] = htonl(puiBuffer[uiIndex]);
- }
} else {
- for(uiIndex =0; uiIndex < (uiByteCount/sizeof(UINT)); uiIndex++) {
+ for (uiIndex = 0; uiIndex < (uiByteCount/sizeof(UINT)); uiIndex++)
puiBuffer[uiIndex] = ntohl(puiBuffer[uiIndex]);
- }
}
}
-#define CACHE_ADDRESS_MASK 0x80000000
-#define UNCACHE_ADDRESS_MASK 0xa0000000
+#define CACHE_ADDRESS_MASK 0x80000000
+#define UNCACHE_ADDRESS_MASK 0xa0000000
int rdm(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t sSize)
{
return Adapter->interface_rdm(Adapter->pvInterfaceAdapter,
- uiAddress, pucBuff, sSize);
+ uiAddress, pucBuff, sSize);
}
int wrm(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t sSize)
@@ -1456,112 +1273,103 @@ int wrm(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t sSize)
int iRetVal;
iRetVal = Adapter->interface_wrm(Adapter->pvInterfaceAdapter,
- uiAddress, pucBuff, sSize);
-
-
+ uiAddress, pucBuff, sSize);
return iRetVal;
}
-int wrmalt (PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t size)
+int wrmalt(PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t size)
{
convertEndian(RWM_WRITE, pucBuff, size);
return wrm(Adapter, uiAddress, (PUCHAR)pucBuff, size);
}
-int rdmalt (PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t size)
+int rdmalt(PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t size)
{
- INT uiRetVal =0;
+ INT uiRetVal = 0;
- uiRetVal = rdm(Adapter,uiAddress,(PUCHAR)pucBuff,size);
+ uiRetVal = rdm(Adapter, uiAddress, (PUCHAR)pucBuff, size);
convertEndian(RWM_READ, (PUINT)pucBuff, size);
return uiRetVal;
}
-
int wrmWithLock(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t sSize)
{
- INT status = STATUS_SUCCESS ;
+ INT status = STATUS_SUCCESS;
down(&Adapter->rdmwrmsync);
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
status = -EACCES;
goto exit;
}
- status =wrm(Adapter, uiAddress, pucBuff, sSize);
-
+ status = wrm(Adapter, uiAddress, pucBuff, sSize);
exit:
up(&Adapter->rdmwrmsync);
- return status ;
+ return status;
}
-int wrmaltWithLock (PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t size)
+int wrmaltWithLock(PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t size)
{
int iRetVal = STATUS_SUCCESS;
down(&Adapter->rdmwrmsync);
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
iRetVal = -EACCES;
goto exit;
}
- iRetVal = wrmalt(Adapter,uiAddress,pucBuff,size);
-
+ iRetVal = wrmalt(Adapter, uiAddress, pucBuff, size);
exit:
up(&Adapter->rdmwrmsync);
return iRetVal;
}
-int rdmaltWithLock (PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t size)
+int rdmaltWithLock(PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t size)
{
- INT uiRetVal =STATUS_SUCCESS;
+ INT uiRetVal = STATUS_SUCCESS;
down(&Adapter->rdmwrmsync);
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
uiRetVal = -EACCES;
goto exit;
}
- uiRetVal = rdmalt(Adapter,uiAddress, pucBuff, size);
-
+ uiRetVal = rdmalt(Adapter, uiAddress, pucBuff, size);
exit:
up(&Adapter->rdmwrmsync);
return uiRetVal;
}
-
static VOID HandleShutDownModeWakeup(PMINI_ADAPTER Adapter)
{
- int clear_abort_pattern = 0,Status = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "====>\n");
- //target has woken up From Shut Down
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Clearing Shut Down Software abort pattern\n");
- Status = wrmalt(Adapter,SW_ABORT_IDLEMODE_LOC, (PUINT)&clear_abort_pattern, sizeof(clear_abort_pattern));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL,"WRM to SW_ABORT_IDLEMODE_LOC failed with err:%d", Status);
+ int clear_abort_pattern = 0, Status = 0;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "====>\n");
+ /* target has woken up From Shut Down */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Clearing Shut Down Software abort pattern\n");
+ Status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, (PUINT)&clear_abort_pattern, sizeof(clear_abort_pattern));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "WRM to SW_ABORT_IDLEMODE_LOC failed with err:%d", Status);
return;
}
- if(Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)
- {
+
+ if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
msleep(100);
InterfaceHandleShutdownModeWakeup(Adapter);
msleep(100);
}
- if(Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
+
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
Adapter->DriverState = NO_NETWORK_ENTRY;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
@@ -1569,55 +1377,49 @@ static VOID HandleShutDownModeWakeup(PMINI_ADAPTER Adapter)
Adapter->bTriedToWakeUpFromlowPowerMode = FALSE;
Adapter->bShutStatus = FALSE;
wake_up(&Adapter->lowpower_mode_wait_queue);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "<====\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "<====\n");
}
static VOID SendShutModeResponse(PMINI_ADAPTER Adapter)
{
- CONTROL_MESSAGE stShutdownResponse;
- UINT NVMAccess = 0,lowPwrAbortMsg = 0;
+ CONTROL_MESSAGE stShutdownResponse;
+ UINT NVMAccess = 0, lowPwrAbortMsg = 0;
UINT Status = 0;
- memset (&stShutdownResponse, 0, sizeof(CONTROL_MESSAGE));
+ memset(&stShutdownResponse, 0, sizeof(CONTROL_MESSAGE));
stShutdownResponse.Leader.Status = LINK_UP_CONTROL_REQ;
- stShutdownResponse.Leader.PLength = 8;//8 bytes;
+ stShutdownResponse.Leader.PLength = 8; /* 8 bytes; */
stShutdownResponse.szData[0] = LINK_UP_ACK;
stShutdownResponse.szData[1] = LINK_SHUTDOWN_REQ_FROM_FIRMWARE;
/*********************************
- **down_trylock -
- ** if [ semaphore is available ]
- ** acquire semaphone and return value 0 ;
- ** else
- ** return non-zero value ;
- **
- ***********************************/
+ * down_trylock -
+ * if [ semaphore is available ]
+ * acquire semaphone and return value 0 ;
+ * else
+ * return non-zero value ;
+ *
+ ***********************************/
NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
+ lowPwrAbortMsg = down_trylock(&Adapter->LowPowerModeSync);
- lowPwrAbortMsg= down_trylock(&Adapter->LowPowerModeSync);
-
-
- if(NVMAccess || lowPwrAbortMsg|| atomic_read(&Adapter->TotalPacketCount))
- {
- if(!NVMAccess)
+ if (NVMAccess || lowPwrAbortMsg || atomic_read(&Adapter->TotalPacketCount)) {
+ if (!NVMAccess)
up(&Adapter->NVMRdmWrmLock);
- if(!lowPwrAbortMsg)
+ if (!lowPwrAbortMsg)
up(&Adapter->LowPowerModeSync);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Device Access is going on NACK the Shut Down MODE\n");
- stShutdownResponse.szData[2] = SHUTDOWN_NACK_FROM_DRIVER;//NACK- device access is going on.
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Device Access is going on NACK the Shut Down MODE\n");
+ stShutdownResponse.szData[2] = SHUTDOWN_NACK_FROM_DRIVER; /* NACK- device access is going on. */
Adapter->bPreparingForLowPowerMode = FALSE;
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Sending SHUTDOWN MODE ACK\n");
- stShutdownResponse.szData[2] = SHUTDOWN_ACK_FROM_DRIVER;//ShutDown ACK
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Sending SHUTDOWN MODE ACK\n");
+ stShutdownResponse.szData[2] = SHUTDOWN_ACK_FROM_DRIVER; /* ShutDown ACK */
/* Wait for the LED to TURN OFF before sending ACK response */
- if(Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
INT iRetVal = 0;
/* Wake the LED Thread with LOWPOWER_MODE_ENTER State */
@@ -1625,237 +1427,200 @@ static VOID SendShutModeResponse(PMINI_ADAPTER Adapter)
wake_up(&Adapter->LEDInfo.notify_led_event);
/* Wait for 1 SEC for LED to OFF */
- iRetVal = wait_event_timeout(Adapter->LEDInfo.idleModeSyncEvent,\
- Adapter->LEDInfo.bIdle_led_off, msecs_to_jiffies(1000));
+ iRetVal = wait_event_timeout(Adapter->LEDInfo.idleModeSyncEvent, Adapter->LEDInfo.bIdle_led_off, msecs_to_jiffies(1000));
/* If Timed Out to Sync IDLE MODE Enter, do IDLE mode Exit and Send NACK to device */
- if(iRetVal <= 0)
- {
- stShutdownResponse.szData[1] = SHUTDOWN_NACK_FROM_DRIVER;//NACK- device access is going on.
-
+ if (iRetVal <= 0) {
+ stShutdownResponse.szData[1] = SHUTDOWN_NACK_FROM_DRIVER; /* NACK- device access is going on. */
Adapter->DriverState = NO_NETWORK_ENTRY;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
}
- if(stShutdownResponse.szData[2] == SHUTDOWN_ACK_FROM_DRIVER)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL,"ACKING SHUTDOWN MODE !!!!!!!!!");
+ if (stShutdownResponse.szData[2] == SHUTDOWN_ACK_FROM_DRIVER) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "ACKING SHUTDOWN MODE !!!!!!!!!");
down(&Adapter->rdmwrmsync);
Adapter->bPreparingForLowPowerMode = TRUE;
up(&Adapter->rdmwrmsync);
- //Killing all URBS.
- if(Adapter->bDoSuspend == TRUE)
+ /* Killing all URBS. */
+ if (Adapter->bDoSuspend == TRUE)
Bcm_kill_all_URBs((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter));
- }
- else
- {
+ } else {
Adapter->bPreparingForLowPowerMode = FALSE;
}
- if(!NVMAccess)
+ if (!NVMAccess)
up(&Adapter->NVMRdmWrmLock);
- if(!lowPwrAbortMsg)
+ if (!lowPwrAbortMsg)
up(&Adapter->LowPowerModeSync);
}
- Status = CopyBufferToControlPacket(Adapter,&stShutdownResponse);
- if((Status != STATUS_SUCCESS))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL,"fail to send the Idle mode Request \n");
- Adapter->bPreparingForLowPowerMode = FALSE;
+ Status = CopyBufferToControlPacket(Adapter, &stShutdownResponse);
+ if ((Status != STATUS_SUCCESS)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "fail to send the Idle mode Request\n");
+ Adapter->bPreparingForLowPowerMode = FALSE;
StartInterruptUrb((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter));
}
}
-
-static void HandleShutDownModeRequest(PMINI_ADAPTER Adapter,PUCHAR pucBuffer)
+static void HandleShutDownModeRequest(PMINI_ADAPTER Adapter, PUCHAR pucBuffer)
{
B_UINT32 uiResetValue = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "====>\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "====>\n");
- if(*(pucBuffer+1) == COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW)
- {
+ if (*(pucBuffer+1) == COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW) {
HandleShutDownModeWakeup(Adapter);
- }
- else if(*(pucBuffer+1) == LINK_SHUTDOWN_REQ_FROM_FIRMWARE)
- {
- //Target wants to go to Shut Down Mode
- //InterfacePrepareForShutdown(Adapter);
- if(Adapter->chip_id == BCS220_2 ||
- Adapter->chip_id == BCS220_2BC ||
- Adapter->chip_id == BCS250_BC ||
- Adapter->chip_id == BCS220_3)
- {
- rdmalt(Adapter,HPM_CONFIG_MSW, &uiResetValue, 4);
+ } else if (*(pucBuffer+1) == LINK_SHUTDOWN_REQ_FROM_FIRMWARE) {
+ /* Target wants to go to Shut Down Mode */
+ /* InterfacePrepareForShutdown(Adapter); */
+ if (Adapter->chip_id == BCS220_2 ||
+ Adapter->chip_id == BCS220_2BC ||
+ Adapter->chip_id == BCS250_BC ||
+ Adapter->chip_id == BCS220_3) {
+
+ rdmalt(Adapter, HPM_CONFIG_MSW, &uiResetValue, 4);
uiResetValue |= (1<<17);
wrmalt(Adapter, HPM_CONFIG_MSW, &uiResetValue, 4);
}
SendShutModeResponse(Adapter);
- BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL,"ShutDownModeResponse:Notification received: Sending the response(Ack/Nack)\n");
+ BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "ShutDownModeResponse:Notification received: Sending the response(Ack/Nack)\n");
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "<====\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "<====\n");
return;
-
}
VOID ResetCounters(PMINI_ADAPTER Adapter)
{
-
- beceem_protocol_reset(Adapter);
-
+ beceem_protocol_reset(Adapter);
Adapter->CurrNumRecvDescs = 0;
- Adapter->PrevNumRecvDescs = 0;
- Adapter->LinkUpStatus = 0;
+ Adapter->PrevNumRecvDescs = 0;
+ Adapter->LinkUpStatus = 0;
Adapter->LinkStatus = 0;
- atomic_set(&Adapter->cntrlpktCnt,0);
- atomic_set (&Adapter->TotalPacketCount,0);
- Adapter->fw_download_done=FALSE;
+ atomic_set(&Adapter->cntrlpktCnt, 0);
+ atomic_set(&Adapter->TotalPacketCount, 0);
+ Adapter->fw_download_done = FALSE;
Adapter->LinkStatus = 0;
- Adapter->AutoLinkUp = FALSE;
+ Adapter->AutoLinkUp = FALSE;
Adapter->IdleMode = FALSE;
Adapter->bShutStatus = FALSE;
-
}
-S_CLASSIFIER_RULE *GetFragIPClsEntry(PMINI_ADAPTER Adapter,USHORT usIpIdentification,ULONG SrcIP)
+
+S_CLASSIFIER_RULE *GetFragIPClsEntry(PMINI_ADAPTER Adapter, USHORT usIpIdentification, ULONG SrcIP)
{
- UINT uiIndex=0;
- for(uiIndex=0;uiIndex<MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES;uiIndex++)
- {
- if((Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed)&&
- (Adapter->astFragmentedPktClassifierTable[uiIndex].usIpIdentification == usIpIdentification)&&
- (Adapter->astFragmentedPktClassifierTable[uiIndex].ulSrcIpAddress== SrcIP)&&
+ UINT uiIndex = 0;
+ for (uiIndex = 0; uiIndex < MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES; uiIndex++) {
+ if ((Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed) &&
+ (Adapter->astFragmentedPktClassifierTable[uiIndex].usIpIdentification == usIpIdentification) &&
+ (Adapter->astFragmentedPktClassifierTable[uiIndex].ulSrcIpAddress == SrcIP) &&
!Adapter->astFragmentedPktClassifierTable[uiIndex].bOutOfOrderFragment)
+
return Adapter->astFragmentedPktClassifierTable[uiIndex].pstMatchedClassifierEntry;
}
return NULL;
}
-void AddFragIPClsEntry(PMINI_ADAPTER Adapter,PS_FRAGMENTED_PACKET_INFO psFragPktInfo)
+void AddFragIPClsEntry(PMINI_ADAPTER Adapter, PS_FRAGMENTED_PACKET_INFO psFragPktInfo)
{
- UINT uiIndex=0;
- for(uiIndex=0;uiIndex<MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES;uiIndex++)
- {
- if(!Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed)
- {
- memcpy(&Adapter->astFragmentedPktClassifierTable[uiIndex],psFragPktInfo,sizeof(S_FRAGMENTED_PACKET_INFO));
+ UINT uiIndex = 0;
+ for (uiIndex = 0; uiIndex < MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES; uiIndex++) {
+ if (!Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed) {
+ memcpy(&Adapter->astFragmentedPktClassifierTable[uiIndex], psFragPktInfo, sizeof(S_FRAGMENTED_PACKET_INFO));
break;
}
}
-
}
-void DelFragIPClsEntry(PMINI_ADAPTER Adapter,USHORT usIpIdentification,ULONG SrcIp)
+void DelFragIPClsEntry(PMINI_ADAPTER Adapter, USHORT usIpIdentification, ULONG SrcIp)
{
- UINT uiIndex=0;
- for(uiIndex=0;uiIndex<MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES;uiIndex++)
- {
- if((Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed)&&
- (Adapter->astFragmentedPktClassifierTable[uiIndex].usIpIdentification == usIpIdentification)&&
- (Adapter->astFragmentedPktClassifierTable[uiIndex].ulSrcIpAddress== SrcIp))
- memset(&Adapter->astFragmentedPktClassifierTable[uiIndex],0,sizeof(S_FRAGMENTED_PACKET_INFO));
+ UINT uiIndex = 0;
+ for (uiIndex = 0; uiIndex < MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES; uiIndex++) {
+ if ((Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed) &&
+ (Adapter->astFragmentedPktClassifierTable[uiIndex].usIpIdentification == usIpIdentification) &&
+ (Adapter->astFragmentedPktClassifierTable[uiIndex].ulSrcIpAddress == SrcIp))
+
+ memset(&Adapter->astFragmentedPktClassifierTable[uiIndex], 0, sizeof(S_FRAGMENTED_PACKET_INFO));
}
}
-void update_per_cid_rx (PMINI_ADAPTER Adapter)
+void update_per_cid_rx(PMINI_ADAPTER Adapter)
{
- UINT qindex = 0;
+ UINT qindex = 0;
- if((jiffies - Adapter->liDrainCalculated) < XSECONDS)
+ if ((jiffies - Adapter->liDrainCalculated) < XSECONDS)
return;
- for(qindex = 0; qindex < HiPriority; qindex++)
- {
- if(Adapter->PackInfo[qindex].ucDirection == 0)
- {
+ for (qindex = 0; qindex < HiPriority; qindex++) {
+ if (Adapter->PackInfo[qindex].ucDirection == 0) {
Adapter->PackInfo[qindex].uiCurrentRxRate =
(Adapter->PackInfo[qindex].uiCurrentRxRate +
- Adapter->PackInfo[qindex].uiThisPeriodRxBytes)/2;
+ Adapter->PackInfo[qindex].uiThisPeriodRxBytes) / 2;
Adapter->PackInfo[qindex].uiThisPeriodRxBytes = 0;
- }
- else
- {
+ } else {
Adapter->PackInfo[qindex].uiCurrentDrainRate =
(Adapter->PackInfo[qindex].uiCurrentDrainRate +
- Adapter->PackInfo[qindex].uiThisPeriodSentBytes)/2;
-
- Adapter->PackInfo[qindex].uiThisPeriodSentBytes=0;
+ Adapter->PackInfo[qindex].uiThisPeriodSentBytes) / 2;
+ Adapter->PackInfo[qindex].uiThisPeriodSentBytes = 0;
}
}
- Adapter->liDrainCalculated=jiffies;
+ Adapter->liDrainCalculated = jiffies;
}
-void update_per_sf_desc_cnts( PMINI_ADAPTER Adapter)
+
+void update_per_sf_desc_cnts(PMINI_ADAPTER Adapter)
{
INT iIndex = 0;
u32 uibuff[MAX_TARGET_DSX_BUFFERS];
- if(!atomic_read (&Adapter->uiMBupdate))
+ if (!atomic_read(&Adapter->uiMBupdate))
return;
- if(rdmaltWithLock(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (PUINT)uibuff, sizeof(UINT) * MAX_TARGET_DSX_BUFFERS)<0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm failed\n");
+ if (rdmaltWithLock(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (PUINT)uibuff, sizeof(UINT) * MAX_TARGET_DSX_BUFFERS) < 0) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed\n");
return;
}
- for(iIndex = 0;iIndex < HiPriority; iIndex++)
- {
- if(Adapter->PackInfo[iIndex].bValid && Adapter->PackInfo[iIndex].ucDirection)
- {
- if(Adapter->PackInfo[iIndex].usVCID_Value < MAX_TARGET_DSX_BUFFERS)
- {
+
+ for (iIndex = 0; iIndex < HiPriority; iIndex++) {
+ if (Adapter->PackInfo[iIndex].bValid && Adapter->PackInfo[iIndex].ucDirection) {
+ if (Adapter->PackInfo[iIndex].usVCID_Value < MAX_TARGET_DSX_BUFFERS)
atomic_set(&Adapter->PackInfo[iIndex].uiPerSFTxResourceCount, uibuff[Adapter->PackInfo[iIndex].usVCID_Value]);
- }
else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Invalid VCID : %x \n",
- Adapter->PackInfo[iIndex].usVCID_Value);
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid VCID : %x\n", Adapter->PackInfo[iIndex].usVCID_Value);
}
}
- atomic_set (&Adapter->uiMBupdate, FALSE);
+ atomic_set(&Adapter->uiMBupdate, FALSE);
}
void flush_queue(PMINI_ADAPTER Adapter, UINT iQIndex)
{
- struct sk_buff* PacketToDrop=NULL;
- struct net_device_stats* netstats = &Adapter->dev->stats;
-
+ struct sk_buff *PacketToDrop = NULL;
+ struct net_device_stats *netstats = &Adapter->dev->stats;
spin_lock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
- while(Adapter->PackInfo[iQIndex].FirstTxQueue &&
- atomic_read(&Adapter->TotalPacketCount))
- {
+ while (Adapter->PackInfo[iQIndex].FirstTxQueue && atomic_read(&Adapter->TotalPacketCount)) {
PacketToDrop = Adapter->PackInfo[iQIndex].FirstTxQueue;
- if(PacketToDrop && PacketToDrop->len)
- {
+ if (PacketToDrop && PacketToDrop->len) {
netstats->tx_dropped++;
- DEQUEUEPACKET(Adapter->PackInfo[iQIndex].FirstTxQueue, \
- Adapter->PackInfo[iQIndex].LastTxQueue);
-
+ DEQUEUEPACKET(Adapter->PackInfo[iQIndex].FirstTxQueue, Adapter->PackInfo[iQIndex].LastTxQueue);
Adapter->PackInfo[iQIndex].uiCurrentPacketsOnHost--;
Adapter->PackInfo[iQIndex].uiCurrentBytesOnHost -= PacketToDrop->len;
- //Adding dropped statistics
+ /* Adding dropped statistics */
Adapter->PackInfo[iQIndex].uiDroppedCountBytes += PacketToDrop->len;
Adapter->PackInfo[iQIndex].uiDroppedCountPackets++;
-
dev_kfree_skb(PacketToDrop);
atomic_dec(&Adapter->TotalPacketCount);
}
}
spin_unlock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
-
}
-static void beceem_protocol_reset (PMINI_ADAPTER Adapter)
+static void beceem_protocol_reset(PMINI_ADAPTER Adapter)
{
int i;
-
if (netif_msg_link(Adapter))
pr_notice(PFX "%s: protocol reset\n", Adapter->dev->name);
@@ -1864,32 +1629,22 @@ static void beceem_protocol_reset (PMINI_ADAPTER Adapter)
Adapter->IdleMode = FALSE;
Adapter->LinkUpStatus = FALSE;
- ClearTargetDSXBuffer(Adapter,0, TRUE);
- //Delete All Classifier Rules
+ ClearTargetDSXBuffer(Adapter, 0, TRUE);
+ /* Delete All Classifier Rules */
- for(i = 0;i<HiPriority;i++)
- {
- DeleteAllClassifiersForSF(Adapter,i);
- }
+ for (i = 0; i < HiPriority; i++)
+ DeleteAllClassifiersForSF(Adapter, i);
flush_all_queues(Adapter);
- if(Adapter->TimerActive == TRUE)
+ if (Adapter->TimerActive == TRUE)
Adapter->TimerActive = FALSE;
- memset(Adapter->astFragmentedPktClassifierTable, 0,
- sizeof(S_FRAGMENTED_PACKET_INFO) * MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES);
+ memset(Adapter->astFragmentedPktClassifierTable, 0, sizeof(S_FRAGMENTED_PACKET_INFO) * MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES);
- for(i = 0;i<HiPriority;i++)
- {
- //resetting only the first size (S_MIBS_SERVICEFLOW_TABLE) for the SF.
- // It is same between MIBs and SF.
- memset(&Adapter->PackInfo[i].stMibsExtServiceFlowTable,
- 0, sizeof(S_MIBS_EXTSERVICEFLOW_PARAMETERS));
+ for (i = 0; i < HiPriority; i++) {
+ /* resetting only the first size (S_MIBS_SERVICEFLOW_TABLE) for the SF. */
+ /* It is same between MIBs and SF. */
+ memset(&Adapter->PackInfo[i].stMibsExtServiceFlowTable, 0, sizeof(S_MIBS_EXTSERVICEFLOW_PARAMETERS));
}
}
-
-
-
-
-
diff --git a/drivers/staging/bcm/headers.h b/drivers/staging/bcm/headers.h
index 947d0632568..da47db8c8f2 100644
--- a/drivers/staging/bcm/headers.h
+++ b/drivers/staging/bcm/headers.h
@@ -74,4 +74,6 @@
#define DRV_VERSION VER_FILEVERSION_STR
#define PFX DRV_NAME " "
+extern struct class *bcm_class;
+
#endif
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index 4da5b7b54a1..3de0daf5edb 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -4013,7 +4013,8 @@ INT BcmCopyISO(PMINI_ADAPTER Adapter, FLASH2X_COPY_SECTION sCopySectStrut)
if(uiTotalDataToCopy < ISOLength)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"error as Source ISO Section does not have valid signature");
- return STATUS_FAILURE;
+ Status = STATUS_FAILURE;
+ goto out;
}
uiTotalDataToCopy =(Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End) -
@@ -4026,7 +4027,8 @@ INT BcmCopyISO(PMINI_ADAPTER Adapter, FLASH2X_COPY_SECTION sCopySectStrut)
if(uiTotalDataToCopy < ISOLength)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"error as Dest ISO Section does not have enough section size");
- return STATUS_FAILURE;
+ Status = STATUS_FAILURE;
+ goto out;
}
uiTotalDataToCopy = ISOLength;
@@ -4143,7 +4145,8 @@ INT BcmCopyISO(PMINI_ADAPTER Adapter, FLASH2X_COPY_SECTION sCopySectStrut)
if(uiTotalDataToCopy < ISOLength)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"error as Source ISO Section does not have valid signature");
- return STATUS_FAILURE;
+ Status = STATUS_FAILURE;
+ goto out;
}
uiTotalDataToCopy =(Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End) -
@@ -4156,7 +4159,8 @@ INT BcmCopyISO(PMINI_ADAPTER Adapter, FLASH2X_COPY_SECTION sCopySectStrut)
if(uiTotalDataToCopy < ISOLength)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"error as Dest ISO Section does not have enough section size");
- return STATUS_FAILURE;
+ Status = STATUS_FAILURE;
+ goto out;
}
uiTotalDataToCopy = ISOLength;
@@ -4257,6 +4261,7 @@ INT BcmCopyISO(PMINI_ADAPTER Adapter, FLASH2X_COPY_SECTION sCopySectStrut)
}
+out:
kfree(Buff);
return Status;
diff --git a/drivers/staging/brcm80211/Kconfig b/drivers/staging/brcm80211/Kconfig
deleted file mode 100644
index 379cf16e89f..00000000000
--- a/drivers/staging/brcm80211/Kconfig
+++ /dev/null
@@ -1,40 +0,0 @@
-config BRCMUTIL
- tristate
- default n
-
-config BRCMSMAC
- tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
- default n
- depends on PCI
- depends on WLAN && MAC80211
- depends on X86 || MIPS
- select BRCMUTIL
- select FW_LOADER
- select CRC_CCITT
- ---help---
- This module adds support for PCIe wireless adapters based on Broadcom
- IEEE802.11n SoftMAC chipsets. If you choose to build a module, it'll
- be called brcmsmac.ko.
-
-config BRCMFMAC
- tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver"
- default n
- depends on MMC
- depends on WLAN && CFG80211
- depends on X86 || MIPS
- select BRCMUTIL
- select FW_LOADER
- select WIRELESS_EXT
- select WEXT_PRIV
- ---help---
- This module adds support for embedded wireless adapters based on
- Broadcom IEEE802.11n FullMAC chipsets. This driver uses the kernel's
- wireless extensions subsystem. If you choose to build a module,
- it'll be called brcmfmac.ko.
-
-config BRCMDBG
- bool "Broadcom driver debug functions"
- default n
- depends on BRCMSMAC || BRCMFMAC
- ---help---
- Selecting this enables additional code for debug purposes.
diff --git a/drivers/staging/brcm80211/Makefile b/drivers/staging/brcm80211/Makefile
deleted file mode 100644
index 8b01f5e7ba2..00000000000
--- a/drivers/staging/brcm80211/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Makefile fragment for Broadcom 802.11n Networking Device Driver
-#
-# Copyright (c) 2010 Broadcom Corporation
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
-# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-# common flags
-subdir-ccflags-y := -DBCMDMA32
-subdir-ccflags-$(CONFIG_BRCMDBG) += -DBCMDBG
-
-obj-$(CONFIG_BRCMUTIL) += brcmutil/
-obj-$(CONFIG_BRCMFMAC) += brcmfmac/
-obj-$(CONFIG_BRCMSMAC) += brcmsmac/
diff --git a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README
deleted file mode 100644
index bb86b1b3e58..00000000000
--- a/drivers/staging/brcm80211/README
+++ /dev/null
@@ -1 +0,0 @@
-refer to: http://linuxwireless.org/en/users/Drivers/brcm80211
diff --git a/drivers/staging/brcm80211/TODO b/drivers/staging/brcm80211/TODO
deleted file mode 100644
index e2e2ef9bd7a..00000000000
--- a/drivers/staging/brcm80211/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-To Do List for Broadcom Mac80211 driver before getting in mainline
-
-Bugs
-====
-- none known at this moment
-
-brcmfmac
-=====================
-- ASSERTS deprecated in mainline, replace by warning + error handling
-
-brcm80211 info page
-=====================
-http://linuxwireless.org/en/users/Drivers/brcm80211
diff --git a/drivers/staging/brcm80211/brcmfmac/Makefile b/drivers/staging/brcm80211/brcmfmac/Makefile
deleted file mode 100644
index da3c8057590..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/Makefile
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# Makefile fragment for Broadcom 802.11n Networking Device Driver
-#
-# Copyright (c) 2010 Broadcom Corporation
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
-# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-ccflags-y := \
- -DBRCMF_FIRSTREAD=64 \
- -DBRCMF_SDALIGN=64 \
- -DMAX_HDR_READ=64
-
-ccflags-$(CONFIG_BRCMDBG) += -DSHOW_EVENTS
-
-ccflags-y += \
- -Idrivers/staging/brcm80211/brcmfmac \
- -Idrivers/staging/brcm80211/include
-
-DHDOFILES = \
- wl_cfg80211.o \
- dhd_cdc.o \
- dhd_common.o \
- dhd_sdio.o \
- dhd_linux.o \
- bcmsdh.o \
- bcmsdh_sdmmc.o
-
-obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
-brcmfmac-objs += $(DHDOFILES)
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh.c
deleted file mode 100644
index f4e72ed126b..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh.c
+++ /dev/null
@@ -1,642 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-/* ****************** SDIO CARD Interface Functions **************************/
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-
-#include <defs.h>
-#include <brcm_hw_ids.h>
-#include <brcmu_utils.h>
-#include <brcmu_wifi.h>
-#include <soc.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "sdio_host.h"
-
-#define SDIOH_API_ACCESS_RETRY_LIMIT 2
-
-#define BRCMF_SD_ERROR_VAL 0x0001 /* Error */
-#define BRCMF_SD_INFO_VAL 0x0002 /* Info */
-
-
-#ifdef BCMDBG
-#define BRCMF_SD_ERROR(x) \
- do { \
- if ((brcmf_sdio_msglevel & BRCMF_SD_ERROR_VAL) && \
- net_ratelimit()) \
- printk x; \
- } while (0)
-#define BRCMF_SD_INFO(x) \
- do { \
- if ((brcmf_sdio_msglevel & BRCMF_SD_INFO_VAL) && \
- net_ratelimit()) \
- printk x; \
- } while (0)
-#else /* BCMDBG */
-#define BRCMF_SD_ERROR(x)
-#define BRCMF_SD_INFO(x)
-#endif /* BCMDBG */
-
-/* debugging macros */
-#define SDLX_MSG(x)
-
-#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */
-#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */
-#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */
-
-#define SDIOH_DATA_PIO 0 /* PIO mode */
-#define SDIOH_DATA_DMA 1 /* DMA mode */
-
-struct brcmf_sdio_card {
- bool init_success; /* underlying driver successfully attached */
- void *sdioh; /* handler for sdioh */
- u32 vendevid; /* Target Vendor and Device ID on SD bus */
- bool regfail; /* Save status of last
- reg_read/reg_write call */
- u32 sbwad; /* Save backplane window address */
-};
-
-/**
- * SDIO Host Controller info
- */
-struct sdio_hc {
- struct sdio_hc *next;
- struct device *dev; /* platform device handle */
- void *regs; /* SDIO Host Controller address */
- struct brcmf_sdio_card *card;
- void *ch;
- unsigned int oob_irq;
- unsigned long oob_flags; /* OOB Host specifiction
- as edge and etc */
- bool oob_irq_registered;
-};
-
-/* local copy of bcm sd handler */
-static struct brcmf_sdio_card *l_card;
-
-const uint brcmf_sdio_msglevel = BRCMF_SD_ERROR_VAL;
-
-static struct sdio_hc *sdhcinfo;
-
-/* driver info, initialized when brcmf_sdio_register is called */
-static struct brcmf_sdioh_driver drvinfo = { NULL, NULL };
-
-/* Module parameters specific to each host-controller driver */
-
-module_param(sd_msglevel, uint, 0);
-
-extern uint sd_f2_blocksize;
-module_param(sd_f2_blocksize, int, 0);
-
-/* forward declarations */
-int brcmf_sdio_probe(struct device *dev);
-EXPORT_SYMBOL(brcmf_sdio_probe);
-
-int brcmf_sdio_remove(struct device *dev);
-EXPORT_SYMBOL(brcmf_sdio_remove);
-
-struct brcmf_sdio_card*
-brcmf_sdcard_attach(void *cfghdl, u32 *regsva, uint irq)
-{
- struct brcmf_sdio_card *card;
-
- card = kzalloc(sizeof(struct brcmf_sdio_card), GFP_ATOMIC);
- if (card == NULL) {
- BRCMF_SD_ERROR(("sdcard_attach: out of memory"));
- return NULL;
- }
-
- /* save the handler locally */
- l_card = card;
-
- card->sdioh = brcmf_sdioh_attach(cfghdl, irq);
- if (!card->sdioh) {
- brcmf_sdcard_detach(card);
- return NULL;
- }
-
- card->init_success = true;
-
- *regsva = SI_ENUM_BASE;
-
- /* Report the BAR, to fix if needed */
- card->sbwad = SI_ENUM_BASE;
- return card;
-}
-
-int brcmf_sdcard_detach(struct brcmf_sdio_card *card)
-{
- if (card != NULL) {
- if (card->sdioh) {
- brcmf_sdioh_detach(card->sdioh);
- card->sdioh = NULL;
- }
- kfree(card);
- }
-
- l_card = NULL;
- return 0;
-}
-
-int
-brcmf_sdcard_iovar_op(struct brcmf_sdio_card *card, const char *name,
- void *params, int plen, void *arg, int len, bool set)
-{
- return brcmf_sdioh_iovar_op(card->sdioh, name, params, plen, arg,
- len, set);
-}
-
-int brcmf_sdcard_intr_enable(struct brcmf_sdio_card *card)
-{
- return brcmf_sdioh_interrupt_set(card->sdioh, true);
-}
-
-int brcmf_sdcard_intr_disable(struct brcmf_sdio_card *card)
-{
- return brcmf_sdioh_interrupt_set(card->sdioh, false);
-}
-
-int brcmf_sdcard_intr_reg(struct brcmf_sdio_card *card,
- void (*fn)(void *), void *argh)
-{
- return brcmf_sdioh_interrupt_register(card->sdioh, fn, argh);
-}
-
-int brcmf_sdcard_intr_dereg(struct brcmf_sdio_card *card)
-{
- return brcmf_sdioh_interrupt_deregister(card->sdioh);
-}
-
-u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_card *card, uint fnc_num, u32 addr,
- int *err)
-{
- int status;
- s32 retry = 0;
- u8 data = 0;
-
- if (!card)
- card = l_card;
-
- do {
- if (retry) /* wait for 1 ms till bus get settled down */
- udelay(1000);
- status =
- brcmf_sdioh_cfg_read(card->sdioh, fnc_num, addr,
- (u8 *) &data);
- } while (status != 0
- && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
- if (err)
- *err = status;
-
- BRCMF_SD_INFO(("%s:fun = %d, addr = 0x%x, u8data = 0x%x\n",
- __func__, fnc_num, addr, data));
-
- return data;
-}
-
-void
-brcmf_sdcard_cfg_write(struct brcmf_sdio_card *card, uint fnc_num, u32 addr,
- u8 data, int *err)
-{
- int status;
- s32 retry = 0;
-
- if (!card)
- card = l_card;
-
- do {
- if (retry) /* wait for 1 ms till bus get settled down */
- udelay(1000);
- status =
- brcmf_sdioh_cfg_write(card->sdioh, fnc_num, addr,
- (u8 *) &data);
- } while (status != 0
- && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
- if (err)
- *err = status;
-
- BRCMF_SD_INFO(("%s:fun = %d, addr = 0x%x, u8data = 0x%x\n",
- __func__, fnc_num, addr, data));
-}
-
-u32 brcmf_sdcard_cfg_read_word(struct brcmf_sdio_card *card, uint fnc_num,
- u32 addr, int *err)
-{
- int status;
- u32 data = 0;
-
- if (!card)
- card = l_card;
-
- status = brcmf_sdioh_request_word(card->sdioh, SDIOH_CMD_TYPE_NORMAL,
- SDIOH_READ, fnc_num, addr, &data, 4);
-
- if (err)
- *err = status;
-
- BRCMF_SD_INFO(("%s:fun = %d, addr = 0x%x, u32data = 0x%x\n",
- __func__, fnc_num, addr, data));
-
- return data;
-}
-
-void
-brcmf_sdcard_cfg_write_word(struct brcmf_sdio_card *card, uint fnc_num,
- u32 addr, u32 data, int *err)
-{
- int status;
-
- if (!card)
- card = l_card;
-
- status =
- brcmf_sdioh_request_word(card->sdioh, SDIOH_CMD_TYPE_NORMAL,
- SDIOH_WRITE, fnc_num, addr, &data, 4);
-
- if (err)
- *err = status;
-
- BRCMF_SD_INFO(("%s:fun = %d, addr = 0x%x, u32data = 0x%x\n",
- __func__, fnc_num, addr, data));
-}
-
-int brcmf_sdcard_cis_read(struct brcmf_sdio_card *card, uint func, u8 * cis,
- uint length)
-{
- int status;
-
- u8 *tmp_buf, *tmp_ptr;
- u8 *ptr;
- bool ascii = func & ~0xf;
- func &= 0x7;
-
- if (!card)
- card = l_card;
-
- status = brcmf_sdioh_cis_read(card->sdioh, func, cis, length);
-
- if (ascii) {
- /* Move binary bits to tmp and format them
- into the provided buffer. */
- tmp_buf = kmalloc(length, GFP_ATOMIC);
- if (tmp_buf == NULL) {
- BRCMF_SD_ERROR(("%s: out of memory\n", __func__));
- return -ENOMEM;
- }
- memcpy(tmp_buf, cis, length);
- for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4);
- tmp_ptr++) {
- ptr += sprintf((char *)ptr, "%.2x ", *tmp_ptr & 0xff);
- if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
- ptr += sprintf((char *)ptr, "\n");
- }
- kfree(tmp_buf);
- }
-
- return status;
-}
-
-static int
-brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_card *card, u32 address)
-{
- int err = 0;
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
- (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
- if (!err)
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_SBADDRMID,
- (address >> 16) & SBSDIO_SBADDRMID_MASK,
- &err);
- if (!err)
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_SBADDRHIGH,
- (address >> 24) & SBSDIO_SBADDRHIGH_MASK,
- &err);
-
- return err;
-}
-
-u32 brcmf_sdcard_reg_read(struct brcmf_sdio_card *card, u32 addr, uint size)
-{
- int status;
- u32 word = 0;
- uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
-
- BRCMF_SD_INFO(("%s:fun = 1, addr = 0x%x, ", __func__, addr));
-
- if (!card)
- card = l_card;
-
- if (bar0 != card->sbwad) {
- if (brcmf_sdcard_set_sbaddr_window(card, bar0))
- return 0xFFFFFFFF;
-
- card->sbwad = bar0;
- }
-
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
- if (size == 4)
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
-
- status = brcmf_sdioh_request_word(card->sdioh, SDIOH_CMD_TYPE_NORMAL,
- SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
-
- card->regfail = (status != 0);
-
- BRCMF_SD_INFO(("u32data = 0x%x\n", word));
-
- /* if ok, return appropriately masked word */
- if (status == 0) {
- switch (size) {
- case sizeof(u8):
- return word & 0xff;
- case sizeof(u16):
- return word & 0xffff;
- case sizeof(u32):
- return word;
- default:
- card->regfail = true;
-
- }
- }
-
- /* otherwise, bad sdio access or invalid size */
- BRCMF_SD_ERROR(("%s: error reading addr 0x%04x size %d\n", __func__,
- addr, size));
- return 0xFFFFFFFF;
-}
-
-u32 brcmf_sdcard_reg_write(struct brcmf_sdio_card *card, u32 addr, uint size,
- u32 data)
-{
- int status;
- uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
- int err = 0;
-
- BRCMF_SD_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
- __func__, addr, size * 8, data));
-
- if (!card)
- card = l_card;
-
- if (bar0 != card->sbwad) {
- err = brcmf_sdcard_set_sbaddr_window(card, bar0);
- if (err)
- return err;
-
- card->sbwad = bar0;
- }
-
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
- if (size == 4)
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- status =
- brcmf_sdioh_request_word(card->sdioh, SDIOH_CMD_TYPE_NORMAL,
- SDIOH_WRITE, SDIO_FUNC_1, addr, &data, size);
- card->regfail = (status != 0);
-
- if (status == 0)
- return 0;
-
- BRCMF_SD_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
- __func__, data, addr, size));
- return 0xFFFFFFFF;
-}
-
-bool brcmf_sdcard_regfail(struct brcmf_sdio_card *card)
-{
- return card->regfail;
-}
-
-int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_card *card, u32 addr, uint fn,
- uint flags,
- u8 *buf, uint nbytes, struct sk_buff *pkt,
- void (*complete)(void *handle, int status,
- bool sync_waiting),
- void *handle)
-{
- int status;
- uint incr_fix;
- uint width;
- uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
- int err = 0;
-
- BRCMF_SD_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
- __func__, fn, addr, nbytes));
-
- /* Async not implemented yet */
- if (flags & SDIO_REQ_ASYNC)
- return -ENOTSUPP;
-
- if (bar0 != card->sbwad) {
- err = brcmf_sdcard_set_sbaddr_window(card, bar0);
- if (err)
- return err;
-
- card->sbwad = bar0;
- }
-
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
-
- incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
- width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- if (width == 4)
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
-
- status = brcmf_sdioh_request_buffer(card->sdioh, SDIOH_DATA_PIO,
- incr_fix, SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
-
- return status;
-}
-
-int
-brcmf_sdcard_send_buf(struct brcmf_sdio_card *card, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, void *pkt,
- void (*complete)(void *handle, int status,
- bool sync_waiting),
- void *handle)
-{
- uint incr_fix;
- uint width;
- uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
- int err = 0;
-
- BRCMF_SD_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
- __func__, fn, addr, nbytes));
-
- /* Async not implemented yet */
- if (flags & SDIO_REQ_ASYNC)
- return -ENOTSUPP;
-
- if (bar0 != card->sbwad) {
- err = brcmf_sdcard_set_sbaddr_window(card, bar0);
- if (err)
- return err;
-
- card->sbwad = bar0;
- }
-
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
-
- incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
- width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- if (width == 4)
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
-
- return brcmf_sdioh_request_buffer(card->sdioh, SDIOH_DATA_PIO,
- incr_fix, SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
-}
-
-int brcmf_sdcard_rwdata(struct brcmf_sdio_card *card, uint rw, u32 addr,
- u8 *buf, uint nbytes)
-{
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
-
- return brcmf_sdioh_request_buffer(card->sdioh, SDIOH_DATA_PIO,
- SDIOH_DATA_INC, (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
- addr, 4, nbytes, buf, NULL);
-}
-
-int brcmf_sdcard_abort(struct brcmf_sdio_card *card, uint fn)
-{
- return brcmf_sdioh_abort(card->sdioh, fn);
-}
-
-int brcmf_sdcard_query_device(struct brcmf_sdio_card *card)
-{
- card->vendevid = (PCI_VENDOR_ID_BROADCOM << 16) | 0;
- return card->vendevid;
-}
-
-u32 brcmf_sdcard_cur_sbwad(struct brcmf_sdio_card *card)
-{
- if (!card)
- card = l_card;
-
- return card->sbwad;
-}
-
-int brcmf_sdio_probe(struct device *dev)
-{
- struct sdio_hc *sdhc = NULL;
- u32 regs = 0;
- struct brcmf_sdio_card *card = NULL;
- int irq = 0;
- u32 vendevid;
- unsigned long irq_flags = 0;
-
- /* allocate SDIO Host Controller state info */
- sdhc = kzalloc(sizeof(struct sdio_hc), GFP_ATOMIC);
- if (!sdhc) {
- SDLX_MSG(("%s: out of memory\n", __func__));
- goto err;
- }
- sdhc->dev = (void *)dev;
-
- card = brcmf_sdcard_attach((void *)0, &regs, irq);
- if (!card) {
- SDLX_MSG(("%s: attach failed\n", __func__));
- goto err;
- }
-
- sdhc->card = card;
- sdhc->oob_irq = irq;
- sdhc->oob_flags = irq_flags;
- sdhc->oob_irq_registered = false; /* to make sure.. */
-
- /* chain SDIO Host Controller info together */
- sdhc->next = sdhcinfo;
- sdhcinfo = sdhc;
- /* Read the vendor/device ID from the CIS */
- vendevid = brcmf_sdcard_query_device(card);
-
- /* try to attach to the target device */
- sdhc->ch = drvinfo.attach((vendevid >> 16), (vendevid & 0xFFFF),
- 0, 0, 0, 0, regs, card);
- if (!sdhc->ch) {
- SDLX_MSG(("%s: device attach failed\n", __func__));
- goto err;
- }
-
- return 0;
-
- /* error handling */
-err:
- if (sdhc) {
- if (sdhc->card)
- brcmf_sdcard_detach(sdhc->card);
- kfree(sdhc);
- }
-
- return -ENODEV;
-}
-
-int brcmf_sdio_remove(struct device *dev)
-{
- struct sdio_hc *sdhc, *prev;
-
- sdhc = sdhcinfo;
- drvinfo.detach(sdhc->ch);
- brcmf_sdcard_detach(sdhc->card);
- /* find the SDIO Host Controller state for this pdev
- and take it out from the list */
- for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
- if (sdhc->dev == (void *)dev) {
- if (prev)
- prev->next = sdhc->next;
- else
- sdhcinfo = NULL;
- break;
- }
- prev = sdhc;
- }
- if (!sdhc) {
- SDLX_MSG(("%s: failed\n", __func__));
- return 0;
- }
-
- /* release SDIO Host Controller info */
- kfree(sdhc);
- return 0;
-}
-
-int brcmf_sdio_register(struct brcmf_sdioh_driver *driver)
-{
- drvinfo = *driver;
-
- SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n"));
- return brcmf_sdio_function_init();
-}
-
-void brcmf_sdio_unregister(void)
-{
- brcmf_sdio_function_cleanup();
-}
-
-void brcmf_sdio_wdtmr_enable(bool enable)
-{
- if (enable)
- brcmf_sdbrcm_wd_timer(sdhcinfo->ch, brcmf_watchdog_ms);
- else
- brcmf_sdbrcm_wd_timer(sdhcinfo->ch, 0);
-}
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c
deleted file mode 100644
index 38bd9ba3096..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ /dev/null
@@ -1,1196 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/core.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/card.h>
-#include <linux/suspend.h>
-#include <linux/errno.h>
-#include <linux/sched.h> /* request_irq() */
-#include <net/cfg80211.h>
-
-#include <defs.h>
-#include <brcm_hw_ids.h>
-#include <brcmu_utils.h>
-#include <brcmu_wifi.h>
-#include "sdio_host.h"
-#include "dhd.h"
-#include "dhd_dbg.h"
-#include "wl_cfg80211.h"
-
-#define BLOCK_SIZE_64 64
-#define BLOCK_SIZE_512 512
-#define BLOCK_SIZE_4318 64
-#define BLOCK_SIZE_4328 512
-
-/* private bus modes */
-#define SDIOH_MODE_SD4 2
-
-#define CLIENT_INTR 0x100 /* Get rid of this! */
-
-#if !defined(SDIO_VENDOR_ID_BROADCOM)
-#define SDIO_VENDOR_ID_BROADCOM 0x02d0
-#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
-
-#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000
-
-#define DMA_ALIGN_MASK 0x03
-
-#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
-#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB 0x0492 /* BCM94325SDGWB */
-#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
-#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
-#define SDIO_DEVICE_ID_BROADCOM_4325 0x0493
-#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
-#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
-#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
-#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
-#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
-#define SDIO_DEVICE_ID_BROADCOM_4319 0x4319
-#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
-
-/* Common msglevel constants */
-#define SDH_ERROR_VAL 0x0001 /* Error */
-#define SDH_TRACE_VAL 0x0002 /* Trace */
-#define SDH_INFO_VAL 0x0004 /* Info */
-#define SDH_DEBUG_VAL 0x0008 /* Debug */
-#define SDH_DATA_VAL 0x0010 /* Data */
-#define SDH_CTRL_VAL 0x0020 /* Control Regs */
-#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */
-#define SDH_DMA_VAL 0x0080 /* DMA */
-
-#ifdef BCMDBG
-#define sd_err(x) \
- do { \
- if ((sd_msglevel & SDH_ERROR_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#define sd_trace(x) \
- do { \
- if ((sd_msglevel & SDH_TRACE_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#define sd_info(x) \
- do { \
- if ((sd_msglevel & SDH_INFO_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#define sd_debug(x) \
- do { \
- if ((sd_msglevel & SDH_DEBUG_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#define sd_data(x) \
- do { \
- if ((sd_msglevel & SDH_DATA_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#define sd_ctrl(x) \
- do { \
- if ((sd_msglevel & SDH_CTRL_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#else
-#define sd_err(x)
-#define sd_trace(x)
-#define sd_info(x)
-#define sd_debug(x)
-#define sd_data(x)
-#define sd_ctrl(x)
-#endif
-
-struct sdos_info {
- struct sdioh_info *sd;
- spinlock_t lock;
-};
-
-static void brcmf_sdioh_irqhandler(struct sdio_func *func);
-static void brcmf_sdioh_irqhandler_f2(struct sdio_func *func);
-static int brcmf_sdioh_get_cisaddr(struct sdioh_info *sd, u32 regaddr);
-static int brcmf_ops_sdio_probe(struct sdio_func *func,
- const struct sdio_device_id *id);
-static void brcmf_ops_sdio_remove(struct sdio_func *func);
-
-#ifdef CONFIG_PM
-static int brcmf_sdio_suspend(struct device *dev);
-static int brcmf_sdio_resume(struct device *dev);
-#endif /* CONFIG_PM */
-
-uint sd_f2_blocksize = 512; /* Default blocksize */
-
-uint sd_msglevel = 0x01;
-
-/* module param defaults */
-static int clockoverride;
-
-module_param(clockoverride, int, 0644);
-MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
-
-struct brcmf_sdmmc_instance *gInstance;
-
-struct device sdmmc_dev;
-
-/* devices we support, null terminated */
-static const struct sdio_device_id brcmf_sdmmc_ids[] = {
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT)},
- {SDIO_DEVICE
- (SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319)},
- { /* end: all zeroes */ },
-};
-
-#ifdef CONFIG_PM
-static const struct dev_pm_ops brcmf_sdio_pm_ops = {
- .suspend = brcmf_sdio_suspend,
- .resume = brcmf_sdio_resume,
-};
-#endif /* CONFIG_PM */
-
-static struct sdio_driver brcmf_sdmmc_driver = {
- .probe = brcmf_ops_sdio_probe,
- .remove = brcmf_ops_sdio_remove,
- .name = "brcmfmac",
- .id_table = brcmf_sdmmc_ids,
-#ifdef CONFIG_PM
- .drv = {
- .pm = &brcmf_sdio_pm_ops,
- },
-#endif /* CONFIG_PM */
-};
-
-MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
-
-BRCMF_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
-BRCMF_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
-BRCMF_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
-BRCMF_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
-
-static int
-brcmf_sdioh_card_regread(struct sdioh_info *sd, int func, u32 regaddr,
- int regsize, u32 *data);
-
-static int brcmf_sdioh_enablefuncs(struct sdioh_info *sd)
-{
- int err_ret;
- u32 fbraddr;
- u8 func;
-
- sd_trace(("%s\n", __func__));
-
- /* Get the Card's common CIS address */
- sd->com_cis_ptr = brcmf_sdioh_get_cisaddr(sd, SDIO_CCCR_CIS);
- sd->func_cis_ptr[0] = sd->com_cis_ptr;
- sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __func__,
- sd->com_cis_ptr));
-
- /* Get the Card's function CIS (for each function) */
- for (fbraddr = SDIO_FBR_BASE(1), func = 1;
- func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
- sd->func_cis_ptr[func] =
- brcmf_sdioh_get_cisaddr(sd, SDIO_FBR_CIS + fbraddr);
- sd_info(("%s: Function %d CIS Ptr = 0x%x\n", __func__, func,
- sd->func_cis_ptr[func]));
- }
-
- sd->func_cis_ptr[0] = sd->com_cis_ptr;
- sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __func__,
- sd->com_cis_ptr));
-
- /* Enable Function 1 */
- sdio_claim_host(gInstance->func[1]);
- err_ret = sdio_enable_func(gInstance->func[1]);
- sdio_release_host(gInstance->func[1]);
- if (err_ret) {
- sd_err(("brcmf_sdioh_enablefuncs: Failed to enable F1 "
- "Err: 0x%08x\n", err_ret));
- }
-
- return false;
-}
-
-/*
- * Public entry points & extern's
- */
-struct sdioh_info *brcmf_sdioh_attach(void *bar0, uint irq)
-{
- struct sdioh_info *sd;
- int err_ret;
-
- sd_trace(("%s\n", __func__));
-
- if (gInstance == NULL) {
- sd_err(("%s: SDIO Device not present\n", __func__));
- return NULL;
- }
-
- sd = kzalloc(sizeof(struct sdioh_info), GFP_ATOMIC);
- if (sd == NULL) {
- sd_err(("sdioh_attach: out of memory\n"));
- return NULL;
- }
- if (brcmf_sdioh_osinit(sd) != 0) {
- sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __func__));
- kfree(sd);
- return NULL;
- }
-
- sd->num_funcs = 2;
- sd->use_client_ints = true;
- sd->client_block_size[0] = 64;
-
- gInstance->sd = sd;
-
- /* Claim host controller */
- sdio_claim_host(gInstance->func[1]);
-
- sd->client_block_size[1] = 64;
- err_ret = sdio_set_block_size(gInstance->func[1], 64);
- if (err_ret)
- sd_err(("brcmf_sdioh_attach: Failed to set F1 blocksize\n"));
-
- /* Release host controller F1 */
- sdio_release_host(gInstance->func[1]);
-
- if (gInstance->func[2]) {
- /* Claim host controller F2 */
- sdio_claim_host(gInstance->func[2]);
-
- sd->client_block_size[2] = sd_f2_blocksize;
- err_ret =
- sdio_set_block_size(gInstance->func[2], sd_f2_blocksize);
- if (err_ret)
- sd_err(("brcmf_sdioh_attach: Failed to set F2 blocksize"
- " to %d\n", sd_f2_blocksize));
-
- /* Release host controller F2 */
- sdio_release_host(gInstance->func[2]);
- }
-
- brcmf_sdioh_enablefuncs(sd);
-
- sd_trace(("%s: Done\n", __func__));
- return sd;
-}
-
-extern int brcmf_sdioh_detach(struct sdioh_info *sd)
-{
- sd_trace(("%s\n", __func__));
-
- if (sd) {
-
- /* Disable Function 2 */
- sdio_claim_host(gInstance->func[2]);
- sdio_disable_func(gInstance->func[2]);
- sdio_release_host(gInstance->func[2]);
-
- /* Disable Function 1 */
- sdio_claim_host(gInstance->func[1]);
- sdio_disable_func(gInstance->func[1]);
- sdio_release_host(gInstance->func[1]);
-
- /* deregister irq */
- brcmf_sdioh_osfree(sd);
-
- kfree(sd);
- }
- return 0;
-}
-
-/* Configure callback to client when we receive client interrupt */
-extern int
-brcmf_sdioh_interrupt_register(struct sdioh_info *sd, void (*fn)(void *),
- void *argh)
-{
- sd_trace(("%s: Entering\n", __func__));
- if (fn == NULL) {
- sd_err(("%s: interrupt handler is NULL, not registering\n",
- __func__));
- return -EINVAL;
- }
-
- sd->intr_handler = fn;
- sd->intr_handler_arg = argh;
- sd->intr_handler_valid = true;
-
- /* register and unmask irq */
- if (gInstance->func[2]) {
- sdio_claim_host(gInstance->func[2]);
- sdio_claim_irq(gInstance->func[2], brcmf_sdioh_irqhandler_f2);
- sdio_release_host(gInstance->func[2]);
- }
-
- if (gInstance->func[1]) {
- sdio_claim_host(gInstance->func[1]);
- sdio_claim_irq(gInstance->func[1], brcmf_sdioh_irqhandler);
- sdio_release_host(gInstance->func[1]);
- }
-
- return 0;
-}
-
-extern int brcmf_sdioh_interrupt_deregister(struct sdioh_info *sd)
-{
- sd_trace(("%s: Entering\n", __func__));
-
- if (gInstance->func[1]) {
- /* register and unmask irq */
- sdio_claim_host(gInstance->func[1]);
- sdio_release_irq(gInstance->func[1]);
- sdio_release_host(gInstance->func[1]);
- }
-
- if (gInstance->func[2]) {
- /* Claim host controller F2 */
- sdio_claim_host(gInstance->func[2]);
- sdio_release_irq(gInstance->func[2]);
- /* Release host controller F2 */
- sdio_release_host(gInstance->func[2]);
- }
-
- sd->intr_handler_valid = false;
- sd->intr_handler = NULL;
- sd->intr_handler_arg = NULL;
-
- return 0;
-}
-
-/* IOVar table */
-enum {
- IOV_MSGLEVEL = 1,
- IOV_BLOCKSIZE,
- IOV_USEINTS,
- IOV_NUMINTS,
- IOV_DEVREG,
- IOV_HCIREGS,
- IOV_RXCHAIN
-};
-
-const struct brcmu_iovar sdioh_iovars[] = {
- {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0},
- {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0},/* ((fn << 16) |
- size) */
- {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0},
- {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0},
- {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(struct brcmf_sdreg)}
- ,
- {"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0}
- ,
- {NULL, 0, 0, 0, 0}
-};
-
-int
-brcmf_sdioh_iovar_op(struct sdioh_info *si, const char *name,
- void *params, int plen, void *arg, int len, bool set)
-{
- const struct brcmu_iovar *vi = NULL;
- int bcmerror = 0;
- int val_size;
- s32 int_val = 0;
- bool bool_val;
- u32 actionid;
-
- if (name == NULL || len <= 0)
- return -EINVAL;
-
- /* Set does not take qualifiers */
- if (set && (params || plen))
- return -EINVAL;
-
- /* Get must have return space;*/
- if (!set && !(arg && len))
- return -EINVAL;
-
- sd_trace(("%s: Enter (%s %s)\n", __func__, (set ? "set" : "get"),
- name));
-
- vi = brcmu_iovar_lookup(sdioh_iovars, name);
- if (vi == NULL) {
- bcmerror = -ENOTSUPP;
- goto exit;
- }
-
- bcmerror = brcmu_iovar_lencheck(vi, arg, len, set);
- if (bcmerror != 0)
- goto exit;
-
- /* Set up params so get and set can share the convenience variables */
- if (params == NULL) {
- params = arg;
- plen = len;
- }
-
- if (vi->type == IOVT_VOID)
- val_size = 0;
- else if (vi->type == IOVT_BUFFER)
- val_size = len;
- else
- val_size = sizeof(int);
-
- if (plen >= (int)sizeof(int_val))
- memcpy(&int_val, params, sizeof(int_val));
-
- bool_val = (int_val != 0) ? true : false;
-
- actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
- switch (actionid) {
- case IOV_GVAL(IOV_MSGLEVEL):
- int_val = (s32) sd_msglevel;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_MSGLEVEL):
- sd_msglevel = int_val;
- break;
-
- case IOV_GVAL(IOV_BLOCKSIZE):
- if ((u32) int_val > si->num_funcs) {
- bcmerror = -EINVAL;
- break;
- }
- int_val = (s32) si->client_block_size[int_val];
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_BLOCKSIZE):
- {
- uint func = ((u32) int_val >> 16);
- uint blksize = (u16) int_val;
- uint maxsize;
-
- if (func > si->num_funcs) {
- bcmerror = -EINVAL;
- break;
- }
-
- switch (func) {
- case 0:
- maxsize = 32;
- break;
- case 1:
- maxsize = BLOCK_SIZE_4318;
- break;
- case 2:
- maxsize = BLOCK_SIZE_4328;
- break;
- default:
- maxsize = 0;
- }
- if (blksize > maxsize) {
- bcmerror = -EINVAL;
- break;
- }
- if (!blksize)
- blksize = maxsize;
-
- /* Now set it */
- si->client_block_size[func] = blksize;
-
- break;
- }
-
- case IOV_GVAL(IOV_RXCHAIN):
- int_val = false;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_GVAL(IOV_USEINTS):
- int_val = (s32) si->use_client_ints;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_USEINTS):
- si->use_client_ints = (bool) int_val;
- if (si->use_client_ints)
- si->intmask |= CLIENT_INTR;
- else
- si->intmask &= ~CLIENT_INTR;
-
- break;
-
- case IOV_GVAL(IOV_NUMINTS):
- int_val = (s32) si->intrcount;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_GVAL(IOV_DEVREG):
- {
- struct brcmf_sdreg *sd_ptr =
- (struct brcmf_sdreg *) params;
- u8 data = 0;
-
- if (brcmf_sdioh_cfg_read
- (si, sd_ptr->func, sd_ptr->offset, &data)) {
- bcmerror = -EIO;
- break;
- }
-
- int_val = (int)data;
- memcpy(arg, &int_val, sizeof(int_val));
- break;
- }
-
- case IOV_SVAL(IOV_DEVREG):
- {
- struct brcmf_sdreg *sd_ptr =
- (struct brcmf_sdreg *) params;
- u8 data = (u8) sd_ptr->value;
-
- if (brcmf_sdioh_cfg_write
- (si, sd_ptr->func, sd_ptr->offset, &data)) {
- bcmerror = -EIO;
- break;
- }
- break;
- }
-
- default:
- bcmerror = -ENOTSUPP;
- break;
- }
-exit:
-
- return bcmerror;
-}
-
-extern int
-brcmf_sdioh_cfg_read(struct sdioh_info *sd, uint fnc_num, u32 addr, u8 *data)
-{
- int status;
- /* No lock needed since brcmf_sdioh_request_byte does locking */
- status = brcmf_sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
- return status;
-}
-
-extern int
-brcmf_sdioh_cfg_write(struct sdioh_info *sd, uint fnc_num, u32 addr, u8 *data)
-{
- /* No lock needed since brcmf_sdioh_request_byte does locking */
- int status;
- status = brcmf_sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
- return status;
-}
-
-static int brcmf_sdioh_get_cisaddr(struct sdioh_info *sd, u32 regaddr)
-{
- /* read 24 bits and return valid 17 bit addr */
- int i;
- u32 scratch, regdata;
- u8 *ptr = (u8 *)&scratch;
- for (i = 0; i < 3; i++) {
- if ((brcmf_sdioh_card_regread(sd, 0, regaddr, 1, &regdata)) !=
- SUCCESS)
- sd_err(("%s: Can't read!\n", __func__));
-
- *ptr++ = (u8) regdata;
- regaddr++;
- }
-
- /* Only the lower 17-bits are valid */
- scratch = le32_to_cpu(scratch);
- scratch &= 0x0001FFFF;
- return scratch;
-}
-
-extern int
-brcmf_sdioh_cis_read(struct sdioh_info *sd, uint func, u8 *cisd, u32 length)
-{
- u32 count;
- int offset;
- u32 foo;
- u8 *cis = cisd;
-
- sd_trace(("%s: Func = %d\n", __func__, func));
-
- if (!sd->func_cis_ptr[func]) {
- memset(cis, 0, length);
- sd_err(("%s: no func_cis_ptr[%d]\n", __func__, func));
- return -ENOTSUPP;
- }
-
- sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __func__, func,
- sd->func_cis_ptr[func]));
-
- for (count = 0; count < length; count++) {
- offset = sd->func_cis_ptr[func] + count;
- if (brcmf_sdioh_card_regread(sd, 0, offset, 1, &foo) < 0) {
- sd_err(("%s: regread failed: Can't read CIS\n",
- __func__));
- return -EIO;
- }
-
- *cis = (u8) (foo & 0xff);
- cis++;
- }
-
- return 0;
-}
-
-extern int
-brcmf_sdioh_request_byte(struct sdioh_info *sd, uint rw, uint func,
- uint regaddr, u8 *byte)
-{
- int err_ret;
-
- sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __func__, rw, func,
- regaddr));
-
- BRCMF_PM_RESUME_WAIT(sdioh_request_byte_wait);
- BRCMF_PM_RESUME_RETURN_ERROR(-EIO);
- if (rw) { /* CMD52 Write */
- if (func == 0) {
- /* Can only directly write to some F0 registers.
- * Handle F2 enable
- * as a special case.
- */
- if (regaddr == SDIO_CCCR_IOEx) {
- if (gInstance->func[2]) {
- sdio_claim_host(gInstance->func[2]);
- if (*byte & SDIO_FUNC_ENABLE_2) {
- /* Enable Function 2 */
- err_ret =
- sdio_enable_func
- (gInstance->func[2]);
- if (err_ret)
- sd_err(("request_byte: "
- "enable F2 "
- "failed:%d\n",
- err_ret));
- } else {
- /* Disable Function 2 */
- err_ret =
- sdio_disable_func
- (gInstance->func[2]);
- if (err_ret)
- sd_err(("request_byte: "
- "Disab F2 "
- "failed:%d\n",
- err_ret));
- }
- sdio_release_host(gInstance->func[2]);
- }
- }
- /* to allow abort command through F1 */
- else if (regaddr == SDIO_CCCR_ABORT) {
- sdio_claim_host(gInstance->func[func]);
- /*
- * this sdio_f0_writeb() can be replaced
- * with another api
- * depending upon MMC driver change.
- * As of this time, this is temporaray one
- */
- sdio_writeb(gInstance->func[func], *byte,
- regaddr, &err_ret);
- sdio_release_host(gInstance->func[func]);
- } else if (regaddr < 0xF0) {
- sd_err(("brcmf: F0 Wr:0x%02x: write "
- "disallowed\n", regaddr));
- } else {
- /* Claim host controller, perform F0 write,
- and release */
- sdio_claim_host(gInstance->func[func]);
- sdio_f0_writeb(gInstance->func[func], *byte,
- regaddr, &err_ret);
- sdio_release_host(gInstance->func[func]);
- }
- } else {
- /* Claim host controller, perform Fn write,
- and release */
- sdio_claim_host(gInstance->func[func]);
- sdio_writeb(gInstance->func[func], *byte, regaddr,
- &err_ret);
- sdio_release_host(gInstance->func[func]);
- }
- } else { /* CMD52 Read */
- /* Claim host controller, perform Fn read, and release */
- sdio_claim_host(gInstance->func[func]);
-
- if (func == 0) {
- *byte =
- sdio_f0_readb(gInstance->func[func], regaddr,
- &err_ret);
- } else {
- *byte =
- sdio_readb(gInstance->func[func], regaddr,
- &err_ret);
- }
-
- sdio_release_host(gInstance->func[func]);
- }
-
- if (err_ret)
- sd_err(("brcmf: Failed to %s byte F%d:@0x%05x=%02x, "
- "Err: %d\n", rw ? "Write" : "Read", func, regaddr,
- *byte, err_ret));
-
- return err_ret;
-}
-
-extern int
-brcmf_sdioh_request_word(struct sdioh_info *sd, uint cmd_type, uint rw,
- uint func, uint addr, u32 *word, uint nbytes)
-{
- int err_ret = -EIO;
-
- if (func == 0) {
- sd_err(("%s: Only CMD52 allowed to F0.\n", __func__));
- return -EINVAL;
- }
-
- sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
- __func__, cmd_type, rw, func, addr, nbytes));
-
- BRCMF_PM_RESUME_WAIT(sdioh_request_word_wait);
- BRCMF_PM_RESUME_RETURN_ERROR(-EIO);
- /* Claim host controller */
- sdio_claim_host(gInstance->func[func]);
-
- if (rw) { /* CMD52 Write */
- if (nbytes == 4) {
- sdio_writel(gInstance->func[func], *word, addr,
- &err_ret);
- } else if (nbytes == 2) {
- sdio_writew(gInstance->func[func], (*word & 0xFFFF),
- addr, &err_ret);
- } else {
- sd_err(("%s: Invalid nbytes: %d\n", __func__, nbytes));
- }
- } else { /* CMD52 Read */
- if (nbytes == 4) {
- *word =
- sdio_readl(gInstance->func[func], addr, &err_ret);
- } else if (nbytes == 2) {
- *word =
- sdio_readw(gInstance->func[func], addr,
- &err_ret) & 0xFFFF;
- } else {
- sd_err(("%s: Invalid nbytes: %d\n", __func__, nbytes));
- }
- }
-
- /* Release host controller */
- sdio_release_host(gInstance->func[func]);
-
- if (err_ret) {
- sd_err(("brcmf: Failed to %s word, Err: 0x%08x\n",
- rw ? "Write" : "Read", err_ret));
- }
-
- return err_ret;
-}
-
-static int
-brcmf_sdioh_request_packet(struct sdioh_info *sd, uint fix_inc, uint write,
- uint func, uint addr, struct sk_buff *pkt)
-{
- bool fifo = (fix_inc == SDIOH_DATA_FIX);
- u32 SGCount = 0;
- int err_ret = 0;
-
- struct sk_buff *pnext;
-
- sd_trace(("%s: Enter\n", __func__));
-
- BRCMF_PM_RESUME_WAIT(sdioh_request_packet_wait);
- BRCMF_PM_RESUME_RETURN_ERROR(-EIO);
-
- /* Claim host controller */
- sdio_claim_host(gInstance->func[func]);
- for (pnext = pkt; pnext; pnext = pnext->next) {
- uint pkt_len = pnext->len;
- pkt_len += 3;
- pkt_len &= 0xFFFFFFFC;
-
- if ((write) && (!fifo)) {
- err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
- ((u8 *) (pnext->data)),
- pkt_len);
- } else if (write) {
- err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
- ((u8 *) (pnext->data)),
- pkt_len);
- } else if (fifo) {
- err_ret = sdio_readsb(gInstance->func[func],
- ((u8 *) (pnext->data)),
- addr, pkt_len);
- } else {
- err_ret = sdio_memcpy_fromio(gInstance->func[func],
- ((u8 *) (pnext->data)),
- addr, pkt_len);
- }
-
- if (err_ret) {
- sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d,"
- "ERR=0x%08x\n", __func__,
- (write) ? "TX" : "RX",
- pnext, SGCount, addr, pkt_len, err_ret));
- } else {
- sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
- __func__,
- (write) ? "TX" : "RX",
- pnext, SGCount, addr, pkt_len));
- }
-
- if (!fifo)
- addr += pkt_len;
- SGCount++;
-
- }
-
- /* Release host controller */
- sdio_release_host(gInstance->func[func]);
-
- sd_trace(("%s: Exit\n", __func__));
- return err_ret;
-}
-
-/*
- * This function takes a buffer or packet, and fixes everything up
- * so that in the end, a DMA-able packet is created.
- *
- * A buffer does not have an associated packet pointer,
- * and may or may not be aligned.
- * A packet may consist of a single packet, or a packet chain.
- * If it is a packet chain, then all the packets in the chain
- * must be properly aligned.
- *
- * If the packet data is not aligned, then there may only be
- * one packet, and in this case, it is copied to a new
- * aligned packet.
- *
- */
-extern int
-brcmf_sdioh_request_buffer(struct sdioh_info *sd, uint pio_dma, uint fix_inc,
- uint write, uint func, uint addr, uint reg_width,
- uint buflen_u, u8 *buffer, struct sk_buff *pkt)
-{
- int Status;
- struct sk_buff *mypkt = NULL;
-
- sd_trace(("%s: Enter\n", __func__));
-
- BRCMF_PM_RESUME_WAIT(sdioh_request_buffer_wait);
- BRCMF_PM_RESUME_RETURN_ERROR(-EIO);
- /* Case 1: we don't have a packet. */
- if (pkt == NULL) {
- sd_data(("%s: Creating new %s Packet, len=%d\n",
- __func__, write ? "TX" : "RX", buflen_u));
- mypkt = brcmu_pkt_buf_get_skb(buflen_u);
- if (!mypkt) {
- sd_err(("%s: brcmu_pkt_buf_get_skb failed: len %d\n",
- __func__, buflen_u));
- return -EIO;
- }
-
- /* For a write, copy the buffer data into the packet. */
- if (write)
- memcpy(mypkt->data, buffer, buflen_u);
-
- Status = brcmf_sdioh_request_packet(sd, fix_inc, write, func,
- addr, mypkt);
-
- /* For a read, copy the packet data back to the buffer. */
- if (!write)
- memcpy(buffer, mypkt->data, buflen_u);
-
- brcmu_pkt_buf_free_skb(mypkt);
- } else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
- /*
- * Case 2: We have a packet, but it is unaligned.
- * In this case, we cannot have a chain (pkt->next == NULL)
- */
- sd_data(("%s: Creating aligned %s Packet, len=%d\n",
- __func__, write ? "TX" : "RX", pkt->len));
- mypkt = brcmu_pkt_buf_get_skb(pkt->len);
- if (!mypkt) {
- sd_err(("%s: brcmu_pkt_buf_get_skb failed: len %d\n",
- __func__, pkt->len));
- return -EIO;
- }
-
- /* For a write, copy the buffer data into the packet. */
- if (write)
- memcpy(mypkt->data, pkt->data, pkt->len);
-
- Status = brcmf_sdioh_request_packet(sd, fix_inc, write, func,
- addr, mypkt);
-
- /* For a read, copy the packet data back to the buffer. */
- if (!write)
- memcpy(pkt->data, mypkt->data, mypkt->len);
-
- brcmu_pkt_buf_free_skb(mypkt);
- } else { /* case 3: We have a packet and
- it is aligned. */
- sd_data(("%s: Aligned %s Packet, direct DMA\n",
- __func__, write ? "Tx" : "Rx"));
- Status = brcmf_sdioh_request_packet(sd, fix_inc, write, func,
- addr, pkt);
- }
-
- return Status;
-}
-
-/* this function performs "abort" for both of host & device */
-extern int brcmf_sdioh_abort(struct sdioh_info *sd, uint func)
-{
- char t_func = (char)func;
- sd_trace(("%s: Enter\n", __func__));
-
- /* issue abort cmd52 command through F0 */
- brcmf_sdioh_request_byte(sd, SDIOH_WRITE, SDIO_FUNC_0, SDIO_CCCR_ABORT,
- &t_func);
-
- sd_trace(("%s: Exit\n", __func__));
- return 0;
-}
-
-/* Disable device interrupt */
-void brcmf_sdioh_dev_intr_off(struct sdioh_info *sd)
-{
- sd_trace(("%s: %d\n", __func__, sd->use_client_ints));
- sd->intmask &= ~CLIENT_INTR;
-}
-
-/* Enable device interrupt */
-void brcmf_sdioh_dev_intr_on(struct sdioh_info *sd)
-{
- sd_trace(("%s: %d\n", __func__, sd->use_client_ints));
- sd->intmask |= CLIENT_INTR;
-}
-
-/* Read client card reg */
-int
-brcmf_sdioh_card_regread(struct sdioh_info *sd, int func, u32 regaddr,
- int regsize, u32 *data)
-{
-
- if ((func == 0) || (regsize == 1)) {
- u8 temp = 0;
-
- brcmf_sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
- *data = temp;
- *data &= 0xff;
- sd_data(("%s: byte read data=0x%02x\n", __func__, *data));
- } else {
- brcmf_sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data,
- regsize);
- if (regsize == 2)
- *data &= 0xffff;
-
- sd_data(("%s: word read data=0x%08x\n", __func__, *data));
- }
-
- return SUCCESS;
-}
-
-static void brcmf_sdioh_irqhandler(struct sdio_func *func)
-{
- struct sdioh_info *sd;
-
- sd_trace(("brcmf: ***IRQHandler\n"));
- sd = gInstance->sd;
-
- sdio_release_host(gInstance->func[0]);
-
- if (sd->use_client_ints) {
- sd->intrcount++;
- (sd->intr_handler) (sd->intr_handler_arg);
- } else {
- sd_err(("brcmf: ***IRQHandler\n"));
-
- sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
- __func__, sd->client_intr_enabled, sd->intr_handler));
- }
-
- sdio_claim_host(gInstance->func[0]);
-}
-
-/* interrupt handler for F2 (dummy handler) */
-static void brcmf_sdioh_irqhandler_f2(struct sdio_func *func)
-{
- struct sdioh_info *sd;
-
- sd_trace(("brcmf: ***IRQHandlerF2\n"));
-
- sd = gInstance->sd;
-}
-
-static int brcmf_ops_sdio_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- int ret = 0;
- static struct sdio_func sdio_func_0;
- sd_trace(("sdio_probe: %s Enter\n", __func__));
- sd_trace(("sdio_probe: func->class=%x\n", func->class));
- sd_trace(("sdio_vendor: 0x%04x\n", func->vendor));
- sd_trace(("sdio_device: 0x%04x\n", func->device));
- sd_trace(("Function#: 0x%04x\n", func->num));
-
- if (func->num == 1) {
- sdio_func_0.num = 0;
- sdio_func_0.card = func->card;
- gInstance->func[0] = &sdio_func_0;
- if (func->device == 0x4) { /* 4318 */
- gInstance->func[2] = NULL;
- sd_trace(("NIC found, calling brcmf_sdio_probe...\n"));
- ret = brcmf_sdio_probe(&sdmmc_dev);
- }
- }
-
- gInstance->func[func->num] = func;
-
- if (func->num == 2) {
- brcmf_cfg80211_sdio_func(func);
- sd_trace(("F2 found, calling brcmf_sdio_probe...\n"));
- ret = brcmf_sdio_probe(&sdmmc_dev);
- }
-
- return ret;
-}
-
-static void brcmf_ops_sdio_remove(struct sdio_func *func)
-{
- sd_trace(("%s Enter\n", __func__));
- sd_info(("func->class=%x\n", func->class));
- sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
- sd_info(("sdio_device: 0x%04x\n", func->device));
- sd_info(("Function#: 0x%04x\n", func->num));
-
- if (func->num == 2) {
- sd_trace(("F2 found, calling brcmf_sdio_remove...\n"));
- brcmf_sdio_remove(&sdmmc_dev);
- }
-}
-
-
-#ifdef CONFIG_PM
-static int brcmf_sdio_suspend(struct device *dev)
-{
- mmc_pm_flag_t sdio_flags;
- int ret = 0;
-
- sd_trace(("%s\n", __func__));
-
- sdio_flags = sdio_get_host_pm_caps(gInstance->func[1]);
- if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
- sd_err(("Host can't keep power while suspended\n"));
- return -EINVAL;
- }
-
- ret = sdio_set_host_pm_flags(gInstance->func[1], MMC_PM_KEEP_POWER);
- if (ret) {
- sd_err(("Failed to set pm_flags\n"));
- return ret;
- }
-
- brcmf_sdio_wdtmr_enable(false);
-
- return ret;
-}
-
-static int brcmf_sdio_resume(struct device *dev)
-{
- brcmf_sdio_wdtmr_enable(true);
- return 0;
-}
-#endif /* CONFIG_PM */
-
-int brcmf_sdioh_osinit(struct sdioh_info *sd)
-{
- struct sdos_info *sdos;
-
- sdos = kmalloc(sizeof(struct sdos_info), GFP_ATOMIC);
- sd->sdos_info = (void *)sdos;
- if (sdos == NULL)
- return -ENOMEM;
-
- sdos->sd = sd;
- spin_lock_init(&sdos->lock);
- return 0;
-}
-
-void brcmf_sdioh_osfree(struct sdioh_info *sd)
-{
- struct sdos_info *sdos;
-
- sdos = (struct sdos_info *)sd->sdos_info;
- kfree(sdos);
-}
-
-/* Interrupt enable/disable */
-int brcmf_sdioh_interrupt_set(struct sdioh_info *sd, bool enable)
-{
- unsigned long flags;
- struct sdos_info *sdos;
-
- sd_trace(("%s: %s\n", __func__, enable ? "Enabling" : "Disabling"));
-
- sdos = (struct sdos_info *)sd->sdos_info;
-
- if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
- sd_err(("%s: no handler registered, will not enable\n",
- __func__));
- return -EINVAL;
- }
-
- /* Ensure atomicity for enable/disable calls */
- spin_lock_irqsave(&sdos->lock, flags);
-
- sd->client_intr_enabled = enable;
- if (enable)
- brcmf_sdioh_dev_intr_on(sd);
- else
- brcmf_sdioh_dev_intr_off(sd);
-
- spin_unlock_irqrestore(&sdos->lock, flags);
-
- return 0;
-}
-
-/*
- * module init
-*/
-int brcmf_sdio_function_init(void)
-{
- int error = 0;
- sd_trace(("brcmf_sdio_function_init: %s Enter\n", __func__));
-
- gInstance = kzalloc(sizeof(struct brcmf_sdmmc_instance), GFP_KERNEL);
- if (!gInstance)
- return -ENOMEM;
-
- memset(&sdmmc_dev, 0, sizeof(sdmmc_dev));
- error = sdio_register_driver(&brcmf_sdmmc_driver);
-
- return error;
-}
-
-/*
- * module cleanup
-*/
-void brcmf_sdio_function_cleanup(void)
-{
- sd_trace(("%s Enter\n", __func__));
-
- sdio_unregister_driver(&brcmf_sdmmc_driver);
-
- kfree(gInstance);
-}
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd.h b/drivers/staging/brcm80211/brcmfmac/dhd.h
deleted file mode 100644
index 82bf04df16d..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/dhd.h
+++ /dev/null
@@ -1,904 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/****************
- * Common types *
- */
-
-#ifndef _BRCMF_H_
-#define _BRCMF_H_
-
-#define BRCMF_VERSION_STR "4.218.248.5"
-
-#define BRCMF_C_IOCTL_SMLEN 256 /* "small" ioctl buffer required */
-#define BRCMF_C_IOCTL_MEDLEN 1536 /* "med" ioctl buffer required */
-#define BRCMF_C_IOCTL_MAXLEN 8192
-
-#define BRCMF_C_UP 2
-#define BRCMF_C_SET_PROMISC 10
-#define BRCMF_C_GET_RATE 12
-#define BRCMF_C_GET_INFRA 19
-#define BRCMF_C_SET_INFRA 20
-#define BRCMF_C_GET_AUTH 21
-#define BRCMF_C_SET_AUTH 22
-#define BRCMF_C_GET_BSSID 23
-#define BRCMF_C_GET_SSID 25
-#define BRCMF_C_SET_SSID 26
-#define BRCMF_C_GET_CHANNEL 29
-#define BRCMF_C_GET_SRL 31
-#define BRCMF_C_GET_LRL 33
-#define BRCMF_C_GET_RADIO 37
-#define BRCMF_C_SET_RADIO 38
-#define BRCMF_C_GET_PHYTYPE 39
-#define BRCMF_C_SET_KEY 45
-#define BRCMF_C_SET_PASSIVE_SCAN 49
-#define BRCMF_C_SCAN 50
-#define BRCMF_C_SCAN_RESULTS 51
-#define BRCMF_C_DISASSOC 52
-#define BRCMF_C_REASSOC 53
-#define BRCMF_C_SET_ROAM_TRIGGER 55
-#define BRCMF_C_SET_ROAM_DELTA 57
-#define BRCMF_C_GET_DTIMPRD 77
-#define BRCMF_C_SET_COUNTRY 84
-#define BRCMF_C_GET_PM 85
-#define BRCMF_C_SET_PM 86
-#define BRCMF_C_GET_AP 117
-#define BRCMF_C_SET_AP 118
-#define BRCMF_C_GET_RSSI 127
-#define BRCMF_C_GET_WSEC 133
-#define BRCMF_C_SET_WSEC 134
-#define BRCMF_C_GET_PHY_NOISE 135
-#define BRCMF_C_GET_BSS_INFO 136
-#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
-#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
-#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201
-#define BRCMF_C_GET_VALID_CHANNELS 217
-#define BRCMF_C_GET_KEY_PRIMARY 235
-#define BRCMF_C_SET_KEY_PRIMARY 236
-#define BRCMF_C_SET_SCAN_PASSIVE_TIME 258
-#define BRCMF_C_GET_VAR 262
-#define BRCMF_C_SET_VAR 263
-
-/* phy types (returned by WLC_GET_PHYTPE) */
-#define WLC_PHY_TYPE_A 0
-#define WLC_PHY_TYPE_B 1
-#define WLC_PHY_TYPE_G 2
-#define WLC_PHY_TYPE_N 4
-#define WLC_PHY_TYPE_LP 5
-#define WLC_PHY_TYPE_SSN 6
-#define WLC_PHY_TYPE_HT 7
-#define WLC_PHY_TYPE_LCN 8
-#define WLC_PHY_TYPE_NULL 0xf
-
-#define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter, u)
-#define BRCMF_PKT_FILTER_PATTERN_FIXED_LEN \
- offsetof(struct brcmf_pkt_filter_pattern, mask_and_pattern)
-
-#define BRCMF_EVENTING_MASK_LEN 16
-
-#define TOE_TX_CSUM_OL 0x00000001
-#define TOE_RX_CSUM_OL 0x00000002
-
-/* maximum channels returned by the get valid channels iovar */
-#define WL_NUMCHANNELS 64
-
-#define BRCMF_BSS_INFO_VERSION 108 /* current ver of brcmf_bss_info struct */
-
-/* size of brcmf_scan_params not including variable length array */
-#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
-
-/* masks for channel and ssid count */
-#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
-#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
-
-#define BRCMF_SCAN_ACTION_START 1
-#define BRCMF_SCAN_ACTION_CONTINUE 2
-#define WL_SCAN_ACTION_ABORT 3
-
-#define BRCMF_ISCAN_REQ_VERSION 1
-
-/* brcmf_iscan_results status values */
-#define BRCMF_SCAN_RESULTS_SUCCESS 0
-#define BRCMF_SCAN_RESULTS_PARTIAL 1
-#define BRCMF_SCAN_RESULTS_PENDING 2
-#define BRCMF_SCAN_RESULTS_ABORTED 3
-#define BRCMF_SCAN_RESULTS_NO_MEM 4
-
-#define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */
-#define BRCMF_PRIMARY_KEY (1 << 1) /* primary (ie tx) key */
-#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */
-#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */
-#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for a IBSS PEER */
-
-/* For supporting multiple interfaces */
-#define BRCMF_MAX_IFS 16
-#define BRCMF_DEL_IF -0xe
-#define BRCMF_BAD_IF -0xf
-
-#define DOT11_BSSTYPE_ANY 2
-#define DOT11_MAX_DEFAULT_KEYS 4
-
-#define BRCMF_EVENT_MSG_LINK 0x01
-#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
-#define BRCMF_EVENT_MSG_GROUP 0x04
-
-struct brcmf_event_msg {
- u16 version;
- u16 flags;
- u32 event_type;
- u32 status;
- u32 reason;
- u32 auth_type;
- u32 datalen;
- u8 addr[ETH_ALEN];
- char ifname[IFNAMSIZ];
-} __packed;
-
-struct brcm_ethhdr {
- u16 subtype;
- u16 length;
- u8 version;
- u8 oui[3];
- u16 usr_subtype;
-} __packed;
-
-struct brcmf_event {
- struct ethhdr eth;
- struct brcm_ethhdr hdr;
- struct brcmf_event_msg msg;
-} __packed;
-
-struct dngl_stats {
- unsigned long rx_packets; /* total packets received */
- unsigned long tx_packets; /* total packets transmitted */
- unsigned long rx_bytes; /* total bytes received */
- unsigned long tx_bytes; /* total bytes transmitted */
- unsigned long rx_errors; /* bad packets received */
- unsigned long tx_errors; /* packet transmit problems */
- unsigned long rx_dropped; /* packets dropped by dongle */
- unsigned long tx_dropped; /* packets dropped by dongle */
- unsigned long multicast; /* multicast packets received */
-};
-
-#define BRCMF_E_SET_SSID 0
-#define BRCMF_E_JOIN 1
-#define BRCMF_E_START 2
-#define BRCMF_E_AUTH 3
-#define BRCMF_E_AUTH_IND 4
-#define BRCMF_E_DEAUTH 5
-#define BRCMF_E_DEAUTH_IND 6
-#define BRCMF_E_ASSOC 7
-#define BRCMF_E_ASSOC_IND 8
-#define BRCMF_E_REASSOC 9
-#define BRCMF_E_REASSOC_IND 10
-#define BRCMF_E_DISASSOC 11
-#define BRCMF_E_DISASSOC_IND 12
-#define BRCMF_E_QUIET_START 13
-#define BRCMF_E_QUIET_END 14
-#define BRCMF_E_BEACON_RX 15
-#define BRCMF_E_LINK 16
-#define BRCMF_E_MIC_ERROR 17
-#define BRCMF_E_NDIS_LINK 18
-#define BRCMF_E_ROAM 19
-#define BRCMF_E_TXFAIL 20
-#define BRCMF_E_PMKID_CACHE 21
-#define BRCMF_E_RETROGRADE_TSF 22
-#define BRCMF_E_PRUNE 23
-#define BRCMF_E_AUTOAUTH 24
-#define BRCMF_E_EAPOL_MSG 25
-#define BRCMF_E_SCAN_COMPLETE 26
-#define BRCMF_E_ADDTS_IND 27
-#define BRCMF_E_DELTS_IND 28
-#define BRCMF_E_BCNSENT_IND 29
-#define BRCMF_E_BCNRX_MSG 30
-#define BRCMF_E_BCNLOST_MSG 31
-#define BRCMF_E_ROAM_PREP 32
-#define BRCMF_E_PFN_NET_FOUND 33
-#define BRCMF_E_PFN_NET_LOST 34
-#define BRCMF_E_RESET_COMPLETE 35
-#define BRCMF_E_JOIN_START 36
-#define BRCMF_E_ROAM_START 37
-#define BRCMF_E_ASSOC_START 38
-#define BRCMF_E_IBSS_ASSOC 39
-#define BRCMF_E_RADIO 40
-#define BRCMF_E_PSM_WATCHDOG 41
-#define BRCMF_E_PROBREQ_MSG 44
-#define BRCMF_E_SCAN_CONFIRM_IND 45
-#define BRCMF_E_PSK_SUP 46
-#define BRCMF_E_COUNTRY_CODE_CHANGED 47
-#define BRCMF_E_EXCEEDED_MEDIUM_TIME 48
-#define BRCMF_E_ICV_ERROR 49
-#define BRCMF_E_UNICAST_DECODE_ERROR 50
-#define BRCMF_E_MULTICAST_DECODE_ERROR 51
-#define BRCMF_E_TRACE 52
-#define BRCMF_E_IF 54
-#define BRCMF_E_RSSI 56
-#define BRCMF_E_PFN_SCAN_COMPLETE 57
-#define BRCMF_E_EXTLOG_MSG 58
-#define BRCMF_E_ACTION_FRAME 59
-#define BRCMF_E_ACTION_FRAME_COMPLETE 60
-#define BRCMF_E_PRE_ASSOC_IND 61
-#define BRCMF_E_PRE_REASSOC_IND 62
-#define BRCMF_E_CHANNEL_ADOPTED 63
-#define BRCMF_E_AP_STARTED 64
-#define BRCMF_E_DFS_AP_STOP 65
-#define BRCMF_E_DFS_AP_RESUME 66
-#define BRCMF_E_RESERVED1 67
-#define BRCMF_E_RESERVED2 68
-#define BRCMF_E_ESCAN_RESULT 69
-#define BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70
-#define BRCMF_E_DCS_REQUEST 73
-
-#define BRCMF_E_FIFO_CREDIT_MAP 74
-
-#define BRCMF_E_LAST 75
-
-#define BRCMF_E_STATUS_SUCCESS 0
-#define BRCMF_E_STATUS_FAIL 1
-#define BRCMF_E_STATUS_TIMEOUT 2
-#define BRCMF_E_STATUS_NO_NETWORKS 3
-#define BRCMF_E_STATUS_ABORT 4
-#define BRCMF_E_STATUS_NO_ACK 5
-#define BRCMF_E_STATUS_UNSOLICITED 6
-#define BRCMF_E_STATUS_ATTEMPT 7
-#define BRCMF_E_STATUS_PARTIAL 8
-#define BRCMF_E_STATUS_NEWSCAN 9
-#define BRCMF_E_STATUS_NEWASSOC 10
-#define BRCMF_E_STATUS_11HQUIET 11
-#define BRCMF_E_STATUS_SUPPRESS 12
-#define BRCMF_E_STATUS_NOCHANS 13
-#define BRCMF_E_STATUS_CS_ABORT 15
-#define BRCMF_E_STATUS_ERROR 16
-
-#define BRCMF_E_REASON_INITIAL_ASSOC 0
-#define BRCMF_E_REASON_LOW_RSSI 1
-#define BRCMF_E_REASON_DEAUTH 2
-#define BRCMF_E_REASON_DISASSOC 3
-#define BRCMF_E_REASON_BCNS_LOST 4
-#define BRCMF_E_REASON_MINTXRATE 9
-#define BRCMF_E_REASON_TXFAIL 10
-
-#define BRCMF_E_REASON_FAST_ROAM_FAILED 5
-#define BRCMF_E_REASON_DIRECTED_ROAM 6
-#define BRCMF_E_REASON_TSPEC_REJECTED 7
-#define BRCMF_E_REASON_BETTER_AP 8
-
-#define BRCMF_E_PRUNE_ENCR_MISMATCH 1
-#define BRCMF_E_PRUNE_BCAST_BSSID 2
-#define BRCMF_E_PRUNE_MAC_DENY 3
-#define BRCMF_E_PRUNE_MAC_NA 4
-#define BRCMF_E_PRUNE_REG_PASSV 5
-#define BRCMF_E_PRUNE_SPCT_MGMT 6
-#define BRCMF_E_PRUNE_RADAR 7
-#define BRCMF_E_RSN_MISMATCH 8
-#define BRCMF_E_PRUNE_NO_COMMON_RATES 9
-#define BRCMF_E_PRUNE_BASIC_RATES 10
-#define BRCMF_E_PRUNE_CIPHER_NA 12
-#define BRCMF_E_PRUNE_KNOWN_STA 13
-#define BRCMF_E_PRUNE_WDS_PEER 15
-#define BRCMF_E_PRUNE_QBSS_LOAD 16
-#define BRCMF_E_PRUNE_HOME_AP 17
-
-#define BRCMF_E_SUP_OTHER 0
-#define BRCMF_E_SUP_DECRYPT_KEY_DATA 1
-#define BRCMF_E_SUP_BAD_UCAST_WEP128 2
-#define BRCMF_E_SUP_BAD_UCAST_WEP40 3
-#define BRCMF_E_SUP_UNSUP_KEY_LEN 4
-#define BRCMF_E_SUP_PW_KEY_CIPHER 5
-#define BRCMF_E_SUP_MSG3_TOO_MANY_IE 6
-#define BRCMF_E_SUP_MSG3_IE_MISMATCH 7
-#define BRCMF_E_SUP_NO_INSTALL_FLAG 8
-#define BRCMF_E_SUP_MSG3_NO_GTK 9
-#define BRCMF_E_SUP_GRP_KEY_CIPHER 10
-#define BRCMF_E_SUP_GRP_MSG1_NO_GTK 11
-#define BRCMF_E_SUP_GTK_DECRYPT_FAIL 12
-#define BRCMF_E_SUP_SEND_FAIL 13
-#define BRCMF_E_SUP_DEAUTH 14
-
-#define BRCMF_E_IF_ADD 1
-#define BRCMF_E_IF_DEL 2
-#define BRCMF_E_IF_CHANGE 3
-
-#define BRCMF_E_IF_ROLE_STA 0
-#define BRCMF_E_IF_ROLE_AP 1
-#define BRCMF_E_IF_ROLE_WDS 2
-
-#define BRCMF_E_LINK_BCN_LOSS 1
-#define BRCMF_E_LINK_DISASSOC 2
-#define BRCMF_E_LINK_ASSOC_REC 3
-#define BRCMF_E_LINK_BSSCFG_DIS 4
-
-/* The level of bus communication with the dongle */
-enum brcmf_bus_state {
- BRCMF_BUS_DOWN, /* Not ready for frame transfers */
- BRCMF_BUS_LOAD, /* Download access only (CPU reset) */
- BRCMF_BUS_DATA /* Ready for frame transfers */
-};
-
-/* Pattern matching filter. Specifies an offset within received packets to
- * start matching, the pattern to match, the size of the pattern, and a bitmask
- * that indicates which bits within the pattern should be matched.
- */
-struct brcmf_pkt_filter_pattern {
- u32 offset; /* Offset within received packet to start pattern matching.
- * Offset '0' is the first byte of the ethernet header.
- */
- u32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */
- u8 mask_and_pattern[1]; /* Variable length mask and pattern data. mask starts
- * at offset 0. Pattern immediately follows mask.
- */
-};
-
-/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
-struct brcmf_pkt_filter {
- u32 id; /* Unique filter id, specified by app. */
- u32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */
- u32 negate_match; /* Negate the result of filter matches */
- union { /* Filter definitions */
- struct brcmf_pkt_filter_pattern pattern; /* Filter pattern */
- } u;
-};
-
-/* IOVAR "pkt_filter_enable" parameter. */
-struct brcmf_pkt_filter_enable {
- u32 id; /* Unique filter id */
- u32 enable; /* Enable/disable bool */
-};
-
-/* BSS info structure
- * Applications MUST CHECK ie_offset field and length field to access IEs and
- * next bss_info structure in a vector (in struct brcmf_scan_results)
- */
-struct brcmf_bss_info {
- u32 version; /* version field */
- u32 length; /* byte length of data in this record,
- * starting at version and including IEs
- */
- u8 BSSID[ETH_ALEN];
- u16 beacon_period; /* units are Kusec */
- u16 capability; /* Capability information */
- u8 SSID_len;
- u8 SSID[32];
- struct {
- uint count; /* # rates in this set */
- u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
- } rateset; /* supported rates */
- chanspec_t chanspec; /* chanspec for bss */
- u16 atim_window; /* units are Kusec */
- u8 dtim_period; /* DTIM period */
- s16 RSSI; /* receive signal strength (in dBm) */
- s8 phy_noise; /* noise (in dBm) */
-
- u8 n_cap; /* BSS is 802.11N Capable */
- u32 nbss_cap; /* 802.11N BSS Capabilities (based on HT_CAP_*) */
- u8 ctl_ch; /* 802.11N BSS control channel number */
- u32 reserved32[1]; /* Reserved for expansion of BSS properties */
- u8 flags; /* flags */
- u8 reserved[3]; /* Reserved for expansion of BSS properties */
- u8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
-
- u16 ie_offset; /* offset at which IEs start, from beginning */
- u32 ie_length; /* byte length of Information Elements */
- s16 SNR; /* average SNR of during frame reception */
- /* Add new fields here */
- /* variable length Information Elements */
-};
-
-struct brcmf_ssid {
- u32 SSID_len;
- unsigned char SSID[32];
-};
-
-struct brcmf_scan_params {
- struct brcmf_ssid ssid; /* default: {0, ""} */
- u8 bssid[ETH_ALEN]; /* default: bcast */
- s8 bss_type; /* default: any,
- * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
- */
- u8 scan_type; /* flags, 0 use default */
- s32 nprobes; /* -1 use default, number of probes per channel */
- s32 active_time; /* -1 use default, dwell time per channel for
- * active scanning
- */
- s32 passive_time; /* -1 use default, dwell time per channel
- * for passive scanning
- */
- s32 home_time; /* -1 use default, dwell time for the home channel
- * between channel scans
- */
- s32 channel_num; /* count of channels and ssids that follow
- *
- * low half is count of channels in
- * channel_list, 0 means default (use all
- * available channels)
- *
- * high half is entries in struct brcmf_ssid
- * array that follows channel_list, aligned for
- * s32 (4 bytes) meaning an odd channel count
- * implies a 2-byte pad between end of
- * channel_list and first ssid
- *
- * if ssid count is zero, single ssid in the
- * fixed parameter portion is assumed, otherwise
- * ssid in the fixed portion is ignored
- */
- u16 channel_list[1]; /* list of chanspecs */
-};
-
-/* incremental scan struct */
-struct brcmf_iscan_params {
- u32 version;
- u16 action;
- u16 scan_duration;
- struct brcmf_scan_params params;
-};
-
-/* 3 fields + size of brcmf_scan_params, not including variable length array */
-#define BRCMF_ISCAN_PARAMS_FIXED_SIZE \
- (offsetof(struct brcmf_iscan_params, params) + \
- sizeof(struct brcmf_ssid))
-
-struct brcmf_scan_results {
- u32 buflen;
- u32 version;
- u32 count;
- struct brcmf_bss_info bss_info[1];
-};
-
-/* used for association with a specific BSSID and chanspec list */
-struct brcmf_assoc_params {
- u8 bssid[ETH_ALEN]; /* 00:00:00:00:00:00: broadcast scan */
- s32 chanspec_num; /* 0: all available channels,
- * otherwise count of chanspecs in chanspec_list
- */
- chanspec_t chanspec_list[1]; /* list of chanspecs */
-};
-#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
- (sizeof(struct brcmf_assoc_params) - sizeof(chanspec_t))
-
-/* used for join with or without a specific bssid and channel list */
-struct brcmf_join_params {
- struct brcmf_ssid ssid;
- struct brcmf_assoc_params params;
-};
-
-/* size of brcmf_scan_results not including variable length array */
-#define BRCMF_SCAN_RESULTS_FIXED_SIZE \
- (sizeof(struct brcmf_scan_results) - sizeof(struct brcmf_bss_info))
-
-/* incremental scan results struct */
-struct brcmf_iscan_results {
- u32 status;
- struct brcmf_scan_results results;
-};
-
-/* size of brcmf_iscan_results not including variable length array */
-#define BRCMF_ISCAN_RESULTS_FIXED_SIZE \
- (BRCMF_SCAN_RESULTS_FIXED_SIZE + \
- offsetof(struct brcmf_iscan_results, results))
-
-struct brcmf_wsec_key {
- u32 index; /* key index */
- u32 len; /* key length */
- u8 data[WLAN_MAX_KEY_LEN]; /* key data */
- u32 pad_1[18];
- u32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
- u32 flags; /* misc flags */
- u32 pad_2[2];
- int pad_3;
- int iv_initialized; /* has IV been initialized already? */
- int pad_4;
- /* Rx IV */
- struct {
- u32 hi; /* upper 32 bits of IV */
- u16 lo; /* lower 16 bits of IV */
- } rxiv;
- u32 pad_5[2];
- u8 ea[ETH_ALEN]; /* per station */
-};
-
-/* Used to get specific STA parameters */
-struct brcmf_scb_val {
- u32 val;
- u8 ea[ETH_ALEN];
-};
-
-/* channel encoding */
-struct brcmf_channel_info {
- int hw_channel;
- int target_channel;
- int scan_channel;
-};
-
-/* Linux network driver ioctl encoding */
-struct brcmf_ioctl {
- uint cmd; /* common ioctl definition */
- void *buf; /* pointer to user buffer */
- uint len; /* length of user buffer */
- u8 set; /* get or set request (optional) */
- uint used; /* bytes read or written (optional) */
- uint needed; /* bytes needed (optional) */
-};
-
-/* Forward decls for struct brcmf_pub (see below) */
-struct brcmf_bus; /* device bus info */
-struct brcmf_proto; /* device communication protocol info */
-struct brcmf_info; /* device driver info */
-
-/* Common structure for module and instance linkage */
-struct brcmf_pub {
- /* Linkage ponters */
- struct brcmf_bus *bus;
- struct brcmf_proto *prot;
- struct brcmf_info *info;
-
- /* Internal brcmf items */
- bool up; /* Driver up/down (to OS) */
- bool txoff; /* Transmit flow-controlled */
- bool dongle_reset; /* true = DEVRESET put dongle into reset */
- enum brcmf_bus_state busstate;
- uint hdrlen; /* Total BRCMF header length (proto + bus) */
- uint maxctl; /* Max size rxctl request from proto to bus */
- uint rxsz; /* Rx buffer size bus module should use */
- u8 wme_dp; /* wme discard priority */
-
- /* Dongle media info */
- bool iswl; /* Dongle-resident driver is wl */
- unsigned long drv_version; /* Version of dongle-resident driver */
- u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
- struct dngl_stats dstats; /* Stats for dongle-based data */
-
- /* Additional stats for the bus level */
- unsigned long tx_packets; /* Data packets sent to dongle */
- unsigned long tx_multicast; /* Multicast data packets sent to dongle */
- unsigned long tx_errors; /* Errors in sending data to dongle */
- unsigned long tx_ctlpkts; /* Control packets sent to dongle */
- unsigned long tx_ctlerrs; /* Errors sending control frames to dongle */
- unsigned long rx_packets; /* Packets sent up the network interface */
- unsigned long rx_multicast; /* Multicast packets sent up the network
- interface */
- unsigned long rx_errors; /* Errors processing rx data packets */
- unsigned long rx_ctlpkts; /* Control frames processed from dongle */
- unsigned long rx_ctlerrs; /* Errors in processing rx control frames */
- unsigned long rx_dropped; /* Packets dropped locally (no memory) */
- unsigned long rx_flushed; /* Packets flushed due to
- unscheduled sendup thread */
- unsigned long wd_dpc_sched; /* Number of times dpc scheduled by
- watchdog timer */
-
- unsigned long rx_readahead_cnt; /* Number of packets where header read-ahead
- was used. */
- unsigned long tx_realloc; /* Number of tx packets we had to realloc for
- headroom */
- unsigned long fc_packets; /* Number of flow control pkts recvd */
-
- /* Last error return */
- int bcmerror;
- uint tickcnt;
-
- /* Last error from dongle */
- int dongle_error;
-
- /* Suspend disable flag flag */
- int suspend_disable_flag; /* "1" to disable all extra powersaving
- during suspend */
- int in_suspend; /* flag set to 1 when early suspend called */
- int dtim_skip; /* dtim skip , default 0 means wake each dtim */
-
- /* Pkt filter defination */
- char *pktfilter[100];
- int pktfilter_count;
-
- u8 country_code[BRCM_CNTRY_BUF_SZ];
- char eventmask[BRCMF_EVENTING_MASK_LEN];
-
-};
-
-struct brcmf_if_event {
- u8 ifidx;
- u8 action;
- u8 flags;
- u8 bssidx;
-};
-
-struct brcmf_timeout {
- u32 limit; /* Expiration time (usec) */
- u32 increment; /* Current expiration increment (usec) */
- u32 elapsed; /* Current elapsed time (usec) */
- u32 tick; /* O/S tick time (usec) */
-};
-
-struct bcmevent_name {
- uint event;
- const char *name;
-};
-
-#if defined(CONFIG_PM_SLEEP)
-extern atomic_t brcmf_mmc_suspend;
-#define BRCMF_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
-#define _BRCMF_PM_RESUME_WAIT(a, b) do { \
- int retry = 0; \
- while (atomic_read(&brcmf_mmc_suspend) && retry++ != b) { \
- wait_event_timeout(a, false, HZ/100); \
- } \
- } while (0)
-#define BRCMF_PM_RESUME_WAIT(a) _BRCMF_PM_RESUME_WAIT(a, 30)
-#define BRCMF_PM_RESUME_RETURN_ERROR(a) \
- do { if (atomic_read(&brcmf_mmc_suspend)) return a; } while (0)
-
-#define BRCMF_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
-#define BRCMF_SPINWAIT_SLEEP(a, exp, us) do { \
- uint countdown = (us) + 9999; \
- while ((exp) && (countdown >= 10000)) { \
- wait_event_timeout(a, false, HZ/100); \
- countdown -= 10000; \
- } \
- } while (0)
-
-#else
-
-#define BRCMF_PM_RESUME_WAIT_INIT(a)
-#define BRCMF_PM_RESUME_WAIT(a)
-#define BRCMF_PM_RESUME_RETURN_ERROR(a)
-
-#define BRCMF_SPINWAIT_SLEEP_INIT(a)
-#define BRCMF_SPINWAIT_SLEEP(a, exp, us) do { \
- uint countdown = (us) + 9; \
- while ((exp) && (countdown >= 10)) { \
- udelay(10); \
- countdown -= 10; \
- } \
- } while (0)
-
-#endif /* defined(CONFIG_PM_SLEEP) */
-
-/*
- * Insmod parameters for debug/test
- */
-
-/* Use interrupts */
-extern uint brcmf_intr;
-
-/* Use polling */
-extern uint brcmf_poll;
-
-/* ARP offload agent mode */
-extern uint brcmf_arp_mode;
-
-/* ARP offload enable */
-extern uint brcmf_arp_enable;
-
-/* Pkt filte enable control */
-extern uint brcmf_pkt_filter_enable;
-
-/* Pkt filter init setup */
-extern uint brcmf_pkt_filter_init;
-
-/* Pkt filter mode control */
-extern uint brcmf_master_mode;
-
-/* Roaming mode control */
-extern uint brcmf_roam;
-
-/* Roaming mode control */
-extern uint brcmf_radio_up;
-
-/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
-extern int brcmf_idletime;
-#define BRCMF_IDLETIME_TICKS 1
-
-/* SDIO Drive Strength */
-extern uint brcmf_sdiod_drive_strength;
-
-/* Override to force tx queueing all the time */
-extern uint brcmf_force_tx_queueing;
-
-#ifdef SDTEST
-/* Echo packet generator (SDIO), pkts/s */
-extern uint brcmf_pktgen;
-
-/* Echo packet len (0 => sawtooth, max 1800) */
-extern uint brcmf_pktgen_len;
-#define BRCMF_MAX_PKTGEN_LEN 1800
-#endif
-
-extern const struct bcmevent_name bcmevent_names[];
-extern const int bcmevent_names_size;
-
-
-static inline void MUTEX_LOCK_INIT(struct brcmf_pub *drvr)
-{
-}
-
-static inline void MUTEX_LOCK(struct brcmf_pub *drvr)
-{
-}
-
-static inline void MUTEX_UNLOCK(struct brcmf_pub *drvr)
-{
-}
-
-static inline void MUTEX_LOCK_SOFTAP_SET_INIT(struct brcmf_pub *drvr)
-{
-}
-
-static inline void MUTEX_LOCK_SOFTAP_SET(struct brcmf_pub *drvr)
-{
-}
-
-static inline void MUTEX_UNLOCK_SOFTAP_SET(struct brcmf_pub *drvr)
-{
-}
-
-static inline void MUTEX_LOCK_WL_SCAN_SET_INIT(void)
-{
-}
-
-static inline void MUTEX_LOCK_WL_SCAN_SET(void)
-{
-}
-
-static inline void MUTEX_UNLOCK_WL_SCAN_SET(void)
-{
-}
-
-/* Indication from bus module regarding presence/insertion of dongle.
- * Return struct brcmf_pub pointer, used as handle to OS module in later calls.
- * Returned structure should have bus and prot pointers filled in.
- * bus_hdrlen specifies required headroom for bus module header.
- */
-extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus,
- uint bus_hdrlen);
-extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
-extern int brcmf_netdev_wait_pend8021x(struct net_device *dev);
-
-/* Indication from bus module regarding removal/absence of dongle */
-extern void brcmf_detach(struct brcmf_pub *drvr);
-
-/* Indication from bus module to change flow-control state */
-extern void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool on);
-
-extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
- struct sk_buff *pkt, int prec);
-
-/* Receive frame for delivery to OS. Callee disposes of rxp. */
-extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
- struct sk_buff *rxp, int numpkt);
-
-/* Return pointer to interface name */
-extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
-
-/* Notify tx completion */
-extern void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp,
- bool success);
-
-/* Query ioctl */
-extern int brcmf_proto_cdc_query_ioctl(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len);
-
-/* OS independent layer functions */
-extern int brcmf_os_proto_block(struct brcmf_pub *drvr);
-extern int brcmf_os_proto_unblock(struct brcmf_pub *drvr);
-extern int brcmf_os_ioctl_resp_wait(struct brcmf_pub *drvr, uint *condition,
- bool *pending);
-extern int brcmf_os_ioctl_resp_wake(struct brcmf_pub *drvr);
-extern unsigned int brcmf_os_get_ioctl_resp_timeout(void);
-extern void brcmf_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
-#ifdef BCMDBG
-extern int brcmf_write_to_file(struct brcmf_pub *drvr, u8 *buf, int size);
-#endif /* BCMDBG */
-
-extern void brcmf_timeout_start(struct brcmf_timeout *tmo, uint usec);
-extern int brcmf_timeout_expired(struct brcmf_timeout *tmo);
-
-extern int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name);
-extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx,
- void *pktdata, struct brcmf_event_msg *,
- void **data_ptr);
-
-extern void brcmf_c_init(void);
-
-extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, void *handle,
- char *name, u8 *mac_addr, u32 flags, u8 bssidx);
-extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx);
-
-/* Send packet to dongle via data channel */
-extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,\
- struct sk_buff *pkt);
-
-extern int brcmf_bus_devreset(struct brcmf_pub *drvr, u8 flag);
-extern int brcmf_bus_start(struct brcmf_pub *drvr);
-
-extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
-extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
- int enable, int master_mode);
-
-/* Linux network driver ioctl encoding */
-struct brcmf_c_ioctl {
- uint cmd; /* common ioctl definition */
- void *buf; /* pointer to user buffer */
- uint len; /* length of user buffer */
- bool set; /* get or set request (optional) */
- uint used; /* bytes read or written (optional) */
- uint needed; /* bytes needed (optional) */
- uint driver; /* to identify target driver */
-};
-
-/* per-driver magic numbers */
-#define BRCMF_IOCTL_MAGIC 0x00444944
-
-/* bump this number if you change the ioctl interface */
-#define BRCMF_IOCTL_VERSION 1
-#define BRCMF_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
-
-/* common ioctl definitions */
-#define BRCMF_GET_MAGIC 0
-#define BRCMF_GET_VERSION 1
-#define BRCMF_GET_VAR 2
-#define BRCMF_SET_VAR 3
-
-/* message levels */
-#define BRCMF_ERROR_VAL 0x0001
-#define BRCMF_TRACE_VAL 0x0002
-#define BRCMF_INFO_VAL 0x0004
-#define BRCMF_DATA_VAL 0x0008
-#define BRCMF_CTL_VAL 0x0010
-#define BRCMF_TIMER_VAL 0x0020
-#define BRCMF_HDRS_VAL 0x0040
-#define BRCMF_BYTES_VAL 0x0080
-#define BRCMF_INTR_VAL 0x0100
-#define BRCMF_GLOM_VAL 0x0400
-#define BRCMF_EVENT_VAL 0x0800
-#define BRCMF_BTA_VAL 0x1000
-#define BRCMF_ISCAN_VAL 0x2000
-
-#ifdef SDTEST
-/* For pktgen iovar */
-struct brcmf_pktgen {
- uint version; /* To allow structure change tracking */
- uint freq; /* Max ticks between tx/rx attempts */
- uint count; /* Test packets to send/rcv each attempt */
- uint print; /* Print counts every <print> attempts */
- uint total; /* Total packets (or bursts) */
- uint minlen; /* Minimum length of packets to send */
- uint maxlen; /* Maximum length of packets to send */
- uint numsent; /* Count of test packets sent */
- uint numrcvd; /* Count of test packets received */
- uint numfail; /* Count of test send failures */
- uint mode; /* Test mode (type of test packets) */
- uint stop; /* Stop after this many tx failures */
-};
-
-/* Version in case structure changes */
-#define BRCMF_PKTGEN_VERSION 2
-
-/* Type of test packets to use */
-#define BRCMF_PKTGEN_ECHO 1 /* Send echo requests */
-#define BRCMF_PKTGEN_SEND 2 /* Send discard packets */
-#define BRCMF_PKTGEN_RXBURST 3 /* Request dongle send N packets */
-#define BRCMF_PKTGEN_RECV 4 /* Continuous rx from continuous
- tx dongle */
-#endif /* SDTEST */
-
-/* Enter idle immediately (no timeout) */
-#define BRCMF_IDLE_IMMEDIATE (-1)
-
-/* Values for idleclock iovar: other values are the sd_divisor to use
- when idle */
-#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
- when idle */
-
-#endif /* _BRCMF_H_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_bus.h b/drivers/staging/brcm80211/brcmfmac/dhd_bus.h
deleted file mode 100644
index 653cf0daa0e..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/dhd_bus.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCMF_BUS_H_
-#define _BRCMF_BUS_H_
-
-/* Packet alignment for most efficient SDIO (can change based on platform) */
-#ifndef BRCMF_SDALIGN
-#define BRCMF_SDALIGN 32
-#endif
-#if !ISPOWEROF2(BRCMF_SDALIGN)
-#error BRCMF_SDALIGN is not a power of 2!
-#endif
-
-/*
- * Exported from brcmf bus module (brcmf_usb, brcmf_sdio)
- */
-
-/* dongle ram module parameter */
-extern int brcmf_dongle_memsize;
-
-/* Tx/Rx bounds module parameters */
-extern uint brcmf_txbound;
-extern uint brcmf_rxbound;
-
-/* Watchdog timer interval */
-extern uint brcmf_watchdog_ms;
-
-/* Indicate (dis)interest in finding dongles. */
-extern int brcmf_bus_register(void);
-extern void brcmf_bus_unregister(void);
-
-/* Stop bus module: clear pending frames, disable data flow */
-extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus, bool enforce_mutex);
-
-/* Initialize bus module: prepare for communication w/dongle */
-extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr, bool enforce_mutex);
-
-/* Send a data frame to the dongle. Callee disposes of txp. */
-extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp);
-
-/* Send/receive a control message to/from the dongle.
- * Expects caller to enforce a single outstanding transaction.
- */
-extern int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
-
-extern int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
-
-/* Check for and handle local prot-specific iovar commands */
-extern int brcmf_sdbrcm_bus_iovar_op(struct brcmf_pub *drvr, const char *name,
- void *params, int plen, void *arg, int len,
- bool set);
-
-/* Add bus dump output to a buffer */
-extern void brcmf_sdbrcm_bus_dump(struct brcmf_pub *drvr,
- struct brcmu_strbuf *strbuf);
-
-/* Clear any bus counters */
-extern void brcmf_bus_clearcounts(struct brcmf_pub *drvr);
-
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick);
-
-#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c b/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c
deleted file mode 100644
index 345acabe935..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <defs.h>
-
-#include <brcmu_utils.h>
-#include <brcmu_wifi.h>
-
-#include "dhd.h"
-#include "dhd_proto.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
-
-struct brcmf_proto_cdc_ioctl {
- u32 cmd; /* ioctl command value */
- u32 len; /* lower 16: output buflen;
- * upper 16: input buflen (excludes header) */
- u32 flags; /* flag defns given below */
- u32 status; /* status code returned from the device */
-};
-
-/* Max valid buffer size that can be sent to the dongle */
-#define CDC_MAX_MSG_SIZE (ETH_FRAME_LEN+ETH_FCS_LEN)
-
-/* CDC flag definitions */
-#define CDCF_IOC_ERROR 0x01 /* 1=ioctl cmd failed */
-#define CDCF_IOC_SET 0x02 /* 0=get, 1=set cmd */
-#define CDCF_IOC_IF_MASK 0xF000 /* I/F index */
-#define CDCF_IOC_IF_SHIFT 12
-#define CDCF_IOC_ID_MASK 0xFFFF0000 /* id an ioctl pairing */
-#define CDCF_IOC_ID_SHIFT 16 /* ID Mask shift bits */
-#define CDC_IOC_ID(flags) \
- (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
-#define CDC_SET_IF_IDX(hdr, idx) \
- ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | \
- ((idx) << CDCF_IOC_IF_SHIFT)))
-
-/*
- * BDC header - Broadcom specific extension of CDC.
- * Used on data packets to convey priority across USB.
- */
-#define BDC_HEADER_LEN 4
-#define BDC_PROTO_VER 1 /* Protocol version */
-#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
-#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
-#define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */
-#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums */
-#define BDC_PRIORITY_MASK 0x7
-#define BDC_FLAG2_IF_MASK 0x0f /* packet rx interface in APSTA */
-#define BDC_FLAG2_IF_SHIFT 0
-
-#define BDC_GET_IF_IDX(hdr) \
- ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
-#define BDC_SET_IF_IDX(hdr, idx) \
- ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | \
- ((idx) << BDC_FLAG2_IF_SHIFT)))
-
-struct brcmf_proto_bdc_header {
- u8 flags;
- u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */
- u8 flags2;
- u8 rssi;
-};
-
-
-#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
-#define BUS_HEADER_LEN (16+BRCMF_SDALIGN) /* Must be atleast SDPCM_RESERVE
- * (amount of header tha might be added)
- * plus any space that might be needed
- * for alignment padding.
- */
-#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for
- * round off at the end of buffer
- */
-
-struct brcmf_proto {
- u16 reqid;
- u8 pending;
- u32 lastcmd;
- u8 bus_header[BUS_HEADER_LEN];
- struct brcmf_proto_cdc_ioctl msg;
- unsigned char buf[BRCMF_C_IOCTL_MAXLEN + ROUND_UP_MARGIN];
-};
-
-static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
-{
- struct brcmf_proto *prot = drvr->prot;
- int len = le32_to_cpu(prot->msg.len) +
- sizeof(struct brcmf_proto_cdc_ioctl);
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* NOTE : cdc->msg.len holds the desired length of the buffer to be
- * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
- * is actually sent to the dongle
- */
- if (len > CDC_MAX_MSG_SIZE)
- len = CDC_MAX_MSG_SIZE;
-
- /* Send request */
- return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg,
- len);
-}
-
-static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
-{
- int ret;
- struct brcmf_proto *prot = drvr->prot;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- do {
- ret = brcmf_sdbrcm_bus_rxctl(drvr->bus,
- (unsigned char *)&prot->msg,
- len + sizeof(struct brcmf_proto_cdc_ioctl));
- if (ret < 0)
- break;
- } while (CDC_IOC_ID(le32_to_cpu(prot->msg.flags)) != id);
-
- return ret;
-}
-
-int
-brcmf_proto_cdc_query_ioctl(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len)
-{
- struct brcmf_proto *prot = drvr->prot;
- struct brcmf_proto_cdc_ioctl *msg = &prot->msg;
- void *info;
- int ret = 0, retries = 0;
- u32 id, flags = 0;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
- BRCMF_CTL(("%s: cmd %d len %d\n", __func__, cmd, len));
-
- /* Respond "bcmerror" and "bcmerrorstr" with local cache */
- if (cmd == BRCMF_C_GET_VAR && buf) {
- if (!strcmp((char *)buf, "bcmerrorstr")) {
- strncpy((char *)buf, "bcm_error",
- BCME_STRLEN);
- goto done;
- } else if (!strcmp((char *)buf, "bcmerror")) {
- *(int *)buf = drvr->dongle_error;
- goto done;
- }
- }
-
- memset(msg, 0, sizeof(struct brcmf_proto_cdc_ioctl));
-
- msg->cmd = cpu_to_le32(cmd);
- msg->len = cpu_to_le32(len);
- msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
- CDC_SET_IF_IDX(msg, ifidx);
- msg->flags = cpu_to_le32(msg->flags);
-
- if (buf)
- memcpy(prot->buf, buf, len);
-
- ret = brcmf_proto_cdc_msg(drvr);
- if (ret < 0) {
- BRCMF_ERROR(("brcmf_proto_cdc_query_ioctl: brcmf_proto_cdc_msg "
- "failed w/status %d\n", ret));
- goto done;
- }
-
-retry:
- /* wait for interrupt and get first fragment */
- ret = brcmf_proto_cdc_cmplt(drvr, prot->reqid, len);
- if (ret < 0)
- goto done;
-
- flags = le32_to_cpu(msg->flags);
- id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
-
- if ((id < prot->reqid) && (++retries < RETRIES))
- goto retry;
- if (id != prot->reqid) {
- BRCMF_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
- brcmf_ifname(drvr, ifidx), __func__, id,
- prot->reqid));
- ret = -EINVAL;
- goto done;
- }
-
- /* Check info buffer */
- info = (void *)&msg[1];
-
- /* Copy info buffer */
- if (buf) {
- if (ret < (int)len)
- len = ret;
- memcpy(buf, info, len);
- }
-
- /* Check the ERROR flag */
- if (flags & CDCF_IOC_ERROR) {
- ret = le32_to_cpu(msg->status);
- /* Cache error from dongle */
- drvr->dongle_error = ret;
- }
-
-done:
- return ret;
-}
-
-int brcmf_proto_cdc_set_ioctl(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len)
-{
- struct brcmf_proto *prot = drvr->prot;
- struct brcmf_proto_cdc_ioctl *msg = &prot->msg;
- int ret = 0;
- u32 flags, id;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
- BRCMF_CTL(("%s: cmd %d len %d\n", __func__, cmd, len));
-
- memset(msg, 0, sizeof(struct brcmf_proto_cdc_ioctl));
-
- msg->cmd = cpu_to_le32(cmd);
- msg->len = cpu_to_le32(len);
- msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT) | CDCF_IOC_SET;
- CDC_SET_IF_IDX(msg, ifidx);
- msg->flags = cpu_to_le32(msg->flags);
-
- if (buf)
- memcpy(prot->buf, buf, len);
-
- ret = brcmf_proto_cdc_msg(drvr);
- if (ret < 0)
- goto done;
-
- ret = brcmf_proto_cdc_cmplt(drvr, prot->reqid, len);
- if (ret < 0)
- goto done;
-
- flags = le32_to_cpu(msg->flags);
- id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
-
- if (id != prot->reqid) {
- BRCMF_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
- brcmf_ifname(drvr, ifidx), __func__, id,
- prot->reqid));
- ret = -EINVAL;
- goto done;
- }
-
- /* Check the ERROR flag */
- if (flags & CDCF_IOC_ERROR) {
- ret = le32_to_cpu(msg->status);
- /* Cache error from dongle */
- drvr->dongle_error = ret;
- }
-
-done:
- return ret;
-}
-
-int
-brcmf_proto_ioctl(struct brcmf_pub *drvr, int ifidx, struct brcmf_ioctl *ioc,
- void *buf, int len)
-{
- struct brcmf_proto *prot = drvr->prot;
- int ret = -1;
-
- if (drvr->busstate == BRCMF_BUS_DOWN) {
- BRCMF_ERROR(("%s : bus is down. we have nothing to do\n",
- __func__));
- return ret;
- }
- brcmf_os_proto_block(drvr);
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (len > BRCMF_C_IOCTL_MAXLEN)
- goto done;
-
- if (prot->pending == true) {
- BRCMF_TRACE(("CDC packet is pending!!!! cmd=0x%x (%lu) "
- "lastcmd=0x%x (%lu)\n",
- ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
- (unsigned long)prot->lastcmd));
- if ((ioc->cmd == BRCMF_C_SET_VAR) ||
- (ioc->cmd == BRCMF_C_GET_VAR))
- BRCMF_TRACE(("iovar cmd=%s\n", (char *)buf));
-
- goto done;
- }
-
- prot->pending = true;
- prot->lastcmd = ioc->cmd;
- if (ioc->set)
- ret = brcmf_proto_cdc_set_ioctl(drvr, ifidx, ioc->cmd,
- buf, len);
- else {
- ret = brcmf_proto_cdc_query_ioctl(drvr, ifidx, ioc->cmd,
- buf, len);
- if (ret > 0)
- ioc->used = ret - sizeof(struct brcmf_proto_cdc_ioctl);
- }
-
- /* Too many programs assume ioctl() returns 0 on success */
- if (ret >= 0)
- ret = 0;
- else {
- struct brcmf_proto_cdc_ioctl *msg = &prot->msg;
- /* len == needed when set/query fails from dongle */
- ioc->needed = le32_to_cpu(msg->len);
- }
-
- /* Intercept the wme_dp ioctl here */
- if (!ret && ioc->cmd == BRCMF_C_SET_VAR &&
- !strcmp(buf, "wme_dp")) {
- int slen, val = 0;
-
- slen = strlen("wme_dp") + 1;
- if (len >= (int)(slen + sizeof(int)))
- memcpy(&val, (char *)buf + slen, sizeof(int));
- drvr->wme_dp = (u8) le32_to_cpu(val);
- }
-
- prot->pending = false;
-
-done:
- brcmf_os_proto_unblock(drvr);
-
- return ret;
-}
-
-#define PKTSUMNEEDED(skb) \
- (((struct sk_buff *)(skb))->ip_summed == CHECKSUM_PARTIAL)
-#define PKTSETSUMGOOD(skb, x) \
- (((struct sk_buff *)(skb))->ip_summed = \
- ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
-
-void brcmf_proto_dump(struct brcmf_pub *drvr, struct brcmu_strbuf *strbuf)
-{
- brcmu_bprintf(strbuf, "Protocol CDC: reqid %d\n", drvr->prot->reqid);
-}
-
-void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
- struct sk_buff *pktbuf)
-{
- struct brcmf_proto_bdc_header *h;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* Push BDC header used to convey priority for buses that don't */
-
- skb_push(pktbuf, BDC_HEADER_LEN);
-
- h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
-
- h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
- if (PKTSUMNEEDED(pktbuf))
- h->flags |= BDC_FLAG_SUM_NEEDED;
-
- h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
- h->flags2 = 0;
- h->rssi = 0;
- BDC_SET_IF_IDX(h, ifidx);
-}
-
-int brcmf_proto_hdrpull(struct brcmf_pub *drvr, int *ifidx,
- struct sk_buff *pktbuf)
-{
- struct brcmf_proto_bdc_header *h;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* Pop BDC header used to convey priority for buses that don't */
-
- if (pktbuf->len < BDC_HEADER_LEN) {
- BRCMF_ERROR(("%s: rx data too short (%d < %d)\n", __func__,
- pktbuf->len, BDC_HEADER_LEN));
- return -EBADE;
- }
-
- h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
-
- *ifidx = BDC_GET_IF_IDX(h);
- if (*ifidx >= BRCMF_MAX_IFS) {
- BRCMF_ERROR(("%s: rx data ifnum out of range (%d)\n",
- __func__, *ifidx));
- return -EBADE;
- }
-
- if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) !=
- BDC_PROTO_VER) {
- BRCMF_ERROR(("%s: non-BDC packet received, flags 0x%x\n",
- brcmf_ifname(drvr, *ifidx), h->flags));
- return -EBADE;
- }
-
- if (h->flags & BDC_FLAG_SUM_GOOD) {
- BRCMF_INFO(("%s: BDC packet received with good rx-csum, "
- "flags 0x%x\n",
- brcmf_ifname(drvr, *ifidx), h->flags));
- PKTSETSUMGOOD(pktbuf, true);
- }
-
- pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
-
- skb_pull(pktbuf, BDC_HEADER_LEN);
-
- return 0;
-}
-
-int brcmf_proto_attach(struct brcmf_pub *drvr)
-{
- struct brcmf_proto *cdc;
-
- cdc = kzalloc(sizeof(struct brcmf_proto), GFP_ATOMIC);
- if (!cdc) {
- BRCMF_ERROR(("%s: kmalloc failed\n", __func__));
- goto fail;
- }
-
- /* ensure that the msg buf directly follows the cdc msg struct */
- if ((unsigned long)(&cdc->msg + 1) != (unsigned long)cdc->buf) {
- BRCMF_ERROR(("struct brcmf_proto is not correctly defined\n"));
- goto fail;
- }
-
- drvr->prot = cdc;
- drvr->hdrlen += BDC_HEADER_LEN;
- drvr->maxctl = BRCMF_C_IOCTL_MAXLEN +
- sizeof(struct brcmf_proto_cdc_ioctl) + ROUND_UP_MARGIN;
- return 0;
-
-fail:
- kfree(cdc);
- return -ENOMEM;
-}
-
-/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? */
-void brcmf_proto_detach(struct brcmf_pub *drvr)
-{
- kfree(drvr->prot);
- drvr->prot = NULL;
-}
-
-void brcmf_proto_dstats(struct brcmf_pub *drvr)
-{
- /* No stats from dongle added yet, copy bus stats */
- drvr->dstats.tx_packets = drvr->tx_packets;
- drvr->dstats.tx_errors = drvr->tx_errors;
- drvr->dstats.rx_packets = drvr->rx_packets;
- drvr->dstats.rx_errors = drvr->rx_errors;
- drvr->dstats.rx_dropped = drvr->rx_dropped;
- drvr->dstats.multicast = drvr->rx_multicast;
- return;
-}
-
-int brcmf_proto_init(struct brcmf_pub *drvr)
-{
- int ret = 0;
- char buf[128];
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- brcmf_os_proto_block(drvr);
-
- /* Get the device MAC address */
- strcpy(buf, "cur_etheraddr");
- ret = brcmf_proto_cdc_query_ioctl(drvr, 0, BRCMF_C_GET_VAR,
- buf, sizeof(buf));
- if (ret < 0) {
- brcmf_os_proto_unblock(drvr);
- return ret;
- }
- memcpy(drvr->mac, buf, ETH_ALEN);
-
- brcmf_os_proto_unblock(drvr);
-
- ret = brcmf_c_preinit_ioctls(drvr);
-
- /* Always assumes wl for now */
- drvr->iswl = true;
-
- return ret;
-}
-
-void brcmf_proto_stop(struct brcmf_pub *drvr)
-{
- /* Nothing to do for CDC */
-}
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_common.c b/drivers/staging/brcm80211/brcmfmac/dhd_common.c
deleted file mode 100644
index fdec4683c42..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/dhd_common.c
+++ /dev/null
@@ -1,1196 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-#include <asm/unaligned.h>
-#include <defs.h>
-#include <brcmu_wifi.h>
-#include <brcmu_utils.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_proto.h"
-#include "dhd_dbg.h"
-
-#define BRCM_OUI "\x00\x10\x18"
-#define DOT11_OUI_LEN 3
-#define BCMILCP_BCM_SUBTYPE_EVENT 1
-#define PKTFILTER_BUF_SIZE 2048
-
-int brcmf_msg_level;
-
-#define MSGTRACE_VERSION 1
-
-#ifdef BCMDBG
-const char brcmf_version[] =
-"Dongle Host Driver, version " BRCMF_VERSION_STR "\nCompiled on " __DATE__
-" at " __TIME__;
-#else
-const char brcmf_version[] = "Dongle Host Driver, version " BRCMF_VERSION_STR;
-#endif
-
-/* IOVar table */
-enum {
- IOV_VERSION = 1,
- IOV_MSGLEVEL,
- IOV_BCMERRORSTR,
- IOV_BCMERROR,
- IOV_DUMP,
- IOV_CLEARCOUNTS,
- IOV_LOGDUMP,
- IOV_LOGCAL,
- IOV_LOGSTAMP,
- IOV_GPIOOB,
- IOV_IOCTLTIMEOUT,
- IOV_LAST
-};
-
-const struct brcmu_iovar brcmf_iovars[] = {
- {"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(brcmf_version)}
- ,
-#ifdef BCMDBG
- {"msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0}
- ,
-#endif /* BCMDBG */
- {"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER, BCME_STRLEN}
- ,
- {"bcmerror", IOV_BCMERROR, 0, IOVT_INT8, 0}
- ,
- {"dump", IOV_DUMP, 0, IOVT_BUFFER, BRCMF_IOCTL_MAXLEN}
- ,
- {"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID, 0}
- ,
- {"gpioob", IOV_GPIOOB, 0, IOVT_UINT32, 0}
- ,
- {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, IOVT_UINT32, 0}
- ,
- {NULL, 0, 0, 0, 0}
-};
-
-/* Message trace header */
-struct msgtrace_hdr {
- u8 version;
- u8 spare;
- u16 len; /* Len of the trace */
- u32 seqnum; /* Sequence number of message. Useful
- * if the messsage has been lost
- * because of DMA error or a bus reset
- * (ex: SDIO Func2)
- */
- u32 discarded_bytes; /* Number of discarded bytes because of
- trace overflow */
- u32 discarded_printf; /* Number of discarded printf
- because of trace overflow */
-} __packed;
-
-void brcmf_c_init(void)
-{
- /* Init global variables at run-time, not as part of the declaration.
- * This is required to support init/de-init of the driver.
- * Initialization
- * of globals as part of the declaration results in non-deterministic
- * behaviour since the value of the globals may be different on the
- * first time that the driver is initialized vs subsequent
- * initializations.
- */
- brcmf_msg_level = BRCMF_ERROR_VAL;
-}
-
-static int brcmf_c_dump(struct brcmf_pub *drvr, char *buf, int buflen)
-{
- struct brcmu_strbuf b;
- struct brcmu_strbuf *strbuf = &b;
-
- brcmu_binit(strbuf, buf, buflen);
-
- /* Base info */
- brcmu_bprintf(strbuf, "%s\n", brcmf_version);
- brcmu_bprintf(strbuf, "\n");
- brcmu_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
- drvr->up, drvr->txoff, drvr->busstate);
- brcmu_bprintf(strbuf, "pub.hdrlen %d pub.maxctl %d pub.rxsz %d\n",
- drvr->hdrlen, drvr->maxctl, drvr->rxsz);
- brcmu_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %pM\n",
- drvr->iswl, drvr->drv_version, &drvr->mac);
- brcmu_bprintf(strbuf, "pub.bcmerror %d tickcnt %d\n", drvr->bcmerror,
- drvr->tickcnt);
-
- brcmu_bprintf(strbuf, "dongle stats:\n");
- brcmu_bprintf(strbuf,
- "tx_packets %ld tx_bytes %ld tx_errors %ld tx_dropped %ld\n",
- drvr->dstats.tx_packets, drvr->dstats.tx_bytes,
- drvr->dstats.tx_errors, drvr->dstats.tx_dropped);
- brcmu_bprintf(strbuf,
- "rx_packets %ld rx_bytes %ld rx_errors %ld rx_dropped %ld\n",
- drvr->dstats.rx_packets, drvr->dstats.rx_bytes,
- drvr->dstats.rx_errors, drvr->dstats.rx_dropped);
- brcmu_bprintf(strbuf, "multicast %ld\n", drvr->dstats.multicast);
-
- brcmu_bprintf(strbuf, "bus stats:\n");
- brcmu_bprintf(strbuf, "tx_packets %ld tx_multicast %ld tx_errors %ld\n",
- drvr->tx_packets, drvr->tx_multicast, drvr->tx_errors);
- brcmu_bprintf(strbuf, "tx_ctlpkts %ld tx_ctlerrs %ld\n",
- drvr->tx_ctlpkts, drvr->tx_ctlerrs);
- brcmu_bprintf(strbuf, "rx_packets %ld rx_multicast %ld rx_errors %ld\n",
- drvr->rx_packets, drvr->rx_multicast, drvr->rx_errors);
- brcmu_bprintf(strbuf,
- "rx_ctlpkts %ld rx_ctlerrs %ld rx_dropped %ld rx_flushed %ld\n",
- drvr->rx_ctlpkts, drvr->rx_ctlerrs, drvr->rx_dropped,
- drvr->rx_flushed);
- brcmu_bprintf(strbuf,
- "rx_readahead_cnt %ld tx_realloc %ld fc_packets %ld\n",
- drvr->rx_readahead_cnt, drvr->tx_realloc, drvr->fc_packets);
- brcmu_bprintf(strbuf, "wd_dpc_sched %ld\n", drvr->wd_dpc_sched);
- brcmu_bprintf(strbuf, "\n");
-
- /* Add any prot info */
- brcmf_proto_dump(drvr, strbuf);
- brcmu_bprintf(strbuf, "\n");
-
- /* Add any bus info */
- brcmf_sdbrcm_bus_dump(drvr, strbuf);
-
- return !strbuf->size ? -EOVERFLOW : 0;
-}
-
-static int
-brcmf_c_doiovar(struct brcmf_pub *drvr, const struct brcmu_iovar *vi,
- u32 actionid, const char *name, void *params, int plen,
- void *arg, int len, int val_size)
-{
- int bcmerror = 0;
- s32 int_val = 0;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- bcmerror = brcmu_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid));
- if (bcmerror != 0)
- goto exit;
-
- if (plen >= (int)sizeof(int_val))
- memcpy(&int_val, params, sizeof(int_val));
-
- switch (actionid) {
- case IOV_GVAL(IOV_VERSION):
- /* Need to have checked buffer length */
- strncpy((char *)arg, brcmf_version, len);
- break;
-
- case IOV_GVAL(IOV_MSGLEVEL):
- int_val = (s32) brcmf_msg_level;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_MSGLEVEL):
- brcmf_msg_level = int_val;
- break;
-
- case IOV_GVAL(IOV_BCMERRORSTR):
- strncpy((char *)arg, "bcm_error",
- BCME_STRLEN);
- ((char *)arg)[BCME_STRLEN - 1] = 0x00;
- break;
-
- case IOV_GVAL(IOV_BCMERROR):
- int_val = (s32) drvr->bcmerror;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_GVAL(IOV_DUMP):
- bcmerror = brcmf_c_dump(drvr, arg, len);
- break;
-
- case IOV_SVAL(IOV_CLEARCOUNTS):
- drvr->tx_packets = drvr->rx_packets = 0;
- drvr->tx_errors = drvr->rx_errors = 0;
- drvr->tx_ctlpkts = drvr->rx_ctlpkts = 0;
- drvr->tx_ctlerrs = drvr->rx_ctlerrs = 0;
- drvr->rx_dropped = 0;
- drvr->rx_readahead_cnt = 0;
- drvr->tx_realloc = 0;
- drvr->wd_dpc_sched = 0;
- memset(&drvr->dstats, 0, sizeof(drvr->dstats));
- brcmf_bus_clearcounts(drvr);
- break;
-
- case IOV_GVAL(IOV_IOCTLTIMEOUT):{
- int_val = (s32) brcmf_os_get_ioctl_resp_timeout();
- memcpy(arg, &int_val, sizeof(int_val));
- break;
- }
-
- case IOV_SVAL(IOV_IOCTLTIMEOUT):{
- if (int_val <= 0)
- bcmerror = -EINVAL;
- else
- brcmf_os_set_ioctl_resp_timeout((unsigned int)
- int_val);
- break;
- }
-
- default:
- bcmerror = -ENOTSUPP;
- break;
- }
-
-exit:
- return bcmerror;
-}
-
-bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
- struct sk_buff *pkt, int prec)
-{
- struct sk_buff *p;
- int eprec = -1; /* precedence to evict from */
- bool discard_oldest;
-
- /* Fast case, precedence queue is not full and we are also not
- * exceeding total queue length
- */
- if (!pktq_pfull(q, prec) && !pktq_full(q)) {
- brcmu_pktq_penq(q, prec, pkt);
- return true;
- }
-
- /* Determine precedence from which to evict packet, if any */
- if (pktq_pfull(q, prec))
- eprec = prec;
- else if (pktq_full(q)) {
- p = brcmu_pktq_peek_tail(q, &eprec);
- if (eprec > prec)
- return false;
- }
-
- /* Evict if needed */
- if (eprec >= 0) {
- /* Detect queueing to unconfigured precedence */
- discard_oldest = AC_BITMAP_TST(drvr->wme_dp, eprec);
- if (eprec == prec && !discard_oldest)
- return false; /* refuse newer (incoming) packet */
- /* Evict packet according to discard policy */
- p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) :
- brcmu_pktq_pdeq_tail(q, eprec);
- if (p == NULL) {
- BRCMF_ERROR(("%s: brcmu_pktq_penq() failed, oldest %d.",
- __func__, discard_oldest));
- }
- brcmu_pkt_buf_free_skb(p);
- }
-
- /* Enqueue */
- p = brcmu_pktq_penq(q, prec, pkt);
- if (p == NULL) {
- BRCMF_ERROR(("%s: brcmu_pktq_penq() failed.", __func__));
- }
-
- return p != NULL;
-}
-
-static int
-brcmf_c_iovar_op(struct brcmf_pub *drvr, const char *name,
- void *params, int plen, void *arg, int len, bool set)
-{
- int bcmerror = 0;
- int val_size;
- const struct brcmu_iovar *vi = NULL;
- u32 actionid;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (name == NULL || len <= 0)
- return -EINVAL;
-
- /* Set does not take qualifiers */
- if (set && (params || plen))
- return -EINVAL;
-
- /* Get must have return space;*/
- if (!set && !(arg && len))
- return -EINVAL;
-
- vi = brcmu_iovar_lookup(brcmf_iovars, name);
- if (vi == NULL) {
- bcmerror = -ENOTSUPP;
- goto exit;
- }
-
- BRCMF_CTL(("%s: %s %s, len %d plen %d\n", __func__,
- name, (set ? "set" : "get"), len, plen));
-
- /* set up 'params' pointer in case this is a set command so that
- * the convenience int and bool code can be common to set and get
- */
- if (params == NULL) {
- params = arg;
- plen = len;
- }
-
- if (vi->type == IOVT_VOID)
- val_size = 0;
- else if (vi->type == IOVT_BUFFER)
- val_size = len;
- else
- /* all other types are integer sized */
- val_size = sizeof(int);
-
- actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
- bcmerror =
- brcmf_c_doiovar(drvr, vi, actionid, name, params, plen, arg, len,
- val_size);
-
-exit:
- return bcmerror;
-}
-
-int brcmf_c_ioctl(struct brcmf_pub *drvr, struct brcmf_c_ioctl *ioc, void *buf,
- uint buflen)
-{
- int bcmerror = 0;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (!buf)
- return -EINVAL;
-
- switch (ioc->cmd) {
- case BRCMF_GET_MAGIC:
- if (buflen < sizeof(int))
- bcmerror = -EOVERFLOW;
- else
- *(int *)buf = BRCMF_IOCTL_MAGIC;
- break;
-
- case BRCMF_GET_VERSION:
- if (buflen < sizeof(int))
- bcmerror = -EOVERFLOW;
- else
- *(int *)buf = BRCMF_IOCTL_VERSION;
- break;
-
- case BRCMF_GET_VAR:
- case BRCMF_SET_VAR:{
- char *arg;
- uint arglen;
-
- /* scan past the name to any arguments */
- for (arg = buf, arglen = buflen; *arg && arglen;
- arg++, arglen--)
- ;
-
- if (*arg) {
- bcmerror = -EOVERFLOW;
- break;
- }
-
- /* account for the NUL terminator */
- arg++, arglen--;
-
- /* call with the appropriate arguments */
- if (ioc->cmd == BRCMF_GET_VAR)
- bcmerror = brcmf_c_iovar_op(drvr, buf, arg,
- arglen, buf, buflen, IOV_GET);
- else
- bcmerror =
- brcmf_c_iovar_op(drvr, buf, NULL, 0, arg,
- arglen, IOV_SET);
- if (bcmerror != -ENOTSUPP)
- break;
-
- /* if still not found, try bus module */
- if (ioc->cmd == BRCMF_GET_VAR)
- bcmerror = brcmf_sdbrcm_bus_iovar_op(drvr,
- buf, arg, arglen, buf, buflen,
- IOV_GET);
- else
- bcmerror = brcmf_sdbrcm_bus_iovar_op(drvr,
- buf, NULL, 0, arg, arglen,
- IOV_SET);
-
- break;
- }
-
- default:
- bcmerror = -ENOTSUPP;
- }
-
- return bcmerror;
-}
-
-#ifdef SHOW_EVENTS
-static void
-brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
-{
- uint i, status, reason;
- bool group = false, flush_txq = false, link = false;
- char *auth_str, *event_name;
- unsigned char *buf;
- char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
- static struct {
- uint event;
- char *event_name;
- } event_names[] = {
- {
- BRCMF_E_SET_SSID, "SET_SSID"}, {
- BRCMF_E_JOIN, "JOIN"}, {
- BRCMF_E_START, "START"}, {
- BRCMF_E_AUTH, "AUTH"}, {
- BRCMF_E_AUTH_IND, "AUTH_IND"}, {
- BRCMF_E_DEAUTH, "DEAUTH"}, {
- BRCMF_E_DEAUTH_IND, "DEAUTH_IND"}, {
- BRCMF_E_ASSOC, "ASSOC"}, {
- BRCMF_E_ASSOC_IND, "ASSOC_IND"}, {
- BRCMF_E_REASSOC, "REASSOC"}, {
- BRCMF_E_REASSOC_IND, "REASSOC_IND"}, {
- BRCMF_E_DISASSOC, "DISASSOC"}, {
- BRCMF_E_DISASSOC_IND, "DISASSOC_IND"}, {
- BRCMF_E_QUIET_START, "START_QUIET"}, {
- BRCMF_E_QUIET_END, "END_QUIET"}, {
- BRCMF_E_BEACON_RX, "BEACON_RX"}, {
- BRCMF_E_LINK, "LINK"}, {
- BRCMF_E_MIC_ERROR, "MIC_ERROR"}, {
- BRCMF_E_NDIS_LINK, "NDIS_LINK"}, {
- BRCMF_E_ROAM, "ROAM"}, {
- BRCMF_E_TXFAIL, "TXFAIL"}, {
- BRCMF_E_PMKID_CACHE, "PMKID_CACHE"}, {
- BRCMF_E_RETROGRADE_TSF, "RETROGRADE_TSF"}, {
- BRCMF_E_PRUNE, "PRUNE"}, {
- BRCMF_E_AUTOAUTH, "AUTOAUTH"}, {
- BRCMF_E_EAPOL_MSG, "EAPOL_MSG"}, {
- BRCMF_E_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
- BRCMF_E_ADDTS_IND, "ADDTS_IND"}, {
- BRCMF_E_DELTS_IND, "DELTS_IND"}, {
- BRCMF_E_BCNSENT_IND, "BCNSENT_IND"}, {
- BRCMF_E_BCNRX_MSG, "BCNRX_MSG"}, {
- BRCMF_E_BCNLOST_MSG, "BCNLOST_MSG"}, {
- BRCMF_E_ROAM_PREP, "ROAM_PREP"}, {
- BRCMF_E_PFN_NET_FOUND, "PNO_NET_FOUND"}, {
- BRCMF_E_PFN_NET_LOST, "PNO_NET_LOST"}, {
- BRCMF_E_RESET_COMPLETE, "RESET_COMPLETE"}, {
- BRCMF_E_JOIN_START, "JOIN_START"}, {
- BRCMF_E_ROAM_START, "ROAM_START"}, {
- BRCMF_E_ASSOC_START, "ASSOC_START"}, {
- BRCMF_E_IBSS_ASSOC, "IBSS_ASSOC"}, {
- BRCMF_E_RADIO, "RADIO"}, {
- BRCMF_E_PSM_WATCHDOG, "PSM_WATCHDOG"}, {
- BRCMF_E_PROBREQ_MSG, "PROBREQ_MSG"}, {
- BRCMF_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND"}, {
- BRCMF_E_PSK_SUP, "PSK_SUP"}, {
- BRCMF_E_COUNTRY_CODE_CHANGED, "COUNTRY_CODE_CHANGED"}, {
- BRCMF_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME"}, {
- BRCMF_E_ICV_ERROR, "ICV_ERROR"}, {
- BRCMF_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR"}, {
- BRCMF_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR"}, {
- BRCMF_E_TRACE, "TRACE"}, {
- BRCMF_E_ACTION_FRAME, "ACTION FRAME"}, {
- BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, {
- BRCMF_E_IF, "IF"}, {
- BRCMF_E_RSSI, "RSSI"}, {
- BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}
- };
- uint event_type, flags, auth_type, datalen;
- event_type = be32_to_cpu(event->event_type);
- flags = be16_to_cpu(event->flags);
- status = be32_to_cpu(event->status);
- reason = be32_to_cpu(event->reason);
- auth_type = be32_to_cpu(event->auth_type);
- datalen = be32_to_cpu(event->datalen);
- /* debug dump of event messages */
- sprintf(eabuf, "%pM", event->addr);
-
- event_name = "UNKNOWN";
- for (i = 0; i < ARRAY_SIZE(event_names); i++) {
- if (event_names[i].event == event_type)
- event_name = event_names[i].event_name;
- }
-
- BRCMF_EVENT(("EVENT: %s, event ID = %d\n", event_name, event_type));
- BRCMF_EVENT(("flags 0x%04x, status %d, reason %d, auth_type %d"
- " MAC %s\n", flags, status, reason, auth_type, eabuf));
-
- if (flags & BRCMF_EVENT_MSG_LINK)
- link = true;
- if (flags & BRCMF_EVENT_MSG_GROUP)
- group = true;
- if (flags & BRCMF_EVENT_MSG_FLUSHTXQ)
- flush_txq = true;
-
- switch (event_type) {
- case BRCMF_E_START:
- case BRCMF_E_DEAUTH:
- case BRCMF_E_DISASSOC:
- BRCMF_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
- break;
-
- case BRCMF_E_ASSOC_IND:
- case BRCMF_E_REASSOC_IND:
- BRCMF_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
- break;
-
- case BRCMF_E_ASSOC:
- case BRCMF_E_REASSOC:
- if (status == BRCMF_E_STATUS_SUCCESS) {
- BRCMF_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n",
- event_name, eabuf));
- } else if (status == BRCMF_E_STATUS_TIMEOUT) {
- BRCMF_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n",
- event_name, eabuf));
- } else if (status == BRCMF_E_STATUS_FAIL) {
- BRCMF_EVENT(("MACEVENT: %s, MAC %s, FAILURE,"
- " reason %d\n", event_name, eabuf,
- (int)reason));
- } else {
- BRCMF_EVENT(("MACEVENT: %s, MAC %s, unexpected status "
- "%d\n", event_name, eabuf, (int)status));
- }
- break;
-
- case BRCMF_E_DEAUTH_IND:
- case BRCMF_E_DISASSOC_IND:
- BRCMF_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name,
- eabuf, (int)reason));
- break;
-
- case BRCMF_E_AUTH:
- case BRCMF_E_AUTH_IND:
- if (auth_type == WLAN_AUTH_OPEN)
- auth_str = "Open System";
- else if (auth_type == WLAN_AUTH_SHARED_KEY)
- auth_str = "Shared Key";
- else {
- sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
- auth_str = err_msg;
- }
- if (event_type == BRCMF_E_AUTH_IND) {
- BRCMF_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name,
- eabuf, auth_str));
- } else if (status == BRCMF_E_STATUS_SUCCESS) {
- BRCMF_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
- event_name, eabuf, auth_str));
- } else if (status == BRCMF_E_STATUS_TIMEOUT) {
- BRCMF_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
- event_name, eabuf, auth_str));
- } else if (status == BRCMF_E_STATUS_FAIL) {
- BRCMF_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, "
- "reason %d\n",
- event_name, eabuf, auth_str, (int)reason));
- }
-
- break;
-
- case BRCMF_E_JOIN:
- case BRCMF_E_ROAM:
- case BRCMF_E_SET_SSID:
- if (status == BRCMF_E_STATUS_SUCCESS) {
- BRCMF_EVENT(("MACEVENT: %s, MAC %s\n", event_name,
- eabuf));
- } else if (status == BRCMF_E_STATUS_FAIL) {
- BRCMF_EVENT(("MACEVENT: %s, failed\n", event_name));
- } else if (status == BRCMF_E_STATUS_NO_NETWORKS) {
- BRCMF_EVENT(("MACEVENT: %s, no networks found\n",
- event_name));
- } else {
- BRCMF_EVENT(("MACEVENT: %s, unexpected status %d\n",
- event_name, (int)status));
- }
- break;
-
- case BRCMF_E_BEACON_RX:
- if (status == BRCMF_E_STATUS_SUCCESS) {
- BRCMF_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
- } else if (status == BRCMF_E_STATUS_FAIL) {
- BRCMF_EVENT(("MACEVENT: %s, FAIL\n", event_name));
- } else {
- BRCMF_EVENT(("MACEVENT: %s, status %d\n", event_name,
- status));
- }
- break;
-
- case BRCMF_E_LINK:
- BRCMF_EVENT(("MACEVENT: %s %s\n", event_name,
- link ? "UP" : "DOWN"));
- break;
-
- case BRCMF_E_MIC_ERROR:
- BRCMF_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
- event_name, eabuf, group, flush_txq));
- break;
-
- case BRCMF_E_ICV_ERROR:
- case BRCMF_E_UNICAST_DECODE_ERROR:
- case BRCMF_E_MULTICAST_DECODE_ERROR:
- BRCMF_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
- break;
-
- case BRCMF_E_TXFAIL:
- BRCMF_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
- break;
-
- case BRCMF_E_SCAN_COMPLETE:
- case BRCMF_E_PMKID_CACHE:
- BRCMF_EVENT(("MACEVENT: %s\n", event_name));
- break;
-
- case BRCMF_E_PFN_NET_FOUND:
- case BRCMF_E_PFN_NET_LOST:
- case BRCMF_E_PFN_SCAN_COMPLETE:
- BRCMF_EVENT(("PNOEVENT: %s\n", event_name));
- break;
-
- case BRCMF_E_PSK_SUP:
- case BRCMF_E_PRUNE:
- BRCMF_EVENT(("MACEVENT: %s, status %d, reason %d\n",
- event_name, (int)status, (int)reason));
- break;
-
- case BRCMF_E_TRACE:
- {
- static u32 seqnum_prev;
- struct msgtrace_hdr hdr;
- u32 nblost;
- char *s, *p;
-
- buf = (unsigned char *) event_data;
- memcpy(&hdr, buf, sizeof(struct msgtrace_hdr));
-
- if (hdr.version != MSGTRACE_VERSION) {
- BRCMF_ERROR(
- ("\nMACEVENT: %s [unsupported version --> "
- "brcmf version:%d dongle version:%d]\n",
- event_name, MSGTRACE_VERSION, hdr.version)
- );
- /* Reset datalen to avoid display below */
- datalen = 0;
- break;
- }
-
- /* There are 2 bytes available at the end of data */
- *(buf + sizeof(struct msgtrace_hdr)
- + be16_to_cpu(hdr.len)) = '\0';
-
- if (be32_to_cpu(hdr.discarded_bytes)
- || be32_to_cpu(hdr.discarded_printf)) {
- BRCMF_ERROR(
- ("\nWLC_E_TRACE: [Discarded traces in dongle -->"
- "discarded_bytes %d discarded_printf %d]\n",
- be32_to_cpu(hdr.discarded_bytes),
- be32_to_cpu(hdr.discarded_printf)));
- }
-
- nblost = be32_to_cpu(hdr.seqnum) - seqnum_prev - 1;
- if (nblost > 0) {
- BRCMF_ERROR(
- ("\nWLC_E_TRACE: [Event lost --> seqnum %d nblost %d\n",
- be32_to_cpu(hdr.seqnum), nblost));
- }
- seqnum_prev = be32_to_cpu(hdr.seqnum);
-
- /* Display the trace buffer. Advance from \n to \n to
- * avoid display big
- * printf (issue with Linux printk )
- */
- p = (char *)&buf[sizeof(struct msgtrace_hdr)];
- while ((s = strstr(p, "\n")) != NULL) {
- *s = '\0';
- printk(KERN_DEBUG"%s\n", p);
- p = s + 1;
- }
- printk(KERN_DEBUG "%s\n", p);
-
- /* Reset datalen to avoid display below */
- datalen = 0;
- }
- break;
-
- case BRCMF_E_RSSI:
- BRCMF_EVENT(("MACEVENT: %s %d\n", event_name,
- be32_to_cpu(*((int *)event_data))));
- break;
-
- default:
- BRCMF_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, "
- "auth %d\n", event_name, event_type, eabuf,
- (int)status, (int)reason, (int)auth_type));
- break;
- }
-
- /* show any appended data */
- if (datalen) {
- buf = (unsigned char *) event_data;
- BRCMF_EVENT((" data (%d) : ", datalen));
- for (i = 0; i < datalen; i++)
- BRCMF_EVENT((" 0x%02x ", *buf++));
- BRCMF_EVENT(("\n"));
- }
-}
-#endif /* SHOW_EVENTS */
-
-int
-brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
- struct brcmf_event_msg *event, void **data_ptr)
-{
- /* check whether packet is a BRCM event pkt */
- struct brcmf_event *pvt_data = (struct brcmf_event *) pktdata;
- char *event_data;
- u32 type, status;
- u16 flags;
- int evlen;
-
- if (memcmp(BRCM_OUI, &pvt_data->hdr.oui[0], DOT11_OUI_LEN)) {
- BRCMF_ERROR(("%s: mismatched OUI, bailing\n", __func__));
- return -EBADE;
- }
-
- /* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
- if (get_unaligned_be16(&pvt_data->hdr.usr_subtype) !=
- BCMILCP_BCM_SUBTYPE_EVENT) {
- BRCMF_ERROR(("%s: mismatched subtype, bailing\n", __func__));
- return -EBADE;
- }
-
- *data_ptr = &pvt_data[1];
- event_data = *data_ptr;
-
- /* memcpy since BRCM event pkt may be unaligned. */
- memcpy(event, &pvt_data->msg, sizeof(struct brcmf_event_msg));
-
- type = get_unaligned_be32(&event->event_type);
- flags = get_unaligned_be16(&event->flags);
- status = get_unaligned_be32(&event->status);
- evlen = get_unaligned_be32(&event->datalen) +
- sizeof(struct brcmf_event);
-
- switch (type) {
- case BRCMF_E_IF:
- {
- struct brcmf_if_event *ifevent =
- (struct brcmf_if_event *) event_data;
- BRCMF_TRACE(("%s: if event\n", __func__));
-
- if (ifevent->ifidx > 0 &&
- ifevent->ifidx < BRCMF_MAX_IFS) {
- if (ifevent->action == BRCMF_E_IF_ADD)
- brcmf_add_if(drvr_priv, ifevent->ifidx,
- NULL, event->ifname,
- pvt_data->eth.h_dest,
- ifevent->flags,
- ifevent->bssidx);
- else
- brcmf_del_if(drvr_priv, ifevent->ifidx);
- } else {
- BRCMF_ERROR(("%s: Invalid ifidx %d for %s\n",
- __func__, ifevent->ifidx,
- event->ifname));
- }
- }
- /* send up the if event: btamp user needs it */
- *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
- break;
-
- /* These are what external supplicant/authenticator wants */
- case BRCMF_E_LINK:
- case BRCMF_E_ASSOC_IND:
- case BRCMF_E_REASSOC_IND:
- case BRCMF_E_DISASSOC_IND:
- case BRCMF_E_MIC_ERROR:
- default:
- /* Fall through: this should get _everything_ */
-
- *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
- BRCMF_TRACE(("%s: MAC event %d, flags %x, status %x\n",
- __func__, type, flags, status));
-
- /* put it back to BRCMF_E_NDIS_LINK */
- if (type == BRCMF_E_NDIS_LINK) {
- u32 temp;
-
- temp = get_unaligned_be32(&event->event_type);
- BRCMF_TRACE(("Converted to WLC_E_LINK type %d\n",
- temp));
-
- temp = be32_to_cpu(BRCMF_E_NDIS_LINK);
- memcpy((void *)(&pvt_data->msg.event_type), &temp,
- sizeof(pvt_data->msg.event_type));
- }
- break;
- }
-
-#ifdef SHOW_EVENTS
- brcmf_c_show_host_event(event, event_data);
-#endif /* SHOW_EVENTS */
-
- return 0;
-}
-
-/* Convert user's input in hex pattern to byte-size mask */
-static int brcmf_c_pattern_atoh(char *src, char *dst)
-{
- int i;
- if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
- BRCMF_ERROR(("Mask invalid format. Needs to start with 0x\n"));
- return -1;
- }
- src = src + 2; /* Skip past 0x */
- if (strlen(src) % 2 != 0) {
- BRCMF_ERROR(("Mask invalid format. Length must be even.\n"));
- return -1;
- }
- for (i = 0; *src != '\0'; i++) {
- char num[3];
- strncpy(num, src, 2);
- num[2] = '\0';
- dst[i] = (u8) simple_strtoul(num, NULL, 16);
- src += 2;
- }
- return i;
-}
-
-void
-brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg, int enable,
- int master_mode)
-{
- char *argv[8];
- int i = 0;
- const char *str;
- int buf_len;
- int str_len;
- char *arg_save = 0, *arg_org = 0;
- int rc;
- char buf[128];
- struct brcmf_pkt_filter_enable enable_parm;
- struct brcmf_pkt_filter_enable *pkt_filterp;
-
- arg_save = kmalloc(strlen(arg) + 1, GFP_ATOMIC);
- if (!arg_save) {
- BRCMF_ERROR(("%s: kmalloc failed\n", __func__));
- goto fail;
- }
- arg_org = arg_save;
- memcpy(arg_save, arg, strlen(arg) + 1);
-
- argv[i] = strsep(&arg_save, " ");
-
- i = 0;
- if (NULL == argv[i]) {
- BRCMF_ERROR(("No args provided\n"));
- goto fail;
- }
-
- str = "pkt_filter_enable";
- str_len = strlen(str);
- strncpy(buf, str, str_len);
- buf[str_len] = '\0';
- buf_len = str_len + 1;
-
- pkt_filterp = (struct brcmf_pkt_filter_enable *) (buf + str_len + 1);
-
- /* Parse packet filter id. */
- enable_parm.id = simple_strtoul(argv[i], NULL, 0);
-
- /* Parse enable/disable value. */
- enable_parm.enable = enable;
-
- buf_len += sizeof(enable_parm);
- memcpy((char *)pkt_filterp, &enable_parm, sizeof(enable_parm));
-
- /* Enable/disable the specified filter. */
- rc = brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len);
- rc = rc >= 0 ? 0 : rc;
- if (rc)
- BRCMF_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
- __func__, arg, rc));
- else
- BRCMF_TRACE(("%s: successfully added pktfilter %s\n",
- __func__, arg));
-
- /* Contorl the master mode */
- brcmu_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf,
- sizeof(buf));
- rc = brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, buf,
- sizeof(buf));
- rc = rc >= 0 ? 0 : rc;
- if (rc)
- BRCMF_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
- __func__, arg, rc));
-
-fail:
- kfree(arg_org);
-}
-
-void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg)
-{
- const char *str;
- struct brcmf_pkt_filter pkt_filter;
- struct brcmf_pkt_filter *pkt_filterp;
- int buf_len;
- int str_len;
- int rc;
- u32 mask_size;
- u32 pattern_size;
- char *argv[8], *buf = 0;
- int i = 0;
- char *arg_save = 0, *arg_org = 0;
-
- arg_save = kmalloc(strlen(arg) + 1, GFP_ATOMIC);
- if (!arg_save) {
- BRCMF_ERROR(("%s: kmalloc failed\n", __func__));
- goto fail;
- }
-
- arg_org = arg_save;
-
- buf = kmalloc(PKTFILTER_BUF_SIZE, GFP_ATOMIC);
- if (!buf) {
- BRCMF_ERROR(("%s: kmalloc failed\n", __func__));
- goto fail;
- }
-
- strcpy(arg_save, arg);
-
- argv[i] = strsep(&arg_save, " ");
- while (argv[i++])
- argv[i] = strsep(&arg_save, " ");
-
- i = 0;
- if (NULL == argv[i]) {
- BRCMF_ERROR(("No args provided\n"));
- goto fail;
- }
-
- str = "pkt_filter_add";
- strcpy(buf, str);
- str_len = strlen(str);
- buf_len = str_len + 1;
-
- pkt_filterp = (struct brcmf_pkt_filter *) (buf + str_len + 1);
-
- /* Parse packet filter id. */
- pkt_filter.id = simple_strtoul(argv[i], NULL, 0);
-
- if (NULL == argv[++i]) {
- BRCMF_ERROR(("Polarity not provided\n"));
- goto fail;
- }
-
- /* Parse filter polarity. */
- pkt_filter.negate_match = simple_strtoul(argv[i], NULL, 0);
-
- if (NULL == argv[++i]) {
- BRCMF_ERROR(("Filter type not provided\n"));
- goto fail;
- }
-
- /* Parse filter type. */
- pkt_filter.type = simple_strtoul(argv[i], NULL, 0);
-
- if (NULL == argv[++i]) {
- BRCMF_ERROR(("Offset not provided\n"));
- goto fail;
- }
-
- /* Parse pattern filter offset. */
- pkt_filter.u.pattern.offset = simple_strtoul(argv[i], NULL, 0);
-
- if (NULL == argv[++i]) {
- BRCMF_ERROR(("Bitmask not provided\n"));
- goto fail;
- }
-
- /* Parse pattern filter mask. */
- mask_size =
- brcmf_c_pattern_atoh
- (argv[i], (char *)pkt_filterp->u.pattern.mask_and_pattern);
-
- if (NULL == argv[++i]) {
- BRCMF_ERROR(("Pattern not provided\n"));
- goto fail;
- }
-
- /* Parse pattern filter pattern. */
- pattern_size =
- brcmf_c_pattern_atoh(argv[i],
- (char *)&pkt_filterp->u.pattern.
- mask_and_pattern[mask_size]);
-
- if (mask_size != pattern_size) {
- BRCMF_ERROR(("Mask and pattern not the same size\n"));
- goto fail;
- }
-
- pkt_filter.u.pattern.size_bytes = mask_size;
- buf_len += BRCMF_PKT_FILTER_FIXED_LEN;
- buf_len += (BRCMF_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
-
- /* Keep-alive attributes are set in local
- * variable (keep_alive_pkt), and
- ** then memcpy'ed into buffer (keep_alive_pktp) since there is no
- ** guarantee that the buffer is properly aligned.
- */
- memcpy((char *)pkt_filterp,
- &pkt_filter,
- BRCMF_PKT_FILTER_FIXED_LEN + BRCMF_PKT_FILTER_PATTERN_FIXED_LEN);
-
- rc = brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len);
- rc = rc >= 0 ? 0 : rc;
-
- if (rc)
- BRCMF_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
- __func__, arg, rc));
- else
- BRCMF_TRACE(("%s: successfully added pktfilter %s\n",
- __func__, arg));
-
-fail:
- kfree(arg_org);
-
- kfree(buf);
-}
-
-void brcmf_c_arp_offload_set(struct brcmf_pub *drvr, int arp_mode)
-{
- char iovbuf[32];
- int retcode;
-
- brcmu_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
- retcode = brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR,
- iovbuf, sizeof(iovbuf));
- retcode = retcode >= 0 ? 0 : retcode;
- if (retcode)
- BRCMF_TRACE(("%s: failed to set ARP offload mode to 0x%x, "
- "retcode = %d\n", __func__, arp_mode, retcode));
- else
- BRCMF_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
- __func__, arp_mode));
-}
-
-void brcmf_c_arp_offload_enable(struct brcmf_pub *drvr, int arp_enable)
-{
- char iovbuf[32];
- int retcode;
-
- brcmu_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
- retcode = brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR,
- iovbuf, sizeof(iovbuf));
- retcode = retcode >= 0 ? 0 : retcode;
- if (retcode)
- BRCMF_TRACE(("%s: failed to enabe ARP offload to %d, "
- "retcode = %d\n", __func__, arp_enable, retcode));
- else
- BRCMF_TRACE(("%s: successfully enabed ARP offload to %d\n",
- __func__, arp_enable));
-}
-
-int brcmf_c_preinit_ioctls(struct brcmf_pub *drvr)
-{
- char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for
- "event_msgs" + '\0' + bitvec */
- uint up = 0;
- char buf[128], *ptr;
- uint power_mode = PM_FAST;
- u32 dongle_align = BRCMF_SDALIGN;
- u32 glom = 0;
- uint bcn_timeout = 3;
- int scan_assoc_time = 40;
- int scan_unassoc_time = 40;
- int i;
-
- brcmf_os_proto_block(drvr);
-
- /* Set Country code */
- if (drvr->country_code[0] != 0) {
- if (brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_COUNTRY,
- drvr->country_code,
- sizeof(drvr->country_code)) < 0) {
- BRCMF_ERROR(("%s: country code setting failed\n",
- __func__));
- }
- }
-
- /* query for 'ver' to get version info from firmware */
- memset(buf, 0, sizeof(buf));
- ptr = buf;
- brcmu_mkiovar("ver", 0, 0, buf, sizeof(buf));
- brcmf_proto_cdc_query_ioctl(drvr, 0, BRCMF_C_GET_VAR, buf, sizeof(buf));
- strsep(&ptr, "\n");
- /* Print fw version info */
- BRCMF_ERROR(("Firmware version = %s\n", buf));
-
- /* Set PowerSave mode */
- brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_PM, (char *)&power_mode,
- sizeof(power_mode));
-
- /* Match Host and Dongle rx alignment */
- brcmu_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf,
- sizeof(iovbuf));
- brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
-
- /* disable glom option per default */
- brcmu_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
- brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
-
- /* Setup timeout if Beacons are lost and roam is off to report
- link down */
- brcmu_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf,
- sizeof(iovbuf));
- brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
-
- /* Enable/Disable build-in roaming to allowed ext supplicant to take
- of romaing */
- brcmu_mkiovar("roam_off", (char *)&brcmf_roam, 4,
- iovbuf, sizeof(iovbuf));
- brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
-
- /* Force STA UP */
- if (brcmf_radio_up)
- brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_UP, (char *)&up,
- sizeof(up));
-
- /* Setup event_msgs */
- brcmu_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
- iovbuf, sizeof(iovbuf));
- brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
-
- brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME,
- (char *)&scan_assoc_time, sizeof(scan_assoc_time));
- brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME,
- (char *)&scan_unassoc_time, sizeof(scan_unassoc_time));
-
- /* Set and enable ARP offload feature */
- if (brcmf_arp_enable)
- brcmf_c_arp_offload_set(drvr, brcmf_arp_mode);
- brcmf_c_arp_offload_enable(drvr, brcmf_arp_enable);
-
- /* Set up pkt filter */
- if (brcmf_pkt_filter_enable) {
- for (i = 0; i < drvr->pktfilter_count; i++) {
- brcmf_c_pktfilter_offload_set(drvr,
- drvr->pktfilter[i]);
- brcmf_c_pktfilter_offload_enable(drvr,
- drvr->pktfilter[i],
- brcmf_pkt_filter_init,
- brcmf_master_mode);
- }
- }
-
- brcmf_os_proto_unblock(drvr);
-
- return 0;
-}
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_dbg.h b/drivers/staging/brcm80211/brcmfmac/dhd_dbg.h
deleted file mode 100644
index 5be4d7a609c..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/dhd_dbg.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCMF_DBG_H_
-#define _BRCMF_DBG_H_
-
-#if defined(BCMDBG)
-
-#define BRCMF_ERROR(args) \
- do {if ((brcmf_msg_level & BRCMF_ERROR_VAL) && (net_ratelimit())) \
- printk args; } while (0)
-#define BRCMF_TRACE(args) do {if (brcmf_msg_level & BRCMF_TRACE_VAL) \
- printk args; } while (0)
-#define BRCMF_INFO(args) do {if (brcmf_msg_level & BRCMF_INFO_VAL) \
- printk args; } while (0)
-#define BRCMF_DATA(args) do {if (brcmf_msg_level & BRCMF_DATA_VAL) \
- printk args; } while (0)
-#define BRCMF_CTL(args) do {if (brcmf_msg_level & BRCMF_CTL_VAL) \
- printk args; } while (0)
-#define BRCMF_TIMER(args) do {if (brcmf_msg_level & BRCMF_TIMER_VAL) \
- printk args; } while (0)
-#define BRCMF_INTR(args) do {if (brcmf_msg_level & BRCMF_INTR_VAL) \
- printk args; } while (0)
-#define BRCMF_GLOM(args) do {if (brcmf_msg_level & BRCMF_GLOM_VAL) \
- printk args; } while (0)
-#define BRCMF_EVENT(args) do {if (brcmf_msg_level & BRCMF_EVENT_VAL) \
- printk args; } while (0)
-
-#define BRCMF_DATA_ON() (brcmf_msg_level & BRCMF_DATA_VAL)
-#define BRCMF_CTL_ON() (brcmf_msg_level & BRCMF_CTL_VAL)
-#define BRCMF_HDRS_ON() (brcmf_msg_level & BRCMF_HDRS_VAL)
-#define BRCMF_BYTES_ON() (brcmf_msg_level & BRCMF_BYTES_VAL)
-#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL)
-
-#else /* (defined BCMDBG) || (defined BCMDBG) */
-
-#define BRCMF_ERROR(args) do {if (net_ratelimit()) printk args; } while (0)
-#define BRCMF_TRACE(args)
-#define BRCMF_INFO(args)
-#define BRCMF_DATA(args)
-#define BRCMF_CTL(args)
-#define BRCMF_TIMER(args)
-#define BRCMF_INTR(args)
-#define BRCMF_GLOM(args)
-#define BRCMF_EVENT(args)
-
-#define BRCMF_DATA_ON() 0
-#define BRCMF_CTL_ON() 0
-#define BRCMF_HDRS_ON() 0
-#define BRCMF_BYTES_ON() 0
-#define BRCMF_GLOM_ON() 0
-
-#endif /* defined(BCMDBG) */
-
-extern int brcmf_msg_level;
-
-#endif /* _BRCMF_DBG_H_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
deleted file mode 100644
index 05dada98eb6..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
+++ /dev/null
@@ -1,1736 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/random.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/fcntl.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/interrupt.h>
-#include <linux/hardirq.h>
-#include <net/cfg80211.h>
-#include <defs.h>
-#include <brcmu_utils.h>
-#include <brcmu_wifi.h>
-
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_proto.h"
-#include "dhd_dbg.h"
-#include "wl_cfg80211.h"
-#include "bcmchip.h"
-
-#if defined(CONFIG_PM_SLEEP)
-#include <linux/suspend.h>
-atomic_t brcmf_mmc_suspend;
-#endif /* defined(CONFIG_PM_SLEEP) */
-
-MODULE_AUTHOR("Broadcom Corporation");
-MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
-MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN fullmac cards");
-MODULE_LICENSE("Dual BSD/GPL");
-
-
-/* Interface control information */
-struct brcmf_if {
- struct brcmf_info *info; /* back pointer to brcmf_info */
- /* OS/stack specifics */
- struct net_device *net;
- struct net_device_stats stats;
- int idx; /* iface idx in dongle */
- int state; /* interface state */
- uint subunit; /* subunit */
- u8 mac_addr[ETH_ALEN]; /* assigned MAC address */
- bool attached; /* Delayed attachment when unset */
- bool txflowcontrol; /* Per interface flow control indicator */
- char name[IFNAMSIZ]; /* linux interface name */
-};
-
-/* Local private structure (extension of pub) */
-struct brcmf_info {
- struct brcmf_pub pub;
-
- /* OS/stack specifics */
- struct brcmf_if *iflist[BRCMF_MAX_IFS];
-
- struct semaphore proto_sem;
- wait_queue_head_t ioctl_resp_wait;
-
- /* Thread to issue ioctl for multicast */
- struct task_struct *sysioc_tsk;
- struct semaphore sysioc_sem;
- bool set_multicast;
- bool set_macaddress;
- u8 macvalue[ETH_ALEN];
- atomic_t pend_8021x_cnt;
-};
-
-/* Error bits */
-module_param(brcmf_msg_level, int, 0);
-
-/* Spawn a thread for system ioctls (set mac, set mcast) */
-uint brcmf_sysioc = true;
-module_param(brcmf_sysioc, uint, 0);
-
-/* ARP offload agent mode : Enable ARP Host Auto-Reply
-and ARP Peer Auto-Reply */
-uint brcmf_arp_mode = 0xb;
-module_param(brcmf_arp_mode, uint, 0);
-
-/* ARP offload enable */
-uint brcmf_arp_enable = true;
-module_param(brcmf_arp_enable, uint, 0);
-
-/* Global Pkt filter enable control */
-uint brcmf_pkt_filter_enable = true;
-module_param(brcmf_pkt_filter_enable, uint, 0);
-
-/* Pkt filter init setup */
-uint brcmf_pkt_filter_init;
-module_param(brcmf_pkt_filter_init, uint, 0);
-
-/* Pkt filter mode control */
-uint brcmf_master_mode = true;
-module_param(brcmf_master_mode, uint, 0);
-
-module_param(brcmf_dongle_memsize, int, 0);
-
-/* Contorl fw roaming */
-uint brcmf_roam = 1;
-
-/* Control radio state */
-uint brcmf_radio_up = 1;
-
-/* Network inteface name */
-char iface_name[IFNAMSIZ] = "wlan";
-module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
-
-/* The following are specific to the SDIO dongle */
-
-/* IOCTL response timeout */
-int brcmf_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
-
-/* Idle timeout for backplane clock */
-int brcmf_idletime = BRCMF_IDLETIME_TICKS;
-module_param(brcmf_idletime, int, 0);
-
-/* Use polling */
-uint brcmf_poll;
-module_param(brcmf_poll, uint, 0);
-
-/* Use interrupts */
-uint brcmf_intr = true;
-module_param(brcmf_intr, uint, 0);
-
-/* SDIO Drive Strength (in milliamps) */
-uint brcmf_sdiod_drive_strength = 6;
-module_param(brcmf_sdiod_drive_strength, uint, 0);
-
-/* Tx/Rx bounds */
-module_param(brcmf_txbound, uint, 0);
-module_param(brcmf_rxbound, uint, 0);
-
-#ifdef SDTEST
-/* Echo packet generator (pkts/s) */
-uint brcmf_pktgen;
-module_param(brcmf_pktgen, uint, 0);
-
-/* Echo packet len (0 => sawtooth, max 2040) */
-uint brcmf_pktgen_len;
-module_param(brcmf_pktgen_len, uint, 0);
-#endif
-
-static int brcmf_toe_get(struct brcmf_info *drvr_priv, int idx, u32 *toe_ol);
-static int brcmf_toe_set(struct brcmf_info *drvr_priv, int idx, u32 toe_ol);
-static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
- struct brcmf_event_msg *event_ptr,
- void **data_ptr);
-
-/*
- * Generalized timeout mechanism. Uses spin sleep with exponential
- * back-off until
- * the sleep time reaches one jiffy, then switches over to task delay. Usage:
- *
- * brcmf_timeout_start(&tmo, usec);
- * while (!brcmf_timeout_expired(&tmo))
- * if (poll_something())
- * break;
- * if (brcmf_timeout_expired(&tmo))
- * fatal();
- */
-
-void brcmf_timeout_start(struct brcmf_timeout *tmo, uint usec)
-{
- tmo->limit = usec;
- tmo->increment = 0;
- tmo->elapsed = 0;
- tmo->tick = 1000000 / HZ;
-}
-
-int brcmf_timeout_expired(struct brcmf_timeout *tmo)
-{
- /* Does nothing the first call */
- if (tmo->increment == 0) {
- tmo->increment = 1;
- return 0;
- }
-
- if (tmo->elapsed >= tmo->limit)
- return 1;
-
- /* Add the delay that's about to take place */
- tmo->elapsed += tmo->increment;
-
- if (tmo->increment < tmo->tick) {
- udelay(tmo->increment);
- tmo->increment *= 2;
- if (tmo->increment > tmo->tick)
- tmo->increment = tmo->tick;
- } else {
- wait_queue_head_t delay_wait;
- DECLARE_WAITQUEUE(wait, current);
- int pending;
- init_waitqueue_head(&delay_wait);
- add_wait_queue(&delay_wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- pending = signal_pending(current);
- remove_wait_queue(&delay_wait, &wait);
- set_current_state(TASK_RUNNING);
- if (pending)
- return 1; /* Interrupted */
- }
-
- return 0;
-}
-
-static int brcmf_net2idx(struct brcmf_info *drvr_priv, struct net_device *net)
-{
- int i = 0;
-
- while (i < BRCMF_MAX_IFS) {
- if (drvr_priv->iflist[i] && (drvr_priv->iflist[i]->net == net))
- return i;
- i++;
- }
-
- return BRCMF_BAD_IF;
-}
-
-int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
-{
- int i = BRCMF_MAX_IFS;
-
- if (name == NULL || *name == '\0')
- return 0;
-
- while (--i > 0)
- if (drvr_priv->iflist[i]
- && !strncmp(drvr_priv->iflist[i]->name, name, IFNAMSIZ))
- break;
-
- BRCMF_TRACE(("%s: return idx %d for \"%s\"\n", __func__, i, name));
-
- return i; /* default - the primary interface */
-}
-
-char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
-{
- struct brcmf_info *drvr_priv = drvr->info;
-
- if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
- BRCMF_ERROR(("%s: ifidx %d out of range\n", __func__, ifidx));
- return "<if_bad>";
- }
-
- if (drvr_priv->iflist[ifidx] == NULL) {
- BRCMF_ERROR(("%s: null i/f %d\n", __func__, ifidx));
- return "<if_null>";
- }
-
- if (drvr_priv->iflist[ifidx]->net)
- return drvr_priv->iflist[ifidx]->net->name;
-
- return "<if_none>";
-}
-
-static void _brcmf_set_multicast_list(struct brcmf_info *drvr_priv, int ifidx)
-{
- struct net_device *dev;
- struct netdev_hw_addr *ha;
- u32 allmulti, cnt;
-
- struct brcmf_ioctl ioc;
- char *buf, *bufp;
- uint buflen;
- int ret;
-
- dev = drvr_priv->iflist[ifidx]->net;
- cnt = netdev_mc_count(dev);
-
- /* Determine initial value of allmulti flag */
- allmulti = (dev->flags & IFF_ALLMULTI) ? true : false;
-
- /* Send down the multicast list first. */
-
- buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETH_ALEN);
- bufp = buf = kmalloc(buflen, GFP_ATOMIC);
- if (!bufp) {
- BRCMF_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), cnt));
- return;
- }
-
- strcpy(bufp, "mcast_list");
- bufp += strlen("mcast_list") + 1;
-
- cnt = cpu_to_le32(cnt);
- memcpy(bufp, &cnt, sizeof(cnt));
- bufp += sizeof(cnt);
-
- netdev_for_each_mc_addr(ha, dev) {
- if (!cnt)
- break;
- memcpy(bufp, ha->addr, ETH_ALEN);
- bufp += ETH_ALEN;
- cnt--;
- }
-
- memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = BRCMF_C_SET_VAR;
- ioc.buf = buf;
- ioc.len = buflen;
- ioc.set = true;
-
- ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
- if (ret < 0) {
- BRCMF_ERROR(("%s: set mcast_list failed, cnt %d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), cnt));
- allmulti = cnt ? true : allmulti;
- }
-
- kfree(buf);
-
- /* Now send the allmulti setting. This is based on the setting in the
- * net_device flags, but might be modified above to be turned on if we
- * were trying to set some addresses and dongle rejected it...
- */
-
- buflen = sizeof("allmulti") + sizeof(allmulti);
- buf = kmalloc(buflen, GFP_ATOMIC);
- if (!buf) {
- BRCMF_ERROR(("%s: out of memory for allmulti\n",
- brcmf_ifname(&drvr_priv->pub, ifidx)));
- return;
- }
- allmulti = cpu_to_le32(allmulti);
-
- if (!brcmu_mkiovar
- ("allmulti", (void *)&allmulti, sizeof(allmulti), buf, buflen)) {
- BRCMF_ERROR(("%s: mkiovar failed for allmulti, datalen %d "
- "buflen %u\n",
- brcmf_ifname(&drvr_priv->pub, ifidx),
- (int)sizeof(allmulti), buflen));
- kfree(buf);
- return;
- }
-
- memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = BRCMF_C_SET_VAR;
- ioc.buf = buf;
- ioc.len = buflen;
- ioc.set = true;
-
- ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
- if (ret < 0) {
- BRCMF_ERROR(("%s: set allmulti %d failed\n",
- brcmf_ifname(&drvr_priv->pub, ifidx),
- le32_to_cpu(allmulti)));
- }
-
- kfree(buf);
-
- /* Finally, pick up the PROMISC flag as well, like the NIC
- driver does */
-
- allmulti = (dev->flags & IFF_PROMISC) ? true : false;
- allmulti = cpu_to_le32(allmulti);
-
- memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = BRCMF_C_SET_PROMISC;
- ioc.buf = &allmulti;
- ioc.len = sizeof(allmulti);
- ioc.set = true;
-
- ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
- if (ret < 0) {
- BRCMF_ERROR(("%s: set promisc %d failed\n",
- brcmf_ifname(&drvr_priv->pub, ifidx),
- le32_to_cpu(allmulti)));
- }
-}
-
-static int _brcmf_set_mac_address(struct brcmf_info *drvr_priv, int ifidx, u8 *addr)
-{
- char buf[32];
- struct brcmf_ioctl ioc;
- int ret;
-
- BRCMF_TRACE(("%s enter\n", __func__));
- if (!brcmu_mkiovar
- ("cur_etheraddr", (char *)addr, ETH_ALEN, buf, 32)) {
- BRCMF_ERROR(("%s: mkiovar failed for cur_etheraddr\n",
- brcmf_ifname(&drvr_priv->pub, ifidx)));
- return -1;
- }
- memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = BRCMF_C_SET_VAR;
- ioc.buf = buf;
- ioc.len = 32;
- ioc.set = true;
-
- ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
- if (ret < 0) {
- BRCMF_ERROR(("%s: set cur_etheraddr failed\n",
- brcmf_ifname(&drvr_priv->pub, ifidx)));
- } else {
- memcpy(drvr_priv->iflist[ifidx]->net->dev_addr, addr, ETH_ALEN);
- }
-
- return ret;
-}
-
-#ifdef SOFTAP
-extern struct net_device *ap_net_dev;
-#endif
-
-/* Virtual interfaces only ((ifp && ifp->info && ifp->idx == true) */
-static void brcmf_op_if(struct brcmf_if *ifp)
-{
- struct brcmf_info *drvr_priv;
- int ret = 0, err = 0;
-
- drvr_priv = ifp->info;
-
- BRCMF_TRACE(("%s: idx %d, state %d\n", __func__, ifp->idx, ifp->state));
-
- switch (ifp->state) {
- case BRCMF_E_IF_ADD:
- /*
- * Delete the existing interface before overwriting it
- * in case we missed the BRCMF_E_IF_DEL event.
- */
- if (ifp->net != NULL) {
- BRCMF_ERROR(("%s: ERROR: netdev:%s already exists, "
- "try free & unregister\n",
- __func__, ifp->net->name));
- netif_stop_queue(ifp->net);
- unregister_netdev(ifp->net);
- free_netdev(ifp->net);
- }
- /* Allocate etherdev, including space for private structure */
- ifp->net = alloc_etherdev(sizeof(drvr_priv));
- if (!ifp->net) {
- BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
- ret = -ENOMEM;
- }
- if (ret == 0) {
- strcpy(ifp->net->name, ifp->name);
- memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(drvr_priv));
- err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
- if (err != 0) {
- BRCMF_ERROR(("%s: brcmf_net_attach failed, "
- "err %d\n",
- __func__, err));
- ret = -EOPNOTSUPP;
- } else {
-#ifdef SOFTAP
- /* semaphore that the soft AP CODE
- waits on */
- extern struct semaphore ap_eth_sema;
-
- /* save ptr to wl0.1 netdev for use
- in wl_iw.c */
- ap_net_dev = ifp->net;
- /* signal to the SOFTAP 'sleeper' thread,
- wl0.1 is ready */
- up(&ap_eth_sema);
-#endif
- BRCMF_TRACE(("\n ==== pid:%x, net_device for "
- "if:%s created ===\n\n",
- current->pid, ifp->net->name));
- ifp->state = 0;
- }
- }
- break;
- case BRCMF_E_IF_DEL:
- if (ifp->net != NULL) {
- BRCMF_TRACE(("\n%s: got 'WLC_E_IF_DEL' state\n",
- __func__));
- netif_stop_queue(ifp->net);
- unregister_netdev(ifp->net);
- ret = BRCMF_DEL_IF; /* Make sure the free_netdev()
- is called */
- }
- break;
- default:
- BRCMF_ERROR(("%s: bad op %d\n", __func__, ifp->state));
- break;
- }
-
- if (ret < 0) {
- if (ifp->net)
- free_netdev(ifp->net);
-
- drvr_priv->iflist[ifp->idx] = NULL;
- kfree(ifp);
-#ifdef SOFTAP
- if (ifp->net == ap_net_dev)
- ap_net_dev = NULL; /* NULL SOFTAP global
- wl0.1 as well */
-#endif /* SOFTAP */
- }
-}
-
-static int _brcmf_sysioc_thread(void *data)
-{
- struct brcmf_info *drvr_priv = (struct brcmf_info *) data;
- int i;
-#ifdef SOFTAP
- bool in_ap = false;
-#endif
-
- allow_signal(SIGTERM);
-
- while (down_interruptible(&drvr_priv->sysioc_sem) == 0) {
- if (kthread_should_stop())
- break;
- for (i = 0; i < BRCMF_MAX_IFS; i++) {
- struct brcmf_if *ifentry = drvr_priv->iflist[i];
- if (ifentry) {
-#ifdef SOFTAP
- in_ap = (ap_net_dev != NULL);
-#endif /* SOFTAP */
- if (ifentry->state)
- brcmf_op_if(ifentry);
-#ifdef SOFTAP
- if (drvr_priv->iflist[i] == NULL) {
- BRCMF_TRACE(("\n\n %s: interface %d "
- "removed!\n", __func__,
- i));
- continue;
- }
-
- if (in_ap && drvr_priv->set_macaddress) {
- BRCMF_TRACE(("attempt to set MAC for"
- " %s in AP Mode,"
- " blocked.\n",
- ifentry->net->name));
- drvr_priv->set_macaddress = false;
- continue;
- }
-
- if (in_ap && drvr_priv->set_multicast) {
- BRCMF_TRACE(("attempt to set MULTICAST "
- "list for %s in AP Mode, "
- "blocked.\n",
- ifentry->net->name));
- drvr_priv->set_multicast = false;
- continue;
- }
-#endif /* SOFTAP */
- if (drvr_priv->set_multicast) {
- drvr_priv->set_multicast = false;
- _brcmf_set_multicast_list(drvr_priv, i);
- }
- if (drvr_priv->set_macaddress) {
- drvr_priv->set_macaddress = false;
- _brcmf_set_mac_address(drvr_priv, i,
- drvr_priv->macvalue);
- }
- }
- }
- }
- return 0;
-}
-
-static int brcmf_netdev_set_mac_address(struct net_device *dev, void *addr)
-{
- int ret = 0;
-
- struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(dev);
- struct sockaddr *sa = (struct sockaddr *)addr;
- int ifidx;
-
- ifidx = brcmf_net2idx(drvr_priv, dev);
- if (ifidx == BRCMF_BAD_IF)
- return -1;
-
- memcpy(&drvr_priv->macvalue, sa->sa_data, ETH_ALEN);
- drvr_priv->set_macaddress = true;
- up(&drvr_priv->sysioc_sem);
-
- return ret;
-}
-
-static void brcmf_netdev_set_multicast_list(struct net_device *dev)
-{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(dev);
- int ifidx;
-
- ifidx = brcmf_net2idx(drvr_priv, dev);
- if (ifidx == BRCMF_BAD_IF)
- return;
-
- drvr_priv->set_multicast = true;
- up(&drvr_priv->sysioc_sem);
-}
-
-int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
-{
- struct brcmf_info *drvr_priv = drvr->info;
-
- /* Reject if down */
- if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN))
- return -ENODEV;
-
- /* Update multicast statistic */
- if (pktbuf->len >= ETH_ALEN) {
- u8 *pktdata = (u8 *) (pktbuf->data);
- struct ethhdr *eh = (struct ethhdr *)pktdata;
-
- if (is_multicast_ether_addr(eh->h_dest))
- drvr->tx_multicast++;
- if (ntohs(eh->h_proto) == ETH_P_PAE)
- atomic_inc(&drvr_priv->pend_8021x_cnt);
- }
-
- /* If the protocol uses a data header, apply it */
- brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
-
- /* Use bus module to send data frame */
- return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf);
-}
-
-static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *net)
-{
- int ret;
- struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(net);
- int ifidx;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* Reject if down */
- if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) {
- BRCMF_ERROR(("%s: xmit rejected pub.up=%d busstate=%d\n",
- __func__, drvr_priv->pub.up,
- drvr_priv->pub.busstate));
- netif_stop_queue(net);
- return -ENODEV;
- }
-
- ifidx = brcmf_net2idx(drvr_priv, net);
- if (ifidx == BRCMF_BAD_IF) {
- BRCMF_ERROR(("%s: bad ifidx %d\n", __func__, ifidx));
- netif_stop_queue(net);
- return -ENODEV;
- }
-
- /* Make sure there's enough room for any header */
- if (skb_headroom(skb) < drvr_priv->pub.hdrlen) {
- struct sk_buff *skb2;
-
- BRCMF_INFO(("%s: insufficient headroom\n",
- brcmf_ifname(&drvr_priv->pub, ifidx)));
- drvr_priv->pub.tx_realloc++;
- skb2 = skb_realloc_headroom(skb, drvr_priv->pub.hdrlen);
- dev_kfree_skb(skb);
- skb = skb2;
- if (skb == NULL) {
- BRCMF_ERROR(("%s: skb_realloc_headroom failed\n",
- brcmf_ifname(&drvr_priv->pub, ifidx)));
- ret = -ENOMEM;
- goto done;
- }
- }
-
- ret = brcmf_sendpkt(&drvr_priv->pub, ifidx, skb);
-
-done:
- if (ret)
- drvr_priv->pub.dstats.tx_dropped++;
- else
- drvr_priv->pub.tx_packets++;
-
- /* Return ok: we always eat the packet */
- return 0;
-}
-
-void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool state)
-{
- struct net_device *net;
- struct brcmf_info *drvr_priv = drvr->info;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- drvr->txoff = state;
- net = drvr_priv->iflist[ifidx]->net;
- if (state == ON)
- netif_stop_queue(net);
- else
- netif_wake_queue(net);
-}
-
-void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
- int numpkt)
-{
- struct brcmf_info *drvr_priv = drvr->info;
- unsigned char *eth;
- uint len;
- void *data;
- struct sk_buff *pnext, *save_pktbuf;
- int i;
- struct brcmf_if *ifp;
- struct brcmf_event_msg event;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- save_pktbuf = skb;
-
- for (i = 0; skb && i < numpkt; i++, skb = pnext) {
-
- pnext = skb->next;
- skb->next = NULL;
-
- /* Get the protocol, maintain skb around eth_type_trans()
- * The main reason for this hack is for the limitation of
- * Linux 2.4 where 'eth_type_trans' uses the
- * 'net->hard_header_len'
- * to perform skb_pull inside vs ETH_HLEN. Since to avoid
- * coping of the packet coming from the network stack to add
- * BDC, Hardware header etc, during network interface
- * registration
- * we set the 'net->hard_header_len' to ETH_HLEN + extra space
- * required
- * for BDC, Hardware header etc. and not just the ETH_HLEN
- */
- eth = skb->data;
- len = skb->len;
-
- ifp = drvr_priv->iflist[ifidx];
- if (ifp == NULL)
- ifp = drvr_priv->iflist[0];
-
- skb->dev = ifp->net;
- skb->protocol = eth_type_trans(skb, skb->dev);
-
- if (skb->pkt_type == PACKET_MULTICAST)
- drvr_priv->pub.rx_multicast++;
-
- skb->data = eth;
- skb->len = len;
-
- /* Strip header, count, deliver upward */
- skb_pull(skb, ETH_HLEN);
-
- /* Process special event packets and then discard them */
- if (ntohs(skb->protocol) == ETH_P_LINK_CTL)
- brcmf_host_event(drvr_priv, &ifidx,
- skb_mac_header(skb),
- &event, &data);
-
- if (drvr_priv->iflist[ifidx] &&
- !drvr_priv->iflist[ifidx]->state)
- ifp = drvr_priv->iflist[ifidx];
-
- if (ifp->net)
- ifp->net->last_rx = jiffies;
-
- drvr->dstats.rx_bytes += skb->len;
- drvr->rx_packets++; /* Local count */
-
- if (in_interrupt()) {
- netif_rx(skb);
- } else {
- /* If the receive is not processed inside an ISR,
- * the softirqd must be woken explicitly to service
- * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
- * by netif_rx_ni(), but in earlier kernels, we need
- * to do it manually.
- */
- netif_rx_ni(skb);
- }
- }
-}
-
-void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp, bool success)
-{
- uint ifidx;
- struct brcmf_info *drvr_priv = drvr->info;
- struct ethhdr *eh;
- u16 type;
-
- brcmf_proto_hdrpull(drvr, &ifidx, txp);
-
- eh = (struct ethhdr *)(txp->data);
- type = ntohs(eh->h_proto);
-
- if (type == ETH_P_PAE)
- atomic_dec(&drvr_priv->pend_8021x_cnt);
-
-}
-
-static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *net)
-{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(net);
- struct brcmf_if *ifp;
- int ifidx;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- ifidx = brcmf_net2idx(drvr_priv, net);
- if (ifidx == BRCMF_BAD_IF)
- return NULL;
-
- ifp = drvr_priv->iflist[ifidx];
-
- if (drvr_priv->pub.up) {
- /* Use the protocol to get dongle stats */
- brcmf_proto_dstats(&drvr_priv->pub);
- }
-
- /* Copy dongle stats to net device stats */
- ifp->stats.rx_packets = drvr_priv->pub.dstats.rx_packets;
- ifp->stats.tx_packets = drvr_priv->pub.dstats.tx_packets;
- ifp->stats.rx_bytes = drvr_priv->pub.dstats.rx_bytes;
- ifp->stats.tx_bytes = drvr_priv->pub.dstats.tx_bytes;
- ifp->stats.rx_errors = drvr_priv->pub.dstats.rx_errors;
- ifp->stats.tx_errors = drvr_priv->pub.dstats.tx_errors;
- ifp->stats.rx_dropped = drvr_priv->pub.dstats.rx_dropped;
- ifp->stats.tx_dropped = drvr_priv->pub.dstats.tx_dropped;
- ifp->stats.multicast = drvr_priv->pub.dstats.multicast;
-
- return &ifp->stats;
-}
-
-/* Retrieve current toe component enables, which are kept
- as a bitmap in toe_ol iovar */
-static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
-{
- struct brcmf_ioctl ioc;
- char buf[32];
- int ret;
-
- memset(&ioc, 0, sizeof(ioc));
-
- ioc.cmd = BRCMF_C_GET_VAR;
- ioc.buf = buf;
- ioc.len = (uint) sizeof(buf);
- ioc.set = false;
-
- strcpy(buf, "toe_ol");
- ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
- if (ret < 0) {
- /* Check for older dongle image that doesn't support toe_ol */
- if (ret == -EIO) {
- BRCMF_ERROR(("%s: toe not supported by device\n",
- brcmf_ifname(&drvr_priv->pub, ifidx)));
- return -EOPNOTSUPP;
- }
-
- BRCMF_INFO(("%s: could not get toe_ol: ret=%d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), ret));
- return ret;
- }
-
- memcpy(toe_ol, buf, sizeof(u32));
- return 0;
-}
-
-/* Set current toe component enables in toe_ol iovar,
- and set toe global enable iovar */
-static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
-{
- struct brcmf_ioctl ioc;
- char buf[32];
- int toe, ret;
-
- memset(&ioc, 0, sizeof(ioc));
-
- ioc.cmd = BRCMF_C_SET_VAR;
- ioc.buf = buf;
- ioc.len = (uint) sizeof(buf);
- ioc.set = true;
-
- /* Set toe_ol as requested */
-
- strcpy(buf, "toe_ol");
- memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(u32));
-
- ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
- if (ret < 0) {
- BRCMF_ERROR(("%s: could not set toe_ol: ret=%d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), ret));
- return ret;
- }
-
- /* Enable toe globally only if any components are enabled. */
-
- toe = (toe_ol != 0);
-
- strcpy(buf, "toe");
- memcpy(&buf[sizeof("toe")], &toe, sizeof(u32));
-
- ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
- if (ret < 0) {
- BRCMF_ERROR(("%s: could not set toe: ret=%d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), ret));
- return ret;
- }
-
- return 0;
-}
-
-static void brcmf_ethtool_get_drvinfo(struct net_device *net,
- struct ethtool_drvinfo *info)
-{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(net);
-
- sprintf(info->driver, KBUILD_MODNAME);
- sprintf(info->version, "%lu", drvr_priv->pub.drv_version);
- sprintf(info->fw_version, "%s", BCM4329_FW_NAME);
- sprintf(info->bus_info, "%s",
- dev_name(&brcmf_cfg80211_get_sdio_func()->dev));
-}
-
-struct ethtool_ops brcmf_ethtool_ops = {
- .get_drvinfo = brcmf_ethtool_get_drvinfo
-};
-
-static int brcmf_ethtool(struct brcmf_info *drvr_priv, void *uaddr)
-{
- struct ethtool_drvinfo info;
- char drvname[sizeof(info.driver)];
- u32 cmd;
- struct ethtool_value edata;
- u32 toe_cmpnt, csum_dir;
- int ret;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* all ethtool calls start with a cmd word */
- if (copy_from_user(&cmd, uaddr, sizeof(u32)))
- return -EFAULT;
-
- switch (cmd) {
- case ETHTOOL_GDRVINFO:
- /* Copy out any request driver name */
- if (copy_from_user(&info, uaddr, sizeof(info)))
- return -EFAULT;
- strncpy(drvname, info.driver, sizeof(info.driver));
- drvname[sizeof(info.driver) - 1] = '\0';
-
- /* clear struct for return */
- memset(&info, 0, sizeof(info));
- info.cmd = cmd;
-
- /* if requested, identify ourselves */
- if (strcmp(drvname, "?dhd") == 0) {
- sprintf(info.driver, "dhd");
- strcpy(info.version, BRCMF_VERSION_STR);
- }
-
- /* otherwise, require dongle to be up */
- else if (!drvr_priv->pub.up) {
- BRCMF_ERROR(("%s: dongle is not up\n", __func__));
- return -ENODEV;
- }
-
- /* finally, report dongle driver type */
- else if (drvr_priv->pub.iswl)
- sprintf(info.driver, "wl");
- else
- sprintf(info.driver, "xx");
-
- sprintf(info.version, "%lu", drvr_priv->pub.drv_version);
- if (copy_to_user(uaddr, &info, sizeof(info)))
- return -EFAULT;
- BRCMF_CTL(("%s: given %*s, returning %s\n", __func__,
- (int)sizeof(drvname), drvname, info.driver));
- break;
-
- /* Get toe offload components from dongle */
- case ETHTOOL_GRXCSUM:
- case ETHTOOL_GTXCSUM:
- ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
- if (ret < 0)
- return ret;
-
- csum_dir =
- (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
-
- edata.cmd = cmd;
- edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
-
- if (copy_to_user(uaddr, &edata, sizeof(edata)))
- return -EFAULT;
- break;
-
- /* Set toe offload components in dongle */
- case ETHTOOL_SRXCSUM:
- case ETHTOOL_STXCSUM:
- if (copy_from_user(&edata, uaddr, sizeof(edata)))
- return -EFAULT;
-
- /* Read the current settings, update and write back */
- ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
- if (ret < 0)
- return ret;
-
- csum_dir =
- (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
-
- if (edata.data != 0)
- toe_cmpnt |= csum_dir;
- else
- toe_cmpnt &= ~csum_dir;
-
- ret = brcmf_toe_set(drvr_priv, 0, toe_cmpnt);
- if (ret < 0)
- return ret;
-
- /* If setting TX checksum mode, tell Linux the new mode */
- if (cmd == ETHTOOL_STXCSUM) {
- if (edata.data)
- drvr_priv->iflist[0]->net->features |=
- NETIF_F_IP_CSUM;
- else
- drvr_priv->iflist[0]->net->features &=
- ~NETIF_F_IP_CSUM;
- }
-
- break;
-
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int brcmf_netdev_ioctl_entry(struct net_device *net, struct ifreq *ifr,
- int cmd)
-{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(net);
- struct brcmf_c_ioctl ioc;
- int bcmerror = 0;
- int buflen = 0;
- void *buf = NULL;
- uint driver = 0;
- int ifidx;
- bool is_set_key_cmd;
-
- ifidx = brcmf_net2idx(drvr_priv, net);
- BRCMF_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __func__, ifidx, cmd));
-
- if (ifidx == BRCMF_BAD_IF)
- return -1;
-
- if (cmd == SIOCETHTOOL)
- return brcmf_ethtool(drvr_priv, (void *)ifr->ifr_data);
-
- if (cmd != SIOCDEVPRIVATE)
- return -EOPNOTSUPP;
-
- memset(&ioc, 0, sizeof(ioc));
-
- /* Copy the ioc control structure part of ioctl request */
- if (copy_from_user(&ioc, ifr->ifr_data, sizeof(struct brcmf_ioctl))) {
- bcmerror = -EINVAL;
- goto done;
- }
-
- /* Copy out any buffer passed */
- if (ioc.buf) {
- buflen = min_t(int, ioc.len, BRCMF_IOCTL_MAXLEN);
- /* optimization for direct ioctl calls from kernel */
- /*
- if (segment_eq(get_fs(), KERNEL_DS)) {
- buf = ioc.buf;
- } else {
- */
- {
- buf = kmalloc(buflen, GFP_ATOMIC);
- if (!buf) {
- bcmerror = -ENOMEM;
- goto done;
- }
- if (copy_from_user(buf, ioc.buf, buflen)) {
- bcmerror = -EINVAL;
- goto done;
- }
- }
- }
-
- /* To differentiate read 4 more byes */
- if ((copy_from_user(&driver, (char *)ifr->ifr_data +
- sizeof(struct brcmf_ioctl), sizeof(uint)) != 0)) {
- bcmerror = -EINVAL;
- goto done;
- }
-
- if (!capable(CAP_NET_ADMIN)) {
- bcmerror = -EPERM;
- goto done;
- }
-
- /* check for local brcmf ioctl and handle it */
- if (driver == BRCMF_IOCTL_MAGIC) {
- bcmerror = brcmf_c_ioctl((void *)&drvr_priv->pub, &ioc, buf, buflen);
- if (bcmerror)
- drvr_priv->pub.bcmerror = bcmerror;
- goto done;
- }
-
- /* send to dongle (must be up, and wl) */
- if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) {
- BRCMF_ERROR(("%s DONGLE_DOWN,__func__\n", __func__));
- bcmerror = -EIO;
- goto done;
- }
-
- if (!drvr_priv->pub.iswl) {
- bcmerror = -EIO;
- goto done;
- }
-
- /*
- * Intercept BRCMF_C_SET_KEY IOCTL - serialize M4 send and
- * set key IOCTL to prevent M4 encryption.
- */
- is_set_key_cmd = ((ioc.cmd == BRCMF_C_SET_KEY) ||
- ((ioc.cmd == BRCMF_C_SET_VAR) &&
- !(strncmp("wsec_key", ioc.buf, 9))) ||
- ((ioc.cmd == BRCMF_C_SET_VAR) &&
- !(strncmp("bsscfg:wsec_key", ioc.buf, 15))));
- if (is_set_key_cmd)
- brcmf_netdev_wait_pend8021x(net);
-
- bcmerror =
- brcmf_proto_ioctl(&drvr_priv->pub, ifidx, (struct brcmf_ioctl *)&ioc,
- buf, buflen);
-
-done:
- if (!bcmerror && buf && ioc.buf) {
- if (copy_to_user(ioc.buf, buf, buflen))
- bcmerror = -EFAULT;
- }
-
- kfree(buf);
-
- if (bcmerror > 0)
- bcmerror = 0;
-
- return bcmerror;
-}
-
-static int brcmf_netdev_stop(struct net_device *net)
-{
-#if !defined(IGNORE_ETH0_DOWN)
- struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(net);
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
- brcmf_cfg80211_down();
- if (drvr_priv->pub.up == 0)
- return 0;
-
- /* Set state and stop OS transmissions */
- drvr_priv->pub.up = 0;
- netif_stop_queue(net);
-#else
- BRCMF_ERROR(("BYPASS %s:due to BRCM compilation: under investigation\n",
- __func__));
-#endif /* !defined(IGNORE_ETH0_DOWN) */
-
- return 0;
-}
-
-static int brcmf_netdev_open(struct net_device *net)
-{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(net);
- u32 toe_ol;
- int ifidx = brcmf_net2idx(drvr_priv, net);
- s32 ret = 0;
-
- BRCMF_TRACE(("%s: ifidx %d\n", __func__, ifidx));
-
- if (ifidx == 0) { /* do it only for primary eth0 */
-
- /* try to bring up bus */
- ret = brcmf_bus_start(&drvr_priv->pub);
- if (ret != 0) {
- BRCMF_ERROR(("%s: failed with code %d\n",
- __func__, ret));
- return -1;
- }
- atomic_set(&drvr_priv->pend_8021x_cnt, 0);
-
- memcpy(net->dev_addr, drvr_priv->pub.mac, ETH_ALEN);
-
- /* Get current TOE mode from dongle */
- if (brcmf_toe_get(drvr_priv, ifidx, &toe_ol) >= 0
- && (toe_ol & TOE_TX_CSUM_OL) != 0)
- drvr_priv->iflist[ifidx]->net->features |=
- NETIF_F_IP_CSUM;
- else
- drvr_priv->iflist[ifidx]->net->features &=
- ~NETIF_F_IP_CSUM;
- }
- /* Allow transmit calls */
- netif_start_queue(net);
- drvr_priv->pub.up = 1;
- if (unlikely(brcmf_cfg80211_up())) {
- BRCMF_ERROR(("%s: failed to bring up cfg80211\n",
- __func__));
- return -1;
- }
-
- return ret;
-}
-
-int
-brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, void *handle, char *name,
- u8 *mac_addr, u32 flags, u8 bssidx)
-{
- struct brcmf_if *ifp;
-
- BRCMF_TRACE(("%s: idx %d, handle->%p\n", __func__, ifidx, handle));
-
- ifp = drvr_priv->iflist[ifidx];
- if (!ifp) {
- ifp = kmalloc(sizeof(struct brcmf_if), GFP_ATOMIC);
- if (!ifp) {
- BRCMF_ERROR(("%s: OOM - struct brcmf_if\n", __func__));
- return -ENOMEM;
- }
- }
-
- memset(ifp, 0, sizeof(struct brcmf_if));
- ifp->info = drvr_priv;
- drvr_priv->iflist[ifidx] = ifp;
- strlcpy(ifp->name, name, IFNAMSIZ);
- if (mac_addr != NULL)
- memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
-
- if (handle == NULL) {
- ifp->state = BRCMF_E_IF_ADD;
- ifp->idx = ifidx;
- up(&drvr_priv->sysioc_sem);
- } else
- ifp->net = (struct net_device *)handle;
-
- return 0;
-}
-
-void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx)
-{
- struct brcmf_if *ifp;
-
- BRCMF_TRACE(("%s: idx %d\n", __func__, ifidx));
-
- ifp = drvr_priv->iflist[ifidx];
- if (!ifp) {
- BRCMF_ERROR(("%s: Null interface\n", __func__));
- return;
- }
-
- ifp->state = BRCMF_E_IF_DEL;
- ifp->idx = ifidx;
- up(&drvr_priv->sysioc_sem);
-}
-
-struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
-{
- struct brcmf_info *drvr_priv = NULL;
- struct net_device *net;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* Allocate etherdev, including space for private structure */
- net = alloc_etherdev(sizeof(drvr_priv));
- if (!net) {
- BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
- goto fail;
- }
-
- /* Allocate primary brcmf_info */
- drvr_priv = kzalloc(sizeof(struct brcmf_info), GFP_ATOMIC);
- if (!drvr_priv) {
- BRCMF_ERROR(("%s: OOM - alloc brcmf_info\n", __func__));
- goto fail;
- }
-
- /*
- * Save the brcmf_info into the priv
- */
- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
-
- /* Set network interface name if it was provided as module parameter */
- if (iface_name[0]) {
- int len;
- char ch;
- strncpy(net->name, iface_name, IFNAMSIZ);
- net->name[IFNAMSIZ - 1] = 0;
- len = strlen(net->name);
- ch = net->name[len - 1];
- if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
- strcat(net->name, "%d");
- }
-
- if (brcmf_add_if(drvr_priv, 0, (void *)net, net->name, NULL, 0, 0) ==
- BRCMF_BAD_IF)
- goto fail;
-
- net->netdev_ops = NULL;
- sema_init(&drvr_priv->proto_sem, 1);
- /* Initialize other structure content */
- init_waitqueue_head(&drvr_priv->ioctl_resp_wait);
-
- /* Link to info module */
- drvr_priv->pub.info = drvr_priv;
-
- /* Link to bus module */
- drvr_priv->pub.bus = bus;
- drvr_priv->pub.hdrlen = bus_hdrlen;
-
- /* Attach and link in the protocol */
- if (brcmf_proto_attach(&drvr_priv->pub) != 0) {
- BRCMF_ERROR(("brcmf_prot_attach failed\n"));
- goto fail;
- }
-
- /* Attach and link in the cfg80211 */
- if (unlikely(brcmf_cfg80211_attach(net, &drvr_priv->pub))) {
- BRCMF_ERROR(("wl_cfg80211_attach failed\n"));
- goto fail;
- }
-
- if (brcmf_sysioc) {
- sema_init(&drvr_priv->sysioc_sem, 0);
- drvr_priv->sysioc_tsk = kthread_run(_brcmf_sysioc_thread, drvr_priv,
- "_brcmf_sysioc");
- if (IS_ERR(drvr_priv->sysioc_tsk)) {
- printk(KERN_WARNING
- "_brcmf_sysioc thread failed to start\n");
- drvr_priv->sysioc_tsk = NULL;
- }
- } else
- drvr_priv->sysioc_tsk = NULL;
-
- /*
- * Save the brcmf_info into the priv
- */
- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
-
-#if defined(CONFIG_PM_SLEEP)
- atomic_set(&brcmf_mmc_suspend, false);
-#endif /* defined(CONFIG_PM_SLEEP) */
- return &drvr_priv->pub;
-
-fail:
- if (net)
- free_netdev(net);
- if (drvr_priv)
- brcmf_detach(&drvr_priv->pub);
-
- return NULL;
-}
-
-int brcmf_bus_start(struct brcmf_pub *drvr)
-{
- int ret = -1;
- struct brcmf_info *drvr_priv = drvr->info;
- /* Room for "event_msgs" + '\0' + bitvec */
- char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
-
- BRCMF_TRACE(("%s:\n", __func__));
-
- /* Bring up the bus */
- ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub, true);
- if (ret != 0) {
- BRCMF_ERROR(("%s, brcmf_sdbrcm_bus_init failed %d\n", __func__,
- ret));
- return ret;
- }
-
- /* If bus is not ready, can't come up */
- if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) {
- BRCMF_ERROR(("%s failed bus is not ready\n", __func__));
- return -ENODEV;
- }
-
- brcmu_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
- iovbuf, sizeof(iovbuf));
- brcmf_proto_cdc_query_ioctl(drvr, 0, BRCMF_C_GET_VAR, iovbuf,
- sizeof(iovbuf));
- memcpy(drvr->eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
-
- setbit(drvr->eventmask, BRCMF_E_SET_SSID);
- setbit(drvr->eventmask, BRCMF_E_PRUNE);
- setbit(drvr->eventmask, BRCMF_E_AUTH);
- setbit(drvr->eventmask, BRCMF_E_REASSOC);
- setbit(drvr->eventmask, BRCMF_E_REASSOC_IND);
- setbit(drvr->eventmask, BRCMF_E_DEAUTH_IND);
- setbit(drvr->eventmask, BRCMF_E_DISASSOC_IND);
- setbit(drvr->eventmask, BRCMF_E_DISASSOC);
- setbit(drvr->eventmask, BRCMF_E_JOIN);
- setbit(drvr->eventmask, BRCMF_E_ASSOC_IND);
- setbit(drvr->eventmask, BRCMF_E_PSK_SUP);
- setbit(drvr->eventmask, BRCMF_E_LINK);
- setbit(drvr->eventmask, BRCMF_E_NDIS_LINK);
- setbit(drvr->eventmask, BRCMF_E_MIC_ERROR);
- setbit(drvr->eventmask, BRCMF_E_PMKID_CACHE);
- setbit(drvr->eventmask, BRCMF_E_TXFAIL);
- setbit(drvr->eventmask, BRCMF_E_JOIN_START);
- setbit(drvr->eventmask, BRCMF_E_SCAN_COMPLETE);
-
-/* enable dongle roaming event */
-
- drvr->pktfilter_count = 1;
- /* Setup filter to allow only unicast */
- drvr->pktfilter[0] = "100 0 0 0 0x01 0x00";
-
- /* Bus is ready, do any protocol initialization */
- ret = brcmf_proto_init(&drvr_priv->pub);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static struct net_device_ops brcmf_netdev_ops_pri = {
- .ndo_open = brcmf_netdev_open,
- .ndo_stop = brcmf_netdev_stop,
- .ndo_get_stats = brcmf_netdev_get_stats,
- .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
- .ndo_start_xmit = brcmf_netdev_start_xmit,
- .ndo_set_mac_address = brcmf_netdev_set_mac_address,
- .ndo_set_multicast_list = brcmf_netdev_set_multicast_list
-};
-
-int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
-{
- struct brcmf_info *drvr_priv = drvr->info;
- struct net_device *net;
- u8 temp_addr[ETH_ALEN] = {
- 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
-
- BRCMF_TRACE(("%s: ifidx %d\n", __func__, ifidx));
-
- net = drvr_priv->iflist[ifidx]->net;
- net->netdev_ops = &brcmf_netdev_ops_pri;
-
- /*
- * We have to use the primary MAC for virtual interfaces
- */
- if (ifidx != 0) {
- /* for virtual interfaces use the primary MAC */
- memcpy(temp_addr, drvr_priv->pub.mac, ETH_ALEN);
-
- }
-
- if (ifidx == 1) {
- BRCMF_TRACE(("%s ACCESS POINT MAC:\n", __func__));
- /* ACCESSPOINT INTERFACE CASE */
- temp_addr[0] |= 0X02; /* set bit 2 ,
- - Locally Administered address */
-
- }
- net->hard_header_len = ETH_HLEN + drvr_priv->pub.hdrlen;
- net->ethtool_ops = &brcmf_ethtool_ops;
-
- drvr_priv->pub.rxsz = net->mtu + net->hard_header_len +
- drvr_priv->pub.hdrlen;
-
- memcpy(net->dev_addr, temp_addr, ETH_ALEN);
-
- if (register_netdev(net) != 0) {
- BRCMF_ERROR(("%s: couldn't register the net device\n",
- __func__));
- goto fail;
- }
-
- BRCMF_INFO(("%s: Broadcom Dongle Host Driver\n", net->name));
-
- return 0;
-
-fail:
- net->netdev_ops = NULL;
- return -EBADE;
-}
-
-static void brcmf_bus_detach(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (drvr) {
- drvr_priv = drvr->info;
- if (drvr_priv) {
- /* Stop the protocol module */
- brcmf_proto_stop(&drvr_priv->pub);
-
- /* Stop the bus module */
- brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus, true);
- }
- }
-}
-
-void brcmf_detach(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (drvr) {
- drvr_priv = drvr->info;
- if (drvr_priv) {
- struct brcmf_if *ifp;
- int i;
-
- for (i = 1; i < BRCMF_MAX_IFS; i++)
- if (drvr_priv->iflist[i])
- brcmf_del_if(drvr_priv, i);
-
- ifp = drvr_priv->iflist[0];
- if (ifp->net->netdev_ops == &brcmf_netdev_ops_pri) {
- brcmf_netdev_stop(ifp->net);
- unregister_netdev(ifp->net);
- }
-
- if (drvr_priv->sysioc_tsk) {
- send_sig(SIGTERM, drvr_priv->sysioc_tsk, 1);
- kthread_stop(drvr_priv->sysioc_tsk);
- drvr_priv->sysioc_tsk = NULL;
- }
-
- brcmf_bus_detach(drvr);
-
- if (drvr->prot)
- brcmf_proto_detach(drvr);
-
- brcmf_cfg80211_detach();
-
- free_netdev(ifp->net);
- kfree(ifp);
- kfree(drvr_priv);
- }
- }
-}
-
-static void __exit brcmf_module_cleanup(void)
-{
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- brcmf_bus_unregister();
-}
-
-static int __init brcmf_module_init(void)
-{
- int error;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- error = brcmf_bus_register();
-
- if (error) {
- BRCMF_ERROR(("%s: brcmf_bus_register failed\n", __func__));
- goto failed;
- }
- return 0;
-
-failed:
- return -EINVAL;
-}
-
-module_init(brcmf_module_init);
-module_exit(brcmf_module_cleanup);
-
-int brcmf_os_proto_block(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv = drvr->info;
-
- if (drvr_priv) {
- down(&drvr_priv->proto_sem);
- return 1;
- }
- return 0;
-}
-
-int brcmf_os_proto_unblock(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv = drvr->info;
-
- if (drvr_priv) {
- up(&drvr_priv->proto_sem);
- return 1;
- }
-
- return 0;
-}
-
-unsigned int brcmf_os_get_ioctl_resp_timeout(void)
-{
- return (unsigned int)brcmf_ioctl_timeout_msec;
-}
-
-void brcmf_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
-{
- brcmf_ioctl_timeout_msec = (int)timeout_msec;
-}
-
-int brcmf_os_ioctl_resp_wait(struct brcmf_pub *drvr, uint *condition,
- bool *pending)
-{
- struct brcmf_info *drvr_priv = drvr->info;
- DECLARE_WAITQUEUE(wait, current);
- int timeout = brcmf_ioctl_timeout_msec;
-
- /* Convert timeout in millsecond to jiffies */
- timeout = timeout * HZ / 1000;
-
- /* Wait until control frame is available */
- add_wait_queue(&drvr_priv->ioctl_resp_wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
-
- while (!(*condition) && (!signal_pending(current) && timeout))
- timeout = schedule_timeout(timeout);
-
- if (signal_pending(current))
- *pending = true;
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&drvr_priv->ioctl_resp_wait, &wait);
-
- return timeout;
-}
-
-int brcmf_os_ioctl_resp_wake(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv = drvr->info;
-
- if (waitqueue_active(&drvr_priv->ioctl_resp_wait))
- wake_up_interruptible(&drvr_priv->ioctl_resp_wait);
-
- return 0;
-}
-
-static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
- struct brcmf_event_msg *event, void **data)
-{
- int bcmerror = 0;
-
- bcmerror = brcmf_c_host_event(drvr_priv, ifidx, pktdata, event, data);
- if (bcmerror != 0)
- return bcmerror;
-
- if (drvr_priv->iflist[*ifidx]->net)
- brcmf_cfg80211_event(drvr_priv->iflist[*ifidx]->net,
- event, *data);
-
- return bcmerror;
-}
-
-int brcmf_netdev_reset(struct net_device *dev, u8 flag)
-{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)netdev_priv(dev);
-
- brcmf_bus_devreset(&drvr_priv->pub, flag);
-
- return 1;
-}
-
-static int brcmf_get_pend_8021x_cnt(struct brcmf_info *drvr_priv)
-{
- return atomic_read(&drvr_priv->pend_8021x_cnt);
-}
-
-#define MAX_WAIT_FOR_8021X_TX 10
-
-int brcmf_netdev_wait_pend8021x(struct net_device *dev)
-{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)netdev_priv(dev);
- int timeout = 10 * HZ / 1000;
- int ntimes = MAX_WAIT_FOR_8021X_TX;
- int pend = brcmf_get_pend_8021x_cnt(drvr_priv);
-
- while (ntimes && pend) {
- if (pend) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(timeout);
- set_current_state(TASK_RUNNING);
- ntimes--;
- }
- pend = brcmf_get_pend_8021x_cnt(drvr_priv);
- }
- return pend;
-}
-
-#ifdef BCMDBG
-int brcmf_write_to_file(struct brcmf_pub *drvr, u8 *buf, int size)
-{
- int ret = 0;
- struct file *fp;
- mm_segment_t old_fs;
- loff_t pos = 0;
-
- /* change to KERNEL_DS address limit */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- /* open file to write */
- fp = filp_open("/tmp/mem_dump", O_WRONLY | O_CREAT, 0640);
- if (!fp) {
- BRCMF_ERROR(("%s: open file error\n", __func__));
- ret = -1;
- goto exit;
- }
-
- /* Write buf to file */
- fp->f_op->write(fp, buf, size, &pos);
-
-exit:
- /* free buf before return */
- kfree(buf);
- /* close file before return */
- if (fp)
- filp_close(fp, current->files);
- /* restore previous address limit */
- set_fs(old_fs);
-
- return ret;
-}
-#endif /* BCMDBG */
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_proto.h b/drivers/staging/brcm80211/brcmfmac/dhd_proto.h
deleted file mode 100644
index ff788b37afd..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/dhd_proto.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCMF_PROTO_H_
-#define _BRCMF_PROTO_H_
-
-#ifndef IOCTL_RESP_TIMEOUT
-#define IOCTL_RESP_TIMEOUT 2000 /* In milli second */
-#endif
-
-#ifndef IOCTL_CHIP_ACTIVE_TIMEOUT
-#define IOCTL_CHIP_ACTIVE_TIMEOUT 10 /* In milli second */
-#endif
-
-/*
- * Exported from the brcmf protocol module (brcmf_cdc)
- */
-
-/* Linkage, sets prot link and updates hdrlen in pub */
-extern int brcmf_proto_attach(struct brcmf_pub *drvr);
-
-/* Unlink, frees allocated protocol memory (including brcmf_proto) */
-extern void brcmf_proto_detach(struct brcmf_pub *drvr);
-
-/* Initialize protocol: sync w/dongle state.
- * Sets dongle media info (iswl, drv_version, mac address).
- */
-extern int brcmf_proto_init(struct brcmf_pub *drvr);
-
-/* Stop protocol: sync w/dongle state. */
-extern void brcmf_proto_stop(struct brcmf_pub *drvr);
-
-/* Add any protocol-specific data header.
- * Caller must reserve prot_hdrlen prepend space.
- */
-extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx,
- struct sk_buff *txp);
-
-/* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct brcmf_pub *, int *ifidx,
- struct sk_buff *rxp);
-
-/* Use protocol to issue ioctl to dongle */
-extern int brcmf_proto_ioctl(struct brcmf_pub *drvr, int ifidx,
- struct brcmf_ioctl *ioc, void *buf, int len);
-
-/* Add prot dump output to a buffer */
-extern void brcmf_proto_dump(struct brcmf_pub *drvr,
- struct brcmu_strbuf *strbuf);
-
-/* Update local copy of dongle statistics */
-extern void brcmf_proto_dstats(struct brcmf_pub *drvr);
-
-extern int brcmf_c_ioctl(struct brcmf_pub *drvr, struct brcmf_c_ioctl *ioc,
- void *buf, uint buflen);
-
-extern int brcmf_c_preinit_ioctls(struct brcmf_pub *drvr);
-
-extern int brcmf_proto_cdc_set_ioctl(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len);
-
-#endif /* _BRCMF_PROTO_H_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c b/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c
deleted file mode 100644
index 7fa95b6213c..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c
+++ /dev/null
@@ -1,6772 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/printk.h>
-#include <linux/pci_ids.h>
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/semaphore.h>
-#include <linux/firmware.h>
-#include <asm/unaligned.h>
-#include <defs.h>
-#include <brcmu_wifi.h>
-#include <brcmu_utils.h>
-#include <brcm_hw_ids.h>
-#include <soc.h>
-#include "sdio_host.h"
-
-/* register access macros */
-#ifndef __BIG_ENDIAN
-#ifndef __mips__
-#define R_REG(r, typ) \
- brcmf_sdcard_reg_read(NULL, (r), sizeof(typ))
-#else /* __mips__ */
-#define R_REG(r, typ) \
- ({ \
- __typeof(*(r)) __osl_v; \
- __asm__ __volatile__("sync"); \
- __osl_v = brcmf_sdcard_reg_read(NULL, (r),\
- sizeof(typ)); \
- __asm__ __volatile__("sync"); \
- __osl_v; \
- })
-#endif /* __mips__ */
-
-#else /* __BIG_ENDIAN */
-#define R_REG(r, typ) \
- brcmf_sdcard_reg_read(NULL, (r), sizeof(typ))
-#endif /* __BIG_ENDIAN */
-
-#define OR_REG(r, v, typ) \
- brcmf_sdcard_reg_write(NULL, (r), sizeof(typ), R_REG(r, typ) | (v))
-
-#ifdef BCMDBG
-
-/* ARM trap handling */
-
-/* Trap types defined by ARM (see arminc.h) */
-
-#if defined(__ARM_ARCH_4T__)
-#define MAX_TRAP_TYPE (TR_FIQ + 1)
-#elif defined(__ARM_ARCH_7M__)
-#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS)
-#endif /* __ARM_ARCH_7M__ */
-
-/* The trap structure is defined here as offsets for assembly */
-#define TR_TYPE 0x00
-#define TR_EPC 0x04
-#define TR_CPSR 0x08
-#define TR_SPSR 0x0c
-#define TR_REGS 0x10
-#define TR_REG(n) (TR_REGS + (n) * 4)
-#define TR_SP TR_REG(13)
-#define TR_LR TR_REG(14)
-#define TR_PC TR_REG(15)
-
-#define TRAP_T_SIZE 80
-
-struct brcmf_trap {
- u32 type;
- u32 epc;
- u32 cpsr;
- u32 spsr;
- u32 r0;
- u32 r1;
- u32 r2;
- u32 r3;
- u32 r4;
- u32 r5;
- u32 r6;
- u32 r7;
- u32 r8;
- u32 r9;
- u32 r10;
- u32 r11;
- u32 r12;
- u32 r13;
- u32 r14;
- u32 pc;
-};
-
-#define CBUF_LEN (128)
-
-struct rte_log {
- u32 buf; /* Can't be pointer on (64-bit) hosts */
- uint buf_size;
- uint idx;
- char *_buf_compat; /* Redundant pointer for backward compat. */
-};
-
-struct rte_console {
- /* Virtual UART
- * When there is no UART (e.g. Quickturn),
- * the host should write a complete
- * input line directly into cbuf and then write
- * the length into vcons_in.
- * This may also be used when there is a real UART
- * (at risk of conflicting with
- * the real UART). vcons_out is currently unused.
- */
- volatile uint vcons_in;
- volatile uint vcons_out;
-
- /* Output (logging) buffer
- * Console output is written to a ring buffer log_buf at index log_idx.
- * The host may read the output when it sees log_idx advance.
- * Output will be lost if the output wraps around faster than the host
- * polls.
- */
- struct rte_log log;
-
- /* Console input line buffer
- * Characters are read one at a time into cbuf
- * until <CR> is received, then
- * the buffer is processed as a command line.
- * Also used for virtual UART.
- */
- uint cbuf_idx;
- char cbuf[CBUF_LEN];
-};
-
-#endif /* BCMDBG */
-#include <chipcommon.h>
-
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_proto.h"
-#include "dhd_dbg.h"
-#include <bcmchip.h>
-
-#define TXQLEN 2048 /* bulk tx queue length */
-#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
-#define TXLOW (TXHI - 256) /* turn off flow control below TXLOW */
-#define PRIOMASK 7
-
-#define TXRETRIES 2 /* # of retries for tx frames */
-
-#define BRCMF_RXBOUND 50 /* Default for max rx frames in
- one scheduling */
-
-#define BRCMF_TXBOUND 20 /* Default for max tx frames in
- one scheduling */
-
-#define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
-
-#define MEMBLOCK 2048 /* Block size used for downloading
- of dongle image */
-#define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold
- biggest possible glom */
-
-#ifndef BRCMF_FIRSTREAD
-#define BRCMF_FIRSTREAD 32
-#endif
-
-#if !ISPOWEROF2(BRCMF_FIRSTREAD)
-#error BRCMF_FIRSTREAD is not a power of 2!
-#endif
-
-/* SBSDIO_DEVICE_CTL */
-#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when
- * receiving CMD53
- */
-#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is
- * synchronous to the sdio clock
- */
-#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host
- * except the chipActive (rev 8)
- */
-#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put
- * external pads in tri-state; requires
- * sdio bus power cycle to clear (rev 9)
- */
-#define SBSDIO_DEVCTL_SB_RST_CTL 0x30 /* Force SD->SB reset mapping (rev 11) */
-#define SBSDIO_DEVCTL_RST_CORECTL 0x00 /* Determined by CoreControl bit */
-#define SBSDIO_DEVCTL_RST_BPRESET 0x10 /* Force backplane reset */
-#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20 /* Force no backplane reset */
-
-/* SBSDIO_FUNC1_CHIPCLKCSR */
-#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */
-#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */
-#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */
-#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */
-#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */
-#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */
-#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */
-#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */
-
-#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
-#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
-#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
-#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
-#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \
- (alponly ? 1 : SBSDIO_HTAV(regval)))
-/* direct(mapped) cis space */
-#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */
-#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */
-#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */
-
-#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 /* manfid tuple length, include tuple,
- * link bytes
- */
-
-/* intstatus */
-#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
-#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
-#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
-#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
-#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
-#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
-#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
-#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
-#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
-#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
-#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
-#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
-#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
-#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
-#define I_PC (1 << 10) /* descriptor error */
-#define I_PD (1 << 11) /* data error */
-#define I_DE (1 << 12) /* Descriptor protocol Error */
-#define I_RU (1 << 13) /* Receive descriptor Underflow */
-#define I_RO (1 << 14) /* Receive fifo Overflow */
-#define I_XU (1 << 15) /* Transmit fifo Underflow */
-#define I_RI (1 << 16) /* Receive Interrupt */
-#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
-#define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
-#define I_XI (1 << 24) /* Transmit Interrupt */
-#define I_RF_TERM (1 << 25) /* Read Frame Terminate */
-#define I_WF_TERM (1 << 26) /* Write Frame Terminate */
-#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
-#define I_SBINT (1 << 28) /* sbintstatus Interrupt */
-#define I_CHIPACTIVE (1 << 29) /* chip from doze to active state */
-#define I_SRESET (1 << 30) /* CCCR RES interrupt */
-#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
-#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
-#define I_DMA (I_RI | I_XI | I_ERRORS)
-
-/* corecontrol */
-#define CC_CISRDY (1 << 0) /* CIS Ready */
-#define CC_BPRESEN (1 << 1) /* CCCR RES signal */
-#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
-#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation */
-#define CC_XMTDATAAVAIL_MODE (1 << 4)
-#define CC_XMTDATAAVAIL_CTRL (1 << 5)
-
-/* SDA_FRAMECTRL */
-#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
-#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
-#define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
-#define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
-
-/* HW frame tag */
-#define SDPCM_FRAMETAG_LEN 4 /* 2 bytes len, 2 bytes check val */
-
-/* Total length of frame header for dongle protocol */
-#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
-#ifdef SDTEST
-#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + BRCMF_SDALIGN)
-#else
-#define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN)
-#endif
-
-/*
- * Software allocation of To SB Mailbox resources
- */
-
-/* tosbmailbox bits corresponding to intstatus bits */
-#define SMB_NAK (1 << 0) /* Frame NAK */
-#define SMB_INT_ACK (1 << 1) /* Host Interrupt ACK */
-#define SMB_USE_OOB (1 << 2) /* Use OOB Wakeup */
-#define SMB_DEV_INT (1 << 3) /* Miscellaneous Interrupt */
-
-/* tosbmailboxdata */
-#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version */
-
-/*
- * Software allocation of To Host Mailbox resources
- */
-
-/* intstatus bits */
-#define I_HMB_FC_STATE I_HMB_SW0 /* Flow Control State */
-#define I_HMB_FC_CHANGE I_HMB_SW1 /* Flow Control State Changed */
-#define I_HMB_FRAME_IND I_HMB_SW2 /* Frame Indication */
-#define I_HMB_HOST_INT I_HMB_SW3 /* Miscellaneous Interrupt */
-
-/* tohostmailboxdata */
-#define HMB_DATA_NAKHANDLED 1 /* retransmit NAK'd frame */
-#define HMB_DATA_DEVREADY 2 /* talk to host after enable */
-#define HMB_DATA_FC 4 /* per prio flowcontrol update flag */
-#define HMB_DATA_FWREADY 8 /* fw ready for protocol activity */
-
-#define HMB_DATA_FCDATA_MASK 0xff000000
-#define HMB_DATA_FCDATA_SHIFT 24
-
-#define HMB_DATA_VERSION_MASK 0x00ff0000
-#define HMB_DATA_VERSION_SHIFT 16
-
-/*
- * Software-defined protocol header
- */
-
-/* Current protocol version */
-#define SDPCM_PROT_VERSION 4
-
-/* SW frame header */
-#define SDPCM_PACKET_SEQUENCE(p) (((u8 *)p)[0] & 0xff)
-
-#define SDPCM_CHANNEL_MASK 0x00000f00
-#define SDPCM_CHANNEL_SHIFT 8
-#define SDPCM_PACKET_CHANNEL(p) (((u8 *)p)[1] & 0x0f)
-
-#define SDPCM_NEXTLEN_OFFSET 2
-
-/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
-#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
-#define SDPCM_DOFFSET_VALUE(p) (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
-#define SDPCM_DOFFSET_MASK 0xff000000
-#define SDPCM_DOFFSET_SHIFT 24
-#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
-#define SDPCM_FCMASK_VALUE(p) (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
-#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
-#define SDPCM_WINDOW_VALUE(p) (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
-
-#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
-
-/* logical channel numbers */
-#define SDPCM_CONTROL_CHANNEL 0 /* Control channel Id */
-#define SDPCM_EVENT_CHANNEL 1 /* Asyc Event Indication Channel Id */
-#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
-#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets */
-#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
-
-#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for 8bit frame seq */
-
-#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
-
-/* For TEST_CHANNEL packets, define another 4-byte header */
-#define SDPCM_TEST_HDRLEN 4 /*
- * Generally: Cmd(1), Ext(1), Len(2);
- * Semantics of Ext byte depend on
- * command. Len is current or requested
- * frame length, not including test
- * header; sent little-endian.
- */
-#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext:pattern id. */
-#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext:pattern id. */
-#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext:pattern id. */
-#define SDPCM_TEST_BURST 0x04 /*
- * Receiver to send a burst.
- * Ext is a frame count
- */
-#define SDPCM_TEST_SEND 0x05 /*
- * Receiver sets send mode.
- * Ext is boolean on/off
- */
-
-/* Handy macro for filling in datagen packets with a pattern */
-#define SDPCM_TEST_FILL(byteno, id) ((u8)(id + byteno))
-
-/*
- * Shared structure between dongle and the host.
- * The structure contains pointers to trap or assert information.
- */
-#define SDPCM_SHARED_VERSION 0x0002
-#define SDPCM_SHARED_VERSION_MASK 0x00FF
-#define SDPCM_SHARED_ASSERT_BUILT 0x0100
-#define SDPCM_SHARED_ASSERT 0x0200
-#define SDPCM_SHARED_TRAP 0x0400
-
-
-/* Space for header read, limit for data packets */
-#ifndef MAX_HDR_READ
-#define MAX_HDR_READ 32
-#endif
-#if !ISPOWEROF2(MAX_HDR_READ)
-#error MAX_HDR_READ is not a power of 2!
-#endif
-
-#define MAX_RX_DATASZ 2048
-
-/* Maximum milliseconds to wait for F2 to come up */
-#define BRCMF_WAIT_F2RDY 3000
-
-/* Bump up limit on waiting for HT to account for first startup;
- * if the image is doing a CRC calculation before programming the PMU
- * for HT availability, it could take a couple hundred ms more, so
- * max out at a 1 second (1000000us).
- */
-#if (PMU_MAX_TRANSITION_DLY <= 1000000)
-#undef PMU_MAX_TRANSITION_DLY
-#define PMU_MAX_TRANSITION_DLY 1000000
-#endif
-
-/* Value for ChipClockCSR during initial setup */
-#define BRCMF_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | \
- SBSDIO_ALP_AVAIL_REQ)
-
-/* Flags for SDH calls */
-#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
-
-/* sbimstate */
-#define SBIM_IBE 0x20000 /* inbanderror */
-#define SBIM_TO 0x40000 /* timeout */
-#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */
-#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */
-
-/* sbtmstatelow */
-#define SBTML_RESET 0x0001 /* reset */
-#define SBTML_REJ_MASK 0x0006 /* reject field */
-#define SBTML_REJ 0x0002 /* reject */
-#define SBTML_TMPREJ 0x0004 /* temporary reject, for error recovery */
-
-#define SBTML_SICF_SHIFT 16 /* Shift to locate the SI control flags in sbtml */
-
-/* sbtmstatehigh */
-#define SBTMH_SERR 0x0001 /* serror */
-#define SBTMH_INT 0x0002 /* interrupt */
-#define SBTMH_BUSY 0x0004 /* busy */
-#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */
-
-#define SBTMH_SISF_SHIFT 16 /* Shift to locate the SI status flags in sbtmh */
-
-/* sbidlow */
-#define SBIDL_INIT 0x80 /* initiator */
-
-/* sbidhigh */
-#define SBIDH_RC_MASK 0x000f /* revision code */
-#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */
-#define SBIDH_RCE_SHIFT 8
-#define SBCOREREV(sbidh) \
- ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
-#define SBIDH_CC_MASK 0x8ff0 /* core code */
-#define SBIDH_CC_SHIFT 4
-#define SBIDH_VC_MASK 0xffff0000 /* vendor code */
-#define SBIDH_VC_SHIFT 16
-
-/*
- * Conversion of 802.1D priority to precedence level
- */
-#define PRIO2PREC(prio) \
- (((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? \
- ((prio^2)) : (prio))
-
-BRCMF_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
-
-/*
- * Core reg address translation.
- * Both macro's returns a 32 bits byte address on the backplane bus.
- */
-#define CORE_CC_REG(base, field) (base + offsetof(chipcregs_t, field))
-#define CORE_BUS_REG(base, field) \
- (base + offsetof(struct sdpcmd_regs, field))
-#define CORE_SB(base, field) \
- (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
-
-/* core registers */
-struct sdpcmd_regs {
- u32 corecontrol; /* 0x00, rev8 */
- u32 corestatus; /* rev8 */
- u32 PAD[1];
- u32 biststatus; /* rev8 */
-
- /* PCMCIA access */
- u16 pcmciamesportaladdr; /* 0x010, rev8 */
- u16 PAD[1];
- u16 pcmciamesportalmask; /* rev8 */
- u16 PAD[1];
- u16 pcmciawrframebc; /* rev8 */
- u16 PAD[1];
- u16 pcmciaunderflowtimer; /* rev8 */
- u16 PAD[1];
-
- /* interrupt */
- u32 intstatus; /* 0x020, rev8 */
- u32 hostintmask; /* rev8 */
- u32 intmask; /* rev8 */
- u32 sbintstatus; /* rev8 */
- u32 sbintmask; /* rev8 */
- u32 funcintmask; /* rev4 */
- u32 PAD[2];
- u32 tosbmailbox; /* 0x040, rev8 */
- u32 tohostmailbox; /* rev8 */
- u32 tosbmailboxdata; /* rev8 */
- u32 tohostmailboxdata; /* rev8 */
-
- /* synchronized access to registers in SDIO clock domain */
- u32 sdioaccess; /* 0x050, rev8 */
- u32 PAD[3];
-
- /* PCMCIA frame control */
- u8 pcmciaframectrl; /* 0x060, rev8 */
- u8 PAD[3];
- u8 pcmciawatermark; /* rev8 */
- u8 PAD[155];
-
- /* interrupt batching control */
- u32 intrcvlazy; /* 0x100, rev8 */
- u32 PAD[3];
-
- /* counters */
- u32 cmd52rd; /* 0x110, rev8 */
- u32 cmd52wr; /* rev8 */
- u32 cmd53rd; /* rev8 */
- u32 cmd53wr; /* rev8 */
- u32 abort; /* rev8 */
- u32 datacrcerror; /* rev8 */
- u32 rdoutofsync; /* rev8 */
- u32 wroutofsync; /* rev8 */
- u32 writebusy; /* rev8 */
- u32 readwait; /* rev8 */
- u32 readterm; /* rev8 */
- u32 writeterm; /* rev8 */
- u32 PAD[40];
- u32 clockctlstatus; /* rev8 */
- u32 PAD[7];
-
- u32 PAD[128]; /* DMA engines */
-
- /* SDIO/PCMCIA CIS region */
- char cis[512]; /* 0x400-0x5ff, rev6 */
-
- /* PCMCIA function control registers */
- char pcmciafcr[256]; /* 0x600-6ff, rev6 */
- u16 PAD[55];
-
- /* PCMCIA backplane access */
- u16 backplanecsr; /* 0x76E, rev6 */
- u16 backplaneaddr0; /* rev6 */
- u16 backplaneaddr1; /* rev6 */
- u16 backplaneaddr2; /* rev6 */
- u16 backplaneaddr3; /* rev6 */
- u16 backplanedata0; /* rev6 */
- u16 backplanedata1; /* rev6 */
- u16 backplanedata2; /* rev6 */
- u16 backplanedata3; /* rev6 */
- u16 PAD[31];
-
- /* sprom "size" & "blank" info */
- u16 spromstatus; /* 0x7BE, rev2 */
- u32 PAD[464];
-
- u16 PAD[0x80];
-};
-
-#ifdef BCMDBG
-/* Device console log buffer state */
-struct brcmf_console {
- uint count; /* Poll interval msec counter */
- uint log_addr; /* Log struct address (fixed) */
- struct rte_log log; /* Log struct (host copy) */
- uint bufsize; /* Size of log buffer */
- u8 *buf; /* Log buffer (host copy) */
- uint last; /* Last buffer read index */
-};
-#endif /* BCMDBG */
-
-struct sdpcm_shared {
- u32 flags;
- u32 trap_addr;
- u32 assert_exp_addr;
- u32 assert_file_addr;
- u32 assert_line;
- u32 console_addr; /* Address of struct rte_console */
- u32 msgtrace_addr;
- u8 tag[32];
-};
-
-
-/* misc chip info needed by some of the routines */
-struct chip_info {
- u32 chip;
- u32 chiprev;
- u32 cccorebase;
- u32 ccrev;
- u32 cccaps;
- u32 buscorebase; /* 32 bits backplane bus address */
- u32 buscorerev;
- u32 buscoretype;
- u32 ramcorebase;
- u32 armcorebase;
- u32 pmurev;
- u32 ramsize;
-};
-
-/* Private data for SDIO bus interaction */
-struct brcmf_bus {
- struct brcmf_pub *drvr;
-
- struct brcmf_sdio_card *card; /* Handle for sdio card calls */
- struct chip_info *ci; /* Chip info struct */
- char *vars; /* Variables (from CIS and/or other) */
- uint varsz; /* Size of variables buffer */
-
- u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
- u32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
-
- u32 bus; /* gSPI or SDIO bus */
- u32 hostintmask; /* Copy of Host Interrupt Mask */
- u32 intstatus; /* Intstatus bits (events) pending */
- bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
- bool fcstate; /* State of dongle flow-control */
-
- u16 cl_devid; /* cached devid for brcmf_sdio_probe_attach() */
-
- uint blocksize; /* Block size of SDIO transfers */
- uint roundup; /* Max roundup limit */
-
- struct pktq txq; /* Queue length used for flow-control */
- u8 flowcontrol; /* per prio flow control bitmask */
- u8 tx_seq; /* Transmit sequence number (next) */
- u8 tx_max; /* Maximum transmit sequence allowed */
-
- u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
- u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
- u16 nextlen; /* Next Read Len from last header */
- u8 rx_seq; /* Receive sequence number (expected) */
- bool rxskip; /* Skip receive (awaiting NAK ACK) */
-
- struct sk_buff *glomd; /* Packet containing glomming descriptor */
- struct sk_buff *glom; /* Packet chain for glommed superframe */
- uint glomerr; /* Glom packet read errors */
-
- u8 *rxbuf; /* Buffer for receiving control packets */
- uint rxblen; /* Allocated length of rxbuf */
- u8 *rxctl; /* Aligned pointer into rxbuf */
- u8 *databuf; /* Buffer for receiving big glom packet */
- u8 *dataptr; /* Aligned pointer into databuf */
- uint rxlen; /* Length of valid data in buffer */
-
- u8 sdpcm_ver; /* Bus protocol reported by dongle */
-
- bool intr; /* Use interrupts */
- bool poll; /* Use polling */
- bool ipend; /* Device interrupt is pending */
- bool intdis; /* Interrupts disabled by isr */
- uint intrcount; /* Count of device interrupt callbacks */
- uint lastintrs; /* Count as of last watchdog timer */
- uint spurious; /* Count of spurious interrupts */
- uint pollrate; /* Ticks between device polls */
- uint polltick; /* Tick counter */
- uint pollcnt; /* Count of active polls */
-
-#ifdef BCMDBG
- struct brcmf_console console; /* Console output polling support */
- uint console_addr; /* Console address from shared struct */
-#endif /* BCMDBG */
-
- uint regfails; /* Count of R_REG failures */
-
- uint clkstate; /* State of sd and backplane clock(s) */
- bool activity; /* Activity flag for clock down */
- s32 idletime; /* Control for activity timeout */
- s32 idlecount; /* Activity timeout counter */
- s32 idleclock; /* How to set bus driver when idle */
- s32 sd_rxchain;
- bool use_rxchain; /* If brcmf should use PKT chains */
- bool sleeping; /* Is SDIO bus sleeping? */
- bool rxflow_mode; /* Rx flow control mode */
- bool rxflow; /* Is rx flow control on */
- bool alp_only; /* Don't use HT clock (ALP only) */
-/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
- bool usebufpool;
-
-#ifdef SDTEST
- /* external loopback */
- bool ext_loop;
- u8 loopid;
-
- /* pktgen configuration */
- uint pktgen_freq; /* Ticks between bursts */
- uint pktgen_count; /* Packets to send each burst */
- uint pktgen_print; /* Bursts between count displays */
- uint pktgen_total; /* Stop after this many */
- uint pktgen_minlen; /* Minimum packet data len */
- uint pktgen_maxlen; /* Maximum packet data len */
- uint pktgen_mode; /* Configured mode: tx, rx, or echo */
- uint pktgen_stop; /* Number of tx failures causing stop */
-
- /* active pktgen fields */
- uint pktgen_tick; /* Tick counter for bursts */
- uint pktgen_ptick; /* Burst counter for printing */
- uint pktgen_sent; /* Number of test packets generated */
- uint pktgen_rcvd; /* Number of test packets received */
- uint pktgen_fail; /* Number of failed send attempts */
- u16 pktgen_len; /* Length of next packet to send */
-#endif /* SDTEST */
-
- /* Some additional counters */
- uint tx_sderrs; /* Count of tx attempts with sd errors */
- uint fcqueued; /* Tx packets that got queued */
- uint rxrtx; /* Count of rtx requests (NAK to dongle) */
- uint rx_toolong; /* Receive frames too long to receive */
- uint rxc_errors; /* SDIO errors when reading control frames */
- uint rx_hdrfail; /* SDIO errors on header reads */
- uint rx_badhdr; /* Bad received headers (roosync?) */
- uint rx_badseq; /* Mismatched rx sequence number */
- uint fc_rcvd; /* Number of flow-control events received */
- uint fc_xoff; /* Number which turned on flow-control */
- uint fc_xon; /* Number which turned off flow-control */
- uint rxglomfail; /* Failed deglom attempts */
- uint rxglomframes; /* Number of glom frames (superframes) */
- uint rxglompkts; /* Number of packets from glom frames */
- uint f2rxhdrs; /* Number of header reads */
- uint f2rxdata; /* Number of frame data reads */
- uint f2txdata; /* Number of f2 frame writes */
- uint f1regdata; /* Number of f1 register accesses */
-
- u8 *ctrl_frame_buf;
- u32 ctrl_frame_len;
- bool ctrl_frame_stat;
-
- spinlock_t txqlock;
- wait_queue_head_t ctrl_wait;
-
- struct timer_list timer;
- struct completion watchdog_wait;
- struct task_struct *watchdog_tsk;
- bool wd_timer_valid;
-
- struct tasklet_struct tasklet;
- struct task_struct *dpc_tsk;
- struct completion dpc_wait;
-
- bool threads_only;
- struct semaphore sdsem;
- spinlock_t sdlock;
-
- const char *fw_name;
- const struct firmware *firmware;
- const char *nv_name;
- u32 fw_ptr;
-};
-
-struct sbconfig {
- u32 PAD[2];
- u32 sbipsflag; /* initiator port ocp slave flag */
- u32 PAD[3];
- u32 sbtpsflag; /* target port ocp slave flag */
- u32 PAD[11];
- u32 sbtmerrloga; /* (sonics >= 2.3) */
- u32 PAD;
- u32 sbtmerrlog; /* (sonics >= 2.3) */
- u32 PAD[3];
- u32 sbadmatch3; /* address match3 */
- u32 PAD;
- u32 sbadmatch2; /* address match2 */
- u32 PAD;
- u32 sbadmatch1; /* address match1 */
- u32 PAD[7];
- u32 sbimstate; /* initiator agent state */
- u32 sbintvec; /* interrupt mask */
- u32 sbtmstatelow; /* target state */
- u32 sbtmstatehigh; /* target state */
- u32 sbbwa0; /* bandwidth allocation table0 */
- u32 PAD;
- u32 sbimconfiglow; /* initiator configuration */
- u32 sbimconfighigh; /* initiator configuration */
- u32 sbadmatch0; /* address match0 */
- u32 PAD;
- u32 sbtmconfiglow; /* target configuration */
- u32 sbtmconfighigh; /* target configuration */
- u32 sbbconfig; /* broadcast configuration */
- u32 PAD;
- u32 sbbstate; /* broadcast state */
- u32 PAD[3];
- u32 sbactcnfg; /* activate configuration */
- u32 PAD[3];
- u32 sbflagst; /* current sbflags */
- u32 PAD[3];
- u32 sbidlow; /* identification */
- u32 sbidhigh; /* identification */
-};
-
-/* clkstate */
-#define CLK_NONE 0
-#define CLK_SDONLY 1
-#define CLK_PENDING 2 /* Not used yet */
-#define CLK_AVAIL 3
-
-#define BRCMF_NOPMU(brcmf) (false)
-
-#ifdef BCMDBG
-static int qcount[NUMPRIO];
-static int tx_packets[NUMPRIO];
-#endif /* BCMDBG */
-
-/* Deferred transmit */
-uint brcmf_deferred_tx = 1;
-module_param(brcmf_deferred_tx, uint, 0);
-
-/* Watchdog thread priority, -1 to use kernel timer */
-int brcmf_watchdog_prio = 97;
-module_param(brcmf_watchdog_prio, int, 0);
-
-/* Watchdog interval */
-uint brcmf_watchdog_ms = 10;
-module_param(brcmf_watchdog_ms, uint, 0);
-
-/* DPC thread priority, -1 to use tasklet */
-int brcmf_dpc_prio = 98;
-module_param(brcmf_dpc_prio, int, 0);
-
-#ifdef BCMDBG
-/* Console poll interval */
-uint brcmf_console_ms;
-module_param(brcmf_console_ms, uint, 0);
-#endif /* BCMDBG */
-
-/* Tx/Rx bounds */
-uint brcmf_txbound;
-uint brcmf_rxbound;
-uint brcmf_txminmax;
-
-/* override the RAM size if possible */
-#define DONGLE_MIN_MEMSIZE (128 * 1024)
-int brcmf_dongle_memsize;
-
-static bool brcmf_alignctl;
-
-static bool sd1idle;
-
-static bool retrydata;
-#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
-
-static const uint watermark = 8;
-static const uint firstread = BRCMF_FIRSTREAD;
-
-/* Retry count for register access failures */
-static const uint retry_limit = 2;
-
-/* Force even SD lengths (some host controllers mess up on odd bytes) */
-static bool forcealign;
-
-#define ALIGNMENT 4
-
-#define PKTALIGN(_p, _len, _align) \
- do { \
- uint datalign; \
- datalign = (unsigned long)((_p)->data); \
- datalign = roundup(datalign, (_align)) - datalign; \
- if (datalign) \
- skb_pull((_p), datalign); \
- __skb_trim((_p), (_len)); \
- } while (0)
-
-/* Limit on rounding up frames */
-static const uint max_roundup = 512;
-
-/* Try doing readahead */
-static bool brcmf_readahead;
-
-/* To check if there's window offered */
-#define DATAOK(bus) \
- (((u8)(bus->tx_max - bus->tx_seq) != 0) && \
- (((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
-
-/*
- * Reads a register in the SDIO hardware block. This block occupies a series of
- * adresses on the 32 bit backplane bus.
- */
-static void
-r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
-{
- *retryvar = 0;
- do {
- *regvar = R_REG(bus->ci->buscorebase + reg_offset, u32);
- } while (brcmf_sdcard_regfail(bus->card) &&
- (++(*retryvar) <= retry_limit));
- if (*retryvar) {
- bus->regfails += (*retryvar-1);
- if (*retryvar > retry_limit) {
- BRCMF_ERROR(("FAILED READ %Xh\n", reg_offset));
- *regvar = 0;
- }
- }
-}
-
-static void
-w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
-{
- *retryvar = 0;
- do {
- brcmf_sdcard_reg_write(NULL, bus->ci->buscorebase + reg_offset,
- sizeof(u32), regval);
- } while (brcmf_sdcard_regfail(bus->card) &&
- (++(*retryvar) <= retry_limit));
- if (*retryvar) {
- bus->regfails += (*retryvar-1);
- if (*retryvar > retry_limit)
- BRCMF_ERROR(("FAILED REGISTER WRITE"
- " %Xh\n", reg_offset));
- }
-}
-
-#define BRCMF_BUS SDIO_BUS
-
-#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
-
-#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
-
-#ifdef SDTEST
-static void brcmf_sdbrcm_checkdied(struct brcmf_bus *bus, void *pkt, uint seq);
-static void brcmf_sdbrcm_sdtest_set(struct brcmf_bus *bus, bool start);
-#endif
-
-#ifdef BCMDBG
-static int brcmf_sdbrcm_bus_console_in(struct brcmf_pub *drvr,
- unsigned char *msg, uint msglen);
-static int brcmf_sdbrcm_checkdied(struct brcmf_bus *bus, u8 *data, uint size);
-static int brcmf_sdbrcm_mem_dump(struct brcmf_bus *bus);
-#endif /* BCMDBG */
-static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter);
-
-static void brcmf_sdbrcm_release(struct brcmf_bus *bus);
-static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus);
-static void brcmf_sdbrcm_disconnect(void *ptr);
-static bool brcmf_sdbrcm_chipmatch(u16 chipid);
-static bool brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, void *card,
- u32 regsva, u16 devid);
-static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus, void *card);
-static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus, void *card);
-static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus);
-
-static uint brcmf_process_nvram_vars(char *varbuf, uint len);
-
-static void brcmf_sdbrcm_setmemsize(struct brcmf_bus *bus, int mem_size);
-static int brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes,
- struct sk_buff *pkt,
- void (*complete)(void *handle, int status,
- bool sync_waiting),
- void *handle);
-
-static bool brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus, void *card);
-static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus);
-
-static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus);
-static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus);
-
-static void
-brcmf_sdbrcm_chip_disablecore(struct brcmf_sdio_card *card, u32 corebase);
-
-static int brcmf_sdbrcm_chip_attach(struct brcmf_bus *bus, u32 regs);
-
-static void
-brcmf_sdbrcm_chip_resetcore(struct brcmf_sdio_card *card, u32 corebase);
-
-static void brcmf_sdbrcm_sdiod_drive_strength_init(struct brcmf_bus *bus,
- u32 drivestrength);
-static void brcmf_sdbrcm_chip_detach(struct brcmf_bus *bus);
-static void brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar);
-static void brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus);
-static void brcmf_sdbrcm_watchdog(unsigned long data);
-static int brcmf_sdbrcm_watchdog_thread(void *data);
-static int brcmf_sdbrcm_dpc_thread(void *data);
-static void brcmf_sdbrcm_dpc_tasklet(unsigned long data);
-static void brcmf_sdbrcm_sched_dpc(struct brcmf_bus *bus);
-static void brcmf_sdbrcm_sdlock(struct brcmf_bus *bus);
-static void brcmf_sdbrcm_sdunlock(struct brcmf_bus *bus);
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus);
-
-/* Packet free applicable unconditionally for sdio and sdspi.
- * Conditional if bufpool was present for gspi bus.
- */
-static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt)
-{
- if ((bus->bus != SPI_BUS) || bus->usebufpool)
- brcmu_pkt_buf_free_skb(pkt);
-}
-
-static void brcmf_sdbrcm_setmemsize(struct brcmf_bus *bus, int mem_size)
-{
- s32 min_size = DONGLE_MIN_MEMSIZE;
- /* Restrict the memsize to user specified limit */
- BRCMF_ERROR(("user: Restrict the dongle ram size to %d, min %d\n",
- brcmf_dongle_memsize, min_size));
- if ((brcmf_dongle_memsize > min_size) &&
- (brcmf_dongle_memsize < (s32) bus->orig_ramsize))
- bus->ramsize = brcmf_dongle_memsize;
-}
-
-static int brcmf_sdbrcm_set_siaddr_window(struct brcmf_bus *bus, u32 address)
-{
- int err = 0;
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
- (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
- if (!err)
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1,
- SBSDIO_FUNC1_SBADDRMID,
- (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
- if (!err)
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1,
- SBSDIO_FUNC1_SBADDRHIGH,
- (address >> 24) & SBSDIO_SBADDRHIGH_MASK,
- &err);
- return err;
-}
-
-/* Turn backplane clock on or off */
-static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
-{
- int err;
- u8 clkctl, clkreq, devctl;
- struct brcmf_sdio_card *card;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- clkctl = 0;
- card = bus->card;
-
- if (on) {
- /* Request HT Avail */
- clkreq =
- bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
-
- if ((bus->ci->chip == BCM4329_CHIP_ID)
- && (bus->ci->chiprev == 0))
- clkreq |= SBSDIO_FORCE_ALP;
-
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
- if (err) {
- BRCMF_ERROR(("%s: HT Avail request error: %d\n",
- __func__, err));
- return -EBADE;
- }
-
- if (pendok && ((bus->ci->buscoretype == PCMCIA_CORE_ID)
- && (bus->ci->buscorerev == 9))) {
- u32 dummy, retries;
- r_sdreg32(bus, &dummy,
- offsetof(struct sdpcmd_regs, clockctlstatus),
- &retries);
- }
-
- /* Check current status */
- clkctl = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (err) {
- BRCMF_ERROR(("%s: HT Avail read error: %d\n",
- __func__, err));
- return -EBADE;
- }
-
- /* Go to pending and await interrupt if appropriate */
- if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
- /* Allow only clock-available interrupt */
- devctl = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, &err);
- if (err) {
- BRCMF_ERROR(("%s: Devctl error setting CA:"
- " %d\n", __func__, err));
- return -EBADE;
- }
-
- devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, devctl, &err);
- BRCMF_INFO(("CLKCTL: set PENDING\n"));
- bus->clkstate = CLK_PENDING;
-
- return 0;
- } else if (bus->clkstate == CLK_PENDING) {
- /* Cancel CA-only interrupt filter */
- devctl =
- brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, &err);
- devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, devctl, &err);
- }
-
- /* Otherwise, wait here (polling) for HT Avail */
- if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
- BRCMF_SPINWAIT_SLEEP(sdioh_spinwait_sleep,
- ((clkctl =
- brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- &err)),
- !SBSDIO_CLKAV(clkctl, bus->alp_only)),
- PMU_MAX_TRANSITION_DLY);
- }
- if (err) {
- BRCMF_ERROR(("%s: HT Avail request error: %d\n",
- __func__, err));
- return -EBADE;
- }
- if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
- BRCMF_ERROR(("%s: HT Avail timeout (%d): "
- "clkctl 0x%02x\n", __func__,
- PMU_MAX_TRANSITION_DLY, clkctl));
- return -EBADE;
- }
-
- /* Mark clock available */
- bus->clkstate = CLK_AVAIL;
- BRCMF_INFO(("CLKCTL: turned ON\n"));
-
-#if defined(BCMDBG)
- if (bus->alp_only != true) {
- if (SBSDIO_ALPONLY(clkctl)) {
- BRCMF_ERROR(("%s: HT Clock should be on.\n",
- __func__));
- }
- }
-#endif /* defined (BCMDBG) */
-
- bus->activity = true;
- } else {
- clkreq = 0;
-
- if (bus->clkstate == CLK_PENDING) {
- /* Cancel CA-only interrupt filter */
- devctl = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, &err);
- devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, devctl, &err);
- }
-
- bus->clkstate = CLK_SDONLY;
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
- BRCMF_INFO(("CLKCTL: turned OFF\n"));
- if (err) {
- BRCMF_ERROR(("%s: Failed access turning clock off:"
- " %d\n", __func__, err));
- return -EBADE;
- }
- }
- return 0;
-}
-
-/* Change idle/active SD state */
-static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
-{
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (on)
- bus->clkstate = CLK_SDONLY;
- else
- bus->clkstate = CLK_NONE;
-
- return 0;
-}
-
-/* Transition SD and backplane clock readiness */
-static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
-{
-#ifdef BCMDBG
- uint oldstate = bus->clkstate;
-#endif /* BCMDBG */
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* Early exit if we're already there */
- if (bus->clkstate == target) {
- if (target == CLK_AVAIL) {
- brcmf_sdbrcm_wd_timer(bus, brcmf_watchdog_ms);
- bus->activity = true;
- }
- return 0;
- }
-
- switch (target) {
- case CLK_AVAIL:
- /* Make sure SD clock is available */
- if (bus->clkstate == CLK_NONE)
- brcmf_sdbrcm_sdclk(bus, true);
- /* Now request HT Avail on the backplane */
- brcmf_sdbrcm_htclk(bus, true, pendok);
- brcmf_sdbrcm_wd_timer(bus, brcmf_watchdog_ms);
- bus->activity = true;
- break;
-
- case CLK_SDONLY:
- /* Remove HT request, or bring up SD clock */
- if (bus->clkstate == CLK_NONE)
- brcmf_sdbrcm_sdclk(bus, true);
- else if (bus->clkstate == CLK_AVAIL)
- brcmf_sdbrcm_htclk(bus, false, false);
- else
- BRCMF_ERROR(("brcmf_sdbrcm_clkctl: request for %d -> %d"
- "\n", bus->clkstate, target));
- brcmf_sdbrcm_wd_timer(bus, brcmf_watchdog_ms);
- break;
-
- case CLK_NONE:
- /* Make sure to remove HT request */
- if (bus->clkstate == CLK_AVAIL)
- brcmf_sdbrcm_htclk(bus, false, false);
- /* Now remove the SD clock */
- brcmf_sdbrcm_sdclk(bus, false);
- brcmf_sdbrcm_wd_timer(bus, 0);
- break;
- }
-#ifdef BCMDBG
- BRCMF_INFO(("brcmf_sdbrcm_clkctl: %d -> %d\n",
- oldstate, bus->clkstate));
-#endif /* BCMDBG */
-
- return 0;
-}
-
-int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
-{
- struct brcmf_sdio_card *card = bus->card;
- uint retries = 0;
-
- BRCMF_INFO(("brcmf_sdbrcm_bussleep: request %s (currently %s)\n",
- (sleep ? "SLEEP" : "WAKE"),
- (bus->sleeping ? "SLEEP" : "WAKE")));
-
- /* Done if we're already in the requested state */
- if (sleep == bus->sleeping)
- return 0;
-
- /* Going to sleep: set the alarm and turn off the lights... */
- if (sleep) {
- /* Don't sleep if something is pending */
- if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
- return -EBUSY;
-
- /* Disable SDIO interrupts (no longer interested) */
- brcmf_sdcard_intr_disable(bus->card);
-
- /* Make sure the controller has the bus up */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
- /* Tell device to start using OOB wakeup */
- w_sdreg32(bus, SMB_USE_OOB,
- offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
- if (retries > retry_limit)
- BRCMF_ERROR(("CANNOT SIGNAL CHIP, "
- "WILL NOT WAKE UP!!\n"));
-
- /* Turn off our contribution to the HT clock request */
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
-
- /* Isolate the bus */
- if (bus->ci->chip != BCM4329_CHIP_ID
- && bus->ci->chip != BCM4319_CHIP_ID) {
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL,
- SBSDIO_DEVCTL_PADS_ISO, NULL);
- }
-
- /* Change state */
- bus->sleeping = true;
-
- } else {
- /* Waking up: bus power up is ok, set local state */
-
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
-
- /* Force pad isolation off if possible
- (in case power never toggled) */
- if ((bus->ci->buscoretype == PCMCIA_CORE_ID)
- && (bus->ci->buscorerev >= 10))
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, 0, NULL);
-
- /* Make sure the controller has the bus up */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
- /* Send misc interrupt to indicate OOB not needed */
- w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, tosbmailboxdata),
- &retries);
- if (retries <= retry_limit)
- w_sdreg32(bus, SMB_DEV_INT,
- offsetof(struct sdpcmd_regs, tosbmailbox),
- &retries);
-
- if (retries > retry_limit)
- BRCMF_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
-
- /* Make sure we have SD bus access */
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
- /* Change state */
- bus->sleeping = false;
-
- /* Enable interrupts again */
- if (bus->intr && (bus->drvr->busstate == BRCMF_BUS_DATA)) {
- bus->intdis = false;
- brcmf_sdcard_intr_enable(bus->card);
- }
- }
-
- return 0;
-}
-
-#define BUS_WAKE(bus) \
- do { \
- if ((bus)->sleeping) \
- brcmf_sdbrcm_bussleep((bus), false); \
- } while (0);
-
-/* Writes a HW/SW header into the packet and sends it. */
-/* Assumes: (a) header space already there, (b) caller holds lock */
-static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt, uint chan,
- bool free_pkt)
-{
- int ret;
- u8 *frame;
- u16 len, pad = 0;
- u32 swheader;
- uint retries = 0;
- struct brcmf_sdio_card *card;
- struct sk_buff *new;
- int i;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- card = bus->card;
-
- if (bus->drvr->dongle_reset) {
- ret = -EPERM;
- goto done;
- }
-
- frame = (u8 *) (pkt->data);
-
- /* Add alignment padding, allocate new packet if needed */
- pad = ((unsigned long)frame % BRCMF_SDALIGN);
- if (pad) {
- if (skb_headroom(pkt) < pad) {
- BRCMF_INFO(("%s: insufficient headroom %d for %d pad\n",
- __func__, skb_headroom(pkt), pad));
- bus->drvr->tx_realloc++;
- new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN);
- if (!new) {
- BRCMF_ERROR(("%s: couldn't allocate new "
- "%d-byte packet\n", __func__,
- pkt->len + BRCMF_SDALIGN));
- ret = -ENOMEM;
- goto done;
- }
-
- PKTALIGN(new, pkt->len, BRCMF_SDALIGN);
- memcpy(new->data, pkt->data, pkt->len);
- if (free_pkt)
- brcmu_pkt_buf_free_skb(pkt);
- /* free the pkt if canned one is not used */
- free_pkt = true;
- pkt = new;
- frame = (u8 *) (pkt->data);
- /* precondition: (frame % BRCMF_SDALIGN) == 0) */
- pad = 0;
- } else {
- skb_push(pkt, pad);
- frame = (u8 *) (pkt->data);
- /* precondition: pad + SDPCM_HDRLEN <= pkt->len */
- memset(frame, 0, pad + SDPCM_HDRLEN);
- }
- }
- /* precondition: pad < BRCMF_SDALIGN */
-
- /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
- len = (u16) (pkt->len);
- *(u16 *) frame = cpu_to_le16(len);
- *(((u16 *) frame) + 1) = cpu_to_le16(~len);
-
- /* Software tag: channel, sequence number, data offset */
- swheader =
- ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
- (((pad +
- SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
-
- put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
- put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
-
-#ifdef BCMDBG
- tx_packets[pkt->priority]++;
- if (BRCMF_BYTES_ON() &&
- (((BRCMF_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
- (BRCMF_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
- printk(KERN_DEBUG "Tx Frame:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, frame, len);
- } else if (BRCMF_HDRS_ON()) {
- printk(KERN_DEBUG "TxHdr:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- frame, min_t(u16, len, 16));
- }
-#endif
-
- /* Raise len to next SDIO block to eliminate tail command */
- if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
- u16 pad = bus->blocksize - (len % bus->blocksize);
- if ((pad <= bus->roundup) && (pad < bus->blocksize))
- len += pad;
- } else if (len % BRCMF_SDALIGN) {
- len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
- }
-
- /* Some controllers have trouble with odd bytes -- round to even */
- if (forcealign && (len & (ALIGNMENT - 1))) {
- len = roundup(len, ALIGNMENT);
- }
-
- do {
- ret = brcmf_sdbrcm_send_buf(bus, brcmf_sdcard_cur_sbwad(card),
- SDIO_FUNC_2, F2SYNC, frame, len, pkt, NULL, NULL);
- bus->f2txdata++;
-
- if (ret < 0) {
- /* On failure, abort the command
- and terminate the frame */
- BRCMF_INFO(("%s: sdio error %d, abort command and "
- "terminate frame.\n", __func__, ret));
- bus->tx_sderrs++;
-
- brcmf_sdcard_abort(card, SDIO_FUNC_2);
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM,
- NULL);
- bus->f1regdata++;
-
- for (i = 0; i < 3; i++) {
- u8 hi, lo;
- hi = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCHI,
- NULL);
- lo = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCLO,
- NULL);
- bus->f1regdata += 2;
- if ((hi == 0) && (lo == 0))
- break;
- }
-
- }
- if (ret == 0)
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
-
- } while ((ret < 0) && retrydata && retries++ < TXRETRIES);
-
-done:
- /* restore pkt buffer pointer before calling tx complete routine */
- skb_pull(pkt, SDPCM_HDRLEN + pad);
- brcmf_sdbrcm_sdunlock(bus);
- brcmf_txcomplete(bus->drvr, pkt, ret != 0);
- brcmf_sdbrcm_sdlock(bus);
-
- if (free_pkt)
- brcmu_pkt_buf_free_skb(pkt);
-
- return ret;
-}
-
-int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
-{
- int ret = -EBADE;
- uint datalen, prec;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- datalen = pkt->len;
-
-#ifdef SDTEST
- /* Push the test header if doing loopback */
- if (bus->ext_loop) {
- u8 *data;
- skb_push(pkt, SDPCM_TEST_HDRLEN);
- data = pkt->data;
- *data++ = SDPCM_TEST_ECHOREQ;
- *data++ = (u8) bus->loopid++;
- *data++ = (datalen >> 0);
- *data++ = (datalen >> 8);
- datalen += SDPCM_TEST_HDRLEN;
- }
-#endif /* SDTEST */
-
- /* Add space for the header */
- skb_push(pkt, SDPCM_HDRLEN);
- /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
-
- prec = PRIO2PREC((pkt->priority & PRIOMASK));
-
- /* Check for existing queue, current flow-control,
- pending event, or pending clock */
- if (brcmf_deferred_tx || bus->fcstate || pktq_len(&bus->txq)
- || bus->dpc_sched || (!DATAOK(bus))
- || (bus->flowcontrol & NBITVAL(prec))
- || (bus->clkstate != CLK_AVAIL)) {
- BRCMF_TRACE(("%s: deferring pktq len %d\n", __func__,
- pktq_len(&bus->txq)));
- bus->fcqueued++;
-
- /* Priority based enq */
- spin_lock_bh(&bus->txqlock);
- if (brcmf_c_prec_enq(bus->drvr, &bus->txq, pkt, prec) == false) {
- skb_pull(pkt, SDPCM_HDRLEN);
- brcmf_txcomplete(bus->drvr, pkt, false);
- brcmu_pkt_buf_free_skb(pkt);
- BRCMF_ERROR(("%s: out of bus->txq !!!\n", __func__));
- ret = -ENOSR;
- } else {
- ret = 0;
- }
- spin_unlock_bh(&bus->txqlock);
-
- if (pktq_len(&bus->txq) >= TXHI)
- brcmf_txflowcontrol(bus->drvr, 0, ON);
-
-#ifdef BCMDBG
- if (pktq_plen(&bus->txq, prec) > qcount[prec])
- qcount[prec] = pktq_plen(&bus->txq, prec);
-#endif
- /* Schedule DPC if needed to send queued packet(s) */
- if (brcmf_deferred_tx && !bus->dpc_sched) {
- bus->dpc_sched = true;
- brcmf_sdbrcm_sched_dpc(bus);
- }
- } else {
- /* Lock: we're about to use shared data/code (and SDIO) */
- brcmf_sdbrcm_sdlock(bus);
-
- /* Otherwise, send it now */
- BUS_WAKE(bus);
- /* Make sure back plane ht clk is on, no pending allowed */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true);
-
-#ifndef SDTEST
- BRCMF_TRACE(("%s: calling txpkt\n", __func__));
- ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
-#else
- ret = brcmf_sdbrcm_txpkt(bus, pkt,
- (bus->ext_loop ? SDPCM_TEST_CHANNEL :
- SDPCM_DATA_CHANNEL), true);
-#endif
- if (ret)
- bus->drvr->tx_errors++;
- else
- bus->drvr->dstats.tx_bytes += datalen;
-
- if (bus->idletime == BRCMF_IDLE_IMMEDIATE &&
- !bus->dpc_sched) {
- bus->activity = false;
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
- }
-
- brcmf_sdbrcm_sdunlock(bus);
- }
-
- return ret;
-}
-
-static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
-{
- struct sk_buff *pkt;
- u32 intstatus = 0;
- uint retries = 0;
- int ret = 0, prec_out;
- uint cnt = 0;
- uint datalen;
- u8 tx_prec_map;
-
- struct brcmf_pub *drvr = bus->drvr;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- tx_prec_map = ~bus->flowcontrol;
-
- /* Send frames until the limit or some other event */
- for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) {
- spin_lock_bh(&bus->txqlock);
- pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
- if (pkt == NULL) {
- spin_unlock_bh(&bus->txqlock);
- break;
- }
- spin_unlock_bh(&bus->txqlock);
- datalen = pkt->len - SDPCM_HDRLEN;
-
-#ifndef SDTEST
- ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
-#else
- ret = brcmf_sdbrcm_txpkt(bus, pkt,
- (bus->ext_loop ? SDPCM_TEST_CHANNEL :
- SDPCM_DATA_CHANNEL), true);
-#endif
- if (ret)
- bus->drvr->tx_errors++;
- else
- bus->drvr->dstats.tx_bytes += datalen;
-
- /* In poll mode, need to check for other events */
- if (!bus->intr && cnt) {
- /* Check device status, signal pending interrupt */
- r_sdreg32(bus, &intstatus,
- offsetof(struct sdpcmd_regs, intstatus),
- &retries);
- bus->f2txdata++;
- if (brcmf_sdcard_regfail(bus->card))
- break;
- if (intstatus & bus->hostintmask)
- bus->ipend = true;
- }
- }
-
- /* Deflow-control stack if needed */
- if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) &&
- drvr->txoff && (pktq_len(&bus->txq) < TXLOW))
- brcmf_txflowcontrol(drvr, 0, OFF);
-
- return cnt;
-}
-
-int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
-{
- u8 *frame;
- u16 len;
- u32 swheader;
- uint retries = 0;
- struct brcmf_sdio_card *card = bus->card;
- u8 doff = 0;
- int ret = -1;
- int i;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (bus->drvr->dongle_reset)
- return -EIO;
-
- /* Back the pointer to make a room for bus header */
- frame = msg - SDPCM_HDRLEN;
- len = (msglen += SDPCM_HDRLEN);
-
- /* Add alignment padding (optional for ctl frames) */
- if (brcmf_alignctl) {
- doff = ((unsigned long)frame % BRCMF_SDALIGN);
- if (doff) {
- frame -= doff;
- len += doff;
- msglen += doff;
- memset(frame, 0, doff + SDPCM_HDRLEN);
- }
- /* precondition: doff < BRCMF_SDALIGN */
- }
- doff += SDPCM_HDRLEN;
-
- /* Round send length to next SDIO block */
- if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
- u16 pad = bus->blocksize - (len % bus->blocksize);
- if ((pad <= bus->roundup) && (pad < bus->blocksize))
- len += pad;
- } else if (len % BRCMF_SDALIGN) {
- len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
- }
-
- /* Satisfy length-alignment requirements */
- if (forcealign && (len & (ALIGNMENT - 1)))
- len = roundup(len, ALIGNMENT);
-
- /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
-
- /* Need to lock here to protect txseq and SDIO tx calls */
- brcmf_sdbrcm_sdlock(bus);
-
- BUS_WAKE(bus);
-
- /* Make sure backplane clock is on */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
- /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
- *(u16 *) frame = cpu_to_le16((u16) msglen);
- *(((u16 *) frame) + 1) = cpu_to_le16(~msglen);
-
- /* Software tag: channel, sequence number, data offset */
- swheader =
- ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) &
- SDPCM_CHANNEL_MASK)
- | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) &
- SDPCM_DOFFSET_MASK);
- put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
- put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
-
- if (!DATAOK(bus)) {
- BRCMF_INFO(("%s: No bus credit bus->tx_max %d,"
- " bus->tx_seq %d\n", __func__,
- bus->tx_max, bus->tx_seq));
- bus->ctrl_frame_stat = true;
- /* Send from dpc */
- bus->ctrl_frame_buf = frame;
- bus->ctrl_frame_len = len;
-
- brcmf_sdbrcm_wait_for_event(bus, &bus->ctrl_frame_stat);
-
- if (bus->ctrl_frame_stat == false) {
- BRCMF_INFO(("%s: ctrl_frame_stat == false\n",
- __func__));
- ret = 0;
- } else {
- BRCMF_INFO(("%s: ctrl_frame_stat == true\n", __func__));
- ret = -1;
- }
- }
-
- if (ret == -1) {
-#ifdef BCMDBG
- if (BRCMF_BYTES_ON() && BRCMF_CTL_ON()) {
- printk(KERN_DEBUG "Tx Frame:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- frame, len);
- } else if (BRCMF_HDRS_ON()) {
- printk(KERN_DEBUG "TxHdr:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- frame, min_t(u16, len, 16));
- }
-#endif
-
- do {
- bus->ctrl_frame_stat = false;
- ret = brcmf_sdbrcm_send_buf(bus,
- brcmf_sdcard_cur_sbwad(card), SDIO_FUNC_2,
- F2SYNC, frame, len, NULL, NULL, NULL);
-
- if (ret < 0) {
- /* On failure, abort the command and
- terminate the frame */
- BRCMF_INFO(("%s: sdio error %d, abort command "
- "and terminate frame.\n",
- __func__, ret));
- bus->tx_sderrs++;
-
- brcmf_sdcard_abort(card, SDIO_FUNC_2);
-
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
- bus->f1regdata++;
-
- for (i = 0; i < 3; i++) {
- u8 hi, lo;
- hi = brcmf_sdcard_cfg_read(card,
- SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCHI,
- NULL);
- lo = brcmf_sdcard_cfg_read(card,
- SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCLO,
- NULL);
- bus->f1regdata += 2;
- if ((hi == 0) && (lo == 0))
- break;
- }
-
- }
- if (ret == 0) {
- bus->tx_seq =
- (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
- }
- } while ((ret < 0) && retries++ < TXRETRIES);
- }
-
- if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && !bus->dpc_sched) {
- bus->activity = false;
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
- }
-
- brcmf_sdbrcm_sdunlock(bus);
-
- if (ret)
- bus->drvr->tx_ctlerrs++;
- else
- bus->drvr->tx_ctlpkts++;
-
- return ret ? -EIO : 0;
-}
-
-int brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
-{
- int timeleft;
- uint rxlen = 0;
- bool pending;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (bus->drvr->dongle_reset)
- return -EIO;
-
- /* Wait until control frame is available */
- timeleft = brcmf_os_ioctl_resp_wait(bus->drvr, &bus->rxlen, &pending);
-
- brcmf_sdbrcm_sdlock(bus);
- rxlen = bus->rxlen;
- memcpy(msg, bus->rxctl, min(msglen, rxlen));
- bus->rxlen = 0;
- brcmf_sdbrcm_sdunlock(bus);
-
- if (rxlen) {
- BRCMF_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
- __func__, rxlen, msglen));
- } else if (timeleft == 0) {
- BRCMF_ERROR(("%s: resumed on timeout\n", __func__));
-#ifdef BCMDBG
- brcmf_sdbrcm_sdlock(bus);
- brcmf_sdbrcm_checkdied(bus, NULL, 0);
- brcmf_sdbrcm_sdunlock(bus);
-#endif /* BCMDBG */
- } else if (pending == true) {
- BRCMF_CTL(("%s: cancelled\n", __func__));
- return -ERESTARTSYS;
- } else {
- BRCMF_CTL(("%s: resumed for unknown reason?\n", __func__));
-#ifdef BCMDBG
- brcmf_sdbrcm_sdlock(bus);
- brcmf_sdbrcm_checkdied(bus, NULL, 0);
- brcmf_sdbrcm_sdunlock(bus);
-#endif /* BCMDBG */
- }
-
- if (rxlen)
- bus->drvr->rx_ctlpkts++;
- else
- bus->drvr->rx_ctlerrs++;
-
- return rxlen ? (int)rxlen : -ETIMEDOUT;
-}
-
-/* IOVar table */
-enum {
- IOV_INTR = 1,
- IOV_POLLRATE,
- IOV_SDREG,
- IOV_SBREG,
- IOV_SDCIS,
- IOV_MEMBYTES,
- IOV_MEMSIZE,
-#ifdef BCMDBG
- IOV_CHECKDIED,
- IOV_CONS,
- IOV_DCONSOLE_POLL,
-#endif
- IOV_DOWNLOAD,
- IOV_FORCEEVEN,
- IOV_SDIOD_DRIVE,
- IOV_READAHEAD,
- IOV_SDRXCHAIN,
- IOV_ALIGNCTL,
- IOV_SDALIGN,
- IOV_DEVRESET,
- IOV_CPU,
-#ifdef SDTEST
- IOV_PKTGEN,
- IOV_EXTLOOP,
-#endif /* SDTEST */
- IOV_SPROM,
- IOV_TXBOUND,
- IOV_RXBOUND,
- IOV_TXMINMAX,
- IOV_IDLETIME,
- IOV_IDLECLOCK,
- IOV_SD1IDLE,
- IOV_SLEEP,
- IOV_WDTICK,
- IOV_VARS
-};
-
-const struct brcmu_iovar brcmf_sdio_iovars[] = {
- {"intr", IOV_INTR, 0, IOVT_BOOL, 0},
- {"sleep", IOV_SLEEP, 0, IOVT_BOOL, 0},
- {"pollrate", IOV_POLLRATE, 0, IOVT_UINT32, 0},
- {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0},
- {"idleclock", IOV_IDLECLOCK, 0, IOVT_INT32, 0},
- {"sd1idle", IOV_SD1IDLE, 0, IOVT_BOOL, 0},
- {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int)},
- {"memsize", IOV_MEMSIZE, 0, IOVT_UINT32, 0},
- {"download", IOV_DOWNLOAD, 0, IOVT_BOOL, 0},
- {"vars", IOV_VARS, 0, IOVT_BUFFER, 0},
- {"sdiod_drive", IOV_SDIOD_DRIVE, 0, IOVT_UINT32, 0},
- {"readahead", IOV_READAHEAD, 0, IOVT_BOOL, 0},
- {"sdrxchain", IOV_SDRXCHAIN, 0, IOVT_BOOL, 0},
- {"alignctl", IOV_ALIGNCTL, 0, IOVT_BOOL, 0},
- {"sdalign", IOV_SDALIGN, 0, IOVT_BOOL, 0},
- {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0},
- {"wdtick", IOV_WDTICK, 0, IOVT_UINT32, 0},
-#ifdef BCMDBG
- {"cons", IOV_CONS, 0, IOVT_BUFFER, 0}
- ,
- {"dconpoll", IOV_DCONSOLE_POLL, 0, IOVT_UINT32, 0}
- ,
- {"sdreg", IOV_SDREG, 0, IOVT_BUFFER, sizeof(struct brcmf_sdreg)}
- ,
- {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(struct brcmf_sdreg)}
- ,
- {"sd_cis", IOV_SDCIS, 0, IOVT_BUFFER, BRCMF_IOCTL_MAXLEN}
- ,
- {"forcealign", IOV_FORCEEVEN, 0, IOVT_BOOL, 0}
- ,
- {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0}
- ,
- {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0}
- ,
- {"txminmax", IOV_TXMINMAX, 0, IOVT_UINT32, 0}
- ,
- {"cpu", IOV_CPU, 0, IOVT_BOOL, 0}
- ,
- {"checkdied", IOV_CHECKDIED, 0, IOVT_BUFFER, 0}
- ,
-#endif /* BCMDBG */
-#ifdef SDTEST
- {"extloop", IOV_EXTLOOP, 0, IOVT_BOOL, 0}
- ,
- {"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(struct brcmf_pktgen)}
- ,
-#endif /* SDTEST */
-
- {NULL, 0, 0, 0, 0}
-};
-
-static void
-brcmf_dump_pct(struct brcmu_strbuf *strbuf, char *desc, uint num, uint div)
-{
- uint q1, q2;
-
- if (!div) {
- brcmu_bprintf(strbuf, "%s N/A", desc);
- } else {
- q1 = num / div;
- q2 = (100 * (num - (q1 * div))) / div;
- brcmu_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
- }
-}
-
-void brcmf_sdbrcm_bus_dump(struct brcmf_pub *drvr, struct brcmu_strbuf *strbuf)
-{
- struct brcmf_bus *bus = drvr->bus;
-
- brcmu_bprintf(strbuf, "Bus SDIO structure:\n");
- brcmu_bprintf(strbuf,
- "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
- bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
- brcmu_bprintf(strbuf,
- "fcstate %d qlen %d tx_seq %d, max %d, rxskip %d rxlen %d rx_seq %d\n",
- bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max,
- bus->rxskip, bus->rxlen, bus->rx_seq);
- brcmu_bprintf(strbuf, "intr %d intrcount %d lastintrs %d spurious %d\n",
- bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
- brcmu_bprintf(strbuf, "pollrate %d pollcnt %d regfails %d\n",
- bus->pollrate, bus->pollcnt, bus->regfails);
-
- brcmu_bprintf(strbuf, "\nAdditional counters:\n");
- brcmu_bprintf(strbuf,
- "tx_sderrs %d fcqueued %d rxrtx %d rx_toolong %d rxc_errors %d\n",
- bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
- bus->rxc_errors);
- brcmu_bprintf(strbuf, "rx_hdrfail %d badhdr %d badseq %d\n",
- bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
- brcmu_bprintf(strbuf, "fc_rcvd %d, fc_xoff %d, fc_xon %d\n",
- bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
- brcmu_bprintf(strbuf, "rxglomfail %d, rxglomframes %d, rxglompkts %d\n",
- bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
- brcmu_bprintf(strbuf, "f2rx (hdrs/data) %d (%d/%d), f2tx %d f1regs"
- " %d\n",
- (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs,
- bus->f2rxdata, bus->f2txdata, bus->f1regdata);
- {
- brcmf_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->drvr->rx_packets,
- (bus->f2rxhdrs + bus->f2rxdata));
- brcmf_dump_pct(strbuf, ", pkts/f1sd", bus->drvr->rx_packets,
- bus->f1regdata);
- brcmf_dump_pct(strbuf, ", pkts/sd", bus->drvr->rx_packets,
- (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
- brcmf_dump_pct(strbuf, ", pkts/int", bus->drvr->rx_packets,
- bus->intrcount);
- brcmu_bprintf(strbuf, "\n");
-
- brcmf_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
- bus->drvr->rx_packets);
- brcmf_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts,
- bus->rxglomframes);
- brcmu_bprintf(strbuf, "\n");
-
- brcmf_dump_pct(strbuf, "Tx: pkts/f2wr", bus->drvr->tx_packets,
- bus->f2txdata);
- brcmf_dump_pct(strbuf, ", pkts/f1sd", bus->drvr->tx_packets,
- bus->f1regdata);
- brcmf_dump_pct(strbuf, ", pkts/sd", bus->drvr->tx_packets,
- (bus->f2txdata + bus->f1regdata));
- brcmf_dump_pct(strbuf, ", pkts/int", bus->drvr->tx_packets,
- bus->intrcount);
- brcmu_bprintf(strbuf, "\n");
-
- brcmf_dump_pct(strbuf, "Total: pkts/f2rw",
- (bus->drvr->tx_packets + bus->drvr->rx_packets),
- (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
- brcmf_dump_pct(strbuf, ", pkts/f1sd",
- (bus->drvr->tx_packets + bus->drvr->rx_packets),
- bus->f1regdata);
- brcmf_dump_pct(strbuf, ", pkts/sd",
- (bus->drvr->tx_packets + bus->drvr->rx_packets),
- (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata +
- bus->f1regdata));
- brcmf_dump_pct(strbuf, ", pkts/int",
- (bus->drvr->tx_packets + bus->drvr->rx_packets),
- bus->intrcount);
- brcmu_bprintf(strbuf, "\n\n");
- }
-
-#ifdef SDTEST
- if (bus->pktgen_count) {
- brcmu_bprintf(strbuf, "pktgen config and count:\n");
- brcmu_bprintf(strbuf,
- "freq %d count %d print %d total %d min %d len %d\n",
- bus->pktgen_freq, bus->pktgen_count,
- bus->pktgen_print, bus->pktgen_total,
- bus->pktgen_minlen, bus->pktgen_maxlen);
- brcmu_bprintf(strbuf, "send attempts %d rcvd %d fail %d\n",
- bus->pktgen_sent, bus->pktgen_rcvd,
- bus->pktgen_fail);
- }
-#endif /* SDTEST */
-#ifdef BCMDBG
- brcmu_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
- bus->dpc_sched, " not ");
- brcmu_bprintf(strbuf, "blocksize %d roundup %d\n", bus->blocksize,
- bus->roundup);
-#endif /* BCMDBG */
- brcmu_bprintf(strbuf,
- "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
- bus->clkstate, bus->activity, bus->idletime, bus->idlecount,
- bus->sleeping);
-}
-
-void brcmf_bus_clearcounts(struct brcmf_pub *drvr)
-{
- struct brcmf_bus *bus = (struct brcmf_bus *) drvr->bus;
-
- bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
- bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
- bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
- bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
- bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
- bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
-}
-
-#ifdef SDTEST
-static int brcmf_sdbrcm_pktgen_get(struct brcmf_bus *bus, u8 *arg)
-{
- struct brcmf_pktgen pktgen;
-
- pktgen.version = BRCMF_PKTGEN_VERSION;
- pktgen.freq = bus->pktgen_freq;
- pktgen.count = bus->pktgen_count;
- pktgen.print = bus->pktgen_print;
- pktgen.total = bus->pktgen_total;
- pktgen.minlen = bus->pktgen_minlen;
- pktgen.maxlen = bus->pktgen_maxlen;
- pktgen.numsent = bus->pktgen_sent;
- pktgen.numrcvd = bus->pktgen_rcvd;
- pktgen.numfail = bus->pktgen_fail;
- pktgen.mode = bus->pktgen_mode;
- pktgen.stop = bus->pktgen_stop;
-
- memcpy(arg, &pktgen, sizeof(pktgen));
-
- return 0;
-}
-
-static int brcmf_sdbrcm_pktgen_set(struct brcmf_bus *bus, u8 *arg)
-{
- struct brcmf_pktgen pktgen;
- uint oldcnt, oldmode;
-
- memcpy(&pktgen, arg, sizeof(pktgen));
- if (pktgen.version != BRCMF_PKTGEN_VERSION)
- return -EINVAL;
-
- oldcnt = bus->pktgen_count;
- oldmode = bus->pktgen_mode;
-
- bus->pktgen_freq = pktgen.freq;
- bus->pktgen_count = pktgen.count;
- bus->pktgen_print = pktgen.print;
- bus->pktgen_total = pktgen.total;
- bus->pktgen_minlen = pktgen.minlen;
- bus->pktgen_maxlen = pktgen.maxlen;
- bus->pktgen_mode = pktgen.mode;
- bus->pktgen_stop = pktgen.stop;
-
- bus->pktgen_tick = bus->pktgen_ptick = 0;
- bus->pktgen_len = max(bus->pktgen_len, bus->pktgen_minlen);
- bus->pktgen_len = min(bus->pktgen_len, bus->pktgen_maxlen);
-
- /* Clear counts for a new pktgen (mode change, or was stopped) */
- if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode))
- bus->pktgen_sent = bus->pktgen_rcvd = bus->pktgen_fail = 0;
-
- return 0;
-}
-#endif /* SDTEST */
-
-static int
-brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data,
- uint size)
-{
- int bcmerror = 0;
- u32 sdaddr;
- uint dsize;
-
- /* Determine initial transfer parameters */
- sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
- if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
- dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
- else
- dsize = size;
-
- /* Set the backplane window to include the start address */
- bcmerror = brcmf_sdbrcm_set_siaddr_window(bus, address);
- if (bcmerror) {
- BRCMF_ERROR(("%s: window change failed\n", __func__));
- goto xfer_done;
- }
-
- /* Do the transfer(s) */
- while (size) {
- BRCMF_INFO(("%s: %s %d bytes at offset 0x%08x in window"
- " 0x%08x\n", __func__, (write ? "write" : "read"),
- dsize, sdaddr, (address & SBSDIO_SBWINDOW_MASK)));
- bcmerror =
- brcmf_sdcard_rwdata(bus->card, write, sdaddr, data, dsize);
- if (bcmerror) {
- BRCMF_ERROR(("%s: membytes transfer failed\n",
- __func__));
- break;
- }
-
- /* Adjust for next transfer (if any) */
- size -= dsize;
- if (size) {
- data += dsize;
- address += dsize;
- bcmerror = brcmf_sdbrcm_set_siaddr_window(bus, address);
- if (bcmerror) {
- BRCMF_ERROR(("%s: window change failed\n",
- __func__));
- break;
- }
- sdaddr = 0;
- dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
- }
- }
-
-xfer_done:
- /* Return the window to backplane enumeration space for core access */
- if (brcmf_sdbrcm_set_siaddr_window(bus,
- brcmf_sdcard_cur_sbwad(bus->card))) {
- BRCMF_ERROR(("%s: FAILED to set window back to 0x%x\n",
- __func__, brcmf_sdcard_cur_sbwad(bus->card)));
- }
-
- return bcmerror;
-}
-
-#ifdef BCMDBG
-static int brcmf_sdbrcm_readshared(struct brcmf_bus *bus, struct sdpcm_shared *sh)
-{
- u32 addr;
- int rv;
-
- /* Read last word in memory to determine address of
- sdpcm_shared structure */
- rv = brcmf_sdbrcm_membytes(bus, false, bus->ramsize - 4, (u8 *)&addr,
- 4);
- if (rv < 0)
- return rv;
-
- addr = le32_to_cpu(addr);
-
- BRCMF_INFO(("sdpcm_shared address 0x%08X\n", addr));
-
- /*
- * Check if addr is valid.
- * NVRAM length at the end of memory should have been overwritten.
- */
- if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
- BRCMF_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n",
- __func__, addr));
- return -EBADE;
- }
-
- /* Read rte_shared structure */
- rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *) sh,
- sizeof(struct sdpcm_shared));
- if (rv < 0)
- return rv;
-
- /* Endianness */
- sh->flags = le32_to_cpu(sh->flags);
- sh->trap_addr = le32_to_cpu(sh->trap_addr);
- sh->assert_exp_addr = le32_to_cpu(sh->assert_exp_addr);
- sh->assert_file_addr = le32_to_cpu(sh->assert_file_addr);
- sh->assert_line = le32_to_cpu(sh->assert_line);
- sh->console_addr = le32_to_cpu(sh->console_addr);
- sh->msgtrace_addr = le32_to_cpu(sh->msgtrace_addr);
-
- if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
- BRCMF_ERROR(("%s: sdpcm_shared version %d in brcmf "
- "is different than sdpcm_shared version %d in dongle\n",
- __func__, SDPCM_SHARED_VERSION,
- sh->flags & SDPCM_SHARED_VERSION_MASK));
- return -EBADE;
- }
-
- return 0;
-}
-
-static int brcmf_sdbrcm_checkdied(struct brcmf_bus *bus, u8 *data, uint size)
-{
- int bcmerror = 0;
- uint msize = 512;
- char *mbuffer = NULL;
- uint maxstrlen = 256;
- char *str = NULL;
- struct brcmf_trap tr;
- struct sdpcm_shared sdpcm_shared;
- struct brcmu_strbuf strbuf;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (data == NULL) {
- /*
- * Called after a rx ctrl timeout. "data" is NULL.
- * allocate memory to trace the trap or assert.
- */
- size = msize;
- mbuffer = data = kmalloc(msize, GFP_ATOMIC);
- if (mbuffer == NULL) {
- BRCMF_ERROR(("%s: kmalloc(%d) failed\n", __func__,
- msize));
- bcmerror = -ENOMEM;
- goto done;
- }
- }
-
- str = kmalloc(maxstrlen, GFP_ATOMIC);
- if (str == NULL) {
- BRCMF_ERROR(("%s: kmalloc(%d) failed\n", __func__, maxstrlen));
- bcmerror = -ENOMEM;
- goto done;
- }
-
- bcmerror = brcmf_sdbrcm_readshared(bus, &sdpcm_shared);
- if (bcmerror < 0)
- goto done;
-
- brcmu_binit(&strbuf, data, size);
-
- brcmu_bprintf(&strbuf,
- "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
- sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr);
-
- if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
- /* NOTE: Misspelled assert is intentional - DO NOT FIX.
- * (Avoids conflict with real asserts for programmatic
- * parsing of output.)
- */
- brcmu_bprintf(&strbuf, "Assrt not built in dongle\n");
- }
-
- if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) ==
- 0) {
- /* NOTE: Misspelled assert is intentional - DO NOT FIX.
- * (Avoids conflict with real asserts for programmatic
- * parsing of output.)
- */
- brcmu_bprintf(&strbuf, "No trap%s in dongle",
- (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
- ? "/assrt" : "");
- } else {
- if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
- /* Download assert */
- brcmu_bprintf(&strbuf, "Dongle assert");
- if (sdpcm_shared.assert_exp_addr != 0) {
- str[0] = '\0';
- bcmerror = brcmf_sdbrcm_membytes(bus, false,
- sdpcm_shared.assert_exp_addr,
- (u8 *) str, maxstrlen);
- if (bcmerror < 0)
- goto done;
-
- str[maxstrlen - 1] = '\0';
- brcmu_bprintf(&strbuf, " expr \"%s\"", str);
- }
-
- if (sdpcm_shared.assert_file_addr != 0) {
- str[0] = '\0';
- bcmerror = brcmf_sdbrcm_membytes(bus, false,
- sdpcm_shared.assert_file_addr,
- (u8 *) str, maxstrlen);
- if (bcmerror < 0)
- goto done;
-
- str[maxstrlen - 1] = '\0';
- brcmu_bprintf(&strbuf, " file \"%s\"", str);
- }
-
- brcmu_bprintf(&strbuf, " line %d ",
- sdpcm_shared.assert_line);
- }
-
- if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
- bcmerror = brcmf_sdbrcm_membytes(bus, false,
- sdpcm_shared.trap_addr, (u8 *)&tr,
- sizeof(struct brcmf_trap));
- if (bcmerror < 0)
- goto done;
-
- brcmu_bprintf(&strbuf,
- "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
- "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
- "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n",
- tr.type, tr.epc, tr.cpsr, tr.spsr, tr.r13,
- tr.r14, tr.pc, sdpcm_shared.trap_addr,
- tr.r0, tr.r1, tr.r2, tr.r3, tr.r4, tr.r5,
- tr.r6, tr.r7);
- }
- }
-
- if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP))
- BRCMF_ERROR(("%s: %s\n", __func__, strbuf.origbuf));
-
-#ifdef BCMDBG
- if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
- /* Mem dump to a file on device */
- brcmf_sdbrcm_mem_dump(bus);
- }
-#endif /* BCMDBG */
-
-done:
- kfree(mbuffer);
- kfree(str);
-
- return bcmerror;
-}
-
-static int brcmf_sdbrcm_mem_dump(struct brcmf_bus *bus)
-{
- int ret = 0;
- int size; /* Full mem size */
- int start = 0; /* Start address */
- int read_size = 0; /* Read size of each iteration */
- u8 *buf = NULL, *databuf = NULL;
-
- /* Get full mem size */
- size = bus->ramsize;
- buf = kmalloc(size, GFP_ATOMIC);
- if (!buf) {
- BRCMF_ERROR(("%s: Out of memory (%d bytes)\n", __func__, size));
- return -1;
- }
-
- /* Read mem content */
- printk(KERN_DEBUG "Dump dongle memory");
- databuf = buf;
- while (size) {
- read_size = min(MEMBLOCK, size);
- ret = brcmf_sdbrcm_membytes(bus, false, start, databuf,
- read_size);
- if (ret) {
- BRCMF_ERROR(("%s: Error membytes %d\n", __func__, ret));
- kfree(buf);
- return -1;
- }
- printk(".");
-
- /* Decrement size and increment start address */
- size -= read_size;
- start += read_size;
- databuf += read_size;
- }
- printk(KERN_DEBUG "Done\n");
-
- /* free buf before return !!! */
- if (brcmf_write_to_file(bus->drvr, buf, bus->ramsize)) {
- BRCMF_ERROR(("%s: Error writing to files\n", __func__));
- return -1;
- }
-
- /* buf free handled in brcmf_write_to_file, not here */
- return 0;
-}
-
-#define CONSOLE_LINE_MAX 192
-
-static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus)
-{
- struct brcmf_console *c = &bus->console;
- u8 line[CONSOLE_LINE_MAX], ch;
- u32 n, idx, addr;
- int rv;
-
- /* Don't do anything until FWREADY updates console address */
- if (bus->console_addr == 0)
- return 0;
-
- /* Read console log struct */
- addr = bus->console_addr + offsetof(struct rte_console, log);
- rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&c->log,
- sizeof(c->log));
- if (rv < 0)
- return rv;
-
- /* Allocate console buffer (one time only) */
- if (c->buf == NULL) {
- c->bufsize = le32_to_cpu(c->log.buf_size);
- c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
- if (c->buf == NULL)
- return -ENOMEM;
- }
-
- idx = le32_to_cpu(c->log.idx);
-
- /* Protect against corrupt value */
- if (idx > c->bufsize)
- return -EBADE;
-
- /* Skip reading the console buffer if the index pointer
- has not moved */
- if (idx == c->last)
- return 0;
-
- /* Read the console buffer */
- addr = le32_to_cpu(c->log.buf);
- rv = brcmf_sdbrcm_membytes(bus, false, addr, c->buf, c->bufsize);
- if (rv < 0)
- return rv;
-
- while (c->last != idx) {
- for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
- if (c->last == idx) {
- /* This would output a partial line.
- * Instead, back up
- * the buffer pointer and output this
- * line next time around.
- */
- if (c->last >= n)
- c->last -= n;
- else
- c->last = c->bufsize - n;
- goto break2;
- }
- ch = c->buf[c->last];
- c->last = (c->last + 1) % c->bufsize;
- if (ch == '\n')
- break;
- line[n] = ch;
- }
-
- if (n > 0) {
- if (line[n - 1] == '\r')
- n--;
- line[n] = 0;
- printk(KERN_DEBUG "CONSOLE: %s\n", line);
- }
- }
-break2:
-
- return 0;
-}
-#endif /* BCMDBG */
-
-int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len)
-{
- int bcmerror = 0;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* Basic sanity checks */
- if (bus->drvr->up) {
- bcmerror = -EISCONN;
- goto err;
- }
- if (!len) {
- bcmerror = -EOVERFLOW;
- goto err;
- }
-
- /* Free the old ones and replace with passed variables */
- kfree(bus->vars);
-
- bus->vars = kmalloc(len, GFP_ATOMIC);
- bus->varsz = bus->vars ? len : 0;
- if (bus->vars == NULL) {
- bcmerror = -ENOMEM;
- goto err;
- }
-
- /* Copy the passed variables, which should include the
- terminating double-null */
- memcpy(bus->vars, arg, bus->varsz);
-err:
- return bcmerror;
-}
-
-static int
-brcmf_sdbrcm_doiovar(struct brcmf_bus *bus, const struct brcmu_iovar *vi, u32 actionid,
- const char *name, void *params, int plen, void *arg, int len,
- int val_size)
-{
- int bcmerror = 0;
- s32 int_val = 0;
- bool bool_val = 0;
-
- BRCMF_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p "
- "len %d val_size %d\n", __func__, actionid, name, params,
- plen, arg, len, val_size));
-
- bcmerror = brcmu_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid));
- if (bcmerror != 0)
- goto exit;
-
- if (plen >= (int)sizeof(int_val))
- memcpy(&int_val, params, sizeof(int_val));
-
- bool_val = (int_val != 0) ? true : false;
-
- /* Some ioctls use the bus */
- brcmf_sdbrcm_sdlock(bus);
-
- /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
- if (bus->drvr->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
- actionid == IOV_GVAL(IOV_DEVRESET))) {
- bcmerror = -EPERM;
- goto exit;
- }
-
- /* Handle sleep stuff before any clock mucking */
- if (vi->varid == IOV_SLEEP) {
- if (IOV_ISSET(actionid)) {
- bcmerror = brcmf_sdbrcm_bussleep(bus, bool_val);
- } else {
- int_val = (s32) bus->sleeping;
- memcpy(arg, &int_val, val_size);
- }
- goto exit;
- }
-
- /* Request clock to allow SDIO accesses */
- if (!bus->drvr->dongle_reset) {
- BUS_WAKE(bus);
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
- }
-
- switch (actionid) {
- case IOV_GVAL(IOV_INTR):
- int_val = (s32) bus->intr;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_INTR):
- bus->intr = bool_val;
- bus->intdis = false;
- if (bus->drvr->up) {
- BRCMF_INTR(("%s: %s SDIO interrupts\n", __func__,
- bus->intr ? "enable" : "disable"));
- if (bus->intr) {
- brcmf_sdcard_intr_enable(bus->card);
- } else {
- brcmf_sdcard_intr_disable(bus->card);
- }
- }
- break;
-
- case IOV_GVAL(IOV_POLLRATE):
- int_val = (s32) bus->pollrate;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_POLLRATE):
- bus->pollrate = (uint) int_val;
- bus->poll = (bus->pollrate != 0);
- break;
-
- case IOV_GVAL(IOV_IDLETIME):
- int_val = bus->idletime;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_IDLETIME):
- if ((int_val < 0) && (int_val != BRCMF_IDLE_IMMEDIATE))
- bcmerror = -EINVAL;
- else
- bus->idletime = int_val;
- break;
-
- case IOV_GVAL(IOV_IDLECLOCK):
- int_val = (s32) bus->idleclock;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_IDLECLOCK):
- bus->idleclock = int_val;
- break;
-
- case IOV_GVAL(IOV_SD1IDLE):
- int_val = (s32) sd1idle;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_SD1IDLE):
- sd1idle = bool_val;
- break;
-
- case IOV_SVAL(IOV_MEMBYTES):
- case IOV_GVAL(IOV_MEMBYTES):
- {
- u32 address;
- uint size, dsize;
- u8 *data;
-
- bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
-
- address = (u32) int_val;
- memcpy(&int_val, (char *)params + sizeof(int_val),
- sizeof(int_val));
- size = (uint) int_val;
-
- /* Do some validation */
- dsize = set ? plen - (2 * sizeof(int)) : len;
- if (dsize < size) {
- BRCMF_ERROR(("%s: error on %s membytes, addr "
- "0x%08x size %d dsize %d\n",
- __func__, (set ? "set" : "get"),
- address, size, dsize));
- bcmerror = -EINVAL;
- break;
- }
-
- BRCMF_INFO(("%s: Request to %s %d bytes at address "
- "0x%08x\n", __func__,
- (set ? "write" : "read"), size, address));
-
- /* If we know about SOCRAM, check for a fit */
- if ((bus->orig_ramsize) &&
- ((address > bus->orig_ramsize)
- || (address + size > bus->orig_ramsize))) {
- BRCMF_ERROR(("%s: ramsize 0x%08x doesn't have"
- " %d bytes at 0x%08x\n", __func__,
- bus->orig_ramsize, size, address));
- bcmerror = -EINVAL;
- break;
- }
-
- /* Generate the actual data pointer */
- data =
- set ? (u8 *) params +
- 2 * sizeof(int) : (u8 *) arg;
-
- /* Call to do the transfer */
- bcmerror = brcmf_sdbrcm_membytes(bus, set, address,
- data, size);
-
- break;
- }
-
- case IOV_GVAL(IOV_MEMSIZE):
- int_val = (s32) bus->ramsize;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_GVAL(IOV_SDIOD_DRIVE):
- int_val = (s32) brcmf_sdiod_drive_strength;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_SDIOD_DRIVE):
- brcmf_sdiod_drive_strength = int_val;
- brcmf_sdbrcm_sdiod_drive_strength_init(bus,
- brcmf_sdiod_drive_strength);
- break;
-
- case IOV_SVAL(IOV_DOWNLOAD):
- bcmerror = brcmf_sdbrcm_download_state(bus, bool_val);
- break;
-
- case IOV_SVAL(IOV_VARS):
- bcmerror = brcmf_sdbrcm_downloadvars(bus, arg, len);
- break;
-
- case IOV_GVAL(IOV_READAHEAD):
- int_val = (s32) brcmf_readahead;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_READAHEAD):
- if (bool_val && !brcmf_readahead)
- bus->nextlen = 0;
- brcmf_readahead = bool_val;
- break;
-
- case IOV_GVAL(IOV_SDRXCHAIN):
- int_val = (s32) bus->use_rxchain;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_SDRXCHAIN):
- if (bool_val && !bus->sd_rxchain)
- bcmerror = -ENOTSUPP;
- else
- bus->use_rxchain = bool_val;
- break;
- case IOV_GVAL(IOV_ALIGNCTL):
- int_val = (s32) brcmf_alignctl;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_ALIGNCTL):
- brcmf_alignctl = bool_val;
- break;
-
- case IOV_GVAL(IOV_SDALIGN):
- int_val = BRCMF_SDALIGN;
- memcpy(arg, &int_val, val_size);
- break;
-
-#ifdef BCMDBG
- case IOV_GVAL(IOV_VARS):
- if (bus->varsz < (uint) len)
- memcpy(arg, bus->vars, bus->varsz);
- else
- bcmerror = -EOVERFLOW;
- break;
-#endif /* BCMDBG */
-
-#ifdef BCMDBG
- case IOV_GVAL(IOV_DCONSOLE_POLL):
- int_val = (s32) brcmf_console_ms;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_DCONSOLE_POLL):
- brcmf_console_ms = (uint) int_val;
- break;
-
- case IOV_SVAL(IOV_CONS):
- if (len > 0)
- bcmerror = brcmf_sdbrcm_bus_console_in(bus->drvr,
- arg, len - 1);
- break;
-
- case IOV_GVAL(IOV_SDREG):
- {
- struct brcmf_sdreg *sd_ptr;
- u32 addr, size;
-
- sd_ptr = (struct brcmf_sdreg *) params;
-
- addr = bus->ci->buscorebase + sd_ptr->offset;
- size = sd_ptr->func;
- int_val = (s32) brcmf_sdcard_reg_read(bus->card, addr,
- size);
- if (brcmf_sdcard_regfail(bus->card))
- bcmerror = -EIO;
- memcpy(arg, &int_val, sizeof(s32));
- break;
- }
-
- case IOV_SVAL(IOV_SDREG):
- {
- struct brcmf_sdreg *sd_ptr;
- u32 addr, size;
-
- sd_ptr = (struct brcmf_sdreg *) params;
-
- addr = bus->ci->buscorebase + sd_ptr->offset;
- size = sd_ptr->func;
- brcmf_sdcard_reg_write(bus->card, addr, size,
- sd_ptr->value);
- if (brcmf_sdcard_regfail(bus->card))
- bcmerror = -EIO;
- break;
- }
-
- /* Same as above, but offset is not backplane
- (not SDIO core) */
- case IOV_GVAL(IOV_SBREG):
- {
- struct brcmf_sdreg sdreg;
- u32 addr, size;
-
- memcpy(&sdreg, params, sizeof(sdreg));
-
- addr = SI_ENUM_BASE + sdreg.offset;
- size = sdreg.func;
- int_val = (s32) brcmf_sdcard_reg_read(bus->card, addr,
- size);
- if (brcmf_sdcard_regfail(bus->card))
- bcmerror = -EIO;
- memcpy(arg, &int_val, sizeof(s32));
- break;
- }
-
- case IOV_SVAL(IOV_SBREG):
- {
- struct brcmf_sdreg sdreg;
- u32 addr, size;
-
- memcpy(&sdreg, params, sizeof(sdreg));
-
- addr = SI_ENUM_BASE + sdreg.offset;
- size = sdreg.func;
- brcmf_sdcard_reg_write(bus->card, addr, size,
- sdreg.value);
- if (brcmf_sdcard_regfail(bus->card))
- bcmerror = -EIO;
- break;
- }
-
- case IOV_GVAL(IOV_SDCIS):
- {
- *(char *)arg = 0;
-
- strcat(arg, "\nFunc 0\n");
- brcmf_sdcard_cis_read(bus->card, 0x10,
- (u8 *) arg + strlen(arg),
- SBSDIO_CIS_SIZE_LIMIT);
- strcat(arg, "\nFunc 1\n");
- brcmf_sdcard_cis_read(bus->card, 0x11,
- (u8 *) arg + strlen(arg),
- SBSDIO_CIS_SIZE_LIMIT);
- strcat(arg, "\nFunc 2\n");
- brcmf_sdcard_cis_read(bus->card, 0x12,
- (u8 *) arg + strlen(arg),
- SBSDIO_CIS_SIZE_LIMIT);
- break;
- }
-
- case IOV_GVAL(IOV_FORCEEVEN):
- int_val = (s32) forcealign;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_FORCEEVEN):
- forcealign = bool_val;
- break;
-
- case IOV_GVAL(IOV_TXBOUND):
- int_val = (s32) brcmf_txbound;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_TXBOUND):
- brcmf_txbound = (uint) int_val;
- break;
-
- case IOV_GVAL(IOV_RXBOUND):
- int_val = (s32) brcmf_rxbound;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_RXBOUND):
- brcmf_rxbound = (uint) int_val;
- break;
-
- case IOV_GVAL(IOV_TXMINMAX):
- int_val = (s32) brcmf_txminmax;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_TXMINMAX):
- brcmf_txminmax = (uint) int_val;
- break;
-#endif /* BCMDBG */
-
-#ifdef SDTEST
- case IOV_GVAL(IOV_EXTLOOP):
- int_val = (s32) bus->ext_loop;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_EXTLOOP):
- bus->ext_loop = bool_val;
- break;
-
- case IOV_GVAL(IOV_PKTGEN):
- bcmerror = brcmf_sdbrcm_pktgen_get(bus, arg);
- break;
-
- case IOV_SVAL(IOV_PKTGEN):
- bcmerror = brcmf_sdbrcm_pktgen_set(bus, arg);
- break;
-#endif /* SDTEST */
-
- case IOV_SVAL(IOV_DEVRESET):
- BRCMF_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d "
- "busstate=%d\n",
- __func__, bool_val, bus->drvr->dongle_reset,
- bus->drvr->busstate));
-
- brcmf_bus_devreset(bus->drvr, (u8) bool_val);
-
- break;
-
- case IOV_GVAL(IOV_DEVRESET):
- BRCMF_TRACE(("%s: Called get IOV_DEVRESET\n", __func__));
-
- /* Get its status */
- int_val = (bool) bus->drvr->dongle_reset;
- memcpy(arg, &int_val, val_size);
-
- break;
-
- case IOV_GVAL(IOV_WDTICK):
- int_val = (s32) brcmf_watchdog_ms;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_WDTICK):
- if (!bus->drvr->up) {
- bcmerror = -ENOLINK;
- break;
- }
- brcmf_sdbrcm_wd_timer(bus, (uint) int_val);
- break;
-
- default:
- bcmerror = -ENOTSUPP;
- break;
- }
-
-exit:
- if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && !bus->dpc_sched) {
- bus->activity = false;
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
- }
-
- brcmf_sdbrcm_sdunlock(bus);
-
- if (actionid == IOV_SVAL(IOV_DEVRESET) && bool_val == false)
- brcmf_c_preinit_ioctls(bus->drvr);
-
- return bcmerror;
-}
-
-static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
-{
- int bcmerror = 0;
- u32 varsize;
- u32 varaddr;
- u8 *vbuffer;
- u32 varsizew;
-#ifdef BCMDBG
- char *nvram_ularray;
-#endif /* BCMDBG */
-
- /* Even if there are no vars are to be written, we still
- need to set the ramsize. */
- varsize = bus->varsz ? roundup(bus->varsz, 4) : 0;
- varaddr = (bus->ramsize - 4) - varsize;
-
- if (bus->vars) {
- vbuffer = kzalloc(varsize, GFP_ATOMIC);
- if (!vbuffer)
- return -ENOMEM;
-
- memcpy(vbuffer, bus->vars, bus->varsz);
-
- /* Write the vars list */
- bcmerror =
- brcmf_sdbrcm_membytes(bus, true, varaddr, vbuffer, varsize);
-#ifdef BCMDBG
- /* Verify NVRAM bytes */
- BRCMF_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
- nvram_ularray = kmalloc(varsize, GFP_ATOMIC);
- if (!nvram_ularray)
- return -ENOMEM;
-
- /* Upload image to verify downloaded contents. */
- memset(nvram_ularray, 0xaa, varsize);
-
- /* Read the vars list to temp buffer for comparison */
- bcmerror =
- brcmf_sdbrcm_membytes(bus, false, varaddr, nvram_ularray,
- varsize);
- if (bcmerror) {
- BRCMF_ERROR(("%s: error %d on reading %d nvram bytes"
- " at 0x%08x\n", __func__, bcmerror,
- varsize, varaddr));
- }
- /* Compare the org NVRAM with the one read from RAM */
- if (memcmp(vbuffer, nvram_ularray, varsize)) {
- BRCMF_ERROR(("%s: Downloaded NVRAM image is "
- "corrupted.\n", __func__));
- } else
- BRCMF_ERROR(("%s: Download/Upload/Compare of"
- " NVRAM ok.\n", __func__));
-
- kfree(nvram_ularray);
-#endif /* BCMDBG */
-
- kfree(vbuffer);
- }
-
- /* adjust to the user specified RAM */
- BRCMF_INFO(("Physical memory size: %d, usable memory size: %d\n",
- bus->orig_ramsize, bus->ramsize));
- BRCMF_INFO(("Vars are at %d, orig varsize is %d\n", varaddr, varsize));
- varsize = ((bus->orig_ramsize - 4) - varaddr);
-
- /*
- * Determine the length token:
- * Varsize, converted to words, in lower 16-bits, checksum
- * in upper 16-bits.
- */
- if (bcmerror) {
- varsizew = 0;
- } else {
- varsizew = varsize / 4;
- varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
- varsizew = cpu_to_le32(varsizew);
- }
-
- BRCMF_INFO(("New varsize is %d, length token=0x%08x\n", varsize,
- varsizew));
-
- /* Write the length token to the last word */
- bcmerror = brcmf_sdbrcm_membytes(bus, true, (bus->orig_ramsize - 4),
- (u8 *)&varsizew, 4);
-
- return bcmerror;
-}
-
-static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
-{
- uint retries;
- u32 regdata;
- int bcmerror = 0;
-
- /* To enter download state, disable ARM and reset SOCRAM.
- * To exit download state, simply reset ARM (default is RAM boot).
- */
- if (enter) {
- bus->alp_only = true;
-
- brcmf_sdbrcm_chip_disablecore(bus->card, bus->ci->armcorebase);
-
- brcmf_sdbrcm_chip_resetcore(bus->card, bus->ci->ramcorebase);
-
- /* Clear the top bit of memory */
- if (bus->ramsize) {
- u32 zeros = 0;
- brcmf_sdbrcm_membytes(bus, true, bus->ramsize - 4,
- (u8 *)&zeros, 4);
- }
- } else {
- regdata = brcmf_sdcard_reg_read(bus->card,
- CORE_SB(bus->ci->ramcorebase, sbtmstatelow), 4);
- regdata &= (SBTML_RESET | SBTML_REJ_MASK |
- (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
- if ((SICF_CLOCK_EN << SBTML_SICF_SHIFT) != regdata) {
- BRCMF_ERROR(("%s: SOCRAM core is down after reset?\n",
- __func__));
- bcmerror = -EBADE;
- goto fail;
- }
-
- bcmerror = brcmf_sdbrcm_write_vars(bus);
- if (bcmerror) {
- BRCMF_ERROR(("%s: no vars written to RAM\n", __func__));
- bcmerror = 0;
- }
-
- w_sdreg32(bus, 0xFFFFFFFF,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
-
- brcmf_sdbrcm_chip_resetcore(bus->card, bus->ci->armcorebase);
-
- /* Allow HT Clock now that the ARM is running. */
- bus->alp_only = false;
-
- bus->drvr->busstate = BRCMF_BUS_LOAD;
- }
-fail:
- return bcmerror;
-}
-
-int
-brcmf_sdbrcm_bus_iovar_op(struct brcmf_pub *drvr, const char *name,
- void *params, int plen, void *arg, int len, bool set)
-{
- struct brcmf_bus *bus = drvr->bus;
- const struct brcmu_iovar *vi = NULL;
- int bcmerror = 0;
- int val_size;
- u32 actionid;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (name == NULL || len <= 0)
- return -EINVAL;
-
- /* Set does not take qualifiers */
- if (set && (params || plen))
- return -EINVAL;
-
- /* Get must have return space;*/
- if (!set && !(arg && len))
- return -EINVAL;
-
- /* Look up var locally; if not found pass to host driver */
- vi = brcmu_iovar_lookup(brcmf_sdio_iovars, name);
- if (vi == NULL) {
- brcmf_sdbrcm_sdlock(bus);
-
- BUS_WAKE(bus);
-
- /* Turn on clock in case SD command needs backplane */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
- bcmerror = brcmf_sdcard_iovar_op(bus->card, name, params, plen,
- arg, len, set);
-
- /* Similar check for blocksize change */
- if (set && strcmp(name, "sd_blocksize") == 0) {
- s32 fnum = 2;
- if (brcmf_sdcard_iovar_op
- (bus->card, "sd_blocksize", &fnum, sizeof(s32),
- &bus->blocksize, sizeof(s32),
- false) != 0) {
- bus->blocksize = 0;
- BRCMF_ERROR(("%s: fail on %s get\n", __func__,
- "sd_blocksize"));
- } else {
- BRCMF_INFO(("%s: noted sd_blocksize update,"
- " value now %d\n", __func__,
- bus->blocksize));
- }
- }
- bus->roundup = min(max_roundup, bus->blocksize);
-
- if (bus->idletime == BRCMF_IDLE_IMMEDIATE &&
- !bus->dpc_sched) {
- bus->activity = false;
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
- }
-
- brcmf_sdbrcm_sdunlock(bus);
- goto exit;
- }
-
- BRCMF_CTL(("%s: %s %s, len %d plen %d\n", __func__,
- name, (set ? "set" : "get"), len, plen));
-
- /* set up 'params' pointer in case this is a set command so that
- * the convenience int and bool code can be common to set and get
- */
- if (params == NULL) {
- params = arg;
- plen = len;
- }
-
- if (vi->type == IOVT_VOID)
- val_size = 0;
- else if (vi->type == IOVT_BUFFER)
- val_size = len;
- else
- /* all other types are integer sized */
- val_size = sizeof(int);
-
- actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
- bcmerror = brcmf_sdbrcm_doiovar(bus, vi, actionid, name, params, plen,
- arg, len, val_size);
-
-exit:
- return bcmerror;
-}
-
-void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus, bool enforce_mutex)
-{
- u32 local_hostintmask;
- u8 saveclk;
- uint retries;
- int err;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (enforce_mutex)
- brcmf_sdbrcm_sdlock(bus);
-
- BUS_WAKE(bus);
-
- /* Enable clock for device interrupts */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
- if (bus->watchdog_tsk) {
- send_sig(SIGTERM, bus->watchdog_tsk, 1);
- kthread_stop(bus->watchdog_tsk);
- bus->watchdog_tsk = NULL;
- }
-
- if (bus->dpc_tsk) {
- send_sig(SIGTERM, bus->dpc_tsk, 1);
- kthread_stop(bus->dpc_tsk);
- bus->dpc_tsk = NULL;
- } else
- tasklet_kill(&bus->tasklet);
-
- /* Disable and clear interrupts at the chip level also */
- w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
- local_hostintmask = bus->hostintmask;
- bus->hostintmask = 0;
-
- /* Change our idea of bus state */
- bus->drvr->busstate = BRCMF_BUS_DOWN;
-
- /* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdcard_cfg_read(bus->card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (!err) {
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
- }
- if (err) {
- BRCMF_ERROR(("%s: Failed to force clock for F2: err %d\n",
- __func__, err));
- }
-
- /* Turn off the bus (F2), free any pending packets */
- BRCMF_INTR(("%s: disable SDIO interrupts\n", __func__));
- brcmf_sdcard_intr_disable(bus->card);
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
-
- /* Clear any pending interrupts now that F2 is disabled */
- w_sdreg32(bus, local_hostintmask,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
-
- /* Turn off the backplane clock (only) */
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
- /* Clear the data packet queues */
- brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
-
- /* Clear any held glomming stuff */
- if (bus->glomd)
- brcmu_pkt_buf_free_skb(bus->glomd);
-
- if (bus->glom)
- brcmu_pkt_buf_free_skb(bus->glom);
-
- bus->glom = bus->glomd = NULL;
-
- /* Clear rx control and wake any waiters */
- bus->rxlen = 0;
- brcmf_os_ioctl_resp_wake(bus->drvr);
-
- /* Reset some F2 state stuff */
- bus->rxskip = false;
- bus->tx_seq = bus->rx_seq = 0;
-
- if (enforce_mutex)
- brcmf_sdbrcm_sdunlock(bus);
-}
-
-int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr, bool enforce_mutex)
-{
- struct brcmf_bus *bus = drvr->bus;
- struct brcmf_timeout tmo;
- uint retries = 0;
- u8 ready, enable;
- int err, ret = 0;
- u8 saveclk;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* try to download image and nvram to the dongle */
- if (drvr->busstate == BRCMF_BUS_DOWN) {
- if (!(brcmf_sdbrcm_download_firmware(bus, bus->card)))
- return -1;
- }
-
- if (!bus->drvr)
- return 0;
-
- /* Start the watchdog timer */
- bus->drvr->tickcnt = 0;
- brcmf_sdbrcm_wd_timer(bus, brcmf_watchdog_ms);
-
- if (enforce_mutex)
- brcmf_sdbrcm_sdlock(bus);
-
- /* Make sure backplane clock is on, needed to generate F2 interrupt */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
- if (bus->clkstate != CLK_AVAIL)
- goto exit;
-
- /* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk =
- brcmf_sdcard_cfg_read(bus->card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (!err) {
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
- }
- if (err) {
- BRCMF_ERROR(("%s: Failed to force clock for F2: err %d\n",
- __func__, err));
- goto exit;
- }
-
- /* Enable function 2 (frame transfers) */
- w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
- offsetof(struct sdpcmd_regs, tosbmailboxdata), &retries);
- enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
-
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_0, SDIO_CCCR_IOEx, enable,
- NULL);
-
- /* Give the dongle some time to do its thing and set IOR2 */
- brcmf_timeout_start(&tmo, BRCMF_WAIT_F2RDY * 1000);
-
- ready = 0;
- while (ready != enable && !brcmf_timeout_expired(&tmo))
- ready = brcmf_sdcard_cfg_read(bus->card, SDIO_FUNC_0,
- SDIO_CCCR_IORx, NULL);
-
- BRCMF_INFO(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
- __func__, enable, ready, tmo.elapsed));
-
- /* If F2 successfully enabled, set core and enable interrupts */
- if (ready == enable) {
- /* Set up the interrupt mask and enable interrupts */
- bus->hostintmask = HOSTINTMASK;
- w_sdreg32(bus, bus->hostintmask,
- offsetof(struct sdpcmd_regs, hostintmask), &retries);
-
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1, SBSDIO_WATERMARK,
- (u8) watermark, &err);
-
- /* Set bus state according to enable result */
- drvr->busstate = BRCMF_BUS_DATA;
-
- bus->intdis = false;
- if (bus->intr) {
- BRCMF_INTR(("%s: enable SDIO device interrupts\n",
- __func__));
- brcmf_sdcard_intr_enable(bus->card);
- } else {
- BRCMF_INTR(("%s: disable SDIO interrupts\n", __func__));
- brcmf_sdcard_intr_disable(bus->card);
- }
-
- }
-
- else {
- /* Disable F2 again */
- enable = SDIO_FUNC_ENABLE_1;
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- enable, NULL);
- }
-
- /* Restore previous clock setting */
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- saveclk, &err);
-
-#if defined(OOB_INTR_ONLY)
- /* Host registration for OOB interrupt */
- if (brcmf_sdio_register_oob_intr(bus->dhd)) {
- brcmf_sdbrcm_wd_timer(bus, 0);
- BRCMF_ERROR(("%s Host failed to resgister for OOB\n",
- __func__));
- ret = -ENODEV;
- goto exit;
- }
-
- /* Enable oob at firmware */
- brcmf_sdbrcm_enable_oob_intr(bus, true);
-#endif /* defined(OOB_INTR_ONLY) */
-
- /* If we didn't come up, turn off backplane clock */
- if (drvr->busstate != BRCMF_BUS_DATA)
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
-
-exit:
- if (enforce_mutex)
- brcmf_sdbrcm_sdunlock(bus);
-
- return ret;
-}
-
-static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
-{
- struct brcmf_sdio_card *card = bus->card;
- uint retries = 0;
- u16 lastrbc;
- u8 hi, lo;
- int err;
-
- BRCMF_ERROR(("%s: %sterminate frame%s\n", __func__,
- (abort ? "abort command, " : ""),
- (rtx ? ", send NAK" : "")));
-
- if (abort)
- brcmf_sdcard_abort(card, SDIO_FUNC_2);
-
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
- SFC_RF_TERM, &err);
- bus->f1regdata++;
-
- /* Wait until the packet has been flushed (device/FIFO stable) */
- for (lastrbc = retries = 0xffff; retries > 0; retries--) {
- hi = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_RFRAMEBCHI, NULL);
- lo = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_RFRAMEBCLO, NULL);
- bus->f1regdata += 2;
-
- if ((hi == 0) && (lo == 0))
- break;
-
- if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
- BRCMF_ERROR(("%s: count growing: last 0x%04x now "
- "0x%04x\n",
- __func__, lastrbc, ((hi << 8) + lo)));
- }
- lastrbc = (hi << 8) + lo;
- }
-
- if (!retries) {
- BRCMF_ERROR(("%s: count never zeroed: last 0x%04x\n",
- __func__, lastrbc));
- } else {
- BRCMF_INFO(("%s: flush took %d iterations\n", __func__,
- (0xffff - retries)));
- }
-
- if (rtx) {
- bus->rxrtx++;
- w_sdreg32(bus, SMB_NAK,
- offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
-
- bus->f1regdata++;
- if (retries <= retry_limit)
- bus->rxskip = true;
- }
-
- /* Clear partial in any case */
- bus->nextlen = 0;
-
- /* If we can't reach the device, signal failure */
- if (err || brcmf_sdcard_regfail(card))
- bus->drvr->busstate = BRCMF_BUS_DOWN;
-}
-
-static void
-brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
-{
- struct brcmf_sdio_card *card = bus->card;
- uint rdlen, pad;
-
- int sdret;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* Control data already received in aligned rxctl */
- if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
- goto gotpkt;
-
- /* Set rxctl for frame (w/optional alignment) */
- bus->rxctl = bus->rxbuf;
- if (brcmf_alignctl) {
- bus->rxctl += firstread;
- pad = ((unsigned long)bus->rxctl % BRCMF_SDALIGN);
- if (pad)
- bus->rxctl += (BRCMF_SDALIGN - pad);
- bus->rxctl -= firstread;
- }
-
- /* Copy the already-read portion over */
- memcpy(bus->rxctl, hdr, firstread);
- if (len <= firstread)
- goto gotpkt;
-
- /* Copy the full data pkt in gSPI case and process ioctl. */
- if (bus->bus == SPI_BUS) {
- memcpy(bus->rxctl, hdr, len);
- goto gotpkt;
- }
-
- /* Raise rdlen to next SDIO block to avoid tail command */
- rdlen = len - firstread;
- if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
- pad = bus->blocksize - (rdlen % bus->blocksize);
- if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
- ((len + pad) < bus->drvr->maxctl))
- rdlen += pad;
- } else if (rdlen % BRCMF_SDALIGN) {
- rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
- }
-
- /* Satisfy length-alignment requirements */
- if (forcealign && (rdlen & (ALIGNMENT - 1)))
- rdlen = roundup(rdlen, ALIGNMENT);
-
- /* Drop if the read is too big or it exceeds our maximum */
- if ((rdlen + firstread) > bus->drvr->maxctl) {
- BRCMF_ERROR(("%s: %d-byte control read exceeds %d-byte"
- " buffer\n", __func__, rdlen, bus->drvr->maxctl));
- bus->drvr->rx_errors++;
- brcmf_sdbrcm_rxfail(bus, false, false);
- goto done;
- }
-
- if ((len - doff) > bus->drvr->maxctl) {
- BRCMF_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds "
- "%d-byte limit\n",
- __func__, len, (len - doff), bus->drvr->maxctl));
- bus->drvr->rx_errors++;
- bus->rx_toolong++;
- brcmf_sdbrcm_rxfail(bus, false, false);
- goto done;
- }
-
- /* Read remainder of frame body into the rxctl buffer */
- sdret = brcmf_sdcard_recv_buf(card, brcmf_sdcard_cur_sbwad(card),
- SDIO_FUNC_2,
- F2SYNC, (bus->rxctl + firstread), rdlen,
- NULL, NULL, NULL);
- bus->f2rxdata++;
-
- /* Control frame failures need retransmission */
- if (sdret < 0) {
- BRCMF_ERROR(("%s: read %d control bytes failed: %d\n",
- __func__, rdlen, sdret));
- bus->rxc_errors++;
- brcmf_sdbrcm_rxfail(bus, true, true);
- goto done;
- }
-
-gotpkt:
-
-#ifdef BCMDBG
- if (BRCMF_BYTES_ON() && BRCMF_CTL_ON()) {
- printk(KERN_DEBUG "RxCtrl:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, bus->rxctl, len);
- }
-#endif
-
- /* Point to valid data and indicate its length */
- bus->rxctl += doff;
- bus->rxlen = len - doff;
-
-done:
- /* Awake any waiters */
- brcmf_os_ioctl_resp_wake(bus->drvr);
-}
-
-static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
-{
- u16 dlen, totlen;
- u8 *dptr, num = 0;
-
- u16 sublen, check;
- struct sk_buff *pfirst, *plast, *pnext, *save_pfirst;
-
- int errcode;
- u8 chan, seq, doff, sfdoff;
- u8 txmax;
-
- int ifidx = 0;
- bool usechain = bus->use_rxchain;
-
- /* If packets, issue read(s) and send up packet chain */
- /* Return sequence numbers consumed? */
-
- BRCMF_TRACE(("brcmf_sdbrcm_rxglom: start: glomd %p glom %p\n",
- bus->glomd, bus->glom));
-
- /* If there's a descriptor, generate the packet chain */
- if (bus->glomd) {
- pfirst = plast = pnext = NULL;
- dlen = (u16) (bus->glomd->len);
- dptr = bus->glomd->data;
- if (!dlen || (dlen & 1)) {
- BRCMF_ERROR(("%s: bad glomd len(%d),"
- " ignore descriptor\n",
- __func__, dlen));
- dlen = 0;
- }
-
- for (totlen = num = 0; dlen; num++) {
- /* Get (and move past) next length */
- sublen = get_unaligned_le16(dptr);
- dlen -= sizeof(u16);
- dptr += sizeof(u16);
- if ((sublen < SDPCM_HDRLEN) ||
- ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
- BRCMF_ERROR(("%s: descriptor len %d bad: %d\n",
- __func__, num, sublen));
- pnext = NULL;
- break;
- }
- if (sublen % BRCMF_SDALIGN) {
- BRCMF_ERROR(("%s: sublen %d not multiple of"
- " %d\n", __func__, sublen,
- BRCMF_SDALIGN));
- usechain = false;
- }
- totlen += sublen;
-
- /* For last frame, adjust read len so total
- is a block multiple */
- if (!dlen) {
- sublen +=
- (roundup(totlen, bus->blocksize) - totlen);
- totlen = roundup(totlen, bus->blocksize);
- }
-
- /* Allocate/chain packet for next subframe */
- pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN);
- if (pnext == NULL) {
- BRCMF_ERROR(("%s: bcm_pkt_buf_get_skb failed, "
- "num %d len %d\n", __func__,
- num, sublen));
- break;
- }
- if (!pfirst) {
- pfirst = plast = pnext;
- } else {
- plast->next = pnext;
- plast = pnext;
- }
-
- /* Adhere to start alignment requirements */
- PKTALIGN(pnext, sublen, BRCMF_SDALIGN);
- }
-
- /* If all allocations succeeded, save packet chain
- in bus structure */
- if (pnext) {
- BRCMF_GLOM(("%s: allocated %d-byte packet chain for %d "
- "subframes\n", __func__, totlen, num));
- if (BRCMF_GLOM_ON() && bus->nextlen) {
- if (totlen != bus->nextlen) {
- BRCMF_GLOM(("%s: glomdesc mismatch: "
- "nextlen %d glomdesc %d "
- "rxseq %d\n", __func__,
- bus->nextlen,
- totlen, rxseq));
- }
- }
- bus->glom = pfirst;
- pfirst = pnext = NULL;
- } else {
- if (pfirst)
- brcmu_pkt_buf_free_skb(pfirst);
- bus->glom = NULL;
- num = 0;
- }
-
- /* Done with descriptor packet */
- brcmu_pkt_buf_free_skb(bus->glomd);
- bus->glomd = NULL;
- bus->nextlen = 0;
- }
-
- /* Ok -- either we just generated a packet chain,
- or had one from before */
- if (bus->glom) {
- if (BRCMF_GLOM_ON()) {
- BRCMF_GLOM(("%s: try superframe read, packet chain:\n",
- __func__));
- for (pnext = bus->glom; pnext; pnext = pnext->next) {
- BRCMF_GLOM((" %p: %p len 0x%04x (%d)\n",
- pnext, (u8 *) (pnext->data),
- pnext->len, pnext->len));
- }
- }
-
- pfirst = bus->glom;
- dlen = (u16) brcmu_pkttotlen(pfirst);
-
- /* Do an SDIO read for the superframe. Configurable iovar to
- * read directly into the chained packet, or allocate a large
- * packet and and copy into the chain.
- */
- if (usechain) {
- errcode = brcmf_sdcard_recv_buf(bus->card,
- brcmf_sdcard_cur_sbwad(bus->card),
- SDIO_FUNC_2,
- F2SYNC, (u8 *) pfirst->data, dlen,
- pfirst, NULL, NULL);
- } else if (bus->dataptr) {
- errcode = brcmf_sdcard_recv_buf(bus->card,
- brcmf_sdcard_cur_sbwad(bus->card),
- SDIO_FUNC_2,
- F2SYNC, bus->dataptr, dlen,
- NULL, NULL, NULL);
- sublen = (u16) brcmu_pktfrombuf(pfirst, 0, dlen,
- bus->dataptr);
- if (sublen != dlen) {
- BRCMF_ERROR(("%s: FAILED TO COPY, dlen %d "
- "sublen %d\n",
- __func__, dlen, sublen));
- errcode = -1;
- }
- pnext = NULL;
- } else {
- BRCMF_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, "
- "FORCE FAILURE\n", dlen));
- errcode = -1;
- }
- bus->f2rxdata++;
-
- /* On failure, kill the superframe, allow a couple retries */
- if (errcode < 0) {
- BRCMF_ERROR(("%s: glom read of %d bytes failed: %d\n",
- __func__, dlen, errcode));
- bus->drvr->rx_errors++;
-
- if (bus->glomerr++ < 3) {
- brcmf_sdbrcm_rxfail(bus, true, true);
- } else {
- bus->glomerr = 0;
- brcmf_sdbrcm_rxfail(bus, true, false);
- brcmu_pkt_buf_free_skb(bus->glom);
- bus->rxglomfail++;
- bus->glom = NULL;
- }
- return 0;
- }
-#ifdef BCMDBG
- if (BRCMF_GLOM_ON()) {
- printk(KERN_DEBUG "SUPERFRAME:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- pfirst->data, min_t(int, pfirst->len, 48));
- }
-#endif
-
- /* Validate the superframe header */
- dptr = (u8 *) (pfirst->data);
- sublen = get_unaligned_le16(dptr);
- check = get_unaligned_le16(dptr + sizeof(u16));
-
- chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
- seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
- bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
- if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
- BRCMF_INFO(("%s: nextlen too large (%d) seq %d\n",
- __func__, bus->nextlen, seq));
- bus->nextlen = 0;
- }
- doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
- txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
-
- errcode = 0;
- if ((u16)~(sublen ^ check)) {
- BRCMF_ERROR(("%s (superframe): HW hdr error: len/check "
- "0x%04x/0x%04x\n", __func__, sublen,
- check));
- errcode = -1;
- } else if (roundup(sublen, bus->blocksize) != dlen) {
- BRCMF_ERROR(("%s (superframe): len 0x%04x, rounded "
- "0x%04x, expect 0x%04x\n",
- __func__, sublen,
- roundup(sublen, bus->blocksize), dlen));
- errcode = -1;
- } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) !=
- SDPCM_GLOM_CHANNEL) {
- BRCMF_ERROR(("%s (superframe): bad channel %d\n",
- __func__,
- SDPCM_PACKET_CHANNEL(&dptr
- [SDPCM_FRAMETAG_LEN])));
- errcode = -1;
- } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
- BRCMF_ERROR(("%s (superframe): got 2nd descriptor?\n",
- __func__));
- errcode = -1;
- } else if ((doff < SDPCM_HDRLEN) ||
- (doff > (pfirst->len - SDPCM_HDRLEN))) {
- BRCMF_ERROR(("%s (superframe): Bad data offset %d: "
- "HW %d pkt %d min %d\n",
- __func__, doff, sublen,
- pfirst->len, SDPCM_HDRLEN));
- errcode = -1;
- }
-
- /* Check sequence number of superframe SW header */
- if (rxseq != seq) {
- BRCMF_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
- __func__, seq, rxseq));
- bus->rx_badseq++;
- rxseq = seq;
- }
-
- /* Check window for sanity */
- if ((u8) (txmax - bus->tx_seq) > 0x40) {
- BRCMF_ERROR(("%s: unlikely tx max %d with tx_seq %d\n",
- __func__, txmax, bus->tx_seq));
- txmax = bus->tx_seq + 2;
- }
- bus->tx_max = txmax;
-
- /* Remove superframe header, remember offset */
- skb_pull(pfirst, doff);
- sfdoff = doff;
-
- /* Validate all the subframe headers */
- for (num = 0, pnext = pfirst; pnext && !errcode;
- num++, pnext = pnext->next) {
- dptr = (u8 *) (pnext->data);
- dlen = (u16) (pnext->len);
- sublen = get_unaligned_le16(dptr);
- check = get_unaligned_le16(dptr + sizeof(u16));
- chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
- doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
-#ifdef BCMDBG
- if (BRCMF_GLOM_ON()) {
- printk(KERN_DEBUG "subframe:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- dptr, 32);
- }
-#endif
-
- if ((u16)~(sublen ^ check)) {
- BRCMF_ERROR(("%s (subframe %d): HW hdr error: "
- "len/check 0x%04x/0x%04x\n",
- __func__, num, sublen, check));
- errcode = -1;
- } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
- BRCMF_ERROR(("%s (subframe %d): length mismatch"
- ": len 0x%04x, expect 0x%04x\n",
- __func__, num, sublen, dlen));
- errcode = -1;
- } else if ((chan != SDPCM_DATA_CHANNEL) &&
- (chan != SDPCM_EVENT_CHANNEL)) {
- BRCMF_ERROR(("%s (subframe %d): bad channel"
- " %d\n", __func__, num, chan));
- errcode = -1;
- } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
- BRCMF_ERROR(("%s (subframe %d): Bad data offset"
- " %d: HW %d min %d\n",
- __func__, num, doff, sublen,
- SDPCM_HDRLEN));
- errcode = -1;
- }
- }
-
- if (errcode) {
- /* Terminate frame on error, request
- a couple retries */
- if (bus->glomerr++ < 3) {
- /* Restore superframe header space */
- skb_push(pfirst, sfdoff);
- brcmf_sdbrcm_rxfail(bus, true, true);
- } else {
- bus->glomerr = 0;
- brcmf_sdbrcm_rxfail(bus, true, false);
- brcmu_pkt_buf_free_skb(bus->glom);
- bus->rxglomfail++;
- bus->glom = NULL;
- }
- bus->nextlen = 0;
- return 0;
- }
-
- /* Basic SD framing looks ok - process each packet (header) */
- save_pfirst = pfirst;
- bus->glom = NULL;
- plast = NULL;
-
- for (num = 0; pfirst; rxseq++, pfirst = pnext) {
- pnext = pfirst->next;
- pfirst->next = NULL;
-
- dptr = (u8 *) (pfirst->data);
- sublen = get_unaligned_le16(dptr);
- chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
- seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
- doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
-
- BRCMF_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d "
- "chan %d seq %d\n",
- __func__, num, pfirst, pfirst->data,
- pfirst->len, sublen, chan, seq));
-
- /* precondition: chan == SDPCM_DATA_CHANNEL ||
- chan == SDPCM_EVENT_CHANNEL */
-
- if (rxseq != seq) {
- BRCMF_GLOM(("%s: rx_seq %d, expected %d\n",
- __func__, seq, rxseq));
- bus->rx_badseq++;
- rxseq = seq;
- }
-#ifdef BCMDBG
- if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
- printk(KERN_DEBUG "Rx Subframe Data:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- dptr, dlen);
- }
-#endif
-
- __skb_trim(pfirst, sublen);
- skb_pull(pfirst, doff);
-
- if (pfirst->len == 0) {
- brcmu_pkt_buf_free_skb(pfirst);
- if (plast) {
- plast->next = pnext;
- } else {
- save_pfirst = pnext;
- }
- continue;
- } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, pfirst)
- != 0) {
- BRCMF_ERROR(("%s: rx protocol error\n",
- __func__));
- bus->drvr->rx_errors++;
- brcmu_pkt_buf_free_skb(pfirst);
- if (plast) {
- plast->next = pnext;
- } else {
- save_pfirst = pnext;
- }
- continue;
- }
-
- /* this packet will go up, link back into
- chain and count it */
- pfirst->next = pnext;
- plast = pfirst;
- num++;
-
-#ifdef BCMDBG
- if (BRCMF_GLOM_ON()) {
- BRCMF_GLOM(("%s subframe %d to stack, %p"
- "(%p/%d) nxt/lnk %p/%p\n",
- __func__, num, pfirst, pfirst->data,
- pfirst->len, pfirst->next,
- pfirst->prev));
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- pfirst->data,
- min_t(int, pfirst->len, 32));
- }
-#endif /* BCMDBG */
- }
- if (num) {
- brcmf_sdbrcm_sdunlock(bus);
- brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num);
- brcmf_sdbrcm_sdlock(bus);
- }
-
- bus->rxglomframes++;
- bus->rxglompkts += num;
- }
- return num;
-}
-
-/* Return true if there may be more frames to read */
-static uint
-brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
-{
- struct brcmf_sdio_card *card = bus->card;
-
- u16 len, check; /* Extracted hardware header fields */
- u8 chan, seq, doff; /* Extracted software header fields */
- u8 fcbits; /* Extracted fcbits from software header */
-
- struct sk_buff *pkt; /* Packet for event or data frames */
- u16 pad; /* Number of pad bytes to read */
- u16 rdlen; /* Total number of bytes to read */
- u8 rxseq; /* Next sequence number to expect */
- uint rxleft = 0; /* Remaining number of frames allowed */
- int sdret; /* Return code from calls */
- u8 txmax; /* Maximum tx sequence offered */
- bool len_consistent; /* Result of comparing readahead len and
- len from hw-hdr */
- u8 *rxbuf;
- int ifidx = 0;
- uint rxcount = 0; /* Total frames read */
-
-#if defined(BCMDBG) || defined(SDTEST)
- bool sdtest = false; /* To limit message spew from test mode */
-#endif
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
-#ifdef SDTEST
- /* Allow pktgen to override maxframes */
- if (bus->pktgen_count && (bus->pktgen_mode == BRCMF_PKTGEN_RECV)) {
- maxframes = bus->pktgen_count;
- sdtest = true;
- }
-#endif
-
- /* Not finished unless we encounter no more frames indication */
- *finished = false;
-
- for (rxseq = bus->rx_seq, rxleft = maxframes;
- !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN;
- rxseq++, rxleft--) {
-
- /* Handle glomming separately */
- if (bus->glom || bus->glomd) {
- u8 cnt;
- BRCMF_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
- __func__, bus->glomd, bus->glom));
- cnt = brcmf_sdbrcm_rxglom(bus, rxseq);
- BRCMF_GLOM(("%s: rxglom returned %d\n", __func__, cnt));
- rxseq += cnt - 1;
- rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
- continue;
- }
-
- /* Try doing single read if we can */
- if (brcmf_readahead && bus->nextlen) {
- u16 nextlen = bus->nextlen;
- bus->nextlen = 0;
-
- if (bus->bus == SPI_BUS) {
- rdlen = len = nextlen;
- } else {
- rdlen = len = nextlen << 4;
-
- /* Pad read to blocksize for efficiency */
- if (bus->roundup && bus->blocksize
- && (rdlen > bus->blocksize)) {
- pad =
- bus->blocksize -
- (rdlen % bus->blocksize);
- if ((pad <= bus->roundup)
- && (pad < bus->blocksize)
- && ((rdlen + pad + firstread) <
- MAX_RX_DATASZ))
- rdlen += pad;
- } else if (rdlen % BRCMF_SDALIGN) {
- rdlen +=
- BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
- }
- }
-
- /* We use bus->rxctl buffer in WinXP for initial
- * control pkt receives.
- * Later we use buffer-poll for data as well
- * as control packets.
- * This is required because dhd receives full
- * frame in gSPI unlike SDIO.
- * After the frame is received we have to
- * distinguish whether it is data
- * or non-data frame.
- */
- /* Allocate a packet buffer */
- pkt = brcmu_pkt_buf_get_skb(rdlen + BRCMF_SDALIGN);
- if (!pkt) {
- if (bus->bus == SPI_BUS) {
- bus->usebufpool = false;
- bus->rxctl = bus->rxbuf;
- if (brcmf_alignctl) {
- bus->rxctl += firstread;
- pad = ((unsigned long)bus->rxctl %
- BRCMF_SDALIGN);
- if (pad)
- bus->rxctl +=
- (BRCMF_SDALIGN - pad);
- bus->rxctl -= firstread;
- }
- rxbuf = bus->rxctl;
- /* Read the entire frame */
- sdret = brcmf_sdcard_recv_buf(card,
- brcmf_sdcard_cur_sbwad(card),
- SDIO_FUNC_2, F2SYNC,
- rxbuf, rdlen,
- NULL, NULL, NULL);
- bus->f2rxdata++;
-
- /* Control frame failures need
- retransmission */
- if (sdret < 0) {
- BRCMF_ERROR(("%s: read %d "
- "control bytes "
- "failed: %d\n",
- __func__,
- rdlen, sdret));
- /* dhd.rx_ctlerrs is higher */
- bus->rxc_errors++;
- brcmf_sdbrcm_rxfail(bus, true,
- (bus->bus ==
- SPI_BUS) ? false
- : true);
- continue;
- }
- } else {
- /* Give up on data,
- request rtx of events */
- BRCMF_ERROR(("%s (nextlen): "
- "brcmu_pkt_buf_get_skb "
- "failed:"
- " len %d rdlen %d expected"
- " rxseq %d\n", __func__,
- len, rdlen, rxseq));
- continue;
- }
- } else {
- if (bus->bus == SPI_BUS)
- bus->usebufpool = true;
-
- PKTALIGN(pkt, rdlen, BRCMF_SDALIGN);
- rxbuf = (u8 *) (pkt->data);
- /* Read the entire frame */
- sdret = brcmf_sdcard_recv_buf(card,
- brcmf_sdcard_cur_sbwad(card),
- SDIO_FUNC_2, F2SYNC,
- rxbuf, rdlen,
- pkt, NULL, NULL);
- bus->f2rxdata++;
-
- if (sdret < 0) {
- BRCMF_ERROR(("%s (nextlen): read %d"
- " bytes failed: %d\n",
- __func__, rdlen, sdret));
- brcmu_pkt_buf_free_skb(pkt);
- bus->drvr->rx_errors++;
- /* Force retry w/normal header read.
- * Don't attempt NAK for
- * gSPI
- */
- brcmf_sdbrcm_rxfail(bus, true,
- (bus->bus ==
- SPI_BUS) ? false :
- true);
- continue;
- }
- }
-
- /* Now check the header */
- memcpy(bus->rxhdr, rxbuf, SDPCM_HDRLEN);
-
- /* Extract hardware header fields */
- len = get_unaligned_le16(bus->rxhdr);
- check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
-
- /* All zeros means readahead info was bad */
- if (!(len | check)) {
- BRCMF_INFO(("%s (nextlen): read zeros in HW "
- "header???\n", __func__));
- brcmf_sdbrcm_pktfree2(bus, pkt);
- continue;
- }
-
- /* Validate check bytes */
- if ((u16)~(len ^ check)) {
- BRCMF_ERROR(("%s (nextlen): HW hdr error:"
- " nextlen/len/check"
- " 0x%04x/0x%04x/0x%04x\n",
- __func__, nextlen, len, check));
- bus->rx_badhdr++;
- brcmf_sdbrcm_rxfail(bus, false, false);
- brcmf_sdbrcm_pktfree2(bus, pkt);
- continue;
- }
-
- /* Validate frame length */
- if (len < SDPCM_HDRLEN) {
- BRCMF_ERROR(("%s (nextlen): HW hdr length "
- "invalid: %d\n", __func__, len));
- brcmf_sdbrcm_pktfree2(bus, pkt);
- continue;
- }
-
- /* Check for consistency withreadahead info */
- len_consistent = (nextlen != (roundup(len, 16) >> 4));
- if (len_consistent) {
- /* Mismatch, force retry w/normal
- header (may be >4K) */
- BRCMF_ERROR(("%s (nextlen): mismatch, "
- "nextlen %d len %d rnd %d; "
- "expected rxseq %d\n",
- __func__, nextlen,
- len, roundup(len, 16), rxseq));
- brcmf_sdbrcm_rxfail(bus, true,
- bus->bus != SPI_BUS);
- brcmf_sdbrcm_pktfree2(bus, pkt);
- continue;
- }
-
- /* Extract software header fields */
- chan = SDPCM_PACKET_CHANNEL(
- &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
- seq = SDPCM_PACKET_SEQUENCE(
- &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
- doff = SDPCM_DOFFSET_VALUE(
- &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
- txmax = SDPCM_WINDOW_VALUE(
- &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
- bus->nextlen =
- bus->rxhdr[SDPCM_FRAMETAG_LEN +
- SDPCM_NEXTLEN_OFFSET];
- if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
- BRCMF_INFO(("%s (nextlen): got frame w/nextlen"
- " too large (%d), seq %d\n",
- __func__, bus->nextlen, seq));
- bus->nextlen = 0;
- }
-
- bus->drvr->rx_readahead_cnt++;
-
- /* Handle Flow Control */
- fcbits = SDPCM_FCMASK_VALUE(
- &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
- if (bus->flowcontrol != fcbits) {
- if (~bus->flowcontrol & fcbits)
- bus->fc_xoff++;
-
- if (bus->flowcontrol & ~fcbits)
- bus->fc_xon++;
-
- bus->fc_rcvd++;
- bus->flowcontrol = fcbits;
- }
-
- /* Check and update sequence number */
- if (rxseq != seq) {
- BRCMF_INFO(("%s (nextlen): rx_seq %d, expected "
- "%d\n", __func__, seq, rxseq));
- bus->rx_badseq++;
- rxseq = seq;
- }
-
- /* Check window for sanity */
- if ((u8) (txmax - bus->tx_seq) > 0x40) {
- BRCMF_ERROR(("%s: got unlikely tx max %d with "
- "tx_seq %d\n",
- __func__, txmax, bus->tx_seq));
- txmax = bus->tx_seq + 2;
- }
- bus->tx_max = txmax;
-
-#ifdef BCMDBG
- if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
- printk(KERN_DEBUG "Rx Data:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- rxbuf, len);
- } else if (BRCMF_HDRS_ON()) {
- printk(KERN_DEBUG "RxHdr:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- bus->rxhdr, SDPCM_HDRLEN);
- }
-#endif
-
- if (chan == SDPCM_CONTROL_CHANNEL) {
- if (bus->bus == SPI_BUS) {
- brcmf_sdbrcm_read_control(bus, rxbuf,
- len, doff);
- } else {
- BRCMF_ERROR(("%s (nextlen): readahead"
- " on control packet %d?\n",
- __func__, seq));
- /* Force retry w/normal header read */
- bus->nextlen = 0;
- brcmf_sdbrcm_rxfail(bus, false, true);
- }
- brcmf_sdbrcm_pktfree2(bus, pkt);
- continue;
- }
-
- if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
- BRCMF_ERROR(("Received %d bytes on %d channel."
- " Running out of " "rx pktbuf's or"
- " not yet malloced.\n",
- len, chan));
- continue;
- }
-
- /* Validate data offset */
- if ((doff < SDPCM_HDRLEN) || (doff > len)) {
- BRCMF_ERROR(("%s (nextlen): bad data offset %d:"
- " HW len %d min %d\n", __func__,
- doff, len, SDPCM_HDRLEN));
- brcmf_sdbrcm_rxfail(bus, false, false);
- brcmf_sdbrcm_pktfree2(bus, pkt);
- continue;
- }
-
- /* All done with this one -- now deliver the packet */
- goto deliver;
- }
- /* gSPI frames should not be handled in fractions */
- if (bus->bus == SPI_BUS)
- break;
-
- /* Read frame header (hardware and software) */
- sdret = brcmf_sdcard_recv_buf(card,
- brcmf_sdcard_cur_sbwad(card),
- SDIO_FUNC_2, F2SYNC, bus->rxhdr, firstread,
- NULL, NULL, NULL);
- bus->f2rxhdrs++;
-
- if (sdret < 0) {
- BRCMF_ERROR(("%s: RXHEADER FAILED: %d\n", __func__,
- sdret));
- bus->rx_hdrfail++;
- brcmf_sdbrcm_rxfail(bus, true, true);
- continue;
- }
-#ifdef BCMDBG
- if (BRCMF_BYTES_ON() || BRCMF_HDRS_ON()) {
- printk(KERN_DEBUG "RxHdr:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- bus->rxhdr, SDPCM_HDRLEN);
- }
-#endif
-
- /* Extract hardware header fields */
- len = get_unaligned_le16(bus->rxhdr);
- check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
-
- /* All zeros means no more frames */
- if (!(len | check)) {
- *finished = true;
- break;
- }
-
- /* Validate check bytes */
- if ((u16) ~(len ^ check)) {
- BRCMF_ERROR(("%s: HW hdr err: len/check "
- "0x%04x/0x%04x\n", __func__, len, check));
- bus->rx_badhdr++;
- brcmf_sdbrcm_rxfail(bus, false, false);
- continue;
- }
-
- /* Validate frame length */
- if (len < SDPCM_HDRLEN) {
- BRCMF_ERROR(("%s: HW hdr length invalid: %d\n",
- __func__, len));
- continue;
- }
-
- /* Extract software header fields */
- chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
- seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
- doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
- txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
- /* Validate data offset */
- if ((doff < SDPCM_HDRLEN) || (doff > len)) {
- BRCMF_ERROR(("%s: Bad data offset %d: HW len %d,"
- " min %d seq %d\n", __func__, doff,
- len, SDPCM_HDRLEN, seq));
- bus->rx_badhdr++;
- brcmf_sdbrcm_rxfail(bus, false, false);
- continue;
- }
-
- /* Save the readahead length if there is one */
- bus->nextlen =
- bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
- if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
- BRCMF_INFO(("%s (nextlen): got frame w/nextlen too"
- " large (%d), seq %d\n",
- __func__, bus->nextlen, seq));
- bus->nextlen = 0;
- }
-
- /* Handle Flow Control */
- fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
- if (bus->flowcontrol != fcbits) {
- if (~bus->flowcontrol & fcbits)
- bus->fc_xoff++;
-
- if (bus->flowcontrol & ~fcbits)
- bus->fc_xon++;
-
- bus->fc_rcvd++;
- bus->flowcontrol = fcbits;
- }
-
- /* Check and update sequence number */
- if (rxseq != seq) {
- BRCMF_INFO(("%s: rx_seq %d, expected %d\n", __func__,
- seq, rxseq));
- bus->rx_badseq++;
- rxseq = seq;
- }
-
- /* Check window for sanity */
- if ((u8) (txmax - bus->tx_seq) > 0x40) {
- BRCMF_ERROR(("%s: unlikely tx max %d with tx_seq %d\n",
- __func__, txmax, bus->tx_seq));
- txmax = bus->tx_seq + 2;
- }
- bus->tx_max = txmax;
-
- /* Call a separate function for control frames */
- if (chan == SDPCM_CONTROL_CHANNEL) {
- brcmf_sdbrcm_read_control(bus, bus->rxhdr, len, doff);
- continue;
- }
-
- /* precondition: chan is either SDPCM_DATA_CHANNEL,
- SDPCM_EVENT_CHANNEL, SDPCM_TEST_CHANNEL or
- SDPCM_GLOM_CHANNEL */
-
- /* Length to read */
- rdlen = (len > firstread) ? (len - firstread) : 0;
-
- /* May pad read to blocksize for efficiency */
- if (bus->roundup && bus->blocksize &&
- (rdlen > bus->blocksize)) {
- pad = bus->blocksize - (rdlen % bus->blocksize);
- if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
- ((rdlen + pad + firstread) < MAX_RX_DATASZ))
- rdlen += pad;
- } else if (rdlen % BRCMF_SDALIGN) {
- rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
- }
-
- /* Satisfy length-alignment requirements */
- if (forcealign && (rdlen & (ALIGNMENT - 1)))
- rdlen = roundup(rdlen, ALIGNMENT);
-
- if ((rdlen + firstread) > MAX_RX_DATASZ) {
- /* Too long -- skip this frame */
- BRCMF_ERROR(("%s: too long: len %d rdlen %d\n",
- __func__, len, rdlen));
- bus->drvr->rx_errors++;
- bus->rx_toolong++;
- brcmf_sdbrcm_rxfail(bus, false, false);
- continue;
- }
-
- pkt = brcmu_pkt_buf_get_skb(rdlen + firstread + BRCMF_SDALIGN);
- if (!pkt) {
- /* Give up on data, request rtx of events */
- BRCMF_ERROR(("%s: brcmu_pkt_buf_get_skb failed:"
- " rdlen %d chan %d\n", __func__, rdlen,
- chan));
- bus->drvr->rx_dropped++;
- brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan));
- continue;
- }
-
- /* Leave room for what we already read, and align remainder */
- skb_pull(pkt, firstread);
- PKTALIGN(pkt, rdlen, BRCMF_SDALIGN);
-
- /* Read the remaining frame data */
- sdret = brcmf_sdcard_recv_buf(card,
- brcmf_sdcard_cur_sbwad(card),
- SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)),
- rdlen, pkt, NULL, NULL);
- bus->f2rxdata++;
-
- if (sdret < 0) {
- BRCMF_ERROR(("%s: read %d %s bytes failed: %d\n",
- __func__, rdlen,
- ((chan == SDPCM_EVENT_CHANNEL) ? "event"
- : ((chan == SDPCM_DATA_CHANNEL) ? "data"
- : "test")), sdret));
- brcmu_pkt_buf_free_skb(pkt);
- bus->drvr->rx_errors++;
- brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan));
- continue;
- }
-
- /* Copy the already-read portion */
- skb_push(pkt, firstread);
- memcpy(pkt->data, bus->rxhdr, firstread);
-
-#ifdef BCMDBG
- if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
- printk(KERN_DEBUG "Rx Data:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- pkt->data, len);
- }
-#endif
-
-deliver:
- /* Save superframe descriptor and allocate packet frame */
- if (chan == SDPCM_GLOM_CHANNEL) {
- if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
- BRCMF_GLOM(("%s: glom descriptor, %d bytes:\n",
- __func__, len));
-#ifdef BCMDBG
- if (BRCMF_GLOM_ON()) {
- printk(KERN_DEBUG "Glom Data:\n");
- print_hex_dump_bytes("",
- DUMP_PREFIX_OFFSET,
- pkt->data, len);
- }
-#endif
- __skb_trim(pkt, len);
- skb_pull(pkt, SDPCM_HDRLEN);
- bus->glomd = pkt;
- } else {
- BRCMF_ERROR(("%s: glom superframe w/o "
- "descriptor!\n", __func__));
- brcmf_sdbrcm_rxfail(bus, false, false);
- }
- continue;
- }
-
- /* Fill in packet len and prio, deliver upward */
- __skb_trim(pkt, len);
- skb_pull(pkt, doff);
-
-#ifdef SDTEST
- /* Test channel packets are processed separately */
- if (chan == SDPCM_TEST_CHANNEL) {
- brcmf_sdbrcm_checkdied(bus, pkt, seq);
- continue;
- }
-#endif /* SDTEST */
-
- if (pkt->len == 0) {
- brcmu_pkt_buf_free_skb(pkt);
- continue;
- } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, pkt) != 0) {
- BRCMF_ERROR(("%s: rx protocol error\n", __func__));
- brcmu_pkt_buf_free_skb(pkt);
- bus->drvr->rx_errors++;
- continue;
- }
-
- /* Unlock during rx call */
- brcmf_sdbrcm_sdunlock(bus);
- brcmf_rx_frame(bus->drvr, ifidx, pkt, 1);
- brcmf_sdbrcm_sdlock(bus);
- }
- rxcount = maxframes - rxleft;
-#ifdef BCMDBG
- /* Message if we hit the limit */
- if (!rxleft && !sdtest)
- BRCMF_DATA(("%s: hit rx limit of %d frames\n", __func__,
- maxframes));
- else
-#endif /* BCMDBG */
- BRCMF_DATA(("%s: processed %d frames\n", __func__, rxcount));
- /* Back off rxseq if awaiting rtx, update rx_seq */
- if (bus->rxskip)
- rxseq--;
- bus->rx_seq = rxseq;
-
- return rxcount;
-}
-
-static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
-{
- u32 intstatus = 0;
- u32 hmb_data;
- u8 fcbits;
- uint retries = 0;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* Read mailbox data and ack that we did so */
- r_sdreg32(bus, &hmb_data,
- offsetof(struct sdpcmd_regs, tohostmailboxdata), &retries);
-
- if (retries <= retry_limit)
- w_sdreg32(bus, SMB_INT_ACK,
- offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
- bus->f1regdata += 2;
-
- /* Dongle recomposed rx frames, accept them again */
- if (hmb_data & HMB_DATA_NAKHANDLED) {
- BRCMF_INFO(("Dongle reports NAK handled, expect rtx of %d\n",
- bus->rx_seq));
- if (!bus->rxskip)
- BRCMF_ERROR(("%s: unexpected NAKHANDLED!\n", __func__));
-
- bus->rxskip = false;
- intstatus |= I_HMB_FRAME_IND;
- }
-
- /*
- * DEVREADY does not occur with gSPI.
- */
- if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
- bus->sdpcm_ver =
- (hmb_data & HMB_DATA_VERSION_MASK) >>
- HMB_DATA_VERSION_SHIFT;
- if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
- BRCMF_ERROR(("Version mismatch, dongle reports %d, "
- "expecting %d\n",
- bus->sdpcm_ver, SDPCM_PROT_VERSION));
- else
- BRCMF_INFO(("Dongle ready, protocol version %d\n",
- bus->sdpcm_ver));
- }
-
- /*
- * Flow Control has been moved into the RX headers and this out of band
- * method isn't used any more.
- * remaining backward compatible with older dongles.
- */
- if (hmb_data & HMB_DATA_FC) {
- fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
- HMB_DATA_FCDATA_SHIFT;
-
- if (fcbits & ~bus->flowcontrol)
- bus->fc_xoff++;
-
- if (bus->flowcontrol & ~fcbits)
- bus->fc_xon++;
-
- bus->fc_rcvd++;
- bus->flowcontrol = fcbits;
- }
-
- /* Shouldn't be any others */
- if (hmb_data & ~(HMB_DATA_DEVREADY |
- HMB_DATA_NAKHANDLED |
- HMB_DATA_FC |
- HMB_DATA_FWREADY |
- HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK)) {
- BRCMF_ERROR(("Unknown mailbox data content: 0x%02x\n",
- hmb_data));
- }
-
- return intstatus;
-}
-
-static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
-{
- struct brcmf_sdio_card *card = bus->card;
- u32 intstatus, newstatus = 0;
- uint retries = 0;
- uint rxlimit = brcmf_rxbound; /* Rx frames to read before resched */
- uint txlimit = brcmf_txbound; /* Tx frames to send before resched */
- uint framecnt = 0; /* Temporary counter of tx/rx frames */
- bool rxdone = true; /* Flag for no more read data */
- bool resched = false; /* Flag indicating resched wanted */
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* Start with leftover status bits */
- intstatus = bus->intstatus;
-
- brcmf_sdbrcm_sdlock(bus);
-
- /* If waiting for HTAVAIL, check status */
- if (bus->clkstate == CLK_PENDING) {
- int err;
- u8 clkctl, devctl = 0;
-
-#ifdef BCMDBG
- /* Check for inconsistent device control */
- devctl = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, &err);
- if (err) {
- BRCMF_ERROR(("%s: error reading DEVCTL: %d\n",
- __func__, err));
- bus->drvr->busstate = BRCMF_BUS_DOWN;
- }
-#endif /* BCMDBG */
-
- /* Read CSR, if clock on switch to AVAIL, else ignore */
- clkctl = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (err) {
- BRCMF_ERROR(("%s: error reading CSR: %d\n", __func__,
- err));
- bus->drvr->busstate = BRCMF_BUS_DOWN;
- }
-
- BRCMF_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
- devctl, clkctl));
-
- if (SBSDIO_HTAV(clkctl)) {
- devctl = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, &err);
- if (err) {
- BRCMF_ERROR(("%s: error reading DEVCTL: %d\n",
- __func__, err));
- bus->drvr->busstate = BRCMF_BUS_DOWN;
- }
- devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, devctl, &err);
- if (err) {
- BRCMF_ERROR(("%s: error writing DEVCTL: %d\n",
- __func__, err));
- bus->drvr->busstate = BRCMF_BUS_DOWN;
- }
- bus->clkstate = CLK_AVAIL;
- } else {
- goto clkwait;
- }
- }
-
- BUS_WAKE(bus);
-
- /* Make sure backplane clock is on */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true);
- if (bus->clkstate == CLK_PENDING)
- goto clkwait;
-
- /* Pending interrupt indicates new device status */
- if (bus->ipend) {
- bus->ipend = false;
- r_sdreg32(bus, &newstatus,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
- bus->f1regdata++;
- if (brcmf_sdcard_regfail(bus->card))
- newstatus = 0;
- newstatus &= bus->hostintmask;
- bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
- if (newstatus) {
- w_sdreg32(bus, newstatus,
- offsetof(struct sdpcmd_regs, intstatus),
- &retries);
- bus->f1regdata++;
- }
- }
-
- /* Merge new bits with previous */
- intstatus |= newstatus;
- bus->intstatus = 0;
-
- /* Handle flow-control change: read new state in case our ack
- * crossed another change interrupt. If change still set, assume
- * FC ON for safety, let next loop through do the debounce.
- */
- if (intstatus & I_HMB_FC_CHANGE) {
- intstatus &= ~I_HMB_FC_CHANGE;
- w_sdreg32(bus, I_HMB_FC_CHANGE,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
-
- r_sdreg32(bus, &newstatus,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
- bus->f1regdata += 2;
- bus->fcstate =
- !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
- intstatus |= (newstatus & bus->hostintmask);
- }
-
- /* Handle host mailbox indication */
- if (intstatus & I_HMB_HOST_INT) {
- intstatus &= ~I_HMB_HOST_INT;
- intstatus |= brcmf_sdbrcm_hostmail(bus);
- }
-
- /* Generally don't ask for these, can get CRC errors... */
- if (intstatus & I_WR_OOSYNC) {
- BRCMF_ERROR(("Dongle reports WR_OOSYNC\n"));
- intstatus &= ~I_WR_OOSYNC;
- }
-
- if (intstatus & I_RD_OOSYNC) {
- BRCMF_ERROR(("Dongle reports RD_OOSYNC\n"));
- intstatus &= ~I_RD_OOSYNC;
- }
-
- if (intstatus & I_SBINT) {
- BRCMF_ERROR(("Dongle reports SBINT\n"));
- intstatus &= ~I_SBINT;
- }
-
- /* Would be active due to wake-wlan in gSPI */
- if (intstatus & I_CHIPACTIVE) {
- BRCMF_INFO(("Dongle reports CHIPACTIVE\n"));
- intstatus &= ~I_CHIPACTIVE;
- }
-
- /* Ignore frame indications if rxskip is set */
- if (bus->rxskip)
- intstatus &= ~I_HMB_FRAME_IND;
-
- /* On frame indication, read available frames */
- if (PKT_AVAILABLE()) {
- framecnt = brcmf_sdbrcm_readframes(bus, rxlimit, &rxdone);
- if (rxdone || bus->rxskip)
- intstatus &= ~I_HMB_FRAME_IND;
- rxlimit -= min(framecnt, rxlimit);
- }
-
- /* Keep still-pending events for next scheduling */
- bus->intstatus = intstatus;
-
-clkwait:
- /* Re-enable interrupts to detect new device events (mailbox, rx frame)
- * or clock availability. (Allows tx loop to check ipend if desired.)
- * (Unless register access seems hosed, as we may not be able to ACK...)
- */
- if (bus->intr && bus->intdis && !brcmf_sdcard_regfail(card)) {
- BRCMF_INTR(("%s: enable SDIO interrupts, rxdone %d"
- " framecnt %d\n", __func__, rxdone, framecnt));
- bus->intdis = false;
- brcmf_sdcard_intr_enable(card);
- }
-
- if (DATAOK(bus) && bus->ctrl_frame_stat &&
- (bus->clkstate == CLK_AVAIL)) {
- int ret, i;
-
- ret = brcmf_sdbrcm_send_buf(bus, brcmf_sdcard_cur_sbwad(card),
- SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
- (u32) bus->ctrl_frame_len, NULL, NULL, NULL);
-
- if (ret < 0) {
- /* On failure, abort the command and
- terminate the frame */
- BRCMF_INFO(("%s: sdio error %d, abort command and "
- "terminate frame.\n", __func__, ret));
- bus->tx_sderrs++;
-
- brcmf_sdcard_abort(card, SDIO_FUNC_2);
-
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM,
- NULL);
- bus->f1regdata++;
-
- for (i = 0; i < 3; i++) {
- u8 hi, lo;
- hi = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCHI,
- NULL);
- lo = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCLO,
- NULL);
- bus->f1regdata += 2;
- if ((hi == 0) && (lo == 0))
- break;
- }
-
- }
- if (ret == 0)
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
-
- BRCMF_INFO(("Return_dpc value is : %d\n", ret));
- bus->ctrl_frame_stat = false;
- brcmf_sdbrcm_wait_event_wakeup(bus);
- }
- /* Send queued frames (limit 1 if rx may still be pending) */
- else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
- brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
- && DATAOK(bus)) {
- framecnt = rxdone ? txlimit : min(txlimit, brcmf_txminmax);
- framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
- txlimit -= framecnt;
- }
-
- /* Resched if events or tx frames are pending,
- else await next interrupt */
- /* On failed register access, all bets are off:
- no resched or interrupts */
- if ((bus->drvr->busstate == BRCMF_BUS_DOWN) ||
- brcmf_sdcard_regfail(card)) {
- BRCMF_ERROR(("%s: failed backplane access over SDIO, halting "
- "operation %d\n", __func__,
- brcmf_sdcard_regfail(card)));
- bus->drvr->busstate = BRCMF_BUS_DOWN;
- bus->intstatus = 0;
- } else if (bus->clkstate == CLK_PENDING) {
- BRCMF_INFO(("%s: rescheduled due to CLK_PENDING awaiting "
- "I_CHIPACTIVE interrupt\n", __func__));
- resched = true;
- } else if (bus->intstatus || bus->ipend ||
- (!bus->fcstate && brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol)
- && DATAOK(bus)) || PKT_AVAILABLE()) {
- resched = true;
- }
-
- bus->dpc_sched = resched;
-
- /* If we're done for now, turn off clock request. */
- if ((bus->clkstate != CLK_PENDING)
- && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
- bus->activity = false;
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
- }
-
- brcmf_sdbrcm_sdunlock(bus);
-
- return resched;
-}
-
-void brcmf_sdbrcm_isr(void *arg)
-{
- struct brcmf_bus *bus = (struct brcmf_bus *) arg;
- struct brcmf_sdio_card *card;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (!bus) {
- BRCMF_ERROR(("%s : bus is null pointer , exit\n", __func__));
- return;
- }
- card = bus->card;
-
- if (bus->drvr->busstate == BRCMF_BUS_DOWN) {
- BRCMF_ERROR(("%s : bus is down. we have nothing to do\n",
- __func__));
- return;
- }
- /* Count the interrupt call */
- bus->intrcount++;
- bus->ipend = true;
-
- /* Shouldn't get this interrupt if we're sleeping? */
- if (bus->sleeping) {
- BRCMF_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
- return;
- }
-
- /* Disable additional interrupts (is this needed now)? */
- if (bus->intr)
- BRCMF_INTR(("%s: disable SDIO interrupts\n", __func__));
- else
- BRCMF_ERROR(("brcmf_sdbrcm_isr() w/o interrupt configured!\n"));
-
- brcmf_sdcard_intr_disable(card);
- bus->intdis = true;
-
-#if defined(SDIO_ISR_THREAD)
- BRCMF_TRACE(("Calling brcmf_sdbrcm_dpc() from %s\n", __func__));
- while (brcmf_sdbrcm_dpc(bus))
- ;
-#else
- bus->dpc_sched = true;
- brcmf_sdbrcm_sched_dpc(bus);
-#endif
-
-}
-
-#ifdef SDTEST
-static void brcmf_sdbrcm_pktgen_init(struct brcmf_bus *bus)
-{
- /* Default to specified length, or full range */
- if (brcmf_pktgen_len) {
- bus->pktgen_maxlen = min(brcmf_pktgen_len,
- BRCMF_MAX_PKTGEN_LEN);
- bus->pktgen_minlen = bus->pktgen_maxlen;
- } else {
- bus->pktgen_maxlen = BRCMF_MAX_PKTGEN_LEN;
- bus->pktgen_minlen = 0;
- }
- bus->pktgen_len = (u16) bus->pktgen_minlen;
-
- /* Default to per-watchdog burst with 10s print time */
- bus->pktgen_freq = 1;
- bus->pktgen_print = 10000 / brcmf_watchdog_ms;
- bus->pktgen_count = (brcmf_pktgen * brcmf_watchdog_ms + 999) / 1000;
-
- /* Default to echo mode */
- bus->pktgen_mode = BRCMF_PKTGEN_ECHO;
- bus->pktgen_stop = 1;
-}
-
-static void brcmf_sdbrcm_pktgen(struct brcmf_bus *bus)
-{
- struct sk_buff *pkt;
- u8 *data;
- uint pktcount;
- uint fillbyte;
- u16 len;
-
- /* Display current count if appropriate */
- if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
- bus->pktgen_ptick = 0;
- printk(KERN_DEBUG "%s: send attempts %d rcvd %d\n",
- __func__, bus->pktgen_sent, bus->pktgen_rcvd);
- }
-
- /* For recv mode, just make sure dongle has started sending */
- if (bus->pktgen_mode == BRCMF_PKTGEN_RECV) {
- if (!bus->pktgen_rcvd)
- brcmf_sdbrcm_sdtest_set(bus, true);
- return;
- }
-
- /* Otherwise, generate or request the specified number of packets */
- for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
- /* Stop if total has been reached */
- if (bus->pktgen_total
- && (bus->pktgen_sent >= bus->pktgen_total)) {
- bus->pktgen_count = 0;
- break;
- }
-
- /* Allocate an appropriate-sized packet */
- len = bus->pktgen_len;
- pkt = brcmu_pkt_buf_get_skb(
- len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + BRCMF_SDALIGN,
- true);
- if (!pkt) {
- BRCMF_ERROR(("%s: brcmu_pkt_buf_get_skb failed!\n",
- __func__));
- break;
- }
- PKTALIGN(pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN),
- BRCMF_SDALIGN);
- data = (u8 *) (pkt->data) + SDPCM_HDRLEN;
-
- /* Write test header cmd and extra based on mode */
- switch (bus->pktgen_mode) {
- case BRCMF_PKTGEN_ECHO:
- *data++ = SDPCM_TEST_ECHOREQ;
- *data++ = (u8) bus->pktgen_sent;
- break;
-
- case BRCMF_PKTGEN_SEND:
- *data++ = SDPCM_TEST_DISCARD;
- *data++ = (u8) bus->pktgen_sent;
- break;
-
- case BRCMF_PKTGEN_RXBURST:
- *data++ = SDPCM_TEST_BURST;
- *data++ = (u8) bus->pktgen_count;
- break;
-
- default:
- BRCMF_ERROR(("Unrecognized pktgen mode %d\n",
- bus->pktgen_mode));
- brcmu_pkt_buf_free_skb(pkt, true);
- bus->pktgen_count = 0;
- return;
- }
-
- /* Write test header length field */
- *data++ = (len >> 0);
- *data++ = (len >> 8);
-
- /* Then fill in the remainder -- N/A for burst,
- but who cares... */
- for (fillbyte = 0; fillbyte < len; fillbyte++)
- *data++ =
- SDPCM_TEST_FILL(fillbyte, (u8) bus->pktgen_sent);
-
-#ifdef BCMDBG
- if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
- data = (u8 *) (pkt->data) + SDPCM_HDRLEN;
- printk(KERN_DEBUG "brcmf_sdbrcm_pktgen: Tx Data:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, data,
- pkt->len - SDPCM_HDRLEN);
- }
-#endif
-
- /* Send it */
- if (brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, true)) {
- bus->pktgen_fail++;
- if (bus->pktgen_stop
- && bus->pktgen_stop == bus->pktgen_fail)
- bus->pktgen_count = 0;
- }
- bus->pktgen_sent++;
-
- /* Bump length if not fixed, wrap at max */
- if (++bus->pktgen_len > bus->pktgen_maxlen)
- bus->pktgen_len = (u16) bus->pktgen_minlen;
-
- /* Special case for burst mode: just send one request! */
- if (bus->pktgen_mode == BRCMF_PKTGEN_RXBURST)
- break;
- }
-}
-
-static void brcmf_sdbrcm_sdtest_set(struct brcmf_bus *bus, bool start)
-{
- struct sk_buff *pkt;
- u8 *data;
-
- /* Allocate the packet */
- pkt = brcmu_pkt_buf_get_skb(SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
- BRCMF_SDALIGN, true);
- if (!pkt) {
- BRCMF_ERROR(("%s: brcmu_pkt_buf_get_skb failed!\n", __func__));
- return;
- }
- PKTALIGN(pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), BRCMF_SDALIGN);
- data = (u8 *) (pkt->data) + SDPCM_HDRLEN;
-
- /* Fill in the test header */
- *data++ = SDPCM_TEST_SEND;
- *data++ = start;
- *data++ = (bus->pktgen_maxlen >> 0);
- *data++ = (bus->pktgen_maxlen >> 8);
-
- /* Send it */
- if (brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, true))
- bus->pktgen_fail++;
-}
-
-static void
-brcmf_sdbrcm_checkdied(struct brcmf_bus *bus, struct sk_buff *pkt, uint seq)
-{
- u8 *data;
- uint pktlen;
-
- u8 cmd;
- u8 extra;
- u16 len;
- u16 offset;
-
- /* Check for min length */
- pktlen = pkt->len;
- if (pktlen < SDPCM_TEST_HDRLEN) {
- BRCMF_ERROR(("brcmf_sdbrcm_checkdied: toss runt frame, pktlen "
- "%d\n", pktlen));
- brcmu_pkt_buf_free_skb(pkt, false);
- return;
- }
-
- /* Extract header fields */
- data = pkt->data;
- cmd = *data++;
- extra = *data++;
- len = *data++;
- len += *data++ << 8;
-
- /* Check length for relevant commands */
- if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ
- || cmd == SDPCM_TEST_ECHORSP) {
- if (pktlen != len + SDPCM_TEST_HDRLEN) {
- BRCMF_ERROR(("brcmf_sdbrcm_checkdied: frame length "
- "mismatch, pktlen %d seq %d"
- " cmd %d extra %d len %d\n",
- pktlen, seq, cmd, extra, len));
- brcmu_pkt_buf_free_skb(pkt, false);
- return;
- }
- }
-
- /* Process as per command */
- switch (cmd) {
- case SDPCM_TEST_ECHOREQ:
- /* Rx->Tx turnaround ok (even on NDIS w/current
- implementation) */
- *(u8 *) (pkt->data) = SDPCM_TEST_ECHORSP;
- if (brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, true) == 0)
- bus->pktgen_sent++;
- else {
- bus->pktgen_fail++;
- brcmu_pkt_buf_free_skb(pkt, false);
- }
- bus->pktgen_rcvd++;
- break;
-
- case SDPCM_TEST_ECHORSP:
- if (bus->ext_loop) {
- brcmu_pkt_buf_free_skb(pkt, false);
- bus->pktgen_rcvd++;
- break;
- }
-
- for (offset = 0; offset < len; offset++, data++) {
- if (*data != SDPCM_TEST_FILL(offset, extra)) {
- BRCMF_ERROR(("brcmf_sdbrcm_checkdied: echo"
- " data mismatch: "
- "offset %d (len %d) "
- "expect 0x%02x rcvd 0x%02x\n",
- offset, len,
- SDPCM_TEST_FILL(offset, extra),
- *data));
- break;
- }
- }
- brcmu_pkt_buf_free_skb(pkt, false);
- bus->pktgen_rcvd++;
- break;
-
- case SDPCM_TEST_DISCARD:
- brcmu_pkt_buf_free_skb(pkt, false);
- bus->pktgen_rcvd++;
- break;
-
- case SDPCM_TEST_BURST:
- case SDPCM_TEST_SEND:
- default:
- BRCMF_INFO(("brcmf_sdbrcm_checkdied: unsupported or unknown "
- "command, pktlen %d seq %d" " cmd %d extra %d"
- " len %d\n", pktlen, seq, cmd, extra, len));
- brcmu_pkt_buf_free_skb(pkt, false);
- break;
- }
-
- /* For recv mode, stop at limie (and tell dongle to stop sending) */
- if (bus->pktgen_mode == BRCMF_PKTGEN_RECV) {
- if (bus->pktgen_total
- && (bus->pktgen_rcvd >= bus->pktgen_total)) {
- bus->pktgen_count = 0;
- brcmf_sdbrcm_sdtest_set(bus, false);
- }
- }
-}
-#endif /* SDTEST */
-
-extern bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
-{
- struct brcmf_bus *bus;
-
- BRCMF_TIMER(("%s: Enter\n", __func__));
-
- bus = drvr->bus;
-
- if (bus->drvr->dongle_reset)
- return false;
-
- /* Ignore the timer if simulating bus down */
- if (bus->sleeping)
- return false;
-
- brcmf_sdbrcm_sdlock(bus);
-
- /* Poll period: check device if appropriate. */
- if (bus->poll && (++bus->polltick >= bus->pollrate)) {
- u32 intstatus = 0;
-
- /* Reset poll tick */
- bus->polltick = 0;
-
- /* Check device if no interrupts */
- if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
-
- if (!bus->dpc_sched) {
- u8 devpend;
- devpend = brcmf_sdcard_cfg_read(bus->card,
- SDIO_FUNC_0, SDIO_CCCR_INTx,
- NULL);
- intstatus =
- devpend & (INTR_STATUS_FUNC1 |
- INTR_STATUS_FUNC2);
- }
-
- /* If there is something, make like the ISR and
- schedule the DPC */
- if (intstatus) {
- bus->pollcnt++;
- bus->ipend = true;
- if (bus->intr)
- brcmf_sdcard_intr_disable(bus->card);
-
- bus->dpc_sched = true;
- brcmf_sdbrcm_sched_dpc(bus);
-
- }
- }
-
- /* Update interrupt tracking */
- bus->lastintrs = bus->intrcount;
- }
-#ifdef BCMDBG
- /* Poll for console output periodically */
- if (drvr->busstate == BRCMF_BUS_DATA && brcmf_console_ms != 0) {
- bus->console.count += brcmf_watchdog_ms;
- if (bus->console.count >= brcmf_console_ms) {
- bus->console.count -= brcmf_console_ms;
- /* Make sure backplane clock is on */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
- if (brcmf_sdbrcm_readconsole(bus) < 0)
- brcmf_console_ms = 0; /* On error,
- stop trying */
- }
- }
-#endif /* BCMDBG */
-
-#ifdef SDTEST
- /* Generate packets if configured */
- if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
- /* Make sure backplane clock is on */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
- bus->pktgen_tick = 0;
- brcmf_sdbrcm_pktgen(bus);
- }
-#endif
-
- /* On idle timeout clear activity flag and/or turn off clock */
- if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
- if (++bus->idlecount >= bus->idletime) {
- bus->idlecount = 0;
- if (bus->activity) {
- bus->activity = false;
- brcmf_sdbrcm_wd_timer(bus, brcmf_watchdog_ms);
- } else {
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
- }
- }
- }
-
- brcmf_sdbrcm_sdunlock(bus);
-
- return bus->ipend;
-}
-
-#ifdef BCMDBG
-static int brcmf_sdbrcm_bus_console_in(struct brcmf_pub *drvr,
- unsigned char *msg, uint msglen)
-{
- struct brcmf_bus *bus = drvr->bus;
- u32 addr, val;
- int rv;
- struct sk_buff *pkt;
-
- /* Address could be zero if CONSOLE := 0 in dongle Makefile */
- if (bus->console_addr == 0)
- return -ENOTSUPP;
-
- /* Exclusive bus access */
- brcmf_sdbrcm_sdlock(bus);
-
- /* Don't allow input if dongle is in reset */
- if (bus->drvr->dongle_reset) {
- brcmf_sdbrcm_sdunlock(bus);
- return -EPERM;
- }
-
- /* Request clock to allow SDIO accesses */
- BUS_WAKE(bus);
- /* No pend allowed since txpkt is called later, ht clk has to be on */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
- /* Zero cbuf_index */
- addr = bus->console_addr + offsetof(struct rte_console, cbuf_idx);
- val = cpu_to_le32(0);
- rv = brcmf_sdbrcm_membytes(bus, true, addr, (u8 *)&val, sizeof(val));
- if (rv < 0)
- goto done;
-
- /* Write message into cbuf */
- addr = bus->console_addr + offsetof(struct rte_console, cbuf);
- rv = brcmf_sdbrcm_membytes(bus, true, addr, (u8 *)msg, msglen);
- if (rv < 0)
- goto done;
-
- /* Write length into vcons_in */
- addr = bus->console_addr + offsetof(struct rte_console, vcons_in);
- val = cpu_to_le32(msglen);
- rv = brcmf_sdbrcm_membytes(bus, true, addr, (u8 *)&val, sizeof(val));
- if (rv < 0)
- goto done;
-
- /* Bump dongle by sending an empty event pkt.
- * sdpcm_sendup (RX) checks for virtual console input.
- */
- pkt = brcmu_pkt_buf_get_skb(4 + SDPCM_RESERVE);
- if ((pkt != NULL) && bus->clkstate == CLK_AVAIL)
- brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, true);
-
-done:
- if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && !bus->dpc_sched) {
- bus->activity = false;
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
- }
-
- brcmf_sdbrcm_sdunlock(bus);
-
- return rv;
-}
-#endif /* BCMDBG */
-
-static bool brcmf_sdbrcm_chipmatch(u16 chipid)
-{
- if (chipid == BCM4325_CHIP_ID)
- return true;
- if (chipid == BCM4329_CHIP_ID)
- return true;
- if (chipid == BCM4319_CHIP_ID)
- return true;
- return false;
-}
-
-static void *brcmf_sdbrcm_probe(u16 venid, u16 devid, u16 bus_no,
- u16 slot, u16 func, uint bustype, u32 regsva,
- void *card)
-{
- int ret;
- struct brcmf_bus *bus;
-
- /* Init global variables at run-time, not as part of the declaration.
- * This is required to support init/de-init of the driver.
- * Initialization
- * of globals as part of the declaration results in non-deterministic
- * behavior since the value of the globals may be different on the
- * first time that the driver is initialized vs subsequent
- * initializations.
- */
- brcmf_txbound = BRCMF_TXBOUND;
- brcmf_rxbound = BRCMF_RXBOUND;
- brcmf_alignctl = true;
- sd1idle = true;
- brcmf_readahead = true;
- retrydata = false;
- brcmf_dongle_memsize = 0;
- brcmf_txminmax = BRCMF_TXMINMAX;
-
- forcealign = true;
-
- brcmf_c_init();
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
- BRCMF_INFO(("%s: venid 0x%04x devid 0x%04x\n", __func__, venid, devid));
-
- /* We make an assumption about address window mappings:
- * regsva == SI_ENUM_BASE*/
-
- /* SDIO car passes venid and devid based on CIS parsing -- but
- * low-power start
- * means early parse could fail, so here we should get either an ID
- * we recognize OR (-1) indicating we must request power first.
- */
- /* Check the Vendor ID */
- switch (venid) {
- case 0x0000:
- case PCI_VENDOR_ID_BROADCOM:
- break;
- default:
- BRCMF_ERROR(("%s: unknown vendor: 0x%04x\n", __func__, venid));
- return NULL;
- }
-
- /* Check the Device ID and make sure it's one that we support */
- switch (devid) {
- case BCM4325_D11DUAL_ID: /* 4325 802.11a/g id */
- case BCM4325_D11G_ID: /* 4325 802.11g 2.4Ghz band id */
- case BCM4325_D11A_ID: /* 4325 802.11a 5Ghz band id */
- BRCMF_INFO(("%s: found 4325 Dongle\n", __func__));
- break;
- case BCM4329_D11NDUAL_ID: /* 4329 802.11n dualband device */
- case BCM4329_D11N2G_ID: /* 4329 802.11n 2.4G device */
- case BCM4329_D11N5G_ID: /* 4329 802.11n 5G device */
- case 0x4329:
- BRCMF_INFO(("%s: found 4329 Dongle\n", __func__));
- break;
- case BCM4319_D11N_ID: /* 4319 802.11n id */
- case BCM4319_D11N2G_ID: /* 4319 802.11n2g id */
- case BCM4319_D11N5G_ID: /* 4319 802.11n5g id */
- BRCMF_INFO(("%s: found 4319 Dongle\n", __func__));
- break;
- case 0:
- BRCMF_INFO(("%s: allow device id 0, will check chip"
- " internals\n", __func__));
- break;
-
- default:
- BRCMF_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
- __func__, venid, devid));
- return NULL;
- }
-
- /* Allocate private bus interface state */
- bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
- if (!bus) {
- BRCMF_ERROR(("%s: kmalloc of struct dhd_bus failed\n",
- __func__));
- goto fail;
- }
- bus->card = card;
- bus->cl_devid = (u16) devid;
- bus->bus = BRCMF_BUS;
- bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
- bus->usebufpool = false; /* Use bufpool if allocated,
- else use locally malloced rxbuf */
-
- /* attempt to attach to the dongle */
- if (!(brcmf_sdbrcm_probe_attach(bus, card, regsva, devid))) {
- BRCMF_ERROR(("%s: brcmf_sdbrcm_probe_attach failed\n",
- __func__));
- goto fail;
- }
-
- spin_lock_init(&bus->txqlock);
- init_waitqueue_head(&bus->ctrl_wait);
-
- /* Set up the watchdog timer */
- init_timer(&bus->timer);
- bus->timer.data = (unsigned long)bus;
- bus->timer.function = brcmf_sdbrcm_watchdog;
-
- /* Initialize thread based operation and lock */
- if ((brcmf_watchdog_prio >= 0) && (brcmf_dpc_prio >= 0)) {
- bus->threads_only = true;
- sema_init(&bus->sdsem, 1);
- } else {
- bus->threads_only = false;
- spin_lock_init(&bus->sdlock);
- }
-
- if (brcmf_dpc_prio >= 0) {
- /* Initialize watchdog thread */
- init_completion(&bus->watchdog_wait);
- bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
- bus, "brcmf_watchdog");
- if (IS_ERR(bus->watchdog_tsk)) {
- printk(KERN_WARNING
- "brcmf_watchdog thread failed to start\n");
- bus->watchdog_tsk = NULL;
- }
- } else
- bus->watchdog_tsk = NULL;
-
- /* Set up the bottom half handler */
- if (brcmf_dpc_prio >= 0) {
- /* Initialize DPC thread */
- init_completion(&bus->dpc_wait);
- bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
- bus, "brcmf_dpc");
- if (IS_ERR(bus->dpc_tsk)) {
- printk(KERN_WARNING
- "brcmf_dpc thread failed to start\n");
- bus->dpc_tsk = NULL;
- }
- } else {
- tasklet_init(&bus->tasklet, brcmf_sdbrcm_dpc_tasklet,
- (unsigned long)bus);
- bus->dpc_tsk = NULL;
- }
-
- /* Attach to the brcmf/OS/network interface */
- bus->drvr = brcmf_attach(bus, SDPCM_RESERVE);
- if (!bus->drvr) {
- BRCMF_ERROR(("%s: brcmf_attach failed\n", __func__));
- goto fail;
- }
-
- /* Allocate buffers */
- if (!(brcmf_sdbrcm_probe_malloc(bus, card))) {
- BRCMF_ERROR(("%s: brcmf_sdbrcm_probe_malloc failed\n",
- __func__));
- goto fail;
- }
-
- if (!(brcmf_sdbrcm_probe_init(bus, card))) {
- BRCMF_ERROR(("%s: brcmf_sdbrcm_probe_init failed\n", __func__));
- goto fail;
- }
-
- /* Register interrupt callback, but mask it (not operational yet). */
- BRCMF_INTR(("%s: disable SDIO interrupts (not interested yet)\n",
- __func__));
- brcmf_sdcard_intr_disable(card);
- ret = brcmf_sdcard_intr_reg(card, brcmf_sdbrcm_isr, bus);
- if (ret != 0) {
- BRCMF_ERROR(("%s: FAILED: sdcard_intr_reg returned %d\n",
- __func__, ret));
- goto fail;
- }
- BRCMF_INTR(("%s: registered SDIO interrupt function ok\n", __func__));
-
- BRCMF_INFO(("%s: completed!!\n", __func__));
-
- /* if firmware path present try to download and bring up bus */
- ret = brcmf_bus_start(bus->drvr);
- if (ret != 0) {
- if (ret == -ENOLINK) {
- BRCMF_ERROR(("%s: dongle is not responding\n",
- __func__));
- goto fail;
- }
- }
- /* Ok, have the per-port tell the stack we're open for business */
- if (brcmf_net_attach(bus->drvr, 0) != 0) {
- BRCMF_ERROR(("%s: Net attach failed!!\n", __func__));
- goto fail;
- }
-
- return bus;
-
-fail:
- brcmf_sdbrcm_release(bus);
- return NULL;
-}
-
-static bool
-brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, void *card, u32 regsva,
- u16 devid)
-{
- u8 clkctl = 0;
- int err = 0;
-
- bus->alp_only = true;
-
- /* Return the window to backplane enumeration space for core access */
- if (brcmf_sdbrcm_set_siaddr_window(bus, SI_ENUM_BASE))
- BRCMF_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n",
- __func__));
-
-#ifdef BCMDBG
- printk(KERN_DEBUG "F1 signature read @0x18000000=0x%4x\n",
- brcmf_sdcard_reg_read(bus->card, SI_ENUM_BASE, 4));
-
-#endif /* BCMDBG */
-
- /*
- * Force PLL off until brcmf_sdbrcm_chip_attach()
- * programs PLL control regs
- */
-
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- BRCMF_INIT_CLKCTL1, &err);
- if (!err)
- clkctl =
- brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
-
- if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
- BRCMF_ERROR(("brcmf_sdbrcm_probe: ChipClkCSR access: err %d"
- " wrote 0x%02x read 0x%02x\n",
- err, BRCMF_INIT_CLKCTL1, clkctl));
- goto fail;
- }
-
- if (brcmf_sdbrcm_chip_attach(bus, regsva)) {
- BRCMF_ERROR(("%s: brcmf_sdbrcm_chip_attach failed!\n",
- __func__));
- goto fail;
- }
-
- if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) {
- BRCMF_ERROR(("%s: unsupported chip: 0x%04x\n",
- __func__, bus->ci->chip));
- goto fail;
- }
-
- brcmf_sdbrcm_sdiod_drive_strength_init(bus, brcmf_sdiod_drive_strength);
-
- /* Get info on the ARM and SOCRAM cores... */
- if (!BRCMF_NOPMU(bus)) {
- brcmf_sdcard_reg_read(bus->card,
- CORE_SB(bus->ci->armcorebase, sbidhigh), 4);
- bus->orig_ramsize = bus->ci->ramsize;
- if (!(bus->orig_ramsize)) {
- BRCMF_ERROR(("%s: failed to find SOCRAM memory!\n",
- __func__));
- goto fail;
- }
- bus->ramsize = bus->orig_ramsize;
- if (brcmf_dongle_memsize)
- brcmf_sdbrcm_setmemsize(bus, brcmf_dongle_memsize);
-
- BRCMF_ERROR(("DHD: dongle ram size is set to %d(orig %d)\n",
- bus->ramsize, bus->orig_ramsize));
- }
-
- /* Set core control so an SDIO reset does a backplane reset */
- OR_REG(bus->ci->buscorebase + offsetof(struct sdpcmd_regs,
- corecontrol),
- CC_BPRESEN, u32);
-
- brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
-
- /* Locate an appropriately-aligned portion of hdrbuf */
- bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
- BRCMF_SDALIGN);
-
- /* Set the poll and/or interrupt flags */
- bus->intr = (bool) brcmf_intr;
- bus->poll = (bool) brcmf_poll;
- if (bus->poll)
- bus->pollrate = 1;
-
- return true;
-
-fail:
- return false;
-}
-
-static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus, void *card)
-{
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (bus->drvr->maxctl) {
- bus->rxblen =
- roundup((bus->drvr->maxctl + SDPCM_HDRLEN),
- ALIGNMENT) + BRCMF_SDALIGN;
- bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
- if (!(bus->rxbuf)) {
- BRCMF_ERROR(("%s: kmalloc of %d-byte rxbuf failed\n",
- __func__, bus->rxblen));
- goto fail;
- }
- }
-
- /* Allocate buffer to receive glomed packet */
- bus->databuf = kmalloc(MAX_DATA_BUF, GFP_ATOMIC);
- if (!(bus->databuf)) {
- BRCMF_ERROR(("%s: kmalloc of %d-byte databuf failed\n",
- __func__, MAX_DATA_BUF));
- /* release rxbuf which was already located as above */
- if (!bus->rxblen)
- kfree(bus->rxbuf);
- goto fail;
- }
-
- /* Align the buffer */
- if ((unsigned long)bus->databuf % BRCMF_SDALIGN)
- bus->dataptr = bus->databuf + (BRCMF_SDALIGN -
- ((unsigned long)bus->databuf % BRCMF_SDALIGN));
- else
- bus->dataptr = bus->databuf;
-
- return true;
-
-fail:
- return false;
-}
-
-static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus, void *card)
-{
- s32 fnum;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
-#ifdef SDTEST
- brcmf_sdbrcm_pktgen_init(bus);
-#endif /* SDTEST */
-
- /* Disable F2 to clear any intermediate frame state on the dongle */
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
-
- bus->drvr->busstate = BRCMF_BUS_DOWN;
- bus->sleeping = false;
- bus->rxflow = false;
-
- /* Done with backplane-dependent accesses, can drop clock... */
- brcmf_sdcard_cfg_write(card, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0,
- NULL);
-
- /* ...and initialize clock/power states */
- bus->clkstate = CLK_SDONLY;
- bus->idletime = (s32) brcmf_idletime;
- bus->idleclock = BRCMF_IDLE_ACTIVE;
-
- /* Query the F2 block size, set roundup accordingly */
- fnum = 2;
- if (brcmf_sdcard_iovar_op(card, "sd_blocksize", &fnum, sizeof(s32),
- &bus->blocksize, sizeof(s32), false) != 0) {
- bus->blocksize = 0;
- BRCMF_ERROR(("%s: fail on %s get\n", __func__, "sd_blocksize"));
- } else {
- BRCMF_INFO(("%s: Initial value for %s is %d\n",
- __func__, "sd_blocksize", bus->blocksize));
- }
- bus->roundup = min(max_roundup, bus->blocksize);
-
- /* Query if bus module supports packet chaining,
- default to use if supported */
- if (brcmf_sdcard_iovar_op(card, "sd_rxchain", NULL, 0,
- &bus->sd_rxchain, sizeof(s32),
- false) != 0) {
- bus->sd_rxchain = false;
- } else {
- BRCMF_INFO(("%s: bus module (through sdiocard API) %s"
- " chaining\n", __func__, bus->sd_rxchain
- ? "supports" : "does not support"));
- }
- bus->use_rxchain = (bool) bus->sd_rxchain;
-
- return true;
-}
-
-static bool
-brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus, void *card)
-{
- bool ret;
-
- /* Download the firmware */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
- ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
-
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
- return ret;
-}
-
-/* Detach and free everything */
-static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
-{
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (bus) {
- /* De-register interrupt handler */
- brcmf_sdcard_intr_disable(bus->card);
- brcmf_sdcard_intr_dereg(bus->card);
-
- if (bus->drvr) {
- brcmf_detach(bus->drvr);
- brcmf_sdbrcm_release_dongle(bus);
- bus->drvr = NULL;
- }
-
- brcmf_sdbrcm_release_malloc(bus);
-
- kfree(bus);
- }
-
- BRCMF_TRACE(("%s: Disconnected\n", __func__));
-}
-
-static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
-{
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (bus->drvr && bus->drvr->dongle_reset)
- return;
-
- kfree(bus->rxbuf);
- bus->rxctl = bus->rxbuf = NULL;
- bus->rxlen = 0;
-
- kfree(bus->databuf);
- bus->databuf = NULL;
-}
-
-static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
-{
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (bus->drvr && bus->drvr->dongle_reset)
- return;
-
- if (bus->ci) {
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
- brcmf_sdbrcm_chip_detach(bus);
- if (bus->vars && bus->varsz)
- kfree(bus->vars);
- bus->vars = NULL;
- }
-
- BRCMF_TRACE(("%s: Disconnected\n", __func__));
-}
-
-static void brcmf_sdbrcm_disconnect(void *ptr)
-{
- struct brcmf_bus *bus = (struct brcmf_bus *)ptr;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- if (bus) {
- brcmf_sdbrcm_release(bus);
- }
-
- BRCMF_TRACE(("%s: Disconnected\n", __func__));
-}
-
-/* Register/Unregister functions are called by the main DHD entry
- * point (e.g. module insertion) to link with the bus driver, in
- * order to look for or await the device.
- */
-
-static struct brcmf_sdioh_driver brcmf_sdio = {
- brcmf_sdbrcm_probe,
- brcmf_sdbrcm_disconnect
-};
-
-int brcmf_bus_register(void)
-{
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* Sanity check on the module parameters */
- do {
- /* Both watchdog and DPC as tasklets are ok */
- if ((brcmf_watchdog_prio < 0) && (brcmf_dpc_prio < 0))
- break;
-
- /* If both watchdog and DPC are threads, TX must be deferred */
- if ((brcmf_watchdog_prio >= 0) && (brcmf_dpc_prio >= 0)
- && brcmf_deferred_tx)
- break;
-
- BRCMF_ERROR(("Invalid module parameters.\n"));
- return -EINVAL;
- } while (0);
-
- return brcmf_sdio_register(&brcmf_sdio);
-}
-
-void brcmf_bus_unregister(void)
-{
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- brcmf_sdio_unregister();
-}
-
-static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
-{
- int offset = 0;
- uint len;
- u8 *memblock = NULL, *memptr;
- int ret;
-
- BRCMF_INFO(("%s: Enter\n", __func__));
-
- bus->fw_name = BCM4329_FW_NAME;
- ret = request_firmware(&bus->firmware, bus->fw_name,
- &gInstance->func[2]->dev);
- if (ret) {
- BRCMF_ERROR(("%s: Fail to request firmware %d\n",
- __func__, ret));
- return ret;
- }
- bus->fw_ptr = 0;
-
- memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
- if (memblock == NULL) {
- BRCMF_ERROR(("%s: Failed to allocate memory %d bytes\n",
- __func__, MEMBLOCK));
- ret = -ENOMEM;
- goto err;
- }
- if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
- memptr += (BRCMF_SDALIGN -
- ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
-
- /* Download image */
- while ((len =
- brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus))) {
- ret = brcmf_sdbrcm_membytes(bus, true, offset, memptr, len);
- if (ret) {
- BRCMF_ERROR(("%s: error %d on writing %d membytes at "
- "0x%08x\n", __func__, ret, MEMBLOCK,
- offset));
- goto err;
- }
-
- offset += MEMBLOCK;
- }
-
-err:
- kfree(memblock);
-
- release_firmware(bus->firmware);
- bus->fw_ptr = 0;
-
- return ret;
-}
-
-/*
- * ProcessVars:Takes a buffer of "<var>=<value>\n" lines read from a file
- * and ending in a NUL.
- * Removes carriage returns, empty lines, comment lines, and converts
- * newlines to NULs.
- * Shortens buffer as needed and pads with NULs. End of buffer is marked
- * by two NULs.
-*/
-
-static uint brcmf_process_nvram_vars(char *varbuf, uint len)
-{
- char *dp;
- bool findNewline;
- int column;
- uint buf_len, n;
-
- dp = varbuf;
-
- findNewline = false;
- column = 0;
-
- for (n = 0; n < len; n++) {
- if (varbuf[n] == 0)
- break;
- if (varbuf[n] == '\r')
- continue;
- if (findNewline && varbuf[n] != '\n')
- continue;
- findNewline = false;
- if (varbuf[n] == '#') {
- findNewline = true;
- continue;
- }
- if (varbuf[n] == '\n') {
- if (column == 0)
- continue;
- *dp++ = 0;
- column = 0;
- continue;
- }
- *dp++ = varbuf[n];
- column++;
- }
- buf_len = dp - varbuf;
-
- while (dp < varbuf + n)
- *dp++ = 0;
-
- return buf_len;
-}
-
-static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus)
-{
- uint len;
- char *memblock = NULL;
- char *bufp;
- int ret;
-
- bus->nv_name = BCM4329_NV_NAME;
- ret = request_firmware(&bus->firmware, bus->nv_name,
- &gInstance->func[2]->dev);
- if (ret) {
- BRCMF_ERROR(("%s: Fail to request nvram %d\n", __func__, ret));
- return ret;
- }
- bus->fw_ptr = 0;
-
- memblock = kmalloc(MEMBLOCK, GFP_ATOMIC);
- if (memblock == NULL) {
- BRCMF_ERROR(("%s: Failed to allocate memory %d bytes\n",
- __func__, MEMBLOCK));
- ret = -ENOMEM;
- goto err;
- }
-
- len = brcmf_sdbrcm_get_image(memblock, MEMBLOCK, bus);
-
- if (len > 0 && len < MEMBLOCK) {
- bufp = (char *)memblock;
- bufp[len] = 0;
- len = brcmf_process_nvram_vars(bufp, len);
- bufp += len;
- *bufp++ = 0;
- if (len)
- ret = brcmf_sdbrcm_downloadvars(bus, memblock, len + 1);
- if (ret)
- BRCMF_ERROR(("%s: error downloading vars: %d\n",
- __func__, ret));
- } else {
- BRCMF_ERROR(("%s: error reading nvram file: %d\n",
- __func__, len));
- ret = -EIO;
- }
-
-err:
- kfree(memblock);
-
- release_firmware(bus->firmware);
- bus->fw_ptr = 0;
-
- return ret;
-}
-
-static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
-{
- int bcmerror = -1;
-
- /* Keep arm in reset */
- if (brcmf_sdbrcm_download_state(bus, true)) {
- BRCMF_ERROR(("%s: error placing ARM core in reset\n",
- __func__));
- goto err;
- }
-
- /* External image takes precedence if specified */
- if (brcmf_sdbrcm_download_code_file(bus)) {
- BRCMF_ERROR(("%s: dongle image file download failed\n",
- __func__));
- goto err;
- }
-
- /* External nvram takes precedence if specified */
- if (brcmf_sdbrcm_download_nvram(bus)) {
- BRCMF_ERROR(("%s: dongle nvram file download failed\n",
- __func__));
- }
-
- /* Take arm out of reset */
- if (brcmf_sdbrcm_download_state(bus, false)) {
- BRCMF_ERROR(("%s: error getting out of ARM core reset\n",
- __func__));
- goto err;
- }
-
- bcmerror = 0;
-
-err:
- return bcmerror;
-}
-
-
-static int
-brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags,
- u8 *buf, uint nbytes, struct sk_buff *pkt,
- void (*complete)(void *handle, int status,
- bool sync_waiting),
- void *handle)
-{
- return brcmf_sdcard_send_buf
- (bus->card, addr, fn, flags, buf, nbytes, pkt, complete,
- handle);
-}
-
-int brcmf_bus_devreset(struct brcmf_pub *drvr, u8 flag)
-{
- int bcmerror = 0;
- struct brcmf_bus *bus;
-
- bus = drvr->bus;
-
- if (flag == true) {
- brcmf_sdbrcm_wd_timer(bus, 0);
- if (!bus->drvr->dongle_reset) {
- /* Expect app to have torn down any
- connection before calling */
- /* Stop the bus, disable F2 */
- brcmf_sdbrcm_bus_stop(bus, false);
-
- /* Clean tx/rx buffer pointers,
- detach from the dongle */
- brcmf_sdbrcm_release_dongle(bus);
-
- bus->drvr->dongle_reset = true;
- bus->drvr->up = false;
-
- BRCMF_TRACE(("%s: WLAN OFF DONE\n", __func__));
- /* App can now remove power from device */
- } else
- bcmerror = -EIO;
- } else {
- /* App must have restored power to device before calling */
-
- BRCMF_TRACE(("\n\n%s: == WLAN ON ==\n", __func__));
-
- if (bus->drvr->dongle_reset) {
- /* Turn on WLAN */
-
- /* Attempt to re-attach & download */
- if (brcmf_sdbrcm_probe_attach(bus, bus->card,
- SI_ENUM_BASE,
- bus->cl_devid)) {
- /* Attempt to download binary to the dongle */
- if (brcmf_sdbrcm_probe_init(bus, bus->card)) {
- /* Re-init bus, enable F2 transfer */
- brcmf_sdbrcm_bus_init(bus->drvr, false);
-
- bus->drvr->dongle_reset = false;
- bus->drvr->up = true;
-
- BRCMF_TRACE(("%s: WLAN ON DONE\n",
- __func__));
- } else
- bcmerror = -EIO;
- } else
- bcmerror = -EIO;
- } else {
- bcmerror = -EISCONN;
- BRCMF_ERROR(("%s: Set DEVRESET=false invoked when"
- " device is on\n", __func__));
- bcmerror = -EIO;
- }
- brcmf_sdbrcm_wd_timer(bus, brcmf_watchdog_ms);
- }
- return bcmerror;
-}
-
-static int
-brcmf_sdbrcm_chip_recognition(struct brcmf_sdio_card *card,
- struct chip_info *ci, u32 regs)
-{
- u32 regdata;
-
- /*
- * Get CC core rev
- * Chipid is assume to be at offset 0 from regs arg
- * For different chiptypes or old sdio hosts w/o chipcommon,
- * other ways of recognition should be added here.
- */
- ci->cccorebase = regs;
- regdata = brcmf_sdcard_reg_read(card,
- CORE_CC_REG(ci->cccorebase, chipid), 4);
- ci->chip = regdata & CID_ID_MASK;
- ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
-
- BRCMF_INFO(("%s: chipid=0x%x chiprev=%d\n",
- __func__, ci->chip, ci->chiprev));
-
- /* Address of cores for new chips should be added here */
- switch (ci->chip) {
- case BCM4329_CHIP_ID:
- ci->buscorebase = BCM4329_CORE_BUS_BASE;
- ci->ramcorebase = BCM4329_CORE_SOCRAM_BASE;
- ci->armcorebase = BCM4329_CORE_ARM_BASE;
- ci->ramsize = BCM4329_RAMSIZE;
- break;
- default:
- BRCMF_ERROR(("%s: chipid 0x%x is not supported\n",
- __func__, ci->chip));
- return -ENODEV;
- }
-
- regdata = brcmf_sdcard_reg_read(card,
- CORE_SB(ci->cccorebase, sbidhigh), 4);
- ci->ccrev = SBCOREREV(regdata);
-
- regdata = brcmf_sdcard_reg_read(card,
- CORE_CC_REG(ci->cccorebase, pmucapabilities), 4);
- ci->pmurev = regdata & PCAP_REV_MASK;
-
- regdata = brcmf_sdcard_reg_read(card,
- CORE_SB(ci->buscorebase, sbidhigh), 4);
- ci->buscorerev = SBCOREREV(regdata);
- ci->buscoretype = (regdata & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT;
-
- BRCMF_INFO(("%s: ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
- __func__, ci->ccrev, ci->pmurev,
- ci->buscorerev, ci->buscoretype));
-
- /* get chipcommon capabilites */
- ci->cccaps = brcmf_sdcard_reg_read(card,
- CORE_CC_REG(ci->cccorebase, capabilities), 4);
-
- return 0;
-}
-
-static void
-brcmf_sdbrcm_chip_disablecore(struct brcmf_sdio_card *card, u32 corebase)
-{
- u32 regdata;
-
- regdata = brcmf_sdcard_reg_read(card,
- CORE_SB(corebase, sbtmstatelow), 4);
- if (regdata & SBTML_RESET)
- return;
-
- regdata = brcmf_sdcard_reg_read(card,
- CORE_SB(corebase, sbtmstatelow), 4);
- if ((regdata & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) != 0) {
- /*
- * set target reject and spin until busy is clear
- * (preserve core-specific bits)
- */
- regdata = brcmf_sdcard_reg_read(card,
- CORE_SB(corebase, sbtmstatelow), 4);
- brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbtmstatelow), 4,
- regdata | SBTML_REJ);
-
- regdata = brcmf_sdcard_reg_read(card,
- CORE_SB(corebase, sbtmstatelow), 4);
- udelay(1);
- SPINWAIT((brcmf_sdcard_reg_read(card,
- CORE_SB(corebase, sbtmstatehigh), 4) &
- SBTMH_BUSY), 100000);
-
- regdata = brcmf_sdcard_reg_read(card,
- CORE_SB(corebase, sbtmstatehigh), 4);
- if (regdata & SBTMH_BUSY)
- BRCMF_ERROR(("%s: ARM core still busy\n", __func__));
-
- regdata = brcmf_sdcard_reg_read(card,
- CORE_SB(corebase, sbidlow), 4);
- if (regdata & SBIDL_INIT) {
- regdata = brcmf_sdcard_reg_read(card,
- CORE_SB(corebase, sbimstate), 4) |
- SBIM_RJ;
- brcmf_sdcard_reg_write(card,
- CORE_SB(corebase, sbimstate), 4,
- regdata);
- regdata = brcmf_sdcard_reg_read(card,
- CORE_SB(corebase, sbimstate), 4);
- udelay(1);
- SPINWAIT((brcmf_sdcard_reg_read(card,
- CORE_SB(corebase, sbimstate), 4) &
- SBIM_BY), 100000);
- }
-
- /* set reset and reject while enabling the clocks */
- brcmf_sdcard_reg_write(card,
- CORE_SB(corebase, sbtmstatelow), 4,
- (((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
- SBTML_REJ | SBTML_RESET));
- regdata = brcmf_sdcard_reg_read(card,
- CORE_SB(corebase, sbtmstatelow), 4);
- udelay(10);
-
- /* clear the initiator reject bit */
- regdata = brcmf_sdcard_reg_read(card,
- CORE_SB(corebase, sbidlow), 4);
- if (regdata & SBIDL_INIT) {
- regdata = brcmf_sdcard_reg_read(card,
- CORE_SB(corebase, sbimstate), 4) &
- ~SBIM_RJ;
- brcmf_sdcard_reg_write(card,
- CORE_SB(corebase, sbimstate), 4,
- regdata);
- }
- }
-
- /* leave reset and reject asserted */
- brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbtmstatelow), 4,
- (SBTML_REJ | SBTML_RESET));
- udelay(1);
-}
-
-static int
-brcmf_sdbrcm_chip_attach(struct brcmf_bus *bus, u32 regs)
-{
- struct chip_info *ci;
- int err;
- u8 clkval, clkset;
-
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- /* alloc chip_info_t */
- ci = kmalloc(sizeof(struct chip_info), GFP_ATOMIC);
- if (NULL == ci) {
- BRCMF_ERROR(("%s: malloc failed!\n", __func__));
- return -ENOMEM;
- }
-
- memset((unsigned char *)ci, 0, sizeof(struct chip_info));
-
- /* bus/core/clk setup for register access */
- /* Try forcing SDIO core to do ALPAvail request only */
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- clkset, &err);
- if (err) {
- BRCMF_ERROR(("%s: error writing for HT off\n", __func__));
- goto fail;
- }
-
- /* If register supported, wait for ALPAvail and then force ALP */
- /* This may take up to 15 milliseconds */
- clkval = brcmf_sdcard_cfg_read(bus->card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
- if ((clkval & ~SBSDIO_AVBITS) == clkset) {
- SPINWAIT(((clkval =
- brcmf_sdcard_cfg_read(bus->card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- NULL)),
- !SBSDIO_ALPAV(clkval)),
- PMU_MAX_TRANSITION_DLY);
- if (!SBSDIO_ALPAV(clkval)) {
- BRCMF_ERROR(("%s: timeout on ALPAV wait,"
- " clkval 0x%02x\n", __func__, clkval));
- err = -EBUSY;
- goto fail;
- }
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF |
- SBSDIO_FORCE_ALP;
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- clkset, &err);
- udelay(65);
- } else {
- BRCMF_ERROR(("%s: ChipClkCSR access: wrote 0x%02x"
- " read 0x%02x\n", __func__, clkset, clkval));
- err = -EACCES;
- goto fail;
- }
-
- /* Also, disable the extra SDIO pull-ups */
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP,
- 0, NULL);
-
- err = brcmf_sdbrcm_chip_recognition(bus->card, ci, regs);
- if (err)
- goto fail;
-
- /*
- * Make sure any on-chip ARM is off (in case strapping is wrong),
- * or downloaded code was already running.
- */
- brcmf_sdbrcm_chip_disablecore(bus->card, ci->armcorebase);
-
- brcmf_sdcard_reg_write(bus->card,
- CORE_CC_REG(ci->cccorebase, gpiopullup), 4, 0);
- brcmf_sdcard_reg_write(bus->card,
- CORE_CC_REG(ci->cccorebase, gpiopulldown), 4, 0);
-
- /* Disable F2 to clear any intermediate frame state on the dongle */
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
-
- /* WAR: cmd52 backplane read so core HW will drop ALPReq */
- clkval = brcmf_sdcard_cfg_read(bus->card, SDIO_FUNC_1,
- 0, NULL);
-
- /* Done with backplane-dependent accesses, can drop clock... */
- brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- 0, NULL);
-
- bus->ci = ci;
- return 0;
-fail:
- bus->ci = NULL;
- kfree(ci);
- return err;
-}
-
-static void
-brcmf_sdbrcm_chip_resetcore(struct brcmf_sdio_card *card, u32 corebase)
-{
- u32 regdata;
-
- /*
- * Must do the disable sequence first to work for
- * arbitrary current core state.
- */
- brcmf_sdbrcm_chip_disablecore(card, corebase);
-
- /*
- * Now do the initialization sequence.
- * set reset while enabling the clock and
- * forcing them on throughout the core
- */
- brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbtmstatelow), 4,
- ((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
- SBTML_RESET);
- udelay(1);
-
- regdata = brcmf_sdcard_reg_read(card, CORE_SB(corebase, sbtmstatehigh),
- 4);
- if (regdata & SBTMH_SERR)
- brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbtmstatehigh),
- 4, 0);
-
- regdata = brcmf_sdcard_reg_read(card, CORE_SB(corebase, sbimstate), 4);
- if (regdata & (SBIM_IBE | SBIM_TO))
- brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbimstate), 4,
- regdata & ~(SBIM_IBE | SBIM_TO));
-
- /* clear reset and allow it to propagate throughout the core */
- brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbtmstatelow), 4,
- (SICF_FGC << SBTML_SICF_SHIFT) |
- (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
- udelay(1);
-
- /* leave clock enabled */
- brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbtmstatelow), 4,
- (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
- udelay(1);
-}
-
-/* SDIO Pad drive strength to select value mappings */
-struct sdiod_drive_str {
- u8 strength; /* Pad Drive Strength in mA */
- u8 sel; /* Chip-specific select value */
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 1 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = {
- {
- 4, 0x2}, {
- 2, 0x3}, {
- 1, 0x0}, {
- 0, 0x0}
- };
-
-/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = {
- {
- 12, 0x7}, {
- 10, 0x6}, {
- 8, 0x5}, {
- 6, 0x4}, {
- 4, 0x2}, {
- 2, 0x1}, {
- 0, 0x0}
- };
-
-/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
-static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
- {
- 32, 0x7}, {
- 26, 0x6}, {
- 22, 0x5}, {
- 16, 0x4}, {
- 12, 0x3}, {
- 8, 0x2}, {
- 4, 0x1}, {
- 0, 0x0}
- };
-
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
-
-static void
-brcmf_sdbrcm_sdiod_drive_strength_init(struct brcmf_bus *bus, u32 drivestrength) {
- struct sdiod_drive_str *str_tab = NULL;
- u32 str_mask = 0;
- u32 str_shift = 0;
- char chn[8];
-
- if (!(bus->ci->cccaps & CC_CAP_PMU))
- return;
-
- switch (SDIOD_DRVSTR_KEY(bus->ci->chip, bus->ci->pmurev)) {
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1;
- str_mask = 0x30000000;
- str_shift = 28;
- break;
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- default:
- BRCMF_ERROR(("No SDIO Drive strength init"
- "done for chip %s rev %d pmurev %d\n",
- brcmu_chipname(bus->ci->chip, chn, 8),
- bus->ci->chiprev, bus->ci->pmurev));
- break;
- }
-
- if (str_tab != NULL) {
- u32 drivestrength_sel = 0;
- u32 cc_data_temp;
- int i;
-
- for (i = 0; str_tab[i].strength != 0; i++) {
- if (drivestrength >= str_tab[i].strength) {
- drivestrength_sel = str_tab[i].sel;
- break;
- }
- }
-
- brcmf_sdcard_reg_write(bus->card,
- CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
- 4, 1);
- cc_data_temp = brcmf_sdcard_reg_read(bus->card,
- CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), 4);
- cc_data_temp &= ~str_mask;
- drivestrength_sel <<= str_shift;
- cc_data_temp |= drivestrength_sel;
- brcmf_sdcard_reg_write(bus->card,
- CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
- 4, cc_data_temp);
-
- BRCMF_INFO(("SDIO: %dmA drive strength selected, "
- "set to 0x%08x\n", drivestrength, cc_data_temp));
- }
-}
-
-static void
-brcmf_sdbrcm_chip_detach(struct brcmf_bus *bus)
-{
- BRCMF_TRACE(("%s: Enter\n", __func__));
-
- kfree(bus->ci);
- bus->ci = NULL;
-}
-
-static void
-brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
-{
- brcmf_sdbrcm_sdunlock(bus);
- wait_event_interruptible_timeout(bus->ctrl_wait,
- (*lockvar == false), HZ * 2);
- brcmf_sdbrcm_sdlock(bus);
- return;
-}
-
-static void
-brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
-{
- if (waitqueue_active(&bus->ctrl_wait))
- wake_up_interruptible(&bus->ctrl_wait);
- return;
-}
-
-static int
-brcmf_sdbrcm_watchdog_thread(void *data)
-{
- struct brcmf_bus *bus = (struct brcmf_bus *)data;
-
- /* This thread doesn't need any user-level access,
- * so get rid of all our resources
- */
- if (brcmf_watchdog_prio > 0) {
- struct sched_param param;
- param.sched_priority = (brcmf_watchdog_prio < MAX_RT_PRIO) ?
- brcmf_watchdog_prio : (MAX_RT_PRIO - 1);
- sched_setscheduler(current, SCHED_FIFO, &param);
- }
-
- allow_signal(SIGTERM);
- /* Run until signal received */
- while (1) {
- if (kthread_should_stop())
- break;
- if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
- if (bus->drvr->dongle_reset == false)
- brcmf_sdbrcm_bus_watchdog(bus->drvr);
- /* Count the tick for reference */
- bus->drvr->tickcnt++;
- } else
- break;
- }
- return 0;
-}
-
-static void
-brcmf_sdbrcm_watchdog(unsigned long data)
-{
- struct brcmf_bus *bus = (struct brcmf_bus *)data;
-
- if (brcmf_watchdog_prio >= 0) {
- if (bus->watchdog_tsk)
- complete(&bus->watchdog_wait);
- else
- return;
- } else {
- brcmf_sdbrcm_bus_watchdog(bus->drvr);
-
- /* Count the tick for reference */
- bus->drvr->tickcnt++;
- }
-
- /* Reschedule the watchdog */
- if (bus->wd_timer_valid)
- mod_timer(&bus->timer, jiffies + brcmf_watchdog_ms * HZ / 1000);
-}
-
-void
-brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
-{
- static uint save_ms;
-
- /* don't start the wd until fw is loaded */
- if (bus->drvr->busstate == BRCMF_BUS_DOWN)
- return;
-
- /* Totally stop the timer */
- if (!wdtick && bus->wd_timer_valid == true) {
- del_timer_sync(&bus->timer);
- bus->wd_timer_valid = false;
- save_ms = wdtick;
- return;
- }
-
- if (wdtick) {
- brcmf_watchdog_ms = (uint) wdtick;
-
- if (save_ms != brcmf_watchdog_ms) {
- if (bus->wd_timer_valid == true)
- /* Stop timer and restart at new value */
- del_timer_sync(&bus->timer);
-
- /* Create timer again when watchdog period is
- dynamically changed or in the first instance
- */
- bus->timer.expires =
- jiffies + brcmf_watchdog_ms * HZ / 1000;
- add_timer(&bus->timer);
-
- } else {
- /* Re arm the timer, at last watchdog period */
- mod_timer(&bus->timer,
- jiffies + brcmf_watchdog_ms * HZ / 1000);
- }
-
- bus->wd_timer_valid = true;
- save_ms = wdtick;
- }
-}
-
-static int brcmf_sdbrcm_dpc_thread(void *data)
-{
- struct brcmf_bus *bus = (struct brcmf_bus *) data;
-
- /* This thread doesn't need any user-level access,
- * so get rid of all our resources
- */
- if (brcmf_dpc_prio > 0) {
- struct sched_param param;
- param.sched_priority = (brcmf_dpc_prio < MAX_RT_PRIO) ?
- brcmf_dpc_prio : (MAX_RT_PRIO - 1);
- sched_setscheduler(current, SCHED_FIFO, &param);
- }
-
- allow_signal(SIGTERM);
- /* Run until signal received */
- while (1) {
- if (kthread_should_stop())
- break;
- if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
- /* Call bus dpc unless it indicated down
- (then clean stop) */
- if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
- if (brcmf_sdbrcm_dpc(bus))
- complete(&bus->dpc_wait);
- } else {
- brcmf_sdbrcm_bus_stop(bus, true);
- }
- } else
- break;
- }
- return 0;
-}
-
-static void brcmf_sdbrcm_dpc_tasklet(unsigned long data)
-{
- struct brcmf_bus *bus = (struct brcmf_bus *) data;
-
- /* Call bus dpc unless it indicated down (then clean stop) */
- if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
- if (brcmf_sdbrcm_dpc(bus))
- tasklet_schedule(&bus->tasklet);
- } else
- brcmf_sdbrcm_bus_stop(bus, true);
-}
-
-static void brcmf_sdbrcm_sched_dpc(struct brcmf_bus *bus)
-{
- if (bus->dpc_tsk) {
- complete(&bus->dpc_wait);
- return;
- }
-
- tasklet_schedule(&bus->tasklet);
-}
-
-static void brcmf_sdbrcm_sdlock(struct brcmf_bus *bus)
-{
- if (bus->threads_only)
- down(&bus->sdsem);
- else
- spin_lock_bh(&bus->sdlock);
-}
-
-static void brcmf_sdbrcm_sdunlock(struct brcmf_bus *bus)
-{
- if (bus->threads_only)
- up(&bus->sdsem);
- else
- spin_unlock_bh(&bus->sdlock);
-}
-
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
-{
- if (bus->firmware->size < bus->fw_ptr + len)
- len = bus->firmware->size - bus->fw_ptr;
-
- memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
- bus->fw_ptr += len;
- return len;
-}
-
-MODULE_FIRMWARE(BCM4329_FW_NAME);
-MODULE_FIRMWARE(BCM4329_NV_NAME);
diff --git a/drivers/staging/brcm80211/brcmfmac/sdio_host.h b/drivers/staging/brcm80211/brcmfmac/sdio_host.h
deleted file mode 100644
index d3454721506..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/sdio_host.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_SDH_H_
-#define _BRCM_SDH_H_
-
-#include <linux/skbuff.h>
-extern const uint brcmf_sdio_msglevel;
-
-#define SDIO_FUNC_0 0
-#define SDIO_FUNC_1 1
-#define SDIO_FUNC_2 2
-
-#define SDIOD_FBR_SIZE 0x100
-
-/* io_en */
-#define SDIO_FUNC_ENABLE_1 0x02
-#define SDIO_FUNC_ENABLE_2 0x04
-
-/* io_rdys */
-#define SDIO_FUNC_READY_1 0x02
-#define SDIO_FUNC_READY_2 0x04
-
-/* intr_status */
-#define INTR_STATUS_FUNC1 0x2
-#define INTR_STATUS_FUNC2 0x4
-
-/* Maximum number of I/O funcs */
-#define SDIOD_MAX_IOFUNCS 7
-
-#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */
-
-/* function 1 miscellaneous registers */
-#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */
-#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */
-#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */
-#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */
-#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */
-#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 0 */
-#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */
-#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */
-#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */
-#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */
-
-/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
-#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */
-#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */
-#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */
-#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */
-#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */
-#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */
-#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */
-#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */
-#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */
-#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* Read Frame Byte Count High */
-
-#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */
-#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */
-
-/* function 1 OCP space */
-#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF /* sb offset addr is <= 15 bits, 32k */
-#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000
-#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 /* with b15, maps to 32-bit SB access */
-
-/* some duplication with sbsdpcmdev.h here */
-/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
-#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */
-#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */
-#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */
-#define SBSDIO_SBWINDOW_MASK 0xffff8000 /* Address bits from SBADDR regs */
-
-#define SDIOH_READ 0 /* Read request */
-#define SDIOH_WRITE 1 /* Write request */
-
-#define SDIOH_DATA_FIX 0 /* Fixed addressing */
-#define SDIOH_DATA_INC 1 /* Incremental addressing */
-
-/* internal return code */
-#define SUCCESS 0
-#define ERROR 1
-
-/* forward declarations */
-struct brcmf_sdio_card;
-
-struct brcmf_sdreg {
- int func;
- int offset;
- int value;
-};
-
-struct sdioh_info {
- struct osl_info *osh; /* osh handler */
- bool client_intr_enabled; /* interrupt connnected flag */
- bool intr_handler_valid; /* client driver interrupt handler valid */
- void (*intr_handler)(void *); /* registered interrupt handler */
- void *intr_handler_arg; /* argument to call interrupt handler */
- u16 intmask; /* Current active interrupts */
- void *sdos_info; /* Pointer to per-OS private data */
-
- uint irq; /* Client irq */
- int intrcount; /* Client interrupts */
- bool sd_blockmode; /* sd_blockmode == false => 64 Byte Cmd 53s. */
- /* Must be on for sd_multiblock to be effective */
- bool use_client_ints; /* If this is false, make sure to restore */
- int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
- u8 num_funcs; /* Supported funcs on client */
- u32 com_cis_ptr;
- u32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
- uint max_dma_len;
- uint max_dma_descriptors; /* DMA Descriptors supported by this controller. */
- /* SDDMA_DESCRIPTOR SGList[32]; *//* Scatter/Gather DMA List */
-};
-
-struct brcmf_sdmmc_instance {
- struct sdioh_info *sd;
- struct sdio_func *func[SDIOD_MAX_IOFUNCS];
- u32 host_claimed;
-};
-
-/* Attach and build an interface to the underlying SD host driver.
- * - Allocates resources (structs, arrays, mem, OS handles, etc) needed by
- * brcmf_sdcard.
- * - Returns the sdio card handle and virtual address base for register access.
- * The returned handle should be used in all subsequent calls, but the bcmsh
- * implementation may maintain a single "default" handle (e.g. the first or
- * most recent one) to enable single-instance implementations to pass NULL.
- */
-extern struct brcmf_sdio_card*
-brcmf_sdcard_attach(void *cfghdl, u32 *regsva, uint irq);
-
-/* Detach - freeup resources allocated in attach */
-extern int brcmf_sdcard_detach(struct brcmf_sdio_card *card);
-
-/* Enable/disable SD interrupt */
-extern int brcmf_sdcard_intr_enable(struct brcmf_sdio_card *card);
-extern int brcmf_sdcard_intr_disable(struct brcmf_sdio_card *card);
-
-/* Register/deregister device interrupt handler. */
-extern int
-brcmf_sdcard_intr_reg(struct brcmf_sdio_card *card,
- void (*fn)(void *), void *argh);
-
-extern int brcmf_sdcard_intr_dereg(struct brcmf_sdio_card *card);
-
-/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
- * fn: function number
- * addr: unmodified SDIO-space address
- * data: data byte to write
- * err: pointer to error code (or NULL)
- */
-extern u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_card *card, uint func,
- u32 addr, int *err);
-extern void brcmf_sdcard_cfg_write(struct brcmf_sdio_card *card, uint func,
- u32 addr, u8 data, int *err);
-
-/* Read/Write 4bytes from/to cfg space */
-extern u32
-brcmf_sdcard_cfg_read_word(struct brcmf_sdio_card *card, uint fnc_num,
- u32 addr, int *err);
-
-extern void brcmf_sdcard_cfg_write_word(struct brcmf_sdio_card *card,
- uint fnc_num, u32 addr,
- u32 data, int *err);
-
-/* Read CIS content for specified function.
- * fn: function whose CIS is being requested (0 is common CIS)
- * cis: pointer to memory location to place results
- * length: number of bytes to read
- * Internally, this routine uses the values from the cis base regs (0x9-0xB)
- * to form an SDIO-space address to read the data from.
- */
-extern int brcmf_sdcard_cis_read(struct brcmf_sdio_card *card, uint func,
- u8 *cis, uint length);
-
-/* Synchronous access to device (client) core registers via CMD53 to F1.
- * addr: backplane address (i.e. >= regsva from attach)
- * size: register width in bytes (2 or 4)
- * data: data for register write
- */
-extern u32
-brcmf_sdcard_reg_read(struct brcmf_sdio_card *card, u32 addr, uint size);
-
-extern u32
-brcmf_sdcard_reg_write(struct brcmf_sdio_card *card, u32 addr, uint size,
- u32 data);
-
-/* Indicate if last reg read/write failed */
-extern bool brcmf_sdcard_regfail(struct brcmf_sdio_card *card);
-
-/* Buffer transfer to/from device (client) core via cmd53.
- * fn: function number
- * addr: backplane address (i.e. >= regsva from attach)
- * flags: backplane width, address increment, sync/async
- * buf: pointer to memory data buffer
- * nbytes: number of bytes to transfer to/from buf
- * pkt: pointer to packet associated with buf (if any)
- * complete: callback function for command completion (async only)
- * handle: handle for completion callback (first arg in callback)
- * Returns 0 or error code.
- * NOTE: Async operation is not currently supported.
- */
-extern int
-brcmf_sdcard_send_buf(struct brcmf_sdio_card *card, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, void *pkt,
- void (*complete)(void *handle, int status,
- bool sync_waiting),
- void *handle);
-extern int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_card *card, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt,
- void (*complete)(void *handle, int status,
- bool sync_waiting),
- void *handle);
-
-/* Flags bits */
-#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */
-#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */
-#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. sync request) */
-
-/* Pending (non-error) return code */
-#define BCME_PENDING 1
-
-/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
- * rw: read or write (0/1)
- * addr: direct SDIO address
- * buf: pointer to memory data buffer
- * nbytes: number of bytes to transfer to/from buf
- * Returns 0 or error code.
- */
-extern int brcmf_sdcard_rwdata(struct brcmf_sdio_card *card, uint rw, u32 addr,
- u8 *buf, uint nbytes);
-
-/* Issue an abort to the specified function */
-extern int brcmf_sdcard_abort(struct brcmf_sdio_card *card, uint fn);
-
-/* Returns the "Device ID" of target device on the SDIO bus. */
-extern int brcmf_sdcard_query_device(struct brcmf_sdio_card *card);
-
-/* Miscellaneous knob tweaker. */
-extern int brcmf_sdcard_iovar_op(struct brcmf_sdio_card *card, const char *name,
- void *params, int plen, void *arg, int len,
- bool set);
-
-/* helper functions */
-
-/* callback functions */
-struct brcmf_sdioh_driver {
- /* attach to device */
- void *(*attach) (u16 vend_id, u16 dev_id, u16 bus, u16 slot,
- u16 func, uint bustype, u32 regsva, void *param);
- /* detach from device */
- void (*detach) (void *ch);
-};
-
-struct sdioh_info;
-
-/* platform specific/high level functions */
-extern int brcmf_sdio_function_init(void);
-extern int brcmf_sdio_register(struct brcmf_sdioh_driver *driver);
-extern void brcmf_sdio_unregister(void);
-extern void brcmf_sdio_function_cleanup(void);
-extern int brcmf_sdio_probe(struct device *dev);
-extern int brcmf_sdio_remove(struct device *dev);
-
-/* Function to return current window addr */
-extern u32 brcmf_sdcard_cur_sbwad(struct brcmf_sdio_card *card);
-
-/* Allocate/init/free per-OS private data */
-extern int brcmf_sdioh_osinit(struct sdioh_info *sd);
-extern void brcmf_sdioh_osfree(struct sdioh_info *sd);
-
-/* Core interrupt enable/disable of device interrupts */
-extern void brcmf_sdioh_dev_intr_on(struct sdioh_info *sd);
-extern void brcmf_sdioh_dev_intr_off(struct sdioh_info *sd);
-
-/* attach, return handler on success, NULL if failed.
- * The handler shall be provided by all subsequent calls. No local cache
- * cfghdl points to the starting address of pci device mapped memory
- */
-extern struct sdioh_info *brcmf_sdioh_attach(void *cfghdl, uint irq);
-extern int brcmf_sdioh_detach(struct sdioh_info *si);
-
-extern int
-brcmf_sdioh_interrupt_register(struct sdioh_info *si,
- void (*sdioh_cb_fn)(void *), void *argh);
-
-extern int brcmf_sdioh_interrupt_deregister(struct sdioh_info *si);
-
-/* enable or disable SD interrupt */
-extern int
-brcmf_sdioh_interrupt_set(struct sdioh_info *si, bool enable_disable);
-
-/* read or write one byte using cmd52 */
-extern int
-brcmf_sdioh_request_byte(struct sdioh_info *si, uint rw, uint fnc, uint addr,
- u8 *byte);
-
-/* read or write 2/4 bytes using cmd53 */
-extern int
-brcmf_sdioh_request_word(struct sdioh_info *si, uint cmd_type,
- uint rw, uint fnc, uint addr,
- u32 *word, uint nbyte);
-
-/* read or write any buffer using cmd53 */
-extern int
-brcmf_sdioh_request_buffer(struct sdioh_info *si, uint pio_dma,
- uint fix_inc, uint rw, uint fnc_num,
- u32 addr, uint regwidth,
- u32 buflen, u8 *buffer, struct sk_buff *pkt);
-
-/* get cis data */
-extern int
-brcmf_sdioh_cis_read(struct sdioh_info *si, uint fuc, u8 *cis, u32 length);
-
-extern int
-brcmf_sdioh_cfg_read(struct sdioh_info *si, uint fuc, u32 addr, u8 *data);
-extern int
-brcmf_sdioh_cfg_write(struct sdioh_info *si, uint fuc, u32 addr, u8 *data);
-
-/* handle iovars */
-extern int brcmf_sdioh_iovar_op(struct sdioh_info *si, const char *name,
- void *params, int plen, void *arg, int len, bool set);
-
-/* Issue abort to the specified function and clear controller as needed */
-extern int brcmf_sdioh_abort(struct sdioh_info *si, uint fnc);
-
-/* Watchdog timer interface for pm ops */
-extern void brcmf_sdio_wdtmr_enable(bool enable);
-
-extern uint sd_msglevel; /* Debug message level */
-
-extern struct brcmf_sdmmc_instance *gInstance;
-
-#endif /* _BRCM_SDH_H_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
deleted file mode 100644
index 821206d3e53..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+++ /dev/null
@@ -1,4152 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/if_arp.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/uaccess.h>
-#include <net/cfg80211.h>
-#include <net/rtnetlink.h>
-
-#include <brcmu_utils.h>
-#include <defs.h>
-#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "wl_cfg80211.h"
-
-static struct sdio_func *cfg80211_sdio_func;
-static struct brcmf_cfg80211_dev *cfg80211_dev;
-static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
-
-u32 brcmf_dbg_level = WL_DBG_ERR;
-
-/*
-** cfg80211_ops api/callback list
-*/
-static s32 brcmf_cfg80211_change_iface(struct wiphy *wiphy,
- struct net_device *ndev,
- enum nl80211_iftype type, u32 *flags,
- struct vif_params *params);
-static s32 __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request,
- struct cfg80211_ssid *this_ssid);
-static s32 brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request);
-static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
-static s32 brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_ibss_params *params);
-static s32 brcmf_cfg80211_leave_ibss(struct wiphy *wiphy,
- struct net_device *dev);
-static s32 brcmf_cfg80211_get_station(struct wiphy *wiphy,
- struct net_device *dev, u8 *mac,
- struct station_info *sinfo);
-static s32 brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy,
- struct net_device *dev, bool enabled,
- s32 timeout);
-static s32 brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
- struct net_device *dev,
- const u8 *addr,
- const struct cfg80211_bitrate_mask
- *mask);
-static int brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_connect_params *sme);
-static s32 brcmf_cfg80211_disconnect(struct wiphy *wiphy,
- struct net_device *dev,
- u16 reason_code);
-static s32 brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
- enum nl80211_tx_power_setting type,
- s32 dbm);
-static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
-static s32 brcmf_cfg80211_config_default_key(struct wiphy *wiphy,
- struct net_device *dev, u8 key_idx,
- bool unicast, bool multicast);
-static s32 brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_idx, bool pairwise, const u8 *mac_addr,
- struct key_params *params);
-static s32 brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_idx, bool pairwise, const u8 *mac_addr);
-static s32 brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_idx, bool pairwise, const u8 *mac_addr,
- void *cookie, void (*callback) (void *cookie,
- struct
- key_params *
- params));
-static s32 brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
- struct net_device *dev,
- u8 key_idx);
-static s32 brcmf_cfg80211_resume(struct wiphy *wiphy);
-static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
- struct cfg80211_wowlan *wow);
-static s32 brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_pmksa *pmksa);
-static s32 brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_pmksa *pmksa);
-static s32 brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy,
- struct net_device *dev);
-/*
-** event & event Q handlers for cfg80211 interfaces
-*/
-static s32 brcmf_create_event_handler(struct brcmf_cfg80211_priv *cfg_priv);
-static void brcmf_destroy_event_handler(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_event_handler(void *data);
-static void brcmf_init_eq(struct brcmf_cfg80211_priv *cfg_priv);
-static void brcmf_flush_eq(struct brcmf_cfg80211_priv *cfg_priv);
-static void brcmf_lock_eq(struct brcmf_cfg80211_priv *cfg_priv);
-static void brcmf_unlock_eq(struct brcmf_cfg80211_priv *cfg_priv);
-static void brcmf_init_eq_lock(struct brcmf_cfg80211_priv *cfg_priv);
-static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el);
-static struct brcmf_cfg80211_event_q *
-brcmf_deq_event(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 type,
- const struct brcmf_event_msg *msg, void *data);
-static void brcmf_put_event(struct brcmf_cfg80211_event_q *e);
-static void brcmf_wakeup_event(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_notify_connect_status(struct brcmf_cfg80211_priv *cfg_priv,
- struct net_device *ndev,
- const struct brcmf_event_msg *e,
- void *data);
-static s32 brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv,
- struct net_device *ndev,
- const struct brcmf_event_msg *e,
- void *data);
-static s32 brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
- struct net_device *ndev,
- const struct brcmf_event_msg *e,
- void *data);
-static s32 brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
- struct net_device *ndev,
- const struct brcmf_event_msg *e, void *data,
- bool completed);
-static s32 brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
- struct net_device *ndev,
- const struct brcmf_event_msg *e, void *data);
-static s32 brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv,
- struct net_device *ndev,
- const struct brcmf_event_msg *e, void *data);
-
-/*
-** register/deregister sdio function
-*/
-static void brcmf_clear_sdio_func(void);
-
-/*
-** ioctl utilites
-*/
-static s32 brcmf_dev_bufvar_get(struct net_device *dev, s8 *name, s8 *buf,
- s32 buf_len);
-static __used s32 brcmf_dev_bufvar_set(struct net_device *dev, s8 *name,
- s8 *buf, s32 len);
-static s32 brcmf_dev_intvar_set(struct net_device *dev, s8 *name, s32 val);
-static s32 brcmf_dev_intvar_get(struct net_device *dev, s8 *name,
- s32 *retval);
-static s32 brcmf_dev_ioctl(struct net_device *dev, u32 cmd, void *arg,
- u32 len);
-
-/*
-** cfg80211 set_wiphy_params utilities
-*/
-static s32 brcmf_set_frag(struct net_device *dev, u32 frag_threshold);
-static s32 brcmf_set_rts(struct net_device *dev, u32 frag_threshold);
-static s32 brcmf_set_retry(struct net_device *dev, u32 retry, bool l);
-
-/*
-** wl profile utilities
-*/
-static s32 brcmf_update_prof(struct brcmf_cfg80211_priv *cfg_priv,
- const struct brcmf_event_msg *e,
- void *data, s32 item);
-static void *brcmf_read_prof(struct brcmf_cfg80211_priv *cfg_priv, s32 item);
-static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof);
-
-/*
-** cfg80211 connect utilites
-*/
-static s32 brcmf_set_wpa_version(struct net_device *dev,
- struct cfg80211_connect_params *sme);
-static s32 brcmf_set_auth_type(struct net_device *dev,
- struct cfg80211_connect_params *sme);
-static s32 brcmf_set_set_cipher(struct net_device *dev,
- struct cfg80211_connect_params *sme);
-static s32 brcmf_set_key_mgmt(struct net_device *dev,
- struct cfg80211_connect_params *sme);
-static s32 brcmf_set_set_sharedkey(struct net_device *dev,
- struct cfg80211_connect_params *sme);
-static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv);
-static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv);
-static void brcmf_ch_to_chanspec(int ch,
- struct brcmf_join_params *join_params, size_t *join_params_size);
-
-/*
-** information element utilities
-*/
-static __used s32 brcmf_add_ie(struct brcmf_cfg80211_priv *cfg_priv,
- u8 t, u8 l, u8 *v);
-static s32 brcmf_mode_to_nl80211_iftype(s32 mode);
-static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
- struct device *dev);
-static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
- struct brcmf_bss_info *bi);
-static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_add_keyext(struct wiphy *wiphy, struct net_device *dev,
- u8 key_idx, const u8 *mac_addr,
- struct key_params *params);
-
-/*
-** key indianess swap utilities
-*/
-static void swap_key_from_BE(struct brcmf_wsec_key *key);
-static void swap_key_to_BE(struct brcmf_wsec_key *key);
-
-/*
-** brcmf_cfg80211_priv memory init/deinit utilities
-*/
-static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_priv *cfg_priv);
-static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_priv *cfg_priv);
-
-static void brcmf_delay(u32 ms);
-
-/*
-** store/restore cfg80211 instance data
-*/
-static void brcmf_set_drvdata(struct brcmf_cfg80211_dev *dev, void *data);
-static void *brcmf_get_drvdata(struct brcmf_cfg80211_dev *dev);
-
-/*
-** ibss mode utilities
-*/
-static bool brcmf_is_ibssmode(struct brcmf_cfg80211_priv *cfg_priv);
-
-/*
-** dongle up/down , default configuration utilities
-*/
-static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv,
- const struct brcmf_event_msg *e);
-static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv,
- const struct brcmf_event_msg *e);
-static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv,
- const struct brcmf_event_msg *e);
-static void brcmf_link_down(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype);
-static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_priv *cfg_priv);
-static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf);
-
-/*
-** dongle configuration utilities
-*/
-static s32 brcmf_dongle_eventmsg(struct net_device *ndev);
-static s32 brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
- s32 scan_unassoc_time, s32 scan_passive_time);
-static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv,
- bool need_lock);
-static s32 brcmf_dongle_roam(struct net_device *ndev, u32 roamvar,
- u32 bcn_timeout);
-
-/*
-** iscan handler
-*/
-static void brcmf_iscan_timer(unsigned long data);
-static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_iscan_thread(void *data);
-static s32 brcmf_dev_iovar_setbuf(struct net_device *dev, s8 *iovar,
- void *param, s32 paramlen, void *bufptr,
- s32 buflen);
-static s32 brcmf_dev_iovar_getbuf(struct net_device *dev, s8 *iovar,
- void *param, s32 paramlen, void *bufptr,
- s32 buflen);
-static s32 brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
- struct brcmf_ssid *ssid, u16 action);
-static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan);
-static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan,
- u32 *status,
- struct brcmf_scan_results **bss_list);
-static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
- bool aborted);
-static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el);
-static s32 brcmf_iscan_done(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv);
-static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_priv *cfg_priv);
-
-/*
-* find most significant bit set
-*/
-static __used u32 brcmf_find_msb(u16 bit16);
-
-/*
-* update pmklist to dongle
-*/
-static __used s32 brcmf_update_pmklist(struct net_device *dev,
- struct brcmf_cfg80211_pmk_list *pmk_list,
- s32 err);
-
-static void brcmf_set_mpc(struct net_device *ndev, int mpc);
-
-/*
-* debufs support
-*/
-static int
-brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_priv *cfg_priv);
-static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_priv *cfg_priv);
-
-#define WL_PRIV_GET() \
- ({ \
- struct brcmf_cfg80211_iface *ci = brcmf_get_drvdata(cfg80211_dev); \
- if (unlikely(!ci)) { \
- WL_ERR("wl_cfg80211_dev is unavailable\n"); \
- BUG(); \
- } \
- ci->cfg_priv; \
-})
-
-#define CHECK_SYS_UP() \
-do { \
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); \
- if (unlikely(!test_bit(WL_STATUS_READY, &cfg_priv->status))) { \
- WL_INFO("device is not ready : status (%d)\n", \
- (int)cfg_priv->status); \
- return -EIO; \
- } \
-} while (0)
-
-#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_channel), \
- .flags = (_flags), \
- .max_antenna_gain = 0, \
- .max_power = 30, \
-}
-
-#define CHAN5G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
- .center_freq = 5000 + (5 * (_channel)), \
- .hw_value = (_channel), \
- .flags = (_flags), \
- .max_antenna_gain = 0, \
- .max_power = 30, \
-}
-
-#define RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2)
-#define RATETAB_ENT(_rateid, _flags) \
- { \
- .bitrate = RATE_TO_BASE100KBPS(_rateid), \
- .hw_value = (_rateid), \
- .flags = (_flags), \
- }
-
-static struct ieee80211_rate __wl_rates[] = {
- RATETAB_ENT(BRCM_RATE_1M, 0),
- RATETAB_ENT(BRCM_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
- RATETAB_ENT(BRCM_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
- RATETAB_ENT(BRCM_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
- RATETAB_ENT(BRCM_RATE_6M, 0),
- RATETAB_ENT(BRCM_RATE_9M, 0),
- RATETAB_ENT(BRCM_RATE_12M, 0),
- RATETAB_ENT(BRCM_RATE_18M, 0),
- RATETAB_ENT(BRCM_RATE_24M, 0),
- RATETAB_ENT(BRCM_RATE_36M, 0),
- RATETAB_ENT(BRCM_RATE_48M, 0),
- RATETAB_ENT(BRCM_RATE_54M, 0),
-};
-
-#define wl_a_rates (__wl_rates + 4)
-#define wl_a_rates_size 8
-#define wl_g_rates (__wl_rates + 0)
-#define wl_g_rates_size 12
-
-static struct ieee80211_channel __wl_2ghz_channels[] = {
- CHAN2G(1, 2412, 0),
- CHAN2G(2, 2417, 0),
- CHAN2G(3, 2422, 0),
- CHAN2G(4, 2427, 0),
- CHAN2G(5, 2432, 0),
- CHAN2G(6, 2437, 0),
- CHAN2G(7, 2442, 0),
- CHAN2G(8, 2447, 0),
- CHAN2G(9, 2452, 0),
- CHAN2G(10, 2457, 0),
- CHAN2G(11, 2462, 0),
- CHAN2G(12, 2467, 0),
- CHAN2G(13, 2472, 0),
- CHAN2G(14, 2484, 0),
-};
-
-static struct ieee80211_channel __wl_5ghz_a_channels[] = {
- CHAN5G(34, 0), CHAN5G(36, 0),
- CHAN5G(38, 0), CHAN5G(40, 0),
- CHAN5G(42, 0), CHAN5G(44, 0),
- CHAN5G(46, 0), CHAN5G(48, 0),
- CHAN5G(52, 0), CHAN5G(56, 0),
- CHAN5G(60, 0), CHAN5G(64, 0),
- CHAN5G(100, 0), CHAN5G(104, 0),
- CHAN5G(108, 0), CHAN5G(112, 0),
- CHAN5G(116, 0), CHAN5G(120, 0),
- CHAN5G(124, 0), CHAN5G(128, 0),
- CHAN5G(132, 0), CHAN5G(136, 0),
- CHAN5G(140, 0), CHAN5G(149, 0),
- CHAN5G(153, 0), CHAN5G(157, 0),
- CHAN5G(161, 0), CHAN5G(165, 0),
- CHAN5G(184, 0), CHAN5G(188, 0),
- CHAN5G(192, 0), CHAN5G(196, 0),
- CHAN5G(200, 0), CHAN5G(204, 0),
- CHAN5G(208, 0), CHAN5G(212, 0),
- CHAN5G(216, 0),
-};
-
-static struct ieee80211_channel __wl_5ghz_n_channels[] = {
- CHAN5G(32, 0), CHAN5G(34, 0),
- CHAN5G(36, 0), CHAN5G(38, 0),
- CHAN5G(40, 0), CHAN5G(42, 0),
- CHAN5G(44, 0), CHAN5G(46, 0),
- CHAN5G(48, 0), CHAN5G(50, 0),
- CHAN5G(52, 0), CHAN5G(54, 0),
- CHAN5G(56, 0), CHAN5G(58, 0),
- CHAN5G(60, 0), CHAN5G(62, 0),
- CHAN5G(64, 0), CHAN5G(66, 0),
- CHAN5G(68, 0), CHAN5G(70, 0),
- CHAN5G(72, 0), CHAN5G(74, 0),
- CHAN5G(76, 0), CHAN5G(78, 0),
- CHAN5G(80, 0), CHAN5G(82, 0),
- CHAN5G(84, 0), CHAN5G(86, 0),
- CHAN5G(88, 0), CHAN5G(90, 0),
- CHAN5G(92, 0), CHAN5G(94, 0),
- CHAN5G(96, 0), CHAN5G(98, 0),
- CHAN5G(100, 0), CHAN5G(102, 0),
- CHAN5G(104, 0), CHAN5G(106, 0),
- CHAN5G(108, 0), CHAN5G(110, 0),
- CHAN5G(112, 0), CHAN5G(114, 0),
- CHAN5G(116, 0), CHAN5G(118, 0),
- CHAN5G(120, 0), CHAN5G(122, 0),
- CHAN5G(124, 0), CHAN5G(126, 0),
- CHAN5G(128, 0), CHAN5G(130, 0),
- CHAN5G(132, 0), CHAN5G(134, 0),
- CHAN5G(136, 0), CHAN5G(138, 0),
- CHAN5G(140, 0), CHAN5G(142, 0),
- CHAN5G(144, 0), CHAN5G(145, 0),
- CHAN5G(146, 0), CHAN5G(147, 0),
- CHAN5G(148, 0), CHAN5G(149, 0),
- CHAN5G(150, 0), CHAN5G(151, 0),
- CHAN5G(152, 0), CHAN5G(153, 0),
- CHAN5G(154, 0), CHAN5G(155, 0),
- CHAN5G(156, 0), CHAN5G(157, 0),
- CHAN5G(158, 0), CHAN5G(159, 0),
- CHAN5G(160, 0), CHAN5G(161, 0),
- CHAN5G(162, 0), CHAN5G(163, 0),
- CHAN5G(164, 0), CHAN5G(165, 0),
- CHAN5G(166, 0), CHAN5G(168, 0),
- CHAN5G(170, 0), CHAN5G(172, 0),
- CHAN5G(174, 0), CHAN5G(176, 0),
- CHAN5G(178, 0), CHAN5G(180, 0),
- CHAN5G(182, 0), CHAN5G(184, 0),
- CHAN5G(186, 0), CHAN5G(188, 0),
- CHAN5G(190, 0), CHAN5G(192, 0),
- CHAN5G(194, 0), CHAN5G(196, 0),
- CHAN5G(198, 0), CHAN5G(200, 0),
- CHAN5G(202, 0), CHAN5G(204, 0),
- CHAN5G(206, 0), CHAN5G(208, 0),
- CHAN5G(210, 0), CHAN5G(212, 0),
- CHAN5G(214, 0), CHAN5G(216, 0),
- CHAN5G(218, 0), CHAN5G(220, 0),
- CHAN5G(222, 0), CHAN5G(224, 0),
- CHAN5G(226, 0), CHAN5G(228, 0),
-};
-
-static struct ieee80211_supported_band __wl_band_2ghz = {
- .band = IEEE80211_BAND_2GHZ,
- .channels = __wl_2ghz_channels,
- .n_channels = ARRAY_SIZE(__wl_2ghz_channels),
- .bitrates = wl_g_rates,
- .n_bitrates = wl_g_rates_size,
-};
-
-static struct ieee80211_supported_band __wl_band_5ghz_a = {
- .band = IEEE80211_BAND_5GHZ,
- .channels = __wl_5ghz_a_channels,
- .n_channels = ARRAY_SIZE(__wl_5ghz_a_channels),
- .bitrates = wl_a_rates,
- .n_bitrates = wl_a_rates_size,
-};
-
-static struct ieee80211_supported_band __wl_band_5ghz_n = {
- .band = IEEE80211_BAND_5GHZ,
- .channels = __wl_5ghz_n_channels,
- .n_channels = ARRAY_SIZE(__wl_5ghz_n_channels),
- .bitrates = wl_a_rates,
- .n_bitrates = wl_a_rates_size,
-};
-
-static const u32 __wl_cipher_suites[] = {
- WLAN_CIPHER_SUITE_WEP40,
- WLAN_CIPHER_SUITE_WEP104,
- WLAN_CIPHER_SUITE_TKIP,
- WLAN_CIPHER_SUITE_CCMP,
- WLAN_CIPHER_SUITE_AES_CMAC,
-};
-
-static void swap_key_from_BE(struct brcmf_wsec_key *key)
-{
- key->index = cpu_to_le32(key->index);
- key->len = cpu_to_le32(key->len);
- key->algo = cpu_to_le32(key->algo);
- key->flags = cpu_to_le32(key->flags);
- key->rxiv.hi = cpu_to_le32(key->rxiv.hi);
- key->rxiv.lo = cpu_to_le16(key->rxiv.lo);
- key->iv_initialized = cpu_to_le32(key->iv_initialized);
-}
-
-static void swap_key_to_BE(struct brcmf_wsec_key *key)
-{
- key->index = le32_to_cpu(key->index);
- key->len = le32_to_cpu(key->len);
- key->algo = le32_to_cpu(key->algo);
- key->flags = le32_to_cpu(key->flags);
- key->rxiv.hi = le32_to_cpu(key->rxiv.hi);
- key->rxiv.lo = le16_to_cpu(key->rxiv.lo);
- key->iv_initialized = le32_to_cpu(key->iv_initialized);
-}
-
-static s32
-brcmf_dev_ioctl(struct net_device *dev, u32 cmd, void *arg, u32 len)
-{
- struct ifreq ifr;
- struct brcmf_ioctl ioc;
- mm_segment_t fs;
- s32 err = 0;
-
- memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = cmd;
- ioc.buf = arg;
- ioc.len = len;
- strcpy(ifr.ifr_name, dev->name);
- ifr.ifr_data = (caddr_t)&ioc;
-
- fs = get_fs();
- set_fs(get_ds());
- err = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
- set_fs(fs);
-
- return err;
-}
-
-static s32
-brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
- enum nl80211_iftype type, u32 *flags,
- struct vif_params *params)
-{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct wireless_dev *wdev;
- s32 infra = 0;
- s32 err = 0;
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
-
- switch (type) {
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_WDS:
- WL_ERR("type (%d) : currently we do not support this type\n",
- type);
- return -EOPNOTSUPP;
- case NL80211_IFTYPE_ADHOC:
- cfg_priv->conf->mode = WL_MODE_IBSS;
- infra = 0;
- break;
- case NL80211_IFTYPE_STATION:
- cfg_priv->conf->mode = WL_MODE_BSS;
- infra = 1;
- break;
- default:
- err = -EINVAL;
- goto done;
- }
-
- infra = cpu_to_le32(infra);
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_INFRA, &infra, sizeof(infra));
- if (unlikely(err)) {
- WL_ERR("WLC_SET_INFRA error (%d)\n", err);
- err = -EAGAIN;
- } else {
- wdev = ndev->ieee80211_ptr;
- wdev->iftype = type;
- }
-
- WL_INFO("IF Type = %s\n",
- (cfg_priv->conf->mode == WL_MODE_IBSS) ? "Adhoc" : "Infra");
-
-done:
- WL_TRACE("Exit\n");
-
- return err;
-}
-
-static void wl_iscan_prep(struct brcmf_scan_params *params,
- struct brcmf_ssid *ssid)
-{
- memcpy(params->bssid, ether_bcast, ETH_ALEN);
- params->bss_type = DOT11_BSSTYPE_ANY;
- params->scan_type = 0;
- params->nprobes = -1;
- params->active_time = -1;
- params->passive_time = -1;
- params->home_time = -1;
- params->channel_num = 0;
-
- params->nprobes = cpu_to_le32(params->nprobes);
- params->active_time = cpu_to_le32(params->active_time);
- params->passive_time = cpu_to_le32(params->passive_time);
- params->home_time = cpu_to_le32(params->home_time);
- if (ssid && ssid->SSID_len)
- memcpy(&params->ssid, ssid, sizeof(struct brcmf_ssid));
-
-}
-
-static s32
-brcmf_dev_iovar_setbuf(struct net_device *dev, s8 * iovar, void *param,
- s32 paramlen, void *bufptr, s32 buflen)
-{
- s32 iolen;
-
- iolen = brcmu_mkiovar(iovar, param, paramlen, bufptr, buflen);
- BUG_ON(!iolen);
-
- return brcmf_dev_ioctl(dev, BRCMF_C_SET_VAR, bufptr, iolen);
-}
-
-static s32
-brcmf_dev_iovar_getbuf(struct net_device *dev, s8 * iovar, void *param,
- s32 paramlen, void *bufptr, s32 buflen)
-{
- s32 iolen;
-
- iolen = brcmu_mkiovar(iovar, param, paramlen, bufptr, buflen);
- BUG_ON(!iolen);
-
- return brcmf_dev_ioctl(dev, BRCMF_C_GET_VAR, bufptr, buflen);
-}
-
-static s32
-brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
- struct brcmf_ssid *ssid, u16 action)
-{
- s32 params_size = (BRCMF_SCAN_PARAMS_FIXED_SIZE +
- offsetof(struct brcmf_iscan_params, params));
- struct brcmf_iscan_params *params;
- s32 err = 0;
-
- if (ssid && ssid->SSID_len)
- params_size += sizeof(struct brcmf_ssid);
- params = kzalloc(params_size, GFP_KERNEL);
- if (unlikely(!params))
- return -ENOMEM;
- BUG_ON(params_size >= BRCMF_C_IOCTL_SMLEN);
-
- wl_iscan_prep(&params->params, ssid);
-
- params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION);
- params->action = cpu_to_le16(action);
- params->scan_duration = cpu_to_le16(0);
-
- /* params_size += offsetof(struct brcmf_iscan_params, params); */
- err = brcmf_dev_iovar_setbuf(iscan->dev, "iscan", params, params_size,
- iscan->ioctl_buf, BRCMF_C_IOCTL_SMLEN);
- if (unlikely(err)) {
- if (err == -EBUSY) {
- WL_INFO("system busy : iscan canceled\n");
- } else {
- WL_ERR("error (%d)\n", err);
- }
- }
- kfree(params);
- return err;
-}
-
-static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
- struct brcmf_ssid ssid;
- s32 passive_scan;
- s32 err = 0;
-
- /* Broadcast scan by default */
- memset(&ssid, 0, sizeof(ssid));
-
- iscan->state = WL_ISCAN_STATE_SCANING;
-
- passive_scan = cfg_priv->active_scan ? 0 : 1;
- err = brcmf_dev_ioctl(cfg_to_ndev(cfg_priv), BRCMF_C_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan));
- if (unlikely(err)) {
- WL_ERR("error (%d)\n", err);
- return err;
- }
- brcmf_set_mpc(ndev, 0);
- cfg_priv->iscan_kickstart = true;
- brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START);
- mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
- iscan->timer_on = 1;
-
- return err;
-}
-
-static s32
-__brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request,
- struct cfg80211_ssid *this_ssid)
-{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
- struct cfg80211_ssid *ssids;
- struct brcmf_cfg80211_scan_req *sr = cfg_priv->scan_req_int;
- s32 passive_scan;
- bool iscan_req;
- bool spec_scan;
- s32 err = 0;
-
- if (unlikely(test_bit(WL_STATUS_SCANNING, &cfg_priv->status))) {
- WL_ERR("Scanning already : status (%lu)\n", cfg_priv->status);
- return -EAGAIN;
- }
- if (unlikely(test_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status))) {
- WL_ERR("Scanning being aborted : status (%lu)\n",
- cfg_priv->status);
- return -EAGAIN;
- }
- if (test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) {
- WL_ERR("Connecting : status (%lu)\n",
- cfg_priv->status);
- return -EAGAIN;
- }
-
- iscan_req = false;
- spec_scan = false;
- if (request) {
- /* scan bss */
- ssids = request->ssids;
- if (cfg_priv->iscan_on && (!ssids || !ssids->ssid_len))
- iscan_req = true;
- } else {
- /* scan in ibss */
- /* we don't do iscan in ibss */
- ssids = this_ssid;
- }
-
- cfg_priv->scan_request = request;
- set_bit(WL_STATUS_SCANNING, &cfg_priv->status);
- if (iscan_req) {
- err = brcmf_do_iscan(cfg_priv);
- if (likely(!err))
- return err;
- else
- goto scan_out;
- } else {
- WL_SCAN("ssid \"%s\", ssid_len (%d)\n",
- ssids->ssid, ssids->ssid_len);
- memset(&sr->ssid, 0, sizeof(sr->ssid));
- sr->ssid.SSID_len =
- min_t(u8, sizeof(sr->ssid.SSID), ssids->ssid_len);
- if (sr->ssid.SSID_len) {
- memcpy(sr->ssid.SSID, ssids->ssid, sr->ssid.SSID_len);
- sr->ssid.SSID_len = cpu_to_le32(sr->ssid.SSID_len);
- spec_scan = true;
- } else {
- WL_SCAN("Broadcast scan\n");
- }
-
- passive_scan = cfg_priv->active_scan ? 0 : 1;
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan));
- if (unlikely(err)) {
- WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
- goto scan_out;
- }
- brcmf_set_mpc(ndev, 0);
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SCAN, &sr->ssid,
- sizeof(sr->ssid));
- if (err) {
- if (err == -EBUSY) {
- WL_INFO("system busy : scan for \"%s\" canceled\n",
- sr->ssid.SSID);
- } else {
- WL_ERR("WLC_SCAN error (%d)\n", err);
- }
- brcmf_set_mpc(ndev, 1);
- goto scan_out;
- }
- }
-
- return 0;
-
-scan_out:
- clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
- cfg_priv->scan_request = NULL;
- return err;
-}
-
-static s32
-brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request)
-{
- s32 err = 0;
-
- WL_TRACE("Enter\n");
-
- CHECK_SYS_UP();
-
- err = __brcmf_cfg80211_scan(wiphy, ndev, request, NULL);
- if (unlikely(err))
- WL_ERR("scan error (%d)\n", err);
-
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32 brcmf_dev_intvar_set(struct net_device *dev, s8 *name, s32 val)
-{
- s8 buf[BRCMF_C_IOCTL_SMLEN];
- u32 len;
- s32 err = 0;
-
- val = cpu_to_le32(val);
- len = brcmu_mkiovar(name, (char *)(&val), sizeof(val), buf,
- sizeof(buf));
- BUG_ON(!len);
-
- err = brcmf_dev_ioctl(dev, BRCMF_C_SET_VAR, buf, len);
- if (unlikely(err))
- WL_ERR("error (%d)\n", err);
-
- return err;
-}
-
-static s32
-brcmf_dev_intvar_get(struct net_device *dev, s8 *name, s32 *retval)
-{
- union {
- s8 buf[BRCMF_C_IOCTL_SMLEN];
- s32 val;
- } var;
- u32 len;
- u32 data_null;
- s32 err = 0;
-
- len =
- brcmu_mkiovar(name, (char *)(&data_null), 0, (char *)(&var),
- sizeof(var.buf));
- BUG_ON(!len);
- err = brcmf_dev_ioctl(dev, BRCMF_C_GET_VAR, &var, len);
- if (unlikely(err))
- WL_ERR("error (%d)\n", err);
-
- *retval = le32_to_cpu(var.val);
-
- return err;
-}
-
-static s32 brcmf_set_rts(struct net_device *dev, u32 rts_threshold)
-{
- s32 err = 0;
-
- err = brcmf_dev_intvar_set(dev, "rtsthresh", rts_threshold);
- if (unlikely(err))
- WL_ERR("Error (%d)\n", err);
-
- return err;
-}
-
-static s32 brcmf_set_frag(struct net_device *dev, u32 frag_threshold)
-{
- s32 err = 0;
-
- err = brcmf_dev_intvar_set(dev, "fragthresh", frag_threshold);
- if (unlikely(err))
- WL_ERR("Error (%d)\n", err);
-
- return err;
-}
-
-static s32 brcmf_set_retry(struct net_device *dev, u32 retry, bool l)
-{
- s32 err = 0;
- u32 cmd = (l ? BRCM_SET_LRL : BRCM_SET_SRL);
-
- retry = cpu_to_le32(retry);
- err = brcmf_dev_ioctl(dev, cmd, &retry, sizeof(retry));
- if (unlikely(err)) {
- WL_ERR("cmd (%d) , error (%d)\n", cmd, err);
- return err;
- }
- return err;
-}
-
-static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
-{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
- s32 err = 0;
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
-
- if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
- (cfg_priv->conf->rts_threshold != wiphy->rts_threshold)) {
- cfg_priv->conf->rts_threshold = wiphy->rts_threshold;
- err = brcmf_set_rts(ndev, cfg_priv->conf->rts_threshold);
- if (!err)
- goto done;
- }
- if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
- (cfg_priv->conf->frag_threshold != wiphy->frag_threshold)) {
- cfg_priv->conf->frag_threshold = wiphy->frag_threshold;
- err = brcmf_set_frag(ndev, cfg_priv->conf->frag_threshold);
- if (!err)
- goto done;
- }
- if (changed & WIPHY_PARAM_RETRY_LONG
- && (cfg_priv->conf->retry_long != wiphy->retry_long)) {
- cfg_priv->conf->retry_long = wiphy->retry_long;
- err = brcmf_set_retry(ndev, cfg_priv->conf->retry_long, true);
- if (!err)
- goto done;
- }
- if (changed & WIPHY_PARAM_RETRY_SHORT
- && (cfg_priv->conf->retry_short != wiphy->retry_short)) {
- cfg_priv->conf->retry_short = wiphy->retry_short;
- err = brcmf_set_retry(ndev, cfg_priv->conf->retry_short, false);
- if (!err)
- goto done;
- }
-
-done:
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32
-brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_ibss_params *params)
-{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct brcmf_join_params join_params;
- size_t join_params_size = 0;
- s32 err = 0;
- s32 wsec = 0;
- s32 bcnprd;
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
-
- if (params->ssid)
- WL_CONN("SSID: %s\n", params->ssid);
- else {
- WL_CONN("SSID: NULL, Not supported\n");
- return -EOPNOTSUPP;
- }
-
- set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
-
- if (params->bssid)
- WL_CONN("BSSID: %02X %02X %02X %02X %02X %02X\n",
- params->bssid[0], params->bssid[1], params->bssid[2],
- params->bssid[3], params->bssid[4], params->bssid[5]);
- else
- WL_CONN("No BSSID specified\n");
-
- if (params->channel)
- WL_CONN("channel: %d\n", params->channel->center_freq);
- else
- WL_CONN("no channel specified\n");
-
- if (params->channel_fixed)
- WL_CONN("fixed channel required\n");
- else
- WL_CONN("no fixed channel required\n");
-
- if (params->ie && params->ie_len)
- WL_CONN("ie len: %d\n", params->ie_len);
- else
- WL_CONN("no ie specified\n");
-
- if (params->beacon_interval)
- WL_CONN("beacon interval: %d\n", params->beacon_interval);
- else
- WL_CONN("no beacon interval specified\n");
-
- if (params->basic_rates)
- WL_CONN("basic rates: %08X\n", params->basic_rates);
- else
- WL_CONN("no basic rates specified\n");
-
- if (params->privacy)
- WL_CONN("privacy required\n");
- else
- WL_CONN("no privacy required\n");
-
- /* Configure Privacy for starter */
- if (params->privacy)
- wsec |= WEP_ENABLED;
-
- err = brcmf_dev_intvar_set(dev, "wsec", wsec);
- if (unlikely(err)) {
- WL_ERR("wsec failed (%d)\n", err);
- goto done;
- }
-
- /* Configure Beacon Interval for starter */
- if (params->beacon_interval)
- bcnprd = cpu_to_le32(params->beacon_interval);
- else
- bcnprd = cpu_to_le32(100);
-
- err = brcmf_dev_ioctl(dev, BRCM_SET_BCNPRD, &bcnprd, sizeof(bcnprd));
- if (unlikely(err)) {
- WL_ERR("WLC_SET_BCNPRD failed (%d)\n", err);
- goto done;
- }
-
- /* Configure required join parameter */
- memset(&join_params, 0, sizeof(struct brcmf_join_params));
-
- /* SSID */
- join_params.ssid.SSID_len =
- (params->ssid_len > 32) ? 32 : params->ssid_len;
- memcpy(join_params.ssid.SSID, params->ssid, join_params.ssid.SSID_len);
- join_params.ssid.SSID_len = cpu_to_le32(join_params.ssid.SSID_len);
- join_params_size = sizeof(join_params.ssid);
- brcmf_update_prof(cfg_priv, NULL, &join_params.ssid, WL_PROF_SSID);
-
- /* BSSID */
- if (params->bssid) {
- memcpy(join_params.params.bssid, params->bssid, ETH_ALEN);
- join_params_size = sizeof(join_params.ssid) +
- BRCMF_ASSOC_PARAMS_FIXED_SIZE;
- } else {
- memcpy(join_params.params.bssid, ether_bcast, ETH_ALEN);
- }
- brcmf_update_prof(cfg_priv, NULL,
- &join_params.params.bssid, WL_PROF_BSSID);
-
- /* Channel */
- if (params->channel) {
- u32 target_channel;
-
- cfg_priv->channel =
- ieee80211_frequency_to_channel(
- params->channel->center_freq);
- if (params->channel_fixed) {
- /* adding chanspec */
- brcmf_ch_to_chanspec(cfg_priv->channel,
- &join_params, &join_params_size);
- }
-
- /* set channel for starter */
- target_channel = cpu_to_le32(cfg_priv->channel);
- err = brcmf_dev_ioctl(dev, BRCM_SET_CHANNEL,
- &target_channel, sizeof(target_channel));
- if (unlikely(err)) {
- WL_ERR("WLC_SET_CHANNEL failed (%d)\n", err);
- goto done;
- }
- } else
- cfg_priv->channel = 0;
-
- cfg_priv->ibss_starter = false;
-
-
- err = brcmf_dev_ioctl(dev, BRCMF_C_SET_SSID,
- &join_params, join_params_size);
- if (unlikely(err)) {
- WL_ERR("WLC_SET_SSID failed (%d)\n", err);
- goto done;
- }
-
-done:
- if (err)
- clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32 brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
-{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- s32 err = 0;
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
-
- brcmf_link_down(cfg_priv);
-
- WL_TRACE("Exit\n");
-
- return err;
-}
-
-static s32
-brcmf_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
-{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
- struct brcmf_cfg80211_security *sec;
- s32 val = 0;
- s32 err = 0;
-
- if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
- val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
- else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
- val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
- else
- val = WPA_AUTH_DISABLED;
- WL_CONN("setting wpa_auth to 0x%0x\n", val);
- err = brcmf_dev_intvar_set(dev, "wpa_auth", val);
- if (unlikely(err)) {
- WL_ERR("set wpa_auth failed (%d)\n", err);
- return err;
- }
- sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
- sec->wpa_versions = sme->crypto.wpa_versions;
- return err;
-}
-
-static s32
-brcmf_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
-{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
- struct brcmf_cfg80211_security *sec;
- s32 val = 0;
- s32 err = 0;
-
- switch (sme->auth_type) {
- case NL80211_AUTHTYPE_OPEN_SYSTEM:
- val = 0;
- WL_CONN("open system\n");
- break;
- case NL80211_AUTHTYPE_SHARED_KEY:
- val = 1;
- WL_CONN("shared key\n");
- break;
- case NL80211_AUTHTYPE_AUTOMATIC:
- val = 2;
- WL_CONN("automatic\n");
- break;
- case NL80211_AUTHTYPE_NETWORK_EAP:
- WL_CONN("network eap\n");
- default:
- val = 2;
- WL_ERR("invalid auth type (%d)\n", sme->auth_type);
- break;
- }
-
- err = brcmf_dev_intvar_set(dev, "auth", val);
- if (unlikely(err)) {
- WL_ERR("set auth failed (%d)\n", err);
- return err;
- }
- sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
- sec->auth_type = sme->auth_type;
- return err;
-}
-
-static s32
-brcmf_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
-{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
- struct brcmf_cfg80211_security *sec;
- s32 pval = 0;
- s32 gval = 0;
- s32 err = 0;
-
- if (sme->crypto.n_ciphers_pairwise) {
- switch (sme->crypto.ciphers_pairwise[0]) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- pval = WEP_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- pval = TKIP_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- pval = AES_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- pval = AES_ENABLED;
- break;
- default:
- WL_ERR("invalid cipher pairwise (%d)\n",
- sme->crypto.ciphers_pairwise[0]);
- return -EINVAL;
- }
- }
- if (sme->crypto.cipher_group) {
- switch (sme->crypto.cipher_group) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- gval = WEP_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- gval = TKIP_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- gval = AES_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- gval = AES_ENABLED;
- break;
- default:
- WL_ERR("invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
- return -EINVAL;
- }
- }
-
- WL_CONN("pval (%d) gval (%d)\n", pval, gval);
- err = brcmf_dev_intvar_set(dev, "wsec", pval | gval);
- if (unlikely(err)) {
- WL_ERR("error (%d)\n", err);
- return err;
- }
-
- sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
- sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
- sec->cipher_group = sme->crypto.cipher_group;
-
- return err;
-}
-
-static s32
-brcmf_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
-{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
- struct brcmf_cfg80211_security *sec;
- s32 val = 0;
- s32 err = 0;
-
- if (sme->crypto.n_akm_suites) {
- err = brcmf_dev_intvar_get(dev, "wpa_auth", &val);
- if (unlikely(err)) {
- WL_ERR("could not get wpa_auth (%d)\n", err);
- return err;
- }
- if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
- switch (sme->crypto.akm_suites[0]) {
- case WLAN_AKM_SUITE_8021X:
- val = WPA_AUTH_UNSPECIFIED;
- break;
- case WLAN_AKM_SUITE_PSK:
- val = WPA_AUTH_PSK;
- break;
- default:
- WL_ERR("invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
- return -EINVAL;
- }
- } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
- switch (sme->crypto.akm_suites[0]) {
- case WLAN_AKM_SUITE_8021X:
- val = WPA2_AUTH_UNSPECIFIED;
- break;
- case WLAN_AKM_SUITE_PSK:
- val = WPA2_AUTH_PSK;
- break;
- default:
- WL_ERR("invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
- return -EINVAL;
- }
- }
-
- WL_CONN("setting wpa_auth to %d\n", val);
- err = brcmf_dev_intvar_set(dev, "wpa_auth", val);
- if (unlikely(err)) {
- WL_ERR("could not set wpa_auth (%d)\n", err);
- return err;
- }
- }
- sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
- sec->wpa_auth = sme->crypto.akm_suites[0];
-
- return err;
-}
-
-static s32
-brcmf_set_set_sharedkey(struct net_device *dev,
- struct cfg80211_connect_params *sme)
-{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
- struct brcmf_cfg80211_security *sec;
- struct brcmf_wsec_key key;
- s32 val;
- s32 err = 0;
-
- WL_CONN("key len (%d)\n", sme->key_len);
- if (sme->key_len) {
- sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
- WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n",
- sec->wpa_versions, sec->cipher_pairwise);
- if (!
- (sec->wpa_versions & (NL80211_WPA_VERSION_1 |
- NL80211_WPA_VERSION_2))
-&& (sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
- WLAN_CIPHER_SUITE_WEP104))) {
- memset(&key, 0, sizeof(key));
- key.len = (u32) sme->key_len;
- key.index = (u32) sme->key_idx;
- if (unlikely(key.len > sizeof(key.data))) {
- WL_ERR("Too long key length (%u)\n", key.len);
- return -EINVAL;
- }
- memcpy(key.data, sme->key, key.len);
- key.flags = BRCMF_PRIMARY_KEY;
- switch (sec->cipher_pairwise) {
- case WLAN_CIPHER_SUITE_WEP40:
- key.algo = CRYPTO_ALGO_WEP1;
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- key.algo = CRYPTO_ALGO_WEP128;
- break;
- default:
- WL_ERR("Invalid algorithm (%d)\n",
- sme->crypto.ciphers_pairwise[0]);
- return -EINVAL;
- }
- /* Set the new key/index */
- WL_CONN("key length (%d) key index (%d) algo (%d)\n",
- key.len, key.index, key.algo);
- WL_CONN("key \"%s\"\n", key.data);
- swap_key_from_BE(&key);
- err = brcmf_dev_ioctl(dev, BRCMF_C_SET_KEY, &key,
- sizeof(key));
- if (unlikely(err)) {
- WL_ERR("WLC_SET_KEY error (%d)\n", err);
- return err;
- }
- if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
- WL_CONN("set auth_type to shared key\n");
- val = 1; /* shared key */
- err = brcmf_dev_intvar_set(dev, "auth", val);
- if (unlikely(err)) {
- WL_ERR("set auth failed (%d)\n", err);
- return err;
- }
- }
- }
- }
- return err;
-}
-
-static s32
-brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_connect_params *sme)
-{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct ieee80211_channel *chan = sme->channel;
- struct brcmf_join_params join_params;
- size_t join_params_size;
-
- s32 err = 0;
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
-
- if (unlikely(!sme->ssid)) {
- WL_ERR("Invalid ssid\n");
- return -EOPNOTSUPP;
- }
-
- set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
-
- if (chan) {
- cfg_priv->channel =
- ieee80211_frequency_to_channel(chan->center_freq);
- WL_CONN("channel (%d), center_req (%d)\n",
- cfg_priv->channel, chan->center_freq);
- } else
- cfg_priv->channel = 0;
-
- WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
-
- err = brcmf_set_wpa_version(dev, sme);
- if (err) {
- WL_ERR("wl_set_wpa_version failed (%d)\n", err);
- goto done;
- }
-
- err = brcmf_set_auth_type(dev, sme);
- if (err) {
- WL_ERR("wl_set_auth_type failed (%d)\n", err);
- goto done;
- }
-
- err = brcmf_set_set_cipher(dev, sme);
- if (err) {
- WL_ERR("wl_set_set_cipher failed (%d)\n", err);
- goto done;
- }
-
- err = brcmf_set_key_mgmt(dev, sme);
- if (err) {
- WL_ERR("wl_set_key_mgmt failed (%d)\n", err);
- goto done;
- }
-
- err = brcmf_set_set_sharedkey(dev, sme);
- if (err) {
- WL_ERR("wl_set_set_sharedkey failed (%d)\n", err);
- goto done;
- }
-
- brcmf_update_prof(cfg_priv, NULL, sme->bssid, WL_PROF_BSSID);
- /*
- ** Join with specific BSSID and cached SSID
- ** If SSID is zero join based on BSSID only
- */
- memset(&join_params, 0, sizeof(join_params));
- join_params_size = sizeof(join_params.ssid);
-
- join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
- memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
- join_params.ssid.SSID_len = cpu_to_le32(join_params.ssid.SSID_len);
- brcmf_update_prof(cfg_priv, NULL, &join_params.ssid, WL_PROF_SSID);
-
- if (sme->bssid)
- memcpy(join_params.params.bssid, sme->bssid, ETH_ALEN);
- else
- memcpy(join_params.params.bssid, ether_bcast, ETH_ALEN);
-
- if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
- WL_CONN("ssid \"%s\", len (%d)\n",
- join_params.ssid.SSID, join_params.ssid.SSID_len);
- }
-
- brcmf_ch_to_chanspec(cfg_priv->channel,
- &join_params, &join_params_size);
- err = brcmf_dev_ioctl(dev, BRCMF_C_SET_SSID,
- &join_params, join_params_size);
- if (err)
- WL_ERR("WLC_SET_SSID failed (%d)\n", err);
-
-done:
- if (err)
- clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32
-brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
- u16 reason_code)
-{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct brcmf_scb_val scbval;
- s32 err = 0;
-
- WL_TRACE("Enter. Reason code = %d\n", reason_code);
- CHECK_SYS_UP();
-
- clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
-
- scbval.val = reason_code;
- memcpy(&scbval.ea, brcmf_read_prof(cfg_priv, WL_PROF_BSSID), ETH_ALEN);
- scbval.val = cpu_to_le32(scbval.val);
- err = brcmf_dev_ioctl(dev, BRCMF_C_DISASSOC, &scbval,
- sizeof(struct brcmf_scb_val));
- if (unlikely(err))
- WL_ERR("error (%d)\n", err);
-
- cfg_priv->link_up = false;
-
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32
-brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
- enum nl80211_tx_power_setting type, s32 dbm)
-{
-
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
- u16 txpwrmw;
- s32 err = 0;
- s32 disable = 0;
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
-
- switch (type) {
- case NL80211_TX_POWER_AUTOMATIC:
- break;
- case NL80211_TX_POWER_LIMITED:
- if (dbm < 0) {
- WL_ERR("TX_POWER_LIMITED - dbm is negative\n");
- err = -EINVAL;
- goto done;
- }
- break;
- case NL80211_TX_POWER_FIXED:
- if (dbm < 0) {
- WL_ERR("TX_POWER_FIXED - dbm is negative\n");
- err = -EINVAL;
- goto done;
- }
- break;
- }
- /* Make sure radio is off or on as far as software is concerned */
- disable = WL_RADIO_SW_DISABLE << 16;
- disable = cpu_to_le32(disable);
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_RADIO, &disable, sizeof(disable));
- if (unlikely(err))
- WL_ERR("WLC_SET_RADIO error (%d)\n", err);
-
- if (dbm > 0xffff)
- txpwrmw = 0xffff;
- else
- txpwrmw = (u16) dbm;
- err = brcmf_dev_intvar_set(ndev, "qtxpower",
- (s32) (brcmu_mw_to_qdbm(txpwrmw)));
- if (unlikely(err))
- WL_ERR("qtxpower error (%d)\n", err);
- cfg_priv->conf->tx_power = dbm;
-
-done:
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
-{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
- s32 txpwrdbm;
- u8 result;
- s32 err = 0;
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
-
- err = brcmf_dev_intvar_get(ndev, "qtxpower", &txpwrdbm);
- if (unlikely(err)) {
- WL_ERR("error (%d)\n", err);
- goto done;
- }
-
- result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
- *dbm = (s32) brcmu_qdbm_to_mw(result);
-
-done:
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32
-brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_idx, bool unicast, bool multicast)
-{
- u32 index;
- s32 wsec;
- s32 err = 0;
-
- WL_TRACE("Enter\n");
- WL_CONN("key index (%d)\n", key_idx);
- CHECK_SYS_UP();
-
- err = brcmf_dev_ioctl(dev, BRCMF_C_GET_WSEC, &wsec, sizeof(wsec));
- if (unlikely(err)) {
- WL_ERR("WLC_GET_WSEC error (%d)\n", err);
- goto done;
- }
-
- wsec = le32_to_cpu(wsec);
- if (wsec & WEP_ENABLED) {
- /* Just select a new current key */
- index = (u32) key_idx;
- index = cpu_to_le32(index);
- err = brcmf_dev_ioctl(dev, BRCMF_C_SET_KEY_PRIMARY, &index,
- sizeof(index));
- if (unlikely(err))
- WL_ERR("error (%d)\n", err);
- }
-done:
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32
-brcmf_add_keyext(struct wiphy *wiphy, struct net_device *dev,
- u8 key_idx, const u8 *mac_addr, struct key_params *params)
-{
- struct brcmf_wsec_key key;
- s32 err = 0;
-
- memset(&key, 0, sizeof(key));
- key.index = (u32) key_idx;
- /* Instead of bcast for ea address for default wep keys,
- driver needs it to be Null */
- if (!is_multicast_ether_addr(mac_addr))
- memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
- key.len = (u32) params->key_len;
- /* check for key index change */
- if (key.len == 0) {
- /* key delete */
- swap_key_from_BE(&key);
- err = brcmf_dev_ioctl(dev, BRCMF_C_SET_KEY, &key, sizeof(key));
- if (unlikely(err)) {
- WL_ERR("key delete error (%d)\n", err);
- return err;
- }
- } else {
- if (key.len > sizeof(key.data)) {
- WL_ERR("Invalid key length (%d)\n", key.len);
- return -EINVAL;
- }
-
- WL_CONN("Setting the key index %d\n", key.index);
- memcpy(key.data, params->key, key.len);
-
- if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
- u8 keybuf[8];
- memcpy(keybuf, &key.data[24], sizeof(keybuf));
- memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
- memcpy(&key.data[16], keybuf, sizeof(keybuf));
- }
-
- /* if IW_ENCODE_EXT_RX_SEQ_VALID set */
- if (params->seq && params->seq_len == 6) {
- /* rx iv */
- u8 *ivptr;
- ivptr = (u8 *) params->seq;
- key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
- (ivptr[3] << 8) | ivptr[2];
- key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
- key.iv_initialized = true;
- }
-
- switch (params->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- key.algo = CRYPTO_ALGO_WEP1;
- WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- key.algo = CRYPTO_ALGO_WEP128;
- WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- key.algo = CRYPTO_ALGO_TKIP;
- WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- key.algo = CRYPTO_ALGO_AES_CCM;
- WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- key.algo = CRYPTO_ALGO_AES_CCM;
- WL_CONN("WLAN_CIPHER_SUITE_CCMP\n");
- break;
- default:
- WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
- return -EINVAL;
- }
- swap_key_from_BE(&key);
-
- brcmf_netdev_wait_pend8021x(dev);
- err = brcmf_dev_ioctl(dev, BRCMF_C_SET_KEY, &key, sizeof(key));
- if (unlikely(err)) {
- WL_ERR("WLC_SET_KEY error (%d)\n", err);
- return err;
- }
- }
- return err;
-}
-
-static s32
-brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_idx, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
-{
- struct brcmf_wsec_key key;
- s32 val;
- s32 wsec;
- s32 err = 0;
- u8 keybuf[8];
-
- WL_TRACE("Enter\n");
- WL_CONN("key index (%d)\n", key_idx);
- CHECK_SYS_UP();
-
- if (mac_addr) {
- WL_TRACE("Exit");
- return brcmf_add_keyext(wiphy, dev, key_idx, mac_addr, params);
- }
- memset(&key, 0, sizeof(key));
-
- key.len = (u32) params->key_len;
- key.index = (u32) key_idx;
-
- if (unlikely(key.len > sizeof(key.data))) {
- WL_ERR("Too long key length (%u)\n", key.len);
- err = -EINVAL;
- goto done;
- }
- memcpy(key.data, params->key, key.len);
-
- key.flags = BRCMF_PRIMARY_KEY;
- switch (params->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- key.algo = CRYPTO_ALGO_WEP1;
- WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- key.algo = CRYPTO_ALGO_WEP128;
- WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- memcpy(keybuf, &key.data[24], sizeof(keybuf));
- memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
- memcpy(&key.data[16], keybuf, sizeof(keybuf));
- key.algo = CRYPTO_ALGO_TKIP;
- WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- key.algo = CRYPTO_ALGO_AES_CCM;
- WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- key.algo = CRYPTO_ALGO_AES_CCM;
- WL_CONN("WLAN_CIPHER_SUITE_CCMP\n");
- break;
- default:
- WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
- err = -EINVAL;
- goto done;
- }
-
- /* Set the new key/index */
- swap_key_from_BE(&key);
- err = brcmf_dev_ioctl(dev, BRCMF_C_SET_KEY, &key, sizeof(key));
- if (unlikely(err)) {
- WL_ERR("WLC_SET_KEY error (%d)\n", err);
- goto done;
- }
-
- val = WEP_ENABLED;
- err = brcmf_dev_intvar_get(dev, "wsec", &wsec);
- if (unlikely(err)) {
- WL_ERR("get wsec error (%d)\n", err);
- goto done;
- }
- wsec &= ~(WEP_ENABLED);
- wsec |= val;
- err = brcmf_dev_intvar_set(dev, "wsec", wsec);
- if (unlikely(err)) {
- WL_ERR("set wsec error (%d)\n", err);
- goto done;
- }
-
- val = 1; /* assume shared key. otherwise 0 */
- val = cpu_to_le32(val);
- err = brcmf_dev_ioctl(dev, BRCMF_C_SET_AUTH, &val, sizeof(val));
- if (unlikely(err))
- WL_ERR("WLC_SET_AUTH error (%d)\n", err);
-done:
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32
-brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_idx, bool pairwise, const u8 *mac_addr)
-{
- struct brcmf_wsec_key key;
- s32 err = 0;
- s32 val;
- s32 wsec;
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
- memset(&key, 0, sizeof(key));
-
- key.index = (u32) key_idx;
- key.flags = BRCMF_PRIMARY_KEY;
- key.algo = CRYPTO_ALGO_OFF;
-
- WL_CONN("key index (%d)\n", key_idx);
- /* Set the new key/index */
- swap_key_from_BE(&key);
- err = brcmf_dev_ioctl(dev, BRCMF_C_SET_KEY, &key, sizeof(key));
- if (unlikely(err)) {
- if (err == -EINVAL) {
- if (key.index >= DOT11_MAX_DEFAULT_KEYS)
- /* we ignore this key index in this case */
- WL_ERR("invalid key index (%d)\n", key_idx);
- } else
- WL_ERR("WLC_SET_KEY error (%d)\n", err);
-
- /* Ignore this error, may happen during DISASSOC */
- err = -EAGAIN;
- goto done;
- }
-
- val = 0;
- err = brcmf_dev_intvar_get(dev, "wsec", &wsec);
- if (unlikely(err)) {
- WL_ERR("get wsec error (%d)\n", err);
- /* Ignore this error, may happen during DISASSOC */
- err = -EAGAIN;
- goto done;
- }
- wsec &= ~(WEP_ENABLED);
- wsec |= val;
- err = brcmf_dev_intvar_set(dev, "wsec", wsec);
- if (unlikely(err)) {
- WL_ERR("set wsec error (%d)\n", err);
- /* Ignore this error, may happen during DISASSOC */
- err = -EAGAIN;
- goto done;
- }
-
- val = 0; /* assume open key. otherwise 1 */
- val = cpu_to_le32(val);
- err = brcmf_dev_ioctl(dev, BRCMF_C_SET_AUTH, &val, sizeof(val));
- if (unlikely(err)) {
- WL_ERR("WLC_SET_AUTH error (%d)\n", err);
- /* Ignore this error, may happen during DISASSOC */
- err = -EAGAIN;
- }
-done:
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32
-brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
- void (*callback) (void *cookie, struct key_params * params))
-{
- struct key_params params;
- struct brcmf_wsec_key key;
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct brcmf_cfg80211_security *sec;
- s32 wsec;
- s32 err = 0;
-
- WL_TRACE("Enter\n");
- WL_CONN("key index (%d)\n", key_idx);
- CHECK_SYS_UP();
-
- memset(&key, 0, sizeof(key));
- key.index = key_idx;
- swap_key_to_BE(&key);
- memset(&params, 0, sizeof(params));
- params.key_len = (u8) min_t(u8, WLAN_MAX_KEY_LEN, key.len);
- memcpy(params.key, key.data, params.key_len);
-
- err = brcmf_dev_ioctl(dev, BRCMF_C_GET_WSEC, &wsec, sizeof(wsec));
- if (unlikely(err)) {
- WL_ERR("WLC_GET_WSEC error (%d)\n", err);
- /* Ignore this error, may happen during DISASSOC */
- err = -EAGAIN;
- goto done;
- }
- wsec = le32_to_cpu(wsec);
- switch (wsec) {
- case WEP_ENABLED:
- sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
- if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
- params.cipher = WLAN_CIPHER_SUITE_WEP40;
- WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
- } else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
- params.cipher = WLAN_CIPHER_SUITE_WEP104;
- WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
- }
- break;
- case TKIP_ENABLED:
- params.cipher = WLAN_CIPHER_SUITE_TKIP;
- WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
- break;
- case AES_ENABLED:
- params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
- WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
- break;
- default:
- WL_ERR("Invalid algo (0x%x)\n", wsec);
- err = -EINVAL;
- goto done;
- }
- callback(cookie, &params);
-
-done:
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32
-brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
- struct net_device *dev, u8 key_idx)
-{
- WL_INFO("Not supported\n");
-
- CHECK_SYS_UP();
- return -EOPNOTSUPP;
-}
-
-static s32
-brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
-{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct brcmf_scb_val scb_val;
- int rssi;
- s32 rate;
- s32 err = 0;
- u8 *bssid = brcmf_read_prof(cfg_priv, WL_PROF_BSSID);
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
-
- if (unlikely
- (memcmp(mac, bssid, ETH_ALEN))) {
- WL_ERR("Wrong Mac address cfg_mac-%X:%X:%X:%X:%X:%X"
- "wl_bssid-%X:%X:%X:%X:%X:%X\n",
- mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
- bssid[0], bssid[1], bssid[2], bssid[3],
- bssid[4], bssid[5]);
- err = -ENOENT;
- goto done;
- }
-
- /* Report the current tx rate */
- err = brcmf_dev_ioctl(dev, BRCMF_C_GET_RATE, &rate, sizeof(rate));
- if (err) {
- WL_ERR("Could not get rate (%d)\n", err);
- } else {
- rate = le32_to_cpu(rate);
- sinfo->filled |= STATION_INFO_TX_BITRATE;
- sinfo->txrate.legacy = rate * 5;
- WL_CONN("Rate %d Mbps\n", rate / 2);
- }
-
- if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) {
- scb_val.val = 0;
- err = brcmf_dev_ioctl(dev, BRCMF_C_GET_RSSI, &scb_val,
- sizeof(struct brcmf_scb_val));
- if (unlikely(err)) {
- WL_ERR("Could not get rssi (%d)\n", err);
- }
- rssi = le32_to_cpu(scb_val.val);
- sinfo->filled |= STATION_INFO_SIGNAL;
- sinfo->signal = rssi;
- WL_CONN("RSSI %d dBm\n", rssi);
- }
-
-done:
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32
-brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
- bool enabled, s32 timeout)
-{
- s32 pm;
- s32 err = 0;
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
-
- pm = enabled ? PM_FAST : PM_OFF;
- pm = cpu_to_le32(pm);
- WL_INFO("power save %s\n", (pm ? "enabled" : "disabled"));
-
- err = brcmf_dev_ioctl(dev, BRCMF_C_SET_PM, &pm, sizeof(pm));
- if (unlikely(err)) {
- if (err == -ENODEV)
- WL_ERR("net_device is not ready yet\n");
- else
- WL_ERR("error (%d)\n", err);
- }
- WL_TRACE("Exit\n");
- return err;
-}
-
-static __used u32 brcmf_find_msb(u16 bit16)
-{
- u32 ret = 0;
-
- if (bit16 & 0xff00) {
- ret += 8;
- bit16 >>= 8;
- }
-
- if (bit16 & 0xf0) {
- ret += 4;
- bit16 >>= 4;
- }
-
- if (bit16 & 0xc) {
- ret += 2;
- bit16 >>= 2;
- }
-
- if (bit16 & 2)
- ret += bit16 & 2;
- else if (bit16)
- ret += bit16;
-
- return ret;
-}
-
-static s32
-brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev,
- const u8 *addr,
- const struct cfg80211_bitrate_mask *mask)
-{
- struct wl_rateset rateset;
- s32 rate;
- s32 val;
- s32 err_bg;
- s32 err_a;
- u32 legacy;
- s32 err = 0;
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
-
- /* addr param is always NULL. ignore it */
- /* Get current rateset */
- err = brcmf_dev_ioctl(dev, BRCM_GET_CURR_RATESET, &rateset,
- sizeof(rateset));
- if (unlikely(err)) {
- WL_ERR("could not get current rateset (%d)\n", err);
- goto done;
- }
-
- rateset.count = le32_to_cpu(rateset.count);
-
- legacy = brcmf_find_msb(mask->control[IEEE80211_BAND_2GHZ].legacy);
- if (!legacy)
- legacy = brcmf_find_msb(mask->control[IEEE80211_BAND_5GHZ].legacy);
-
- val = wl_g_rates[legacy - 1].bitrate * 100000;
-
- if (val < rateset.count)
- /* Select rate by rateset index */
- rate = rateset.rates[val] & 0x7f;
- else
- /* Specified rate in bps */
- rate = val / 500000;
-
- WL_CONN("rate %d mbps\n", rate / 2);
-
- /*
- *
- * Set rate override,
- * Since the is a/b/g-blind, both a/bg_rate are enforced.
- */
- err_bg = brcmf_dev_intvar_set(dev, "bg_rate", rate);
- err_a = brcmf_dev_intvar_set(dev, "a_rate", rate);
- if (unlikely(err_bg && err_a)) {
- WL_ERR("could not set fixed rate (%d) (%d)\n", err_bg, err_a);
- err = err_bg | err_a;
- }
-
-done:
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
-{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-
- /*
- * Check for WL_STATUS_READY before any function call which
- * could result is bus access. Don't block the resume for
- * any driver error conditions
- */
- WL_TRACE("Enter\n");
-
-#if defined(CONFIG_PM_SLEEP)
- atomic_set(&brcmf_mmc_suspend, false);
-#endif /* defined(CONFIG_PM_SLEEP) */
-
- if (test_bit(WL_STATUS_READY, &cfg_priv->status))
- brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
-
- WL_TRACE("Exit\n");
- return 0;
-}
-
-static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
- struct cfg80211_wowlan *wow)
-{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
-
- WL_TRACE("Enter\n");
-
- /*
- * Check for WL_STATUS_READY before any function call which
- * could result is bus access. Don't block the suspend for
- * any driver error conditions
- */
-
- /*
- * While going to suspend if associated with AP disassociate
- * from AP to save power while system is in suspended state
- */
- if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) ||
- test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) &&
- test_bit(WL_STATUS_READY, &cfg_priv->status)) {
- WL_INFO("Disassociating from AP"
- " while entering suspend state\n");
- brcmf_link_down(cfg_priv);
-
- /*
- * Make sure WPA_Supplicant receives all the event
- * generated due to DISASSOC call to the fw to keep
- * the state fw and WPA_Supplicant state consistent
- */
- rtnl_unlock();
- brcmf_delay(500);
- rtnl_lock();
- }
-
- set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
- if (test_bit(WL_STATUS_READY, &cfg_priv->status))
- brcmf_term_iscan(cfg_priv);
-
- if (cfg_priv->scan_request) {
- /* Indidate scan abort to cfg80211 layer */
- WL_INFO("Terminating scan in progress\n");
- cfg80211_scan_done(cfg_priv->scan_request, true);
- cfg_priv->scan_request = NULL;
- }
- clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
- clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
-
- /* Turn off watchdog timer */
- if (test_bit(WL_STATUS_READY, &cfg_priv->status)) {
- WL_INFO("Enable MPC\n");
- brcmf_set_mpc(ndev, 1);
- }
-
-#if defined(CONFIG_PM_SLEEP)
- atomic_set(&brcmf_mmc_suspend, true);
-#endif /* defined(CONFIG_PM_SLEEP) */
-
- WL_TRACE("Exit\n");
-
- return 0;
-}
-
-static __used s32
-brcmf_update_pmklist(struct net_device *dev,
- struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
-{
- int i, j;
-
- WL_CONN("No of elements %d\n", pmk_list->pmkids.npmkid);
- for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
- WL_CONN("PMKID[%d]: %pM =\n", i,
- &pmk_list->pmkids.pmkid[i].BSSID);
- for (j = 0; j < WLAN_PMKID_LEN; j++)
- WL_CONN("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]);
- }
-
- if (likely(!err))
- brcmf_dev_bufvar_set(dev, "pmkid_info", (char *)pmk_list,
- sizeof(*pmk_list));
-
- return err;
-}
-
-static s32
-brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_pmksa *pmksa)
-{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct _pmkid_list *pmkids = &cfg_priv->pmk_list->pmkids;
- s32 err = 0;
- int i;
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
-
- for (i = 0; i < pmkids->npmkid; i++)
- if (!memcmp(pmksa->bssid, pmkids->pmkid[i].BSSID, ETH_ALEN))
- break;
- if (i < WL_NUM_PMKIDS_MAX) {
- memcpy(pmkids->pmkid[i].BSSID, pmksa->bssid, ETH_ALEN);
- memcpy(pmkids->pmkid[i].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
- if (i == pmkids->npmkid)
- pmkids->npmkid++;
- } else
- err = -EINVAL;
-
- WL_CONN("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
- pmkids->pmkid[pmkids->npmkid].BSSID);
- for (i = 0; i < WLAN_PMKID_LEN; i++)
- WL_CONN("%02x\n", pmkids->pmkid[pmkids->npmkid].PMKID[i]);
-
- err = brcmf_update_pmklist(dev, cfg_priv->pmk_list, err);
-
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32
-brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_pmksa *pmksa)
-{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct _pmkid_list pmkid;
- s32 err = 0;
- int i;
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
- memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN);
- memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
-
- WL_CONN("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
- &pmkid.pmkid[0].BSSID);
- for (i = 0; i < WLAN_PMKID_LEN; i++)
- WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]);
-
- for (i = 0; i < cfg_priv->pmk_list->pmkids.npmkid; i++)
- if (!memcmp
- (pmksa->bssid, &cfg_priv->pmk_list->pmkids.pmkid[i].BSSID,
- ETH_ALEN))
- break;
-
- if ((cfg_priv->pmk_list->pmkids.npmkid > 0)
- && (i < cfg_priv->pmk_list->pmkids.npmkid)) {
- memset(&cfg_priv->pmk_list->pmkids.pmkid[i], 0,
- sizeof(pmkid_t));
- for (; i < (cfg_priv->pmk_list->pmkids.npmkid - 1); i++) {
- memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].BSSID,
- &cfg_priv->pmk_list->pmkids.pmkid[i + 1].BSSID,
- ETH_ALEN);
- memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].PMKID,
- &cfg_priv->pmk_list->pmkids.pmkid[i + 1].PMKID,
- WLAN_PMKID_LEN);
- }
- cfg_priv->pmk_list->pmkids.npmkid--;
- } else
- err = -EINVAL;
-
- err = brcmf_update_pmklist(dev, cfg_priv->pmk_list, err);
-
- WL_TRACE("Exit\n");
- return err;
-
-}
-
-static s32
-brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
-{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- s32 err = 0;
-
- WL_TRACE("Enter\n");
- CHECK_SYS_UP();
-
- memset(cfg_priv->pmk_list, 0, sizeof(*cfg_priv->pmk_list));
- err = brcmf_update_pmklist(dev, cfg_priv->pmk_list, err);
-
- WL_TRACE("Exit\n");
- return err;
-
-}
-
-static struct cfg80211_ops wl_cfg80211_ops = {
- .change_virtual_intf = brcmf_cfg80211_change_iface,
- .scan = brcmf_cfg80211_scan,
- .set_wiphy_params = brcmf_cfg80211_set_wiphy_params,
- .join_ibss = brcmf_cfg80211_join_ibss,
- .leave_ibss = brcmf_cfg80211_leave_ibss,
- .get_station = brcmf_cfg80211_get_station,
- .set_tx_power = brcmf_cfg80211_set_tx_power,
- .get_tx_power = brcmf_cfg80211_get_tx_power,
- .add_key = brcmf_cfg80211_add_key,
- .del_key = brcmf_cfg80211_del_key,
- .get_key = brcmf_cfg80211_get_key,
- .set_default_key = brcmf_cfg80211_config_default_key,
- .set_default_mgmt_key = brcmf_cfg80211_config_default_mgmt_key,
- .set_power_mgmt = brcmf_cfg80211_set_power_mgmt,
- .set_bitrate_mask = brcmf_cfg80211_set_bitrate_mask,
- .connect = brcmf_cfg80211_connect,
- .disconnect = brcmf_cfg80211_disconnect,
- .suspend = brcmf_cfg80211_suspend,
- .resume = brcmf_cfg80211_resume,
- .set_pmksa = brcmf_cfg80211_set_pmksa,
- .del_pmksa = brcmf_cfg80211_del_pmksa,
- .flush_pmksa = brcmf_cfg80211_flush_pmksa
-};
-
-static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
-{
- s32 err = 0;
-
- switch (mode) {
- case WL_MODE_BSS:
- return NL80211_IFTYPE_STATION;
- case WL_MODE_IBSS:
- return NL80211_IFTYPE_ADHOC;
- default:
- return NL80211_IFTYPE_UNSPECIFIED;
- }
-
- return err;
-}
-
-static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
- struct device *dev)
-{
- struct wireless_dev *wdev;
- s32 err = 0;
-
- wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
- if (unlikely(!wdev)) {
- WL_ERR("Could not allocate wireless device\n");
- return ERR_PTR(-ENOMEM);
- }
- wdev->wiphy =
- wiphy_new(&wl_cfg80211_ops,
- sizeof(struct brcmf_cfg80211_priv) + sizeof_iface);
- if (unlikely(!wdev->wiphy)) {
- WL_ERR("Couldn not allocate wiphy device\n");
- err = -ENOMEM;
- goto wiphy_new_out;
- }
- set_wiphy_dev(wdev->wiphy, dev);
- wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
- wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
- wdev->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
- * it as 11a by default.
- * This will be updated with
- * 11n phy tables in
- * "ifconfig up"
- * if phy has 11n capability
- */
- wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- wdev->wiphy->cipher_suites = __wl_cipher_suites;
- wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
-#ifndef WL_POWERSAVE_DISABLED
- wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power
- * save mode
- * by default
- */
-#else
- wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-#endif /* !WL_POWERSAVE_DISABLED */
- err = wiphy_register(wdev->wiphy);
- if (unlikely(err < 0)) {
- WL_ERR("Couldn not register wiphy device (%d)\n", err);
- goto wiphy_register_out;
- }
- return wdev;
-
-wiphy_register_out:
- wiphy_free(wdev->wiphy);
-
-wiphy_new_out:
- kfree(wdev);
-
- return ERR_PTR(err);
-}
-
-static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct wireless_dev *wdev = cfg_to_wdev(cfg_priv);
-
- if (unlikely(!wdev)) {
- WL_ERR("wdev is invalid\n");
- return;
- }
- wiphy_unregister(wdev->wiphy);
- wiphy_free(wdev->wiphy);
- kfree(wdev);
- cfg_to_wdev(cfg_priv) = NULL;
-}
-
-static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct brcmf_scan_results *bss_list;
- struct brcmf_bss_info *bi = NULL; /* must be initialized */
- s32 err = 0;
- int i;
-
- bss_list = cfg_priv->bss_list;
- if (unlikely(bss_list->version != BRCMF_BSS_INFO_VERSION)) {
- WL_ERR("Version %d != WL_BSS_INFO_VERSION\n",
- bss_list->version);
- return -EOPNOTSUPP;
- }
- WL_SCAN("scanned AP count (%d)\n", bss_list->count);
- bi = next_bss(bss_list, bi);
- for_each_bss(bss_list, bi, i) {
- err = brcmf_inform_single_bss(cfg_priv, bi);
- if (unlikely(err))
- break;
- }
- return err;
-}
-
-
-static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
- struct brcmf_bss_info *bi)
-{
- struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
- struct ieee80211_channel *notify_channel;
- struct cfg80211_bss *bss;
- struct ieee80211_supported_band *band;
- s32 err = 0;
- u16 channel;
- u32 freq;
- u64 notify_timestamp;
- u16 notify_capability;
- u16 notify_interval;
- u8 *notify_ie;
- size_t notify_ielen;
- s32 notify_signal;
-
- if (unlikely(le32_to_cpu(bi->length) > WL_BSS_INFO_MAX)) {
- WL_ERR("Bss info is larger than buffer. Discarding\n");
- return 0;
- }
-
- channel = bi->ctl_ch ? bi->ctl_ch :
- CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
-
- if (channel <= CH_MAX_2G_CHANNEL)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
- else
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
-
- freq = ieee80211_channel_to_frequency(channel, band->band);
- notify_channel = ieee80211_get_channel(wiphy, freq);
-
- notify_timestamp = jiffies_to_msecs(jiffies)*1000; /* uSec */
- notify_capability = le16_to_cpu(bi->capability);
- notify_interval = le16_to_cpu(bi->beacon_period);
- notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
- notify_ielen = le16_to_cpu(bi->ie_length);
- notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
-
- WL_CONN("bssid: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- bi->BSSID[0], bi->BSSID[1], bi->BSSID[2],
- bi->BSSID[3], bi->BSSID[4], bi->BSSID[5]);
- WL_CONN("Channel: %d(%d)\n", channel, freq);
- WL_CONN("Capability: %X\n", notify_capability);
- WL_CONN("Beacon interval: %d\n", notify_interval);
- WL_CONN("Signal: %d\n", notify_signal);
- WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp);
-
- bss = cfg80211_inform_bss(wiphy, notify_channel, (const u8 *)bi->BSSID,
- notify_timestamp, notify_capability, notify_interval, notify_ie,
- notify_ielen, notify_signal, GFP_KERNEL);
-
- if (unlikely(!bss)) {
- WL_ERR("cfg80211_inform_bss_frame error\n");
- return -EINVAL;
- }
-
- return err;
-}
-
-static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
- struct net_device *dev, const u8 *bssid)
-{
- struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
- struct ieee80211_channel *notify_channel;
- struct brcmf_bss_info *bi = NULL;
- struct ieee80211_supported_band *band;
- u8 *buf = NULL;
- s32 err = 0;
- u16 channel;
- u32 freq;
- u64 notify_timestamp;
- u16 notify_capability;
- u16 notify_interval;
- u8 *notify_ie;
- size_t notify_ielen;
- s32 notify_signal;
-
- WL_TRACE("Enter\n");
-
- buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
- if (buf == NULL) {
- WL_ERR("kzalloc() failed\n");
- err = -ENOMEM;
- goto CleanUp;
- }
-
- *(u32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
-
- err = brcmf_dev_ioctl(dev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
- if (unlikely(err)) {
- WL_ERR("WLC_GET_BSS_INFO failed: %d\n", err);
- goto CleanUp;
- }
-
- bi = (struct brcmf_bss_info *)(buf + 4);
-
- channel = bi->ctl_ch ? bi->ctl_ch :
- CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
-
- if (channel <= CH_MAX_2G_CHANNEL)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
- else
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
-
- freq = ieee80211_channel_to_frequency(channel, band->band);
- notify_channel = ieee80211_get_channel(wiphy, freq);
-
- notify_timestamp = jiffies_to_msecs(jiffies)*1000; /* uSec */
- notify_capability = le16_to_cpu(bi->capability);
- notify_interval = le16_to_cpu(bi->beacon_period);
- notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
- notify_ielen = le16_to_cpu(bi->ie_length);
- notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
-
- WL_CONN("channel: %d(%d)\n", channel, freq);
- WL_CONN("capability: %X\n", notify_capability);
- WL_CONN("beacon interval: %d\n", notify_interval);
- WL_CONN("signal: %d\n", notify_signal);
- WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp);
-
- cfg80211_inform_bss(wiphy, notify_channel, bssid,
- notify_timestamp, notify_capability, notify_interval,
- notify_ie, notify_ielen, notify_signal, GFP_KERNEL);
-
-CleanUp:
-
- kfree(buf);
-
- WL_TRACE("Exit\n");
-
- return err;
-}
-
-static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv,
- const struct brcmf_event_msg *e)
-{
- u32 event = be32_to_cpu(e->event_type);
- u32 status = be32_to_cpu(e->status);
-
- if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
- WL_CONN("Processing set ssid\n");
- cfg_priv->link_up = true;
- return true;
- }
-
- return false;
-}
-
-static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv,
- const struct brcmf_event_msg *e)
-{
- u32 event = be32_to_cpu(e->event_type);
- u16 flags = be16_to_cpu(e->flags);
-
- if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) {
- WL_CONN("Processing link down\n");
- return true;
- }
- return false;
-}
-
-static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv,
- const struct brcmf_event_msg *e)
-{
- u32 event = be32_to_cpu(e->event_type);
- u32 status = be32_to_cpu(e->status);
-
- if (event == BRCMF_E_LINK && status == BRCMF_E_STATUS_NO_NETWORKS) {
- WL_CONN("Processing Link %s & no network found\n",
- be16_to_cpu(e->flags) & BRCMF_EVENT_MSG_LINK ?
- "up" : "down");
- return true;
- }
-
- if (event == BRCMF_E_SET_SSID && status != BRCMF_E_STATUS_SUCCESS) {
- WL_CONN("Processing connecting & no network found\n");
- return true;
- }
-
- return false;
-}
-
-static s32
-brcmf_notify_connect_status(struct brcmf_cfg80211_priv *cfg_priv,
- struct net_device *ndev,
- const struct brcmf_event_msg *e, void *data)
-{
- s32 err = 0;
-
- if (brcmf_is_linkup(cfg_priv, e)) {
- WL_CONN("Linkup\n");
- if (brcmf_is_ibssmode(cfg_priv)) {
- brcmf_update_prof(cfg_priv, NULL, (void *)e->addr,
- WL_PROF_BSSID);
- wl_inform_ibss(cfg_priv, ndev, e->addr);
- cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
- clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
- set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
- } else
- brcmf_bss_connect_done(cfg_priv, ndev, e, data, true);
- } else if (brcmf_is_linkdown(cfg_priv, e)) {
- WL_CONN("Linkdown\n");
- if (brcmf_is_ibssmode(cfg_priv)) {
- clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
- if (test_and_clear_bit(WL_STATUS_CONNECTED,
- &cfg_priv->status))
- brcmf_link_down(cfg_priv);
- } else {
- brcmf_bss_connect_done(cfg_priv, ndev, e, data, false);
- if (test_and_clear_bit(WL_STATUS_CONNECTED,
- &cfg_priv->status)) {
- cfg80211_disconnected(ndev, 0, NULL, 0,
- GFP_KERNEL);
- brcmf_link_down(cfg_priv);
- }
- }
- brcmf_init_prof(cfg_priv->profile);
- } else if (brcmf_is_nonetwork(cfg_priv, e)) {
- if (brcmf_is_ibssmode(cfg_priv))
- clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
- else
- brcmf_bss_connect_done(cfg_priv, ndev, e, data, false);
- }
-
- return err;
-}
-
-static s32
-brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv,
- struct net_device *ndev,
- const struct brcmf_event_msg *e, void *data)
-{
- s32 err = 0;
- u32 event = be32_to_cpu(e->event_type);
- u32 status = be32_to_cpu(e->status);
-
- if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
- if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status))
- brcmf_bss_roaming_done(cfg_priv, ndev, e, data);
- else
- brcmf_bss_connect_done(cfg_priv, ndev, e, data, true);
- }
-
- return err;
-}
-
-static __used s32
-brcmf_dev_bufvar_set(struct net_device *dev, s8 *name, s8 *buf, s32 len)
-{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
- u32 buflen;
-
- buflen = brcmu_mkiovar(name, buf, len, cfg_priv->ioctl_buf,
- WL_IOCTL_LEN_MAX);
- BUG_ON(!buflen);
-
- return brcmf_dev_ioctl(dev, BRCMF_C_SET_VAR, cfg_priv->ioctl_buf,
- buflen);
-}
-
-static s32
-brcmf_dev_bufvar_get(struct net_device *dev, s8 *name, s8 *buf,
- s32 buf_len)
-{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
- u32 len;
- s32 err = 0;
-
- len = brcmu_mkiovar(name, NULL, 0, cfg_priv->ioctl_buf,
- WL_IOCTL_LEN_MAX);
- BUG_ON(!len);
- err = brcmf_dev_ioctl(dev, BRCMF_C_GET_VAR, (void *)cfg_priv->ioctl_buf,
- WL_IOCTL_LEN_MAX);
- if (unlikely(err)) {
- WL_ERR("error (%d)\n", err);
- return err;
- }
- memcpy(buf, cfg_priv->ioctl_buf, buf_len);
-
- return err;
-}
-
-static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
- struct brcmf_cfg80211_assoc_ielen *assoc_info;
- struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
- u32 req_len;
- u32 resp_len;
- s32 err = 0;
-
- brcmf_clear_assoc_ies(cfg_priv);
-
- err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg_priv->extra_buf,
- WL_ASSOC_INFO_MAX);
- if (unlikely(err)) {
- WL_ERR("could not get assoc info (%d)\n", err);
- return err;
- }
- assoc_info = (struct brcmf_cfg80211_assoc_ielen *)cfg_priv->extra_buf;
- req_len = assoc_info->req_len;
- resp_len = assoc_info->resp_len;
- if (req_len) {
- err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies",
- cfg_priv->extra_buf,
- WL_ASSOC_INFO_MAX);
- if (unlikely(err)) {
- WL_ERR("could not get assoc req (%d)\n", err);
- return err;
- }
- conn_info->req_ie_len = req_len;
- conn_info->req_ie =
- kmemdup(cfg_priv->extra_buf, conn_info->req_ie_len,
- GFP_KERNEL);
- } else {
- conn_info->req_ie_len = 0;
- conn_info->req_ie = NULL;
- }
- if (resp_len) {
- err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies",
- cfg_priv->extra_buf,
- WL_ASSOC_INFO_MAX);
- if (unlikely(err)) {
- WL_ERR("could not get assoc resp (%d)\n", err);
- return err;
- }
- conn_info->resp_ie_len = resp_len;
- conn_info->resp_ie =
- kmemdup(cfg_priv->extra_buf, conn_info->resp_ie_len,
- GFP_KERNEL);
- } else {
- conn_info->resp_ie_len = 0;
- conn_info->resp_ie = NULL;
- }
- WL_CONN("req len (%d) resp len (%d)\n",
- conn_info->req_ie_len, conn_info->resp_ie_len);
-
- return err;
-}
-
-static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
-
- kfree(conn_info->req_ie);
- conn_info->req_ie = NULL;
- conn_info->req_ie_len = 0;
- kfree(conn_info->resp_ie);
- conn_info->resp_ie = NULL;
- conn_info->resp_ie_len = 0;
-}
-
-
-static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
- size_t *join_params_size)
-{
- chanspec_t chanspec = 0;
-
- if (ch != 0) {
- join_params->params.chanspec_num = 1;
- join_params->params.chanspec_list[0] = ch;
-
- if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
- chanspec |= WL_CHANSPEC_BAND_2G;
- else
- chanspec |= WL_CHANSPEC_BAND_5G;
-
- chanspec |= WL_CHANSPEC_BW_20;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
-
- *join_params_size += BRCMF_ASSOC_PARAMS_FIXED_SIZE +
- join_params->params.chanspec_num * sizeof(chanspec_t);
-
- join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
- join_params->params.chanspec_list[0] |= chanspec;
- join_params->params.chanspec_list[0] =
- cpu_to_le16(join_params->params.chanspec_list[0]);
-
- join_params->params.chanspec_num =
- cpu_to_le32(join_params->params.chanspec_num);
-
- WL_CONN("join_params->params.chanspec_list[0]= %#X,"
- "channel %d, chanspec %#X\n",
- join_params->params.chanspec_list[0], ch, chanspec);
- }
-}
-
-static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct brcmf_bss_info *bi;
- struct brcmf_ssid *ssid;
- struct brcmu_tlv *tim;
- u16 beacon_interval;
- u8 dtim_period;
- size_t ie_len;
- u8 *ie;
- s32 err = 0;
-
- WL_TRACE("Enter\n");
- if (brcmf_is_ibssmode(cfg_priv))
- return err;
-
- ssid = (struct brcmf_ssid *)brcmf_read_prof(cfg_priv, WL_PROF_SSID);
-
- *(u32 *)cfg_priv->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
- err = brcmf_dev_ioctl(cfg_to_ndev(cfg_priv), BRCMF_C_GET_BSS_INFO,
- cfg_priv->extra_buf, WL_EXTRA_BUF_MAX);
- if (unlikely(err)) {
- WL_ERR("Could not get bss info %d\n", err);
- goto update_bss_info_out;
- }
-
- bi = (struct brcmf_bss_info *)(cfg_priv->extra_buf + 4);
- err = brcmf_inform_single_bss(cfg_priv, bi);
- if (unlikely(err))
- goto update_bss_info_out;
-
- ie = ((u8 *)bi) + bi->ie_offset;
- ie_len = bi->ie_length;
- beacon_interval = cpu_to_le16(bi->beacon_period);
-
- tim = brcmu_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
- if (tim)
- dtim_period = tim->data[1];
- else {
- /*
- * active scan was done so we could not get dtim
- * information out of probe response.
- * so we speficially query dtim information to dongle.
- */
- u32 var;
- err = brcmf_dev_intvar_get(cfg_to_ndev(cfg_priv),
- "dtim_assoc", &var);
- if (unlikely(err)) {
- WL_ERR("wl dtim_assoc failed (%d)\n", err);
- goto update_bss_info_out;
- }
- dtim_period = (u8)var;
- }
-
- brcmf_update_prof(cfg_priv, NULL, &beacon_interval, WL_PROF_BEACONINT);
- brcmf_update_prof(cfg_priv, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
-
-update_bss_info_out:
- WL_TRACE("Exit");
- return err;
-}
-
-static s32
-brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
- struct net_device *ndev,
- const struct brcmf_event_msg *e, void *data)
-{
- struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
- s32 err = 0;
-
- WL_TRACE("Enter\n");
-
- brcmf_get_assoc_ies(cfg_priv);
- brcmf_update_prof(cfg_priv, NULL, &e->addr, WL_PROF_BSSID);
- brcmf_update_bss_info(cfg_priv);
-
- cfg80211_roamed(ndev, NULL,
- (u8 *)brcmf_read_prof(cfg_priv, WL_PROF_BSSID),
- conn_info->req_ie, conn_info->req_ie_len,
- conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
- WL_CONN("Report roaming result\n");
-
- set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32
-brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
- struct net_device *ndev, const struct brcmf_event_msg *e,
- void *data, bool completed)
-{
- struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
- s32 err = 0;
-
- WL_TRACE("Enter\n");
-
- if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) {
- if (completed) {
- brcmf_get_assoc_ies(cfg_priv);
- brcmf_update_prof(cfg_priv, NULL, &e->addr,
- WL_PROF_BSSID);
- brcmf_update_bss_info(cfg_priv);
- }
- cfg80211_connect_result(ndev,
- (u8 *)brcmf_read_prof(cfg_priv,
- WL_PROF_BSSID),
- conn_info->req_ie,
- conn_info->req_ie_len,
- conn_info->resp_ie,
- conn_info->resp_ie_len,
- completed ? WLAN_STATUS_SUCCESS :
- WLAN_STATUS_AUTH_TIMEOUT,
- GFP_KERNEL);
- if (completed)
- set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
- WL_CONN("Report connect result - connection %s\n",
- completed ? "succeeded" : "failed");
- }
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32
-brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv,
- struct net_device *ndev,
- const struct brcmf_event_msg *e, void *data)
-{
- u16 flags = be16_to_cpu(e->flags);
- enum nl80211_key_type key_type;
-
- rtnl_lock();
- if (flags & BRCMF_EVENT_MSG_GROUP)
- key_type = NL80211_KEYTYPE_GROUP;
- else
- key_type = NL80211_KEYTYPE_PAIRWISE;
-
- cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1,
- NULL, GFP_KERNEL);
- rtnl_unlock();
-
- return 0;
-}
-
-static s32
-brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
- struct net_device *ndev,
- const struct brcmf_event_msg *e, void *data)
-{
- struct brcmf_channel_info channel_inform;
- struct brcmf_scan_results *bss_list;
- u32 len = WL_SCAN_BUF_MAX;
- s32 err = 0;
- bool scan_abort = false;
-
- WL_TRACE("Enter\n");
-
- if (cfg_priv->iscan_on && cfg_priv->iscan_kickstart) {
- WL_TRACE("Exit\n");
- return brcmf_wakeup_iscan(cfg_to_iscan(cfg_priv));
- }
-
- if (unlikely(!test_and_clear_bit(WL_STATUS_SCANNING,
- &cfg_priv->status))) {
- WL_ERR("Scan complete while device not scanning\n");
- scan_abort = true;
- err = -EINVAL;
- goto scan_done_out;
- }
-
- err = brcmf_dev_ioctl(ndev, BRCMF_C_GET_CHANNEL, &channel_inform,
- sizeof(channel_inform));
- if (unlikely(err)) {
- WL_ERR("scan busy (%d)\n", err);
- scan_abort = true;
- goto scan_done_out;
- }
- channel_inform.scan_channel = le32_to_cpu(channel_inform.scan_channel);
- if (unlikely(channel_inform.scan_channel)) {
-
- WL_CONN("channel_inform.scan_channel (%d)\n",
- channel_inform.scan_channel);
- }
- cfg_priv->bss_list = cfg_priv->scan_results;
- bss_list = cfg_priv->bss_list;
- memset(bss_list, 0, len);
- bss_list->buflen = cpu_to_le32(len);
-
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SCAN_RESULTS, bss_list, len);
- if (unlikely(err)) {
- WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
- err = -EINVAL;
- scan_abort = true;
- goto scan_done_out;
- }
- bss_list->buflen = le32_to_cpu(bss_list->buflen);
- bss_list->version = le32_to_cpu(bss_list->version);
- bss_list->count = le32_to_cpu(bss_list->count);
-
- err = brcmf_inform_bss(cfg_priv);
- if (err) {
- scan_abort = true;
- goto scan_done_out;
- }
-
-scan_done_out:
- if (cfg_priv->scan_request) {
- WL_SCAN("calling cfg80211_scan_done\n");
- cfg80211_scan_done(cfg_priv->scan_request, scan_abort);
- brcmf_set_mpc(ndev, 1);
- cfg_priv->scan_request = NULL;
- }
-
- WL_TRACE("Exit\n");
-
- return err;
-}
-
-static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf)
-{
- conf->mode = (u32)-1;
- conf->frag_threshold = (u32)-1;
- conf->rts_threshold = (u32)-1;
- conf->retry_short = (u32)-1;
- conf->retry_long = (u32)-1;
- conf->tx_power = -1;
-}
-
-static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
-{
- memset(prof, 0, sizeof(*prof));
-}
-
-static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el)
-{
- memset(el, 0, sizeof(*el));
- el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status;
- el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status;
- el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status;
- el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status;
- el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status;
-}
-
-static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
-{
- cfg_priv->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
- if (unlikely(!cfg_priv->scan_results)) {
- WL_ERR("Scan results alloc failed\n");
- goto init_priv_mem_out;
- }
- cfg_priv->conf = kzalloc(sizeof(*cfg_priv->conf), GFP_KERNEL);
- if (unlikely(!cfg_priv->conf)) {
- WL_ERR("wl_conf alloc failed\n");
- goto init_priv_mem_out;
- }
- cfg_priv->profile = kzalloc(sizeof(*cfg_priv->profile), GFP_KERNEL);
- if (unlikely(!cfg_priv->profile)) {
- WL_ERR("wl_profile alloc failed\n");
- goto init_priv_mem_out;
- }
- cfg_priv->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
- if (unlikely(!cfg_priv->bss_info)) {
- WL_ERR("Bss information alloc failed\n");
- goto init_priv_mem_out;
- }
- cfg_priv->scan_req_int = kzalloc(sizeof(*cfg_priv->scan_req_int),
- GFP_KERNEL);
- if (unlikely(!cfg_priv->scan_req_int)) {
- WL_ERR("Scan req alloc failed\n");
- goto init_priv_mem_out;
- }
- cfg_priv->ioctl_buf = kzalloc(WL_IOCTL_LEN_MAX, GFP_KERNEL);
- if (unlikely(!cfg_priv->ioctl_buf)) {
- WL_ERR("Ioctl buf alloc failed\n");
- goto init_priv_mem_out;
- }
- cfg_priv->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
- if (unlikely(!cfg_priv->extra_buf)) {
- WL_ERR("Extra buf alloc failed\n");
- goto init_priv_mem_out;
- }
- cfg_priv->iscan = kzalloc(sizeof(*cfg_priv->iscan), GFP_KERNEL);
- if (unlikely(!cfg_priv->iscan)) {
- WL_ERR("Iscan buf alloc failed\n");
- goto init_priv_mem_out;
- }
- cfg_priv->pmk_list = kzalloc(sizeof(*cfg_priv->pmk_list), GFP_KERNEL);
- if (unlikely(!cfg_priv->pmk_list)) {
- WL_ERR("pmk list alloc failed\n");
- goto init_priv_mem_out;
- }
-
- return 0;
-
-init_priv_mem_out:
- brcmf_deinit_priv_mem(cfg_priv);
-
- return -ENOMEM;
-}
-
-static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
-{
- kfree(cfg_priv->scan_results);
- cfg_priv->scan_results = NULL;
- kfree(cfg_priv->bss_info);
- cfg_priv->bss_info = NULL;
- kfree(cfg_priv->conf);
- cfg_priv->conf = NULL;
- kfree(cfg_priv->profile);
- cfg_priv->profile = NULL;
- kfree(cfg_priv->scan_req_int);
- cfg_priv->scan_req_int = NULL;
- kfree(cfg_priv->ioctl_buf);
- cfg_priv->ioctl_buf = NULL;
- kfree(cfg_priv->extra_buf);
- cfg_priv->extra_buf = NULL;
- kfree(cfg_priv->iscan);
- cfg_priv->iscan = NULL;
- kfree(cfg_priv->pmk_list);
- cfg_priv->pmk_list = NULL;
-}
-
-static s32 brcmf_create_event_handler(struct brcmf_cfg80211_priv *cfg_priv)
-{
- sema_init(&cfg_priv->event_sync, 0);
- cfg_priv->event_tsk = kthread_run(brcmf_event_handler, cfg_priv,
- "wl_event_handler");
- if (IS_ERR(cfg_priv->event_tsk)) {
- cfg_priv->event_tsk = NULL;
- WL_ERR("failed to create event thread\n");
- return -ENOMEM;
- }
- return 0;
-}
-
-static void brcmf_destroy_event_handler(struct brcmf_cfg80211_priv *cfg_priv)
-{
- if (cfg_priv->event_tsk) {
- send_sig(SIGTERM, cfg_priv->event_tsk, 1);
- kthread_stop(cfg_priv->event_tsk);
- cfg_priv->event_tsk = NULL;
- }
-}
-
-static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
-
- if (cfg_priv->iscan_on && iscan->tsk) {
- iscan->state = WL_ISCAN_STATE_IDLE;
- send_sig(SIGTERM, iscan->tsk, 1);
- kthread_stop(iscan->tsk);
- iscan->tsk = NULL;
- }
-}
-
-static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
- bool aborted)
-{
- struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan);
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
-
- if (unlikely(!test_and_clear_bit(WL_STATUS_SCANNING,
- &cfg_priv->status))) {
- WL_ERR("Scan complete while device not scanning\n");
- return;
- }
- if (likely(cfg_priv->scan_request)) {
- WL_SCAN("ISCAN Completed scan: %s\n",
- aborted ? "Aborted" : "Done");
- cfg80211_scan_done(cfg_priv->scan_request, aborted);
- brcmf_set_mpc(ndev, 1);
- cfg_priv->scan_request = NULL;
- }
- cfg_priv->iscan_kickstart = false;
-}
-
-static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan)
-{
- if (likely(iscan->state != WL_ISCAN_STATE_IDLE)) {
- WL_SCAN("wake up iscan\n");
- up(&iscan->sync);
- return 0;
- }
-
- return -EIO;
-}
-
-static s32
-brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
- struct brcmf_scan_results **bss_list)
-{
- struct brcmf_iscan_results list;
- struct brcmf_scan_results *results;
- struct brcmf_iscan_results *list_buf;
- s32 err = 0;
-
- memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX);
- list_buf = (struct brcmf_iscan_results *)iscan->scan_buf;
- results = &list_buf->results;
- results->buflen = BRCMF_ISCAN_RESULTS_FIXED_SIZE;
- results->version = 0;
- results->count = 0;
-
- memset(&list, 0, sizeof(list));
- list.results.buflen = cpu_to_le32(WL_ISCAN_BUF_MAX);
- err = brcmf_dev_iovar_getbuf(iscan->dev, "iscanresults", &list,
- BRCMF_ISCAN_RESULTS_FIXED_SIZE, iscan->scan_buf,
- WL_ISCAN_BUF_MAX);
- if (unlikely(err)) {
- WL_ERR("error (%d)\n", err);
- return err;
- }
- results->buflen = le32_to_cpu(results->buflen);
- results->version = le32_to_cpu(results->version);
- results->count = le32_to_cpu(results->count);
- WL_SCAN("results->count = %d\n", results->count);
- WL_SCAN("results->buflen = %d\n", results->buflen);
- *status = le32_to_cpu(list_buf->status);
- *bss_list = results;
-
- return err;
-}
-
-static s32 brcmf_iscan_done(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
- s32 err = 0;
-
- iscan->state = WL_ISCAN_STATE_IDLE;
- rtnl_lock();
- brcmf_inform_bss(cfg_priv);
- brcmf_notify_iscan_complete(iscan, false);
- rtnl_unlock();
-
- return err;
-}
-
-static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
- s32 err = 0;
-
- /* Reschedule the timer */
- mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
- iscan->timer_on = 1;
-
- return err;
-}
-
-static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
- s32 err = 0;
-
- rtnl_lock();
- brcmf_inform_bss(cfg_priv);
- brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE);
- rtnl_unlock();
- /* Reschedule the timer */
- mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
- iscan->timer_on = 1;
-
- return err;
-}
-
-static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
- s32 err = 0;
-
- iscan->state = WL_ISCAN_STATE_IDLE;
- rtnl_lock();
- brcmf_notify_iscan_complete(iscan, true);
- rtnl_unlock();
-
- return err;
-}
-
-static s32 brcmf_iscan_thread(void *data)
-{
- struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
- struct brcmf_cfg80211_iscan_ctrl *iscan =
- (struct brcmf_cfg80211_iscan_ctrl *)data;
- struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan);
- struct brcmf_cfg80211_iscan_eloop *el = &iscan->el;
- u32 status;
- int err = 0;
-
- sched_setscheduler(current, SCHED_FIFO, &param);
- allow_signal(SIGTERM);
- status = BRCMF_SCAN_RESULTS_PARTIAL;
- while (likely(!down_interruptible(&iscan->sync))) {
- if (kthread_should_stop())
- break;
- if (iscan->timer_on) {
- del_timer_sync(&iscan->timer);
- iscan->timer_on = 0;
- }
- rtnl_lock();
- err = brcmf_get_iscan_results(iscan, &status,
- &cfg_priv->bss_list);
- if (unlikely(err)) {
- status = BRCMF_SCAN_RESULTS_ABORTED;
- WL_ERR("Abort iscan\n");
- }
- rtnl_unlock();
- el->handler[status](cfg_priv);
- }
- if (iscan->timer_on) {
- del_timer_sync(&iscan->timer);
- iscan->timer_on = 0;
- }
- WL_SCAN("ISCAN thread terminated\n");
-
- return 0;
-}
-
-static void brcmf_iscan_timer(unsigned long data)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan =
- (struct brcmf_cfg80211_iscan_ctrl *)data;
-
- if (iscan) {
- iscan->timer_on = 0;
- WL_SCAN("timer expired\n");
- brcmf_wakeup_iscan(iscan);
- }
-}
-
-static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
- int err = 0;
-
- if (cfg_priv->iscan_on && !iscan->tsk) {
- iscan->state = WL_ISCAN_STATE_IDLE;
- sema_init(&iscan->sync, 0);
- iscan->tsk = kthread_run(brcmf_iscan_thread, iscan, "wl_iscan");
- if (IS_ERR(iscan->tsk)) {
- WL_ERR("Could not create iscan thread\n");
- iscan->tsk = NULL;
- return -ENOMEM;
- }
- }
-
- return err;
-}
-
-static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el)
-{
- memset(el, 0, sizeof(*el));
- el->handler[BRCMF_SCAN_RESULTS_SUCCESS] = brcmf_iscan_done;
- el->handler[BRCMF_SCAN_RESULTS_PARTIAL] = brcmf_iscan_inprogress;
- el->handler[BRCMF_SCAN_RESULTS_PENDING] = brcmf_iscan_pending;
- el->handler[BRCMF_SCAN_RESULTS_ABORTED] = brcmf_iscan_aborted;
- el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted;
-}
-
-static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
- int err = 0;
-
- if (cfg_priv->iscan_on) {
- iscan->dev = cfg_to_ndev(cfg_priv);
- iscan->state = WL_ISCAN_STATE_IDLE;
- brcmf_init_iscan_eloop(&iscan->el);
- iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
- init_timer(&iscan->timer);
- iscan->timer.data = (unsigned long) iscan;
- iscan->timer.function = brcmf_iscan_timer;
- sema_init(&iscan->sync, 0);
- iscan->tsk = kthread_run(brcmf_iscan_thread, iscan, "wl_iscan");
- if (IS_ERR(iscan->tsk)) {
- WL_ERR("Could not create iscan thread\n");
- iscan->tsk = NULL;
- return -ENOMEM;
- }
- iscan->data = cfg_priv;
- }
-
- return err;
-}
-
-static s32 wl_init_priv(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
- s32 err = 0;
-
- cfg_priv->scan_request = NULL;
- cfg_priv->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
- cfg_priv->iscan_on = true; /* iscan on & off switch.
- we enable iscan per default */
- cfg_priv->roam_on = false; /* roam on & off switch.
- we enable roam per default */
-
- cfg_priv->iscan_kickstart = false;
- cfg_priv->active_scan = true; /* we do active scan for
- specific scan per default */
- cfg_priv->dongle_up = false; /* dongle is not up yet */
- brcmf_init_eq(cfg_priv);
- err = brcmf_init_priv_mem(cfg_priv);
- if (unlikely(err))
- return err;
- if (unlikely(brcmf_create_event_handler(cfg_priv)))
- return -ENOMEM;
- brcmf_init_eloop_handler(&cfg_priv->el);
- mutex_init(&cfg_priv->usr_sync);
- err = brcmf_init_iscan(cfg_priv);
- if (unlikely(err))
- return err;
- brcmf_init_conf(cfg_priv->conf);
- brcmf_init_prof(cfg_priv->profile);
- brcmf_link_down(cfg_priv);
-
- return err;
-}
-
-static void wl_deinit_priv(struct brcmf_cfg80211_priv *cfg_priv)
-{
- brcmf_destroy_event_handler(cfg_priv);
- cfg_priv->dongle_up = false; /* dongle down */
- brcmf_flush_eq(cfg_priv);
- brcmf_link_down(cfg_priv);
- brcmf_term_iscan(cfg_priv);
- brcmf_deinit_priv_mem(cfg_priv);
-}
-
-s32 brcmf_cfg80211_attach(struct net_device *ndev, void *data)
-{
- struct wireless_dev *wdev;
- struct brcmf_cfg80211_priv *cfg_priv;
- struct brcmf_cfg80211_iface *ci;
- s32 err = 0;
-
- if (unlikely(!ndev)) {
- WL_ERR("ndev is invalid\n");
- return -ENODEV;
- }
- cfg80211_dev = kzalloc(sizeof(struct brcmf_cfg80211_dev), GFP_KERNEL);
- if (unlikely(!cfg80211_dev)) {
- WL_ERR("wl_cfg80211_dev is invalid\n");
- return -ENOMEM;
- }
- WL_INFO("func %p\n", brcmf_cfg80211_get_sdio_func());
- wdev = brcmf_alloc_wdev(sizeof(struct brcmf_cfg80211_iface),
- &brcmf_cfg80211_get_sdio_func()->dev);
- if (IS_ERR(wdev))
- return -ENOMEM;
-
- wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS);
- cfg_priv = wdev_to_cfg(wdev);
- cfg_priv->wdev = wdev;
- cfg_priv->pub = data;
- ci = (struct brcmf_cfg80211_iface *)&cfg_priv->ci;
- ci->cfg_priv = cfg_priv;
- ndev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
- wdev->netdev = ndev;
- err = wl_init_priv(cfg_priv);
- if (unlikely(err)) {
- WL_ERR("Failed to init iwm_priv (%d)\n", err);
- goto cfg80211_attach_out;
- }
- brcmf_set_drvdata(cfg80211_dev, ci);
-
- return err;
-
-cfg80211_attach_out:
- brcmf_free_wdev(cfg_priv);
- return err;
-}
-
-void brcmf_cfg80211_detach(void)
-{
- struct brcmf_cfg80211_priv *cfg_priv;
-
- cfg_priv = WL_PRIV_GET();
-
- wl_deinit_priv(cfg_priv);
- brcmf_free_wdev(cfg_priv);
- brcmf_set_drvdata(cfg80211_dev, NULL);
- kfree(cfg80211_dev);
- cfg80211_dev = NULL;
- brcmf_clear_sdio_func();
-}
-
-static void brcmf_wakeup_event(struct brcmf_cfg80211_priv *cfg_priv)
-{
- up(&cfg_priv->event_sync);
-}
-
-static s32 brcmf_event_handler(void *data)
-{
- struct brcmf_cfg80211_priv *cfg_priv =
- (struct brcmf_cfg80211_priv *)data;
- struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
- struct brcmf_cfg80211_event_q *e;
-
- sched_setscheduler(current, SCHED_FIFO, &param);
- allow_signal(SIGTERM);
- while (likely(!down_interruptible(&cfg_priv->event_sync))) {
- if (kthread_should_stop())
- break;
- e = brcmf_deq_event(cfg_priv);
- if (unlikely(!e)) {
- WL_ERR("event queue empty...\n");
- BUG();
- }
- WL_INFO("event type (%d)\n", e->etype);
- if (cfg_priv->el.handler[e->etype]) {
- cfg_priv->el.handler[e->etype](cfg_priv,
- cfg_to_ndev(cfg_priv),
- &e->emsg, e->edata);
- } else {
- WL_INFO("Unknown Event (%d): ignoring\n", e->etype);
- }
- brcmf_put_event(e);
- }
- WL_INFO("was terminated\n");
- return 0;
-}
-
-void
-brcmf_cfg80211_event(struct net_device *ndev,
- const struct brcmf_event_msg *e, void *data)
-{
- u32 event_type = be32_to_cpu(e->event_type);
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
-
- if (likely(!brcmf_enq_event(cfg_priv, event_type, e, data)))
- brcmf_wakeup_event(cfg_priv);
-}
-
-static void brcmf_init_eq(struct brcmf_cfg80211_priv *cfg_priv)
-{
- brcmf_init_eq_lock(cfg_priv);
- INIT_LIST_HEAD(&cfg_priv->eq_list);
-}
-
-static void brcmf_flush_eq(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct brcmf_cfg80211_event_q *e;
-
- brcmf_lock_eq(cfg_priv);
- while (!list_empty(&cfg_priv->eq_list)) {
- e = list_first_entry(&cfg_priv->eq_list,
- struct brcmf_cfg80211_event_q, eq_list);
- list_del(&e->eq_list);
- kfree(e);
- }
- brcmf_unlock_eq(cfg_priv);
-}
-
-/*
-* retrieve first queued event from head
-*/
-
-static struct brcmf_cfg80211_event_q *brcmf_deq_event(
- struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct brcmf_cfg80211_event_q *e = NULL;
-
- brcmf_lock_eq(cfg_priv);
- if (likely(!list_empty(&cfg_priv->eq_list))) {
- e = list_first_entry(&cfg_priv->eq_list,
- struct brcmf_cfg80211_event_q, eq_list);
- list_del(&e->eq_list);
- }
- brcmf_unlock_eq(cfg_priv);
-
- return e;
-}
-
-/*
-** push event to tail of the queue
-*/
-
-static s32
-brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 event,
- const struct brcmf_event_msg *msg, void *data)
-{
- struct brcmf_cfg80211_event_q *e;
- s32 err = 0;
-
- e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_KERNEL);
- if (unlikely(!e)) {
- WL_ERR("event alloc failed\n");
- return -ENOMEM;
- }
-
- e->etype = event;
- memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg));
-
- brcmf_lock_eq(cfg_priv);
- list_add_tail(&e->eq_list, &cfg_priv->eq_list);
- brcmf_unlock_eq(cfg_priv);
-
- return err;
-}
-
-static void brcmf_put_event(struct brcmf_cfg80211_event_q *e)
-{
- kfree(e);
-}
-
-void brcmf_cfg80211_sdio_func(void *func)
-{
- cfg80211_sdio_func = (struct sdio_func *)func;
-}
-
-static void brcmf_clear_sdio_func(void)
-{
- cfg80211_sdio_func = NULL;
-}
-
-struct sdio_func *brcmf_cfg80211_get_sdio_func(void)
-{
- return cfg80211_sdio_func;
-}
-
-static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
-{
- s32 infra = 0;
- s32 err = 0;
-
- switch (iftype) {
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_WDS:
- WL_ERR("type (%d) : currently we do not support this mode\n",
- iftype);
- err = -EINVAL;
- return err;
- case NL80211_IFTYPE_ADHOC:
- infra = 0;
- break;
- case NL80211_IFTYPE_STATION:
- infra = 1;
- break;
- default:
- err = -EINVAL;
- WL_ERR("invalid type (%d)\n", iftype);
- return err;
- }
- infra = cpu_to_le32(infra);
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_INFRA, &infra, sizeof(infra));
- if (unlikely(err)) {
- WL_ERR("WLC_SET_INFRA error (%d)\n", err);
- return err;
- }
-
- return 0;
-}
-
-static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
-{
- /* Room for "event_msgs" + '\0' + bitvec */
- s8 iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
- s32 err = 0;
-
- WL_TRACE("Enter\n");
-
- /* Setup event_msgs */
- brcmu_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN, iovbuf,
- sizeof(iovbuf));
- err = brcmf_dev_ioctl(ndev, BRCMF_C_GET_VAR, iovbuf, sizeof(iovbuf));
- if (unlikely(err)) {
- WL_ERR("Get event_msgs error (%d)\n", err);
- goto dongle_eventmsg_out;
- }
- memcpy(eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
-
- setbit(eventmask, BRCMF_E_SET_SSID);
- setbit(eventmask, BRCMF_E_ROAM);
- setbit(eventmask, BRCMF_E_PRUNE);
- setbit(eventmask, BRCMF_E_AUTH);
- setbit(eventmask, BRCMF_E_REASSOC);
- setbit(eventmask, BRCMF_E_REASSOC_IND);
- setbit(eventmask, BRCMF_E_DEAUTH_IND);
- setbit(eventmask, BRCMF_E_DISASSOC_IND);
- setbit(eventmask, BRCMF_E_DISASSOC);
- setbit(eventmask, BRCMF_E_JOIN);
- setbit(eventmask, BRCMF_E_ASSOC_IND);
- setbit(eventmask, BRCMF_E_PSK_SUP);
- setbit(eventmask, BRCMF_E_LINK);
- setbit(eventmask, BRCMF_E_NDIS_LINK);
- setbit(eventmask, BRCMF_E_MIC_ERROR);
- setbit(eventmask, BRCMF_E_PMKID_CACHE);
- setbit(eventmask, BRCMF_E_TXFAIL);
- setbit(eventmask, BRCMF_E_JOIN_START);
- setbit(eventmask, BRCMF_E_SCAN_COMPLETE);
-
- brcmu_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN, iovbuf,
- sizeof(iovbuf));
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
- if (unlikely(err)) {
- WL_ERR("Set event_msgs error (%d)\n", err);
- goto dongle_eventmsg_out;
- }
-
-dongle_eventmsg_out:
- WL_TRACE("Exit\n");
- return err;
-}
-
-static s32
-brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
-{
- s8 iovbuf[32];
- s32 roamtrigger[2];
- s32 roam_delta[2];
- s32 err = 0;
-
- /*
- * Setup timeout if Beacons are lost and roam is
- * off to report link down
- */
- if (roamvar) {
- brcmu_mkiovar("bcn_timeout", (char *)&bcn_timeout,
- sizeof(bcn_timeout), iovbuf, sizeof(iovbuf));
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_VAR,
- iovbuf, sizeof(iovbuf));
- if (unlikely(err)) {
- WL_ERR("bcn_timeout error (%d)\n", err);
- goto dongle_rom_out;
- }
- }
-
- /*
- * Enable/Disable built-in roaming to allow supplicant
- * to take care of roaming
- */
- WL_INFO("Internal Roaming = %s\n", roamvar ? "Off" : "On");
- brcmu_mkiovar("roam_off", (char *)&roamvar,
- sizeof(roamvar), iovbuf, sizeof(iovbuf));
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
- if (unlikely(err)) {
- WL_ERR("roam_off error (%d)\n", err);
- goto dongle_rom_out;
- }
-
- roamtrigger[0] = WL_ROAM_TRIGGER_LEVEL;
- roamtrigger[1] = BRCM_BAND_ALL;
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_ROAM_TRIGGER,
- (void *)roamtrigger, sizeof(roamtrigger));
- if (unlikely(err)) {
- WL_ERR("WLC_SET_ROAM_TRIGGER error (%d)\n", err);
- goto dongle_rom_out;
- }
-
- roam_delta[0] = WL_ROAM_DELTA;
- roam_delta[1] = BRCM_BAND_ALL;
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_ROAM_DELTA,
- (void *)roam_delta, sizeof(roam_delta));
- if (unlikely(err)) {
- WL_ERR("WLC_SET_ROAM_DELTA error (%d)\n", err);
- goto dongle_rom_out;
- }
-
-dongle_rom_out:
- return err;
-}
-
-static s32
-brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
- s32 scan_unassoc_time, s32 scan_passive_time)
-{
- s32 err = 0;
-
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_SCAN_CHANNEL_TIME,
- &scan_assoc_time, sizeof(scan_assoc_time));
- if (err) {
- if (err == -EOPNOTSUPP)
- WL_INFO("Scan assoc time is not supported\n");
- else
- WL_ERR("Scan assoc time error (%d)\n", err);
- goto dongle_scantime_out;
- }
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_SCAN_UNASSOC_TIME,
- &scan_unassoc_time, sizeof(scan_unassoc_time));
- if (err) {
- if (err == -EOPNOTSUPP)
- WL_INFO("Scan unassoc time is not supported\n");
- else
- WL_ERR("Scan unassoc time error (%d)\n", err);
- goto dongle_scantime_out;
- }
-
- err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_SCAN_PASSIVE_TIME,
- &scan_passive_time, sizeof(scan_passive_time));
- if (err) {
- if (err == -EOPNOTSUPP)
- WL_INFO("Scan passive time is not supported\n");
- else
- WL_ERR("Scan passive time error (%d)\n", err);
- goto dongle_scantime_out;
- }
-
-dongle_scantime_out:
- return err;
-}
-
-s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv, bool need_lock)
-{
- struct net_device *ndev;
- struct wireless_dev *wdev;
- s32 err = 0;
-
- if (cfg_priv->dongle_up)
- return err;
-
- ndev = cfg_to_ndev(cfg_priv);
- wdev = ndev->ieee80211_ptr;
- if (need_lock)
- rtnl_lock();
-
- brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
- WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME);
-
- err = brcmf_dongle_eventmsg(ndev);
- if (unlikely(err))
- goto default_conf_out;
- err = brcmf_dongle_roam(ndev, (cfg_priv->roam_on ? 0 : 1),
- WL_BEACON_TIMEOUT);
- if (unlikely(err))
- goto default_conf_out;
- err = brcmf_dongle_mode(ndev, wdev->iftype);
- if (unlikely(err && err != -EINPROGRESS))
- goto default_conf_out;
- err = brcmf_dongle_probecap(cfg_priv);
- if (unlikely(err))
- goto default_conf_out;
-
- /* -EINPROGRESS: Call commit handler */
-
-default_conf_out:
- if (need_lock)
- rtnl_unlock();
-
- cfg_priv->dongle_up = true;
-
- return err;
-
-}
-
-static s32 wl_update_wiphybands(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct wiphy *wiphy;
- s32 phy_list;
- s8 phy;
- s32 err = 0;
-
- err = brcmf_dev_ioctl(cfg_to_ndev(cfg_priv), BRCM_GET_PHYLIST,
- &phy_list, sizeof(phy_list));
- if (unlikely(err)) {
- WL_ERR("error (%d)\n", err);
- return err;
- }
-
- phy = ((char *)&phy_list)[1];
- WL_INFO("%c phy\n", phy);
- if (phy == 'n' || phy == 'a') {
- wiphy = cfg_to_wiphy(cfg_priv);
- wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
- }
-
- return err;
-}
-
-static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_priv *cfg_priv)
-{
- s32 err = 0;
-
- set_bit(WL_STATUS_READY, &cfg_priv->status);
-
- brcmf_debugfs_add_netdev_params(cfg_priv);
-
- err = brcmf_config_dongle(cfg_priv, false);
- if (unlikely(err))
- return err;
-
- brcmf_invoke_iscan(cfg_priv);
-
- return err;
-}
-
-static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv)
-{
- /*
- * While going down, if associated with AP disassociate
- * from AP to save power
- */
- if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) ||
- test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) &&
- test_bit(WL_STATUS_READY, &cfg_priv->status)) {
- WL_INFO("Disassociating from AP");
- brcmf_link_down(cfg_priv);
-
- /* Make sure WPA_Supplicant receives all the event
- generated due to DISASSOC call to the fw to keep
- the state fw and WPA_Supplicant state consistent
- */
- rtnl_unlock();
- brcmf_delay(500);
- rtnl_lock();
- }
-
- set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
- brcmf_term_iscan(cfg_priv);
- if (cfg_priv->scan_request) {
- cfg80211_scan_done(cfg_priv->scan_request, true);
- /* May need to perform this to cover rmmod */
- /* wl_set_mpc(cfg_to_ndev(wl), 1); */
- cfg_priv->scan_request = NULL;
- }
- clear_bit(WL_STATUS_READY, &cfg_priv->status);
- clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
- clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
-
- brcmf_debugfs_remove_netdev(cfg_priv);
-
- return 0;
-}
-
-s32 brcmf_cfg80211_up(void)
-{
- struct brcmf_cfg80211_priv *cfg_priv;
- s32 err = 0;
-
- cfg_priv = WL_PRIV_GET();
- mutex_lock(&cfg_priv->usr_sync);
- err = __brcmf_cfg80211_up(cfg_priv);
- mutex_unlock(&cfg_priv->usr_sync);
-
- return err;
-}
-
-s32 brcmf_cfg80211_down(void)
-{
- struct brcmf_cfg80211_priv *cfg_priv;
- s32 err = 0;
-
- cfg_priv = WL_PRIV_GET();
- mutex_lock(&cfg_priv->usr_sync);
- err = __brcmf_cfg80211_down(cfg_priv);
- mutex_unlock(&cfg_priv->usr_sync);
-
- return err;
-}
-
-static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_priv *cfg_priv)
-{
- return wl_update_wiphybands(cfg_priv);
-}
-
-static void *brcmf_read_prof(struct brcmf_cfg80211_priv *cfg_priv, s32 item)
-{
- switch (item) {
- case WL_PROF_SEC:
- return &cfg_priv->profile->sec;
- case WL_PROF_BSSID:
- return &cfg_priv->profile->bssid;
- case WL_PROF_SSID:
- return &cfg_priv->profile->ssid;
- }
- WL_ERR("invalid item (%d)\n", item);
- return NULL;
-}
-
-static s32
-brcmf_update_prof(struct brcmf_cfg80211_priv *cfg_priv,
- const struct brcmf_event_msg *e, void *data, s32 item)
-{
- s32 err = 0;
- struct brcmf_ssid *ssid;
-
- switch (item) {
- case WL_PROF_SSID:
- ssid = (struct brcmf_ssid *) data;
- memset(cfg_priv->profile->ssid.SSID, 0,
- sizeof(cfg_priv->profile->ssid.SSID));
- memcpy(cfg_priv->profile->ssid.SSID,
- ssid->SSID, ssid->SSID_len);
- cfg_priv->profile->ssid.SSID_len = ssid->SSID_len;
- break;
- case WL_PROF_BSSID:
- if (data)
- memcpy(cfg_priv->profile->bssid, data, ETH_ALEN);
- else
- memset(cfg_priv->profile->bssid, 0, ETH_ALEN);
- break;
- case WL_PROF_SEC:
- memcpy(&cfg_priv->profile->sec, data,
- sizeof(cfg_priv->profile->sec));
- break;
- case WL_PROF_BEACONINT:
- cfg_priv->profile->beacon_interval = *(u16 *)data;
- break;
- case WL_PROF_DTIMPERIOD:
- cfg_priv->profile->dtim_period = *(u8 *)data;
- break;
- default:
- WL_ERR("unsupported item (%d)\n", item);
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static bool brcmf_is_ibssmode(struct brcmf_cfg80211_priv *cfg_priv)
-{
- return cfg_priv->conf->mode == WL_MODE_IBSS;
-}
-
-static __used s32 brcmf_add_ie(struct brcmf_cfg80211_priv *cfg_priv,
- u8 t, u8 l, u8 *v)
-{
- struct brcmf_cfg80211_ie *ie = &cfg_priv->ie;
- s32 err = 0;
-
- if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
- WL_ERR("ei crosses buffer boundary\n");
- return -ENOSPC;
- }
- ie->buf[ie->offset] = t;
- ie->buf[ie->offset + 1] = l;
- memcpy(&ie->buf[ie->offset + 2], v, l);
- ie->offset += l + 2;
-
- return err;
-}
-
-static void brcmf_link_down(struct brcmf_cfg80211_priv *cfg_priv)
-{
- struct net_device *dev = NULL;
- s32 err = 0;
-
- WL_TRACE("Enter\n");
-
- if (cfg_priv->link_up) {
- dev = cfg_to_ndev(cfg_priv);
- WL_INFO("Call WLC_DISASSOC to stop excess roaming\n ");
- err = brcmf_dev_ioctl(dev, BRCMF_C_DISASSOC, NULL, 0);
- if (unlikely(err))
- WL_ERR("WLC_DISASSOC failed (%d)\n", err);
- cfg_priv->link_up = false;
- }
- WL_TRACE("Exit\n");
-}
-
-static void brcmf_lock_eq(struct brcmf_cfg80211_priv *cfg_priv)
-{
- spin_lock_irq(&cfg_priv->eq_lock);
-}
-
-static void brcmf_unlock_eq(struct brcmf_cfg80211_priv *cfg_priv)
-{
- spin_unlock_irq(&cfg_priv->eq_lock);
-}
-
-static void brcmf_init_eq_lock(struct brcmf_cfg80211_priv *cfg_priv)
-{
- spin_lock_init(&cfg_priv->eq_lock);
-}
-
-static void brcmf_delay(u32 ms)
-{
- if (ms < 1000 / HZ) {
- cond_resched();
- mdelay(ms);
- } else {
- msleep(ms);
- }
-}
-
-static void brcmf_set_drvdata(struct brcmf_cfg80211_dev *dev, void *data)
-{
- dev->driver_data = data;
-}
-
-static void *brcmf_get_drvdata(struct brcmf_cfg80211_dev *dev)
-{
- void *data = NULL;
-
- if (dev)
- data = dev->driver_data;
- return data;
-}
-
-static void brcmf_set_mpc(struct net_device *ndev, int mpc)
-{
- s32 err = 0;
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
-
- if (test_bit(WL_STATUS_READY, &cfg_priv->status)) {
- err = brcmf_dev_intvar_set(ndev, "mpc", mpc);
- if (unlikely(err)) {
- WL_ERR("fail to set mpc\n");
- return;
- }
- WL_INFO("MPC : %d\n", mpc);
- }
-}
-
-static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_priv *cfg_priv)
-{
- char buf[10+IFNAMSIZ];
- struct dentry *fd;
- s32 err = 0;
-
- sprintf(buf, "netdev:%s", cfg_to_ndev(cfg_priv)->name);
- cfg_priv->debugfsdir = debugfs_create_dir(buf,
- cfg_to_wiphy(cfg_priv)->debugfsdir);
-
- fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg_priv->debugfsdir,
- (u16 *)&cfg_priv->profile->beacon_interval);
- if (!fd) {
- err = -ENOMEM;
- goto err_out;
- }
-
- fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg_priv->debugfsdir,
- (u8 *)&cfg_priv->profile->dtim_period);
- if (!fd) {
- err = -ENOMEM;
- goto err_out;
- }
-
-err_out:
- return err;
-}
-
-static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_priv *cfg_priv)
-{
- debugfs_remove_recursive(cfg_priv->debugfsdir);
- cfg_priv->debugfsdir = NULL;
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/Makefile b/drivers/staging/brcm80211/brcmsmac/Makefile
deleted file mode 100644
index 1ea3e0c48f3..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/Makefile
+++ /dev/null
@@ -1,58 +0,0 @@
-#
-# Makefile fragment for Broadcom 802.11n Networking Device Driver
-#
-# Copyright (c) 2010 Broadcom Corporation
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
-# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-ccflags-y := \
- -DWLC_HIGH \
- -DWLC_LOW \
- -DSTA \
- -DWME \
- -DWL11N \
- -DDBAND \
- -DBCMNVRAMR \
- -Idrivers/staging/brcm80211/brcmsmac \
- -Idrivers/staging/brcm80211/brcmsmac/phy \
- -Idrivers/staging/brcm80211/include
-
-BRCMSMAC_OFILES := \
- mac80211_if.o \
- ucode_loader.o \
- alloc.o \
- ampdu.o \
- antsel.o \
- bmac.o \
- channel.o \
- main.o \
- phy_shim.o \
- pmu.o \
- rate.o \
- stf.o \
- aiutils.o \
- phy/phy_cmn.o \
- phy/phy_lcn.o \
- phy/phy_n.o \
- phy/phytbl_lcn.o \
- phy/phytbl_n.o \
- phy/phy_qmath.o \
- otp.o \
- srom.o \
- dma.o \
- nicpci.o
-
-MODULEPFX := brcmsmac
-
-obj-$(CONFIG_BRCMSMAC) += $(MODULEPFX).o
-$(MODULEPFX)-objs = $(BRCMSMAC_OFILES)
diff --git a/drivers/staging/brcm80211/brcmsmac/aiutils.c b/drivers/staging/brcm80211/brcmsmac/aiutils.c
deleted file mode 100644
index a25901e9981..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/aiutils.c
+++ /dev/null
@@ -1,2279 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include <linux/delay.h>
-#include <linux/pci.h>
-
-#include <defs.h>
-#include <chipcommon.h>
-#include <brcmu_utils.h>
-#include <brcm_hw_ids.h>
-#include "types.h"
-#include "pub.h"
-#include "pmu.h"
-#include "srom.h"
-#include "nicpci.h"
-#include "aiutils.h"
-
-/* slow_clk_ctl */
-#define SCC_SS_MASK 0x00000007 /* slow clock source mask */
-#define SCC_SS_LPO 0x00000000 /* source of slow clock is LPO */
-#define SCC_SS_XTAL 0x00000001 /* source of slow clock is crystal */
-#define SCC_SS_PCI 0x00000002 /* source of slow clock is PCI */
-#define SCC_LF 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */
-#define SCC_LP 0x00000400 /* LPOPowerDown, 1: LPO is disabled,
- * 0: LPO is enabled
- */
-#define SCC_FS 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock,
- * 0: power logic control
- */
-#define SCC_IP 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors
- * PLL clock disable requests from core
- */
-#define SCC_XC 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't
- * disable crystal when appropriate
- */
-#define SCC_XP 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */
-#define SCC_CD_MASK 0xffff0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */
-#define SCC_CD_SHIFT 16
-
-/* system_clk_ctl */
-#define SYCC_IE 0x00000001 /* ILPen: Enable Idle Low Power */
-#define SYCC_AE 0x00000002 /* ALPen: Enable Active Low Power */
-#define SYCC_FP 0x00000004 /* ForcePLLOn */
-#define SYCC_AR 0x00000008 /* Force ALP (or HT if ALPen is not set */
-#define SYCC_HR 0x00000010 /* Force HT */
-#define SYCC_CD_MASK 0xffff0000 /* ClkDiv (ILP = 1/(4 * (divisor + 1)) */
-#define SYCC_CD_SHIFT 16
-
-#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
-#define CST4329_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
-#define CST4329_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
-#define CST4329_OTP_SEL 2 /* OTP is powered up, no SPROM */
-#define CST4329_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */
-#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
-#define CST4329_SPI_SDIO_MODE_SHIFT 2
-
-/* 43224 chip-specific ChipControl register bits */
-#define CCTRL43224_GPIO_TOGGLE 0x8000
-#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */
-#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */
-
-/* 43236 Chip specific ChipStatus register bits */
-#define CST43236_SFLASH_MASK 0x00000040
-#define CST43236_OTP_MASK 0x00000080
-#define CST43236_HSIC_MASK 0x00000100 /* USB/HSIC */
-#define CST43236_BP_CLK 0x00000200 /* 120/96Mbps */
-#define CST43236_BOOT_MASK 0x00001800
-#define CST43236_BOOT_SHIFT 11
-#define CST43236_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
-#define CST43236_BOOT_FROM_ROM 1 /* boot from ROM */
-#define CST43236_BOOT_FROM_FLASH 2 /* boot from FLASH */
-#define CST43236_BOOT_FROM_INVALID 3
-
-/* 4331 chip-specific ChipControl register bits */
-#define CCTRL4331_BT_COEXIST (1<<0) /* 0 disable */
-#define CCTRL4331_SECI (1<<1) /* 0 SECI is disabled (JATG functional) */
-#define CCTRL4331_EXT_LNA (1<<2) /* 0 disable */
-#define CCTRL4331_SPROM_GPIO13_15 (1<<3) /* sprom/gpio13-15 mux */
-#define CCTRL4331_EXTPA_EN (1<<4) /* 0 ext pa disable, 1 ext pa enabled */
-#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) /* set drive out GPIO_CLK on sprom_cs pin */
-#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) /* use sprom_cs pin as PCIE mdio interface */
-#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7) /* aband extpa will be at gpio2/5 and sprom_dout */
-#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) /* override core control on pipe_AuxClkEnable */
-#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) /* override core control on pipe_AuxPowerDown */
-#define CCTRL4331_PCIE_AUXCLKEN (1<<10) /* pcie_auxclkenable */
-#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) /* pcie_pipe_pllpowerdown */
-#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) /* enable bt_shd0 at gpio4 */
-#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) /* enable bt_shd1 at gpio5 */
-
-/* 4331 Chip specific ChipStatus register bits */
-#define CST4331_XTAL_FREQ 0x00000001 /* crystal frequency 20/40Mhz */
-#define CST4331_SPROM_PRESENT 0x00000002
-#define CST4331_OTP_PRESENT 0x00000004
-#define CST4331_LDO_RF 0x00000008
-#define CST4331_LDO_PAR 0x00000010
-
-/* 4319 chip-specific ChipStatus register bits */
-#define CST4319_SPI_CPULESSUSB 0x00000001
-#define CST4319_SPI_CLK_POL 0x00000002
-#define CST4319_SPI_CLK_PH 0x00000008
-#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 /* gpio [7:6], SDIO CIS selection */
-#define CST4319_SPROM_OTP_SEL_SHIFT 6
-#define CST4319_DEFCIS_SEL 0x00000000 /* use default CIS, OTP is powered up */
-#define CST4319_SPROM_SEL 0x00000040 /* use SPROM, OTP is powered up */
-#define CST4319_OTP_SEL 0x00000080 /* use OTP, OTP is powered up */
-#define CST4319_OTP_PWRDN 0x000000c0 /* use SPROM, OTP is powered down */
-#define CST4319_SDIO_USB_MODE 0x00000100 /* gpio [8], sdio/usb mode */
-#define CST4319_REMAP_SEL_MASK 0x00000600
-#define CST4319_ILPDIV_EN 0x00000800
-#define CST4319_XTAL_PD_POL 0x00001000
-#define CST4319_LPO_SEL 0x00002000
-#define CST4319_RES_INIT_MODE 0x0000c000
-#define CST4319_PALDO_EXTPNP 0x00010000 /* PALDO is configured with external PNP */
-#define CST4319_CBUCK_MODE_MASK 0x00060000
-#define CST4319_CBUCK_MODE_BURST 0x00020000
-#define CST4319_CBUCK_MODE_LPBURST 0x00060000
-#define CST4319_RCAL_VALID 0x01000000
-#define CST4319_RCAL_VALUE_MASK 0x3e000000
-#define CST4319_RCAL_VALUE_SHIFT 25
-
-/* 4336 chip-specific ChipStatus register bits */
-#define CST4336_SPI_MODE_MASK 0x00000001
-#define CST4336_SPROM_PRESENT 0x00000002
-#define CST4336_OTP_PRESENT 0x00000004
-#define CST4336_ARMREMAP_0 0x00000008
-#define CST4336_ILPDIV_EN_MASK 0x00000010
-#define CST4336_ILPDIV_EN_SHIFT 4
-#define CST4336_XTAL_PD_POL_MASK 0x00000020
-#define CST4336_XTAL_PD_POL_SHIFT 5
-#define CST4336_LPO_SEL_MASK 0x00000040
-#define CST4336_LPO_SEL_SHIFT 6
-#define CST4336_RES_INIT_MODE_MASK 0x00000180
-#define CST4336_RES_INIT_MODE_SHIFT 7
-#define CST4336_CBUCK_MODE_MASK 0x00000600
-#define CST4336_CBUCK_MODE_SHIFT 9
-
-/* 4313 chip-specific ChipStatus register bits */
-#define CST4313_SPROM_PRESENT 1
-#define CST4313_OTP_PRESENT 2
-#define CST4313_SPROM_OTP_SEL_MASK 0x00000002
-#define CST4313_SPROM_OTP_SEL_SHIFT 0
-
-/* 4313 Chip specific ChipControl register bits */
-#define CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strengh for later 4313 */
-
-#define BCM47162_DMP() ((sih->chip == BCM47162_CHIP_ID) && \
- (sih->chiprev == 0) && \
- (sii->coreid[sii->curidx] == MIPS74K_CORE_ID))
-
-/* Manufacturer Ids */
-#define MFGID_ARM 0x43b
-#define MFGID_BRCM 0x4bf
-#define MFGID_MIPS 0x4a7
-
-/* Enumeration ROM registers */
-#define ER_EROMENTRY 0x000
-#define ER_REMAPCONTROL 0xe00
-#define ER_REMAPSELECT 0xe04
-#define ER_MASTERSELECT 0xe10
-#define ER_ITCR 0xf00
-#define ER_ITIP 0xf04
-
-/* Erom entries */
-#define ER_TAG 0xe
-#define ER_TAG1 0x6
-#define ER_VALID 1
-#define ER_CI 0
-#define ER_MP 2
-#define ER_ADD 4
-#define ER_END 0xe
-#define ER_BAD 0xffffffff
-
-/* EROM CompIdentA */
-#define CIA_MFG_MASK 0xfff00000
-#define CIA_MFG_SHIFT 20
-#define CIA_CID_MASK 0x000fff00
-#define CIA_CID_SHIFT 8
-#define CIA_CCL_MASK 0x000000f0
-#define CIA_CCL_SHIFT 4
-
-/* EROM CompIdentB */
-#define CIB_REV_MASK 0xff000000
-#define CIB_REV_SHIFT 24
-#define CIB_NSW_MASK 0x00f80000
-#define CIB_NSW_SHIFT 19
-#define CIB_NMW_MASK 0x0007c000
-#define CIB_NMW_SHIFT 14
-#define CIB_NSP_MASK 0x00003e00
-#define CIB_NSP_SHIFT 9
-#define CIB_NMP_MASK 0x000001f0
-#define CIB_NMP_SHIFT 4
-
-/* EROM AddrDesc */
-#define AD_ADDR_MASK 0xfffff000
-#define AD_SP_MASK 0x00000f00
-#define AD_SP_SHIFT 8
-#define AD_ST_MASK 0x000000c0
-#define AD_ST_SHIFT 6
-#define AD_ST_SLAVE 0x00000000
-#define AD_ST_BRIDGE 0x00000040
-#define AD_ST_SWRAP 0x00000080
-#define AD_ST_MWRAP 0x000000c0
-#define AD_SZ_MASK 0x00000030
-#define AD_SZ_SHIFT 4
-#define AD_SZ_4K 0x00000000
-#define AD_SZ_8K 0x00000010
-#define AD_SZ_16K 0x00000020
-#define AD_SZ_SZD 0x00000030
-#define AD_AG32 0x00000008
-#define AD_ADDR_ALIGN 0x00000fff
-#define AD_SZ_BASE 0x00001000 /* 4KB */
-
-/* EROM SizeDesc */
-#define SD_SZ_MASK 0xfffff000
-#define SD_SG32 0x00000008
-#define SD_SZ_ALIGN 0x00000fff
-
-#define PCI_CFG_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */
-#define PCI_CFG_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal power-up */
-#define PCI_CFG_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL power-down */
-
-/* power control defines */
-#define PLL_DELAY 150 /* us pll on delay */
-#define FREF_DELAY 200 /* us fref change delay */
-#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */
-
-/* resetctrl */
-#define AIRC_RESET 1
-
-struct aidmp {
- u32 oobselina30; /* 0x000 */
- u32 oobselina74; /* 0x004 */
- u32 PAD[6];
- u32 oobselinb30; /* 0x020 */
- u32 oobselinb74; /* 0x024 */
- u32 PAD[6];
- u32 oobselinc30; /* 0x040 */
- u32 oobselinc74; /* 0x044 */
- u32 PAD[6];
- u32 oobselind30; /* 0x060 */
- u32 oobselind74; /* 0x064 */
- u32 PAD[38];
- u32 oobselouta30; /* 0x100 */
- u32 oobselouta74; /* 0x104 */
- u32 PAD[6];
- u32 oobseloutb30; /* 0x120 */
- u32 oobseloutb74; /* 0x124 */
- u32 PAD[6];
- u32 oobseloutc30; /* 0x140 */
- u32 oobseloutc74; /* 0x144 */
- u32 PAD[6];
- u32 oobseloutd30; /* 0x160 */
- u32 oobseloutd74; /* 0x164 */
- u32 PAD[38];
- u32 oobsynca; /* 0x200 */
- u32 oobseloutaen; /* 0x204 */
- u32 PAD[6];
- u32 oobsyncb; /* 0x220 */
- u32 oobseloutben; /* 0x224 */
- u32 PAD[6];
- u32 oobsyncc; /* 0x240 */
- u32 oobseloutcen; /* 0x244 */
- u32 PAD[6];
- u32 oobsyncd; /* 0x260 */
- u32 oobseloutden; /* 0x264 */
- u32 PAD[38];
- u32 oobaextwidth; /* 0x300 */
- u32 oobainwidth; /* 0x304 */
- u32 oobaoutwidth; /* 0x308 */
- u32 PAD[5];
- u32 oobbextwidth; /* 0x320 */
- u32 oobbinwidth; /* 0x324 */
- u32 oobboutwidth; /* 0x328 */
- u32 PAD[5];
- u32 oobcextwidth; /* 0x340 */
- u32 oobcinwidth; /* 0x344 */
- u32 oobcoutwidth; /* 0x348 */
- u32 PAD[5];
- u32 oobdextwidth; /* 0x360 */
- u32 oobdinwidth; /* 0x364 */
- u32 oobdoutwidth; /* 0x368 */
- u32 PAD[37];
- u32 ioctrlset; /* 0x400 */
- u32 ioctrlclear; /* 0x404 */
- u32 ioctrl; /* 0x408 */
- u32 PAD[61];
- u32 iostatus; /* 0x500 */
- u32 PAD[127];
- u32 ioctrlwidth; /* 0x700 */
- u32 iostatuswidth; /* 0x704 */
- u32 PAD[62];
- u32 resetctrl; /* 0x800 */
- u32 resetstatus; /* 0x804 */
- u32 resetreadid; /* 0x808 */
- u32 resetwriteid; /* 0x80c */
- u32 PAD[60];
- u32 errlogctrl; /* 0x900 */
- u32 errlogdone; /* 0x904 */
- u32 errlogstatus; /* 0x908 */
- u32 errlogaddrlo; /* 0x90c */
- u32 errlogaddrhi; /* 0x910 */
- u32 errlogid; /* 0x914 */
- u32 errloguser; /* 0x918 */
- u32 errlogflags; /* 0x91c */
- u32 PAD[56];
- u32 intstatus; /* 0xa00 */
- u32 PAD[127];
- u32 config; /* 0xe00 */
- u32 PAD[63];
- u32 itcr; /* 0xf00 */
- u32 PAD[3];
- u32 itipooba; /* 0xf10 */
- u32 itipoobb; /* 0xf14 */
- u32 itipoobc; /* 0xf18 */
- u32 itipoobd; /* 0xf1c */
- u32 PAD[4];
- u32 itipoobaout; /* 0xf30 */
- u32 itipoobbout; /* 0xf34 */
- u32 itipoobcout; /* 0xf38 */
- u32 itipoobdout; /* 0xf3c */
- u32 PAD[4];
- u32 itopooba; /* 0xf50 */
- u32 itopoobb; /* 0xf54 */
- u32 itopoobc; /* 0xf58 */
- u32 itopoobd; /* 0xf5c */
- u32 PAD[4];
- u32 itopoobain; /* 0xf70 */
- u32 itopoobbin; /* 0xf74 */
- u32 itopoobcin; /* 0xf78 */
- u32 itopoobdin; /* 0xf7c */
- u32 PAD[4];
- u32 itopreset; /* 0xf90 */
- u32 PAD[15];
- u32 peripherialid4; /* 0xfd0 */
- u32 peripherialid5; /* 0xfd4 */
- u32 peripherialid6; /* 0xfd8 */
- u32 peripherialid7; /* 0xfdc */
- u32 peripherialid0; /* 0xfe0 */
- u32 peripherialid1; /* 0xfe4 */
- u32 peripherialid2; /* 0xfe8 */
- u32 peripherialid3; /* 0xfec */
- u32 componentid0; /* 0xff0 */
- u32 componentid1; /* 0xff4 */
- u32 componentid2; /* 0xff8 */
- u32 componentid3; /* 0xffc */
-};
-
-/* EROM parsing */
-
-static u32
-get_erom_ent(struct si_pub *sih, u32 **eromptr, u32 mask, u32 match)
-{
- u32 ent;
- uint inv = 0, nom = 0;
-
- while (true) {
- ent = R_REG(*eromptr);
- (*eromptr)++;
-
- if (mask == 0)
- break;
-
- if ((ent & ER_VALID) == 0) {
- inv++;
- continue;
- }
-
- if (ent == (ER_END | ER_VALID))
- break;
-
- if ((ent & mask) == match)
- break;
-
- nom++;
- }
-
- SI_VMSG(("%s: Returning ent 0x%08x\n", __func__, ent));
- if (inv + nom) {
- SI_VMSG((" after %d invalid and %d non-matching entries\n",
- inv, nom));
- }
- return ent;
-}
-
-static u32
-get_asd(struct si_pub *sih, u32 **eromptr, uint sp, uint ad, uint st,
- u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
-{
- u32 asd, sz, szd;
-
- asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
- if (((asd & ER_TAG1) != ER_ADD) ||
- (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
- ((asd & AD_ST_MASK) != st)) {
- /* This is not what we want, "push" it back */
- (*eromptr)--;
- return 0;
- }
- *addrl = asd & AD_ADDR_MASK;
- if (asd & AD_AG32)
- *addrh = get_erom_ent(sih, eromptr, 0, 0);
- else
- *addrh = 0;
- *sizeh = 0;
- sz = asd & AD_SZ_MASK;
- if (sz == AD_SZ_SZD) {
- szd = get_erom_ent(sih, eromptr, 0, 0);
- *sizel = szd & SD_SZ_MASK;
- if (szd & SD_SG32)
- *sizeh = get_erom_ent(sih, eromptr, 0, 0);
- } else
- *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
-
- SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
- sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
-
- return asd;
-}
-
-static void ai_hwfixup(struct si_info *sii)
-{
-}
-
-/* parse the enumeration rom to identify all cores */
-void ai_scan(struct si_pub *sih, void *regs)
-{
- struct si_info *sii = SI_INFO(sih);
- chipcregs_t *cc = (chipcregs_t *) regs;
- u32 erombase, *eromptr, *eromlim;
-
- erombase = R_REG(&cc->eromptr);
-
- switch (sih->bustype) {
- case SI_BUS:
- eromptr = (u32 *) REG_MAP(erombase, SI_CORE_SIZE);
- break;
-
- case PCI_BUS:
- /* Set wrappers address */
- sii->curwrap = (void *)((unsigned long)regs + SI_CORE_SIZE);
-
- /* Now point the window at the erom */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
- eromptr = regs;
- break;
-
- case SPI_BUS:
- case SDIO_BUS:
- eromptr = (u32 *)(unsigned long)erombase;
- break;
-
- default:
- SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n",
- sih->bustype));
- return;
- }
- eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
-
- SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", regs, erombase, eromptr, eromlim));
- while (eromptr < eromlim) {
- u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
- u32 mpd, asd, addrl, addrh, sizel, sizeh;
- u32 *base;
- uint i, j, idx;
- bool br;
-
- br = false;
-
- /* Grok a component */
- cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
- if (cia == (ER_END | ER_VALID)) {
- SI_VMSG(("Found END of erom after %d cores\n",
- sii->numcores));
- ai_hwfixup(sii);
- return;
- }
- base = eromptr - 1;
- cib = get_erom_ent(sih, &eromptr, 0, 0);
-
- if ((cib & ER_TAG) != ER_CI) {
- SI_ERROR(("CIA not followed by CIB\n"));
- goto error;
- }
-
- cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
- mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
- crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
- nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
- nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
- nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
- nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
-
- SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " "nsw = %d, nmp = %d & nsp = %d\n", mfg, cid, crev, base, nmw, nsw, nmp, nsp));
-
- if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
- continue;
- if ((nmw + nsw == 0)) {
- /* A component which is not a core */
- if (cid == OOB_ROUTER_CORE_ID) {
- asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
- &addrl, &addrh, &sizel, &sizeh);
- if (asd != 0) {
- sii->oob_router = addrl;
- }
- }
- continue;
- }
-
- idx = sii->numcores;
-/* sii->eromptr[idx] = base; */
- sii->cia[idx] = cia;
- sii->cib[idx] = cib;
- sii->coreid[idx] = cid;
-
- for (i = 0; i < nmp; i++) {
- mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
- if ((mpd & ER_TAG) != ER_MP) {
- SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
- goto error;
- }
- SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
- (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
- (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
- }
-
- /* First Slave Address Descriptor should be port 0:
- * the main register space for the core
- */
- asd =
- get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
- &sizel, &sizeh);
- if (asd == 0) {
- /* Try again to see if it is a bridge */
- asd =
- get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
- &addrh, &sizel, &sizeh);
- if (asd != 0)
- br = true;
- else if ((addrh != 0) || (sizeh != 0)
- || (sizel != SI_CORE_SIZE)) {
- SI_ERROR(("First Slave ASD for core 0x%04x malformed " "(0x%08x)\n", cid, asd));
- goto error;
- }
- }
- sii->coresba[idx] = addrl;
- sii->coresba_size[idx] = sizel;
- /* Get any more ASDs in port 0 */
- j = 1;
- do {
- asd =
- get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
- &addrh, &sizel, &sizeh);
- if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
- sii->coresba2[idx] = addrl;
- sii->coresba2_size[idx] = sizel;
- }
- j++;
- } while (asd != 0);
-
- /* Go through the ASDs for other slave ports */
- for (i = 1; i < nsp; i++) {
- j = 0;
- do {
- asd =
- get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
- &addrl, &addrh, &sizel, &sizeh);
- } while (asd != 0);
- if (j == 0) {
- SI_ERROR((" SP %d has no address descriptors\n",
- i));
- goto error;
- }
- }
-
- /* Now get master wrappers */
- for (i = 0; i < nmw; i++) {
- asd =
- get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
- &addrh, &sizel, &sizeh);
- if (asd == 0) {
- SI_ERROR(("Missing descriptor for MW %d\n", i));
- goto error;
- }
- if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
- SI_ERROR(("Master wrapper %d is not 4KB\n", i));
- goto error;
- }
- if (i == 0)
- sii->wrapba[idx] = addrl;
- }
-
- /* And finally slave wrappers */
- for (i = 0; i < nsw; i++) {
- uint fwp = (nsp == 1) ? 0 : 1;
- asd =
- get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
- &addrl, &addrh, &sizel, &sizeh);
- if (asd == 0) {
- SI_ERROR(("Missing descriptor for SW %d\n", i));
- goto error;
- }
- if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
- SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
- goto error;
- }
- if ((nmw == 0) && (i == 0))
- sii->wrapba[idx] = addrl;
- }
-
- /* Don't record bridges */
- if (br)
- continue;
-
- /* Done with core */
- sii->numcores++;
- }
-
- SI_ERROR(("Reached end of erom without finding END"));
-
- error:
- sii->numcores = 0;
- return;
-}
-
-/* This function changes the logical "focus" to the indicated core.
- * Return the current core's virtual address.
- */
-void *ai_setcoreidx(struct si_pub *sih, uint coreidx)
-{
- struct si_info *sii = SI_INFO(sih);
- u32 addr = sii->coresba[coreidx];
- u32 wrap = sii->wrapba[coreidx];
- void *regs;
-
- if (coreidx >= sii->numcores)
- return NULL;
-
- switch (sih->bustype) {
- case SI_BUS:
- /* map new one */
- if (!sii->regs[coreidx]) {
- sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
- }
- sii->curmap = regs = sii->regs[coreidx];
- if (!sii->wrappers[coreidx]) {
- sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
- }
- sii->curwrap = sii->wrappers[coreidx];
- break;
-
- case PCI_BUS:
- /* point bar0 window */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
- regs = sii->curmap;
- /* point bar0 2nd 4KB window */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
- break;
-
- case SPI_BUS:
- case SDIO_BUS:
- sii->curmap = regs = (void *)(unsigned long)addr;
- sii->curwrap = (void *)(unsigned long)wrap;
- break;
-
- default:
- regs = NULL;
- break;
- }
-
- sii->curmap = regs;
- sii->curidx = coreidx;
-
- return regs;
-}
-
-/* Return the number of address spaces in current core */
-int ai_numaddrspaces(struct si_pub *sih)
-{
- return 2;
-}
-
-/* Return the address of the nth address space in the current core */
-u32 ai_addrspace(struct si_pub *sih, uint asidx)
-{
- struct si_info *sii;
- uint cidx;
-
- sii = SI_INFO(sih);
- cidx = sii->curidx;
-
- if (asidx == 0)
- return sii->coresba[cidx];
- else if (asidx == 1)
- return sii->coresba2[cidx];
- else {
- SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
- return 0;
- }
-}
-
-/* Return the size of the nth address space in the current core */
-u32 ai_addrspacesize(struct si_pub *sih, uint asidx)
-{
- struct si_info *sii;
- uint cidx;
-
- sii = SI_INFO(sih);
- cidx = sii->curidx;
-
- if (asidx == 0)
- return sii->coresba_size[cidx];
- else if (asidx == 1)
- return sii->coresba2_size[cidx];
- else {
- SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
- return 0;
- }
-}
-
-uint ai_flag(struct si_pub *sih)
-{
- struct si_info *sii;
- struct aidmp *ai;
-
- sii = SI_INFO(sih);
- if (BCM47162_DMP()) {
- SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __func__));
- return sii->curidx;
- }
- ai = sii->curwrap;
-
- return R_REG(&ai->oobselouta30) & 0x1f;
-}
-
-void ai_setint(struct si_pub *sih, int siflag)
-{
-}
-
-uint ai_corevendor(struct si_pub *sih)
-{
- struct si_info *sii;
- u32 cia;
-
- sii = SI_INFO(sih);
- cia = sii->cia[sii->curidx];
- return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
-}
-
-uint ai_corerev(struct si_pub *sih)
-{
- struct si_info *sii;
- u32 cib;
-
- sii = SI_INFO(sih);
- cib = sii->cib[sii->curidx];
- return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-}
-
-bool ai_iscoreup(struct si_pub *sih)
-{
- struct si_info *sii;
- struct aidmp *ai;
-
- sii = SI_INFO(sih);
- ai = sii->curwrap;
-
- return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
- SICF_CLOCK_EN)
- && ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
-}
-
-void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = SI_INFO(sih);
-
- if (BCM47162_DMP()) {
- SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
- __func__));
- return;
- }
-
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->ioctrl) & ~mask) | val);
- W_REG(&ai->ioctrl, w);
- }
-}
-
-u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = SI_INFO(sih);
- if (BCM47162_DMP()) {
- SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
- __func__));
- return 0;
- }
-
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->ioctrl) & ~mask) | val);
- W_REG(&ai->ioctrl, w);
- }
-
- return R_REG(&ai->ioctrl);
-}
-
-u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = SI_INFO(sih);
- if (BCM47162_DMP()) {
- SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", __func__));
- return 0;
- }
-
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->iostatus) & ~mask) | val);
- W_REG(&ai->iostatus, w);
- }
-
- return R_REG(&ai->iostatus);
-}
-
-/* *************** from siutils.c ************** */
-/* local prototypes */
-static struct si_info *ai_doattach(struct si_info *sii, void *regs,
- uint bustype, void *sdh, char **vars,
- uint *varsz);
-static bool ai_buscore_prep(struct si_info *sii, uint bustype);
-static bool ai_buscore_setup(struct si_info *sii, chipcregs_t *cc, uint bustype,
- u32 savewin, uint *origidx, void *regs);
-static void ai_nvram_process(struct si_info *sii, char *pvars);
-
-/* dev path concatenation util */
-static char *ai_devpathvar(struct si_pub *sih, char *var, int len,
- const char *name);
-static bool _ai_clkctl_cc(struct si_info *sii, uint mode);
-static bool ai_ispcie(struct si_info *sii);
-
-/* global variable to indicate reservation/release of gpio's */
-static u32 ai_gpioreservation;
-
-/*
- * Allocate a si handle.
- * devid - pci device id (used to determine chip#)
- * osh - opaque OS handle
- * regs - virtual address of initial core registers
- * bustype - pci/sb/sdio/etc
- * vars - pointer to a pointer area for "environment" variables
- * varsz - pointer to int to return the size of the vars
- */
-struct si_pub *ai_attach(void *regs, uint bustype,
- void *sdh, char **vars, uint *varsz)
-{
- struct si_info *sii;
-
- /* alloc struct si_info */
- sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC);
- if (sii == NULL) {
- SI_ERROR(("si_attach: malloc failed!\n"));
- return NULL;
- }
-
- if (ai_doattach(sii, regs, bustype, sdh, vars, varsz) ==
- NULL) {
- kfree(sii);
- return NULL;
- }
- sii->vars = vars ? *vars : NULL;
- sii->varsz = varsz ? *varsz : 0;
-
- return (struct si_pub *) sii;
-}
-
-/* global kernel resource */
-static struct si_info ksii;
-
-static bool ai_buscore_prep(struct si_info *sii, uint bustype)
-{
- /* kludge to enable the clock on the 4306 which lacks a slowclock */
- if (bustype == PCI_BUS && !ai_ispcie(sii))
- ai_clkctl_xtal(&sii->pub, XTAL | PLL, ON);
- return true;
-}
-
-static bool ai_buscore_setup(struct si_info *sii, chipcregs_t *cc, uint bustype,
- u32 savewin, uint *origidx, void *regs)
-{
- bool pci, pcie;
- uint i;
- uint pciidx, pcieidx, pcirev, pcierev;
-
- cc = ai_setcoreidx(&sii->pub, SI_CC_IDX);
-
- /* get chipcommon rev */
- sii->pub.ccrev = (int)ai_corerev(&sii->pub);
-
- /* get chipcommon chipstatus */
- if (sii->pub.ccrev >= 11)
- sii->pub.chipst = R_REG(&cc->chipstatus);
-
- /* get chipcommon capabilites */
- sii->pub.cccaps = R_REG(&cc->capabilities);
- /* get chipcommon extended capabilities */
-
- if (sii->pub.ccrev >= 35)
- sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext);
-
- /* get pmu rev and caps */
- if (sii->pub.cccaps & CC_CAP_PMU) {
- sii->pub.pmucaps = R_REG(&cc->pmucapabilities);
- sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
- }
-
- /* figure out bus/orignal core idx */
- sii->pub.buscoretype = NODEV_CORE_ID;
- sii->pub.buscorerev = NOREV;
- sii->pub.buscoreidx = BADIDX;
-
- pci = pcie = false;
- pcirev = pcierev = NOREV;
- pciidx = pcieidx = BADIDX;
-
- for (i = 0; i < sii->numcores; i++) {
- uint cid, crev;
-
- ai_setcoreidx(&sii->pub, i);
- cid = ai_coreid(&sii->pub);
- crev = ai_corerev(&sii->pub);
-
- /* Display cores found */
- SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
- i, cid, crev, sii->coresba[i], sii->regs[i]));
-
- if (bustype == PCI_BUS) {
- if (cid == PCI_CORE_ID) {
- pciidx = i;
- pcirev = crev;
- pci = true;
- } else if (cid == PCIE_CORE_ID) {
- pcieidx = i;
- pcierev = crev;
- pcie = true;
- }
- }
-
- /* find the core idx before entering this func. */
- if ((savewin && (savewin == sii->coresba[i])) ||
- (regs == sii->regs[i]))
- *origidx = i;
- }
-
- if (pci && pcie) {
- if (ai_ispcie(sii))
- pci = false;
- else
- pcie = false;
- }
- if (pci) {
- sii->pub.buscoretype = PCI_CORE_ID;
- sii->pub.buscorerev = pcirev;
- sii->pub.buscoreidx = pciidx;
- } else if (pcie) {
- sii->pub.buscoretype = PCIE_CORE_ID;
- sii->pub.buscorerev = pcierev;
- sii->pub.buscoreidx = pcieidx;
- }
-
- SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx,
- sii->pub.buscoretype, sii->pub.buscorerev));
-
- /* fixup necessary chip/core configurations */
- if (sii->pub.bustype == PCI_BUS) {
- if (SI_FAST(sii)) {
- if (!sii->pch) {
- sii->pch = (void *)pcicore_init(
- &sii->pub, sii->pbus,
- (void *)PCIEREGS(sii));
- if (sii->pch == NULL)
- return false;
- }
- }
- if (ai_pci_fixcfg(&sii->pub)) {
- SI_ERROR(("si_doattach: si_pci_fixcfg failed\n"));
- return false;
- }
- }
-
- /* return to the original core */
- ai_setcoreidx(&sii->pub, *origidx);
-
- return true;
-}
-
-static __used void ai_nvram_process(struct si_info *sii, char *pvars)
-{
- uint w = 0;
-
- /* get boardtype and boardrev */
- switch (sii->pub.bustype) {
- case PCI_BUS:
- /* do a pci config read to get subsystem id and subvendor id */
- pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w);
- /* Let nvram variables override subsystem Vend/ID */
- sii->pub.boardvendor = (u16)ai_getdevpathintvar(&sii->pub,
- "boardvendor");
- if (sii->pub.boardvendor == 0)
- sii->pub.boardvendor = w & 0xffff;
- else
- SI_ERROR(("Overriding boardvendor: 0x%x instead of "
- "0x%x\n", sii->pub.boardvendor, w & 0xffff));
- sii->pub.boardtype = (u16)ai_getdevpathintvar(&sii->pub,
- "boardtype");
- if (sii->pub.boardtype == 0)
- sii->pub.boardtype = (w >> 16) & 0xffff;
- else
- SI_ERROR(("Overriding boardtype: 0x%x instead of 0x%x\n"
- , sii->pub.boardtype, (w >> 16) & 0xffff));
- break;
-
- sii->pub.boardvendor = getintvar(pvars, "manfid");
- sii->pub.boardtype = getintvar(pvars, "prodid");
- break;
-
- case SI_BUS:
- case JTAG_BUS:
- sii->pub.boardvendor = PCI_VENDOR_ID_BROADCOM;
- sii->pub.boardtype = getintvar(pvars, "prodid");
- if (pvars == NULL || (sii->pub.boardtype == 0)) {
- sii->pub.boardtype = getintvar(NULL, "boardtype");
- if (sii->pub.boardtype == 0)
- sii->pub.boardtype = 0xffff;
- }
- break;
- }
-
- if (sii->pub.boardtype == 0) {
- SI_ERROR(("si_doattach: unknown board type\n"));
- }
-
- sii->pub.boardflags = getintvar(pvars, "boardflags");
-}
-
-static struct si_info *ai_doattach(struct si_info *sii,
- void *regs, uint bustype, void *pbus,
- char **vars, uint *varsz)
-{
- struct si_pub *sih = &sii->pub;
- u32 w, savewin;
- chipcregs_t *cc;
- char *pvars = NULL;
- uint socitype;
- uint origidx;
-
- memset((unsigned char *) sii, 0, sizeof(struct si_info));
-
- savewin = 0;
-
- sih->buscoreidx = BADIDX;
-
- sii->curmap = regs;
- sii->pbus = pbus;
-
- /* check to see if we are a si core mimic'ing a pci core */
- if (bustype == PCI_BUS) {
- pci_read_config_dword(sii->pbus, PCI_SPROM_CONTROL, &w);
- if (w == 0xffffffff) {
- SI_ERROR(("%s: incoming bus is PCI but it's a lie, "
- " switching to SI devid:0x%x\n",
- __func__, devid));
- bustype = SI_BUS;
- }
- }
-
- /* find Chipcommon address */
- if (bustype == PCI_BUS) {
- pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
- if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
- savewin = SI_ENUM_BASE;
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
- SI_ENUM_BASE);
- cc = (chipcregs_t *) regs;
- } else {
- cc = (chipcregs_t *) REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
- }
-
- sih->bustype = bustype;
-
- /* bus/core/clk setup for register access */
- if (!ai_buscore_prep(sii, bustype)) {
- SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n",
- bustype));
- return NULL;
- }
-
- /*
- * ChipID recognition.
- * We assume we can read chipid at offset 0 from the regs arg.
- * If we add other chiptypes (or if we need to support old sdio
- * hosts w/o chipcommon), some way of recognizing them needs to
- * be added here.
- */
- w = R_REG(&cc->chipid);
- socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
- /* Might as wll fill in chip id rev & pkg */
- sih->chip = w & CID_ID_MASK;
- sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
- sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
-
- sih->issim = IS_SIM(sih->chippkg);
-
- /* scan for cores */
- if (socitype == SOCI_AI) {
- SI_MSG(("Found chip type AI (0x%08x)\n", w));
- /* pass chipc address instead of original core base */
- ai_scan(&sii->pub, (void *)cc);
- } else {
- SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
- return NULL;
- }
- /* no cores found, bail out */
- if (sii->numcores == 0) {
- SI_ERROR(("si_doattach: could not find any cores\n"));
- return NULL;
- }
- /* bus/core/clk setup */
- origidx = SI_CC_IDX;
- if (!ai_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
- SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
- goto exit;
- }
-
- /* Init nvram from sprom/otp if they exist */
- if (srom_var_init
- (&sii->pub, bustype, regs, vars, varsz)) {
- SI_ERROR(("si_doattach: srom_var_init failed: bad srom\n"));
- goto exit;
- }
- pvars = vars ? *vars : NULL;
- ai_nvram_process(sii, pvars);
-
- /* === NVRAM, clock is ready === */
- cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
- W_REG(&cc->gpiopullup, 0);
- W_REG(&cc->gpiopulldown, 0);
- ai_setcoreidx(sih, origidx);
-
- /* PMU specific initializations */
- if (PMUCTL_ENAB(sih)) {
- u32 xtalfreq;
- si_pmu_init(sih);
- si_pmu_chip_init(sih);
- xtalfreq = getintvar(pvars, "xtalfreq");
- /* If xtalfreq var not available, try to measure it */
- if (xtalfreq == 0)
- xtalfreq = si_pmu_measure_alpclk(sih);
- si_pmu_pll_init(sih, xtalfreq);
- si_pmu_res_init(sih);
- si_pmu_swreg_init(sih);
- }
-
- /* setup the GPIO based LED powersave register */
- w = getintvar(pvars, "leddc");
- if (w == 0)
- w = DEFAULT_GPIOTIMERVAL;
- ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, gpiotimerval), ~0, w);
-
- if (PCIE(sii)) {
- pcicore_attach(sii->pch, pvars, SI_DOATTACH);
- }
-
- if (sih->chip == BCM43224_CHIP_ID) {
- /*
- * enable 12 mA drive strenth for 43224 and
- * set chipControl register bit 15
- */
- if (sih->chiprev == 0) {
- SI_MSG(("Applying 43224A0 WARs\n"));
- ai_corereg(sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol),
- CCTRL43224_GPIO_TOGGLE,
- CCTRL43224_GPIO_TOGGLE);
- si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
- CCTRL_43224A0_12MA_LED_DRIVE);
- }
- if (sih->chiprev >= 1) {
- SI_MSG(("Applying 43224B0+ WARs\n"));
- si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
- CCTRL_43224B0_12MA_LED_DRIVE);
- }
- }
-
- if (sih->chip == BCM4313_CHIP_ID) {
- /*
- * enable 12 mA drive strenth for 4313 and
- * set chipControl register bit 1
- */
- SI_MSG(("Applying 4313 WARs\n"));
- si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
- CCTRL_4313_12MA_LED_DRIVE);
- }
-
- return sii;
- exit:
- if (sih->bustype == PCI_BUS) {
- if (sii->pch)
- pcicore_deinit(sii->pch);
- sii->pch = NULL;
- }
-
- return NULL;
-}
-
-/* may be called with core in reset */
-void ai_detach(struct si_pub *sih)
-{
- struct si_info *sii;
- uint idx;
-
- struct si_pub *si_local = NULL;
- memcpy(&si_local, &sih, sizeof(struct si_pub **));
-
- sii = SI_INFO(sih);
-
- if (sii == NULL)
- return;
-
- if (sih->bustype == SI_BUS)
- for (idx = 0; idx < SI_MAXCORES; idx++)
- if (sii->regs[idx]) {
- iounmap(sii->regs[idx]);
- sii->regs[idx] = NULL;
- }
-
- if (sih->bustype == PCI_BUS) {
- if (sii->pch)
- pcicore_deinit(sii->pch);
- sii->pch = NULL;
- }
-
- if (sii != &ksii)
- kfree(sii);
-}
-
-/* register driver interrupt disabling and restoring callback functions */
-void
-ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
- void *intrsrestore_fn,
- void *intrsenabled_fn, void *intr_arg)
-{
- struct si_info *sii;
-
- sii = SI_INFO(sih);
- sii->intr_arg = intr_arg;
- sii->intrsoff_fn = (si_intrsoff_t) intrsoff_fn;
- sii->intrsrestore_fn = (si_intrsrestore_t) intrsrestore_fn;
- sii->intrsenabled_fn = (si_intrsenabled_t) intrsenabled_fn;
- /* save current core id. when this function called, the current core
- * must be the core which provides driver functions(il, et, wl, etc.)
- */
- sii->dev_coreid = sii->coreid[sii->curidx];
-}
-
-void ai_deregister_intr_callback(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = SI_INFO(sih);
- sii->intrsoff_fn = NULL;
-}
-
-uint ai_coreid(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = SI_INFO(sih);
- return sii->coreid[sii->curidx];
-}
-
-uint ai_coreidx(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = SI_INFO(sih);
- return sii->curidx;
-}
-
-bool ai_backplane64(struct si_pub *sih)
-{
- return (sih->cccaps & CC_CAP_BKPLN64) != 0;
-}
-
-/* return index of coreid or BADIDX if not found */
-uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit)
-{
- struct si_info *sii;
- uint found;
- uint i;
-
- sii = SI_INFO(sih);
-
- found = 0;
-
- for (i = 0; i < sii->numcores; i++)
- if (sii->coreid[i] == coreid) {
- if (found == coreunit)
- return i;
- found++;
- }
-
- return BADIDX;
-}
-
-/*
- * This function changes logical "focus" to the indicated core;
- * must be called with interrupts off.
- * Moreover, callers should keep interrupts off during switching
- * out of and back to d11 core.
- */
-void *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit)
-{
- uint idx;
-
- idx = ai_findcoreidx(sih, coreid, coreunit);
- if (!GOODIDX(idx))
- return NULL;
-
- return ai_setcoreidx(sih, idx);
-}
-
-/* Turn off interrupt as required by ai_setcore, before switch core */
-void *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
- uint *intr_val)
-{
- void *cc;
- struct si_info *sii;
-
- sii = SI_INFO(sih);
-
- if (SI_FAST(sii)) {
- /* Overloading the origidx variable to remember the coreid,
- * this works because the core ids cannot be confused with
- * core indices.
- */
- *origidx = coreid;
- if (coreid == CC_CORE_ID)
- return (void *)CCREGS_FAST(sii);
- else if (coreid == sih->buscoretype)
- return (void *)PCIEREGS(sii);
- }
- INTR_OFF(sii, *intr_val);
- *origidx = sii->curidx;
- cc = ai_setcore(sih, coreid, 0);
- return cc;
-}
-
-/* restore coreidx and restore interrupt */
-void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val)
-{
- struct si_info *sii;
-
- sii = SI_INFO(sih);
- if (SI_FAST(sii)
- && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
- return;
-
- ai_setcoreidx(sih, coreid);
- INTR_RESTORE(sii, intr_val);
-}
-
-void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val)
-{
- struct si_info *sii = SI_INFO(sih);
- u32 *w = (u32 *) sii->curwrap;
- W_REG(w + (offset / 4), val);
- return;
-}
-
-/*
- * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
- * operation, switch back to the original core, and return the new value.
- *
- * When using the silicon backplane, no fiddling with interrupts or core
- * switches is needed.
- *
- * Also, when using pci/pcie, we can optimize away the core switching for pci
- * registers and (on newer pci cores) chipcommon registers.
- */
-uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val)
-{
- uint origidx = 0;
- u32 *r = NULL;
- uint w;
- uint intr_val = 0;
- bool fast = false;
- struct si_info *sii;
-
- sii = SI_INFO(sih);
-
- if (coreidx >= SI_MAXCORES)
- return 0;
-
- if (sih->bustype == SI_BUS) {
- /* If internal bus, we can always get at everything */
- fast = true;
- /* map if does not exist */
- if (!sii->regs[coreidx]) {
- sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
- SI_CORE_SIZE);
- }
- r = (u32 *) ((unsigned char *) sii->regs[coreidx] + regoff);
- } else if (sih->bustype == PCI_BUS) {
- /*
- * If pci/pcie, we can get at pci/pcie regs
- * and on newer cores to chipc
- */
- if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
- /* Chipc registers are mapped at 12KB */
-
- fast = true;
- r = (u32 *) ((char *)sii->curmap +
- PCI_16KB0_CCREGS_OFFSET + regoff);
- } else if (sii->pub.buscoreidx == coreidx) {
- /*
- * pci registers are at either in the last 2KB of
- * an 8KB window or, in pcie and pci rev 13 at 8KB
- */
- fast = true;
- if (SI_FAST(sii))
- r = (u32 *) ((char *)sii->curmap +
- PCI_16KB0_PCIREGS_OFFSET +
- regoff);
- else
- r = (u32 *) ((char *)sii->curmap +
- ((regoff >= SBCONFIGOFF) ?
- PCI_BAR0_PCISBR_OFFSET :
- PCI_BAR0_PCIREGS_OFFSET) +
- regoff);
- }
- }
-
- if (!fast) {
- INTR_OFF(sii, intr_val);
-
- /* save current core index */
- origidx = ai_coreidx(&sii->pub);
-
- /* switch core */
- r = (u32 *) ((unsigned char *) ai_setcoreidx(&sii->pub, coreidx)
- + regoff);
- }
-
- /* mask and set */
- if (mask || val) {
- w = (R_REG(r) & ~mask) | val;
- W_REG(r, w);
- }
-
- /* readback */
- w = R_REG(r);
-
- if (!fast) {
- /* restore core index */
- if (origidx != coreidx)
- ai_setcoreidx(&sii->pub, origidx);
-
- INTR_RESTORE(sii, intr_val);
- }
-
- return w;
-}
-
-void ai_core_disable(struct si_pub *sih, u32 bits)
-{
- struct si_info *sii;
- u32 dummy;
- struct aidmp *ai;
-
- sii = SI_INFO(sih);
-
- ai = sii->curwrap;
-
- /* if core is already in reset, just return */
- if (R_REG(&ai->resetctrl) & AIRC_RESET)
- return;
-
- W_REG(&ai->ioctrl, bits);
- dummy = R_REG(&ai->ioctrl);
- udelay(10);
-
- W_REG(&ai->resetctrl, AIRC_RESET);
- udelay(1);
-}
-
-/* reset and re-enable a core
- * inputs:
- * bits - core specific bits that are set during and after reset sequence
- * resetbits - core specific bits that are set only during reset sequence
- */
-void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 dummy;
-
- sii = SI_INFO(sih);
- ai = sii->curwrap;
-
- /*
- * Must do the disable sequence first to work
- * for arbitrary current core state.
- */
- ai_core_disable(sih, (bits | resetbits));
-
- /*
- * Now do the initialization sequence.
- */
- W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
- dummy = R_REG(&ai->ioctrl);
- W_REG(&ai->resetctrl, 0);
- udelay(1);
-
- W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
- dummy = R_REG(&ai->ioctrl);
- udelay(1);
-}
-
-/* return the slow clock source - LPO, XTAL, or PCI */
-static uint ai_slowclk_src(struct si_info *sii)
-{
- chipcregs_t *cc;
- u32 val;
-
- if (sii->pub.ccrev < 6) {
- if (sii->pub.bustype == PCI_BUS) {
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
- &val);
- if (val & PCI_CFG_GPIO_SCS)
- return SCC_SS_PCI;
- }
- return SCC_SS_XTAL;
- } else if (sii->pub.ccrev < 10) {
- cc = (chipcregs_t *) ai_setcoreidx(&sii->pub, sii->curidx);
- return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
- } else /* Insta-clock */
- return SCC_SS_XTAL;
-}
-
-/*
-* return the ILP (slowclock) min or max frequency
-* precondition: we've established the chip has dynamic clk control
-*/
-static uint ai_slowclk_freq(struct si_info *sii, bool max_freq, chipcregs_t *cc)
-{
- u32 slowclk;
- uint div;
-
- slowclk = ai_slowclk_src(sii);
- if (sii->pub.ccrev < 6) {
- if (slowclk == SCC_SS_PCI)
- return max_freq ? (PCIMAXFREQ / 64)
- : (PCIMINFREQ / 64);
- else
- return max_freq ? (XTALMAXFREQ / 32)
- : (XTALMINFREQ / 32);
- } else if (sii->pub.ccrev < 10) {
- div = 4 *
- (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >>
- SCC_CD_SHIFT) + 1);
- if (slowclk == SCC_SS_LPO)
- return max_freq ? LPOMAXFREQ : LPOMINFREQ;
- else if (slowclk == SCC_SS_XTAL)
- return max_freq ? (XTALMAXFREQ / div)
- : (XTALMINFREQ / div);
- else if (slowclk == SCC_SS_PCI)
- return max_freq ? (PCIMAXFREQ / div)
- : (PCIMINFREQ / div);
- } else {
- /* Chipc rev 10 is InstaClock */
- div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
- div = 4 * (div + 1);
- return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
- }
- return 0;
-}
-
-static void ai_clkctl_setdelay(struct si_info *sii, void *chipcregs)
-{
- chipcregs_t *cc = (chipcregs_t *) chipcregs;
- uint slowmaxfreq, pll_delay, slowclk;
- uint pll_on_delay, fref_sel_delay;
-
- pll_delay = PLL_DELAY;
-
- /*
- * If the slow clock is not sourced by the xtal then
- * add the xtal_on_delay since the xtal will also be
- * powered down by dynamic clk control logic.
- */
-
- slowclk = ai_slowclk_src(sii);
- if (slowclk != SCC_SS_XTAL)
- pll_delay += XTAL_ON_DELAY;
-
- /* Starting with 4318 it is ILP that is used for the delays */
- slowmaxfreq =
- ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc);
-
- pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
- fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
-
- W_REG(&cc->pll_on_delay, pll_on_delay);
- W_REG(&cc->fref_sel_delay, fref_sel_delay);
-}
-
-/* initialize power control delay registers */
-void ai_clkctl_init(struct si_pub *sih)
-{
- struct si_info *sii;
- uint origidx = 0;
- chipcregs_t *cc;
- bool fast;
-
- if (!CCCTL_ENAB(sih))
- return;
-
- sii = SI_INFO(sih);
- fast = SI_FAST(sii);
- if (!fast) {
- origidx = sii->curidx;
- cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
- if (cc == NULL)
- return;
- } else {
- cc = (chipcregs_t *) CCREGS_FAST(sii);
- if (cc == NULL)
- return;
- }
-
- /* set all Instaclk chip ILP to 1 MHz */
- if (sih->ccrev >= 10)
- SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK,
- (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
-
- ai_clkctl_setdelay(sii, (void *)cc);
-
- if (!fast)
- ai_setcoreidx(sih, origidx);
-}
-
-/*
- * return the value suitable for writing to the
- * dot11 core FAST_PWRUP_DELAY register
- */
-u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
-{
- struct si_info *sii;
- uint origidx = 0;
- chipcregs_t *cc;
- uint slowminfreq;
- u16 fpdelay;
- uint intr_val = 0;
- bool fast;
-
- sii = SI_INFO(sih);
- if (PMUCTL_ENAB(sih)) {
- INTR_OFF(sii, intr_val);
- fpdelay = si_pmu_fast_pwrup_delay(sih);
- INTR_RESTORE(sii, intr_val);
- return fpdelay;
- }
-
- if (!CCCTL_ENAB(sih))
- return 0;
-
- fast = SI_FAST(sii);
- fpdelay = 0;
- if (!fast) {
- origidx = sii->curidx;
- INTR_OFF(sii, intr_val);
- cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
- if (cc == NULL)
- goto done;
- } else {
- cc = (chipcregs_t *) CCREGS_FAST(sii);
- if (cc == NULL)
- goto done;
- }
-
- slowminfreq = ai_slowclk_freq(sii, false, cc);
- fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
- (slowminfreq - 1)) / slowminfreq;
-
- done:
- if (!fast) {
- ai_setcoreidx(sih, origidx);
- INTR_RESTORE(sii, intr_val);
- }
- return fpdelay;
-}
-
-/* turn primary xtal and/or pll off/on */
-int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
-{
- struct si_info *sii;
- u32 in, out, outen;
-
- sii = SI_INFO(sih);
-
- switch (sih->bustype) {
-
- case PCI_BUS:
- /* pcie core doesn't have any mapping to control the xtal pu */
- if (PCIE(sii))
- return -1;
-
- pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);
-
- /*
- * Avoid glitching the clock if GPRS is already using it.
- * We can't actually read the state of the PLLPD so we infer it
- * by the value of XTAL_PU which *is* readable via gpioin.
- */
- if (on && (in & PCI_CFG_GPIO_XTAL))
- return 0;
-
- if (what & XTAL)
- outen |= PCI_CFG_GPIO_XTAL;
- if (what & PLL)
- outen |= PCI_CFG_GPIO_PLL;
-
- if (on) {
- /* turn primary xtal on */
- if (what & XTAL) {
- out |= PCI_CFG_GPIO_XTAL;
- if (what & PLL)
- out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
- PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pbus,
- PCI_GPIO_OUTEN, outen);
- udelay(XTAL_ON_DELAY);
- }
-
- /* turn pll on */
- if (what & PLL) {
- out &= ~PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
- PCI_GPIO_OUT, out);
- mdelay(2);
- }
- } else {
- if (what & XTAL)
- out &= ~PCI_CFG_GPIO_XTAL;
- if (what & PLL)
- out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
- PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pbus,
- PCI_GPIO_OUTEN, outen);
- }
-
- default:
- return -1;
- }
-
- return 0;
-}
-
-/*
- * clock control policy function throught chipcommon
- *
- * set dynamic clk control mode (forceslow, forcefast, dynamic)
- * returns true if we are forcing fast clock
- * this is a wrapper over the next internal function
- * to allow flexible policy settings for outside caller
- */
-bool ai_clkctl_cc(struct si_pub *sih, uint mode)
-{
- struct si_info *sii;
-
- sii = SI_INFO(sih);
-
- /* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (sih->ccrev < 6)
- return false;
-
- if (PCI_FORCEHT(sii))
- return mode == CLK_FAST;
-
- return _ai_clkctl_cc(sii, mode);
-}
-
-/* clk control mechanism through chipcommon, no policy checking */
-static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
-{
- uint origidx = 0;
- chipcregs_t *cc;
- u32 scc;
- uint intr_val = 0;
- bool fast = SI_FAST(sii);
-
- /* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (sii->pub.ccrev < 6)
- return false;
-
- if (!fast) {
- INTR_OFF(sii, intr_val);
- origidx = sii->curidx;
-
- if ((sii->pub.bustype == SI_BUS) &&
- ai_setcore(&sii->pub, MIPS33_CORE_ID, 0) &&
- (ai_corerev(&sii->pub) <= 7) && (sii->pub.ccrev >= 10))
- goto done;
-
- cc = (chipcregs_t *) ai_setcore(&sii->pub, CC_CORE_ID, 0);
- } else {
- cc = (chipcregs_t *) CCREGS_FAST(sii);
- if (cc == NULL)
- goto done;
- }
-
- if (!CCCTL_ENAB(&sii->pub) && (sii->pub.ccrev < 20))
- goto done;
-
- switch (mode) {
- case CLK_FAST: /* FORCEHT, fast (pll) clock */
- if (sii->pub.ccrev < 10) {
- /*
- * don't forget to force xtal back
- * on before we clear SCC_DYN_XTAL..
- */
- ai_clkctl_xtal(&sii->pub, XTAL, ON);
- SET_REG(&cc->slow_clk_ctl,
- (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
- } else if (sii->pub.ccrev < 20) {
- OR_REG(&cc->system_clk_ctl, SYCC_HR);
- } else {
- OR_REG(&cc->clk_ctl_st, CCS_FORCEHT);
- }
-
- /* wait for the PLL */
- if (PMUCTL_ENAB(&sii->pub)) {
- u32 htavail = CCS_HTAVAIL;
- SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail)
- == 0), PMU_MAX_TRANSITION_DLY);
- } else {
- udelay(PLL_DELAY);
- }
- break;
-
- case CLK_DYNAMIC: /* enable dynamic clock control */
- if (sii->pub.ccrev < 10) {
- scc = R_REG(&cc->slow_clk_ctl);
- scc &= ~(SCC_FS | SCC_IP | SCC_XC);
- if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
- scc |= SCC_XC;
- W_REG(&cc->slow_clk_ctl, scc);
-
- /*
- * for dynamic control, we have to
- * release our xtal_pu "force on"
- */
- if (scc & SCC_XC)
- ai_clkctl_xtal(&sii->pub, XTAL, OFF);
- } else if (sii->pub.ccrev < 20) {
- /* Instaclock */
- AND_REG(&cc->system_clk_ctl, ~SYCC_HR);
- } else {
- AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);
- }
- break;
-
- default:
- break;
- }
-
- done:
- if (!fast) {
- ai_setcoreidx(&sii->pub, origidx);
- INTR_RESTORE(sii, intr_val);
- }
- return mode == CLK_FAST;
-}
-
-/* Build device path. Support SI, PCI, and JTAG for now. */
-int ai_devpath(struct si_pub *sih, char *path, int size)
-{
- int slen;
-
- if (!path || size <= 0)
- return -1;
-
- switch (sih->bustype) {
- case SI_BUS:
- case JTAG_BUS:
- slen = snprintf(path, (size_t) size, "sb/%u/", ai_coreidx(sih));
- break;
- case PCI_BUS:
- slen = snprintf(path, (size_t) size, "pci/%u/%u/",
- ((struct pci_dev *)((SI_INFO(sih))->pbus))->bus->number,
- PCI_SLOT(
- ((struct pci_dev *)((SI_INFO(sih))->pbus))->devfn));
- break;
-
- default:
- slen = -1;
- break;
- }
-
- if (slen < 0 || slen >= size) {
- path[0] = '\0';
- return -1;
- }
-
- return 0;
-}
-
-/* Get a variable, but only if it has a devpath prefix */
-char *ai_getdevpathvar(struct si_pub *sih, const char *name)
-{
- char varname[SI_DEVPATH_BUFSZ + 32];
-
- ai_devpathvar(sih, varname, sizeof(varname), name);
-
- return getvar(NULL, varname);
-}
-
-/* Get a variable, but only if it has a devpath prefix */
-int ai_getdevpathintvar(struct si_pub *sih, const char *name)
-{
-#if defined(BCMBUSTYPE) && (BCMBUSTYPE == SI_BUS)
- return getintvar(NULL, name);
-#else
- char varname[SI_DEVPATH_BUFSZ + 32];
-
- ai_devpathvar(sih, varname, sizeof(varname), name);
-
- return getintvar(NULL, varname);
-#endif
-}
-
-char *ai_getnvramflvar(struct si_pub *sih, const char *name)
-{
- return getvar(NULL, name);
-}
-
-/* Concatenate the dev path with a varname into the given 'var' buffer
- * and return the 'var' pointer. Nothing is done to the arguments if
- * len == 0 or var is NULL, var is still returned. On overflow, the
- * first char will be set to '\0'.
- */
-static char *ai_devpathvar(struct si_pub *sih, char *var, int len,
- const char *name)
-{
- uint path_len;
-
- if (!var || len <= 0)
- return var;
-
- if (ai_devpath(sih, var, len) == 0) {
- path_len = strlen(var);
-
- if (strlen(name) + 1 > (uint) (len - path_len))
- var[0] = '\0';
- else
- strncpy(var + path_len, name, len - path_len - 1);
- }
-
- return var;
-}
-
-/* return true if PCIE capability exists in the pci config space */
-static bool ai_ispcie(struct si_info *sii)
-{
- u8 cap_ptr;
-
- if (sii->pub.bustype != PCI_BUS)
- return false;
-
- cap_ptr =
- pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL,
- NULL);
- if (!cap_ptr)
- return false;
-
- return true;
-}
-
-bool ai_pci_war16165(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = SI_INFO(sih);
-
- return PCI(sii) && (sih->buscorerev <= 10);
-}
-
-void ai_pci_up(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = SI_INFO(sih);
-
- /* if not pci bus, we're done */
- if (sih->bustype != PCI_BUS)
- return;
-
- if (PCI_FORCEHT(sii))
- _ai_clkctl_cc(sii, CLK_FAST);
-
- if (PCIE(sii))
- pcicore_up(sii->pch, SI_PCIUP);
-
-}
-
-/* Unconfigure and/or apply various WARs when system is going to sleep mode */
-void ai_pci_sleep(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = SI_INFO(sih);
-
- pcicore_sleep(sii->pch);
-}
-
-/* Unconfigure and/or apply various WARs when going down */
-void ai_pci_down(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = SI_INFO(sih);
-
- /* if not pci bus, we're done */
- if (sih->bustype != PCI_BUS)
- return;
-
- /* release FORCEHT since chip is going to "down" state */
- if (PCI_FORCEHT(sii))
- _ai_clkctl_cc(sii, CLK_DYNAMIC);
-
- pcicore_down(sii->pch, SI_PCIDOWN);
-}
-
-/*
- * Configure the pci core for pci client (NIC) action
- * coremask is the bitvec of cores by index to be enabled.
- */
-void ai_pci_setup(struct si_pub *sih, uint coremask)
-{
- struct si_info *sii;
- void *regs = NULL;
- u32 siflag = 0, w;
- uint idx = 0;
-
- sii = SI_INFO(sih);
-
- if (sii->pub.bustype != PCI_BUS)
- return;
-
- if (PCI(sii)) {
- /* get current core index */
- idx = sii->curidx;
-
- /* we interrupt on this backplane flag number */
- siflag = ai_flag(sih);
-
- /* switch over to pci core */
- regs = ai_setcoreidx(sih, sii->pub.buscoreidx);
- }
-
- /*
- * Enable sb->pci interrupts. Assume
- * PCI rev 2.3 support was added in pci core rev 6 and things changed..
- */
- if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
- /* pci config write to set this core bit in PCIIntMask */
- pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
- w |= (coremask << PCI_SBIM_SHIFT);
- pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
- } else {
- /* set sbintvec bit for our flag number */
- ai_setint(sih, siflag);
- }
-
- if (PCI(sii)) {
- pcicore_pci_setup(sii->pch, regs);
-
- /* switch back to previous core */
- ai_setcoreidx(sih, idx);
- }
-}
-
-/*
- * Fixup SROMless PCI device's configuration.
- * The current core may be changed upon return.
- */
-int ai_pci_fixcfg(struct si_pub *sih)
-{
- uint origidx;
- void *regs = NULL;
-
- struct si_info *sii = SI_INFO(sih);
-
- /* Fixup PI in SROM shadow area to enable the correct PCI core access */
- /* save the current index */
- origidx = ai_coreidx(&sii->pub);
-
- /* check 'pi' is correct and fix it if not */
- regs = ai_setcore(&sii->pub, sii->pub.buscoretype, 0);
- pcicore_fixcfg(sii->pch, regs);
-
- /* restore the original index */
- ai_setcoreidx(&sii->pub, origidx);
-
- pcicore_hwup(sii->pch);
- return 0;
-}
-
-/* mask&set gpiocontrol bits */
-u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority)
-{
- uint regoff;
-
- regoff = 0;
-
- /* gpios could be shared on router platforms
- * ignore reservation if it's high priority (e.g., test apps)
- */
- if ((priority != GPIO_HI_PRIORITY) &&
- (sih->bustype == SI_BUS) && (val || mask)) {
- mask = priority ? (ai_gpioreservation & mask) :
- ((ai_gpioreservation | mask) & ~(ai_gpioreservation));
- val &= mask;
- }
-
- regoff = offsetof(chipcregs_t, gpiocontrol);
- return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
-}
-
-void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
-{
- struct si_info *sii;
- chipcregs_t *cc;
- uint origidx;
- u32 val;
-
- sii = SI_INFO(sih);
- origidx = ai_coreidx(sih);
-
- cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
-
- val = R_REG(&cc->chipcontrol);
-
- if (on) {
- if (sih->chippkg == 9 || sih->chippkg == 0xb) {
- /* Ext PA Controls for 4331 12x9 Package */
- W_REG(&cc->chipcontrol, val |
- (CCTRL4331_EXTPA_EN |
- CCTRL4331_EXTPA_ON_GPIO2_5));
- } else {
- /* Ext PA Controls for 4331 12x12 Package */
- W_REG(&cc->chipcontrol,
- val | (CCTRL4331_EXTPA_EN));
- }
- } else {
- val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
- W_REG(&cc->chipcontrol, val);
- }
-
- ai_setcoreidx(sih, origidx);
-}
-
-/* Enable BT-COEX & Ex-PA for 4313 */
-void ai_epa_4313war(struct si_pub *sih)
-{
- struct si_info *sii;
- chipcregs_t *cc;
- uint origidx;
-
- sii = SI_INFO(sih);
- origidx = ai_coreidx(sih);
-
- cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
-
- /* EPA Fix */
- W_REG(&cc->gpiocontrol,
- R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
-
- ai_setcoreidx(sih, origidx);
-}
-
-/* check if the device is removed */
-bool ai_deviceremoved(struct si_pub *sih)
-{
- u32 w;
- struct si_info *sii;
-
- sii = SI_INFO(sih);
-
- switch (sih->bustype) {
- case PCI_BUS:
- pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w);
- if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
- return true;
- break;
- }
- return false;
-}
-
-bool ai_is_sprom_available(struct si_pub *sih)
-{
- if (sih->ccrev >= 31) {
- struct si_info *sii;
- uint origidx;
- chipcregs_t *cc;
- u32 sromctrl;
-
- if ((sih->cccaps & CC_CAP_SROM) == 0)
- return false;
-
- sii = SI_INFO(sih);
- origidx = sii->curidx;
- cc = ai_setcoreidx(sih, SI_CC_IDX);
- sromctrl = R_REG(&cc->sromcontrol);
- ai_setcoreidx(sih, origidx);
- return sromctrl & SRC_PRESENT;
- }
-
- switch (sih->chip) {
- case BCM4313_CHIP_ID:
- return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
- default:
- return true;
- }
-}
-
-bool ai_is_otp_disabled(struct si_pub *sih)
-{
- switch (sih->chip) {
- case BCM4313_CHIP_ID:
- return (sih->chipst & CST4313_OTP_PRESENT) == 0;
- /* These chips always have their OTP on */
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- default:
- return false;
- }
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/aiutils.h b/drivers/staging/brcm80211/brcmsmac/aiutils.h
deleted file mode 100644
index e245c278beb..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/aiutils.h
+++ /dev/null
@@ -1,584 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_AIUTILS_H_
-#define _BRCM_AIUTILS_H_
-
-#include "types.h"
-
-/*
- * SOC Interconnect Address Map.
- * All regions may not exist on all chips.
- */
-/* Physical SDRAM */
-#define SI_SDRAM_BASE 0x00000000
-/* Host Mode sb2pcitranslation0 (64 MB) */
-#define SI_PCI_MEM 0x08000000
-#define SI_PCI_MEM_SZ (64 * 1024 * 1024)
-/* Host Mode sb2pcitranslation1 (64 MB) */
-#define SI_PCI_CFG 0x0c000000
-/* Byteswapped Physical SDRAM */
-#define SI_SDRAM_SWAPPED 0x10000000
-/* Region 2 for sdram (512 MB) */
-#define SI_SDRAM_R2 0x80000000
-
-#ifdef SI_ENUM_BASE_VARIABLE
-#define SI_ENUM_BASE (sii->pub.si_enum_base)
-#else
-#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */
-#endif /* SI_ENUM_BASE_VARIABLE */
-
-/* Wrapper space base */
-#define SI_WRAP_BASE 0x18100000
-/* each core gets 4Kbytes for registers */
-#define SI_CORE_SIZE 0x1000
-/*
- * Max cores (this is arbitrary, for software
- * convenience and could be changed if we
- * make any larger chips
- */
-#define SI_MAXCORES 16
-
-/* On-chip RAM on chips that also have DDR */
-#define SI_FASTRAM 0x19000000
-#define SI_FASTRAM_SWAPPED 0x19800000
-
-/* Flash Region 2 (region 1 shadowed here) */
-#define SI_FLASH2 0x1c000000
-/* Size of Flash Region 2 */
-#define SI_FLASH2_SZ 0x02000000
-/* ARM Cortex-M3 ROM */
-#define SI_ARMCM3_ROM 0x1e000000
-/* MIPS Flash Region 1 */
-#define SI_FLASH1 0x1fc00000
-/* MIPS Size of Flash Region 1 */
-#define SI_FLASH1_SZ 0x00400000
-/* ARM7TDMI-S ROM */
-#define SI_ARM7S_ROM 0x20000000
-/* ARM Cortex-M3 SRAM Region 2 */
-#define SI_ARMCM3_SRAM2 0x60000000
-/* ARM7TDMI-S SRAM Region 2 */
-#define SI_ARM7S_SRAM2 0x80000000
-/* ARM Flash Region 1 */
-#define SI_ARM_FLASH1 0xffff0000
-/* ARM Size of Flash Region 1 */
-#define SI_ARM_FLASH1_SZ 0x00010000
-
-/* Client Mode sb2pcitranslation2 (1 GB) */
-#define SI_PCI_DMA 0x40000000
-/* Client Mode sb2pcitranslation2 (1 GB) */
-#define SI_PCI_DMA2 0x80000000
-/* Client Mode sb2pcitranslation2 size in bytes */
-#define SI_PCI_DMA_SZ 0x40000000
-/* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), low 32 bits */
-#define SI_PCIE_DMA_L32 0x00000000
-/* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */
-#define SI_PCIE_DMA_H32 0x80000000
-
-/* core codes */
-#define NODEV_CORE_ID 0x700 /* Invalid coreid */
-#define CC_CORE_ID 0x800 /* chipcommon core */
-#define ILINE20_CORE_ID 0x801 /* iline20 core */
-#define SRAM_CORE_ID 0x802 /* sram core */
-#define SDRAM_CORE_ID 0x803 /* sdram core */
-#define PCI_CORE_ID 0x804 /* pci core */
-#define MIPS_CORE_ID 0x805 /* mips core */
-#define ENET_CORE_ID 0x806 /* enet mac core */
-#define CODEC_CORE_ID 0x807 /* v90 codec core */
-#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
-#define ADSL_CORE_ID 0x809 /* ADSL core */
-#define ILINE100_CORE_ID 0x80a /* iline100 core */
-#define IPSEC_CORE_ID 0x80b /* ipsec core */
-#define UTOPIA_CORE_ID 0x80c /* utopia core */
-#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
-#define SOCRAM_CORE_ID 0x80e /* internal memory core */
-#define MEMC_CORE_ID 0x80f /* memc sdram core */
-#define OFDM_CORE_ID 0x810 /* OFDM phy core */
-#define EXTIF_CORE_ID 0x811 /* external interface core */
-#define D11_CORE_ID 0x812 /* 802.11 MAC core */
-#define APHY_CORE_ID 0x813 /* 802.11a phy core */
-#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
-#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
-#define MIPS33_CORE_ID 0x816 /* mips3302 core */
-#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
-#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
-#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
-#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
-#define SDIOH_CORE_ID 0x81b /* sdio host core */
-#define ROBO_CORE_ID 0x81c /* roboswitch core */
-#define ATA100_CORE_ID 0x81d /* parallel ATA core */
-#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
-#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
-#define PCIE_CORE_ID 0x820 /* pci express core */
-#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
-#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
-#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
-#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
-#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
-#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
-#define PMU_CORE_ID 0x827 /* PMU core */
-#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
-#define SDIOD_CORE_ID 0x829 /* SDIO device core */
-#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
-#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
-#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
-#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
-#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
-#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
-#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
-#define SC_CORE_ID 0x831 /* shared common core */
-#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
-#define SPIH_CORE_ID 0x833 /* SPI host core */
-#define I2S_CORE_ID 0x834 /* I2S core */
-#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
-#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
-#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
-#define DEF_AI_COMP 0xfff /* Default component, in ai chips it
- * maps all unused address ranges
- */
-
-/* chipcommon being the first core: */
-#define SI_CC_IDX 0
-
-/* SOC Interconnect types (aka chip types) */
-#define SOCI_AI 1
-
-/* Common core control flags */
-#define SICF_BIST_EN 0x8000
-#define SICF_PME_EN 0x4000
-#define SICF_CORE_BITS 0x3ffc
-#define SICF_FGC 0x0002
-#define SICF_CLOCK_EN 0x0001
-
-/* Common core status flags */
-#define SISF_BIST_DONE 0x8000
-#define SISF_BIST_ERROR 0x4000
-#define SISF_GATED_CLK 0x2000
-#define SISF_DMA64 0x1000
-#define SISF_CORE_BITS 0x0fff
-
-/* A register that is common to all cores to
- * communicate w/PMU regarding clock control.
- */
-#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */
-
-/* clk_ctl_st register */
-#define CCS_FORCEALP 0x00000001 /* force ALP request */
-#define CCS_FORCEHT 0x00000002 /* force HT request */
-#define CCS_FORCEILP 0x00000004 /* force ILP request */
-#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */
-#define CCS_HTAREQ 0x00000010 /* HT Avail Request */
-#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */
-#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */
-#define CCS_ERSRC_REQ_SHIFT 8
-#define CCS_ALPAVAIL 0x00010000 /* ALP is available */
-#define CCS_HTAVAIL 0x00020000 /* HT is available */
-#define CCS_BP_ON_APL 0x00040000 /* RO: running on ALP clock */
-#define CCS_BP_ON_HT 0x00080000 /* RO: running on HT clock */
-#define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */
-#define CCS_ERSRC_STS_SHIFT 24
-
-/* HT avail in chipc and pcmcia on 4328a0 */
-#define CCS0_HTAVAIL 0x00010000
-/* ALP avail in chipc and pcmcia on 4328a0 */
-#define CCS0_ALPAVAIL 0x00020000
-
-/* Not really related to SOC Interconnect, but a couple of software
- * conventions for the use the flash space:
- */
-
-/* Minumum amount of flash we support */
-#define FLASH_MIN 0x00020000 /* Minimum flash size */
-
-/* A boot/binary may have an embedded block that describes its size */
-#define BISZ_OFFSET 0x3e0 /* At this offset into the binary */
-#define BISZ_MAGIC 0x4249535a /* Marked with value: 'BISZ' */
-#define BISZ_MAGIC_IDX 0 /* Word 0: magic */
-#define BISZ_TXTST_IDX 1 /* 1: text start */
-#define BISZ_TXTEND_IDX 2 /* 2: text end */
-#define BISZ_DATAST_IDX 3 /* 3: data start */
-#define BISZ_DATAEND_IDX 4 /* 4: data end */
-#define BISZ_BSSST_IDX 5 /* 5: bss start */
-#define BISZ_BSSEND_IDX 6 /* 6: bss end */
-#define BISZ_SIZE 7 /* descriptor size in 32-bit integers */
-
-#define CC_SROM_OTP 0x800 /* SROM/OTP address space */
-
-/* gpiotimerval */
-#define GPIO_ONTIME_SHIFT 16
-
-/* Fields in clkdiv */
-#define CLKD_OTP 0x000f0000
-#define CLKD_OTP_SHIFT 16
-
-/* When Srom support present, fields in sromcontrol */
-#define SRC_START 0x80000000
-#define SRC_BUSY 0x80000000
-#define SRC_OPCODE 0x60000000
-#define SRC_OP_READ 0x00000000
-#define SRC_OP_WRITE 0x20000000
-#define SRC_OP_WRDIS 0x40000000
-#define SRC_OP_WREN 0x60000000
-#define SRC_OTPSEL 0x00000010
-#define SRC_LOCK 0x00000008
-#define SRC_SIZE_MASK 0x00000006
-#define SRC_SIZE_1K 0x00000000
-#define SRC_SIZE_4K 0x00000002
-#define SRC_SIZE_16K 0x00000004
-#define SRC_SIZE_SHIFT 1
-#define SRC_PRESENT 0x00000001
-
-/* 4330 chip-specific ChipStatus register bits */
-#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) /* SDIO || gSPI */
-#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) /* USB || USBDA */
-#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) /* SDIO */
-#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) /* gSPI */
-#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) /* USB packet-oriented */
-#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) /* USB Direct Access */
-#define CST4330_OTP_PRESENT 0x00000010
-#define CST4330_LPO_AUTODET_EN 0x00000020
-#define CST4330_ARMREMAP_0 0x00000040
-#define CST4330_SPROM_PRESENT 0x00000080 /* takes priority over OTP if both set */
-#define CST4330_ILPDIV_EN 0x00000100
-#define CST4330_LPO_SEL 0x00000200
-#define CST4330_RES_INIT_MODE_SHIFT 10
-#define CST4330_RES_INIT_MODE_MASK 0x00000c00
-#define CST4330_CBUCK_MODE_SHIFT 12
-#define CST4330_CBUCK_MODE_MASK 0x00003000
-#define CST4330_CBUCK_POWER_OK 0x00004000
-#define CST4330_BB_PLL_LOCKED 0x00008000
-
-/* Package IDs */
-#define BCM4329_289PIN_PKG_ID 0 /* 4329 289-pin package id */
-#define BCM4329_182PIN_PKG_ID 1 /* 4329N 182-pin package id */
-#define BCM4717_PKG_ID 9 /* 4717 package id */
-#define BCM4718_PKG_ID 10 /* 4718 package id */
-#define HDLSIM_PKG_ID 14 /* HDL simulator package id */
-#define HWSIM_PKG_ID 15 /* Hardware simulator package id */
-#define BCM43224_FAB_SMIC 0xa /* the chip is manufactured by SMIC */
-
-/* these are router chips */
-#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */
-#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */
-#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */
-#define BCM5356_CHIP_ID 0x5356 /* 5356 chipcommon chipid */
-#define BCM5357_CHIP_ID 0x5357 /* 5357 chipcommon chipid */
-
-
-#define SI_INFO(sih) ((struct si_info *)sih)
-
-#define GOODCOREADDR(x, b) \
- (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
- IS_ALIGNED((x), SI_CORE_SIZE))
-#define GOODREGS(regs) \
- ((regs) != NULL && IS_ALIGNED((unsigned long)(regs), SI_CORE_SIZE))
-#define BADCOREADDR 0
-#define GOODIDX(idx) (((uint)idx) < SI_MAXCORES)
-#define NOREV -1 /* Invalid rev */
-
-/* Newer chips can access PCI/PCIE and CC core without requiring to change
- * PCI BAR0 WIN
- */
-#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \
- (((si)->pub.buscoretype == PCI_CORE_ID) && \
- (si)->pub.buscorerev >= 13))
-
-#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET))
-#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET))
-
-/*
- * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts
- * before after core switching to avoid invalid register accesss inside ISR.
- */
-#define INTR_OFF(si, intr_val) \
- if ((si)->intrsoff_fn && \
- (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
- intr_val = (*(si)->intrsoff_fn)((si)->intr_arg)
-#define INTR_RESTORE(si, intr_val) \
- if ((si)->intrsrestore_fn && \
- (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
- (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val)
-
-/* dynamic clock control defines */
-#define LPOMINFREQ 25000 /* low power oscillator min */
-#define LPOMAXFREQ 43000 /* low power oscillator max */
-#define XTALMINFREQ 19800000 /* 20 MHz - 1% */
-#define XTALMAXFREQ 20200000 /* 20 MHz + 1% */
-#define PCIMINFREQ 25000000 /* 25 MHz */
-#define PCIMAXFREQ 34000000 /* 33 MHz + fudge */
-
-#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */
-#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */
-
-#define PCI(si) (((si)->pub.bustype == PCI_BUS) && \
- ((si)->pub.buscoretype == PCI_CORE_ID))
-#define PCIE(si) (((si)->pub.bustype == PCI_BUS) && \
- ((si)->pub.buscoretype == PCIE_CORE_ID))
-#define PCI_FORCEHT(si) \
- (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
-
-/* GPIO Based LED powersave defines */
-#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */
-#define DEFAULT_GPIO_OFFTIME 90 /* Default: 10% on */
-
-#ifndef DEFAULT_GPIOTIMERVAL
-#define DEFAULT_GPIOTIMERVAL \
- ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
-#endif
-
-/*
- * Data structure to export all chip specific common variables
- * public (read-only) portion of aiutils handle returned by si_attach()
- */
-struct si_pub {
- uint bustype; /* SI_BUS, PCI_BUS */
- uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
- uint buscorerev; /* buscore rev */
- uint buscoreidx; /* buscore index */
- int ccrev; /* chip common core rev */
- u32 cccaps; /* chip common capabilities */
- u32 cccaps_ext; /* chip common capabilities extension */
- int pmurev; /* pmu core rev */
- u32 pmucaps; /* pmu capabilities */
- uint boardtype; /* board type */
- uint boardvendor; /* board vendor */
- uint boardflags; /* board flags */
- uint boardflags2; /* board flags2 */
- uint chip; /* chip number */
- uint chiprev; /* chip revision */
- uint chippkg; /* chip package option */
- u32 chipst; /* chip status */
- bool issim; /* chip is in simulation or emulation */
- uint socirev; /* SOC interconnect rev */
- bool pci_pr32414;
-
-};
-
-/*
- * Many of the routines below take an 'sih' handle as their first arg.
- * Allocate this by calling si_attach(). Free it by calling si_detach().
- * At any one time, the sih is logically focused on one particular si core
- * (the "current core").
- * Use si_setcore() or si_setcoreidx() to change the association to another core
- */
-
-#define BADIDX (SI_MAXCORES + 1)
-
-/* clkctl xtal what flags */
-#define XTAL 0x1 /* primary crystal oscillator (2050) */
-#define PLL 0x2 /* main chip pll */
-
-/* clkctl clk mode */
-#define CLK_FAST 0 /* force fast (pll) clock */
-#define CLK_DYNAMIC 2 /* enable dynamic clock control */
-
-/* GPIO usage priorities */
-#define GPIO_DRV_PRIORITY 0 /* Driver */
-#define GPIO_APP_PRIORITY 1 /* Application */
-#define GPIO_HI_PRIORITY 2 /* Highest priority. Ignore GPIO
- * reservation
- */
-
-/* GPIO pull up/down */
-#define GPIO_PULLUP 0
-#define GPIO_PULLDN 1
-
-/* GPIO event regtype */
-#define GPIO_REGEVT 0 /* GPIO register event */
-#define GPIO_REGEVT_INTMSK 1 /* GPIO register event int mask */
-#define GPIO_REGEVT_INTPOL 2 /* GPIO register event int polarity */
-
-/* device path */
-#define SI_DEVPATH_BUFSZ 16 /* min buffer size in bytes */
-
-/* SI routine enumeration: to be used by update function with multiple hooks */
-#define SI_DOATTACH 1
-#define SI_PCIDOWN 2
-#define SI_PCIUP 3
-
-/* PMU clock/power control */
-#if defined(BCMPMUCTL)
-#define PMUCTL_ENAB(sih) (BCMPMUCTL)
-#else
-#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU)
-#endif
-
-/* chipcommon clock/power control (exclusive with PMU's) */
-#if defined(BCMPMUCTL) && BCMPMUCTL
-#define CCCTL_ENAB(sih) (0)
-#define CCPLL_ENAB(sih) (0)
-#else
-#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL)
-#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK)
-#endif
-
-typedef void (*gpio_handler_t) (u32 stat, void *arg);
-
-/* External PA enable mask */
-#define GPIO_CTRL_EPA_EN_MASK 0x40
-
-#define SI_ERROR(args)
-
-#ifdef BCMDBG
-#define SI_MSG(args) printk args
-#else
-#define SI_MSG(args)
-#endif /* BCMDBG */
-
-/* Define SI_VMSG to printf for verbose debugging, but don't check it in */
-#define SI_VMSG(args)
-
-#define IS_SIM(chippkg) \
- ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
-
-typedef u32(*si_intrsoff_t) (void *intr_arg);
-typedef void (*si_intrsrestore_t) (void *intr_arg, u32 arg);
-typedef bool(*si_intrsenabled_t) (void *intr_arg);
-
-struct gpioh_item {
- void *arg;
- bool level;
- gpio_handler_t handler;
- u32 event;
- struct gpioh_item *next;
-};
-
-/* misc si info needed by some of the routines */
-struct si_info {
- struct si_pub pub; /* back plane public state (must be first) */
- void *pbus; /* handle to bus (pci/sdio/..) */
- uint dev_coreid; /* the core provides driver functions */
- void *intr_arg; /* interrupt callback function arg */
- si_intrsoff_t intrsoff_fn; /* turns chip interrupts off */
- si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */
- si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */
-
- void *pch; /* PCI/E core handle */
-
- char *vars;
- uint varsz;
-
- void *curmap; /* current regs va */
- void *regs[SI_MAXCORES]; /* other regs va */
-
- uint curidx; /* current core index */
- uint numcores; /* # discovered cores */
- uint coreid[SI_MAXCORES]; /* id of each core */
- u32 coresba[SI_MAXCORES]; /* backplane address of each core */
- void *regs2[SI_MAXCORES]; /* 2nd virtual address per core (usbh20) */
- u32 coresba2[SI_MAXCORES]; /* 2nd phys address per core (usbh20) */
- u32 coresba_size[SI_MAXCORES]; /* backplane address space size */
- u32 coresba2_size[SI_MAXCORES]; /* second address space size */
-
- void *curwrap; /* current wrapper va */
- void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
- u32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
-
- u32 cia[SI_MAXCORES]; /* erom cia entry for each core */
- u32 cib[SI_MAXCORES]; /* erom cia entry for each core */
- u32 oob_router; /* oob router registers for axi */
-};
-
-/* AMBA Interconnect exported externs */
-extern void ai_scan(struct si_pub *sih, void *regs);
-
-extern uint ai_flag(struct si_pub *sih);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern uint ai_coreidx(struct si_pub *sih);
-extern uint ai_corevendor(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern void *ai_setcoreidx(struct si_pub *sih, uint coreidx);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern int ai_numaddrspaces(struct si_pub *sih);
-extern u32 ai_addrspace(struct si_pub *sih, uint asidx);
-extern u32 ai_addrspacesize(struct si_pub *sih, uint asidx);
-extern void ai_write_wrap_reg(struct si_pub *sih, u32 offset, u32 val);
-
-/* === exported functions === */
-extern struct si_pub *ai_attach(void *regs, uint bustype,
- void *sdh, char **vars, uint *varsz);
-
-extern void ai_detach(struct si_pub *sih);
-extern bool ai_pci_war16165(struct si_pub *sih);
-
-extern uint ai_coreid(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val);
-extern void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit);
-extern void *ai_setcoreidx(struct si_pub *sih, uint coreidx);
-extern void *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit);
-extern void *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
- uint *intr_val);
-extern void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern u32 ai_alp_clock(struct si_pub *sih);
-extern u32 ai_ilp_clock(struct si_pub *sih);
-extern void ai_pci_setup(struct si_pub *sih, uint coremask);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern bool ai_backplane64(struct si_pub *sih);
-extern void ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
- void *intrsrestore_fn,
- void *intrsenabled_fn, void *intr_arg);
-extern void ai_deregister_intr_callback(struct si_pub *sih);
-extern void ai_clkctl_init(struct si_pub *sih);
-extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
-extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
-extern int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on);
-extern bool ai_deviceremoved(struct si_pub *sih);
-extern u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val,
- u8 priority);
-
-/* OTP status */
-extern bool ai_is_otp_disabled(struct si_pub *sih);
-
-/* SPROM availability */
-extern bool ai_is_sprom_available(struct si_pub *sih);
-
-/*
- * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
- * The returned path is NULL terminated and has trailing '/'.
- * Return 0 on success, nonzero otherwise.
- */
-extern int ai_devpath(struct si_pub *sih, char *path, int size);
-/* Read variable with prepending the devpath to the name */
-extern char *ai_getdevpathvar(struct si_pub *sih, const char *name);
-extern int ai_getdevpathintvar(struct si_pub *sih, const char *name);
-
-extern void ai_pci_sleep(struct si_pub *sih);
-extern void ai_pci_down(struct si_pub *sih);
-extern void ai_pci_up(struct si_pub *sih);
-extern int ai_pci_fixcfg(struct si_pub *sih);
-
-extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on);
-/* Enable Ex-PA for 4313 */
-extern void ai_epa_4313war(struct si_pub *sih);
-
-char *ai_getnvramflvar(struct si_pub *sih, const char *name);
-
-#endif /* _BRCM_AIUTILS_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/alloc.c b/drivers/staging/brcm80211/brcmsmac/alloc.c
deleted file mode 100644
index 7f8dd7b396b..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/alloc.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <brcmu_utils.h>
-#include "types.h"
-#include "pub.h"
-#include "main.h"
-#include "alloc.h"
-
-static struct brcms_bss_cfg *brcms_c_bsscfg_malloc(uint unit);
-static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg);
-static struct brcms_pub *brcms_c_pub_malloc(uint unit,
- uint *err, uint devid);
-static void brcms_c_pub_mfree(struct brcms_pub *pub);
-static void brcms_c_tunables_init(struct brcms_tunables *tunables, uint devid);
-
-static void brcms_c_tunables_init(struct brcms_tunables *tunables, uint devid)
-{
- tunables->ntxd = NTXD;
- tunables->nrxd = NRXD;
- tunables->rxbufsz = RXBUFSZ;
- tunables->nrxbufpost = NRXBUFPOST;
- tunables->maxscb = MAXSCB;
- tunables->ampdunummpdu = AMPDU_NUM_MPDU;
- tunables->maxpktcb = MAXPKTCB;
- tunables->maxucodebss = BRCMS_MAX_UCODE_BSS;
- tunables->maxucodebss4 = BRCMS_MAX_UCODE_BSS4;
- tunables->maxbss = MAXBSS;
- tunables->datahiwat = BRCMS_DATAHIWAT;
- tunables->ampdudatahiwat = BRCMS_AMPDUDATAHIWAT;
- tunables->rxbnd = RXBND;
- tunables->txsbnd = TXSBND;
-}
-
-static struct brcms_pub *brcms_c_pub_malloc(uint unit, uint *err, uint devid)
-{
- struct brcms_pub *pub;
-
- pub = kzalloc(sizeof(struct brcms_pub), GFP_ATOMIC);
- if (pub == NULL) {
- *err = 1001;
- goto fail;
- }
-
- pub->tunables = kzalloc(sizeof(struct brcms_tunables), GFP_ATOMIC);
- if (pub->tunables == NULL) {
- *err = 1028;
- goto fail;
- }
-
- /* need to init the tunables now */
- brcms_c_tunables_init(pub->tunables, devid);
-
- pub->multicast = kzalloc(ETH_ALEN * MAXMULTILIST, GFP_ATOMIC);
- if (pub->multicast == NULL) {
- *err = 1003;
- goto fail;
- }
-
- return pub;
-
- fail:
- brcms_c_pub_mfree(pub);
- return NULL;
-}
-
-static void brcms_c_pub_mfree(struct brcms_pub *pub)
-{
- if (pub == NULL)
- return;
-
- kfree(pub->multicast);
- kfree(pub->tunables);
- kfree(pub);
-}
-
-static struct brcms_bss_cfg *brcms_c_bsscfg_malloc(uint unit)
-{
- struct brcms_bss_cfg *cfg;
-
- cfg = kzalloc(sizeof(struct brcms_bss_cfg), GFP_ATOMIC);
- if (cfg == NULL)
- goto fail;
-
- cfg->current_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC);
- if (cfg->current_bss == NULL)
- goto fail;
-
- return cfg;
-
- fail:
- brcms_c_bsscfg_mfree(cfg);
- return NULL;
-}
-
-static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg)
-{
- if (cfg == NULL)
- return;
-
- kfree(cfg->maclist);
- kfree(cfg->current_bss);
- kfree(cfg);
-}
-
-static void brcms_c_bsscfg_ID_assign(struct brcms_c_info *wlc,
- struct brcms_bss_cfg *bsscfg)
-{
- bsscfg->ID = wlc->next_bsscfg_ID;
- wlc->next_bsscfg_ID++;
-}
-
-/*
- * The common driver entry routine. Error codes should be unique
- */
-struct brcms_c_info *brcms_c_attach_malloc(uint unit, uint *err, uint devid)
-{
- struct brcms_c_info *wlc;
-
- wlc = kzalloc(sizeof(struct brcms_c_info), GFP_ATOMIC);
- if (wlc == NULL) {
- *err = 1002;
- goto fail;
- }
-
- /* allocate struct brcms_c_pub state structure */
- wlc->pub = brcms_c_pub_malloc(unit, err, devid);
- if (wlc->pub == NULL) {
- *err = 1003;
- goto fail;
- }
- wlc->pub->wlc = wlc;
-
- /* allocate struct brcms_hardware state structure */
-
- wlc->hw = kzalloc(sizeof(struct brcms_hardware), GFP_ATOMIC);
- if (wlc->hw == NULL) {
- *err = 1005;
- goto fail;
- }
- wlc->hw->wlc = wlc;
-
- wlc->hw->bandstate[0] =
- kzalloc(sizeof(struct brcms_hw_band) * MAXBANDS, GFP_ATOMIC);
- if (wlc->hw->bandstate[0] == NULL) {
- *err = 1006;
- goto fail;
- } else {
- int i;
-
- for (i = 1; i < MAXBANDS; i++) {
- wlc->hw->bandstate[i] = (struct brcms_hw_band *)
- ((unsigned long)wlc->hw->bandstate[0] +
- (sizeof(struct brcms_hw_band) * i));
- }
- }
-
- wlc->modulecb =
- kzalloc(sizeof(struct modulecb) * BRCMS_MAXMODULES, GFP_ATOMIC);
- if (wlc->modulecb == NULL) {
- *err = 1009;
- goto fail;
- }
-
- wlc->default_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC);
- if (wlc->default_bss == NULL) {
- *err = 1010;
- goto fail;
- }
-
- wlc->cfg = brcms_c_bsscfg_malloc(unit);
- if (wlc->cfg == NULL) {
- *err = 1011;
- goto fail;
- }
- brcms_c_bsscfg_ID_assign(wlc, wlc->cfg);
-
- wlc->wsec_def_keys[0] =
- kzalloc(sizeof(struct wsec_key) * BRCMS_DEFAULT_KEYS,
- GFP_ATOMIC);
- if (wlc->wsec_def_keys[0] == NULL) {
- *err = 1015;
- goto fail;
- } else {
- int i;
- for (i = 1; i < BRCMS_DEFAULT_KEYS; i++) {
- wlc->wsec_def_keys[i] = (struct wsec_key *)
- ((unsigned long)wlc->wsec_def_keys[0] +
- (sizeof(struct wsec_key) * i));
- }
- }
-
- wlc->protection = kzalloc(sizeof(struct brcms_protection),
- GFP_ATOMIC);
- if (wlc->protection == NULL) {
- *err = 1016;
- goto fail;
- }
-
- wlc->stf = kzalloc(sizeof(struct brcms_stf), GFP_ATOMIC);
- if (wlc->stf == NULL) {
- *err = 1017;
- goto fail;
- }
-
- wlc->bandstate[0] =
- kzalloc(sizeof(struct brcms_band)*MAXBANDS, GFP_ATOMIC);
- if (wlc->bandstate[0] == NULL) {
- *err = 1025;
- goto fail;
- } else {
- int i;
-
- for (i = 1; i < MAXBANDS; i++) {
- wlc->bandstate[i] = (struct brcms_band *)
- ((unsigned long)wlc->bandstate[0]
- + (sizeof(struct brcms_band)*i));
- }
- }
-
- wlc->corestate = kzalloc(sizeof(struct brcms_core), GFP_ATOMIC);
- if (wlc->corestate == NULL) {
- *err = 1026;
- goto fail;
- }
-
- wlc->corestate->macstat_snapshot =
- kzalloc(sizeof(struct macstat), GFP_ATOMIC);
- if (wlc->corestate->macstat_snapshot == NULL) {
- *err = 1027;
- goto fail;
- }
-
- return wlc;
-
- fail:
- brcms_c_detach_mfree(wlc);
- return NULL;
-}
-
-void brcms_c_detach_mfree(struct brcms_c_info *wlc)
-{
- if (wlc == NULL)
- return;
-
- brcms_c_bsscfg_mfree(wlc->cfg);
- brcms_c_pub_mfree(wlc->pub);
- kfree(wlc->modulecb);
- kfree(wlc->default_bss);
- kfree(wlc->wsec_def_keys[0]);
- kfree(wlc->protection);
- kfree(wlc->stf);
- kfree(wlc->bandstate[0]);
- kfree(wlc->corestate->macstat_snapshot);
- kfree(wlc->corestate);
- kfree(wlc->hw->bandstate[0]);
- kfree(wlc->hw);
-
- /* free the wlc */
- kfree(wlc);
- wlc = NULL;
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/alloc.h b/drivers/staging/brcm80211/brcmsmac/alloc.h
deleted file mode 100644
index f465d304303..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/alloc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-extern struct brcms_c_info *brcms_c_attach_malloc(uint unit, uint *err,
- uint devid);
-extern void brcms_c_detach_mfree(struct brcms_c_info *wlc);
diff --git a/drivers/staging/brcm80211/brcmsmac/bmac.c b/drivers/staging/brcm80211/brcmsmac/bmac.c
deleted file mode 100644
index b25c5170556..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/bmac.c
+++ /dev/null
@@ -1,3593 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include <linux/pci.h>
-#include <net/mac80211.h>
-
-#include <brcm_hw_ids.h>
-#include <aiutils.h>
-#include <chipcommon.h>
-#include "types.h"
-#include "rate.h"
-#include "phy/phy_hal.h"
-#include "channel.h"
-#include "main.h"
-#include "ucode_loader.h"
-#include "mac80211_if.h"
-#include "bmac.h"
-
-#define TIMER_INTERVAL_WATCHDOG_BMAC 1000 /* watchdog timer, in unit of ms */
-
-#define SYNTHPU_DLY_APHY_US 3700 /* a phy synthpu_dly time in us */
-#define SYNTHPU_DLY_BPHY_US 1050 /* b/g phy synthpu_dly time in us, default */
-#define SYNTHPU_DLY_NPHY_US 2048 /* n phy REV3 synthpu_dly time in us, default */
-#define SYNTHPU_DLY_LPPHY_US 300 /* lpphy synthpu_dly time in us */
-
-#define SYNTHPU_DLY_PHY_US_QT 100 /* QT synthpu_dly time in us */
-
-#ifndef BMAC_DUP_TO_REMOVE
-
-#define ANTCNT 10 /* vanilla M_MAX_ANTCNT value */
-
-#endif /* BMAC_DUP_TO_REMOVE */
-
-#define DMAREG(wlc_hw, direction, fifonum) \
- ((direction == DMA_TX) ? \
- (void *)&(wlc_hw->regs->fifo64regs[fifonum].dmaxmt) : \
- (void *)&(wlc_hw->regs->fifo64regs[fifonum].dmarcv))
-
-#define APHY_SLOT_TIME 9
-#define BPHY_SLOT_TIME 20
-
-/*
- * The following table lists the buffer memory allocated to xmt fifos in HW.
- * the size is in units of 256bytes(one block), total size is HW dependent
- * ucode has default fifo partition, sw can overwrite if necessary
- *
- * This is documented in twiki under the topic UcodeTxFifo. Please ensure
- * the twiki is updated before making changes.
- */
-
-#define XMTFIFOTBL_STARTREV 20 /* Starting corerev for the fifo size table */
-
-static u16 xmtfifo_sz[][NFIFO] = {
- {20, 192, 192, 21, 17, 5}, /* corerev 20: 5120, 49152, 49152, 5376, 4352, 1280 */
- {9, 58, 22, 14, 14, 5}, /* corerev 21: 2304, 14848, 5632, 3584, 3584, 1280 */
- {20, 192, 192, 21, 17, 5}, /* corerev 22: 5120, 49152, 49152, 5376, 4352, 1280 */
- {20, 192, 192, 21, 17, 5}, /* corerev 23: 5120, 49152, 49152, 5376, 4352, 1280 */
- {9, 58, 22, 14, 14, 5}, /* corerev 24: 2304, 14848, 5632, 3584, 3584, 1280 */
-};
-
-static void brcms_b_clkctl_clk(struct brcms_hardware *wlc, uint mode);
-static void brcms_b_coreinit(struct brcms_c_info *wlc);
-
-/* used by wlc_wakeucode_init() */
-static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
- const struct d11init *inits);
-static void brcms_ucode_write(struct brcms_hardware *wlc_hw, const u32 ucode[],
- const uint nbytes);
-static void brcms_ucode_download(struct brcms_hardware *wlc);
-static void brcms_c_ucode_txant_set(struct brcms_hardware *wlc_hw);
-
-/* used by brcms_c_dpc() */
-static bool brcms_b_dotxstatus(struct brcms_hardware *wlc,
- struct tx_status *txs, u32 s2);
-static bool brcms_b_txstatus(struct brcms_hardware *wlc, bool bound,
- bool *fatal);
-static bool brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound);
-
-/* used by brcms_c_down() */
-static void brcms_c_flushqueues(struct brcms_c_info *wlc);
-
-static void brcms_c_write_mhf(struct brcms_hardware *wlc_hw, u16 *mhfs);
-static void brcms_c_mctrl_reset(struct brcms_hardware *wlc_hw);
-static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw);
-static bool brcms_b_tx_fifo_suspended(struct brcms_hardware *wlc_hw,
- uint tx_fifo);
-static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw,
- uint tx_fifo);
-static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw,
- uint tx_fifo);
-
-/* Low Level Prototypes */
-static int brcms_b_bandtype(struct brcms_hardware *wlc_hw);
-static void brcms_b_info_init(struct brcms_hardware *wlc_hw);
-static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want);
-static u16 brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset,
- u32 sel);
-static void brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset,
- u16 v, u32 sel);
-static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk);
-static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme);
-static void brcms_b_detach_dmapio(struct brcms_hardware *wlc_hw);
-static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw);
-static bool brcms_c_validboardtype(struct brcms_hardware *wlc);
-static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw);
-static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw);
-static char *brcms_c_get_macaddr(struct brcms_hardware *wlc_hw);
-static void brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init);
-static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw);
-static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool want,
- mbool flags);
-static void brcms_c_ucode_mute_override_set(struct brcms_hardware *wlc_hw);
-static void brcms_c_ucode_mute_override_clear(struct brcms_hardware *wlc_hw);
-static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc);
-static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask);
-static void brcms_c_gpio_init(struct brcms_c_info *wlc);
-static void brcms_c_write_hw_bcntemplate0(struct brcms_hardware *wlc_hw,
- void *bcn, int len);
-static void brcms_c_write_hw_bcntemplate1(struct brcms_hardware *wlc_hw,
- void *bcn, int len);
-static void brcms_b_bsinit(struct brcms_c_info *wlc, chanspec_t chanspec);
-static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit);
-static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
- chanspec_t chanspec);
-static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
- bool shortslot);
-static void brcms_upd_ofdm_pctl1_table(struct brcms_hardware *wlc_hw);
-static u16 brcms_b_ofdm_ratetable_offset(struct brcms_hardware *wlc_hw,
- u8 rate);
-
-/* === Low Level functions === */
-
-void brcms_b_set_shortslot(struct brcms_hardware *wlc_hw, bool shortslot)
-{
- wlc_hw->shortslot = shortslot;
-
- if (BAND_2G(brcms_b_bandtype(wlc_hw)) && wlc_hw->up) {
- brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
- brcms_b_update_slot_timing(wlc_hw, shortslot);
- brcms_c_enable_mac(wlc_hw->wlc);
- }
-}
-
-/*
- * Update the slot timing for standard 11b/g (20us slots)
- * or shortslot 11g (9us slots)
- * The PSM needs to be suspended for this call.
- */
-static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
- bool shortslot)
-{
- d11regs_t *regs;
-
- regs = wlc_hw->regs;
-
- if (shortslot) {
- /* 11g short slot: 11a timing */
- W_REG(&regs->ifs_slot, 0x0207); /* APHY_SLOT_TIME */
- brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME);
- } else {
- /* 11g long slot: 11b timing */
- W_REG(&regs->ifs_slot, 0x0212); /* BPHY_SLOT_TIME */
- brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME);
- }
-}
-
-static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
-{
- struct wiphy *wiphy = wlc_hw->wlc->wiphy;
-
- /* init microcode host flags */
- brcms_c_write_mhf(wlc_hw, wlc_hw->band->mhfs);
-
- /* do band-specific ucode IHR, SHM, and SCR inits */
- if (D11REV_IS(wlc_hw->corerev, 23)) {
- if (BRCMS_ISNPHY(wlc_hw->band)) {
- brcms_c_write_inits(wlc_hw, d11n0bsinitvals16);
- } else {
- wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
- " %d\n", __func__, wlc_hw->unit,
- wlc_hw->corerev);
- }
- } else {
- if (D11REV_IS(wlc_hw->corerev, 24)) {
- if (BRCMS_ISLCNPHY(wlc_hw->band)) {
- brcms_c_write_inits(wlc_hw,
- d11lcn0bsinitvals24);
- } else
- wiphy_err(wiphy, "%s: wl%d: unsupported phy in"
- " core rev %d\n", __func__,
- wlc_hw->unit, wlc_hw->corerev);
- } else {
- wiphy_err(wiphy, "%s: wl%d: unsupported corerev %d\n",
- __func__, wlc_hw->unit, wlc_hw->corerev);
- }
- }
-}
-
-/* switch to new band but leave it inactive */
-static u32 brcms_c_setband_inact(struct brcms_c_info *wlc,
- uint bandunit)
-{
- struct brcms_hardware *wlc_hw = wlc->hw;
- u32 macintmask;
-
- BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
-
- /* disable interrupts */
- macintmask = brcms_intrsoff(wlc->wl);
-
- /* radio off */
- wlc_phy_switch_radio(wlc_hw->band->pi, OFF);
-
- brcms_b_core_phy_clk(wlc_hw, OFF);
-
- brcms_c_setxband(wlc_hw, bandunit);
-
- return macintmask;
-}
-
-/* Process received frames */
-/*
- * Return true if more frames need to be processed. false otherwise.
- * Param 'bound' indicates max. # frames to process before break out.
- */
-static bool
-brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
-{
- struct sk_buff *p;
- struct sk_buff *head = NULL;
- struct sk_buff *tail = NULL;
- uint n = 0;
- uint bound_limit = bound ? wlc_hw->wlc->pub->tunables->rxbnd : -1;
- struct brcms_d11rxhdr *wlc_rxhdr = NULL;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- /* gather received frames */
- while ((p = dma_rx(wlc_hw->di[fifo]))) {
-
- if (!tail)
- head = tail = p;
- else {
- tail->prev = p;
- tail = p;
- }
-
- /* !give others some time to run! */
- if (++n >= bound_limit)
- break;
- }
-
- /* post more rbufs */
- dma_rxfill(wlc_hw->di[fifo]);
-
- /* process each frame */
- while ((p = head) != NULL) {
- head = head->prev;
- p->prev = NULL;
-
- wlc_rxhdr = (struct brcms_d11rxhdr *) p->data;
-
- /* compute the RSSI from d11rxhdr and record it in wlc_rxd11hr */
- wlc_phy_rssi_compute(wlc_hw->band->pi, wlc_rxhdr);
-
- brcms_c_recv(wlc_hw->wlc, p);
- }
-
- return n >= bound_limit;
-}
-
-/* second-level interrupt processing
- * Return true if another dpc needs to be re-scheduled. false otherwise.
- * Param 'bounded' indicates if applicable loops should be bounded.
- */
-bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
-{
- u32 macintstatus;
- struct brcms_hardware *wlc_hw = wlc->hw;
- d11regs_t *regs = wlc_hw->regs;
- bool fatal = false;
- struct wiphy *wiphy = wlc->wiphy;
-
- if (DEVICEREMOVED(wlc)) {
- wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
- __func__);
- brcms_down(wlc->wl);
- return false;
- }
-
- /* grab and clear the saved software intstatus bits */
- macintstatus = wlc->macintstatus;
- wlc->macintstatus = 0;
-
- BCMMSG(wlc->wiphy, "wl%d: macintstatus 0x%x\n",
- wlc_hw->unit, macintstatus);
-
- WARN_ON(macintstatus & MI_PRQ); /* PRQ Interrupt in non-MBSS */
-
- /* BCN template is available */
- /* ZZZ: Use AP_ACTIVE ? */
- if (AP_ENAB(wlc->pub) && (!APSTA_ENAB(wlc->pub))
- && (macintstatus & MI_BCNTPL)) {
- brcms_c_update_beacon(wlc);
- }
-
- /* tx status */
- if (macintstatus & MI_TFS) {
- if (brcms_b_txstatus(wlc->hw, bounded, &fatal))
- wlc->macintstatus |= MI_TFS;
- if (fatal) {
- wiphy_err(wiphy, "MI_TFS: fatal\n");
- goto fatal;
- }
- }
-
- if (macintstatus & (MI_TBTT | MI_DTIM_TBTT))
- brcms_c_tbtt(wlc);
-
- /* ATIM window end */
- if (macintstatus & MI_ATIMWINEND) {
- BCMMSG(wlc->wiphy, "end of ATIM window\n");
- OR_REG(&regs->maccommand, wlc->qvalid);
- wlc->qvalid = 0;
- }
-
- /* received data or control frame, MI_DMAINT is indication of RX_FIFO interrupt */
- if (macintstatus & MI_DMAINT)
- if (brcms_b_recv(wlc_hw, RX_FIFO, bounded))
- wlc->macintstatus |= MI_DMAINT;
-
- /* TX FIFO suspend/flush completion */
- if (macintstatus & MI_TXSTOP)
- brcms_b_tx_fifo_suspended(wlc_hw, TX_DATA_FIFO);
-
- /* noise sample collected */
- if (macintstatus & MI_BG_NOISE) {
- wlc_phy_noise_sample_intr(wlc_hw->band->pi);
- }
-
- if (macintstatus & MI_GP0) {
- wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d "
- "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
-
- printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
- __func__, wlc_hw->sih->chip,
- wlc_hw->sih->chiprev);
- /* big hammer */
- brcms_init(wlc->wl);
- }
-
- /* gptimer timeout */
- if (macintstatus & MI_TO) {
- W_REG(&regs->gptimer, 0);
- }
-
- if (macintstatus & MI_RFDISABLE) {
- BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the"
- " RF Disable Input\n", wlc_hw->unit);
- brcms_rfkill_set_hw_state(wlc->wl);
- }
-
- /* send any enq'd tx packets. Just makes sure to jump start tx */
- if (!pktq_empty(&wlc->pkt_queue->q))
- brcms_c_send_q(wlc);
-
- /* it isn't done and needs to be resched if macintstatus is non-zero */
- return wlc->macintstatus != 0;
-
- fatal:
- brcms_init(wlc->wl);
- return wlc->macintstatus != 0;
-}
-
-/* common low-level watchdog code */
-void brcms_b_watchdog(void *arg)
-{
- struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
- struct brcms_hardware *wlc_hw = wlc->hw;
-
- BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- if (!wlc_hw->up)
- return;
-
- /* increment second count */
- wlc_hw->now++;
-
- /* Check for FIFO error interrupts */
- brcms_b_fifoerrors(wlc_hw);
-
- /* make sure RX dma has buffers */
- dma_rxfill(wlc->hw->di[RX_FIFO]);
-
- wlc_phy_watchdog(wlc_hw->band->pi);
-}
-
-void
-brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, chanspec_t chanspec,
- bool mute, struct txpwr_limits *txpwr)
-{
- uint bandunit;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d: 0x%x\n", wlc_hw->unit, chanspec);
-
- wlc_hw->chanspec = chanspec;
-
- /* Switch bands if necessary */
- if (NBANDS_HW(wlc_hw) > 1) {
- bandunit = CHSPEC_BANDUNIT(chanspec);
- if (wlc_hw->band->bandunit != bandunit) {
- /* brcms_b_setband disables other bandunit,
- * use light band switch if not up yet
- */
- if (wlc_hw->up) {
- wlc_phy_chanspec_radio_set(wlc_hw->
- bandstate[bandunit]->
- pi, chanspec);
- brcms_b_setband(wlc_hw, bandunit, chanspec);
- } else {
- brcms_c_setxband(wlc_hw, bandunit);
- }
- }
- }
-
- wlc_phy_initcal_enable(wlc_hw->band->pi, !mute);
-
- if (!wlc_hw->up) {
- if (wlc_hw->clk)
- wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr,
- chanspec);
- wlc_phy_chanspec_radio_set(wlc_hw->band->pi, chanspec);
- } else {
- wlc_phy_chanspec_set(wlc_hw->band->pi, chanspec);
- wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr, chanspec);
-
- /* Update muting of the channel */
- brcms_b_mute(wlc_hw, mute, 0);
- }
-}
-
-int brcms_b_state_get(struct brcms_hardware *wlc_hw,
- struct brcms_b_state *state)
-{
- state->machwcap = wlc_hw->machwcap;
-
- return 0;
-}
-
-static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
-{
- uint i;
- char name[8];
- /* ucode host flag 2 needed for pio mode, independent of band and fifo */
- u16 pio_mhf2 = 0;
- struct brcms_hardware *wlc_hw = wlc->hw;
- uint unit = wlc_hw->unit;
- struct brcms_tunables *tune = wlc->pub->tunables;
- struct wiphy *wiphy = wlc->wiphy;
-
- /* name and offsets for dma_attach */
- snprintf(name, sizeof(name), "wl%d", unit);
-
- if (wlc_hw->di[0] == 0) { /* Init FIFOs */
- uint addrwidth;
- int dma_attach_err = 0;
- /* Find out the DMA addressing capability and let OS know
- * All the channels within one DMA core have 'common-minimum' same
- * capability
- */
- addrwidth =
- dma_addrwidth(wlc_hw->sih, DMAREG(wlc_hw, DMA_TX, 0));
-
- if (!wl_alloc_dma_resources(wlc_hw->wlc->wl, addrwidth)) {
- wiphy_err(wiphy, "wl%d: wlc_attach: alloc_dma_"
- "resources failed\n", unit);
- return false;
- }
-
- /*
- * FIFO 0
- * TX: TX_AC_BK_FIFO (TX AC Background data packets)
- * RX: RX_FIFO (RX data packets)
- */
- wlc_hw->di[0] = dma_attach(name, wlc_hw->sih,
- (wme ? DMAREG(wlc_hw, DMA_TX, 0) :
- NULL), DMAREG(wlc_hw, DMA_RX, 0),
- (wme ? tune->ntxd : 0), tune->nrxd,
- tune->rxbufsz, -1, tune->nrxbufpost,
- BRCMS_HWRXOFF, &brcm_msg_level);
- dma_attach_err |= (NULL == wlc_hw->di[0]);
-
- /*
- * FIFO 1
- * TX: TX_AC_BE_FIFO (TX AC Best-Effort data packets)
- * (legacy) TX_DATA_FIFO (TX data packets)
- * RX: UNUSED
- */
- wlc_hw->di[1] = dma_attach(name, wlc_hw->sih,
- DMAREG(wlc_hw, DMA_TX, 1), NULL,
- tune->ntxd, 0, 0, -1, 0, 0,
- &brcm_msg_level);
- dma_attach_err |= (NULL == wlc_hw->di[1]);
-
- /*
- * FIFO 2
- * TX: TX_AC_VI_FIFO (TX AC Video data packets)
- * RX: UNUSED
- */
- wlc_hw->di[2] = dma_attach(name, wlc_hw->sih,
- DMAREG(wlc_hw, DMA_TX, 2), NULL,
- tune->ntxd, 0, 0, -1, 0, 0,
- &brcm_msg_level);
- dma_attach_err |= (NULL == wlc_hw->di[2]);
- /*
- * FIFO 3
- * TX: TX_AC_VO_FIFO (TX AC Voice data packets)
- * (legacy) TX_CTL_FIFO (TX control & mgmt packets)
- */
- wlc_hw->di[3] = dma_attach(name, wlc_hw->sih,
- DMAREG(wlc_hw, DMA_TX, 3),
- NULL, tune->ntxd, 0, 0, -1,
- 0, 0, &brcm_msg_level);
- dma_attach_err |= (NULL == wlc_hw->di[3]);
-/* Cleaner to leave this as if with AP defined */
-
- if (dma_attach_err) {
- wiphy_err(wiphy, "wl%d: wlc_attach: dma_attach failed"
- "\n", unit);
- return false;
- }
-
- /* get pointer to dma engine tx flow control variable */
- for (i = 0; i < NFIFO; i++)
- if (wlc_hw->di[i])
- wlc_hw->txavail[i] =
- (uint *) dma_getvar(wlc_hw->di[i],
- "&txavail");
- }
-
- /* initial ucode host flags */
- brcms_c_mhfdef(wlc, wlc_hw->band->mhfs, pio_mhf2);
-
- return true;
-}
-
-static void brcms_b_detach_dmapio(struct brcms_hardware *wlc_hw)
-{
- uint j;
-
- for (j = 0; j < NFIFO; j++) {
- if (wlc_hw->di[j]) {
- dma_detach(wlc_hw->di[j]);
- wlc_hw->di[j] = NULL;
- }
- }
-}
-
-/* low level attach
- * run backplane attach, init nvram
- * run phy attach
- * initialize software state for each core and band
- * put the whole chip in reset(driver down state), no clock
- */
-int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, uint unit,
- bool piomode, void *regsva, uint bustype, void *btparam)
-{
- struct brcms_hardware *wlc_hw;
- d11regs_t *regs;
- char *macaddr = NULL;
- char *vars;
- uint err = 0;
- uint j;
- bool wme = false;
- struct shared_phy_params sha_params;
- struct wiphy *wiphy = wlc->wiphy;
-
- BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, vendor,
- device);
-
- wme = true;
-
- wlc_hw = wlc->hw;
- wlc_hw->wlc = wlc;
- wlc_hw->unit = unit;
- wlc_hw->band = wlc_hw->bandstate[0];
- wlc_hw->_piomode = piomode;
-
- /* populate struct brcms_hardware with default values */
- brcms_b_info_init(wlc_hw);
-
- /*
- * Do the hardware portion of the attach.
- * Also initialize software state that depends on the particular hardware
- * we are running.
- */
- wlc_hw->sih = ai_attach(regsva, bustype, btparam,
- &wlc_hw->vars, &wlc_hw->vars_size);
- if (wlc_hw->sih == NULL) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n",
- unit);
- err = 11;
- goto fail;
- }
- vars = wlc_hw->vars;
-
- /*
- * Get vendid/devid nvram overwrites, which could be different
- * than those the BIOS recognizes for devices on PCMCIA_BUS,
- * SDIO_BUS, and SROMless devices on PCI_BUS.
- */
-#ifdef BCMBUSTYPE
- bustype = BCMBUSTYPE;
-#endif
- if (bustype != SI_BUS) {
- char *var;
-
- var = getvar(vars, "vendid");
- if (var) {
- vendor = (u16) simple_strtoul(var, NULL, 0);
- wiphy_err(wiphy, "Overriding vendor id = 0x%x\n",
- vendor);
- }
- var = getvar(vars, "devid");
- if (var) {
- u16 devid = (u16) simple_strtoul(var, NULL, 0);
- if (devid != 0xffff) {
- device = devid;
- wiphy_err(wiphy, "Overriding device id = 0x%x"
- "\n", device);
- }
- }
-
- /* verify again the device is supported */
- if (!brcms_c_chipmatch(vendor, device)) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
- "vendor/device (0x%x/0x%x)\n",
- unit, vendor, device);
- err = 12;
- goto fail;
- }
- }
-
- wlc_hw->vendorid = vendor;
- wlc_hw->deviceid = device;
-
- /* set bar0 window to point at D11 core */
- wlc_hw->regs = (d11regs_t *) ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
- wlc_hw->corerev = ai_corerev(wlc_hw->sih);
-
- regs = wlc_hw->regs;
-
- wlc->regs = wlc_hw->regs;
-
- /* validate chip, chiprev and corerev */
- if (!brcms_c_isgoodchip(wlc_hw)) {
- err = 13;
- goto fail;
- }
-
- /* initialize power control registers */
- ai_clkctl_init(wlc_hw->sih);
-
- /* request fastclock and force fastclock for the rest of attach
- * bring the d11 core out of reset.
- * For PMU chips, the first wlc_clkctl_clk is no-op since core-clk is still false;
- * But it will be called again inside wlc_corereset, after d11 is out of reset.
- */
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
- brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
-
- if (!brcms_b_validate_chip_access(wlc_hw)) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: validate_chip_access "
- "failed\n", unit);
- err = 14;
- goto fail;
- }
-
- /* get the board rev, used just below */
- j = getintvar(vars, "boardrev");
- /* promote srom boardrev of 0xFF to 1 */
- if (j == BOARDREV_PROMOTABLE)
- j = BOARDREV_PROMOTED;
- wlc_hw->boardrev = (u16) j;
- if (!brcms_c_validboardtype(wlc_hw)) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom "
- "board type (0x%x)" " or revision level (0x%x)\n",
- unit, wlc_hw->sih->boardtype, wlc_hw->boardrev);
- err = 15;
- goto fail;
- }
- wlc_hw->sromrev = (u8) getintvar(vars, "sromrev");
- wlc_hw->boardflags = (u32) getintvar(vars, "boardflags");
- wlc_hw->boardflags2 = (u32) getintvar(vars, "boardflags2");
-
- if (wlc_hw->boardflags & BFL_NOPLLDOWN)
- brcms_b_pllreq(wlc_hw, true, BRCMS_PLLREQ_SHARED);
-
- if ((wlc_hw->sih->bustype == PCI_BUS)
- && (ai_pci_war16165(wlc_hw->sih)))
- wlc->war16165 = true;
-
- /* check device id(srom, nvram etc.) to set bands */
- if (wlc_hw->deviceid == BCM43224_D11N_ID ||
- wlc_hw->deviceid == BCM43224_D11N_ID_VEN1) {
- /* Dualband boards */
- wlc_hw->_nbands = 2;
- } else
- wlc_hw->_nbands = 1;
-
- if ((wlc_hw->sih->chip == BCM43225_CHIP_ID))
- wlc_hw->_nbands = 1;
-
- /* BMAC_NOTE: remove init of pub values when brcms_c_attach()
- * unconditionally does the init of these values
- */
- wlc->vendorid = wlc_hw->vendorid;
- wlc->deviceid = wlc_hw->deviceid;
- wlc->pub->sih = wlc_hw->sih;
- wlc->pub->corerev = wlc_hw->corerev;
- wlc->pub->sromrev = wlc_hw->sromrev;
- wlc->pub->boardrev = wlc_hw->boardrev;
- wlc->pub->boardflags = wlc_hw->boardflags;
- wlc->pub->boardflags2 = wlc_hw->boardflags2;
- wlc->pub->_nbands = wlc_hw->_nbands;
-
- wlc_hw->physhim = wlc_phy_shim_attach(wlc_hw, wlc->wl, wlc);
-
- if (wlc_hw->physhim == NULL) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: wlc_phy_shim_attach "
- "failed\n", unit);
- err = 25;
- goto fail;
- }
-
- /* pass all the parameters to wlc_phy_shared_attach in one struct */
- sha_params.sih = wlc_hw->sih;
- sha_params.physhim = wlc_hw->physhim;
- sha_params.unit = unit;
- sha_params.corerev = wlc_hw->corerev;
- sha_params.vars = vars;
- sha_params.vid = wlc_hw->vendorid;
- sha_params.did = wlc_hw->deviceid;
- sha_params.chip = wlc_hw->sih->chip;
- sha_params.chiprev = wlc_hw->sih->chiprev;
- sha_params.chippkg = wlc_hw->sih->chippkg;
- sha_params.sromrev = wlc_hw->sromrev;
- sha_params.boardtype = wlc_hw->sih->boardtype;
- sha_params.boardrev = wlc_hw->boardrev;
- sha_params.boardvendor = wlc_hw->sih->boardvendor;
- sha_params.boardflags = wlc_hw->boardflags;
- sha_params.boardflags2 = wlc_hw->boardflags2;
- sha_params.bustype = wlc_hw->sih->bustype;
- sha_params.buscorerev = wlc_hw->sih->buscorerev;
-
- /* alloc and save pointer to shared phy state area */
- wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params);
- if (!wlc_hw->phy_sh) {
- err = 16;
- goto fail;
- }
-
- /* initialize software state for each core and band */
- for (j = 0; j < NBANDS_HW(wlc_hw); j++) {
- /*
- * band0 is always 2.4Ghz
- * band1, if present, is 5Ghz
- */
-
- /* So if this is a single band 11a card, use band 1 */
- if (IS_SINGLEBAND_5G(wlc_hw->deviceid))
- j = BAND_5G_INDEX;
-
- brcms_c_setxband(wlc_hw, j);
-
- wlc_hw->band->bandunit = j;
- wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
- wlc->band->bandunit = j;
- wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
- wlc->core->coreidx = ai_coreidx(wlc_hw->sih);
-
- wlc_hw->machwcap = R_REG(&regs->machwcap);
- wlc_hw->machwcap_backup = wlc_hw->machwcap;
-
- /* init tx fifo size */
- wlc_hw->xmtfifo_sz =
- xmtfifo_sz[(wlc_hw->corerev - XMTFIFOTBL_STARTREV)];
-
- /* Get a phy for this band */
- wlc_hw->band->pi = wlc_phy_attach(wlc_hw->phy_sh,
- (void *)regs, brcms_b_bandtype(wlc_hw), vars,
- wlc->wiphy);
- if (wlc_hw->band->pi == NULL) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: wlc_phy_"
- "attach failed\n", unit);
- err = 17;
- goto fail;
- }
-
- wlc_phy_machwcap_set(wlc_hw->band->pi, wlc_hw->machwcap);
-
- wlc_phy_get_phyversion(wlc_hw->band->pi, &wlc_hw->band->phytype,
- &wlc_hw->band->phyrev,
- &wlc_hw->band->radioid,
- &wlc_hw->band->radiorev);
- wlc_hw->band->abgphy_encore =
- wlc_phy_get_encore(wlc_hw->band->pi);
- wlc->band->abgphy_encore = wlc_phy_get_encore(wlc_hw->band->pi);
- wlc_hw->band->core_flags =
- wlc_phy_get_coreflags(wlc_hw->band->pi);
-
- /* verify good phy_type & supported phy revision */
- if (BRCMS_ISNPHY(wlc_hw->band)) {
- if (NCONF_HAS(wlc_hw->band->phyrev))
- goto good_phy;
- else
- goto bad_phy;
- } else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
- if (LCNCONF_HAS(wlc_hw->band->phyrev))
- goto good_phy;
- else
- goto bad_phy;
- } else {
- bad_phy:
- wiphy_err(wiphy, "wl%d: brcms_b_attach: unsupported "
- "phy type/rev (%d/%d)\n", unit,
- wlc_hw->band->phytype, wlc_hw->band->phyrev);
- err = 18;
- goto fail;
- }
-
- good_phy:
- /* BMAC_NOTE: wlc->band->pi should not be set below and should be done in the
- * high level attach. However we can not make that change until all low level access
- * is changed to wlc_hw->band->pi. Instead do the wlc->band->pi init below, keeping
- * wlc_hw->band->pi as well for incremental update of low level fns, and cut over
- * low only init when all fns updated.
- */
- wlc->band->pi = wlc_hw->band->pi;
- wlc->band->phytype = wlc_hw->band->phytype;
- wlc->band->phyrev = wlc_hw->band->phyrev;
- wlc->band->radioid = wlc_hw->band->radioid;
- wlc->band->radiorev = wlc_hw->band->radiorev;
-
- /* default contention windows size limits */
- wlc_hw->band->CWmin = APHY_CWMIN;
- wlc_hw->band->CWmax = PHY_CWMAX;
-
- if (!brcms_b_attach_dmapio(wlc, j, wme)) {
- err = 19;
- goto fail;
- }
- }
-
- /* disable core to match driver "down" state */
- brcms_c_coredisable(wlc_hw);
-
- /* Match driver "down" state */
- if (wlc_hw->sih->bustype == PCI_BUS)
- ai_pci_down(wlc_hw->sih);
-
- /* register sb interrupt callback functions */
- ai_register_intr_callback(wlc_hw->sih, (void *)brcms_c_wlintrsoff,
- (void *)brcms_c_wlintrsrestore, NULL, wlc);
-
- /* turn off pll and xtal to match driver "down" state */
- brcms_b_xtal(wlc_hw, OFF);
-
- /* *********************************************************************
- * The hardware is in the DOWN state at this point. D11 core
- * or cores are in reset with clocks off, and the board PLLs
- * are off if possible.
- *
- * Beyond this point, wlc->sbclk == false and chip registers
- * should not be touched.
- *********************************************************************
- */
-
- /* init etheraddr state variables */
- macaddr = brcms_c_get_macaddr(wlc_hw);
- if (macaddr == NULL) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: macaddr not found\n",
- unit);
- err = 21;
- goto fail;
- }
- brcmu_ether_atoe(macaddr, wlc_hw->etheraddr);
- if (is_broadcast_ether_addr(wlc_hw->etheraddr) ||
- is_zero_ether_addr(wlc_hw->etheraddr)) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: bad macaddr %s\n",
- unit, macaddr);
- err = 22;
- goto fail;
- }
-
- BCMMSG(wlc->wiphy,
- "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
- wlc_hw->deviceid, wlc_hw->_nbands,
- wlc_hw->sih->boardtype, macaddr);
-
- return err;
-
- fail:
- wiphy_err(wiphy, "wl%d: brcms_b_attach: failed with err %d\n", unit,
- err);
- return err;
-}
-
-/*
- * Initialize brcms_c_info default values ...
- * may get overrides later in this function
- * BMAC_NOTES, move low out and resolve the dangling ones
- */
-static void brcms_b_info_init(struct brcms_hardware *wlc_hw)
-{
- struct brcms_c_info *wlc = wlc_hw->wlc;
-
- /* set default sw macintmask value */
- wlc->defmacintmask = DEF_MACINTMASK;
-
- /* various 802.11g modes */
- wlc_hw->shortslot = false;
-
- wlc_hw->SFBL = RETRY_SHORT_FB;
- wlc_hw->LFBL = RETRY_LONG_FB;
-
- /* default mac retry limits */
- wlc_hw->SRL = RETRY_SHORT_DEF;
- wlc_hw->LRL = RETRY_LONG_DEF;
- wlc_hw->chanspec = CH20MHZ_CHSPEC(1);
-}
-
-/*
- * low level detach
- */
-int brcms_b_detach(struct brcms_c_info *wlc)
-{
- uint i;
- struct brcms_hw_band *band;
- struct brcms_hardware *wlc_hw = wlc->hw;
- int callbacks;
-
- callbacks = 0;
-
- if (wlc_hw->sih) {
- /* detach interrupt sync mechanism since interrupt is disabled and per-port
- * interrupt object may has been freed. this must be done before sb core switch
- */
- ai_deregister_intr_callback(wlc_hw->sih);
-
- if (wlc_hw->sih->bustype == PCI_BUS)
- ai_pci_sleep(wlc_hw->sih);
- }
-
- brcms_b_detach_dmapio(wlc_hw);
-
- band = wlc_hw->band;
- for (i = 0; i < NBANDS_HW(wlc_hw); i++) {
- if (band->pi) {
- /* Detach this band's phy */
- wlc_phy_detach(band->pi);
- band->pi = NULL;
- }
- band = wlc_hw->bandstate[OTHERBANDUNIT(wlc)];
- }
-
- /* Free shared phy state */
- kfree(wlc_hw->phy_sh);
-
- wlc_phy_shim_detach(wlc_hw->physhim);
-
- /* free vars */
- kfree(wlc_hw->vars);
- wlc_hw->vars = NULL;
-
- if (wlc_hw->sih) {
- ai_detach(wlc_hw->sih);
- wlc_hw->sih = NULL;
- }
-
- return callbacks;
-
-}
-
-void brcms_b_reset(struct brcms_hardware *wlc_hw)
-{
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- /* reset the core */
- if (!DEVICEREMOVED(wlc_hw->wlc))
- brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
-
- /* purge the dma rings */
- brcms_c_flushqueues(wlc_hw->wlc);
-
- brcms_c_reset_bmac_done(wlc_hw->wlc);
-}
-
-void
-brcms_b_init(struct brcms_hardware *wlc_hw, chanspec_t chanspec,
- bool mute) {
- u32 macintmask;
- bool fastclk;
- struct brcms_c_info *wlc = wlc_hw->wlc;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- /* request FAST clock if not on */
- fastclk = wlc_hw->forcefastclk;
- if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
-
- /* disable interrupts */
- macintmask = brcms_intrsoff(wlc->wl);
-
- /* set up the specified band and chanspec */
- brcms_c_setxband(wlc_hw, CHSPEC_BANDUNIT(chanspec));
- wlc_phy_chanspec_radio_set(wlc_hw->band->pi, chanspec);
-
- /* do one-time phy inits and calibration */
- wlc_phy_cal_init(wlc_hw->band->pi);
-
- /* core-specific initialization */
- brcms_b_coreinit(wlc);
-
- /* suspend the tx fifos and mute the phy for preism cac time */
- if (mute)
- brcms_b_mute(wlc_hw, ON, PHY_MUTE_FOR_PREISM);
-
- /* band-specific inits */
- brcms_b_bsinit(wlc, chanspec);
-
- /* restore macintmask */
- brcms_intrsrestore(wlc->wl, macintmask);
-
- /* seed wake_override with BRCMS_WAKE_OVERRIDE_MACSUSPEND since the mac
- * is suspended and brcms_c_enable_mac() will clear this override bit.
- */
- mboolset(wlc_hw->wake_override, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
-
- /*
- * initialize mac_suspend_depth to 1 to match ucode initial suspended state
- */
- wlc_hw->mac_suspend_depth = 1;
-
- /* restore the clk */
- if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
-}
-
-int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
-{
- uint coremask;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- /*
- * Enable pll and xtal, initialize the power control registers,
- * and force fastclock for the remainder of brcms_c_up().
- */
- brcms_b_xtal(wlc_hw, ON);
- ai_clkctl_init(wlc_hw->sih);
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
-
- /*
- * Configure pci/pcmcia here instead of in brcms_c_attach()
- * to allow mfg hotswap: down, hotswap (chip power cycle), up.
- */
- coremask = (1 << wlc_hw->wlc->core->coreidx);
-
- if (wlc_hw->sih->bustype == PCI_BUS)
- ai_pci_setup(wlc_hw->sih, coremask);
-
- /*
- * Need to read the hwradio status here to cover the case where the system
- * is loaded with the hw radio disabled. We do not want to bring the driver up in this case.
- */
- if (brcms_b_radio_read_hwdisabled(wlc_hw)) {
- /* put SB PCI in down state again */
- if (wlc_hw->sih->bustype == PCI_BUS)
- ai_pci_down(wlc_hw->sih);
- brcms_b_xtal(wlc_hw, OFF);
- return -ENOMEDIUM;
- }
-
- if (wlc_hw->sih->bustype == PCI_BUS)
- ai_pci_up(wlc_hw->sih);
-
- /* reset the d11 core */
- brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
-
- return 0;
-}
-
-int brcms_b_up_finish(struct brcms_hardware *wlc_hw)
-{
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- wlc_hw->up = true;
- wlc_phy_hw_state_upd(wlc_hw->band->pi, true);
-
- /* FULLY enable dynamic power control and d11 core interrupt */
- brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
- brcms_intrson(wlc_hw->wlc->wl);
- return 0;
-}
-
-int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw)
-{
- bool dev_gone;
- uint callbacks = 0;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- if (!wlc_hw->up)
- return callbacks;
-
- dev_gone = DEVICEREMOVED(wlc_hw->wlc);
-
- /* disable interrupts */
- if (dev_gone)
- wlc_hw->wlc->macintmask = 0;
- else {
- /* now disable interrupts */
- brcms_intrsoff(wlc_hw->wlc->wl);
-
- /* ensure we're running on the pll clock again */
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
- }
- /* down phy at the last of this stage */
- callbacks += wlc_phy_down(wlc_hw->band->pi);
-
- return callbacks;
-}
-
-int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
-{
- uint callbacks = 0;
- bool dev_gone;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- if (!wlc_hw->up)
- return callbacks;
-
- wlc_hw->up = false;
- wlc_phy_hw_state_upd(wlc_hw->band->pi, false);
-
- dev_gone = DEVICEREMOVED(wlc_hw->wlc);
-
- if (dev_gone) {
- wlc_hw->sbclk = false;
- wlc_hw->clk = false;
- wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
-
- /* reclaim any posted packets */
- brcms_c_flushqueues(wlc_hw->wlc);
- } else {
-
- /* Reset and disable the core */
- if (ai_iscoreup(wlc_hw->sih)) {
- if (R_REG(&wlc_hw->regs->maccontrol) &
- MCTL_EN_MAC)
- brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
- callbacks += brcms_reset(wlc_hw->wlc->wl);
- brcms_c_coredisable(wlc_hw);
- }
-
- /* turn off primary xtal and pll */
- if (!wlc_hw->noreset) {
- if (wlc_hw->sih->bustype == PCI_BUS)
- ai_pci_down(wlc_hw->sih);
- brcms_b_xtal(wlc_hw, OFF);
- }
- }
-
- return callbacks;
-}
-
-void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw)
-{
- /* delay before first read of ucode state */
- udelay(40);
-
- /* wait until ucode is no longer asleep */
- SPINWAIT((brcms_b_read_shm(wlc_hw, M_UCODE_DBGST) ==
- DBGST_ASLEEP), wlc_hw->wlc->fastpwrup_dly);
-}
-
-void brcms_b_hw_etheraddr(struct brcms_hardware *wlc_hw, u8 *ea)
-{
- memcpy(ea, wlc_hw->etheraddr, ETH_ALEN);
-}
-
-static int brcms_b_bandtype(struct brcms_hardware *wlc_hw)
-{
- return wlc_hw->band->bandtype;
-}
-
-/* control chip clock to save power, enable dynamic clock or force fast clock */
-static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
-{
- if (PMUCTL_ENAB(wlc_hw->sih)) {
- /* new chips with PMU, CCS_FORCEHT will distribute the HT clock on backplane,
- * but mac core will still run on ALP(not HT) when it enters powersave mode,
- * which means the FCA bit may not be set.
- * should wakeup mac if driver wants it to run on HT.
- */
-
- if (wlc_hw->clk) {
- if (mode == CLK_FAST) {
- OR_REG(&wlc_hw->regs->clk_ctl_st,
- CCS_FORCEHT);
-
- udelay(64);
-
- SPINWAIT(((R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL) == 0),
- PMU_MAX_TRANSITION_DLY);
- WARN_ON(!(R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL));
- } else {
- if ((wlc_hw->sih->pmurev == 0) &&
- (R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & (CCS_FORCEHT | CCS_HTAREQ)))
- SPINWAIT(((R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL)
- == 0),
- PMU_MAX_TRANSITION_DLY);
- AND_REG(&wlc_hw->regs->clk_ctl_st,
- ~CCS_FORCEHT);
- }
- }
- wlc_hw->forcefastclk = (mode == CLK_FAST);
- } else {
-
- /* old chips w/o PMU, force HT through cc,
- * then use FCA to verify mac is running fast clock
- */
-
- wlc_hw->forcefastclk = ai_clkctl_cc(wlc_hw->sih, mode);
-
- /* check fast clock is available (if core is not in reset) */
- if (wlc_hw->forcefastclk && wlc_hw->clk)
- WARN_ON(!(ai_core_sflags(wlc_hw->sih, 0, 0) &
- SISF_FCLKA));
-
- /* keep the ucode wake bit on if forcefastclk is on
- * since we do not want ucode to put us back to slow clock
- * when it dozes for PM mode.
- * Code below matches the wake override bit with current forcefastclk state
- * Only setting bit in wake_override instead of waking ucode immediately
- * since old code (wlc.c 1.4499) had this behavior. Older code set
- * wlc->forcefastclk but only had the wake happen if the wakup_ucode work
- * (protected by an up check) was executed just below.
- */
- if (wlc_hw->forcefastclk)
- mboolset(wlc_hw->wake_override,
- BRCMS_WAKE_OVERRIDE_FORCEFAST);
- else
- mboolclr(wlc_hw->wake_override,
- BRCMS_WAKE_OVERRIDE_FORCEFAST);
- }
-}
-
-/* set initial host flags value */
-static void
-brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init)
-{
- struct brcms_hardware *wlc_hw = wlc->hw;
-
- memset(mhfs, 0, MHFMAX * sizeof(u16));
-
- mhfs[MHF2] |= mhf2_init;
-
- /* prohibit use of slowclock on multifunction boards */
- if (wlc_hw->boardflags & BFL_NOPLLDOWN)
- mhfs[MHF1] |= MHF1_FORCEFASTCLK;
-
- if (BRCMS_ISNPHY(wlc_hw->band) && NREV_LT(wlc_hw->band->phyrev, 2)) {
- mhfs[MHF2] |= MHF2_NPHY40MHZ_WAR;
- mhfs[MHF1] |= MHF1_IQSWAP_WAR;
- }
-}
-
-/* set or clear ucode host flag bits
- * it has an optimization for no-change write
- * it only writes through shared memory when the core has clock;
- * pre-CLK changes should use wlc_write_mhf to get around the optimization
- *
- *
- * bands values are: BRCM_BAND_AUTO <--- Current band only
- * BRCM_BAND_5G <--- 5G band only
- * BRCM_BAND_2G <--- 2G band only
- * BRCM_BAND_ALL <--- All bands
- */
-void
-brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, u16 val,
- int bands)
-{
- u16 save;
- u16 addr[MHFMAX] = {
- M_HOST_FLAGS1, M_HOST_FLAGS2, M_HOST_FLAGS3, M_HOST_FLAGS4,
- M_HOST_FLAGS5
- };
- struct brcms_hw_band *band;
-
- if ((val & ~mask) || idx >= MHFMAX)
- return; /* error condition */
-
- switch (bands) {
- /* Current band only or all bands,
- * then set the band to current band
- */
- case BRCM_BAND_AUTO:
- case BRCM_BAND_ALL:
- band = wlc_hw->band;
- break;
- case BRCM_BAND_5G:
- band = wlc_hw->bandstate[BAND_5G_INDEX];
- break;
- case BRCM_BAND_2G:
- band = wlc_hw->bandstate[BAND_2G_INDEX];
- break;
- default:
- band = NULL; /* error condition */
- }
-
- if (band) {
- save = band->mhfs[idx];
- band->mhfs[idx] = (band->mhfs[idx] & ~mask) | val;
-
- /* optimization: only write through if changed, and
- * changed band is the current band
- */
- if (wlc_hw->clk && (band->mhfs[idx] != save)
- && (band == wlc_hw->band))
- brcms_b_write_shm(wlc_hw, addr[idx],
- (u16) band->mhfs[idx]);
- }
-
- if (bands == BRCM_BAND_ALL) {
- wlc_hw->bandstate[0]->mhfs[idx] =
- (wlc_hw->bandstate[0]->mhfs[idx] & ~mask) | val;
- wlc_hw->bandstate[1]->mhfs[idx] =
- (wlc_hw->bandstate[1]->mhfs[idx] & ~mask) | val;
- }
-}
-
-u16 brcms_b_mhf_get(struct brcms_hardware *wlc_hw, u8 idx, int bands)
-{
- struct brcms_hw_band *band;
-
- if (idx >= MHFMAX)
- return 0; /* error condition */
- switch (bands) {
- case BRCM_BAND_AUTO:
- band = wlc_hw->band;
- break;
- case BRCM_BAND_5G:
- band = wlc_hw->bandstate[BAND_5G_INDEX];
- break;
- case BRCM_BAND_2G:
- band = wlc_hw->bandstate[BAND_2G_INDEX];
- break;
- default:
- band = NULL; /* error condition */
- }
-
- if (!band)
- return 0;
-
- return band->mhfs[idx];
-}
-
-static void brcms_c_write_mhf(struct brcms_hardware *wlc_hw, u16 *mhfs)
-{
- u8 idx;
- u16 addr[] = {
- M_HOST_FLAGS1, M_HOST_FLAGS2, M_HOST_FLAGS3, M_HOST_FLAGS4,
- M_HOST_FLAGS5
- };
-
- for (idx = 0; idx < MHFMAX; idx++) {
- brcms_b_write_shm(wlc_hw, addr[idx], mhfs[idx]);
- }
-}
-
-/* set the maccontrol register to desired reset state and
- * initialize the sw cache of the register
- */
-static void brcms_c_mctrl_reset(struct brcms_hardware *wlc_hw)
-{
- /* IHR accesses are always enabled, PSM disabled, HPS off and WAKE on */
- wlc_hw->maccontrol = 0;
- wlc_hw->suspended_fifos = 0;
- wlc_hw->wake_override = 0;
- wlc_hw->mute_override = 0;
- brcms_b_mctrl(wlc_hw, ~0, MCTL_IHR_EN | MCTL_WAKE);
-}
-
-/* set or clear maccontrol bits */
-void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val)
-{
- u32 maccontrol;
- u32 new_maccontrol;
-
- if (val & ~mask)
- return; /* error condition */
- maccontrol = wlc_hw->maccontrol;
- new_maccontrol = (maccontrol & ~mask) | val;
-
- /* if the new maccontrol value is the same as the old, nothing to do */
- if (new_maccontrol == maccontrol)
- return;
-
- /* something changed, cache the new value */
- wlc_hw->maccontrol = new_maccontrol;
-
- /* write the new values with overrides applied */
- brcms_c_mctrl_write(wlc_hw);
-}
-
-/* write the software state of maccontrol and overrides to the maccontrol register */
-static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw)
-{
- u32 maccontrol = wlc_hw->maccontrol;
-
- /* OR in the wake bit if overridden */
- if (wlc_hw->wake_override)
- maccontrol |= MCTL_WAKE;
-
- /* set AP and INFRA bits for mute if needed */
- if (wlc_hw->mute_override) {
- maccontrol &= ~(MCTL_AP);
- maccontrol |= MCTL_INFRA;
- }
-
- W_REG(&wlc_hw->regs->maccontrol, maccontrol);
-}
-
-void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
- u32 override_bit)
-{
- if (wlc_hw->wake_override || (wlc_hw->maccontrol & MCTL_WAKE)) {
- mboolset(wlc_hw->wake_override, override_bit);
- return;
- }
-
- mboolset(wlc_hw->wake_override, override_bit);
-
- brcms_c_mctrl_write(wlc_hw);
- brcms_b_wait_for_wake(wlc_hw);
-
- return;
-}
-
-void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
- u32 override_bit)
-{
- mboolclr(wlc_hw->wake_override, override_bit);
-
- if (wlc_hw->wake_override || (wlc_hw->maccontrol & MCTL_WAKE))
- return;
-
- brcms_c_mctrl_write(wlc_hw);
-
- return;
-}
-
-/* When driver needs ucode to stop beaconing, it has to make sure that
- * MCTL_AP is clear and MCTL_INFRA is set
- * Mode MCTL_AP MCTL_INFRA
- * AP 1 1
- * STA 0 1 <--- This will ensure no beacons
- * IBSS 0 0
- */
-static void brcms_c_ucode_mute_override_set(struct brcms_hardware *wlc_hw)
-{
- wlc_hw->mute_override = 1;
-
- /* if maccontrol already has AP == 0 and INFRA == 1 without this
- * override, then there is no change to write
- */
- if ((wlc_hw->maccontrol & (MCTL_AP | MCTL_INFRA)) == MCTL_INFRA)
- return;
-
- brcms_c_mctrl_write(wlc_hw);
-
- return;
-}
-
-/* Clear the override on AP and INFRA bits */
-static void brcms_c_ucode_mute_override_clear(struct brcms_hardware *wlc_hw)
-{
- if (wlc_hw->mute_override == 0)
- return;
-
- wlc_hw->mute_override = 0;
-
- /* if maccontrol already has AP == 0 and INFRA == 1 without this
- * override, then there is no change to write
- */
- if ((wlc_hw->maccontrol & (MCTL_AP | MCTL_INFRA)) == MCTL_INFRA)
- return;
-
- brcms_c_mctrl_write(wlc_hw);
-}
-
-/*
- * Write a MAC address to the given match reg offset in the RXE match engine.
- */
-void
-brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
- const u8 *addr)
-{
- d11regs_t *regs;
- u16 mac_l;
- u16 mac_m;
- u16 mac_h;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n",
- wlc_hw->unit);
-
- regs = wlc_hw->regs;
- mac_l = addr[0] | (addr[1] << 8);
- mac_m = addr[2] | (addr[3] << 8);
- mac_h = addr[4] | (addr[5] << 8);
-
- /* enter the MAC addr into the RXE match registers */
- W_REG(&regs->rcm_ctl, RCM_INC_DATA | match_reg_offset);
- W_REG(&regs->rcm_mat_data, mac_l);
- W_REG(&regs->rcm_mat_data, mac_m);
- W_REG(&regs->rcm_mat_data, mac_h);
-
-}
-
-void
-brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
- void *buf)
-{
- d11regs_t *regs;
- u32 word;
- bool be_bit;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- regs = wlc_hw->regs;
- W_REG(&regs->tplatewrptr, offset);
-
- /* if MCTL_BIGEND bit set in mac control register,
- * the chip swaps data in fifo, as well as data in
- * template ram
- */
- be_bit = (R_REG(&regs->maccontrol) & MCTL_BIGEND) != 0;
-
- while (len > 0) {
- memcpy(&word, buf, sizeof(u32));
-
- if (be_bit)
- word = cpu_to_be32(word);
- else
- word = cpu_to_le32(word);
-
- W_REG(&regs->tplatewrdata, word);
-
- buf = (u8 *) buf + sizeof(u32);
- len -= sizeof(u32);
- }
-}
-
-void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin)
-{
- wlc_hw->band->CWmin = newmin;
-
- W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMIN);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, newmin);
-}
-
-void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax)
-{
- wlc_hw->band->CWmax = newmax;
-
- W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMAX);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, newmax);
-}
-
-void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
-{
- bool fastclk;
-
- /* request FAST clock if not on */
- fastclk = wlc_hw->forcefastclk;
- if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
-
- wlc_phy_bw_state_set(wlc_hw->band->pi, bw);
-
- brcms_b_phy_reset(wlc_hw);
- wlc_phy_init(wlc_hw->band->pi, wlc_phy_chanspec_get(wlc_hw->band->pi));
-
- /* restore the clk */
- if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
-}
-
-static void
-brcms_c_write_hw_bcntemplate0(struct brcms_hardware *wlc_hw, void *bcn,
- int len)
-{
- d11regs_t *regs = wlc_hw->regs;
-
- brcms_b_write_template_ram(wlc_hw, T_BCN0_TPL_BASE, (len + 3) & ~3,
- bcn);
- /* write beacon length to SCR */
- brcms_b_write_shm(wlc_hw, M_BCN0_FRM_BYTESZ, (u16) len);
- /* mark beacon0 valid */
- OR_REG(&regs->maccommand, MCMD_BCN0VLD);
-}
-
-static void
-brcms_c_write_hw_bcntemplate1(struct brcms_hardware *wlc_hw, void *bcn,
- int len)
-{
- d11regs_t *regs = wlc_hw->regs;
-
- brcms_b_write_template_ram(wlc_hw, T_BCN1_TPL_BASE, (len + 3) & ~3,
- bcn);
- /* write beacon length to SCR */
- brcms_b_write_shm(wlc_hw, M_BCN1_FRM_BYTESZ, (u16) len);
- /* mark beacon1 valid */
- OR_REG(&regs->maccommand, MCMD_BCN1VLD);
-}
-
-/* mac is assumed to be suspended at this point */
-void
-brcms_b_write_hw_bcntemplates(struct brcms_hardware *wlc_hw, void *bcn,
- int len, bool both)
-{
- d11regs_t *regs = wlc_hw->regs;
-
- if (both) {
- brcms_c_write_hw_bcntemplate0(wlc_hw, bcn, len);
- brcms_c_write_hw_bcntemplate1(wlc_hw, bcn, len);
- } else {
- /* bcn 0 */
- if (!(R_REG(&regs->maccommand) & MCMD_BCN0VLD))
- brcms_c_write_hw_bcntemplate0(wlc_hw, bcn, len);
- /* bcn 1 */
- else if (!
- (R_REG(&regs->maccommand) & MCMD_BCN1VLD))
- brcms_c_write_hw_bcntemplate1(wlc_hw, bcn, len);
- }
-}
-
-static void brcms_b_upd_synthpu(struct brcms_hardware *wlc_hw)
-{
- u16 v;
- struct brcms_c_info *wlc = wlc_hw->wlc;
- /* update SYNTHPU_DLY */
-
- if (BRCMS_ISLCNPHY(wlc->band)) {
- v = SYNTHPU_DLY_LPPHY_US;
- } else if (BRCMS_ISNPHY(wlc->band) && (NREV_GE(wlc->band->phyrev, 3))) {
- v = SYNTHPU_DLY_NPHY_US;
- } else {
- v = SYNTHPU_DLY_BPHY_US;
- }
-
- brcms_b_write_shm(wlc_hw, M_SYNTHPU_DLY, v);
-}
-
-/* band-specific init */
-static void
-brcms_b_bsinit(struct brcms_c_info *wlc, chanspec_t chanspec)
-{
- struct brcms_hardware *wlc_hw = wlc->hw;
-
- BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
- wlc_hw->band->bandunit);
-
- brcms_c_ucode_bsinit(wlc_hw);
-
- wlc_phy_init(wlc_hw->band->pi, chanspec);
-
- brcms_c_ucode_txant_set(wlc_hw);
-
- /* cwmin is band-specific, update hardware with value for current band */
- brcms_b_set_cwmin(wlc_hw, wlc_hw->band->CWmin);
- brcms_b_set_cwmax(wlc_hw, wlc_hw->band->CWmax);
-
- brcms_b_update_slot_timing(wlc_hw,
- BAND_5G(wlc_hw->band->
- bandtype) ? true : wlc_hw->
- shortslot);
-
- /* write phytype and phyvers */
- brcms_b_write_shm(wlc_hw, M_PHYTYPE, (u16) wlc_hw->band->phytype);
- brcms_b_write_shm(wlc_hw, M_PHYVER, (u16) wlc_hw->band->phyrev);
-
- /* initialize the txphyctl1 rate table since shmem is shared between bands */
- brcms_upd_ofdm_pctl1_table(wlc_hw);
-
- brcms_b_upd_synthpu(wlc_hw);
-}
-
-static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
-{
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk);
-
- wlc_hw->phyclk = clk;
-
- if (OFF == clk) { /* clear gmode bit, put phy into reset */
-
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC | SICF_GMODE),
- (SICF_PRST | SICF_FGC));
- udelay(1);
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_PRST);
- udelay(1);
-
- } else { /* take phy out of reset */
-
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_FGC);
- udelay(1);
- ai_core_cflags(wlc_hw->sih, (SICF_FGC), 0);
- udelay(1);
-
- }
-}
-
-/* Perform a soft reset of the PHY PLL */
-void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
-{
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol_addr), ~0, 0);
- udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol_data), 0x4, 0);
- udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol_data), 0x4, 4);
- udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol_data), 0x4, 0);
- udelay(1);
-}
-
-/* light way to turn on phy clock without reset for NPHY only
- * refer to brcms_b_core_phy_clk for full version
- */
-void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk)
-{
- /* support(necessary for NPHY and HYPHY) only */
- if (!BRCMS_ISNPHY(wlc_hw->band))
- return;
-
- if (ON == clk)
- ai_core_cflags(wlc_hw->sih, SICF_FGC, SICF_FGC);
- else
- ai_core_cflags(wlc_hw->sih, SICF_FGC, 0);
-
-}
-
-void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk)
-{
- if (ON == clk)
- ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE);
- else
- ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0);
-}
-
-void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
-{
- struct brcms_phy_pub *pih = wlc_hw->band->pi;
- u32 phy_bw_clkbits;
- bool phy_in_reset = false;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- if (pih == NULL)
- return;
-
- phy_bw_clkbits = wlc_phy_clk_bwbits(wlc_hw->band->pi);
-
- /* Specific reset sequence required for NPHY rev 3 and 4 */
- if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) &&
- NREV_LE(wlc_hw->band->phyrev, 4)) {
- /* Set the PHY bandwidth */
- ai_core_cflags(wlc_hw->sih, SICF_BWMASK, phy_bw_clkbits);
-
- udelay(1);
-
- /* Perform a soft reset of the PHY PLL */
- brcms_b_core_phypll_reset(wlc_hw);
-
- /* reset the PHY */
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_PCLKE),
- (SICF_PRST | SICF_PCLKE));
- phy_in_reset = true;
- } else {
-
- ai_core_cflags(wlc_hw->sih,
- (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
- (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
- }
-
- udelay(2);
- brcms_b_core_phy_clk(wlc_hw, ON);
-
- if (pih)
- wlc_phy_anacore(pih, ON);
-}
-
-/* switch to and initialize new band */
-static void
-brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
- chanspec_t chanspec) {
- struct brcms_c_info *wlc = wlc_hw->wlc;
- u32 macintmask;
-
- /* Enable the d11 core before accessing it */
- if (!ai_iscoreup(wlc_hw->sih)) {
- ai_core_reset(wlc_hw->sih, 0, 0);
- brcms_c_mctrl_reset(wlc_hw);
- }
-
- macintmask = brcms_c_setband_inact(wlc, bandunit);
-
- if (!wlc_hw->up)
- return;
-
- brcms_b_core_phy_clk(wlc_hw, ON);
-
- /* band-specific initializations */
- brcms_b_bsinit(wlc, chanspec);
-
- /*
- * If there are any pending software interrupt bits,
- * then replace these with a harmless nonzero value
- * so brcms_c_dpc() will re-enable interrupts when done.
- */
- if (wlc->macintstatus)
- wlc->macintstatus = MI_DMAINT;
-
- /* restore macintmask */
- brcms_intrsrestore(wlc->wl, macintmask);
-
- /* ucode should still be suspended.. */
- WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
-}
-
-/* low-level band switch utility routine */
-void brcms_c_setxband(struct brcms_hardware *wlc_hw,
- uint bandunit)
-{
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
- bandunit);
-
- wlc_hw->band = wlc_hw->bandstate[bandunit];
-
- /* BMAC_NOTE: until we eliminate need for wlc->band refs in low level code */
- wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit];
-
- /* set gmode core flag */
- if (wlc_hw->sbclk && !wlc_hw->noreset) {
- ai_core_cflags(wlc_hw->sih, SICF_GMODE,
- ((bandunit == 0) ? SICF_GMODE : 0));
- }
-}
-
-static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw)
-{
-
- /* reject unsupported corerev */
- if (!VALID_COREREV(wlc_hw->corerev)) {
- wiphy_err(wlc_hw->wlc->wiphy, "unsupported core rev %d\n",
- wlc_hw->corerev);
- return false;
- }
-
- return true;
-}
-
-/* Validate some board info parameters */
-static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw)
-{
- uint boardrev = wlc_hw->boardrev;
-
- /* 4 bits each for board type, major, minor, and tiny version */
- uint brt = (boardrev & 0xf000) >> 12;
- uint b0 = (boardrev & 0xf00) >> 8;
- uint b1 = (boardrev & 0xf0) >> 4;
- uint b2 = boardrev & 0xf;
-
- /* voards from other vendors are always considered valid */
- if (wlc_hw->sih->boardvendor != PCI_VENDOR_ID_BROADCOM)
- return true;
-
- /* do some boardrev sanity checks when boardvendor is Broadcom */
- if (boardrev == 0)
- return false;
-
- if (boardrev <= 0xff)
- return true;
-
- if ((brt > 2) || (brt == 0) || (b0 > 9) || (b0 == 0) || (b1 > 9)
- || (b2 > 9))
- return false;
-
- return true;
-}
-
-static char *brcms_c_get_macaddr(struct brcms_hardware *wlc_hw)
-{
- const char *varname = "macaddr";
- char *macaddr;
-
- /* If macaddr exists, use it (Sromrev4, CIS, ...). */
- macaddr = getvar(wlc_hw->vars, varname);
- if (macaddr != NULL)
- return macaddr;
-
- if (NBANDS_HW(wlc_hw) > 1)
- varname = "et1macaddr";
- else
- varname = "il0macaddr";
-
- macaddr = getvar(wlc_hw->vars, varname);
- if (macaddr == NULL) {
- wiphy_err(wlc_hw->wlc->wiphy, "wl%d: wlc_get_macaddr: macaddr "
- "getvar(%s) not found\n", wlc_hw->unit, varname);
- }
-
- return macaddr;
-}
-
-/*
- * Return true if radio is disabled, otherwise false.
- * hw radio disable signal is an external pin, users activate it asynchronously
- * this function could be called when driver is down and w/o clock
- * it operates on different registers depending on corerev and boardflag.
- */
-bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
-{
- bool v, clk, xtal;
- u32 resetbits = 0, flags = 0;
-
- xtal = wlc_hw->sbclk;
- if (!xtal)
- brcms_b_xtal(wlc_hw, ON);
-
- /* may need to take core out of reset first */
- clk = wlc_hw->clk;
- if (!clk) {
- /*
- * mac no longer enables phyclk automatically when driver
- * accesses phyreg throughput mac. This can be skipped since
- * only mac reg is accessed below
- */
- flags |= SICF_PCLKE;
-
- /* AI chip doesn't restore bar0win2 on hibernation/resume, need sw fixup */
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID))
- wlc_hw->regs =
- (d11regs_t *) ai_setcore(wlc_hw->sih, D11_CORE_ID,
- 0);
- ai_core_reset(wlc_hw->sih, flags, resetbits);
- brcms_c_mctrl_reset(wlc_hw);
- }
-
- v = ((R_REG(&wlc_hw->regs->phydebug) & PDBG_RFD) != 0);
-
- /* put core back into reset */
- if (!clk)
- ai_core_disable(wlc_hw->sih, 0);
-
- if (!xtal)
- brcms_b_xtal(wlc_hw, OFF);
-
- return v;
-}
-
-/* Initialize just the hardware when coming out of POR or S3/S5 system states */
-void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
-{
- if (wlc_hw->wlc->pub->hw_up)
- return;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- /*
- * Enable pll and xtal, initialize the power control registers,
- * and force fastclock for the remainder of brcms_c_up().
- */
- brcms_b_xtal(wlc_hw, ON);
- ai_clkctl_init(wlc_hw->sih);
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
-
- if (wlc_hw->sih->bustype == PCI_BUS) {
- ai_pci_fixcfg(wlc_hw->sih);
-
- /* AI chip doesn't restore bar0win2 on hibernation/resume, need sw fixup */
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID))
- wlc_hw->regs =
- (d11regs_t *) ai_setcore(wlc_hw->sih, D11_CORE_ID,
- 0);
- }
-
- /* Inform phy that a POR reset has occurred so it does a complete phy init */
- wlc_phy_por_inform(wlc_hw->band->pi);
-
- wlc_hw->ucode_loaded = false;
- wlc_hw->wlc->pub->hw_up = true;
-
- if ((wlc_hw->boardflags & BFL_FEM)
- && (wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
- if (!
- (wlc_hw->boardrev >= 0x1250
- && (wlc_hw->boardflags & BFL_FEM_BT)))
- ai_epa_4313war(wlc_hw->sih);
- }
-}
-
-static bool wlc_dma_rxreset(struct brcms_hardware *wlc_hw, uint fifo)
-{
- struct dma_pub *di = wlc_hw->di[fifo];
- return dma_rxreset(di);
-}
-
-/* d11 core reset
- * ensure fask clock during reset
- * reset dma
- * reset d11(out of reset)
- * reset phy(out of reset)
- * clear software macintstatus for fresh new start
- * one testing hack wlc_hw->noreset will bypass the d11/phy reset
- */
-void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
-{
- d11regs_t *regs;
- uint i;
- bool fastclk;
- u32 resetbits = 0;
-
- if (flags == BRCMS_USE_COREFLAGS)
- flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- regs = wlc_hw->regs;
-
- /* request FAST clock if not on */
- fastclk = wlc_hw->forcefastclk;
- if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
-
- /* reset the dma engines except first time thru */
- if (ai_iscoreup(wlc_hw->sih)) {
- for (i = 0; i < NFIFO; i++)
- if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i]))) {
- wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: "
- "dma_txreset[%d]: cannot stop dma\n",
- wlc_hw->unit, __func__, i);
- }
-
- if ((wlc_hw->di[RX_FIFO])
- && (!wlc_dma_rxreset(wlc_hw, RX_FIFO))) {
- wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: dma_rxreset"
- "[%d]: cannot stop dma\n",
- wlc_hw->unit, __func__, RX_FIFO);
- }
- }
- /* if noreset, just stop the psm and return */
- if (wlc_hw->noreset) {
- wlc_hw->wlc->macintstatus = 0; /* skip wl_dpc after down */
- brcms_b_mctrl(wlc_hw, MCTL_PSM_RUN | MCTL_EN_MAC, 0);
- return;
- }
-
- /*
- * mac no longer enables phyclk automatically when driver accesses
- * phyreg throughput mac, AND phy_reset is skipped at early stage when
- * band->pi is invalid. need to enable PHY CLK
- */
- flags |= SICF_PCLKE;
-
- /* reset the core
- * In chips with PMU, the fastclk request goes through d11 core reg 0x1e0, which
- * is cleared by the core_reset. have to re-request it.
- * This adds some delay and we can optimize it by also requesting fastclk through
- * chipcommon during this period if necessary. But that has to work coordinate
- * with other driver like mips/arm since they may touch chipcommon as well.
- */
- wlc_hw->clk = false;
- ai_core_reset(wlc_hw->sih, flags, resetbits);
- wlc_hw->clk = true;
- if (wlc_hw->band && wlc_hw->band->pi)
- wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true);
-
- brcms_c_mctrl_reset(wlc_hw);
-
- if (PMUCTL_ENAB(wlc_hw->sih))
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
-
- brcms_b_phy_reset(wlc_hw);
-
- /* turn on PHY_PLL */
- brcms_b_core_phypll_ctl(wlc_hw, true);
-
- /* clear sw intstatus */
- wlc_hw->wlc->macintstatus = 0;
-
- /* restore the clk setting */
- if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
-}
-
-/* txfifo sizes needs to be modified(increased) since the newer cores
- * have more memory.
- */
-static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
-{
- d11regs_t *regs = wlc_hw->regs;
- u16 fifo_nu;
- u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk;
- u16 txfifo_def, txfifo_def1;
- u16 txfifo_cmd;
-
- /* tx fifos start at TXFIFO_START_BLK from the Base address */
- txfifo_startblk = TXFIFO_START_BLK;
-
- /* sequence of operations: reset fifo, set fifo size, reset fifo */
- for (fifo_nu = 0; fifo_nu < NFIFO; fifo_nu++) {
-
- txfifo_endblk = txfifo_startblk + wlc_hw->xmtfifo_sz[fifo_nu];
- txfifo_def = (txfifo_startblk & 0xff) |
- (((txfifo_endblk - 1) & 0xff) << TXFIFO_FIFOTOP_SHIFT);
- txfifo_def1 = ((txfifo_startblk >> 8) & 0x1) |
- ((((txfifo_endblk -
- 1) >> 8) & 0x1) << TXFIFO_FIFOTOP_SHIFT);
- txfifo_cmd =
- TXFIFOCMD_RESET_MASK | (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT);
-
- W_REG(&regs->xmtfifocmd, txfifo_cmd);
- W_REG(&regs->xmtfifodef, txfifo_def);
- W_REG(&regs->xmtfifodef1, txfifo_def1);
-
- W_REG(&regs->xmtfifocmd, txfifo_cmd);
-
- txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu];
- }
- /*
- * need to propagate to shm location to be in sync since ucode/hw won't
- * do this
- */
- brcms_b_write_shm(wlc_hw, M_FIFOSIZE0,
- wlc_hw->xmtfifo_sz[TX_AC_BE_FIFO]);
- brcms_b_write_shm(wlc_hw, M_FIFOSIZE1,
- wlc_hw->xmtfifo_sz[TX_AC_VI_FIFO]);
- brcms_b_write_shm(wlc_hw, M_FIFOSIZE2,
- ((wlc_hw->xmtfifo_sz[TX_AC_VO_FIFO] << 8) | wlc_hw->
- xmtfifo_sz[TX_AC_BK_FIFO]));
- brcms_b_write_shm(wlc_hw, M_FIFOSIZE3,
- ((wlc_hw->xmtfifo_sz[TX_ATIM_FIFO] << 8) | wlc_hw->
- xmtfifo_sz[TX_BCMC_FIFO]));
-}
-
-/* d11 core init
- * reset PSM
- * download ucode/PCM
- * let ucode run to suspended
- * download ucode inits
- * config other core registers
- * init dma
- */
-static void brcms_b_coreinit(struct brcms_c_info *wlc)
-{
- struct brcms_hardware *wlc_hw = wlc->hw;
- d11regs_t *regs;
- u32 sflags;
- uint bcnint_us;
- uint i = 0;
- bool fifosz_fixup = false;
- int err = 0;
- u16 buf[NFIFO];
- struct wiphy *wiphy = wlc->wiphy;
-
- regs = wlc_hw->regs;
-
- BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- /* reset PSM */
- brcms_b_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_PSM_JMP_0 | MCTL_WAKE));
-
- brcms_ucode_download(wlc_hw);
- /*
- * FIFOSZ fixup. driver wants to controls the fifo allocation.
- */
- fifosz_fixup = true;
-
- /* let the PSM run to the suspended state, set mode to BSS STA */
- W_REG(&regs->macintstatus, -1);
- brcms_b_mctrl(wlc_hw, ~0,
- (MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE));
-
- /* wait for ucode to self-suspend after auto-init */
- SPINWAIT(((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0),
- 1000 * 1000);
- if ((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0)
- wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-"
- "suspend!\n", wlc_hw->unit);
-
- brcms_c_gpio_init(wlc);
-
- sflags = ai_core_sflags(wlc_hw->sih, 0, 0);
-
- if (D11REV_IS(wlc_hw->corerev, 23)) {
- if (BRCMS_ISNPHY(wlc_hw->band))
- brcms_c_write_inits(wlc_hw, d11n0initvals16);
- else
- wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
- " %d\n", __func__, wlc_hw->unit,
- wlc_hw->corerev);
- } else if (D11REV_IS(wlc_hw->corerev, 24)) {
- if (BRCMS_ISLCNPHY(wlc_hw->band)) {
- brcms_c_write_inits(wlc_hw, d11lcn0initvals24);
- } else {
- wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
- " %d\n", __func__, wlc_hw->unit,
- wlc_hw->corerev);
- }
- } else {
- wiphy_err(wiphy, "%s: wl%d: unsupported corerev %d\n",
- __func__, wlc_hw->unit, wlc_hw->corerev);
- }
-
- /* For old ucode, txfifo sizes needs to be modified(increased) */
- if (fifosz_fixup == true) {
- brcms_b_corerev_fifofixup(wlc_hw);
- }
-
- /* check txfifo allocations match between ucode and driver */
- buf[TX_AC_BE_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE0);
- if (buf[TX_AC_BE_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_BE_FIFO]) {
- i = TX_AC_BE_FIFO;
- err = -1;
- }
- buf[TX_AC_VI_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE1);
- if (buf[TX_AC_VI_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_VI_FIFO]) {
- i = TX_AC_VI_FIFO;
- err = -1;
- }
- buf[TX_AC_BK_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE2);
- buf[TX_AC_VO_FIFO] = (buf[TX_AC_BK_FIFO] >> 8) & 0xff;
- buf[TX_AC_BK_FIFO] &= 0xff;
- if (buf[TX_AC_BK_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_BK_FIFO]) {
- i = TX_AC_BK_FIFO;
- err = -1;
- }
- if (buf[TX_AC_VO_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_VO_FIFO]) {
- i = TX_AC_VO_FIFO;
- err = -1;
- }
- buf[TX_BCMC_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE3);
- buf[TX_ATIM_FIFO] = (buf[TX_BCMC_FIFO] >> 8) & 0xff;
- buf[TX_BCMC_FIFO] &= 0xff;
- if (buf[TX_BCMC_FIFO] != wlc_hw->xmtfifo_sz[TX_BCMC_FIFO]) {
- i = TX_BCMC_FIFO;
- err = -1;
- }
- if (buf[TX_ATIM_FIFO] != wlc_hw->xmtfifo_sz[TX_ATIM_FIFO]) {
- i = TX_ATIM_FIFO;
- err = -1;
- }
- if (err != 0) {
- wiphy_err(wiphy, "wlc_coreinit: txfifo mismatch: ucode size %d"
- " driver size %d index %d\n", buf[i],
- wlc_hw->xmtfifo_sz[i], i);
- }
-
- /* make sure we can still talk to the mac */
- WARN_ON(R_REG(&regs->maccontrol) == 0xffffffff);
-
- /* band-specific inits done by wlc_bsinit() */
-
- /* Set up frame burst size and antenna swap threshold init values */
- brcms_b_write_shm(wlc_hw, M_MBURST_SIZE, MAXTXFRAMEBURST);
- brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT);
-
- /* enable one rx interrupt per received frame */
- W_REG(&regs->intrcvlazy[0], (1 << IRL_FC_SHIFT));
-
- /* set the station mode (BSS STA) */
- brcms_b_mctrl(wlc_hw,
- (MCTL_INFRA | MCTL_DISCARD_PMQ | MCTL_AP),
- (MCTL_INFRA | MCTL_DISCARD_PMQ));
-
- /* set up Beacon interval */
- bcnint_us = 0x8000 << 10;
- W_REG(&regs->tsf_cfprep, (bcnint_us << CFPREP_CBI_SHIFT));
- W_REG(&regs->tsf_cfpstart, bcnint_us);
- W_REG(&regs->macintstatus, MI_GP1);
-
- /* write interrupt mask */
- W_REG(&regs->intctrlregs[RX_FIFO].intmask, DEF_RXINTMASK);
-
- /* allow the MAC to control the PHY clock (dynamic on/off) */
- brcms_b_macphyclk_set(wlc_hw, ON);
-
- /* program dynamic clock control fast powerup delay register */
- wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih);
- W_REG(&regs->scc_fastpwrup_dly, wlc->fastpwrup_dly);
-
- /* tell the ucode the corerev */
- brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev);
-
- /* tell the ucode MAC capabilities */
- brcms_b_write_shm(wlc_hw, M_MACHW_CAP_L,
- (u16) (wlc_hw->machwcap & 0xffff));
- brcms_b_write_shm(wlc_hw, M_MACHW_CAP_H,
- (u16) ((wlc_hw->
- machwcap >> 16) & 0xffff));
-
- /* write retry limits to SCR, this done after PSM init */
- W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, wlc_hw->SRL);
- W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, wlc_hw->LRL);
-
- /* write rate fallback retry limits */
- brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL);
- brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL);
-
- AND_REG(&regs->ifs_ctl, 0x0FFF);
- W_REG(&regs->ifs_aifsn, EDCF_AIFSN_MIN);
-
- /* dma initializations */
- wlc->txpend16165war = 0;
-
- /* init the tx dma engines */
- for (i = 0; i < NFIFO; i++) {
- if (wlc_hw->di[i])
- dma_txinit(wlc_hw->di[i]);
- }
-
- /* init the rx dma engine(s) and post receive buffers */
- dma_rxinit(wlc_hw->di[RX_FIFO]);
- dma_rxfill(wlc_hw->di[RX_FIFO]);
-}
-
-/* This function is used for changing the tsf frac register
- * If spur avoidance mode is off, the mac freq will be 80/120/160Mhz
- * If spur avoidance mode is on1, the mac freq will be 82/123/164Mhz
- * If spur avoidance mode is on2, the mac freq will be 84/126/168Mhz
- * HTPHY Formula is 2^26/freq(MHz) e.g.
- * For spuron2 - 126MHz -> 2^26/126 = 532610.0
- * - 532610 = 0x82082 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x2082
- * For spuron: 123MHz -> 2^26/123 = 545600.5
- * - 545601 = 0x85341 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x5341
- * For spur off: 120MHz -> 2^26/120 = 559240.5
- * - 559241 = 0x88889 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x8889
- */
-
-void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
-{
- d11regs_t *regs;
- regs = wlc_hw->regs;
-
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID)) {
- if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x2082);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
- } else if (spurmode == WL_SPURAVOID_ON1) { /* 123Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x5341);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
- } else { /* 120Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x8889);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
- }
- } else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
- if (spurmode == WL_SPURAVOID_ON1) { /* 82Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x7CE0);
- W_REG(&regs->tsf_clk_frac_h, 0xC);
- } else { /* 80Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0xCCCD);
- W_REG(&regs->tsf_clk_frac_h, 0xC);
- }
- }
-}
-
-/* Initialize GPIOs that are controlled by D11 core */
-static void brcms_c_gpio_init(struct brcms_c_info *wlc)
-{
- struct brcms_hardware *wlc_hw = wlc->hw;
- d11regs_t *regs;
- u32 gc, gm;
-
- regs = wlc_hw->regs;
-
- /* use GPIO select 0 to get all gpio signals from the gpio out reg */
- brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0);
-
- /*
- * Common GPIO setup:
- * G0 = LED 0 = WLAN Activity
- * G1 = LED 1 = WLAN 2.4 GHz Radio State
- * G2 = LED 2 = WLAN 5 GHz Radio State
- * G4 = radio disable input (HI enabled, LO disabled)
- */
-
- gc = gm = 0;
-
- /* Allocate GPIOs for mimo antenna diversity feature */
- if (wlc_hw->antsel_type == ANTSEL_2x3) {
- /* Enable antenna diversity, use 2x3 mode */
- brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
- MHF3_ANTSEL_EN, BRCM_BAND_ALL);
- brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_MODE,
- MHF3_ANTSEL_MODE, BRCM_BAND_ALL);
-
- /* init superswitch control */
- wlc_phy_antsel_init(wlc_hw->band->pi, false);
-
- } else if (wlc_hw->antsel_type == ANTSEL_2x4) {
- gm |= gc |= (BOARD_GPIO_12 | BOARD_GPIO_13);
- /*
- * The board itself is powered by these GPIOs
- * (when not sending pattern) so set them high
- */
- OR_REG(&regs->psm_gpio_oe,
- (BOARD_GPIO_12 | BOARD_GPIO_13));
- OR_REG(&regs->psm_gpio_out,
- (BOARD_GPIO_12 | BOARD_GPIO_13));
-
- /* Enable antenna diversity, use 2x4 mode */
- brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
- MHF3_ANTSEL_EN, BRCM_BAND_ALL);
- brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_MODE, 0,
- BRCM_BAND_ALL);
-
- /* Configure the desired clock to be 4Mhz */
- brcms_b_write_shm(wlc_hw, M_ANTSEL_CLKDIV,
- ANTSEL_CLKDIV_4MHZ);
- }
-
- /* gpio 9 controls the PA. ucode is responsible for wiggling out and oe */
- if (wlc_hw->boardflags & BFL_PACTRL)
- gm |= gc |= BOARD_GPIO_PACTRL;
-
- /* apply to gpiocontrol register */
- ai_gpiocontrol(wlc_hw->sih, gm, gc, GPIO_DRV_PRIORITY);
-}
-
-static void brcms_ucode_download(struct brcms_hardware *wlc_hw)
-{
- struct brcms_c_info *wlc;
- wlc = wlc_hw->wlc;
-
- if (wlc_hw->ucode_loaded)
- return;
-
- if (D11REV_IS(wlc_hw->corerev, 23)) {
- if (BRCMS_ISNPHY(wlc_hw->band)) {
- brcms_ucode_write(wlc_hw, bcm43xx_16_mimo,
- bcm43xx_16_mimosz);
- wlc_hw->ucode_loaded = true;
- } else
- wiphy_err(wlc->wiphy, "%s: wl%d: unsupported phy in "
- "corerev %d\n",
- __func__, wlc_hw->unit, wlc_hw->corerev);
- } else if (D11REV_IS(wlc_hw->corerev, 24)) {
- if (BRCMS_ISLCNPHY(wlc_hw->band)) {
- brcms_ucode_write(wlc_hw, bcm43xx_24_lcn,
- bcm43xx_24_lcnsz);
- wlc_hw->ucode_loaded = true;
- } else {
- wiphy_err(wlc->wiphy, "%s: wl%d: unsupported phy in "
- "corerev %d\n",
- __func__, wlc_hw->unit, wlc_hw->corerev);
- }
- }
-}
-
-static void brcms_ucode_write(struct brcms_hardware *wlc_hw, const u32 ucode[],
- const uint nbytes) {
- d11regs_t *regs = wlc_hw->regs;
- uint i;
- uint count;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- count = (nbytes / sizeof(u32));
-
- W_REG(&regs->objaddr, (OBJADDR_AUTO_INC | OBJADDR_UCM_SEL));
- (void)R_REG(&regs->objaddr);
- for (i = 0; i < count; i++)
- W_REG(&regs->objdata, ucode[i]);
-}
-
-static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
- const struct d11init *inits)
-{
- int i;
- volatile u8 *base;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- base = (volatile u8 *)wlc_hw->regs;
-
- for (i = 0; inits[i].addr != 0xffff; i++) {
- if (inits[i].size == 2)
- W_REG((u16 *)(base + inits[i].addr),
- inits[i].value);
- else if (inits[i].size == 4)
- W_REG((u32 *)(base + inits[i].addr),
- inits[i].value);
- }
-}
-
-static void brcms_c_ucode_txant_set(struct brcms_hardware *wlc_hw)
-{
- u16 phyctl;
- u16 phytxant = wlc_hw->bmac_phytxant;
- u16 mask = PHY_TXC_ANT_MASK;
-
- /* set the Probe Response frame phy control word */
- phyctl = brcms_b_read_shm(wlc_hw, M_CTXPRS_BLK + C_CTX_PCTLWD_POS);
- phyctl = (phyctl & ~mask) | phytxant;
- brcms_b_write_shm(wlc_hw, M_CTXPRS_BLK + C_CTX_PCTLWD_POS, phyctl);
-
- /* set the Response (ACK/CTS) frame phy control word */
- phyctl = brcms_b_read_shm(wlc_hw, M_RSP_PCTLWD);
- phyctl = (phyctl & ~mask) | phytxant;
- brcms_b_write_shm(wlc_hw, M_RSP_PCTLWD, phyctl);
-}
-
-void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant)
-{
- /* update sw state */
- wlc_hw->bmac_phytxant = phytxant;
-
- /* push to ucode if up */
- if (!wlc_hw->up)
- return;
- brcms_c_ucode_txant_set(wlc_hw);
-
-}
-
-u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw)
-{
- return (u16) wlc_hw->wlc->stf->txant;
-}
-
-void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type)
-{
- wlc_hw->antsel_type = antsel_type;
-
- /* Update the antsel type for phy module to use */
- wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type);
-}
-
-void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
-{
- bool fatal = false;
- uint unit;
- uint intstatus, idx;
- d11regs_t *regs = wlc_hw->regs;
- struct wiphy *wiphy = wlc_hw->wlc->wiphy;
-
- unit = wlc_hw->unit;
-
- for (idx = 0; idx < NFIFO; idx++) {
- /* read intstatus register and ignore any non-error bits */
- intstatus =
- R_REG(&regs->intctrlregs[idx].intstatus) & I_ERRORS;
- if (!intstatus)
- continue;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d: intstatus%d 0x%x\n",
- unit, idx, intstatus);
-
- if (intstatus & I_RO) {
- wiphy_err(wiphy, "wl%d: fifo %d: receive fifo "
- "overflow\n", unit, idx);
- fatal = true;
- }
-
- if (intstatus & I_PC) {
- wiphy_err(wiphy, "wl%d: fifo %d: descriptor error\n",
- unit, idx);
- fatal = true;
- }
-
- if (intstatus & I_PD) {
- wiphy_err(wiphy, "wl%d: fifo %d: data error\n", unit,
- idx);
- fatal = true;
- }
-
- if (intstatus & I_DE) {
- wiphy_err(wiphy, "wl%d: fifo %d: descriptor protocol "
- "error\n", unit, idx);
- fatal = true;
- }
-
- if (intstatus & I_RU) {
- wiphy_err(wiphy, "wl%d: fifo %d: receive descriptor "
- "underflow\n", idx, unit);
- }
-
- if (intstatus & I_XU) {
- wiphy_err(wiphy, "wl%d: fifo %d: transmit fifo "
- "underflow\n", idx, unit);
- fatal = true;
- }
-
- if (fatal) {
- brcms_c_fatal_error(wlc_hw->wlc); /* big hammer */
- break;
- } else
- W_REG(&regs->intctrlregs[idx].intstatus,
- intstatus);
- }
-}
-
-void brcms_c_intrson(struct brcms_c_info *wlc)
-{
- struct brcms_hardware *wlc_hw = wlc->hw;
- wlc->macintmask = wlc->defmacintmask;
- W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
-}
-
-/* callback for siutils.c, which has only wlc handler, no wl
- * they both check up, not only because there is no need to off/restore d11 interrupt
- * but also because per-port code may require sync with valid interrupt.
- */
-
-static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc)
-{
- if (!wlc->hw->up)
- return 0;
-
- return brcms_intrsoff(wlc->wl);
-}
-
-static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask)
-{
- if (!wlc->hw->up)
- return;
-
- brcms_intrsrestore(wlc->wl, macintmask);
-}
-
-u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
-{
- struct brcms_hardware *wlc_hw = wlc->hw;
- u32 macintmask;
-
- if (!wlc_hw->clk)
- return 0;
-
- macintmask = wlc->macintmask; /* isr can still happen */
-
- W_REG(&wlc_hw->regs->macintmask, 0);
- (void)R_REG(&wlc_hw->regs->macintmask); /* sync readback */
- udelay(1); /* ensure int line is no longer driven */
- wlc->macintmask = 0;
-
- /* return previous macintmask; resolve race between us and our isr */
- return wlc->macintstatus ? 0 : macintmask;
-}
-
-void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask)
-{
- struct brcms_hardware *wlc_hw = wlc->hw;
- if (!wlc_hw->clk)
- return;
-
- wlc->macintmask = macintmask;
- W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
-}
-
-static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, mbool flags)
-{
- u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
-
- if (on) {
- /* suspend tx fifos */
- brcms_b_tx_fifo_suspend(wlc_hw, TX_DATA_FIFO);
- brcms_b_tx_fifo_suspend(wlc_hw, TX_CTL_FIFO);
- brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_BK_FIFO);
- brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_VI_FIFO);
-
- /* zero the address match register so we do not send ACKs */
- brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
- null_ether_addr);
- } else {
- /* resume tx fifos */
- if (!wlc_hw->wlc->tx_suspended) {
- brcms_b_tx_fifo_resume(wlc_hw, TX_DATA_FIFO);
- }
- brcms_b_tx_fifo_resume(wlc_hw, TX_CTL_FIFO);
- brcms_b_tx_fifo_resume(wlc_hw, TX_AC_BK_FIFO);
- brcms_b_tx_fifo_resume(wlc_hw, TX_AC_VI_FIFO);
-
- /* Restore address */
- brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
- wlc_hw->etheraddr);
- }
-
- wlc_phy_mute_upd(wlc_hw->band->pi, on, flags);
-
- if (on)
- brcms_c_ucode_mute_override_set(wlc_hw);
- else
- brcms_c_ucode_mute_override_clear(wlc_hw);
-}
-
-int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
- uint *blocks)
-{
- if (fifo >= NFIFO)
- return -EINVAL;
-
- *blocks = wlc_hw->xmtfifo_sz[fifo];
-
- return 0;
-}
-
-/* brcms_b_tx_fifo_suspended:
- * Check the MAC's tx suspend status for a tx fifo.
- *
- * When the MAC acknowledges a tx suspend, it indicates that no more
- * packets will be transmitted out the radio. This is independent of
- * DMA channel suspension---the DMA may have finished suspending, or may still
- * be pulling data into a tx fifo, by the time the MAC acks the suspend
- * request.
- */
-static bool brcms_b_tx_fifo_suspended(struct brcms_hardware *wlc_hw,
- uint tx_fifo)
-{
- /* check that a suspend has been requested and is no longer pending */
-
- /*
- * for DMA mode, the suspend request is set in xmtcontrol of the DMA engine,
- * and the tx fifo suspend at the lower end of the MAC is acknowledged in the
- * chnstatus register.
- * The tx fifo suspend completion is independent of the DMA suspend completion and
- * may be acked before or after the DMA is suspended.
- */
- if (dma_txsuspended(wlc_hw->di[tx_fifo]) &&
- (R_REG(&wlc_hw->regs->chnstatus) &
- (1 << tx_fifo)) == 0)
- return true;
-
- return false;
-}
-
-static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw,
- uint tx_fifo)
-{
- u8 fifo = 1 << tx_fifo;
-
- /* Two clients of this code, 11h Quiet period and scanning. */
-
- /* only suspend if not already suspended */
- if ((wlc_hw->suspended_fifos & fifo) == fifo)
- return;
-
- /* force the core awake only if not already */
- if (wlc_hw->suspended_fifos == 0)
- brcms_c_ucode_wake_override_set(wlc_hw,
- BRCMS_WAKE_OVERRIDE_TXFIFO);
-
- wlc_hw->suspended_fifos |= fifo;
-
- if (wlc_hw->di[tx_fifo]) {
- /* Suspending AMPDU transmissions in the middle can cause underflow
- * which may result in mismatch between ucode and driver
- * so suspend the mac before suspending the FIFO
- */
- if (BRCMS_PHY_11N_CAP(wlc_hw->band))
- brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
-
- dma_txsuspend(wlc_hw->di[tx_fifo]);
-
- if (BRCMS_PHY_11N_CAP(wlc_hw->band))
- brcms_c_enable_mac(wlc_hw->wlc);
- }
-}
-
-static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw,
- uint tx_fifo)
-{
- /* BMAC_NOTE: BRCMS_TX_FIFO_ENAB is done in brcms_c_dpc() for DMA case
- * but need to be done here for PIO otherwise the watchdog will catch
- * the inconsistency and fire
- */
- /* Two clients of this code, 11h Quiet period and scanning. */
- if (wlc_hw->di[tx_fifo])
- dma_txresume(wlc_hw->di[tx_fifo]);
-
- /* allow core to sleep again */
- if (wlc_hw->suspended_fifos == 0)
- return;
- else {
- wlc_hw->suspended_fifos &= ~(1 << tx_fifo);
- if (wlc_hw->suspended_fifos == 0)
- brcms_c_ucode_wake_override_clear(wlc_hw,
- BRCMS_WAKE_OVERRIDE_TXFIFO);
- }
-}
-
-/*
- * Read and clear macintmask and macintstatus and intstatus registers.
- * This routine should be called with interrupts off
- * Return:
- * -1 if DEVICEREMOVED(wlc) evaluates to true;
- * 0 if the interrupt is not for us, or we are in some special cases;
- * device interrupt status bits otherwise.
- */
-static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
-{
- struct brcms_hardware *wlc_hw = wlc->hw;
- d11regs_t *regs = wlc_hw->regs;
- u32 macintstatus;
-
- /* macintstatus includes a DMA interrupt summary bit */
- macintstatus = R_REG(&regs->macintstatus);
-
- BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit,
- macintstatus);
-
- /* detect cardbus removed, in power down(suspend) and in reset */
- if (DEVICEREMOVED(wlc))
- return -1;
-
- /* DEVICEREMOVED succeeds even when the core is still resetting,
- * handle that case here.
- */
- if (macintstatus == 0xffffffff)
- return 0;
-
- /* defer unsolicited interrupts */
- macintstatus &= (in_isr ? wlc->macintmask : wlc->defmacintmask);
-
- /* if not for us */
- if (macintstatus == 0)
- return 0;
-
- /* interrupts are already turned off for CFE build
- * Caution: For CFE Turning off the interrupts again has some undesired
- * consequences
- */
- /* turn off the interrupts */
- W_REG(&regs->macintmask, 0);
- (void)R_REG(&regs->macintmask); /* sync readback */
- wlc->macintmask = 0;
-
- /* clear device interrupts */
- W_REG(&regs->macintstatus, macintstatus);
-
- /* MI_DMAINT is indication of non-zero intstatus */
- if (macintstatus & MI_DMAINT) {
- /*
- * only fifo interrupt enabled is I_RI in
- * RX_FIFO. If MI_DMAINT is set, assume it
- * is set and clear the interrupt.
- */
- W_REG(&regs->intctrlregs[RX_FIFO].intstatus,
- DEF_RXINTMASK);
- }
-
- return macintstatus;
-}
-
-/* Update wlc->macintstatus and wlc->intstatus[]. */
-/* Return true if they are updated successfully. false otherwise */
-bool brcms_c_intrsupd(struct brcms_c_info *wlc)
-{
- u32 macintstatus;
-
- /* read and clear macintstatus and intstatus registers */
- macintstatus = wlc_intstatus(wlc, false);
-
- /* device is removed */
- if (macintstatus == 0xffffffff)
- return false;
-
- /* update interrupt status in software */
- wlc->macintstatus |= macintstatus;
-
- return true;
-}
-
-/*
- * First-level interrupt processing.
- * Return true if this was our interrupt, false otherwise.
- * *wantdpc will be set to true if further brcms_c_dpc() processing is required,
- * false otherwise.
- */
-bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc)
-{
- struct brcms_hardware *wlc_hw = wlc->hw;
- u32 macintstatus;
-
- *wantdpc = false;
-
- if (!wlc_hw->up || !wlc->macintmask)
- return false;
-
- /* read and clear macintstatus and intstatus registers */
- macintstatus = wlc_intstatus(wlc, true);
-
- if (macintstatus == 0xffffffff)
- wiphy_err(wlc->wiphy, "DEVICEREMOVED detected in the ISR code"
- " path\n");
-
- /* it is not for us */
- if (macintstatus == 0)
- return false;
-
- *wantdpc = true;
-
- /* save interrupt status bits */
- wlc->macintstatus = macintstatus;
-
- return true;
-
-}
-
-static bool
-brcms_b_dotxstatus(struct brcms_hardware *wlc_hw, struct tx_status *txs,
- u32 s2)
-{
- /* discard intermediate indications for ucode with one legitimate case:
- * e.g. if "useRTS" is set. ucode did a successful rts/cts exchange, but the subsequent
- * tx of DATA failed. so it will start rts/cts from the beginning (resetting the rts
- * transmission count)
- */
- if (!(txs->status & TX_STATUS_AMPDU)
- && (txs->status & TX_STATUS_INTERMEDIATE)) {
- return false;
- }
-
- return brcms_c_dotxstatus(wlc_hw->wlc, txs, s2);
-}
-
-/* process tx completion events in BMAC
- * Return true if more tx status need to be processed. false otherwise.
- */
-static bool
-brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
-{
- bool morepending = false;
- struct brcms_c_info *wlc = wlc_hw->wlc;
- d11regs_t *regs;
- struct tx_status txstatus, *txs;
- u32 s1, s2;
- uint n = 0;
- /*
- * Param 'max_tx_num' indicates max. # tx status to process before
- * break out.
- */
- uint max_tx_num = bound ? wlc->pub->tunables->txsbnd : -1;
-
- BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- txs = &txstatus;
- regs = wlc_hw->regs;
- while (!(*fatal)
- && (s1 = R_REG(&regs->frmtxstatus)) & TXS_V) {
-
- if (s1 == 0xffffffff) {
- wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n",
- wlc_hw->unit, __func__);
- return morepending;
- }
-
- s2 = R_REG(&regs->frmtxstatus2);
-
- txs->status = s1 & TXS_STATUS_MASK;
- txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
- txs->sequence = s2 & TXS_SEQ_MASK;
- txs->phyerr = (s2 & TXS_PTX_MASK) >> TXS_PTX_SHIFT;
- txs->lasttxtime = 0;
-
- *fatal = brcms_b_dotxstatus(wlc_hw, txs, s2);
-
- /* !give others some time to run! */
- if (++n >= max_tx_num)
- break;
- }
-
- if (*fatal)
- return 0;
-
- if (n >= max_tx_num)
- morepending = true;
-
- if (!pktq_empty(&wlc->pkt_queue->q))
- brcms_c_send_q(wlc);
-
- return morepending;
-}
-
-void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
-{
- struct brcms_hardware *wlc_hw = wlc->hw;
- d11regs_t *regs = wlc_hw->regs;
- u32 mc, mi;
- struct wiphy *wiphy = wlc->wiphy;
-
- BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
- wlc_hw->band->bandunit);
-
- /*
- * Track overlapping suspend requests
- */
- wlc_hw->mac_suspend_depth++;
- if (wlc_hw->mac_suspend_depth > 1)
- return;
-
- /* force the core awake */
- brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
-
- mc = R_REG(&regs->maccontrol);
-
- if (mc == 0xffffffff) {
- wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
- __func__);
- brcms_down(wlc->wl);
- return;
- }
- WARN_ON(mc & MCTL_PSM_JMP_0);
- WARN_ON(!(mc & MCTL_PSM_RUN));
- WARN_ON(!(mc & MCTL_EN_MAC));
-
- mi = R_REG(&regs->macintstatus);
- if (mi == 0xffffffff) {
- wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
- __func__);
- brcms_down(wlc->wl);
- return;
- }
- WARN_ON(mi & MI_MACSSPNDD);
-
- brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0);
-
- SPINWAIT(!(R_REG(&regs->macintstatus) & MI_MACSSPNDD),
- BRCMS_MAX_MAC_SUSPEND);
-
- if (!(R_REG(&regs->macintstatus) & MI_MACSSPNDD)) {
- wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
- " and MI_MACSSPNDD is still not on.\n",
- wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
- wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
- "psm_brc 0x%04x\n", wlc_hw->unit,
- R_REG(&regs->psmdebug),
- R_REG(&regs->phydebug),
- R_REG(&regs->psm_brc));
- }
-
- mc = R_REG(&regs->maccontrol);
- if (mc == 0xffffffff) {
- wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
- __func__);
- brcms_down(wlc->wl);
- return;
- }
- WARN_ON(mc & MCTL_PSM_JMP_0);
- WARN_ON(!(mc & MCTL_PSM_RUN));
- WARN_ON(mc & MCTL_EN_MAC);
-}
-
-void brcms_c_enable_mac(struct brcms_c_info *wlc)
-{
- struct brcms_hardware *wlc_hw = wlc->hw;
- d11regs_t *regs = wlc_hw->regs;
- u32 mc, mi;
-
- BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
- wlc->band->bandunit);
-
- /*
- * Track overlapping suspend requests
- */
- wlc_hw->mac_suspend_depth--;
- if (wlc_hw->mac_suspend_depth > 0)
- return;
-
- mc = R_REG(&regs->maccontrol);
- WARN_ON(mc & MCTL_PSM_JMP_0);
- WARN_ON(mc & MCTL_EN_MAC);
- WARN_ON(!(mc & MCTL_PSM_RUN));
-
- brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC);
- W_REG(&regs->macintstatus, MI_MACSSPNDD);
-
- mc = R_REG(&regs->maccontrol);
- WARN_ON(mc & MCTL_PSM_JMP_0);
- WARN_ON(!(mc & MCTL_EN_MAC));
- WARN_ON(!(mc & MCTL_PSM_RUN));
-
- mi = R_REG(&regs->macintstatus);
- WARN_ON(mi & MI_MACSSPNDD);
-
- brcms_c_ucode_wake_override_clear(wlc_hw,
- BRCMS_WAKE_OVERRIDE_MACSUSPEND);
-}
-
-static void brcms_upd_ofdm_pctl1_table(struct brcms_hardware *wlc_hw)
-{
- u8 rate;
- u8 rates[8] = {
- BRCM_RATE_6M, BRCM_RATE_9M, BRCM_RATE_12M, BRCM_RATE_18M,
- BRCM_RATE_24M, BRCM_RATE_36M, BRCM_RATE_48M, BRCM_RATE_54M
- };
- u16 entry_ptr;
- u16 pctl1;
- uint i;
-
- if (!BRCMS_PHY_11N_CAP(wlc_hw->band))
- return;
-
- /* walk the phy rate table and update the entries */
- for (i = 0; i < ARRAY_SIZE(rates); i++) {
- rate = rates[i];
-
- entry_ptr = brcms_b_ofdm_ratetable_offset(wlc_hw, rate);
-
- /* read the SHM Rate Table entry OFDM PCTL1 values */
- pctl1 =
- brcms_b_read_shm(wlc_hw, entry_ptr + M_RT_OFDM_PCTL1_POS);
-
- /* modify the value */
- pctl1 &= ~PHY_TXC1_MODE_MASK;
- pctl1 |= (wlc_hw->hw_stf_ss_opmode << PHY_TXC1_MODE_SHIFT);
-
- /* Update the SHM Rate Table entry OFDM PCTL1 values */
- brcms_b_write_shm(wlc_hw, entry_ptr + M_RT_OFDM_PCTL1_POS,
- pctl1);
- }
-}
-
-static u16 brcms_b_ofdm_ratetable_offset(struct brcms_hardware *wlc_hw,
- u8 rate)
-{
- uint i;
- u8 plcp_rate = 0;
- struct plcp_signal_rate_lookup {
- u8 rate;
- u8 signal_rate;
- };
- /* OFDM RATE sub-field of PLCP SIGNAL field, per 802.11 sec 17.3.4.1 */
- const struct plcp_signal_rate_lookup rate_lookup[] = {
- {BRCM_RATE_6M, 0xB},
- {BRCM_RATE_9M, 0xF},
- {BRCM_RATE_12M, 0xA},
- {BRCM_RATE_18M, 0xE},
- {BRCM_RATE_24M, 0x9},
- {BRCM_RATE_36M, 0xD},
- {BRCM_RATE_48M, 0x8},
- {BRCM_RATE_54M, 0xC}
- };
-
- for (i = 0; i < ARRAY_SIZE(rate_lookup); i++) {
- if (rate == rate_lookup[i].rate) {
- plcp_rate = rate_lookup[i].signal_rate;
- break;
- }
- }
-
- /* Find the SHM pointer to the rate table entry by looking in the
- * Direct-map Table
- */
- return 2 * brcms_b_read_shm(wlc_hw, M_RT_DIRMAP_A + (plcp_rate * 2));
-}
-
-void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode)
-{
- wlc_hw->hw_stf_ss_opmode = stf_mode;
-
- if (wlc_hw->clk)
- brcms_upd_ofdm_pctl1_table(wlc_hw);
-}
-
-void
-brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
- u32 *tsf_h_ptr)
-{
- d11regs_t *regs = wlc_hw->regs;
-
- /* read the tsf timer low, then high to get an atomic read */
- *tsf_l_ptr = R_REG(&regs->tsf_timerlow);
- *tsf_h_ptr = R_REG(&regs->tsf_timerhigh);
-
- return;
-}
-
-static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
-{
- d11regs_t *regs;
- u32 w, val;
- struct wiphy *wiphy = wlc_hw->wlc->wiphy;
-
- BCMMSG(wiphy, "wl%d\n", wlc_hw->unit);
-
- regs = wlc_hw->regs;
-
- /* Validate dchip register access */
-
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- w = R_REG(&regs->objdata);
-
- /* Can we write and read back a 32bit register? */
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, (u32) 0xaa5555aa);
-
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- val = R_REG(&regs->objdata);
- if (val != (u32) 0xaa5555aa) {
- wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
- "expected 0xaa5555aa\n", wlc_hw->unit, val);
- return false;
- }
-
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, (u32) 0x55aaaa55);
-
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- val = R_REG(&regs->objdata);
- if (val != (u32) 0x55aaaa55) {
- wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
- "expected 0x55aaaa55\n", wlc_hw->unit, val);
- return false;
- }
-
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, w);
-
- /* clear CFPStart */
- W_REG(&regs->tsf_cfpstart, 0);
-
- w = R_REG(&regs->maccontrol);
- if ((w != (MCTL_IHR_EN | MCTL_WAKE)) &&
- (w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) {
- wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = "
- "0x%x, expected 0x%x or 0x%x\n", wlc_hw->unit, w,
- (MCTL_IHR_EN | MCTL_WAKE),
- (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE));
- return false;
- }
-
- return true;
-}
-
-#define PHYPLL_WAIT_US 100000
-
-void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
-{
- d11regs_t *regs;
- u32 tmp;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- tmp = 0;
- regs = wlc_hw->regs;
-
- if (on) {
- if ((wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
- OR_REG(&regs->clk_ctl_st,
- (CCS_ERSRC_REQ_HT | CCS_ERSRC_REQ_D11PLL |
- CCS_ERSRC_REQ_PHYPLL));
- SPINWAIT((R_REG(&regs->clk_ctl_st) &
- (CCS_ERSRC_AVAIL_HT)) != (CCS_ERSRC_AVAIL_HT),
- PHYPLL_WAIT_US);
-
- tmp = R_REG(&regs->clk_ctl_st);
- if ((tmp & (CCS_ERSRC_AVAIL_HT)) !=
- (CCS_ERSRC_AVAIL_HT)) {
- wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY"
- " PLL failed\n", __func__);
- }
- } else {
- OR_REG(&regs->clk_ctl_st,
- (CCS_ERSRC_REQ_D11PLL | CCS_ERSRC_REQ_PHYPLL));
- SPINWAIT((R_REG(&regs->clk_ctl_st) &
- (CCS_ERSRC_AVAIL_D11PLL |
- CCS_ERSRC_AVAIL_PHYPLL)) !=
- (CCS_ERSRC_AVAIL_D11PLL |
- CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US);
-
- tmp = R_REG(&regs->clk_ctl_st);
- if ((tmp &
- (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
- !=
- (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL)) {
- wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on "
- "PHY PLL failed\n", __func__);
- }
- }
- } else {
- /* Since the PLL may be shared, other cores can still be requesting it;
- * so we'll deassert the request but not wait for status to comply.
- */
- AND_REG(&regs->clk_ctl_st, ~CCS_ERSRC_REQ_PHYPLL);
- tmp = R_REG(&regs->clk_ctl_st);
- }
-}
-
-void brcms_c_coredisable(struct brcms_hardware *wlc_hw)
-{
- bool dev_gone;
-
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- dev_gone = DEVICEREMOVED(wlc_hw->wlc);
-
- if (dev_gone)
- return;
-
- if (wlc_hw->noreset)
- return;
-
- /* radio off */
- wlc_phy_switch_radio(wlc_hw->band->pi, OFF);
-
- /* turn off analog core */
- wlc_phy_anacore(wlc_hw->band->pi, OFF);
-
- /* turn off PHYPLL to save power */
- brcms_b_core_phypll_ctl(wlc_hw, false);
-
- /* No need to set wlc->pub->radio_active = OFF
- * because this function needs down capability and
- * radio_active is designed for BCMNODOWN.
- */
-
- /* remove gpio controls */
- if (wlc_hw->ucode_dbgsel)
- ai_gpiocontrol(wlc_hw->sih, ~0, 0, GPIO_DRV_PRIORITY);
-
- wlc_hw->clk = false;
- ai_core_disable(wlc_hw->sih, 0);
- wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
-}
-
-/* power both the pll and external oscillator on/off */
-static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
-{
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d: want %d\n", wlc_hw->unit, want);
-
- /* dont power down if plldown is false or we must poll hw radio disable */
- if (!want && wlc_hw->pllreq)
- return;
-
- if (wlc_hw->sih)
- ai_clkctl_xtal(wlc_hw->sih, XTAL | PLL, want);
-
- wlc_hw->sbclk = want;
- if (!wlc_hw->sbclk) {
- wlc_hw->clk = false;
- if (wlc_hw->band && wlc_hw->band->pi)
- wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
- }
-}
-
-static void brcms_c_flushqueues(struct brcms_c_info *wlc)
-{
- struct brcms_hardware *wlc_hw = wlc->hw;
- uint i;
-
- wlc->txpend16165war = 0;
-
- /* free any posted tx packets */
- for (i = 0; i < NFIFO; i++)
- if (wlc_hw->di[i]) {
- dma_txreclaim(wlc_hw->di[i], DMA_RANGE_ALL);
- TXPKTPENDCLR(wlc, i);
- BCMMSG(wlc->wiphy, "pktpend fifo %d clrd\n", i);
- }
-
- /* free any posted rx packets */
- dma_rxreclaim(wlc_hw->di[RX_FIFO]);
-}
-
-u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset)
-{
- return brcms_b_read_objmem(wlc_hw, offset, OBJADDR_SHM_SEL);
-}
-
-void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, u16 v)
-{
- brcms_b_write_objmem(wlc_hw, offset, v, OBJADDR_SHM_SEL);
-}
-
-static u16
-brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel)
-{
- d11regs_t *regs = wlc_hw->regs;
- volatile u16 *objdata_lo = (volatile u16 *)&regs->objdata;
- volatile u16 *objdata_hi = objdata_lo + 1;
- u16 v;
-
- W_REG(&regs->objaddr, sel | (offset >> 2));
- (void)R_REG(&regs->objaddr);
- if (offset & 2) {
- v = R_REG(objdata_hi);
- } else {
- v = R_REG(objdata_lo);
- }
-
- return v;
-}
-
-static void
-brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v,
- u32 sel)
-{
- d11regs_t *regs = wlc_hw->regs;
- volatile u16 *objdata_lo = (volatile u16 *)&regs->objdata;
- volatile u16 *objdata_hi = objdata_lo + 1;
-
- W_REG(&regs->objaddr, sel | (offset >> 2));
- (void)R_REG(&regs->objaddr);
- if (offset & 2) {
- W_REG(objdata_hi, v);
- } else {
- W_REG(objdata_lo, v);
- }
-}
-
-/* Copy a buffer to shared memory of specified type .
- * SHM 'offset' needs to be an even address and
- * Buffer length 'len' must be an even number of bytes
- * 'sel' selects the type of memory
- */
-void
-brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, uint offset,
- const void *buf, int len, u32 sel)
-{
- u16 v;
- const u8 *p = (const u8 *)buf;
- int i;
-
- if (len <= 0 || (offset & 1) || (len & 1))
- return;
-
- for (i = 0; i < len; i += 2) {
- v = p[i] | (p[i + 1] << 8);
- brcms_b_write_objmem(wlc_hw, offset + i, v, sel);
- }
-}
-
-/* Copy a piece of shared memory of specified type to a buffer .
- * SHM 'offset' needs to be an even address and
- * Buffer length 'len' must be an even number of bytes
- * 'sel' selects the type of memory
- */
-void
-brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset, void *buf,
- int len, u32 sel)
-{
- u16 v;
- u8 *p = (u8 *) buf;
- int i;
-
- if (len <= 0 || (offset & 1) || (len & 1))
- return;
-
- for (i = 0; i < len; i += 2) {
- v = brcms_b_read_objmem(wlc_hw, offset + i, sel);
- p[i] = v & 0xFF;
- p[i + 1] = (v >> 8) & 0xFF;
- }
-}
-
-void brcms_b_copyfrom_vars(struct brcms_hardware *wlc_hw, char **buf,
- uint *len)
-{
- BCMMSG(wlc_hw->wlc->wiphy, "nvram vars totlen=%d\n",
- wlc_hw->vars_size);
-
- *buf = wlc_hw->vars;
- *len = wlc_hw->vars_size;
-}
-
-void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw, u16 SRL, u16 LRL)
-{
- wlc_hw->SRL = SRL;
- wlc_hw->LRL = LRL;
-
- /* write retry limit to SCR, shouldn't need to suspend */
- if (wlc_hw->up) {
- W_REG(&wlc_hw->regs->objaddr,
- OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, wlc_hw->SRL);
- W_REG(&wlc_hw->regs->objaddr,
- OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, wlc_hw->LRL);
- }
-}
-
-void brcms_b_pllreq(struct brcms_hardware *wlc_hw, bool set, mbool req_bit)
-{
- if (set) {
- if (mboolisset(wlc_hw->pllreq, req_bit))
- return;
-
- mboolset(wlc_hw->pllreq, req_bit);
-
- if (mboolisset(wlc_hw->pllreq, BRCMS_PLLREQ_FLIP)) {
- if (!wlc_hw->sbclk) {
- brcms_b_xtal(wlc_hw, ON);
- }
- }
- } else {
- if (!mboolisset(wlc_hw->pllreq, req_bit))
- return;
-
- mboolclr(wlc_hw->pllreq, req_bit);
-
- if (mboolisset(wlc_hw->pllreq, BRCMS_PLLREQ_FLIP)) {
- if (wlc_hw->sbclk) {
- brcms_b_xtal(wlc_hw, OFF);
- }
- }
- }
-
- return;
-}
-
-u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate)
-{
- u16 table_ptr;
- u8 phy_rate, index;
-
- /* get the phy specific rate encoding for the PLCP SIGNAL field */
- if (IS_OFDM(rate))
- table_ptr = M_RT_DIRMAP_A;
- else
- table_ptr = M_RT_DIRMAP_B;
-
- /* for a given rate, the LS-nibble of the PLCP SIGNAL field is
- * the index into the rate table.
- */
- phy_rate = rate_info[rate] & BRCMS_RATE_MASK;
- index = phy_rate & 0xf;
-
- /* Find the SHM pointer to the rate table entry by looking in the
- * Direct-map Table
- */
- return 2 * brcms_b_read_shm(wlc_hw, table_ptr + (index * 2));
-}
-
-void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
-{
- wlc_hw->antsel_avail = antsel_avail;
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/bmac.h b/drivers/staging/brcm80211/brcmsmac/bmac.h
deleted file mode 100644
index 3c9ad4f3bd2..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/bmac.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#ifndef _BRCM_BOTTOM_MAC_H_
-#define _BRCM_BOTTOM_MAC_H_
-
-#include <brcmu_wifi.h>
-#include "types.h"
-
-/* dup state between BMAC(struct brcms_hardware) and HIGH(struct brcms_c_info)
- driver */
-struct brcms_b_state {
- u32 machwcap; /* mac hw capibility */
- u32 preamble_ovr; /* preamble override */
-};
-
-enum {
- IOV_BMAC_DIAG,
- IOV_BMAC_SBGPIOTIMERVAL,
- IOV_BMAC_SBGPIOOUT,
- IOV_BMAC_CCGPIOCTRL, /* CC GPIOCTRL REG */
- IOV_BMAC_CCGPIOOUT, /* CC GPIOOUT REG */
- IOV_BMAC_CCGPIOOUTEN, /* CC GPIOOUTEN REG */
- IOV_BMAC_CCGPIOIN, /* CC GPIOIN REG */
- IOV_BMAC_WPSGPIO, /* WPS push button GPIO pin */
- IOV_BMAC_OTPDUMP,
- IOV_BMAC_OTPSTAT,
- IOV_BMAC_PCIEASPM, /* obfuscation clkreq/aspm control */
- IOV_BMAC_PCIEADVCORRMASK, /* advanced correctable error mask */
- IOV_BMAC_PCIECLKREQ, /* PCIE 1.1 clockreq enab support */
- IOV_BMAC_PCIELCREG, /* PCIE LCREG */
- IOV_BMAC_SBGPIOTIMERMASK,
- IOV_BMAC_RFDISABLEDLY,
- IOV_BMAC_PCIEREG, /* PCIE REG */
- IOV_BMAC_PCICFGREG, /* PCI Config register */
- IOV_BMAC_PCIESERDESREG, /* PCIE SERDES REG (dev, 0}offset) */
- IOV_BMAC_PCIEGPIOOUT, /* PCIEOUT REG */
- IOV_BMAC_PCIEGPIOOUTEN, /* PCIEOUTEN REG */
- IOV_BMAC_PCIECLKREQENCTRL, /* clkreqenctrl REG (PCIE REV > 6.0 */
- IOV_BMAC_DMALPBK,
- IOV_BMAC_CCREG,
- IOV_BMAC_COREREG,
- IOV_BMAC_SDCIS,
- IOV_BMAC_SDIO_DRIVE,
- IOV_BMAC_OTPW,
- IOV_BMAC_NVOTPW,
- IOV_BMAC_SROM,
- IOV_BMAC_SRCRC,
- IOV_BMAC_CIS_SOURCE,
- IOV_BMAC_CISVAR,
- IOV_BMAC_OTPLOCK,
- IOV_BMAC_OTP_CHIPID,
- IOV_BMAC_CUSTOMVAR1,
- IOV_BMAC_BOARDFLAGS,
- IOV_BMAC_BOARDFLAGS2,
- IOV_BMAC_WPSLED,
- IOV_BMAC_NVRAM_SOURCE,
- IOV_BMAC_OTP_RAW_READ,
- IOV_BMAC_LAST
-};
-
-extern int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
- uint unit, bool piomode, void *regsva, uint bustype,
- void *btparam);
-extern int brcms_b_detach(struct brcms_c_info *wlc);
-extern void brcms_b_watchdog(void *arg);
-
-/* up/down, reset, clk */
-extern void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw,
- uint offset, const void *buf, int len,
- u32 sel);
-extern void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
- void *buf, int len, u32 sel);
-#define brcms_b_copyfrom_shm(wlc_hw, offset, buf, len) \
- brcms_b_copyfrom_objmem(wlc_hw, offset, buf, len, OBJADDR_SHM_SEL)
-#define brcms_b_copyto_shm(wlc_hw, offset, buf, len) \
- brcms_b_copyto_objmem(wlc_hw, offset, buf, len, OBJADDR_SHM_SEL)
-
-extern void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
-extern void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
-extern void brcms_b_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_b_init(struct brcms_hardware *wlc_hw, chanspec_t chanspec,
- bool mute);
-extern int brcms_b_up_prep(struct brcms_hardware *wlc_hw);
-extern int brcms_b_up_finish(struct brcms_hardware *wlc_hw);
-extern int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw);
-extern int brcms_b_down_finish(struct brcms_hardware *wlc_hw);
-extern void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
-
-/* chanspec, ucode interface */
-extern void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw,
- chanspec_t chanspec,
- bool mute, struct txpwr_limits *txpwr);
-
-extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
- uint *blocks);
-extern void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask,
- u16 val, int bands);
-extern void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
-extern u16 brcms_b_mhf_get(struct brcms_hardware *wlc_hw, u8 idx, int bands);
-extern void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
-extern u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
-extern void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw,
- u8 antsel_type);
-extern int brcms_b_state_get(struct brcms_hardware *wlc_hw,
- struct brcms_b_state *state);
-extern void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset,
- u16 v);
-extern u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
-extern void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw,
- int offset, int len, void *buf);
-extern void brcms_b_copyfrom_vars(struct brcms_hardware *wlc_hw, char **buf,
- uint *len);
-
-extern void brcms_b_hw_etheraddr(struct brcms_hardware *wlc_hw,
- u8 *ea);
-
-extern bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw);
-extern void brcms_b_set_shortslot(struct brcms_hardware *wlc_hw,
- bool shortslot);
-extern void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw,
- u8 stf_mode);
-
-extern void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw);
-
-extern void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
- u32 override_bit);
-extern void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
- u32 override_bit);
-
-extern void brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw,
- int match_reg_offset,
- const u8 *addr);
-extern void brcms_b_write_hw_bcntemplates(struct brcms_hardware *wlc_hw,
- void *bcn, int len, bool both);
-
-extern void brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
- u32 *tsf_h_ptr);
-extern void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin);
-extern void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax);
-
-extern void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw, u16 SRL,
- u16 LRL);
-
-extern void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw);
-
-
-/* API for BMAC driver (e.g. wlc_phy.c etc) */
-
-extern void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
-extern void brcms_b_pllreq(struct brcms_hardware *wlc_hw, bool set,
- mbool req_bit);
-extern void brcms_b_hw_up(struct brcms_hardware *wlc_hw);
-extern u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
-extern void brcms_b_antsel_set(struct brcms_hardware *wlc_hw,
- u32 antsel_avail);
-
-#endif /* _BRCM_BOTTOM_MAC_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/channel.c b/drivers/staging/brcm80211/brcmsmac/channel.c
deleted file mode 100644
index f59693e1d8a..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/channel.c
+++ /dev/null
@@ -1,1559 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/types.h>
-#include <net/mac80211.h>
-
-#include <defs.h>
-#include "pub.h"
-#include "phy/phy_hal.h"
-#include "bmac.h"
-#include "main.h"
-#include "stf.h"
-#include "channel.h"
-
-#define VALID_CHANNEL20_DB(wlc, val) brcms_c_valid_channel20_db((wlc)->cmi, val)
-#define VALID_CHANNEL20_IN_BAND(wlc, bandunit, val) \
- brcms_c_valid_channel20_in_band((wlc)->cmi, bandunit, val)
-#define VALID_CHANNEL20(wlc, val) brcms_c_valid_channel20((wlc)->cmi, val)
-
-struct brcms_cm_band {
- u8 locale_flags; /* struct locale_info flags */
- chanvec_t valid_channels; /* List of valid channels in the country */
- const chanvec_t *restricted_channels; /* List of restricted use channels */
- const chanvec_t *radar_channels; /* List of radar sensitive channels */
- u8 PAD[8];
-};
-
-struct brcms_cm_info {
- struct brcms_pub *pub;
- struct brcms_c_info *wlc;
- char srom_ccode[BRCM_CNTRY_BUF_SZ]; /* Country Code in SROM */
- uint srom_regrev; /* Regulatory Rev for the SROM ccode */
- const struct country_info *country; /* current country def */
- char ccode[BRCM_CNTRY_BUF_SZ]; /* current internal Country Code */
- uint regrev; /* current Regulatory Revision */
- char country_abbrev[BRCM_CNTRY_BUF_SZ]; /* current advertised ccode */
- /* per-band state (one per phy/radio) */
- struct brcms_cm_band bandstate[MAXBANDS];
- /* quiet channels currently for radar sensitivity or 11h support */
- chanvec_t quiet_channels; /* channels on which we cannot transmit */
-};
-
-static int brcms_c_channels_init(struct brcms_cm_info *wlc_cm,
- const struct country_info *country);
-static void brcms_c_set_country_common(struct brcms_cm_info *wlc_cm,
- const char *country_abbrev,
- const char *ccode, uint regrev,
- const struct country_info *country);
-static int brcms_c_set_countrycode(struct brcms_cm_info *wlc_cm,
- const char *ccode);
-static int brcms_c_set_countrycode_rev(struct brcms_cm_info *wlc_cm,
- const char *country_abbrev,
- const char *ccode, int regrev);
-static int brcms_c_country_aggregate_map(struct brcms_cm_info *wlc_cm,
- const char *ccode,
- char *mapped_ccode, uint *mapped_regrev);
-
-static const struct country_info *
-brcms_c_country_lookup_direct(const char *ccode, uint regrev);
-
-static const struct country_info *
-brcms_c_countrycode_map(struct brcms_cm_info *wlc_cm,
- const char *ccode, char *mapped_ccode,
- uint *mapped_regrev);
-
-static void brcms_c_channels_commit(struct brcms_cm_info *wlc_cm);
-static void brcms_c_quiet_channels_reset(struct brcms_cm_info *wlc_cm);
-static bool brcms_c_quiet_chanspec(struct brcms_cm_info *wlc_cm,
- chanspec_t chspec);
-static bool brcms_c_valid_channel20_db(struct brcms_cm_info *wlc_cm, uint val);
-static bool brcms_c_valid_channel20_in_band(struct brcms_cm_info *wlc_cm,
- uint bandunit, uint val);
-static bool brcms_c_valid_channel20(struct brcms_cm_info *wlc_cm, uint val);
-
-static const struct country_info *
-brcms_c_country_lookup(struct brcms_c_info *wlc, const char *ccode);
-
-static void brcms_c_locale_get_channels(const struct locale_info *locale,
- chanvec_t *valid_channels);
-static const struct locale_info *brcms_c_get_locale_2g(u8 locale_idx);
-static const struct locale_info *brcms_c_get_locale_5g(u8 locale_idx);
-static bool brcms_c_japan(struct brcms_c_info *wlc);
-static bool brcms_c_japan_ccode(const char *ccode);
-static void brcms_c_channel_min_txpower_limits_with_local_constraint(
- struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr,
- u8 local_constraint_qdbm);
-static void brcms_c_locale_add_channels(chanvec_t *target,
- const chanvec_t *channels);
-static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx);
-static const struct locale_mimo_info *brcms_c_get_mimo_5g(u8 locale_idx);
-
-/* QDB() macro takes a dB value and converts to a quarter dB value */
-#ifdef QDB
-#undef QDB
-#endif
-#define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR)
-
-/* Regulatory Matrix Spreadsheet (CLM) MIMO v3.7.9 */
-
-/*
- * Some common channel sets
- */
-
-/* No channels */
-static const chanvec_t chanvec_none = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* All 2.4 GHz HW channels */
-const chanvec_t chanvec_all_2G = {
- {0xfe, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* All 5 GHz HW channels */
-const chanvec_t chanvec_all_5G = {
- {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x11, 0x11,
- 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x20, 0x22, 0x22, 0x00, 0x00, 0x11,
- 0x11, 0x11, 0x11, 0x01}
-};
-
-/*
- * Radar channel sets
- */
-
-/* No radar */
-#define radar_set_none chanvec_none
-
-static const chanvec_t radar_set1 = { /* Channels 52 - 64, 100 - 140 */
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, /* 52 - 60 */
- 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11, /* 64, 100 - 124 */
- 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 128 - 140 */
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/*
- * Restricted channel sets
- */
-
-#define restricted_set_none chanvec_none
-
-/* Channels 34, 38, 42, 46 */
-static const chanvec_t restricted_set_japan_legacy = {
- {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* Channels 12, 13 */
-static const chanvec_t restricted_set_2g_short = {
- {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* Channel 165 */
-static const chanvec_t restricted_chan_165 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* Channels 36 - 48 & 149 - 165 */
-static const chanvec_t restricted_low_hi = {
- {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x20, 0x22, 0x22, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* Channels 12 - 14 */
-static const chanvec_t restricted_set_12_13_14 = {
- {0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-#define LOCALE_CHAN_01_11 (1<<0)
-#define LOCALE_CHAN_12_13 (1<<1)
-#define LOCALE_CHAN_14 (1<<2)
-#define LOCALE_SET_5G_LOW_JP1 (1<<3) /* 34-48, step 2 */
-#define LOCALE_SET_5G_LOW_JP2 (1<<4) /* 34-46, step 4 */
-#define LOCALE_SET_5G_LOW1 (1<<5) /* 36-48, step 4 */
-#define LOCALE_SET_5G_LOW2 (1<<6) /* 52 */
-#define LOCALE_SET_5G_LOW3 (1<<7) /* 56-64, step 4 */
-#define LOCALE_SET_5G_MID1 (1<<8) /* 100-116, step 4 */
-#define LOCALE_SET_5G_MID2 (1<<9) /* 120-124, step 4 */
-#define LOCALE_SET_5G_MID3 (1<<10) /* 128 */
-#define LOCALE_SET_5G_HIGH1 (1<<11) /* 132-140, step 4 */
-#define LOCALE_SET_5G_HIGH2 (1<<12) /* 149-161, step 4 */
-#define LOCALE_SET_5G_HIGH3 (1<<13) /* 165 */
-#define LOCALE_CHAN_52_140_ALL (1<<14)
-#define LOCALE_SET_5G_HIGH4 (1<<15) /* 184-216 */
-
-#define LOCALE_CHAN_36_64 (LOCALE_SET_5G_LOW1 | LOCALE_SET_5G_LOW2 | LOCALE_SET_5G_LOW3)
-#define LOCALE_CHAN_52_64 (LOCALE_SET_5G_LOW2 | LOCALE_SET_5G_LOW3)
-#define LOCALE_CHAN_100_124 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2)
-#define LOCALE_CHAN_100_140 \
- (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2 | LOCALE_SET_5G_MID3 | LOCALE_SET_5G_HIGH1)
-#define LOCALE_CHAN_149_165 (LOCALE_SET_5G_HIGH2 | LOCALE_SET_5G_HIGH3)
-#define LOCALE_CHAN_184_216 LOCALE_SET_5G_HIGH4
-
-#define LOCALE_CHAN_01_14 (LOCALE_CHAN_01_11 | LOCALE_CHAN_12_13 | LOCALE_CHAN_14)
-
-#define LOCALE_RADAR_SET_NONE 0
-#define LOCALE_RADAR_SET_1 1
-
-#define LOCALE_RESTRICTED_NONE 0
-#define LOCALE_RESTRICTED_SET_2G_SHORT 1
-#define LOCALE_RESTRICTED_CHAN_165 2
-#define LOCALE_CHAN_ALL_5G 3
-#define LOCALE_RESTRICTED_JAPAN_LEGACY 4
-#define LOCALE_RESTRICTED_11D_2G 5
-#define LOCALE_RESTRICTED_11D_5G 6
-#define LOCALE_RESTRICTED_LOW_HI 7
-#define LOCALE_RESTRICTED_12_13_14 8
-
-/* global memory to provide working buffer for expanded locale */
-
-static const chanvec_t *g_table_radar_set[] = {
- &chanvec_none,
- &radar_set1
-};
-
-static const chanvec_t *g_table_restricted_chan[] = {
- &chanvec_none, /* restricted_set_none */
- &restricted_set_2g_short,
- &restricted_chan_165,
- &chanvec_all_5G,
- &restricted_set_japan_legacy,
- &chanvec_all_2G, /* restricted_set_11d_2G */
- &chanvec_all_5G, /* restricted_set_11d_5G */
- &restricted_low_hi,
- &restricted_set_12_13_14
-};
-
-static const chanvec_t locale_2g_01_11 = {
- {0xfe, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_2g_12_13 = {
- {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_2g_14 = {
- {0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_5g_LOW_JP1 = {
- {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_5g_LOW_JP2 = {
- {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_5g_LOW1 = {
- {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_5g_LOW2 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_5g_LOW3 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_5g_MID1 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_5g_MID2 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_5g_MID3 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_5g_HIGH1 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x10, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_5g_HIGH2 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x20, 0x22, 0x02, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_5g_HIGH3 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_5g_52_140_ALL = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const chanvec_t locale_5g_HIGH4 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
- 0x11, 0x11, 0x11, 0x11}
-};
-
-static const chanvec_t *g_table_locale_base[] = {
- &locale_2g_01_11,
- &locale_2g_12_13,
- &locale_2g_14,
- &locale_5g_LOW_JP1,
- &locale_5g_LOW_JP2,
- &locale_5g_LOW1,
- &locale_5g_LOW2,
- &locale_5g_LOW3,
- &locale_5g_MID1,
- &locale_5g_MID2,
- &locale_5g_MID3,
- &locale_5g_HIGH1,
- &locale_5g_HIGH2,
- &locale_5g_HIGH3,
- &locale_5g_52_140_ALL,
- &locale_5g_HIGH4
-};
-
-static void brcms_c_locale_add_channels(chanvec_t *target,
- const chanvec_t *channels)
-{
- u8 i;
- for (i = 0; i < sizeof(chanvec_t); i++) {
- target->vec[i] |= channels->vec[i];
- }
-}
-
-static void brcms_c_locale_get_channels(const struct locale_info *locale,
- chanvec_t *channels)
-{
- u8 i;
-
- memset(channels, 0, sizeof(chanvec_t));
-
- for (i = 0; i < ARRAY_SIZE(g_table_locale_base); i++) {
- if (locale->valid_channels & (1 << i)) {
- brcms_c_locale_add_channels(channels,
- g_table_locale_base[i]);
- }
- }
-}
-
-/*
- * Locale Definitions - 2.4 GHz
- */
-static const struct locale_info locale_i = { /* locale i. channel 1 - 13 */
- LOCALE_CHAN_01_11 | LOCALE_CHAN_12_13,
- LOCALE_RADAR_SET_NONE,
- LOCALE_RESTRICTED_SET_2G_SHORT,
- {QDB(19), QDB(19), QDB(19),
- QDB(19), QDB(19), QDB(19)},
- {20, 20, 20, 0},
- BRCMS_EIRP
-};
-
-/*
- * Locale Definitions - 5 GHz
- */
-static const struct locale_info locale_11 = {
- /* locale 11. channel 36 - 48, 52 - 64, 100 - 140, 149 - 165 */
- LOCALE_CHAN_36_64 | LOCALE_CHAN_100_140 | LOCALE_CHAN_149_165,
- LOCALE_RADAR_SET_1,
- LOCALE_RESTRICTED_NONE,
- {QDB(21), QDB(21), QDB(21), QDB(21), QDB(21)},
- {23, 23, 23, 30, 30},
- BRCMS_EIRP | BRCMS_DFS_EU
-};
-
-#define LOCALE_2G_IDX_i 0
-static const struct locale_info *g_locale_2g_table[] = {
- &locale_i
-};
-
-#define LOCALE_5G_IDX_11 0
-static const struct locale_info *g_locale_5g_table[] = {
- &locale_11
-};
-
-/*
- * MIMO Locale Definitions - 2.4 GHz
- */
-static const struct locale_mimo_info locale_bn = {
- {QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
- QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
- QDB(13), QDB(13), QDB(13)},
- {0, 0, QDB(13), QDB(13), QDB(13),
- QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
- QDB(13), 0, 0},
- 0
-};
-
-/* locale mimo 2g indexes */
-#define LOCALE_MIMO_IDX_bn 0
-
-static const struct locale_mimo_info *g_mimo_2g_table[] = {
- &locale_bn
-};
-
-/*
- * MIMO Locale Definitions - 5 GHz
- */
-static const struct locale_mimo_info locale_11n = {
- { /* 12.5 dBm */ 50, 50, 50, QDB(15), QDB(15)},
- {QDB(14), QDB(15), QDB(15), QDB(15), QDB(15)},
- 0
-};
-
-#define LOCALE_MIMO_IDX_11n 0
-static const struct locale_mimo_info *g_mimo_5g_table[] = {
- &locale_11n
-};
-
-#ifdef LC
-#undef LC
-#endif
-#define LC(id) LOCALE_MIMO_IDX_ ## id
-
-#ifdef LC_2G
-#undef LC_2G
-#endif
-#define LC_2G(id) LOCALE_2G_IDX_ ## id
-
-#ifdef LC_5G
-#undef LC_5G
-#endif
-#define LC_5G(id) LOCALE_5G_IDX_ ## id
-
-#define LOCALES(band2, band5, mimo2, mimo5) {LC_2G(band2), LC_5G(band5), LC(mimo2), LC(mimo5)}
-
-static const struct {
- char abbrev[BRCM_CNTRY_BUF_SZ]; /* country abbreviation */
- struct country_info country;
-} cntry_locales[] = {
- {
- "X2", LOCALES(i, 11, bn, 11n)}, /* Worldwide RoW 2 */
-};
-
-#ifdef SUPPORT_40MHZ
-/* 20MHz channel info for 40MHz pairing support */
-struct chan20_info {
- u8 sb;
- u8 adj_sbs;
-};
-
-/* indicates adjacent channels that are allowed for a 40 Mhz channel and
- * those that permitted by the HT
- */
-struct chan20_info chan20_info[] = {
- /* 11b/11g */
-/* 0 */ {1, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 1 */ {2, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 2 */ {3, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 3 */ {4, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 4 */ {5, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 5 */ {6, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 6 */ {7, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 7 */ {8, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 8 */ {9, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 9 */ {10, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 10 */ {11, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 11 */ {12, (CH_LOWER_SB)},
-/* 12 */ {13, (CH_LOWER_SB)},
-/* 13 */ {14, (CH_LOWER_SB)},
-
-/* 11a japan high */
-/* 14 */ {34, (CH_UPPER_SB)},
-/* 15 */ {38, (CH_LOWER_SB)},
-/* 16 */ {42, (CH_LOWER_SB)},
-/* 17 */ {46, (CH_LOWER_SB)},
-
-/* 11a usa low */
-/* 18 */ {36, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 19 */ {40, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 20 */ {44, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 21 */ {48, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 22 */ {52, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 23 */ {56, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 24 */ {60, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 25 */ {64, (CH_LOWER_SB | CH_EWA_VALID)},
-
-/* 11a Europe */
-/* 26 */ {100, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 27 */ {104, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 28 */ {108, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 29 */ {112, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 30 */ {116, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 31 */ {120, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 32 */ {124, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 33 */ {128, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 34 */ {132, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 35 */ {136, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 36 */ {140, (CH_LOWER_SB)},
-
-/* 11a usa high, ref5 only */
-/* The 0x80 bit in pdiv means these are REF5, other entries are REF20 */
-/* 37 */ {149, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 38 */ {153, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 39 */ {157, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 40 */ {161, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 41 */ {165, (CH_LOWER_SB)},
-
-/* 11a japan */
-/* 42 */ {184, (CH_UPPER_SB)},
-/* 43 */ {188, (CH_LOWER_SB)},
-/* 44 */ {192, (CH_UPPER_SB)},
-/* 45 */ {196, (CH_LOWER_SB)},
-/* 46 */ {200, (CH_UPPER_SB)},
-/* 47 */ {204, (CH_LOWER_SB)},
-/* 48 */ {208, (CH_UPPER_SB)},
-/* 49 */ {212, (CH_LOWER_SB)},
-/* 50 */ {216, (CH_LOWER_SB)}
-};
-#endif /* SUPPORT_40MHZ */
-
-static const struct locale_info *brcms_c_get_locale_2g(u8 locale_idx)
-{
- if (locale_idx >= ARRAY_SIZE(g_locale_2g_table)) {
- return NULL; /* error condition */
- }
- return g_locale_2g_table[locale_idx];
-}
-
-static const struct locale_info *brcms_c_get_locale_5g(u8 locale_idx)
-{
- if (locale_idx >= ARRAY_SIZE(g_locale_5g_table)) {
- return NULL; /* error condition */
- }
- return g_locale_5g_table[locale_idx];
-}
-
-static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx)
-{
- if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table)) {
- return NULL;
- }
- return g_mimo_2g_table[locale_idx];
-}
-
-static const struct locale_mimo_info *brcms_c_get_mimo_5g(u8 locale_idx)
-{
- if (locale_idx >= ARRAY_SIZE(g_mimo_5g_table)) {
- return NULL;
- }
- return g_mimo_5g_table[locale_idx];
-}
-
-struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
-{
- struct brcms_cm_info *wlc_cm;
- char country_abbrev[BRCM_CNTRY_BUF_SZ];
- const struct country_info *country;
- struct brcms_pub *pub = wlc->pub;
- char *ccode;
-
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
- wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC);
- if (wlc_cm == NULL) {
- wiphy_err(wlc->wiphy, "wl%d: %s: out of memory", pub->unit,
- __func__);
- return NULL;
- }
- wlc_cm->pub = pub;
- wlc_cm->wlc = wlc;
- wlc->cmi = wlc_cm;
-
- /* store the country code for passing up as a regulatory hint */
- ccode = getvar(wlc->pub->vars, "ccode");
- if (ccode) {
- strncpy(wlc->pub->srom_ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
- }
-
- /* internal country information which must match regulatory constraints in firmware */
- memset(country_abbrev, 0, BRCM_CNTRY_BUF_SZ);
- strncpy(country_abbrev, "X2", sizeof(country_abbrev) - 1);
- country = brcms_c_country_lookup(wlc, country_abbrev);
-
- /* save default country for exiting 11d regulatory mode */
- strncpy(wlc->country_default, country_abbrev, BRCM_CNTRY_BUF_SZ - 1);
-
- /* initialize autocountry_default to driver default */
- strncpy(wlc->autocountry_default, "X2", BRCM_CNTRY_BUF_SZ - 1);
-
- brcms_c_set_countrycode(wlc_cm, country_abbrev);
-
- return wlc_cm;
-}
-
-void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm)
-{
- kfree(wlc_cm);
-}
-
-u8
-brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
- uint bandunit)
-{
- return wlc_cm->bandstate[bandunit].locale_flags;
-}
-
-/* set the driver's current country and regulatory information using a country code
- * as the source. Lookup built in country information found with the country code.
- */
-static int
-brcms_c_set_countrycode(struct brcms_cm_info *wlc_cm, const char *ccode)
-{
- char country_abbrev[BRCM_CNTRY_BUF_SZ];
- strncpy(country_abbrev, ccode, BRCM_CNTRY_BUF_SZ);
- return brcms_c_set_countrycode_rev(wlc_cm, country_abbrev, ccode, -1);
-}
-
-static int
-brcms_c_set_countrycode_rev(struct brcms_cm_info *wlc_cm,
- const char *country_abbrev,
- const char *ccode, int regrev)
-{
- const struct country_info *country;
- char mapped_ccode[BRCM_CNTRY_BUF_SZ];
- uint mapped_regrev;
-
- /* if regrev is -1, lookup the mapped country code,
- * otherwise use the ccode and regrev directly
- */
- if (regrev == -1) {
- /* map the country code to a built-in country code, regrev, and country_info */
- country =
- brcms_c_countrycode_map(wlc_cm, ccode, mapped_ccode,
- &mapped_regrev);
- } else {
- /* find the matching built-in country definition */
- country = brcms_c_country_lookup_direct(ccode, regrev);
- strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
- mapped_regrev = regrev;
- }
-
- if (country == NULL)
- return -EINVAL;
-
- /* set the driver state for the country */
- brcms_c_set_country_common(wlc_cm, country_abbrev, mapped_ccode,
- mapped_regrev, country);
-
- return 0;
-}
-
-/* set the driver's current country and regulatory information using a country code
- * as the source. Look up built in country information found with the country code.
- */
-static void
-brcms_c_set_country_common(struct brcms_cm_info *wlc_cm,
- const char *country_abbrev,
- const char *ccode, uint regrev,
- const struct country_info *country)
-{
- const struct locale_mimo_info *li_mimo;
- const struct locale_info *locale;
- struct brcms_c_info *wlc = wlc_cm->wlc;
- char prev_country_abbrev[BRCM_CNTRY_BUF_SZ];
-
- /* save current country state */
- wlc_cm->country = country;
-
- memset(&prev_country_abbrev, 0, BRCM_CNTRY_BUF_SZ);
- strncpy(prev_country_abbrev, wlc_cm->country_abbrev,
- BRCM_CNTRY_BUF_SZ - 1);
-
- strncpy(wlc_cm->country_abbrev, country_abbrev, BRCM_CNTRY_BUF_SZ - 1);
- strncpy(wlc_cm->ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
- wlc_cm->regrev = regrev;
-
- /* disable/restore nmode based on country regulations */
- li_mimo = brcms_c_get_mimo_2g(country->locale_mimo_2G);
- if (li_mimo && (li_mimo->flags & BRCMS_NO_MIMO)) {
- brcms_c_set_nmode(wlc, OFF);
- wlc->stf->no_cddstbc = true;
- } else {
- wlc->stf->no_cddstbc = false;
- if (N_ENAB(wlc->pub) != wlc->protection->nmode_user)
- brcms_c_set_nmode(wlc, wlc->protection->nmode_user);
- }
-
- brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
- brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
- /* set or restore gmode as required by regulatory */
- locale = brcms_c_get_locale_2g(country->locale_2G);
- if (locale && (locale->flags & BRCMS_NO_OFDM)) {
- brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
- } else {
- brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
- }
-
- brcms_c_channels_init(wlc_cm, country);
-
- return;
-}
-
-/* Lookup a country info structure from a null terminated country code
- * The lookup is case sensitive.
- */
-static const struct country_info *
-brcms_c_country_lookup(struct brcms_c_info *wlc, const char *ccode)
-{
- const struct country_info *country;
- char mapped_ccode[BRCM_CNTRY_BUF_SZ];
- uint mapped_regrev;
-
- /* map the country code to a built-in country code, regrev, and country_info struct */
- country = brcms_c_countrycode_map(wlc->cmi, ccode, mapped_ccode,
- &mapped_regrev);
-
- return country;
-}
-
-static const struct country_info *
-brcms_c_countrycode_map(struct brcms_cm_info *wlc_cm, const char *ccode,
- char *mapped_ccode, uint *mapped_regrev)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- const struct country_info *country;
- uint srom_regrev = wlc_cm->srom_regrev;
- const char *srom_ccode = wlc_cm->srom_ccode;
- int mapped;
-
- /* check for currently supported ccode size */
- if (strlen(ccode) > (BRCM_CNTRY_BUF_SZ - 1)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: ccode \"%s\" too long for "
- "match\n", wlc->pub->unit, __func__, ccode);
- return NULL;
- }
-
- /* default mapping is the given ccode and regrev 0 */
- strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
- *mapped_regrev = 0;
-
- /* If the desired country code matches the srom country code,
- * then the mapped country is the srom regulatory rev.
- * Otherwise look for an aggregate mapping.
- */
- if (!strcmp(srom_ccode, ccode)) {
- *mapped_regrev = srom_regrev;
- mapped = 0;
- wiphy_err(wlc->wiphy, "srom_code == ccode %s\n", __func__);
- } else {
- mapped =
- brcms_c_country_aggregate_map(wlc_cm, ccode, mapped_ccode,
- mapped_regrev);
- }
-
- /* find the matching built-in country definition */
- country = brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
-
- /* if there is not an exact rev match, default to rev zero */
- if (country == NULL && *mapped_regrev != 0) {
- *mapped_regrev = 0;
- country =
- brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
- }
-
- return country;
-}
-
-static int
-brcms_c_country_aggregate_map(struct brcms_cm_info *wlc_cm, const char *ccode,
- char *mapped_ccode, uint *mapped_regrev)
-{
- return false;
-}
-
-/* Lookup a country info structure from a null terminated country
- * abbreviation and regrev directly with no translation.
- */
-static const struct country_info *
-brcms_c_country_lookup_direct(const char *ccode, uint regrev)
-{
- uint size, i;
-
- /* Should just return 0 for single locale driver. */
- /* Keep it this way in case we add more locales. (for now anyway) */
-
- /* all other country def arrays are for regrev == 0, so if regrev is non-zero, fail */
- if (regrev > 0)
- return NULL;
-
- /* find matched table entry from country code */
- size = ARRAY_SIZE(cntry_locales);
- for (i = 0; i < size; i++) {
- if (strcmp(ccode, cntry_locales[i].abbrev) == 0) {
- return &cntry_locales[i].country;
- }
- }
- return NULL;
-}
-
-static int
-brcms_c_channels_init(struct brcms_cm_info *wlc_cm,
- const struct country_info *country)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- uint i, j;
- struct brcms_band *band;
- const struct locale_info *li;
- chanvec_t sup_chan;
- const struct locale_mimo_info *li_mimo;
-
- band = wlc->band;
- for (i = 0; i < NBANDS(wlc);
- i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) {
-
- li = BAND_5G(band->bandtype) ?
- brcms_c_get_locale_5g(country->locale_5G) :
- brcms_c_get_locale_2g(country->locale_2G);
- wlc_cm->bandstate[band->bandunit].locale_flags = li->flags;
- li_mimo = BAND_5G(band->bandtype) ?
- brcms_c_get_mimo_5g(country->locale_mimo_5G) :
- brcms_c_get_mimo_2g(country->locale_mimo_2G);
-
- /* merge the mimo non-mimo locale flags */
- wlc_cm->bandstate[band->bandunit].locale_flags |=
- li_mimo->flags;
-
- wlc_cm->bandstate[band->bandunit].restricted_channels =
- g_table_restricted_chan[li->restricted_channels];
- wlc_cm->bandstate[band->bandunit].radar_channels =
- g_table_radar_set[li->radar_channels];
-
- /* set the channel availability,
- * masking out the channels that may not be supported on this phy
- */
- wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
- &sup_chan);
- brcms_c_locale_get_channels(li,
- &wlc_cm->bandstate[band->bandunit].
- valid_channels);
- for (j = 0; j < sizeof(chanvec_t); j++)
- wlc_cm->bandstate[band->bandunit].valid_channels.
- vec[j] &= sup_chan.vec[j];
- }
-
- brcms_c_quiet_channels_reset(wlc_cm);
- brcms_c_channels_commit(wlc_cm);
-
- return 0;
-}
-
-/* Update the radio state (enable/disable) and tx power targets
- * based on a new set of channel/regulatory information
- */
-static void brcms_c_channels_commit(struct brcms_cm_info *wlc_cm)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- uint chan;
- struct txpwr_limits txpwr;
-
- /* search for the existence of any valid channel */
- for (chan = 0; chan < MAXCHANNEL; chan++) {
- if (VALID_CHANNEL20_DB(wlc, chan)) {
- break;
- }
- }
- if (chan == MAXCHANNEL)
- chan = INVCHANNEL;
-
- /* based on the channel search above, set or clear WL_RADIO_COUNTRY_DISABLE */
- if (chan == INVCHANNEL) {
- /* country/locale with no valid channels, set the radio disable bit */
- mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
- wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\" "
- "nbands %d bandlocked %d\n", wlc->pub->unit,
- __func__, wlc_cm->country_abbrev, NBANDS(wlc),
- wlc->bandlocked);
- } else
- if (mboolisset(wlc->pub->radio_disabled,
- WL_RADIO_COUNTRY_DISABLE)) {
- /* country/locale with valid channel, clear the radio disable bit */
- mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
- }
-
- /* Now that the country abbreviation is set, if the radio supports 2G, then
- * set channel 14 restrictions based on the new locale.
- */
- if (NBANDS(wlc) > 1 || BAND_2G(wlc->band->bandtype)) {
- wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
- brcms_c_japan(wlc) ? true :
- false);
- }
-
- if (wlc->pub->up && chan != INVCHANNEL) {
- brcms_c_channel_reg_limits(wlc_cm, wlc->chanspec, &txpwr);
- brcms_c_channel_min_txpower_limits_with_local_constraint(wlc_cm,
- &txpwr, BRCMS_TXPWR_MAX);
- wlc_phy_txpower_limit_set(wlc->band->pi, &txpwr, wlc->chanspec);
- }
-}
-
-/* reset the quiet channels vector to the union of the restricted and radar channel sets */
-static void brcms_c_quiet_channels_reset(struct brcms_cm_info *wlc_cm)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- uint i, j;
- struct brcms_band *band;
- const chanvec_t *chanvec;
-
- memset(&wlc_cm->quiet_channels, 0, sizeof(chanvec_t));
-
- band = wlc->band;
- for (i = 0; i < NBANDS(wlc);
- i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) {
-
- /* initialize quiet channels for restricted channels */
- chanvec = wlc_cm->bandstate[band->bandunit].restricted_channels;
- for (j = 0; j < sizeof(chanvec_t); j++)
- wlc_cm->quiet_channels.vec[j] |= chanvec->vec[j];
-
- }
-}
-
-static bool
-brcms_c_quiet_chanspec(struct brcms_cm_info *wlc_cm, chanspec_t chspec)
-{
- return N_ENAB(wlc_cm->wlc->pub) && CHSPEC_IS40(chspec) ?
- (isset
- (wlc_cm->quiet_channels.vec,
- LOWER_20_SB(CHSPEC_CHANNEL(chspec)))
- || isset(wlc_cm->quiet_channels.vec,
- UPPER_20_SB(CHSPEC_CHANNEL(chspec)))) : isset(wlc_cm->
- quiet_channels.
- vec,
- CHSPEC_CHANNEL
- (chspec));
-}
-
-/* Is the channel valid for the current locale? (but don't consider channels not
- * available due to bandlocking)
- */
-static bool brcms_c_valid_channel20_db(struct brcms_cm_info *wlc_cm, uint val)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
-
- return VALID_CHANNEL20(wlc, val) ||
- (!wlc->bandlocked
- && VALID_CHANNEL20_IN_BAND(wlc, OTHERBANDUNIT(wlc), val));
-}
-
-/* Is the channel valid for the current locale and specified band? */
-static bool brcms_c_valid_channel20_in_band(struct brcms_cm_info *wlc_cm,
- uint bandunit, uint val)
-{
- return ((val < MAXCHANNEL)
- && isset(wlc_cm->bandstate[bandunit].valid_channels.vec, val));
-}
-
-/* Is the channel valid for the current locale and current band? */
-static bool brcms_c_valid_channel20(struct brcms_cm_info *wlc_cm, uint val)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
-
- return ((val < MAXCHANNEL) &&
- isset(wlc_cm->bandstate[wlc->band->bandunit].valid_channels.vec,
- val));
-}
-
-static void
-brcms_c_channel_min_txpower_limits_with_local_constraint(
- struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr,
- u8 local_constraint_qdbm)
-{
- int j;
-
- /* CCK Rates */
- for (j = 0; j < WL_TX_POWER_CCK_NUM; j++) {
- txpwr->cck[j] = min(txpwr->cck[j], local_constraint_qdbm);
- }
-
- /* 20 MHz Legacy OFDM SISO */
- for (j = 0; j < WL_TX_POWER_OFDM_NUM; j++) {
- txpwr->ofdm[j] = min(txpwr->ofdm[j], local_constraint_qdbm);
- }
-
- /* 20 MHz Legacy OFDM CDD */
- for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++) {
- txpwr->ofdm_cdd[j] =
- min(txpwr->ofdm_cdd[j], local_constraint_qdbm);
- }
-
- /* 40 MHz Legacy OFDM SISO */
- for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++) {
- txpwr->ofdm_40_siso[j] =
- min(txpwr->ofdm_40_siso[j], local_constraint_qdbm);
- }
-
- /* 40 MHz Legacy OFDM CDD */
- for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++) {
- txpwr->ofdm_40_cdd[j] =
- min(txpwr->ofdm_40_cdd[j], local_constraint_qdbm);
- }
-
- /* 20MHz MCS 0-7 SISO */
- for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) {
- txpwr->mcs_20_siso[j] =
- min(txpwr->mcs_20_siso[j], local_constraint_qdbm);
- }
-
- /* 20MHz MCS 0-7 CDD */
- for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) {
- txpwr->mcs_20_cdd[j] =
- min(txpwr->mcs_20_cdd[j], local_constraint_qdbm);
- }
-
- /* 20MHz MCS 0-7 STBC */
- for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) {
- txpwr->mcs_20_stbc[j] =
- min(txpwr->mcs_20_stbc[j], local_constraint_qdbm);
- }
-
- /* 20MHz MCS 8-15 MIMO */
- for (j = 0; j < BRCMS_NUM_RATES_MCS_2_STREAM; j++)
- txpwr->mcs_20_mimo[j] =
- min(txpwr->mcs_20_mimo[j], local_constraint_qdbm);
-
- /* 40MHz MCS 0-7 SISO */
- for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) {
- txpwr->mcs_40_siso[j] =
- min(txpwr->mcs_40_siso[j], local_constraint_qdbm);
- }
-
- /* 40MHz MCS 0-7 CDD */
- for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) {
- txpwr->mcs_40_cdd[j] =
- min(txpwr->mcs_40_cdd[j], local_constraint_qdbm);
- }
-
- /* 40MHz MCS 0-7 STBC */
- for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) {
- txpwr->mcs_40_stbc[j] =
- min(txpwr->mcs_40_stbc[j], local_constraint_qdbm);
- }
-
- /* 40MHz MCS 8-15 MIMO */
- for (j = 0; j < BRCMS_NUM_RATES_MCS_2_STREAM; j++)
- txpwr->mcs_40_mimo[j] =
- min(txpwr->mcs_40_mimo[j], local_constraint_qdbm);
-
- /* 40MHz MCS 32 */
- txpwr->mcs32 = min(txpwr->mcs32, local_constraint_qdbm);
-
-}
-
-void
-brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, chanspec_t chanspec,
- u8 local_constraint_qdbm)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- struct txpwr_limits txpwr;
-
- brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
-
- brcms_c_channel_min_txpower_limits_with_local_constraint(wlc_cm, &txpwr,
- local_constraint_qdbm);
-
- brcms_b_set_chanspec(wlc->hw, chanspec,
- (brcms_c_quiet_chanspec(wlc_cm, chanspec) != 0),
- &txpwr);
-}
-
-#ifdef POWER_DBG
-static void wlc_phy_txpower_limits_dump(struct txpwr_limits *txpwr)
-{
- int i;
- char buf[80];
- char fraction[4][4] = { " ", ".25", ".5 ", ".75" };
-
- sprintf(buf, "CCK ");
- for (i = 0; i < BRCMS_NUM_RATES_CCK; i++) {
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->cck[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->cck[i] % BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz OFDM SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm[i] % BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz OFDM CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_cdd[i] % BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz OFDM SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_40_siso[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz OFDM CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_40_cdd[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS0-7 SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_siso[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_siso[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS0-7 CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_cdd[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS0-7 STBC ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_stbc[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS8-15 SDM ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) {
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_mimo[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS0-7 SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_siso[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS0-7 CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_cdd[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS0-7 STBC ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_stbc[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS8-15 SDM ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) {
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_mimo[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- printk(KERN_DEBUG "MCS32 %2d%s\n",
- txpwr->mcs32 / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs32 % BRCMS_TXPWR_DB_FACTOR]);
-}
-#endif /* POWER_DBG */
-
-void
-brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, chanspec_t chanspec,
- struct txpwr_limits *txpwr)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- uint i;
- uint chan;
- int maxpwr;
- int delta;
- const struct country_info *country;
- struct brcms_band *band;
- const struct locale_info *li;
- int conducted_max;
- int conducted_ofdm_max;
- const struct locale_mimo_info *li_mimo;
- int maxpwr20, maxpwr40;
- int maxpwr_idx;
- uint j;
-
- memset(txpwr, 0, sizeof(struct txpwr_limits));
-
- if (!brcms_c_valid_chanspec_db(wlc_cm, chanspec)) {
- country = brcms_c_country_lookup(wlc, wlc->autocountry_default);
- if (country == NULL)
- return;
- } else {
- country = wlc_cm->country;
- }
-
- chan = CHSPEC_CHANNEL(chanspec);
- band = wlc->bandstate[CHSPEC_BANDUNIT(chanspec)];
- li = BAND_5G(band->bandtype) ?
- brcms_c_get_locale_5g(country->locale_5G) :
- brcms_c_get_locale_2g(country->locale_2G);
-
- li_mimo = BAND_5G(band->bandtype) ?
- brcms_c_get_mimo_5g(country->locale_mimo_5G) :
- brcms_c_get_mimo_2g(country->locale_mimo_2G);
-
- if (li->flags & BRCMS_EIRP) {
- delta = band->antgain;
- } else {
- delta = 0;
- if (band->antgain > QDB(6))
- delta = band->antgain - QDB(6); /* Excess over 6 dB */
- }
-
- if (li == &locale_i) {
- conducted_max = QDB(22);
- conducted_ofdm_max = QDB(22);
- }
-
- /* CCK txpwr limits for 2.4G band */
- if (BAND_2G(band->bandtype)) {
- maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_CCK(chan)];
-
- maxpwr = maxpwr - delta;
- maxpwr = max(maxpwr, 0);
- maxpwr = min(maxpwr, conducted_max);
-
- for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
- txpwr->cck[i] = (u8) maxpwr;
- }
-
- /* OFDM txpwr limits for 2.4G or 5G bands */
- if (BAND_2G(band->bandtype)) {
- maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_OFDM(chan)];
-
- } else {
- maxpwr = li->maxpwr[CHANNEL_POWER_IDX_5G(chan)];
- }
-
- maxpwr = maxpwr - delta;
- maxpwr = max(maxpwr, 0);
- maxpwr = min(maxpwr, conducted_ofdm_max);
-
- /* Keep OFDM lmit below CCK limit */
- if (BAND_2G(band->bandtype))
- maxpwr = min_t(int, maxpwr, txpwr->cck[0]);
-
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
- txpwr->ofdm[i] = (u8) maxpwr;
-
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
- /* OFDM 40 MHz SISO has the same power as the corresponding MCS0-7 rate unless
- * overriden by the locale specific code. We set this value to 0 as a
- * flag (presumably 0 dBm isn't a possibility) and then copy the MCS0-7 value
- * to the 40 MHz value if it wasn't explicitly set.
- */
- txpwr->ofdm_40_siso[i] = 0;
-
- txpwr->ofdm_cdd[i] = (u8) maxpwr;
-
- txpwr->ofdm_40_cdd[i] = 0;
- }
-
- /* MIMO/HT specific limits */
- if (li_mimo->flags & BRCMS_EIRP) {
- delta = band->antgain;
- } else {
- delta = 0;
- if (band->antgain > QDB(6))
- delta = band->antgain - QDB(6); /* Excess over 6 dB */
- }
-
- if (BAND_2G(band->bandtype))
- maxpwr_idx = (chan - 1);
- else
- maxpwr_idx = CHANNEL_POWER_IDX_5G(chan);
-
- maxpwr20 = li_mimo->maxpwr20[maxpwr_idx];
- maxpwr40 = li_mimo->maxpwr40[maxpwr_idx];
-
- maxpwr20 = maxpwr20 - delta;
- maxpwr20 = max(maxpwr20, 0);
- maxpwr40 = maxpwr40 - delta;
- maxpwr40 = max(maxpwr40, 0);
-
- /* Fill in the MCS 0-7 (SISO) rates */
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
-
- /* 20 MHz has the same power as the corresponding OFDM rate unless
- * overriden by the locale specific code.
- */
- txpwr->mcs_20_siso[i] = txpwr->ofdm[i];
- txpwr->mcs_40_siso[i] = 0;
- }
-
- /* Fill in the MCS 0-7 CDD rates */
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
- txpwr->mcs_20_cdd[i] = (u8) maxpwr20;
- txpwr->mcs_40_cdd[i] = (u8) maxpwr40;
- }
-
- /* These locales have SISO expressed in the table and override CDD later */
- if (li_mimo == &locale_bn) {
- if (li_mimo == &locale_bn) {
- maxpwr20 = QDB(16);
- maxpwr40 = 0;
-
- if (chan >= 3 && chan <= 11) {
- maxpwr40 = QDB(16);
- }
- }
-
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
- txpwr->mcs_20_siso[i] = (u8) maxpwr20;
- txpwr->mcs_40_siso[i] = (u8) maxpwr40;
- }
- }
-
- /* Fill in the MCS 0-7 STBC rates */
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
- txpwr->mcs_20_stbc[i] = 0;
- txpwr->mcs_40_stbc[i] = 0;
- }
-
- /* Fill in the MCS 8-15 SDM rates */
- for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) {
- txpwr->mcs_20_mimo[i] = (u8) maxpwr20;
- txpwr->mcs_40_mimo[i] = (u8) maxpwr40;
- }
-
- /* Fill in MCS32 */
- txpwr->mcs32 = (u8) maxpwr40;
-
- for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) {
- if (txpwr->ofdm_40_cdd[i] == 0)
- txpwr->ofdm_40_cdd[i] = txpwr->mcs_40_cdd[j];
- if (i == 0) {
- i = i + 1;
- if (txpwr->ofdm_40_cdd[i] == 0)
- txpwr->ofdm_40_cdd[i] = txpwr->mcs_40_cdd[j];
- }
- }
-
- /* Copy the 40 MHZ MCS 0-7 CDD value to the 40 MHZ MCS 0-7 SISO value if it wasn't
- * provided explicitly.
- */
-
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
- if (txpwr->mcs_40_siso[i] == 0)
- txpwr->mcs_40_siso[i] = txpwr->mcs_40_cdd[i];
- }
-
- for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) {
- if (txpwr->ofdm_40_siso[i] == 0)
- txpwr->ofdm_40_siso[i] = txpwr->mcs_40_siso[j];
- if (i == 0) {
- i = i + 1;
- if (txpwr->ofdm_40_siso[i] == 0)
- txpwr->ofdm_40_siso[i] = txpwr->mcs_40_siso[j];
- }
- }
-
- /* Copy the 20 and 40 MHz MCS0-7 CDD values to the corresponding STBC values if they weren't
- * provided explicitly.
- */
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
- if (txpwr->mcs_20_stbc[i] == 0)
- txpwr->mcs_20_stbc[i] = txpwr->mcs_20_cdd[i];
-
- if (txpwr->mcs_40_stbc[i] == 0)
- txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i];
- }
-
-#ifdef POWER_DBG
- wlc_phy_txpower_limits_dump(txpwr);
-#endif
- return;
-}
-
-/* Returns true if currently set country is Japan or variant */
-static bool brcms_c_japan(struct brcms_c_info *wlc)
-{
- return brcms_c_japan_ccode(wlc->cmi->country_abbrev);
-}
-
-/* JP, J1 - J10 are Japan ccodes */
-static bool brcms_c_japan_ccode(const char *ccode)
-{
- return (ccode[0] == 'J' &&
- (ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9')));
-}
-
-/*
- * Validate the chanspec for this locale, for 40MHZ we need to also check that the sidebands
- * are valid 20MZH channels in this locale and they are also a legal HT combination
- */
-static bool
-brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, chanspec_t chspec,
- bool dualband)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- u8 channel = CHSPEC_CHANNEL(chspec);
-
- /* check the chanspec */
- if (brcmu_chspec_malformed(chspec)) {
- wiphy_err(wlc->wiphy, "wl%d: malformed chanspec 0x%x\n",
- wlc->pub->unit, chspec);
- return false;
- }
-
- if (CHANNEL_BANDUNIT(wlc_cm->wlc, channel) !=
- CHSPEC_BANDUNIT(chspec))
- return false;
-
- /* Check a 20Mhz channel */
- if (CHSPEC_IS20(chspec)) {
- if (dualband)
- return VALID_CHANNEL20_DB(wlc_cm->wlc, channel);
- else
- return VALID_CHANNEL20(wlc_cm->wlc, channel);
- }
-#ifdef SUPPORT_40MHZ
- /* We know we are now checking a 40MHZ channel, so we should only be here
- * for NPHYS
- */
- if (BRCMS_ISNPHY(wlc->band) || BRCMS_ISSSLPNPHY(wlc->band)) {
- u8 upper_sideband = 0, idx;
- u8 num_ch20_entries =
- sizeof(chan20_info) / sizeof(struct chan20_info);
-
- if (!VALID_40CHANSPEC_IN_BAND(wlc, CHSPEC_BANDUNIT(chspec)))
- return false;
-
- if (dualband) {
- if (!VALID_CHANNEL20_DB(wlc, LOWER_20_SB(channel)) ||
- !VALID_CHANNEL20_DB(wlc, UPPER_20_SB(channel)))
- return false;
- } else {
- if (!VALID_CHANNEL20(wlc, LOWER_20_SB(channel)) ||
- !VALID_CHANNEL20(wlc, UPPER_20_SB(channel)))
- return false;
- }
-
- /* find the lower sideband info in the sideband array */
- for (idx = 0; idx < num_ch20_entries; idx++) {
- if (chan20_info[idx].sb == LOWER_20_SB(channel))
- upper_sideband = chan20_info[idx].adj_sbs;
- }
- /* check that the lower sideband allows an upper sideband */
- if ((upper_sideband & (CH_UPPER_SB | CH_EWA_VALID)) ==
- (CH_UPPER_SB | CH_EWA_VALID))
- return true;
- return false;
- }
-#endif /* 40 MHZ */
-
- return false;
-}
-
-bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, chanspec_t chspec)
-{
- return brcms_c_valid_chanspec_ext(wlc_cm, chspec, true);
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/channel.h b/drivers/staging/brcm80211/brcmsmac/channel.h
deleted file mode 100644
index d22f2f5f592..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/channel.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_CHANNEL_H_
-#define _BRCM_CHANNEL_H_
-
-/* conversion for phy txpwr calculations that use .25 dB units */
-#define BRCMS_TXPWR_DB_FACTOR 4
-
-
-/* maxpwr mapping to 5GHz band channels:
- * maxpwr[0] - channels [34-48]
- * maxpwr[1] - channels [52-60]
- * maxpwr[2] - channels [62-64]
- * maxpwr[3] - channels [100-140]
- * maxpwr[4] - channels [149-165]
- */
-#define BAND_5G_PWR_LVLS 5 /* 5 power levels for 5G */
-
-/* power level in group of 2.4GHz band channels:
- * maxpwr[0] - CCK channels [1]
- * maxpwr[1] - CCK channels [2-10]
- * maxpwr[2] - CCK channels [11-14]
- * maxpwr[3] - OFDM channels [1]
- * maxpwr[4] - OFDM channels [2-10]
- * maxpwr[5] - OFDM channels [11-14]
- */
-
-/* macro to get 2.4 GHz channel group index for tx power */
-#define CHANNEL_POWER_IDX_2G_CCK(c) (((c) < 2) ? 0 : (((c) < 11) ? 1 : 2)) /* cck index */
-#define CHANNEL_POWER_IDX_2G_OFDM(c) (((c) < 2) ? 3 : (((c) < 11) ? 4 : 5)) /* ofdm index */
-
-/* macro to get 5 GHz channel group index for tx power */
-#define CHANNEL_POWER_IDX_5G(c) \
- (((c) < 52) ? 0 : (((c) < 62) ? 1 : (((c) < 100) ? 2 : (((c) < 149) ? 3 : 4))))
-
-/* max of BAND_5G_PWR_LVLS and 6 for 2.4 GHz */
-#define BRCMS_MAXPWR_TBL_SIZE 6
-/* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */
-#define BRCMS_MAXPWR_MIMO_TBL_SIZE 14
-
-#define NBANDS(wlc) ((wlc)->pub->_nbands)
-#define NBANDS_PUB(pub) ((pub)->_nbands)
-#define NBANDS_HW(hw) ((hw)->_nbands)
-
-#define IS_SINGLEBAND_5G(device) 0
-
-/* locale channel and power info. */
-struct locale_info {
- u32 valid_channels;
- /* List of radar sensitive channels */
- u8 radar_channels;
- /* List of channels used only if APs are detected */
- u8 restricted_channels;
- /* Max tx pwr in qdBm for each sub-band */
- s8 maxpwr[BRCMS_MAXPWR_TBL_SIZE];
- s8 pub_maxpwr[BAND_5G_PWR_LVLS]; /* Country IE advertised max tx pwr in dBm
- * per sub-band
- */
- u8 flags;
-};
-
-/* bits for locale_info flags */
-#define BRCMS_PEAK_CONDUCTED 0x00 /* Peak for locals */
-#define BRCMS_EIRP 0x01 /* Flag for EIRP */
-#define BRCMS_DFS_TPC 0x02 /* Flag for DFS TPC */
-#define BRCMS_NO_OFDM 0x04 /* Flag for No OFDM */
-#define BRCMS_NO_40MHZ 0x08 /* Flag for No MIMO 40MHz */
-#define BRCMS_NO_MIMO 0x10 /* Flag for No MIMO, 20 or 40 MHz */
-#define BRCMS_RADAR_TYPE_EU 0x20 /* Flag for EU */
-#define BRCMS_DFS_FCC BRCMS_DFS_TPC /* Flag for DFS FCC */
-#define BRCMS_DFS_EU (BRCMS_DFS_TPC | BRCMS_RADAR_TYPE_EU) /* Flag for DFS EU */
-
-#define ISDFS_EU(fl) (((fl) & BRCMS_DFS_EU) == BRCMS_DFS_EU)
-
-/* locale per-channel tx power limits for MIMO frames
- * maxpwr arrays are index by channel for 2.4 GHz limits, and
- * by sub-band for 5 GHz limits using CHANNEL_POWER_IDX_5G(channel)
- */
-struct locale_mimo_info {
- /* tx 20 MHz power limits, qdBm units */
- s8 maxpwr20[BRCMS_MAXPWR_MIMO_TBL_SIZE];
- /* tx 40 MHz power limits, qdBm units */
- s8 maxpwr40[BRCMS_MAXPWR_MIMO_TBL_SIZE];
- u8 flags;
-};
-
-extern const chanvec_t chanvec_all_2G;
-extern const chanvec_t chanvec_all_5G;
-
-/*
- * Country names and abbreviations with locale defined from ISO 3166
- */
-struct country_info {
- const u8 locale_2G; /* 2.4G band locale */
- const u8 locale_5G; /* 5G band locale */
- const u8 locale_mimo_2G; /* 2.4G mimo info */
- const u8 locale_mimo_5G; /* 5G mimo info */
-};
-
-extern struct brcms_cm_info *
-brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
-
-extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
-
-extern u8 brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
- uint bandunit);
-
-extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
- chanspec_t chspec);
-
-extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
- chanspec_t chanspec,
- struct txpwr_limits *txpwr);
-extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
- chanspec_t chanspec,
- u8 local_constraint_qdbm);
-
-#endif /* _WLC_CHANNEL_H */
diff --git a/drivers/staging/brcm80211/brcmsmac/d11.h b/drivers/staging/brcm80211/brcmsmac/d11.h
deleted file mode 100644
index e7ff0e6f28e..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/d11.h
+++ /dev/null
@@ -1,1775 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_D11_H_
-#define _BRCM_D11_H_
-
-#include <linux/ieee80211.h>
-
-#include <defs.h>
-#include "pub.h"
-#include "dma.h"
-
-#define BCN_TMPL_LEN 512 /* length of the BCN template area */
-
-/* RX FIFO numbers */
-#define RX_FIFO 0 /* data and ctl frames */
-#define RX_TXSTATUS_FIFO 3 /* RX fifo for tx status packages */
-
-/* TX FIFO numbers using WME Access Classes */
-#define TX_AC_BK_FIFO 0 /* Access Category Background TX FIFO */
-#define TX_AC_BE_FIFO 1 /* Access Category Best-Effort TX FIFO */
-#define TX_AC_VI_FIFO 2 /* Access Class Video TX FIFO */
-#define TX_AC_VO_FIFO 3 /* Access Class Voice TX FIFO */
-#define TX_BCMC_FIFO 4 /* Broadcast/Multicast TX FIFO */
-#define TX_ATIM_FIFO 5 /* TX fifo for ATIM window info */
-
-/* Addr is byte address used by SW; offset is word offset used by uCode */
-
-/* Per AC TX limit settings */
-#define M_AC_TXLMT_BASE_ADDR (0x180 * 2)
-#define M_AC_TXLMT_ADDR(_ac) (M_AC_TXLMT_BASE_ADDR + (2 * (_ac)))
-
-/* Legacy TX FIFO numbers */
-#define TX_DATA_FIFO TX_AC_BE_FIFO
-#define TX_CTL_FIFO TX_AC_VO_FIFO
-
-#ifndef WL_RSSI_ANT_MAX
-#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */
-#elif WL_RSSI_ANT_MAX != 4
-#error "WL_RSSI_ANT_MAX does not match"
-#endif
-
-struct intctrlregs {
- u32 intstatus;
- u32 intmask;
-};
-
-/* PIO structure,
- * support two PIO format: 2 bytes access and 4 bytes access
- * basic FIFO register set is per channel(transmit or receive)
- * a pair of channels is defined for convenience
- */
-/* 2byte-wide pio register set per channel(xmt or rcv) */
-struct pio2regs {
- u16 fifocontrol;
- u16 fifodata;
- u16 fifofree; /* only valid in xmt channel, not in rcv channel */
- u16 PAD;
-};
-
-/* a pair of pio channels(tx and rx) */
-struct pio2regp {
- pio2regs_t tx;
- pio2regs_t rx;
-};
-
-/* 4byte-wide pio register set per channel(xmt or rcv) */
-struct pio4regs {
- u32 fifocontrol;
- u32 fifodata;
-};
-
-/* a pair of pio channels(tx and rx) */
-struct pio4regp {
- pio4regs_t tx;
- pio4regs_t rx;
-};
-
-/* read: 32-bit register that can be read as 32-bit or as 2 16-bit
- * write: only low 16b-it half can be written
- */
-union pmqreg {
- u32 pmqhostdata; /* read only! */
- struct {
- u16 pmqctrlstatus; /* read/write */
- u16 PAD;
- } w;
-};
-
-struct fifo64 {
- dma64regs_t dmaxmt; /* dma tx */
- pio4regs_t piotx; /* pio tx */
- dma64regs_t dmarcv; /* dma rx */
- pio4regs_t piorx; /* pio rx */
-};
-
-/*
- * Host Interface Registers
- */
-struct d11regs {
- /* Device Control ("semi-standard host registers") */
- u32 PAD[3]; /* 0x0 - 0x8 */
- u32 biststatus; /* 0xC */
- u32 biststatus2; /* 0x10 */
- u32 PAD; /* 0x14 */
- u32 gptimer; /* 0x18 */
- u32 usectimer; /* 0x1c *//* for corerev >= 26 */
-
- /* Interrupt Control *//* 0x20 */
- intctrlregs_t intctrlregs[8];
-
- u32 PAD[40]; /* 0x60 - 0xFC */
-
- u32 intrcvlazy[4]; /* 0x100 - 0x10C */
-
- u32 PAD[4]; /* 0x110 - 0x11c */
-
- u32 maccontrol; /* 0x120 */
- u32 maccommand; /* 0x124 */
- u32 macintstatus; /* 0x128 */
- u32 macintmask; /* 0x12C */
-
- /* Transmit Template Access */
- u32 tplatewrptr; /* 0x130 */
- u32 tplatewrdata; /* 0x134 */
- u32 PAD[2]; /* 0x138 - 0x13C */
-
- /* PMQ registers */
- pmqreg_t pmqreg; /* 0x140 */
- u32 pmqpatl; /* 0x144 */
- u32 pmqpath; /* 0x148 */
- u32 PAD; /* 0x14C */
-
- u32 chnstatus; /* 0x150 */
- u32 psmdebug; /* 0x154 */
- u32 phydebug; /* 0x158 */
- u32 machwcap; /* 0x15C */
-
- /* Extended Internal Objects */
- u32 objaddr; /* 0x160 */
- u32 objdata; /* 0x164 */
- u32 PAD[2]; /* 0x168 - 0x16c */
-
- u32 frmtxstatus; /* 0x170 */
- u32 frmtxstatus2; /* 0x174 */
- u32 PAD[2]; /* 0x178 - 0x17c */
-
- /* TSF host access */
- u32 tsf_timerlow; /* 0x180 */
- u32 tsf_timerhigh; /* 0x184 */
- u32 tsf_cfprep; /* 0x188 */
- u32 tsf_cfpstart; /* 0x18c */
- u32 tsf_cfpmaxdur32; /* 0x190 */
- u32 PAD[3]; /* 0x194 - 0x19c */
-
- u32 maccontrol1; /* 0x1a0 */
- u32 machwcap1; /* 0x1a4 */
- u32 PAD[14]; /* 0x1a8 - 0x1dc */
-
- /* Clock control and hardware workarounds*/
- u32 clk_ctl_st; /* 0x1e0 */
- u32 hw_war;
- u32 d11_phypllctl; /* the phypll request/avail bits are
- * moved to clk_ctl_st
- */
- u32 PAD[5]; /* 0x1ec - 0x1fc */
-
- /* 0x200-0x37F dma/pio registers */
- fifo64_t fifo64regs[6];
-
- /* FIFO diagnostic port access */
- dma32diag_t dmafifo; /* 0x380 - 0x38C */
-
- u32 aggfifocnt; /* 0x390 */
- u32 aggfifodata; /* 0x394 */
- u32 PAD[16]; /* 0x398 - 0x3d4 */
- u16 radioregaddr; /* 0x3d8 */
- u16 radioregdata; /* 0x3da */
-
- /*
- * time delay between the change on rf disable input and
- * radio shutdown
- */
- u32 rfdisabledly; /* 0x3DC */
-
- /* PHY register access */
- u16 phyversion; /* 0x3e0 - 0x0 */
- u16 phybbconfig; /* 0x3e2 - 0x1 */
- u16 phyadcbias; /* 0x3e4 - 0x2 Bphy only */
- u16 phyanacore; /* 0x3e6 - 0x3 pwwrdwn on aphy */
- u16 phyrxstatus0; /* 0x3e8 - 0x4 */
- u16 phyrxstatus1; /* 0x3ea - 0x5 */
- u16 phycrsth; /* 0x3ec - 0x6 */
- u16 phytxerror; /* 0x3ee - 0x7 */
- u16 phychannel; /* 0x3f0 - 0x8 */
- u16 PAD[1]; /* 0x3f2 - 0x9 */
- u16 phytest; /* 0x3f4 - 0xa */
- u16 phy4waddr; /* 0x3f6 - 0xb */
- u16 phy4wdatahi; /* 0x3f8 - 0xc */
- u16 phy4wdatalo; /* 0x3fa - 0xd */
- u16 phyregaddr; /* 0x3fc - 0xe */
- u16 phyregdata; /* 0x3fe - 0xf */
-
- /* IHR *//* 0x400 - 0x7FE */
-
- /* RXE Block */
- u16 PAD[3]; /* 0x400 - 0x406 */
- u16 rcv_fifo_ctl; /* 0x406 */
- u16 PAD; /* 0x408 - 0x40a */
- u16 rcv_frm_cnt; /* 0x40a */
- u16 PAD[4]; /* 0x40a - 0x414 */
- u16 rssi; /* 0x414 */
- u16 PAD[5]; /* 0x414 - 0x420 */
- u16 rcm_ctl; /* 0x420 */
- u16 rcm_mat_data; /* 0x422 */
- u16 rcm_mat_mask; /* 0x424 */
- u16 rcm_mat_dly; /* 0x426 */
- u16 rcm_cond_mask_l; /* 0x428 */
- u16 rcm_cond_mask_h; /* 0x42A */
- u16 rcm_cond_dly; /* 0x42C */
- u16 PAD[1]; /* 0x42E */
- u16 ext_ihr_addr; /* 0x430 */
- u16 ext_ihr_data; /* 0x432 */
- u16 rxe_phyrs_2; /* 0x434 */
- u16 rxe_phyrs_3; /* 0x436 */
- u16 phy_mode; /* 0x438 */
- u16 rcmta_ctl; /* 0x43a */
- u16 rcmta_size; /* 0x43c */
- u16 rcmta_addr0; /* 0x43e */
- u16 rcmta_addr1; /* 0x440 */
- u16 rcmta_addr2; /* 0x442 */
- u16 PAD[30]; /* 0x444 - 0x480 */
-
- /* PSM Block *//* 0x480 - 0x500 */
-
- u16 PAD; /* 0x480 */
- u16 psm_maccontrol_h; /* 0x482 */
- u16 psm_macintstatus_l; /* 0x484 */
- u16 psm_macintstatus_h; /* 0x486 */
- u16 psm_macintmask_l; /* 0x488 */
- u16 psm_macintmask_h; /* 0x48A */
- u16 PAD; /* 0x48C */
- u16 psm_maccommand; /* 0x48E */
- u16 psm_brc; /* 0x490 */
- u16 psm_phy_hdr_param; /* 0x492 */
- u16 psm_postcard; /* 0x494 */
- u16 psm_pcard_loc_l; /* 0x496 */
- u16 psm_pcard_loc_h; /* 0x498 */
- u16 psm_gpio_in; /* 0x49A */
- u16 psm_gpio_out; /* 0x49C */
- u16 psm_gpio_oe; /* 0x49E */
-
- u16 psm_bred_0; /* 0x4A0 */
- u16 psm_bred_1; /* 0x4A2 */
- u16 psm_bred_2; /* 0x4A4 */
- u16 psm_bred_3; /* 0x4A6 */
- u16 psm_brcl_0; /* 0x4A8 */
- u16 psm_brcl_1; /* 0x4AA */
- u16 psm_brcl_2; /* 0x4AC */
- u16 psm_brcl_3; /* 0x4AE */
- u16 psm_brpo_0; /* 0x4B0 */
- u16 psm_brpo_1; /* 0x4B2 */
- u16 psm_brpo_2; /* 0x4B4 */
- u16 psm_brpo_3; /* 0x4B6 */
- u16 psm_brwk_0; /* 0x4B8 */
- u16 psm_brwk_1; /* 0x4BA */
- u16 psm_brwk_2; /* 0x4BC */
- u16 psm_brwk_3; /* 0x4BE */
-
- u16 psm_base_0; /* 0x4C0 */
- u16 psm_base_1; /* 0x4C2 */
- u16 psm_base_2; /* 0x4C4 */
- u16 psm_base_3; /* 0x4C6 */
- u16 psm_base_4; /* 0x4C8 */
- u16 psm_base_5; /* 0x4CA */
- u16 psm_base_6; /* 0x4CC */
- u16 psm_pc_reg_0; /* 0x4CE */
- u16 psm_pc_reg_1; /* 0x4D0 */
- u16 psm_pc_reg_2; /* 0x4D2 */
- u16 psm_pc_reg_3; /* 0x4D4 */
- u16 PAD[0xD]; /* 0x4D6 - 0x4DE */
- u16 psm_corectlsts; /* 0x4f0 *//* Corerev >= 13 */
- u16 PAD[0x7]; /* 0x4f2 - 0x4fE */
-
- /* TXE0 Block *//* 0x500 - 0x580 */
- u16 txe_ctl; /* 0x500 */
- u16 txe_aux; /* 0x502 */
- u16 txe_ts_loc; /* 0x504 */
- u16 txe_time_out; /* 0x506 */
- u16 txe_wm_0; /* 0x508 */
- u16 txe_wm_1; /* 0x50A */
- u16 txe_phyctl; /* 0x50C */
- u16 txe_status; /* 0x50E */
- u16 txe_mmplcp0; /* 0x510 */
- u16 txe_mmplcp1; /* 0x512 */
- u16 txe_phyctl1; /* 0x514 */
-
- u16 PAD[0x05]; /* 0x510 - 0x51E */
-
- /* Transmit control */
- u16 xmtfifodef; /* 0x520 */
- u16 xmtfifo_frame_cnt; /* 0x522 *//* Corerev >= 16 */
- u16 xmtfifo_byte_cnt; /* 0x524 *//* Corerev >= 16 */
- u16 xmtfifo_head; /* 0x526 *//* Corerev >= 16 */
- u16 xmtfifo_rd_ptr; /* 0x528 *//* Corerev >= 16 */
- u16 xmtfifo_wr_ptr; /* 0x52A *//* Corerev >= 16 */
- u16 xmtfifodef1; /* 0x52C *//* Corerev >= 16 */
-
- u16 PAD[0x09]; /* 0x52E - 0x53E */
-
- u16 xmtfifocmd; /* 0x540 */
- u16 xmtfifoflush; /* 0x542 */
- u16 xmtfifothresh; /* 0x544 */
- u16 xmtfifordy; /* 0x546 */
- u16 xmtfifoprirdy; /* 0x548 */
- u16 xmtfiforqpri; /* 0x54A */
- u16 xmttplatetxptr; /* 0x54C */
- u16 PAD; /* 0x54E */
- u16 xmttplateptr; /* 0x550 */
- u16 smpl_clct_strptr; /* 0x552 *//* Corerev >= 22 */
- u16 smpl_clct_stpptr; /* 0x554 *//* Corerev >= 22 */
- u16 smpl_clct_curptr; /* 0x556 *//* Corerev >= 22 */
- u16 PAD[0x04]; /* 0x558 - 0x55E */
- u16 xmttplatedatalo; /* 0x560 */
- u16 xmttplatedatahi; /* 0x562 */
-
- u16 PAD[2]; /* 0x564 - 0x566 */
-
- u16 xmtsel; /* 0x568 */
- u16 xmttxcnt; /* 0x56A */
- u16 xmttxshmaddr; /* 0x56C */
-
- u16 PAD[0x09]; /* 0x56E - 0x57E */
-
- /* TXE1 Block */
- u16 PAD[0x40]; /* 0x580 - 0x5FE */
-
- /* TSF Block */
- u16 PAD[0X02]; /* 0x600 - 0x602 */
- u16 tsf_cfpstrt_l; /* 0x604 */
- u16 tsf_cfpstrt_h; /* 0x606 */
- u16 PAD[0X05]; /* 0x608 - 0x610 */
- u16 tsf_cfppretbtt; /* 0x612 */
- u16 PAD[0XD]; /* 0x614 - 0x62C */
- u16 tsf_clk_frac_l; /* 0x62E */
- u16 tsf_clk_frac_h; /* 0x630 */
- u16 PAD[0X14]; /* 0x632 - 0x658 */
- u16 tsf_random; /* 0x65A */
- u16 PAD[0x05]; /* 0x65C - 0x664 */
- /* GPTimer 2 registers */
- u16 tsf_gpt2_stat; /* 0x666 */
- u16 tsf_gpt2_ctr_l; /* 0x668 */
- u16 tsf_gpt2_ctr_h; /* 0x66A */
- u16 tsf_gpt2_val_l; /* 0x66C */
- u16 tsf_gpt2_val_h; /* 0x66E */
- u16 tsf_gptall_stat; /* 0x670 */
- u16 PAD[0x07]; /* 0x672 - 0x67E */
-
- /* IFS Block */
- u16 ifs_sifs_rx_tx_tx; /* 0x680 */
- u16 ifs_sifs_nav_tx; /* 0x682 */
- u16 ifs_slot; /* 0x684 */
- u16 PAD; /* 0x686 */
- u16 ifs_ctl; /* 0x688 */
- u16 PAD[0x3]; /* 0x68a - 0x68F */
- u16 ifsstat; /* 0x690 */
- u16 ifsmedbusyctl; /* 0x692 */
- u16 iftxdur; /* 0x694 */
- u16 PAD[0x3]; /* 0x696 - 0x69b */
- /* EDCF support in dot11macs */
- u16 ifs_aifsn; /* 0x69c */
- u16 ifs_ctl1; /* 0x69e */
-
- /* slow clock registers */
- u16 scc_ctl; /* 0x6a0 */
- u16 scc_timer_l; /* 0x6a2 */
- u16 scc_timer_h; /* 0x6a4 */
- u16 scc_frac; /* 0x6a6 */
- u16 scc_fastpwrup_dly; /* 0x6a8 */
- u16 scc_per; /* 0x6aa */
- u16 scc_per_frac; /* 0x6ac */
- u16 scc_cal_timer_l; /* 0x6ae */
- u16 scc_cal_timer_h; /* 0x6b0 */
- u16 PAD; /* 0x6b2 */
-
- u16 PAD[0x26];
-
- /* NAV Block */
- u16 nav_ctl; /* 0x700 */
- u16 navstat; /* 0x702 */
- u16 PAD[0x3e]; /* 0x702 - 0x77E */
-
- /* WEP/PMQ Block *//* 0x780 - 0x7FE */
- u16 PAD[0x20]; /* 0x780 - 0x7BE */
-
- u16 wepctl; /* 0x7C0 */
- u16 wepivloc; /* 0x7C2 */
- u16 wepivkey; /* 0x7C4 */
- u16 wepwkey; /* 0x7C6 */
-
- u16 PAD[4]; /* 0x7C8 - 0x7CE */
- u16 pcmctl; /* 0X7D0 */
- u16 pcmstat; /* 0X7D2 */
- u16 PAD[6]; /* 0x7D4 - 0x7DE */
-
- u16 pmqctl; /* 0x7E0 */
- u16 pmqstatus; /* 0x7E2 */
- u16 pmqpat0; /* 0x7E4 */
- u16 pmqpat1; /* 0x7E6 */
- u16 pmqpat2; /* 0x7E8 */
-
- u16 pmqdat; /* 0x7EA */
- u16 pmqdator; /* 0x7EC */
- u16 pmqhst; /* 0x7EE */
- u16 pmqpath0; /* 0x7F0 */
- u16 pmqpath1; /* 0x7F2 */
- u16 pmqpath2; /* 0x7F4 */
- u16 pmqdath; /* 0x7F6 */
-
- u16 PAD[0x04]; /* 0x7F8 - 0x7FE */
-
- /* SHM *//* 0x800 - 0xEFE */
- u16 PAD[0x380]; /* 0x800 - 0xEFE */
-};
-
-#define PIHR_BASE 0x0400 /* byte address of packed IHR region */
-
-/* biststatus */
-#define BT_DONE (1U << 31) /* bist done */
-#define BT_B2S (1 << 30) /* bist2 ram summary bit */
-
-/* intstatus and intmask */
-#define I_PC (1 << 10) /* pci descriptor error */
-#define I_PD (1 << 11) /* pci data error */
-#define I_DE (1 << 12) /* descriptor protocol error */
-#define I_RU (1 << 13) /* receive descriptor underflow */
-#define I_RO (1 << 14) /* receive fifo overflow */
-#define I_XU (1 << 15) /* transmit fifo underflow */
-#define I_RI (1 << 16) /* receive interrupt */
-#define I_XI (1 << 24) /* transmit interrupt */
-
-/* interrupt receive lazy */
-#define IRL_TO_MASK 0x00ffffff /* timeout */
-#define IRL_FC_MASK 0xff000000 /* frame count */
-#define IRL_FC_SHIFT 24 /* frame count */
-
-/* maccontrol register */
-#define MCTL_GMODE (1U << 31)
-#define MCTL_DISCARD_PMQ (1 << 30)
-#define MCTL_WAKE (1 << 26)
-#define MCTL_HPS (1 << 25)
-#define MCTL_PROMISC (1 << 24)
-#define MCTL_KEEPBADFCS (1 << 23)
-#define MCTL_KEEPCONTROL (1 << 22)
-#define MCTL_PHYLOCK (1 << 21)
-#define MCTL_BCNS_PROMISC (1 << 20)
-#define MCTL_LOCK_RADIO (1 << 19)
-#define MCTL_AP (1 << 18)
-#define MCTL_INFRA (1 << 17)
-#define MCTL_BIGEND (1 << 16)
-#define MCTL_GPOUT_SEL_MASK (3 << 14)
-#define MCTL_GPOUT_SEL_SHIFT 14
-#define MCTL_EN_PSMDBG (1 << 13)
-#define MCTL_IHR_EN (1 << 10)
-#define MCTL_SHM_UPPER (1 << 9)
-#define MCTL_SHM_EN (1 << 8)
-#define MCTL_PSM_JMP_0 (1 << 2)
-#define MCTL_PSM_RUN (1 << 1)
-#define MCTL_EN_MAC (1 << 0)
-
-/* maccommand register */
-#define MCMD_BCN0VLD (1 << 0)
-#define MCMD_BCN1VLD (1 << 1)
-#define MCMD_DIRFRMQVAL (1 << 2)
-#define MCMD_CCA (1 << 3)
-#define MCMD_BG_NOISE (1 << 4)
-#define MCMD_SKIP_SHMINIT (1 << 5) /* only used for simulation */
-#define MCMD_SAMPLECOLL MCMD_SKIP_SHMINIT /* reuse for sample collect */
-
-/* macintstatus/macintmask */
-#define MI_MACSSPNDD (1 << 0) /* MAC has gracefully suspended */
-#define MI_BCNTPL (1 << 1) /* beacon template available */
-#define MI_TBTT (1 << 2) /* TBTT indication */
-#define MI_BCNSUCCESS (1 << 3) /* beacon successfully tx'd */
-#define MI_BCNCANCLD (1 << 4) /* beacon canceled (IBSS) */
-#define MI_ATIMWINEND (1 << 5) /* end of ATIM-window (IBSS) */
-#define MI_PMQ (1 << 6) /* PMQ entries available */
-#define MI_NSPECGEN_0 (1 << 7) /* non-specific gen-stat bits that are set by PSM */
-#define MI_NSPECGEN_1 (1 << 8) /* non-specific gen-stat bits that are set by PSM */
-#define MI_MACTXERR (1 << 9) /* MAC level Tx error */
-#define MI_NSPECGEN_3 (1 << 10) /* non-specific gen-stat bits that are set by PSM */
-#define MI_PHYTXERR (1 << 11) /* PHY Tx error */
-#define MI_PME (1 << 12) /* Power Management Event */
-#define MI_GP0 (1 << 13) /* General-purpose timer0 */
-#define MI_GP1 (1 << 14) /* General-purpose timer1 */
-#define MI_DMAINT (1 << 15) /* (ORed) DMA-interrupts */
-#define MI_TXSTOP (1 << 16) /* MAC has completed a TX FIFO Suspend/Flush */
-#define MI_CCA (1 << 17) /* MAC has completed a CCA measurement */
-#define MI_BG_NOISE (1 << 18) /* MAC has collected background noise samples */
-#define MI_DTIM_TBTT (1 << 19) /* MBSS DTIM TBTT indication */
-#define MI_PRQ (1 << 20) /* Probe response queue needs attention */
-#define MI_PWRUP (1 << 21) /* Radio/PHY has been powered back up. */
-#define MI_RESERVED3 (1 << 22)
-#define MI_RESERVED2 (1 << 23)
-#define MI_RESERVED1 (1 << 25)
-/* MAC detected change on RF Disable input*/
-#define MI_RFDISABLE (1 << 28)
-#define MI_TFS (1 << 29) /* MAC has completed a TX */
-#define MI_PHYCHANGED (1 << 30) /* A phy status change wrt G mode */
-#define MI_TO (1U << 31) /* general purpose timeout */
-
-/* Mac capabilities registers */
-/* machwcap */
-#define MCAP_TKIPMIC 0x80000000 /* TKIP MIC hardware present */
-
-/* pmqhost data */
-#define PMQH_DATA_MASK 0xffff0000 /* data entry of head pmq entry */
-#define PMQH_BSSCFG 0x00100000 /* PM entry for BSS config */
-#define PMQH_PMOFF 0x00010000 /* PM Mode OFF: power save off */
-#define PMQH_PMON 0x00020000 /* PM Mode ON: power save on */
-#define PMQH_DASAT 0x00040000 /* Dis-associated or De-authenticated */
-#define PMQH_ATIMFAIL 0x00080000 /* ATIM not acknowledged */
-#define PMQH_DEL_ENTRY 0x00000001 /* delete head entry */
-#define PMQH_DEL_MULT 0x00000002 /* delete head entry to cur read pointer -1 */
-#define PMQH_OFLO 0x00000004 /* pmq overflow indication */
-#define PMQH_NOT_EMPTY 0x00000008 /* entries are present in pmq */
-
-/* phydebug */
-#define PDBG_CRS (1 << 0) /* phy is asserting carrier sense */
-#define PDBG_TXA (1 << 1) /* phy is taking xmit byte from mac this cycle */
-#define PDBG_TXF (1 << 2) /* mac is instructing the phy to transmit a frame */
-#define PDBG_TXE (1 << 3) /* phy is signalling a transmit Error to the mac */
-#define PDBG_RXF (1 << 4) /* phy detected the end of a valid frame preamble */
-#define PDBG_RXS (1 << 5) /* phy detected the end of a valid PLCP header */
-#define PDBG_RXFRG (1 << 6) /* rx start not asserted */
-#define PDBG_RXV (1 << 7) /* mac is taking receive byte from phy this cycle */
-#define PDBG_RFD (1 << 16) /* RF portion of the radio is disabled */
-
-/* objaddr register */
-#define OBJADDR_SEL_MASK 0x000F0000
-#define OBJADDR_UCM_SEL 0x00000000
-#define OBJADDR_SHM_SEL 0x00010000
-#define OBJADDR_SCR_SEL 0x00020000
-#define OBJADDR_IHR_SEL 0x00030000
-#define OBJADDR_RCMTA_SEL 0x00040000
-#define OBJADDR_SRCHM_SEL 0x00060000
-#define OBJADDR_WINC 0x01000000
-#define OBJADDR_RINC 0x02000000
-#define OBJADDR_AUTO_INC 0x03000000
-
-#define WEP_PCMADDR 0x07d4
-#define WEP_PCMDATA 0x07d6
-
-/* frmtxstatus */
-#define TXS_V (1 << 0) /* valid bit */
-#define TXS_STATUS_MASK 0xffff
-#define TXS_FID_MASK 0xffff0000
-#define TXS_FID_SHIFT 16
-
-/* frmtxstatus2 */
-#define TXS_SEQ_MASK 0xffff
-#define TXS_PTX_MASK 0xff0000
-#define TXS_PTX_SHIFT 16
-#define TXS_MU_MASK 0x01000000
-#define TXS_MU_SHIFT 24
-
-/* clk_ctl_st */
-#define CCS_ERSRC_REQ_D11PLL 0x00000100 /* d11 core pll request */
-#define CCS_ERSRC_REQ_PHYPLL 0x00000200 /* PHY pll request */
-#define CCS_ERSRC_AVAIL_D11PLL 0x01000000 /* d11 core pll available */
-#define CCS_ERSRC_AVAIL_PHYPLL 0x02000000 /* PHY pll available */
-
-/* HT Cloclk Ctrl and Clock Avail for 4313 */
-#define CCS_ERSRC_REQ_HT 0x00000010 /* HT avail request */
-#define CCS_ERSRC_AVAIL_HT 0x00020000 /* HT clock available */
-
-/* tsf_cfprep register */
-#define CFPREP_CBI_MASK 0xffffffc0
-#define CFPREP_CBI_SHIFT 6
-#define CFPREP_CFPP 0x00000001
-
-/* tx fifo sizes values are in terms of 256 byte blocks */
-#define TXFIFOCMD_RESET_MASK (1 << 15) /* reset */
-#define TXFIFOCMD_FIFOSEL_SHIFT 8 /* fifo */
-#define TXFIFO_FIFOTOP_SHIFT 8 /* fifo start */
-
-#define TXFIFO_START_BLK16 65 /* Base address + 32 * 512 B/P */
-#define TXFIFO_START_BLK 6 /* Base address + 6 * 256 B */
-#define TXFIFO_SIZE_UNIT 256 /* one unit corresponds to 256 bytes */
-#define MBSS16_TEMPLMEM_MINBLKS 65 /* one unit corresponds to 256 bytes */
-
-/* phy versions, PhyVersion:Revision field */
-#define PV_AV_MASK 0xf000 /* analog block version */
-#define PV_AV_SHIFT 12 /* analog block version bitfield offset */
-#define PV_PT_MASK 0x0f00 /* phy type */
-#define PV_PT_SHIFT 8 /* phy type bitfield offset */
-#define PV_PV_MASK 0x000f /* phy version */
-#define PHY_TYPE(v) ((v & PV_PT_MASK) >> PV_PT_SHIFT)
-
-/* phy types, PhyVersion:PhyType field */
-#define PHY_TYPE_N 4 /* N-Phy value */
-#define PHY_TYPE_SSN 6 /* SSLPN-Phy value */
-#define PHY_TYPE_LCN 8 /* LCN-Phy value */
-#define PHY_TYPE_LCNXN 9 /* LCNXN-Phy value */
-#define PHY_TYPE_NULL 0xf /* Invalid Phy value */
-
-/* analog types, PhyVersion:AnalogType field */
-#define ANA_11N_013 5
-
-/* 802.11a PLCP header def */
-struct ofdm_phy_hdr {
- u8 rlpt[3]; /* rate, length, parity, tail */
- u16 service;
- u8 pad;
-} __packed;
-
-#define D11A_PHY_HDR_GRATE(phdr) ((phdr)->rlpt[0] & 0x0f)
-#define D11A_PHY_HDR_GRES(phdr) (((phdr)->rlpt[0] >> 4) & 0x01)
-#define D11A_PHY_HDR_GLENGTH(phdr) (((u32 *)((phdr)->rlpt) >> 5) & 0x0fff)
-#define D11A_PHY_HDR_GPARITY(phdr) (((phdr)->rlpt[3] >> 1) & 0x01)
-#define D11A_PHY_HDR_GTAIL(phdr) (((phdr)->rlpt[3] >> 2) & 0x3f)
-
-/* rate encoded per 802.11a-1999 sec 17.3.4.1 */
-#define D11A_PHY_HDR_SRATE(phdr, rate) \
- ((phdr)->rlpt[0] = ((phdr)->rlpt[0] & 0xf0) | ((rate) & 0xf))
-/* set reserved field to zero */
-#define D11A_PHY_HDR_SRES(phdr) ((phdr)->rlpt[0] &= 0xef)
-/* length is number of octets in PSDU */
-#define D11A_PHY_HDR_SLENGTH(phdr, length) \
- (*(u32 *)((phdr)->rlpt) = *(u32 *)((phdr)->rlpt) | \
- (((length) & 0x0fff) << 5))
-/* set the tail to all zeros */
-#define D11A_PHY_HDR_STAIL(phdr) ((phdr)->rlpt[3] &= 0x03)
-
-#define D11A_PHY_HDR_LEN_L 3 /* low-rate part of PLCP header */
-#define D11A_PHY_HDR_LEN_R 2 /* high-rate part of PLCP header */
-
-#define D11A_PHY_TX_DELAY (2) /* 2.1 usec */
-
-#define D11A_PHY_HDR_TIME (4) /* low-rate part of PLCP header */
-#define D11A_PHY_PRE_TIME (16)
-#define D11A_PHY_PREHDR_TIME (D11A_PHY_PRE_TIME + D11A_PHY_HDR_TIME)
-
-/* 802.11b PLCP header def */
-struct cck_phy_hdr {
- u8 signal;
- u8 service;
- u16 length;
- u16 crc;
-} __packed;
-
-#define D11B_PHY_HDR_LEN 6
-
-#define D11B_PHY_TX_DELAY (3) /* 3.4 usec */
-
-#define D11B_PHY_LHDR_TIME (D11B_PHY_HDR_LEN << 3)
-#define D11B_PHY_LPRE_TIME (144)
-#define D11B_PHY_LPREHDR_TIME (D11B_PHY_LPRE_TIME + D11B_PHY_LHDR_TIME)
-
-#define D11B_PHY_SHDR_TIME (D11B_PHY_LHDR_TIME >> 1)
-#define D11B_PHY_SPRE_TIME (D11B_PHY_LPRE_TIME >> 1)
-#define D11B_PHY_SPREHDR_TIME (D11B_PHY_SPRE_TIME + D11B_PHY_SHDR_TIME)
-
-#define D11B_PLCP_SIGNAL_LOCKED (1 << 2)
-#define D11B_PLCP_SIGNAL_LE (1 << 7)
-
-#define MIMO_PLCP_MCS_MASK 0x7f /* mcs index */
-#define MIMO_PLCP_40MHZ 0x80 /* 40 Hz frame */
-#define MIMO_PLCP_AMPDU 0x08 /* ampdu */
-
-#define BRCMS_GET_CCK_PLCP_LEN(plcp) (plcp[4] + (plcp[5] << 8))
-#define BRCMS_GET_MIMO_PLCP_LEN(plcp) (plcp[1] + (plcp[2] << 8))
-#define BRCMS_SET_MIMO_PLCP_LEN(plcp, len) \
- do { \
- plcp[1] = len & 0xff; \
- plcp[2] = ((len >> 8) & 0xff); \
- } while (0);
-
-#define BRCMS_SET_MIMO_PLCP_AMPDU(plcp) (plcp[3] |= MIMO_PLCP_AMPDU)
-#define BRCMS_CLR_MIMO_PLCP_AMPDU(plcp) (plcp[3] &= ~MIMO_PLCP_AMPDU)
-#define BRCMS_IS_MIMO_PLCP_AMPDU(plcp) (plcp[3] & MIMO_PLCP_AMPDU)
-
-/* The dot11a PLCP header is 5 bytes. To simplify the software (so that we
- * don't need e.g. different tx DMA headers for 11a and 11b), the PLCP header has
- * padding added in the ucode.
- */
-#define D11_PHY_HDR_LEN 6
-
-/* TX DMA buffer header */
-struct d11txh {
- u16 MacTxControlLow; /* 0x0 */
- u16 MacTxControlHigh; /* 0x1 */
- u16 MacFrameControl; /* 0x2 */
- u16 TxFesTimeNormal; /* 0x3 */
- u16 PhyTxControlWord; /* 0x4 */
- u16 PhyTxControlWord_1; /* 0x5 */
- u16 PhyTxControlWord_1_Fbr; /* 0x6 */
- u16 PhyTxControlWord_1_Rts; /* 0x7 */
- u16 PhyTxControlWord_1_FbrRts; /* 0x8 */
- u16 MainRates; /* 0x9 */
- u16 XtraFrameTypes; /* 0xa */
- u8 IV[16]; /* 0x0b - 0x12 */
- u8 TxFrameRA[6]; /* 0x13 - 0x15 */
- u16 TxFesTimeFallback; /* 0x16 */
- u8 RTSPLCPFallback[6]; /* 0x17 - 0x19 */
- u16 RTSDurFallback; /* 0x1a */
- u8 FragPLCPFallback[6]; /* 0x1b - 1d */
- u16 FragDurFallback; /* 0x1e */
- u16 MModeLen; /* 0x1f */
- u16 MModeFbrLen; /* 0x20 */
- u16 TstampLow; /* 0x21 */
- u16 TstampHigh; /* 0x22 */
- u16 ABI_MimoAntSel; /* 0x23 */
- u16 PreloadSize; /* 0x24 */
- u16 AmpduSeqCtl; /* 0x25 */
- u16 TxFrameID; /* 0x26 */
- u16 TxStatus; /* 0x27 */
- u16 MaxNMpdus; /* 0x28 */
- u16 MaxABytes_MRT; /* 0x29 */
- u16 MaxABytes_FBR; /* 0x2a */
- u16 MinMBytes; /* 0x2b */
- u8 RTSPhyHeader[D11_PHY_HDR_LEN]; /* 0x2c - 0x2e */
- struct ieee80211_rts rts_frame; /* 0x2f - 0x36 */
- u16 PAD; /* 0x37 */
-} __packed;
-
-#define D11_TXH_LEN 112 /* bytes */
-
-/* Frame Types */
-#define FT_CCK 0
-#define FT_OFDM 1
-#define FT_HT 2
-#define FT_N 3
-
-/* Position of MPDU inside A-MPDU; indicated with bits 10:9 of MacTxControlLow */
-#define TXC_AMPDU_SHIFT 9 /* shift for ampdu settings */
-#define TXC_AMPDU_NONE 0 /* Regular MPDU, not an A-MPDU */
-#define TXC_AMPDU_FIRST 1 /* first MPDU of an A-MPDU */
-#define TXC_AMPDU_MIDDLE 2 /* intermediate MPDU of an A-MPDU */
-#define TXC_AMPDU_LAST 3 /* last (or single) MPDU of an A-MPDU */
-
-/* MacTxControlLow */
-#define TXC_AMIC 0x8000
-#define TXC_SENDCTS 0x0800
-#define TXC_AMPDU_MASK 0x0600
-#define TXC_BW_40 0x0100
-#define TXC_FREQBAND_5G 0x0080
-#define TXC_DFCS 0x0040
-#define TXC_IGNOREPMQ 0x0020
-#define TXC_HWSEQ 0x0010
-#define TXC_STARTMSDU 0x0008
-#define TXC_SENDRTS 0x0004
-#define TXC_LONGFRAME 0x0002
-#define TXC_IMMEDACK 0x0001
-
-/* MacTxControlHigh */
-#define TXC_PREAMBLE_RTS_FB_SHORT 0x8000 /* RTS fallback preamble type 1 = SHORT 0 = LONG */
-#define TXC_PREAMBLE_RTS_MAIN_SHORT 0x4000 /* RTS main rate preamble type 1 = SHORT 0 = LONG */
-#define TXC_PREAMBLE_DATA_FB_SHORT 0x2000 /* Main fallback rate preamble type
- * 1 = SHORT for OFDM/GF for MIMO
- * 0 = LONG for CCK/MM for MIMO
- */
-/* TXC_PREAMBLE_DATA_MAIN is in PhyTxControl bit 5 */
-#define TXC_AMPDU_FBR 0x1000 /* use fallback rate for this AMPDU */
-#define TXC_SECKEY_MASK 0x0FF0
-#define TXC_SECKEY_SHIFT 4
-#define TXC_ALT_TXPWR 0x0008 /* Use alternate txpwr defined at loc. M_ALT_TXPWR_IDX */
-#define TXC_SECTYPE_MASK 0x0007
-#define TXC_SECTYPE_SHIFT 0
-
-/* Null delimiter for Fallback rate */
-#define AMPDU_FBR_NULL_DELIM 5 /* Location of Null delimiter count for AMPDU */
-
-/* PhyTxControl for Mimophy */
-#define PHY_TXC_PWR_MASK 0xFC00
-#define PHY_TXC_PWR_SHIFT 10
-#define PHY_TXC_ANT_MASK 0x03C0 /* bit 6, 7, 8, 9 */
-#define PHY_TXC_ANT_SHIFT 6
-#define PHY_TXC_ANT_0_1 0x00C0 /* auto, last rx */
-#define PHY_TXC_LCNPHY_ANT_LAST 0x0000
-#define PHY_TXC_ANT_3 0x0200 /* virtual antenna 3 */
-#define PHY_TXC_ANT_2 0x0100 /* virtual antenna 2 */
-#define PHY_TXC_ANT_1 0x0080 /* virtual antenna 1 */
-#define PHY_TXC_ANT_0 0x0040 /* virtual antenna 0 */
-#define PHY_TXC_SHORT_HDR 0x0010
-
-#define PHY_TXC_OLD_ANT_0 0x0000
-#define PHY_TXC_OLD_ANT_1 0x0100
-#define PHY_TXC_OLD_ANT_LAST 0x0300
-
-/* PhyTxControl_1 for Mimophy */
-#define PHY_TXC1_BW_MASK 0x0007
-#define PHY_TXC1_BW_10MHZ 0
-#define PHY_TXC1_BW_10MHZ_UP 1
-#define PHY_TXC1_BW_20MHZ 2
-#define PHY_TXC1_BW_20MHZ_UP 3
-#define PHY_TXC1_BW_40MHZ 4
-#define PHY_TXC1_BW_40MHZ_DUP 5
-#define PHY_TXC1_MODE_SHIFT 3
-#define PHY_TXC1_MODE_MASK 0x0038
-#define PHY_TXC1_MODE_SISO 0
-#define PHY_TXC1_MODE_CDD 1
-#define PHY_TXC1_MODE_STBC 2
-#define PHY_TXC1_MODE_SDM 3
-
-/* PhyTxControl for HTphy that are different from Mimophy */
-#define PHY_TXC_HTANT_MASK 0x3fC0 /* bit 6, 7, 8, 9, 10, 11, 12, 13 */
-
-/* XtraFrameTypes */
-#define XFTS_RTS_FT_SHIFT 2
-#define XFTS_FBRRTS_FT_SHIFT 4
-#define XFTS_CHANNEL_SHIFT 8
-
-/* Antenna diversity bit in ant_wr_settle */
-#define PHY_AWS_ANTDIV 0x2000
-
-/* IFS ctl */
-#define IFS_USEEDCF (1 << 2)
-
-/* IFS ctl1 */
-#define IFS_CTL1_EDCRS (1 << 3)
-#define IFS_CTL1_EDCRS_20L (1 << 4)
-#define IFS_CTL1_EDCRS_40 (1 << 5)
-
-/* ABI_MimoAntSel */
-#define ABI_MAS_ADDR_BMP_IDX_MASK 0x0f00
-#define ABI_MAS_ADDR_BMP_IDX_SHIFT 8
-#define ABI_MAS_FBR_ANT_PTN_MASK 0x00f0
-#define ABI_MAS_FBR_ANT_PTN_SHIFT 4
-#define ABI_MAS_MRT_ANT_PTN_MASK 0x000f
-
-/* tx status packet */
-struct tx_status {
- u16 framelen;
- u16 PAD;
- u16 frameid;
- u16 status;
- u16 lasttxtime;
- u16 sequence;
- u16 phyerr;
- u16 ackphyrxsh;
-} __packed;
-
-#define TXSTATUS_LEN 16
-
-/* status field bit definitions */
-#define TX_STATUS_FRM_RTX_MASK 0xF000
-#define TX_STATUS_FRM_RTX_SHIFT 12
-#define TX_STATUS_RTS_RTX_MASK 0x0F00
-#define TX_STATUS_RTS_RTX_SHIFT 8
-#define TX_STATUS_MASK 0x00FE
-#define TX_STATUS_PMINDCTD (1 << 7) /* PM mode indicated to AP */
-#define TX_STATUS_INTERMEDIATE (1 << 6) /* intermediate or 1st ampdu pkg */
-#define TX_STATUS_AMPDU (1 << 5) /* AMPDU status */
-#define TX_STATUS_SUPR_MASK 0x1C /* suppress status bits (4:2) */
-#define TX_STATUS_SUPR_SHIFT 2
-#define TX_STATUS_ACK_RCV (1 << 1) /* ACK received */
-#define TX_STATUS_VALID (1 << 0) /* Tx status valid */
-#define TX_STATUS_NO_ACK 0
-
-/* suppress status reason codes */
-#define TX_STATUS_SUPR_PMQ (1 << 2) /* PMQ entry */
-#define TX_STATUS_SUPR_FLUSH (2 << 2) /* flush request */
-#define TX_STATUS_SUPR_FRAG (3 << 2) /* previous frag failure */
-#define TX_STATUS_SUPR_TBTT (3 << 2) /* SHARED: Probe response supr for TBTT */
-#define TX_STATUS_SUPR_BADCH (4 << 2) /* channel mismatch */
-#define TX_STATUS_SUPR_EXPTIME (5 << 2) /* lifetime expiry */
-#define TX_STATUS_SUPR_UF (6 << 2) /* underflow */
-
-/* Unexpected tx status for rate update */
-#define TX_STATUS_UNEXP(status) \
- ((((status) & TX_STATUS_INTERMEDIATE) != 0) && \
- TX_STATUS_UNEXP_AMPDU(status))
-
-/* Unexpected tx status for A-MPDU rate update */
-#define TX_STATUS_UNEXP_AMPDU(status) \
- ((((status) & TX_STATUS_SUPR_MASK) != 0) && \
- (((status) & TX_STATUS_SUPR_MASK) != TX_STATUS_SUPR_EXPTIME))
-
-#define TX_STATUS_BA_BMAP03_MASK 0xF000 /* ba bitmap 0:3 in 1st pkg */
-#define TX_STATUS_BA_BMAP03_SHIFT 12 /* ba bitmap 0:3 in 1st pkg */
-#define TX_STATUS_BA_BMAP47_MASK 0x001E /* ba bitmap 4:7 in 2nd pkg */
-#define TX_STATUS_BA_BMAP47_SHIFT 3 /* ba bitmap 4:7 in 2nd pkg */
-
-/* RXE (Receive Engine) */
-
-/* RCM_CTL */
-#define RCM_INC_MASK_H 0x0080
-#define RCM_INC_MASK_L 0x0040
-#define RCM_INC_DATA 0x0020
-#define RCM_INDEX_MASK 0x001F
-#define RCM_SIZE 15
-
-#define RCM_MAC_OFFSET 0 /* current MAC address */
-#define RCM_BSSID_OFFSET 3 /* current BSSID address */
-#define RCM_F_BSSID_0_OFFSET 6 /* foreign BSS CFP tracking */
-#define RCM_F_BSSID_1_OFFSET 9 /* foreign BSS CFP tracking */
-#define RCM_F_BSSID_2_OFFSET 12 /* foreign BSS CFP tracking */
-
-#define RCM_WEP_TA0_OFFSET 16
-#define RCM_WEP_TA1_OFFSET 19
-#define RCM_WEP_TA2_OFFSET 22
-#define RCM_WEP_TA3_OFFSET 25
-
-/* PSM Block */
-
-/* psm_phy_hdr_param bits */
-#define MAC_PHY_RESET 1
-#define MAC_PHY_CLOCK_EN 2
-#define MAC_PHY_FORCE_CLK 4
-
-/* WEP Block */
-
-/* WEP_WKEY */
-#define WKEY_START (1 << 8)
-#define WKEY_SEL_MASK 0x1F
-
-/* WEP data formats */
-
-/* the number of RCMTA entries */
-#define RCMTA_SIZE 50
-
-#define M_ADDR_BMP_BLK (0x37e * 2)
-#define M_ADDR_BMP_BLK_SZ 12
-
-#define ADDR_BMP_RA (1 << 0) /* Receiver Address (RA) */
-#define ADDR_BMP_TA (1 << 1) /* Transmitter Address (TA) */
-#define ADDR_BMP_BSSID (1 << 2) /* BSSID */
-#define ADDR_BMP_AP (1 << 3) /* Infra-BSS Access Point (AP) */
-#define ADDR_BMP_STA (1 << 4) /* Infra-BSS Station (STA) */
-#define ADDR_BMP_RESERVED1 (1 << 5)
-#define ADDR_BMP_RESERVED2 (1 << 6)
-#define ADDR_BMP_RESERVED3 (1 << 7)
-#define ADDR_BMP_BSS_IDX_MASK (3 << 8) /* BSS control block index */
-#define ADDR_BMP_BSS_IDX_SHIFT 8
-
-#define WSEC_MAX_RCMTA_KEYS 54
-
-/* max keys in M_TKMICKEYS_BLK */
-#define WSEC_MAX_TKMIC_ENGINE_KEYS 12 /* 8 + 4 default */
-
-/* max RXE match registers */
-#define WSEC_MAX_RXE_KEYS 4
-
-/* SECKINDXALGO (Security Key Index & Algorithm Block) word format */
-/* SKL (Security Key Lookup) */
-#define SKL_ALGO_MASK 0x0007
-#define SKL_ALGO_SHIFT 0
-#define SKL_KEYID_MASK 0x0008
-#define SKL_KEYID_SHIFT 3
-#define SKL_INDEX_MASK 0x03F0
-#define SKL_INDEX_SHIFT 4
-#define SKL_GRP_ALGO_MASK 0x1c00
-#define SKL_GRP_ALGO_SHIFT 10
-
-/* additional bits defined for IBSS group key support */
-#define SKL_IBSS_INDEX_MASK 0x01F0
-#define SKL_IBSS_INDEX_SHIFT 4
-#define SKL_IBSS_KEYID1_MASK 0x0600
-#define SKL_IBSS_KEYID1_SHIFT 9
-#define SKL_IBSS_KEYID2_MASK 0x1800
-#define SKL_IBSS_KEYID2_SHIFT 11
-#define SKL_IBSS_KEYALGO_MASK 0xE000
-#define SKL_IBSS_KEYALGO_SHIFT 13
-
-#define WSEC_MODE_OFF 0
-#define WSEC_MODE_HW 1
-#define WSEC_MODE_SW 2
-
-#define WSEC_ALGO_OFF 0
-#define WSEC_ALGO_WEP1 1
-#define WSEC_ALGO_TKIP 2
-#define WSEC_ALGO_AES 3
-#define WSEC_ALGO_WEP128 4
-#define WSEC_ALGO_AES_LEGACY 5
-#define WSEC_ALGO_NALG 6
-
-#define AES_MODE_NONE 0
-#define AES_MODE_CCM 1
-
-/* WEP_CTL (Rev 0) */
-#define WECR0_KEYREG_SHIFT 0
-#define WECR0_KEYREG_MASK 0x7
-#define WECR0_DECRYPT (1 << 3)
-#define WECR0_IVINLINE (1 << 4)
-#define WECR0_WEPALG_SHIFT 5
-#define WECR0_WEPALG_MASK (0x7 << 5)
-#define WECR0_WKEYSEL_SHIFT 8
-#define WECR0_WKEYSEL_MASK (0x7 << 8)
-#define WECR0_WKEYSTART (1 << 11)
-#define WECR0_WEPINIT (1 << 14)
-#define WECR0_ICVERR (1 << 15)
-
-/* Frame template map byte offsets */
-#define T_ACTS_TPL_BASE (0)
-#define T_NULL_TPL_BASE (0xc * 2)
-#define T_QNULL_TPL_BASE (0x1c * 2)
-#define T_RR_TPL_BASE (0x2c * 2)
-#define T_BCN0_TPL_BASE (0x34 * 2)
-#define T_PRS_TPL_BASE (0x134 * 2)
-#define T_BCN1_TPL_BASE (0x234 * 2)
-#define T_TX_FIFO_TXRAM_BASE (T_ACTS_TPL_BASE + (TXFIFO_START_BLK * TXFIFO_SIZE_UNIT))
-
-#define T_BA_TPL_BASE T_QNULL_TPL_BASE /* template area for BA */
-
-#define T_RAM_ACCESS_SZ 4 /* template ram is 4 byte access only */
-
-/* Shared Mem byte offsets */
-
-/* Location where the ucode expects the corerev */
-#define M_MACHW_VER (0x00b * 2)
-
-/* Location where the ucode expects the MAC capabilities */
-#define M_MACHW_CAP_L (0x060 * 2)
-#define M_MACHW_CAP_H (0x061 * 2)
-
-/* WME shared memory */
-#define M_EDCF_STATUS_OFF (0x007 * 2)
-#define M_TXF_CUR_INDEX (0x018 * 2)
-#define M_EDCF_QINFO (0x120 * 2)
-
-/* PS-mode related parameters */
-#define M_DOT11_SLOT (0x008 * 2)
-#define M_DOT11_DTIMPERIOD (0x009 * 2)
-#define M_NOSLPZNATDTIM (0x026 * 2)
-
-/* Beacon-related parameters */
-#define M_BCN0_FRM_BYTESZ (0x00c * 2) /* Bcn 0 template length */
-#define M_BCN1_FRM_BYTESZ (0x00d * 2) /* Bcn 1 template length */
-#define M_BCN_TXTSF_OFFSET (0x00e * 2)
-#define M_TIMBPOS_INBEACON (0x00f * 2)
-#define M_SFRMTXCNTFBRTHSD (0x022 * 2)
-#define M_LFRMTXCNTFBRTHSD (0x023 * 2)
-#define M_BCN_PCTLWD (0x02a * 2)
-#define M_BCN_LI (0x05b * 2) /* beacon listen interval */
-
-/* MAX Rx Frame len */
-#define M_MAXRXFRM_LEN (0x010 * 2)
-
-/* ACK/CTS related params */
-#define M_RSP_PCTLWD (0x011 * 2)
-
-/* Hardware Power Control */
-#define M_TXPWR_N (0x012 * 2)
-#define M_TXPWR_TARGET (0x013 * 2)
-#define M_TXPWR_MAX (0x014 * 2)
-#define M_TXPWR_CUR (0x019 * 2)
-
-/* Rx-related parameters */
-#define M_RX_PAD_DATA_OFFSET (0x01a * 2)
-
-/* WEP Shared mem data */
-#define M_SEC_DEFIVLOC (0x01e * 2)
-#define M_SEC_VALNUMSOFTMCHTA (0x01f * 2)
-#define M_PHYVER (0x028 * 2)
-#define M_PHYTYPE (0x029 * 2)
-#define M_SECRXKEYS_PTR (0x02b * 2)
-#define M_TKMICKEYS_PTR (0x059 * 2)
-#define M_SECKINDXALGO_BLK (0x2ea * 2)
-#define M_SECKINDXALGO_BLK_SZ 54
-#define M_SECPSMRXTAMCH_BLK (0x2fa * 2)
-#define M_TKIP_TSC_TTAK (0x18c * 2)
-#define D11_MAX_KEY_SIZE 16
-
-#define M_MAX_ANTCNT (0x02e * 2) /* antenna swap threshold */
-
-/* Probe response related parameters */
-#define M_SSIDLEN (0x024 * 2)
-#define M_PRB_RESP_FRM_LEN (0x025 * 2)
-#define M_PRS_MAXTIME (0x03a * 2)
-#define M_SSID (0xb0 * 2)
-#define M_CTXPRS_BLK (0xc0 * 2)
-#define C_CTX_PCTLWD_POS (0x4 * 2)
-
-/* Delta between OFDM and CCK power in CCK power boost mode */
-#define M_OFDM_OFFSET (0x027 * 2)
-
-/* TSSI for last 4 11b/g CCK packets transmitted */
-#define M_B_TSSI_0 (0x02c * 2)
-#define M_B_TSSI_1 (0x02d * 2)
-
-/* Host flags to turn on ucode options */
-#define M_HOST_FLAGS1 (0x02f * 2)
-#define M_HOST_FLAGS2 (0x030 * 2)
-#define M_HOST_FLAGS3 (0x031 * 2)
-#define M_HOST_FLAGS4 (0x03c * 2)
-#define M_HOST_FLAGS5 (0x06a * 2)
-#define M_HOST_FLAGS_SZ 16
-
-#define M_RADAR_REG (0x033 * 2)
-
-/* TSSI for last 4 11a OFDM packets transmitted */
-#define M_A_TSSI_0 (0x034 * 2)
-#define M_A_TSSI_1 (0x035 * 2)
-
-/* noise interference measurement */
-#define M_NOISE_IF_COUNT (0x034 * 2)
-#define M_NOISE_IF_TIMEOUT (0x035 * 2)
-
-#define M_RF_RX_SP_REG1 (0x036 * 2)
-
-/* TSSI for last 4 11g OFDM packets transmitted */
-#define M_G_TSSI_0 (0x038 * 2)
-#define M_G_TSSI_1 (0x039 * 2)
-
-/* Background noise measure */
-#define M_JSSI_0 (0x44 * 2)
-#define M_JSSI_1 (0x45 * 2)
-#define M_JSSI_AUX (0x46 * 2)
-
-#define M_CUR_2050_RADIOCODE (0x47 * 2)
-
-/* TX fifo sizes */
-#define M_FIFOSIZE0 (0x4c * 2)
-#define M_FIFOSIZE1 (0x4d * 2)
-#define M_FIFOSIZE2 (0x4e * 2)
-#define M_FIFOSIZE3 (0x4f * 2)
-#define D11_MAX_TX_FRMS 32 /* max frames allowed in tx fifo */
-
-/* Current channel number plus upper bits */
-#define M_CURCHANNEL (0x50 * 2)
-#define D11_CURCHANNEL_5G 0x0100;
-#define D11_CURCHANNEL_40 0x0200;
-#define D11_CURCHANNEL_MAX 0x00FF;
-
-/* last posted frameid on the bcmc fifo */
-#define M_BCMC_FID (0x54 * 2)
-#define INVALIDFID 0xffff
-
-/* extended beacon phyctl bytes for 11N */
-#define M_BCN_PCTL1WD (0x058 * 2)
-
-/* idle busy ratio to duty_cycle requirement */
-#define M_TX_IDLE_BUSY_RATIO_X_16_CCK (0x52 * 2)
-#define M_TX_IDLE_BUSY_RATIO_X_16_OFDM (0x5A * 2)
-
-/* CW RSSI for LCNPHY */
-#define M_LCN_RSSI_0 0x1332
-#define M_LCN_RSSI_1 0x1338
-#define M_LCN_RSSI_2 0x133e
-#define M_LCN_RSSI_3 0x1344
-
-/* SNR for LCNPHY */
-#define M_LCN_SNR_A_0 0x1334
-#define M_LCN_SNR_B_0 0x1336
-
-#define M_LCN_SNR_A_1 0x133a
-#define M_LCN_SNR_B_1 0x133c
-
-#define M_LCN_SNR_A_2 0x1340
-#define M_LCN_SNR_B_2 0x1342
-
-#define M_LCN_SNR_A_3 0x1346
-#define M_LCN_SNR_B_3 0x1348
-
-#define M_LCN_LAST_RESET (81*2)
-#define M_LCN_LAST_LOC (63*2)
-#define M_LCNPHY_RESET_STATUS (4902)
-#define M_LCNPHY_DSC_TIME (0x98d*2)
-#define M_LCNPHY_RESET_CNT_DSC (0x98b*2)
-#define M_LCNPHY_RESET_CNT (0x98c*2)
-
-/* Rate table offsets */
-#define M_RT_DIRMAP_A (0xe0 * 2)
-#define M_RT_BBRSMAP_A (0xf0 * 2)
-#define M_RT_DIRMAP_B (0x100 * 2)
-#define M_RT_BBRSMAP_B (0x110 * 2)
-
-/* Rate table entry offsets */
-#define M_RT_PRS_PLCP_POS 10
-#define M_RT_PRS_DUR_POS 16
-#define M_RT_OFDM_PCTL1_POS 18
-
-#define M_20IN40_IQ (0x380 * 2)
-
-/* SHM locations where ucode stores the current power index */
-#define M_CURR_IDX1 (0x384 * 2)
-#define M_CURR_IDX2 (0x387 * 2)
-
-#define M_BSCALE_ANT0 (0x5e * 2)
-#define M_BSCALE_ANT1 (0x5f * 2)
-
-/* Antenna Diversity Testing */
-#define M_MIMO_ANTSEL_RXDFLT (0x63 * 2)
-#define M_ANTSEL_CLKDIV (0x61 * 2)
-#define M_MIMO_ANTSEL_TXDFLT (0x64 * 2)
-
-#define M_MIMO_MAXSYM (0x5d * 2)
-#define MIMO_MAXSYM_DEF 0x8000 /* 32k */
-#define MIMO_MAXSYM_MAX 0xffff /* 64k */
-
-#define M_WATCHDOG_8TU (0x1e * 2)
-#define WATCHDOG_8TU_DEF 5
-#define WATCHDOG_8TU_MAX 10
-
-/* Manufacturing Test Variables */
-#define M_PKTENG_CTRL (0x6c * 2) /* PER test mode */
-#define M_PKTENG_IFS (0x6d * 2) /* IFS for TX mode */
-#define M_PKTENG_FRMCNT_LO (0x6e * 2) /* Lower word of tx frmcnt/rx lostcnt */
-#define M_PKTENG_FRMCNT_HI (0x6f * 2) /* Upper word of tx frmcnt/rx lostcnt */
-
-/* Index variation in vbat ripple */
-#define M_LCN_PWR_IDX_MAX (0x67 * 2) /* highest index read by ucode */
-#define M_LCN_PWR_IDX_MIN (0x66 * 2) /* lowest index read by ucode */
-
-/* M_PKTENG_CTRL bit definitions */
-#define M_PKTENG_MODE_TX 0x0001
-#define M_PKTENG_MODE_TX_RIFS 0x0004
-#define M_PKTENG_MODE_TX_CTS 0x0008
-#define M_PKTENG_MODE_RX 0x0002
-#define M_PKTENG_MODE_RX_WITH_ACK 0x0402
-#define M_PKTENG_MODE_MASK 0x0003
-#define M_PKTENG_FRMCNT_VLD 0x0100 /* TX frames indicated in the frmcnt reg */
-
-/* Sample Collect parameters (bitmap and type) */
-#define M_SMPL_COL_BMP (0x37d * 2) /* Trigger bitmap for sample collect */
-#define M_SMPL_COL_CTL (0x3b2 * 2) /* Sample collect type */
-
-#define ANTSEL_CLKDIV_4MHZ 6
-#define MIMO_ANTSEL_BUSY 0x4000 /* bit 14 (busy) */
-#define MIMO_ANTSEL_SEL 0x8000 /* bit 15 write the value */
-#define MIMO_ANTSEL_WAIT 50 /* 50us wait */
-#define MIMO_ANTSEL_OVERRIDE 0x8000 /* flag */
-
-struct shm_acparams {
- u16 txop;
- u16 cwmin;
- u16 cwmax;
- u16 cwcur;
- u16 aifs;
- u16 bslots;
- u16 reggap;
- u16 status;
- u16 rsvd[8];
-} __packed;
-#define M_EDCF_QLEN (16 * 2)
-
-#define WME_STATUS_NEWAC (1 << 8)
-
-/* M_HOST_FLAGS */
-#define MHFMAX 5 /* Number of valid hostflag half-word (u16) */
-#define MHF1 0 /* Hostflag 1 index */
-#define MHF2 1 /* Hostflag 2 index */
-#define MHF3 2 /* Hostflag 3 index */
-#define MHF4 3 /* Hostflag 4 index */
-#define MHF5 4 /* Hostflag 5 index */
-
-/* Flags in M_HOST_FLAGS */
-#define MHF1_ANTDIV 0x0001 /* Enable ucode antenna diversity help */
-#define MHF1_EDCF 0x0100 /* Enable EDCF access control */
-#define MHF1_IQSWAP_WAR 0x0200
-#define MHF1_FORCEFASTCLK 0x0400 /* Disable Slow clock request, for corerev < 11 */
-
-/* Flags in M_HOST_FLAGS2 */
-#define MHF2_PCISLOWCLKWAR 0x0008 /* PR16165WAR : Enable ucode PCI slow clock WAR */
-#define MHF2_TXBCMC_NOW 0x0040 /* Flush BCMC FIFO immediately */
-#define MHF2_HWPWRCTL 0x0080 /* Enable ucode/hw power control */
-#define MHF2_NPHY40MHZ_WAR 0x0800
-
-/* Flags in M_HOST_FLAGS3 */
-#define MHF3_ANTSEL_EN 0x0001 /* enabled mimo antenna selection */
-#define MHF3_ANTSEL_MODE 0x0002 /* antenna selection mode: 0: 2x3, 1: 2x4 */
-#define MHF3_RESERVED1 0x0004
-#define MHF3_RESERVED2 0x0008
-#define MHF3_NPHY_MLADV_WAR 0x0010
-
-/* Flags in M_HOST_FLAGS4 */
-#define MHF4_BPHY_TXCORE0 0x0080 /* force bphy Tx on core 0 (board level WAR) */
-#define MHF4_EXTPA_ENABLE 0x4000 /* for 4313A0 FEM boards */
-
-/* Flags in M_HOST_FLAGS5 */
-#define MHF5_4313_GPIOCTRL 0x0001
-#define MHF5_RESERVED1 0x0002
-#define MHF5_RESERVED2 0x0004
-/* Radio power setting for ucode */
-#define M_RADIO_PWR (0x32 * 2)
-
-/* phy noise recorded by ucode right after tx */
-#define M_PHY_NOISE (0x037 * 2)
-#define PHY_NOISE_MASK 0x00ff
-
-/* Receive Frame Data Header for 802.11b DCF-only frames */
-struct d11rxhdr {
- u16 RxFrameSize; /* Actual byte length of the frame data received */
- u16 PAD;
- u16 PhyRxStatus_0; /* PhyRxStatus 15:0 */
- u16 PhyRxStatus_1; /* PhyRxStatus 31:16 */
- u16 PhyRxStatus_2; /* PhyRxStatus 47:32 */
- u16 PhyRxStatus_3; /* PhyRxStatus 63:48 */
- u16 PhyRxStatus_4; /* PhyRxStatus 79:64 */
- u16 PhyRxStatus_5; /* PhyRxStatus 95:80 */
- u16 RxStatus1; /* MAC Rx Status */
- u16 RxStatus2; /* extended MAC Rx status */
- u16 RxTSFTime; /* RxTSFTime time of first MAC symbol + M_PHY_PLCPRX_DLY */
- u16 RxChan; /* gain code, channel radio code, and phy type */
-} __packed;
-
-#define RXHDR_LEN 24 /* sizeof struct d11rxhdr */
-#define FRAMELEN(h) ((h)->RxFrameSize)
-
-struct brcms_d11rxhdr {
- struct d11rxhdr rxhdr;
- u32 tsf_l; /* TSF_L reading */
- s8 rssi; /* computed instanteneous rssi in BMAC */
- s8 rxpwr0; /* obsoleted, place holder for legacy ROM code. use rxpwr[] */
- s8 rxpwr1; /* obsoleted, place holder for legacy ROM code. use rxpwr[] */
- s8 do_rssi_ma; /* do per-pkt sampling for per-antenna ma in HIGH */
- s8 rxpwr[WL_RSSI_ANT_MAX]; /* rssi for supported antennas */
-} __packed;
-
-/* PhyRxStatus_0: */
-#define PRXS0_FT_MASK 0x0003 /* NPHY only: CCK, OFDM, preN, N */
-#define PRXS0_CLIP_MASK 0x000C /* NPHY only: clip count adjustment steps by AGC */
-#define PRXS0_CLIP_SHIFT 2
-#define PRXS0_UNSRATE 0x0010 /* PHY received a frame with unsupported rate */
-#define PRXS0_RXANT_UPSUBBAND 0x0020 /* GPHY: rx ant, NPHY: upper sideband */
-#define PRXS0_LCRS 0x0040 /* CCK frame only: lost crs during cck frame reception */
-#define PRXS0_SHORTH 0x0080 /* Short Preamble */
-#define PRXS0_PLCPFV 0x0100 /* PLCP violation */
-#define PRXS0_PLCPHCF 0x0200 /* PLCP header integrity check failed */
-#define PRXS0_GAIN_CTL 0x4000 /* legacy PHY gain control */
-#define PRXS0_ANTSEL_MASK 0xF000 /* NPHY: Antennas used for received frame, bitmask */
-#define PRXS0_ANTSEL_SHIFT 0x12
-
-/* subfield PRXS0_FT_MASK */
-#define PRXS0_CCK 0x0000
-#define PRXS0_OFDM 0x0001 /* valid only for G phy, use rxh->RxChan for A phy */
-#define PRXS0_PREN 0x0002
-#define PRXS0_STDN 0x0003
-
-/* subfield PRXS0_ANTSEL_MASK */
-#define PRXS0_ANTSEL_0 0x0 /* antenna 0 is used */
-#define PRXS0_ANTSEL_1 0x2 /* antenna 1 is used */
-#define PRXS0_ANTSEL_2 0x4 /* antenna 2 is used */
-#define PRXS0_ANTSEL_3 0x8 /* antenna 3 is used */
-
-/* PhyRxStatus_1: */
-#define PRXS1_JSSI_MASK 0x00FF
-#define PRXS1_JSSI_SHIFT 0
-#define PRXS1_SQ_MASK 0xFF00
-#define PRXS1_SQ_SHIFT 8
-
-/* nphy PhyRxStatus_1: */
-#define PRXS1_nphy_PWR0_MASK 0x00FF
-#define PRXS1_nphy_PWR1_MASK 0xFF00
-
-/* HTPHY Rx Status defines */
-/* htphy PhyRxStatus_0: those bit are overlapped with PhyRxStatus_0 */
-#define PRXS0_BAND 0x0400 /* 0 = 2.4G, 1 = 5G */
-#define PRXS0_RSVD 0x0800 /* reserved; set to 0 */
-#define PRXS0_UNUSED 0xF000 /* unused and not defined; set to 0 */
-
-/* htphy PhyRxStatus_1: */
-#define PRXS1_HTPHY_CORE_MASK 0x000F /* core enables for {3..0}, 0=disabled, 1=enabled */
-#define PRXS1_HTPHY_ANTCFG_MASK 0x00F0 /* antenna configation */
-#define PRXS1_HTPHY_MMPLCPLenL_MASK 0xFF00 /* Mixmode PLCP Length low byte mask */
-
-/* htphy PhyRxStatus_2: */
-#define PRXS2_HTPHY_MMPLCPLenH_MASK 0x000F /* Mixmode PLCP Length high byte maskw */
-#define PRXS2_HTPHY_MMPLCH_RATE_MASK 0x00F0 /* Mixmode PLCP rate mask */
-#define PRXS2_HTPHY_RXPWR_ANT0 0xFF00 /* Rx power on core 0 */
-
-/* htphy PhyRxStatus_3: */
-#define PRXS3_HTPHY_RXPWR_ANT1 0x00FF /* Rx power on core 1 */
-#define PRXS3_HTPHY_RXPWR_ANT2 0xFF00 /* Rx power on core 2 */
-
-/* htphy PhyRxStatus_4: */
-#define PRXS4_HTPHY_RXPWR_ANT3 0x00FF /* Rx power on core 3 */
-#define PRXS4_HTPHY_CFO 0xFF00 /* Coarse frequency offset */
-
-/* htphy PhyRxStatus_5: */
-#define PRXS5_HTPHY_FFO 0x00FF /* Fine frequency offset */
-#define PRXS5_HTPHY_AR 0xFF00 /* Advance Retard */
-
-#define HTPHY_MMPLCPLen(rxs) ((((rxs)->PhyRxStatus_1 & PRXS1_HTPHY_MMPLCPLenL_MASK) >> 8) | \
- (((rxs)->PhyRxStatus_2 & PRXS2_HTPHY_MMPLCPLenH_MASK) << 8))
-/* Get Rx power on core 0 */
-#define HTPHY_RXPWR_ANT0(rxs) ((((rxs)->PhyRxStatus_2) & PRXS2_HTPHY_RXPWR_ANT0) >> 8)
-/* Get Rx power on core 1 */
-#define HTPHY_RXPWR_ANT1(rxs) (((rxs)->PhyRxStatus_3) & PRXS3_HTPHY_RXPWR_ANT1)
-/* Get Rx power on core 2 */
-#define HTPHY_RXPWR_ANT2(rxs) ((((rxs)->PhyRxStatus_3) & PRXS3_HTPHY_RXPWR_ANT2) >> 8)
-
-/* ucode RxStatus1: */
-#define RXS_BCNSENT 0x8000
-#define RXS_SECKINDX_MASK 0x07e0
-#define RXS_SECKINDX_SHIFT 5
-#define RXS_DECERR (1 << 4)
-#define RXS_DECATMPT (1 << 3)
-#define RXS_PBPRES (1 << 2) /* PAD bytes to make IP data 4 bytes aligned */
-#define RXS_RESPFRAMETX (1 << 1)
-#define RXS_FCSERR (1 << 0)
-
-/* ucode RxStatus2: */
-#define RXS_AMSDU_MASK 1
-#define RXS_AGGTYPE_MASK 0x6
-#define RXS_AGGTYPE_SHIFT 1
-#define RXS_PHYRXST_VALID (1 << 8)
-#define RXS_RXANT_MASK 0x3
-#define RXS_RXANT_SHIFT 12
-
-/* RxChan */
-#define RXS_CHAN_40 0x1000
-#define RXS_CHAN_5G 0x0800
-#define RXS_CHAN_ID_MASK 0x07f8
-#define RXS_CHAN_ID_SHIFT 3
-#define RXS_CHAN_PHYTYPE_MASK 0x0007
-#define RXS_CHAN_PHYTYPE_SHIFT 0
-
-/* Index of attenuations used during ucode power control. */
-#define M_PWRIND_BLKS (0x184 * 2)
-#define M_PWRIND_MAP0 (M_PWRIND_BLKS + 0x0)
-#define M_PWRIND_MAP1 (M_PWRIND_BLKS + 0x2)
-#define M_PWRIND_MAP2 (M_PWRIND_BLKS + 0x4)
-#define M_PWRIND_MAP3 (M_PWRIND_BLKS + 0x6)
-/* M_PWRIND_MAP(core) macro */
-#define M_PWRIND_MAP(core) (M_PWRIND_BLKS + ((core)<<1))
-
-/* PSM SHM variable offsets */
-#define M_PSM_SOFT_REGS 0x0
-#define M_BOM_REV_MAJOR (M_PSM_SOFT_REGS + 0x0)
-#define M_BOM_REV_MINOR (M_PSM_SOFT_REGS + 0x2)
-#define M_UCODE_DBGST (M_PSM_SOFT_REGS + 0x40) /* ucode debug status code */
-#define M_UCODE_MACSTAT (M_PSM_SOFT_REGS + 0xE0) /* macstat counters */
-
-#define M_AGING_THRSH (0x3e * 2) /* max time waiting for medium before tx */
-#define M_MBURST_SIZE (0x40 * 2) /* max frames in a frameburst */
-#define M_MBURST_TXOP (0x41 * 2) /* max frameburst TXOP in unit of us */
-#define M_SYNTHPU_DLY (0x4a * 2) /* pre-wakeup for synthpu, default: 500 */
-#define M_PRETBTT (0x4b * 2)
-
-#define M_ALT_TXPWR_IDX (M_PSM_SOFT_REGS + (0x3b * 2)) /* offset to the target txpwr */
-#define M_PHY_TX_FLT_PTR (M_PSM_SOFT_REGS + (0x3d * 2))
-#define M_CTS_DURATION (M_PSM_SOFT_REGS + (0x5c * 2))
-#define M_LP_RCCAL_OVR (M_PSM_SOFT_REGS + (0x6b * 2))
-
-/* PKTENG Rx Stats Block */
-#define M_RXSTATS_BLK_PTR (M_PSM_SOFT_REGS + (0x65 * 2))
-
-/* ucode debug status codes */
-#define DBGST_INACTIVE 0 /* not valid really */
-#define DBGST_INIT 1 /* after zeroing SHM, before suspending at init */
-#define DBGST_ACTIVE 2 /* "normal" state */
-#define DBGST_SUSPENDED 3 /* suspended */
-#define DBGST_ASLEEP 4 /* asleep (PS mode) */
-
-/* Scratch Reg defs */
-enum _ePsmScratchPadRegDefinitions {
- S_RSV0 = 0,
- S_RSV1,
- S_RSV2,
-
- /* scratch registers for Dot11-contants */
- S_DOT11_CWMIN, /* CW-minimum 0x03 */
- S_DOT11_CWMAX, /* CW-maximum 0x04 */
- S_DOT11_CWCUR, /* CW-current 0x05 */
- S_DOT11_SRC_LMT, /* short retry count limit 0x06 */
- S_DOT11_LRC_LMT, /* long retry count limit 0x07 */
- S_DOT11_DTIMCOUNT, /* DTIM-count 0x08 */
-
- /* Tx-side scratch registers */
- S_SEQ_NUM, /* hardware sequence number reg 0x09 */
- S_SEQ_NUM_FRAG, /* seq-num for frags (Set at the start os MSDU 0x0A */
- S_FRMRETX_CNT, /* frame retx count 0x0B */
- S_SSRC, /* Station short retry count 0x0C */
- S_SLRC, /* Station long retry count 0x0D */
- S_EXP_RSP, /* Expected response frame 0x0E */
- S_OLD_BREM, /* Remaining backoff ctr 0x0F */
- S_OLD_CWWIN, /* saved-off CW-cur 0x10 */
- S_TXECTL, /* TXE-Ctl word constructed in scr-pad 0x11 */
- S_CTXTST, /* frm type-subtype as read from Tx-descr 0x12 */
-
- /* Rx-side scratch registers */
- S_RXTST, /* Type and subtype in Rxframe 0x13 */
-
- /* Global state register */
- S_STREG, /* state storage actual bit maps below 0x14 */
-
- S_TXPWR_SUM, /* Tx power control: accumulator 0x15 */
- S_TXPWR_ITER, /* Tx power control: iteration 0x16 */
- S_RX_FRMTYPE, /* Rate and PHY type for frames 0x17 */
- S_THIS_AGG, /* Size of this AGG (A-MSDU) 0x18 */
-
- S_KEYINDX, /* 0x19 */
- S_RXFRMLEN, /* Receive MPDU length in bytes 0x1A */
-
- /* Receive TSF time stored in SCR */
- S_RXTSFTMRVAL_WD3, /* TSF value at the start of rx 0x1B */
- S_RXTSFTMRVAL_WD2, /* TSF value at the start of rx 0x1C */
- S_RXTSFTMRVAL_WD1, /* TSF value at the start of rx 0x1D */
- S_RXTSFTMRVAL_WD0, /* TSF value at the start of rx 0x1E */
- S_RXSSN, /* Received start seq number for A-MPDU BA 0x1F */
- S_RXQOSFLD, /* Rx-QoS field (if present) 0x20 */
-
- /* Scratch pad regs used in microcode as temp storage */
- S_TMP0, /* stmp0 0x21 */
- S_TMP1, /* stmp1 0x22 */
- S_TMP2, /* stmp2 0x23 */
- S_TMP3, /* stmp3 0x24 */
- S_TMP4, /* stmp4 0x25 */
- S_TMP5, /* stmp5 0x26 */
- S_PRQPENALTY_CTR, /* Probe response queue penalty counter 0x27 */
- S_ANTCNT, /* unsuccessful attempts on current ant. 0x28 */
- S_SYMBOL, /* flag for possible symbol ctl frames 0x29 */
- S_RXTP, /* rx frame type 0x2A */
- S_STREG2, /* extra state storage 0x2B */
- S_STREG3, /* even more extra state storage 0x2C */
- S_STREG4, /* ... 0x2D */
- S_STREG5, /* remember to initialize it to zero 0x2E */
-
- S_ADJPWR_IDX,
- S_CUR_PTR, /* Temp pointer for A-MPDU re-Tx SHM table 0x32 */
- S_REVID4, /* 0x33 */
- S_INDX, /* 0x34 */
- S_ADDR0, /* 0x35 */
- S_ADDR1, /* 0x36 */
- S_ADDR2, /* 0x37 */
- S_ADDR3, /* 0x38 */
- S_ADDR4, /* 0x39 */
- S_ADDR5, /* 0x3A */
- S_TMP6, /* 0x3B */
- S_KEYINDX_BU, /* Backup for Key index 0x3C */
- S_MFGTEST_TMP0, /* Temp register used for RX test calculations 0x3D */
- S_RXESN, /* Received end sequence number for A-MPDU BA 0x3E */
- S_STREG6, /* 0x3F */
-};
-
-#define S_BEACON_INDX S_OLD_BREM
-#define S_PRS_INDX S_OLD_CWWIN
-#define S_PHYTYPE S_SSRC
-#define S_PHYVER S_SLRC
-
-/* IHR SLOW_CTRL values */
-#define SLOW_CTRL_PDE (1 << 0)
-#define SLOW_CTRL_FD (1 << 8)
-
-/* ucode mac statistic counters in shared memory */
-struct macstat {
- u16 txallfrm; /* 0x80 */
- u16 txrtsfrm; /* 0x82 */
- u16 txctsfrm; /* 0x84 */
- u16 txackfrm; /* 0x86 */
- u16 txdnlfrm; /* 0x88 */
- u16 txbcnfrm; /* 0x8a */
- u16 txfunfl[8]; /* 0x8c - 0x9b */
- u16 txtplunfl; /* 0x9c */
- u16 txphyerr; /* 0x9e */
- u16 pktengrxducast; /* 0xa0 */
- u16 pktengrxdmcast; /* 0xa2 */
- u16 rxfrmtoolong; /* 0xa4 */
- u16 rxfrmtooshrt; /* 0xa6 */
- u16 rxinvmachdr; /* 0xa8 */
- u16 rxbadfcs; /* 0xaa */
- u16 rxbadplcp; /* 0xac */
- u16 rxcrsglitch; /* 0xae */
- u16 rxstrt; /* 0xb0 */
- u16 rxdfrmucastmbss; /* 0xb2 */
- u16 rxmfrmucastmbss; /* 0xb4 */
- u16 rxcfrmucast; /* 0xb6 */
- u16 rxrtsucast; /* 0xb8 */
- u16 rxctsucast; /* 0xba */
- u16 rxackucast; /* 0xbc */
- u16 rxdfrmocast; /* 0xbe */
- u16 rxmfrmocast; /* 0xc0 */
- u16 rxcfrmocast; /* 0xc2 */
- u16 rxrtsocast; /* 0xc4 */
- u16 rxctsocast; /* 0xc6 */
- u16 rxdfrmmcast; /* 0xc8 */
- u16 rxmfrmmcast; /* 0xca */
- u16 rxcfrmmcast; /* 0xcc */
- u16 rxbeaconmbss; /* 0xce */
- u16 rxdfrmucastobss; /* 0xd0 */
- u16 rxbeaconobss; /* 0xd2 */
- u16 rxrsptmout; /* 0xd4 */
- u16 bcntxcancl; /* 0xd6 */
- u16 PAD;
- u16 rxf0ovfl; /* 0xda */
- u16 rxf1ovfl; /* 0xdc */
- u16 rxf2ovfl; /* 0xde */
- u16 txsfovfl; /* 0xe0 */
- u16 pmqovfl; /* 0xe2 */
- u16 rxcgprqfrm; /* 0xe4 */
- u16 rxcgprsqovfl; /* 0xe6 */
- u16 txcgprsfail; /* 0xe8 */
- u16 txcgprssuc; /* 0xea */
- u16 prs_timeout; /* 0xec */
- u16 rxnack;
- u16 frmscons;
- u16 txnack;
- u16 txglitch_nack;
- u16 txburst; /* 0xf6 # tx bursts */
- u16 bphy_rxcrsglitch; /* bphy rx crs glitch */
- u16 phywatchdog; /* 0xfa # of phy watchdog events */
- u16 PAD;
- u16 bphy_badplcp; /* bphy bad plcp */
-};
-
-/* dot11 core-specific control flags */
-#define SICF_PCLKE 0x0004 /* PHY clock enable */
-#define SICF_PRST 0x0008 /* PHY reset */
-#define SICF_MPCLKE 0x0010 /* MAC PHY clockcontrol enable */
-#define SICF_FREF 0x0020 /* PLL FreqRefSelect */
-/* NOTE: the following bw bits only apply when the core is attached
- * to a NPHY
- */
-#define SICF_BWMASK 0x00c0 /* phy clock mask (b6 & b7) */
-#define SICF_BW40 0x0080 /* 40MHz BW (160MHz phyclk) */
-#define SICF_BW20 0x0040 /* 20MHz BW (80MHz phyclk) */
-#define SICF_BW10 0x0000 /* 10MHz BW (40MHz phyclk) */
-#define SICF_GMODE 0x2000 /* gmode enable */
-
-/* dot11 core-specific status flags */
-#define SISF_2G_PHY 0x0001 /* 2.4G capable phy */
-#define SISF_5G_PHY 0x0002 /* 5G capable phy */
-#define SISF_FCLKA 0x0004 /* FastClkAvailable */
-#define SISF_DB_PHY 0x0008 /* Dualband phy */
-
-/* === End of MAC reg, Beginning of PHY(b/a/g/n) reg, radio and LPPHY regs are separated === */
-
-#define BPHY_REG_OFT_BASE 0x0
-/* offsets for indirect access to bphy registers */
-#define BPHY_BB_CONFIG 0x01
-#define BPHY_ADCBIAS 0x02
-#define BPHY_ANACORE 0x03
-#define BPHY_PHYCRSTH 0x06
-#define BPHY_TEST 0x0a
-#define BPHY_PA_TX_TO 0x10
-#define BPHY_SYNTH_DC_TO 0x11
-#define BPHY_PA_TX_TIME_UP 0x12
-#define BPHY_RX_FLTR_TIME_UP 0x13
-#define BPHY_TX_POWER_OVERRIDE 0x14
-#define BPHY_RF_OVERRIDE 0x15
-#define BPHY_RF_TR_LOOKUP1 0x16
-#define BPHY_RF_TR_LOOKUP2 0x17
-#define BPHY_COEFFS 0x18
-#define BPHY_PLL_OUT 0x19
-#define BPHY_REFRESH_MAIN 0x1a
-#define BPHY_REFRESH_TO0 0x1b
-#define BPHY_REFRESH_TO1 0x1c
-#define BPHY_RSSI_TRESH 0x20
-#define BPHY_IQ_TRESH_HH 0x21
-#define BPHY_IQ_TRESH_H 0x22
-#define BPHY_IQ_TRESH_L 0x23
-#define BPHY_IQ_TRESH_LL 0x24
-#define BPHY_GAIN 0x25
-#define BPHY_LNA_GAIN_RANGE 0x26
-#define BPHY_JSSI 0x27
-#define BPHY_TSSI_CTL 0x28
-#define BPHY_TSSI 0x29
-#define BPHY_TR_LOSS_CTL 0x2a
-#define BPHY_LO_LEAKAGE 0x2b
-#define BPHY_LO_RSSI_ACC 0x2c
-#define BPHY_LO_IQMAG_ACC 0x2d
-#define BPHY_TX_DC_OFF1 0x2e
-#define BPHY_TX_DC_OFF2 0x2f
-#define BPHY_PEAK_CNT_THRESH 0x30
-#define BPHY_FREQ_OFFSET 0x31
-#define BPHY_DIVERSITY_CTL 0x32
-#define BPHY_PEAK_ENERGY_LO 0x33
-#define BPHY_PEAK_ENERGY_HI 0x34
-#define BPHY_SYNC_CTL 0x35
-#define BPHY_TX_PWR_CTRL 0x36
-#define BPHY_TX_EST_PWR 0x37
-#define BPHY_STEP 0x38
-#define BPHY_WARMUP 0x39
-#define BPHY_LMS_CFF_READ 0x3a
-#define BPHY_LMS_COEFF_I 0x3b
-#define BPHY_LMS_COEFF_Q 0x3c
-#define BPHY_SIG_POW 0x3d
-#define BPHY_RFDC_CANCEL_CTL 0x3e
-#define BPHY_HDR_TYPE 0x40
-#define BPHY_SFD_TO 0x41
-#define BPHY_SFD_CTL 0x42
-#define BPHY_DEBUG 0x43
-#define BPHY_RX_DELAY_COMP 0x44
-#define BPHY_CRS_DROP_TO 0x45
-#define BPHY_SHORT_SFD_NZEROS 0x46
-#define BPHY_DSSS_COEFF1 0x48
-#define BPHY_DSSS_COEFF2 0x49
-#define BPHY_CCK_COEFF1 0x4a
-#define BPHY_CCK_COEFF2 0x4b
-#define BPHY_TR_CORR 0x4c
-#define BPHY_ANGLE_SCALE 0x4d
-#define BPHY_TX_PWR_BASE_IDX 0x4e
-#define BPHY_OPTIONAL_MODES2 0x4f
-#define BPHY_CCK_LMS_STEP 0x50
-#define BPHY_BYPASS 0x51
-#define BPHY_CCK_DELAY_LONG 0x52
-#define BPHY_CCK_DELAY_SHORT 0x53
-#define BPHY_PPROC_CHAN_DELAY 0x54
-#define BPHY_DDFS_ENABLE 0x58
-#define BPHY_PHASE_SCALE 0x59
-#define BPHY_FREQ_CONTROL 0x5a
-#define BPHY_LNA_GAIN_RANGE_10 0x5b
-#define BPHY_LNA_GAIN_RANGE_32 0x5c
-#define BPHY_OPTIONAL_MODES 0x5d
-#define BPHY_RX_STATUS2 0x5e
-#define BPHY_RX_STATUS3 0x5f
-#define BPHY_DAC_CONTROL 0x60
-#define BPHY_ANA11G_FILT_CTRL 0x62
-#define BPHY_REFRESH_CTRL 0x64
-#define BPHY_RF_OVERRIDE2 0x65
-#define BPHY_SPUR_CANCEL_CTRL 0x66
-#define BPHY_FINE_DIGIGAIN_CTRL 0x67
-#define BPHY_RSSI_LUT 0x88
-#define BPHY_RSSI_LUT_END 0xa7
-#define BPHY_TSSI_LUT 0xa8
-#define BPHY_TSSI_LUT_END 0xc7
-#define BPHY_TSSI2PWR_LUT 0x380
-#define BPHY_TSSI2PWR_LUT_END 0x39f
-#define BPHY_LOCOMP_LUT 0x3a0
-#define BPHY_LOCOMP_LUT_END 0x3bf
-#define BPHY_TXGAIN_LUT 0x3c0
-#define BPHY_TXGAIN_LUT_END 0x3ff
-
-/* Bits in BB_CONFIG: */
-#define PHY_BBC_ANT_MASK 0x0180
-#define PHY_BBC_ANT_SHIFT 7
-#define BB_DARWIN 0x1000
-#define BBCFG_RESETCCA 0x4000
-#define BBCFG_RESETRX 0x8000
-
-/* Bits in phytest(0x0a): */
-#define TST_DDFS 0x2000
-#define TST_TXFILT1 0x0800
-#define TST_UNSCRAM 0x0400
-#define TST_CARR_SUPP 0x0200
-#define TST_DC_COMP_LOOP 0x0100
-#define TST_LOOPBACK 0x0080
-#define TST_TXFILT0 0x0040
-#define TST_TXTEST_ENABLE 0x0020
-#define TST_TXTEST_RATE 0x0018
-#define TST_TXTEST_PHASE 0x0007
-
-/* phytest txTestRate values */
-#define TST_TXTEST_RATE_1MBPS 0
-#define TST_TXTEST_RATE_2MBPS 1
-#define TST_TXTEST_RATE_5_5MBPS 2
-#define TST_TXTEST_RATE_11MBPS 3
-#define TST_TXTEST_RATE_SHIFT 3
-
-#define SHM_BYT_CNT 0x2 /* IHR location */
-#define MAX_BYT_CNT 0x600 /* Maximum frame len */
-
-struct d11cnt {
- u32 txfrag;
- u32 txmulti;
- u32 txfail;
- u32 txretry;
- u32 txretrie;
- u32 rxdup;
- u32 txrts;
- u32 txnocts;
- u32 txnoack;
- u32 rxfrag;
- u32 rxmulti;
- u32 rxcrc;
- u32 txfrmsnt;
- u32 rxundec;
-};
-
-#endif /* _BRCM_D11_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/dma.c b/drivers/staging/brcm80211/brcmsmac/dma.c
deleted file mode 100644
index ea17671efb6..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/dma.c
+++ /dev/null
@@ -1,1917 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
-#if defined(__mips__)
-#include <asm/addrspace.h>
-#endif
-
-#include <brcmu_utils.h>
-#include <aiutils.h>
-#include "types.h"
-#include "dma.h"
-
-/*
- * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical address.
- */
-#define D64RINGALIGN_BITS 13
-#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
-#define D64RINGALIGN (1 << D64RINGALIGN_BITS)
-
-#define D64MAXDD (D64MAXRINGSZ / sizeof(struct dma64desc))
-
-/* transmit channel control */
-#define D64_XC_XE 0x00000001 /* transmit enable */
-#define D64_XC_SE 0x00000002 /* transmit suspend request */
-#define D64_XC_LE 0x00000004 /* loopback enable */
-#define D64_XC_FL 0x00000010 /* flush request */
-#define D64_XC_PD 0x00000800 /* parity check disable */
-#define D64_XC_AE 0x00030000 /* address extension bits */
-#define D64_XC_AE_SHIFT 16
-
-/* transmit descriptor table pointer */
-#define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */
-
-/* transmit channel status */
-#define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */
-#define D64_XS0_XS_MASK 0xf0000000 /* transmit state */
-#define D64_XS0_XS_SHIFT 28
-#define D64_XS0_XS_DISABLED 0x00000000 /* disabled */
-#define D64_XS0_XS_ACTIVE 0x10000000 /* active */
-#define D64_XS0_XS_IDLE 0x20000000 /* idle wait */
-#define D64_XS0_XS_STOPPED 0x30000000 /* stopped */
-#define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */
-
-#define D64_XS1_AD_MASK 0x00001fff /* active descriptor */
-#define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */
-#define D64_XS1_XE_SHIFT 28
-#define D64_XS1_XE_NOERR 0x00000000 /* no error */
-#define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */
-#define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */
-#define D64_XS1_XE_DTE 0x30000000 /* data transfer error */
-#define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */
-#define D64_XS1_XE_COREE 0x50000000 /* core error */
-
-/* receive channel control */
-#define D64_RC_RE 0x00000001 /* receive enable */
-#define D64_RC_RO_MASK 0x000000fe /* receive frame offset */
-#define D64_RC_RO_SHIFT 1
-#define D64_RC_FM 0x00000100 /* direct fifo receive (pio) mode */
-#define D64_RC_SH 0x00000200 /* separate rx header descriptor enable */
-#define D64_RC_OC 0x00000400 /* overflow continue */
-#define D64_RC_PD 0x00000800 /* parity check disable */
-#define D64_RC_AE 0x00030000 /* address extension bits */
-#define D64_RC_AE_SHIFT 16
-
-/* flags for dma controller */
-#define DMA_CTRL_PEN (1 << 0) /* partity enable */
-#define DMA_CTRL_ROC (1 << 1) /* rx overflow continue */
-#define DMA_CTRL_RXMULTI (1 << 2) /* allow rx scatter to multiple descriptors */
-#define DMA_CTRL_UNFRAMED (1 << 3) /* Unframed Rx/Tx data */
-
-/* receive descriptor table pointer */
-#define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */
-
-/* receive channel status */
-#define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */
-#define D64_RS0_RS_MASK 0xf0000000 /* receive state */
-#define D64_RS0_RS_SHIFT 28
-#define D64_RS0_RS_DISABLED 0x00000000 /* disabled */
-#define D64_RS0_RS_ACTIVE 0x10000000 /* active */
-#define D64_RS0_RS_IDLE 0x20000000 /* idle wait */
-#define D64_RS0_RS_STOPPED 0x30000000 /* stopped */
-#define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */
-
-#define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */
-#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
-#define D64_RS1_RE_SHIFT 28
-#define D64_RS1_RE_NOERR 0x00000000 /* no error */
-#define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */
-#define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */
-#define D64_RS1_RE_DTE 0x30000000 /* data transfer error */
-#define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */
-#define D64_RS1_RE_COREE 0x50000000 /* core error */
-
-/* fifoaddr */
-#define D64_FA_OFF_MASK 0xffff /* offset */
-#define D64_FA_SEL_MASK 0xf0000 /* select */
-#define D64_FA_SEL_SHIFT 16
-#define D64_FA_SEL_XDD 0x00000 /* transmit dma data */
-#define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */
-#define D64_FA_SEL_RDD 0x40000 /* receive dma data */
-#define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */
-#define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */
-#define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */
-#define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */
-#define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */
-#define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */
-#define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */
-
-/* descriptor control flags 1 */
-#define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */
-#define D64_CTRL1_EOT ((u32)1 << 28) /* end of descriptor table */
-#define D64_CTRL1_IOC ((u32)1 << 29) /* interrupt on completion */
-#define D64_CTRL1_EOF ((u32)1 << 30) /* end of frame */
-#define D64_CTRL1_SOF ((u32)1 << 31) /* start of frame */
-
-/* descriptor control flags 2 */
-#define D64_CTRL2_BC_MASK 0x00007fff /* buffer byte count. real data len must <= 16KB */
-#define D64_CTRL2_AE 0x00030000 /* address extension bits */
-#define D64_CTRL2_AE_SHIFT 16
-#define D64_CTRL2_PARITY 0x00040000 /* parity bit */
-
-/* control flags in the range [27:20] are core-specific and not defined here */
-#define D64_CTRL_CORE_MASK 0x0ff00000
-
-#define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */
-#define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */
-#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1 */
-#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */
-
-#define DMADDRWIDTH_30 30 /* 30-bit addressing capability */
-#define DMADDRWIDTH_32 32 /* 32-bit addressing capability */
-#define DMADDRWIDTH_63 63 /* 64-bit addressing capability */
-#define DMADDRWIDTH_64 64 /* 64-bit addressing capability */
-
-/* packet headroom necessary to accommodate the largest header in the system, (i.e TXOFF).
- * By doing, we avoid the need to allocate an extra buffer for the header when bridging to WL.
- * There is a compile time check in wlc.c which ensure that this value is at least as big
- * as TXOFF. This value is used in dma_rxfill (dma.c).
- */
-
-#define BCMEXTRAHDROOM 172
-
-/* debug/trace */
-#ifdef BCMDBG
-#define DMA_ERROR(args) \
- do { \
- if (!(*di->msg_level & 1)) \
- ; \
- else \
- printk args; \
- } while (0)
-#define DMA_TRACE(args) \
- do { \
- if (!(*di->msg_level & 2)) \
- ; \
- else \
- printk args; \
- } while (0)
-#else
-#define DMA_ERROR(args)
-#define DMA_TRACE(args)
-#endif /* BCMDBG */
-
-#define DMA_NONE(args)
-
-typedef unsigned long dmaaddr_t;
-#define PHYSADDRHI(_pa) (0)
-#define PHYSADDRHISET(_pa, _val)
-#define PHYSADDRLO(_pa) ((_pa))
-#define PHYSADDRLOSET(_pa, _val) \
- do { \
- (_pa) = (_val); \
- } while (0)
-
-#define d64txregs dregs.d64_u.txregs_64
-#define d64rxregs dregs.d64_u.rxregs_64
-#define txd64 dregs.d64_u.txd_64
-#define rxd64 dregs.d64_u.rxd_64
-
-/* default dma message level (if input msg_level pointer is null in dma_attach()) */
-static uint dma_msg_level;
-
-#define MAXNAMEL 8 /* 8 char names */
-
-#define DI_INFO(dmah) ((dma_info_t *)dmah)
-
-#define R_SM(r) (*(r))
-#define W_SM(r, v) (*(r) = (v))
-
-/* One physical DMA segment */
-struct dma_seg {
- dmaaddr_t addr;
- u32 length;
-};
-
-struct dma_seg_map {
- void *oshdmah; /* Opaque handle for OSL to store its information */
- uint origsize; /* Size of the virtual packet */
- uint nsegs;
- struct dma_seg segs[MAX_DMA_SEGS];
-};
-
-/*
- * DMA Descriptor
- * Descriptors are only read by the hardware, never written back.
- */
-struct dma64desc {
- u32 ctrl1; /* misc control bits & bufcount */
- u32 ctrl2; /* buffer count and address extension */
- u32 addrlow; /* memory address of the date buffer, bits 31:0 */
- u32 addrhigh; /* memory address of the date buffer, bits 63:32 */
-};
-
-/* dma engine software state */
-struct dma_info {
- struct dma_pub dma; /* exported structure */
- uint *msg_level; /* message level pointer */
- char name[MAXNAMEL]; /* callers name for diag msgs */
-
- void *pbus; /* bus handle */
-
- bool dma64; /* this dma engine is operating in 64-bit mode */
- bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
-
- union {
- struct {
- dma64regs_t *txregs_64; /* 64-bit dma tx engine registers */
- dma64regs_t *rxregs_64; /* 64-bit dma rx engine registers */
- /* pointer to dma64 tx descriptor ring */
- struct dma64desc *txd_64;
- /* pointer to dma64 rx descriptor ring */
- struct dma64desc *rxd_64;
- } d64_u;
- } dregs;
-
- u16 dmadesc_align; /* alignment requirement for dma descriptors */
-
- u16 ntxd; /* # tx descriptors tunable */
- u16 txin; /* index of next descriptor to reclaim */
- u16 txout; /* index of next descriptor to post */
- void **txp; /* pointer to parallel array of pointers to packets */
- struct dma_seg_map *txp_dmah; /* DMA MAP meta-data handle */
- dmaaddr_t txdpa; /* Aligned physical address of descriptor ring */
- dmaaddr_t txdpaorig; /* Original physical address of descriptor ring */
- u16 txdalign; /* #bytes added to alloc'd mem to align txd */
- u32 txdalloc; /* #bytes allocated for the ring */
- u32 xmtptrbase; /* When using unaligned descriptors, the ptr register
- * is not just an index, it needs all 13 bits to be
- * an offset from the addr register.
- */
-
- u16 nrxd; /* # rx descriptors tunable */
- u16 rxin; /* index of next descriptor to reclaim */
- u16 rxout; /* index of next descriptor to post */
- void **rxp; /* pointer to parallel array of pointers to packets */
- struct dma_seg_map *rxp_dmah; /* DMA MAP meta-data handle */
- dmaaddr_t rxdpa; /* Aligned physical address of descriptor ring */
- dmaaddr_t rxdpaorig; /* Original physical address of descriptor ring */
- u16 rxdalign; /* #bytes added to alloc'd mem to align rxd */
- u32 rxdalloc; /* #bytes allocated for the ring */
- u32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */
-
- /* tunables */
- unsigned int rxbufsize; /* rx buffer size in bytes,
- * not including the extra headroom
- */
- uint rxextrahdrroom; /* extra rx headroom, reverseved to assist upper stack
- * e.g. some rx pkt buffers will be bridged to tx side
- * without byte copying. The extra headroom needs to be
- * large enough to fit txheader needs.
- * Some dongle driver may not need it.
- */
- uint nrxpost; /* # rx buffers to keep posted */
- unsigned int rxoffset; /* rxcontrol offset */
- uint ddoffsetlow; /* add to get dma address of descriptor ring, low 32 bits */
- uint ddoffsethigh; /* high 32 bits */
- uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */
- uint dataoffsethigh; /* high 32 bits */
- bool aligndesc_4k; /* descriptor base need to be aligned or not */
-};
-
-/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
-#ifdef BCMDMASGLISTOSL
-#define DMASGLIST_ENAB true
-#else
-#define DMASGLIST_ENAB false
-#endif /* BCMDMASGLISTOSL */
-
-/* descriptor bumping macros */
-#define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */
-#define TXD(x) XXD((x), di->ntxd)
-#define RXD(x) XXD((x), di->nrxd)
-#define NEXTTXD(i) TXD((i) + 1)
-#define PREVTXD(i) TXD((i) - 1)
-#define NEXTRXD(i) RXD((i) + 1)
-#define PREVRXD(i) RXD((i) - 1)
-
-#define NTXDACTIVE(h, t) TXD((t) - (h))
-#define NRXDACTIVE(h, t) RXD((t) - (h))
-
-/* macros to convert between byte offsets and indexes */
-#define B2I(bytes, type) ((bytes) / sizeof(type))
-#define I2B(index, type) ((index) * sizeof(type))
-
-#define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
-#define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
-
-#define PCI64ADDR_HIGH 0x80000000 /* address[63] */
-#define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
-
-/* Common prototypes */
-static bool _dma_isaddrext(struct dma_info *di);
-static bool _dma_descriptor_align(struct dma_info *di);
-static bool _dma_alloc(struct dma_info *di, uint direction);
-static void _dma_detach(struct dma_info *di);
-static void _dma_ddtable_init(struct dma_info *di, uint direction,
- dmaaddr_t pa);
-static void _dma_rxinit(struct dma_info *di);
-static void *_dma_rx(struct dma_info *di);
-static bool _dma_rxfill(struct dma_info *di);
-static void _dma_rxreclaim(struct dma_info *di);
-static void _dma_rxenable(struct dma_info *di);
-static void *_dma_getnextrxp(struct dma_info *di, bool forceall);
-static void _dma_rx_param_get(struct dma_info *di, u16 *rxoffset,
- u16 *rxbufsize);
-
-static void _dma_txblock(struct dma_info *di);
-static void _dma_txunblock(struct dma_info *di);
-static uint _dma_txactive(struct dma_info *di);
-static uint _dma_rxactive(struct dma_info *di);
-static uint _dma_txpending(struct dma_info *di);
-static uint _dma_txcommitted(struct dma_info *di);
-
-static void *_dma_peeknexttxp(struct dma_info *di);
-static void *_dma_peeknextrxp(struct dma_info *di);
-static unsigned long _dma_getvar(struct dma_info *di, const char *name);
-static void _dma_counterreset(struct dma_info *di);
-static void _dma_fifoloopbackenable(struct dma_info *di);
-static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags);
-static u8 dma_align_sizetobits(uint size);
-static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
- u16 *alignbits, uint *alloced,
- dmaaddr_t *descpa);
-
-/* Prototypes for 64-bit routines */
-static bool dma64_alloc(struct dma_info *di, uint direction);
-static bool dma64_txreset(struct dma_info *di);
-static bool dma64_rxreset(struct dma_info *di);
-static bool dma64_txsuspendedidle(struct dma_info *di);
-static int dma64_txfast(struct dma_info *di, struct sk_buff *p0, bool commit);
-static int dma64_txunframed(struct dma_info *di, void *p0, uint len,
- bool commit);
-static void *dma64_getpos(struct dma_info *di, bool direction);
-static void *dma64_getnexttxp(struct dma_info *di, enum txd_range range);
-static void *dma64_getnextrxp(struct dma_info *di, bool forceall);
-static void dma64_txrotate(struct dma_info *di);
-
-static bool dma64_rxidle(struct dma_info *di);
-static void dma64_txinit(struct dma_info *di);
-static bool dma64_txenabled(struct dma_info *di);
-static void dma64_txsuspend(struct dma_info *di);
-static void dma64_txresume(struct dma_info *di);
-static bool dma64_txsuspended(struct dma_info *di);
-static void dma64_txreclaim(struct dma_info *di, enum txd_range range);
-static bool dma64_txstopped(struct dma_info *di);
-static bool dma64_rxstopped(struct dma_info *di);
-static bool dma64_rxenabled(struct dma_info *di);
-static bool _dma64_addrext(dma64regs_t *dma64regs);
-
-static inline u32 parity32(u32 data);
-
-const struct di_fcn_s dma64proc = {
- (di_detach_t) _dma_detach,
- (di_txinit_t) dma64_txinit,
- (di_txreset_t) dma64_txreset,
- (di_txenabled_t) dma64_txenabled,
- (di_txsuspend_t) dma64_txsuspend,
- (di_txresume_t) dma64_txresume,
- (di_txsuspended_t) dma64_txsuspended,
- (di_txsuspendedidle_t) dma64_txsuspendedidle,
- (di_txfast_t) dma64_txfast,
- (di_txunframed_t) dma64_txunframed,
- (di_getpos_t) dma64_getpos,
- (di_txstopped_t) dma64_txstopped,
- (di_txreclaim_t) dma64_txreclaim,
- (di_getnexttxp_t) dma64_getnexttxp,
- (di_peeknexttxp_t) _dma_peeknexttxp,
- (di_txblock_t) _dma_txblock,
- (di_txunblock_t) _dma_txunblock,
- (di_txactive_t) _dma_txactive,
- (di_txrotate_t) dma64_txrotate,
-
- (di_rxinit_t) _dma_rxinit,
- (di_rxreset_t) dma64_rxreset,
- (di_rxidle_t) dma64_rxidle,
- (di_rxstopped_t) dma64_rxstopped,
- (di_rxenable_t) _dma_rxenable,
- (di_rxenabled_t) dma64_rxenabled,
- (di_rx_t) _dma_rx,
- (di_rxfill_t) _dma_rxfill,
- (di_rxreclaim_t) _dma_rxreclaim,
- (di_getnextrxp_t) _dma_getnextrxp,
- (di_peeknextrxp_t) _dma_peeknextrxp,
- (di_rxparam_get_t) _dma_rx_param_get,
-
- (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
- (di_getvar_t) _dma_getvar,
- (di_counterreset_t) _dma_counterreset,
- (di_ctrlflags_t) _dma_ctrlflags,
- NULL,
- NULL,
- NULL,
- (di_rxactive_t) _dma_rxactive,
- (di_txpending_t) _dma_txpending,
- (di_txcommitted_t) _dma_txcommitted,
- 39
-};
-
-struct dma_pub *dma_attach(char *name, struct si_pub *sih,
- void *dmaregstx, void *dmaregsrx, uint ntxd,
- uint nrxd, uint rxbufsize, int rxextheadroom,
- uint nrxpost, uint rxoffset, uint *msg_level)
-{
- struct dma_info *di;
- uint size;
-
- /* allocate private info structure */
- di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
- if (di == NULL) {
-#ifdef BCMDBG
- printk(KERN_ERR "dma_attach: out of memory\n");
-#endif
- return NULL;
- }
-
- di->msg_level = msg_level ? msg_level : &dma_msg_level;
-
-
- di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
-
- /* init dma reg pointer */
- di->d64txregs = (dma64regs_t *) dmaregstx;
- di->d64rxregs = (dma64regs_t *) dmaregsrx;
- di->dma.di_fn = (const struct di_fcn_s *)&dma64proc;
-
- /* Default flags (which can be changed by the driver calling dma_ctrlflags
- * before enable): For backwards compatibility both Rx Overflow Continue
- * and Parity are DISABLED.
- * supports it.
- */
- di->dma.di_fn->ctrlflags(&di->dma, DMA_CTRL_ROC | DMA_CTRL_PEN,
- 0);
-
- DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
- "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
- "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
- di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
- rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
-
- /* make a private copy of our callers name */
- strncpy(di->name, name, MAXNAMEL);
- di->name[MAXNAMEL - 1] = '\0';
-
- di->pbus = ((struct si_info *)sih)->pbus;
-
- /* save tunables */
- di->ntxd = (u16) ntxd;
- di->nrxd = (u16) nrxd;
-
- /* the actual dma size doesn't include the extra headroom */
- di->rxextrahdrroom =
- (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
- if (rxbufsize > BCMEXTRAHDROOM)
- di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
- else
- di->rxbufsize = (u16) rxbufsize;
-
- di->nrxpost = (u16) nrxpost;
- di->rxoffset = (u8) rxoffset;
-
- /*
- * figure out the DMA physical address offset for dd and data
- * PCI/PCIE: they map silicon backplace address to zero based memory, need offset
- * Other bus: use zero
- * SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
- */
- di->ddoffsetlow = 0;
- di->dataoffsetlow = 0;
- /* for pci bus, add offset */
- if (sih->bustype == PCI_BUS) {
- /* pcie with DMA64 */
- di->ddoffsetlow = 0;
- di->ddoffsethigh = SI_PCIE_DMA_H32;
- di->dataoffsetlow = di->ddoffsetlow;
- di->dataoffsethigh = di->ddoffsethigh;
- }
-#if defined(__mips__) && defined(IL_BIGENDIAN)
- di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
-#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
- /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
- if ((ai_coreid(sih) == SDIOD_CORE_ID)
- && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
- di->addrext = 0;
- else if ((ai_coreid(sih) == I2S_CORE_ID) &&
- ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
- di->addrext = 0;
- else
- di->addrext = _dma_isaddrext(di);
-
- /* does the descriptors need to be aligned and if yes, on 4K/8K or not */
- di->aligndesc_4k = _dma_descriptor_align(di);
- if (di->aligndesc_4k) {
- di->dmadesc_align = D64RINGALIGN_BITS;
- if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
- /* for smaller dd table, HW relax alignment reqmnt */
- di->dmadesc_align = D64RINGALIGN_BITS - 1;
- }
- } else
- di->dmadesc_align = 4; /* 16 byte alignment */
-
- DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
- di->aligndesc_4k, di->dmadesc_align));
-
- /* allocate tx packet pointer vector */
- if (ntxd) {
- size = ntxd * sizeof(void *);
- di->txp = kzalloc(size, GFP_ATOMIC);
- if (di->txp == NULL) {
- DMA_ERROR(("%s: dma_attach: out of tx memory\n", di->name));
- goto fail;
- }
- }
-
- /* allocate rx packet pointer vector */
- if (nrxd) {
- size = nrxd * sizeof(void *);
- di->rxp = kzalloc(size, GFP_ATOMIC);
- if (di->rxp == NULL) {
- DMA_ERROR(("%s: dma_attach: out of rx memory\n", di->name));
- goto fail;
- }
- }
-
- /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
- if (ntxd) {
- if (!_dma_alloc(di, DMA_TX))
- goto fail;
- }
-
- /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
- if (nrxd) {
- if (!_dma_alloc(di, DMA_RX))
- goto fail;
- }
-
- if ((di->ddoffsetlow != 0) && !di->addrext) {
- if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
- DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->txdpa)));
- goto fail;
- }
- if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
- DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->rxdpa)));
- goto fail;
- }
- }
-
- DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));
-
- /* allocate DMA mapping vectors */
- if (DMASGLIST_ENAB) {
- if (ntxd) {
- size = ntxd * sizeof(struct dma_seg_map);
- di->txp_dmah = kzalloc(size, GFP_ATOMIC);
- if (di->txp_dmah == NULL)
- goto fail;
- }
-
- if (nrxd) {
- size = nrxd * sizeof(struct dma_seg_map);
- di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
- if (di->rxp_dmah == NULL)
- goto fail;
- }
- }
-
- return (struct dma_pub *) di;
-
- fail:
- _dma_detach(di);
- return NULL;
-}
-
-/* Check for odd number of 1's */
-static inline u32 parity32(u32 data)
-{
- data ^= data >> 16;
- data ^= data >> 8;
- data ^= data >> 4;
- data ^= data >> 2;
- data ^= data >> 1;
-
- return data & 1;
-}
-
-#define DMA64_DD_PARITY(dd) parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
-
-static inline void
-dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
- dmaaddr_t pa, uint outidx, u32 *flags, u32 bufcount)
-{
- u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
-
- /* PCI bus with big(>1G) physical address, use address extension */
-#if defined(__mips__) && defined(IL_BIGENDIAN)
- if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
- || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
-#else
- if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
-#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
-
- W_SM(&ddring[outidx].addrlow,
- BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
- W_SM(&ddring[outidx].addrhigh,
- BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
- W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
- W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
- } else {
- /* address extension for 32-bit PCI */
- u32 ae;
-
- ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
- PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
-
- ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
- W_SM(&ddring[outidx].addrlow,
- BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
- W_SM(&ddring[outidx].addrhigh,
- BUS_SWAP32(0 + di->dataoffsethigh));
- W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
- W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
- }
- if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
- if (DMA64_DD_PARITY(&ddring[outidx])) {
- W_SM(&ddring[outidx].ctrl2,
- BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
- }
- }
-}
-
-static bool _dma_alloc(struct dma_info *di, uint direction)
-{
- return dma64_alloc(di, direction);
-}
-
-void *dma_alloc_consistent(struct pci_dev *pdev, uint size, u16 align_bits,
- uint *alloced, unsigned long *pap)
-{
- if (align_bits) {
- u16 align = (1 << align_bits);
- if (!IS_ALIGNED(PAGE_SIZE, align))
- size += align;
- *alloced = size;
- }
- return pci_alloc_consistent(pdev, size, (dma_addr_t *) pap);
-}
-
-/* !! may be called with core in reset */
-static void _dma_detach(struct dma_info *di)
-{
-
- DMA_TRACE(("%s: dma_detach\n", di->name));
-
- /* free dma descriptor rings */
- if (di->txd64)
- pci_free_consistent(di->pbus, di->txdalloc,
- ((s8 *)di->txd64 - di->txdalign),
- (di->txdpaorig));
- if (di->rxd64)
- pci_free_consistent(di->pbus, di->rxdalloc,
- ((s8 *)di->rxd64 - di->rxdalign),
- (di->rxdpaorig));
-
- /* free packet pointer vectors */
- kfree(di->txp);
- kfree(di->rxp);
-
- /* free tx packet DMA handles */
- kfree(di->txp_dmah);
-
- /* free rx packet DMA handles */
- kfree(di->rxp_dmah);
-
- /* free our private info structure */
- kfree(di);
-
-}
-
-static bool _dma_descriptor_align(struct dma_info *di)
-{
- u32 addrl;
-
- /* Check to see if the descriptors need to be aligned on 4K/8K or not */
- if (di->d64txregs != NULL) {
- W_REG(&di->d64txregs->addrlow, 0xff0);
- addrl = R_REG(&di->d64txregs->addrlow);
- if (addrl != 0)
- return false;
- } else if (di->d64rxregs != NULL) {
- W_REG(&di->d64rxregs->addrlow, 0xff0);
- addrl = R_REG(&di->d64rxregs->addrlow);
- if (addrl != 0)
- return false;
- }
- return true;
-}
-
-/* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
-static bool _dma_isaddrext(struct dma_info *di)
-{
- /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
-
- /* not all tx or rx channel are available */
- if (di->d64txregs != NULL) {
- if (!_dma64_addrext(di->d64txregs)) {
- DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
- "AE set\n", di->name));
- }
- return true;
- } else if (di->d64rxregs != NULL) {
- if (!_dma64_addrext(di->d64rxregs)) {
- DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
- "AE set\n", di->name));
- }
- return true;
- }
- return false;
-}
-
-/* initialize descriptor table base address */
-static void _dma_ddtable_init(struct dma_info *di, uint direction, dmaaddr_t pa)
-{
- if (!di->aligndesc_4k) {
- if (direction == DMA_TX)
- di->xmtptrbase = PHYSADDRLO(pa);
- else
- di->rcvptrbase = PHYSADDRLO(pa);
- }
-
- if ((di->ddoffsetlow == 0)
- || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
- if (direction == DMA_TX) {
- W_REG(&di->d64txregs->addrlow,
- (PHYSADDRLO(pa) + di->ddoffsetlow));
- W_REG(&di->d64txregs->addrhigh,
- (PHYSADDRHI(pa) + di->ddoffsethigh));
- } else {
- W_REG(&di->d64rxregs->addrlow,
- (PHYSADDRLO(pa) + di->ddoffsetlow));
- W_REG(&di->d64rxregs->addrhigh,
- (PHYSADDRHI(pa) + di->ddoffsethigh));
- }
- } else {
- /* DMA64 32bits address extension */
- u32 ae;
-
- /* shift the high bit(s) from pa to ae */
- ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
- PCI32ADDR_HIGH_SHIFT;
- PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
-
- if (direction == DMA_TX) {
- W_REG(&di->d64txregs->addrlow,
- (PHYSADDRLO(pa) + di->ddoffsetlow));
- W_REG(&di->d64txregs->addrhigh,
- di->ddoffsethigh);
- SET_REG(&di->d64txregs->control,
- D64_XC_AE, (ae << D64_XC_AE_SHIFT));
- } else {
- W_REG(&di->d64rxregs->addrlow,
- (PHYSADDRLO(pa) + di->ddoffsetlow));
- W_REG(&di->d64rxregs->addrhigh,
- di->ddoffsethigh);
- SET_REG(&di->d64rxregs->control,
- D64_RC_AE, (ae << D64_RC_AE_SHIFT));
- }
- }
-}
-
-static void _dma_fifoloopbackenable(struct dma_info *di)
-{
- DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
-
- OR_REG(&di->d64txregs->control, D64_XC_LE);
-}
-
-static void _dma_rxinit(struct dma_info *di)
-{
- DMA_TRACE(("%s: dma_rxinit\n", di->name));
-
- if (di->nrxd == 0)
- return;
-
- di->rxin = di->rxout = 0;
-
- /* clear rx descriptor ring */
- memset((void *)di->rxd64, '\0',
- (di->nrxd * sizeof(struct dma64desc)));
-
- /* DMA engine with out alignment requirement requires table to be inited
- * before enabling the engine
- */
- if (!di->aligndesc_4k)
- _dma_ddtable_init(di, DMA_RX, di->rxdpa);
-
- _dma_rxenable(di);
-
- if (di->aligndesc_4k)
- _dma_ddtable_init(di, DMA_RX, di->rxdpa);
-}
-
-static void _dma_rxenable(struct dma_info *di)
-{
- uint dmactrlflags = di->dma.dmactrlflags;
- u32 control;
-
- DMA_TRACE(("%s: dma_rxenable\n", di->name));
-
- control =
- (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
- D64_RC_RE;
-
- if ((dmactrlflags & DMA_CTRL_PEN) == 0)
- control |= D64_RC_PD;
-
- if (dmactrlflags & DMA_CTRL_ROC)
- control |= D64_RC_OC;
-
- W_REG(&di->d64rxregs->control,
- ((di->rxoffset << D64_RC_RO_SHIFT) | control));
-}
-
-static void
-_dma_rx_param_get(struct dma_info *di, u16 *rxoffset, u16 *rxbufsize)
-{
- /* the normal values fit into 16 bits */
- *rxoffset = (u16) di->rxoffset;
- *rxbufsize = (u16) di->rxbufsize;
-}
-
-/* !! rx entry routine
- * returns a pointer to the next frame received, or NULL if there are no more
- * if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is supported
- * with pkts chain
- * otherwise, it's treated as giant pkt and will be tossed.
- * The DMA scattering starts with normal DMA header, followed by first buffer data.
- * After it reaches the max size of buffer, the data continues in next DMA descriptor
- * buffer WITHOUT DMA header
- */
-static void *_dma_rx(struct dma_info *di)
-{
- struct sk_buff *p, *head, *tail;
- uint len;
- uint pkt_len;
- int resid = 0;
-
- next_frame:
- head = _dma_getnextrxp(di, false);
- if (head == NULL)
- return NULL;
-
- len = le16_to_cpu(*(u16 *) (head->data));
- DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
- dma_spin_for_len(len, head);
-
- /* set actual length */
- pkt_len = min((di->rxoffset + len), di->rxbufsize);
- __skb_trim(head, pkt_len);
- resid = len - (di->rxbufsize - di->rxoffset);
-
- /* check for single or multi-buffer rx */
- if (resid > 0) {
- tail = head;
- while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
- tail->next = p;
- pkt_len = min(resid, (int)di->rxbufsize);
- __skb_trim(p, pkt_len);
-
- tail = p;
- resid -= di->rxbufsize;
- }
-
-#ifdef BCMDBG
- if (resid > 0) {
- uint cur;
- cur =
- B2I(((R_REG(&di->d64rxregs->status0) &
- D64_RS0_CD_MASK) -
- di->rcvptrbase) & D64_RS0_CD_MASK,
- struct dma64desc);
- DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
- di->rxin, di->rxout, cur));
- }
-#endif /* BCMDBG */
-
- if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
- DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
- di->name, len));
- brcmu_pkt_buf_free_skb(head);
- di->dma.rxgiants++;
- goto next_frame;
- }
- }
-
- return head;
-}
-
-/* post receive buffers
- * return false is refill failed completely and ring is empty
- * this will stall the rx dma and user might want to call rxfill again asap
- * This unlikely happens on memory-rich NIC, but often on memory-constrained dongle
- */
-static bool _dma_rxfill(struct dma_info *di)
-{
- struct sk_buff *p;
- u16 rxin, rxout;
- u32 flags = 0;
- uint n;
- uint i;
- dmaaddr_t pa;
- uint extra_offset = 0;
- bool ring_empty;
-
- ring_empty = false;
-
- /*
- * Determine how many receive buffers we're lacking
- * from the full complement, allocate, initialize,
- * and post them, then update the chip rx lastdscr.
- */
-
- rxin = di->rxin;
- rxout = di->rxout;
-
- n = di->nrxpost - NRXDACTIVE(rxin, rxout);
-
- DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
-
- if (di->rxbufsize > BCMEXTRAHDROOM)
- extra_offset = di->rxextrahdrroom;
-
- for (i = 0; i < n; i++) {
- /* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
- size to be allocated
- */
-
- p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
-
- if (p == NULL) {
- DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
- di->name));
- if (i == 0 && dma64_rxidle(di)) {
- DMA_ERROR(("%s: rxfill64: ring is empty !\n",
- di->name));
- ring_empty = true;
- }
- di->dma.rxnobuf++;
- break;
- }
- /* reserve an extra headroom, if applicable */
- if (extra_offset)
- skb_pull(p, extra_offset);
-
- /* Do a cached write instead of uncached write since DMA_MAP
- * will flush the cache.
- */
- *(u32 *) (p->data) = 0;
-
- if (DMASGLIST_ENAB)
- memset(&di->rxp_dmah[rxout], 0,
- sizeof(struct dma_seg_map));
-
- pa = pci_map_single(di->pbus, p->data,
- di->rxbufsize, PCI_DMA_FROMDEVICE);
-
- /* save the free packet pointer */
- di->rxp[rxout] = p;
-
- /* reset flags for each descriptor */
- flags = 0;
- if (rxout == (di->nrxd - 1))
- flags = D64_CTRL1_EOT;
-
- dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
- di->rxbufsize);
- rxout = NEXTRXD(rxout);
- }
-
- di->rxout = rxout;
-
- /* update the chip lastdscr pointer */
- W_REG(&di->d64rxregs->ptr,
- di->rcvptrbase + I2B(rxout, struct dma64desc));
-
- return ring_empty;
-}
-
-/* like getnexttxp but no reclaim */
-static void *_dma_peeknexttxp(struct dma_info *di)
-{
- uint end, i;
-
- if (di->ntxd == 0)
- return NULL;
-
- end =
- B2I(((R_REG(&di->d64txregs->status0) &
- D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
- struct dma64desc);
-
- for (i = di->txin; i != end; i = NEXTTXD(i))
- if (di->txp[i])
- return di->txp[i];
-
- return NULL;
-}
-
-/* like getnextrxp but not take off the ring */
-static void *_dma_peeknextrxp(struct dma_info *di)
-{
- uint end, i;
-
- if (di->nrxd == 0)
- return NULL;
-
- end =
- B2I(((R_REG(&di->d64rxregs->status0) &
- D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
- struct dma64desc);
-
- for (i = di->rxin; i != end; i = NEXTRXD(i))
- if (di->rxp[i])
- return di->rxp[i];
-
- return NULL;
-}
-
-static void _dma_rxreclaim(struct dma_info *di)
-{
- void *p;
-
- DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
-
- while ((p = _dma_getnextrxp(di, true)))
- brcmu_pkt_buf_free_skb(p);
-}
-
-static void *_dma_getnextrxp(struct dma_info *di, bool forceall)
-{
- if (di->nrxd == 0)
- return NULL;
-
- return dma64_getnextrxp(di, forceall);
-}
-
-static void _dma_txblock(struct dma_info *di)
-{
- di->dma.txavail = 0;
-}
-
-static void _dma_txunblock(struct dma_info *di)
-{
- di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-}
-
-static uint _dma_txactive(struct dma_info *di)
-{
- return NTXDACTIVE(di->txin, di->txout);
-}
-
-static uint _dma_txpending(struct dma_info *di)
-{
- uint curr;
-
- curr =
- B2I(((R_REG(&di->d64txregs->status0) &
- D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
- struct dma64desc);
-
- return NTXDACTIVE(curr, di->txout);
-}
-
-static uint _dma_txcommitted(struct dma_info *di)
-{
- uint ptr;
- uint txin = di->txin;
-
- if (txin == di->txout)
- return 0;
-
- ptr = B2I(R_REG(&di->d64txregs->ptr), struct dma64desc);
-
- return NTXDACTIVE(di->txin, ptr);
-}
-
-static uint _dma_rxactive(struct dma_info *di)
-{
- return NRXDACTIVE(di->rxin, di->rxout);
-}
-
-static void _dma_counterreset(struct dma_info *di)
-{
- /* reset all software counter */
- di->dma.rxgiants = 0;
- di->dma.rxnobuf = 0;
- di->dma.txnobuf = 0;
-}
-
-static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
-{
- uint dmactrlflags = di->dma.dmactrlflags;
-
- if (di == NULL) {
- DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name));
- return 0;
- }
-
- dmactrlflags &= ~mask;
- dmactrlflags |= flags;
-
- /* If trying to enable parity, check if parity is actually supported */
- if (dmactrlflags & DMA_CTRL_PEN) {
- u32 control;
-
- control = R_REG(&di->d64txregs->control);
- W_REG(&di->d64txregs->control,
- control | D64_XC_PD);
- if (R_REG(&di->d64txregs->control) & D64_XC_PD) {
- /* We *can* disable it so it is supported,
- * restore control register
- */
- W_REG(&di->d64txregs->control,
- control);
- } else {
- /* Not supported, don't allow it to be enabled */
- dmactrlflags &= ~DMA_CTRL_PEN;
- }
- }
-
- di->dma.dmactrlflags = dmactrlflags;
-
- return dmactrlflags;
-}
-
-/* get the address of the var in order to change later */
-static unsigned long _dma_getvar(struct dma_info *di, const char *name)
-{
- if (!strcmp(name, "&txavail"))
- return (unsigned long)&(di->dma.txavail);
- return 0;
-}
-
-static
-u8 dma_align_sizetobits(uint size)
-{
- u8 bitpos = 0;
- while (size >>= 1) {
- bitpos++;
- }
- return bitpos;
-}
-
-/* This function ensures that the DMA descriptor ring will not get allocated
- * across Page boundary. If the allocation is done across the page boundary
- * at the first time, then it is freed and the allocation is done at
- * descriptor ring size aligned location. This will ensure that the ring will
- * not cross page boundary
- */
-static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
- u16 *alignbits, uint *alloced,
- dmaaddr_t *descpa)
-{
- void *va;
- u32 desc_strtaddr;
- u32 alignbytes = 1 << *alignbits;
-
- va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
-
- if (NULL == va)
- return NULL;
-
- desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
- if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
- & boundary)) {
- *alignbits = dma_align_sizetobits(size);
- pci_free_consistent(di->pbus, size, va, *descpa);
- va = dma_alloc_consistent(di->pbus, size, *alignbits,
- alloced, descpa);
- }
- return va;
-}
-
-/* 64-bit DMA functions */
-
-static void dma64_txinit(struct dma_info *di)
-{
- u32 control = D64_XC_XE;
-
- DMA_TRACE(("%s: dma_txinit\n", di->name));
-
- if (di->ntxd == 0)
- return;
-
- di->txin = di->txout = 0;
- di->dma.txavail = di->ntxd - 1;
-
- /* clear tx descriptor ring */
- memset((void *)di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));
-
- /* DMA engine with out alignment requirement requires table to be inited
- * before enabling the engine
- */
- if (!di->aligndesc_4k)
- _dma_ddtable_init(di, DMA_TX, di->txdpa);
-
- if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
- control |= D64_XC_PD;
- OR_REG(&di->d64txregs->control, control);
-
- /* DMA engine with alignment requirement requires table to be inited
- * before enabling the engine
- */
- if (di->aligndesc_4k)
- _dma_ddtable_init(di, DMA_TX, di->txdpa);
-}
-
-static bool dma64_txenabled(struct dma_info *di)
-{
- u32 xc;
-
- /* If the chip is dead, it is not enabled :-) */
- xc = R_REG(&di->d64txregs->control);
- return (xc != 0xffffffff) && (xc & D64_XC_XE);
-}
-
-static void dma64_txsuspend(struct dma_info *di)
-{
- DMA_TRACE(("%s: dma_txsuspend\n", di->name));
-
- if (di->ntxd == 0)
- return;
-
- OR_REG(&di->d64txregs->control, D64_XC_SE);
-}
-
-static void dma64_txresume(struct dma_info *di)
-{
- DMA_TRACE(("%s: dma_txresume\n", di->name));
-
- if (di->ntxd == 0)
- return;
-
- AND_REG(&di->d64txregs->control, ~D64_XC_SE);
-}
-
-static bool dma64_txsuspended(struct dma_info *di)
-{
- return (di->ntxd == 0) ||
- ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
- D64_XC_SE);
-}
-
-static void dma64_txreclaim(struct dma_info *di, enum txd_range range)
-{
- void *p;
-
- DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
- (range == DMA_RANGE_ALL) ? "all" :
- ((range ==
- DMA_RANGE_TRANSMITTED) ? "transmitted" :
- "transferred")));
-
- if (di->txin == di->txout)
- return;
-
- while ((p = dma64_getnexttxp(di, range))) {
- /* For unframed data, we don't have any packets to free */
- if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
- brcmu_pkt_buf_free_skb(p);
- }
-}
-
-static bool dma64_txstopped(struct dma_info *di)
-{
- return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
- D64_XS0_XS_STOPPED);
-}
-
-static bool dma64_rxstopped(struct dma_info *di)
-{
- return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
- D64_RS0_RS_STOPPED);
-}
-
-static bool dma64_alloc(struct dma_info *di, uint direction)
-{
- u16 size;
- uint ddlen;
- void *va;
- uint alloced = 0;
- u16 align;
- u16 align_bits;
-
- ddlen = sizeof(struct dma64desc);
-
- size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
- align_bits = di->dmadesc_align;
- align = (1 << align_bits);
-
- if (direction == DMA_TX) {
- va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
- &alloced, &di->txdpaorig);
- if (va == NULL) {
- DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
- return false;
- }
- align = (1 << align_bits);
- di->txd64 = (struct dma64desc *)
- roundup((unsigned long)va, align);
- di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
- PHYSADDRLOSET(di->txdpa,
- PHYSADDRLO(di->txdpaorig) + di->txdalign);
- PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
- di->txdalloc = alloced;
- } else {
- va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
- &alloced, &di->rxdpaorig);
- if (va == NULL) {
- DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
- return false;
- }
- align = (1 << align_bits);
- di->rxd64 = (struct dma64desc *)
- roundup((unsigned long)va, align);
- di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
- PHYSADDRLOSET(di->rxdpa,
- PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
- PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
- di->rxdalloc = alloced;
- }
-
- return true;
-}
-
-static bool dma64_txreset(struct dma_info *di)
-{
- u32 status;
-
- if (di->ntxd == 0)
- return true;
-
- /* suspend tx DMA first */
- W_REG(&di->d64txregs->control, D64_XC_SE);
- SPINWAIT(((status =
- (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
- != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
- && (status != D64_XS0_XS_STOPPED), 10000);
-
- W_REG(&di->d64txregs->control, 0);
- SPINWAIT(((status =
- (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
- != D64_XS0_XS_DISABLED), 10000);
-
- /* wait for the last transaction to complete */
- udelay(300);
-
- return status == D64_XS0_XS_DISABLED;
-}
-
-static bool dma64_rxidle(struct dma_info *di)
-{
- DMA_TRACE(("%s: dma_rxidle\n", di->name));
-
- if (di->nrxd == 0)
- return true;
-
- return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
- (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
-}
-
-static bool dma64_rxreset(struct dma_info *di)
-{
- u32 status;
-
- if (di->nrxd == 0)
- return true;
-
- W_REG(&di->d64rxregs->control, 0);
- SPINWAIT(((status =
- (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
- != D64_RS0_RS_DISABLED), 10000);
-
- return status == D64_RS0_RS_DISABLED;
-}
-
-static bool dma64_rxenabled(struct dma_info *di)
-{
- u32 rc;
-
- rc = R_REG(&di->d64rxregs->control);
- return (rc != 0xffffffff) && (rc & D64_RC_RE);
-}
-
-static bool dma64_txsuspendedidle(struct dma_info *di)
-{
-
- if (di->ntxd == 0)
- return true;
-
- if (!(R_REG(&di->d64txregs->control) & D64_XC_SE))
- return 0;
-
- if ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
- D64_XS0_XS_IDLE)
- return 1;
-
- return 0;
-}
-
-/* Useful when sending unframed data. This allows us to get a progress report from the DMA.
- * We return a pointer to the beginning of the DATA buffer of the current descriptor.
- * If DMA is idle, we return NULL.
- */
-static void *dma64_getpos(struct dma_info *di, bool direction)
-{
- void *va;
- bool idle;
- u32 cd_offset;
-
- if (direction == DMA_TX) {
- cd_offset =
- R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK;
- idle = !NTXDACTIVE(di->txin, di->txout);
- va = di->txp[B2I(cd_offset, struct dma64desc)];
- } else {
- cd_offset =
- R_REG(&di->d64rxregs->status0) & D64_XS0_CD_MASK;
- idle = !NRXDACTIVE(di->rxin, di->rxout);
- va = di->rxp[B2I(cd_offset, struct dma64desc)];
- }
-
- /* If DMA is IDLE, return NULL */
- if (idle) {
- DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
- va = NULL;
- }
-
- return va;
-}
-
-/* TX of unframed data
- *
- * Adds a DMA ring descriptor for the data pointed to by "buf".
- * This is for DMA of a buffer of data and is unlike other dma TX functions
- * that take a pointer to a "packet"
- * Each call to this is results in a single descriptor being added for "len" bytes of
- * data starting at "buf", it doesn't handle chained buffers.
- */
-static int
-dma64_txunframed(struct dma_info *di, void *buf, uint len, bool commit)
-{
- u16 txout;
- u32 flags = 0;
- dmaaddr_t pa; /* phys addr */
-
- txout = di->txout;
-
- /* return nonzero if out of tx descriptors */
- if (NEXTTXD(txout) == di->txin)
- goto outoftxd;
-
- if (len == 0)
- return 0;
-
- pa = pci_map_single(di->pbus, buf, len, PCI_DMA_TODEVICE);
-
- flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);
-
- if (txout == (di->ntxd - 1))
- flags |= D64_CTRL1_EOT;
-
- dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
-
- /* save the buffer pointer - used by dma_getpos */
- di->txp[txout] = buf;
-
- txout = NEXTTXD(txout);
- /* bump the tx descriptor index */
- di->txout = txout;
-
- /* kick the chip */
- if (commit) {
- W_REG(&di->d64txregs->ptr,
- di->xmtptrbase + I2B(txout, struct dma64desc));
- }
-
- /* tx flow control */
- di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-
- return 0;
-
- outoftxd:
- DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
- di->dma.txavail = 0;
- di->dma.txnobuf++;
- return -1;
-}
-
-/* !! tx entry routine
- * WARNING: call must check the return value for error.
- * the error(toss frames) could be fatal and cause many subsequent hard to debug problems
- */
-static int dma64_txfast(struct dma_info *di, struct sk_buff *p0,
- bool commit)
-{
- struct sk_buff *p, *next;
- unsigned char *data;
- uint len;
- u16 txout;
- u32 flags = 0;
- dmaaddr_t pa;
-
- DMA_TRACE(("%s: dma_txfast\n", di->name));
-
- txout = di->txout;
-
- /*
- * Walk the chain of packet buffers
- * allocating and initializing transmit descriptor entries.
- */
- for (p = p0; p; p = next) {
- uint nsegs, j;
- struct dma_seg_map *map;
-
- data = p->data;
- len = p->len;
- next = p->next;
-
- /* return nonzero if out of tx descriptors */
- if (NEXTTXD(txout) == di->txin)
- goto outoftxd;
-
- if (len == 0)
- continue;
-
- /* get physical address of buffer start */
- if (DMASGLIST_ENAB)
- memset(&di->txp_dmah[txout], 0,
- sizeof(struct dma_seg_map));
-
- pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
-
- if (DMASGLIST_ENAB) {
- map = &di->txp_dmah[txout];
-
- /* See if all the segments can be accounted for */
- if (map->nsegs >
- (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
- 1))
- goto outoftxd;
-
- nsegs = map->nsegs;
- } else
- nsegs = 1;
-
- for (j = 1; j <= nsegs; j++) {
- flags = 0;
- if (p == p0 && j == 1)
- flags |= D64_CTRL1_SOF;
-
- /* With a DMA segment list, Descriptor table is filled
- * using the segment list instead of looping over
- * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
- * end of segment list is reached.
- */
- if ((!DMASGLIST_ENAB && next == NULL) ||
- (DMASGLIST_ENAB && j == nsegs))
- flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
- if (txout == (di->ntxd - 1))
- flags |= D64_CTRL1_EOT;
-
- if (DMASGLIST_ENAB) {
- len = map->segs[j - 1].length;
- pa = map->segs[j - 1].addr;
- }
- dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
-
- txout = NEXTTXD(txout);
- }
-
- /* See above. No need to loop over individual buffers */
- if (DMASGLIST_ENAB)
- break;
- }
-
- /* if last txd eof not set, fix it */
- if (!(flags & D64_CTRL1_EOF))
- W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
- BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
-
- /* save the packet */
- di->txp[PREVTXD(txout)] = p0;
-
- /* bump the tx descriptor index */
- di->txout = txout;
-
- /* kick the chip */
- if (commit)
- W_REG(&di->d64txregs->ptr,
- di->xmtptrbase + I2B(txout, struct dma64desc));
-
- /* tx flow control */
- di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-
- return 0;
-
- outoftxd:
- DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
- brcmu_pkt_buf_free_skb(p0);
- di->dma.txavail = 0;
- di->dma.txnobuf++;
- return -1;
-}
-
-/*
- * Reclaim next completed txd (txds if using chained buffers) in the range
- * specified and return associated packet.
- * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have be
- * transmitted as noted by the hardware "CurrDescr" pointer.
- * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have be
- * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
- * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
- * return associated packet regardless of the value of hardware pointers.
- */
-static void *dma64_getnexttxp(struct dma_info *di, enum txd_range range)
-{
- u16 start, end, i;
- u16 active_desc;
- void *txp;
-
- DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
- (range == DMA_RANGE_ALL) ? "all" :
- ((range ==
- DMA_RANGE_TRANSMITTED) ? "transmitted" :
- "transferred")));
-
- if (di->ntxd == 0)
- return NULL;
-
- txp = NULL;
-
- start = di->txin;
- if (range == DMA_RANGE_ALL)
- end = di->txout;
- else {
- dma64regs_t *dregs = di->d64txregs;
-
- end = (u16) (B2I(((R_REG(&dregs->status0) &
- D64_XS0_CD_MASK) -
- di->xmtptrbase) & D64_XS0_CD_MASK,
- struct dma64desc));
-
- if (range == DMA_RANGE_TRANSFERED) {
- active_desc =
- (u16) (R_REG(&dregs->status1) &
- D64_XS1_AD_MASK);
- active_desc =
- (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
- active_desc = B2I(active_desc, struct dma64desc);
- if (end != active_desc)
- end = PREVTXD(active_desc);
- }
- }
-
- if ((start == 0) && (end > di->txout))
- goto bogus;
-
- for (i = start; i != end && !txp; i = NEXTTXD(i)) {
- dmaaddr_t pa;
- struct dma_seg_map *map = NULL;
- uint size, j, nsegs;
-
- PHYSADDRLOSET(pa,
- (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
- di->dataoffsetlow));
- PHYSADDRHISET(pa,
- (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
- di->dataoffsethigh));
-
- if (DMASGLIST_ENAB) {
- map = &di->txp_dmah[i];
- size = map->origsize;
- nsegs = map->nsegs;
- } else {
- size =
- (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
- D64_CTRL2_BC_MASK);
- nsegs = 1;
- }
-
- for (j = nsegs; j > 0; j--) {
- W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
- W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
-
- txp = di->txp[i];
- di->txp[i] = NULL;
- if (j > 1)
- i = NEXTTXD(i);
- }
-
- pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
- }
-
- di->txin = i;
-
- /* tx flow control */
- di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-
- return txp;
-
- bogus:
- DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));
- return NULL;
-}
-
-static void *dma64_getnextrxp(struct dma_info *di, bool forceall)
-{
- uint i, curr;
- void *rxp;
- dmaaddr_t pa;
-
- i = di->rxin;
-
- /* return if no packets posted */
- if (i == di->rxout)
- return NULL;
-
- curr =
- B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
- di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
-
- /* ignore curr if forceall */
- if (!forceall && (i == curr))
- return NULL;
-
- /* get the packet pointer that corresponds to the rx descriptor */
- rxp = di->rxp[i];
- di->rxp[i] = NULL;
-
- PHYSADDRLOSET(pa,
- (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
- di->dataoffsetlow));
- PHYSADDRHISET(pa,
- (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
- di->dataoffsethigh));
-
- /* clear this packet from the descriptor ring */
- pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
-
- W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
- W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
-
- di->rxin = NEXTRXD(i);
-
- return rxp;
-}
-
-static bool _dma64_addrext(dma64regs_t *dma64regs)
-{
- u32 w;
- OR_REG(&dma64regs->control, D64_XC_AE);
- w = R_REG(&dma64regs->control);
- AND_REG(&dma64regs->control, ~D64_XC_AE);
- return (w & D64_XC_AE) == D64_XC_AE;
-}
-
-/*
- * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
- */
-static void dma64_txrotate(struct dma_info *di)
-{
- u16 ad;
- uint nactive;
- uint rot;
- u16 old, new;
- u32 w;
- u16 first, last;
-
- nactive = _dma_txactive(di);
- ad = (u16) (B2I((((R_REG(&di->d64txregs->status1) &
- D64_XS1_AD_MASK) - di->xmtptrbase) &
- D64_XS1_AD_MASK), struct dma64desc));
- rot = TXD(ad - di->txin);
-
- /* full-ring case is a lot harder - don't worry about this */
- if (rot >= (di->ntxd - nactive)) {
- DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
- return;
- }
-
- first = di->txin;
- last = PREVTXD(di->txout);
-
- /* move entries starting at last and moving backwards to first */
- for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
- new = TXD(old + rot);
-
- /*
- * Move the tx dma descriptor.
- * EOT is set only in the last entry in the ring.
- */
- w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
- if (new == (di->ntxd - 1))
- w |= D64_CTRL1_EOT;
- W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));
-
- w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
- W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));
-
- W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
- W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
-
- /* zap the old tx dma descriptor address field */
- W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
- W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));
-
- /* move the corresponding txp[] entry */
- di->txp[new] = di->txp[old];
-
- /* Move the map */
- if (DMASGLIST_ENAB) {
- memcpy(&di->txp_dmah[new], &di->txp_dmah[old],
- sizeof(struct dma_seg_map));
- memset(&di->txp_dmah[old], 0,
- sizeof(struct dma_seg_map));
- }
-
- di->txp[old] = NULL;
- }
-
- /* update txin and txout */
- di->txin = ad;
- di->txout = TXD(di->txout + rot);
- di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-
- /* kick the chip */
- W_REG(&di->d64txregs->ptr,
- di->xmtptrbase + I2B(di->txout, struct dma64desc));
-}
-
-uint dma_addrwidth(struct si_pub *sih, void *dmaregs)
-{
- /* Perform 64-bit checks only if we want to advertise 64-bit (> 32bit) capability) */
- /* DMA engine is 64-bit capable */
- if ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
- /* backplane are 64-bit capable */
- if (ai_backplane64(sih))
- /* If bus is System Backplane or PCIE then we can access 64-bits */
- if ((sih->bustype == SI_BUS) ||
- ((sih->bustype == PCI_BUS) &&
- (sih->buscoretype == PCIE_CORE_ID)))
- return DMADDRWIDTH_64;
- }
- /* DMA hardware not supported by this driver*/
- return DMADDRWIDTH_64;
-}
-
-/*
- * Mac80211 initiated actions sometimes require packets in the DMA queue to be
- * modified. The modified portion of the packet is not under control of the DMA
- * engine. This function calls a caller-supplied function for each packet in
- * the caller specified dma chain.
- */
-void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
- (void *pkt, void *arg_a), void *arg_a)
-{
- struct dma_info *di = (struct dma_info *) dmah;
- uint i = di->txin;
- uint end = di->txout;
- struct sk_buff *skb;
- struct ieee80211_tx_info *tx_info;
-
- while (i != end) {
- skb = (struct sk_buff *)di->txp[i];
- if (skb != NULL) {
- tx_info = (struct ieee80211_tx_info *)skb->cb;
- (callback_fnc)(tx_info, arg_a);
- }
- i = NEXTTXD(i);
- }
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/dma.h b/drivers/staging/brcm80211/brcmsmac/dma.h
deleted file mode 100644
index 9c8b9a6a557..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/dma.h
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_DMA_H_
-#define _BRCM_DMA_H_
-
-#include "types.h" /* forward structure declarations */
-
-/* DMA structure:
- * support two DMA engines: 32 bits address or 64 bit addressing
- * basic DMA register set is per channel(transmit or receive)
- * a pair of channels is defined for convenience
- */
-
-/* 32 bits addressing */
-
-struct dma32diag { /* diag access */
- u32 fifoaddr; /* diag address */
- u32 fifodatalow; /* low 32bits of data */
- u32 fifodatahigh; /* high 32bits of data */
- u32 pad; /* reserved */
-};
-
-/* 64 bits addressing */
-
-/* dma registers per channel(xmt or rcv) */
-struct dma64regs {
- u32 control; /* enable, et al */
- u32 ptr; /* last descriptor posted to chip */
- u32 addrlow; /* descriptor ring base address low 32-bits (8K aligned) */
- u32 addrhigh; /* descriptor ring base address bits 63:32 (8K aligned) */
- u32 status0; /* current descriptor, xmt state */
- u32 status1; /* active descriptor, xmt error */
-};
-
-/* map/unmap direction */
-#define DMA_TX 1 /* TX direction for DMA */
-#define DMA_RX 2 /* RX direction for DMA */
-#define BUS_SWAP32(v) (v)
-
-/* range param for dma_getnexttxp() and dma_txreclaim */
-enum txd_range {
- DMA_RANGE_ALL = 1,
- DMA_RANGE_TRANSMITTED,
- DMA_RANGE_TRANSFERED
-};
-
-/* dma function type */
-typedef void (*di_detach_t) (struct dma_pub *dmah);
-typedef bool(*di_txreset_t) (struct dma_pub *dmah);
-typedef bool(*di_rxreset_t) (struct dma_pub *dmah);
-typedef bool(*di_rxidle_t) (struct dma_pub *dmah);
-typedef void (*di_txinit_t) (struct dma_pub *dmah);
-typedef bool(*di_txenabled_t) (struct dma_pub *dmah);
-typedef void (*di_rxinit_t) (struct dma_pub *dmah);
-typedef void (*di_txsuspend_t) (struct dma_pub *dmah);
-typedef void (*di_txresume_t) (struct dma_pub *dmah);
-typedef bool(*di_txsuspended_t) (struct dma_pub *dmah);
-typedef bool(*di_txsuspendedidle_t) (struct dma_pub *dmah);
-typedef int (*di_txfast_t) (struct dma_pub *dmah, struct sk_buff *p,
- bool commit);
-typedef int (*di_txunframed_t) (struct dma_pub *dmah, void *p, uint len,
- bool commit);
-typedef void *(*di_getpos_t) (struct dma_pub *di, bool direction);
-typedef void (*di_fifoloopbackenable_t) (struct dma_pub *dmah);
-typedef bool(*di_txstopped_t) (struct dma_pub *dmah);
-typedef bool(*di_rxstopped_t) (struct dma_pub *dmah);
-typedef bool(*di_rxenable_t) (struct dma_pub *dmah);
-typedef bool(*di_rxenabled_t) (struct dma_pub *dmah);
-typedef void *(*di_rx_t) (struct dma_pub *dmah);
-typedef bool(*di_rxfill_t) (struct dma_pub *dmah);
-typedef void (*di_txreclaim_t) (struct dma_pub *dmah, enum txd_range range);
-typedef void (*di_rxreclaim_t) (struct dma_pub *dmah);
-typedef unsigned long (*di_getvar_t) (struct dma_pub *dmah,
- const char *name);
-typedef void *(*di_getnexttxp_t) (struct dma_pub *dmah, enum txd_range range);
-typedef void *(*di_getnextrxp_t) (struct dma_pub *dmah, bool forceall);
-typedef void *(*di_peeknexttxp_t) (struct dma_pub *dmah);
-typedef void *(*di_peeknextrxp_t) (struct dma_pub *dmah);
-typedef void (*di_rxparam_get_t) (struct dma_pub *dmah, u16 *rxoffset,
- u16 *rxbufsize);
-typedef void (*di_txblock_t) (struct dma_pub *dmah);
-typedef void (*di_txunblock_t) (struct dma_pub *dmah);
-typedef uint(*di_txactive_t) (struct dma_pub *dmah);
-typedef void (*di_txrotate_t) (struct dma_pub *dmah);
-typedef void (*di_counterreset_t) (struct dma_pub *dmah);
-typedef uint(*di_ctrlflags_t) (struct dma_pub *dmah, uint mask, uint flags);
-typedef char *(*di_dump_t) (struct dma_pub *dmah, struct brcmu_strbuf *b,
- bool dumpring);
-typedef char *(*di_dumptx_t) (struct dma_pub *dmah, struct brcmu_strbuf *b,
- bool dumpring);
-typedef char *(*di_dumprx_t) (struct dma_pub *dmah, struct brcmu_strbuf *b,
- bool dumpring);
-typedef uint(*di_rxactive_t) (struct dma_pub *dmah);
-typedef uint(*di_txpending_t) (struct dma_pub *dmah);
-typedef uint(*di_txcommitted_t) (struct dma_pub *dmah);
-
-/* dma opsvec */
-struct di_fcn_s {
- di_detach_t detach;
- di_txinit_t txinit;
- di_txreset_t txreset;
- di_txenabled_t txenabled;
- di_txsuspend_t txsuspend;
- di_txresume_t txresume;
- di_txsuspended_t txsuspended;
- di_txsuspendedidle_t txsuspendedidle;
- di_txfast_t txfast;
- di_txunframed_t txunframed;
- di_getpos_t getpos;
- di_txstopped_t txstopped;
- di_txreclaim_t txreclaim;
- di_getnexttxp_t getnexttxp;
- di_peeknexttxp_t peeknexttxp;
- di_txblock_t txblock;
- di_txunblock_t txunblock;
- di_txactive_t txactive;
- di_txrotate_t txrotate;
-
- di_rxinit_t rxinit;
- di_rxreset_t rxreset;
- di_rxidle_t rxidle;
- di_rxstopped_t rxstopped;
- di_rxenable_t rxenable;
- di_rxenabled_t rxenabled;
- di_rx_t rx;
- di_rxfill_t rxfill;
- di_rxreclaim_t rxreclaim;
- di_getnextrxp_t getnextrxp;
- di_peeknextrxp_t peeknextrxp;
- di_rxparam_get_t rxparam_get;
-
- di_fifoloopbackenable_t fifoloopbackenable;
- di_getvar_t d_getvar;
- di_counterreset_t counterreset;
- di_ctrlflags_t ctrlflags;
- di_dump_t dump;
- di_dumptx_t dumptx;
- di_dumprx_t dumprx;
- di_rxactive_t rxactive;
- di_txpending_t txpending;
- di_txcommitted_t txcommitted;
- uint endnum;
-};
-
-/*
- * Exported data structure (read-only)
- */
-/* export structure */
-struct dma_pub {
- const struct di_fcn_s *di_fn; /* DMA function pointers */
- uint txavail; /* # free tx descriptors */
- uint dmactrlflags; /* dma control flags */
-
- /* rx error counters */
- uint rxgiants; /* rx giant frames */
- uint rxnobuf; /* rx out of dma descriptors */
- /* tx error counters */
- uint txnobuf; /* tx out of dma descriptors */
-};
-
-extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
- void *dmaregstx, void *dmaregsrx, uint ntxd,
- uint nrxd, uint rxbufsize, int rxextheadroom,
- uint nrxpost, uint rxoffset, uint *msg_level);
-
-extern const struct di_fcn_s dma64proc;
-
-#define dma_detach(di) (dma64proc.detach(di))
-#define dma_txreset(di) (dma64proc.txreset(di))
-#define dma_rxreset(di) (dma64proc.rxreset(di))
-#define dma_rxidle(di) (dma64proc.rxidle(di))
-#define dma_txinit(di) (dma64proc.txinit(di))
-#define dma_txenabled(di) (dma64proc.txenabled(di))
-#define dma_rxinit(di) (dma64proc.rxinit(di))
-#define dma_txsuspend(di) (dma64proc.txsuspend(di))
-#define dma_txresume(di) (dma64proc.txresume(di))
-#define dma_txsuspended(di) (dma64proc.txsuspended(di))
-#define dma_txsuspendedidle(di) (dma64proc.txsuspendedidle(di))
-#define dma_txfast(di, p, commit) (dma64proc.txfast(di, p, commit))
-#define dma_txunframed(di, p, l, commit)(dma64proc.txunframed(di, p, l, commit))
-#define dma_getpos(di, dir) (dma64proc.getpos(di, dir))
-#define dma_fifoloopbackenable(di) (dma64proc.fifoloopbackenable(di))
-#define dma_txstopped(di) (dma64proc.txstopped(di))
-#define dma_rxstopped(di) (dma64proc.rxstopped(di))
-#define dma_rxenable(di) (dma64proc.rxenable(di))
-#define dma_rxenabled(di) (dma64proc.rxenabled(di))
-#define dma_rx(di) (dma64proc.rx(di))
-#define dma_rxfill(di) (dma64proc.rxfill(di))
-#define dma_txreclaim(di, range) (dma64proc.txreclaim(di, range))
-#define dma_rxreclaim(di) (dma64proc.rxreclaim(di))
-#define dma_getvar(di, name) (dma64proc.d_getvar(di, name))
-#define dma_getnexttxp(di, range) (dma64proc.getnexttxp(di, range))
-#define dma_getnextrxp(di, forceall) (dma64proc.getnextrxp(di, forceall))
-#define dma_peeknexttxp(di) (dma64proc.peeknexttxp(di))
-#define dma_peeknextrxp(di) (dma64proc.peeknextrxp(di))
-#define dma_rxparam_get(di, off, bufs) (dma64proc.rxparam_get(di, off, bufs))
-
-#define dma_txblock(di) (dma64proc.txblock(di))
-#define dma_txunblock(di) (dma64proc.txunblock(di))
-#define dma_txactive(di) (dma64proc.txactive(di))
-#define dma_rxactive(di) (dma64proc.rxactive(di))
-#define dma_txrotate(di) (dma64proc.txrotate(di))
-#define dma_counterreset(di) (dma64proc.counterreset(di))
-#define dma_ctrlflags(di, mask, flags) (dma64proc.ctrlflags((di), (mask), (flags)))
-#define dma_txpending(di) (dma64proc.txpending(di))
-#define dma_txcommitted(di) (dma64proc.txcommitted(di))
-
-
-/* return addresswidth allowed
- * This needs to be done after SB attach but before dma attach.
- * SB attach provides ability to probe backplane and dma core capabilities
- * This info is needed by DMA_ALLOC_CONSISTENT in dma attach
- */
-extern uint dma_addrwidth(struct si_pub *sih, void *dmaregs);
-void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
- (void *pkt, void *arg_a), void *arg_a);
-
-/*
- * DMA(Bug) on some chips seems to declare that the packet is ready, but the
- * packet length is not updated yet (by DMA) on the expected time.
- * Workaround is to hold processor till DMA updates the length, and stay off
- * the bus to allow DMA update the length in buffer
- */
-static inline void dma_spin_for_len(uint len, struct sk_buff *head)
-{
-#if defined(__mips__)
- if (!len) {
- while (!(len = *(u16 *) KSEG1ADDR(head->data)))
- udelay(1);
-
- *(u16 *) (head->data) = cpu_to_le16((u16) len);
- }
-#endif /* defined(__mips__) */
-}
-
-#endif /* _BRCM_DMA_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/mac80211_if.c b/drivers/staging/brcm80211/brcmsmac/mac80211_if.c
deleted file mode 100644
index d6de44e430d..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/mac80211_if.c
+++ /dev/null
@@ -1,1934 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#define __UNDEF_NO_VERSION__
-
-#include <linux/etherdevice.h>
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/firmware.h>
-#include <linux/interrupt.h>
-#include <net/mac80211.h>
-#include <defs.h>
-#include "nicpci.h"
-#include "phy/phy_int.h"
-#include "d11.h"
-#include "channel.h"
-#include "scb.h"
-#include "pub.h"
-#include "ucode_loader.h"
-#include "mac80211_if.h"
-
-#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
-
-#define LOCK(wl) spin_lock_bh(&(wl)->lock)
-#define UNLOCK(wl) spin_unlock_bh(&(wl)->lock)
-
-/* locking from inside brcms_isr */
-#define ISR_LOCK(wl, flags)\
- do {\
- spin_lock(&(wl)->isr_lock);\
- (void)(flags); } \
- while (0)
-
-#define ISR_UNLOCK(wl, flags)\
- do {\
- spin_unlock(&(wl)->isr_lock);\
- (void)(flags); } \
- while (0)
-
-/* locking under LOCK() to synchronize with brcms_isr */
-#define INT_LOCK(wl, flags) spin_lock_irqsave(&(wl)->isr_lock, flags)
-#define INT_UNLOCK(wl, flags) spin_unlock_irqrestore(&(wl)->isr_lock, flags)
-
-static void brcms_timer(unsigned long data);
-static void _brcms_timer(struct brcms_timer *t);
-
-
-static int ieee_hw_init(struct ieee80211_hw *hw);
-static int ieee_hw_rate_init(struct ieee80211_hw *hw);
-
-static int wl_linux_watchdog(void *ctx);
-
-/* Flags we support */
-#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
- FIF_FCSFAIL | \
- FIF_PLCPFAIL | \
- FIF_CONTROL | \
- FIF_OTHER_BSS | \
- FIF_BCN_PRBRESP_PROMISC)
-
-static int n_adapters_found;
-
-static int brcms_request_fw(struct brcms_info *wl, struct pci_dev *pdev);
-static void brcms_release_fw(struct brcms_info *wl);
-
-/* local prototypes */
-static void brcms_dpc(unsigned long data);
-static irqreturn_t brcms_isr(int irq, void *dev_id);
-
-static int __devinit brcms_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static void brcms_remove(struct pci_dev *pdev);
-static void brcms_free(struct brcms_info *wl);
-static void brcms_set_basic_rate(struct wl_rateset *rs, u16 rate, bool is_br);
-
-MODULE_AUTHOR("Broadcom Corporation");
-MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
-MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
-MODULE_LICENSE("Dual BSD/GPL");
-
-/* recognized PCI IDs */
-static DEFINE_PCI_DEVICE_TABLE(brcms_pci_id_table) = {
- {PCI_VENDOR_ID_BROADCOM, 0x4357, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 43225 2G */
- {PCI_VENDOR_ID_BROADCOM, 0x4353, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 43224 DUAL */
- {PCI_VENDOR_ID_BROADCOM, 0x4727, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 4313 DUAL */
- /* 43224 Ven */
- {PCI_VENDOR_ID_BROADCOM, 0x0576, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, brcms_pci_id_table);
-
-#ifdef BCMDBG
-static int msglevel = 0xdeadbeef;
-module_param(msglevel, int, 0);
-static int phymsglevel = 0xdeadbeef;
-module_param(phymsglevel, int, 0);
-#endif /* BCMDBG */
-
-#define HW_TO_WL(hw) (hw->priv)
-#define WL_TO_HW(wl) (wl->pub->ieee_hw)
-
-/* MAC80211 callback functions */
-static int brcms_ops_start(struct ieee80211_hw *hw);
-static void brcms_ops_stop(struct ieee80211_hw *hw);
-static int brcms_ops_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-static void brcms_ops_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed);
-static void brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u32 changed);
-static void brcms_ops_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags, u64 multicast);
-static int brcms_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
- bool set);
-static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw);
-static void brcms_ops_sw_scan_complete(struct ieee80211_hw *hw);
-static void brcms_ops_set_tsf(struct ieee80211_hw *hw, u64 tsf);
-static int brcms_ops_get_stats(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats);
-static void brcms_ops_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta);
-static int brcms_ops_conf_tx(struct ieee80211_hw *hw, u16 queue,
- const struct ieee80211_tx_queue_params *params);
-static u64 brcms_ops_get_tsf(struct ieee80211_hw *hw);
-static int brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-static int brcms_ops_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-static int brcms_ops_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size);
-static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw);
-static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop);
-
-static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct brcms_info *wl = hw->priv;
-
- LOCK(wl);
- if (!wl->pub->up) {
- wiphy_err(wl->wiphy, "ops->tx called while down\n");
- kfree_skb(skb);
- goto done;
- }
- brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
- done:
- UNLOCK(wl);
-}
-
-static int brcms_ops_start(struct ieee80211_hw *hw)
-{
- struct brcms_info *wl = hw->priv;
- bool blocked;
- /*
- struct ieee80211_channel *curchan = hw->conf.channel;
- */
-
- ieee80211_wake_queues(hw);
- LOCK(wl);
- blocked = brcms_rfkill_set_hw_state(wl);
- UNLOCK(wl);
- if (!blocked)
- wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
-
- return 0;
-}
-
-static void brcms_ops_stop(struct ieee80211_hw *hw)
-{
- ieee80211_stop_queues(hw);
-}
-
-static int
-brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct brcms_info *wl;
- int err;
-
- /* Just STA for now */
- if (vif->type != NL80211_IFTYPE_AP &&
- vif->type != NL80211_IFTYPE_MESH_POINT &&
- vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_WDS &&
- vif->type != NL80211_IFTYPE_ADHOC) {
- wiphy_err(hw->wiphy, "%s: Attempt to add type %d, only"
- " STA for now\n", __func__, vif->type);
- return -EOPNOTSUPP;
- }
-
- wl = HW_TO_WL(hw);
- LOCK(wl);
- err = brcms_up(wl);
- UNLOCK(wl);
-
- if (err != 0) {
- wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
- err);
- }
- return err;
-}
-
-static void
-brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct brcms_info *wl;
-
- wl = HW_TO_WL(hw);
-
- /* put driver in down state */
- LOCK(wl);
- brcms_down(wl);
- UNLOCK(wl);
-}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-static int
-ieee_set_channel(struct ieee80211_hw *hw, struct ieee80211_channel *chan,
- enum nl80211_channel_type type)
-{
- struct brcms_info *wl = HW_TO_WL(hw);
- int err = 0;
-
- switch (type) {
- case NL80211_CHAN_HT20:
- case NL80211_CHAN_NO_HT:
- err = brcms_c_set(wl->wlc, BRCM_SET_CHANNEL, chan->hw_value);
- break;
- case NL80211_CHAN_HT40MINUS:
- case NL80211_CHAN_HT40PLUS:
- wiphy_err(hw->wiphy,
- "%s: Need to implement 40 Mhz Channels!\n", __func__);
- err = 1;
- break;
- }
-
- if (err)
- return -EIO;
- return err;
-}
-
-static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
-{
- struct ieee80211_conf *conf = &hw->conf;
- struct brcms_info *wl = HW_TO_WL(hw);
- int err = 0;
- int new_int;
- struct wiphy *wiphy = hw->wiphy;
-
- LOCK(wl);
- if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
- if (brcms_c_set_par(wl->wlc, IOV_BCN_LI_BCN,
- conf->listen_interval) < 0) {
- wiphy_err(wiphy, "%s: Error setting listen_interval\n",
- __func__);
- err = -EIO;
- goto config_out;
- }
- brcms_c_get_par(wl->wlc, IOV_BCN_LI_BCN, &new_int);
- }
- if (changed & IEEE80211_CONF_CHANGE_MONITOR)
- wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n",
- __func__, conf->flags & IEEE80211_CONF_MONITOR ?
- "true" : "false");
- if (changed & IEEE80211_CONF_CHANGE_PS)
- wiphy_err(wiphy, "%s: change power-save mode: %s (implement)\n",
- __func__, conf->flags & IEEE80211_CONF_PS ?
- "true" : "false");
-
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- if (brcms_c_set_par(wl->wlc, IOV_QTXPOWER,
- conf->power_level * 4) < 0) {
- wiphy_err(wiphy, "%s: Error setting power_level\n",
- __func__);
- err = -EIO;
- goto config_out;
- }
- brcms_c_get_par(wl->wlc, IOV_QTXPOWER, &new_int);
- if (new_int != (conf->power_level * 4))
- wiphy_err(wiphy, "%s: Power level req != actual, %d %d"
- "\n", __func__, conf->power_level * 4,
- new_int);
- }
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- err = ieee_set_channel(hw, conf->channel, conf->channel_type);
- }
- if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- if (brcms_c_set
- (wl->wlc, BRCM_SET_SRL,
- conf->short_frame_max_tx_count) < 0) {
- wiphy_err(wiphy, "%s: Error setting srl\n", __func__);
- err = -EIO;
- goto config_out;
- }
- if (brcms_c_set(wl->wlc, BRCM_SET_LRL,
- conf->long_frame_max_tx_count) < 0) {
- wiphy_err(wiphy, "%s: Error setting lrl\n", __func__);
- err = -EIO;
- goto config_out;
- }
- }
-
- config_out:
- UNLOCK(wl);
- return err;
-}
-
-static void
-brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
-{
- struct brcms_info *wl = HW_TO_WL(hw);
- struct wiphy *wiphy = hw->wiphy;
- int val;
-
- if (changed & BSS_CHANGED_ASSOC) {
- /* association status changed (associated/disassociated)
- * also implies a change in the AID.
- */
- wiphy_err(wiphy, "%s: %s: %sassociated\n", KBUILD_MODNAME,
- __func__, info->assoc ? "" : "dis");
- LOCK(wl);
- brcms_c_associate_upd(wl->wlc, info->assoc);
- UNLOCK(wl);
- }
- if (changed & BSS_CHANGED_ERP_SLOT) {
- /* slot timing changed */
- if (info->use_short_slot)
- val = 1;
- else
- val = 0;
- LOCK(wl);
- brcms_c_set(wl->wlc, BRCMS_SET_SHORTSLOT_OVERRIDE, val);
- UNLOCK(wl);
- }
-
- if (changed & BSS_CHANGED_HT) {
- /* 802.11n parameters changed */
- u16 mode = info->ht_operation_mode;
-
- LOCK(wl);
- brcms_c_protection_upd(wl->wlc, BRCMS_PROT_N_CFG,
- mode & IEEE80211_HT_OP_MODE_PROTECTION);
- brcms_c_protection_upd(wl->wlc, BRCMS_PROT_N_NONGF,
- mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
- brcms_c_protection_upd(wl->wlc, BRCMS_PROT_N_OBSS,
- mode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT);
- UNLOCK(wl);
- }
- if (changed & BSS_CHANGED_BASIC_RATES) {
- struct ieee80211_supported_band *bi;
- u32 br_mask, i;
- u16 rate;
- struct wl_rateset rs;
- int error;
-
- /* retrieve the current rates */
- LOCK(wl);
- error = brcms_c_ioctl(wl->wlc, BRCM_GET_CURR_RATESET,
- &rs, sizeof(rs), NULL);
- UNLOCK(wl);
- if (error) {
- wiphy_err(wiphy, "%s: retrieve rateset failed: %d\n",
- __func__, error);
- return;
- }
- br_mask = info->basic_rates;
- bi = hw->wiphy->bands[brcms_c_get_curband(wl->wlc)];
- for (i = 0; i < bi->n_bitrates; i++) {
- /* convert to internal rate value */
- rate = (bi->bitrates[i].bitrate << 1) / 10;
-
- /* set/clear basic rate flag */
- brcms_set_basic_rate(&rs, rate, br_mask & 1);
- br_mask >>= 1;
- }
-
- /* update the rate set */
- LOCK(wl);
- brcms_c_ioctl(wl->wlc, BRCM_SET_RATESET, &rs, sizeof(rs), NULL);
- UNLOCK(wl);
- }
- if (changed & BSS_CHANGED_BEACON_INT) {
- /* Beacon interval changed */
- LOCK(wl);
- brcms_c_set(wl->wlc, BRCM_SET_BCNPRD, info->beacon_int);
- UNLOCK(wl);
- }
- if (changed & BSS_CHANGED_BSSID) {
- /* BSSID changed, for whatever reason (IBSS and managed mode) */
- LOCK(wl);
- brcms_c_set_addrmatch(wl->wlc, RCM_BSSID_OFFSET,
- info->bssid);
- UNLOCK(wl);
- }
- if (changed & BSS_CHANGED_BEACON) {
- /* Beacon data changed, retrieve new beacon (beaconing modes) */
- wiphy_err(wiphy, "%s: beacon changed\n", __func__);
- }
- if (changed & BSS_CHANGED_BEACON_ENABLED) {
- /* Beaconing should be enabled/disabled (beaconing modes) */
- wiphy_err(wiphy, "%s: Beacon enabled: %s\n", __func__,
- info->enable_beacon ? "true" : "false");
- }
- if (changed & BSS_CHANGED_CQM) {
- /* Connection quality monitor config changed */
- wiphy_err(wiphy, "%s: cqm change: threshold %d, hys %d "
- " (implement)\n", __func__, info->cqm_rssi_thold,
- info->cqm_rssi_hyst);
- }
- if (changed & BSS_CHANGED_IBSS) {
- /* IBSS join status changed */
- wiphy_err(wiphy, "%s: IBSS joined: %s (implement)\n", __func__,
- info->ibss_joined ? "true" : "false");
- }
- if (changed & BSS_CHANGED_ARP_FILTER) {
- /* Hardware ARP filter address list or state changed */
- wiphy_err(wiphy, "%s: arp filtering: enabled %s, count %d"
- " (implement)\n", __func__, info->arp_filter_enabled ?
- "true" : "false", info->arp_addr_cnt);
- }
- if (changed & BSS_CHANGED_QOS) {
- /*
- * QoS for this association was enabled/disabled.
- * Note that it is only ever disabled for station mode.
- */
- wiphy_err(wiphy, "%s: qos enabled: %s (implement)\n", __func__,
- info->qos ? "true" : "false");
- }
- return;
-}
-
-static void
-brcms_ops_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags, u64 multicast)
-{
- struct brcms_info *wl = hw->priv;
- struct wiphy *wiphy = hw->wiphy;
-
- changed_flags &= MAC_FILTERS;
- *total_flags &= MAC_FILTERS;
- if (changed_flags & FIF_PROMISC_IN_BSS)
- wiphy_err(wiphy, "FIF_PROMISC_IN_BSS\n");
- if (changed_flags & FIF_ALLMULTI)
- wiphy_err(wiphy, "FIF_ALLMULTI\n");
- if (changed_flags & FIF_FCSFAIL)
- wiphy_err(wiphy, "FIF_FCSFAIL\n");
- if (changed_flags & FIF_PLCPFAIL)
- wiphy_err(wiphy, "FIF_PLCPFAIL\n");
- if (changed_flags & FIF_CONTROL)
- wiphy_err(wiphy, "FIF_CONTROL\n");
- if (changed_flags & FIF_OTHER_BSS)
- wiphy_err(wiphy, "FIF_OTHER_BSS\n");
- if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
- LOCK(wl);
- if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
- wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS;
- brcms_c_mac_bcn_promisc_change(wl->wlc, 1);
- } else {
- brcms_c_mac_bcn_promisc_change(wl->wlc, 0);
- wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS;
- }
- UNLOCK(wl);
- }
- return;
-}
-
-static int
-brcms_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
-{
- return 0;
-}
-
-static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw)
-{
- struct brcms_info *wl = hw->priv;
- LOCK(wl);
- brcms_c_scan_start(wl->wlc);
- UNLOCK(wl);
- return;
-}
-
-static void brcms_ops_sw_scan_complete(struct ieee80211_hw *hw)
-{
- struct brcms_info *wl = hw->priv;
- LOCK(wl);
- brcms_c_scan_stop(wl->wlc);
- UNLOCK(wl);
- return;
-}
-
-static void brcms_ops_set_tsf(struct ieee80211_hw *hw, u64 tsf)
-{
- wiphy_err(hw->wiphy, "%s: Enter\n", __func__);
- return;
-}
-
-static int
-brcms_ops_get_stats(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats)
-{
- struct brcms_info *wl = hw->priv;
- struct wl_cnt *cnt;
-
- LOCK(wl);
- cnt = wl->pub->_cnt;
- stats->dot11ACKFailureCount = 0;
- stats->dot11RTSFailureCount = 0;
- stats->dot11FCSErrorCount = 0;
- stats->dot11RTSSuccessCount = 0;
- UNLOCK(wl);
- return 0;
-}
-
-static void
-brcms_ops_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
-{
- switch (cmd) {
- default:
- wiphy_err(hw->wiphy, "%s: Unknown cmd = %d\n", __func__,
- cmd);
- break;
- }
- return;
-}
-
-static int
-brcms_ops_conf_tx(struct ieee80211_hw *hw, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct brcms_info *wl = hw->priv;
-
- LOCK(wl);
- brcms_c_wme_setparams(wl->wlc, queue, params, true);
- UNLOCK(wl);
-
- return 0;
-}
-
-static u64 brcms_ops_get_tsf(struct ieee80211_hw *hw)
-{
- wiphy_err(hw->wiphy, "%s: Enter\n", __func__);
- return 0;
-}
-
-static int
-brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct scb *scb;
-
- int i;
- struct brcms_info *wl = hw->priv;
-
- /* Init the scb */
- scb = (struct scb *)sta->drv_priv;
- memset(scb, 0, sizeof(struct scb));
- for (i = 0; i < NUMPRIO; i++)
- scb->seqctl[i] = 0xFFFF;
- scb->seqctl_nonqos = 0xFFFF;
- scb->magic = SCB_MAGIC;
-
- wl->pub->global_scb = scb;
- wl->pub->global_ampdu = &(scb->scb_ampdu);
- wl->pub->global_ampdu->scb = scb;
- wl->pub->global_ampdu->max_pdu = 16;
- brcmu_pktq_init(&scb->scb_ampdu.txq, AMPDU_MAX_SCB_TID,
- AMPDU_MAX_SCB_TID * PKTQ_LEN_DEFAULT);
-
- sta->ht_cap.ht_supported = true;
- sta->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- sta->ht_cap.ampdu_density = AMPDU_DEF_MPDU_DENSITY;
- sta->ht_cap.cap = IEEE80211_HT_CAP_GRN_FLD |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT;
-
- /* minstrel_ht initiates addBA on our behalf by calling ieee80211_start_tx_ba_session() */
- return 0;
-}
-
-static int
-brcms_ops_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return 0;
-}
-
-static int
-brcms_ops_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size)
-{
- struct scb *scb = (struct scb *)sta->drv_priv;
- struct brcms_info *wl = hw->priv;
- int status;
-
- if (WARN_ON(scb->magic != SCB_MAGIC))
- return -EIDRM;
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- break;
- case IEEE80211_AMPDU_RX_STOP:
- break;
- case IEEE80211_AMPDU_TX_START:
- LOCK(wl);
- status = brcms_c_aggregatable(wl->wlc, tid);
- UNLOCK(wl);
- if (!status) {
- wiphy_err(wl->wiphy, "START: tid %d is not agg\'able\n",
- tid);
- return -EINVAL;
- }
- /* Future improvement: Use the starting sequence number provided ... */
- *ssn = 0;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
-
- case IEEE80211_AMPDU_TX_STOP:
- LOCK(wl);
- brcms_c_ampdu_flush(wl->wlc, sta, tid);
- UNLOCK(wl);
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- /*
- * BA window size from ADDBA response ('buf_size') defines how
- * many outstanding MPDUs are allowed for the BA stream by
- * recipient and traffic class. 'ampdu_factor' gives maximum
- * AMPDU size.
- */
- LOCK(wl);
- brcms_c_ampdu_tx_operational(wl->wlc, tid, buf_size,
- (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
- sta->ht_cap.ampdu_factor)) - 1);
- UNLOCK(wl);
- /* Power save wakeup */
- break;
- default:
- wiphy_err(wl->wiphy, "%s: Invalid command, ignoring\n",
- __func__);
- }
-
- return 0;
-}
-
-static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
-{
- struct brcms_info *wl = HW_TO_WL(hw);
- bool blocked;
-
- LOCK(wl);
- blocked = brcms_c_check_radio_disabled(wl->wlc);
- UNLOCK(wl);
-
- wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
-}
-
-static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
-{
- struct brcms_info *wl = HW_TO_WL(hw);
-
- no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
-
- /* wait for packet queue and dma fifos to run empty */
- LOCK(wl);
- brcms_c_wait_for_tx_completion(wl->wlc, drop);
- UNLOCK(wl);
-}
-
-static const struct ieee80211_ops brcms_ops = {
- .tx = brcms_ops_tx,
- .start = brcms_ops_start,
- .stop = brcms_ops_stop,
- .add_interface = brcms_ops_add_interface,
- .remove_interface = brcms_ops_remove_interface,
- .config = brcms_ops_config,
- .bss_info_changed = brcms_ops_bss_info_changed,
- .configure_filter = brcms_ops_configure_filter,
- .set_tim = brcms_ops_set_tim,
- .sw_scan_start = brcms_ops_sw_scan_start,
- .sw_scan_complete = brcms_ops_sw_scan_complete,
- .set_tsf = brcms_ops_set_tsf,
- .get_stats = brcms_ops_get_stats,
- .sta_notify = brcms_ops_sta_notify,
- .conf_tx = brcms_ops_conf_tx,
- .get_tsf = brcms_ops_get_tsf,
- .sta_add = brcms_ops_sta_add,
- .sta_remove = brcms_ops_sta_remove,
- .ampdu_action = brcms_ops_ampdu_action,
- .rfkill_poll = brcms_ops_rfkill_poll,
- .flush = brcms_ops_flush,
-};
-
-/*
- * is called in brcms_pci_probe() context, therefore no locking required.
- */
-static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
-{
- return regulatory_hint(wl->pub->ieee_hw->wiphy, abbrev);
-}
-
-/**
- * attach to the WL device.
- *
- * Attach to the WL device identified by vendor and device parameters.
- * regs is a host accessible memory address pointing to WL device registers.
- *
- * brcms_attach is not defined as static because in the case where no bus
- * is defined, wl_attach will never be called, and thus, gcc will issue
- * a warning that this function is defined but not used if we declare
- * it as static.
- *
- *
- * is called in brcms_pci_probe() context, therefore no locking required.
- */
-static struct brcms_info *brcms_attach(u16 vendor, u16 device,
- unsigned long regs,
- uint bustype, void *btparam, uint irq)
-{
- struct brcms_info *wl = NULL;
- int unit, err;
- unsigned long base_addr;
- struct ieee80211_hw *hw;
- u8 perm[ETH_ALEN];
-
- unit = n_adapters_found;
- err = 0;
-
- if (unit < 0) {
- return NULL;
- }
-
- /* allocate private info */
- hw = pci_get_drvdata(btparam); /* btparam == pdev */
- if (hw != NULL)
- wl = hw->priv;
- if (WARN_ON(hw == NULL) || WARN_ON(wl == NULL))
- return NULL;
- wl->wiphy = hw->wiphy;
-
- atomic_set(&wl->callbacks, 0);
-
- /* setup the bottom half handler */
- tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
-
-
-
- base_addr = regs;
-
- if (bustype == PCI_BUS || bustype == RPC_BUS) {
- /* Do nothing */
- } else {
- bustype = PCI_BUS;
- BCMMSG(wl->wiphy, "force to PCI\n");
- }
- wl->bcm_bustype = bustype;
-
- wl->regsva = ioremap_nocache(base_addr, PCI_BAR0_WINSZ);
- if (wl->regsva == NULL) {
- wiphy_err(wl->wiphy, "wl%d: ioremap() failed\n", unit);
- goto fail;
- }
- spin_lock_init(&wl->lock);
- spin_lock_init(&wl->isr_lock);
-
- /* prepare ucode */
- if (brcms_request_fw(wl, (struct pci_dev *)btparam) < 0) {
- wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
- "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
- brcms_release_fw(wl);
- brcms_remove((struct pci_dev *)btparam);
- return NULL;
- }
-
- /* common load-time initialization */
- wl->wlc = brcms_c_attach((void *)wl, vendor, device, unit, false,
- wl->regsva, wl->bcm_bustype, btparam, &err);
- brcms_release_fw(wl);
- if (!wl->wlc) {
- wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
- KBUILD_MODNAME, err);
- goto fail;
- }
- wl->pub = brcms_c_pub(wl->wlc);
-
- wl->pub->ieee_hw = hw;
-
- if (brcms_c_set_par(wl->wlc, IOV_MPC, 0) < 0) {
- wiphy_err(wl->wiphy, "wl%d: Error setting MPC variable to 0\n",
- unit);
- }
-
- /* register our interrupt handler */
- if (request_irq(irq, brcms_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) {
- wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
- goto fail;
- }
- wl->irq = irq;
-
- /* register module */
- brcms_c_module_register(wl->pub, "linux", wl, wl_linux_watchdog, NULL);
-
- if (ieee_hw_init(hw)) {
- wiphy_err(wl->wiphy, "wl%d: %s: ieee_hw_init failed!\n", unit,
- __func__);
- goto fail;
- }
-
- memcpy(perm, &wl->pub->cur_etheraddr, ETH_ALEN);
- if (WARN_ON(!is_valid_ether_addr(perm)))
- goto fail;
- SET_IEEE80211_PERM_ADDR(hw, perm);
-
- err = ieee80211_register_hw(hw);
- if (err) {
- wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status"
- "%d\n", __func__, err);
- }
-
- if (wl->pub->srom_ccode[0])
- err = brcms_set_hint(wl, wl->pub->srom_ccode);
- else
- err = brcms_set_hint(wl, "US");
- if (err) {
- wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n",
- __func__, err);
- }
-
- n_adapters_found++;
- return wl;
-
-fail:
- brcms_free(wl);
- return NULL;
-}
-
-
-
-#define CHAN2GHZ(channel, freqency, chflags) { \
- .band = IEEE80211_BAND_2GHZ, \
- .center_freq = (freqency), \
- .hw_value = (channel), \
- .flags = chflags, \
- .max_antenna_gain = 0, \
- .max_power = 19, \
-}
-
-static struct ieee80211_channel brcms_2ghz_chantable[] = {
- CHAN2GHZ(1, 2412, IEEE80211_CHAN_NO_HT40MINUS),
- CHAN2GHZ(2, 2417, IEEE80211_CHAN_NO_HT40MINUS),
- CHAN2GHZ(3, 2422, IEEE80211_CHAN_NO_HT40MINUS),
- CHAN2GHZ(4, 2427, IEEE80211_CHAN_NO_HT40MINUS),
- CHAN2GHZ(5, 2432, 0),
- CHAN2GHZ(6, 2437, 0),
- CHAN2GHZ(7, 2442, 0),
- CHAN2GHZ(8, 2447, IEEE80211_CHAN_NO_HT40PLUS),
- CHAN2GHZ(9, 2452, IEEE80211_CHAN_NO_HT40PLUS),
- CHAN2GHZ(10, 2457, IEEE80211_CHAN_NO_HT40PLUS),
- CHAN2GHZ(11, 2462, IEEE80211_CHAN_NO_HT40PLUS),
- CHAN2GHZ(12, 2467,
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_NO_HT40PLUS),
- CHAN2GHZ(13, 2472,
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_NO_HT40PLUS),
- CHAN2GHZ(14, 2484,
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
-};
-
-#define CHAN5GHZ(channel, chflags) { \
- .band = IEEE80211_BAND_5GHZ, \
- .center_freq = 5000 + 5*(channel), \
- .hw_value = (channel), \
- .flags = chflags, \
- .max_antenna_gain = 0, \
- .max_power = 21, \
-}
-
-static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = {
- /* UNII-1 */
- CHAN5GHZ(36, IEEE80211_CHAN_NO_HT40MINUS),
- CHAN5GHZ(40, IEEE80211_CHAN_NO_HT40PLUS),
- CHAN5GHZ(44, IEEE80211_CHAN_NO_HT40MINUS),
- CHAN5GHZ(48, IEEE80211_CHAN_NO_HT40PLUS),
- /* UNII-2 */
- CHAN5GHZ(52,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
- CHAN5GHZ(56,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
- CHAN5GHZ(60,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
- CHAN5GHZ(64,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
- /* MID */
- CHAN5GHZ(100,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
- CHAN5GHZ(104,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
- CHAN5GHZ(108,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
- CHAN5GHZ(112,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
- CHAN5GHZ(116,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
- CHAN5GHZ(120,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
- CHAN5GHZ(124,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
- CHAN5GHZ(128,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
- CHAN5GHZ(132,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
- CHAN5GHZ(136,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
- CHAN5GHZ(140,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS |
- IEEE80211_CHAN_NO_HT40MINUS),
- /* UNII-3 */
- CHAN5GHZ(149, IEEE80211_CHAN_NO_HT40MINUS),
- CHAN5GHZ(153, IEEE80211_CHAN_NO_HT40PLUS),
- CHAN5GHZ(157, IEEE80211_CHAN_NO_HT40MINUS),
- CHAN5GHZ(161, IEEE80211_CHAN_NO_HT40PLUS),
- CHAN5GHZ(165, IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
-};
-
-#define RATE(rate100m, _flags) { \
- .bitrate = (rate100m), \
- .flags = (_flags), \
- .hw_value = (rate100m / 5), \
-}
-
-static struct ieee80211_rate legacy_ratetable[] = {
- RATE(10, 0),
- RATE(20, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(55, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(110, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(60, 0),
- RATE(90, 0),
- RATE(120, 0),
- RATE(180, 0),
- RATE(240, 0),
- RATE(360, 0),
- RATE(480, 0),
- RATE(540, 0),
-};
-
-static struct ieee80211_supported_band brcms_band_2GHz_nphy = {
- .band = IEEE80211_BAND_2GHZ,
- .channels = brcms_2ghz_chantable,
- .n_channels = ARRAY_SIZE(brcms_2ghz_chantable),
- .bitrates = legacy_ratetable,
- .n_bitrates = ARRAY_SIZE(legacy_ratetable),
- .ht_cap = {
- /* from include/linux/ieee80211.h */
- .cap = IEEE80211_HT_CAP_GRN_FLD |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT,
- .ht_supported = true,
- .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
- .ampdu_density = AMPDU_DEF_MPDU_DENSITY,
- .mcs = {
- /* placeholders for now */
- .rx_mask = {0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0},
- .rx_highest = 500,
- .tx_params = IEEE80211_HT_MCS_TX_DEFINED}
- }
-};
-
-static struct ieee80211_supported_band brcms_band_5GHz_nphy = {
- .band = IEEE80211_BAND_5GHZ,
- .channels = brcms_5ghz_nphy_chantable,
- .n_channels = ARRAY_SIZE(brcms_5ghz_nphy_chantable),
- .bitrates = legacy_ratetable + 4,
- .n_bitrates = ARRAY_SIZE(legacy_ratetable) - 4,
- .ht_cap = {
- /* use IEEE80211_HT_CAP_* from include/linux/ieee80211.h */
- .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT, /* No 40 mhz yet */
- .ht_supported = true,
- .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
- .ampdu_density = AMPDU_DEF_MPDU_DENSITY,
- .mcs = {
- /* placeholders for now */
- .rx_mask = {0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0},
- .rx_highest = 500,
- .tx_params = IEEE80211_HT_MCS_TX_DEFINED}
- }
-};
-
-/*
- * is called in brcms_pci_probe() context, therefore no locking required.
- */
-static int ieee_hw_rate_init(struct ieee80211_hw *hw)
-{
- struct brcms_info *wl = HW_TO_WL(hw);
- int has_5g;
- char phy_list[4];
-
- has_5g = 0;
-
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
-
- if (brcms_c_get(wl->wlc, BRCM_GET_PHYLIST, (int *)&phy_list) < 0)
- wiphy_err(hw->wiphy, "Phy list failed\n");
-
- if (phy_list[0] == 'n' || phy_list[0] == 'c') {
- if (phy_list[0] == 'c') {
- /* Single stream */
- brcms_band_2GHz_nphy.ht_cap.mcs.rx_mask[1] = 0;
- brcms_band_2GHz_nphy.ht_cap.mcs.rx_highest = 72;
- }
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &brcms_band_2GHz_nphy;
- } else {
- return -EPERM;
- }
-
- /* Assume all bands use the same phy. True for 11n devices. */
- if (NBANDS_PUB(wl->pub) > 1) {
- has_5g++;
- if (phy_list[0] == 'n' || phy_list[0] == 'c') {
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &brcms_band_5GHz_nphy;
- } else {
- return -EPERM;
- }
- }
- return 0;
-}
-
-/*
- * is called in brcms_pci_probe() context, therefore no locking required.
- */
-static int ieee_hw_init(struct ieee80211_hw *hw)
-{
- hw->flags = IEEE80211_HW_SIGNAL_DBM
- /* | IEEE80211_HW_CONNECTION_MONITOR What is this? */
- | IEEE80211_HW_REPORTS_TX_ACK_STATUS
- | IEEE80211_HW_AMPDU_AGGREGATION;
-
- hw->extra_tx_headroom = brcms_c_get_header_len();
- hw->queues = N_TX_QUEUES;
- hw->max_rates = 2; /* Primary rate and 1 fallback rate */
-
- hw->channel_change_time = 7 * 1000; /* channel change time is dependent on chip and band */
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
-
- hw->rate_control_algorithm = "minstrel_ht";
-
- hw->sta_data_size = sizeof(struct scb);
- return ieee_hw_rate_init(hw);
-}
-
-/**
- * determines if a device is a WL device, and if so, attaches it.
- *
- * This function determines if a device pointed to by pdev is a WL device,
- * and if so, performs a brcms_attach() on it.
- *
- * Perimeter lock is initialized in the course of this function.
- */
-static int __devinit
-brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int rc;
- struct brcms_info *wl;
- struct ieee80211_hw *hw;
- u32 val;
-
- dev_info(&pdev->dev, "bus %d slot %d func %d irq %d\n",
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), pdev->irq);
-
- if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) ||
- ((pdev->device != 0x0576) &&
- ((pdev->device & 0xff00) != 0x4300) &&
- ((pdev->device & 0xff00) != 0x4700) &&
- ((pdev->device < 43000) || (pdev->device > 43999))))
- return -ENODEV;
-
- rc = pci_enable_device(pdev);
- if (rc) {
- pr_err("%s: Cannot enable device %d-%d_%d\n",
- __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
- return -ENODEV;
- }
- pci_set_master(pdev);
-
- pci_read_config_dword(pdev, 0x40, &val);
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
-
- hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops);
- if (!hw) {
- pr_err("%s: ieee80211_alloc_hw failed\n", __func__);
- return -ENOMEM;
- }
-
- SET_IEEE80211_DEV(hw, &pdev->dev);
-
- pci_set_drvdata(pdev, hw);
-
- memset(hw->priv, 0, sizeof(*wl));
-
- wl = brcms_attach(pdev->vendor, pdev->device,
- pci_resource_start(pdev, 0), PCI_BUS, pdev,
- pdev->irq);
-
- if (!wl) {
- pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME,
- __func__);
- return -ENODEV;
- }
- return 0;
-}
-
-static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct brcms_info *wl;
- struct ieee80211_hw *hw;
-
- hw = pci_get_drvdata(pdev);
- wl = HW_TO_WL(hw);
- if (!wl) {
- wiphy_err(wl->wiphy,
- "brcms_suspend: pci_get_drvdata failed\n");
- return -ENODEV;
- }
-
- /* only need to flag hw is down for proper resume */
- LOCK(wl);
- wl->pub->hw_up = false;
- UNLOCK(wl);
-
- pci_save_state(pdev);
- pci_disable_device(pdev);
- return pci_set_power_state(pdev, PCI_D3hot);
-}
-
-static int brcms_resume(struct pci_dev *pdev)
-{
- struct brcms_info *wl;
- struct ieee80211_hw *hw;
- int err = 0;
- u32 val;
-
- hw = pci_get_drvdata(pdev);
- wl = HW_TO_WL(hw);
- if (!wl) {
- wiphy_err(wl->wiphy,
- "wl: brcms_resume: pci_get_drvdata failed\n");
- return -ENODEV;
- }
-
- err = pci_set_power_state(pdev, PCI_D0);
- if (err)
- return err;
-
- pci_restore_state(pdev);
-
- err = pci_enable_device(pdev);
- if (err)
- return err;
-
- pci_set_master(pdev);
-
- pci_read_config_dword(pdev, 0x40, &val);
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
-
- /*
- * done. driver will be put in up state
- * in brcms_ops_add_interface() call.
- */
- return err;
-}
-
-/*
-* called from both kernel as from this kernel module.
-* precondition: perimeter lock is not acquired.
-*/
-static void brcms_remove(struct pci_dev *pdev)
-{
- struct brcms_info *wl;
- struct ieee80211_hw *hw;
- int status;
-
- hw = pci_get_drvdata(pdev);
- wl = HW_TO_WL(hw);
- if (!wl) {
- pr_err("wl: brcms_remove: pci_get_drvdata failed\n");
- return;
- }
-
- LOCK(wl);
- status = brcms_c_chipmatch(pdev->vendor, pdev->device);
- UNLOCK(wl);
- if (!status) {
- wiphy_err(wl->wiphy, "wl: brcms_remove: chipmatch "
- "failed\n");
- return;
- }
- if (wl->wlc) {
- wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
- wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
- ieee80211_unregister_hw(hw);
- LOCK(wl);
- brcms_down(wl);
- UNLOCK(wl);
- }
- pci_disable_device(pdev);
-
- brcms_free(wl);
-
- pci_set_drvdata(pdev, NULL);
- ieee80211_free_hw(hw);
-}
-
-static struct pci_driver brcms_pci_driver = {
- .name = KBUILD_MODNAME,
- .probe = brcms_pci_probe,
- .suspend = brcms_suspend,
- .resume = brcms_resume,
- .remove = __devexit_p(brcms_remove),
- .id_table = brcms_pci_id_table,
-};
-
-/**
- * This is the main entry point for the WL driver.
- *
- * This function determines if a device pointed to by pdev is a WL device,
- * and if so, performs a brcms_attach() on it.
- *
- */
-static int __init brcms_module_init(void)
-{
- int error = -ENODEV;
-
-#ifdef BCMDBG
- if (msglevel != 0xdeadbeef)
- brcm_msg_level = msglevel;
- if (phymsglevel != 0xdeadbeef)
- phyhal_msg_level = phymsglevel;
-#endif /* BCMDBG */
-
- error = pci_register_driver(&brcms_pci_driver);
- if (!error)
- return 0;
-
-
-
- return error;
-}
-
-/**
- * This function unloads the WL driver from the system.
- *
- * This function unconditionally unloads the WL driver module from the
- * system.
- *
- */
-static void __exit brcms_module_exit(void)
-{
- pci_unregister_driver(&brcms_pci_driver);
-
-}
-
-module_init(brcms_module_init);
-module_exit(brcms_module_exit);
-
-/**
- * This function frees the WL per-device resources.
- *
- * This function frees resources owned by the WL device pointed to
- * by the wl parameter.
- *
- * precondition: can both be called locked and unlocked
- *
- */
-static void brcms_free(struct brcms_info *wl)
-{
- struct brcms_timer *t, *next;
-
- /* free ucode data */
- if (wl->fw.fw_cnt)
- brcms_ucode_data_free();
- if (wl->irq)
- free_irq(wl->irq, wl);
-
- /* kill dpc */
- tasklet_kill(&wl->tasklet);
-
- if (wl->pub) {
- brcms_c_module_unregister(wl->pub, "linux", wl);
- }
-
- /* free common resources */
- if (wl->wlc) {
- brcms_c_detach(wl->wlc);
- wl->wlc = NULL;
- wl->pub = NULL;
- }
-
- /* virtual interface deletion is deferred so we cannot spinwait */
-
- /* wait for all pending callbacks to complete */
- while (atomic_read(&wl->callbacks) > 0)
- schedule();
-
- /* free timers */
- for (t = wl->timers; t; t = next) {
- next = t->next;
-#ifdef BCMDBG
- kfree(t->name);
-#endif
- kfree(t);
- }
-
- /*
- * unregister_netdev() calls get_stats() which may read chip registers
- * so we cannot unmap the chip registers until after calling unregister_netdev() .
- */
- if (wl->regsva && wl->bcm_bustype != SDIO_BUS &&
- wl->bcm_bustype != JTAG_BUS) {
- iounmap((void *)wl->regsva);
- }
- wl->regsva = NULL;
-}
-
-/* flags the given rate in rateset as requested */
-static void brcms_set_basic_rate(struct wl_rateset *rs, u16 rate, bool is_br)
-{
- u32 i;
-
- for (i = 0; i < rs->count; i++) {
- if (rate != (rs->rates[i] & 0x7f))
- continue;
-
- if (is_br)
- rs->rates[i] |= BRCMS_RATE_FLAG;
- else
- rs->rates[i] &= BRCMS_RATE_MASK;
- return;
- }
-}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
- bool state, int prio)
-{
- wiphy_err(wl->wiphy, "Shouldn't be here %s\n", __func__);
-}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_init(struct brcms_info *wl)
-{
- BCMMSG(WL_TO_HW(wl)->wiphy, "wl%d\n", wl->pub->unit);
- brcms_reset(wl);
-
- brcms_c_init(wl->wlc);
-}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-uint brcms_reset(struct brcms_info *wl)
-{
- BCMMSG(WL_TO_HW(wl)->wiphy, "wl%d\n", wl->pub->unit);
- brcms_c_reset(wl->wlc);
-
- /* dpc will not be rescheduled */
- wl->resched = 0;
-
- return 0;
-}
-
-/*
- * These are interrupt on/off entry points. Disable interrupts
- * during interrupt state transition.
- */
-void brcms_intrson(struct brcms_info *wl)
-{
- unsigned long flags;
-
- INT_LOCK(wl, flags);
- brcms_c_intrson(wl->wlc);
- INT_UNLOCK(wl, flags);
-}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-bool wl_alloc_dma_resources(struct brcms_info *wl, uint addrwidth)
-{
- return true;
-}
-
-u32 brcms_intrsoff(struct brcms_info *wl)
-{
- unsigned long flags;
- u32 status;
-
- INT_LOCK(wl, flags);
- status = brcms_c_intrsoff(wl->wlc);
- INT_UNLOCK(wl, flags);
- return status;
-}
-
-void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask)
-{
- unsigned long flags;
-
- INT_LOCK(wl, flags);
- brcms_c_intrsrestore(wl->wlc, macintmask);
- INT_UNLOCK(wl, flags);
-}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-int brcms_up(struct brcms_info *wl)
-{
- int error = 0;
-
- if (wl->pub->up)
- return 0;
-
- error = brcms_c_up(wl->wlc);
-
- return error;
-}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_down(struct brcms_info *wl)
-{
- uint callbacks, ret_val = 0;
-
- /* call common down function */
- ret_val = brcms_c_down(wl->wlc);
- callbacks = atomic_read(&wl->callbacks) - ret_val;
-
- /* wait for down callbacks to complete */
- UNLOCK(wl);
-
- /* For HIGH_only driver, it's important to actually schedule other work,
- * not just spin wait since everything runs at schedule level
- */
- SPINWAIT((atomic_read(&wl->callbacks) > callbacks), 100 * 1000);
-
- LOCK(wl);
-}
-
-static irqreturn_t brcms_isr(int irq, void *dev_id)
-{
- struct brcms_info *wl;
- bool ours, wantdpc;
- unsigned long flags;
-
- wl = (struct brcms_info *) dev_id;
-
- ISR_LOCK(wl, flags);
-
- /* call common first level interrupt handler */
- ours = brcms_c_isr(wl->wlc, &wantdpc);
- if (ours) {
- /* if more to do... */
- if (wantdpc) {
-
- /* ...and call the second level interrupt handler */
- /* schedule dpc */
- tasklet_schedule(&wl->tasklet);
- }
- }
-
- ISR_UNLOCK(wl, flags);
-
- return IRQ_RETVAL(ours);
-}
-
-static void brcms_dpc(unsigned long data)
-{
- struct brcms_info *wl;
-
- wl = (struct brcms_info *) data;
-
- LOCK(wl);
-
- /* call the common second level interrupt handler */
- if (wl->pub->up) {
- if (wl->resched) {
- unsigned long flags;
-
- INT_LOCK(wl, flags);
- brcms_c_intrsupd(wl->wlc);
- INT_UNLOCK(wl, flags);
- }
-
- wl->resched = brcms_c_dpc(wl->wlc, true);
- }
-
- /* brcms_c_dpc() may bring the driver down */
- if (!wl->pub->up)
- goto done;
-
- /* re-schedule dpc */
- if (wl->resched)
- tasklet_schedule(&wl->tasklet);
- else {
- /* re-enable interrupts */
- brcms_intrson(wl);
- }
-
- done:
- UNLOCK(wl);
-}
-
-/*
- * is called by the kernel from software irq context
- */
-static void brcms_timer(unsigned long data)
-{
- _brcms_timer((struct brcms_timer *) data);
-}
-
-/*
-* precondition: perimeter lock is not acquired
- */
-static void _brcms_timer(struct brcms_timer *t)
-{
- LOCK(t->wl);
-
- if (t->set) {
- if (t->periodic) {
- t->timer.expires = jiffies + t->ms * HZ / 1000;
- atomic_inc(&t->wl->callbacks);
- add_timer(&t->timer);
- t->set = true;
- } else
- t->set = false;
-
- t->fn(t->arg);
- }
-
- atomic_dec(&t->wl->callbacks);
-
- UNLOCK(t->wl);
-}
-
-/*
- * Adds a timer to the list. Caller supplies a timer function.
- * Is called from wlc.
- *
- * precondition: perimeter lock has been acquired
- */
-struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
- void (*fn) (void *arg),
- void *arg, const char *name)
-{
- struct brcms_timer *t;
-
- t = kzalloc(sizeof(struct brcms_timer), GFP_ATOMIC);
- if (!t) {
- wiphy_err(wl->wiphy, "wl%d: brcms_init_timer: out of memory\n",
- wl->pub->unit);
- return 0;
- }
-
- init_timer(&t->timer);
- t->timer.data = (unsigned long) t;
- t->timer.function = brcms_timer;
- t->wl = wl;
- t->fn = fn;
- t->arg = arg;
- t->next = wl->timers;
- wl->timers = t;
-
-#ifdef BCMDBG
- t->name = kmalloc(strlen(name) + 1, GFP_ATOMIC);
- if (t->name)
- strcpy(t->name, name);
-#endif
-
- return t;
-}
-
-/* BMAC_NOTE: Add timer adds only the kernel timer since it's going to be more accurate
- * as well as it's easier to make it periodic
- *
- * precondition: perimeter lock has been acquired
- */
-void brcms_add_timer(struct brcms_info *wl, struct brcms_timer *t, uint ms,
- int periodic)
-{
-#ifdef BCMDBG
- if (t->set) {
- wiphy_err(wl->wiphy, "%s: Already set. Name: %s, per %d\n",
- __func__, t->name, periodic);
- }
-#endif
- t->ms = ms;
- t->periodic = (bool) periodic;
- t->set = true;
- t->timer.expires = jiffies + ms * HZ / 1000;
-
- atomic_inc(&wl->callbacks);
- add_timer(&t->timer);
-}
-
-/*
- * return true if timer successfully deleted, false if still pending
- *
- * precondition: perimeter lock has been acquired
- */
-bool brcms_del_timer(struct brcms_info *wl, struct brcms_timer *t)
-{
- if (t->set) {
- t->set = false;
- if (!del_timer(&t->timer)) {
- return false;
- }
- atomic_dec(&wl->callbacks);
- }
-
- return true;
-}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_free_timer(struct brcms_info *wl, struct brcms_timer *t)
-{
- struct brcms_timer *tmp;
-
- /* delete the timer in case it is active */
- brcms_del_timer(wl, t);
-
- if (wl->timers == t) {
- wl->timers = wl->timers->next;
-#ifdef BCMDBG
- kfree(t->name);
-#endif
- kfree(t);
- return;
-
- }
-
- tmp = wl->timers;
- while (tmp) {
- if (tmp->next == t) {
- tmp->next = t->next;
-#ifdef BCMDBG
- kfree(t->name);
-#endif
- kfree(t);
- return;
- }
- tmp = tmp->next;
- }
-
-}
-
-/*
- * runs in software irq context
- *
- * precondition: perimeter lock is not acquired
- */
-static int wl_linux_watchdog(void *ctx)
-{
- return 0;
-}
-
-struct firmware_hdr {
- u32 offset;
- u32 len;
- u32 idx;
-};
-
-char *brcms_firmwares[MAX_FW_IMAGES] = {
- "brcm/bcm43xx",
- NULL
-};
-
-/*
- * precondition: perimeter lock has been acquired
- */
-int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
-{
- int i, entry;
- const u8 *pdata;
- struct firmware_hdr *hdr;
- for (i = 0; i < wl->fw.fw_cnt; i++) {
- hdr = (struct firmware_hdr *)wl->fw.fw_hdr[i]->data;
- for (entry = 0; entry < wl->fw.hdr_num_entries[i];
- entry++, hdr++) {
- if (hdr->idx == idx) {
- pdata = wl->fw.fw_bin[i]->data + hdr->offset;
- *pbuf = kmalloc(hdr->len, GFP_ATOMIC);
- if (*pbuf == NULL) {
- wiphy_err(wl->wiphy, "fail to alloc %d"
- " bytes\n", hdr->len);
- goto fail;
- }
- memcpy(*pbuf, pdata, hdr->len);
- return 0;
- }
- }
- }
- wiphy_err(wl->wiphy, "ERROR: ucode buf tag:%d can not be found!\n",
- idx);
- *pbuf = NULL;
-fail:
- return -ENODATA;
-}
-
-/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-int brcms_ucode_init_uint(struct brcms_info *wl, u32 *data, u32 idx)
-{
- int i, entry;
- const u8 *pdata;
- struct firmware_hdr *hdr;
- for (i = 0; i < wl->fw.fw_cnt; i++) {
- hdr = (struct firmware_hdr *)wl->fw.fw_hdr[i]->data;
- for (entry = 0; entry < wl->fw.hdr_num_entries[i];
- entry++, hdr++) {
- if (hdr->idx == idx) {
- pdata = wl->fw.fw_bin[i]->data + hdr->offset;
- if (hdr->len != 4) {
- wiphy_err(wl->wiphy,
- "ERROR: fw hdr len\n");
- return -ENOMSG;
- }
- *data = *((u32 *) pdata);
- return 0;
- }
- }
- }
- wiphy_err(wl->wiphy, "ERROR: ucode tag:%d can not be found!\n", idx);
- return -ENOMSG;
-}
-
-/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-static int brcms_request_fw(struct brcms_info *wl, struct pci_dev *pdev)
-{
- int status;
- struct device *device = &pdev->dev;
- char fw_name[100];
- int i;
-
- memset((void *)&wl->fw, 0, sizeof(struct brcms_firmware));
- for (i = 0; i < MAX_FW_IMAGES; i++) {
- if (brcms_firmwares[i] == NULL)
- break;
- sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
- UCODE_LOADER_API_VER);
- status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
- if (status) {
- wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
- KBUILD_MODNAME, fw_name);
- return status;
- }
- sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
- UCODE_LOADER_API_VER);
- status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
- if (status) {
- wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
- KBUILD_MODNAME, fw_name);
- return status;
- }
- wl->fw.hdr_num_entries[i] =
- wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
- }
- wl->fw.fw_cnt = i;
- return brcms_ucode_data_init(wl);
-}
-
-/*
- * precondition: can both be called locked and unlocked
- */
-void brcms_ucode_free_buf(void *p)
-{
- kfree(p);
-}
-
-/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-static void brcms_release_fw(struct brcms_info *wl)
-{
- int i;
- for (i = 0; i < MAX_FW_IMAGES; i++) {
- release_firmware(wl->fw.fw_bin[i]);
- release_firmware(wl->fw.fw_hdr[i]);
- }
-}
-
-
-/*
- * checks validity of all firmware images loaded from user space
- *
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-int brcms_check_firmwares(struct brcms_info *wl)
-{
- int i;
- int entry;
- int rc = 0;
- const struct firmware *fw;
- const struct firmware *fw_hdr;
- struct firmware_hdr *ucode_hdr;
- for (i = 0; i < MAX_FW_IMAGES && rc == 0; i++) {
- fw = wl->fw.fw_bin[i];
- fw_hdr = wl->fw.fw_hdr[i];
- if (fw == NULL && fw_hdr == NULL) {
- break;
- } else if (fw == NULL || fw_hdr == NULL) {
- wiphy_err(wl->wiphy, "%s: invalid bin/hdr fw\n",
- __func__);
- rc = -EBADF;
- } else if (fw_hdr->size % sizeof(struct firmware_hdr)) {
- wiphy_err(wl->wiphy, "%s: non integral fw hdr file "
- "size %zu/%zu\n", __func__, fw_hdr->size,
- sizeof(struct firmware_hdr));
- rc = -EBADF;
- } else if (fw->size < MIN_FW_SIZE || fw->size > MAX_FW_SIZE) {
- wiphy_err(wl->wiphy, "%s: out of bounds fw file size "
- "%zu\n", __func__, fw->size);
- rc = -EBADF;
- } else {
- /* check if ucode section overruns firmware image */
- ucode_hdr = (struct firmware_hdr *)fw_hdr->data;
- for (entry = 0; entry < wl->fw.hdr_num_entries[i] &&
- !rc; entry++, ucode_hdr++) {
- if (ucode_hdr->offset + ucode_hdr->len >
- fw->size) {
- wiphy_err(wl->wiphy,
- "%s: conflicting bin/hdr\n",
- __func__);
- rc = -EBADF;
- }
- }
- }
- }
- if (rc == 0 && wl->fw.fw_cnt != i) {
- wiphy_err(wl->wiphy, "%s: invalid fw_cnt=%d\n", __func__,
- wl->fw.fw_cnt);
- rc = -EBADF;
- }
- return rc;
-}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
-{
- bool blocked = brcms_c_check_radio_disabled(wl->wlc);
-
- UNLOCK(wl);
- wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
- if (blocked)
- wiphy_rfkill_start_polling(wl->pub->ieee_hw->wiphy);
- LOCK(wl);
- return blocked;
-}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_msleep(struct brcms_info *wl, uint ms)
-{
- UNLOCK(wl);
- msleep(ms);
- LOCK(wl);
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/mac80211_if.h b/drivers/staging/brcm80211/brcmsmac/mac80211_if.h
deleted file mode 100644
index 40e3d375ea9..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/mac80211_if.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_MAC80211_IF_H_
-#define _BRCM_MAC80211_IF_H_
-
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-
-/* softmac ioctl definitions */
-#define BRCMS_SET_SHORTSLOT_OVERRIDE 146
-
-
-/* BMAC Note: High-only driver is no longer working in softirq context as it needs to block and
- * sleep so perimeter lock has to be a semaphore instead of spinlock. This requires timers to be
- * submitted to workqueue instead of being on kernel timer
- */
-struct brcms_timer {
- struct timer_list timer;
- struct brcms_info *wl;
- void (*fn) (void *);
- void *arg; /* argument to fn */
- uint ms;
- bool periodic;
- bool set;
- struct brcms_timer *next;
-#ifdef BCMDBG
- char *name; /* Description of the timer */
-#endif
-};
-
-struct brcms_if {
- uint subunit; /* WDS/BSS unit */
- struct pci_dev *pci_dev;
-};
-
-#define MAX_FW_IMAGES 4
-struct brcms_firmware {
- u32 fw_cnt;
- const struct firmware *fw_bin[MAX_FW_IMAGES];
- const struct firmware *fw_hdr[MAX_FW_IMAGES];
- u32 hdr_num_entries[MAX_FW_IMAGES];
-};
-
-struct brcms_info {
- struct brcms_pub *pub; /* pointer to public wlc state */
- void *wlc; /* pointer to private common os-independent data */
- u32 magic;
-
- int irq;
-
- spinlock_t lock; /* per-device perimeter lock */
- spinlock_t isr_lock; /* per-device ISR synchronization lock */
-
- /* bus type and regsva for unmap in brcms_free() */
- uint bcm_bustype; /* bus type */
- void *regsva; /* opaque chip registers virtual address */
-
- /* timer related fields */
- atomic_t callbacks; /* # outstanding callback functions */
- struct brcms_timer *timers; /* timer cleanup queue */
-
- struct tasklet_struct tasklet; /* dpc tasklet */
- bool resched; /* dpc needs to be and is rescheduled */
-#ifdef LINUXSTA_PS
- u32 pci_psstate[16]; /* pci ps-state save/restore */
-#endif
- struct brcms_firmware fw;
- struct wiphy *wiphy;
-};
-
-/* misc callbacks */
-extern void brcms_init(struct brcms_info *wl);
-extern uint brcms_reset(struct brcms_info *wl);
-extern void brcms_intrson(struct brcms_info *wl);
-extern u32 brcms_intrsoff(struct brcms_info *wl);
-extern void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
-extern int brcms_up(struct brcms_info *wl);
-extern void brcms_down(struct brcms_info *wl);
-extern void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
- bool state, int prio);
-extern bool wl_alloc_dma_resources(struct brcms_info *wl, uint dmaddrwidth);
-extern bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
-
-/* timer functions */
-extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
- void (*fn) (void *arg), void *arg,
- const char *name);
-extern void brcms_free_timer(struct brcms_info *wl, struct brcms_timer *timer);
-extern void brcms_add_timer(struct brcms_info *wl, struct brcms_timer *timer,
- uint ms, int periodic);
-extern bool brcms_del_timer(struct brcms_info *wl, struct brcms_timer *timer);
-extern void brcms_msleep(struct brcms_info *wl, uint ms);
-
-#endif /* _BRCM_MAC80211_IF_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/main.c b/drivers/staging/brcm80211/brcmsmac/main.c
deleted file mode 100644
index 1763c4535cd..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/main.c
+++ /dev/null
@@ -1,6102 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/pci_ids.h>
-#include <net/mac80211.h>
-
-#include <brcm_hw_ids.h>
-#include <aiutils.h>
-#include "rate.h"
-#include "scb.h"
-#include "phy/phy_hal.h"
-#include "channel.h"
-#include "bmac.h"
-#include "antsel.h"
-#include "stf.h"
-#include "ampdu.h"
-#include "alloc.h"
-#include "mac80211_if.h"
-#include "main.h"
-
-/*
- * WPA(2) definitions
- */
-#define RSN_CAP_4_REPLAY_CNTRS 2
-#define RSN_CAP_16_REPLAY_CNTRS 3
-
-#define WPA_CAP_4_REPLAY_CNTRS RSN_CAP_4_REPLAY_CNTRS
-#define WPA_CAP_16_REPLAY_CNTRS RSN_CAP_16_REPLAY_CNTRS
-
-/*
- * Indication for txflowcontrol that all priority bits in
- * TXQ_STOP_FOR_PRIOFC_MASK are to be considered.
- */
-#define ALLPRIO -1
-
-/*
- * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
- */
-#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1)
-
-#define TIMER_INTERVAL_WATCHDOG 1000 /* watchdog timer, in unit of ms */
-#define TIMER_INTERVAL_RADIOCHK 800 /* radio monitor timer, in unit of ms */
-
-/* Max MPC timeout, in unit of watchdog */
-#ifndef BRCMS_MPC_MAX_DELAYCNT
-#define BRCMS_MPC_MAX_DELAYCNT 10
-#endif
-
-/* Min MPC timeout, in unit of watchdog */
-#define BRCMS_MPC_MIN_DELAYCNT 1
-#define BRCMS_MPC_THRESHOLD 3 /* MPC count threshold level */
-
-#define BEACON_INTERVAL_DEFAULT 100 /* beacon interval, in unit of 1024TU */
-#define DTIM_INTERVAL_DEFAULT 3 /* DTIM interval, in unit of beacon interval */
-
-/* Scale down delays to accommodate QT slow speed */
-#define BEACON_INTERVAL_DEF_QT 20 /* beacon interval, in unit of 1024TU */
-#define DTIM_INTERVAL_DEF_QT 1 /* DTIM interval, in unit of beacon interval */
-
-#define TBTT_ALIGN_LEEWAY_US 100 /* min leeway before first TBTT in us */
-
-/* Software feature flag defines used by wlfeatureflag */
-#define WL_SWFL_NOHWRADIO 0x0004
-#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */
-#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */
-
-/* n-mode support capability */
-/* 2x2 includes both 1x1 & 2x2 devices
- * reserved #define 2 for future when we want to separate 1x1 & 2x2 and
- * control it independently
- */
-#define WL_11N_2x2 1
-#define WL_11N_3x3 3
-#define WL_11N_4x4 4
-
-/* define 11n feature disable flags */
-#define WLFEATURE_DISABLE_11N 0x00000001
-#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
-#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
-#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
-#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
-#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
-#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
-#define WLFEATURE_DISABLE_11N_GF 0x00000080
-
-#define EDCF_ACI_MASK 0x60
-#define EDCF_ACI_SHIFT 5
-#define EDCF_ECWMIN_MASK 0x0f
-#define EDCF_ECWMAX_SHIFT 4
-#define EDCF_AIFSN_MASK 0x0f
-#define EDCF_AIFSN_MAX 15
-#define EDCF_ECWMAX_MASK 0xf0
-
-#define EDCF_AC_BE_TXOP_STA 0x0000
-#define EDCF_AC_BK_TXOP_STA 0x0000
-#define EDCF_AC_VO_ACI_STA 0x62
-#define EDCF_AC_VO_ECW_STA 0x32
-#define EDCF_AC_VI_ACI_STA 0x42
-#define EDCF_AC_VI_ECW_STA 0x43
-#define EDCF_AC_BK_ECW_STA 0xA4
-#define EDCF_AC_VI_TXOP_STA 0x005e
-#define EDCF_AC_VO_TXOP_STA 0x002f
-#define EDCF_AC_BE_ACI_STA 0x03
-#define EDCF_AC_BE_ECW_STA 0xA4
-#define EDCF_AC_BK_ACI_STA 0x27
-#define EDCF_AC_VO_TXOP_AP 0x002f
-
-#define EDCF_TXOP2USEC(txop) ((txop) << 5)
-#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
-
-#define APHY_SYMBOL_TIME 4
-#define APHY_PREAMBLE_TIME 16
-#define APHY_SIGNAL_TIME 4
-#define APHY_SIFS_TIME 16
-#define APHY_SERVICE_NBITS 16
-#define APHY_TAIL_NBITS 6
-#define BPHY_SIFS_TIME 10
-#define BPHY_PLCP_SHORT_TIME 96
-
-#define PREN_PREAMBLE 24
-#define PREN_MM_EXT 12
-#define PREN_PREAMBLE_EXT 4
-
-#define DOT11_MAC_HDR_LEN 24
-#define DOT11_ACK_LEN 10
-#define DOT11_BA_LEN 4
-#define DOT11_OFDM_SIGNAL_EXTENSION 6
-#define DOT11_MIN_FRAG_LEN 256
-#define DOT11_RTS_LEN 16
-#define DOT11_CTS_LEN 10
-#define DOT11_BA_BITMAP_LEN 128
-#define DOT11_MIN_BEACON_PERIOD 1
-#define DOT11_MAX_BEACON_PERIOD 0xFFFF
-#define DOT11_MAXNUMFRAGS 16
-#define DOT11_MAX_FRAG_LEN 2346
-
-#define BPHY_PLCP_TIME 192
-#define RIFS_11N_TIME 2
-
-#define WME_VER 1
-#define WME_SUBTYPE_PARAM_IE 1
-#define WME_TYPE 2
-#define WME_OUI "\x00\x50\xf2"
-
-#define AC_BE 0
-#define AC_BK 1
-#define AC_VI 2
-#define AC_VO 3
-
-/*
- * driver maintains internal 'tick'(wlc->pub->now) which increments in 1s OS timer(soft
- * watchdog) it is not a wall clock and won't increment when driver is in "down" state
- * this low resolution driver tick can be used for maintenance tasks such as phy
- * calibration and scb update
- */
-
-/* To inform the ucode of the last mcast frame posted so that it can clear moredata bit */
-#define BCMCFID(wlc, fid) brcms_b_write_shm((wlc)->hw, M_BCMC_FID, (fid))
-
-#define BRCMS_WAR16165(wlc) (wlc->pub->sih->bustype == PCI_BUS && \
- (!AP_ENAB(wlc->pub)) && (wlc->war16165))
-
-/* debug/trace */
-uint brcm_msg_level =
-#if defined(BCMDBG)
- LOG_ERROR_VAL;
-#else
- 0;
-#endif /* BCMDBG */
-
-/* Find basic rate for a given rate */
-#define BRCMS_BASIC_RATE(wlc, rspec) (IS_MCS(rspec) ? \
- (wlc)->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK].leg_ofdm] : \
- (wlc)->band->basic_rate[rspec & RSPEC_RATE_MASK])
-
-#define FRAMETYPE(r, mimoframe) (IS_MCS(r) ? mimoframe : (IS_CCK(r) ? FT_CCK : FT_OFDM))
-
-#define RFDISABLE_DEFAULT 10000000 /* rfdisable delay timer 500 ms, runs of ALP clock */
-
-#define BRCMS_TEMPSENSE_PERIOD 10 /* 10 second timeout */
-
-#define SCAN_IN_PROGRESS(x) 0
-
-#define EPI_VERSION_NUM 0x054b0b00
-
-#ifdef BCMDBG
-/* pointer to most recently allocated wl/wlc */
-static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL);
-#endif
-
-const u8 prio2fifo[NUMPRIO] = {
- TX_AC_BE_FIFO, /* 0 BE AC_BE Best Effort */
- TX_AC_BK_FIFO, /* 1 BK AC_BK Background */
- TX_AC_BK_FIFO, /* 2 -- AC_BK Background */
- TX_AC_BE_FIFO, /* 3 EE AC_BE Best Effort */
- TX_AC_VI_FIFO, /* 4 CL AC_VI Video */
- TX_AC_VI_FIFO, /* 5 VI AC_VI Video */
- TX_AC_VO_FIFO, /* 6 VO AC_VO Voice */
- TX_AC_VO_FIFO /* 7 NC AC_VO Voice */
-};
-
-/* precedences numbers for wlc queues. These are twice as may levels as
- * 802.1D priorities.
- * Odd numbers are used for HI priority traffic at same precedence levels
- * These constants are used ONLY by wlc_prio2prec_map. Do not use them elsewhere.
- */
-#define _BRCMS_PREC_NONE 0 /* None = - */
-#define _BRCMS_PREC_BK 2 /* BK - Background */
-#define _BRCMS_PREC_BE 4 /* BE - Best-effort */
-#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */
-#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */
-#define _BRCMS_PREC_VI 10 /* Vi - Video */
-#define _BRCMS_PREC_VO 12 /* Vo - Voice */
-#define _BRCMS_PREC_NC 14 /* NC - Network Control */
-
-#define MAXMACLIST 64 /* max # source MAC matches */
-#define BCN_TEMPLATE_COUNT 2
-
-/* The BSS is generating beacons in HW */
-#define BRCMS_BSSCFG_HW_BCN 0x20
-
-#define HWBCN_ENAB(cfg) (((cfg)->flags & BRCMS_BSSCFG_HW_BCN) != 0)
-
-#define MBSS_BCN_ENAB(cfg) 0
-#define MBSS_PRB_ENAB(cfg) 0
-#define SOFTBCN_ENAB(pub) (0)
-
-/* 802.1D Priority to precedence queue mapping */
-const u8 wlc_prio2prec_map[] = {
- _BRCMS_PREC_BE, /* 0 BE - Best-effort */
- _BRCMS_PREC_BK, /* 1 BK - Background */
- _BRCMS_PREC_NONE, /* 2 None = - */
- _BRCMS_PREC_EE, /* 3 EE - Excellent-effort */
- _BRCMS_PREC_CL, /* 4 CL - Controlled Load */
- _BRCMS_PREC_VI, /* 5 Vi - Video */
- _BRCMS_PREC_VO, /* 6 Vo - Voice */
- _BRCMS_PREC_NC, /* 7 NC - Network Control */
-};
-
-/* Check if a particular BSS config is AP or STA */
-#define BSSCFG_AP(cfg) (0)
-#define BSSCFG_STA(cfg) (1)
-#define BSSCFG_IBSS(cfg) (!(cfg)->BSS)
-
-/* As above for all non-NULL BSS configs */
-#define FOREACH_BSS(wlc, idx, cfg) \
- for (idx = 0; (int) idx < BRCMS_MAXBSSCFG; idx++) \
- if ((cfg = (wlc)->bsscfg[idx]))
-
-/* TX FIFO number to WME/802.1E Access Category */
-const u8 wme_fifo2ac[] = { AC_BK, AC_BE, AC_VI, AC_VO, AC_BE, AC_BE };
-
-/* WME/802.1E Access Category to TX FIFO number */
-static const u8 wme_ac2fifo[] = { 1, 0, 2, 3 };
-
-static bool in_send_q;
-
-/* Shared memory location index for various AC params */
-#define wme_shmemacindex(ac) wme_ac2fifo[ac]
-
-#ifdef BCMDBG
-static const char * const fifo_names[] = {
- "AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" };
-#else
-static const char fifo_names[6][0];
-#endif
-
-static const u8 acbitmap2maxprio[] = {
- PRIO_8021D_BE, PRIO_8021D_BE, PRIO_8021D_BK, PRIO_8021D_BK,
- PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI,
- PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO,
- PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO
-};
-
-/* currently the best mechanism for determining SIFS is the band in use */
-#define SIFS(band) ((band)->bandtype == BRCM_BAND_5G ? APHY_SIFS_TIME : \
- BPHY_SIFS_TIME);
-
-/* local prototypes */
-static u16 brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc,
- struct ieee80211_hw *hw,
- struct sk_buff *p,
- struct scb *scb, uint frag,
- uint nfrags, uint queue,
- uint next_frag_len,
- struct wsec_key *key,
- ratespec_t rspec_override);
-static void brcms_c_bss_default_init(struct brcms_c_info *wlc);
-static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc);
-static ratespec_t mac80211_wlc_set_nrate(struct brcms_c_info *wlc,
- struct brcms_band *cur_band,
- u32 int_val);
-static void brcms_c_tx_prec_map_init(struct brcms_c_info *wlc);
-static void brcms_c_watchdog(void *arg);
-static void brcms_c_watchdog_by_timer(void *arg);
-static u16 brcms_c_rate_shm_offset(struct brcms_c_info *wlc, u8 rate);
-static int brcms_c_set_rateset(struct brcms_c_info *wlc, wlc_rateset_t *rs_arg);
-static u8 brcms_c_local_constraint_qdbm(struct brcms_c_info *wlc);
-
-/* send and receive */
-static struct brcms_txq_info *brcms_c_txq_alloc(struct brcms_c_info *wlc);
-static void brcms_c_txq_free(struct brcms_c_info *wlc,
- struct brcms_txq_info *qi);
-static void brcms_c_txflowcontrol_signal(struct brcms_c_info *wlc,
- struct brcms_txq_info *qi,
- bool on, int prio);
-static void brcms_c_txflowcontrol_reset(struct brcms_c_info *wlc);
-static void brcms_c_compute_cck_plcp(struct brcms_c_info *wlc, ratespec_t rate,
- uint length, u8 *plcp);
-static void brcms_c_compute_ofdm_plcp(ratespec_t rate, uint length, u8 *plcp);
-static void brcms_c_compute_mimo_plcp(ratespec_t rate, uint length, u8 *plcp);
-static u16 brcms_c_compute_frame_dur(struct brcms_c_info *wlc, ratespec_t rate,
- u8 preamble_type, uint next_frag_len);
-static u64 brcms_c_recover_tsf64(struct brcms_c_info *wlc,
- struct brcms_d11rxhdr *rxh);
-static void brcms_c_recvctl(struct brcms_c_info *wlc,
- struct d11rxhdr *rxh, struct sk_buff *p);
-static uint brcms_c_calc_frame_len(struct brcms_c_info *wlc, ratespec_t rate,
- u8 preamble_type, uint dur);
-static uint brcms_c_calc_ack_time(struct brcms_c_info *wlc, ratespec_t rate,
- u8 preamble_type);
-static uint brcms_c_calc_cts_time(struct brcms_c_info *wlc, ratespec_t rate,
- u8 preamble_type);
-/* interrupt, up/down, band */
-static void brcms_c_setband(struct brcms_c_info *wlc, uint bandunit);
-static chanspec_t brcms_c_init_chanspec(struct brcms_c_info *wlc);
-static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
- chanspec_t chanspec);
-static void brcms_c_bsinit(struct brcms_c_info *wlc);
-static int brcms_c_duty_cycle_set(struct brcms_c_info *wlc, int duty_cycle,
- bool isOFDM, bool writeToShm);
-static void brcms_c_radio_hwdisable_upd(struct brcms_c_info *wlc);
-static bool brcms_c_radio_monitor_start(struct brcms_c_info *wlc);
-static void brcms_c_radio_timer(void *arg);
-static void brcms_c_radio_enable(struct brcms_c_info *wlc);
-static void brcms_c_radio_upd(struct brcms_c_info *wlc);
-
-/* scan, association, BSS */
-static uint brcms_c_calc_ba_time(struct brcms_c_info *wlc, ratespec_t rate,
- u8 preamble_type);
-static void brcms_c_update_mimo_band_bwcap(struct brcms_c_info *wlc, u8 bwcap);
-static void brcms_c_ht_update_sgi_rx(struct brcms_c_info *wlc, int val);
-static void brcms_c_ht_update_ldpc(struct brcms_c_info *wlc, s8 val);
-static void brcms_c_war16165(struct brcms_c_info *wlc, bool tx);
-
-static void brcms_c_wme_retries_write(struct brcms_c_info *wlc);
-static bool brcms_c_attach_stf_ant_init(struct brcms_c_info *wlc);
-static uint brcms_c_attach_module(struct brcms_c_info *wlc);
-static void brcms_c_detach_module(struct brcms_c_info *wlc);
-static void brcms_c_timers_deinit(struct brcms_c_info *wlc);
-static void brcms_c_down_led_upd(struct brcms_c_info *wlc);
-static uint brcms_c_down_del_timer(struct brcms_c_info *wlc);
-static void brcms_c_ofdm_rateset_war(struct brcms_c_info *wlc);
-static int _brcms_c_ioctl(struct brcms_c_info *wlc, int cmd, void *arg, int len,
- struct brcms_c_if *wlcif);
-
-/* conditions under which the PM bit should be set in outgoing frames and STAY_AWAKE is meaningful
- */
-bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
-{
- int idx;
- struct brcms_bss_cfg *cfg;
-
- /* disallow PS when one of the following global conditions meets */
- if (!wlc->pub->associated)
- return false;
-
- /* disallow PS when one of these meets when not scanning */
- if (AP_ACTIVE(wlc) || wlc->monitor)
- return false;
-
- for (idx = 0; idx < BRCMS_MAXBSSCFG; idx++) {
- cfg = wlc->bsscfg[idx];
- if (cfg && BSSCFG_STA(cfg) && cfg->associated) {
- /*
- * disallow PS when one of the following
- * bsscfg specific conditions meets
- */
- if (!cfg->BSS || !BRCMS_PORTOPEN(cfg))
- return false;
-
- if (!cfg->dtim_programmed)
- return false;
- }
- }
-
- return true;
-}
-
-void brcms_c_reset(struct brcms_c_info *wlc)
-{
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
- /* slurp up hw mac counters before core reset */
- brcms_c_statsupd(wlc);
-
- /* reset our snapshot of macstat counters */
- memset((char *)wlc->core->macstat_snapshot, 0,
- sizeof(struct macstat));
-
- brcms_b_reset(wlc->hw);
-}
-
-void brcms_c_fatal_error(struct brcms_c_info *wlc)
-{
- wiphy_err(wlc->wiphy, "wl%d: fatal error, reinitializing\n",
- wlc->pub->unit);
- brcms_init(wlc->wl);
-}
-
-/* Return the channel the driver should initialize during brcms_c_init.
- * the channel may have to be changed from the currently configured channel
- * if other configurations are in conflict (bandlocked, 11n mode disabled,
- * invalid channel for current country, etc.)
- */
-static chanspec_t brcms_c_init_chanspec(struct brcms_c_info *wlc)
-{
- chanspec_t chanspec =
- 1 | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE |
- WL_CHANSPEC_BAND_2G;
-
- return chanspec;
-}
-
-struct scb global_scb;
-
-static void brcms_c_init_scb(struct brcms_c_info *wlc, struct scb *scb)
-{
- int i;
- scb->flags = SCB_WMECAP | SCB_HTCAP;
- for (i = 0; i < NUMPRIO; i++)
- scb->seqnum[i] = 0;
-}
-
-void brcms_c_init(struct brcms_c_info *wlc)
-{
- d11regs_t *regs;
- chanspec_t chanspec;
- int i;
- struct brcms_bss_cfg *bsscfg;
- bool mute = false;
-
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
- regs = wlc->regs;
-
- /* This will happen if a big-hammer was executed. In that case, we want to go back
- * to the channel that we were on and not new channel
- */
- if (wlc->pub->associated)
- chanspec = wlc->home_chanspec;
- else
- chanspec = brcms_c_init_chanspec(wlc);
-
- brcms_b_init(wlc->hw, chanspec, mute);
-
- /* update beacon listen interval */
- brcms_c_bcn_li_upd(wlc);
-
- /* the world is new again, so is our reported rate */
- brcms_c_reprate_init(wlc);
-
- /* write ethernet address to core */
- FOREACH_BSS(wlc, i, bsscfg) {
- brcms_c_set_mac(bsscfg);
- brcms_c_set_bssid(bsscfg);
- }
-
- /* Update tsf_cfprep if associated and up */
- if (wlc->pub->associated) {
- FOREACH_BSS(wlc, i, bsscfg) {
- if (bsscfg->up) {
- u32 bi;
-
- /* get beacon period and convert to uS */
- bi = bsscfg->current_bss->beacon_period << 10;
- /*
- * update since init path would reset
- * to default value
- */
- W_REG(&regs->tsf_cfprep,
- (bi << CFPREP_CBI_SHIFT));
-
- /* Update maccontrol PM related bits */
- brcms_c_set_ps_ctrl(wlc);
-
- break;
- }
- }
- }
-
- brcms_c_bandinit_ordered(wlc, chanspec);
-
- brcms_c_init_scb(wlc, &global_scb);
-
- /* init probe response timeout */
- brcms_c_write_shm(wlc, M_PRS_MAXTIME, wlc->prb_resp_timeout);
-
- /* init max burst txop (framebursting) */
- brcms_c_write_shm(wlc, M_MBURST_TXOP,
- (wlc->
- _rifs ? (EDCF_AC_VO_TXOP_AP << 5) : MAXFRAMEBURST_TXOP));
-
- /* initialize maximum allowed duty cycle */
- brcms_c_duty_cycle_set(wlc, wlc->tx_duty_cycle_ofdm, true, true);
- brcms_c_duty_cycle_set(wlc, wlc->tx_duty_cycle_cck, false, true);
-
- /* Update some shared memory locations related to max AMPDU size allowed to received */
- brcms_c_ampdu_shm_upd(wlc->ampdu);
-
- /* band-specific inits */
- brcms_c_bsinit(wlc);
-
- /* Enable EDCF mode (while the MAC is suspended) */
- if (EDCF_ENAB(wlc->pub)) {
- OR_REG(&regs->ifs_ctl, IFS_USEEDCF);
- brcms_c_edcf_setparams(wlc, false);
- }
-
- /* Init precedence maps for empty FIFOs */
- brcms_c_tx_prec_map_init(wlc);
-
- /* read the ucode version if we have not yet done so */
- if (wlc->ucode_rev == 0) {
- wlc->ucode_rev =
- brcms_c_read_shm(wlc, M_BOM_REV_MAJOR) << NBITS(u16);
- wlc->ucode_rev |= brcms_c_read_shm(wlc, M_BOM_REV_MINOR);
- }
-
- /* ..now really unleash hell (allow the MAC out of suspend) */
- brcms_c_enable_mac(wlc);
-
- /* clear tx flow control */
- brcms_c_txflowcontrol_reset(wlc);
-
- /* clear tx data fifo suspends */
- wlc->tx_suspended = false;
-
- /* enable the RF Disable Delay timer */
- W_REG(&wlc->regs->rfdisabledly, RFDISABLE_DEFAULT);
-
- /* initialize mpc delay */
- wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
-
- /*
- * Initialize WME parameters; if they haven't been set by some other
- * mechanism (IOVar, etc) then read them from the hardware.
- */
- if (BRCMS_WME_RETRY_SHORT_GET(wlc, 0) == 0) {
- /* Uninitialized; read from HW */
- int ac;
-
- for (ac = 0; ac < AC_COUNT; ac++) {
- wlc->wme_retries[ac] =
- brcms_c_read_shm(wlc, M_AC_TXLMT_ADDR(ac));
- }
- }
-}
-
-void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc)
-{
- wlc->bcnmisc_monitor = promisc;
- brcms_c_mac_bcn_promisc(wlc);
-}
-
-void brcms_c_mac_bcn_promisc(struct brcms_c_info *wlc)
-{
- if ((AP_ENAB(wlc->pub) && (N_ENAB(wlc->pub) || wlc->band->gmode)) ||
- wlc->bcnmisc_ibss || wlc->bcnmisc_scan || wlc->bcnmisc_monitor)
- brcms_c_mctrl(wlc, MCTL_BCNS_PROMISC, MCTL_BCNS_PROMISC);
- else
- brcms_c_mctrl(wlc, MCTL_BCNS_PROMISC, 0);
-}
-
-/* set or clear maccontrol bits MCTL_PROMISC and MCTL_KEEPCONTROL */
-void brcms_c_mac_promisc(struct brcms_c_info *wlc)
-{
- u32 promisc_bits = 0;
-
- /* promiscuous mode just sets MCTL_PROMISC
- * Note: APs get all BSS traffic without the need to set the MCTL_PROMISC bit
- * since all BSS data traffic is directed at the AP
- */
- if (PROMISC_ENAB(wlc->pub) && !AP_ENAB(wlc->pub))
- promisc_bits |= MCTL_PROMISC;
-
- /* monitor mode needs both MCTL_PROMISC and MCTL_KEEPCONTROL
- * Note: monitor mode also needs MCTL_BCNS_PROMISC, but that is
- * handled in brcms_c_mac_bcn_promisc()
- */
- if (MONITOR_ENAB(wlc))
- promisc_bits |= MCTL_PROMISC | MCTL_KEEPCONTROL;
-
- brcms_c_mctrl(wlc, MCTL_PROMISC | MCTL_KEEPCONTROL, promisc_bits);
-}
-
-/* push sw hps and wake state through hardware */
-void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
-{
- u32 v1, v2;
- bool hps;
- bool awake_before;
-
- hps = PS_ALLOWED(wlc);
-
- BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps);
-
- v1 = R_REG(&wlc->regs->maccontrol);
- v2 = MCTL_WAKE;
- if (hps)
- v2 |= MCTL_HPS;
-
- brcms_c_mctrl(wlc, MCTL_WAKE | MCTL_HPS, v2);
-
- awake_before = ((v1 & MCTL_WAKE) || ((v1 & MCTL_HPS) == 0));
-
- if (!awake_before)
- brcms_b_wait_for_wake(wlc->hw);
-
-}
-
-/*
- * Write this BSS config's MAC address to core.
- * Updates RXE match engine.
- */
-int brcms_c_set_mac(struct brcms_bss_cfg *cfg)
-{
- int err = 0;
- struct brcms_c_info *wlc = cfg->wlc;
-
- if (cfg == wlc->cfg) {
- /* enter the MAC addr into the RXE match registers */
- brcms_c_set_addrmatch(wlc, RCM_MAC_OFFSET, cfg->cur_etheraddr);
- }
-
- brcms_c_ampdu_macaddr_upd(wlc);
-
- return err;
-}
-
-/* Write the BSS config's BSSID address to core (set_bssid in d11procs.tcl).
- * Updates RXE match engine.
- */
-void brcms_c_set_bssid(struct brcms_bss_cfg *cfg)
-{
- struct brcms_c_info *wlc = cfg->wlc;
-
- /* if primary config, we need to update BSSID in RXE match registers */
- if (cfg == wlc->cfg) {
- brcms_c_set_addrmatch(wlc, RCM_BSSID_OFFSET, cfg->BSSID);
- }
-#ifdef SUPPORT_HWKEYS
- else if (BSSCFG_STA(cfg) && cfg->BSS) {
- brcms_c_rcmta_add_bssid(wlc, cfg);
- }
-#endif
-}
-
-/*
- * Suspend the the MAC and update the slot timing
- * for standard 11b/g (20us slots) or shortslot 11g (9us slots).
- */
-void brcms_c_switch_shortslot(struct brcms_c_info *wlc, bool shortslot)
-{
- int idx;
- struct brcms_bss_cfg *cfg;
-
- /* use the override if it is set */
- if (wlc->shortslot_override != BRCMS_SHORTSLOT_AUTO)
- shortslot = (wlc->shortslot_override == BRCMS_SHORTSLOT_ON);
-
- if (wlc->shortslot == shortslot)
- return;
-
- wlc->shortslot = shortslot;
-
- /* update the capability based on current shortslot mode */
- FOREACH_BSS(wlc, idx, cfg) {
- if (!cfg->associated)
- continue;
- cfg->current_bss->capability &=
- ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
- if (wlc->shortslot)
- cfg->current_bss->capability |=
- WLAN_CAPABILITY_SHORT_SLOT_TIME;
- }
-
- brcms_b_set_shortslot(wlc->hw, shortslot);
-}
-
-static u8 brcms_c_local_constraint_qdbm(struct brcms_c_info *wlc)
-{
- u8 local;
- s16 local_max;
-
- local = BRCMS_TXPWR_MAX;
- if (wlc->pub->associated &&
- (brcmu_chspec_ctlchan(wlc->chanspec) ==
- brcmu_chspec_ctlchan(wlc->home_chanspec))) {
-
- /* get the local power constraint if we are on the AP's
- * channel [802.11h, 7.3.2.13]
- */
- /* Clamp the value between 0 and BRCMS_TXPWR_MAX w/o
- * overflowing the target */
- local_max =
- (wlc->txpwr_local_max -
- wlc->txpwr_local_constraint) * BRCMS_TXPWR_DB_FACTOR;
- if (local_max > 0 && local_max < BRCMS_TXPWR_MAX)
- return (u8) local_max;
- if (local_max < 0)
- return 0;
- }
-
- return local;
-}
-
-/* propagate home chanspec to all bsscfgs in case bsscfg->current_bss->chanspec is referenced */
-void brcms_c_set_home_chanspec(struct brcms_c_info *wlc, chanspec_t chanspec)
-{
- if (wlc->home_chanspec != chanspec) {
- int idx;
- struct brcms_bss_cfg *cfg;
-
- wlc->home_chanspec = chanspec;
-
- FOREACH_BSS(wlc, idx, cfg) {
- if (!cfg->associated)
- continue;
-
- cfg->current_bss->chanspec = chanspec;
- }
-
- }
-}
-
-static void brcms_c_set_phy_chanspec(struct brcms_c_info *wlc,
- chanspec_t chanspec)
-{
- /* Save our copy of the chanspec */
- wlc->chanspec = chanspec;
-
- /* Set the chanspec and power limits for this locale after computing
- * any 11h local tx power constraints.
- */
- brcms_c_channel_set_chanspec(wlc->cmi, chanspec,
- brcms_c_local_constraint_qdbm(wlc));
-
- if (wlc->stf->ss_algosel_auto)
- brcms_c_stf_ss_algo_channel_get(wlc, &wlc->stf->ss_algo_channel,
- chanspec);
-
- brcms_c_stf_ss_update(wlc, wlc->band);
-
-}
-
-void brcms_c_set_chanspec(struct brcms_c_info *wlc, chanspec_t chanspec)
-{
- uint bandunit;
- bool switchband = false;
- chanspec_t old_chanspec = wlc->chanspec;
-
- if (!brcms_c_valid_chanspec_db(wlc->cmi, chanspec)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Bad channel %d\n",
- wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec));
- return;
- }
-
- /* Switch bands if necessary */
- if (NBANDS(wlc) > 1) {
- bandunit = CHSPEC_BANDUNIT(chanspec);
- if (wlc->band->bandunit != bandunit || wlc->bandinit_pending) {
- switchband = true;
- if (wlc->bandlocked) {
- wiphy_err(wlc->wiphy, "wl%d: %s: chspec %d "
- "band is locked!\n",
- wlc->pub->unit, __func__,
- CHSPEC_CHANNEL(chanspec));
- return;
- }
- /*
- * should the setband call come after the
- * brcms_b_chanspec() ? if the setband updates
- * (brcms_c_bsinit) use low level calls to inspect and
- * set state, the state inspected may be from the wrong
- * band, or the following brcms_b_set_chanspec() may
- * undo the work.
- */
- brcms_c_setband(wlc, bandunit);
- }
- }
-
- /* sync up phy/radio chanspec */
- brcms_c_set_phy_chanspec(wlc, chanspec);
-
- /* init antenna selection */
- if (CHSPEC_WLC_BW(old_chanspec) != CHSPEC_WLC_BW(chanspec)) {
- brcms_c_antsel_init(wlc->asi);
-
- /* Fix the hardware rateset based on bw.
- * Mainly add MCS32 for 40Mhz, remove MCS 32 for 20Mhz
- */
- brcms_c_rateset_bw_mcs_filter(&wlc->band->hw_rateset,
- wlc->band->
- mimo_cap_40 ? CHSPEC_WLC_BW(chanspec)
- : 0);
- }
-
- /* update some mac configuration since chanspec changed */
- brcms_c_ucode_mac_upd(wlc);
-}
-
-ratespec_t brcms_c_lowest_basic_rspec(struct brcms_c_info *wlc,
- wlc_rateset_t *rs)
-{
- ratespec_t lowest_basic_rspec;
- uint i;
-
- /* Use the lowest basic rate */
- lowest_basic_rspec = rs->rates[0] & BRCMS_RATE_MASK;
- for (i = 0; i < rs->count; i++) {
- if (rs->rates[i] & BRCMS_RATE_FLAG) {
- lowest_basic_rspec = rs->rates[i] & BRCMS_RATE_MASK;
- break;
- }
- }
-#if NCONF
- /* pick siso/cdd as default for OFDM (note no basic rate MCSs are supported yet) */
- if (IS_OFDM(lowest_basic_rspec)) {
- lowest_basic_rspec |= (wlc->stf->ss_opmode << RSPEC_STF_SHIFT);
- }
-#endif
-
- return lowest_basic_rspec;
-}
-
-/* This function changes the phytxctl for beacon based on current beacon ratespec AND txant
- * setting as per this table:
- * ratespec CCK ant = wlc->stf->txant
- * OFDM ant = 3
- */
-void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
- ratespec_t bcn_rspec)
-{
- u16 phyctl;
- u16 phytxant = wlc->stf->phytxant;
- u16 mask = PHY_TXC_ANT_MASK;
-
- /* for non-siso rates or default setting, use the available chains */
- if (BRCMS_PHY_11N_CAP(wlc->band))
- phytxant = brcms_c_stf_phytxchain_sel(wlc, bcn_rspec);
-
- phyctl = brcms_c_read_shm(wlc, M_BCN_PCTLWD);
- phyctl = (phyctl & ~mask) | phytxant;
- brcms_c_write_shm(wlc, M_BCN_PCTLWD, phyctl);
-}
-
-/* centralized protection config change function to simplify debugging, no consistency checking
- * this should be called only on changes to avoid overhead in periodic function
-*/
-void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val)
-{
- BCMMSG(wlc->wiphy, "idx %d, val %d\n", idx, val);
-
- switch (idx) {
- case BRCMS_PROT_G_SPEC:
- wlc->protection->_g = (bool) val;
- break;
- case BRCMS_PROT_G_OVR:
- wlc->protection->g_override = (s8) val;
- break;
- case BRCMS_PROT_G_USER:
- wlc->protection->gmode_user = (u8) val;
- break;
- case BRCMS_PROT_OVERLAP:
- wlc->protection->overlap = (s8) val;
- break;
- case BRCMS_PROT_N_USER:
- wlc->protection->nmode_user = (s8) val;
- break;
- case BRCMS_PROT_N_CFG:
- wlc->protection->n_cfg = (s8) val;
- break;
- case BRCMS_PROT_N_CFG_OVR:
- wlc->protection->n_cfg_override = (s8) val;
- break;
- case BRCMS_PROT_N_NONGF:
- wlc->protection->nongf = (bool) val;
- break;
- case BRCMS_PROT_N_NONGF_OVR:
- wlc->protection->nongf_override = (s8) val;
- break;
- case BRCMS_PROT_N_PAM_OVR:
- wlc->protection->n_pam_override = (s8) val;
- break;
- case BRCMS_PROT_N_OBSS:
- wlc->protection->n_obss = (bool) val;
- break;
-
- default:
- break;
- }
-
-}
-
-static void brcms_c_ht_update_sgi_rx(struct brcms_c_info *wlc, int val)
-{
- wlc->ht_cap.cap_info &= ~(IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40);
- wlc->ht_cap.cap_info |= (val & BRCMS_N_SGI_20) ?
- IEEE80211_HT_CAP_SGI_20 : 0;
- wlc->ht_cap.cap_info |= (val & BRCMS_N_SGI_40) ?
- IEEE80211_HT_CAP_SGI_40 : 0;
-
- if (wlc->pub->up) {
- brcms_c_update_beacon(wlc);
- brcms_c_update_probe_resp(wlc, true);
- }
-}
-
-static void brcms_c_ht_update_ldpc(struct brcms_c_info *wlc, s8 val)
-{
- wlc->stf->ldpc = val;
-
- wlc->ht_cap.cap_info &= ~IEEE80211_HT_CAP_LDPC_CODING;
- if (wlc->stf->ldpc != OFF)
- wlc->ht_cap.cap_info |= IEEE80211_HT_CAP_LDPC_CODING;
-
- if (wlc->pub->up) {
- brcms_c_update_beacon(wlc);
- brcms_c_update_probe_resp(wlc, true);
- wlc_phy_ldpc_override_set(wlc->band->pi, (val ? true : false));
- }
-}
-
-/*
- * ucode, hwmac update
- * Channel dependent updates for ucode and hw
- */
-static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
-{
- /* enable or disable any active IBSSs depending on whether or not
- * we are on the home channel
- */
- if (wlc->home_chanspec == BRCMS_BAND_PI_RADIO_CHANSPEC) {
- if (wlc->pub->associated) {
- /* BMAC_NOTE: This is something that should be fixed in ucode inits.
- * I think that the ucode inits set up the bcn templates and shm values
- * with a bogus beacon. This should not be done in the inits. If ucode needs
- * to set up a beacon for testing, the test routines should write it down,
- * not expect the inits to populate a bogus beacon.
- */
- if (BRCMS_PHY_11N_CAP(wlc->band)) {
- brcms_c_write_shm(wlc, M_BCN_TXTSF_OFFSET,
- wlc->band->bcntsfoff);
- }
- }
- } else {
- /* disable an active IBSS if we are not on the home channel */
- }
-
- /* update the various promisc bits */
- brcms_c_mac_bcn_promisc(wlc);
- brcms_c_mac_promisc(wlc);
-}
-
-static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
- chanspec_t chanspec)
-{
- wlc_rateset_t default_rateset;
- uint parkband;
- uint i, band_order[2];
-
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
- /*
- * We might have been bandlocked during down and the chip power-cycled (hibernate).
- * figure out the right band to park on
- */
- if (wlc->bandlocked || NBANDS(wlc) == 1) {
- /* updated in brcms_c_bandlock() */
- parkband = wlc->band->bandunit;
- band_order[0] = band_order[1] = parkband;
- } else {
- /* park on the band of the specified chanspec */
- parkband = CHSPEC_BANDUNIT(chanspec);
-
- /* order so that parkband initialize last */
- band_order[0] = parkband ^ 1;
- band_order[1] = parkband;
- }
-
- /* make each band operational, software state init */
- for (i = 0; i < NBANDS(wlc); i++) {
- uint j = band_order[i];
-
- wlc->band = wlc->bandstate[j];
-
- brcms_default_rateset(wlc, &default_rateset);
-
- /* fill in hw_rate */
- brcms_c_rateset_filter(&default_rateset, &wlc->band->hw_rateset,
- false, BRCMS_RATES_CCK_OFDM, BRCMS_RATE_MASK,
- (bool) N_ENAB(wlc->pub));
-
- /* init basic rate lookup */
- brcms_c_rate_lookup_init(wlc, &default_rateset);
- }
-
- /* sync up phy/radio chanspec */
- brcms_c_set_phy_chanspec(wlc, chanspec);
-}
-
-/* band-specific init */
-static void brcms_c_bsinit(struct brcms_c_info *wlc)
-{
- BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n",
- wlc->pub->unit, wlc->band->bandunit);
-
- /* write ucode ACK/CTS rate table */
- brcms_c_set_ratetable(wlc);
-
- /* update some band specific mac configuration */
- brcms_c_ucode_mac_upd(wlc);
-
- /* init antenna selection */
- brcms_c_antsel_init(wlc->asi);
-
-}
-
-/* switch to and initialize new band */
-static void brcms_c_setband(struct brcms_c_info *wlc,
- uint bandunit)
-{
- int idx;
- struct brcms_bss_cfg *cfg;
-
- wlc->band = wlc->bandstate[bandunit];
-
- if (!wlc->pub->up)
- return;
-
- /* wait for at least one beacon before entering sleeping state */
- for (idx = 0; idx < BRCMS_MAXBSSCFG; idx++) {
- cfg = wlc->bsscfg[idx];
- if (cfg && BSSCFG_STA(cfg) && cfg->associated)
- cfg->PMawakebcn = true;
- }
- brcms_c_set_ps_ctrl(wlc);
-
- /* band-specific initializations */
- brcms_c_bsinit(wlc);
-}
-
-/* Initialize a WME Parameter Info Element with default STA parameters from WMM Spec, Table 12 */
-void
-brcms_c_wme_initparams_sta(struct brcms_c_info *wlc, struct wme_param_ie *pe)
-{
- static const struct wme_param_ie stadef = {
- WME_OUI,
- WME_TYPE,
- WME_SUBTYPE_PARAM_IE,
- WME_VER,
- 0,
- 0,
- {
- {EDCF_AC_BE_ACI_STA, EDCF_AC_BE_ECW_STA,
- cpu_to_le16(EDCF_AC_BE_TXOP_STA)},
- {EDCF_AC_BK_ACI_STA, EDCF_AC_BK_ECW_STA,
- cpu_to_le16(EDCF_AC_BK_TXOP_STA)},
- {EDCF_AC_VI_ACI_STA, EDCF_AC_VI_ECW_STA,
- cpu_to_le16(EDCF_AC_VI_TXOP_STA)},
- {EDCF_AC_VO_ACI_STA, EDCF_AC_VO_ECW_STA,
- cpu_to_le16(EDCF_AC_VO_TXOP_STA)}
- }
- };
- memcpy(pe, &stadef, sizeof(*pe));
-}
-
-void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
- const struct ieee80211_tx_queue_params *params,
- bool suspend)
-{
- int i;
- struct shm_acparams acp_shm;
- u16 *shm_entry;
-
- /* Only apply params if the core is out of reset and has clocks */
- if (!wlc->clk) {
- wiphy_err(wlc->wiphy, "wl%d: %s : no-clock\n", wlc->pub->unit,
- __func__);
- return;
- }
-
- do {
- memset((char *)&acp_shm, 0, sizeof(struct shm_acparams));
- /* fill in shm ac params struct */
- acp_shm.txop = le16_to_cpu(params->txop);
- /* convert from units of 32us to us for ucode */
- wlc->edcf_txop[aci & 0x3] = acp_shm.txop =
- EDCF_TXOP2USEC(acp_shm.txop);
- acp_shm.aifs = (params->aifs & EDCF_AIFSN_MASK);
-
- if (aci == AC_VI && acp_shm.txop == 0
- && acp_shm.aifs < EDCF_AIFSN_MAX)
- acp_shm.aifs++;
-
- if (acp_shm.aifs < EDCF_AIFSN_MIN
- || acp_shm.aifs > EDCF_AIFSN_MAX) {
- wiphy_err(wlc->wiphy, "wl%d: edcf_setparams: bad "
- "aifs %d\n", wlc->pub->unit, acp_shm.aifs);
- continue;
- }
-
- acp_shm.cwmin = params->cw_min;
- acp_shm.cwmax = params->cw_max;
- acp_shm.cwcur = acp_shm.cwmin;
- acp_shm.bslots =
- R_REG(&wlc->regs->tsf_random) & acp_shm.cwcur;
- acp_shm.reggap = acp_shm.bslots + acp_shm.aifs;
- /* Indicate the new params to the ucode */
- acp_shm.status = brcms_c_read_shm(wlc, (M_EDCF_QINFO +
- wme_shmemacindex(aci) *
- M_EDCF_QLEN +
- M_EDCF_STATUS_OFF));
- acp_shm.status |= WME_STATUS_NEWAC;
-
- /* Fill in shm acparam table */
- shm_entry = (u16 *) &acp_shm;
- for (i = 0; i < (int)sizeof(struct shm_acparams); i += 2)
- brcms_c_write_shm(wlc,
- M_EDCF_QINFO +
- wme_shmemacindex(aci) * M_EDCF_QLEN + i,
- *shm_entry++);
-
- } while (0);
-
- if (suspend)
- brcms_c_suspend_mac_and_wait(wlc);
-
- if (suspend)
- brcms_c_enable_mac(wlc);
-
-}
-
-void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
-{
- u16 aci;
- int i_ac;
- struct edcf_acparam *edcf_acp;
-
- struct ieee80211_tx_queue_params txq_pars;
- struct ieee80211_tx_queue_params *params = &txq_pars;
-
- /*
- * AP uses AC params from wme_param_ie_ap.
- * AP advertises AC params from wme_param_ie.
- * STA uses AC params from wme_param_ie.
- */
-
- edcf_acp = (struct edcf_acparam *) &wlc->wme_param_ie.acparam[0];
-
- for (i_ac = 0; i_ac < AC_COUNT; i_ac++, edcf_acp++) {
- /* find out which ac this set of params applies to */
- aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
-
- /* fill in shm ac params struct */
- params->txop = edcf_acp->TXOP;
- params->aifs = edcf_acp->ACI;
-
- /* CWmin = 2^(ECWmin) - 1 */
- params->cw_min = EDCF_ECW2CW(edcf_acp->ECW & EDCF_ECWMIN_MASK);
- /* CWmax = 2^(ECWmax) - 1 */
- params->cw_max = EDCF_ECW2CW((edcf_acp->ECW & EDCF_ECWMAX_MASK)
- >> EDCF_ECWMAX_SHIFT);
- brcms_c_wme_setparams(wlc, aci, params, suspend);
- }
-
- if (suspend)
- brcms_c_suspend_mac_and_wait(wlc);
-
- if (AP_ENAB(wlc->pub) && WME_ENAB(wlc->pub)) {
- brcms_c_update_beacon(wlc);
- brcms_c_update_probe_resp(wlc, false);
- }
-
- if (suspend)
- brcms_c_enable_mac(wlc);
-
-}
-
-bool brcms_c_timers_init(struct brcms_c_info *wlc, int unit)
-{
- wlc->wdtimer = brcms_init_timer(wlc->wl, brcms_c_watchdog_by_timer,
- wlc, "watchdog");
- if (!wlc->wdtimer) {
- wiphy_err(wlc->wiphy, "wl%d: wl_init_timer for wdtimer "
- "failed\n", unit);
- goto fail;
- }
-
- wlc->radio_timer = brcms_init_timer(wlc->wl, brcms_c_radio_timer,
- wlc, "radio");
- if (!wlc->radio_timer) {
- wiphy_err(wlc->wiphy, "wl%d: wl_init_timer for radio_timer "
- "failed\n", unit);
- goto fail;
- }
-
- return true;
-
- fail:
- return false;
-}
-
-/*
- * Initialize brcms_c_info default values ...
- * may get overrides later in this function
- */
-void brcms_c_info_init(struct brcms_c_info *wlc, int unit)
-{
- int i;
- /* Assume the device is there until proven otherwise */
- wlc->device_present = true;
-
- /* Save our copy of the chanspec */
- wlc->chanspec = CH20MHZ_CHSPEC(1);
-
- /* various 802.11g modes */
- wlc->shortslot = false;
- wlc->shortslot_override = BRCMS_SHORTSLOT_AUTO;
-
- brcms_c_protection_upd(wlc, BRCMS_PROT_G_OVR, BRCMS_PROTECTION_AUTO);
- brcms_c_protection_upd(wlc, BRCMS_PROT_G_SPEC, false);
-
- brcms_c_protection_upd(wlc, BRCMS_PROT_N_CFG_OVR,
- BRCMS_PROTECTION_AUTO);
- brcms_c_protection_upd(wlc, BRCMS_PROT_N_CFG, BRCMS_N_PROTECTION_OFF);
- brcms_c_protection_upd(wlc, BRCMS_PROT_N_NONGF_OVR,
- BRCMS_PROTECTION_AUTO);
- brcms_c_protection_upd(wlc, BRCMS_PROT_N_NONGF, false);
- brcms_c_protection_upd(wlc, BRCMS_PROT_N_PAM_OVR, AUTO);
-
- brcms_c_protection_upd(wlc, BRCMS_PROT_OVERLAP,
- BRCMS_PROTECTION_CTL_OVERLAP);
-
- /* 802.11g draft 4.0 NonERP elt advertisement */
- wlc->include_legacy_erp = true;
-
- wlc->stf->ant_rx_ovr = ANT_RX_DIV_DEF;
- wlc->stf->txant = ANT_TX_DEF;
-
- wlc->prb_resp_timeout = BRCMS_PRB_RESP_TIMEOUT;
-
- wlc->usr_fragthresh = DOT11_DEFAULT_FRAG_LEN;
- for (i = 0; i < NFIFO; i++)
- wlc->fragthresh[i] = DOT11_DEFAULT_FRAG_LEN;
- wlc->RTSThresh = DOT11_DEFAULT_RTS_LEN;
-
- /* default rate fallback retry limits */
- wlc->SFBL = RETRY_SHORT_FB;
- wlc->LFBL = RETRY_LONG_FB;
-
- /* default mac retry limits */
- wlc->SRL = RETRY_SHORT_DEF;
- wlc->LRL = RETRY_LONG_DEF;
-
- /* Set flag to indicate that hw keys should be used when available. */
- wlc->wsec_swkeys = false;
-
- /* init the 4 static WEP default keys */
- for (i = 0; i < WSEC_MAX_DEFAULT_KEYS; i++) {
- wlc->wsec_keys[i] = wlc->wsec_def_keys[i];
- wlc->wsec_keys[i]->idx = (u8) i;
- }
-
- /* WME QoS mode is Auto by default */
- wlc->pub->_wme = AUTO;
-
-#ifdef BCMSDIODEV_ENABLED
- wlc->pub->_priofc = true; /* enable priority flow control for sdio dongle */
-#endif
-
- wlc->pub->_ampdu = AMPDU_AGG_HOST;
- wlc->pub->bcmerror = 0;
- wlc->pub->_coex = ON;
-
- /* initialize mpc delay */
- wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
-}
-
-static bool brcms_c_state_bmac_sync(struct brcms_c_info *wlc)
-{
- struct brcms_b_state state_bmac;
-
- if (brcms_b_state_get(wlc->hw, &state_bmac) != 0)
- return false;
-
- wlc->machwcap = state_bmac.machwcap;
- brcms_c_protection_upd(wlc, BRCMS_PROT_N_PAM_OVR,
- (s8) state_bmac.preamble_ovr);
-
- return true;
-}
-
-static uint brcms_c_attach_module(struct brcms_c_info *wlc)
-{
- uint err = 0;
- uint unit;
- unit = wlc->pub->unit;
-
- wlc->asi = brcms_c_antsel_attach(wlc);
- if (wlc->asi == NULL) {
- wiphy_err(wlc->wiphy, "wl%d: attach: antsel_attach "
- "failed\n", unit);
- err = 44;
- goto fail;
- }
-
- wlc->ampdu = brcms_c_ampdu_attach(wlc);
- if (wlc->ampdu == NULL) {
- wiphy_err(wlc->wiphy, "wl%d: attach: ampdu_attach "
- "failed\n", unit);
- err = 50;
- goto fail;
- }
-
- if ((brcms_c_stf_attach(wlc) != 0)) {
- wiphy_err(wlc->wiphy, "wl%d: attach: stf_attach "
- "failed\n", unit);
- err = 68;
- goto fail;
- }
- fail:
- return err;
-}
-
-struct brcms_pub *brcms_c_pub(void *wlc)
-{
- return ((struct brcms_c_info *) wlc)->pub;
-}
-
-#define CHIP_SUPPORTS_11N(wlc) 1
-
-/*
- * The common driver entry routine. Error codes should be unique
- */
-void *brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
- bool piomode, void *regsva, uint bustype, void *btparam,
- uint *perr)
-{
- struct brcms_c_info *wlc;
- uint err = 0;
- uint j;
- struct brcms_pub *pub;
- uint n_disabled;
-
- /* allocate struct brcms_c_info state and its substructures */
- wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, device);
- if (wlc == NULL)
- goto fail;
- wlc->wiphy = wl->wiphy;
- pub = wlc->pub;
-
-#if defined(BCMDBG)
- wlc_info_dbg = wlc;
-#endif
-
- wlc->band = wlc->bandstate[0];
- wlc->core = wlc->corestate;
- wlc->wl = wl;
- pub->unit = unit;
- pub->_piomode = piomode;
- wlc->bandinit_pending = false;
-
- /* populate struct brcms_c_info with default values */
- brcms_c_info_init(wlc, unit);
-
- /* update sta/ap related parameters */
- brcms_c_ap_upd(wlc);
-
- /* 11n_disable nvram */
- n_disabled = getintvar(pub->vars, "11n_disable");
-
- /*
- * low level attach steps(all hw accesses go
- * inside, no more in rest of the attach)
- */
- err = brcms_b_attach(wlc, vendor, device, unit, piomode, regsva,
- bustype, btparam);
- if (err)
- goto fail;
-
- /* for some states, due to different info pointer(e,g, wlc, wlc_hw) or master/slave split,
- * HIGH driver(both monolithic and HIGH_ONLY) needs to sync states FROM BMAC portion driver
- */
- if (!brcms_c_state_bmac_sync(wlc)) {
- err = 20;
- goto fail;
- }
-
- pub->phy_11ncapable = BRCMS_PHY_11N_CAP(wlc->band);
-
- /* propagate *vars* from BMAC driver to high driver */
- brcms_b_copyfrom_vars(wlc->hw, &pub->vars, &wlc->vars_size);
-
-
- /* set maximum allowed duty cycle */
- wlc->tx_duty_cycle_ofdm =
- (u16) getintvar(pub->vars, "tx_duty_cycle_ofdm");
- wlc->tx_duty_cycle_cck =
- (u16) getintvar(pub->vars, "tx_duty_cycle_cck");
-
- brcms_c_stf_phy_chain_calc(wlc);
-
- /* txchain 1: txant 0, txchain 2: txant 1 */
- if (BRCMS_ISNPHY(wlc->band) && (wlc->stf->txstreams == 1))
- wlc->stf->txant = wlc->stf->hw_txchain - 1;
-
- /* push to BMAC driver */
- wlc_phy_stf_chain_init(wlc->band->pi, wlc->stf->hw_txchain,
- wlc->stf->hw_rxchain);
-
- /* pull up some info resulting from the low attach */
- {
- int i;
- for (i = 0; i < NFIFO; i++)
- wlc->core->txavail[i] = wlc->hw->txavail[i];
- }
-
- brcms_b_hw_etheraddr(wlc->hw, wlc->perm_etheraddr);
-
- memcpy(&pub->cur_etheraddr, &wlc->perm_etheraddr, ETH_ALEN);
-
- for (j = 0; j < NBANDS(wlc); j++) {
- /* Use band 1 for single band 11a */
- if (IS_SINGLEBAND_5G(wlc->deviceid))
- j = BAND_5G_INDEX;
-
- wlc->band = wlc->bandstate[j];
-
- if (!brcms_c_attach_stf_ant_init(wlc)) {
- err = 24;
- goto fail;
- }
-
- /* default contention windows size limits */
- wlc->band->CWmin = APHY_CWMIN;
- wlc->band->CWmax = PHY_CWMAX;
-
- /* init gmode value */
- if (BAND_2G(wlc->band->bandtype)) {
- wlc->band->gmode = GMODE_AUTO;
- brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER,
- wlc->band->gmode);
- }
-
- /* init _n_enab supported mode */
- if (BRCMS_PHY_11N_CAP(wlc->band) && CHIP_SUPPORTS_11N(wlc)) {
- if (n_disabled & WLFEATURE_DISABLE_11N) {
- pub->_n_enab = OFF;
- brcms_c_protection_upd(wlc, BRCMS_PROT_N_USER,
- OFF);
- } else {
- pub->_n_enab = SUPPORT_11N;
- brcms_c_protection_upd(wlc, BRCMS_PROT_N_USER,
- ((pub->_n_enab ==
- SUPPORT_11N) ? WL_11N_2x2 :
- WL_11N_3x3));
- }
- }
-
- /* init per-band default rateset, depend on band->gmode */
- brcms_default_rateset(wlc, &wlc->band->defrateset);
-
- /* fill in hw_rateset (used early by BRCM_SET_RATESET) */
- brcms_c_rateset_filter(&wlc->band->defrateset,
- &wlc->band->hw_rateset, false,
- BRCMS_RATES_CCK_OFDM, BRCMS_RATE_MASK,
- (bool) N_ENAB(wlc->pub));
- }
-
- /* update antenna config due to wlc->stf->txant/txchain/ant_rx_ovr change */
- brcms_c_stf_phy_txant_upd(wlc);
-
- /* attach each modules */
- err = brcms_c_attach_module(wlc);
- if (err != 0)
- goto fail;
-
- if (!brcms_c_timers_init(wlc, unit)) {
- wiphy_err(wl->wiphy, "wl%d: %s: init_timer failed\n", unit,
- __func__);
- err = 32;
- goto fail;
- }
-
- /* depend on rateset, gmode */
- wlc->cmi = brcms_c_channel_mgr_attach(wlc);
- if (!wlc->cmi) {
- wiphy_err(wl->wiphy, "wl%d: %s: channel_mgr_attach failed"
- "\n", unit, __func__);
- err = 33;
- goto fail;
- }
-
- /* init default when all parameters are ready, i.e. ->rateset */
- brcms_c_bss_default_init(wlc);
-
- /*
- * Complete the wlc default state initializations..
- */
-
- /* allocate our initial queue */
- wlc->pkt_queue = brcms_c_txq_alloc(wlc);
- if (wlc->pkt_queue == NULL) {
- wiphy_err(wl->wiphy, "wl%d: %s: failed to malloc tx queue\n",
- unit, __func__);
- err = 100;
- goto fail;
- }
-
- wlc->bsscfg[0] = wlc->cfg;
- wlc->cfg->_idx = 0;
- wlc->cfg->wlc = wlc;
- pub->txmaxpkts = MAXTXPKTS;
-
- brcms_c_wme_initparams_sta(wlc, &wlc->wme_param_ie);
-
- wlc->mimoft = FT_HT;
- wlc->ht_cap.cap_info = HT_CAP;
- if (HT_ENAB(wlc->pub))
- wlc->stf->ldpc = AUTO;
-
- wlc->mimo_40txbw = AUTO;
- wlc->ofdm_40txbw = AUTO;
- wlc->cck_40txbw = AUTO;
- brcms_c_update_mimo_band_bwcap(wlc, BRCMS_N_BW_20IN2G_40IN5G);
-
- /* Set default values of SGI */
- if (BRCMS_SGI_CAP_PHY(wlc)) {
- brcms_c_ht_update_sgi_rx(wlc, (BRCMS_N_SGI_20 |
- BRCMS_N_SGI_40));
- wlc->sgi_tx = AUTO;
- } else if (BRCMS_ISSSLPNPHY(wlc->band)) {
- brcms_c_ht_update_sgi_rx(wlc, (BRCMS_N_SGI_20 |
- BRCMS_N_SGI_40));
- wlc->sgi_tx = AUTO;
- } else {
- brcms_c_ht_update_sgi_rx(wlc, 0);
- wlc->sgi_tx = OFF;
- }
-
- /* *******nvram 11n config overrides Start ********* */
-
- /* apply the sgi override from nvram conf */
- if (n_disabled & WLFEATURE_DISABLE_11N_SGI_TX)
- wlc->sgi_tx = OFF;
-
- if (n_disabled & WLFEATURE_DISABLE_11N_SGI_RX)
- brcms_c_ht_update_sgi_rx(wlc, 0);
-
- /* apply the stbc override from nvram conf */
- if (n_disabled & WLFEATURE_DISABLE_11N_STBC_TX) {
- wlc->bandstate[BAND_2G_INDEX]->band_stf_stbc_tx = OFF;
- wlc->bandstate[BAND_5G_INDEX]->band_stf_stbc_tx = OFF;
- wlc->ht_cap.cap_info &= ~IEEE80211_HT_CAP_TX_STBC;
- }
- if (n_disabled & WLFEATURE_DISABLE_11N_STBC_RX)
- brcms_c_stf_stbc_rx_set(wlc, HT_CAP_RX_STBC_NO);
-
- /* apply the GF override from nvram conf */
- if (n_disabled & WLFEATURE_DISABLE_11N_GF)
- wlc->ht_cap.cap_info &= ~IEEE80211_HT_CAP_GRN_FLD;
-
- /* initialize radio_mpc_disable according to wlc->mpc */
- brcms_c_radio_mpc_upd(wlc);
- brcms_b_antsel_set(wlc->hw, wlc->asi->antsel_avail);
-
- if (perr)
- *perr = 0;
-
- return (void *)wlc;
-
- fail:
- wiphy_err(wl->wiphy, "wl%d: %s: failed with err %d\n",
- unit, __func__, err);
- if (wlc)
- brcms_c_detach(wlc);
-
- if (perr)
- *perr = err;
- return NULL;
-}
-
-static void brcms_c_attach_antgain_init(struct brcms_c_info *wlc)
-{
- uint unit;
- unit = wlc->pub->unit;
-
- if ((wlc->band->antgain == -1) && (wlc->pub->sromrev == 1)) {
- /* default antenna gain for srom rev 1 is 2 dBm (8 qdbm) */
- wlc->band->antgain = 8;
- } else if (wlc->band->antgain == -1) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Invalid antennas available in"
- " srom, using 2dB\n", unit, __func__);
- wlc->band->antgain = 8;
- } else {
- s8 gain, fract;
- /* Older sroms specified gain in whole dbm only. In order
- * be able to specify qdbm granularity and remain backward compatible
- * the whole dbms are now encoded in only low 6 bits and remaining qdbms
- * are encoded in the hi 2 bits. 6 bit signed number ranges from
- * -32 - 31. Examples: 0x1 = 1 db,
- * 0xc1 = 1.75 db (1 + 3 quarters),
- * 0x3f = -1 (-1 + 0 quarters),
- * 0x7f = -.75 (-1 in low 6 bits + 1 quarters in hi 2 bits) = -3 qdbm.
- * 0xbf = -.50 (-1 in low 6 bits + 2 quarters in hi 2 bits) = -2 qdbm.
- */
- gain = wlc->band->antgain & 0x3f;
- gain <<= 2; /* Sign extend */
- gain >>= 2;
- fract = (wlc->band->antgain & 0xc0) >> 6;
- wlc->band->antgain = 4 * gain + fract;
- }
-}
-
-static bool brcms_c_attach_stf_ant_init(struct brcms_c_info *wlc)
-{
- int aa;
- uint unit;
- char *vars;
- int bandtype;
-
- unit = wlc->pub->unit;
- vars = wlc->pub->vars;
- bandtype = wlc->band->bandtype;
-
- /* get antennas available */
- aa = (s8) getintvar(vars, (BAND_5G(bandtype) ? "aa5g" : "aa2g"));
- if (aa == 0)
- aa = (s8) getintvar(vars,
- (BAND_5G(bandtype) ? "aa1" : "aa0"));
- if ((aa < 1) || (aa > 15)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Invalid antennas available in"
- " srom (0x%x), using 3\n", unit, __func__, aa);
- aa = 3;
- }
-
- /* reset the defaults if we have a single antenna */
- if (aa == 1) {
- wlc->stf->ant_rx_ovr = ANT_RX_DIV_FORCE_0;
- wlc->stf->txant = ANT_TX_FORCE_0;
- } else if (aa == 2) {
- wlc->stf->ant_rx_ovr = ANT_RX_DIV_FORCE_1;
- wlc->stf->txant = ANT_TX_FORCE_1;
- } else {
- }
-
- /* Compute Antenna Gain */
- wlc->band->antgain =
- (s8) getintvar(vars, (BAND_5G(bandtype) ? "ag1" : "ag0"));
- brcms_c_attach_antgain_init(wlc);
-
- return true;
-}
-
-
-static void brcms_c_timers_deinit(struct brcms_c_info *wlc)
-{
- /* free timer state */
- if (wlc->wdtimer) {
- brcms_free_timer(wlc->wl, wlc->wdtimer);
- wlc->wdtimer = NULL;
- }
- if (wlc->radio_timer) {
- brcms_free_timer(wlc->wl, wlc->radio_timer);
- wlc->radio_timer = NULL;
- }
-}
-
-static void brcms_c_detach_module(struct brcms_c_info *wlc)
-{
- if (wlc->asi) {
- brcms_c_antsel_detach(wlc->asi);
- wlc->asi = NULL;
- }
-
- if (wlc->ampdu) {
- brcms_c_ampdu_detach(wlc->ampdu);
- wlc->ampdu = NULL;
- }
-
- brcms_c_stf_detach(wlc);
-}
-
-/*
- * Return a count of the number of driver callbacks still pending.
- *
- * General policy is that brcms_c_detach can only dealloc/free software states.
- * It can NOT touch hardware registers since the d11core may be in reset and
- * clock may not be available.
- * One exception is sb register access, which is possible if crystal is turned
- * on after "down" state, driver should avoid software timer with the exception
- * of radio_monitor.
- */
-uint brcms_c_detach(struct brcms_c_info *wlc)
-{
- uint callbacks = 0;
-
- if (wlc == NULL)
- return 0;
-
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
- callbacks += brcms_b_detach(wlc);
-
- /* delete software timers */
- if (!brcms_c_radio_monitor_stop(wlc))
- callbacks++;
-
- brcms_c_channel_mgr_detach(wlc->cmi);
-
- brcms_c_timers_deinit(wlc);
-
- brcms_c_detach_module(wlc);
-
-
- while (wlc->tx_queues != NULL)
- brcms_c_txq_free(wlc, wlc->tx_queues);
-
- brcms_c_detach_mfree(wlc);
- return callbacks;
-}
-
-/* update state that depends on the current value of "ap" */
-void brcms_c_ap_upd(struct brcms_c_info *wlc)
-{
- if (AP_ENAB(wlc->pub))
- /* AP: short not allowed, but not enforced */
- wlc->PLCPHdr_override = BRCMS_PLCP_AUTO;
- else
- /* STA-BSS; short capable */
- wlc->PLCPHdr_override = BRCMS_PLCP_SHORT;
-
- /* fixup mpc */
- wlc->mpc = true;
-}
-
-/* read hwdisable state and propagate to wlc flag */
-static void brcms_c_radio_hwdisable_upd(struct brcms_c_info *wlc)
-{
- if (wlc->pub->wlfeatureflag & WL_SWFL_NOHWRADIO || wlc->pub->hw_off)
- return;
-
- if (brcms_b_radio_read_hwdisabled(wlc->hw)) {
- mboolset(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
- } else {
- mboolclr(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
- }
-}
-
-/* return true if Minimum Power Consumption should be entered, false otherwise */
-bool brcms_c_is_non_delay_mpc(struct brcms_c_info *wlc)
-{
- return false;
-}
-
-bool brcms_c_ismpc(struct brcms_c_info *wlc)
-{
- return (wlc->mpc_delay_off == 0) && (brcms_c_is_non_delay_mpc(wlc));
-}
-
-void brcms_c_radio_mpc_upd(struct brcms_c_info *wlc)
-{
- bool mpc_radio, radio_state;
-
- /*
- * Clear the WL_RADIO_MPC_DISABLE bit when mpc feature is disabled
- * in case the WL_RADIO_MPC_DISABLE bit was set. Stop the radio
- * monitor also when WL_RADIO_MPC_DISABLE is the only reason that
- * the radio is going down.
- */
- if (!wlc->mpc) {
- if (!wlc->pub->radio_disabled)
- return;
- mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
- brcms_c_radio_upd(wlc);
- if (!wlc->pub->radio_disabled)
- brcms_c_radio_monitor_stop(wlc);
- return;
- }
-
- /*
- * sync ismpc logic with WL_RADIO_MPC_DISABLE bit in wlc->pub->radio_disabled
- * to go ON, always call radio_upd synchronously
- * to go OFF, postpone radio_upd to later when context is safe(e.g. watchdog)
- */
- radio_state =
- (mboolisset(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE) ? OFF :
- ON);
- mpc_radio = (brcms_c_ismpc(wlc) == true) ? OFF : ON;
-
- if (radio_state == ON && mpc_radio == OFF)
- wlc->mpc_delay_off = wlc->mpc_dlycnt;
- else if (radio_state == OFF && mpc_radio == ON) {
- mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
- brcms_c_radio_upd(wlc);
- if (wlc->mpc_offcnt < BRCMS_MPC_THRESHOLD)
- wlc->mpc_dlycnt = BRCMS_MPC_MAX_DELAYCNT;
- else
- wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
- wlc->mpc_dur += OSL_SYSUPTIME() - wlc->mpc_laston_ts;
- }
- /* Below logic is meant to capture the transition from mpc off to mpc on for reasons
- * other than wlc->mpc_delay_off keeping the mpc off. In that case reset
- * wlc->mpc_delay_off to wlc->mpc_dlycnt, so that we restart the countdown of mpc_delay_off
- */
- if ((wlc->prev_non_delay_mpc == false) &&
- (brcms_c_is_non_delay_mpc(wlc) == true) && wlc->mpc_delay_off) {
- wlc->mpc_delay_off = wlc->mpc_dlycnt;
- }
- wlc->prev_non_delay_mpc = brcms_c_is_non_delay_mpc(wlc);
-}
-
-/*
- * centralized radio disable/enable function,
- * invoke radio enable/disable after updating hwradio status
- */
-static void brcms_c_radio_upd(struct brcms_c_info *wlc)
-{
- if (wlc->pub->radio_disabled) {
- brcms_c_radio_disable(wlc);
- } else {
- brcms_c_radio_enable(wlc);
- }
-}
-
-/* maintain LED behavior in down state */
-static void brcms_c_down_led_upd(struct brcms_c_info *wlc)
-{
- /* maintain LEDs while in down state, turn on sbclk if not available yet */
- /* turn on sbclk if necessary */
- if (!AP_ENAB(wlc->pub)) {
- brcms_c_pllreq(wlc, true, BRCMS_PLLREQ_FLIP);
-
- brcms_c_pllreq(wlc, false, BRCMS_PLLREQ_FLIP);
- }
-}
-
-/* update hwradio status and return it */
-bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc)
-{
- brcms_c_radio_hwdisable_upd(wlc);
-
- return mboolisset(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE) ? true : false;
-}
-
-void brcms_c_radio_disable(struct brcms_c_info *wlc)
-{
- if (!wlc->pub->up) {
- brcms_c_down_led_upd(wlc);
- return;
- }
-
- brcms_c_radio_monitor_start(wlc);
- brcms_down(wlc->wl);
-}
-
-static void brcms_c_radio_enable(struct brcms_c_info *wlc)
-{
- if (wlc->pub->up)
- return;
-
- if (DEVICEREMOVED(wlc))
- return;
-
- brcms_up(wlc->wl);
-}
-
-/* periodical query hw radio button while driver is "down" */
-static void brcms_c_radio_timer(void *arg)
-{
- struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
-
- if (DEVICEREMOVED(wlc)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
- __func__);
- brcms_down(wlc->wl);
- return;
- }
-
- /* cap mpc off count */
- if (wlc->mpc_offcnt < BRCMS_MPC_MAX_DELAYCNT)
- wlc->mpc_offcnt++;
-
- brcms_c_radio_hwdisable_upd(wlc);
- brcms_c_radio_upd(wlc);
-}
-
-static bool brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
-{
- /* Don't start the timer if HWRADIO feature is disabled */
- if (wlc->radio_monitor || (wlc->pub->wlfeatureflag & WL_SWFL_NOHWRADIO))
- return true;
-
- wlc->radio_monitor = true;
- brcms_c_pllreq(wlc, true, BRCMS_PLLREQ_RADIO_MON);
- brcms_add_timer(wlc->wl, wlc->radio_timer, TIMER_INTERVAL_RADIOCHK,
- true);
- return true;
-}
-
-bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc)
-{
- if (!wlc->radio_monitor)
- return true;
-
- wlc->radio_monitor = false;
- brcms_c_pllreq(wlc, false, BRCMS_PLLREQ_RADIO_MON);
- return brcms_del_timer(wlc->wl, wlc->radio_timer);
-}
-
-static void brcms_c_watchdog_by_timer(void *arg)
-{
- brcms_c_watchdog(arg);
-}
-
-/* common watchdog code */
-static void brcms_c_watchdog(void *arg)
-{
- struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
- int i;
- struct brcms_bss_cfg *cfg;
-
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
- if (!wlc->pub->up)
- return;
-
- if (DEVICEREMOVED(wlc)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
- __func__);
- brcms_down(wlc->wl);
- return;
- }
-
- /* increment second count */
- wlc->pub->now++;
-
- /* delay radio disable */
- if (wlc->mpc_delay_off) {
- if (--wlc->mpc_delay_off == 0) {
- mboolset(wlc->pub->radio_disabled,
- WL_RADIO_MPC_DISABLE);
- if (wlc->mpc && brcms_c_ismpc(wlc))
- wlc->mpc_offcnt = 0;
- wlc->mpc_laston_ts = OSL_SYSUPTIME();
- }
- }
-
- /* mpc sync */
- brcms_c_radio_mpc_upd(wlc);
- /* radio sync: sw/hw/mpc --> radio_disable/radio_enable */
- brcms_c_radio_hwdisable_upd(wlc);
- brcms_c_radio_upd(wlc);
- /* if radio is disable, driver may be down, quit here */
- if (wlc->pub->radio_disabled)
- return;
-
- brcms_b_watchdog(wlc);
-
- /* occasionally sample mac stat counters to detect 16-bit counter wrap */
- if ((wlc->pub->now % SW_TIMER_MAC_STAT_UPD) == 0)
- brcms_c_statsupd(wlc);
-
- /* Manage TKIP countermeasures timers */
- FOREACH_BSS(wlc, i, cfg) {
- if (cfg->tk_cm_dt) {
- cfg->tk_cm_dt--;
- }
- if (cfg->tk_cm_bt) {
- cfg->tk_cm_bt--;
- }
- }
-
- /* Call any registered watchdog handlers */
- for (i = 0; i < BRCMS_MAXMODULES; i++) {
- if (wlc->modulecb[i].watchdog_fn)
- wlc->modulecb[i].watchdog_fn(wlc->modulecb[i].hdl);
- }
-
- if (BRCMS_ISNPHY(wlc->band) && !wlc->pub->tempsense_disable &&
- ((wlc->pub->now - wlc->tempsense_lasttime) >=
- BRCMS_TEMPSENSE_PERIOD)) {
- wlc->tempsense_lasttime = wlc->pub->now;
- brcms_c_tempsense_upd(wlc);
- }
-}
-
-/* make interface operational */
-int brcms_c_up(struct brcms_c_info *wlc)
-{
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
- /* HW is turned off so don't try to access it */
- if (wlc->pub->hw_off || DEVICEREMOVED(wlc))
- return -ENOMEDIUM;
-
- if (!wlc->pub->hw_up) {
- brcms_b_hw_up(wlc->hw);
- wlc->pub->hw_up = true;
- }
-
- if ((wlc->pub->boardflags & BFL_FEM)
- && (wlc->pub->sih->chip == BCM4313_CHIP_ID)) {
- if (wlc->pub->boardrev >= 0x1250
- && (wlc->pub->boardflags & BFL_FEM_BT)) {
- brcms_c_mhf(wlc, MHF5, MHF5_4313_GPIOCTRL,
- MHF5_4313_GPIOCTRL, BRCM_BAND_ALL);
- } else {
- brcms_c_mhf(wlc, MHF4, MHF4_EXTPA_ENABLE,
- MHF4_EXTPA_ENABLE, BRCM_BAND_ALL);
- }
- }
-
- /*
- * Need to read the hwradio status here to cover the case where the system
- * is loaded with the hw radio disabled. We do not want to bring the driver up in this case.
- * if radio is disabled, abort up, lower power, start radio timer and return 0(for NDIS)
- * don't call radio_update to avoid looping brcms_c_up.
- *
- * brcms_b_up_prep() returns either 0 or -BCME_RADIOOFF only
- */
- if (!wlc->pub->radio_disabled) {
- int status = brcms_b_up_prep(wlc->hw);
- if (status == -ENOMEDIUM) {
- if (!mboolisset
- (wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE)) {
- int idx;
- struct brcms_bss_cfg *bsscfg;
- mboolset(wlc->pub->radio_disabled,
- WL_RADIO_HW_DISABLE);
-
- FOREACH_BSS(wlc, idx, bsscfg) {
- if (!BSSCFG_STA(bsscfg)
- || !bsscfg->enable || !bsscfg->BSS)
- continue;
- wiphy_err(wlc->wiphy, "wl%d.%d: up"
- ": rfdisable -> "
- "bsscfg_disable()\n",
- wlc->pub->unit, idx);
- }
- }
- }
- }
-
- if (wlc->pub->radio_disabled) {
- brcms_c_radio_monitor_start(wlc);
- return 0;
- }
-
- /* brcms_b_up_prep has done brcms_c_corereset(). so clk is on, set it */
- wlc->clk = true;
-
- brcms_c_radio_monitor_stop(wlc);
-
- /* Set EDCF hostflags */
- if (EDCF_ENAB(wlc->pub)) {
- brcms_c_mhf(wlc, MHF1, MHF1_EDCF, MHF1_EDCF, BRCM_BAND_ALL);
- } else {
- brcms_c_mhf(wlc, MHF1, MHF1_EDCF, 0, BRCM_BAND_ALL);
- }
-
- if (BRCMS_WAR16165(wlc))
- brcms_c_mhf(wlc, MHF2, MHF2_PCISLOWCLKWAR, MHF2_PCISLOWCLKWAR,
- BRCM_BAND_ALL);
-
- brcms_init(wlc->wl);
- wlc->pub->up = true;
-
- if (wlc->bandinit_pending) {
- brcms_c_suspend_mac_and_wait(wlc);
- brcms_c_set_chanspec(wlc, wlc->default_bss->chanspec);
- wlc->bandinit_pending = false;
- brcms_c_enable_mac(wlc);
- }
-
- brcms_b_up_finish(wlc->hw);
-
- /* other software states up after ISR is running */
- /* start APs that were to be brought up but are not up yet */
- /* if (AP_ENAB(wlc->pub)) brcms_c_restart_ap(wlc->ap); */
-
- /* Program the TX wme params with the current settings */
- brcms_c_wme_retries_write(wlc);
-
- /* start one second watchdog timer */
- brcms_add_timer(wlc->wl, wlc->wdtimer, TIMER_INTERVAL_WATCHDOG, true);
- wlc->WDarmed = true;
-
- /* ensure antenna config is up to date */
- brcms_c_stf_phy_txant_upd(wlc);
- /* ensure LDPC config is in sync */
- brcms_c_ht_update_ldpc(wlc, wlc->stf->ldpc);
-
- return 0;
-}
-
-/* Initialize the base precedence map for dequeueing from txq based on WME settings */
-static void brcms_c_tx_prec_map_init(struct brcms_c_info *wlc)
-{
- wlc->tx_prec_map = BRCMS_PREC_BMP_ALL;
- memset(wlc->fifo2prec_map, 0, NFIFO * sizeof(u16));
-
- /* For non-WME, both fifos have overlapping MAXPRIO. So just disable all precedences
- * if either is full.
- */
- if (!EDCF_ENAB(wlc->pub)) {
- wlc->fifo2prec_map[TX_DATA_FIFO] = BRCMS_PREC_BMP_ALL;
- wlc->fifo2prec_map[TX_CTL_FIFO] = BRCMS_PREC_BMP_ALL;
- } else {
- wlc->fifo2prec_map[TX_AC_BK_FIFO] = BRCMS_PREC_BMP_AC_BK;
- wlc->fifo2prec_map[TX_AC_BE_FIFO] = BRCMS_PREC_BMP_AC_BE;
- wlc->fifo2prec_map[TX_AC_VI_FIFO] = BRCMS_PREC_BMP_AC_VI;
- wlc->fifo2prec_map[TX_AC_VO_FIFO] = BRCMS_PREC_BMP_AC_VO;
- }
-}
-
-static uint brcms_c_down_del_timer(struct brcms_c_info *wlc)
-{
- uint callbacks = 0;
-
- return callbacks;
-}
-
-/*
- * Mark the interface nonoperational, stop the software mechanisms,
- * disable the hardware, free any transient buffer state.
- * Return a count of the number of driver callbacks still pending.
- */
-uint brcms_c_down(struct brcms_c_info *wlc)
-{
-
- uint callbacks = 0;
- int i;
- bool dev_gone = false;
- struct brcms_txq_info *qi;
-
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
- /* check if we are already in the going down path */
- if (wlc->going_down) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Driver going down so return"
- "\n", wlc->pub->unit, __func__);
- return 0;
- }
- if (!wlc->pub->up)
- return callbacks;
-
- /* in between, mpc could try to bring down again.. */
- wlc->going_down = true;
-
- callbacks += brcms_b_bmac_down_prep(wlc->hw);
-
- dev_gone = DEVICEREMOVED(wlc);
-
- /* Call any registered down handlers */
- for (i = 0; i < BRCMS_MAXMODULES; i++) {
- if (wlc->modulecb[i].down_fn)
- callbacks +=
- wlc->modulecb[i].down_fn(wlc->modulecb[i].hdl);
- }
-
- /* cancel the watchdog timer */
- if (wlc->WDarmed) {
- if (!brcms_del_timer(wlc->wl, wlc->wdtimer))
- callbacks++;
- wlc->WDarmed = false;
- }
- /* cancel all other timers */
- callbacks += brcms_c_down_del_timer(wlc);
-
- wlc->pub->up = false;
-
- wlc_phy_mute_upd(wlc->band->pi, false, PHY_MUTE_ALL);
-
- /* clear txq flow control */
- brcms_c_txflowcontrol_reset(wlc);
-
- /* flush tx queues */
- for (qi = wlc->tx_queues; qi != NULL; qi = qi->next) {
- brcmu_pktq_flush(&qi->q, true, NULL, NULL);
- }
-
- callbacks += brcms_b_down_finish(wlc->hw);
-
- /* brcms_b_down_finish has done brcms_c_coredisable(). so clk is off */
- wlc->clk = false;
-
- wlc->going_down = false;
- return callbacks;
-}
-
-/* Set the current gmode configuration */
-int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
-{
- int ret = 0;
- uint i;
- wlc_rateset_t rs;
- /* Default to 54g Auto */
- /* Advertise and use shortslot (-1/0/1 Auto/Off/On) */
- s8 shortslot = BRCMS_SHORTSLOT_AUTO;
- bool shortslot_restrict = false; /* Restrict association to stations that support shortslot
- */
- bool ofdm_basic = false; /* Make 6, 12, and 24 basic rates */
- /* Advertise and use short preambles (-1/0/1 Auto/Off/On) */
- int preamble = BRCMS_PLCP_LONG;
- bool preamble_restrict = false; /* Restrict association to stations that support short
- * preambles
- */
- struct brcms_band *band;
-
- /* if N-support is enabled, allow Gmode set as long as requested
- * Gmode is not GMODE_LEGACY_B
- */
- if (N_ENAB(wlc->pub) && gmode == GMODE_LEGACY_B)
- return -ENOTSUPP;
-
- /* verify that we are dealing with 2G band and grab the band pointer */
- if (wlc->band->bandtype == BRCM_BAND_2G)
- band = wlc->band;
- else if ((NBANDS(wlc) > 1) &&
- (wlc->bandstate[OTHERBANDUNIT(wlc)]->bandtype == BRCM_BAND_2G))
- band = wlc->bandstate[OTHERBANDUNIT(wlc)];
- else
- return -EINVAL;
-
- /* Legacy or bust when no OFDM is supported by regulatory */
- if ((brcms_c_channel_locale_flags_in_band(wlc->cmi, band->bandunit) &
- BRCMS_NO_OFDM) && (gmode != GMODE_LEGACY_B))
- return -EINVAL;
-
- /* update configuration value */
- if (config == true)
- brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode);
-
- /* Clear supported rates filter */
- memset(&wlc->sup_rates_override, 0, sizeof(wlc_rateset_t));
-
- /* Clear rateset override */
- memset(&rs, 0, sizeof(wlc_rateset_t));
-
- switch (gmode) {
- case GMODE_LEGACY_B:
- shortslot = BRCMS_SHORTSLOT_OFF;
- brcms_c_rateset_copy(&gphy_legacy_rates, &rs);
-
- break;
-
- case GMODE_LRS:
- if (AP_ENAB(wlc->pub))
- brcms_c_rateset_copy(&cck_rates,
- &wlc->sup_rates_override);
- break;
-
- case GMODE_AUTO:
- /* Accept defaults */
- break;
-
- case GMODE_ONLY:
- ofdm_basic = true;
- preamble = BRCMS_PLCP_SHORT;
- preamble_restrict = true;
- break;
-
- case GMODE_PERFORMANCE:
- if (AP_ENAB(wlc->pub)) /* Put all rates into the Supported Rates element */
- brcms_c_rateset_copy(&cck_ofdm_rates,
- &wlc->sup_rates_override);
-
- shortslot = BRCMS_SHORTSLOT_ON;
- shortslot_restrict = true;
- ofdm_basic = true;
- preamble = BRCMS_PLCP_SHORT;
- preamble_restrict = true;
- break;
-
- default:
- /* Error */
- wiphy_err(wlc->wiphy, "wl%d: %s: invalid gmode %d\n",
- wlc->pub->unit, __func__, gmode);
- return -ENOTSUPP;
- }
-
- /*
- * If we are switching to gmode == GMODE_LEGACY_B,
- * clean up rate info that may refer to OFDM rates.
- */
- if ((gmode == GMODE_LEGACY_B) && (band->gmode != GMODE_LEGACY_B)) {
- band->gmode = gmode;
- if (band->rspec_override && !IS_CCK(band->rspec_override)) {
- band->rspec_override = 0;
- brcms_c_reprate_init(wlc);
- }
- if (band->mrspec_override && !IS_CCK(band->mrspec_override)) {
- band->mrspec_override = 0;
- }
- }
-
- band->gmode = gmode;
-
- wlc->shortslot_override = shortslot;
-
- if (AP_ENAB(wlc->pub)) {
- /* wlc->ap->shortslot_restrict = shortslot_restrict; */
- wlc->PLCPHdr_override =
- (preamble !=
- BRCMS_PLCP_LONG) ? BRCMS_PLCP_SHORT : BRCMS_PLCP_AUTO;
- }
-
- if ((AP_ENAB(wlc->pub) && preamble != BRCMS_PLCP_LONG)
- || preamble == BRCMS_PLCP_SHORT)
- wlc->default_bss->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
- else
- wlc->default_bss->capability &= ~WLAN_CAPABILITY_SHORT_PREAMBLE;
-
- /* Update shortslot capability bit for AP and IBSS */
- if ((AP_ENAB(wlc->pub) && shortslot == BRCMS_SHORTSLOT_AUTO) ||
- shortslot == BRCMS_SHORTSLOT_ON)
- wlc->default_bss->capability |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
- else
- wlc->default_bss->capability &=
- ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
-
- /* Use the default 11g rateset */
- if (!rs.count)
- brcms_c_rateset_copy(&cck_ofdm_rates, &rs);
-
- if (ofdm_basic) {
- for (i = 0; i < rs.count; i++) {
- if (rs.rates[i] == BRCM_RATE_6M
- || rs.rates[i] == BRCM_RATE_12M
- || rs.rates[i] == BRCM_RATE_24M)
- rs.rates[i] |= BRCMS_RATE_FLAG;
- }
- }
-
- /* Set default bss rateset */
- wlc->default_bss->rateset.count = rs.count;
- memcpy(wlc->default_bss->rateset.rates, rs.rates,
- sizeof(wlc->default_bss->rateset.rates));
-
- return ret;
-}
-
-static int brcms_c_nmode_validate(struct brcms_c_info *wlc, s32 nmode)
-{
- int err = 0;
-
- switch (nmode) {
-
- case OFF:
- break;
-
- case AUTO:
- case WL_11N_2x2:
- case WL_11N_3x3:
- if (!(BRCMS_PHY_11N_CAP(wlc->band)))
- err = -EINVAL;
- break;
-
- default:
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-
-int brcms_c_set_nmode(struct brcms_c_info *wlc, s32 nmode)
-{
- uint i;
- int err;
-
- err = brcms_c_nmode_validate(wlc, nmode);
- if (err)
- return err;
-
- switch (nmode) {
- case OFF:
- wlc->pub->_n_enab = OFF;
- wlc->default_bss->flags &= ~BRCMS_BSS_HT;
- /* delete the mcs rates from the default and hw ratesets */
- brcms_c_rateset_mcs_clear(&wlc->default_bss->rateset);
- for (i = 0; i < NBANDS(wlc); i++) {
- memset(wlc->bandstate[i]->hw_rateset.mcs, 0,
- MCSSET_LEN);
- if (IS_MCS(wlc->band->rspec_override)) {
- wlc->bandstate[i]->rspec_override = 0;
- brcms_c_reprate_init(wlc);
- }
- if (IS_MCS(wlc->band->mrspec_override))
- wlc->bandstate[i]->mrspec_override = 0;
- }
- break;
-
- case AUTO:
- if (wlc->stf->txstreams == WL_11N_3x3)
- nmode = WL_11N_3x3;
- else
- nmode = WL_11N_2x2;
- case WL_11N_2x2:
- case WL_11N_3x3:
- /* force GMODE_AUTO if NMODE is ON */
- brcms_c_set_gmode(wlc, GMODE_AUTO, true);
- if (nmode == WL_11N_3x3)
- wlc->pub->_n_enab = SUPPORT_HT;
- else
- wlc->pub->_n_enab = SUPPORT_11N;
- wlc->default_bss->flags |= BRCMS_BSS_HT;
- /* add the mcs rates to the default and hw ratesets */
- brcms_c_rateset_mcs_build(&wlc->default_bss->rateset,
- wlc->stf->txstreams);
- for (i = 0; i < NBANDS(wlc); i++)
- memcpy(wlc->bandstate[i]->hw_rateset.mcs,
- wlc->default_bss->rateset.mcs, MCSSET_LEN);
- break;
-
- default:
- break;
- }
-
- return err;
-}
-
-static int brcms_c_set_rateset(struct brcms_c_info *wlc, wlc_rateset_t *rs_arg)
-{
- wlc_rateset_t rs, new;
- uint bandunit;
-
- memcpy(&rs, rs_arg, sizeof(wlc_rateset_t));
-
- /* check for bad count value */
- if ((rs.count == 0) || (rs.count > BRCMS_NUMRATES))
- return -EINVAL;
-
- /* try the current band */
- bandunit = wlc->band->bandunit;
- memcpy(&new, &rs, sizeof(wlc_rateset_t));
- if (brcms_c_rate_hwrs_filter_sort_validate
- (&new, &wlc->bandstate[bandunit]->hw_rateset, true,
- wlc->stf->txstreams))
- goto good;
-
- /* try the other band */
- if (IS_MBAND_UNLOCKED(wlc)) {
- bandunit = OTHERBANDUNIT(wlc);
- memcpy(&new, &rs, sizeof(wlc_rateset_t));
- if (brcms_c_rate_hwrs_filter_sort_validate(&new,
- &wlc->
- bandstate[bandunit]->
- hw_rateset, true,
- wlc->stf->txstreams))
- goto good;
- }
-
- return -EBADE;
-
- good:
- /* apply new rateset */
- memcpy(&wlc->default_bss->rateset, &new, sizeof(wlc_rateset_t));
- memcpy(&wlc->bandstate[bandunit]->defrateset, &new,
- sizeof(wlc_rateset_t));
- return 0;
-}
-
-/* simplified integer set interface for common ioctl handler */
-int brcms_c_set(struct brcms_c_info *wlc, int cmd, int arg)
-{
- return brcms_c_ioctl(wlc, cmd, (void *)&arg, sizeof(arg), NULL);
-}
-
-/* simplified integer get interface for common ioctl handler */
-int brcms_c_get(struct brcms_c_info *wlc, int cmd, int *arg)
-{
- return brcms_c_ioctl(wlc, cmd, arg, sizeof(int), NULL);
-}
-
-static void brcms_c_ofdm_rateset_war(struct brcms_c_info *wlc)
-{
- u8 r;
- bool war = false;
-
- if (wlc->cfg->associated)
- r = wlc->cfg->current_bss->rateset.rates[0];
- else
- r = wlc->default_bss->rateset.rates[0];
-
- wlc_phy_ofdm_rateset_war(wlc->band->pi, war);
-
- return;
-}
-
-int
-brcms_c_ioctl(struct brcms_c_info *wlc, int cmd, void *arg, int len,
- struct brcms_c_if *wlcif)
-{
- return _brcms_c_ioctl(wlc, cmd, arg, len, wlcif);
-}
-
-/* common ioctl handler. return: 0=ok, -1=error, positive=particular error */
-static int
-_brcms_c_ioctl(struct brcms_c_info *wlc, int cmd, void *arg, int len,
- struct brcms_c_if *wlcif)
-{
- int val, *pval;
- bool bool_val;
- int bcmerror;
- struct scb *nextscb;
- bool ta_ok;
- uint band;
- struct brcms_bss_cfg *bsscfg;
- struct brcms_bss_info *current_bss;
-
- /* update bsscfg pointer */
- bsscfg = wlc->cfg;
- current_bss = bsscfg->current_bss;
-
- /* initialize the following to get rid of compiler warning */
- nextscb = NULL;
- ta_ok = false;
- band = 0;
-
- /* If the device is turned off, then it's not "removed" */
- if (!wlc->pub->hw_off && DEVICEREMOVED(wlc)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
- __func__);
- brcms_down(wlc->wl);
- return -EBADE;
- }
-
- /* default argument is generic integer */
- pval = arg ? (int *)arg : NULL;
-
- /* This will prevent the misaligned access */
- if (pval && (u32) len >= sizeof(val))
- memcpy(&val, pval, sizeof(val));
- else
- val = 0;
-
- /* bool conversion to avoid duplication below */
- bool_val = val != 0;
- bcmerror = 0;
-
- if ((arg == NULL) || (len <= 0)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Command %d needs arguments\n",
- wlc->pub->unit, __func__, cmd);
- bcmerror = -EINVAL;
- goto done;
- }
-
- switch (cmd) {
-
- case BRCM_SET_CHANNEL:{
- chanspec_t chspec = CH20MHZ_CHSPEC(val);
-
- if (val < 0 || val > MAXCHANNEL) {
- bcmerror = -EINVAL;
- break;
- }
-
- if (!brcms_c_valid_chanspec_db(wlc->cmi, chspec)) {
- bcmerror = -EINVAL;
- break;
- }
-
- if (!wlc->pub->up && IS_MBAND_UNLOCKED(wlc)) {
- if (wlc->band->bandunit !=
- CHSPEC_BANDUNIT(chspec))
- wlc->bandinit_pending = true;
- else
- wlc->bandinit_pending = false;
- }
-
- wlc->default_bss->chanspec = chspec;
- /* brcms_c_BSSinit() will sanitize the rateset before
- * using it.. */
- if (wlc->pub->up &&
- (BRCMS_BAND_PI_RADIO_CHANSPEC != chspec)) {
- brcms_c_set_home_chanspec(wlc, chspec);
- brcms_c_suspend_mac_and_wait(wlc);
- brcms_c_set_chanspec(wlc, chspec);
- brcms_c_enable_mac(wlc);
- }
- break;
- }
-
- case BRCM_SET_SRL:
- if (val >= 1 && val <= RETRY_SHORT_MAX) {
- int ac;
- wlc->SRL = (u16) val;
-
- brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
-
- for (ac = 0; ac < AC_COUNT; ac++) {
- BRCMS_WME_RETRY_SHORT_SET(wlc, ac, wlc->SRL);
- }
- brcms_c_wme_retries_write(wlc);
- } else
- bcmerror = -EINVAL;
- break;
-
- case BRCM_SET_LRL:
- if (val >= 1 && val <= 255) {
- int ac;
- wlc->LRL = (u16) val;
-
- brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
-
- for (ac = 0; ac < AC_COUNT; ac++) {
- BRCMS_WME_RETRY_LONG_SET(wlc, ac, wlc->LRL);
- }
- brcms_c_wme_retries_write(wlc);
- } else
- bcmerror = -EINVAL;
- break;
-
- case BRCM_GET_CURR_RATESET:{
- wl_rateset_t *ret_rs = (wl_rateset_t *) arg;
- wlc_rateset_t *rs;
-
- if (wlc->pub->associated)
- rs = &current_bss->rateset;
- else
- rs = &wlc->default_bss->rateset;
-
- if (len < (int)(rs->count + sizeof(rs->count))) {
- bcmerror = -EOVERFLOW;
- break;
- }
-
- /* Copy only legacy rateset section */
- ret_rs->count = rs->count;
- memcpy(&ret_rs->rates, &rs->rates, rs->count);
- break;
- }
-
- case BRCM_SET_RATESET:{
- wlc_rateset_t rs;
- wl_rateset_t *in_rs = (wl_rateset_t *) arg;
-
- if (len < (int)(in_rs->count + sizeof(in_rs->count))) {
- bcmerror = -EOVERFLOW;
- break;
- }
-
- if (in_rs->count > BRCMS_NUMRATES) {
- bcmerror = -ENOBUFS;
- break;
- }
-
- memset(&rs, 0, sizeof(wlc_rateset_t));
-
- /* Copy only legacy rateset section */
- rs.count = in_rs->count;
- memcpy(&rs.rates, &in_rs->rates, rs.count);
-
- /* merge rateset coming in with the current mcsset */
- if (N_ENAB(wlc->pub)) {
- if (bsscfg->associated)
- memcpy(rs.mcs,
- &current_bss->rateset.mcs[0],
- MCSSET_LEN);
- else
- memcpy(rs.mcs,
- &wlc->default_bss->rateset.mcs[0],
- MCSSET_LEN);
- }
-
- bcmerror = brcms_c_set_rateset(wlc, &rs);
-
- if (!bcmerror)
- brcms_c_ofdm_rateset_war(wlc);
-
- break;
- }
-
- case BRCM_SET_BCNPRD:
- /* range [1, 0xffff] */
- if (val >= DOT11_MIN_BEACON_PERIOD
- && val <= DOT11_MAX_BEACON_PERIOD)
- wlc->default_bss->beacon_period = (u16) val;
- else
- bcmerror = -EINVAL;
- break;
-
- case BRCM_GET_PHYLIST:
- {
- unsigned char *cp = arg;
- if (len < 3) {
- bcmerror = -EOVERFLOW;
- break;
- }
-
- if (BRCMS_ISNPHY(wlc->band))
- *cp++ = 'n';
- else if (BRCMS_ISLCNPHY(wlc->band))
- *cp++ = 'c';
- else if (BRCMS_ISSSLPNPHY(wlc->band))
- *cp++ = 's';
- *cp = '\0';
- break;
- }
-
- case BRCMS_SET_SHORTSLOT_OVERRIDE:
- if (val != BRCMS_SHORTSLOT_AUTO && val != BRCMS_SHORTSLOT_OFF &&
- val != BRCMS_SHORTSLOT_ON) {
- bcmerror = -EINVAL;
- break;
- }
-
- wlc->shortslot_override = (s8) val;
-
- /* shortslot is an 11g feature, so no more work if we are
- * currently on the 5G band
- */
- if (BAND_5G(wlc->band->bandtype))
- break;
-
- if (wlc->pub->up && wlc->pub->associated) {
- /* let watchdog or beacon processing update shortslot */
- } else if (wlc->pub->up) {
- /* unassociated shortslot is off */
- brcms_c_switch_shortslot(wlc, false);
- } else {
- /* driver is down, so just update the brcms_c_info
- * value */
- if (wlc->shortslot_override == BRCMS_SHORTSLOT_AUTO) {
- wlc->shortslot = false;
- } else {
- wlc->shortslot =
- (wlc->shortslot_override ==
- BRCMS_SHORTSLOT_ON);
- }
- }
-
- break;
-
- }
- done:
-
- if (bcmerror)
- wlc->pub->bcmerror = bcmerror;
-
- return bcmerror;
-}
-
-/*
- * register watchdog and down handlers.
- */
-int brcms_c_module_register(struct brcms_pub *pub,
- const char *name, void *hdl,
- watchdog_fn_t w_fn, down_fn_t d_fn)
-{
- struct brcms_c_info *wlc = (struct brcms_c_info *) pub->wlc;
- int i;
-
- /* find an empty entry and just add, no duplication check! */
- for (i = 0; i < BRCMS_MAXMODULES; i++) {
- if (wlc->modulecb[i].name[0] == '\0') {
- strncpy(wlc->modulecb[i].name, name,
- sizeof(wlc->modulecb[i].name) - 1);
- wlc->modulecb[i].hdl = hdl;
- wlc->modulecb[i].watchdog_fn = w_fn;
- wlc->modulecb[i].down_fn = d_fn;
- return 0;
- }
- }
-
- return -ENOSR;
-}
-
-/* unregister module callbacks */
-int
-brcms_c_module_unregister(struct brcms_pub *pub, const char *name, void *hdl)
-{
- struct brcms_c_info *wlc = (struct brcms_c_info *) pub->wlc;
- int i;
-
- if (wlc == NULL)
- return -ENODATA;
-
- for (i = 0; i < BRCMS_MAXMODULES; i++) {
- if (!strcmp(wlc->modulecb[i].name, name) &&
- (wlc->modulecb[i].hdl == hdl)) {
- memset(&wlc->modulecb[i], 0, sizeof(struct modulecb));
- return 0;
- }
- }
-
- /* table not found! */
- return -ENODATA;
-}
-
-/* Write WME tunable parameters for retransmit/max rate from wlc struct to ucode */
-static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
-{
- int ac;
-
- /* Need clock to do this */
- if (!wlc->clk)
- return;
-
- for (ac = 0; ac < AC_COUNT; ac++) {
- brcms_c_write_shm(wlc, M_AC_TXLMT_ADDR(ac),
- wlc->wme_retries[ac]);
- }
-}
-
-#ifdef BCMDBG
-static const char * const supr_reason[] = {
- "None", "PMQ Entry", "Flush request",
- "Previous frag failure", "Channel mismatch",
- "Lifetime Expiry", "Underflow"
-};
-
-static void brcms_c_print_txs_status(u16 s)
-{
- printk(KERN_DEBUG "[15:12] %d frame attempts\n",
- (s & TX_STATUS_FRM_RTX_MASK) >> TX_STATUS_FRM_RTX_SHIFT);
- printk(KERN_DEBUG " [11:8] %d rts attempts\n",
- (s & TX_STATUS_RTS_RTX_MASK) >> TX_STATUS_RTS_RTX_SHIFT);
- printk(KERN_DEBUG " [7] %d PM mode indicated\n",
- ((s & TX_STATUS_PMINDCTD) ? 1 : 0));
- printk(KERN_DEBUG " [6] %d intermediate status\n",
- ((s & TX_STATUS_INTERMEDIATE) ? 1 : 0));
- printk(KERN_DEBUG " [5] %d AMPDU\n",
- (s & TX_STATUS_AMPDU) ? 1 : 0);
- printk(KERN_DEBUG " [4:2] %d Frame Suppressed Reason (%s)\n",
- ((s & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT),
- supr_reason[(s & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT]);
- printk(KERN_DEBUG " [1] %d acked\n",
- ((s & TX_STATUS_ACK_RCV) ? 1 : 0));
-}
-#endif /* BCMDBG */
-
-void brcms_c_print_txstatus(struct tx_status *txs)
-{
-#if defined(BCMDBG)
- u16 s = txs->status;
- u16 ackphyrxsh = txs->ackphyrxsh;
-
- printk(KERN_DEBUG "\ntxpkt (MPDU) Complete\n");
-
- printk(KERN_DEBUG "FrameID: %04x ", txs->frameid);
- printk(KERN_DEBUG "TxStatus: %04x", s);
- printk(KERN_DEBUG "\n");
-
- brcms_c_print_txs_status(s);
-
- printk(KERN_DEBUG "LastTxTime: %04x ", txs->lasttxtime);
- printk(KERN_DEBUG "Seq: %04x ", txs->sequence);
- printk(KERN_DEBUG "PHYTxStatus: %04x ", txs->phyerr);
- printk(KERN_DEBUG "RxAckRSSI: %04x ",
- (ackphyrxsh & PRXS1_JSSI_MASK) >> PRXS1_JSSI_SHIFT);
- printk(KERN_DEBUG "RxAckSQ: %04x",
- (ackphyrxsh & PRXS1_SQ_MASK) >> PRXS1_SQ_SHIFT);
- printk(KERN_DEBUG "\n");
-#endif /* defined(BCMDBG) */
-}
-
-void brcms_c_statsupd(struct brcms_c_info *wlc)
-{
- int i;
- struct macstat macstats;
-#ifdef BCMDBG
- u16 delta;
- u16 rxf0ovfl;
- u16 txfunfl[NFIFO];
-#endif /* BCMDBG */
-
- /* if driver down, make no sense to update stats */
- if (!wlc->pub->up)
- return;
-
-#ifdef BCMDBG
- /* save last rx fifo 0 overflow count */
- rxf0ovfl = wlc->core->macstat_snapshot->rxf0ovfl;
-
- /* save last tx fifo underflow count */
- for (i = 0; i < NFIFO; i++)
- txfunfl[i] = wlc->core->macstat_snapshot->txfunfl[i];
-#endif /* BCMDBG */
-
- /* Read mac stats from contiguous shared memory */
- brcms_b_copyfrom_shm(wlc->hw, M_UCODE_MACSTAT,
- &macstats, sizeof(struct macstat));
-
-#ifdef BCMDBG
- /* check for rx fifo 0 overflow */
- delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl);
- if (delta)
- wiphy_err(wlc->wiphy, "wl%d: %u rx fifo 0 overflows!\n",
- wlc->pub->unit, delta);
-
- /* check for tx fifo underflows */
- for (i = 0; i < NFIFO; i++) {
- delta =
- (u16) (wlc->core->macstat_snapshot->txfunfl[i] -
- txfunfl[i]);
- if (delta)
- wiphy_err(wlc->wiphy, "wl%d: %u tx fifo %d underflows!"
- "\n", wlc->pub->unit, delta, i);
- }
-#endif /* BCMDBG */
-
- /* merge counters from dma module */
- for (i = 0; i < NFIFO; i++) {
- if (wlc->hw->di[i]) {
- dma_counterreset(wlc->hw->di[i]);
- }
- }
-}
-
-bool brcms_c_chipmatch(u16 vendor, u16 device)
-{
- if (vendor != PCI_VENDOR_ID_BROADCOM) {
- pr_err("chipmatch: unknown vendor id %04x\n", vendor);
- return false;
- }
-
- if (device == BCM43224_D11N_ID_VEN1)
- return true;
- if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID))
- return true;
- if (device == BCM4313_D11N2G_ID)
- return true;
- if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
- return true;
-
- pr_err("chipmatch: unknown device id %04x\n", device);
- return false;
-}
-
-#if defined(BCMDBG)
-void brcms_c_print_txdesc(struct d11txh *txh)
-{
- u16 mtcl = le16_to_cpu(txh->MacTxControlLow);
- u16 mtch = le16_to_cpu(txh->MacTxControlHigh);
- u16 mfc = le16_to_cpu(txh->MacFrameControl);
- u16 tfest = le16_to_cpu(txh->TxFesTimeNormal);
- u16 ptcw = le16_to_cpu(txh->PhyTxControlWord);
- u16 ptcw_1 = le16_to_cpu(txh->PhyTxControlWord_1);
- u16 ptcw_1_Fbr = le16_to_cpu(txh->PhyTxControlWord_1_Fbr);
- u16 ptcw_1_Rts = le16_to_cpu(txh->PhyTxControlWord_1_Rts);
- u16 ptcw_1_FbrRts = le16_to_cpu(txh->PhyTxControlWord_1_FbrRts);
- u16 mainrates = le16_to_cpu(txh->MainRates);
- u16 xtraft = le16_to_cpu(txh->XtraFrameTypes);
- u8 *iv = txh->IV;
- u8 *ra = txh->TxFrameRA;
- u16 tfestfb = le16_to_cpu(txh->TxFesTimeFallback);
- u8 *rtspfb = txh->RTSPLCPFallback;
- u16 rtsdfb = le16_to_cpu(txh->RTSDurFallback);
- u8 *fragpfb = txh->FragPLCPFallback;
- u16 fragdfb = le16_to_cpu(txh->FragDurFallback);
- u16 mmodelen = le16_to_cpu(txh->MModeLen);
- u16 mmodefbrlen = le16_to_cpu(txh->MModeFbrLen);
- u16 tfid = le16_to_cpu(txh->TxFrameID);
- u16 txs = le16_to_cpu(txh->TxStatus);
- u16 mnmpdu = le16_to_cpu(txh->MaxNMpdus);
- u16 mabyte = le16_to_cpu(txh->MaxABytes_MRT);
- u16 mabyte_f = le16_to_cpu(txh->MaxABytes_FBR);
- u16 mmbyte = le16_to_cpu(txh->MinMBytes);
-
- u8 *rtsph = txh->RTSPhyHeader;
- struct ieee80211_rts rts = txh->rts_frame;
- char hexbuf[256];
-
- /* add plcp header along with txh descriptor */
- printk(KERN_DEBUG "Raw TxDesc + plcp header:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- txh, sizeof(struct d11txh) + 48);
-
- printk(KERN_DEBUG "TxCtlLow: %04x ", mtcl);
- printk(KERN_DEBUG "TxCtlHigh: %04x ", mtch);
- printk(KERN_DEBUG "FC: %04x ", mfc);
- printk(KERN_DEBUG "FES Time: %04x\n", tfest);
- printk(KERN_DEBUG "PhyCtl: %04x%s ", ptcw,
- (ptcw & PHY_TXC_SHORT_HDR) ? " short" : "");
- printk(KERN_DEBUG "PhyCtl_1: %04x ", ptcw_1);
- printk(KERN_DEBUG "PhyCtl_1_Fbr: %04x\n", ptcw_1_Fbr);
- printk(KERN_DEBUG "PhyCtl_1_Rts: %04x ", ptcw_1_Rts);
- printk(KERN_DEBUG "PhyCtl_1_Fbr_Rts: %04x\n", ptcw_1_FbrRts);
- printk(KERN_DEBUG "MainRates: %04x ", mainrates);
- printk(KERN_DEBUG "XtraFrameTypes: %04x ", xtraft);
- printk(KERN_DEBUG "\n");
-
- brcmu_format_hex(hexbuf, iv, sizeof(txh->IV));
- printk(KERN_DEBUG "SecIV: %s\n", hexbuf);
- brcmu_format_hex(hexbuf, ra, sizeof(txh->TxFrameRA));
- printk(KERN_DEBUG "RA: %s\n", hexbuf);
-
- printk(KERN_DEBUG "Fb FES Time: %04x ", tfestfb);
- brcmu_format_hex(hexbuf, rtspfb, sizeof(txh->RTSPLCPFallback));
- printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
- printk(KERN_DEBUG "RTS DUR: %04x ", rtsdfb);
- brcmu_format_hex(hexbuf, fragpfb, sizeof(txh->FragPLCPFallback));
- printk(KERN_DEBUG "PLCP: %s ", hexbuf);
- printk(KERN_DEBUG "DUR: %04x", fragdfb);
- printk(KERN_DEBUG "\n");
-
- printk(KERN_DEBUG "MModeLen: %04x ", mmodelen);
- printk(KERN_DEBUG "MModeFbrLen: %04x\n", mmodefbrlen);
-
- printk(KERN_DEBUG "FrameID: %04x\n", tfid);
- printk(KERN_DEBUG "TxStatus: %04x\n", txs);
-
- printk(KERN_DEBUG "MaxNumMpdu: %04x\n", mnmpdu);
- printk(KERN_DEBUG "MaxAggbyte: %04x\n", mabyte);
- printk(KERN_DEBUG "MaxAggbyte_fb: %04x\n", mabyte_f);
- printk(KERN_DEBUG "MinByte: %04x\n", mmbyte);
-
- brcmu_format_hex(hexbuf, rtsph, sizeof(txh->RTSPhyHeader));
- printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
- brcmu_format_hex(hexbuf, (u8 *) &rts, sizeof(txh->rts_frame));
- printk(KERN_DEBUG "RTS Frame: %s", hexbuf);
- printk(KERN_DEBUG "\n");
-}
-#endif /* defined(BCMDBG) */
-
-#if defined(BCMDBG)
-void brcms_c_print_rxh(struct d11rxhdr *rxh)
-{
- u16 len = rxh->RxFrameSize;
- u16 phystatus_0 = rxh->PhyRxStatus_0;
- u16 phystatus_1 = rxh->PhyRxStatus_1;
- u16 phystatus_2 = rxh->PhyRxStatus_2;
- u16 phystatus_3 = rxh->PhyRxStatus_3;
- u16 macstatus1 = rxh->RxStatus1;
- u16 macstatus2 = rxh->RxStatus2;
- char flagstr[64];
- char lenbuf[20];
- static const struct brcmu_bit_desc macstat_flags[] = {
- {RXS_FCSERR, "FCSErr"},
- {RXS_RESPFRAMETX, "Reply"},
- {RXS_PBPRES, "PADDING"},
- {RXS_DECATMPT, "DeCr"},
- {RXS_DECERR, "DeCrErr"},
- {RXS_BCNSENT, "Bcn"},
- {0, NULL}
- };
-
- printk(KERN_DEBUG "Raw RxDesc:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, rxh,
- sizeof(struct d11rxhdr));
-
- brcmu_format_flags(macstat_flags, macstatus1, flagstr, 64);
-
- snprintf(lenbuf, sizeof(lenbuf), "0x%x", len);
-
- printk(KERN_DEBUG "RxFrameSize: %6s (%d)%s\n", lenbuf, len,
- (rxh->PhyRxStatus_0 & PRXS0_SHORTH) ? " short preamble" : "");
- printk(KERN_DEBUG "RxPHYStatus: %04x %04x %04x %04x\n",
- phystatus_0, phystatus_1, phystatus_2, phystatus_3);
- printk(KERN_DEBUG "RxMACStatus: %x %s\n", macstatus1, flagstr);
- printk(KERN_DEBUG "RXMACaggtype: %x\n",
- (macstatus2 & RXS_AGGTYPE_MASK));
- printk(KERN_DEBUG "RxTSFTime: %04x\n", rxh->RxTSFTime);
-}
-#endif /* defined(BCMDBG) */
-
-static u16 brcms_c_rate_shm_offset(struct brcms_c_info *wlc, u8 rate)
-{
- return brcms_b_rate_shm_offset(wlc->hw, rate);
-}
-
-/* Callback for device removed */
-
-/*
- * Attempts to queue a packet onto a multiple-precedence queue,
- * if necessary evicting a lower precedence packet from the queue.
- *
- * 'prec' is the precedence number that has already been mapped
- * from the packet priority.
- *
- * Returns true if packet consumed (queued), false if not.
- */
-bool
-brcms_c_prec_enq(struct brcms_c_info *wlc, struct pktq *q, void *pkt, int prec)
-{
- return brcms_c_prec_enq_head(wlc, q, pkt, prec, false);
-}
-
-bool
-brcms_c_prec_enq_head(struct brcms_c_info *wlc, struct pktq *q,
- struct sk_buff *pkt, int prec, bool head)
-{
- struct sk_buff *p;
- int eprec = -1; /* precedence to evict from */
-
- /* Determine precedence from which to evict packet, if any */
- if (pktq_pfull(q, prec))
- eprec = prec;
- else if (pktq_full(q)) {
- p = brcmu_pktq_peek_tail(q, &eprec);
- if (eprec > prec) {
- wiphy_err(wlc->wiphy, "%s: Failing: eprec %d > prec %d"
- "\n", __func__, eprec, prec);
- return false;
- }
- }
-
- /* Evict if needed */
- if (eprec >= 0) {
- bool discard_oldest;
-
- discard_oldest = AC_BITMAP_TST(wlc->wme_dp, eprec);
-
- /* Refuse newer packet unless configured to discard oldest */
- if (eprec == prec && !discard_oldest) {
- wiphy_err(wlc->wiphy, "%s: No where to go, prec == %d"
- "\n", __func__, prec);
- return false;
- }
-
- /* Evict packet according to discard policy */
- p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) :
- brcmu_pktq_pdeq_tail(q, eprec);
- brcmu_pkt_buf_free_skb(p);
- }
-
- /* Enqueue */
- if (head)
- p = brcmu_pktq_penq_head(q, prec, pkt);
- else
- p = brcmu_pktq_penq(q, prec, pkt);
-
- return true;
-}
-
-void brcms_c_txq_enq(void *ctx, struct scb *scb, struct sk_buff *sdu,
- uint prec)
-{
- struct brcms_c_info *wlc = (struct brcms_c_info *) ctx;
- struct brcms_txq_info *qi = wlc->pkt_queue; /* Check me */
- struct pktq *q = &qi->q;
- int prio;
-
- prio = sdu->priority;
-
- if (!brcms_c_prec_enq(wlc, q, sdu, prec)) {
- if (!EDCF_ENAB(wlc->pub)
- || (wlc->pub->wlfeatureflag & WL_SWFL_FLOWCONTROL))
- wiphy_err(wlc->wiphy, "wl%d: txq_enq: txq overflow"
- "\n", wlc->pub->unit);
-
- /*
- * we might hit this condtion in case
- * packet flooding from mac80211 stack
- */
- brcmu_pkt_buf_free_skb(sdu);
- }
-
- /* Check if flow control needs to be turned on after enqueuing the packet
- * Don't turn on flow control if EDCF is enabled. Driver would make the decision on what
- * to drop instead of relying on stack to make the right decision
- */
- if (!EDCF_ENAB(wlc->pub)
- || (wlc->pub->wlfeatureflag & WL_SWFL_FLOWCONTROL)) {
- if (pktq_len(q) >= wlc->pub->tunables->datahiwat) {
- brcms_c_txflowcontrol(wlc, qi, ON, ALLPRIO);
- }
- } else if (wlc->pub->_priofc) {
- if (pktq_plen(q, wlc_prio2prec_map[prio]) >=
- wlc->pub->tunables->datahiwat) {
- brcms_c_txflowcontrol(wlc, qi, ON, prio);
- }
- }
-}
-
-bool
-brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
- struct ieee80211_hw *hw)
-{
- u8 prio;
- uint fifo;
- void *pkt;
- struct scb *scb = &global_scb;
- struct ieee80211_hdr *d11_header = (struct ieee80211_hdr *)(sdu->data);
-
- /* 802.11 standard requires management traffic to go at highest priority */
- prio = ieee80211_is_data(d11_header->frame_control) ? sdu->priority :
- MAXPRIO;
- fifo = prio2fifo[prio];
- pkt = sdu;
- if (unlikely
- (brcms_c_d11hdrs_mac80211(
- wlc, hw, pkt, scb, 0, 1, fifo, 0, NULL, 0)))
- return -EINVAL;
- brcms_c_txq_enq(wlc, scb, pkt, BRCMS_PRIO_TO_PREC(prio));
- brcms_c_send_q(wlc);
- return 0;
-}
-
-void brcms_c_send_q(struct brcms_c_info *wlc)
-{
- struct sk_buff *pkt[DOT11_MAXNUMFRAGS];
- int prec;
- u16 prec_map;
- int err = 0, i, count;
- uint fifo;
- struct brcms_txq_info *qi = wlc->pkt_queue;
- struct pktq *q = &qi->q;
- struct ieee80211_tx_info *tx_info;
-
- if (in_send_q)
- return;
- else
- in_send_q = true;
-
- prec_map = wlc->tx_prec_map;
-
- /* Send all the enq'd pkts that we can.
- * Dequeue packets with precedence with empty HW fifo only
- */
- while (prec_map && (pkt[0] = brcmu_pktq_mdeq(q, prec_map, &prec))) {
- tx_info = IEEE80211_SKB_CB(pkt[0]);
- if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
- err = brcms_c_sendampdu(wlc->ampdu, qi, pkt, prec);
- } else {
- count = 1;
- err = brcms_c_prep_pdu(wlc, pkt[0], &fifo);
- if (!err) {
- for (i = 0; i < count; i++) {
- brcms_c_txfifo(wlc, fifo, pkt[i], true,
- 1);
- }
- }
- }
-
- if (err == -EBUSY) {
- brcmu_pktq_penq_head(q, prec, pkt[0]);
- /* If send failed due to any other reason than a change in
- * HW FIFO condition, quit. Otherwise, read the new prec_map!
- */
- if (prec_map == wlc->tx_prec_map)
- break;
- prec_map = wlc->tx_prec_map;
- }
- }
-
- /* Check if flow control needs to be turned off after sending the packet */
- if (!EDCF_ENAB(wlc->pub)
- || (wlc->pub->wlfeatureflag & WL_SWFL_FLOWCONTROL)) {
- if (brcms_c_txflowcontrol_prio_isset(wlc, qi, ALLPRIO)
- && (pktq_len(q) < wlc->pub->tunables->datahiwat / 2)) {
- brcms_c_txflowcontrol(wlc, qi, OFF, ALLPRIO);
- }
- } else if (wlc->pub->_priofc) {
- int prio;
- for (prio = MAXPRIO; prio >= 0; prio--) {
- if (brcms_c_txflowcontrol_prio_isset(wlc, qi, prio) &&
- (pktq_plen(q, wlc_prio2prec_map[prio]) <
- wlc->pub->tunables->datahiwat / 2)) {
- brcms_c_txflowcontrol(wlc, qi, OFF, prio);
- }
- }
- }
- in_send_q = false;
-}
-
-/*
- * bcmc_fid_generate:
- * Generate frame ID for a BCMC packet. The frag field is not used
- * for MC frames so is used as part of the sequence number.
- */
-static inline u16
-bcmc_fid_generate(struct brcms_c_info *wlc, struct brcms_bss_cfg *bsscfg,
- struct d11txh *txh)
-{
- u16 frameid;
-
- frameid = le16_to_cpu(txh->TxFrameID) & ~(TXFID_SEQ_MASK |
- TXFID_QUEUE_MASK);
- frameid |=
- (((wlc->
- mc_fid_counter++) << TXFID_SEQ_SHIFT) & TXFID_SEQ_MASK) |
- TX_BCMC_FIFO;
-
- return frameid;
-}
-
-void
-brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p,
- bool commit, s8 txpktpend)
-{
- u16 frameid = INVALIDFID;
- struct d11txh *txh;
-
- txh = (struct d11txh *) (p->data);
-
- /* When a BC/MC frame is being committed to the BCMC fifo via DMA (NOT PIO), update
- * ucode or BSS info as appropriate.
- */
- if (fifo == TX_BCMC_FIFO) {
- frameid = le16_to_cpu(txh->TxFrameID);
-
- }
-
- if (BRCMS_WAR16165(wlc))
- brcms_c_war16165(wlc, true);
-
-
- /* Bump up pending count for if not using rpc. If rpc is used, this will be handled
- * in brcms_b_txfifo()
- */
- if (commit) {
- TXPKTPENDINC(wlc, fifo, txpktpend);
- BCMMSG(wlc->wiphy, "pktpend inc %d to %d\n",
- txpktpend, TXPKTPENDGET(wlc, fifo));
- }
-
- /* Commit BCMC sequence number in the SHM frame ID location */
- if (frameid != INVALIDFID)
- BCMCFID(wlc, frameid);
-
- if (dma_txfast(wlc->hw->di[fifo], p, commit) < 0) {
- wiphy_err(wlc->wiphy, "txfifo: fatal, toss frames !!!\n");
- }
-}
-
-void
-brcms_c_compute_plcp(struct brcms_c_info *wlc, ratespec_t rspec,
- uint length, u8 *plcp)
-{
- if (IS_MCS(rspec)) {
- brcms_c_compute_mimo_plcp(rspec, length, plcp);
- } else if (IS_OFDM(rspec)) {
- brcms_c_compute_ofdm_plcp(rspec, length, plcp);
- } else {
- brcms_c_compute_cck_plcp(wlc, rspec, length, plcp);
- }
- return;
-}
-
-/* Rate: 802.11 rate code, length: PSDU length in octets */
-static void brcms_c_compute_mimo_plcp(ratespec_t rspec, uint length, u8 *plcp)
-{
- u8 mcs = (u8) (rspec & RSPEC_RATE_MASK);
- plcp[0] = mcs;
- if (RSPEC_IS40MHZ(rspec) || (mcs == 32))
- plcp[0] |= MIMO_PLCP_40MHZ;
- BRCMS_SET_MIMO_PLCP_LEN(plcp, length);
- plcp[3] = RSPEC_MIMOPLCP3(rspec); /* rspec already holds this byte */
- plcp[3] |= 0x7; /* set smoothing, not sounding ppdu & reserved */
- plcp[4] = 0; /* number of extension spatial streams bit 0 & 1 */
- plcp[5] = 0;
-}
-
-/* Rate: 802.11 rate code, length: PSDU length in octets */
-static void
-brcms_c_compute_ofdm_plcp(ratespec_t rspec, u32 length, u8 *plcp)
-{
- u8 rate_signal;
- u32 tmp = 0;
- int rate = RSPEC2RATE(rspec);
-
- /* encode rate per 802.11a-1999 sec 17.3.4.1, with lsb transmitted first */
- rate_signal = rate_info[rate] & BRCMS_RATE_MASK;
- memset(plcp, 0, D11_PHY_HDR_LEN);
- D11A_PHY_HDR_SRATE((struct ofdm_phy_hdr *) plcp, rate_signal);
-
- tmp = (length & 0xfff) << 5;
- plcp[2] |= (tmp >> 16) & 0xff;
- plcp[1] |= (tmp >> 8) & 0xff;
- plcp[0] |= tmp & 0xff;
-
- return;
-}
-
-/*
- * Compute PLCP, but only requires actual rate and length of pkt.
- * Rate is given in the driver standard multiple of 500 kbps.
- * le is set for 11 Mbps rate if necessary.
- * Broken out for PRQ.
- */
-
-static void brcms_c_cck_plcp_set(struct brcms_c_info *wlc, int rate_500,
- uint length, u8 *plcp)
-{
- u16 usec = 0;
- u8 le = 0;
-
- switch (rate_500) {
- case BRCM_RATE_1M:
- usec = length << 3;
- break;
- case BRCM_RATE_2M:
- usec = length << 2;
- break;
- case BRCM_RATE_5M5:
- usec = (length << 4) / 11;
- if ((length << 4) - (usec * 11) > 0)
- usec++;
- break;
- case BRCM_RATE_11M:
- usec = (length << 3) / 11;
- if ((length << 3) - (usec * 11) > 0) {
- usec++;
- if ((usec * 11) - (length << 3) >= 8)
- le = D11B_PLCP_SIGNAL_LE;
- }
- break;
-
- default:
- wiphy_err(wlc->wiphy, "brcms_c_cck_plcp_set: unsupported rate %d"
- "\n", rate_500);
- rate_500 = BRCM_RATE_1M;
- usec = length << 3;
- break;
- }
- /* PLCP signal byte */
- plcp[0] = rate_500 * 5; /* r (500kbps) * 5 == r (100kbps) */
- /* PLCP service byte */
- plcp[1] = (u8) (le | D11B_PLCP_SIGNAL_LOCKED);
- /* PLCP length u16, little endian */
- plcp[2] = usec & 0xff;
- plcp[3] = (usec >> 8) & 0xff;
- /* PLCP CRC16 */
- plcp[4] = 0;
- plcp[5] = 0;
-}
-
-/* Rate: 802.11 rate code, length: PSDU length in octets */
-static void brcms_c_compute_cck_plcp(struct brcms_c_info *wlc, ratespec_t rspec,
- uint length, u8 *plcp)
-{
- int rate = RSPEC2RATE(rspec);
-
- brcms_c_cck_plcp_set(wlc, rate, length, plcp);
-}
-
-/* brcms_c_compute_frame_dur()
- *
- * Calculate the 802.11 MAC header DUR field for MPDU
- * DUR for a single frame = 1 SIFS + 1 ACK
- * DUR for a frame with following frags = 3 SIFS + 2 ACK + next frag time
- *
- * rate MPDU rate in unit of 500kbps
- * next_frag_len next MPDU length in bytes
- * preamble_type use short/GF or long/MM PLCP header
- */
-static u16
-brcms_c_compute_frame_dur(struct brcms_c_info *wlc, ratespec_t rate,
- u8 preamble_type, uint next_frag_len)
-{
- u16 dur, sifs;
-
- sifs = SIFS(wlc->band);
-
- dur = sifs;
- dur += (u16) brcms_c_calc_ack_time(wlc, rate, preamble_type);
-
- if (next_frag_len) {
- /* Double the current DUR to get 2 SIFS + 2 ACKs */
- dur *= 2;
- /* add another SIFS and the frag time */
- dur += sifs;
- dur +=
- (u16) brcms_c_calc_frame_time(wlc, rate, preamble_type,
- next_frag_len);
- }
- return dur;
-}
-
-/* brcms_c_compute_rtscts_dur()
- *
- * Calculate the 802.11 MAC header DUR field for an RTS or CTS frame
- * DUR for normal RTS/CTS w/ frame = 3 SIFS + 1 CTS + next frame time + 1 ACK
- * DUR for CTS-TO-SELF w/ frame = 2 SIFS + next frame time + 1 ACK
- *
- * cts cts-to-self or rts/cts
- * rts_rate rts or cts rate in unit of 500kbps
- * rate next MPDU rate in unit of 500kbps
- * frame_len next MPDU frame length in bytes
- */
-u16
-brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
- ratespec_t rts_rate,
- ratespec_t frame_rate, u8 rts_preamble_type,
- u8 frame_preamble_type, uint frame_len, bool ba)
-{
- u16 dur, sifs;
-
- sifs = SIFS(wlc->band);
-
- if (!cts_only) { /* RTS/CTS */
- dur = 3 * sifs;
- dur +=
- (u16) brcms_c_calc_cts_time(wlc, rts_rate,
- rts_preamble_type);
- } else { /* CTS-TO-SELF */
- dur = 2 * sifs;
- }
-
- dur +=
- (u16) brcms_c_calc_frame_time(wlc, frame_rate, frame_preamble_type,
- frame_len);
- if (ba)
- dur +=
- (u16) brcms_c_calc_ba_time(wlc, frame_rate,
- BRCMS_SHORT_PREAMBLE);
- else
- dur +=
- (u16) brcms_c_calc_ack_time(wlc, frame_rate,
- frame_preamble_type);
- return dur;
-}
-
-u16 brcms_c_phytxctl1_calc(struct brcms_c_info *wlc, ratespec_t rspec)
-{
- u16 phyctl1 = 0;
- u16 bw;
-
- if (BRCMS_ISLCNPHY(wlc->band)) {
- bw = PHY_TXC1_BW_20MHZ;
- } else {
- bw = RSPEC_GET_BW(rspec);
- /* 10Mhz is not supported yet */
- if (bw < PHY_TXC1_BW_20MHZ) {
- wiphy_err(wlc->wiphy, "phytxctl1_calc: bw %d is "
- "not supported yet, set to 20L\n", bw);
- bw = PHY_TXC1_BW_20MHZ;
- }
- }
-
- if (IS_MCS(rspec)) {
- uint mcs = rspec & RSPEC_RATE_MASK;
-
- /* bw, stf, coding-type is part of RSPEC_PHYTXBYTE2 returns */
- phyctl1 = RSPEC_PHYTXBYTE2(rspec);
- /* set the upper byte of phyctl1 */
- phyctl1 |= (mcs_table[mcs].tx_phy_ctl3 << 8);
- } else if (IS_CCK(rspec) && !BRCMS_ISLCNPHY(wlc->band)
- && !BRCMS_ISSSLPNPHY(wlc->band)) {
- /* In CCK mode LPPHY overloads OFDM Modulation bits with CCK Data Rate */
- /* Eventually MIMOPHY would also be converted to this format */
- /* 0 = 1Mbps; 1 = 2Mbps; 2 = 5.5Mbps; 3 = 11Mbps */
- phyctl1 = (bw | (RSPEC_STF(rspec) << PHY_TXC1_MODE_SHIFT));
- } else { /* legacy OFDM/CCK */
- s16 phycfg;
- /* get the phyctl byte from rate phycfg table */
- phycfg = brcms_c_rate_legacy_phyctl(RSPEC2RATE(rspec));
- if (phycfg == -1) {
- wiphy_err(wlc->wiphy, "phytxctl1_calc: wrong "
- "legacy OFDM/CCK rate\n");
- phycfg = 0;
- }
- /* set the upper byte of phyctl1 */
- phyctl1 =
- (bw | (phycfg << 8) |
- (RSPEC_STF(rspec) << PHY_TXC1_MODE_SHIFT));
- }
- return phyctl1;
-}
-
-ratespec_t
-brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, ratespec_t rspec,
- bool use_rspec, u16 mimo_ctlchbw)
-{
- ratespec_t rts_rspec = 0;
-
- if (use_rspec) {
- /* use frame rate as rts rate */
- rts_rspec = rspec;
-
- } else if (wlc->band->gmode && wlc->protection->_g && !IS_CCK(rspec)) {
- /* Use 11Mbps as the g protection RTS target rate and fallback.
- * Use the BRCMS_BASIC_RATE() lookup to find the best basic rate
- * under the target in case 11 Mbps is not Basic.
- * 6 and 9 Mbps are not usually selected by rate selection, but even
- * if the OFDM rate we are protecting is 6 or 9 Mbps, 11 is more robust.
- */
- rts_rspec = BRCMS_BASIC_RATE(wlc, BRCM_RATE_11M);
- } else {
- /* calculate RTS rate and fallback rate based on the frame rate
- * RTS must be sent at a basic rate since it is a
- * control frame, sec 9.6 of 802.11 spec
- */
- rts_rspec = BRCMS_BASIC_RATE(wlc, rspec);
- }
-
- if (BRCMS_PHY_11N_CAP(wlc->band)) {
- /* set rts txbw to correct side band */
- rts_rspec &= ~RSPEC_BW_MASK;
-
- /* if rspec/rspec_fallback is 40MHz, then send RTS on both 20MHz channel
- * (DUP), otherwise send RTS on control channel
- */
- if (RSPEC_IS40MHZ(rspec) && !IS_CCK(rts_rspec))
- rts_rspec |= (PHY_TXC1_BW_40MHZ_DUP << RSPEC_BW_SHIFT);
- else
- rts_rspec |= (mimo_ctlchbw << RSPEC_BW_SHIFT);
-
- /* pick siso/cdd as default for ofdm */
- if (IS_OFDM(rts_rspec)) {
- rts_rspec &= ~RSPEC_STF_MASK;
- rts_rspec |= (wlc->stf->ss_opmode << RSPEC_STF_SHIFT);
- }
- }
- return rts_rspec;
-}
-
-/*
- * Add struct d11txh, struct cck_phy_hdr.
- *
- * 'p' data must start with 802.11 MAC header
- * 'p' must allow enough bytes of local headers to be "pushed" onto the packet
- *
- * headroom == D11_PHY_HDR_LEN + D11_TXH_LEN (D11_TXH_LEN is now 104 bytes)
- *
- */
-static u16
-brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
- struct sk_buff *p, struct scb *scb, uint frag,
- uint nfrags, uint queue, uint next_frag_len,
- struct wsec_key *key, ratespec_t rspec_override)
-{
- struct ieee80211_hdr *h;
- struct d11txh *txh;
- u8 *plcp, plcp_fallback[D11_PHY_HDR_LEN];
- int len, phylen, rts_phylen;
- u16 mch, phyctl, xfts, mainrates;
- u16 seq = 0, mcl = 0, status = 0, frameid = 0;
- ratespec_t rspec[2] = { BRCM_RATE_1M, BRCM_RATE_1M }, rts_rspec[2] = {
- BRCM_RATE_1M, BRCM_RATE_1M};
- bool use_rts = false;
- bool use_cts = false;
- bool use_rifs = false;
- bool short_preamble[2] = { false, false };
- u8 preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
- u8 rts_preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
- u8 *rts_plcp, rts_plcp_fallback[D11_PHY_HDR_LEN];
- struct ieee80211_rts *rts = NULL;
- bool qos;
- uint ac;
- u32 rate_val[2];
- bool hwtkmic = false;
- u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
-#define ANTCFG_NONE 0xFF
- u8 antcfg = ANTCFG_NONE;
- u8 fbantcfg = ANTCFG_NONE;
- uint phyctl1_stf = 0;
- u16 durid = 0;
- struct ieee80211_tx_rate *txrate[2];
- int k;
- struct ieee80211_tx_info *tx_info;
- bool is_mcs[2];
- u16 mimo_txbw;
- u8 mimo_preamble_type;
-
- /* locate 802.11 MAC header */
- h = (struct ieee80211_hdr *)(p->data);
- qos = ieee80211_is_data_qos(h->frame_control);
-
- /* compute length of frame in bytes for use in PLCP computations */
- len = brcmu_pkttotlen(p);
- phylen = len + FCS_LEN;
-
- /* If WEP enabled, add room in phylen for the additional bytes of
- * ICV which MAC generates. We do NOT add the additional bytes to
- * the packet itself, thus phylen = packet length + ICV_LEN + FCS_LEN
- * in this case
- */
- if (key) {
- phylen += key->icv_len;
- }
-
- /* Get tx_info */
- tx_info = IEEE80211_SKB_CB(p);
-
- /* add PLCP */
- plcp = skb_push(p, D11_PHY_HDR_LEN);
-
- /* add Broadcom tx descriptor header */
- txh = (struct d11txh *) skb_push(p, D11_TXH_LEN);
- memset(txh, 0, D11_TXH_LEN);
-
- /* setup frameid */
- if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
- /* non-AP STA should never use BCMC queue */
- if (queue == TX_BCMC_FIFO) {
- wiphy_err(wlc->wiphy, "wl%d: %s: ASSERT queue == "
- "TX_BCMC!\n", BRCMS_UNIT(wlc), __func__);
- frameid = bcmc_fid_generate(wlc, NULL, txh);
- } else {
- /* Increment the counter for first fragment */
- if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
- SCB_SEQNUM(scb, p->priority)++;
- }
-
- /* extract fragment number from frame first */
- seq = le16_to_cpu(seq) & FRAGNUM_MASK;
- seq |= (SCB_SEQNUM(scb, p->priority) << SEQNUM_SHIFT);
- h->seq_ctrl = cpu_to_le16(seq);
-
- frameid = ((seq << TXFID_SEQ_SHIFT) & TXFID_SEQ_MASK) |
- (queue & TXFID_QUEUE_MASK);
- }
- }
- frameid |= queue & TXFID_QUEUE_MASK;
-
- /* set the ignpmq bit for all pkts tx'd in PS mode and for beacons */
- if (SCB_PS(scb) || ieee80211_is_beacon(h->frame_control))
- mcl |= TXC_IGNOREPMQ;
-
- txrate[0] = tx_info->control.rates;
- txrate[1] = txrate[0] + 1;
-
- /* if rate control algorithm didn't give us a fallback rate, use the primary rate */
- if (txrate[1]->idx < 0) {
- txrate[1] = txrate[0];
- }
-
- for (k = 0; k < hw->max_rates; k++) {
- is_mcs[k] =
- txrate[k]->flags & IEEE80211_TX_RC_MCS ? true : false;
- if (!is_mcs[k]) {
- if ((txrate[k]->idx >= 0)
- && (txrate[k]->idx <
- hw->wiphy->bands[tx_info->band]->n_bitrates)) {
- rate_val[k] =
- hw->wiphy->bands[tx_info->band]->
- bitrates[txrate[k]->idx].hw_value;
- short_preamble[k] =
- txrate[k]->
- flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ?
- true : false;
- } else {
- rate_val[k] = BRCM_RATE_1M;
- }
- } else {
- rate_val[k] = txrate[k]->idx;
- }
- /* Currently only support same setting for primay and fallback rates.
- * Unify flags for each rate into a single value for the frame
- */
- use_rts |=
- txrate[k]->
- flags & IEEE80211_TX_RC_USE_RTS_CTS ? true : false;
- use_cts |=
- txrate[k]->
- flags & IEEE80211_TX_RC_USE_CTS_PROTECT ? true : false;
-
- if (is_mcs[k])
- rate_val[k] |= NRATE_MCS_INUSE;
-
- rspec[k] = mac80211_wlc_set_nrate(wlc, wlc->band, rate_val[k]);
-
- /* (1) RATE: determine and validate primary rate and fallback rates */
- if (!RSPEC_ACTIVE(rspec[k])) {
- rspec[k] = BRCM_RATE_1M;
- } else {
- if (!is_multicast_ether_addr(h->addr1)) {
- /* set tx antenna config */
- brcms_c_antsel_antcfg_get(wlc->asi, false,
- false, 0, 0, &antcfg, &fbantcfg);
- }
- }
- }
-
- phyctl1_stf = wlc->stf->ss_opmode;
-
- if (N_ENAB(wlc->pub)) {
- for (k = 0; k < hw->max_rates; k++) {
- /* apply siso/cdd to single stream mcs's or ofdm if rspec is auto selected */
- if (((IS_MCS(rspec[k]) &&
- IS_SINGLE_STREAM(rspec[k] & RSPEC_RATE_MASK)) ||
- IS_OFDM(rspec[k]))
- && ((rspec[k] & RSPEC_OVERRIDE_MCS_ONLY)
- || !(rspec[k] & RSPEC_OVERRIDE))) {
- rspec[k] &= ~(RSPEC_STF_MASK | RSPEC_STC_MASK);
-
- /* For SISO MCS use STBC if possible */
- if (IS_MCS(rspec[k])
- && BRCMS_STF_SS_STBC_TX(wlc, scb)) {
- u8 stc;
-
- stc = 1; /* Nss for single stream is always 1 */
- rspec[k] |=
- (PHY_TXC1_MODE_STBC <<
- RSPEC_STF_SHIFT) | (stc <<
- RSPEC_STC_SHIFT);
- } else
- rspec[k] |=
- (phyctl1_stf << RSPEC_STF_SHIFT);
- }
-
- /* Is the phy configured to use 40MHZ frames? If so then pick the desired txbw */
- if (CHSPEC_WLC_BW(wlc->chanspec) == BRCMS_40_MHZ) {
- /* default txbw is 20in40 SB */
- mimo_ctlchbw = mimo_txbw =
- CHSPEC_SB_UPPER(BRCMS_BAND_PI_RADIO_CHANSPEC)
- ? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;
-
- if (IS_MCS(rspec[k])) {
- /* mcs 32 must be 40b/w DUP */
- if ((rspec[k] & RSPEC_RATE_MASK) == 32) {
- mimo_txbw =
- PHY_TXC1_BW_40MHZ_DUP;
- /* use override */
- } else if (wlc->mimo_40txbw != AUTO)
- mimo_txbw = wlc->mimo_40txbw;
- /* else check if dst is using 40 Mhz */
- else if (scb->flags & SCB_IS40)
- mimo_txbw = PHY_TXC1_BW_40MHZ;
- } else if (IS_OFDM(rspec[k])) {
- if (wlc->ofdm_40txbw != AUTO)
- mimo_txbw = wlc->ofdm_40txbw;
- } else {
- if (wlc->cck_40txbw != AUTO)
- mimo_txbw = wlc->cck_40txbw;
- }
- } else {
- /* mcs32 is 40 b/w only.
- * This is possible for probe packets on a STA during SCAN
- */
- if ((rspec[k] & RSPEC_RATE_MASK) == 32) {
- /* mcs 0 */
- rspec[k] = RSPEC_MIMORATE;
- }
- mimo_txbw = PHY_TXC1_BW_20MHZ;
- }
-
- /* Set channel width */
- rspec[k] &= ~RSPEC_BW_MASK;
- if ((k == 0) || ((k > 0) && IS_MCS(rspec[k])))
- rspec[k] |= (mimo_txbw << RSPEC_BW_SHIFT);
- else
- rspec[k] |= (mimo_ctlchbw << RSPEC_BW_SHIFT);
-
- /* Set Short GI */
-#ifdef NOSGIYET
- if (IS_MCS(rspec[k])
- && (txrate[k]->flags & IEEE80211_TX_RC_SHORT_GI))
- rspec[k] |= RSPEC_SHORT_GI;
- else if (!(txrate[k]->flags & IEEE80211_TX_RC_SHORT_GI))
- rspec[k] &= ~RSPEC_SHORT_GI;
-#else
- rspec[k] &= ~RSPEC_SHORT_GI;
-#endif
-
- mimo_preamble_type = BRCMS_MM_PREAMBLE;
- if (txrate[k]->flags & IEEE80211_TX_RC_GREEN_FIELD)
- mimo_preamble_type = BRCMS_GF_PREAMBLE;
-
- if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
- && (!IS_MCS(rspec[k]))) {
- wiphy_err(wlc->wiphy, "wl%d: %s: IEEE80211_TX_"
- "RC_MCS != IS_MCS(rspec)\n",
- BRCMS_UNIT(wlc), __func__);
- }
-
- if (IS_MCS(rspec[k])) {
- preamble_type[k] = mimo_preamble_type;
-
- /* if SGI is selected, then forced mm for single stream */
- if ((rspec[k] & RSPEC_SHORT_GI)
- && IS_SINGLE_STREAM(rspec[k] &
- RSPEC_RATE_MASK)) {
- preamble_type[k] = BRCMS_MM_PREAMBLE;
- }
- }
-
- /* should be better conditionalized */
- if (!IS_MCS(rspec[0])
- && (tx_info->control.rates[0].
- flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE))
- preamble_type[k] = BRCMS_SHORT_PREAMBLE;
- }
- } else {
- for (k = 0; k < hw->max_rates; k++) {
- /* Set ctrlchbw as 20Mhz */
- rspec[k] &= ~RSPEC_BW_MASK;
- rspec[k] |= (PHY_TXC1_BW_20MHZ << RSPEC_BW_SHIFT);
-
- /* for nphy, stf of ofdm frames must follow policies */
- if (BRCMS_ISNPHY(wlc->band) && IS_OFDM(rspec[k])) {
- rspec[k] &= ~RSPEC_STF_MASK;
- rspec[k] |= phyctl1_stf << RSPEC_STF_SHIFT;
- }
- }
- }
-
- /* Reset these for use with AMPDU's */
- txrate[0]->count = 0;
- txrate[1]->count = 0;
-
- /* (2) PROTECTION, may change rspec */
- if ((ieee80211_is_data(h->frame_control) ||
- ieee80211_is_mgmt(h->frame_control)) &&
- (phylen > wlc->RTSThresh) && !is_multicast_ether_addr(h->addr1))
- use_rts = true;
-
- /* (3) PLCP: determine PLCP header and MAC duration,
- * fill struct d11txh */
- brcms_c_compute_plcp(wlc, rspec[0], phylen, plcp);
- brcms_c_compute_plcp(wlc, rspec[1], phylen, plcp_fallback);
- memcpy(&txh->FragPLCPFallback,
- plcp_fallback, sizeof(txh->FragPLCPFallback));
-
- /* Length field now put in CCK FBR CRC field */
- if (IS_CCK(rspec[1])) {
- txh->FragPLCPFallback[4] = phylen & 0xff;
- txh->FragPLCPFallback[5] = (phylen & 0xff00) >> 8;
- }
-
- /* MIMO-RATE: need validation ?? */
- mainrates = IS_OFDM(rspec[0]) ?
- D11A_PHY_HDR_GRATE((struct ofdm_phy_hdr *) plcp) :
- plcp[0];
-
- /* DUR field for main rate */
- if (!ieee80211_is_pspoll(h->frame_control) &&
- !is_multicast_ether_addr(h->addr1) && !use_rifs) {
- durid =
- brcms_c_compute_frame_dur(wlc, rspec[0], preamble_type[0],
- next_frag_len);
- h->duration_id = cpu_to_le16(durid);
- } else if (use_rifs) {
- /* NAV protect to end of next max packet size */
- durid =
- (u16) brcms_c_calc_frame_time(wlc, rspec[0],
- preamble_type[0],
- DOT11_MAX_FRAG_LEN);
- durid += RIFS_11N_TIME;
- h->duration_id = cpu_to_le16(durid);
- }
-
- /* DUR field for fallback rate */
- if (ieee80211_is_pspoll(h->frame_control))
- txh->FragDurFallback = h->duration_id;
- else if (is_multicast_ether_addr(h->addr1) || use_rifs)
- txh->FragDurFallback = 0;
- else {
- durid = brcms_c_compute_frame_dur(wlc, rspec[1],
- preamble_type[1], next_frag_len);
- txh->FragDurFallback = cpu_to_le16(durid);
- }
-
- /* (4) MAC-HDR: MacTxControlLow */
- if (frag == 0)
- mcl |= TXC_STARTMSDU;
-
- if (!is_multicast_ether_addr(h->addr1))
- mcl |= TXC_IMMEDACK;
-
- if (BAND_5G(wlc->band->bandtype))
- mcl |= TXC_FREQBAND_5G;
-
- if (CHSPEC_IS40(BRCMS_BAND_PI_RADIO_CHANSPEC))
- mcl |= TXC_BW_40;
-
- /* set AMIC bit if using hardware TKIP MIC */
- if (hwtkmic)
- mcl |= TXC_AMIC;
-
- txh->MacTxControlLow = cpu_to_le16(mcl);
-
- /* MacTxControlHigh */
- mch = 0;
-
- /* Set fallback rate preamble type */
- if ((preamble_type[1] == BRCMS_SHORT_PREAMBLE) ||
- (preamble_type[1] == BRCMS_GF_PREAMBLE)) {
- if (RSPEC2RATE(rspec[1]) != BRCM_RATE_1M)
- mch |= TXC_PREAMBLE_DATA_FB_SHORT;
- }
-
- /* MacFrameControl */
- memcpy(&txh->MacFrameControl, &h->frame_control, sizeof(u16));
- txh->TxFesTimeNormal = cpu_to_le16(0);
-
- txh->TxFesTimeFallback = cpu_to_le16(0);
-
- /* TxFrameRA */
- memcpy(&txh->TxFrameRA, &h->addr1, ETH_ALEN);
-
- /* TxFrameID */
- txh->TxFrameID = cpu_to_le16(frameid);
-
- /* TxStatus, Note the case of recreating the first frag of a suppressed frame
- * then we may need to reset the retry cnt's via the status reg
- */
- txh->TxStatus = cpu_to_le16(status);
-
- /* extra fields for ucode AMPDU aggregation, the new fields are added to
- * the END of previous structure so that it's compatible in driver.
- */
- txh->MaxNMpdus = cpu_to_le16(0);
- txh->MaxABytes_MRT = cpu_to_le16(0);
- txh->MaxABytes_FBR = cpu_to_le16(0);
- txh->MinMBytes = cpu_to_le16(0);
-
- /* (5) RTS/CTS: determine RTS/CTS PLCP header and MAC duration,
- * furnish struct d11txh */
- /* RTS PLCP header and RTS frame */
- if (use_rts || use_cts) {
- if (use_rts && use_cts)
- use_cts = false;
-
- for (k = 0; k < 2; k++) {
- rts_rspec[k] = brcms_c_rspec_to_rts_rspec(wlc, rspec[k],
- false,
- mimo_ctlchbw);
- }
-
- if (!IS_OFDM(rts_rspec[0]) &&
- !((RSPEC2RATE(rts_rspec[0]) == BRCM_RATE_1M) ||
- (wlc->PLCPHdr_override == BRCMS_PLCP_LONG))) {
- rts_preamble_type[0] = BRCMS_SHORT_PREAMBLE;
- mch |= TXC_PREAMBLE_RTS_MAIN_SHORT;
- }
-
- if (!IS_OFDM(rts_rspec[1]) &&
- !((RSPEC2RATE(rts_rspec[1]) == BRCM_RATE_1M) ||
- (wlc->PLCPHdr_override == BRCMS_PLCP_LONG))) {
- rts_preamble_type[1] = BRCMS_SHORT_PREAMBLE;
- mch |= TXC_PREAMBLE_RTS_FB_SHORT;
- }
-
- /* RTS/CTS additions to MacTxControlLow */
- if (use_cts) {
- txh->MacTxControlLow |= cpu_to_le16(TXC_SENDCTS);
- } else {
- txh->MacTxControlLow |= cpu_to_le16(TXC_SENDRTS);
- txh->MacTxControlLow |= cpu_to_le16(TXC_LONGFRAME);
- }
-
- /* RTS PLCP header */
- rts_plcp = txh->RTSPhyHeader;
- if (use_cts)
- rts_phylen = DOT11_CTS_LEN + FCS_LEN;
- else
- rts_phylen = DOT11_RTS_LEN + FCS_LEN;
-
- brcms_c_compute_plcp(wlc, rts_rspec[0], rts_phylen, rts_plcp);
-
- /* fallback rate version of RTS PLCP header */
- brcms_c_compute_plcp(wlc, rts_rspec[1], rts_phylen,
- rts_plcp_fallback);
- memcpy(&txh->RTSPLCPFallback, rts_plcp_fallback,
- sizeof(txh->RTSPLCPFallback));
-
- /* RTS frame fields... */
- rts = (struct ieee80211_rts *)&txh->rts_frame;
-
- durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec[0],
- rspec[0], rts_preamble_type[0],
- preamble_type[0], phylen, false);
- rts->duration = cpu_to_le16(durid);
- /* fallback rate version of RTS DUR field */
- durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
- rts_rspec[1], rspec[1],
- rts_preamble_type[1],
- preamble_type[1], phylen, false);
- txh->RTSDurFallback = cpu_to_le16(durid);
-
- if (use_cts) {
- rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
- IEEE80211_STYPE_CTS);
-
- memcpy(&rts->ra, &h->addr2, ETH_ALEN);
- } else {
- rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
- IEEE80211_STYPE_RTS);
-
- memcpy(&rts->ra, &h->addr1, 2 * ETH_ALEN);
- }
-
- /* mainrate
- * low 8 bits: main frag rate/mcs,
- * high 8 bits: rts/cts rate/mcs
- */
- mainrates |= (IS_OFDM(rts_rspec[0]) ?
- D11A_PHY_HDR_GRATE(
- (struct ofdm_phy_hdr *) rts_plcp) :
- rts_plcp[0]) << 8;
- } else {
- memset((char *)txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
- memset((char *)&txh->rts_frame, 0,
- sizeof(struct ieee80211_rts));
- memset((char *)txh->RTSPLCPFallback, 0,
- sizeof(txh->RTSPLCPFallback));
- txh->RTSDurFallback = 0;
- }
-
-#ifdef SUPPORT_40MHZ
- /* add null delimiter count */
- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && IS_MCS(rspec)) {
- txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] =
- brcm_c_ampdu_null_delim_cnt(wlc->ampdu, scb, rspec, phylen);
- }
-#endif
-
- /* Now that RTS/RTS FB preamble types are updated, write the final value */
- txh->MacTxControlHigh = cpu_to_le16(mch);
-
- /* MainRates (both the rts and frag plcp rates have been calculated now) */
- txh->MainRates = cpu_to_le16(mainrates);
-
- /* XtraFrameTypes */
- xfts = FRAMETYPE(rspec[1], wlc->mimoft);
- xfts |= (FRAMETYPE(rts_rspec[0], wlc->mimoft) << XFTS_RTS_FT_SHIFT);
- xfts |= (FRAMETYPE(rts_rspec[1], wlc->mimoft) << XFTS_FBRRTS_FT_SHIFT);
- xfts |=
- CHSPEC_CHANNEL(BRCMS_BAND_PI_RADIO_CHANSPEC) << XFTS_CHANNEL_SHIFT;
- txh->XtraFrameTypes = cpu_to_le16(xfts);
-
- /* PhyTxControlWord */
- phyctl = FRAMETYPE(rspec[0], wlc->mimoft);
- if ((preamble_type[0] == BRCMS_SHORT_PREAMBLE) ||
- (preamble_type[0] == BRCMS_GF_PREAMBLE)) {
- if (RSPEC2RATE(rspec[0]) != BRCM_RATE_1M)
- phyctl |= PHY_TXC_SHORT_HDR;
- }
-
- /* phytxant is properly bit shifted */
- phyctl |= brcms_c_stf_d11hdrs_phyctl_txant(wlc, rspec[0]);
- txh->PhyTxControlWord = cpu_to_le16(phyctl);
-
- /* PhyTxControlWord_1 */
- if (BRCMS_PHY_11N_CAP(wlc->band)) {
- u16 phyctl1 = 0;
-
- phyctl1 = brcms_c_phytxctl1_calc(wlc, rspec[0]);
- txh->PhyTxControlWord_1 = cpu_to_le16(phyctl1);
- phyctl1 = brcms_c_phytxctl1_calc(wlc, rspec[1]);
- txh->PhyTxControlWord_1_Fbr = cpu_to_le16(phyctl1);
-
- if (use_rts || use_cts) {
- phyctl1 = brcms_c_phytxctl1_calc(wlc, rts_rspec[0]);
- txh->PhyTxControlWord_1_Rts = cpu_to_le16(phyctl1);
- phyctl1 = brcms_c_phytxctl1_calc(wlc, rts_rspec[1]);
- txh->PhyTxControlWord_1_FbrRts = cpu_to_le16(phyctl1);
- }
-
- /*
- * For mcs frames, if mixedmode(overloaded with long preamble) is going to be set,
- * fill in non-zero MModeLen and/or MModeFbrLen
- * it will be unnecessary if they are separated
- */
- if (IS_MCS(rspec[0]) &&
- (preamble_type[0] == BRCMS_MM_PREAMBLE)) {
- u16 mmodelen =
- brcms_c_calc_lsig_len(wlc, rspec[0], phylen);
- txh->MModeLen = cpu_to_le16(mmodelen);
- }
-
- if (IS_MCS(rspec[1]) &&
- (preamble_type[1] == BRCMS_MM_PREAMBLE)) {
- u16 mmodefbrlen =
- brcms_c_calc_lsig_len(wlc, rspec[1], phylen);
- txh->MModeFbrLen = cpu_to_le16(mmodefbrlen);
- }
- }
-
- ac = skb_get_queue_mapping(p);
- if (SCB_WME(scb) && qos && wlc->edcf_txop[ac]) {
- uint frag_dur, dur, dur_fallback;
-
- /* WME: Update TXOP threshold */
- if ((!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) && (frag == 0)) {
- frag_dur =
- brcms_c_calc_frame_time(wlc, rspec[0],
- preamble_type[0], phylen);
-
- if (rts) {
- /* 1 RTS or CTS-to-self frame */
- dur =
- brcms_c_calc_cts_time(wlc, rts_rspec[0],
- rts_preamble_type[0]);
- dur_fallback =
- brcms_c_calc_cts_time(wlc, rts_rspec[1],
- rts_preamble_type[1]);
- /* (SIFS + CTS) + SIFS + frame + SIFS + ACK */
- dur += le16_to_cpu(rts->duration);
- dur_fallback +=
- le16_to_cpu(txh->RTSDurFallback);
- } else if (use_rifs) {
- dur = frag_dur;
- dur_fallback = 0;
- } else {
- /* frame + SIFS + ACK */
- dur = frag_dur;
- dur +=
- brcms_c_compute_frame_dur(wlc, rspec[0],
- preamble_type[0], 0);
-
- dur_fallback =
- brcms_c_calc_frame_time(wlc, rspec[1],
- preamble_type[1],
- phylen);
- dur_fallback +=
- brcms_c_compute_frame_dur(wlc, rspec[1],
- preamble_type[1], 0);
- }
- /* NEED to set TxFesTimeNormal (hard) */
- txh->TxFesTimeNormal = cpu_to_le16((u16) dur);
- /* NEED to set fallback rate version of TxFesTimeNormal (hard) */
- txh->TxFesTimeFallback =
- cpu_to_le16((u16) dur_fallback);
-
- /* update txop byte threshold (txop minus intraframe overhead) */
- if (wlc->edcf_txop[ac] >= (dur - frag_dur)) {
- {
- uint newfragthresh;
-
- newfragthresh =
- brcms_c_calc_frame_len(wlc,
- rspec[0], preamble_type[0],
- (wlc->edcf_txop[ac] -
- (dur - frag_dur)));
- /* range bound the fragthreshold */
- if (newfragthresh < DOT11_MIN_FRAG_LEN)
- newfragthresh =
- DOT11_MIN_FRAG_LEN;
- else if (newfragthresh >
- wlc->usr_fragthresh)
- newfragthresh =
- wlc->usr_fragthresh;
- /* update the fragthresh and do txc update */
- if (wlc->fragthresh[queue] !=
- (u16) newfragthresh) {
- wlc->fragthresh[queue] =
- (u16) newfragthresh;
- }
- }
- } else
- wiphy_err(wlc->wiphy, "wl%d: %s txop invalid "
- "for rate %d\n",
- wlc->pub->unit, fifo_names[queue],
- RSPEC2RATE(rspec[0]));
-
- if (dur > wlc->edcf_txop[ac])
- wiphy_err(wlc->wiphy, "wl%d: %s: %s txop "
- "exceeded phylen %d/%d dur %d/%d\n",
- wlc->pub->unit, __func__,
- fifo_names[queue],
- phylen, wlc->fragthresh[queue],
- dur, wlc->edcf_txop[ac]);
- }
- }
-
- return 0;
-}
-
-void brcms_c_tbtt(struct brcms_c_info *wlc)
-{
- struct brcms_bss_cfg *cfg = wlc->cfg;
-
- if (!cfg->BSS) {
- /* DirFrmQ is now valid...defer setting until end of ATIM window */
- wlc->qvalid |= MCMD_DIRFRMQVAL;
- }
-}
-
-static void brcms_c_war16165(struct brcms_c_info *wlc, bool tx)
-{
- if (tx) {
- /* the post-increment is used in STAY_AWAKE macro */
- if (wlc->txpend16165war++ == 0)
- brcms_c_set_ps_ctrl(wlc);
- } else {
- wlc->txpend16165war--;
- if (wlc->txpend16165war == 0)
- brcms_c_set_ps_ctrl(wlc);
- }
-}
-
-/* process an individual struct tx_status */
-bool
-brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs, u32 frm_tx2)
-{
- struct sk_buff *p;
- uint queue;
- struct d11txh *txh;
- struct scb *scb = NULL;
- bool free_pdu;
- int tx_rts, tx_frame_count, tx_rts_count;
- uint totlen, supr_status;
- bool lastframe;
- struct ieee80211_hdr *h;
- u16 mcl;
- struct ieee80211_tx_info *tx_info;
- struct ieee80211_tx_rate *txrate;
- int i;
-
- (void)(frm_tx2); /* Compiler reference to avoid unused variable warning */
-
- /* discard intermediate indications for ucode with one legitimate case:
- * e.g. if "useRTS" is set. ucode did a successful rts/cts exchange, but the subsequent
- * tx of DATA failed. so it will start rts/cts from the beginning (resetting the rts
- * transmission count)
- */
- if (!(txs->status & TX_STATUS_AMPDU)
- && (txs->status & TX_STATUS_INTERMEDIATE)) {
- wiphy_err(wlc->wiphy, "%s: INTERMEDIATE but not AMPDU\n",
- __func__);
- return false;
- }
-
- queue = txs->frameid & TXFID_QUEUE_MASK;
- if (queue >= NFIFO) {
- p = NULL;
- goto fatal;
- }
-
- p = GETNEXTTXP(wlc, queue);
- if (BRCMS_WAR16165(wlc))
- brcms_c_war16165(wlc, false);
- if (p == NULL)
- goto fatal;
-
- txh = (struct d11txh *) (p->data);
- mcl = le16_to_cpu(txh->MacTxControlLow);
-
- if (txs->phyerr) {
- if (WL_ERROR_ON()) {
- wiphy_err(wlc->wiphy, "phyerr 0x%x, rate 0x%x\n",
- txs->phyerr, txh->MainRates);
- brcms_c_print_txdesc(txh);
- }
- brcms_c_print_txstatus(txs);
- }
-
- if (txs->frameid != cpu_to_le16(txh->TxFrameID))
- goto fatal;
- tx_info = IEEE80211_SKB_CB(p);
- h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
-
- if (tx_info->control.sta)
- scb = (struct scb *)tx_info->control.sta->drv_priv;
-
- if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
- brcms_c_ampdu_dotxstatus(wlc->ampdu, scb, p, txs);
- return false;
- }
-
- supr_status = txs->status & TX_STATUS_SUPR_MASK;
- if (supr_status == TX_STATUS_SUPR_BADCH)
- BCMMSG(wlc->wiphy,
- "%s: Pkt tx suppressed, possibly channel %d\n",
- __func__, CHSPEC_CHANNEL(wlc->default_bss->chanspec));
-
- tx_rts = cpu_to_le16(txh->MacTxControlLow) & TXC_SENDRTS;
- tx_frame_count =
- (txs->status & TX_STATUS_FRM_RTX_MASK) >> TX_STATUS_FRM_RTX_SHIFT;
- tx_rts_count =
- (txs->status & TX_STATUS_RTS_RTX_MASK) >> TX_STATUS_RTS_RTX_SHIFT;
-
- lastframe = !ieee80211_has_morefrags(h->frame_control);
-
- if (!lastframe) {
- wiphy_err(wlc->wiphy, "Not last frame!\n");
- } else {
- /*
- * Set information to be consumed by Minstrel ht.
- *
- * The "fallback limit" is the number of tx attempts a given
- * MPDU is sent at the "primary" rate. Tx attempts beyond that
- * limit are sent at the "secondary" rate.
- * A 'short frame' does not exceed RTS treshold.
- */
- u16 sfbl, /* Short Frame Rate Fallback Limit */
- lfbl, /* Long Frame Rate Fallback Limit */
- fbl;
-
- if (queue < AC_COUNT) {
- sfbl = BRCMS_WME_RETRY_SFB_GET(wlc, wme_fifo2ac[queue]);
- lfbl = BRCMS_WME_RETRY_LFB_GET(wlc, wme_fifo2ac[queue]);
- } else {
- sfbl = wlc->SFBL;
- lfbl = wlc->LFBL;
- }
-
- txrate = tx_info->status.rates;
- if (txrate[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
- fbl = lfbl;
- else
- fbl = sfbl;
-
- ieee80211_tx_info_clear_status(tx_info);
-
- if ((tx_frame_count > fbl) && (txrate[1].idx >= 0)) {
- /* rate selection requested a fallback rate and we used it */
- txrate[0].count = fbl;
- txrate[1].count = tx_frame_count - fbl;
- } else {
- /* rate selection did not request fallback rate, or we didn't need it */
- txrate[0].count = tx_frame_count;
- /* rc80211_minstrel.c:minstrel_tx_status() expects unused rates to be marked with idx = -1 */
- txrate[1].idx = -1;
- txrate[1].count = 0;
- }
-
- /* clear the rest of the rates */
- for (i = 2; i < IEEE80211_TX_MAX_RATES; i++) {
- txrate[i].idx = -1;
- txrate[i].count = 0;
- }
-
- if (txs->status & TX_STATUS_ACK_RCV)
- tx_info->flags |= IEEE80211_TX_STAT_ACK;
- }
-
- totlen = brcmu_pkttotlen(p);
- free_pdu = true;
-
- brcms_c_txfifo_complete(wlc, queue, 1);
-
- if (lastframe) {
- p->next = NULL;
- p->prev = NULL;
- /* remove PLCP & Broadcom tx descriptor header */
- skb_pull(p, D11_PHY_HDR_LEN);
- skb_pull(p, D11_TXH_LEN);
- ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p);
- } else {
- wiphy_err(wlc->wiphy, "%s: Not last frame => not calling "
- "tx_status\n", __func__);
- }
-
- return false;
-
- fatal:
- if (p)
- brcmu_pkt_buf_free_skb(p);
-
- return true;
-
-}
-
-void
-brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo, s8 txpktpend)
-{
- TXPKTPENDDEC(wlc, fifo, txpktpend);
- BCMMSG(wlc->wiphy, "pktpend dec %d to %d\n", txpktpend,
- TXPKTPENDGET(wlc, fifo));
-
- /* There is more room; mark precedences related to this FIFO sendable */
- BRCMS_TX_FIFO_ENAB(wlc, fifo);
-
- /* Clear MHF2_TXBCMC_NOW flag if BCMC fifo has drained */
- if (AP_ENAB(wlc->pub) &&
- !TXPKTPENDGET(wlc, TX_BCMC_FIFO)) {
- brcms_c_mhf(wlc, MHF2, MHF2_TXBCMC_NOW, 0, BRCM_BAND_AUTO);
- }
-
- /* figure out which bsscfg is being worked on... */
-}
-
-/* Update beacon listen interval in shared memory */
-void brcms_c_bcn_li_upd(struct brcms_c_info *wlc)
-{
- if (AP_ENAB(wlc->pub))
- return;
-
- /* wake up every DTIM is the default */
- if (wlc->bcn_li_dtim == 1)
- brcms_c_write_shm(wlc, M_BCN_LI, 0);
- else
- brcms_c_write_shm(wlc, M_BCN_LI,
- (wlc->bcn_li_dtim << 8) | wlc->bcn_li_bcn);
-}
-
-/*
- * recover 64bit TSF value from the 16bit TSF value in the rx header
- * given the assumption that the TSF passed in header is within 65ms
- * of the current tsf.
- *
- * 6 5 4 4 3 2 1
- * 3.......6.......8.......0.......2.......4.......6.......8......0
- * |<---------- tsf_h ----------->||<--- tsf_l -->||<-RxTSFTime ->|
- *
- * The RxTSFTime are the lowest 16 bits and provided by the ucode. The
- * tsf_l is filled in by brcms_b_recv, which is done earlier in the
- * receive call sequence after rx interrupt. Only the higher 16 bits
- * are used. Finally, the tsf_h is read from the tsf register.
- */
-static u64 brcms_c_recover_tsf64(struct brcms_c_info *wlc,
- struct brcms_d11rxhdr *rxh)
-{
- u32 tsf_h, tsf_l;
- u16 rx_tsf_0_15, rx_tsf_16_31;
-
- brcms_b_read_tsf(wlc->hw, &tsf_l, &tsf_h);
-
- rx_tsf_16_31 = (u16)(tsf_l >> 16);
- rx_tsf_0_15 = rxh->rxhdr.RxTSFTime;
-
- /*
- * a greater tsf time indicates the low 16 bits of
- * tsf_l wrapped, so decrement the high 16 bits.
- */
- if ((u16)tsf_l < rx_tsf_0_15) {
- rx_tsf_16_31 -= 1;
- if (rx_tsf_16_31 == 0xffff)
- tsf_h -= 1;
- }
-
- return ((u64)tsf_h << 32) | (((u32)rx_tsf_16_31 << 16) + rx_tsf_0_15);
-}
-
-static void
-prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
- struct sk_buff *p,
- struct ieee80211_rx_status *rx_status)
-{
- struct brcms_d11rxhdr *wlc_rxh = (struct brcms_d11rxhdr *) rxh;
- int preamble;
- int channel;
- ratespec_t rspec;
- unsigned char *plcp;
-
- /* fill in TSF and flag its presence */
- rx_status->mactime = brcms_c_recover_tsf64(wlc, wlc_rxh);
- rx_status->flag |= RX_FLAG_MACTIME_MPDU;
-
- channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
-
- if (channel > 14) {
- rx_status->band = IEEE80211_BAND_5GHZ;
- rx_status->freq = ieee80211_ofdm_chan_to_freq(
- WF_CHAN_FACTOR_5_G/2, channel);
-
- } else {
- rx_status->band = IEEE80211_BAND_2GHZ;
- rx_status->freq = ieee80211_dsss_chan_to_freq(channel);
- }
-
- rx_status->signal = wlc_rxh->rssi; /* signal */
-
- /* noise */
- /* qual */
- rx_status->antenna = (rxh->PhyRxStatus_0 & PRXS0_RXANT_UPSUBBAND) ? 1 : 0; /* ant */
-
- plcp = p->data;
-
- rspec = brcms_c_compute_rspec(rxh, plcp);
- if (IS_MCS(rspec)) {
- rx_status->rate_idx = rspec & RSPEC_RATE_MASK;
- rx_status->flag |= RX_FLAG_HT;
- if (RSPEC_IS40MHZ(rspec))
- rx_status->flag |= RX_FLAG_40MHZ;
- } else {
- switch (RSPEC2RATE(rspec)) {
- case BRCM_RATE_1M:
- rx_status->rate_idx = 0;
- break;
- case BRCM_RATE_2M:
- rx_status->rate_idx = 1;
- break;
- case BRCM_RATE_5M5:
- rx_status->rate_idx = 2;
- break;
- case BRCM_RATE_11M:
- rx_status->rate_idx = 3;
- break;
- case BRCM_RATE_6M:
- rx_status->rate_idx = 4;
- break;
- case BRCM_RATE_9M:
- rx_status->rate_idx = 5;
- break;
- case BRCM_RATE_12M:
- rx_status->rate_idx = 6;
- break;
- case BRCM_RATE_18M:
- rx_status->rate_idx = 7;
- break;
- case BRCM_RATE_24M:
- rx_status->rate_idx = 8;
- break;
- case BRCM_RATE_36M:
- rx_status->rate_idx = 9;
- break;
- case BRCM_RATE_48M:
- rx_status->rate_idx = 10;
- break;
- case BRCM_RATE_54M:
- rx_status->rate_idx = 11;
- break;
- default:
- wiphy_err(wlc->wiphy, "%s: Unknown rate\n", __func__);
- }
-
- /* Determine short preamble and rate_idx */
- preamble = 0;
- if (IS_CCK(rspec)) {
- if (rxh->PhyRxStatus_0 & PRXS0_SHORTH)
- rx_status->flag |= RX_FLAG_SHORTPRE;
- } else if (IS_OFDM(rspec)) {
- rx_status->flag |= RX_FLAG_SHORTPRE;
- } else {
- wiphy_err(wlc->wiphy, "%s: Unknown modulation\n",
- __func__);
- }
- }
-
- if (PLCP3_ISSGI(plcp[3]))
- rx_status->flag |= RX_FLAG_SHORT_GI;
-
- if (rxh->RxStatus1 & RXS_DECERR) {
- rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
- wiphy_err(wlc->wiphy, "%s: RX_FLAG_FAILED_PLCP_CRC\n",
- __func__);
- }
- if (rxh->RxStatus1 & RXS_FCSERR) {
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- wiphy_err(wlc->wiphy, "%s: RX_FLAG_FAILED_FCS_CRC\n",
- __func__);
- }
-}
-
-static void
-brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
- struct sk_buff *p)
-{
- int len_mpdu;
- struct ieee80211_rx_status rx_status;
-
- memset(&rx_status, 0, sizeof(rx_status));
- prep_mac80211_status(wlc, rxh, p, &rx_status);
-
- /* mac header+body length, exclude CRC and plcp header */
- len_mpdu = p->len - D11_PHY_HDR_LEN - FCS_LEN;
- skb_pull(p, D11_PHY_HDR_LEN);
- __skb_trim(p, len_mpdu);
-
- memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
- ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
- return;
-}
-
-/* Process received frames */
-/*
- * Return true if more frames need to be processed. false otherwise.
- * Param 'bound' indicates max. # frames to process before break out.
- */
-void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
-{
- struct d11rxhdr *rxh;
- struct ieee80211_hdr *h;
- uint len;
- bool is_amsdu;
-
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
- /* frame starts with rxhdr */
- rxh = (struct d11rxhdr *) (p->data);
-
- /* strip off rxhdr */
- skb_pull(p, BRCMS_HWRXOFF);
-
- /* fixup rx header endianness */
- rxh->RxFrameSize = le16_to_cpu(rxh->RxFrameSize);
- rxh->PhyRxStatus_0 = le16_to_cpu(rxh->PhyRxStatus_0);
- rxh->PhyRxStatus_1 = le16_to_cpu(rxh->PhyRxStatus_1);
- rxh->PhyRxStatus_2 = le16_to_cpu(rxh->PhyRxStatus_2);
- rxh->PhyRxStatus_3 = le16_to_cpu(rxh->PhyRxStatus_3);
- rxh->PhyRxStatus_4 = le16_to_cpu(rxh->PhyRxStatus_4);
- rxh->PhyRxStatus_5 = le16_to_cpu(rxh->PhyRxStatus_5);
- rxh->RxStatus1 = le16_to_cpu(rxh->RxStatus1);
- rxh->RxStatus2 = le16_to_cpu(rxh->RxStatus2);
- rxh->RxTSFTime = le16_to_cpu(rxh->RxTSFTime);
- rxh->RxChan = le16_to_cpu(rxh->RxChan);
-
- /* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU subframes */
- if (rxh->RxStatus1 & RXS_PBPRES) {
- if (p->len < 2) {
- wiphy_err(wlc->wiphy, "wl%d: recv: rcvd runt of "
- "len %d\n", wlc->pub->unit, p->len);
- goto toss;
- }
- skb_pull(p, 2);
- }
-
- h = (struct ieee80211_hdr *)(p->data + D11_PHY_HDR_LEN);
- len = p->len;
-
- if (rxh->RxStatus1 & RXS_FCSERR) {
- if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) {
- wiphy_err(wlc->wiphy, "FCSERR while scanning******* -"
- " tossing\n");
- goto toss;
- } else {
- wiphy_err(wlc->wiphy, "RCSERR!!!\n");
- goto toss;
- }
- }
-
- /* check received pkt has at least frame control field */
- if (len < D11_PHY_HDR_LEN + sizeof(h->frame_control)) {
- goto toss;
- }
-
- is_amsdu = rxh->RxStatus2 & RXS_AMSDU_MASK;
-
- /* explicitly test bad src address to avoid sending bad deauth */
- if (!is_amsdu) {
- /* CTS and ACK CTL frames are w/o a2 */
-
- if (ieee80211_is_data(h->frame_control) ||
- ieee80211_is_mgmt(h->frame_control)) {
- if ((is_zero_ether_addr(h->addr2) ||
- is_multicast_ether_addr(h->addr2))) {
- wiphy_err(wlc->wiphy, "wl%d: %s: dropping a "
- "frame with invalid src mac address,"
- " a2: %pM\n",
- wlc->pub->unit, __func__, h->addr2);
- goto toss;
- }
- }
- }
-
- /* due to sheer numbers, toss out probe reqs for now */
- if (ieee80211_is_probe_req(h->frame_control))
- goto toss;
-
- if (is_amsdu)
- goto toss;
-
- brcms_c_recvctl(wlc, rxh, p);
- return;
-
- toss:
- brcmu_pkt_buf_free_skb(p);
-}
-
-/* calculate frame duration for Mixed-mode L-SIG spoofing, return
- * number of bytes goes in the length field
- *
- * Formula given by HT PHY Spec v 1.13
- * len = 3(nsyms + nstream + 3) - 3
- */
-u16
-brcms_c_calc_lsig_len(struct brcms_c_info *wlc, ratespec_t ratespec,
- uint mac_len)
-{
- uint nsyms, len = 0, kNdps;
-
- BCMMSG(wlc->wiphy, "wl%d: rate %d, len%d\n",
- wlc->pub->unit, RSPEC2RATE(ratespec), mac_len);
-
- if (IS_MCS(ratespec)) {
- uint mcs = ratespec & RSPEC_RATE_MASK;
- /* MCS_TXS(mcs) returns num tx streams - 1 */
- int tot_streams = (MCS_TXS(mcs) + 1) + RSPEC_STC(ratespec);
-
- /* the payload duration calculation matches that of regular ofdm */
- /* 1000Ndbps = kbps * 4 */
- kNdps =
- MCS_RATE(mcs, RSPEC_IS40MHZ(ratespec),
- RSPEC_ISSGI(ratespec)) * 4;
-
- if (RSPEC_STC(ratespec) == 0)
- /* NSyms = CEILING((SERVICE + 8*NBytes + TAIL) / Ndbps) */
- nsyms =
- CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
- APHY_TAIL_NBITS) * 1000, kNdps);
- else
- /* STBC needs to have even number of symbols */
- nsyms =
- 2 *
- CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
- APHY_TAIL_NBITS) * 1000, 2 * kNdps);
-
- nsyms += (tot_streams + 3); /* (+3) account for HT-SIG(2) and HT-STF(1) */
- /* 3 bytes/symbol @ legacy 6Mbps rate */
- len = (3 * nsyms) - 3; /* (-3) excluding service bits and tail bits */
- }
-
- return (u16) len;
-}
-
-/* calculate frame duration of a given rate and length, return time in usec unit */
-uint
-brcms_c_calc_frame_time(struct brcms_c_info *wlc, ratespec_t ratespec,
- u8 preamble_type, uint mac_len)
-{
- uint nsyms, dur = 0, Ndps, kNdps;
- uint rate = RSPEC2RATE(ratespec);
-
- if (rate == 0) {
- wiphy_err(wlc->wiphy, "wl%d: WAR: using rate of 1 mbps\n",
- wlc->pub->unit);
- rate = BRCM_RATE_1M;
- }
-
- BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d, len%d\n",
- wlc->pub->unit, ratespec, preamble_type, mac_len);
-
- if (IS_MCS(ratespec)) {
- uint mcs = ratespec & RSPEC_RATE_MASK;
- int tot_streams = MCS_TXS(mcs) + RSPEC_STC(ratespec);
-
- dur = PREN_PREAMBLE + (tot_streams * PREN_PREAMBLE_EXT);
- if (preamble_type == BRCMS_MM_PREAMBLE)
- dur += PREN_MM_EXT;
- /* 1000Ndbps = kbps * 4 */
- kNdps =
- MCS_RATE(mcs, RSPEC_IS40MHZ(ratespec),
- RSPEC_ISSGI(ratespec)) * 4;
-
- if (RSPEC_STC(ratespec) == 0)
- /* NSyms = CEILING((SERVICE + 8*NBytes + TAIL) / Ndbps) */
- nsyms =
- CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
- APHY_TAIL_NBITS) * 1000, kNdps);
- else
- /* STBC needs to have even number of symbols */
- nsyms =
- 2 *
- CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
- APHY_TAIL_NBITS) * 1000, 2 * kNdps);
-
- dur += APHY_SYMBOL_TIME * nsyms;
- if (BAND_2G(wlc->band->bandtype))
- dur += DOT11_OFDM_SIGNAL_EXTENSION;
- } else if (IS_OFDM(rate)) {
- dur = APHY_PREAMBLE_TIME;
- dur += APHY_SIGNAL_TIME;
- /* Ndbps = Mbps * 4 = rate(500Kbps) * 2 */
- Ndps = rate * 2;
- /* NSyms = CEILING((SERVICE + 8*NBytes + TAIL) / Ndbps) */
- nsyms =
- CEIL((APHY_SERVICE_NBITS + 8 * mac_len + APHY_TAIL_NBITS),
- Ndps);
- dur += APHY_SYMBOL_TIME * nsyms;
- if (BAND_2G(wlc->band->bandtype))
- dur += DOT11_OFDM_SIGNAL_EXTENSION;
- } else {
- /* calc # bits * 2 so factor of 2 in rate (1/2 mbps) will divide out */
- mac_len = mac_len * 8 * 2;
- /* calc ceiling of bits/rate = microseconds of air time */
- dur = (mac_len + rate - 1) / rate;
- if (preamble_type & BRCMS_SHORT_PREAMBLE)
- dur += BPHY_PLCP_SHORT_TIME;
- else
- dur += BPHY_PLCP_TIME;
- }
- return dur;
-}
-
-/* The opposite of brcms_c_calc_frame_time */
-static uint
-brcms_c_calc_frame_len(struct brcms_c_info *wlc, ratespec_t ratespec,
- u8 preamble_type, uint dur)
-{
- uint nsyms, mac_len, Ndps, kNdps;
- uint rate = RSPEC2RATE(ratespec);
-
- BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d, dur %d\n",
- wlc->pub->unit, ratespec, preamble_type, dur);
-
- if (IS_MCS(ratespec)) {
- uint mcs = ratespec & RSPEC_RATE_MASK;
- int tot_streams = MCS_TXS(mcs) + RSPEC_STC(ratespec);
- dur -= PREN_PREAMBLE + (tot_streams * PREN_PREAMBLE_EXT);
- /* payload calculation matches that of regular ofdm */
- if (BAND_2G(wlc->band->bandtype))
- dur -= DOT11_OFDM_SIGNAL_EXTENSION;
- /* kNdbps = kbps * 4 */
- kNdps =
- MCS_RATE(mcs, RSPEC_IS40MHZ(ratespec),
- RSPEC_ISSGI(ratespec)) * 4;
- nsyms = dur / APHY_SYMBOL_TIME;
- mac_len =
- ((nsyms * kNdps) -
- ((APHY_SERVICE_NBITS + APHY_TAIL_NBITS) * 1000)) / 8000;
- } else if (IS_OFDM(ratespec)) {
- dur -= APHY_PREAMBLE_TIME;
- dur -= APHY_SIGNAL_TIME;
- /* Ndbps = Mbps * 4 = rate(500Kbps) * 2 */
- Ndps = rate * 2;
- nsyms = dur / APHY_SYMBOL_TIME;
- mac_len =
- ((nsyms * Ndps) -
- (APHY_SERVICE_NBITS + APHY_TAIL_NBITS)) / 8;
- } else {
- if (preamble_type & BRCMS_SHORT_PREAMBLE)
- dur -= BPHY_PLCP_SHORT_TIME;
- else
- dur -= BPHY_PLCP_TIME;
- mac_len = dur * rate;
- /* divide out factor of 2 in rate (1/2 mbps) */
- mac_len = mac_len / 8 / 2;
- }
- return mac_len;
-}
-
-static uint
-brcms_c_calc_ba_time(struct brcms_c_info *wlc, ratespec_t rspec,
- u8 preamble_type)
-{
- BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, "
- "preamble_type %d\n", wlc->pub->unit, rspec, preamble_type);
- /* Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that is less than
- * or equal to the rate of the immediately previous frame in the FES
- */
- rspec = BRCMS_BASIC_RATE(wlc, rspec);
- /* BA len == 32 == 16(ctl hdr) + 4(ba len) + 8(bitmap) + 4(fcs) */
- return brcms_c_calc_frame_time(wlc, rspec, preamble_type,
- (DOT11_BA_LEN + DOT11_BA_BITMAP_LEN +
- FCS_LEN));
-}
-
-static uint
-brcms_c_calc_ack_time(struct brcms_c_info *wlc, ratespec_t rspec,
- u8 preamble_type)
-{
- uint dur = 0;
-
- BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d\n",
- wlc->pub->unit, rspec, preamble_type);
- /* Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that is less than
- * or equal to the rate of the immediately previous frame in the FES
- */
- rspec = BRCMS_BASIC_RATE(wlc, rspec);
- /* ACK frame len == 14 == 2(fc) + 2(dur) + 6(ra) + 4(fcs) */
- dur =
- brcms_c_calc_frame_time(wlc, rspec, preamble_type,
- (DOT11_ACK_LEN + FCS_LEN));
- return dur;
-}
-
-static uint
-brcms_c_calc_cts_time(struct brcms_c_info *wlc, ratespec_t rspec,
- u8 preamble_type)
-{
- BCMMSG(wlc->wiphy, "wl%d: ratespec 0x%x, preamble_type %d\n",
- wlc->pub->unit, rspec, preamble_type);
- return brcms_c_calc_ack_time(wlc, rspec, preamble_type);
-}
-
-/* derive wlc->band->basic_rate[] table from 'rateset' */
-void brcms_c_rate_lookup_init(struct brcms_c_info *wlc, wlc_rateset_t *rateset)
-{
- u8 rate;
- u8 mandatory;
- u8 cck_basic = 0;
- u8 ofdm_basic = 0;
- u8 *br = wlc->band->basic_rate;
- uint i;
-
- /* incoming rates are in 500kbps units as in 802.11 Supported Rates */
- memset(br, 0, BRCM_MAXRATE + 1);
-
- /* For each basic rate in the rates list, make an entry in the
- * best basic lookup.
- */
- for (i = 0; i < rateset->count; i++) {
- /* only make an entry for a basic rate */
- if (!(rateset->rates[i] & BRCMS_RATE_FLAG))
- continue;
-
- /* mask off basic bit */
- rate = (rateset->rates[i] & BRCMS_RATE_MASK);
-
- if (rate > BRCM_MAXRATE) {
- wiphy_err(wlc->wiphy, "brcms_c_rate_lookup_init: "
- "invalid rate 0x%X in rate set\n",
- rateset->rates[i]);
- continue;
- }
-
- br[rate] = rate;
- }
-
- /* The rate lookup table now has non-zero entries for each
- * basic rate, equal to the basic rate: br[basicN] = basicN
- *
- * To look up the best basic rate corresponding to any
- * particular rate, code can use the basic_rate table
- * like this
- *
- * basic_rate = wlc->band->basic_rate[tx_rate]
- *
- * Make sure there is a best basic rate entry for
- * every rate by walking up the table from low rates
- * to high, filling in holes in the lookup table
- */
-
- for (i = 0; i < wlc->band->hw_rateset.count; i++) {
- rate = wlc->band->hw_rateset.rates[i];
-
- if (br[rate] != 0) {
- /* This rate is a basic rate.
- * Keep track of the best basic rate so far by
- * modulation type.
- */
- if (IS_OFDM(rate))
- ofdm_basic = rate;
- else
- cck_basic = rate;
-
- continue;
- }
-
- /* This rate is not a basic rate so figure out the
- * best basic rate less than this rate and fill in
- * the hole in the table
- */
-
- br[rate] = IS_OFDM(rate) ? ofdm_basic : cck_basic;
-
- if (br[rate] != 0)
- continue;
-
- if (IS_OFDM(rate)) {
- /* In 11g and 11a, the OFDM mandatory rates are 6, 12, and 24 Mbps */
- if (rate >= BRCM_RATE_24M)
- mandatory = BRCM_RATE_24M;
- else if (rate >= BRCM_RATE_12M)
- mandatory = BRCM_RATE_12M;
- else
- mandatory = BRCM_RATE_6M;
- } else {
- /* In 11b, all the CCK rates are mandatory 1 - 11 Mbps */
- mandatory = rate;
- }
-
- br[rate] = mandatory;
- }
-}
-
-static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
- u8 basic_rate)
-{
- u8 phy_rate, index;
- u8 basic_phy_rate, basic_index;
- u16 dir_table, basic_table;
- u16 basic_ptr;
-
- /* Shared memory address for the table we are reading */
- dir_table = IS_OFDM(basic_rate) ? M_RT_DIRMAP_A : M_RT_DIRMAP_B;
-
- /* Shared memory address for the table we are writing */
- basic_table = IS_OFDM(rate) ? M_RT_BBRSMAP_A : M_RT_BBRSMAP_B;
-
- /*
- * for a given rate, the LS-nibble of the PLCP SIGNAL field is
- * the index into the rate table.
- */
- phy_rate = rate_info[rate] & BRCMS_RATE_MASK;
- basic_phy_rate = rate_info[basic_rate] & BRCMS_RATE_MASK;
- index = phy_rate & 0xf;
- basic_index = basic_phy_rate & 0xf;
-
- /* Find the SHM pointer to the ACK rate entry by looking in the
- * Direct-map Table
- */
- basic_ptr = brcms_c_read_shm(wlc, (dir_table + basic_index * 2));
-
- /* Update the SHM BSS-basic-rate-set mapping table with the pointer
- * to the correct basic rate for the given incoming rate
- */
- brcms_c_write_shm(wlc, (basic_table + index * 2), basic_ptr);
-}
-
-static const wlc_rateset_t *brcms_c_rateset_get_hwrs(struct brcms_c_info *wlc)
-{
- const wlc_rateset_t *rs_dflt;
-
- if (BRCMS_PHY_11N_CAP(wlc->band)) {
- if (BAND_5G(wlc->band->bandtype))
- rs_dflt = &ofdm_mimo_rates;
- else
- rs_dflt = &cck_ofdm_mimo_rates;
- } else if (wlc->band->gmode)
- rs_dflt = &cck_ofdm_rates;
- else
- rs_dflt = &cck_rates;
-
- return rs_dflt;
-}
-
-void brcms_c_set_ratetable(struct brcms_c_info *wlc)
-{
- const wlc_rateset_t *rs_dflt;
- wlc_rateset_t rs;
- u8 rate, basic_rate;
- uint i;
-
- rs_dflt = brcms_c_rateset_get_hwrs(wlc);
-
- brcms_c_rateset_copy(rs_dflt, &rs);
- brcms_c_rateset_mcs_upd(&rs, wlc->stf->txstreams);
-
- /* walk the phy rate table and update SHM basic rate lookup table */
- for (i = 0; i < rs.count; i++) {
- rate = rs.rates[i] & BRCMS_RATE_MASK;
-
- /* for a given rate BRCMS_BASIC_RATE returns the rate at
- * which a response ACK/CTS should be sent.
- */
- basic_rate = BRCMS_BASIC_RATE(wlc, rate);
- if (basic_rate == 0) {
- /* This should only happen if we are using a
- * restricted rateset.
- */
- basic_rate = rs.rates[0] & BRCMS_RATE_MASK;
- }
-
- brcms_c_write_rate_shm(wlc, rate, basic_rate);
- }
-}
-
-/*
- * Return true if the specified rate is supported by the specified band.
- * BRCM_BAND_AUTO indicates the current band.
- */
-bool brcms_c_valid_rate(struct brcms_c_info *wlc, ratespec_t rspec, int band,
- bool verbose)
-{
- wlc_rateset_t *hw_rateset;
- uint i;
-
- if ((band == BRCM_BAND_AUTO) || (band == wlc->band->bandtype)) {
- hw_rateset = &wlc->band->hw_rateset;
- } else if (NBANDS(wlc) > 1) {
- hw_rateset = &wlc->bandstate[OTHERBANDUNIT(wlc)]->hw_rateset;
- } else {
- /* other band specified and we are a single band device */
- return false;
- }
-
- /* check if this is a mimo rate */
- if (IS_MCS(rspec)) {
- if (!VALID_MCS((rspec & RSPEC_RATE_MASK)))
- goto error;
-
- return isset(hw_rateset->mcs, (rspec & RSPEC_RATE_MASK));
- }
-
- for (i = 0; i < hw_rateset->count; i++)
- if (hw_rateset->rates[i] == RSPEC2RATE(rspec))
- return true;
- error:
- if (verbose) {
- wiphy_err(wlc->wiphy, "wl%d: valid_rate: rate spec 0x%x "
- "not in hw_rateset\n", wlc->pub->unit, rspec);
- }
-
- return false;
-}
-
-static void brcms_c_update_mimo_band_bwcap(struct brcms_c_info *wlc, u8 bwcap)
-{
- uint i;
- struct brcms_band *band;
-
- for (i = 0; i < NBANDS(wlc); i++) {
- if (IS_SINGLEBAND_5G(wlc->deviceid))
- i = BAND_5G_INDEX;
- band = wlc->bandstate[i];
- if (band->bandtype == BRCM_BAND_5G) {
- if ((bwcap == BRCMS_N_BW_40ALL)
- || (bwcap == BRCMS_N_BW_20IN2G_40IN5G))
- band->mimo_cap_40 = true;
- else
- band->mimo_cap_40 = false;
- } else {
- if (bwcap == BRCMS_N_BW_40ALL)
- band->mimo_cap_40 = true;
- else
- band->mimo_cap_40 = false;
- }
- }
-}
-
-void brcms_c_mod_prb_rsp_rate_table(struct brcms_c_info *wlc, uint frame_len)
-{
- const wlc_rateset_t *rs_dflt;
- wlc_rateset_t rs;
- u8 rate;
- u16 entry_ptr;
- u8 plcp[D11_PHY_HDR_LEN];
- u16 dur, sifs;
- uint i;
-
- sifs = SIFS(wlc->band);
-
- rs_dflt = brcms_c_rateset_get_hwrs(wlc);
-
- brcms_c_rateset_copy(rs_dflt, &rs);
- brcms_c_rateset_mcs_upd(&rs, wlc->stf->txstreams);
-
- /* walk the phy rate table and update MAC core SHM basic rate table entries */
- for (i = 0; i < rs.count; i++) {
- rate = rs.rates[i] & BRCMS_RATE_MASK;
-
- entry_ptr = brcms_c_rate_shm_offset(wlc, rate);
-
- /* Calculate the Probe Response PLCP for the given rate */
- brcms_c_compute_plcp(wlc, rate, frame_len, plcp);
-
- /* Calculate the duration of the Probe Response frame plus SIFS for the MAC */
- dur = (u16) brcms_c_calc_frame_time(wlc, rate,
- BRCMS_LONG_PREAMBLE, frame_len);
- dur += sifs;
-
- /* Update the SHM Rate Table entry Probe Response values */
- brcms_c_write_shm(wlc, entry_ptr + M_RT_PRS_PLCP_POS,
- (u16) (plcp[0] + (plcp[1] << 8)));
- brcms_c_write_shm(wlc, entry_ptr + M_RT_PRS_PLCP_POS + 2,
- (u16) (plcp[2] + (plcp[3] << 8)));
- brcms_c_write_shm(wlc, entry_ptr + M_RT_PRS_DUR_POS, dur);
- }
-}
-
-/* Max buffering needed for beacon template/prb resp template is 142 bytes.
- *
- * PLCP header is 6 bytes.
- * 802.11 A3 header is 24 bytes.
- * Max beacon frame body template length is 112 bytes.
- * Max probe resp frame body template length is 110 bytes.
- *
- * *len on input contains the max length of the packet available.
- *
- * The *len value is set to the number of bytes in buf used, and starts with the PLCP
- * and included up to, but not including, the 4 byte FCS.
- */
-static void
-brcms_c_bcn_prb_template(struct brcms_c_info *wlc, u16 type,
- ratespec_t bcn_rspec,
- struct brcms_bss_cfg *cfg, u16 *buf, int *len)
-{
- static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
- struct cck_phy_hdr *plcp;
- struct ieee80211_mgmt *h;
- int hdr_len, body_len;
-
- if (MBSS_BCN_ENAB(cfg) && type == IEEE80211_STYPE_BEACON)
- hdr_len = DOT11_MAC_HDR_LEN;
- else
- hdr_len = D11_PHY_HDR_LEN + DOT11_MAC_HDR_LEN;
- body_len = *len - hdr_len; /* calc buffer size provided for frame body */
-
- *len = hdr_len + body_len; /* return actual size */
-
- /* format PHY and MAC headers */
- memset((char *)buf, 0, hdr_len);
-
- plcp = (struct cck_phy_hdr *) buf;
-
- /* PLCP for Probe Response frames are filled in from core's rate table */
- if (type == IEEE80211_STYPE_BEACON && !MBSS_BCN_ENAB(cfg)) {
- /* fill in PLCP */
- brcms_c_compute_plcp(wlc, bcn_rspec,
- (DOT11_MAC_HDR_LEN + body_len + FCS_LEN),
- (u8 *) plcp);
-
- }
- /* "Regular" and 16 MBSS but not for 4 MBSS */
- /* Update the phytxctl for the beacon based on the rspec */
- if (!SOFTBCN_ENAB(cfg))
- brcms_c_beacon_phytxctl_txant_upd(wlc, bcn_rspec);
-
- if (MBSS_BCN_ENAB(cfg) && type == IEEE80211_STYPE_BEACON)
- h = (struct ieee80211_mgmt *)&plcp[0];
- else
- h = (struct ieee80211_mgmt *)&plcp[1];
-
- /* fill in 802.11 header */
- h->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | type);
-
- /* DUR is 0 for multicast bcn, or filled in by MAC for prb resp */
- /* A1 filled in by MAC for prb resp, broadcast for bcn */
- if (type == IEEE80211_STYPE_BEACON)
- memcpy(&h->da, &ether_bcast, ETH_ALEN);
- memcpy(&h->sa, &cfg->cur_etheraddr, ETH_ALEN);
- memcpy(&h->bssid, &cfg->BSSID, ETH_ALEN);
-
- /* SEQ filled in by MAC */
-
- return;
-}
-
-int brcms_c_get_header_len()
-{
- return TXOFF;
-}
-
-/* Update a beacon for a particular BSS
- * For MBSS, this updates the software template and sets "latest" to the index of the
- * template updated.
- * Otherwise, it updates the hardware template.
- */
-void brcms_c_bss_update_beacon(struct brcms_c_info *wlc,
- struct brcms_bss_cfg *cfg)
-{
- int len = BCN_TMPL_LEN;
-
- /* Clear the soft intmask */
- wlc->defmacintmask &= ~MI_BCNTPL;
-
- if (!cfg->up) { /* Only allow updates on an UP bss */
- return;
- }
-
- /* Optimize: Some of if/else could be combined */
- if (!MBSS_BCN_ENAB(cfg) && HWBCN_ENAB(cfg)) {
- /* Hardware beaconing for this config */
- u16 bcn[BCN_TMPL_LEN / 2];
- u32 both_valid = MCMD_BCN0VLD | MCMD_BCN1VLD;
- d11regs_t *regs = wlc->regs;
-
- /* Check if both templates are in use, if so sched. an interrupt
- * that will call back into this routine
- */
- if ((R_REG(&regs->maccommand) & both_valid) == both_valid) {
- /* clear any previous status */
- W_REG(&regs->macintstatus, MI_BCNTPL);
- }
- /* Check that after scheduling the interrupt both of the
- * templates are still busy. if not clear the int. & remask
- */
- if ((R_REG(&regs->maccommand) & both_valid) == both_valid) {
- wlc->defmacintmask |= MI_BCNTPL;
- return;
- }
-
- wlc->bcn_rspec =
- brcms_c_lowest_basic_rspec(wlc, &cfg->current_bss->rateset);
- /* update the template and ucode shm */
- brcms_c_bcn_prb_template(wlc, IEEE80211_STYPE_BEACON,
- wlc->bcn_rspec, cfg, bcn, &len);
- brcms_c_write_hw_bcntemplates(wlc, bcn, len, false);
- }
-}
-
-/*
- * Update all beacons for the system.
- */
-void brcms_c_update_beacon(struct brcms_c_info *wlc)
-{
- int idx;
- struct brcms_bss_cfg *bsscfg;
-
- /* update AP or IBSS beacons */
- FOREACH_BSS(wlc, idx, bsscfg) {
- if (bsscfg->up && (BSSCFG_AP(bsscfg) || !bsscfg->BSS))
- brcms_c_bss_update_beacon(wlc, bsscfg);
- }
-}
-
-/* Write ssid into shared memory */
-void brcms_c_shm_ssid_upd(struct brcms_c_info *wlc, struct brcms_bss_cfg *cfg)
-{
- u8 *ssidptr = cfg->SSID;
- u16 base = M_SSID;
- u8 ssidbuf[IEEE80211_MAX_SSID_LEN];
-
- /* padding the ssid with zero and copy it into shm */
- memset(ssidbuf, 0, IEEE80211_MAX_SSID_LEN);
- memcpy(ssidbuf, ssidptr, cfg->SSID_len);
-
- brcms_c_copyto_shm(wlc, base, ssidbuf, IEEE80211_MAX_SSID_LEN);
-
- if (!MBSS_BCN_ENAB(cfg))
- brcms_c_write_shm(wlc, M_SSIDLEN, (u16) cfg->SSID_len);
-}
-
-void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend)
-{
- int idx;
- struct brcms_bss_cfg *bsscfg;
-
- /* update AP or IBSS probe responses */
- FOREACH_BSS(wlc, idx, bsscfg) {
- if (bsscfg->up && (BSSCFG_AP(bsscfg) || !bsscfg->BSS))
- brcms_c_bss_update_probe_resp(wlc, bsscfg, suspend);
- }
-}
-
-void
-brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
- struct brcms_bss_cfg *cfg,
- bool suspend)
-{
- u16 prb_resp[BCN_TMPL_LEN / 2];
- int len = BCN_TMPL_LEN;
-
- /* write the probe response to hardware, or save in the config structure */
- if (!MBSS_PRB_ENAB(cfg)) {
-
- /* create the probe response template */
- brcms_c_bcn_prb_template(wlc, IEEE80211_STYPE_PROBE_RESP, 0,
- cfg, prb_resp, &len);
-
- if (suspend)
- brcms_c_suspend_mac_and_wait(wlc);
-
- /* write the probe response into the template region */
- brcms_b_write_template_ram(wlc->hw, T_PRS_TPL_BASE,
- (len + 3) & ~3, prb_resp);
-
- /* write the length of the probe response frame (+PLCP/-FCS) */
- brcms_c_write_shm(wlc, M_PRB_RESP_FRM_LEN, (u16) len);
-
- /* write the SSID and SSID length */
- brcms_c_shm_ssid_upd(wlc, cfg);
-
- /*
- * Write PLCP headers and durations for probe response frames at all rates.
- * Use the actual frame length covered by the PLCP header for the call to
- * brcms_c_mod_prb_rsp_rate_table() by subtracting the PLCP len
- * and adding the FCS.
- */
- len += (-D11_PHY_HDR_LEN + FCS_LEN);
- brcms_c_mod_prb_rsp_rate_table(wlc, (u16) len);
-
- if (suspend)
- brcms_c_enable_mac(wlc);
- } else { /* Generating probe resp in sw; update local template */
- /* error: No software probe response support without MBSS */
- }
-}
-
-/* prepares pdu for transmission. returns BCM error codes */
-int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu, uint *fifop)
-{
- uint fifo;
- struct d11txh *txh;
- struct ieee80211_hdr *h;
- struct scb *scb;
-
- txh = (struct d11txh *) (pdu->data);
- h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
-
- /* get the pkt queue info. This was put at brcms_c_sendctl or
- * brcms_c_send for PDU */
- fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
-
- scb = NULL;
-
- *fifop = fifo;
-
- /* return if insufficient dma resources */
- if (TXAVAIL(wlc, fifo) < MAX_DMA_SEGS) {
- /* Mark precedences related to this FIFO, unsendable */
- BRCMS_TX_FIFO_CLEAR(wlc, fifo);
- return -EBUSY;
- }
- return 0;
-}
-
-/* init tx reported rate mechanism */
-void brcms_c_reprate_init(struct brcms_c_info *wlc)
-{
- int i;
- struct brcms_bss_cfg *bsscfg;
-
- FOREACH_BSS(wlc, i, bsscfg) {
- brcms_c_bsscfg_reprate_init(bsscfg);
- }
-}
-
-/* per bsscfg init tx reported rate mechanism */
-void brcms_c_bsscfg_reprate_init(struct brcms_bss_cfg *bsscfg)
-{
- bsscfg->txrspecidx = 0;
- memset((char *)bsscfg->txrspec, 0, sizeof(bsscfg->txrspec));
-}
-
-void brcms_default_rateset(struct brcms_c_info *wlc, wlc_rateset_t *rs)
-{
- brcms_c_rateset_default(rs, NULL, wlc->band->phytype,
- wlc->band->bandtype, false, BRCMS_RATE_MASK_FULL,
- (bool) N_ENAB(wlc->pub),
- CHSPEC_WLC_BW(wlc->default_bss->chanspec),
- wlc->stf->txstreams);
-}
-
-static void brcms_c_bss_default_init(struct brcms_c_info *wlc)
-{
- chanspec_t chanspec;
- struct brcms_band *band;
- struct brcms_bss_info *bi = wlc->default_bss;
-
- /* init default and target BSS with some sane initial values */
- memset((char *)(bi), 0, sizeof(struct brcms_bss_info));
- bi->beacon_period = BEACON_INTERVAL_DEFAULT;
- bi->dtim_period = DTIM_INTERVAL_DEFAULT;
-
- /* fill the default channel as the first valid channel
- * starting from the 2G channels
- */
- chanspec = CH20MHZ_CHSPEC(1);
- wlc->home_chanspec = bi->chanspec = chanspec;
-
- /* find the band of our default channel */
- band = wlc->band;
- if (NBANDS(wlc) > 1 && band->bandunit != CHSPEC_BANDUNIT(chanspec))
- band = wlc->bandstate[OTHERBANDUNIT(wlc)];
-
- /* init bss rates to the band specific default rate set */
- brcms_c_rateset_default(&bi->rateset, NULL, band->phytype,
- band->bandtype, false, BRCMS_RATE_MASK_FULL,
- (bool) N_ENAB(wlc->pub), CHSPEC_WLC_BW(chanspec),
- wlc->stf->txstreams);
-
- if (N_ENAB(wlc->pub))
- bi->flags |= BRCMS_BSS_HT;
-}
-
-static ratespec_t
-mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
- u32 int_val)
-{
- u8 stf = (int_val & NRATE_STF_MASK) >> NRATE_STF_SHIFT;
- u8 rate = int_val & NRATE_RATE_MASK;
- ratespec_t rspec;
- bool ismcs = ((int_val & NRATE_MCS_INUSE) == NRATE_MCS_INUSE);
- bool issgi = ((int_val & NRATE_SGI_MASK) >> NRATE_SGI_SHIFT);
- bool override_mcs_only = ((int_val & NRATE_OVERRIDE_MCS_ONLY)
- == NRATE_OVERRIDE_MCS_ONLY);
- int bcmerror = 0;
-
- if (!ismcs) {
- return (ratespec_t) rate;
- }
-
- /* validate the combination of rate/mcs/stf is allowed */
- if (N_ENAB(wlc->pub) && ismcs) {
- /* mcs only allowed when nmode */
- if (stf > PHY_TXC1_MODE_SDM) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Invalid stf\n",
- BRCMS_UNIT(wlc), __func__);
- bcmerror = -EINVAL;
- goto done;
- }
-
- /* mcs 32 is a special case, DUP mode 40 only */
- if (rate == 32) {
- if (!CHSPEC_IS40(wlc->home_chanspec) ||
- ((stf != PHY_TXC1_MODE_SISO)
- && (stf != PHY_TXC1_MODE_CDD))) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Invalid mcs "
- "32\n", BRCMS_UNIT(wlc), __func__);
- bcmerror = -EINVAL;
- goto done;
- }
- /* mcs > 7 must use stf SDM */
- } else if (rate > HIGHEST_SINGLE_STREAM_MCS) {
- /* mcs > 7 must use stf SDM */
- if (stf != PHY_TXC1_MODE_SDM) {
- BCMMSG(wlc->wiphy, "wl%d: enabling "
- "SDM mode for mcs %d\n",
- BRCMS_UNIT(wlc), rate);
- stf = PHY_TXC1_MODE_SDM;
- }
- } else {
- /* MCS 0-7 may use SISO, CDD, and for phy_rev >= 3 STBC */
- if ((stf > PHY_TXC1_MODE_STBC) ||
- (!BRCMS_STBC_CAP_PHY(wlc)
- && (stf == PHY_TXC1_MODE_STBC))) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Invalid STBC"
- "\n", BRCMS_UNIT(wlc), __func__);
- bcmerror = -EINVAL;
- goto done;
- }
- }
- } else if (IS_OFDM(rate)) {
- if ((stf != PHY_TXC1_MODE_CDD) && (stf != PHY_TXC1_MODE_SISO)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Invalid OFDM\n",
- BRCMS_UNIT(wlc), __func__);
- bcmerror = -EINVAL;
- goto done;
- }
- } else if (IS_CCK(rate)) {
- if ((cur_band->bandtype != BRCM_BAND_2G)
- || (stf != PHY_TXC1_MODE_SISO)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Invalid CCK\n",
- BRCMS_UNIT(wlc), __func__);
- bcmerror = -EINVAL;
- goto done;
- }
- } else {
- wiphy_err(wlc->wiphy, "wl%d: %s: Unknown rate type\n",
- BRCMS_UNIT(wlc), __func__);
- bcmerror = -EINVAL;
- goto done;
- }
- /* make sure multiple antennae are available for non-siso rates */
- if ((stf != PHY_TXC1_MODE_SISO) && (wlc->stf->txstreams == 1)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: SISO antenna but !SISO "
- "request\n", BRCMS_UNIT(wlc), __func__);
- bcmerror = -EINVAL;
- goto done;
- }
-
- rspec = rate;
- if (ismcs) {
- rspec |= RSPEC_MIMORATE;
- /* For STBC populate the STC field of the ratespec */
- if (stf == PHY_TXC1_MODE_STBC) {
- u8 stc;
- stc = 1; /* Nss for single stream is always 1 */
- rspec |= (stc << RSPEC_STC_SHIFT);
- }
- }
-
- rspec |= (stf << RSPEC_STF_SHIFT);
-
- if (override_mcs_only)
- rspec |= RSPEC_OVERRIDE_MCS_ONLY;
-
- if (issgi)
- rspec |= RSPEC_SHORT_GI;
-
- if ((rate != 0)
- && !brcms_c_valid_rate(wlc, rspec, cur_band->bandtype, true)) {
- return rate;
- }
-
- return rspec;
-done:
- return rate;
-}
-
-/* formula: IDLE_BUSY_RATIO_X_16 = (100-duty_cycle)/duty_cycle*16 */
-static int
-brcms_c_duty_cycle_set(struct brcms_c_info *wlc, int duty_cycle, bool isOFDM,
- bool writeToShm)
-{
- int idle_busy_ratio_x_16 = 0;
- uint offset =
- isOFDM ? M_TX_IDLE_BUSY_RATIO_X_16_OFDM :
- M_TX_IDLE_BUSY_RATIO_X_16_CCK;
- if (duty_cycle > 100 || duty_cycle < 0) {
- wiphy_err(wlc->wiphy, "wl%d: duty cycle value off limit\n",
- wlc->pub->unit);
- return -EINVAL;
- }
- if (duty_cycle)
- idle_busy_ratio_x_16 = (100 - duty_cycle) * 16 / duty_cycle;
- /* Only write to shared memory when wl is up */
- if (writeToShm)
- brcms_c_write_shm(wlc, offset, (u16) idle_busy_ratio_x_16);
-
- if (isOFDM)
- wlc->tx_duty_cycle_ofdm = (u16) duty_cycle;
- else
- wlc->tx_duty_cycle_cck = (u16) duty_cycle;
-
- return 0;
-}
-
-/* Read a single u16 from shared memory.
- * SHM 'offset' needs to be an even address
- */
-u16 brcms_c_read_shm(struct brcms_c_info *wlc, uint offset)
-{
- return brcms_b_read_shm(wlc->hw, offset);
-}
-
-/* Write a single u16 to shared memory.
- * SHM 'offset' needs to be an even address
- */
-void brcms_c_write_shm(struct brcms_c_info *wlc, uint offset, u16 v)
-{
- brcms_b_write_shm(wlc->hw, offset, v);
-}
-
-/* Copy a buffer to shared memory.
- * SHM 'offset' needs to be an even address and
- * Buffer length 'len' must be an even number of bytes
- */
-void brcms_c_copyto_shm(struct brcms_c_info *wlc, uint offset, const void *buf,
- int len)
-{
- /* offset and len need to be even */
- if (len <= 0 || (offset & 1) || (len & 1))
- return;
-
- brcms_b_copyto_objmem(wlc->hw, offset, buf, len, OBJADDR_SHM_SEL);
-
-}
-
-/* wrapper BMAC functions to for HIGH driver access */
-void brcms_c_mctrl(struct brcms_c_info *wlc, u32 mask, u32 val)
-{
- brcms_b_mctrl(wlc->hw, mask, val);
-}
-
-void brcms_c_mhf(struct brcms_c_info *wlc, u8 idx, u16 mask, u16 val, int bands)
-{
- brcms_b_mhf(wlc->hw, idx, mask, val, bands);
-}
-
-int brcms_c_xmtfifo_sz_get(struct brcms_c_info *wlc, uint fifo, uint *blocks)
-{
- return brcms_b_xmtfifo_sz_get(wlc->hw, fifo, blocks);
-}
-
-void brcms_c_write_template_ram(struct brcms_c_info *wlc, int offset, int len,
- void *buf)
-{
- brcms_b_write_template_ram(wlc->hw, offset, len, buf);
-}
-
-void brcms_c_write_hw_bcntemplates(struct brcms_c_info *wlc, void *bcn, int len,
- bool both)
-{
- brcms_b_write_hw_bcntemplates(wlc->hw, bcn, len, both);
-}
-
-void
-brcms_c_set_addrmatch(struct brcms_c_info *wlc, int match_reg_offset,
- const u8 *addr)
-{
- brcms_b_set_addrmatch(wlc->hw, match_reg_offset, addr);
- if (match_reg_offset == RCM_BSSID_OFFSET)
- memcpy(wlc->cfg->BSSID, addr, ETH_ALEN);
-}
-
-void brcms_c_pllreq(struct brcms_c_info *wlc, bool set, mbool req_bit)
-{
- brcms_b_pllreq(wlc->hw, set, req_bit);
-}
-
-void brcms_c_reset_bmac_done(struct brcms_c_info *wlc)
-{
-}
-
-/* check for the particular priority flow control bit being set */
-bool
-brcms_c_txflowcontrol_prio_isset(struct brcms_c_info *wlc,
- struct brcms_txq_info *q,
- int prio)
-{
- uint prio_mask;
-
- if (prio == ALLPRIO) {
- prio_mask = TXQ_STOP_FOR_PRIOFC_MASK;
- } else {
- prio_mask = NBITVAL(prio);
- }
-
- return (q->stopped & prio_mask) == prio_mask;
-}
-
-/* propagate the flow control to all interfaces using the given tx queue */
-void brcms_c_txflowcontrol(struct brcms_c_info *wlc,
- struct brcms_txq_info *qi,
- bool on, int prio)
-{
- uint prio_bits;
- uint cur_bits;
-
- BCMMSG(wlc->wiphy, "flow control kicks in\n");
-
- if (prio == ALLPRIO) {
- prio_bits = TXQ_STOP_FOR_PRIOFC_MASK;
- } else {
- prio_bits = NBITVAL(prio);
- }
-
- cur_bits = qi->stopped & prio_bits;
-
- /* Check for the case of no change and return early
- * Otherwise update the bit and continue
- */
- if (on) {
- if (cur_bits == prio_bits) {
- return;
- }
- mboolset(qi->stopped, prio_bits);
- } else {
- if (cur_bits == 0) {
- return;
- }
- mboolclr(qi->stopped, prio_bits);
- }
-
- /* If there is a flow control override we will not change the external
- * flow control state.
- */
- if (qi->stopped & ~TXQ_STOP_FOR_PRIOFC_MASK) {
- return;
- }
-
- brcms_c_txflowcontrol_signal(wlc, qi, on, prio);
-}
-
-void
-brcms_c_txflowcontrol_override(struct brcms_c_info *wlc,
- struct brcms_txq_info *qi,
- bool on, uint override)
-{
- uint prev_override;
-
- prev_override = (qi->stopped & ~TXQ_STOP_FOR_PRIOFC_MASK);
-
- /* Update the flow control bits and do an early return if there is
- * no change in the external flow control state.
- */
- if (on) {
- mboolset(qi->stopped, override);
- /* if there was a previous override bit on, then setting this
- * makes no difference.
- */
- if (prev_override) {
- return;
- }
-
- brcms_c_txflowcontrol_signal(wlc, qi, ON, ALLPRIO);
- } else {
- mboolclr(qi->stopped, override);
- /* clearing an override bit will only make a difference for
- * flow control if it was the only bit set. For any other
- * override setting, just return
- */
- if (prev_override != override) {
- return;
- }
-
- if (qi->stopped == 0) {
- brcms_c_txflowcontrol_signal(wlc, qi, OFF, ALLPRIO);
- } else {
- int prio;
-
- for (prio = MAXPRIO; prio >= 0; prio--) {
- if (!mboolisset(qi->stopped, NBITVAL(prio)))
- brcms_c_txflowcontrol_signal(
- wlc, qi, OFF, prio);
- }
- }
- }
-}
-
-static void brcms_c_txflowcontrol_reset(struct brcms_c_info *wlc)
-{
- struct brcms_txq_info *qi;
-
- for (qi = wlc->tx_queues; qi != NULL; qi = qi->next) {
- if (qi->stopped) {
- brcms_c_txflowcontrol_signal(wlc, qi, OFF, ALLPRIO);
- qi->stopped = 0;
- }
- }
-}
-
-static void
-brcms_c_txflowcontrol_signal(struct brcms_c_info *wlc,
- struct brcms_txq_info *qi, bool on, int prio)
-{
-#ifdef NON_FUNCTIONAL
- /* wlcif_list is never filled so this function is not functional */
- struct brcms_c_if *wlcif;
-
- for (wlcif = wlc->wlcif_list; wlcif != NULL; wlcif = wlcif->next) {
- if (wlcif->qi == qi && wlcif->flags & BRCMS_IF_LINKED)
- brcms_txflowcontrol(wlc->wl, wlcif->wlif, on, prio);
- }
-#endif
-}
-
-static struct brcms_txq_info *brcms_c_txq_alloc(struct brcms_c_info *wlc)
-{
- struct brcms_txq_info *qi, *p;
-
- qi = kzalloc(sizeof(struct brcms_txq_info), GFP_ATOMIC);
- if (qi != NULL) {
- /*
- * Have enough room for control packets along with HI watermark
- * Also, add room to txq for total psq packets if all the SCBs
- * leave PS mode. The watermark for flowcontrol to OS packets
- * will remain the same
- */
- brcmu_pktq_init(&qi->q, BRCMS_PREC_COUNT,
- (2 * wlc->pub->tunables->datahiwat) + PKTQ_LEN_DEFAULT
- + wlc->pub->psq_pkts_total);
-
- /* add this queue to the the global list */
- p = wlc->tx_queues;
- if (p == NULL) {
- wlc->tx_queues = qi;
- } else {
- while (p->next != NULL)
- p = p->next;
- p->next = qi;
- }
- }
- return qi;
-}
-
-static void brcms_c_txq_free(struct brcms_c_info *wlc,
- struct brcms_txq_info *qi)
-{
- struct brcms_txq_info *p;
-
- if (qi == NULL)
- return;
-
- /* remove the queue from the linked list */
- p = wlc->tx_queues;
- if (p == qi)
- wlc->tx_queues = p->next;
- else {
- while (p != NULL && p->next != qi)
- p = p->next;
- if (p != NULL)
- p->next = p->next->next;
- }
-
- kfree(qi);
-}
-
-/*
- * Flag 'scan in progress' to withhold dynamic phy calibration
- */
-void brcms_c_scan_start(struct brcms_c_info *wlc)
-{
- wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true);
-}
-
-void brcms_c_scan_stop(struct brcms_c_info *wlc)
-{
- wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false);
-}
-
-void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state)
-{
- wlc->pub->associated = state;
- wlc->cfg->associated = state;
-}
-
-/*
- * When a remote STA/AP is removed by Mac80211, or when it can no longer accept
- * AMPDU traffic, packets pending in hardware have to be invalidated so that
- * when later on hardware releases them, they can be handled appropriately.
- */
-void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
- struct ieee80211_sta *sta,
- void (*dma_callback_fn))
-{
- struct dma_pub *dmah;
- int i;
- for (i = 0; i < NFIFO; i++) {
- dmah = hw->di[i];
- if (dmah != NULL)
- dma_walk_packets(dmah, dma_callback_fn, sta);
- }
-}
-
-int brcms_c_get_curband(struct brcms_c_info *wlc)
-{
- return wlc->band->bandunit;
-}
-
-void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
-{
- /* flush packet queue when requested */
- if (drop)
- brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL);
-
- /* wait for queue and DMA fifos to run dry */
- while (!pktq_empty(&wlc->pkt_queue->q) ||
- TXPKTPENDTOT(wlc) > 0) {
- brcms_msleep(wlc->wl, 1);
- }
-}
-
-int brcms_c_set_par(struct brcms_c_info *wlc, enum wlc_par_id par_id,
- int int_val)
-{
- int err = 0;
-
- switch (par_id) {
- case IOV_BCN_LI_BCN:
- wlc->bcn_li_bcn = (u8) int_val;
- if (wlc->pub->up)
- brcms_c_bcn_li_upd(wlc);
- break;
- /* As long as override is false, this only sets the *user*
- targets. User can twiddle this all he wants with no harm.
- wlc_phy_txpower_set() explicitly sets override to false if
- not internal or test.
- */
- case IOV_QTXPOWER:{
- u8 qdbm;
- bool override;
-
- /* Remove override bit and clip to max qdbm value */
- qdbm = (u8)min_t(u32, (int_val & ~WL_TXPWR_OVERRIDE), 0xff);
- /* Extract override setting */
- override = (int_val & WL_TXPWR_OVERRIDE) ? true : false;
- err =
- wlc_phy_txpower_set(wlc->band->pi, qdbm, override);
- break;
- }
- case IOV_MPC:
- wlc->mpc = (bool)int_val;
- brcms_c_radio_mpc_upd(wlc);
- break;
- default:
- err = -ENOTSUPP;
- }
- return err;
-}
-
-int brcms_c_get_par(struct brcms_c_info *wlc, enum wlc_par_id par_id,
- int *ret_int_ptr)
-{
- int err = 0;
-
- switch (par_id) {
- case IOV_BCN_LI_BCN:
- *ret_int_ptr = wlc->bcn_li_bcn;
- break;
- case IOV_QTXPOWER: {
- uint qdbm;
- bool override;
-
- err = wlc_phy_txpower_get(wlc->band->pi, &qdbm,
- &override);
- if (err != 0)
- return err;
-
- /* Return qdbm units */
- *ret_int_ptr =
- qdbm | (override ? WL_TXPWR_OVERRIDE : 0);
- break;
- }
- case IOV_MPC:
- *ret_int_ptr = (s32) wlc->mpc;
- break;
- default:
- err = -ENOTSUPP;
- }
- return err;
-}
-
-/*
- * Search the name=value vars for a specific one and return its value.
- * Returns NULL if not found.
- */
-char *getvar(char *vars, const char *name)
-{
- char *s;
- int len;
-
- if (!name)
- return NULL;
-
- len = strlen(name);
- if (len == 0)
- return NULL;
-
- /* first look in vars[] */
- for (s = vars; s && *s;) {
- if ((memcmp(s, name, len) == 0) && (s[len] == '='))
- return &s[len + 1];
-
- while (*s++)
- ;
- }
- /* nothing found */
- return NULL;
-}
-
-/*
- * Search the vars for a specific one and return its value as
- * an integer. Returns 0 if not found.
- */
-int getintvar(char *vars, const char *name)
-{
- char *val;
-
- val = getvar(vars, name);
- if (val == NULL)
- return 0;
-
- return simple_strtoul(val, NULL, 0);
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/main.h b/drivers/staging/brcm80211/brcmsmac/main.h
deleted file mode 100644
index f204b1f4747..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/main.h
+++ /dev/null
@@ -1,1025 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_MAIN_H_
-#define _BRCM_MAIN_H_
-
-#include <linux/etherdevice.h>
-
-#include <brcmu_utils.h>
-#include "types.h"
-#include "d11.h"
-
-#define MA_WINDOW_SZ 8 /* moving average window size */
-#define BRCMS_HWRXOFF 38 /* chip rx buffer offset */
-#define INVCHANNEL 255 /* invalid channel */
-/* max # supported core revisions (0 .. MAXCOREREV - 1) */
-#define MAXCOREREV 28
-/* max # brcms_c_module_register() calls */
-#define BRCMS_MAXMODULES 22
-
-#define SEQNUM_SHIFT 4
-#define AMPDU_DELIMITER_LEN 4
-#define SEQNUM_MAX 0x1000
-
-#define APHY_CWMIN 15
-#define PHY_CWMAX 1023
-
-#define EDCF_AIFSN_MIN 1
-#define FRAGNUM_MASK 0xF
-
-#define NTXRATE 64 /* # tx MPDUs rate is reported for */
-
-#define BRCMS_BITSCNT(x) brcmu_bitcount((u8 *)&(x), sizeof(u8))
-
-/* Maximum wait time for a MAC suspend */
-/* uS: 83mS is max packet time (64KB ampdu @ 6Mbps) */
-#define BRCMS_MAX_MAC_SUSPEND 83000
-
-/* Probe Response timeout - responses for probe requests older that this are tossed, zero to disable
- */
-#define BRCMS_PRB_RESP_TIMEOUT 0 /* Disable probe response timeout */
-
-/* transmit buffer max headroom for protocol headers */
-#define TXOFF (D11_TXH_LEN + D11_PHY_HDR_LEN)
-
-#define AC_COUNT 4
-
-/* Macros for doing definition and get/set of bitfields
- * Usage example, e.g. a three-bit field (bits 4-6):
- * #define <NAME>_M BITFIELD_MASK(3)
- * #define <NAME>_S 4
- * ...
- * regval = R_REG(osh, &regs->regfoo);
- * field = GFIELD(regval, <NAME>);
- * regval = SFIELD(regval, <NAME>, 1);
- * W_REG(osh, &regs->regfoo, regval);
- */
-#define BITFIELD_MASK(width) \
- (((unsigned)1 << (width)) - 1)
-#define GFIELD(val, field) \
- (((val) >> field ## _S) & field ## _M)
-#define SFIELD(val, field, bits) \
- (((val) & (~(field ## _M << field ## _S))) | \
- ((unsigned)(bits) << field ## _S))
-
-#define SW_TIMER_MAC_STAT_UPD 30 /* periodic MAC stats update */
-
-/* Double check that unsupported cores are not enabled */
-#if CONF_MSK(D11CONF, 0x4f) || CONF_GE(D11CONF, MAXCOREREV)
-#error "Configuration for D11CONF includes unsupported versions."
-#endif /* Bad versions */
-
-#define VALID_COREREV(corerev) CONF_HAS(D11CONF, corerev)
-
-/* values for shortslot_override */
-#define BRCMS_SHORTSLOT_AUTO -1 /* Driver will manage Shortslot setting */
-#define BRCMS_SHORTSLOT_OFF 0 /* Turn off short slot */
-#define BRCMS_SHORTSLOT_ON 1 /* Turn on short slot */
-
-/* value for short/long and mixmode/greenfield preamble */
-#define BRCMS_LONG_PREAMBLE (0)
-#define BRCMS_SHORT_PREAMBLE (1 << 0)
-#define BRCMS_GF_PREAMBLE (1 << 1)
-#define BRCMS_MM_PREAMBLE (1 << 2)
-#define BRCMS_IS_MIMO_PREAMBLE(_pre) (((_pre) == BRCMS_GF_PREAMBLE) || \
- ((_pre) == BRCMS_MM_PREAMBLE))
-
-/* values for barker_preamble */
-#define BRCMS_BARKER_SHORT_ALLOWED 0 /* Short pre-amble allowed */
-
-/* A fifo is full. Clear precedences related to that FIFO */
-#define BRCMS_TX_FIFO_CLEAR(wlc, fifo) \
- ((wlc)->tx_prec_map &= ~(wlc)->fifo2prec_map[fifo])
-
-/* Fifo is NOT full. Enable precedences for that FIFO */
-#define BRCMS_TX_FIFO_ENAB(wlc, fifo) \
- ((wlc)->tx_prec_map |= (wlc)->fifo2prec_map[fifo])
-
-/* TxFrameID */
-/* seq and frag bits: SEQNUM_SHIFT, FRAGNUM_MASK (802.11.h) */
-/* rate epoch bits: TXFID_RATE_SHIFT, TXFID_RATE_MASK ((wlc_rate.c) */
-#define TXFID_QUEUE_MASK 0x0007 /* Bits 0-2 */
-#define TXFID_SEQ_MASK 0x7FE0 /* Bits 5-15 */
-#define TXFID_SEQ_SHIFT 5 /* Number of bit shifts */
-#define TXFID_RATE_PROBE_MASK 0x8000 /* Bit 15 for rate probe */
-#define TXFID_RATE_MASK 0x0018 /* Mask for bits 3 and 4 */
-#define TXFID_RATE_SHIFT 3 /* Shift 3 bits for rate mask */
-
-/* promote boardrev */
-#define BOARDREV_PROMOTABLE 0xFF /* from */
-#define BOARDREV_PROMOTED 1 /* to */
-
-/* if wpa is in use then portopen is true when the group key is plumbed otherwise it is always true
- */
-#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
-#define BRCMS_SW_KEYS(wlc, bsscfg) ((((wlc)->wsec_swkeys) || \
- ((bsscfg)->wsec & WSEC_SWFLAG)))
-
-#define BRCMS_PORTOPEN(cfg) \
- (((cfg)->WPA_auth != WPA_AUTH_DISABLED && WSEC_ENABLED((cfg)->wsec)) ? \
- (cfg)->wsec_portopen : true)
-
-#define PS_ALLOWED(wlc) brcms_c_ps_allowed(wlc)
-
-#define DATA_BLOCK_TX_SUPR (1 << 4)
-
-/* 802.1D Priority to TX FIFO number for wme */
-extern const u8 prio2fifo[];
-
-/* Ucode MCTL_WAKE override bits */
-#define BRCMS_WAKE_OVERRIDE_CLKCTL 0x01
-#define BRCMS_WAKE_OVERRIDE_PHYREG 0x02
-#define BRCMS_WAKE_OVERRIDE_MACSUSPEND 0x04
-#define BRCMS_WAKE_OVERRIDE_TXFIFO 0x08
-#define BRCMS_WAKE_OVERRIDE_FORCEFAST 0x10
-
-/* stuff pulled in from wlc.c */
-
-/* Interrupt bit error summary. Don't include I_RU: we refill DMA at other
- * times; and if we run out, constant I_RU interrupts may cause lockup. We
- * will still get error counts from rx0ovfl.
- */
-#define I_ERRORS (I_PC | I_PD | I_DE | I_RO | I_XU)
-/* default software intmasks */
-#define DEF_RXINTMASK (I_RI) /* enable rx int on rxfifo only */
-#define DEF_MACINTMASK (MI_TXSTOP | MI_TBTT | MI_ATIMWINEND | MI_PMQ | \
- MI_PHYTXERR | MI_DMAINT | MI_TFS | MI_BG_NOISE | \
- MI_CCA | MI_TO | MI_GP0 | MI_RFDISABLE | MI_PWRUP)
-
-#define RETRY_SHORT_DEF 7 /* Default Short retry Limit */
-#define RETRY_SHORT_MAX 255 /* Maximum Short retry Limit */
-#define RETRY_LONG_DEF 4 /* Default Long retry count */
-#define RETRY_SHORT_FB 3 /* Short retry count for fallback rate */
-#define RETRY_LONG_FB 2 /* Long retry count for fallback rate */
-
-#define MAXTXPKTS 6 /* max # pkts pending */
-
-/* frameburst */
-#define MAXTXFRAMEBURST 8 /* vanilla xpress mode: max frames/burst */
-#define MAXFRAMEBURST_TXOP 10000 /* Frameburst TXOP in usec */
-
-/* Per-AC retry limit register definitions; uses defs.h bitfield macros */
-#define EDCF_SHORT_S 0
-#define EDCF_SFB_S 4
-#define EDCF_LONG_S 8
-#define EDCF_LFB_S 12
-#define EDCF_SHORT_M BITFIELD_MASK(4)
-#define EDCF_SFB_M BITFIELD_MASK(4)
-#define EDCF_LONG_M BITFIELD_MASK(4)
-#define EDCF_LFB_M BITFIELD_MASK(4)
-
-#define NFIFO 6 /* # tx/rx fifopairs */
-
-#define BRCMS_WME_RETRY_SHORT_GET(wlc, ac) \
- GFIELD(wlc->wme_retries[ac], EDCF_SHORT)
-#define BRCMS_WME_RETRY_SFB_GET(wlc, ac) \
- GFIELD(wlc->wme_retries[ac], EDCF_SFB)
-#define BRCMS_WME_RETRY_LONG_GET(wlc, ac) \
- GFIELD(wlc->wme_retries[ac], EDCF_LONG)
-#define BRCMS_WME_RETRY_LFB_GET(wlc, ac) \
- GFIELD(wlc->wme_retries[ac], EDCF_LFB)
-
-#define BRCMS_WME_RETRY_SHORT_SET(wlc, ac, val) \
- (wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], EDCF_SHORT, val))
-#define BRCMS_WME_RETRY_SFB_SET(wlc, ac, val) \
- (wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], EDCF_SFB, val))
-#define BRCMS_WME_RETRY_LONG_SET(wlc, ac, val) \
- (wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], EDCF_LONG, val))
-#define BRCMS_WME_RETRY_LFB_SET(wlc, ac, val) \
- (wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], EDCF_LFB, val))
-
-/* PLL requests */
-
-/* pll is shared on old chips */
-#define BRCMS_PLLREQ_SHARED 0x1
-/* hold pll for radio monitor register checking */
-#define BRCMS_PLLREQ_RADIO_MON 0x2
-/* hold/release pll for some short operation */
-#define BRCMS_PLLREQ_FLIP 0x4
-
-/*
- * Macros to check if AP or STA is active.
- * AP Active means more than just configured: driver and BSS are "up";
- * that is, we are beaconing/responding as an AP (aps_associated).
- * STA Active similarly means the driver is up and a configured STA BSS
- * is up: either associated (stas_associated) or trying.
- *
- * Macro definitions vary as per AP/STA ifdefs, allowing references to
- * ifdef'd structure fields and constant values (0) for optimization.
- * Make sure to enclose blocks of code such that any routines they
- * reference can also be unused and optimized out by the linker.
- */
-/* NOTE: References structure fields defined in wlc.h */
-#define AP_ACTIVE(wlc) (0)
-
-/*
- * Detect Card removed.
- * Even checking an sbconfig register read will not false trigger when the core is in reset.
- * it breaks CF address mechanism. Accessing gphy phyversion will cause SB error if aphy
- * is in reset on 4306B0-DB. Need a simple accessible reg with fixed 0/1 pattern
- * (some platforms return all 0).
- * If clocks are present, call the sb routine which will figure out if the device is removed.
- */
-#define DEVICEREMOVED(wlc) \
- ((wlc->hw->clk) ? \
- ((R_REG(&wlc->hw->regs->maccontrol) & \
- (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN) : \
- (ai_deviceremoved(wlc->hw->sih)))
-
-#define BRCMS_UNIT(wlc) ((wlc)->pub->unit)
-
-struct brcms_protection {
- bool _g; /* use g spec protection, driver internal */
- s8 g_override; /* override for use of g spec protection */
- u8 gmode_user; /* user config gmode, operating band->gmode is different */
- s8 overlap; /* Overlap BSS/IBSS protection for both 11g and 11n */
- s8 nmode_user; /* user config nmode, operating pub->nmode is different */
- s8 n_cfg; /* use OFDM protection on MIMO frames */
- s8 n_cfg_override; /* override for use of N protection */
- bool nongf; /* non-GF present protection */
- s8 nongf_override; /* override for use of GF protection */
- s8 n_pam_override; /* override for preamble: MM or GF */
- bool n_obss; /* indicated OBSS Non-HT STA present */
-};
-
-/* anything affects the single/dual streams/antenna operation */
-struct brcms_stf {
- u8 hw_txchain; /* HW txchain bitmap cfg */
- u8 txchain; /* txchain bitmap being used */
- u8 txstreams; /* number of txchains being used */
-
- u8 hw_rxchain; /* HW rxchain bitmap cfg */
- u8 rxchain; /* rxchain bitmap being used */
- u8 rxstreams; /* number of rxchains being used */
-
- u8 ant_rx_ovr; /* rx antenna override */
- s8 txant; /* userTx antenna setting */
- u16 phytxant; /* phyTx antenna setting in txheader */
-
- u8 ss_opmode; /* singlestream Operational mode, 0:siso; 1:cdd */
- bool ss_algosel_auto; /* if true, use wlc->stf->ss_algo_channel; */
- /* else use wlc->band->stf->ss_mode_band; */
- u16 ss_algo_channel; /* ss based on per-channel algo: 0: SISO, 1: CDD 2: STBC */
- u8 no_cddstbc; /* stf override, 1: no CDD (or STBC) allowed */
-
- u8 rxchain_restore_delay; /* delay time to restore default rxchain */
-
- s8 ldpc; /* AUTO/ON/OFF ldpc cap supported */
- u8 txcore[MAX_STREAMS_SUPPORTED + 1]; /* bitmap of selected core for each Nsts */
- s8 spatial_policy;
-};
-
-#define BRCMS_STF_SS_STBC_TX(wlc, scb) \
- (((wlc)->stf->txstreams > 1) && (((wlc)->band->band_stf_stbc_tx == ON) || \
- (SCB_STBC_CAP((scb)) && \
- (wlc)->band->band_stf_stbc_tx == AUTO && \
- isset(&((wlc)->stf->ss_algo_channel), PHY_TXC1_MODE_STBC))))
-
-#define BRCMS_STBC_CAP_PHY(wlc) (BRCMS_ISNPHY(wlc->band) && \
- NREV_GE(wlc->band->phyrev, 3))
-
-#define BRCMS_SGI_CAP_PHY(wlc) ((BRCMS_ISNPHY(wlc->band) && \
- NREV_GE(wlc->band->phyrev, 3)) || \
- BRCMS_ISLCNPHY(wlc->band))
-
-#define BRCMS_CHAN_PHYTYPE(x) (((x) & RXS_CHAN_PHYTYPE_MASK) \
- >> RXS_CHAN_PHYTYPE_SHIFT)
-#define BRCMS_CHAN_CHANNEL(x) (((x) & RXS_CHAN_ID_MASK) \
- >> RXS_CHAN_ID_SHIFT)
-#define BRCMS_RX_CHANNEL(rxh) (BRCMS_CHAN_CHANNEL((rxh)->RxChan))
-
-/* brcms_bss_info flag bit values */
-#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */
-
-/* Flags used in brcms_c_txq_info.stopped */
-#define TXQ_STOP_FOR_PRIOFC_MASK 0x000000FF /* per prio flow control bits */
-#define TXQ_STOP_FOR_PKT_DRAIN 0x00000100 /* stop txq enqueue for packet drain */
-#define TXQ_STOP_FOR_AMPDU_FLOW_CNTRL 0x00000200 /* stop txq enqueue for ampdu flow control */
-
-#define BRCMS_HT_WEP_RESTRICT 0x01 /* restrict HT with WEP */
-#define BRCMS_HT_TKIP_RESTRICT 0x02 /* restrict HT with TKIP */
-
-/* Maximum # of keys that wl driver supports in S/W.
- * Keys supported in H/W is less than or equal to WSEC_MAX_KEYS.
- */
-#define WSEC_MAX_KEYS 54 /* Max # of keys (50 + 4 default keys) */
-#define BRCMS_DEFAULT_KEYS 4 /* Default # of keys */
-
-/*
-* Max # of keys currently supported:
-*
-* s/w keys if WSEC_SW(wlc->wsec).
-* h/w keys otherwise.
-*/
-#define BRCMS_MAX_WSEC_KEYS(wlc) WSEC_MAX_KEYS
-
-/* number of 802.11 default (non-paired, group keys) */
-#define WSEC_MAX_DEFAULT_KEYS 4 /* # of default keys */
-
-struct wsec_iv {
- u32 hi; /* upper 32 bits of IV */
- u16 lo; /* lower 16 bits of IV */
-};
-
-#define BRCMS_NUMRXIVS 16 /* # rx IVs (one per 802.11e TID) */
-
-struct wsec_key {
- u8 ea[ETH_ALEN]; /* per station */
- u8 idx; /* key index in wsec_keys array */
- u8 id; /* key ID [0-3] */
- u8 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
- u8 rcmta; /* rcmta entry index, same as idx by default */
- u16 flags; /* misc flags */
- u8 algo_hw; /* cache for hw register */
- u8 aes_mode; /* cache for hw register */
- s8 iv_len; /* IV length */
- s8 icv_len; /* ICV length */
- u32 len; /* key length..don't move this var */
- /* data is 4byte aligned */
- u8 data[WLAN_MAX_KEY_LEN]; /* key data */
- struct wsec_iv rxiv[BRCMS_NUMRXIVS]; /* Rx IV (one per TID) */
- struct wsec_iv txiv; /* Tx IV */
-};
-
-/*
- * core state (mac)
- */
-struct brcms_core {
- uint coreidx; /* # sb enumerated core */
-
- /* fifo */
- uint *txavail[NFIFO]; /* # tx descriptors available */
- s16 txpktpend[NFIFO]; /* tx admission control */
-
- struct macstat *macstat_snapshot; /* mac hw prev read values */
-};
-
-/*
- * band state (phy+ana+radio)
- */
-struct brcms_band {
- int bandtype; /* BRCM_BAND_2G, BRCM_BAND_5G */
- uint bandunit; /* bandstate[] index */
-
- u16 phytype; /* phytype */
- u16 phyrev;
- u16 radioid;
- u16 radiorev;
- struct brcms_phy_pub *pi; /* pointer to phy specific information */
- bool abgphy_encore;
-
- u8 gmode; /* currently active gmode */
-
- struct scb *hwrs_scb; /* permanent scb for hw rateset */
-
- wlc_rateset_t defrateset; /* band-specific copy of default_bss.rateset */
-
- ratespec_t rspec_override; /* 802.11 rate override */
- ratespec_t mrspec_override; /* multicast rate override */
- u8 band_stf_ss_mode; /* Configured STF type, 0:siso; 1:cdd */
- s8 band_stf_stbc_tx; /* STBC TX 0:off; 1:force on; -1:auto */
- wlc_rateset_t hw_rateset; /* rates supported by chip (phy-specific) */
- u8 basic_rate[BRCM_MAXRATE + 1]; /* basic rates indexed by rate */
- bool mimo_cap_40; /* 40 MHz cap enabled on this band */
- s8 antgain; /* antenna gain from srom */
-
- u16 CWmin; /* The minimum size of contention window, in unit of aSlotTime */
- u16 CWmax; /* The maximum size of contention window, in unit of aSlotTime */
- u16 bcntsfoff; /* beacon tsf offset */
-};
-
-/* tx completion callback takes 3 args */
-typedef void (*pkcb_fn_t) (struct brcms_c_info *wlc, uint txstatus, void *arg);
-
-struct pkt_cb {
- pkcb_fn_t fn; /* function to call when tx frame completes */
- void *arg; /* void arg for fn */
- u8 nextidx; /* index of next call back if threading */
- bool entered; /* recursion check */
-};
-
-/* module control blocks */
-struct modulecb {
- char name[32]; /* module name : NULL indicates empty array member */
- const struct brcmu_iovar *iovars; /* iovar table */
- void *hdl; /* handle passed when handler 'doiovar' is called */
- watchdog_fn_t watchdog_fn; /* watchdog handler */
- iovar_fn_t iovar_fn; /* iovar handler */
- down_fn_t down_fn; /* down handler. Note: the int returned
- * by the down function is a count of the
- * number of timers that could not be
- * freed.
- */
-};
-
-/* dump control blocks */
-struct dumpcb_s {
- const char *name; /* dump name */
- dump_fn_t dump_fn; /* 'wl dump' handler */
- void *dump_fn_arg;
- struct dumpcb_s *next;
-};
-
-struct edcf_acparam {
- u8 ACI;
- u8 ECW;
- u16 TXOP;
-} __packed;
-
-struct wme_param_ie {
- u8 oui[3];
- u8 type;
- u8 subtype;
- u8 version;
- u8 qosinfo;
- u8 rsvd;
- struct edcf_acparam acparam[AC_COUNT];
-} __packed;
-
-/* virtual interface */
-struct brcms_c_if {
- struct brcms_c_if *next;
- u8 type; /* BSS or WDS */
- u8 index; /* assigned in wl_add_if(), index of the wlif if any,
- * not necessarily corresponding to bsscfg._idx or
- * AID2PVBMAP(scb).
- */
- u8 flags; /* flags for the interface */
- struct brcms_if *wlif; /* pointer to wlif */
- struct brcms_txq_info *qi; /* pointer to associated tx queue */
- union {
- /* pointer to scb if WDS */
- struct scb *scb;
- /* pointer to bsscfg if BSS */
- struct brcms_bss_cfg *bsscfg;
- } u;
-};
-
-/* flags for the interface, this interface is linked to a brcms_if */
-#define BRCMS_IF_LINKED 0x02
-
-struct brcms_hw_band {
- int bandtype; /* BRCM_BAND_2G, BRCM_BAND_5G */
- uint bandunit; /* bandstate[] index */
- u16 mhfs[MHFMAX]; /* MHF array shadow */
- u8 bandhw_stf_ss_mode; /* HW configured STF type, 0:siso; 1:cdd */
- u16 CWmin;
- u16 CWmax;
- u32 core_flags;
-
- u16 phytype; /* phytype */
- u16 phyrev;
- u16 radioid;
- u16 radiorev;
- struct brcms_phy_pub *pi; /* pointer to phy specific information */
- bool abgphy_encore;
-};
-
-struct brcms_hardware {
- bool _piomode; /* true if pio mode */
- struct brcms_c_info *wlc;
-
- /* fifo */
- struct dma_pub *di[NFIFO]; /* dma handles, per fifo */
-
- uint unit; /* device instance number */
-
- /* version info */
- u16 vendorid; /* PCI vendor id */
- u16 deviceid; /* PCI device id */
- uint corerev; /* core revision */
- u8 sromrev; /* version # of the srom */
- u16 boardrev; /* version # of particular board */
- u32 boardflags; /* Board specific flags from srom */
- u32 boardflags2; /* More board flags if sromrev >= 4 */
- u32 machwcap; /* MAC capabilities */
- u32 machwcap_backup; /* backup of machwcap */
- u16 ucode_dbgsel; /* dbgsel for ucode debug(config gpio) */
-
- struct si_pub *sih; /* SI handle (cookie for siutils calls) */
- char *vars; /* "environment" name=value */
- uint vars_size; /* size of vars, free vars on detach */
- d11regs_t *regs; /* pointer to device registers */
- void *physhim; /* phy shim layer handler */
- void *phy_sh; /* pointer to shared phy state */
- struct brcms_hw_band *band;/* pointer to active per-band state */
- /* band state per phy/radio */
- struct brcms_hw_band *bandstate[MAXBANDS];
- u16 bmac_phytxant; /* cache of high phytxant state */
- bool shortslot; /* currently using 11g ShortSlot timing */
- u16 SRL; /* 802.11 dot11ShortRetryLimit */
- u16 LRL; /* 802.11 dot11LongRetryLimit */
- u16 SFBL; /* Short Frame Rate Fallback Limit */
- u16 LFBL; /* Long Frame Rate Fallback Limit */
-
- bool up; /* d11 hardware up and running */
- uint now; /* # elapsed seconds */
- uint _nbands; /* # bands supported */
- chanspec_t chanspec; /* bmac chanspec shadow */
-
- uint *txavail[NFIFO]; /* # tx descriptors available */
- u16 *xmtfifo_sz; /* fifo size in 256B for each xmt fifo */
-
- mbool pllreq; /* pll requests to keep PLL on */
-
- u8 suspended_fifos; /* Which TX fifo to remain awake for */
- u32 maccontrol; /* Cached value of maccontrol */
- uint mac_suspend_depth; /* current depth of mac_suspend levels */
- u32 wake_override; /* Various conditions to force MAC to WAKE mode */
- u32 mute_override; /* Prevent ucode from sending beacons */
- u8 etheraddr[ETH_ALEN]; /* currently configured ethernet address */
- u32 led_gpio_mask; /* LED GPIO Mask */
- bool noreset; /* true= do not reset hw, used by WLC_OUT */
- bool forcefastclk; /* true if the h/w is forcing the use of fast clk */
- bool clk; /* core is out of reset and has clock */
- bool sbclk; /* sb has clock */
- struct bmac_pmq *bmac_pmq; /* bmac PM states derived from ucode PMQ */
- bool phyclk; /* phy is out of reset and has clock */
- bool dma_lpbk; /* core is in DMA loopback */
-
- bool ucode_loaded; /* true after ucode downloaded */
-
-
- u8 hw_stf_ss_opmode; /* STF single stream operation mode */
- u8 antsel_type; /* Type of boardlevel mimo antenna switch-logic
- * 0 = N/A, 1 = 2x4 board, 2 = 2x3 CB2 board
- */
- u32 antsel_avail; /*
- * put struct antsel_info here if more info is
- * needed
- */
-};
-
-/* TX Queue information
- *
- * Each flow of traffic out of the device has a TX Queue with independent
- * flow control. Several interfaces may be associated with a single TX Queue
- * if they belong to the same flow of traffic from the device. For multi-channel
- * operation there are independent TX Queues for each channel.
- */
-struct brcms_txq_info {
- struct brcms_txq_info *next;
- struct pktq q;
- uint stopped; /* tx flow control bits */
-};
-
-/*
- * Principal common (os-independent) software data structure.
- */
-struct brcms_c_info {
- struct brcms_pub *pub; /* pointer to wlc public state */
- struct brcms_info *wl; /* pointer to os-specific private state */
- d11regs_t *regs; /* pointer to device registers */
-
- /* HW related state used primarily by BMAC */
- struct brcms_hardware *hw;
-
- /* clock */
- int clkreq_override; /* setting for clkreq for PCIE : Auto, 0, 1 */
- u16 fastpwrup_dly; /* time in us needed to bring up d11 fast clock */
-
- /* interrupt */
- u32 macintstatus; /* bit channel between isr and dpc */
- u32 macintmask; /* sw runtime master macintmask value */
- u32 defmacintmask; /* default "on" macintmask value */
-
- /* up and down */
- bool device_present; /* (removable) device is present */
-
- bool clk; /* core is out of reset and has clock */
-
- /* multiband */
- struct brcms_core *core; /* pointer to active io core */
- struct brcms_band *band; /* pointer to active per-band state */
- struct brcms_core *corestate; /* per-core state (one per hw core) */
- /* per-band state (one per phy/radio): */
- struct brcms_band *bandstate[MAXBANDS];
-
- bool war16165; /* PCI slow clock 16165 war flag */
-
- bool tx_suspended; /* data fifos need to remain suspended */
-
- uint txpend16165war;
-
- /* packet queue */
- uint qvalid; /* DirFrmQValid and BcMcFrmQValid */
-
- /* Regulatory power limits */
- s8 txpwr_local_max; /* regulatory local txpwr max */
- u8 txpwr_local_constraint; /* local power contraint in dB */
-
-
- struct ampdu_info *ampdu; /* ampdu module handler */
- struct antsel_info *asi; /* antsel module handler */
- struct brcms_cm_info *cmi; /* channel manager module handler */
-
- uint vars_size; /* size of vars, free vars on detach */
-
- u16 vendorid; /* PCI vendor id */
- u16 deviceid; /* PCI device id */
- uint ucode_rev; /* microcode revision */
-
- u32 machwcap; /* MAC capabilities, BMAC shadow */
-
- u8 perm_etheraddr[ETH_ALEN]; /* original sprom local ethernet address */
-
- bool bandlocked; /* disable auto multi-band switching */
- bool bandinit_pending; /* track band init in auto band */
-
- bool radio_monitor; /* radio timer is running */
- bool going_down; /* down path intermediate variable */
-
- bool mpc; /* enable minimum power consumption */
- u8 mpc_dlycnt; /* # of watchdog cnt before turn disable radio */
- u8 mpc_offcnt; /* # of watchdog cnt that radio is disabled */
- u8 mpc_delay_off; /* delay radio disable by # of watchdog cnt */
- u8 prev_non_delay_mpc; /* prev state brcms_c_is_non_delay_mpc */
-
- /* timer for watchdog routine */
- struct brcms_timer *wdtimer;
- /* timer for hw radio button monitor routine */
- struct brcms_timer *radio_timer;
-
- /* promiscuous */
- bool monitor; /* monitor (MPDU sniffing) mode */
- bool bcnmisc_ibss; /* bcns promisc mode override for IBSS */
- bool bcnmisc_scan; /* bcns promisc mode override for scan */
- bool bcnmisc_monitor; /* bcns promisc mode override for monitor */
-
- /* driver feature */
- bool _rifs; /* enable per-packet rifs */
- s8 sgi_tx; /* sgi tx */
-
- /* AP-STA synchronization, power save */
- u8 bcn_li_bcn; /* beacon listen interval in # beacons */
- u8 bcn_li_dtim; /* beacon listen interval in # dtims */
-
- bool WDarmed; /* watchdog timer is armed */
- u32 WDlast; /* last time wlc_watchdog() was called */
-
- /* WME */
- ac_bitmap_t wme_dp; /* Discard (oldest first) policy per AC */
- u16 edcf_txop[AC_COUNT]; /* current txop for each ac */
-
- /*
- * WME parameter info element, which on STA contains parameters in use
- * locally, and on AP contains parameters advertised to STA in beacons
- * and assoc responses.
- */
- struct wme_param_ie wme_param_ie;
- u16 wme_retries[AC_COUNT]; /* per-AC retry limits */
-
- u16 tx_prec_map; /* Precedence map based on HW FIFO space */
- u16 fifo2prec_map[NFIFO]; /* pointer to fifo2_prec map based on WME */
-
- /*
- * BSS Configurations set of BSS configurations, idx 0 is default and
- * always valid
- */
- struct brcms_bss_cfg *bsscfg[BRCMS_MAXBSSCFG];
- struct brcms_bss_cfg *cfg; /* the primary bsscfg (can be AP or STA) */
-
- /* tx queue */
- struct brcms_txq_info *tx_queues; /* common TX Queue list */
-
- /* security */
- struct wsec_key *wsec_keys[WSEC_MAX_KEYS]; /* dynamic key storage */
- /* default key storage */
- struct wsec_key *wsec_def_keys[BRCMS_DEFAULT_KEYS];
- bool wsec_swkeys; /* indicates that all keys should be
- * treated as sw keys (used for debugging)
- */
- struct modulecb *modulecb;
-
- u8 mimoft; /* SIGN or 11N */
- s8 cck_40txbw; /* 11N, cck tx b/w override when in 40MHZ mode */
- s8 ofdm_40txbw; /* 11N, ofdm tx b/w override when in 40MHZ mode */
- s8 mimo_40txbw; /* 11N, mimo tx b/w override when in 40MHZ mode */
- /* HT CAP IE being advertised by this node: */
- struct ieee80211_ht_cap ht_cap;
-
- struct brcms_bss_info *default_bss; /* configured BSS parameters */
-
- u16 mc_fid_counter; /* BC/MC FIFO frame ID counter */
-
- /* saved country for leaving 802.11d auto-country mode */
- char country_default[BRCM_CNTRY_BUF_SZ];
- /* initial country for 802.11d auto-country mode */
- char autocountry_default[BRCM_CNTRY_BUF_SZ];
- u16 prb_resp_timeout; /* do not send prb resp if request older than this,
- * 0 = disable
- */
-
- wlc_rateset_t sup_rates_override; /* use only these rates in 11g supported rates if
- * specifed
- */
-
- chanspec_t home_chanspec; /* shared home chanspec */
-
- /* PHY parameters */
- chanspec_t chanspec; /* target operational channel */
- u16 usr_fragthresh; /* user configured fragmentation threshold */
- u16 fragthresh[NFIFO]; /* per-fifo fragmentation thresholds */
- u16 RTSThresh; /* 802.11 dot11RTSThreshold */
- u16 SRL; /* 802.11 dot11ShortRetryLimit */
- u16 LRL; /* 802.11 dot11LongRetryLimit */
- u16 SFBL; /* Short Frame Rate Fallback Limit */
- u16 LFBL; /* Long Frame Rate Fallback Limit */
-
- /* network config */
- bool shortslot; /* currently using 11g ShortSlot timing */
- s8 shortslot_override; /* 11g ShortSlot override */
- bool include_legacy_erp; /* include Legacy ERP info elt ID 47 as well as g ID 42 */
-
- struct brcms_protection *protection;
- s8 PLCPHdr_override; /* 802.11b Preamble Type override */
-
- struct brcms_stf *stf;
-
- ratespec_t bcn_rspec; /* save bcn ratespec purpose */
-
- uint tempsense_lasttime;
-
- u16 tx_duty_cycle_ofdm; /* maximum allowed duty cycle for OFDM */
- u16 tx_duty_cycle_cck; /* maximum allowed duty cycle for CCK */
-
- u16 next_bsscfg_ID;
-
- struct brcms_txq_info *pkt_queue; /* txq for transmit packets */
- u32 mpc_dur; /* total time (ms) in mpc mode except for the
- * portion since radio is turned off last time
- */
- u32 mpc_laston_ts; /* timestamp (ms) when radio is turned off last
- * time
- */
- struct wiphy *wiphy;
-};
-
-/* antsel module specific state */
-struct antsel_info {
- struct brcms_c_info *wlc; /* pointer to main wlc structure */
- struct brcms_pub *pub; /* pointer to public fn */
- u8 antsel_type; /* Type of boardlevel mimo antenna switch-logic
- * 0 = N/A, 1 = 2x4 board, 2 = 2x3 CB2 board
- */
- u8 antsel_antswitch; /* board level antenna switch type */
- bool antsel_avail; /* Ant selection availability (SROM based) */
- struct brcms_antselcfg antcfg_11n; /* antenna configuration */
- struct brcms_antselcfg antcfg_cur; /* current antenna config (auto) */
-};
-
-/* BSS configuration state */
-struct brcms_bss_cfg {
- struct brcms_c_info *wlc; /* wlc to which this bsscfg belongs to. */
- bool up; /* is this configuration up operational */
- bool enable; /* is this configuration enabled */
- bool associated; /* is BSS in ASSOCIATED state */
- bool BSS; /* infraustructure or adhac */
- bool dtim_programmed;
-
- u8 SSID_len; /* the length of SSID */
- u8 SSID[IEEE80211_MAX_SSID_LEN]; /* SSID string */
- struct scb *bcmc_scb[MAXBANDS]; /* one bcmc_scb per band */
- s8 _idx; /* the index of this bsscfg,
- * assigned at wlc_bsscfg_alloc()
- */
- /* MAC filter */
- uint nmac; /* # of entries on maclist array */
- int macmode; /* allow/deny stations on maclist array */
- struct ether_addr *maclist; /* list of source MAC addrs to match */
-
- /* security */
- u32 wsec; /* wireless security bitvec */
- s16 auth; /* 802.11 authentication: Open, Shared Key, WPA */
- s16 openshared; /* try Open auth first, then Shared Key */
- bool wsec_restrict; /* drop unencrypted packets if wsec is enabled */
- bool eap_restrict; /* restrict data until 802.1X auth succeeds */
- u16 WPA_auth; /* WPA: authenticated key management */
- bool wpa2_preauth; /* default is true, wpa_cap sets value */
- bool wsec_portopen; /* indicates keys are plumbed */
- /* global txiv for WPA_NONE, tkip and aes */
- struct wsec_iv wpa_none_txiv;
- int wsec_index; /* 0-3: default tx key, -1: not set */
- /* default key storage: */
- struct wsec_key *bss_def_keys[BRCMS_DEFAULT_KEYS];
-
- /* TKIP countermeasures */
- bool tkip_countermeasures; /* flags TKIP no-assoc period */
- u32 tk_cm_dt; /* detect timer */
- u32 tk_cm_bt; /* blocking timer */
- u32 tk_cm_bt_tmstmp; /* Timestamp when TKIP BT is activated */
- bool tk_cm_activate; /* activate countermeasures after EAPOL-Key sent */
-
- u8 BSSID[ETH_ALEN]; /* BSSID (associated) */
- u8 cur_etheraddr[ETH_ALEN]; /* h/w address */
- u16 bcmc_fid; /* the last BCMC FID queued to TX_BCMC_FIFO */
- u16 bcmc_fid_shm; /* the last BCMC FID written to shared mem */
-
- u32 flags; /* BSSCFG flags; see below */
-
- u8 *bcn; /* AP beacon */
- uint bcn_len; /* AP beacon length */
- bool ar_disassoc; /* disassociated in associated recreation */
-
- int auth_atmptd; /* auth type (open/shared) attempted */
-
- pmkid_cand_t pmkid_cand[MAXPMKID]; /* PMKID candidate list */
- uint npmkid_cand; /* num PMKID candidates */
- pmkid_t pmkid[MAXPMKID]; /* PMKID cache */
- uint npmkid; /* num cached PMKIDs */
-
- struct brcms_bss_info *current_bss; /* BSS parms in ASSOCIATED state */
-
- /* PM states */
- bool PMawakebcn; /* bcn recvd during current waking state */
- bool PMpending; /* waiting for tx status with PM indicated set */
- bool priorPMstate; /* Detecting PM state transitions */
- bool PSpoll; /* whether there is an outstanding PS-Poll frame */
-
- /* BSSID entry in RCMTA, use the wsec key management infrastructure to
- * manage the RCMTA entries.
- */
- struct wsec_key *rcmta;
-
- /* 'unique' ID of this bsscfg, assigned at bsscfg allocation */
- u16 ID;
-
- uint txrspecidx; /* index into tx rate circular buffer */
- ratespec_t txrspec[NTXRATE][2]; /* circular buffer of prev MPDUs tx rates */
-};
-
-#define CHANNEL_BANDUNIT(wlc, ch) (((ch) <= CH_MAX_2G_CHANNEL) ? BAND_2G_INDEX : BAND_5G_INDEX)
-#define OTHERBANDUNIT(wlc) ((uint)((wlc)->band->bandunit ? BAND_2G_INDEX : BAND_5G_INDEX))
-
-#define IS_MBAND_UNLOCKED(wlc) \
- ((NBANDS(wlc) > 1) && !(wlc)->bandlocked)
-
-#define BRCMS_BAND_PI_RADIO_CHANSPEC wlc_phy_chanspec_get(wlc->band->pi)
-
-/* sum the individual fifo tx pending packet counts */
-#define TXPKTPENDTOT(wlc) ((wlc)->core->txpktpend[0] + (wlc)->core->txpktpend[1] + \
- (wlc)->core->txpktpend[2] + (wlc)->core->txpktpend[3])
-#define TXPKTPENDGET(wlc, fifo) ((wlc)->core->txpktpend[(fifo)])
-#define TXPKTPENDINC(wlc, fifo, val) ((wlc)->core->txpktpend[(fifo)] += (val))
-#define TXPKTPENDDEC(wlc, fifo, val) ((wlc)->core->txpktpend[(fifo)] -= (val))
-#define TXPKTPENDCLR(wlc, fifo) ((wlc)->core->txpktpend[(fifo)] = 0)
-#define TXAVAIL(wlc, fifo) (*(wlc)->core->txavail[(fifo)])
-#define GETNEXTTXP(wlc, _queue) \
- dma_getnexttxp((wlc)->hw->di[(_queue)], DMA_RANGE_TRANSMITTED)
-
-#define BRCMS_IS_MATCH_SSID(wlc, ssid1, ssid2, len1, len2) \
- ((len1 == len2) && !memcmp(ssid1, ssid2, len1))
-
-extern void brcms_c_fatal_error(struct brcms_c_info *wlc);
-extern void brcms_b_rpc_watchdog(struct brcms_c_info *wlc);
-extern void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p);
-extern bool brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs,
- u32 frm_tx2);
-extern void brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
- struct sk_buff *p,
- bool commit, s8 txpktpend);
-extern void brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo,
- s8 txpktpend);
-extern void brcms_c_txq_enq(void *ctx, struct scb *scb, struct sk_buff *sdu,
- uint prec);
-extern void brcms_c_info_init(struct brcms_c_info *wlc, int unit);
-extern void brcms_c_print_txstatus(struct tx_status *txs);
-extern int brcms_c_xmtfifo_sz_get(struct brcms_c_info *wlc, uint fifo,
- uint *blocks);
-extern void brcms_c_write_template_ram(struct brcms_c_info *wlc, int offset,
- int len, void *buf);
-extern void brcms_c_write_hw_bcntemplates(struct brcms_c_info *wlc, void *bcn,
- int len, bool both);
-extern void brcms_c_pllreq(struct brcms_c_info *wlc, bool set, mbool req_bit);
-extern void brcms_c_reset_bmac_done(struct brcms_c_info *wlc);
-
-#if defined(BCMDBG)
-extern void brcms_c_print_rxh(struct d11rxhdr *rxh);
-extern void brcms_c_print_txdesc(struct d11txh *txh);
-#else
-#define brcms_c_print_txdesc(a)
-#endif
-
-extern void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit);
-extern void brcms_c_coredisable(struct brcms_hardware *wlc_hw);
-
-extern bool brcms_c_valid_rate(struct brcms_c_info *wlc, ratespec_t rate,
- int band, bool verbose);
-extern void brcms_c_ap_upd(struct brcms_c_info *wlc);
-
-/* helper functions */
-extern void brcms_c_shm_ssid_upd(struct brcms_c_info *wlc,
- struct brcms_bss_cfg *cfg);
-extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-
-extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc,
- bool promisc);
-extern void brcms_c_mac_bcn_promisc(struct brcms_c_info *wlc);
-extern void brcms_c_mac_promisc(struct brcms_c_info *wlc);
-extern void brcms_c_txflowcontrol(struct brcms_c_info *wlc,
- struct brcms_txq_info *qi,
- bool on, int prio);
-extern void brcms_c_txflowcontrol_override(struct brcms_c_info *wlc,
- struct brcms_txq_info *qi,
- bool on, uint override);
-extern bool brcms_c_txflowcontrol_prio_isset(struct brcms_c_info *wlc,
- struct brcms_txq_info *qi,
- int prio);
-extern void brcms_c_send_q(struct brcms_c_info *wlc);
-extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
- uint *fifo);
-
-extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, ratespec_t ratespec,
- uint mac_len);
-extern ratespec_t brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
- ratespec_t rspec,
- bool use_rspec, u16 mimo_ctlchbw);
-extern u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
- ratespec_t rts_rate,
- ratespec_t frame_rate,
- u8 rts_preamble_type,
- u8 frame_preamble_type, uint frame_len,
- bool ba);
-
-extern void brcms_c_tbtt(struct brcms_c_info *wlc);
-extern void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
- struct ieee80211_sta *sta,
- void (*dma_callback_fn));
-
-extern void brcms_c_reprate_init(struct brcms_c_info *wlc);
-extern void brcms_c_bsscfg_reprate_init(struct brcms_bss_cfg *bsscfg);
-
-/* Shared memory access */
-extern void brcms_c_write_shm(struct brcms_c_info *wlc, uint offset, u16 v);
-extern u16 brcms_c_read_shm(struct brcms_c_info *wlc, uint offset);
-extern void brcms_c_copyto_shm(struct brcms_c_info *wlc, uint offset,
- const void *buf, int len);
-
-extern void brcms_c_update_beacon(struct brcms_c_info *wlc);
-extern void brcms_c_bss_update_beacon(struct brcms_c_info *wlc,
- struct brcms_bss_cfg *bsscfg);
-
-extern void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
-extern void brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
- struct brcms_bss_cfg *cfg,
- bool suspend);
-extern bool brcms_c_ismpc(struct brcms_c_info *wlc);
-extern bool brcms_c_is_non_delay_mpc(struct brcms_c_info *wlc);
-extern void brcms_c_radio_mpc_upd(struct brcms_c_info *wlc);
-extern bool brcms_c_prec_enq(struct brcms_c_info *wlc, struct pktq *q,
- void *pkt, int prec);
-extern bool brcms_c_prec_enq_head(struct brcms_c_info *wlc, struct pktq *q,
- struct sk_buff *pkt, int prec, bool head);
-extern u16 brcms_c_phytxctl1_calc(struct brcms_c_info *wlc, ratespec_t rspec);
-extern void brcms_c_compute_plcp(struct brcms_c_info *wlc, ratespec_t rate,
- uint length, u8 *plcp);
-extern uint brcms_c_calc_frame_time(struct brcms_c_info *wlc,
- ratespec_t ratespec,
- u8 preamble_type, uint mac_len);
-
-extern void brcms_c_set_chanspec(struct brcms_c_info *wlc,
- chanspec_t chanspec);
-
-extern bool brcms_c_timers_init(struct brcms_c_info *wlc, int unit);
-
-extern int brcms_c_set_nmode(struct brcms_c_info *wlc, s32 nmode);
-extern void brcms_c_mimops_action_ht_send(struct brcms_c_info *wlc,
- struct brcms_bss_cfg *bsscfg,
- u8 mimops_mode);
-
-extern void brcms_c_switch_shortslot(struct brcms_c_info *wlc, bool shortslot);
-extern void brcms_c_set_bssid(struct brcms_bss_cfg *cfg);
-extern void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend);
-
-extern void brcms_c_set_ratetable(struct brcms_c_info *wlc);
-extern int brcms_c_set_mac(struct brcms_bss_cfg *cfg);
-extern void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
- ratespec_t bcn_rate);
-extern void brcms_c_mod_prb_rsp_rate_table(struct brcms_c_info *wlc,
- uint frame_len);
-extern ratespec_t brcms_c_lowest_basic_rspec(struct brcms_c_info *wlc,
- wlc_rateset_t *rs);
-extern void brcms_c_radio_disable(struct brcms_c_info *wlc);
-extern void brcms_c_bcn_li_upd(struct brcms_c_info *wlc);
-extern void brcms_c_set_home_chanspec(struct brcms_c_info *wlc,
- chanspec_t chanspec);
-extern bool brcms_c_ps_allowed(struct brcms_c_info *wlc);
-extern bool brcms_c_stay_awake(struct brcms_c_info *wlc);
-extern void brcms_c_wme_initparams_sta(struct brcms_c_info *wlc,
- struct wme_param_ie *pe);
-
-#endif /* _BRCM_MAIN_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/nicpci.h b/drivers/staging/brcm80211/brcmsmac/nicpci.h
deleted file mode 100644
index f71f842a215..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/nicpci.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_NICPCI_H_
-#define _BRCM_NICPCI_H_
-
-#include "types.h"
-
-/* PCI configuration address space size */
-#define PCI_SZPCR 256
-
-/* Brcm PCI configuration registers */
-/* backplane address space accessed by BAR0 */
-#define PCI_BAR0_WIN 0x80
-/* sprom property control */
-#define PCI_SPROM_CONTROL 0x88
-/* mask of PCI and other cores interrupts */
-#define PCI_INT_MASK 0x94
-/* backplane core interrupt mask bits offset */
-#define PCI_SBIM_SHIFT 8
-/* backplane address space accessed by second 4KB of BAR0 */
-#define PCI_BAR0_WIN2 0xac
-/* pci config space gpio input (>=rev3) */
-#define PCI_GPIO_IN 0xb0
-/* pci config space gpio output (>=rev3) */
-#define PCI_GPIO_OUT 0xb4
-/* pci config space gpio output enable (>=rev3) */
-#define PCI_GPIO_OUTEN 0xb8
-
-/* bar0 + 4K accesses external sprom */
-#define PCI_BAR0_SPROM_OFFSET (4 * 1024)
-/* bar0 + 6K accesses pci core registers */
-#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024)
-/*
- * pci core SB registers are at the end of the
- * 8KB window, so their address is the "regular"
- * address plus 4K
- */
-#define PCI_BAR0_PCISBR_OFFSET (4 * 1024)
-/* bar0 window size Match with corerev 13 */
-#define PCI_BAR0_WINSZ (16 * 1024)
-/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */
-/* bar0 + 8K accesses pci/pcie core registers */
-#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024)
-/* bar0 + 12K accesses chipc core registers */
-#define PCI_16KB0_CCREGS_OFFSET (12 * 1024)
-
-#define PCI_CLKRUN_DSBL 0x8000 /* Bit 15 forceClkrun */
-
-/* Sonics to PCI translation types */
-#define SBTOPCI_PREF 0x4 /* prefetch enable */
-#define SBTOPCI_BURST 0x8 /* burst enable */
-#define SBTOPCI_RC_READMULTI 0x20 /* memory read multiple */
-
-/* PCI core index in SROM shadow area */
-#define SRSH_PI_OFFSET 0 /* first word */
-#define SRSH_PI_MASK 0xf000 /* bit 15:12 */
-#define SRSH_PI_SHIFT 12 /* bit 15:12 */
-
-extern void *pcicore_init(struct si_pub *sih, void *pdev, void *regs);
-extern void pcicore_deinit(void *pch);
-extern void pcicore_attach(void *pch, char *pvars, int state);
-extern void pcicore_hwup(void *pch);
-extern void pcicore_up(void *pch, int state);
-extern void pcicore_sleep(void *pch);
-extern void pcicore_down(void *pch, int state);
-extern u8 pcicore_find_pci_capability(void *dev, u8 req_cap_id,
- unsigned char *buf, u32 *buflen);
-extern void pcicore_fixcfg(void *pch, void *regs);
-extern void pcicore_pci_setup(void *pch, void *regs);
-
-#endif /* _BRCM_NICPCI_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/otp.c b/drivers/staging/brcm80211/brcmsmac/otp.c
deleted file mode 100644
index 4a70180eba5..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/otp.c
+++ /dev/null
@@ -1,545 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/io.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-
-#include <brcm_hw_ids.h>
-#include <chipcommon.h>
-#include "aiutils.h"
-#include "otp.h"
-
-#define OTPS_GUP_MASK 0x00000f00
-#define OTPS_GUP_SHIFT 8
-#define OTPS_GUP_HW 0x00000100 /* h/w subregion is programmed */
-#define OTPS_GUP_SW 0x00000200 /* s/w subregion is programmed */
-#define OTPS_GUP_CI 0x00000400 /* chipid/pkgopt subregion is programmed */
-#define OTPS_GUP_FUSE 0x00000800 /* fuse subregion is programmed */
-
-/* Fields in otpprog in rev >= 21 */
-#define OTPP_COL_MASK 0x000000ff
-#define OTPP_COL_SHIFT 0
-#define OTPP_ROW_MASK 0x0000ff00
-#define OTPP_ROW_SHIFT 8
-#define OTPP_OC_MASK 0x0f000000
-#define OTPP_OC_SHIFT 24
-#define OTPP_READERR 0x10000000
-#define OTPP_VALUE_MASK 0x20000000
-#define OTPP_VALUE_SHIFT 29
-#define OTPP_START_BUSY 0x80000000
-#define OTPP_READ 0x40000000
-
-/* Opcodes for OTPP_OC field */
-#define OTPPOC_READ 0
-#define OTPPOC_BIT_PROG 1
-#define OTPPOC_VERIFY 3
-#define OTPPOC_INIT 4
-#define OTPPOC_SET 5
-#define OTPPOC_RESET 6
-#define OTPPOC_OCST 7
-#define OTPPOC_ROW_LOCK 8
-#define OTPPOC_PRESCN_TEST 9
-
-#define OTPTYPE_IPX(ccrev) ((ccrev) == 21 || (ccrev) >= 23)
-
-#define OTPP_TRIES 10000000 /* # of tries for OTPP */
-
-#define MAXNUMRDES 9 /* Maximum OTP redundancy entries */
-
-/* OTP common function type */
-typedef int (*otp_status_t) (void *oh);
-typedef int (*otp_size_t) (void *oh);
-typedef void *(*otp_init_t) (struct si_pub *sih);
-typedef u16(*otp_read_bit_t) (void *oh, chipcregs_t *cc, uint off);
-typedef int (*otp_read_region_t) (struct si_pub *sih, int region, u16 *data,
- uint *wlen);
-typedef int (*otp_nvread_t) (void *oh, char *data, uint *len);
-
-/* OTP function struct */
-struct otp_fn_s {
- otp_size_t size;
- otp_read_bit_t read_bit;
- otp_init_t init;
- otp_read_region_t read_region;
- otp_nvread_t nvread;
- otp_status_t status;
-};
-
-struct otpinfo {
- uint ccrev; /* chipc revision */
- struct otp_fn_s *fn; /* OTP functions */
- struct si_pub *sih; /* Saved sb handle */
-
- /* IPX OTP section */
- u16 wsize; /* Size of otp in words */
- u16 rows; /* Geometry */
- u16 cols; /* Geometry */
- u32 status; /* Flag bits (lock/prog/rv).
- * (Reflected only when OTP is power cycled)
- */
- u16 hwbase; /* hardware subregion offset */
- u16 hwlim; /* hardware subregion boundary */
- u16 swbase; /* software subregion offset */
- u16 swlim; /* software subregion boundary */
- u16 fbase; /* fuse subregion offset */
- u16 flim; /* fuse subregion boundary */
- int otpgu_base; /* offset to General Use Region */
-};
-
-static struct otpinfo otpinfo;
-
-/*
- * IPX OTP Code
- *
- * Exported functions:
- * ipxotp_status()
- * ipxotp_size()
- * ipxotp_init()
- * ipxotp_read_bit()
- * ipxotp_read_region()
- * ipxotp_nvread()
- *
- */
-
-#define HWSW_RGN(rgn) (((rgn) == OTP_HW_RGN) ? "h/w" : "s/w")
-
-/* OTP layout */
-/* CC revs 21, 24 and 27 OTP General Use Region word offset */
-#define REVA4_OTPGU_BASE 12
-
-/* CC revs 23, 25, 26, 28 and above OTP General Use Region word offset */
-#define REVB8_OTPGU_BASE 20
-
-/* CC rev 36 OTP General Use Region word offset */
-#define REV36_OTPGU_BASE 12
-
-/* Subregion word offsets in General Use region */
-#define OTPGU_HSB_OFF 0
-#define OTPGU_SFB_OFF 1
-#define OTPGU_CI_OFF 2
-#define OTPGU_P_OFF 3
-#define OTPGU_SROM_OFF 4
-
-/* Flag bit offsets in General Use region */
-#define OTPGU_HWP_OFF 60
-#define OTPGU_SWP_OFF 61
-#define OTPGU_CIP_OFF 62
-#define OTPGU_FUSEP_OFF 63
-#define OTPGU_CIP_MSK 0x4000
-#define OTPGU_P_MSK 0xf000
-#define OTPGU_P_SHIFT (OTPGU_HWP_OFF % 16)
-
-/* OTP Size */
-#define OTP_SZ_FU_324 ((roundup(324, 8))/8) /* 324 bits */
-#define OTP_SZ_FU_288 (288/8) /* 288 bits */
-#define OTP_SZ_FU_216 (216/8) /* 216 bits */
-#define OTP_SZ_FU_72 (72/8) /* 72 bits */
-#define OTP_SZ_CHECKSUM (16/8) /* 16 bits */
-#define OTP4315_SWREG_SZ 178 /* 178 bytes */
-#define OTP_SZ_FU_144 (144/8) /* 144 bits */
-
-static int ipxotp_status(void *oh)
-{
- struct otpinfo *oi = (struct otpinfo *) oh;
- return (int)(oi->status);
-}
-
-/* Return size in bytes */
-static int ipxotp_size(void *oh)
-{
- struct otpinfo *oi = (struct otpinfo *) oh;
- return (int)oi->wsize * 2;
-}
-
-static u16 ipxotp_otpr(void *oh, chipcregs_t *cc, uint wn)
-{
- struct otpinfo *oi;
-
- oi = (struct otpinfo *) oh;
-
- return R_REG(&cc->sromotp[wn]);
-}
-
-static u16 ipxotp_read_bit(void *oh, chipcregs_t *cc, uint off)
-{
- struct otpinfo *oi = (struct otpinfo *) oh;
- uint k, row, col;
- u32 otpp, st;
-
- row = off / oi->cols;
- col = off % oi->cols;
-
- otpp = OTPP_START_BUSY |
- ((OTPPOC_READ << OTPP_OC_SHIFT) & OTPP_OC_MASK) |
- ((row << OTPP_ROW_SHIFT) & OTPP_ROW_MASK) |
- ((col << OTPP_COL_SHIFT) & OTPP_COL_MASK);
- W_REG(&cc->otpprog, otpp);
-
- for (k = 0;
- ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY)
- && (k < OTPP_TRIES); k++)
- ;
- if (k >= OTPP_TRIES) {
- return 0xffff;
- }
- if (st & OTPP_READERR) {
- return 0xffff;
- }
- st = (st & OTPP_VALUE_MASK) >> OTPP_VALUE_SHIFT;
-
- return (int)st;
-}
-
-/* Calculate max HW/SW region byte size by subtracting fuse region and checksum size,
- * osizew is oi->wsize (OTP size - GU size) in words
- */
-static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
-{
- int ret = 0;
-
- switch (sih->chip) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
- break;
- case BCM4313_CHIP_ID:
- ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
- break;
- default:
- break; /* Don't know about this chip */
- }
-
- return ret;
-}
-
-static void _ipxotp_init(struct otpinfo *oi, chipcregs_t *cc)
-{
- uint k;
- u32 otpp, st;
-
- /* record word offset of General Use Region for various chipcommon revs */
- if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24
- || oi->sih->ccrev == 27) {
- oi->otpgu_base = REVA4_OTPGU_BASE;
- } else if (oi->sih->ccrev == 36) {
- /* OTP size greater than equal to 2KB (128 words), otpgu_base is similar to rev23 */
- if (oi->wsize >= 128)
- oi->otpgu_base = REVB8_OTPGU_BASE;
- else
- oi->otpgu_base = REV36_OTPGU_BASE;
- } else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) {
- oi->otpgu_base = REVB8_OTPGU_BASE;
- }
-
- /* First issue an init command so the status is up to date */
- otpp =
- OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
-
- W_REG(&cc->otpprog, otpp);
- for (k = 0;
- ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY)
- && (k < OTPP_TRIES); k++)
- ;
- if (k >= OTPP_TRIES) {
- return;
- }
-
- /* Read OTP lock bits and subregion programmed indication bits */
- oi->status = R_REG(&cc->otpstatus);
-
- if ((oi->sih->chip == BCM43224_CHIP_ID)
- || (oi->sih->chip == BCM43225_CHIP_ID)) {
- u32 p_bits;
- p_bits =
- (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
- OTPGU_P_MSK)
- >> OTPGU_P_SHIFT;
- oi->status |= (p_bits << OTPS_GUP_SHIFT);
- }
-
- /*
- * h/w region base and fuse region limit are fixed to the top and
- * the bottom of the general use region. Everything else can be flexible.
- */
- oi->hwbase = oi->otpgu_base + OTPGU_SROM_OFF;
- oi->hwlim = oi->wsize;
- if (oi->status & OTPS_GUP_HW) {
- oi->hwlim =
- ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
- oi->swbase = oi->hwlim;
- } else
- oi->swbase = oi->hwbase;
-
- /* subtract fuse and checksum from beginning */
- oi->swlim = ipxotp_max_rgnsz(oi->sih, oi->wsize) / 2;
-
- if (oi->status & OTPS_GUP_SW) {
- oi->swlim =
- ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
- oi->fbase = oi->swlim;
- } else
- oi->fbase = oi->swbase;
-
- oi->flim = oi->wsize;
-}
-
-static void *ipxotp_init(struct si_pub *sih)
-{
- uint idx;
- chipcregs_t *cc;
- struct otpinfo *oi;
-
- /* Make sure we're running IPX OTP */
- if (!OTPTYPE_IPX(sih->ccrev))
- return NULL;
-
- /* Make sure OTP is not disabled */
- if (ai_is_otp_disabled(sih))
- return NULL;
-
- /* OTP is always powered */
- oi = &otpinfo;
-
- /* Check for otp size */
- switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
- case 0:
- /* Nothing there */
- return NULL;
- case 1: /* 32x64 */
- oi->rows = 32;
- oi->cols = 64;
- oi->wsize = 128;
- break;
- case 2: /* 64x64 */
- oi->rows = 64;
- oi->cols = 64;
- oi->wsize = 256;
- break;
- case 5: /* 96x64 */
- oi->rows = 96;
- oi->cols = 64;
- oi->wsize = 384;
- break;
- case 7: /* 16x64 *//* 1024 bits */
- oi->rows = 16;
- oi->cols = 64;
- oi->wsize = 64;
- break;
- default:
- /* Don't know the geometry */
- return NULL;
- }
-
- /* Retrieve OTP region info */
- idx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- _ipxotp_init(oi, cc);
-
- ai_setcoreidx(sih, idx);
-
- return (void *)oi;
-}
-
-static int ipxotp_read_region(void *oh, int region, u16 *data, uint *wlen)
-{
- struct otpinfo *oi = (struct otpinfo *) oh;
- uint idx;
- chipcregs_t *cc;
- uint base, i, sz;
-
- /* Validate region selection */
- switch (region) {
- case OTP_HW_RGN:
- sz = (uint) oi->hwlim - oi->hwbase;
- if (!(oi->status & OTPS_GUP_HW)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->hwbase;
- break;
- case OTP_SW_RGN:
- sz = ((uint) oi->swlim - oi->swbase);
- if (!(oi->status & OTPS_GUP_SW)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->swbase;
- break;
- case OTP_CI_RGN:
- sz = OTPGU_CI_SZ;
- if (!(oi->status & OTPS_GUP_CI)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->otpgu_base + OTPGU_CI_OFF;
- break;
- case OTP_FUSE_RGN:
- sz = (uint) oi->flim - oi->fbase;
- if (!(oi->status & OTPS_GUP_FUSE)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->fbase;
- break;
- case OTP_ALL_RGN:
- sz = ((uint) oi->flim - oi->hwbase);
- if (!(oi->status & (OTPS_GUP_HW | OTPS_GUP_SW))) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->hwbase;
- break;
- default:
- return -EINVAL;
- }
-
- idx = ai_coreidx(oi->sih);
- cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
-
- /* Read the data */
- for (i = 0; i < sz; i++)
- data[i] = ipxotp_otpr(oh, cc, base + i);
-
- ai_setcoreidx(oi->sih, idx);
- *wlen = sz;
- return 0;
-}
-
-static int ipxotp_nvread(void *oh, char *data, uint *len)
-{
- return -ENOTSUPP;
-}
-
-static struct otp_fn_s ipxotp_fn = {
- (otp_size_t) ipxotp_size,
- (otp_read_bit_t) ipxotp_read_bit,
-
- (otp_init_t) ipxotp_init,
- (otp_read_region_t) ipxotp_read_region,
- (otp_nvread_t) ipxotp_nvread,
-
- (otp_status_t) ipxotp_status
-};
-
-/*
- * otp_status()
- * otp_size()
- * otp_read_bit()
- * otp_init()
- * otp_read_region()
- * otp_nvread()
- */
-
-int otp_status(void *oh)
-{
- struct otpinfo *oi = (struct otpinfo *) oh;
-
- return oi->fn->status(oh);
-}
-
-int otp_size(void *oh)
-{
- struct otpinfo *oi = (struct otpinfo *) oh;
-
- return oi->fn->size(oh);
-}
-
-u16 otp_read_bit(void *oh, uint offset)
-{
- struct otpinfo *oi = (struct otpinfo *) oh;
- uint idx = ai_coreidx(oi->sih);
- chipcregs_t *cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
- u16 readBit = (u16) oi->fn->read_bit(oh, cc, offset);
- ai_setcoreidx(oi->sih, idx);
- return readBit;
-}
-
-void *otp_init(struct si_pub *sih)
-{
- struct otpinfo *oi;
- void *ret = NULL;
-
- oi = &otpinfo;
- memset(oi, 0, sizeof(struct otpinfo));
-
- oi->ccrev = sih->ccrev;
-
- if (OTPTYPE_IPX(oi->ccrev))
- oi->fn = &ipxotp_fn;
-
- if (oi->fn == NULL) {
- return NULL;
- }
-
- oi->sih = sih;
-
- ret = (oi->fn->init) (sih);
-
- return ret;
-}
-
-int
-otp_read_region(struct si_pub *sih, int region, u16 *data,
- uint *wlen) {
- void *oh;
- int err = 0;
-
- if (ai_is_otp_disabled(sih)) {
- err = -EPERM;
- goto out;
- }
-
- oh = otp_init(sih);
- if (oh == NULL) {
- err = -EBADE;
- goto out;
- }
-
- err = (((struct otpinfo *) oh)->fn->read_region)
- (oh, region, data, wlen);
-
- out:
- return err;
-}
-
-int otp_nvread(void *oh, char *data, uint *len)
-{
- struct otpinfo *oi = (struct otpinfo *) oh;
-
- return oi->fn->nvread(oh, data, len);
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/otp.h b/drivers/staging/brcm80211/brcmsmac/otp.h
deleted file mode 100644
index f6d3a56acf1..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/otp.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_OTP_H_
-#define _BRCM_OTP_H_
-
-#include "types.h"
-
-/* OTP regions */
-#define OTP_HW_RGN 1
-#define OTP_SW_RGN 2
-#define OTP_CI_RGN 4
-#define OTP_FUSE_RGN 8
-#define OTP_ALL_RGN 0xf /* From h/w region to end of OTP including checksum */
-
-/* OTP Size */
-#define OTP_SZ_MAX (6144/8) /* maximum bytes in one CIS */
-
-/* Fixed size subregions sizes in words */
-#define OTPGU_CI_SZ 2
-
-/* OTP usage */
-#define OTP4325_FM_DISABLED_OFFSET 188
-
-/* Exported functions */
-extern int otp_status(void *oh);
-extern int otp_size(void *oh);
-extern u16 otp_read_bit(void *oh, uint offset);
-extern void *otp_init(struct si_pub *sih);
-extern int otp_read_region(struct si_pub *sih, int region, u16 *data,
- uint *wlen);
-extern int otp_nvread(void *oh, char *data, uint *len);
-
-#endif /* _BRCM_OTP_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_qmath.c b/drivers/staging/brcm80211/brcmsmac/phy/phy_qmath.c
deleted file mode 100644
index 01ff0c8eb4b..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/phy/phy_qmath.c
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "phy_qmath.h"
-
-/*
-Description: This function make 16 bit unsigned multiplication. To fit the output into
-16 bits the 32 bit multiplication result is right shifted by 16 bits.
-*/
-u16 qm_mulu16(u16 op1, u16 op2)
-{
- return (u16) (((u32) op1 * (u32) op2) >> 16);
-}
-
-/*
-Description: This function make 16 bit multiplication and return the result in 16 bits.
-To fit the multiplication result into 16 bits the multiplication result is right shifted by
-15 bits. Right shifting 15 bits instead of 16 bits is done to remove the extra sign bit formed
-due to the multiplication.
-When both the 16bit inputs are 0x8000 then the output is saturated to 0x7fffffff.
-*/
-s16 qm_muls16(s16 op1, s16 op2)
-{
- s32 result;
- if (op1 == (s16) 0x8000 && op2 == (s16) 0x8000) {
- result = 0x7fffffff;
- } else {
- result = ((s32) (op1) * (s32) (op2));
- }
- return (s16) (result >> 15);
-}
-
-/*
-Description: This function add two 32 bit numbers and return the 32bit result.
-If the result overflow 32 bits, the output will be saturated to 32bits.
-*/
-s32 qm_add32(s32 op1, s32 op2)
-{
- s32 result;
- result = op1 + op2;
- if (op1 < 0 && op2 < 0 && result > 0) {
- result = 0x80000000;
- } else if (op1 > 0 && op2 > 0 && result < 0) {
- result = 0x7fffffff;
- }
- return result;
-}
-
-/*
-Description: This function add two 16 bit numbers and return the 16bit result.
-If the result overflow 16 bits, the output will be saturated to 16bits.
-*/
-s16 qm_add16(s16 op1, s16 op2)
-{
- s16 result;
- s32 temp = (s32) op1 + (s32) op2;
- if (temp > (s32) 0x7fff) {
- result = (s16) 0x7fff;
- } else if (temp < (s32) 0xffff8000) {
- result = (s16) 0xffff8000;
- } else {
- result = (s16) temp;
- }
- return result;
-}
-
-/*
-Description: This function make 16 bit subtraction and return the 16bit result.
-If the result overflow 16 bits, the output will be saturated to 16bits.
-*/
-s16 qm_sub16(s16 op1, s16 op2)
-{
- s16 result;
- s32 temp = (s32) op1 - (s32) op2;
- if (temp > (s32) 0x7fff) {
- result = (s16) 0x7fff;
- } else if (temp < (s32) 0xffff8000) {
- result = (s16) 0xffff8000;
- } else {
- result = (s16) temp;
- }
- return result;
-}
-
-/*
-Description: This function make a 32 bit saturated left shift when the specified shift
-is +ve. This function will make a 32 bit right shift when the specified shift is -ve.
-This function return the result after shifting operation.
-*/
-s32 qm_shl32(s32 op, int shift)
-{
- int i;
- s32 result;
- result = op;
- if (shift > 31)
- shift = 31;
- else if (shift < -31)
- shift = -31;
- if (shift >= 0) {
- for (i = 0; i < shift; i++) {
- result = qm_add32(result, result);
- }
- } else {
- result = result >> (-shift);
- }
- return result;
-}
-
-/*
-Description: This function make a 16 bit saturated left shift when the specified shift
-is +ve. This function will make a 16 bit right shift when the specified shift is -ve.
-This function return the result after shifting operation.
-*/
-s16 qm_shl16(s16 op, int shift)
-{
- int i;
- s16 result;
- result = op;
- if (shift > 15)
- shift = 15;
- else if (shift < -15)
- shift = -15;
- if (shift > 0) {
- for (i = 0; i < shift; i++) {
- result = qm_add16(result, result);
- }
- } else {
- result = result >> (-shift);
- }
- return result;
-}
-
-/*
-Description: This function make a 16 bit right shift when shift is +ve.
-This function make a 16 bit saturated left shift when shift is -ve. This function
-return the result of the shift operation.
-*/
-s16 qm_shr16(s16 op, int shift)
-{
- return qm_shl16(op, -shift);
-}
-
-/*
-Description: This function return the number of redundant sign bits in a 32 bit number.
-Example: qm_norm32(0x00000080) = 23
-*/
-s16 qm_norm32(s32 op)
-{
- u16 u16extraSignBits;
- if (op == 0) {
- return 31;
- } else {
- u16extraSignBits = 0;
- while ((op >> 31) == (op >> 30)) {
- u16extraSignBits++;
- op = op << 1;
- }
- }
- return u16extraSignBits;
-}
-
-/* This table is log2(1+(i/32)) where i=[0:1:31], in q.15 format */
-static const s16 log_table[] = {
- 0,
- 1455,
- 2866,
- 4236,
- 5568,
- 6863,
- 8124,
- 9352,
- 10549,
- 11716,
- 12855,
- 13968,
- 15055,
- 16117,
- 17156,
- 18173,
- 19168,
- 20143,
- 21098,
- 22034,
- 22952,
- 23852,
- 24736,
- 25604,
- 26455,
- 27292,
- 28114,
- 28922,
- 29717,
- 30498,
- 31267,
- 32024
-};
-
-#define LOG_TABLE_SIZE 32 /* log_table size */
-#define LOG2_LOG_TABLE_SIZE 5 /* log2(log_table size) */
-#define Q_LOG_TABLE 15 /* qformat of log_table */
-#define LOG10_2 19728 /* log10(2) in q.16 */
-
-/*
-Description:
-This routine takes the input number N and its q format qN and compute
-the log10(N). This routine first normalizes the input no N. Then N is in mag*(2^x) format.
-mag is any number in the range 2^30-(2^31 - 1). Then log2(mag * 2^x) = log2(mag) + x is computed.
-From that log10(mag * 2^x) = log2(mag * 2^x) * log10(2) is computed.
-This routine looks the log2 value in the table considering LOG2_LOG_TABLE_SIZE+1 MSBs.
-As the MSB is always 1, only next LOG2_OF_LOG_TABLE_SIZE MSBs are used for table lookup.
-Next 16 MSBs are used for interpolation.
-Inputs:
-N - number to which log10 has to be found.
-qN - q format of N
-log10N - address where log10(N) will be written.
-qLog10N - address where log10N qformat will be written.
-Note/Problem:
-For accurate results input should be in normalized or near normalized form.
-*/
-void qm_log10(s32 N, s16 qN, s16 *log10N, s16 *qLog10N)
-{
- s16 s16norm, s16tableIndex, s16errorApproximation;
- u16 u16offset;
- s32 s32log;
-
- /* normalize the N. */
- s16norm = qm_norm32(N);
- N = N << s16norm;
-
- /* The qformat of N after normalization.
- * -30 is added to treat the no as between 1.0 to 2.0
- * i.e. after adding the -30 to the qformat the decimal point will be
- * just rigtht of the MSB. (i.e. after sign bit and 1st MSB). i.e.
- * at the right side of 30th bit.
- */
- qN = qN + s16norm - 30;
-
- /* take the table index as the LOG2_OF_LOG_TABLE_SIZE bits right of the MSB */
- s16tableIndex = (s16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE)));
-
- /* remove the MSB. the MSB is always 1 after normalization. */
- s16tableIndex =
- s16tableIndex & (s16) ((1 << LOG2_LOG_TABLE_SIZE) - 1);
-
- /* remove the (1+LOG2_OF_LOG_TABLE_SIZE) MSBs in the N. */
- N = N & ((1 << (32 - (2 + LOG2_LOG_TABLE_SIZE))) - 1);
-
- /* take the offset as the 16 MSBS after table index.
- */
- u16offset = (u16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE + 16)));
-
- /* look the log value in the table. */
- s32log = log_table[s16tableIndex]; /* q.15 format */
-
- /* interpolate using the offset. */
- s16errorApproximation = (s16) qm_mulu16(u16offset, (u16) (log_table[s16tableIndex + 1] - log_table[s16tableIndex])); /* q.15 */
-
- s32log = qm_add16((s16) s32log, s16errorApproximation); /* q.15 format */
-
- /* adjust for the qformat of the N as
- * log2(mag * 2^x) = log2(mag) + x
- */
- s32log = qm_add32(s32log, ((s32) -qN) << 15); /* q.15 format */
-
- /* normalize the result. */
- s16norm = qm_norm32(s32log);
-
- /* bring all the important bits into lower 16 bits */
- s32log = qm_shl32(s32log, s16norm - 16); /* q.15+s16norm-16 format */
-
- /* compute the log10(N) by multiplying log2(N) with log10(2).
- * as log10(mag * 2^x) = log2(mag * 2^x) * log10(2)
- * log10N in q.15+s16norm-16+1 (LOG10_2 is in q.16)
- */
- *log10N = qm_muls16((s16) s32log, (s16) LOG10_2);
-
- /* write the q format of the result. */
- *qLog10N = 15 + s16norm - 16 + 1;
-
- return;
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/staging/brcm80211/brcmsmac/phy/phytbl_lcn.c
deleted file mode 100644
index 023d05aa97a..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/phy/phytbl_lcn.c
+++ /dev/null
@@ -1,3638 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <types.h>
-#include "phytbl_lcn.h"
-
-const u32 dot11lcn_gain_tbl_rev0[] = {
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000004,
- 0x00000000,
- 0x00000004,
- 0x00000008,
- 0x00000001,
- 0x00000005,
- 0x00000009,
- 0x0000000d,
- 0x0000004d,
- 0x0000008d,
- 0x0000000d,
- 0x0000004d,
- 0x0000008d,
- 0x000000cd,
- 0x0000004f,
- 0x0000008f,
- 0x000000cf,
- 0x000000d3,
- 0x00000113,
- 0x00000513,
- 0x00000913,
- 0x00000953,
- 0x00000d53,
- 0x00001153,
- 0x00001193,
- 0x00005193,
- 0x00009193,
- 0x0000d193,
- 0x00011193,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000004,
- 0x00000000,
- 0x00000004,
- 0x00000008,
- 0x00000001,
- 0x00000005,
- 0x00000009,
- 0x0000000d,
- 0x0000004d,
- 0x0000008d,
- 0x0000000d,
- 0x0000004d,
- 0x0000008d,
- 0x000000cd,
- 0x0000004f,
- 0x0000008f,
- 0x000000cf,
- 0x000000d3,
- 0x00000113,
- 0x00000513,
- 0x00000913,
- 0x00000953,
- 0x00000d53,
- 0x00001153,
- 0x00005153,
- 0x00009153,
- 0x0000d153,
- 0x00011153,
- 0x00015153,
- 0x00019153,
- 0x0001d153,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
-
-const u32 dot11lcn_gain_tbl_rev1[] = {
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000008,
- 0x00000004,
- 0x00000008,
- 0x00000001,
- 0x00000005,
- 0x00000009,
- 0x0000000D,
- 0x00000011,
- 0x00000051,
- 0x00000091,
- 0x00000011,
- 0x00000051,
- 0x00000091,
- 0x000000d1,
- 0x00000053,
- 0x00000093,
- 0x000000d3,
- 0x000000d7,
- 0x00000117,
- 0x00000517,
- 0x00000917,
- 0x00000957,
- 0x00000d57,
- 0x00001157,
- 0x00001197,
- 0x00005197,
- 0x00009197,
- 0x0000d197,
- 0x00011197,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000008,
- 0x00000004,
- 0x00000008,
- 0x00000001,
- 0x00000005,
- 0x00000009,
- 0x0000000D,
- 0x00000011,
- 0x00000051,
- 0x00000091,
- 0x00000011,
- 0x00000051,
- 0x00000091,
- 0x000000d1,
- 0x00000053,
- 0x00000093,
- 0x000000d3,
- 0x000000d7,
- 0x00000117,
- 0x00000517,
- 0x00000917,
- 0x00000957,
- 0x00000d57,
- 0x00001157,
- 0x00005157,
- 0x00009157,
- 0x0000d157,
- 0x00011157,
- 0x00015157,
- 0x00019157,
- 0x0001d157,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
-
-const u16 dot11lcn_aux_gain_idx_tbl_rev0[] = {
- 0x0401,
- 0x0402,
- 0x0403,
- 0x0404,
- 0x0405,
- 0x0406,
- 0x0407,
- 0x0408,
- 0x0409,
- 0x040a,
- 0x058b,
- 0x058c,
- 0x058d,
- 0x058e,
- 0x058f,
- 0x0090,
- 0x0091,
- 0x0092,
- 0x0193,
- 0x0194,
- 0x0195,
- 0x0196,
- 0x0197,
- 0x0198,
- 0x0199,
- 0x019a,
- 0x019b,
- 0x019c,
- 0x019d,
- 0x019e,
- 0x019f,
- 0x01a0,
- 0x01a1,
- 0x01a2,
- 0x01a3,
- 0x01a4,
- 0x01a5,
- 0x0000,
-};
-
-const u32 dot11lcn_gain_idx_tbl_rev0[] = {
- 0x00000000,
- 0x00000000,
- 0x10000000,
- 0x00000000,
- 0x20000000,
- 0x00000000,
- 0x30000000,
- 0x00000000,
- 0x40000000,
- 0x00000000,
- 0x50000000,
- 0x00000000,
- 0x60000000,
- 0x00000000,
- 0x70000000,
- 0x00000000,
- 0x80000000,
- 0x00000000,
- 0x90000000,
- 0x00000008,
- 0xa0000000,
- 0x00000008,
- 0xb0000000,
- 0x00000008,
- 0xc0000000,
- 0x00000008,
- 0xd0000000,
- 0x00000008,
- 0xe0000000,
- 0x00000008,
- 0xf0000000,
- 0x00000008,
- 0x00000000,
- 0x00000009,
- 0x10000000,
- 0x00000009,
- 0x20000000,
- 0x00000019,
- 0x30000000,
- 0x00000019,
- 0x40000000,
- 0x00000019,
- 0x50000000,
- 0x00000019,
- 0x60000000,
- 0x00000019,
- 0x70000000,
- 0x00000019,
- 0x80000000,
- 0x00000019,
- 0x90000000,
- 0x00000019,
- 0xa0000000,
- 0x00000019,
- 0xb0000000,
- 0x00000019,
- 0xc0000000,
- 0x00000019,
- 0xd0000000,
- 0x00000019,
- 0xe0000000,
- 0x00000019,
- 0xf0000000,
- 0x00000019,
- 0x00000000,
- 0x0000001a,
- 0x10000000,
- 0x0000001a,
- 0x20000000,
- 0x0000001a,
- 0x30000000,
- 0x0000001a,
- 0x40000000,
- 0x0000001a,
- 0x50000000,
- 0x00000002,
- 0x60000000,
- 0x00000002,
- 0x70000000,
- 0x00000002,
- 0x80000000,
- 0x00000002,
- 0x90000000,
- 0x00000002,
- 0xa0000000,
- 0x00000002,
- 0xb0000000,
- 0x00000002,
- 0xc0000000,
- 0x0000000a,
- 0xd0000000,
- 0x0000000a,
- 0xe0000000,
- 0x0000000a,
- 0xf0000000,
- 0x0000000a,
- 0x00000000,
- 0x0000000b,
- 0x10000000,
- 0x0000000b,
- 0x20000000,
- 0x0000000b,
- 0x30000000,
- 0x0000000b,
- 0x40000000,
- 0x0000000b,
- 0x50000000,
- 0x0000001b,
- 0x60000000,
- 0x0000001b,
- 0x70000000,
- 0x0000001b,
- 0x80000000,
- 0x0000001b,
- 0x90000000,
- 0x0000001b,
- 0xa0000000,
- 0x0000001b,
- 0xb0000000,
- 0x0000001b,
- 0xc0000000,
- 0x0000001b,
- 0xd0000000,
- 0x0000001b,
- 0xe0000000,
- 0x0000001b,
- 0xf0000000,
- 0x0000001b,
- 0x00000000,
- 0x0000001c,
- 0x10000000,
- 0x0000001c,
- 0x20000000,
- 0x0000001c,
- 0x30000000,
- 0x0000001c,
- 0x40000000,
- 0x0000001c,
- 0x50000000,
- 0x0000001c,
- 0x60000000,
- 0x0000001c,
- 0x70000000,
- 0x0000001c,
- 0x80000000,
- 0x0000001c,
- 0x90000000,
- 0x0000001c,
-};
-
-const u16 dot11lcn_aux_gain_idx_tbl_2G[] = {
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0001,
- 0x0080,
- 0x0081,
- 0x0100,
- 0x0101,
- 0x0180,
- 0x0181,
- 0x0182,
- 0x0183,
- 0x0184,
- 0x0185,
- 0x0186,
- 0x0187,
- 0x0188,
- 0x0285,
- 0x0289,
- 0x028a,
- 0x028b,
- 0x028c,
- 0x028d,
- 0x028e,
- 0x028f,
- 0x0290,
- 0x0291,
- 0x0292,
- 0x0293,
- 0x0294,
- 0x0295,
- 0x0296,
- 0x0297,
- 0x0298,
- 0x0299,
- 0x029a,
- 0x0000
-};
-
-const u8 dot11lcn_gain_val_tbl_2G[] = {
- 0xfc,
- 0x02,
- 0x08,
- 0x0e,
- 0x13,
- 0x1b,
- 0xfc,
- 0x02,
- 0x08,
- 0x0e,
- 0x13,
- 0x1b,
- 0xfc,
- 0x00,
- 0x0c,
- 0x03,
- 0xeb,
- 0xfe,
- 0x07,
- 0x0b,
- 0x0f,
- 0xfb,
- 0xfe,
- 0x01,
- 0x05,
- 0x08,
- 0x0b,
- 0x0e,
- 0x11,
- 0x14,
- 0x17,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x03,
- 0x06,
- 0x09,
- 0x0c,
- 0x0f,
- 0x12,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x03,
- 0x06,
- 0x09,
- 0x0c,
- 0x0f,
- 0x12,
- 0x15,
- 0x18,
- 0x1b,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00
-};
-
-const u32 dot11lcn_gain_idx_tbl_2G[] = {
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x10000000,
- 0x00000000,
- 0x00000000,
- 0x00000008,
- 0x10000000,
- 0x00000008,
- 0x00000000,
- 0x00000010,
- 0x10000000,
- 0x00000010,
- 0x00000000,
- 0x00000018,
- 0x10000000,
- 0x00000018,
- 0x20000000,
- 0x00000018,
- 0x30000000,
- 0x00000018,
- 0x40000000,
- 0x00000018,
- 0x50000000,
- 0x00000018,
- 0x60000000,
- 0x00000018,
- 0x70000000,
- 0x00000018,
- 0x80000000,
- 0x00000018,
- 0x50000000,
- 0x00000028,
- 0x90000000,
- 0x00000028,
- 0xa0000000,
- 0x00000028,
- 0xb0000000,
- 0x00000028,
- 0xc0000000,
- 0x00000028,
- 0xd0000000,
- 0x00000028,
- 0xe0000000,
- 0x00000028,
- 0xf0000000,
- 0x00000028,
- 0x00000000,
- 0x00000029,
- 0x10000000,
- 0x00000029,
- 0x20000000,
- 0x00000029,
- 0x30000000,
- 0x00000029,
- 0x40000000,
- 0x00000029,
- 0x50000000,
- 0x00000029,
- 0x60000000,
- 0x00000029,
- 0x70000000,
- 0x00000029,
- 0x80000000,
- 0x00000029,
- 0x90000000,
- 0x00000029,
- 0xa0000000,
- 0x00000029,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x10000000,
- 0x00000000,
- 0x00000000,
- 0x00000008,
- 0x10000000,
- 0x00000008,
- 0x00000000,
- 0x00000010,
- 0x10000000,
- 0x00000010,
- 0x00000000,
- 0x00000018,
- 0x10000000,
- 0x00000018,
- 0x20000000,
- 0x00000018,
- 0x30000000,
- 0x00000018,
- 0x40000000,
- 0x00000018,
- 0x50000000,
- 0x00000018,
- 0x60000000,
- 0x00000018,
- 0x70000000,
- 0x00000018,
- 0x80000000,
- 0x00000018,
- 0x50000000,
- 0x00000028,
- 0x90000000,
- 0x00000028,
- 0xa0000000,
- 0x00000028,
- 0xb0000000,
- 0x00000028,
- 0xc0000000,
- 0x00000028,
- 0xd0000000,
- 0x00000028,
- 0xe0000000,
- 0x00000028,
- 0xf0000000,
- 0x00000028,
- 0x00000000,
- 0x00000029,
- 0x10000000,
- 0x00000029,
- 0x20000000,
- 0x00000029,
- 0x30000000,
- 0x00000029,
- 0x40000000,
- 0x00000029,
- 0x50000000,
- 0x00000029,
- 0x60000000,
- 0x00000029,
- 0x70000000,
- 0x00000029,
- 0x80000000,
- 0x00000029,
- 0x90000000,
- 0x00000029,
- 0xa0000000,
- 0x00000029,
- 0xb0000000,
- 0x00000029,
- 0xc0000000,
- 0x00000029,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000
-};
-
-const u32 dot11lcn_gain_tbl_2G[] = {
- 0x00000000,
- 0x00000004,
- 0x00000008,
- 0x00000001,
- 0x00000005,
- 0x00000009,
- 0x0000000d,
- 0x0000004d,
- 0x0000008d,
- 0x00000049,
- 0x00000089,
- 0x000000c9,
- 0x0000004b,
- 0x0000008b,
- 0x000000cb,
- 0x000000cf,
- 0x0000010f,
- 0x0000050f,
- 0x0000090f,
- 0x0000094f,
- 0x00000d4f,
- 0x0000114f,
- 0x0000118f,
- 0x0000518f,
- 0x0000918f,
- 0x0000d18f,
- 0x0001118f,
- 0x0001518f,
- 0x0001918f,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000
-};
-
-const u32 dot11lcn_gain_tbl_extlna_2G[] = {
- 0x00000000,
- 0x00000004,
- 0x00000008,
- 0x00000001,
- 0x00000005,
- 0x00000009,
- 0x0000000d,
- 0x00000003,
- 0x00000007,
- 0x0000000b,
- 0x0000000f,
- 0x0000004f,
- 0x0000008f,
- 0x000000cf,
- 0x0000010f,
- 0x0000014f,
- 0x0000018f,
- 0x0000058f,
- 0x0000098f,
- 0x00000d8f,
- 0x00008000,
- 0x00008004,
- 0x00008008,
- 0x00008001,
- 0x00008005,
- 0x00008009,
- 0x0000800d,
- 0x00008003,
- 0x00008007,
- 0x0000800b,
- 0x0000800f,
- 0x0000804f,
- 0x0000808f,
- 0x000080cf,
- 0x0000810f,
- 0x0000814f,
- 0x0000818f,
- 0x0000858f,
- 0x0000898f,
- 0x00008d8f,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000
-};
-
-const u16 dot11lcn_aux_gain_idx_tbl_extlna_2G[] = {
- 0x0400,
- 0x0400,
- 0x0400,
- 0x0400,
- 0x0400,
- 0x0400,
- 0x0400,
- 0x0400,
- 0x0400,
- 0x0401,
- 0x0402,
- 0x0403,
- 0x0404,
- 0x0483,
- 0x0484,
- 0x0485,
- 0x0486,
- 0x0583,
- 0x0584,
- 0x0585,
- 0x0587,
- 0x0588,
- 0x0589,
- 0x058a,
- 0x0687,
- 0x0688,
- 0x0689,
- 0x068a,
- 0x068b,
- 0x068c,
- 0x068d,
- 0x068e,
- 0x068f,
- 0x0690,
- 0x0691,
- 0x0692,
- 0x0693,
- 0x0000
-};
-
-const u8 dot11lcn_gain_val_tbl_extlna_2G[] = {
- 0xfc,
- 0x02,
- 0x08,
- 0x0e,
- 0x13,
- 0x1b,
- 0xfc,
- 0x02,
- 0x08,
- 0x0e,
- 0x13,
- 0x1b,
- 0xfc,
- 0x00,
- 0x0f,
- 0x03,
- 0xeb,
- 0xfe,
- 0x07,
- 0x0b,
- 0x0f,
- 0xfb,
- 0xfe,
- 0x01,
- 0x05,
- 0x08,
- 0x0b,
- 0x0e,
- 0x11,
- 0x14,
- 0x17,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x03,
- 0x06,
- 0x09,
- 0x0c,
- 0x0f,
- 0x12,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x03,
- 0x06,
- 0x09,
- 0x0c,
- 0x0f,
- 0x12,
- 0x15,
- 0x18,
- 0x1b,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00
-};
-
-const u32 dot11lcn_gain_idx_tbl_extlna_2G[] = {
- 0x00000000,
- 0x00000040,
- 0x00000000,
- 0x00000040,
- 0x00000000,
- 0x00000040,
- 0x00000000,
- 0x00000040,
- 0x00000000,
- 0x00000040,
- 0x00000000,
- 0x00000040,
- 0x00000000,
- 0x00000040,
- 0x00000000,
- 0x00000040,
- 0x00000000,
- 0x00000040,
- 0x10000000,
- 0x00000040,
- 0x20000000,
- 0x00000040,
- 0x30000000,
- 0x00000040,
- 0x40000000,
- 0x00000040,
- 0x30000000,
- 0x00000048,
- 0x40000000,
- 0x00000048,
- 0x50000000,
- 0x00000048,
- 0x60000000,
- 0x00000048,
- 0x30000000,
- 0x00000058,
- 0x40000000,
- 0x00000058,
- 0x50000000,
- 0x00000058,
- 0x70000000,
- 0x00000058,
- 0x80000000,
- 0x00000058,
- 0x90000000,
- 0x00000058,
- 0xa0000000,
- 0x00000058,
- 0x70000000,
- 0x00000068,
- 0x80000000,
- 0x00000068,
- 0x90000000,
- 0x00000068,
- 0xa0000000,
- 0x00000068,
- 0xb0000000,
- 0x00000068,
- 0xc0000000,
- 0x00000068,
- 0xd0000000,
- 0x00000068,
- 0xe0000000,
- 0x00000068,
- 0xf0000000,
- 0x00000068,
- 0x00000000,
- 0x00000069,
- 0x10000000,
- 0x00000069,
- 0x20000000,
- 0x00000069,
- 0x30000000,
- 0x00000069,
- 0x40000000,
- 0x00000041,
- 0x40000000,
- 0x00000041,
- 0x40000000,
- 0x00000041,
- 0x40000000,
- 0x00000041,
- 0x40000000,
- 0x00000041,
- 0x40000000,
- 0x00000041,
- 0x40000000,
- 0x00000041,
- 0x40000000,
- 0x00000041,
- 0x40000000,
- 0x00000041,
- 0x50000000,
- 0x00000041,
- 0x60000000,
- 0x00000041,
- 0x70000000,
- 0x00000041,
- 0x80000000,
- 0x00000041,
- 0x70000000,
- 0x00000049,
- 0x80000000,
- 0x00000049,
- 0x90000000,
- 0x00000049,
- 0xa0000000,
- 0x00000049,
- 0x70000000,
- 0x00000059,
- 0x80000000,
- 0x00000059,
- 0x90000000,
- 0x00000059,
- 0xb0000000,
- 0x00000059,
- 0xc0000000,
- 0x00000059,
- 0xd0000000,
- 0x00000059,
- 0xe0000000,
- 0x00000059,
- 0xb0000000,
- 0x00000069,
- 0xc0000000,
- 0x00000069,
- 0xd0000000,
- 0x00000069,
- 0xe0000000,
- 0x00000069,
- 0xf0000000,
- 0x00000069,
- 0x00000000,
- 0x0000006a,
- 0x10000000,
- 0x0000006a,
- 0x20000000,
- 0x0000006a,
- 0x30000000,
- 0x0000006a,
- 0x40000000,
- 0x0000006a,
- 0x50000000,
- 0x0000006a,
- 0x60000000,
- 0x0000006a,
- 0x70000000,
- 0x0000006a,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000
-};
-
-const u32 dot11lcn_aux_gain_idx_tbl_5G[] = {
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0001,
- 0x0002,
- 0x0003,
- 0x0004,
- 0x0083,
- 0x0084,
- 0x0085,
- 0x0086,
- 0x0087,
- 0x0186,
- 0x0187,
- 0x0188,
- 0x0189,
- 0x018a,
- 0x018b,
- 0x018c,
- 0x018d,
- 0x018e,
- 0x018f,
- 0x0190,
- 0x0191,
- 0x0192,
- 0x0193,
- 0x0194,
- 0x0195,
- 0x0196,
- 0x0197,
- 0x0198,
- 0x0199,
- 0x019a,
- 0x019b,
- 0x019c,
- 0x019d,
- 0x0000
-};
-
-const u32 dot11lcn_gain_val_tbl_5G[] = {
- 0xf7,
- 0xfd,
- 0x00,
- 0x04,
- 0x04,
- 0x04,
- 0xf7,
- 0xfd,
- 0x00,
- 0x04,
- 0x04,
- 0x04,
- 0xf6,
- 0x00,
- 0x0c,
- 0x03,
- 0xeb,
- 0xfe,
- 0x06,
- 0x0a,
- 0x10,
- 0x00,
- 0x03,
- 0x06,
- 0x09,
- 0x0c,
- 0x0f,
- 0x12,
- 0x15,
- 0x18,
- 0x1b,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x03,
- 0x06,
- 0x09,
- 0x0c,
- 0x0f,
- 0x12,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x03,
- 0x06,
- 0x09,
- 0x0c,
- 0x0f,
- 0x12,
- 0x15,
- 0x18,
- 0x1b,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00
-};
-
-const u32 dot11lcn_gain_idx_tbl_5G[] = {
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x10000000,
- 0x00000000,
- 0x20000000,
- 0x00000000,
- 0x30000000,
- 0x00000000,
- 0x40000000,
- 0x00000000,
- 0x30000000,
- 0x00000008,
- 0x40000000,
- 0x00000008,
- 0x50000000,
- 0x00000008,
- 0x60000000,
- 0x00000008,
- 0x70000000,
- 0x00000008,
- 0x60000000,
- 0x00000018,
- 0x70000000,
- 0x00000018,
- 0x80000000,
- 0x00000018,
- 0x90000000,
- 0x00000018,
- 0xa0000000,
- 0x00000018,
- 0xb0000000,
- 0x00000018,
- 0xc0000000,
- 0x00000018,
- 0xd0000000,
- 0x00000018,
- 0xe0000000,
- 0x00000018,
- 0xf0000000,
- 0x00000018,
- 0x00000000,
- 0x00000019,
- 0x10000000,
- 0x00000019,
- 0x20000000,
- 0x00000019,
- 0x30000000,
- 0x00000019,
- 0x40000000,
- 0x00000019,
- 0x50000000,
- 0x00000019,
- 0x60000000,
- 0x00000019,
- 0x70000000,
- 0x00000019,
- 0x80000000,
- 0x00000019,
- 0x90000000,
- 0x00000019,
- 0xa0000000,
- 0x00000019,
- 0xb0000000,
- 0x00000019,
- 0xc0000000,
- 0x00000019,
- 0xd0000000,
- 0x00000019,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000
-};
-
-const u32 dot11lcn_gain_tbl_5G[] = {
- 0x00000000,
- 0x00000040,
- 0x00000080,
- 0x00000001,
- 0x00000005,
- 0x00000009,
- 0x0000000d,
- 0x00000011,
- 0x00000015,
- 0x00000055,
- 0x00000095,
- 0x00000017,
- 0x0000001b,
- 0x0000005b,
- 0x0000009b,
- 0x000000db,
- 0x0000011b,
- 0x0000015b,
- 0x0000019b,
- 0x0000059b,
- 0x0000099b,
- 0x00000d9b,
- 0x0000119b,
- 0x0000519b,
- 0x0000919b,
- 0x0000d19b,
- 0x0001119b,
- 0x0001519b,
- 0x0001919b,
- 0x0001d19b,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000
-};
-
-const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[] = {
- {&dot11lcn_gain_tbl_rev0,
- sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18,
- 0, 32}
- ,
- {&dot11lcn_aux_gain_idx_tbl_rev0,
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
- ,
- {&dot11lcn_gain_idx_tbl_rev0,
- sizeof(dot11lcn_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
- ,
-};
-
-const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev1[] = {
- {&dot11lcn_gain_tbl_rev1,
- sizeof(dot11lcn_gain_tbl_rev1) / sizeof(dot11lcn_gain_tbl_rev1[0]), 18,
- 0, 32}
- ,
- {&dot11lcn_aux_gain_idx_tbl_rev0,
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
- ,
- {&dot11lcn_gain_idx_tbl_rev0,
- sizeof(dot11lcn_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
- ,
-};
-
-const struct phytbl_info dot11lcnphytbl_rx_gain_info_2G_rev2[] = {
- {&dot11lcn_gain_tbl_2G,
- sizeof(dot11lcn_gain_tbl_2G) / sizeof(dot11lcn_gain_tbl_2G[0]), 18, 0,
- 32}
- ,
- {&dot11lcn_aux_gain_idx_tbl_2G,
- sizeof(dot11lcn_aux_gain_idx_tbl_2G) /
- sizeof(dot11lcn_aux_gain_idx_tbl_2G[0]), 14, 0, 16}
- ,
- {&dot11lcn_gain_idx_tbl_2G,
- sizeof(dot11lcn_gain_idx_tbl_2G) / sizeof(dot11lcn_gain_idx_tbl_2G[0]),
- 13, 0, 32}
- ,
- {&dot11lcn_gain_val_tbl_2G,
- sizeof(dot11lcn_gain_val_tbl_2G) / sizeof(dot11lcn_gain_val_tbl_2G[0]),
- 17, 0, 8}
-};
-
-const struct phytbl_info dot11lcnphytbl_rx_gain_info_5G_rev2[] = {
- {&dot11lcn_gain_tbl_5G,
- sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0,
- 32}
- ,
- {&dot11lcn_aux_gain_idx_tbl_5G,
- sizeof(dot11lcn_aux_gain_idx_tbl_5G) /
- sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16}
- ,
- {&dot11lcn_gain_idx_tbl_5G,
- sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]),
- 13, 0, 32}
- ,
- {&dot11lcn_gain_val_tbl_5G,
- sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]),
- 17, 0, 8}
-};
-
-const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[] = {
- {&dot11lcn_gain_tbl_extlna_2G,
- sizeof(dot11lcn_gain_tbl_extlna_2G) /
- sizeof(dot11lcn_gain_tbl_extlna_2G[0]), 18, 0, 32}
- ,
- {&dot11lcn_aux_gain_idx_tbl_extlna_2G,
- sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G) /
- sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G[0]), 14, 0, 16}
- ,
- {&dot11lcn_gain_idx_tbl_extlna_2G,
- sizeof(dot11lcn_gain_idx_tbl_extlna_2G) /
- sizeof(dot11lcn_gain_idx_tbl_extlna_2G[0]), 13, 0, 32}
- ,
- {&dot11lcn_gain_val_tbl_extlna_2G,
- sizeof(dot11lcn_gain_val_tbl_extlna_2G) /
- sizeof(dot11lcn_gain_val_tbl_extlna_2G[0]), 17, 0, 8}
-};
-
-const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[] = {
- {&dot11lcn_gain_tbl_5G,
- sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0,
- 32}
- ,
- {&dot11lcn_aux_gain_idx_tbl_5G,
- sizeof(dot11lcn_aux_gain_idx_tbl_5G) /
- sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16}
- ,
- {&dot11lcn_gain_idx_tbl_5G,
- sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]),
- 13, 0, 32}
- ,
- {&dot11lcn_gain_val_tbl_5G,
- sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]),
- 17, 0, 8}
-};
-
-const u32 dot11lcnphytbl_rx_gain_info_sz_rev0 =
- sizeof(dot11lcnphytbl_rx_gain_info_rev0) /
- sizeof(dot11lcnphytbl_rx_gain_info_rev0[0]);
-
-const u32 dot11lcnphytbl_rx_gain_info_sz_rev1 =
- sizeof(dot11lcnphytbl_rx_gain_info_rev1) /
- sizeof(dot11lcnphytbl_rx_gain_info_rev1[0]);
-
-const u32 dot11lcnphytbl_rx_gain_info_2G_rev2_sz =
- sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2) /
- sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2[0]);
-
-const u32 dot11lcnphytbl_rx_gain_info_5G_rev2_sz =
- sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2) /
- sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2[0]);
-
-const u16 dot11lcn_min_sig_sq_tbl_rev0[] = {
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
- 0x014d,
-};
-
-const u16 dot11lcn_noise_scale_tbl_rev0[] = {
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
-};
-
-const u32 dot11lcn_fltr_ctrl_tbl_rev0[] = {
- 0x000141f8,
- 0x000021f8,
- 0x000021fb,
- 0x000041fb,
- 0x0001fe4b,
- 0x0000217b,
- 0x00002133,
- 0x000040eb,
- 0x0001fea3,
- 0x0000024b,
-};
-
-const u32 dot11lcn_ps_ctrl_tbl_rev0[] = {
- 0x00100001,
- 0x00200010,
- 0x00300001,
- 0x00400010,
- 0x00500022,
- 0x00600122,
- 0x00700222,
- 0x00800322,
- 0x00900422,
- 0x00a00522,
- 0x00b00622,
- 0x00c00722,
- 0x00d00822,
- 0x00f00922,
- 0x00100a22,
- 0x00200b22,
- 0x00300c22,
- 0x00400d22,
- 0x00500e22,
- 0x00600f22,
-};
-
-const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo[] = {
- 0x0007,
- 0x0005,
- 0x0006,
- 0x0004,
- 0x0007,
- 0x0005,
- 0x0006,
- 0x0004,
- 0x0007,
- 0x0005,
- 0x0006,
- 0x0004,
- 0x0007,
- 0x0005,
- 0x0006,
- 0x0004,
- 0x000b,
- 0x000b,
- 0x000a,
- 0x000a,
- 0x000b,
- 0x000b,
- 0x000a,
- 0x000a,
- 0x000b,
- 0x000b,
- 0x000a,
- 0x000a,
- 0x000b,
- 0x000b,
- 0x000a,
- 0x000a,
- 0x0007,
- 0x0005,
- 0x0006,
- 0x0004,
- 0x0007,
- 0x0005,
- 0x0006,
- 0x0004,
- 0x0007,
- 0x0005,
- 0x0006,
- 0x0004,
- 0x0007,
- 0x0005,
- 0x0006,
- 0x0004,
- 0x000b,
- 0x000b,
- 0x000a,
- 0x000a,
- 0x000b,
- 0x000b,
- 0x000a,
- 0x000a,
- 0x000b,
- 0x000b,
- 0x000a,
- 0x000a,
- 0x000b,
- 0x000b,
- 0x000a,
- 0x000a,
-
-};
-
-const u16 dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0[] = {
- 0x0007,
- 0x0005,
- 0x0002,
- 0x0000,
- 0x0007,
- 0x0005,
- 0x0002,
- 0x0000,
- 0x0007,
- 0x0005,
- 0x0002,
- 0x0000,
- 0x0007,
- 0x0005,
- 0x0002,
- 0x0000,
- 0x0007,
- 0x0007,
- 0x0002,
- 0x0002,
- 0x0007,
- 0x0007,
- 0x0002,
- 0x0002,
- 0x0007,
- 0x0007,
- 0x0002,
- 0x0002,
- 0x0007,
- 0x0007,
- 0x0002,
- 0x0002,
- 0x0007,
- 0x0005,
- 0x0002,
- 0x0000,
- 0x0007,
- 0x0005,
- 0x0002,
- 0x0000,
- 0x0007,
- 0x0005,
- 0x0002,
- 0x0000,
- 0x0007,
- 0x0005,
- 0x0002,
- 0x0000,
- 0x0007,
- 0x0007,
- 0x0002,
- 0x0002,
- 0x0007,
- 0x0007,
- 0x0002,
- 0x0002,
- 0x0007,
- 0x0007,
- 0x0002,
- 0x0002,
- 0x0007,
- 0x0007,
- 0x0002,
- 0x0002,
-};
-
-const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = {
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
- 0x0002,
- 0x0008,
- 0x0004,
- 0x0001,
-};
-
-const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
- 0x000a,
- 0x0009,
- 0x0006,
- 0x0005,
-};
-
-const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
- 0x0004,
- 0x0004,
- 0x0002,
- 0x0002,
-};
-
-const u8 dot11lcn_nf_table_rev0[] = {
- 0x5f,
- 0x36,
- 0x29,
- 0x1f,
- 0x5f,
- 0x36,
- 0x29,
- 0x1f,
- 0x5f,
- 0x36,
- 0x29,
- 0x1f,
- 0x5f,
- 0x36,
- 0x29,
- 0x1f,
-};
-
-const u8 dot11lcn_gain_val_tbl_rev0[] = {
- 0x09,
- 0x0f,
- 0x14,
- 0x18,
- 0xfe,
- 0x07,
- 0x0b,
- 0x0f,
- 0xfb,
- 0xfe,
- 0x01,
- 0x05,
- 0x08,
- 0x0b,
- 0x0e,
- 0x11,
- 0x14,
- 0x17,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x03,
- 0x06,
- 0x09,
- 0x0c,
- 0x0f,
- 0x12,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x03,
- 0x06,
- 0x09,
- 0x0c,
- 0x0f,
- 0x12,
- 0x15,
- 0x18,
- 0x1b,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x00,
- 0x03,
- 0xeb,
- 0x00,
- 0x00,
-};
-
-const u8 dot11lcn_spur_tbl_rev0[] = {
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x02,
- 0x03,
- 0x01,
- 0x03,
- 0x02,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x02,
- 0x03,
- 0x01,
- 0x03,
- 0x02,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
- 0x01,
-};
-
-const u16 dot11lcn_unsup_mcs_tbl_rev0[] = {
- 0x001a,
- 0x0034,
- 0x004e,
- 0x0068,
- 0x009c,
- 0x00d0,
- 0x00ea,
- 0x0104,
- 0x0034,
- 0x0068,
- 0x009c,
- 0x00d0,
- 0x0138,
- 0x01a0,
- 0x01d4,
- 0x0208,
- 0x004e,
- 0x009c,
- 0x00ea,
- 0x0138,
- 0x01d4,
- 0x0270,
- 0x02be,
- 0x030c,
- 0x0068,
- 0x00d0,
- 0x0138,
- 0x01a0,
- 0x0270,
- 0x0340,
- 0x03a8,
- 0x0410,
- 0x0018,
- 0x009c,
- 0x00d0,
- 0x0104,
- 0x00ea,
- 0x0138,
- 0x0186,
- 0x00d0,
- 0x0104,
- 0x0104,
- 0x0138,
- 0x016c,
- 0x016c,
- 0x01a0,
- 0x0138,
- 0x0186,
- 0x0186,
- 0x01d4,
- 0x0222,
- 0x0222,
- 0x0270,
- 0x0104,
- 0x0138,
- 0x016c,
- 0x0138,
- 0x016c,
- 0x01a0,
- 0x01d4,
- 0x01a0,
- 0x01d4,
- 0x0208,
- 0x0208,
- 0x023c,
- 0x0186,
- 0x01d4,
- 0x0222,
- 0x01d4,
- 0x0222,
- 0x0270,
- 0x02be,
- 0x0270,
- 0x02be,
- 0x030c,
- 0x030c,
- 0x035a,
- 0x0036,
- 0x006c,
- 0x00a2,
- 0x00d8,
- 0x0144,
- 0x01b0,
- 0x01e6,
- 0x021c,
- 0x006c,
- 0x00d8,
- 0x0144,
- 0x01b0,
- 0x0288,
- 0x0360,
- 0x03cc,
- 0x0438,
- 0x00a2,
- 0x0144,
- 0x01e6,
- 0x0288,
- 0x03cc,
- 0x0510,
- 0x05b2,
- 0x0654,
- 0x00d8,
- 0x01b0,
- 0x0288,
- 0x0360,
- 0x0510,
- 0x06c0,
- 0x0798,
- 0x0870,
- 0x0018,
- 0x0144,
- 0x01b0,
- 0x021c,
- 0x01e6,
- 0x0288,
- 0x032a,
- 0x01b0,
- 0x021c,
- 0x021c,
- 0x0288,
- 0x02f4,
- 0x02f4,
- 0x0360,
- 0x0288,
- 0x032a,
- 0x032a,
- 0x03cc,
- 0x046e,
- 0x046e,
- 0x0510,
- 0x021c,
- 0x0288,
- 0x02f4,
- 0x0288,
- 0x02f4,
- 0x0360,
- 0x03cc,
- 0x0360,
- 0x03cc,
- 0x0438,
- 0x0438,
- 0x04a4,
- 0x032a,
- 0x03cc,
- 0x046e,
- 0x03cc,
- 0x046e,
- 0x0510,
- 0x05b2,
- 0x0510,
- 0x05b2,
- 0x0654,
- 0x0654,
- 0x06f6,
-};
-
-const u16 dot11lcn_iq_local_tbl_rev0[] = {
- 0x0200,
- 0x0300,
- 0x0400,
- 0x0600,
- 0x0800,
- 0x0b00,
- 0x1000,
- 0x1001,
- 0x1002,
- 0x1003,
- 0x1004,
- 0x1005,
- 0x1006,
- 0x1007,
- 0x1707,
- 0x2007,
- 0x2d07,
- 0x4007,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0200,
- 0x0300,
- 0x0400,
- 0x0600,
- 0x0800,
- 0x0b00,
- 0x1000,
- 0x1001,
- 0x1002,
- 0x1003,
- 0x1004,
- 0x1005,
- 0x1006,
- 0x1007,
- 0x1707,
- 0x2007,
- 0x2d07,
- 0x4007,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x4000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
-};
-
-const u32 dot11lcn_papd_compdelta_tbl_rev0[] = {
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
- 0x00080000,
-};
-
-const struct phytbl_info dot11lcnphytbl_info_rev0[] = {
- {&dot11lcn_min_sig_sq_tbl_rev0,
- sizeof(dot11lcn_min_sig_sq_tbl_rev0) /
- sizeof(dot11lcn_min_sig_sq_tbl_rev0[0]), 2, 0, 16}
- ,
- {&dot11lcn_noise_scale_tbl_rev0,
- sizeof(dot11lcn_noise_scale_tbl_rev0) /
- sizeof(dot11lcn_noise_scale_tbl_rev0[0]), 1, 0, 16}
- ,
- {&dot11lcn_fltr_ctrl_tbl_rev0,
- sizeof(dot11lcn_fltr_ctrl_tbl_rev0) /
- sizeof(dot11lcn_fltr_ctrl_tbl_rev0[0]), 11, 0, 32}
- ,
- {&dot11lcn_ps_ctrl_tbl_rev0,
- sizeof(dot11lcn_ps_ctrl_tbl_rev0) /
- sizeof(dot11lcn_ps_ctrl_tbl_rev0[0]), 12, 0, 32}
- ,
- {&dot11lcn_gain_idx_tbl_rev0,
- sizeof(dot11lcn_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
- ,
- {&dot11lcn_aux_gain_idx_tbl_rev0,
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
- ,
- {&dot11lcn_sw_ctrl_tbl_rev0,
- sizeof(dot11lcn_sw_ctrl_tbl_rev0) /
- sizeof(dot11lcn_sw_ctrl_tbl_rev0[0]), 15, 0, 16}
- ,
- {&dot11lcn_nf_table_rev0,
- sizeof(dot11lcn_nf_table_rev0) / sizeof(dot11lcn_nf_table_rev0[0]), 16,
- 0, 8}
- ,
- {&dot11lcn_gain_val_tbl_rev0,
- sizeof(dot11lcn_gain_val_tbl_rev0) /
- sizeof(dot11lcn_gain_val_tbl_rev0[0]), 17, 0, 8}
- ,
- {&dot11lcn_gain_tbl_rev0,
- sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18,
- 0, 32}
- ,
- {&dot11lcn_spur_tbl_rev0,
- sizeof(dot11lcn_spur_tbl_rev0) / sizeof(dot11lcn_spur_tbl_rev0[0]), 20,
- 0, 8}
- ,
- {&dot11lcn_unsup_mcs_tbl_rev0,
- sizeof(dot11lcn_unsup_mcs_tbl_rev0) /
- sizeof(dot11lcn_unsup_mcs_tbl_rev0[0]), 23, 0, 16}
- ,
- {&dot11lcn_iq_local_tbl_rev0,
- sizeof(dot11lcn_iq_local_tbl_rev0) /
- sizeof(dot11lcn_iq_local_tbl_rev0[0]), 0, 0, 16}
- ,
- {&dot11lcn_papd_compdelta_tbl_rev0,
- sizeof(dot11lcn_papd_compdelta_tbl_rev0) /
- sizeof(dot11lcn_papd_compdelta_tbl_rev0[0]), 24, 0, 32}
- ,
-};
-
-const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313 = {
- &dot11lcn_sw_ctrl_tbl_4313_rev0,
- sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0) /
- sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0[0]), 15, 0, 16
-};
-
-const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa = {
- &dot11lcn_sw_ctrl_tbl_4313_epa_rev0,
- sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0) /
- sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0[0]), 15, 0, 16
-};
-
-const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa = {
- &dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo,
- sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo) /
- sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo[0]), 15, 0, 16
-};
-
-const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250 = {
- &dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0,
- sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0) /
- sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0[0]), 15, 0, 16
-};
-
-const u32 dot11lcnphytbl_info_sz_rev0 =
- sizeof(dot11lcnphytbl_info_rev0) / sizeof(dot11lcnphytbl_info_rev0[0]);
-
-const struct lcnphy_tx_gain_tbl_entry
-dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = {
- {3, 0, 31, 0, 72,}
- ,
- {3, 0, 31, 0, 70,}
- ,
- {3, 0, 31, 0, 68,}
- ,
- {3, 0, 30, 0, 67,}
- ,
- {3, 0, 29, 0, 68,}
- ,
- {3, 0, 28, 0, 68,}
- ,
- {3, 0, 27, 0, 69,}
- ,
- {3, 0, 26, 0, 70,}
- ,
- {3, 0, 25, 0, 70,}
- ,
- {3, 0, 24, 0, 71,}
- ,
- {3, 0, 23, 0, 72,}
- ,
- {3, 0, 23, 0, 70,}
- ,
- {3, 0, 22, 0, 71,}
- ,
- {3, 0, 21, 0, 72,}
- ,
- {3, 0, 21, 0, 70,}
- ,
- {3, 0, 21, 0, 68,}
- ,
- {3, 0, 21, 0, 66,}
- ,
- {3, 0, 21, 0, 64,}
- ,
- {3, 0, 21, 0, 63,}
- ,
- {3, 0, 20, 0, 64,}
- ,
- {3, 0, 19, 0, 65,}
- ,
- {3, 0, 19, 0, 64,}
- ,
- {3, 0, 18, 0, 65,}
- ,
- {3, 0, 18, 0, 64,}
- ,
- {3, 0, 17, 0, 65,}
- ,
- {3, 0, 17, 0, 64,}
- ,
- {3, 0, 16, 0, 65,}
- ,
- {3, 0, 16, 0, 64,}
- ,
- {3, 0, 16, 0, 62,}
- ,
- {3, 0, 16, 0, 60,}
- ,
- {3, 0, 16, 0, 58,}
- ,
- {3, 0, 15, 0, 61,}
- ,
- {3, 0, 15, 0, 59,}
- ,
- {3, 0, 14, 0, 61,}
- ,
- {3, 0, 14, 0, 60,}
- ,
- {3, 0, 14, 0, 58,}
- ,
- {3, 0, 13, 0, 60,}
- ,
- {3, 0, 13, 0, 59,}
- ,
- {3, 0, 12, 0, 62,}
- ,
- {3, 0, 12, 0, 60,}
- ,
- {3, 0, 12, 0, 58,}
- ,
- {3, 0, 11, 0, 62,}
- ,
- {3, 0, 11, 0, 60,}
- ,
- {3, 0, 11, 0, 59,}
- ,
- {3, 0, 11, 0, 57,}
- ,
- {3, 0, 10, 0, 61,}
- ,
- {3, 0, 10, 0, 59,}
- ,
- {3, 0, 10, 0, 57,}
- ,
- {3, 0, 9, 0, 62,}
- ,
- {3, 0, 9, 0, 60,}
- ,
- {3, 0, 9, 0, 58,}
- ,
- {3, 0, 9, 0, 57,}
- ,
- {3, 0, 8, 0, 62,}
- ,
- {3, 0, 8, 0, 60,}
- ,
- {3, 0, 8, 0, 58,}
- ,
- {3, 0, 8, 0, 57,}
- ,
- {3, 0, 8, 0, 55,}
- ,
- {3, 0, 7, 0, 61,}
- ,
- {3, 0, 7, 0, 60,}
- ,
- {3, 0, 7, 0, 58,}
- ,
- {3, 0, 7, 0, 56,}
- ,
- {3, 0, 7, 0, 55,}
- ,
- {3, 0, 6, 0, 62,}
- ,
- {3, 0, 6, 0, 60,}
- ,
- {3, 0, 6, 0, 58,}
- ,
- {3, 0, 6, 0, 57,}
- ,
- {3, 0, 6, 0, 55,}
- ,
- {3, 0, 6, 0, 54,}
- ,
- {3, 0, 6, 0, 52,}
- ,
- {3, 0, 5, 0, 61,}
- ,
- {3, 0, 5, 0, 59,}
- ,
- {3, 0, 5, 0, 57,}
- ,
- {3, 0, 5, 0, 56,}
- ,
- {3, 0, 5, 0, 54,}
- ,
- {3, 0, 5, 0, 53,}
- ,
- {3, 0, 5, 0, 51,}
- ,
- {3, 0, 4, 0, 62,}
- ,
- {3, 0, 4, 0, 60,}
- ,
- {3, 0, 4, 0, 58,}
- ,
- {3, 0, 4, 0, 57,}
- ,
- {3, 0, 4, 0, 55,}
- ,
- {3, 0, 4, 0, 54,}
- ,
- {3, 0, 4, 0, 52,}
- ,
- {3, 0, 4, 0, 51,}
- ,
- {3, 0, 4, 0, 49,}
- ,
- {3, 0, 4, 0, 48,}
- ,
- {3, 0, 4, 0, 46,}
- ,
- {3, 0, 3, 0, 60,}
- ,
- {3, 0, 3, 0, 58,}
- ,
- {3, 0, 3, 0, 57,}
- ,
- {3, 0, 3, 0, 55,}
- ,
- {3, 0, 3, 0, 54,}
- ,
- {3, 0, 3, 0, 52,}
- ,
- {3, 0, 3, 0, 51,}
- ,
- {3, 0, 3, 0, 49,}
- ,
- {3, 0, 3, 0, 48,}
- ,
- {3, 0, 3, 0, 46,}
- ,
- {3, 0, 3, 0, 45,}
- ,
- {3, 0, 3, 0, 44,}
- ,
- {3, 0, 3, 0, 43,}
- ,
- {3, 0, 3, 0, 41,}
- ,
- {3, 0, 2, 0, 61,}
- ,
- {3, 0, 2, 0, 59,}
- ,
- {3, 0, 2, 0, 57,}
- ,
- {3, 0, 2, 0, 56,}
- ,
- {3, 0, 2, 0, 54,}
- ,
- {3, 0, 2, 0, 53,}
- ,
- {3, 0, 2, 0, 51,}
- ,
- {3, 0, 2, 0, 50,}
- ,
- {3, 0, 2, 0, 48,}
- ,
- {3, 0, 2, 0, 47,}
- ,
- {3, 0, 2, 0, 46,}
- ,
- {3, 0, 2, 0, 44,}
- ,
- {3, 0, 2, 0, 43,}
- ,
- {3, 0, 2, 0, 42,}
- ,
- {3, 0, 2, 0, 41,}
- ,
- {3, 0, 2, 0, 39,}
- ,
- {3, 0, 2, 0, 38,}
- ,
- {3, 0, 2, 0, 37,}
- ,
- {3, 0, 2, 0, 36,}
- ,
- {3, 0, 2, 0, 35,}
- ,
- {3, 0, 2, 0, 34,}
- ,
- {3, 0, 2, 0, 33,}
- ,
- {3, 0, 2, 0, 32,}
- ,
- {3, 0, 1, 0, 63,}
- ,
- {3, 0, 1, 0, 61,}
- ,
- {3, 0, 1, 0, 59,}
- ,
- {3, 0, 1, 0, 57,}
- ,
-};
-
-const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[128] = {
- {7, 0, 31, 0, 72,}
- ,
- {7, 0, 31, 0, 70,}
- ,
- {7, 0, 31, 0, 68,}
- ,
- {7, 0, 30, 0, 67,}
- ,
- {7, 0, 29, 0, 68,}
- ,
- {7, 0, 28, 0, 68,}
- ,
- {7, 0, 27, 0, 69,}
- ,
- {7, 0, 26, 0, 70,}
- ,
- {7, 0, 25, 0, 70,}
- ,
- {7, 0, 24, 0, 71,}
- ,
- {7, 0, 23, 0, 72,}
- ,
- {7, 0, 23, 0, 70,}
- ,
- {7, 0, 22, 0, 71,}
- ,
- {7, 0, 21, 0, 72,}
- ,
- {7, 0, 21, 0, 70,}
- ,
- {7, 0, 21, 0, 68,}
- ,
- {7, 0, 21, 0, 66,}
- ,
- {7, 0, 21, 0, 64,}
- ,
- {7, 0, 21, 0, 63,}
- ,
- {7, 0, 20, 0, 64,}
- ,
- {7, 0, 19, 0, 65,}
- ,
- {7, 0, 19, 0, 64,}
- ,
- {7, 0, 18, 0, 65,}
- ,
- {7, 0, 18, 0, 64,}
- ,
- {7, 0, 17, 0, 65,}
- ,
- {7, 0, 17, 0, 64,}
- ,
- {7, 0, 16, 0, 65,}
- ,
- {7, 0, 16, 0, 64,}
- ,
- {7, 0, 16, 0, 62,}
- ,
- {7, 0, 16, 0, 60,}
- ,
- {7, 0, 16, 0, 58,}
- ,
- {7, 0, 15, 0, 61,}
- ,
- {7, 0, 15, 0, 59,}
- ,
- {7, 0, 14, 0, 61,}
- ,
- {7, 0, 14, 0, 60,}
- ,
- {7, 0, 14, 0, 58,}
- ,
- {7, 0, 13, 0, 60,}
- ,
- {7, 0, 13, 0, 59,}
- ,
- {7, 0, 12, 0, 62,}
- ,
- {7, 0, 12, 0, 60,}
- ,
- {7, 0, 12, 0, 58,}
- ,
- {7, 0, 11, 0, 62,}
- ,
- {7, 0, 11, 0, 60,}
- ,
- {7, 0, 11, 0, 59,}
- ,
- {7, 0, 11, 0, 57,}
- ,
- {7, 0, 10, 0, 61,}
- ,
- {7, 0, 10, 0, 59,}
- ,
- {7, 0, 10, 0, 57,}
- ,
- {7, 0, 9, 0, 62,}
- ,
- {7, 0, 9, 0, 60,}
- ,
- {7, 0, 9, 0, 58,}
- ,
- {7, 0, 9, 0, 57,}
- ,
- {7, 0, 8, 0, 62,}
- ,
- {7, 0, 8, 0, 60,}
- ,
- {7, 0, 8, 0, 58,}
- ,
- {7, 0, 8, 0, 57,}
- ,
- {7, 0, 8, 0, 55,}
- ,
- {7, 0, 7, 0, 61,}
- ,
- {7, 0, 7, 0, 60,}
- ,
- {7, 0, 7, 0, 58,}
- ,
- {7, 0, 7, 0, 56,}
- ,
- {7, 0, 7, 0, 55,}
- ,
- {7, 0, 6, 0, 62,}
- ,
- {7, 0, 6, 0, 60,}
- ,
- {7, 0, 6, 0, 58,}
- ,
- {7, 0, 6, 0, 57,}
- ,
- {7, 0, 6, 0, 55,}
- ,
- {7, 0, 6, 0, 54,}
- ,
- {7, 0, 6, 0, 52,}
- ,
- {7, 0, 5, 0, 61,}
- ,
- {7, 0, 5, 0, 59,}
- ,
- {7, 0, 5, 0, 57,}
- ,
- {7, 0, 5, 0, 56,}
- ,
- {7, 0, 5, 0, 54,}
- ,
- {7, 0, 5, 0, 53,}
- ,
- {7, 0, 5, 0, 51,}
- ,
- {7, 0, 4, 0, 62,}
- ,
- {7, 0, 4, 0, 60,}
- ,
- {7, 0, 4, 0, 58,}
- ,
- {7, 0, 4, 0, 57,}
- ,
- {7, 0, 4, 0, 55,}
- ,
- {7, 0, 4, 0, 54,}
- ,
- {7, 0, 4, 0, 52,}
- ,
- {7, 0, 4, 0, 51,}
- ,
- {7, 0, 4, 0, 49,}
- ,
- {7, 0, 4, 0, 48,}
- ,
- {7, 0, 4, 0, 46,}
- ,
- {7, 0, 3, 0, 60,}
- ,
- {7, 0, 3, 0, 58,}
- ,
- {7, 0, 3, 0, 57,}
- ,
- {7, 0, 3, 0, 55,}
- ,
- {7, 0, 3, 0, 54,}
- ,
- {7, 0, 3, 0, 52,}
- ,
- {7, 0, 3, 0, 51,}
- ,
- {7, 0, 3, 0, 49,}
- ,
- {7, 0, 3, 0, 48,}
- ,
- {7, 0, 3, 0, 46,}
- ,
- {7, 0, 3, 0, 45,}
- ,
- {7, 0, 3, 0, 44,}
- ,
- {7, 0, 3, 0, 43,}
- ,
- {7, 0, 3, 0, 41,}
- ,
- {7, 0, 2, 0, 61,}
- ,
- {7, 0, 2, 0, 59,}
- ,
- {7, 0, 2, 0, 57,}
- ,
- {7, 0, 2, 0, 56,}
- ,
- {7, 0, 2, 0, 54,}
- ,
- {7, 0, 2, 0, 53,}
- ,
- {7, 0, 2, 0, 51,}
- ,
- {7, 0, 2, 0, 50,}
- ,
- {7, 0, 2, 0, 48,}
- ,
- {7, 0, 2, 0, 47,}
- ,
- {7, 0, 2, 0, 46,}
- ,
- {7, 0, 2, 0, 44,}
- ,
- {7, 0, 2, 0, 43,}
- ,
- {7, 0, 2, 0, 42,}
- ,
- {7, 0, 2, 0, 41,}
- ,
- {7, 0, 2, 0, 39,}
- ,
- {7, 0, 2, 0, 38,}
- ,
- {7, 0, 2, 0, 37,}
- ,
- {7, 0, 2, 0, 36,}
- ,
- {7, 0, 2, 0, 35,}
- ,
- {7, 0, 2, 0, 34,}
- ,
- {7, 0, 2, 0, 33,}
- ,
- {7, 0, 2, 0, 32,}
- ,
- {7, 0, 1, 0, 63,}
- ,
- {7, 0, 1, 0, 61,}
- ,
- {7, 0, 1, 0, 59,}
- ,
- {7, 0, 1, 0, 57,}
- ,
-};
-
-const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_5GHz_gaintable_rev0[128] = {
- {255, 255, 0xf0, 0, 152,}
- ,
- {255, 255, 0xf0, 0, 147,}
- ,
- {255, 255, 0xf0, 0, 143,}
- ,
- {255, 255, 0xf0, 0, 139,}
- ,
- {255, 255, 0xf0, 0, 135,}
- ,
- {255, 255, 0xf0, 0, 131,}
- ,
- {255, 255, 0xf0, 0, 128,}
- ,
- {255, 255, 0xf0, 0, 124,}
- ,
- {255, 255, 0xf0, 0, 121,}
- ,
- {255, 255, 0xf0, 0, 117,}
- ,
- {255, 255, 0xf0, 0, 114,}
- ,
- {255, 255, 0xf0, 0, 111,}
- ,
- {255, 255, 0xf0, 0, 107,}
- ,
- {255, 255, 0xf0, 0, 104,}
- ,
- {255, 255, 0xf0, 0, 101,}
- ,
- {255, 255, 0xf0, 0, 99,}
- ,
- {255, 255, 0xf0, 0, 96,}
- ,
- {255, 255, 0xf0, 0, 93,}
- ,
- {255, 255, 0xf0, 0, 90,}
- ,
- {255, 255, 0xf0, 0, 88,}
- ,
- {255, 255, 0xf0, 0, 85,}
- ,
- {255, 255, 0xf0, 0, 83,}
- ,
- {255, 255, 0xf0, 0, 81,}
- ,
- {255, 255, 0xf0, 0, 78,}
- ,
- {255, 255, 0xf0, 0, 76,}
- ,
- {255, 255, 0xf0, 0, 74,}
- ,
- {255, 255, 0xf0, 0, 72,}
- ,
- {255, 255, 0xf0, 0, 70,}
- ,
- {255, 255, 0xf0, 0, 68,}
- ,
- {255, 255, 0xf0, 0, 66,}
- ,
- {255, 255, 0xf0, 0, 64,}
- ,
- {255, 248, 0xf0, 0, 64,}
- ,
- {255, 241, 0xf0, 0, 64,}
- ,
- {255, 251, 0xe0, 0, 64,}
- ,
- {255, 244, 0xe0, 0, 64,}
- ,
- {255, 254, 0xd0, 0, 64,}
- ,
- {255, 246, 0xd0, 0, 64,}
- ,
- {255, 239, 0xd0, 0, 64,}
- ,
- {255, 249, 0xc0, 0, 64,}
- ,
- {255, 242, 0xc0, 0, 64,}
- ,
- {255, 255, 0xb0, 0, 64,}
- ,
- {255, 248, 0xb0, 0, 64,}
- ,
- {255, 241, 0xb0, 0, 64,}
- ,
- {255, 254, 0xa0, 0, 64,}
- ,
- {255, 246, 0xa0, 0, 64,}
- ,
- {255, 239, 0xa0, 0, 64,}
- ,
- {255, 255, 0x90, 0, 64,}
- ,
- {255, 248, 0x90, 0, 64,}
- ,
- {255, 241, 0x90, 0, 64,}
- ,
- {255, 234, 0x90, 0, 64,}
- ,
- {255, 255, 0x80, 0, 64,}
- ,
- {255, 248, 0x80, 0, 64,}
- ,
- {255, 241, 0x80, 0, 64,}
- ,
- {255, 234, 0x80, 0, 64,}
- ,
- {255, 255, 0x70, 0, 64,}
- ,
- {255, 248, 0x70, 0, 64,}
- ,
- {255, 241, 0x70, 0, 64,}
- ,
- {255, 234, 0x70, 0, 64,}
- ,
- {255, 227, 0x70, 0, 64,}
- ,
- {255, 221, 0x70, 0, 64,}
- ,
- {255, 215, 0x70, 0, 64,}
- ,
- {255, 208, 0x70, 0, 64,}
- ,
- {255, 203, 0x70, 0, 64,}
- ,
- {255, 197, 0x70, 0, 64,}
- ,
- {255, 255, 0x60, 0, 64,}
- ,
- {255, 248, 0x60, 0, 64,}
- ,
- {255, 241, 0x60, 0, 64,}
- ,
- {255, 234, 0x60, 0, 64,}
- ,
- {255, 227, 0x60, 0, 64,}
- ,
- {255, 221, 0x60, 0, 64,}
- ,
- {255, 255, 0x50, 0, 64,}
- ,
- {255, 248, 0x50, 0, 64,}
- ,
- {255, 241, 0x50, 0, 64,}
- ,
- {255, 234, 0x50, 0, 64,}
- ,
- {255, 227, 0x50, 0, 64,}
- ,
- {255, 221, 0x50, 0, 64,}
- ,
- {255, 215, 0x50, 0, 64,}
- ,
- {255, 208, 0x50, 0, 64,}
- ,
- {255, 255, 0x40, 0, 64,}
- ,
- {255, 248, 0x40, 0, 64,}
- ,
- {255, 241, 0x40, 0, 64,}
- ,
- {255, 234, 0x40, 0, 64,}
- ,
- {255, 227, 0x40, 0, 64,}
- ,
- {255, 221, 0x40, 0, 64,}
- ,
- {255, 215, 0x40, 0, 64,}
- ,
- {255, 208, 0x40, 0, 64,}
- ,
- {255, 203, 0x40, 0, 64,}
- ,
- {255, 197, 0x40, 0, 64,}
- ,
- {255, 255, 0x30, 0, 64,}
- ,
- {255, 248, 0x30, 0, 64,}
- ,
- {255, 241, 0x30, 0, 64,}
- ,
- {255, 234, 0x30, 0, 64,}
- ,
- {255, 227, 0x30, 0, 64,}
- ,
- {255, 221, 0x30, 0, 64,}
- ,
- {255, 215, 0x30, 0, 64,}
- ,
- {255, 208, 0x30, 0, 64,}
- ,
- {255, 203, 0x30, 0, 64,}
- ,
- {255, 197, 0x30, 0, 64,}
- ,
- {255, 191, 0x30, 0, 64,}
- ,
- {255, 186, 0x30, 0, 64,}
- ,
- {255, 181, 0x30, 0, 64,}
- ,
- {255, 175, 0x30, 0, 64,}
- ,
- {255, 255, 0x20, 0, 64,}
- ,
- {255, 248, 0x20, 0, 64,}
- ,
- {255, 241, 0x20, 0, 64,}
- ,
- {255, 234, 0x20, 0, 64,}
- ,
- {255, 227, 0x20, 0, 64,}
- ,
- {255, 221, 0x20, 0, 64,}
- ,
- {255, 215, 0x20, 0, 64,}
- ,
- {255, 208, 0x20, 0, 64,}
- ,
- {255, 203, 0x20, 0, 64,}
- ,
- {255, 197, 0x20, 0, 64,}
- ,
- {255, 191, 0x20, 0, 64,}
- ,
- {255, 186, 0x20, 0, 64,}
- ,
- {255, 181, 0x20, 0, 64,}
- ,
- {255, 175, 0x20, 0, 64,}
- ,
- {255, 170, 0x20, 0, 64,}
- ,
- {255, 166, 0x20, 0, 64,}
- ,
- {255, 161, 0x20, 0, 64,}
- ,
- {255, 156, 0x20, 0, 64,}
- ,
- {255, 152, 0x20, 0, 64,}
- ,
- {255, 148, 0x20, 0, 64,}
- ,
- {255, 143, 0x20, 0, 64,}
- ,
- {255, 139, 0x20, 0, 64,}
- ,
- {255, 135, 0x20, 0, 64,}
- ,
- {255, 132, 0x20, 0, 64,}
- ,
- {255, 255, 0x10, 0, 64,}
- ,
- {255, 248, 0x10, 0, 64,}
- ,
-};
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phytbl_n.h b/drivers/staging/brcm80211/brcmsmac/phy/phytbl_n.h
deleted file mode 100644
index c5266cf2372..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/phy/phytbl_n.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#define ANT_SWCTRL_TBL_REV3_IDX (0)
-
-#include <types.h>
-#include "phy_int.h"
-
-extern const struct phytbl_info mimophytbl_info_rev0[],
- mimophytbl_info_rev0_volatile[];
-extern const u32 mimophytbl_info_sz_rev0, mimophytbl_info_sz_rev0_volatile;
-
-extern const struct phytbl_info mimophytbl_info_rev3[],
- mimophytbl_info_rev3_volatile[], mimophytbl_info_rev3_volatile1[],
- mimophytbl_info_rev3_volatile2[], mimophytbl_info_rev3_volatile3[];
-extern const u32 mimophytbl_info_sz_rev3, mimophytbl_info_sz_rev3_volatile,
- mimophytbl_info_sz_rev3_volatile1, mimophytbl_info_sz_rev3_volatile2,
- mimophytbl_info_sz_rev3_volatile3;
-
-extern const u32 noise_var_tbl_rev3[];
-
-extern const struct phytbl_info mimophytbl_info_rev7[];
-extern const u32 mimophytbl_info_sz_rev7;
-extern const u32 noise_var_tbl_rev7[];
-
-extern const struct phytbl_info mimophytbl_info_rev16[];
-extern const u32 mimophytbl_info_sz_rev16;
diff --git a/drivers/staging/brcm80211/brcmsmac/phy_shim.h b/drivers/staging/brcm80211/brcmsmac/phy_shim.h
deleted file mode 100644
index 2d12bb4400f..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/phy_shim.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * phy_shim.h: stuff defined in phy_shim.c and included only by the phy
- */
-
-#ifndef _BRCM_PHY_SHIM_H_
-#define _BRCM_PHY_SHIM_H_
-
-#include "types.h"
-
-#define RADAR_TYPE_NONE 0 /* Radar type None */
-#define RADAR_TYPE_ETSI_1 1 /* ETSI 1 Radar type */
-#define RADAR_TYPE_ETSI_2 2 /* ETSI 2 Radar type */
-#define RADAR_TYPE_ETSI_3 3 /* ETSI 3 Radar type */
-#define RADAR_TYPE_ITU_E 4 /* ITU E Radar type */
-#define RADAR_TYPE_ITU_K 5 /* ITU K Radar type */
-#define RADAR_TYPE_UNCLASSIFIED 6 /* Unclassified Radar type */
-#define RADAR_TYPE_BIN5 7 /* long pulse radar type */
-#define RADAR_TYPE_STG2 8 /* staggered-2 radar */
-#define RADAR_TYPE_STG3 9 /* staggered-3 radar */
-#define RADAR_TYPE_FRA 10 /* French radar */
-
-/* French radar pulse widths */
-#define FRA_T1_20MHZ 52770
-#define FRA_T2_20MHZ 61538
-#define FRA_T3_20MHZ 66002
-#define FRA_T1_40MHZ 105541
-#define FRA_T2_40MHZ 123077
-#define FRA_T3_40MHZ 132004
-#define FRA_ERR_20MHZ 60
-#define FRA_ERR_40MHZ 120
-
-#define ANTSEL_NA 0 /* No boardlevel selection available */
-#define ANTSEL_2x4 1 /* 2x4 boardlevel selection available */
-#define ANTSEL_2x3 2 /* 2x3 CB2 boardlevel selection available */
-
-/* Rx Antenna diversity control values */
-#define ANT_RX_DIV_FORCE_0 0 /* Use antenna 0 */
-#define ANT_RX_DIV_FORCE_1 1 /* Use antenna 1 */
-#define ANT_RX_DIV_START_1 2 /* Choose starting with 1 */
-#define ANT_RX_DIV_START_0 3 /* Choose starting with 0 */
-#define ANT_RX_DIV_ENABLE 3 /* APHY bbConfig Enable RX Diversity */
-#define ANT_RX_DIV_DEF ANT_RX_DIV_START_0 /* default antdiv setting */
-
-#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */
-#define WL_ANT_HT_RX_MAX 3 /* max 3 receive antennas/cores */
-#define WL_ANT_IDX_1 0 /* antenna index 1 */
-#define WL_ANT_IDX_2 1 /* antenna index 2 */
-
-/* values for n_preamble_type */
-#define BRCMS_N_PREAMBLE_MIXEDMODE 0
-#define BRCMS_N_PREAMBLE_GF 1
-#define BRCMS_N_PREAMBLE_GF_BRCM 2
-
-#define WL_TX_POWER_RATES_LEGACY 45
-#define WL_TX_POWER_MCS20_FIRST 12
-#define WL_TX_POWER_MCS20_NUM 16
-#define WL_TX_POWER_MCS40_FIRST 28
-#define WL_TX_POWER_MCS40_NUM 17
-
-
-#define WL_TX_POWER_RATES 101
-#define WL_TX_POWER_CCK_FIRST 0
-#define WL_TX_POWER_CCK_NUM 4
-#define WL_TX_POWER_OFDM_FIRST 4 /* Index for first 20MHz OFDM SISO rate */
-#define WL_TX_POWER_OFDM20_CDD_FIRST 12 /* Index for first 20MHz OFDM CDD rate */
-#define WL_TX_POWER_OFDM40_SISO_FIRST 52 /* Index for first 40MHz OFDM SISO rate */
-#define WL_TX_POWER_OFDM40_CDD_FIRST 60 /* Index for first 40MHz OFDM CDD rate */
-#define WL_TX_POWER_OFDM_NUM 8
-#define WL_TX_POWER_MCS20_SISO_FIRST 20 /* Index for first 20MHz MCS SISO rate */
-#define WL_TX_POWER_MCS20_CDD_FIRST 28 /* Index for first 20MHz MCS CDD rate */
-#define WL_TX_POWER_MCS20_STBC_FIRST 36 /* Index for first 20MHz MCS STBC rate */
-#define WL_TX_POWER_MCS20_SDM_FIRST 44 /* Index for first 20MHz MCS SDM rate */
-#define WL_TX_POWER_MCS40_SISO_FIRST 68 /* Index for first 40MHz MCS SISO rate */
-#define WL_TX_POWER_MCS40_CDD_FIRST 76 /* Index for first 40MHz MCS CDD rate */
-#define WL_TX_POWER_MCS40_STBC_FIRST 84 /* Index for first 40MHz MCS STBC rate */
-#define WL_TX_POWER_MCS40_SDM_FIRST 92 /* Index for first 40MHz MCS SDM rate */
-#define WL_TX_POWER_MCS_1_STREAM_NUM 8
-#define WL_TX_POWER_MCS_2_STREAM_NUM 8
-#define WL_TX_POWER_MCS_32 100 /* Index for 40MHz rate MCS 32 */
-#define WL_TX_POWER_MCS_32_NUM 1
-
-/* sslpnphy specifics */
-#define WL_TX_POWER_MCS20_SISO_FIRST_SSN 12 /* Index for first 20MHz MCS SISO rate */
-
-/* struct tx_power::flags bits */
-#define WL_TX_POWER_F_ENABLED 1
-#define WL_TX_POWER_F_HW 2
-#define WL_TX_POWER_F_MIMO 4
-#define WL_TX_POWER_F_SISO 8
-
-/* values to force tx/rx chain */
-#define BRCMS_N_TXRX_CHAIN0 0
-#define BRCMS_N_TXRX_CHAIN1 1
-
-extern struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
- void *wl, void *wlc);
-extern void wlc_phy_shim_detach(struct phy_shim_info *physhim);
-
-/* PHY to WL utility functions */
-extern struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn) (void *arg), void *arg,
- const char *name);
-extern void wlapi_free_timer(struct phy_shim_info *physhim,
- struct wlapi_timer *t);
-extern void wlapi_add_timer(struct phy_shim_info *physhim,
- struct wlapi_timer *t, uint ms, int periodic);
-extern bool wlapi_del_timer(struct phy_shim_info *physhim,
- struct wlapi_timer *t);
-extern void wlapi_intrson(struct phy_shim_info *physhim);
-extern u32 wlapi_intrsoff(struct phy_shim_info *physhim);
-extern void wlapi_intrsrestore(struct phy_shim_info *physhim,
- u32 macintmask);
-
-extern void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset,
- u16 v);
-extern u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
-extern void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx,
- u16 mask, u16 val, int bands);
-extern void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
-extern void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
-extern void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
-extern void wlapi_enable_mac(struct phy_shim_info *physhim);
-extern void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask,
- u32 val);
-extern void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
-extern void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
-extern void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *
- physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *
- physhim);
-extern void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
- int len, void *buf);
-extern u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim,
- u8 rate);
-extern void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
-extern void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint,
- void *buf, int, u32 sel);
-extern void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint,
- const void *buf, int, u32);
-
-extern void wlapi_high_update_phy_mode(struct phy_shim_info *physhim,
- u32 phy_mode);
-extern u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
-#endif /* _BRCM_PHY_SHIM_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/pub.h b/drivers/staging/brcm80211/brcmsmac/pub.h
deleted file mode 100644
index 01d74609560..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/pub.h
+++ /dev/null
@@ -1,665 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_PUB_H_
-#define _BRCM_PUB_H_
-
-#include <brcmu_wifi.h>
-#include "types.h"
-#include "defs.h"
-
-#define BRCMS_NUMRATES 16 /* max # of rates in a rateset */
-#define MAXMULTILIST 32 /* max # multicast addresses */
-#define D11_PHY_HDR_LEN 6 /* Phy header length - 6 bytes */
-
-/* phy types */
-#define PHY_TYPE_A 0 /* Phy type A */
-#define PHY_TYPE_G 2 /* Phy type G */
-#define PHY_TYPE_N 4 /* Phy type N */
-#define PHY_TYPE_LP 5 /* Phy type Low Power A/B/G */
-#define PHY_TYPE_SSN 6 /* Phy type Single Stream N */
-#define PHY_TYPE_LCN 8 /* Phy type Single Stream N */
-#define PHY_TYPE_LCNXN 9 /* Phy type 2-stream N */
-#define PHY_TYPE_HT 7 /* Phy type 3-Stream N */
-
-/* bw */
-#define BRCMS_10_MHZ 10 /* 10Mhz nphy channel bandwidth */
-#define BRCMS_20_MHZ 20 /* 20Mhz nphy channel bandwidth */
-#define BRCMS_40_MHZ 40 /* 40Mhz nphy channel bandwidth */
-
-#define CHSPEC_WLC_BW(chanspec) (CHSPEC_IS40(chanspec) ? BRCMS_40_MHZ : \
- CHSPEC_IS20(chanspec) ? BRCMS_20_MHZ : \
- BRCMS_10_MHZ)
-
-#define BRCMS_RSSI_MINVAL -200 /* Low value, e.g. for forcing roam */
-#define BRCMS_RSSI_NO_SIGNAL -91 /* NDIS RSSI link quality cutoffs */
-#define BRCMS_RSSI_VERY_LOW -80 /* Very low quality cutoffs */
-#define BRCMS_RSSI_LOW -70 /* Low quality cutoffs */
-#define BRCMS_RSSI_GOOD -68 /* Good quality cutoffs */
-#define BRCMS_RSSI_VERY_GOOD -58 /* Very good quality cutoffs */
-#define BRCMS_RSSI_EXCELLENT -57 /* Excellent quality cutoffs */
-
-/* macro to perform PHY -> D11 PHY TYPE, currently 1:1 */
-#define BRCMS_PHYTYPE(_x) (_x)
-
-#define MA_WINDOW_SZ 8 /* moving average window size */
-
-#define BRCMS_SNR_INVALID 0 /* invalid SNR value */
-
-/* a large TX Power as an init value to factor out of min() calculations,
- * keep low enough to fit in an s8, units are .25 dBm
- */
-#define BRCMS_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */
-
-/* rate related definitions */
-#define BRCMS_RATE_FLAG 0x80 /* Flag to indicate it is a basic rate */
-#define BRCMS_RATE_MASK 0x7f /* Rate value mask w/o basic rate flag */
-
-/* legacy rx Antenna diversity for SISO rates */
-#define ANT_RX_DIV_FORCE_0 0 /* Use antenna 0 */
-#define ANT_RX_DIV_FORCE_1 1 /* Use antenna 1 */
-#define ANT_RX_DIV_START_1 2 /* Choose starting with 1 */
-#define ANT_RX_DIV_START_0 3 /* Choose starting with 0 */
-#define ANT_RX_DIV_ENABLE 3 /* APHY bbConfig Enable RX Diversity */
-#define ANT_RX_DIV_DEF ANT_RX_DIV_START_0 /* default antdiv setting */
-
-/* legacy rx Antenna diversity for SISO rates */
-#define ANT_TX_FORCE_0 0 /* Tx on antenna 0, "legacy term Main" */
-#define ANT_TX_FORCE_1 1 /* Tx on antenna 1, "legacy term Aux" */
-#define ANT_TX_LAST_RX 3 /* Tx on phy's last good Rx antenna */
-#define ANT_TX_DEF 3 /* driver's default tx antenna setting */
-
-#define TXCORE_POLICY_ALL 0x1 /* use all available core for transmit */
-
-/* Tx Chain values */
-#define TXCHAIN_DEF 0x1 /* def bitmap of txchain */
-#define TXCHAIN_DEF_NPHY 0x3 /* default bitmap of tx chains for nphy */
-#define TXCHAIN_DEF_HTPHY 0x7 /* default bitmap of tx chains for nphy */
-#define RXCHAIN_DEF 0x1 /* def bitmap of rxchain */
-#define RXCHAIN_DEF_NPHY 0x3 /* default bitmap of rx chains for nphy */
-#define RXCHAIN_DEF_HTPHY 0x7 /* default bitmap of rx chains for nphy */
-#define ANTSWITCH_NONE 0 /* no antenna switch */
-#define ANTSWITCH_TYPE_1 1 /* antenna switch on 4321CB2, 2of3 */
-#define ANTSWITCH_TYPE_2 2 /* antenna switch on 4321MPCI, 2of3 */
-#define ANTSWITCH_TYPE_3 3 /* antenna switch on 4322, 2of3 */
-
-#define RXBUFSZ PKTBUFSZ
-#ifndef AIDMAPSZ
-#define AIDMAPSZ (roundup(MAXSCB, NBBY)/NBBY) /* aid bitmap size in bytes */
-#endif /* AIDMAPSZ */
-
-#define MAX_STREAMS_SUPPORTED 4 /* max number of streams supported */
-
-#define WL_SPURAVOID_OFF 0
-#define WL_SPURAVOID_ON1 1
-#define WL_SPURAVOID_ON2 2
-
-struct brcms_tunables {
- int ntxd; /* size of tx descriptor table */
- int nrxd; /* size of rx descriptor table */
- int rxbufsz; /* size of rx buffers to post */
- int nrxbufpost; /* # of rx buffers to post */
- int maxscb; /* # of SCBs supported */
- int ampdunummpdu; /* max number of mpdu in an ampdu */
- int maxpktcb; /* max # of packet callbacks */
- int maxucodebss; /* max # of BSS handled in ucode bcn/prb */
- int maxucodebss4; /* max # of BSS handled in sw bcn/prb */
- int maxbss; /* max # of bss info elements in scan list */
- int datahiwat; /* data msg txq hiwat mark */
- int ampdudatahiwat; /* AMPDU msg txq hiwat mark */
- int rxbnd; /* max # of rx bufs to process before deferring to dpc */
- int txsbnd; /* max # tx status to process in wlc_txstatus() */
- int memreserved; /* memory reserved for BMAC's USB dma rx */
-};
-
-struct brcms_rateset {
- uint count; /* number of rates in rates[] */
- /* rates in 500kbps units w/hi bit set if basic */
- u8 rates[BRCMS_NUMRATES];
- u8 htphy_membership; /* HT PHY Membership */
- u8 mcs[MCSSET_LEN]; /* supported mcs index bit map */
-};
-
-struct rsn_parms {
- u8 flags; /* misc booleans (e.g., supported) */
- u8 multicast; /* multicast cipher */
- u8 ucount; /* count of unicast ciphers */
- u8 unicast[4]; /* unicast ciphers */
- u8 acount; /* count of auth modes */
- u8 auth[4]; /* Authentication modes */
- u8 PAD[4]; /* padding for future growth */
-};
-
-/*
- * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
- */
-#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1)
-
-#define RSN_FLAGS_SUPPORTED 0x1 /* Flag for rsn_params */
-#define RSN_FLAGS_PREAUTH 0x2 /* Flag for WPA2 rsn_params */
-
-/* All the HT-specific default advertised capabilities (including AMPDU)
- * should be grouped here at one place
- */
-#define AMPDU_DEF_MPDU_DENSITY 6 /* default mpdu density (110 ==> 4us) */
-
-/* defaults for the HT (MIMO) bss */
-#define HT_CAP (IEEE80211_HT_CAP_SM_PS |\
- IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_GRN_FLD |\
- IEEE80211_HT_CAP_MAX_AMSDU | IEEE80211_HT_CAP_DSSSCCK40)
-
-/* wlc internal bss_info */
-struct brcms_bss_info {
- u8 BSSID[ETH_ALEN]; /* network BSSID */
- u16 flags; /* flags for internal attributes */
- u8 SSID_len; /* the length of SSID */
- u8 SSID[32]; /* SSID string */
- s16 RSSI; /* receive signal strength (in dBm) */
- s16 SNR; /* receive signal SNR in dB */
- u16 beacon_period; /* units are Kusec */
- u16 atim_window; /* units are Kusec */
- chanspec_t chanspec; /* Channel num, bw, ctrl_sb and band */
- s8 infra; /* 0=IBSS, 1=infrastructure, 2=unknown */
- wlc_rateset_t rateset; /* supported rates */
- u8 dtim_period; /* DTIM period */
- s8 phy_noise; /* noise right after tx (in dBm) */
- u16 capability; /* Capability information */
- u8 wme_qosinfo; /* QoS Info from WME IE; valid if BSS_WME flag set */
- struct rsn_parms wpa;
- struct rsn_parms wpa2;
- u16 qbss_load_aac; /* qbss load available admission capacity */
- /* qbss_load_chan_free <- (0xff - channel_utilization of qbss_load_ie_t) */
- u8 qbss_load_chan_free; /* indicates how free the channel is */
- u8 mcipher; /* multicast cipher */
- u8 wpacfg; /* wpa config index */
-};
-
-/* IOVar flags for common error checks */
-#define IOVF_MFG (1<<3) /* flag for mfgtest iovars */
-#define IOVF_WHL (1<<4) /* value must be whole (0-max) */
-#define IOVF_NTRL (1<<5) /* value must be natural (1-max) */
-
-#define IOVF_SET_UP (1<<6) /* set requires driver be up */
-#define IOVF_SET_DOWN (1<<7) /* set requires driver be down */
-#define IOVF_SET_CLK (1<<8) /* set requires core clock */
-#define IOVF_SET_BAND (1<<9) /* set requires fixed band */
-
-#define IOVF_GET_UP (1<<10) /* get requires driver be up */
-#define IOVF_GET_DOWN (1<<11) /* get requires driver be down */
-#define IOVF_GET_CLK (1<<12) /* get requires core clock */
-#define IOVF_GET_BAND (1<<13) /* get requires fixed band */
-#define IOVF_OPEN_ALLOW (1<<14) /* set allowed iovar for opensrc */
-
-/* watchdog down and dump callback function proto's */
-typedef int (*watchdog_fn_t) (void *handle);
-typedef int (*down_fn_t) (void *handle);
-typedef int (*dump_fn_t) (void *handle, struct brcmu_strbuf *b);
-
-/* IOVar handler
- *
- * handle - a pointer value registered with the function
- * vi - iovar_info that was looked up
- * actionid - action ID, calculated by IOV_GVAL() and IOV_SVAL() based on varid.
- * name - the actual iovar name
- * params/plen - parameters and length for a get, input only.
- * arg/len - buffer and length for value to be set or retrieved, input or output.
- * vsize - value size, valid for integer type only.
- * wlcif - interface context (brcms_c_if pointer)
- *
- * All pointers may point into the same buffer.
- */
-typedef int (*iovar_fn_t) (void *handle, const struct brcmu_iovar *vi,
- u32 actionid, const char *name, void *params,
- uint plen, void *arg, int alen, int vsize,
- struct brcms_c_if *wlcif);
-
-#define MAC80211_PROMISC_BCNS (1 << 0)
-#define MAC80211_SCAN (1 << 1)
-
-/*
- * Public portion of "common" os-independent state structure.
- * The wlc handle points at this.
- */
-struct brcms_pub {
- void *wlc;
-
- struct ieee80211_hw *ieee_hw;
- struct scb *global_scb;
- struct scb_ampdu *global_ampdu;
- uint mac80211_state;
- uint unit; /* device instance number */
- uint corerev; /* core revision */
- struct si_pub *sih; /* SI handle (cookie for siutils calls) */
- char *vars; /* "environment" name=value */
- bool up; /* interface up and running */
- bool hw_off; /* HW is off */
- /* tunables: ntxd, nrxd, maxscb, etc. */
- struct brcms_tunables *tunables;
- bool hw_up; /* one time hw up/down(from boot or hibernation) */
- bool _piomode; /* true if pio mode *//* BMAC_NOTE: NEED In both */
- uint _nbands; /* # bands supported */
- uint now; /* # elapsed seconds */
-
- bool promisc; /* promiscuous destination address */
- bool delayed_down; /* down delayed */
- bool _ap; /* AP mode enabled */
- bool _apsta; /* simultaneous AP/STA mode enabled */
- bool _assoc_recreate; /* association recreation on up transitions */
- int _wme; /* WME QoS mode */
- u8 _mbss; /* MBSS mode on */
- bool allmulti; /* enable all multicasts */
- bool associated; /* true:part of [I]BSS, false: not */
- /* (union of stas_associated, aps_associated) */
- bool phytest_on; /* whether a PHY test is running */
- bool bf_preempt_4306; /* True to enable 'darwin' mode */
- bool _ampdu; /* ampdu enabled or not */
- bool _cac; /* 802.11e CAC enabled */
- u8 _n_enab; /* bitmap of 11N + HT support */
- bool _n_reqd; /* N support required for clients */
-
- s8 _coex; /* 20/40 MHz BSS Management AUTO, ENAB, DISABLE */
- bool _priofc; /* Priority-based flowcontrol */
-
- u8 cur_etheraddr[ETH_ALEN]; /* our local ethernet address */
-
- u8 *multicast; /* ptr to list of multicast addresses */
- uint nmulticast; /* # enabled multicast addresses */
-
- u32 wlfeatureflag; /* Flags to control sw features from registry */
- int psq_pkts_total; /* total num of ps pkts */
-
- u16 txmaxpkts; /* max number of large pkts allowed to be pending */
-
- /* s/w decryption counters */
- u32 swdecrypt; /* s/w decrypt attempts */
-
- int bcmerror; /* last bcm error */
-
- mbool radio_disabled; /* bit vector for radio disabled reasons */
- bool radio_active; /* radio on/off state */
- u16 roam_time_thresh; /* Max. # secs. of not hearing beacons
- * before roaming.
- */
- bool align_wd_tbtt; /* Align watchdog with tbtt indication
- * handling. This flag is cleared by default
- * and is set by per port code explicitly and
- * you need to make sure the OSL_SYSUPTIME()
- * is implemented properly in osl of that port
- * when it enables this Power Save feature.
- */
-
- u16 boardrev; /* version # of particular board */
- u8 sromrev; /* version # of the srom */
- char srom_ccode[BRCM_CNTRY_BUF_SZ]; /* Country Code in SROM */
- u32 boardflags; /* Board specific flags from srom */
- u32 boardflags2; /* More board flags if sromrev >= 4 */
- bool tempsense_disable; /* disable periodic tempsense check */
- bool phy_11ncapable; /* the PHY/HW is capable of 802.11N */
- bool _ampdumac; /* mac assist ampdu enabled or not */
-
- struct wl_cnt *_cnt; /* low-level counters in driver */
-};
-
-/* wl_monitor rx status per packet */
-struct wl_rxsts {
- uint pkterror; /* error flags per pkt */
- uint phytype; /* 802.11 A/B/G ... */
- uint channel; /* channel */
- uint datarate; /* rate in 500kbps */
- uint antenna; /* antenna pkts received on */
- uint pktlength; /* pkt length minus bcm phy hdr */
- u32 mactime; /* time stamp from mac, count per 1us */
- uint sq; /* signal quality */
- s32 signal; /* in dbm */
- s32 noise; /* in dbm */
- uint preamble; /* Unknown, short, long */
- uint encoding; /* Unknown, CCK, PBCC, OFDM */
- uint nfrmtype; /* special 802.11n frames(AMPDU, AMSDU) */
- struct brcms_if *wlif; /* wl interface */
-};
-
-/* status per error RX pkt */
-#define WL_RXS_CRC_ERROR 0x00000001 /* CRC Error in packet */
-#define WL_RXS_RUNT_ERROR 0x00000002 /* Runt packet */
-#define WL_RXS_ALIGN_ERROR 0x00000004 /* Misaligned packet */
-#define WL_RXS_OVERSIZE_ERROR 0x00000008 /* packet bigger than RX_LENGTH (usually 1518) */
-#define WL_RXS_WEP_ICV_ERROR 0x00000010 /* Integrity Check Value error */
-#define WL_RXS_WEP_ENCRYPTED 0x00000020 /* Encrypted with WEP */
-#define WL_RXS_PLCP_SHORT 0x00000040 /* Short PLCP error */
-#define WL_RXS_DECRYPT_ERR 0x00000080 /* Decryption error */
-#define WL_RXS_OTHER_ERR 0x80000000 /* Other errors */
-
-/* phy type */
-#define WL_RXS_PHY_A 0x00000000 /* A phy type */
-#define WL_RXS_PHY_B 0x00000001 /* B phy type */
-#define WL_RXS_PHY_G 0x00000002 /* G phy type */
-#define WL_RXS_PHY_N 0x00000004 /* N phy type */
-
-/* encoding */
-#define WL_RXS_ENCODING_CCK 0x00000000 /* CCK encoding */
-#define WL_RXS_ENCODING_OFDM 0x00000001 /* OFDM encoding */
-
-/* preamble */
-#define WL_RXS_UNUSED_STUB 0x0 /* stub to match with wlc_ethereal.h */
-#define WL_RXS_PREAMBLE_SHORT 0x00000001 /* Short preamble */
-#define WL_RXS_PREAMBLE_LONG 0x00000002 /* Long preamble */
-#define WL_RXS_PREAMBLE_MIMO_MM 0x00000003 /* MIMO mixed mode preamble */
-#define WL_RXS_PREAMBLE_MIMO_GF 0x00000004 /* MIMO green field preamble */
-
-#define WL_RXS_NFRM_AMPDU_FIRST 0x00000001 /* first MPDU in A-MPDU */
-#define WL_RXS_NFRM_AMPDU_SUB 0x00000002 /* subsequent MPDU(s) in A-MPDU */
-#define WL_RXS_NFRM_AMSDU_FIRST 0x00000004 /* first MSDU in A-MSDU */
-#define WL_RXS_NFRM_AMSDU_SUB 0x00000008 /* subsequent MSDU(s) in A-MSDU */
-
-enum wlc_par_id {
- IOV_MPC = 1,
- IOV_RTSTHRESH,
- IOV_QTXPOWER,
- IOV_BCN_LI_BCN /* Beacon listen interval in # of beacons */
-};
-
-/***********************************************
- * Feature-related macros to optimize out code *
- * *********************************************
- */
-
-/* AP Support (versus STA) */
-#define AP_ENAB(pub) (0)
-
-/* Macro to check if APSTA mode enabled */
-#define APSTA_ENAB(pub) (0)
-
-/* Some useful combinations */
-#define STA_ONLY(pub) (!AP_ENAB(pub))
-#define AP_ONLY(pub) (AP_ENAB(pub) && !APSTA_ENAB(pub))
-
-#define ENAB_1x1 0x01
-#define ENAB_2x2 0x02
-#define ENAB_3x3 0x04
-#define ENAB_4x4 0x08
-#define SUPPORT_11N (ENAB_1x1|ENAB_2x2)
-#define SUPPORT_HT (ENAB_1x1|ENAB_2x2|ENAB_3x3)
-/* WL11N Support */
-#if ((defined(NCONF) && (NCONF != 0)) || (defined(LCNCONF) && (LCNCONF != 0)) || \
- (defined(HTCONF) && (HTCONF != 0)) || (defined(SSLPNCONF) && (SSLPNCONF != 0)))
-#define N_ENAB(pub) ((pub)->_n_enab & SUPPORT_11N)
-#define N_REQD(pub) ((pub)->_n_reqd)
-#else
-#define N_ENAB(pub) 0
-#define N_REQD(pub) 0
-#endif
-
-#if (defined(HTCONF) && (HTCONF != 0))
-#define HT_ENAB(pub) (((pub)->_n_enab & SUPPORT_HT) == SUPPORT_HT)
-#else
-#define HT_ENAB(pub) 0
-#endif
-
-#define AMPDU_AGG_HOST 1
-#define AMPDU_ENAB(pub) ((pub)->_ampdu)
-
-#define EDCF_ENAB(pub) (WME_ENAB(pub))
-#define QOS_ENAB(pub) (WME_ENAB(pub) || N_ENAB(pub))
-
-#define MONITOR_ENAB(wlc) ((wlc)->monitor)
-
-#define PROMISC_ENAB(wlc) ((wlc)->promisc)
-
-#define BRCMS_PREC_COUNT 16 /* Max precedence level implemented */
-
-/* pri is priority encoded in the packet. This maps the Packet priority to
- * enqueue precedence as defined in wlc_prec_map
- */
-extern const u8 wlc_prio2prec_map[];
-#define BRCMS_PRIO_TO_PREC(pri) wlc_prio2prec_map[(pri) & 7]
-
-/* This maps priority to one precedence higher - Used by PS-Poll response packets to
- * simulate enqueue-at-head operation, but still maintain the order on the queue
- */
-#define BRCMS_PRIO_TO_HI_PREC(pri) min(BRCMS_PRIO_TO_PREC(pri) + 1,\
- BRCMS_PREC_COUNT - 1)
-
-extern const u8 wme_fifo2ac[];
-#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
-
-/* Mask to describe all precedence levels */
-#define BRCMS_PREC_BMP_ALL MAXBITVAL(BRCMS_PREC_COUNT)
-
-/* Define a bitmap of precedences comprised by each AC */
-#define BRCMS_PREC_BMP_AC_BE (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_BE)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_BE)) | \
- NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_EE)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_EE)))
-#define BRCMS_PREC_BMP_AC_BK (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_BK)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_BK)) | \
- NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_NONE)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_NONE)))
-#define BRCMS_PREC_BMP_AC_VI (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_CL)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_CL)) | \
- NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_VI)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_VI)))
-#define BRCMS_PREC_BMP_AC_VO (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_VO)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_VO)) | \
- NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_NC)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_NC)))
-
-/* WME Support */
-#define WME_ENAB(pub) ((pub)->_wme != OFF)
-#define WME_AUTO(wlc) ((wlc)->pub->_wme == AUTO)
-
-/* invalid core flags, use the saved coreflags */
-#define BRCMS_USE_COREFLAGS 0xffffffff
-
-
-/* network protection config */
-#define BRCMS_PROT_G_SPEC 1 /* SPEC g protection */
-#define BRCMS_PROT_G_OVR 2 /* SPEC g prot override */
-#define BRCMS_PROT_G_USER 3 /* gmode specified by user */
-#define BRCMS_PROT_OVERLAP 4 /* overlap */
-#define BRCMS_PROT_N_USER 10 /* nmode specified by user */
-#define BRCMS_PROT_N_CFG 11 /* n protection */
-#define BRCMS_PROT_N_CFG_OVR 12 /* n protection override */
-#define BRCMS_PROT_N_NONGF 13 /* non-GF protection */
-#define BRCMS_PROT_N_NONGF_OVR 14 /* non-GF protection override */
-#define BRCMS_PROT_N_PAM_OVR 15 /* n preamble override */
-#define BRCMS_PROT_N_OBSS 16 /* non-HT OBSS present */
-
-/*
- * 54g modes (basic bits may still be overridden)
- *
- * GMODE_LEGACY_B Rateset: 1b, 2b, 5.5, 11
- * Preamble: Long
- * Shortslot: Off
- * GMODE_AUTO Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
- * Extended Rateset: 6, 9, 12, 48
- * Preamble: Long
- * Shortslot: Auto
- * GMODE_ONLY Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54
- * Extended Rateset: 6b, 9, 12b, 48
- * Preamble: Short required
- * Shortslot: Auto
- * GMODE_B_DEFERRED Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
- * Extended Rateset: 6, 9, 12, 48
- * Preamble: Long
- * Shortslot: On
- * GMODE_PERFORMANCE Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54
- * Preamble: Short required
- * Shortslot: On and required
- * GMODE_LRS Rateset: 1b, 2b, 5.5b, 11b
- * Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54
- * Preamble: Long
- * Shortslot: Auto
- */
-#define GMODE_LEGACY_B 0
-#define GMODE_AUTO 1
-#define GMODE_ONLY 2
-#define GMODE_B_DEFERRED 3
-#define GMODE_PERFORMANCE 4
-#define GMODE_LRS 5
-#define GMODE_MAX 6
-
-/* values for PLCPHdr_override */
-#define BRCMS_PLCP_AUTO -1
-#define BRCMS_PLCP_SHORT 0
-#define BRCMS_PLCP_LONG 1
-
-/* values for g_protection_override and n_protection_override */
-#define BRCMS_PROTECTION_AUTO -1
-#define BRCMS_PROTECTION_OFF 0
-#define BRCMS_PROTECTION_ON 1
-#define BRCMS_PROTECTION_MMHDR_ONLY 2
-#define BRCMS_PROTECTION_CTS_ONLY 3
-
-/* values for g_protection_control and n_protection_control */
-#define BRCMS_PROTECTION_CTL_OFF 0
-#define BRCMS_PROTECTION_CTL_LOCAL 1
-#define BRCMS_PROTECTION_CTL_OVERLAP 2
-
-/* values for n_protection */
-#define BRCMS_N_PROTECTION_OFF 0
-#define BRCMS_N_PROTECTION_OPTIONAL 1
-#define BRCMS_N_PROTECTION_20IN40 2
-#define BRCMS_N_PROTECTION_MIXEDMODE 3
-
-/* values for band specific 40MHz capabilities */
-#define BRCMS_N_BW_20ALL 0
-#define BRCMS_N_BW_40ALL 1
-#define BRCMS_N_BW_20IN2G_40IN5G 2
-
-/* bitflags for SGI support (sgi_rx iovar) */
-#define BRCMS_N_SGI_20 0x01
-#define BRCMS_N_SGI_40 0x02
-
-/* defines used by the nrate iovar */
-#define NRATE_MCS_INUSE 0x00000080 /* MSC in use,indicates b0-6 holds an mcs */
-#define NRATE_RATE_MASK 0x0000007f /* rate/mcs value */
-#define NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
-#define NRATE_STF_SHIFT 8 /* stf mode shift */
-#define NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */
-#define NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicate to override mcs only */
-#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
-#define NRATE_SGI_SHIFT 23 /* sgi mode */
-#define NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
-#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
-
-#define NRATE_STF_SISO 0 /* stf mode SISO */
-#define NRATE_STF_CDD 1 /* stf mode CDD */
-#define NRATE_STF_STBC 2 /* stf mode STBC */
-#define NRATE_STF_SDM 3 /* stf mode SDM */
-
-#define ANT_SELCFG_MAX 4 /* max number of antenna configurations */
-
-#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */
-
-struct brcms_antselcfg {
- u8 ant_config[ANT_SELCFG_MAX]; /* antenna configuration */
- u8 num_antcfg; /* number of available antenna configurations */
-};
-
-/* common functions for every port */
-extern void *brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device,
- uint unit, bool piomode, void *regsva, uint bustype,
- void *btparam, uint *perr);
-extern uint brcms_c_detach(struct brcms_c_info *wlc);
-extern int brcms_c_up(struct brcms_c_info *wlc);
-extern uint brcms_c_down(struct brcms_c_info *wlc);
-
-extern int brcms_c_set(struct brcms_c_info *wlc, int cmd, int arg);
-extern int brcms_c_get(struct brcms_c_info *wlc, int cmd, int *arg);
-extern bool brcms_c_chipmatch(u16 vendor, u16 device);
-extern void brcms_c_init(struct brcms_c_info *wlc);
-extern void brcms_c_reset(struct brcms_c_info *wlc);
-
-extern void brcms_c_intrson(struct brcms_c_info *wlc);
-extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
-extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
-extern bool brcms_c_intrsupd(struct brcms_c_info *wlc);
-extern bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc);
-extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
-extern bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc,
- struct sk_buff *sdu,
- struct ieee80211_hw *hw);
-extern int brcms_c_ioctl(struct brcms_c_info *wlc, int cmd, void *arg, int len,
- struct brcms_c_if *wlcif);
-extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
-
-/* helper functions */
-extern void brcms_c_statsupd(struct brcms_c_info *wlc);
-extern void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx,
- int val);
-extern int brcms_c_get_header_len(void);
-extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc,
- bool promisc);
-extern void brcms_c_set_addrmatch(struct brcms_c_info *wlc,
- int match_reg_offset,
- const u8 *addr);
-extern void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
- const struct ieee80211_tx_queue_params *arg,
- bool suspend);
-extern struct brcms_pub *brcms_c_pub(void *wlc);
-
-/* common functions for every port */
-extern void brcms_c_mhf(struct brcms_c_info *wlc, u8 idx, u16 mask, u16 val,
- int bands);
-extern void brcms_c_rate_lookup_init(struct brcms_c_info *wlc,
- wlc_rateset_t *rateset);
-extern void brcms_default_rateset(struct brcms_c_info *wlc, wlc_rateset_t *rs);
-
-extern void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
- struct ieee80211_sta *sta, u16 tid);
-extern void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
- u8 ba_wsize, uint max_rx_ampdu_bytes);
-extern int brcms_c_set_par(struct brcms_c_info *wlc, enum wlc_par_id par_id,
- int val);
-extern int brcms_c_get_par(struct brcms_c_info *wlc, enum wlc_par_id par_id,
- int *ret_int_ptr);
-extern char *getvar(char *vars, const char *name);
-extern int getintvar(char *vars, const char *name);
-
-/* wlc_phy.c helper functions */
-extern void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc);
-extern void brcms_c_mctrl(struct brcms_c_info *wlc, u32 mask, u32 val);
-
-extern int brcms_c_module_register(struct brcms_pub *pub,
- const char *name, void *hdl,
- watchdog_fn_t watchdog_fn, down_fn_t down_fn);
-extern int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
- void *hdl);
-extern void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
-extern void brcms_c_enable_mac(struct brcms_c_info *wlc);
-extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
-extern void brcms_c_scan_start(struct brcms_c_info *wlc);
-extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
-extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
- bool drop);
-
-/* helper functions */
-extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
-extern bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc);
-
-#define MAXBANDS 2 /* Maximum #of bands */
-/* bandstate array indices */
-#define BAND_2G_INDEX 0 /* wlc->bandstate[x] index */
-#define BAND_5G_INDEX 1 /* wlc->bandstate[x] index */
-
-#define BAND_2G_NAME "2.4G"
-#define BAND_5G_NAME "5G"
-
-/* BMAC RPC: 7 u32 params: pkttotlen, fifo, commit, fid, txpktpend, pktflag, rpc_id */
-#define BRCMS_RPCTX_PARAMS 32
-
-#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/rate.c b/drivers/staging/brcm80211/brcmsmac/rate.c
deleted file mode 100644
index f0e4b99c256..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/rate.c
+++ /dev/null
@@ -1,498 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <brcmu_wifi.h>
-#include <brcmu_utils.h>
-
-#include "d11.h"
-#include "pub.h"
-#include "rate.h"
-
-/* Rate info per rate: It tells whether a rate is ofdm or not and its phy_rate value */
-const u8 rate_info[BRCM_MAXRATE + 1] = {
- /* 0 1 2 3 4 5 6 7 8 9 */
-/* 0 */ 0x00, 0x00, 0x0a, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00,
-/* 10 */ 0x00, 0x37, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x00,
-/* 20 */ 0x00, 0x00, 0x6e, 0x00, 0x8a, 0x00, 0x00, 0x00, 0x00, 0x00,
-/* 30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x00, 0x00, 0x00,
-/* 40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x00,
-/* 50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-/* 60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-/* 70 */ 0x00, 0x00, 0x8d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-/* 80 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-/* 90 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00,
-/* 100 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c
-};
-
-/* rates are in units of Kbps */
-const struct brcms_mcs_info mcs_table[MCS_TABLE_SIZE] = {
- /* MCS 0: SS 1, MOD: BPSK, CR 1/2 */
- {6500, 13500, CEIL(6500 * 10, 9), CEIL(13500 * 10, 9), 0x00,
- BRCM_RATE_6M},
- /* MCS 1: SS 1, MOD: QPSK, CR 1/2 */
- {13000, 27000, CEIL(13000 * 10, 9), CEIL(27000 * 10, 9), 0x08,
- BRCM_RATE_12M},
- /* MCS 2: SS 1, MOD: QPSK, CR 3/4 */
- {19500, 40500, CEIL(19500 * 10, 9), CEIL(40500 * 10, 9), 0x0A,
- BRCM_RATE_18M},
- /* MCS 3: SS 1, MOD: 16QAM, CR 1/2 */
- {26000, 54000, CEIL(26000 * 10, 9), CEIL(54000 * 10, 9), 0x10,
- BRCM_RATE_24M},
- /* MCS 4: SS 1, MOD: 16QAM, CR 3/4 */
- {39000, 81000, CEIL(39000 * 10, 9), CEIL(81000 * 10, 9), 0x12,
- BRCM_RATE_36M},
- /* MCS 5: SS 1, MOD: 64QAM, CR 2/3 */
- {52000, 108000, CEIL(52000 * 10, 9), CEIL(108000 * 10, 9), 0x19,
- BRCM_RATE_48M},
- /* MCS 6: SS 1, MOD: 64QAM, CR 3/4 */
- {58500, 121500, CEIL(58500 * 10, 9), CEIL(121500 * 10, 9), 0x1A,
- BRCM_RATE_54M},
- /* MCS 7: SS 1, MOD: 64QAM, CR 5/6 */
- {65000, 135000, CEIL(65000 * 10, 9), CEIL(135000 * 10, 9), 0x1C,
- BRCM_RATE_54M},
- /* MCS 8: SS 2, MOD: BPSK, CR 1/2 */
- {13000, 27000, CEIL(13000 * 10, 9), CEIL(27000 * 10, 9), 0x40,
- BRCM_RATE_6M},
- /* MCS 9: SS 2, MOD: QPSK, CR 1/2 */
- {26000, 54000, CEIL(26000 * 10, 9), CEIL(54000 * 10, 9), 0x48,
- BRCM_RATE_12M},
- /* MCS 10: SS 2, MOD: QPSK, CR 3/4 */
- {39000, 81000, CEIL(39000 * 10, 9), CEIL(81000 * 10, 9), 0x4A,
- BRCM_RATE_18M},
- /* MCS 11: SS 2, MOD: 16QAM, CR 1/2 */
- {52000, 108000, CEIL(52000 * 10, 9), CEIL(108000 * 10, 9), 0x50,
- BRCM_RATE_24M},
- /* MCS 12: SS 2, MOD: 16QAM, CR 3/4 */
- {78000, 162000, CEIL(78000 * 10, 9), CEIL(162000 * 10, 9), 0x52,
- BRCM_RATE_36M},
- /* MCS 13: SS 2, MOD: 64QAM, CR 2/3 */
- {104000, 216000, CEIL(104000 * 10, 9), CEIL(216000 * 10, 9), 0x59,
- BRCM_RATE_48M},
- /* MCS 14: SS 2, MOD: 64QAM, CR 3/4 */
- {117000, 243000, CEIL(117000 * 10, 9), CEIL(243000 * 10, 9), 0x5A,
- BRCM_RATE_54M},
- /* MCS 15: SS 2, MOD: 64QAM, CR 5/6 */
- {130000, 270000, CEIL(130000 * 10, 9), CEIL(270000 * 10, 9), 0x5C,
- BRCM_RATE_54M},
- /* MCS 16: SS 3, MOD: BPSK, CR 1/2 */
- {19500, 40500, CEIL(19500 * 10, 9), CEIL(40500 * 10, 9), 0x80,
- BRCM_RATE_6M},
- /* MCS 17: SS 3, MOD: QPSK, CR 1/2 */
- {39000, 81000, CEIL(39000 * 10, 9), CEIL(81000 * 10, 9), 0x88,
- BRCM_RATE_12M},
- /* MCS 18: SS 3, MOD: QPSK, CR 3/4 */
- {58500, 121500, CEIL(58500 * 10, 9), CEIL(121500 * 10, 9), 0x8A,
- BRCM_RATE_18M},
- /* MCS 19: SS 3, MOD: 16QAM, CR 1/2 */
- {78000, 162000, CEIL(78000 * 10, 9), CEIL(162000 * 10, 9), 0x90,
- BRCM_RATE_24M},
- /* MCS 20: SS 3, MOD: 16QAM, CR 3/4 */
- {117000, 243000, CEIL(117000 * 10, 9), CEIL(243000 * 10, 9), 0x92,
- BRCM_RATE_36M},
- /* MCS 21: SS 3, MOD: 64QAM, CR 2/3 */
- {156000, 324000, CEIL(156000 * 10, 9), CEIL(324000 * 10, 9), 0x99,
- BRCM_RATE_48M},
- /* MCS 22: SS 3, MOD: 64QAM, CR 3/4 */
- {175500, 364500, CEIL(175500 * 10, 9), CEIL(364500 * 10, 9), 0x9A,
- BRCM_RATE_54M},
- /* MCS 23: SS 3, MOD: 64QAM, CR 5/6 */
- {195000, 405000, CEIL(195000 * 10, 9), CEIL(405000 * 10, 9), 0x9B,
- BRCM_RATE_54M},
- /* MCS 24: SS 4, MOD: BPSK, CR 1/2 */
- {26000, 54000, CEIL(26000 * 10, 9), CEIL(54000 * 10, 9), 0xC0,
- BRCM_RATE_6M},
- /* MCS 25: SS 4, MOD: QPSK, CR 1/2 */
- {52000, 108000, CEIL(52000 * 10, 9), CEIL(108000 * 10, 9), 0xC8,
- BRCM_RATE_12M},
- /* MCS 26: SS 4, MOD: QPSK, CR 3/4 */
- {78000, 162000, CEIL(78000 * 10, 9), CEIL(162000 * 10, 9), 0xCA,
- BRCM_RATE_18M},
- /* MCS 27: SS 4, MOD: 16QAM, CR 1/2 */
- {104000, 216000, CEIL(104000 * 10, 9), CEIL(216000 * 10, 9), 0xD0,
- BRCM_RATE_24M},
- /* MCS 28: SS 4, MOD: 16QAM, CR 3/4 */
- {156000, 324000, CEIL(156000 * 10, 9), CEIL(324000 * 10, 9), 0xD2,
- BRCM_RATE_36M},
- /* MCS 29: SS 4, MOD: 64QAM, CR 2/3 */
- {208000, 432000, CEIL(208000 * 10, 9), CEIL(432000 * 10, 9), 0xD9,
- BRCM_RATE_48M},
- /* MCS 30: SS 4, MOD: 64QAM, CR 3/4 */
- {234000, 486000, CEIL(234000 * 10, 9), CEIL(486000 * 10, 9), 0xDA,
- BRCM_RATE_54M},
- /* MCS 31: SS 4, MOD: 64QAM, CR 5/6 */
- {260000, 540000, CEIL(260000 * 10, 9), CEIL(540000 * 10, 9), 0xDB,
- BRCM_RATE_54M},
- /* MCS 32: SS 1, MOD: BPSK, CR 1/2 */
- {0, 6000, 0, CEIL(6000 * 10, 9), 0x00, BRCM_RATE_6M},
-};
-
-/* phycfg for legacy OFDM frames: code rate, modulation scheme, spatial streams
- * Number of spatial streams: always 1
- * other fields: refer to table 78 of section 17.3.2.2 of the original .11a standard
- */
-struct legacy_phycfg {
- u32 rate_ofdm; /* ofdm mac rate */
- u8 tx_phy_ctl3; /* phy ctl byte 3, code rate, modulation type, # of streams */
-};
-
-#define LEGACY_PHYCFG_TABLE_SIZE 12 /* Number of legacy_rate_cfg entries in the table */
-
-/* In CCK mode LPPHY overloads OFDM Modulation bits with CCK Data Rate */
-/* Eventually MIMOPHY would also be converted to this format */
-/* 0 = 1Mbps; 1 = 2Mbps; 2 = 5.5Mbps; 3 = 11Mbps */
-static const struct
-legacy_phycfg legacy_phycfg_table[LEGACY_PHYCFG_TABLE_SIZE] = {
- {BRCM_RATE_1M, 0x00}, /* CCK 1Mbps, data rate 0 */
- {BRCM_RATE_2M, 0x08}, /* CCK 2Mbps, data rate 1 */
- {BRCM_RATE_5M5, 0x10}, /* CCK 5.5Mbps, data rate 2 */
- {BRCM_RATE_11M, 0x18}, /* CCK 11Mbps, data rate 3 */
- /* OFDM 6Mbps, code rate 1/2, BPSK, 1 spatial stream */
- {BRCM_RATE_6M, 0x00},
- /* OFDM 9Mbps, code rate 3/4, BPSK, 1 spatial stream */
- {BRCM_RATE_9M, 0x02},
- /* OFDM 12Mbps, code rate 1/2, QPSK, 1 spatial stream */
- {BRCM_RATE_12M, 0x08},
- /* OFDM 18Mbps, code rate 3/4, QPSK, 1 spatial stream */
- {BRCM_RATE_18M, 0x0A},
- /* OFDM 24Mbps, code rate 1/2, 16-QAM, 1 spatial stream */
- {BRCM_RATE_24M, 0x10},
- /* OFDM 36Mbps, code rate 3/4, 16-QAM, 1 spatial stream */
- {BRCM_RATE_36M, 0x12},
- /* OFDM 48Mbps, code rate 2/3, 64-QAM, 1 spatial stream */
- {BRCM_RATE_48M, 0x19},
- /* OFDM 54Mbps, code rate 3/4, 64-QAM, 1 spatial stream */
- {BRCM_RATE_54M, 0x1A},
-};
-
-/* Hardware rates (also encodes default basic rates) */
-
-const wlc_rateset_t cck_ofdm_mimo_rates = {
- 12,
- { /* 1b, 2b, 5.5b, 6, 9, 11b, 12, 18, 24, 36, 48, 54 Mbps */
- 0x82, 0x84, 0x8b, 0x0c, 0x12, 0x96, 0x18, 0x24, 0x30, 0x48, 0x60,
- 0x6c},
- 0x00,
- {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-const wlc_rateset_t ofdm_mimo_rates = {
- 8,
- { /* 6b, 9, 12b, 18, 24b, 36, 48, 54 Mbps */
- 0x8c, 0x12, 0x98, 0x24, 0xb0, 0x48, 0x60, 0x6c},
- 0x00,
- {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* Default ratesets that include MCS32 for 40BW channels */
-const wlc_rateset_t cck_ofdm_40bw_mimo_rates = {
- 12,
- { /* 1b, 2b, 5.5b, 6, 9, 11b, 12, 18, 24, 36, 48, 54 Mbps */
- 0x82, 0x84, 0x8b, 0x0c, 0x12, 0x96, 0x18, 0x24, 0x30, 0x48, 0x60,
- 0x6c},
- 0x00,
- {0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-const wlc_rateset_t ofdm_40bw_mimo_rates = {
- 8,
- { /* 6b, 9, 12b, 18, 24b, 36, 48, 54 Mbps */
- 0x8c, 0x12, 0x98, 0x24, 0xb0, 0x48, 0x60, 0x6c},
- 0x00,
- {0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-const wlc_rateset_t cck_ofdm_rates = {
- 12,
- { /* 1b, 2b, 5.5b, 6, 9, 11b, 12, 18, 24, 36, 48, 54 Mbps */
- 0x82, 0x84, 0x8b, 0x0c, 0x12, 0x96, 0x18, 0x24, 0x30, 0x48, 0x60,
- 0x6c},
- 0x00,
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-const wlc_rateset_t gphy_legacy_rates = {
- 4,
- { /* 1b, 2b, 5.5b, 11b Mbps */
- 0x82, 0x84, 0x8b, 0x96},
- 0x00,
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-const wlc_rateset_t ofdm_rates = {
- 8,
- { /* 6b, 9, 12b, 18, 24b, 36, 48, 54 Mbps */
- 0x8c, 0x12, 0x98, 0x24, 0xb0, 0x48, 0x60, 0x6c},
- 0x00,
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-const wlc_rateset_t cck_rates = {
- 4,
- { /* 1b, 2b, 5.5, 11 Mbps */
- 0x82, 0x84, 0x0b, 0x16},
- 0x00,
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* check if rateset is valid.
- * if check_brate is true, rateset without a basic rate is considered NOT valid.
- */
-static bool brcms_c_rateset_valid(wlc_rateset_t *rs, bool check_brate)
-{
- uint idx;
-
- if (!rs->count)
- return false;
-
- if (!check_brate)
- return true;
-
- /* error if no basic rates */
- for (idx = 0; idx < rs->count; idx++) {
- if (rs->rates[idx] & BRCMS_RATE_FLAG)
- return true;
- }
- return false;
-}
-
-void brcms_c_rateset_mcs_upd(wlc_rateset_t *rs, u8 txstreams)
-{
- int i;
- for (i = txstreams; i < MAX_STREAMS_SUPPORTED; i++)
- rs->mcs[i] = 0;
-}
-
-/* filter based on hardware rateset, and sort filtered rateset with basic bit(s) preserved,
- * and check if resulting rateset is valid.
-*/
-bool
-brcms_c_rate_hwrs_filter_sort_validate(wlc_rateset_t *rs,
- const wlc_rateset_t *hw_rs,
- bool check_brate, u8 txstreams)
-{
- u8 rateset[BRCM_MAXRATE + 1];
- u8 r;
- uint count;
- uint i;
-
- memset(rateset, 0, sizeof(rateset));
- count = rs->count;
-
- for (i = 0; i < count; i++) {
- /* mask off "basic rate" bit, BRCMS_RATE_FLAG */
- r = (int)rs->rates[i] & BRCMS_RATE_MASK;
- if ((r > BRCM_MAXRATE) || (rate_info[r] == 0))
- continue;
- rateset[r] = rs->rates[i]; /* preserve basic bit! */
- }
-
- /* fill out the rates in order, looking at only supported rates */
- count = 0;
- for (i = 0; i < hw_rs->count; i++) {
- r = hw_rs->rates[i] & BRCMS_RATE_MASK;
- if (rateset[r])
- rs->rates[count++] = rateset[r];
- }
-
- rs->count = count;
-
- /* only set the mcs rate bit if the equivalent hw mcs bit is set */
- for (i = 0; i < MCSSET_LEN; i++)
- rs->mcs[i] = (rs->mcs[i] & hw_rs->mcs[i]);
-
- if (brcms_c_rateset_valid(rs, check_brate))
- return true;
- else
- return false;
-}
-
-/* calculate the rate of a rx'd frame and return it as a ratespec */
-ratespec_t brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp)
-{
- int phy_type;
- ratespec_t rspec = PHY_TXC1_BW_20MHZ << RSPEC_BW_SHIFT;
-
- phy_type =
- ((rxh->RxChan & RXS_CHAN_PHYTYPE_MASK) >> RXS_CHAN_PHYTYPE_SHIFT);
-
- if ((phy_type == PHY_TYPE_N) || (phy_type == PHY_TYPE_SSN) ||
- (phy_type == PHY_TYPE_LCN) || (phy_type == PHY_TYPE_HT)) {
- switch (rxh->PhyRxStatus_0 & PRXS0_FT_MASK) {
- case PRXS0_CCK:
- rspec =
- CCK_PHY2MAC_RATE(
- ((struct cck_phy_hdr *) plcp)->signal);
- break;
- case PRXS0_OFDM:
- rspec =
- OFDM_PHY2MAC_RATE(
- ((struct ofdm_phy_hdr *) plcp)->rlpt[0]);
- break;
- case PRXS0_PREN:
- rspec = (plcp[0] & MIMO_PLCP_MCS_MASK) | RSPEC_MIMORATE;
- if (plcp[0] & MIMO_PLCP_40MHZ) {
- /* indicate rspec is for 40 MHz mode */
- rspec &= ~RSPEC_BW_MASK;
- rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);
- }
- break;
- case PRXS0_STDN:
- /* fallthru */
- default:
- /* not supported, error condition */
- break;
- }
- if (PLCP3_ISSGI(plcp[3]))
- rspec |= RSPEC_SHORT_GI;
- } else
- if ((phy_type == PHY_TYPE_A) || (rxh->PhyRxStatus_0 & PRXS0_OFDM))
- rspec = OFDM_PHY2MAC_RATE(
- ((struct ofdm_phy_hdr *) plcp)->rlpt[0]);
- else
- rspec = CCK_PHY2MAC_RATE(
- ((struct cck_phy_hdr *) plcp)->signal);
-
- return rspec;
-}
-
-/* copy rateset src to dst as-is (no masking or sorting) */
-void brcms_c_rateset_copy(const wlc_rateset_t *src, wlc_rateset_t *dst)
-{
- memcpy(dst, src, sizeof(wlc_rateset_t));
-}
-
-/*
- * Copy and selectively filter one rateset to another.
- * 'basic_only' means only copy basic rates.
- * 'rates' indicates cck (11b) and ofdm rates combinations.
- * - 0: cck and ofdm
- * - 1: cck only
- * - 2: ofdm only
- * 'xmask' is the copy mask (typically 0x7f or 0xff).
- */
-void
-brcms_c_rateset_filter(wlc_rateset_t *src, wlc_rateset_t *dst, bool basic_only,
- u8 rates, uint xmask, bool mcsallow)
-{
- uint i;
- uint r;
- uint count;
-
- count = 0;
- for (i = 0; i < src->count; i++) {
- r = src->rates[i];
- if (basic_only && !(r & BRCMS_RATE_FLAG))
- continue;
- if (rates == BRCMS_RATES_CCK && IS_OFDM((r & BRCMS_RATE_MASK)))
- continue;
- if (rates == BRCMS_RATES_OFDM && IS_CCK((r & BRCMS_RATE_MASK)))
- continue;
- dst->rates[count++] = r & xmask;
- }
- dst->count = count;
- dst->htphy_membership = src->htphy_membership;
-
- if (mcsallow && rates != BRCMS_RATES_CCK)
- memcpy(&dst->mcs[0], &src->mcs[0], MCSSET_LEN);
- else
- brcms_c_rateset_mcs_clear(dst);
-}
-
-/* select rateset for a given phy_type and bandtype and filter it, sort it
- * and fill rs_tgt with result
- */
-void
-brcms_c_rateset_default(wlc_rateset_t *rs_tgt, const wlc_rateset_t *rs_hw,
- uint phy_type, int bandtype, bool cck_only, uint rate_mask,
- bool mcsallow, u8 bw, u8 txstreams)
-{
- const wlc_rateset_t *rs_dflt;
- wlc_rateset_t rs_sel;
- if ((PHYTYPE_IS(phy_type, PHY_TYPE_HT)) ||
- (PHYTYPE_IS(phy_type, PHY_TYPE_N)) ||
- (PHYTYPE_IS(phy_type, PHY_TYPE_LCN)) ||
- (PHYTYPE_IS(phy_type, PHY_TYPE_SSN))) {
- if (BAND_5G(bandtype)) {
- rs_dflt = (bw == BRCMS_20_MHZ ?
- &ofdm_mimo_rates : &ofdm_40bw_mimo_rates);
- } else {
- rs_dflt = (bw == BRCMS_20_MHZ ?
- &cck_ofdm_mimo_rates :
- &cck_ofdm_40bw_mimo_rates);
- }
- } else if (PHYTYPE_IS(phy_type, PHY_TYPE_LP)) {
- rs_dflt = (BAND_5G(bandtype)) ? &ofdm_rates : &cck_ofdm_rates;
- } else if (PHYTYPE_IS(phy_type, PHY_TYPE_A)) {
- rs_dflt = &ofdm_rates;
- } else if (PHYTYPE_IS(phy_type, PHY_TYPE_G)) {
- rs_dflt = &cck_ofdm_rates;
- } else {
- /* should not happen, error condition */
- rs_dflt = &cck_rates; /* force cck */
- }
-
- /* if hw rateset is not supplied, assign selected rateset to it */
- if (!rs_hw)
- rs_hw = rs_dflt;
-
- brcms_c_rateset_copy(rs_dflt, &rs_sel);
- brcms_c_rateset_mcs_upd(&rs_sel, txstreams);
- brcms_c_rateset_filter(&rs_sel, rs_tgt, false,
- cck_only ? BRCMS_RATES_CCK : BRCMS_RATES_CCK_OFDM,
- rate_mask, mcsallow);
- brcms_c_rate_hwrs_filter_sort_validate(rs_tgt, rs_hw, false,
- mcsallow ? txstreams : 1);
-}
-
-s16 brcms_c_rate_legacy_phyctl(uint rate)
-{
- uint i;
- for (i = 0; i < LEGACY_PHYCFG_TABLE_SIZE; i++)
- if (rate == legacy_phycfg_table[i].rate_ofdm)
- return legacy_phycfg_table[i].tx_phy_ctl3;
-
- return -1;
-}
-
-void brcms_c_rateset_mcs_clear(wlc_rateset_t *rateset)
-{
- uint i;
- for (i = 0; i < MCSSET_LEN; i++)
- rateset->mcs[i] = 0;
-}
-
-void brcms_c_rateset_mcs_build(wlc_rateset_t *rateset, u8 txstreams)
-{
- memcpy(&rateset->mcs[0], &cck_ofdm_mimo_rates.mcs[0], MCSSET_LEN);
- brcms_c_rateset_mcs_upd(rateset, txstreams);
-}
-
-/* Based on bandwidth passed, allow/disallow MCS 32 in the rateset */
-void brcms_c_rateset_bw_mcs_filter(wlc_rateset_t *rateset, u8 bw)
-{
- if (bw == BRCMS_40_MHZ)
- setbit(rateset->mcs, 32);
- else
- clrbit(rateset->mcs, 32);
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/rate.h b/drivers/staging/brcm80211/brcmsmac/rate.h
deleted file mode 100644
index dbfd3e5816d..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/rate.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_RATE_H_
-#define _BRCM_RATE_H_
-
-#include "types.h"
-
-extern const u8 rate_info[];
-extern const struct brcms_rateset cck_ofdm_mimo_rates;
-extern const struct brcms_rateset ofdm_mimo_rates;
-extern const struct brcms_rateset cck_ofdm_rates;
-extern const struct brcms_rateset ofdm_rates;
-extern const struct brcms_rateset cck_rates;
-extern const struct brcms_rateset gphy_legacy_rates;
-extern const struct brcms_rateset wlc_lrs_rates;
-extern const struct brcms_rateset rate_limit_1_2;
-
-struct brcms_mcs_info {
- u32 phy_rate_20; /* phy rate in kbps [20Mhz] */
- u32 phy_rate_40; /* phy rate in kbps [40Mhz] */
- u32 phy_rate_20_sgi; /* phy rate in kbps [20Mhz] with SGI */
- u32 phy_rate_40_sgi; /* phy rate in kbps [40Mhz] with SGI */
- u8 tx_phy_ctl3; /* phy ctl byte 3, code rate, modulation type, # of streams */
- u8 leg_ofdm; /* matching legacy ofdm rate in 500bkps */
-};
-
-#define BRCMS_MAXMCS 32 /* max valid mcs index */
-#define MCS_TABLE_SIZE 33 /* Number of mcs entries in the table */
-extern const struct brcms_mcs_info mcs_table[];
-
-#define MCS_INVALID 0xFF
-#define MCS_CR_MASK 0x07 /* Code Rate bit mask */
-#define MCS_MOD_MASK 0x38 /* Modulation bit shift */
-#define MCS_MOD_SHIFT 3 /* MOdulation bit shift */
-#define MCS_TXS_MASK 0xc0 /* num tx streams - 1 bit mask */
-#define MCS_TXS_SHIFT 6 /* num tx streams - 1 bit shift */
-#define MCS_CR(_mcs) (mcs_table[_mcs].tx_phy_ctl3 & MCS_CR_MASK)
-#define MCS_MOD(_mcs) ((mcs_table[_mcs].tx_phy_ctl3 & MCS_MOD_MASK) >> MCS_MOD_SHIFT)
-#define MCS_TXS(_mcs) ((mcs_table[_mcs].tx_phy_ctl3 & MCS_TXS_MASK) >> MCS_TXS_SHIFT)
-#define MCS_RATE(_mcs, _is40, _sgi) (_sgi ? \
- (_is40 ? mcs_table[_mcs].phy_rate_40_sgi : mcs_table[_mcs].phy_rate_20_sgi) : \
- (_is40 ? mcs_table[_mcs].phy_rate_40 : mcs_table[_mcs].phy_rate_20))
-#define VALID_MCS(_mcs) ((_mcs < MCS_TABLE_SIZE))
-
-/* Macro to use the rate_info table */
-#define BRCMS_RATE_MASK_FULL 0xff /* Rate value mask with basic rate flag */
-
-/* convert 500kbps to bps */
-#define BRCMS_RATE_500K_TO_BPS(rate) ((rate) * 500000)
-
-/* rate spec : holds rate and mode specific information required to generate a tx frame. */
-/* Legacy CCK and OFDM information is held in the same manner as was done in the past */
-/* (in the lower byte) the upper 3 bytes primarily hold MIMO specific information */
-
-/* rate spec bit fields */
-#define RSPEC_RATE_MASK 0x0000007F /* Either 500Kbps units or MIMO MCS idx */
-#define RSPEC_MIMORATE 0x08000000 /* mimo MCS is stored in RSPEC_RATE_MASK */
-#define RSPEC_BW_MASK 0x00000700 /* mimo bw mask */
-#define RSPEC_BW_SHIFT 8 /* mimo bw shift */
-#define RSPEC_STF_MASK 0x00003800 /* mimo Space/Time/Frequency mode mask */
-#define RSPEC_STF_SHIFT 11 /* mimo Space/Time/Frequency mode shift */
-#define RSPEC_CT_MASK 0x0000C000 /* mimo coding type mask */
-#define RSPEC_CT_SHIFT 14 /* mimo coding type shift */
-#define RSPEC_STC_MASK 0x00300000 /* mimo num STC streams per PLCP defn. */
-#define RSPEC_STC_SHIFT 20 /* mimo num STC streams per PLCP defn. */
-#define RSPEC_LDPC_CODING 0x00400000 /* mimo bit indicates adv coding in use */
-#define RSPEC_SHORT_GI 0x00800000 /* mimo bit indicates short GI in use */
-#define RSPEC_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */
-#define RSPEC_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicates override rate only */
-
-#define BRCMS_HTPHY 127 /* HT PHY Membership */
-
-#define RSPEC_ACTIVE(rspec) (rspec & (RSPEC_RATE_MASK | RSPEC_MIMORATE))
-#define RSPEC2RATE(rspec) ((rspec & RSPEC_MIMORATE) ? \
- MCS_RATE((rspec & RSPEC_RATE_MASK), RSPEC_IS40MHZ(rspec), RSPEC_ISSGI(rspec)) : \
- (rspec & RSPEC_RATE_MASK))
-/* return rate in unit of 500Kbps -- for internal use in wlc_rate_sel.c */
-#define RSPEC2RATE500K(rspec) ((rspec & RSPEC_MIMORATE) ? \
- MCS_RATE((rspec & RSPEC_RATE_MASK), state->is40bw, RSPEC_ISSGI(rspec))/500 : \
- (rspec & RSPEC_RATE_MASK))
-#define CRSPEC2RATE500K(rspec) ((rspec & RSPEC_MIMORATE) ? \
- MCS_RATE((rspec & RSPEC_RATE_MASK), RSPEC_IS40MHZ(rspec), RSPEC_ISSGI(rspec))/500 :\
- (rspec & RSPEC_RATE_MASK))
-
-#define RSPEC2KBPS(rspec) (IS_MCS(rspec) ? RSPEC2RATE(rspec) : RSPEC2RATE(rspec)*500)
-#define RSPEC_PHYTXBYTE2(rspec) ((rspec & 0xff00) >> 8)
-#define RSPEC_GET_BW(rspec) ((rspec & RSPEC_BW_MASK) >> RSPEC_BW_SHIFT)
-#define RSPEC_IS40MHZ(rspec) ((((rspec & RSPEC_BW_MASK) >> RSPEC_BW_SHIFT) == \
- PHY_TXC1_BW_40MHZ) || (((rspec & RSPEC_BW_MASK) >> \
- RSPEC_BW_SHIFT) == PHY_TXC1_BW_40MHZ_DUP))
-#define RSPEC_ISSGI(rspec) ((rspec & RSPEC_SHORT_GI) == RSPEC_SHORT_GI)
-#define RSPEC_MIMOPLCP3(rspec) ((rspec & 0xf00000) >> 16)
-#define PLCP3_ISSGI(plcp) (plcp & (RSPEC_SHORT_GI >> 16))
-#define RSPEC_STC(rspec) ((rspec & RSPEC_STC_MASK) >> RSPEC_STC_SHIFT)
-#define RSPEC_STF(rspec) ((rspec & RSPEC_STF_MASK) >> RSPEC_STF_SHIFT)
-#define PLCP3_ISSTBC(plcp) ((plcp & (RSPEC_STC_MASK) >> 16) == 0x10)
-#define PLCP3_STC_MASK 0x30
-#define PLCP3_STC_SHIFT 4
-
-/* Rate info table; takes a legacy rate or ratespec_t */
-#define IS_MCS(r) (r & RSPEC_MIMORATE)
-#define IS_OFDM(r) (!IS_MCS(r) && (rate_info[(r) & RSPEC_RATE_MASK] & \
- BRCMS_RATE_FLAG))
-#define IS_CCK(r) (!IS_MCS(r) && ( \
- ((r) & BRCMS_RATE_MASK) == BRCM_RATE_1M || \
- ((r) & BRCMS_RATE_MASK) == BRCM_RATE_2M || \
- ((r) & BRCMS_RATE_MASK) == BRCM_RATE_5M5 || \
- ((r) & BRCMS_RATE_MASK) == BRCM_RATE_11M))
-#define IS_SINGLE_STREAM(mcs) (((mcs) <= HIGHEST_SINGLE_STREAM_MCS) || ((mcs) == 32))
-#define CCK_RSPEC(cck) ((cck) & RSPEC_RATE_MASK)
-#define OFDM_RSPEC(ofdm) (((ofdm) & RSPEC_RATE_MASK) |\
- (PHY_TXC1_MODE_CDD << RSPEC_STF_SHIFT))
-#define LEGACY_RSPEC(rate) (IS_CCK(rate) ? CCK_RSPEC(rate) : OFDM_RSPEC(rate))
-
-#define MCS_RSPEC(mcs) (((mcs) & RSPEC_RATE_MASK) | RSPEC_MIMORATE | \
- (IS_SINGLE_STREAM(mcs) ? (PHY_TXC1_MODE_CDD << RSPEC_STF_SHIFT) : \
- (PHY_TXC1_MODE_SDM << RSPEC_STF_SHIFT)))
-
-/* Convert encoded rate value in plcp header to numerical rates in 500 KHz increments */
-extern const u8 ofdm_rate_lookup[];
-#define OFDM_PHY2MAC_RATE(rlpt) (ofdm_rate_lookup[rlpt & 0x7])
-#define CCK_PHY2MAC_RATE(signal) (signal/5)
-
-/* Rates specified in brcms_c_rateset_filter() */
-#define BRCMS_RATES_CCK_OFDM 0
-#define BRCMS_RATES_CCK 1
-#define BRCMS_RATES_OFDM 2
-
-/* sanitize, and sort a rateset with the basic bit(s) preserved, validate rateset */
-extern bool
-brcms_c_rate_hwrs_filter_sort_validate(struct brcms_rateset *rs,
- const struct brcms_rateset *hw_rs,
- bool check_brate, u8 txstreams);
-/* copy rateset src to dst as-is (no masking or sorting) */
-extern void brcms_c_rateset_copy(const struct brcms_rateset *src,
- struct brcms_rateset *dst);
-
-/* would be nice to have these documented ... */
-extern ratespec_t brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
-
-extern void brcms_c_rateset_filter(struct brcms_rateset *src,
- struct brcms_rateset *dst, bool basic_only, u8 rates, uint xmask,
- bool mcsallow);
-
-extern void
-brcms_c_rateset_default(struct brcms_rateset *rs_tgt,
- const struct brcms_rateset *rs_hw, uint phy_type,
- int bandtype, bool cck_only, uint rate_mask,
- bool mcsallow, u8 bw, u8 txstreams);
-
-extern s16 brcms_c_rate_legacy_phyctl(uint rate);
-
-extern void brcms_c_rateset_mcs_upd(struct brcms_rateset *rs, u8 txstreams);
-extern void brcms_c_rateset_mcs_clear(struct brcms_rateset *rateset);
-extern void brcms_c_rateset_mcs_build(struct brcms_rateset *rateset,
- u8 txstreams);
-extern void brcms_c_rateset_bw_mcs_filter(struct brcms_rateset *rateset, u8 bw);
-
-#endif /* _BRCM_RATE_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/scb.h b/drivers/staging/brcm80211/brcmsmac/scb.h
deleted file mode 100644
index d6c8328554d..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/scb.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_SCB_H_
-#define _BRCM_SCB_H_
-
-#include <linux/if_ether.h>
-#include <brcmu_utils.h>
-#include <defs.h>
-#include "types.h"
-
-#define AMPDU_TX_BA_MAX_WSIZE 64 /* max Tx ba window size (in pdu) */
-/* structure to store per-tid state for the ampdu initiator */
-struct scb_ampdu_tid_ini {
- u8 tx_in_transit; /* number of pending mpdus in transit in driver */
- u8 tid; /* initiator tid for easy lookup */
- u8 txretry[AMPDU_TX_BA_MAX_WSIZE]; /* tx retry count; indexed by seq modulo */
- struct scb *scb; /* backptr for easy lookup */
- u8 ba_wsize; /* negotiated ba window size (in pdu) */
-};
-
-#define AMPDU_MAX_SCB_TID NUMPRIO
-
-struct scb_ampdu {
- struct scb *scb; /* back pointer for easy reference */
- u8 mpdu_density; /* mpdu density */
- u8 max_pdu; /* max pdus allowed in ampdu */
- u8 release; /* # of mpdus released at a time */
- u16 min_len; /* min mpdu len to support the density */
- u32 max_rx_ampdu_bytes; /* max ampdu rcv length; 8k, 16k, 32k, 64k */
- struct pktq txq; /* sdu transmit queue pending aggregation */
-
- /* This could easily be a ini[] pointer and we keep this info in wl itself instead
- * of having mac80211 hold it for us. Also could be made dynamic per tid instead of
- * static.
- */
- /* initiator info - per tid (NUMPRIO): */
- struct scb_ampdu_tid_ini ini[AMPDU_MAX_SCB_TID];
-};
-
-#define SCB_MAGIC 0xbeefcafe
-
-/* station control block - one per remote MAC address */
-struct scb {
- u32 magic;
- u32 flags; /* various bit flags as defined below */
- u32 flags2; /* various bit flags2 as defined below */
- u8 state; /* current state bitfield of auth/assoc process */
- u8 ea[ETH_ALEN]; /* station address */
- void *fragbuf[NUMPRIO]; /* defragmentation buffer per prio */
- uint fragresid[NUMPRIO]; /* #bytes unused in frag buffer per prio */
-
- u16 seqctl[NUMPRIO]; /* seqctl of last received frame (for dups) */
- u16 seqctl_nonqos; /* seqctl of last received frame (for dups) for
- * non-QoS data and management
- */
- u16 seqnum[NUMPRIO]; /* WME: driver maintained sw seqnum per priority */
-
- struct scb_ampdu scb_ampdu; /* AMPDU state including per tid info */
-};
-
-/* scb flags */
-#define SCB_WMECAP 0x0040 /* may ONLY be set if WME_ENAB(wlc) */
-#define SCB_HTCAP 0x10000 /* HT (MIMO) capable device */
-#define SCB_IS40 0x80000 /* 40MHz capable */
-#define SCB_STBCCAP 0x40000000 /* STBC Capable */
-#define SCB_WME(a) ((a)->flags & SCB_WMECAP)/* implies WME_ENAB */
-#define SCB_SEQNUM(scb, prio) ((scb)->seqnum[(prio)])
-#define SCB_PS(a) NULL
-#define SCB_STBC_CAP(a) ((a)->flags & SCB_STBCCAP)
-#define SCB_AMPDU(a) true
-#endif /* _BRCM_SCB_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/srom.c b/drivers/staging/brcm80211/brcmsmac/srom.c
deleted file mode 100644
index f39442ed4ce..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/srom.c
+++ /dev/null
@@ -1,1237 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/io.h>
-#include <linux/etherdevice.h>
-#include <stdarg.h>
-
-#include <chipcommon.h>
-#include <brcmu_utils.h>
-#include "nicpci.h"
-#include "aiutils.h"
-#include "otp.h"
-#include "srom.h"
-
-#define SROM_OFFSET(sih) ((sih->ccrev > 31) ? \
- (((sih->cccaps & CC_CAP_SROM) == 0) ? NULL : \
- ((u8 *)curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP)) : \
- ((u8 *)curmap + PCI_BAR0_SPROM_OFFSET))
-
-#if defined(BCMDBG)
-#define WRITE_ENABLE_DELAY 500 /* 500 ms after write enable/disable toggle */
-#define WRITE_WORD_DELAY 20 /* 20 ms between each word write */
-#endif
-
-/* Maximum srom: 6 Kilobits == 768 bytes */
-#define SROM_MAX 768
-
-/* PCI fields */
-#define PCI_F0DEVID 48
-
-#define SROM_WORDS 64
-
-#define SROM_SSID 2
-
-#define SROM_WL1LHMAXP 29
-
-#define SROM_WL1LPAB0 30
-#define SROM_WL1LPAB1 31
-#define SROM_WL1LPAB2 32
-
-#define SROM_WL1HPAB0 33
-#define SROM_WL1HPAB1 34
-#define SROM_WL1HPAB2 35
-
-#define SROM_MACHI_IL0 36
-#define SROM_MACMID_IL0 37
-#define SROM_MACLO_IL0 38
-#define SROM_MACHI_ET1 42
-#define SROM_MACMID_ET1 43
-#define SROM_MACLO_ET1 44
-#define SROM3_MACHI 37
-#define SROM3_MACMID 38
-#define SROM3_MACLO 39
-
-#define SROM_BXARSSI2G 40
-#define SROM_BXARSSI5G 41
-
-#define SROM_TRI52G 42
-#define SROM_TRI5GHL 43
-
-#define SROM_RXPO52G 45
-
-#define SROM_AABREV 46
-/* Fields in AABREV */
-#define SROM_BR_MASK 0x00ff
-#define SROM_CC_MASK 0x0f00
-#define SROM_CC_SHIFT 8
-#define SROM_AA0_MASK 0x3000
-#define SROM_AA0_SHIFT 12
-#define SROM_AA1_MASK 0xc000
-#define SROM_AA1_SHIFT 14
-
-#define SROM_WL0PAB0 47
-#define SROM_WL0PAB1 48
-#define SROM_WL0PAB2 49
-
-#define SROM_LEDBH10 50
-#define SROM_LEDBH32 51
-
-#define SROM_WL10MAXP 52
-
-#define SROM_WL1PAB0 53
-#define SROM_WL1PAB1 54
-#define SROM_WL1PAB2 55
-
-#define SROM_ITT 56
-
-#define SROM_BFL 57
-#define SROM_BFL2 28
-#define SROM3_BFL2 61
-
-#define SROM_AG10 58
-
-#define SROM_CCODE 59
-
-#define SROM_OPO 60
-
-#define SROM3_LEDDC 62
-
-#define SROM_CRCREV 63
-
-/* SROM Rev 4: Reallocate the software part of the srom to accommodate
- * MIMO features. It assumes up to two PCIE functions and 440 bytes
- * of usable srom i.e. the usable storage in chips with OTP that
- * implements hardware redundancy.
- */
-
-#define SROM4_WORDS 220
-
-#define SROM4_SIGN 32
-#define SROM4_SIGNATURE 0x5372
-
-#define SROM4_BREV 33
-
-#define SROM4_BFL0 34
-#define SROM4_BFL1 35
-#define SROM4_BFL2 36
-#define SROM4_BFL3 37
-#define SROM5_BFL0 37
-#define SROM5_BFL1 38
-#define SROM5_BFL2 39
-#define SROM5_BFL3 40
-
-#define SROM4_MACHI 38
-#define SROM4_MACMID 39
-#define SROM4_MACLO 40
-#define SROM5_MACHI 41
-#define SROM5_MACMID 42
-#define SROM5_MACLO 43
-
-#define SROM4_CCODE 41
-#define SROM4_REGREV 42
-#define SROM5_CCODE 34
-#define SROM5_REGREV 35
-
-#define SROM4_LEDBH10 43
-#define SROM4_LEDBH32 44
-#define SROM5_LEDBH10 59
-#define SROM5_LEDBH32 60
-
-#define SROM4_LEDDC 45
-#define SROM5_LEDDC 45
-
-#define SROM4_AA 46
-
-#define SROM4_AG10 47
-#define SROM4_AG32 48
-
-#define SROM4_TXPID2G 49
-#define SROM4_TXPID5G 51
-#define SROM4_TXPID5GL 53
-#define SROM4_TXPID5GH 55
-
-#define SROM4_TXRXC 61
-#define SROM4_TXCHAIN_MASK 0x000f
-#define SROM4_TXCHAIN_SHIFT 0
-#define SROM4_RXCHAIN_MASK 0x00f0
-#define SROM4_RXCHAIN_SHIFT 4
-#define SROM4_SWITCH_MASK 0xff00
-#define SROM4_SWITCH_SHIFT 8
-
-/* Per-path fields */
-#define MAX_PATH_SROM 4
-#define SROM4_PATH0 64
-#define SROM4_PATH1 87
-#define SROM4_PATH2 110
-#define SROM4_PATH3 133
-
-#define SROM4_2G_ITT_MAXP 0
-#define SROM4_2G_PA 1
-#define SROM4_5G_ITT_MAXP 5
-#define SROM4_5GLH_MAXP 6
-#define SROM4_5G_PA 7
-#define SROM4_5GL_PA 11
-#define SROM4_5GH_PA 15
-
-/* All the miriad power offsets */
-#define SROM4_2G_CCKPO 156
-#define SROM4_2G_OFDMPO 157
-#define SROM4_5G_OFDMPO 159
-#define SROM4_5GL_OFDMPO 161
-#define SROM4_5GH_OFDMPO 163
-#define SROM4_2G_MCSPO 165
-#define SROM4_5G_MCSPO 173
-#define SROM4_5GL_MCSPO 181
-#define SROM4_5GH_MCSPO 189
-#define SROM4_CDDPO 197
-#define SROM4_STBCPO 198
-#define SROM4_BW40PO 199
-#define SROM4_BWDUPPO 200
-
-#define SROM4_CRCREV 219
-
-/* SROM Rev 8: Make space for a 48word hardware header for PCIe rev >= 6.
- * This is acombined srom for both MIMO and SISO boards, usable in
- * the .130 4Kilobit OTP with hardware redundancy.
- */
-#define SROM8_BREV 65
-
-#define SROM8_BFL0 66
-#define SROM8_BFL1 67
-#define SROM8_BFL2 68
-#define SROM8_BFL3 69
-
-#define SROM8_MACHI 70
-#define SROM8_MACMID 71
-#define SROM8_MACLO 72
-
-#define SROM8_CCODE 73
-#define SROM8_REGREV 74
-
-#define SROM8_LEDBH10 75
-#define SROM8_LEDBH32 76
-
-#define SROM8_LEDDC 77
-
-#define SROM8_AA 78
-
-#define SROM8_AG10 79
-#define SROM8_AG32 80
-
-#define SROM8_TXRXC 81
-
-#define SROM8_BXARSSI2G 82
-#define SROM8_BXARSSI5G 83
-#define SROM8_TRI52G 84
-#define SROM8_TRI5GHL 85
-#define SROM8_RXPO52G 86
-
-#define SROM8_FEM2G 87
-#define SROM8_FEM5G 88
-#define SROM8_FEM_ANTSWLUT_MASK 0xf800
-#define SROM8_FEM_ANTSWLUT_SHIFT 11
-#define SROM8_FEM_TR_ISO_MASK 0x0700
-#define SROM8_FEM_TR_ISO_SHIFT 8
-#define SROM8_FEM_PDET_RANGE_MASK 0x00f8
-#define SROM8_FEM_PDET_RANGE_SHIFT 3
-#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006
-#define SROM8_FEM_EXTPA_GAIN_SHIFT 1
-#define SROM8_FEM_TSSIPOS_MASK 0x0001
-#define SROM8_FEM_TSSIPOS_SHIFT 0
-
-#define SROM8_THERMAL 89
-
-/* Temp sense related entries */
-#define SROM8_MPWR_RAWTS 90
-#define SROM8_TS_SLP_OPT_CORRX 91
-/* FOC: freiquency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */
-#define SROM8_FOC_HWIQ_IQSWP 92
-
-/* Temperature delta for PHY calibration */
-#define SROM8_PHYCAL_TEMPDELTA 93
-
-/* Per-path offsets & fields */
-#define SROM8_PATH0 96
-#define SROM8_PATH1 112
-#define SROM8_PATH2 128
-#define SROM8_PATH3 144
-
-#define SROM8_2G_ITT_MAXP 0
-#define SROM8_2G_PA 1
-#define SROM8_5G_ITT_MAXP 4
-#define SROM8_5GLH_MAXP 5
-#define SROM8_5G_PA 6
-#define SROM8_5GL_PA 9
-#define SROM8_5GH_PA 12
-
-/* All the miriad power offsets */
-#define SROM8_2G_CCKPO 160
-
-#define SROM8_2G_OFDMPO 161
-#define SROM8_5G_OFDMPO 163
-#define SROM8_5GL_OFDMPO 165
-#define SROM8_5GH_OFDMPO 167
-
-#define SROM8_2G_MCSPO 169
-#define SROM8_5G_MCSPO 177
-#define SROM8_5GL_MCSPO 185
-#define SROM8_5GH_MCSPO 193
-
-#define SROM8_CDDPO 201
-#define SROM8_STBCPO 202
-#define SROM8_BW40PO 203
-#define SROM8_BWDUPPO 204
-
-/* SISO PA parameters are in the path0 spaces */
-#define SROM8_SISO 96
-
-/* Legacy names for SISO PA paramters */
-#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP)
-#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA)
-#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1)
-#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2)
-#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP)
-#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP)
-#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA)
-#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1)
-#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2)
-#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA)
-#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1)
-#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2)
-#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA)
-#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1)
-#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2)
-
-/* SROM REV 9 */
-#define SROM9_2GPO_CCKBW20 160
-#define SROM9_2GPO_CCKBW20UL 161
-#define SROM9_2GPO_LOFDMBW20 162
-#define SROM9_2GPO_LOFDMBW20UL 164
-
-#define SROM9_5GLPO_LOFDMBW20 166
-#define SROM9_5GLPO_LOFDMBW20UL 168
-#define SROM9_5GMPO_LOFDMBW20 170
-#define SROM9_5GMPO_LOFDMBW20UL 172
-#define SROM9_5GHPO_LOFDMBW20 174
-#define SROM9_5GHPO_LOFDMBW20UL 176
-
-#define SROM9_2GPO_MCSBW20 178
-#define SROM9_2GPO_MCSBW20UL 180
-#define SROM9_2GPO_MCSBW40 182
-
-#define SROM9_5GLPO_MCSBW20 184
-#define SROM9_5GLPO_MCSBW20UL 186
-#define SROM9_5GLPO_MCSBW40 188
-#define SROM9_5GMPO_MCSBW20 190
-#define SROM9_5GMPO_MCSBW20UL 192
-#define SROM9_5GMPO_MCSBW40 194
-#define SROM9_5GHPO_MCSBW20 196
-#define SROM9_5GHPO_MCSBW20UL 198
-#define SROM9_5GHPO_MCSBW40 200
-
-#define SROM9_PO_MCS32 202
-#define SROM9_PO_LOFDM40DUP 203
-
-/* SROM flags (see sromvar_t) */
-#define SRFL_MORE 1 /* value continues as described by the next entry */
-#define SRFL_NOFFS 2 /* value bits can't be all one's */
-#define SRFL_PRHEX 4 /* value is in hexdecimal format */
-#define SRFL_PRSIGN 8 /* value is in signed decimal format */
-#define SRFL_CCODE 0x10 /* value is in country code format */
-#define SRFL_ETHADDR 0x20 /* value is an Ethernet address */
-#define SRFL_LEDDC 0x40 /* value is an LED duty cycle */
-#define SRFL_NOVAR 0x80 /* do not generate a nvram param, entry is for mfgc */
-
-/* Max. nvram variable table size */
-#define MAXSZ_NVRAM_VARS 4096
-
-struct brcms_sromvar {
- const char *name;
- u32 revmask;
- u32 flags;
- u16 off;
- u16 mask;
-};
-
-struct brcms_varbuf {
- char *base; /* pointer to buffer base */
- char *buf; /* pointer to current position */
- unsigned int size; /* current (residual) size in bytes */
-};
-
-/* Assumptions:
- * - Ethernet address spans across 3 consective words
- *
- * Table rules:
- * - Add multiple entries next to each other if a value spans across multiple words
- * (even multiple fields in the same word) with each entry except the last having
- * it's SRFL_MORE bit set.
- * - Ethernet address entry does not follow above rule and must not have SRFL_MORE
- * bit set. Its SRFL_ETHADDR bit implies it takes multiple words.
- * - The last entry's name field must be NULL to indicate the end of the table. Other
- * entries must have non-NULL name.
- */
-static const struct brcms_sromvar pci_sromvars[] = {
- {"devid", 0xffffff00, SRFL_PRHEX | SRFL_NOVAR, PCI_F0DEVID, 0xffff},
- {"boardrev", 0x0000000e, SRFL_PRHEX, SROM_AABREV, SROM_BR_MASK},
- {"boardrev", 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
- {"boardrev", 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
- {"boardflags", 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff},
- {"boardflags", 0x00000004, SRFL_PRHEX | SRFL_MORE, SROM_BFL, 0xffff},
- {"", 0, 0, SROM_BFL2, 0xffff},
- {"boardflags", 0x00000008, SRFL_PRHEX | SRFL_MORE, SROM_BFL, 0xffff},
- {"", 0, 0, SROM3_BFL2, 0xffff},
- {"boardflags", 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL0, 0xffff},
- {"", 0, 0, SROM4_BFL1, 0xffff},
- {"boardflags", 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL0, 0xffff},
- {"", 0, 0, SROM5_BFL1, 0xffff},
- {"boardflags", 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL0, 0xffff},
- {"", 0, 0, SROM8_BFL1, 0xffff},
- {"boardflags2", 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL2, 0xffff},
- {"", 0, 0, SROM4_BFL3, 0xffff},
- {"boardflags2", 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL2, 0xffff},
- {"", 0, 0, SROM5_BFL3, 0xffff},
- {"boardflags2", 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL2, 0xffff},
- {"", 0, 0, SROM8_BFL3, 0xffff},
- {"boardtype", 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
- {"boardnum", 0x00000006, 0, SROM_MACLO_IL0, 0xffff},
- {"boardnum", 0x00000008, 0, SROM3_MACLO, 0xffff},
- {"boardnum", 0x00000010, 0, SROM4_MACLO, 0xffff},
- {"boardnum", 0x000000e0, 0, SROM5_MACLO, 0xffff},
- {"boardnum", 0xffffff00, 0, SROM8_MACLO, 0xffff},
- {"cc", 0x00000002, 0, SROM_AABREV, SROM_CC_MASK},
- {"regrev", 0x00000008, 0, SROM_OPO, 0xff00},
- {"regrev", 0x00000010, 0, SROM4_REGREV, 0x00ff},
- {"regrev", 0x000000e0, 0, SROM5_REGREV, 0x00ff},
- {"regrev", 0xffffff00, 0, SROM8_REGREV, 0x00ff},
- {"ledbh0", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff},
- {"ledbh1", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00},
- {"ledbh2", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff},
- {"ledbh3", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00},
- {"ledbh0", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff},
- {"ledbh1", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00},
- {"ledbh2", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff},
- {"ledbh3", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00},
- {"ledbh0", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff},
- {"ledbh1", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00},
- {"ledbh2", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff},
- {"ledbh3", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00},
- {"ledbh0", 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff},
- {"ledbh1", 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0xff00},
- {"ledbh2", 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff},
- {"ledbh3", 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0xff00},
- {"pa0b0", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff},
- {"pa0b1", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff},
- {"pa0b2", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff},
- {"pa0itssit", 0x0000000e, 0, SROM_ITT, 0x00ff},
- {"pa0maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0x00ff},
- {"pa0b0", 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff},
- {"pa0b1", 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff},
- {"pa0b2", 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff},
- {"pa0itssit", 0xffffff00, 0, SROM8_W0_ITTMAXP, 0xff00},
- {"pa0maxpwr", 0xffffff00, 0, SROM8_W0_ITTMAXP, 0x00ff},
- {"opo", 0x0000000c, 0, SROM_OPO, 0x00ff},
- {"opo", 0xffffff00, 0, SROM8_2G_OFDMPO, 0x00ff},
- {"aa2g", 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK},
- {"aa2g", 0x000000f0, 0, SROM4_AA, 0x00ff},
- {"aa2g", 0xffffff00, 0, SROM8_AA, 0x00ff},
- {"aa5g", 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK},
- {"aa5g", 0x000000f0, 0, SROM4_AA, 0xff00},
- {"aa5g", 0xffffff00, 0, SROM8_AA, 0xff00},
- {"ag0", 0x0000000e, 0, SROM_AG10, 0x00ff},
- {"ag1", 0x0000000e, 0, SROM_AG10, 0xff00},
- {"ag0", 0x000000f0, 0, SROM4_AG10, 0x00ff},
- {"ag1", 0x000000f0, 0, SROM4_AG10, 0xff00},
- {"ag2", 0x000000f0, 0, SROM4_AG32, 0x00ff},
- {"ag3", 0x000000f0, 0, SROM4_AG32, 0xff00},
- {"ag0", 0xffffff00, 0, SROM8_AG10, 0x00ff},
- {"ag1", 0xffffff00, 0, SROM8_AG10, 0xff00},
- {"ag2", 0xffffff00, 0, SROM8_AG32, 0x00ff},
- {"ag3", 0xffffff00, 0, SROM8_AG32, 0xff00},
- {"pa1b0", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff},
- {"pa1b1", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff},
- {"pa1b2", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff},
- {"pa1lob0", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff},
- {"pa1lob1", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff},
- {"pa1lob2", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff},
- {"pa1hib0", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff},
- {"pa1hib1", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff},
- {"pa1hib2", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff},
- {"pa1itssit", 0x0000000e, 0, SROM_ITT, 0xff00},
- {"pa1maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0xff00},
- {"pa1lomaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00},
- {"pa1himaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff},
- {"pa1b0", 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff},
- {"pa1b1", 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff},
- {"pa1b2", 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff},
- {"pa1lob0", 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0_LC, 0xffff},
- {"pa1lob1", 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1_LC, 0xffff},
- {"pa1lob2", 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2_LC, 0xffff},
- {"pa1hib0", 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0_HC, 0xffff},
- {"pa1hib1", 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1_HC, 0xffff},
- {"pa1hib2", 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2_HC, 0xffff},
- {"pa1itssit", 0xffffff00, 0, SROM8_W1_ITTMAXP, 0xff00},
- {"pa1maxpwr", 0xffffff00, 0, SROM8_W1_ITTMAXP, 0x00ff},
- {"pa1lomaxpwr", 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0xff00},
- {"pa1himaxpwr", 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0x00ff},
- {"bxa2g", 0x00000008, 0, SROM_BXARSSI2G, 0x1800},
- {"rssisav2g", 0x00000008, 0, SROM_BXARSSI2G, 0x0700},
- {"rssismc2g", 0x00000008, 0, SROM_BXARSSI2G, 0x00f0},
- {"rssismf2g", 0x00000008, 0, SROM_BXARSSI2G, 0x000f},
- {"bxa2g", 0xffffff00, 0, SROM8_BXARSSI2G, 0x1800},
- {"rssisav2g", 0xffffff00, 0, SROM8_BXARSSI2G, 0x0700},
- {"rssismc2g", 0xffffff00, 0, SROM8_BXARSSI2G, 0x00f0},
- {"rssismf2g", 0xffffff00, 0, SROM8_BXARSSI2G, 0x000f},
- {"bxa5g", 0x00000008, 0, SROM_BXARSSI5G, 0x1800},
- {"rssisav5g", 0x00000008, 0, SROM_BXARSSI5G, 0x0700},
- {"rssismc5g", 0x00000008, 0, SROM_BXARSSI5G, 0x00f0},
- {"rssismf5g", 0x00000008, 0, SROM_BXARSSI5G, 0x000f},
- {"bxa5g", 0xffffff00, 0, SROM8_BXARSSI5G, 0x1800},
- {"rssisav5g", 0xffffff00, 0, SROM8_BXARSSI5G, 0x0700},
- {"rssismc5g", 0xffffff00, 0, SROM8_BXARSSI5G, 0x00f0},
- {"rssismf5g", 0xffffff00, 0, SROM8_BXARSSI5G, 0x000f},
- {"tri2g", 0x00000008, 0, SROM_TRI52G, 0x00ff},
- {"tri5g", 0x00000008, 0, SROM_TRI52G, 0xff00},
- {"tri5gl", 0x00000008, 0, SROM_TRI5GHL, 0x00ff},
- {"tri5gh", 0x00000008, 0, SROM_TRI5GHL, 0xff00},
- {"tri2g", 0xffffff00, 0, SROM8_TRI52G, 0x00ff},
- {"tri5g", 0xffffff00, 0, SROM8_TRI52G, 0xff00},
- {"tri5gl", 0xffffff00, 0, SROM8_TRI5GHL, 0x00ff},
- {"tri5gh", 0xffffff00, 0, SROM8_TRI5GHL, 0xff00},
- {"rxpo2g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff},
- {"rxpo5g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00},
- {"rxpo2g", 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff},
- {"rxpo5g", 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00},
- {"txchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_TXCHAIN_MASK},
- {"rxchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_RXCHAIN_MASK},
- {"antswitch", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_SWITCH_MASK},
- {"txchain", 0xffffff00, SRFL_NOFFS, SROM8_TXRXC, SROM4_TXCHAIN_MASK},
- {"rxchain", 0xffffff00, SRFL_NOFFS, SROM8_TXRXC, SROM4_RXCHAIN_MASK},
- {"antswitch", 0xffffff00, SRFL_NOFFS, SROM8_TXRXC, SROM4_SWITCH_MASK},
- {"tssipos2g", 0xffffff00, 0, SROM8_FEM2G, SROM8_FEM_TSSIPOS_MASK},
- {"extpagain2g", 0xffffff00, 0, SROM8_FEM2G, SROM8_FEM_EXTPA_GAIN_MASK},
- {"pdetrange2g", 0xffffff00, 0, SROM8_FEM2G, SROM8_FEM_PDET_RANGE_MASK},
- {"triso2g", 0xffffff00, 0, SROM8_FEM2G, SROM8_FEM_TR_ISO_MASK},
- {"antswctl2g", 0xffffff00, 0, SROM8_FEM2G, SROM8_FEM_ANTSWLUT_MASK},
- {"tssipos5g", 0xffffff00, 0, SROM8_FEM5G, SROM8_FEM_TSSIPOS_MASK},
- {"extpagain5g", 0xffffff00, 0, SROM8_FEM5G, SROM8_FEM_EXTPA_GAIN_MASK},
- {"pdetrange5g", 0xffffff00, 0, SROM8_FEM5G, SROM8_FEM_PDET_RANGE_MASK},
- {"triso5g", 0xffffff00, 0, SROM8_FEM5G, SROM8_FEM_TR_ISO_MASK},
- {"antswctl5g", 0xffffff00, 0, SROM8_FEM5G, SROM8_FEM_ANTSWLUT_MASK},
- {"tempthresh", 0xffffff00, 0, SROM8_THERMAL, 0xff00},
- {"tempoffset", 0xffffff00, 0, SROM8_THERMAL, 0x00ff},
- {"txpid2ga0", 0x000000f0, 0, SROM4_TXPID2G, 0x00ff},
- {"txpid2ga1", 0x000000f0, 0, SROM4_TXPID2G, 0xff00},
- {"txpid2ga2", 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff},
- {"txpid2ga3", 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00},
- {"txpid5ga0", 0x000000f0, 0, SROM4_TXPID5G, 0x00ff},
- {"txpid5ga1", 0x000000f0, 0, SROM4_TXPID5G, 0xff00},
- {"txpid5ga2", 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff},
- {"txpid5ga3", 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00},
- {"txpid5gla0", 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff},
- {"txpid5gla1", 0x000000f0, 0, SROM4_TXPID5GL, 0xff00},
- {"txpid5gla2", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff},
- {"txpid5gla3", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00},
- {"txpid5gha0", 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff},
- {"txpid5gha1", 0x000000f0, 0, SROM4_TXPID5GH, 0xff00},
- {"txpid5gha2", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff},
- {"txpid5gha3", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00},
-
- {"ccode", 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff},
- {"ccode", 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff},
- {"ccode", 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff},
- {"ccode", 0xffffff00, SRFL_CCODE, SROM8_CCODE, 0xffff},
- {"macaddr", 0xffffff00, SRFL_ETHADDR, SROM8_MACHI, 0xffff},
- {"macaddr", 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff},
- {"macaddr", 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff},
- {"macaddr", 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff},
- {"il0macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0, 0xffff},
- {"et1macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1, 0xffff},
- {"leddc", 0xffffff00, SRFL_NOFFS | SRFL_LEDDC, SROM8_LEDDC, 0xffff},
- {"leddc", 0x000000e0, SRFL_NOFFS | SRFL_LEDDC, SROM5_LEDDC, 0xffff},
- {"leddc", 0x00000010, SRFL_NOFFS | SRFL_LEDDC, SROM4_LEDDC, 0xffff},
- {"leddc", 0x00000008, SRFL_NOFFS | SRFL_LEDDC, SROM3_LEDDC, 0xffff},
- {"rawtempsense", 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0x01ff},
- {"measpower", 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0xfe00},
- {"tempsense_slope", 0xffffff00, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX,
- 0x00ff},
- {"tempcorrx", 0xffffff00, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0xfc00},
- {"tempsense_option", 0xffffff00, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX,
- 0x0300},
- {"freqoffset_corr", 0xffffff00, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP,
- 0x000f},
- {"iqcal_swp_dis", 0xffffff00, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0010},
- {"hw_iqcal_en", 0xffffff00, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0020},
- {"phycal_tempdelta", 0xffffff00, 0, SROM8_PHYCAL_TEMPDELTA, 0x00ff},
-
- {"cck2gpo", 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff},
- {"cck2gpo", 0x00000100, 0, SROM8_2G_CCKPO, 0xffff},
- {"ofdm2gpo", 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff},
- {"", 0, 0, SROM4_2G_OFDMPO + 1, 0xffff},
- {"ofdm5gpo", 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff},
- {"", 0, 0, SROM4_5G_OFDMPO + 1, 0xffff},
- {"ofdm5glpo", 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff},
- {"", 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff},
- {"ofdm5ghpo", 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff},
- {"", 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff},
- {"ofdm2gpo", 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff},
- {"", 0, 0, SROM8_2G_OFDMPO + 1, 0xffff},
- {"ofdm5gpo", 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff},
- {"", 0, 0, SROM8_5G_OFDMPO + 1, 0xffff},
- {"ofdm5glpo", 0x00000100, SRFL_MORE, SROM8_5GL_OFDMPO, 0xffff},
- {"", 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff},
- {"ofdm5ghpo", 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff},
- {"", 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff},
- {"mcs2gpo0", 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff},
- {"mcs2gpo1", 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff},
- {"mcs2gpo2", 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff},
- {"mcs2gpo3", 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff},
- {"mcs2gpo4", 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff},
- {"mcs2gpo5", 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff},
- {"mcs2gpo6", 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff},
- {"mcs2gpo7", 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff},
- {"mcs5gpo0", 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff},
- {"mcs5gpo1", 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff},
- {"mcs5gpo2", 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff},
- {"mcs5gpo3", 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff},
- {"mcs5gpo4", 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff},
- {"mcs5gpo5", 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff},
- {"mcs5gpo6", 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff},
- {"mcs5gpo7", 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff},
- {"mcs5glpo0", 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff},
- {"mcs5glpo1", 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff},
- {"mcs5glpo2", 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff},
- {"mcs5glpo3", 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff},
- {"mcs5glpo4", 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff},
- {"mcs5glpo5", 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff},
- {"mcs5glpo6", 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff},
- {"mcs5glpo7", 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff},
- {"mcs5ghpo0", 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff},
- {"mcs5ghpo1", 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff},
- {"mcs5ghpo2", 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff},
- {"mcs5ghpo3", 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff},
- {"mcs5ghpo4", 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff},
- {"mcs5ghpo5", 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff},
- {"mcs5ghpo6", 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff},
- {"mcs5ghpo7", 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff},
- {"mcs2gpo0", 0x00000100, 0, SROM8_2G_MCSPO, 0xffff},
- {"mcs2gpo1", 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff},
- {"mcs2gpo2", 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff},
- {"mcs2gpo3", 0x00000100, 0, SROM8_2G_MCSPO + 3, 0xffff},
- {"mcs2gpo4", 0x00000100, 0, SROM8_2G_MCSPO + 4, 0xffff},
- {"mcs2gpo5", 0x00000100, 0, SROM8_2G_MCSPO + 5, 0xffff},
- {"mcs2gpo6", 0x00000100, 0, SROM8_2G_MCSPO + 6, 0xffff},
- {"mcs2gpo7", 0x00000100, 0, SROM8_2G_MCSPO + 7, 0xffff},
- {"mcs5gpo0", 0x00000100, 0, SROM8_5G_MCSPO, 0xffff},
- {"mcs5gpo1", 0x00000100, 0, SROM8_5G_MCSPO + 1, 0xffff},
- {"mcs5gpo2", 0x00000100, 0, SROM8_5G_MCSPO + 2, 0xffff},
- {"mcs5gpo3", 0x00000100, 0, SROM8_5G_MCSPO + 3, 0xffff},
- {"mcs5gpo4", 0x00000100, 0, SROM8_5G_MCSPO + 4, 0xffff},
- {"mcs5gpo5", 0x00000100, 0, SROM8_5G_MCSPO + 5, 0xffff},
- {"mcs5gpo6", 0x00000100, 0, SROM8_5G_MCSPO + 6, 0xffff},
- {"mcs5gpo7", 0x00000100, 0, SROM8_5G_MCSPO + 7, 0xffff},
- {"mcs5glpo0", 0x00000100, 0, SROM8_5GL_MCSPO, 0xffff},
- {"mcs5glpo1", 0x00000100, 0, SROM8_5GL_MCSPO + 1, 0xffff},
- {"mcs5glpo2", 0x00000100, 0, SROM8_5GL_MCSPO + 2, 0xffff},
- {"mcs5glpo3", 0x00000100, 0, SROM8_5GL_MCSPO + 3, 0xffff},
- {"mcs5glpo4", 0x00000100, 0, SROM8_5GL_MCSPO + 4, 0xffff},
- {"mcs5glpo5", 0x00000100, 0, SROM8_5GL_MCSPO + 5, 0xffff},
- {"mcs5glpo6", 0x00000100, 0, SROM8_5GL_MCSPO + 6, 0xffff},
- {"mcs5glpo7", 0x00000100, 0, SROM8_5GL_MCSPO + 7, 0xffff},
- {"mcs5ghpo0", 0x00000100, 0, SROM8_5GH_MCSPO, 0xffff},
- {"mcs5ghpo1", 0x00000100, 0, SROM8_5GH_MCSPO + 1, 0xffff},
- {"mcs5ghpo2", 0x00000100, 0, SROM8_5GH_MCSPO + 2, 0xffff},
- {"mcs5ghpo3", 0x00000100, 0, SROM8_5GH_MCSPO + 3, 0xffff},
- {"mcs5ghpo4", 0x00000100, 0, SROM8_5GH_MCSPO + 4, 0xffff},
- {"mcs5ghpo5", 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff},
- {"mcs5ghpo6", 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff},
- {"mcs5ghpo7", 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff},
- {"cddpo", 0x000000f0, 0, SROM4_CDDPO, 0xffff},
- {"stbcpo", 0x000000f0, 0, SROM4_STBCPO, 0xffff},
- {"bw40po", 0x000000f0, 0, SROM4_BW40PO, 0xffff},
- {"bwduppo", 0x000000f0, 0, SROM4_BWDUPPO, 0xffff},
- {"cddpo", 0x00000100, 0, SROM8_CDDPO, 0xffff},
- {"stbcpo", 0x00000100, 0, SROM8_STBCPO, 0xffff},
- {"bw40po", 0x00000100, 0, SROM8_BW40PO, 0xffff},
- {"bwduppo", 0x00000100, 0, SROM8_BWDUPPO, 0xffff},
-
- /* power per rate from sromrev 9 */
- {"cckbw202gpo", 0xfffffe00, 0, SROM9_2GPO_CCKBW20, 0xffff},
- {"cckbw20ul2gpo", 0xfffffe00, 0, SROM9_2GPO_CCKBW20UL, 0xffff},
- {"legofdmbw202gpo", 0xfffffe00, SRFL_MORE, SROM9_2GPO_LOFDMBW20,
- 0xffff},
- {"", 0, 0, SROM9_2GPO_LOFDMBW20 + 1, 0xffff},
- {"legofdmbw20ul2gpo", 0xfffffe00, SRFL_MORE, SROM9_2GPO_LOFDMBW20UL,
- 0xffff},
- {"", 0, 0, SROM9_2GPO_LOFDMBW20UL + 1, 0xffff},
- {"legofdmbw205glpo", 0xfffffe00, SRFL_MORE, SROM9_5GLPO_LOFDMBW20,
- 0xffff},
- {"", 0, 0, SROM9_5GLPO_LOFDMBW20 + 1, 0xffff},
- {"legofdmbw20ul5glpo", 0xfffffe00, SRFL_MORE, SROM9_5GLPO_LOFDMBW20UL,
- 0xffff},
- {"", 0, 0, SROM9_5GLPO_LOFDMBW20UL + 1, 0xffff},
- {"legofdmbw205gmpo", 0xfffffe00, SRFL_MORE, SROM9_5GMPO_LOFDMBW20,
- 0xffff},
- {"", 0, 0, SROM9_5GMPO_LOFDMBW20 + 1, 0xffff},
- {"legofdmbw20ul5gmpo", 0xfffffe00, SRFL_MORE, SROM9_5GMPO_LOFDMBW20UL,
- 0xffff},
- {"", 0, 0, SROM9_5GMPO_LOFDMBW20UL + 1, 0xffff},
- {"legofdmbw205ghpo", 0xfffffe00, SRFL_MORE, SROM9_5GHPO_LOFDMBW20,
- 0xffff},
- {"", 0, 0, SROM9_5GHPO_LOFDMBW20 + 1, 0xffff},
- {"legofdmbw20ul5ghpo", 0xfffffe00, SRFL_MORE, SROM9_5GHPO_LOFDMBW20UL,
- 0xffff},
- {"", 0, 0, SROM9_5GHPO_LOFDMBW20UL + 1, 0xffff},
- {"mcsbw202gpo", 0xfffffe00, SRFL_MORE, SROM9_2GPO_MCSBW20, 0xffff},
- {"", 0, 0, SROM9_2GPO_MCSBW20 + 1, 0xffff},
- {"mcsbw20ul2gpo", 0xfffffe00, SRFL_MORE, SROM9_2GPO_MCSBW20UL, 0xffff},
- {"", 0, 0, SROM9_2GPO_MCSBW20UL + 1, 0xffff},
- {"mcsbw402gpo", 0xfffffe00, SRFL_MORE, SROM9_2GPO_MCSBW40, 0xffff},
- {"", 0, 0, SROM9_2GPO_MCSBW40 + 1, 0xffff},
- {"mcsbw205glpo", 0xfffffe00, SRFL_MORE, SROM9_5GLPO_MCSBW20, 0xffff},
- {"", 0, 0, SROM9_5GLPO_MCSBW20 + 1, 0xffff},
- {"mcsbw20ul5glpo", 0xfffffe00, SRFL_MORE, SROM9_5GLPO_MCSBW20UL,
- 0xffff},
- {"", 0, 0, SROM9_5GLPO_MCSBW20UL + 1, 0xffff},
- {"mcsbw405glpo", 0xfffffe00, SRFL_MORE, SROM9_5GLPO_MCSBW40, 0xffff},
- {"", 0, 0, SROM9_5GLPO_MCSBW40 + 1, 0xffff},
- {"mcsbw205gmpo", 0xfffffe00, SRFL_MORE, SROM9_5GMPO_MCSBW20, 0xffff},
- {"", 0, 0, SROM9_5GMPO_MCSBW20 + 1, 0xffff},
- {"mcsbw20ul5gmpo", 0xfffffe00, SRFL_MORE, SROM9_5GMPO_MCSBW20UL,
- 0xffff},
- {"", 0, 0, SROM9_5GMPO_MCSBW20UL + 1, 0xffff},
- {"mcsbw405gmpo", 0xfffffe00, SRFL_MORE, SROM9_5GMPO_MCSBW40, 0xffff},
- {"", 0, 0, SROM9_5GMPO_MCSBW40 + 1, 0xffff},
- {"mcsbw205ghpo", 0xfffffe00, SRFL_MORE, SROM9_5GHPO_MCSBW20, 0xffff},
- {"", 0, 0, SROM9_5GHPO_MCSBW20 + 1, 0xffff},
- {"mcsbw20ul5ghpo", 0xfffffe00, SRFL_MORE, SROM9_5GHPO_MCSBW20UL,
- 0xffff},
- {"", 0, 0, SROM9_5GHPO_MCSBW20UL + 1, 0xffff},
- {"mcsbw405ghpo", 0xfffffe00, SRFL_MORE, SROM9_5GHPO_MCSBW40, 0xffff},
- {"", 0, 0, SROM9_5GHPO_MCSBW40 + 1, 0xffff},
- {"mcs32po", 0xfffffe00, 0, SROM9_PO_MCS32, 0xffff},
- {"legofdm40duppo", 0xfffffe00, 0, SROM9_PO_LOFDM40DUP, 0xffff},
-
- {NULL, 0, 0, 0, 0}
-};
-
-static const struct brcms_sromvar perpath_pci_sromvars[] = {
- {"maxp2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff},
- {"itt2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00},
- {"itt5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00},
- {"pa2gw0a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff},
- {"pa2gw1a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff},
- {"pa2gw2a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff},
- {"pa2gw3a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff},
- {"maxp5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff},
- {"maxp5gha", 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff},
- {"maxp5gla", 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00},
- {"pa5gw0a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff},
- {"pa5gw1a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff},
- {"pa5gw2a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff},
- {"pa5gw3a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff},
- {"pa5glw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff},
- {"pa5glw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1, 0xffff},
- {"pa5glw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2, 0xffff},
- {"pa5glw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3, 0xffff},
- {"pa5ghw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff},
- {"pa5ghw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1, 0xffff},
- {"pa5ghw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2, 0xffff},
- {"pa5ghw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3, 0xffff},
- {"maxp2ga", 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0x00ff},
- {"itt2ga", 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0xff00},
- {"itt5ga", 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0xff00},
- {"pa2gw0a", 0xffffff00, SRFL_PRHEX, SROM8_2G_PA, 0xffff},
- {"pa2gw1a", 0xffffff00, SRFL_PRHEX, SROM8_2G_PA + 1, 0xffff},
- {"pa2gw2a", 0xffffff00, SRFL_PRHEX, SROM8_2G_PA + 2, 0xffff},
- {"maxp5ga", 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0x00ff},
- {"maxp5gha", 0xffffff00, 0, SROM8_5GLH_MAXP, 0x00ff},
- {"maxp5gla", 0xffffff00, 0, SROM8_5GLH_MAXP, 0xff00},
- {"pa5gw0a", 0xffffff00, SRFL_PRHEX, SROM8_5G_PA, 0xffff},
- {"pa5gw1a", 0xffffff00, SRFL_PRHEX, SROM8_5G_PA + 1, 0xffff},
- {"pa5gw2a", 0xffffff00, SRFL_PRHEX, SROM8_5G_PA + 2, 0xffff},
- {"pa5glw0a", 0xffffff00, SRFL_PRHEX, SROM8_5GL_PA, 0xffff},
- {"pa5glw1a", 0xffffff00, SRFL_PRHEX, SROM8_5GL_PA + 1, 0xffff},
- {"pa5glw2a", 0xffffff00, SRFL_PRHEX, SROM8_5GL_PA + 2, 0xffff},
- {"pa5ghw0a", 0xffffff00, SRFL_PRHEX, SROM8_5GH_PA, 0xffff},
- {"pa5ghw1a", 0xffffff00, SRFL_PRHEX, SROM8_5GH_PA + 1, 0xffff},
- {"pa5ghw2a", 0xffffff00, SRFL_PRHEX, SROM8_5GH_PA + 2, 0xffff},
- {NULL, 0, 0, 0, 0}
-};
-
-static void _initvars_srom_pci(u8 sromrev, u16 *srom, uint off,
- struct brcms_varbuf *b);
-static int initvars_srom_pci(struct si_pub *sih, void *curmap, char **vars,
- uint *count);
-static int sprom_read_pci(struct si_pub *sih, u16 *sprom,
- uint wordoff, u16 *buf, uint nwords, bool check_crc);
-#if defined(BCMNVRAMR)
-static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz);
-#endif
-
-static int initvars_table(char *start, char *end,
- char **vars, uint *count);
-
-/* Initialization of varbuf structure */
-static void varbuf_init(struct brcms_varbuf *b, char *buf, uint size)
-{
- b->size = size;
- b->base = b->buf = buf;
-}
-
-/* append a null terminated var=value string */
-static int varbuf_append(struct brcms_varbuf *b, const char *fmt, ...)
-{
- va_list ap;
- int r;
- size_t len;
- char *s;
-
- if (b->size < 2)
- return 0;
-
- va_start(ap, fmt);
- r = vsnprintf(b->buf, b->size, fmt, ap);
- va_end(ap);
-
- /* C99 snprintf behavior returns r >= size on overflow,
- * others return -1 on overflow.
- * All return -1 on format error.
- * We need to leave room for 2 null terminations, one for the current var
- * string, and one for final null of the var table. So check that the
- * strlen written, r, leaves room for 2 chars.
- */
- if ((r == -1) || (r > (int)(b->size - 2))) {
- b->size = 0;
- return 0;
- }
-
- /* Remove any earlier occurrence of the same variable */
- s = strchr(b->buf, '=');
- if (s != NULL) {
- len = (size_t) (s - b->buf);
- for (s = b->base; s < b->buf;) {
- if ((memcmp(s, b->buf, len) == 0) && s[len] == '=') {
- len = strlen(s) + 1;
- memmove(s, (s + len),
- ((b->buf + r + 1) - (s + len)));
- b->buf -= len;
- b->size += (unsigned int)len;
- break;
- }
-
- while (*s++)
- ;
- }
- }
-
- /* skip over this string's null termination */
- r++;
- b->size -= r;
- b->buf += r;
-
- return r;
-}
-
-/*
- * Initialize local vars from the right source for this platform.
- * Return 0 on success, nonzero on error.
- */
-int srom_var_init(struct si_pub *sih, uint bustype, void *curmap,
- char **vars, uint *count)
-{
- uint len;
-
- len = 0;
-
- if (vars == NULL || count == NULL)
- return 0;
-
- *vars = NULL;
- *count = 0;
-
- if (curmap != NULL && bustype == PCI_BUS)
- return initvars_srom_pci(sih, curmap, vars, count);
-
- return -EINVAL;
-}
-
-static inline void ltoh16_buf(u16 *buf, unsigned int size)
-{
- for (size /= 2; size; size--)
- *(buf + size) = le16_to_cpu(*(buf + size));
-}
-
-static inline void htol16_buf(u16 *buf, unsigned int size)
-{
- for (size /= 2; size; size--)
- *(buf + size) = cpu_to_le16(*(buf + size));
-}
-
-/*
- * Read in and validate sprom.
- * Return 0 on success, nonzero on error.
- */
-static int
-sprom_read_pci(struct si_pub *sih, u16 *sprom, uint wordoff,
- u16 *buf, uint nwords, bool check_crc)
-{
- int err = 0;
- uint i;
-
- /* read the sprom */
- for (i = 0; i < nwords; i++)
- buf[i] = R_REG(&sprom[wordoff + i]);
-
- if (check_crc) {
-
- if (buf[0] == 0xffff) {
- /* The hardware thinks that an srom that starts with 0xffff
- * is blank, regardless of the rest of the content, so declare
- * it bad.
- */
- return -ENODATA;
- }
-
- /* fixup the endianness so crc8 will pass */
- htol16_buf(buf, nwords * 2);
- if (brcmu_crc8((u8 *) buf, nwords * 2, CRC8_INIT_VALUE) !=
- CRC8_GOOD_VALUE) {
- /* DBG only pci always read srom4 first, then srom8/9 */
- err = -EIO;
- }
- /* now correct the endianness of the byte array */
- ltoh16_buf(buf, nwords * 2);
- }
- return err;
-}
-
-#if defined(BCMNVRAMR)
-static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
-{
- u8 *otp;
- uint sz = OTP_SZ_MAX / 2; /* size in words */
- int err = 0;
-
- otp = kzalloc(OTP_SZ_MAX, GFP_ATOMIC);
- if (otp == NULL) {
- return -ENOMEM;
- }
-
- err = otp_read_region(sih, OTP_HW_RGN, (u16 *) otp, &sz);
-
- memcpy(buf, otp, bufsz);
-
- kfree(otp);
-
- /* Check CRC */
- if (buf[0] == 0xffff) {
- /* The hardware thinks that an srom that starts with 0xffff
- * is blank, regardless of the rest of the content, so declare
- * it bad.
- */
- return -ENODATA;
- }
-
- /* fixup the endianness so crc8 will pass */
- htol16_buf(buf, bufsz);
- if (brcmu_crc8((u8 *) buf, SROM4_WORDS * 2, CRC8_INIT_VALUE) !=
- CRC8_GOOD_VALUE) {
- err = -EIO;
- }
- /* now correct the endianness of the byte array */
- ltoh16_buf(buf, bufsz);
-
- return err;
-}
-#endif /* defined(BCMNVRAMR) */
-/*
-* Create variable table from memory.
-* Return 0 on success, nonzero on error.
-*/
-static int initvars_table(char *start, char *end,
- char **vars, uint *count)
-{
- int c = (int)(end - start);
-
- /* do it only when there is more than just the null string */
- if (c > 1) {
- char *vp = kmalloc(c, GFP_ATOMIC);
- if (!vp)
- return -ENOMEM;
- memcpy(vp, start, c);
- *vars = vp;
- *count = c;
- } else {
- *vars = NULL;
- *count = 0;
- }
-
- return 0;
-}
-
-/* Parse SROM and create name=value pairs. 'srom' points to
- * the SROM word array. 'off' specifies the offset of the
- * first word 'srom' points to, which should be either 0 or
- * SROM3_SWRG_OFF (full SROM or software region).
- */
-
-static uint mask_shift(u16 mask)
-{
- uint i;
- for (i = 0; i < (sizeof(mask) << 3); i++) {
- if (mask & (1 << i))
- return i;
- }
- return 0;
-}
-
-static uint mask_width(u16 mask)
-{
- int i;
- for (i = (sizeof(mask) << 3) - 1; i >= 0; i--) {
- if (mask & (1 << i))
- return (uint) (i - mask_shift(mask) + 1);
- }
- return 0;
-}
-
-static void
-_initvars_srom_pci(u8 sromrev, u16 *srom, uint off, struct brcms_varbuf *b)
-{
- u16 w;
- u32 val;
- const struct brcms_sromvar *srv;
- uint width;
- uint flags;
- u32 sr = (1 << sromrev);
-
- varbuf_append(b, "sromrev=%d", sromrev);
-
- for (srv = pci_sromvars; srv->name != NULL; srv++) {
- const char *name;
-
- if ((srv->revmask & sr) == 0)
- continue;
-
- if (srv->off < off)
- continue;
-
- flags = srv->flags;
- name = srv->name;
-
- /* This entry is for mfgc only. Don't generate param for it, */
- if (flags & SRFL_NOVAR)
- continue;
-
- if (flags & SRFL_ETHADDR) {
- u8 ea[ETH_ALEN];
-
- ea[0] = (srom[srv->off - off] >> 8) & 0xff;
- ea[1] = srom[srv->off - off] & 0xff;
- ea[2] = (srom[srv->off + 1 - off] >> 8) & 0xff;
- ea[3] = srom[srv->off + 1 - off] & 0xff;
- ea[4] = (srom[srv->off + 2 - off] >> 8) & 0xff;
- ea[5] = srom[srv->off + 2 - off] & 0xff;
-
- varbuf_append(b, "%s=%pM", name, ea);
- } else {
- w = srom[srv->off - off];
- val = (w & srv->mask) >> mask_shift(srv->mask);
- width = mask_width(srv->mask);
-
- while (srv->flags & SRFL_MORE) {
- srv++;
- if (srv->off == 0 || srv->off < off)
- continue;
-
- w = srom[srv->off - off];
- val +=
- ((w & srv->mask) >> mask_shift(srv->
- mask)) <<
- width;
- width += mask_width(srv->mask);
- }
-
- if ((flags & SRFL_NOFFS)
- && ((int)val == (1 << width) - 1))
- continue;
-
- if (flags & SRFL_CCODE) {
- if (val == 0)
- varbuf_append(b, "ccode=");
- else
- varbuf_append(b, "ccode=%c%c",
- (val >> 8), (val & 0xff));
- }
- /* LED Powersave duty cycle has to be scaled:
- *(oncount >> 24) (offcount >> 8)
- */
- else if (flags & SRFL_LEDDC) {
- u32 w32 = (((val >> 8) & 0xff) << 24) | /* oncount */
- (((val & 0xff)) << 8); /* offcount */
- varbuf_append(b, "leddc=%d", w32);
- } else if (flags & SRFL_PRHEX)
- varbuf_append(b, "%s=0x%x", name, val);
- else if ((flags & SRFL_PRSIGN)
- && (val & (1 << (width - 1))))
- varbuf_append(b, "%s=%d", name,
- (int)(val | (~0 << width)));
- else
- varbuf_append(b, "%s=%u", name, val);
- }
- }
-
- if (sromrev >= 4) {
- /* Do per-path variables */
- uint p, pb, psz;
-
- if (sromrev >= 8) {
- pb = SROM8_PATH0;
- psz = SROM8_PATH1 - SROM8_PATH0;
- } else {
- pb = SROM4_PATH0;
- psz = SROM4_PATH1 - SROM4_PATH0;
- }
-
- for (p = 0; p < MAX_PATH_SROM; p++) {
- for (srv = perpath_pci_sromvars; srv->name != NULL;
- srv++) {
- if ((srv->revmask & sr) == 0)
- continue;
-
- if (pb + srv->off < off)
- continue;
-
- /* This entry is for mfgc only. Don't generate param for it, */
- if (srv->flags & SRFL_NOVAR)
- continue;
-
- w = srom[pb + srv->off - off];
- val = (w & srv->mask) >> mask_shift(srv->mask);
- width = mask_width(srv->mask);
-
- /* Cheating: no per-path var is more than 1 word */
-
- if ((srv->flags & SRFL_NOFFS)
- && ((int)val == (1 << width) - 1))
- continue;
-
- if (srv->flags & SRFL_PRHEX)
- varbuf_append(b, "%s%d=0x%x", srv->name,
- p, val);
- else
- varbuf_append(b, "%s%d=%d", srv->name,
- p, val);
- }
- pb += psz;
- }
- }
-}
-
-/*
- * Initialize nonvolatile variable table from sprom.
- * Return 0 on success, nonzero on error.
- */
-static int initvars_srom_pci(struct si_pub *sih, void *curmap, char **vars,
- uint *count)
-{
- u16 *srom, *sromwindow;
- u8 sromrev = 0;
- u32 sr;
- struct brcms_varbuf b;
- char *vp, *base = NULL;
- int err = 0;
-
- /*
- * Apply CRC over SROM content regardless SROM is present or not.
- */
- srom = kmalloc(SROM_MAX, GFP_ATOMIC);
- if (!srom)
- return -ENOMEM;
-
- sromwindow = (u16 *) SROM_OFFSET(sih);
- if (ai_is_sprom_available(sih)) {
- err = sprom_read_pci(sih, sromwindow, 0, srom, SROM_WORDS,
- true);
-
- if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) ||
- (((sih->buscoretype == PCIE_CORE_ID)
- && (sih->buscorerev >= 6))
- || ((sih->buscoretype == PCI_CORE_ID)
- && (sih->buscorerev >= 0xe)))) {
- /* sromrev >= 4, read more */
- err = sprom_read_pci(sih, sromwindow, 0, srom,
- SROM4_WORDS, true);
- sromrev = srom[SROM4_CRCREV] & 0xff;
- } else if (err == 0) {
- /* srom is good and is rev < 4 */
- /* top word of sprom contains version and crc8 */
- sromrev = srom[SROM_CRCREV] & 0xff;
- /* bcm4401 sroms misprogrammed */
- if (sromrev == 0x10)
- sromrev = 1;
- }
- }
-#if defined(BCMNVRAMR)
- /* Use OTP if SPROM not available */
- else {
- err = otp_read_pci(sih, srom, SROM_MAX);
- if (err == 0)
- /* OTP only contain SROM rev8/rev9 for now */
- sromrev = srom[SROM4_CRCREV] & 0xff;
- }
-#else
- else
- err = -ENODEV;
-#endif
-
- if (!err) {
- /* Bitmask for the sromrev */
- sr = 1 << sromrev;
-
- /* srom version check: Current valid versions: 1, 2, 3, 4, 5, 8, 9 */
- if ((sr & 0x33e) == 0) {
- err = -EINVAL;
- goto errout;
- }
-
- base = kmalloc(MAXSZ_NVRAM_VARS, GFP_ATOMIC);
- if (!base) {
- err = -ENOMEM;
- goto errout;
- }
-
- varbuf_init(&b, base, MAXSZ_NVRAM_VARS);
-
- /* parse SROM into name=value pairs. */
- _initvars_srom_pci(sromrev, srom, 0, &b);
-
- /* final nullbyte terminator */
- vp = b.buf;
- *vp++ = '\0';
-
- err = initvars_table(base, vp, vars, count);
- kfree(base);
- }
-
-errout:
- kfree(srom);
- return err;
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/stf.c b/drivers/staging/brcm80211/brcmsmac/stf.c
deleted file mode 100644
index a55ff010178..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/stf.c
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <net/mac80211.h>
-
-#include "types.h"
-#include "d11.h"
-#include "rate.h"
-#include "phy/phy_hal.h"
-#include "channel.h"
-#include "main.h"
-#include "bmac.h"
-#include "stf.h"
-
-#define MIN_SPATIAL_EXPANSION 0
-#define MAX_SPATIAL_EXPANSION 1
-
-#define BRCMS_STF_SS_STBC_RX(wlc) (BRCMS_ISNPHY(wlc->band) && \
- NREV_GT(wlc->band->phyrev, 3) && NREV_LE(wlc->band->phyrev, 6))
-
-static bool brcms_c_stf_stbc_tx_set(struct brcms_c_info *wlc, s32 int_val);
-static int brcms_c_stf_txcore_set(struct brcms_c_info *wlc, u8 Nsts, u8 val);
-static int brcms_c_stf_spatial_policy_set(struct brcms_c_info *wlc, int val);
-static void brcms_c_stf_stbc_rx_ht_update(struct brcms_c_info *wlc, int val);
-
-static void _brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-static u16 _brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
- ratespec_t rspec);
-
-#define NSTS_1 1
-#define NSTS_2 2
-#define NSTS_3 3
-#define NSTS_4 4
-const u8 txcore_default[5] = {
- (0), /* bitmap of the core enabled */
- (0x01), /* For Nsts = 1, enable core 1 */
- (0x03), /* For Nsts = 2, enable core 1 & 2 */
- (0x07), /* For Nsts = 3, enable core 1, 2 & 3 */
- (0x0f) /* For Nsts = 4, enable all cores */
-};
-
-static void brcms_c_stf_stbc_rx_ht_update(struct brcms_c_info *wlc, int val)
-{
- /* MIMOPHYs rev3-6 cannot receive STBC with only one rx core active */
- if (BRCMS_STF_SS_STBC_RX(wlc)) {
- if ((wlc->stf->rxstreams == 1) && (val != HT_CAP_RX_STBC_NO))
- return;
- }
-
- wlc->ht_cap.cap_info &= ~IEEE80211_HT_CAP_RX_STBC;
- wlc->ht_cap.cap_info |= (val << IEEE80211_HT_CAP_RX_STBC_SHIFT);
-
- if (wlc->pub->up) {
- brcms_c_update_beacon(wlc);
- brcms_c_update_probe_resp(wlc, true);
- }
-}
-
-/* every WLC_TEMPSENSE_PERIOD seconds temperature check to decide whether to turn on/off txchain */
-void brcms_c_tempsense_upd(struct brcms_c_info *wlc)
-{
- struct brcms_phy_pub *pi = wlc->band->pi;
- uint active_chains, txchain;
-
- /* Check if the chip is too hot. Disable one Tx chain, if it is */
- /* high 4 bits are for Rx chain, low 4 bits are for Tx chain */
- active_chains = wlc_phy_stf_chain_active_get(pi);
- txchain = active_chains & 0xf;
-
- if (wlc->stf->txchain == wlc->stf->hw_txchain) {
- if (txchain && (txchain < wlc->stf->hw_txchain)) {
- /* turn off 1 tx chain */
- brcms_c_stf_txchain_set(wlc, txchain, true);
- }
- } else if (wlc->stf->txchain < wlc->stf->hw_txchain) {
- if (txchain == wlc->stf->hw_txchain) {
- /* turn back on txchain */
- brcms_c_stf_txchain_set(wlc, txchain, true);
- }
- }
-}
-
-void
-brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc, u16 *ss_algo_channel,
- chanspec_t chanspec)
-{
- struct tx_power power;
- u8 siso_mcs_id, cdd_mcs_id, stbc_mcs_id;
-
- /* Clear previous settings */
- *ss_algo_channel = 0;
-
- if (!wlc->pub->up) {
- *ss_algo_channel = (u16) -1;
- return;
- }
-
- wlc_phy_txpower_get_current(wlc->band->pi, &power,
- CHSPEC_CHANNEL(chanspec));
-
- siso_mcs_id = (CHSPEC_IS40(chanspec)) ?
- WL_TX_POWER_MCS40_SISO_FIRST : WL_TX_POWER_MCS20_SISO_FIRST;
- cdd_mcs_id = (CHSPEC_IS40(chanspec)) ?
- WL_TX_POWER_MCS40_CDD_FIRST : WL_TX_POWER_MCS20_CDD_FIRST;
- stbc_mcs_id = (CHSPEC_IS40(chanspec)) ?
- WL_TX_POWER_MCS40_STBC_FIRST : WL_TX_POWER_MCS20_STBC_FIRST;
-
- /* criteria to choose stf mode */
-
- /* the "+3dbm (12 0.25db units)" is to account for the fact that with CDD, tx occurs
- * on both chains
- */
- if (power.target[siso_mcs_id] > (power.target[cdd_mcs_id] + 12))
- setbit(ss_algo_channel, PHY_TXC1_MODE_SISO);
- else
- setbit(ss_algo_channel, PHY_TXC1_MODE_CDD);
-
- /* STBC is ORed into to algo channel as STBC requires per-packet SCB capability check
- * so cannot be default mode of operation. One of SISO, CDD have to be set
- */
- if (power.target[siso_mcs_id] <= (power.target[stbc_mcs_id] + 12))
- setbit(ss_algo_channel, PHY_TXC1_MODE_STBC);
-}
-
-static bool brcms_c_stf_stbc_tx_set(struct brcms_c_info *wlc, s32 int_val)
-{
- if ((int_val != AUTO) && (int_val != OFF) && (int_val != ON)) {
- return false;
- }
-
- if ((int_val == ON) && (wlc->stf->txstreams == 1))
- return false;
-
- if ((int_val == OFF) || (wlc->stf->txstreams == 1)
- || !BRCMS_STBC_CAP_PHY(wlc))
- wlc->ht_cap.cap_info &= ~IEEE80211_HT_CAP_TX_STBC;
- else
- wlc->ht_cap.cap_info |= IEEE80211_HT_CAP_TX_STBC;
-
- wlc->bandstate[BAND_2G_INDEX]->band_stf_stbc_tx = (s8) int_val;
- wlc->bandstate[BAND_5G_INDEX]->band_stf_stbc_tx = (s8) int_val;
-
- return true;
-}
-
-bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val)
-{
- if ((int_val != HT_CAP_RX_STBC_NO)
- && (int_val != HT_CAP_RX_STBC_ONE_STREAM)) {
- return false;
- }
-
- if (BRCMS_STF_SS_STBC_RX(wlc)) {
- if ((int_val != HT_CAP_RX_STBC_NO)
- && (wlc->stf->rxstreams == 1))
- return false;
- }
-
- brcms_c_stf_stbc_rx_ht_update(wlc, int_val);
- return true;
-}
-
-static int brcms_c_stf_txcore_set(struct brcms_c_info *wlc, u8 Nsts,
- u8 core_mask)
-{
- BCMMSG(wlc->wiphy, "wl%d: Nsts %d core_mask %x\n",
- wlc->pub->unit, Nsts, core_mask);
-
- if (BRCMS_BITSCNT(core_mask) > wlc->stf->txstreams) {
- core_mask = 0;
- }
-
- if ((BRCMS_BITSCNT(core_mask) == wlc->stf->txstreams) &&
- ((core_mask & ~wlc->stf->txchain)
- || !(core_mask & wlc->stf->txchain))) {
- core_mask = wlc->stf->txchain;
- }
-
- wlc->stf->txcore[Nsts] = core_mask;
- /* Nsts = 1..4, txcore index = 1..4 */
- if (Nsts == 1) {
- /* Needs to update beacon and ucode generated response
- * frames when 1 stream core map changed
- */
- wlc->stf->phytxant = core_mask << PHY_TXC_ANT_SHIFT;
- brcms_b_txant_set(wlc->hw, wlc->stf->phytxant);
- if (wlc->clk) {
- brcms_c_suspend_mac_and_wait(wlc);
- brcms_c_beacon_phytxctl_txant_upd(wlc, wlc->bcn_rspec);
- brcms_c_enable_mac(wlc);
- }
- }
-
- return 0;
-}
-
-static int brcms_c_stf_spatial_policy_set(struct brcms_c_info *wlc, int val)
-{
- int i;
- u8 core_mask = 0;
-
- BCMMSG(wlc->wiphy, "wl%d: val %x\n", wlc->pub->unit, val);
-
- wlc->stf->spatial_policy = (s8) val;
- for (i = 1; i <= MAX_STREAMS_SUPPORTED; i++) {
- core_mask = (val == MAX_SPATIAL_EXPANSION) ?
- wlc->stf->txchain : txcore_default[i];
- brcms_c_stf_txcore_set(wlc, (u8) i, core_mask);
- }
- return 0;
-}
-
-int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force)
-{
- u8 txchain = (u8) int_val;
- u8 txstreams;
- uint i;
-
- if (wlc->stf->txchain == txchain)
- return 0;
-
- if ((txchain & ~wlc->stf->hw_txchain)
- || !(txchain & wlc->stf->hw_txchain))
- return -EINVAL;
-
- /* if nrate override is configured to be non-SISO STF mode, reject reducing txchain to 1 */
- txstreams = (u8) BRCMS_BITSCNT(txchain);
- if (txstreams > MAX_STREAMS_SUPPORTED)
- return -EINVAL;
-
- if (txstreams == 1) {
- for (i = 0; i < NBANDS(wlc); i++)
- if ((RSPEC_STF(wlc->bandstate[i]->rspec_override) !=
- PHY_TXC1_MODE_SISO)
- || (RSPEC_STF(wlc->bandstate[i]->mrspec_override) !=
- PHY_TXC1_MODE_SISO)) {
- if (!force)
- return -EBADE;
-
- /* over-write the override rspec */
- if (RSPEC_STF(wlc->bandstate[i]->rspec_override)
- != PHY_TXC1_MODE_SISO) {
- wlc->bandstate[i]->rspec_override = 0;
- wiphy_err(wlc->wiphy, "%s(): temp "
- "sense override non-SISO "
- "rspec_override\n",
- __func__);
- }
- if (RSPEC_STF
- (wlc->bandstate[i]->mrspec_override) !=
- PHY_TXC1_MODE_SISO) {
- wlc->bandstate[i]->mrspec_override = 0;
- wiphy_err(wlc->wiphy, "%s(): temp "
- "sense override non-SISO "
- "mrspec_override\n",
- __func__);
- }
- }
- }
-
- wlc->stf->txchain = txchain;
- wlc->stf->txstreams = txstreams;
- brcms_c_stf_stbc_tx_set(wlc, wlc->band->band_stf_stbc_tx);
- brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
- brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
- wlc->stf->txant =
- (wlc->stf->txstreams == 1) ? ANT_TX_FORCE_0 : ANT_TX_DEF;
- _brcms_c_stf_phy_txant_upd(wlc);
-
- wlc_phy_stf_chain_set(wlc->band->pi, wlc->stf->txchain,
- wlc->stf->rxchain);
-
- for (i = 1; i <= MAX_STREAMS_SUPPORTED; i++)
- brcms_c_stf_txcore_set(wlc, (u8) i, txcore_default[i]);
-
- return 0;
-}
-
-/* update wlc->stf->ss_opmode which represents the operational stf_ss mode we're using */
-int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
-{
- int ret_code = 0;
- u8 prev_stf_ss;
- u8 upd_stf_ss;
-
- prev_stf_ss = wlc->stf->ss_opmode;
-
- /* NOTE: opmode can only be SISO or CDD as STBC is decided on a per-packet basis */
- if (BRCMS_STBC_CAP_PHY(wlc) &&
- wlc->stf->ss_algosel_auto
- && (wlc->stf->ss_algo_channel != (u16) -1)) {
- upd_stf_ss = (wlc->stf->no_cddstbc || (wlc->stf->txstreams == 1)
- || isset(&wlc->stf->ss_algo_channel,
- PHY_TXC1_MODE_SISO)) ? PHY_TXC1_MODE_SISO
- : PHY_TXC1_MODE_CDD;
- } else {
- if (wlc->band != band)
- return ret_code;
- upd_stf_ss = (wlc->stf->no_cddstbc
- || (wlc->stf->txstreams ==
- 1)) ? PHY_TXC1_MODE_SISO : band->
- band_stf_ss_mode;
- }
- if (prev_stf_ss != upd_stf_ss) {
- wlc->stf->ss_opmode = upd_stf_ss;
- brcms_b_band_stf_ss_set(wlc->hw, upd_stf_ss);
- }
-
- return ret_code;
-}
-
-int brcms_c_stf_attach(struct brcms_c_info *wlc)
-{
- wlc->bandstate[BAND_2G_INDEX]->band_stf_ss_mode = PHY_TXC1_MODE_SISO;
- wlc->bandstate[BAND_5G_INDEX]->band_stf_ss_mode = PHY_TXC1_MODE_CDD;
-
- if (BRCMS_ISNPHY(wlc->band) &&
- (wlc_phy_txpower_hw_ctrl_get(wlc->band->pi) != PHY_TPC_HW_ON))
- wlc->bandstate[BAND_2G_INDEX]->band_stf_ss_mode =
- PHY_TXC1_MODE_CDD;
- brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
- brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
-
- brcms_c_stf_stbc_rx_ht_update(wlc, HT_CAP_RX_STBC_NO);
- wlc->bandstate[BAND_2G_INDEX]->band_stf_stbc_tx = OFF;
- wlc->bandstate[BAND_5G_INDEX]->band_stf_stbc_tx = OFF;
-
- if (BRCMS_STBC_CAP_PHY(wlc)) {
- wlc->stf->ss_algosel_auto = true;
- wlc->stf->ss_algo_channel = (u16) -1; /* Init the default value */
- }
- return 0;
-}
-
-void brcms_c_stf_detach(struct brcms_c_info *wlc)
-{
-}
-
-/*
- * Centralized txant update function. call it whenever wlc->stf->txant and/or wlc->stf->txchain
- * change
- *
- * Antennas are controlled by ucode indirectly, which drives PHY or GPIO to
- * achieve various tx/rx antenna selection schemes
- *
- * legacy phy, bit 6 and bit 7 means antenna 0 and 1 respectively, bit6+bit7 means auto(last rx)
- * for NREV<3, bit 6 and bit 7 means antenna 0 and 1 respectively, bit6+bit7 means last rx and
- * do tx-antenna selection for SISO transmissions
- * for NREV=3, bit 6 and bit _8_ means antenna 0 and 1 respectively, bit6+bit7 means last rx and
- * do tx-antenna selection for SISO transmissions
- * for NREV>=7, bit 6 and bit 7 mean antenna 0 and 1 respectively, nit6+bit7 means both cores active
-*/
-static void _brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc)
-{
- s8 txant;
-
- txant = (s8) wlc->stf->txant;
- if (BRCMS_PHY_11N_CAP(wlc->band)) {
- if (txant == ANT_TX_FORCE_0) {
- wlc->stf->phytxant = PHY_TXC_ANT_0;
- } else if (txant == ANT_TX_FORCE_1) {
- wlc->stf->phytxant = PHY_TXC_ANT_1;
-
- if (BRCMS_ISNPHY(wlc->band) &&
- NREV_GE(wlc->band->phyrev, 3)
- && NREV_LT(wlc->band->phyrev, 7)) {
- wlc->stf->phytxant = PHY_TXC_ANT_2;
- }
- } else {
- if (BRCMS_ISLCNPHY(wlc->band) ||
- BRCMS_ISSSLPNPHY(wlc->band))
- wlc->stf->phytxant = PHY_TXC_LCNPHY_ANT_LAST;
- else {
- /* catch out of sync wlc->stf->txcore */
- WARN_ON(wlc->stf->txchain <= 0);
- wlc->stf->phytxant =
- wlc->stf->txchain << PHY_TXC_ANT_SHIFT;
- }
- }
- } else {
- if (txant == ANT_TX_FORCE_0)
- wlc->stf->phytxant = PHY_TXC_OLD_ANT_0;
- else if (txant == ANT_TX_FORCE_1)
- wlc->stf->phytxant = PHY_TXC_OLD_ANT_1;
- else
- wlc->stf->phytxant = PHY_TXC_OLD_ANT_LAST;
- }
-
- brcms_b_txant_set(wlc->hw, wlc->stf->phytxant);
-}
-
-void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc)
-{
- _brcms_c_stf_phy_txant_upd(wlc);
-}
-
-void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc)
-{
- /* get available rx/tx chains */
- wlc->stf->hw_txchain = (u8) getintvar(wlc->pub->vars, "txchain");
- wlc->stf->hw_rxchain = (u8) getintvar(wlc->pub->vars, "rxchain");
-
- /* these parameter are intended to be used for all PHY types */
- if (wlc->stf->hw_txchain == 0 || wlc->stf->hw_txchain == 0xf) {
- if (BRCMS_ISNPHY(wlc->band)) {
- wlc->stf->hw_txchain = TXCHAIN_DEF_NPHY;
- } else {
- wlc->stf->hw_txchain = TXCHAIN_DEF;
- }
- }
-
- wlc->stf->txchain = wlc->stf->hw_txchain;
- wlc->stf->txstreams = (u8) BRCMS_BITSCNT(wlc->stf->hw_txchain);
-
- if (wlc->stf->hw_rxchain == 0 || wlc->stf->hw_rxchain == 0xf) {
- if (BRCMS_ISNPHY(wlc->band)) {
- wlc->stf->hw_rxchain = RXCHAIN_DEF_NPHY;
- } else {
- wlc->stf->hw_rxchain = RXCHAIN_DEF;
- }
- }
-
- wlc->stf->rxchain = wlc->stf->hw_rxchain;
- wlc->stf->rxstreams = (u8) BRCMS_BITSCNT(wlc->stf->hw_rxchain);
-
- /* initialize the txcore table */
- memcpy(wlc->stf->txcore, txcore_default, sizeof(wlc->stf->txcore));
-
- /* default spatial_policy */
- wlc->stf->spatial_policy = MIN_SPATIAL_EXPANSION;
- brcms_c_stf_spatial_policy_set(wlc, MIN_SPATIAL_EXPANSION);
-}
-
-static u16 _brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
- ratespec_t rspec)
-{
- u16 phytxant = wlc->stf->phytxant;
-
- if (RSPEC_STF(rspec) != PHY_TXC1_MODE_SISO) {
- phytxant = wlc->stf->txchain << PHY_TXC_ANT_SHIFT;
- } else if (wlc->stf->txant == ANT_TX_DEF)
- phytxant = wlc->stf->txchain << PHY_TXC_ANT_SHIFT;
- phytxant &= PHY_TXC_ANT_MASK;
- return phytxant;
-}
-
-u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc, ratespec_t rspec)
-{
- return _brcms_c_stf_phytxchain_sel(wlc, rspec);
-}
-
-u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc, ratespec_t rspec)
-{
- u16 phytxant = wlc->stf->phytxant;
- u16 mask = PHY_TXC_ANT_MASK;
-
- /* for non-siso rates or default setting, use the available chains */
- if (BRCMS_ISNPHY(wlc->band)) {
- phytxant = _brcms_c_stf_phytxchain_sel(wlc, rspec);
- mask = PHY_TXC_HTANT_MASK;
- }
- phytxant |= phytxant & mask;
- return phytxant;
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/types.h b/drivers/staging/brcm80211/brcmsmac/types.h
deleted file mode 100644
index 823b5e4672e..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/types.h
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_TYPES_H_
-#define _BRCM_TYPES_H_
-
-#include <linux/types.h>
-#include <linux/io.h>
-
-/* Bus types */
-#define SI_BUS 0 /* SOC Interconnect */
-#define PCI_BUS 1 /* PCI target */
-#define SDIO_BUS 3 /* SDIO target */
-#define JTAG_BUS 4 /* JTAG */
-#define USB_BUS 5 /* USB (does not support R/W REG) */
-#define SPI_BUS 6 /* gSPI target */
-#define RPC_BUS 7 /* RPC target */
-
-#define WL_CHAN_FREQ_RANGE_2G 0
-#define WL_CHAN_FREQ_RANGE_5GL 1
-#define WL_CHAN_FREQ_RANGE_5GM 2
-#define WL_CHAN_FREQ_RANGE_5GH 3
-
-#define MAX_DMA_SEGS 4
-
-/* boardflags */
-#define BFL_PACTRL 0x00000002 /* Board has gpio 9 controlling the PA */
-#define BFL_NOPLLDOWN 0x00000020 /* Not ok to power down the chip pll and oscillator */
-#define BFL_FEM 0x00000800 /* Board supports the Front End Module */
-#define BFL_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */
-#define BFL_NOPA 0x00010000 /* Board has no PA */
-#define BFL_BUCKBOOST 0x00200000 /* Power topology uses BUCKBOOST */
-#define BFL_FEM_BT 0x00400000 /* Board has FEM and switch to share antenna w/ BT */
-#define BFL_NOCBUCK 0x00800000 /* Power topology doesn't use CBUCK */
-#define BFL_PALDO 0x02000000 /* Power topology uses PALDO */
-#define BFL_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */
-
-/* boardflags2 */
-#define BFL2_RXBB_INT_REG_DIS 0x00000001 /* Board has an external rxbb regulator */
-#define BFL2_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */
-#define BFL2_TXPWRCTRL_EN 0x00000004 /* Board permits enabling TX Power Control */
-#define BFL2_2X4_DIV 0x00000008 /* Board supports the 2X4 diversity switch */
-#define BFL2_5G_PWRGAIN 0x00000010 /* Board supports 5G band power gain */
-#define BFL2_PCIEWAR_OVR 0x00000020 /* Board overrides ASPM and Clkreq settings */
-#define BFL2_LEGACY 0x00000080
-#define BFL2_SKWRKFEM_BRD 0x00000100 /* 4321mcm93 board uses Skyworks FEM */
-#define BFL2_SPUR_WAR 0x00000200 /* Board has a WAR for clock-harmonic spurs */
-#define BFL2_GPLL_WAR 0x00000400 /* Flag to narrow G-band PLL loop b/w */
-#define BFL2_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */
-#define BFL2_2G_SPUR_WAR 0x00002000 /* WAR to reduce and avoid clock-harmonic spurs in 2G */
-#define BFL2_GPLL_WAR2 0x00010000 /* Flag to widen G-band PLL loop b/w */
-#define BFL2_IPALVLSHIFT_3P3 0x00020000
-#define BFL2_INTERNDET_TXIQCAL 0x00040000 /* Use internal envelope detector for TX IQCAL */
-#define BFL2_XTALBUFOUTEN 0x00080000 /* Keep the buffered Xtal output from radio "ON"
- * Most drivers will turn it off without this flag
- * to save power.
- */
-
-/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */
-#define BOARD_GPIO_PACTRL 0x200 /* bit 9 controls the PA on new 4306 boards */
-#define BOARD_GPIO_12 0x1000 /* gpio 12 */
-#define BOARD_GPIO_13 0x2000 /* gpio 13 */
-
-/* **** Core type/rev defaults **** */
-#define D11CONF 0x0fffffb0 /* Supported D11 revs: 4, 5, 7-27
- * also need to update wlc.h MAXCOREREV
- */
-
-#define NCONF 0x000001ff /* Supported nphy revs:
- * 0 4321a0
- * 1 4321a1
- * 2 4321b0/b1/c0/c1
- * 3 4322a0
- * 4 4322a1
- * 5 4716a0
- * 6 43222a0, 43224a0
- * 7 43226a0
- * 8 5357a0, 43236a0
- */
-
-#define LCNCONF 0x00000007 /* Supported lcnphy revs:
- * 0 4313a0, 4336a0, 4330a0
- * 1
- * 2 4330a0
- */
-
-#define SSLPNCONF 0x0000000f /* Supported sslpnphy revs:
- * 0 4329a0/k0
- * 1 4329b0/4329C0
- * 2 4319a0
- * 3 5356a0
- */
-
-/********************************************************************
- * Phy/Core Configuration. Defines macros to to check core phy/rev *
- * compile-time configuration. Defines default core support. *
- * ******************************************************************
- */
-
-/* Basic macros to check a configuration bitmask */
-
-#define CONF_HAS(config, val) ((config) & (1 << (val)))
-#define CONF_MSK(config, mask) ((config) & (mask))
-#define MSK_RANGE(low, hi) ((1 << ((hi)+1)) - (1 << (low)))
-#define CONF_RANGE(config, low, hi) (CONF_MSK(config, MSK_RANGE(low, high)))
-
-#define CONF_IS(config, val) ((config) == (1 << (val)))
-#define CONF_GE(config, val) ((config) & (0-(1 << (val))))
-#define CONF_GT(config, val) ((config) & (0-2*(1 << (val))))
-#define CONF_LT(config, val) ((config) & ((1 << (val))-1))
-#define CONF_LE(config, val) ((config) & (2*(1 << (val))-1))
-
-/* Wrappers for some of the above, specific to config constants */
-
-#define NCONF_HAS(val) CONF_HAS(NCONF, val)
-#define NCONF_MSK(mask) CONF_MSK(NCONF, mask)
-#define NCONF_IS(val) CONF_IS(NCONF, val)
-#define NCONF_GE(val) CONF_GE(NCONF, val)
-#define NCONF_GT(val) CONF_GT(NCONF, val)
-#define NCONF_LT(val) CONF_LT(NCONF, val)
-#define NCONF_LE(val) CONF_LE(NCONF, val)
-
-#define LCNCONF_HAS(val) CONF_HAS(LCNCONF, val)
-#define LCNCONF_MSK(mask) CONF_MSK(LCNCONF, mask)
-#define LCNCONF_IS(val) CONF_IS(LCNCONF, val)
-#define LCNCONF_GE(val) CONF_GE(LCNCONF, val)
-#define LCNCONF_GT(val) CONF_GT(LCNCONF, val)
-#define LCNCONF_LT(val) CONF_LT(LCNCONF, val)
-#define LCNCONF_LE(val) CONF_LE(LCNCONF, val)
-
-#define D11CONF_HAS(val) CONF_HAS(D11CONF, val)
-#define D11CONF_MSK(mask) CONF_MSK(D11CONF, mask)
-#define D11CONF_IS(val) CONF_IS(D11CONF, val)
-#define D11CONF_GE(val) CONF_GE(D11CONF, val)
-#define D11CONF_GT(val) CONF_GT(D11CONF, val)
-#define D11CONF_LT(val) CONF_LT(D11CONF, val)
-#define D11CONF_LE(val) CONF_LE(D11CONF, val)
-
-#define PHYCONF_HAS(val) CONF_HAS(PHYTYPE, val)
-#define PHYCONF_IS(val) CONF_IS(PHYTYPE, val)
-
-#define NREV_IS(var, val) (NCONF_HAS(val) && (NCONF_IS(val) || ((var) == (val))))
-#define NREV_GE(var, val) (NCONF_GE(val) && (!NCONF_LT(val) || ((var) >= (val))))
-#define NREV_GT(var, val) (NCONF_GT(val) && (!NCONF_LE(val) || ((var) > (val))))
-#define NREV_LT(var, val) (NCONF_LT(val) && (!NCONF_GE(val) || ((var) < (val))))
-#define NREV_LE(var, val) (NCONF_LE(val) && (!NCONF_GT(val) || ((var) <= (val))))
-
-#define LCNREV_IS(var, val) (LCNCONF_HAS(val) && (LCNCONF_IS(val) || ((var) == (val))))
-#define LCNREV_GE(var, val) (LCNCONF_GE(val) && (!LCNCONF_LT(val) || ((var) >= (val))))
-#define LCNREV_GT(var, val) (LCNCONF_GT(val) && (!LCNCONF_LE(val) || ((var) > (val))))
-#define LCNREV_LT(var, val) (LCNCONF_LT(val) && (!LCNCONF_GE(val) || ((var) < (val))))
-#define LCNREV_LE(var, val) (LCNCONF_LE(val) && (!LCNCONF_GT(val) || ((var) <= (val))))
-
-#define D11REV_IS(var, val) (D11CONF_HAS(val) && (D11CONF_IS(val) || ((var) == (val))))
-#define D11REV_GE(var, val) (D11CONF_GE(val) && (!D11CONF_LT(val) || ((var) >= (val))))
-#define D11REV_GT(var, val) (D11CONF_GT(val) && (!D11CONF_LE(val) || ((var) > (val))))
-#define D11REV_LT(var, val) (D11CONF_LT(val) && (!D11CONF_GE(val) || ((var) < (val))))
-#define D11REV_LE(var, val) (D11CONF_LE(val) && (!D11CONF_GT(val) || ((var) <= (val))))
-
-#define PHYTYPE_IS(var, val) (PHYCONF_HAS(val) && (PHYCONF_IS(val) || ((var) == (val))))
-
-/* Finally, early-exit from switch case if anyone wants it... */
-
-#define CASECHECK(config, val) if (!(CONF_HAS(config, val))) break
-#define CASEMSK(config, mask) if (!(CONF_MSK(config, mask))) break
-
-/* Set up PHYTYPE automatically: (depends on PHY_TYPE_X, from d11.h) */
-
-#define _PHYCONF_N (1 << PHY_TYPE_N)
-#define _PHYCONF_LCN (1 << PHY_TYPE_LCN)
-#define _PHYCONF_SSLPN (1 << PHY_TYPE_SSN)
-
-#define PHYTYPE (_PHYCONF_N | _PHYCONF_LCN | _PHYCONF_SSLPN)
-
-/* Utility macro to identify 802.11n (HT) capable PHYs */
-#define PHYTYPE_11N_CAP(phytype) \
- (PHYTYPE_IS(phytype, PHY_TYPE_N) || \
- PHYTYPE_IS(phytype, PHY_TYPE_LCN) || \
- PHYTYPE_IS(phytype, PHY_TYPE_SSN))
-
-/* Last but not least: shorter wlc-specific var checks */
-#define BRCMS_ISNPHY(band) PHYTYPE_IS((band)->phytype, PHY_TYPE_N)
-#define BRCMS_ISLCNPHY(band) PHYTYPE_IS((band)->phytype, PHY_TYPE_LCN)
-#define BRCMS_ISSSLPNPHY(band) PHYTYPE_IS((band)->phytype, PHY_TYPE_SSN)
-
-#define BRCMS_PHY_11N_CAP(band) PHYTYPE_11N_CAP((band)->phytype)
-
-/**********************************************************************
- * ------------- End of Core phy/rev configuration. ----------------- *
- * ********************************************************************
- */
-
-/*************************************************
- * Defaults for tunables (e.g. sizing constants)
- *
- * For each new tunable, add a member to the end
- * of struct brcms_tunables in brcms_c_pub.h to enable
- * runtime checks of tunable values. (Directly
- * using the macros in code invalidates ROM code)
- *
- * ***********************************************
- */
-#define NTXD 256 /* Max # of entries in Tx FIFO based on 4kb page size */
-#define NRXD 256 /* Max # of entries in Rx FIFO based on 4kb page size */
-#define NRXBUFPOST 32 /* try to keep this # rbufs posted to the chip */
-#define MAXSCB 32 /* Maximum SCBs in cache for STA */
-#define AMPDU_NUM_MPDU 16 /* max allowed number of mpdus in an ampdu (2 streams) */
-
-/* Count of packet callback structures. either of following
- * 1. Set to the number of SCBs since a STA
- * can queue up a rate callback for each IBSS STA it knows about, and an AP can
- * queue up an "are you there?" Null Data callback for each associated STA
- * 2. controlled by tunable config file
- */
-#define MAXPKTCB MAXSCB /* Max number of packet callbacks */
-
-/* NetBSD also needs to keep track of this */
-
-/* Number of BSS handled in ucode bcn/prb */
-#define BRCMS_MAX_UCODE_BSS (16)
-/* Number of BSS handled in sw bcn/prb */
-#define BRCMS_MAX_UCODE_BSS4 (4)
-/* max # BSS configs */
-#define BRCMS_MAXBSSCFG (1)
-/* max # available networks */
-#define MAXBSS 64
-/* data msg txq hiwat mark */
-#define BRCMS_DATAHIWAT 50
-#define BRCMS_AMPDUDATAHIWAT 255
-
-/* bounded rx loops */
-#define RXBND 8 /* max # frames to process in brcms_c_recv() */
-#define TXSBND 8 /* max # tx status to process in wlc_txstatus() */
-
-#define BAND_5G(bt) ((bt) == BRCM_BAND_5G)
-#define BAND_2G(bt) ((bt) == BRCM_BAND_2G)
-
-#define BCMMSG(dev, fmt, args...) \
-do { \
- if (brcm_msg_level & LOG_TRACE_VAL) \
- wiphy_err(dev, "%s: " fmt, __func__, ##args); \
-} while (0)
-
-#define WL_ERROR_ON() (brcm_msg_level & LOG_ERROR_VAL)
-
-/* register access macros */
-#ifndef __BIG_ENDIAN
-#ifndef __mips__
-#define R_REG(r) \
- ({\
- sizeof(*(r)) == sizeof(u8) ? \
- readb((u8 *)(r)) : \
- sizeof(*(r)) == sizeof(u16) ? readw((u16 *)(r)) : \
- readl((u32 *)(r)); \
- })
-#else /* __mips__ */
-#define R_REG(r) \
- ({ \
- __typeof(*(r)) __osl_v; \
- __asm__ __volatile__("sync"); \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- __osl_v = readb((u8 *)(r)); \
- break; \
- case sizeof(u16): \
- __osl_v = readw((u16 *)(r)); \
- break; \
- case sizeof(u32): \
- __osl_v = \
- readl((u32 *)(r)); \
- break; \
- } \
- __asm__ __volatile__("sync"); \
- __osl_v; \
- })
-#endif /* __mips__ */
-
-#define W_REG(r, v) do { \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- writeb((u8)(v), (u8 *)(r)); break; \
- case sizeof(u16): \
- writew((u16)(v), (u16 *)(r)); break; \
- case sizeof(u32): \
- writel((u32)(v), (u32 *)(r)); break; \
- }; \
- } while (0)
-#else /* __BIG_ENDIAN */
-#define R_REG(r) \
- ({ \
- __typeof(*(r)) __osl_v; \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- __osl_v = \
- readb((u8 *)((r)^3)); \
- break; \
- case sizeof(u16): \
- __osl_v = \
- readw((u16 *)((r)^2)); \
- break; \
- case sizeof(u32): \
- __osl_v = readl((u32 *)(r)); \
- break; \
- } \
- __osl_v; \
- })
-
-#define W_REG(r, v) do { \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- writeb((u8)(v), \
- (u8 *)((r)^3)); break; \
- case sizeof(u16): \
- writew((u16)(v), \
- (u16 *)((r)^2)); break; \
- case sizeof(u32): \
- writel((u32)(v), \
- (u32 *)(r)); break; \
- } \
- } while (0)
-#endif /* __BIG_ENDIAN */
-
-#ifdef __mips__
-/*
- * bcm4716 (which includes 4717 & 4718), plus 4706 on PCIe can reorder
- * transactions. As a fix, a read after write is performed on certain places
- * in the code. Older chips and the newer 5357 family don't require this fix.
- */
-#define W_REG_FLUSH(r, v) ({ W_REG((r), (v)); (void)R_REG(r); })
-#else
-#define W_REG_FLUSH(r, v) W_REG((r), (v))
-#endif /* __mips__ */
-
-#define AND_REG(r, v) W_REG((r), R_REG(r) & (v))
-#define OR_REG(r, v) W_REG((r), R_REG(r) | (v))
-
-#define SET_REG(r, mask, val) \
- W_REG((r), ((R_REG(r) & ~(mask)) | (val)))
-
-/* multi-bool data type: set of bools, mbool is true if any is set */
-typedef u32 mbool;
-#define mboolset(mb, bit) ((mb) |= (bit)) /* set one bool */
-#define mboolclr(mb, bit) ((mb) &= ~(bit)) /* clear one bool */
-#define mboolisset(mb, bit) (((mb) & (bit)) != 0) /* true if one bool is set */
-#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val)))
-
-/* forward declarations */
-struct wiphy;
-struct ieee80211_sta;
-struct ieee80211_tx_queue_params;
-struct brcms_info;
-struct brcms_c_info;
-struct brcms_hardware;
-struct brcms_c_if;
-struct brcmu_iovar;
-struct brcmu_strbuf;
-struct brcms_txq_info;
-struct brcms_band;
-struct dma_pub;
-struct si_pub;
-struct tx_status;
-struct d11rxhdr;
-struct brcms_d11rxhdr;
-struct txpwr_limits;
-struct brcms_phy;
-
-typedef volatile struct intctrlregs intctrlregs_t;
-typedef volatile struct pio2regs pio2regs_t;
-typedef volatile struct pio2regp pio2regp_t;
-typedef volatile struct pio4regs pio4regs_t;
-typedef volatile struct pio4regp pio4regp_t;
-typedef volatile struct fifo64 fifo64_t;
-typedef volatile struct d11regs d11regs_t;
-typedef volatile struct dma32diag dma32diag_t;
-typedef volatile struct dma64regs dma64regs_t;
-typedef struct brcms_rateset wlc_rateset_t;
-typedef u32 ratespec_t;
-typedef struct chanvec chanvec_t;
-typedef s32 fixed;
-typedef struct _cs32 cs32;
-typedef volatile union pmqreg pmqreg_t;
-
-/* brcm_msg_level is a bit vector with defs in defs.h */
-extern u32 brcm_msg_level;
-
-#endif /* _BRCM_TYPES_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/ucode_loader.c b/drivers/staging/brcm80211/brcmsmac/ucode_loader.c
deleted file mode 100644
index bf733fb18ce..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/ucode_loader.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <defs.h>
-#include "types.h"
-#include <ucode_loader.h>
-
-enum {
- D11UCODE_NAMETAG_START = 0,
- D11LCN0BSINITVALS24,
- D11LCN0INITVALS24,
- D11LCN1BSINITVALS24,
- D11LCN1INITVALS24,
- D11LCN2BSINITVALS24,
- D11LCN2INITVALS24,
- D11N0ABSINITVALS16,
- D11N0BSINITVALS16,
- D11N0INITVALS16,
- D11UCODE_OVERSIGHT16_MIMO,
- D11UCODE_OVERSIGHT16_MIMOSZ,
- D11UCODE_OVERSIGHT24_LCN,
- D11UCODE_OVERSIGHT24_LCNSZ,
- D11UCODE_OVERSIGHT_BOMMAJOR,
- D11UCODE_OVERSIGHT_BOMMINOR
-};
-
-struct d11init *d11lcn0bsinitvals24;
-struct d11init *d11lcn0initvals24;
-struct d11init *d11lcn1bsinitvals24;
-struct d11init *d11lcn1initvals24;
-struct d11init *d11lcn2bsinitvals24;
-struct d11init *d11lcn2initvals24;
-struct d11init *d11n0absinitvals16;
-struct d11init *d11n0bsinitvals16;
-struct d11init *d11n0initvals16;
-u32 *bcm43xx_16_mimo;
-u32 bcm43xx_16_mimosz;
-u32 *bcm43xx_24_lcn;
-u32 bcm43xx_24_lcnsz;
-u32 *bcm43xx_bommajor;
-u32 *bcm43xx_bomminor;
-
-int brcms_ucode_data_init(struct brcms_info *wl)
-{
- int rc;
- rc = brcms_check_firmwares(wl);
-
- rc = rc < 0 ? rc :
- brcms_ucode_init_buf(wl, (void **)&d11lcn0bsinitvals24,
- D11LCN0BSINITVALS24);
- rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&d11lcn0initvals24,
- D11LCN0INITVALS24);
- rc = rc < 0 ? rc :
- brcms_ucode_init_buf(wl, (void **)&d11lcn1bsinitvals24,
- D11LCN1BSINITVALS24);
- rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&d11lcn1initvals24,
- D11LCN1INITVALS24);
- rc = rc < 0 ? rc :
- brcms_ucode_init_buf(wl, (void **)&d11lcn2bsinitvals24,
- D11LCN2BSINITVALS24);
- rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&d11lcn2initvals24,
- D11LCN2INITVALS24);
- rc = rc < 0 ? rc :
- brcms_ucode_init_buf(wl, (void **)&d11n0absinitvals16,
- D11N0ABSINITVALS16);
- rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&d11n0bsinitvals16,
- D11N0BSINITVALS16);
- rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&d11n0initvals16,
- D11N0INITVALS16);
- rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&bcm43xx_16_mimo,
- D11UCODE_OVERSIGHT16_MIMO);
- rc = rc < 0 ? rc : brcms_ucode_init_uint(wl, &bcm43xx_16_mimosz,
- D11UCODE_OVERSIGHT16_MIMOSZ);
- rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&bcm43xx_24_lcn,
- D11UCODE_OVERSIGHT24_LCN);
- rc = rc < 0 ? rc : brcms_ucode_init_uint(wl, &bcm43xx_24_lcnsz,
- D11UCODE_OVERSIGHT24_LCNSZ);
- rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&bcm43xx_bommajor,
- D11UCODE_OVERSIGHT_BOMMAJOR);
- rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&bcm43xx_bomminor,
- D11UCODE_OVERSIGHT_BOMMINOR);
- return rc;
-}
-
-void brcms_ucode_data_free(void)
-{
- brcms_ucode_free_buf((void *)d11lcn0bsinitvals24);
- brcms_ucode_free_buf((void *)d11lcn0initvals24);
- brcms_ucode_free_buf((void *)d11lcn1bsinitvals24);
- brcms_ucode_free_buf((void *)d11lcn1initvals24);
- brcms_ucode_free_buf((void *)d11lcn2bsinitvals24);
- brcms_ucode_free_buf((void *)d11lcn2initvals24);
- brcms_ucode_free_buf((void *)d11n0absinitvals16);
- brcms_ucode_free_buf((void *)d11n0bsinitvals16);
- brcms_ucode_free_buf((void *)d11n0initvals16);
- brcms_ucode_free_buf((void *)bcm43xx_16_mimo);
- brcms_ucode_free_buf((void *)bcm43xx_24_lcn);
- brcms_ucode_free_buf((void *)bcm43xx_bommajor);
- brcms_ucode_free_buf((void *)bcm43xx_bomminor);
-
- return;
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/ucode_loader.h b/drivers/staging/brcm80211/brcmsmac/ucode_loader.h
deleted file mode 100644
index ca53deced7b..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/ucode_loader.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "types.h" /* forward structure declarations */
-
-#define MIN_FW_SIZE 40000 /* minimum firmware file size in bytes */
-#define MAX_FW_SIZE 150000
-
-#define UCODE_LOADER_API_VER 0
-
-struct d11init {
- u16 addr;
- u16 size;
- u32 value;
-};
-
-extern struct d11init *d11lcn0bsinitvals24;
-extern struct d11init *d11lcn0initvals24;
-extern struct d11init *d11lcn1bsinitvals24;
-extern struct d11init *d11lcn1initvals24;
-extern struct d11init *d11lcn2bsinitvals24;
-extern struct d11init *d11lcn2initvals24;
-extern struct d11init *d11n0absinitvals16;
-extern struct d11init *d11n0bsinitvals16;
-extern struct d11init *d11n0initvals16;
-extern u32 *bcm43xx_16_mimo;
-extern u32 bcm43xx_16_mimosz;
-extern u32 *bcm43xx_24_lcn;
-extern u32 bcm43xx_24_lcnsz;
-
-extern int brcms_ucode_data_init(struct brcms_info *wl);
-extern void brcms_ucode_data_free(void);
-
-extern int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf,
- unsigned int idx);
-extern int brcms_ucode_init_uint(struct brcms_info *wl, unsigned *data,
- unsigned int idx);
-extern void brcms_ucode_free_buf(void *);
-extern int brcms_check_firmwares(struct brcms_info *wl);
diff --git a/drivers/staging/brcm80211/brcmutil/Makefile b/drivers/staging/brcm80211/brcmutil/Makefile
deleted file mode 100644
index 6403423c021..00000000000
--- a/drivers/staging/brcm80211/brcmutil/Makefile
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Makefile fragment for Broadcom 802.11n Networking Device Driver Utilities
-#
-# Copyright (c) 2011 Broadcom Corporation
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
-# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-ccflags-y := \
- -Idrivers/staging/brcm80211/brcmutil \
- -Idrivers/staging/brcm80211/include
-
-BRCMUTIL_OFILES := \
- utils.o \
- wifi.o
-
-MODULEPFX := brcmutil
-
-obj-$(CONFIG_BRCMUTIL) += $(MODULEPFX).o
-$(MODULEPFX)-objs = $(BRCMUTIL_OFILES)
diff --git a/drivers/staging/brcm80211/brcmutil/utils.c b/drivers/staging/brcm80211/brcmutil/utils.c
deleted file mode 100644
index 37b6b779779..00000000000
--- a/drivers/staging/brcm80211/brcmutil/utils.c
+++ /dev/null
@@ -1,787 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/netdevice.h>
-#include <brcmu_utils.h>
-
-MODULE_AUTHOR("Broadcom Corporation");
-MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver utilities.");
-MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
-MODULE_LICENSE("Dual BSD/GPL");
-
-struct sk_buff *brcmu_pkt_buf_get_skb(uint len)
-{
- struct sk_buff *skb;
-
- skb = dev_alloc_skb(len);
- if (skb) {
- skb_put(skb, len);
- skb->priority = 0;
- }
-
- return skb;
-}
-EXPORT_SYMBOL(brcmu_pkt_buf_get_skb);
-
-/* Free the driver packet. Free the tag if present */
-void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
-{
- struct sk_buff *nskb;
- int nest = 0;
-
- /* perversion: we use skb->next to chain multi-skb packets */
- while (skb) {
- nskb = skb->next;
- skb->next = NULL;
-
- if (skb->destructor)
- /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
- * destructor exists
- */
- dev_kfree_skb_any(skb);
- else
- /* can free immediately (even in_irq()) if destructor
- * does not exist
- */
- dev_kfree_skb(skb);
-
- nest++;
- skb = nskb;
- }
-}
-EXPORT_SYMBOL(brcmu_pkt_buf_free_skb);
-
-
-/* copy a buffer into a pkt buffer chain */
-uint brcmu_pktfrombuf(struct sk_buff *p, uint offset, int len,
- unsigned char *buf)
-{
- uint n, ret = 0;
-
- /* skip 'offset' bytes */
- for (; p && offset; p = p->next) {
- if (offset < (uint) (p->len))
- break;
- offset -= p->len;
- }
-
- if (!p)
- return 0;
-
- /* copy the data */
- for (; p && len; p = p->next) {
- n = min((uint) (p->len) - offset, (uint) len);
- memcpy(p->data + offset, buf, n);
- buf += n;
- len -= n;
- ret += n;
- offset = 0;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(brcmu_pktfrombuf);
-
-/* return total length of buffer chain */
-uint brcmu_pkttotlen(struct sk_buff *p)
-{
- uint total;
-
- total = 0;
- for (; p; p = p->next)
- total += p->len;
- return total;
-}
-EXPORT_SYMBOL(brcmu_pkttotlen);
-
-/*
- * osl multiple-precedence packet queue
- * hi_prec is always >= the number of the highest non-empty precedence
- */
-struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
- struct sk_buff *p)
-{
- struct pktq_prec *q;
-
- if (pktq_full(pq) || pktq_pfull(pq, prec))
- return NULL;
-
- q = &pq->q[prec];
-
- if (q->head)
- q->tail->prev = p;
- else
- q->head = p;
-
- q->tail = p;
- q->len++;
-
- pq->len++;
-
- if (pq->hi_prec < prec)
- pq->hi_prec = (u8) prec;
-
- return p;
-}
-EXPORT_SYMBOL(brcmu_pktq_penq);
-
-struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
- struct sk_buff *p)
-{
- struct pktq_prec *q;
-
- if (pktq_full(pq) || pktq_pfull(pq, prec))
- return NULL;
-
- q = &pq->q[prec];
-
- if (q->head == NULL)
- q->tail = p;
-
- p->prev = q->head;
- q->head = p;
- q->len++;
-
- pq->len++;
-
- if (pq->hi_prec < prec)
- pq->hi_prec = (u8) prec;
-
- return p;
-}
-EXPORT_SYMBOL(brcmu_pktq_penq_head);
-
-struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
-{
- struct pktq_prec *q;
- struct sk_buff *p;
-
- q = &pq->q[prec];
-
- p = q->head;
- if (p == NULL)
- return NULL;
-
- q->head = p->prev;
- if (q->head == NULL)
- q->tail = NULL;
-
- q->len--;
-
- pq->len--;
-
- p->prev = NULL;
-
- return p;
-}
-EXPORT_SYMBOL(brcmu_pktq_pdeq);
-
-struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
-{
- struct pktq_prec *q;
- struct sk_buff *p, *prev;
-
- q = &pq->q[prec];
-
- p = q->head;
- if (p == NULL)
- return NULL;
-
- for (prev = NULL; p != q->tail; p = p->prev)
- prev = p;
-
- if (prev)
- prev->prev = NULL;
- else
- q->head = NULL;
-
- q->tail = prev;
- q->len--;
-
- pq->len--;
-
- return p;
-}
-EXPORT_SYMBOL(brcmu_pktq_pdeq_tail);
-
-void
-brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
- ifpkt_cb_t fn, void *arg)
-{
- struct pktq_prec *q;
- struct sk_buff *p, *prev = NULL;
-
- q = &pq->q[prec];
- p = q->head;
- while (p) {
- if (fn == NULL || (*fn) (p, arg)) {
- bool head = (p == q->head);
- if (head)
- q->head = p->prev;
- else
- prev->prev = p->prev;
- p->prev = NULL;
- brcmu_pkt_buf_free_skb(p);
- q->len--;
- pq->len--;
- p = (head ? q->head : prev->prev);
- } else {
- prev = p;
- p = p->prev;
- }
- }
-
- if (q->head == NULL) {
- q->tail = NULL;
- }
-}
-EXPORT_SYMBOL(brcmu_pktq_pflush);
-
-void brcmu_pktq_flush(struct pktq *pq, bool dir,
- ifpkt_cb_t fn, void *arg)
-{
- int prec;
- for (prec = 0; prec < pq->num_prec; prec++)
- brcmu_pktq_pflush(pq, prec, dir, fn, arg);
-}
-EXPORT_SYMBOL(brcmu_pktq_flush);
-
-void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len)
-{
- int prec;
-
- /* pq is variable size; only zero out what's requested */
- memset(pq, 0,
- offsetof(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
-
- pq->num_prec = (u16) num_prec;
-
- pq->max = (u16) max_len;
-
- for (prec = 0; prec < num_prec; prec++)
- pq->q[prec].max = pq->max;
-}
-EXPORT_SYMBOL(brcmu_pktq_init);
-
-struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out)
-{
- int prec;
-
- if (pq->len == 0)
- return NULL;
-
- for (prec = 0; prec < pq->hi_prec; prec++)
- if (pq->q[prec].head)
- break;
-
- if (prec_out)
- *prec_out = prec;
-
- return pq->q[prec].tail;
-}
-EXPORT_SYMBOL(brcmu_pktq_peek_tail);
-
-/* Return sum of lengths of a specific set of precedences */
-int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp)
-{
- int prec, len;
-
- len = 0;
-
- for (prec = 0; prec <= pq->hi_prec; prec++)
- if (prec_bmp & (1 << prec))
- len += pq->q[prec].len;
-
- return len;
-}
-EXPORT_SYMBOL(brcmu_pktq_mlen);
-
-/* Priority dequeue from a specific set of precedences */
-struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
- int *prec_out)
-{
- struct pktq_prec *q;
- struct sk_buff *p;
- int prec;
-
- if (pq->len == 0)
- return NULL;
-
- while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
- pq->hi_prec--;
-
- while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
- if (prec-- == 0)
- return NULL;
-
- q = &pq->q[prec];
-
- p = q->head;
- if (p == NULL)
- return NULL;
-
- q->head = p->prev;
- if (q->head == NULL)
- q->tail = NULL;
-
- q->len--;
-
- if (prec_out)
- *prec_out = prec;
-
- pq->len--;
-
- p->prev = NULL;
-
- return p;
-}
-EXPORT_SYMBOL(brcmu_pktq_mdeq);
-
-/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
-int brcmu_ether_atoe(char *p, u8 *ea)
-{
- int i = 0;
-
- for (;;) {
- ea[i++] = (char)simple_strtoul(p, &p, 16);
- if (!*p++ || i == 6)
- break;
- }
-
- return i == 6;
-}
-EXPORT_SYMBOL(brcmu_ether_atoe);
-
-#if defined(BCMDBG)
-/* pretty hex print a pkt buffer chain */
-void brcmu_prpkt(const char *msg, struct sk_buff *p0)
-{
- struct sk_buff *p;
-
- if (msg && (msg[0] != '\0'))
- printk(KERN_DEBUG "%s:\n", msg);
-
- for (p = p0; p; p = p->next)
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, p->data, p->len);
-}
-EXPORT_SYMBOL(brcmu_prpkt);
-#endif /* defined(BCMDBG) */
-
-/* iovar table lookup */
-const struct brcmu_iovar *brcmu_iovar_lookup(const struct brcmu_iovar *table,
- const char *name)
-{
- const struct brcmu_iovar *vi;
- const char *lookup_name;
-
- /* skip any ':' delimited option prefixes */
- lookup_name = strrchr(name, ':');
- if (lookup_name != NULL)
- lookup_name++;
- else
- lookup_name = name;
-
- for (vi = table; vi->name; vi++) {
- if (!strcmp(vi->name, lookup_name))
- return vi;
- }
- /* ran to end of table */
-
- return NULL; /* var name not found */
-}
-EXPORT_SYMBOL(brcmu_iovar_lookup);
-
-int brcmu_iovar_lencheck(const struct brcmu_iovar *vi, void *arg, int len,
- bool set)
-{
- int bcmerror = 0;
-
- /* length check on io buf */
- switch (vi->type) {
- case IOVT_BOOL:
- case IOVT_INT8:
- case IOVT_INT16:
- case IOVT_INT32:
- case IOVT_UINT8:
- case IOVT_UINT16:
- case IOVT_UINT32:
- /* all integers are s32 sized args at the ioctl interface */
- if (len < (int)sizeof(int)) {
- bcmerror = -EOVERFLOW;
- }
- break;
-
- case IOVT_BUFFER:
- /* buffer must meet minimum length requirement */
- if (len < vi->minlen) {
- bcmerror = -EOVERFLOW;
- }
- break;
-
- case IOVT_VOID:
- if (!set) {
- /* Cannot return nil... */
- bcmerror = -ENOTSUPP;
- } else if (len) {
- /* Set is an action w/o parameters */
- bcmerror = -ENOBUFS;
- }
- break;
-
- default:
- /* unknown type for length check in iovar info */
- bcmerror = -ENOTSUPP;
- }
-
- return bcmerror;
-}
-EXPORT_SYMBOL(brcmu_iovar_lencheck);
-
-/*******************************************************************************
- * crc8
- *
- * Computes a crc8 over the input data using the polynomial:
- *
- * x^8 + x^7 +x^6 + x^4 + x^2 + 1
- *
- * The caller provides the initial value (either CRC8_INIT_VALUE
- * or the previous returned value) to allow for processing of
- * discontiguous blocks of data. When generating the CRC the
- * caller is responsible for complementing the final return value
- * and inserting it into the byte stream. When checking, a final
- * return value of CRC8_GOOD_VALUE indicates a valid CRC.
- *
- * Reference: Dallas Semiconductor Application Note 27
- * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
- * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
- * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
- *
- * ****************************************************************************
- */
-
-static const u8 crc8_table[256] = {
- 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
- 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
- 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
- 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
- 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
- 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
- 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
- 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
- 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
- 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
- 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
- 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
- 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
- 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
- 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
- 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
- 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
- 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
- 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
- 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
- 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
- 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
- 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
- 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
- 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
- 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
- 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
- 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
- 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
- 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
- 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
- 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
-};
-
-u8 brcmu_crc8(u8 *pdata, /* pointer to array of data to process */
- uint nbytes, /* number of input data bytes to process */
- u8 crc /* either CRC8_INIT_VALUE or previous return value */
- ) {
- /* loop over the buffer data */
- while (nbytes-- > 0)
- crc = crc8_table[(crc ^ *pdata++) & 0xff];
-
- return crc;
-}
-EXPORT_SYMBOL(brcmu_crc8);
-
-/*
- * Traverse a string of 1-byte tag/1-byte length/variable-length value
- * triples, returning a pointer to the substring whose first element
- * matches tag
- */
-struct brcmu_tlv *brcmu_parse_tlvs(void *buf, int buflen, uint key)
-{
- struct brcmu_tlv *elt;
- int totlen;
-
- elt = (struct brcmu_tlv *) buf;
- totlen = buflen;
-
- /* find tagged parameter */
- while (totlen >= 2) {
- int len = elt->len;
-
- /* validate remaining totlen */
- if ((elt->id == key) && (totlen >= (len + 2)))
- return elt;
-
- elt = (struct brcmu_tlv *) ((u8 *) elt + (len + 2));
- totlen -= (len + 2);
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(brcmu_parse_tlvs);
-
-
-#if defined(BCMDBG)
-int
-brcmu_format_flags(const struct brcmu_bit_desc *bd, u32 flags, char *buf,
- int len)
-{
- int i;
- char *p = buf;
- char hexstr[16];
- int slen = 0, nlen = 0;
- u32 bit;
- const char *name;
-
- if (len < 2 || !buf)
- return 0;
-
- buf[0] = '\0';
-
- for (i = 0; flags != 0; i++) {
- bit = bd[i].bit;
- name = bd[i].name;
- if (bit == 0 && flags != 0) {
- /* print any unnamed bits */
- snprintf(hexstr, 16, "0x%X", flags);
- name = hexstr;
- flags = 0; /* exit loop */
- } else if ((flags & bit) == 0)
- continue;
- flags &= ~bit;
- nlen = strlen(name);
- slen += nlen;
- /* count btwn flag space */
- if (flags != 0)
- slen += 1;
- /* need NULL char as well */
- if (len <= slen)
- break;
- /* copy NULL char but don't count it */
- strncpy(p, name, nlen + 1);
- p += nlen;
- /* copy btwn flag space and NULL char */
- if (flags != 0)
- p += snprintf(p, 2, " ");
- len -= slen;
- }
-
- /* indicate the str was too short */
- if (flags != 0) {
- if (len < 2)
- p -= 2 - len; /* overwrite last char */
- p += snprintf(p, 2, ">");
- }
-
- return (int)(p - buf);
-}
-EXPORT_SYMBOL(brcmu_format_flags);
-
-/* print bytes formatted as hex to a string. return the resulting string length */
-int brcmu_format_hex(char *str, const void *bytes, int len)
-{
- int i;
- char *p = str;
- const u8 *src = (const u8 *)bytes;
-
- for (i = 0; i < len; i++) {
- p += snprintf(p, 3, "%02X", *src);
- src++;
- }
- return (int)(p - str);
-}
-EXPORT_SYMBOL(brcmu_format_hex);
-#endif /* defined(BCMDBG) */
-
-char *brcmu_chipname(uint chipid, char *buf, uint len)
-{
- const char *fmt;
-
- fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
- snprintf(buf, len, fmt, chipid);
- return buf;
-}
-EXPORT_SYMBOL(brcmu_chipname);
-
-uint brcmu_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
-{
- uint len;
-
- len = strlen(name) + 1;
-
- if ((len + datalen) > buflen)
- return 0;
-
- strncpy(buf, name, buflen);
-
- /* append data onto the end of the name string */
- memcpy(&buf[len], data, datalen);
- len += datalen;
-
- return len;
-}
-EXPORT_SYMBOL(brcmu_mkiovar);
-
-/* Quarter dBm units to mW
- * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
- * Table is offset so the last entry is largest mW value that fits in
- * a u16.
- */
-
-#define QDBM_OFFSET 153 /* Offset for first entry */
-#define QDBM_TABLE_LEN 40 /* Table size */
-
-/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
- * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
- */
-#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
-
-/* Largest mW value that will round down to the last table entry,
- * QDBM_OFFSET + QDBM_TABLE_LEN-1.
- * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) +
- * mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
- */
-#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
-
-static const u16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
-/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
-/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
-/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
-/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
-/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
-/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
-};
-
-u16 brcmu_qdbm_to_mw(u8 qdbm)
-{
- uint factor = 1;
- int idx = qdbm - QDBM_OFFSET;
-
- if (idx >= QDBM_TABLE_LEN) {
- /* clamp to max u16 mW value */
- return 0xFFFF;
- }
-
- /* scale the qdBm index up to the range of the table 0-40
- * where an offset of 40 qdBm equals a factor of 10 mW.
- */
- while (idx < 0) {
- idx += 40;
- factor *= 10;
- }
-
- /* return the mW value scaled down to the correct factor of 10,
- * adding in factor/2 to get proper rounding.
- */
- return (nqdBm_to_mW_map[idx] + factor / 2) / factor;
-}
-EXPORT_SYMBOL(brcmu_qdbm_to_mw);
-
-u8 brcmu_mw_to_qdbm(u16 mw)
-{
- u8 qdbm;
- int offset;
- uint mw_uint = mw;
- uint boundary;
-
- /* handle boundary case */
- if (mw_uint <= 1)
- return 0;
-
- offset = QDBM_OFFSET;
-
- /* move mw into the range of the table */
- while (mw_uint < QDBM_TABLE_LOW_BOUND) {
- mw_uint *= 10;
- offset -= 40;
- }
-
- for (qdbm = 0; qdbm < QDBM_TABLE_LEN - 1; qdbm++) {
- boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm + 1] -
- nqdBm_to_mW_map[qdbm]) / 2;
- if (mw_uint < boundary)
- break;
- }
-
- qdbm += (u8) offset;
-
- return qdbm;
-}
-EXPORT_SYMBOL(brcmu_mw_to_qdbm);
-
-uint brcmu_bitcount(u8 *bitmap, uint length)
-{
- uint bitcount = 0, i;
- u8 tmp;
- for (i = 0; i < length; i++) {
- tmp = bitmap[i];
- while (tmp) {
- bitcount++;
- tmp &= (tmp - 1);
- }
- }
- return bitcount;
-}
-EXPORT_SYMBOL(brcmu_bitcount);
-
-/* Initialization of brcmu_strbuf structure */
-void brcmu_binit(struct brcmu_strbuf *b, char *buf, uint size)
-{
- b->origsize = b->size = size;
- b->origbuf = b->buf = buf;
-}
-EXPORT_SYMBOL(brcmu_binit);
-
-/* Buffer sprintf wrapper to guard against buffer overflow */
-int brcmu_bprintf(struct brcmu_strbuf *b, const char *fmt, ...)
-{
- va_list ap;
- int r;
-
- va_start(ap, fmt);
- r = vsnprintf(b->buf, b->size, fmt, ap);
-
- /* Non Ansi C99 compliant returns -1,
- * Ansi compliant return r >= b->size,
- * stdlib returns 0, handle all
- */
- if ((r == -1) || (r >= (int)b->size) || (r == 0)) {
- b->size = 0;
- } else {
- b->size -= r;
- b->buf += r;
- }
-
- va_end(ap);
-
- return r;
-}
-EXPORT_SYMBOL(brcmu_bprintf);
diff --git a/drivers/staging/brcm80211/brcmutil/wifi.c b/drivers/staging/brcm80211/brcmutil/wifi.c
deleted file mode 100644
index b9ffe8682a2..00000000000
--- a/drivers/staging/brcm80211/brcmutil/wifi.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include <brcmu_wifi.h>
-
-/*
- * Verify the chanspec is using a legal set of parameters, i.e. that the
- * chanspec specified a band, bw, ctl_sb and channel and that the
- * combination could be legal given any set of circumstances.
- * RETURNS: true is the chanspec is malformed, false if it looks good.
- */
-bool brcmu_chspec_malformed(chanspec_t chanspec)
-{
- /* must be 2G or 5G band */
- if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec))
- return true;
- /* must be 20 or 40 bandwidth */
- if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec))
- return true;
-
- /* 20MHZ b/w must have no ctl sb, 40 must have a ctl sb */
- if (CHSPEC_IS20(chanspec)) {
- if (!CHSPEC_SB_NONE(chanspec))
- return true;
- } else {
- if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec))
- return true;
- }
-
- return false;
-}
-EXPORT_SYMBOL(brcmu_chspec_malformed);
-
-/*
- * This function returns the channel number that control traffic is being sent on, for legacy
- * channels this is just the channel number, for 40MHZ channels it is the upper or lowre 20MHZ
- * sideband depending on the chanspec selected
- */
-u8 brcmu_chspec_ctlchan(chanspec_t chspec)
-{
- u8 ctl_chan;
-
- /* Is there a sideband ? */
- if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
- return CHSPEC_CHANNEL(chspec);
- } else {
- /* we only support 40MHZ with sidebands */
- /* chanspec channel holds the centre frequency, use that and the
- * side band information to reconstruct the control channel number
- */
- if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) {
- /* control chan is the upper 20 MHZ SB of the 40MHZ channel */
- ctl_chan = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
- } else {
- /* control chan is the lower 20 MHZ SB of the 40MHZ channel */
- ctl_chan = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
- }
- }
-
- return ctl_chan;
-}
-EXPORT_SYMBOL(brcmu_chspec_ctlchan);
-
-/*
- * Return the channel number for a given frequency and base frequency.
- * The returned channel number is relative to the given base frequency.
- * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
- * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
- *
- * Frequency is specified in MHz.
- * The base frequency is specified as (start_factor * 500 kHz).
- * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
- * 2.4 GHz and 5 GHz bands.
- *
- * The returned channel will be in the range [1, 14] in the 2.4 GHz band
- * and [0, 200] otherwise.
- * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
- * frequency is not a 2.4 GHz channel, or if the frequency is not and even
- * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
- *
- * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
- */
-int brcmu_mhz2channel(uint freq, uint start_factor)
-{
- int ch = -1;
- uint base;
- int offset;
-
- /* take the default channel start frequency */
- if (start_factor == 0) {
- if (freq >= 2400 && freq <= 2500)
- start_factor = WF_CHAN_FACTOR_2_4_G;
- else if (freq >= 5000 && freq <= 6000)
- start_factor = WF_CHAN_FACTOR_5_G;
- }
-
- if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
- return 14;
-
- base = start_factor / 2;
-
- /* check that the frequency is in 1GHz range of the base */
- if ((freq < base) || (freq > base + 1000))
- return -1;
-
- offset = freq - base;
- ch = offset / 5;
-
- /* check that frequency is a 5MHz multiple from the base */
- if (offset != (ch * 5))
- return -1;
-
- /* restricted channel range check for 2.4G */
- if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
- return -1;
-
- return ch;
-}
-EXPORT_SYMBOL(brcmu_mhz2channel);
diff --git a/drivers/staging/brcm80211/include/brcmu_utils.h b/drivers/staging/brcm80211/include/brcmu_utils.h
deleted file mode 100644
index 2d54cc5f4b1..00000000000
--- a/drivers/staging/brcm80211/include/brcmu_utils.h
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCMU_UTILS_H_
-#define _BRCMU_UTILS_H_
-
-#include <linux/skbuff.h>
-
-/* Buffer structure for collecting string-formatted data
-* using brcmu_bprintf() API.
-* Use brcmu_binit() to initialize before use
-*/
-
-struct brcmu_strbuf {
- char *buf; /* pointer to current position in origbuf */
- unsigned int size; /* current (residual) size in bytes */
- char *origbuf; /* unmodified pointer to orignal buffer */
- unsigned int origsize; /* unmodified orignal buffer size in bytes */
-};
-
-/*
- * Spin at most 'us' microseconds while 'exp' is true.
- * Caller should explicitly test 'exp' when this completes
- * and take appropriate error action if 'exp' is still true.
- */
-#define SPINWAIT(exp, us) { \
- uint countdown = (us) + 9; \
- while ((exp) && (countdown >= 10)) {\
- udelay(10); \
- countdown -= 10; \
- } \
-}
-
-/* osl multi-precedence packet queue */
-#ifndef PKTQ_LEN_DEFAULT
-#define PKTQ_LEN_DEFAULT 128 /* Max 128 packets */
-#endif
-#ifndef PKTQ_MAX_PREC
-#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */
-#endif
-
-struct pktq_prec {
- struct sk_buff *head; /* first packet to dequeue */
- struct sk_buff *tail; /* last packet to dequeue */
- u16 len; /* number of queued packets */
- u16 max; /* maximum number of queued packets */
-};
-
-/* multi-priority pkt queue */
-struct pktq {
- u16 num_prec; /* number of precedences in use */
- u16 hi_prec; /* rapid dequeue hint (>= highest non-empty prec) */
- u16 max; /* total max packets */
- u16 len; /* total number of packets */
- /*
- * q array must be last since # of elements can be either
- * PKTQ_MAX_PREC or 1
- */
- struct pktq_prec q[PKTQ_MAX_PREC];
-};
-
-/* fn(pkt, arg). return true if pkt belongs to if */
-typedef bool(*ifpkt_cb_t) (struct sk_buff *, void *);
-
-/* operations on a specific precedence in packet queue */
-
-#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max))
-#define pktq_plen(pq, prec) ((pq)->q[prec].len)
-#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len)
-#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max)
-#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0)
-
-#define pktq_ppeek(pq, prec) ((pq)->q[prec].head)
-#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail)
-
-extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
- struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
- struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
-
-/* packet primitives */
-extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
-extern void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
-
-/* Empty the queue at particular precedence level */
-extern void brcmu_pktq_pflush(struct pktq *pq, int prec,
- bool dir, ifpkt_cb_t fn, void *arg);
-
-/* operations on a set of precedences in packet queue */
-
-extern int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
-extern struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
- int *prec_out);
-
-/* operations on packet queue as a whole */
-
-#define pktq_len(pq) ((int)(pq)->len)
-#define pktq_max(pq) ((int)(pq)->max)
-#define pktq_avail(pq) ((int)((pq)->max - (pq)->len))
-#define pktq_full(pq) ((pq)->len >= (pq)->max)
-#define pktq_empty(pq) ((pq)->len == 0)
-
-/* operations for single precedence queues */
-#define pktenq(pq, p) brcmu_pktq_penq(((struct pktq *)pq), 0, (p))
-#define pktenq_head(pq, p)\
- brcmu_pktq_penq_head(((struct pktq *)pq), 0, (p))
-#define pktdeq(pq) brcmu_pktq_pdeq(((struct pktq *)pq), 0)
-#define pktdeq_tail(pq) brcmu_pktq_pdeq_tail(((struct pktq *)pq), 0)
-#define pktqinit(pq, len) brcmu_pktq_init(((struct pktq *)pq), 1, len)
-
-extern void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
-/* prec_out may be NULL if caller is not interested in return value */
-extern struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
-extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
- ifpkt_cb_t fn, void *arg);
-
-/* externs */
-/* packet */
-extern uint brcmu_pktfrombuf(struct sk_buff *p,
- uint offset, int len, unsigned char *buf);
-extern uint brcmu_pkttotlen(struct sk_buff *p);
-
-/* ethernet address */
-extern int brcmu_ether_atoe(char *p, u8 *ea);
-
-/* ip address */
-struct ipv4_addr;
-
-#ifdef BCMDBG
-extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
-#else
-#define brcmu_prpkt(a, b)
-#endif /* BCMDBG */
-
-/* Support for sharing code across in-driver iovar implementations.
- * The intent is that a driver use this structure to map iovar names
- * to its (private) iovar identifiers, and the lookup function to
- * find the entry. Macros are provided to map ids and get/set actions
- * into a single number space for a switch statement.
- */
-
-/* iovar structure */
-struct brcmu_iovar {
- const char *name; /* name for lookup and display */
- u16 varid; /* id for switch */
- u16 flags; /* driver-specific flag bits */
- u16 type; /* base type of argument */
- u16 minlen; /* min length for buffer vars */
-};
-
-/* varid definitions are per-driver, may use these get/set bits */
-
-/* IOVar action bits for id mapping */
-#define IOV_GET 0 /* Get an iovar */
-#define IOV_SET 1 /* Set an iovar */
-
-/* Varid to actionid mapping */
-#define IOV_GVAL(id) ((id)*2)
-#define IOV_SVAL(id) (((id)*2)+IOV_SET)
-#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET)
-#define IOV_ID(actionid) (actionid >> 1)
-
-extern const struct
-brcmu_iovar *brcmu_iovar_lookup(const struct brcmu_iovar *table,
- const char *name);
-extern int brcmu_iovar_lencheck(const struct brcmu_iovar *table, void *arg,
- int len, bool set);
-
-/* Base type definitions */
-#define IOVT_VOID 0 /* no value (implictly set only) */
-#define IOVT_BOOL 1 /* any value ok (zero/nonzero) */
-#define IOVT_INT8 2 /* integer values are range-checked */
-#define IOVT_UINT8 3 /* unsigned int 8 bits */
-#define IOVT_INT16 4 /* int 16 bits */
-#define IOVT_UINT16 5 /* unsigned int 16 bits */
-#define IOVT_INT32 6 /* int 32 bits */
-#define IOVT_UINT32 7 /* unsigned int 32 bits */
-#define IOVT_BUFFER 8 /* buffer is size-checked as per minlen */
-#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
-
-/* ** driver/apps-shared section ** */
-
-#define BCME_STRLEN 64 /* Max string length for BCM errors */
-
-#ifndef ABS
-#define ABS(a) (((a) < 0) ? -(a) : (a))
-#endif /* ABS */
-
-#define CEIL(x, y) (((x) + ((y)-1)) / (y))
-#define ISPOWEROF2(x) ((((x)-1)&(x)) == 0)
-
-/* map physical to virtual I/O */
-#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), \
- (unsigned long)(size))
-
-/* the largest reasonable packet buffer driver uses for ethernet MTU in bytes */
-#define PKTBUFSZ 2048
-
-#define OSL_SYSUPTIME() ((u32)jiffies * (1000 / HZ))
-
-#ifndef setbit
-#ifndef NBBY /* the BSD family defines NBBY */
-#define NBBY 8 /* 8 bits per byte */
-#endif /* #ifndef NBBY */
-#define setbit(a, i) (((u8 *)a)[(i)/NBBY] |= 1<<((i)%NBBY))
-#define clrbit(a, i) (((u8 *)a)[(i)/NBBY] &= ~(1<<((i)%NBBY)))
-#define isset(a, i) (((const u8 *)a)[(i)/NBBY] & (1<<((i)%NBBY)))
-#define isclr(a, i) ((((const u8 *)a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0)
-#endif /* setbit */
-
-#define NBITS(type) (sizeof(type) * 8)
-#define NBITVAL(nbits) (1 << (nbits))
-#define MAXBITVAL(nbits) ((1 << (nbits)) - 1)
-#define NBITMASK(nbits) MAXBITVAL(nbits)
-#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8)
-
-/* basic mux operation - can be optimized on several architectures */
-#define MUX(pred, true, false) ((pred) ? (true) : (false))
-
-/* modulo inc/dec - assumes x E [0, bound - 1] */
-#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
-#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
-
-/* modulo inc/dec, bound = 2^k */
-#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
-#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
-
-/* modulo add/sub - assumes x, y E [0, bound - 1] */
-#define MODADD(x, y, bound) \
- MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
-#define MODSUB(x, y, bound) \
- MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
-
-/* module add/sub, bound = 2^k */
-#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
-#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
-
-/* crc defines */
-#define CRC8_INIT_VALUE 0xff /* Initial CRC8 checksum value */
-#define CRC8_GOOD_VALUE 0x9f /* Good final CRC8 checksum value */
-#define CRC16_INIT_VALUE 0xffff /* Initial CRC16 checksum value */
-#define CRC16_GOOD_VALUE 0xf0b8 /* Good final CRC16 checksum value */
-
-/* brcmu_format_flags() bit description structure */
-struct brcmu_bit_desc {
- u32 bit;
- const char *name;
-};
-
-/* tag_ID/length/value_buffer tuple */
-struct brcmu_tlv {
- u8 id;
- u8 len;
- u8 data[1];
-};
-
-#define ETHER_ADDR_STR_LEN 18 /* 18-bytes of Ethernet address buffer length */
-
-/* externs */
-/* crc */
-extern u8 brcmu_crc8(u8 *p, uint nbytes, u8 crc);
-
-/* format/print */
-#if defined(BCMDBG)
-extern int brcmu_format_flags(const struct brcmu_bit_desc *bd, u32 flags,
- char *buf, int len);
-extern int brcmu_format_hex(char *str, const void *bytes, int len);
-#endif
-
-extern char *brcmu_chipname(uint chipid, char *buf, uint len);
-
-extern struct brcmu_tlv *brcmu_parse_tlvs(void *buf, int buflen,
- uint key);
-
-/* power conversion */
-extern u16 brcmu_qdbm_to_mw(u8 qdbm);
-extern u8 brcmu_mw_to_qdbm(u16 mw);
-
-extern void brcmu_binit(struct brcmu_strbuf *b, char *buf, uint size);
-extern int brcmu_bprintf(struct brcmu_strbuf *b, const char *fmt, ...);
-
-extern uint brcmu_mkiovar(char *name, char *data, uint datalen,
- char *buf, uint len);
-extern uint brcmu_bitcount(u8 *bitmap, uint bytelength);
-
-#endif /* _BRCMU_UTILS_H_ */
diff --git a/drivers/staging/brcm80211/include/brcmu_wifi.h b/drivers/staging/brcm80211/include/brcmu_wifi.h
deleted file mode 100644
index fde592bd917..00000000000
--- a/drivers/staging/brcm80211/include/brcmu_wifi.h
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCMU_WIFI_H_
-#define _BRCMU_WIFI_H_
-
-#include <linux/if_ether.h> /* for ETH_ALEN */
-#include <linux/ieee80211.h> /* for WLAN_PMKID_LEN */
-
-/* A chanspec holds the channel number, band, bandwidth and control sideband */
-typedef u16 chanspec_t;
-
-/* channel defines */
-#define CH_UPPER_SB 0x01
-#define CH_LOWER_SB 0x02
-#define CH_EWA_VALID 0x04
-#define CH_20MHZ_APART 4
-#define CH_10MHZ_APART 2
-#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
-#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
-#define BRCM_MAX_2G_CHANNEL CH_MAX_2G_CHANNEL /* legacy define */
-#define MAXCHANNEL 224 /* max # supported channels. The max channel no is 216,
- * this is that + 1 rounded up to a multiple of NBBY (8).
- * DO NOT MAKE it > 255: channels are u8's all over
- */
-
-#define WL_CHANSPEC_CHAN_MASK 0x00ff
-#define WL_CHANSPEC_CHAN_SHIFT 0
-
-#define WL_CHANSPEC_CTL_SB_MASK 0x0300
-#define WL_CHANSPEC_CTL_SB_SHIFT 8
-#define WL_CHANSPEC_CTL_SB_LOWER 0x0100
-#define WL_CHANSPEC_CTL_SB_UPPER 0x0200
-#define WL_CHANSPEC_CTL_SB_NONE 0x0300
-
-#define WL_CHANSPEC_BW_MASK 0x0C00
-#define WL_CHANSPEC_BW_SHIFT 10
-#define WL_CHANSPEC_BW_10 0x0400
-#define WL_CHANSPEC_BW_20 0x0800
-#define WL_CHANSPEC_BW_40 0x0C00
-
-#define WL_CHANSPEC_BAND_MASK 0xf000
-#define WL_CHANSPEC_BAND_SHIFT 12
-#define WL_CHANSPEC_BAND_5G 0x1000
-#define WL_CHANSPEC_BAND_2G 0x2000
-#define INVCHANSPEC 255
-
-/* used to calculate the chan_freq = chan_factor * 500Mhz + 5 * chan_number */
-#define WF_CHAN_FACTOR_2_4_G 4814 /* 2.4 GHz band, 2407 MHz */
-#define WF_CHAN_FACTOR_5_G 10000 /* 5 GHz band, 5000 MHz */
-#define WF_CHAN_FACTOR_4_G 8000 /* 4.9 GHz band for Japan */
-
-/* channel defines */
-#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? ((channel) - CH_10MHZ_APART) : 0)
-#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
- ((channel) + CH_10MHZ_APART) : 0)
-#define CHSPEC_BANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : \
- BAND_2G_INDEX)
-#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
- WL_CHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
- WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
-#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
- ((channel) + CH_20MHZ_APART) : 0)
-#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
- ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
- ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
- WL_CHANSPEC_BAND_5G))
-#define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK))
-#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
-
-#ifdef WL11N_20MHZONLY
-
-#define CHSPEC_CTL_SB(chspec) WL_CHANSPEC_CTL_SB_NONE
-#define CHSPEC_BW(chspec) WL_CHANSPEC_BW_20
-#define CHSPEC_IS10(chspec) 0
-#define CHSPEC_IS20(chspec) 1
-#ifndef CHSPEC_IS40
-#define CHSPEC_IS40(chspec) 0
-#endif
-
-#else /* !WL11N_20MHZONLY */
-
-#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK)
-#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK)
-#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
-#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
-#ifndef CHSPEC_IS40
-#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
-#endif
-
-#endif /* !WL11N_20MHZONLY */
-
-#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
-#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
-#define CHSPEC_SB_NONE(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_NONE)
-#define CHSPEC_SB_UPPER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER)
-#define CHSPEC_SB_LOWER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER)
-#define CHSPEC_CTL_CHAN(chspec) ((CHSPEC_SB_LOWER(chspec)) ? \
- (LOWER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))) : \
- (UPPER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))))
-#define CHSPEC2BAND(chspec) (CHSPEC_IS5G(chspec) ? BRCM_BAND_5G : BRCM_BAND_2G)
-
-#define CHANSPEC_STR_LEN 8
-
-/* defined rate in 500kbps */
-#define BRCM_MAXRATE 108 /* in 500kbps units */
-#define BRCM_RATE_1M 2 /* in 500kbps units */
-#define BRCM_RATE_2M 4 /* in 500kbps units */
-#define BRCM_RATE_5M5 11 /* in 500kbps units */
-#define BRCM_RATE_11M 22 /* in 500kbps units */
-#define BRCM_RATE_6M 12 /* in 500kbps units */
-#define BRCM_RATE_9M 18 /* in 500kbps units */
-#define BRCM_RATE_12M 24 /* in 500kbps units */
-#define BRCM_RATE_18M 36 /* in 500kbps units */
-#define BRCM_RATE_24M 48 /* in 500kbps units */
-#define BRCM_RATE_36M 72 /* in 500kbps units */
-#define BRCM_RATE_48M 96 /* in 500kbps units */
-#define BRCM_RATE_54M 108 /* in 500kbps units */
-
-#define BRCM_2G_25MHZ_OFFSET 5 /* 2.4GHz band channel offset */
-
-#define MCSSET_LEN 16
-
-#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0)
-
-/*
- * Verify the chanspec is using a legal set of parameters, i.e. that the
- * chanspec specified a band, bw, ctl_sb and channel and that the
- * combination could be legal given any set of circumstances.
- * RETURNS: true is the chanspec is malformed, false if it looks good.
- */
-extern bool brcmu_chspec_malformed(chanspec_t chanspec);
-
-/*
- * This function returns the channel number that control traffic is being sent on, for legacy
- * channels this is just the channel number, for 40MHZ channels it is the upper or lowre 20MHZ
- * sideband depending on the chanspec selected
- */
-extern u8 brcmu_chspec_ctlchan(chanspec_t chspec);
-
-/*
- * Return the channel number for a given frequency and base frequency.
- * The returned channel number is relative to the given base frequency.
- * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
- * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
- *
- * Frequency is specified in MHz.
- * The base frequency is specified as (start_factor * 500 kHz).
- * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
- * 2.4 GHz and 5 GHz bands.
- *
- * The returned channel will be in the range [1, 14] in the 2.4 GHz band
- * and [0, 200] otherwise.
- * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
- * frequency is not a 2.4 GHz channel, or if the frequency is not and even
- * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
- *
- * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
- */
-extern int brcmu_mhz2channel(uint freq, uint start_factor);
-
-/* Enumerate crypto algorithms */
-#define CRYPTO_ALGO_OFF 0
-#define CRYPTO_ALGO_WEP1 1
-#define CRYPTO_ALGO_TKIP 2
-#define CRYPTO_ALGO_WEP128 3
-#define CRYPTO_ALGO_AES_CCM 4
-#define CRYPTO_ALGO_AES_RESERVED1 5
-#define CRYPTO_ALGO_AES_RESERVED2 6
-#define CRYPTO_ALGO_NALG 7
-
-/* wireless security bitvec */
-#define WEP_ENABLED 0x0001
-#define TKIP_ENABLED 0x0002
-#define AES_ENABLED 0x0004
-#define WSEC_SWFLAG 0x0008
-#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */
-
-/* WPA authentication mode bitvec */
-#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
-#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
-#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
-#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
-#define WPA_AUTH_RESERVED1 0x0008
-#define WPA_AUTH_RESERVED2 0x0010
- /* #define WPA_AUTH_8021X 0x0020 *//* 802.1x, reserved */
-#define WPA2_AUTH_RESERVED1 0x0020
-#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
-#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
-#define WPA2_AUTH_RESERVED3 0x0200
-#define WPA2_AUTH_RESERVED4 0x0400
-#define WPA2_AUTH_RESERVED5 0x0800
-
-/* pmkid */
-#define MAXPMKID 16
-
-#define DOT11_DEFAULT_RTS_LEN 2347
-#define DOT11_DEFAULT_FRAG_LEN 2346
-
-#define DOT11_ICV_AES_LEN 8
-#define DOT11_QOS_LEN 2
-#define DOT11_IV_MAX_LEN 8
-#define DOT11_A4_HDR_LEN 30
-
-#define HT_CAP_RX_STBC_NO 0x0
-#define HT_CAP_RX_STBC_ONE_STREAM 0x1
-
-typedef struct _pmkid {
- u8 BSSID[ETH_ALEN];
- u8 PMKID[WLAN_PMKID_LEN];
-} pmkid_t;
-
-typedef struct _pmkid_list {
- u32 npmkid;
- pmkid_t pmkid[1];
-} pmkid_list_t;
-
-typedef struct _pmkid_cand {
- u8 BSSID[ETH_ALEN];
- u8 preauth;
-} pmkid_cand_t;
-
-typedef struct _pmkid_cand_list {
- u32 npmkid_cand;
- pmkid_cand_t pmkid_cand[1];
-} pmkid_cand_list_t;
-
-typedef u8 ac_bitmap_t;
-
-#endif /* _BRCMU_WIFI_H_ */
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 5e65dde5845..4c77e508066 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -1244,11 +1244,10 @@ config COMEDI_DT9812
called dt9812.
config COMEDI_USBDUX
- tristate "ITL USBDUX support"
+ tristate "ITL USB-DUX-D support"
default N
---help---
- Enable support for the University of Stirling USB DAQ and INCITE
- Technology Limited driver
+ Enable support for the Incite Technology Ltd USB-DUX-D Board
To compile this driver as a module, choose M here: the module will be
called usbdux.
@@ -1258,12 +1257,21 @@ config COMEDI_USBDUXFAST
select COMEDI_FC
default N
---help---
- Enable support for the University of Stirling USB-DUXfast and INCITE
- Technology Limited driver
+ Enable support for the Incite Technology Ltd USB-DUXfast Board
To compile this driver as a module, choose M here: the module will be
called usbduxfast.
+config COMEDI_USBDUXSIGMA
+ tristate "ITL USB-DUXsigma support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for the Incite Technology Ltd USB-DUXsigma Board
+
+ To compile this driver as a module, choose M here: the module will be
+ called usbduxsigma.
+
config COMEDI_VMK80XX
tristate "Velleman VM110/VM140 USB Board support"
default N
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index e90e3cceb5f..21d8c1c16cd 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -101,7 +101,7 @@ static int do_insn_ioctl(struct comedi_device *dev,
static int do_poll_ioctl(struct comedi_device *dev, unsigned int subd,
void *file);
-extern void do_become_nonbusy(struct comedi_device *dev,
+static void do_become_nonbusy(struct comedi_device *dev,
struct comedi_subdevice *s);
static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s);
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index 33bf1f5aad4..170da609195 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -126,6 +126,7 @@ obj-$(CONFIG_COMEDI_QUATECH_DAQP_CS) += quatech_daqp_cs.o
obj-$(CONFIG_COMEDI_DT9812) += dt9812.o
obj-$(CONFIG_COMEDI_USBDUX) += usbdux.o
obj-$(CONFIG_COMEDI_USBDUXFAST) += usbduxfast.o
+obj-$(CONFIG_COMEDI_USBDUXSIGMA) += usbduxsigma.o
obj-$(CONFIG_COMEDI_VMK80XX) += vmk80xx.o
# Comedi NI drivers
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index d23799be7ce..69334f6f64e 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -422,7 +422,7 @@ struct pci_dio_private {
unsigned short IDIFiltrHigh[8]; /* IDI's filter value high signal */
};
-static struct pci_dio_private *pci_priv = NULL; /* list of allocated cards */
+static struct pci_dio_private *pci_priv; /* list of allocated cards */
#define devpriv ((struct pci_dio_private *)dev->private)
#define this_board ((const struct dio_boardtype *)dev->board_ptr)
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index e0ac825ea58..11cdaf2a5aa 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -343,7 +343,7 @@ static int c6xdigio_pwmo_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- printk("c6xdigio_pwmo_insn_read %x\n", insn->n);
+ printk(KERN_DEBUG "c6xdigio_pwmo_insn_read %x\n", insn->n);
return insn->n;
}
@@ -439,9 +439,9 @@ static int c6xdigio_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
iobase = it->options[0];
- printk("comedi%d: c6xdigio: 0x%04lx\n", dev->minor, iobase);
+ printk(KERN_DEBUG "comedi%d: c6xdigio: 0x%04lx\n", dev->minor, iobase);
if (!request_region(iobase, C6XDIGIO_SIZE, "c6xdigio")) {
- printk("comedi%d: I/O port conflict\n", dev->minor);
+ printk(KERN_ERR "comedi%d: I/O port conflict\n", dev->minor);
return -EIO;
}
dev->iobase = iobase;
@@ -456,9 +456,10 @@ static int c6xdigio_attach(struct comedi_device *dev,
irq = it->options[1];
if (irq > 0)
- printk("comedi%d: irq = %u ignored\n", dev->minor, irq);
+ printk(KERN_DEBUG "comedi%d: irq = %u ignored\n",
+ dev->minor, irq);
else if (irq == 0)
- printk("comedi%d: no irq\n", dev->minor);
+ printk(KERN_DEBUG "comedi%d: no irq\n", dev->minor);
s = dev->subdevices + 0;
/* pwm output subdevice */
@@ -503,7 +504,7 @@ static int c6xdigio_detach(struct comedi_device *dev)
{
/* board_halt(dev); may not need this */
- printk("comedi%d: c6xdigio: remove\n", dev->minor);
+ printk(KERN_DEBUG "comedi%d: c6xdigio: remove\n", dev->minor);
if (dev->iobase)
release_region(dev->iobase, C6XDIGIO_SIZE);
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 8a1b8a7fa15..e171c56112d 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -370,7 +370,8 @@ static int das16cs_ai_cmdtest(struct comedi_device *dev,
if (err)
return 1;
- /* step 2: make sure trigger sources are unique and mutually compatible */
+ /* step 2: make sure trigger sources are unique and
+ * mutually compatible */
/* note that mutual compatibility is not an issue here */
if (cmd->scan_begin_src != TRIG_TIMER &&
@@ -508,7 +509,7 @@ static int das16cs_ao_winsn(struct comedi_device *dev,
else
status1 |= 0x0008;
-/* printk("0x%04x\n",status1);*/
+/* printk("0x%04x\n",status1);*/
outw(status1, dev->iobase + 4);
udelay(1);
diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c
index 3330b3d53e8..647c228abfb 100644
--- a/drivers/staging/comedi/drivers/ni_atmio.c
+++ b/drivers/staging/comedi/drivers/ni_atmio.c
@@ -268,8 +268,9 @@ static const struct ni_board_struct ni_boards[] = {
}
};
-static const int ni_irqpin[] =
- { -1, -1, -1, 0, 1, 2, -1, 3, -1, -1, 4, 5, 6, -1, -1, 7 };
+static const int ni_irqpin[] = {
+ -1, -1, -1, 0, 1, 2, -1, 3, -1, -1, 4, 5, 6, -1, -1, 7
+};
#define interrupt_pin(a) (ni_irqpin[(a)])
@@ -279,7 +280,10 @@ static const int ni_irqpin[] =
struct ni_private {
struct pnp_dev *isapnp_dev;
- NI_PRIVATE_COMMON};
+ NI_PRIVATE_COMMON
+
+};
+
#define devpriv ((struct ni_private *)dev->private)
/* How we access registers */
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 7611def97d0..721b2be2250 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -244,7 +244,7 @@ static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd);
#ifdef CONFIG_ISA_DMA_API
static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd);
#endif
-#ifdef CONFIG_COMEDI_PCI
+#ifdef CONFIG_COMEDI_PCI_DRIVERS
static int labpc_find_device(struct comedi_device *dev, int bus, int slot);
#endif
static int labpc_dio_mem_callback(int dir, int port, int data,
@@ -461,7 +461,7 @@ static const struct labpc_board_struct labpc_boards[] = {
.ai_scan_up = 0,
.memory_mapped_io = 0,
},
-#ifdef CONFIG_COMEDI_PCI
+#ifdef CONFIG_COMEDI_PCI_DRIVERS
{
.name = "pci-1200",
.device_id = 0x161,
@@ -505,14 +505,14 @@ static struct comedi_driver driver_labpc = {
.offset = sizeof(struct labpc_board_struct),
};
-#ifdef CONFIG_COMEDI_PCI
+#ifdef CONFIG_COMEDI_PCI_DRIVERS
static DEFINE_PCI_DEVICE_TABLE(labpc_pci_table) = {
{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x161)},
{0}
};
MODULE_DEVICE_TABLE(pci, labpc_pci_table);
-#endif /* CONFIG_COMEDI_PCI */
+#endif /* CONFIG_COMEDI_PCI_DRIVERS */
static inline int labpc_counter_load(struct comedi_device *dev,
unsigned long base_address,
@@ -722,7 +722,7 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
unsigned long iobase = 0;
unsigned int irq = 0;
unsigned int dma_chan = 0;
-#ifdef CONFIG_COMEDI_PCI
+#ifdef CONFIG_COMEDI_PCI_DRIVERS
int retval;
#endif
@@ -744,7 +744,7 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
#endif
break;
case pci_bustype:
-#ifdef CONFIG_COMEDI_PCI
+#ifdef CONFIG_COMEDI_PCI_DRIVERS
retval = labpc_find_device(dev, it->options[0], it->options[1]);
if (retval < 0)
return retval;
@@ -774,7 +774,7 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
}
/* adapted from ni_pcimio for finding mite based boards (pc-1200) */
-#ifdef CONFIG_COMEDI_PCI
+#ifdef CONFIG_COMEDI_PCI_DRIVERS
static int labpc_find_device(struct comedi_device *dev, int bus, int slot)
{
struct mite_struct *mite;
@@ -822,7 +822,7 @@ int labpc_common_detach(struct comedi_device *dev)
free_irq(dev->irq, dev);
if (thisboard->bustype == isa_bustype && dev->iobase)
release_region(dev->iobase, LABPC_SIZE);
-#ifdef CONFIG_COMEDI_PCI
+#ifdef CONFIG_COMEDI_PCI_DRIVERS
if (devpriv->mite)
mite_unsetup(devpriv->mite);
#endif
@@ -2137,7 +2137,7 @@ static void write_caldac(struct comedi_device *dev, unsigned int channel,
devpriv->write_byte(devpriv->command5_bits, dev->iobase + COMMAND5_REG);
}
-#ifdef CONFIG_COMEDI_PCI
+#ifdef CONFIG_COMEDI_PCI_DRIVERS
static int __devinit driver_labpc_pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 8dd3a01d48d..045a4c00f34 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -688,7 +688,7 @@ static void ni_pcidio_print_status(unsigned int flags)
static void debug_int(struct comedi_device *dev)
{
int a, b;
- static int n_int = 0;
+ static int n_int;
struct timeval tv;
do_gettimeofday(&tv);
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index f2e88e57d55..3ad04aaa1e3 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -371,7 +371,7 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
iobase = it->options[0];
irq[0] = it->options[1];
- printk("comedi%d: %s: io: %lx ", dev->minor, driver.driver_name,
+ printk(KERN_INFO "comedi%d: %s: io: %lx ", dev->minor, driver.driver_name,
iobase);
dev->iobase = iobase;
@@ -379,7 +379,7 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!iobase || !request_region(iobase,
thisboard->total_iosize,
driver.driver_name)) {
- printk("I/O port conflict\n");
+ printk(KERN_ERR "I/O port conflict\n");
return -EIO;
}
@@ -394,7 +394,7 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* convenient macro defined in comedidev.h.
*/
if (alloc_private(dev, sizeof(struct pcmmio_private)) < 0) {
- printk("cannot allocate private data structure\n");
+ printk(KERN_ERR "cannot allocate private data structure\n");
return -ENOMEM;
}
@@ -417,7 +417,7 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
kcalloc(n_subdevs, sizeof(struct pcmmio_subdev_private),
GFP_KERNEL);
if (!devpriv->sprivs) {
- printk("cannot allocate subdevice private data structures\n");
+ printk(KERN_ERR "cannot allocate subdevice private data structures\n");
return -ENOMEM;
}
/*
@@ -427,7 +427,7 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* Allocate 1 AI + 1 AO + 2 DIO subdevs (24 lines per DIO)
*/
if (alloc_subdevices(dev, n_subdevs) < 0) {
- printk("cannot allocate subdevice data structures\n");
+ printk(KERN_ERR "cannot allocate subdevice data structures\n");
return -ENOMEM;
}
@@ -557,14 +557,14 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
*/
if (irq[0]) {
- printk("irq: %u ", irq[0]);
+ printk(KERN_DEBUG "irq: %u ", irq[0]);
if (thisboard->dio_num_asics == 2 && irq[1])
- printk("second ASIC irq: %u ", irq[1]);
+ printk(KERN_DEBUG "second ASIC irq: %u ", irq[1]);
} else {
- printk("(IRQ mode disabled) ");
+ printk(KERN_INFO "(IRQ mode disabled) ");
}
- printk("attached\n");
+ printk(KERN_INFO "attached\n");
return 1;
}
@@ -581,7 +581,7 @@ static int pcmmio_detach(struct comedi_device *dev)
{
int i;
- printk("comedi%d: %s: remove\n", dev->minor, driver.driver_name);
+ printk(KERN_INFO "comedi%d: %s: remove\n", dev->minor, driver.driver_name);
if (dev->iobase)
release_region(dev->iobase, thisboard->total_iosize);
@@ -622,7 +622,7 @@ static int pcmmio_dio_insn_bits(struct comedi_device *dev,
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk("write mask: %08x data: %08x\n", data[0], data[1]);
+ printk(KERN_DEBUG "write mask: %08x data: %08x\n", data[0], data[1]);
#endif
s->state = 0;
@@ -644,9 +644,9 @@ static int pcmmio_dio_insn_bits(struct comedi_device *dev,
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
printk
- ("byte %d wmb %02x db %02x offset %02d io %04x, data_in %02x ",
- byte_no, (unsigned)write_mask_byte, (unsigned)data_byte,
- offset, ioaddr, (unsigned)byte);
+ (KERN_DEBUG "byte %d wmb %02x db %02x offset %02d io %04x,"
+ " data_in %02x ", byte_no, (unsigned)write_mask_byte,
+ (unsigned)data_byte, offset, ioaddr, (unsigned)byte);
#endif
if (write_mask_byte) {
@@ -663,7 +663,7 @@ static int pcmmio_dio_insn_bits(struct comedi_device *dev,
}
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk("data_out_byte %02x\n", (unsigned)byte);
+ printk(KERN_DEBUG "data_out_byte %02x\n", (unsigned)byte);
#endif
/* save the digital input lines for this byte.. */
s->state |= ((unsigned int)byte) << offset;
@@ -674,7 +674,7 @@ static int pcmmio_dio_insn_bits(struct comedi_device *dev,
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk("s->state %08x data_out %08x\n", s->state, data[1]);
+ printk(KERN_DEBUG "s->state %08x data_out %08x\n", s->state, data[1]);
#endif
return 2;
@@ -886,7 +886,7 @@ static irqreturn_t interrupt_pcmmio(int irq, void *d)
* with commands..
*/
printk
- ("PCMMIO DEBUG: got edge detect interrupt %d asic %d which_chans: %06x\n",
+ (KERN_DEBUG "got edge detect interrupt %d asic %d which_chans: %06x\n",
irq, asic, triggered);
for (s = dev->subdevices + 2;
s < dev->subdevices + dev->n_subdevices;
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
new file mode 100644
index 00000000000..a8fea9a9173
--- /dev/null
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -0,0 +1,2880 @@
+#define DRIVER_VERSION "v0.5"
+#define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
+#define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"
+/*
+ comedi/drivers/usbdux.c
+ Copyright (C) 2011 Bernd Porr, Bernd.Porr@f2s.com
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ */
+/*
+Driver: usbduxsigma
+Description: University of Stirling USB DAQ & INCITE Technology Limited
+Devices: [ITL] USB-DUX (usbduxsigma.o)
+Author: Bernd Porr <BerndPorr@f2s.com>
+Updated: 21 Jul 2011
+Status: testing
+*/
+/*
+ * I must give credit here to Chris Baugher who
+ * wrote the driver for AT-MIO-16d. I used some parts of this
+ * driver. I also must give credits to David Brownell
+ * who supported me with the USB development.
+ *
+ * Note: the raw data from the A/D converter is 24 bit big endian
+ * anything else is little endian to/from the dux board
+ *
+ *
+ * Revision history:
+ * 0.1: inital version
+ * 0.2: all basic functions implemented, digital I/O only for one port
+ * 0.3: proper vendor ID and driver name
+ * 0.4: fixed D/A voltage range
+ * 0.5: various bug fixes, health check at startup
+ */
+
+/* generates loads of debug info */
+/* #define NOISY_DUX_DEBUGBUG */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/usb.h>
+#include <linux/fcntl.h>
+#include <linux/compiler.h>
+#include <linux/firmware.h>
+#include "comedi_fc.h"
+#include "../comedidev.h"
+
+#define BOARDNAME "usbduxsigma"
+
+/* timeout for the USB-transfer in ms*/
+#define BULK_TIMEOUT 1000
+
+/* constants for "firmware" upload and download */
+#define USBDUXSUB_FIRMWARE 0xA0
+#define VENDOR_DIR_IN 0xC0
+#define VENDOR_DIR_OUT 0x40
+
+/* internal addresses of the 8051 processor */
+#define USBDUXSUB_CPUCS 0xE600
+
+/*
+ * the minor device number, major is 180 only for debugging purposes and to
+ * upload special firmware (programming the eeprom etc) which is not
+ * compatible with the comedi framwork
+ */
+#define USBDUXSUB_MINOR 32
+
+/* max lenghth of the transfer-buffer for software upload */
+#define TB_LEN 0x2000
+
+/* Input endpoint number: ISO/IRQ */
+#define ISOINEP 6
+
+/* Output endpoint number: ISO/IRQ */
+#define ISOOUTEP 2
+
+/* This EP sends DUX commands to USBDUX */
+#define COMMAND_OUT_EP 1
+
+/* This EP receives the DUX commands from USBDUX */
+#define COMMAND_IN_EP 8
+
+/* Output endpoint for PWM */
+#define PWM_EP 4
+
+/* 300Hz max frequ under PWM */
+#define MIN_PWM_PERIOD ((long)(1E9/300))
+
+/* Default PWM frequency */
+#define PWM_DEFAULT_PERIOD ((long)(1E9/100))
+
+/* Number of channels (16 AD and offset)*/
+#define NUMCHANNELS 16
+
+/* Size of one A/D value */
+#define SIZEADIN ((sizeof(int32_t)))
+
+/*
+ * Size of the async input-buffer IN BYTES, the DIO state is transmitted
+ * as the first byte.
+ */
+#define SIZEINBUF (((NUMCHANNELS+1)*SIZEADIN))
+
+/* 16 bytes. */
+#define SIZEINSNBUF 16
+
+/* Number of DA channels */
+#define NUMOUTCHANNELS 8
+
+/* size of one value for the D/A converter: channel and value */
+#define SIZEDAOUT ((sizeof(uint8_t)+sizeof(int16_t)))
+
+/*
+ * Size of the output-buffer in bytes
+ * Actually only the first 4 triplets are used but for the
+ * high speed mode we need to pad it to 8 (microframes).
+ */
+#define SIZEOUTBUF ((8*SIZEDAOUT))
+
+/*
+ * Size of the buffer for the dux commands: just now max size is determined
+ * by the analogue out + command byte + panic bytes...
+ */
+#define SIZEOFDUXBUFFER ((8*SIZEDAOUT+2))
+
+/* Number of in-URBs which receive the data: min=2 */
+#define NUMOFINBUFFERSFULL 5
+
+/* Number of out-URBs which send the data: min=2 */
+#define NUMOFOUTBUFFERSFULL 5
+
+/* Number of in-URBs which receive the data: min=5 */
+/* must have more buffers due to buggy USB ctr */
+#define NUMOFINBUFFERSHIGH 10
+
+/* Number of out-URBs which send the data: min=5 */
+/* must have more buffers due to buggy USB ctr */
+#define NUMOFOUTBUFFERSHIGH 10
+
+/* Total number of usbdux devices */
+#define NUMUSBDUX 16
+
+/* Analogue in subdevice */
+#define SUBDEV_AD 0
+
+/* Analogue out subdevice */
+#define SUBDEV_DA 1
+
+/* Digital I/O */
+#define SUBDEV_DIO 2
+
+/* timer aka pwm output */
+#define SUBDEV_PWM 3
+
+/* number of retries to get the right dux command */
+#define RETRIES 10
+
+/**************************************************/
+/* comedi constants */
+static const struct comedi_lrange range_usbdux_ai_range = { 1, {
+ BIP_RANGE
+ (2.65)
+ }
+};
+
+static const struct comedi_lrange range_usbdux_ao_range = { 1, {
+ UNI_RANGE
+ (2.5),
+ }
+};
+
+/*
+ * private structure of one subdevice
+ */
+
+/*
+ * This is the structure which holds all the data of
+ * this driver one sub device just now: A/D
+ */
+struct usbduxsub {
+ /* attached? */
+ int attached;
+ /* is it associated with a subdevice? */
+ int probed;
+ /* pointer to the usb-device */
+ struct usb_device *usbdev;
+ /* actual number of in-buffers */
+ int numOfInBuffers;
+ /* actual number of out-buffers */
+ int numOfOutBuffers;
+ /* ISO-transfer handling: buffers */
+ struct urb **urbIn;
+ struct urb **urbOut;
+ /* pwm-transfer handling */
+ struct urb *urbPwm;
+ /* PWM period */
+ unsigned int pwmPeriod;
+ /* PWM internal delay for the GPIF in the FX2 */
+ uint8_t pwmDelay;
+ /* size of the PWM buffer which holds the bit pattern */
+ int sizePwmBuf;
+ /* input buffer for the ISO-transfer */
+ int32_t *inBuffer;
+ /* input buffer for single insn */
+ int8_t *insnBuffer;
+ /* output buffer for single DA outputs */
+ int16_t *outBuffer;
+ /* interface number */
+ int ifnum;
+ /* interface structure in 2.6 */
+ struct usb_interface *interface;
+ /* comedi device for the interrupt context */
+ struct comedi_device *comedidev;
+ /* is it USB_SPEED_HIGH or not? */
+ short int high_speed;
+ /* asynchronous command is running */
+ short int ai_cmd_running;
+ short int ao_cmd_running;
+ /* pwm is running */
+ short int pwm_cmd_running;
+ /* continous aquisition */
+ short int ai_continous;
+ short int ao_continous;
+ /* number of samples to acquire */
+ int ai_sample_count;
+ int ao_sample_count;
+ /* time between samples in units of the timer */
+ unsigned int ai_timer;
+ unsigned int ao_timer;
+ /* counter between aquisitions */
+ unsigned int ai_counter;
+ unsigned int ao_counter;
+ /* interval in frames/uframes */
+ unsigned int ai_interval;
+ /* D/A commands */
+ uint8_t *dac_commands;
+ /* commands */
+ uint8_t *dux_commands;
+ struct semaphore sem;
+};
+
+/*
+ * The pointer to the private usb-data of the driver is also the private data
+ * for the comedi-device. This has to be global as the usb subsystem needs
+ * global variables. The other reason is that this structure must be there
+ * _before_ any comedi command is issued. The usb subsystem must be initialised
+ * before comedi can access it.
+ */
+static struct usbduxsub usbduxsub[NUMUSBDUX];
+
+static DEFINE_SEMAPHORE(start_stop_sem);
+
+/*
+ * Stops the data acquision
+ * It should be safe to call this function from any context
+ */
+static int usbduxsub_unlink_InURBs(struct usbduxsub *usbduxsub_tmp)
+{
+ int i = 0;
+ int err = 0;
+
+ if (usbduxsub_tmp && usbduxsub_tmp->urbIn) {
+ for (i = 0; i < usbduxsub_tmp->numOfInBuffers; i++) {
+ if (usbduxsub_tmp->urbIn[i]) {
+ /* We wait here until all transfers have been
+ * cancelled. */
+ usb_kill_urb(usbduxsub_tmp->urbIn[i]);
+ }
+ dev_dbg(&usbduxsub_tmp->interface->dev,
+ "comedi: usbdux: unlinked InURB %d, err=%d\n",
+ i, err);
+ }
+ }
+ return err;
+}
+
+/*
+ * This will stop a running acquisition operation
+ * Is called from within this driver from both the
+ * interrupt context and from comedi
+ */
+static int usbdux_ai_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
+{
+ int ret = 0;
+
+ if (!this_usbduxsub) {
+ pr_err("comedi?: usbdux_ai_stop: this_usbduxsub=NULL!\n");
+ return -EFAULT;
+ }
+ dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ai_stop\n");
+
+ if (do_unlink) {
+ /* stop aquistion */
+ ret = usbduxsub_unlink_InURBs(this_usbduxsub);
+ }
+
+ this_usbduxsub->ai_cmd_running = 0;
+
+ return ret;
+}
+
+/*
+ * This will cancel a running acquisition operation.
+ * This is called by comedi but never from inside the driver.
+ */
+static int usbdux_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct usbduxsub *this_usbduxsub;
+ int res = 0;
+
+ /* force unlink of all urbs */
+ this_usbduxsub = dev->private;
+ if (!this_usbduxsub)
+ return -EFAULT;
+
+ dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ai_cancel\n");
+
+ /* prevent other CPUs from submitting new commands just now */
+ down(&this_usbduxsub->sem);
+ if (!(this_usbduxsub->probed)) {
+ up(&this_usbduxsub->sem);
+ return -ENODEV;
+ }
+ /* unlink only if the urb really has been submitted */
+ res = usbdux_ai_stop(this_usbduxsub, this_usbduxsub->ai_cmd_running);
+ up(&this_usbduxsub->sem);
+ return res;
+}
+
+/* analogue IN - interrupt service routine */
+static void usbduxsub_ai_IsocIrq(struct urb *urb)
+{
+ int i, err, n;
+ struct usbduxsub *this_usbduxsub;
+ struct comedi_device *this_comedidev;
+ struct comedi_subdevice *s;
+ int32_t v;
+ unsigned int dio_state;
+
+ /* the context variable points to the comedi device */
+ this_comedidev = urb->context;
+ /* the private structure of the subdevice is struct usbduxsub */
+ this_usbduxsub = this_comedidev->private;
+ /* subdevice which is the AD converter */
+ s = this_comedidev->subdevices + SUBDEV_AD;
+
+ /* first we test if something unusual has just happened */
+ switch (urb->status) {
+ case 0:
+ /* copy the result in the transfer buffer */
+ memcpy(this_usbduxsub->inBuffer,
+ urb->transfer_buffer, SIZEINBUF);
+ break;
+ case -EILSEQ:
+ /* error in the ISOchronous data */
+ /* we don't copy the data into the transfer buffer */
+ /* and recycle the last data byte */
+ dev_dbg(&urb->dev->dev,
+ "comedi%d: usbdux: CRC error in ISO IN stream.\n",
+ this_usbduxsub->comedidev->minor);
+
+ break;
+
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -ECONNABORTED:
+ /* happens after an unlink command */
+ if (this_usbduxsub->ai_cmd_running) {
+ /* we are still running a command */
+ /* tell this comedi */
+ s->async->events |= COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_ERROR;
+ comedi_event(this_usbduxsub->comedidev, s);
+ /* stop the transfer w/o unlink */
+ usbdux_ai_stop(this_usbduxsub, 0);
+ }
+ return;
+
+ default:
+ /* a real error on the bus */
+ /* pass error to comedi if we are really running a command */
+ if (this_usbduxsub->ai_cmd_running) {
+ dev_err(&urb->dev->dev,
+ "Non-zero urb status received in ai intr "
+ "context: %d\n", urb->status);
+ s->async->events |= COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_ERROR;
+ comedi_event(this_usbduxsub->comedidev, s);
+ /* don't do an unlink here */
+ usbdux_ai_stop(this_usbduxsub, 0);
+ }
+ return;
+ }
+
+ /*
+ * at this point we are reasonably sure that nothing dodgy has happened
+ * are we running a command?
+ */
+ if (unlikely((!(this_usbduxsub->ai_cmd_running)))) {
+ /*
+ * not running a command, do not continue execution if no
+ * asynchronous command is running in particular not resubmit
+ */
+ return;
+ }
+
+ urb->dev = this_usbduxsub->usbdev;
+
+ /* resubmit the urb */
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err < 0)) {
+ dev_err(&urb->dev->dev,
+ "comedi_: urb resubmit failed in int-context!"
+ "err=%d\n",
+ err);
+ if (err == -EL2NSYNC)
+ dev_err(&urb->dev->dev,
+ "buggy USB host controller or bug in IRQ "
+ "handler!\n");
+ s->async->events |= COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_ERROR;
+ comedi_event(this_usbduxsub->comedidev, s);
+ /* don't do an unlink here */
+ usbdux_ai_stop(this_usbduxsub, 0);
+ return;
+ }
+
+ /* get the state of the dio pins to allow external trigger */
+ dio_state = be32_to_cpu(this_usbduxsub->inBuffer[0]);
+
+ this_usbduxsub->ai_counter--;
+ if (likely(this_usbduxsub->ai_counter > 0))
+ return;
+
+ /* timer zero, transfer measurements to comedi */
+ this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;
+
+ /* test, if we transmit only a fixed number of samples */
+ if (!(this_usbduxsub->ai_continous)) {
+ /* not continous, fixed number of samples */
+ this_usbduxsub->ai_sample_count--;
+ /* all samples received? */
+ if (this_usbduxsub->ai_sample_count < 0) {
+ /* prevent a resubmit next time */
+ usbdux_ai_stop(this_usbduxsub, 0);
+ /* say comedi that the acquistion is over */
+ s->async->events |= COMEDI_CB_EOA;
+ comedi_event(this_usbduxsub->comedidev, s);
+ return;
+ }
+ }
+ /* get the data from the USB bus and hand it over to comedi */
+ n = s->async->cmd.chanlist_len;
+ for (i = 0; i < n; i++) {
+ /* transfer data, note first byte is the DIO state */
+ v = be32_to_cpu(this_usbduxsub->inBuffer[i+1]);
+ /* strip status byte */
+ v = v & 0x00ffffff;
+ /* convert to unsigned */
+ v = v ^ 0x00800000;
+ /* write the byte to the buffer */
+ err = cfc_write_array_to_buffer(s, &v, sizeof(uint32_t));
+ if (unlikely(err == 0)) {
+ /* buffer overflow */
+ usbdux_ai_stop(this_usbduxsub, 0);
+ return;
+ }
+ }
+ /* tell comedi that data is there */
+ s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
+ comedi_event(this_usbduxsub->comedidev, s);
+}
+
+static int usbduxsub_unlink_OutURBs(struct usbduxsub *usbduxsub_tmp)
+{
+ int i = 0;
+ int err = 0;
+
+ if (usbduxsub_tmp && usbduxsub_tmp->urbOut) {
+ for (i = 0; i < usbduxsub_tmp->numOfOutBuffers; i++) {
+ if (usbduxsub_tmp->urbOut[i])
+ usb_kill_urb(usbduxsub_tmp->urbOut[i]);
+
+ dev_dbg(&usbduxsub_tmp->interface->dev,
+ "comedi: usbdux: unlinked OutURB %d: res=%d\n",
+ i, err);
+ }
+ }
+ return err;
+}
+
+/* This will cancel a running acquisition operation
+ * in any context.
+ */
+static int usbdux_ao_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
+{
+ int ret = 0;
+
+ if (!this_usbduxsub)
+ return -EFAULT;
+ dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ao_cancel\n");
+
+ if (do_unlink)
+ ret = usbduxsub_unlink_OutURBs(this_usbduxsub);
+
+ this_usbduxsub->ao_cmd_running = 0;
+
+ return ret;
+}
+
+/* force unlink, is called by comedi */
+static int usbdux_ao_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct usbduxsub *this_usbduxsub = dev->private;
+ int res = 0;
+
+ if (!this_usbduxsub)
+ return -EFAULT;
+
+ /* prevent other CPUs from submitting a command just now */
+ down(&this_usbduxsub->sem);
+ if (!(this_usbduxsub->probed)) {
+ up(&this_usbduxsub->sem);
+ return -ENODEV;
+ }
+ /* unlink only if it is really running */
+ res = usbdux_ao_stop(this_usbduxsub, this_usbduxsub->ao_cmd_running);
+ up(&this_usbduxsub->sem);
+ return res;
+}
+
+static void usbduxsub_ao_IsocIrq(struct urb *urb)
+{
+ int i, ret;
+ uint8_t *datap;
+ struct usbduxsub *this_usbduxsub;
+ struct comedi_device *this_comedidev;
+ struct comedi_subdevice *s;
+
+ /* the context variable points to the subdevice */
+ this_comedidev = urb->context;
+ /* the private structure of the subdevice is struct usbduxsub */
+ this_usbduxsub = this_comedidev->private;
+
+ s = this_comedidev->subdevices + SUBDEV_DA;
+
+ switch (urb->status) {
+ case 0:
+ /* success */
+ break;
+
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -ECONNABORTED:
+ /* after an unlink command, unplug, ... etc */
+ /* no unlink needed here. Already shutting down. */
+ if (this_usbduxsub->ao_cmd_running) {
+ s->async->events |= COMEDI_CB_EOA;
+ comedi_event(this_usbduxsub->comedidev, s);
+ usbdux_ao_stop(this_usbduxsub, 0);
+ }
+ return;
+
+ default:
+ /* a real error */
+ if (this_usbduxsub->ao_cmd_running) {
+ dev_err(&urb->dev->dev,
+ "comedi_: Non-zero urb status received in ao "
+ "intr context: %d\n", urb->status);
+ s->async->events |= COMEDI_CB_ERROR;
+ s->async->events |= COMEDI_CB_EOA;
+ comedi_event(this_usbduxsub->comedidev, s);
+ /* we do an unlink if we are in the high speed mode */
+ usbdux_ao_stop(this_usbduxsub, 0);
+ }
+ return;
+ }
+
+ /* are we actually running? */
+ if (!(this_usbduxsub->ao_cmd_running))
+ return;
+
+ /* normal operation: executing a command in this subdevice */
+ this_usbduxsub->ao_counter--;
+ if ((int)this_usbduxsub->ao_counter <= 0) {
+ /* timer zero */
+ this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
+
+ /* handle non continous aquisition */
+ if (!(this_usbduxsub->ao_continous)) {
+ /* fixed number of samples */
+ this_usbduxsub->ao_sample_count--;
+ if (this_usbduxsub->ao_sample_count < 0) {
+ /* all samples transmitted */
+ usbdux_ao_stop(this_usbduxsub, 0);
+ s->async->events |= COMEDI_CB_EOA;
+ comedi_event(this_usbduxsub->comedidev, s);
+ /* no resubmit of the urb */
+ return;
+ }
+ }
+ /* transmit data to the USB bus */
+ ((uint8_t *) (urb->transfer_buffer))[0] =
+ s->async->cmd.chanlist_len;
+ for (i = 0; i < s->async->cmd.chanlist_len; i++) {
+ short temp;
+ if (i >= NUMOUTCHANNELS)
+ break;
+
+ /* pointer to the DA */
+ datap =
+ (&(((uint8_t *) urb->transfer_buffer)[i * 2 + 1]));
+ /* get the data from comedi */
+ ret = comedi_buf_get(s->async, &temp);
+ datap[0] = temp;
+ datap[1] = this_usbduxsub->dac_commands[i];
+ /* printk("data[0]=%x, data[1]=%x, data[2]=%x\n", */
+ /* datap[0],datap[1],datap[2]); */
+ if (ret < 0) {
+ dev_err(&urb->dev->dev,
+ "comedi: buffer underflow\n");
+ s->async->events |= COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_OVERFLOW;
+ }
+ /* transmit data to comedi */
+ s->async->events |= COMEDI_CB_BLOCK;
+ comedi_event(this_usbduxsub->comedidev, s);
+ }
+ }
+ urb->transfer_buffer_length = SIZEOUTBUF;
+ urb->dev = this_usbduxsub->usbdev;
+ urb->status = 0;
+ if (this_usbduxsub->ao_cmd_running) {
+ if (this_usbduxsub->high_speed) {
+ /* uframes */
+ urb->interval = 8;
+ } else {
+ /* frames */
+ urb->interval = 1;
+ }
+ urb->number_of_packets = 1;
+ urb->iso_frame_desc[0].offset = 0;
+ urb->iso_frame_desc[0].length = SIZEOUTBUF;
+ urb->iso_frame_desc[0].status = 0;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_err(&urb->dev->dev,
+ "comedi_: ao urb resubm failed in int-cont. "
+ "ret=%d", ret);
+ if (ret == EL2NSYNC)
+ dev_err(&urb->dev->dev,
+ "buggy USB host controller or bug in "
+ "IRQ handling!\n");
+
+ s->async->events |= COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_ERROR;
+ comedi_event(this_usbduxsub->comedidev, s);
+ /* don't do an unlink here */
+ usbdux_ao_stop(this_usbduxsub, 0);
+ }
+ }
+}
+
+static int usbduxsub_start(struct usbduxsub *usbduxsub)
+{
+ int errcode = 0;
+ uint8_t local_transfer_buffer[16];
+
+ /* 7f92 to zero */
+ local_transfer_buffer[0] = 0;
+ errcode = usb_control_msg(usbduxsub->usbdev,
+ /* create a pipe for a control transfer */
+ usb_sndctrlpipe(usbduxsub->usbdev, 0),
+ /* bRequest, "Firmware" */
+ USBDUXSUB_FIRMWARE,
+ /* bmRequestType */
+ VENDOR_DIR_OUT,
+ /* Value */
+ USBDUXSUB_CPUCS,
+ /* Index */
+ 0x0000,
+ /* address of the transfer buffer */
+ local_transfer_buffer,
+ /* Length */
+ 1,
+ /* Timeout */
+ BULK_TIMEOUT);
+ if (errcode < 0) {
+ dev_err(&usbduxsub->interface->dev,
+ "comedi_: control msg failed (start)\n");
+ return errcode;
+ }
+ return 0;
+}
+
+static int usbduxsub_stop(struct usbduxsub *usbduxsub)
+{
+ int errcode = 0;
+
+ uint8_t local_transfer_buffer[16];
+
+ /* 7f92 to one */
+ local_transfer_buffer[0] = 1;
+ errcode = usb_control_msg(usbduxsub->usbdev,
+ usb_sndctrlpipe(usbduxsub->usbdev, 0),
+ /* bRequest, "Firmware" */
+ USBDUXSUB_FIRMWARE,
+ /* bmRequestType */
+ VENDOR_DIR_OUT,
+ /* Value */
+ USBDUXSUB_CPUCS,
+ /* Index */
+ 0x0000, local_transfer_buffer,
+ /* Length */
+ 1,
+ /* Timeout */
+ BULK_TIMEOUT);
+ if (errcode < 0) {
+ dev_err(&usbduxsub->interface->dev,
+ "comedi_: control msg failed (stop)\n");
+ return errcode;
+ }
+ return 0;
+}
+
+static int usbduxsub_upload(struct usbduxsub *usbduxsub,
+ uint8_t *local_transfer_buffer,
+ unsigned int startAddr, unsigned int len)
+{
+ int errcode;
+
+ errcode = usb_control_msg(usbduxsub->usbdev,
+ usb_sndctrlpipe(usbduxsub->usbdev, 0),
+ /* brequest, firmware */
+ USBDUXSUB_FIRMWARE,
+ /* bmRequestType */
+ VENDOR_DIR_OUT,
+ /* value */
+ startAddr,
+ /* index */
+ 0x0000,
+ /* our local safe buffer */
+ local_transfer_buffer,
+ /* length */
+ len,
+ /* timeout */
+ BULK_TIMEOUT);
+ dev_dbg(&usbduxsub->interface->dev, "comedi_: result=%d\n", errcode);
+ if (errcode < 0) {
+ dev_err(&usbduxsub->interface->dev,
+ "comedi_: upload failed\n");
+ return errcode;
+ }
+ return 0;
+}
+
+/* the FX2LP has twice as much as the standard FX2 */
+#define FIRMWARE_MAX_LEN 0x4000
+
+static int firmwareUpload(struct usbduxsub *usbduxsub,
+ const u8 *firmwareBinary, int sizeFirmware)
+{
+ int ret;
+ uint8_t *fwBuf;
+
+ if (!firmwareBinary)
+ return 0;
+
+ if (sizeFirmware > FIRMWARE_MAX_LEN) {
+ dev_err(&usbduxsub->interface->dev,
+ "usbduxsigma firmware binary it too large for FX2.\n");
+ return -ENOMEM;
+ }
+
+ /* we generate a local buffer for the firmware */
+ fwBuf = kmemdup(firmwareBinary, sizeFirmware, GFP_KERNEL);
+ if (!fwBuf) {
+ dev_err(&usbduxsub->interface->dev,
+ "comedi_: mem alloc for firmware failed\n");
+ return -ENOMEM;
+ }
+
+ ret = usbduxsub_stop(usbduxsub);
+ if (ret < 0) {
+ dev_err(&usbduxsub->interface->dev,
+ "comedi_: can not stop firmware\n");
+ kfree(fwBuf);
+ return ret;
+ }
+
+ ret = usbduxsub_upload(usbduxsub, fwBuf, 0, sizeFirmware);
+ if (ret < 0) {
+ dev_err(&usbduxsub->interface->dev,
+ "comedi_: firmware upload failed\n");
+ kfree(fwBuf);
+ return ret;
+ }
+ ret = usbduxsub_start(usbduxsub);
+ if (ret < 0) {
+ dev_err(&usbduxsub->interface->dev,
+ "comedi_: can not start firmware\n");
+ kfree(fwBuf);
+ return ret;
+ }
+ kfree(fwBuf);
+ return 0;
+}
+
+static int usbduxsub_submit_InURBs(struct usbduxsub *usbduxsub)
+{
+ int i, errFlag;
+
+ if (!usbduxsub)
+ return -EFAULT;
+
+ /* Submit all URBs and start the transfer on the bus */
+ for (i = 0; i < usbduxsub->numOfInBuffers; i++) {
+ /* in case of a resubmission after an unlink... */
+ usbduxsub->urbIn[i]->interval = usbduxsub->ai_interval;
+ usbduxsub->urbIn[i]->context = usbduxsub->comedidev;
+ usbduxsub->urbIn[i]->dev = usbduxsub->usbdev;
+ usbduxsub->urbIn[i]->status = 0;
+ usbduxsub->urbIn[i]->transfer_flags = URB_ISO_ASAP;
+ dev_dbg(&usbduxsub->interface->dev,
+ "comedi%d: submitting in-urb[%d]: %p,%p intv=%d\n",
+ usbduxsub->comedidev->minor, i,
+ (usbduxsub->urbIn[i]->context),
+ (usbduxsub->urbIn[i]->dev),
+ (usbduxsub->urbIn[i]->interval));
+ errFlag = usb_submit_urb(usbduxsub->urbIn[i], GFP_ATOMIC);
+ if (errFlag) {
+ dev_err(&usbduxsub->interface->dev,
+ "comedi_: ai: usb_submit_urb(%d) error %d\n",
+ i, errFlag);
+ return errFlag;
+ }
+ }
+ return 0;
+}
+
+static int usbduxsub_submit_OutURBs(struct usbduxsub *usbduxsub)
+{
+ int i, errFlag;
+
+ if (!usbduxsub)
+ return -EFAULT;
+
+ for (i = 0; i < usbduxsub->numOfOutBuffers; i++) {
+ dev_dbg(&usbduxsub->interface->dev,
+ "comedi_: submitting out-urb[%d]\n", i);
+ /* in case of a resubmission after an unlink... */
+ usbduxsub->urbOut[i]->context = usbduxsub->comedidev;
+ usbduxsub->urbOut[i]->dev = usbduxsub->usbdev;
+ usbduxsub->urbOut[i]->status = 0;
+ usbduxsub->urbOut[i]->transfer_flags = URB_ISO_ASAP;
+ errFlag = usb_submit_urb(usbduxsub->urbOut[i], GFP_ATOMIC);
+ if (errFlag) {
+ dev_err(&usbduxsub->interface->dev,
+ "comedi_: ao: usb_submit_urb(%d) error %d\n",
+ i, errFlag);
+ return errFlag;
+ }
+ }
+ return 0;
+}
+
+static int chanToInterval(int nChannels)
+{
+ if (nChannels <= 2)
+ /* 4kHz */
+ return 2;
+ if (nChannels <= 8)
+ /* 2kHz */
+ return 4;
+ /* 1kHz */
+ return 8;
+}
+
+static int usbdux_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ int err = 0, tmp, i;
+ unsigned int tmpTimer;
+ struct usbduxsub *this_usbduxsub = dev->private;
+
+ if (!(this_usbduxsub->probed))
+ return -ENODEV;
+
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: usbdux_ai_cmdtest\n", dev->minor);
+
+ /* make sure triggers are valid */
+ /* Only immediate triggers are allowed */
+ tmp = cmd->start_src;
+ cmd->start_src &= TRIG_NOW | TRIG_INT;
+ if (!cmd->start_src || tmp != cmd->start_src)
+ err++;
+
+ /* trigger should happen timed */
+ tmp = cmd->scan_begin_src;
+ /* start a new _scan_ with a timer */
+ cmd->scan_begin_src &= TRIG_TIMER;
+ if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
+ err++;
+
+ /* scanning is continous */
+ tmp = cmd->convert_src;
+ cmd->convert_src &= TRIG_NOW;
+ if (!cmd->convert_src || tmp != cmd->convert_src)
+ err++;
+
+ /* issue a trigger when scan is finished and start a new scan */
+ tmp = cmd->scan_end_src;
+ cmd->scan_end_src &= TRIG_COUNT;
+ if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
+ err++;
+
+ /* trigger at the end of count events or not, stop condition or not */
+ tmp = cmd->stop_src;
+ cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
+ if (!cmd->stop_src || tmp != cmd->stop_src)
+ err++;
+
+ if (err)
+ return 1;
+
+ /*
+ * step 2: make sure trigger sources are unique and mutually compatible
+ * note that mutual compatibility is not an issue here
+ */
+ if (cmd->scan_begin_src != TRIG_FOLLOW &&
+ cmd->scan_begin_src != TRIG_EXT &&
+ cmd->scan_begin_src != TRIG_TIMER)
+ err++;
+ if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
+ err++;
+
+ if (err)
+ return 2;
+
+ /* step 3: make sure arguments are trivially compatible */
+ if (cmd->start_arg != 0) {
+ cmd->start_arg = 0;
+ err++;
+ }
+
+ if (cmd->scan_begin_src == TRIG_FOLLOW) {
+ /* internal trigger */
+ if (cmd->scan_begin_arg != 0) {
+ cmd->scan_begin_arg = 0;
+ err++;
+ }
+ }
+
+ if (cmd->scan_begin_src == TRIG_TIMER) {
+ if (this_usbduxsub->high_speed) {
+ /*
+ * In high speed mode microframes are possible.
+ * However, during one microframe we can roughly
+ * sample two channels. Thus, the more channels
+ * are in the channel list the more time we need.
+ */
+ i = chanToInterval(cmd->chanlist_len);
+ if (cmd->scan_begin_arg < (1000000 / 8 * i)) {
+ cmd->scan_begin_arg = 1000000 / 8 * i;
+ err++;
+ }
+ /* now calc the real sampling rate with all the
+ * rounding errors */
+ tmpTimer =
+ ((unsigned int)(cmd->scan_begin_arg / 125000)) *
+ 125000;
+ if (cmd->scan_begin_arg != tmpTimer) {
+ cmd->scan_begin_arg = tmpTimer;
+ err++;
+ }
+ } else {
+ /* full speed */
+ /* 1kHz scans every USB frame */
+ if (cmd->scan_begin_arg < 1000000) {
+ cmd->scan_begin_arg = 1000000;
+ err++;
+ }
+ /*
+ * calc the real sampling rate with the rounding errors
+ */
+ tmpTimer = ((unsigned int)(cmd->scan_begin_arg /
+ 1000000)) * 1000000;
+ if (cmd->scan_begin_arg != tmpTimer) {
+ cmd->scan_begin_arg = tmpTimer;
+ err++;
+ }
+ }
+ }
+ /* the same argument */
+ if (cmd->scan_end_arg != cmd->chanlist_len) {
+ cmd->scan_end_arg = cmd->chanlist_len;
+ err++;
+ }
+
+ if (cmd->stop_src == TRIG_COUNT) {
+ /* any count is allowed */
+ } else {
+ /* TRIG_NONE */
+ if (cmd->stop_arg != 0) {
+ cmd->stop_arg = 0;
+ err++;
+ }
+ }
+
+ if (err)
+ return 3;
+
+ return 0;
+}
+
+/*
+ * creates the ADC command for the MAX1271
+ * range is the range value from comedi
+ */
+static void create_adc_command(unsigned int chan,
+ uint8_t *muxsg0,
+ uint8_t *muxsg1)
+{
+ if (chan < 8)
+ (*muxsg0) = (*muxsg0) | (1 << chan);
+ else if (chan < 16)
+ (*muxsg1) = (*muxsg1) | (1 << (chan-8));
+}
+
+
+/* bulk transfers to usbdux */
+
+#define SENDADCOMMANDS 0
+#define SENDDACOMMANDS 1
+#define SENDDIOCONFIGCOMMAND 2
+#define SENDDIOBITSCOMMAND 3
+#define SENDSINGLEAD 4
+#define SENDPWMON 7
+#define SENDPWMOFF 8
+
+static int send_dux_commands(struct usbduxsub *this_usbduxsub, int cmd_type)
+{
+ int result, nsent;
+
+ this_usbduxsub->dux_commands[0] = cmd_type;
+#ifdef NOISY_DUX_DEBUGBUG
+ printk(KERN_DEBUG "comedi%d: usbdux: dux_commands: ",
+ this_usbduxsub->comedidev->minor);
+ for (result = 0; result < SIZEOFDUXBUFFER; result++)
+ printk(" %02x", this_usbduxsub->dux_commands[result]);
+ printk("\n");
+#endif
+ result = usb_bulk_msg(this_usbduxsub->usbdev,
+ usb_sndbulkpipe(this_usbduxsub->usbdev,
+ COMMAND_OUT_EP),
+ this_usbduxsub->dux_commands, SIZEOFDUXBUFFER,
+ &nsent, BULK_TIMEOUT);
+ if (result < 0)
+ dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
+ "could not transmit dux_command to the usb-device, "
+ "err=%d\n", this_usbduxsub->comedidev->minor, result);
+
+ return result;
+}
+
+static int receive_dux_commands(struct usbduxsub *this_usbduxsub, int command)
+{
+ int result = (-EFAULT);
+ int nrec;
+ int i;
+
+ for (i = 0; i < RETRIES; i++) {
+ result = usb_bulk_msg(this_usbduxsub->usbdev,
+ usb_rcvbulkpipe(this_usbduxsub->usbdev,
+ COMMAND_IN_EP),
+ this_usbduxsub->insnBuffer, SIZEINSNBUF,
+ &nrec, BULK_TIMEOUT);
+ if (result < 0) {
+ dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
+ "insn: USB error %d "
+ "while receiving DUX command"
+ "\n", this_usbduxsub->comedidev->minor,
+ result);
+ return result;
+ }
+ if (this_usbduxsub->insnBuffer[0] == command)
+ return result;
+ }
+ /* this is only reached if the data has been requested a couple of
+ * times */
+ dev_err(&this_usbduxsub->interface->dev, "comedi%d: insn: "
+ "wrong data returned from firmware: want %d, got %d.\n",
+ this_usbduxsub->comedidev->minor, command,
+ this_usbduxsub->insnBuffer[0]);
+ return -EFAULT;
+}
+
+static int usbdux_ai_inttrig(struct comedi_device *dev,
+ struct comedi_subdevice *s, unsigned int trignum)
+{
+ int ret;
+ struct usbduxsub *this_usbduxsub = dev->private;
+ if (!this_usbduxsub)
+ return -EFAULT;
+
+ down(&this_usbduxsub->sem);
+ if (!(this_usbduxsub->probed)) {
+ up(&this_usbduxsub->sem);
+ return -ENODEV;
+ }
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: usbdux_ai_inttrig\n", dev->minor);
+
+ if (trignum != 0) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: usbdux_ai_inttrig: invalid trignum\n",
+ dev->minor);
+ up(&this_usbduxsub->sem);
+ return -EINVAL;
+ }
+ if (!(this_usbduxsub->ai_cmd_running)) {
+ this_usbduxsub->ai_cmd_running = 1;
+ ret = usbduxsub_submit_InURBs(this_usbduxsub);
+ if (ret < 0) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: usbdux_ai_inttrig: "
+ "urbSubmit: err=%d\n", dev->minor, ret);
+ this_usbduxsub->ai_cmd_running = 0;
+ up(&this_usbduxsub->sem);
+ return ret;
+ }
+ s->async->inttrig = NULL;
+ } else {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: ai_inttrig but acqu is already running\n",
+ dev->minor);
+ }
+ up(&this_usbduxsub->sem);
+ return 1;
+}
+
+static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int chan;
+ int i, ret;
+ struct usbduxsub *this_usbduxsub = dev->private;
+ int result;
+ uint8_t muxsg0 = 0;
+ uint8_t muxsg1 = 0;
+ uint8_t sysred = 0;
+
+ if (!this_usbduxsub)
+ return -EFAULT;
+
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: usbdux_ai_cmd\n", dev->minor);
+
+ /* block other CPUs from starting an ai_cmd */
+ down(&this_usbduxsub->sem);
+
+ if (!(this_usbduxsub->probed)) {
+ up(&this_usbduxsub->sem);
+ return -ENODEV;
+ }
+ if (this_usbduxsub->ai_cmd_running) {
+ dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
+ "ai_cmd not possible. Another ai_cmd is running.\n",
+ dev->minor);
+ up(&this_usbduxsub->sem);
+ return -EBUSY;
+ }
+ /* set current channel of the running aquisition to zero */
+ s->async->cur_chan = 0;
+
+ /* first the number of channels per time step */
+ this_usbduxsub->dux_commands[1] = cmd->chanlist_len;
+
+ /* CONFIG0 */
+ this_usbduxsub->dux_commands[2] = 0x12;
+
+ /* CONFIG1: 23kHz sampling rate, delay = 0us, */
+ this_usbduxsub->dux_commands[3] = 0x03;
+
+ /* CONFIG3: differential channels off */
+ this_usbduxsub->dux_commands[4] = 0x00;
+
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ chan = CR_CHAN(cmd->chanlist[i]);
+ create_adc_command(chan, &muxsg0, &muxsg1);
+ if (i >= NUMCHANNELS) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: channel list too long\n",
+ dev->minor);
+ break;
+ }
+ }
+ this_usbduxsub->dux_commands[5] = muxsg0;
+ this_usbduxsub->dux_commands[6] = muxsg1;
+ this_usbduxsub->dux_commands[7] = sysred;
+
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi %d: sending commands to the usb device: size=%u\n",
+ dev->minor, NUMCHANNELS);
+
+ result = send_dux_commands(this_usbduxsub, SENDADCOMMANDS);
+ if (result < 0) {
+ up(&this_usbduxsub->sem);
+ return result;
+ }
+
+ if (this_usbduxsub->high_speed) {
+ /*
+ * every 2 channels get a time window of 125us. Thus, if we
+ * sample all 16 channels we need 1ms. If we sample only one
+ * channel we need only 125us
+ */
+ this_usbduxsub->ai_interval =
+ chanToInterval(cmd->chanlist_len);
+ this_usbduxsub->ai_timer = cmd->scan_begin_arg / (125000 *
+ (this_usbduxsub->
+ ai_interval));
+ } else {
+ /* interval always 1ms */
+ this_usbduxsub->ai_interval = 1;
+ this_usbduxsub->ai_timer = cmd->scan_begin_arg / 1000000;
+ }
+ if (this_usbduxsub->ai_timer < 1) {
+ dev_err(&this_usbduxsub->interface->dev, "comedi%d: ai_cmd: "
+ "timer=%d, scan_begin_arg=%d. "
+ "Not properly tested by cmdtest?\n", dev->minor,
+ this_usbduxsub->ai_timer, cmd->scan_begin_arg);
+ up(&this_usbduxsub->sem);
+ return -EINVAL;
+ }
+ this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;
+
+ if (cmd->stop_src == TRIG_COUNT) {
+ /* data arrives as one packet */
+ this_usbduxsub->ai_sample_count = cmd->stop_arg;
+ this_usbduxsub->ai_continous = 0;
+ } else {
+ /* continous aquisition */
+ this_usbduxsub->ai_continous = 1;
+ this_usbduxsub->ai_sample_count = 0;
+ }
+
+ if (cmd->start_src == TRIG_NOW) {
+ /* enable this acquisition operation */
+ this_usbduxsub->ai_cmd_running = 1;
+ ret = usbduxsub_submit_InURBs(this_usbduxsub);
+ if (ret < 0) {
+ this_usbduxsub->ai_cmd_running = 0;
+ /* fixme: unlink here?? */
+ up(&this_usbduxsub->sem);
+ return ret;
+ }
+ s->async->inttrig = NULL;
+ } else {
+ /* TRIG_INT */
+ /* don't enable the acquision operation */
+ /* wait for an internal signal */
+ s->async->inttrig = usbdux_ai_inttrig;
+ }
+ up(&this_usbduxsub->sem);
+ return 0;
+}
+
+/* Mode 0 is used to get a single conversion on demand */
+static int usbdux_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ int i;
+ int32_t one = 0;
+ int chan;
+ int err;
+ struct usbduxsub *this_usbduxsub = dev->private;
+ uint8_t muxsg0 = 0;
+ uint8_t muxsg1 = 0;
+ uint8_t sysred = 0;
+
+ if (!this_usbduxsub)
+ return 0;
+
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: ai_insn_read, insn->n=%d, insn->subdev=%d\n",
+ dev->minor, insn->n, insn->subdev);
+
+ down(&this_usbduxsub->sem);
+ if (!(this_usbduxsub->probed)) {
+ up(&this_usbduxsub->sem);
+ return -ENODEV;
+ }
+ if (this_usbduxsub->ai_cmd_running) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: ai_insn_read not possible. "
+ "Async Command is running.\n", dev->minor);
+ up(&this_usbduxsub->sem);
+ return 0;
+ }
+
+ /* sample one channel */
+ /* CONFIG0: chopper on */
+ this_usbduxsub->dux_commands[1] = 0x16;
+
+ /* CONFIG1: 2kHz sampling rate */
+ this_usbduxsub->dux_commands[2] = 0x80;
+
+ /* CONFIG3: differential channels off */
+ this_usbduxsub->dux_commands[3] = 0x00;
+
+ chan = CR_CHAN(insn->chanspec);
+ create_adc_command(chan, &muxsg0, &muxsg1);
+
+ this_usbduxsub->dux_commands[4] = muxsg0;
+ this_usbduxsub->dux_commands[5] = muxsg1;
+ this_usbduxsub->dux_commands[6] = sysred;
+
+ /* adc commands */
+ err = send_dux_commands(this_usbduxsub, SENDSINGLEAD);
+ if (err < 0) {
+ up(&this_usbduxsub->sem);
+ return err;
+ }
+
+ for (i = 0; i < insn->n; i++) {
+ err = receive_dux_commands(this_usbduxsub, SENDSINGLEAD);
+ if (err < 0) {
+ up(&this_usbduxsub->sem);
+ return 0;
+ }
+ /* 32 bits big endian from the A/D converter */
+ one = be32_to_cpu(*((int32_t *)
+ ((this_usbduxsub->insnBuffer)+1)));
+ /* mask out the staus byte */
+ one = one & 0x00ffffff;
+ /* turn it into an unsigned integer */
+ one = one ^ 0x00800000;
+ data[i] = one;
+ }
+ up(&this_usbduxsub->sem);
+ return i;
+}
+
+
+
+
+static int usbdux_getstatusinfo(struct comedi_device *dev, int chan)
+{
+ struct usbduxsub *this_usbduxsub = dev->private;
+ uint8_t sysred = 0;
+ uint32_t one;
+ int err;
+
+ if (!this_usbduxsub)
+ return 0;
+
+ if (this_usbduxsub->ai_cmd_running) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: status read not possible. "
+ "Async Command is running.\n", dev->minor);
+ return 0;
+ }
+
+ /* CONFIG0 */
+ this_usbduxsub->dux_commands[1] = 0x12;
+
+ /* CONFIG1: 2kHz sampling rate */
+ this_usbduxsub->dux_commands[2] = 0x80;
+
+ /* CONFIG3: differential channels off */
+ this_usbduxsub->dux_commands[3] = 0x00;
+
+ if (chan == 1) {
+ /* ADC offset */
+ sysred = sysred | 1;
+ } else if (chan == 2) {
+ /* VCC */
+ sysred = sysred | 4;
+ } else if (chan == 3) {
+ /* temperature */
+ sysred = sysred | 8;
+ } else if (chan == 4) {
+ /* gain */
+ sysred = sysred | 16;
+ } else if (chan == 5) {
+ /* ref */
+ sysred = sysred | 32;
+ }
+
+ this_usbduxsub->dux_commands[4] = 0;
+ this_usbduxsub->dux_commands[5] = 0;
+ this_usbduxsub->dux_commands[6] = sysred;
+
+ /* adc commands */
+ err = send_dux_commands(this_usbduxsub, SENDSINGLEAD);
+ if (err < 0)
+ return err;
+
+ err = receive_dux_commands(this_usbduxsub, SENDSINGLEAD);
+ if (err < 0)
+ return err;
+
+ /* 32 bits big endian from the A/D converter */
+ one = be32_to_cpu(*((int32_t *)((this_usbduxsub->insnBuffer)+1)));
+ /* mask out the staus byte */
+ one = one & 0x00ffffff;
+ one = one ^ 0x00800000;
+
+ return (int)one;
+}
+
+
+
+
+
+
+/************************************/
+/* analog out */
+
+static int usbdux_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ int i;
+ int chan = CR_CHAN(insn->chanspec);
+ struct usbduxsub *this_usbduxsub = dev->private;
+
+ if (!this_usbduxsub)
+ return -EFAULT;
+
+ down(&this_usbduxsub->sem);
+ if (!(this_usbduxsub->probed)) {
+ up(&this_usbduxsub->sem);
+ return -ENODEV;
+ }
+ for (i = 0; i < insn->n; i++)
+ data[i] = this_usbduxsub->outBuffer[chan];
+
+ up(&this_usbduxsub->sem);
+ return i;
+}
+
+static int usbdux_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ int i, err;
+ int chan = CR_CHAN(insn->chanspec);
+ struct usbduxsub *this_usbduxsub = dev->private;
+
+ if (!this_usbduxsub)
+ return -EFAULT;
+
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: ao_insn_write\n", dev->minor);
+
+ down(&this_usbduxsub->sem);
+ if (!(this_usbduxsub->probed)) {
+ up(&this_usbduxsub->sem);
+ return -ENODEV;
+ }
+ if (this_usbduxsub->ao_cmd_running) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: ao_insn_write: "
+ "ERROR: asynchronous ao_cmd is running\n", dev->minor);
+ up(&this_usbduxsub->sem);
+ return 0;
+ }
+
+ for (i = 0; i < insn->n; i++) {
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: ao_insn_write: data[chan=%d,i=%d]=%d\n",
+ dev->minor, chan, i, data[i]);
+
+ /* number of channels: 1 */
+ this_usbduxsub->dux_commands[1] = 1;
+ /* channel number */
+ this_usbduxsub->dux_commands[2] = data[i];
+ this_usbduxsub->outBuffer[chan] = data[i];
+ this_usbduxsub->dux_commands[3] = chan;
+ err = send_dux_commands(this_usbduxsub, SENDDACOMMANDS);
+ if (err < 0) {
+ up(&this_usbduxsub->sem);
+ return err;
+ }
+ }
+ up(&this_usbduxsub->sem);
+
+ return i;
+}
+
+static int usbdux_ao_inttrig(struct comedi_device *dev,
+ struct comedi_subdevice *s, unsigned int trignum)
+{
+ int ret;
+ struct usbduxsub *this_usbduxsub = dev->private;
+
+ if (!this_usbduxsub)
+ return -EFAULT;
+
+ down(&this_usbduxsub->sem);
+ if (!(this_usbduxsub->probed)) {
+ up(&this_usbduxsub->sem);
+ return -ENODEV;
+ }
+ if (trignum != 0) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: usbdux_ao_inttrig: invalid trignum\n",
+ dev->minor);
+ return -EINVAL;
+ }
+ if (!(this_usbduxsub->ao_cmd_running)) {
+ this_usbduxsub->ao_cmd_running = 1;
+ ret = usbduxsub_submit_OutURBs(this_usbduxsub);
+ if (ret < 0) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: usbdux_ao_inttrig: submitURB: "
+ "err=%d\n", dev->minor, ret);
+ this_usbduxsub->ao_cmd_running = 0;
+ up(&this_usbduxsub->sem);
+ return ret;
+ }
+ s->async->inttrig = NULL;
+ } else {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: ao_inttrig but acqu is already running.\n",
+ dev->minor);
+ }
+ up(&this_usbduxsub->sem);
+ return 1;
+}
+
+static int usbdux_ao_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ int err = 0, tmp;
+ struct usbduxsub *this_usbduxsub = dev->private;
+
+ if (!this_usbduxsub)
+ return -EFAULT;
+
+ if (!(this_usbduxsub->probed))
+ return -ENODEV;
+
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: usbdux_ao_cmdtest\n", dev->minor);
+
+ /* make sure triggers are valid */
+ /* Only immediate triggers are allowed */
+ tmp = cmd->start_src;
+ cmd->start_src &= TRIG_NOW | TRIG_INT;
+ if (!cmd->start_src || tmp != cmd->start_src)
+ err++;
+
+ /* trigger should happen timed */
+ tmp = cmd->scan_begin_src;
+ /* just now we scan also in the high speed mode every frame */
+ /* this is due to ehci driver limitations */
+ if (0) { /* (this_usbduxsub->high_speed) */
+ /* start immidiately a new scan */
+ /* the sampling rate is set by the coversion rate */
+ cmd->scan_begin_src &= TRIG_FOLLOW;
+ } else {
+ /* start a new scan (output at once) with a timer */
+ cmd->scan_begin_src &= TRIG_TIMER;
+ }
+ if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
+ err++;
+
+ /* scanning is continous */
+ tmp = cmd->convert_src;
+
+ /* all conversion events happen simultaneously */
+ cmd->convert_src &= TRIG_NOW;
+
+ if (!cmd->convert_src || tmp != cmd->convert_src)
+ err++;
+
+ /* issue a trigger when scan is finished and start a new scan */
+ tmp = cmd->scan_end_src;
+ cmd->scan_end_src &= TRIG_COUNT;
+ if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
+ err++;
+
+ /* trigger at the end of count events or not, stop condition or not */
+ tmp = cmd->stop_src;
+ cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
+ if (!cmd->stop_src || tmp != cmd->stop_src)
+ err++;
+
+ if (err)
+ return 1;
+
+ /*
+ * step 2: make sure trigger sources
+ * are unique and mutually compatible
+ * note that mutual compatibility is not an issue here
+ */
+ if (cmd->scan_begin_src != TRIG_FOLLOW &&
+ cmd->scan_begin_src != TRIG_EXT &&
+ cmd->scan_begin_src != TRIG_TIMER)
+ err++;
+ if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
+ err++;
+
+ if (err)
+ return 2;
+
+ /* step 3: make sure arguments are trivially compatible */
+
+ if (cmd->start_arg != 0) {
+ cmd->start_arg = 0;
+ err++;
+ }
+
+ if (cmd->scan_begin_src == TRIG_FOLLOW) {
+ /* internal trigger */
+ if (cmd->scan_begin_arg != 0) {
+ cmd->scan_begin_arg = 0;
+ err++;
+ }
+ }
+
+ if (cmd->scan_begin_src == TRIG_TIMER) {
+ /* timer */
+ if (cmd->scan_begin_arg < 1000000) {
+ cmd->scan_begin_arg = 1000000;
+ err++;
+ }
+ }
+ /* not used now, is for later use */
+ if (cmd->convert_src == TRIG_TIMER) {
+ if (cmd->convert_arg < 125000) {
+ cmd->convert_arg = 125000;
+ err++;
+ }
+ }
+
+ /* the same argument */
+ if (cmd->scan_end_arg != cmd->chanlist_len) {
+ cmd->scan_end_arg = cmd->chanlist_len;
+ err++;
+ }
+
+ if (cmd->stop_src == TRIG_COUNT) {
+ /* any count is allowed */
+ } else {
+ /* TRIG_NONE */
+ if (cmd->stop_arg != 0) {
+ cmd->stop_arg = 0;
+ err++;
+ }
+ }
+
+ dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: err=%d, "
+ "scan_begin_src=%d, scan_begin_arg=%d, convert_src=%d, "
+ "convert_arg=%d\n", dev->minor, err, cmd->scan_begin_src,
+ cmd->scan_begin_arg, cmd->convert_src, cmd->convert_arg);
+
+ if (err)
+ return 3;
+
+ return 0;
+}
+
+static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int chan, gain;
+ int i, ret;
+ struct usbduxsub *this_usbduxsub = dev->private;
+
+ if (!this_usbduxsub)
+ return -EFAULT;
+
+ down(&this_usbduxsub->sem);
+ if (!(this_usbduxsub->probed)) {
+ up(&this_usbduxsub->sem);
+ return -ENODEV;
+ }
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: %s\n", dev->minor, __func__);
+
+ /* set current channel of the running aquisition to zero */
+ s->async->cur_chan = 0;
+ for (i = 0; i < cmd->chanlist_len; ++i) {
+ chan = CR_CHAN(cmd->chanlist[i]);
+ gain = CR_RANGE(cmd->chanlist[i]);
+ if (i >= NUMOUTCHANNELS) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: %s: channel list too long\n",
+ dev->minor, __func__);
+ break;
+ }
+ this_usbduxsub->dac_commands[i] = chan;
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: dac command for ch %d is %x\n",
+ dev->minor, i, this_usbduxsub->dac_commands[i]);
+ }
+
+ /* we count in steps of 1ms (125us) */
+ /* 125us mode not used yet */
+ if (0) { /* (this_usbduxsub->high_speed) */
+ /* 125us */
+ /* timing of the conversion itself: every 125 us */
+ this_usbduxsub->ao_timer = cmd->convert_arg / 125000;
+ } else {
+ /* 1ms */
+ /* timing of the scan: we get all channels at once */
+ this_usbduxsub->ao_timer = cmd->scan_begin_arg / 1000000;
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: scan_begin_src=%d, scan_begin_arg=%d, "
+ "convert_src=%d, convert_arg=%d\n", dev->minor,
+ cmd->scan_begin_src, cmd->scan_begin_arg,
+ cmd->convert_src, cmd->convert_arg);
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: ao_timer=%d (ms)\n",
+ dev->minor, this_usbduxsub->ao_timer);
+ if (this_usbduxsub->ao_timer < 1) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: usbdux: ao_timer=%d, "
+ "scan_begin_arg=%d. "
+ "Not properly tested by cmdtest?\n",
+ dev->minor, this_usbduxsub->ao_timer,
+ cmd->scan_begin_arg);
+ up(&this_usbduxsub->sem);
+ return -EINVAL;
+ }
+ }
+ this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
+
+ if (cmd->stop_src == TRIG_COUNT) {
+ /* not continous */
+ /* counter */
+ /* high speed also scans everything at once */
+ if (0) { /* (this_usbduxsub->high_speed) */
+ this_usbduxsub->ao_sample_count =
+ (cmd->stop_arg) * (cmd->scan_end_arg);
+ } else {
+ /* there's no scan as the scan has been */
+ /* perf inside the FX2 */
+ /* data arrives as one packet */
+ this_usbduxsub->ao_sample_count = cmd->stop_arg;
+ }
+ this_usbduxsub->ao_continous = 0;
+ } else {
+ /* continous aquisition */
+ this_usbduxsub->ao_continous = 1;
+ this_usbduxsub->ao_sample_count = 0;
+ }
+
+ if (cmd->start_src == TRIG_NOW) {
+ /* enable this acquisition operation */
+ this_usbduxsub->ao_cmd_running = 1;
+ ret = usbduxsub_submit_OutURBs(this_usbduxsub);
+ if (ret < 0) {
+ this_usbduxsub->ao_cmd_running = 0;
+ /* fixme: unlink here?? */
+ up(&this_usbduxsub->sem);
+ return ret;
+ }
+ s->async->inttrig = NULL;
+ } else {
+ /* TRIG_INT */
+ /* submit the urbs later */
+ /* wait for an internal signal */
+ s->async->inttrig = usbdux_ao_inttrig;
+ }
+
+ up(&this_usbduxsub->sem);
+ return 0;
+}
+
+static int usbdux_dio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ int chan = CR_CHAN(insn->chanspec);
+
+ /* The input or output configuration of each digital line is
+ * configured by a special insn_config instruction. chanspec
+ * contains the channel to be changed, and data[0] contains the
+ * value COMEDI_INPUT or COMEDI_OUTPUT. */
+
+ switch (data[0]) {
+ case INSN_CONFIG_DIO_OUTPUT:
+ s->io_bits |= 1 << chan; /* 1 means Out */
+ break;
+ case INSN_CONFIG_DIO_INPUT:
+ s->io_bits &= ~(1 << chan);
+ break;
+ case INSN_CONFIG_DIO_QUERY:
+ data[1] =
+ (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ /* we don't tell the firmware here as it would take 8 frames */
+ /* to submit the information. We do it in the insn_bits. */
+ return insn->n;
+}
+
+static int usbdux_dio_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+
+ struct usbduxsub *this_usbduxsub = dev->private;
+ int err;
+
+ if (!this_usbduxsub)
+ return -EFAULT;
+
+ if (insn->n != 2)
+ return -EINVAL;
+
+ down(&this_usbduxsub->sem);
+
+ if (!(this_usbduxsub->probed)) {
+ up(&this_usbduxsub->sem);
+ return -ENODEV;
+ }
+
+ /* The insn data is a mask in data[0] and the new data
+ * in data[1], each channel cooresponding to a bit. */
+ s->state &= ~data[0];
+ s->state |= data[0] & data[1];
+ /* The commands are 8 bits wide */
+ this_usbduxsub->dux_commands[1] = (s->io_bits) & 0x000000FF;
+ this_usbduxsub->dux_commands[4] = (s->state) & 0x000000FF;
+ this_usbduxsub->dux_commands[2] = ((s->io_bits) & 0x0000FF00) >> 8;
+ this_usbduxsub->dux_commands[5] = ((s->state) & 0x0000FF00) >> 8;
+ this_usbduxsub->dux_commands[3] = ((s->io_bits) & 0x00FF0000) >> 16;
+ this_usbduxsub->dux_commands[6] = ((s->state) & 0x00FF0000) >> 16;
+
+ /* This command also tells the firmware to return */
+ /* the digital input lines */
+ err = send_dux_commands(this_usbduxsub, SENDDIOBITSCOMMAND);
+ if (err < 0) {
+ up(&this_usbduxsub->sem);
+ return err;
+ }
+ err = receive_dux_commands(this_usbduxsub, SENDDIOBITSCOMMAND);
+ if (err < 0) {
+ up(&this_usbduxsub->sem);
+ return err;
+ }
+
+ data[1] = (((unsigned int)(this_usbduxsub->insnBuffer[1]))&0xff) |
+ ((((unsigned int)(this_usbduxsub->insnBuffer[2]))&0xff) << 8) |
+ ((((unsigned int)(this_usbduxsub->insnBuffer[3]))&0xff) << 16);
+
+ s->state = data[1];
+
+ up(&this_usbduxsub->sem);
+ return 2;
+}
+
+/***********************************/
+/* PWM */
+
+static int usbduxsub_unlink_PwmURBs(struct usbduxsub *usbduxsub_tmp)
+{
+ int err = 0;
+
+ if (usbduxsub_tmp && usbduxsub_tmp->urbPwm) {
+ if (usbduxsub_tmp->urbPwm)
+ usb_kill_urb(usbduxsub_tmp->urbPwm);
+ dev_dbg(&usbduxsub_tmp->interface->dev,
+ "comedi: unlinked PwmURB: res=%d\n", err);
+ }
+ return err;
+}
+
+/* This cancels a running acquisition operation
+ * in any context.
+ */
+static int usbdux_pwm_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
+{
+ int ret = 0;
+
+ if (!this_usbduxsub)
+ return -EFAULT;
+
+ dev_dbg(&this_usbduxsub->interface->dev, "comedi: %s\n", __func__);
+ if (do_unlink)
+ ret = usbduxsub_unlink_PwmURBs(this_usbduxsub);
+
+ this_usbduxsub->pwm_cmd_running = 0;
+
+ return ret;
+}
+
+/* force unlink - is called by comedi */
+static int usbdux_pwm_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct usbduxsub *this_usbduxsub = dev->private;
+ int res = 0;
+
+ /* unlink only if it is really running */
+ res = usbdux_pwm_stop(this_usbduxsub, this_usbduxsub->pwm_cmd_running);
+
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi %d: sending pwm off command to the usb device.\n",
+ dev->minor);
+ res = send_dux_commands(this_usbduxsub, SENDPWMOFF);
+ if (res < 0)
+ return res;
+
+ return res;
+}
+
+static void usbduxsub_pwm_irq(struct urb *urb)
+{
+ int ret;
+ struct usbduxsub *this_usbduxsub;
+ struct comedi_device *this_comedidev;
+ struct comedi_subdevice *s;
+
+ /* printk(KERN_DEBUG "PWM: IRQ\n"); */
+
+ /* the context variable points to the subdevice */
+ this_comedidev = urb->context;
+ /* the private structure of the subdevice is struct usbduxsub */
+ this_usbduxsub = this_comedidev->private;
+
+ s = this_comedidev->subdevices + SUBDEV_DA;
+
+ switch (urb->status) {
+ case 0:
+ /* success */
+ break;
+
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -ECONNABORTED:
+ /*
+ * after an unlink command, unplug, ... etc
+ * no unlink needed here. Already shutting down.
+ */
+ if (this_usbduxsub->pwm_cmd_running)
+ usbdux_pwm_stop(this_usbduxsub, 0);
+
+ return;
+
+ default:
+ /* a real error */
+ if (this_usbduxsub->pwm_cmd_running) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi_: Non-zero urb status received in "
+ "pwm intr context: %d\n", urb->status);
+ usbdux_pwm_stop(this_usbduxsub, 0);
+ }
+ return;
+ }
+
+ /* are we actually running? */
+ if (!(this_usbduxsub->pwm_cmd_running))
+ return;
+
+ urb->transfer_buffer_length = this_usbduxsub->sizePwmBuf;
+ urb->dev = this_usbduxsub->usbdev;
+ urb->status = 0;
+ if (this_usbduxsub->pwm_cmd_running) {
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi_: pwm urb resubm failed in int-cont. "
+ "ret=%d", ret);
+ if (ret == EL2NSYNC)
+ dev_err(&this_usbduxsub->interface->dev,
+ "buggy USB host controller or bug in "
+ "IRQ handling!\n");
+
+ /* don't do an unlink here */
+ usbdux_pwm_stop(this_usbduxsub, 0);
+ }
+ }
+}
+
+static int usbduxsub_submit_PwmURBs(struct usbduxsub *usbduxsub)
+{
+ int errFlag;
+
+ if (!usbduxsub)
+ return -EFAULT;
+
+ dev_dbg(&usbduxsub->interface->dev, "comedi_: submitting pwm-urb\n");
+
+ /* in case of a resubmission after an unlink... */
+ usb_fill_bulk_urb(usbduxsub->urbPwm,
+ usbduxsub->usbdev,
+ usb_sndbulkpipe(usbduxsub->usbdev, PWM_EP),
+ usbduxsub->urbPwm->transfer_buffer,
+ usbduxsub->sizePwmBuf, usbduxsub_pwm_irq,
+ usbduxsub->comedidev);
+
+ errFlag = usb_submit_urb(usbduxsub->urbPwm, GFP_ATOMIC);
+ if (errFlag) {
+ dev_err(&usbduxsub->interface->dev,
+ "comedi_: usbduxsigma: pwm: usb_submit_urb error %d\n",
+ errFlag);
+ return errFlag;
+ }
+ return 0;
+}
+
+static int usbdux_pwm_period(struct comedi_device *dev,
+ struct comedi_subdevice *s, unsigned int period)
+{
+ struct usbduxsub *this_usbduxsub = dev->private;
+ int fx2delay = 255;
+
+ if (period < MIN_PWM_PERIOD) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: illegal period setting for pwm.\n",
+ dev->minor);
+ return -EAGAIN;
+ } else {
+ fx2delay = period / ((int)(6 * 512 * (1.0 / 0.033))) - 6;
+ if (fx2delay > 255) {
+ dev_err(&this_usbduxsub->interface->dev,
+ "comedi%d: period %d for pwm is too low.\n",
+ dev->minor, period);
+ return -EAGAIN;
+ }
+ }
+ this_usbduxsub->pwmDelay = fx2delay;
+ this_usbduxsub->pwmPeriod = period;
+ dev_dbg(&this_usbduxsub->interface->dev, "%s: frequ=%d, period=%d\n",
+ __func__, period, fx2delay);
+ return 0;
+}
+
+/* is called from insn so there's no need to do all the sanity checks */
+static int usbdux_pwm_start(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ int ret, i;
+ struct usbduxsub *this_usbduxsub = dev->private;
+
+ dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: %s\n",
+ dev->minor, __func__);
+
+ if (this_usbduxsub->pwm_cmd_running) {
+ /* already running */
+ return 0;
+ }
+
+ this_usbduxsub->dux_commands[1] = ((uint8_t) this_usbduxsub->pwmDelay);
+ ret = send_dux_commands(this_usbduxsub, SENDPWMON);
+ if (ret < 0)
+ return ret;
+
+ /* initialise the buffer */
+ for (i = 0; i < this_usbduxsub->sizePwmBuf; i++)
+ ((char *)(this_usbduxsub->urbPwm->transfer_buffer))[i] = 0;
+
+ this_usbduxsub->pwm_cmd_running = 1;
+ ret = usbduxsub_submit_PwmURBs(this_usbduxsub);
+ if (ret < 0) {
+ this_usbduxsub->pwm_cmd_running = 0;
+ return ret;
+ }
+ return 0;
+}
+
+/* generates the bit pattern for PWM with the optional sign bit */
+static int usbdux_pwm_pattern(struct comedi_device *dev,
+ struct comedi_subdevice *s, int channel,
+ unsigned int value, unsigned int sign)
+{
+ struct usbduxsub *this_usbduxsub = dev->private;
+ int i, szbuf;
+ char *pBuf;
+ char pwm_mask;
+ char sgn_mask;
+ char c;
+
+ if (!this_usbduxsub)
+ return -EFAULT;
+
+ /* this is the DIO bit which carries the PWM data */
+ pwm_mask = (1 << channel);
+ /* this is the DIO bit which carries the optional direction bit */
+ sgn_mask = (16 << channel);
+ /* this is the buffer which will be filled with the with bit */
+ /* pattern for one period */
+ szbuf = this_usbduxsub->sizePwmBuf;
+ pBuf = (char *)(this_usbduxsub->urbPwm->transfer_buffer);
+ for (i = 0; i < szbuf; i++) {
+ c = *pBuf;
+ /* reset bits */
+ c = c & (~pwm_mask);
+ /* set the bit as long as the index is lower than the value */
+ if (i < value)
+ c = c | pwm_mask;
+ /* set the optional sign bit for a relay */
+ if (!sign) {
+ /* positive value */
+ c = c & (~sgn_mask);
+ } else {
+ /* negative value */
+ c = c | sgn_mask;
+ }
+ *(pBuf++) = c;
+ }
+ return 1;
+}
+
+static int usbdux_pwm_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ struct usbduxsub *this_usbduxsub = dev->private;
+
+ if (!this_usbduxsub)
+ return -EFAULT;
+
+ if ((insn->n) != 1) {
+ /*
+ * doesn't make sense to have more than one value here because
+ * it would just overwrite the PWM buffer a couple of times
+ */
+ return -EINVAL;
+ }
+
+ /*
+ * the sign is set via a special INSN only, this gives us 8 bits for
+ * normal operation
+ * relay sign 0 by default
+ */
+ return usbdux_pwm_pattern(dev, s, CR_CHAN(insn->chanspec), data[0], 0);
+}
+
+static int usbdux_pwm_read(struct comedi_device *x1,
+ struct comedi_subdevice *x2, struct comedi_insn *x3,
+ unsigned int *x4)
+{
+ /* not needed */
+ return -EINVAL;
+};
+
+/* switches on/off PWM */
+static int usbdux_pwm_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ struct usbduxsub *this_usbduxsub = dev->private;
+ switch (data[0]) {
+ case INSN_CONFIG_ARM:
+ /* switch it on */
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: %s: pwm on\n", dev->minor, __func__);
+ /*
+ * if not zero the PWM is limited to a certain time which is
+ * not supported here
+ */
+ if (data[1] != 0)
+ return -EINVAL;
+ return usbdux_pwm_start(dev, s);
+ case INSN_CONFIG_DISARM:
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: %s: pwm off\n", dev->minor, __func__);
+ return usbdux_pwm_cancel(dev, s);
+ case INSN_CONFIG_GET_PWM_STATUS:
+ /*
+ * to check if the USB transmission has failed or in case PWM
+ * was limited to n cycles to check if it has terminated
+ */
+ data[1] = this_usbduxsub->pwm_cmd_running;
+ return 0;
+ case INSN_CONFIG_PWM_SET_PERIOD:
+ dev_dbg(&this_usbduxsub->interface->dev,
+ "comedi%d: %s: setting period\n", dev->minor,
+ __func__);
+ return usbdux_pwm_period(dev, s, data[1]);
+ case INSN_CONFIG_PWM_GET_PERIOD:
+ data[1] = this_usbduxsub->pwmPeriod;
+ return 0;
+ case INSN_CONFIG_PWM_SET_H_BRIDGE:
+ /* value in the first byte and the sign in the second for a
+ relay */
+ return usbdux_pwm_pattern(dev, s,
+ /* the channel number */
+ CR_CHAN(insn->chanspec),
+ /* actual PWM data */
+ data[1],
+ /* just a sign */
+ (data[2] != 0));
+ case INSN_CONFIG_PWM_GET_H_BRIDGE:
+ /* values are not kept in this driver, nothing to return */
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+/* end of PWM */
+/*****************************************************************/
+
+static void tidy_up(struct usbduxsub *usbduxsub_tmp)
+{
+ int i;
+
+ if (!usbduxsub_tmp)
+ return;
+ dev_dbg(&usbduxsub_tmp->interface->dev, "comedi_: tiding up\n");
+
+ /* shows the usb subsystem that the driver is down */
+ if (usbduxsub_tmp->interface)
+ usb_set_intfdata(usbduxsub_tmp->interface, NULL);
+
+ usbduxsub_tmp->probed = 0;
+
+ if (usbduxsub_tmp->urbIn) {
+ if (usbduxsub_tmp->ai_cmd_running) {
+ usbduxsub_tmp->ai_cmd_running = 0;
+ usbduxsub_unlink_InURBs(usbduxsub_tmp);
+ }
+ for (i = 0; i < usbduxsub_tmp->numOfInBuffers; i++) {
+ kfree(usbduxsub_tmp->urbIn[i]->transfer_buffer);
+ usbduxsub_tmp->urbIn[i]->transfer_buffer = NULL;
+ usb_kill_urb(usbduxsub_tmp->urbIn[i]);
+ usb_free_urb(usbduxsub_tmp->urbIn[i]);
+ usbduxsub_tmp->urbIn[i] = NULL;
+ }
+ kfree(usbduxsub_tmp->urbIn);
+ usbduxsub_tmp->urbIn = NULL;
+ }
+ if (usbduxsub_tmp->urbOut) {
+ if (usbduxsub_tmp->ao_cmd_running) {
+ usbduxsub_tmp->ao_cmd_running = 0;
+ usbduxsub_unlink_OutURBs(usbduxsub_tmp);
+ }
+ for (i = 0; i < usbduxsub_tmp->numOfOutBuffers; i++) {
+ if (usbduxsub_tmp->urbOut[i]->transfer_buffer) {
+ kfree(usbduxsub_tmp->
+ urbOut[i]->transfer_buffer);
+ usbduxsub_tmp->urbOut[i]->transfer_buffer =
+ NULL;
+ }
+ if (usbduxsub_tmp->urbOut[i]) {
+ usb_kill_urb(usbduxsub_tmp->urbOut[i]);
+ usb_free_urb(usbduxsub_tmp->urbOut[i]);
+ usbduxsub_tmp->urbOut[i] = NULL;
+ }
+ }
+ kfree(usbduxsub_tmp->urbOut);
+ usbduxsub_tmp->urbOut = NULL;
+ }
+ if (usbduxsub_tmp->urbPwm) {
+ if (usbduxsub_tmp->pwm_cmd_running) {
+ usbduxsub_tmp->pwm_cmd_running = 0;
+ usbduxsub_unlink_PwmURBs(usbduxsub_tmp);
+ }
+ kfree(usbduxsub_tmp->urbPwm->transfer_buffer);
+ usbduxsub_tmp->urbPwm->transfer_buffer = NULL;
+ usb_kill_urb(usbduxsub_tmp->urbPwm);
+ usb_free_urb(usbduxsub_tmp->urbPwm);
+ usbduxsub_tmp->urbPwm = NULL;
+ }
+ kfree(usbduxsub_tmp->inBuffer);
+ usbduxsub_tmp->inBuffer = NULL;
+ kfree(usbduxsub_tmp->insnBuffer);
+ usbduxsub_tmp->insnBuffer = NULL;
+ kfree(usbduxsub_tmp->outBuffer);
+ usbduxsub_tmp->outBuffer = NULL;
+ kfree(usbduxsub_tmp->dac_commands);
+ usbduxsub_tmp->dac_commands = NULL;
+ kfree(usbduxsub_tmp->dux_commands);
+ usbduxsub_tmp->dux_commands = NULL;
+ usbduxsub_tmp->ai_cmd_running = 0;
+ usbduxsub_tmp->ao_cmd_running = 0;
+ usbduxsub_tmp->pwm_cmd_running = 0;
+}
+
+static void usbdux_firmware_request_complete_handler(const struct firmware *fw,
+ void *context)
+{
+ struct usbduxsub *usbduxsub_tmp = context;
+ struct usb_device *usbdev = usbduxsub_tmp->usbdev;
+ int ret;
+
+ if (fw == NULL) {
+ dev_err(&usbdev->dev,
+ "Firmware complete handler without firmware!\n");
+ return;
+ }
+
+ /*
+ * we need to upload the firmware here because fw will be
+ * freed once we've left this function
+ */
+ ret = firmwareUpload(usbduxsub_tmp, fw->data, fw->size);
+
+ if (ret) {
+ dev_err(&usbdev->dev,
+ "Could not upload firmware (err=%d)\n", ret);
+ goto out;
+ }
+ comedi_usb_auto_config(usbdev, BOARDNAME);
+out:
+ release_firmware(fw);
+}
+
+/* allocate memory for the urbs and initialise them */
+static int usbduxsigma_probe(struct usb_interface *uinterf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(uinterf);
+ struct device *dev = &uinterf->dev;
+ int i;
+ int index;
+ int ret;
+
+ dev_dbg(dev, "comedi_: usbdux_: "
+ "finding a free structure for the usb-device\n");
+
+ down(&start_stop_sem);
+ /* look for a free place in the usbdux array */
+ index = -1;
+ for (i = 0; i < NUMUSBDUX; i++) {
+ if (!(usbduxsub[i].probed)) {
+ index = i;
+ break;
+ }
+ }
+
+ /* no more space */
+ if (index == -1) {
+ dev_err(dev, "Too many usbduxsigma-devices connected.\n");
+ up(&start_stop_sem);
+ return -EMFILE;
+ }
+ dev_dbg(dev, "comedi_: usbdux: "
+ "usbduxsub[%d] is ready to connect to comedi.\n", index);
+
+ sema_init(&(usbduxsub[index].sem), 1);
+ /* save a pointer to the usb device */
+ usbduxsub[index].usbdev = udev;
+
+ /* save the interface itself */
+ usbduxsub[index].interface = uinterf;
+ /* get the interface number from the interface */
+ usbduxsub[index].ifnum = uinterf->altsetting->desc.bInterfaceNumber;
+ /* hand the private data over to the usb subsystem */
+ /* will be needed for disconnect */
+ usb_set_intfdata(uinterf, &(usbduxsub[index]));
+
+ dev_dbg(dev, "comedi_: usbdux: ifnum=%d\n", usbduxsub[index].ifnum);
+
+ /* test if it is high speed (USB 2.0) */
+ usbduxsub[index].high_speed =
+ (usbduxsub[index].usbdev->speed == USB_SPEED_HIGH);
+
+ /* create space for the commands of the DA converter */
+ usbduxsub[index].dac_commands = kzalloc(NUMOUTCHANNELS, GFP_KERNEL);
+ if (!usbduxsub[index].dac_commands) {
+ dev_err(dev, "comedi_: usbduxsigma: "
+ "error alloc space for dac commands\n");
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENOMEM;
+ }
+ /* create space for the commands going to the usb device */
+ usbduxsub[index].dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
+ if (!usbduxsub[index].dux_commands) {
+ dev_err(dev, "comedi_: usbduxsigma: "
+ "error alloc space for dux commands\n");
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENOMEM;
+ }
+ /* create space for the in buffer and set it to zero */
+ usbduxsub[index].inBuffer = kzalloc(SIZEINBUF, GFP_KERNEL);
+ if (!(usbduxsub[index].inBuffer)) {
+ dev_err(dev, "comedi_: usbduxsigma: "
+ "could not alloc space for inBuffer\n");
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENOMEM;
+ }
+ /* create space of the instruction buffer */
+ usbduxsub[index].insnBuffer = kzalloc(SIZEINSNBUF, GFP_KERNEL);
+ if (!(usbduxsub[index].insnBuffer)) {
+ dev_err(dev, "comedi_: usbduxsigma: "
+ "could not alloc space for insnBuffer\n");
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENOMEM;
+ }
+ /* create space for the outbuffer */
+ usbduxsub[index].outBuffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
+ if (!(usbduxsub[index].outBuffer)) {
+ dev_err(dev, "comedi_: usbduxsigma: "
+ "could not alloc space for outBuffer\n");
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENOMEM;
+ }
+ /* setting to alternate setting 3: enabling iso ep and bulk ep. */
+ i = usb_set_interface(usbduxsub[index].usbdev,
+ usbduxsub[index].ifnum, 3);
+ if (i < 0) {
+ dev_err(dev, "comedi_: usbduxsigma%d: "
+ "could not set alternate setting 3 in high speed.\n",
+ index);
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENODEV;
+ }
+ if (usbduxsub[index].high_speed)
+ usbduxsub[index].numOfInBuffers = NUMOFINBUFFERSHIGH;
+ else
+ usbduxsub[index].numOfInBuffers = NUMOFINBUFFERSFULL;
+
+ usbduxsub[index].urbIn =
+ kzalloc(sizeof(struct urb *) * usbduxsub[index].numOfInBuffers,
+ GFP_KERNEL);
+ if (!(usbduxsub[index].urbIn)) {
+ dev_err(dev, "comedi_: usbduxsigma: "
+ "Could not alloc. urbIn array\n");
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENOMEM;
+ }
+ for (i = 0; i < usbduxsub[index].numOfInBuffers; i++) {
+ /* one frame: 1ms */
+ usbduxsub[index].urbIn[i] = usb_alloc_urb(1, GFP_KERNEL);
+ if (usbduxsub[index].urbIn[i] == NULL) {
+ dev_err(dev, "comedi_: usbduxsigma%d: "
+ "Could not alloc. urb(%d)\n", index, i);
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENOMEM;
+ }
+ usbduxsub[index].urbIn[i]->dev = usbduxsub[index].usbdev;
+ /* will be filled later with a pointer to the comedi-device */
+ /* and ONLY then the urb should be submitted */
+ usbduxsub[index].urbIn[i]->context = NULL;
+ usbduxsub[index].urbIn[i]->pipe =
+ usb_rcvisocpipe(usbduxsub[index].usbdev, ISOINEP);
+ usbduxsub[index].urbIn[i]->transfer_flags = URB_ISO_ASAP;
+ usbduxsub[index].urbIn[i]->transfer_buffer =
+ kzalloc(SIZEINBUF, GFP_KERNEL);
+ if (!(usbduxsub[index].urbIn[i]->transfer_buffer)) {
+ dev_err(dev, "comedi_: usbduxsigma%d: "
+ "could not alloc. transb.\n", index);
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENOMEM;
+ }
+ usbduxsub[index].urbIn[i]->complete = usbduxsub_ai_IsocIrq;
+ usbduxsub[index].urbIn[i]->number_of_packets = 1;
+ usbduxsub[index].urbIn[i]->transfer_buffer_length = SIZEINBUF;
+ usbduxsub[index].urbIn[i]->iso_frame_desc[0].offset = 0;
+ usbduxsub[index].urbIn[i]->iso_frame_desc[0].length =
+ SIZEINBUF;
+ }
+
+ /* out */
+ if (usbduxsub[index].high_speed)
+ usbduxsub[index].numOfOutBuffers = NUMOFOUTBUFFERSHIGH;
+ else
+ usbduxsub[index].numOfOutBuffers = NUMOFOUTBUFFERSFULL;
+
+ usbduxsub[index].urbOut =
+ kzalloc(sizeof(struct urb *) * usbduxsub[index].numOfOutBuffers,
+ GFP_KERNEL);
+ if (!(usbduxsub[index].urbOut)) {
+ dev_err(dev, "comedi_: usbduxsigma: "
+ "Could not alloc. urbOut array\n");
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENOMEM;
+ }
+ for (i = 0; i < usbduxsub[index].numOfOutBuffers; i++) {
+ /* one frame: 1ms */
+ usbduxsub[index].urbOut[i] = usb_alloc_urb(1, GFP_KERNEL);
+ if (usbduxsub[index].urbOut[i] == NULL) {
+ dev_err(dev, "comedi_: usbduxsigma%d: "
+ "Could not alloc. urb(%d)\n", index, i);
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENOMEM;
+ }
+ usbduxsub[index].urbOut[i]->dev = usbduxsub[index].usbdev;
+ /* will be filled later with a pointer to the comedi-device */
+ /* and ONLY then the urb should be submitted */
+ usbduxsub[index].urbOut[i]->context = NULL;
+ usbduxsub[index].urbOut[i]->pipe =
+ usb_sndisocpipe(usbduxsub[index].usbdev, ISOOUTEP);
+ usbduxsub[index].urbOut[i]->transfer_flags = URB_ISO_ASAP;
+ usbduxsub[index].urbOut[i]->transfer_buffer =
+ kzalloc(SIZEOUTBUF, GFP_KERNEL);
+ if (!(usbduxsub[index].urbOut[i]->transfer_buffer)) {
+ dev_err(dev, "comedi_: usbduxsigma%d: "
+ "could not alloc. transb.\n", index);
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENOMEM;
+ }
+ usbduxsub[index].urbOut[i]->complete = usbduxsub_ao_IsocIrq;
+ usbduxsub[index].urbOut[i]->number_of_packets = 1;
+ usbduxsub[index].urbOut[i]->transfer_buffer_length =
+ SIZEOUTBUF;
+ usbduxsub[index].urbOut[i]->iso_frame_desc[0].offset = 0;
+ usbduxsub[index].urbOut[i]->iso_frame_desc[0].length =
+ SIZEOUTBUF;
+ if (usbduxsub[index].high_speed) {
+ /* uframes */
+ usbduxsub[index].urbOut[i]->interval = 8;
+ } else {
+ /* frames */
+ usbduxsub[index].urbOut[i]->interval = 1;
+ }
+ }
+
+ /* pwm */
+ if (usbduxsub[index].high_speed) {
+ /* max bulk ep size in high speed */
+ usbduxsub[index].sizePwmBuf = 512;
+ usbduxsub[index].urbPwm = usb_alloc_urb(0, GFP_KERNEL);
+ if (usbduxsub[index].urbPwm == NULL) {
+ dev_err(dev, "comedi_: usbduxsigma%d: "
+ "Could not alloc. pwm urb\n", index);
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENOMEM;
+ }
+ usbduxsub[index].urbPwm->transfer_buffer =
+ kzalloc(usbduxsub[index].sizePwmBuf, GFP_KERNEL);
+ if (!(usbduxsub[index].urbPwm->transfer_buffer)) {
+ dev_err(dev, "comedi_: usbduxsigma%d: "
+ "could not alloc. transb. for pwm\n", index);
+ tidy_up(&(usbduxsub[index]));
+ up(&start_stop_sem);
+ return -ENOMEM;
+ }
+ } else {
+ usbduxsub[index].urbPwm = NULL;
+ usbduxsub[index].sizePwmBuf = 0;
+ }
+
+ usbduxsub[index].ai_cmd_running = 0;
+ usbduxsub[index].ao_cmd_running = 0;
+ usbduxsub[index].pwm_cmd_running = 0;
+
+ /* we've reached the bottom of the function */
+ usbduxsub[index].probed = 1;
+ up(&start_stop_sem);
+
+ ret = request_firmware_nowait(THIS_MODULE,
+ FW_ACTION_HOTPLUG,
+ "usbduxsigma_firmware.bin",
+ &udev->dev,
+ GFP_KERNEL,
+ usbduxsub + index,
+ usbdux_firmware_request_complete_handler
+ );
+
+ if (ret) {
+ dev_err(dev, "Could not load firmware (err=%d)\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "comedi_: successfully initialised.\n");
+ /* success */
+ return 0;
+}
+
+static void usbduxsigma_disconnect(struct usb_interface *intf)
+{
+ struct usbduxsub *usbduxsub_tmp = usb_get_intfdata(intf);
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ if (!usbduxsub_tmp) {
+ dev_err(&intf->dev,
+ "comedi_: disconnect called with null pointer.\n");
+ return;
+ }
+ if (usbduxsub_tmp->usbdev != udev) {
+ dev_err(&intf->dev, "comedi_: BUG! wrong ptr!\n");
+ return;
+ }
+ if (usbduxsub_tmp->ai_cmd_running)
+ /* we are still running a command */
+ usbdux_ai_stop(usbduxsub_tmp, 1);
+ if (usbduxsub_tmp->ao_cmd_running)
+ /* we are still running a command */
+ usbdux_ao_stop(usbduxsub_tmp, 1);
+ comedi_usb_auto_unconfig(udev);
+ down(&start_stop_sem);
+ down(&usbduxsub_tmp->sem);
+ tidy_up(usbduxsub_tmp);
+ up(&usbduxsub_tmp->sem);
+ up(&start_stop_sem);
+ dev_info(&intf->dev, "comedi_: disconnected from the usb\n");
+}
+
+/* is called when comedi-config is called */
+static int usbduxsigma_attach(struct comedi_device *dev,
+ struct comedi_devconfig *it)
+{
+ int ret;
+ int index;
+ int i;
+ struct usbduxsub *udev;
+
+ int offset;
+
+ struct comedi_subdevice *s = NULL;
+ dev->private = NULL;
+
+ down(&start_stop_sem);
+ /* find a valid device which has been detected by the probe function of
+ * the usb */
+ index = -1;
+ for (i = 0; i < NUMUSBDUX; i++) {
+ if ((usbduxsub[i].probed) && (!usbduxsub[i].attached)) {
+ index = i;
+ break;
+ }
+ }
+
+ if (index < 0) {
+ printk(KERN_ERR "comedi%d: usbduxsigma: error: attach failed,"
+ "dev not connected to the usb bus.\n", dev->minor);
+ up(&start_stop_sem);
+ return -ENODEV;
+ }
+
+ udev = &usbduxsub[index];
+ down(&udev->sem);
+ /* pointer back to the corresponding comedi device */
+ udev->comedidev = dev;
+
+ /* trying to upload the firmware into the FX2 */
+ if (comedi_aux_data(it->options, 0) &&
+ it->options[COMEDI_DEVCONF_AUX_DATA_LENGTH]) {
+ firmwareUpload(udev, comedi_aux_data(it->options, 0),
+ it->options[COMEDI_DEVCONF_AUX_DATA_LENGTH]);
+ }
+
+ dev->board_name = BOARDNAME;
+
+ /* set number of subdevices */
+ if (udev->high_speed) {
+ /* with pwm */
+ dev->n_subdevices = 4;
+ } else {
+ /* without pwm */
+ dev->n_subdevices = 3;
+ }
+
+ /* allocate space for the subdevices */
+ ret = alloc_subdevices(dev, dev->n_subdevices);
+ if (ret < 0) {
+ dev_err(&udev->interface->dev,
+ "comedi%d: no space for subdev\n", dev->minor);
+ up(&start_stop_sem);
+ return ret;
+ }
+
+ /* private structure is also simply the usb-structure */
+ dev->private = udev;
+
+ /* the first subdevice is the A/D converter */
+ s = dev->subdevices + SUBDEV_AD;
+ /* the URBs get the comedi subdevice */
+ /* which is responsible for reading */
+ /* this is the subdevice which reads data */
+ dev->read_subdev = s;
+ /* the subdevice receives as private structure the */
+ /* usb-structure */
+ s->private = NULL;
+ /* analog input */
+ s->type = COMEDI_SUBD_AI;
+ /* readable and ref is to ground, 32 bit wide data! */
+ s->subdev_flags = SDF_READABLE | SDF_GROUND |
+ SDF_CMD_READ | SDF_LSAMPL;
+ /* 16 A/D channels */
+ s->n_chan = NUMCHANNELS;
+ /* length of the channellist */
+ s->len_chanlist = NUMCHANNELS;
+ /* callback functions */
+ s->insn_read = usbdux_ai_insn_read;
+ s->do_cmdtest = usbdux_ai_cmdtest;
+ s->do_cmd = usbdux_ai_cmd;
+ s->cancel = usbdux_ai_cancel;
+ /* max value from the A/D converter (24bit) */
+ s->maxdata = 0x00FFFFFF;
+ /* range table to convert to physical units */
+ s->range_table = (&range_usbdux_ai_range);
+
+ /* analog out */
+ s = dev->subdevices + SUBDEV_DA;
+ /* analog out */
+ s->type = COMEDI_SUBD_AO;
+ /* backward pointer */
+ dev->write_subdev = s;
+ /* the subdevice receives as private structure the */
+ /* usb-structure */
+ s->private = NULL;
+ /* are writable */
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
+ /* 4 channels */
+ s->n_chan = 4;
+ /* length of the channellist */
+ s->len_chanlist = 4;
+ /* 8 bit resolution */
+ s->maxdata = 0x00ff;
+ /* unipolar range */
+ s->range_table = (&range_usbdux_ao_range);
+ /* callback */
+ s->do_cmdtest = usbdux_ao_cmdtest;
+ s->do_cmd = usbdux_ao_cmd;
+ s->cancel = usbdux_ao_cancel;
+ s->insn_read = usbdux_ao_insn_read;
+ s->insn_write = usbdux_ao_insn_write;
+
+ /* digital I/O */
+ s = dev->subdevices + SUBDEV_DIO;
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ /* 8 external and 16 internal channels */
+ s->n_chan = 24;
+ s->maxdata = 1;
+ s->range_table = (&range_digital);
+ s->insn_bits = usbdux_dio_insn_bits;
+ s->insn_config = usbdux_dio_insn_config;
+ /* we don't use it */
+ s->private = NULL;
+
+ if (udev->high_speed) {
+ /* timer / pwm */
+ s = dev->subdevices + SUBDEV_PWM;
+ s->type = COMEDI_SUBD_PWM;
+ s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE;
+ s->n_chan = 8;
+ /* this defines the max duty cycle resolution */
+ s->maxdata = udev->sizePwmBuf;
+ s->insn_write = usbdux_pwm_write;
+ s->insn_read = usbdux_pwm_read;
+ s->insn_config = usbdux_pwm_config;
+ usbdux_pwm_period(dev, s, PWM_DEFAULT_PERIOD);
+ }
+ /* finally decide that it's attached */
+ udev->attached = 1;
+
+ up(&udev->sem);
+
+ up(&start_stop_sem);
+
+ offset = usbdux_getstatusinfo(dev, 0);
+ if (offset < 0)
+ dev_err(&udev->interface->dev,
+ "Communication to USBDUXSIGMA failed!"
+ "Check firmware and cabling.");
+
+ dev_info(&udev->interface->dev,
+ "comedi%d: attached, ADC_zero = %x", dev->minor, offset);
+
+ return 0;
+}
+
+static int usbduxsigma_detach(struct comedi_device *dev)
+{
+ struct usbduxsub *usbduxsub_tmp;
+
+ if (!dev) {
+ printk(KERN_ERR
+ "comedi? usbduxsigma detach: dev=NULL\n");
+ return -EFAULT;
+ }
+
+ usbduxsub_tmp = dev->private;
+ if (!usbduxsub_tmp) {
+ printk(KERN_ERR
+ "comedi?: usbduxsigma detach: private=NULL\n");
+ return -EFAULT;
+ }
+
+ dev_dbg(&usbduxsub_tmp->interface->dev,
+ "comedi%d: detach usb device\n",
+ dev->minor);
+
+ down(&usbduxsub_tmp->sem);
+ /* Don't allow detach to free the private structure */
+ /* It's one entry of of usbduxsub[] */
+ dev->private = NULL;
+ usbduxsub_tmp->attached = 0;
+ usbduxsub_tmp->comedidev = NULL;
+ dev_info(&usbduxsub_tmp->interface->dev,
+ "comedi%d: successfully detached.\n", dev->minor);
+ up(&usbduxsub_tmp->sem);
+ return 0;
+}
+
+/* main driver struct */
+static struct comedi_driver driver_usbduxsigma = {
+ .driver_name = "usbduxsigma",
+ .module = THIS_MODULE,
+ .attach = usbduxsigma_attach,
+ .detach = usbduxsigma_detach,
+};
+
+/* Table with the USB-devices */
+static const struct usb_device_id usbduxsigma_table[] = {
+ {USB_DEVICE(0x13d8, 0x0020)},
+ {USB_DEVICE(0x13d8, 0x0021)},
+ {USB_DEVICE(0x13d8, 0x0022)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, usbduxsigma_table);
+
+/* The usbduxsub-driver */
+static struct usb_driver usbduxsigma_driver = {
+ .name = BOARDNAME,
+ .probe = usbduxsigma_probe,
+ .disconnect = usbduxsigma_disconnect,
+ .id_table = usbduxsigma_table,
+};
+
+/* Can't use the nice macro as I have also to initialise the USB */
+/* subsystem: */
+/* registering the usb-system _and_ the comedi-driver */
+static int __init init_usbduxsigma(void)
+{
+ printk(KERN_INFO KBUILD_MODNAME ": "
+ DRIVER_VERSION ":" DRIVER_DESC "\n");
+ usb_register(&usbduxsigma_driver);
+ comedi_driver_register(&driver_usbduxsigma);
+ return 0;
+}
+
+/* deregistering the comedi driver and the usb-subsystem */
+static void __exit exit_usbduxsigma(void)
+{
+ comedi_driver_unregister(&driver_usbduxsigma);
+ usb_deregister(&usbduxsigma_driver);
+}
+
+module_init(init_usbduxsigma);
+module_exit(exit_usbduxsigma);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/crystalhd/bc_dts_defs.h b/drivers/staging/crystalhd/bc_dts_defs.h
index 778e76af052..fde5b0627f6 100644
--- a/drivers/staging/crystalhd/bc_dts_defs.h
+++ b/drivers/staging/crystalhd/bc_dts_defs.h
@@ -84,7 +84,7 @@ enum BC_SW_OPTIONS {
BC_OPT_LINK_OUT_ENCRYPT = BC_BIT(29),
};
-struct BC_REG_CONFIG{
+struct BC_REG_CONFIG {
uint32_t DbgOptions;
};
@@ -391,7 +391,7 @@ struct BC_PIC_INFO_BLOCK {
* ProcOut Info *
*------------------------------------------------------*/
/* Optional flags for ProcOut Interface.*/
-enum POUT_OPTIONAL_IN_FLAGS_{
+enum POUT_OPTIONAL_IN_FLAGS_ {
/* Flags from App to Device */
BC_POUT_FLAGS_YV12 = 0x01, /* Copy Data in YV12 format */
BC_POUT_FLAGS_STRIDE = 0x02, /* Stride size is valid. */
diff --git a/drivers/staging/crystalhd/bc_dts_glob_lnx.h b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
index 80b7a73a9d4..bbe5119761f 100644
--- a/drivers/staging/crystalhd/bc_dts_glob_lnx.h
+++ b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
@@ -175,7 +175,7 @@ struct BC_DEC_YUV_BUFFS {
uint32_t RefCnt;
};
-enum DECOUT_COMPLETION_FLAGS{
+enum DECOUT_COMPLETION_FLAGS {
COMP_FLAG_NO_INFO = 0x00,
COMP_FLAG_FMT_CHANGE = 0x01,
COMP_FLAG_PIB_VALID = 0x02,
@@ -184,7 +184,7 @@ enum DECOUT_COMPLETION_FLAGS{
COMP_FLAG_DATA_BOT = 0x10,
};
-struct BC_DEC_OUT_BUFF{
+struct BC_DEC_OUT_BUFF {
struct BC_DEC_YUV_BUFFS OutPutBuffs;
struct BC_PIC_INFO_BLOCK PibInfo;
uint32_t Flags;
@@ -289,7 +289,7 @@ struct crystalhd_ioctl_data {
struct crystalhd_ioctl_data *next; /* List/Fifo management */
};
-enum crystalhd_kmod_ver{
+enum crystalhd_kmod_ver {
crystalhd_kmod_major = 0,
crystalhd_kmod_minor = 9,
crystalhd_kmod_rev = 27,
diff --git a/drivers/staging/crystalhd/bc_dts_types.h b/drivers/staging/crystalhd/bc_dts_types.h
index d2131e7fe2f..1085a91221b 100644
--- a/drivers/staging/crystalhd/bc_dts_types.h
+++ b/drivers/staging/crystalhd/bc_dts_types.h
@@ -25,32 +25,7 @@
#ifndef _BC_DTS_TYPES_H_
#define _BC_DTS_TYPES_H_
-#ifdef __LINUX_USER__ /* Don't include these for KERNEL.. */
#include <stdint.h>
-#endif
-
-#ifndef PVOID
-typedef void *PVOID;
-#endif
-
-#ifndef BOOL
-typedef int BOOL;
-#endif
-
-#if defined(__KERNEL__) || defined(__LINUX_USER__)
-
-#ifdef __LINUX_USER__ /* Don't include these for KERNEL */
-typedef uint32_t ULONG;
-typedef int32_t LONG;
-typedef void *HANDLE;
-#ifndef VOID
-typedef void VOID;
-#endif
-typedef void *LPVOID;
-typedef uint32_t DWORD;
-typedef uint32_t UINT32;
-typedef uint32_t *LPDWORD;
-typedef unsigned char *PUCHAR;
#ifndef TRUE
#define TRUE 1
@@ -62,36 +37,4 @@ typedef unsigned char *PUCHAR;
#define TEXT
-#else
-
-/* For Kernel usage.. */
-#endif
-
-#else
-
-#ifndef uint64_t
-typedef struct _uint64_t {
- uint32_t low_dw;
- uint32_t hi_dw;
-} uint64_t;
#endif
-
-#ifndef int32_t
-typedef signed long int32_t;
-#endif
-
-#ifndef uint32_t
-typedef unsigned long uint32_t;
-#endif
-
-#ifndef uint16_t
-typedef unsigned short uint16_t;
-#endif
-
-#ifndef uint8_t
-typedef unsigned char uint8_t;
-#endif
-#endif
-
-#endif
-
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.h b/drivers/staging/crystalhd/crystalhd_cmds.h
index 10130296601..f0a2796045c 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.h
+++ b/drivers/staging/crystalhd/crystalhd_cmds.h
@@ -36,7 +36,7 @@
#include "crystalhd_misc.h"
#include "crystalhd_hw.h"
-enum crystalhd_state{
+enum crystalhd_state {
BC_LINK_INVALID = 0x00,
BC_LINK_INIT = 0x01,
BC_LINK_CAP_EN = 0x02,
diff --git a/drivers/staging/crystalhd/crystalhd_fw_if.h b/drivers/staging/crystalhd/crystalhd_fw_if.h
index 77560d4b680..9e2831e68bb 100644
--- a/drivers/staging/crystalhd/crystalhd_fw_if.h
+++ b/drivers/staging/crystalhd/crystalhd_fw_if.h
@@ -31,33 +31,33 @@
/* User Data Header */
struct user_data {
- struct user_data *next;
- uint32_t type;
- uint32_t size;
+ struct user_data *next;
+ uint32_t type;
+ uint32_t size;
};
/*------------------------------------------------------*
* MPEG Extension to the PPB *
*------------------------------------------------------*/
struct ppb_mpeg {
- uint32_t to_be_defined;
- uint32_t valid;
+ uint32_t to_be_defined;
+ uint32_t valid;
- /* Always valid, defaults to picture size if no
- sequence display extension in the stream. */
- uint32_t display_horizontal_size;
- uint32_t display_vertical_size;
+ /* Always valid, defaults to picture size if no
+ sequence display extension in the stream. */
+ uint32_t display_horizontal_size;
+ uint32_t display_vertical_size;
- /* MPEG_VALID_PANSCAN
- Offsets are a copy values from the MPEG stream. */
- uint32_t offset_count;
- int32_t horizontal_offset[3];
- int32_t vertical_offset[3];
+ /* MPEG_VALID_PANSCAN
+ Offsets are a copy values from the MPEG stream. */
+ uint32_t offset_count;
+ int32_t horizontal_offset[3];
+ int32_t vertical_offset[3];
- /* MPEG_VALID_USERDATA
- User data is in the form of a linked list. */
- int32_t userDataSize;
- struct user_data *userData;
+ /* MPEG_VALID_USERDATA
+ User data is in the form of a linked list. */
+ int32_t userDataSize;
+ struct user_data *userData;
};
@@ -66,25 +66,25 @@ struct ppb_mpeg {
* VC1 Extension to the PPB *
*------------------------------------------------------*/
struct ppb_vc1 {
- uint32_t to_be_defined;
- uint32_t valid;
-
- /* Always valid, defaults to picture size if no
- sequence display extension in the stream. */
- uint32_t display_horizontal_size;
- uint32_t display_vertical_size;
-
- /* VC1 pan scan windows */
- uint32_t num_panscan_windows;
- int32_t ps_horiz_offset[4];
- int32_t ps_vert_offset[4];
- int32_t ps_width[4];
- int32_t ps_height[4];
-
- /* VC1_VALID_USERDATA
- User data is in the form of a linked list. */
- int32_t userDataSize;
- struct user_data *userData;
+ uint32_t to_be_defined;
+ uint32_t valid;
+
+ /* Always valid, defaults to picture size if no
+ sequence display extension in the stream. */
+ uint32_t display_horizontal_size;
+ uint32_t display_vertical_size;
+
+ /* VC1 pan scan windows */
+ uint32_t num_panscan_windows;
+ int32_t ps_horiz_offset[4];
+ int32_t ps_vert_offset[4];
+ int32_t ps_width[4];
+ int32_t ps_height[4];
+
+ /* VC1_VALID_USERDATA
+ User data is in the form of a linked list. */
+ int32_t userDataSize;
+ struct user_data *userData;
};
@@ -105,145 +105,145 @@ struct ppb_vc1 {
#define MAX_FGT_VALUE_INTERVAL (256)
struct fgt_sei {
- struct fgt_sei *next;
- unsigned char model_values[3][MAX_FGT_VALUE_INTERVAL][MAX_FGT_MODEL_VALUE];
- unsigned char upper_bound[3][MAX_FGT_VALUE_INTERVAL];
- unsigned char lower_bound[3][MAX_FGT_VALUE_INTERVAL];
-
- unsigned char cancel_flag; /* Cancel flag: 1 no film grain. */
- unsigned char model_id; /* Model id. */
-
- /* +unused SE based on Thomson spec */
- unsigned char color_desc_flag; /* Separate color descrition flag. */
- unsigned char bit_depth_luma; /* Bit depth luma minus 8. */
- unsigned char bit_depth_chroma; /* Bit depth chroma minus 8. */
- unsigned char full_range_flag; /* Full range flag. */
- unsigned char color_primaries; /* Color primaries. */
- unsigned char transfer_charact; /* Transfer characteristics. */
- unsigned char matrix_coeff; /*< Matrix coefficients. */
- /* -unused SE based on Thomson spec */
-
- unsigned char blending_mode_id; /* Blending mode. */
- unsigned char log2_scale_factor; /* Log2 scale factor (2-7). */
- unsigned char comp_flag[3]; /* Components [0,2] parameters present flag. */
- unsigned char num_intervals_minus1[3]; /* Number of intensity level intervals. */
- unsigned char num_model_values[3]; /* Number of model values. */
- uint16_t repetition_period; /* Repetition period (0-16384) */
+ struct fgt_sei *next;
+ unsigned char model_values[3][MAX_FGT_VALUE_INTERVAL][MAX_FGT_MODEL_VALUE];
+ unsigned char upper_bound[3][MAX_FGT_VALUE_INTERVAL];
+ unsigned char lower_bound[3][MAX_FGT_VALUE_INTERVAL];
+
+ unsigned char cancel_flag; /* Cancel flag: 1 no film grain. */
+ unsigned char model_id; /* Model id. */
+
+ /* +unused SE based on Thomson spec */
+ unsigned char color_desc_flag; /* Separate color descrition flag. */
+ unsigned char bit_depth_luma; /* Bit depth luma minus 8. */
+ unsigned char bit_depth_chroma; /* Bit depth chroma minus 8. */
+ unsigned char full_range_flag; /* Full range flag. */
+ unsigned char color_primaries; /* Color primaries. */
+ unsigned char transfer_charact; /* Transfer characteristics. */
+ unsigned char matrix_coeff; /*< Matrix coefficients. */
+ /* -unused SE based on Thomson spec */
+
+ unsigned char blending_mode_id; /* Blending mode. */
+ unsigned char log2_scale_factor; /* Log2 scale factor (2-7). */
+ unsigned char comp_flag[3]; /* Components [0,2] parameters present flag. */
+ unsigned char num_intervals_minus1[3]; /* Number of intensity level intervals. */
+ unsigned char num_model_values[3]; /* Number of model values. */
+ uint16_t repetition_period; /* Repetition period (0-16384) */
};
struct ppb_h264 {
- /* 'valid' specifies which fields (or sets of
- * fields) below are valid. If the corresponding
- * bit in 'valid' is NOT set then that field(s)
- * is (are) not initialized. */
- uint32_t valid;
-
- int32_t poc_top; /* POC for Top Field/Frame */
- int32_t poc_bottom; /* POC for Bottom Field */
- uint32_t idr_pic_id;
-
- /* H264_VALID_PANSCAN */
- uint32_t pan_scan_count;
- int32_t pan_scan_left[3];
- int32_t pan_scan_right[3];
- int32_t pan_scan_top[3];
- int32_t pan_scan_bottom[3];
-
- /* H264_VALID_CT_TYPE */
- uint32_t ct_type_count;
- uint32_t ct_type[3];
-
- /* H264_VALID_SPS_CROP */
- int32_t sps_crop_left;
- int32_t sps_crop_right;
- int32_t sps_crop_top;
- int32_t sps_crop_bottom;
-
- /* H264_VALID_VUI */
- uint32_t chroma_top;
- uint32_t chroma_bottom;
-
- /* H264_VALID_USER */
- uint32_t user_data_size;
- struct user_data *user_data;
-
- /* H264 VALID FGT */
- struct fgt_sei *pfgt;
+ /* 'valid' specifies which fields (or sets of
+ * fields) below are valid. If the corresponding
+ * bit in 'valid' is NOT set then that field(s)
+ * is (are) not initialized. */
+ uint32_t valid;
+
+ int32_t poc_top; /* POC for Top Field/Frame */
+ int32_t poc_bottom; /* POC for Bottom Field */
+ uint32_t idr_pic_id;
+
+ /* H264_VALID_PANSCAN */
+ uint32_t pan_scan_count;
+ int32_t pan_scan_left[3];
+ int32_t pan_scan_right[3];
+ int32_t pan_scan_top[3];
+ int32_t pan_scan_bottom[3];
+
+ /* H264_VALID_CT_TYPE */
+ uint32_t ct_type_count;
+ uint32_t ct_type[3];
+
+ /* H264_VALID_SPS_CROP */
+ int32_t sps_crop_left;
+ int32_t sps_crop_right;
+ int32_t sps_crop_top;
+ int32_t sps_crop_bottom;
+
+ /* H264_VALID_VUI */
+ uint32_t chroma_top;
+ uint32_t chroma_bottom;
+
+ /* H264_VALID_USER */
+ uint32_t user_data_size;
+ struct user_data *user_data;
+
+ /* H264 VALID FGT */
+ struct fgt_sei *pfgt;
};
struct ppb {
- /* Common fields. */
- uint32_t picture_number; /* Ordinal display number */
- uint32_t video_buffer; /* Video (picbuf) number */
- uint32_t video_address; /* Address of picbuf Y */
- uint32_t video_address_uv; /* Address of picbuf UV */
- uint32_t video_stripe; /* Picbuf stripe */
- uint32_t video_width; /* Picbuf width */
- uint32_t video_height; /* Picbuf height */
-
- uint32_t channel_id; /* Decoder channel ID */
- uint32_t status; /* reserved */
- uint32_t width; /* pixels */
- uint32_t height; /* pixels */
- uint32_t chroma_format; /* see above */
- uint32_t pulldown; /* see above */
- uint32_t flags; /* see above */
- uint32_t pts; /* 32 LSBs of PTS */
- uint32_t protocol; /* protocolXXX (above) */
-
- uint32_t frame_rate; /* see above */
- uint32_t matrix_coeff; /* see above */
- uint32_t aspect_ratio; /* see above */
- uint32_t colour_primaries; /* see above */
- uint32_t transfer_char; /* see above */
- uint32_t pcr_offset; /* 45kHz if PCR type; else 27MHz */
- uint32_t n_drop; /* Number of pictures to be dropped */
-
- uint32_t custom_aspect_ratio_width_height;
- /* upper 16-bits is Y and lower 16-bits is X */
-
- uint32_t picture_tag; /* Indexing tag from BUD packets */
- uint32_t picture_done_payload;
- uint32_t picture_meta_payload;
- uint32_t reserved[1];
-
- /* Protocol-specific extensions. */
- union {
- struct ppb_h264 h264;
- struct ppb_mpeg mpeg;
- struct ppb_vc1 vc1;
- } other;
+ /* Common fields. */
+ uint32_t picture_number; /* Ordinal display number */
+ uint32_t video_buffer; /* Video (picbuf) number */
+ uint32_t video_address; /* Address of picbuf Y */
+ uint32_t video_address_uv; /* Address of picbuf UV */
+ uint32_t video_stripe; /* Picbuf stripe */
+ uint32_t video_width; /* Picbuf width */
+ uint32_t video_height; /* Picbuf height */
+
+ uint32_t channel_id; /* Decoder channel ID */
+ uint32_t status; /* reserved */
+ uint32_t width; /* pixels */
+ uint32_t height; /* pixels */
+ uint32_t chroma_format; /* see above */
+ uint32_t pulldown; /* see above */
+ uint32_t flags; /* see above */
+ uint32_t pts; /* 32 LSBs of PTS */
+ uint32_t protocol; /* protocolXXX (above) */
+
+ uint32_t frame_rate; /* see above */
+ uint32_t matrix_coeff; /* see above */
+ uint32_t aspect_ratio; /* see above */
+ uint32_t colour_primaries; /* see above */
+ uint32_t transfer_char; /* see above */
+ uint32_t pcr_offset; /* 45kHz if PCR type; else 27MHz */
+ uint32_t n_drop; /* Number of pictures to be dropped */
+
+ uint32_t custom_aspect_ratio_width_height;
+ /* upper 16-bits is Y and lower 16-bits is X */
+
+ uint32_t picture_tag; /* Indexing tag from BUD packets */
+ uint32_t picture_done_payload;
+ uint32_t picture_meta_payload;
+ uint32_t reserved[1];
+
+ /* Protocol-specific extensions. */
+ union {
+ struct ppb_h264 h264;
+ struct ppb_mpeg mpeg;
+ struct ppb_vc1 vc1;
+ } other;
};
struct c011_pib {
- uint32_t bFormatChange;
- uint32_t resolution;
- uint32_t channelId;
- uint32_t ppbPtr;
- int32_t ptsStcOffset;
- uint32_t zeroPanscanValid;
- uint32_t dramOutBufAddr;
- uint32_t yComponent;
- struct ppb ppb;
+ uint32_t bFormatChange;
+ uint32_t resolution;
+ uint32_t channelId;
+ uint32_t ppbPtr;
+ int32_t ptsStcOffset;
+ uint32_t zeroPanscanValid;
+ uint32_t dramOutBufAddr;
+ uint32_t yComponent;
+ struct ppb ppb;
};
struct dec_rsp_channel_start_video {
- uint32_t command;
- uint32_t sequence;
- uint32_t status;
- uint32_t picBuf;
- uint32_t picRelBuf;
- uint32_t picInfoDeliveryQ;
- uint32_t picInfoReleaseQ;
- uint32_t channelStatus;
- uint32_t userDataDeliveryQ;
- uint32_t userDataReleaseQ;
- uint32_t transportStreamCaptureAddr;
- uint32_t asyncEventQ;
+ uint32_t command;
+ uint32_t sequence;
+ uint32_t status;
+ uint32_t picBuf;
+ uint32_t picRelBuf;
+ uint32_t picInfoDeliveryQ;
+ uint32_t picInfoReleaseQ;
+ uint32_t channelStatus;
+ uint32_t userDataDeliveryQ;
+ uint32_t userDataReleaseQ;
+ uint32_t transportStreamCaptureAddr;
+ uint32_t asyncEventQ;
};
@@ -251,112 +251,112 @@ struct dec_rsp_channel_start_video {
/* host commands */
enum c011_ts_cmd {
- eCMD_TS_GET_NEXT_PIC = 0x7376F100, /* debug get next picture */
- eCMD_TS_GET_LAST_PIC = 0x7376F102, /* debug get last pic status */
- eCMD_TS_READ_WRITE_MEM = 0x7376F104, /* debug read write memory */
-
- /* New API commands */
- /* General commands */
- eCMD_C011_INIT = eCMD_C011_CMD_BASE + 0x01,
- eCMD_C011_RESET = eCMD_C011_CMD_BASE + 0x02,
- eCMD_C011_SELF_TEST = eCMD_C011_CMD_BASE + 0x03,
- eCMD_C011_GET_VERSION = eCMD_C011_CMD_BASE + 0x04,
- eCMD_C011_GPIO = eCMD_C011_CMD_BASE + 0x05,
- eCMD_C011_DEBUG_SETUP = eCMD_C011_CMD_BASE + 0x06,
-
- /* Decoding commands */
- eCMD_C011_DEC_CHAN_OPEN = eCMD_C011_CMD_BASE + 0x100,
- eCMD_C011_DEC_CHAN_CLOSE = eCMD_C011_CMD_BASE + 0x101,
- eCMD_C011_DEC_CHAN_ACTIVATE = eCMD_C011_CMD_BASE + 0x102,
- eCMD_C011_DEC_CHAN_STATUS = eCMD_C011_CMD_BASE + 0x103,
- eCMD_C011_DEC_CHAN_FLUSH = eCMD_C011_CMD_BASE + 0x104,
- eCMD_C011_DEC_CHAN_TRICK_PLAY = eCMD_C011_CMD_BASE + 0x105,
- eCMD_C011_DEC_CHAN_TS_PIDS = eCMD_C011_CMD_BASE + 0x106,
- eCMD_C011_DEC_CHAN_PS_STREAM_ID = eCMD_C011_CMD_BASE + 0x107,
- eCMD_C011_DEC_CHAN_INPUT_PARAMS = eCMD_C011_CMD_BASE + 0x108,
- eCMD_C011_DEC_CHAN_VIDEO_OUTPUT = eCMD_C011_CMD_BASE + 0x109,
- eCMD_C011_DEC_CHAN_OUTPUT_FORMAT = eCMD_C011_CMD_BASE + 0x10A,
- eCMD_C011_DEC_CHAN_SCALING_FILTERS = eCMD_C011_CMD_BASE + 0x10B,
- eCMD_C011_DEC_CHAN_OSD_MODE = eCMD_C011_CMD_BASE + 0x10D,
- eCMD_C011_DEC_CHAN_DROP = eCMD_C011_CMD_BASE + 0x10E,
- eCMD_C011_DEC_CHAN_RELEASE = eCMD_C011_CMD_BASE + 0x10F,
- eCMD_C011_DEC_CHAN_STREAM_SETTINGS = eCMD_C011_CMD_BASE + 0x110,
- eCMD_C011_DEC_CHAN_PAUSE_OUTPUT = eCMD_C011_CMD_BASE + 0x111,
- eCMD_C011_DEC_CHAN_CHANGE = eCMD_C011_CMD_BASE + 0x112,
- eCMD_C011_DEC_CHAN_SET_STC = eCMD_C011_CMD_BASE + 0x113,
- eCMD_C011_DEC_CHAN_SET_PTS = eCMD_C011_CMD_BASE + 0x114,
- eCMD_C011_DEC_CHAN_CC_MODE = eCMD_C011_CMD_BASE + 0x115,
- eCMD_C011_DEC_CREATE_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x116,
- eCMD_C011_DEC_COPY_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x117,
- eCMD_C011_DEC_DELETE_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x118,
- eCMD_C011_DEC_CHAN_SET_DECYPTION = eCMD_C011_CMD_BASE + 0x119,
- eCMD_C011_DEC_CHAN_START_VIDEO = eCMD_C011_CMD_BASE + 0x11A,
- eCMD_C011_DEC_CHAN_STOP_VIDEO = eCMD_C011_CMD_BASE + 0x11B,
- eCMD_C011_DEC_CHAN_PIC_CAPTURE = eCMD_C011_CMD_BASE + 0x11C,
- eCMD_C011_DEC_CHAN_PAUSE = eCMD_C011_CMD_BASE + 0x11D,
- eCMD_C011_DEC_CHAN_PAUSE_STATE = eCMD_C011_CMD_BASE + 0x11E,
- eCMD_C011_DEC_CHAN_SET_SLOWM_RATE = eCMD_C011_CMD_BASE + 0x11F,
- eCMD_C011_DEC_CHAN_GET_SLOWM_RATE = eCMD_C011_CMD_BASE + 0x120,
- eCMD_C011_DEC_CHAN_SET_FF_RATE = eCMD_C011_CMD_BASE + 0x121,
- eCMD_C011_DEC_CHAN_GET_FF_RATE = eCMD_C011_CMD_BASE + 0x122,
- eCMD_C011_DEC_CHAN_FRAME_ADVANCE = eCMD_C011_CMD_BASE + 0x123,
- eCMD_C011_DEC_CHAN_SET_SKIP_PIC_MODE = eCMD_C011_CMD_BASE + 0x124,
- eCMD_C011_DEC_CHAN_GET_SKIP_PIC_MODE = eCMD_C011_CMD_BASE + 0x125,
- eCMD_C011_DEC_CHAN_FILL_PIC_BUF = eCMD_C011_CMD_BASE + 0x126,
- eCMD_C011_DEC_CHAN_SET_CONTINUITY_CHECK = eCMD_C011_CMD_BASE + 0x127,
- eCMD_C011_DEC_CHAN_GET_CONTINUITY_CHECK = eCMD_C011_CMD_BASE + 0x128,
- eCMD_C011_DEC_CHAN_SET_BRCM_TRICK_MODE = eCMD_C011_CMD_BASE + 0x129,
- eCMD_C011_DEC_CHAN_GET_BRCM_TRICK_MODE = eCMD_C011_CMD_BASE + 0x12A,
- eCMD_C011_DEC_CHAN_REVERSE_FIELD_STATUS = eCMD_C011_CMD_BASE + 0x12B,
- eCMD_C011_DEC_CHAN_I_PICTURE_FOUND = eCMD_C011_CMD_BASE + 0x12C,
- eCMD_C011_DEC_CHAN_SET_PARAMETER = eCMD_C011_CMD_BASE + 0x12D,
- eCMD_C011_DEC_CHAN_SET_USER_DATA_MODE = eCMD_C011_CMD_BASE + 0x12E,
- eCMD_C011_DEC_CHAN_SET_PAUSE_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x12F,
- eCMD_C011_DEC_CHAN_SET_SLOW_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x130,
- eCMD_C011_DEC_CHAN_SET_FF_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x131,
- eCMD_C011_DEC_CHAN_SET_DISPLAY_TIMING_MODE = eCMD_C011_CMD_BASE + 0x132,
- eCMD_C011_DEC_CHAN_SET_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x133,
- eCMD_C011_DEC_CHAN_GET_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x134,
- eCMD_C011_DEC_CHAN_SET_REVERSE_FIELD = eCMD_C011_CMD_BASE + 0x135,
- eCMD_C011_DEC_CHAN_STREAM_OPEN = eCMD_C011_CMD_BASE + 0x136,
- eCMD_C011_DEC_CHAN_SET_PCR_PID = eCMD_C011_CMD_BASE + 0x137,
- eCMD_C011_DEC_CHAN_SET_VID_PID = eCMD_C011_CMD_BASE + 0x138,
- eCMD_C011_DEC_CHAN_SET_PAN_SCAN_MODE = eCMD_C011_CMD_BASE + 0x139,
- eCMD_C011_DEC_CHAN_START_DISPLAY_AT_PTS = eCMD_C011_CMD_BASE + 0x140,
- eCMD_C011_DEC_CHAN_STOP_DISPLAY_AT_PTS = eCMD_C011_CMD_BASE + 0x141,
- eCMD_C011_DEC_CHAN_SET_DISPLAY_ORDER = eCMD_C011_CMD_BASE + 0x142,
- eCMD_C011_DEC_CHAN_GET_DISPLAY_ORDER = eCMD_C011_CMD_BASE + 0x143,
- eCMD_C011_DEC_CHAN_SET_HOST_TRICK_MODE = eCMD_C011_CMD_BASE + 0x144,
- eCMD_C011_DEC_CHAN_SET_OPERATION_MODE = eCMD_C011_CMD_BASE + 0x145,
- eCMD_C011_DEC_CHAN_DISPLAY_PAUSE_UNTO_PTS = eCMD_C011_CMD_BASE + 0x146,
- eCMD_C011_DEC_CHAN_SET_PTS_STC_DIFF_THRESHOLD = eCMD_C011_CMD_BASE + 0x147,
- eCMD_C011_DEC_CHAN_SEND_COMPRESSED_BUF = eCMD_C011_CMD_BASE + 0x148,
- eCMD_C011_DEC_CHAN_SET_CLIPPING = eCMD_C011_CMD_BASE + 0x149,
- eCMD_C011_DEC_CHAN_SET_PARAMETERS_FOR_HARD_RESET_INTERRUPT_TO_HOST
- = eCMD_C011_CMD_BASE + 0x150,
-
- /* Decoder RevD commands */
- eCMD_C011_DEC_CHAN_SET_CSC = eCMD_C011_CMD_BASE + 0x180, /* color space conversion */
- eCMD_C011_DEC_CHAN_SET_RANGE_REMAP = eCMD_C011_CMD_BASE + 0x181,
- eCMD_C011_DEC_CHAN_SET_FGT = eCMD_C011_CMD_BASE + 0x182,
- /* Note: 0x183 not implemented yet in Rev D main */
- eCMD_C011_DEC_CHAN_SET_LASTPICTURE_PADDING = eCMD_C011_CMD_BASE + 0x183,
-
- /* Decoder 7412 commands (7412-only) */
- eCMD_C011_DEC_CHAN_SET_CONTENT_KEY = eCMD_C011_CMD_BASE + 0x190,
- eCMD_C011_DEC_CHAN_SET_SESSION_KEY = eCMD_C011_CMD_BASE + 0x191,
- eCMD_C011_DEC_CHAN_FMT_CHANGE_ACK = eCMD_C011_CMD_BASE + 0x192,
-
- eCMD_C011_DEC_CHAN_CUSTOM_VIDOUT = eCMD_C011_CMD_BASE + 0x1FF,
-
- /* Encoding commands */
- eCMD_C011_ENC_CHAN_OPEN = eCMD_C011_CMD_BASE + 0x200,
- eCMD_C011_ENC_CHAN_CLOSE = eCMD_C011_CMD_BASE + 0x201,
- eCMD_C011_ENC_CHAN_ACTIVATE = eCMD_C011_CMD_BASE + 0x202,
- eCMD_C011_ENC_CHAN_CONTROL = eCMD_C011_CMD_BASE + 0x203,
- eCMD_C011_ENC_CHAN_STATISTICS = eCMD_C011_CMD_BASE + 0x204,
-
- eNOTIFY_C011_ENC_CHAN_EVENT = eCMD_C011_CMD_BASE + 0x210,
+ eCMD_TS_GET_NEXT_PIC = 0x7376F100, /* debug get next picture */
+ eCMD_TS_GET_LAST_PIC = 0x7376F102, /* debug get last pic status */
+ eCMD_TS_READ_WRITE_MEM = 0x7376F104, /* debug read write memory */
+
+ /* New API commands */
+ /* General commands */
+ eCMD_C011_INIT = eCMD_C011_CMD_BASE + 0x01,
+ eCMD_C011_RESET = eCMD_C011_CMD_BASE + 0x02,
+ eCMD_C011_SELF_TEST = eCMD_C011_CMD_BASE + 0x03,
+ eCMD_C011_GET_VERSION = eCMD_C011_CMD_BASE + 0x04,
+ eCMD_C011_GPIO = eCMD_C011_CMD_BASE + 0x05,
+ eCMD_C011_DEBUG_SETUP = eCMD_C011_CMD_BASE + 0x06,
+
+ /* Decoding commands */
+ eCMD_C011_DEC_CHAN_OPEN = eCMD_C011_CMD_BASE + 0x100,
+ eCMD_C011_DEC_CHAN_CLOSE = eCMD_C011_CMD_BASE + 0x101,
+ eCMD_C011_DEC_CHAN_ACTIVATE = eCMD_C011_CMD_BASE + 0x102,
+ eCMD_C011_DEC_CHAN_STATUS = eCMD_C011_CMD_BASE + 0x103,
+ eCMD_C011_DEC_CHAN_FLUSH = eCMD_C011_CMD_BASE + 0x104,
+ eCMD_C011_DEC_CHAN_TRICK_PLAY = eCMD_C011_CMD_BASE + 0x105,
+ eCMD_C011_DEC_CHAN_TS_PIDS = eCMD_C011_CMD_BASE + 0x106,
+ eCMD_C011_DEC_CHAN_PS_STREAM_ID = eCMD_C011_CMD_BASE + 0x107,
+ eCMD_C011_DEC_CHAN_INPUT_PARAMS = eCMD_C011_CMD_BASE + 0x108,
+ eCMD_C011_DEC_CHAN_VIDEO_OUTPUT = eCMD_C011_CMD_BASE + 0x109,
+ eCMD_C011_DEC_CHAN_OUTPUT_FORMAT = eCMD_C011_CMD_BASE + 0x10A,
+ eCMD_C011_DEC_CHAN_SCALING_FILTERS = eCMD_C011_CMD_BASE + 0x10B,
+ eCMD_C011_DEC_CHAN_OSD_MODE = eCMD_C011_CMD_BASE + 0x10D,
+ eCMD_C011_DEC_CHAN_DROP = eCMD_C011_CMD_BASE + 0x10E,
+ eCMD_C011_DEC_CHAN_RELEASE = eCMD_C011_CMD_BASE + 0x10F,
+ eCMD_C011_DEC_CHAN_STREAM_SETTINGS = eCMD_C011_CMD_BASE + 0x110,
+ eCMD_C011_DEC_CHAN_PAUSE_OUTPUT = eCMD_C011_CMD_BASE + 0x111,
+ eCMD_C011_DEC_CHAN_CHANGE = eCMD_C011_CMD_BASE + 0x112,
+ eCMD_C011_DEC_CHAN_SET_STC = eCMD_C011_CMD_BASE + 0x113,
+ eCMD_C011_DEC_CHAN_SET_PTS = eCMD_C011_CMD_BASE + 0x114,
+ eCMD_C011_DEC_CHAN_CC_MODE = eCMD_C011_CMD_BASE + 0x115,
+ eCMD_C011_DEC_CREATE_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x116,
+ eCMD_C011_DEC_COPY_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x117,
+ eCMD_C011_DEC_DELETE_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x118,
+ eCMD_C011_DEC_CHAN_SET_DECYPTION = eCMD_C011_CMD_BASE + 0x119,
+ eCMD_C011_DEC_CHAN_START_VIDEO = eCMD_C011_CMD_BASE + 0x11A,
+ eCMD_C011_DEC_CHAN_STOP_VIDEO = eCMD_C011_CMD_BASE + 0x11B,
+ eCMD_C011_DEC_CHAN_PIC_CAPTURE = eCMD_C011_CMD_BASE + 0x11C,
+ eCMD_C011_DEC_CHAN_PAUSE = eCMD_C011_CMD_BASE + 0x11D,
+ eCMD_C011_DEC_CHAN_PAUSE_STATE = eCMD_C011_CMD_BASE + 0x11E,
+ eCMD_C011_DEC_CHAN_SET_SLOWM_RATE = eCMD_C011_CMD_BASE + 0x11F,
+ eCMD_C011_DEC_CHAN_GET_SLOWM_RATE = eCMD_C011_CMD_BASE + 0x120,
+ eCMD_C011_DEC_CHAN_SET_FF_RATE = eCMD_C011_CMD_BASE + 0x121,
+ eCMD_C011_DEC_CHAN_GET_FF_RATE = eCMD_C011_CMD_BASE + 0x122,
+ eCMD_C011_DEC_CHAN_FRAME_ADVANCE = eCMD_C011_CMD_BASE + 0x123,
+ eCMD_C011_DEC_CHAN_SET_SKIP_PIC_MODE = eCMD_C011_CMD_BASE + 0x124,
+ eCMD_C011_DEC_CHAN_GET_SKIP_PIC_MODE = eCMD_C011_CMD_BASE + 0x125,
+ eCMD_C011_DEC_CHAN_FILL_PIC_BUF = eCMD_C011_CMD_BASE + 0x126,
+ eCMD_C011_DEC_CHAN_SET_CONTINUITY_CHECK = eCMD_C011_CMD_BASE + 0x127,
+ eCMD_C011_DEC_CHAN_GET_CONTINUITY_CHECK = eCMD_C011_CMD_BASE + 0x128,
+ eCMD_C011_DEC_CHAN_SET_BRCM_TRICK_MODE = eCMD_C011_CMD_BASE + 0x129,
+ eCMD_C011_DEC_CHAN_GET_BRCM_TRICK_MODE = eCMD_C011_CMD_BASE + 0x12A,
+ eCMD_C011_DEC_CHAN_REVERSE_FIELD_STATUS = eCMD_C011_CMD_BASE + 0x12B,
+ eCMD_C011_DEC_CHAN_I_PICTURE_FOUND = eCMD_C011_CMD_BASE + 0x12C,
+ eCMD_C011_DEC_CHAN_SET_PARAMETER = eCMD_C011_CMD_BASE + 0x12D,
+ eCMD_C011_DEC_CHAN_SET_USER_DATA_MODE = eCMD_C011_CMD_BASE + 0x12E,
+ eCMD_C011_DEC_CHAN_SET_PAUSE_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x12F,
+ eCMD_C011_DEC_CHAN_SET_SLOW_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x130,
+ eCMD_C011_DEC_CHAN_SET_FF_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x131,
+ eCMD_C011_DEC_CHAN_SET_DISPLAY_TIMING_MODE = eCMD_C011_CMD_BASE + 0x132,
+ eCMD_C011_DEC_CHAN_SET_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x133,
+ eCMD_C011_DEC_CHAN_GET_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x134,
+ eCMD_C011_DEC_CHAN_SET_REVERSE_FIELD = eCMD_C011_CMD_BASE + 0x135,
+ eCMD_C011_DEC_CHAN_STREAM_OPEN = eCMD_C011_CMD_BASE + 0x136,
+ eCMD_C011_DEC_CHAN_SET_PCR_PID = eCMD_C011_CMD_BASE + 0x137,
+ eCMD_C011_DEC_CHAN_SET_VID_PID = eCMD_C011_CMD_BASE + 0x138,
+ eCMD_C011_DEC_CHAN_SET_PAN_SCAN_MODE = eCMD_C011_CMD_BASE + 0x139,
+ eCMD_C011_DEC_CHAN_START_DISPLAY_AT_PTS = eCMD_C011_CMD_BASE + 0x140,
+ eCMD_C011_DEC_CHAN_STOP_DISPLAY_AT_PTS = eCMD_C011_CMD_BASE + 0x141,
+ eCMD_C011_DEC_CHAN_SET_DISPLAY_ORDER = eCMD_C011_CMD_BASE + 0x142,
+ eCMD_C011_DEC_CHAN_GET_DISPLAY_ORDER = eCMD_C011_CMD_BASE + 0x143,
+ eCMD_C011_DEC_CHAN_SET_HOST_TRICK_MODE = eCMD_C011_CMD_BASE + 0x144,
+ eCMD_C011_DEC_CHAN_SET_OPERATION_MODE = eCMD_C011_CMD_BASE + 0x145,
+ eCMD_C011_DEC_CHAN_DISPLAY_PAUSE_UNTO_PTS = eCMD_C011_CMD_BASE + 0x146,
+ eCMD_C011_DEC_CHAN_SET_PTS_STC_DIFF_THRESHOLD = eCMD_C011_CMD_BASE + 0x147,
+ eCMD_C011_DEC_CHAN_SEND_COMPRESSED_BUF = eCMD_C011_CMD_BASE + 0x148,
+ eCMD_C011_DEC_CHAN_SET_CLIPPING = eCMD_C011_CMD_BASE + 0x149,
+ eCMD_C011_DEC_CHAN_SET_PARAMETERS_FOR_HARD_RESET_INTERRUPT_TO_HOST
+ = eCMD_C011_CMD_BASE + 0x150,
+
+ /* Decoder RevD commands */
+ eCMD_C011_DEC_CHAN_SET_CSC = eCMD_C011_CMD_BASE + 0x180, /* color space conversion */
+ eCMD_C011_DEC_CHAN_SET_RANGE_REMAP = eCMD_C011_CMD_BASE + 0x181,
+ eCMD_C011_DEC_CHAN_SET_FGT = eCMD_C011_CMD_BASE + 0x182,
+ /* Note: 0x183 not implemented yet in Rev D main */
+ eCMD_C011_DEC_CHAN_SET_LASTPICTURE_PADDING = eCMD_C011_CMD_BASE + 0x183,
+
+ /* Decoder 7412 commands (7412-only) */
+ eCMD_C011_DEC_CHAN_SET_CONTENT_KEY = eCMD_C011_CMD_BASE + 0x190,
+ eCMD_C011_DEC_CHAN_SET_SESSION_KEY = eCMD_C011_CMD_BASE + 0x191,
+ eCMD_C011_DEC_CHAN_FMT_CHANGE_ACK = eCMD_C011_CMD_BASE + 0x192,
+
+ eCMD_C011_DEC_CHAN_CUSTOM_VIDOUT = eCMD_C011_CMD_BASE + 0x1FF,
+
+ /* Encoding commands */
+ eCMD_C011_ENC_CHAN_OPEN = eCMD_C011_CMD_BASE + 0x200,
+ eCMD_C011_ENC_CHAN_CLOSE = eCMD_C011_CMD_BASE + 0x201,
+ eCMD_C011_ENC_CHAN_ACTIVATE = eCMD_C011_CMD_BASE + 0x202,
+ eCMD_C011_ENC_CHAN_CONTROL = eCMD_C011_CMD_BASE + 0x203,
+ eCMD_C011_ENC_CHAN_STATISTICS = eCMD_C011_CMD_BASE + 0x204,
+
+ eNOTIFY_C011_ENC_CHAN_EVENT = eCMD_C011_CMD_BASE + 0x210,
};
diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h
index 382078eafa0..4d617235742 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.h
+++ b/drivers/staging/crystalhd/crystalhd_misc.h
@@ -205,12 +205,12 @@ enum _chd_log_levels {
#define BCMLOG_ENTER \
if (g_linklog_level & BCMLOG_ENTER_LEAVE) { \
- printk("Entered %s\n", __func__); \
+ printk(KERN_DEBUG "Entered %s\n", __func__); \
}
#define BCMLOG_LEAVE \
if (g_linklog_level & BCMLOG_ENTER_LEAVE) { \
- printk("Leaving %s\n", __func__); \
+ printk(KERN_DEBUG "Leaving %s\n", __func__); \
}
#define BCMLOG(trace, fmt, args...) \
@@ -221,7 +221,7 @@ if (g_linklog_level & trace) { \
#define BCMLOG_ERR(fmt, args...) \
do { \
if (g_linklog_level & BCMLOG_ERROR) { \
- printk("*ERR*:%s:%d: "fmt, __FILE__, __LINE__, ##args); \
+ printk(KERN_ERR "*ERR*:%s:%d: "fmt, __FILE__, __LINE__, ##args); \
} \
} while (0);
diff --git a/drivers/staging/cx25821/cx25821-alsa.c b/drivers/staging/cx25821/cx25821-alsa.c
index ebdba7c65bc..09e99de5fd2 100644
--- a/drivers/staging/cx25821/cx25821-alsa.c
+++ b/drivers/staging/cx25821/cx25821-alsa.c
@@ -708,7 +708,7 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
chip->irq = dev->pci->irq;
err = request_irq(dev->pci->irq, cx25821_irq,
- IRQF_SHARED | IRQF_DISABLED, chip->dev->name, chip);
+ IRQF_SHARED, chip->dev->name, chip);
if (err < 0) {
pr_err("ERROR %s: can't get IRQ %d for ALSA\n",
diff --git a/drivers/staging/cx25821/cx25821-audio-upstream.c b/drivers/staging/cx25821/cx25821-audio-upstream.c
index 0f9ca777bd4..c20d6dece15 100644
--- a/drivers/staging/cx25821/cx25821-audio-upstream.c
+++ b/drivers/staging/cx25821/cx25821-audio-upstream.c
@@ -107,7 +107,7 @@ static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev,
{
unsigned int line;
struct sram_channel *sram_ch =
- dev->channels[dev->_audio_upstream_channel_select].sram_channels;
+ dev->channels[dev->_audio_upstream_channel].sram_channels;
int offset = 0;
/* scan lines */
@@ -349,10 +349,8 @@ static void cx25821_audioups_handler(struct work_struct *work)
return;
}
- cx25821_get_audio_data(dev,
- dev->channels[dev->
- _audio_upstream_channel_select].
- sram_channels);
+ cx25821_get_audio_data(dev, dev->channels[dev->_audio_upstream_channel].
+ sram_channels);
}
int cx25821_openfile_audio(struct cx25821_dev *dev,
@@ -540,13 +538,9 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
AUDIO_RISC_DMA_BUF_SIZE;
rp = cx25821_risc_field_upstream_audio(dev,
- dev->
- _risc_virt_start_addr
- + 1,
- dev->
- _audiodata_buf_phys_addr,
- AUDIO_LINE_SIZE,
- FIFO_DISABLE);
+ dev->_risc_virt_start_addr + 1,
+ dev->_audiodata_buf_phys_addr,
+ AUDIO_LINE_SIZE, FIFO_DISABLE);
if (USE_RISC_NOOP_AUDIO) {
for (i = 0; i < NUM_NO_OPS; i++) {
@@ -555,8 +549,7 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
}
}
/* Jump to 2nd Audio Frame */
- *(rp++) =
- cpu_to_le32(RISC_JUMP | RISC_IRQ1 |
+ *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 |
RISC_CNT_RESET);
*(rp++) = cpu_to_le32(risc_phys_jump_addr);
*(rp++) = cpu_to_le32(0);
@@ -604,18 +597,15 @@ static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id)
if (!dev)
return -1;
- sram_ch = dev->channels[dev->_audio_upstream_channel_select].sram_channels;
+ sram_ch = dev->channels[dev->_audio_upstream_channel].sram_channels;
msk_stat = cx_read(sram_ch->int_mstat);
audio_status = cx_read(sram_ch->int_stat);
/* Only deal with our interrupt */
if (audio_status) {
- handled =
- cx25821_audio_upstream_irq(dev,
- dev->
- _audio_upstream_channel_select,
- audio_status);
+ handled = cx25821_audio_upstream_irq(dev,
+ dev->_audio_upstream_channel, audio_status);
}
if (handled < 0)
@@ -690,7 +680,7 @@ int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
err =
request_irq(dev->pci->irq, cx25821_upstream_irq_audio,
- IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+ IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get upstream IRQ %d\n",
dev->name, dev->pci->irq);
@@ -726,7 +716,7 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
return 0;
}
- dev->_audio_upstream_channel_select = channel_select;
+ dev->_audio_upstream_channel = channel_select;
sram_ch = dev->channels[channel_select].sram_channels;
/* Work queue */
@@ -770,9 +760,8 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
memcpy(dev->_audiofilename, _defaultAudioName, str_length + 1);
}
- retval =
- cx25821_sram_channel_setup_upstream_audio(dev, sram_ch, _line_size,
- 0);
+ retval = cx25821_sram_channel_setup_upstream_audio(dev, sram_ch,
+ _line_size, 0);
dev->audio_upstream_riscbuf_size =
AUDIO_RISC_DMA_BUF_SIZE * NUM_AUDIO_PROGS +
@@ -780,8 +769,8 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
dev->audio_upstream_databuf_size = AUDIO_DATA_BUF_SZ * NUM_AUDIO_PROGS;
/* Allocating buffers and prepare RISC program */
- retval =
- cx25821_audio_upstream_buffer_prepare(dev, sram_ch, _line_size);
+ retval = cx25821_audio_upstream_buffer_prepare(dev, sram_ch,
+ _line_size);
if (retval < 0) {
pr_err("%s: Failed to set up Audio upstream buffers!\n",
dev->name);
diff --git a/drivers/staging/cx25821/cx25821-audio-upstream.h b/drivers/staging/cx25821/cx25821-audio-upstream.h
index 668a4f11e80..af2ae7c5815 100644
--- a/drivers/staging/cx25821/cx25821-audio-upstream.h
+++ b/drivers/staging/cx25821/cx25821-audio-upstream.h
@@ -46,11 +46,16 @@
#define USE_RISC_NOOP_AUDIO 1
#ifdef USE_RISC_NOOP_AUDIO
-#define AUDIO_RISC_DMA_BUF_SIZE (LINES_PER_AUDIO_BUFFER*RISC_READ_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + NUM_NO_OPS*DWORD_SIZE + RISC_JUMP_INSTRUCTION_SIZE)
+#define AUDIO_RISC_DMA_BUF_SIZE \
+ (LINES_PER_AUDIO_BUFFER * RISC_READ_INSTRUCTION_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE + NUM_NO_OPS * DWORD_SIZE + \
+ RISC_JUMP_INSTRUCTION_SIZE)
#endif
#ifndef USE_RISC_NOOP_AUDIO
-#define AUDIO_RISC_DMA_BUF_SIZE (LINES_PER_AUDIO_BUFFER*RISC_READ_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + RISC_JUMP_INSTRUCTION_SIZE)
+#define AUDIO_RISC_DMA_BUF_SIZE \
+ (LINES_PER_AUDIO_BUFFER * RISC_READ_INSTRUCTION_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE + RISC_JUMP_INSTRUCTION_SIZE)
#endif
static int _line_size;
diff --git a/drivers/staging/cx25821/cx25821-audio.h b/drivers/staging/cx25821/cx25821-audio.h
index 27717253227..8eb55b7b88c 100644
--- a/drivers/staging/cx25821/cx25821-audio.h
+++ b/drivers/staging/cx25821/cx25821-audio.h
@@ -36,13 +36,15 @@
*/
#ifndef USE_RISC_NOOP
#define MAX_BUFFER_PROGRAM_SIZE \
- (2*LINES_PER_BUFFER*RISC_WRITE_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE*4)
+ (2 * LINES_PER_BUFFER * RISC_WRITE_INSTRUCTION_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE * 4)
#endif
/* MAE 12 July 2005 Try to use NOOP RISC instruction instead */
#ifdef USE_RISC_NOOP
#define MAX_BUFFER_PROGRAM_SIZE \
- (2*LINES_PER_BUFFER*RISC_WRITE_INSTRUCTION_SIZE + RISC_NOOP_INSTRUCTION_SIZE*4)
+ (2 * LINES_PER_BUFFER * RISC_WRITE_INSTRUCTION_SIZE + \
+ RISC_NOOP_INSTRUCTION_SIZE * 4)
#endif
/* Sizes of various instructions in bytes. Used when adding instructions. */
diff --git a/drivers/staging/cx25821/cx25821-cards.c b/drivers/staging/cx25821/cx25821-cards.c
index 94e8d685c88..6ace60313b4 100644
--- a/drivers/staging/cx25821/cx25821-cards.c
+++ b/drivers/staging/cx25821/cx25821-cards.c
@@ -36,17 +36,17 @@
struct cx25821_board cx25821_boards[] = {
[UNKNOWN_BOARD] = {
- .name = "UNKNOWN/GENERIC",
- /* Ensure safe default for unknown boards */
- .clk_freq = 0,
- },
+ .name = "UNKNOWN/GENERIC",
+ /* Ensure safe default for unknown boards */
+ .clk_freq = 0,
+ },
[CX25821_BOARD] = {
- .name = "CX25821",
- .portb = CX25821_RAW,
- .portc = CX25821_264,
- .input[0].type = CX25821_VMUX_COMPOSITE,
- },
+ .name = "CX25821",
+ .portb = CX25821_RAW,
+ .portc = CX25821_264,
+ .input[0].type = CX25821_VMUX_COMPOSITE,
+ },
};
@@ -54,10 +54,10 @@ const unsigned int cx25821_bcount = ARRAY_SIZE(cx25821_boards);
struct cx25821_subid cx25821_subids[] = {
{
- .subvendor = 0x14f1,
- .subdevice = 0x0920,
- .card = CX25821_BOARD,
- },
+ .subvendor = 0x14f1,
+ .subdevice = 0x0920,
+ .card = CX25821_BOARD,
+ },
};
void cx25821_card_setup(struct cx25821_dev *dev)
diff --git a/drivers/staging/cx25821/cx25821-core.c b/drivers/staging/cx25821/cx25821-core.c
index 523ac5e16c1..a7fa38f9594 100644
--- a/drivers/staging/cx25821/cx25821-core.c
+++ b/drivers/staging/cx25821/cx25821-core.c
@@ -50,270 +50,270 @@ EXPORT_SYMBOL(cx25821_devlist);
struct sram_channel cx25821_sram_channels[] = {
[SRAM_CH00] = {
- .i = SRAM_CH00,
- .name = "VID A",
- .cmds_start = VID_A_DOWN_CMDS,
- .ctrl_start = VID_A_IQ,
- .cdt = VID_A_CDT,
- .fifo_start = VID_A_DOWN_CLUSTER_1,
- .fifo_size = (VID_CLUSTER_SIZE << 2),
- .ptr1_reg = DMA1_PTR1,
- .ptr2_reg = DMA1_PTR2,
- .cnt1_reg = DMA1_CNT1,
- .cnt2_reg = DMA1_CNT2,
- .int_msk = VID_A_INT_MSK,
- .int_stat = VID_A_INT_STAT,
- .int_mstat = VID_A_INT_MSTAT,
- .dma_ctl = VID_DST_A_DMA_CTL,
- .gpcnt_ctl = VID_DST_A_GPCNT_CTL,
- .gpcnt = VID_DST_A_GPCNT,
- .vip_ctl = VID_DST_A_VIP_CTL,
- .pix_frmt = VID_DST_A_PIX_FRMT,
- },
+ .i = SRAM_CH00,
+ .name = "VID A",
+ .cmds_start = VID_A_DOWN_CMDS,
+ .ctrl_start = VID_A_IQ,
+ .cdt = VID_A_CDT,
+ .fifo_start = VID_A_DOWN_CLUSTER_1,
+ .fifo_size = (VID_CLUSTER_SIZE << 2),
+ .ptr1_reg = DMA1_PTR1,
+ .ptr2_reg = DMA1_PTR2,
+ .cnt1_reg = DMA1_CNT1,
+ .cnt2_reg = DMA1_CNT2,
+ .int_msk = VID_A_INT_MSK,
+ .int_stat = VID_A_INT_STAT,
+ .int_mstat = VID_A_INT_MSTAT,
+ .dma_ctl = VID_DST_A_DMA_CTL,
+ .gpcnt_ctl = VID_DST_A_GPCNT_CTL,
+ .gpcnt = VID_DST_A_GPCNT,
+ .vip_ctl = VID_DST_A_VIP_CTL,
+ .pix_frmt = VID_DST_A_PIX_FRMT,
+ },
[SRAM_CH01] = {
- .i = SRAM_CH01,
- .name = "VID B",
- .cmds_start = VID_B_DOWN_CMDS,
- .ctrl_start = VID_B_IQ,
- .cdt = VID_B_CDT,
- .fifo_start = VID_B_DOWN_CLUSTER_1,
- .fifo_size = (VID_CLUSTER_SIZE << 2),
- .ptr1_reg = DMA2_PTR1,
- .ptr2_reg = DMA2_PTR2,
- .cnt1_reg = DMA2_CNT1,
- .cnt2_reg = DMA2_CNT2,
- .int_msk = VID_B_INT_MSK,
- .int_stat = VID_B_INT_STAT,
- .int_mstat = VID_B_INT_MSTAT,
- .dma_ctl = VID_DST_B_DMA_CTL,
- .gpcnt_ctl = VID_DST_B_GPCNT_CTL,
- .gpcnt = VID_DST_B_GPCNT,
- .vip_ctl = VID_DST_B_VIP_CTL,
- .pix_frmt = VID_DST_B_PIX_FRMT,
- },
+ .i = SRAM_CH01,
+ .name = "VID B",
+ .cmds_start = VID_B_DOWN_CMDS,
+ .ctrl_start = VID_B_IQ,
+ .cdt = VID_B_CDT,
+ .fifo_start = VID_B_DOWN_CLUSTER_1,
+ .fifo_size = (VID_CLUSTER_SIZE << 2),
+ .ptr1_reg = DMA2_PTR1,
+ .ptr2_reg = DMA2_PTR2,
+ .cnt1_reg = DMA2_CNT1,
+ .cnt2_reg = DMA2_CNT2,
+ .int_msk = VID_B_INT_MSK,
+ .int_stat = VID_B_INT_STAT,
+ .int_mstat = VID_B_INT_MSTAT,
+ .dma_ctl = VID_DST_B_DMA_CTL,
+ .gpcnt_ctl = VID_DST_B_GPCNT_CTL,
+ .gpcnt = VID_DST_B_GPCNT,
+ .vip_ctl = VID_DST_B_VIP_CTL,
+ .pix_frmt = VID_DST_B_PIX_FRMT,
+ },
[SRAM_CH02] = {
- .i = SRAM_CH02,
- .name = "VID C",
- .cmds_start = VID_C_DOWN_CMDS,
- .ctrl_start = VID_C_IQ,
- .cdt = VID_C_CDT,
- .fifo_start = VID_C_DOWN_CLUSTER_1,
- .fifo_size = (VID_CLUSTER_SIZE << 2),
- .ptr1_reg = DMA3_PTR1,
- .ptr2_reg = DMA3_PTR2,
- .cnt1_reg = DMA3_CNT1,
- .cnt2_reg = DMA3_CNT2,
- .int_msk = VID_C_INT_MSK,
- .int_stat = VID_C_INT_STAT,
- .int_mstat = VID_C_INT_MSTAT,
- .dma_ctl = VID_DST_C_DMA_CTL,
- .gpcnt_ctl = VID_DST_C_GPCNT_CTL,
- .gpcnt = VID_DST_C_GPCNT,
- .vip_ctl = VID_DST_C_VIP_CTL,
- .pix_frmt = VID_DST_C_PIX_FRMT,
- },
+ .i = SRAM_CH02,
+ .name = "VID C",
+ .cmds_start = VID_C_DOWN_CMDS,
+ .ctrl_start = VID_C_IQ,
+ .cdt = VID_C_CDT,
+ .fifo_start = VID_C_DOWN_CLUSTER_1,
+ .fifo_size = (VID_CLUSTER_SIZE << 2),
+ .ptr1_reg = DMA3_PTR1,
+ .ptr2_reg = DMA3_PTR2,
+ .cnt1_reg = DMA3_CNT1,
+ .cnt2_reg = DMA3_CNT2,
+ .int_msk = VID_C_INT_MSK,
+ .int_stat = VID_C_INT_STAT,
+ .int_mstat = VID_C_INT_MSTAT,
+ .dma_ctl = VID_DST_C_DMA_CTL,
+ .gpcnt_ctl = VID_DST_C_GPCNT_CTL,
+ .gpcnt = VID_DST_C_GPCNT,
+ .vip_ctl = VID_DST_C_VIP_CTL,
+ .pix_frmt = VID_DST_C_PIX_FRMT,
+ },
[SRAM_CH03] = {
- .i = SRAM_CH03,
- .name = "VID D",
- .cmds_start = VID_D_DOWN_CMDS,
- .ctrl_start = VID_D_IQ,
- .cdt = VID_D_CDT,
- .fifo_start = VID_D_DOWN_CLUSTER_1,
- .fifo_size = (VID_CLUSTER_SIZE << 2),
- .ptr1_reg = DMA4_PTR1,
- .ptr2_reg = DMA4_PTR2,
- .cnt1_reg = DMA4_CNT1,
- .cnt2_reg = DMA4_CNT2,
- .int_msk = VID_D_INT_MSK,
- .int_stat = VID_D_INT_STAT,
- .int_mstat = VID_D_INT_MSTAT,
- .dma_ctl = VID_DST_D_DMA_CTL,
- .gpcnt_ctl = VID_DST_D_GPCNT_CTL,
- .gpcnt = VID_DST_D_GPCNT,
- .vip_ctl = VID_DST_D_VIP_CTL,
- .pix_frmt = VID_DST_D_PIX_FRMT,
- },
+ .i = SRAM_CH03,
+ .name = "VID D",
+ .cmds_start = VID_D_DOWN_CMDS,
+ .ctrl_start = VID_D_IQ,
+ .cdt = VID_D_CDT,
+ .fifo_start = VID_D_DOWN_CLUSTER_1,
+ .fifo_size = (VID_CLUSTER_SIZE << 2),
+ .ptr1_reg = DMA4_PTR1,
+ .ptr2_reg = DMA4_PTR2,
+ .cnt1_reg = DMA4_CNT1,
+ .cnt2_reg = DMA4_CNT2,
+ .int_msk = VID_D_INT_MSK,
+ .int_stat = VID_D_INT_STAT,
+ .int_mstat = VID_D_INT_MSTAT,
+ .dma_ctl = VID_DST_D_DMA_CTL,
+ .gpcnt_ctl = VID_DST_D_GPCNT_CTL,
+ .gpcnt = VID_DST_D_GPCNT,
+ .vip_ctl = VID_DST_D_VIP_CTL,
+ .pix_frmt = VID_DST_D_PIX_FRMT,
+ },
[SRAM_CH04] = {
- .i = SRAM_CH04,
- .name = "VID E",
- .cmds_start = VID_E_DOWN_CMDS,
- .ctrl_start = VID_E_IQ,
- .cdt = VID_E_CDT,
- .fifo_start = VID_E_DOWN_CLUSTER_1,
- .fifo_size = (VID_CLUSTER_SIZE << 2),
- .ptr1_reg = DMA5_PTR1,
- .ptr2_reg = DMA5_PTR2,
- .cnt1_reg = DMA5_CNT1,
- .cnt2_reg = DMA5_CNT2,
- .int_msk = VID_E_INT_MSK,
- .int_stat = VID_E_INT_STAT,
- .int_mstat = VID_E_INT_MSTAT,
- .dma_ctl = VID_DST_E_DMA_CTL,
- .gpcnt_ctl = VID_DST_E_GPCNT_CTL,
- .gpcnt = VID_DST_E_GPCNT,
- .vip_ctl = VID_DST_E_VIP_CTL,
- .pix_frmt = VID_DST_E_PIX_FRMT,
- },
+ .i = SRAM_CH04,
+ .name = "VID E",
+ .cmds_start = VID_E_DOWN_CMDS,
+ .ctrl_start = VID_E_IQ,
+ .cdt = VID_E_CDT,
+ .fifo_start = VID_E_DOWN_CLUSTER_1,
+ .fifo_size = (VID_CLUSTER_SIZE << 2),
+ .ptr1_reg = DMA5_PTR1,
+ .ptr2_reg = DMA5_PTR2,
+ .cnt1_reg = DMA5_CNT1,
+ .cnt2_reg = DMA5_CNT2,
+ .int_msk = VID_E_INT_MSK,
+ .int_stat = VID_E_INT_STAT,
+ .int_mstat = VID_E_INT_MSTAT,
+ .dma_ctl = VID_DST_E_DMA_CTL,
+ .gpcnt_ctl = VID_DST_E_GPCNT_CTL,
+ .gpcnt = VID_DST_E_GPCNT,
+ .vip_ctl = VID_DST_E_VIP_CTL,
+ .pix_frmt = VID_DST_E_PIX_FRMT,
+ },
[SRAM_CH05] = {
- .i = SRAM_CH05,
- .name = "VID F",
- .cmds_start = VID_F_DOWN_CMDS,
- .ctrl_start = VID_F_IQ,
- .cdt = VID_F_CDT,
- .fifo_start = VID_F_DOWN_CLUSTER_1,
- .fifo_size = (VID_CLUSTER_SIZE << 2),
- .ptr1_reg = DMA6_PTR1,
- .ptr2_reg = DMA6_PTR2,
- .cnt1_reg = DMA6_CNT1,
- .cnt2_reg = DMA6_CNT2,
- .int_msk = VID_F_INT_MSK,
- .int_stat = VID_F_INT_STAT,
- .int_mstat = VID_F_INT_MSTAT,
- .dma_ctl = VID_DST_F_DMA_CTL,
- .gpcnt_ctl = VID_DST_F_GPCNT_CTL,
- .gpcnt = VID_DST_F_GPCNT,
- .vip_ctl = VID_DST_F_VIP_CTL,
- .pix_frmt = VID_DST_F_PIX_FRMT,
- },
+ .i = SRAM_CH05,
+ .name = "VID F",
+ .cmds_start = VID_F_DOWN_CMDS,
+ .ctrl_start = VID_F_IQ,
+ .cdt = VID_F_CDT,
+ .fifo_start = VID_F_DOWN_CLUSTER_1,
+ .fifo_size = (VID_CLUSTER_SIZE << 2),
+ .ptr1_reg = DMA6_PTR1,
+ .ptr2_reg = DMA6_PTR2,
+ .cnt1_reg = DMA6_CNT1,
+ .cnt2_reg = DMA6_CNT2,
+ .int_msk = VID_F_INT_MSK,
+ .int_stat = VID_F_INT_STAT,
+ .int_mstat = VID_F_INT_MSTAT,
+ .dma_ctl = VID_DST_F_DMA_CTL,
+ .gpcnt_ctl = VID_DST_F_GPCNT_CTL,
+ .gpcnt = VID_DST_F_GPCNT,
+ .vip_ctl = VID_DST_F_VIP_CTL,
+ .pix_frmt = VID_DST_F_PIX_FRMT,
+ },
[SRAM_CH06] = {
- .i = SRAM_CH06,
- .name = "VID G",
- .cmds_start = VID_G_DOWN_CMDS,
- .ctrl_start = VID_G_IQ,
- .cdt = VID_G_CDT,
- .fifo_start = VID_G_DOWN_CLUSTER_1,
- .fifo_size = (VID_CLUSTER_SIZE << 2),
- .ptr1_reg = DMA7_PTR1,
- .ptr2_reg = DMA7_PTR2,
- .cnt1_reg = DMA7_CNT1,
- .cnt2_reg = DMA7_CNT2,
- .int_msk = VID_G_INT_MSK,
- .int_stat = VID_G_INT_STAT,
- .int_mstat = VID_G_INT_MSTAT,
- .dma_ctl = VID_DST_G_DMA_CTL,
- .gpcnt_ctl = VID_DST_G_GPCNT_CTL,
- .gpcnt = VID_DST_G_GPCNT,
- .vip_ctl = VID_DST_G_VIP_CTL,
- .pix_frmt = VID_DST_G_PIX_FRMT,
- },
+ .i = SRAM_CH06,
+ .name = "VID G",
+ .cmds_start = VID_G_DOWN_CMDS,
+ .ctrl_start = VID_G_IQ,
+ .cdt = VID_G_CDT,
+ .fifo_start = VID_G_DOWN_CLUSTER_1,
+ .fifo_size = (VID_CLUSTER_SIZE << 2),
+ .ptr1_reg = DMA7_PTR1,
+ .ptr2_reg = DMA7_PTR2,
+ .cnt1_reg = DMA7_CNT1,
+ .cnt2_reg = DMA7_CNT2,
+ .int_msk = VID_G_INT_MSK,
+ .int_stat = VID_G_INT_STAT,
+ .int_mstat = VID_G_INT_MSTAT,
+ .dma_ctl = VID_DST_G_DMA_CTL,
+ .gpcnt_ctl = VID_DST_G_GPCNT_CTL,
+ .gpcnt = VID_DST_G_GPCNT,
+ .vip_ctl = VID_DST_G_VIP_CTL,
+ .pix_frmt = VID_DST_G_PIX_FRMT,
+ },
[SRAM_CH07] = {
- .i = SRAM_CH07,
- .name = "VID H",
- .cmds_start = VID_H_DOWN_CMDS,
- .ctrl_start = VID_H_IQ,
- .cdt = VID_H_CDT,
- .fifo_start = VID_H_DOWN_CLUSTER_1,
- .fifo_size = (VID_CLUSTER_SIZE << 2),
- .ptr1_reg = DMA8_PTR1,
- .ptr2_reg = DMA8_PTR2,
- .cnt1_reg = DMA8_CNT1,
- .cnt2_reg = DMA8_CNT2,
- .int_msk = VID_H_INT_MSK,
- .int_stat = VID_H_INT_STAT,
- .int_mstat = VID_H_INT_MSTAT,
- .dma_ctl = VID_DST_H_DMA_CTL,
- .gpcnt_ctl = VID_DST_H_GPCNT_CTL,
- .gpcnt = VID_DST_H_GPCNT,
- .vip_ctl = VID_DST_H_VIP_CTL,
- .pix_frmt = VID_DST_H_PIX_FRMT,
- },
+ .i = SRAM_CH07,
+ .name = "VID H",
+ .cmds_start = VID_H_DOWN_CMDS,
+ .ctrl_start = VID_H_IQ,
+ .cdt = VID_H_CDT,
+ .fifo_start = VID_H_DOWN_CLUSTER_1,
+ .fifo_size = (VID_CLUSTER_SIZE << 2),
+ .ptr1_reg = DMA8_PTR1,
+ .ptr2_reg = DMA8_PTR2,
+ .cnt1_reg = DMA8_CNT1,
+ .cnt2_reg = DMA8_CNT2,
+ .int_msk = VID_H_INT_MSK,
+ .int_stat = VID_H_INT_STAT,
+ .int_mstat = VID_H_INT_MSTAT,
+ .dma_ctl = VID_DST_H_DMA_CTL,
+ .gpcnt_ctl = VID_DST_H_GPCNT_CTL,
+ .gpcnt = VID_DST_H_GPCNT,
+ .vip_ctl = VID_DST_H_VIP_CTL,
+ .pix_frmt = VID_DST_H_PIX_FRMT,
+ },
[SRAM_CH08] = {
- .name = "audio from",
- .cmds_start = AUD_A_DOWN_CMDS,
- .ctrl_start = AUD_A_IQ,
- .cdt = AUD_A_CDT,
- .fifo_start = AUD_A_DOWN_CLUSTER_1,
- .fifo_size = AUDIO_CLUSTER_SIZE * 3,
- .ptr1_reg = DMA17_PTR1,
- .ptr2_reg = DMA17_PTR2,
- .cnt1_reg = DMA17_CNT1,
- .cnt2_reg = DMA17_CNT2,
- },
+ .name = "audio from",
+ .cmds_start = AUD_A_DOWN_CMDS,
+ .ctrl_start = AUD_A_IQ,
+ .cdt = AUD_A_CDT,
+ .fifo_start = AUD_A_DOWN_CLUSTER_1,
+ .fifo_size = AUDIO_CLUSTER_SIZE * 3,
+ .ptr1_reg = DMA17_PTR1,
+ .ptr2_reg = DMA17_PTR2,
+ .cnt1_reg = DMA17_CNT1,
+ .cnt2_reg = DMA17_CNT2,
+ },
[SRAM_CH09] = {
- .i = SRAM_CH09,
- .name = "VID Upstream I",
- .cmds_start = VID_I_UP_CMDS,
- .ctrl_start = VID_I_IQ,
- .cdt = VID_I_CDT,
- .fifo_start = VID_I_UP_CLUSTER_1,
- .fifo_size = (VID_CLUSTER_SIZE << 2),
- .ptr1_reg = DMA15_PTR1,
- .ptr2_reg = DMA15_PTR2,
- .cnt1_reg = DMA15_CNT1,
- .cnt2_reg = DMA15_CNT2,
- .int_msk = VID_I_INT_MSK,
- .int_stat = VID_I_INT_STAT,
- .int_mstat = VID_I_INT_MSTAT,
- .dma_ctl = VID_SRC_I_DMA_CTL,
- .gpcnt_ctl = VID_SRC_I_GPCNT_CTL,
- .gpcnt = VID_SRC_I_GPCNT,
-
- .vid_fmt_ctl = VID_SRC_I_FMT_CTL,
- .vid_active_ctl1 = VID_SRC_I_ACTIVE_CTL1,
- .vid_active_ctl2 = VID_SRC_I_ACTIVE_CTL2,
- .vid_cdt_size = VID_SRC_I_CDT_SZ,
- .irq_bit = 8,
- },
+ .i = SRAM_CH09,
+ .name = "VID Upstream I",
+ .cmds_start = VID_I_UP_CMDS,
+ .ctrl_start = VID_I_IQ,
+ .cdt = VID_I_CDT,
+ .fifo_start = VID_I_UP_CLUSTER_1,
+ .fifo_size = (VID_CLUSTER_SIZE << 2),
+ .ptr1_reg = DMA15_PTR1,
+ .ptr2_reg = DMA15_PTR2,
+ .cnt1_reg = DMA15_CNT1,
+ .cnt2_reg = DMA15_CNT2,
+ .int_msk = VID_I_INT_MSK,
+ .int_stat = VID_I_INT_STAT,
+ .int_mstat = VID_I_INT_MSTAT,
+ .dma_ctl = VID_SRC_I_DMA_CTL,
+ .gpcnt_ctl = VID_SRC_I_GPCNT_CTL,
+ .gpcnt = VID_SRC_I_GPCNT,
+
+ .vid_fmt_ctl = VID_SRC_I_FMT_CTL,
+ .vid_active_ctl1 = VID_SRC_I_ACTIVE_CTL1,
+ .vid_active_ctl2 = VID_SRC_I_ACTIVE_CTL2,
+ .vid_cdt_size = VID_SRC_I_CDT_SZ,
+ .irq_bit = 8,
+ },
[SRAM_CH10] = {
- .i = SRAM_CH10,
- .name = "VID Upstream J",
- .cmds_start = VID_J_UP_CMDS,
- .ctrl_start = VID_J_IQ,
- .cdt = VID_J_CDT,
- .fifo_start = VID_J_UP_CLUSTER_1,
- .fifo_size = (VID_CLUSTER_SIZE << 2),
- .ptr1_reg = DMA16_PTR1,
- .ptr2_reg = DMA16_PTR2,
- .cnt1_reg = DMA16_CNT1,
- .cnt2_reg = DMA16_CNT2,
- .int_msk = VID_J_INT_MSK,
- .int_stat = VID_J_INT_STAT,
- .int_mstat = VID_J_INT_MSTAT,
- .dma_ctl = VID_SRC_J_DMA_CTL,
- .gpcnt_ctl = VID_SRC_J_GPCNT_CTL,
- .gpcnt = VID_SRC_J_GPCNT,
-
- .vid_fmt_ctl = VID_SRC_J_FMT_CTL,
- .vid_active_ctl1 = VID_SRC_J_ACTIVE_CTL1,
- .vid_active_ctl2 = VID_SRC_J_ACTIVE_CTL2,
- .vid_cdt_size = VID_SRC_J_CDT_SZ,
- .irq_bit = 9,
- },
+ .i = SRAM_CH10,
+ .name = "VID Upstream J",
+ .cmds_start = VID_J_UP_CMDS,
+ .ctrl_start = VID_J_IQ,
+ .cdt = VID_J_CDT,
+ .fifo_start = VID_J_UP_CLUSTER_1,
+ .fifo_size = (VID_CLUSTER_SIZE << 2),
+ .ptr1_reg = DMA16_PTR1,
+ .ptr2_reg = DMA16_PTR2,
+ .cnt1_reg = DMA16_CNT1,
+ .cnt2_reg = DMA16_CNT2,
+ .int_msk = VID_J_INT_MSK,
+ .int_stat = VID_J_INT_STAT,
+ .int_mstat = VID_J_INT_MSTAT,
+ .dma_ctl = VID_SRC_J_DMA_CTL,
+ .gpcnt_ctl = VID_SRC_J_GPCNT_CTL,
+ .gpcnt = VID_SRC_J_GPCNT,
+
+ .vid_fmt_ctl = VID_SRC_J_FMT_CTL,
+ .vid_active_ctl1 = VID_SRC_J_ACTIVE_CTL1,
+ .vid_active_ctl2 = VID_SRC_J_ACTIVE_CTL2,
+ .vid_cdt_size = VID_SRC_J_CDT_SZ,
+ .irq_bit = 9,
+ },
[SRAM_CH11] = {
- .i = SRAM_CH11,
- .name = "Audio Upstream Channel B",
- .cmds_start = AUD_B_UP_CMDS,
- .ctrl_start = AUD_B_IQ,
- .cdt = AUD_B_CDT,
- .fifo_start = AUD_B_UP_CLUSTER_1,
- .fifo_size = (AUDIO_CLUSTER_SIZE * 3),
- .ptr1_reg = DMA22_PTR1,
- .ptr2_reg = DMA22_PTR2,
- .cnt1_reg = DMA22_CNT1,
- .cnt2_reg = DMA22_CNT2,
- .int_msk = AUD_B_INT_MSK,
- .int_stat = AUD_B_INT_STAT,
- .int_mstat = AUD_B_INT_MSTAT,
- .dma_ctl = AUD_INT_DMA_CTL,
- .gpcnt_ctl = AUD_B_GPCNT_CTL,
- .gpcnt = AUD_B_GPCNT,
- .aud_length = AUD_B_LNGTH,
- .aud_cfg = AUD_B_CFG,
- .fld_aud_fifo_en = FLD_AUD_SRC_B_FIFO_EN,
- .fld_aud_risc_en = FLD_AUD_SRC_B_RISC_EN,
- .irq_bit = 11,
- },
+ .i = SRAM_CH11,
+ .name = "Audio Upstream Channel B",
+ .cmds_start = AUD_B_UP_CMDS,
+ .ctrl_start = AUD_B_IQ,
+ .cdt = AUD_B_CDT,
+ .fifo_start = AUD_B_UP_CLUSTER_1,
+ .fifo_size = (AUDIO_CLUSTER_SIZE * 3),
+ .ptr1_reg = DMA22_PTR1,
+ .ptr2_reg = DMA22_PTR2,
+ .cnt1_reg = DMA22_CNT1,
+ .cnt2_reg = DMA22_CNT2,
+ .int_msk = AUD_B_INT_MSK,
+ .int_stat = AUD_B_INT_STAT,
+ .int_mstat = AUD_B_INT_MSTAT,
+ .dma_ctl = AUD_INT_DMA_CTL,
+ .gpcnt_ctl = AUD_B_GPCNT_CTL,
+ .gpcnt = AUD_B_GPCNT,
+ .aud_length = AUD_B_LNGTH,
+ .aud_cfg = AUD_B_CFG,
+ .fld_aud_fifo_en = FLD_AUD_SRC_B_FIFO_EN,
+ .fld_aud_risc_en = FLD_AUD_SRC_B_RISC_EN,
+ .irq_bit = 11,
+ },
};
EXPORT_SYMBOL(cx25821_sram_channels);
@@ -1428,7 +1428,7 @@ static int __devinit cx25821_initdev(struct pci_dev *pci_dev,
}
err =
- request_irq(pci_dev->irq, cx25821_irq, IRQF_SHARED | IRQF_DISABLED,
+ request_irq(pci_dev->irq, cx25821_irq, IRQF_SHARED,
dev->name, dev);
if (err < 0) {
@@ -1473,17 +1473,17 @@ static void __devexit cx25821_finidev(struct pci_dev *pci_dev)
kfree(dev);
}
-static struct pci_device_id cx25821_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cx25821_pci_tbl) = {
{
- /* CX25821 Athena */
- .vendor = 0x14f1,
- .device = 0x8210,
- .subvendor = 0x14f1,
- .subdevice = 0x0920,
- },
+ /* CX25821 Athena */
+ .vendor = 0x14f1,
+ .device = 0x8210,
+ .subvendor = 0x14f1,
+ .subdevice = 0x0920,
+ },
{
- /* --- end of list --- */
- }
+ /* --- end of list --- */
+ }
};
MODULE_DEVICE_TABLE(pci, cx25821_pci_tbl);
@@ -1512,7 +1512,6 @@ static void __exit cx25821_fini(void)
pci_unregister_driver(&cx25821_pci_driver);
}
-EXPORT_SYMBOL(cx25821_set_gpiopin_direction);
module_init(cx25821_init);
module_exit(cx25821_fini);
diff --git a/drivers/staging/cx25821/cx25821-gpio.c b/drivers/staging/cx25821/cx25821-gpio.c
index 2f154b3658a..29e43b03c85 100644
--- a/drivers/staging/cx25821/cx25821-gpio.c
+++ b/drivers/staging/cx25821/cx25821-gpio.c
@@ -50,6 +50,7 @@ void cx25821_set_gpiopin_direction(struct cx25821_dev *dev,
cx_write(gpio_oe_reg, value);
}
+EXPORT_SYMBOL(cx25821_set_gpiopin_direction);
static void cx25821_set_gpiopin_logicvalue(struct cx25821_dev *dev,
int pin_number, int pin_logic_value)
diff --git a/drivers/staging/cx25821/cx25821-gpio.h b/drivers/staging/cx25821/cx25821-gpio.h
deleted file mode 100644
index ca07644154a..00000000000
--- a/drivers/staging/cx25821/cx25821-gpio.h
+++ /dev/null
@@ -1,2 +0,0 @@
-
-void cx25821_gpio_init(struct athena_dev *dev);
diff --git a/drivers/staging/cx25821/cx25821-i2c.c b/drivers/staging/cx25821/cx25821-i2c.c
index 130dfebebe2..4d3d0ce4078 100644
--- a/drivers/staging/cx25821/cx25821-i2c.c
+++ b/drivers/staging/cx25821/cx25821-i2c.c
@@ -370,16 +370,16 @@ int cx25821_i2c_read(struct cx25821_i2c *bus, u16 reg_addr, int *value)
struct i2c_msg msgs[2] = {
{
- .addr = client->addr,
- .flags = 0,
- .len = 2,
- .buf = addr,
- }, {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = 4,
- .buf = buf,
- }
+ .addr = client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = addr,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = 4,
+ .buf = buf,
+ }
};
addr[0] = (reg_addr >> 8);
@@ -403,11 +403,11 @@ int cx25821_i2c_write(struct cx25821_i2c *bus, u16 reg_addr, int value)
struct i2c_msg msgs[1] = {
{
- .addr = client->addr,
- .flags = 0,
- .len = 6,
- .buf = buf,
- }
+ .addr = client->addr,
+ .flags = 0,
+ .len = 6,
+ .buf = buf,
+ }
};
buf[0] = reg_addr >> 8;
diff --git a/drivers/staging/cx25821/cx25821-video-upstream-ch2.c b/drivers/staging/cx25821/cx25821-video-upstream-ch2.c
index 655357da3d6..2a724ddfa53 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream-ch2.c
+++ b/drivers/staging/cx25821/cx25821-video-upstream-ch2.c
@@ -40,8 +40,8 @@ MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards");
MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>");
MODULE_LICENSE("GPL");
-static int _intr_msk =
- FLD_VID_SRC_RISC1 | FLD_VID_SRC_UF | FLD_VID_SRC_SYNC | FLD_VID_SRC_OPC_ERR;
+static int _intr_msk = FLD_VID_SRC_RISC1 | FLD_VID_SRC_UF | FLD_VID_SRC_SYNC |
+ FLD_VID_SRC_OPC_ERR;
static __le32 *cx25821_update_riscprogram_ch2(struct cx25821_dev *dev,
__le32 *rp, unsigned int offset,
@@ -145,15 +145,16 @@ int cx25821_risc_buffer_upstream_ch2(struct cx25821_dev *dev,
if (dev->_isNTSC_ch2) {
odd_num_lines = singlefield_lines + 1;
risc_program_size = FRAME1_VID_PROG_SIZE;
- frame_size =
- (bpl ==
- Y411_LINE_SZ) ? FRAME_SIZE_NTSC_Y411 :
- FRAME_SIZE_NTSC_Y422;
+ if (bpl == Y411_LINE_SZ)
+ frame_size = FRAME_SIZE_NTSC_Y411;
+ else
+ frame_size = FRAME_SIZE_NTSC_Y422;
} else {
risc_program_size = PAL_VID_PROG_SIZE;
- frame_size =
- (bpl ==
- Y411_LINE_SZ) ? FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422;
+ if (bpl == Y411_LINE_SZ)
+ frame_size = FRAME_SIZE_PAL_Y411;
+ else
+ frame_size = FRAME_SIZE_PAL_Y422;
}
/* Virtual address of Risc buffer program */
@@ -165,30 +166,23 @@ int cx25821_risc_buffer_upstream_ch2(struct cx25821_dev *dev,
if (UNSET != top_offset) {
fifo_enable = (frame == 0) ? FIFO_ENABLE : FIFO_DISABLE;
rp = cx25821_risc_field_upstream_ch2(dev, rp,
- dev->
- _data_buf_phys_addr_ch2
- + databuf_offset,
- top_offset, 0, bpl,
- odd_num_lines,
- fifo_enable,
- ODD_FIELD);
+ dev->_data_buf_phys_addr_ch2 + databuf_offset,
+ top_offset, 0, bpl, odd_num_lines, fifo_enable,
+ ODD_FIELD);
}
fifo_enable = FIFO_DISABLE;
/* Even field */
rp = cx25821_risc_field_upstream_ch2(dev, rp,
- dev->
- _data_buf_phys_addr_ch2 +
- databuf_offset,
- bottom_offset, 0x200, bpl,
- singlefield_lines,
- fifo_enable, EVEN_FIELD);
+ dev->_data_buf_phys_addr_ch2 + databuf_offset,
+ bottom_offset, 0x200, bpl, singlefield_lines,
+ fifo_enable, EVEN_FIELD);
if (frame == 0) {
risc_flag = RISC_CNT_RESET;
- risc_phys_jump_addr =
- dev->_dma_phys_start_addr_ch2 + risc_program_size;
+ risc_phys_jump_addr = dev->_dma_phys_start_addr_ch2 +
+ risc_program_size;
} else {
risc_flag = RISC_CNT_INC;
risc_phys_jump_addr = dev->_dma_phys_start_addr_ch2;
@@ -510,9 +504,8 @@ static int cx25821_upstream_buffer_prepare_ch2(struct cx25821_dev *dev,
return ret;
/* Creating RISC programs */
- ret =
- cx25821_risc_buffer_upstream_ch2(dev, dev->pci, 0, bpl,
- dev->_lines_count_ch2);
+ ret = cx25821_risc_buffer_upstream_ch2(dev, dev->pci, 0, bpl,
+ dev->_lines_count_ch2);
if (ret < 0) {
pr_info("Failed creating Video Upstream Risc programs!\n");
goto error;
@@ -520,7 +513,7 @@ static int cx25821_upstream_buffer_prepare_ch2(struct cx25821_dev *dev,
return 0;
- error:
+error:
return ret;
}
@@ -565,23 +558,19 @@ int cx25821_video_upstream_irq_ch2(struct cx25821_dev *dev, int chan_num,
}
if (dev->_dma_virt_start_addr_ch2 != NULL) {
- line_size_in_bytes =
- (dev->_pixel_format_ch2 ==
- PIXEL_FRMT_411) ? Y411_LINE_SZ :
- Y422_LINE_SZ;
+ if (dev->_pixel_format_ch2 == PIXEL_FRMT_411)
+ line_size_in_bytes = Y411_LINE_SZ;
+ else
+ line_size_in_bytes = Y422_LINE_SZ;
risc_phys_jump_addr =
dev->_dma_phys_start_addr_ch2 +
odd_risc_prog_size;
rp = cx25821_update_riscprogram_ch2(dev,
- dev->
- _dma_virt_start_addr_ch2,
- TOP_OFFSET,
- line_size_in_bytes,
- 0x0,
- singlefield_lines,
- FIFO_DISABLE,
- ODD_FIELD);
+ dev->_dma_virt_start_addr_ch2,
+ TOP_OFFSET, line_size_in_bytes,
+ 0x0, singlefield_lines,
+ FIFO_DISABLE, ODD_FIELD);
/* Jump to Even Risc program of 1st Frame */
*(rp++) = cpu_to_le32(RISC_JUMP);
@@ -704,7 +693,7 @@ int cx25821_start_video_dma_upstream_ch2(struct cx25821_dev *dev,
err =
request_irq(dev->pci->irq, cx25821_upstream_irq_ch2,
- IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+ IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get upstream IRQ %d\n",
dev->name, dev->pci->irq);
@@ -719,7 +708,7 @@ int cx25821_start_video_dma_upstream_ch2(struct cx25821_dev *dev,
return 0;
- fail_irq:
+fail_irq:
cx25821_dev_unregister(dev);
return err;
}
@@ -805,8 +794,7 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
}
}
- retval =
- cx25821_sram_channel_setup_upstream(dev, sram_ch,
+ retval = cx25821_sram_channel_setup_upstream(dev, sram_ch,
dev->_line_size_ch2, 0);
/* setup fifo + format */
@@ -816,8 +804,7 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
dev->upstream_databuf_size_ch2 = data_frame_size * 2;
/* Allocating buffers and prepare RISC program */
- retval =
- cx25821_upstream_buffer_prepare_ch2(dev, sram_ch,
+ retval = cx25821_upstream_buffer_prepare_ch2(dev, sram_ch,
dev->_line_size_ch2);
if (retval < 0) {
pr_err("%s: Failed to set up Video upstream buffers!\n",
@@ -829,7 +816,7 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
return 0;
- error:
+error:
cx25821_dev_unregister(dev);
return err;
diff --git a/drivers/staging/cx25821/cx25821-video-upstream-ch2.h b/drivers/staging/cx25821/cx25821-video-upstream-ch2.h
index 029e8305724..d42dab59b66 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream-ch2.h
+++ b/drivers/staging/cx25821/cx25821-video-upstream-ch2.h
@@ -65,37 +65,74 @@
#define USE_RISC_NOOP_VIDEO 1
#ifdef USE_RISC_NOOP_VIDEO
-#define PAL_US_VID_PROG_SIZE ((PAL_FIELD_HEIGHT) * 3 * DWORD_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- RISC_SYNC_INSTRUCTION_SIZE + NUM_NO_OPS*DWORD_SIZE)
+#define PAL_US_VID_PROG_SIZE \
+ (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE + RISC_SYNC_INSTRUCTION_SIZE + \
+ NUM_NO_OPS * DWORD_SIZE)
#define PAL_RISC_BUF_SIZE (2 * PAL_US_VID_PROG_SIZE)
-#define PAL_VID_PROG_SIZE ((PAL_FIELD_HEIGHT*2) * 3 * DWORD_SIZE + 2*RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE + 2*NUM_NO_OPS*DWORD_SIZE)
+#define PAL_VID_PROG_SIZE \
+ ((PAL_FIELD_HEIGHT * 2) * 3 * DWORD_SIZE + \
+ 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
+ JUMP_INSTRUCTION_SIZE + 2 * NUM_NO_OPS * DWORD_SIZE)
-#define ODD_FLD_PAL_PROG_SIZE ((PAL_FIELD_HEIGHT) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + NUM_NO_OPS*DWORD_SIZE)
+#define ODD_FLD_PAL_PROG_SIZE \
+ (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
+ RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
+ NUM_NO_OPS * DWORD_SIZE)
-#define NTSC_US_VID_PROG_SIZE ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- JUMP_INSTRUCTION_SIZE + NUM_NO_OPS*DWORD_SIZE)
+#define NTSC_US_VID_PROG_SIZE \
+ ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE + \
+ NUM_NO_OPS * DWORD_SIZE)
-#define NTSC_RISC_BUF_SIZE (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
+#define NTSC_RISC_BUF_SIZE \
+ (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
-#define FRAME1_VID_PROG_SIZE ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * 3 * DWORD_SIZE + 2*RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE + 2*NUM_NO_OPS*DWORD_SIZE)
-#define ODD_FLD_NTSC_PROG_SIZE ((NTSC_ODD_FLD_LINES) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + NUM_NO_OPS*DWORD_SIZE)
+#define FRAME1_VID_PROG_SIZE \
+ ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * \
+ 3 * DWORD_SIZE + 2 * RISC_SYNC_INSTRUCTION_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE + \
+ 2 * NUM_NO_OPS * DWORD_SIZE)
+
+#define ODD_FLD_NTSC_PROG_SIZE \
+ (NTSC_ODD_FLD_LINES * 3 * DWORD_SIZE + \
+ RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
+ NUM_NO_OPS * DWORD_SIZE)
#endif
#ifndef USE_RISC_NOOP_VIDEO
-#define PAL_US_VID_PROG_SIZE ((PAL_FIELD_HEIGHT + 1) * 3 * DWORD_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
-#define PAL_RISC_BUF_SIZE (2 * (RISC_SYNC_INSTRUCTION_SIZE + PAL_US_VID_PROG_SIZE))
-#define PAL_VID_PROG_SIZE ((PAL_FIELD_HEIGHT*2) * 3 * DWORD_SIZE + 2*RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
-#define ODD_FLD_PAL_PROG_SIZE ((PAL_FIELD_HEIGHT) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
-#define ODD_FLD_NTSC_PROG_SIZE ((NTSC_ODD_FLD_LINES) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
-#define NTSC_US_VID_PROG_SIZE ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
-#define NTSC_RISC_BUF_SIZE (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
-#define FRAME1_VID_PROG_SIZE ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * 3 * DWORD_SIZE + 2*RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
+#define PAL_US_VID_PROG_SIZE \
+ ((PAL_FIELD_HEIGHT + 1) * 3 * DWORD_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE)
+
+#define PAL_RISC_BUF_SIZE \
+ (2 * (RISC_SYNC_INSTRUCTION_SIZE + PAL_US_VID_PROG_SIZE))
+
+#define PAL_VID_PROG_SIZE \
+ ((PAL_FIELD_HEIGHT * 2) * 3 * DWORD_SIZE + \
+ 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
+ JUMP_INSTRUCTION_SIZE)
+
+#define ODD_FLD_PAL_PROG_SIZE \
+ (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
+ RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
+
+#define ODD_FLD_NTSC_PROG_SIZE \
+ (NTSC_ODD_FLD_LINES * 3 * DWORD_SIZE + \
+ RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
+
+#define NTSC_US_VID_PROG_SIZE \
+ ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
+
+#define NTSC_RISC_BUF_SIZE \
+ (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
+
+#define FRAME1_VID_PROG_SIZE \
+ ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * \
+ 3 * DWORD_SIZE + 2 * RISC_SYNC_INSTRUCTION_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
+
#endif
diff --git a/drivers/staging/cx25821/cx25821-video-upstream.c b/drivers/staging/cx25821/cx25821-video-upstream.c
index eb0172bf39d..c0b80068f46 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream.c
+++ b/drivers/staging/cx25821/cx25821-video-upstream.c
@@ -40,8 +40,8 @@ MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards");
MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>");
MODULE_LICENSE("GPL");
-static int _intr_msk =
- FLD_VID_SRC_RISC1 | FLD_VID_SRC_UF | FLD_VID_SRC_SYNC | FLD_VID_SRC_OPC_ERR;
+static int _intr_msk = FLD_VID_SRC_RISC1 | FLD_VID_SRC_UF | FLD_VID_SRC_SYNC |
+ FLD_VID_SRC_OPC_ERR;
int cx25821_sram_channel_setup_upstream(struct cx25821_dev *dev,
struct sram_channel *ch,
@@ -615,14 +615,10 @@ int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
odd_risc_prog_size;
rp = cx25821_update_riscprogram(dev,
- dev->
- _dma_virt_start_addr,
- TOP_OFFSET,
- line_size_in_bytes,
- 0x0,
- singlefield_lines,
- FIFO_DISABLE,
- ODD_FIELD);
+ dev->_dma_virt_start_addr, TOP_OFFSET,
+ line_size_in_bytes, 0x0,
+ singlefield_lines, FIFO_DISABLE,
+ ODD_FIELD);
/* Jump to Even Risc program of 1st Frame */
*(rp++) = cpu_to_le32(RISC_JUMP);
@@ -753,7 +749,7 @@ int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
err =
request_irq(dev->pci->irq, cx25821_upstream_irq,
- IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+ IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get upstream IRQ %d\n",
dev->name, dev->pci->irq);
diff --git a/drivers/staging/cx25821/cx25821-video-upstream.h b/drivers/staging/cx25821/cx25821-video-upstream.h
index f0b3ac0880a..268ec8aa6a6 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream.h
+++ b/drivers/staging/cx25821/cx25821-video-upstream.h
@@ -66,44 +66,74 @@
#define USE_RISC_NOOP_VIDEO 1
#ifdef USE_RISC_NOOP_VIDEO
-#define PAL_US_VID_PROG_SIZE ((PAL_FIELD_HEIGHT) * 3 * DWORD_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- RISC_SYNC_INSTRUCTION_SIZE + NUM_NO_OPS*DWORD_SIZE)
+#define PAL_US_VID_PROG_SIZE \
+ (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE + RISC_SYNC_INSTRUCTION_SIZE + \
+ NUM_NO_OPS * DWORD_SIZE)
#define PAL_RISC_BUF_SIZE (2 * PAL_US_VID_PROG_SIZE)
-#define PAL_VID_PROG_SIZE ((PAL_FIELD_HEIGHT*2) * 3 * DWORD_SIZE + 2*RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE + 2*NUM_NO_OPS*DWORD_SIZE)
+#define PAL_VID_PROG_SIZE \
+ ((PAL_FIELD_HEIGHT * 2) * 3 * DWORD_SIZE + \
+ 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
+ JUMP_INSTRUCTION_SIZE + 2 * NUM_NO_OPS * DWORD_SIZE)
-#define ODD_FLD_PAL_PROG_SIZE ((PAL_FIELD_HEIGHT) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + NUM_NO_OPS*DWORD_SIZE)
+#define ODD_FLD_PAL_PROG_SIZE \
+ (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
+ RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
+ NUM_NO_OPS * DWORD_SIZE)
-#define ODD_FLD_NTSC_PROG_SIZE ((NTSC_ODD_FLD_LINES) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + NUM_NO_OPS*DWORD_SIZE)
+#define ODD_FLD_NTSC_PROG_SIZE \
+ (NTSC_ODD_FLD_LINES * 3 * DWORD_SIZE + \
+ RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
+ NUM_NO_OPS * DWORD_SIZE)
-#define NTSC_US_VID_PROG_SIZE ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- JUMP_INSTRUCTION_SIZE + NUM_NO_OPS*DWORD_SIZE)
+#define NTSC_US_VID_PROG_SIZE \
+ ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE + \
+ NUM_NO_OPS * DWORD_SIZE)
-#define NTSC_RISC_BUF_SIZE (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
+#define NTSC_RISC_BUF_SIZE \
+ (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
-#define FRAME1_VID_PROG_SIZE ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * 3 * DWORD_SIZE + 2*RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE + 2*NUM_NO_OPS*DWORD_SIZE)
+#define FRAME1_VID_PROG_SIZE \
+ ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * 3 * DWORD_SIZE + \
+ 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
+ JUMP_INSTRUCTION_SIZE + 2 * NUM_NO_OPS * DWORD_SIZE)
#endif
#ifndef USE_RISC_NOOP_VIDEO
-#define PAL_US_VID_PROG_SIZE ((PAL_FIELD_HEIGHT) * 3 * DWORD_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- RISC_SYNC_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
+#define PAL_US_VID_PROG_SIZE \
+ (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE + RISC_SYNC_INSTRUCTION_SIZE + \
+ JUMP_INSTRUCTION_SIZE)
-#define PAL_RISC_BUF_SIZE (2 * PAL_US_VID_PROG_SIZE)
+#define PAL_RISC_BUF_SIZE (2 * PAL_US_VID_PROG_SIZE)
+
+#define PAL_VID_PROG_SIZE \
+ ((PAL_FIELD_HEIGHT * 2) * 3 * DWORD_SIZE + \
+ 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
+ JUMP_INSTRUCTION_SIZE)
+
+#define ODD_FLD_PAL_PROG_SIZE \
+ (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
+ RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
+
+#define ODD_FLD_NTSC_PROG_SIZE \
+ (NTSC_ODD_FLD_LINES * 3 * DWORD_SIZE + \
+ RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
+
+#define NTSC_US_VID_PROG_SIZE \
+ ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
-#define PAL_VID_PROG_SIZE ((PAL_FIELD_HEIGHT*2) * 3 * DWORD_SIZE + 2*RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
+#define NTSC_RISC_BUF_SIZE \
+ (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
-#define ODD_FLD_PAL_PROG_SIZE ((PAL_FIELD_HEIGHT) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
-#define ODD_FLD_NTSC_PROG_SIZE ((NTSC_ODD_FLD_LINES) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
+#define FRAME1_VID_PROG_SIZE \
+ ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * 3 * DWORD_SIZE + \
+ 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
+ JUMP_INSTRUCTION_SIZE)
-#define NTSC_US_VID_PROG_SIZE ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
-#define NTSC_RISC_BUF_SIZE (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
-#define FRAME1_VID_PROG_SIZE ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * 3 * DWORD_SIZE + 2*RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
#endif
diff --git a/drivers/staging/cx25821/cx25821-video.c b/drivers/staging/cx25821/cx25821-video.c
index 7a0304a8573..084fc0899e1 100644
--- a/drivers/staging/cx25821/cx25821-video.c
+++ b/drivers/staging/cx25821/cx25821-video.c
@@ -98,9 +98,8 @@ struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc)
{
unsigned int i;
- if (fourcc == V4L2_PIX_FMT_Y41P || fourcc == V4L2_PIX_FMT_YUV411P) {
+ if (fourcc == V4L2_PIX_FMT_Y41P || fourcc == V4L2_PIX_FMT_YUV411P)
return formats + 1;
- }
for (i = 0; i < ARRAY_SIZE(formats); i++)
if (formats[i].fourcc == fourcc)
@@ -110,7 +109,8 @@ struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc)
return NULL;
}
-void cx25821_dump_video_queue(struct cx25821_dev *dev, struct cx25821_dmaqueue *q)
+void cx25821_dump_video_queue(struct cx25821_dev *dev,
+ struct cx25821_dmaqueue *q)
{
struct cx25821_buffer *buf;
struct list_head *item;
@@ -146,9 +146,8 @@ void cx25821_video_wakeup(struct cx25821_dev *dev, struct cx25821_dmaqueue *q,
/* count comes from the hw and it is 16bit wide --
* this trick handles wrap-arounds correctly for
* up to 32767 buffers in flight... */
- if ((s16) (count - buf->count) < 0) {
+ if ((s16) (count - buf->count) < 0)
break;
- }
do_gettimeofday(&buf->vb.ts);
buf->vb.state = VIDEOBUF_DONE;
@@ -161,8 +160,7 @@ void cx25821_video_wakeup(struct cx25821_dev *dev, struct cx25821_dmaqueue *q,
else
mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
if (bc != 1)
- pr_err("%s: %d buffers handled (should be 1)\n",
- __func__, bc);
+ pr_err("%s: %d buffers handled (should be 1)\n", __func__, bc);
}
#ifdef TUNER_FLAG
@@ -203,24 +201,25 @@ struct video_device *cx25821_vdev_init(struct cx25821_dev *dev,
/*
static int cx25821_ctrl_query(struct v4l2_queryctrl *qctrl)
{
- int i;
+ int i;
- if (qctrl->id < V4L2_CID_BASE || qctrl->id >= V4L2_CID_LASTP1)
- return -EINVAL;
- for (i = 0; i < CX25821_CTLS; i++)
- if (cx25821_ctls[i].v.id == qctrl->id)
- break;
- if (i == CX25821_CTLS) {
- *qctrl = no_ctl;
+ if (qctrl->id < V4L2_CID_BASE || qctrl->id >= V4L2_CID_LASTP1)
+ return -EINVAL;
+ for (i = 0; i < CX25821_CTLS; i++)
+ if (cx25821_ctls[i].v.id == qctrl->id)
+ break;
+ if (i == CX25821_CTLS) {
+ *qctrl = no_ctl;
+ return 0;
+ }
+ *qctrl = cx25821_ctls[i].v;
return 0;
- }
- *qctrl = cx25821_ctls[i].v;
- return 0;
}
*/
/* resource management */
-int cx25821_res_get(struct cx25821_dev *dev, struct cx25821_fh *fh, unsigned int bit)
+int cx25821_res_get(struct cx25821_dev *dev, struct cx25821_fh *fh,
+ unsigned int bit)
{
dprintk(1, "%s()\n", __func__);
if (fh->resources & bit)
@@ -229,14 +228,14 @@ int cx25821_res_get(struct cx25821_dev *dev, struct cx25821_fh *fh, unsigned int
/* is it free? */
mutex_lock(&dev->lock);
- if (dev->channels[fh->channel_id].resources & bit) {
+ if (dev->channels[fh->channel_id].resources & bit) {
/* no, someone else uses it */
mutex_unlock(&dev->lock);
return 0;
}
/* it's free, grab it */
fh->resources |= bit;
- dev->channels[fh->channel_id].resources |= bit;
+ dev->channels[fh->channel_id].resources |= bit;
dprintk(1, "res: get %d\n", bit);
mutex_unlock(&dev->lock);
return 1;
@@ -249,17 +248,18 @@ int cx25821_res_check(struct cx25821_fh *fh, unsigned int bit)
int cx25821_res_locked(struct cx25821_fh *fh, unsigned int bit)
{
- return fh->dev->channels[fh->channel_id].resources & bit;
+ return fh->dev->channels[fh->channel_id].resources & bit;
}
-void cx25821_res_free(struct cx25821_dev *dev, struct cx25821_fh *fh, unsigned int bits)
+void cx25821_res_free(struct cx25821_dev *dev, struct cx25821_fh *fh,
+ unsigned int bits)
{
BUG_ON((fh->resources & bits) != bits);
dprintk(1, "%s()\n", __func__);
mutex_lock(&dev->lock);
fh->resources &= ~bits;
- dev->channels[fh->channel_id].resources &= ~bits;
+ dev->channels[fh->channel_id].resources &= ~bits;
dprintk(1, "res: put %d\n", bits);
mutex_unlock(&dev->lock);
}
@@ -353,7 +353,7 @@ int cx25821_restart_video_queue(struct cx25821_dev *dev,
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- prev->risc.jmp[2] = cpu_to_le32(0); /* Bits 63 - 32 */
+ prev->risc.jmp[2] = cpu_to_le32(0); /* Bits 63 - 32 */
} else {
return 0;
}
@@ -366,11 +366,11 @@ void cx25821_vid_timeout(unsigned long data)
struct cx25821_data *timeout_data = (struct cx25821_data *)data;
struct cx25821_dev *dev = timeout_data->dev;
struct sram_channel *channel = timeout_data->channel;
- struct cx25821_dmaqueue *q = &dev->channels[channel->i].vidq;
+ struct cx25821_dmaqueue *q = &dev->channels[channel->i].vidq;
struct cx25821_buffer *buf;
unsigned long flags;
- /* cx25821_sram_channel_dump(dev, channel); */
+ /* cx25821_sram_channel_dump(dev, channel); */
cx_clear(channel->dma_ctl, 0x11);
spin_lock_irqsave(&dev->slock, flags);
@@ -392,7 +392,7 @@ int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status)
u32 count = 0;
int handled = 0;
u32 mask;
- struct sram_channel *channel = dev->channels[chan_num].sram_channels;
+ struct sram_channel *channel = dev->channels[chan_num].sram_channels;
mask = cx_read(channel->int_msk);
if (0 == (status & mask))
@@ -412,8 +412,8 @@ int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status)
if (status & FLD_VID_DST_RISC1) {
spin_lock(&dev->slock);
count = cx_read(channel->gpcnt);
- cx25821_video_wakeup(dev,
- &dev->channels[channel->i].vidq, count);
+ cx25821_video_wakeup(dev, &dev->channels[channel->i].vidq,
+ count);
spin_unlock(&dev->slock);
handled++;
}
@@ -422,9 +422,8 @@ int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status)
if (status & 0x10) {
dprintk(2, "stopper video\n");
spin_lock(&dev->slock);
- cx25821_restart_video_queue(dev,
- &dev->channels[channel->i].vidq,
- channel);
+ cx25821_restart_video_queue(dev,
+ &dev->channels[channel->i].vidq, channel);
spin_unlock(&dev->slock);
handled++;
}
@@ -447,18 +446,18 @@ void cx25821_video_unregister(struct cx25821_dev *dev, int chan_num)
{
cx_clear(PCI_INT_MSK, 1);
- if (dev->channels[chan_num].video_dev) {
- if (video_is_registered(dev->channels[chan_num].video_dev))
- video_unregister_device(
- dev->channels[chan_num].video_dev);
+ if (dev->channels[chan_num].video_dev) {
+ if (video_is_registered(dev->channels[chan_num].video_dev))
+ video_unregister_device(
+ dev->channels[chan_num].video_dev);
else
- video_device_release(
- dev->channels[chan_num].video_dev);
+ video_device_release(
+ dev->channels[chan_num].video_dev);
- dev->channels[chan_num].video_dev = NULL;
+ dev->channels[chan_num].video_dev = NULL;
- btcx_riscmem_free(dev->pci,
- &dev->channels[chan_num].vidq.stopper);
+ btcx_riscmem_free(dev->pci,
+ &dev->channels[chan_num].vidq.stopper);
pr_warn("device %d released!\n", chan_num);
}
@@ -468,74 +467,72 @@ void cx25821_video_unregister(struct cx25821_dev *dev, int chan_num)
int cx25821_video_register(struct cx25821_dev *dev)
{
int err;
- int i;
+ int i;
- struct video_device cx25821_video_device = {
- .name = "cx25821-video",
- .fops = &video_fops,
- .minor = -1,
- .ioctl_ops = &video_ioctl_ops,
- .tvnorms = CX25821_NORMS,
- .current_norm = V4L2_STD_NTSC_M,
- };
+ struct video_device cx25821_video_device = {
+ .name = "cx25821-video",
+ .fops = &video_fops,
+ .minor = -1,
+ .ioctl_ops = &video_ioctl_ops,
+ .tvnorms = CX25821_NORMS,
+ .current_norm = V4L2_STD_NTSC_M,
+ };
spin_lock_init(&dev->slock);
- for (i = 0; i < MAX_VID_CHANNEL_NUM - 1; ++i) {
- cx25821_init_controls(dev, i);
+ for (i = 0; i < MAX_VID_CHANNEL_NUM - 1; ++i) {
+ cx25821_init_controls(dev, i);
- cx25821_risc_stopper(dev->pci,
- &dev->channels[i].vidq.stopper,
- dev->channels[i].sram_channels->dma_ctl,
- 0x11, 0);
+ cx25821_risc_stopper(dev->pci, &dev->channels[i].vidq.stopper,
+ dev->channels[i].sram_channels->dma_ctl,
+ 0x11, 0);
- dev->channels[i].sram_channels = &cx25821_sram_channels[i];
- dev->channels[i].video_dev = NULL;
- dev->channels[i].resources = 0;
+ dev->channels[i].sram_channels = &cx25821_sram_channels[i];
+ dev->channels[i].video_dev = NULL;
+ dev->channels[i].resources = 0;
- cx_write(dev->channels[i].sram_channels->int_stat,
- 0xffffffff);
+ cx_write(dev->channels[i].sram_channels->int_stat, 0xffffffff);
- INIT_LIST_HEAD(&dev->channels[i].vidq.active);
- INIT_LIST_HEAD(&dev->channels[i].vidq.queued);
+ INIT_LIST_HEAD(&dev->channels[i].vidq.active);
+ INIT_LIST_HEAD(&dev->channels[i].vidq.queued);
- dev->channels[i].timeout_data.dev = dev;
- dev->channels[i].timeout_data.channel =
- &cx25821_sram_channels[i];
- dev->channels[i].vidq.timeout.function =
- cx25821_vid_timeout;
- dev->channels[i].vidq.timeout.data =
- (unsigned long)&dev->channels[i].timeout_data;
- init_timer(&dev->channels[i].vidq.timeout);
+ dev->channels[i].timeout_data.dev = dev;
+ dev->channels[i].timeout_data.channel =
+ &cx25821_sram_channels[i];
+ dev->channels[i].vidq.timeout.function =
+ cx25821_vid_timeout;
+ dev->channels[i].vidq.timeout.data =
+ (unsigned long)&dev->channels[i].timeout_data;
+ init_timer(&dev->channels[i].vidq.timeout);
- /* register v4l devices */
- dev->channels[i].video_dev = cx25821_vdev_init(dev,
- dev->pci, &cx25821_video_device, "video");
+ /* register v4l devices */
+ dev->channels[i].video_dev = cx25821_vdev_init(dev,
+ dev->pci, &cx25821_video_device, "video");
- err = video_register_device(dev->channels[i].video_dev,
- VFL_TYPE_GRABBER, video_nr[dev->nr]);
+ err = video_register_device(dev->channels[i].video_dev,
+ VFL_TYPE_GRABBER, video_nr[dev->nr]);
- if (err < 0)
- goto fail_unreg;
+ if (err < 0)
+ goto fail_unreg;
}
- /* set PCI interrupt */
+ /* set PCI interrupt */
cx_set(PCI_INT_MSK, 0xff);
/* initial device configuration */
mutex_lock(&dev->lock);
#ifdef TUNER_FLAG
- dev->tvnorm = cx25821_video_device.current_norm;
+ dev->tvnorm = cx25821_video_device.current_norm;
cx25821_set_tvnorm(dev, dev->tvnorm);
#endif
mutex_unlock(&dev->lock);
- return 0;
+ return 0;
fail_unreg:
- cx25821_video_unregister(dev, i);
+ cx25821_video_unregister(dev, i);
return err;
}
@@ -566,7 +563,7 @@ int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
u32 line0_offset, line1_offset;
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
int bpl_local = LINE_SIZE_D1;
- int channel_opened = fh->channel_id;
+ int channel_opened = fh->channel_id;
BUG_ON(NULL == fh->fmt);
if (fh->width < 48 || fh->width > 720 ||
@@ -602,24 +599,24 @@ int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
if (init_buffer) {
channel_opened = dev->channel_opened;
- channel_opened = (channel_opened < 0
- || channel_opened > 7) ? 7 : channel_opened;
+ if (channel_opened < 0 || channel_opened > 7)
+ channel_opened = 7;
- if (dev->channels[channel_opened]
- .pixel_formats == PIXEL_FRMT_411)
+ if (dev->channels[channel_opened].pixel_formats ==
+ PIXEL_FRMT_411)
buf->bpl = (buf->fmt->depth * buf->vb.width) >> 3;
else
buf->bpl = (buf->fmt->depth >> 3) * (buf->vb.width);
- if (dev->channels[channel_opened]
- .pixel_formats == PIXEL_FRMT_411) {
+ if (dev->channels[channel_opened].pixel_formats ==
+ PIXEL_FRMT_411) {
bpl_local = buf->bpl;
} else {
- bpl_local = buf->bpl; /* Default */
+ bpl_local = buf->bpl; /* Default */
if (channel_opened >= 0 && channel_opened <= 7) {
- if (dev->channels[channel_opened]
- .use_cif_resolution) {
+ if (dev->channels[channel_opened]
+ .use_cif_resolution) {
if (dev->tvnorm & V4L2_STD_PAL_BG
|| dev->tvnorm & V4L2_STD_PAL_DK)
bpl_local = 352 << 1;
@@ -679,12 +676,13 @@ int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
return 0;
- fail:
+fail:
cx25821_free_buffer(q, buf);
return rc;
}
-void cx25821_buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+void cx25821_buffer_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
{
struct cx25821_buffer *buf =
container_of(vb, struct cx25821_buffer, vb);
@@ -724,385 +722,381 @@ int cx25821_video_mmap(struct file *file, struct vm_area_struct *vma)
static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
- struct cx25821_buffer *buf =
+ struct cx25821_buffer *buf =
container_of(vb, struct cx25821_buffer, vb);
- struct cx25821_buffer *prev;
- struct cx25821_fh *fh = vq->priv_data;
- struct cx25821_dev *dev = fh->dev;
- struct cx25821_dmaqueue *q = &dev->channels[fh->channel_id].vidq;
-
- /* add jump to stopper */
- buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
- buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma);
- buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
-
- dprintk(2, "jmp to stopper (0x%x)\n", buf->risc.jmp[1]);
-
- if (!list_empty(&q->queued)) {
- list_add_tail(&buf->vb.queue, &q->queued);
- buf->vb.state = VIDEOBUF_QUEUED;
- dprintk(2, "[%p/%d] buffer_queue - append to queued\n", buf,
- buf->vb.i);
-
- } else if (list_empty(&q->active)) {
- list_add_tail(&buf->vb.queue, &q->active);
- cx25821_start_video_dma(dev, q, buf,
- dev->channels[fh->channel_id].
- sram_channels);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
- dprintk(2,
- "[%p/%d] buffer_queue - first active, buf cnt = %d, \
- q->count = %d\n",
- buf, buf->vb.i, buf->count, q->count);
- } else {
- prev =
- list_entry(q->active.prev, struct cx25821_buffer, vb.queue);
- if (prev->vb.width == buf->vb.width
+ struct cx25821_buffer *prev;
+ struct cx25821_fh *fh = vq->priv_data;
+ struct cx25821_dev *dev = fh->dev;
+ struct cx25821_dmaqueue *q = &dev->channels[fh->channel_id].vidq;
+
+ /* add jump to stopper */
+ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
+ buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma);
+ buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
+
+ dprintk(2, "jmp to stopper (0x%x)\n", buf->risc.jmp[1]);
+
+ if (!list_empty(&q->queued)) {
+ list_add_tail(&buf->vb.queue, &q->queued);
+ buf->vb.state = VIDEOBUF_QUEUED;
+ dprintk(2, "[%p/%d] buffer_queue - append to queued\n", buf,
+ buf->vb.i);
+
+ } else if (list_empty(&q->active)) {
+ list_add_tail(&buf->vb.queue, &q->active);
+ cx25821_start_video_dma(dev, q, buf,
+ dev->channels[fh->channel_id].sram_channels);
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ buf->count = q->count++;
+ mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
+ dprintk(2, "[%p/%d] buffer_queue - first active, buf cnt = %d, q->count = %d\n",
+ buf, buf->vb.i, buf->count, q->count);
+ } else {
+ prev = list_entry(q->active.prev, struct cx25821_buffer,
+ vb.queue);
+ if (prev->vb.width == buf->vb.width
&& prev->vb.height == buf->vb.height
&& prev->fmt == buf->fmt) {
- list_add_tail(&buf->vb.queue, &q->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
-
- /* 64 bit bits 63-32 */
- prev->risc.jmp[2] = cpu_to_le32(0);
- dprintk(2,
- "[%p/%d] buffer_queue - append to active, \
- buf->count=%d\n",
- buf, buf->vb.i, buf->count);
-
- } else {
- list_add_tail(&buf->vb.queue, &q->queued);
- buf->vb.state = VIDEOBUF_QUEUED;
- dprintk(2, "[%p/%d] buffer_queue - first queued\n", buf,
- buf->vb.i);
- }
- }
-
- if (list_empty(&q->active))
- dprintk(2, "active queue empty!\n");
+ list_add_tail(&buf->vb.queue, &q->active);
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ buf->count = q->count++;
+ prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
+
+ /* 64 bit bits 63-32 */
+ prev->risc.jmp[2] = cpu_to_le32(0);
+ dprintk(2, "[%p/%d] buffer_queue - append to active, buf->count=%d\n",
+ buf, buf->vb.i, buf->count);
+
+ } else {
+ list_add_tail(&buf->vb.queue, &q->queued);
+ buf->vb.state = VIDEOBUF_QUEUED;
+ dprintk(2, "[%p/%d] buffer_queue - first queued\n", buf,
+ buf->vb.i);
+ }
+ }
+
+ if (list_empty(&q->active))
+ dprintk(2, "active queue empty!\n");
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = cx25821_buffer_setup,
- .buf_prepare = cx25821_buffer_prepare,
- .buf_queue = buffer_queue,
- .buf_release = cx25821_buffer_release,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
+ .buf_queue = buffer_queue,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
{
- struct video_device *vdev = video_devdata(file);
- struct cx25821_dev *h, *dev = video_drvdata(file);
- struct cx25821_fh *fh;
- struct list_head *list;
- int minor = video_devdata(file)->minor;
- enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- u32 pix_format;
- int ch_id = 0;
- int i;
-
- dprintk(1, "open dev=%s type=%s\n",
- video_device_node_name(vdev),
- v4l2_type_names[type]);
-
- /* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh)
- return -ENOMEM;
+ struct video_device *vdev = video_devdata(file);
+ struct cx25821_dev *h, *dev = video_drvdata(file);
+ struct cx25821_fh *fh;
+ struct list_head *list;
+ int minor = video_devdata(file)->minor;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ u32 pix_format;
+ int ch_id = 0;
+ int i;
+
+ dprintk(1, "open dev=%s type=%s\n", video_device_node_name(vdev),
+ v4l2_type_names[type]);
+
+ /* allocate + initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (NULL == fh)
+ return -ENOMEM;
mutex_lock(&cx25821_devlist_mutex);
- list_for_each(list, &cx25821_devlist)
- {
- h = list_entry(list, struct cx25821_dev, devlist);
+ list_for_each(list, &cx25821_devlist)
+ {
+ h = list_entry(list, struct cx25821_dev, devlist);
- for (i = 0; i < MAX_VID_CHANNEL_NUM; i++) {
- if (h->channels[i].video_dev &&
+ for (i = 0; i < MAX_VID_CHANNEL_NUM; i++) {
+ if (h->channels[i].video_dev &&
h->channels[i].video_dev->minor == minor) {
- dev = h;
- ch_id = i;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
- }
-
- if (NULL == dev) {
+ dev = h;
+ ch_id = i;
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ }
+ }
+ }
+
+ if (NULL == dev) {
mutex_unlock(&cx25821_devlist_mutex);
kfree(fh);
return -ENODEV;
- }
+ }
- file->private_data = fh;
- fh->dev = dev;
- fh->type = type;
- fh->width = 720;
- fh->channel_id = ch_id;
+ file->private_data = fh;
+ fh->dev = dev;
+ fh->type = type;
+ fh->width = 720;
+ fh->channel_id = ch_id;
- if (dev->tvnorm & V4L2_STD_PAL_BG || dev->tvnorm & V4L2_STD_PAL_DK)
- fh->height = 576;
- else
- fh->height = 480;
+ if (dev->tvnorm & V4L2_STD_PAL_BG || dev->tvnorm & V4L2_STD_PAL_DK)
+ fh->height = 576;
+ else
+ fh->height = 480;
- dev->channel_opened = fh->channel_id;
- pix_format =
- (dev->channels[ch_id].pixel_formats ==
- PIXEL_FRMT_411) ? V4L2_PIX_FMT_Y41P : V4L2_PIX_FMT_YUYV;
- fh->fmt = cx25821_format_by_fourcc(pix_format);
+ dev->channel_opened = fh->channel_id;
+ if (dev->channels[ch_id].pixel_formats == PIXEL_FRMT_411)
+ pix_format = V4L2_PIX_FMT_Y41P;
+ else
+ pix_format = V4L2_PIX_FMT_YUYV;
+ fh->fmt = cx25821_format_by_fourcc(pix_format);
- v4l2_prio_open(&dev->channels[ch_id].prio, &fh->prio);
+ v4l2_prio_open(&dev->channels[ch_id].prio, &fh->prio);
- videobuf_queue_sg_init(&fh->vidq, &cx25821_video_qops,
+ videobuf_queue_sg_init(&fh->vidq, &cx25821_video_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx25821_buffer), fh, NULL);
- dprintk(1, "post videobuf_queue_init()\n");
+ dprintk(1, "post videobuf_queue_init()\n");
mutex_unlock(&cx25821_devlist_mutex);
- return 0;
+ return 0;
}
static ssize_t video_read(struct file *file, char __user * data, size_t count,
loff_t *ppos)
{
- struct cx25821_fh *fh = file->private_data;
+ struct cx25821_fh *fh = file->private_data;
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (cx25821_res_locked(fh, RESOURCE_VIDEO0))
- return -EBUSY;
+ switch (fh->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (cx25821_res_locked(fh, RESOURCE_VIDEO0))
+ return -EBUSY;
- return videobuf_read_one(&fh->vidq, data, count, ppos,
+ return videobuf_read_one(&fh->vidq, data, count, ppos,
file->f_flags & O_NONBLOCK);
- default:
- BUG();
- return 0;
- }
+ default:
+ BUG();
+ return 0;
+ }
}
static unsigned int video_poll(struct file *file,
struct poll_table_struct *wait)
{
- struct cx25821_fh *fh = file->private_data;
- struct cx25821_buffer *buf;
+ struct cx25821_fh *fh = file->private_data;
+ struct cx25821_buffer *buf;
- if (cx25821_res_check(fh, RESOURCE_VIDEO0)) {
- /* streaming capture */
- if (list_empty(&fh->vidq.stream))
- return POLLERR;
- buf = list_entry(fh->vidq.stream.next,
+ if (cx25821_res_check(fh, RESOURCE_VIDEO0)) {
+ /* streaming capture */
+ if (list_empty(&fh->vidq.stream))
+ return POLLERR;
+ buf = list_entry(fh->vidq.stream.next,
struct cx25821_buffer, vb.stream);
- } else {
- /* read() capture */
- buf = (struct cx25821_buffer *)fh->vidq.read_buf;
- if (NULL == buf)
- return POLLERR;
- }
-
- poll_wait(file, &buf->vb.done, wait);
- if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) {
- if (buf->vb.state == VIDEOBUF_DONE) {
- struct cx25821_dev *dev = fh->dev;
-
- if (dev && dev->channels[fh->channel_id]
- .use_cif_resolution) {
- u8 cam_id = *((char *)buf->vb.baddr + 3);
- memcpy((char *)buf->vb.baddr,
+ } else {
+ /* read() capture */
+ buf = (struct cx25821_buffer *)fh->vidq.read_buf;
+ if (NULL == buf)
+ return POLLERR;
+ }
+
+ poll_wait(file, &buf->vb.done, wait);
+ if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) {
+ if (buf->vb.state == VIDEOBUF_DONE) {
+ struct cx25821_dev *dev = fh->dev;
+
+ if (dev && dev->channels[fh->channel_id]
+ .use_cif_resolution) {
+ u8 cam_id = *((char *)buf->vb.baddr + 3);
+ memcpy((char *)buf->vb.baddr,
(char *)buf->vb.baddr + (fh->width * 2),
(fh->width * 2));
- *((char *)buf->vb.baddr + 3) = cam_id;
- }
- }
+ *((char *)buf->vb.baddr + 3) = cam_id;
+ }
+ }
- return POLLIN | POLLRDNORM;
- }
+ return POLLIN | POLLRDNORM;
+ }
- return 0;
+ return 0;
}
static int video_release(struct file *file)
{
- struct cx25821_fh *fh = file->private_data;
- struct cx25821_dev *dev = fh->dev;
+ struct cx25821_fh *fh = file->private_data;
+ struct cx25821_dev *dev = fh->dev;
- /* stop the risc engine and fifo */
- cx_write(channel0->dma_ctl, 0); /* FIFO and RISC disable */
+ /* stop the risc engine and fifo */
+ cx_write(channel0->dma_ctl, 0); /* FIFO and RISC disable */
- /* stop video capture */
- if (cx25821_res_check(fh, RESOURCE_VIDEO0)) {
- videobuf_queue_cancel(&fh->vidq);
- cx25821_res_free(dev, fh, RESOURCE_VIDEO0);
- }
+ /* stop video capture */
+ if (cx25821_res_check(fh, RESOURCE_VIDEO0)) {
+ videobuf_queue_cancel(&fh->vidq);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO0);
+ }
- if (fh->vidq.read_buf) {
- cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
- kfree(fh->vidq.read_buf);
- }
+ if (fh->vidq.read_buf) {
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
+ kfree(fh->vidq.read_buf);
+ }
- videobuf_mmap_free(&fh->vidq);
+ videobuf_mmap_free(&fh->vidq);
- v4l2_prio_close(&dev->channels[fh->channel_id].prio, fh->prio);
- file->private_data = NULL;
- kfree(fh);
+ v4l2_prio_close(&dev->channels[fh->channel_id].prio, fh->prio);
+ file->private_data = NULL;
+ kfree(fh);
- return 0;
+ return 0;
}
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct cx25821_fh *fh = priv;
- struct cx25821_dev *dev = fh->dev;
+ struct cx25821_fh *fh = priv;
+ struct cx25821_dev *dev = fh->dev;
- if (unlikely(fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE))
- return -EINVAL;
+ if (unlikely(fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE))
+ return -EINVAL;
- if (unlikely(i != fh->type))
- return -EINVAL;
+ if (unlikely(i != fh->type))
+ return -EINVAL;
- if (unlikely(!cx25821_res_get(dev, fh,
- cx25821_get_resource(fh, RESOURCE_VIDEO0))))
- return -EBUSY;
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh,
+ RESOURCE_VIDEO0))))
+ return -EBUSY;
- return videobuf_streamon(get_queue(fh));
+ return videobuf_streamon(get_queue(fh));
}
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct cx25821_fh *fh = priv;
- struct cx25821_dev *dev = fh->dev;
- int err, res;
+ struct cx25821_fh *fh = priv;
+ struct cx25821_dev *dev = fh->dev;
+ int err, res;
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- if (i != fh->type)
- return -EINVAL;
+ if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (i != fh->type)
+ return -EINVAL;
- res = cx25821_get_resource(fh, RESOURCE_VIDEO0);
- err = videobuf_streamoff(get_queue(fh));
- if (err < 0)
- return err;
- cx25821_res_free(dev, fh, res);
- return 0;
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO0);
+ err = videobuf_streamoff(get_queue(fh));
+ if (err < 0)
+ return err;
+ cx25821_res_free(dev, fh, res);
+ return 0;
}
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
- struct cx25821_fh *fh = priv;
- struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
+ struct cx25821_fh *fh = priv;
+ struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
struct v4l2_mbus_framefmt mbus_fmt;
- int err;
- int pix_format = PIXEL_FRMT_422;
+ int err;
+ int pix_format = PIXEL_FRMT_422;
- if (fh) {
- err = v4l2_prio_check(&dev->channels[fh->channel_id]
- .prio, fh->prio);
- if (0 != err)
- return err;
- }
+ if (fh) {
+ err = v4l2_prio_check(&dev->channels[fh->channel_id].prio,
+ fh->prio);
+ if (0 != err)
+ return err;
+ }
- dprintk(2, "%s()\n", __func__);
- err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
+ dprintk(2, "%s()\n", __func__);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
- if (0 != err)
- return err;
+ if (0 != err)
+ return err;
- fh->fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
- fh->vidq.field = f->fmt.pix.field;
+ fh->fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
+ fh->vidq.field = f->fmt.pix.field;
- /* check if width and height is valid based on set standard */
- if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm))
- fh->width = f->fmt.pix.width;
+ /* check if width and height is valid based on set standard */
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm))
+ fh->width = f->fmt.pix.width;
- if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm))
- fh->height = f->fmt.pix.height;
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm))
+ fh->height = f->fmt.pix.height;
- if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_Y41P)
- pix_format = PIXEL_FRMT_411;
- else if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
- pix_format = PIXEL_FRMT_422;
- else
- return -EINVAL;
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_Y41P)
+ pix_format = PIXEL_FRMT_411;
+ else if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
+ pix_format = PIXEL_FRMT_422;
+ else
+ return -EINVAL;
- cx25821_set_pixel_format(dev, SRAM_CH00, pix_format);
+ cx25821_set_pixel_format(dev, SRAM_CH00, pix_format);
- /* check if cif resolution */
- if (fh->width == 320 || fh->width == 352)
- dev->channels[fh->channel_id].use_cif_resolution = 1;
- else
- dev->channels[fh->channel_id].use_cif_resolution = 0;
+ /* check if cif resolution */
+ if (fh->width == 320 || fh->width == 352)
+ dev->channels[fh->channel_id].use_cif_resolution = 1;
+ else
+ dev->channels[fh->channel_id].use_cif_resolution = 0;
- dev->channels[fh->channel_id].cif_width = fh->width;
- medusa_set_resolution(dev, fh->width, SRAM_CH00);
+ dev->channels[fh->channel_id].cif_width = fh->width;
+ medusa_set_resolution(dev, fh->width, SRAM_CH00);
dprintk(2, "%s(): width=%d height=%d field=%d\n", __func__, fh->width,
fh->height, fh->vidq.field);
v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
cx25821_call_all(dev, video, s_mbus_fmt, &mbus_fmt);
- return 0;
+ return 0;
}
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
- int ret_val = 0;
- struct cx25821_fh *fh = priv;
- struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
+ int ret_val = 0;
+ struct cx25821_fh *fh = priv;
+ struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
- ret_val = videobuf_dqbuf(get_queue(fh), p, file->f_flags & O_NONBLOCK);
+ ret_val = videobuf_dqbuf(get_queue(fh), p, file->f_flags & O_NONBLOCK);
- p->sequence = dev->channels[fh->channel_id].vidq.count;
+ p->sequence = dev->channels[fh->channel_id].vidq.count;
- return ret_val;
+ return ret_val;
}
static int vidioc_log_status(struct file *file, void *priv)
{
- struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
- struct cx25821_fh *fh = priv;
- char name[32 + 2];
+ struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
+ struct cx25821_fh *fh = priv;
+ char name[32 + 2];
- struct sram_channel *sram_ch = dev->channels[fh->channel_id]
- .sram_channels;
- u32 tmp = 0;
+ struct sram_channel *sram_ch = dev->channels[fh->channel_id]
+ .sram_channels;
+ u32 tmp = 0;
- snprintf(name, sizeof(name), "%s/2", dev->name);
+ snprintf(name, sizeof(name), "%s/2", dev->name);
pr_info("%s/2: ============ START LOG STATUS ============\n",
dev->name);
- cx25821_call_all(dev, core, log_status);
- tmp = cx_read(sram_ch->dma_ctl);
+ cx25821_call_all(dev, core, log_status);
+ tmp = cx_read(sram_ch->dma_ctl);
pr_info("Video input 0 is %s\n",
(tmp & 0x11) ? "streaming" : "stopped");
pr_info("%s/2: ============= END LOG STATUS =============\n",
dev->name);
- return 0;
+ return 0;
}
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctl)
{
- struct cx25821_fh *fh = priv;
- struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
- int err;
+ struct cx25821_fh *fh = priv;
+ struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
+ int err;
- if (fh) {
- err = v4l2_prio_check(&dev->channels[fh->channel_id]
- .prio, fh->prio);
- if (0 != err)
- return err;
- }
+ if (fh) {
+ err = v4l2_prio_check(&dev->channels[fh->channel_id].prio,
+ fh->prio);
+ if (0 != err)
+ return err;
+ }
- return cx25821_set_control(dev, ctl, fh->channel_id);
+ return cx25821_set_control(dev, ctl, fh->channel_id);
}
-/* VIDEO IOCTLS */
-int cx25821_vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
+/* VIDEO IOCTLS */
+int cx25821_vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct cx25821_fh *fh = priv;
@@ -1116,7 +1110,8 @@ int cx25821_vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_form
return 0;
}
-int cx25821_vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
+int cx25821_vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct cx25821_fmt *fmt;
enum v4l2_field field;
@@ -1131,8 +1126,10 @@ int cx25821_vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fo
maxh = 576;
if (V4L2_FIELD_ANY == field) {
- field = (f->fmt.pix.height > maxh / 2)
- ? V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP;
+ if (f->fmt.pix.height > maxh / 2)
+ field = V4L2_FIELD_INTERLACED;
+ else
+ field = V4L2_FIELD_TOP;
}
switch (field) {
@@ -1162,7 +1159,8 @@ int cx25821_vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fo
return 0;
}
-int cx25821_vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
+int cx25821_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -1189,13 +1187,15 @@ int cx25821_vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-int cx25821_vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p)
+int cx25821_vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
{
struct cx25821_fh *fh = priv;
return videobuf_reqbufs(get_queue(fh), p);
}
-int cx25821_vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+int cx25821_vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
{
struct cx25821_fh *fh = priv;
return videobuf_querybuf(get_queue(fh), p);
@@ -1210,20 +1210,21 @@ int cx25821_vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
int cx25821_vidioc_g_priority(struct file *file, void *f, enum v4l2_priority *p)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)f)->dev;
- struct cx25821_fh *fh = f;
+ struct cx25821_fh *fh = f;
- *p = v4l2_prio_max(&dev->channels[fh->channel_id].prio);
+ *p = v4l2_prio_max(&dev->channels[fh->channel_id].prio);
return 0;
}
-int cx25821_vidioc_s_priority(struct file *file, void *f, enum v4l2_priority prio)
+int cx25821_vidioc_s_priority(struct file *file, void *f,
+ enum v4l2_priority prio)
{
struct cx25821_fh *fh = f;
struct cx25821_dev *dev = ((struct cx25821_fh *)f)->dev;
- return v4l2_prio_change(&dev->channels[fh->channel_id]
- .prio, &fh->prio, prio);
+ return v4l2_prio_change(&dev->channels[fh->channel_id].prio, &fh->prio,
+ prio);
}
#ifdef TUNER_FLAG
@@ -1236,15 +1237,14 @@ int cx25821_vidioc_s_std(struct file *file, void *priv, v4l2_std_id * tvnorms)
dprintk(1, "%s()\n", __func__);
if (fh) {
- err = v4l2_prio_check(&dev->channels[fh->channel_id]
- .prio, fh->prio);
+ err = v4l2_prio_check(&dev->channels[fh->channel_id].prio,
+ fh->prio);
if (0 != err)
return err;
}
- if (dev->tvnorm == *tvnorms) {
+ if (dev->tvnorm == *tvnorms)
return 0;
- }
mutex_lock(&dev->lock);
cx25821_set_tvnorm(dev, *tvnorms);
@@ -1258,7 +1258,7 @@ int cx25821_vidioc_s_std(struct file *file, void *priv, v4l2_std_id * tvnorms)
int cx25821_enum_input(struct cx25821_dev *dev, struct v4l2_input *i)
{
- static const char *iname[] = {
+ static const char * const iname[] = {
[CX25821_VMUX_COMPOSITE] = "Composite",
[CX25821_VMUX_SVIDEO] = "S-Video",
[CX25821_VMUX_DEBUG] = "for debug only",
@@ -1280,7 +1280,8 @@ int cx25821_enum_input(struct cx25821_dev *dev, struct v4l2_input *i)
return 0;
}
-int cx25821_vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i)
+int cx25821_vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
dprintk(1, "%s()\n", __func__);
@@ -1305,8 +1306,8 @@ int cx25821_vidioc_s_input(struct file *file, void *priv, unsigned int i)
dprintk(1, "%s(%d)\n", __func__, i);
if (fh) {
- err = v4l2_prio_check(&dev->channels[fh->channel_id]
- .prio, fh->prio);
+ err = v4l2_prio_check(&dev->channels[fh->channel_id].prio,
+ fh->prio);
if (0 != err)
return err;
}
@@ -1323,7 +1324,8 @@ int cx25821_vidioc_s_input(struct file *file, void *priv, unsigned int i)
}
#ifdef TUNER_FLAG
-int cx25821_vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f)
+int cx25821_vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev = fh->dev;
@@ -1350,21 +1352,22 @@ int cx25821_set_freq(struct cx25821_dev *dev, struct v4l2_frequency *f)
return 0;
}
-int cx25821_vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f)
+int cx25821_vidioc_s_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev;
int err;
if (fh) {
- dev = fh->dev;
- err = v4l2_prio_check(&dev->channels[fh->channel_id]
- .prio, fh->prio);
+ dev = fh->dev;
+ err = v4l2_prio_check(&dev->channels[fh->channel_id].prio,
+ fh->prio);
if (0 != err)
return err;
- } else {
- pr_err("Invalid fh pointer!\n");
- return -EINVAL;
+ } else {
+ pr_err("Invalid fh pointer!\n");
+ return -EINVAL;
}
return cx25821_set_freq(dev, f);
@@ -1426,8 +1429,8 @@ int cx25821_vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
int err;
if (fh) {
- err = v4l2_prio_check(&dev->channels[fh->channel_id]
- .prio, fh->prio);
+ err = v4l2_prio_check(&dev->channels[fh->channel_id].prio,
+ fh->prio);
if (0 != err)
return err;
}
@@ -1522,10 +1525,11 @@ static const struct v4l2_queryctrl *ctrl_by_id(unsigned int id)
return NULL;
}
-int cx25821_vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctl)
+int cx25821_vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctl)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
- struct cx25821_fh *fh = priv;
+ struct cx25821_fh *fh = priv;
const struct v4l2_queryctrl *ctrl;
@@ -1535,16 +1539,16 @@ int cx25821_vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ct
return -EINVAL;
switch (ctl->id) {
case V4L2_CID_BRIGHTNESS:
- ctl->value = dev->channels[fh->channel_id].ctl_bright;
+ ctl->value = dev->channels[fh->channel_id].ctl_bright;
break;
case V4L2_CID_HUE:
- ctl->value = dev->channels[fh->channel_id].ctl_hue;
+ ctl->value = dev->channels[fh->channel_id].ctl_hue;
break;
case V4L2_CID_CONTRAST:
- ctl->value = dev->channels[fh->channel_id].ctl_contrast;
+ ctl->value = dev->channels[fh->channel_id].ctl_contrast;
break;
case V4L2_CID_SATURATION:
- ctl->value = dev->channels[fh->channel_id].ctl_saturation;
+ ctl->value = dev->channels[fh->channel_id].ctl_saturation;
break;
}
return 0;
@@ -1578,19 +1582,19 @@ int cx25821_set_control(struct cx25821_dev *dev,
switch (ctl->id) {
case V4L2_CID_BRIGHTNESS:
- dev->channels[chan_num].ctl_bright = ctl->value;
+ dev->channels[chan_num].ctl_bright = ctl->value;
medusa_set_brightness(dev, ctl->value, chan_num);
break;
case V4L2_CID_HUE:
- dev->channels[chan_num].ctl_hue = ctl->value;
+ dev->channels[chan_num].ctl_hue = ctl->value;
medusa_set_hue(dev, ctl->value, chan_num);
break;
case V4L2_CID_CONTRAST:
- dev->channels[chan_num].ctl_contrast = ctl->value;
+ dev->channels[chan_num].ctl_contrast = ctl->value;
medusa_set_contrast(dev, ctl->value, chan_num);
break;
case V4L2_CID_SATURATION:
- dev->channels[chan_num].ctl_saturation = ctl->value;
+ dev->channels[chan_num].ctl_saturation = ctl->value;
medusa_set_saturation(dev, ctl->value, chan_num);
break;
}
@@ -1612,7 +1616,8 @@ static void cx25821_init_controls(struct cx25821_dev *dev, int chan_num)
}
}
-int cx25821_vidioc_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cropcap)
+int cx25821_vidioc_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *cropcap)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -1622,9 +1627,9 @@ int cx25821_vidioc_cropcap(struct file *file, void *priv, struct v4l2_cropcap *c
cropcap->bounds.width = 720;
cropcap->bounds.height = dev->tvnorm == V4L2_STD_PAL_BG ? 576 : 480;
cropcap->pixelaspect.numerator =
- dev->tvnorm == V4L2_STD_PAL_BG ? 59 : 10;
+ dev->tvnorm == V4L2_STD_PAL_BG ? 59 : 10;
cropcap->pixelaspect.denominator =
- dev->tvnorm == V4L2_STD_PAL_BG ? 54 : 11;
+ dev->tvnorm == V4L2_STD_PAL_BG ? 54 : 11;
cropcap->defrect = cropcap->bounds;
return 0;
}
@@ -1636,24 +1641,24 @@ int cx25821_vidioc_s_crop(struct file *file, void *priv, struct v4l2_crop *crop)
int err;
if (fh) {
- err = v4l2_prio_check(&dev->channels[fh->channel_id].
- prio, fh->prio);
+ err = v4l2_prio_check(&dev->channels[fh->channel_id].prio,
+ fh->prio);
if (0 != err)
return err;
}
- /* cx25821_vidioc_s_crop not supported */
+ /* cx25821_vidioc_s_crop not supported */
return -EINVAL;
}
int cx25821_vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *crop)
{
- /* cx25821_vidioc_g_crop not supported */
+ /* cx25821_vidioc_g_crop not supported */
return -EINVAL;
}
int cx25821_vidioc_querystd(struct file *file, void *priv, v4l2_std_id * norm)
{
- /* medusa does not support video standard sensing of current input */
+ /* medusa does not support video standard sensing of current input */
*norm = CX25821_NORMS;
return 0;
@@ -1699,310 +1704,309 @@ int cx25821_is_valid_height(u32 height, v4l2_std_id tvnorm)
static long video_ioctl_upstream9(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct cx25821_fh *fh = file->private_data;
- struct cx25821_dev *dev = fh->dev;
- int command = 0;
- struct upstream_user_struct *data_from_user;
+ struct cx25821_fh *fh = file->private_data;
+ struct cx25821_dev *dev = fh->dev;
+ int command = 0;
+ struct upstream_user_struct *data_from_user;
- data_from_user = (struct upstream_user_struct *)arg;
+ data_from_user = (struct upstream_user_struct *)arg;
if (!data_from_user) {
pr_err("%s(): Upstream data is INVALID. Returning\n", __func__);
return 0;
}
- command = data_from_user->command;
+ command = data_from_user->command;
- if (command != UPSTREAM_START_VIDEO &&
- command != UPSTREAM_STOP_VIDEO)
- return 0;
+ if (command != UPSTREAM_START_VIDEO && command != UPSTREAM_STOP_VIDEO)
+ return 0;
- dev->input_filename = data_from_user->input_filename;
- dev->input_audiofilename = data_from_user->input_filename;
- dev->vid_stdname = data_from_user->vid_stdname;
- dev->pixel_format = data_from_user->pixel_format;
- dev->channel_select = data_from_user->channel_select;
- dev->command = data_from_user->command;
+ dev->input_filename = data_from_user->input_filename;
+ dev->input_audiofilename = data_from_user->input_filename;
+ dev->vid_stdname = data_from_user->vid_stdname;
+ dev->pixel_format = data_from_user->pixel_format;
+ dev->channel_select = data_from_user->channel_select;
+ dev->command = data_from_user->command;
- switch (command) {
- case UPSTREAM_START_VIDEO:
- cx25821_start_upstream_video_ch1(dev, data_from_user);
- break;
+ switch (command) {
+ case UPSTREAM_START_VIDEO:
+ cx25821_start_upstream_video_ch1(dev, data_from_user);
+ break;
- case UPSTREAM_STOP_VIDEO:
- cx25821_stop_upstream_video_ch1(dev);
- break;
- }
+ case UPSTREAM_STOP_VIDEO:
+ cx25821_stop_upstream_video_ch1(dev);
+ break;
+ }
- return 0;
+ return 0;
}
static long video_ioctl_upstream10(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct cx25821_fh *fh = file->private_data;
- struct cx25821_dev *dev = fh->dev;
- int command = 0;
- struct upstream_user_struct *data_from_user;
+ struct cx25821_fh *fh = file->private_data;
+ struct cx25821_dev *dev = fh->dev;
+ int command = 0;
+ struct upstream_user_struct *data_from_user;
- data_from_user = (struct upstream_user_struct *)arg;
+ data_from_user = (struct upstream_user_struct *)arg;
if (!data_from_user) {
pr_err("%s(): Upstream data is INVALID. Returning\n", __func__);
return 0;
}
- command = data_from_user->command;
+ command = data_from_user->command;
- if (command != UPSTREAM_START_VIDEO &&
- command != UPSTREAM_STOP_VIDEO)
- return 0;
+ if (command != UPSTREAM_START_VIDEO && command != UPSTREAM_STOP_VIDEO)
+ return 0;
- dev->input_filename_ch2 = data_from_user->input_filename;
- dev->input_audiofilename = data_from_user->input_filename;
- dev->vid_stdname_ch2 = data_from_user->vid_stdname;
- dev->pixel_format_ch2 = data_from_user->pixel_format;
- dev->channel_select_ch2 = data_from_user->channel_select;
- dev->command_ch2 = data_from_user->command;
+ dev->input_filename_ch2 = data_from_user->input_filename;
+ dev->input_audiofilename = data_from_user->input_filename;
+ dev->vid_stdname_ch2 = data_from_user->vid_stdname;
+ dev->pixel_format_ch2 = data_from_user->pixel_format;
+ dev->channel_select_ch2 = data_from_user->channel_select;
+ dev->command_ch2 = data_from_user->command;
- switch (command) {
- case UPSTREAM_START_VIDEO:
- cx25821_start_upstream_video_ch2(dev, data_from_user);
- break;
+ switch (command) {
+ case UPSTREAM_START_VIDEO:
+ cx25821_start_upstream_video_ch2(dev, data_from_user);
+ break;
- case UPSTREAM_STOP_VIDEO:
- cx25821_stop_upstream_video_ch2(dev);
- break;
- }
+ case UPSTREAM_STOP_VIDEO:
+ cx25821_stop_upstream_video_ch2(dev);
+ break;
+ }
- return 0;
+ return 0;
}
static long video_ioctl_upstream11(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct cx25821_fh *fh = file->private_data;
- struct cx25821_dev *dev = fh->dev;
- int command = 0;
- struct upstream_user_struct *data_from_user;
+ struct cx25821_fh *fh = file->private_data;
+ struct cx25821_dev *dev = fh->dev;
+ int command = 0;
+ struct upstream_user_struct *data_from_user;
- data_from_user = (struct upstream_user_struct *)arg;
+ data_from_user = (struct upstream_user_struct *)arg;
if (!data_from_user) {
pr_err("%s(): Upstream data is INVALID. Returning\n", __func__);
return 0;
}
- command = data_from_user->command;
+ command = data_from_user->command;
- if (command != UPSTREAM_START_AUDIO &&
- command != UPSTREAM_STOP_AUDIO)
- return 0;
+ if (command != UPSTREAM_START_AUDIO && command != UPSTREAM_STOP_AUDIO)
+ return 0;
- dev->input_filename = data_from_user->input_filename;
- dev->input_audiofilename = data_from_user->input_filename;
- dev->vid_stdname = data_from_user->vid_stdname;
- dev->pixel_format = data_from_user->pixel_format;
- dev->channel_select = data_from_user->channel_select;
- dev->command = data_from_user->command;
+ dev->input_filename = data_from_user->input_filename;
+ dev->input_audiofilename = data_from_user->input_filename;
+ dev->vid_stdname = data_from_user->vid_stdname;
+ dev->pixel_format = data_from_user->pixel_format;
+ dev->channel_select = data_from_user->channel_select;
+ dev->command = data_from_user->command;
- switch (command) {
- case UPSTREAM_START_AUDIO:
- cx25821_start_upstream_audio(dev, data_from_user);
- break;
+ switch (command) {
+ case UPSTREAM_START_AUDIO:
+ cx25821_start_upstream_audio(dev, data_from_user);
+ break;
- case UPSTREAM_STOP_AUDIO:
- cx25821_stop_upstream_audio(dev);
- break;
- }
+ case UPSTREAM_STOP_AUDIO:
+ cx25821_stop_upstream_audio(dev);
+ break;
+ }
- return 0;
+ return 0;
}
static long video_ioctl_set(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct cx25821_fh *fh = file->private_data;
- struct cx25821_dev *dev = fh->dev;
- struct downstream_user_struct *data_from_user;
- int command;
- int width = 720;
- int selected_channel = 0, pix_format = 0, i = 0;
- int cif_enable = 0, cif_width = 0;
- u32 value = 0;
+ struct cx25821_fh *fh = file->private_data;
+ struct cx25821_dev *dev = fh->dev;
+ struct downstream_user_struct *data_from_user;
+ int command;
+ int width = 720;
+ int selected_channel = 0, pix_format = 0, i = 0;
+ int cif_enable = 0, cif_width = 0;
+ u32 value = 0;
- data_from_user = (struct downstream_user_struct *)arg;
+ data_from_user = (struct downstream_user_struct *)arg;
if (!data_from_user) {
pr_err("%s(): User data is INVALID. Returning\n", __func__);
return 0;
}
- command = data_from_user->command;
+ command = data_from_user->command;
- if (command != SET_VIDEO_STD && command != SET_PIXEL_FORMAT
+ if (command != SET_VIDEO_STD && command != SET_PIXEL_FORMAT
&& command != ENABLE_CIF_RESOLUTION && command != REG_READ
&& command != REG_WRITE && command != MEDUSA_READ
&& command != MEDUSA_WRITE) {
- return 0;
- }
-
- switch (command) {
- case SET_VIDEO_STD:
- dev->tvnorm =
- !strcmp(data_from_user->vid_stdname,
- "PAL") ? V4L2_STD_PAL_BG : V4L2_STD_NTSC_M;
- medusa_set_videostandard(dev);
- break;
-
- case SET_PIXEL_FORMAT:
- selected_channel = data_from_user->decoder_select;
- pix_format = data_from_user->pixel_format;
-
- if (!(selected_channel <= 7 && selected_channel >= 0)) {
- selected_channel -= 4;
- selected_channel = selected_channel % 8;
- }
-
- if (selected_channel >= 0)
- cx25821_set_pixel_format(dev, selected_channel,
+ return 0;
+ }
+
+ switch (command) {
+ case SET_VIDEO_STD:
+ if (!strcmp(data_from_user->vid_stdname, "PAL"))
+ dev->tvnorm = V4L2_STD_PAL_BG;
+ else
+ dev->tvnorm = V4L2_STD_NTSC_M;
+ medusa_set_videostandard(dev);
+ break;
+
+ case SET_PIXEL_FORMAT:
+ selected_channel = data_from_user->decoder_select;
+ pix_format = data_from_user->pixel_format;
+
+ if (!(selected_channel <= 7 && selected_channel >= 0)) {
+ selected_channel -= 4;
+ selected_channel = selected_channel % 8;
+ }
+
+ if (selected_channel >= 0)
+ cx25821_set_pixel_format(dev, selected_channel,
pix_format);
- break;
-
- case ENABLE_CIF_RESOLUTION:
- selected_channel = data_from_user->decoder_select;
- cif_enable = data_from_user->cif_resolution_enable;
- cif_width = data_from_user->cif_width;
-
- if (cif_enable) {
- if (dev->tvnorm & V4L2_STD_PAL_BG
- || dev->tvnorm & V4L2_STD_PAL_DK)
- width = 352;
- else
- width = (cif_width == 320
- || cif_width == 352) ? cif_width : 320;
- }
-
- if (!(selected_channel <= 7 && selected_channel >= 0)) {
- selected_channel -= 4;
- selected_channel = selected_channel % 8;
- }
-
- if (selected_channel <= 7 && selected_channel >= 0) {
- dev->channels[selected_channel].
- use_cif_resolution = cif_enable;
- dev->channels[selected_channel].cif_width = width;
- } else {
- for (i = 0; i < VID_CHANNEL_NUM; i++) {
- dev->channels[i].use_cif_resolution =
- cif_enable;
- dev->channels[i].cif_width = width;
- }
- }
-
- medusa_set_resolution(dev, width, selected_channel);
- break;
- case REG_READ:
- data_from_user->reg_data = cx_read(data_from_user->reg_address);
- break;
- case REG_WRITE:
- cx_write(data_from_user->reg_address, data_from_user->reg_data);
- break;
- case MEDUSA_READ:
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- (u16) data_from_user->reg_address,
- &data_from_user->reg_data);
- break;
- case MEDUSA_WRITE:
- cx25821_i2c_write(&dev->i2c_bus[0],
- (u16) data_from_user->reg_address,
- data_from_user->reg_data);
- break;
- }
-
- return 0;
-}
+ break;
-static long cx25821_video_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- int ret = 0;
+ case ENABLE_CIF_RESOLUTION:
+ selected_channel = data_from_user->decoder_select;
+ cif_enable = data_from_user->cif_resolution_enable;
+ cif_width = data_from_user->cif_width;
+
+ if (cif_enable) {
+ if (dev->tvnorm & V4L2_STD_PAL_BG
+ || dev->tvnorm & V4L2_STD_PAL_DK) {
+ width = 352;
+ } else {
+ width = cif_width;
+ if (cif_width != 320 && cif_width != 352)
+ width = 320;
+ }
+ }
- struct cx25821_fh *fh = file->private_data;
+ if (!(selected_channel <= 7 && selected_channel >= 0)) {
+ selected_channel -= 4;
+ selected_channel = selected_channel % 8;
+ }
+
+ if (selected_channel <= 7 && selected_channel >= 0) {
+ dev->channels[selected_channel].
+ use_cif_resolution = cif_enable;
+ dev->channels[selected_channel].cif_width = width;
+ } else {
+ for (i = 0; i < VID_CHANNEL_NUM; i++) {
+ dev->channels[i].use_cif_resolution =
+ cif_enable;
+ dev->channels[i].cif_width = width;
+ }
+ }
- /* check to see if it's the video upstream */
- if (fh->channel_id == SRAM_CH09) {
- ret = video_ioctl_upstream9(file, cmd, arg);
- return ret;
- } else if (fh->channel_id == SRAM_CH10) {
- ret = video_ioctl_upstream10(file, cmd, arg);
- return ret;
- } else if (fh->channel_id == SRAM_CH11) {
- ret = video_ioctl_upstream11(file, cmd, arg);
- ret = video_ioctl_set(file, cmd, arg);
- return ret;
- }
+ medusa_set_resolution(dev, width, selected_channel);
+ break;
+ case REG_READ:
+ data_from_user->reg_data = cx_read(data_from_user->reg_address);
+ break;
+ case REG_WRITE:
+ cx_write(data_from_user->reg_address, data_from_user->reg_data);
+ break;
+ case MEDUSA_READ:
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ (u16) data_from_user->reg_address,
+ &data_from_user->reg_data);
+ break;
+ case MEDUSA_WRITE:
+ cx25821_i2c_write(&dev->i2c_bus[0],
+ (u16) data_from_user->reg_address,
+ data_from_user->reg_data);
+ break;
+ }
+
+ return 0;
+}
+
+static long cx25821_video_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+
+ struct cx25821_fh *fh = file->private_data;
+
+ /* check to see if it's the video upstream */
+ if (fh->channel_id == SRAM_CH09) {
+ ret = video_ioctl_upstream9(file, cmd, arg);
+ return ret;
+ } else if (fh->channel_id == SRAM_CH10) {
+ ret = video_ioctl_upstream10(file, cmd, arg);
+ return ret;
+ } else if (fh->channel_id == SRAM_CH11) {
+ ret = video_ioctl_upstream11(file, cmd, arg);
+ ret = video_ioctl_set(file, cmd, arg);
+ return ret;
+ }
- return video_ioctl2(file, cmd, arg);
+ return video_ioctl2(file, cmd, arg);
}
/* exported stuff */
static const struct v4l2_file_operations video_fops = {
- .owner = THIS_MODULE,
- .open = video_open,
- .release = video_release,
- .read = video_read,
- .poll = video_poll,
- .mmap = cx25821_video_mmap,
- .ioctl = cx25821_video_ioctl,
+ .owner = THIS_MODULE,
+ .open = video_open,
+ .release = video_release,
+ .read = video_read,
+ .poll = video_poll,
+ .mmap = cx25821_video_mmap,
+ .ioctl = cx25821_video_ioctl,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = cx25821_vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = cx25821_vidioc_reqbufs,
- .vidioc_querybuf = cx25821_vidioc_querybuf,
- .vidioc_qbuf = cx25821_vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = cx25821_vidioc_s_std,
- .vidioc_querystd = cx25821_vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = cx25821_vidioc_cropcap,
- .vidioc_s_crop = cx25821_vidioc_s_crop,
- .vidioc_g_crop = cx25821_vidioc_g_crop,
- .vidioc_enum_input = cx25821_vidioc_enum_input,
- .vidioc_g_input = cx25821_vidioc_g_input,
- .vidioc_s_input = cx25821_vidioc_s_input,
- .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = cx25821_vidioc_queryctrl,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
- .vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = cx25821_vidioc_g_priority,
- .vidioc_s_priority = cx25821_vidioc_s_priority,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_log_status = vidioc_log_status,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef TUNER_FLAG
- .vidioc_g_tuner = cx25821_vidioc_g_tuner,
- .vidioc_s_tuner = cx25821_vidioc_s_tuner,
- .vidioc_g_frequency = cx25821_vidioc_g_frequency,
- .vidioc_s_frequency = cx25821_vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = cx25821_vidioc_g_register,
- .vidioc_s_register = cx25821_vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
struct video_device cx25821_videoioctl_template = {
- .name = "cx25821-videoioctl",
- .fops = &video_fops,
- .ioctl_ops = &video_ioctl_ops,
- .tvnorms = CX25821_NORMS,
- .current_norm = V4L2_STD_NTSC_M,
+ .name = "cx25821-videoioctl",
+ .fops = &video_fops,
+ .ioctl_ops = &video_ioctl_ops,
+ .tvnorms = CX25821_NORMS,
+ .current_norm = V4L2_STD_NTSC_M,
};
diff --git a/drivers/staging/cx25821/cx25821-video.h b/drivers/staging/cx25821/cx25821-video.h
index f4ee8051b8b..d0d9538ca5b 100644
--- a/drivers/staging/cx25821/cx25821-video.h
+++ b/drivers/staging/cx25821/cx25821-video.h
@@ -63,7 +63,7 @@ do { \
#define REG_READ 900
#define REG_WRITE 901
#define MEDUSA_READ 910
-#define MEDUSA_WRITE 911
+#define MEDUSA_WRITE 911
extern struct sram_channel *channel0;
extern struct sram_channel *channel1;
@@ -87,7 +87,7 @@ extern struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc);
extern struct cx25821_data timeout_data[MAX_VID_CHANNEL_NUM];
extern void cx25821_dump_video_queue(struct cx25821_dev *dev,
- struct cx25821_dmaqueue *q);
+ struct cx25821_dmaqueue *q);
extern void cx25821_video_wakeup(struct cx25821_dev *dev,
struct cx25821_dmaqueue *q, u32 count);
@@ -96,11 +96,11 @@ extern int cx25821_set_tvnorm(struct cx25821_dev *dev, v4l2_std_id norm);
#endif
extern int cx25821_res_get(struct cx25821_dev *dev, struct cx25821_fh *fh,
- unsigned int bit);
+ unsigned int bit);
extern int cx25821_res_check(struct cx25821_fh *fh, unsigned int bit);
extern int cx25821_res_locked(struct cx25821_fh *fh, unsigned int bit);
extern void cx25821_res_free(struct cx25821_dev *dev, struct cx25821_fh *fh,
- unsigned int bits);
+ unsigned int bits);
extern int cx25821_video_mux(struct cx25821_dev *dev, unsigned int input);
extern int cx25821_start_video_dma(struct cx25821_dev *dev,
struct cx25821_dmaqueue *q,
@@ -115,63 +115,74 @@ extern int cx25821_video_register(struct cx25821_dev *dev);
extern int cx25821_get_format_size(void);
extern int cx25821_buffer_setup(struct videobuf_queue *q, unsigned int *count,
- unsigned int *size);
-extern int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
- enum v4l2_field field);
+ unsigned int *size);
+extern int cx25821_buffer_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field);
extern void cx25821_buffer_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb);
+ struct videobuf_buffer *vb);
extern struct videobuf_queue *get_queue(struct cx25821_fh *fh);
extern int cx25821_get_resource(struct cx25821_fh *fh, int resource);
extern int cx25821_video_mmap(struct file *file, struct vm_area_struct *vma);
extern int cx25821_vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f);
+ struct v4l2_format *f);
extern int cx25821_vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap);
+ struct v4l2_capability *cap);
extern int cx25821_vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f);
+ struct v4l2_fmtdesc *f);
extern int cx25821_vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p);
+ struct v4l2_requestbuffers *p);
extern int cx25821_vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *p);
-extern int cx25821_vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p);
-extern int cx25821_vidioc_s_std(struct file *file, void *priv, v4l2_std_id * tvnorms);
+ struct v4l2_buffer *p);
+extern int cx25821_vidioc_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p);
+extern int cx25821_vidioc_s_std(struct file *file, void *priv,
+ v4l2_std_id *tvnorms);
extern int cx25821_enum_input(struct cx25821_dev *dev, struct v4l2_input *i);
extern int cx25821_vidioc_enum_input(struct file *file, void *priv,
- struct v4l2_input *i);
-extern int cx25821_vidioc_g_input(struct file *file, void *priv, unsigned int *i);
-extern int cx25821_vidioc_s_input(struct file *file, void *priv, unsigned int i);
+ struct v4l2_input *i);
+extern int cx25821_vidioc_g_input(struct file *file, void *priv,
+ unsigned int *i);
+extern int cx25821_vidioc_s_input(struct file *file, void *priv,
+ unsigned int i);
extern int cx25821_vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctl);
+ struct v4l2_control *ctl);
extern int cx25821_vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f);
+ struct v4l2_format *f);
extern int cx25821_vidioc_g_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f);
+ struct v4l2_frequency *f);
extern int cx25821_set_freq(struct cx25821_dev *dev, struct v4l2_frequency *f);
extern int cx25821_vidioc_s_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f);
+ struct v4l2_frequency *f);
extern int cx25821_vidioc_g_register(struct file *file, void *fh,
- struct v4l2_dbg_register *reg);
+ struct v4l2_dbg_register *reg);
extern int cx25821_vidioc_s_register(struct file *file, void *fh,
- struct v4l2_dbg_register *reg);
-extern int cx25821_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
-extern int cx25821_vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
+ struct v4l2_dbg_register *reg);
+extern int cx25821_vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t);
+extern int cx25821_vidioc_s_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t);
extern int cx25821_is_valid_width(u32 width, v4l2_std_id tvnorm);
extern int cx25821_is_valid_height(u32 height, v4l2_std_id tvnorm);
-extern int cx25821_vidioc_g_priority(struct file *file, void *f, enum v4l2_priority *p);
+extern int cx25821_vidioc_g_priority(struct file *file, void *f,
+ enum v4l2_priority *p);
extern int cx25821_vidioc_s_priority(struct file *file, void *f,
- enum v4l2_priority prio);
+ enum v4l2_priority prio);
extern int cx25821_vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qctrl);
+ struct v4l2_queryctrl *qctrl);
extern int cx25821_set_control(struct cx25821_dev *dev,
struct v4l2_control *ctrl, int chan_num);
extern int cx25821_vidioc_cropcap(struct file *file, void *fh,
- struct v4l2_cropcap *cropcap);
-extern int cx25821_vidioc_s_crop(struct file *file, void *priv, struct v4l2_crop *crop);
-extern int cx25821_vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *crop);
-
-extern int cx25821_vidioc_querystd(struct file *file, void *priv, v4l2_std_id * norm);
+ struct v4l2_cropcap *cropcap);
+extern int cx25821_vidioc_s_crop(struct file *file, void *priv,
+ struct v4l2_crop *crop);
+extern int cx25821_vidioc_g_crop(struct file *file, void *priv,
+ struct v4l2_crop *crop);
+
+extern int cx25821_vidioc_querystd(struct file *file, void *priv,
+ v4l2_std_id *norm);
#endif
diff --git a/drivers/staging/cx25821/cx25821.h b/drivers/staging/cx25821/cx25821.h
index 6230243e2cc..db2615b2bac 100644
--- a/drivers/staging/cx25821/cx25821.h
+++ b/drivers/staging/cx25821/cx25821.h
@@ -179,15 +179,17 @@ struct cx25821_input {
u32 gpio0, gpio1, gpio2, gpio3;
};
-typedef enum {
+enum port {
CX25821_UNDEFINED = 0,
CX25821_RAW,
CX25821_264
-} port_t;
+};
struct cx25821_board {
char *name;
- port_t porta, portb, portc;
+ enum port porta;
+ enum port portb;
+ enum port portc;
unsigned int tuner_type;
unsigned int radio_type;
unsigned char tuner_addr;
@@ -308,7 +310,7 @@ struct cx25821_dev {
int _audiofile_status;
int _audio_lines_count;
int _audioframe_count;
- int _audio_upstream_channel_select;
+ int _audio_upstream_channel;
int _last_index_irq; /* The last interrupt index processed. */
__le32 *_risc_audio_jmp_addr;
diff --git a/drivers/staging/cxd2099/Makefile b/drivers/staging/cxd2099/Makefile
index 72b14558c11..64cfc77be35 100644
--- a/drivers/staging/cxd2099/Makefile
+++ b/drivers/staging/cxd2099/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_DVB_CXD2099) += cxd2099.o
-EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
-EXTRA_CFLAGS += -Idrivers/media/dvb/frontends/
-EXTRA_CFLAGS += -Idrivers/media/common/tuners/
+ccflags-y += -Idrivers/media/dvb/dvb-core/
+ccflags-y += -Idrivers/media/dvb/frontends/
+ccflags-y += -Idrivers/media/common/tuners/
diff --git a/drivers/staging/cxt1e1/Kconfig b/drivers/staging/cxt1e1/Kconfig
index 73430ef6ae2..947f42a65c5 100644
--- a/drivers/staging/cxt1e1/Kconfig
+++ b/drivers/staging/cxt1e1/Kconfig
@@ -6,8 +6,7 @@ config CXT1E1
channelized stream WAN adapter card which contains a HDLC/Transparent
mode controller.
- If you want to compile this driver as a module
- say M here and read <file:Documentation/modules.txt>.
+ If you want to compile this driver as a module say M here.
The module will be called 'cxt1e1'.
If unsure, say N.
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index 9ced08f253b..24e009c0149 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -1017,13 +1017,7 @@ c4_add_dev (hdw_info_t * hi, int brdno, unsigned long f0, unsigned long f1,
**************************************************************/
if (request_irq (irq0, &c4_linux_interrupt,
-#if defined(SBE_ISR_TASKLET)
- IRQF_DISABLED | IRQF_SHARED,
-#elif defined(SBE_ISR_IMMEDIATE)
- IRQF_DISABLED | IRQF_SHARED,
-#elif defined(SBE_ISR_INLINE)
IRQF_SHARED,
-#endif
ndev->name, ndev))
{
pr_warning("%s: MUSYCC could not get irq: %d\n", ndev->name, irq0);
diff --git a/drivers/staging/cxt1e1/sbecom_inline_linux.h b/drivers/staging/cxt1e1/sbecom_inline_linux.h
index c0563e68997..9ea2c0c2061 100644
--- a/drivers/staging/cxt1e1/sbecom_inline_linux.h
+++ b/drivers/staging/cxt1e1/sbecom_inline_linux.h
@@ -43,81 +43,17 @@
*/
-#if defined (__FreeBSD__) || defined (__NetBSD__)
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#if defined(CONFIG_SMP) && ! defined(__SMP__)
-#define __SMP__
-#endif
-#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
-#define MODVERSIONS
-#endif
-
-#ifdef MODULE
-#ifdef MODVERSIONS
-#include <config/modversions.h>
-#endif
#include <linux/module.h>
-#endif
-#endif
-
#include <linux/kernel.h> /* resolves kmalloc references */
#include <linux/skbuff.h> /* resolves skb references */
#include <linux/netdevice.h> /* resolves dev_kree_skb_any */
#include <asm/byteorder.h> /* resolves cpu_to_le32 */
-#if 0
-
-/*** PORT POINT WARNING
- ***
- *** Under Linux 2.6 it has been found that compiler is re-ordering
- *** in-lined pci_write_32() functions to the detrement of correct
- *** hardware setup. Therefore, inlining of PCI accesses has been
- *** de-implemented, and subroutine calls have been implemented.
- ***/
-
-static inline u_int32_t
-pci_read_32 (u_int32_t *p)
-{
-#ifdef FLOW_DEBUG
- u_int32_t v;
-
- FLUSH_PCI_READ ();
- v = le32_to_cpu (*p);
- if (cxt1e1_log_level >= LOG_DEBUG)
- pr_info("pci_read : %x = %x\n", (u_int32_t) p, v);
- return v;
-#else
- FLUSH_PCI_READ (); /* */
- return le32_to_cpu (*p);
-#endif
-}
-
-static inline void
-pci_write_32 (u_int32_t *p, u_int32_t v)
-{
-#ifdef FLOW_DEBUG
- if (cxt1e1_log_level >= LOG_DEBUG)
- pr_info("pci_write: %x = %x\n", (u_int32_t) p, v);
-#endif
- *p = cpu_to_le32 (v);
- FLUSH_PCI_WRITE (); /* This routine is called from routines
- * which do multiple register writes
- * which themselves need flushing between
- * writes in order to guarantee write
- * ordering. It is less code-cumbersome
- * to flush here-in then to investigate
- * and code the many other register
- * writing routines. */
-}
-#else
/* forward reference */
u_int32_t pci_read_32 (u_int32_t *p);
void pci_write_32 (u_int32_t *p, u_int32_t v);
-#endif
-
/*
* system dependent callbacks
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.c b/drivers/staging/dt3155v4l/dt3155v4l.c
index 05aa41cf875..5b212dc725e 100644
--- a/drivers/staging/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/dt3155v4l/dt3155v4l.c
@@ -18,6 +18,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
+#include <linux/module.h>
#include <linux/version.h>
#include <linux/stringify.h>
#include <linux/delay.h>
@@ -103,18 +104,13 @@ read_i2c_reg(void __iomem *addr, u8 index, u8 *data)
iowrite32((tmp<<17) | IIC_READ, addr + IIC_CSR2);
mmiowb();
udelay(45); /* wait at least 43 usec for NEW_CYCLE to clear */
- if (ioread32(addr + IIC_CSR2) & NEW_CYCLE) {
- /* error: NEW_CYCLE not cleared */
- printk(KERN_ERR "dt3155: NEW_CYCLE not cleared\n");
- return -EIO;
- }
+ if (ioread32(addr + IIC_CSR2) & NEW_CYCLE)
+ return -EIO; /* error: NEW_CYCLE not cleared */
tmp = ioread32(addr + IIC_CSR1);
if (tmp & DIRECT_ABORT) {
- /* error: DIRECT_ABORT set */
- printk(KERN_ERR "dt3155: DIRECT_ABORT set\n");
/* reset DIRECT_ABORT bit */
iowrite32(DIRECT_ABORT, addr + IIC_CSR1);
- return -EIO;
+ return -EIO; /* error: DIRECT_ABORT set */
}
*data = tmp>>24;
return 0;
@@ -140,17 +136,12 @@ write_i2c_reg(void __iomem *addr, u8 index, u8 data)
iowrite32((tmp<<17) | IIC_WRITE | data, addr + IIC_CSR2);
mmiowb();
udelay(65); /* wait at least 63 usec for NEW_CYCLE to clear */
- if (ioread32(addr + IIC_CSR2) & NEW_CYCLE) {
- /* error: NEW_CYCLE not cleared */
- printk(KERN_ERR "dt3155: NEW_CYCLE not cleared\n");
- return -EIO;
- }
+ if (ioread32(addr + IIC_CSR2) & NEW_CYCLE)
+ return -EIO; /* error: NEW_CYCLE not cleared */
if (ioread32(addr + IIC_CSR1) & DIRECT_ABORT) {
- /* error: DIRECT_ABORT set */
- printk(KERN_ERR "dt3155: DIRECT_ABORT set\n");
/* reset DIRECT_ABORT bit */
iowrite32(DIRECT_ABORT, addr + IIC_CSR1);
- return -EIO;
+ return -EIO; /* error: DIRECT_ABORT set */
}
return 0;
}
@@ -186,17 +177,12 @@ static int wait_i2c_reg(void __iomem *addr)
{
if (ioread32(addr + IIC_CSR2) & NEW_CYCLE)
udelay(65); /* wait at least 63 usec for NEW_CYCLE to clear */
- if (ioread32(addr + IIC_CSR2) & NEW_CYCLE) {
- /* error: NEW_CYCLE not cleared */
- printk(KERN_ERR "dt3155: NEW_CYCLE not cleared\n");
- return -EIO;
- }
+ if (ioread32(addr + IIC_CSR2) & NEW_CYCLE)
+ return -EIO; /* error: NEW_CYCLE not cleared */
if (ioread32(addr + IIC_CSR1) & DIRECT_ABORT) {
- /* error: DIRECT_ABORT set */
- printk(KERN_ERR "dt3155: DIRECT_ABORT set\n");
/* reset DIRECT_ABORT bit */
iowrite32(DIRECT_ABORT, addr + IIC_CSR1);
- return -EIO;
+ return -EIO; /* error: DIRECT_ABORT set */
}
return 0;
}
@@ -344,17 +330,14 @@ dt3155_irq_handler_even(int irq, void *dev_id)
ipd->field_count++;
return IRQ_HANDLED; /* start of field irq */
}
- if ((tmp & FLD_START) && (tmp & FLD_END_ODD)) {
- if (!ipd->stats.start_before_end++)
- printk(KERN_ERR "dt3155: irq: START before END\n");
- }
+ if ((tmp & FLD_START) && (tmp & FLD_END_ODD))
+ ipd->stats.start_before_end++;
/* check for corrupted fields */
/* write_i2c_reg(ipd->regs, EVEN_CSR, CSR_ERROR | CSR_DONE); */
/* write_i2c_reg(ipd->regs, ODD_CSR, CSR_ERROR | CSR_DONE); */
tmp = ioread32(ipd->regs + CSR1) & (FLD_CRPT_EVEN | FLD_CRPT_ODD);
if (tmp) {
- if (!ipd->stats.corrupted_fields++)
- printk(KERN_ERR "dt3155: corrupted field %u\n", tmp);
+ ipd->stats.corrupted_fields++;
iowrite32(FIFO_EN | SRST | FLD_CRPT_ODD | FLD_CRPT_EVEN |
FLD_DN_ODD | FLD_DN_EVEN |
CAP_CONT_EVEN | CAP_CONT_ODD,
@@ -404,13 +387,9 @@ dt3155_open(struct file *filp)
int ret = 0;
struct dt3155_priv *pd = video_drvdata(filp);
- printk(KERN_INFO "dt3155: open(): minor: %i, users: %i\n",
- pd->vdev->minor, pd->users);
-
if (!pd->users) {
pd->q = kzalloc(sizeof(*pd->q), GFP_KERNEL);
if (!pd->q) {
- printk(KERN_ERR "dt3155: error: alloc queue\n");
ret = -ENOMEM;
goto err_alloc_queue;
}
@@ -427,13 +406,10 @@ dt3155_open(struct file *filp)
/* disable all irqs, clear all irq flags */
iowrite32(FLD_START | FLD_END_EVEN | FLD_END_ODD,
pd->regs + INT_CSR);
- pd->irq_handler = dt3155_irq_handler_even;
- ret = request_irq(pd->pdev->irq, pd->irq_handler,
+ ret = request_irq(pd->pdev->irq, dt3155_irq_handler_even,
IRQF_SHARED, DT3155_NAME, pd);
- if (ret) {
- printk(KERN_ERR "dt3155: error: request_irq\n");
+ if (ret)
goto err_request_irq;
- }
}
pd->users++;
return 0; /* success */
@@ -449,9 +425,6 @@ dt3155_release(struct file *filp)
{
struct dt3155_priv *pd = video_drvdata(filp);
- printk(KERN_INFO "dt3155: release(): minor: %i, users: %i\n",
- pd->vdev->minor, pd->users - 1);
-
pd->users--;
BUG_ON(pd->users < 0);
if (!pd->users) {
@@ -804,11 +777,8 @@ dt3155_init_board(struct pci_dev *pdev)
/* allocate memory, and initialize the DMA machine */
buf_cpu = dma_alloc_coherent(&pdev->dev, DT3155_BUF_SIZE, &buf_dma,
GFP_KERNEL);
- if (!buf_cpu) {
- printk(KERN_ERR "dt3155: dma_alloc_coherent "
- "(in dt3155_init_board) failed\n");
+ if (!buf_cpu)
return -ENOMEM;
- }
iowrite32(buf_dma, pd->regs + EVEN_DMA_START);
iowrite32(buf_dma, pd->regs + ODD_DMA_START);
iowrite32(0, pd->regs + EVEN_DMA_STRIDE);
@@ -829,10 +799,8 @@ dt3155_init_board(struct pci_dev *pdev)
/* deallocate memory */
dma_free_coherent(&pdev->dev, DT3155_BUF_SIZE, buf_cpu, buf_dma);
- if (tmp & BUSY_EVEN) {
- printk(KERN_ERR "dt3155: BUSY_EVEN not cleared\n");
+ if (tmp & BUSY_EVEN)
return -EIO;
- }
return 0;
}
@@ -916,27 +884,18 @@ dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int err;
struct dt3155_priv *pd;
- printk(KERN_INFO "dt3155: probe()\n");
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- printk(KERN_ERR "dt3155: cannot set dma_mask\n");
+ if (err)
return -ENODEV;
- }
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- printk(KERN_ERR "dt3155: cannot set dma_coherent_mask\n");
+ if (err)
return -ENODEV;
- }
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- printk(KERN_ERR "dt3155: cannot allocate dt3155_priv\n");
+ if (!pd)
return -ENOMEM;
- }
pd->vdev = video_device_alloc();
- if (!pd->vdev) {
- printk(KERN_ERR "dt3155: cannot allocate vdev structure\n");
+ if (!pd->vdev)
goto err_video_device_alloc;
- }
*pd->vdev = dt3155_vdev;
pci_set_drvdata(pdev, pd); /* for use in dt3155_remove() */
video_set_drvdata(pd->vdev, pd); /* for use in video_fops */
@@ -949,34 +908,25 @@ dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pd->csr2 = csr2_init;
pd->config = config_init;
err = pci_enable_device(pdev);
- if (err) {
- printk(KERN_ERR "dt3155: pci_dev not enabled\n");
+ if (err)
goto err_enable_dev;
- }
err = pci_request_region(pdev, 0, pci_name(pdev));
if (err)
goto err_req_region;
pd->regs = pci_iomap(pdev, 0, pci_resource_len(pd->pdev, 0));
- if (!pd->regs) {
+ if (!pd->regs)
err = -ENOMEM;
- printk(KERN_ERR "dt3155: pci_iomap failed\n");
goto err_pci_iomap;
- }
err = dt3155_init_board(pdev);
- if (err) {
- printk(KERN_ERR "dt3155: dt3155_init_board failed\n");
+ if (err)
goto err_init_board;
- }
err = video_register_device(pd->vdev, VFL_TYPE_GRABBER, -1);
- if (err) {
- printk(KERN_ERR "dt3155: Cannot register video device\n");
- goto err_init_board;
- }
- err = dt3155_alloc_coherent(&pdev->dev, DT3155_CHUNK_SIZE,
- DMA_MEMORY_MAP);
if (err)
- printk(KERN_INFO "dt3155: preallocated 8 buffers\n");
- printk(KERN_INFO "dt3155: /dev/video%i is ready\n", pd->vdev->minor);
+ goto err_init_board;
+ if (dt3155_alloc_coherent(&pdev->dev, DT3155_CHUNK_SIZE,
+ DMA_MEMORY_MAP))
+ dev_info(&pdev->dev, "preallocated 8 buffers\n");
+ dev_info(&pdev->dev, "/dev/video%i is ready\n", pd->vdev->minor);
return 0; /* success */
err_init_board:
@@ -997,7 +947,6 @@ dt3155_remove(struct pci_dev *pdev)
{
struct dt3155_priv *pd = pci_get_drvdata(pdev);
- printk(KERN_INFO "dt3155: remove()\n");
dt3155_free_coherent(&pdev->dev);
video_unregister_device(pd->vdev);
pci_iounmap(pdev, pd->regs);
@@ -1026,24 +975,13 @@ static struct pci_driver pci_driver = {
static int __init
dt3155_init_module(void)
{
- int err;
-
- printk(KERN_INFO "dt3155: ==================\n");
- printk(KERN_INFO "dt3155: init()\n");
- err = pci_register_driver(&pci_driver);
- if (err) {
- printk(KERN_ERR "dt3155: cannot register pci_driver\n");
- return err;
- }
- return 0; /* succes */
+ return pci_register_driver(&pci_driver);
}
static void __exit
dt3155_exit_module(void)
{
pci_unregister_driver(&pci_driver);
- printk(KERN_INFO "dt3155: exit()\n");
- printk(KERN_INFO "dt3155: ==================\n");
}
module_init(dt3155_init_module);
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.h b/drivers/staging/dt3155v4l/dt3155v4l.h
index b0792b3d9b7..2e4f89d402e 100644
--- a/drivers/staging/dt3155v4l/dt3155v4l.h
+++ b/drivers/staging/dt3155v4l/dt3155v4l.h
@@ -183,7 +183,6 @@ struct dt3155_stats {
* @q pointer to vb2_queue structure
* @curr_buf: pointer to curren buffer
* @mux: mutex to protect the instance
- * @irq_handler: irq handler for the driver
* @dmaq queue for dma buffers
* @lock spinlock for dma queue
* @field_count fields counter
@@ -199,12 +198,11 @@ struct dt3155_priv {
struct vb2_queue *q;
struct vb2_buffer *curr_buf;
struct mutex mux;
- irq_handler_t irq_handler;
struct list_head dmaq;
spinlock_t lock;
unsigned int field_count;
struct dt3155_stats stats;
- void *regs;
+ void __iomem *regs;
int users;
u8 csr2, config;
};
diff --git a/drivers/staging/easycap/easycap.h b/drivers/staging/easycap/easycap.h
index 22b24b6c5a5..7b256a948c2 100644
--- a/drivers/staging/easycap/easycap.h
+++ b/drivers/staging/easycap/easycap.h
@@ -324,13 +324,6 @@ struct easycap {
int lost[INPUT_MANY];
int merit[180];
- struct timeval timeval0;
- struct timeval timeval1;
- struct timeval timeval2;
- struct timeval timeval3;
- struct timeval timeval6;
- struct timeval timeval7;
- struct timeval timeval8;
long long int dnbydt;
int video_interface;
@@ -543,10 +536,6 @@ int read_vt(struct usb_device *, u16);
int write_vt(struct usb_device *, u16, u16);
int isdongle(struct easycap *);
/*---------------------------------------------------------------------------*/
-struct signed_div_result {
- long long int quotient;
- unsigned long long int remainder;
-} signed_div(long long int, long long int);
/*---------------------------------------------------------------------------*/
diff --git a/drivers/staging/easycap/easycap_ioctl.c b/drivers/staging/easycap/easycap_ioctl.c
index 0accab97a7f..c99addfb624 100644
--- a/drivers/staging/easycap/easycap_ioctl.c
+++ b/drivers/staging/easycap/easycap_ioctl.c
@@ -931,7 +931,6 @@ static int adjust_mute(struct easycap *peasycap, int value)
switch (peasycap->mute) {
case 1: {
peasycap->audio_idle = 1;
- peasycap->timeval0.tv_sec = 0;
SAM("adjusting mute: %i=peasycap->audio_idle\n",
peasycap->audio_idle);
return 0;
@@ -1317,17 +1316,12 @@ long easycap_unlocked_ioctl(struct file *file,
struct v4l2_control *pv4l2_control;
JOM(8, "VIDIOC_G_CTRL\n");
- pv4l2_control = kzalloc(sizeof(struct v4l2_control), GFP_KERNEL);
- if (!pv4l2_control) {
- SAM("ERROR: out of memory\n");
+ pv4l2_control = memdup_user((void __user *)arg,
+ sizeof(struct v4l2_control));
+ if (IS_ERR(pv4l2_control)) {
+ SAM("ERROR: copy from user failed\n");
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
- return -ENOMEM;
- }
- if (0 != copy_from_user(pv4l2_control, (void __user *)arg,
- sizeof(struct v4l2_control))) {
- kfree(pv4l2_control);
- mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
- return -EFAULT;
+ return PTR_ERR(pv4l2_control);
}
switch (pv4l2_control->id) {
@@ -2338,7 +2332,6 @@ long easycap_unlocked_ioctl(struct file *file,
peasycap->video_idle = 1;
peasycap->audio_idle = 1;
- peasycap->timeval0.tv_sec = 0;
/*---------------------------------------------------------------------------*/
/*
* IF THE WAIT QUEUES ARE NOT CLEARED IN RESPONSE TO THE STREAMOFF COMMAND
@@ -2356,17 +2349,12 @@ long easycap_unlocked_ioctl(struct file *file,
struct v4l2_streamparm *pv4l2_streamparm;
JOM(8, "VIDIOC_G_PARM\n");
- pv4l2_streamparm = kzalloc(sizeof(struct v4l2_streamparm), GFP_KERNEL);
- if (!pv4l2_streamparm) {
- SAM("ERROR: out of memory\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
- return -ENOMEM;
- }
- if (copy_from_user(pv4l2_streamparm,
- (void __user *)arg, sizeof(struct v4l2_streamparm))) {
- kfree(pv4l2_streamparm);
+ pv4l2_streamparm = memdup_user((void __user *)arg,
+ sizeof(struct v4l2_streamparm));
+ if (IS_ERR(pv4l2_streamparm)) {
+ SAM("ERROR: copy from user failed\n");
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
- return -EFAULT;
+ return PTR_ERR(pv4l2_streamparm);
}
if (pv4l2_streamparm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
diff --git a/drivers/staging/easycap/easycap_main.c b/drivers/staging/easycap/easycap_main.c
index bea281624c4..a45c0b50706 100644
--- a/drivers/staging/easycap/easycap_main.c
+++ b/drivers/staging/easycap/easycap_main.c
@@ -58,7 +58,7 @@ MODULE_PARM_DESC(gain, "Audio gain: 0,...,16(default),...31");
static bool easycap_ntsc;
module_param_named(ntsc, easycap_ntsc, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(ntsc, "NTCS default encoding (default PAL)");
+MODULE_PARM_DESC(ntsc, "NTSC default encoding (default PAL)");
@@ -270,7 +270,6 @@ static int reset(struct easycap *peasycap)
peasycap->video_eof = 0;
peasycap->audio_eof = 0;
- do_gettimeofday(&peasycap->timeval7);
/*---------------------------------------------------------------------------*/
/*
* RESTORE INPUT AND FORCE REFRESH OF STANDARD, FORMAT, ETC.
@@ -953,8 +952,10 @@ static unsigned int easycap_poll(struct file *file, poll_table *wait)
* peasycap, IN WHICH CASE A REPEAT CALL TO isdongle() WILL FAIL.
* IF NECESSARY, BAIL OUT.
*/
- if (kd != isdongle(peasycap))
+ if (kd != isdongle(peasycap)) {
+ mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
return -ERESTARTSYS;
+ }
if (!file) {
SAY("ERROR: file is NULL\n");
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
@@ -1213,10 +1214,6 @@ int easycap_dqbuf(struct easycap *peasycap, int mode)
int
field2frame(struct easycap *peasycap)
{
- struct timeval timeval;
- long long int above, below;
- u32 remainder;
- struct signed_div_result sdr;
void *pex, *pad;
int kex, kad, mex, mad, rex, rad, rad2;
@@ -1574,52 +1571,11 @@ field2frame(struct easycap *peasycap)
if (peasycap->field_read == peasycap->field_fill)
SAM("WARNING: on exit, filling field buffer %i\n",
peasycap->field_read);
-/*---------------------------------------------------------------------------*/
-/*
- * CALCULATE VIDEO STREAMING RATE
- */
-/*---------------------------------------------------------------------------*/
- do_gettimeofday(&timeval);
- if (peasycap->timeval6.tv_sec) {
- below = ((long long int)(1000000)) *
- ((long long int)(timeval.tv_sec -
- peasycap->timeval6.tv_sec)) +
- (long long int)(timeval.tv_usec - peasycap->timeval6.tv_usec);
- above = (long long int)1000000;
-
- sdr = signed_div(above, below);
- above = sdr.quotient;
- remainder = (u32)sdr.remainder;
-
- JOM(8, "video streaming at %3lli.%03i fields per second\n",
- above, (remainder/1000));
- }
- peasycap->timeval6 = timeval;
if (caches)
JOM(8, "%i=caches\n", caches);
return 0;
}
-/*****************************************************************************/
-struct signed_div_result
-signed_div(long long int above, long long int below)
-{
- struct signed_div_result sdr;
-
- if (((0 <= above) && (0 <= below)) || ((0 > above) && (0 > below))) {
- sdr.remainder = (unsigned long long int) do_div(above, below);
- sdr.quotient = (long long int) above;
- } else {
- if (0 > above)
- above = -above;
- if (0 > below)
- below = -below;
- sdr.remainder = (unsigned long long int) do_div(above, below);
- sdr.quotient = -((long long int) above);
- }
- return sdr;
-}
-/*****************************************************************************/
/*---------------------------------------------------------------------------*/
/*
* DECIMATION AND COLOURSPACE CONVERSION.
@@ -2753,8 +2709,6 @@ static void easycap_complete(struct urb *purb)
wake_up_interruptible
(&(peasycap->
wq_video));
- do_gettimeofday
- (&peasycap->timeval7);
} else {
peasycap->video_junk++;
if (bad & 0x0010)
diff --git a/drivers/staging/easycap/easycap_sound.c b/drivers/staging/easycap/easycap_sound.c
index 213d0400b3e..b22bb39b5f6 100644
--- a/drivers/staging/easycap/easycap_sound.c
+++ b/drivers/staging/easycap/easycap_sound.c
@@ -666,9 +666,6 @@ easycap_sound_setup(struct easycap *peasycap)
peasycap->audio_eof = 0;
peasycap->audio_idle = 0;
- peasycap->timeval1.tv_sec = 0;
- peasycap->timeval1.tv_usec = 0;
-
submit_audio_urbs(peasycap);
JOM(4, "finished initialization\n");
diff --git a/drivers/staging/et131x/Kconfig b/drivers/staging/et131x/Kconfig
index e11cf340856..f0ed08d6a97 100644
--- a/drivers/staging/et131x/Kconfig
+++ b/drivers/staging/et131x/Kconfig
@@ -7,12 +7,3 @@ config ET131X
To compile this driver as a module, choose M here. The module
will be called et131x.
-
-config ET131X_DEBUG
- bool "Enable et131x debugging"
- depends on ET131X
- default n
- ---help---
- Say Y for detailed debug information.
-
- If in doubt, say N.
diff --git a/drivers/staging/et131x/Makefile b/drivers/staging/et131x/Makefile
index dfcd2bfcb94..027ff9453fe 100644
--- a/drivers/staging/et131x/Makefile
+++ b/drivers/staging/et131x/Makefile
@@ -3,13 +3,3 @@
#
obj-$(CONFIG_ET131X) += et131x.o
-
-et131x-y := et1310_eeprom.o \
- et1310_mac.o \
- et1310_phy.o \
- et1310_pm.o \
- et1310_rx.o \
- et1310_tx.o \
- et131x_initpci.o \
- et131x_isr.o \
- et131x_netdev.o
diff --git a/drivers/staging/et131x/README b/drivers/staging/et131x/README
index 28752a50231..3458aa713a3 100644
--- a/drivers/staging/et131x/README
+++ b/drivers/staging/et131x/README
@@ -2,24 +2,15 @@ This is a driver for the ET1310 network device.
Based on the driver found at https://sourceforge.net/projects/et131x/
-Cleaned up immensely by Olaf Hartman <o.hartmann@telovital.com> and Christoph
-Hellwig <hch@infradead.org>
+Cleaned up immensely by Olaf Hartman and Christoph Hellwig <hch@infradead.org>
Note, the powermanagement options were removed from the vendor provided
driver as they did not build properly at the time.
TODO:
- - kernel coding style cleanups
- - forward port for latest network driver changes
- - kill useless typecasts (e.g. in et1310_phy.c)
- - alloc_etherdev is initializing memory with zero?!?
- - add_timer call in et131x_netdev.c is correct?
- - Add power saving functionality (suspend, sleep, resume)
- - Implement a few more kernel Parameter (set mac )
+ - Use of kmem_cache seems a bit unusual
Please send patches to:
Greg Kroah-Hartman <gregkh@suse.de>
-
-And Cc: Olaf Hartmann <o.hartmann@telovital.com> as he has this device and can
-test any changes.
+ Mark Einon <mark.einon@gmail.com>
diff --git a/drivers/staging/et131x/et1310_address_map.h b/drivers/staging/et131x/et1310_address_map.h
deleted file mode 100644
index 410677ee22b..00000000000
--- a/drivers/staging/et131x/et1310_address_map.h
+++ /dev/null
@@ -1,1434 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et1310_address_map.h - Contains the register mapping for the ET1310
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#ifndef _ET1310_ADDRESS_MAP_H_
-#define _ET1310_ADDRESS_MAP_H_
-
-
-/* START OF GLOBAL REGISTER ADDRESS MAP */
-
-/*
- * 10bit registers
- *
- * Tx queue start address reg in global address map at address 0x0000
- * tx queue end address reg in global address map at address 0x0004
- * rx queue start address reg in global address map at address 0x0008
- * rx queue end address reg in global address map at address 0x000C
- */
-
-/*
- * structure for power management control status reg in global address map
- * located at address 0x0010
- * jagcore_rx_rdy bit 9
- * jagcore_tx_rdy bit 8
- * phy_lped_en bit 7
- * phy_sw_coma bit 6
- * rxclk_gate bit 5
- * txclk_gate bit 4
- * sysclk_gate bit 3
- * jagcore_rx_en bit 2
- * jagcore_tx_en bit 1
- * gigephy_en bit 0
- */
-
-#define ET_PM_PHY_SW_COMA 0x40
-#define ET_PMCSR_INIT 0x38
-
-/*
- * Interrupt status reg at address 0x0018
- */
-
-#define ET_INTR_TXDMA_ISR 0x00000008
-#define ET_INTR_TXDMA_ERR 0x00000010
-#define ET_INTR_RXDMA_XFR_DONE 0x00000020
-#define ET_INTR_RXDMA_FB_R0_LOW 0x00000040
-#define ET_INTR_RXDMA_FB_R1_LOW 0x00000080
-#define ET_INTR_RXDMA_STAT_LOW 0x00000100
-#define ET_INTR_RXDMA_ERR 0x00000200
-#define ET_INTR_WATCHDOG 0x00004000
-#define ET_INTR_WOL 0x00008000
-#define ET_INTR_PHY 0x00010000
-#define ET_INTR_TXMAC 0x00020000
-#define ET_INTR_RXMAC 0x00040000
-#define ET_INTR_MAC_STAT 0x00080000
-#define ET_INTR_SLV_TIMEOUT 0x00100000
-
-/*
- * Interrupt mask register at address 0x001C
- * Interrupt alias clear mask reg at address 0x0020
- * Interrupt status alias reg at address 0x0024
- *
- * Same masks as above
- */
-
-/*
- * Software reset reg at address 0x0028
- * 0: txdma_sw_reset
- * 1: rxdma_sw_reset
- * 2: txmac_sw_reset
- * 3: rxmac_sw_reset
- * 4: mac_sw_reset
- * 5: mac_stat_sw_reset
- * 6: mmc_sw_reset
- *31: selfclr_disable
- */
-
-/*
- * SLV Timer reg at address 0x002C (low 24 bits)
- */
-
-/*
- * MSI Configuration reg at address 0x0030
- */
-
-#define ET_MSI_VECTOR 0x0000001F
-#define ET_MSI_TC 0x00070000
-
-/*
- * Loopback reg located at address 0x0034
- */
-
-#define ET_LOOP_MAC 0x00000001
-#define ET_LOOP_DMA 0x00000002
-
-/*
- * GLOBAL Module of JAGCore Address Mapping
- * Located at address 0x0000
- */
-struct global_regs { /* Location: */
- u32 txq_start_addr; /* 0x0000 */
- u32 txq_end_addr; /* 0x0004 */
- u32 rxq_start_addr; /* 0x0008 */
- u32 rxq_end_addr; /* 0x000C */
- u32 pm_csr; /* 0x0010 */
- u32 unused; /* 0x0014 */
- u32 int_status; /* 0x0018 */
- u32 int_mask; /* 0x001C */
- u32 int_alias_clr_en; /* 0x0020 */
- u32 int_status_alias; /* 0x0024 */
- u32 sw_reset; /* 0x0028 */
- u32 slv_timer; /* 0x002C */
- u32 msi_config; /* 0x0030 */
- u32 loopback; /* 0x0034 */
- u32 watchdog_timer; /* 0x0038 */
-};
-
-
-/* START OF TXDMA REGISTER ADDRESS MAP */
-
-/*
- * txdma control status reg at address 0x1000
- */
-
-#define ET_TXDMA_CSR_HALT 0x00000001
-#define ET_TXDMA_DROP_TLP 0x00000002
-#define ET_TXDMA_CACHE_THRS 0x000000F0
-#define ET_TXDMA_CACHE_SHIFT 4
-#define ET_TXDMA_SNGL_EPKT 0x00000100
-#define ET_TXDMA_CLASS 0x00001E00
-
-/*
- * structure for txdma packet ring base address hi reg in txdma address map
- * located at address 0x1004
- * Defined earlier (u32)
- */
-
-/*
- * structure for txdma packet ring base address low reg in txdma address map
- * located at address 0x1008
- * Defined earlier (u32)
- */
-
-/*
- * structure for txdma packet ring number of descriptor reg in txdma address
- * map. Located at address 0x100C
- *
- * 31-10: unused
- * 9-0: pr ndes
- */
-
-#define ET_DMA12_MASK 0x0FFF /* 12 bit mask for DMA12W types */
-#define ET_DMA12_WRAP 0x1000
-#define ET_DMA10_MASK 0x03FF /* 10 bit mask for DMA10W types */
-#define ET_DMA10_WRAP 0x0400
-#define ET_DMA4_MASK 0x000F /* 4 bit mask for DMA4W types */
-#define ET_DMA4_WRAP 0x0010
-
-#define INDEX12(x) ((x) & ET_DMA12_MASK)
-#define INDEX10(x) ((x) & ET_DMA10_MASK)
-#define INDEX4(x) ((x) & ET_DMA4_MASK)
-
-extern inline void add_10bit(u32 *v, int n)
-{
- *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
-}
-
-extern inline void add_12bit(u32 *v, int n)
-{
- *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
-}
-
-/*
- * 10bit DMA with wrap
- * txdma tx queue write address reg in txdma address map at 0x1010
- * txdma tx queue write address external reg in txdma address map at 0x1014
- * txdma tx queue read address reg in txdma address map at 0x1018
- *
- * u32
- * txdma status writeback address hi reg in txdma address map at0x101C
- * txdma status writeback address lo reg in txdma address map at 0x1020
- *
- * 10bit DMA with wrap
- * txdma service request reg in txdma address map at 0x1024
- * structure for txdma service complete reg in txdma address map at 0x1028
- *
- * 4bit DMA with wrap
- * txdma tx descriptor cache read index reg in txdma address map at 0x102C
- * txdma tx descriptor cache write index reg in txdma address map at 0x1030
- *
- * txdma error reg in txdma address map at address 0x1034
- * 0: PyldResend
- * 1: PyldRewind
- * 4: DescrResend
- * 5: DescrRewind
- * 8: WrbkResend
- * 9: WrbkRewind
- */
-
-/*
- * Tx DMA Module of JAGCore Address Mapping
- * Located at address 0x1000
- */
-struct txdma_regs { /* Location: */
- u32 csr; /* 0x1000 */
- u32 pr_base_hi; /* 0x1004 */
- u32 pr_base_lo; /* 0x1008 */
- u32 pr_num_des; /* 0x100C */
- u32 txq_wr_addr; /* 0x1010 */
- u32 txq_wr_addr_ext; /* 0x1014 */
- u32 txq_rd_addr; /* 0x1018 */
- u32 dma_wb_base_hi; /* 0x101C */
- u32 dma_wb_base_lo; /* 0x1020 */
- u32 service_request; /* 0x1024 */
- u32 service_complete; /* 0x1028 */
- u32 cache_rd_index; /* 0x102C */
- u32 cache_wr_index; /* 0x1030 */
- u32 tx_dma_error; /* 0x1034 */
- u32 desc_abort_cnt; /* 0x1038 */
- u32 payload_abort_cnt; /* 0x103c */
- u32 writeback_abort_cnt; /* 0x1040 */
- u32 desc_timeout_cnt; /* 0x1044 */
- u32 payload_timeout_cnt; /* 0x1048 */
- u32 writeback_timeout_cnt; /* 0x104c */
- u32 desc_error_cnt; /* 0x1050 */
- u32 payload_error_cnt; /* 0x1054 */
- u32 writeback_error_cnt; /* 0x1058 */
- u32 dropped_tlp_cnt; /* 0x105c */
- u32 new_service_complete; /* 0x1060 */
- u32 ethernet_packet_cnt; /* 0x1064 */
-};
-
-/* END OF TXDMA REGISTER ADDRESS MAP */
-
-
-/* START OF RXDMA REGISTER ADDRESS MAP */
-
-/*
- * structure for control status reg in rxdma address map
- * Located at address 0x2000
- *
- * CSR
- * 0: halt
- * 1-3: tc
- * 4: fbr_big_endian
- * 5: psr_big_endian
- * 6: pkt_big_endian
- * 7: dma_big_endian
- * 8-9: fbr0_size
- * 10: fbr0_enable
- * 11-12: fbr1_size
- * 13: fbr1_enable
- * 14: unused
- * 15: pkt_drop_disable
- * 16: pkt_done_flush
- * 17: halt_status
- * 18-31: unused
- */
-
-
-/*
- * structure for dma writeback lo reg in rxdma address map
- * located at address 0x2004
- * Defined earlier (u32)
- */
-
-/*
- * structure for dma writeback hi reg in rxdma address map
- * located at address 0x2008
- * Defined earlier (u32)
- */
-
-/*
- * structure for number of packets done reg in rxdma address map
- * located at address 0x200C
- *
- * 31-8: unused
- * 7-0: num done
- */
-
-/*
- * structure for max packet time reg in rxdma address map
- * located at address 0x2010
- *
- * 31-18: unused
- * 17-0: time done
- */
-
-/*
- * structure for rx queue read address reg in rxdma address map
- * located at address 0x2014
- * Defined earlier (u32)
- */
-
-/*
- * structure for rx queue read address external reg in rxdma address map
- * located at address 0x2018
- * Defined earlier (u32)
- */
-
-/*
- * structure for rx queue write address reg in rxdma address map
- * located at address 0x201C
- * Defined earlier (u32)
- */
-
-/*
- * structure for packet status ring base address lo reg in rxdma address map
- * located at address 0x2020
- * Defined earlier (u32)
- */
-
-/*
- * structure for packet status ring base address hi reg in rxdma address map
- * located at address 0x2024
- * Defined earlier (u32)
- */
-
-/*
- * structure for packet status ring number of descriptors reg in rxdma address
- * map. Located at address 0x2028
- *
- * 31-12: unused
- * 11-0: psr ndes
- */
-
-/*
- * structure for packet status ring available offset reg in rxdma address map
- * located at address 0x202C
- *
- * 31-13: unused
- * 12: psr avail wrap
- * 11-0: psr avail
- */
-
-/*
- * structure for packet status ring full offset reg in rxdma address map
- * located at address 0x2030
- *
- * 31-13: unused
- * 12: psr full wrap
- * 11-0: psr full
- */
-
-/*
- * structure for packet status ring access index reg in rxdma address map
- * located at address 0x2034
- *
- * 31-5: unused
- * 4-0: psr_ai
- */
-
-/*
- * structure for packet status ring minimum descriptors reg in rxdma address
- * map. Located at address 0x2038
- *
- * 31-12: unused
- * 11-0: psr_min
- */
-
-/*
- * structure for free buffer ring base lo address reg in rxdma address map
- * located at address 0x203C
- * Defined earlier (u32)
- */
-
-/*
- * structure for free buffer ring base hi address reg in rxdma address map
- * located at address 0x2040
- * Defined earlier (u32)
- */
-
-/*
- * structure for free buffer ring number of descriptors reg in rxdma address
- * map. Located at address 0x2044
- *
- * 31-10: unused
- * 9-0: fbr ndesc
- */
-
-/*
- * structure for free buffer ring 0 available offset reg in rxdma address map
- * located at address 0x2048
- * Defined earlier (u32)
- */
-
-/*
- * structure for free buffer ring 0 full offset reg in rxdma address map
- * located at address 0x204C
- * Defined earlier (u32)
- */
-
-/*
- * structure for free buffer cache 0 full offset reg in rxdma address map
- * located at address 0x2050
- *
- * 31-5: unused
- * 4-0: fbc rdi
- */
-
-/*
- * structure for free buffer ring 0 minimum descriptor reg in rxdma address map
- * located at address 0x2054
- *
- * 31-10: unused
- * 9-0: fbr min
- */
-
-/*
- * structure for free buffer ring 1 base address lo reg in rxdma address map
- * located at address 0x2058 - 0x205C
- * Defined earlier (RXDMA_FBR_BASE_LO_t and RXDMA_FBR_BASE_HI_t)
- */
-
-/*
- * structure for free buffer ring 1 number of descriptors reg in rxdma address
- * map. Located at address 0x2060
- * Defined earlier (RXDMA_FBR_NUM_DES_t)
- */
-
-/*
- * structure for free buffer ring 1 available offset reg in rxdma address map
- * located at address 0x2064
- * Defined Earlier (RXDMA_FBR_AVAIL_OFFSET_t)
- */
-
-/*
- * structure for free buffer ring 1 full offset reg in rxdma address map
- * located at address 0x2068
- * Defined Earlier (RXDMA_FBR_FULL_OFFSET_t)
- */
-
-/*
- * structure for free buffer cache 1 read index reg in rxdma address map
- * located at address 0x206C
- * Defined Earlier (RXDMA_FBC_RD_INDEX_t)
- */
-
-/*
- * structure for free buffer ring 1 minimum descriptor reg in rxdma address map
- * located at address 0x2070
- * Defined Earlier (RXDMA_FBR_MIN_DES_t)
- */
-
-/*
- * Rx DMA Module of JAGCore Address Mapping
- * Located at address 0x2000
- */
-struct rxdma_regs { /* Location: */
- u32 csr; /* 0x2000 */
- u32 dma_wb_base_lo; /* 0x2004 */
- u32 dma_wb_base_hi; /* 0x2008 */
- u32 num_pkt_done; /* 0x200C */
- u32 max_pkt_time; /* 0x2010 */
- u32 rxq_rd_addr; /* 0x2014 */
- u32 rxq_rd_addr_ext; /* 0x2018 */
- u32 rxq_wr_addr; /* 0x201C */
- u32 psr_base_lo; /* 0x2020 */
- u32 psr_base_hi; /* 0x2024 */
- u32 psr_num_des; /* 0x2028 */
- u32 psr_avail_offset; /* 0x202C */
- u32 psr_full_offset; /* 0x2030 */
- u32 psr_access_index; /* 0x2034 */
- u32 psr_min_des; /* 0x2038 */
- u32 fbr0_base_lo; /* 0x203C */
- u32 fbr0_base_hi; /* 0x2040 */
- u32 fbr0_num_des; /* 0x2044 */
- u32 fbr0_avail_offset; /* 0x2048 */
- u32 fbr0_full_offset; /* 0x204C */
- u32 fbr0_rd_index; /* 0x2050 */
- u32 fbr0_min_des; /* 0x2054 */
- u32 fbr1_base_lo; /* 0x2058 */
- u32 fbr1_base_hi; /* 0x205C */
- u32 fbr1_num_des; /* 0x2060 */
- u32 fbr1_avail_offset; /* 0x2064 */
- u32 fbr1_full_offset; /* 0x2068 */
- u32 fbr1_rd_index; /* 0x206C */
- u32 fbr1_min_des; /* 0x2070 */
-};
-
-/* END OF RXDMA REGISTER ADDRESS MAP */
-
-
-/* START OF TXMAC REGISTER ADDRESS MAP */
-
-/*
- * structure for control reg in txmac address map
- * located at address 0x3000
- *
- * bits
- * 31-8: unused
- * 7: cklseg_disable
- * 6: ckbcnt_disable
- * 5: cksegnum
- * 4: async_disable
- * 3: fc_disable
- * 2: mcif_disable
- * 1: mif_disable
- * 0: txmac_en
- */
-
-/*
- * structure for shadow pointer reg in txmac address map
- * located at address 0x3004
- * 31-27: reserved
- * 26-16: txq rd ptr
- * 15-11: reserved
- * 10-0: txq wr ptr
- */
-
-/*
- * structure for error count reg in txmac address map
- * located at address 0x3008
- *
- * 31-12: unused
- * 11-8: reserved
- * 7-4: txq_underrun
- * 3-0: fifo_underrun
- */
-
-/*
- * structure for max fill reg in txmac address map
- * located at address 0x300C
- * 31-12: unused
- * 11-0: max fill
- */
-
-/*
- * structure for cf parameter reg in txmac address map
- * located at address 0x3010
- * 31-16: cfep
- * 15-0: cfpt
- */
-
-/*
- * structure for tx test reg in txmac address map
- * located at address 0x3014
- * 31-17: unused
- * 16: reserved1
- * 15: txtest_en
- * 14-11: unused
- * 10-0: txq test pointer
- */
-
-/*
- * structure for error reg in txmac address map
- * located at address 0x3018
- *
- * 31-9: unused
- * 8: fifo_underrun
- * 7-6: unused
- * 5: ctrl2_err
- * 4: txq_underrun
- * 3: bcnt_err
- * 2: lseg_err
- * 1: segnum_err
- * 0: seg0_err
- */
-
-/*
- * structure for error interrupt reg in txmac address map
- * located at address 0x301C
- *
- * 31-9: unused
- * 8: fifo_underrun
- * 7-6: unused
- * 5: ctrl2_err
- * 4: txq_underrun
- * 3: bcnt_err
- * 2: lseg_err
- * 1: segnum_err
- * 0: seg0_err
- */
-
-/*
- * structure for error interrupt reg in txmac address map
- * located at address 0x3020
- *
- * 31-2: unused
- * 1: bp_req
- * 0: bp_xonxoff
- */
-
-/*
- * Tx MAC Module of JAGCore Address Mapping
- */
-struct txmac_regs { /* Location: */
- u32 ctl; /* 0x3000 */
- u32 shadow_ptr; /* 0x3004 */
- u32 err_cnt; /* 0x3008 */
- u32 max_fill; /* 0x300C */
- u32 cf_param; /* 0x3010 */
- u32 tx_test; /* 0x3014 */
- u32 err; /* 0x3018 */
- u32 err_int; /* 0x301C */
- u32 bp_ctrl; /* 0x3020 */
-};
-
-/* END OF TXMAC REGISTER ADDRESS MAP */
-
-/* START OF RXMAC REGISTER ADDRESS MAP */
-
-/*
- * structure for rxmac control reg in rxmac address map
- * located at address 0x4000
- *
- * 31-7: reserved
- * 6: rxmac_int_disable
- * 5: async_disable
- * 4: mif_disable
- * 3: wol_disable
- * 2: pkt_filter_disable
- * 1: mcif_disable
- * 0: rxmac_en
- */
-
-/*
- * structure for Wake On Lan Control and CRC 0 reg in rxmac address map
- * located at address 0x4004
- * 31-16: crc
- * 15-12: reserved
- * 11: ignore_pp
- * 10: ignore_mp
- * 9: clr_intr
- * 8: ignore_link_chg
- * 7: ignore_uni
- * 6: ignore_multi
- * 5: ignore_broad
- * 4-0: valid_crc 4-0
- */
-
-/*
- * structure for CRC 1 and CRC 2 reg in rxmac address map
- * located at address 0x4008
- *
- * 31-16: crc2
- * 15-0: crc1
- */
-
-/*
- * structure for CRC 3 and CRC 4 reg in rxmac address map
- * located at address 0x400C
- *
- * 31-16: crc4
- * 15-0: crc3
- */
-
-/*
- * structure for Wake On Lan Source Address Lo reg in rxmac address map
- * located at address 0x4010
- *
- * 31-24: sa3
- * 23-16: sa4
- * 15-8: sa5
- * 7-0: sa6
- */
-
-#define ET_WOL_LO_SA3_SHIFT 24
-#define ET_WOL_LO_SA4_SHIFT 16
-#define ET_WOL_LO_SA5_SHIFT 8
-
-/*
- * structure for Wake On Lan Source Address Hi reg in rxmac address map
- * located at address 0x4014
- *
- * 31-16: reserved
- * 15-8: sa1
- * 7-0: sa2
- */
-
-#define ET_WOL_HI_SA1_SHIFT 8
-
-/*
- * structure for Wake On Lan mask reg in rxmac address map
- * located at address 0x4018 - 0x4064
- * Defined earlier (u32)
- */
-
-/*
- * structure for Unicast Paket Filter Address 1 reg in rxmac address map
- * located at address 0x4068
- *
- * 31-24: addr1_3
- * 23-16: addr1_4
- * 15-8: addr1_5
- * 7-0: addr1_6
- */
-
-#define ET_UNI_PF_ADDR1_3_SHIFT 24
-#define ET_UNI_PF_ADDR1_4_SHIFT 16
-#define ET_UNI_PF_ADDR1_5_SHIFT 8
-
-/*
- * structure for Unicast Paket Filter Address 2 reg in rxmac address map
- * located at address 0x406C
- *
- * 31-24: addr2_3
- * 23-16: addr2_4
- * 15-8: addr2_5
- * 7-0: addr2_6
- */
-
-#define ET_UNI_PF_ADDR2_3_SHIFT 24
-#define ET_UNI_PF_ADDR2_4_SHIFT 16
-#define ET_UNI_PF_ADDR2_5_SHIFT 8
-
-/*
- * structure for Unicast Paket Filter Address 1 & 2 reg in rxmac address map
- * located at address 0x4070
- *
- * 31-24: addr2_1
- * 23-16: addr2_2
- * 15-8: addr1_1
- * 7-0: addr1_2
- */
-
-#define ET_UNI_PF_ADDR2_1_SHIFT 24
-#define ET_UNI_PF_ADDR2_2_SHIFT 16
-#define ET_UNI_PF_ADDR1_1_SHIFT 8
-
-
-/*
- * structure for Multicast Hash reg in rxmac address map
- * located at address 0x4074 - 0x4080
- * Defined earlier (u32)
- */
-
-/*
- * structure for Packet Filter Control reg in rxmac address map
- * located at address 0x4084
- *
- * 31-23: unused
- * 22-16: min_pkt_size
- * 15-4: unused
- * 3: filter_frag_en
- * 2: filter_uni_en
- * 1: filter_multi_en
- * 0: filter_broad_en
- */
-
-/*
- * structure for Memory Controller Interface Control Max Segment reg in rxmac
- * address map. Located at address 0x4088
- *
- * 31-10: reserved
- * 9-2: max_size
- * 1: fc_en
- * 0: seg_en
- */
-
-/*
- * structure for Memory Controller Interface Water Mark reg in rxmac address
- * map. Located at address 0x408C
- *
- * 31-26: unused
- * 25-16: mark_hi
- * 15-10: unused
- * 9-0: mark_lo
- */
-
-/*
- * structure for Rx Queue Dialog reg in rxmac address map.
- * located at address 0x4090
- *
- * 31-26: reserved
- * 25-16: rd_ptr
- * 15-10: reserved
- * 9-0: wr_ptr
- */
-
-/*
- * structure for space available reg in rxmac address map.
- * located at address 0x4094
- *
- * 31-17: reserved
- * 16: space_avail_en
- * 15-10: reserved
- * 9-0: space_avail
- */
-
-/*
- * structure for management interface reg in rxmac address map.
- * located at address 0x4098
- *
- * 31-18: reserved
- * 17: drop_pkt_en
- * 16-0: drop_pkt_mask
- */
-
-/*
- * structure for Error reg in rxmac address map.
- * located at address 0x409C
- *
- * 31-4: unused
- * 3: mif
- * 2: async
- * 1: pkt_filter
- * 0: mcif
- */
-
-/*
- * Rx MAC Module of JAGCore Address Mapping
- */
-struct rxmac_regs { /* Location: */
- u32 ctrl; /* 0x4000 */
- u32 crc0; /* 0x4004 */
- u32 crc12; /* 0x4008 */
- u32 crc34; /* 0x400C */
- u32 sa_lo; /* 0x4010 */
- u32 sa_hi; /* 0x4014 */
- u32 mask0_word0; /* 0x4018 */
- u32 mask0_word1; /* 0x401C */
- u32 mask0_word2; /* 0x4020 */
- u32 mask0_word3; /* 0x4024 */
- u32 mask1_word0; /* 0x4028 */
- u32 mask1_word1; /* 0x402C */
- u32 mask1_word2; /* 0x4030 */
- u32 mask1_word3; /* 0x4034 */
- u32 mask2_word0; /* 0x4038 */
- u32 mask2_word1; /* 0x403C */
- u32 mask2_word2; /* 0x4040 */
- u32 mask2_word3; /* 0x4044 */
- u32 mask3_word0; /* 0x4048 */
- u32 mask3_word1; /* 0x404C */
- u32 mask3_word2; /* 0x4050 */
- u32 mask3_word3; /* 0x4054 */
- u32 mask4_word0; /* 0x4058 */
- u32 mask4_word1; /* 0x405C */
- u32 mask4_word2; /* 0x4060 */
- u32 mask4_word3; /* 0x4064 */
- u32 uni_pf_addr1; /* 0x4068 */
- u32 uni_pf_addr2; /* 0x406C */
- u32 uni_pf_addr3; /* 0x4070 */
- u32 multi_hash1; /* 0x4074 */
- u32 multi_hash2; /* 0x4078 */
- u32 multi_hash3; /* 0x407C */
- u32 multi_hash4; /* 0x4080 */
- u32 pf_ctrl; /* 0x4084 */
- u32 mcif_ctrl_max_seg; /* 0x4088 */
- u32 mcif_water_mark; /* 0x408C */
- u32 rxq_diag; /* 0x4090 */
- u32 space_avail; /* 0x4094 */
-
- u32 mif_ctrl; /* 0x4098 */
- u32 err_reg; /* 0x409C */
-};
-
-/* END OF RXMAC REGISTER ADDRESS MAP */
-
-
-/* START OF MAC REGISTER ADDRESS MAP */
-
-/*
- * structure for configuration #1 reg in mac address map.
- * located at address 0x5000
- *
- * 31: soft reset
- * 30: sim reset
- * 29-20: reserved
- * 19: reset rx mc
- * 18: reset tx mc
- * 17: reset rx func
- * 16: reset tx fnc
- * 15-9: reserved
- * 8: loopback
- * 7-6: reserved
- * 5: rx flow
- * 4: tx flow
- * 3: syncd rx en
- * 2: rx enable
- * 1: syncd tx en
- * 0: tx enable
- */
-
-#define CFG1_LOOPBACK 0x00000100
-#define CFG1_RX_FLOW 0x00000020
-#define CFG1_TX_FLOW 0x00000010
-#define CFG1_RX_ENABLE 0x00000004
-#define CFG1_TX_ENABLE 0x00000001
-#define CFG1_WAIT 0x0000000A /* RX & TX syncd */
-
-/*
- * structure for configuration #2 reg in mac address map.
- * located at address 0x5004
- * 31-16: reserved
- * 15-12: preamble
- * 11-10: reserved
- * 9-8: if mode
- * 7-6: reserved
- * 5: huge frame
- * 4: length check
- * 3: undefined
- * 2: pad crc
- * 1: crc enable
- * 0: full duplex
- */
-
-
-/*
- * structure for Interpacket gap reg in mac address map.
- * located at address 0x5008
- *
- * 31: reserved
- * 30-24: non B2B ipg 1
- * 23: undefined
- * 22-16: non B2B ipg 2
- * 15-8: Min ifg enforce
- * 7-0: B2B ipg
- *
- * structure for half duplex reg in mac address map.
- * located at address 0x500C
- * 31-24: reserved
- * 23-20: Alt BEB trunc
- * 19: Alt BEB enable
- * 18: BP no backoff
- * 17: no backoff
- * 16: excess defer
- * 15-12: re-xmit max
- * 11-10: reserved
- * 9-0: collision window
- */
-
-/*
- * structure for Maximum Frame Length reg in mac address map.
- * located at address 0x5010: bits 0-15 hold the length.
- */
-
-/*
- * structure for Reserve 1 reg in mac address map.
- * located at address 0x5014 - 0x5018
- * Defined earlier (u32)
- */
-
-/*
- * structure for Test reg in mac address map.
- * located at address 0x501C
- * test: bits 0-2, rest unused
- */
-
-/*
- * structure for MII Management Configuration reg in mac address map.
- * located at address 0x5020
- *
- * 31: reset MII mgmt
- * 30-6: unused
- * 5: scan auto increment
- * 4: preamble suppress
- * 3: undefined
- * 2-0: mgmt clock reset
- */
-
-/*
- * structure for MII Management Command reg in mac address map.
- * located at address 0x5024
- * bit 1: scan cycle
- * bit 0: read cycle
- */
-
-/*
- * structure for MII Management Address reg in mac address map.
- * located at address 0x5028
- * 31-13: reserved
- * 12-8: phy addr
- * 7-5: reserved
- * 4-0: register
- */
-
-#define MII_ADDR(phy, reg) ((phy) << 8 | (reg))
-
-/*
- * structure for MII Management Control reg in mac address map.
- * located at address 0x502C
- * 31-16: reserved
- * 15-0: phy control
- */
-
-/*
- * structure for MII Management Status reg in mac address map.
- * located at address 0x5030
- * 31-16: reserved
- * 15-0: phy control
- */
-
-/*
- * structure for MII Management Indicators reg in mac address map.
- * located at address 0x5034
- * 31-3: reserved
- * 2: not valid
- * 1: scanning
- * 0: busy
- */
-
-#define MGMT_BUSY 0x00000001 /* busy */
-#define MGMT_WAIT 0x00000005 /* busy | not valid */
-
-/*
- * structure for Interface Control reg in mac address map.
- * located at address 0x5038
- *
- * 31: reset if module
- * 30-28: reserved
- * 27: tbi mode
- * 26: ghd mode
- * 25: lhd mode
- * 24: phy mode
- * 23: reset per mii
- * 22-17: reserved
- * 16: speed
- * 15: reset pe100x
- * 14-11: reserved
- * 10: force quiet
- * 9: no cipher
- * 8: disable link fail
- * 7: reset gpsi
- * 6-1: reserved
- * 0: enable jabber protection
- */
-
-/*
- * structure for Interface Status reg in mac address map.
- * located at address 0x503C
- *
- * 31-10: reserved
- * 9: excess_defer
- * 8: clash
- * 7: phy_jabber
- * 6: phy_link_ok
- * 5: phy_full_duplex
- * 4: phy_speed
- * 3: pe100x_link_fail
- * 2: pe10t_loss_carrier
- * 1: pe10t_sqe_error
- * 0: pe10t_jabber
- */
-
-/*
- * structure for Mac Station Address, Part 1 reg in mac address map.
- * located at address 0x5040
- *
- * 31-24: Octet6
- * 23-16: Octet5
- * 15-8: Octet4
- * 7-0: Octet3
- */
-
-#define ET_MAC_STATION_ADDR1_OC6_SHIFT 24
-#define ET_MAC_STATION_ADDR1_OC5_SHIFT 16
-#define ET_MAC_STATION_ADDR1_OC4_SHIFT 8
-
-/*
- * structure for Mac Station Address, Part 2 reg in mac address map.
- * located at address 0x5044
- *
- * 31-24: Octet2
- * 23-16: Octet1
- * 15-0: reserved
- */
-
-#define ET_MAC_STATION_ADDR2_OC2_SHIFT 24
-#define ET_MAC_STATION_ADDR2_OC1_SHIFT 16
-
-/*
- * MAC Module of JAGCore Address Mapping
- */
-struct mac_regs { /* Location: */
- u32 cfg1; /* 0x5000 */
- u32 cfg2; /* 0x5004 */
- u32 ipg; /* 0x5008 */
- u32 hfdp; /* 0x500C */
- u32 max_fm_len; /* 0x5010 */
- u32 rsv1; /* 0x5014 */
- u32 rsv2; /* 0x5018 */
- u32 mac_test; /* 0x501C */
- u32 mii_mgmt_cfg; /* 0x5020 */
- u32 mii_mgmt_cmd; /* 0x5024 */
- u32 mii_mgmt_addr; /* 0x5028 */
- u32 mii_mgmt_ctrl; /* 0x502C */
- u32 mii_mgmt_stat; /* 0x5030 */
- u32 mii_mgmt_indicator; /* 0x5034 */
- u32 if_ctrl; /* 0x5038 */
- u32 if_stat; /* 0x503C */
- u32 station_addr_1; /* 0x5040 */
- u32 station_addr_2; /* 0x5044 */
-};
-
-/* END OF MAC REGISTER ADDRESS MAP */
-
-/* START OF MAC STAT REGISTER ADDRESS MAP */
-
-/*
- * structure for Carry Register One and it's Mask Register reg located in mac
- * stat address map address 0x6130 and 0x6138.
- *
- * 31: tr64
- * 30: tr127
- * 29: tr255
- * 28: tr511
- * 27: tr1k
- * 26: trmax
- * 25: trmgv
- * 24-17: unused
- * 16: rbyt
- * 15: rpkt
- * 14: rfcs
- * 13: rmca
- * 12: rbca
- * 11: rxcf
- * 10: rxpf
- * 9: rxuo
- * 8: raln
- * 7: rflr
- * 6: rcde
- * 5: rcse
- * 4: rund
- * 3: rovr
- * 2: rfrg
- * 1: rjbr
- * 0: rdrp
- */
-
-/*
- * structure for Carry Register Two Mask Register reg in mac stat address map.
- * located at address 0x613C
- *
- * 31-20: unused
- * 19: tjbr
- * 18: tfcs
- * 17: txcf
- * 16: tovr
- * 15: tund
- * 14: trfg
- * 13: tbyt
- * 12: tpkt
- * 11: tmca
- * 10: tbca
- * 9: txpf
- * 8: tdfr
- * 7: tedf
- * 6: tscl
- * 5: tmcl
- * 4: tlcl
- * 3: txcl
- * 2: tncl
- * 1: tpfh
- * 0: tdrp
- */
-
-/*
- * MAC STATS Module of JAGCore Address Mapping
- */
-struct macstat_regs { /* Location: */
- u32 pad[32]; /* 0x6000 - 607C */
-
- /* Tx/Rx 0-64 Byte Frame Counter */
- u32 txrx_0_64_byte_frames; /* 0x6080 */
-
- /* Tx/Rx 65-127 Byte Frame Counter */
- u32 txrx_65_127_byte_frames; /* 0x6084 */
-
- /* Tx/Rx 128-255 Byte Frame Counter */
- u32 txrx_128_255_byte_frames; /* 0x6088 */
-
- /* Tx/Rx 256-511 Byte Frame Counter */
- u32 txrx_256_511_byte_frames; /* 0x608C */
-
- /* Tx/Rx 512-1023 Byte Frame Counter */
- u32 txrx_512_1023_byte_frames; /* 0x6090 */
-
- /* Tx/Rx 1024-1518 Byte Frame Counter */
- u32 txrx_1024_1518_byte_frames; /* 0x6094 */
-
- /* Tx/Rx 1519-1522 Byte Good VLAN Frame Count */
- u32 txrx_1519_1522_gvln_frames; /* 0x6098 */
-
- /* Rx Byte Counter */
- u32 rx_bytes; /* 0x609C */
-
- /* Rx Packet Counter */
- u32 rx_packets; /* 0x60A0 */
-
- /* Rx FCS Error Counter */
- u32 rx_fcs_errs; /* 0x60A4 */
-
- /* Rx Multicast Packet Counter */
- u32 rx_multicast_packets; /* 0x60A8 */
-
- /* Rx Broadcast Packet Counter */
- u32 rx_broadcast_packets; /* 0x60AC */
-
- /* Rx Control Frame Packet Counter */
- u32 rx_control_frames; /* 0x60B0 */
-
- /* Rx Pause Frame Packet Counter */
- u32 rx_pause_frames; /* 0x60B4 */
-
- /* Rx Unknown OP Code Counter */
- u32 rx_unknown_opcodes; /* 0x60B8 */
-
- /* Rx Alignment Error Counter */
- u32 rx_align_errs; /* 0x60BC */
-
- /* Rx Frame Length Error Counter */
- u32 rx_frame_len_errs; /* 0x60C0 */
-
- /* Rx Code Error Counter */
- u32 rx_code_errs; /* 0x60C4 */
-
- /* Rx Carrier Sense Error Counter */
- u32 rx_carrier_sense_errs; /* 0x60C8 */
-
- /* Rx Undersize Packet Counter */
- u32 rx_undersize_packets; /* 0x60CC */
-
- /* Rx Oversize Packet Counter */
- u32 rx_oversize_packets; /* 0x60D0 */
-
- /* Rx Fragment Counter */
- u32 rx_fragment_packets; /* 0x60D4 */
-
- /* Rx Jabber Counter */
- u32 rx_jabbers; /* 0x60D8 */
-
- /* Rx Drop */
- u32 rx_drops; /* 0x60DC */
-
- /* Tx Byte Counter */
- u32 tx_bytes; /* 0x60E0 */
-
- /* Tx Packet Counter */
- u32 tx_packets; /* 0x60E4 */
-
- /* Tx Multicast Packet Counter */
- u32 tx_multicast_packets; /* 0x60E8 */
-
- /* Tx Broadcast Packet Counter */
- u32 tx_broadcast_packets; /* 0x60EC */
-
- /* Tx Pause Control Frame Counter */
- u32 tx_pause_frames; /* 0x60F0 */
-
- /* Tx Deferral Packet Counter */
- u32 tx_deferred; /* 0x60F4 */
-
- /* Tx Excessive Deferral Packet Counter */
- u32 tx_excessive_deferred; /* 0x60F8 */
-
- /* Tx Single Collision Packet Counter */
- u32 tx_single_collisions; /* 0x60FC */
-
- /* Tx Multiple Collision Packet Counter */
- u32 tx_multiple_collisions; /* 0x6100 */
-
- /* Tx Late Collision Packet Counter */
- u32 tx_late_collisions; /* 0x6104 */
-
- /* Tx Excessive Collision Packet Counter */
- u32 tx_excessive_collisions; /* 0x6108 */
-
- /* Tx Total Collision Packet Counter */
- u32 tx_total_collisions; /* 0x610C */
-
- /* Tx Pause Frame Honored Counter */
- u32 tx_pause_honored_frames; /* 0x6110 */
-
- /* Tx Drop Frame Counter */
- u32 tx_drops; /* 0x6114 */
-
- /* Tx Jabber Frame Counter */
- u32 tx_jabbers; /* 0x6118 */
-
- /* Tx FCS Error Counter */
- u32 tx_fcs_errs; /* 0x611C */
-
- /* Tx Control Frame Counter */
- u32 tx_control_frames; /* 0x6120 */
-
- /* Tx Oversize Frame Counter */
- u32 tx_oversize_frames; /* 0x6124 */
-
- /* Tx Undersize Frame Counter */
- u32 tx_undersize_frames; /* 0x6128 */
-
- /* Tx Fragments Frame Counter */
- u32 tx_fragments; /* 0x612C */
-
- /* Carry Register One Register */
- u32 carry_reg1; /* 0x6130 */
-
- /* Carry Register Two Register */
- u32 carry_reg2; /* 0x6134 */
-
- /* Carry Register One Mask Register */
- u32 carry_reg1_mask; /* 0x6138 */
-
- /* Carry Register Two Mask Register */
- u32 carry_reg2_mask; /* 0x613C */
-};
-
-/* END OF MAC STAT REGISTER ADDRESS MAP */
-
-
-/* START OF MMC REGISTER ADDRESS MAP */
-
-/*
- * Main Memory Controller Control reg in mmc address map.
- * located at address 0x7000
- */
-
-#define ET_MMC_ENABLE 1
-#define ET_MMC_ARB_DISABLE 2
-#define ET_MMC_RXMAC_DISABLE 4
-#define ET_MMC_TXMAC_DISABLE 8
-#define ET_MMC_TXDMA_DISABLE 16
-#define ET_MMC_RXDMA_DISABLE 32
-#define ET_MMC_FORCE_CE 64
-
-/*
- * Main Memory Controller Host Memory Access Address reg in mmc
- * address map. Located at address 0x7004. Top 16 bits hold the address bits
- */
-
-#define ET_SRAM_REQ_ACCESS 1
-#define ET_SRAM_WR_ACCESS 2
-#define ET_SRAM_IS_CTRL 4
-
-/*
- * structure for Main Memory Controller Host Memory Access Data reg in mmc
- * address map. Located at address 0x7008 - 0x7014
- * Defined earlier (u32)
- */
-
-/*
- * Memory Control Module of JAGCore Address Mapping
- */
-struct mmc_regs { /* Location: */
- u32 mmc_ctrl; /* 0x7000 */
- u32 sram_access; /* 0x7004 */
- u32 sram_word1; /* 0x7008 */
- u32 sram_word2; /* 0x700C */
- u32 sram_word3; /* 0x7010 */
- u32 sram_word4; /* 0x7014 */
-};
-
-/* END OF MMC REGISTER ADDRESS MAP */
-
-
-/*
- * JAGCore Address Mapping
- */
-struct address_map {
- struct global_regs global;
- /* unused section of global address map */
- u8 unused_global[4096 - sizeof(struct global_regs)];
- struct txdma_regs txdma;
- /* unused section of txdma address map */
- u8 unused_txdma[4096 - sizeof(struct txdma_regs)];
- struct rxdma_regs rxdma;
- /* unused section of rxdma address map */
- u8 unused_rxdma[4096 - sizeof(struct rxdma_regs)];
- struct txmac_regs txmac;
- /* unused section of txmac address map */
- u8 unused_txmac[4096 - sizeof(struct txmac_regs)];
- struct rxmac_regs rxmac;
- /* unused section of rxmac address map */
- u8 unused_rxmac[4096 - sizeof(struct rxmac_regs)];
- struct mac_regs mac;
- /* unused section of mac address map */
- u8 unused_mac[4096 - sizeof(struct mac_regs)];
- struct macstat_regs macstat;
- /* unused section of mac stat address map */
- u8 unused_mac_stat[4096 - sizeof(struct macstat_regs)];
- struct mmc_regs mmc;
- /* unused section of mmc address map */
- u8 unused_mmc[4096 - sizeof(struct mmc_regs)];
- /* unused section of address map */
- u8 unused_[1015808];
-
- u8 unused_exp_rom[4096]; /* MGS-size TBD */
- u8 unused__[524288]; /* unused section of address map */
-};
-
-#endif /* _ET1310_ADDRESS_MAP_H_ */
diff --git a/drivers/staging/et131x/et1310_eeprom.c b/drivers/staging/et131x/et1310_eeprom.c
deleted file mode 100644
index 237584001a8..00000000000
--- a/drivers/staging/et131x/et1310_eeprom.c
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et1310_eeprom.c - Code used to access the device's EEPROM
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#include "et131x_version.h"
-#include "et131x_defs.h"
-
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <linux/io.h>
-#include <asm/system.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-
-#include "et1310_phy.h"
-#include "et131x_adapter.h"
-#include "et131x.h"
-
-/*
- * EEPROM Defines
- */
-
-/* LBCIF Register Groups (addressed via 32-bit offsets) */
-#define LBCIF_DWORD0_GROUP 0xAC
-#define LBCIF_DWORD1_GROUP 0xB0
-
-/* LBCIF Registers (addressed via 8-bit offsets) */
-#define LBCIF_ADDRESS_REGISTER 0xAC
-#define LBCIF_DATA_REGISTER 0xB0
-#define LBCIF_CONTROL_REGISTER 0xB1
-#define LBCIF_STATUS_REGISTER 0xB2
-
-/* LBCIF Control Register Bits */
-#define LBCIF_CONTROL_SEQUENTIAL_READ 0x01
-#define LBCIF_CONTROL_PAGE_WRITE 0x02
-#define LBCIF_CONTROL_EEPROM_RELOAD 0x08
-#define LBCIF_CONTROL_TWO_BYTE_ADDR 0x20
-#define LBCIF_CONTROL_I2C_WRITE 0x40
-#define LBCIF_CONTROL_LBCIF_ENABLE 0x80
-
-/* LBCIF Status Register Bits */
-#define LBCIF_STATUS_PHY_QUEUE_AVAIL 0x01
-#define LBCIF_STATUS_I2C_IDLE 0x02
-#define LBCIF_STATUS_ACK_ERROR 0x04
-#define LBCIF_STATUS_GENERAL_ERROR 0x08
-#define LBCIF_STATUS_CHECKSUM_ERROR 0x40
-#define LBCIF_STATUS_EEPROM_PRESENT 0x80
-
-/* Miscellaneous Constraints */
-#define MAX_NUM_REGISTER_POLLS 1000
-#define MAX_NUM_WRITE_RETRIES 2
-
-static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
-{
- u32 reg;
- int i;
-
- /*
- * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
- * bits 7,1:0 both equal to 1, at least once after reset.
- * Subsequent operations need only to check that bits 1:0 are equal
- * to 1 prior to starting a single byte read/write
- */
-
- for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
- /* Read registers grouped in DWORD1 */
- if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
- return -EIO;
-
- /* I2C idle and Phy Queue Avail both true */
- if ((reg & 0x3000) == 0x3000) {
- if (status)
- *status = reg;
- return reg & 0xFF;
- }
- }
- return -ETIMEDOUT;
-}
-
-
-/**
- * eeprom_write - Write a byte to the ET1310's EEPROM
- * @etdev: pointer to our private adapter structure
- * @addr: the address to write
- * @data: the value to write
- *
- * Returns 1 for a successful write.
- */
-static int eeprom_write(struct et131x_adapter *etdev, u32 addr, u8 data)
-{
- struct pci_dev *pdev = etdev->pdev;
- int index = 0;
- int retries;
- int err = 0;
- int i2c_wack = 0;
- int writeok = 0;
- u32 status;
- u32 val = 0;
-
- /*
- * For an EEPROM, an I2C single byte write is defined as a START
- * condition followed by the device address, EEPROM address, one byte
- * of data and a STOP condition. The STOP condition will trigger the
- * EEPROM's internally timed write cycle to the nonvolatile memory.
- * All inputs are disabled during this write cycle and the EEPROM will
- * not respond to any access until the internal write is complete.
- */
-
- err = eeprom_wait_ready(pdev, NULL);
- if (err)
- return err;
-
- /*
- * 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
- * and bits 1:0 both =0. Bit 5 should be set according to the
- * type of EEPROM being accessed (1=two byte addressing, 0=one
- * byte addressing).
- */
- if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
- LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
- return -EIO;
-
- i2c_wack = 1;
-
- /* Prepare EEPROM address for Step 3 */
-
- for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
- /* Write the address to the LBCIF Address Register */
- if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
- break;
- /*
- * Write the data to the LBCIF Data Register (the I2C write
- * will begin).
- */
- if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
- break;
- /*
- * Monitor bit 1:0 of the LBCIF Status Register. When bits
- * 1:0 are both equal to 1, the I2C write has completed and the
- * internal write cycle of the EEPROM is about to start.
- * (bits 1:0 = 01 is a legal state while waiting from both
- * equal to 1, but bits 1:0 = 10 is invalid and implies that
- * something is broken).
- */
- err = eeprom_wait_ready(pdev, &status);
- if (err < 0)
- return 0;
-
- /*
- * Check bit 3 of the LBCIF Status Register. If equal to 1,
- * an error has occurred.Don't break here if we are revision
- * 1, this is so we do a blind write for load bug.
- */
- if ((status & LBCIF_STATUS_GENERAL_ERROR)
- && etdev->pdev->revision == 0)
- break;
-
- /*
- * Check bit 2 of the LBCIF Status Register. If equal to 1 an
- * ACK error has occurred on the address phase of the write.
- * This could be due to an actual hardware failure or the
- * EEPROM may still be in its internal write cycle from a
- * previous write. This write operation was ignored and must be
- *repeated later.
- */
- if (status & LBCIF_STATUS_ACK_ERROR) {
- /*
- * This could be due to an actual hardware failure
- * or the EEPROM may still be in its internal write
- * cycle from a previous write. This write operation
- * was ignored and must be repeated later.
- */
- udelay(10);
- continue;
- }
-
- writeok = 1;
- break;
- }
-
- /*
- * Set bit 6 of the LBCIF Control Register = 0.
- */
- udelay(10);
-
- while (i2c_wack) {
- if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
- LBCIF_CONTROL_LBCIF_ENABLE))
- writeok = 0;
-
- /* Do read until internal ACK_ERROR goes away meaning write
- * completed
- */
- do {
- pci_write_config_dword(pdev,
- LBCIF_ADDRESS_REGISTER,
- addr);
- do {
- pci_read_config_dword(pdev,
- LBCIF_DATA_REGISTER, &val);
- } while ((val & 0x00010000) == 0);
- } while (val & 0x00040000);
-
- if ((val & 0xFF00) != 0xC000 || index == 10000)
- break;
- index++;
- }
- return writeok ? 0 : -EIO;
-}
-
-/**
- * eeprom_read - Read a byte from the ET1310's EEPROM
- * @etdev: pointer to our private adapter structure
- * @addr: the address from which to read
- * @pdata: a pointer to a byte in which to store the value of the read
- * @eeprom_id: the ID of the EEPROM
- * @addrmode: how the EEPROM is to be accessed
- *
- * Returns 1 for a successful read
- */
-static int eeprom_read(struct et131x_adapter *etdev, u32 addr, u8 *pdata)
-{
- struct pci_dev *pdev = etdev->pdev;
- int err;
- u32 status;
-
- /*
- * A single byte read is similar to the single byte write, with the
- * exception of the data flow:
- */
-
- err = eeprom_wait_ready(pdev, NULL);
- if (err)
- return err;
- /*
- * Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
- * and bits 1:0 both =0. Bit 5 should be set according to the type
- * of EEPROM being accessed (1=two byte addressing, 0=one byte
- * addressing).
- */
- if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
- LBCIF_CONTROL_LBCIF_ENABLE))
- return -EIO;
- /*
- * Write the address to the LBCIF Address Register (I2C read will
- * begin).
- */
- if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
- return -EIO;
- /*
- * Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
- * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
- * has occurred).
- */
- err = eeprom_wait_ready(pdev, &status);
- if (err < 0)
- return err;
- /*
- * Regardless of error status, read data byte from LBCIF Data
- * Register.
- */
- *pdata = err;
- /*
- * Check bit 2 of the LBCIF Status Register. If = 1,
- * then an error has occurred.
- */
- return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
-}
-
-int et131x_init_eeprom(struct et131x_adapter *etdev)
-{
- struct pci_dev *pdev = etdev->pdev;
- u8 eestatus;
-
- /* We first need to check the EEPROM Status code located at offset
- * 0xB2 of config space
- */
- pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
- &eestatus);
-
- /* THIS IS A WORKAROUND:
- * I need to call this function twice to get my card in a
- * LG M1 Express Dual running. I tried also a msleep before this
- * function, because I thougth there could be some time condidions
- * but it didn't work. Call the whole function twice also work.
- */
- if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
- dev_err(&pdev->dev,
- "Could not read PCI config space for EEPROM Status\n");
- return -EIO;
- }
-
- /* Determine if the error(s) we care about are present. If they are
- * present we need to fail.
- */
- if (eestatus & 0x4C) {
- int write_failed = 0;
- if (pdev->revision == 0x01) {
- int i;
- static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };
-
- /* Re-write the first 4 bytes if we have an eeprom
- * present and the revision id is 1, this fixes the
- * corruption seen with 1310 B Silicon
- */
- for (i = 0; i < 3; i++)
- if (eeprom_write(etdev, i, eedata[i]) < 0)
- write_failed = 1;
- }
- if (pdev->revision != 0x01 || write_failed) {
- dev_err(&pdev->dev,
- "Fatal EEPROM Status Error - 0x%04x\n", eestatus);
-
- /* This error could mean that there was an error
- * reading the eeprom or that the eeprom doesn't exist.
- * We will treat each case the same and not try to
- * gather additional information that normally would
- * come from the eeprom, like MAC Address
- */
- etdev->has_eeprom = 0;
- return -EIO;
- }
- }
- etdev->has_eeprom = 1;
-
- /* Read the EEPROM for information regarding LED behavior. Refer to
- * ET1310_phy.c, et131x_xcvr_init(), for its use.
- */
- eeprom_read(etdev, 0x70, &etdev->eeprom_data[0]);
- eeprom_read(etdev, 0x71, &etdev->eeprom_data[1]);
-
- if (etdev->eeprom_data[0] != 0xcd)
- /* Disable all optional features */
- etdev->eeprom_data[1] = 0x00;
-
- return 0;
-}
diff --git a/drivers/staging/et131x/et1310_mac.c b/drivers/staging/et131x/et1310_mac.c
deleted file mode 100644
index 656be4b99cf..00000000000
--- a/drivers/staging/et131x/et1310_mac.c
+++ /dev/null
@@ -1,654 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et1310_mac.c - All code and routines pertaining to the MAC
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#include "et131x_version.h"
-#include "et131x_defs.h"
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/pci.h>
-#include <asm/system.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-#include <linux/crc32.h>
-
-#include "et1310_phy.h"
-#include "et131x_adapter.h"
-#include "et131x.h"
-
-
-#define COUNTER_WRAP_28_BIT 0x10000000
-#define COUNTER_WRAP_22_BIT 0x400000
-#define COUNTER_WRAP_16_BIT 0x10000
-#define COUNTER_WRAP_12_BIT 0x1000
-
-#define COUNTER_MASK_28_BIT (COUNTER_WRAP_28_BIT - 1)
-#define COUNTER_MASK_22_BIT (COUNTER_WRAP_22_BIT - 1)
-#define COUNTER_MASK_16_BIT (COUNTER_WRAP_16_BIT - 1)
-#define COUNTER_MASK_12_BIT (COUNTER_WRAP_12_BIT - 1)
-
-/**
- * ConfigMacRegs1 - Initialize the first part of MAC regs
- * @pAdpater: pointer to our adapter structure
- */
-void ConfigMACRegs1(struct et131x_adapter *etdev)
-{
- struct mac_regs __iomem *pMac = &etdev->regs->mac;
- u32 station1;
- u32 station2;
- u32 ipg;
-
- /* First we need to reset everything. Write to MAC configuration
- * register 1 to perform reset.
- */
- writel(0xC00F0000, &pMac->cfg1);
-
- /* Next lets configure the MAC Inter-packet gap register */
- ipg = 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */
- ipg |= 0x50 << 8; /* ifg enforce 0x50 */
- writel(ipg, &pMac->ipg);
-
- /* Next lets configure the MAC Half Duplex register */
- /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
- writel(0x00A1F037, &pMac->hfdp);
-
- /* Next lets configure the MAC Interface Control register */
- writel(0, &pMac->if_ctrl);
-
- /* Let's move on to setting up the mii management configuration */
- writel(0x07, &pMac->mii_mgmt_cfg); /* Clock reset 0x7 */
-
- /* Next lets configure the MAC Station Address register. These
- * values are read from the EEPROM during initialization and stored
- * in the adapter structure. We write what is stored in the adapter
- * structure to the MAC Station Address registers high and low. This
- * station address is used for generating and checking pause control
- * packets.
- */
- station2 = (etdev->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
- (etdev->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
- station1 = (etdev->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
- (etdev->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
- (etdev->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
- etdev->addr[2];
- writel(station1, &pMac->station_addr_1);
- writel(station2, &pMac->station_addr_2);
-
- /* Max ethernet packet in bytes that will passed by the mac without
- * being truncated. Allow the MAC to pass 4 more than our max packet
- * size. This is 4 for the Ethernet CRC.
- *
- * Packets larger than (RegistryJumboPacket) that do not contain a
- * VLAN ID will be dropped by the Rx function.
- */
- writel(etdev->RegistryJumboPacket + 4, &pMac->max_fm_len);
-
- /* clear out MAC config reset */
- writel(0, &pMac->cfg1);
-}
-
-/**
- * ConfigMacRegs2 - Initialize the second part of MAC regs
- * @pAdpater: pointer to our adapter structure
- */
-void ConfigMACRegs2(struct et131x_adapter *etdev)
-{
- int32_t delay = 0;
- struct mac_regs __iomem *pMac = &etdev->regs->mac;
- u32 cfg1;
- u32 cfg2;
- u32 ifctrl;
- u32 ctl;
-
- ctl = readl(&etdev->regs->txmac.ctl);
- cfg1 = readl(&pMac->cfg1);
- cfg2 = readl(&pMac->cfg2);
- ifctrl = readl(&pMac->if_ctrl);
-
- /* Set up the if mode bits */
- cfg2 &= ~0x300;
- if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
- cfg2 |= 0x200;
- /* Phy mode bit */
- ifctrl &= ~(1 << 24);
- } else {
- cfg2 |= 0x100;
- ifctrl |= (1 << 24);
- }
-
- /* We need to enable Rx/Tx */
- cfg1 |= CFG1_RX_ENABLE|CFG1_TX_ENABLE|CFG1_TX_FLOW;
- /* Initialize loop back to off */
- cfg1 &= ~(CFG1_LOOPBACK|CFG1_RX_FLOW);
- if (etdev->flowcontrol == FLOW_RXONLY || etdev->flowcontrol == FLOW_BOTH)
- cfg1 |= CFG1_RX_FLOW;
- writel(cfg1, &pMac->cfg1);
-
- /* Now we need to initialize the MAC Configuration 2 register */
- /* preamble 7, check length, huge frame off, pad crc, crc enable
- full duplex off */
- cfg2 |= 0x7016;
- cfg2 &= ~0x0021;
-
- /* Turn on duplex if needed */
- if (etdev->duplex_mode)
- cfg2 |= 0x01;
-
- ifctrl &= ~(1 << 26);
- if (!etdev->duplex_mode)
- ifctrl |= (1<<26); /* Enable ghd */
-
- writel(ifctrl, &pMac->if_ctrl);
- writel(cfg2, &pMac->cfg2);
-
- do {
- udelay(10);
- delay++;
- cfg1 = readl(&pMac->cfg1);
- } while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);
-
- if (delay == 100) {
- dev_warn(&etdev->pdev->dev,
- "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
- cfg1);
- }
-
- /* Enable TXMAC */
- ctl |= 0x09; /* TX mac enable, FC disable */
- writel(ctl, &etdev->regs->txmac.ctl);
-
- /* Ready to start the RXDMA/TXDMA engine */
- if (etdev->flags & fMP_ADAPTER_LOWER_POWER) {
- et131x_rx_dma_enable(etdev);
- et131x_tx_dma_enable(etdev);
- }
-}
-
-void ConfigRxMacRegs(struct et131x_adapter *etdev)
-{
- struct rxmac_regs __iomem *pRxMac = &etdev->regs->rxmac;
- u32 sa_lo;
- u32 sa_hi = 0;
- u32 pf_ctrl = 0;
-
- /* Disable the MAC while it is being configured (also disable WOL) */
- writel(0x8, &pRxMac->ctrl);
-
- /* Initialize WOL to disabled. */
- writel(0, &pRxMac->crc0);
- writel(0, &pRxMac->crc12);
- writel(0, &pRxMac->crc34);
-
- /* We need to set the WOL mask0 - mask4 next. We initialize it to
- * its default Values of 0x00000000 because there are not WOL masks
- * as of this time.
- */
- writel(0, &pRxMac->mask0_word0);
- writel(0, &pRxMac->mask0_word1);
- writel(0, &pRxMac->mask0_word2);
- writel(0, &pRxMac->mask0_word3);
-
- writel(0, &pRxMac->mask1_word0);
- writel(0, &pRxMac->mask1_word1);
- writel(0, &pRxMac->mask1_word2);
- writel(0, &pRxMac->mask1_word3);
-
- writel(0, &pRxMac->mask2_word0);
- writel(0, &pRxMac->mask2_word1);
- writel(0, &pRxMac->mask2_word2);
- writel(0, &pRxMac->mask2_word3);
-
- writel(0, &pRxMac->mask3_word0);
- writel(0, &pRxMac->mask3_word1);
- writel(0, &pRxMac->mask3_word2);
- writel(0, &pRxMac->mask3_word3);
-
- writel(0, &pRxMac->mask4_word0);
- writel(0, &pRxMac->mask4_word1);
- writel(0, &pRxMac->mask4_word2);
- writel(0, &pRxMac->mask4_word3);
-
- /* Lets setup the WOL Source Address */
- sa_lo = (etdev->addr[2] << ET_WOL_LO_SA3_SHIFT) |
- (etdev->addr[3] << ET_WOL_LO_SA4_SHIFT) |
- (etdev->addr[4] << ET_WOL_LO_SA5_SHIFT) |
- etdev->addr[5];
- writel(sa_lo, &pRxMac->sa_lo);
-
- sa_hi = (u32) (etdev->addr[0] << ET_WOL_HI_SA1_SHIFT) |
- etdev->addr[1];
- writel(sa_hi, &pRxMac->sa_hi);
-
- /* Disable all Packet Filtering */
- writel(0, &pRxMac->pf_ctrl);
-
- /* Let's initialize the Unicast Packet filtering address */
- if (etdev->PacketFilter & ET131X_PACKET_TYPE_DIRECTED) {
- SetupDeviceForUnicast(etdev);
- pf_ctrl |= 4; /* Unicast filter */
- } else {
- writel(0, &pRxMac->uni_pf_addr1);
- writel(0, &pRxMac->uni_pf_addr2);
- writel(0, &pRxMac->uni_pf_addr3);
- }
-
- /* Let's initialize the Multicast hash */
- if (!(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
- pf_ctrl |= 2; /* Multicast filter */
- SetupDeviceForMulticast(etdev);
- }
-
- /* Runt packet filtering. Didn't work in version A silicon. */
- pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
- pf_ctrl |= 8; /* Fragment filter */
-
- if (etdev->RegistryJumboPacket > 8192)
- /* In order to transmit jumbo packets greater than 8k, the
- * FIFO between RxMAC and RxDMA needs to be reduced in size
- * to (16k - Jumbo packet size). In order to implement this,
- * we must use "cut through" mode in the RxMAC, which chops
- * packets down into segments which are (max_size * 16). In
- * this case we selected 256 bytes, since this is the size of
- * the PCI-Express TLP's that the 1310 uses.
- *
- * seg_en on, fc_en off, size 0x10
- */
- writel(0x41, &pRxMac->mcif_ctrl_max_seg);
- else
- writel(0, &pRxMac->mcif_ctrl_max_seg);
-
- /* Initialize the MCIF water marks */
- writel(0, &pRxMac->mcif_water_mark);
-
- /* Initialize the MIF control */
- writel(0, &pRxMac->mif_ctrl);
-
- /* Initialize the Space Available Register */
- writel(0, &pRxMac->space_avail);
-
- /* Initialize the the mif_ctrl register
- * bit 3: Receive code error. One or more nibbles were signaled as
- * errors during the reception of the packet. Clear this
- * bit in Gigabit, set it in 100Mbit. This was derived
- * experimentally at UNH.
- * bit 4: Receive CRC error. The packet's CRC did not match the
- * internally generated CRC.
- * bit 5: Receive length check error. Indicates that frame length
- * field value in the packet does not match the actual data
- * byte length and is not a type field.
- * bit 16: Receive frame truncated.
- * bit 17: Drop packet enable
- */
- if (etdev->linkspeed == TRUEPHY_SPEED_100MBPS)
- writel(0x30038, &pRxMac->mif_ctrl);
- else
- writel(0x30030, &pRxMac->mif_ctrl);
-
- /* Finally we initialize RxMac to be enabled & WOL disabled. Packet
- * filter is always enabled since it is where the runt packets are
- * supposed to be dropped. For version A silicon, runt packet
- * dropping doesn't work, so it is disabled in the pf_ctrl register,
- * but we still leave the packet filter on.
- */
- writel(pf_ctrl, &pRxMac->pf_ctrl);
- writel(0x9, &pRxMac->ctrl);
-}
-
-void ConfigTxMacRegs(struct et131x_adapter *etdev)
-{
- struct txmac_regs *txmac = &etdev->regs->txmac;
-
- /* We need to update the Control Frame Parameters
- * cfpt - control frame pause timer set to 64 (0x40)
- * cfep - control frame extended pause timer set to 0x0
- */
- if (etdev->flowcontrol == FLOW_NONE)
- writel(0, &txmac->cf_param);
- else
- writel(0x40, &txmac->cf_param);
-}
-
-void ConfigMacStatRegs(struct et131x_adapter *etdev)
-{
- struct macstat_regs __iomem *macstat =
- &etdev->regs->macstat;
-
- /* Next we need to initialize all the macstat registers to zero on
- * the device.
- */
- writel(0, &macstat->txrx_0_64_byte_frames);
- writel(0, &macstat->txrx_65_127_byte_frames);
- writel(0, &macstat->txrx_128_255_byte_frames);
- writel(0, &macstat->txrx_256_511_byte_frames);
- writel(0, &macstat->txrx_512_1023_byte_frames);
- writel(0, &macstat->txrx_1024_1518_byte_frames);
- writel(0, &macstat->txrx_1519_1522_gvln_frames);
-
- writel(0, &macstat->rx_bytes);
- writel(0, &macstat->rx_packets);
- writel(0, &macstat->rx_fcs_errs);
- writel(0, &macstat->rx_multicast_packets);
- writel(0, &macstat->rx_broadcast_packets);
- writel(0, &macstat->rx_control_frames);
- writel(0, &macstat->rx_pause_frames);
- writel(0, &macstat->rx_unknown_opcodes);
- writel(0, &macstat->rx_align_errs);
- writel(0, &macstat->rx_frame_len_errs);
- writel(0, &macstat->rx_code_errs);
- writel(0, &macstat->rx_carrier_sense_errs);
- writel(0, &macstat->rx_undersize_packets);
- writel(0, &macstat->rx_oversize_packets);
- writel(0, &macstat->rx_fragment_packets);
- writel(0, &macstat->rx_jabbers);
- writel(0, &macstat->rx_drops);
-
- writel(0, &macstat->tx_bytes);
- writel(0, &macstat->tx_packets);
- writel(0, &macstat->tx_multicast_packets);
- writel(0, &macstat->tx_broadcast_packets);
- writel(0, &macstat->tx_pause_frames);
- writel(0, &macstat->tx_deferred);
- writel(0, &macstat->tx_excessive_deferred);
- writel(0, &macstat->tx_single_collisions);
- writel(0, &macstat->tx_multiple_collisions);
- writel(0, &macstat->tx_late_collisions);
- writel(0, &macstat->tx_excessive_collisions);
- writel(0, &macstat->tx_total_collisions);
- writel(0, &macstat->tx_pause_honored_frames);
- writel(0, &macstat->tx_drops);
- writel(0, &macstat->tx_jabbers);
- writel(0, &macstat->tx_fcs_errs);
- writel(0, &macstat->tx_control_frames);
- writel(0, &macstat->tx_oversize_frames);
- writel(0, &macstat->tx_undersize_frames);
- writel(0, &macstat->tx_fragments);
- writel(0, &macstat->carry_reg1);
- writel(0, &macstat->carry_reg2);
-
- /* Unmask any counters that we want to track the overflow of.
- * Initially this will be all counters. It may become clear later
- * that we do not need to track all counters.
- */
- writel(0xFFFFBE32, &macstat->carry_reg1_mask);
- writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
-}
-
-void ConfigFlowControl(struct et131x_adapter *etdev)
-{
- if (etdev->duplex_mode == 0) {
- etdev->flowcontrol = FLOW_NONE;
- } else {
- char remote_pause, remote_async_pause;
-
- ET1310_PhyAccessMiBit(etdev,
- TRUEPHY_BIT_READ, 5, 10, &remote_pause);
- ET1310_PhyAccessMiBit(etdev,
- TRUEPHY_BIT_READ, 5, 11,
- &remote_async_pause);
-
- if ((remote_pause == TRUEPHY_BIT_SET) &&
- (remote_async_pause == TRUEPHY_BIT_SET)) {
- etdev->flowcontrol = etdev->wanted_flow;
- } else if ((remote_pause == TRUEPHY_BIT_SET) &&
- (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
- if (etdev->wanted_flow == FLOW_BOTH)
- etdev->flowcontrol = FLOW_BOTH;
- else
- etdev->flowcontrol = FLOW_NONE;
- } else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
- (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
- etdev->flowcontrol = FLOW_NONE;
- } else {/* if (remote_pause == TRUEPHY_CLEAR_BIT &&
- remote_async_pause == TRUEPHY_SET_BIT) */
- if (etdev->wanted_flow == FLOW_BOTH)
- etdev->flowcontrol = FLOW_RXONLY;
- else
- etdev->flowcontrol = FLOW_NONE;
- }
- }
-}
-
-/**
- * UpdateMacStatHostCounters - Update the local copy of the statistics
- * @etdev: pointer to the adapter structure
- */
-void UpdateMacStatHostCounters(struct et131x_adapter *etdev)
-{
- struct ce_stats *stats = &etdev->stats;
- struct macstat_regs __iomem *macstat =
- &etdev->regs->macstat;
-
- stats->collisions += readl(&macstat->tx_total_collisions);
- stats->first_collision += readl(&macstat->tx_single_collisions);
- stats->tx_deferred += readl(&macstat->tx_deferred);
- stats->excessive_collisions += readl(&macstat->tx_multiple_collisions);
- stats->late_collisions += readl(&macstat->tx_late_collisions);
- stats->tx_uflo += readl(&macstat->tx_undersize_frames);
- stats->max_pkt_error += readl(&macstat->tx_oversize_frames);
-
- stats->alignment_err += readl(&macstat->rx_align_errs);
- stats->crc_err += readl(&macstat->rx_code_errs);
- stats->norcvbuf += readl(&macstat->rx_drops);
- stats->rx_ov_flow += readl(&macstat->rx_oversize_packets);
- stats->code_violations += readl(&macstat->rx_fcs_errs);
- stats->length_err += readl(&macstat->rx_frame_len_errs);
-
- stats->other_errors += readl(&macstat->rx_fragment_packets);
-}
-
-/**
- * HandleMacStatInterrupt
- * @etdev: pointer to the adapter structure
- *
- * One of the MACSTAT counters has wrapped. Update the local copy of
- * the statistics held in the adapter structure, checking the "wrap"
- * bit for each counter.
- */
-void HandleMacStatInterrupt(struct et131x_adapter *etdev)
-{
- u32 carry_reg1;
- u32 carry_reg2;
-
- /* Read the interrupt bits from the register(s). These are Clear On
- * Write.
- */
- carry_reg1 = readl(&etdev->regs->macstat.carry_reg1);
- carry_reg2 = readl(&etdev->regs->macstat.carry_reg2);
-
- writel(carry_reg2, &etdev->regs->macstat.carry_reg1);
- writel(carry_reg2, &etdev->regs->macstat.carry_reg2);
-
- /* We need to do update the host copy of all the MAC_STAT counters.
- * For each counter, check it's overflow bit. If the overflow bit is
- * set, then increment the host version of the count by one complete
- * revolution of the counter. This routine is called when the counter
- * block indicates that one of the counters has wrapped.
- */
- if (carry_reg1 & (1 << 14))
- etdev->stats.code_violations += COUNTER_WRAP_16_BIT;
- if (carry_reg1 & (1 << 8))
- etdev->stats.alignment_err += COUNTER_WRAP_12_BIT;
- if (carry_reg1 & (1 << 7))
- etdev->stats.length_err += COUNTER_WRAP_16_BIT;
- if (carry_reg1 & (1 << 2))
- etdev->stats.other_errors += COUNTER_WRAP_16_BIT;
- if (carry_reg1 & (1 << 6))
- etdev->stats.crc_err += COUNTER_WRAP_16_BIT;
- if (carry_reg1 & (1 << 3))
- etdev->stats.rx_ov_flow += COUNTER_WRAP_16_BIT;
- if (carry_reg1 & (1 << 0))
- etdev->stats.norcvbuf += COUNTER_WRAP_16_BIT;
- if (carry_reg2 & (1 << 16))
- etdev->stats.max_pkt_error += COUNTER_WRAP_12_BIT;
- if (carry_reg2 & (1 << 15))
- etdev->stats.tx_uflo += COUNTER_WRAP_12_BIT;
- if (carry_reg2 & (1 << 6))
- etdev->stats.first_collision += COUNTER_WRAP_12_BIT;
- if (carry_reg2 & (1 << 8))
- etdev->stats.tx_deferred += COUNTER_WRAP_12_BIT;
- if (carry_reg2 & (1 << 5))
- etdev->stats.excessive_collisions += COUNTER_WRAP_12_BIT;
- if (carry_reg2 & (1 << 4))
- etdev->stats.late_collisions += COUNTER_WRAP_12_BIT;
- if (carry_reg2 & (1 << 2))
- etdev->stats.collisions += COUNTER_WRAP_12_BIT;
-}
-
-void SetupDeviceForMulticast(struct et131x_adapter *etdev)
-{
- struct rxmac_regs __iomem *rxmac = &etdev->regs->rxmac;
- uint32_t nIndex;
- uint32_t result;
- uint32_t hash1 = 0;
- uint32_t hash2 = 0;
- uint32_t hash3 = 0;
- uint32_t hash4 = 0;
- u32 pm_csr;
-
- /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
- * the multi-cast LIST. If it is NOT specified, (and "ALL" is not
- * specified) then we should pass NO multi-cast addresses to the
- * driver.
- */
- if (etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST) {
- /* Loop through our multicast array and set up the device */
- for (nIndex = 0; nIndex < etdev->MCAddressCount; nIndex++) {
- result = ether_crc(6, etdev->MCList[nIndex]);
-
- result = (result & 0x3F800000) >> 23;
-
- if (result < 32) {
- hash1 |= (1 << result);
- } else if ((31 < result) && (result < 64)) {
- result -= 32;
- hash2 |= (1 << result);
- } else if ((63 < result) && (result < 96)) {
- result -= 64;
- hash3 |= (1 << result);
- } else {
- result -= 96;
- hash4 |= (1 << result);
- }
- }
- }
-
- /* Write out the new hash to the device */
- pm_csr = readl(&etdev->regs->global.pm_csr);
- if ((pm_csr & ET_PM_PHY_SW_COMA) == 0) {
- writel(hash1, &rxmac->multi_hash1);
- writel(hash2, &rxmac->multi_hash2);
- writel(hash3, &rxmac->multi_hash3);
- writel(hash4, &rxmac->multi_hash4);
- }
-}
-
-void SetupDeviceForUnicast(struct et131x_adapter *etdev)
-{
- struct rxmac_regs __iomem *rxmac = &etdev->regs->rxmac;
- u32 uni_pf1;
- u32 uni_pf2;
- u32 uni_pf3;
- u32 pm_csr;
-
- /* Set up unicast packet filter reg 3 to be the first two octets of
- * the MAC address for both address
- *
- * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
- * MAC address for second address
- *
- * Set up unicast packet filter reg 3 to be the octets 2 - 5 of the
- * MAC address for first address
- */
- uni_pf3 = (etdev->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
- (etdev->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
- (etdev->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
- etdev->addr[1];
-
- uni_pf2 = (etdev->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
- (etdev->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
- (etdev->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
- etdev->addr[5];
-
- uni_pf1 = (etdev->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
- (etdev->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
- (etdev->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
- etdev->addr[5];
-
- pm_csr = readl(&etdev->regs->global.pm_csr);
- if ((pm_csr & ET_PM_PHY_SW_COMA) == 0) {
- writel(uni_pf1, &rxmac->uni_pf_addr1);
- writel(uni_pf2, &rxmac->uni_pf_addr2);
- writel(uni_pf3, &rxmac->uni_pf_addr3);
- }
-}
diff --git a/drivers/staging/et131x/et1310_phy.c b/drivers/staging/et131x/et1310_phy.c
deleted file mode 100644
index 0bcb7fb6e2c..00000000000
--- a/drivers/staging/et131x/et1310_phy.c
+++ /dev/null
@@ -1,979 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright * 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et1310_phy.c - Routines for configuring and accessing the PHY
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright * 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#include "et131x_version.h"
-#include "et131x_defs.h"
-
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <asm/system.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-#include <linux/random.h>
-
-#include "et1310_phy.h"
-
-#include "et131x_adapter.h"
-
-#include "et1310_address_map.h"
-#include "et1310_tx.h"
-#include "et1310_rx.h"
-
-#include "et131x.h"
-
-/* Prototypes for functions with local scope */
-static void et131x_xcvr_init(struct et131x_adapter *etdev);
-
-/**
- * PhyMiRead - Read from the PHY through the MII Interface on the MAC
- * @etdev: pointer to our private adapter structure
- * @xcvrAddr: the address of the transceiver
- * @xcvrReg: the register to read
- * @value: pointer to a 16-bit value in which the value will be stored
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
-int PhyMiRead(struct et131x_adapter *etdev, u8 xcvrAddr,
- u8 xcvrReg, u16 *value)
-{
- struct mac_regs __iomem *mac = &etdev->regs->mac;
- int status = 0;
- u32 delay;
- u32 miiAddr;
- u32 miiCmd;
- u32 miiIndicator;
-
- /* Save a local copy of the registers we are dealing with so we can
- * set them back
- */
- miiAddr = readl(&mac->mii_mgmt_addr);
- miiCmd = readl(&mac->mii_mgmt_cmd);
-
- /* Stop the current operation */
- writel(0, &mac->mii_mgmt_cmd);
-
- /* Set up the register we need to read from on the correct PHY */
- writel(MII_ADDR(xcvrAddr, xcvrReg), &mac->mii_mgmt_addr);
-
- /* Kick the read cycle off */
- delay = 0;
-
- writel(0x1, &mac->mii_mgmt_cmd);
-
- do {
- udelay(50);
- delay++;
- miiIndicator = readl(&mac->mii_mgmt_indicator);
- } while ((miiIndicator & MGMT_WAIT) && delay < 50);
-
- /* If we hit the max delay, we could not read the register */
- if (delay == 50) {
- dev_warn(&etdev->pdev->dev,
- "xcvrReg 0x%08x could not be read\n", xcvrReg);
- dev_warn(&etdev->pdev->dev, "status is 0x%08x\n",
- miiIndicator);
-
- status = -EIO;
- }
-
- /* If we hit here we were able to read the register and we need to
- * return the value to the caller */
- *value = readl(&mac->mii_mgmt_stat) & 0xFFFF;
-
- /* Stop the read operation */
- writel(0, &mac->mii_mgmt_cmd);
-
- /* set the registers we touched back to the state at which we entered
- * this function
- */
- writel(miiAddr, &mac->mii_mgmt_addr);
- writel(miiCmd, &mac->mii_mgmt_cmd);
-
- return status;
-}
-
-/**
- * MiWrite - Write to a PHY register through the MII interface of the MAC
- * @etdev: pointer to our private adapter structure
- * @xcvrReg: the register to read
- * @value: 16-bit value to write
- *
- * FIXME: one caller in netdev still
- *
- * Return 0 on success, errno on failure (as defined in errno.h)
- */
-int MiWrite(struct et131x_adapter *etdev, u8 xcvrReg, u16 value)
-{
- struct mac_regs __iomem *mac = &etdev->regs->mac;
- int status = 0;
- u8 xcvrAddr = etdev->stats.xcvr_addr;
- u32 delay;
- u32 miiAddr;
- u32 miiCmd;
- u32 miiIndicator;
-
- /* Save a local copy of the registers we are dealing with so we can
- * set them back
- */
- miiAddr = readl(&mac->mii_mgmt_addr);
- miiCmd = readl(&mac->mii_mgmt_cmd);
-
- /* Stop the current operation */
- writel(0, &mac->mii_mgmt_cmd);
-
- /* Set up the register we need to write to on the correct PHY */
- writel(MII_ADDR(xcvrAddr, xcvrReg), &mac->mii_mgmt_addr);
-
- /* Add the value to write to the registers to the mac */
- writel(value, &mac->mii_mgmt_ctrl);
- delay = 0;
-
- do {
- udelay(50);
- delay++;
- miiIndicator = readl(&mac->mii_mgmt_indicator);
- } while ((miiIndicator & MGMT_BUSY) && delay < 100);
-
- /* If we hit the max delay, we could not write the register */
- if (delay == 100) {
- u16 TempValue;
-
- dev_warn(&etdev->pdev->dev,
- "xcvrReg 0x%08x could not be written", xcvrReg);
- dev_warn(&etdev->pdev->dev, "status is 0x%08x\n",
- miiIndicator);
- dev_warn(&etdev->pdev->dev, "command is 0x%08x\n",
- readl(&mac->mii_mgmt_cmd));
-
- MiRead(etdev, xcvrReg, &TempValue);
-
- status = -EIO;
- }
- /* Stop the write operation */
- writel(0, &mac->mii_mgmt_cmd);
-
- /* set the registers we touched back to the state at which we entered
- * this function
- */
- writel(miiAddr, &mac->mii_mgmt_addr);
- writel(miiCmd, &mac->mii_mgmt_cmd);
-
- return status;
-}
-
-/**
- * et131x_xcvr_find - Find the PHY ID
- * @etdev: pointer to our private adapter structure
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
-int et131x_xcvr_find(struct et131x_adapter *etdev)
-{
- u8 xcvr_addr;
- u16 idr1;
- u16 idr2;
- u32 xcvr_id;
-
- /* We need to get xcvr id and address we just get the first one */
- for (xcvr_addr = 0; xcvr_addr < 32; xcvr_addr++) {
- /* Read the ID from the PHY */
- PhyMiRead(etdev, xcvr_addr,
- (u8) offsetof(struct mi_regs, idr1),
- &idr1);
- PhyMiRead(etdev, xcvr_addr,
- (u8) offsetof(struct mi_regs, idr2),
- &idr2);
-
- xcvr_id = (u32) ((idr1 << 16) | idr2);
-
- if (idr1 != 0 && idr1 != 0xffff) {
- etdev->stats.xcvr_id = xcvr_id;
- etdev->stats.xcvr_addr = xcvr_addr;
- return 0;
- }
- }
- return -ENODEV;
-}
-
-void ET1310_PhyReset(struct et131x_adapter *etdev)
-{
- MiWrite(etdev, PHY_CONTROL, 0x8000);
-}
-
-/**
- * ET1310_PhyPowerDown - PHY power control
- * @etdev: device to control
- * @down: true for off/false for back on
- *
- * one hundred, ten, one thousand megs
- * How would you like to have your LAN accessed
- * Can't you see that this code processed
- * Phy power, phy power..
- */
-
-void ET1310_PhyPowerDown(struct et131x_adapter *etdev, bool down)
-{
- u16 data;
-
- MiRead(etdev, PHY_CONTROL, &data);
- data &= ~0x0800; /* Power UP */
- if (down) /* Power DOWN */
- data |= 0x0800;
- MiWrite(etdev, PHY_CONTROL, data);
-}
-
-/**
- * ET130_PhyAutoNEg - autonegotiate control
- * @etdev: device to control
- * @enabe: autoneg on/off
- *
- * Set up the autonegotiation state according to whether we will be
- * negotiating the state or forcing a speed.
- */
-
-static void ET1310_PhyAutoNeg(struct et131x_adapter *etdev, bool enable)
-{
- u16 data;
-
- MiRead(etdev, PHY_CONTROL, &data);
- data &= ~0x1000; /* Autonegotiation OFF */
- if (enable)
- data |= 0x1000; /* Autonegotiation ON */
- MiWrite(etdev, PHY_CONTROL, data);
-}
-
-/**
- * ET130_PhyDuplexMode - duplex control
- * @etdev: device to control
- * @duplex: duplex on/off
- *
- * Set up the duplex state on the PHY
- */
-
-static void ET1310_PhyDuplexMode(struct et131x_adapter *etdev, u16 duplex)
-{
- u16 data;
-
- MiRead(etdev, PHY_CONTROL, &data);
- data &= ~0x100; /* Set Half Duplex */
- if (duplex == TRUEPHY_DUPLEX_FULL)
- data |= 0x100; /* Set Full Duplex */
- MiWrite(etdev, PHY_CONTROL, data);
-}
-
-/**
- * ET130_PhySpeedSelect - speed control
- * @etdev: device to control
- * @duplex: duplex on/off
- *
- * Set the speed of our PHY.
- */
-
-static void ET1310_PhySpeedSelect(struct et131x_adapter *etdev, u16 speed)
-{
- u16 data;
- static const u16 bits[3] = {0x0000, 0x2000, 0x0040};
-
- /* Read the PHY control register */
- MiRead(etdev, PHY_CONTROL, &data);
- /* Clear all Speed settings (Bits 6, 13) */
- data &= ~0x2040;
- /* Write back the new speed */
- MiWrite(etdev, PHY_CONTROL, data | bits[speed]);
-}
-
-/**
- * ET1310_PhyLinkStatus - read link state
- * @etdev: device to read
- * @link_status: reported link state
- * @autoneg: reported autonegotiation state (complete/incomplete/disabled)
- * @linkspeed: returnedlink speed in use
- * @duplex_mode: reported half/full duplex state
- * @mdi_mdix: not yet working
- * @masterslave: report whether we are master or slave
- * @polarity: link polarity
- *
- * I can read your lan like a magazine
- * I see if your up
- * I know your link speed
- * I see all the setting that you'd rather keep
- */
-
-static void ET1310_PhyLinkStatus(struct et131x_adapter *etdev,
- u8 *link_status,
- u32 *autoneg,
- u32 *linkspeed,
- u32 *duplex_mode,
- u32 *mdi_mdix,
- u32 *masterslave, u32 *polarity)
-{
- u16 mistatus = 0;
- u16 is1000BaseT = 0;
- u16 vmi_phystatus = 0;
- u16 control = 0;
-
- MiRead(etdev, PHY_STATUS, &mistatus);
- MiRead(etdev, PHY_1000_STATUS, &is1000BaseT);
- MiRead(etdev, PHY_PHY_STATUS, &vmi_phystatus);
- MiRead(etdev, PHY_CONTROL, &control);
-
- *link_status = (vmi_phystatus & 0x0040) ? 1 : 0;
- *autoneg = (control & 0x1000) ? ((vmi_phystatus & 0x0020) ?
- TRUEPHY_ANEG_COMPLETE :
- TRUEPHY_ANEG_NOT_COMPLETE) :
- TRUEPHY_ANEG_DISABLED;
- *linkspeed = (vmi_phystatus & 0x0300) >> 8;
- *duplex_mode = (vmi_phystatus & 0x0080) >> 7;
- /* NOTE: Need to complete this */
- *mdi_mdix = 0;
-
- *masterslave = (is1000BaseT & 0x4000) ?
- TRUEPHY_CFG_MASTER : TRUEPHY_CFG_SLAVE;
- *polarity = (vmi_phystatus & 0x0400) ?
- TRUEPHY_POLARITY_INVERTED : TRUEPHY_POLARITY_NORMAL;
-}
-
-static void ET1310_PhyAndOrReg(struct et131x_adapter *etdev,
- u16 regnum, u16 andMask, u16 orMask)
-{
- u16 reg;
-
- MiRead(etdev, regnum, &reg);
- reg &= andMask;
- reg |= orMask;
- MiWrite(etdev, regnum, reg);
-}
-
-/* Still used from _mac for BIT_READ */
-void ET1310_PhyAccessMiBit(struct et131x_adapter *etdev, u16 action,
- u16 regnum, u16 bitnum, u8 *value)
-{
- u16 reg;
- u16 mask = 0x0001 << bitnum;
-
- /* Read the requested register */
- MiRead(etdev, regnum, &reg);
-
- switch (action) {
- case TRUEPHY_BIT_READ:
- *value = (reg & mask) >> bitnum;
- break;
-
- case TRUEPHY_BIT_SET:
- MiWrite(etdev, regnum, reg | mask);
- break;
-
- case TRUEPHY_BIT_CLEAR:
- MiWrite(etdev, regnum, reg & ~mask);
- break;
-
- default:
- break;
- }
-}
-
-void ET1310_PhyAdvertise1000BaseT(struct et131x_adapter *etdev,
- u16 duplex)
-{
- u16 data;
-
- /* Read the PHY 1000 Base-T Control Register */
- MiRead(etdev, PHY_1000_CONTROL, &data);
-
- /* Clear Bits 8,9 */
- data &= ~0x0300;
-
- switch (duplex) {
- case TRUEPHY_ADV_DUPLEX_NONE:
- /* Duplex already cleared, do nothing */
- break;
-
- case TRUEPHY_ADV_DUPLEX_FULL:
- /* Set Bit 9 */
- data |= 0x0200;
- break;
-
- case TRUEPHY_ADV_DUPLEX_HALF:
- /* Set Bit 8 */
- data |= 0x0100;
- break;
-
- case TRUEPHY_ADV_DUPLEX_BOTH:
- default:
- data |= 0x0300;
- break;
- }
-
- /* Write back advertisement */
- MiWrite(etdev, PHY_1000_CONTROL, data);
-}
-
-static void ET1310_PhyAdvertise100BaseT(struct et131x_adapter *etdev,
- u16 duplex)
-{
- u16 data;
-
- /* Read the Autonegotiation Register (10/100) */
- MiRead(etdev, PHY_AUTO_ADVERTISEMENT, &data);
-
- /* Clear bits 7,8 */
- data &= ~0x0180;
-
- switch (duplex) {
- case TRUEPHY_ADV_DUPLEX_NONE:
- /* Duplex already cleared, do nothing */
- break;
-
- case TRUEPHY_ADV_DUPLEX_FULL:
- /* Set Bit 8 */
- data |= 0x0100;
- break;
-
- case TRUEPHY_ADV_DUPLEX_HALF:
- /* Set Bit 7 */
- data |= 0x0080;
- break;
-
- case TRUEPHY_ADV_DUPLEX_BOTH:
- default:
- /* Set Bits 7,8 */
- data |= 0x0180;
- break;
- }
-
- /* Write back advertisement */
- MiWrite(etdev, PHY_AUTO_ADVERTISEMENT, data);
-}
-
-static void ET1310_PhyAdvertise10BaseT(struct et131x_adapter *etdev,
- u16 duplex)
-{
- u16 data;
-
- /* Read the Autonegotiation Register (10/100) */
- MiRead(etdev, PHY_AUTO_ADVERTISEMENT, &data);
-
- /* Clear bits 5,6 */
- data &= ~0x0060;
-
- switch (duplex) {
- case TRUEPHY_ADV_DUPLEX_NONE:
- /* Duplex already cleared, do nothing */
- break;
-
- case TRUEPHY_ADV_DUPLEX_FULL:
- /* Set Bit 6 */
- data |= 0x0040;
- break;
-
- case TRUEPHY_ADV_DUPLEX_HALF:
- /* Set Bit 5 */
- data |= 0x0020;
- break;
-
- case TRUEPHY_ADV_DUPLEX_BOTH:
- default:
- /* Set Bits 5,6 */
- data |= 0x0060;
- break;
- }
-
- /* Write back advertisement */
- MiWrite(etdev, PHY_AUTO_ADVERTISEMENT, data);
-}
-
-/**
- * et131x_setphy_normal - Set PHY for normal operation.
- * @etdev: pointer to our private adapter structure
- *
- * Used by Power Management to force the PHY into 10 Base T half-duplex mode,
- * when going to D3 in WOL mode. Also used during initialization to set the
- * PHY for normal operation.
- */
-void et131x_setphy_normal(struct et131x_adapter *etdev)
-{
- /* Make sure the PHY is powered up */
- ET1310_PhyPowerDown(etdev, 0);
- et131x_xcvr_init(etdev);
-}
-
-
-/**
- * et131x_xcvr_init - Init the phy if we are setting it into force mode
- * @etdev: pointer to our private adapter structure
- *
- */
-static void et131x_xcvr_init(struct et131x_adapter *etdev)
-{
- u16 imr;
- u16 isr;
- u16 lcr2;
-
- /* Zero out the adapter structure variable representing BMSR */
- etdev->bmsr = 0;
-
- MiRead(etdev, (u8) offsetof(struct mi_regs, isr), &isr);
- MiRead(etdev, (u8) offsetof(struct mi_regs, imr), &imr);
-
- /* Set the link status interrupt only. Bad behavior when link status
- * and auto neg are set, we run into a nested interrupt problem
- */
- imr |= 0x0105;
-
- MiWrite(etdev, (u8) offsetof(struct mi_regs, imr), imr);
-
- /* Set the LED behavior such that LED 1 indicates speed (off =
- * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
- * link and activity (on for link, blink off for activity).
- *
- * NOTE: Some customizations have been added here for specific
- * vendors; The LED behavior is now determined by vendor data in the
- * EEPROM. However, the above description is the default.
- */
- if ((etdev->eeprom_data[1] & 0x4) == 0) {
- MiRead(etdev, (u8) offsetof(struct mi_regs, lcr2),
- &lcr2);
-
- lcr2 &= 0x00FF;
- lcr2 |= 0xA000; /* led link */
-
- if ((etdev->eeprom_data[1] & 0x8) == 0)
- lcr2 |= 0x0300;
- else
- lcr2 |= 0x0400;
-
- MiWrite(etdev, (u8) offsetof(struct mi_regs, lcr2),
- lcr2);
- }
-
- /* Determine if we need to go into a force mode and set it */
- if (etdev->AiForceSpeed == 0 && etdev->AiForceDpx == 0) {
- if (etdev->wanted_flow == FLOW_TXONLY ||
- etdev->wanted_flow == FLOW_BOTH)
- ET1310_PhyAccessMiBit(etdev,
- TRUEPHY_BIT_SET, 4, 11, NULL);
- else
- ET1310_PhyAccessMiBit(etdev,
- TRUEPHY_BIT_CLEAR, 4, 11, NULL);
-
- if (etdev->wanted_flow == FLOW_BOTH)
- ET1310_PhyAccessMiBit(etdev,
- TRUEPHY_BIT_SET, 4, 10, NULL);
- else
- ET1310_PhyAccessMiBit(etdev,
- TRUEPHY_BIT_CLEAR, 4, 10, NULL);
-
- /* Set the phy to autonegotiation */
- ET1310_PhyAutoNeg(etdev, true);
-
- /* NOTE - Do we need this? */
- ET1310_PhyAccessMiBit(etdev, TRUEPHY_BIT_SET, 0, 9, NULL);
- return;
- }
-
- ET1310_PhyAutoNeg(etdev, false);
-
- /* Set to the correct force mode. */
- if (etdev->AiForceDpx != 1) {
- if (etdev->wanted_flow == FLOW_TXONLY ||
- etdev->wanted_flow == FLOW_BOTH)
- ET1310_PhyAccessMiBit(etdev,
- TRUEPHY_BIT_SET, 4, 11, NULL);
- else
- ET1310_PhyAccessMiBit(etdev,
- TRUEPHY_BIT_CLEAR, 4, 11, NULL);
-
- if (etdev->wanted_flow == FLOW_BOTH)
- ET1310_PhyAccessMiBit(etdev,
- TRUEPHY_BIT_SET, 4, 10, NULL);
- else
- ET1310_PhyAccessMiBit(etdev,
- TRUEPHY_BIT_CLEAR, 4, 10, NULL);
- } else {
- ET1310_PhyAccessMiBit(etdev, TRUEPHY_BIT_CLEAR, 4, 10, NULL);
- ET1310_PhyAccessMiBit(etdev, TRUEPHY_BIT_CLEAR, 4, 11, NULL);
- }
- ET1310_PhyPowerDown(etdev, 1);
- switch (etdev->AiForceSpeed) {
- case 10:
- /* First we need to turn off all other advertisement */
- ET1310_PhyAdvertise1000BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
- ET1310_PhyAdvertise100BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
- if (etdev->AiForceDpx == 1) {
- /* Set our advertise values accordingly */
- ET1310_PhyAdvertise10BaseT(etdev,
- TRUEPHY_ADV_DUPLEX_HALF);
- } else if (etdev->AiForceDpx == 2) {
- /* Set our advertise values accordingly */
- ET1310_PhyAdvertise10BaseT(etdev,
- TRUEPHY_ADV_DUPLEX_FULL);
- } else {
- /* Disable autoneg */
- ET1310_PhyAutoNeg(etdev, false);
- /* Disable rest of the advertisements */
- ET1310_PhyAdvertise10BaseT(etdev,
- TRUEPHY_ADV_DUPLEX_NONE);
- /* Force 10 Mbps */
- ET1310_PhySpeedSelect(etdev, TRUEPHY_SPEED_10MBPS);
- /* Force Full duplex */
- ET1310_PhyDuplexMode(etdev, TRUEPHY_DUPLEX_FULL);
- }
- break;
- case 100:
- /* first we need to turn off all other advertisement */
- ET1310_PhyAdvertise1000BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
- ET1310_PhyAdvertise10BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
- if (etdev->AiForceDpx == 1) {
- /* Set our advertise values accordingly */
- ET1310_PhyAdvertise100BaseT(etdev,
- TRUEPHY_ADV_DUPLEX_HALF);
- /* Set speed */
- ET1310_PhySpeedSelect(etdev, TRUEPHY_SPEED_100MBPS);
- } else if (etdev->AiForceDpx == 2) {
- /* Set our advertise values accordingly */
- ET1310_PhyAdvertise100BaseT(etdev,
- TRUEPHY_ADV_DUPLEX_FULL);
- } else {
- /* Disable autoneg */
- ET1310_PhyAutoNeg(etdev, false);
- /* Disable other advertisement */
- ET1310_PhyAdvertise100BaseT(etdev,
- TRUEPHY_ADV_DUPLEX_NONE);
- /* Force 100 Mbps */
- ET1310_PhySpeedSelect(etdev, TRUEPHY_SPEED_100MBPS);
- /* Force Full duplex */
- ET1310_PhyDuplexMode(etdev, TRUEPHY_DUPLEX_FULL);
- }
- break;
- case 1000:
- /* first we need to turn off all other advertisement */
- ET1310_PhyAdvertise100BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
- ET1310_PhyAdvertise10BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
- /* set our advertise values accordingly */
- ET1310_PhyAdvertise1000BaseT(etdev, TRUEPHY_ADV_DUPLEX_FULL);
- break;
- }
- ET1310_PhyPowerDown(etdev, 0);
-}
-
-void et131x_Mii_check(struct et131x_adapter *etdev,
- u16 bmsr, u16 bmsr_ints)
-{
- u8 link_status;
- u32 autoneg_status;
- u32 speed;
- u32 duplex;
- u32 mdi_mdix;
- u32 masterslave;
- u32 polarity;
- unsigned long flags;
-
- if (bmsr_ints & MI_BMSR_LINK_STATUS) {
- if (bmsr & MI_BMSR_LINK_STATUS) {
- etdev->boot_coma = 20;
-
- /* Update our state variables and indicate the
- * connected state
- */
- spin_lock_irqsave(&etdev->Lock, flags);
-
- etdev->MediaState = NETIF_STATUS_MEDIA_CONNECT;
-
- spin_unlock_irqrestore(&etdev->Lock, flags);
-
- netif_carrier_on(etdev->netdev);
- } else {
- dev_warn(&etdev->pdev->dev,
- "Link down - cable problem ?\n");
-
- if (etdev->linkspeed == TRUEPHY_SPEED_10MBPS) {
- /* NOTE - Is there a way to query this without
- * TruePHY?
- * && TRU_QueryCoreType(etdev->hTruePhy, 0) ==
- * EMI_TRUEPHY_A13O) {
- */
- u16 Register18;
-
- MiRead(etdev, 0x12, &Register18);
- MiWrite(etdev, 0x12, Register18 | 0x4);
- MiWrite(etdev, 0x10, Register18 | 0x8402);
- MiWrite(etdev, 0x11, Register18 | 511);
- MiWrite(etdev, 0x12, Register18);
- }
-
- /* For the first N seconds of life, we are in "link
- * detection" When we are in this state, we should
- * only report "connected". When the LinkDetection
- * Timer expires, we can report disconnected (handled
- * in the LinkDetectionDPC).
- */
- if ((etdev->MediaState == NETIF_STATUS_MEDIA_DISCONNECT)) {
- spin_lock_irqsave(&etdev->Lock, flags);
- etdev->MediaState =
- NETIF_STATUS_MEDIA_DISCONNECT;
- spin_unlock_irqrestore(&etdev->Lock,
- flags);
-
- netif_carrier_off(etdev->netdev);
- }
-
- etdev->linkspeed = 0;
- etdev->duplex_mode = 0;
-
- /* Free the packets being actively sent & stopped */
- et131x_free_busy_send_packets(etdev);
-
- /* Re-initialize the send structures */
- et131x_init_send(etdev);
-
- /* Reset the RFD list and re-start RU */
- et131x_reset_recv(etdev);
-
- /*
- * Bring the device back to the state it was during
- * init prior to autonegotiation being complete. This
- * way, when we get the auto-neg complete interrupt,
- * we can complete init by calling ConfigMacREGS2.
- */
- et131x_soft_reset(etdev);
-
- /* Setup ET1310 as per the documentation */
- et131x_adapter_setup(etdev);
-
- /* Setup the PHY into coma mode until the cable is
- * plugged back in
- */
- if (etdev->RegistryPhyComa == 1)
- EnablePhyComa(etdev);
- }
- }
-
- if ((bmsr_ints & MI_BMSR_AUTO_NEG_COMPLETE) ||
- (etdev->AiForceDpx == 3 && (bmsr_ints & MI_BMSR_LINK_STATUS))) {
- if ((bmsr & MI_BMSR_AUTO_NEG_COMPLETE) ||
- etdev->AiForceDpx == 3) {
- ET1310_PhyLinkStatus(etdev,
- &link_status, &autoneg_status,
- &speed, &duplex, &mdi_mdix,
- &masterslave, &polarity);
-
- etdev->linkspeed = speed;
- etdev->duplex_mode = duplex;
-
- etdev->boot_coma = 20;
-
- if (etdev->linkspeed == TRUEPHY_SPEED_10MBPS) {
- /*
- * NOTE - Is there a way to query this without
- * TruePHY?
- * && TRU_QueryCoreType(etdev->hTruePhy, 0)==
- * EMI_TRUEPHY_A13O) {
- */
- u16 Register18;
-
- MiRead(etdev, 0x12, &Register18);
- MiWrite(etdev, 0x12, Register18 | 0x4);
- MiWrite(etdev, 0x10, Register18 | 0x8402);
- MiWrite(etdev, 0x11, Register18 | 511);
- MiWrite(etdev, 0x12, Register18);
- }
-
- ConfigFlowControl(etdev);
-
- if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS &&
- etdev->RegistryJumboPacket > 2048)
- ET1310_PhyAndOrReg(etdev, 0x16, 0xcfff,
- 0x2000);
-
- SetRxDmaTimer(etdev);
- ConfigMACRegs2(etdev);
- }
- }
-}
-
-/*
- * The routines which follow provide low-level access to the PHY, and are used
- * primarily by the routines above (although there are a few places elsewhere
- * in the driver where this level of access is required).
- */
-
-static const u16 ConfigPhy[25][2] = {
- /* Reg Value Register */
- /* Addr */
- {0x880B, 0x0926}, /* AfeIfCreg4B1000Msbs */
- {0x880C, 0x0926}, /* AfeIfCreg4B100Msbs */
- {0x880D, 0x0926}, /* AfeIfCreg4B10Msbs */
-
- {0x880E, 0xB4D3}, /* AfeIfCreg4B1000Lsbs */
- {0x880F, 0xB4D3}, /* AfeIfCreg4B100Lsbs */
- {0x8810, 0xB4D3}, /* AfeIfCreg4B10Lsbs */
-
- {0x8805, 0xB03E}, /* AfeIfCreg3B1000Msbs */
- {0x8806, 0xB03E}, /* AfeIfCreg3B100Msbs */
- {0x8807, 0xFF00}, /* AfeIfCreg3B10Msbs */
-
- {0x8808, 0xE090}, /* AfeIfCreg3B1000Lsbs */
- {0x8809, 0xE110}, /* AfeIfCreg3B100Lsbs */
- {0x880A, 0x0000}, /* AfeIfCreg3B10Lsbs */
-
- {0x300D, 1}, /* DisableNorm */
-
- {0x280C, 0x0180}, /* LinkHoldEnd */
-
- {0x1C21, 0x0002}, /* AlphaM */
-
- {0x3821, 6}, /* FfeLkgTx0 */
- {0x381D, 1}, /* FfeLkg1g4 */
- {0x381E, 1}, /* FfeLkg1g5 */
- {0x381F, 1}, /* FfeLkg1g6 */
- {0x3820, 1}, /* FfeLkg1g7 */
-
- {0x8402, 0x01F0}, /* Btinact */
- {0x800E, 20}, /* LftrainTime */
- {0x800F, 24}, /* DvguardTime */
- {0x8010, 46}, /* IdlguardTime */
-
- {0, 0}
-
-};
-
-/* condensed version of the phy initialization routine */
-void ET1310_PhyInit(struct et131x_adapter *etdev)
-{
- u16 data, index;
-
- if (etdev == NULL)
- return;
-
- /* get the identity (again ?) */
- MiRead(etdev, PHY_ID_1, &data);
- MiRead(etdev, PHY_ID_2, &data);
-
- /* what does this do/achieve ? */
- MiRead(etdev, PHY_MPHY_CONTROL_REG, &data); /* should read 0002 */
- MiWrite(etdev, PHY_MPHY_CONTROL_REG, 0x0006);
-
- /* read modem register 0402, should I do something with the return
- data ? */
- MiWrite(etdev, PHY_INDEX_REG, 0x0402);
- MiRead(etdev, PHY_DATA_REG, &data);
-
- /* what does this do/achieve ? */
- MiWrite(etdev, PHY_MPHY_CONTROL_REG, 0x0002);
-
- /* get the identity (again ?) */
- MiRead(etdev, PHY_ID_1, &data);
- MiRead(etdev, PHY_ID_2, &data);
-
- /* what does this achieve ? */
- MiRead(etdev, PHY_MPHY_CONTROL_REG, &data); /* should read 0002 */
- MiWrite(etdev, PHY_MPHY_CONTROL_REG, 0x0006);
-
- /* read modem register 0402, should I do something with
- the return data? */
- MiWrite(etdev, PHY_INDEX_REG, 0x0402);
- MiRead(etdev, PHY_DATA_REG, &data);
-
- MiWrite(etdev, PHY_MPHY_CONTROL_REG, 0x0002);
-
- /* what does this achieve (should return 0x1040) */
- MiRead(etdev, PHY_CONTROL, &data);
- MiRead(etdev, PHY_MPHY_CONTROL_REG, &data); /* should read 0002 */
- MiWrite(etdev, PHY_CONTROL, 0x1840);
-
- MiWrite(etdev, PHY_MPHY_CONTROL_REG, 0x0007);
-
- /* here the writing of the array starts.... */
- index = 0;
- while (ConfigPhy[index][0] != 0x0000) {
- /* write value */
- MiWrite(etdev, PHY_INDEX_REG, ConfigPhy[index][0]);
- MiWrite(etdev, PHY_DATA_REG, ConfigPhy[index][1]);
-
- /* read it back */
- MiWrite(etdev, PHY_INDEX_REG, ConfigPhy[index][0]);
- MiRead(etdev, PHY_DATA_REG, &data);
-
- /* do a check on the value read back ? */
- index++;
- }
- /* here the writing of the array ends... */
-
- MiRead(etdev, PHY_CONTROL, &data); /* 0x1840 */
- MiRead(etdev, PHY_MPHY_CONTROL_REG, &data);/* should read 0007 */
- MiWrite(etdev, PHY_CONTROL, 0x1040);
- MiWrite(etdev, PHY_MPHY_CONTROL_REG, 0x0002);
-}
-
diff --git a/drivers/staging/et131x/et1310_phy.h b/drivers/staging/et131x/et1310_phy.h
deleted file mode 100644
index 6b38a3e0cab..00000000000
--- a/drivers/staging/et131x/et1310_phy.h
+++ /dev/null
@@ -1,458 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et1310_phy.h - Defines, structs, enums, prototypes, etc. pertaining to the
- * PHY.
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#ifndef _ET1310_PHY_H_
-#define _ET1310_PHY_H_
-
-#include "et1310_address_map.h"
-
-/* MI Register Addresses */
-#define MI_CONTROL_REG 0
-#define MI_STATUS_REG 1
-#define MI_PHY_IDENTIFIER_1_REG 2
-#define MI_PHY_IDENTIFIER_2_REG 3
-#define MI_AUTONEG_ADVERTISEMENT_REG 4
-#define MI_AUTONEG_LINK_PARTNER_ABILITY_REG 5
-#define MI_AUTONEG_EXPANSION_REG 6
-#define MI_AUTONEG_NEXT_PAGE_TRANSMIT_REG 7
-#define MI_LINK_PARTNER_NEXT_PAGE_REG 8
-#define MI_1000BASET_CONTROL_REG 9
-#define MI_1000BASET_STATUS_REG 10
-#define MI_RESERVED11_REG 11
-#define MI_RESERVED12_REG 12
-#define MI_RESERVED13_REG 13
-#define MI_RESERVED14_REG 14
-#define MI_EXTENDED_STATUS_REG 15
-
-/* VMI Register Addresses */
-#define VMI_RESERVED16_REG 16
-#define VMI_RESERVED17_REG 17
-#define VMI_RESERVED18_REG 18
-#define VMI_LOOPBACK_CONTROL_REG 19
-#define VMI_RESERVED20_REG 20
-#define VMI_MI_CONTROL_REG 21
-#define VMI_PHY_CONFIGURATION_REG 22
-#define VMI_PHY_CONTROL_REG 23
-#define VMI_INTERRUPT_MASK_REG 24
-#define VMI_INTERRUPT_STATUS_REG 25
-#define VMI_PHY_STATUS_REG 26
-#define VMI_LED_CONTROL_1_REG 27
-#define VMI_LED_CONTROL_2_REG 28
-#define VMI_RESERVED29_REG 29
-#define VMI_RESERVED30_REG 30
-#define VMI_RESERVED31_REG 31
-
-/* PHY Register Mapping(MI) Management Interface Regs */
-struct mi_regs {
- u8 bmcr; /* Basic mode control reg(Reg 0x00) */
- u8 bmsr; /* Basic mode status reg(Reg 0x01) */
- u8 idr1; /* Phy identifier reg 1(Reg 0x02) */
- u8 idr2; /* Phy identifier reg 2(Reg 0x03) */
- u8 anar; /* Auto-Negotiation advertisement(Reg 0x04) */
- u8 anlpar; /* Auto-Negotiation link Partner Ability(Reg 0x05) */
- u8 aner; /* Auto-Negotiation expansion reg(Reg 0x06) */
- u8 annptr; /* Auto-Negotiation next page transmit reg(Reg 0x07) */
- u8 lpnpr; /* link partner next page reg(Reg 0x08) */
- u8 gcr; /* Gigabit basic mode control reg(Reg 0x09) */
- u8 gsr; /* Gigabit basic mode status reg(Reg 0x0A) */
- u8 mi_res1[4]; /* Future use by MI working group(Reg 0x0B - 0x0E) */
- u8 esr; /* Extended status reg(Reg 0x0F) */
- u8 mi_res2[3]; /* Future use by MI working group(Reg 0x10 - 0x12) */
- u8 loop_ctl; /* Loopback Control Reg(Reg 0x13) */
- u8 mi_res3; /* Future use by MI working group(Reg 0x14) */
- u8 mcr; /* MI Control Reg(Reg 0x15) */
- u8 pcr; /* Configuration Reg(Reg 0x16) */
- u8 phy_ctl; /* PHY Control Reg(Reg 0x17) */
- u8 imr; /* Interrupt Mask Reg(Reg 0x18) */
- u8 isr; /* Interrupt Status Reg(Reg 0x19) */
- u8 psr; /* PHY Status Reg(Reg 0x1A) */
- u8 lcr1; /* LED Control 1 Reg(Reg 0x1B) */
- u8 lcr2; /* LED Control 2 Reg(Reg 0x1C) */
- u8 mi_res4[3]; /* Future use by MI working group(Reg 0x1D - 0x1F) */
-};
-
-/*
- * MI Register 0: Basic mode control register
- * 15: reset
- * 14: loopback
- * 13: speed_sel
- * 12: enable_autoneg
- * 11: power_down
- * 10: isolate
- * 9: restart_autoneg
- * 8: duplex_mode
- * 7: col_test
- * 6: speed_1000_sel
- * 5-0: res1
- */
-
-/*
- * MI Register 1: Basic mode status register
- * 15: link_100T4
- * 14: link_100fdx
- * 13: link_100hdx
- * 12: link_10fdx
- * 11: link_10hdx
- * 10: link_100T2fdx
- * 9: link_100T2hdx
- * 8: extend_status
- * 7: res1
- * 6: preamble_supress
- * 5: auto_neg_complete
- * 4: remote_fault
- * 3: auto_neg_able
- * 2: link_status
- * 1: jabber_detect
- * 0: ext_cap
- */
-
-#define MI_BMSR_LINK_STATUS 0x04
-#define MI_BMSR_AUTO_NEG_COMPLETE 0x20
-
-/*
- * MI Register 4: Auto-negotiation advertisement register
- *
- * 15: np_indication
- * 14: res2
- * 13: remote_fault
- * 12: res1
- * 11: cap_asmpause
- * 10: cap_pause
- * 9: cap_100T4
- * 8: cap_100fdx
- * 7: cap_100hdx
- * 6: cap_10fdx
- * 5: cap_10hdx
- * 4-0: selector
- */
-
-/* MI Register 5: Auto-negotiation link partner advertisement register
- * 15: np_indication
- * 14: acknowledge
- * 13: remote_fault
- * 12: res1
- * 11: cap_asmpause
- * 10: cap_pause
- * 9: cap_100T4
- * 8: cap_100fdx
- * 7: cap_100hdx
- * 6: cap_10fdx
- * 5: cap_10hdx
- * 4-0: selector
- */
-
-/* MI Register 6: Auto-negotiation expansion register
- * 15-5: reserved
- * 4: pdf
- * 3: lp_np_able
- * 2: np_able
- * 1: page_rx
- * 0: lp_an_able
- */
-
-/* MI Register 7: Auto-negotiation next page transmit reg(0x07)
- * 15: np
- * 14: reserved
- * 13: msg_page
- * 12: ack2
- * 11: toggle
- * 10-0 msg
- */
-
-/* MI Register 8: Link Partner Next Page Reg(0x08)
- * 15: np
- * 14: ack
- * 13: msg_page
- * 12: ack2
- * 11: toggle
- * 10-0: msg
- */
-
-/* MI Register 9: 1000BaseT Control Reg(0x09)
- * 15-13: test_mode
- * 12: ms_config_en
- * 11: ms_value
- * 10: port_type
- * 9: link_1000fdx
- * 8: link_1000hdx
- * 7-0: reserved
- */
-
-/* MI Register 10: 1000BaseT Status Reg(0x0A)
- * 15: ms_config_fault
- * 14: ms_resolve
- * 13: local_rx_status
- * 12: remote_rx_status
- * 11: link_1000fdx
- * 10: link_1000hdx
- * 9-8: reserved
- * 7-0: idle_err_cnt
- */
-
-/* MI Register 11 - 14: Reserved Regs(0x0B - 0x0E) */
-
-/* MI Register 15: Extended status Reg(0x0F)
- * 15: link_1000Xfdx
- * 14: link_1000Xhdx
- * 13: link_1000fdx
- * 12: link_1000hdx
- * 11-0: reserved
- */
-
-/* MI Register 16 - 18: Reserved Reg(0x10-0x12) */
-
-/* MI Register 19: Loopback Control Reg(0x13)
- * 15: mii_en
- * 14: pcs_en
- * 13: pmd_en
- * 12: all_digital_en
- * 11: replica_en
- * 10: line_driver_en
- * 9-0: reserved
- */
-
-/* MI Register 20: Reserved Reg(0x14) */
-
-/* MI Register 21: Management Interface Control Reg(0x15)
- * 15-11: reserved
- * 10-4: mi_error_count
- * 3: reserved
- * 2: ignore_10g_fr
- * 1: reserved
- * 0: preamble_supress_en
- */
-
-/* MI Register 22: PHY Configuration Reg(0x16)
- * 15: crs_tx_en
- * 14: reserved
- * 13-12: tx_fifo_depth
- * 11-10: speed_downshift
- * 9: pbi_detect
- * 8: tbi_rate
- * 7: alternate_np
- * 6: group_mdio_en
- * 5: tx_clock_en
- * 4: sys_clock_en
- * 3: reserved
- * 2-0: mac_if_mode
- */
-
-/* MI Register 23: PHY CONTROL Reg(0x17)
- * 15: reserved
- * 14: tdr_en
- * 13: reserved
- * 12-11: downshift_attempts
- * 10-6: reserved
- * 5: jabber_10baseT
- * 4: sqe_10baseT
- * 3: tp_loopback_10baseT
- * 2: preamble_gen_en
- * 1: reserved
- * 0: force_int
- */
-
-/* MI Register 24: Interrupt Mask Reg(0x18)
- * 15-10: reserved
- * 9: mdio_sync_lost
- * 8: autoneg_status
- * 7: hi_bit_err
- * 6: np_rx
- * 5: err_counter_full
- * 4: fifo_over_underflow
- * 3: rx_status
- * 2: link_status
- * 1: automatic_speed
- * 0: int_en
- */
-
-
-/* MI Register 25: Interrupt Status Reg(0x19)
- * 15-10: reserved
- * 9: mdio_sync_lost
- * 8: autoneg_status
- * 7: hi_bit_err
- * 6: np_rx
- * 5: err_counter_full
- * 4: fifo_over_underflow
- * 3: rx_status
- * 2: link_status
- * 1: automatic_speed
- * 0: int_en
- */
-
-/* MI Register 26: PHY Status Reg(0x1A)
- * 15: reserved
- * 14-13: autoneg_fault
- * 12: autoneg_status
- * 11: mdi_x_status
- * 10: polarity_status
- * 9-8: speed_status
- * 7: duplex_status
- * 6: link_status
- * 5: tx_status
- * 4: rx_status
- * 3: collision_status
- * 2: autoneg_en
- * 1: pause_en
- * 0: asymmetric_dir
- */
-
-/* MI Register 27: LED Control Reg 1(0x1B)
- * 15-14: reserved
- * 13-12: led_dup_indicate
- * 11-10: led_10baseT
- * 9-8: led_collision
- * 7-4: reserved
- * 3-2: pulse_dur
- * 1: pulse_stretch1
- * 0: pulse_stretch0
- */
-
-/* MI Register 28: LED Control Reg 2(0x1C)
- * 15-12: led_link
- * 11-8: led_tx_rx
- * 7-4: led_100BaseTX
- * 3-0: led_1000BaseT
- */
-
-/* MI Register 29 - 31: Reserved Reg(0x1D - 0x1E) */
-
-
-/* Prototypes for ET1310_phy.c */
-/* Defines for PHY access routines */
-
-/* Define bit operation flags */
-#define TRUEPHY_BIT_CLEAR 0
-#define TRUEPHY_BIT_SET 1
-#define TRUEPHY_BIT_READ 2
-
-/* Define read/write operation flags */
-#ifndef TRUEPHY_READ
-#define TRUEPHY_READ 0
-#define TRUEPHY_WRITE 1
-#define TRUEPHY_MASK 2
-#endif
-
-/* Define speeds */
-#define TRUEPHY_SPEED_10MBPS 0
-#define TRUEPHY_SPEED_100MBPS 1
-#define TRUEPHY_SPEED_1000MBPS 2
-
-/* Define duplex modes */
-#define TRUEPHY_DUPLEX_HALF 0
-#define TRUEPHY_DUPLEX_FULL 1
-
-/* Define master/slave configuration values */
-#define TRUEPHY_CFG_SLAVE 0
-#define TRUEPHY_CFG_MASTER 1
-
-/* Define MDI/MDI-X settings */
-#define TRUEPHY_MDI 0
-#define TRUEPHY_MDIX 1
-#define TRUEPHY_AUTO_MDI_MDIX 2
-
-/* Define 10Base-T link polarities */
-#define TRUEPHY_POLARITY_NORMAL 0
-#define TRUEPHY_POLARITY_INVERTED 1
-
-/* Define auto-negotiation results */
-#define TRUEPHY_ANEG_NOT_COMPLETE 0
-#define TRUEPHY_ANEG_COMPLETE 1
-#define TRUEPHY_ANEG_DISABLED 2
-
-/* Define duplex advertisement flags */
-#define TRUEPHY_ADV_DUPLEX_NONE 0x00
-#define TRUEPHY_ADV_DUPLEX_FULL 0x01
-#define TRUEPHY_ADV_DUPLEX_HALF 0x02
-#define TRUEPHY_ADV_DUPLEX_BOTH \
- (TRUEPHY_ADV_DUPLEX_FULL | TRUEPHY_ADV_DUPLEX_HALF)
-
-#define PHY_CONTROL 0x00 /* #define TRU_MI_CONTROL_REGISTER 0 */
-#define PHY_STATUS 0x01 /* #define TRU_MI_STATUS_REGISTER 1 */
-#define PHY_ID_1 0x02 /* #define TRU_MI_PHY_IDENTIFIER_1_REGISTER 2 */
-#define PHY_ID_2 0x03 /* #define TRU_MI_PHY_IDENTIFIER_2_REGISTER 3 */
-#define PHY_AUTO_ADVERTISEMENT 0x04 /* #define TRU_MI_ADVERTISEMENT_REGISTER 4 */
-#define PHY_AUTO_LINK_PARTNER 0x05 /* #define TRU_MI_LINK_PARTNER_ABILITY_REGISTER 5 */
-#define PHY_AUTO_EXPANSION 0x06 /* #define TRU_MI_EXPANSION_REGISTER 6 */
-#define PHY_AUTO_NEXT_PAGE_TX 0x07 /* #define TRU_MI_NEXT_PAGE_TRANSMIT_REGISTER 7 */
-#define PHY_LINK_PARTNER_NEXT_PAGE 0x08 /* #define TRU_MI_LINK_PARTNER_NEXT_PAGE_REGISTER 8 */
-#define PHY_1000_CONTROL 0x09 /* #define TRU_MI_1000BASET_CONTROL_REGISTER 9 */
-#define PHY_1000_STATUS 0x0A /* #define TRU_MI_1000BASET_STATUS_REGISTER 10 */
-
-#define PHY_EXTENDED_STATUS 0x0F /* #define TRU_MI_EXTENDED_STATUS_REGISTER 15 */
-
-/* some defines for modem registers that seem to be 'reserved' */
-#define PHY_INDEX_REG 0x10
-#define PHY_DATA_REG 0x11
-
-#define PHY_MPHY_CONTROL_REG 0x12 /* #define TRU_VMI_MPHY_CONTROL_REGISTER 18 */
-
-#define PHY_LOOPBACK_CONTROL 0x13 /* #define TRU_VMI_LOOPBACK_CONTROL_1_REGISTER 19 */
- /* #define TRU_VMI_LOOPBACK_CONTROL_2_REGISTER 20 */
-#define PHY_REGISTER_MGMT_CONTROL 0x15 /* #define TRU_VMI_MI_SEQ_CONTROL_REGISTER 21 */
-#define PHY_CONFIG 0x16 /* #define TRU_VMI_CONFIGURATION_REGISTER 22 */
-#define PHY_PHY_CONTROL 0x17 /* #define TRU_VMI_PHY_CONTROL_REGISTER 23 */
-#define PHY_INTERRUPT_MASK 0x18 /* #define TRU_VMI_INTERRUPT_MASK_REGISTER 24 */
-#define PHY_INTERRUPT_STATUS 0x19 /* #define TRU_VMI_INTERRUPT_STATUS_REGISTER 25 */
-#define PHY_PHY_STATUS 0x1A /* #define TRU_VMI_PHY_STATUS_REGISTER 26 */
-#define PHY_LED_1 0x1B /* #define TRU_VMI_LED_CONTROL_1_REGISTER 27 */
-#define PHY_LED_2 0x1C /* #define TRU_VMI_LED_CONTROL_2_REGISTER 28 */
- /* #define TRU_VMI_LINK_CONTROL_REGISTER 29 */
- /* #define TRU_VMI_TIMING_CONTROL_REGISTER */
-
-#endif /* _ET1310_PHY_H_ */
diff --git a/drivers/staging/et131x/et1310_pm.c b/drivers/staging/et131x/et1310_pm.c
deleted file mode 100644
index 29d4d66d345..00000000000
--- a/drivers/staging/et131x/et1310_pm.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et1310_pm.c - All power management related code (not completely implemented)
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#include "et131x_version.h"
-#include "et131x_defs.h"
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <asm/system.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-
-#include "et1310_phy.h"
-#include "et1310_rx.h"
-#include "et131x_adapter.h"
-#include "et131x.h"
-
-/**
- * EnablePhyComa - called when network cable is unplugged
- * @etdev: pointer to our adapter structure
- *
- * driver receive an phy status change interrupt while in D0 and check that
- * phy_status is down.
- *
- * -- gate off JAGCore;
- * -- set gigE PHY in Coma mode
- * -- wake on phy_interrupt; Perform software reset JAGCore,
- * re-initialize jagcore and gigE PHY
- *
- * Add D0-ASPM-PhyLinkDown Support:
- * -- while in D0, when there is a phy_interrupt indicating phy link
- * down status, call the MPSetPhyComa routine to enter this active
- * state power saving mode
- * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
- * indicating linkup status, call the MPDisablePhyComa routine to
- * restore JAGCore and gigE PHY
- */
-void EnablePhyComa(struct et131x_adapter *etdev)
-{
- unsigned long flags;
- u32 pmcsr;
-
- pmcsr = readl(&etdev->regs->global.pm_csr);
-
- /* Save the GbE PHY speed and duplex modes. Need to restore this
- * when cable is plugged back in
- */
- etdev->pdown_speed = etdev->AiForceSpeed;
- etdev->pdown_duplex = etdev->AiForceDpx;
-
- /* Stop sending packets. */
- spin_lock_irqsave(&etdev->send_hw_lock, flags);
- etdev->flags |= fMP_ADAPTER_LOWER_POWER;
- spin_unlock_irqrestore(&etdev->send_hw_lock, flags);
-
- /* Wait for outstanding Receive packets */
-
- /* Gate off JAGCore 3 clock domains */
- pmcsr &= ~ET_PMCSR_INIT;
- writel(pmcsr, &etdev->regs->global.pm_csr);
-
- /* Program gigE PHY in to Coma mode */
- pmcsr |= ET_PM_PHY_SW_COMA;
- writel(pmcsr, &etdev->regs->global.pm_csr);
-}
-
-/**
- * DisablePhyComa - Disable the Phy Coma Mode
- * @etdev: pointer to our adapter structure
- */
-void DisablePhyComa(struct et131x_adapter *etdev)
-{
- u32 pmcsr;
-
- pmcsr = readl(&etdev->regs->global.pm_csr);
-
- /* Disable phy_sw_coma register and re-enable JAGCore clocks */
- pmcsr |= ET_PMCSR_INIT;
- pmcsr &= ~ET_PM_PHY_SW_COMA;
- writel(pmcsr, &etdev->regs->global.pm_csr);
-
- /* Restore the GbE PHY speed and duplex modes;
- * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
- */
- etdev->AiForceSpeed = etdev->pdown_speed;
- etdev->AiForceDpx = etdev->pdown_duplex;
-
- /* Re-initialize the send structures */
- et131x_init_send(etdev);
-
- /* Reset the RFD list and re-start RU */
- et131x_reset_recv(etdev);
-
- /* Bring the device back to the state it was during init prior to
- * autonegotiation being complete. This way, when we get the auto-neg
- * complete interrupt, we can complete init by calling ConfigMacREGS2.
- */
- et131x_soft_reset(etdev);
-
- /* setup et1310 as per the documentation ?? */
- et131x_adapter_setup(etdev);
-
- /* Allow Tx to restart */
- etdev->flags &= ~fMP_ADAPTER_LOWER_POWER;
-
- /* Need to re-enable Rx. */
- et131x_rx_dma_enable(etdev);
-}
-
diff --git a/drivers/staging/et131x/et1310_rx.c b/drivers/staging/et131x/et1310_rx.c
deleted file mode 100644
index 7e386e07ff9..00000000000
--- a/drivers/staging/et131x/et1310_rx.c
+++ /dev/null
@@ -1,1152 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et1310_rx.c - Routines used to perform data reception
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#include "et131x_version.h"
-#include "et131x_defs.h"
-
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <asm/system.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-
-#include "et1310_phy.h"
-#include "et131x_adapter.h"
-#include "et1310_rx.h"
-#include "et131x.h"
-
-static inline u32 bump_fbr(u32 *fbr, u32 limit)
-{
- u32 v = *fbr;
- v++;
- /* This works for all cases where limit < 1024. The 1023 case
- works because 1023++ is 1024 which means the if condition is not
- taken but the carry of the bit into the wrap bit toggles the wrap
- value correctly */
- if ((v & ET_DMA10_MASK) > limit) {
- v &= ~ET_DMA10_MASK;
- v ^= ET_DMA10_WRAP;
- }
- /* For the 1023 case */
- v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
- *fbr = v;
- return v;
-}
-
-/**
- * et131x_rx_dma_memory_alloc
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success and errno on failure (as defined in errno.h)
- *
- * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
- * and the Packet Status Ring.
- */
-int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
-{
- u32 i, j;
- u32 bufsize;
- u32 pktStatRingSize, FBRChunkSize;
- struct rx_ring *rx_ring;
-
- /* Setup some convenience pointers */
- rx_ring = &adapter->rx_ring;
-
- /* Alloc memory for the lookup table */
-#ifdef USE_FBR0
- rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
-#endif
- rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
-
- /* The first thing we will do is configure the sizes of the buffer
- * rings. These will change based on jumbo packet support. Larger
- * jumbo packets increases the size of each entry in FBR0, and the
- * number of entries in FBR0, while at the same time decreasing the
- * number of entries in FBR1.
- *
- * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
- * entries are huge in order to accommodate a "jumbo" frame, then it
- * will have less entries. Conversely, FBR1 will now be relied upon
- * to carry more "normal" frames, thus it's entry size also increases
- * and the number of entries goes up too (since it now carries
- * "small" + "regular" packets.
- *
- * In this scheme, we try to maintain 512 entries between the two
- * rings. Also, FBR1 remains a constant size - when it's size doubles
- * the number of entries halves. FBR0 increases in size, however.
- */
-
- if (adapter->RegistryJumboPacket < 2048) {
-#ifdef USE_FBR0
- rx_ring->Fbr0BufferSize = 256;
- rx_ring->Fbr0NumEntries = 512;
-#endif
- rx_ring->Fbr1BufferSize = 2048;
- rx_ring->Fbr1NumEntries = 512;
- } else if (adapter->RegistryJumboPacket < 4096) {
-#ifdef USE_FBR0
- rx_ring->Fbr0BufferSize = 512;
- rx_ring->Fbr0NumEntries = 1024;
-#endif
- rx_ring->Fbr1BufferSize = 4096;
- rx_ring->Fbr1NumEntries = 512;
- } else {
-#ifdef USE_FBR0
- rx_ring->Fbr0BufferSize = 1024;
- rx_ring->Fbr0NumEntries = 768;
-#endif
- rx_ring->Fbr1BufferSize = 16384;
- rx_ring->Fbr1NumEntries = 128;
- }
-
-#ifdef USE_FBR0
- adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr0NumEntries +
- adapter->rx_ring.Fbr1NumEntries;
-#else
- adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr1NumEntries;
-#endif
-
- /* Allocate an area of memory for Free Buffer Ring 1 */
- bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries) + 0xfff;
- rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
- bufsize,
- &rx_ring->pFbr1RingPa);
- if (!rx_ring->pFbr1RingVa) {
- dev_err(&adapter->pdev->dev,
- "Cannot alloc memory for Free Buffer Ring 1\n");
- return -ENOMEM;
- }
-
- /* Save physical address
- *
- * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
- * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
- * are ever returned, make sure the high part is retrieved here
- * before storing the adjusted address.
- */
- rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;
-
- /* Align Free Buffer Ring 1 on a 4K boundary */
- et131x_align_allocated_memory(adapter,
- &rx_ring->Fbr1Realpa,
- &rx_ring->Fbr1offset, 0x0FFF);
-
- rx_ring->pFbr1RingVa = (void *)((u8 *) rx_ring->pFbr1RingVa +
- rx_ring->Fbr1offset);
-
-#ifdef USE_FBR0
- /* Allocate an area of memory for Free Buffer Ring 0 */
- bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries) + 0xfff;
- rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
- bufsize,
- &rx_ring->pFbr0RingPa);
- if (!rx_ring->pFbr0RingVa) {
- dev_err(&adapter->pdev->dev,
- "Cannot alloc memory for Free Buffer Ring 0\n");
- return -ENOMEM;
- }
-
- /* Save physical address
- *
- * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
- * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
- * are ever returned, make sure the high part is retrieved here before
- * storing the adjusted address.
- */
- rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;
-
- /* Align Free Buffer Ring 0 on a 4K boundary */
- et131x_align_allocated_memory(adapter,
- &rx_ring->Fbr0Realpa,
- &rx_ring->Fbr0offset, 0x0FFF);
-
- rx_ring->pFbr0RingVa = (void *)((u8 *) rx_ring->pFbr0RingVa +
- rx_ring->Fbr0offset);
-#endif
-
- for (i = 0; i < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
- i++) {
- u64 Fbr1Offset;
- u64 Fbr1TempPa;
- u32 Fbr1Align;
-
- /* This code allocates an area of memory big enough for N
- * free buffers + (buffer_size - 1) so that the buffers can
- * be aligned on 4k boundaries. If each buffer were aligned
- * to a buffer_size boundary, the effect would be to double
- * the size of FBR0. By allocating N buffers at once, we
- * reduce this overhead.
- */
- if (rx_ring->Fbr1BufferSize > 4096)
- Fbr1Align = 4096;
- else
- Fbr1Align = rx_ring->Fbr1BufferSize;
-
- FBRChunkSize =
- (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
- rx_ring->Fbr1MemVa[i] =
- pci_alloc_consistent(adapter->pdev, FBRChunkSize,
- &rx_ring->Fbr1MemPa[i]);
-
- if (!rx_ring->Fbr1MemVa[i]) {
- dev_err(&adapter->pdev->dev,
- "Could not alloc memory\n");
- return -ENOMEM;
- }
-
- /* See NOTE in "Save Physical Address" comment above */
- Fbr1TempPa = rx_ring->Fbr1MemPa[i];
-
- et131x_align_allocated_memory(adapter,
- &Fbr1TempPa,
- &Fbr1Offset, (Fbr1Align - 1));
-
- for (j = 0; j < FBR_CHUNKS; j++) {
- u32 index = (i * FBR_CHUNKS) + j;
-
- /* Save the Virtual address of this index for quick
- * access later
- */
- rx_ring->fbr[1]->virt[index] =
- (u8 *) rx_ring->Fbr1MemVa[i] +
- (j * rx_ring->Fbr1BufferSize) + Fbr1Offset;
-
- /* now store the physical address in the descriptor
- * so the device can access it
- */
- rx_ring->fbr[1]->bus_high[index] =
- (u32) (Fbr1TempPa >> 32);
- rx_ring->fbr[1]->bus_low[index] = (u32) Fbr1TempPa;
-
- Fbr1TempPa += rx_ring->Fbr1BufferSize;
-
- rx_ring->fbr[1]->buffer1[index] =
- rx_ring->fbr[1]->virt[index];
- rx_ring->fbr[1]->buffer2[index] =
- rx_ring->fbr[1]->virt[index] - 4;
- }
- }
-
-#ifdef USE_FBR0
- /* Same for FBR0 (if in use) */
- for (i = 0; i < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
- i++) {
- u64 Fbr0Offset;
- u64 Fbr0TempPa;
-
- FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
- rx_ring->Fbr0MemVa[i] =
- pci_alloc_consistent(adapter->pdev, FBRChunkSize,
- &rx_ring->Fbr0MemPa[i]);
-
- if (!rx_ring->Fbr0MemVa[i]) {
- dev_err(&adapter->pdev->dev,
- "Could not alloc memory\n");
- return -ENOMEM;
- }
-
- /* See NOTE in "Save Physical Address" comment above */
- Fbr0TempPa = rx_ring->Fbr0MemPa[i];
-
- et131x_align_allocated_memory(adapter,
- &Fbr0TempPa,
- &Fbr0Offset,
- rx_ring->Fbr0BufferSize - 1);
-
- for (j = 0; j < FBR_CHUNKS; j++) {
- u32 index = (i * FBR_CHUNKS) + j;
-
- rx_ring->fbr[0]->virt[index] =
- (u8 *) rx_ring->Fbr0MemVa[i] +
- (j * rx_ring->Fbr0BufferSize) + Fbr0Offset;
-
- rx_ring->fbr[0]->bus_high[index] =
- (u32) (Fbr0TempPa >> 32);
- rx_ring->fbr[0]->bus_low[index] = (u32) Fbr0TempPa;
-
- Fbr0TempPa += rx_ring->Fbr0BufferSize;
-
- rx_ring->fbr[0]->buffer1[index] =
- rx_ring->fbr[0]->virt[index];
- rx_ring->fbr[0]->buffer2[index] =
- rx_ring->fbr[0]->virt[index] - 4;
- }
- }
-#endif
-
- /* Allocate an area of memory for FIFO of Packet Status ring entries */
- pktStatRingSize =
- sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;
-
- rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
- pktStatRingSize,
- &rx_ring->pPSRingPa);
-
- if (!rx_ring->pPSRingVa) {
- dev_err(&adapter->pdev->dev,
- "Cannot alloc memory for Packet Status Ring\n");
- return -ENOMEM;
- }
- printk(KERN_INFO "PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);
-
- /*
- * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
- * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
- * are ever returned, make sure the high part is retrieved here before
- * storing the adjusted address.
- */
-
- /* Allocate an area of memory for writeback of status information */
- rx_ring->rx_status_block = pci_alloc_consistent(adapter->pdev,
- sizeof(struct rx_status_block),
- &rx_ring->rx_status_bus);
- if (!rx_ring->rx_status_block) {
- dev_err(&adapter->pdev->dev,
- "Cannot alloc memory for Status Block\n");
- return -ENOMEM;
- }
- rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
- printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
-
- /* Recv
- * pci_pool_create initializes a lookaside list. After successful
- * creation, nonpaged fixed-size blocks can be allocated from and
- * freed to the lookaside list.
- * RFDs will be allocated from this pool.
- */
- rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
- sizeof(struct rfd),
- 0,
- SLAB_CACHE_DMA |
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
-
- /* The RFDs are going to be put on lists later on, so initialize the
- * lists now.
- */
- INIT_LIST_HEAD(&rx_ring->RecvList);
- return 0;
-}
-
-/**
- * et131x_rx_dma_memory_free - Free all memory allocated within this module.
- * @adapter: pointer to our private adapter structure
- */
-void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
-{
- u32 index;
- u32 bufsize;
- u32 pktStatRingSize;
- struct rfd *rfd;
- struct rx_ring *rx_ring;
-
- /* Setup some convenience pointers */
- rx_ring = &adapter->rx_ring;
-
- /* Free RFDs and associated packet descriptors */
- WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);
-
- while (!list_empty(&rx_ring->RecvList)) {
- rfd = (struct rfd *) list_entry(rx_ring->RecvList.next,
- struct rfd, list_node);
-
- list_del(&rfd->list_node);
- rfd->skb = NULL;
- kmem_cache_free(adapter->rx_ring.RecvLookaside, rfd);
- }
-
- /* Free Free Buffer Ring 1 */
- if (rx_ring->pFbr1RingVa) {
- /* First the packet memory */
- for (index = 0; index <
- (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
- if (rx_ring->Fbr1MemVa[index]) {
- u32 Fbr1Align;
-
- if (rx_ring->Fbr1BufferSize > 4096)
- Fbr1Align = 4096;
- else
- Fbr1Align = rx_ring->Fbr1BufferSize;
-
- bufsize =
- (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
- Fbr1Align - 1;
-
- pci_free_consistent(adapter->pdev,
- bufsize,
- rx_ring->Fbr1MemVa[index],
- rx_ring->Fbr1MemPa[index]);
-
- rx_ring->Fbr1MemVa[index] = NULL;
- }
- }
-
- /* Now the FIFO itself */
- rx_ring->pFbr1RingVa = (void *)((u8 *)
- rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);
-
- bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries)
- + 0xfff;
-
- pci_free_consistent(adapter->pdev, bufsize,
- rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);
-
- rx_ring->pFbr1RingVa = NULL;
- }
-
-#ifdef USE_FBR0
- /* Now the same for Free Buffer Ring 0 */
- if (rx_ring->pFbr0RingVa) {
- /* First the packet memory */
- for (index = 0; index <
- (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
- if (rx_ring->Fbr0MemVa[index]) {
- bufsize =
- (rx_ring->Fbr0BufferSize *
- (FBR_CHUNKS + 1)) - 1;
-
- pci_free_consistent(adapter->pdev,
- bufsize,
- rx_ring->Fbr0MemVa[index],
- rx_ring->Fbr0MemPa[index]);
-
- rx_ring->Fbr0MemVa[index] = NULL;
- }
- }
-
- /* Now the FIFO itself */
- rx_ring->pFbr0RingVa = (void *)((u8 *)
- rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);
-
- bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries)
- + 0xfff;
-
- pci_free_consistent(adapter->pdev,
- bufsize,
- rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);
-
- rx_ring->pFbr0RingVa = NULL;
- }
-#endif
-
- /* Free Packet Status Ring */
- if (rx_ring->pPSRingVa) {
- pktStatRingSize =
- sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;
-
- pci_free_consistent(adapter->pdev, pktStatRingSize,
- rx_ring->pPSRingVa, rx_ring->pPSRingPa);
-
- rx_ring->pPSRingVa = NULL;
- }
-
- /* Free area of memory for the writeback of status information */
- if (rx_ring->rx_status_block) {
- pci_free_consistent(adapter->pdev,
- sizeof(struct rx_status_block),
- rx_ring->rx_status_block, rx_ring->rx_status_bus);
- rx_ring->rx_status_block = NULL;
- }
-
- /* Free receive buffer pool */
-
- /* Free receive packet pool */
-
- /* Destroy the lookaside (RFD) pool */
- if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
- kmem_cache_destroy(rx_ring->RecvLookaside);
- adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
- }
-
- /* Free the FBR Lookup Table */
-#ifdef USE_FBR0
- kfree(rx_ring->fbr[0]);
-#endif
-
- kfree(rx_ring->fbr[1]);
-
- /* Reset Counters */
- rx_ring->nReadyRecv = 0;
-}
-
-/**
- * et131x_init_recv - Initialize receive data structures.
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success and errno on failure (as defined in errno.h)
- */
-int et131x_init_recv(struct et131x_adapter *adapter)
-{
- int status = -ENOMEM;
- struct rfd *rfd = NULL;
- u32 rfdct;
- u32 numrfd = 0;
- struct rx_ring *rx_ring;
-
- /* Setup some convenience pointers */
- rx_ring = &adapter->rx_ring;
-
- /* Setup each RFD */
- for (rfdct = 0; rfdct < rx_ring->NumRfd; rfdct++) {
- rfd = kmem_cache_alloc(rx_ring->RecvLookaside,
- GFP_ATOMIC | GFP_DMA);
-
- if (!rfd) {
- dev_err(&adapter->pdev->dev,
- "Couldn't alloc RFD out of kmem_cache\n");
- status = -ENOMEM;
- continue;
- }
-
- rfd->skb = NULL;
-
- /* Add this RFD to the RecvList */
- list_add_tail(&rfd->list_node, &rx_ring->RecvList);
-
- /* Increment both the available RFD's, and the total RFD's. */
- rx_ring->nReadyRecv++;
- numrfd++;
- }
-
- if (numrfd > NIC_MIN_NUM_RFD)
- status = 0;
-
- rx_ring->NumRfd = numrfd;
-
- if (status != 0) {
- kmem_cache_free(rx_ring->RecvLookaside, rfd);
- dev_err(&adapter->pdev->dev,
- "Allocation problems in et131x_init_recv\n");
- }
- return status;
-}
-
-/**
- * ConfigRxDmaRegs - Start of Rx_DMA init sequence
- * @etdev: pointer to our adapter structure
- */
-void ConfigRxDmaRegs(struct et131x_adapter *etdev)
-{
- struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
- struct rx_ring *rx_local = &etdev->rx_ring;
- struct fbr_desc *fbr_entry;
- u32 entry;
- u32 psr_num_des;
- unsigned long flags;
-
- /* Halt RXDMA to perform the reconfigure. */
- et131x_rx_dma_disable(etdev);
-
- /* Load the completion writeback physical address
- *
- * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
- * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
- * are ever returned, make sure the high part is retrieved here
- * before storing the adjusted address.
- */
- writel((u32) ((u64)rx_local->rx_status_bus >> 32),
- &rx_dma->dma_wb_base_hi);
- writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
-
- memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
-
- /* Set the address and parameters of the packet status ring into the
- * 1310's registers
- */
- writel((u32) ((u64)rx_local->pPSRingPa >> 32),
- &rx_dma->psr_base_hi);
- writel((u32) rx_local->pPSRingPa, &rx_dma->psr_base_lo);
- writel(rx_local->PsrNumEntries - 1, &rx_dma->psr_num_des);
- writel(0, &rx_dma->psr_full_offset);
-
- psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
- writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
- &rx_dma->psr_min_des);
-
- spin_lock_irqsave(&etdev->rcv_lock, flags);
-
- /* These local variables track the PSR in the adapter structure */
- rx_local->local_psr_full = 0;
-
- /* Now's the best time to initialize FBR1 contents */
- fbr_entry = (struct fbr_desc *) rx_local->pFbr1RingVa;
- for (entry = 0; entry < rx_local->Fbr1NumEntries; entry++) {
- fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
- fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
- fbr_entry->word2 = entry;
- fbr_entry++;
- }
-
- /* Set the address and parameters of Free buffer ring 1 (and 0 if
- * required) into the 1310's registers
- */
- writel((u32) (rx_local->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
- writel((u32) rx_local->Fbr1Realpa, &rx_dma->fbr1_base_lo);
- writel(rx_local->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des);
- writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
-
- /* This variable tracks the free buffer ring 1 full position, so it
- * has to match the above.
- */
- rx_local->local_Fbr1_full = ET_DMA10_WRAP;
- writel(((rx_local->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
- &rx_dma->fbr1_min_des);
-
-#ifdef USE_FBR0
- /* Now's the best time to initialize FBR0 contents */
- fbr_entry = (struct fbr_desc *) rx_local->pFbr0RingVa;
- for (entry = 0; entry < rx_local->Fbr0NumEntries; entry++) {
- fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
- fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
- fbr_entry->word2 = entry;
- fbr_entry++;
- }
-
- writel((u32) (rx_local->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
- writel((u32) rx_local->Fbr0Realpa, &rx_dma->fbr0_base_lo);
- writel(rx_local->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des);
- writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
-
- /* This variable tracks the free buffer ring 0 full position, so it
- * has to match the above.
- */
- rx_local->local_Fbr0_full = ET_DMA10_WRAP;
- writel(((rx_local->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
- &rx_dma->fbr0_min_des);
-#endif
-
- /* Program the number of packets we will receive before generating an
- * interrupt.
- * For version B silicon, this value gets updated once autoneg is
- *complete.
- */
- writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
-
- /* The "time_done" is not working correctly to coalesce interrupts
- * after a given time period, but rather is giving us an interrupt
- * regardless of whether we have received packets.
- * This value gets updated once autoneg is complete.
- */
- writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
-
- spin_unlock_irqrestore(&etdev->rcv_lock, flags);
-}
-
-/**
- * SetRxDmaTimer - Set the heartbeat timer according to line rate.
- * @etdev: pointer to our adapter structure
- */
-void SetRxDmaTimer(struct et131x_adapter *etdev)
-{
- /* For version B silicon, we do not use the RxDMA timer for 10 and 100
- * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
- */
- if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
- (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
- writel(0, &etdev->regs->rxdma.max_pkt_time);
- writel(1, &etdev->regs->rxdma.num_pkt_done);
- }
-}
-
-/**
- * NICReturnRFD - Recycle a RFD and put it back onto the receive list
- * @etdev: pointer to our adapter
- * @rfd: pointer to the RFD
- */
-void nic_return_rfd(struct et131x_adapter *etdev, struct rfd *rfd)
-{
- struct rx_ring *rx_local = &etdev->rx_ring;
- struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
- u16 bi = rfd->bufferindex;
- u8 ri = rfd->ringindex;
- unsigned long flags;
-
- /* We don't use any of the OOB data besides status. Otherwise, we
- * need to clean up OOB data
- */
- if (
-#ifdef USE_FBR0
- (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
-#endif
- (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
- spin_lock_irqsave(&etdev->FbrLock, flags);
-
- if (ri == 1) {
- struct fbr_desc *next =
- (struct fbr_desc *) (rx_local->pFbr1RingVa) +
- INDEX10(rx_local->local_Fbr1_full);
-
- /* Handle the Free Buffer Ring advancement here. Write
- * the PA / Buffer Index for the returned buffer into
- * the oldest (next to be freed)FBR entry
- */
- next->addr_hi = rx_local->fbr[1]->bus_high[bi];
- next->addr_lo = rx_local->fbr[1]->bus_low[bi];
- next->word2 = bi;
-
- writel(bump_fbr(&rx_local->local_Fbr1_full,
- rx_local->Fbr1NumEntries - 1),
- &rx_dma->fbr1_full_offset);
- }
-#ifdef USE_FBR0
- else {
- struct fbr_desc *next = (struct fbr_desc *)
- rx_local->pFbr0RingVa +
- INDEX10(rx_local->local_Fbr0_full);
-
- /* Handle the Free Buffer Ring advancement here. Write
- * the PA / Buffer Index for the returned buffer into
- * the oldest (next to be freed) FBR entry
- */
- next->addr_hi = rx_local->fbr[0]->bus_high[bi];
- next->addr_lo = rx_local->fbr[0]->bus_low[bi];
- next->word2 = bi;
-
- writel(bump_fbr(&rx_local->local_Fbr0_full,
- rx_local->Fbr0NumEntries - 1),
- &rx_dma->fbr0_full_offset);
- }
-#endif
- spin_unlock_irqrestore(&etdev->FbrLock, flags);
- } else {
- dev_err(&etdev->pdev->dev,
- "NICReturnRFD illegal Buffer Index returned\n");
- }
-
- /* The processing on this RFD is done, so put it back on the tail of
- * our list
- */
- spin_lock_irqsave(&etdev->rcv_lock, flags);
- list_add_tail(&rfd->list_node, &rx_local->RecvList);
- rx_local->nReadyRecv++;
- spin_unlock_irqrestore(&etdev->rcv_lock, flags);
-
- WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
-}
-
-/**
- * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
- * @etdev: pointer to our adapter structure
- */
-void et131x_rx_dma_disable(struct et131x_adapter *etdev)
-{
- u32 csr;
- /* Setup the receive dma configuration register */
- writel(0x00002001, &etdev->regs->rxdma.csr);
- csr = readl(&etdev->regs->rxdma.csr);
- if ((csr & 0x00020000) == 0) { /* Check halt status (bit 17) */
- udelay(5);
- csr = readl(&etdev->regs->rxdma.csr);
- if ((csr & 0x00020000) == 0)
- dev_err(&etdev->pdev->dev,
- "RX Dma failed to enter halt state. CSR 0x%08x\n",
- csr);
- }
-}
-
-/**
- * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
- * @etdev: pointer to our adapter structure
- */
-void et131x_rx_dma_enable(struct et131x_adapter *etdev)
-{
- /* Setup the receive dma configuration register for normal operation */
- u32 csr = 0x2000; /* FBR1 enable */
-
- if (etdev->rx_ring.Fbr1BufferSize == 4096)
- csr |= 0x0800;
- else if (etdev->rx_ring.Fbr1BufferSize == 8192)
- csr |= 0x1000;
- else if (etdev->rx_ring.Fbr1BufferSize == 16384)
- csr |= 0x1800;
-#ifdef USE_FBR0
- csr |= 0x0400; /* FBR0 enable */
- if (etdev->rx_ring.Fbr0BufferSize == 256)
- csr |= 0x0100;
- else if (etdev->rx_ring.Fbr0BufferSize == 512)
- csr |= 0x0200;
- else if (etdev->rx_ring.Fbr0BufferSize == 1024)
- csr |= 0x0300;
-#endif
- writel(csr, &etdev->regs->rxdma.csr);
-
- csr = readl(&etdev->regs->rxdma.csr);
- if ((csr & 0x00020000) != 0) {
- udelay(5);
- csr = readl(&etdev->regs->rxdma.csr);
- if ((csr & 0x00020000) != 0) {
- dev_err(&etdev->pdev->dev,
- "RX Dma failed to exit halt state. CSR 0x%08x\n",
- csr);
- }
- }
-}
-
-/**
- * nic_rx_pkts - Checks the hardware for available packets
- * @etdev: pointer to our adapter
- *
- * Returns rfd, a pointer to our MPRFD.
- *
- * Checks the hardware for available packets, using completion ring
- * If packets are available, it gets an RFD from the RecvList, attaches
- * the packet to it, puts the RFD in the RecvPendList, and also returns
- * the pointer to the RFD.
- */
-struct rfd *nic_rx_pkts(struct et131x_adapter *etdev)
-{
- struct rx_ring *rx_local = &etdev->rx_ring;
- struct rx_status_block *status;
- struct pkt_stat_desc *psr;
- struct rfd *rfd;
- u32 i;
- u8 *buf;
- unsigned long flags;
- struct list_head *element;
- u8 rindex;
- u16 bindex;
- u32 len;
- u32 word0;
- u32 word1;
-
- /* RX Status block is written by the DMA engine prior to every
- * interrupt. It contains the next to be used entry in the Packet
- * Status Ring, and also the two Free Buffer rings.
- */
- status = rx_local->rx_status_block;
- word1 = status->Word1 >> 16; /* Get the useful bits */
-
- /* Check the PSR and wrap bits do not match */
- if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
- /* Looks like this ring is not updated yet */
- return NULL;
-
- /* The packet status ring indicates that data is available. */
- psr = (struct pkt_stat_desc *) (rx_local->pPSRingVa) +
- (rx_local->local_psr_full & 0xFFF);
-
- /* Grab any information that is required once the PSR is
- * advanced, since we can no longer rely on the memory being
- * accurate
- */
- len = psr->word1 & 0xFFFF;
- rindex = (psr->word1 >> 26) & 0x03;
- bindex = (psr->word1 >> 16) & 0x3FF;
- word0 = psr->word0;
-
- /* Indicate that we have used this PSR entry. */
- /* FIXME wrap 12 */
- add_12bit(&rx_local->local_psr_full, 1);
- if ((rx_local->local_psr_full & 0xFFF) > rx_local->PsrNumEntries - 1) {
- /* Clear psr full and toggle the wrap bit */
- rx_local->local_psr_full &= ~0xFFF;
- rx_local->local_psr_full ^= 0x1000;
- }
-
- writel(rx_local->local_psr_full,
- &etdev->regs->rxdma.psr_full_offset);
-
-#ifndef USE_FBR0
- if (rindex != 1)
- return NULL;
-#endif
-
-#ifdef USE_FBR0
- if (rindex > 1 ||
- (rindex == 0 &&
- bindex > rx_local->Fbr0NumEntries - 1) ||
- (rindex == 1 &&
- bindex > rx_local->Fbr1NumEntries - 1))
-#else
- if (rindex != 1 || bindex > rx_local->Fbr1NumEntries - 1)
-#endif
- {
- /* Illegal buffer or ring index cannot be used by S/W*/
- dev_err(&etdev->pdev->dev,
- "NICRxPkts PSR Entry %d indicates "
- "length of %d and/or bad bi(%d)\n",
- rx_local->local_psr_full & 0xFFF,
- len, bindex);
- return NULL;
- }
-
- /* Get and fill the RFD. */
- spin_lock_irqsave(&etdev->rcv_lock, flags);
-
- rfd = NULL;
- element = rx_local->RecvList.next;
- rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
-
- if (rfd == NULL) {
- spin_unlock_irqrestore(&etdev->rcv_lock, flags);
- return NULL;
- }
-
- list_del(&rfd->list_node);
- rx_local->nReadyRecv--;
-
- spin_unlock_irqrestore(&etdev->rcv_lock, flags);
-
- rfd->bufferindex = bindex;
- rfd->ringindex = rindex;
-
- /* In V1 silicon, there is a bug which screws up filtering of
- * runt packets. Therefore runt packet filtering is disabled
- * in the MAC and the packets are dropped here. They are
- * also counted here.
- */
- if (len < (NIC_MIN_PACKET_SIZE + 4)) {
- etdev->stats.other_errors++;
- len = 0;
- }
-
- if (len) {
- if (etdev->ReplicaPhyLoopbk == 1) {
- buf = rx_local->fbr[rindex]->virt[bindex];
-
- if (memcmp(&buf[6], etdev->addr, ETH_ALEN) == 0) {
- if (memcmp(&buf[42], "Replica packet",
- ETH_HLEN)) {
- etdev->ReplicaPhyLoopbkPF = 1;
- }
- }
- }
-
- /* Determine if this is a multicast packet coming in */
- if ((word0 & ALCATEL_MULTICAST_PKT) &&
- !(word0 & ALCATEL_BROADCAST_PKT)) {
- /* Promiscuous mode and Multicast mode are
- * not mutually exclusive as was first
- * thought. I guess Promiscuous is just
- * considered a super-set of the other
- * filters. Generally filter is 0x2b when in
- * promiscuous mode.
- */
- if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
- && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
- && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
- buf = rx_local->fbr[rindex]->
- virt[bindex];
-
- /* Loop through our list to see if the
- * destination address of this packet
- * matches one in our list.
- */
- for (i = 0;
- i < etdev->MCAddressCount;
- i++) {
- if (buf[0] ==
- etdev->MCList[i][0]
- && buf[1] ==
- etdev->MCList[i][1]
- && buf[2] ==
- etdev->MCList[i][2]
- && buf[3] ==
- etdev->MCList[i][3]
- && buf[4] ==
- etdev->MCList[i][4]
- && buf[5] ==
- etdev->MCList[i][5]) {
- break;
- }
- }
-
- /* If our index is equal to the number
- * of Multicast address we have, then
- * this means we did not find this
- * packet's matching address in our
- * list. Set the len to zero,
- * so we free our RFD when we return
- * from this function.
- */
- if (i == etdev->MCAddressCount)
- len = 0;
- }
-
- if (len > 0)
- etdev->stats.multircv++;
- } else if (word0 & ALCATEL_BROADCAST_PKT)
- etdev->stats.brdcstrcv++;
- else
- /* Not sure what this counter measures in
- * promiscuous mode. Perhaps we should check
- * the MAC address to see if it is directed
- * to us in promiscuous mode.
- */
- etdev->stats.unircv++;
- }
-
- if (len > 0) {
- struct sk_buff *skb = NULL;
-
- /*rfd->len = len - 4; */
- rfd->len = len;
-
- skb = dev_alloc_skb(rfd->len + 2);
- if (!skb) {
- dev_err(&etdev->pdev->dev,
- "Couldn't alloc an SKB for Rx\n");
- return NULL;
- }
-
- etdev->net_stats.rx_bytes += rfd->len;
-
- memcpy(skb_put(skb, rfd->len),
- rx_local->fbr[rindex]->virt[bindex],
- rfd->len);
-
- skb->dev = etdev->netdev;
- skb->protocol = eth_type_trans(skb, etdev->netdev);
- skb->ip_summed = CHECKSUM_NONE;
-
- netif_rx(skb);
- } else {
- rfd->len = 0;
- }
-
- nic_return_rfd(etdev, rfd);
- return rfd;
-}
-
-/**
- * et131x_reset_recv - Reset the receive list
- * @etdev: pointer to our adapter
- *
- * Assumption, Rcv spinlock has been acquired.
- */
-void et131x_reset_recv(struct et131x_adapter *etdev)
-{
- WARN_ON(list_empty(&etdev->rx_ring.RecvList));
-
-}
-
-/**
- * et131x_handle_recv_interrupt - Interrupt handler for receive processing
- * @etdev: pointer to our adapter
- *
- * Assumption, Rcv spinlock has been acquired.
- */
-void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
-{
- struct rfd *rfd = NULL;
- u32 count = 0;
- bool done = true;
-
- /* Process up to available RFD's */
- while (count < NUM_PACKETS_HANDLED) {
- if (list_empty(&etdev->rx_ring.RecvList)) {
- WARN_ON(etdev->rx_ring.nReadyRecv != 0);
- done = false;
- break;
- }
-
- rfd = nic_rx_pkts(etdev);
-
- if (rfd == NULL)
- break;
-
- /* Do not receive any packets until a filter has been set.
- * Do not receive any packets until we have link.
- * If length is zero, return the RFD in order to advance the
- * Free buffer ring.
- */
- if (!etdev->PacketFilter ||
- !netif_carrier_ok(etdev->netdev) ||
- rfd->len == 0)
- continue;
-
- /* Increment the number of packets we received */
- etdev->net_stats.rx_packets++;
-
- /* Set the status on the packet, either resources or success */
- if (etdev->rx_ring.nReadyRecv < RFD_LOW_WATER_MARK) {
- dev_warn(&etdev->pdev->dev,
- "RFD's are running out\n");
- }
- count++;
- }
-
- if (count == NUM_PACKETS_HANDLED || !done) {
- etdev->rx_ring.UnfinishedReceives = true;
- writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
- &etdev->regs->global.watchdog_timer);
- } else
- /* Watchdog timer will disable itself if appropriate. */
- etdev->rx_ring.UnfinishedReceives = false;
-}
-
diff --git a/drivers/staging/et131x/et1310_rx.h b/drivers/staging/et131x/et1310_rx.h
deleted file mode 100644
index e8c653d37a7..00000000000
--- a/drivers/staging/et131x/et1310_rx.h
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et1310_rx.h - Defines, structs, enums, prototypes, etc. pertaining to data
- * reception.
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#ifndef __ET1310_RX_H__
-#define __ET1310_RX_H__
-
-#include "et1310_address_map.h"
-
-#define USE_FBR0 true
-
-#ifdef USE_FBR0
-/* #define FBR0_BUFFER_SIZE 256 */
-#endif
-
-/* #define FBR1_BUFFER_SIZE 2048 */
-
-#define FBR_CHUNKS 32
-
-#define MAX_DESC_PER_RING_RX 1024
-
-/* number of RFDs - default and min */
-#ifdef USE_FBR0
-#define RFD_LOW_WATER_MARK 40
-#define NIC_MIN_NUM_RFD 64
-#define NIC_DEFAULT_NUM_RFD 1024
-#else
-#define RFD_LOW_WATER_MARK 20
-#define NIC_MIN_NUM_RFD 64
-#define NIC_DEFAULT_NUM_RFD 256
-#endif
-
-#define NUM_PACKETS_HANDLED 256
-
-#define ALCATEL_BAD_STATUS 0xe47f0000
-#define ALCATEL_MULTICAST_PKT 0x01000000
-#define ALCATEL_BROADCAST_PKT 0x02000000
-
-/* typedefs for Free Buffer Descriptors */
-struct fbr_desc {
- u32 addr_lo;
- u32 addr_hi;
- u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */
-};
-
-/* Packet Status Ring Descriptors
- *
- * Word 0:
- *
- * top 16 bits are from the Alcatel Status Word as enumerated in
- * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
- *
- * 0: hp hash pass
- * 1: ipa IP checksum assist
- * 2: ipp IP checksum pass
- * 3: tcpa TCP checksum assist
- * 4: tcpp TCP checksum pass
- * 5: wol WOL Event
- * 6: rxmac_error RXMAC Error Indicator
- * 7: drop Drop packet
- * 8: ft Frame Truncated
- * 9: jp Jumbo Packet
- * 10: vp VLAN Packet
- * 11-15: unused
- * 16: asw_prev_pkt_dropped e.g. IFG too small on previous
- * 17: asw_RX_DV_event short receive event detected
- * 18: asw_false_carrier_event bad carrier since last good packet
- * 19: asw_code_err one or more nibbles signalled as errors
- * 20: asw_CRC_err CRC error
- * 21: asw_len_chk_err frame length field incorrect
- * 22: asw_too_long frame length > 1518 bytes
- * 23: asw_OK valid CRC + no code error
- * 24: asw_multicast has a multicast address
- * 25: asw_broadcast has a broadcast address
- * 26: asw_dribble_nibble spurious bits after EOP
- * 27: asw_control_frame is a control frame
- * 28: asw_pause_frame is a pause frame
- * 29: asw_unsupported_op unsupported OP code
- * 30: asw_VLAN_tag VLAN tag detected
- * 31: asw_long_evt Rx long event
- *
- * Word 1:
- * 0-15: length length in bytes
- * 16-25: bi Buffer Index
- * 26-27: ri Ring Index
- * 28-31: reserved
- */
-
-struct pkt_stat_desc {
- u32 word0;
- u32 word1;
-};
-
-/* Typedefs for the RX DMA status word */
-
-/*
- * rx status word 0 holds part of the status bits of the Rx DMA engine
- * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
- * which contains the Free Buffer ring 0 and 1 available offset.
- *
- * bit 0-9 FBR1 offset
- * bit 10 Wrap flag for FBR1
- * bit 16-25 FBR0 offset
- * bit 26 Wrap flag for FBR0
- */
-
-/*
- * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
- * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
- * which contains the Packet Status Ring available offset.
- *
- * bit 0-15 reserved
- * bit 16-27 PSRoffset
- * bit 28 PSRwrap
- * bit 29-31 unused
- */
-
-/*
- * struct rx_status_block is a structure representing the status of the Rx
- * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020
- */
-struct rx_status_block {
- u32 Word0;
- u32 Word1;
-};
-
-/*
- * Structure for look-up table holding free buffer ring pointers
- */
-struct fbr_lookup {
- void *virt[MAX_DESC_PER_RING_RX];
- void *buffer1[MAX_DESC_PER_RING_RX];
- void *buffer2[MAX_DESC_PER_RING_RX];
- u32 bus_high[MAX_DESC_PER_RING_RX];
- u32 bus_low[MAX_DESC_PER_RING_RX];
-};
-
-/*
- * struct rx_ring is the ssructure representing the adaptor's local
- * reference(s) to the rings
- */
-struct rx_ring {
-#ifdef USE_FBR0
- void *pFbr0RingVa;
- dma_addr_t pFbr0RingPa;
- void *Fbr0MemVa[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
- dma_addr_t Fbr0MemPa[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
- uint64_t Fbr0Realpa;
- uint64_t Fbr0offset;
- u32 local_Fbr0_full;
- u32 Fbr0NumEntries;
- u32 Fbr0BufferSize;
-#endif
- void *pFbr1RingVa;
- dma_addr_t pFbr1RingPa;
- void *Fbr1MemVa[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
- dma_addr_t Fbr1MemPa[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
- uint64_t Fbr1Realpa;
- uint64_t Fbr1offset;
- struct fbr_lookup *fbr[2]; /* One per ring */
- u32 local_Fbr1_full;
- u32 Fbr1NumEntries;
- u32 Fbr1BufferSize;
-
- void *pPSRingVa;
- dma_addr_t pPSRingPa;
- u32 local_psr_full;
- u32 PsrNumEntries;
-
- struct rx_status_block *rx_status_block;
- dma_addr_t rx_status_bus;
-
- struct list_head RecvBufferPool;
-
- /* RECV */
- struct list_head RecvList;
- u32 nReadyRecv;
-
- u32 NumRfd;
-
- bool UnfinishedReceives;
-
- struct list_head RecvPacketPool;
-
- /* lookaside lists */
- struct kmem_cache *RecvLookaside;
-};
-
-#endif /* __ET1310_RX_H__ */
diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
deleted file mode 100644
index 8fb3051fe28..00000000000
--- a/drivers/staging/et131x/et1310_tx.c
+++ /dev/null
@@ -1,797 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et1310_tx.c - Routines used to perform data transmission.
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#include "et131x_version.h"
-#include "et131x_defs.h"
-
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <asm/system.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-
-#include "et1310_phy.h"
-#include "et131x_adapter.h"
-#include "et1310_tx.h"
-#include "et131x.h"
-
-static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
- struct tcb *tcb);
-static int et131x_send_packet(struct sk_buff *skb,
- struct et131x_adapter *etdev);
-static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb);
-
-/**
- * et131x_tx_dma_memory_alloc
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success and errno on failure (as defined in errno.h).
- *
- * Allocates memory that will be visible both to the device and to the CPU.
- * The OS will pass us packets, pointers to which we will insert in the Tx
- * Descriptor queue. The device will read this queue to find the packets in
- * memory. The device will update the "status" in memory each time it xmits a
- * packet.
- */
-int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
-{
- int desc_size = 0;
- struct tx_ring *tx_ring = &adapter->tx_ring;
-
- /* Allocate memory for the TCB's (Transmit Control Block) */
- adapter->tx_ring.tcb_ring =
- kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
- if (!adapter->tx_ring.tcb_ring) {
- dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
- return -ENOMEM;
- }
-
- /* Allocate enough memory for the Tx descriptor ring, and allocate
- * some extra so that the ring can be aligned on a 4k boundary.
- */
- desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
- tx_ring->tx_desc_ring =
- (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
- &tx_ring->tx_desc_ring_pa);
- if (!adapter->tx_ring.tx_desc_ring) {
- dev_err(&adapter->pdev->dev,
- "Cannot alloc memory for Tx Ring\n");
- return -ENOMEM;
- }
-
- /* Save physical address
- *
- * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
- * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
- * are ever returned, make sure the high part is retrieved here before
- * storing the adjusted address.
- */
- /* Allocate memory for the Tx status block */
- tx_ring->tx_status = pci_alloc_consistent(adapter->pdev,
- sizeof(u32),
- &tx_ring->tx_status_pa);
- if (!adapter->tx_ring.tx_status_pa) {
- dev_err(&adapter->pdev->dev,
- "Cannot alloc memory for Tx status block\n");
- return -ENOMEM;
- }
- return 0;
-}
-
-/**
- * et131x_tx_dma_memory_free - Free all memory allocated within this module
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success and errno on failure (as defined in errno.h).
- */
-void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
-{
- int desc_size = 0;
-
- if (adapter->tx_ring.tx_desc_ring) {
- /* Free memory relating to Tx rings here */
- desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
- + 4096 - 1;
- pci_free_consistent(adapter->pdev,
- desc_size,
- adapter->tx_ring.tx_desc_ring,
- adapter->tx_ring.tx_desc_ring_pa);
- adapter->tx_ring.tx_desc_ring = NULL;
- }
-
- /* Free memory for the Tx status block */
- if (adapter->tx_ring.tx_status) {
- pci_free_consistent(adapter->pdev,
- sizeof(u32),
- adapter->tx_ring.tx_status,
- adapter->tx_ring.tx_status_pa);
-
- adapter->tx_ring.tx_status = NULL;
- }
- /* Free the memory for the tcb structures */
- kfree(adapter->tx_ring.tcb_ring);
-}
-
-/**
- * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
- * @etdev: pointer to our private adapter structure
- *
- * Configure the transmit engine with the ring buffers we have created
- * and prepare it for use.
- */
-void ConfigTxDmaRegs(struct et131x_adapter *etdev)
-{
- struct txdma_regs __iomem *txdma = &etdev->regs->txdma;
-
- /* Load the hardware with the start of the transmit descriptor ring. */
- writel((u32) ((u64)etdev->tx_ring.tx_desc_ring_pa >> 32),
- &txdma->pr_base_hi);
- writel((u32) etdev->tx_ring.tx_desc_ring_pa,
- &txdma->pr_base_lo);
-
- /* Initialise the transmit DMA engine */
- writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
-
- /* Load the completion writeback physical address */
- writel((u32)((u64)etdev->tx_ring.tx_status_pa >> 32),
- &txdma->dma_wb_base_hi);
- writel((u32)etdev->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
-
- *etdev->tx_ring.tx_status = 0;
-
- writel(0, &txdma->service_request);
- etdev->tx_ring.send_idx = 0;
-}
-
-/**
- * et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
- * @etdev: pointer to our adapter structure
- */
-void et131x_tx_dma_disable(struct et131x_adapter *etdev)
-{
- /* Setup the tramsmit dma configuration register */
- writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
- &etdev->regs->txdma.csr);
-}
-
-/**
- * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
- * @etdev: pointer to our adapter structure
- *
- * Mainly used after a return to the D0 (full-power) state from a lower state.
- */
-void et131x_tx_dma_enable(struct et131x_adapter *etdev)
-{
- /* Setup the transmit dma configuration register for normal
- * operation
- */
- writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
- &etdev->regs->txdma.csr);
-}
-
-/**
- * et131x_init_send - Initialize send data structures
- * @adapter: pointer to our private adapter structure
- */
-void et131x_init_send(struct et131x_adapter *adapter)
-{
- struct tcb *tcb;
- u32 ct;
- struct tx_ring *tx_ring;
-
- /* Setup some convenience pointers */
- tx_ring = &adapter->tx_ring;
- tcb = adapter->tx_ring.tcb_ring;
-
- tx_ring->tcb_qhead = tcb;
-
- memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
-
- /* Go through and set up each TCB */
- for (ct = 0; ct++ < NUM_TCB; tcb++)
- /* Set the link pointer in HW TCB to the next TCB in the
- * chain
- */
- tcb->next = tcb + 1;
-
- /* Set the tail pointer */
- tcb--;
- tx_ring->tcb_qtail = tcb;
- tcb->next = NULL;
- /* Curr send queue should now be empty */
- tx_ring->send_head = NULL;
- tx_ring->send_tail = NULL;
-}
-
-/**
- * et131x_send_packets - This function is called by the OS to send packets
- * @skb: the packet(s) to send
- * @netdev:device on which to TX the above packet(s)
- *
- * Return 0 in almost all cases; non-zero value in extreme hard failure only
- */
-int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
-{
- int status = 0;
- struct et131x_adapter *etdev = NULL;
-
- etdev = netdev_priv(netdev);
-
- /* Send these packets
- *
- * NOTE: The Linux Tx entry point is only given one packet at a time
- * to Tx, so the PacketCount and it's array used makes no sense here
- */
-
- /* TCB is not available */
- if (etdev->tx_ring.used >= NUM_TCB) {
- /* NOTE: If there's an error on send, no need to queue the
- * packet under Linux; if we just send an error up to the
- * netif layer, it will resend the skb to us.
- */
- status = -ENOMEM;
- } else {
- /* We need to see if the link is up; if it's not, make the
- * netif layer think we're good and drop the packet
- */
- if ((etdev->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
- !netif_carrier_ok(netdev)) {
- dev_kfree_skb_any(skb);
- skb = NULL;
-
- etdev->net_stats.tx_dropped++;
- } else {
- status = et131x_send_packet(skb, etdev);
- if (status != 0 && status != -ENOMEM) {
- /* On any other error, make netif think we're
- * OK and drop the packet
- */
- dev_kfree_skb_any(skb);
- skb = NULL;
- etdev->net_stats.tx_dropped++;
- }
- }
- }
- return status;
-}
-
-/**
- * et131x_send_packet - Do the work to send a packet
- * @skb: the packet(s) to send
- * @etdev: a pointer to the device's private adapter structure
- *
- * Return 0 in almost all cases; non-zero value in extreme hard failure only.
- *
- * Assumption: Send spinlock has been acquired
- */
-static int et131x_send_packet(struct sk_buff *skb,
- struct et131x_adapter *etdev)
-{
- int status;
- struct tcb *tcb = NULL;
- u16 *shbufva;
- unsigned long flags;
-
- /* All packets must have at least a MAC address and a protocol type */
- if (skb->len < ETH_HLEN)
- return -EIO;
-
- /* Get a TCB for this packet */
- spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
-
- tcb = etdev->tx_ring.tcb_qhead;
-
- if (tcb == NULL) {
- spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
- return -ENOMEM;
- }
-
- etdev->tx_ring.tcb_qhead = tcb->next;
-
- if (etdev->tx_ring.tcb_qhead == NULL)
- etdev->tx_ring.tcb_qtail = NULL;
-
- spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
-
- tcb->skb = skb;
-
- if (skb->data != NULL && skb->len - skb->data_len >= 6) {
- shbufva = (u16 *) skb->data;
-
- if ((shbufva[0] == 0xffff) &&
- (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
- tcb->flags |= fMP_DEST_BROAD;
- } else if ((shbufva[0] & 0x3) == 0x0001) {
- tcb->flags |= fMP_DEST_MULTI;
- }
- }
-
- tcb->next = NULL;
-
- /* Call the NIC specific send handler. */
- status = nic_send_packet(etdev, tcb);
-
- if (status != 0) {
- spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
-
- if (etdev->tx_ring.tcb_qtail)
- etdev->tx_ring.tcb_qtail->next = tcb;
- else
- /* Apparently ready Q is empty. */
- etdev->tx_ring.tcb_qhead = tcb;
-
- etdev->tx_ring.tcb_qtail = tcb;
- spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
- return status;
- }
- WARN_ON(etdev->tx_ring.used > NUM_TCB);
- return 0;
-}
-
-/**
- * nic_send_packet - NIC specific send handler for version B silicon.
- * @etdev: pointer to our adapter
- * @tcb: pointer to struct tcb
- *
- * Returns 0 or errno.
- */
-static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
-{
- u32 i;
- struct tx_desc desc[24]; /* 24 x 16 byte */
- u32 frag = 0;
- u32 thiscopy, remainder;
- struct sk_buff *skb = tcb->skb;
- u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
- struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
- unsigned long flags;
-
- /* Part of the optimizations of this send routine restrict us to
- * sending 24 fragments at a pass. In practice we should never see
- * more than 5 fragments.
- *
- * NOTE: The older version of this function (below) can handle any
- * number of fragments. If needed, we can call this function,
- * although it is less efficient.
- */
- if (nr_frags > 23)
- return -EIO;
-
- memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
-
- for (i = 0; i < nr_frags; i++) {
- /* If there is something in this element, lets get a
- * descriptor from the ring and get the necessary data
- */
- if (i == 0) {
- /* If the fragments are smaller than a standard MTU,
- * then map them to a single descriptor in the Tx
- * Desc ring. However, if they're larger, as is
- * possible with support for jumbo packets, then
- * split them each across 2 descriptors.
- *
- * This will work until we determine why the hardware
- * doesn't seem to like large fragments.
- */
- if ((skb->len - skb->data_len) <= 1514) {
- desc[frag].addr_hi = 0;
- /* Low 16bits are length, high is vlan and
- unused currently so zero */
- desc[frag].len_vlan =
- skb->len - skb->data_len;
-
- /* NOTE: Here, the dma_addr_t returned from
- * pci_map_single() is implicitly cast as a
- * u32. Although dma_addr_t can be
- * 64-bit, the address returned by
- * pci_map_single() is always 32-bit
- * addressable (as defined by the pci/dma
- * subsystem)
- */
- desc[frag++].addr_lo =
- pci_map_single(etdev->pdev,
- skb->data,
- skb->len -
- skb->data_len,
- PCI_DMA_TODEVICE);
- } else {
- desc[frag].addr_hi = 0;
- desc[frag].len_vlan =
- (skb->len - skb->data_len) / 2;
-
- /* NOTE: Here, the dma_addr_t returned from
- * pci_map_single() is implicitly cast as a
- * u32. Although dma_addr_t can be
- * 64-bit, the address returned by
- * pci_map_single() is always 32-bit
- * addressable (as defined by the pci/dma
- * subsystem)
- */
- desc[frag++].addr_lo =
- pci_map_single(etdev->pdev,
- skb->data,
- ((skb->len -
- skb->data_len) / 2),
- PCI_DMA_TODEVICE);
- desc[frag].addr_hi = 0;
-
- desc[frag].len_vlan =
- (skb->len - skb->data_len) / 2;
-
- /* NOTE: Here, the dma_addr_t returned from
- * pci_map_single() is implicitly cast as a
- * u32. Although dma_addr_t can be
- * 64-bit, the address returned by
- * pci_map_single() is always 32-bit
- * addressable (as defined by the pci/dma
- * subsystem)
- */
- desc[frag++].addr_lo =
- pci_map_single(etdev->pdev,
- skb->data +
- ((skb->len -
- skb->data_len) / 2),
- ((skb->len -
- skb->data_len) / 2),
- PCI_DMA_TODEVICE);
- }
- } else {
- desc[frag].addr_hi = 0;
- desc[frag].len_vlan =
- frags[i - 1].size;
-
- /* NOTE: Here, the dma_addr_t returned from
- * pci_map_page() is implicitly cast as a u32.
- * Although dma_addr_t can be 64-bit, the address
- * returned by pci_map_page() is always 32-bit
- * addressable (as defined by the pci/dma subsystem)
- */
- desc[frag++].addr_lo =
- pci_map_page(etdev->pdev,
- frags[i - 1].page,
- frags[i - 1].page_offset,
- frags[i - 1].size,
- PCI_DMA_TODEVICE);
- }
- }
-
- if (frag == 0)
- return -EIO;
-
- if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
- if (++etdev->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
- /* Last element & Interrupt flag */
- desc[frag - 1].flags = 0x5;
- etdev->tx_ring.since_irq = 0;
- } else { /* Last element */
- desc[frag - 1].flags = 0x1;
- }
- } else
- desc[frag - 1].flags = 0x5;
-
- desc[0].flags |= 2; /* First element flag */
-
- tcb->index_start = etdev->tx_ring.send_idx;
- tcb->stale = 0;
-
- spin_lock_irqsave(&etdev->send_hw_lock, flags);
-
- thiscopy = NUM_DESC_PER_RING_TX -
- INDEX10(etdev->tx_ring.send_idx);
-
- if (thiscopy >= frag) {
- remainder = 0;
- thiscopy = frag;
- } else {
- remainder = frag - thiscopy;
- }
-
- memcpy(etdev->tx_ring.tx_desc_ring +
- INDEX10(etdev->tx_ring.send_idx), desc,
- sizeof(struct tx_desc) * thiscopy);
-
- add_10bit(&etdev->tx_ring.send_idx, thiscopy);
-
- if (INDEX10(etdev->tx_ring.send_idx) == 0 ||
- INDEX10(etdev->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
- etdev->tx_ring.send_idx &= ~ET_DMA10_MASK;
- etdev->tx_ring.send_idx ^= ET_DMA10_WRAP;
- }
-
- if (remainder) {
- memcpy(etdev->tx_ring.tx_desc_ring,
- desc + thiscopy,
- sizeof(struct tx_desc) * remainder);
-
- add_10bit(&etdev->tx_ring.send_idx, remainder);
- }
-
- if (INDEX10(etdev->tx_ring.send_idx) == 0) {
- if (etdev->tx_ring.send_idx)
- tcb->index = NUM_DESC_PER_RING_TX - 1;
- else
- tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
- } else
- tcb->index = etdev->tx_ring.send_idx - 1;
-
- spin_lock(&etdev->TCBSendQLock);
-
- if (etdev->tx_ring.send_tail)
- etdev->tx_ring.send_tail->next = tcb;
- else
- etdev->tx_ring.send_head = tcb;
-
- etdev->tx_ring.send_tail = tcb;
-
- WARN_ON(tcb->next != NULL);
-
- etdev->tx_ring.used++;
-
- spin_unlock(&etdev->TCBSendQLock);
-
- /* Write the new write pointer back to the device. */
- writel(etdev->tx_ring.send_idx,
- &etdev->regs->txdma.service_request);
-
- /* For Gig only, we use Tx Interrupt coalescing. Enable the software
- * timer to wake us up if this packet isn't followed by N more.
- */
- if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
- writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
- &etdev->regs->global.watchdog_timer);
- }
- spin_unlock_irqrestore(&etdev->send_hw_lock, flags);
-
- return 0;
-}
-
-
-/**
- * et131x_free_send_packet - Recycle a struct tcb
- * @etdev: pointer to our adapter
- * @tcb: pointer to struct tcb
- *
- * Complete the packet if necessary
- * Assumption - Send spinlock has been acquired
- */
-inline void et131x_free_send_packet(struct et131x_adapter *etdev,
- struct tcb *tcb)
-{
- unsigned long flags;
- struct tx_desc *desc = NULL;
- struct net_device_stats *stats = &etdev->net_stats;
-
- if (tcb->flags & fMP_DEST_BROAD)
- atomic_inc(&etdev->stats.brdcstxmt);
- else if (tcb->flags & fMP_DEST_MULTI)
- atomic_inc(&etdev->stats.multixmt);
- else
- atomic_inc(&etdev->stats.unixmt);
-
- if (tcb->skb) {
- stats->tx_bytes += tcb->skb->len;
-
- /* Iterate through the TX descriptors on the ring
- * corresponding to this packet and umap the fragments
- * they point to
- */
- do {
- desc = (struct tx_desc *)(etdev->tx_ring.tx_desc_ring +
- INDEX10(tcb->index_start));
-
- pci_unmap_single(etdev->pdev,
- desc->addr_lo,
- desc->len_vlan, PCI_DMA_TODEVICE);
-
- add_10bit(&tcb->index_start, 1);
- if (INDEX10(tcb->index_start) >=
- NUM_DESC_PER_RING_TX) {
- tcb->index_start &= ~ET_DMA10_MASK;
- tcb->index_start ^= ET_DMA10_WRAP;
- }
- } while (desc != (etdev->tx_ring.tx_desc_ring +
- INDEX10(tcb->index)));
-
- dev_kfree_skb_any(tcb->skb);
- }
-
- memset(tcb, 0, sizeof(struct tcb));
-
- /* Add the TCB to the Ready Q */
- spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
-
- etdev->net_stats.tx_packets++;
-
- if (etdev->tx_ring.tcb_qtail)
- etdev->tx_ring.tcb_qtail->next = tcb;
- else
- /* Apparently ready Q is empty. */
- etdev->tx_ring.tcb_qhead = tcb;
-
- etdev->tx_ring.tcb_qtail = tcb;
-
- spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
- WARN_ON(etdev->tx_ring.used < 0);
-}
-
-/**
- * et131x_free_busy_send_packets - Free and complete the stopped active sends
- * @etdev: pointer to our adapter
- *
- * Assumption - Send spinlock has been acquired
- */
-void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
-{
- struct tcb *tcb;
- unsigned long flags;
- u32 freed = 0;
-
- /* Any packets being sent? Check the first TCB on the send list */
- spin_lock_irqsave(&etdev->TCBSendQLock, flags);
-
- tcb = etdev->tx_ring.send_head;
-
- while (tcb != NULL && freed < NUM_TCB) {
- struct tcb *next = tcb->next;
-
- etdev->tx_ring.send_head = next;
-
- if (next == NULL)
- etdev->tx_ring.send_tail = NULL;
-
- etdev->tx_ring.used--;
-
- spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
-
- freed++;
- et131x_free_send_packet(etdev, tcb);
-
- spin_lock_irqsave(&etdev->TCBSendQLock, flags);
-
- tcb = etdev->tx_ring.send_head;
- }
-
- WARN_ON(freed == NUM_TCB);
-
- spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
-
- etdev->tx_ring.used = 0;
-}
-
-/**
- * et131x_handle_send_interrupt - Interrupt handler for sending processing
- * @etdev: pointer to our adapter
- *
- * Re-claim the send resources, complete sends and get more to send from
- * the send wait queue.
- *
- * Assumption - Send spinlock has been acquired
- */
-void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
-{
- unsigned long flags;
- u32 serviced;
- struct tcb *tcb;
- u32 index;
-
- serviced = readl(&etdev->regs->txdma.new_service_complete);
- index = INDEX10(serviced);
-
- /* Has the ring wrapped? Process any descriptors that do not have
- * the same "wrap" indicator as the current completion indicator
- */
- spin_lock_irqsave(&etdev->TCBSendQLock, flags);
-
- tcb = etdev->tx_ring.send_head;
-
- while (tcb &&
- ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
- index < INDEX10(tcb->index)) {
- etdev->tx_ring.used--;
- etdev->tx_ring.send_head = tcb->next;
- if (tcb->next == NULL)
- etdev->tx_ring.send_tail = NULL;
-
- spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
- et131x_free_send_packet(etdev, tcb);
- spin_lock_irqsave(&etdev->TCBSendQLock, flags);
-
- /* Goto the next packet */
- tcb = etdev->tx_ring.send_head;
- }
- while (tcb &&
- !((serviced ^ tcb->index) & ET_DMA10_WRAP)
- && index > (tcb->index & ET_DMA10_MASK)) {
- etdev->tx_ring.used--;
- etdev->tx_ring.send_head = tcb->next;
- if (tcb->next == NULL)
- etdev->tx_ring.send_tail = NULL;
-
- spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
- et131x_free_send_packet(etdev, tcb);
- spin_lock_irqsave(&etdev->TCBSendQLock, flags);
-
- /* Goto the next packet */
- tcb = etdev->tx_ring.send_head;
- }
-
- /* Wake up the queue when we hit a low-water mark */
- if (etdev->tx_ring.used <= NUM_TCB / 3)
- netif_wake_queue(etdev->netdev);
-
- spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
-}
-
diff --git a/drivers/staging/et131x/et1310_tx.h b/drivers/staging/et131x/et1310_tx.h
deleted file mode 100644
index 82d06e9870d..00000000000
--- a/drivers/staging/et131x/et1310_tx.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et1310_tx.h - Defines, structs, enums, prototypes, etc. pertaining to data
- * transmission.
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#ifndef __ET1310_TX_H__
-#define __ET1310_TX_H__
-
-
-/* Typedefs for Tx Descriptor Ring */
-
-/*
- * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
- *
- * 0-15: length of packet
- * 16-27: VLAN tag
- * 28: VLAN CFI
- * 29-31: VLAN priority
- *
- * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
- *
- * 0: last packet in the sequence
- * 1: first packet in the sequence
- * 2: interrupt the processor when this pkt sent
- * 3: Control word - no packet data
- * 4: Issue half-duplex backpressure : XON/XOFF
- * 5: send pause frame
- * 6: Tx frame has error
- * 7: append CRC
- * 8: MAC override
- * 9: pad packet
- * 10: Packet is a Huge packet
- * 11: append VLAN tag
- * 12: IP checksum assist
- * 13: TCP checksum assist
- * 14: UDP checksum assist
- */
-
-/* struct tx_desc represents each descriptor on the ring */
-struct tx_desc {
- u32 addr_hi;
- u32 addr_lo;
- u32 len_vlan; /* control words how to xmit the */
- u32 flags; /* data (detailed above) */
-};
-
-/*
- * The status of the Tx DMA engine it sits in free memory, and is pointed to
- * by 0x101c / 0x1020. This is a DMA10 type
- */
-
-/* TCB (Transmit Control Block: Host Side) */
-struct tcb {
- struct tcb *next; /* Next entry in ring */
- u32 flags; /* Our flags for the packet */
- u32 count; /* Used to spot stuck/lost packets */
- u32 stale; /* Used to spot stuck/lost packets */
- struct sk_buff *skb; /* Network skb we are tied to */
- u32 index; /* Ring indexes */
- u32 index_start;
-};
-
-/* Structure representing our local reference(s) to the ring */
-struct tx_ring {
- /* TCB (Transmit Control Block) memory and lists */
- struct tcb *tcb_ring;
-
- /* List of TCBs that are ready to be used */
- struct tcb *tcb_qhead;
- struct tcb *tcb_qtail;
-
- /* list of TCBs that are currently being sent. NOTE that access to all
- * three of these (including used) are controlled via the
- * TCBSendQLock. This lock should be secured prior to incementing /
- * decrementing used, or any queue manipulation on send_head /
- * tail
- */
- struct tcb *send_head;
- struct tcb *send_tail;
- int used;
-
- /* The actual descriptor ring */
- struct tx_desc *tx_desc_ring;
- dma_addr_t tx_desc_ring_pa;
-
- /* send_idx indicates where we last wrote to in the descriptor ring. */
- u32 send_idx;
-
- /* The location of the write-back status block */
- u32 *tx_status;
- dma_addr_t tx_status_pa;
-
- /* Packets since the last IRQ: used for interrupt coalescing */
- int since_irq;
-};
-
-#endif /* __ET1310_TX_H__ */
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
new file mode 100644
index 00000000000..f5f44a02456
--- /dev/null
+++ b/drivers/staging/et131x/et131x.c
@@ -0,0 +1,5514 @@
+/*
+ * Agere Systems Inc.
+ * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
+ *
+ * Copyright © 2005 Agere Systems Inc.
+ * All rights reserved.
+ * http://www.agere.com
+ *
+ * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
+ *
+ *------------------------------------------------------------------------------
+ *
+ * SOFTWARE LICENSE
+ *
+ * This software is provided subject to the following terms and conditions,
+ * which you should read carefully before using the software. Using this
+ * software indicates your acceptance of these terms and conditions. If you do
+ * not agree with these terms and conditions, do not use the software.
+ *
+ * Copyright © 2005 Agere Systems Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source or binary forms, with or without
+ * modifications, are permitted provided that the following conditions are met:
+ *
+ * . Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following Disclaimer as comments in the code as
+ * well as in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * . Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following Disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * . Neither the name of Agere Systems Inc. nor the names of the contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Disclaimer
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
+ * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
+ * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include <linux/phy.h>
+
+#include "et131x.h"
+
+MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
+MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
+ "for the ET1310 by Agere Systems");
+
+/* EEPROM defines */
+#define MAX_NUM_REGISTER_POLLS 1000
+#define MAX_NUM_WRITE_RETRIES 2
+
+/* MAC defines */
+#define COUNTER_WRAP_16_BIT 0x10000
+#define COUNTER_WRAP_12_BIT 0x1000
+
+/* PCI defines */
+#define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */
+#define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */
+
+/* ISR defines */
+/*
+ * For interrupts, normal running is:
+ * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
+ * watchdog_interrupt & txdma_xfer_done
+ *
+ * In both cases, when flow control is enabled for either Tx or bi-direction,
+ * we additional enable rx_fbr0_low and rx_fbr1_low, so we know when the
+ * buffer rings are running low.
+ */
+#define INT_MASK_DISABLE 0xffffffff
+
+/* NOTE: Masking out MAC_STAT Interrupt for now...
+ * #define INT_MASK_ENABLE 0xfff6bf17
+ * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7
+ */
+#define INT_MASK_ENABLE 0xfffebf17
+#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7
+
+/* General defines */
+/* Packet and header sizes */
+#define NIC_MIN_PACKET_SIZE 60
+
+/* Multicast list size */
+#define NIC_MAX_MCAST_LIST 128
+
+/* Supported Filters */
+#define ET131X_PACKET_TYPE_DIRECTED 0x0001
+#define ET131X_PACKET_TYPE_MULTICAST 0x0002
+#define ET131X_PACKET_TYPE_BROADCAST 0x0004
+#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
+#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010
+
+/* Tx Timeout */
+#define ET131X_TX_TIMEOUT (1 * HZ)
+#define NIC_SEND_HANG_THRESHOLD 0
+
+/* MP_TCB flags */
+#define fMP_DEST_MULTI 0x00000001
+#define fMP_DEST_BROAD 0x00000002
+
+/* MP_ADAPTER flags */
+#define fMP_ADAPTER_RECV_LOOKASIDE 0x00000004
+#define fMP_ADAPTER_INTERRUPT_IN_USE 0x00000008
+
+/* MP_SHARED flags */
+#define fMP_ADAPTER_LOWER_POWER 0x00200000
+
+#define fMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
+#define fMP_ADAPTER_HARDWARE_ERROR 0x04000000
+
+#define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
+
+/* Some offsets in PCI config space that are actually used. */
+#define ET1310_PCI_MAX_PYLD 0x4C
+#define ET1310_PCI_MAC_ADDRESS 0xA4
+#define ET1310_PCI_EEPROM_STATUS 0xB2
+#define ET1310_PCI_ACK_NACK 0xC0
+#define ET1310_PCI_REPLAY 0xC2
+#define ET1310_PCI_L0L1LATENCY 0xCF
+
+/* PCI Product IDs */
+#define ET131X_PCI_DEVICE_ID_GIG 0xED00 /* ET1310 1000 Base-T 8 */
+#define ET131X_PCI_DEVICE_ID_FAST 0xED01 /* ET1310 100 Base-T */
+
+/* Define order of magnitude converter */
+#define NANO_IN_A_MICRO 1000
+
+#define PARM_RX_NUM_BUFS_DEF 4
+#define PARM_RX_TIME_INT_DEF 10
+#define PARM_RX_MEM_END_DEF 0x2bc
+#define PARM_TX_TIME_INT_DEF 40
+#define PARM_TX_NUM_BUFS_DEF 4
+#define PARM_DMA_CACHE_DEF 0
+
+/* RX defines */
+#define USE_FBR0 1
+
+#define FBR_CHUNKS 32
+
+#define MAX_DESC_PER_RING_RX 1024
+
+/* number of RFDs - default and min */
+#ifdef USE_FBR0
+#define RFD_LOW_WATER_MARK 40
+#define NIC_DEFAULT_NUM_RFD 1024
+#define NUM_FBRS 2
+#else
+#define RFD_LOW_WATER_MARK 20
+#define NIC_DEFAULT_NUM_RFD 256
+#define NUM_FBRS 1
+#endif
+
+#define NIC_MIN_NUM_RFD 64
+
+#define NUM_PACKETS_HANDLED 256
+
+#define ALCATEL_MULTICAST_PKT 0x01000000
+#define ALCATEL_BROADCAST_PKT 0x02000000
+
+/* typedefs for Free Buffer Descriptors */
+struct fbr_desc {
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */
+};
+
+/* Packet Status Ring Descriptors
+ *
+ * Word 0:
+ *
+ * top 16 bits are from the Alcatel Status Word as enumerated in
+ * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
+ *
+ * 0: hp hash pass
+ * 1: ipa IP checksum assist
+ * 2: ipp IP checksum pass
+ * 3: tcpa TCP checksum assist
+ * 4: tcpp TCP checksum pass
+ * 5: wol WOL Event
+ * 6: rxmac_error RXMAC Error Indicator
+ * 7: drop Drop packet
+ * 8: ft Frame Truncated
+ * 9: jp Jumbo Packet
+ * 10: vp VLAN Packet
+ * 11-15: unused
+ * 16: asw_prev_pkt_dropped e.g. IFG too small on previous
+ * 17: asw_RX_DV_event short receive event detected
+ * 18: asw_false_carrier_event bad carrier since last good packet
+ * 19: asw_code_err one or more nibbles signalled as errors
+ * 20: asw_CRC_err CRC error
+ * 21: asw_len_chk_err frame length field incorrect
+ * 22: asw_too_long frame length > 1518 bytes
+ * 23: asw_OK valid CRC + no code error
+ * 24: asw_multicast has a multicast address
+ * 25: asw_broadcast has a broadcast address
+ * 26: asw_dribble_nibble spurious bits after EOP
+ * 27: asw_control_frame is a control frame
+ * 28: asw_pause_frame is a pause frame
+ * 29: asw_unsupported_op unsupported OP code
+ * 30: asw_VLAN_tag VLAN tag detected
+ * 31: asw_long_evt Rx long event
+ *
+ * Word 1:
+ * 0-15: length length in bytes
+ * 16-25: bi Buffer Index
+ * 26-27: ri Ring Index
+ * 28-31: reserved
+ */
+
+struct pkt_stat_desc {
+ u32 word0;
+ u32 word1;
+};
+
+/* Typedefs for the RX DMA status word */
+
+/*
+ * rx status word 0 holds part of the status bits of the Rx DMA engine
+ * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
+ * which contains the Free Buffer ring 0 and 1 available offset.
+ *
+ * bit 0-9 FBR1 offset
+ * bit 10 Wrap flag for FBR1
+ * bit 16-25 FBR0 offset
+ * bit 26 Wrap flag for FBR0
+ */
+
+/*
+ * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
+ * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
+ * which contains the Packet Status Ring available offset.
+ *
+ * bit 0-15 reserved
+ * bit 16-27 PSRoffset
+ * bit 28 PSRwrap
+ * bit 29-31 unused
+ */
+
+/*
+ * struct rx_status_block is a structure representing the status of the Rx
+ * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020
+ */
+struct rx_status_block {
+ u32 word0;
+ u32 word1;
+};
+
+/*
+ * Structure for look-up table holding free buffer ring pointers, addresses
+ * and state.
+ */
+struct fbr_lookup {
+ void *virt[MAX_DESC_PER_RING_RX];
+ void *buffer1[MAX_DESC_PER_RING_RX];
+ void *buffer2[MAX_DESC_PER_RING_RX];
+ u32 bus_high[MAX_DESC_PER_RING_RX];
+ u32 bus_low[MAX_DESC_PER_RING_RX];
+ void *ring_virtaddr;
+ dma_addr_t ring_physaddr;
+ void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
+ dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
+ uint64_t real_physaddr;
+ uint64_t offset;
+ u32 local_full;
+ u32 num_entries;
+ u32 buffsize;
+};
+
+/*
+ * struct rx_ring is the sructure representing the adaptor's local
+ * reference(s) to the rings
+ *
+ ******************************************************************************
+ * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1
+ * and index 1 to refer to FRB0
+ ******************************************************************************
+ */
+struct rx_ring {
+ struct fbr_lookup *fbr[NUM_FBRS];
+ void *ps_ring_virtaddr;
+ dma_addr_t ps_ring_physaddr;
+ u32 local_psr_full;
+ u32 psr_num_entries;
+
+ struct rx_status_block *rx_status_block;
+ dma_addr_t rx_status_bus;
+
+ /* RECV */
+ struct list_head recv_list;
+ u32 num_ready_recv;
+
+ u32 num_rfd;
+
+ bool unfinished_receives;
+
+ /* lookaside lists */
+ struct kmem_cache *recv_lookaside;
+};
+
+/* TX defines */
+/*
+ * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
+ *
+ * 0-15: length of packet
+ * 16-27: VLAN tag
+ * 28: VLAN CFI
+ * 29-31: VLAN priority
+ *
+ * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
+ *
+ * 0: last packet in the sequence
+ * 1: first packet in the sequence
+ * 2: interrupt the processor when this pkt sent
+ * 3: Control word - no packet data
+ * 4: Issue half-duplex backpressure : XON/XOFF
+ * 5: send pause frame
+ * 6: Tx frame has error
+ * 7: append CRC
+ * 8: MAC override
+ * 9: pad packet
+ * 10: Packet is a Huge packet
+ * 11: append VLAN tag
+ * 12: IP checksum assist
+ * 13: TCP checksum assist
+ * 14: UDP checksum assist
+ */
+
+/* struct tx_desc represents each descriptor on the ring */
+struct tx_desc {
+ u32 addr_hi;
+ u32 addr_lo;
+ u32 len_vlan; /* control words how to xmit the */
+ u32 flags; /* data (detailed above) */
+};
+
+/*
+ * The status of the Tx DMA engine it sits in free memory, and is pointed to
+ * by 0x101c / 0x1020. This is a DMA10 type
+ */
+
+/* TCB (Transmit Control Block: Host Side) */
+struct tcb {
+ struct tcb *next; /* Next entry in ring */
+ u32 flags; /* Our flags for the packet */
+ u32 count; /* Used to spot stuck/lost packets */
+ u32 stale; /* Used to spot stuck/lost packets */
+ struct sk_buff *skb; /* Network skb we are tied to */
+ u32 index; /* Ring indexes */
+ u32 index_start;
+};
+
+/* Structure representing our local reference(s) to the ring */
+struct tx_ring {
+ /* TCB (Transmit Control Block) memory and lists */
+ struct tcb *tcb_ring;
+
+ /* List of TCBs that are ready to be used */
+ struct tcb *tcb_qhead;
+ struct tcb *tcb_qtail;
+
+ /* list of TCBs that are currently being sent. NOTE that access to all
+ * three of these (including used) are controlled via the
+ * TCBSendQLock. This lock should be secured prior to incementing /
+ * decrementing used, or any queue manipulation on send_head /
+ * tail
+ */
+ struct tcb *send_head;
+ struct tcb *send_tail;
+ int used;
+
+ /* The actual descriptor ring */
+ struct tx_desc *tx_desc_ring;
+ dma_addr_t tx_desc_ring_pa;
+
+ /* send_idx indicates where we last wrote to in the descriptor ring. */
+ u32 send_idx;
+
+ /* The location of the write-back status block */
+ u32 *tx_status;
+ dma_addr_t tx_status_pa;
+
+ /* Packets since the last IRQ: used for interrupt coalescing */
+ int since_irq;
+};
+
+/* ADAPTER defines */
+/*
+ * Do not change these values: if changed, then change also in respective
+ * TXdma and Rxdma engines
+ */
+#define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */
+#define NUM_TCB 64
+
+/*
+ * These values are all superseded by registry entries to facilitate tuning.
+ * Once the desired performance has been achieved, the optimal registry values
+ * should be re-populated to these #defines:
+ */
+#define TX_ERROR_PERIOD 1000
+
+#define LO_MARK_PERCENT_FOR_PSR 15
+#define LO_MARK_PERCENT_FOR_RX 15
+
+/* RFD (Receive Frame Descriptor) */
+struct rfd {
+ struct list_head list_node;
+ struct sk_buff *skb;
+ u32 len; /* total size of receive frame */
+ u16 bufferindex;
+ u8 ringindex;
+};
+
+/* Flow Control */
+#define FLOW_BOTH 0
+#define FLOW_TXONLY 1
+#define FLOW_RXONLY 2
+#define FLOW_NONE 3
+
+/* Struct to define some device statistics */
+struct ce_stats {
+ /* MIB II variables
+ *
+ * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
+ * MUST have 32, then we'll need another way to perform atomic
+ * operations
+ */
+ u32 unicast_pkts_rcvd;
+ atomic_t unicast_pkts_xmtd;
+ u32 multicast_pkts_rcvd;
+ atomic_t multicast_pkts_xmtd;
+ u32 broadcast_pkts_rcvd;
+ atomic_t broadcast_pkts_xmtd;
+ u32 rcvd_pkts_dropped;
+
+ /* Tx Statistics. */
+ u32 tx_underflows;
+
+ u32 tx_collisions;
+ u32 tx_excessive_collisions;
+ u32 tx_first_collisions;
+ u32 tx_late_collisions;
+ u32 tx_max_pkt_errs;
+ u32 tx_deferred;
+
+ /* Rx Statistics. */
+ u32 rx_overflows;
+
+ u32 rx_length_errs;
+ u32 rx_align_errs;
+ u32 rx_crc_errs;
+ u32 rx_code_violations;
+ u32 rx_other_errs;
+
+ u32 synchronous_iterations;
+ u32 interrupt_status;
+};
+
+/* The private adapter structure */
+struct et131x_adapter {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct mii_bus *mii_bus;
+ struct phy_device *phydev;
+ struct work_struct task;
+
+ /* Flags that indicate current state of the adapter */
+ u32 flags;
+
+ /* local link state, to determine if a state change has occurred */
+ int link;
+
+ /* Configuration */
+ u8 rom_addr[ETH_ALEN];
+ u8 addr[ETH_ALEN];
+ bool has_eeprom;
+ u8 eeprom_data[2];
+
+ /* Spinlocks */
+ spinlock_t lock;
+
+ spinlock_t tcb_send_qlock;
+ spinlock_t tcb_ready_qlock;
+ spinlock_t send_hw_lock;
+
+ spinlock_t rcv_lock;
+ spinlock_t rcv_pend_lock;
+ spinlock_t fbr_lock;
+
+ spinlock_t phy_lock;
+
+ /* Packet Filter and look ahead size */
+ u32 packet_filter;
+
+ /* multicast list */
+ u32 multicast_addr_count;
+ u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];
+
+ /* Pointer to the device's PCI register space */
+ struct address_map __iomem *regs;
+
+ /* Registry parameters */
+ u8 wanted_flow; /* Flow we want for 802.3x flow control */
+ u32 registry_jumbo_packet; /* Max supported ethernet packet size */
+
+ /* Derived from the registry: */
+ u8 flowcontrol; /* flow control validated by the far-end */
+
+ /* Minimize init-time */
+ struct timer_list error_timer;
+
+ /* variable putting the phy into coma mode when boot up with no cable
+ * plugged in after 5 seconds
+ */
+ u8 boot_coma;
+
+ /* Next two used to save power information at power down. This
+ * information will be used during power up to set up parts of Power
+ * Management in JAGCore
+ */
+ u16 pdown_speed;
+ u8 pdown_duplex;
+
+ /* Tx Memory Variables */
+ struct tx_ring tx_ring;
+
+ /* Rx Memory Variables */
+ struct rx_ring rx_ring;
+
+ /* Stats */
+ struct ce_stats stats;
+
+ struct net_device_stats net_stats;
+};
+
+/* EEPROM functions */
+
+static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
+{
+ u32 reg;
+ int i;
+
+ /*
+ * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
+ * bits 7,1:0 both equal to 1, at least once after reset.
+ * Subsequent operations need only to check that bits 1:0 are equal
+ * to 1 prior to starting a single byte read/write
+ */
+
+ for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
+ /* Read registers grouped in DWORD1 */
+ if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
+ return -EIO;
+
+ /* I2C idle and Phy Queue Avail both true */
+ if ((reg & 0x3000) == 0x3000) {
+ if (status)
+ *status = reg;
+ return reg & 0xFF;
+ }
+ }
+ return -ETIMEDOUT;
+}
+
+
+/**
+ * eeprom_write - Write a byte to the ET1310's EEPROM
+ * @adapter: pointer to our private adapter structure
+ * @addr: the address to write
+ * @data: the value to write
+ *
+ * Returns 1 for a successful write.
+ */
+static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int index = 0;
+ int retries;
+ int err = 0;
+ int i2c_wack = 0;
+ int writeok = 0;
+ u32 status;
+ u32 val = 0;
+
+ /*
+ * For an EEPROM, an I2C single byte write is defined as a START
+ * condition followed by the device address, EEPROM address, one byte
+ * of data and a STOP condition. The STOP condition will trigger the
+ * EEPROM's internally timed write cycle to the nonvolatile memory.
+ * All inputs are disabled during this write cycle and the EEPROM will
+ * not respond to any access until the internal write is complete.
+ */
+
+ err = eeprom_wait_ready(pdev, NULL);
+ if (err)
+ return err;
+
+ /*
+ * 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
+ * and bits 1:0 both =0. Bit 5 should be set according to the
+ * type of EEPROM being accessed (1=two byte addressing, 0=one
+ * byte addressing).
+ */
+ if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
+ LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
+ return -EIO;
+
+ i2c_wack = 1;
+
+ /* Prepare EEPROM address for Step 3 */
+
+ for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
+ /* Write the address to the LBCIF Address Register */
+ if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
+ break;
+ /*
+ * Write the data to the LBCIF Data Register (the I2C write
+ * will begin).
+ */
+ if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
+ break;
+ /*
+ * Monitor bit 1:0 of the LBCIF Status Register. When bits
+ * 1:0 are both equal to 1, the I2C write has completed and the
+ * internal write cycle of the EEPROM is about to start.
+ * (bits 1:0 = 01 is a legal state while waiting from both
+ * equal to 1, but bits 1:0 = 10 is invalid and implies that
+ * something is broken).
+ */
+ err = eeprom_wait_ready(pdev, &status);
+ if (err < 0)
+ return 0;
+
+ /*
+ * Check bit 3 of the LBCIF Status Register. If equal to 1,
+ * an error has occurred.Don't break here if we are revision
+ * 1, this is so we do a blind write for load bug.
+ */
+ if ((status & LBCIF_STATUS_GENERAL_ERROR)
+ && adapter->pdev->revision == 0)
+ break;
+
+ /*
+ * Check bit 2 of the LBCIF Status Register. If equal to 1 an
+ * ACK error has occurred on the address phase of the write.
+ * This could be due to an actual hardware failure or the
+ * EEPROM may still be in its internal write cycle from a
+ * previous write. This write operation was ignored and must be
+ *repeated later.
+ */
+ if (status & LBCIF_STATUS_ACK_ERROR) {
+ /*
+ * This could be due to an actual hardware failure
+ * or the EEPROM may still be in its internal write
+ * cycle from a previous write. This write operation
+ * was ignored and must be repeated later.
+ */
+ udelay(10);
+ continue;
+ }
+
+ writeok = 1;
+ break;
+ }
+
+ /*
+ * Set bit 6 of the LBCIF Control Register = 0.
+ */
+ udelay(10);
+
+ while (i2c_wack) {
+ if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
+ LBCIF_CONTROL_LBCIF_ENABLE))
+ writeok = 0;
+
+ /* Do read until internal ACK_ERROR goes away meaning write
+ * completed
+ */
+ do {
+ pci_write_config_dword(pdev,
+ LBCIF_ADDRESS_REGISTER,
+ addr);
+ do {
+ pci_read_config_dword(pdev,
+ LBCIF_DATA_REGISTER, &val);
+ } while ((val & 0x00010000) == 0);
+ } while (val & 0x00040000);
+
+ if ((val & 0xFF00) != 0xC000 || index == 10000)
+ break;
+ index++;
+ }
+ return writeok ? 0 : -EIO;
+}
+
+/**
+ * eeprom_read - Read a byte from the ET1310's EEPROM
+ * @adapter: pointer to our private adapter structure
+ * @addr: the address from which to read
+ * @pdata: a pointer to a byte in which to store the value of the read
+ * @eeprom_id: the ID of the EEPROM
+ * @addrmode: how the EEPROM is to be accessed
+ *
+ * Returns 1 for a successful read
+ */
+static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+ u32 status;
+
+ /*
+ * A single byte read is similar to the single byte write, with the
+ * exception of the data flow:
+ */
+
+ err = eeprom_wait_ready(pdev, NULL);
+ if (err)
+ return err;
+ /*
+ * Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
+ * and bits 1:0 both =0. Bit 5 should be set according to the type
+ * of EEPROM being accessed (1=two byte addressing, 0=one byte
+ * addressing).
+ */
+ if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
+ LBCIF_CONTROL_LBCIF_ENABLE))
+ return -EIO;
+ /*
+ * Write the address to the LBCIF Address Register (I2C read will
+ * begin).
+ */
+ if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
+ return -EIO;
+ /*
+ * Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
+ * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
+ * has occurred).
+ */
+ err = eeprom_wait_ready(pdev, &status);
+ if (err < 0)
+ return err;
+ /*
+ * Regardless of error status, read data byte from LBCIF Data
+ * Register.
+ */
+ *pdata = err;
+ /*
+ * Check bit 2 of the LBCIF Status Register. If = 1,
+ * then an error has occurred.
+ */
+ return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
+}
+
+int et131x_init_eeprom(struct et131x_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u8 eestatus;
+
+ /* We first need to check the EEPROM Status code located at offset
+ * 0xB2 of config space
+ */
+ pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
+ &eestatus);
+
+ /* THIS IS A WORKAROUND:
+ * I need to call this function twice to get my card in a
+ * LG M1 Express Dual running. I tried also a msleep before this
+ * function, because I thougth there could be some time condidions
+ * but it didn't work. Call the whole function twice also work.
+ */
+ if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
+ dev_err(&pdev->dev,
+ "Could not read PCI config space for EEPROM Status\n");
+ return -EIO;
+ }
+
+ /* Determine if the error(s) we care about are present. If they are
+ * present we need to fail.
+ */
+ if (eestatus & 0x4C) {
+ int write_failed = 0;
+ if (pdev->revision == 0x01) {
+ int i;
+ static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };
+
+ /* Re-write the first 4 bytes if we have an eeprom
+ * present and the revision id is 1, this fixes the
+ * corruption seen with 1310 B Silicon
+ */
+ for (i = 0; i < 3; i++)
+ if (eeprom_write(adapter, i, eedata[i]) < 0)
+ write_failed = 1;
+ }
+ if (pdev->revision != 0x01 || write_failed) {
+ dev_err(&pdev->dev,
+ "Fatal EEPROM Status Error - 0x%04x\n", eestatus);
+
+ /* This error could mean that there was an error
+ * reading the eeprom or that the eeprom doesn't exist.
+ * We will treat each case the same and not try to
+ * gather additional information that normally would
+ * come from the eeprom, like MAC Address
+ */
+ adapter->has_eeprom = 0;
+ return -EIO;
+ }
+ }
+ adapter->has_eeprom = 1;
+
+ /* Read the EEPROM for information regarding LED behavior. Refer to
+ * ET1310_phy.c, et131x_xcvr_init(), for its use.
+ */
+ eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
+ eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);
+
+ if (adapter->eeprom_data[0] != 0xcd)
+ /* Disable all optional features */
+ adapter->eeprom_data[1] = 0x00;
+
+ return 0;
+}
+
+/**
+ * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
+ * @adapter: pointer to our adapter structure
+ */
+void et131x_rx_dma_enable(struct et131x_adapter *adapter)
+{
+ /* Setup the receive dma configuration register for normal operation */
+ u32 csr = 0x2000; /* FBR1 enable */
+
+ if (adapter->rx_ring.fbr[0]->buffsize == 4096)
+ csr |= 0x0800;
+ else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
+ csr |= 0x1000;
+ else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
+ csr |= 0x1800;
+#ifdef USE_FBR0
+ csr |= 0x0400; /* FBR0 enable */
+ if (adapter->rx_ring.fbr[1]->buffsize == 256)
+ csr |= 0x0100;
+ else if (adapter->rx_ring.fbr[1]->buffsize == 512)
+ csr |= 0x0200;
+ else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
+ csr |= 0x0300;
+#endif
+ writel(csr, &adapter->regs->rxdma.csr);
+
+ csr = readl(&adapter->regs->rxdma.csr);
+ if ((csr & 0x00020000) != 0) {
+ udelay(5);
+ csr = readl(&adapter->regs->rxdma.csr);
+ if ((csr & 0x00020000) != 0) {
+ dev_err(&adapter->pdev->dev,
+ "RX Dma failed to exit halt state. CSR 0x%08x\n",
+ csr);
+ }
+ }
+}
+
+/**
+ * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
+ * @adapter: pointer to our adapter structure
+ */
+void et131x_rx_dma_disable(struct et131x_adapter *adapter)
+{
+ u32 csr;
+ /* Setup the receive dma configuration register */
+ writel(0x00002001, &adapter->regs->rxdma.csr);
+ csr = readl(&adapter->regs->rxdma.csr);
+ if ((csr & 0x00020000) == 0) { /* Check halt status (bit 17) */
+ udelay(5);
+ csr = readl(&adapter->regs->rxdma.csr);
+ if ((csr & 0x00020000) == 0)
+ dev_err(&adapter->pdev->dev,
+ "RX Dma failed to enter halt state. CSR 0x%08x\n",
+ csr);
+ }
+}
+
+/**
+ * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
+ * @adapter: pointer to our adapter structure
+ *
+ * Mainly used after a return to the D0 (full-power) state from a lower state.
+ */
+void et131x_tx_dma_enable(struct et131x_adapter *adapter)
+{
+ /* Setup the transmit dma configuration register for normal
+ * operation
+ */
+ writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
+ &adapter->regs->txdma.csr);
+}
+
+static inline void add_10bit(u32 *v, int n)
+{
+ *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
+}
+
+static inline void add_12bit(u32 *v, int n)
+{
+ *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
+}
+
+/**
+ * nic_rx_pkts - Checks the hardware for available packets
+ * @adapter: pointer to our adapter
+ *
+ * Returns rfd, a pointer to our MPRFD.
+ *
+ * Checks the hardware for available packets, using completion ring
+ * If packets are available, it gets an RFD from the recv_list, attaches
+ * the packet to it, puts the RFD in the RecvPendList, and also returns
+ * the pointer to the RFD.
+ */
+/* MAC functions */
+
+/**
+ * et1310_config_mac_regs1 - Initialize the first part of MAC regs
+ * @adapter: pointer to our adapter structure
+ */
+void et1310_config_mac_regs1(struct et131x_adapter *adapter)
+{
+ struct mac_regs __iomem *macregs = &adapter->regs->mac;
+ u32 station1;
+ u32 station2;
+ u32 ipg;
+
+ /* First we need to reset everything. Write to MAC configuration
+ * register 1 to perform reset.
+ */
+ writel(0xC00F0000, &macregs->cfg1);
+
+ /* Next lets configure the MAC Inter-packet gap register */
+ ipg = 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */
+ ipg |= 0x50 << 8; /* ifg enforce 0x50 */
+ writel(ipg, &macregs->ipg);
+
+ /* Next lets configure the MAC Half Duplex register */
+ /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
+ writel(0x00A1F037, &macregs->hfdp);
+
+ /* Next lets configure the MAC Interface Control register */
+ writel(0, &macregs->if_ctrl);
+
+ /* Let's move on to setting up the mii management configuration */
+ writel(0x07, &macregs->mii_mgmt_cfg); /* Clock reset 0x7 */
+
+ /* Next lets configure the MAC Station Address register. These
+ * values are read from the EEPROM during initialization and stored
+ * in the adapter structure. We write what is stored in the adapter
+ * structure to the MAC Station Address registers high and low. This
+ * station address is used for generating and checking pause control
+ * packets.
+ */
+ station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
+ (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
+ station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
+ (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
+ (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
+ adapter->addr[2];
+ writel(station1, &macregs->station_addr_1);
+ writel(station2, &macregs->station_addr_2);
+
+ /* Max ethernet packet in bytes that will passed by the mac without
+ * being truncated. Allow the MAC to pass 4 more than our max packet
+ * size. This is 4 for the Ethernet CRC.
+ *
+ * Packets larger than (registry_jumbo_packet) that do not contain a
+ * VLAN ID will be dropped by the Rx function.
+ */
+ writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);
+
+ /* clear out MAC config reset */
+ writel(0, &macregs->cfg1);
+}
+
+/**
+ * et1310_config_mac_regs2 - Initialize the second part of MAC regs
+ * @adapter: pointer to our adapter structure
+ */
+void et1310_config_mac_regs2(struct et131x_adapter *adapter)
+{
+ int32_t delay = 0;
+ struct mac_regs __iomem *mac = &adapter->regs->mac;
+ struct phy_device *phydev = adapter->phydev;
+ u32 cfg1;
+ u32 cfg2;
+ u32 ifctrl;
+ u32 ctl;
+
+ ctl = readl(&adapter->regs->txmac.ctl);
+ cfg1 = readl(&mac->cfg1);
+ cfg2 = readl(&mac->cfg2);
+ ifctrl = readl(&mac->if_ctrl);
+
+ /* Set up the if mode bits */
+ cfg2 &= ~0x300;
+ if (phydev && phydev->speed == SPEED_1000) {
+ cfg2 |= 0x200;
+ /* Phy mode bit */
+ ifctrl &= ~(1 << 24);
+ } else {
+ cfg2 |= 0x100;
+ ifctrl |= (1 << 24);
+ }
+
+ /* We need to enable Rx/Tx */
+ cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
+ /* Initialize loop back to off */
+ cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
+ if (adapter->flowcontrol == FLOW_RXONLY ||
+ adapter->flowcontrol == FLOW_BOTH)
+ cfg1 |= CFG1_RX_FLOW;
+ writel(cfg1, &mac->cfg1);
+
+ /* Now we need to initialize the MAC Configuration 2 register */
+ /* preamble 7, check length, huge frame off, pad crc, crc enable
+ full duplex off */
+ cfg2 |= 0x7016;
+ cfg2 &= ~0x0021;
+
+ /* Turn on duplex if needed */
+ if (phydev && phydev->duplex == DUPLEX_FULL)
+ cfg2 |= 0x01;
+
+ ifctrl &= ~(1 << 26);
+ if (phydev && phydev->duplex == DUPLEX_HALF)
+ ifctrl |= (1<<26); /* Enable ghd */
+
+ writel(ifctrl, &mac->if_ctrl);
+ writel(cfg2, &mac->cfg2);
+
+ do {
+ udelay(10);
+ delay++;
+ cfg1 = readl(&mac->cfg1);
+ } while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);
+
+ if (delay == 100) {
+ dev_warn(&adapter->pdev->dev,
+ "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
+ cfg1);
+ }
+
+ /* Enable txmac */
+ ctl |= 0x09; /* TX mac enable, FC disable */
+ writel(ctl, &adapter->regs->txmac.ctl);
+
+ /* Ready to start the RXDMA/TXDMA engine */
+ if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
+ et131x_rx_dma_enable(adapter);
+ et131x_tx_dma_enable(adapter);
+ }
+}
+
+/**
+ * et1310_in_phy_coma - check if the device is in phy coma
+ * @adapter: pointer to our adapter structure
+ *
+ * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
+ */
+int et1310_in_phy_coma(struct et131x_adapter *adapter)
+{
+ u32 pmcsr;
+
+ pmcsr = readl(&adapter->regs->global.pm_csr);
+
+ return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
+}
+
+void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
+{
+ struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
+ uint32_t nIndex;
+ uint32_t result;
+ uint32_t hash1 = 0;
+ uint32_t hash2 = 0;
+ uint32_t hash3 = 0;
+ uint32_t hash4 = 0;
+ u32 pm_csr;
+
+ /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
+ * the multi-cast LIST. If it is NOT specified, (and "ALL" is not
+ * specified) then we should pass NO multi-cast addresses to the
+ * driver.
+ */
+ if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
+ /* Loop through our multicast array and set up the device */
+ for (nIndex = 0; nIndex < adapter->multicast_addr_count;
+ nIndex++) {
+ result = ether_crc(6, adapter->multicast_list[nIndex]);
+
+ result = (result & 0x3F800000) >> 23;
+
+ if (result < 32) {
+ hash1 |= (1 << result);
+ } else if ((31 < result) && (result < 64)) {
+ result -= 32;
+ hash2 |= (1 << result);
+ } else if ((63 < result) && (result < 96)) {
+ result -= 64;
+ hash3 |= (1 << result);
+ } else {
+ result -= 96;
+ hash4 |= (1 << result);
+ }
+ }
+ }
+
+ /* Write out the new hash to the device */
+ pm_csr = readl(&adapter->regs->global.pm_csr);
+ if (!et1310_in_phy_coma(adapter)) {
+ writel(hash1, &rxmac->multi_hash1);
+ writel(hash2, &rxmac->multi_hash2);
+ writel(hash3, &rxmac->multi_hash3);
+ writel(hash4, &rxmac->multi_hash4);
+ }
+}
+
+void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
+{
+ struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
+ u32 uni_pf1;
+ u32 uni_pf2;
+ u32 uni_pf3;
+ u32 pm_csr;
+
+ /* Set up unicast packet filter reg 3 to be the first two octets of
+ * the MAC address for both address
+ *
+ * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
+ * MAC address for second address
+ *
+ * Set up unicast packet filter reg 3 to be the octets 2 - 5 of the
+ * MAC address for first address
+ */
+ uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
+ (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
+ (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
+ adapter->addr[1];
+
+ uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
+ (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
+ (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
+ adapter->addr[5];
+
+ uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
+ (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
+ (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
+ adapter->addr[5];
+
+ pm_csr = readl(&adapter->regs->global.pm_csr);
+ if (!et1310_in_phy_coma(adapter)) {
+ writel(uni_pf1, &rxmac->uni_pf_addr1);
+ writel(uni_pf2, &rxmac->uni_pf_addr2);
+ writel(uni_pf3, &rxmac->uni_pf_addr3);
+ }
+}
+
+void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
+{
+ struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
+ struct phy_device *phydev = adapter->phydev;
+ u32 sa_lo;
+ u32 sa_hi = 0;
+ u32 pf_ctrl = 0;
+
+ /* Disable the MAC while it is being configured (also disable WOL) */
+ writel(0x8, &rxmac->ctrl);
+
+ /* Initialize WOL to disabled. */
+ writel(0, &rxmac->crc0);
+ writel(0, &rxmac->crc12);
+ writel(0, &rxmac->crc34);
+
+ /* We need to set the WOL mask0 - mask4 next. We initialize it to
+ * its default Values of 0x00000000 because there are not WOL masks
+ * as of this time.
+ */
+ writel(0, &rxmac->mask0_word0);
+ writel(0, &rxmac->mask0_word1);
+ writel(0, &rxmac->mask0_word2);
+ writel(0, &rxmac->mask0_word3);
+
+ writel(0, &rxmac->mask1_word0);
+ writel(0, &rxmac->mask1_word1);
+ writel(0, &rxmac->mask1_word2);
+ writel(0, &rxmac->mask1_word3);
+
+ writel(0, &rxmac->mask2_word0);
+ writel(0, &rxmac->mask2_word1);
+ writel(0, &rxmac->mask2_word2);
+ writel(0, &rxmac->mask2_word3);
+
+ writel(0, &rxmac->mask3_word0);
+ writel(0, &rxmac->mask3_word1);
+ writel(0, &rxmac->mask3_word2);
+ writel(0, &rxmac->mask3_word3);
+
+ writel(0, &rxmac->mask4_word0);
+ writel(0, &rxmac->mask4_word1);
+ writel(0, &rxmac->mask4_word2);
+ writel(0, &rxmac->mask4_word3);
+
+ /* Lets setup the WOL Source Address */
+ sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
+ (adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
+ (adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
+ adapter->addr[5];
+ writel(sa_lo, &rxmac->sa_lo);
+
+ sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
+ adapter->addr[1];
+ writel(sa_hi, &rxmac->sa_hi);
+
+ /* Disable all Packet Filtering */
+ writel(0, &rxmac->pf_ctrl);
+
+ /* Let's initialize the Unicast Packet filtering address */
+ if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
+ et1310_setup_device_for_unicast(adapter);
+ pf_ctrl |= 4; /* Unicast filter */
+ } else {
+ writel(0, &rxmac->uni_pf_addr1);
+ writel(0, &rxmac->uni_pf_addr2);
+ writel(0, &rxmac->uni_pf_addr3);
+ }
+
+ /* Let's initialize the Multicast hash */
+ if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
+ pf_ctrl |= 2; /* Multicast filter */
+ et1310_setup_device_for_multicast(adapter);
+ }
+
+ /* Runt packet filtering. Didn't work in version A silicon. */
+ pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
+ pf_ctrl |= 8; /* Fragment filter */
+
+ if (adapter->registry_jumbo_packet > 8192)
+ /* In order to transmit jumbo packets greater than 8k, the
+ * FIFO between RxMAC and RxDMA needs to be reduced in size
+ * to (16k - Jumbo packet size). In order to implement this,
+ * we must use "cut through" mode in the RxMAC, which chops
+ * packets down into segments which are (max_size * 16). In
+ * this case we selected 256 bytes, since this is the size of
+ * the PCI-Express TLP's that the 1310 uses.
+ *
+ * seg_en on, fc_en off, size 0x10
+ */
+ writel(0x41, &rxmac->mcif_ctrl_max_seg);
+ else
+ writel(0, &rxmac->mcif_ctrl_max_seg);
+
+ /* Initialize the MCIF water marks */
+ writel(0, &rxmac->mcif_water_mark);
+
+ /* Initialize the MIF control */
+ writel(0, &rxmac->mif_ctrl);
+
+ /* Initialize the Space Available Register */
+ writel(0, &rxmac->space_avail);
+
+ /* Initialize the the mif_ctrl register
+ * bit 3: Receive code error. One or more nibbles were signaled as
+ * errors during the reception of the packet. Clear this
+ * bit in Gigabit, set it in 100Mbit. This was derived
+ * experimentally at UNH.
+ * bit 4: Receive CRC error. The packet's CRC did not match the
+ * internally generated CRC.
+ * bit 5: Receive length check error. Indicates that frame length
+ * field value in the packet does not match the actual data
+ * byte length and is not a type field.
+ * bit 16: Receive frame truncated.
+ * bit 17: Drop packet enable
+ */
+ if (phydev && phydev->speed == SPEED_100)
+ writel(0x30038, &rxmac->mif_ctrl);
+ else
+ writel(0x30030, &rxmac->mif_ctrl);
+
+ /* Finally we initialize RxMac to be enabled & WOL disabled. Packet
+ * filter is always enabled since it is where the runt packets are
+ * supposed to be dropped. For version A silicon, runt packet
+ * dropping doesn't work, so it is disabled in the pf_ctrl register,
+ * but we still leave the packet filter on.
+ */
+ writel(pf_ctrl, &rxmac->pf_ctrl);
+ writel(0x9, &rxmac->ctrl);
+}
+
+void et1310_config_txmac_regs(struct et131x_adapter *adapter)
+{
+ struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
+
+ /* We need to update the Control Frame Parameters
+ * cfpt - control frame pause timer set to 64 (0x40)
+ * cfep - control frame extended pause timer set to 0x0
+ */
+ if (adapter->flowcontrol == FLOW_NONE)
+ writel(0, &txmac->cf_param);
+ else
+ writel(0x40, &txmac->cf_param);
+}
+
+void et1310_config_macstat_regs(struct et131x_adapter *adapter)
+{
+ struct macstat_regs __iomem *macstat =
+ &adapter->regs->macstat;
+
+ /* Next we need to initialize all the macstat registers to zero on
+ * the device.
+ */
+ writel(0, &macstat->txrx_0_64_byte_frames);
+ writel(0, &macstat->txrx_65_127_byte_frames);
+ writel(0, &macstat->txrx_128_255_byte_frames);
+ writel(0, &macstat->txrx_256_511_byte_frames);
+ writel(0, &macstat->txrx_512_1023_byte_frames);
+ writel(0, &macstat->txrx_1024_1518_byte_frames);
+ writel(0, &macstat->txrx_1519_1522_gvln_frames);
+
+ writel(0, &macstat->rx_bytes);
+ writel(0, &macstat->rx_packets);
+ writel(0, &macstat->rx_fcs_errs);
+ writel(0, &macstat->rx_multicast_packets);
+ writel(0, &macstat->rx_broadcast_packets);
+ writel(0, &macstat->rx_control_frames);
+ writel(0, &macstat->rx_pause_frames);
+ writel(0, &macstat->rx_unknown_opcodes);
+ writel(0, &macstat->rx_align_errs);
+ writel(0, &macstat->rx_frame_len_errs);
+ writel(0, &macstat->rx_code_errs);
+ writel(0, &macstat->rx_carrier_sense_errs);
+ writel(0, &macstat->rx_undersize_packets);
+ writel(0, &macstat->rx_oversize_packets);
+ writel(0, &macstat->rx_fragment_packets);
+ writel(0, &macstat->rx_jabbers);
+ writel(0, &macstat->rx_drops);
+
+ writel(0, &macstat->tx_bytes);
+ writel(0, &macstat->tx_packets);
+ writel(0, &macstat->tx_multicast_packets);
+ writel(0, &macstat->tx_broadcast_packets);
+ writel(0, &macstat->tx_pause_frames);
+ writel(0, &macstat->tx_deferred);
+ writel(0, &macstat->tx_excessive_deferred);
+ writel(0, &macstat->tx_single_collisions);
+ writel(0, &macstat->tx_multiple_collisions);
+ writel(0, &macstat->tx_late_collisions);
+ writel(0, &macstat->tx_excessive_collisions);
+ writel(0, &macstat->tx_total_collisions);
+ writel(0, &macstat->tx_pause_honored_frames);
+ writel(0, &macstat->tx_drops);
+ writel(0, &macstat->tx_jabbers);
+ writel(0, &macstat->tx_fcs_errs);
+ writel(0, &macstat->tx_control_frames);
+ writel(0, &macstat->tx_oversize_frames);
+ writel(0, &macstat->tx_undersize_frames);
+ writel(0, &macstat->tx_fragments);
+ writel(0, &macstat->carry_reg1);
+ writel(0, &macstat->carry_reg2);
+
+ /* Unmask any counters that we want to track the overflow of.
+ * Initially this will be all counters. It may become clear later
+ * that we do not need to track all counters.
+ */
+ writel(0xFFFFBE32, &macstat->carry_reg1_mask);
+ writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
+}
+
+/**
+ * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
+ * @adapter: pointer to our private adapter structure
+ * @addr: the address of the transceiver
+ * @reg: the register to read
+ * @value: pointer to a 16-bit value in which the value will be stored
+ *
+ * Returns 0 on success, errno on failure (as defined in errno.h)
+ */
+int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
+ u8 reg, u16 *value)
+{
+ struct mac_regs __iomem *mac = &adapter->regs->mac;
+ int status = 0;
+ u32 delay = 0;
+ u32 mii_addr;
+ u32 mii_cmd;
+ u32 mii_indicator;
+
+ /* Save a local copy of the registers we are dealing with so we can
+ * set them back
+ */
+ mii_addr = readl(&mac->mii_mgmt_addr);
+ mii_cmd = readl(&mac->mii_mgmt_cmd);
+
+ /* Stop the current operation */
+ writel(0, &mac->mii_mgmt_cmd);
+
+ /* Set up the register we need to read from on the correct PHY */
+ writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
+
+ writel(0x1, &mac->mii_mgmt_cmd);
+
+ do {
+ udelay(50);
+ delay++;
+ mii_indicator = readl(&mac->mii_mgmt_indicator);
+ } while ((mii_indicator & MGMT_WAIT) && delay < 50);
+
+ /* If we hit the max delay, we could not read the register */
+ if (delay == 50) {
+ dev_warn(&adapter->pdev->dev,
+ "reg 0x%08x could not be read\n", reg);
+ dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
+ mii_indicator);
+
+ status = -EIO;
+ }
+
+ /* If we hit here we were able to read the register and we need to
+ * return the value to the caller */
+ *value = readl(&mac->mii_mgmt_stat) & 0xFFFF;
+
+ /* Stop the read operation */
+ writel(0, &mac->mii_mgmt_cmd);
+
+ /* set the registers we touched back to the state at which we entered
+ * this function
+ */
+ writel(mii_addr, &mac->mii_mgmt_addr);
+ writel(mii_cmd, &mac->mii_mgmt_cmd);
+
+ return status;
+}
+
+int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
+{
+ struct phy_device *phydev = adapter->phydev;
+
+ if (!phydev)
+ return -EIO;
+
+ return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
+}
+
+/**
+ * et131x_mii_write - Write to a PHY register through the MII interface of the MAC
+ * @adapter: pointer to our private adapter structure
+ * @reg: the register to read
+ * @value: 16-bit value to write
+ *
+ * FIXME: one caller in netdev still
+ *
+ * Return 0 on success, errno on failure (as defined in errno.h)
+ */
+int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
+{
+ struct mac_regs __iomem *mac = &adapter->regs->mac;
+ struct phy_device *phydev = adapter->phydev;
+ int status = 0;
+ u8 addr;
+ u32 delay = 0;
+ u32 mii_addr;
+ u32 mii_cmd;
+ u32 mii_indicator;
+
+ if (!phydev)
+ return -EIO;
+
+ addr = phydev->addr;
+
+ /* Save a local copy of the registers we are dealing with so we can
+ * set them back
+ */
+ mii_addr = readl(&mac->mii_mgmt_addr);
+ mii_cmd = readl(&mac->mii_mgmt_cmd);
+
+ /* Stop the current operation */
+ writel(0, &mac->mii_mgmt_cmd);
+
+ /* Set up the register we need to write to on the correct PHY */
+ writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
+
+ /* Add the value to write to the registers to the mac */
+ writel(value, &mac->mii_mgmt_ctrl);
+
+ do {
+ udelay(50);
+ delay++;
+ mii_indicator = readl(&mac->mii_mgmt_indicator);
+ } while ((mii_indicator & MGMT_BUSY) && delay < 100);
+
+ /* If we hit the max delay, we could not write the register */
+ if (delay == 100) {
+ u16 tmp;
+
+ dev_warn(&adapter->pdev->dev,
+ "reg 0x%08x could not be written", reg);
+ dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
+ mii_indicator);
+ dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
+ readl(&mac->mii_mgmt_cmd));
+
+ et131x_mii_read(adapter, reg, &tmp);
+
+ status = -EIO;
+ }
+ /* Stop the write operation */
+ writel(0, &mac->mii_mgmt_cmd);
+
+ /*
+ * set the registers we touched back to the state at which we entered
+ * this function
+ */
+ writel(mii_addr, &mac->mii_mgmt_addr);
+ writel(mii_cmd, &mac->mii_mgmt_cmd);
+
+ return status;
+}
+
+/* Still used from _mac for BIT_READ */
+void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
+ u16 regnum, u16 bitnum, u8 *value)
+{
+ u16 reg;
+ u16 mask = 0x0001 << bitnum;
+
+ /* Read the requested register */
+ et131x_mii_read(adapter, regnum, &reg);
+
+ switch (action) {
+ case TRUEPHY_BIT_READ:
+ *value = (reg & mask) >> bitnum;
+ break;
+
+ case TRUEPHY_BIT_SET:
+ et131x_mii_write(adapter, regnum, reg | mask);
+ break;
+
+ case TRUEPHY_BIT_CLEAR:
+ et131x_mii_write(adapter, regnum, reg & ~mask);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void et1310_config_flow_control(struct et131x_adapter *adapter)
+{
+ struct phy_device *phydev = adapter->phydev;
+
+ if (phydev->duplex == DUPLEX_HALF) {
+ adapter->flowcontrol = FLOW_NONE;
+ } else {
+ char remote_pause, remote_async_pause;
+
+ et1310_phy_access_mii_bit(adapter,
+ TRUEPHY_BIT_READ, 5, 10, &remote_pause);
+ et1310_phy_access_mii_bit(adapter,
+ TRUEPHY_BIT_READ, 5, 11,
+ &remote_async_pause);
+
+ if ((remote_pause == TRUEPHY_BIT_SET) &&
+ (remote_async_pause == TRUEPHY_BIT_SET)) {
+ adapter->flowcontrol = adapter->wanted_flow;
+ } else if ((remote_pause == TRUEPHY_BIT_SET) &&
+ (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
+ if (adapter->wanted_flow == FLOW_BOTH)
+ adapter->flowcontrol = FLOW_BOTH;
+ else
+ adapter->flowcontrol = FLOW_NONE;
+ } else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
+ (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
+ adapter->flowcontrol = FLOW_NONE;
+ } else {/* if (remote_pause == TRUEPHY_CLEAR_BIT &&
+ remote_async_pause == TRUEPHY_SET_BIT) */
+ if (adapter->wanted_flow == FLOW_BOTH)
+ adapter->flowcontrol = FLOW_RXONLY;
+ else
+ adapter->flowcontrol = FLOW_NONE;
+ }
+ }
+}
+
+/**
+ * et1310_update_macstat_host_counters - Update the local copy of the statistics
+ * @adapter: pointer to the adapter structure
+ */
+void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
+{
+ struct ce_stats *stats = &adapter->stats;
+ struct macstat_regs __iomem *macstat =
+ &adapter->regs->macstat;
+
+ stats->tx_collisions += readl(&macstat->tx_total_collisions);
+ stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
+ stats->tx_deferred += readl(&macstat->tx_deferred);
+ stats->tx_excessive_collisions +=
+ readl(&macstat->tx_multiple_collisions);
+ stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
+ stats->tx_underflows += readl(&macstat->tx_undersize_frames);
+ stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);
+
+ stats->rx_align_errs += readl(&macstat->rx_align_errs);
+ stats->rx_crc_errs += readl(&macstat->rx_code_errs);
+ stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
+ stats->rx_overflows += readl(&macstat->rx_oversize_packets);
+ stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
+ stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
+ stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
+}
+
+/**
+ * et1310_handle_macstat_interrupt
+ * @adapter: pointer to the adapter structure
+ *
+ * One of the MACSTAT counters has wrapped. Update the local copy of
+ * the statistics held in the adapter structure, checking the "wrap"
+ * bit for each counter.
+ */
+void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
+{
+ u32 carry_reg1;
+ u32 carry_reg2;
+
+ /* Read the interrupt bits from the register(s). These are Clear On
+ * Write.
+ */
+ carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
+ carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);
+
+ writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
+ writel(carry_reg2, &adapter->regs->macstat.carry_reg2);
+
+ /* We need to do update the host copy of all the MAC_STAT counters.
+ * For each counter, check it's overflow bit. If the overflow bit is
+ * set, then increment the host version of the count by one complete
+ * revolution of the counter. This routine is called when the counter
+ * block indicates that one of the counters has wrapped.
+ */
+ if (carry_reg1 & (1 << 14))
+ adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
+ if (carry_reg1 & (1 << 8))
+ adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
+ if (carry_reg1 & (1 << 7))
+ adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
+ if (carry_reg1 & (1 << 2))
+ adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
+ if (carry_reg1 & (1 << 6))
+ adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
+ if (carry_reg1 & (1 << 3))
+ adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
+ if (carry_reg1 & (1 << 0))
+ adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
+ if (carry_reg2 & (1 << 16))
+ adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
+ if (carry_reg2 & (1 << 15))
+ adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
+ if (carry_reg2 & (1 << 6))
+ adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
+ if (carry_reg2 & (1 << 8))
+ adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
+ if (carry_reg2 & (1 << 5))
+ adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
+ if (carry_reg2 & (1 << 4))
+ adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
+ if (carry_reg2 & (1 << 2))
+ adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
+}
+
+/* PHY functions */
+
+int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
+{
+ struct net_device *netdev = bus->priv;
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ u16 value;
+ int ret;
+
+ ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
+
+ if (ret < 0)
+ return ret;
+ else
+ return value;
+}
+
+int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
+{
+ struct net_device *netdev = bus->priv;
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ return et131x_mii_write(adapter, reg, value);
+}
+
+int et131x_mdio_reset(struct mii_bus *bus)
+{
+ struct net_device *netdev = bus->priv;
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
+
+ return 0;
+}
+
+/**
+ * et1310_phy_power_down - PHY power control
+ * @adapter: device to control
+ * @down: true for off/false for back on
+ *
+ * one hundred, ten, one thousand megs
+ * How would you like to have your LAN accessed
+ * Can't you see that this code processed
+ * Phy power, phy power..
+ */
+void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
+{
+ u16 data;
+
+ et131x_mii_read(adapter, MII_BMCR, &data);
+ data &= ~BMCR_PDOWN;
+ if (down)
+ data |= BMCR_PDOWN;
+ et131x_mii_write(adapter, MII_BMCR, data);
+}
+
+/**
+ * et131x_xcvr_init - Init the phy if we are setting it into force mode
+ * @adapter: pointer to our private adapter structure
+ *
+ */
+void et131x_xcvr_init(struct et131x_adapter *adapter)
+{
+ u16 imr;
+ u16 isr;
+ u16 lcr2;
+
+ et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
+ et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);
+
+ /* Set the link status interrupt only. Bad behavior when link status
+ * and auto neg are set, we run into a nested interrupt problem
+ */
+ imr |= (ET_PHY_INT_MASK_AUTONEGSTAT &
+ ET_PHY_INT_MASK_LINKSTAT &
+ ET_PHY_INT_MASK_ENABLE);
+
+ et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);
+
+ /* Set the LED behavior such that LED 1 indicates speed (off =
+ * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
+ * link and activity (on for link, blink off for activity).
+ *
+ * NOTE: Some customizations have been added here for specific
+ * vendors; The LED behavior is now determined by vendor data in the
+ * EEPROM. However, the above description is the default.
+ */
+ if ((adapter->eeprom_data[1] & 0x4) == 0) {
+ et131x_mii_read(adapter, PHY_LED_2, &lcr2);
+
+ lcr2 &= (ET_LED2_LED_100TX & ET_LED2_LED_1000T);
+ lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
+
+ if ((adapter->eeprom_data[1] & 0x8) == 0)
+ lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
+ else
+ lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
+
+ et131x_mii_write(adapter, PHY_LED_2, lcr2);
+ }
+}
+
+/**
+ * et131x_configure_global_regs - configure JAGCore global regs
+ * @adapter: pointer to our adapter structure
+ *
+ * Used to configure the global registers on the JAGCore
+ */
+void et131x_configure_global_regs(struct et131x_adapter *adapter)
+{
+ struct global_regs __iomem *regs = &adapter->regs->global;
+
+ writel(0, &regs->rxq_start_addr);
+ writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
+
+ if (adapter->registry_jumbo_packet < 2048) {
+ /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
+ * block of RAM that the driver can split between Tx
+ * and Rx as it desires. Our default is to split it
+ * 50/50:
+ */
+ writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
+ writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
+ } else if (adapter->registry_jumbo_packet < 8192) {
+ /* For jumbo packets > 2k but < 8k, split 50-50. */
+ writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
+ writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
+ } else {
+ /* 9216 is the only packet size greater than 8k that
+ * is available. The Tx buffer has to be big enough
+ * for one whole packet on the Tx side. We'll make
+ * the Tx 9408, and give the rest to Rx
+ */
+ writel(0x01b3, &regs->rxq_end_addr);
+ writel(0x01b4, &regs->txq_start_addr);
+ }
+
+ /* Initialize the loopback register. Disable all loopbacks. */
+ writel(0, &regs->loopback);
+
+ /* MSI Register */
+ writel(0, &regs->msi_config);
+
+ /* By default, disable the watchdog timer. It will be enabled when
+ * a packet is queued.
+ */
+ writel(0, &regs->watchdog_timer);
+}
+
+/* PM functions */
+
+/**
+ * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
+ * @adapter: pointer to our adapter structure
+ */
+void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
+{
+ struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
+ struct rx_ring *rx_local = &adapter->rx_ring;
+ struct fbr_desc *fbr_entry;
+ u32 entry;
+ u32 psr_num_des;
+ unsigned long flags;
+
+ /* Halt RXDMA to perform the reconfigure. */
+ et131x_rx_dma_disable(adapter);
+
+ /* Load the completion writeback physical address
+ *
+ * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
+ * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
+ * are ever returned, make sure the high part is retrieved here
+ * before storing the adjusted address.
+ */
+ writel((u32) ((u64)rx_local->rx_status_bus >> 32),
+ &rx_dma->dma_wb_base_hi);
+ writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
+
+ memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
+
+ /* Set the address and parameters of the packet status ring into the
+ * 1310's registers
+ */
+ writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
+ &rx_dma->psr_base_hi);
+ writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
+ writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
+ writel(0, &rx_dma->psr_full_offset);
+
+ psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
+ writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
+ &rx_dma->psr_min_des);
+
+ spin_lock_irqsave(&adapter->rcv_lock, flags);
+
+ /* These local variables track the PSR in the adapter structure */
+ rx_local->local_psr_full = 0;
+
+ /* Now's the best time to initialize FBR1 contents */
+ fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
+ for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
+ fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
+ fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
+ fbr_entry->word2 = entry;
+ fbr_entry++;
+ }
+
+ /* Set the address and parameters of Free buffer ring 1 (and 0 if
+ * required) into the 1310's registers
+ */
+ writel((u32) (rx_local->fbr[0]->real_physaddr >> 32),
+ &rx_dma->fbr1_base_hi);
+ writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo);
+ writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des);
+ writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
+
+ /* This variable tracks the free buffer ring 1 full position, so it
+ * has to match the above.
+ */
+ rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
+ writel(
+ ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
+ &rx_dma->fbr1_min_des);
+
+#ifdef USE_FBR0
+ /* Now's the best time to initialize FBR0 contents */
+ fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr;
+ for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) {
+ fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
+ fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
+ fbr_entry->word2 = entry;
+ fbr_entry++;
+ }
+
+ writel((u32) (rx_local->fbr[1]->real_physaddr >> 32),
+ &rx_dma->fbr0_base_hi);
+ writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo);
+ writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des);
+ writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
+
+ /* This variable tracks the free buffer ring 0 full position, so it
+ * has to match the above.
+ */
+ rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
+ writel(
+ ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
+ &rx_dma->fbr0_min_des);
+#endif
+
+ /* Program the number of packets we will receive before generating an
+ * interrupt.
+ * For version B silicon, this value gets updated once autoneg is
+ *complete.
+ */
+ writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
+
+ /* The "time_done" is not working correctly to coalesce interrupts
+ * after a given time period, but rather is giving us an interrupt
+ * regardless of whether we have received packets.
+ * This value gets updated once autoneg is complete.
+ */
+ writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
+
+ spin_unlock_irqrestore(&adapter->rcv_lock, flags);
+}
+
+/**
+ * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
+ * @adapter: pointer to our private adapter structure
+ *
+ * Configure the transmit engine with the ring buffers we have created
+ * and prepare it for use.
+ */
+void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
+{
+ struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
+
+ /* Load the hardware with the start of the transmit descriptor ring. */
+ writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
+ &txdma->pr_base_hi);
+ writel((u32) adapter->tx_ring.tx_desc_ring_pa,
+ &txdma->pr_base_lo);
+
+ /* Initialise the transmit DMA engine */
+ writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
+
+ /* Load the completion writeback physical address */
+ writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
+ &txdma->dma_wb_base_hi);
+ writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
+
+ *adapter->tx_ring.tx_status = 0;
+
+ writel(0, &txdma->service_request);
+ adapter->tx_ring.send_idx = 0;
+}
+
+/**
+ * et131x_adapter_setup - Set the adapter up as per cassini+ documentation
+ * @adapter: pointer to our private adapter structure
+ *
+ * Returns 0 on success, errno on failure (as defined in errno.h)
+ */
+void et131x_adapter_setup(struct et131x_adapter *adapter)
+{
+ /* Configure the JAGCore */
+ et131x_configure_global_regs(adapter);
+
+ et1310_config_mac_regs1(adapter);
+
+ /* Configure the MMC registers */
+ /* All we need to do is initialize the Memory Control Register */
+ writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
+
+ et1310_config_rxmac_regs(adapter);
+ et1310_config_txmac_regs(adapter);
+
+ et131x_config_rx_dma_regs(adapter);
+ et131x_config_tx_dma_regs(adapter);
+
+ et1310_config_macstat_regs(adapter);
+
+ et1310_phy_power_down(adapter, 0);
+ et131x_xcvr_init(adapter);
+}
+
+/**
+ * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
+ * @adapter: pointer to our private adapter structure
+ */
+void et131x_soft_reset(struct et131x_adapter *adapter)
+{
+ /* Disable MAC Core */
+ writel(0xc00f0000, &adapter->regs->mac.cfg1);
+
+ /* Set everything to a reset value */
+ writel(0x7F, &adapter->regs->global.sw_reset);
+ writel(0x000f0000, &adapter->regs->mac.cfg1);
+ writel(0x00000000, &adapter->regs->mac.cfg1);
+}
+
+/**
+ * et131x_enable_interrupts - enable interrupt
+ * @adapter: et131x device
+ *
+ * Enable the appropriate interrupts on the ET131x according to our
+ * configuration
+ */
+void et131x_enable_interrupts(struct et131x_adapter *adapter)
+{
+ u32 mask;
+
+ /* Enable all global interrupts */
+ if (adapter->flowcontrol == FLOW_TXONLY ||
+ adapter->flowcontrol == FLOW_BOTH)
+ mask = INT_MASK_ENABLE;
+ else
+ mask = INT_MASK_ENABLE_NO_FLOW;
+
+ writel(mask, &adapter->regs->global.int_mask);
+}
+
+/**
+ * et131x_disable_interrupts - interrupt disable
+ * @adapter: et131x device
+ *
+ * Block all interrupts from the et131x device at the device itself
+ */
+void et131x_disable_interrupts(struct et131x_adapter *adapter)
+{
+ /* Disable all global interrupts */
+ writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
+}
+
+/**
+ * et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
+ * @adapter: pointer to our adapter structure
+ */
+void et131x_tx_dma_disable(struct et131x_adapter *adapter)
+{
+ /* Setup the tramsmit dma configuration register */
+ writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
+ &adapter->regs->txdma.csr);
+}
+
+/**
+ * et131x_enable_txrx - Enable tx/rx queues
+ * @netdev: device to be enabled
+ */
+void et131x_enable_txrx(struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ /* Enable the Tx and Rx DMA engines (if not already enabled) */
+ et131x_rx_dma_enable(adapter);
+ et131x_tx_dma_enable(adapter);
+
+ /* Enable device interrupts */
+ if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
+ et131x_enable_interrupts(adapter);
+
+ /* We're ready to move some data, so start the queue */
+ netif_start_queue(netdev);
+}
+
+/**
+ * et131x_disable_txrx - Disable tx/rx queues
+ * @netdev: device to be disabled
+ */
+void et131x_disable_txrx(struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ /* First thing is to stop the queue */
+ netif_stop_queue(netdev);
+
+ /* Stop the Tx and Rx DMA engines */
+ et131x_rx_dma_disable(adapter);
+ et131x_tx_dma_disable(adapter);
+
+ /* Disable device interrupts */
+ et131x_disable_interrupts(adapter);
+}
+
+/**
+ * et131x_init_send - Initialize send data structures
+ * @adapter: pointer to our private adapter structure
+ */
+void et131x_init_send(struct et131x_adapter *adapter)
+{
+ struct tcb *tcb;
+ u32 ct;
+ struct tx_ring *tx_ring;
+
+ /* Setup some convenience pointers */
+ tx_ring = &adapter->tx_ring;
+ tcb = adapter->tx_ring.tcb_ring;
+
+ tx_ring->tcb_qhead = tcb;
+
+ memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
+
+ /* Go through and set up each TCB */
+ for (ct = 0; ct++ < NUM_TCB; tcb++)
+ /* Set the link pointer in HW TCB to the next TCB in the
+ * chain
+ */
+ tcb->next = tcb + 1;
+
+ /* Set the tail pointer */
+ tcb--;
+ tx_ring->tcb_qtail = tcb;
+ tcb->next = NULL;
+ /* Curr send queue should now be empty */
+ tx_ring->send_head = NULL;
+ tx_ring->send_tail = NULL;
+}
+
+/**
+ * et1310_enable_phy_coma - called when network cable is unplugged
+ * @adapter: pointer to our adapter structure
+ *
+ * driver receive an phy status change interrupt while in D0 and check that
+ * phy_status is down.
+ *
+ * -- gate off JAGCore;
+ * -- set gigE PHY in Coma mode
+ * -- wake on phy_interrupt; Perform software reset JAGCore,
+ * re-initialize jagcore and gigE PHY
+ *
+ * Add D0-ASPM-PhyLinkDown Support:
+ * -- while in D0, when there is a phy_interrupt indicating phy link
+ * down status, call the MPSetPhyComa routine to enter this active
+ * state power saving mode
+ * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
+ * indicating linkup status, call the MPDisablePhyComa routine to
+ * restore JAGCore and gigE PHY
+ */
+void et1310_enable_phy_coma(struct et131x_adapter *adapter)
+{
+ unsigned long flags;
+ u32 pmcsr;
+
+ pmcsr = readl(&adapter->regs->global.pm_csr);
+
+ /* Save the GbE PHY speed and duplex modes. Need to restore this
+ * when cable is plugged back in
+ */
+ /*
+ * TODO - when PM is re-enabled, check if we need to
+ * perform a similar task as this -
+ * adapter->pdown_speed = adapter->ai_force_speed;
+ * adapter->pdown_duplex = adapter->ai_force_duplex;
+ */
+
+ /* Stop sending packets. */
+ spin_lock_irqsave(&adapter->send_hw_lock, flags);
+ adapter->flags |= fMP_ADAPTER_LOWER_POWER;
+ spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
+
+ /* Wait for outstanding Receive packets */
+
+ et131x_disable_txrx(adapter->netdev);
+
+ /* Gate off JAGCore 3 clock domains */
+ pmcsr &= ~ET_PMCSR_INIT;
+ writel(pmcsr, &adapter->regs->global.pm_csr);
+
+ /* Program gigE PHY in to Coma mode */
+ pmcsr |= ET_PM_PHY_SW_COMA;
+ writel(pmcsr, &adapter->regs->global.pm_csr);
+}
+
+/**
+ * et1310_disable_phy_coma - Disable the Phy Coma Mode
+ * @adapter: pointer to our adapter structure
+ */
+void et1310_disable_phy_coma(struct et131x_adapter *adapter)
+{
+ u32 pmcsr;
+
+ pmcsr = readl(&adapter->regs->global.pm_csr);
+
+ /* Disable phy_sw_coma register and re-enable JAGCore clocks */
+ pmcsr |= ET_PMCSR_INIT;
+ pmcsr &= ~ET_PM_PHY_SW_COMA;
+ writel(pmcsr, &adapter->regs->global.pm_csr);
+
+ /* Restore the GbE PHY speed and duplex modes;
+ * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
+ */
+ /* TODO - when PM is re-enabled, check if we need to
+ * perform a similar task as this -
+ * adapter->ai_force_speed = adapter->pdown_speed;
+ * adapter->ai_force_duplex = adapter->pdown_duplex;
+ */
+
+ /* Re-initialize the send structures */
+ et131x_init_send(adapter);
+
+ /* Bring the device back to the state it was during init prior to
+ * autonegotiation being complete. This way, when we get the auto-neg
+ * complete interrupt, we can complete init by calling ConfigMacREGS2.
+ */
+ et131x_soft_reset(adapter);
+
+ /* setup et1310 as per the documentation ?? */
+ et131x_adapter_setup(adapter);
+
+ /* Allow Tx to restart */
+ adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;
+
+ et131x_enable_txrx(adapter->netdev);
+}
+
+/* RX functions */
+
+static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
+{
+ u32 tmp_free_buff_ring = *free_buff_ring;
+ tmp_free_buff_ring++;
+ /* This works for all cases where limit < 1024. The 1023 case
+ works because 1023++ is 1024 which means the if condition is not
+ taken but the carry of the bit into the wrap bit toggles the wrap
+ value correctly */
+ if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
+ tmp_free_buff_ring &= ~ET_DMA10_MASK;
+ tmp_free_buff_ring ^= ET_DMA10_WRAP;
+ }
+ /* For the 1023 case */
+ tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
+ *free_buff_ring = tmp_free_buff_ring;
+ return tmp_free_buff_ring;
+}
+
+/**
+ * et131x_align_allocated_memory - Align allocated memory on a given boundary
+ * @adapter: pointer to our adapter structure
+ * @phys_addr: pointer to Physical address
+ * @offset: pointer to the offset variable
+ * @mask: correct mask
+ */
+void et131x_align_allocated_memory(struct et131x_adapter *adapter,
+ uint64_t *phys_addr,
+ uint64_t *offset, uint64_t mask)
+{
+ uint64_t new_addr;
+
+ *offset = 0;
+
+ new_addr = *phys_addr & ~mask;
+
+ if (new_addr != *phys_addr) {
+ /* Move to next aligned block */
+ new_addr += mask + 1;
+ /* Return offset for adjusting virt addr */
+ *offset = new_addr - *phys_addr;
+ /* Return new physical address */
+ *phys_addr = new_addr;
+ }
+}
+
+/**
+ * et131x_rx_dma_memory_alloc
+ * @adapter: pointer to our private adapter structure
+ *
+ * Returns 0 on success and errno on failure (as defined in errno.h)
+ *
+ * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
+ * and the Packet Status Ring.
+ */
+int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
+{
+ u32 i, j;
+ u32 bufsize;
+ u32 pktstat_ringsize, fbr_chunksize;
+ struct rx_ring *rx_ring;
+
+ /* Setup some convenience pointers */
+ rx_ring = &adapter->rx_ring;
+
+ /* Alloc memory for the lookup table */
+#ifdef USE_FBR0
+ rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
+#endif
+ rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
+
+ /* The first thing we will do is configure the sizes of the buffer
+ * rings. These will change based on jumbo packet support. Larger
+ * jumbo packets increases the size of each entry in FBR0, and the
+ * number of entries in FBR0, while at the same time decreasing the
+ * number of entries in FBR1.
+ *
+ * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
+ * entries are huge in order to accommodate a "jumbo" frame, then it
+ * will have less entries. Conversely, FBR1 will now be relied upon
+ * to carry more "normal" frames, thus it's entry size also increases
+ * and the number of entries goes up too (since it now carries
+ * "small" + "regular" packets.
+ *
+ * In this scheme, we try to maintain 512 entries between the two
+ * rings. Also, FBR1 remains a constant size - when it's size doubles
+ * the number of entries halves. FBR0 increases in size, however.
+ */
+
+ if (adapter->registry_jumbo_packet < 2048) {
+#ifdef USE_FBR0
+ rx_ring->fbr[1]->buffsize = 256;
+ rx_ring->fbr[1]->num_entries = 512;
+#endif
+ rx_ring->fbr[0]->buffsize = 2048;
+ rx_ring->fbr[0]->num_entries = 512;
+ } else if (adapter->registry_jumbo_packet < 4096) {
+#ifdef USE_FBR0
+ rx_ring->fbr[1]->buffsize = 512;
+ rx_ring->fbr[1]->num_entries = 1024;
+#endif
+ rx_ring->fbr[0]->buffsize = 4096;
+ rx_ring->fbr[0]->num_entries = 512;
+ } else {
+#ifdef USE_FBR0
+ rx_ring->fbr[1]->buffsize = 1024;
+ rx_ring->fbr[1]->num_entries = 768;
+#endif
+ rx_ring->fbr[0]->buffsize = 16384;
+ rx_ring->fbr[0]->num_entries = 128;
+ }
+
+#ifdef USE_FBR0
+ adapter->rx_ring.psr_num_entries =
+ adapter->rx_ring.fbr[1]->num_entries +
+ adapter->rx_ring.fbr[0]->num_entries;
+#else
+ adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries;
+#endif
+
+ /* Allocate an area of memory for Free Buffer Ring 1 */
+ bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
+ 0xfff;
+ rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
+ bufsize,
+ &rx_ring->fbr[0]->ring_physaddr,
+ GFP_KERNEL);
+ if (!rx_ring->fbr[0]->ring_virtaddr) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot alloc memory for Free Buffer Ring 1\n");
+ return -ENOMEM;
+ }
+
+ /* Save physical address
+ *
+ * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
+ * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
+ * are ever returned, make sure the high part is retrieved here
+ * before storing the adjusted address.
+ */
+ rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr;
+
+ /* Align Free Buffer Ring 1 on a 4K boundary */
+ et131x_align_allocated_memory(adapter,
+ &rx_ring->fbr[0]->real_physaddr,
+ &rx_ring->fbr[0]->offset, 0x0FFF);
+
+ rx_ring->fbr[0]->ring_virtaddr =
+ (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr +
+ rx_ring->fbr[0]->offset);
+
+#ifdef USE_FBR0
+ /* Allocate an area of memory for Free Buffer Ring 0 */
+ bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
+ 0xfff;
+ rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
+ bufsize,
+ &rx_ring->fbr[1]->ring_physaddr,
+ GFP_KERNEL);
+ if (!rx_ring->fbr[1]->ring_virtaddr) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot alloc memory for Free Buffer Ring 0\n");
+ return -ENOMEM;
+ }
+
+ /* Save physical address
+ *
+ * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
+ * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
+ * are ever returned, make sure the high part is retrieved here before
+ * storing the adjusted address.
+ */
+ rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr;
+
+ /* Align Free Buffer Ring 0 on a 4K boundary */
+ et131x_align_allocated_memory(adapter,
+ &rx_ring->fbr[1]->real_physaddr,
+ &rx_ring->fbr[1]->offset, 0x0FFF);
+
+ rx_ring->fbr[1]->ring_virtaddr =
+ (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr +
+ rx_ring->fbr[1]->offset);
+#endif
+ for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
+ u64 fbr1_offset;
+ u64 fbr1_tmp_physaddr;
+ u32 fbr1_align;
+
+ /* This code allocates an area of memory big enough for N
+ * free buffers + (buffer_size - 1) so that the buffers can
+ * be aligned on 4k boundaries. If each buffer were aligned
+ * to a buffer_size boundary, the effect would be to double
+ * the size of FBR0. By allocating N buffers at once, we
+ * reduce this overhead.
+ */
+ if (rx_ring->fbr[0]->buffsize > 4096)
+ fbr1_align = 4096;
+ else
+ fbr1_align = rx_ring->fbr[0]->buffsize;
+
+ fbr_chunksize =
+ (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
+ rx_ring->fbr[0]->mem_virtaddrs[i] =
+ dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
+ &rx_ring->fbr[0]->mem_physaddrs[i],
+ GFP_KERNEL);
+
+ if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
+ dev_err(&adapter->pdev->dev,
+ "Could not alloc memory\n");
+ return -ENOMEM;
+ }
+
+ /* See NOTE in "Save Physical Address" comment above */
+ fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i];
+
+ et131x_align_allocated_memory(adapter,
+ &fbr1_tmp_physaddr,
+ &fbr1_offset, (fbr1_align - 1));
+
+ for (j = 0; j < FBR_CHUNKS; j++) {
+ u32 index = (i * FBR_CHUNKS) + j;
+
+ /* Save the Virtual address of this index for quick
+ * access later
+ */
+ rx_ring->fbr[0]->virt[index] =
+ (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] +
+ (j * rx_ring->fbr[0]->buffsize) + fbr1_offset;
+
+ /* now store the physical address in the descriptor
+ * so the device can access it
+ */
+ rx_ring->fbr[0]->bus_high[index] =
+ (u32) (fbr1_tmp_physaddr >> 32);
+ rx_ring->fbr[0]->bus_low[index] =
+ (u32) fbr1_tmp_physaddr;
+
+ fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize;
+
+ rx_ring->fbr[0]->buffer1[index] =
+ rx_ring->fbr[0]->virt[index];
+ rx_ring->fbr[0]->buffer2[index] =
+ rx_ring->fbr[0]->virt[index] - 4;
+ }
+ }
+
+#ifdef USE_FBR0
+ /* Same for FBR0 (if in use) */
+ for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
+ u64 fbr0_offset;
+ u64 fbr0_tmp_physaddr;
+
+ fbr_chunksize =
+ ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
+ rx_ring->fbr[1]->mem_virtaddrs[i] =
+ dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
+ &rx_ring->fbr[1]->mem_physaddrs[i],
+ GFP_KERNEL);
+
+ if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
+ dev_err(&adapter->pdev->dev,
+ "Could not alloc memory\n");
+ return -ENOMEM;
+ }
+
+ /* See NOTE in "Save Physical Address" comment above */
+ fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i];
+
+ et131x_align_allocated_memory(adapter,
+ &fbr0_tmp_physaddr,
+ &fbr0_offset,
+ rx_ring->fbr[1]->buffsize - 1);
+
+ for (j = 0; j < FBR_CHUNKS; j++) {
+ u32 index = (i * FBR_CHUNKS) + j;
+
+ rx_ring->fbr[1]->virt[index] =
+ (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] +
+ (j * rx_ring->fbr[1]->buffsize) + fbr0_offset;
+
+ rx_ring->fbr[1]->bus_high[index] =
+ (u32) (fbr0_tmp_physaddr >> 32);
+ rx_ring->fbr[1]->bus_low[index] =
+ (u32) fbr0_tmp_physaddr;
+
+ fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize;
+
+ rx_ring->fbr[1]->buffer1[index] =
+ rx_ring->fbr[1]->virt[index];
+ rx_ring->fbr[1]->buffer2[index] =
+ rx_ring->fbr[1]->virt[index] - 4;
+ }
+ }
+#endif
+
+ /* Allocate an area of memory for FIFO of Packet Status ring entries */
+ pktstat_ringsize =
+ sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
+
+ rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
+ pktstat_ringsize,
+ &rx_ring->ps_ring_physaddr,
+ GFP_KERNEL);
+
+ if (!rx_ring->ps_ring_virtaddr) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot alloc memory for Packet Status Ring\n");
+ return -ENOMEM;
+ }
+ printk(KERN_INFO "Packet Status Ring %lx\n",
+ (unsigned long) rx_ring->ps_ring_physaddr);
+
+ /*
+ * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
+ * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
+ * are ever returned, make sure the high part is retrieved here before
+ * storing the adjusted address.
+ */
+
+ /* Allocate an area of memory for writeback of status information */
+ rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(struct rx_status_block),
+ &rx_ring->rx_status_bus,
+ GFP_KERNEL);
+ if (!rx_ring->rx_status_block) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot alloc memory for Status Block\n");
+ return -ENOMEM;
+ }
+ rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
+ printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
+
+ /* Recv
+ * kmem_cache_create initializes a lookaside list. After successful
+ * creation, nonpaged fixed-size blocks can be allocated from and
+ * freed to the lookaside list.
+ * RFDs will be allocated from this pool.
+ */
+ rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
+ sizeof(struct rfd),
+ 0,
+ SLAB_CACHE_DMA |
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+
+ adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
+
+ /* The RFDs are going to be put on lists later on, so initialize the
+ * lists now.
+ */
+ INIT_LIST_HEAD(&rx_ring->recv_list);
+ return 0;
+}
+
+/**
+ * et131x_rx_dma_memory_free - Free all memory allocated within this module.
+ * @adapter: pointer to our private adapter structure
+ */
+void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
+{
+ u32 index;
+ u32 bufsize;
+ u32 pktstat_ringsize;
+ struct rfd *rfd;
+ struct rx_ring *rx_ring;
+
+ /* Setup some convenience pointers */
+ rx_ring = &adapter->rx_ring;
+
+ /* Free RFDs and associated packet descriptors */
+ WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
+
+ while (!list_empty(&rx_ring->recv_list)) {
+ rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
+ struct rfd, list_node);
+
+ list_del(&rfd->list_node);
+ rfd->skb = NULL;
+ kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
+ }
+
+ /* Free Free Buffer Ring 1 */
+ if (rx_ring->fbr[0]->ring_virtaddr) {
+ /* First the packet memory */
+ for (index = 0; index <
+ (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) {
+ if (rx_ring->fbr[0]->mem_virtaddrs[index]) {
+ u32 fbr1_align;
+
+ if (rx_ring->fbr[0]->buffsize > 4096)
+ fbr1_align = 4096;
+ else
+ fbr1_align = rx_ring->fbr[0]->buffsize;
+
+ bufsize =
+ (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) +
+ fbr1_align - 1;
+
+ dma_free_coherent(&adapter->pdev->dev,
+ bufsize,
+ rx_ring->fbr[0]->mem_virtaddrs[index],
+ rx_ring->fbr[0]->mem_physaddrs[index]);
+
+ rx_ring->fbr[0]->mem_virtaddrs[index] = NULL;
+ }
+ }
+
+ /* Now the FIFO itself */
+ rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
+ rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
+
+ bufsize =
+ (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
+ 0xfff;
+
+ dma_free_coherent(&adapter->pdev->dev, bufsize,
+ rx_ring->fbr[0]->ring_virtaddr,
+ rx_ring->fbr[0]->ring_physaddr);
+
+ rx_ring->fbr[0]->ring_virtaddr = NULL;
+ }
+
+#ifdef USE_FBR0
+ /* Now the same for Free Buffer Ring 0 */
+ if (rx_ring->fbr[1]->ring_virtaddr) {
+ /* First the packet memory */
+ for (index = 0; index <
+ (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) {
+ if (rx_ring->fbr[1]->mem_virtaddrs[index]) {
+ bufsize =
+ (rx_ring->fbr[1]->buffsize *
+ (FBR_CHUNKS + 1)) - 1;
+
+ dma_free_coherent(&adapter->pdev->dev,
+ bufsize,
+ rx_ring->fbr[1]->mem_virtaddrs[index],
+ rx_ring->fbr[1]->mem_physaddrs[index]);
+
+ rx_ring->fbr[1]->mem_virtaddrs[index] = NULL;
+ }
+ }
+
+ /* Now the FIFO itself */
+ rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
+ rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
+
+ bufsize =
+ (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
+ 0xfff;
+
+ dma_free_coherent(&adapter->pdev->dev,
+ bufsize,
+ rx_ring->fbr[1]->ring_virtaddr,
+ rx_ring->fbr[1]->ring_physaddr);
+
+ rx_ring->fbr[1]->ring_virtaddr = NULL;
+ }
+#endif
+
+ /* Free Packet Status Ring */
+ if (rx_ring->ps_ring_virtaddr) {
+ pktstat_ringsize =
+ sizeof(struct pkt_stat_desc) *
+ adapter->rx_ring.psr_num_entries;
+
+ dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
+ rx_ring->ps_ring_virtaddr,
+ rx_ring->ps_ring_physaddr);
+
+ rx_ring->ps_ring_virtaddr = NULL;
+ }
+
+ /* Free area of memory for the writeback of status information */
+ if (rx_ring->rx_status_block) {
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct rx_status_block),
+ rx_ring->rx_status_block, rx_ring->rx_status_bus);
+ rx_ring->rx_status_block = NULL;
+ }
+
+ /* Destroy the lookaside (RFD) pool */
+ if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
+ kmem_cache_destroy(rx_ring->recv_lookaside);
+ adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
+ }
+
+ /* Free the FBR Lookup Table */
+#ifdef USE_FBR0
+ kfree(rx_ring->fbr[1]);
+#endif
+
+ kfree(rx_ring->fbr[0]);
+
+ /* Reset Counters */
+ rx_ring->num_ready_recv = 0;
+}
+
+/**
+ * et131x_init_recv - Initialize receive data structures.
+ * @adapter: pointer to our private adapter structure
+ *
+ * Returns 0 on success and errno on failure (as defined in errno.h)
+ */
+int et131x_init_recv(struct et131x_adapter *adapter)
+{
+ int status = -ENOMEM;
+ struct rfd *rfd = NULL;
+ u32 rfdct;
+ u32 numrfd = 0;
+ struct rx_ring *rx_ring;
+
+ /* Setup some convenience pointers */
+ rx_ring = &adapter->rx_ring;
+
+ /* Setup each RFD */
+ for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
+ rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
+ GFP_ATOMIC | GFP_DMA);
+
+ if (!rfd) {
+ dev_err(&adapter->pdev->dev,
+ "Couldn't alloc RFD out of kmem_cache\n");
+ status = -ENOMEM;
+ continue;
+ }
+
+ rfd->skb = NULL;
+
+ /* Add this RFD to the recv_list */
+ list_add_tail(&rfd->list_node, &rx_ring->recv_list);
+
+ /* Increment both the available RFD's, and the total RFD's. */
+ rx_ring->num_ready_recv++;
+ numrfd++;
+ }
+
+ if (numrfd > NIC_MIN_NUM_RFD)
+ status = 0;
+
+ rx_ring->num_rfd = numrfd;
+
+ if (status != 0) {
+ kmem_cache_free(rx_ring->recv_lookaside, rfd);
+ dev_err(&adapter->pdev->dev,
+ "Allocation problems in et131x_init_recv\n");
+ }
+ return status;
+}
+
+/**
+ * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
+ * @adapter: pointer to our adapter structure
+ */
+void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
+{
+ struct phy_device *phydev = adapter->phydev;
+
+ if (!phydev)
+ return;
+
+ /* For version B silicon, we do not use the RxDMA timer for 10 and 100
+ * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
+ */
+ if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
+ writel(0, &adapter->regs->rxdma.max_pkt_time);
+ writel(1, &adapter->regs->rxdma.num_pkt_done);
+ }
+}
+
+/**
+ * NICReturnRFD - Recycle a RFD and put it back onto the receive list
+ * @adapter: pointer to our adapter
+ * @rfd: pointer to the RFD
+ */
+static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
+{
+ struct rx_ring *rx_local = &adapter->rx_ring;
+ struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
+ u16 buff_index = rfd->bufferindex;
+ u8 ring_index = rfd->ringindex;
+ unsigned long flags;
+
+ /* We don't use any of the OOB data besides status. Otherwise, we
+ * need to clean up OOB data
+ */
+ if (
+#ifdef USE_FBR0
+ (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
+#endif
+ (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
+ spin_lock_irqsave(&adapter->fbr_lock, flags);
+
+ if (ring_index == 1) {
+ struct fbr_desc *next = (struct fbr_desc *)
+ (rx_local->fbr[0]->ring_virtaddr) +
+ INDEX10(rx_local->fbr[0]->local_full);
+
+ /* Handle the Free Buffer Ring advancement here. Write
+ * the PA / Buffer Index for the returned buffer into
+ * the oldest (next to be freed)FBR entry
+ */
+ next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
+ next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
+ next->word2 = buff_index;
+
+ writel(bump_free_buff_ring(
+ &rx_local->fbr[0]->local_full,
+ rx_local->fbr[0]->num_entries - 1),
+ &rx_dma->fbr1_full_offset);
+ }
+#ifdef USE_FBR0
+ else {
+ struct fbr_desc *next = (struct fbr_desc *)
+ rx_local->fbr[1]->ring_virtaddr +
+ INDEX10(rx_local->fbr[1]->local_full);
+
+ /* Handle the Free Buffer Ring advancement here. Write
+ * the PA / Buffer Index for the returned buffer into
+ * the oldest (next to be freed) FBR entry
+ */
+ next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
+ next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
+ next->word2 = buff_index;
+
+ writel(bump_free_buff_ring(
+ &rx_local->fbr[1]->local_full,
+ rx_local->fbr[1]->num_entries - 1),
+ &rx_dma->fbr0_full_offset);
+ }
+#endif
+ spin_unlock_irqrestore(&adapter->fbr_lock, flags);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s illegal Buffer Index returned\n", __func__);
+ }
+
+ /* The processing on this RFD is done, so put it back on the tail of
+ * our list
+ */
+ spin_lock_irqsave(&adapter->rcv_lock, flags);
+ list_add_tail(&rfd->list_node, &rx_local->recv_list);
+ rx_local->num_ready_recv++;
+ spin_unlock_irqrestore(&adapter->rcv_lock, flags);
+
+ WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
+}
+
+static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
+{
+ struct rx_ring *rx_local = &adapter->rx_ring;
+ struct rx_status_block *status;
+ struct pkt_stat_desc *psr;
+ struct rfd *rfd;
+ u32 i;
+ u8 *buf;
+ unsigned long flags;
+ struct list_head *element;
+ u8 ring_index;
+ u16 buff_index;
+ u32 len;
+ u32 word0;
+ u32 word1;
+
+ /* RX Status block is written by the DMA engine prior to every
+ * interrupt. It contains the next to be used entry in the Packet
+ * Status Ring, and also the two Free Buffer rings.
+ */
+ status = rx_local->rx_status_block;
+ word1 = status->word1 >> 16; /* Get the useful bits */
+
+ /* Check the PSR and wrap bits do not match */
+ if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
+ /* Looks like this ring is not updated yet */
+ return NULL;
+
+ /* The packet status ring indicates that data is available. */
+ psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
+ (rx_local->local_psr_full & 0xFFF);
+
+ /* Grab any information that is required once the PSR is
+ * advanced, since we can no longer rely on the memory being
+ * accurate
+ */
+ len = psr->word1 & 0xFFFF;
+ ring_index = (psr->word1 >> 26) & 0x03;
+ buff_index = (psr->word1 >> 16) & 0x3FF;
+ word0 = psr->word0;
+
+ /* Indicate that we have used this PSR entry. */
+ /* FIXME wrap 12 */
+ add_12bit(&rx_local->local_psr_full, 1);
+ if (
+ (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) {
+ /* Clear psr full and toggle the wrap bit */
+ rx_local->local_psr_full &= ~0xFFF;
+ rx_local->local_psr_full ^= 0x1000;
+ }
+
+ writel(rx_local->local_psr_full,
+ &adapter->regs->rxdma.psr_full_offset);
+
+#ifndef USE_FBR0
+ if (ring_index != 1)
+ return NULL;
+#endif
+
+#ifdef USE_FBR0
+ if (ring_index > 1 ||
+ (ring_index == 0 &&
+ buff_index > rx_local->fbr[1]->num_entries - 1) ||
+ (ring_index == 1 &&
+ buff_index > rx_local->fbr[0]->num_entries - 1))
+#else
+ if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1)
+#endif
+ {
+ /* Illegal buffer or ring index cannot be used by S/W*/
+ dev_err(&adapter->pdev->dev,
+ "NICRxPkts PSR Entry %d indicates "
+ "length of %d and/or bad bi(%d)\n",
+ rx_local->local_psr_full & 0xFFF,
+ len, buff_index);
+ return NULL;
+ }
+
+ /* Get and fill the RFD. */
+ spin_lock_irqsave(&adapter->rcv_lock, flags);
+
+ rfd = NULL;
+ element = rx_local->recv_list.next;
+ rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
+
+ if (rfd == NULL) {
+ spin_unlock_irqrestore(&adapter->rcv_lock, flags);
+ return NULL;
+ }
+
+ list_del(&rfd->list_node);
+ rx_local->num_ready_recv--;
+
+ spin_unlock_irqrestore(&adapter->rcv_lock, flags);
+
+ rfd->bufferindex = buff_index;
+ rfd->ringindex = ring_index;
+
+ /* In V1 silicon, there is a bug which screws up filtering of
+ * runt packets. Therefore runt packet filtering is disabled
+ * in the MAC and the packets are dropped here. They are
+ * also counted here.
+ */
+ if (len < (NIC_MIN_PACKET_SIZE + 4)) {
+ adapter->stats.rx_other_errs++;
+ len = 0;
+ }
+
+ if (len) {
+ /* Determine if this is a multicast packet coming in */
+ if ((word0 & ALCATEL_MULTICAST_PKT) &&
+ !(word0 & ALCATEL_BROADCAST_PKT)) {
+ /* Promiscuous mode and Multicast mode are
+ * not mutually exclusive as was first
+ * thought. I guess Promiscuous is just
+ * considered a super-set of the other
+ * filters. Generally filter is 0x2b when in
+ * promiscuous mode.
+ */
+ if ((adapter->packet_filter &
+ ET131X_PACKET_TYPE_MULTICAST)
+ && !(adapter->packet_filter &
+ ET131X_PACKET_TYPE_PROMISCUOUS)
+ && !(adapter->packet_filter &
+ ET131X_PACKET_TYPE_ALL_MULTICAST)) {
+ /*
+ * Note - ring_index for fbr[] array is reversed
+ * 1 for FBR0 etc
+ */
+ buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
+ virt[buff_index];
+
+ /* Loop through our list to see if the
+ * destination address of this packet
+ * matches one in our list.
+ */
+ for (i = 0; i < adapter->multicast_addr_count;
+ i++) {
+ if (buf[0] ==
+ adapter->multicast_list[i][0]
+ && buf[1] ==
+ adapter->multicast_list[i][1]
+ && buf[2] ==
+ adapter->multicast_list[i][2]
+ && buf[3] ==
+ adapter->multicast_list[i][3]
+ && buf[4] ==
+ adapter->multicast_list[i][4]
+ && buf[5] ==
+ adapter->multicast_list[i][5]) {
+ break;
+ }
+ }
+
+ /* If our index is equal to the number
+ * of Multicast address we have, then
+ * this means we did not find this
+ * packet's matching address in our
+ * list. Set the len to zero,
+ * so we free our RFD when we return
+ * from this function.
+ */
+ if (i == adapter->multicast_addr_count)
+ len = 0;
+ }
+
+ if (len > 0)
+ adapter->stats.multicast_pkts_rcvd++;
+ } else if (word0 & ALCATEL_BROADCAST_PKT)
+ adapter->stats.broadcast_pkts_rcvd++;
+ else
+ /* Not sure what this counter measures in
+ * promiscuous mode. Perhaps we should check
+ * the MAC address to see if it is directed
+ * to us in promiscuous mode.
+ */
+ adapter->stats.unicast_pkts_rcvd++;
+ }
+
+ if (len > 0) {
+ struct sk_buff *skb = NULL;
+
+ /*rfd->len = len - 4; */
+ rfd->len = len;
+
+ skb = dev_alloc_skb(rfd->len + 2);
+ if (!skb) {
+ dev_err(&adapter->pdev->dev,
+ "Couldn't alloc an SKB for Rx\n");
+ return NULL;
+ }
+
+ adapter->net_stats.rx_bytes += rfd->len;
+
+ /*
+ * Note - ring_index for fbr[] array is reversed,
+ * 1 for FBR0 etc
+ */
+ memcpy(skb_put(skb, rfd->len),
+ rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
+ rfd->len);
+
+ skb->dev = adapter->netdev;
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ netif_rx(skb);
+ } else {
+ rfd->len = 0;
+ }
+
+ nic_return_rfd(adapter, rfd);
+ return rfd;
+}
+
+/**
+ * et131x_handle_recv_interrupt - Interrupt handler for receive processing
+ * @adapter: pointer to our adapter
+ *
+ * Assumption, Rcv spinlock has been acquired.
+ */
+void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
+{
+ struct rfd *rfd = NULL;
+ u32 count = 0;
+ bool done = true;
+
+ /* Process up to available RFD's */
+ while (count < NUM_PACKETS_HANDLED) {
+ if (list_empty(&adapter->rx_ring.recv_list)) {
+ WARN_ON(adapter->rx_ring.num_ready_recv != 0);
+ done = false;
+ break;
+ }
+
+ rfd = nic_rx_pkts(adapter);
+
+ if (rfd == NULL)
+ break;
+
+ /* Do not receive any packets until a filter has been set.
+ * Do not receive any packets until we have link.
+ * If length is zero, return the RFD in order to advance the
+ * Free buffer ring.
+ */
+ if (!adapter->packet_filter ||
+ !netif_carrier_ok(adapter->netdev) ||
+ rfd->len == 0)
+ continue;
+
+ /* Increment the number of packets we received */
+ adapter->net_stats.rx_packets++;
+
+ /* Set the status on the packet, either resources or success */
+ if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
+ dev_warn(&adapter->pdev->dev,
+ "RFD's are running out\n");
+ }
+ count++;
+ }
+
+ if (count == NUM_PACKETS_HANDLED || !done) {
+ adapter->rx_ring.unfinished_receives = true;
+ writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
+ &adapter->regs->global.watchdog_timer);
+ } else
+ /* Watchdog timer will disable itself if appropriate. */
+ adapter->rx_ring.unfinished_receives = false;
+}
+
+/* TX functions */
+
+/**
+ * et131x_tx_dma_memory_alloc
+ * @adapter: pointer to our private adapter structure
+ *
+ * Returns 0 on success and errno on failure (as defined in errno.h).
+ *
+ * Allocates memory that will be visible both to the device and to the CPU.
+ * The OS will pass us packets, pointers to which we will insert in the Tx
+ * Descriptor queue. The device will read this queue to find the packets in
+ * memory. The device will update the "status" in memory each time it xmits a
+ * packet.
+ */
+int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
+{
+ int desc_size = 0;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
+
+ /* Allocate memory for the TCB's (Transmit Control Block) */
+ adapter->tx_ring.tcb_ring =
+ kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
+ if (!adapter->tx_ring.tcb_ring) {
+ dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate enough memory for the Tx descriptor ring, and allocate
+ * some extra so that the ring can be aligned on a 4k boundary.
+ */
+ desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
+ tx_ring->tx_desc_ring =
+ (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
+ desc_size,
+ &tx_ring->tx_desc_ring_pa,
+ GFP_KERNEL);
+ if (!adapter->tx_ring.tx_desc_ring) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot alloc memory for Tx Ring\n");
+ return -ENOMEM;
+ }
+
+ /* Save physical address
+ *
+ * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
+ * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
+ * are ever returned, make sure the high part is retrieved here before
+ * storing the adjusted address.
+ */
+ /* Allocate memory for the Tx status block */
+ tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(u32),
+ &tx_ring->tx_status_pa,
+ GFP_KERNEL);
+ if (!adapter->tx_ring.tx_status_pa) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot alloc memory for Tx status block\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * et131x_tx_dma_memory_free - Free all memory allocated within this module
+ * @adapter: pointer to our private adapter structure
+ *
+ * Returns 0 on success and errno on failure (as defined in errno.h).
+ */
+void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
+{
+ int desc_size = 0;
+
+ if (adapter->tx_ring.tx_desc_ring) {
+ /* Free memory relating to Tx rings here */
+ desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
+ + 4096 - 1;
+ dma_free_coherent(&adapter->pdev->dev,
+ desc_size,
+ adapter->tx_ring.tx_desc_ring,
+ adapter->tx_ring.tx_desc_ring_pa);
+ adapter->tx_ring.tx_desc_ring = NULL;
+ }
+
+ /* Free memory for the Tx status block */
+ if (adapter->tx_ring.tx_status) {
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(u32),
+ adapter->tx_ring.tx_status,
+ adapter->tx_ring.tx_status_pa);
+
+ adapter->tx_ring.tx_status = NULL;
+ }
+ /* Free the memory for the tcb structures */
+ kfree(adapter->tx_ring.tcb_ring);
+}
+
+/**
+ * nic_send_packet - NIC specific send handler for version B silicon.
+ * @adapter: pointer to our adapter
+ * @tcb: pointer to struct tcb
+ *
+ * Returns 0 or errno.
+ */
+static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
+{
+ u32 i;
+ struct tx_desc desc[24]; /* 24 x 16 byte */
+ u32 frag = 0;
+ u32 thiscopy, remainder;
+ struct sk_buff *skb = tcb->skb;
+ u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
+ struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
+ unsigned long flags;
+ struct phy_device *phydev = adapter->phydev;
+
+ /* Part of the optimizations of this send routine restrict us to
+ * sending 24 fragments at a pass. In practice we should never see
+ * more than 5 fragments.
+ *
+ * NOTE: The older version of this function (below) can handle any
+ * number of fragments. If needed, we can call this function,
+ * although it is less efficient.
+ */
+ if (nr_frags > 23)
+ return -EIO;
+
+ memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
+
+ for (i = 0; i < nr_frags; i++) {
+ /* If there is something in this element, lets get a
+ * descriptor from the ring and get the necessary data
+ */
+ if (i == 0) {
+ /* If the fragments are smaller than a standard MTU,
+ * then map them to a single descriptor in the Tx
+ * Desc ring. However, if they're larger, as is
+ * possible with support for jumbo packets, then
+ * split them each across 2 descriptors.
+ *
+ * This will work until we determine why the hardware
+ * doesn't seem to like large fragments.
+ */
+ if ((skb->len - skb->data_len) <= 1514) {
+ desc[frag].addr_hi = 0;
+ /* Low 16bits are length, high is vlan and
+ unused currently so zero */
+ desc[frag].len_vlan =
+ skb->len - skb->data_len;
+
+ /* NOTE: Here, the dma_addr_t returned from
+ * dma_map_single() is implicitly cast as a
+ * u32. Although dma_addr_t can be
+ * 64-bit, the address returned by
+ * dma_map_single() is always 32-bit
+ * addressable (as defined by the pci/dma
+ * subsystem)
+ */
+ desc[frag++].addr_lo =
+ dma_map_single(&adapter->pdev->dev,
+ skb->data,
+ skb->len -
+ skb->data_len,
+ DMA_TO_DEVICE);
+ } else {
+ desc[frag].addr_hi = 0;
+ desc[frag].len_vlan =
+ (skb->len - skb->data_len) / 2;
+
+ /* NOTE: Here, the dma_addr_t returned from
+ * dma_map_single() is implicitly cast as a
+ * u32. Although dma_addr_t can be
+ * 64-bit, the address returned by
+ * dma_map_single() is always 32-bit
+ * addressable (as defined by the pci/dma
+ * subsystem)
+ */
+ desc[frag++].addr_lo =
+ dma_map_single(&adapter->pdev->dev,
+ skb->data,
+ ((skb->len -
+ skb->data_len) / 2),
+ DMA_TO_DEVICE);
+ desc[frag].addr_hi = 0;
+
+ desc[frag].len_vlan =
+ (skb->len - skb->data_len) / 2;
+
+ /* NOTE: Here, the dma_addr_t returned from
+ * dma_map_single() is implicitly cast as a
+ * u32. Although dma_addr_t can be
+ * 64-bit, the address returned by
+ * dma_map_single() is always 32-bit
+ * addressable (as defined by the pci/dma
+ * subsystem)
+ */
+ desc[frag++].addr_lo =
+ dma_map_single(&adapter->pdev->dev,
+ skb->data +
+ ((skb->len -
+ skb->data_len) / 2),
+ ((skb->len -
+ skb->data_len) / 2),
+ DMA_TO_DEVICE);
+ }
+ } else {
+ desc[frag].addr_hi = 0;
+ desc[frag].len_vlan =
+ frags[i - 1].size;
+
+ /* NOTE: Here, the dma_addr_t returned from
+ * dma_map_page() is implicitly cast as a u32.
+ * Although dma_addr_t can be 64-bit, the address
+ * returned by dma_map_page() is always 32-bit
+ * addressable (as defined by the pci/dma subsystem)
+ */
+ desc[frag++].addr_lo = skb_frag_dma_map(
+ &adapter->pdev->dev,
+ &frags[i - 1],
+ 0,
+ frags[i - 1].size,
+ DMA_TO_DEVICE);
+ }
+ }
+
+ if (phydev && phydev->speed == SPEED_1000) {
+ if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
+ /* Last element & Interrupt flag */
+ desc[frag - 1].flags = 0x5;
+ adapter->tx_ring.since_irq = 0;
+ } else { /* Last element */
+ desc[frag - 1].flags = 0x1;
+ }
+ } else
+ desc[frag - 1].flags = 0x5;
+
+ desc[0].flags |= 2; /* First element flag */
+
+ tcb->index_start = adapter->tx_ring.send_idx;
+ tcb->stale = 0;
+
+ spin_lock_irqsave(&adapter->send_hw_lock, flags);
+
+ thiscopy = NUM_DESC_PER_RING_TX -
+ INDEX10(adapter->tx_ring.send_idx);
+
+ if (thiscopy >= frag) {
+ remainder = 0;
+ thiscopy = frag;
+ } else {
+ remainder = frag - thiscopy;
+ }
+
+ memcpy(adapter->tx_ring.tx_desc_ring +
+ INDEX10(adapter->tx_ring.send_idx), desc,
+ sizeof(struct tx_desc) * thiscopy);
+
+ add_10bit(&adapter->tx_ring.send_idx, thiscopy);
+
+ if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
+ INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
+ adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
+ adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
+ }
+
+ if (remainder) {
+ memcpy(adapter->tx_ring.tx_desc_ring,
+ desc + thiscopy,
+ sizeof(struct tx_desc) * remainder);
+
+ add_10bit(&adapter->tx_ring.send_idx, remainder);
+ }
+
+ if (INDEX10(adapter->tx_ring.send_idx) == 0) {
+ if (adapter->tx_ring.send_idx)
+ tcb->index = NUM_DESC_PER_RING_TX - 1;
+ else
+ tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
+ } else
+ tcb->index = adapter->tx_ring.send_idx - 1;
+
+ spin_lock(&adapter->tcb_send_qlock);
+
+ if (adapter->tx_ring.send_tail)
+ adapter->tx_ring.send_tail->next = tcb;
+ else
+ adapter->tx_ring.send_head = tcb;
+
+ adapter->tx_ring.send_tail = tcb;
+
+ WARN_ON(tcb->next != NULL);
+
+ adapter->tx_ring.used++;
+
+ spin_unlock(&adapter->tcb_send_qlock);
+
+ /* Write the new write pointer back to the device. */
+ writel(adapter->tx_ring.send_idx,
+ &adapter->regs->txdma.service_request);
+
+ /* For Gig only, we use Tx Interrupt coalescing. Enable the software
+ * timer to wake us up if this packet isn't followed by N more.
+ */
+ if (phydev && phydev->speed == SPEED_1000) {
+ writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
+ &adapter->regs->global.watchdog_timer);
+ }
+ spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
+
+ return 0;
+}
+
+/**
+ * send_packet - Do the work to send a packet
+ * @skb: the packet(s) to send
+ * @adapter: a pointer to the device's private adapter structure
+ *
+ * Return 0 in almost all cases; non-zero value in extreme hard failure only.
+ *
+ * Assumption: Send spinlock has been acquired
+ */
+static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
+{
+ int status;
+ struct tcb *tcb = NULL;
+ u16 *shbufva;
+ unsigned long flags;
+
+ /* All packets must have at least a MAC address and a protocol type */
+ if (skb->len < ETH_HLEN)
+ return -EIO;
+
+ /* Get a TCB for this packet */
+ spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
+
+ tcb = adapter->tx_ring.tcb_qhead;
+
+ if (tcb == NULL) {
+ spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
+ return -ENOMEM;
+ }
+
+ adapter->tx_ring.tcb_qhead = tcb->next;
+
+ if (adapter->tx_ring.tcb_qhead == NULL)
+ adapter->tx_ring.tcb_qtail = NULL;
+
+ spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
+
+ tcb->skb = skb;
+
+ if (skb->data != NULL && skb->len - skb->data_len >= 6) {
+ shbufva = (u16 *) skb->data;
+
+ if ((shbufva[0] == 0xffff) &&
+ (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
+ tcb->flags |= fMP_DEST_BROAD;
+ } else if ((shbufva[0] & 0x3) == 0x0001) {
+ tcb->flags |= fMP_DEST_MULTI;
+ }
+ }
+
+ tcb->next = NULL;
+
+ /* Call the NIC specific send handler. */
+ status = nic_send_packet(adapter, tcb);
+
+ if (status != 0) {
+ spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
+
+ if (adapter->tx_ring.tcb_qtail)
+ adapter->tx_ring.tcb_qtail->next = tcb;
+ else
+ /* Apparently ready Q is empty. */
+ adapter->tx_ring.tcb_qhead = tcb;
+
+ adapter->tx_ring.tcb_qtail = tcb;
+ spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
+ return status;
+ }
+ WARN_ON(adapter->tx_ring.used > NUM_TCB);
+ return 0;
+}
+
+/**
+ * et131x_send_packets - This function is called by the OS to send packets
+ * @skb: the packet(s) to send
+ * @netdev:device on which to TX the above packet(s)
+ *
+ * Return 0 in almost all cases; non-zero value in extreme hard failure only
+ */
+int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
+{
+ int status = 0;
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ /* Send these packets
+ *
+ * NOTE: The Linux Tx entry point is only given one packet at a time
+ * to Tx, so the PacketCount and it's array used makes no sense here
+ */
+
+ /* TCB is not available */
+ if (adapter->tx_ring.used >= NUM_TCB) {
+ /* NOTE: If there's an error on send, no need to queue the
+ * packet under Linux; if we just send an error up to the
+ * netif layer, it will resend the skb to us.
+ */
+ status = -ENOMEM;
+ } else {
+ /* We need to see if the link is up; if it's not, make the
+ * netif layer think we're good and drop the packet
+ */
+ if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
+ !netif_carrier_ok(netdev)) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+
+ adapter->net_stats.tx_dropped++;
+ } else {
+ status = send_packet(skb, adapter);
+ if (status != 0 && status != -ENOMEM) {
+ /* On any other error, make netif think we're
+ * OK and drop the packet
+ */
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ adapter->net_stats.tx_dropped++;
+ }
+ }
+ }
+ return status;
+}
+
+/**
+ * free_send_packet - Recycle a struct tcb
+ * @adapter: pointer to our adapter
+ * @tcb: pointer to struct tcb
+ *
+ * Complete the packet if necessary
+ * Assumption - Send spinlock has been acquired
+ */
+static inline void free_send_packet(struct et131x_adapter *adapter,
+ struct tcb *tcb)
+{
+ unsigned long flags;
+ struct tx_desc *desc = NULL;
+ struct net_device_stats *stats = &adapter->net_stats;
+
+ if (tcb->flags & fMP_DEST_BROAD)
+ atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
+ else if (tcb->flags & fMP_DEST_MULTI)
+ atomic_inc(&adapter->stats.multicast_pkts_xmtd);
+ else
+ atomic_inc(&adapter->stats.unicast_pkts_xmtd);
+
+ if (tcb->skb) {
+ stats->tx_bytes += tcb->skb->len;
+
+ /* Iterate through the TX descriptors on the ring
+ * corresponding to this packet and umap the fragments
+ * they point to
+ */
+ do {
+ desc = (struct tx_desc *)
+ (adapter->tx_ring.tx_desc_ring +
+ INDEX10(tcb->index_start));
+
+ dma_unmap_single(&adapter->pdev->dev,
+ desc->addr_lo,
+ desc->len_vlan, DMA_TO_DEVICE);
+
+ add_10bit(&tcb->index_start, 1);
+ if (INDEX10(tcb->index_start) >=
+ NUM_DESC_PER_RING_TX) {
+ tcb->index_start &= ~ET_DMA10_MASK;
+ tcb->index_start ^= ET_DMA10_WRAP;
+ }
+ } while (desc != (adapter->tx_ring.tx_desc_ring +
+ INDEX10(tcb->index)));
+
+ dev_kfree_skb_any(tcb->skb);
+ }
+
+ memset(tcb, 0, sizeof(struct tcb));
+
+ /* Add the TCB to the Ready Q */
+ spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
+
+ adapter->net_stats.tx_packets++;
+
+ if (adapter->tx_ring.tcb_qtail)
+ adapter->tx_ring.tcb_qtail->next = tcb;
+ else
+ /* Apparently ready Q is empty. */
+ adapter->tx_ring.tcb_qhead = tcb;
+
+ adapter->tx_ring.tcb_qtail = tcb;
+
+ spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
+ WARN_ON(adapter->tx_ring.used < 0);
+}
+
+/**
+ * et131x_free_busy_send_packets - Free and complete the stopped active sends
+ * @adapter: pointer to our adapter
+ *
+ * Assumption - Send spinlock has been acquired
+ */
+void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
+{
+ struct tcb *tcb;
+ unsigned long flags;
+ u32 freed = 0;
+
+ /* Any packets being sent? Check the first TCB on the send list */
+ spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
+
+ tcb = adapter->tx_ring.send_head;
+
+ while (tcb != NULL && freed < NUM_TCB) {
+ struct tcb *next = tcb->next;
+
+ adapter->tx_ring.send_head = next;
+
+ if (next == NULL)
+ adapter->tx_ring.send_tail = NULL;
+
+ adapter->tx_ring.used--;
+
+ spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
+
+ freed++;
+ free_send_packet(adapter, tcb);
+
+ spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
+
+ tcb = adapter->tx_ring.send_head;
+ }
+
+ WARN_ON(freed == NUM_TCB);
+
+ spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
+
+ adapter->tx_ring.used = 0;
+}
+
+/**
+ * et131x_handle_send_interrupt - Interrupt handler for sending processing
+ * @adapter: pointer to our adapter
+ *
+ * Re-claim the send resources, complete sends and get more to send from
+ * the send wait queue.
+ *
+ * Assumption - Send spinlock has been acquired
+ */
+void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
+{
+ unsigned long flags;
+ u32 serviced;
+ struct tcb *tcb;
+ u32 index;
+
+ serviced = readl(&adapter->regs->txdma.new_service_complete);
+ index = INDEX10(serviced);
+
+ /* Has the ring wrapped? Process any descriptors that do not have
+ * the same "wrap" indicator as the current completion indicator
+ */
+ spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
+
+ tcb = adapter->tx_ring.send_head;
+
+ while (tcb &&
+ ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
+ index < INDEX10(tcb->index)) {
+ adapter->tx_ring.used--;
+ adapter->tx_ring.send_head = tcb->next;
+ if (tcb->next == NULL)
+ adapter->tx_ring.send_tail = NULL;
+
+ spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
+ free_send_packet(adapter, tcb);
+ spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
+
+ /* Goto the next packet */
+ tcb = adapter->tx_ring.send_head;
+ }
+ while (tcb &&
+ !((serviced ^ tcb->index) & ET_DMA10_WRAP)
+ && index > (tcb->index & ET_DMA10_MASK)) {
+ adapter->tx_ring.used--;
+ adapter->tx_ring.send_head = tcb->next;
+ if (tcb->next == NULL)
+ adapter->tx_ring.send_tail = NULL;
+
+ spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
+ free_send_packet(adapter, tcb);
+ spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
+
+ /* Goto the next packet */
+ tcb = adapter->tx_ring.send_head;
+ }
+
+ /* Wake up the queue when we hit a low-water mark */
+ if (adapter->tx_ring.used <= NUM_TCB / 3)
+ netif_wake_queue(adapter->netdev);
+
+ spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
+}
+
+/* ETHTOOL functions */
+
+static int et131x_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ return phy_ethtool_gset(adapter->phydev, cmd);
+}
+
+static int et131x_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ return phy_ethtool_sset(adapter->phydev, cmd);
+}
+
+static int et131x_get_regs_len(struct net_device *netdev)
+{
+#define ET131X_REGS_LEN 256
+ return ET131X_REGS_LEN * sizeof(u32);
+}
+
+static void et131x_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *regs_data)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct address_map __iomem *aregs = adapter->regs;
+ u32 *regs_buff = regs_data;
+ u32 num = 0;
+
+ memset(regs_data, 0, et131x_get_regs_len(netdev));
+
+ regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
+
+ /* PHY regs */
+ et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
+ /* Autoneg next page transmit reg */
+ et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
+ /* Link partner next page reg */
+ et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
+ (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
+ (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
+ (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
+ (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
+ (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);
+
+ /* Global regs */
+ regs_buff[num++] = readl(&aregs->global.txq_start_addr);
+ regs_buff[num++] = readl(&aregs->global.txq_end_addr);
+ regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
+ regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
+ regs_buff[num++] = readl(&aregs->global.pm_csr);
+ regs_buff[num++] = adapter->stats.interrupt_status;
+ regs_buff[num++] = readl(&aregs->global.int_mask);
+ regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
+ regs_buff[num++] = readl(&aregs->global.int_status_alias);
+ regs_buff[num++] = readl(&aregs->global.sw_reset);
+ regs_buff[num++] = readl(&aregs->global.slv_timer);
+ regs_buff[num++] = readl(&aregs->global.msi_config);
+ regs_buff[num++] = readl(&aregs->global.loopback);
+ regs_buff[num++] = readl(&aregs->global.watchdog_timer);
+
+ /* TXDMA regs */
+ regs_buff[num++] = readl(&aregs->txdma.csr);
+ regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
+ regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
+ regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
+ regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
+ regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
+ regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
+ regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
+ regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
+ regs_buff[num++] = readl(&aregs->txdma.service_request);
+ regs_buff[num++] = readl(&aregs->txdma.service_complete);
+ regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
+ regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
+ regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
+ regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
+ regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
+ regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
+ regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
+ regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
+ regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
+ regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
+ regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
+ regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
+ regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
+ regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
+ regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
+
+ /* RXDMA regs */
+ regs_buff[num++] = readl(&aregs->rxdma.csr);
+ regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
+ regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
+ regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
+ regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
+ regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
+ regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
+ regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
+ regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
+ regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
+ regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
+ regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
+ regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
+ regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
+ regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
+ regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
+}
+
+#define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */
+static void et131x_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
+ strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
+ strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
+}
+
+static struct ethtool_ops et131x_ethtool_ops = {
+ .get_settings = et131x_get_settings,
+ .set_settings = et131x_set_settings,
+ .get_drvinfo = et131x_get_drvinfo,
+ .get_regs_len = et131x_get_regs_len,
+ .get_regs = et131x_get_regs,
+ .get_link = ethtool_op_get_link,
+};
+
+void et131x_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
+}
+
+/* PCI functions */
+
+/**
+ * et131x_hwaddr_init - set up the MAC Address on the ET1310
+ * @adapter: pointer to our private adapter structure
+ */
+void et131x_hwaddr_init(struct et131x_adapter *adapter)
+{
+ /* If have our default mac from init and no mac address from
+ * EEPROM then we need to generate the last octet and set it on the
+ * device
+ */
+ if (adapter->rom_addr[0] == 0x00 &&
+ adapter->rom_addr[1] == 0x00 &&
+ adapter->rom_addr[2] == 0x00 &&
+ adapter->rom_addr[3] == 0x00 &&
+ adapter->rom_addr[4] == 0x00 &&
+ adapter->rom_addr[5] == 0x00) {
+ /*
+ * We need to randomly generate the last octet so we
+ * decrease our chances of setting the mac address to
+ * same as another one of our cards in the system
+ */
+ get_random_bytes(&adapter->addr[5], 1);
+ /*
+ * We have the default value in the register we are
+ * working with so we need to copy the current
+ * address into the permanent address
+ */
+ memcpy(adapter->rom_addr,
+ adapter->addr, ETH_ALEN);
+ } else {
+ /* We do not have an override address, so set the
+ * current address to the permanent address and add
+ * it to the device
+ */
+ memcpy(adapter->addr,
+ adapter->rom_addr, ETH_ALEN);
+ }
+}
+
+/**
+ * et131x_pci_init - initial PCI setup
+ * @adapter: pointer to our private adapter structure
+ * @pdev: our PCI device
+ *
+ * Perform the initial setup of PCI registers and if possible initialise
+ * the MAC address. At this point the I/O registers have yet to be mapped
+ */
+static int et131x_pci_init(struct et131x_adapter *adapter,
+ struct pci_dev *pdev)
+{
+ int i;
+ u8 max_payload;
+ u8 read_size_reg;
+
+ if (et131x_init_eeprom(adapter) < 0)
+ return -EIO;
+
+ /* Let's set up the PORT LOGIC Register. First we need to know what
+ * the max_payload_size is
+ */
+ if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) {
+ dev_err(&pdev->dev,
+ "Could not read PCI config space for Max Payload Size\n");
+ return -EIO;
+ }
+
+ /* Program the Ack/Nak latency and replay timers */
+ max_payload &= 0x07; /* Only the lower 3 bits are valid */
+
+ if (max_payload < 2) {
+ static const u16 acknak[2] = { 0x76, 0xD0 };
+ static const u16 replay[2] = { 0x1E0, 0x2ED };
+
+ if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
+ acknak[max_payload])) {
+ dev_err(&pdev->dev,
+ "Could not write PCI config space for ACK/NAK\n");
+ return -EIO;
+ }
+ if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
+ replay[max_payload])) {
+ dev_err(&pdev->dev,
+ "Could not write PCI config space for Replay Timer\n");
+ return -EIO;
+ }
+ }
+
+ /* l0s and l1 latency timers. We are using default values.
+ * Representing 001 for L0s and 010 for L1
+ */
+ if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
+ dev_err(&pdev->dev,
+ "Could not write PCI config space for Latency Timers\n");
+ return -EIO;
+ }
+
+ /* Change the max read size to 2k */
+ if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) {
+ dev_err(&pdev->dev,
+ "Could not read PCI config space for Max read size\n");
+ return -EIO;
+ }
+
+ read_size_reg &= 0x8f;
+ read_size_reg |= 0x40;
+
+ if (pci_write_config_byte(pdev, 0x51, read_size_reg)) {
+ dev_err(&pdev->dev,
+ "Could not write PCI config space for Max read size\n");
+ return -EIO;
+ }
+
+ /* Get MAC address from config space if an eeprom exists, otherwise
+ * the MAC address there will not be valid
+ */
+ if (!adapter->has_eeprom) {
+ et131x_hwaddr_init(adapter);
+ return 0;
+ }
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
+ adapter->rom_addr + i)) {
+ dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
+ return -EIO;
+ }
+ }
+ memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
+ return 0;
+}
+
+/**
+ * et131x_error_timer_handler
+ * @data: timer-specific variable; here a pointer to our adapter structure
+ *
+ * The routine called when the error timer expires, to track the number of
+ * recurring errors.
+ */
+void et131x_error_timer_handler(unsigned long data)
+{
+ struct et131x_adapter *adapter = (struct et131x_adapter *) data;
+ struct phy_device *phydev = adapter->phydev;
+
+ if (et1310_in_phy_coma(adapter)) {
+ /* Bring the device immediately out of coma, to
+ * prevent it from sleeping indefinitely, this
+ * mechanism could be improved! */
+ et1310_disable_phy_coma(adapter);
+ adapter->boot_coma = 20;
+ } else {
+ et1310_update_macstat_host_counters(adapter);
+ }
+
+ if (!phydev->link && adapter->boot_coma < 11)
+ adapter->boot_coma++;
+
+ if (adapter->boot_coma == 10) {
+ if (!phydev->link) {
+ if (!et1310_in_phy_coma(adapter)) {
+ /* NOTE - This was originally a 'sync with
+ * interrupt'. How to do that under Linux?
+ */
+ et131x_enable_interrupts(adapter);
+ et1310_enable_phy_coma(adapter);
+ }
+ }
+ }
+
+ /* This is a periodic timer, so reschedule */
+ mod_timer(&adapter->error_timer, jiffies +
+ TX_ERROR_PERIOD * HZ / 1000);
+}
+
+/**
+ * et131x_adapter_memory_alloc
+ * @adapter: pointer to our private adapter structure
+ *
+ * Returns 0 on success, errno on failure (as defined in errno.h).
+ *
+ * Allocate all the memory blocks for send, receive and others.
+ */
+int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
+{
+ int status;
+
+ /* Allocate memory for the Tx Ring */
+ status = et131x_tx_dma_memory_alloc(adapter);
+ if (status != 0) {
+ dev_err(&adapter->pdev->dev,
+ "et131x_tx_dma_memory_alloc FAILED\n");
+ return status;
+ }
+ /* Receive buffer memory allocation */
+ status = et131x_rx_dma_memory_alloc(adapter);
+ if (status != 0) {
+ dev_err(&adapter->pdev->dev,
+ "et131x_rx_dma_memory_alloc FAILED\n");
+ et131x_tx_dma_memory_free(adapter);
+ return status;
+ }
+
+ /* Init receive data structures */
+ status = et131x_init_recv(adapter);
+ if (status != 0) {
+ dev_err(&adapter->pdev->dev,
+ "et131x_init_recv FAILED\n");
+ et131x_tx_dma_memory_free(adapter);
+ et131x_rx_dma_memory_free(adapter);
+ }
+ return status;
+}
+
+/**
+ * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
+ * @adapter: pointer to our private adapter structure
+ */
+void et131x_adapter_memory_free(struct et131x_adapter *adapter)
+{
+ /* Free DMA memory */
+ et131x_tx_dma_memory_free(adapter);
+ et131x_rx_dma_memory_free(adapter);
+}
+
+static void et131x_adjust_link(struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = adapter->phydev;
+
+ if (netif_carrier_ok(netdev)) {
+ adapter->boot_coma = 20;
+
+ if (phydev && phydev->speed == SPEED_10) {
+ /*
+ * NOTE - Is there a way to query this without
+ * TruePHY?
+ * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
+ * EMI_TRUEPHY_A13O) {
+ */
+ u16 register18;
+
+ et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
+ &register18);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18 | 0x4);
+ et131x_mii_write(adapter, PHY_INDEX_REG,
+ register18 | 0x8402);
+ et131x_mii_write(adapter, PHY_DATA_REG,
+ register18 | 511);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18);
+ }
+
+ et1310_config_flow_control(adapter);
+
+ if (phydev && phydev->speed == SPEED_1000 &&
+ adapter->registry_jumbo_packet > 2048) {
+ u16 reg;
+
+ et131x_mii_read(adapter, PHY_CONFIG, &reg);
+ reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
+ reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
+ et131x_mii_write(adapter, PHY_CONFIG, reg);
+ }
+
+ et131x_set_rx_dma_timer(adapter);
+ et1310_config_mac_regs2(adapter);
+ }
+
+ if (phydev && phydev->link != adapter->link) {
+ /*
+ * Check to see if we are in coma mode and if
+ * so, disable it because we will not be able
+ * to read PHY values until we are out.
+ */
+ if (et1310_in_phy_coma(adapter))
+ et1310_disable_phy_coma(adapter);
+
+ if (phydev->link) {
+ adapter->boot_coma = 20;
+ } else {
+ dev_warn(&adapter->pdev->dev,
+ "Link down - cable problem ?\n");
+ adapter->boot_coma = 0;
+
+ if (phydev->speed == SPEED_10) {
+ /* NOTE - Is there a way to query this without
+ * TruePHY?
+ * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
+ * EMI_TRUEPHY_A13O)
+ */
+ u16 register18;
+
+ et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
+ &register18);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18 | 0x4);
+ et131x_mii_write(adapter, PHY_INDEX_REG,
+ register18 | 0x8402);
+ et131x_mii_write(adapter, PHY_DATA_REG,
+ register18 | 511);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18);
+ }
+
+ /* Free the packets being actively sent & stopped */
+ et131x_free_busy_send_packets(adapter);
+
+ /* Re-initialize the send structures */
+ et131x_init_send(adapter);
+
+ /*
+ * Bring the device back to the state it was during
+ * init prior to autonegotiation being complete. This
+ * way, when we get the auto-neg complete interrupt,
+ * we can complete init by calling config_mac_regs2.
+ */
+ et131x_soft_reset(adapter);
+
+ /* Setup ET1310 as per the documentation */
+ et131x_adapter_setup(adapter);
+
+ /* perform reset of tx/rx */
+ et131x_disable_txrx(netdev);
+ et131x_enable_txrx(netdev);
+ }
+
+ adapter->link = phydev->link;
+
+ phy_print_status(phydev);
+ }
+}
+
+static int et131x_mii_probe(struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = NULL;
+
+ phydev = phy_find_first(adapter->mii_bus);
+ if (!phydev) {
+ dev_err(&adapter->pdev->dev, "no PHY found\n");
+ return -ENODEV;
+ }
+
+ phydev = phy_connect(netdev, dev_name(&phydev->dev),
+ &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(phydev)) {
+ dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ phydev->supported &= (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | SUPPORTED_Autoneg
+ | SUPPORTED_MII
+ | SUPPORTED_TP);
+
+ if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
+ phydev->supported |= SUPPORTED_1000baseT_Full;
+
+ phydev->advertising = phydev->supported;
+ adapter->phydev = phydev;
+
+ dev_info(&adapter->pdev->dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s)\n",
+ phydev->drv->name, dev_name(&phydev->dev));
+
+ return 0;
+}
+
+/**
+ * et131x_adapter_init
+ * @adapter: pointer to the private adapter struct
+ * @pdev: pointer to the PCI device
+ *
+ * Initialize the data structures for the et131x_adapter object and link
+ * them together with the platform provided device structures.
+ */
+static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
+ struct pci_dev *pdev)
+{
+ static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
+
+ struct et131x_adapter *adapter;
+
+ /* Allocate private adapter struct and copy in relevant information */
+ adapter = netdev_priv(netdev);
+ adapter->pdev = pci_dev_get(pdev);
+ adapter->netdev = netdev;
+
+ /* Do the same for the netdev struct */
+ netdev->irq = pdev->irq;
+ netdev->base_addr = pci_resource_start(pdev, 0);
+
+ /* Initialize spinlocks here */
+ spin_lock_init(&adapter->lock);
+ spin_lock_init(&adapter->tcb_send_qlock);
+ spin_lock_init(&adapter->tcb_ready_qlock);
+ spin_lock_init(&adapter->send_hw_lock);
+ spin_lock_init(&adapter->rcv_lock);
+ spin_lock_init(&adapter->rcv_pend_lock);
+ spin_lock_init(&adapter->fbr_lock);
+ spin_lock_init(&adapter->phy_lock);
+
+ adapter->registry_jumbo_packet = 1514; /* 1514-9216 */
+
+ /* Set the MAC address to a default */
+ memcpy(adapter->addr, default_mac, ETH_ALEN);
+
+ return adapter;
+}
+
+/**
+ * et131x_pci_remove
+ * @pdev: a pointer to the device's pci_dev structure
+ *
+ * Registered in the pci_driver structure, this function is called when the
+ * PCI subsystem detects that a PCI device which matches the information
+ * contained in the pci_device_id table has been removed.
+ */
+static void __devexit et131x_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+ mdiobus_unregister(adapter->mii_bus);
+ kfree(adapter->mii_bus->irq);
+ mdiobus_free(adapter->mii_bus);
+
+ et131x_adapter_memory_free(adapter);
+ iounmap(adapter->regs);
+ pci_dev_put(pdev);
+
+ free_netdev(netdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * et131x_up - Bring up a device for use.
+ * @netdev: device to be opened
+ */
+void et131x_up(struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ et131x_enable_txrx(netdev);
+ phy_start(adapter->phydev);
+}
+
+/**
+ * et131x_down - Bring down the device
+ * @netdev: device to be broght down
+ */
+void et131x_down(struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ /* Save the timestamp for the TX watchdog, prevent a timeout */
+ netdev->trans_start = jiffies;
+
+ phy_stop(adapter->phydev);
+ et131x_disable_txrx(netdev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int et131x_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ et131x_down(netdev);
+ pci_save_state(pdev);
+ }
+
+ return 0;
+}
+
+static int et131x_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ if (netif_running(netdev)) {
+ pci_restore_state(pdev);
+ et131x_up(netdev);
+ netif_device_attach(netdev);
+ }
+
+ return 0;
+}
+
+/* ISR functions */
+
+/**
+ * et131x_isr - The Interrupt Service Routine for the driver.
+ * @irq: the IRQ on which the interrupt was received.
+ * @dev_id: device-specific info (here a pointer to a net_device struct)
+ *
+ * Returns a value indicating if the interrupt was handled.
+ */
+irqreturn_t et131x_isr(int irq, void *dev_id)
+{
+ bool handled = true;
+ struct net_device *netdev = (struct net_device *)dev_id;
+ struct et131x_adapter *adapter = NULL;
+ u32 status;
+
+ if (!netif_device_present(netdev)) {
+ handled = false;
+ goto out;
+ }
+
+ adapter = netdev_priv(netdev);
+
+ /* If the adapter is in low power state, then it should not
+ * recognize any interrupt
+ */
+
+ /* Disable Device Interrupts */
+ et131x_disable_interrupts(adapter);
+
+ /* Get a copy of the value in the interrupt status register
+ * so we can process the interrupting section
+ */
+ status = readl(&adapter->regs->global.int_status);
+
+ if (adapter->flowcontrol == FLOW_TXONLY ||
+ adapter->flowcontrol == FLOW_BOTH) {
+ status &= ~INT_MASK_ENABLE;
+ } else {
+ status &= ~INT_MASK_ENABLE_NO_FLOW;
+ }
+
+ /* Make sure this is our interrupt */
+ if (!status) {
+ handled = false;
+ et131x_enable_interrupts(adapter);
+ goto out;
+ }
+
+ /* This is our interrupt, so process accordingly */
+
+ if (status & ET_INTR_WATCHDOG) {
+ struct tcb *tcb = adapter->tx_ring.send_head;
+
+ if (tcb)
+ if (++tcb->stale > 1)
+ status |= ET_INTR_TXDMA_ISR;
+
+ if (adapter->rx_ring.unfinished_receives)
+ status |= ET_INTR_RXDMA_XFR_DONE;
+ else if (tcb == NULL)
+ writel(0, &adapter->regs->global.watchdog_timer);
+
+ status &= ~ET_INTR_WATCHDOG;
+ }
+
+ if (status == 0) {
+ /* This interrupt has in some way been "handled" by
+ * the ISR. Either it was a spurious Rx interrupt, or
+ * it was a Tx interrupt that has been filtered by
+ * the ISR.
+ */
+ et131x_enable_interrupts(adapter);
+ goto out;
+ }
+
+ /* We need to save the interrupt status value for use in our
+ * DPC. We will clear the software copy of that in that
+ * routine.
+ */
+ adapter->stats.interrupt_status = status;
+
+ /* Schedule the ISR handler as a bottom-half task in the
+ * kernel's tq_immediate queue, and mark the queue for
+ * execution
+ */
+ schedule_work(&adapter->task);
+out:
+ return IRQ_RETVAL(handled);
+}
+
+/**
+ * et131x_isr_handler - The ISR handler
+ * @p_adapter, a pointer to the device's private adapter structure
+ *
+ * scheduled to run in a deferred context by the ISR. This is where the ISR's
+ * work actually gets done.
+ */
+void et131x_isr_handler(struct work_struct *work)
+{
+ struct et131x_adapter *adapter =
+ container_of(work, struct et131x_adapter, task);
+ u32 status = adapter->stats.interrupt_status;
+ struct address_map __iomem *iomem = adapter->regs;
+
+ /*
+ * These first two are by far the most common. Once handled, we clear
+ * their two bits in the status word. If the word is now zero, we
+ * exit.
+ */
+ /* Handle all the completed Transmit interrupts */
+ if (status & ET_INTR_TXDMA_ISR)
+ et131x_handle_send_interrupt(adapter);
+
+ /* Handle all the completed Receives interrupts */
+ if (status & ET_INTR_RXDMA_XFR_DONE)
+ et131x_handle_recv_interrupt(adapter);
+
+ status &= 0xffffffd7;
+
+ if (status) {
+ /* Handle the TXDMA Error interrupt */
+ if (status & ET_INTR_TXDMA_ERR) {
+ u32 txdma_err;
+
+ /* Following read also clears the register (COR) */
+ txdma_err = readl(&iomem->txdma.tx_dma_error);
+
+ dev_warn(&adapter->pdev->dev,
+ "TXDMA_ERR interrupt, error = %d\n",
+ txdma_err);
+ }
+
+ /* Handle Free Buffer Ring 0 and 1 Low interrupt */
+ if (status &
+ (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
+ /*
+ * This indicates the number of unused buffers in
+ * RXDMA free buffer ring 0 is <= the limit you
+ * programmed. Free buffer resources need to be
+ * returned. Free buffers are consumed as packets
+ * are passed from the network to the host. The host
+ * becomes aware of the packets from the contents of
+ * the packet status ring. This ring is queried when
+ * the packet done interrupt occurs. Packets are then
+ * passed to the OS. When the OS is done with the
+ * packets the resources can be returned to the
+ * ET1310 for re-use. This interrupt is one method of
+ * returning resources.
+ */
+
+ /* If the user has flow control on, then we will
+ * send a pause packet, otherwise just exit
+ */
+ if (adapter->flowcontrol == FLOW_TXONLY ||
+ adapter->flowcontrol == FLOW_BOTH) {
+ u32 pm_csr;
+
+ /* Tell the device to send a pause packet via
+ * the back pressure register (bp req and
+ * bp xon/xoff)
+ */
+ pm_csr = readl(&iomem->global.pm_csr);
+ if (!et1310_in_phy_coma(adapter))
+ writel(3, &iomem->txmac.bp_ctrl);
+ }
+ }
+
+ /* Handle Packet Status Ring Low Interrupt */
+ if (status & ET_INTR_RXDMA_STAT_LOW) {
+
+ /*
+ * Same idea as with the two Free Buffer Rings.
+ * Packets going from the network to the host each
+ * consume a free buffer resource and a packet status
+ * resource. These resoures are passed to the OS.
+ * When the OS is done with the resources, they need
+ * to be returned to the ET1310. This is one method
+ * of returning the resources.
+ */
+ }
+
+ /* Handle RXDMA Error Interrupt */
+ if (status & ET_INTR_RXDMA_ERR) {
+ /*
+ * The rxdma_error interrupt is sent when a time-out
+ * on a request issued by the JAGCore has occurred or
+ * a completion is returned with an un-successful
+ * status. In both cases the request is considered
+ * complete. The JAGCore will automatically re-try the
+ * request in question. Normally information on events
+ * like these are sent to the host using the "Advanced
+ * Error Reporting" capability. This interrupt is
+ * another way of getting similar information. The
+ * only thing required is to clear the interrupt by
+ * reading the ISR in the global resources. The
+ * JAGCore will do a re-try on the request. Normally
+ * you should never see this interrupt. If you start
+ * to see this interrupt occurring frequently then
+ * something bad has occurred. A reset might be the
+ * thing to do.
+ */
+ /* TRAP();*/
+
+ dev_warn(&adapter->pdev->dev,
+ "RxDMA_ERR interrupt, error %x\n",
+ readl(&iomem->txmac.tx_test));
+ }
+
+ /* Handle the Wake on LAN Event */
+ if (status & ET_INTR_WOL) {
+ /*
+ * This is a secondary interrupt for wake on LAN.
+ * The driver should never see this, if it does,
+ * something serious is wrong. We will TRAP the
+ * message when we are in DBG mode, otherwise we
+ * will ignore it.
+ */
+ dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
+ }
+
+ /* Let's move on to the TxMac */
+ if (status & ET_INTR_TXMAC) {
+ u32 err = readl(&iomem->txmac.err);
+
+ /*
+ * When any of the errors occur and TXMAC generates
+ * an interrupt to report these errors, it usually
+ * means that TXMAC has detected an error in the data
+ * stream retrieved from the on-chip Tx Q. All of
+ * these errors are catastrophic and TXMAC won't be
+ * able to recover data when these errors occur. In
+ * a nutshell, the whole Tx path will have to be reset
+ * and re-configured afterwards.
+ */
+ dev_warn(&adapter->pdev->dev,
+ "TXMAC interrupt, error 0x%08x\n",
+ err);
+
+ /* If we are debugging, we want to see this error,
+ * otherwise we just want the device to be reset and
+ * continue
+ */
+ }
+
+ /* Handle RXMAC Interrupt */
+ if (status & ET_INTR_RXMAC) {
+ /*
+ * These interrupts are catastrophic to the device,
+ * what we need to do is disable the interrupts and
+ * set the flag to cause us to reset so we can solve
+ * this issue.
+ */
+ /* MP_SET_FLAG( adapter,
+ fMP_ADAPTER_HARDWARE_ERROR); */
+
+ dev_warn(&adapter->pdev->dev,
+ "RXMAC interrupt, error 0x%08x. Requesting reset\n",
+ readl(&iomem->rxmac.err_reg));
+
+ dev_warn(&adapter->pdev->dev,
+ "Enable 0x%08x, Diag 0x%08x\n",
+ readl(&iomem->rxmac.ctrl),
+ readl(&iomem->rxmac.rxq_diag));
+
+ /*
+ * If we are debugging, we want to see this error,
+ * otherwise we just want the device to be reset and
+ * continue
+ */
+ }
+
+ /* Handle MAC_STAT Interrupt */
+ if (status & ET_INTR_MAC_STAT) {
+ /*
+ * This means at least one of the un-masked counters
+ * in the MAC_STAT block has rolled over. Use this
+ * to maintain the top, software managed bits of the
+ * counter(s).
+ */
+ et1310_handle_macstat_interrupt(adapter);
+ }
+
+ /* Handle SLV Timeout Interrupt */
+ if (status & ET_INTR_SLV_TIMEOUT) {
+ /*
+ * This means a timeout has occurred on a read or
+ * write request to one of the JAGCore registers. The
+ * Global Resources block has terminated the request
+ * and on a read request, returned a "fake" value.
+ * The most likely reasons are: Bad Address or the
+ * addressed module is in a power-down state and
+ * can't respond.
+ */
+ }
+ }
+ et131x_enable_interrupts(adapter);
+}
+
+/* NETDEV functions */
+
+/**
+ * et131x_stats - Return the current device statistics.
+ * @netdev: device whose stats are being queried
+ *
+ * Returns 0 on success, errno on failure (as defined in errno.h)
+ */
+static struct net_device_stats *et131x_stats(struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct net_device_stats *stats = &adapter->net_stats;
+ struct ce_stats *devstat = &adapter->stats;
+
+ stats->rx_errors = devstat->rx_length_errs +
+ devstat->rx_align_errs +
+ devstat->rx_crc_errs +
+ devstat->rx_code_violations +
+ devstat->rx_other_errs;
+ stats->tx_errors = devstat->tx_max_pkt_errs;
+ stats->multicast = devstat->multicast_pkts_rcvd;
+ stats->collisions = devstat->tx_collisions;
+
+ stats->rx_length_errors = devstat->rx_length_errs;
+ stats->rx_over_errors = devstat->rx_overflows;
+ stats->rx_crc_errors = devstat->rx_crc_errs;
+
+ /* NOTE: These stats don't have corresponding values in CE_STATS,
+ * so we're going to have to update these directly from within the
+ * TX/RX code
+ */
+ /* stats->rx_bytes = 20; devstat->; */
+ /* stats->tx_bytes = 20; devstat->; */
+ /* stats->rx_dropped = devstat->; */
+ /* stats->tx_dropped = devstat->; */
+
+ /* NOTE: Not used, can't find analogous statistics */
+ /* stats->rx_frame_errors = devstat->; */
+ /* stats->rx_fifo_errors = devstat->; */
+ /* stats->rx_missed_errors = devstat->; */
+
+ /* stats->tx_aborted_errors = devstat->; */
+ /* stats->tx_carrier_errors = devstat->; */
+ /* stats->tx_fifo_errors = devstat->; */
+ /* stats->tx_heartbeat_errors = devstat->; */
+ /* stats->tx_window_errors = devstat->; */
+ return stats;
+}
+
+/**
+ * et131x_open - Open the device for use.
+ * @netdev: device to be opened
+ *
+ * Returns 0 on success, errno on failure (as defined in errno.h)
+ */
+int et131x_open(struct net_device *netdev)
+{
+ int result = 0;
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ /* Start the timer to track NIC errors */
+ init_timer(&adapter->error_timer);
+ adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
+ adapter->error_timer.function = et131x_error_timer_handler;
+ adapter->error_timer.data = (unsigned long)adapter;
+ add_timer(&adapter->error_timer);
+
+ /* Register our IRQ */
+ result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED,
+ netdev->name, netdev);
+ if (result) {
+ dev_err(&adapter->pdev->dev, "could not register IRQ %d\n",
+ netdev->irq);
+ return result;
+ }
+
+ adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
+
+ et131x_up(netdev);
+
+ return result;
+}
+
+/**
+ * et131x_close - Close the device
+ * @netdev: device to be closed
+ *
+ * Returns 0 on success, errno on failure (as defined in errno.h)
+ */
+int et131x_close(struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ et131x_down(netdev);
+
+ adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
+ free_irq(netdev->irq, netdev);
+
+ /* Stop the error timer */
+ return del_timer_sync(&adapter->error_timer);
+}
+
+/**
+ * et131x_ioctl - The I/O Control handler for the driver
+ * @netdev: device on which the control request is being made
+ * @reqbuf: a pointer to the IOCTL request buffer
+ * @cmd: the IOCTL command code
+ *
+ * Returns 0 on success, errno on failure (as defined in errno.h)
+ */
+static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
+ int cmd)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ if (!adapter->phydev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
+}
+
+/**
+ * et131x_set_packet_filter - Configures the Rx Packet filtering on the device
+ * @adapter: pointer to our private adapter structure
+ *
+ * FIXME: lot of dups with MAC code
+ *
+ * Returns 0 on success, errno on failure
+ */
+static int et131x_set_packet_filter(struct et131x_adapter *adapter)
+{
+ int status = 0;
+ uint32_t filter = adapter->packet_filter;
+ u32 ctrl;
+ u32 pf_ctrl;
+
+ ctrl = readl(&adapter->regs->rxmac.ctrl);
+ pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
+
+ /* Default to disabled packet filtering. Enable it in the individual
+ * case statements that require the device to filter something
+ */
+ ctrl |= 0x04;
+
+ /* Set us to be in promiscuous mode so we receive everything, this
+ * is also true when we get a packet filter of 0
+ */
+ if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
+ pf_ctrl &= ~7; /* Clear filter bits */
+ else {
+ /*
+ * Set us up with Multicast packet filtering. Three cases are
+ * possible - (1) we have a multi-cast list, (2) we receive ALL
+ * multicast entries or (3) we receive none.
+ */
+ if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
+ pf_ctrl &= ~2; /* Multicast filter bit */
+ else {
+ et1310_setup_device_for_multicast(adapter);
+ pf_ctrl |= 2;
+ ctrl &= ~0x04;
+ }
+
+ /* Set us up with Unicast packet filtering */
+ if (filter & ET131X_PACKET_TYPE_DIRECTED) {
+ et1310_setup_device_for_unicast(adapter);
+ pf_ctrl |= 4;
+ ctrl &= ~0x04;
+ }
+
+ /* Set us up with Broadcast packet filtering */
+ if (filter & ET131X_PACKET_TYPE_BROADCAST) {
+ pf_ctrl |= 1; /* Broadcast filter bit */
+ ctrl &= ~0x04;
+ } else
+ pf_ctrl &= ~1;
+
+ /* Setup the receive mac configuration registers - Packet
+ * Filter control + the enable / disable for packet filter
+ * in the control reg.
+ */
+ writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
+ writel(ctrl, &adapter->regs->rxmac.ctrl);
+ }
+ return status;
+}
+
+/**
+ * et131x_multicast - The handler to configure multicasting on the interface
+ * @netdev: a pointer to a net_device struct representing the device
+ */
+static void et131x_multicast(struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ uint32_t packet_filter = 0;
+ unsigned long flags;
+ struct netdev_hw_addr *ha;
+ int i;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+
+ /* Before we modify the platform-independent filter flags, store them
+ * locally. This allows us to determine if anything's changed and if
+ * we even need to bother the hardware
+ */
+ packet_filter = adapter->packet_filter;
+
+ /* Clear the 'multicast' flag locally; because we only have a single
+ * flag to check multicast, and multiple multicast addresses can be
+ * set, this is the easiest way to determine if more than one
+ * multicast address is being set.
+ */
+ packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
+
+ /* Check the net_device flags and set the device independent flags
+ * accordingly
+ */
+
+ if (netdev->flags & IFF_PROMISC)
+ adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
+ else
+ adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
+
+ if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
+ adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
+
+ if (netdev_mc_count(netdev) < 1) {
+ adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
+ adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
+ } else
+ adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
+
+ /* Set values in the private adapter struct */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == NIC_MAX_MCAST_LIST)
+ break;
+ memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
+ }
+ adapter->multicast_addr_count = i;
+
+ /* Are the new flags different from the previous ones? If not, then no
+ * action is required
+ *
+ * NOTE - This block will always update the multicast_list with the
+ * hardware, even if the addresses aren't the same.
+ */
+ if (packet_filter != adapter->packet_filter) {
+ /* Call the device's filter function */
+ et131x_set_packet_filter(adapter);
+ }
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+/**
+ * et131x_tx - The handler to tx a packet on the device
+ * @skb: data to be Tx'd
+ * @netdev: device on which data is to be Tx'd
+ *
+ * Returns 0 on success, errno on failure (as defined in errno.h)
+ */
+static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ int status = 0;
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ /* stop the queue if it's getting full */
+ if (adapter->tx_ring.used >= NUM_TCB - 1 &&
+ !netif_queue_stopped(netdev))
+ netif_stop_queue(netdev);
+
+ /* Save the timestamp for the TX timeout watchdog */
+ netdev->trans_start = jiffies;
+
+ /* Call the device-specific data Tx routine */
+ status = et131x_send_packets(skb, netdev);
+
+ /* Check status and manage the netif queue if necessary */
+ if (status != 0) {
+ if (status == -ENOMEM)
+ status = NETDEV_TX_BUSY;
+ else
+ status = NETDEV_TX_OK;
+ }
+ return status;
+}
+
+/**
+ * et131x_tx_timeout - Timeout handler
+ * @netdev: a pointer to a net_device struct representing the device
+ *
+ * The handler called when a Tx request times out. The timeout period is
+ * specified by the 'tx_timeo" element in the net_device structure (see
+ * et131x_alloc_device() to see how this value is set).
+ */
+static void et131x_tx_timeout(struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct tcb *tcb;
+ unsigned long flags;
+
+ /* If the device is closed, ignore the timeout */
+ if (~(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE))
+ return;
+
+ /* Any nonrecoverable hardware error?
+ * Checks adapter->flags for any failure in phy reading
+ */
+ if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
+ return;
+
+ /* Hardware failure? */
+ if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) {
+ dev_err(&adapter->pdev->dev, "hardware error - reset\n");
+ return;
+ }
+
+ /* Is send stuck? */
+ spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
+
+ tcb = adapter->tx_ring.send_head;
+
+ if (tcb != NULL) {
+ tcb->count++;
+
+ if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
+ spin_unlock_irqrestore(&adapter->tcb_send_qlock,
+ flags);
+
+ dev_warn(&adapter->pdev->dev,
+ "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
+ tcb->index,
+ tcb->flags);
+
+ adapter->net_stats.tx_errors++;
+
+ /* perform reset of tx/rx */
+ et131x_disable_txrx(netdev);
+ et131x_enable_txrx(netdev);
+ return;
+ }
+ }
+
+ spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
+}
+
+/**
+ * et131x_change_mtu - The handler called to change the MTU for the device
+ * @netdev: device whose MTU is to be changed
+ * @new_mtu: the desired MTU
+ *
+ * Returns 0 on success, errno on failure (as defined in errno.h)
+ */
+static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int result = 0;
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+ /* Make sure the requested MTU is valid */
+ if (new_mtu < 64 || new_mtu > 9216)
+ return -EINVAL;
+
+ et131x_disable_txrx(netdev);
+ et131x_handle_send_interrupt(adapter);
+ et131x_handle_recv_interrupt(adapter);
+
+ /* Set the new MTU */
+ netdev->mtu = new_mtu;
+
+ /* Free Rx DMA memory */
+ et131x_adapter_memory_free(adapter);
+
+ /* Set the config parameter for Jumbo Packet support */
+ adapter->registry_jumbo_packet = new_mtu + 14;
+ et131x_soft_reset(adapter);
+
+ /* Alloc and init Rx DMA memory */
+ result = et131x_adapter_memory_alloc(adapter);
+ if (result != 0) {
+ dev_warn(&adapter->pdev->dev,
+ "Change MTU failed; couldn't re-alloc DMA memory\n");
+ return result;
+ }
+
+ et131x_init_send(adapter);
+
+ et131x_hwaddr_init(adapter);
+ memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
+
+ /* Init the device with the new settings */
+ et131x_adapter_setup(adapter);
+
+ et131x_enable_txrx(netdev);
+
+ return result;
+}
+
+/**
+ * et131x_set_mac_addr - handler to change the MAC address for the device
+ * @netdev: device whose MAC is to be changed
+ * @new_mac: the desired MAC address
+ *
+ * Returns 0 on success, errno on failure (as defined in errno.h)
+ *
+ * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
+ */
+static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
+{
+ int result = 0;
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *address = new_mac;
+
+ /* begin blux */
+
+ if (adapter == NULL)
+ return -ENODEV;
+
+ /* Make sure the requested MAC is valid */
+ if (!is_valid_ether_addr(address->sa_data))
+ return -EINVAL;
+
+ et131x_disable_txrx(netdev);
+ et131x_handle_send_interrupt(adapter);
+ et131x_handle_recv_interrupt(adapter);
+
+ /* Set the new MAC */
+ /* netdev->set_mac_address = &new_mac; */
+
+ memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
+
+ printk(KERN_INFO "%s: Setting MAC address to %pM\n",
+ netdev->name, netdev->dev_addr);
+
+ /* Free Rx DMA memory */
+ et131x_adapter_memory_free(adapter);
+
+ et131x_soft_reset(adapter);
+
+ /* Alloc and init Rx DMA memory */
+ result = et131x_adapter_memory_alloc(adapter);
+ if (result != 0) {
+ dev_err(&adapter->pdev->dev,
+ "Change MAC failed; couldn't re-alloc DMA memory\n");
+ return result;
+ }
+
+ et131x_init_send(adapter);
+
+ et131x_hwaddr_init(adapter);
+
+ /* Init the device with the new settings */
+ et131x_adapter_setup(adapter);
+
+ et131x_enable_txrx(netdev);
+
+ return result;
+}
+
+static const struct net_device_ops et131x_netdev_ops = {
+ .ndo_open = et131x_open,
+ .ndo_stop = et131x_close,
+ .ndo_start_xmit = et131x_tx,
+ .ndo_set_rx_mode = et131x_multicast,
+ .ndo_tx_timeout = et131x_tx_timeout,
+ .ndo_change_mtu = et131x_change_mtu,
+ .ndo_set_mac_address = et131x_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats = et131x_stats,
+ .ndo_do_ioctl = et131x_ioctl,
+};
+
+/**
+ * et131x_device_alloc
+ *
+ * Returns pointer to the allocated and initialized net_device struct for
+ * this device.
+ *
+ * Create instances of net_device and wl_private for the new adapter and
+ * register the device's entry points in the net_device structure.
+ */
+struct net_device *et131x_device_alloc(void)
+{
+ struct net_device *netdev;
+
+ /* Alloc net_device and adapter structs */
+ netdev = alloc_etherdev(sizeof(struct et131x_adapter));
+
+ if (!netdev) {
+ printk(KERN_ERR "et131x: Alloc of net_device struct failed\n");
+ return NULL;
+ }
+
+ /*
+ * Setup the function registration table (and other data) for a
+ * net_device
+ */
+ netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
+ netdev->netdev_ops = &et131x_netdev_ops;
+
+ /* Poll? */
+ /* netdev->poll = &et131x_poll; */
+ /* netdev->poll_controller = &et131x_poll_controller; */
+ return netdev;
+}
+
+/**
+ * et131x_pci_setup - Perform device initialization
+ * @pdev: a pointer to the device's pci_dev structure
+ * @ent: this device's entry in the pci_device_id table
+ *
+ * Returns 0 on success, errno on failure (as defined in errno.h)
+ *
+ * Registered in the pci_driver structure, this function is called when the
+ * PCI subsystem finds a new PCI device which matches the information
+ * contained in the pci_device_id table. This routine is the equivalent to
+ * a device insertion routine.
+ */
+static int __devinit et131x_pci_setup(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int result;
+ struct net_device *netdev;
+ struct et131x_adapter *adapter;
+ int ii;
+
+ result = pci_enable_device(pdev);
+ if (result) {
+ dev_err(&pdev->dev, "pci_enable_device() failed\n");
+ goto err_out;
+ }
+
+ /* Perform some basic PCI checks */
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "Can't find PCI device's base address\n");
+ goto err_disable;
+ }
+
+ if (pci_request_regions(pdev, DRIVER_NAME)) {
+ dev_err(&pdev->dev, "Can't get PCI resources\n");
+ goto err_disable;
+ }
+
+ pci_set_master(pdev);
+
+ /* Check the DMA addressing support of this device */
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (result) {
+ dev_err(&pdev->dev,
+ "Unable to obtain 64 bit DMA for consistent allocations\n");
+ goto err_release_res;
+ }
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (result) {
+ dev_err(&pdev->dev,
+ "Unable to obtain 32 bit DMA for consistent allocations\n");
+ goto err_release_res;
+ }
+ } else {
+ dev_err(&pdev->dev, "No usable DMA addressing method\n");
+ result = -EIO;
+ goto err_release_res;
+ }
+
+ /* Allocate netdev and private adapter structs */
+ netdev = et131x_device_alloc();
+ if (!netdev) {
+ dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
+ result = -ENOMEM;
+ goto err_release_res;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ et131x_set_ethtool_ops(netdev);
+
+ adapter = et131x_adapter_init(netdev, pdev);
+
+ /* Initialise the PCI setup for the device */
+ et131x_pci_init(adapter, pdev);
+
+ /* Map the bus-relative registers to system virtual memory */
+ adapter->regs = pci_ioremap_bar(pdev, 0);
+ if (!adapter->regs) {
+ dev_err(&pdev->dev, "Cannot map device registers\n");
+ result = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ /* If Phy COMA mode was enabled when we went down, disable it here. */
+ writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
+
+ /* Issue a global reset to the et1310 */
+ et131x_soft_reset(adapter);
+
+ /* Disable all interrupts (paranoid) */
+ et131x_disable_interrupts(adapter);
+
+ /* Allocate DMA memory */
+ result = et131x_adapter_memory_alloc(adapter);
+ if (result) {
+ dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n");
+ goto err_iounmap;
+ }
+
+ /* Init send data structures */
+ et131x_init_send(adapter);
+
+ /* Set up the task structure for the ISR's deferred handler */
+ INIT_WORK(&adapter->task, et131x_isr_handler);
+
+ /* Copy address into the net_device struct */
+ memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
+
+ /* Init variable for counting how long we do not have link status */
+ adapter->boot_coma = 0;
+ et1310_disable_phy_coma(adapter);
+
+ /* Setup the mii_bus struct */
+ adapter->mii_bus = mdiobus_alloc();
+ if (!adapter->mii_bus) {
+ dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
+ goto err_mem_free;
+ }
+
+ adapter->mii_bus->name = "et131x_eth_mii";
+ snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
+ (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
+ adapter->mii_bus->priv = netdev;
+ adapter->mii_bus->read = et131x_mdio_read;
+ adapter->mii_bus->write = et131x_mdio_write;
+ adapter->mii_bus->reset = et131x_mdio_reset;
+ adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (!adapter->mii_bus->irq) {
+ dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
+ goto err_mdio_free;
+ }
+
+ for (ii = 0; ii < PHY_MAX_ADDR; ii++)
+ adapter->mii_bus->irq[ii] = PHY_POLL;
+
+ if (mdiobus_register(adapter->mii_bus)) {
+ dev_err(&pdev->dev, "failed to register MII bus\n");
+ mdiobus_free(adapter->mii_bus);
+ goto err_mdio_free_irq;
+ }
+
+ if (et131x_mii_probe(netdev)) {
+ dev_err(&pdev->dev, "failed to probe MII bus\n");
+ goto err_mdio_unregister;
+ }
+
+ /* Setup et1310 as per the documentation */
+ et131x_adapter_setup(adapter);
+
+ /* We can enable interrupts now
+ *
+ * NOTE - Because registration of interrupt handler is done in the
+ * device's open(), defer enabling device interrupts to that
+ * point
+ */
+
+ /* Register the net_device struct with the Linux network layer */
+ result = register_netdev(netdev);
+ if (result != 0) {
+ dev_err(&pdev->dev, "register_netdev() failed\n");
+ goto err_mdio_unregister;
+ }
+
+ /* Register the net_device struct with the PCI subsystem. Save a copy
+ * of the PCI config space for this device now that the device has
+ * been initialized, just in case it needs to be quickly restored.
+ */
+ pci_set_drvdata(pdev, netdev);
+ pci_save_state(adapter->pdev);
+
+ return result;
+
+err_mdio_unregister:
+ mdiobus_unregister(adapter->mii_bus);
+err_mdio_free_irq:
+ kfree(adapter->mii_bus->irq);
+err_mdio_free:
+ mdiobus_free(adapter->mii_bus);
+err_mem_free:
+ et131x_adapter_memory_free(adapter);
+err_iounmap:
+ iounmap(adapter->regs);
+err_free_dev:
+ pci_dev_put(pdev);
+ free_netdev(netdev);
+err_release_res:
+ pci_release_regions(pdev);
+err_disable:
+ pci_disable_device(pdev);
+err_out:
+ return result;
+}
+
+static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
+#define ET131X_PM_OPS (&et131x_pm_ops)
+#else
+#define ET131X_PM_OPS NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
+ { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
+ { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, et131x_pci_table);
+
+static struct pci_driver et131x_driver = {
+ .name = DRIVER_NAME,
+ .id_table = et131x_pci_table,
+ .probe = et131x_pci_setup,
+ .remove = __devexit_p(et131x_pci_remove),
+ .driver.pm = ET131X_PM_OPS,
+};
+
+/**
+ * et131x_init_module - The "main" entry point called on driver initialization
+ *
+ * Returns 0 on success, errno on failure (as defined in errno.h)
+ */
+static int __init et131x_init_module(void)
+{
+ return pci_register_driver(&et131x_driver);
+}
+
+/**
+ * et131x_cleanup_module - The entry point called on driver cleanup
+ */
+static void __exit et131x_cleanup_module(void)
+{
+ pci_unregister_driver(&et131x_driver);
+}
+
+module_init(et131x_init_module);
+module_exit(et131x_cleanup_module);
+
diff --git a/drivers/staging/et131x/et131x.h b/drivers/staging/et131x/et131x.h
index 48ebac0e55c..7eed3c8986f 100644
--- a/drivers/staging/et131x/et131x.h
+++ b/drivers/staging/et131x/et131x.h
@@ -1,6 +1,4 @@
/*
- * Merged from files
- *
* Copyright © 2005 Agere Systems Inc.
* All rights reserved.
* http://www.agere.com
@@ -48,106 +46,1621 @@
*
*/
-/* et131x_eeprom.c */
-int et131x_init_eeprom(struct et131x_adapter *etdev);
-
-/* et131x_initpci.c */
-void ConfigGlobalRegs(struct et131x_adapter *pAdapter);
-void ConfigMMCRegs(struct et131x_adapter *pAdapter);
-void et131x_enable_interrupts(struct et131x_adapter *adapter);
-void et131x_disable_interrupts(struct et131x_adapter *adapter);
-void et131x_align_allocated_memory(struct et131x_adapter *adapter,
- u64 *phys_addr,
- u64 *offset, u64 mask);
-
-int et131x_adapter_setup(struct et131x_adapter *adapter);
-int et131x_adapter_memory_alloc(struct et131x_adapter *adapter);
-void et131x_adapter_memory_free(struct et131x_adapter *adapter);
-void et131x_hwaddr_init(struct et131x_adapter *adapter);
-void et131x_soft_reset(struct et131x_adapter *adapter);
-
-/* et131x_isr.c */
-irqreturn_t et131x_isr(int irq, void *dev_id);
-void et131x_isr_handler(struct work_struct *work);
-
-/* et1310_mac.c */
-void ConfigMACRegs1(struct et131x_adapter *adapter);
-void ConfigMACRegs2(struct et131x_adapter *adapter);
-void ConfigRxMacRegs(struct et131x_adapter *adapter);
-void ConfigTxMacRegs(struct et131x_adapter *adapter);
-void ConfigMacStatRegs(struct et131x_adapter *adapter);
-void ConfigFlowControl(struct et131x_adapter *adapter);
-void UpdateMacStatHostCounters(struct et131x_adapter *adapter);
-void HandleMacStatInterrupt(struct et131x_adapter *adapter);
-void SetupDeviceForMulticast(struct et131x_adapter *adapter);
-void SetupDeviceForUnicast(struct et131x_adapter *adapter);
-
-/* et131x_netdev.c */
-struct net_device *et131x_device_alloc(void);
-
-/* et131x_pm.c */
-void EnablePhyComa(struct et131x_adapter *adapter);
-void DisablePhyComa(struct et131x_adapter *adapter);
-
-/* et131x_phy.c */
-void ET1310_PhyInit(struct et131x_adapter *adapter);
-void ET1310_PhyReset(struct et131x_adapter *adapter);
-void ET1310_PhyPowerDown(struct et131x_adapter *adapter, bool down);
-void ET1310_PhyAdvertise1000BaseT(struct et131x_adapter *adapter,
- u16 duplex);
-void ET1310_PhyAccessMiBit(struct et131x_adapter *adapter,
- u16 action,
- u16 regnum, u16 bitnum, u8 *value);
-
-int et131x_xcvr_find(struct et131x_adapter *adapter);
-void et131x_setphy_normal(struct et131x_adapter *adapter);
-
-/* static inline function does not work because et131x_adapter is not always
- * defined
- */
-int PhyMiRead(struct et131x_adapter *adapter, u8 xcvrAddr,
- u8 xcvrReg, u16 *value);
-#define MiRead(adapter, xcvrReg, value) \
- PhyMiRead((adapter), (adapter)->stats.xcvr_addr, (xcvrReg), (value))
-
-int32_t MiWrite(struct et131x_adapter *adapter,
- u8 xcvReg, u16 value);
-void et131x_Mii_check(struct et131x_adapter *pAdapter,
- u16 bmsr, u16 bmsr_ints);
-
-/* This last is not strictly required (the driver could call the TPAL
- * version instead), but this sets the adapter up correctly, and calls the
- * access routine indirectly. This protects the driver from changes in TPAL.
- */
-void SetPhy_10BaseTHalfDuplex(struct et131x_adapter *adapter);
-
-
-/* et1310_rx.c */
-int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter);
-void et131x_rx_dma_memory_free(struct et131x_adapter *adapter);
-int et131x_rfd_resources_alloc(struct et131x_adapter *adapter,
- struct rfd *rfd);
-void et131x_rfd_resources_free(struct et131x_adapter *adapter,
- struct rfd *rfd);
-int et131x_init_recv(struct et131x_adapter *adapter);
-
-void ConfigRxDmaRegs(struct et131x_adapter *adapter);
-void SetRxDmaTimer(struct et131x_adapter *adapter);
-void et131x_rx_dma_disable(struct et131x_adapter *adapter);
-void et131x_rx_dma_enable(struct et131x_adapter *adapter);
-
-void et131x_reset_recv(struct et131x_adapter *adapter);
-
-void et131x_handle_recv_interrupt(struct et131x_adapter *adapter);
-
-/* et131x_tx.c */
-int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter);
-void et131x_tx_dma_memory_free(struct et131x_adapter *adapter);
-void ConfigTxDmaRegs(struct et131x_adapter *adapter);
-void et131x_init_send(struct et131x_adapter *adapter);
-void et131x_tx_dma_disable(struct et131x_adapter *adapter);
-void et131x_tx_dma_enable(struct et131x_adapter *adapter);
-void et131x_handle_send_interrupt(struct et131x_adapter *adapter);
-void et131x_free_busy_send_packets(struct et131x_adapter *adapter);
-int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev);
+#define DRIVER_NAME "et131x"
+#define DRIVER_VERSION "v2.0"
+
+/* EEPROM registers */
+
+/* LBCIF Register Groups (addressed via 32-bit offsets) */
+#define LBCIF_DWORD0_GROUP 0xAC
+#define LBCIF_DWORD1_GROUP 0xB0
+
+/* LBCIF Registers (addressed via 8-bit offsets) */
+#define LBCIF_ADDRESS_REGISTER 0xAC
+#define LBCIF_DATA_REGISTER 0xB0
+#define LBCIF_CONTROL_REGISTER 0xB1
+#define LBCIF_STATUS_REGISTER 0xB2
+
+/* LBCIF Control Register Bits */
+#define LBCIF_CONTROL_SEQUENTIAL_READ 0x01
+#define LBCIF_CONTROL_PAGE_WRITE 0x02
+#define LBCIF_CONTROL_EEPROM_RELOAD 0x08
+#define LBCIF_CONTROL_TWO_BYTE_ADDR 0x20
+#define LBCIF_CONTROL_I2C_WRITE 0x40
+#define LBCIF_CONTROL_LBCIF_ENABLE 0x80
+
+/* LBCIF Status Register Bits */
+#define LBCIF_STATUS_PHY_QUEUE_AVAIL 0x01
+#define LBCIF_STATUS_I2C_IDLE 0x02
+#define LBCIF_STATUS_ACK_ERROR 0x04
+#define LBCIF_STATUS_GENERAL_ERROR 0x08
+#define LBCIF_STATUS_CHECKSUM_ERROR 0x40
+#define LBCIF_STATUS_EEPROM_PRESENT 0x80
+
+/* START OF GLOBAL REGISTER ADDRESS MAP */
+
+/*
+ * 10bit registers
+ *
+ * Tx queue start address reg in global address map at address 0x0000
+ * tx queue end address reg in global address map at address 0x0004
+ * rx queue start address reg in global address map at address 0x0008
+ * rx queue end address reg in global address map at address 0x000C
+ */
+
+/*
+ * structure for power management control status reg in global address map
+ * located at address 0x0010
+ * jagcore_rx_rdy bit 9
+ * jagcore_tx_rdy bit 8
+ * phy_lped_en bit 7
+ * phy_sw_coma bit 6
+ * rxclk_gate bit 5
+ * txclk_gate bit 4
+ * sysclk_gate bit 3
+ * jagcore_rx_en bit 2
+ * jagcore_tx_en bit 1
+ * gigephy_en bit 0
+ */
+
+#define ET_PM_PHY_SW_COMA 0x40
+#define ET_PMCSR_INIT 0x38
+
+/*
+ * Interrupt status reg at address 0x0018
+ */
+
+#define ET_INTR_TXDMA_ISR 0x00000008
+#define ET_INTR_TXDMA_ERR 0x00000010
+#define ET_INTR_RXDMA_XFR_DONE 0x00000020
+#define ET_INTR_RXDMA_FB_R0_LOW 0x00000040
+#define ET_INTR_RXDMA_FB_R1_LOW 0x00000080
+#define ET_INTR_RXDMA_STAT_LOW 0x00000100
+#define ET_INTR_RXDMA_ERR 0x00000200
+#define ET_INTR_WATCHDOG 0x00004000
+#define ET_INTR_WOL 0x00008000
+#define ET_INTR_PHY 0x00010000
+#define ET_INTR_TXMAC 0x00020000
+#define ET_INTR_RXMAC 0x00040000
+#define ET_INTR_MAC_STAT 0x00080000
+#define ET_INTR_SLV_TIMEOUT 0x00100000
+
+/*
+ * Interrupt mask register at address 0x001C
+ * Interrupt alias clear mask reg at address 0x0020
+ * Interrupt status alias reg at address 0x0024
+ *
+ * Same masks as above
+ */
+
+/*
+ * Software reset reg at address 0x0028
+ * 0: txdma_sw_reset
+ * 1: rxdma_sw_reset
+ * 2: txmac_sw_reset
+ * 3: rxmac_sw_reset
+ * 4: mac_sw_reset
+ * 5: mac_stat_sw_reset
+ * 6: mmc_sw_reset
+ *31: selfclr_disable
+ */
+
+/*
+ * SLV Timer reg at address 0x002C (low 24 bits)
+ */
+
+/*
+ * MSI Configuration reg at address 0x0030
+ */
+
+#define ET_MSI_VECTOR 0x0000001F
+#define ET_MSI_TC 0x00070000
+
+/*
+ * Loopback reg located at address 0x0034
+ */
+
+#define ET_LOOP_MAC 0x00000001
+#define ET_LOOP_DMA 0x00000002
+
+/*
+ * GLOBAL Module of JAGCore Address Mapping
+ * Located at address 0x0000
+ */
+struct global_regs { /* Location: */
+ u32 txq_start_addr; /* 0x0000 */
+ u32 txq_end_addr; /* 0x0004 */
+ u32 rxq_start_addr; /* 0x0008 */
+ u32 rxq_end_addr; /* 0x000C */
+ u32 pm_csr; /* 0x0010 */
+ u32 unused; /* 0x0014 */
+ u32 int_status; /* 0x0018 */
+ u32 int_mask; /* 0x001C */
+ u32 int_alias_clr_en; /* 0x0020 */
+ u32 int_status_alias; /* 0x0024 */
+ u32 sw_reset; /* 0x0028 */
+ u32 slv_timer; /* 0x002C */
+ u32 msi_config; /* 0x0030 */
+ u32 loopback; /* 0x0034 */
+ u32 watchdog_timer; /* 0x0038 */
+};
+
+
+/* START OF TXDMA REGISTER ADDRESS MAP */
+
+/*
+ * txdma control status reg at address 0x1000
+ */
+
+#define ET_TXDMA_CSR_HALT 0x00000001
+#define ET_TXDMA_DROP_TLP 0x00000002
+#define ET_TXDMA_CACHE_THRS 0x000000F0
+#define ET_TXDMA_CACHE_SHIFT 4
+#define ET_TXDMA_SNGL_EPKT 0x00000100
+#define ET_TXDMA_CLASS 0x00001E00
+
+/*
+ * structure for txdma packet ring base address hi reg in txdma address map
+ * located at address 0x1004
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for txdma packet ring base address low reg in txdma address map
+ * located at address 0x1008
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for txdma packet ring number of descriptor reg in txdma address
+ * map. Located at address 0x100C
+ *
+ * 31-10: unused
+ * 9-0: pr ndes
+ */
+
+#define ET_DMA12_MASK 0x0FFF /* 12 bit mask for DMA12W types */
+#define ET_DMA12_WRAP 0x1000
+#define ET_DMA10_MASK 0x03FF /* 10 bit mask for DMA10W types */
+#define ET_DMA10_WRAP 0x0400
+#define ET_DMA4_MASK 0x000F /* 4 bit mask for DMA4W types */
+#define ET_DMA4_WRAP 0x0010
+
+#define INDEX12(x) ((x) & ET_DMA12_MASK)
+#define INDEX10(x) ((x) & ET_DMA10_MASK)
+#define INDEX4(x) ((x) & ET_DMA4_MASK)
+
+/*
+ * 10bit DMA with wrap
+ * txdma tx queue write address reg in txdma address map at 0x1010
+ * txdma tx queue write address external reg in txdma address map at 0x1014
+ * txdma tx queue read address reg in txdma address map at 0x1018
+ *
+ * u32
+ * txdma status writeback address hi reg in txdma address map at0x101C
+ * txdma status writeback address lo reg in txdma address map at 0x1020
+ *
+ * 10bit DMA with wrap
+ * txdma service request reg in txdma address map at 0x1024
+ * structure for txdma service complete reg in txdma address map at 0x1028
+ *
+ * 4bit DMA with wrap
+ * txdma tx descriptor cache read index reg in txdma address map at 0x102C
+ * txdma tx descriptor cache write index reg in txdma address map at 0x1030
+ *
+ * txdma error reg in txdma address map at address 0x1034
+ * 0: PyldResend
+ * 1: PyldRewind
+ * 4: DescrResend
+ * 5: DescrRewind
+ * 8: WrbkResend
+ * 9: WrbkRewind
+ */
+
+/*
+ * Tx DMA Module of JAGCore Address Mapping
+ * Located at address 0x1000
+ */
+struct txdma_regs { /* Location: */
+ u32 csr; /* 0x1000 */
+ u32 pr_base_hi; /* 0x1004 */
+ u32 pr_base_lo; /* 0x1008 */
+ u32 pr_num_des; /* 0x100C */
+ u32 txq_wr_addr; /* 0x1010 */
+ u32 txq_wr_addr_ext; /* 0x1014 */
+ u32 txq_rd_addr; /* 0x1018 */
+ u32 dma_wb_base_hi; /* 0x101C */
+ u32 dma_wb_base_lo; /* 0x1020 */
+ u32 service_request; /* 0x1024 */
+ u32 service_complete; /* 0x1028 */
+ u32 cache_rd_index; /* 0x102C */
+ u32 cache_wr_index; /* 0x1030 */
+ u32 tx_dma_error; /* 0x1034 */
+ u32 desc_abort_cnt; /* 0x1038 */
+ u32 payload_abort_cnt; /* 0x103c */
+ u32 writeback_abort_cnt; /* 0x1040 */
+ u32 desc_timeout_cnt; /* 0x1044 */
+ u32 payload_timeout_cnt; /* 0x1048 */
+ u32 writeback_timeout_cnt; /* 0x104c */
+ u32 desc_error_cnt; /* 0x1050 */
+ u32 payload_error_cnt; /* 0x1054 */
+ u32 writeback_error_cnt; /* 0x1058 */
+ u32 dropped_tlp_cnt; /* 0x105c */
+ u32 new_service_complete; /* 0x1060 */
+ u32 ethernet_packet_cnt; /* 0x1064 */
+};
+
+/* END OF TXDMA REGISTER ADDRESS MAP */
+
+
+/* START OF RXDMA REGISTER ADDRESS MAP */
+
+/*
+ * structure for control status reg in rxdma address map
+ * Located at address 0x2000
+ *
+ * CSR
+ * 0: halt
+ * 1-3: tc
+ * 4: fbr_big_endian
+ * 5: psr_big_endian
+ * 6: pkt_big_endian
+ * 7: dma_big_endian
+ * 8-9: fbr0_size
+ * 10: fbr0_enable
+ * 11-12: fbr1_size
+ * 13: fbr1_enable
+ * 14: unused
+ * 15: pkt_drop_disable
+ * 16: pkt_done_flush
+ * 17: halt_status
+ * 18-31: unused
+ */
+
+
+/*
+ * structure for dma writeback lo reg in rxdma address map
+ * located at address 0x2004
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for dma writeback hi reg in rxdma address map
+ * located at address 0x2008
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for number of packets done reg in rxdma address map
+ * located at address 0x200C
+ *
+ * 31-8: unused
+ * 7-0: num done
+ */
+
+/*
+ * structure for max packet time reg in rxdma address map
+ * located at address 0x2010
+ *
+ * 31-18: unused
+ * 17-0: time done
+ */
+
+/*
+ * structure for rx queue read address reg in rxdma address map
+ * located at address 0x2014
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for rx queue read address external reg in rxdma address map
+ * located at address 0x2018
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for rx queue write address reg in rxdma address map
+ * located at address 0x201C
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for packet status ring base address lo reg in rxdma address map
+ * located at address 0x2020
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for packet status ring base address hi reg in rxdma address map
+ * located at address 0x2024
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for packet status ring number of descriptors reg in rxdma address
+ * map. Located at address 0x2028
+ *
+ * 31-12: unused
+ * 11-0: psr ndes
+ */
+
+/*
+ * structure for packet status ring available offset reg in rxdma address map
+ * located at address 0x202C
+ *
+ * 31-13: unused
+ * 12: psr avail wrap
+ * 11-0: psr avail
+ */
+
+/*
+ * structure for packet status ring full offset reg in rxdma address map
+ * located at address 0x2030
+ *
+ * 31-13: unused
+ * 12: psr full wrap
+ * 11-0: psr full
+ */
+
+/*
+ * structure for packet status ring access index reg in rxdma address map
+ * located at address 0x2034
+ *
+ * 31-5: unused
+ * 4-0: psr_ai
+ */
+
+/*
+ * structure for packet status ring minimum descriptors reg in rxdma address
+ * map. Located at address 0x2038
+ *
+ * 31-12: unused
+ * 11-0: psr_min
+ */
+
+/*
+ * structure for free buffer ring base lo address reg in rxdma address map
+ * located at address 0x203C
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for free buffer ring base hi address reg in rxdma address map
+ * located at address 0x2040
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for free buffer ring number of descriptors reg in rxdma address
+ * map. Located at address 0x2044
+ *
+ * 31-10: unused
+ * 9-0: fbr ndesc
+ */
+
+/*
+ * structure for free buffer ring 0 available offset reg in rxdma address map
+ * located at address 0x2048
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for free buffer ring 0 full offset reg in rxdma address map
+ * located at address 0x204C
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for free buffer cache 0 full offset reg in rxdma address map
+ * located at address 0x2050
+ *
+ * 31-5: unused
+ * 4-0: fbc rdi
+ */
+
+/*
+ * structure for free buffer ring 0 minimum descriptor reg in rxdma address map
+ * located at address 0x2054
+ *
+ * 31-10: unused
+ * 9-0: fbr min
+ */
+
+/*
+ * structure for free buffer ring 1 base address lo reg in rxdma address map
+ * located at address 0x2058 - 0x205C
+ * Defined earlier (RXDMA_FBR_BASE_LO_t and RXDMA_FBR_BASE_HI_t)
+ */
+
+/*
+ * structure for free buffer ring 1 number of descriptors reg in rxdma address
+ * map. Located at address 0x2060
+ * Defined earlier (RXDMA_FBR_NUM_DES_t)
+ */
+
+/*
+ * structure for free buffer ring 1 available offset reg in rxdma address map
+ * located at address 0x2064
+ * Defined Earlier (RXDMA_FBR_AVAIL_OFFSET_t)
+ */
+
+/*
+ * structure for free buffer ring 1 full offset reg in rxdma address map
+ * located at address 0x2068
+ * Defined Earlier (RXDMA_FBR_FULL_OFFSET_t)
+ */
+
+/*
+ * structure for free buffer cache 1 read index reg in rxdma address map
+ * located at address 0x206C
+ * Defined Earlier (RXDMA_FBC_RD_INDEX_t)
+ */
+
+/*
+ * structure for free buffer ring 1 minimum descriptor reg in rxdma address map
+ * located at address 0x2070
+ * Defined Earlier (RXDMA_FBR_MIN_DES_t)
+ */
+
+/*
+ * Rx DMA Module of JAGCore Address Mapping
+ * Located at address 0x2000
+ */
+struct rxdma_regs { /* Location: */
+ u32 csr; /* 0x2000 */
+ u32 dma_wb_base_lo; /* 0x2004 */
+ u32 dma_wb_base_hi; /* 0x2008 */
+ u32 num_pkt_done; /* 0x200C */
+ u32 max_pkt_time; /* 0x2010 */
+ u32 rxq_rd_addr; /* 0x2014 */
+ u32 rxq_rd_addr_ext; /* 0x2018 */
+ u32 rxq_wr_addr; /* 0x201C */
+ u32 psr_base_lo; /* 0x2020 */
+ u32 psr_base_hi; /* 0x2024 */
+ u32 psr_num_des; /* 0x2028 */
+ u32 psr_avail_offset; /* 0x202C */
+ u32 psr_full_offset; /* 0x2030 */
+ u32 psr_access_index; /* 0x2034 */
+ u32 psr_min_des; /* 0x2038 */
+ u32 fbr0_base_lo; /* 0x203C */
+ u32 fbr0_base_hi; /* 0x2040 */
+ u32 fbr0_num_des; /* 0x2044 */
+ u32 fbr0_avail_offset; /* 0x2048 */
+ u32 fbr0_full_offset; /* 0x204C */
+ u32 fbr0_rd_index; /* 0x2050 */
+ u32 fbr0_min_des; /* 0x2054 */
+ u32 fbr1_base_lo; /* 0x2058 */
+ u32 fbr1_base_hi; /* 0x205C */
+ u32 fbr1_num_des; /* 0x2060 */
+ u32 fbr1_avail_offset; /* 0x2064 */
+ u32 fbr1_full_offset; /* 0x2068 */
+ u32 fbr1_rd_index; /* 0x206C */
+ u32 fbr1_min_des; /* 0x2070 */
+};
+
+/* END OF RXDMA REGISTER ADDRESS MAP */
+
+
+/* START OF TXMAC REGISTER ADDRESS MAP */
+
+/*
+ * structure for control reg in txmac address map
+ * located at address 0x3000
+ *
+ * bits
+ * 31-8: unused
+ * 7: cklseg_disable
+ * 6: ckbcnt_disable
+ * 5: cksegnum
+ * 4: async_disable
+ * 3: fc_disable
+ * 2: mcif_disable
+ * 1: mif_disable
+ * 0: txmac_en
+ */
+
+/*
+ * structure for shadow pointer reg in txmac address map
+ * located at address 0x3004
+ * 31-27: reserved
+ * 26-16: txq rd ptr
+ * 15-11: reserved
+ * 10-0: txq wr ptr
+ */
+
+/*
+ * structure for error count reg in txmac address map
+ * located at address 0x3008
+ *
+ * 31-12: unused
+ * 11-8: reserved
+ * 7-4: txq_underrun
+ * 3-0: fifo_underrun
+ */
+
+/*
+ * structure for max fill reg in txmac address map
+ * located at address 0x300C
+ * 31-12: unused
+ * 11-0: max fill
+ */
+
+/*
+ * structure for cf parameter reg in txmac address map
+ * located at address 0x3010
+ * 31-16: cfep
+ * 15-0: cfpt
+ */
+
+/*
+ * structure for tx test reg in txmac address map
+ * located at address 0x3014
+ * 31-17: unused
+ * 16: reserved1
+ * 15: txtest_en
+ * 14-11: unused
+ * 10-0: txq test pointer
+ */
+
+/*
+ * structure for error reg in txmac address map
+ * located at address 0x3018
+ *
+ * 31-9: unused
+ * 8: fifo_underrun
+ * 7-6: unused
+ * 5: ctrl2_err
+ * 4: txq_underrun
+ * 3: bcnt_err
+ * 2: lseg_err
+ * 1: segnum_err
+ * 0: seg0_err
+ */
+
+/*
+ * structure for error interrupt reg in txmac address map
+ * located at address 0x301C
+ *
+ * 31-9: unused
+ * 8: fifo_underrun
+ * 7-6: unused
+ * 5: ctrl2_err
+ * 4: txq_underrun
+ * 3: bcnt_err
+ * 2: lseg_err
+ * 1: segnum_err
+ * 0: seg0_err
+ */
+
+/*
+ * structure for error interrupt reg in txmac address map
+ * located at address 0x3020
+ *
+ * 31-2: unused
+ * 1: bp_req
+ * 0: bp_xonxoff
+ */
+
+/*
+ * Tx MAC Module of JAGCore Address Mapping
+ */
+struct txmac_regs { /* Location: */
+ u32 ctl; /* 0x3000 */
+ u32 shadow_ptr; /* 0x3004 */
+ u32 err_cnt; /* 0x3008 */
+ u32 max_fill; /* 0x300C */
+ u32 cf_param; /* 0x3010 */
+ u32 tx_test; /* 0x3014 */
+ u32 err; /* 0x3018 */
+ u32 err_int; /* 0x301C */
+ u32 bp_ctrl; /* 0x3020 */
+};
+
+/* END OF TXMAC REGISTER ADDRESS MAP */
+
+/* START OF RXMAC REGISTER ADDRESS MAP */
+
+/*
+ * structure for rxmac control reg in rxmac address map
+ * located at address 0x4000
+ *
+ * 31-7: reserved
+ * 6: rxmac_int_disable
+ * 5: async_disable
+ * 4: mif_disable
+ * 3: wol_disable
+ * 2: pkt_filter_disable
+ * 1: mcif_disable
+ * 0: rxmac_en
+ */
+
+/*
+ * structure for Wake On Lan Control and CRC 0 reg in rxmac address map
+ * located at address 0x4004
+ * 31-16: crc
+ * 15-12: reserved
+ * 11: ignore_pp
+ * 10: ignore_mp
+ * 9: clr_intr
+ * 8: ignore_link_chg
+ * 7: ignore_uni
+ * 6: ignore_multi
+ * 5: ignore_broad
+ * 4-0: valid_crc 4-0
+ */
+
+/*
+ * structure for CRC 1 and CRC 2 reg in rxmac address map
+ * located at address 0x4008
+ *
+ * 31-16: crc2
+ * 15-0: crc1
+ */
+
+/*
+ * structure for CRC 3 and CRC 4 reg in rxmac address map
+ * located at address 0x400C
+ *
+ * 31-16: crc4
+ * 15-0: crc3
+ */
+
+/*
+ * structure for Wake On Lan Source Address Lo reg in rxmac address map
+ * located at address 0x4010
+ *
+ * 31-24: sa3
+ * 23-16: sa4
+ * 15-8: sa5
+ * 7-0: sa6
+ */
+
+#define ET_WOL_LO_SA3_SHIFT 24
+#define ET_WOL_LO_SA4_SHIFT 16
+#define ET_WOL_LO_SA5_SHIFT 8
+
+/*
+ * structure for Wake On Lan Source Address Hi reg in rxmac address map
+ * located at address 0x4014
+ *
+ * 31-16: reserved
+ * 15-8: sa1
+ * 7-0: sa2
+ */
+
+#define ET_WOL_HI_SA1_SHIFT 8
+
+/*
+ * structure for Wake On Lan mask reg in rxmac address map
+ * located at address 0x4018 - 0x4064
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for Unicast Paket Filter Address 1 reg in rxmac address map
+ * located at address 0x4068
+ *
+ * 31-24: addr1_3
+ * 23-16: addr1_4
+ * 15-8: addr1_5
+ * 7-0: addr1_6
+ */
+
+#define ET_UNI_PF_ADDR1_3_SHIFT 24
+#define ET_UNI_PF_ADDR1_4_SHIFT 16
+#define ET_UNI_PF_ADDR1_5_SHIFT 8
+
+/*
+ * structure for Unicast Paket Filter Address 2 reg in rxmac address map
+ * located at address 0x406C
+ *
+ * 31-24: addr2_3
+ * 23-16: addr2_4
+ * 15-8: addr2_5
+ * 7-0: addr2_6
+ */
+
+#define ET_UNI_PF_ADDR2_3_SHIFT 24
+#define ET_UNI_PF_ADDR2_4_SHIFT 16
+#define ET_UNI_PF_ADDR2_5_SHIFT 8
+
+/*
+ * structure for Unicast Paket Filter Address 1 & 2 reg in rxmac address map
+ * located at address 0x4070
+ *
+ * 31-24: addr2_1
+ * 23-16: addr2_2
+ * 15-8: addr1_1
+ * 7-0: addr1_2
+ */
+
+#define ET_UNI_PF_ADDR2_1_SHIFT 24
+#define ET_UNI_PF_ADDR2_2_SHIFT 16
+#define ET_UNI_PF_ADDR1_1_SHIFT 8
+
+
+/*
+ * structure for Multicast Hash reg in rxmac address map
+ * located at address 0x4074 - 0x4080
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for Packet Filter Control reg in rxmac address map
+ * located at address 0x4084
+ *
+ * 31-23: unused
+ * 22-16: min_pkt_size
+ * 15-4: unused
+ * 3: filter_frag_en
+ * 2: filter_uni_en
+ * 1: filter_multi_en
+ * 0: filter_broad_en
+ */
+
+/*
+ * structure for Memory Controller Interface Control Max Segment reg in rxmac
+ * address map. Located at address 0x4088
+ *
+ * 31-10: reserved
+ * 9-2: max_size
+ * 1: fc_en
+ * 0: seg_en
+ */
+
+/*
+ * structure for Memory Controller Interface Water Mark reg in rxmac address
+ * map. Located at address 0x408C
+ *
+ * 31-26: unused
+ * 25-16: mark_hi
+ * 15-10: unused
+ * 9-0: mark_lo
+ */
+
+/*
+ * structure for Rx Queue Dialog reg in rxmac address map.
+ * located at address 0x4090
+ *
+ * 31-26: reserved
+ * 25-16: rd_ptr
+ * 15-10: reserved
+ * 9-0: wr_ptr
+ */
+
+/*
+ * structure for space available reg in rxmac address map.
+ * located at address 0x4094
+ *
+ * 31-17: reserved
+ * 16: space_avail_en
+ * 15-10: reserved
+ * 9-0: space_avail
+ */
+
+/*
+ * structure for management interface reg in rxmac address map.
+ * located at address 0x4098
+ *
+ * 31-18: reserved
+ * 17: drop_pkt_en
+ * 16-0: drop_pkt_mask
+ */
+
+/*
+ * structure for Error reg in rxmac address map.
+ * located at address 0x409C
+ *
+ * 31-4: unused
+ * 3: mif
+ * 2: async
+ * 1: pkt_filter
+ * 0: mcif
+ */
+
+/*
+ * Rx MAC Module of JAGCore Address Mapping
+ */
+struct rxmac_regs { /* Location: */
+ u32 ctrl; /* 0x4000 */
+ u32 crc0; /* 0x4004 */
+ u32 crc12; /* 0x4008 */
+ u32 crc34; /* 0x400C */
+ u32 sa_lo; /* 0x4010 */
+ u32 sa_hi; /* 0x4014 */
+ u32 mask0_word0; /* 0x4018 */
+ u32 mask0_word1; /* 0x401C */
+ u32 mask0_word2; /* 0x4020 */
+ u32 mask0_word3; /* 0x4024 */
+ u32 mask1_word0; /* 0x4028 */
+ u32 mask1_word1; /* 0x402C */
+ u32 mask1_word2; /* 0x4030 */
+ u32 mask1_word3; /* 0x4034 */
+ u32 mask2_word0; /* 0x4038 */
+ u32 mask2_word1; /* 0x403C */
+ u32 mask2_word2; /* 0x4040 */
+ u32 mask2_word3; /* 0x4044 */
+ u32 mask3_word0; /* 0x4048 */
+ u32 mask3_word1; /* 0x404C */
+ u32 mask3_word2; /* 0x4050 */
+ u32 mask3_word3; /* 0x4054 */
+ u32 mask4_word0; /* 0x4058 */
+ u32 mask4_word1; /* 0x405C */
+ u32 mask4_word2; /* 0x4060 */
+ u32 mask4_word3; /* 0x4064 */
+ u32 uni_pf_addr1; /* 0x4068 */
+ u32 uni_pf_addr2; /* 0x406C */
+ u32 uni_pf_addr3; /* 0x4070 */
+ u32 multi_hash1; /* 0x4074 */
+ u32 multi_hash2; /* 0x4078 */
+ u32 multi_hash3; /* 0x407C */
+ u32 multi_hash4; /* 0x4080 */
+ u32 pf_ctrl; /* 0x4084 */
+ u32 mcif_ctrl_max_seg; /* 0x4088 */
+ u32 mcif_water_mark; /* 0x408C */
+ u32 rxq_diag; /* 0x4090 */
+ u32 space_avail; /* 0x4094 */
+
+ u32 mif_ctrl; /* 0x4098 */
+ u32 err_reg; /* 0x409C */
+};
+
+/* END OF RXMAC REGISTER ADDRESS MAP */
+
+
+/* START OF MAC REGISTER ADDRESS MAP */
+
+/*
+ * structure for configuration #1 reg in mac address map.
+ * located at address 0x5000
+ *
+ * 31: soft reset
+ * 30: sim reset
+ * 29-20: reserved
+ * 19: reset rx mc
+ * 18: reset tx mc
+ * 17: reset rx func
+ * 16: reset tx fnc
+ * 15-9: reserved
+ * 8: loopback
+ * 7-6: reserved
+ * 5: rx flow
+ * 4: tx flow
+ * 3: syncd rx en
+ * 2: rx enable
+ * 1: syncd tx en
+ * 0: tx enable
+ */
+
+#define CFG1_LOOPBACK 0x00000100
+#define CFG1_RX_FLOW 0x00000020
+#define CFG1_TX_FLOW 0x00000010
+#define CFG1_RX_ENABLE 0x00000004
+#define CFG1_TX_ENABLE 0x00000001
+#define CFG1_WAIT 0x0000000A /* RX & TX syncd */
+
+/*
+ * structure for configuration #2 reg in mac address map.
+ * located at address 0x5004
+ * 31-16: reserved
+ * 15-12: preamble
+ * 11-10: reserved
+ * 9-8: if mode
+ * 7-6: reserved
+ * 5: huge frame
+ * 4: length check
+ * 3: undefined
+ * 2: pad crc
+ * 1: crc enable
+ * 0: full duplex
+ */
+
+
+/*
+ * structure for Interpacket gap reg in mac address map.
+ * located at address 0x5008
+ *
+ * 31: reserved
+ * 30-24: non B2B ipg 1
+ * 23: undefined
+ * 22-16: non B2B ipg 2
+ * 15-8: Min ifg enforce
+ * 7-0: B2B ipg
+ *
+ * structure for half duplex reg in mac address map.
+ * located at address 0x500C
+ * 31-24: reserved
+ * 23-20: Alt BEB trunc
+ * 19: Alt BEB enable
+ * 18: BP no backoff
+ * 17: no backoff
+ * 16: excess defer
+ * 15-12: re-xmit max
+ * 11-10: reserved
+ * 9-0: collision window
+ */
+
+/*
+ * structure for Maximum Frame Length reg in mac address map.
+ * located at address 0x5010: bits 0-15 hold the length.
+ */
+
+/*
+ * structure for Reserve 1 reg in mac address map.
+ * located at address 0x5014 - 0x5018
+ * Defined earlier (u32)
+ */
+
+/*
+ * structure for Test reg in mac address map.
+ * located at address 0x501C
+ * test: bits 0-2, rest unused
+ */
+
+/*
+ * structure for MII Management Configuration reg in mac address map.
+ * located at address 0x5020
+ *
+ * 31: reset MII mgmt
+ * 30-6: unused
+ * 5: scan auto increment
+ * 4: preamble suppress
+ * 3: undefined
+ * 2-0: mgmt clock reset
+ */
+
+/*
+ * structure for MII Management Command reg in mac address map.
+ * located at address 0x5024
+ * bit 1: scan cycle
+ * bit 0: read cycle
+ */
+
+/*
+ * structure for MII Management Address reg in mac address map.
+ * located at address 0x5028
+ * 31-13: reserved
+ * 12-8: phy addr
+ * 7-5: reserved
+ * 4-0: register
+ */
+
+#define MII_ADDR(phy, reg) ((phy) << 8 | (reg))
+
+/*
+ * structure for MII Management Control reg in mac address map.
+ * located at address 0x502C
+ * 31-16: reserved
+ * 15-0: phy control
+ */
+
+/*
+ * structure for MII Management Status reg in mac address map.
+ * located at address 0x5030
+ * 31-16: reserved
+ * 15-0: phy control
+ */
+
+/*
+ * structure for MII Management Indicators reg in mac address map.
+ * located at address 0x5034
+ * 31-3: reserved
+ * 2: not valid
+ * 1: scanning
+ * 0: busy
+ */
+
+#define MGMT_BUSY 0x00000001 /* busy */
+#define MGMT_WAIT 0x00000005 /* busy | not valid */
+
+/*
+ * structure for Interface Control reg in mac address map.
+ * located at address 0x5038
+ *
+ * 31: reset if module
+ * 30-28: reserved
+ * 27: tbi mode
+ * 26: ghd mode
+ * 25: lhd mode
+ * 24: phy mode
+ * 23: reset per mii
+ * 22-17: reserved
+ * 16: speed
+ * 15: reset pe100x
+ * 14-11: reserved
+ * 10: force quiet
+ * 9: no cipher
+ * 8: disable link fail
+ * 7: reset gpsi
+ * 6-1: reserved
+ * 0: enable jabber protection
+ */
+
+/*
+ * structure for Interface Status reg in mac address map.
+ * located at address 0x503C
+ *
+ * 31-10: reserved
+ * 9: excess_defer
+ * 8: clash
+ * 7: phy_jabber
+ * 6: phy_link_ok
+ * 5: phy_full_duplex
+ * 4: phy_speed
+ * 3: pe100x_link_fail
+ * 2: pe10t_loss_carrier
+ * 1: pe10t_sqe_error
+ * 0: pe10t_jabber
+ */
+
+/*
+ * structure for Mac Station Address, Part 1 reg in mac address map.
+ * located at address 0x5040
+ *
+ * 31-24: Octet6
+ * 23-16: Octet5
+ * 15-8: Octet4
+ * 7-0: Octet3
+ */
+
+#define ET_MAC_STATION_ADDR1_OC6_SHIFT 24
+#define ET_MAC_STATION_ADDR1_OC5_SHIFT 16
+#define ET_MAC_STATION_ADDR1_OC4_SHIFT 8
+
+/*
+ * structure for Mac Station Address, Part 2 reg in mac address map.
+ * located at address 0x5044
+ *
+ * 31-24: Octet2
+ * 23-16: Octet1
+ * 15-0: reserved
+ */
+
+#define ET_MAC_STATION_ADDR2_OC2_SHIFT 24
+#define ET_MAC_STATION_ADDR2_OC1_SHIFT 16
+
+/*
+ * MAC Module of JAGCore Address Mapping
+ */
+struct mac_regs { /* Location: */
+ u32 cfg1; /* 0x5000 */
+ u32 cfg2; /* 0x5004 */
+ u32 ipg; /* 0x5008 */
+ u32 hfdp; /* 0x500C */
+ u32 max_fm_len; /* 0x5010 */
+ u32 rsv1; /* 0x5014 */
+ u32 rsv2; /* 0x5018 */
+ u32 mac_test; /* 0x501C */
+ u32 mii_mgmt_cfg; /* 0x5020 */
+ u32 mii_mgmt_cmd; /* 0x5024 */
+ u32 mii_mgmt_addr; /* 0x5028 */
+ u32 mii_mgmt_ctrl; /* 0x502C */
+ u32 mii_mgmt_stat; /* 0x5030 */
+ u32 mii_mgmt_indicator; /* 0x5034 */
+ u32 if_ctrl; /* 0x5038 */
+ u32 if_stat; /* 0x503C */
+ u32 station_addr_1; /* 0x5040 */
+ u32 station_addr_2; /* 0x5044 */
+};
+
+/* END OF MAC REGISTER ADDRESS MAP */
+
+/* START OF MAC STAT REGISTER ADDRESS MAP */
+
+/*
+ * structure for Carry Register One and it's Mask Register reg located in mac
+ * stat address map address 0x6130 and 0x6138.
+ *
+ * 31: tr64
+ * 30: tr127
+ * 29: tr255
+ * 28: tr511
+ * 27: tr1k
+ * 26: trmax
+ * 25: trmgv
+ * 24-17: unused
+ * 16: rbyt
+ * 15: rpkt
+ * 14: rfcs
+ * 13: rmca
+ * 12: rbca
+ * 11: rxcf
+ * 10: rxpf
+ * 9: rxuo
+ * 8: raln
+ * 7: rflr
+ * 6: rcde
+ * 5: rcse
+ * 4: rund
+ * 3: rovr
+ * 2: rfrg
+ * 1: rjbr
+ * 0: rdrp
+ */
+
+/*
+ * structure for Carry Register Two Mask Register reg in mac stat address map.
+ * located at address 0x613C
+ *
+ * 31-20: unused
+ * 19: tjbr
+ * 18: tfcs
+ * 17: txcf
+ * 16: tovr
+ * 15: tund
+ * 14: trfg
+ * 13: tbyt
+ * 12: tpkt
+ * 11: tmca
+ * 10: tbca
+ * 9: txpf
+ * 8: tdfr
+ * 7: tedf
+ * 6: tscl
+ * 5: tmcl
+ * 4: tlcl
+ * 3: txcl
+ * 2: tncl
+ * 1: tpfh
+ * 0: tdrp
+ */
+
+/*
+ * MAC STATS Module of JAGCore Address Mapping
+ */
+struct macstat_regs { /* Location: */
+ u32 pad[32]; /* 0x6000 - 607C */
+
+ /* Tx/Rx 0-64 Byte Frame Counter */
+ u32 txrx_0_64_byte_frames; /* 0x6080 */
+
+ /* Tx/Rx 65-127 Byte Frame Counter */
+ u32 txrx_65_127_byte_frames; /* 0x6084 */
+
+ /* Tx/Rx 128-255 Byte Frame Counter */
+ u32 txrx_128_255_byte_frames; /* 0x6088 */
+
+ /* Tx/Rx 256-511 Byte Frame Counter */
+ u32 txrx_256_511_byte_frames; /* 0x608C */
+
+ /* Tx/Rx 512-1023 Byte Frame Counter */
+ u32 txrx_512_1023_byte_frames; /* 0x6090 */
+
+ /* Tx/Rx 1024-1518 Byte Frame Counter */
+ u32 txrx_1024_1518_byte_frames; /* 0x6094 */
+
+ /* Tx/Rx 1519-1522 Byte Good VLAN Frame Count */
+ u32 txrx_1519_1522_gvln_frames; /* 0x6098 */
+
+ /* Rx Byte Counter */
+ u32 rx_bytes; /* 0x609C */
+
+ /* Rx Packet Counter */
+ u32 rx_packets; /* 0x60A0 */
+
+ /* Rx FCS Error Counter */
+ u32 rx_fcs_errs; /* 0x60A4 */
+
+ /* Rx Multicast Packet Counter */
+ u32 rx_multicast_packets; /* 0x60A8 */
+
+ /* Rx Broadcast Packet Counter */
+ u32 rx_broadcast_packets; /* 0x60AC */
+
+ /* Rx Control Frame Packet Counter */
+ u32 rx_control_frames; /* 0x60B0 */
+
+ /* Rx Pause Frame Packet Counter */
+ u32 rx_pause_frames; /* 0x60B4 */
+
+ /* Rx Unknown OP Code Counter */
+ u32 rx_unknown_opcodes; /* 0x60B8 */
+
+ /* Rx Alignment Error Counter */
+ u32 rx_align_errs; /* 0x60BC */
+
+ /* Rx Frame Length Error Counter */
+ u32 rx_frame_len_errs; /* 0x60C0 */
+
+ /* Rx Code Error Counter */
+ u32 rx_code_errs; /* 0x60C4 */
+
+ /* Rx Carrier Sense Error Counter */
+ u32 rx_carrier_sense_errs; /* 0x60C8 */
+
+ /* Rx Undersize Packet Counter */
+ u32 rx_undersize_packets; /* 0x60CC */
+
+ /* Rx Oversize Packet Counter */
+ u32 rx_oversize_packets; /* 0x60D0 */
+
+ /* Rx Fragment Counter */
+ u32 rx_fragment_packets; /* 0x60D4 */
+
+ /* Rx Jabber Counter */
+ u32 rx_jabbers; /* 0x60D8 */
+
+ /* Rx Drop */
+ u32 rx_drops; /* 0x60DC */
+
+ /* Tx Byte Counter */
+ u32 tx_bytes; /* 0x60E0 */
+
+ /* Tx Packet Counter */
+ u32 tx_packets; /* 0x60E4 */
+
+ /* Tx Multicast Packet Counter */
+ u32 tx_multicast_packets; /* 0x60E8 */
+
+ /* Tx Broadcast Packet Counter */
+ u32 tx_broadcast_packets; /* 0x60EC */
+
+ /* Tx Pause Control Frame Counter */
+ u32 tx_pause_frames; /* 0x60F0 */
+
+ /* Tx Deferral Packet Counter */
+ u32 tx_deferred; /* 0x60F4 */
+
+ /* Tx Excessive Deferral Packet Counter */
+ u32 tx_excessive_deferred; /* 0x60F8 */
+
+ /* Tx Single Collision Packet Counter */
+ u32 tx_single_collisions; /* 0x60FC */
+
+ /* Tx Multiple Collision Packet Counter */
+ u32 tx_multiple_collisions; /* 0x6100 */
+
+ /* Tx Late Collision Packet Counter */
+ u32 tx_late_collisions; /* 0x6104 */
+
+ /* Tx Excessive Collision Packet Counter */
+ u32 tx_excessive_collisions; /* 0x6108 */
+
+ /* Tx Total Collision Packet Counter */
+ u32 tx_total_collisions; /* 0x610C */
+
+ /* Tx Pause Frame Honored Counter */
+ u32 tx_pause_honored_frames; /* 0x6110 */
+
+ /* Tx Drop Frame Counter */
+ u32 tx_drops; /* 0x6114 */
+
+ /* Tx Jabber Frame Counter */
+ u32 tx_jabbers; /* 0x6118 */
+
+ /* Tx FCS Error Counter */
+ u32 tx_fcs_errs; /* 0x611C */
+
+ /* Tx Control Frame Counter */
+ u32 tx_control_frames; /* 0x6120 */
+
+ /* Tx Oversize Frame Counter */
+ u32 tx_oversize_frames; /* 0x6124 */
+
+ /* Tx Undersize Frame Counter */
+ u32 tx_undersize_frames; /* 0x6128 */
+
+ /* Tx Fragments Frame Counter */
+ u32 tx_fragments; /* 0x612C */
+
+ /* Carry Register One Register */
+ u32 carry_reg1; /* 0x6130 */
+
+ /* Carry Register Two Register */
+ u32 carry_reg2; /* 0x6134 */
+
+ /* Carry Register One Mask Register */
+ u32 carry_reg1_mask; /* 0x6138 */
+
+ /* Carry Register Two Mask Register */
+ u32 carry_reg2_mask; /* 0x613C */
+};
+
+/* END OF MAC STAT REGISTER ADDRESS MAP */
+
+/* START OF MMC REGISTER ADDRESS MAP */
+
+/*
+ * Main Memory Controller Control reg in mmc address map.
+ * located at address 0x7000
+ */
+
+#define ET_MMC_ENABLE 1
+#define ET_MMC_ARB_DISABLE 2
+#define ET_MMC_RXMAC_DISABLE 4
+#define ET_MMC_TXMAC_DISABLE 8
+#define ET_MMC_TXDMA_DISABLE 16
+#define ET_MMC_RXDMA_DISABLE 32
+#define ET_MMC_FORCE_CE 64
+
+/*
+ * Main Memory Controller Host Memory Access Address reg in mmc
+ * address map. Located at address 0x7004. Top 16 bits hold the address bits
+ */
+
+#define ET_SRAM_REQ_ACCESS 1
+#define ET_SRAM_WR_ACCESS 2
+#define ET_SRAM_IS_CTRL 4
+
+/*
+ * structure for Main Memory Controller Host Memory Access Data reg in mmc
+ * address map. Located at address 0x7008 - 0x7014
+ * Defined earlier (u32)
+ */
+
+/*
+ * Memory Control Module of JAGCore Address Mapping
+ */
+struct mmc_regs { /* Location: */
+ u32 mmc_ctrl; /* 0x7000 */
+ u32 sram_access; /* 0x7004 */
+ u32 sram_word1; /* 0x7008 */
+ u32 sram_word2; /* 0x700C */
+ u32 sram_word3; /* 0x7010 */
+ u32 sram_word4; /* 0x7014 */
+};
+
+/* END OF MMC REGISTER ADDRESS MAP */
+
+
+/*
+ * JAGCore Address Mapping
+ */
+struct address_map {
+ struct global_regs global;
+ /* unused section of global address map */
+ u8 unused_global[4096 - sizeof(struct global_regs)];
+ struct txdma_regs txdma;
+ /* unused section of txdma address map */
+ u8 unused_txdma[4096 - sizeof(struct txdma_regs)];
+ struct rxdma_regs rxdma;
+ /* unused section of rxdma address map */
+ u8 unused_rxdma[4096 - sizeof(struct rxdma_regs)];
+ struct txmac_regs txmac;
+ /* unused section of txmac address map */
+ u8 unused_txmac[4096 - sizeof(struct txmac_regs)];
+ struct rxmac_regs rxmac;
+ /* unused section of rxmac address map */
+ u8 unused_rxmac[4096 - sizeof(struct rxmac_regs)];
+ struct mac_regs mac;
+ /* unused section of mac address map */
+ u8 unused_mac[4096 - sizeof(struct mac_regs)];
+ struct macstat_regs macstat;
+ /* unused section of mac stat address map */
+ u8 unused_mac_stat[4096 - sizeof(struct macstat_regs)];
+ struct mmc_regs mmc;
+ /* unused section of mmc address map */
+ u8 unused_mmc[4096 - sizeof(struct mmc_regs)];
+ /* unused section of address map */
+ u8 unused_[1015808];
+
+ u8 unused_exp_rom[4096]; /* MGS-size TBD */
+ u8 unused__[524288]; /* unused section of address map */
+};
+
+/*
+ * Defines for generic MII registers 0x00 -> 0x0F can be found in
+ * include/linux/mii.h
+ */
+
+/* some defines for modem registers that seem to be 'reserved' */
+#define PHY_INDEX_REG 0x10
+#define PHY_DATA_REG 0x11
+#define PHY_MPHY_CONTROL_REG 0x12
+
+/* defines for specified registers */
+#define PHY_LOOPBACK_CONTROL 0x13 /* TRU_VMI_LOOPBACK_CONTROL_1_REG 19 */
+ /* TRU_VMI_LOOPBACK_CONTROL_2_REG 20 */
+#define PHY_REGISTER_MGMT_CONTROL 0x15 /* TRU_VMI_MI_SEQ_CONTROL_REG 21 */
+#define PHY_CONFIG 0x16 /* TRU_VMI_CONFIGURATION_REG 22 */
+#define PHY_PHY_CONTROL 0x17 /* TRU_VMI_PHY_CONTROL_REG 23 */
+#define PHY_INTERRUPT_MASK 0x18 /* TRU_VMI_INTERRUPT_MASK_REG 24 */
+#define PHY_INTERRUPT_STATUS 0x19 /* TRU_VMI_INTERRUPT_STATUS_REG 25 */
+#define PHY_PHY_STATUS 0x1A /* TRU_VMI_PHY_STATUS_REG 26 */
+#define PHY_LED_1 0x1B /* TRU_VMI_LED_CONTROL_1_REG 27 */
+#define PHY_LED_2 0x1C /* TRU_VMI_LED_CONTROL_2_REG 28 */
+ /* TRU_VMI_LINK_CONTROL_REG 29 */
+ /* TRU_VMI_TIMING_CONTROL_REG */
+
+/* MI Register 10: Gigabit basic mode status reg(Reg 0x0A) */
+#define ET_1000BT_MSTR_SLV 0x4000
+
+/* MI Register 16 - 18: Reserved Reg(0x10-0x12) */
+
+/* MI Register 19: Loopback Control Reg(0x13)
+ * 15: mii_en
+ * 14: pcs_en
+ * 13: pmd_en
+ * 12: all_digital_en
+ * 11: replica_en
+ * 10: line_driver_en
+ * 9-0: reserved
+ */
+
+/* MI Register 20: Reserved Reg(0x14) */
+
+/* MI Register 21: Management Interface Control Reg(0x15)
+ * 15-11: reserved
+ * 10-4: mi_error_count
+ * 3: reserved
+ * 2: ignore_10g_fr
+ * 1: reserved
+ * 0: preamble_supress_en
+ */
+
+/* MI Register 22: PHY Configuration Reg(0x16)
+ * 15: crs_tx_en
+ * 14: reserved
+ * 13-12: tx_fifo_depth
+ * 11-10: speed_downshift
+ * 9: pbi_detect
+ * 8: tbi_rate
+ * 7: alternate_np
+ * 6: group_mdio_en
+ * 5: tx_clock_en
+ * 4: sys_clock_en
+ * 3: reserved
+ * 2-0: mac_if_mode
+ */
+
+#define ET_PHY_CONFIG_TX_FIFO_DEPTH 0x3000
+
+#define ET_PHY_CONFIG_FIFO_DEPTH_8 0x0000
+#define ET_PHY_CONFIG_FIFO_DEPTH_16 0x1000
+#define ET_PHY_CONFIG_FIFO_DEPTH_32 0x2000
+#define ET_PHY_CONFIG_FIFO_DEPTH_64 0x3000
+
+/* MI Register 23: PHY CONTROL Reg(0x17)
+ * 15: reserved
+ * 14: tdr_en
+ * 13: reserved
+ * 12-11: downshift_attempts
+ * 10-6: reserved
+ * 5: jabber_10baseT
+ * 4: sqe_10baseT
+ * 3: tp_loopback_10baseT
+ * 2: preamble_gen_en
+ * 1: reserved
+ * 0: force_int
+ */
+
+/* MI Register 24: Interrupt Mask Reg(0x18)
+ * 15-10: reserved
+ * 9: mdio_sync_lost
+ * 8: autoneg_status
+ * 7: hi_bit_err
+ * 6: np_rx
+ * 5: err_counter_full
+ * 4: fifo_over_underflow
+ * 3: rx_status
+ * 2: link_status
+ * 1: automatic_speed
+ * 0: int_en
+ */
+
+#define ET_PHY_INT_MASK_AUTONEGSTAT 0x0100
+#define ET_PHY_INT_MASK_LINKSTAT 0x0004
+#define ET_PHY_INT_MASK_ENABLE 0x0001
+
+/* MI Register 25: Interrupt Status Reg(0x19)
+ * 15-10: reserved
+ * 9: mdio_sync_lost
+ * 8: autoneg_status
+ * 7: hi_bit_err
+ * 6: np_rx
+ * 5: err_counter_full
+ * 4: fifo_over_underflow
+ * 3: rx_status
+ * 2: link_status
+ * 1: automatic_speed
+ * 0: int_en
+ */
+
+/* MI Register 26: PHY Status Reg(0x1A)
+ * 15: reserved
+ * 14-13: autoneg_fault
+ * 12: autoneg_status
+ * 11: mdi_x_status
+ * 10: polarity_status
+ * 9-8: speed_status
+ * 7: duplex_status
+ * 6: link_status
+ * 5: tx_status
+ * 4: rx_status
+ * 3: collision_status
+ * 2: autoneg_en
+ * 1: pause_en
+ * 0: asymmetric_dir
+ */
+#define ET_PHY_AUTONEG_STATUS 0x1000
+#define ET_PHY_POLARITY_STATUS 0x0400
+#define ET_PHY_SPEED_STATUS 0x0300
+#define ET_PHY_DUPLEX_STATUS 0x0080
+#define ET_PHY_LSTATUS 0x0040
+#define ET_PHY_AUTONEG_ENABLE 0x0020
+
+/* MI Register 27: LED Control Reg 1(0x1B)
+ * 15-14: reserved
+ * 13-12: led_dup_indicate
+ * 11-10: led_10baseT
+ * 9-8: led_collision
+ * 7-4: reserved
+ * 3-2: pulse_dur
+ * 1: pulse_stretch1
+ * 0: pulse_stretch0
+ */
+
+/* MI Register 28: LED Control Reg 2(0x1C)
+ * 15-12: led_link
+ * 11-8: led_tx_rx
+ * 7-4: led_100BaseTX
+ * 3-0: led_1000BaseT
+ */
+#define ET_LED2_LED_LINK 0xF000
+#define ET_LED2_LED_TXRX 0x0F00
+#define ET_LED2_LED_100TX 0x00F0
+#define ET_LED2_LED_1000T 0x000F
+
+/* defines for LED control reg 2 values */
+#define LED_VAL_1000BT 0x0
+#define LED_VAL_100BTX 0x1
+#define LED_VAL_10BT 0x2
+#define LED_VAL_1000BT_100BTX 0x3 /* 1000BT on, 100BTX blink */
+#define LED_VAL_LINKON 0x4
+#define LED_VAL_TX 0x5
+#define LED_VAL_RX 0x6
+#define LED_VAL_TXRX 0x7 /* TX or RX */
+#define LED_VAL_DUPLEXFULL 0x8
+#define LED_VAL_COLLISION 0x9
+#define LED_VAL_LINKON_ACTIVE 0xA /* Link on, activity blink */
+#define LED_VAL_LINKON_RECV 0xB /* Link on, receive blink */
+#define LED_VAL_DUPLEXFULL_COLLISION 0xC /* Duplex on, collision blink */
+#define LED_VAL_BLINK 0xD
+#define LED_VAL_ON 0xE
+#define LED_VAL_OFF 0xF
+
+#define LED_LINK_SHIFT 12
+#define LED_TXRX_SHIFT 8
+#define LED_100TX_SHIFT 4
+
+/* MI Register 29 - 31: Reserved Reg(0x1D - 0x1E) */
+
+/* Defines for PHY access routines */
+
+/* Define bit operation flags */
+#define TRUEPHY_BIT_CLEAR 0
+#define TRUEPHY_BIT_SET 1
+#define TRUEPHY_BIT_READ 2
+
+/* Define read/write operation flags */
+#ifndef TRUEPHY_READ
+#define TRUEPHY_READ 0
+#define TRUEPHY_WRITE 1
+#define TRUEPHY_MASK 2
+#endif
+
+/* Define master/slave configuration values */
+#define TRUEPHY_CFG_SLAVE 0
+#define TRUEPHY_CFG_MASTER 1
+
+/* Define MDI/MDI-X settings */
+#define TRUEPHY_MDI 0
+#define TRUEPHY_MDIX 1
+#define TRUEPHY_AUTO_MDI_MDIX 2
+
+/* Define 10Base-T link polarities */
+#define TRUEPHY_POLARITY_NORMAL 0
+#define TRUEPHY_POLARITY_INVERTED 1
+
+/* Define auto-negotiation results */
+#define TRUEPHY_ANEG_NOT_COMPLETE 0
+#define TRUEPHY_ANEG_COMPLETE 1
+#define TRUEPHY_ANEG_DISABLED 2
+
+/* Define duplex advertisement flags */
+#define TRUEPHY_ADV_DUPLEX_NONE 0x00
+#define TRUEPHY_ADV_DUPLEX_FULL 0x01
+#define TRUEPHY_ADV_DUPLEX_HALF 0x02
+#define TRUEPHY_ADV_DUPLEX_BOTH \
+ (TRUEPHY_ADV_DUPLEX_FULL | TRUEPHY_ADV_DUPLEX_HALF)
diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
deleted file mode 100644
index 408c50ba4f2..00000000000
--- a/drivers/staging/et131x/et131x_adapter.h
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et131x_adapter.h - Header which includes the private adapter structure, along
- * with related support structures, macros, definitions, etc.
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#ifndef __ET131X_ADAPTER_H__
-#define __ET131X_ADAPTER_H__
-
-#include "et1310_address_map.h"
-#include "et1310_tx.h"
-#include "et1310_rx.h"
-
-/*
- * Do not change these values: if changed, then change also in respective
- * TXdma and Rxdma engines
- */
-#define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */
-#define NUM_TCB 64
-
-/*
- * These values are all superseded by registry entries to facilitate tuning.
- * Once the desired performance has been achieved, the optimal registry values
- * should be re-populated to these #defines:
- */
-#define NUM_TRAFFIC_CLASSES 1
-
-#define TX_ERROR_PERIOD 1000
-
-#define LO_MARK_PERCENT_FOR_PSR 15
-#define LO_MARK_PERCENT_FOR_RX 15
-
-/* RFD (Receive Frame Descriptor) */
-struct rfd {
- struct list_head list_node;
- struct sk_buff *skb;
- u32 len; /* total size of receive frame */
- u16 bufferindex;
- u8 ringindex;
-};
-
-/* Flow Control */
-#define FLOW_BOTH 0
-#define FLOW_TXONLY 1
-#define FLOW_RXONLY 2
-#define FLOW_NONE 3
-
-/* Struct to define some device statistics */
-struct ce_stats {
- /* MIB II variables
- *
- * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
- * MUST have 32, then we'll need another way to perform atomic
- * operations
- */
- u32 unircv; /* # multicast packets received */
- atomic_t unixmt; /* # multicast packets for Tx */
- u32 multircv; /* # multicast packets received */
- atomic_t multixmt; /* # multicast packets for Tx */
- u32 brdcstrcv; /* # broadcast packets received */
- atomic_t brdcstxmt; /* # broadcast packets for Tx */
- u32 norcvbuf; /* # Rx packets discarded */
- u32 noxmtbuf; /* # Tx packets discarded */
-
- /* Transceiver state informations. */
- u8 xcvr_addr;
- u32 xcvr_id;
-
- /* Tx Statistics. */
- u32 tx_uflo; /* Tx Underruns */
-
- u32 collisions;
- u32 excessive_collisions;
- u32 first_collision;
- u32 late_collisions;
- u32 max_pkt_error;
- u32 tx_deferred;
-
- /* Rx Statistics. */
- u32 rx_ov_flow; /* Rx Overflow */
-
- u32 length_err;
- u32 alignment_err;
- u32 crc_err;
- u32 code_violations;
- u32 other_errors;
-
- u32 SynchrounousIterations;
- u32 InterruptStatus;
-};
-
-
-/* The private adapter structure */
-struct et131x_adapter {
- struct net_device *netdev;
- struct pci_dev *pdev;
-
- struct work_struct task;
-
- /* Flags that indicate current state of the adapter */
- u32 flags;
- u32 HwErrCount;
-
- /* Configuration */
- u8 rom_addr[ETH_ALEN];
- u8 addr[ETH_ALEN];
- bool has_eeprom;
- u8 eeprom_data[2];
-
- /* Spinlocks */
- spinlock_t Lock;
-
- spinlock_t TCBSendQLock;
- spinlock_t TCBReadyQLock;
- spinlock_t send_hw_lock;
-
- spinlock_t rcv_lock;
- spinlock_t RcvPendLock;
- spinlock_t FbrLock;
-
- spinlock_t PHYLock;
-
- /* Packet Filter and look ahead size */
- u32 PacketFilter;
- u32 linkspeed;
- u32 duplex_mode;
-
- /* multicast list */
- u32 MCAddressCount;
- u8 MCList[NIC_MAX_MCAST_LIST][ETH_ALEN];
-
- /* Pointer to the device's PCI register space */
- struct address_map __iomem *regs;
-
- /* Registry parameters */
- u8 SpeedDuplex; /* speed/duplex */
- u8 wanted_flow; /* Flow we want for 802.3x flow control */
- u8 RegistryPhyComa; /* Phy Coma mode enable/disable */
-
- u32 RegistryRxMemEnd; /* Size of internal rx memory */
- u32 RegistryJumboPacket; /* Max supported ethernet packet size */
-
-
- /* Derived from the registry: */
- u8 AiForceDpx; /* duplex setting */
- u16 AiForceSpeed; /* 'Speed', user over-ride of line speed */
- u8 flowcontrol; /* flow control validated by the far-end */
- enum {
- NETIF_STATUS_INVALID = 0,
- NETIF_STATUS_MEDIA_CONNECT,
- NETIF_STATUS_MEDIA_DISCONNECT,
- NETIF_STATUS_MAX
- } MediaState;
-
- /* Minimize init-time */
- struct timer_list ErrorTimer;
-
- /* variable putting the phy into coma mode when boot up with no cable
- * plugged in after 5 seconds
- */
- u8 boot_coma;
-
- /* Next two used to save power information at power down. This
- * information will be used during power up to set up parts of Power
- * Management in JAGCore
- */
- u16 pdown_speed;
- u8 pdown_duplex;
-
- u32 CachedMaskValue;
-
- /* Xcvr status at last poll */
- u16 bmsr;
-
- /* Tx Memory Variables */
- struct tx_ring tx_ring;
-
- /* Rx Memory Variables */
- struct rx_ring rx_ring;
-
- /* Loopback specifics */
- u8 ReplicaPhyLoopbk; /* Replica Enable */
- u8 ReplicaPhyLoopbkPF; /* Replica Enable Pass/Fail */
-
- /* Stats */
- struct ce_stats stats;
-
- struct net_device_stats net_stats;
-};
-
-#endif /* __ET131X_ADAPTER_H__ */
diff --git a/drivers/staging/et131x/et131x_defs.h b/drivers/staging/et131x/et131x_defs.h
deleted file mode 100644
index 3d5193fdb00..00000000000
--- a/drivers/staging/et131x/et131x_defs.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et131x_defs.h - Defines, structs, enums, prototypes, etc. to assist with OS
- * compatibility
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#ifndef __ET131X_DEFS_H__
-#define __ET131X_DEFS_H__
-
-/* Packet and header sizes */
-#define NIC_MIN_PACKET_SIZE 60
-
-/* Multicast list size */
-#define NIC_MAX_MCAST_LIST 128
-
-/* Supported Filters */
-#define ET131X_PACKET_TYPE_DIRECTED 0x0001
-#define ET131X_PACKET_TYPE_MULTICAST 0x0002
-#define ET131X_PACKET_TYPE_BROADCAST 0x0004
-#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
-#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010
-
-/* Tx Timeout */
-#define ET131X_TX_TIMEOUT (1 * HZ)
-#define NIC_SEND_HANG_THRESHOLD 0
-
-/* MP_TCB flags */
-#define fMP_DEST_MULTI 0x00000001
-#define fMP_DEST_BROAD 0x00000002
-
-/* MP_ADAPTER flags */
-#define fMP_ADAPTER_RECV_LOOKASIDE 0x00000004
-#define fMP_ADAPTER_INTERRUPT_IN_USE 0x00000008
-#define fMP_ADAPTER_SECONDARY 0x00000010
-
-/* MP_SHARED flags */
-#define fMP_ADAPTER_SHUTDOWN 0x00100000
-#define fMP_ADAPTER_LOWER_POWER 0x00200000
-
-#define fMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
-#define fMP_ADAPTER_RESET_IN_PROGRESS 0x01000000
-#define fMP_ADAPTER_NO_CABLE 0x02000000
-#define fMP_ADAPTER_HARDWARE_ERROR 0x04000000
-#define fMP_ADAPTER_REMOVE_IN_PROGRESS 0x08000000
-#define fMP_ADAPTER_HALT_IN_PROGRESS 0x10000000
-
-#define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
-#define fMP_ADAPTER_NOT_READY_MASK 0x3ff00000
-
-/* Some offsets in PCI config space that are actually used. */
-#define ET1310_PCI_MAX_PYLD 0x4C
-#define ET1310_PCI_MAC_ADDRESS 0xA4
-#define ET1310_PCI_EEPROM_STATUS 0xB2
-#define ET1310_PCI_ACK_NACK 0xC0
-#define ET1310_PCI_REPLAY 0xC2
-#define ET1310_PCI_L0L1LATENCY 0xCF
-
-/* PCI Vendor/Product IDs */
-#define ET131X_PCI_VENDOR_ID 0x11C1 /* Agere Systems */
-#define ET131X_PCI_DEVICE_ID_GIG 0xED00 /* ET1310 1000 Base-T 8 */
-#define ET131X_PCI_DEVICE_ID_FAST 0xED01 /* ET1310 100 Base-T */
-
-/* Define order of magnitude converter */
-#define NANO_IN_A_MICRO 1000
-
-#define PARM_RX_NUM_BUFS_DEF 4
-#define PARM_RX_TIME_INT_DEF 10
-#define PARM_RX_MEM_END_DEF 0x2bc
-#define PARM_TX_TIME_INT_DEF 40
-#define PARM_TX_NUM_BUFS_DEF 4
-#define PARM_DMA_CACHE_DEF 0
-
-
-#endif /* __ET131X_DEFS_H__ */
diff --git a/drivers/staging/et131x/et131x_initpci.c b/drivers/staging/et131x/et131x_initpci.c
deleted file mode 100644
index 8c8d6b87a25..00000000000
--- a/drivers/staging/et131x/et131x_initpci.c
+++ /dev/null
@@ -1,848 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et131x_initpci.c - Routines and data used to register the driver with the
- * PCI (and PCI Express) subsystem, as well as basic driver
- * init and startup.
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#include "et131x_version.h"
-#include "et131x_defs.h"
-
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <asm/system.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-#include <linux/random.h>
-
-#include "et1310_phy.h"
-
-#include "et131x_adapter.h"
-
-#include "et1310_address_map.h"
-#include "et1310_tx.h"
-#include "et1310_rx.h"
-#include "et131x.h"
-
-#define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */
-#define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */
-
-/* Defines for Parameter Default/Min/Max vaules */
-#define PARM_SPEED_DUPLEX_MIN 0
-#define PARM_SPEED_DUPLEX_MAX 5
-
-/* Module parameter for manual speed setting
- * Set Link speed and dublex manually (0-5) [0]
- * 1 : 10Mb Half-Duplex
- * 2 : 10Mb Full-Duplex
- * 3 : 100Mb Half-Duplex
- * 4 : 100Mb Full-Duplex
- * 5 : 1000Mb Full-Duplex
- * 0 : Auto Speed Auto Duplex // default
- */
-static u32 et131x_speed_set;
-module_param(et131x_speed_set, uint, 0);
-MODULE_PARM_DESC(et131x_speed_set,
- "Set Link speed and dublex manually (0-5) [0]\n"
- "1 : 10Mb Half-Duplex\n"
- "2 : 10Mb Full-Duplex\n"
- "3 : 100Mb Half-Duplex\n"
- "4 : 100Mb Full-Duplex\n"
- "5 : 1000Mb Full-Duplex\n"
- "0 : Auto Speed Auto Dublex");
-
-/**
- * et131x_hwaddr_init - set up the MAC Address on the ET1310
- * @adapter: pointer to our private adapter structure
- */
-void et131x_hwaddr_init(struct et131x_adapter *adapter)
-{
- /* If have our default mac from init and no mac address from
- * EEPROM then we need to generate the last octet and set it on the
- * device
- */
- if (adapter->rom_addr[0] == 0x00 &&
- adapter->rom_addr[1] == 0x00 &&
- adapter->rom_addr[2] == 0x00 &&
- adapter->rom_addr[3] == 0x00 &&
- adapter->rom_addr[4] == 0x00 &&
- adapter->rom_addr[5] == 0x00) {
- /*
- * We need to randomly generate the last octet so we
- * decrease our chances of setting the mac address to
- * same as another one of our cards in the system
- */
- get_random_bytes(&adapter->addr[5], 1);
- /*
- * We have the default value in the register we are
- * working with so we need to copy the current
- * address into the permanent address
- */
- memcpy(adapter->rom_addr,
- adapter->addr, ETH_ALEN);
- } else {
- /* We do not have an override address, so set the
- * current address to the permanent address and add
- * it to the device
- */
- memcpy(adapter->addr,
- adapter->rom_addr, ETH_ALEN);
- }
-}
-
-
-/**
- * et131x_pci_init - initial PCI setup
- * @adapter: pointer to our private adapter structure
- * @pdev: our PCI device
- *
- * Perform the initial setup of PCI registers and if possible initialise
- * the MAC address. At this point the I/O registers have yet to be mapped
- */
-
-static int et131x_pci_init(struct et131x_adapter *adapter,
- struct pci_dev *pdev)
-{
- int i;
- u8 max_payload;
- u8 read_size_reg;
-
- if (et131x_init_eeprom(adapter) < 0)
- return -EIO;
-
- /* Let's set up the PORT LOGIC Register. First we need to know what
- * the max_payload_size is
- */
- if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) {
- dev_err(&pdev->dev,
- "Could not read PCI config space for Max Payload Size\n");
- return -EIO;
- }
-
- /* Program the Ack/Nak latency and replay timers */
- max_payload &= 0x07; /* Only the lower 3 bits are valid */
-
- if (max_payload < 2) {
- static const u16 acknak[2] = { 0x76, 0xD0 };
- static const u16 replay[2] = { 0x1E0, 0x2ED };
-
- if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
- acknak[max_payload])) {
- dev_err(&pdev->dev,
- "Could not write PCI config space for ACK/NAK\n");
- return -EIO;
- }
- if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
- replay[max_payload])) {
- dev_err(&pdev->dev,
- "Could not write PCI config space for Replay Timer\n");
- return -EIO;
- }
- }
-
- /* l0s and l1 latency timers. We are using default values.
- * Representing 001 for L0s and 010 for L1
- */
- if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
- dev_err(&pdev->dev,
- "Could not write PCI config space for Latency Timers\n");
- return -EIO;
- }
-
- /* Change the max read size to 2k */
- if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) {
- dev_err(&pdev->dev,
- "Could not read PCI config space for Max read size\n");
- return -EIO;
- }
-
- read_size_reg &= 0x8f;
- read_size_reg |= 0x40;
-
- if (pci_write_config_byte(pdev, 0x51, read_size_reg)) {
- dev_err(&pdev->dev,
- "Could not write PCI config space for Max read size\n");
- return -EIO;
- }
-
- /* Get MAC address from config space if an eeprom exists, otherwise
- * the MAC address there will not be valid
- */
- if (!adapter->has_eeprom) {
- et131x_hwaddr_init(adapter);
- return 0;
- }
-
- for (i = 0; i < ETH_ALEN; i++) {
- if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
- adapter->rom_addr + i)) {
- dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
- return -EIO;
- }
- }
- memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
- return 0;
-}
-
-/**
- * et131x_error_timer_handler
- * @data: timer-specific variable; here a pointer to our adapter structure
- *
- * The routine called when the error timer expires, to track the number of
- * recurring errors.
- */
-void et131x_error_timer_handler(unsigned long data)
-{
- struct et131x_adapter *etdev = (struct et131x_adapter *) data;
- u32 pm_csr;
-
- pm_csr = readl(&etdev->regs->global.pm_csr);
-
- if ((pm_csr & ET_PM_PHY_SW_COMA) == 0)
- UpdateMacStatHostCounters(etdev);
- else
- dev_err(&etdev->pdev->dev,
- "No interrupts, in PHY coma, pm_csr = 0x%x\n", pm_csr);
-
- if (!(etdev->bmsr & MI_BMSR_LINK_STATUS) &&
- etdev->RegistryPhyComa &&
- etdev->boot_coma < 11) {
- etdev->boot_coma++;
- }
-
- if (etdev->boot_coma == 10) {
- if (!(etdev->bmsr & MI_BMSR_LINK_STATUS)
- && etdev->RegistryPhyComa) {
- if ((pm_csr & ET_PM_PHY_SW_COMA) == 0) {
- /* NOTE - This was originally a 'sync with
- * interrupt'. How to do that under Linux?
- */
- et131x_enable_interrupts(etdev);
- EnablePhyComa(etdev);
- }
- }
- }
-
- /* This is a periodic timer, so reschedule */
- mod_timer(&etdev->ErrorTimer, jiffies +
- TX_ERROR_PERIOD * HZ / 1000);
-}
-
-/**
- * et131x_link_detection_handler
- *
- * Timer function for link up at driver load time
- */
-void et131x_link_detection_handler(unsigned long data)
-{
- struct et131x_adapter *etdev = (struct et131x_adapter *) data;
- unsigned long flags;
-
- if (etdev->MediaState == 0) {
- spin_lock_irqsave(&etdev->Lock, flags);
-
- etdev->MediaState = NETIF_STATUS_MEDIA_DISCONNECT;
-
- spin_unlock_irqrestore(&etdev->Lock, flags);
-
- netif_carrier_off(etdev->netdev);
- }
-}
-
-/**
- * et131x_configure_global_regs - configure JAGCore global regs
- * @etdev: pointer to our adapter structure
- *
- * Used to configure the global registers on the JAGCore
- */
-void ConfigGlobalRegs(struct et131x_adapter *etdev)
-{
- struct global_regs __iomem *regs = &etdev->regs->global;
-
- writel(0, &regs->rxq_start_addr);
- writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
-
- if (etdev->RegistryJumboPacket < 2048) {
- /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
- * block of RAM that the driver can split between Tx
- * and Rx as it desires. Our default is to split it
- * 50/50:
- */
- writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
- writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
- } else if (etdev->RegistryJumboPacket < 8192) {
- /* For jumbo packets > 2k but < 8k, split 50-50. */
- writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
- writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
- } else {
- /* 9216 is the only packet size greater than 8k that
- * is available. The Tx buffer has to be big enough
- * for one whole packet on the Tx side. We'll make
- * the Tx 9408, and give the rest to Rx
- */
- writel(0x01b3, &regs->rxq_end_addr);
- writel(0x01b4, &regs->txq_start_addr);
- }
-
- /* Initialize the loopback register. Disable all loopbacks. */
- writel(0, &regs->loopback);
-
- /* MSI Register */
- writel(0, &regs->msi_config);
-
- /* By default, disable the watchdog timer. It will be enabled when
- * a packet is queued.
- */
- writel(0, &regs->watchdog_timer);
-}
-
-
-/**
- * et131x_adapter_setup - Set the adapter up as per cassini+ documentation
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
-int et131x_adapter_setup(struct et131x_adapter *etdev)
-{
- int status = 0;
-
- /* Configure the JAGCore */
- ConfigGlobalRegs(etdev);
-
- ConfigMACRegs1(etdev);
-
- /* Configure the MMC registers */
- /* All we need to do is initialize the Memory Control Register */
- writel(ET_MMC_ENABLE, &etdev->regs->mmc.mmc_ctrl);
-
- ConfigRxMacRegs(etdev);
- ConfigTxMacRegs(etdev);
-
- ConfigRxDmaRegs(etdev);
- ConfigTxDmaRegs(etdev);
-
- ConfigMacStatRegs(etdev);
-
- /* Move the following code to Timer function?? */
- status = et131x_xcvr_find(etdev);
-
- if (status != 0)
- dev_warn(&etdev->pdev->dev, "Could not find the xcvr\n");
-
- /* Prepare the TRUEPHY library. */
- ET1310_PhyInit(etdev);
-
- /* Reset the phy now so changes take place */
- ET1310_PhyReset(etdev);
-
- /* Power down PHY */
- ET1310_PhyPowerDown(etdev, 1);
-
- /*
- * We need to turn off 1000 base half dulplex, the mac does not
- * support it. For the 10/100 part, turn off all gig advertisement
- */
- if (etdev->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
- ET1310_PhyAdvertise1000BaseT(etdev, TRUEPHY_ADV_DUPLEX_FULL);
- else
- ET1310_PhyAdvertise1000BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
-
- /* Power up PHY */
- ET1310_PhyPowerDown(etdev, 0);
-
- et131x_setphy_normal(etdev);
-; return status;
-}
-
-/**
- * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
- * @adapter: pointer to our private adapter structure
- */
-void et131x_soft_reset(struct et131x_adapter *adapter)
-{
- /* Disable MAC Core */
- writel(0xc00f0000, &adapter->regs->mac.cfg1);
-
- /* Set everything to a reset value */
- writel(0x7F, &adapter->regs->global.sw_reset);
- writel(0x000f0000, &adapter->regs->mac.cfg1);
- writel(0x00000000, &adapter->regs->mac.cfg1);
-}
-
-/**
- * et131x_align_allocated_memory - Align allocated memory on a given boundary
- * @adapter: pointer to our adapter structure
- * @phys_addr: pointer to Physical address
- * @offset: pointer to the offset variable
- * @mask: correct mask
- */
-void et131x_align_allocated_memory(struct et131x_adapter *adapter,
- uint64_t *phys_addr,
- uint64_t *offset, uint64_t mask)
-{
- uint64_t new_addr;
-
- *offset = 0;
-
- new_addr = *phys_addr & ~mask;
-
- if (new_addr != *phys_addr) {
- /* Move to next aligned block */
- new_addr += mask + 1;
- /* Return offset for adjusting virt addr */
- *offset = new_addr - *phys_addr;
- /* Return new physical address */
- *phys_addr = new_addr;
- }
-}
-
-/**
- * et131x_adapter_memory_alloc
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success, errno on failure (as defined in errno.h).
- *
- * Allocate all the memory blocks for send, receive and others.
- */
-int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
-{
- int status;
-
- /* Allocate memory for the Tx Ring */
- status = et131x_tx_dma_memory_alloc(adapter);
- if (status != 0) {
- dev_err(&adapter->pdev->dev,
- "et131x_tx_dma_memory_alloc FAILED\n");
- return status;
- }
- /* Receive buffer memory allocation */
- status = et131x_rx_dma_memory_alloc(adapter);
- if (status != 0) {
- dev_err(&adapter->pdev->dev,
- "et131x_rx_dma_memory_alloc FAILED\n");
- et131x_tx_dma_memory_free(adapter);
- return status;
- }
-
- /* Init receive data structures */
- status = et131x_init_recv(adapter);
- if (status != 0) {
- dev_err(&adapter->pdev->dev,
- "et131x_init_recv FAILED\n");
- et131x_tx_dma_memory_free(adapter);
- et131x_rx_dma_memory_free(adapter);
- }
- return status;
-}
-
-/**
- * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
- * @adapter: pointer to our private adapter structure
- */
-void et131x_adapter_memory_free(struct et131x_adapter *adapter)
-{
- /* Free DMA memory */
- et131x_tx_dma_memory_free(adapter);
- et131x_rx_dma_memory_free(adapter);
-}
-
-
-
-/**
- * et131x_adapter_init
- * @etdev: pointer to the private adapter struct
- * @pdev: pointer to the PCI device
- *
- * Initialize the data structures for the et131x_adapter object and link
- * them together with the platform provided device structures.
- */
-
-
-static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
- struct pci_dev *pdev)
-{
- static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
- static const u8 duplex[] = { 0, 1, 2, 1, 2, 2 };
- static const u16 speed[] = { 0, 10, 10, 100, 100, 1000 };
-
- struct et131x_adapter *etdev;
-
- /* Setup the fundamental net_device and private adapter structure
- * elements */
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- /* Allocate private adapter struct and copy in relevant information */
- etdev = netdev_priv(netdev);
- etdev->pdev = pci_dev_get(pdev);
- etdev->netdev = netdev;
-
- /* Do the same for the netdev struct */
- netdev->irq = pdev->irq;
- netdev->base_addr = pci_resource_start(pdev, 0);
-
- /* Initialize spinlocks here */
- spin_lock_init(&etdev->Lock);
- spin_lock_init(&etdev->TCBSendQLock);
- spin_lock_init(&etdev->TCBReadyQLock);
- spin_lock_init(&etdev->send_hw_lock);
- spin_lock_init(&etdev->rcv_lock);
- spin_lock_init(&etdev->RcvPendLock);
- spin_lock_init(&etdev->FbrLock);
- spin_lock_init(&etdev->PHYLock);
-
- /* Parse configuration parameters into the private adapter struct */
- if (et131x_speed_set)
- dev_info(&etdev->pdev->dev,
- "Speed set manually to : %d\n", et131x_speed_set);
-
- etdev->SpeedDuplex = et131x_speed_set;
- etdev->RegistryJumboPacket = 1514; /* 1514-9216 */
-
- /* Set the MAC address to a default */
- memcpy(etdev->addr, default_mac, ETH_ALEN);
-
- /* Decode SpeedDuplex
- *
- * Set up as if we are auto negotiating always and then change if we
- * go into force mode
- *
- * If we are the 10/100 device, and gigabit is somehow requested then
- * knock it down to 100 full.
- */
- if (etdev->pdev->device == ET131X_PCI_DEVICE_ID_FAST &&
- etdev->SpeedDuplex == 5)
- etdev->SpeedDuplex = 4;
-
- etdev->AiForceSpeed = speed[etdev->SpeedDuplex];
- etdev->AiForceDpx = duplex[etdev->SpeedDuplex]; /* Auto FDX */
-
- return etdev;
-}
-
-/**
- * et131x_pci_setup - Perform device initialization
- * @pdev: a pointer to the device's pci_dev structure
- * @ent: this device's entry in the pci_device_id table
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- *
- * Registered in the pci_driver structure, this function is called when the
- * PCI subsystem finds a new PCI device which matches the information
- * contained in the pci_device_id table. This routine is the equivalent to
- * a device insertion routine.
- */
-
-static int __devinit et131x_pci_setup(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int result = -EBUSY;
- int pm_cap;
- bool pci_using_dac;
- struct net_device *netdev;
- struct et131x_adapter *adapter;
-
- /* Enable the device via the PCI subsystem */
- if (pci_enable_device(pdev) != 0) {
- dev_err(&pdev->dev,
- "pci_enable_device() failed\n");
- return -EIO;
- }
-
- /* Perform some basic PCI checks */
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev,
- "Can't find PCI device's base address\n");
- goto err_disable;
- }
-
- if (pci_request_regions(pdev, DRIVER_NAME)) {
- dev_err(&pdev->dev,
- "Can't get PCI resources\n");
- goto err_disable;
- }
-
- /* Enable PCI bus mastering */
- pci_set_master(pdev);
-
- /* Query PCI for Power Mgmt Capabilities
- *
- * NOTE: Now reading PowerMgmt in another location; is this still
- * needed?
- */
- pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (pm_cap == 0) {
- dev_err(&pdev->dev,
- "Cannot find Power Management capabilities\n");
- result = -EIO;
- goto err_release_res;
- }
-
- /* Check the DMA addressing support of this device */
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- pci_using_dac = true;
-
- result = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (result != 0) {
- dev_err(&pdev->dev,
- "Unable to obtain 64 bit DMA for consistent allocations\n");
- goto err_release_res;
- }
- } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- pci_using_dac = false;
- } else {
- dev_err(&pdev->dev,
- "No usable DMA addressing method\n");
- result = -EIO;
- goto err_release_res;
- }
-
- /* Allocate netdev and private adapter structs */
- netdev = et131x_device_alloc();
- if (netdev == NULL) {
- dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
- result = -ENOMEM;
- goto err_release_res;
- }
- adapter = et131x_adapter_init(netdev, pdev);
- /* Initialise the PCI setup for the device */
- et131x_pci_init(adapter, pdev);
-
- /* Map the bus-relative registers to system virtual memory */
- adapter->regs = pci_ioremap_bar(pdev, 0);
- if (adapter->regs == NULL) {
- dev_err(&pdev->dev, "Cannot map device registers\n");
- result = -ENOMEM;
- goto err_free_dev;
- }
-
- /* If Phy COMA mode was enabled when we went down, disable it here. */
- writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
-
- /* Issue a global reset to the et1310 */
- et131x_soft_reset(adapter);
-
- /* Disable all interrupts (paranoid) */
- et131x_disable_interrupts(adapter);
-
- /* Allocate DMA memory */
- result = et131x_adapter_memory_alloc(adapter);
- if (result != 0) {
- dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n");
- goto err_iounmap;
- }
-
- /* Init send data structures */
- et131x_init_send(adapter);
-
- /*
- * Set up the task structure for the ISR's deferred handler
- */
- INIT_WORK(&adapter->task, et131x_isr_handler);
-
- /* Copy address into the net_device struct */
- memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
-
- /* Setup et1310 as per the documentation */
- et131x_adapter_setup(adapter);
-
- /* Create a timer to count errors received by the NIC */
- init_timer(&adapter->ErrorTimer);
-
- adapter->ErrorTimer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
- adapter->ErrorTimer.function = et131x_error_timer_handler;
- adapter->ErrorTimer.data = (unsigned long)adapter;
-
- /* Initialize link state */
- et131x_link_detection_handler((unsigned long)adapter);
-
- /* Initialize variable for counting how long we do not have
- link status */
- adapter->boot_coma = 0;
-
- /* We can enable interrupts now
- *
- * NOTE - Because registration of interrupt handler is done in the
- * device's open(), defer enabling device interrupts to that
- * point
- */
-
- /* Register the net_device struct with the Linux network layer */
- result = register_netdev(netdev);
- if (result != 0) {
- dev_err(&pdev->dev, "register_netdev() failed\n");
- goto err_mem_free;
- }
-
- /* Register the net_device struct with the PCI subsystem. Save a copy
- * of the PCI config space for this device now that the device has
- * been initialized, just in case it needs to be quickly restored.
- */
- pci_set_drvdata(pdev, netdev);
- pci_save_state(adapter->pdev);
- return result;
-
-err_mem_free:
- et131x_adapter_memory_free(adapter);
-err_iounmap:
- iounmap(adapter->regs);
-err_free_dev:
- pci_dev_put(pdev);
- free_netdev(netdev);
-err_release_res:
- pci_release_regions(pdev);
-err_disable:
- pci_disable_device(pdev);
- return result;
-}
-
-/**
- * et131x_pci_remove
- * @pdev: a pointer to the device's pci_dev structure
- *
- * Registered in the pci_driver structure, this function is called when the
- * PCI subsystem detects that a PCI device which matches the information
- * contained in the pci_device_id table has been removed.
- */
-
-static void __devexit et131x_pci_remove(struct pci_dev *pdev)
-{
- struct net_device *netdev;
- struct et131x_adapter *adapter;
-
- /* Retrieve the net_device pointer from the pci_dev struct, as well
- * as the private adapter struct
- */
- netdev = pci_get_drvdata(pdev);
- adapter = netdev_priv(netdev);
-
- /* Perform device cleanup */
- unregister_netdev(netdev);
- et131x_adapter_memory_free(adapter);
- iounmap(adapter->regs);
- pci_dev_put(adapter->pdev);
- free_netdev(netdev);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
-
-static struct pci_device_id et131x_pci_table[] __devinitdata = {
- {ET131X_PCI_VENDOR_ID, ET131X_PCI_DEVICE_ID_GIG, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0UL},
- {ET131X_PCI_VENDOR_ID, ET131X_PCI_DEVICE_ID_FAST, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0UL},
- {0,}
-};
-
-MODULE_DEVICE_TABLE(pci, et131x_pci_table);
-
-static struct pci_driver et131x_driver = {
- .name = DRIVER_NAME,
- .id_table = et131x_pci_table,
- .probe = et131x_pci_setup,
- .remove = __devexit_p(et131x_pci_remove),
- .suspend = NULL, /* et131x_pci_suspend */
- .resume = NULL, /* et131x_pci_resume */
-};
-
-
-/**
- * et131x_init_module - The "main" entry point called on driver initialization
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
-static int __init et131x_init_module(void)
-{
- if (et131x_speed_set < PARM_SPEED_DUPLEX_MIN ||
- et131x_speed_set > PARM_SPEED_DUPLEX_MAX) {
- printk(KERN_WARNING "et131x: invalid speed setting ignored.\n");
- et131x_speed_set = 0;
- }
- return pci_register_driver(&et131x_driver);
-}
-
-/**
- * et131x_cleanup_module - The entry point called on driver cleanup
- */
-static void __exit et131x_cleanup_module(void)
-{
- pci_unregister_driver(&et131x_driver);
-}
-
-module_init(et131x_init_module);
-module_exit(et131x_cleanup_module);
-
-/* Modinfo parameters (filled out using defines from et131x_version.h) */
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_INFO);
-MODULE_LICENSE(DRIVER_LICENSE);
diff --git a/drivers/staging/et131x/et131x_isr.c b/drivers/staging/et131x/et131x_isr.c
deleted file mode 100644
index 9c33209c840..00000000000
--- a/drivers/staging/et131x/et131x_isr.c
+++ /dev/null
@@ -1,480 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et131x_isr.c - File which contains the ISR, ISR handler, and related routines
- * for processing interrupts from the device.
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#include "et131x_version.h"
-#include "et131x_defs.h"
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/pci.h>
-#include <asm/system.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-
-#include "et1310_phy.h"
-#include "et131x_adapter.h"
-#include "et131x.h"
-
-/*
- * For interrupts, normal running is:
- * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
- * watchdog_interrupt & txdma_xfer_done
- *
- * In both cases, when flow control is enabled for either Tx or bi-direction,
- * we additional enable rx_fbr0_low and rx_fbr1_low, so we know when the
- * buffer rings are running low.
- */
-#define INT_MASK_DISABLE 0xffffffff
-
-/* NOTE: Masking out MAC_STAT Interrupt for now...
- * #define INT_MASK_ENABLE 0xfff6bf17
- * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7
- */
-#define INT_MASK_ENABLE 0xfffebf17
-#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7
-
-
-/**
- * et131x_enable_interrupts - enable interrupt
- * @adapter: et131x device
- *
- * Enable the appropriate interrupts on the ET131x according to our
- * configuration
- */
-
-void et131x_enable_interrupts(struct et131x_adapter *adapter)
-{
- u32 mask;
-
- /* Enable all global interrupts */
- if (adapter->flowcontrol == FLOW_TXONLY || adapter->flowcontrol == FLOW_BOTH)
- mask = INT_MASK_ENABLE;
- else
- mask = INT_MASK_ENABLE_NO_FLOW;
-
- adapter->CachedMaskValue = mask;
- writel(mask, &adapter->regs->global.int_mask);
-}
-
-/**
- * et131x_disable_interrupts - interrupt disable
- * @adapter: et131x device
- *
- * Block all interrupts from the et131x device at the device itself
- */
-
-void et131x_disable_interrupts(struct et131x_adapter *adapter)
-{
- /* Disable all global interrupts */
- adapter->CachedMaskValue = INT_MASK_DISABLE;
- writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
-}
-
-
-/**
- * et131x_isr - The Interrupt Service Routine for the driver.
- * @irq: the IRQ on which the interrupt was received.
- * @dev_id: device-specific info (here a pointer to a net_device struct)
- *
- * Returns a value indicating if the interrupt was handled.
- */
-
-irqreturn_t et131x_isr(int irq, void *dev_id)
-{
- bool handled = true;
- struct net_device *netdev = (struct net_device *)dev_id;
- struct et131x_adapter *adapter = NULL;
- u32 status;
-
- if (!netif_device_present(netdev)) {
- handled = false;
- goto out;
- }
-
- adapter = netdev_priv(netdev);
-
- /* If the adapter is in low power state, then it should not
- * recognize any interrupt
- */
-
- /* Disable Device Interrupts */
- et131x_disable_interrupts(adapter);
-
- /* Get a copy of the value in the interrupt status register
- * so we can process the interrupting section
- */
- status = readl(&adapter->regs->global.int_status);
-
- if (adapter->flowcontrol == FLOW_TXONLY ||
- adapter->flowcontrol == FLOW_BOTH) {
- status &= ~INT_MASK_ENABLE;
- } else {
- status &= ~INT_MASK_ENABLE_NO_FLOW;
- }
-
- /* Make sure this is our interrupt */
- if (!status) {
- handled = false;
- et131x_enable_interrupts(adapter);
- goto out;
- }
-
- /* This is our interrupt, so process accordingly */
-
- if (status & ET_INTR_WATCHDOG) {
- struct tcb *tcb = adapter->tx_ring.send_head;
-
- if (tcb)
- if (++tcb->stale > 1)
- status |= ET_INTR_TXDMA_ISR;
-
- if (adapter->rx_ring.UnfinishedReceives)
- status |= ET_INTR_RXDMA_XFR_DONE;
- else if (tcb == NULL)
- writel(0, &adapter->regs->global.watchdog_timer);
-
- status &= ~ET_INTR_WATCHDOG;
- }
-
- if (status == 0) {
- /* This interrupt has in some way been "handled" by
- * the ISR. Either it was a spurious Rx interrupt, or
- * it was a Tx interrupt that has been filtered by
- * the ISR.
- */
- et131x_enable_interrupts(adapter);
- goto out;
- }
-
- /* We need to save the interrupt status value for use in our
- * DPC. We will clear the software copy of that in that
- * routine.
- */
- adapter->stats.InterruptStatus = status;
-
- /* Schedule the ISR handler as a bottom-half task in the
- * kernel's tq_immediate queue, and mark the queue for
- * execution
- */
- schedule_work(&adapter->task);
-out:
- return IRQ_RETVAL(handled);
-}
-
-/**
- * et131x_isr_handler - The ISR handler
- * @p_adapter, a pointer to the device's private adapter structure
- *
- * scheduled to run in a deferred context by the ISR. This is where the ISR's
- * work actually gets done.
- */
-void et131x_isr_handler(struct work_struct *work)
-{
- struct et131x_adapter *etdev =
- container_of(work, struct et131x_adapter, task);
- u32 status = etdev->stats.InterruptStatus;
- struct address_map __iomem *iomem = etdev->regs;
-
- /*
- * These first two are by far the most common. Once handled, we clear
- * their two bits in the status word. If the word is now zero, we
- * exit.
- */
- /* Handle all the completed Transmit interrupts */
- if (status & ET_INTR_TXDMA_ISR)
- et131x_handle_send_interrupt(etdev);
-
- /* Handle all the completed Receives interrupts */
- if (status & ET_INTR_RXDMA_XFR_DONE)
- et131x_handle_recv_interrupt(etdev);
-
- status &= 0xffffffd7;
-
- if (status) {
- /* Handle the TXDMA Error interrupt */
- if (status & ET_INTR_TXDMA_ERR) {
- u32 txdma_err;
-
- /* Following read also clears the register (COR) */
- txdma_err = readl(&iomem->txdma.tx_dma_error);
-
- dev_warn(&etdev->pdev->dev,
- "TXDMA_ERR interrupt, error = %d\n",
- txdma_err);
- }
-
- /* Handle Free Buffer Ring 0 and 1 Low interrupt */
- if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
- /*
- * This indicates the number of unused buffers in
- * RXDMA free buffer ring 0 is <= the limit you
- * programmed. Free buffer resources need to be
- * returned. Free buffers are consumed as packets
- * are passed from the network to the host. The host
- * becomes aware of the packets from the contents of
- * the packet status ring. This ring is queried when
- * the packet done interrupt occurs. Packets are then
- * passed to the OS. When the OS is done with the
- * packets the resources can be returned to the
- * ET1310 for re-use. This interrupt is one method of
- * returning resources.
- */
-
- /* If the user has flow control on, then we will
- * send a pause packet, otherwise just exit
- */
- if (etdev->flowcontrol == FLOW_TXONLY ||
- etdev->flowcontrol == FLOW_BOTH) {
- u32 pm_csr;
-
- /* Tell the device to send a pause packet via
- * the back pressure register (bp req and
- * bp xon/xoff)
- */
- pm_csr = readl(&iomem->global.pm_csr);
- if ((pm_csr & ET_PM_PHY_SW_COMA) == 0)
- writel(3, &iomem->txmac.bp_ctrl);
- }
- }
-
- /* Handle Packet Status Ring Low Interrupt */
- if (status & ET_INTR_RXDMA_STAT_LOW) {
-
- /*
- * Same idea as with the two Free Buffer Rings.
- * Packets going from the network to the host each
- * consume a free buffer resource and a packet status
- * resource. These resoures are passed to the OS.
- * When the OS is done with the resources, they need
- * to be returned to the ET1310. This is one method
- * of returning the resources.
- */
- }
-
- /* Handle RXDMA Error Interrupt */
- if (status & ET_INTR_RXDMA_ERR) {
- /*
- * The rxdma_error interrupt is sent when a time-out
- * on a request issued by the JAGCore has occurred or
- * a completion is returned with an un-successful
- * status. In both cases the request is considered
- * complete. The JAGCore will automatically re-try the
- * request in question. Normally information on events
- * like these are sent to the host using the "Advanced
- * Error Reporting" capability. This interrupt is
- * another way of getting similar information. The
- * only thing required is to clear the interrupt by
- * reading the ISR in the global resources. The
- * JAGCore will do a re-try on the request. Normally
- * you should never see this interrupt. If you start
- * to see this interrupt occurring frequently then
- * something bad has occurred. A reset might be the
- * thing to do.
- */
- /* TRAP();*/
-
- dev_warn(&etdev->pdev->dev,
- "RxDMA_ERR interrupt, error %x\n",
- readl(&iomem->txmac.tx_test));
- }
-
- /* Handle the Wake on LAN Event */
- if (status & ET_INTR_WOL) {
- /*
- * This is a secondary interrupt for wake on LAN.
- * The driver should never see this, if it does,
- * something serious is wrong. We will TRAP the
- * message when we are in DBG mode, otherwise we
- * will ignore it.
- */
- dev_err(&etdev->pdev->dev, "WAKE_ON_LAN interrupt\n");
- }
-
- /* Handle the PHY interrupt */
- if (status & ET_INTR_PHY) {
- u32 pm_csr;
- u16 bmsr_ints;
- u16 bmsr_data;
- u16 myisr;
-
- /* If we are in coma mode when we get this interrupt,
- * we need to disable it.
- */
- pm_csr = readl(&iomem->global.pm_csr);
- if (pm_csr & ET_PM_PHY_SW_COMA) {
- /*
- * Check to see if we are in coma mode and if
- * so, disable it because we will not be able
- * to read PHY values until we are out.
- */
- DisablePhyComa(etdev);
- }
-
- /* Read the PHY ISR to clear the reason for the
- * interrupt.
- */
- MiRead(etdev, (uint8_t) offsetof(struct mi_regs, isr),
- &myisr);
-
- if (!etdev->ReplicaPhyLoopbk) {
- MiRead(etdev,
- (uint8_t) offsetof(struct mi_regs, bmsr),
- &bmsr_data);
-
- bmsr_ints = etdev->bmsr ^ bmsr_data;
- etdev->bmsr = bmsr_data;
-
- /* Do all the cable in / cable out stuff */
- et131x_Mii_check(etdev, bmsr_data, bmsr_ints);
- }
- }
-
- /* Let's move on to the TxMac */
- if (status & ET_INTR_TXMAC) {
- u32 err = readl(&iomem->txmac.err);
-
- /*
- * When any of the errors occur and TXMAC generates
- * an interrupt to report these errors, it usually
- * means that TXMAC has detected an error in the data
- * stream retrieved from the on-chip Tx Q. All of
- * these errors are catastrophic and TXMAC won't be
- * able to recover data when these errors occur. In
- * a nutshell, the whole Tx path will have to be reset
- * and re-configured afterwards.
- */
- dev_warn(&etdev->pdev->dev,
- "TXMAC interrupt, error 0x%08x\n",
- err);
-
- /* If we are debugging, we want to see this error,
- * otherwise we just want the device to be reset and
- * continue
- */
- }
-
- /* Handle RXMAC Interrupt */
- if (status & ET_INTR_RXMAC) {
- /*
- * These interrupts are catastrophic to the device,
- * what we need to do is disable the interrupts and
- * set the flag to cause us to reset so we can solve
- * this issue.
- */
- /* MP_SET_FLAG( etdev,
- fMP_ADAPTER_HARDWARE_ERROR); */
-
- dev_warn(&etdev->pdev->dev,
- "RXMAC interrupt, error 0x%08x. Requesting reset\n",
- readl(&iomem->rxmac.err_reg));
-
- dev_warn(&etdev->pdev->dev,
- "Enable 0x%08x, Diag 0x%08x\n",
- readl(&iomem->rxmac.ctrl),
- readl(&iomem->rxmac.rxq_diag));
-
- /*
- * If we are debugging, we want to see this error,
- * otherwise we just want the device to be reset and
- * continue
- */
- }
-
- /* Handle MAC_STAT Interrupt */
- if (status & ET_INTR_MAC_STAT) {
- /*
- * This means at least one of the un-masked counters
- * in the MAC_STAT block has rolled over. Use this
- * to maintain the top, software managed bits of the
- * counter(s).
- */
- HandleMacStatInterrupt(etdev);
- }
-
- /* Handle SLV Timeout Interrupt */
- if (status & ET_INTR_SLV_TIMEOUT) {
- /*
- * This means a timeout has occurred on a read or
- * write request to one of the JAGCore registers. The
- * Global Resources block has terminated the request
- * and on a read request, returned a "fake" value.
- * The most likely reasons are: Bad Address or the
- * addressed module is in a power-down state and
- * can't respond.
- */
- }
- }
- et131x_enable_interrupts(etdev);
-}
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c
deleted file mode 100644
index 5f25bbad36b..00000000000
--- a/drivers/staging/et131x/et131x_netdev.c
+++ /dev/null
@@ -1,686 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et131x_netdev.c - Routines and data required by all Linux network devices.
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#include "et131x_version.h"
-#include "et131x_defs.h"
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/pci.h>
-#include <asm/system.h>
-
-#include <linux/mii.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-
-#include "et1310_phy.h"
-#include "et1310_tx.h"
-#include "et131x_adapter.h"
-#include "et131x.h"
-
-/**
- * et131x_stats - Return the current device statistics.
- * @netdev: device whose stats are being queried
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
-static struct net_device_stats *et131x_stats(struct net_device *netdev)
-{
- struct et131x_adapter *adapter = netdev_priv(netdev);
- struct net_device_stats *stats = &adapter->net_stats;
- struct ce_stats *devstat = &adapter->stats;
-
- stats->rx_errors = devstat->length_err + devstat->alignment_err +
- devstat->crc_err + devstat->code_violations + devstat->other_errors;
- stats->tx_errors = devstat->max_pkt_error;
- stats->multicast = devstat->multircv;
- stats->collisions = devstat->collisions;
-
- stats->rx_length_errors = devstat->length_err;
- stats->rx_over_errors = devstat->rx_ov_flow;
- stats->rx_crc_errors = devstat->crc_err;
-
- /* NOTE: These stats don't have corresponding values in CE_STATS,
- * so we're going to have to update these directly from within the
- * TX/RX code
- */
- /* stats->rx_bytes = 20; devstat->; */
- /* stats->tx_bytes = 20; devstat->; */
- /* stats->rx_dropped = devstat->; */
- /* stats->tx_dropped = devstat->; */
-
- /* NOTE: Not used, can't find analogous statistics */
- /* stats->rx_frame_errors = devstat->; */
- /* stats->rx_fifo_errors = devstat->; */
- /* stats->rx_missed_errors = devstat->; */
-
- /* stats->tx_aborted_errors = devstat->; */
- /* stats->tx_carrier_errors = devstat->; */
- /* stats->tx_fifo_errors = devstat->; */
- /* stats->tx_heartbeat_errors = devstat->; */
- /* stats->tx_window_errors = devstat->; */
- return stats;
-}
-
-/**
- * et131x_open - Open the device for use.
- * @netdev: device to be opened
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
-int et131x_open(struct net_device *netdev)
-{
- int result = 0;
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
- /* Start the timer to track NIC errors */
- add_timer(&adapter->ErrorTimer);
-
- /* Register our IRQ */
- result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED,
- netdev->name, netdev);
- if (result) {
- dev_err(&adapter->pdev->dev, "c ould not register IRQ %d\n",
- netdev->irq);
- return result;
- }
-
- /* Enable the Tx and Rx DMA engines (if not already enabled) */
- et131x_rx_dma_enable(adapter);
- et131x_tx_dma_enable(adapter);
-
- /* Enable device interrupts */
- et131x_enable_interrupts(adapter);
-
- adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
-
- /* We're ready to move some data, so start the queue */
- netif_start_queue(netdev);
- return result;
-}
-
-/**
- * et131x_close - Close the device
- * @netdev: device to be closed
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
-int et131x_close(struct net_device *netdev)
-{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
- /* First thing is to stop the queue */
- netif_stop_queue(netdev);
-
- /* Stop the Tx and Rx DMA engines */
- et131x_rx_dma_disable(adapter);
- et131x_tx_dma_disable(adapter);
-
- /* Disable device interrupts */
- et131x_disable_interrupts(adapter);
-
- /* Deregistering ISR */
- adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
- free_irq(netdev->irq, netdev);
-
- /* Stop the error timer */
- del_timer_sync(&adapter->ErrorTimer);
- return 0;
-}
-
-/**
- * et131x_ioctl_mii - The function which handles MII IOCTLs
- * @netdev: device on which the query is being made
- * @reqbuf: the request-specific data buffer
- * @cmd: the command request code
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
-int et131x_ioctl_mii(struct net_device *netdev, struct ifreq *reqbuf, int cmd)
-{
- int status = 0;
- struct et131x_adapter *etdev = netdev_priv(netdev);
- struct mii_ioctl_data *data = if_mii(reqbuf);
-
- switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = etdev->stats.xcvr_addr;
- break;
-
- case SIOCGMIIREG:
- if (!capable(CAP_NET_ADMIN))
- status = -EPERM;
- else
- status = MiRead(etdev,
- data->reg_num, &data->val_out);
- break;
-
- case SIOCSMIIREG:
- if (!capable(CAP_NET_ADMIN))
- status = -EPERM;
- else
- status = MiWrite(etdev, data->reg_num,
- data->val_in);
- break;
-
- default:
- status = -EOPNOTSUPP;
- }
- return status;
-}
-
-/**
- * et131x_ioctl - The I/O Control handler for the driver
- * @netdev: device on which the control request is being made
- * @reqbuf: a pointer to the IOCTL request buffer
- * @cmd: the IOCTL command code
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
-int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd)
-{
- int status = 0;
-
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- status = et131x_ioctl_mii(netdev, reqbuf, cmd);
- break;
-
- default:
- status = -EOPNOTSUPP;
- }
- return status;
-}
-
-/**
- * et131x_set_packet_filter - Configures the Rx Packet filtering on the device
- * @adapter: pointer to our private adapter structure
- *
- * FIXME: lot of dups with MAC code
- *
- * Returns 0 on success, errno on failure
- */
-int et131x_set_packet_filter(struct et131x_adapter *adapter)
-{
- int status = 0;
- uint32_t filter = adapter->PacketFilter;
- u32 ctrl;
- u32 pf_ctrl;
-
- ctrl = readl(&adapter->regs->rxmac.ctrl);
- pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
-
- /* Default to disabled packet filtering. Enable it in the individual
- * case statements that require the device to filter something
- */
- ctrl |= 0x04;
-
- /* Set us to be in promiscuous mode so we receive everything, this
- * is also true when we get a packet filter of 0
- */
- if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
- pf_ctrl &= ~7; /* Clear filter bits */
- else {
- /*
- * Set us up with Multicast packet filtering. Three cases are
- * possible - (1) we have a multi-cast list, (2) we receive ALL
- * multicast entries or (3) we receive none.
- */
- if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
- pf_ctrl &= ~2; /* Multicast filter bit */
- else {
- SetupDeviceForMulticast(adapter);
- pf_ctrl |= 2;
- ctrl &= ~0x04;
- }
-
- /* Set us up with Unicast packet filtering */
- if (filter & ET131X_PACKET_TYPE_DIRECTED) {
- SetupDeviceForUnicast(adapter);
- pf_ctrl |= 4;
- ctrl &= ~0x04;
- }
-
- /* Set us up with Broadcast packet filtering */
- if (filter & ET131X_PACKET_TYPE_BROADCAST) {
- pf_ctrl |= 1; /* Broadcast filter bit */
- ctrl &= ~0x04;
- } else
- pf_ctrl &= ~1;
-
- /* Setup the receive mac configuration registers - Packet
- * Filter control + the enable / disable for packet filter
- * in the control reg.
- */
- writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
- writel(ctrl, &adapter->regs->rxmac.ctrl);
- }
- return status;
-}
-
-/**
- * et131x_multicast - The handler to configure multicasting on the interface
- * @netdev: a pointer to a net_device struct representing the device
- */
-void et131x_multicast(struct net_device *netdev)
-{
- struct et131x_adapter *adapter = netdev_priv(netdev);
- uint32_t PacketFilter = 0;
- unsigned long flags;
- struct netdev_hw_addr *ha;
- int i;
-
- spin_lock_irqsave(&adapter->Lock, flags);
-
- /* Before we modify the platform-independent filter flags, store them
- * locally. This allows us to determine if anything's changed and if
- * we even need to bother the hardware
- */
- PacketFilter = adapter->PacketFilter;
-
- /* Clear the 'multicast' flag locally; because we only have a single
- * flag to check multicast, and multiple multicast addresses can be
- * set, this is the easiest way to determine if more than one
- * multicast address is being set.
- */
- PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST;
-
- /* Check the net_device flags and set the device independent flags
- * accordingly
- */
-
- if (netdev->flags & IFF_PROMISC)
- adapter->PacketFilter |= ET131X_PACKET_TYPE_PROMISCUOUS;
- else
- adapter->PacketFilter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
-
- if (netdev->flags & IFF_ALLMULTI)
- adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
-
- if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
- adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
-
- if (netdev_mc_count(netdev) < 1) {
- adapter->PacketFilter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
- adapter->PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST;
- } else
- adapter->PacketFilter |= ET131X_PACKET_TYPE_MULTICAST;
-
- /* Set values in the private adapter struct */
- i = 0;
- netdev_for_each_mc_addr(ha, netdev) {
- if (i == NIC_MAX_MCAST_LIST)
- break;
- memcpy(adapter->MCList[i++], ha->addr, ETH_ALEN);
- }
- adapter->MCAddressCount = i;
-
- /* Are the new flags different from the previous ones? If not, then no
- * action is required
- *
- * NOTE - This block will always update the MCList with the hardware,
- * even if the addresses aren't the same.
- */
- if (PacketFilter != adapter->PacketFilter) {
- /* Call the device's filter function */
- et131x_set_packet_filter(adapter);
- }
- spin_unlock_irqrestore(&adapter->Lock, flags);
-}
-
-/**
- * et131x_tx - The handler to tx a packet on the device
- * @skb: data to be Tx'd
- * @netdev: device on which data is to be Tx'd
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
-int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
-{
- int status = 0;
-
- /* Save the timestamp for the TX timeout watchdog */
- netdev->trans_start = jiffies;
-
- /* Call the device-specific data Tx routine */
- status = et131x_send_packets(skb, netdev);
-
- /* Check status and manage the netif queue if necessary */
- if (status != 0) {
- if (status == -ENOMEM) {
- /* Put the queue to sleep until resources are
- * available
- */
- netif_stop_queue(netdev);
- status = NETDEV_TX_BUSY;
- } else {
- status = NETDEV_TX_OK;
- }
- }
- return status;
-}
-
-/**
- * et131x_tx_timeout - Timeout handler
- * @netdev: a pointer to a net_device struct representing the device
- *
- * The handler called when a Tx request times out. The timeout period is
- * specified by the 'tx_timeo" element in the net_device structure (see
- * et131x_alloc_device() to see how this value is set).
- */
-void et131x_tx_timeout(struct net_device *netdev)
-{
- struct et131x_adapter *etdev = netdev_priv(netdev);
- struct tcb *tcb;
- unsigned long flags;
-
- /* Any nonrecoverable hardware error?
- * Checks adapter->flags for any failure in phy reading
- */
- if (etdev->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
- return;
-
- /* Hardware failure? */
- if (etdev->flags & fMP_ADAPTER_HARDWARE_ERROR) {
- dev_err(&etdev->pdev->dev, "hardware error - reset\n");
- return;
- }
-
- /* Is send stuck? */
- spin_lock_irqsave(&etdev->TCBSendQLock, flags);
-
- tcb = etdev->tx_ring.send_head;
-
- if (tcb != NULL) {
- tcb->count++;
-
- if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
- spin_unlock_irqrestore(&etdev->TCBSendQLock,
- flags);
-
- dev_warn(&etdev->pdev->dev,
- "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
- tcb->index,
- tcb->flags);
-
- et131x_close(netdev);
- et131x_open(netdev);
-
- return;
- }
- }
-
- spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
-}
-
-/**
- * et131x_change_mtu - The handler called to change the MTU for the device
- * @netdev: device whose MTU is to be changed
- * @new_mtu: the desired MTU
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
-int et131x_change_mtu(struct net_device *netdev, int new_mtu)
-{
- int result = 0;
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
- /* Make sure the requested MTU is valid */
- if (new_mtu < 64 || new_mtu > 9216)
- return -EINVAL;
-
- /* Stop the netif queue */
- netif_stop_queue(netdev);
-
- /* Stop the Tx and Rx DMA engines */
- et131x_rx_dma_disable(adapter);
- et131x_tx_dma_disable(adapter);
-
- /* Disable device interrupts */
- et131x_disable_interrupts(adapter);
- et131x_handle_send_interrupt(adapter);
- et131x_handle_recv_interrupt(adapter);
-
- /* Set the new MTU */
- netdev->mtu = new_mtu;
-
- /* Free Rx DMA memory */
- et131x_adapter_memory_free(adapter);
-
- /* Set the config parameter for Jumbo Packet support */
- adapter->RegistryJumboPacket = new_mtu + 14;
- et131x_soft_reset(adapter);
-
- /* Alloc and init Rx DMA memory */
- result = et131x_adapter_memory_alloc(adapter);
- if (result != 0) {
- dev_warn(&adapter->pdev->dev,
- "Change MTU failed; couldn't re-alloc DMA memory\n");
- return result;
- }
-
- et131x_init_send(adapter);
-
- et131x_hwaddr_init(adapter);
- memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
-
- /* Init the device with the new settings */
- et131x_adapter_setup(adapter);
-
- /* Enable interrupts */
- if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
- et131x_enable_interrupts(adapter);
-
- /* Restart the Tx and Rx DMA engines */
- et131x_rx_dma_enable(adapter);
- et131x_tx_dma_enable(adapter);
-
- /* Restart the netif queue */
- netif_wake_queue(netdev);
- return result;
-}
-
-/**
- * et131x_set_mac_addr - handler to change the MAC address for the device
- * @netdev: device whose MAC is to be changed
- * @new_mac: the desired MAC address
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- *
- * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
- */
-int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
-{
- int result = 0;
- struct et131x_adapter *adapter = netdev_priv(netdev);
- struct sockaddr *address = new_mac;
-
- /* begin blux */
-
- if (adapter == NULL)
- return -ENODEV;
-
- /* Make sure the requested MAC is valid */
- if (!is_valid_ether_addr(address->sa_data))
- return -EINVAL;
-
- /* Stop the netif queue */
- netif_stop_queue(netdev);
-
- /* Stop the Tx and Rx DMA engines */
- et131x_rx_dma_disable(adapter);
- et131x_tx_dma_disable(adapter);
-
- /* Disable device interrupts */
- et131x_disable_interrupts(adapter);
- et131x_handle_send_interrupt(adapter);
- et131x_handle_recv_interrupt(adapter);
-
- /* Set the new MAC */
- /* netdev->set_mac_address = &new_mac; */
- /* netdev->mtu = new_mtu; */
-
- memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
-
- printk(KERN_INFO "%s: Setting MAC address to %pM\n",
- netdev->name, netdev->dev_addr);
-
- /* Free Rx DMA memory */
- et131x_adapter_memory_free(adapter);
-
- /* Set the config parameter for Jumbo Packet support */
- /* adapter->RegistryJumboPacket = new_mtu + 14; */
- /* blux: not needet here, we'll change the MAC */
-
- et131x_soft_reset(adapter);
-
- /* Alloc and init Rx DMA memory */
- result = et131x_adapter_memory_alloc(adapter);
- if (result != 0) {
- dev_err(&adapter->pdev->dev,
- "Change MAC failed; couldn't re-alloc DMA memory\n");
- return result;
- }
-
- et131x_init_send(adapter);
-
- et131x_hwaddr_init(adapter);
-
- /* Init the device with the new settings */
- et131x_adapter_setup(adapter);
-
- /* Enable interrupts */
- if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
- et131x_enable_interrupts(adapter);
-
- /* Restart the Tx and Rx DMA engines */
- et131x_rx_dma_enable(adapter);
- et131x_tx_dma_enable(adapter);
-
- /* Restart the netif queue */
- netif_wake_queue(netdev);
- return result;
-}
-
-static const struct net_device_ops et131x_netdev_ops = {
- .ndo_open = et131x_open,
- .ndo_stop = et131x_close,
- .ndo_start_xmit = et131x_tx,
- .ndo_set_multicast_list = et131x_multicast,
- .ndo_tx_timeout = et131x_tx_timeout,
- .ndo_change_mtu = et131x_change_mtu,
- .ndo_set_mac_address = et131x_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_get_stats = et131x_stats,
- .ndo_do_ioctl = et131x_ioctl,
-};
-
-/**
- * et131x_device_alloc
- *
- * Returns pointer to the allocated and initialized net_device struct for
- * this device.
- *
- * Create instances of net_device and wl_private for the new adapter and
- * register the device's entry points in the net_device structure.
- */
-struct net_device *et131x_device_alloc(void)
-{
- struct net_device *netdev;
-
- /* Alloc net_device and adapter structs */
- netdev = alloc_etherdev(sizeof(struct et131x_adapter));
-
- if (netdev == NULL) {
- printk(KERN_ERR "et131x: Alloc of net_device struct failed\n");
- return NULL;
- }
-
- /* Setup the function registration table (and other data) for a
- * net_device
- */
- /* netdev->init = &et131x_init; */
- /* netdev->set_config = &et131x_config; */
- netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
- netdev->netdev_ops = &et131x_netdev_ops;
-
- /* netdev->ethtool_ops = &et131x_ethtool_ops; */
-
- /* Poll? */
- /* netdev->poll = &et131x_poll; */
- /* netdev->poll_controller = &et131x_poll_controller; */
- return netdev;
-}
-
diff --git a/drivers/staging/et131x/et131x_version.h b/drivers/staging/et131x/et131x_version.h
deleted file mode 100644
index 2aa9bda44ac..00000000000
--- a/drivers/staging/et131x/et131x_version.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Agere Systems Inc.
- * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- *------------------------------------------------------------------------------
- *
- * et131x_version.h - This file provides system and device version information.
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2005 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#ifndef __ET131X_VERSION_H__
-#define __ET131X_VERSION_H__
-
-#define DRIVER_AUTHOR "Victor Soriano (vjsoriano@agere.com)"
-#define DRIVER_LICENSE "Dual BSD/GPL"
-#define DRIVER_DEVICE_STRING "ET1310"
-#define DRIVER_NAME "et131x"
-#define DRIVER_VERSION_STRING "1.2.3-lk"
-#define DRIVER_VENDOR "Agere Systems, http://www.agere.com"
-#define DRIVER_DESC "10/100/1000 Base-T Ethernet Driver"
-
-#define DRIVER_INFO DRIVER_DESC " for the "\
- DRIVER_DEVICE_STRING ", v" \
- DRIVER_VERSION_STRING " by " \
- DRIVER_VENDOR
-
-#endif /* __ET131X_VERSION_H__ */
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
index 3b0130fe608..adb436ed251 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
@@ -68,6 +68,16 @@ struct ft1000_info {
char netdevname[IFNAMSIZ];
};
+struct pcmcia_device;
+struct net_device;
+extern struct net_device *init_ft1000_card(struct pcmcia_device *link,
+ void *ft1000_reset);
+extern void stop_ft1000_card(struct net_device *dev);
+extern int card_download(struct net_device *dev, const u8 *pFileStart,
+ size_t FileLength);
+extern void ft1000InitProc(struct net_device *dev);
+extern void ft1000CleanupProc(struct net_device *dev);
+
extern u16 ft1000_read_dpram(struct net_device *dev, int offset);
extern void card_bootload(struct net_device *dev);
extern u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index);
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
index 6a1c1d4dcca..f376ca43720 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
@@ -41,14 +41,12 @@ MODULE_LICENSE("GPL");
/*====================================================================*/
-struct net_device *init_ft1000_card(struct pcmcia_device *link,
- void *ft1000_reset);
-void stop_ft1000_card(struct net_device *);
-
static int ft1000_config(struct pcmcia_device *link);
static void ft1000_detach(struct pcmcia_device *link);
static int ft1000_attach(struct pcmcia_device *link);
+#include "ft1000.h"
+
/*====================================================================*/
static void ft1000_reset(struct pcmcia_device *link)
@@ -75,7 +73,7 @@ static void ft1000_detach(struct pcmcia_device *link)
free_netdev(dev);
}
-int ft1000_confcheck(struct pcmcia_device *link, void *priv_data)
+static int ft1000_confcheck(struct pcmcia_device *link, void *priv_data)
{
return pcmcia_request_io(link);
}
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
index c956857e2d5..f8b8e71284d 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
@@ -287,7 +287,8 @@ u16 hdr_checksum(struct pseudo_hdr *pHdr)
return chksum;
}
-int card_download(struct net_device *dev, const u8 *pFileStart, u32 FileLength)
+int card_download(struct net_device *dev, const u8 *pFileStart,
+ size_t FileLength)
{
struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
int Status = SUCCESS;
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index 990b2afb3d6..b3d743a3d30 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -56,16 +56,11 @@
#include <linux/delay.h>
#include "ft1000.h"
-int card_download(struct net_device *dev, const u8 *pFileStart, u32 FileLength);
-
-void ft1000InitProc(struct net_device *dev);
-void ft1000CleanupProc(struct net_device *dev);
-
-const struct firmware *fw_entry;
+static const struct firmware *fw_entry;
static void ft1000_hbchk(u_long data);
static struct timer_list poll_timer = {
- function:ft1000_hbchk
+ .function = ft1000_hbchk
};
static u16 cmdbuffer[1024];
@@ -788,7 +783,7 @@ static void ft1000_hbchk(u_long data)
// Output:
//
//---------------------------------------------------------------------------
-void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qtype)
+static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qtype)
{
struct ft1000_info *info = netdev_priv(dev);
int i;
@@ -873,7 +868,8 @@ void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qt
// = 1 (successful)
//
//---------------------------------------------------------------------------
-bool ft1000_receive_cmd(struct net_device *dev, u16 * pbuffer, int maxsz, u16 *pnxtph)
+static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
+ int maxsz, u16 *pnxtph)
{
struct ft1000_info *info = netdev_priv(dev);
u16 size;
@@ -966,7 +962,7 @@ bool ft1000_receive_cmd(struct net_device *dev, u16 * pbuffer, int maxsz, u16 *p
// none
//
//---------------------------------------------------------------------------
-void ft1000_proc_drvmsg(struct net_device *dev)
+static void ft1000_proc_drvmsg(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
u16 msgtype;
@@ -1245,7 +1241,7 @@ void ft1000_proc_drvmsg(struct net_device *dev)
// SUCCESS
//
//---------------------------------------------------------------------------
-int ft1000_parse_dpram_msg(struct net_device *dev)
+static int ft1000_parse_dpram_msg(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
u16 doorbell;
@@ -1546,7 +1542,7 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
// SUCCESS
//
//---------------------------------------------------------------------------
-int ft1000_copy_up_pkt(struct net_device *dev)
+static int ft1000_copy_up_pkt(struct net_device *dev)
{
u16 tempword;
struct ft1000_info *info = netdev_priv(dev);
@@ -1734,7 +1730,7 @@ int ft1000_copy_up_pkt(struct net_device *dev)
// SUCCESS
//
//---------------------------------------------------------------------------
-int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
+static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
{
struct ft1000_info *info = netdev_priv(dev);
union {
@@ -2102,7 +2098,7 @@ static const struct ethtool_ops ops = {
};
struct net_device *init_ft1000_card(struct pcmcia_device *link,
- void *ft1000_reset)
+ void *ft1000_reset)
{
struct ft1000_info *info;
struct net_device *dev;
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index 9e728b3415e..7faeadad1ff 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -48,8 +48,8 @@
} \
len += snprintf(page+len, PAGE_SIZE - len, "%d\n", var[i])
-int ft1000ReadProc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int ft1000ReadProc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
{
struct net_device *dev;
int len;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index 3f303ea1433..aaf44c35982 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -485,7 +485,7 @@ void card_send_command(struct ft1000_device *ft1000dev, void *ptempbuffer,
DEBUG("card_send_command: enter card_send_command... size=%d\n", size);
- commandbuf = (unsigned char *)kmalloc(size + 2, GFP_KERNEL);
+ commandbuf = kmalloc(size + 2, GFP_KERNEL);
memcpy((void *)commandbuf + 2, (void *)ptempbuffer, size);
ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL);
diff --git a/drivers/staging/gma500/accel_2d.c b/drivers/staging/gma500/accel_2d.c
index 14400fcfe8a..114b99a1ce1 100644
--- a/drivers/staging/gma500/accel_2d.c
+++ b/drivers/staging/gma500/accel_2d.c
@@ -111,14 +111,15 @@ static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
int ret = 0;
int i;
unsigned submit_size;
+ unsigned long flags;
- mutex_lock(&dev_priv->mutex_2d);
+ spin_lock_irqsave(&dev_priv->lock_2d, flags);
while (size > 0) {
submit_size = (size < 0x60) ? size : 0x60;
size -= submit_size;
ret = psb_2d_wait_available(dev_priv, submit_size);
if (ret)
- break;
+ break;
submit_size <<= 2;
@@ -127,7 +128,7 @@ static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
(void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
}
- mutex_unlock(&dev_priv->mutex_2d);
+ spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
return ret;
}
@@ -327,8 +328,9 @@ int psbfb_sync(struct fb_info *info)
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long _end = jiffies + DRM_HZ;
int busy = 0;
+ unsigned long flags;
- mutex_lock(&dev_priv->mutex_2d);
+ spin_lock_irqsave(&dev_priv->lock_2d, flags);
/*
* First idle the 2D engine.
*/
@@ -357,7 +359,7 @@ int psbfb_sync(struct fb_info *info)
_PSB_C2B_STATUS_BUSY) != 0);
out:
- mutex_unlock(&dev_priv->mutex_2d);
+ spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
return (busy) ? -EBUSY : 0;
}
@@ -388,19 +390,19 @@ int psb_accel_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; i < op->size; i++, op_ptr++) {
u32 r = *op_ptr & 0xF0000000;
/* Fill in the GTT offsets for the command buffer */
- if (r == PSB_2D_SRC_SURF_BH ||
- r == PSB_2D_DST_SURF_BH ||
+ if (r == PSB_2D_SRC_SURF_BH ||
+ r == PSB_2D_DST_SURF_BH ||
r == PSB_2D_MASK_SURF_BH ||
r == PSB_2D_PAT_SURF_BH) {
i++;
op_ptr++;
if (i == op->size)
goto bad;
- if (*op_ptr)
+ if (*op_ptr)
goto bad;
- *op_ptr = gtt->offset;
- continue;
- }
+ *op_ptr = gtt->offset;
+ continue;
+ }
}
psbfb_2d_submit(dev_priv, op->cmd, op->size);
err = 0;
diff --git a/drivers/staging/gma500/cdv_device.c b/drivers/staging/gma500/cdv_device.c
index 87614e0d396..8ec10caab13 100644
--- a/drivers/staging/gma500/cdv_device.c
+++ b/drivers/staging/gma500/cdv_device.c
@@ -30,7 +30,6 @@
#define VGA_SR_INDEX 0x3c4
#define VGA_SR_DATA 0x3c5
-/* FIXME: should check if we are the active VGA device ?? */
static void cdv_disable_vga(struct drm_device *dev)
{
u8 sr1;
diff --git a/drivers/staging/gma500/framebuffer.c b/drivers/staging/gma500/framebuffer.c
index ebfde13ec18..3f39a37456f 100644
--- a/drivers/staging/gma500/framebuffer.c
+++ b/drivers/staging/gma500/framebuffer.c
@@ -37,6 +37,7 @@
#include "psb_intel_reg.h"
#include "psb_intel_drv.h"
#include "framebuffer.h"
+#include "gtt.h"
#include "mdfld_output.h"
@@ -91,6 +92,21 @@ static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 0;
}
+static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_device *dev = psbfb->base.dev;
+
+ /*
+ * We have to poke our nose in here. The core fb code assumes
+ * panning is part of the hardware that can be invoked before
+ * the actual fb is mapped. In our case that isn't quite true.
+ */
+ if (psbfb->gtt->npage)
+ psb_gtt_roll(dev, psbfb->gtt, var->yoffset);
+ return 0;
+}
void psbfb_suspend(struct drm_device *dev)
{
@@ -217,6 +233,21 @@ static struct fb_ops psbfb_ops = {
.fb_ioctl = psbfb_ioctl,
};
+static struct fb_ops psbfb_roll_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcolreg = psbfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_pan_display = psbfb_pan,
+ .fb_mmap = psbfb_mmap,
+ .fb_sync = psbfb_sync,
+ .fb_ioctl = psbfb_ioctl,
+};
+
static struct fb_ops psbfb_unaccel_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -304,6 +335,7 @@ static struct drm_framebuffer *psb_framebuffer_create
* psbfb_alloc - allocate frame buffer memory
* @dev: the DRM device
* @aligned_size: space needed
+ * @force: fall back to GEM buffers if need be
*
* Allocate the frame buffer. In the usual case we get a GTT range that
* is stolen memory backed and life is simple. If there isn't sufficient
@@ -311,11 +343,9 @@ static struct drm_framebuffer *psb_framebuffer_create
* and back it with a GEM object.
*
* In this case the GEM object has no handle.
- *
- * FIXME: console speed up - allocate twice the space if room and use
- * hardware scrolling for acceleration.
*/
-static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
+static struct gtt_range *psbfb_alloc(struct drm_device *dev,
+ int aligned_size, int force)
{
struct gtt_range *backing;
/* Begin by trying to use stolen memory backing */
@@ -326,6 +356,9 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
return backing;
psb_gtt_free_range(dev, backing);
}
+ if (!force)
+ return NULL;
+
/* Next try using GEM host memory */
backing = psb_gtt_alloc_range(dev, aligned_size, "fb(gem)", 0);
if (backing == NULL)
@@ -359,6 +392,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
int size;
int ret;
struct gtt_range *backing;
+ int gtt_roll = 1;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
@@ -368,17 +402,37 @@ static int psbfb_create(struct psb_fbdev *fbdev,
if (mode_cmd.bpp == 24)
mode_cmd.bpp = 32;
- /* HW requires pitch to be 64 byte aligned */
- mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
+ /* Acceleration via the GTT requires pitch to be 4096 byte aligned
+ (ie 1024 or 2048 pixels in normal use) */
+ mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 4096);
mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
/* Allocate the framebuffer in the GTT with stolen page backing */
- backing = psbfb_alloc(dev, size);
- if (backing == NULL)
- return -ENOMEM;
+ backing = psbfb_alloc(dev, size, 0);
+ if (backing == NULL) {
+ /*
+ * We couldn't get the space we wanted, fall back to the
+ * display engine requirement instead. The HW requires
+ * the pitch to be 64 byte aligned
+ */
+
+ gtt_roll = 0; /* Don't use GTT accelerated scrolling */
+
+ mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
+ mode_cmd.depth = sizes->surface_depth;
+
+ size = mode_cmd.pitch * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ /* Allocate the framebuffer in the GTT with stolen page
+ backing when there is room */
+ backing = psbfb_alloc(dev, size, 1);
+ if (backing == NULL)
+ return -ENOMEM;
+ }
mutex_lock(&dev->struct_mutex);
@@ -402,11 +456,14 @@ static int psbfb_create(struct psb_fbdev *fbdev,
strcpy(info->fix.id, "psbfb");
info->flags = FBINFO_DEFAULT;
- /* No 2D engine */
- if (!dev_priv->ops->accel_2d)
- info->fbops = &psbfb_unaccel_ops;
- else
+ if (gtt_roll) { /* GTT rolling seems best */
+ info->fbops = &psbfb_roll_ops;
+ info->flags |= FBINFO_HWACCEL_YPAN;
+ }
+ else if (dev_priv->ops->accel_2d) /* 2D engine */
info->fbops = &psbfb_ops;
+ else /* Software */
+ info->fbops = &psbfb_unaccel_ops;
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
@@ -416,6 +473,8 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->fix.smem_start = dev->mode_config.fb_base;
info->fix.smem_len = size;
+ info->fix.ywrapstep = gtt_roll;
+ info->fix.ypanstep = gtt_roll;
if (backing->stolen) {
/* Accessed stolen memory directly */
@@ -733,9 +792,12 @@ static void psb_setup_outputs(struct drm_device *dev)
clone_mask = (1 << INTEL_OUTPUT_MIPI2);
break;
case INTEL_OUTPUT_HDMI:
- if (IS_MFLD(dev))
+ /* HDMI on crtc 1 for SoC devices and crtc 0 for
+ Cedarview. HDMI on Poulsbo is only via external
+ logic */
+ if (IS_MFLD(dev) || IS_MRST(dev))
crtc_mask = (1 << 1);
- else /* FIXME: review Oaktrail */
+ else
crtc_mask = (1 << 0); /* Cedarview */
clone_mask = (1 << INTEL_OUTPUT_HDMI);
break;
diff --git a/drivers/staging/gma500/gem.c b/drivers/staging/gma500/gem.c
index 65fdd6b8ab1..f6433c037d2 100644
--- a/drivers/staging/gma500/gem.c
+++ b/drivers/staging/gma500/gem.c
@@ -120,8 +120,7 @@ static int psb_gem_create(struct drm_file *file,
/* Initialize the extra goodies GEM needs to do all the hard work */
if (drm_gem_object_init(dev, &r->gem, size) != 0) {
psb_gtt_free_range(dev, r);
- /* GEM doesn't give an error code and we don't have an
- EGEMSUCKS so make something up for now - FIXME */
+ /* GEM doesn't give an error code so use -ENOMEM */
dev_err(dev->dev, "GEM init failed for %lld\n", size);
return -ENOMEM;
}
@@ -191,8 +190,6 @@ int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
* The VMA was set up by GEM. In doing so it also ensured that the
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
- *
- * FIXME
*/
int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
diff --git a/drivers/staging/gma500/gtt.c b/drivers/staging/gma500/gtt.c
index 461ead251bb..e770bd190a5 100644
--- a/drivers/staging/gma500/gtt.c
+++ b/drivers/staging/gma500/gtt.c
@@ -72,9 +72,8 @@ u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
* @r: our GTT range
*
* Take our preallocated GTT range and insert the GEM object into
- * the GTT.
- *
- * FIXME: gtt lock ?
+ * the GTT. This is protected via the gtt mutex which the caller
+ * must hold.
*/
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
{
@@ -96,12 +95,17 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
set_pages_array_uc(pages, r->npage);
/* Write our page entries into the GTT itself */
- for (i = 0; i < r->npage; i++) {
- pte = psb_gtt_mask_pte(page_to_pfn(*pages++), 0/*type*/);
+ for (i = r->roll; i < r->npage; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ for (i = 0; i < r->roll; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
iowrite32(pte, gtt_slot++);
}
/* Make sure all the entries are set before we return */
ioread32(gtt_slot - 1);
+
return 0;
}
@@ -111,9 +115,9 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
* @r: our GTT range
*
* Remove a preallocated GTT range from the GTT. Overwrite all the
- * page table entries with the dummy page
+ * page table entries with the dummy page. This is protected via the gtt
+ * mutex which the caller must hold.
*/
-
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -132,11 +136,52 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
}
/**
+ * psb_gtt_roll - set scrolling position
+ * @dev: our DRM device
+ * @r: the gtt mapping we are using
+ * @roll: roll offset
+ *
+ * Roll an existing pinned mapping by moving the pages through the GTT.
+ * This allows us to implement hardware scrolling on the consoles without
+ * a 2D engine
+ */
+void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
+{
+ u32 *gtt_slot, pte;
+ int i;
+
+ if (roll >= r->npage) {
+ WARN_ON(1);
+ return;
+ }
+
+ r->roll = roll;
+
+ /* Not currently in the GTT - no worry we will write the mapping at
+ the right position when it gets pinned */
+ if (!r->stolen && !r->in_gart)
+ return;
+
+ gtt_slot = psb_gtt_entry(dev, r);
+
+ for (i = r->roll; i < r->npage; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ for (i = 0; i < r->roll; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ ioread32(gtt_slot - 1);
+}
+
+/**
* psb_gtt_attach_pages - attach and pin GEM pages
* @gt: the gtt range
*
* Pin and build an in kernel list of the pages that back our GEM object.
- * While we hold this the pages cannot be swapped out
+ * While we hold this the pages cannot be swapped out. This is protected
+ * via the gtt mutex which the caller must hold.
*/
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
@@ -158,7 +203,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
gt->npage = pages;
for (i = 0; i < pages; i++) {
- /* FIXME: review flags later */
+ /* FIXME: needs updating as per mail from Hugh Dickins */
p = read_cache_page_gfp(mapping, i,
__GFP_COLD | GFP_KERNEL);
if (IS_ERR(p))
@@ -181,7 +226,8 @@ err:
*
* Undo the effect of psb_gtt_attach_pages. At this point the pages
* must have been removed from the GTT as they could now be paged out
- * and move bus address.
+ * and move bus address. This is protected via the gtt mutex which the
+ * caller must hold.
*/
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
@@ -300,6 +346,7 @@ struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
gt->resource.name = name;
gt->stolen = backed;
gt->in_gart = backed;
+ gt->roll = 0;
/* Ensure this is set for non GEM objects */
gt->gem.dev = dev;
ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
@@ -390,15 +437,18 @@ int psb_gtt_init(struct drm_device *dev, int resume)
pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
/*
- * FIXME: video mmu has hw bug to access 0x0D0000000,
- * then make gatt start at 0x0e000,0000
+ * The video mmu has a hw bug when accessing 0x0D0000000.
+ * Make gatt start at 0x0e000,0000. This doesn't actually
+ * matter for us but may do if the video acceleration ever
+ * gets opened up.
*/
pg->mmu_gatt_start = 0xE0000000;
pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
>> PAGE_SHIFT;
- /* CDV workaround */
+ /* Some CDV firmware doesn't report this currently. In which case the
+ system has 64 gtt pages */
if (pg->gtt_start == 0 || gtt_pages == 0) {
dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
gtt_pages = 64;
@@ -412,13 +462,16 @@ int psb_gtt_init(struct drm_device *dev, int resume)
if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
static struct resource fudge; /* Preferably peppermint */
-
/* This can occur on CDV SDV systems. Fudge it in this case.
We really don't care what imaginary space is being allocated
at this point */
dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
pg->gatt_start = 0x40000000;
pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
+ /* This is a little confusing but in fact the GTT is providing
+ a view from the GPU into memory and not vice versa. As such
+ this is really allocating space that is not the same as the
+ CPU address space on CDV */
fudge.start = 0x40000000;
fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
fudge.name = "fudge";
diff --git a/drivers/staging/gma500/gtt.h b/drivers/staging/gma500/gtt.h
index e0e1cb6f9bd..aa1742387f5 100644
--- a/drivers/staging/gma500/gtt.h
+++ b/drivers/staging/gma500/gtt.h
@@ -49,6 +49,7 @@ struct gtt_range {
bool mmapping; /* Is mmappable */
struct page **pages; /* Backing pages if present */
int npage; /* Number of backing pages */
+ int roll; /* Roll applied to the GTT entries */
};
extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
@@ -57,5 +58,7 @@ extern void psb_gtt_kref_put(struct gtt_range *gt);
extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
extern int psb_gtt_pin(struct gtt_range *gt);
extern void psb_gtt_unpin(struct gtt_range *gt);
+extern void psb_gtt_roll(struct drm_device *dev,
+ struct gtt_range *gt, int roll);
#endif
diff --git a/drivers/staging/gma500/intel_opregion.c b/drivers/staging/gma500/intel_opregion.c
index d2e60376982..d946bc1b17b 100644
--- a/drivers/staging/gma500/intel_opregion.c
+++ b/drivers/staging/gma500/intel_opregion.c
@@ -20,6 +20,7 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
+ * FIXME: resolve with the i915 version
*/
#include "psb_drv.h"
diff --git a/drivers/staging/gma500/mdfld_intel_display.c b/drivers/staging/gma500/mdfld_intel_display.c
index aa2ff559383..8eb827ecc3d 100644
--- a/drivers/staging/gma500/mdfld_intel_display.c
+++ b/drivers/staging/gma500/mdfld_intel_display.c
@@ -282,7 +282,7 @@ static int mdfld_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return -EINVAL;
}
-#if 1 /* FIXME_JLIU7 can't enalbe cursorB/C HW issue. need to remove after HW fix */
+#if 1 /* FIXME_JLIU7 can't enable cursorB/C HW issue. need to remove after HW fix */
if (pipe != 0)
return 0;
#endif
@@ -484,7 +484,7 @@ void mdfld_disable_crtc (struct drm_device *dev, int pipe)
/* FIXME_JLIU7 MDFLD_PO revisit */
/* Wait for vblank for the disable to take effect */
-// MDFLD_PO_JLIU7 psb_intel_wait_for_vblank(dev);
+/* MDFLD_PO_JLIU7 psb_intel_wait_for_vblank(dev); */
/* Next, disable display pipes */
temp = REG_READ(pipeconf_reg);
@@ -561,7 +561,6 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
//gbdispstatus = true;
}
-
/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */
@@ -1150,8 +1149,11 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
- /*Moorestown doesn't have register support for centering so we need to
- mess with the h/vblank and h/vsync start and ends to get centering*/
+ /*
+ * Medfield doesn't have register support for centering so
+ * we need to mess with the h/vblank and h/vsync start and
+ * ends to get central
+ */
int offsetX = 0, offsetY = 0;
offsetX = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
diff --git a/drivers/staging/gma500/mrst_crtc.c b/drivers/staging/gma500/mrst_crtc.c
index 72464dd0f23..c9311a573c2 100644
--- a/drivers/staging/gma500/mrst_crtc.c
+++ b/drivers/staging/gma500/mrst_crtc.c
@@ -521,12 +521,11 @@ int mrst_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
unsigned long start, offset;
- /* FIXME: check if we need this surely MRST is pipe 0 only */
+
int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
@@ -572,15 +571,10 @@ int mrst_pipe_set_base(struct drm_crtc *crtc,
}
REG_WRITE(dspcntr_reg, dspcntr);
- if (0 /* FIXMEAC - check what PSB needs */) {
- REG_WRITE(dspbase, offset);
- REG_READ(dspbase);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
- } else {
- REG_WRITE(dspbase, start + offset);
- REG_READ(dspbase);
- }
+ REG_WRITE(dspbase, offset);
+ REG_READ(dspbase);
+ REG_WRITE(dspsurf, start);
+ REG_READ(dspsurf);
pipe_set_base_exit:
gma_power_end(dev);
diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
index 972bea7c1af..436fe9733b1 100644
--- a/drivers/staging/gma500/power.c
+++ b/drivers/staging/gma500/power.c
@@ -83,7 +83,7 @@ static void gma_suspend_display(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- if (dev_priv->suspended)
+ if (!dev_priv->display_power)
return;
dev_priv->ops->save_regs(dev);
dev_priv->ops->power_down(dev);
@@ -101,7 +101,7 @@ static void gma_resume_display(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_psb_private *dev_priv = dev->dev_private;
- if (dev_priv->suspended == false)
+ if (dev_priv->display_power)
return;
/* turn on the display power island */
@@ -265,6 +265,8 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
/* Ok power up needed */
ret = gma_resume_pci(dev->pdev);
if (ret == 0) {
+ /* FIXME: we want to defer this for Medfield/Oaktrail */
+ gma_resume_display(dev);
psb_irq_preinstall(dev);
psb_irq_postinstall(dev);
pm_runtime_get(&dev->pdev->dev);
@@ -302,7 +304,7 @@ int psb_runtime_suspend(struct device *dev)
int psb_runtime_resume(struct device *dev)
{
- return 0;
+ return gma_power_resume(dev);;
}
int psb_runtime_idle(struct device *dev)
diff --git a/drivers/staging/gma500/psb_device.c b/drivers/staging/gma500/psb_device.c
index 46591323595..b97aa78519f 100644
--- a/drivers/staging/gma500/psb_device.c
+++ b/drivers/staging/gma500/psb_device.c
@@ -213,7 +213,6 @@ static int psb_restore_display_registers(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct drm_connector *connector;
- int pp_stat;
/* Display arbitration + watermarks */
PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
@@ -237,37 +236,6 @@ static int psb_restore_display_registers(struct drm_device *dev)
connector->funcs->restore(connector);
mutex_unlock(&dev->mode_config.mutex);
-
- if (dev_priv->iLVDS_enable) {
- /*shutdown the panel*/
- PSB_WVDC32(0, PP_CONTROL);
- do {
- pp_stat = PSB_RVDC32(PP_STATUS);
- } while (pp_stat & 0x80000000);
-
- /* Turn off the plane */
- PSB_WVDC32(0x58000000, DSPACNTR);
- PSB_WVDC32(0, DSPASURF);/*trigger the plane disable*/
- /* Wait ~4 ticks */
- msleep(4);
- /* Turn off pipe */
- PSB_WVDC32(0x0, PIPEACONF);
- /* Wait ~8 ticks */
- msleep(8);
-
- /* Turn off PLLs */
- PSB_WVDC32(0, MRST_DPLL_A);
- } else {
- PSB_WVDC32(DPI_SHUT_DOWN, DPI_CONTROL_REG);
- PSB_WVDC32(0x0, PIPEACONF);
- PSB_WVDC32(0x2faf0000, BLC_PWM_CTL);
- while (REG_READ(0x70008) & 0x40000000)
- cpu_relax();
- while ((PSB_RVDC32(GEN_FIFO_STAT_REG) & DPI_FIFO_EMPTY)
- != DPI_FIFO_EMPTY)
- cpu_relax();
- PSB_WVDC32(0, DEVICE_READY_REG);
- }
return 0;
}
diff --git a/drivers/staging/gma500/psb_drv.c b/drivers/staging/gma500/psb_drv.c
index b2cdce7b97e..dc676c2ce81 100644
--- a/drivers/staging/gma500/psb_drv.c
+++ b/drivers/staging/gma500/psb_drv.c
@@ -183,7 +183,6 @@ static void psb_lastclose(struct drm_device *dev)
static void psb_do_takedown(struct drm_device *dev)
{
- /* FIXME: do we need to clean up the gtt here ? */
}
static int psb_do_init(struct drm_device *dev)
@@ -229,7 +228,7 @@ static int psb_do_init(struct drm_device *dev)
spin_lock_init(&dev_priv->irqmask_lock);
- mutex_init(&dev_priv->mutex_2d);
+ spin_lock_init(&dev_priv->lock_2d);
PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
@@ -329,6 +328,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->dev = dev;
dev->dev_private = (void *) dev_priv;
+ if (!IS_PSB(dev)) {
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ }
+
dev_priv->num_pipe = dev_priv->ops->pipes;
resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
@@ -410,7 +414,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
drm_irq_install(dev);
dev->vblank_disable_allowed = 1;
@@ -444,12 +448,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
return ret;
-#if 0
- /*enable runtime pm at last*/
- pm_runtime_enable(&dev->pdev->dev);
+
+ /* Enable runtime pm at last */
pm_runtime_set_active(&dev->pdev->dev);
-#endif
- /*Intel drm driver load is done, continue doing pvr load*/
return 0;
out_err:
psb_driver_unload(dev);
@@ -466,14 +467,13 @@ static int psb_sizes_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_psb_private *dev_priv = psb_priv(dev);
- struct drm_psb_sizes_arg *arg =
- (struct drm_psb_sizes_arg *) data;
+ struct drm_psb_sizes_arg *arg = data;
*arg = dev_priv->sizes;
return 0;
}
-static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
+static int psb_dc_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
uint32_t flags;
@@ -481,8 +481,7 @@ static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
struct drm_mode_object *obj;
struct drm_connector *connector;
struct drm_crtc *crtc;
- struct drm_psb_dc_state_arg *arg =
- (struct drm_psb_dc_state_arg *)data;
+ struct drm_psb_dc_state_arg *arg = data;
/* Double check MRST case */
@@ -1110,15 +1109,12 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- static unsigned int runtime_allowed;
-
- if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
- runtime_allowed++;
- pm_runtime_allow(&dev->pdev->dev);
- dev_priv->rpm_enabled = 1;
- }
- return drm_ioctl(filp, cmd, arg);
+ int ret;
+
+ pm_runtime_forbid(dev->dev);
+ ret = drm_ioctl(filp, cmd, arg);
+ pm_runtime_allow(dev->dev);
+ return ret;
/* FIXME: do we need to wrap the other side of this */
}
@@ -1137,8 +1133,12 @@ static void psb_remove(struct pci_dev *pdev)
}
static const struct dev_pm_ops psb_pm_ops = {
- .resume = gma_power_resume,
.suspend = gma_power_suspend,
+ .resume = gma_power_resume,
+ .freeze = gma_power_suspend,
+ .thaw = gma_power_resume,
+ .poweroff = gma_power_suspend,
+ .restore = gma_power_resume,
.runtime_suspend = psb_runtime_suspend,
.runtime_resume = psb_runtime_resume,
.runtime_idle = psb_runtime_idle,
@@ -1207,9 +1207,6 @@ static struct pci_driver psb_pci_driver = {
static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- /* MLD Added this from Inaky's patch */
- if (pci_enable_msi(pdev))
- dev_warn(&pdev->dev, "Enable MSI failed!\n");
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/staging/gma500/psb_drv.h b/drivers/staging/gma500/psb_drv.h
index fd4732dd783..11d963a055b 100644
--- a/drivers/staging/gma500/psb_drv.h
+++ b/drivers/staging/gma500/psb_drv.h
@@ -43,6 +43,7 @@ enum {
CHIP_MFLD_0130 = 3, /* Medfield */
};
+#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
@@ -134,6 +135,9 @@ enum {
#define _PSB_IRQ_MSVDX_FLAG (1<<19)
#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
+#define _PSB_PIPE_EVENT_FLAG (_PSB_VSYNC_PIPEA_FLAG | \
+ _PSB_VSYNC_PIPEB_FLAG)
+
/* This flag includes all the display IRQ bits excepts the vblank irqs. */
#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
_MDFLD_PIPEB_EVENT_FLAG | \
@@ -608,7 +612,7 @@ struct drm_psb_private {
void (*exit_idle)(struct drm_device *dev, u32 update_src);
/* 2D acceleration */
- struct mutex mutex_2d;
+ spinlock_t lock_2d;
/* FIXME: Arrays anyone ? */
struct mdfld_dsi_encoder *encoder0;
diff --git a/drivers/staging/gma500/psb_intel_display.c b/drivers/staging/gma500/psb_intel_display.c
index 4afa671f974..caa9d86f26d 100644
--- a/drivers/staging/gma500/psb_intel_display.c
+++ b/drivers/staging/gma500/psb_intel_display.c
@@ -1102,10 +1102,6 @@ static int psb_crtc_set_config(struct drm_mode_set *set)
{
int ret;
struct drm_device *dev = set->crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->rpm_enabled)
- return drm_crtc_helper_set_config(set);
pm_runtime_forbid(&dev->pdev->dev);
ret = drm_crtc_helper_set_config(set);
diff --git a/drivers/staging/gma500/psb_intel_lvds.c b/drivers/staging/gma500/psb_intel_lvds.c
index c6436da6073..21022e1a977 100644
--- a/drivers/staging/gma500/psb_intel_lvds.c
+++ b/drivers/staging/gma500/psb_intel_lvds.c
@@ -68,20 +68,23 @@ struct psb_intel_lvds_priv {
static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- u32 retVal;
+ u32 ret;
if (gma_power_begin(dev, false)) {
- retVal = ((REG_READ(BLC_PWM_CTL) &
- BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
-
+ ret = REG_READ(BLC_PWM_CTL);
gma_power_end(dev);
- } else
- retVal = ((dev_priv->saveBLC_PWM_CTL &
- BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
-
- return retVal;
+ } else /* Powered off, use the saved value */
+ ret = dev_priv->saveBLC_PWM_CTL;
+
+ /* Top 15bits hold the frequency mask */
+ ret = (ret & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT;
+
+ ret *= 2; /* Return a 16bit range as needed for setting */
+ if (ret == 0)
+ dev_err(dev->dev, "BL bug: Reg %08x save %08X\n",
+ REG_READ(BLC_PWM_CTL), dev_priv->saveBLC_PWM_CTL);
+ return ret;
}
/*
@@ -142,7 +145,7 @@ static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
/*BLC_PWM_CTL Should be initiated while backlight device init*/
- BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
+ BUG_ON(max_pwm_blc == 0);
blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
@@ -154,6 +157,10 @@ static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
(max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
(blc_pwm_duty_cycle));
+ dev_info(dev->dev, "Backlight lvds set brightness %08x\n",
+ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+ (blc_pwm_duty_cycle));
+
return 0;
}
@@ -162,14 +169,12 @@ static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
*/
void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
{
- /*u32 blc_pwm_ctl;*/
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
dev_dbg(dev->dev, "backlight level is %d\n", level);
if (!dev_priv->lvds_bl) {
- dev_err(dev->dev, "NO LVDS Backlight Info\n");
+ dev_err(dev->dev, "NO LVDS backlight info\n");
return;
}
@@ -190,11 +195,13 @@ static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
u32 blc_pwm_ctl;
if (gma_power_begin(dev, false)) {
- blc_pwm_ctl =
- REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ blc_pwm_ctl = REG_READ(BLC_PWM_CTL);
+ blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
REG_WRITE(BLC_PWM_CTL,
(blc_pwm_ctl |
(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+ dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
gma_power_end(dev);
} else {
blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
@@ -212,9 +219,11 @@ static void psb_intel_lvds_set_power(struct drm_device *dev,
{
u32 pp_status;
- if (!gma_power_begin(dev, true))
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "set power, chip off!\n");
return;
-
+ }
+
if (on) {
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
POWER_TARGET_ON);
@@ -296,9 +305,6 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
u32 pp_status;
-
- /*struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;*/
struct psb_intel_output *psb_intel_output =
to_psb_intel_output(connector);
struct psb_intel_lvds_priv *lvds_priv =
@@ -382,7 +388,6 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
if (psb_intel_output->type == INTEL_OUTPUT_MIPI2)
panel_fixed_mode = mode_dev->panel_fixed_mode2;
- /* FIXME: review for Medfield */
/* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */
if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
printk(KERN_ERR "Can't support LVDS on pipe A\n");
@@ -622,7 +627,8 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
goto set_prop_error;
else {
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *devp = encoder->dev->dev_private;
+ struct drm_psb_private *devp =
+ encoder->dev->dev_private;
struct backlight_device *bd = devp->backlight_device;
if (bd) {
bd->props.brightness = value;
@@ -695,8 +701,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
u32 lvds;
int pipe;
@@ -712,8 +717,8 @@ void psb_intel_lvds_init(struct drm_device *dev,
}
psb_intel_output->dev_priv = lvds_priv;
-
psb_intel_output->mode_dev = mode_dev;
+
connector = &psb_intel_output->base;
encoder = &psb_intel_output->enc;
drm_connector_init(dev, &psb_intel_output->base,
diff --git a/drivers/staging/gma500/psb_irq.c b/drivers/staging/gma500/psb_irq.c
index 4a0fa42893f..36dd63044b0 100644
--- a/drivers/staging/gma500/psb_irq.c
+++ b/drivers/staging/gma500/psb_irq.c
@@ -139,21 +139,10 @@ void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
}
/**
- * Display controller interrupt handler for vsync/vblank.
- *
- */
-static void mid_vblank_handler(struct drm_device *dev, uint32_t pipe)
-{
- drm_handle_vblank(dev, pipe);
-}
-
-
-/**
* Display controller interrupt handler for pipe event.
*
*/
-#define WAIT_STATUS_CLEAR_LOOP_COUNT 0xffff
-static void mid_pipe_event_handler(struct drm_device *dev, uint32_t pipe)
+static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
{
struct drm_psb_private *dev_priv =
(struct drm_psb_private *) dev->dev_private;
@@ -162,6 +151,7 @@ static void mid_pipe_event_handler(struct drm_device *dev, uint32_t pipe)
uint32_t pipe_stat_reg = psb_pipestat(pipe);
uint32_t pipe_enable = dev_priv->pipestat[pipe];
uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
+ uint32_t pipe_clear;
uint32_t i = 0;
spin_lock(&dev_priv->irqmask_lock);
@@ -172,27 +162,23 @@ static void mid_pipe_event_handler(struct drm_device *dev, uint32_t pipe)
spin_unlock(&dev_priv->irqmask_lock);
- /* clear the 2nd level interrupt status bits */
- /**
- * FIXME: shouldn't use while loop here. However, the interrupt
- * status 'sticky' bits cannot be cleared by setting '1' to that
- * bit once...
- */
- for (i = 0; i < WAIT_STATUS_CLEAR_LOOP_COUNT; i++) {
+ /* Clear the 2nd level interrupt status bits
+ * Sometimes the bits are very sticky so we repeat until they unstick */
+ for (i = 0; i < 0xffff; i++) {
PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
- (void) PSB_RVDC32(pipe_stat_reg);
+ pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
- if ((PSB_RVDC32(pipe_stat_reg) & pipe_status) == 0)
+ if (pipe_clear == 0)
break;
}
- if (i == WAIT_STATUS_CLEAR_LOOP_COUNT)
+ if (pipe_clear)
dev_err(dev->dev,
- "%s, can't clear the status bits in pipe_stat_reg, its value = 0x%x.\n",
- __func__, PSB_RVDC32(pipe_stat_reg));
+ "%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
+ __func__, pipe, PSB_RVDC32(pipe_stat_reg));
if (pipe_stat_val & PIPE_VBLANK_STATUS)
- mid_vblank_handler(dev, pipe);
+ drm_handle_vblank(dev, pipe);
if (pipe_stat_val & PIPE_TE_STATUS)
drm_handle_vblank(dev, pipe);
@@ -203,8 +189,11 @@ static void mid_pipe_event_handler(struct drm_device *dev, uint32_t pipe)
*/
static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
{
- if (vdc_stat & _PSB_PIPEA_EVENT_FLAG)
+ if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
mid_pipe_event_handler(dev, 0);
+
+ if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
+ mid_pipe_event_handler(dev, 1);
}
irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
@@ -220,8 +209,13 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+ if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
+ dsp_int = 1;
+
+ /* FIXME: Handle Medfield
if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
dsp_int = 1;
+ */
if (vdc_stat & _PSB_IRQ_SGX_FLAG)
sgx_int = 1;
@@ -267,13 +261,18 @@ void psb_irq_preinstall(struct drm_device *dev)
if (gma_power_is_on(dev))
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
if (dev->vblank_enabled[0])
- dev_priv->vdc_irq_mask |= _PSB_PIPEA_EVENT_FLAG;
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+ if (dev->vblank_enabled[1])
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+ /* FIXME: Handle Medfield irq mask
if (dev->vblank_enabled[1])
dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
if (dev->vblank_enabled[2])
dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
+ */
- /*This register is safe even if display island is off*/
+ /* This register is safe even if display island is off */
PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
@@ -471,7 +470,13 @@ int psb_enable_vblank(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
- mid_enable_pipe_event(dev_priv, pipe);
+ if (pipe == 0)
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+ else if (pipe == 1)
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
@@ -493,7 +498,13 @@ void psb_disable_vblank(struct drm_device *dev, int pipe)
#endif
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
- mid_disable_pipe_event(dev_priv, pipe);
+ if (pipe == 0)
+ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
+ else if (pipe == 1)
+ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
+
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
diff --git a/drivers/staging/gma500/psb_lid.c b/drivers/staging/gma500/psb_lid.c
index af328516561..b867aabe6bf 100644
--- a/drivers/staging/gma500/psb_lid.c
+++ b/drivers/staging/gma500/psb_lid.c
@@ -52,8 +52,6 @@ static void psb_lid_timer_func(unsigned long data)
pp_status = REG_READ(PP_STATUS);
} while ((pp_status & PP_ON) == 0);
}
- /* printk(KERN_INFO"%s: lid: closed\n", __FUNCTION__); */
-
dev_priv->lid_last_state = readl(lid_state);
lid_timer_schedule:
diff --git a/drivers/staging/go7007/Makefile b/drivers/staging/go7007/Makefile
index 60a91853570..6ee837c5670 100644
--- a/drivers/staging/go7007/Makefile
+++ b/drivers/staging/go7007/Makefile
@@ -20,15 +20,11 @@ go7007-y := go7007-v4l2.o go7007-driver.o go7007-i2c.o go7007-fw.o \
s2250-y := s2250-board.o
# Uncomment when the saa7134 patches get into upstream
-#ifneq ($(CONFIG_VIDEO_SAA7134),)
#obj-$(CONFIG_VIDEO_SAA7134) += saa7134-go7007.o
-#EXTRA_CFLAGS += -Idrivers/media/video/saa7134 -DSAA7134_MPEG_GO7007=3
-#endif
+#ccflags-$(CONFIG_VIDEO_SAA7134:m=y) += -Idrivers/media/video/saa7134 -DSAA7134_MPEG_GO7007=3
# S2250 needs cypress ezusb loader from dvb-usb
-ifneq ($(CONFIG_VIDEO_GO7007_USB_S2250_BOARD),)
-ccflags-y := -Idrivers/media/dvb/dvb-usb
-endif
+ccflags-$(CONFIG_VIDEO_GO7007_USB_S2250_BOARD:m=y) += -Idrivers/media/dvb/dvb-usb
ccflags-y += -Idrivers/media/dvb/frontends
ccflags-y += -Idrivers/media/dvb/dvb-core
diff --git a/drivers/staging/go7007/wis-tw2804.c b/drivers/staging/go7007/wis-tw2804.c
index 5b218c55842..9134f03e3cf 100644
--- a/drivers/staging/go7007/wis-tw2804.c
+++ b/drivers/staging/go7007/wis-tw2804.c
@@ -33,8 +33,7 @@ struct wis_tw2804 {
int hue;
};
-static u8 global_registers[] =
-{
+static u8 global_registers[] = {
0x39, 0x00,
0x3a, 0xff,
0x3b, 0x84,
@@ -45,8 +44,7 @@ static u8 global_registers[] =
0xff, 0xff, /* Terminator (reg 0xff does not exist) */
};
-static u8 channel_registers[] =
-{
+static u8 channel_registers[] = {
0x01, 0xc4,
0x02, 0xa5,
0x03, 0x20,
diff --git a/drivers/staging/hv/Kconfig b/drivers/staging/hv/Kconfig
index 5e0c9f6c745..072185ebe95 100644
--- a/drivers/staging/hv/Kconfig
+++ b/drivers/staging/hv/Kconfig
@@ -1,46 +1,17 @@
-config HYPERV
- tristate "Microsoft Hyper-V client drivers"
- depends on X86 && ACPI && PCI && m
- default n
- help
- Select this option to run Linux as a Hyper-V client operating
- system.
-
-if HYPERV
-
config HYPERV_STORAGE
tristate "Microsoft Hyper-V virtual storage driver"
- depends on SCSI
- default HYPERV
+ depends on HYPERV && SCSI
help
Select this option to enable the Hyper-V virtual storage driver.
-config HYPERV_BLOCK
- tristate "Microsoft Hyper-V virtual block driver"
- depends on BLOCK && SCSI && (LBDAF || 64BIT)
- default HYPERV
- help
- Select this option to enable the Hyper-V virtual block driver.
-
config HYPERV_NET
tristate "Microsoft Hyper-V virtual network driver"
- depends on NET
- default HYPERV
+ depends on HYPERV && NET
help
Select this option to enable the Hyper-V virtual network driver.
-config HYPERV_UTILS
- tristate "Microsoft Hyper-V Utilities driver"
- depends on CONNECTOR && NLS
- default HYPERV
- help
- Select this option to enable the Hyper-V Utilities.
-
config HYPERV_MOUSE
tristate "Microsoft Hyper-V mouse driver"
- depends on HID
- default HYPERV
+ depends on HYPERV && HID
help
Select this option to enable the Hyper-V mouse driver.
-
-endif
diff --git a/drivers/staging/hv/Makefile b/drivers/staging/hv/Makefile
index 30046743a0b..e071c12c8f6 100644
--- a/drivers/staging/hv/Makefile
+++ b/drivers/staging/hv/Makefile
@@ -1,14 +1,7 @@
-obj-$(CONFIG_HYPERV) += hv_vmbus.o hv_timesource.o
+obj-$(CONFIG_HYPERV) += hv_timesource.o
obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
-obj-$(CONFIG_HYPERV_BLOCK) += hv_blkvsc.o
obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
-obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
obj-$(CONFIG_HYPERV_MOUSE) += hv_mouse.o
-hv_vmbus-y := vmbus_drv.o \
- hv.o connection.o channel.o \
- channel_mgmt.o ring_buffer.o
-hv_storvsc-y := storvsc_drv.o storvsc.o
-hv_blkvsc-y := blkvsc_drv.o storvsc.o
+hv_storvsc-y := storvsc_drv.o
hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o
-hv_utils-y := hv_util.o hv_kvp.o
diff --git a/drivers/staging/hv/TODO b/drivers/staging/hv/TODO
index 582fd4ab3f1..ed4d63619df 100644
--- a/drivers/staging/hv/TODO
+++ b/drivers/staging/hv/TODO
@@ -1,14 +1,7 @@
TODO:
- - fix remaining checkpatch warnings and errors
- - audit the vmbus to verify it is working properly with the
- driver model
- - see if the vmbus can be merged with the other virtual busses
- in the kernel
- audit the network driver
- - checking for carrier inside open is wrong, network device API
- confusion??
- - audit the block driver
- audit the scsi driver
Please send patches for this code to Greg Kroah-Hartman <gregkh@suse.de>,
-Hank Janssen <hjanssen@microsoft.com>, and Haiyang Zhang <haiyangz@microsoft.com>.
+Hank Janssen <hjanssen@microsoft.com>, Haiyang Zhang <haiyangz@microsoft.com>,
+K. Y. Srinivasan <kys@microsoft.com>
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
deleted file mode 100644
index d286b222318..00000000000
--- a/drivers/staging/hv/blkvsc_drv.c
+++ /dev/null
@@ -1,1026 +0,0 @@
-/*
- * Copyright (c) 2009, Microsoft Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Authors:
- * Haiyang Zhang <haiyangz@microsoft.com>
- * Hank Janssen <hjanssen@microsoft.com>
- * K. Y. Srinivasan <kys@microsoft.com>
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/blkdev.h>
-#include <linux/major.h>
-#include <linux/delay.h>
-#include <linux/hdreg.h>
-#include <linux/slab.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_eh.h>
-#include <scsi/scsi_dbg.h>
-
-#include "hyperv.h"
-#include "hyperv_storage.h"
-
-
-#define BLKVSC_MINORS 64
-
-enum blkvsc_device_type {
- UNKNOWN_DEV_TYPE,
- HARDDISK_TYPE,
- DVD_TYPE,
-};
-
-enum blkvsc_op_type {
- DO_INQUIRY,
- DO_CAPACITY,
- DO_FLUSH,
-};
-
-/*
- * This request ties the struct request and struct
- * blkvsc_request/hv_storvsc_request together A struct request may be
- * represented by 1 or more struct blkvsc_request
- */
-struct blkvsc_request_group {
- int outstanding;
- int status;
- struct list_head blkvsc_req_list; /* list of blkvsc_requests */
-};
-
-struct blkvsc_request {
- /* blkvsc_request_group.blkvsc_req_list */
- struct list_head req_entry;
-
- /* block_device_context.pending_list */
- struct list_head pend_entry;
-
- /* This may be null if we generate a request internally */
- struct request *req;
-
- struct block_device_context *dev;
-
- /* The group this request is part of. Maybe null */
- struct blkvsc_request_group *group;
-
- int write;
- sector_t sector_start;
- unsigned long sector_count;
-
- unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
- unsigned char cmd_len;
- unsigned char cmnd[MAX_COMMAND_SIZE];
-
- struct hv_storvsc_request request;
-};
-
-/* Per device structure */
-struct block_device_context {
- /* point back to our device context */
- struct hv_device *device_ctx;
- struct kmem_cache *request_pool;
- spinlock_t lock;
- struct gendisk *gd;
- enum blkvsc_device_type device_type;
- struct list_head pending_list;
-
- unsigned char device_id[64];
- unsigned int device_id_len;
- int num_outstanding_reqs;
- int shutting_down;
- unsigned int sector_size;
- sector_t capacity;
- unsigned int port;
- unsigned char path;
- unsigned char target;
- int users;
-};
-
-static const char *drv_name = "blkvsc";
-
-/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
-static const struct hv_guid dev_type = {
- .data = {
- 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
- 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5
- }
-};
-
-/*
- * There is a circular dependency involving blkvsc_request_completion()
- * and blkvsc_do_request().
- */
-static void blkvsc_request_completion(struct hv_storvsc_request *request);
-
-static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
-
-module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
-MODULE_PARM_DESC(ring_size, "Ring buffer size (in bytes)");
-
-/*
- * There is a circular dependency involving blkvsc_probe()
- * and block_ops.
- */
-static int blkvsc_probe(struct hv_device *dev);
-
-static int blkvsc_device_add(struct hv_device *device,
- void *additional_info)
-{
- struct storvsc_device_info *device_info;
- int ret = 0;
-
- device_info = (struct storvsc_device_info *)additional_info;
-
- device_info->ring_buffer_size = blkvsc_ringbuffer_size;
-
- ret = storvsc_dev_add(device, additional_info);
- if (ret != 0)
- return ret;
-
- /*
- * We need to use the device instance guid to set the path and target
- * id. For IDE devices, the device instance id is formatted as
- * <bus id> * - <device id> - 8899 - 000000000000.
- */
- device_info->path_id = device->dev_instance.data[3] << 24 |
- device->dev_instance.data[2] << 16 |
- device->dev_instance.data[1] << 8 |
- device->dev_instance.data[0];
-
- device_info->target_id = device->dev_instance.data[5] << 8 |
- device->dev_instance.data[4];
-
- return ret;
-}
-
-static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
- void (*request_completion)(struct hv_storvsc_request *))
-{
- struct block_device_context *blkdev = blkvsc_req->dev;
- struct hv_storvsc_request *storvsc_req;
- struct vmscsi_request *vm_srb;
- int ret;
-
-
- storvsc_req = &blkvsc_req->request;
- vm_srb = &storvsc_req->vstor_packet.vm_srb;
-
- vm_srb->data_in = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;
-
- storvsc_req->on_io_completion = request_completion;
- storvsc_req->context = blkvsc_req;
-
- vm_srb->port_number = blkdev->port;
- vm_srb->path_id = blkdev->path;
- vm_srb->target_id = blkdev->target;
- vm_srb->lun = 0; /* this is not really used at all */
-
- vm_srb->cdb_length = blkvsc_req->cmd_len;
-
- memcpy(vm_srb->cdb, blkvsc_req->cmnd, vm_srb->cdb_length);
-
- storvsc_req->sense_buffer = blkvsc_req->sense_buffer;
-
- ret = storvsc_do_io(blkdev->device_ctx,
- &blkvsc_req->request);
- if (ret == 0)
- blkdev->num_outstanding_reqs++;
-
- return ret;
-}
-
-
-static int blkvsc_open(struct block_device *bdev, fmode_t mode)
-{
- struct block_device_context *blkdev = bdev->bd_disk->private_data;
- unsigned long flags;
-
- spin_lock_irqsave(&blkdev->lock, flags);
-
- blkdev->users++;
-
- spin_unlock_irqrestore(&blkdev->lock, flags);
-
- return 0;
-}
-
-
-static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
-{
- sector_t nsect = get_capacity(bd->bd_disk);
- sector_t cylinders = nsect;
-
- /*
- * We are making up these values; let us keep it simple.
- */
- hg->heads = 0xff;
- hg->sectors = 0x3f;
- sector_div(cylinders, hg->heads * hg->sectors);
- hg->cylinders = cylinders;
- if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
- hg->cylinders = 0xffff;
- return 0;
-
-}
-
-
-static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
-{
-
- blkvsc_req->cmd_len = 16;
-
- if (rq_data_dir(blkvsc_req->req)) {
- blkvsc_req->write = 1;
- blkvsc_req->cmnd[0] = WRITE_16;
- } else {
- blkvsc_req->write = 0;
- blkvsc_req->cmnd[0] = READ_16;
- }
-
- blkvsc_req->cmnd[1] |=
- (blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;
-
- *(unsigned long long *)&blkvsc_req->cmnd[2] =
- cpu_to_be64(blkvsc_req->sector_start);
- *(unsigned int *)&blkvsc_req->cmnd[10] =
- cpu_to_be32(blkvsc_req->sector_count);
-}
-
-
-static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
- unsigned cmd, unsigned long arg)
-{
- struct block_device_context *blkdev = bd->bd_disk->private_data;
- int ret = 0;
-
- switch (cmd) {
- case HDIO_GET_IDENTITY:
- if (copy_to_user((void __user *)arg, blkdev->device_id,
- blkdev->device_id_len))
- ret = -EFAULT;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
-{
- struct blkvsc_request *blkvsc_req =
- (struct blkvsc_request *)request->context;
- struct block_device_context *blkdev =
- (struct block_device_context *)blkvsc_req->dev;
- struct scsi_sense_hdr sense_hdr;
- struct vmscsi_request *vm_srb;
- unsigned long flags;
-
-
- vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;
-
- spin_lock_irqsave(&blkdev->lock, flags);
- blkdev->num_outstanding_reqs--;
- spin_unlock_irqrestore(&blkdev->lock, flags);
-
- if (vm_srb->scsi_status)
- if (scsi_normalize_sense(blkvsc_req->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, &sense_hdr))
- scsi_print_sense_hdr("blkvsc", &sense_hdr);
-
- complete(&blkvsc_req->request.wait_event);
-}
-
-
-static int blkvsc_do_operation(struct block_device_context *blkdev,
- enum blkvsc_op_type op)
-{
- struct blkvsc_request *blkvsc_req;
- struct page *page_buf;
- unsigned char *buf;
- unsigned char device_type;
- struct scsi_sense_hdr sense_hdr;
- struct vmscsi_request *vm_srb;
- unsigned long flags;
-
- int ret = 0;
-
- blkvsc_req = kmem_cache_zalloc(blkdev->request_pool, GFP_KERNEL);
- if (!blkvsc_req)
- return -ENOMEM;
-
- page_buf = alloc_page(GFP_KERNEL);
- if (!page_buf) {
- kmem_cache_free(blkdev->request_pool, blkvsc_req);
- return -ENOMEM;
- }
-
- vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;
- init_completion(&blkvsc_req->request.wait_event);
- blkvsc_req->dev = blkdev;
- blkvsc_req->req = NULL;
- blkvsc_req->write = 0;
-
- blkvsc_req->request.data_buffer.pfn_array[0] =
- page_to_pfn(page_buf);
- blkvsc_req->request.data_buffer.offset = 0;
-
- switch (op) {
- case DO_INQUIRY:
- blkvsc_req->cmnd[0] = INQUIRY;
- blkvsc_req->cmnd[1] = 0x1; /* Get product data */
- blkvsc_req->cmnd[2] = 0x83; /* mode page 83 */
- blkvsc_req->cmnd[4] = 64;
- blkvsc_req->cmd_len = 6;
- blkvsc_req->request.data_buffer.len = 64;
- break;
-
- case DO_CAPACITY:
- blkdev->sector_size = 0;
- blkdev->capacity = 0;
-
- blkvsc_req->cmnd[0] = READ_CAPACITY;
- blkvsc_req->cmd_len = 16;
- blkvsc_req->request.data_buffer.len = 8;
- break;
-
- case DO_FLUSH:
- blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
- blkvsc_req->cmd_len = 10;
- blkvsc_req->request.data_buffer.pfn_array[0] = 0;
- blkvsc_req->request.data_buffer.len = 0;
- break;
- default:
- ret = -EINVAL;
- goto cleanup;
- }
-
- spin_lock_irqsave(&blkdev->lock, flags);
- blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
- spin_unlock_irqrestore(&blkdev->lock, flags);
-
- wait_for_completion_interruptible(&blkvsc_req->request.wait_event);
-
- /* check error */
- if (vm_srb->scsi_status) {
- scsi_normalize_sense(blkvsc_req->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, &sense_hdr);
-
- return 0;
- }
-
- buf = kmap(page_buf);
-
- switch (op) {
- case DO_INQUIRY:
- device_type = buf[0] & 0x1F;
-
- if (device_type == 0x0)
- blkdev->device_type = HARDDISK_TYPE;
- else
- blkdev->device_type = UNKNOWN_DEV_TYPE;
-
- blkdev->device_id_len = buf[7];
- if (blkdev->device_id_len > 64)
- blkdev->device_id_len = 64;
-
- memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
- break;
-
- case DO_CAPACITY:
- /* be to le */
- blkdev->capacity =
- ((buf[0] << 24) | (buf[1] << 16) |
- (buf[2] << 8) | buf[3]) + 1;
-
- blkdev->sector_size =
- (buf[4] << 24) | (buf[5] << 16) |
- (buf[6] << 8) | buf[7];
- break;
- default:
- break;
-
- }
-
-cleanup:
-
- kunmap(page_buf);
-
- __free_page(page_buf);
-
- kmem_cache_free(blkdev->request_pool, blkvsc_req);
-
- return ret;
-}
-
-
-static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
-{
- struct blkvsc_request *pend_req, *tmp;
- struct blkvsc_request *comp_req, *tmp2;
- struct vmscsi_request *vm_srb;
-
- int ret = 0;
-
-
- /* Flush the pending list first */
- list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
- pend_entry) {
- /*
- * The pend_req could be part of a partially completed
- * request. If so, complete those req first until we
- * hit the pend_req
- */
- list_for_each_entry_safe(comp_req, tmp2,
- &pend_req->group->blkvsc_req_list,
- req_entry) {
-
- if (comp_req == pend_req)
- break;
-
- list_del(&comp_req->req_entry);
-
- if (comp_req->req) {
- vm_srb =
- &comp_req->request.vstor_packet.
- vm_srb;
- ret = __blk_end_request(comp_req->req,
- (!vm_srb->scsi_status ? 0 : -EIO),
- comp_req->sector_count *
- blkdev->sector_size);
-
- /* FIXME: shouldn't this do more than return? */
- if (ret)
- goto out;
- }
-
- kmem_cache_free(blkdev->request_pool, comp_req);
- }
-
- list_del(&pend_req->pend_entry);
-
- list_del(&pend_req->req_entry);
-
- if (comp_req->req) {
- if (!__blk_end_request(pend_req->req, -EIO,
- pend_req->sector_count *
- blkdev->sector_size)) {
- /*
- * All the sectors have been xferred ie the
- * request is done
- */
- kmem_cache_free(blkdev->request_pool,
- pend_req->group);
- }
- }
-
- kmem_cache_free(blkdev->request_pool, pend_req);
- }
-
-out:
- return ret;
-}
-
-
-/*
- * blkvsc_remove() - Callback when our device is removed
- */
-static int blkvsc_remove(struct hv_device *dev)
-{
- struct block_device_context *blkdev = dev_get_drvdata(&dev->device);
- unsigned long flags;
-
-
- /* Get to a known state */
- spin_lock_irqsave(&blkdev->lock, flags);
-
- blkdev->shutting_down = 1;
-
- blk_stop_queue(blkdev->gd->queue);
-
- blkvsc_cancel_pending_reqs(blkdev);
-
- spin_unlock_irqrestore(&blkdev->lock, flags);
-
- blkvsc_do_operation(blkdev, DO_FLUSH);
-
- if (blkdev->users == 0) {
- del_gendisk(blkdev->gd);
- put_disk(blkdev->gd);
- blk_cleanup_queue(blkdev->gd->queue);
-
- storvsc_dev_remove(blkdev->device_ctx);
-
- kmem_cache_destroy(blkdev->request_pool);
- kfree(blkdev);
- }
-
- return 0;
-}
-
-static void blkvsc_shutdown(struct hv_device *dev)
-{
- struct block_device_context *blkdev = dev_get_drvdata(&dev->device);
- unsigned long flags;
-
- if (!blkdev)
- return;
-
- spin_lock_irqsave(&blkdev->lock, flags);
-
- blkdev->shutting_down = 1;
-
- blk_stop_queue(blkdev->gd->queue);
-
- blkvsc_cancel_pending_reqs(blkdev);
-
- spin_unlock_irqrestore(&blkdev->lock, flags);
-
- blkvsc_do_operation(blkdev, DO_FLUSH);
-
- /*
- * Now wait for all outgoing I/O to be drained.
- */
- storvsc_wait_to_drain((struct storvsc_device *)dev->ext);
-
-}
-
-static int blkvsc_release(struct gendisk *disk, fmode_t mode)
-{
- struct block_device_context *blkdev = disk->private_data;
- unsigned long flags;
-
- spin_lock_irqsave(&blkdev->lock, flags);
-
- if ((--blkdev->users == 0) && (blkdev->shutting_down)) {
- blk_stop_queue(blkdev->gd->queue);
- spin_unlock_irqrestore(&blkdev->lock, flags);
-
- blkvsc_do_operation(blkdev, DO_FLUSH);
- del_gendisk(blkdev->gd);
- put_disk(blkdev->gd);
- blk_cleanup_queue(blkdev->gd->queue);
-
- storvsc_dev_remove(blkdev->device_ctx);
-
- kmem_cache_destroy(blkdev->request_pool);
- kfree(blkdev);
- } else
- spin_unlock_irqrestore(&blkdev->lock, flags);
-
- return 0;
-}
-
-
-/*
- * We break the request into 1 or more blkvsc_requests and submit
- * them. If we cant submit them all, we put them on the
- * pending_list. The blkvsc_request() will work on the pending_list.
- */
-static int blkvsc_do_request(struct block_device_context *blkdev,
- struct request *req)
-{
- struct bio *bio = NULL;
- struct bio_vec *bvec = NULL;
- struct bio_vec *prev_bvec = NULL;
- struct blkvsc_request *blkvsc_req = NULL;
- struct blkvsc_request *tmp;
- int databuf_idx = 0;
- int seg_idx = 0;
- sector_t start_sector;
- unsigned long num_sectors = 0;
- int ret = 0;
- int pending = 0;
- struct blkvsc_request_group *group = NULL;
-
- /* Create a group to tie req to list of blkvsc_reqs */
- group = kmem_cache_zalloc(blkdev->request_pool, GFP_ATOMIC);
- if (!group)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&group->blkvsc_req_list);
- group->outstanding = group->status = 0;
-
- start_sector = blk_rq_pos(req);
-
- /* foreach bio in the request */
- if (req->bio) {
- for (bio = req->bio; bio; bio = bio->bi_next) {
- /*
- * Map this bio into an existing or new storvsc request
- */
- bio_for_each_segment(bvec, bio, seg_idx) {
- /* Get a new storvsc request */
- /* 1st-time */
- if ((!blkvsc_req) ||
- (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT)
- /* hole at the begin of page */
- || (bvec->bv_offset != 0) ||
- /* hold at the end of page */
- (prev_bvec &&
- (prev_bvec->bv_len != PAGE_SIZE))) {
- /* submit the prev one */
- if (blkvsc_req) {
- blkvsc_req->sector_start =
- start_sector;
- sector_div(
- blkvsc_req->sector_start,
- (blkdev->sector_size >> 9));
-
- blkvsc_req->sector_count =
- num_sectors /
- (blkdev->sector_size >> 9);
- blkvsc_init_rw(blkvsc_req);
- }
-
- /*
- * Create new blkvsc_req to represent
- * the current bvec
- */
- blkvsc_req =
- kmem_cache_zalloc(
- blkdev->request_pool, GFP_ATOMIC);
- if (!blkvsc_req) {
- /* free up everything */
- list_for_each_entry_safe(
- blkvsc_req, tmp,
- &group->blkvsc_req_list,
- req_entry) {
- list_del(
- &blkvsc_req->req_entry);
- kmem_cache_free(
- blkdev->request_pool,
- blkvsc_req);
- }
-
- kmem_cache_free(
- blkdev->request_pool, group);
- return -ENOMEM;
- }
-
- memset(blkvsc_req, 0,
- sizeof(struct blkvsc_request));
-
- blkvsc_req->dev = blkdev;
- blkvsc_req->req = req;
- blkvsc_req->request.
- data_buffer.offset
- = bvec->bv_offset;
- blkvsc_req->request.
- data_buffer.len = 0;
-
- /* Add to the group */
- blkvsc_req->group = group;
- blkvsc_req->group->outstanding++;
- list_add_tail(&blkvsc_req->req_entry,
- &blkvsc_req->group->blkvsc_req_list);
-
- start_sector += num_sectors;
- num_sectors = 0;
- databuf_idx = 0;
- }
-
- /*
- * Add the curr bvec/segment to the curr
- * blkvsc_req
- */
- blkvsc_req->request.data_buffer.
- pfn_array[databuf_idx]
- = page_to_pfn(bvec->bv_page);
- blkvsc_req->request.data_buffer.len
- += bvec->bv_len;
-
- prev_bvec = bvec;
-
- databuf_idx++;
- num_sectors += bvec->bv_len >> 9;
-
- } /* bio_for_each_segment */
-
- } /* rq_for_each_bio */
- }
-
- /* Handle the last one */
- if (blkvsc_req) {
- blkvsc_req->sector_start = start_sector;
- sector_div(blkvsc_req->sector_start,
- (blkdev->sector_size >> 9));
-
- blkvsc_req->sector_count = num_sectors /
- (blkdev->sector_size >> 9);
-
- blkvsc_init_rw(blkvsc_req);
- }
-
- list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
- if (pending) {
-
- list_add_tail(&blkvsc_req->pend_entry,
- &blkdev->pending_list);
- } else {
- ret = blkvsc_submit_request(blkvsc_req,
- blkvsc_request_completion);
- if (ret == -1) {
- pending = 1;
- list_add_tail(&blkvsc_req->pend_entry,
- &blkdev->pending_list);
- }
-
- }
- }
-
- return pending;
-}
-
-static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
-{
- struct blkvsc_request *pend_req, *tmp;
- int ret = 0;
-
- /* Flush the pending list first */
- list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
- pend_entry) {
-
- ret = blkvsc_submit_request(pend_req,
- blkvsc_request_completion);
- if (ret != 0)
- break;
- else
- list_del(&pend_req->pend_entry);
- }
-
- return ret;
-}
-
-
-static void blkvsc_request(struct request_queue *queue)
-{
- struct block_device_context *blkdev = NULL;
- struct request *req;
- int ret = 0;
-
- while ((req = blk_peek_request(queue)) != NULL) {
-
- blkdev = req->rq_disk->private_data;
- if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS) {
- __blk_end_request_cur(req, 0);
- continue;
- }
-
- ret = blkvsc_do_pending_reqs(blkdev);
-
- if (ret != 0) {
- blk_stop_queue(queue);
- break;
- }
-
- blk_start_request(req);
-
- ret = blkvsc_do_request(blkdev, req);
- if (ret > 0) {
- blk_stop_queue(queue);
- break;
- } else if (ret < 0) {
- blk_requeue_request(queue, req);
- blk_stop_queue(queue);
- break;
- }
- }
-}
-
-
-
-/* The one and only one */
-static struct hv_driver blkvsc_drv = {
- .probe = blkvsc_probe,
- .remove = blkvsc_remove,
- .shutdown = blkvsc_shutdown,
-};
-
-static const struct block_device_operations block_ops = {
- .owner = THIS_MODULE,
- .open = blkvsc_open,
- .release = blkvsc_release,
- .getgeo = blkvsc_getgeo,
- .ioctl = blkvsc_ioctl,
-};
-
-/*
- * blkvsc_drv_init - BlkVsc driver initialization.
- */
-static int blkvsc_drv_init(void)
-{
- struct hv_driver *drv = &blkvsc_drv;
- int ret;
-
- BUILD_BUG_ON(sizeof(sector_t) != 8);
-
- memcpy(&drv->dev_type, &dev_type, sizeof(struct hv_guid));
- drv->driver.name = drv_name;
-
- /* The driver belongs to vmbus */
- ret = vmbus_child_driver_register(&drv->driver);
-
- return ret;
-}
-
-
-static void blkvsc_drv_exit(void)
-{
-
- vmbus_child_driver_unregister(&blkvsc_drv.driver);
-}
-
-/*
- * blkvsc_probe - Add a new device for this driver
- */
-static int blkvsc_probe(struct hv_device *dev)
-{
- struct block_device_context *blkdev = NULL;
- struct storvsc_device_info device_info;
- struct storvsc_major_info major_info;
- int ret = 0;
-
- blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
- if (!blkdev) {
- ret = -ENOMEM;
- goto cleanup;
- }
-
- INIT_LIST_HEAD(&blkdev->pending_list);
-
- /* Initialize what we can here */
- spin_lock_init(&blkdev->lock);
-
-
- blkdev->request_pool = kmem_cache_create(dev_name(&dev->device),
- sizeof(struct blkvsc_request), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!blkdev->request_pool) {
- ret = -ENOMEM;
- goto cleanup;
- }
-
-
- ret = blkvsc_device_add(dev, &device_info);
- if (ret != 0)
- goto cleanup;
-
- blkdev->device_ctx = dev;
- /* this identified the device 0 or 1 */
- blkdev->target = device_info.target_id;
- /* this identified the ide ctrl 0 or 1 */
- blkdev->path = device_info.path_id;
-
- dev_set_drvdata(&dev->device, blkdev);
-
- ret = storvsc_get_major_info(&device_info, &major_info);
-
- if (ret)
- goto cleanup;
-
- if (major_info.do_register) {
- ret = register_blkdev(major_info.major, major_info.devname);
-
- if (ret != 0) {
- DPRINT_ERR(BLKVSC_DRV,
- "register_blkdev() failed! ret %d", ret);
- goto remove;
- }
- }
-
- DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!",
- major_info.major);
-
- blkdev->gd = alloc_disk(BLKVSC_MINORS);
- if (!blkdev->gd) {
- ret = -1;
- goto cleanup;
- }
-
- blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);
-
- blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
- blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
- blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
- blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
- blk_queue_dma_alignment(blkdev->gd->queue, 511);
-
- blkdev->gd->major = major_info.major;
- if (major_info.index == 1 || major_info.index == 3)
- blkdev->gd->first_minor = BLKVSC_MINORS;
- else
- blkdev->gd->first_minor = 0;
- blkdev->gd->fops = &block_ops;
- blkdev->gd->private_data = blkdev;
- blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
- sprintf(blkdev->gd->disk_name, "hd%c", 'a' + major_info.index);
-
- blkvsc_do_operation(blkdev, DO_INQUIRY);
- blkvsc_do_operation(blkdev, DO_CAPACITY);
-
- set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
- blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);
- /* go! */
- add_disk(blkdev->gd);
-
- DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d",
- blkdev->gd->disk_name, (unsigned long)blkdev->capacity,
- blkdev->sector_size);
-
- return ret;
-
-remove:
- storvsc_dev_remove(dev);
-
-cleanup:
- if (blkdev) {
- if (blkdev->request_pool) {
- kmem_cache_destroy(blkdev->request_pool);
- blkdev->request_pool = NULL;
- }
- kfree(blkdev);
- blkdev = NULL;
- }
-
- return ret;
-}
-
-static void blkvsc_request_completion(struct hv_storvsc_request *request)
-{
- struct blkvsc_request *blkvsc_req =
- (struct blkvsc_request *)request->context;
- struct block_device_context *blkdev =
- (struct block_device_context *)blkvsc_req->dev;
- unsigned long flags;
- struct blkvsc_request *comp_req, *tmp;
- struct vmscsi_request *vm_srb;
-
-
- spin_lock_irqsave(&blkdev->lock, flags);
-
- blkdev->num_outstanding_reqs--;
- blkvsc_req->group->outstanding--;
-
- /*
- * Only start processing when all the blkvsc_reqs are
- * completed. This guarantees no out-of-order blkvsc_req
- * completion when calling end_that_request_first()
- */
- if (blkvsc_req->group->outstanding == 0) {
- list_for_each_entry_safe(comp_req, tmp,
- &blkvsc_req->group->blkvsc_req_list,
- req_entry) {
-
- list_del(&comp_req->req_entry);
-
- vm_srb =
- &comp_req->request.vstor_packet.vm_srb;
- if (!__blk_end_request(comp_req->req,
- (!vm_srb->scsi_status ? 0 : -EIO),
- comp_req->sector_count * blkdev->sector_size)) {
- /*
- * All the sectors have been xferred ie the
- * request is done
- */
- kmem_cache_free(blkdev->request_pool,
- comp_req->group);
- }
-
- kmem_cache_free(blkdev->request_pool, comp_req);
- }
-
- if (!blkdev->shutting_down) {
- blkvsc_do_pending_reqs(blkdev);
- blk_start_queue(blkdev->gd->queue);
- blkvsc_request(blkdev->gd->queue);
- }
- }
-
- spin_unlock_irqrestore(&blkdev->lock, flags);
-}
-
-static void __exit blkvsc_exit(void)
-{
- blkvsc_drv_exit();
-}
-
-MODULE_LICENSE("GPL");
-MODULE_VERSION(HV_DRV_VERSION);
-MODULE_DESCRIPTION("Microsoft Hyper-V virtual block driver");
-module_init(blkvsc_drv_init);
-module_exit(blkvsc_exit);
diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c
deleted file mode 100644
index 455f47a891f..00000000000
--- a/drivers/staging/hv/channel.c
+++ /dev/null
@@ -1,877 +0,0 @@
-/*
- * Copyright (c) 2009, Microsoft Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Authors:
- * Haiyang Zhang <haiyangz@microsoft.com>
- * Hank Janssen <hjanssen@microsoft.com>
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#include "hyperv.h"
-#include "hyperv_vmbus.h"
-
-#define NUM_PAGES_SPANNED(addr, len) \
-((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
-
-/* Internal routines */
-static int create_gpadl_header(
- void *kbuffer, /* must be phys and virt contiguous */
- u32 size, /* page-size multiple */
- struct vmbus_channel_msginfo **msginfo,
- u32 *messagecount);
-static void vmbus_setevent(struct vmbus_channel *channel);
-
-/*
- * vmbus_setevent- Trigger an event notification on the specified
- * channel.
- */
-static void vmbus_setevent(struct vmbus_channel *channel)
-{
- struct hv_monitor_page *monitorpage;
-
- if (channel->offermsg.monitor_allocated) {
- /* Each u32 represents 32 channels */
- sync_set_bit(channel->offermsg.child_relid & 31,
- (unsigned long *) vmbus_connection.send_int_page +
- (channel->offermsg.child_relid >> 5));
-
- monitorpage = vmbus_connection.monitor_pages;
- monitorpage++; /* Get the child to parent monitor page */
-
- sync_set_bit(channel->monitor_bit,
- (unsigned long *)&monitorpage->trigger_group
- [channel->monitor_grp].pending);
-
- } else {
- vmbus_set_event(channel->offermsg.child_relid);
- }
-}
-
-/*
- * vmbus_get_debug_info -Retrieve various channel debug info
- */
-void vmbus_get_debug_info(struct vmbus_channel *channel,
- struct vmbus_channel_debug_info *debuginfo)
-{
- struct hv_monitor_page *monitorpage;
- u8 monitor_group = (u8)channel->offermsg.monitorid / 32;
- u8 monitor_offset = (u8)channel->offermsg.monitorid % 32;
- /* u32 monitorBit = 1 << monitorOffset; */
-
- debuginfo->relid = channel->offermsg.child_relid;
- debuginfo->state = channel->state;
- memcpy(&debuginfo->interfacetype,
- &channel->offermsg.offer.if_type, sizeof(struct hv_guid));
- memcpy(&debuginfo->interface_instance,
- &channel->offermsg.offer.if_instance,
- sizeof(struct hv_guid));
-
- monitorpage = (struct hv_monitor_page *)vmbus_connection.monitor_pages;
-
- debuginfo->monitorid = channel->offermsg.monitorid;
-
- debuginfo->servermonitor_pending =
- monitorpage->trigger_group[monitor_group].pending;
- debuginfo->servermonitor_latency =
- monitorpage->latency[monitor_group][monitor_offset];
- debuginfo->servermonitor_connectionid =
- monitorpage->parameter[monitor_group]
- [monitor_offset].connectionid.u.id;
-
- monitorpage++;
-
- debuginfo->clientmonitor_pending =
- monitorpage->trigger_group[monitor_group].pending;
- debuginfo->clientmonitor_latency =
- monitorpage->latency[monitor_group][monitor_offset];
- debuginfo->clientmonitor_connectionid =
- monitorpage->parameter[monitor_group]
- [monitor_offset].connectionid.u.id;
-
- hv_ringbuffer_get_debuginfo(&channel->inbound, &debuginfo->inbound);
- hv_ringbuffer_get_debuginfo(&channel->outbound, &debuginfo->outbound);
-}
-
-/*
- * vmbus_open - Open the specified channel.
- */
-int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
- u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
- void (*onchannelcallback)(void *context), void *context)
-{
- struct vmbus_channel_open_channel *openMsg;
- struct vmbus_channel_msginfo *openInfo = NULL;
- void *in, *out;
- unsigned long flags;
- int ret, t, err = 0;
-
- newchannel->onchannel_callback = onchannelcallback;
- newchannel->channel_callback_context = context;
-
- /* Allocate the ring buffer */
- out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
- get_order(send_ringbuffer_size + recv_ringbuffer_size));
-
- if (!out)
- return -ENOMEM;
-
-
- in = (void *)((unsigned long)out + send_ringbuffer_size);
-
- newchannel->ringbuffer_pages = out;
- newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
- recv_ringbuffer_size) >> PAGE_SHIFT;
-
- ret = hv_ringbuffer_init(
- &newchannel->outbound, out, send_ringbuffer_size);
-
- if (ret != 0) {
- err = ret;
- goto errorout;
- }
-
- ret = hv_ringbuffer_init(
- &newchannel->inbound, in, recv_ringbuffer_size);
- if (ret != 0) {
- err = ret;
- goto errorout;
- }
-
-
- /* Establish the gpadl for the ring buffer */
- newchannel->ringbuffer_gpadlhandle = 0;
-
- ret = vmbus_establish_gpadl(newchannel,
- newchannel->outbound.ring_buffer,
- send_ringbuffer_size +
- recv_ringbuffer_size,
- &newchannel->ringbuffer_gpadlhandle);
-
- if (ret != 0) {
- err = ret;
- goto errorout;
- }
-
- /* Create and init the channel open message */
- openInfo = kmalloc(sizeof(*openInfo) +
- sizeof(struct vmbus_channel_open_channel),
- GFP_KERNEL);
- if (!openInfo) {
- err = -ENOMEM;
- goto errorout;
- }
-
- init_completion(&openInfo->waitevent);
-
- openMsg = (struct vmbus_channel_open_channel *)openInfo->msg;
- openMsg->header.msgtype = CHANNELMSG_OPENCHANNEL;
- openMsg->openid = newchannel->offermsg.child_relid;
- openMsg->child_relid = newchannel->offermsg.child_relid;
- openMsg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
- openMsg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
- PAGE_SHIFT;
- openMsg->server_contextarea_gpadlhandle = 0;
-
- if (userdatalen > MAX_USER_DEFINED_BYTES) {
- err = -EINVAL;
- goto errorout;
- }
-
- if (userdatalen)
- memcpy(openMsg->userdata, userdata, userdatalen);
-
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_add_tail(&openInfo->msglistentry,
- &vmbus_connection.chn_msg_list);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
- ret = vmbus_post_msg(openMsg,
- sizeof(struct vmbus_channel_open_channel));
-
- if (ret != 0)
- goto cleanup;
-
- t = wait_for_completion_timeout(&openInfo->waitevent, 5*HZ);
- if (t == 0) {
- err = -ETIMEDOUT;
- goto errorout;
- }
-
-
- if (openInfo->response.open_result.status)
- err = openInfo->response.open_result.status;
-
-cleanup:
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&openInfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
- kfree(openInfo);
- return err;
-
-errorout:
- hv_ringbuffer_cleanup(&newchannel->outbound);
- hv_ringbuffer_cleanup(&newchannel->inbound);
- free_pages((unsigned long)out,
- get_order(send_ringbuffer_size + recv_ringbuffer_size));
- kfree(openInfo);
- return err;
-}
-EXPORT_SYMBOL_GPL(vmbus_open);
-
-/*
- * dump_gpadl_body - Dump the gpadl body message to the console for
- * debugging purposes.
- */
-static void dump_gpadl_body(struct vmbus_channel_gpadl_body *gpadl, u32 len)
-{
- int i;
- int pfncount;
-
- pfncount = (len - sizeof(struct vmbus_channel_gpadl_body)) /
- sizeof(u64);
-
- DPRINT_DBG(VMBUS, "gpadl body - len %d pfn count %d", len, pfncount);
-
- for (i = 0; i < pfncount; i++)
- DPRINT_DBG(VMBUS, "gpadl body - %d) pfn %llu",
- i, gpadl->pfn[i]);
-}
-
-/*
- * dump_gpadl_header - Dump the gpadl header message to the console for
- * debugging purposes.
- */
-static void dump_gpadl_header(struct vmbus_channel_gpadl_header *gpadl)
-{
- int i, j;
- int pagecount;
-
- DPRINT_DBG(VMBUS,
- "gpadl header - relid %d, range count %d, range buflen %d",
- gpadl->child_relid, gpadl->rangecount, gpadl->range_buflen);
- for (i = 0; i < gpadl->rangecount; i++) {
- pagecount = gpadl->range[i].byte_count >> PAGE_SHIFT;
- pagecount = (pagecount > 26) ? 26 : pagecount;
-
- DPRINT_DBG(VMBUS, "gpadl range %d - len %d offset %d "
- "page count %d", i, gpadl->range[i].byte_count,
- gpadl->range[i].byte_offset, pagecount);
-
- for (j = 0; j < pagecount; j++)
- DPRINT_DBG(VMBUS, "%d) pfn %llu", j,
- gpadl->range[i].pfn_array[j]);
- }
-}
-
-/*
- * create_gpadl_header - Creates a gpadl for the specified buffer
- */
-static int create_gpadl_header(void *kbuffer, u32 size,
- struct vmbus_channel_msginfo **msginfo,
- u32 *messagecount)
-{
- int i;
- int pagecount;
- unsigned long long pfn;
- struct vmbus_channel_gpadl_header *gpadl_header;
- struct vmbus_channel_gpadl_body *gpadl_body;
- struct vmbus_channel_msginfo *msgheader;
- struct vmbus_channel_msginfo *msgbody = NULL;
- u32 msgsize;
-
- int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
-
- pagecount = size >> PAGE_SHIFT;
- pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;
-
- /* do we need a gpadl body msg */
- pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
- sizeof(struct vmbus_channel_gpadl_header) -
- sizeof(struct gpa_range);
- pfncount = pfnsize / sizeof(u64);
-
- if (pagecount > pfncount) {
- /* we need a gpadl body */
- /* fill in the header */
- msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_header) +
- sizeof(struct gpa_range) + pfncount * sizeof(u64);
- msgheader = kzalloc(msgsize, GFP_KERNEL);
- if (!msgheader)
- goto nomem;
-
- INIT_LIST_HEAD(&msgheader->submsglist);
- msgheader->msgsize = msgsize;
-
- gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->msg;
- gpadl_header->rangecount = 1;
- gpadl_header->range_buflen = sizeof(struct gpa_range) +
- pagecount * sizeof(u64);
- gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = size;
- for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = pfn+i;
- *msginfo = msgheader;
- *messagecount = 1;
-
- pfnsum = pfncount;
- pfnleft = pagecount - pfncount;
-
- /* how many pfns can we fit */
- pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
- sizeof(struct vmbus_channel_gpadl_body);
- pfncount = pfnsize / sizeof(u64);
-
- /* fill in the body */
- while (pfnleft) {
- if (pfnleft > pfncount)
- pfncurr = pfncount;
- else
- pfncurr = pfnleft;
-
- msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_body) +
- pfncurr * sizeof(u64);
- msgbody = kzalloc(msgsize, GFP_KERNEL);
-
- if (!msgbody) {
- struct vmbus_channel_msginfo *pos = NULL;
- struct vmbus_channel_msginfo *tmp = NULL;
- /*
- * Free up all the allocated messages.
- */
- list_for_each_entry_safe(pos, tmp,
- &msgheader->submsglist,
- msglistentry) {
-
- list_del(&pos->msglistentry);
- kfree(pos);
- }
-
- goto nomem;
- }
-
- msgbody->msgsize = msgsize;
- (*messagecount)++;
- gpadl_body =
- (struct vmbus_channel_gpadl_body *)msgbody->msg;
-
- /*
- * Gpadl is u32 and we are using a pointer which could
- * be 64-bit
- * This is governed by the guest/host protocol and
- * so the hypervisor gurantees that this is ok.
- */
- for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = pfn + pfnsum + i;
-
- /* add to msg header */
- list_add_tail(&msgbody->msglistentry,
- &msgheader->submsglist);
- pfnsum += pfncurr;
- pfnleft -= pfncurr;
- }
- } else {
- /* everything fits in a header */
- msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_header) +
- sizeof(struct gpa_range) + pagecount * sizeof(u64);
- msgheader = kzalloc(msgsize, GFP_KERNEL);
- if (msgheader == NULL)
- goto nomem;
- msgheader->msgsize = msgsize;
-
- gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->msg;
- gpadl_header->rangecount = 1;
- gpadl_header->range_buflen = sizeof(struct gpa_range) +
- pagecount * sizeof(u64);
- gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = size;
- for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = pfn+i;
-
- *msginfo = msgheader;
- *messagecount = 1;
- }
-
- return 0;
-nomem:
- kfree(msgheader);
- kfree(msgbody);
- return -ENOMEM;
-}
-
-/*
- * vmbus_establish_gpadl - Estabish a GPADL for the specified buffer
- *
- * @channel: a channel
- * @kbuffer: from kmalloc
- * @size: page-size multiple
- * @gpadl_handle: some funky thing
- */
-int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
- u32 size, u32 *gpadl_handle)
-{
- struct vmbus_channel_gpadl_header *gpadlmsg;
- struct vmbus_channel_gpadl_body *gpadl_body;
- /* struct vmbus_channel_gpadl_created *gpadlCreated; */
- struct vmbus_channel_msginfo *msginfo = NULL;
- struct vmbus_channel_msginfo *submsginfo;
- u32 msgcount;
- struct list_head *curr;
- u32 next_gpadl_handle;
- unsigned long flags;
- int ret = 0;
- int t;
-
- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
- atomic_inc(&vmbus_connection.next_gpadl_handle);
-
- ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
- if (ret)
- return ret;
-
- init_completion(&msginfo->waitevent);
-
- gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
- gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
- gpadlmsg->child_relid = channel->offermsg.child_relid;
- gpadlmsg->gpadl = next_gpadl_handle;
-
- dump_gpadl_header(gpadlmsg);
-
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_add_tail(&msginfo->msglistentry,
- &vmbus_connection.chn_msg_list);
-
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
- ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
- sizeof(*msginfo));
- if (ret != 0)
- goto cleanup;
-
- if (msgcount > 1) {
- list_for_each(curr, &msginfo->submsglist) {
-
- submsginfo = (struct vmbus_channel_msginfo *)curr;
- gpadl_body =
- (struct vmbus_channel_gpadl_body *)submsginfo->msg;
-
- gpadl_body->header.msgtype =
- CHANNELMSG_GPADL_BODY;
- gpadl_body->gpadl = next_gpadl_handle;
-
- dump_gpadl_body(gpadl_body, submsginfo->msgsize -
- sizeof(*submsginfo));
- ret = vmbus_post_msg(gpadl_body,
- submsginfo->msgsize -
- sizeof(*submsginfo));
- if (ret != 0)
- goto cleanup;
-
- }
- }
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
- BUG_ON(t == 0);
-
-
- /* At this point, we received the gpadl created msg */
- *gpadl_handle = gpadlmsg->gpadl;
-
-cleanup:
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
- kfree(msginfo);
- return ret;
-}
-EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
-
-/*
- * vmbus_teardown_gpadl -Teardown the specified GPADL handle
- */
-int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
-{
- struct vmbus_channel_gpadl_teardown *msg;
- struct vmbus_channel_msginfo *info;
- unsigned long flags;
- int ret, t;
-
- /* ASSERT(gpadl_handle != 0); */
-
- info = kmalloc(sizeof(*info) +
- sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- init_completion(&info->waitevent);
-
- msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
-
- msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
- msg->child_relid = channel->offermsg.child_relid;
- msg->gpadl = gpadl_handle;
-
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_add_tail(&info->msglistentry,
- &vmbus_connection.chn_msg_list);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
- ret = vmbus_post_msg(msg,
- sizeof(struct vmbus_channel_gpadl_teardown));
-
- BUG_ON(ret != 0);
- t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
- BUG_ON(t == 0);
-
- /* Received a torndown response */
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&info->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
- kfree(info);
- return ret;
-}
-EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
-
-/*
- * vmbus_close - Close the specified channel
- */
-void vmbus_close(struct vmbus_channel *channel)
-{
- struct vmbus_channel_close_channel *msg;
- int ret;
-
- /* Stop callback and cancel the timer asap */
- channel->onchannel_callback = NULL;
-
- /* Send a closing message */
-
- msg = &channel->close_msg.msg;
-
- msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
- msg->child_relid = channel->offermsg.child_relid;
-
- ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
-
- BUG_ON(ret != 0);
- /* Tear down the gpadl for the channel's ring buffer */
- if (channel->ringbuffer_gpadlhandle)
- vmbus_teardown_gpadl(channel,
- channel->ringbuffer_gpadlhandle);
-
- /* Cleanup the ring buffers for this channel */
- hv_ringbuffer_cleanup(&channel->outbound);
- hv_ringbuffer_cleanup(&channel->inbound);
-
- free_pages((unsigned long)channel->ringbuffer_pages,
- get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
-
-
-}
-EXPORT_SYMBOL_GPL(vmbus_close);
-
-/**
- * vmbus_sendpacket() - Send the specified buffer on the given channel
- * @channel: Pointer to vmbus_channel structure.
- * @buffer: Pointer to the buffer you want to receive the data into.
- * @bufferlen: Maximum size of what the the buffer will hold
- * @requestid: Identifier of the request
- * @type: Type of packet that is being send e.g. negotiate, time
- * packet etc.
- *
- * Sends data in @buffer directly to hyper-v via the vmbus
- * This will send the data unparsed to hyper-v.
- *
- * Mainly used by Hyper-V drivers.
- */
-int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
- u32 bufferlen, u64 requestid,
- enum vmbus_packet_type type, u32 flags)
-{
- struct vmpacket_descriptor desc;
- u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
- u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
- struct scatterlist bufferlist[3];
- u64 aligned_data = 0;
- int ret;
-
-
- /* Setup the descriptor */
- desc.type = type; /* VmbusPacketTypeDataInBand; */
- desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
- /* in 8-bytes granularity */
- desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
- desc.len8 = (u16)(packetlen_aligned >> 3);
- desc.trans_id = requestid;
-
- sg_init_table(bufferlist, 3);
- sg_set_buf(&bufferlist[0], &desc, sizeof(struct vmpacket_descriptor));
- sg_set_buf(&bufferlist[1], buffer, bufferlen);
- sg_set_buf(&bufferlist[2], &aligned_data,
- packetlen_aligned - packetlen);
-
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
-
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
- vmbus_setevent(channel);
-
- return ret;
-}
-EXPORT_SYMBOL(vmbus_sendpacket);
-
-/*
- * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
- * packets using a GPADL Direct packet type.
- */
-int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount, void *buffer, u32 bufferlen,
- u64 requestid)
-{
- int ret;
- int i;
- struct vmbus_channel_packet_page_buffer desc;
- u32 descsize;
- u32 packetlen;
- u32 packetlen_aligned;
- struct scatterlist bufferlist[3];
- u64 aligned_data = 0;
-
- if (pagecount > MAX_PAGE_BUFFER_COUNT)
- return -EINVAL;
-
-
- /*
- * Adjust the size down since vmbus_channel_packet_page_buffer is the
- * largest size we support
- */
- descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
- ((MAX_PAGE_BUFFER_COUNT - pagecount) *
- sizeof(struct hv_page_buffer));
- packetlen = descsize + bufferlen;
- packetlen_aligned = ALIGN(packetlen, sizeof(u64));
-
- /* Setup the descriptor */
- desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
- desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
- desc.dataoffset8 = descsize >> 3; /* in 8-bytes grandularity */
- desc.length8 = (u16)(packetlen_aligned >> 3);
- desc.transactionid = requestid;
- desc.rangecount = pagecount;
-
- for (i = 0; i < pagecount; i++) {
- desc.range[i].len = pagebuffers[i].len;
- desc.range[i].offset = pagebuffers[i].offset;
- desc.range[i].pfn = pagebuffers[i].pfn;
- }
-
- sg_init_table(bufferlist, 3);
- sg_set_buf(&bufferlist[0], &desc, descsize);
- sg_set_buf(&bufferlist[1], buffer, bufferlen);
- sg_set_buf(&bufferlist[2], &aligned_data,
- packetlen_aligned - packetlen);
-
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
-
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
- vmbus_setevent(channel);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
-
-/*
- * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
- * using a GPADL Direct packet type.
- */
-int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
- struct hv_multipage_buffer *multi_pagebuffer,
- void *buffer, u32 bufferlen, u64 requestid)
-{
- int ret;
- struct vmbus_channel_packet_multipage_buffer desc;
- u32 descsize;
- u32 packetlen;
- u32 packetlen_aligned;
- struct scatterlist bufferlist[3];
- u64 aligned_data = 0;
- u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
- multi_pagebuffer->len);
-
-
- if ((pfncount < 0) || (pfncount > MAX_MULTIPAGE_BUFFER_COUNT))
- return -EINVAL;
-
- /*
- * Adjust the size down since vmbus_channel_packet_multipage_buffer is
- * the largest size we support
- */
- descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
- ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
- sizeof(u64));
- packetlen = descsize + bufferlen;
- packetlen_aligned = ALIGN(packetlen, sizeof(u64));
-
-
- /* Setup the descriptor */
- desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
- desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
- desc.dataoffset8 = descsize >> 3; /* in 8-bytes grandularity */
- desc.length8 = (u16)(packetlen_aligned >> 3);
- desc.transactionid = requestid;
- desc.rangecount = 1;
-
- desc.range.len = multi_pagebuffer->len;
- desc.range.offset = multi_pagebuffer->offset;
-
- memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
- pfncount * sizeof(u64));
-
- sg_init_table(bufferlist, 3);
- sg_set_buf(&bufferlist[0], &desc, descsize);
- sg_set_buf(&bufferlist[1], buffer, bufferlen);
- sg_set_buf(&bufferlist[2], &aligned_data,
- packetlen_aligned - packetlen);
-
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
-
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
- vmbus_setevent(channel);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
-
-/**
- * vmbus_recvpacket() - Retrieve the user packet on the specified channel
- * @channel: Pointer to vmbus_channel structure.
- * @buffer: Pointer to the buffer you want to receive the data into.
- * @bufferlen: Maximum size of what the the buffer will hold
- * @buffer_actual_len: The actual size of the data after it was received
- * @requestid: Identifier of the request
- *
- * Receives directly from the hyper-v vmbus and puts the data it received
- * into Buffer. This will receive the data unparsed from hyper-v.
- *
- * Mainly used by Hyper-V drivers.
- */
-int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
- u32 bufferlen, u32 *buffer_actual_len, u64 *requestid)
-{
- struct vmpacket_descriptor desc;
- u32 packetlen;
- u32 userlen;
- int ret;
- unsigned long flags;
-
- *buffer_actual_len = 0;
- *requestid = 0;
-
- spin_lock_irqsave(&channel->inbound_lock, flags);
-
- ret = hv_ringbuffer_peek(&channel->inbound, &desc,
- sizeof(struct vmpacket_descriptor));
- if (ret != 0) {
- spin_unlock_irqrestore(&channel->inbound_lock, flags);
- return 0;
- }
-
- packetlen = desc.len8 << 3;
- userlen = packetlen - (desc.offset8 << 3);
-
- *buffer_actual_len = userlen;
-
- if (userlen > bufferlen) {
- spin_unlock_irqrestore(&channel->inbound_lock, flags);
-
- pr_err("Buffer too small - got %d needs %d\n",
- bufferlen, userlen);
- return -ETOOSMALL;
- }
-
- *requestid = desc.trans_id;
-
- /* Copy over the packet to the user buffer */
- ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
- (desc.offset8 << 3));
-
- spin_unlock_irqrestore(&channel->inbound_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(vmbus_recvpacket);
-
-/*
- * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
- */
-int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
- u32 bufferlen, u32 *buffer_actual_len,
- u64 *requestid)
-{
- struct vmpacket_descriptor desc;
- u32 packetlen;
- u32 userlen;
- int ret;
- unsigned long flags;
-
- *buffer_actual_len = 0;
- *requestid = 0;
-
- spin_lock_irqsave(&channel->inbound_lock, flags);
-
- ret = hv_ringbuffer_peek(&channel->inbound, &desc,
- sizeof(struct vmpacket_descriptor));
- if (ret != 0) {
- spin_unlock_irqrestore(&channel->inbound_lock, flags);
- return 0;
- }
-
-
- packetlen = desc.len8 << 3;
- userlen = packetlen - (desc.offset8 << 3);
-
- *buffer_actual_len = packetlen;
-
- if (packetlen > bufferlen) {
- spin_unlock_irqrestore(&channel->inbound_lock, flags);
-
- pr_err("Buffer too small - needed %d bytes but "
- "got space for only %d bytes\n",
- packetlen, bufferlen);
- return -2;
- }
-
- *requestid = desc.trans_id;
-
- /* Copy over the entire packet to the user buffer */
- ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0);
-
- spin_unlock_irqrestore(&channel->inbound_lock, flags);
- return 0;
-}
-EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
diff --git a/drivers/staging/hv/hv_mouse.c b/drivers/staging/hv/hv_mouse.c
index d957fc22801..ccd39c70c52 100644
--- a/drivers/staging/hv/hv_mouse.c
+++ b/drivers/staging/hv/hv_mouse.c
@@ -22,20 +22,15 @@
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/hiddev.h>
-#include <linux/pci.h>
-#include <linux/dmi.h>
+#include <linux/hyperv.h>
-#include "hyperv.h"
-
-/*
- * Data types
- */
struct hv_input_dev_info {
+ unsigned int size;
unsigned short vendor;
unsigned short product;
unsigned short version;
- char name[128];
+ unsigned short reserved[11];
};
/* The maximum size of a synthetic input message. */
@@ -54,17 +49,17 @@ struct hv_input_dev_info {
(SYNTHHID_INPUT_VERSION_MAJOR << 16))
-#pragma pack(push,1)
+#pragma pack(push, 1)
/*
* Message types in the synthetic input protocol
*/
enum synthhid_msg_type {
- SynthHidProtocolRequest,
- SynthHidProtocolResponse,
- SynthHidInitialDeviceInfo,
- SynthHidInitialDeviceInfoAck,
- SynthHidInputReport,
- SynthHidMax
+ SYNTH_HID_PROTOCOL_REQUEST,
+ SYNTH_HID_PROTOCOL_RESPONSE,
+ SYNTH_HID_INITIAL_DEVICE_INFO,
+ SYNTH_HID_INITIAL_DEVICE_INFO_ACK,
+ SYNTH_HID_INPUT_REPORT,
+ SYNTH_HID_MAX
};
/*
@@ -120,15 +115,15 @@ struct synthhid_input_report {
#pragma pack(pop)
-#define INPUTVSC_SEND_RING_BUFFER_SIZE 10*PAGE_SIZE
-#define INPUTVSC_RECV_RING_BUFFER_SIZE 10*PAGE_SIZE
+#define INPUTVSC_SEND_RING_BUFFER_SIZE (10*PAGE_SIZE)
+#define INPUTVSC_RECV_RING_BUFFER_SIZE (10*PAGE_SIZE)
#define NBITS(x) (((x)/BITS_PER_LONG)+1)
enum pipe_prot_msg_type {
- PipeMessageInvalid = 0,
- PipeMessageData,
- PipeMessageMaximum
+ PIPE_MESSAGE_INVALID,
+ PIPE_MESSAGE_DATA,
+ PIPE_MESSAGE_MAXIMUM
};
@@ -138,9 +133,6 @@ struct pipe_prt_msg {
char data[1];
};
-/*
- * Data types
- */
struct mousevsc_prt_msg {
enum pipe_prot_msg_type type;
u32 size;
@@ -156,38 +148,22 @@ struct mousevsc_prt_msg {
*/
struct mousevsc_dev {
struct hv_device *device;
- /* 0 indicates the device is being destroyed */
- atomic_t ref_count;
- int num_outstanding_req;
unsigned char init_complete;
struct mousevsc_prt_msg protocol_req;
struct mousevsc_prt_msg protocol_resp;
/* Synchronize the request/response if needed */
- wait_queue_head_t protocol_wait_event;
- wait_queue_head_t dev_info_wait_event;
- int protocol_wait_condition;
- int device_wait_condition;
+ struct completion wait_event;
int dev_info_status;
struct hid_descriptor *hid_desc;
unsigned char *report_desc;
u32 report_desc_size;
struct hv_input_dev_info hid_dev_info;
+ int connected;
+ struct hid_device *hid_device;
};
-static const char *driver_name = "mousevsc";
-
-/* {CFA8B69E-5B4A-4cc0-B98B-8BA1A1F3F95A} */
-static const struct hv_guid mouse_guid = {
- .data = {0x9E, 0xB6, 0xA8, 0xCF, 0x4A, 0x5B, 0xc0, 0x4c,
- 0xB9, 0x8B, 0x8B, 0xA1, 0xA1, 0xF3, 0xF9, 0x5A}
-};
-
-static void deviceinfo_callback(struct hv_device *dev, struct hv_input_dev_info *info);
-static void inputreport_callback(struct hv_device *dev, void *packet, u32 len);
-static void reportdesc_callback(struct hv_device *dev, void *packet, u32 len);
-
static struct mousevsc_dev *alloc_input_device(struct hv_device *device)
{
struct mousevsc_dev *input_dev;
@@ -197,130 +173,21 @@ static struct mousevsc_dev *alloc_input_device(struct hv_device *device)
if (!input_dev)
return NULL;
- /*
- * Set to 2 to allow both inbound and outbound traffics
- * (ie get_input_device() and must_get_input_device()) to proceed.
- */
- atomic_cmpxchg(&input_dev->ref_count, 0, 2);
-
input_dev->device = device;
- device->ext = input_dev;
+ hv_set_drvdata(device, input_dev);
+ init_completion(&input_dev->wait_event);
return input_dev;
}
static void free_input_device(struct mousevsc_dev *device)
{
- WARN_ON(atomic_read(&device->ref_count) == 0);
+ kfree(device->hid_desc);
+ kfree(device->report_desc);
+ hv_set_drvdata(device->device, NULL);
kfree(device);
}
-/*
- * Get the inputdevice object if exists and its refcount > 1
- */
-static struct mousevsc_dev *get_input_device(struct hv_device *device)
-{
- struct mousevsc_dev *input_dev;
-
- input_dev = (struct mousevsc_dev *)device->ext;
-
-/*
- * FIXME
- * This sure isn't a valid thing to print for debugging, no matter
- * what the intention is...
- *
- * printk(KERN_ERR "-------------------------> REFCOUNT = %d",
- * input_dev->ref_count);
- */
-
- if (input_dev && atomic_read(&input_dev->ref_count) > 1)
- atomic_inc(&input_dev->ref_count);
- else
- input_dev = NULL;
-
- return input_dev;
-}
-
-/*
- * Get the inputdevice object iff exists and its refcount > 0
- */
-static struct mousevsc_dev *must_get_input_device(struct hv_device *device)
-{
- struct mousevsc_dev *input_dev;
-
- input_dev = (struct mousevsc_dev *)device->ext;
-
- if (input_dev && atomic_read(&input_dev->ref_count))
- atomic_inc(&input_dev->ref_count);
- else
- input_dev = NULL;
-
- return input_dev;
-}
-
-static void put_input_device(struct hv_device *device)
-{
- struct mousevsc_dev *input_dev;
-
- input_dev = (struct mousevsc_dev *)device->ext;
-
- atomic_dec(&input_dev->ref_count);
-}
-
-/*
- * Drop ref count to 1 to effectively disable get_input_device()
- */
-static struct mousevsc_dev *release_input_device(struct hv_device *device)
-{
- struct mousevsc_dev *input_dev;
-
- input_dev = (struct mousevsc_dev *)device->ext;
-
- /* Busy wait until the ref drop to 2, then set it to 1 */
- while (atomic_cmpxchg(&input_dev->ref_count, 2, 1) != 2)
- udelay(100);
-
- return input_dev;
-}
-
-/*
- * Drop ref count to 0. No one can use input_device object.
- */
-static struct mousevsc_dev *final_release_input_device(struct hv_device *device)
-{
- struct mousevsc_dev *input_dev;
-
- input_dev = (struct mousevsc_dev *)device->ext;
-
- /* Busy wait until the ref drop to 1, then set it to 0 */
- while (atomic_cmpxchg(&input_dev->ref_count, 1, 0) != 1)
- udelay(100);
-
- device->ext = NULL;
- return input_dev;
-}
-
-static void mousevsc_on_send_completion(struct hv_device *device,
- struct vmpacket_descriptor *packet)
-{
- struct mousevsc_dev *input_dev;
- void *request;
-
- input_dev = must_get_input_device(device);
- if (!input_dev) {
- pr_err("unable to get input device...device being destroyed?");
- return;
- }
-
- request = (void *)(unsigned long)packet->trans_id;
-
- if (request == &input_dev->protocol_req) {
- /* FIXME */
- /* Shouldn't we be doing something here? */
- }
-
- put_input_device(device);
-}
static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
struct synthhid_device_info *device_info)
@@ -332,33 +199,27 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
/* Assume success for now */
input_device->dev_info_status = 0;
- /* Save the device attr */
memcpy(&input_device->hid_dev_info, &device_info->hid_dev_info,
sizeof(struct hv_input_dev_info));
- /* Save the hid desc */
desc = &device_info->hid_descriptor;
- WARN_ON(desc->bLength > 0);
+ WARN_ON(desc->bLength == 0);
- input_device->hid_desc = kzalloc(desc->bLength, GFP_KERNEL);
+ input_device->hid_desc = kzalloc(desc->bLength, GFP_ATOMIC);
- if (!input_device->hid_desc) {
- pr_err("unable to allocate hid descriptor - size %d", desc->bLength);
- goto Cleanup;
- }
+ if (!input_device->hid_desc)
+ goto cleanup;
memcpy(input_device->hid_desc, desc, desc->bLength);
- /* Save the report desc */
input_device->report_desc_size = desc->desc[0].wDescriptorLength;
+ if (input_device->report_desc_size == 0)
+ goto cleanup;
input_device->report_desc = kzalloc(input_device->report_desc_size,
- GFP_KERNEL);
+ GFP_ATOMIC);
- if (!input_device->report_desc) {
- pr_err("unable to allocate report descriptor - size %d",
- input_device->report_desc_size);
- goto Cleanup;
- }
+ if (!input_device->report_desc)
+ goto cleanup;
memcpy(input_device->report_desc,
((unsigned char *)desc) + desc->bLength,
@@ -367,10 +228,10 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
/* Send the ack */
memset(&ack, 0, sizeof(struct mousevsc_prt_msg));
- ack.type = PipeMessageData;
+ ack.type = PIPE_MESSAGE_DATA;
ack.size = sizeof(struct synthhid_device_info_ack);
- ack.ack.header.type = SynthHidInitialDeviceInfoAck;
+ ack.ack.header.type = SYNTH_HID_INITIAL_DEVICE_INFO_ACK;
ack.ack.header.size = 1;
ack.ack.reserved = 0;
@@ -381,18 +242,14 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
(unsigned long)&ack,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0) {
- pr_err("unable to send synthhid device info ack - ret %d",
- ret);
- goto Cleanup;
- }
+ if (ret != 0)
+ goto cleanup;
- input_device->device_wait_condition = 1;
- wake_up(&input_device->dev_info_wait_event);
+ complete(&input_device->wait_event);
return;
-Cleanup:
+cleanup:
kfree(input_device->hid_desc);
input_device->hid_desc = NULL;
@@ -400,25 +257,7 @@ Cleanup:
input_device->report_desc = NULL;
input_device->dev_info_status = -1;
- input_device->device_wait_condition = 1;
- wake_up(&input_device->dev_info_wait_event);
-}
-
-static void mousevsc_on_receive_input_report(struct mousevsc_dev *input_device,
- struct synthhid_input_report *input_report)
-{
- struct hv_driver *input_drv;
-
- if (!input_device->init_complete) {
- pr_info("Initialization incomplete...ignoring input_report msg");
- return;
- }
-
- input_drv = drv_to_hv_drv(input_device->device->device.driver);
-
- inputreport_callback(input_device->device,
- input_report->buffer,
- input_report->header.size);
+ complete(&input_device->wait_event);
}
static void mousevsc_on_receive(struct hv_device *device,
@@ -426,37 +265,27 @@ static void mousevsc_on_receive(struct hv_device *device,
{
struct pipe_prt_msg *pipe_msg;
struct synthhid_msg *hid_msg;
- struct mousevsc_dev *input_dev;
-
- input_dev = must_get_input_device(device);
- if (!input_dev) {
- pr_err("unable to get input device...device being destroyed?");
- return;
- }
+ struct mousevsc_dev *input_dev = hv_get_drvdata(device);
+ struct synthhid_input_report *input_report;
pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet +
(packet->offset8 << 3));
- if (pipe_msg->type != PipeMessageData) {
- pr_err("unknown pipe msg type - type %d len %d",
- pipe_msg->type, pipe_msg->size);
- put_input_device(device);
- return ;
- }
+ if (pipe_msg->type != PIPE_MESSAGE_DATA)
+ return;
hid_msg = (struct synthhid_msg *)&pipe_msg->data[0];
switch (hid_msg->header.type) {
- case SynthHidProtocolResponse:
+ case SYNTH_HID_PROTOCOL_RESPONSE:
memcpy(&input_dev->protocol_resp, pipe_msg,
pipe_msg->size + sizeof(struct pipe_prt_msg) -
sizeof(unsigned char));
- input_dev->protocol_wait_condition = 1;
- wake_up(&input_dev->protocol_wait_event);
+ complete(&input_dev->wait_event);
break;
- case SynthHidInitialDeviceInfo:
- WARN_ON(pipe_msg->size >= sizeof(struct hv_input_dev_info));
+ case SYNTH_HID_INITIAL_DEVICE_INFO:
+ WARN_ON(pipe_msg->size < sizeof(struct hv_input_dev_info));
/*
* Parse out the device info into device attr,
@@ -465,10 +294,14 @@ static void mousevsc_on_receive(struct hv_device *device,
mousevsc_on_receive_device_info(input_dev,
(struct synthhid_device_info *)&pipe_msg->data[0]);
break;
- case SynthHidInputReport:
- mousevsc_on_receive_input_report(input_dev,
- (struct synthhid_input_report *)&pipe_msg->data[0]);
-
+ case SYNTH_HID_INPUT_REPORT:
+ input_report =
+ (struct synthhid_input_report *)&pipe_msg->data[0];
+ if (!input_dev->init_complete)
+ break;
+ hid_input_report(input_dev->hid_device,
+ HID_INPUT_REPORT, input_report->buffer,
+ input_report->header.size, 1);
break;
default:
pr_err("unsupported hid msg type - type %d len %d",
@@ -476,7 +309,6 @@ static void mousevsc_on_receive(struct hv_device *device,
break;
}
- put_input_device(device);
}
static void mousevsc_on_channel_callback(void *context)
@@ -484,7 +316,6 @@ static void mousevsc_on_channel_callback(void *context)
const int packetSize = 0x100;
int ret = 0;
struct hv_device *device = (struct hv_device *)context;
- struct mousevsc_dev *input_dev;
u32 bytes_recvd;
u64 req_id;
@@ -493,12 +324,6 @@ static void mousevsc_on_channel_callback(void *context)
unsigned char *buffer = packet;
int bufferlen = packetSize;
- input_dev = must_get_input_device(device);
-
- if (!input_dev) {
- pr_err("unable to get input device...device being destroyed?");
- return;
- }
do {
ret = vmbus_recvpacket_raw(device->channel, buffer,
@@ -509,22 +334,20 @@ static void mousevsc_on_channel_callback(void *context)
desc = (struct vmpacket_descriptor *)buffer;
switch (desc->type) {
- case VM_PKT_COMP:
- mousevsc_on_send_completion(
- device, desc);
- break;
-
- case VM_PKT_DATA_INBAND:
- mousevsc_on_receive(
- device, desc);
- break;
-
- default:
- pr_err("unhandled packet type %d, tid %llx len %d\n",
- desc->type,
- req_id,
- bytes_recvd);
- break;
+ case VM_PKT_COMP:
+ break;
+
+ case VM_PKT_DATA_INBAND:
+ mousevsc_on_receive(
+ device, desc);
+ break;
+
+ default:
+ pr_err("unhandled packet type %d, tid %llx len %d\n",
+ desc->type,
+ req_id,
+ bytes_recvd);
+ break;
}
/* reset */
@@ -535,10 +358,6 @@ static void mousevsc_on_channel_callback(void *context)
bufferlen = packetSize;
}
} else {
- /*
- * pr_debug("nothing else to read...");
- * reset
- */
if (bufferlen > packetSize) {
kfree(buffer);
@@ -547,79 +366,57 @@ static void mousevsc_on_channel_callback(void *context)
}
break;
}
- } else if (ret == -2) {
+ } else if (ret == -ENOBUFS) {
/* Handle large packet */
bufferlen = bytes_recvd;
- buffer = kzalloc(bytes_recvd, GFP_KERNEL);
+ buffer = kzalloc(bytes_recvd, GFP_ATOMIC);
if (buffer == NULL) {
buffer = packet;
bufferlen = packetSize;
-
- /* Try again next time around */
- pr_err("unable to allocate buffer of size %d!",
- bytes_recvd);
break;
}
}
} while (1);
- put_input_device(device);
-
return;
}
static int mousevsc_connect_to_vsp(struct hv_device *device)
{
int ret = 0;
- struct mousevsc_dev *input_dev;
+ int t;
+ struct mousevsc_dev *input_dev = hv_get_drvdata(device);
struct mousevsc_prt_msg *request;
struct mousevsc_prt_msg *response;
- input_dev = get_input_device(device);
-
- if (!input_dev) {
- pr_err("unable to get input device...device being destroyed?");
- return -1;
- }
-
- init_waitqueue_head(&input_dev->protocol_wait_event);
- init_waitqueue_head(&input_dev->dev_info_wait_event);
request = &input_dev->protocol_req;
- /*
- * Now, initiate the vsc/vsp initialization protocol on the open channel
- */
memset(request, 0, sizeof(struct mousevsc_prt_msg));
- request->type = PipeMessageData;
+ request->type = PIPE_MESSAGE_DATA;
request->size = sizeof(struct synthhid_protocol_request);
- request->request.header.type = SynthHidProtocolRequest;
- request->request.header.size = sizeof(unsigned long);
+ request->request.header.type = SYNTH_HID_PROTOCOL_REQUEST;
+ request->request.header.size = sizeof(unsigned int);
request->request.version_requested.version = SYNTHHID_INPUT_VERSION;
- pr_info("synthhid protocol request...");
ret = vmbus_sendpacket(device->channel, request,
- sizeof(struct pipe_prt_msg) -
- sizeof(unsigned char) +
- sizeof(struct synthhid_protocol_request),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0) {
- pr_err("unable to send synthhid protocol request.");
- goto Cleanup;
- }
-
- input_dev->protocol_wait_condition = 0;
- wait_event_timeout(input_dev->protocol_wait_event,
- input_dev->protocol_wait_condition, msecs_to_jiffies(1000));
- if (input_dev->protocol_wait_condition == 0) {
+ sizeof(struct pipe_prt_msg) -
+ sizeof(unsigned char) +
+ sizeof(struct synthhid_protocol_request),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
+ if (t == 0) {
ret = -ETIMEDOUT;
- goto Cleanup;
+ goto cleanup;
}
response = &input_dev->protocol_resp;
@@ -627,51 +424,88 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
if (!response->response.approved) {
pr_err("synthhid protocol request failed (version %d)",
SYNTHHID_INPUT_VERSION);
- ret = -1;
- goto Cleanup;
+ ret = -ENODEV;
+ goto cleanup;
}
- input_dev->device_wait_condition = 0;
- wait_event_timeout(input_dev->dev_info_wait_event,
- input_dev->device_wait_condition, msecs_to_jiffies(1000));
- if (input_dev->device_wait_condition == 0) {
+ t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
+ if (t == 0) {
ret = -ETIMEDOUT;
- goto Cleanup;
+ goto cleanup;
}
/*
* We should have gotten the device attr, hid desc and report
* desc at this point
*/
- if (!input_dev->dev_info_status)
- pr_info("**** input channel up and running!! ****");
- else
- ret = -1;
+ if (input_dev->dev_info_status)
+ ret = -ENOMEM;
-Cleanup:
- put_input_device(device);
+cleanup:
return ret;
}
-static int mousevsc_on_device_add(struct hv_device *device,
- void *additional_info)
+static int mousevsc_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void mousevsc_hid_close(struct hid_device *hid)
+{
+}
+
+static struct hid_ll_driver mousevsc_ll_driver = {
+ .open = mousevsc_hid_open,
+ .close = mousevsc_hid_close,
+};
+
+static struct hid_driver mousevsc_hid_driver;
+
+static void reportdesc_callback(struct hv_device *dev, void *packet, u32 len)
+{
+ struct hid_device *hid_dev;
+ struct mousevsc_dev *input_device = hv_get_drvdata(dev);
+
+ hid_dev = hid_allocate_device();
+ if (IS_ERR(hid_dev))
+ return;
+
+ hid_dev->ll_driver = &mousevsc_ll_driver;
+ hid_dev->driver = &mousevsc_hid_driver;
+
+ if (hid_parse_report(hid_dev, packet, len))
+ return;
+
+ hid_dev->bus = BUS_VIRTUAL;
+ hid_dev->vendor = input_device->hid_dev_info.vendor;
+ hid_dev->product = input_device->hid_dev_info.product;
+ hid_dev->version = input_device->hid_dev_info.version;
+
+ sprintf(hid_dev->name, "%s", "Microsoft Vmbus HID-compliant Mouse");
+
+ if (!hidinput_connect(hid_dev, 0)) {
+ hid_dev->claimed |= HID_CLAIMED_INPUT;
+
+ input_device->connected = 1;
+
+ }
+
+ input_device->hid_device = hid_dev;
+}
+
+static int mousevsc_on_device_add(struct hv_device *device)
{
int ret = 0;
struct mousevsc_dev *input_dev;
- struct hv_driver *input_drv;
- struct hv_input_dev_info dev_info;
input_dev = alloc_input_device(device);
- if (!input_dev) {
- ret = -1;
- goto Cleanup;
- }
+ if (!input_dev)
+ return -ENOMEM;
input_dev->init_complete = false;
- /* Open the channel */
ret = vmbus_open(device->channel,
INPUTVSC_SEND_RING_BUFFER_SIZE,
INPUTVSC_RECV_RING_BUFFER_SIZE,
@@ -682,34 +516,20 @@ static int mousevsc_on_device_add(struct hv_device *device,
);
if (ret != 0) {
- pr_err("unable to open channel: %d", ret);
free_input_device(input_dev);
- return -1;
+ return ret;
}
- pr_info("InputVsc channel open: %d", ret);
ret = mousevsc_connect_to_vsp(device);
if (ret != 0) {
- pr_err("unable to connect channel: %d", ret);
-
vmbus_close(device->channel);
free_input_device(input_dev);
return ret;
}
- input_drv = drv_to_hv_drv(input_dev->device->device.driver);
-
- dev_info.vendor = input_dev->hid_dev_info.vendor;
- dev_info.product = input_dev->hid_dev_info.product;
- dev_info.version = input_dev->hid_dev_info.version;
- strcpy(dev_info.name, "Microsoft Vmbus HID-compliant Mouse");
- /* Send the device info back up */
- deviceinfo_callback(device, &dev_info);
-
- /* Send the report desc back up */
/* workaround SA-167 */
if (input_dev->report_desc[14] == 0x25)
input_dev->report_desc[14] = 0x29;
@@ -719,256 +539,61 @@ static int mousevsc_on_device_add(struct hv_device *device,
input_dev->init_complete = true;
-Cleanup:
return ret;
}
-static int mousevsc_on_device_remove(struct hv_device *device)
-{
- struct mousevsc_dev *input_dev;
- int ret = 0;
-
- pr_info("disabling input device (%p)...",
- device->ext);
-
- input_dev = release_input_device(device);
-
-
- /*
- * At this point, all outbound traffic should be disable. We only
- * allow inbound traffic (responses) to proceed
- *
- * so that outstanding requests can be completed.
- */
- while (input_dev->num_outstanding_req) {
- pr_info("waiting for %d requests to complete...",
- input_dev->num_outstanding_req);
-
- udelay(100);
- }
-
- pr_info("removing input device (%p)...", device->ext);
-
- input_dev = final_release_input_device(device);
-
- pr_info("input device (%p) safe to remove", input_dev);
-
- /* Close the channel */
- vmbus_close(device->channel);
-
- free_input_device(input_dev);
-
- return ret;
-}
-
-
-/*
- * Data types
- */
-struct input_device_context {
- struct hv_device *device_ctx;
- struct hid_device *hid_device;
- struct hv_input_dev_info device_info;
- int connected;
-};
-
-
-static void deviceinfo_callback(struct hv_device *dev, struct hv_input_dev_info *info)
-{
- struct input_device_context *input_device_ctx =
- dev_get_drvdata(&dev->device);
-
- memcpy(&input_device_ctx->device_info, info,
- sizeof(struct hv_input_dev_info));
-
- DPRINT_INFO(INPUTVSC_DRV, "%s", __func__);
-}
-
-static void inputreport_callback(struct hv_device *dev, void *packet, u32 len)
-{
- int ret = 0;
-
- struct input_device_context *input_dev_ctx =
- dev_get_drvdata(&dev->device);
-
- ret = hid_input_report(input_dev_ctx->hid_device,
- HID_INPUT_REPORT, packet, len, 1);
-
- DPRINT_DBG(INPUTVSC_DRV, "hid_input_report (ret %d)", ret);
-}
-
-static int mousevsc_hid_open(struct hid_device *hid)
-{
- return 0;
-}
-
-static void mousevsc_hid_close(struct hid_device *hid)
-{
-}
-
-static int mousevsc_probe(struct hv_device *dev)
+static int mousevsc_probe(struct hv_device *dev,
+ const struct hv_vmbus_device_id *dev_id)
{
- int ret = 0;
-
- struct input_device_context *input_dev_ctx;
-
- input_dev_ctx = kmalloc(sizeof(struct input_device_context),
- GFP_KERNEL);
-
- dev_set_drvdata(&dev->device, input_dev_ctx);
-
- /* Call to the vsc driver to add the device */
- ret = mousevsc_on_device_add(dev, NULL);
-
- if (ret != 0) {
- DPRINT_ERR(INPUTVSC_DRV, "unable to add input vsc device");
- return -1;
- }
+ return mousevsc_on_device_add(dev);
- return 0;
}
static int mousevsc_remove(struct hv_device *dev)
{
- int ret = 0;
-
- struct input_device_context *input_dev_ctx;
-
- input_dev_ctx = kmalloc(sizeof(struct input_device_context),
- GFP_KERNEL);
-
- dev_set_drvdata(&dev->device, input_dev_ctx);
-
- if (input_dev_ctx->connected) {
- hidinput_disconnect(input_dev_ctx->hid_device);
- input_dev_ctx->connected = 0;
- }
+ struct mousevsc_dev *input_dev = hv_get_drvdata(dev);
- /*
- * Call to the vsc driver to let it know that the device
- * is being removed
- */
- ret = mousevsc_on_device_remove(dev);
+ vmbus_close(dev->channel);
- if (ret != 0) {
- DPRINT_ERR(INPUTVSC_DRV,
- "unable to remove vsc device (ret %d)", ret);
+ if (input_dev->connected) {
+ hidinput_disconnect(input_dev->hid_device);
+ input_dev->connected = 0;
+ hid_destroy_device(input_dev->hid_device);
}
- kfree(input_dev_ctx);
+ free_input_device(input_dev);
- return ret;
+ return 0;
}
-static void reportdesc_callback(struct hv_device *dev, void *packet, u32 len)
-{
- struct input_device_context *input_device_ctx =
- dev_get_drvdata(&dev->device);
- struct hid_device *hid_dev;
-
- /* hid_debug = -1; */
- hid_dev = kmalloc(sizeof(struct hid_device), GFP_KERNEL);
-
- if (hid_parse_report(hid_dev, packet, len)) {
- DPRINT_INFO(INPUTVSC_DRV, "Unable to call hd_parse_report");
- return;
- }
-
- if (hid_dev) {
- DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
-
- hid_dev->ll_driver->open = mousevsc_hid_open;
- hid_dev->ll_driver->close = mousevsc_hid_close;
-
- hid_dev->bus = BUS_VIRTUAL;
- hid_dev->vendor = input_device_ctx->device_info.vendor;
- hid_dev->product = input_device_ctx->device_info.product;
- hid_dev->version = input_device_ctx->device_info.version;
- hid_dev->dev = dev->device;
-
- sprintf(hid_dev->name, "%s",
- input_device_ctx->device_info.name);
-
- /*
- * HJ Do we want to call it with a 0
- */
- if (!hidinput_connect(hid_dev, 0)) {
- hid_dev->claimed |= HID_CLAIMED_INPUT;
-
- input_device_ctx->connected = 1;
-
- DPRINT_INFO(INPUTVSC_DRV,
- "HID device claimed by input\n");
- }
-
- if (!hid_dev->claimed) {
- DPRINT_ERR(INPUTVSC_DRV,
- "HID device not claimed by "
- "input or hiddev\n");
- }
-
- input_device_ctx->hid_device = hid_dev;
- }
-
- kfree(hid_dev);
-}
+static const struct hv_vmbus_device_id id_table[] = {
+ /* Mouse guid */
+ { VMBUS_DEVICE(0x9E, 0xB6, 0xA8, 0xCF, 0x4A, 0x5B, 0xc0, 0x4c,
+ 0xB9, 0x8B, 0x8B, 0xA1, 0xA1, 0xF3, 0xF9, 0x5A) },
+ { },
+};
+MODULE_DEVICE_TABLE(vmbus, id_table);
static struct hv_driver mousevsc_drv = {
+ .name = "mousevsc",
+ .id_table = id_table,
.probe = mousevsc_probe,
.remove = mousevsc_remove,
};
-static void mousevsc_drv_exit(void)
-{
- vmbus_child_driver_unregister(&mousevsc_drv.driver);
-}
-
static int __init mousevsc_init(void)
{
- struct hv_driver *drv = &mousevsc_drv;
-
- DPRINT_INFO(INPUTVSC_DRV, "Hyper-V Mouse driver initializing.");
-
- memcpy(&drv->dev_type, &mouse_guid,
- sizeof(struct hv_guid));
-
- drv->driver.name = driver_name;
-
- /* The driver belongs to vmbus */
- vmbus_child_driver_register(&drv->driver);
-
- return 0;
+ return vmbus_driver_register(&mousevsc_drv);
}
static void __exit mousevsc_exit(void)
{
- mousevsc_drv_exit();
+ vmbus_driver_unregister(&mousevsc_drv);
}
-/*
- * We don't want to automatically load this driver just yet, it's quite
- * broken. It's safe if you want to load it yourself manually, but
- * don't inflict it on unsuspecting users, that's just mean.
- */
-#if 0
-
-/*
- * We use a PCI table to determine if we should autoload this driver This is
- * needed by distro tools to determine if the hyperv drivers should be
- * installed and/or configured. We don't do anything else with the table, but
- * it needs to be present.
- */
-const static struct pci_device_id microsoft_hv_pci_table[] = {
- { PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, microsoft_hv_pci_table);
-#endif
-
MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
module_init(mousevsc_init);
module_exit(mousevsc_exit);
-
diff --git a/drivers/staging/hv/hv_util.c b/drivers/staging/hv/hv_util.c
deleted file mode 100644
index c164b54b4cd..00000000000
--- a/drivers/staging/hv/hv_util.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright (c) 2010, Microsoft Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Authors:
- * Haiyang Zhang <haiyangz@microsoft.com>
- * Hank Janssen <hjanssen@microsoft.com>
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sysctl.h>
-#include <linux/reboot.h>
-#include <linux/dmi.h>
-#include <linux/pci.h>
-
-#include "hyperv.h"
-#include "hv_kvp.h"
-
-static u8 *shut_txf_buf;
-static u8 *time_txf_buf;
-static u8 *hbeat_txf_buf;
-
-static void shutdown_onchannelcallback(void *context)
-{
- struct vmbus_channel *channel = context;
- u32 recvlen;
- u64 requestid;
- u8 execute_shutdown = false;
-
- struct shutdown_msg_data *shutdown_msg;
-
- struct icmsg_hdr *icmsghdrp;
- struct icmsg_negotiate *negop = NULL;
-
- vmbus_recvpacket(channel, shut_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
-
- if (recvlen > 0) {
- icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
- sizeof(struct vmbuspipe_hdr)];
-
- if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- prep_negotiate_resp(icmsghdrp, negop, shut_txf_buf);
- } else {
- shutdown_msg =
- (struct shutdown_msg_data *)&shut_txf_buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
-
- switch (shutdown_msg->flags) {
- case 0:
- case 1:
- icmsghdrp->status = HV_S_OK;
- execute_shutdown = true;
-
- pr_info("Shutdown request received -"
- " graceful shutdown initiated\n");
- break;
- default:
- icmsghdrp->status = HV_E_FAIL;
- execute_shutdown = false;
-
- pr_info("Shutdown request received -"
- " Invalid request\n");
- break;
- }
- }
-
- icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
- | ICMSGHDRFLAG_RESPONSE;
-
- vmbus_sendpacket(channel, shut_txf_buf,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
- }
-
- if (execute_shutdown == true)
- orderly_poweroff(false);
-}
-
-/*
- * Set guest time to host UTC time.
- */
-static inline void do_adj_guesttime(u64 hosttime)
-{
- s64 host_tns;
- struct timespec host_ts;
-
- host_tns = (hosttime - WLTIMEDELTA) * 100;
- host_ts = ns_to_timespec(host_tns);
-
- do_settimeofday(&host_ts);
-}
-
-/*
- * Synchronize time with host after reboot, restore, etc.
- *
- * ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
- * After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
- * message after the timesync channel is opened. Since the hv_utils module is
- * loaded after hv_vmbus, the first message is usually missed. The other
- * thing is, systime is automatically set to emulated hardware clock which may
- * not be UTC time or in the same time zone. So, to override these effects, we
- * use the first 50 time samples for initial system time setting.
- */
-static inline void adj_guesttime(u64 hosttime, u8 flags)
-{
- static s32 scnt = 50;
-
- if ((flags & ICTIMESYNCFLAG_SYNC) != 0) {
- do_adj_guesttime(hosttime);
- return;
- }
-
- if ((flags & ICTIMESYNCFLAG_SAMPLE) != 0 && scnt > 0) {
- scnt--;
- do_adj_guesttime(hosttime);
- }
-}
-
-/*
- * Time Sync Channel message handler.
- */
-static void timesync_onchannelcallback(void *context)
-{
- struct vmbus_channel *channel = context;
- u32 recvlen;
- u64 requestid;
- struct icmsg_hdr *icmsghdrp;
- struct ictimesync_data *timedatap;
-
- vmbus_recvpacket(channel, time_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
-
- if (recvlen > 0) {
- icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
- sizeof(struct vmbuspipe_hdr)];
-
- if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf);
- } else {
- timedatap = (struct ictimesync_data *)&time_txf_buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
- adj_guesttime(timedatap->parenttime, timedatap->flags);
- }
-
- icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
- | ICMSGHDRFLAG_RESPONSE;
-
- vmbus_sendpacket(channel, time_txf_buf,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
- }
-}
-
-/*
- * Heartbeat functionality.
- * Every two seconds, Hyper-V send us a heartbeat request message.
- * we respond to this message, and Hyper-V knows we are alive.
- */
-static void heartbeat_onchannelcallback(void *context)
-{
- struct vmbus_channel *channel = context;
- u32 recvlen;
- u64 requestid;
- struct icmsg_hdr *icmsghdrp;
- struct heartbeat_msg_data *heartbeat_msg;
-
- vmbus_recvpacket(channel, hbeat_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
-
- if (recvlen > 0) {
- icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
- sizeof(struct vmbuspipe_hdr)];
-
- if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- prep_negotiate_resp(icmsghdrp, NULL, hbeat_txf_buf);
- } else {
- heartbeat_msg =
- (struct heartbeat_msg_data *)&hbeat_txf_buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
-
- heartbeat_msg->seq_num += 1;
- }
-
- icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
- | ICMSGHDRFLAG_RESPONSE;
-
- vmbus_sendpacket(channel, hbeat_txf_buf,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
- }
-}
-
-static const struct pci_device_id __initconst
-hv_utils_pci_table[] __maybe_unused = {
- { PCI_DEVICE(0x1414, 0x5353) }, /* Hyper-V emulated VGA controller */
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, hv_utils_pci_table);
-
-
-static const struct dmi_system_id __initconst
-hv_utils_dmi_table[] __maybe_unused = {
- {
- .ident = "Hyper-V",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
- DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
- },
- },
- { },
-};
-MODULE_DEVICE_TABLE(dmi, hv_utils_dmi_table);
-
-
-static int __init init_hyperv_utils(void)
-{
- pr_info("Registering HyperV Utility Driver\n");
-
- if (hv_kvp_init())
- return -ENODEV;
-
-
- if (!dmi_check_system(hv_utils_dmi_table))
- return -ENODEV;
-
- shut_txf_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- time_txf_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- hbeat_txf_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-
- if (!shut_txf_buf || !time_txf_buf || !hbeat_txf_buf) {
- pr_info("Unable to allocate memory for receive buffer\n");
- kfree(shut_txf_buf);
- kfree(time_txf_buf);
- kfree(hbeat_txf_buf);
- return -ENOMEM;
- }
-
- hv_cb_utils[HV_SHUTDOWN_MSG].callback = &shutdown_onchannelcallback;
-
- hv_cb_utils[HV_TIMESYNC_MSG].callback = &timesync_onchannelcallback;
-
- hv_cb_utils[HV_HEARTBEAT_MSG].callback = &heartbeat_onchannelcallback;
-
- hv_cb_utils[HV_KVP_MSG].callback = &hv_kvp_onchannelcallback;
-
- return 0;
-}
-
-static void exit_hyperv_utils(void)
-{
- pr_info("De-Registered HyperV Utility Driver\n");
-
- if (hv_cb_utils[HV_SHUTDOWN_MSG].channel != NULL)
- hv_cb_utils[HV_SHUTDOWN_MSG].channel->onchannel_callback =
- &chn_cb_negotiate;
- hv_cb_utils[HV_SHUTDOWN_MSG].callback = NULL;
-
- if (hv_cb_utils[HV_TIMESYNC_MSG].channel != NULL)
- hv_cb_utils[HV_TIMESYNC_MSG].channel->onchannel_callback =
- &chn_cb_negotiate;
- hv_cb_utils[HV_TIMESYNC_MSG].callback = NULL;
-
- if (hv_cb_utils[HV_HEARTBEAT_MSG].channel != NULL)
- hv_cb_utils[HV_HEARTBEAT_MSG].channel->onchannel_callback =
- &chn_cb_negotiate;
- hv_cb_utils[HV_HEARTBEAT_MSG].callback = NULL;
-
- if (hv_cb_utils[HV_KVP_MSG].channel != NULL)
- hv_cb_utils[HV_KVP_MSG].channel->onchannel_callback =
- &chn_cb_negotiate;
- hv_cb_utils[HV_KVP_MSG].callback = NULL;
-
- hv_kvp_deinit();
-
- kfree(shut_txf_buf);
- kfree(time_txf_buf);
- kfree(hbeat_txf_buf);
-}
-
-module_init(init_hyperv_utils);
-module_exit(exit_hyperv_utils);
-
-MODULE_DESCRIPTION("Hyper-V Utilities");
-MODULE_VERSION(HV_DRV_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/hv/hyperv_net.h b/drivers/staging/hv/hyperv_net.h
index 27f987b48df..ac1ec840512 100644
--- a/drivers/staging/hv/hyperv_net.h
+++ b/drivers/staging/hv/hyperv_net.h
@@ -26,7 +26,7 @@
#define _HYPERV_NET_H
#include <linux/list.h>
-#include "hyperv.h"
+#include <linux/hyperv.h>
/* Fwd declaration */
struct hv_netvsc_packet;
@@ -96,7 +96,6 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
unsigned int status);
int netvsc_recv_callback(struct hv_device *device_obj,
struct hv_netvsc_packet *packet);
-int netvsc_initialize(struct hv_driver *drv);
int rndis_filter_open(struct hv_device *dev);
int rndis_filter_close(struct hv_device *dev);
int rndis_filter_device_add(struct hv_device *dev,
@@ -370,8 +369,8 @@ struct nvsp_message {
struct netvsc_device {
struct hv_device *dev;
- atomic_t refcnt;
atomic_t num_outstanding_sends;
+ bool destroy;
/*
* List of free preallocated hv_netvsc_packet to represent receive
* packet
@@ -393,6 +392,8 @@ struct netvsc_device {
struct nvsp_message revoke_packet;
/* unsigned char HwMacAddr[HW_MACADDR_LEN]; */
+ struct net_device *ndev;
+
/* Holds rndis device info */
void *extension;
};
diff --git a/drivers/staging/hv/hyperv_storage.h b/drivers/staging/hv/hyperv_storage.h
deleted file mode 100644
index a01f9a07c98..00000000000
--- a/drivers/staging/hv/hyperv_storage.h
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- *
- * Copyright (c) 2011, Microsoft Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Authors:
- * Haiyang Zhang <haiyangz@microsoft.com>
- * Hank Janssen <hjanssen@microsoft.com>
- * K. Y. Srinivasan <kys@microsoft.com>
- *
- */
-
-#ifndef _HYPERV_STORAGE_H
-#define _HYPERV_STORAGE_H
-
-
-/* vstorage.w revision number. This is used in the case of a version match, */
-/* to alert the user that structure sizes may be mismatched even though the */
-/* protocol versions match. */
-
-
-#define REVISION_STRING(REVISION_) #REVISION_
-#define FILL_VMSTOR_REVISION(RESULT_LVALUE_) \
- do { \
- char *revision_string \
- = REVISION_STRING($Rev : 6 $) + 6; \
- RESULT_LVALUE_ = 0; \
- while (*revision_string >= '0' \
- && *revision_string <= '9') { \
- RESULT_LVALUE_ *= 10; \
- RESULT_LVALUE_ += *revision_string - '0'; \
- revision_string++; \
- } \
- } while (0)
-
-/* Major/minor macros. Minor version is in LSB, meaning that earlier flat */
-/* version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1). */
-#define VMSTOR_PROTOCOL_MAJOR(VERSION_) (((VERSION_) >> 8) & 0xff)
-#define VMSTOR_PROTOCOL_MINOR(VERSION_) (((VERSION_)) & 0xff)
-#define VMSTOR_PROTOCOL_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
- (((MINOR_) & 0xff)))
-#define VMSTOR_INVALID_PROTOCOL_VERSION (-1)
-
-/* Version history: */
-/* V1 Beta 0.1 */
-/* V1 RC < 2008/1/31 1.0 */
-/* V1 RC > 2008/1/31 2.0 */
-#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(2, 0)
-
-
-
-
-/* This will get replaced with the max transfer length that is possible on */
-/* the host adapter. */
-/* The max transfer length will be published when we offer a vmbus channel. */
-#define MAX_TRANSFER_LENGTH 0x40000
-#define DEFAULT_PACKET_SIZE (sizeof(struct vmdata_gpa_direct) + \
- sizeof(struct vstor_packet) + \
- sizesizeof(u64) * (MAX_TRANSFER_LENGTH / PAGE_SIZE)))
-
-
-/* Packet structure describing virtual storage requests. */
-enum vstor_packet_operation {
- VSTOR_OPERATION_COMPLETE_IO = 1,
- VSTOR_OPERATION_REMOVE_DEVICE = 2,
- VSTOR_OPERATION_EXECUTE_SRB = 3,
- VSTOR_OPERATION_RESET_LUN = 4,
- VSTOR_OPERATION_RESET_ADAPTER = 5,
- VSTOR_OPERATION_RESET_BUS = 6,
- VSTOR_OPERATION_BEGIN_INITIALIZATION = 7,
- VSTOR_OPERATION_END_INITIALIZATION = 8,
- VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9,
- VSTOR_OPERATION_QUERY_PROPERTIES = 10,
- VSTOR_OPERATION_MAXIMUM = 10
-};
-
-/*
- * Platform neutral description of a scsi request -
- * this remains the same across the write regardless of 32/64 bit
- * note: it's patterned off the SCSI_PASS_THROUGH structure
- */
-#define CDB16GENERIC_LENGTH 0x10
-
-#ifndef SENSE_BUFFER_SIZE
-#define SENSE_BUFFER_SIZE 0x12
-#endif
-
-#define MAX_DATA_BUF_LEN_WITH_PADDING 0x14
-
-struct vmscsi_request {
- unsigned short length;
- unsigned char srb_status;
- unsigned char scsi_status;
-
- unsigned char port_number;
- unsigned char path_id;
- unsigned char target_id;
- unsigned char lun;
-
- unsigned char cdb_length;
- unsigned char sense_info_length;
- unsigned char data_in;
- unsigned char reserved;
-
- unsigned int data_transfer_length;
-
- union {
- unsigned char cdb[CDB16GENERIC_LENGTH];
- unsigned char sense_data[SENSE_BUFFER_SIZE];
- unsigned char reserved_array[MAX_DATA_BUF_LEN_WITH_PADDING];
- };
-} __attribute((packed));
-
-
-/*
- * This structure is sent during the intialization phase to get the different
- * properties of the channel.
- */
-struct vmstorage_channel_properties {
- unsigned short protocol_version;
- unsigned char path_id;
- unsigned char target_id;
-
- /* Note: port number is only really known on the client side */
- unsigned int port_number;
- unsigned int flags;
- unsigned int max_transfer_bytes;
-
- /* This id is unique for each channel and will correspond with */
- /* vendor specific data in the inquirydata */
- unsigned long long unique_id;
-} __packed;
-
-/* This structure is sent during the storage protocol negotiations. */
-struct vmstorage_protocol_version {
- /* Major (MSW) and minor (LSW) version numbers. */
- unsigned short major_minor;
-
- /*
- * Revision number is auto-incremented whenever this file is changed
- * (See FILL_VMSTOR_REVISION macro above). Mismatch does not
- * definitely indicate incompatibility--but it does indicate mismatched
- * builds.
- */
- unsigned short revision;
-} __packed;
-
-/* Channel Property Flags */
-#define STORAGE_CHANNEL_REMOVABLE_FLAG 0x1
-#define STORAGE_CHANNEL_EMULATED_IDE_FLAG 0x2
-
-struct vstor_packet {
- /* Requested operation type */
- enum vstor_packet_operation operation;
-
- /* Flags - see below for values */
- unsigned int flags;
-
- /* Status of the request returned from the server side. */
- unsigned int status;
-
- /* Data payload area */
- union {
- /*
- * Structure used to forward SCSI commands from the
- * client to the server.
- */
- struct vmscsi_request vm_srb;
-
- /* Structure used to query channel properties. */
- struct vmstorage_channel_properties storage_channel_properties;
-
- /* Used during version negotiations. */
- struct vmstorage_protocol_version version;
- };
-} __packed;
-
-/* Packet flags */
-/*
- * This flag indicates that the server should send back a completion for this
- * packet.
- */
-#define REQUEST_COMPLETION_FLAG 0x1
-
-/* This is the set of flags that the vsc can set in any packets it sends */
-#define VSC_LEGAL_FLAGS (REQUEST_COMPLETION_FLAG)
-
-
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include "hyperv_storage.h"
-#include "hyperv.h"
-
-/* Defines */
-#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
-#define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
-
-#define STORVSC_MAX_IO_REQUESTS 128
-
-/*
- * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
- * reality, the path/target is not used (ie always set to 0) so our
- * scsi host adapter essentially has 1 bus with 1 target that contains
- * up to 256 luns.
- */
-#define STORVSC_MAX_LUNS_PER_TARGET 64
-#define STORVSC_MAX_TARGETS 1
-#define STORVSC_MAX_CHANNELS 1
-
-struct hv_storvsc_request;
-
-/* Matches Windows-end */
-enum storvsc_request_type {
- WRITE_TYPE,
- READ_TYPE,
- UNKNOWN_TYPE,
-};
-
-
-struct hv_storvsc_request {
- struct hv_storvsc_request *request;
- struct hv_device *device;
-
- /* Synchronize the request/response if needed */
- struct completion wait_event;
-
- unsigned char *sense_buffer;
- void *context;
- void (*on_io_completion)(struct hv_storvsc_request *request);
- struct hv_multipage_buffer data_buffer;
-
- struct vstor_packet vstor_packet;
-};
-
-
-struct storvsc_device_info {
- u32 ring_buffer_size;
- unsigned int port_number;
- unsigned char path_id;
- unsigned char target_id;
-};
-
-struct storvsc_major_info {
- int major;
- int index;
- bool do_register;
- char *devname;
- char *diskname;
-};
-
-/* A storvsc device is a device object that contains a vmbus channel */
-struct storvsc_device {
- struct hv_device *device;
-
- /* 0 indicates the device is being destroyed */
- atomic_t ref_count;
-
- bool drain_notify;
- atomic_t num_outstanding_req;
-
- wait_queue_head_t waiting_to_drain;
-
- /*
- * Each unique Port/Path/Target represents 1 channel ie scsi
- * controller. In reality, the pathid, targetid is always 0
- * and the port is set by us
- */
- unsigned int port_number;
- unsigned char path_id;
- unsigned char target_id;
-
- /* Used for vsc/vsp channel reset process */
- struct hv_storvsc_request init_request;
- struct hv_storvsc_request reset_request;
-};
-
-
-/* Get the stordevice object iff exists and its refcount > 1 */
-static inline struct storvsc_device *get_stor_device(struct hv_device *device)
-{
- struct storvsc_device *stor_device;
-
- stor_device = (struct storvsc_device *)device->ext;
- if (stor_device && atomic_read(&stor_device->ref_count) > 1)
- atomic_inc(&stor_device->ref_count);
- else
- stor_device = NULL;
-
- return stor_device;
-}
-
-
-static inline void put_stor_device(struct hv_device *device)
-{
- struct storvsc_device *stor_device;
-
- stor_device = (struct storvsc_device *)device->ext;
-
- atomic_dec(&stor_device->ref_count);
-}
-
-static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
-{
- dev->drain_notify = true;
- wait_event(dev->waiting_to_drain,
- atomic_read(&dev->num_outstanding_req) == 0);
- dev->drain_notify = false;
-}
-
-/* Interface */
-
-int storvsc_dev_add(struct hv_device *device,
- void *additional_info);
-int storvsc_dev_remove(struct hv_device *device);
-
-int storvsc_do_io(struct hv_device *device,
- struct hv_storvsc_request *request);
-
-int storvsc_get_major_info(struct storvsc_device_info *device_info,
- struct storvsc_major_info *major_info);
-
-#endif /* _HYPERV_STORAGE_H */
diff --git a/drivers/staging/hv/netvsc.c b/drivers/staging/hv/netvsc.c
index dc5e5c488e3..b902579c7fe 100644
--- a/drivers/staging/hv/netvsc.c
+++ b/drivers/staging/hv/netvsc.c
@@ -27,122 +27,63 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/netdevice.h>
-#include "hyperv.h"
#include "hyperv_net.h"
-/* Globals */
-static const char *driver_name = "netvsc";
-
-/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
-static const struct hv_guid netvsc_device_type = {
- .data = {
- 0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
- 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E
- }
-};
-
-
static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
struct netvsc_device *net_device;
+ struct net_device *ndev = hv_get_drvdata(device);
net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
if (!net_device)
return NULL;
- /* Set to 2 to allow both inbound and outbound traffic */
- atomic_cmpxchg(&net_device->refcnt, 0, 2);
+ net_device->destroy = false;
net_device->dev = device;
- device->ext = net_device;
+ net_device->ndev = ndev;
+ hv_set_drvdata(device, net_device);
return net_device;
}
-static void free_net_device(struct netvsc_device *device)
-{
- WARN_ON(atomic_read(&device->refcnt) != 0);
- device->dev->ext = NULL;
- kfree(device);
-}
-
-
-/* Get the net device object iff exists and its refcount > 1 */
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
struct netvsc_device *net_device;
- net_device = device->ext;
- if (net_device && atomic_read(&net_device->refcnt) > 1)
- atomic_inc(&net_device->refcnt);
- else
+ net_device = hv_get_drvdata(device);
+ if (net_device && net_device->destroy)
net_device = NULL;
return net_device;
}
-/* Get the net device object iff exists and its refcount > 0 */
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
struct netvsc_device *net_device;
- net_device = device->ext;
- if (net_device && atomic_read(&net_device->refcnt))
- atomic_inc(&net_device->refcnt);
- else
- net_device = NULL;
-
- return net_device;
-}
-
-static void put_net_device(struct hv_device *device)
-{
- struct netvsc_device *net_device;
-
- net_device = device->ext;
-
- atomic_dec(&net_device->refcnt);
-}
-
-static struct netvsc_device *release_outbound_net_device(
- struct hv_device *device)
-{
- struct netvsc_device *net_device;
+ net_device = hv_get_drvdata(device);
- net_device = device->ext;
- if (net_device == NULL)
- return NULL;
+ if (!net_device)
+ goto get_in_err;
- /* Busy wait until the ref drop to 2, then set it to 1 */
- while (atomic_cmpxchg(&net_device->refcnt, 2, 1) != 2)
- udelay(100);
+ if (net_device->destroy &&
+ atomic_read(&net_device->num_outstanding_sends) == 0)
+ net_device = NULL;
+get_in_err:
return net_device;
}
-static struct netvsc_device *release_inbound_net_device(
- struct hv_device *device)
-{
- struct netvsc_device *net_device;
-
- net_device = device->ext;
- if (net_device == NULL)
- return NULL;
-
- /* Busy wait until the ref drop to 1, then set it to 0 */
- while (atomic_cmpxchg(&net_device->refcnt, 1, 0) != 1)
- udelay(100);
-
- device->ext = NULL;
- return net_device;
-}
static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
{
struct nvsp_message *revoke_packet;
int ret = 0;
+ struct net_device *ndev = net_device->ndev;
/*
* If we got a section count, it means we received a
@@ -170,9 +111,9 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
* have a leak rather than continue and a bugchk
*/
if (ret != 0) {
- dev_err(&net_device->dev->device, "unable to send "
- "revoke receive buffer to netvsp");
- return -1;
+ netdev_err(ndev, "unable to send "
+ "revoke receive buffer to netvsp\n");
+ return ret;
}
}
@@ -185,9 +126,9 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
* rather than continue and a bugchk
*/
if (ret != 0) {
- dev_err(&net_device->dev->device,
- "unable to teardown receive buffer's gpadl");
- return -1;
+ netdev_err(ndev,
+ "unable to teardown receive buffer's gpadl\n");
+ return ret;
}
net_device->recv_buf_gpadl_handle = 0;
}
@@ -214,21 +155,20 @@ static int netvsc_init_recv_buf(struct hv_device *device)
int t;
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
+ struct net_device *ndev;
net_device = get_outbound_net_device(device);
- if (!net_device) {
- dev_err(&device->device, "unable to get net device..."
- "device being destroyed?");
- return -1;
- }
+ if (!net_device)
+ return -ENODEV;
+ ndev = net_device->ndev;
net_device->recv_buf =
(void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
get_order(net_device->recv_buf_size));
if (!net_device->recv_buf) {
- dev_err(&device->device, "unable to allocate receive "
- "buffer of size %d", net_device->recv_buf_size);
- ret = -1;
+ netdev_err(ndev, "unable to allocate receive "
+ "buffer of size %d\n", net_device->recv_buf_size);
+ ret = -ENOMEM;
goto cleanup;
}
@@ -241,8 +181,8 @@ static int netvsc_init_recv_buf(struct hv_device *device)
net_device->recv_buf_size,
&net_device->recv_buf_gpadl_handle);
if (ret != 0) {
- dev_err(&device->device,
- "unable to establish receive buffer's gpadl");
+ netdev_err(ndev,
+ "unable to establish receive buffer's gpadl\n");
goto cleanup;
}
@@ -265,8 +205,8 @@ static int netvsc_init_recv_buf(struct hv_device *device)
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0) {
- dev_err(&device->device,
- "unable to send receive buffer's gpadl to netvsp");
+ netdev_err(ndev,
+ "unable to send receive buffer's gpadl to netvsp\n");
goto cleanup;
}
@@ -277,11 +217,11 @@ static int netvsc_init_recv_buf(struct hv_device *device)
/* Check the response */
if (init_packet->msg.v1_msg.
send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
- dev_err(&device->device, "Unable to complete receive buffer "
- "initialzation with NetVsp - status %d",
+ netdev_err(ndev, "Unable to complete receive buffer "
+ "initialization with NetVsp - status %d\n",
init_packet->msg.v1_msg.
send_recv_buf_complete.status);
- ret = -1;
+ ret = -EINVAL;
goto cleanup;
}
@@ -293,7 +233,7 @@ static int netvsc_init_recv_buf(struct hv_device *device)
net_device->recv_section = kmalloc(net_device->recv_section_cnt
* sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
if (net_device->recv_section == NULL) {
- ret = -1;
+ ret = -EINVAL;
goto cleanup;
}
@@ -309,7 +249,7 @@ static int netvsc_init_recv_buf(struct hv_device *device)
*/
if (net_device->recv_section_cnt != 1 ||
net_device->recv_section->offset != 0) {
- ret = -1;
+ ret = -EINVAL;
goto cleanup;
}
@@ -319,7 +259,6 @@ cleanup:
netvsc_destroy_recv_buf(net_device);
exit:
- put_net_device(device);
return ret;
}
@@ -330,13 +269,12 @@ static int netvsc_connect_vsp(struct hv_device *device)
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
int ndis_version;
+ struct net_device *ndev;
net_device = get_outbound_net_device(device);
- if (!net_device) {
- dev_err(&device->device, "unable to get net device..."
- "device being destroyed?");
- return -1;
- }
+ if (!net_device)
+ return -ENODEV;
+ ndev = net_device->ndev;
init_packet = &net_device->channel_init_pkt;
@@ -366,13 +304,13 @@ static int netvsc_connect_vsp(struct hv_device *device)
if (init_packet->msg.init_msg.init_complete.status !=
NVSP_STAT_SUCCESS) {
- ret = -1;
+ ret = -EINVAL;
goto cleanup;
}
if (init_packet->msg.init_msg.init_complete.
negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
- ret = -1;
+ ret = -EPROTO;
goto cleanup;
}
/* Send the ndis version */
@@ -393,16 +331,13 @@ static int netvsc_connect_vsp(struct hv_device *device)
sizeof(struct nvsp_message),
(unsigned long)init_packet,
VM_PKT_DATA_INBAND, 0);
- if (ret != 0) {
- ret = -1;
+ if (ret != 0)
goto cleanup;
- }
/* Post the big receive buffer to NetVSP */
ret = netvsc_init_recv_buf(device);
cleanup:
- put_net_device(device);
return ret;
}
@@ -418,29 +353,40 @@ int netvsc_device_remove(struct hv_device *device)
{
struct netvsc_device *net_device;
struct hv_netvsc_packet *netvsc_packet, *pos;
+ unsigned long flags;
- /* Stop outbound traffic ie sends and receives completions */
- net_device = release_outbound_net_device(device);
- if (!net_device) {
- dev_err(&device->device, "No net device present!!");
- return -1;
- }
+ net_device = hv_get_drvdata(device);
+ spin_lock_irqsave(&device->channel->inbound_lock, flags);
+ net_device->destroy = true;
+ spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
/* Wait for all send completions */
while (atomic_read(&net_device->num_outstanding_sends)) {
- dev_err(&device->device,
- "waiting for %d requests to complete...",
+ dev_info(&device->device,
+ "waiting for %d requests to complete...\n",
atomic_read(&net_device->num_outstanding_sends));
udelay(100);
}
netvsc_disconnect_vsp(net_device);
- /* Stop inbound traffic ie receives and sends completions */
- net_device = release_inbound_net_device(device);
+ /*
+ * Since we have already drained, we don't need to busy wait
+ * as was done in final_release_stor_device()
+ * Note that we cannot set the ext pointer to NULL until
+ * we have drained - to drain the outgoing packets, we need to
+ * allow incoming packets.
+ */
+
+ spin_lock_irqsave(&device->channel->inbound_lock, flags);
+ hv_set_drvdata(device, NULL);
+ spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
- /* At this point, no one should be accessing netDevice except in here */
- dev_notice(&device->device, "net device safe to remove");
+ /*
+ * At this point, no one should be accessing net_device
+ * except in here
+ */
+ dev_notice(&device->device, "net device safe to remove\n");
/* Now, we can close the channel safely */
vmbus_close(device->channel);
@@ -452,7 +398,7 @@ int netvsc_device_remove(struct hv_device *device)
kfree(netvsc_packet);
}
- free_net_device(net_device);
+ kfree(net_device);
return 0;
}
@@ -462,13 +408,12 @@ static void netvsc_send_completion(struct hv_device *device,
struct netvsc_device *net_device;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *nvsc_packet;
+ struct net_device *ndev;
net_device = get_inbound_net_device(device);
- if (!net_device) {
- dev_err(&device->device, "unable to get net device..."
- "device being destroyed?");
+ if (!net_device)
return;
- }
+ ndev = net_device->ndev;
nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
(packet->offset8 << 3));
@@ -494,11 +439,10 @@ static void netvsc_send_completion(struct hv_device *device,
atomic_dec(&net_device->num_outstanding_sends);
} else {
- dev_err(&device->device, "Unknown send completion packet type- "
- "%d received!!", nvsp_packet->hdr.msg_type);
+ netdev_err(ndev, "Unknown send completion packet type- "
+ "%d received!!\n", nvsp_packet->hdr.msg_type);
}
- put_net_device(device);
}
int netvsc_send(struct hv_device *device,
@@ -506,15 +450,13 @@ int netvsc_send(struct hv_device *device,
{
struct netvsc_device *net_device;
int ret = 0;
-
struct nvsp_message sendMessage;
+ struct net_device *ndev;
net_device = get_outbound_net_device(device);
- if (!net_device) {
- dev_err(&device->device, "net device (%p) shutting down..."
- "ignoring outbound packets", net_device);
- return -2;
- }
+ if (!net_device)
+ return -ENODEV;
+ ndev = net_device->ndev;
sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (packet->is_data_pkt) {
@@ -547,11 +489,11 @@ int netvsc_send(struct hv_device *device,
}
if (ret != 0)
- dev_err(&device->device, "Unable to send packet %p ret %d",
+ netdev_err(ndev, "Unable to send packet %p ret %d\n",
packet, ret);
+ else
+ atomic_inc(&net_device->num_outstanding_sends);
- atomic_inc(&net_device->num_outstanding_sends);
- put_net_device(device);
return ret;
}
@@ -561,6 +503,10 @@ static void netvsc_send_recv_completion(struct hv_device *device,
struct nvsp_message recvcompMessage;
int retries = 0;
int ret;
+ struct net_device *ndev;
+ struct netvsc_device *net_device = hv_get_drvdata(device);
+
+ ndev = net_device->ndev;
recvcompMessage.hdr.msg_type =
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
@@ -577,23 +523,23 @@ retry_send_cmplt:
if (ret == 0) {
/* success */
/* no-op */
- } else if (ret == -1) {
+ } else if (ret == -EAGAIN) {
/* no more room...wait a bit and attempt to retry 3 times */
retries++;
- dev_err(&device->device, "unable to send receive completion pkt"
- " (tid %llx)...retrying %d", transaction_id, retries);
+ netdev_err(ndev, "unable to send receive completion pkt"
+ " (tid %llx)...retrying %d\n", transaction_id, retries);
if (retries < 4) {
udelay(100);
goto retry_send_cmplt;
} else {
- dev_err(&device->device, "unable to send receive "
- "completion pkt (tid %llx)...give up retrying",
+ netdev_err(ndev, "unable to send receive "
+ "completion pkt (tid %llx)...give up retrying\n",
transaction_id);
}
} else {
- dev_err(&device->device, "unable to send receive "
- "completion pkt - %llx", transaction_id);
+ netdev_err(ndev, "unable to send receive "
+ "completion pkt - %llx\n", transaction_id);
}
}
@@ -606,6 +552,7 @@ static void netvsc_receive_completion(void *context)
u64 transaction_id = 0;
bool fsend_receive_comp = false;
unsigned long flags;
+ struct net_device *ndev;
/*
* Even though it seems logical to do a GetOutboundNetDevice() here to
@@ -613,11 +560,9 @@ static void netvsc_receive_completion(void *context)
* since we may have disable outbound traffic already.
*/
net_device = get_inbound_net_device(device);
- if (!net_device) {
- dev_err(&device->device, "unable to get net device..."
- "device being destroyed?");
+ if (!net_device)
return;
- }
+ ndev = net_device->ndev;
/* Overloading use of the lock. */
spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
@@ -644,7 +589,6 @@ static void netvsc_receive_completion(void *context)
if (fsend_receive_comp)
netvsc_send_recv_completion(device, transaction_id);
- put_net_device(device);
}
static void netvsc_receive(struct hv_device *device,
@@ -661,24 +605,22 @@ static void netvsc_receive(struct hv_device *device,
int i, j;
int count = 0, bytes_remain = 0;
unsigned long flags;
+ struct net_device *ndev;
LIST_HEAD(listHead);
net_device = get_inbound_net_device(device);
- if (!net_device) {
- dev_err(&device->device, "unable to get net device..."
- "device being destroyed?");
+ if (!net_device)
return;
- }
+ ndev = net_device->ndev;
/*
* All inbound packets other than send completion should be xfer page
* packet
*/
if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
- dev_err(&device->device, "Unknown packet type received - %d",
+ netdev_err(ndev, "Unknown packet type received - %d\n",
packet->type);
- put_net_device(device);
return;
}
@@ -688,19 +630,17 @@ static void netvsc_receive(struct hv_device *device,
/* Make sure this is a valid nvsp packet */
if (nvsp_packet->hdr.msg_type !=
NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
- dev_err(&device->device, "Unknown nvsp packet type received-"
- " %d", nvsp_packet->hdr.msg_type);
- put_net_device(device);
+ netdev_err(ndev, "Unknown nvsp packet type received-"
+ " %d\n", nvsp_packet->hdr.msg_type);
return;
}
vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
- dev_err(&device->device, "Invalid xfer page set id - "
- "expecting %x got %x", NETVSC_RECEIVE_BUFFER_ID,
+ netdev_err(ndev, "Invalid xfer page set id - "
+ "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
vmxferpage_packet->xfer_pageset_id);
- put_net_device(device);
return;
}
@@ -724,8 +664,8 @@ static void netvsc_receive(struct hv_device *device,
* some of the xfer page packet ranges...
*/
if (count < 2) {
- dev_err(&device->device, "Got only %d netvsc pkt...needed "
- "%d pkts. Dropping this xfer page packet completely!",
+ netdev_err(ndev, "Got only %d netvsc pkt...needed "
+ "%d pkts. Dropping this xfer page packet completely!\n",
count, vmxferpage_packet->range_cnt + 1);
/* Return it to the freelist */
@@ -740,7 +680,6 @@ static void netvsc_receive(struct hv_device *device,
netvsc_send_recv_completion(device,
vmxferpage_packet->d.trans_id);
- put_net_device(device);
return;
}
@@ -752,8 +691,8 @@ static void netvsc_receive(struct hv_device *device,
xferpage_packet->count = count - 1;
if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
- dev_err(&device->device, "Needed %d netvsc pkts to satisy "
- "this xfer page...got %d",
+ netdev_err(ndev, "Needed %d netvsc pkts to satisfy "
+ "this xfer page...got %d\n",
vmxferpage_packet->range_cnt, xferpage_packet->count);
}
@@ -828,7 +767,6 @@ static void netvsc_receive(struct hv_device *device,
completion.recv.recv_completion_ctx);
}
- put_net_device(device);
}
static void netvsc_channel_cb(void *context)
@@ -842,6 +780,7 @@ static void netvsc_channel_cb(void *context)
struct vmpacket_descriptor *desc;
unsigned char *buffer;
int bufferlen = NETVSC_PACKET_SIZE;
+ struct net_device *ndev;
packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
GFP_ATOMIC);
@@ -850,11 +789,9 @@ static void netvsc_channel_cb(void *context)
buffer = packet;
net_device = get_inbound_net_device(device);
- if (!net_device) {
- dev_err(&device->device, "net device (%p) shutting down..."
- "ignoring inbound packets", net_device);
+ if (!net_device)
goto out;
- }
+ ndev = net_device->ndev;
do {
ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
@@ -872,7 +809,7 @@ static void netvsc_channel_cb(void *context)
break;
default:
- dev_err(&device->device,
+ netdev_err(ndev,
"unhandled packet type %d, "
"tid %llx len %d\n",
desc->type, request_id,
@@ -896,14 +833,14 @@ static void netvsc_channel_cb(void *context)
break;
}
- } else if (ret == -2) {
+ } else if (ret == -ENOBUFS) {
/* Handle large packet */
buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
if (buffer == NULL) {
/* Try again next time around */
- dev_err(&device->device,
+ netdev_err(ndev,
"unable to allocate buffer of size "
- "(%d)!!", bytes_recvd);
+ "(%d)!!\n", bytes_recvd);
break;
}
@@ -911,7 +848,6 @@ static void netvsc_channel_cb(void *context)
}
} while (1);
- put_net_device(device);
out:
kfree(buffer);
return;
@@ -929,13 +865,23 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
((struct netvsc_device_info *)additional_info)->ring_size;
struct netvsc_device *net_device;
struct hv_netvsc_packet *packet, *pos;
+ struct net_device *ndev;
net_device = alloc_net_device(device);
if (!net_device) {
- ret = -1;
+ ret = -ENOMEM;
goto cleanup;
}
+ /*
+ * Coming into this function, struct net_device * is
+ * registered as the driver private data.
+ * In alloc_net_device(), we register struct netvsc_device *
+ * as the driver private data and stash away struct net_device *
+ * in struct netvsc_device *.
+ */
+ ndev = net_device->ndev;
+
/* Initialize the NetVSC channel extension */
net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
spin_lock_init(&net_device->recv_pkt_list_lock);
@@ -960,20 +906,18 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
netvsc_channel_cb, device);
if (ret != 0) {
- dev_err(&device->device, "unable to open channel: %d", ret);
- ret = -1;
+ netdev_err(ndev, "unable to open channel: %d\n", ret);
goto cleanup;
}
/* Channel is opened */
- pr_info("hv_netvsc channel opened successfully");
+ pr_info("hv_netvsc channel opened successfully\n");
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device);
if (ret != 0) {
- dev_err(&device->device,
- "unable to connect to NetVSP - %d", ret);
- ret = -1;
+ netdev_err(ndev,
+ "unable to connect to NetVSP - %d\n", ret);
goto close;
}
@@ -993,23 +937,8 @@ cleanup:
kfree(packet);
}
- release_outbound_net_device(device);
- release_inbound_net_device(device);
-
- free_net_device(net_device);
+ kfree(net_device);
}
return ret;
}
-
-/*
- * netvsc_initialize - Main entry point
- */
-int netvsc_initialize(struct hv_driver *drv)
-{
-
- drv->name = driver_name;
- memcpy(&drv->dev_type, &netvsc_device_type, sizeof(struct hv_guid));
-
- return 0;
-}
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 61989f0d9f0..93b0e91cbf9 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -33,14 +33,11 @@
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/slab.h>
-#include <linux/dmi.h>
-#include <linux/pci.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
-#include "hyperv.h"
#include "hyperv_net.h"
struct net_device_context {
@@ -72,20 +69,15 @@ static int netvsc_open(struct net_device *net)
struct hv_device *device_obj = net_device_ctx->device_ctx;
int ret = 0;
- if (netif_carrier_ok(net)) {
- /* Open up the device */
- ret = rndis_filter_open(device_obj);
- if (ret != 0) {
- netdev_err(net, "unable to open device (ret %d).\n",
- ret);
- return ret;
- }
-
- netif_start_queue(net);
- } else {
- netdev_err(net, "unable to open device...link is down.\n");
+ /* Open up the device */
+ ret = rndis_filter_open(device_obj);
+ if (ret != 0) {
+ netdev_err(net, "unable to open device (ret %d).\n", ret);
+ return ret;
}
+ netif_start_queue(net);
+
return ret;
}
@@ -143,12 +135,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
(num_pages * sizeof(struct hv_page_buffer)) +
sizeof(struct rndis_filter_packet), GFP_ATOMIC);
if (!packet) {
- /* out of memory, silently drop packet */
+ /* out of memory, drop packet */
netdev_err(net, "unable to allocate hv_netvsc_packet\n");
dev_kfree_skb(skb);
net->stats.tx_dropped++;
- return NETDEV_TX_OK;
+ return NETDEV_TX_BUSY;
}
packet->extension = (void *)(unsigned long)packet +
@@ -169,11 +161,11 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* Additional fragments are after SKB data */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- packet->page_buf[i+2].pfn = page_to_pfn(f->page);
+ packet->page_buf[i+2].pfn = page_to_pfn(skb_frag_page(f));
packet->page_buf[i+2].offset = f->page_offset;
- packet->page_buf[i+2].len = f->size;
+ packet->page_buf[i+2].len = skb_frag_size(f);
}
/* Set the completion routine */
@@ -193,10 +185,11 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
} else {
/* we are shutting down or bus overloaded, just drop packet */
net->stats.tx_dropped++;
- netvsc_xmit_completion(packet);
+ kfree(packet);
+ dev_kfree_skb_any(skb);
}
- return NETDEV_TX_OK;
+ return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
/*
@@ -205,8 +198,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
void netvsc_linkstatus_callback(struct hv_device *device_obj,
unsigned int status)
{
- struct net_device *net = dev_get_drvdata(&device_obj->device);
+ struct net_device *net;
struct net_device_context *ndev_ctx;
+ struct netvsc_device *net_device;
+
+ net_device = hv_get_drvdata(device_obj);
+ net = net_device->ndev;
if (!net) {
netdev_err(net, "got link status but net device "
@@ -217,8 +214,8 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
if (status == 1) {
netif_carrier_on(net);
netif_wake_queue(net);
- netif_notify_peers(net);
ndev_ctx = netdev_priv(net);
+ schedule_delayed_work(&ndev_ctx->dwork, 0);
schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
netif_carrier_off(net);
@@ -238,6 +235,10 @@ int netvsc_recv_callback(struct hv_device *device_obj,
void *data;
int i;
unsigned long flags;
+ struct netvsc_device *net_device;
+
+ net_device = hv_get_drvdata(device_obj);
+ net = net_device->ndev;
if (!net) {
netdev_err(net, "got receive callback but net device"
@@ -307,7 +308,7 @@ static const struct net_device_ops device_ops = {
.ndo_open = netvsc_open,
.ndo_stop = netvsc_close,
.ndo_start_xmit = netvsc_start_xmit,
- .ndo_set_multicast_list = netvsc_set_multicast_list,
+ .ndo_set_rx_mode = netvsc_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -324,14 +325,17 @@ static void netvsc_send_garp(struct work_struct *w)
{
struct net_device_context *ndev_ctx;
struct net_device *net;
+ struct netvsc_device *net_device;
ndev_ctx = container_of(w, struct net_device_context, dwork.work);
- net = dev_get_drvdata(&ndev_ctx->device_ctx->device);
+ net_device = hv_get_drvdata(ndev_ctx->device_ctx);
+ net = net_device->ndev;
netif_notify_peers(net);
}
-static int netvsc_probe(struct hv_device *dev)
+static int netvsc_probe(struct hv_device *dev,
+ const struct hv_vmbus_device_id *dev_id)
{
struct net_device *net = NULL;
struct net_device_context *net_device_ctx;
@@ -340,7 +344,7 @@ static int netvsc_probe(struct hv_device *dev)
net = alloc_etherdev(sizeof(struct net_device_context));
if (!net)
- return -1;
+ return -ENOMEM;
/* Set initial state */
netif_carrier_off(net);
@@ -348,24 +352,9 @@ static int netvsc_probe(struct hv_device *dev)
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
atomic_set(&net_device_ctx->avail, ring_size);
- dev_set_drvdata(&dev->device, net);
+ hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
- /* Notify the netvsc driver of the new device */
- device_info.ring_size = ring_size;
- ret = rndis_filter_device_add(dev, &device_info);
- if (ret != 0) {
- free_netdev(net);
- dev_set_drvdata(&dev->device, NULL);
-
- netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- return ret;
- }
-
- netif_carrier_on(net);
-
- memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
-
net->netdev_ops = &device_ops;
/* TODO: Add GSO and Checksum offload */
@@ -377,18 +366,37 @@ static int netvsc_probe(struct hv_device *dev)
ret = register_netdev(net);
if (ret != 0) {
- /* Remove the device and release the resource */
- rndis_filter_device_remove(dev);
+ pr_err("Unable to register netdev.\n");
+ free_netdev(net);
+ goto out;
+ }
+
+ /* Notify the netvsc driver of the new device */
+ device_info.ring_size = ring_size;
+ ret = rndis_filter_device_add(dev, &device_info);
+ if (ret != 0) {
+ netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
+ unregister_netdev(net);
free_netdev(net);
+ hv_set_drvdata(dev, NULL);
+ return ret;
}
+ memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+
+ netif_carrier_on(net);
+out:
return ret;
}
static int netvsc_remove(struct hv_device *dev)
{
- struct net_device *net = dev_get_drvdata(&dev->device);
+ struct net_device *net;
struct net_device_context *ndev_ctx;
+ struct netvsc_device *net_device;
+
+ net_device = hv_get_drvdata(dev);
+ net = net_device->ndev;
if (net == NULL) {
dev_err(&dev->device, "No net device to remove\n");
@@ -413,61 +421,33 @@ static int netvsc_remove(struct hv_device *dev)
return 0;
}
+static const struct hv_vmbus_device_id id_table[] = {
+ /* Network guid */
+ { VMBUS_DEVICE(0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
+ 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E) },
+ { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
/* The one and only one */
static struct hv_driver netvsc_drv = {
+ .name = "netvsc",
+ .id_table = id_table,
.probe = netvsc_probe,
.remove = netvsc_remove,
};
static void __exit netvsc_drv_exit(void)
{
- vmbus_child_driver_unregister(&netvsc_drv.driver);
+ vmbus_driver_unregister(&netvsc_drv);
}
-
-static const struct dmi_system_id __initconst
-hv_netvsc_dmi_table[] __maybe_unused = {
- {
- .ident = "Hyper-V",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
- DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
- },
- },
- { },
-};
-MODULE_DEVICE_TABLE(dmi, hv_netvsc_dmi_table);
-
static int __init netvsc_drv_init(void)
{
- struct hv_driver *drv = &netvsc_drv;
- int ret;
-
- pr_info("initializing....");
-
- if (!dmi_check_system(hv_netvsc_dmi_table))
- return -ENODEV;
-
-
- /* Callback to client driver to complete the initialization */
- netvsc_initialize(drv);
-
- drv->driver.name = drv->name;
-
- /* The driver belongs to vmbus */
- ret = vmbus_child_driver_register(&drv->driver);
-
- return ret;
+ return vmbus_driver_register(&netvsc_drv);
}
-static const struct pci_device_id __initconst
-hv_netvsc_pci_table[] __maybe_unused = {
- { PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, hv_netvsc_pci_table);
-
MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
index dbb52019975..bafccb36004 100644
--- a/drivers/staging/hv/rndis_filter.c
+++ b/drivers/staging/hv/rndis_filter.c
@@ -27,7 +27,6 @@
#include <linux/if_ether.h>
#include <linux/netdevice.h>
-#include "hyperv.h"
#include "hyperv_net.h"
@@ -42,7 +41,7 @@ struct rndis_device {
struct netvsc_device *net_dev;
enum rndis_device_state state;
- u32 link_stat;
+ bool link_state;
atomic_t new_req_id;
spinlock_t request_lock;
@@ -142,7 +141,11 @@ static void put_rndis_request(struct rndis_device *dev,
static void dump_rndis_message(struct hv_device *hv_dev,
struct rndis_message *rndis_msg)
{
- struct net_device *netdev = dev_get_drvdata(&hv_dev->device);
+ struct net_device *netdev;
+ struct netvsc_device *net_device;
+
+ net_device = hv_get_drvdata(hv_dev);
+ netdev = net_device->ndev;
switch (rndis_msg->ndis_msg_type) {
case REMOTE_NDIS_PACKET_MSG:
@@ -250,6 +253,9 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
struct rndis_request *request = NULL;
bool found = false;
unsigned long flags;
+ struct net_device *ndev;
+
+ ndev = dev->net_dev->ndev;
spin_lock_irqsave(&dev->request_lock, flags);
list_for_each_entry(request, &dev->req_list, list_ent) {
@@ -270,7 +276,7 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
memcpy(&request->response_msg, resp,
resp->msg_len);
} else {
- dev_err(&dev->net_dev->dev->device,
+ netdev_err(ndev,
"rndis response buffer overflow "
"detected (size %u max %zu)\n",
resp->msg_len,
@@ -290,7 +296,7 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
complete(&request->wait_event);
} else {
- dev_err(&dev->net_dev->dev->device,
+ netdev_err(ndev,
"no rndis request found for this response "
"(id 0x%x res type 0x%x)\n",
resp->msg.init_complete.req_id,
@@ -323,6 +329,7 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
{
struct rndis_packet *rndis_pkt;
u32 data_offset;
+ int i;
rndis_pkt = &msg->msg.pkt;
@@ -338,6 +345,15 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
pkt->page_buf[0].offset += data_offset;
pkt->page_buf[0].len -= data_offset;
+ /* Drop the 0th page, if rndis data go beyond page boundary */
+ if (pkt->page_buf[0].offset >= PAGE_SIZE) {
+ pkt->page_buf[1].offset = pkt->page_buf[0].offset - PAGE_SIZE;
+ pkt->page_buf[1].len -= pkt->page_buf[1].offset;
+ pkt->page_buf_cnt--;
+ for (i = 0; i < pkt->page_buf_cnt; i++)
+ pkt->page_buf[i] = pkt->page_buf[i+1];
+ }
+
pkt->is_data_pkt = true;
netvsc_recv_callback(dev->net_dev->dev, pkt);
@@ -346,26 +362,29 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
int rndis_filter_receive(struct hv_device *dev,
struct hv_netvsc_packet *pkt)
{
- struct netvsc_device *net_dev = dev->ext;
+ struct netvsc_device *net_dev = hv_get_drvdata(dev);
struct rndis_device *rndis_dev;
struct rndis_message rndis_msg;
struct rndis_message *rndis_hdr;
+ struct net_device *ndev;
if (!net_dev)
return -EINVAL;
+ ndev = net_dev->ndev;
+
/* Make sure the rndis device state is initialized */
if (!net_dev->extension) {
- dev_err(&dev->device, "got rndis message but no rndis device - "
+ netdev_err(ndev, "got rndis message but no rndis device - "
"dropping this message!\n");
- return -1;
+ return -ENODEV;
}
rndis_dev = (struct rndis_device *)net_dev->extension;
if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
- dev_err(&dev->device, "got rndis message but rndis device "
+ netdev_err(ndev, "got rndis message but rndis device "
"uninitialized...dropping this message!\n");
- return -1;
+ return -ENODEV;
}
rndis_hdr = (struct rndis_message *)kmap_atomic(
@@ -377,7 +396,7 @@ int rndis_filter_receive(struct hv_device *dev,
/* Make sure we got a valid rndis message */
if ((rndis_hdr->ndis_msg_type != REMOTE_NDIS_PACKET_MSG) &&
(rndis_hdr->msg_len > sizeof(struct rndis_message))) {
- dev_err(&dev->device, "incoming rndis message buffer overflow "
+ netdev_err(ndev, "incoming rndis message buffer overflow "
"detected (got %u, max %zu)..marking it an error!\n",
rndis_hdr->msg_len,
sizeof(struct rndis_message));
@@ -410,7 +429,7 @@ int rndis_filter_receive(struct hv_device *dev,
rndis_filter_receive_indicate_status(rndis_dev, &rndis_msg);
break;
default:
- dev_err(&dev->device,
+ netdev_err(ndev,
"unhandled rndis message (type %u len %u)\n",
rndis_msg.ndis_msg_type,
rndis_msg.msg_len);
@@ -437,8 +456,8 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
request = get_rndis_request(dev, REMOTE_NDIS_QUERY_MSG,
RNDIS_MESSAGE_SIZE(struct rndis_query_request));
if (!request) {
- ret = -1;
- goto Cleanup;
+ ret = -ENOMEM;
+ goto cleanup;
}
/* Setup the rndis query */
@@ -450,12 +469,12 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
ret = rndis_filter_send_request(dev, request);
if (ret != 0)
- goto Cleanup;
+ goto cleanup;
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
- goto Cleanup;
+ goto cleanup;
}
/* Copy the response back */
@@ -463,7 +482,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
if (query_complete->info_buflen > inresult_size) {
ret = -1;
- goto Cleanup;
+ goto cleanup;
}
memcpy(result,
@@ -473,7 +492,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
*result_size = query_complete->info_buflen;
-Cleanup:
+cleanup:
if (request)
put_rndis_request(dev, request);
@@ -492,10 +511,15 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev)
static int rndis_filter_query_device_link_status(struct rndis_device *dev)
{
u32 size = sizeof(u32);
+ u32 link_status;
+ int ret;
- return rndis_filter_query_device(dev,
+ ret = rndis_filter_query_device(dev,
RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
- &dev->link_stat, &size);
+ &link_status, &size);
+ dev->link_state = (link_status != 0) ? true : false;
+
+ return ret;
}
static int rndis_filter_set_packet_filter(struct rndis_device *dev,
@@ -506,13 +530,16 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
struct rndis_set_complete *set_complete;
u32 status;
int ret, t;
+ struct net_device *ndev;
+
+ ndev = dev->net_dev->ndev;
request = get_rndis_request(dev, REMOTE_NDIS_SET_MSG,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
sizeof(u32));
if (!request) {
- ret = -1;
- goto Cleanup;
+ ret = -ENOMEM;
+ goto cleanup;
}
/* Setup the rndis set */
@@ -526,30 +553,27 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
ret = rndis_filter_send_request(dev, request);
if (ret != 0)
- goto Cleanup;
+ goto cleanup;
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
- ret = -1;
- dev_err(&dev->net_dev->dev->device,
+ netdev_err(ndev,
"timeout before we got a set response...\n");
/*
* We can't deallocate the request since we may still receive a
* send completion for it.
*/
- goto Exit;
+ goto exit;
} else {
- if (ret > 0)
- ret = 0;
set_complete = &request->response_msg.msg.set_complete;
status = set_complete->status;
}
-Cleanup:
+cleanup:
if (request)
put_rndis_request(dev, request);
-Exit:
+exit:
return ret;
}
@@ -565,8 +589,8 @@ static int rndis_filter_init_device(struct rndis_device *dev)
request = get_rndis_request(dev, REMOTE_NDIS_INITIALIZE_MSG,
RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
if (!request) {
- ret = -1;
- goto Cleanup;
+ ret = -ENOMEM;
+ goto cleanup;
}
/* Setup the rndis set */
@@ -581,7 +605,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
ret = rndis_filter_send_request(dev, request);
if (ret != 0) {
dev->state = RNDIS_DEV_UNINITIALIZED;
- goto Cleanup;
+ goto cleanup;
}
@@ -589,7 +613,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
if (t == 0) {
ret = -ETIMEDOUT;
- goto Cleanup;
+ goto cleanup;
}
init_complete = &request->response_msg.msg.init_complete;
@@ -599,10 +623,10 @@ static int rndis_filter_init_device(struct rndis_device *dev)
ret = 0;
} else {
dev->state = RNDIS_DEV_UNINITIALIZED;
- ret = -1;
+ ret = -EINVAL;
}
-Cleanup:
+cleanup:
if (request)
put_rndis_request(dev, request);
@@ -618,7 +642,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
request = get_rndis_request(dev, REMOTE_NDIS_HALT_MSG,
RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
if (!request)
- goto Cleanup;
+ goto cleanup;
/* Setup the rndis set */
halt = &request->request_msg.msg.halt_req;
@@ -629,7 +653,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
dev->state = RNDIS_DEV_UNINITIALIZED;
-Cleanup:
+cleanup:
if (request)
put_rndis_request(dev, request);
return;
@@ -670,13 +694,13 @@ int rndis_filter_device_add(struct hv_device *dev,
void *additional_info)
{
int ret;
- struct netvsc_device *netDevice;
- struct rndis_device *rndisDevice;
- struct netvsc_device_info *deviceInfo = additional_info;
+ struct netvsc_device *net_device;
+ struct rndis_device *rndis_device;
+ struct netvsc_device_info *device_info = additional_info;
- rndisDevice = get_rndis_device();
- if (!rndisDevice)
- return -1;
+ rndis_device = get_rndis_device();
+ if (!rndis_device)
+ return -ENODEV;
/*
* Let the inner driver handle this first to create the netvsc channel
@@ -685,19 +709,19 @@ int rndis_filter_device_add(struct hv_device *dev,
*/
ret = netvsc_device_add(dev, additional_info);
if (ret != 0) {
- kfree(rndisDevice);
+ kfree(rndis_device);
return ret;
}
/* Initialize the rndis device */
- netDevice = dev->ext;
+ net_device = hv_get_drvdata(dev);
- netDevice->extension = rndisDevice;
- rndisDevice->net_dev = netDevice;
+ net_device->extension = rndis_device;
+ rndis_device->net_dev = net_device;
/* Send the rndis initialization message */
- ret = rndis_filter_init_device(rndisDevice);
+ ret = rndis_filter_init_device(rndis_device);
if (ret != 0) {
/*
* TODO: If rndis init failed, we will need to shut down the
@@ -706,29 +730,29 @@ int rndis_filter_device_add(struct hv_device *dev,
}
/* Get the mac address */
- ret = rndis_filter_query_device_mac(rndisDevice);
+ ret = rndis_filter_query_device_mac(rndis_device);
if (ret != 0) {
/*
* TODO: shutdown rndis device and the channel
*/
}
- memcpy(deviceInfo->mac_adr, rndisDevice->hw_mac_adr, ETH_ALEN);
+ memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
- rndis_filter_query_device_link_status(rndisDevice);
+ rndis_filter_query_device_link_status(rndis_device);
- deviceInfo->link_state = rndisDevice->link_stat;
+ device_info->link_state = rndis_device->link_state;
- dev_info(&dev->device, "Device MAC %pM link state %s",
- rndisDevice->hw_mac_adr,
- ((deviceInfo->link_state) ? ("down\n") : ("up\n")));
+ dev_info(&dev->device, "Device MAC %pM link state %s\n",
+ rndis_device->hw_mac_adr,
+ device_info->link_state ? "down" : "up");
return ret;
}
void rndis_filter_device_remove(struct hv_device *dev)
{
- struct netvsc_device *net_dev = dev->ext;
+ struct netvsc_device *net_dev = hv_get_drvdata(dev);
struct rndis_device *rndis_dev = net_dev->extension;
/* Halt and release the rndis device */
@@ -743,17 +767,17 @@ void rndis_filter_device_remove(struct hv_device *dev)
int rndis_filter_open(struct hv_device *dev)
{
- struct netvsc_device *netDevice = dev->ext;
+ struct netvsc_device *net_device = hv_get_drvdata(dev);
- if (!netDevice)
+ if (!net_device)
return -EINVAL;
- return rndis_filter_open_device(netDevice->extension);
+ return rndis_filter_open_device(net_device->extension);
}
int rndis_filter_close(struct hv_device *dev)
{
- struct netvsc_device *netDevice = dev->ext;
+ struct netvsc_device *netDevice = hv_get_drvdata(dev);
if (!netDevice)
return -EINVAL;
diff --git a/drivers/staging/hv/storvsc.c b/drivers/staging/hv/storvsc.c
deleted file mode 100644
index 30297861194..00000000000
--- a/drivers/staging/hv/storvsc.c
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * Copyright (c) 2009, Microsoft Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Authors:
- * Haiyang Zhang <haiyangz@microsoft.com>
- * Hank Janssen <hjanssen@microsoft.com>
- * K. Y. Srinivasan <kys@microsoft.com>
- *
- */
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-
-#include "hyperv.h"
-#include "hyperv_storage.h"
-
-
-static inline struct storvsc_device *alloc_stor_device(struct hv_device *device)
-{
- struct storvsc_device *stor_device;
-
- stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
- if (!stor_device)
- return NULL;
-
- /* Set to 2 to allow both inbound and outbound traffics */
- /* (ie get_stor_device() and must_get_stor_device()) to proceed. */
- atomic_cmpxchg(&stor_device->ref_count, 0, 2);
-
- init_waitqueue_head(&stor_device->waiting_to_drain);
- stor_device->device = device;
- device->ext = stor_device;
-
- return stor_device;
-}
-
-static inline void free_stor_device(struct storvsc_device *device)
-{
- kfree(device);
-}
-
-/* Get the stordevice object iff exists and its refcount > 0 */
-static inline struct storvsc_device *must_get_stor_device(
- struct hv_device *device)
-{
- struct storvsc_device *stor_device;
-
- stor_device = (struct storvsc_device *)device->ext;
- if (stor_device && atomic_read(&stor_device->ref_count))
- atomic_inc(&stor_device->ref_count);
- else
- stor_device = NULL;
-
- return stor_device;
-}
-
-/* Drop ref count to 1 to effectively disable get_stor_device() */
-static inline struct storvsc_device *release_stor_device(
- struct hv_device *device)
-{
- struct storvsc_device *stor_device;
-
- stor_device = (struct storvsc_device *)device->ext;
-
- /* Busy wait until the ref drop to 2, then set it to 1 */
- while (atomic_cmpxchg(&stor_device->ref_count, 2, 1) != 2)
- udelay(100);
-
- return stor_device;
-}
-
-/* Drop ref count to 0. No one can use stor_device object. */
-static inline struct storvsc_device *final_release_stor_device(
- struct hv_device *device)
-{
- struct storvsc_device *stor_device;
-
- stor_device = (struct storvsc_device *)device->ext;
-
- /* Busy wait until the ref drop to 1, then set it to 0 */
- while (atomic_cmpxchg(&stor_device->ref_count, 1, 0) != 1)
- udelay(100);
-
- device->ext = NULL;
- return stor_device;
-}
-
-static int storvsc_channel_init(struct hv_device *device)
-{
- struct storvsc_device *stor_device;
- struct hv_storvsc_request *request;
- struct vstor_packet *vstor_packet;
- int ret, t;
-
- stor_device = get_stor_device(device);
- if (!stor_device)
- return -1;
-
- request = &stor_device->init_request;
- vstor_packet = &request->vstor_packet;
-
- /*
- * Now, initiate the vsc/vsp initialization protocol on the open
- * channel
- */
- memset(request, 0, sizeof(struct hv_storvsc_request));
- init_completion(&request->wait_event);
- vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
-
- DPRINT_INFO(STORVSC, "BEGIN_INITIALIZATION_OPERATION...");
-
- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
- goto cleanup;
-
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
-
- if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
- goto cleanup;
-
- DPRINT_INFO(STORVSC, "QUERY_PROTOCOL_VERSION_OPERATION...");
-
- /* reuse the packet for version range supported */
- memset(vstor_packet, 0, sizeof(struct vstor_packet));
- vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
-
- vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
- FILL_VMSTOR_REVISION(vstor_packet->version.revision);
-
- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
- goto cleanup;
-
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
-
- /* TODO: Check returned version */
- if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
- goto cleanup;
-
- /* Query channel properties */
- DPRINT_INFO(STORVSC, "QUERY_PROPERTIES_OPERATION...");
-
- memset(vstor_packet, 0, sizeof(struct vstor_packet));
- vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
- vstor_packet->storage_channel_properties.port_number =
- stor_device->port_number;
-
- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-
- if (ret != 0)
- goto cleanup;
-
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
-
- /* TODO: Check returned version */
- if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
- goto cleanup;
-
- stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
- stor_device->target_id
- = vstor_packet->storage_channel_properties.target_id;
-
- DPRINT_INFO(STORVSC, "END_INITIALIZATION_OPERATION...");
-
- memset(vstor_packet, 0, sizeof(struct vstor_packet));
- vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
-
- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-
- if (ret != 0)
- goto cleanup;
-
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
-
- if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
- goto cleanup;
-
- DPRINT_INFO(STORVSC, "**** storage channel up and running!! ****");
-
-cleanup:
- put_stor_device(device);
- return ret;
-}
-
-static void storvsc_on_io_completion(struct hv_device *device,
- struct vstor_packet *vstor_packet,
- struct hv_storvsc_request *request)
-{
- struct storvsc_device *stor_device;
- struct vstor_packet *stor_pkt;
-
- stor_device = must_get_stor_device(device);
- if (!stor_device)
- return;
-
- stor_pkt = &request->vstor_packet;
-
-
- /* Copy over the status...etc */
- stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
- stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
- stor_pkt->vm_srb.sense_info_length =
- vstor_packet->vm_srb.sense_info_length;
-
- if (vstor_packet->vm_srb.scsi_status != 0 ||
- vstor_packet->vm_srb.srb_status != 1){
- DPRINT_WARN(STORVSC,
- "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
- stor_pkt->vm_srb.cdb[0],
- vstor_packet->vm_srb.scsi_status,
- vstor_packet->vm_srb.srb_status);
- }
-
- if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
- /* CHECK_CONDITION */
- if (vstor_packet->vm_srb.srb_status & 0x80) {
- /* autosense data available */
- DPRINT_WARN(STORVSC, "storvsc pkt %p autosense data "
- "valid - len %d\n", request,
- vstor_packet->vm_srb.sense_info_length);
-
- memcpy(request->sense_buffer,
- vstor_packet->vm_srb.sense_data,
- vstor_packet->vm_srb.sense_info_length);
-
- }
- }
-
- stor_pkt->vm_srb.data_transfer_length =
- vstor_packet->vm_srb.data_transfer_length;
-
- request->on_io_completion(request);
-
- if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
- stor_device->drain_notify)
- wake_up(&stor_device->waiting_to_drain);
-
-
- put_stor_device(device);
-}
-
-static void storvsc_on_receive(struct hv_device *device,
- struct vstor_packet *vstor_packet,
- struct hv_storvsc_request *request)
-{
- switch (vstor_packet->operation) {
- case VSTOR_OPERATION_COMPLETE_IO:
- storvsc_on_io_completion(device, vstor_packet, request);
- break;
- case VSTOR_OPERATION_REMOVE_DEVICE:
- DPRINT_INFO(STORVSC, "REMOVE_DEVICE_OPERATION");
- /* TODO: */
- break;
-
- default:
- DPRINT_INFO(STORVSC, "Unknown operation received - %d",
- vstor_packet->operation);
- break;
- }
-}
-
-static void storvsc_on_channel_callback(void *context)
-{
- struct hv_device *device = (struct hv_device *)context;
- struct storvsc_device *stor_device;
- u32 bytes_recvd;
- u64 request_id;
- unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
- struct hv_storvsc_request *request;
- int ret;
-
-
- stor_device = must_get_stor_device(device);
- if (!stor_device)
- return;
-
- do {
- ret = vmbus_recvpacket(device->channel, packet,
- ALIGN(sizeof(struct vstor_packet), 8),
- &bytes_recvd, &request_id);
- if (ret == 0 && bytes_recvd > 0) {
-
- request = (struct hv_storvsc_request *)
- (unsigned long)request_id;
-
- if ((request == &stor_device->init_request) ||
- (request == &stor_device->reset_request)) {
-
- memcpy(&request->vstor_packet, packet,
- sizeof(struct vstor_packet));
- complete(&request->wait_event);
- } else {
- storvsc_on_receive(device,
- (struct vstor_packet *)packet,
- request);
- }
- } else {
- break;
- }
- } while (1);
-
- put_stor_device(device);
- return;
-}
-
-static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
-{
- struct vmstorage_channel_properties props;
- int ret;
-
- memset(&props, 0, sizeof(struct vmstorage_channel_properties));
-
- /* Open the channel */
- ret = vmbus_open(device->channel,
- ring_size,
- ring_size,
- (void *)&props,
- sizeof(struct vmstorage_channel_properties),
- storvsc_on_channel_callback, device);
-
- if (ret != 0)
- return -1;
-
- ret = storvsc_channel_init(device);
-
- return ret;
-}
-
-int storvsc_dev_add(struct hv_device *device,
- void *additional_info)
-{
- struct storvsc_device *stor_device;
- struct storvsc_device_info *device_info;
- int ret = 0;
-
- device_info = (struct storvsc_device_info *)additional_info;
- stor_device = alloc_stor_device(device);
- if (!stor_device) {
- ret = -1;
- goto cleanup;
- }
-
- /* Save the channel properties to our storvsc channel */
-
- /* FIXME: */
- /*
- * If we support more than 1 scsi channel, we need to set the
- * port number here to the scsi channel but how do we get the
- * scsi channel prior to the bus scan
- */
-
- stor_device->port_number = device_info->port_number;
- /* Send it back up */
- ret = storvsc_connect_to_vsp(device, device_info->ring_buffer_size);
-
- device_info->path_id = stor_device->path_id;
- device_info->target_id = stor_device->target_id;
-
-cleanup:
- return ret;
-}
-
-int storvsc_dev_remove(struct hv_device *device)
-{
- struct storvsc_device *stor_device;
-
- DPRINT_INFO(STORVSC, "disabling storage device (%p)...",
- device->ext);
-
- stor_device = release_stor_device(device);
-
- /*
- * At this point, all outbound traffic should be disable. We
- * only allow inbound traffic (responses) to proceed so that
- * outstanding requests can be completed.
- */
-
- storvsc_wait_to_drain(stor_device);
-
- stor_device = final_release_stor_device(device);
-
- /* Close the channel */
- vmbus_close(device->channel);
-
- free_stor_device(stor_device);
- return 0;
-}
-
-int storvsc_do_io(struct hv_device *device,
- struct hv_storvsc_request *request)
-{
- struct storvsc_device *stor_device;
- struct vstor_packet *vstor_packet;
- int ret = 0;
-
- vstor_packet = &request->vstor_packet;
- stor_device = get_stor_device(device);
-
- if (!stor_device)
- return -2;
-
-
- request->device = device;
-
-
- vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
-
- vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
-
-
- vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;
-
-
- vstor_packet->vm_srb.data_transfer_length =
- request->data_buffer.len;
-
- vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
-
- if (request->data_buffer.len) {
- ret = vmbus_sendpacket_multipagebuffer(device->channel,
- &request->data_buffer,
- vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request);
- } else {
- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- }
-
- if (ret != 0)
- return ret;
-
- atomic_inc(&stor_device->num_outstanding_req);
-
- put_stor_device(device);
- return ret;
-}
-
-/*
- * The channel properties uniquely specify how the device is to be
- * presented to the guest. Map this information for use by the block
- * driver. For Linux guests on Hyper-V, we emulate a scsi HBA in the guest
- * (storvsc_drv) and so scsi devices in the guest are handled by
- * native upper level Linux drivers. Consequently, Hyper-V
- * block driver, while being a generic block driver, presently does not
- * deal with anything other than devices that would need to be presented
- * to the guest as an IDE disk.
- *
- * This function maps the channel properties as embedded in the input
- * parameter device_info onto information necessary to register the
- * corresponding block device.
- *
- * Currently, there is no way to stop the emulation of the block device
- * on the host side. And so, to prevent the native IDE drivers in Linux
- * from taking over these devices (to be managedby Hyper-V block
- * driver), we will take over if need be the major of the IDE controllers.
- *
- */
-
-int storvsc_get_major_info(struct storvsc_device_info *device_info,
- struct storvsc_major_info *major_info)
-{
- static bool ide0_registered;
- static bool ide1_registered;
-
- /*
- * For now we only support IDE disks.
- */
- major_info->devname = "ide";
- major_info->diskname = "hd";
-
- if (device_info->path_id) {
- major_info->major = 22;
- if (!ide1_registered) {
- major_info->do_register = true;
- ide1_registered = true;
- } else
- major_info->do_register = false;
-
- if (device_info->target_id)
- major_info->index = 3;
- else
- major_info->index = 2;
-
- return 0;
- } else {
- major_info->major = 3;
- if (!ide0_registered) {
- major_info->do_register = true;
- ide0_registered = true;
- } else
- major_info->do_register = false;
-
- if (device_info->target_id)
- major_info->index = 1;
- else
- major_info->index = 0;
-
- return 0;
- }
-
- return -ENODEV;
-}
-
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index 7effaf32e25..ae8c33e7849 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -19,12 +19,19 @@
* Hank Janssen <hjanssen@microsoft.com>
* K. Y. Srinivasan <kys@microsoft.com>
*/
+
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/blkdev.h>
-#include <linux/dmi.h>
+#include <linux/hyperv.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -34,22 +41,247 @@
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>
-#include "hyperv.h"
-#include "hyperv_storage.h"
+#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
-static const char *driver_name = "storvsc";
+/* to alert the user that structure sizes may be mismatched even though the */
+/* protocol versions match. */
+
+
+#define REVISION_STRING(REVISION_) #REVISION_
+#define FILL_VMSTOR_REVISION(RESULT_LVALUE_) \
+ do { \
+ char *revision_string \
+ = REVISION_STRING($Rev : 6 $) + 6; \
+ RESULT_LVALUE_ = 0; \
+ while (*revision_string >= '0' \
+ && *revision_string <= '9') { \
+ RESULT_LVALUE_ *= 10; \
+ RESULT_LVALUE_ += *revision_string - '0'; \
+ revision_string++; \
+ } \
+ } while (0)
+
+/* Major/minor macros. Minor version is in LSB, meaning that earlier flat */
+/* version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1). */
+#define VMSTOR_PROTOCOL_MAJOR(VERSION_) (((VERSION_) >> 8) & 0xff)
+#define VMSTOR_PROTOCOL_MINOR(VERSION_) (((VERSION_)) & 0xff)
+#define VMSTOR_PROTOCOL_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
+ (((MINOR_) & 0xff)))
+#define VMSTOR_INVALID_PROTOCOL_VERSION (-1)
+
+/* Version history: */
+/* V1 Beta 0.1 */
+/* V1 RC < 2008/1/31 1.0 */
+/* V1 RC > 2008/1/31 2.0 */
+#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(2, 0)
+
+
+
+
+/* This will get replaced with the max transfer length that is possible on */
+/* the host adapter. */
+/* The max transfer length will be published when we offer a vmbus channel. */
+#define MAX_TRANSFER_LENGTH 0x40000
+#define DEFAULT_PACKET_SIZE (sizeof(struct vmdata_gpa_direct) + \
+ sizeof(struct vstor_packet) + \
+ sizesizeof(u64) * (MAX_TRANSFER_LENGTH / PAGE_SIZE)))
+
+
+/* Packet structure describing virtual storage requests. */
+enum vstor_packet_operation {
+ VSTOR_OPERATION_COMPLETE_IO = 1,
+ VSTOR_OPERATION_REMOVE_DEVICE = 2,
+ VSTOR_OPERATION_EXECUTE_SRB = 3,
+ VSTOR_OPERATION_RESET_LUN = 4,
+ VSTOR_OPERATION_RESET_ADAPTER = 5,
+ VSTOR_OPERATION_RESET_BUS = 6,
+ VSTOR_OPERATION_BEGIN_INITIALIZATION = 7,
+ VSTOR_OPERATION_END_INITIALIZATION = 8,
+ VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9,
+ VSTOR_OPERATION_QUERY_PROPERTIES = 10,
+ VSTOR_OPERATION_MAXIMUM = 10
+};
-/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
-static const struct hv_guid stor_vsci_device_type = {
- .data = {
- 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
- 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f
- }
+/*
+ * Platform neutral description of a scsi request -
+ * this remains the same across the write regardless of 32/64 bit
+ * note: it's patterned off the SCSI_PASS_THROUGH structure
+ */
+#define CDB16GENERIC_LENGTH 0x10
+
+#ifndef SENSE_BUFFER_SIZE
+#define SENSE_BUFFER_SIZE 0x12
+#endif
+
+#define MAX_DATA_BUF_LEN_WITH_PADDING 0x14
+
+struct vmscsi_request {
+ unsigned short length;
+ unsigned char srb_status;
+ unsigned char scsi_status;
+
+ unsigned char port_number;
+ unsigned char path_id;
+ unsigned char target_id;
+ unsigned char lun;
+
+ unsigned char cdb_length;
+ unsigned char sense_info_length;
+ unsigned char data_in;
+ unsigned char reserved;
+
+ unsigned int data_transfer_length;
+
+ union {
+ unsigned char cdb[CDB16GENERIC_LENGTH];
+ unsigned char sense_data[SENSE_BUFFER_SIZE];
+ unsigned char reserved_array[MAX_DATA_BUF_LEN_WITH_PADDING];
+ };
+} __attribute((packed));
+
+
+/*
+ * This structure is sent during the intialization phase to get the different
+ * properties of the channel.
+ */
+struct vmstorage_channel_properties {
+ unsigned short protocol_version;
+ unsigned char path_id;
+ unsigned char target_id;
+
+ /* Note: port number is only really known on the client side */
+ unsigned int port_number;
+ unsigned int flags;
+ unsigned int max_transfer_bytes;
+
+ /* This id is unique for each channel and will correspond with */
+ /* vendor specific data in the inquirydata */
+ unsigned long long unique_id;
+} __packed;
+
+/* This structure is sent during the storage protocol negotiations. */
+struct vmstorage_protocol_version {
+ /* Major (MSW) and minor (LSW) version numbers. */
+ unsigned short major_minor;
+
+ /*
+ * Revision number is auto-incremented whenever this file is changed
+ * (See FILL_VMSTOR_REVISION macro above). Mismatch does not
+ * definitely indicate incompatibility--but it does indicate mismatched
+ * builds.
+ */
+ unsigned short revision;
+} __packed;
+
+/* Channel Property Flags */
+#define STORAGE_CHANNEL_REMOVABLE_FLAG 0x1
+#define STORAGE_CHANNEL_EMULATED_IDE_FLAG 0x2
+
+struct vstor_packet {
+ /* Requested operation type */
+ enum vstor_packet_operation operation;
+
+ /* Flags - see below for values */
+ unsigned int flags;
+
+ /* Status of the request returned from the server side. */
+ unsigned int status;
+
+ /* Data payload area */
+ union {
+ /*
+ * Structure used to forward SCSI commands from the
+ * client to the server.
+ */
+ struct vmscsi_request vm_srb;
+
+ /* Structure used to query channel properties. */
+ struct vmstorage_channel_properties storage_channel_properties;
+
+ /* Used during version negotiations. */
+ struct vmstorage_protocol_version version;
+ };
+} __packed;
+
+/* Packet flags */
+/*
+ * This flag indicates that the server should send back a completion for this
+ * packet.
+ */
+#define REQUEST_COMPLETION_FLAG 0x1
+
+/* This is the set of flags that the vsc can set in any packets it sends */
+#define VSC_LEGAL_FLAGS (REQUEST_COMPLETION_FLAG)
+
+
+/* Defines */
+
+#define STORVSC_MAX_IO_REQUESTS 128
+
+/*
+ * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
+ * reality, the path/target is not used (ie always set to 0) so our
+ * scsi host adapter essentially has 1 bus with 1 target that contains
+ * up to 256 luns.
+ */
+#define STORVSC_MAX_LUNS_PER_TARGET 64
+#define STORVSC_MAX_TARGETS 1
+#define STORVSC_MAX_CHANNELS 1
+#define STORVSC_MAX_CMD_LEN 16
+
+struct hv_storvsc_request;
+
+/* Matches Windows-end */
+enum storvsc_request_type {
+ WRITE_TYPE,
+ READ_TYPE,
+ UNKNOWN_TYPE,
+};
+
+
+struct hv_storvsc_request {
+ struct hv_device *device;
+
+ /* Synchronize the request/response if needed */
+ struct completion wait_event;
+
+ unsigned char *sense_buffer;
+ void *context;
+ void (*on_io_completion)(struct hv_storvsc_request *request);
+ struct hv_multipage_buffer data_buffer;
+
+ struct vstor_packet vstor_packet;
+};
+
+
+/* A storvsc device is a device object that contains a vmbus channel */
+struct storvsc_device {
+ struct hv_device *device;
+
+ bool destroy;
+ bool drain_notify;
+ atomic_t num_outstanding_req;
+ struct Scsi_Host *host;
+
+ wait_queue_head_t waiting_to_drain;
+
+ /*
+ * Each unique Port/Path/Target represents 1 channel ie scsi
+ * controller. In reality, the pathid, targetid is always 0
+ * and the port is set by us
+ */
+ unsigned int port_number;
+ unsigned char path_id;
+ unsigned char target_id;
+
+ /* Used for vsc/vsp channel reset process */
+ struct hv_storvsc_request init_request;
+ struct hv_storvsc_request reset_request;
};
struct hv_host_device {
@@ -70,6 +302,430 @@ struct storvsc_cmd_request {
struct hv_storvsc_request request;
};
+static inline struct storvsc_device *get_out_stor_device(
+ struct hv_device *device)
+{
+ struct storvsc_device *stor_device;
+
+ stor_device = hv_get_drvdata(device);
+
+ if (stor_device && stor_device->destroy)
+ stor_device = NULL;
+
+ return stor_device;
+}
+
+
+static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
+{
+ dev->drain_notify = true;
+ wait_event(dev->waiting_to_drain,
+ atomic_read(&dev->num_outstanding_req) == 0);
+ dev->drain_notify = false;
+}
+
+static inline struct storvsc_device *get_in_stor_device(
+ struct hv_device *device)
+{
+ struct storvsc_device *stor_device;
+
+ stor_device = hv_get_drvdata(device);
+
+ if (!stor_device)
+ goto get_in_err;
+
+ /*
+ * If the device is being destroyed; allow incoming
+ * traffic only to cleanup outstanding requests.
+ */
+
+ if (stor_device->destroy &&
+ (atomic_read(&stor_device->num_outstanding_req) == 0))
+ stor_device = NULL;
+
+get_in_err:
+ return stor_device;
+
+}
+
+static int storvsc_channel_init(struct hv_device *device)
+{
+ struct storvsc_device *stor_device;
+ struct hv_storvsc_request *request;
+ struct vstor_packet *vstor_packet;
+ int ret, t;
+
+ stor_device = get_out_stor_device(device);
+ if (!stor_device)
+ return -ENODEV;
+
+ request = &stor_device->init_request;
+ vstor_packet = &request->vstor_packet;
+
+ /*
+ * Now, initiate the vsc/vsp initialization protocol on the open
+ * channel
+ */
+ memset(request, 0, sizeof(struct hv_storvsc_request));
+ init_completion(&request->wait_event);
+ vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+
+ /* reuse the packet for version range supported */
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+ vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
+ FILL_VMSTOR_REVISION(vstor_packet->version.revision);
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+ vstor_packet->storage_channel_properties.port_number =
+ stor_device->port_number;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+ stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
+ stor_device->target_id
+ = vstor_packet->storage_channel_properties.target_id;
+
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+
+cleanup:
+ return ret;
+}
+
+static void storvsc_on_io_completion(struct hv_device *device,
+ struct vstor_packet *vstor_packet,
+ struct hv_storvsc_request *request)
+{
+ struct storvsc_device *stor_device;
+ struct vstor_packet *stor_pkt;
+
+ stor_device = hv_get_drvdata(device);
+ stor_pkt = &request->vstor_packet;
+
+ /*
+ * The current SCSI handling on the host side does
+ * not correctly handle:
+ * INQUIRY command with page code parameter set to 0x80
+ * MODE_SENSE command with cmd[2] == 0x1c
+ *
+ * Setup srb and scsi status so this won't be fatal.
+ * We do this so we can distinguish truly fatal failues
+ * (srb status == 0x4) and off-line the device in that case.
+ */
+
+ if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
+ (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
+ vstor_packet->vm_srb.scsi_status = 0;
+ vstor_packet->vm_srb.srb_status = 0x1;
+ }
+
+
+ /* Copy over the status...etc */
+ stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
+ stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
+ stor_pkt->vm_srb.sense_info_length =
+ vstor_packet->vm_srb.sense_info_length;
+
+ if (vstor_packet->vm_srb.scsi_status != 0 ||
+ vstor_packet->vm_srb.srb_status != 1){
+ dev_warn(&device->device,
+ "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
+ stor_pkt->vm_srb.cdb[0],
+ vstor_packet->vm_srb.scsi_status,
+ vstor_packet->vm_srb.srb_status);
+ }
+
+ if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
+ /* CHECK_CONDITION */
+ if (vstor_packet->vm_srb.srb_status & 0x80) {
+ /* autosense data available */
+ dev_warn(&device->device,
+ "stor pkt %p autosense data valid - len %d\n",
+ request,
+ vstor_packet->vm_srb.sense_info_length);
+
+ memcpy(request->sense_buffer,
+ vstor_packet->vm_srb.sense_data,
+ vstor_packet->vm_srb.sense_info_length);
+
+ }
+ }
+
+ stor_pkt->vm_srb.data_transfer_length =
+ vstor_packet->vm_srb.data_transfer_length;
+
+ request->on_io_completion(request);
+
+ if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
+ stor_device->drain_notify)
+ wake_up(&stor_device->waiting_to_drain);
+
+
+}
+
+static void storvsc_on_receive(struct hv_device *device,
+ struct vstor_packet *vstor_packet,
+ struct hv_storvsc_request *request)
+{
+ switch (vstor_packet->operation) {
+ case VSTOR_OPERATION_COMPLETE_IO:
+ storvsc_on_io_completion(device, vstor_packet, request);
+ break;
+ case VSTOR_OPERATION_REMOVE_DEVICE:
+
+ default:
+ break;
+ }
+}
+
+static void storvsc_on_channel_callback(void *context)
+{
+ struct hv_device *device = (struct hv_device *)context;
+ struct storvsc_device *stor_device;
+ u32 bytes_recvd;
+ u64 request_id;
+ unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
+ struct hv_storvsc_request *request;
+ int ret;
+
+
+ stor_device = get_in_stor_device(device);
+ if (!stor_device)
+ return;
+
+ do {
+ ret = vmbus_recvpacket(device->channel, packet,
+ ALIGN(sizeof(struct vstor_packet), 8),
+ &bytes_recvd, &request_id);
+ if (ret == 0 && bytes_recvd > 0) {
+
+ request = (struct hv_storvsc_request *)
+ (unsigned long)request_id;
+
+ if ((request == &stor_device->init_request) ||
+ (request == &stor_device->reset_request)) {
+
+ memcpy(&request->vstor_packet, packet,
+ sizeof(struct vstor_packet));
+ complete(&request->wait_event);
+ } else {
+ storvsc_on_receive(device,
+ (struct vstor_packet *)packet,
+ request);
+ }
+ } else {
+ break;
+ }
+ } while (1);
+
+ return;
+}
+
+static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
+{
+ struct vmstorage_channel_properties props;
+ int ret;
+
+ memset(&props, 0, sizeof(struct vmstorage_channel_properties));
+
+ /* Open the channel */
+ ret = vmbus_open(device->channel,
+ ring_size,
+ ring_size,
+ (void *)&props,
+ sizeof(struct vmstorage_channel_properties),
+ storvsc_on_channel_callback, device);
+
+ if (ret != 0)
+ return ret;
+
+ ret = storvsc_channel_init(device);
+
+ return ret;
+}
+
+static int storvsc_dev_remove(struct hv_device *device)
+{
+ struct storvsc_device *stor_device;
+ unsigned long flags;
+
+ stor_device = hv_get_drvdata(device);
+
+ spin_lock_irqsave(&device->channel->inbound_lock, flags);
+ stor_device->destroy = true;
+ spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
+
+ /*
+ * At this point, all outbound traffic should be disable. We
+ * only allow inbound traffic (responses) to proceed so that
+ * outstanding requests can be completed.
+ */
+
+ storvsc_wait_to_drain(stor_device);
+
+ /*
+ * Since we have already drained, we don't need to busy wait
+ * as was done in final_release_stor_device()
+ * Note that we cannot set the ext pointer to NULL until
+ * we have drained - to drain the outgoing packets, we need to
+ * allow incoming packets.
+ */
+ spin_lock_irqsave(&device->channel->inbound_lock, flags);
+ hv_set_drvdata(device, NULL);
+ spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
+
+ /* Close the channel */
+ vmbus_close(device->channel);
+
+ kfree(stor_device);
+ return 0;
+}
+
+static int storvsc_do_io(struct hv_device *device,
+ struct hv_storvsc_request *request)
+{
+ struct storvsc_device *stor_device;
+ struct vstor_packet *vstor_packet;
+ int ret = 0;
+
+ vstor_packet = &request->vstor_packet;
+ stor_device = get_out_stor_device(device);
+
+ if (!stor_device)
+ return -ENODEV;
+
+
+ request->device = device;
+
+
+ vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
+
+ vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
+
+
+ vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;
+
+
+ vstor_packet->vm_srb.data_transfer_length =
+ request->data_buffer.len;
+
+ vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
+
+ if (request->data_buffer.len) {
+ ret = vmbus_sendpacket_multipagebuffer(device->channel,
+ &request->data_buffer,
+ vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request);
+ } else {
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ }
+
+ if (ret != 0)
+ return ret;
+
+ atomic_inc(&stor_device->num_outstanding_req);
+
+ return ret;
+}
+
+static void storvsc_get_ide_info(struct hv_device *dev, int *target, int *path)
+{
+ *target =
+ dev->dev_instance.b[5] << 8 | dev->dev_instance.b[4];
+
+ *path =
+ dev->dev_instance.b[3] << 24 |
+ dev->dev_instance.b[2] << 16 |
+ dev->dev_instance.b[1] << 8 | dev->dev_instance.b[0];
+}
+
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
@@ -307,7 +963,8 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
static int storvsc_remove(struct hv_device *dev)
{
- struct Scsi_Host *host = dev_get_drvdata(&dev->device);
+ struct storvsc_device *stor_device = hv_get_drvdata(dev);
+ struct Scsi_Host *host = stor_device->host;
struct hv_host_device *host_dev =
(struct hv_host_device *)host->hostdata;
@@ -355,9 +1012,9 @@ static int storvsc_host_reset(struct hv_device *device)
int ret, t;
- stor_device = get_stor_device(device);
+ stor_device = get_out_stor_device(device);
if (!stor_device)
- return -1;
+ return -ENODEV;
request = &stor_device->reset_request;
vstor_packet = &request->vstor_packet;
@@ -389,7 +1046,6 @@ static int storvsc_host_reset(struct hv_device *device)
*/
cleanup:
- put_stor_device(device);
return ret;
}
@@ -413,9 +1069,9 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
/*
- * storvsc_commmand_completion - Command completion processing
+ * storvsc_command_completion - Command completion processing
*/
-static void storvsc_commmand_completion(struct hv_storvsc_request *request)
+static void storvsc_command_completion(struct hv_storvsc_request *request)
{
struct storvsc_cmd_request *cmd_request =
(struct storvsc_cmd_request *)request->context;
@@ -426,18 +1082,26 @@ static void storvsc_commmand_completion(struct hv_storvsc_request *request)
struct scsi_sense_hdr sense_hdr;
struct vmscsi_request *vm_srb;
+ vm_srb = &request->vstor_packet.vm_srb;
if (cmd_request->bounce_sgl_count) {
-
- /* FIXME: We can optimize on writes by just skipping this */
- copy_from_bounce_buffer(scsi_sglist(scmnd),
+ if (vm_srb->data_in == READ_TYPE) {
+ copy_from_bounce_buffer(scsi_sglist(scmnd),
cmd_request->bounce_sgl,
scsi_sg_count(scmnd));
- destroy_bounce_buffer(cmd_request->bounce_sgl,
- cmd_request->bounce_sgl_count);
+ destroy_bounce_buffer(cmd_request->bounce_sgl,
+ cmd_request->bounce_sgl_count);
+ }
}
- vm_srb = &request->vstor_packet.vm_srb;
- scmnd->result = vm_srb->scsi_status;
+ /*
+ * If there is an error; offline the device since all
+ * error recovery strategies would have already been
+ * deployed on the host side.
+ */
+ if (vm_srb->srb_status == 0x4)
+ scmnd->result = DID_TARGET_FAILURE << 16;
+ else
+ scmnd->result = vm_srb->scsi_status;
if (scmnd->result) {
if (scsi_normalize_sense(scmnd->sense_buffer,
@@ -459,6 +1123,22 @@ static void storvsc_commmand_completion(struct hv_storvsc_request *request)
kmem_cache_free(host_dev->request_pool, cmd_request);
}
+static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
+{
+ bool allowed = true;
+ u8 scsi_op = scmnd->cmnd[0];
+
+ switch (scsi_op) {
+ /* smartd sends this command, which will offline the device */
+ case SET_WINDOW:
+ scmnd->result = DID_ERROR << 16;
+ allowed = false;
+ break;
+ default:
+ break;
+ }
+ return allowed;
+}
/*
* storvsc_queuecommand - Initiate command processing
@@ -478,6 +1158,10 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
unsigned int sg_count = 0;
struct vmscsi_request *vm_srb;
+ if (storvsc_check_scsi_cmd(scmnd) == false) {
+ done(scmnd);
+ return 0;
+ }
/* If retrying, no need to prep the cmd */
if (scmnd->host_scribble) {
@@ -523,7 +1207,7 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
break;
}
- request->on_io_completion = storvsc_commmand_completion;
+ request->on_io_completion = storvsc_command_completion;
request->context = cmd_request;/* scmnd; */
vm_srb->port_number = host_dev->port;
@@ -561,12 +1245,10 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
PAGE_SHIFT;
- /*
- * FIXME: We can optimize on reads by just skipping
- * this
- */
- copy_to_bounce_buffer(sgl, cmd_request->bounce_sgl,
- scsi_sg_count(scmnd));
+ if (vm_srb->data_in == WRITE_TYPE)
+ copy_to_bounce_buffer(sgl,
+ cmd_request->bounce_sgl,
+ scsi_sg_count(scmnd));
sgl = cmd_request->bounce_sgl;
sg_count = cmd_request->bounce_sgl_count;
@@ -589,20 +1271,12 @@ retry_request:
/* Invokes the vsc to start an IO */
ret = storvsc_do_io(dev, &cmd_request->request);
- if (ret == -1) {
+ if (ret == -EAGAIN) {
/* no more space */
- if (cmd_request->bounce_sgl_count) {
- /*
- * FIXME: We can optimize on writes by just skipping
- * this
- */
- copy_from_bounce_buffer(scsi_sglist(scmnd),
- cmd_request->bounce_sgl,
- scsi_sg_count(scmnd));
+ if (cmd_request->bounce_sgl_count)
destroy_bounce_buffer(cmd_request->bounce_sgl,
- cmd_request->bounce_sgl_count);
- }
+ cmd_request->bounce_sgl_count);
kmem_cache_free(host_dev->request_pool, cmd_request);
@@ -646,25 +1320,46 @@ static struct scsi_host_template scsi_driver = {
.dma_boundary = PAGE_SIZE-1,
};
+enum {
+ SCSI_GUID,
+ IDE_GUID,
+};
+
+static const struct hv_vmbus_device_id id_table[] = {
+ /* SCSI guid */
+ { VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
+ 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
+ .driver_data = SCSI_GUID },
+ /* IDE guid */
+ { VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
+ 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
+ .driver_data = IDE_GUID },
+ { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
/*
* storvsc_probe - Add a new device for this driver
*/
-static int storvsc_probe(struct hv_device *device)
+static int storvsc_probe(struct hv_device *device,
+ const struct hv_vmbus_device_id *dev_id)
{
int ret;
struct Scsi_Host *host;
struct hv_host_device *host_dev;
- struct storvsc_device_info device_info;
+ bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
+ int path = 0;
+ int target = 0;
+ struct storvsc_device *stor_device;
host = scsi_host_alloc(&scsi_driver,
sizeof(struct hv_host_device));
if (!host)
return -ENOMEM;
- dev_set_drvdata(&device->device, host);
-
host_dev = (struct hv_host_device *)host->hostdata;
memset(host_dev, 0, sizeof(struct hv_host_device));
@@ -681,19 +1376,33 @@ static int storvsc_probe(struct hv_device *device)
return -ENOMEM;
}
- device_info.port_number = host->host_no;
- device_info.ring_buffer_size = storvsc_ringbuffer_size;
- /* Call to the vsc driver to add the device */
- ret = storvsc_dev_add(device, (void *)&device_info);
+ stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
+ if (!stor_device) {
+ kmem_cache_destroy(host_dev->request_pool);
+ scsi_host_put(host);
+ return -ENOMEM;
+ }
- if (ret != 0) {
+ stor_device->destroy = false;
+ init_waitqueue_head(&stor_device->waiting_to_drain);
+ stor_device->device = device;
+ stor_device->host = host;
+ hv_set_drvdata(device, stor_device);
+
+ stor_device->port_number = host->host_no;
+ ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
+ if (ret) {
kmem_cache_destroy(host_dev->request_pool);
scsi_host_put(host);
- return -1;
+ kfree(stor_device);
+ return ret;
}
- host_dev->path = device_info.path_id;
- host_dev->target = device_info.target_id;
+ if (dev_is_ide)
+ storvsc_get_ide_info(device, &target, &path);
+
+ host_dev->path = stor_device->path_id;
+ host_dev->target = stor_device->target_id;
/* max # of devices per target */
host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
@@ -701,54 +1410,43 @@ static int storvsc_probe(struct hv_device *device)
host->max_id = STORVSC_MAX_TARGETS;
/* max # of channels */
host->max_channel = STORVSC_MAX_CHANNELS - 1;
+ /* max cmd length */
+ host->max_cmd_len = STORVSC_MAX_CMD_LEN;
/* Register the HBA and start the scsi bus scan */
ret = scsi_add_host(host, &device->device);
- if (ret != 0) {
-
- storvsc_dev_remove(device);
+ if (ret != 0)
+ goto err_out;
- kmem_cache_destroy(host_dev->request_pool);
- scsi_host_put(host);
- return -1;
+ if (!dev_is_ide) {
+ scsi_scan_host(host);
+ return 0;
+ }
+ ret = scsi_add_device(host, 0, target, 0);
+ if (ret) {
+ scsi_remove_host(host);
+ goto err_out;
}
+ return 0;
- scsi_scan_host(host);
- return ret;
+err_out:
+ storvsc_dev_remove(device);
+ kmem_cache_destroy(host_dev->request_pool);
+ scsi_host_put(host);
+ return -ENODEV;
}
/* The one and only one */
static struct hv_driver storvsc_drv = {
+ .name = "storvsc",
+ .id_table = id_table,
.probe = storvsc_probe,
.remove = storvsc_remove,
};
-/*
- * We use a DMI table to determine if we should autoload this driver This is
- * needed by distro tools to determine if the hyperv drivers should be
- * installed and/or configured. We don't do anything else with the table, but
- * it needs to be present.
- */
-
-static const struct dmi_system_id __initconst
-hv_stor_dmi_table[] __maybe_unused = {
- {
- .ident = "Hyper-V",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
- DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
- },
- },
- { },
-};
-MODULE_DEVICE_TABLE(dmi, hv_stor_dmi_table);
-
static int __init storvsc_drv_init(void)
{
- int ret;
- struct hv_driver *drv = &storvsc_drv;
u32 max_outstanding_req_per_channel;
/*
@@ -757,32 +1455,22 @@ static int __init storvsc_drv_init(void)
* the ring buffer indices) by the max request size (which is
* vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
*/
-
max_outstanding_req_per_channel =
- ((storvsc_ringbuffer_size - PAGE_SIZE) /
- ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
- sizeof(struct vstor_packet) + sizeof(u64),
- sizeof(u64)));
-
- memcpy(&drv->dev_type, &stor_vsci_device_type,
- sizeof(struct hv_guid));
+ ((storvsc_ringbuffer_size - PAGE_SIZE) /
+ ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
+ sizeof(struct vstor_packet) + sizeof(u64),
+ sizeof(u64)));
if (max_outstanding_req_per_channel <
STORVSC_MAX_IO_REQUESTS)
- return -1;
-
- drv->driver.name = driver_name;
-
+ return -EINVAL;
- /* The driver belongs to vmbus */
- ret = vmbus_child_driver_register(&drv->driver);
-
- return ret;
+ return vmbus_driver_register(&storvsc_drv);
}
static void __exit storvsc_drv_exit(void)
{
- vmbus_child_driver_unregister(&storvsc_drv.driver);
+ vmbus_driver_unregister(&storvsc_drv);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
deleted file mode 100644
index 1c949f5fb71..00000000000
--- a/drivers/staging/hv/vmbus_drv.c
+++ /dev/null
@@ -1,802 +0,0 @@
-/*
- * Copyright (c) 2009, Microsoft Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Authors:
- * Haiyang Zhang <haiyangz@microsoft.com>
- * Hank Janssen <hjanssen@microsoft.com>
- * K. Y. Srinivasan <kys@microsoft.com>
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/sysctl.h>
-#include <linux/pci.h>
-#include <linux/dmi.h>
-#include <linux/slab.h>
-#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <linux/completion.h>
-
-#include "hyperv.h"
-#include "hyperv_vmbus.h"
-
-
-static struct acpi_device *hv_acpi_dev;
-
-static struct tasklet_struct msg_dpc;
-static struct tasklet_struct event_dpc;
-
-unsigned int vmbus_loglevel = (ALL_MODULES << 16 | INFO_LVL);
-EXPORT_SYMBOL(vmbus_loglevel);
- /* (ALL_MODULES << 16 | DEBUG_LVL_ENTEREXIT); */
- /* (((VMBUS | VMBUS_DRV)<<16) | DEBUG_LVL_ENTEREXIT); */
-
-static struct completion probe_event;
-static int irq;
-
-static void get_channel_info(struct hv_device *device,
- struct hv_device_info *info)
-{
- struct vmbus_channel_debug_info debug_info;
-
- if (!device->channel)
- return;
-
- vmbus_get_debug_info(device->channel, &debug_info);
-
- info->chn_id = debug_info.relid;
- info->chn_state = debug_info.state;
- memcpy(&info->chn_type, &debug_info.interfacetype,
- sizeof(struct hv_guid));
- memcpy(&info->chn_instance, &debug_info.interface_instance,
- sizeof(struct hv_guid));
-
- info->monitor_id = debug_info.monitorid;
-
- info->server_monitor_pending = debug_info.servermonitor_pending;
- info->server_monitor_latency = debug_info.servermonitor_latency;
- info->server_monitor_conn_id = debug_info.servermonitor_connectionid;
-
- info->client_monitor_pending = debug_info.clientmonitor_pending;
- info->client_monitor_latency = debug_info.clientmonitor_latency;
- info->client_monitor_conn_id = debug_info.clientmonitor_connectionid;
-
- info->inbound.int_mask = debug_info.inbound.current_interrupt_mask;
- info->inbound.read_idx = debug_info.inbound.current_read_index;
- info->inbound.write_idx = debug_info.inbound.current_write_index;
- info->inbound.bytes_avail_toread =
- debug_info.inbound.bytes_avail_toread;
- info->inbound.bytes_avail_towrite =
- debug_info.inbound.bytes_avail_towrite;
-
- info->outbound.int_mask =
- debug_info.outbound.current_interrupt_mask;
- info->outbound.read_idx = debug_info.outbound.current_read_index;
- info->outbound.write_idx = debug_info.outbound.current_write_index;
- info->outbound.bytes_avail_toread =
- debug_info.outbound.bytes_avail_toread;
- info->outbound.bytes_avail_towrite =
- debug_info.outbound.bytes_avail_towrite;
-}
-
-/*
- * vmbus_show_device_attr - Show the device attribute in sysfs.
- *
- * This is invoked when user does a
- * "cat /sys/bus/vmbus/devices/<busdevice>/<attr name>"
- */
-static ssize_t vmbus_show_device_attr(struct device *dev,
- struct device_attribute *dev_attr,
- char *buf)
-{
- struct hv_device *hv_dev = device_to_hv_device(dev);
- struct hv_device_info device_info;
-
- memset(&device_info, 0, sizeof(struct hv_device_info));
-
- get_channel_info(hv_dev, &device_info);
-
- if (!strcmp(dev_attr->attr.name, "class_id")) {
- return sprintf(buf, "{%02x%02x%02x%02x-%02x%02x-%02x%02x-"
- "%02x%02x%02x%02x%02x%02x%02x%02x}\n",
- device_info.chn_type.data[3],
- device_info.chn_type.data[2],
- device_info.chn_type.data[1],
- device_info.chn_type.data[0],
- device_info.chn_type.data[5],
- device_info.chn_type.data[4],
- device_info.chn_type.data[7],
- device_info.chn_type.data[6],
- device_info.chn_type.data[8],
- device_info.chn_type.data[9],
- device_info.chn_type.data[10],
- device_info.chn_type.data[11],
- device_info.chn_type.data[12],
- device_info.chn_type.data[13],
- device_info.chn_type.data[14],
- device_info.chn_type.data[15]);
- } else if (!strcmp(dev_attr->attr.name, "device_id")) {
- return sprintf(buf, "{%02x%02x%02x%02x-%02x%02x-%02x%02x-"
- "%02x%02x%02x%02x%02x%02x%02x%02x}\n",
- device_info.chn_instance.data[3],
- device_info.chn_instance.data[2],
- device_info.chn_instance.data[1],
- device_info.chn_instance.data[0],
- device_info.chn_instance.data[5],
- device_info.chn_instance.data[4],
- device_info.chn_instance.data[7],
- device_info.chn_instance.data[6],
- device_info.chn_instance.data[8],
- device_info.chn_instance.data[9],
- device_info.chn_instance.data[10],
- device_info.chn_instance.data[11],
- device_info.chn_instance.data[12],
- device_info.chn_instance.data[13],
- device_info.chn_instance.data[14],
- device_info.chn_instance.data[15]);
- } else if (!strcmp(dev_attr->attr.name, "state")) {
- return sprintf(buf, "%d\n", device_info.chn_state);
- } else if (!strcmp(dev_attr->attr.name, "id")) {
- return sprintf(buf, "%d\n", device_info.chn_id);
- } else if (!strcmp(dev_attr->attr.name, "out_intr_mask")) {
- return sprintf(buf, "%d\n", device_info.outbound.int_mask);
- } else if (!strcmp(dev_attr->attr.name, "out_read_index")) {
- return sprintf(buf, "%d\n", device_info.outbound.read_idx);
- } else if (!strcmp(dev_attr->attr.name, "out_write_index")) {
- return sprintf(buf, "%d\n", device_info.outbound.write_idx);
- } else if (!strcmp(dev_attr->attr.name, "out_read_bytes_avail")) {
- return sprintf(buf, "%d\n",
- device_info.outbound.bytes_avail_toread);
- } else if (!strcmp(dev_attr->attr.name, "out_write_bytes_avail")) {
- return sprintf(buf, "%d\n",
- device_info.outbound.bytes_avail_towrite);
- } else if (!strcmp(dev_attr->attr.name, "in_intr_mask")) {
- return sprintf(buf, "%d\n", device_info.inbound.int_mask);
- } else if (!strcmp(dev_attr->attr.name, "in_read_index")) {
- return sprintf(buf, "%d\n", device_info.inbound.read_idx);
- } else if (!strcmp(dev_attr->attr.name, "in_write_index")) {
- return sprintf(buf, "%d\n", device_info.inbound.write_idx);
- } else if (!strcmp(dev_attr->attr.name, "in_read_bytes_avail")) {
- return sprintf(buf, "%d\n",
- device_info.inbound.bytes_avail_toread);
- } else if (!strcmp(dev_attr->attr.name, "in_write_bytes_avail")) {
- return sprintf(buf, "%d\n",
- device_info.inbound.bytes_avail_towrite);
- } else if (!strcmp(dev_attr->attr.name, "monitor_id")) {
- return sprintf(buf, "%d\n", device_info.monitor_id);
- } else if (!strcmp(dev_attr->attr.name, "server_monitor_pending")) {
- return sprintf(buf, "%d\n", device_info.server_monitor_pending);
- } else if (!strcmp(dev_attr->attr.name, "server_monitor_latency")) {
- return sprintf(buf, "%d\n", device_info.server_monitor_latency);
- } else if (!strcmp(dev_attr->attr.name, "server_monitor_conn_id")) {
- return sprintf(buf, "%d\n",
- device_info.server_monitor_conn_id);
- } else if (!strcmp(dev_attr->attr.name, "client_monitor_pending")) {
- return sprintf(buf, "%d\n", device_info.client_monitor_pending);
- } else if (!strcmp(dev_attr->attr.name, "client_monitor_latency")) {
- return sprintf(buf, "%d\n", device_info.client_monitor_latency);
- } else if (!strcmp(dev_attr->attr.name, "client_monitor_conn_id")) {
- return sprintf(buf, "%d\n",
- device_info.client_monitor_conn_id);
- } else {
- return 0;
- }
-}
-
-/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
-static struct device_attribute vmbus_device_attrs[] = {
- __ATTR(id, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(state, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(class_id, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(device_id, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(monitor_id, S_IRUGO, vmbus_show_device_attr, NULL),
-
- __ATTR(server_monitor_pending, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(server_monitor_latency, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(server_monitor_conn_id, S_IRUGO, vmbus_show_device_attr, NULL),
-
- __ATTR(client_monitor_pending, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(client_monitor_latency, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(client_monitor_conn_id, S_IRUGO, vmbus_show_device_attr, NULL),
-
- __ATTR(out_intr_mask, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(out_read_index, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(out_write_index, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(out_read_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(out_write_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
-
- __ATTR(in_intr_mask, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(in_read_index, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(in_write_index, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(in_read_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(in_write_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR_NULL
-};
-
-
-/*
- * vmbus_uevent - add uevent for our device
- *
- * This routine is invoked when a device is added or removed on the vmbus to
- * generate a uevent to udev in the userspace. The udev will then look at its
- * rule and the uevent generated here to load the appropriate driver
- */
-static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
-{
- struct hv_device *dev = device_to_hv_device(device);
- int ret;
-
- ret = add_uevent_var(env, "VMBUS_DEVICE_CLASS_GUID={"
- "%02x%02x%02x%02x-%02x%02x-%02x%02x-"
- "%02x%02x%02x%02x%02x%02x%02x%02x}",
- dev->dev_type.data[3],
- dev->dev_type.data[2],
- dev->dev_type.data[1],
- dev->dev_type.data[0],
- dev->dev_type.data[5],
- dev->dev_type.data[4],
- dev->dev_type.data[7],
- dev->dev_type.data[6],
- dev->dev_type.data[8],
- dev->dev_type.data[9],
- dev->dev_type.data[10],
- dev->dev_type.data[11],
- dev->dev_type.data[12],
- dev->dev_type.data[13],
- dev->dev_type.data[14],
- dev->dev_type.data[15]);
-
- if (ret)
- return ret;
-
- ret = add_uevent_var(env, "VMBUS_DEVICE_DEVICE_GUID={"
- "%02x%02x%02x%02x-%02x%02x-%02x%02x-"
- "%02x%02x%02x%02x%02x%02x%02x%02x}",
- dev->dev_instance.data[3],
- dev->dev_instance.data[2],
- dev->dev_instance.data[1],
- dev->dev_instance.data[0],
- dev->dev_instance.data[5],
- dev->dev_instance.data[4],
- dev->dev_instance.data[7],
- dev->dev_instance.data[6],
- dev->dev_instance.data[8],
- dev->dev_instance.data[9],
- dev->dev_instance.data[10],
- dev->dev_instance.data[11],
- dev->dev_instance.data[12],
- dev->dev_instance.data[13],
- dev->dev_instance.data[14],
- dev->dev_instance.data[15]);
- if (ret)
- return ret;
-
- return 0;
-}
-
-
-/*
- * vmbus_match - Attempt to match the specified device to the specified driver
- */
-static int vmbus_match(struct device *device, struct device_driver *driver)
-{
- int match = 0;
- struct hv_driver *drv = drv_to_hv_drv(driver);
- struct hv_device *hv_dev = device_to_hv_device(device);
-
- /* We found our driver ? */
- if (memcmp(&hv_dev->dev_type, &drv->dev_type,
- sizeof(struct hv_guid)) == 0)
- match = 1;
-
- return match;
-}
-
-/*
- * vmbus_probe - Add the new vmbus's child device
- */
-static int vmbus_probe(struct device *child_device)
-{
- int ret = 0;
- struct hv_driver *drv =
- drv_to_hv_drv(child_device->driver);
- struct hv_device *dev = device_to_hv_device(child_device);
-
- if (drv->probe) {
- ret = drv->probe(dev);
- if (ret != 0)
- pr_err("probe failed for device %s (%d)\n",
- dev_name(child_device), ret);
-
- } else {
- pr_err("probe not set for driver %s\n",
- dev_name(child_device));
- ret = -ENODEV;
- }
- return ret;
-}
-
-/*
- * vmbus_remove - Remove a vmbus device
- */
-static int vmbus_remove(struct device *child_device)
-{
- int ret;
- struct hv_driver *drv;
-
- struct hv_device *dev = device_to_hv_device(child_device);
-
- if (child_device->driver) {
- drv = drv_to_hv_drv(child_device->driver);
-
- if (drv->remove) {
- ret = drv->remove(dev);
- } else {
- pr_err("remove not set for driver %s\n",
- dev_name(child_device));
- ret = -ENODEV;
- }
- }
-
- return 0;
-}
-
-
-/*
- * vmbus_shutdown - Shutdown a vmbus device
- */
-static void vmbus_shutdown(struct device *child_device)
-{
- struct hv_driver *drv;
- struct hv_device *dev = device_to_hv_device(child_device);
-
-
- /* The device may not be attached yet */
- if (!child_device->driver)
- return;
-
- drv = drv_to_hv_drv(child_device->driver);
-
- if (drv->shutdown)
- drv->shutdown(dev);
-
- return;
-}
-
-
-/*
- * vmbus_device_release - Final callback release of the vmbus child device
- */
-static void vmbus_device_release(struct device *device)
-{
- struct hv_device *hv_dev = device_to_hv_device(device);
-
- kfree(hv_dev);
-
-}
-
-/* The one and only one */
-static struct bus_type hv_bus = {
- .name = "vmbus",
- .match = vmbus_match,
- .shutdown = vmbus_shutdown,
- .remove = vmbus_remove,
- .probe = vmbus_probe,
- .uevent = vmbus_uevent,
- .dev_attrs = vmbus_device_attrs,
-};
-
-static const char *driver_name = "hyperv";
-
-
-struct onmessage_work_context {
- struct work_struct work;
- struct hv_message msg;
-};
-
-static void vmbus_onmessage_work(struct work_struct *work)
-{
- struct onmessage_work_context *ctx;
-
- ctx = container_of(work, struct onmessage_work_context,
- work);
- vmbus_onmessage(&ctx->msg);
- kfree(ctx);
-}
-
-/*
- * vmbus_on_msg_dpc - DPC routine to handle messages from the hypervisior
- */
-static void vmbus_on_msg_dpc(unsigned long data)
-{
- int cpu = smp_processor_id();
- void *page_addr = hv_context.synic_message_page[cpu];
- struct hv_message *msg = (struct hv_message *)page_addr +
- VMBUS_MESSAGE_SINT;
- struct onmessage_work_context *ctx;
-
- while (1) {
- if (msg->header.message_type == HVMSG_NONE) {
- /* no msg */
- break;
- } else {
- ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
- if (ctx == NULL)
- continue;
- INIT_WORK(&ctx->work, vmbus_onmessage_work);
- memcpy(&ctx->msg, msg, sizeof(*msg));
- queue_work(vmbus_connection.work_queue, &ctx->work);
- }
-
- msg->header.message_type = HVMSG_NONE;
-
- /*
- * Make sure the write to MessageType (ie set to
- * HVMSG_NONE) happens before we read the
- * MessagePending and EOMing. Otherwise, the EOMing
- * will not deliver any more messages since there is
- * no empty slot
- */
- smp_mb();
-
- if (msg->header.message_flags.msg_pending) {
- /*
- * This will cause message queue rescan to
- * possibly deliver another msg from the
- * hypervisor
- */
- wrmsrl(HV_X64_MSR_EOM, 0);
- }
- }
-}
-
-/*
- * vmbus_on_isr - ISR routine
- */
-static int vmbus_on_isr(void)
-{
- int ret = 0;
- int cpu = smp_processor_id();
- void *page_addr;
- struct hv_message *msg;
- union hv_synic_event_flags *event;
-
- page_addr = hv_context.synic_message_page[cpu];
- msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
-
- /* Check if there are actual msgs to be process */
- if (msg->header.message_type != HVMSG_NONE)
- ret |= 0x1;
-
- page_addr = hv_context.synic_event_page[cpu];
- event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
-
- /* Since we are a child, we only need to check bit 0 */
- if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0]))
- ret |= 0x2;
-
- return ret;
-}
-
-
-static irqreturn_t vmbus_isr(int irq, void *dev_id)
-{
- int ret;
-
- ret = vmbus_on_isr();
-
- /* Schedules a dpc if necessary */
- if (ret > 0) {
- if (test_bit(0, (unsigned long *)&ret))
- tasklet_schedule(&msg_dpc);
-
- if (test_bit(1, (unsigned long *)&ret))
- tasklet_schedule(&event_dpc);
-
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
-}
-
-/*
- * vmbus_bus_init -Main vmbus driver initialization routine.
- *
- * Here, we
- * - initialize the vmbus driver context
- * - invoke the vmbus hv main init routine
- * - get the irq resource
- * - retrieve the channel offers
- */
-static int vmbus_bus_init(int irq)
-{
- int ret;
- unsigned int vector;
-
- /* Hypervisor initialization...setup hypercall page..etc */
- ret = hv_init();
- if (ret != 0) {
- pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
- return ret;
- }
-
- /* Initialize the bus context */
- tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
- tasklet_init(&event_dpc, vmbus_on_event, 0);
-
- /* Now, register the bus with LDM */
- ret = bus_register(&hv_bus);
- if (ret)
- return ret;
-
- /* Get the interrupt resource */
- ret = request_irq(irq, vmbus_isr, IRQF_SAMPLE_RANDOM,
- driver_name, hv_acpi_dev);
-
- if (ret != 0) {
- pr_err("Unable to request IRQ %d\n",
- irq);
-
- bus_unregister(&hv_bus);
-
- return ret;
- }
-
- vector = IRQ0_VECTOR + irq;
-
- /*
- * Notify the hypervisor of our irq and
- * connect to the host.
- */
- on_each_cpu(hv_synic_init, (void *)&vector, 1);
- ret = vmbus_connect();
- if (ret) {
- free_irq(irq, hv_acpi_dev);
- bus_unregister(&hv_bus);
- return ret;
- }
-
-
- vmbus_request_offers();
-
- return 0;
-}
-
-/**
- * vmbus_child_driver_register() - Register a vmbus's child driver
- * @drv: Pointer to driver structure you want to register
- *
- *
- * Registers the given driver with Linux through the 'driver_register()' call
- * And sets up the hyper-v vmbus handling for this driver.
- * It will return the state of the 'driver_register()' call.
- *
- * Mainly used by Hyper-V drivers.
- */
-int vmbus_child_driver_register(struct device_driver *drv)
-{
- int ret;
-
- pr_info("child driver registering - name %s\n", drv->name);
-
- /* The child driver on this vmbus */
- drv->bus = &hv_bus;
-
- ret = driver_register(drv);
-
- vmbus_request_offers();
-
- return ret;
-}
-EXPORT_SYMBOL(vmbus_child_driver_register);
-
-/**
- * vmbus_child_driver_unregister() - Unregister a vmbus's child driver
- * @drv: Pointer to driver structure you want to un-register
- *
- *
- * Un-register the given driver with Linux through the 'driver_unregister()'
- * call. And ungegisters the driver from the Hyper-V vmbus handler.
- *
- * Mainly used by Hyper-V drivers.
- */
-void vmbus_child_driver_unregister(struct device_driver *drv)
-{
- pr_info("child driver unregistering - name %s\n", drv->name);
-
- driver_unregister(drv);
-
-}
-EXPORT_SYMBOL(vmbus_child_driver_unregister);
-
-/*
- * vmbus_child_device_create - Creates and registers a new child device
- * on the vmbus.
- */
-struct hv_device *vmbus_child_device_create(struct hv_guid *type,
- struct hv_guid *instance,
- struct vmbus_channel *channel)
-{
- struct hv_device *child_device_obj;
-
- /* Allocate the new child device */
- child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
- if (!child_device_obj) {
- pr_err("Unable to allocate device object for child device\n");
- return NULL;
- }
-
- child_device_obj->channel = channel;
- memcpy(&child_device_obj->dev_type, type, sizeof(struct hv_guid));
- memcpy(&child_device_obj->dev_instance, instance,
- sizeof(struct hv_guid));
-
-
- return child_device_obj;
-}
-
-/*
- * vmbus_child_device_register - Register the child device
- */
-int vmbus_child_device_register(struct hv_device *child_device_obj)
-{
- int ret = 0;
-
- static atomic_t device_num = ATOMIC_INIT(0);
-
- /* Set the device name. Otherwise, device_register() will fail. */
- dev_set_name(&child_device_obj->device, "vmbus_0_%d",
- atomic_inc_return(&device_num));
-
- /* The new device belongs to this bus */
- child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
- child_device_obj->device.parent = &hv_acpi_dev->dev;
- child_device_obj->device.release = vmbus_device_release;
-
- /*
- * Register with the LDM. This will kick off the driver/device
- * binding...which will eventually call vmbus_match() and vmbus_probe()
- */
- ret = device_register(&child_device_obj->device);
-
- if (ret)
- pr_err("Unable to register child device\n");
- else
- pr_info("child device %s registered\n",
- dev_name(&child_device_obj->device));
-
- return ret;
-}
-
-/*
- * vmbus_child_device_unregister - Remove the specified child device
- * from the vmbus.
- */
-void vmbus_child_device_unregister(struct hv_device *device_obj)
-{
- /*
- * Kick off the process of unregistering the device.
- * This will call vmbus_remove() and eventually vmbus_device_release()
- */
- device_unregister(&device_obj->device);
-
- pr_info("child device %s unregistered\n",
- dev_name(&device_obj->device));
-}
-
-
-/*
- * VMBUS is an acpi enumerated device. Get the the IRQ information
- * from DSDT.
- */
-
-static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq)
-{
-
- if (res->type == ACPI_RESOURCE_TYPE_IRQ) {
- struct acpi_resource_irq *irqp;
- irqp = &res->data.irq;
-
- *((unsigned int *)irq) = irqp->interrupts[0];
- }
-
- return AE_OK;
-}
-
-static int vmbus_acpi_add(struct acpi_device *device)
-{
- acpi_status result;
-
- hv_acpi_dev = device;
-
- result =
- acpi_walk_resources(device->handle, METHOD_NAME__CRS,
- vmbus_walk_resources, &irq);
-
- if (ACPI_FAILURE(result)) {
- complete(&probe_event);
- return -ENODEV;
- }
- complete(&probe_event);
- return 0;
-}
-
-static const struct acpi_device_id vmbus_acpi_device_ids[] = {
- {"VMBUS", 0},
- {"VMBus", 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
-
-static struct acpi_driver vmbus_acpi_driver = {
- .name = "vmbus",
- .ids = vmbus_acpi_device_ids,
- .ops = {
- .add = vmbus_acpi_add,
- },
-};
-
-/*
- * We use a PCI table to determine if we should autoload this driver This is
- * needed by distro tools to determine if the hyperv drivers should be
- * installed and/or configured. We don't do anything else with the table, but
- * it needs to be present.
- */
-static const struct pci_device_id microsoft_hv_pci_table[] = {
- { PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, microsoft_hv_pci_table);
-
-static int __init hv_acpi_init(void)
-{
- int ret;
-
- init_completion(&probe_event);
-
- /*
- * Get irq resources first.
- */
-
- ret = acpi_bus_register_driver(&vmbus_acpi_driver);
-
- if (ret)
- return ret;
-
- wait_for_completion(&probe_event);
-
- if (irq <= 0) {
- acpi_bus_unregister_driver(&vmbus_acpi_driver);
- return -ENODEV;
- }
-
- ret = vmbus_bus_init(irq);
- if (ret)
- acpi_bus_unregister_driver(&vmbus_acpi_driver);
- return ret;
-}
-
-
-MODULE_LICENSE("GPL");
-MODULE_VERSION(HV_DRV_VERSION);
-module_param(vmbus_loglevel, int, S_IRUGO|S_IWUSR);
-
-module_init(hv_acpi_init);
diff --git a/drivers/staging/iio/Documentation/generic_buffer.c b/drivers/staging/iio/Documentation/generic_buffer.c
index f82894f42d2..d5809532149 100644
--- a/drivers/staging/iio/Documentation/generic_buffer.c
+++ b/drivers/staging/iio/Documentation/generic_buffer.c
@@ -173,7 +173,7 @@ int main(int argc, char **argv)
return -1;
/* Find the device requested */
- dev_num = find_type_by_name(device_name, "device");
+ dev_num = find_type_by_name(device_name, "iio:device");
if (dev_num < 0) {
printf("Failed to find the %s\n", device_name);
ret = -ENODEV;
@@ -181,7 +181,7 @@ int main(int argc, char **argv)
}
printf("iio device number being used is %d\n", dev_num);
- asprintf(&dev_dir_name, "%sdevice%d", iio_dir, dev_num);
+ asprintf(&dev_dir_name, "%siio:device%d", iio_dir, dev_num);
if (trigger_name == NULL) {
/*
* Build the trigger name. If it is device associated it's
@@ -212,6 +212,7 @@ int main(int argc, char **argv)
ret = build_channel_array(dev_dir_name, &infoarray, &num_channels);
if (ret) {
printf("Problem reading scan element information\n");
+ printf("diag %s\n", dev_dir_name);
goto error_free_triggername;
}
@@ -220,7 +221,8 @@ int main(int argc, char **argv)
* As we know that the lis3l02dq has only one buffer this may
* be built rather than found.
*/
- ret = asprintf(&buf_dir_name, "%sdevice%d:buffer0", iio_dir, dev_num);
+ ret = asprintf(&buf_dir_name,
+ "%siio:device%d/buffer", iio_dir, dev_num);
if (ret < 0) {
ret = -ENOMEM;
goto error_free_triggername;
@@ -251,9 +253,7 @@ int main(int argc, char **argv)
goto error_free_buf_dir_name;
}
- ret = asprintf(&buffer_access,
- "/dev/device%d:buffer0",
- dev_num);
+ ret = asprintf(&buffer_access, "/dev/iio:device%d", dev_num);
if (ret < 0) {
ret = -ENOMEM;
goto error_free_data;
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 150f4407363..75938b2412f 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -16,7 +16,7 @@
#define IIO_MAX_NAME_LENGTH 30
-#define FORMAT_SCAN_ELEMENTS_DIR "%s:buffer0/scan_elements"
+#define FORMAT_SCAN_ELEMENTS_DIR "%s/scan_elements"
#define FORMAT_TYPE_FILE "%s_type"
const char *iio_dir = "/sys/bus/iio/devices/";
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio b/drivers/staging/iio/Documentation/sysfs-bus-iio
index 467c49a4725..0d6823d19b6 100644
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio
@@ -1,17 +1,11 @@
-What: /sys/bus/iio/devices/deviceX
+What: /sys/bus/iio/devices/iio:deviceX
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- Hardware chip or device accessed by on communication port.
+ Hardware chip or device accessed by one communication port.
Corresponds to a grouping of sensor channels. X is the IIO
index of the device.
-What: /sys/bus/iio/devices/device[n]/power_state
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property gets/sets the device power state.
-
What: /sys/bus/iio/devices/triggerX
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
@@ -25,21 +19,22 @@ Description:
generalize well and hence are not documented in this file.
X is the IIO index of the trigger.
-What: /sys/bus/iio/devices/deviceX:buffer
+What: /sys/bus/iio/devices/iio:deviceX/buffer
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- Link to /sys/class/iio/deviceX/deviceX:buffer. X indicates
- the device with which this buffer buffer is associated.
+ Directory of attributes relating to the buffer for the device.
-What: /sys/bus/iio/devices/deviceX/name
+What: /sys/bus/iio/devices/iio:deviceX/name
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
Description of the physical chip / device for device X.
Typically a part number.
-What: /sys/bus/iio/devices/deviceX/sampling_frequency
+What: /sys/bus/iio/devices/iio:deviceX/sampling_frequency
+What: /sys/bus/iio/devices/iio:deviceX/buffer/sampling_frequency
+What: /sys/bus/iio/devices/triggerX/sampling_frequency
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -50,42 +45,32 @@ Description:
effects datardy triggers, hardware buffers and the sysfs
direct access interfaces, it may be found in any of the
relevant directories. If it effects all of the above
- then it is to be found in the base device directory as here.
+ then it is to be found in the base device directory.
-What: /sys/bus/iio/devices/deviceX/sampling_frequency_available
+What: /sys/bus/iio/devices/iio:deviceX/sampling_frequency_available
+What: /sys/.../iio:deviceX/buffer/sampling_frequency_available
+What: /sys/bus/iio/devices/triggerX/sampling_frequency_available
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
When the internal sampling clock can only take a small
discrete set of values, this file lists those available.
-What: /sys/bus/iio/devices/deviceX/range
-KernelVersion: 2.6.38
-Contact: linux-iio@vger.kernel.org
-Description:
- Hardware dependent ADC Full Scale Range in mVolt.
-
-What: /sys/bus/iio/devices/deviceX/range_available
-KernelVersion: 2.6.38
-Contact: linux-iio@vger.kernel.org
-Description:
- Hardware dependent supported vales for ADC Full Scale Range.
-
-What: /sys/bus/iio/devices/deviceX/oversampling_ratio
+What: /sys/bus/iio/devices/iio:deviceX/oversampling_ratio
KernelVersion: 2.6.38
Contact: linux-iio@vger.kernel.org
Description:
Hardware dependent ADC oversampling. Controls the sampling ratio
of the digital filter if available.
-What: /sys/bus/iio/devices/deviceX/oversampling_ratio_available
+What: /sys/bus/iio/devices/iio:deviceX/oversampling_ratio_available
KernelVersion: 2.6.38
Contact: linux-iio@vger.kernel.org
Description:
Hardware dependent values supported by the oversampling filter.
-What: /sys/bus/iio/devices/deviceX/inY_raw
-What: /sys/bus/iio/devices/deviceX/inY_supply_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -93,9 +78,10 @@ Description:
channel Y. In special cases where the channel does not
correspond to externally available input one of the named
versions may be used. The number must always be specified and
- unique to allow association with event codes.
+ unique to allow association with event codes. Units after
+ application of scale and offset are microvolts.
-What: /sys/bus/iio/devices/deviceX/inY-inZ_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY-voltageZ_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -103,52 +89,73 @@ Description:
channel Y - channel Z where these channel numbers apply to the
physically equivalent inputs when non differential readings are
separately available. In differential only parts, then all that
- is required is a consistent labeling.
+ is required is a consistent labeling. Units after application
+ of scale and offset are microvolts.
-What: /sys/bus/iio/devices/deviceX/temp_raw
-What: /sys/bus/iio/devices/deviceX/temp_x_raw
-What: /sys/bus/iio/devices/deviceX/temp_y_raw
-What: /sys/bus/iio/devices/deviceX/temp_z_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_capacitanceY_raw
+KernelVersion: 3.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw capacitance measurement from channel Y. Units after
+ application of scale and offset are nanofarads.
+
+What: /sys/.../iio:deviceX/in_capacitanceY-in_capacitanceZ_raw
+KernelVersion: 3.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw differential capacitance measurement equivalent to
+ channel Y - channel Z where these channel numbers apply to the
+ physically equivalent inputs when non differential readings are
+ separately available. In differential only parts, then all that
+ is required is a consistent labeling. Units after application
+ of scale and offset are nanofarads..
+
+What: /sys/bus/iio/devices/iio:deviceX/in_temp_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_tempX_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_temp_x_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_temp_y_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_temp_z_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
Raw (unscaled no bias removal etc) temperature measurement.
It an axis is specified it generally means that the temperature
sensor is associated with one part of a compound device (e.g.
- a gyroscope axis).
+ a gyroscope axis). Units after application of scale and offset
+ are milli degrees Celsuis.
-What: /sys/bus/iio/devices/deviceX/tempX_input
+What: /sys/bus/iio/devices/iio:deviceX/in_tempX_input
KernelVersion: 2.6.38
Contact: linux-iio@vger.kernel.org
Description:
Scaled temperature measurement in milli degrees Celsius.
-What: /sys/bus/iio/devices/deviceX/accel_x_raw
-What: /sys/bus/iio/devices/deviceX/accel_y_raw
-What: /sys/bus/iio/devices/deviceX/accel_z_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_z_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
Acceleration in direction x, y or z (may be arbitrarily assigned
- but should match other such assignments on device)
- channel m (not present if only one accelerometer channel at
- this orientation). Has all of the equivalent parameters as per
- inY. Units after application of scale and offset are m/s^2.
-
-What: /sys/bus/iio/devices/deviceX/gyro_x_raw
-What: /sys/bus/iio/devices/deviceX/gyro_y_raw
-What: /sys/bus/iio/devices/deviceX/gyro_z_raw
+ but should match other such assignments on device).
+ Has all of the equivalent parameters as per voltageY. Units
+ after application of scale and offset are m/s^2.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_x_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_y_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_z_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
Angular velocity about axis x, y or z (may be arbitrarily
assigned) Data converted by application of offset then scale to
radians per second. Has all the equivalent parameters as
- per inY.
+ per voltageY. Units after application of scale and offset are
+ radians per second.
-What: /sys/bus/iio/devices/deviceX/incli_x_raw
-What: /sys/bus/iio/devices/deviceX/incli_y_raw
-What: /sys/bus/iio/devices/deviceX/incli_z_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_incli_x_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_incli_y_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_incli_z_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -156,63 +163,69 @@ Description:
arbitrarily assigned). Data converted by application of offset
and scale to Degrees.
-What: /sys/bus/iio/devices/deviceX/magn_x_raw
-What: /sys/bus/iio/devices/deviceX/magn_y_raw
-What: /sys/bus/iio/devices/deviceX/magn_z_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_x_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_y_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_z_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
Magnetic field along axis x, y or z (may be arbitrarily
- assigned) channel m (not present if only one magnetometer
- at this orientation). Data converted by application of
- offset then scale to Gauss. Has all the equivalent modifiers
- as per inY.
-
-What: /sys/bus/iio/devices/deviceX/accel_x_peak_raw
-What: /sys/bus/iio/devices/deviceX/accel_y_peak_raw
-What: /sys/bus/iio/devices/deviceX/accel_z_peak_raw
+ assigned). Data converted by application of offset
+ then scale to Gauss.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_peak_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_peak_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_z_peak_raw
KernelVersion: 2.6.36
Contact: linux-iio@vger.kernel.org
Description:
- Some devices provide a store of the highest value seen since
- some reset condition. These attributes allow access to this
- and are otherwise the direct equivalent of the
- <type>Y[_name]_raw attributes.
+ Highest value since some reset condition. These
+ attributes allow access to this and are otherwise
+ the direct equivalent of the <type>Y[_name]_raw attributes.
-What: /sys/bus/iio/devices/deviceX/accel_xyz_squared_peak_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_xyz_squared_peak_raw
KernelVersion: 2.6.36
Contact: linux-iio@vger.kernel.org
Description:
A computed peak value based on the sum squared magnitude of
the underlying value in the specified directions.
-What: /sys/bus/iio/devices/deviceX/accel_offset
-What: /sys/bus/iio/devices/deviceX/temp_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_z_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_tempY_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_temp_offset
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
If known for a device, offset to be added to <type>[Y]_raw prior
to scaling by <type>[Y]_scale in order to obtain value in the
<type> units as specified in <type>[y]_raw documentation.
- Not present if the offset is always 0 or unknown. If Y is not
- present, then the offset applies to all in channels of <type>.
+ Not present if the offset is always 0 or unknown. If Y or
+ axis <x|y|z> is not present, then the offset applies to all
+ in channels of <type>.
May be writable if a variable offset can be applied on the
device. Note that this is different to calibbias which
is for devices (or drivers) that apply offsets to compensate
for variation between different instances of the part, typically
adjusted by using some hardware supported calibration procedure.
-
-What: /sys/bus/iio/devices/deviceX/inY_scale
-What: /sys/bus/iio/devices/deviceX/inY_supply_scale
-What: /sys/bus/iio/devices/deviceX/in_scale
-What: /sys/bus/iio/devices/deviceX/outY_scale
-What: /sys/bus/iio/devices/deviceX/accel_scale
-What: /sys/bus/iio/devices/deviceX/accel_peak_scale
-What: /sys/bus/iio/devices/deviceX/gyro_scale
-What: /sys/bus/iio/devices/deviceX/magn_scale
-What: /sys/bus/iio/devices/deviceX/magn_x_scale
-What: /sys/bus/iio/devices/deviceX/magn_y_scale
-What: /sys/bus/iio/devices/deviceX/magn_z_scale
+ Calibbias is applied internally, offset is applied in userspace
+ to the _raw output.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage_scale
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_peak_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_x_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_y_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_z_scale
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -220,32 +233,31 @@ Description:
post addition of <type>[Y][_name]_offset in order to obtain the
measured value in <type> units as specified in
<type>[Y][_name]_raw documentation.. If shared across all in
- channels then Y is not present and the value is called
- <type>[Y][_name]_scale. The peak modifier means this value
- is applied to <type>Y[_name]_peak_raw values.
-
-What: /sys/bus/iio/devices/deviceX/accel_x_calibbias
-What: /sys/bus/iio/devices/deviceX/accel_y_calibbias
-What: /sys/bus/iio/devices/deviceX/accel_z_calibbias
-What: /sys/bus/iio/devices/deviceX/gyro_x_calibbias
-What: /sys/bus/iio/devices/deviceX/gyro_y_calibbias
-What: /sys/bus/iio/devices/deviceX/gyro_z_calibbias
+ channels then Y and <x|y|z> are not present and the value is
+ called <type>[Y][_name]_scale. The peak modifier means this
+ value is applied to <type>Y[_name]_peak_raw values.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_z_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_x_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_y_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_z_calibbias
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
Hardware applied calibration offset. (assumed to fix production
- inaccuracies). If shared across all channels, <type>_calibbias
- is used.
-
-What /sys/bus/iio/devices/deviceX/inY_calibscale
-What /sys/bus/iio/devices/deviceX/inY_supply_calibscale
-What /sys/bus/iio/devices/deviceX/in_calibscale
-What /sys/bus/iio/devices/deviceX/accel_x_calibscale
-What /sys/bus/iio/devices/deviceX/accel_y_calibscale
-What /sys/bus/iio/devices/deviceX/accel_z_calibscale
-What /sys/bus/iio/devices/deviceX/gyro_x_calibscale
-What /sys/bus/iio/devices/deviceX/gyro_y_calibscale
-What /sys/bus/iio/devices/deviceX/gyro_z_calibscale
+ inaccuracies).
+
+What /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibscale
+What /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_calibscale
+What /sys/bus/iio/devices/iio:deviceX/in_voltage_calibscale
+What /sys/bus/iio/devices/iio:deviceX/in_accel_x_calibscale
+What /sys/bus/iio/devices/iio:deviceX/in_accel_y_calibscale
+What /sys/bus/iio/devices/iio:deviceX/in_accel_z_calibscale
+What /sys/bus/iio/devices/iio:deviceX/in_anglvel_x_calibscale
+What /sys/bus/iio/devices/iio:deviceX/in_anglvel_y_calibscale
+What /sys/bus/iio/devices/iio:deviceX/in_anglvel_z_calibscale
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -253,14 +265,18 @@ Description:
production inaccuracies). If shared across all channels,
<type>_calibscale is used.
-What: /sys/bus/iio/devices/deviceX/accel_scale_available
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_scale_available
+What: /sys/.../iio:deviceX/in_voltageX_scale_available
+What: /sys/.../iio:deviceX/in_voltage-voltage_scale_available
+What: /sys/.../iio:deviceX/out_voltageX_scale_available
+What: /sys/.../iio:deviceX/in_capacitance_scale_available
KernelVersion: 2.635
Contact: linux-iio@vger.kernel.org
Description:
If a discrete set of scale values are available, they
are listed in this attribute.
-What: /sys/bus/iio/devices/deviceX/outY_raw
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
@@ -268,7 +284,7 @@ Description:
channel Y. The number must always be specified and
unique if the output corresponds to a single channel.
-What: /sys/bus/iio/devices/deviceX/outY&Z_raw
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY&Z_raw
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
@@ -277,8 +293,8 @@ Description:
where a single output sets the value for multiple channels
simultaneously.
-What: /sys/bus/iio/devices/deviceX/outY_powerdown_mode
-What: /sys/bus/iio/devices/deviceX/out_powerdown_mode
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown_mode
+What: /sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown_mode
KernelVersion: 2.6.38
Contact: linux-iio@vger.kernel.org
Description:
@@ -291,16 +307,16 @@ Description:
outX_powerdown_mode_available. If Y is not present the
mode is shared across all outputs.
-What: /sys/bus/iio/devices/deviceX/outY_powerdown_mode_available
-What: /sys/bus/iio/devices/deviceX/out_powerdown_mode_available
+What: /sys/.../iio:deviceX/out_votlageY_powerdown_mode_available
+What: /sys/.../iio:deviceX/out_voltage_powerdown_mode_available
KernelVersion: 2.6.38
Contact: linux-iio@vger.kernel.org
Description:
Lists all available output power down modes.
If Y is not present the mode is shared across all outputs.
-What: /sys/bus/iio/devices/deviceX/outY_powerdown
-What: /sys/bus/iio/devices/deviceX/out_powerdown
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown
+What: /sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown
KernelVersion: 2.6.38
Contact: linux-iio@vger.kernel.org
Description:
@@ -309,55 +325,47 @@ Description:
normal operation. Y may be suppressed if all outputs are
controlled together.
-What: /sys/bus/iio/devices/deviceX/deviceX:eventY
+What: /sys/bus/iio/devices/iio:deviceX/events
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
Configuration of which hardware generated events are passed up
to user-space.
-What: /sys/bus/iio/devices/deviceX:event/dev
-What: /sys/bus/iio/devices/deviceX:eventY/dev
-KernelVersion: 2.6.35
-Contact: linux-iio@vger.kernel.org
-Description:
- major:minor character device numbers for the event line Y of
- device X.
-
-What: /sys/.../deviceX:eventY/accel_x_thresh_rising_en
-What: /sys/.../deviceX:eventY/accel_x_thresh_falling_en
-What: /sys/.../deviceX:eventY/accel_y_thresh_rising_en
-What: /sys/.../deviceX:eventY/accel_y_thresh_falling_en
-What: /sys/.../deviceX:eventY/accel_z_thresh_rising_en
-What: /sys/.../deviceX:eventY/accel_z_thresh_falling_en
-What: /sys/.../deviceX:eventY/gyro_x_thresh_rising_en
-What: /sys/.../deviceX:eventY/gyro_x_thresh_falling_en
-What: /sys/.../deviceX:eventY/gyro_y_thresh_rising_en
-What: /sys/.../deviceX:eventY/gyro_y_thresh_falling_en
-What: /sys/.../deviceX:eventY/gyro_z_thresh_rising_en
-What: /sys/.../deviceX:eventY/gyro_z_thresh_falling_en
-What: /sys/.../deviceX:eventY/magn_x_thresh_rising_en
-What: /sys/.../deviceX:eventY/magn_x_thresh_falling_en
-What: /sys/.../deviceX:eventY/magn_y_thresh_rising_en
-What: /sys/.../deviceX:eventY/magn_y_thresh_falling_en
-What: /sys/.../deviceX:eventY/magn_z_thresh_rising_en
-What: /sys/.../deviceX:eventY/magn_z_thresh_falling_en
-What: /sys/.../deviceX:eventY/inZ_supply_thresh_rising_en
-What: /sys/.../deviceX:eventY/inZ_supply_thresh_falling_en
-What: /sys/.../deviceX:eventY/inZ_thresh_rising_en
-What: /sys/.../deviceX:eventY/inZ_thresh_falling_en
-What: /sys/.../deviceX:eventY/temp_thresh_rising_en
-What: /sys/.../deviceX:eventY/temp_thresh_falling_en
+What: /sys/.../iio:deviceX/events/in_accel_x_thresh_rising_en
+What: /sys/.../iio:deviceX/events/in_accel_x_thresh_falling_en
+What: /sys/.../iio:deviceX/events/in_accel_y_thresh_rising_en
+What: /sys/.../iio:deviceX/events/in_accel_y_thresh_falling_en
+What: /sys/.../iio:deviceX/events/in_accel_z_thresh_rising_en
+What: /sys/.../iio:deviceX/events/in_accel_z_thresh_falling_en
+What: /sys/.../iio:deviceX/events/in_anglvel_x_thresh_rising_en
+What: /sys/.../iio:deviceX/events/in_anglvel_x_thresh_falling_en
+What: /sys/.../iio:deviceX/events/in_anglvel_y_thresh_rising_en
+What: /sys/.../iio:deviceX/events/in_anglvel_y_thresh_falling_en
+What: /sys/.../iio:deviceX/events/in_anglvel_z_thresh_rising_en
+What: /sys/.../iio:deviceX/events/in_anglvel_z_thresh_falling_en
+What: /sys/.../iio:deviceX/events/in_magn_x_thresh_rising_en
+What: /sys/.../iio:deviceX/events/in_magn_x_thresh_falling_en
+What: /sys/.../iio:deviceX/events/in_magn_y_thresh_rising_en
+What: /sys/.../iio:deviceX/events/in_magn_y_thresh_falling_en
+What: /sys/.../iio:deviceX/events/in_magn_z_thresh_rising_en
+What: /sys/.../iio:deviceX/events/in_magn_z_thresh_falling_en
+What: /sys/.../iio:deviceX/events/in_voltageY_supply_thresh_rising_en
+What: /sys/.../iio:deviceX/events/in_voltageY_supply_thresh_falling_en
+What: /sys/.../iio:deviceX/events/in_voltageY_thresh_rising_en
+What: /sys/.../iio:deviceX/events/in_voltageY_thresh_falling_en
+What: /sys/.../iio:deviceX/events/in_tempY_thresh_rising_en
+What: /sys/.../iio:deviceX/events/in_tempY_thresh_falling_en
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
Event generated when channel passes a threshold in the specified
(_rising|_falling) direction. If the direction is not specified,
then either the device will report an event which ever direction
- a single threshold value is called in (e.g.
- <type>[Z][_name]_<raw|input>_thresh_value) or
- <type>[Z][_name]_<raw|input>_thresh_rising_value and
- <type>[Z][_name]_<raw|input>_thresh_falling_value may take
+ a single threshold value is passed in (e.g.
+ <type>[Y][_name]_<raw|input>_thresh_value) or
+ <type>[Y][_name]_<raw|input>_thresh_rising_value and
+ <type>[Y][_name]_<raw|input>_thresh_falling_value may take
different values, but the device can only enable both thresholds
or neither.
Note the driver will assume the last p events requested are
@@ -369,30 +377,30 @@ Description:
a given event type is enabled a future point (and not those for
whatever event was previously enabled).
-What: /sys/.../deviceX:eventY/accel_x_roc_rising_en
-What: /sys/.../deviceX:eventY/accel_x_roc_falling_en
-What: /sys/.../deviceX:eventY/accel_y_roc_rising_en
-What: /sys/.../deviceX:eventY/accel_y_roc_falling_en
-What: /sys/.../deviceX:eventY/accel_z_roc_rising_en
-What: /sys/.../deviceX:eventY/accel_z_roc_falling_en
-What: /sys/.../deviceX:eventY/gyro_x_roc_rising_en
-What: /sys/.../deviceX:eventY/gyro_x_roc_falling_en
-What: /sys/.../deviceX:eventY/gyro_y_roc_rising_en
-What: /sys/.../deviceX:eventY/gyro_y_roc_falling_en
-What: /sys/.../deviceX:eventY/gyro_z_roc_rising_en
-What: /sys/.../deviceX:eventY/gyro_z_roc_falling_en
-What: /sys/.../deviceX:eventY/magn_x_roc_rising_en
-What: /sys/.../deviceX:eventY/magn_x_roc_falling_en
-What: /sys/.../deviceX:eventY/magn_y_roc_rising_en
-What: /sys/.../deviceX:eventY/magn_y_roc_falling_en
-What: /sys/.../deviceX:eventY/magn_z_roc_rising_en
-What: /sys/.../deviceX:eventY/magn_z_roc_falling_en
-What: /sys/.../deviceX:eventY/inZ_supply_roc_rising_en
-What: /sys/.../deviceX:eventY/inZ_supply_roc_falling_en
-What: /sys/.../deviceX:eventY/inZ_roc_rising_en
-What: /sys/.../deviceX:eventY/inZ_roc_falling_en
-What: /sys/.../deviceX:eventY/temp_roc_rising_en
-What: /sys/.../deviceX:eventY/temp_roc_falling_en
+What: /sys/.../iio:deviceX/events/in_accel_x_roc_rising_en
+What: /sys/.../iio:deviceX/events/in_accel_x_roc_falling_en
+What: /sys/.../iio:deviceX/events/in_accel_y_roc_rising_en
+What: /sys/.../iio:deviceX/events/in_accel_y_roc_falling_en
+What: /sys/.../iio:deviceX/events/in_accel_z_roc_rising_en
+What: /sys/.../iio:deviceX/events/in_accel_z_roc_falling_en
+What: /sys/.../iio:deviceX/events/in_anglvel_x_roc_rising_en
+What: /sys/.../iio:deviceX/events/in_anglvel_x_roc_falling_en
+What: /sys/.../iio:deviceX/events/in_anglvel_y_roc_rising_en
+What: /sys/.../iio:deviceX/events/in_anglvel_y_roc_falling_en
+What: /sys/.../iio:deviceX/events/in_anglvel_z_roc_rising_en
+What: /sys/.../iio:deviceX/events/in_anglvel_z_roc_falling_en
+What: /sys/.../iio:deviceX/events/in_magn_x_roc_rising_en
+What: /sys/.../iio:deviceX/events/in_magn_x_roc_falling_en
+What: /sys/.../iio:deviceX/events/in_magn_y_roc_rising_en
+What: /sys/.../iio:deviceX/events/in_magn_y_roc_falling_en
+What: /sys/.../iio:deviceX/events/in_magn_z_roc_rising_en
+What: /sys/.../iio:deviceX/events/in_magn_z_roc_falling_en
+What: /sys/.../iio:deviceX/events/in_voltageY_supply_roc_rising_en
+What: /sys/.../iio:deviceX/events/in_voltageY_supply_roc_falling_en
+What: /sys/.../iio:deviceX/events/in_voltageY_roc_rising_en
+What: /sys/.../iio:deviceX/events/in_voltageY_roc_falling_en
+What: /sys/.../iio:deviceX/events/in_tempY_roc_rising_en
+What: /sys/.../iio:deviceX/events/in_tempY_roc_falling_en
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
@@ -400,10 +408,10 @@ Description:
change (1st differential) in the specified (_rising|_falling)
direction. If the direction is not specified, then either the
device will report an event which ever direction a single
- threshold value is called in (e.g.
- <type>[Z][_name]_<raw|input>_roc_value) or
- <type>[Z][_name]_<raw|input>_roc_rising_value and
- <type>[Z][_name]_<raw|input>_roc_falling_value may take
+ threshold value is passed in (e.g.
+ <type>[Y][_name]_<raw|input>_roc_value) or
+ <type>[Y][_name]_<raw|input>_roc_rising_value and
+ <type>[Y][_name]_<raw|input>_roc_falling_value may take
different values, but the device can only enable both rate of
change thresholds or neither.
Note the driver will assume the last p events requested are
@@ -415,73 +423,73 @@ Description:
a given event type is enabled a future point (and not those for
whatever event was previously enabled).
-What: /sys/.../deviceX:eventY/accel_x_raw_thresh_rising_value
-What: /sys/.../deviceX:eventY/accel_x_raw_thresh_falling_value
-What: /sys/.../deviceX:eventY/accel_y_raw_thresh_rising_value
-What: /sys/.../deviceX:eventY/accel_y_raw_thresh_falling_value
-What: /sys/.../deviceX:eventY/accel_z_raw_thresh_rising_value
-What: /sys/.../deviceX:eventY/accel_z_raw_thresh_falling_value
-What: /sys/.../deviceX:eventY/gyro_x_raw_thresh_rising_value
-What: /sys/.../deviceX:eventY/gyro_x_raw_thresh_falling_value
-What: /sys/.../deviceX:eventY/gyro_y_raw_thresh_rising_value
-What: /sys/.../deviceX:eventY/gyro_y_raw_thresh_falling_value
-What: /sys/.../deviceX:eventY/gyro_z_raw_thresh_rising_value
-What: /sys/.../deviceX:eventY/gyro_z_raw_thresh_falling_value
-What: /sys/.../deviceX:eventY/magn_x_raw_thresh_rising_value
-What: /sys/.../deviceX:eventY/magn_x_raw_thresh_falling_value
-What: /sys/.../deviceX:eventY/magn_y_raw_thresh_rising_value
-What: /sys/.../deviceX:eventY/magn_y_raw_thresh_falling_value
-What: /sys/.../deviceX:eventY/magn_z_raw_thresh_rising_value
-What: /sys/.../deviceX:eventY/magn_z_raw_thresh_falling_value
-What: /sys/.../deviceX:eventY/inZ_supply_raw_thresh_rising_value
-What: /sys/.../deviceX:eventY/inZ_supply_raw_thresh_falling_value
-What: /sys/.../deviceX:eventY/inZ_raw_thresh_falling_value
-What: /sys/.../deviceX:eventY/inZ_raw_thresh_falling_value
-What: /sys/.../deviceX:eventY/temp_raw_thresh_falling_value
-What: /sys/.../deviceX:eventY/temp_raw_thresh_falling_value
+What: /sys/.../events/in_accel_x_raw_thresh_rising_value
+What: /sys/.../events/in_accel_x_raw_thresh_falling_value
+What: /sys/.../events/in_accel_y_raw_thresh_rising_value
+What: /sys/.../events/in_accel_y_raw_thresh_falling_value
+What: /sys/.../events/in_accel_z_raw_thresh_rising_value
+What: /sys/.../events/in_accel_z_raw_thresh_falling_value
+What: /sys/.../events/in_anglvel_x_raw_thresh_rising_value
+What: /sys/.../events/in_anglvel_x_raw_thresh_falling_value
+What: /sys/.../events/in_anglvel_y_raw_thresh_rising_value
+What: /sys/.../events/in_anglvel_y_raw_thresh_falling_value
+What: /sys/.../events/in_anglvel_z_raw_thresh_rising_value
+What: /sys/.../events/in_anglvel_z_raw_thresh_falling_value
+What: /sys/.../events/in_magn_x_raw_thresh_rising_value
+What: /sys/.../events/in_magn_x_raw_thresh_falling_value
+What: /sys/.../events/in_magn_y_raw_thresh_rising_value
+What: /sys/.../events/in_magn_y_raw_thresh_falling_value
+What: /sys/.../events/in_magn_z_raw_thresh_rising_value
+What: /sys/.../events/in_magn_z_raw_thresh_falling_value
+What: /sys/.../events/in_voltageY_supply_raw_thresh_rising_value
+What: /sys/.../events/in_voltageY_supply_raw_thresh_falling_value
+What: /sys/.../events/in_voltageY_raw_thresh_falling_value
+What: /sys/.../events/in_voltageY_raw_thresh_falling_value
+What: /sys/.../events/in_tempY_raw_thresh_falling_value
+What: /sys/.../events/in_tempY_raw_thresh_falling_value
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
Specifies the value of threshold that the device is comparing
against for the events enabled by
- <type>Z[_name]_thresh[_rising|falling]_en.
- If separate attributes exist for the two directions, but
+ <type>Y[_name]_thresh[_rising|falling]_en.
+ If separate attributes exist for the two directions, but
direction is not specified for this attribute, then a single
threshold value applies to both directions.
The raw or input element of the name indicates whether the
value is in raw device units or in processed units (as _raw
and _input do on sysfs direct channel read attributes).
-What: /sys/.../deviceX:eventY/accel_x_raw_roc_rising_value
-What: /sys/.../deviceX:eventY/accel_x_raw_roc_falling_value
-What: /sys/.../deviceX:eventY/accel_y_raw_roc_rising_value
-What: /sys/.../deviceX:eventY/accel_y_raw_roc_falling_value
-What: /sys/.../deviceX:eventY/accel_z_raw_roc_rising_value
-What: /sys/.../deviceX:eventY/accel_z_raw_roc_falling_value
-What: /sys/.../deviceX:eventY/gyro_x_raw_roc_rising_value
-What: /sys/.../deviceX:eventY/gyro_x_raw_roc_falling_value
-What: /sys/.../deviceX:eventY/gyro_y_raw_roc_rising_value
-What: /sys/.../deviceX:eventY/gyro_y_raw_roc_falling_value
-What: /sys/.../deviceX:eventY/gyro_z_raw_roc_rising_value
-What: /sys/.../deviceX:eventY/gyro_z_raw_roc_falling_value
-What: /sys/.../deviceX:eventY/magn_x_raw_roc_rising_value
-What: /sys/.../deviceX:eventY/magn_x_raw_roc_falling_value
-What: /sys/.../deviceX:eventY/magn_y_raw_roc_rising_value
-What: /sys/.../deviceX:eventY/magn_y_raw_roc_falling_value
-What: /sys/.../deviceX:eventY/magn_z_raw_roc_rising_value
-What: /sys/.../deviceX:eventY/magn_z_raw_roc_falling_value
-What: /sys/.../deviceX:eventY/inZ_supply_raw_roc_rising_value
-What: /sys/.../deviceX:eventY/inZ_supply_raw_roc_falling_value
-What: /sys/.../deviceX:eventY/inZ_raw_roc_falling_value
-What: /sys/.../deviceX:eventY/inZ_raw_roc_falling_value
-What: /sys/.../deviceX:eventY/temp_raw_roc_falling_value
-What: /sys/.../deviceX:eventY/temp_raw_roc_falling_value
+What: /sys/.../events/in_accel_x_raw_roc_rising_value
+What: /sys/.../events/in_accel_x_raw_roc_falling_value
+What: /sys/.../events/in_accel_y_raw_roc_rising_value
+What: /sys/.../events/in_accel_y_raw_roc_falling_value
+What: /sys/.../events/in_accel_z_raw_roc_rising_value
+What: /sys/.../events/in_accel_z_raw_roc_falling_value
+What: /sys/.../events/in_anglvel_x_raw_roc_rising_value
+What: /sys/.../events/in_anglvel_x_raw_roc_falling_value
+What: /sys/.../events/in_anglvel_y_raw_roc_rising_value
+What: /sys/.../events/in_anglvel_y_raw_roc_falling_value
+What: /sys/.../events/in_anglvel_z_raw_roc_rising_value
+What: /sys/.../events/in_anglvel_z_raw_roc_falling_value
+What: /sys/.../events/in_magn_x_raw_roc_rising_value
+What: /sys/.../events/in_magn_x_raw_roc_falling_value
+What: /sys/.../events/in_magn_y_raw_roc_rising_value
+What: /sys/.../events/in_magn_y_raw_roc_falling_value
+What: /sys/.../events/in_magn_z_raw_roc_rising_value
+What: /sys/.../events/in_magn_z_raw_roc_falling_value
+What: /sys/.../events/in_voltageY_supply_raw_roc_rising_value
+What: /sys/.../events/in_voltageY_supply_raw_roc_falling_value
+What: /sys/.../events/in_voltageY_raw_roc_falling_value
+What: /sys/.../events/in_voltageY_raw_roc_falling_value
+What: /sys/.../events/in_tempY_raw_roc_falling_value
+What: /sys/.../events/in_tempY_raw_roc_falling_value
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
Specifies the value of rate of change threshold that the
device is comparing against for the events enabled by
- <type>[Z][_name]_roc[_rising|falling]_en.
+ <type>[Y][_name]_roc[_rising|falling]_en.
If separate attributes exist for the two directions,
but direction is not specified for this attribute,
then a single threshold value applies to both directions.
@@ -489,55 +497,55 @@ Description:
value is in raw device units or in processed units (as _raw
and _input do on sysfs direct channel read attributes).
-What: /sys/.../deviceX:eventY/accel_x_thresh_rising_period
-What: /sys/.../deviceX:eventY/accel_x_thresh_falling_period
-hat: /sys/.../deviceX:eventY/accel_x_roc_rising_period
-What: /sys/.../deviceX:eventY/accel_x_roc_falling_period
-What: /sys/.../deviceX:eventY/accel_y_thresh_rising_period
-What: /sys/.../deviceX:eventY/accel_y_thresh_falling_period
-What: /sys/.../deviceX:eventY/accel_y_roc_rising_period
-What: /sys/.../deviceX:eventY/accel_y_roc_falling_period
-What: /sys/.../deviceX:eventY/accel_z_thresh_rising_period
-What: /sys/.../deviceX:eventY/accel_z_thresh_falling_period
-What: /sys/.../deviceX:eventY/accel_z_roc_rising_period
-What: /sys/.../deviceX:eventY/accel_z_roc_falling_period
-What: /sys/.../deviceX:eventY/gyro_x_thresh_rising_period
-What: /sys/.../deviceX:eventY/gyro_x_thresh_falling_period
-What: /sys/.../deviceX:eventY/gyro_x_roc_rising_period
-What: /sys/.../deviceX:eventY/gyro_x_roc_falling_period
-What: /sys/.../deviceX:eventY/gyro_y_thresh_rising_period
-What: /sys/.../deviceX:eventY/gyro_y_thresh_falling_period
-What: /sys/.../deviceX:eventY/gyro_y_roc_rising_period
-What: /sys/.../deviceX:eventY/gyro_y_roc_falling_period
-What: /sys/.../deviceX:eventY/gyro_z_thresh_rising_period
-What: /sys/.../deviceX:eventY/gyro_z_thresh_falling_period
-What: /sys/.../deviceX:eventY/gyro_z_roc_rising_period
-What: /sys/.../deviceX:eventY/gyro_z_roc_falling_period
-What: /sys/.../deviceX:eventY/magn_x_thresh_rising_period
-What: /sys/.../deviceX:eventY/magn_x_thresh_falling_period
-What: /sys/.../deviceX:eventY/magn_x_roc_rising_period
-What: /sys/.../deviceX:eventY/magn_x_roc_falling_period
-What: /sys/.../deviceX:eventY/magn_y_thresh_rising_period
-What: /sys/.../deviceX:eventY/magn_y_thresh_falling_period
-What: /sys/.../deviceX:eventY/magn_y_roc_rising_period
-What: /sys/.../deviceX:eventY/magn_y_roc_falling_period
-What: /sys/.../deviceX:eventY/magn_z_thresh_rising_period
-What: /sys/.../deviceX:eventY/magn_z_thresh_falling_period
-What: /sys/.../deviceX:eventY/magn_z_roc_rising_period
-What: /sys/.../deviceX:eventY/magn_z_roc_falling_period
-What: /sys/.../deviceX:eventY/inZ_supply_thresh_rising_period
-What: /sys/.../deviceX:eventY/inZ_supply_thresh_falling_period
-What: /sys/.../deviceX:eventY/inz_supply_roc_rising_period
-What: /sys/.../deviceX:eventY/inZ_supply_roc_falling_period
-What: /sys/.../deviceX:eventY/inZ_thresh_rising_period
-What: /sys/.../deviceX:eventY/inZ_thresh_falling_period
-What: /sys/.../deviceX:eventY/inZ_roc_rising_period
-What: /sys/.../deviceX:eventY/inZ_roc_falling_period
-What: /sys/.../deviceX:eventY/temp_thresh_rising_period
-What: /sys/.../deviceX:eventY/temp_thresh_falling_period
-What: /sys/.../deviceX:eventY/temp_roc_rising_period
-What: /sys/.../deviceX:eventY/temp_roc_falling_period
-What: /sys/.../deviceX:eventY/accel_x&y&z_mag_falling_period
+What: /sys/.../events/in_accel_x_thresh_rising_period
+What: /sys/.../events/in_accel_x_thresh_falling_period
+hat: /sys/.../events/in_accel_x_roc_rising_period
+What: /sys/.../events/in_accel_x_roc_falling_period
+What: /sys/.../events/in_accel_y_thresh_rising_period
+What: /sys/.../events/in_accel_y_thresh_falling_period
+What: /sys/.../events/in_accel_y_roc_rising_period
+What: /sys/.../events/in_accel_y_roc_falling_period
+What: /sys/.../events/in_accel_z_thresh_rising_period
+What: /sys/.../events/in_accel_z_thresh_falling_period
+What: /sys/.../events/in_accel_z_roc_rising_period
+What: /sys/.../events/in_accel_z_roc_falling_period
+What: /sys/.../events/in_anglvel_x_thresh_rising_period
+What: /sys/.../events/in_anglvel_x_thresh_falling_period
+What: /sys/.../events/in_anglvel_x_roc_rising_period
+What: /sys/.../events/in_anglvel_x_roc_falling_period
+What: /sys/.../events/in_anglvel_y_thresh_rising_period
+What: /sys/.../events/in_anglvel_y_thresh_falling_period
+What: /sys/.../events/in_anglvel_y_roc_rising_period
+What: /sys/.../events/in_anglvel_y_roc_falling_period
+What: /sys/.../events/in_anglvel_z_thresh_rising_period
+What: /sys/.../events/in_anglvel_z_thresh_falling_period
+What: /sys/.../events/in_anglvel_z_roc_rising_period
+What: /sys/.../events/in_anglvel_z_roc_falling_period
+What: /sys/.../events/in_magn_x_thresh_rising_period
+What: /sys/.../events/in_magn_x_thresh_falling_period
+What: /sys/.../events/in_magn_x_roc_rising_period
+What: /sys/.../events/in_magn_x_roc_falling_period
+What: /sys/.../events/in_magn_y_thresh_rising_period
+What: /sys/.../events/in_magn_y_thresh_falling_period
+What: /sys/.../events/in_magn_y_roc_rising_period
+What: /sys/.../events/in_magn_y_roc_falling_period
+What: /sys/.../events/in_magn_z_thresh_rising_period
+What: /sys/.../events/in_magn_z_thresh_falling_period
+What: /sys/.../events/in_magn_z_roc_rising_period
+What: /sys/.../events/in_magn_z_roc_falling_period
+What: /sys/.../events/in_voltageY_supply_thresh_rising_period
+What: /sys/.../events/in_voltageY_supply_thresh_falling_period
+What: /sys/.../events/in_voltageY_supply_roc_rising_period
+What: /sys/.../events/in_voltageY_supply_roc_falling_period
+What: /sys/.../events/in_voltageY_thresh_rising_period
+What: /sys/.../events/in_voltageY_thresh_falling_period
+What: /sys/.../events/in_voltageY_roc_rising_period
+What: /sys/.../events/in_voltageY_roc_falling_period
+What: /sys/.../events/in_tempY_thresh_rising_period
+What: /sys/.../events/in_tempY_thresh_falling_period
+What: /sys/.../events/in_tempY_roc_rising_period
+What: /sys/.../events/in_tempY_roc_falling_period
+What: /sys/.../events/in_accel_x&y&z_mag_falling_period
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
@@ -545,31 +553,31 @@ Description:
met before an event is generated. If direction is not
specified then this period applies to both directions.
-What: /sys/.../deviceX:eventY/accel_mag_en
-What: /sys/.../deviceX:eventY/accel_mag_rising_en
-What: /sys/.../deviceX:eventY/accel_mag_falling_en
-What: /sys/.../deviceX:eventY/accel_x_mag_en
-What: /sys/.../deviceX:eventY/accel_x_mag_rising_en
-What: /sys/.../deviceX:eventY/accel_x_mag_falling_en
-What: /sys/.../deviceX:eventY/accel_y_mag_en
-What: /sys/.../deviceX:eventY/accel_y_mag_rising_en
-What: /sys/.../deviceX:eventY/accel_y_mag_falling_en
-What: /sys/.../deviceX:eventY/accel_z_mag_en
-What: /sys/.../deviceX:eventY/accel_z_mag_rising_en
-What: /sys/.../deviceX:eventY/accel_z_mag_falling_en
-What: /sys/.../deviceX:eventY/accel_x&y&z_mag_rising_en
-What: /sys/.../deviceX:eventY/accel_x&y&z_mag_falling_en
+What: /sys/.../iio:deviceX/events/in_accel_mag_en
+What: /sys/.../iio:deviceX/events/in_accel_mag_rising_en
+What: /sys/.../iio:deviceX/events/in_accel_mag_falling_en
+What: /sys/.../iio:deviceX/events/in_accel_x_mag_en
+What: /sys/.../iio:deviceX/events/in_accel_x_mag_rising_en
+What: /sys/.../iio:deviceX/events/in_accel_x_mag_falling_en
+What: /sys/.../iio:deviceX/events/in_accel_y_mag_en
+What: /sys/.../iio:deviceX/events/in_accel_y_mag_rising_en
+What: /sys/.../iio:deviceX/events/in_accel_y_mag_falling_en
+What: /sys/.../iio:deviceX/events/in_accel_z_mag_en
+What: /sys/.../iio:deviceX/events/in_accel_z_mag_rising_en
+What: /sys/.../iio:deviceX/events/in_accel_z_mag_falling_en
+What: /sys/.../iio:deviceX/events/in_accel_x&y&z_mag_rising_en
+What: /sys/.../iio:deviceX/events/in_accel_x&y&z_mag_falling_en
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
- Similar to accel_x_thresh[_rising|_falling]_en, but here the
+ Similar to in_accel_x_thresh[_rising|_falling]_en, but here the
magnitude of the channel is compared to the threshold, not its
signed value.
-What: /sys/.../accel_raw_mag_value
-What: /sys/.../accel_x_raw_mag_rising_value
-What: /sys/.../accel_y_raw_mag_rising_value
-What: /sys/.../accel_z_raw_mag_rising_value
+What: /sys/.../events/in_accel_raw_mag_value
+What: /sys/.../events/in_accel_x_raw_mag_rising_value
+What: /sys/.../events/in_accel_y_raw_mag_rising_value
+What: /sys/.../events/in_accel_z_raw_mag_rising_value
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
@@ -577,86 +585,75 @@ Description:
number or direction is not specified, applies to all channels of
this type.
-What: /sys/bus/iio/devices/deviceX:buffer:event/dev
-KernelVersion: 2.6.35
-Contact: linux-iio@vger.kernel.org
-Description:
- Buffer for device X event character device major:minor numbers.
-
-What: /sys/bus/iio/devices/deviceX:buffer:access/dev
-KernelVersion: 2.6.35
-Contact: linux-iio@vger.kernel.org
-Description:
- Buffer for device X access character device major:minor numbers.
-
-What: /sys/bus/iio/devices/deviceX:buffer/trigger
+What: /sys/bus/iio/devices/iio:deviceX/trigger/current_trigger
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
The name of the trigger source being used, as per string given
in /sys/class/iio/triggerY/name.
-What: /sys/bus/iio/devices/deviceX:buffer/length
+What: /sys/bus/iio/devices/iio:deviceX/buffer/length
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
Number of scans contained by the buffer.
-What: /sys/bus/iio/devices/deviceX:buffer/bytes_per_datum
+What: /sys/bus/iio/devices/iio:deviceX/buffer/bytes_per_datum
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
Bytes per scan. Due to alignment fun, the scan may be larger
than implied directly by the scan_element parameters.
-What: /sys/bus/iio/devices/deviceX:buffer/enable
+What: /sys/bus/iio/devices/iio:deviceX/buffer/enable
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
Actually start the buffer capture up. Will start trigger
if first device and appropriate.
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements
+What: /sys/bus/iio/devices/iio:deviceX/buffer/scan_elements
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
Directory containing interfaces for elements that will be
captured for a single triggered sample set in the buffer.
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_x_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_y_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_z_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_x_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_y_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_z_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_x_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_y_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_z_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/timestamp_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/inY_supply_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/inY_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/inY-inZ_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_x_en
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_y_en
+What: /sys/.../buffer/scan_elements/in_accel_x_en
+What: /sys/.../buffer/scan_elements/in_accel_y_en
+What: /sys/.../buffer/scan_elements/in_accel_z_en
+What: /sys/.../buffer/scan_elements/in_anglvel_x_en
+What: /sys/.../buffer/scan_elements/in_anglvel_y_en
+What: /sys/.../buffer/scan_elements/in_anglvel_z_en
+What: /sys/.../buffer/scan_elements/in_magn_x_en
+What: /sys/.../buffer/scan_elements/in_magn_y_en
+What: /sys/.../buffer/scan_elements/in_magn_z_en
+What: /sys/.../buffer/scan_elements/in_timestamp_en
+What: /sys/.../buffer/scan_elements/in_voltageY_supply_en
+What: /sys/.../buffer/scan_elements/in_voltageY_en
+What: /sys/.../buffer/scan_elements/in_voltageY-voltageZ_en
+What: /sys/.../buffer/scan_elements/in_incli_x_en
+What: /sys/.../buffer/scan_elements/in_incli_y_en
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
Scan element control for triggered data capture.
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_type
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_type
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_type
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_type
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/inY_type
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/in-in_type
-What: /sys/.../deviceX:buffer/scan_elements/inY_supply_type
-What: /sys/.../deviceX:buffer/scan_elements/timestamp_type
+What: /sys/.../buffer/scan_elements/in_accel_type
+What: /sys/.../buffer/scan_elements/in_anglvel_type
+What: /sys/.../buffer/scan_elements/in_magn_type
+What: /sys/.../buffer/scan_elements/in_incli_type
+What: /sys/.../buffer/scan_elements/in_voltageY_type
+What: /sys/.../buffer/scan_elements/in_voltage-in_type
+What: /sys/.../buffer/scan_elements/in_voltageY_supply_type
+What: /sys/.../buffer/scan_elements/in_timestamp_type
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
Description of the scan element data storage within the buffer
and hence the form in which it is read from user-space.
- Form is [s|u]bits/storagebits[>>shift]. s or u specifies if
+ Form is [be|le]:[s|u]bits/storagebits[>>shift].
+ be or le specifies big or little endian. s or u specifies if
signed (2's complement) or unsigned. bits is the number of bits
of data and storagebits is the space (after padding) that it
occupies in the buffer. shift if specified, is the shift that
@@ -673,27 +670,27 @@ Description:
For other storage combinations this attribute will be extended
appropriately.
-What: /sys/.../deviceX:buffer/scan_elements/accel_type_available
+What: /sys/.../buffer/scan_elements/in_accel_type_available
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
If the type parameter can take one of a small set of values,
this attribute lists them.
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/inY_index
-What: /sys/.../deviceX:buffer/scan_elements/inY_supply_index
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_x_index
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_y_index
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_z_index
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_x_index
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_y_index
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_z_index
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_x_index
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_y_index
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_z_index
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_x_index
-What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_y_index
-What: /sys/.../deviceX:buffer/scan_elements/timestamp_index
+What: /sys/.../buffer/scan_elements/in_voltageY_index
+What: /sys/.../buffer/scan_elements/in_voltageY_supply_index
+What: /sys/.../buffer/scan_elements/in_accel_x_index
+What: /sys/.../buffer/scan_elements/in_accel_y_index
+What: /sys/.../buffer/scan_elements/in_accel_z_index
+What: /sys/.../buffer/scan_elements/in_anglvel_x_index
+What: /sys/.../buffer/scan_elements/in_anglvel_y_index
+What: /sys/.../buffer/scan_elements/in_anglvel_z_index
+What: /sys/.../buffer/scan_elements/in_magn_x_index
+What: /sys/.../buffer/scan_elements/in_magn_y_index
+What: /sys/.../buffer/scan_elements/in_magn_z_index
+What: /sys/.../buffer/scan_elements/in_incli_x_index
+What: /sys/.../buffer/scan_elements/in_incli_y_index
+What: /sys/.../buffer/scan_elements/in_timestamp_index
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
@@ -705,9 +702,30 @@ Description:
and the relevant _type attributes to establish the data storage
format.
-What: /sys/bus/iio/devices/deviceX/gyro_z_quadrature_correction_raw
+What: /sys/.../iio:deviceX/in_anglvel_z_quadrature_correction_raw
KernelVersion: 2.6.38
-Contact: linux-iio@xxxxxxxxxxxxxxx
+Contact: linux-iio@vger.kernel.org
Description:
This attribute is used to read the amount of quadrature error
present in the device at a given time.
+
+What: /sys/.../iio:deviceX/ac_excitation_en
+KernelVersion: 3.1.0
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute, if available, is used to enable the AC
+ excitation mode found on some converters. In ac excitation mode,
+ the polarity of the excitation voltage is reversed on
+ alternate cycles, to eliminate DC errors.
+
+What: /sys/.../iio:deviceX/bridge_switch_en
+KernelVersion: 3.1.0
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute, if available, is used to close or open the
+ bridge power down switch found on some converters.
+ In bridge applications, such as strain gauges and load cells,
+ the bridge itself consumes the majority of the current in the
+ system. To minimize the current consumption of the system,
+ the bridge can be disconnected (when it is not being used
+ using the bridge_switch_en attribute.
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-adc-ad7280a b/drivers/staging/iio/Documentation/sysfs-bus-iio-adc-ad7280a
new file mode 100644
index 00000000000..863d3856718
--- /dev/null
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio-adc-ad7280a
@@ -0,0 +1,21 @@
+What: /sys/bus/iio/devices/deviceX/inY-inZ_balance_switch_en
+KernelVersion: 3.0.0
+Contact: linux-iio@vger.kernel.org
+Description:
+ Writing 1 enables the cell balance output switch corresponding
+ to input Y. Writing 0 disables it. If the inY-inZ_balance_timer
+ is set to a none zero value, the corresponding switch will
+ enable for the programmed amount of time, before it
+ automatically disables.
+
+What: /sys/bus/iio/devices/deviceX/inY-inZ_balance_timer
+KernelVersion: 3.0.0
+Contact: linux-iio@vger.kernel.org
+Description:
+ The inY-inZ_balance_timer file allows the user to program
+ individual times for each cell balance output. The AD7280A
+ allows the user to set the timer to a value from 0 minutes to
+ 36.9 minutes. The resolution of the timer is 71.5 sec.
+ The value written is the on-time in milliseconds. When the
+ timer value is set 0, the timer is disabled. The cell balance
+ outputs are controlled only by inY-inZ_balance_switch_en.
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad5933 b/drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad5933
new file mode 100644
index 00000000000..79c7e88c64c
--- /dev/null
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad5933
@@ -0,0 +1,30 @@
+What: /sys/bus/iio/devices/iio:deviceX/outY_freq_start
+KernelVersion: 3.1.0
+Contact: linux-iio@vger.kernel.org
+Description:
+ Frequency sweep start frequency in Hz.
+
+What: /sys/bus/iio/devices/iio:deviceX/outY_freq_increment
+KernelVersion: 3.1.0
+Contact: linux-iio@vger.kernel.org
+Description:
+ Frequency increment in Hz (step size) between consecutive
+ frequency points along the sweep.
+
+What: /sys/bus/iio/devices/iio:deviceX/outY_freq_points
+KernelVersion: 3.1.0
+Contact: linux-iio@vger.kernel.org
+Description:
+ Number of frequency points (steps) in the frequency sweep.
+ This value, in conjunction with the outY_freq_start and the
+ outY_freq_increment, determines the frequency sweep range
+ for the sweep operation.
+
+What: /sys/bus/iio/devices/iio:deviceX/outY_settling_cycles
+KernelVersion: 3.1.0
+Contact: linux-iio@vger.kernel.org
+Description:
+ Number of output excitation cycles (settling time cycles)
+ that are allowed to pass through the unknown impedance,
+ after each frequency increment, and before the ADC is triggered
+ to perform a conversion sequence of the response signal.
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index d329635fb5c..4ec9118955f 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -12,13 +12,13 @@ menuconfig IIO
drivers/staging/iio/Documentation for more information.
if IIO
-config IIO_RING_BUFFER
+config IIO_BUFFER
bool "Enable buffer support within IIO"
help
Provide core support for various buffer based data
acquisition methods.
-if IIO_RING_BUFFER
+if IIO_BUFFER
config IIO_SW_RING
select IIO_TRIGGER
@@ -38,7 +38,7 @@ config IIO_KFIFO_BUF
no buffer events so it is up to userspace to work out how
often to read from the buffer.
-endif # IIO_RINGBUFFER
+endif # IIO_BUFFER
config IIO_TRIGGER
boolean "Enable triggered sampling support"
@@ -59,9 +59,11 @@ config IIO_CONSUMERS_PER_TRIGGER
source "drivers/staging/iio/accel/Kconfig"
source "drivers/staging/iio/adc/Kconfig"
source "drivers/staging/iio/addac/Kconfig"
+source "drivers/staging/iio/cdc/Kconfig"
source "drivers/staging/iio/dac/Kconfig"
source "drivers/staging/iio/dds/Kconfig"
source "drivers/staging/iio/gyro/Kconfig"
+source "drivers/staging/iio/impedance-analyzer/Kconfig"
source "drivers/staging/iio/imu/Kconfig"
source "drivers/staging/iio/light/Kconfig"
source "drivers/staging/iio/magnetometer/Kconfig"
@@ -69,4 +71,31 @@ source "drivers/staging/iio/meter/Kconfig"
source "drivers/staging/iio/resolver/Kconfig"
source "drivers/staging/iio/trigger/Kconfig"
+config IIO_DUMMY_EVGEN
+ tristate
+
+config IIO_SIMPLE_DUMMY
+ tristate "An example driver with no hardware requirements"
+ select IIO_SIMPLE_DUMMY_EVGEN if IIO_SIMPLE_DUMMY_EVENTS
+ help
+ Driver intended mainly as documentation for how to write
+ a driver. May also be useful for testing userspace code
+ without hardward.
+
+if IIO_SIMPLE_DUMMY
+
+config IIO_SIMPLE_DUMMY_EVENTS
+ boolean "Event generation support"
+ select IIO_DUMMY_EVGEN
+ help
+ Add some dummy events to the simple dummy driver.
+
+config IIO_SIMPLE_DUMMY_BUFFER
+ boolean "Buffered capture support"
+ depends on IIO_KFIFO_BUF
+ help
+ Add buffered data capture to the simple dummy driver.
+
+endif # IIO_SIMPLE_DUMMY
+
endif # IIO
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index bb5c95c7d69..1340aead18b 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -4,18 +4,27 @@
obj-$(CONFIG_IIO) += industrialio.o
industrialio-y := industrialio-core.o
-industrialio-$(CONFIG_IIO_RING_BUFFER) += industrialio-ring.o
+industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o
industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o
obj-$(CONFIG_IIO_SW_RING) += ring_sw.o
obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
+obj-$(CONFIG_IIO_SIMPLE_DUMMY) += iio_dummy.o
+iio_dummy-y := iio_simple_dummy.o
+iio_dummy-$(CONFIG_IIO_SIMPLE_DUMMY_EVENTS) += iio_simple_dummy_events.o
+iio_dummy-$(CONFIG_IIO_SIMPLE_DUMMY_BUFFER) += iio_simple_dummy_buffer.o
+
+obj-$(CONFIG_IIO_DUMMY_EVGEN) += iio_dummy_evgen.o
+
obj-y += accel/
obj-y += adc/
obj-y += addac/
+obj-y += cdc/
obj-y += dac/
obj-y += dds/
obj-y += gyro/
+obj-y += impedance-analyzer/
obj-y += imu/
obj-y += light/
obj-y += magnetometer/
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index 81a33b60512..5ab71670b70 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -1,13 +1,13 @@
#
# Accelerometer drivers
#
-comment "Accelerometers"
+menu "Accelerometers"
config ADIS16201
tristate "Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer"
depends on SPI
- select IIO_TRIGGER if IIO_RING_BUFFER
- select IIO_SW_RING if IIO_RING_BUFFER
+ select IIO_TRIGGER if IIO_BUFFER
+ select IIO_SW_RING if IIO_BUFFER
help
Say yes here to build support for Analog Devices adis16201 dual-axis
digital inclinometer and accelerometer.
@@ -15,8 +15,8 @@ config ADIS16201
config ADIS16203
tristate "Analog Devices ADIS16203 Programmable 360 Degrees Inclinometer"
depends on SPI
- select IIO_TRIGGER if IIO_RING_BUFFER
- select IIO_SW_RING if IIO_RING_BUFFER
+ select IIO_TRIGGER if IIO_BUFFER
+ select IIO_SW_RING if IIO_BUFFER
help
Say yes here to build support for Analog Devices adis16203 Programmable
360 Degrees Inclinometer.
@@ -24,8 +24,8 @@ config ADIS16203
config ADIS16204
tristate "Analog Devices ADIS16204 Programmable High-g Digital Impact Sensor and Recorder"
depends on SPI
- select IIO_TRIGGER if IIO_RING_BUFFER
- select IIO_SW_RING if IIO_RING_BUFFER
+ select IIO_TRIGGER if IIO_BUFFER
+ select IIO_SW_RING if IIO_BUFFER
help
Say yes here to build support for Analog Devices adis16204 Programmable
High-g Digital Impact Sensor and Recorder.
@@ -33,8 +33,8 @@ config ADIS16204
config ADIS16209
tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
depends on SPI
- select IIO_TRIGGER if IIO_RING_BUFFER
- select IIO_SW_RING if IIO_RING_BUFFER
+ select IIO_TRIGGER if IIO_BUFFER
+ select IIO_SW_RING if IIO_BUFFER
help
Say yes here to build support for Analog Devices adis16209 dual-axis digital inclinometer
and accelerometer.
@@ -49,8 +49,8 @@ config ADIS16220
config ADIS16240
tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
depends on SPI
- select IIO_TRIGGER if IIO_RING_BUFFER
- select IIO_SW_RING if IIO_RING_BUFFER
+ select IIO_TRIGGER if IIO_BUFFER
+ select IIO_SW_RING if IIO_BUFFER
help
Say yes here to build support for Analog Devices adis16240 programmable
impact Sensor and recorder.
@@ -65,8 +65,9 @@ config KXSD9
config LIS3L02DQ
tristate "ST Microelectronics LIS3L02DQ Accelerometer Driver"
depends on SPI
- select IIO_TRIGGER if IIO_RING_BUFFER
- depends on !IIO_RING_BUFFER || IIO_KFIFO_BUF || IIO_SW_RING
+ select IIO_TRIGGER if IIO_BUFFER
+ depends on !IIO_BUFFER || IIO_KFIFO_BUF || IIO_SW_RING
+ depends on GENERIC_GPIO
help
Say yes here to build SPI support for the ST microelectronics
accelerometer. The driver supplies direct access via sysfs files
@@ -74,7 +75,7 @@ config LIS3L02DQ
choice
prompt "Buffer type"
- depends on LIS3L02DQ && IIO_RING_BUFFER
+ depends on LIS3L02DQ && IIO_BUFFER
config LIS3L02DQ_BUF_KFIFO
depends on IIO_KFIFO_BUF
@@ -94,9 +95,11 @@ config LIS3L02DQ_BUF_RING_SW
endchoice
config SCA3000
- depends on IIO_RING_BUFFER
+ depends on IIO_BUFFER
depends on SPI
tristate "VTI SCA3000 series accelerometers"
help
Say yes here to build support for the VTI SCA3000 series of SPI
accelerometers. These devices use a hardware ring buffer.
+
+endmenu
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 1b2a6d3ddaf..95c66661e70 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -3,32 +3,32 @@
#
adis16201-y := adis16201_core.o
-adis16201-$(CONFIG_IIO_RING_BUFFER) += adis16201_ring.o adis16201_trigger.o
+adis16201-$(CONFIG_IIO_BUFFER) += adis16201_ring.o adis16201_trigger.o
obj-$(CONFIG_ADIS16201) += adis16201.o
adis16203-y := adis16203_core.o
-adis16203-$(CONFIG_IIO_RING_BUFFER) += adis16203_ring.o adis16203_trigger.o
+adis16203-$(CONFIG_IIO_BUFFER) += adis16203_ring.o adis16203_trigger.o
obj-$(CONFIG_ADIS16203) += adis16203.o
adis16204-y := adis16204_core.o
-adis16204-$(CONFIG_IIO_RING_BUFFER) += adis16204_ring.o adis16204_trigger.o
+adis16204-$(CONFIG_IIO_BUFFER) += adis16204_ring.o adis16204_trigger.o
obj-$(CONFIG_ADIS16204) += adis16204.o
adis16209-y := adis16209_core.o
-adis16209-$(CONFIG_IIO_RING_BUFFER) += adis16209_ring.o adis16209_trigger.o
+adis16209-$(CONFIG_IIO_BUFFER) += adis16209_ring.o adis16209_trigger.o
obj-$(CONFIG_ADIS16209) += adis16209.o
adis16220-y := adis16220_core.o
obj-$(CONFIG_ADIS16220) += adis16220.o
adis16240-y := adis16240_core.o
-adis16240-$(CONFIG_IIO_RING_BUFFER) += adis16240_ring.o adis16240_trigger.o
+adis16240-$(CONFIG_IIO_BUFFER) += adis16240_ring.o adis16240_trigger.o
obj-$(CONFIG_ADIS16240) += adis16240.o
obj-$(CONFIG_KXSD9) += kxsd9.o
lis3l02dq-y := lis3l02dq_core.o
-lis3l02dq-$(CONFIG_IIO_RING_BUFFER) += lis3l02dq_ring.o
+lis3l02dq-$(CONFIG_IIO_BUFFER) += lis3l02dq_ring.o
obj-$(CONFIG_LIS3L02DQ) += lis3l02dq.o
sca3000-y := sca3000_core.o sca3000_ring.o
diff --git a/drivers/staging/iio/accel/accel.h b/drivers/staging/iio/accel/accel.h
deleted file mode 100644
index 50651f835ce..00000000000
--- a/drivers/staging/iio/accel/accel.h
+++ /dev/null
@@ -1,87 +0,0 @@
-
-#include "../sysfs.h"
-
-/* Accelerometer types of attribute */
-#define IIO_DEV_ATTR_ACCEL_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_X_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_x_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_Y_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_y_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_Z_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_z_offset, _mode, _show, _store, _addr)
-
-#define IIO_CONST_ATTR_ACCEL_SCALE(_string) \
- IIO_CONST_ATTR(accel_scale, _string)
-
-#define IIO_DEV_ATTR_ACCEL_SCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_scale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_X_SCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_x_scale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_Y_SCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_y_scale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_Z_SCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_z_scale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_CALIBBIAS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_calibbias, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_X_CALIBBIAS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_x_calibbias, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_Y_CALIBBIAS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_y_calibbias, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_Z_CALIBBIAS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_z_calibbias, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_CALIBSCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_calibscale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_X_CALIBSCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_x_calibscale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_Y_CALIBSCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_y_calibscale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_Z_CALIBSCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(accel_z_calibscale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACCEL(_show, _addr) \
- IIO_DEVICE_ATTR(accel_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_X(_show, _addr) \
- IIO_DEVICE_ATTR(accel_x_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_Y(_show, _addr) \
- IIO_DEVICE_ATTR(accel_y_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_Z(_show, _addr) \
- IIO_DEVICE_ATTR(accel_z_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_XY(_show, _addr) \
- IIO_DEVICE_ATTR(accel_xy, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_PEAK(_show, _addr) \
- IIO_DEVICE_ATTR(accel_peak, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_XPEAK(_show, _addr) \
- IIO_DEVICE_ATTR(accel_xpeak, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_YPEAK(_show, _addr) \
- IIO_DEVICE_ATTR(accel_ypeak, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_ZPEAK(_show, _addr) \
- IIO_DEVICE_ATTR(accel_zpeak, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_XYPEAK(_show, _addr) \
- IIO_DEVICE_ATTR(accel_xypeak, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ACCEL_XYZPEAK(_show, _addr) \
- IIO_DEVICE_ATTR(accel_xyzpeak, S_IRUGO, _show, NULL, _addr)
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
index dac5540b5a8..72750f7f3a8 100644
--- a/drivers/staging/iio/accel/adis16201.h
+++ b/drivers/staging/iio/accel/adis16201.h
@@ -89,7 +89,7 @@ enum adis16201_scan {
ADIS16201_SCAN_INCLI_Y,
};
-#ifdef CONFIG_IIO_RING_BUFFER
+#ifdef CONFIG_IIO_BUFFER
void adis16201_remove_trigger(struct iio_dev *indio_dev);
int adis16201_probe_trigger(struct iio_dev *indio_dev);
@@ -100,7 +100,7 @@ ssize_t adis16201_read_data_from_ring(struct device *dev,
int adis16201_configure_ring(struct iio_dev *indio_dev);
void adis16201_unconfigure_ring(struct iio_dev *indio_dev);
-#else /* CONFIG_IIO_RING_BUFFER */
+#else /* CONFIG_IIO_BUFFER */
static inline void adis16201_remove_trigger(struct iio_dev *indio_dev)
{
@@ -137,5 +137,5 @@ static inline void adis16201_uninitialize_ring(struct iio_ring_buffer *ring)
{
}
-#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* CONFIG_IIO_BUFFER */
#endif /* SPI_ADIS16201_H_ */
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index 2fd01aecdf9..1c5dad53784 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -13,14 +13,11 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
-
-#include "accel.h"
-#include "inclinometer.h"
-#include "../adc/adc.h"
+#include "../buffer_generic.h"
#include "adis16201.h"
@@ -328,7 +325,7 @@ static int adis16201_read_raw(struct iio_dev *indio_dev,
case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
case (1 << IIO_CHAN_INFO_SCALE_SHARED):
switch (chan->type) {
- case IIO_IN:
+ case IIO_VOLTAGE:
*val = 0;
if (chan->channel == 0)
*val2 = 1220;
@@ -410,7 +407,7 @@ static int adis16201_write_raw(struct iio_dev *indio_dev,
}
static struct iio_chan_spec adis16201_channels[] = {
- IIO_CHAN(IIO_IN, 0, 1, 0, "supply", 0, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
in_supply, ADIS16201_SCAN_SUPPLY,
IIO_ST('u', 12, 16, 0), 0),
@@ -429,7 +426,7 @@ static struct iio_chan_spec adis16201_channels[] = {
(1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
accel_y, ADIS16201_SCAN_ACC_Y,
IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
in_aux, ADIS16201_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
@@ -466,7 +463,7 @@ static const struct iio_info adis16201_info = {
static int __devinit adis16201_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct adis16201_state *st;
struct iio_dev *indio_dev;
@@ -495,14 +492,9 @@ static int __devinit adis16201_probe(struct spi_device *spi)
if (ret)
goto error_free_dev;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_ring_funcs;
- regdone = 1;
-
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- adis16201_channels,
- ARRAY_SIZE(adis16201_channels));
+ ret = iio_buffer_register(indio_dev,
+ adis16201_channels,
+ ARRAY_SIZE(adis16201_channels));
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
@@ -518,19 +510,20 @@ static int __devinit adis16201_probe(struct spi_device *spi)
ret = adis16201_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto error_remove_trigger;
return 0;
error_remove_trigger:
adis16201_remove_trigger(indio_dev);
error_uninitialize_ring:
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_buffer_unregister(indio_dev);
error_unreg_ring_funcs:
adis16201_unconfigure_ring(indio_dev);
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -539,10 +532,11 @@ static int adis16201_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
- adis16201_remove_trigger(indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
+ adis16201_remove_trigger(indio_dev);
+ iio_buffer_unregister(indio_dev);
adis16201_unconfigure_ring(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16201_ring.c b/drivers/staging/iio/accel/adis16201_ring.c
index 66e708ddf8b..dbd883294d6 100644
--- a/drivers/staging/iio/accel/adis16201_ring.c
+++ b/drivers/staging/iio/accel/adis16201_ring.c
@@ -1,17 +1,12 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../ring_sw.h"
-#include "accel.h"
-#include "../trigger.h"
+#include "../trigger_consumer.h"
#include "adis16201.h"
@@ -38,10 +33,12 @@ static int adis16201_read_ring_data(struct iio_dev *indio_dev, u8 *rx)
xfers[i].cs_change = 1;
xfers[i].len = 2;
xfers[i].delay_usecs = 20;
- xfers[i].tx_buf = st->tx + 2 * i;
- st->tx[2 * i] = ADIS16201_READ_REG(ADIS16201_SUPPLY_OUT +
- 2 * i);
- st->tx[2 * i + 1] = 0;
+ if (i < ADIS16201_OUTPUTS) {
+ xfers[i].tx_buf = st->tx + 2 * i;
+ st->tx[2 * i] = ADIS16201_READ_REG(ADIS16201_SUPPLY_OUT +
+ 2 * i);
+ st->tx[2 * i + 1] = 0;
+ }
if (i >= 1)
xfers[i].rx_buf = rx + 2 * (i - 1);
spi_message_add_tail(&xfers[i], &msg);
@@ -62,9 +59,9 @@ static int adis16201_read_ring_data(struct iio_dev *indio_dev, u8 *rx)
static irqreturn_t adis16201_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
+ struct iio_dev *indio_dev = pf->indio_dev;
struct adis16201_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
int i = 0;
s16 *data;
@@ -97,26 +94,26 @@ static irqreturn_t adis16201_trigger_handler(int irq, void *p)
void adis16201_unconfigure_ring(struct iio_dev *indio_dev)
{
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
-static const struct iio_ring_setup_ops adis16201_ring_setup_ops = {
- .preenable = &iio_sw_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+static const struct iio_buffer_setup_ops adis16201_ring_setup_ops = {
+ .preenable = &iio_sw_buffer_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
};
int adis16201_configure_ring(struct iio_dev *indio_dev)
{
int ret = 0;
- struct iio_ring_buffer *ring;
+ struct iio_buffer *ring;
ring = iio_sw_rb_allocate(indio_dev);
if (!ring) {
ret = -ENOMEM;
return ret;
}
- indio_dev->ring = ring;
+ indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->bpe = 2;
ring->scan_timestamp = true;
@@ -124,15 +121,6 @@ int adis16201_configure_ring(struct iio_dev *indio_dev)
ring->setup_ops = &adis16201_ring_setup_ops;
ring->owner = THIS_MODULE;
- /* Set default scan mode */
- iio_scan_mask_set(ring, ADIS16201_SCAN_SUPPLY);
- iio_scan_mask_set(ring, ADIS16201_SCAN_ACC_X);
- iio_scan_mask_set(ring, ADIS16201_SCAN_ACC_Y);
- iio_scan_mask_set(ring, ADIS16201_SCAN_AUX_ADC);
- iio_scan_mask_set(ring, ADIS16201_SCAN_TEMP);
- iio_scan_mask_set(ring, ADIS16201_SCAN_INCLI_X);
- iio_scan_mask_set(ring, ADIS16201_SCAN_INCLI_Y);
-
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16201_trigger_handler,
IRQF_ONESHOT,
@@ -144,9 +132,9 @@ int adis16201_configure_ring(struct iio_dev *indio_dev)
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
return ret;
}
diff --git a/drivers/staging/iio/accel/adis16201_trigger.c b/drivers/staging/iio/accel/adis16201_trigger.c
index 3a95c083b45..f448258884c 100644
--- a/drivers/staging/iio/accel/adis16201_trigger.c
+++ b/drivers/staging/iio/accel/adis16201_trigger.c
@@ -1,13 +1,8 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16201.h"
@@ -23,6 +18,11 @@ static int adis16201_data_rdy_trigger_set_state(struct iio_trigger *trig,
return adis16201_set_irq(indio_dev, state);
}
+static const struct iio_trigger_ops adis16201_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &adis16201_data_rdy_trigger_set_state,
+};
+
int adis16201_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
@@ -41,9 +41,8 @@ int adis16201_probe_trigger(struct iio_dev *indio_dev)
if (ret)
goto error_free_trig;
st->trig->dev.parent = &st->us->dev;
- st->trig->owner = THIS_MODULE;
+ st->trig->ops = &adis16201_trigger_ops;
st->trig->private_data = indio_dev;
- st->trig->set_trigger_state = &adis16201_data_rdy_trigger_set_state;
ret = iio_trigger_register(st->trig);
/* select default trigger */
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
index 4071bc0d69a..3f96ad3bbd6 100644
--- a/drivers/staging/iio/accel/adis16203.h
+++ b/drivers/staging/iio/accel/adis16203.h
@@ -82,7 +82,7 @@ enum adis16203_scan {
ADIS16203_SCAN_INCLI_Y,
};
-#ifdef CONFIG_IIO_RING_BUFFER
+#ifdef CONFIG_IIO_BUFFER
void adis16203_remove_trigger(struct iio_dev *indio_dev);
int adis16203_probe_trigger(struct iio_dev *indio_dev);
@@ -93,7 +93,7 @@ ssize_t adis16203_read_data_from_ring(struct device *dev,
int adis16203_configure_ring(struct iio_dev *indio_dev);
void adis16203_unconfigure_ring(struct iio_dev *indio_dev);
-#else /* CONFIG_IIO_RING_BUFFER */
+#else /* CONFIG_IIO_BUFFER */
static inline void adis16203_remove_trigger(struct iio_dev *indio_dev)
{
@@ -121,5 +121,5 @@ static inline void adis16203_unconfigure_ring(struct iio_dev *indio_dev)
{
}
-#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* CONFIG_IIO_BUFFER */
#endif /* SPI_ADIS16203_H_ */
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index cf5d15da76a..8a3337442af 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -13,13 +13,11 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "accel.h"
-#include "inclinometer.h"
-#include "../ring_generic.h"
-#include "../adc/adc.h"
+#include "../buffer_generic.h"
#include "adis16203.h"
@@ -334,7 +332,7 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
case (1 << IIO_CHAN_INFO_SCALE_SHARED):
switch (chan->type) {
- case IIO_IN:
+ case IIO_VOLTAGE:
*val = 0;
if (chan->channel == 0)
*val2 = 1220;
@@ -375,11 +373,11 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
}
static struct iio_chan_spec adis16203_channels[] = {
- IIO_CHAN(IIO_IN, 0, 1, 0, "supply", 0, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
in_supply, ADIS16203_SCAN_SUPPLY,
IIO_ST('u', 12, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
in_aux, ADIS16203_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
@@ -421,7 +419,7 @@ static const struct iio_info adis16203_info = {
static int __devinit adis16203_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct iio_dev *indio_dev;
struct adis16203_state *st;
@@ -448,14 +446,9 @@ static int __devinit adis16203_probe(struct spi_device *spi)
if (ret)
goto error_free_dev;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_ring_funcs;
- regdone = 1;
-
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- adis16203_channels,
- ARRAY_SIZE(adis16203_channels));
+ ret = iio_buffer_register(indio_dev,
+ adis16203_channels,
+ ARRAY_SIZE(adis16203_channels));
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
@@ -471,19 +464,21 @@ static int __devinit adis16203_probe(struct spi_device *spi)
ret = adis16203_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_remove_trigger;
+
return 0;
error_remove_trigger:
adis16203_remove_trigger(indio_dev);
error_uninitialize_ring:
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_buffer_unregister(indio_dev);
error_unreg_ring_funcs:
adis16203_unconfigure_ring(indio_dev);
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -492,10 +487,11 @@ static int adis16203_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
- adis16203_remove_trigger(indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
+ adis16203_remove_trigger(indio_dev);
+ iio_buffer_unregister(indio_dev);
adis16203_unconfigure_ring(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16203_ring.c b/drivers/staging/iio/accel/adis16203_ring.c
index d2c07c52746..838d3012c87 100644
--- a/drivers/staging/iio/accel/adis16203_ring.c
+++ b/drivers/staging/iio/accel/adis16203_ring.c
@@ -1,20 +1,12 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../ring_sw.h"
-#include "accel.h"
-#include "../trigger.h"
+#include "../trigger_consumer.h"
#include "adis16203.h"
/**
@@ -67,9 +59,9 @@ static int adis16203_read_ring_data(struct device *dev, u8 *rx)
static irqreturn_t adis16203_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
+ struct iio_dev *indio_dev = pf->indio_dev;
struct adis16203_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
int i = 0;
s16 *data;
@@ -104,26 +96,26 @@ static irqreturn_t adis16203_trigger_handler(int irq, void *p)
void adis16203_unconfigure_ring(struct iio_dev *indio_dev)
{
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
-static const struct iio_ring_setup_ops adis16203_ring_setup_ops = {
- .preenable = &iio_sw_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+static const struct iio_buffer_setup_ops adis16203_ring_setup_ops = {
+ .preenable = &iio_sw_buffer_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
};
int adis16203_configure_ring(struct iio_dev *indio_dev)
{
int ret = 0;
- struct iio_ring_buffer *ring;
+ struct iio_buffer *ring;
ring = iio_sw_rb_allocate(indio_dev);
if (!ring) {
ret = -ENOMEM;
return ret;
}
- indio_dev->ring = ring;
+ indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->bpe = 2;
ring->scan_timestamp = true;
@@ -131,13 +123,6 @@ int adis16203_configure_ring(struct iio_dev *indio_dev)
ring->setup_ops = &adis16203_ring_setup_ops;
ring->owner = THIS_MODULE;
- /* Set default scan mode */
- iio_scan_mask_set(ring, ADIS16203_SCAN_SUPPLY);
- iio_scan_mask_set(ring, ADIS16203_SCAN_TEMP);
- iio_scan_mask_set(ring, ADIS16203_SCAN_AUX_ADC);
- iio_scan_mask_set(ring, ADIS16203_SCAN_INCLI_X);
- iio_scan_mask_set(ring, ADIS16203_SCAN_INCLI_Y);
-
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16203_trigger_handler,
IRQF_ONESHOT,
@@ -149,10 +134,10 @@ int adis16203_configure_ring(struct iio_dev *indio_dev)
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
return ret;
}
diff --git a/drivers/staging/iio/accel/adis16203_trigger.c b/drivers/staging/iio/accel/adis16203_trigger.c
index 3caf3e8bc9d..50165f9ddc5 100644
--- a/drivers/staging/iio/accel/adis16203_trigger.c
+++ b/drivers/staging/iio/accel/adis16203_trigger.c
@@ -1,14 +1,8 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16203.h"
@@ -24,6 +18,11 @@ static int adis16203_data_rdy_trigger_set_state(struct iio_trigger *trig,
return adis16203_set_irq(indio_dev, state);
}
+static const struct iio_trigger_ops adis16203_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &adis16203_data_rdy_trigger_set_state,
+};
+
int adis16203_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
@@ -44,9 +43,8 @@ int adis16203_probe_trigger(struct iio_dev *indio_dev)
goto error_free_trig;
st->trig->dev.parent = &st->us->dev;
- st->trig->owner = THIS_MODULE;
+ st->trig->ops = &adis16203_trigger_ops;
st->trig->private_data = indio_dev;
- st->trig->set_trigger_state = &adis16203_data_rdy_trigger_set_state;
ret = iio_trigger_register(st->trig);
/* select default trigger */
diff --git a/drivers/staging/iio/accel/adis16204.h b/drivers/staging/iio/accel/adis16204.h
index 3bb0490465f..7cf4e91f83a 100644
--- a/drivers/staging/iio/accel/adis16204.h
+++ b/drivers/staging/iio/accel/adis16204.h
@@ -90,7 +90,7 @@ enum adis16204_scan {
ADIS16204_SCAN_TEMP,
};
-#ifdef CONFIG_IIO_RING_BUFFER
+#ifdef CONFIG_IIO_BUFFER
void adis16204_remove_trigger(struct iio_dev *indio_dev);
int adis16204_probe_trigger(struct iio_dev *indio_dev);
@@ -101,7 +101,7 @@ ssize_t adis16204_read_data_from_ring(struct device *dev,
int adis16204_configure_ring(struct iio_dev *indio_dev);
void adis16204_unconfigure_ring(struct iio_dev *indio_dev);
-#else /* CONFIG_IIO_RING_BUFFER */
+#else /* CONFIG_IIO_BUFFER */
static inline void adis16204_remove_trigger(struct iio_dev *indio_dev)
{
@@ -129,5 +129,5 @@ static inline void adis16204_unconfigure_ring(struct iio_dev *indio_dev)
{
}
-#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* CONFIG_IIO_BUFFER */
#endif /* SPI_ADIS16204_H_ */
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
index 3e2b62654b7..644ac8e4d2a 100644
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -8,7 +8,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
@@ -17,12 +16,11 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
-#include "accel.h"
-#include "../adc/adc.h"
+#include "../buffer_generic.h"
#include "adis16204.h"
@@ -299,15 +297,18 @@ static int adis16204_initial_setup(struct iio_dev *indio_dev)
err_ret:
return ret;
}
+
+/* Unique to this driver currently */
+#define IIO_DEV_ATTR_ACCEL_XY(_show, _addr) \
+ IIO_DEVICE_ATTR(in_accel_xy, S_IRUGO, _show, NULL, _addr)
+#define IIO_DEV_ATTR_ACCEL_XYPEAK(_show, _addr) \
+ IIO_DEVICE_ATTR(in_accel_xypeak, S_IRUGO, _show, NULL, _addr)
+
static IIO_DEV_ATTR_ACCEL_XY(adis16204_read_14bit_signed,
ADIS16204_XY_RSS_OUT);
-static IIO_DEV_ATTR_ACCEL_XPEAK(adis16204_read_14bit_signed,
- ADIS16204_X_PEAK_OUT);
-static IIO_DEV_ATTR_ACCEL_YPEAK(adis16204_read_14bit_signed,
- ADIS16204_Y_PEAK_OUT);
static IIO_DEV_ATTR_ACCEL_XYPEAK(adis16204_read_14bit_signed,
ADIS16204_XY_PEAK_OUT);
-static IIO_CONST_ATTR(accel_xy_scale, "0.017125");
+static IIO_CONST_ATTR(in_accel_xy_scale, "0.017125");
static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16204_write_reset, 0);
@@ -319,13 +320,16 @@ enum adis16204_channel {
accel_y,
};
-static u8 adis16204_addresses[5][2] = {
+static u8 adis16204_addresses[5][3] = {
[in_supply] = { ADIS16204_SUPPLY_OUT },
[in_aux] = { ADIS16204_AUX_ADC },
[temp] = { ADIS16204_TEMP_OUT },
- [accel_x] = { ADIS16204_XACCL_OUT, ADIS16204_XACCL_NULL },
- [accel_y] = { ADIS16204_XACCL_OUT, ADIS16204_YACCL_NULL },
+ [accel_x] = { ADIS16204_XACCL_OUT, ADIS16204_XACCL_NULL,
+ ADIS16204_X_PEAK_OUT },
+ [accel_y] = { ADIS16204_XACCL_OUT, ADIS16204_YACCL_NULL,
+ ADIS16204_Y_PEAK_OUT },
};
+
static int adis16204_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2,
@@ -335,6 +339,7 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
int bits;
u8 addr;
s16 val16;
+ int addrind;
switch (mask) {
case 0:
@@ -363,7 +368,7 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
switch (chan->type) {
- case IIO_IN:
+ case IIO_VOLTAGE:
*val = 0;
if (chan->channel == 0)
*val2 = 1220;
@@ -389,15 +394,16 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
*val = 25;
return IIO_VAL_INT;
case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
- switch (chan->type) {
- case IIO_ACCEL:
+ case (1 << IIO_CHAN_INFO_PEAK_SEPARATE):
+ if (mask == (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE)) {
bits = 12;
- break;
- default:
- return -EINVAL;
- };
+ addrind = 1;
+ } else { /* PEAK_SEPARATE */
+ bits = 14;
+ addrind = 2;
+ }
mutex_lock(&indio_dev->mlock);
- addr = adis16204_addresses[chan->address][1];
+ addr = adis16204_addresses[chan->address][addrind];
ret = adis16204_spi_read_reg_16(indio_dev, addr, &val16);
if (ret) {
mutex_unlock(&indio_dev->mlock);
@@ -438,11 +444,11 @@ static int adis16204_write_raw(struct iio_dev *indio_dev,
}
static struct iio_chan_spec adis16204_channels[] = {
- IIO_CHAN(IIO_IN, 0, 0, 0, "supply", 0, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 0, 0, "supply", 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
in_supply, ADIS16204_SCAN_SUPPLY,
IIO_ST('u', 12, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
in_aux, ADIS16204_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
@@ -453,23 +459,24 @@ static struct iio_chan_spec adis16204_channels[] = {
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_PEAK_SEPARATE),
accel_x, ADIS16204_SCAN_ACC_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_PEAK_SEPARATE),
accel_y, ADIS16204_SCAN_ACC_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(5),
};
+
static struct attribute *adis16204_attributes[] = {
&iio_dev_attr_reset.dev_attr.attr,
- &iio_dev_attr_accel_xy.dev_attr.attr,
- &iio_dev_attr_accel_xpeak.dev_attr.attr,
- &iio_dev_attr_accel_ypeak.dev_attr.attr,
- &iio_dev_attr_accel_xypeak.dev_attr.attr,
- &iio_const_attr_accel_xy_scale.dev_attr.attr,
+ &iio_dev_attr_in_accel_xy.dev_attr.attr,
+ &iio_dev_attr_in_accel_xypeak.dev_attr.attr,
+ &iio_const_attr_in_accel_xy_scale.dev_attr.attr,
NULL
};
@@ -486,7 +493,7 @@ static const struct iio_info adis16204_info = {
static int __devinit adis16204_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct adis16204_state *st;
struct iio_dev *indio_dev;
@@ -513,14 +520,9 @@ static int __devinit adis16204_probe(struct spi_device *spi)
if (ret)
goto error_free_dev;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_ring_funcs;
- regdone = 1;
-
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- adis16204_channels,
- ARRAY_SIZE(adis16204_channels));
+ ret = iio_buffer_register(indio_dev,
+ adis16204_channels,
+ ARRAY_SIZE(adis16204_channels));
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
@@ -536,19 +538,20 @@ static int __devinit adis16204_probe(struct spi_device *spi)
ret = adis16204_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_remove_trigger;
+
return 0;
error_remove_trigger:
adis16204_remove_trigger(indio_dev);
error_uninitialize_ring:
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_buffer_unregister(indio_dev);
error_unreg_ring_funcs:
adis16204_unconfigure_ring(indio_dev);
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -557,10 +560,11 @@ static int adis16204_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
- adis16204_remove_trigger(indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
+ adis16204_remove_trigger(indio_dev);
+ iio_buffer_unregister(indio_dev);
adis16204_unconfigure_ring(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16204_ring.c b/drivers/staging/iio/accel/adis16204_ring.c
index 852df06684d..08551bb48f1 100644
--- a/drivers/staging/iio/accel/adis16204_ring.c
+++ b/drivers/staging/iio/accel/adis16204_ring.c
@@ -1,20 +1,12 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../ring_sw.h"
-#include "accel.h"
-#include "../trigger.h"
+#include "../trigger_consumer.h"
#include "adis16204.h"
/**
@@ -65,9 +57,9 @@ static int adis16204_read_ring_data(struct device *dev, u8 *rx)
static irqreturn_t adis16204_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
+ struct iio_dev *indio_dev = pf->indio_dev;
struct adis16204_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
int i = 0;
s16 *data;
size_t datasize = ring->access->get_bytes_per_datum(ring);
@@ -99,26 +91,26 @@ static irqreturn_t adis16204_trigger_handler(int irq, void *p)
void adis16204_unconfigure_ring(struct iio_dev *indio_dev)
{
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
-static const struct iio_ring_setup_ops adis16204_ring_setup_ops = {
- .preenable = &iio_sw_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+static const struct iio_buffer_setup_ops adis16204_ring_setup_ops = {
+ .preenable = &iio_sw_buffer_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
};
int adis16204_configure_ring(struct iio_dev *indio_dev)
{
int ret = 0;
- struct iio_ring_buffer *ring;
+ struct iio_buffer *ring;
ring = iio_sw_rb_allocate(indio_dev);
if (!ring) {
ret = -ENOMEM;
return ret;
}
- indio_dev->ring = ring;
+ indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
ring->bpe = 2;
@@ -126,13 +118,6 @@ int adis16204_configure_ring(struct iio_dev *indio_dev)
ring->setup_ops = &adis16204_ring_setup_ops;
ring->owner = THIS_MODULE;
- /* Set default scan mode */
- iio_scan_mask_set(ring, ADIS16204_SCAN_SUPPLY);
- iio_scan_mask_set(ring, ADIS16204_SCAN_ACC_X);
- iio_scan_mask_set(ring, ADIS16204_SCAN_ACC_Y);
- iio_scan_mask_set(ring, ADIS16204_SCAN_AUX_ADC);
- iio_scan_mask_set(ring, ADIS16204_SCAN_TEMP);
-
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16204_trigger_handler,
IRQF_ONESHOT,
@@ -145,10 +130,10 @@ int adis16204_configure_ring(struct iio_dev *indio_dev)
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
return ret;
}
diff --git a/drivers/staging/iio/accel/adis16204_trigger.c b/drivers/staging/iio/accel/adis16204_trigger.c
index 01f73b9b888..55b661c98d2 100644
--- a/drivers/staging/iio/accel/adis16204_trigger.c
+++ b/drivers/staging/iio/accel/adis16204_trigger.c
@@ -1,14 +1,8 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16204.h"
@@ -24,6 +18,11 @@ static int adis16204_data_rdy_trigger_set_state(struct iio_trigger *trig,
return adis16204_set_irq(indio_dev, state);
}
+static const struct iio_trigger_ops adis16204_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &adis16204_data_rdy_trigger_set_state,
+};
+
int adis16204_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
@@ -44,9 +43,8 @@ int adis16204_probe_trigger(struct iio_dev *indio_dev)
goto error_free_trig;
st->trig->dev.parent = &st->us->dev;
- st->trig->owner = THIS_MODULE;
+ st->trig->ops = &adis16204_trigger_ops;
st->trig->private_data = indio_dev;
- st->trig->set_trigger_state = &adis16204_data_rdy_trigger_set_state;
ret = iio_trigger_register(st->trig);
/* select default trigger */
diff --git a/drivers/staging/iio/accel/adis16209.h b/drivers/staging/iio/accel/adis16209.h
index c8b7b00d417..3c88b86e7b8 100644
--- a/drivers/staging/iio/accel/adis16209.h
+++ b/drivers/staging/iio/accel/adis16209.h
@@ -128,7 +128,7 @@ int adis16209_set_irq(struct iio_dev *indio_dev, bool enable);
#define ADIS16209_SCAN_INCLI_Y 6
#define ADIS16209_SCAN_ROT 7
-#ifdef CONFIG_IIO_RING_BUFFER
+#ifdef CONFIG_IIO_BUFFER
void adis16209_remove_trigger(struct iio_dev *indio_dev);
int adis16209_probe_trigger(struct iio_dev *indio_dev);
@@ -140,7 +140,7 @@ ssize_t adis16209_read_data_from_ring(struct device *dev,
int adis16209_configure_ring(struct iio_dev *indio_dev);
void adis16209_unconfigure_ring(struct iio_dev *indio_dev);
-#else /* CONFIG_IIO_RING_BUFFER */
+#else /* CONFIG_IIO_BUFFER */
static inline void adis16209_remove_trigger(struct iio_dev *indio_dev)
{
@@ -168,5 +168,5 @@ static inline void adis16209_unconfigure_ring(struct iio_dev *indio_dev)
{
}
-#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* CONFIG_IIO_BUFFER */
#endif /* SPI_ADIS16209_H_ */
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index bec1fa8de9b..0a8571b18b3 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -14,13 +14,11 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
-#include "accel.h"
-#include "inclinometer.h"
-#include "../adc/adc.h"
+#include "../buffer_generic.h"
#include "adis16209.h"
@@ -360,7 +358,7 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
case (1 << IIO_CHAN_INFO_SCALE_SHARED):
switch (chan->type) {
- case IIO_IN:
+ case IIO_VOLTAGE:
*val = 0;
if (chan->channel == 0)
*val2 = 305180;
@@ -411,7 +409,7 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
}
static struct iio_chan_spec adis16209_channels[] = {
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
in_supply, ADIS16209_SCAN_SUPPLY,
IIO_ST('u', 14, 16, 0), 0),
@@ -430,15 +428,15 @@ static struct iio_chan_spec adis16209_channels[] = {
(1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
accel_y, ADIS16209_SCAN_ACC_Y,
IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
in_aux, ADIS16209_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
- IIO_CHAN(IIO_INCLI, 0, 1, 0, NULL, 0, IIO_MOD_X,
+ IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
incli_x, ADIS16209_SCAN_INCLI_X,
IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_INCLI, 0, 1, 0, NULL, 0, IIO_MOD_Y,
+ IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
incli_y, ADIS16209_SCAN_INCLI_Y,
IIO_ST('s', 14, 16, 0), 0),
@@ -469,7 +467,7 @@ static const struct iio_info adis16209_info = {
static int __devinit adis16209_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct adis16209_state *st;
struct iio_dev *indio_dev;
@@ -496,14 +494,9 @@ static int __devinit adis16209_probe(struct spi_device *spi)
if (ret)
goto error_free_dev;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_ring_funcs;
- regdone = 1;
-
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- adis16209_channels,
- ARRAY_SIZE(adis16209_channels));
+ ret = iio_buffer_register(indio_dev,
+ adis16209_channels,
+ ARRAY_SIZE(adis16209_channels));
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
@@ -519,19 +512,20 @@ static int __devinit adis16209_probe(struct spi_device *spi)
ret = adis16209_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_remove_trigger;
+
return 0;
error_remove_trigger:
adis16209_remove_trigger(indio_dev);
error_uninitialize_ring:
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_buffer_unregister(indio_dev);
error_unreg_ring_funcs:
adis16209_unconfigure_ring(indio_dev);
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -542,10 +536,11 @@ static int adis16209_remove(struct spi_device *spi)
flush_scheduled_work();
- adis16209_remove_trigger(indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
+ adis16209_remove_trigger(indio_dev);
+ iio_buffer_unregister(indio_dev);
adis16209_unconfigure_ring(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index 45017d3f02f..bb66364bef0 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -1,20 +1,12 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../ring_sw.h"
-#include "accel.h"
-#include "../trigger.h"
+#include "../trigger_consumer.h"
#include "adis16209.h"
/**
@@ -65,9 +57,9 @@ static int adis16209_read_ring_data(struct device *dev, u8 *rx)
static irqreturn_t adis16209_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
+ struct iio_dev *indio_dev = pf->indio_dev;
struct adis16209_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
int i = 0;
s16 *data;
@@ -99,26 +91,26 @@ static irqreturn_t adis16209_trigger_handler(int irq, void *p)
void adis16209_unconfigure_ring(struct iio_dev *indio_dev)
{
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
-static const struct iio_ring_setup_ops adis16209_ring_setup_ops = {
- .preenable = &iio_sw_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+static const struct iio_buffer_setup_ops adis16209_ring_setup_ops = {
+ .preenable = &iio_sw_buffer_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
};
int adis16209_configure_ring(struct iio_dev *indio_dev)
{
int ret = 0;
- struct iio_ring_buffer *ring;
+ struct iio_buffer *ring;
ring = iio_sw_rb_allocate(indio_dev);
if (!ring) {
ret = -ENOMEM;
return ret;
}
- indio_dev->ring = ring;
+ indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
ring->bpe = 2;
@@ -126,16 +118,6 @@ int adis16209_configure_ring(struct iio_dev *indio_dev)
ring->setup_ops = &adis16209_ring_setup_ops;
ring->owner = THIS_MODULE;
- /* Set default scan mode */
- iio_scan_mask_set(ring, ADIS16209_SCAN_SUPPLY);
- iio_scan_mask_set(ring, ADIS16209_SCAN_ACC_X);
- iio_scan_mask_set(ring, ADIS16209_SCAN_ACC_Y);
- iio_scan_mask_set(ring, ADIS16209_SCAN_AUX_ADC);
- iio_scan_mask_set(ring, ADIS16209_SCAN_TEMP);
- iio_scan_mask_set(ring, ADIS16209_SCAN_INCLI_X);
- iio_scan_mask_set(ring, ADIS16209_SCAN_INCLI_Y);
- iio_scan_mask_set(ring, ADIS16209_SCAN_ROT);
-
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16209_trigger_handler,
IRQF_ONESHOT,
@@ -148,10 +130,10 @@ int adis16209_configure_ring(struct iio_dev *indio_dev)
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
return ret;
}
diff --git a/drivers/staging/iio/accel/adis16209_trigger.c b/drivers/staging/iio/accel/adis16209_trigger.c
index 6df7b47ec7b..8df8a9791d5 100644
--- a/drivers/staging/iio/accel/adis16209_trigger.c
+++ b/drivers/staging/iio/accel/adis16209_trigger.c
@@ -1,14 +1,8 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16209.h"
@@ -33,6 +27,11 @@ static int adis16209_data_rdy_trigger_set_state(struct iio_trigger *trig,
return adis16209_set_irq(indio_dev, state);
}
+static const struct iio_trigger_ops adis16209_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &adis16209_data_rdy_trigger_set_state,
+};
+
int adis16209_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
@@ -52,9 +51,8 @@ int adis16209_probe_trigger(struct iio_dev *indio_dev)
if (ret)
goto error_free_trig;
st->trig->dev.parent = &st->us->dev;
- st->trig->owner = THIS_MODULE;
+ st->trig->ops = &adis16209_trigger_ops;
st->trig->private_data = indio_dev;
- st->trig->set_trigger_state = &adis16209_data_rdy_trigger_set_state;
ret = iio_trigger_register(st->trig);
/* select default trigger */
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index bf9ba07c038..6d4503de192 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -6,9 +6,6 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
@@ -16,12 +13,10 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
-#include <linux/list.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "accel.h"
-#include "../adc/adc.h"
#include "adis16220.h"
@@ -29,16 +24,15 @@
/**
* adis16220_spi_write_reg_8() - write single byte to a register
- * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @indio_dev: iio device associated with child of actual device
* @reg_address: the address of the register to be written
* @val: the value to write
**/
-static int adis16220_spi_write_reg_8(struct device *dev,
+static int adis16220_spi_write_reg_8(struct iio_dev *indio_dev,
u8 reg_address,
u8 val)
{
int ret;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct adis16220_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
@@ -53,18 +47,17 @@ static int adis16220_spi_write_reg_8(struct device *dev,
/**
* adis16220_spi_write_reg_16() - write 2 bytes to a pair of registers
- * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @indio_dev: iio device associated with child of actual device
* @reg_address: the address of the lower of the two registers. Second register
* is assumed to have address one greater.
* @val: value to be written
**/
-static int adis16220_spi_write_reg_16(struct device *dev,
+static int adis16220_spi_write_reg_16(struct iio_dev *indio_dev,
u8 lower_reg_address,
u16 value)
{
int ret;
struct spi_message msg;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct adis16220_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
{
@@ -77,7 +70,6 @@ static int adis16220_spi_write_reg_16(struct device *dev,
.tx_buf = st->tx + 2,
.bits_per_word = 8,
.len = 2,
- .cs_change = 1,
.delay_usecs = 35,
},
};
@@ -99,17 +91,16 @@ static int adis16220_spi_write_reg_16(struct device *dev,
/**
* adis16220_spi_read_reg_16() - read 2 bytes from a 16-bit register
- * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @indio_dev: iio device associated with child of actual device
* @reg_address: the address of the lower of the two registers. Second register
* is assumed to have address one greater.
* @val: somewhere to pass back the value read
**/
-static int adis16220_spi_read_reg_16(struct device *dev,
- u8 lower_reg_address,
- u16 *val)
+static int adis16220_spi_read_reg_16(struct iio_dev *indio_dev,
+ u8 lower_reg_address,
+ u16 *val)
{
struct spi_message msg;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct adis16220_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
@@ -149,52 +140,23 @@ error_ret:
return ret;
}
-static ssize_t adis16220_spi_read_signed(struct device *dev,
- struct device_attribute *attr,
- char *buf,
- unsigned bits)
-{
- int ret;
- s16 val = 0;
- unsigned shift = 16 - bits;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = adis16220_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
- if (ret)
- return ret;
-
- val = ((s16)(val << shift) >> shift);
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t adis16220_read_12bit_unsigned(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = adis16220_spi_read_reg_16(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val & 0x0FFF);
-}
-
static ssize_t adis16220_read_16bit(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
ssize_t ret;
+ s16 val = 0;
/* Take the iio_dev status lock */
mutex_lock(&indio_dev->mlock);
- ret = adis16220_spi_read_signed(dev, attr, buf, 16);
+ ret = adis16220_spi_read_reg_16(indio_dev, this_attr->address,
+ (u16 *)&val);
mutex_unlock(&indio_dev->mlock);
-
- return ret;
+ if (ret)
+ return ret;
+ return sprintf(buf, "%d\n", val);
}
static ssize_t adis16220_write_16bit(struct device *dev,
@@ -202,6 +164,7 @@ static ssize_t adis16220_write_16bit(struct device *dev,
const char *buf,
size_t len)
{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
long val;
@@ -209,34 +172,34 @@ static ssize_t adis16220_write_16bit(struct device *dev,
ret = strict_strtol(buf, 10, &val);
if (ret)
goto error_ret;
- ret = adis16220_spi_write_reg_16(dev, this_attr->address, val);
+ ret = adis16220_spi_write_reg_16(indio_dev, this_attr->address, val);
error_ret:
return ret ? ret : len;
}
-static int adis16220_capture(struct device *dev)
+static int adis16220_capture(struct iio_dev *indio_dev)
{
int ret;
- ret = adis16220_spi_write_reg_16(dev,
+ ret = adis16220_spi_write_reg_16(indio_dev,
ADIS16220_GLOB_CMD,
0xBF08); /* initiates a manual data capture */
if (ret)
- dev_err(dev, "problem beginning capture");
+ dev_err(&indio_dev->dev, "problem beginning capture");
msleep(10); /* delay for capture to finish */
return ret;
}
-static int adis16220_reset(struct device *dev)
+static int adis16220_reset(struct iio_dev *indio_dev)
{
int ret;
- ret = adis16220_spi_write_reg_8(dev,
+ ret = adis16220_spi_write_reg_8(indio_dev,
ADIS16220_GLOB_CMD,
ADIS16220_GLOB_CMD_SW_RESET);
if (ret)
- dev_err(dev, "problem resetting device");
+ dev_err(&indio_dev->dev, "problem resetting device");
return ret;
}
@@ -245,72 +208,84 @@ static ssize_t adis16220_write_reset(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- if (len < 1)
- return -1;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return adis16220_reset(dev) == 0 ? len : -EIO;
- }
- return -1;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ bool val;
+ int ret;
+
+ ret = strtobool(buf, &val);
+ if (ret)
+ return ret;
+ if (!val)
+ return -EINVAL;
+
+ ret = adis16220_reset(indio_dev);
+ if (ret)
+ return ret;
+ return len;
}
static ssize_t adis16220_write_capture(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- if (len < 1)
- return -1;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return adis16220_capture(dev) == 0 ? len : -EIO;
- }
- return -1;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ bool val;
+ int ret;
+
+ ret = strtobool(buf, &val);
+ if (ret)
+ return ret;
+ if (!val)
+ return -EINVAL;
+ ret = adis16220_capture(indio_dev);
+ if (ret)
+ return ret;
+
+ return len;
}
-static int adis16220_check_status(struct device *dev)
+static int adis16220_check_status(struct iio_dev *indio_dev)
{
u16 status;
int ret;
- ret = adis16220_spi_read_reg_16(dev, ADIS16220_DIAG_STAT, &status);
+ ret = adis16220_spi_read_reg_16(indio_dev, ADIS16220_DIAG_STAT,
+ &status);
if (ret < 0) {
- dev_err(dev, "Reading status failed\n");
+ dev_err(&indio_dev->dev, "Reading status failed\n");
goto error_ret;
}
ret = status & 0x7F;
if (status & ADIS16220_DIAG_STAT_VIOLATION)
- dev_err(dev, "Capture period violation/interruption\n");
+ dev_err(&indio_dev->dev,
+ "Capture period violation/interruption\n");
if (status & ADIS16220_DIAG_STAT_SPI_FAIL)
- dev_err(dev, "SPI failure\n");
+ dev_err(&indio_dev->dev, "SPI failure\n");
if (status & ADIS16220_DIAG_STAT_FLASH_UPT)
- dev_err(dev, "Flash update failed\n");
+ dev_err(&indio_dev->dev, "Flash update failed\n");
if (status & ADIS16220_DIAG_STAT_POWER_HIGH)
- dev_err(dev, "Power supply above 3.625V\n");
+ dev_err(&indio_dev->dev, "Power supply above 3.625V\n");
if (status & ADIS16220_DIAG_STAT_POWER_LOW)
- dev_err(dev, "Power supply below 3.15V\n");
+ dev_err(&indio_dev->dev, "Power supply below 3.15V\n");
error_ret:
return ret;
}
-static int adis16220_self_test(struct device *dev)
+static int adis16220_self_test(struct iio_dev *indio_dev)
{
int ret;
- ret = adis16220_spi_write_reg_16(dev,
+ ret = adis16220_spi_write_reg_16(indio_dev,
ADIS16220_MSC_CTRL,
ADIS16220_MSC_CTRL_SELF_TEST_EN);
if (ret) {
- dev_err(dev, "problem starting self test");
+ dev_err(&indio_dev->dev, "problem starting self test");
goto err_ret;
}
- adis16220_check_status(dev);
+ adis16220_check_status(indio_dev);
err_ret:
return ret;
@@ -319,24 +294,23 @@ err_ret:
static int adis16220_initial_setup(struct iio_dev *indio_dev)
{
int ret;
- struct device *dev = &indio_dev->dev;
/* Do self test */
- ret = adis16220_self_test(dev);
+ ret = adis16220_self_test(indio_dev);
if (ret) {
- dev_err(dev, "self test failure");
+ dev_err(&indio_dev->dev, "self test failure");
goto err_ret;
}
/* Read status register to check the result */
- ret = adis16220_check_status(dev);
+ ret = adis16220_check_status(indio_dev);
if (ret) {
- adis16220_reset(dev);
- dev_err(dev, "device not playing ball -> reset");
+ adis16220_reset(indio_dev);
+ dev_err(&indio_dev->dev, "device not playing ball -> reset");
msleep(ADIS16220_STARTUP_DELAY);
- ret = adis16220_check_status(dev);
+ ret = adis16220_check_status(indio_dev);
if (ret) {
- dev_err(dev, "giving up");
+ dev_err(&indio_dev->dev, "giving up");
goto err_ret;
}
}
@@ -381,7 +355,7 @@ static ssize_t adis16220_capture_buffer_read(struct iio_dev *indio_dev,
count = ADIS16220_CAPTURE_SIZE - off;
/* write the begin position of capture buffer */
- ret = adis16220_spi_write_reg_16(&indio_dev->dev,
+ ret = adis16220_spi_write_reg_16(indio_dev,
ADIS16220_CAPT_PNTR,
off > 1);
if (ret)
@@ -480,24 +454,6 @@ static struct bin_attribute adc2_bin = {
.size = ADIS16220_CAPTURE_SIZE,
};
-static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16220_read_12bit_unsigned,
- ADIS16220_CAPT_SUPPLY);
-static IIO_CONST_ATTR_IN_NAMED_SCALE(0, supply, "0.0012207");
-static IIO_DEV_ATTR_ACCEL(adis16220_read_16bit, ADIS16220_CAPT_BUFA);
-static IIO_DEVICE_ATTR(accel_peak_raw, S_IRUGO, adis16220_read_16bit,
- NULL, ADIS16220_CAPT_PEAKA);
-static IIO_DEV_ATTR_ACCEL_OFFSET(S_IWUSR | S_IRUGO,
- adis16220_read_16bit,
- adis16220_write_16bit,
- ADIS16220_ACCL_NULL);
-static IIO_CONST_ATTR_ACCEL_SCALE("0.18704223545");
-static IIO_DEV_ATTR_TEMP_RAW(adis16220_read_12bit_unsigned);
-static IIO_CONST_ATTR_TEMP_OFFSET("25");
-static IIO_CONST_ATTR_TEMP_SCALE("-0.47");
-
-static IIO_DEV_ATTR_IN_RAW(1, adis16220_read_16bit, ADIS16220_CAPT_BUF1);
-static IIO_DEV_ATTR_IN_RAW(2, adis16220_read_16bit, ADIS16220_CAPT_BUF2);
-
static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL,
adis16220_write_reset, 0);
@@ -514,21 +470,142 @@ static IIO_DEV_ATTR_CAPTURE_COUNT(S_IWUSR | S_IRUGO,
adis16220_write_16bit,
ADIS16220_CAPT_PNTR);
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("100200");
+enum adis16220_channel {
+ in_supply, in_1, in_2, accel, temp
+};
+
+struct adis16220_address_spec {
+ u8 addr;
+ u8 bits;
+ bool sign;
+};
+
+/* Address / bits / signed */
+static const struct adis16220_address_spec adis16220_addresses[][3] = {
+ [in_supply] = { { ADIS16220_CAPT_SUPPLY, 12, 0 }, },
+ [in_1] = { { ADIS16220_CAPT_BUF1, 16, 1 },
+ { ADIS16220_AIN1_NULL, 16, 1 },
+ { ADIS16220_CAPT_PEAK1, 16, 1 }, },
+ [in_2] = { { ADIS16220_CAPT_BUF2, 16, 1 },
+ { ADIS16220_AIN2_NULL, 16, 1 },
+ { ADIS16220_CAPT_PEAK2, 16, 1 }, },
+ [accel] = { { ADIS16220_CAPT_BUFA, 16, 1 },
+ { ADIS16220_ACCL_NULL, 16, 1 },
+ { ADIS16220_CAPT_PEAKA, 16, 1 }, },
+ [temp] = { { ADIS16220_CAPT_TEMP, 12, 0 }, }
+};
+
+static int adis16220_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ int ret = -EINVAL;
+ int addrind = 0;
+ u16 uval;
+ s16 sval;
+ u8 bits;
+
+ switch (mask) {
+ case 0:
+ addrind = 0;
+ break;
+ case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ if (chan->type == IIO_TEMP) {
+ *val = 25;
+ return IIO_VAL_INT;
+ }
+ addrind = 1;
+ break;
+ case (1 << IIO_CHAN_INFO_PEAK_SEPARATE):
+ addrind = 2;
+ break;
+ case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ *val = 0;
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val2 = -470000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_ACCEL:
+ *val2 = 1887042;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_VOLTAGE:
+ if (chan->channel == 0)
+ *val2 = 0012221;
+ else /* Should really be dependent on VDD */
+ *val2 = 305;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ if (adis16220_addresses[chan->address][addrind].sign) {
+ ret = adis16220_spi_read_reg_16(indio_dev,
+ adis16220_addresses[chan
+ ->address]
+ [addrind].addr,
+ &sval);
+ if (ret)
+ return ret;
+ bits = adis16220_addresses[chan->address][addrind].bits;
+ sval &= (1 << bits) - 1;
+ sval = (s16)(sval << (16 - bits)) >> (16 - bits);
+ *val = sval;
+ return IIO_VAL_INT;
+ } else {
+ ret = adis16220_spi_read_reg_16(indio_dev,
+ adis16220_addresses[chan
+ ->address]
+ [addrind].addr,
+ &uval);
+ if (ret)
+ return ret;
+ bits = adis16220_addresses[chan->address][addrind].bits;
+ uval &= (1 << bits) - 1;
+ *val = uval;
+ return IIO_VAL_INT;
+ }
+}
+
+static const struct iio_chan_spec adis16220_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .extend_name = "supply",
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = in_supply,
+ }, {
+ .type = IIO_ACCEL,
+ .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
+ (1 << IIO_CHAN_INFO_PEAK_SEPARATE),
+ .address = accel,
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = temp,
+ }, {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = in_1,
+ }, {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 2,
+ .address = in_2,
+ }
+};
static struct attribute *adis16220_attributes[] = {
- &iio_dev_attr_in0_supply_raw.dev_attr.attr,
- &iio_const_attr_in0_supply_scale.dev_attr.attr,
- &iio_dev_attr_accel_raw.dev_attr.attr,
- &iio_dev_attr_accel_offset.dev_attr.attr,
- &iio_dev_attr_accel_peak_raw.dev_attr.attr,
- &iio_const_attr_accel_scale.dev_attr.attr,
- &iio_dev_attr_temp_raw.dev_attr.attr,
- &iio_dev_attr_in1_raw.dev_attr.attr,
- &iio_dev_attr_in2_raw.dev_attr.attr,
- &iio_const_attr_temp_offset.dev_attr.attr,
- &iio_const_attr_temp_scale.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_capture.dev_attr.attr,
&iio_dev_attr_capture_count.dev_attr.attr,
@@ -542,10 +619,12 @@ static const struct attribute_group adis16220_attribute_group = {
static const struct iio_info adis16220_info = {
.attrs = &adis16220_attribute_group,
.driver_module = THIS_MODULE,
+ .read_raw = &adis16220_read_raw,
};
+
static int __devinit adis16220_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct adis16220_state *st;
struct iio_dev *indio_dev;
@@ -567,15 +646,16 @@ static int __devinit adis16220_probe(struct spi_device *spi)
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &adis16220_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = adis16220_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16220_channels);
ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
- regdone = 1;
ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &accel_bin);
if (ret)
- goto error_free_dev;
+ goto error_unregister_dev;
ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc1_bin);
if (ret)
@@ -597,11 +677,10 @@ error_rm_adc1_bin:
sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
error_rm_accel_bin:
sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
+error_unregister_dev:
+ iio_device_unregister(indio_dev);
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -616,6 +695,7 @@ static int adis16220_remove(struct spi_device *spi)
sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16240.h b/drivers/staging/iio/accel/adis16240.h
index f1dd047aa5e..3fabcc0b347 100644
--- a/drivers/staging/iio/accel/adis16240.h
+++ b/drivers/staging/iio/accel/adis16240.h
@@ -152,7 +152,7 @@ int adis16240_set_irq(struct iio_dev *indio_dev, bool enable);
#define ADIS16240_SCAN_AUX_ADC 4
#define ADIS16240_SCAN_TEMP 5
-#ifdef CONFIG_IIO_RING_BUFFER
+#ifdef CONFIG_IIO_BUFFER
void adis16240_remove_trigger(struct iio_dev *indio_dev);
int adis16240_probe_trigger(struct iio_dev *indio_dev);
@@ -164,7 +164,7 @@ ssize_t adis16240_read_data_from_ring(struct device *dev,
int adis16240_configure_ring(struct iio_dev *indio_dev);
void adis16240_unconfigure_ring(struct iio_dev *indio_dev);
-#else /* CONFIG_IIO_RING_BUFFER */
+#else /* CONFIG_IIO_BUFFER */
static inline void adis16240_remove_trigger(struct iio_dev *indio_dev)
{
@@ -192,5 +192,5 @@ static inline void adis16240_unconfigure_ring(struct iio_dev *indio_dev)
{
}
-#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* CONFIG_IIO_BUFFER */
#endif /* SPI_ADIS16240_H_ */
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index aee8b69173c..b8be2925d61 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -17,12 +17,11 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
-#include "accel.h"
-#include "../adc/adc.h"
+#include "../buffer_generic.h"
#include "adis16240.h"
@@ -326,7 +325,7 @@ err_ret:
return ret;
}
-static IIO_DEVICE_ATTR(accel_xyz_squared_peak_raw, S_IRUGO,
+static IIO_DEVICE_ATTR(in_accel_xyz_squared_peak_raw, S_IRUGO,
adis16240_read_12bit_signed, NULL,
ADIS16240_XYZPEAK_OUT);
@@ -393,7 +392,7 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
case (1 << IIO_CHAN_INFO_SCALE_SHARED):
switch (chan->type) {
- case IIO_IN:
+ case IIO_VOLTAGE:
*val = 0;
if (chan->channel == 0)
*val2 = 4880;
@@ -470,11 +469,11 @@ static int adis16240_write_raw(struct iio_dev *indio_dev,
}
static struct iio_chan_spec adis16240_channels[] = {
- IIO_CHAN(IIO_IN, 0, 1, 0, "supply", 0, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
in_supply, ADIS16240_SCAN_SUPPLY,
IIO_ST('u', 10, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
0,
in_aux, ADIS16240_SCAN_AUX_ADC,
IIO_ST('u', 10, 16, 0), 0),
@@ -501,7 +500,7 @@ static struct iio_chan_spec adis16240_channels[] = {
};
static struct attribute *adis16240_attributes[] = {
- &iio_dev_attr_accel_xyz_squared_peak_raw.dev_attr.attr,
+ &iio_dev_attr_in_accel_xyz_squared_peak_raw.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_reset.dev_attr.attr,
NULL
@@ -520,7 +519,7 @@ static const struct iio_info adis16240_info = {
static int __devinit adis16240_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct adis16240_state *st;
struct iio_dev *indio_dev;
@@ -548,14 +547,9 @@ static int __devinit adis16240_probe(struct spi_device *spi)
if (ret)
goto error_free_dev;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_ring_funcs;
- regdone = 1;
-
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- adis16240_channels,
- ARRAY_SIZE(adis16240_channels));
+ ret = iio_buffer_register(indio_dev,
+ adis16240_channels,
+ ARRAY_SIZE(adis16240_channels));
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
@@ -571,19 +565,19 @@ static int __devinit adis16240_probe(struct spi_device *spi)
ret = adis16240_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_remove_trigger;
return 0;
error_remove_trigger:
adis16240_remove_trigger(indio_dev);
error_uninitialize_ring:
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_buffer_unregister(indio_dev);
error_unreg_ring_funcs:
adis16240_unconfigure_ring(indio_dev);
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -595,10 +589,11 @@ static int adis16240_remove(struct spi_device *spi)
flush_scheduled_work();
- adis16240_remove_trigger(indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
+ adis16240_remove_trigger(indio_dev);
+ iio_buffer_unregister(indio_dev);
adis16240_unconfigure_ring(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16240_ring.c b/drivers/staging/iio/accel/adis16240_ring.c
index c812a34daca..34f1e7e6a56 100644
--- a/drivers/staging/iio/accel/adis16240_ring.c
+++ b/drivers/staging/iio/accel/adis16240_ring.c
@@ -1,20 +1,12 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../ring_sw.h"
-#include "accel.h"
-#include "../trigger.h"
+#include "../trigger_consumer.h"
#include "adis16240.h"
/**
@@ -62,9 +54,9 @@ static int adis16240_read_ring_data(struct device *dev, u8 *rx)
static irqreturn_t adis16240_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
+ struct iio_dev *indio_dev = pf->indio_dev;
struct adis16240_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
int i = 0;
s16 *data;
@@ -96,26 +88,26 @@ static irqreturn_t adis16240_trigger_handler(int irq, void *p)
void adis16240_unconfigure_ring(struct iio_dev *indio_dev)
{
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
-static const struct iio_ring_setup_ops adis16240_ring_setup_ops = {
- .preenable = &iio_sw_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+static const struct iio_buffer_setup_ops adis16240_ring_setup_ops = {
+ .preenable = &iio_sw_buffer_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
};
int adis16240_configure_ring(struct iio_dev *indio_dev)
{
int ret = 0;
- struct iio_ring_buffer *ring;
+ struct iio_buffer *ring;
ring = iio_sw_rb_allocate(indio_dev);
if (!ring) {
ret = -ENOMEM;
return ret;
}
- indio_dev->ring = ring;
+ indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
ring->bpe = 2;
@@ -123,14 +115,6 @@ int adis16240_configure_ring(struct iio_dev *indio_dev)
ring->setup_ops = &adis16240_ring_setup_ops;
ring->owner = THIS_MODULE;
- /* Set default scan mode */
- iio_scan_mask_set(ring, ADIS16240_SCAN_SUPPLY);
- iio_scan_mask_set(ring, ADIS16240_SCAN_ACC_X);
- iio_scan_mask_set(ring, ADIS16240_SCAN_ACC_Y);
- iio_scan_mask_set(ring, ADIS16240_SCAN_ACC_Z);
- iio_scan_mask_set(ring, ADIS16240_SCAN_AUX_ADC);
- iio_scan_mask_set(ring, ADIS16240_SCAN_TEMP);
-
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16240_trigger_handler,
IRQF_ONESHOT,
@@ -143,10 +127,10 @@ int adis16240_configure_ring(struct iio_dev *indio_dev)
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
return ret;
}
diff --git a/drivers/staging/iio/accel/adis16240_trigger.c b/drivers/staging/iio/accel/adis16240_trigger.c
index 17135fc33c9..13f1d142eea 100644
--- a/drivers/staging/iio/accel/adis16240_trigger.c
+++ b/drivers/staging/iio/accel/adis16240_trigger.c
@@ -1,14 +1,8 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16240.h"
@@ -33,6 +27,11 @@ static int adis16240_data_rdy_trigger_set_state(struct iio_trigger *trig,
return adis16240_set_irq(indio_dev, state);
}
+static const struct iio_trigger_ops adis16240_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &adis16240_data_rdy_trigger_set_state,
+};
+
int adis16240_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
@@ -53,9 +52,8 @@ int adis16240_probe_trigger(struct iio_dev *indio_dev)
goto error_free_trig;
st->trig->dev.parent = &st->us->dev;
- st->trig->owner = THIS_MODULE;
+ st->trig->ops = &adis16240_trigger_ops;
st->trig->private_data = indio_dev;
- st->trig->set_trigger_state = &adis16240_data_rdy_trigger_set_state;
ret = iio_trigger_register(st->trig);
/* select default trigger */
diff --git a/drivers/staging/iio/accel/inclinometer.h b/drivers/staging/iio/accel/inclinometer.h
deleted file mode 100644
index faf73d7892e..00000000000
--- a/drivers/staging/iio/accel/inclinometer.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Inclinometer related attributes
- */
-#include "../sysfs.h"
-
-#define IIO_DEV_ATTR_INCLI_X(_show, _addr) \
- IIO_DEVICE_ATTR(incli_x_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_INCLI_Y(_show, _addr) \
- IIO_DEVICE_ATTR(incli_y_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_INCLI_Z(_show, _addr) \
- IIO_DEVICE_ATTR(incli_z_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_INCLI_X_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(incli_x_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_INCLI_Y_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(incli_y_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_INCLI_Z_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(incli_z_offset, _mode, _show, _store, _addr)
-
-#define IIO_CONST_ATTR_INCLI_SCALE(_string) \
- IIO_CONST_ATTR(incli_scale, _string)
diff --git a/drivers/staging/iio/accel/kxsd9.c b/drivers/staging/iio/accel/kxsd9.c
index c8a358a5df8..5238503f680 100644
--- a/drivers/staging/iio/accel/kxsd9.c
+++ b/drivers/staging/iio/accel/kxsd9.c
@@ -21,11 +21,10 @@
#include <linux/spi/spi.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../adc/adc.h"
-#include "accel.h"
#define KXSD9_REG_X 0x00
#define KXSD9_REG_Y 0x02
@@ -34,10 +33,6 @@
#define KXSD9_REG_RESET 0x0a
#define KXSD9_REG_CTRL_C 0x0c
-#define KXSD9_FS_8 0x00
-#define KXSD9_FS_6 0x01
-#define KXSD9_FS_4 0x02
-#define KXSD9_FS_2 0x03
#define KXSD9_FS_MASK 0x03
#define KXSD9_REG_CTRL_B 0x0d
@@ -46,13 +41,8 @@
#define KXSD9_READ(a) (0x80 | (a))
#define KXSD9_WRITE(a) (a)
-#define KXSD9_SCALE_2G "0.011978"
-#define KXSD9_SCALE_4G "0.023927"
-#define KXSD9_SCALE_6G "0.035934"
-#define KXSD9_SCALE_8G "0.047853"
-
#define KXSD9_STATE_RX_SIZE 2
-#define KXSD9_STATE_TX_SIZE 4
+#define KXSD9_STATE_TX_SIZE 2
/**
* struct kxsd9_state - device related storage
* @buf_lock: protect the rx and tx buffers.
@@ -67,170 +57,70 @@ struct kxsd9_state {
u8 tx[KXSD9_STATE_TX_SIZE];
};
-/* This may want to move to mili g to allow for non integer ranges */
-static ssize_t kxsd9_read_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- ssize_t len = 0;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct kxsd9_state *st = iio_priv(indio_dev);
- struct spi_transfer xfer = {
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- };
- struct spi_message msg;
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = KXSD9_READ(KXSD9_REG_CTRL_C);
- st->tx[1] = 0;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->us, &msg);
- if (ret)
- goto error_ret;
-
- switch (st->rx[1] & KXSD9_FS_MASK) {
- case KXSD9_FS_8:
- len += sprintf(buf, "%s\n", KXSD9_SCALE_8G);
- break;
- case KXSD9_FS_6:
- len += sprintf(buf, "%s\n", KXSD9_SCALE_6G);
- break;
- case KXSD9_FS_4:
- len += sprintf(buf, "%s\n", KXSD9_SCALE_4G);
- break;
- case KXSD9_FS_2:
- len += sprintf(buf, "%s\n", KXSD9_SCALE_2G);
- break;
- }
+#define KXSD9_SCALE_2G "0.011978"
+#define KXSD9_SCALE_4G "0.023927"
+#define KXSD9_SCALE_6G "0.035934"
+#define KXSD9_SCALE_8G "0.047853"
-error_ret:
- mutex_unlock(&st->buf_lock);
+/* reverse order */
+static const int kxsd9_micro_scales[4] = { 47853, 35934, 23927, 11978 };
- return ret ? ret : len;
-}
-static ssize_t kxsd9_write_scale(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
{
-
- struct spi_message msg;
- int ret;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ int ret, i;
struct kxsd9_state *st = iio_priv(indio_dev);
- u8 val;
- struct spi_transfer xfers[] = {
- {
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- }, {
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- .tx_buf = st->tx,
- },
- };
-
- if (!strncmp(buf, KXSD9_SCALE_8G,
- strlen(buf) < strlen(KXSD9_SCALE_8G)
- ? strlen(buf) : strlen(KXSD9_SCALE_8G)))
- val = KXSD9_FS_8;
- else if (!strncmp(buf, KXSD9_SCALE_6G,
- strlen(buf) < strlen(KXSD9_SCALE_6G)
- ? strlen(buf) : strlen(KXSD9_SCALE_6G)))
- val = KXSD9_FS_6;
- else if (!strncmp(buf, KXSD9_SCALE_4G,
- strlen(buf) < strlen(KXSD9_SCALE_4G)
- ? strlen(buf) : strlen(KXSD9_SCALE_4G)))
- val = KXSD9_FS_4;
- else if (!strncmp(buf, KXSD9_SCALE_2G,
- strlen(buf) < strlen(KXSD9_SCALE_2G)
- ? strlen(buf) : strlen(KXSD9_SCALE_2G)))
- val = KXSD9_FS_2;
- else
+ bool foundit = false;
+
+ for (i = 0; i < 4; i++)
+ if (micro == kxsd9_micro_scales[i]) {
+ foundit = true;
+ break;
+ }
+ if (!foundit)
return -EINVAL;
mutex_lock(&st->buf_lock);
- st->tx[0] = KXSD9_READ(KXSD9_REG_CTRL_C);
- st->tx[1] = 0;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
if (ret)
goto error_ret;
st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
- st->tx[1] = (st->rx[1] & ~KXSD9_FS_MASK) | val;
+ st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_write(st->us, st->tx, 2);
error_ret:
mutex_unlock(&st->buf_lock);
- return ret ? ret : len;
+ return ret;
}
-static ssize_t kxsd9_read_accel(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int kxsd9_read(struct iio_dev *indio_dev, u8 address)
{
struct spi_message msg;
int ret;
- ssize_t len = 0;
- u16 val;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct kxsd9_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
{
.bits_per_word = 8,
.len = 1,
- .cs_change = 0,
.delay_usecs = 200,
.tx_buf = st->tx,
}, {
.bits_per_word = 8,
.len = 2,
- .cs_change = 1,
.rx_buf = st->rx,
},
};
mutex_lock(&st->buf_lock);
- st->tx[0] = KXSD9_READ(this_attr->address);
+ st->tx[0] = KXSD9_READ(address);
spi_message_init(&msg);
spi_message_add_tail(&xfers[0], &msg);
spi_message_add_tail(&xfers[1], &msg);
ret = spi_sync(st->us, &msg);
if (ret)
- goto error_ret;
- val = (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
- len = sprintf(buf, "%d\n", val);
-error_ret:
- mutex_unlock(&st->buf_lock);
-
- return ret ? ret : len;
+ return ret;
+ return (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
}
-static IIO_DEV_ATTR_ACCEL_X(kxsd9_read_accel, KXSD9_REG_X);
-static IIO_DEV_ATTR_ACCEL_Y(kxsd9_read_accel, KXSD9_REG_Y);
-static IIO_DEV_ATTR_ACCEL_Z(kxsd9_read_accel, KXSD9_REG_Z);
-static IIO_DEV_ATTR_IN_RAW(0, kxsd9_read_accel, KXSD9_REG_AUX);
-
-static IIO_DEVICE_ATTR(accel_scale,
- S_IRUGO | S_IWUSR,
- kxsd9_read_scale,
- kxsd9_write_scale,
- 0);
-
static IIO_CONST_ATTR(accel_scale_available,
KXSD9_SCALE_2G " "
KXSD9_SCALE_4G " "
@@ -238,48 +128,94 @@ static IIO_CONST_ATTR(accel_scale_available,
KXSD9_SCALE_8G);
static struct attribute *kxsd9_attributes[] = {
- &iio_dev_attr_accel_x_raw.dev_attr.attr,
- &iio_dev_attr_accel_y_raw.dev_attr.attr,
- &iio_dev_attr_accel_z_raw.dev_attr.attr,
- &iio_dev_attr_in0_raw.dev_attr.attr,
- &iio_dev_attr_accel_scale.dev_attr.attr,
&iio_const_attr_accel_scale_available.dev_attr.attr,
NULL,
};
+static int kxsd9_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ int ret = -EINVAL;
+
+ if (mask == (1 << IIO_CHAN_INFO_SCALE_SHARED)) {
+ /* Check no integer component */
+ if (val)
+ return -EINVAL;
+ ret = kxsd9_write_scale(indio_dev, val2);
+ }
+
+ return ret;
+}
+
+static int kxsd9_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret = -EINVAL;
+ struct kxsd9_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case 0:
+ ret = kxsd9_read(indio_dev, chan->address);
+ if (ret < 0)
+ goto error_ret;
+ *val = ret;
+ break;
+ case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+ if (ret)
+ goto error_ret;
+ *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ };
+
+error_ret:
+ return ret;
+};
+#define KXSD9_ACCEL_CHAN(axis) \
+ { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask = 1 << IIO_CHAN_INFO_SCALE_SHARED, \
+ .address = KXSD9_REG_##axis, \
+ }
+
+static struct iio_chan_spec kxsd9_channels[] = {
+ KXSD9_ACCEL_CHAN(X), KXSD9_ACCEL_CHAN(Y), KXSD9_ACCEL_CHAN(Z),
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .address = KXSD9_REG_AUX,
+ }
+};
+
static const struct attribute_group kxsd9_attribute_group = {
.attrs = kxsd9_attributes,
};
static int __devinit kxsd9_power_up(struct kxsd9_state *st)
{
- struct spi_transfer xfers[2] = {
- {
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- .tx_buf = st->tx,
- }, {
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- .tx_buf = st->tx + 2,
- },
- };
- struct spi_message msg;
+ int ret;
+
st->tx[0] = 0x0d;
st->tx[1] = 0x40;
- st->tx[2] = 0x0c;
- st->tx[3] = 0x9b;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_write(st->us, st->tx, 2);
+ if (ret)
+ return ret;
- return spi_sync(st->us, &msg);
+ st->tx[0] = 0x0c;
+ st->tx[1] = 0x9b;
+ return spi_write(st->us, st->tx, 2);
};
static const struct iio_info kxsd9_info = {
+ .read_raw = &kxsd9_read_raw,
+ .write_raw = &kxsd9_write_raw,
.attrs = &kxsd9_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -300,7 +236,9 @@ static int __devinit kxsd9_probe(struct spi_device *spi)
st->us = spi;
mutex_init(&st->buf_lock);
-
+ indio_dev->channels = kxsd9_channels;
+ indio_dev->num_channels = ARRAY_SIZE(kxsd9_channels);
+ indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &kxsd9_info;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -324,10 +262,15 @@ error_ret:
static int __devexit kxsd9_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
+ iio_free_device(spi_get_drvdata(spi));
return 0;
}
+static const struct spi_device_id kxsd9_id[] = {
+ {"kxsd9", 0}
+};
+
static struct spi_driver kxsd9_driver = {
.driver = {
.name = "kxsd9",
@@ -335,6 +278,7 @@ static struct spi_driver kxsd9_driver = {
},
.probe = kxsd9_probe,
.remove = __devexit_p(kxsd9_remove),
+ .id_table = kxsd9_id,
};
static __init int kxsd9_spi_init(void)
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index 18b23acfb6f..7237a9ab61a 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -174,20 +174,20 @@ int lis3l02dq_spi_write_reg_8(struct iio_dev *indio_dev,
int lis3l02dq_disable_all_events(struct iio_dev *indio_dev);
-#ifdef CONFIG_IIO_RING_BUFFER
-/* At the moment triggers are only used for ring buffer
+#ifdef CONFIG_IIO_BUFFER
+/* At the moment triggers are only used for buffer
* filling. This may change!
*/
void lis3l02dq_remove_trigger(struct iio_dev *indio_dev);
int lis3l02dq_probe_trigger(struct iio_dev *indio_dev);
-ssize_t lis3l02dq_read_accel_from_ring(struct iio_ring_buffer *ring,
+ssize_t lis3l02dq_read_accel_from_buffer(struct iio_buffer *buffer,
int index,
int *val);
-int lis3l02dq_configure_ring(struct iio_dev *indio_dev);
-void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev);
+int lis3l02dq_configure_buffer(struct iio_dev *indio_dev);
+void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev);
#ifdef CONFIG_LIS3L02DQ_BUF_RING_SW
#define lis3l02dq_free_buf iio_sw_rb_free
@@ -202,8 +202,8 @@ void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev);
irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private);
#define lis3l02dq_th lis3l02dq_data_rdy_trig_poll
-#else /* CONFIG_IIO_RING_BUFFER */
-#define lis3l02dq_th lis3l02dq_noring
+#else /* CONFIG_IIO_BUFFER */
+#define lis3l02dq_th lis3l02dq_nobuffer
static inline void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
{
@@ -213,19 +213,19 @@ static inline int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
return 0;
}
static inline ssize_t
-lis3l02dq_read_accel_from_ring(struct iio_ring_buffer *ring,
- int index,
- int *val)
+lis3l02dq_read_accel_from_buffer(struct iio_buffer *buffer,
+ int index,
+ int *val)
{
return 0;
}
-static int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
+static int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
{
return 0;
}
-static inline void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
+static inline void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
{
}
-#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* CONFIG_IIO_BUFFER */
#endif /* SPI_LIS3L02DQ_H_ */
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index a29dfd27d44..559545a4233 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -21,12 +21,11 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
-
-#include "accel.h"
+#include "../buffer_generic.h"
#include "lis3l02dq.h"
@@ -35,8 +34,8 @@
* This means that use cannot be made of spi_write etc.
*/
/* direct copy of the irq_default_primary_handler */
-#ifndef CONFIG_IIO_RING_BUFFER
-static irqreturn_t lis3l02dq_noring(int irq, void *private)
+#ifndef CONFIG_IIO_BUFFER
+static irqreturn_t lis3l02dq_nobuffer(int irq, void *private)
{
return IRQ_WAKE_THREAD;
}
@@ -201,14 +200,14 @@ static u8 lis3l02dq_axis_map[3][3] = {
};
static int lis3l02dq_read_thresh(struct iio_dev *indio_dev,
- int e,
+ u64 e,
int *val)
{
return lis3l02dq_read_reg_s16(indio_dev, LIS3L02DQ_REG_THS_L_ADDR, val);
}
static int lis3l02dq_write_thresh(struct iio_dev *indio_dev,
- int event_code,
+ u64 event_code,
int val)
{
u16 value = val;
@@ -260,10 +259,11 @@ static int lis3l02dq_read_raw(struct iio_dev *indio_dev,
case 0:
/* Take the iio_dev status lock */
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_RING_TRIGGERED)
- ret = lis3l02dq_read_accel_from_ring(indio_dev->ring,
- chan->scan_index,
- val);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
+ ret = lis3l02dq_read_accel_from_buffer(indio_dev->
+ buffer,
+ chan->scan_index,
+ val);
else {
reg = lis3l02dq_axis_map
[LIS3L02DQ_ACCEL][chan->address];
@@ -453,55 +453,55 @@ static irqreturn_t lis3l02dq_event_handler(int irq, void *private)
&t);
if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_HIGH)
- iio_push_event(indio_dev, 0,
- IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
0,
- IIO_EV_MOD_Z,
+ IIO_MOD_Z,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
timestamp);
if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_LOW)
- iio_push_event(indio_dev, 0,
- IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
0,
- IIO_EV_MOD_Z,
+ IIO_MOD_Z,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
timestamp);
if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_HIGH)
- iio_push_event(indio_dev, 0,
- IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
0,
- IIO_EV_MOD_Y,
+ IIO_MOD_Y,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
timestamp);
if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_LOW)
- iio_push_event(indio_dev, 0,
- IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
0,
- IIO_EV_MOD_Y,
+ IIO_MOD_Y,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
timestamp);
if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_HIGH)
- iio_push_event(indio_dev, 0,
- IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
0,
- IIO_EV_MOD_X,
+ IIO_MOD_X,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
timestamp);
if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_LOW)
- iio_push_event(indio_dev, 0,
- IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
0,
- IIO_EV_MOD_X,
+ IIO_MOD_X,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
timestamp);
@@ -535,7 +535,7 @@ static struct iio_chan_spec lis3l02dq_channels[] = {
static ssize_t lis3l02dq_read_event_config(struct iio_dev *indio_dev,
- int event_code)
+ u64 event_code)
{
u8 val;
@@ -587,7 +587,7 @@ error_ret:
}
static int lis3l02dq_write_event_config(struct iio_dev *indio_dev,
- int event_code,
+ u64 event_code,
int state)
{
int ret = 0;
@@ -652,7 +652,6 @@ static const struct attribute_group lis3l02dq_attribute_group = {
};
static const struct iio_info lis3l02dq_info = {
- .num_interrupt_lines = 1,
.read_raw = &lis3l02dq_read_raw,
.write_raw = &lis3l02dq_write_raw,
.read_event_value = &lis3l02dq_read_thresh,
@@ -665,7 +664,7 @@ static const struct iio_info lis3l02dq_info = {
static int __devinit lis3l02dq_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct lis3l02dq_state *st;
struct iio_dev *indio_dev;
@@ -688,21 +687,16 @@ static int __devinit lis3l02dq_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = lis3l02dq_configure_ring(indio_dev);
+ ret = lis3l02dq_configure_buffer(indio_dev);
if (ret)
goto error_free_dev;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_ring_funcs;
- regdone = 1;
-
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- lis3l02dq_channels,
- ARRAY_SIZE(lis3l02dq_channels));
+ ret = iio_buffer_register(indio_dev,
+ lis3l02dq_channels,
+ ARRAY_SIZE(lis3l02dq_channels));
if (ret) {
- printk(KERN_ERR "failed to initialize the ring\n");
- goto error_unreg_ring_funcs;
+ printk(KERN_ERR "failed to initialize the buffer\n");
+ goto error_unreg_buffer_funcs;
}
if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
@@ -713,7 +707,7 @@ static int __devinit lis3l02dq_probe(struct spi_device *spi)
"lis3l02dq",
indio_dev);
if (ret)
- goto error_uninitialize_ring;
+ goto error_uninitialize_buffer;
ret = lis3l02dq_probe_trigger(indio_dev);
if (ret)
@@ -724,23 +718,25 @@ static int __devinit lis3l02dq_probe(struct spi_device *spi)
ret = lis3l02dq_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_remove_trigger;
+
return 0;
error_remove_trigger:
- if (indio_dev->modes & INDIO_RING_TRIGGERED)
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
lis3l02dq_remove_trigger(indio_dev);
error_free_interrupt:
if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
free_irq(st->us->irq, indio_dev);
-error_uninitialize_ring:
- iio_ring_buffer_unregister(indio_dev->ring);
-error_unreg_ring_funcs:
- lis3l02dq_unconfigure_ring(indio_dev);
+error_uninitialize_buffer:
+ iio_buffer_unregister(indio_dev);
+error_unreg_buffer_funcs:
+ lis3l02dq_unconfigure_buffer(indio_dev);
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -778,6 +774,8 @@ static int lis3l02dq_remove(struct spi_device *spi)
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct lis3l02dq_state *st = iio_priv(indio_dev);
+ iio_device_unregister(indio_dev);
+
ret = lis3l02dq_disable_all_events(indio_dev);
if (ret)
goto err_ret;
@@ -790,12 +788,10 @@ static int lis3l02dq_remove(struct spi_device *spi)
free_irq(st->us->irq, indio_dev);
lis3l02dq_remove_trigger(indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
- lis3l02dq_unconfigure_ring(indio_dev);
- iio_device_unregister(indio_dev);
-
- return 0;
+ iio_buffer_unregister(indio_dev);
+ lis3l02dq_unconfigure_buffer(indio_dev);
+ iio_free_device(indio_dev);
err_ret:
return ret;
}
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 8d5c8ac7db5..5c542dd0461 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -1,19 +1,15 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
#include <linux/slab.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../ring_sw.h"
#include "../kfifo_buf.h"
-#include "accel.h"
#include "../trigger.h"
+#include "../trigger_consumer.h"
#include "lis3l02dq.h"
/**
@@ -42,31 +38,32 @@ irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
}
/**
- * lis3l02dq_read_accel_from_ring() individual acceleration read from ring
+ * lis3l02dq_read_accel_from_buffer() individual acceleration read from buffer
**/
-ssize_t lis3l02dq_read_accel_from_ring(struct iio_ring_buffer *ring,
- int index,
- int *val)
+ssize_t lis3l02dq_read_accel_from_buffer(struct iio_buffer *buffer,
+ int index,
+ int *val)
{
int ret;
s16 *data;
- if (!iio_scan_mask_query(ring, index))
+ if (!iio_scan_mask_query(buffer, index))
return -EINVAL;
- if (!ring->access->read_last)
+ if (!buffer->access->read_last)
return -EBUSY;
- data = kmalloc(ring->access->get_bytes_per_datum(ring),
+ data = kmalloc(buffer->access->get_bytes_per_datum(buffer),
GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
- ret = ring->access->read_last(ring, (u8 *)data);
+ ret = buffer->access->read_last(buffer, (u8 *)data);
if (ret)
goto error_free_data;
- *val = data[bitmap_weight(&ring->scan_mask, index)];
+ *val = data[bitmap_weight(buffer->scan_mask, index)];
error_free_data:
+
kfree(data);
return ret;
@@ -89,13 +86,13 @@ static const u8 read_all_tx_array[] = {
**/
static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
{
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *buffer = indio_dev->buffer;
struct lis3l02dq_state *st = iio_priv(indio_dev);
struct spi_transfer *xfers;
struct spi_message msg;
int ret, i, j = 0;
- xfers = kzalloc((ring->scan_count) * 2
+ xfers = kzalloc((buffer->scan_count) * 2
* sizeof(*xfers), GFP_KERNEL);
if (!xfers)
return -ENOMEM;
@@ -103,7 +100,7 @@ static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
mutex_lock(&st->buf_lock);
for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
- if (ring->scan_mask & (1 << i)) {
+ if (test_bit(i, buffer->scan_mask)) {
/* lower byte */
xfers[j].tx_buf = st->tx + 2*j;
st->tx[2*j] = read_all_tx_array[i*4];
@@ -131,7 +128,7 @@ static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
* values in alternate bytes
*/
spi_message_init(&msg);
- for (j = 0; j < ring->scan_count * 2; j++)
+ for (j = 0; j < buffer->scan_count * 2; j++)
spi_message_add_tail(&xfers[j], &msg);
ret = spi_sync(st->us, &msg);
@@ -141,20 +138,20 @@ static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
return ret;
}
-static int lis3l02dq_get_ring_element(struct iio_dev *indio_dev,
+static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
u8 *buf)
{
int ret, i;
u8 *rx_array ;
s16 *data = (s16 *)buf;
- rx_array = kzalloc(4 * (indio_dev->ring->scan_count), GFP_KERNEL);
+ rx_array = kzalloc(4 * (indio_dev->buffer->scan_count), GFP_KERNEL);
if (rx_array == NULL)
return -ENOMEM;
ret = lis3l02dq_read_all(indio_dev, rx_array);
if (ret < 0)
return ret;
- for (i = 0; i < indio_dev->ring->scan_count; i++)
+ for (i = 0; i < indio_dev->buffer->scan_count; i++)
data[i] = combine_8_to_16(rx_array[i*4+1],
rx_array[i*4+3]);
kfree(rx_array);
@@ -165,27 +162,27 @@ static int lis3l02dq_get_ring_element(struct iio_dev *indio_dev,
static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct iio_buffer *buffer = indio_dev->buffer;
int len = 0;
- size_t datasize = ring->access->get_bytes_per_datum(ring);
+ size_t datasize = buffer->access->get_bytes_per_datum(buffer);
char *data = kmalloc(datasize, GFP_KERNEL);
if (data == NULL) {
dev_err(indio_dev->dev.parent,
- "memory alloc failed in ring bh");
+ "memory alloc failed in buffer bh");
return -ENOMEM;
}
- if (ring->scan_count)
- len = lis3l02dq_get_ring_element(indio_dev, data);
+ if (buffer->scan_count)
+ len = lis3l02dq_get_buffer_element(indio_dev, data);
/* Guaranteed to be aligned with 8 byte boundary */
- if (ring->scan_timestamp)
+ if (buffer->scan_timestamp)
*(s64 *)(((phys_addr_t)data + len
+ sizeof(s64) - 1) & ~(sizeof(s64) - 1))
= pf->timestamp;
- ring->access->store_to(ring, (u8 *)data, pf->timestamp);
+ buffer->access->store_to(buffer, (u8 *)data, pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
kfree(data);
@@ -258,7 +255,7 @@ error_ret:
*
* If disabling the interrupt also does a final read to ensure it is clear.
* This is only important in some cases where the scan enable elements are
- * switched before the ring is reenabled.
+ * switched before the buffer is reenabled.
**/
static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
bool state)
@@ -306,6 +303,12 @@ static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
return 0;
}
+static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
+ .try_reenable = &lis3l02dq_trig_try_reen,
+};
+
int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
@@ -318,10 +321,8 @@ int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
}
st->trig->dev.parent = &st->us->dev;
- st->trig->owner = THIS_MODULE;
+ st->trig->ops = &lis3l02dq_trigger_ops;
st->trig->private_data = indio_dev;
- st->trig->set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state;
- st->trig->try_reenable = &lis3l02dq_trig_try_reen;
ret = iio_trigger_register(st->trig);
if (ret)
goto error_free_trig;
@@ -342,13 +343,13 @@ void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
iio_free_trigger(st->trig);
}
-void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
+void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
{
iio_dealloc_pollfunc(indio_dev->pollfunc);
- lis3l02dq_free_buf(indio_dev->ring);
+ lis3l02dq_free_buf(indio_dev->buffer);
}
-static int lis3l02dq_ring_postenable(struct iio_dev *indio_dev)
+static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
{
/* Disable unwanted channels otherwise the interrupt will not clear */
u8 t;
@@ -361,17 +362,17 @@ static int lis3l02dq_ring_postenable(struct iio_dev *indio_dev)
if (ret)
goto error_ret;
- if (iio_scan_mask_query(indio_dev->ring, 0)) {
+ if (iio_scan_mask_query(indio_dev->buffer, 0)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
oneenabled = true;
} else
t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
- if (iio_scan_mask_query(indio_dev->ring, 1)) {
+ if (iio_scan_mask_query(indio_dev->buffer, 1)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
oneenabled = true;
} else
t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
- if (iio_scan_mask_query(indio_dev->ring, 2)) {
+ if (iio_scan_mask_query(indio_dev->buffer, 2)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
oneenabled = true;
} else
@@ -385,18 +386,18 @@ static int lis3l02dq_ring_postenable(struct iio_dev *indio_dev)
if (ret)
goto error_ret;
- return iio_triggered_ring_postenable(indio_dev);
+ return iio_triggered_buffer_postenable(indio_dev);
error_ret:
return ret;
}
/* Turn all channels on again */
-static int lis3l02dq_ring_predisable(struct iio_dev *indio_dev)
+static int lis3l02dq_buffer_predisable(struct iio_dev *indio_dev)
{
u8 t;
int ret;
- ret = iio_triggered_ring_predisable(indio_dev);
+ ret = iio_triggered_buffer_predisable(indio_dev);
if (ret)
goto error_ret;
@@ -417,34 +418,29 @@ error_ret:
return ret;
}
-static const struct iio_ring_setup_ops lis3l02dq_ring_setup_ops = {
- .preenable = &iio_sw_ring_preenable,
- .postenable = &lis3l02dq_ring_postenable,
- .predisable = &lis3l02dq_ring_predisable,
+static const struct iio_buffer_setup_ops lis3l02dq_buffer_setup_ops = {
+ .preenable = &iio_sw_buffer_preenable,
+ .postenable = &lis3l02dq_buffer_postenable,
+ .predisable = &lis3l02dq_buffer_predisable,
};
-int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
+int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
{
int ret;
- struct iio_ring_buffer *ring;
+ struct iio_buffer *buffer;
- ring = lis3l02dq_alloc_buf(indio_dev);
- if (!ring)
+ buffer = lis3l02dq_alloc_buf(indio_dev);
+ if (!buffer)
return -ENOMEM;
- indio_dev->ring = ring;
- /* Effectively select the ring buffer implementation */
- indio_dev->ring->access = &lis3l02dq_access_funcs;
- ring->bpe = 2;
-
- ring->scan_timestamp = true;
- ring->setup_ops = &lis3l02dq_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->buffer = buffer;
+ /* Effectively select the buffer implementation */
+ indio_dev->buffer->access = &lis3l02dq_access_funcs;
+ buffer->bpe = 2;
- /* Set default scan mode */
- iio_scan_mask_set(ring, 0);
- iio_scan_mask_set(ring, 1);
- iio_scan_mask_set(ring, 2);
+ buffer->scan_timestamp = true;
+ buffer->setup_ops = &lis3l02dq_buffer_setup_ops;
+ buffer->owner = THIS_MODULE;
/* Functions are NULL as we set handler below */
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
@@ -459,10 +455,10 @@ int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
- lis3l02dq_free_buf(indio_dev->ring);
+ lis3l02dq_free_buf(indio_dev->buffer);
return ret;
}
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
index 1e396cefdf6..ad38dd955cd 100644
--- a/drivers/staging/iio/accel/sca3000.h
+++ b/drivers/staging/iio/accel/sca3000.h
@@ -221,7 +221,7 @@ int sca3000_read_data_short(struct sca3000_state *st,
**/
int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val);
-#ifdef CONFIG_IIO_RING_BUFFER
+#ifdef CONFIG_IIO_BUFFER
/**
* sca3000_register_ring_funcs() setup the ring state change functions
**/
@@ -248,7 +248,7 @@ void sca3000_unconfigure_ring(struct iio_dev *indio_dev);
* sca3000_ring_int_process() handles ring related event pushing and escalation
* @val: the event code
**/
-void sca3000_ring_int_process(u8 val, struct iio_ring_buffer *ring);
+void sca3000_ring_int_process(u8 val, struct iio_buffer *ring);
#else
static inline void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index 603f5bca797..a44a70589db 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -11,18 +11,17 @@
*/
#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/sysfs.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
+#include "../buffer_generic.h"
-#include "accel.h"
#include "sca3000.h"
enum sca3000_variant {
@@ -268,8 +267,8 @@ static ssize_t sca3000_show_rev(struct device *dev,
char *buf)
{
int len = 0, ret;
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct sca3000_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct sca3000_state *st = iio_priv(indio_dev);
mutex_lock(&st->lock);
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_REVID, 1);
@@ -296,8 +295,8 @@ sca3000_show_available_measurement_modes(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct sca3000_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct sca3000_state *st = iio_priv(indio_dev);
int len = 0;
len += sprintf(buf + len, "0 - normal mode");
@@ -328,8 +327,8 @@ sca3000_show_measurement_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct sca3000_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct sca3000_state *st = iio_priv(indio_dev);
int len = 0, ret;
mutex_lock(&st->lock);
@@ -379,8 +378,8 @@ sca3000_store_measurement_mode(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct sca3000_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct sca3000_state *st = iio_priv(indio_dev);
int ret;
int mask = 0x03;
long val;
@@ -422,7 +421,7 @@ static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR,
/* More standard attributes */
-static IIO_DEV_ATTR_REV(sca3000_show_rev);
+static IIO_DEVICE_ATTR(revision, S_IRUGO, sca3000_show_rev, NULL, 0);
#define SCA3000_INFO_MASK \
(1 << IIO_CHAN_INFO_SCALE_SHARED)
@@ -695,7 +694,7 @@ static IIO_CONST_ATTR_TEMP_OFFSET("-214.6");
* sca3000_read_thresh() - query of a threshold
**/
static int sca3000_read_thresh(struct iio_dev *indio_dev,
- int e,
+ u64 e,
int *val)
{
int ret, i;
@@ -723,8 +722,8 @@ static int sca3000_read_thresh(struct iio_dev *indio_dev,
* sca3000_write_thresh() control of threshold
**/
static int sca3000_write_thresh(struct iio_dev *indio_dev,
- int e,
- int val)
+ u64 e,
+ int val)
{
struct sca3000_state *st = iio_priv(indio_dev);
int num = IIO_EVENT_CODE_EXTRACT_MODIFIER(e);
@@ -771,9 +770,9 @@ static struct attribute *sca3000_attributes_with_temp[] = {
&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
/* Only present if temp sensor is */
- &iio_dev_attr_temp_raw.dev_attr.attr,
- &iio_const_attr_temp_offset.dev_attr.attr,
- &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_in_temp_raw.dev_attr.attr,
+ &iio_const_attr_in_temp_offset.dev_attr.attr,
+ &iio_const_attr_in_temp_scale.dev_attr.attr,
NULL,
};
@@ -812,40 +811,40 @@ static irqreturn_t sca3000_event_handler(int irq, void *private)
if (ret)
goto done;
- sca3000_ring_int_process(val, indio_dev->ring);
+ sca3000_ring_int_process(val, indio_dev->buffer);
if (val & SCA3000_INT_STATUS_FREE_FALL)
- iio_push_event(indio_dev, 0,
- IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
0,
- IIO_EV_MOD_X_AND_Y_AND_Z,
+ IIO_MOD_X_AND_Y_AND_Z,
IIO_EV_TYPE_MAG,
IIO_EV_DIR_FALLING),
last_timestamp);
if (val & SCA3000_INT_STATUS_Y_TRIGGER)
- iio_push_event(indio_dev, 0,
- IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
0,
- IIO_EV_MOD_Y,
+ IIO_MOD_Y,
IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
last_timestamp);
if (val & SCA3000_INT_STATUS_X_TRIGGER)
- iio_push_event(indio_dev, 0,
- IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
0,
- IIO_EV_MOD_X,
+ IIO_MOD_X,
IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
last_timestamp);
if (val & SCA3000_INT_STATUS_Z_TRIGGER)
- iio_push_event(indio_dev, 0,
- IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
0,
- IIO_EV_MOD_Z,
+ IIO_MOD_Z,
IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
last_timestamp);
@@ -858,7 +857,7 @@ done:
* sca3000_read_event_config() what events are enabled
**/
static int sca3000_read_event_config(struct iio_dev *indio_dev,
- int e)
+ u64 e)
{
struct sca3000_state *st = iio_priv(indio_dev);
int ret;
@@ -961,7 +960,7 @@ error_ret:
* this mode is disabled. Currently normal mode is assumed.
**/
static int sca3000_write_event_config(struct iio_dev *indio_dev,
- int e,
+ u64 e,
int state)
{
struct sca3000_state *st = iio_priv(indio_dev);
@@ -1018,14 +1017,14 @@ exit_point:
/* Free fall detector related event attribute */
static IIO_DEVICE_ATTR_NAMED(accel_xayaz_mag_falling_en,
- accel_x&y&z_mag_falling_en,
+ in_accel_x&y&z_mag_falling_en,
S_IRUGO | S_IWUSR,
sca3000_query_free_fall_mode,
sca3000_set_free_fall_mode,
0);
static IIO_CONST_ATTR_NAMED(accel_xayaz_mag_falling_period,
- accel_x&y&z_mag_falling_period,
+ in_accel_x&y&z_mag_falling_period,
"0.226");
static struct attribute *sca3000_event_attributes[] = {
@@ -1036,6 +1035,7 @@ static struct attribute *sca3000_event_attributes[] = {
static struct attribute_group sca3000_event_attribute_group = {
.attrs = sca3000_event_attributes,
+ .name = "events",
};
/**
@@ -1102,7 +1102,6 @@ error_ret:
static const struct iio_info sca3000_info = {
.attrs = &sca3000_attribute_group,
.read_raw = &sca3000_read_raw,
- .num_interrupt_lines = 1,
.event_attrs = &sca3000_event_attribute_group,
.read_event_value = &sca3000_read_thresh,
.write_event_value = &sca3000_write_thresh,
@@ -1123,7 +1122,7 @@ static const struct iio_info sca3000_info_with_temp = {
static int __devinit sca3000_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct sca3000_state *st;
struct iio_dev *indio_dev;
@@ -1155,13 +1154,19 @@ static int __devinit sca3000_probe(struct spi_device *spi)
ret = iio_device_register(indio_dev);
if (ret < 0)
goto error_free_dev;
- regdone = 1;
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- sca3000_channels,
- ARRAY_SIZE(sca3000_channels));
+
+ ret = iio_buffer_register(indio_dev,
+ sca3000_channels,
+ ARRAY_SIZE(sca3000_channels));
if (ret < 0)
goto error_unregister_dev;
- if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+ if (indio_dev->buffer) {
+ iio_scan_mask_set(indio_dev->buffer, 0);
+ iio_scan_mask_set(indio_dev->buffer, 1);
+ iio_scan_mask_set(indio_dev->buffer, 2);
+ }
+
+ if (spi->irq) {
ret = request_threaded_irq(spi->irq,
NULL,
&sca3000_event_handler,
@@ -1178,16 +1183,14 @@ static int __devinit sca3000_probe(struct spi_device *spi)
return 0;
error_free_irq:
- if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+ if (spi->irq)
free_irq(spi->irq, indio_dev);
error_unregister_ring:
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_buffer_unregister(indio_dev);
error_unregister_dev:
+ iio_device_unregister(indio_dev);
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
@@ -1220,11 +1223,12 @@ static int sca3000_remove(struct spi_device *spi)
ret = sca3000_stop_all_interrupts(st);
if (ret)
return ret;
- if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+ if (spi->irq)
free_irq(spi->irq, indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
- sca3000_unconfigure_ring(indio_dev);
iio_device_unregister(indio_dev);
+ iio_buffer_unregister(indio_dev);
+ sca3000_unconfigure_ring(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index a704c75fffc..4a9a01dccd0 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -10,9 +10,7 @@
*/
#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <linux/fs.h>
-#include <linux/device.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
@@ -22,9 +20,8 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
+#include "../buffer_generic.h"
#include "../ring_hw.h"
-#include "accel.h"
#include "sca3000.h"
/* RFC / future work
@@ -84,10 +81,10 @@ error_ret:
* can only be inferred approximately from ring buffer events such as 50% full
* and knowledge of when buffer was last emptied. This is left to userspace.
**/
-static int sca3000_read_first_n_hw_rb(struct iio_ring_buffer *r,
+static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
size_t count, char __user *buf)
{
- struct iio_hw_ring_buffer *hw_ring = iio_to_hw_ring_buf(r);
+ struct iio_hw_buffer *hw_ring = iio_to_hw_buf(r);
struct iio_dev *indio_dev = hw_ring->private;
struct sca3000_state *st = iio_priv(indio_dev);
u8 *rx;
@@ -137,25 +134,20 @@ error_ret:
}
/* This is only valid with all 3 elements enabled */
-static int sca3000_ring_get_length(struct iio_ring_buffer *r)
+static int sca3000_ring_get_length(struct iio_buffer *r)
{
return 64;
}
/* only valid if resolution is kept at 11bits */
-static int sca3000_ring_get_bytes_per_datum(struct iio_ring_buffer *r)
+static int sca3000_ring_get_bytes_per_datum(struct iio_buffer *r)
{
return 6;
}
-static void sca3000_ring_release(struct device *dev)
-{
- struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
- kfree(iio_to_hw_ring_buf(r));
-}
-static IIO_RING_ENABLE_ATTR;
-static IIO_RING_BYTES_PER_DATUM_ATTR;
-static IIO_RING_LENGTH_ATTR;
+static IIO_BUFFER_ENABLE_ATTR;
+static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
+static IIO_BUFFER_LENGTH_ATTR;
/**
* sca3000_query_ring_int() is the hardware ring status interrupt enabled
@@ -166,7 +158,7 @@ static ssize_t sca3000_query_ring_int(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret, val;
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+ struct iio_buffer *ring = dev_get_drvdata(dev);
struct iio_dev *indio_dev = ring->indio_dev;
struct sca3000_state *st = iio_priv(indio_dev);
@@ -188,7 +180,7 @@ static ssize_t sca3000_set_ring_int(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+ struct iio_buffer *ring = dev_get_drvdata(dev);
struct iio_dev *indio_dev = ring->indio_dev;
struct sca3000_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
@@ -226,86 +218,18 @@ static IIO_DEVICE_ATTR(75_percent, S_IRUGO | S_IWUSR,
sca3000_set_ring_int,
SCA3000_INT_MASK_RING_THREE_QUARTER);
-
-/**
- * sca3000_show_ring_bpse() -sysfs function to query bits per sample from ring
- * @dev: ring buffer device
- * @attr: this device attribute
- * @buf: buffer to write to
- **/
-static ssize_t sca3000_show_ring_bpse(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int len = 0, ret;
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *indio_dev = ring->indio_dev;
- struct sca3000_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- if (st->rx[0] & SCA3000_RING_BUF_8BIT)
- len = sprintf(buf, "s8/8\n");
- else
- len = sprintf(buf, "s11/16\n");
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-/**
- * sca3000_store_ring_bpse() - bits per scan element
- * @dev: ring buffer device
- * @attr: attribute called from
- * @buf: input from userspace
- * @len: length of input
- **/
-static ssize_t sca3000_store_ring_bpse(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *indio_dev = ring->indio_dev;
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&st->lock);
-
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- if (sysfs_streq(buf, "s8/8")) {
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- st->rx[0] | SCA3000_RING_BUF_8BIT);
- st->bpse = 8;
- } else if (sysfs_streq(buf, "s11/16")) {
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- st->rx[0] & ~SCA3000_RING_BUF_8BIT);
- st->bpse = 11;
- } else
- ret = -EINVAL;
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
static ssize_t sca3000_show_buffer_scale(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+ struct iio_buffer *ring = dev_get_drvdata(dev);
struct iio_dev *indio_dev = ring->indio_dev;
struct sca3000_state *st = iio_priv(indio_dev);
return sprintf(buf, "0.%06d\n", 4*st->info->scale);
}
-static IIO_DEVICE_ATTR(accel_scale,
+static IIO_DEVICE_ATTR(in_accel_scale,
S_IRUGO,
sca3000_show_buffer_scale,
NULL,
@@ -323,28 +247,19 @@ static struct attribute *sca3000_ring_attributes[] = {
&dev_attr_enable.attr,
&iio_dev_attr_50_percent.dev_attr.attr,
&iio_dev_attr_75_percent.dev_attr.attr,
- &iio_dev_attr_accel_scale.dev_attr.attr,
+ &iio_dev_attr_in_accel_scale.dev_attr.attr,
NULL,
};
static struct attribute_group sca3000_ring_attr = {
.attrs = sca3000_ring_attributes,
+ .name = "buffer",
};
-static const struct attribute_group *sca3000_ring_attr_groups[] = {
- &sca3000_ring_attr,
- NULL
-};
-
-static struct device_type sca3000_ring_type = {
- .release = sca3000_ring_release,
- .groups = sca3000_ring_attr_groups,
-};
-
-static struct iio_ring_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
+static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
{
- struct iio_ring_buffer *buf;
- struct iio_hw_ring_buffer *ring;
+ struct iio_buffer *buf;
+ struct iio_hw_buffer *ring;
ring = kzalloc(sizeof *ring, GFP_KERNEL);
if (!ring)
@@ -353,21 +268,18 @@ static struct iio_ring_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
ring->private = indio_dev;
buf = &ring->buf;
buf->stufftoread = 0;
- iio_ring_buffer_init(buf, indio_dev);
- buf->dev.type = &sca3000_ring_type;
- buf->dev.parent = &indio_dev->dev;
- dev_set_drvdata(&buf->dev, (void *)buf);
+ buf->attrs = &sca3000_ring_attr;
+ iio_buffer_init(buf, indio_dev);
return buf;
}
-static inline void sca3000_rb_free(struct iio_ring_buffer *r)
+static inline void sca3000_rb_free(struct iio_buffer *r)
{
- if (r)
- iio_put_ring_buffer(r);
+ kfree(iio_to_hw_buf(r));
}
-static const struct iio_ring_access_funcs sca3000_ring_access_funcs = {
+static const struct iio_buffer_access_funcs sca3000_ring_access_funcs = {
.read_first_n = &sca3000_read_first_n_hw_rb,
.get_length = &sca3000_ring_get_length,
.get_bytes_per_datum = &sca3000_ring_get_bytes_per_datum,
@@ -375,23 +287,19 @@ static const struct iio_ring_access_funcs sca3000_ring_access_funcs = {
int sca3000_configure_ring(struct iio_dev *indio_dev)
{
- indio_dev->ring = sca3000_rb_allocate(indio_dev);
- if (indio_dev->ring == NULL)
+ indio_dev->buffer = sca3000_rb_allocate(indio_dev);
+ if (indio_dev->buffer == NULL)
return -ENOMEM;
- indio_dev->modes |= INDIO_RING_HARDWARE_BUFFER;
-
- indio_dev->ring->access = &sca3000_ring_access_funcs;
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
- iio_scan_mask_set(indio_dev->ring, 0);
- iio_scan_mask_set(indio_dev->ring, 1);
- iio_scan_mask_set(indio_dev->ring, 2);
+ indio_dev->buffer->access = &sca3000_ring_access_funcs;
return 0;
}
void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
{
- sca3000_rb_free(indio_dev->ring);
+ sca3000_rb_free(indio_dev->buffer);
}
static inline
@@ -435,14 +343,14 @@ static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
return __sca3000_hw_ring_state_set(indio_dev, 0);
}
-static const struct iio_ring_setup_ops sca3000_ring_setup_ops = {
+static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
.preenable = &sca3000_hw_ring_preenable,
.postdisable = &sca3000_hw_ring_postdisable,
};
void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
{
- indio_dev->ring->setup_ops = &sca3000_ring_setup_ops;
+ indio_dev->buffer->setup_ops = &sca3000_ring_setup_ops;
}
/**
@@ -451,7 +359,7 @@ void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
* This is only split from the main interrupt handler so as to
* reduce the amount of code if the ring buffer is not enabled.
**/
-void sca3000_ring_int_process(u8 val, struct iio_ring_buffer *ring)
+void sca3000_ring_int_process(u8 val, struct iio_buffer *ring)
{
if (val & (SCA3000_INT_STATUS_THREE_QUARTERS |
SCA3000_INT_STATUS_HALF)) {
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index b39f2e1c1fe..d9decea4fa6 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -1,28 +1,14 @@
#
# ADC drivers
#
-comment "Analog to digital convertors"
-
-config AD7150
- tristate "Analog Devices ad7150/1/6 capacitive sensor driver"
- depends on I2C
- help
- Say yes here to build support for Analog Devices capacitive sensors.
- (ad7150, ad7151, ad7156) Provides direct access via sysfs.
-
-config AD7152
- tristate "Analog Devices ad7152/3 capacitive sensor driver"
- depends on I2C
- help
- Say yes here to build support for Analog Devices capacitive sensors.
- (ad7152, ad7153) Provides direct access via sysfs.
+menu "Analog to digital converters"
config AD7291
- tristate "Analog Devices AD7291 temperature sensor driver"
+ tristate "Analog Devices AD7291 ADC driver"
depends on I2C
help
Say yes here to build support for Analog Devices AD7291
- temperature sensors.
+ 8 Channel ADC with temperature sensor.
config AD7298
tristate "Analog Devices AD7298 ADC driver"
@@ -34,22 +20,15 @@ config AD7298
To compile this driver as a module, choose M here: the
module will be called ad7298.
-config AD7314
- tristate "Analog Devices AD7314 temperature sensor driver"
- depends on SPI
- help
- Say yes here to build support for Analog Devices AD7314
- temperature sensors.
-
config AD7606
tristate "Analog Devices AD7606 ADC driver"
depends on GPIOLIB
- select IIO_RING_BUFFER
+ select IIO_BUFFER
select IIO_TRIGGER
select IIO_SW_RING
help
Say yes here to build support for Analog Devices:
- ad7606, ad7606-6, ad7606-4 analog to digital convertors (ADC).
+ ad7606, ad7606-6, ad7606-4 analog to digital converters (ADC).
To compile this driver as a module, choose M here: the
module will be called ad7606.
@@ -72,18 +51,18 @@ config AD7606_IFACE_SPI
config AD799X
tristate "Analog Devices AD799x ADC driver"
depends on I2C
- select IIO_TRIGGER if IIO_RING_BUFFER
+ select IIO_TRIGGER if IIO_BUFFER
select AD799X_RING_BUFFER
help
Say yes here to build support for Analog Devices:
ad7991, ad7995, ad7999, ad7992, ad7993, ad7994, ad7997, ad7998
- i2c analog to digital convertors (ADC). Provides direct access
+ i2c analog to digital converters (ADC). Provides direct access
via sysfs.
config AD799X_RING_BUFFER
bool "Analog Devices AD799x: use ring buffer"
depends on AD799X
- select IIO_RING_BUFFER
+ select IIO_BUFFER
select IIO_SW_RING
help
Say yes here to include ring buffer support in the AD799X
@@ -92,13 +71,13 @@ config AD799X_RING_BUFFER
config AD7476
tristate "Analog Devices AD7475/6/7/8 AD7466/7/8 and AD7495 ADC driver"
depends on SPI
- select IIO_RING_BUFFER
+ select IIO_BUFFER
select IIO_SW_RING
select IIO_TRIGGER
help
Say yes here to build support for Analog Devices
AD7475, AD7476, AD7477, AD7478, AD7466, AD7467, AD7468, AD7495
- SPI analog to digital convertors (ADC).
+ SPI analog to digital converters (ADC).
If unsure, say N (but it's safe to say "Y").
To compile this driver as a module, choose M here: the
@@ -107,12 +86,12 @@ config AD7476
config AD7887
tristate "Analog Devices AD7887 ADC driver"
depends on SPI
- select IIO_RING_BUFFER
+ select IIO_BUFFER
select IIO_SW_RING
select IIO_TRIGGER
help
Say yes here to build support for Analog Devices
- AD7887 SPI analog to digital convertor (ADC).
+ AD7887 SPI analog to digital converter (ADC).
If unsure, say N (but it's safe to say "Y").
To compile this driver as a module, choose M here: the
@@ -124,7 +103,7 @@ config AD7780
depends on GPIOLIB
help
Say yes here to build support for Analog Devices
- AD7780 and AD7781 SPI analog to digital convertors (ADC).
+ AD7780 and AD7781 SPI analog to digital converters (ADC).
If unsure, say N (but it's safe to say "Y").
To compile this driver as a module, choose M here: the
@@ -133,40 +112,38 @@ config AD7780
config AD7793
tristate "Analog Devices AD7792 AD7793 ADC driver"
depends on SPI
- select IIO_RING_BUFFER
+ select IIO_BUFFER
select IIO_SW_RING
select IIO_TRIGGER
help
Say yes here to build support for Analog Devices
- AD7792 and AD7793 SPI analog to digital convertors (ADC).
+ AD7792 and AD7793 SPI analog to digital converters (ADC).
If unsure, say N (but it's safe to say "Y").
To compile this driver as a module, choose M here: the
module will be called AD7793.
-config AD7745
- tristate "Analog Devices AD7745, AD7746 AD7747 capacitive sensor driver"
- depends on I2C
- help
- Say yes here to build support for Analog Devices capacitive sensors.
- (AD7745, AD7746, AD7747) Provides direct access via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called ad7745.
-
config AD7816
tristate "Analog Devices AD7816/7/8 temperature sensor and ADC driver"
depends on SPI
+ depends on GENERIC_GPIO
help
Say yes here to build support for Analog Devices AD7816/7/8
temperature sensors and ADC.
-config ADT75
- tristate "Analog Devices ADT75 temperature sensor driver"
- depends on I2C
+config AD7192
+ tristate "Analog Devices AD7190 AD7192 AD7195 ADC driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_SW_RING
+ select IIO_TRIGGER
help
- Say yes here to build support for Analog Devices ADT75
- temperature sensors.
+ Say yes here to build support for Analog Devices AD7190,
+ AD7192 or AD7195 SPI analog to digital converters (ADC).
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7192.
config ADT7310
tristate "Analog Devices ADT7310 temperature sensor driver"
@@ -182,14 +159,24 @@ config ADT7410
Say yes here to build support for Analog Devices ADT7410
temperature sensors.
+config AD7280
+ tristate "Analog Devices AD7280A Lithium Ion Battery Monitoring System"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD7280A
+ Lithium Ion Battery Monitoring System.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7280a
+
config MAX1363
tristate "Maxim max1363 ADC driver"
depends on I2C
- select IIO_TRIGGER if IIO_RING_BUFFER
+ select IIO_TRIGGER if IIO_BUFFER
select MAX1363_RING_BUFFER
help
Say yes here to build support for many Maxim i2c analog to digital
- convertors (ADC). (max1361, max1362, max1363, max1364, max1036,
+ converters (ADC). (max1361, max1362, max1363, max1364, max1036,
max1037, max1038, max1039, max1136, max1136, max1137, max1138,
max1139, max1236, max1237, max11238, max1239, max11600, max11601,
max11602, max11603, max11604, max11605, max11606, max11607,
@@ -200,8 +187,10 @@ config MAX1363
config MAX1363_RING_BUFFER
bool "Maxim max1363: use ring buffer"
depends on MAX1363
- select IIO_RING_BUFFER
+ select IIO_BUFFER
select IIO_SW_RING
help
Say yes here to include ring buffer support in the MAX1363
ADC driver.
+
+endmenu
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index f0203513997..ceee7f3c306 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -8,7 +8,7 @@ max1363-y += max1363_ring.o
obj-$(CONFIG_MAX1363) += max1363.o
ad7606-y := ad7606_core.o
-ad7606-$(CONFIG_IIO_RING_BUFFER) += ad7606_ring.o
+ad7606-$(CONFIG_IIO_BUFFER) += ad7606_ring.o
ad7606-$(CONFIG_AD7606_IFACE_PARALLEL) += ad7606_par.o
ad7606-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o
obj-$(CONFIG_AD7606) += ad7606.o
@@ -18,25 +18,22 @@ ad799x-$(CONFIG_AD799X_RING_BUFFER) += ad799x_ring.o
obj-$(CONFIG_AD799X) += ad799x.o
ad7476-y := ad7476_core.o
-ad7476-$(CONFIG_IIO_RING_BUFFER) += ad7476_ring.o
+ad7476-$(CONFIG_IIO_BUFFER) += ad7476_ring.o
obj-$(CONFIG_AD7476) += ad7476.o
ad7887-y := ad7887_core.o
-ad7887-$(CONFIG_IIO_RING_BUFFER) += ad7887_ring.o
+ad7887-$(CONFIG_IIO_BUFFER) += ad7887_ring.o
obj-$(CONFIG_AD7887) += ad7887.o
ad7298-y := ad7298_core.o
-ad7298-$(CONFIG_IIO_RING_BUFFER) += ad7298_ring.o
+ad7298-$(CONFIG_IIO_BUFFER) += ad7298_ring.o
obj-$(CONFIG_AD7298) += ad7298.o
-obj-$(CONFIG_AD7150) += ad7150.o
-obj-$(CONFIG_AD7152) += ad7152.o
obj-$(CONFIG_AD7291) += ad7291.o
-obj-$(CONFIG_AD7314) += ad7314.o
-obj-$(CONFIG_AD7745) += ad7745.o
obj-$(CONFIG_AD7780) += ad7780.o
obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7816) += ad7816.o
-obj-$(CONFIG_ADT75) += adt75.o
+obj-$(CONFIG_AD7192) += ad7192.o
obj-$(CONFIG_ADT7310) += adt7310.o
obj-$(CONFIG_ADT7410) += adt7410.o
+obj-$(CONFIG_AD7280) += ad7280a.o
diff --git a/drivers/staging/iio/adc/ad7150.c b/drivers/staging/iio/adc/ad7150.c
deleted file mode 100644
index 04017ef6688..00000000000
--- a/drivers/staging/iio/adc/ad7150.c
+++ /dev/null
@@ -1,812 +0,0 @@
-/*
- * AD7150 capacitive sensor driver supporting AD7150/1/6
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-
-#include "../iio.h"
-#include "../sysfs.h"
-
-/*
- * AD7150 registers definition
- */
-
-#define AD7150_STATUS 0
-#define AD7150_STATUS_OUT1 (1 << 3)
-#define AD7150_STATUS_OUT2 (1 << 5)
-#define AD7150_CH1_DATA_HIGH 1
-#define AD7150_CH1_DATA_LOW 2
-#define AD7150_CH2_DATA_HIGH 3
-#define AD7150_CH2_DATA_LOW 4
-#define AD7150_CH1_AVG_HIGH 5
-#define AD7150_CH1_AVG_LOW 6
-#define AD7150_CH2_AVG_HIGH 7
-#define AD7150_CH2_AVG_LOW 8
-#define AD7150_CH1_SENSITIVITY 9
-#define AD7150_CH1_THR_HOLD_H 9
-#define AD7150_CH1_TIMEOUT 10
-#define AD7150_CH1_THR_HOLD_L 10
-#define AD7150_CH1_SETUP 11
-#define AD7150_CH2_SENSITIVITY 12
-#define AD7150_CH2_THR_HOLD_H 12
-#define AD7150_CH2_TIMEOUT 13
-#define AD7150_CH2_THR_HOLD_L 13
-#define AD7150_CH2_SETUP 14
-#define AD7150_CFG 15
-#define AD7150_CFG_FIX (1 << 7)
-#define AD7150_PD_TIMER 16
-#define AD7150_CH1_CAPDAC 17
-#define AD7150_CH2_CAPDAC 18
-#define AD7150_SN3 19
-#define AD7150_SN2 20
-#define AD7150_SN1 21
-#define AD7150_SN0 22
-#define AD7150_ID 23
-
-#define AD7150_MAX_CONV_MODE 4
-
-/*
- * struct ad7150_chip_info - chip specifc information
- */
-
-struct ad7150_chip_info {
- struct i2c_client *client;
- bool inter;
- u16 ch1_threshold; /* Ch1 Threshold (in fixed threshold mode) */
- u8 ch1_sensitivity; /* Ch1 Sensitivity (in adaptive threshold mode) */
- u8 ch1_timeout; /* Ch1 Timeout (in adaptive threshold mode) */
- u8 ch1_setup;
- u16 ch2_threshold; /* Ch2 Threshold (in fixed threshold mode) */
- u8 ch2_sensitivity; /* Ch1 Sensitivity (in adaptive threshold mode) */
- u8 ch2_timeout; /* Ch1 Timeout (in adaptive threshold mode) */
- u8 ch2_setup;
- u8 powerdown_timer;
- char threshold_mode[10]; /* adaptive/fixed threshold mode */
- int old_state;
- char *conversion_mode;
-};
-
-struct ad7150_conversion_mode {
- char *name;
- u8 reg_cfg;
-};
-
-static struct ad7150_conversion_mode
-ad7150_conv_mode_table[AD7150_MAX_CONV_MODE] = {
- { "idle", 0 },
- { "continuous-conversion", 1 },
- { "single-conversion", 2 },
- { "power-down", 3 },
-};
-
-/*
- * ad7150 register access by I2C
- */
-
-static int ad7150_i2c_read(struct ad7150_chip_info *chip, u8 reg, u8 *data, int len)
-{
- struct i2c_client *client = chip->client;
- int ret = 0;
-
- ret = i2c_master_send(client, &reg, 1);
- if (ret < 0) {
- dev_err(&client->dev, "I2C write error\n");
- return ret;
- }
-
- ret = i2c_master_recv(client, data, len);
- if (ret < 0) {
- dev_err(&client->dev, "I2C read error\n");
- return ret;
- }
-
- return ret;
-}
-
-static int ad7150_i2c_write(struct ad7150_chip_info *chip, u8 reg, u8 data)
-{
- struct i2c_client *client = chip->client;
- int ret = 0;
-
- u8 tx[2] = {
- reg,
- data,
- };
-
- ret = i2c_master_send(client, tx, 2);
- if (ret < 0)
- dev_err(&client->dev, "I2C write error\n");
-
- return ret;
-}
-
-/*
- * sysfs nodes
- */
-
-#define IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(_show) \
- IIO_DEVICE_ATTR(available_conversion_modes, S_IRUGO, _show, NULL, 0)
-#define IIO_DEV_ATTR_CONVERSION_MODE(_mode, _show, _store) \
- IIO_DEVICE_ATTR(conversion_mode, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_AVAIL_THRESHOLD_MODES(_show) \
- IIO_DEVICE_ATTR(available_threshold_modes, S_IRUGO, _show, NULL, 0)
-#define IIO_DEV_ATTR_THRESHOLD_MODE(_mode, _show, _store) \
- IIO_DEVICE_ATTR(threshold_mode, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH1_THRESHOLD(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch1_threshold, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH2_THRESHOLD(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch2_threshold, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH1_SENSITIVITY(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch1_sensitivity, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH2_SENSITIVITY(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch2_sensitivity, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH1_TIMEOUT(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch1_timeout, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH2_TIMEOUT(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch2_timeout, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH1_VALUE(_show) \
- IIO_DEVICE_ATTR(ch1_value, S_IRUGO, _show, NULL, 0)
-#define IIO_DEV_ATTR_CH2_VALUE(_show) \
- IIO_DEVICE_ATTR(ch2_value, S_IRUGO, _show, NULL, 0)
-#define IIO_DEV_ATTR_CH1_SETUP(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch1_setup, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH2_SETUP(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch2_setup, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_POWERDOWN_TIMER(_mode, _show, _store) \
- IIO_DEVICE_ATTR(powerdown_timer, _mode, _show, _store, 0)
-
-static ssize_t ad7150_show_conversion_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int i;
- int len = 0;
-
- for (i = 0; i < AD7150_MAX_CONV_MODE; i++)
- len += sprintf(buf + len, "%s\n", ad7150_conv_mode_table[i].name);
-
- return len;
-}
-
-static IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(ad7150_show_conversion_modes);
-
-static ssize_t ad7150_show_conversion_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%s\n", chip->conversion_mode);
-}
-
-static ssize_t ad7150_store_conversion_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
- u8 cfg;
- int i;
-
- ad7150_i2c_read(chip, AD7150_CFG, &cfg, 1);
-
- for (i = 0; i < AD7150_MAX_CONV_MODE; i++) {
- if (strncmp(buf, ad7150_conv_mode_table[i].name,
- strlen(ad7150_conv_mode_table[i].name) - 1) == 0) {
- chip->conversion_mode = ad7150_conv_mode_table[i].name;
- cfg |= 0x18 | ad7150_conv_mode_table[i].reg_cfg;
- ad7150_i2c_write(chip, AD7150_CFG, cfg);
- return len;
- }
- }
-
- dev_err(dev, "not supported conversion mode\n");
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CONVERSION_MODE(S_IRUGO | S_IWUSR,
- ad7150_show_conversion_mode,
- ad7150_store_conversion_mode);
-
-static ssize_t ad7150_show_threshold_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "adaptive\nfixed\n");
-}
-
-static IIO_DEV_ATTR_AVAIL_THRESHOLD_MODES(ad7150_show_threshold_modes);
-
-static ssize_t ad7150_show_ch1_value(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
- u8 data[2];
-
- ad7150_i2c_read(chip, AD7150_CH1_DATA_HIGH, data, 2);
- return sprintf(buf, "%d\n", ((int) data[0] << 8) | data[1]);
-}
-
-static IIO_DEV_ATTR_CH1_VALUE(ad7150_show_ch1_value);
-
-static ssize_t ad7150_show_ch2_value(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
- u8 data[2];
-
- ad7150_i2c_read(chip, AD7150_CH2_DATA_HIGH, data, 2);
- return sprintf(buf, "%d\n", ((int) data[0] << 8) | data[1]);
-}
-
-static IIO_DEV_ATTR_CH2_VALUE(ad7150_show_ch2_value);
-
-static ssize_t ad7150_show_threshold_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%s\n", chip->threshold_mode);
-}
-
-static ssize_t ad7150_store_threshold_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
- u8 cfg;
-
- ad7150_i2c_read(chip, AD7150_CFG, &cfg, 1);
-
- if (strncmp(buf, "fixed", 5) == 0) {
- strcpy(chip->threshold_mode, "fixed");
- cfg |= AD7150_CFG_FIX;
- ad7150_i2c_write(chip, AD7150_CFG, cfg);
-
- return len;
- } else if (strncmp(buf, "adaptive", 8) == 0) {
- strcpy(chip->threshold_mode, "adaptive");
- cfg &= ~AD7150_CFG_FIX;
- ad7150_i2c_write(chip, AD7150_CFG, cfg);
-
- return len;
- }
-
- dev_err(dev, "not supported threshold mode\n");
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_THRESHOLD_MODE(S_IRUGO | S_IWUSR,
- ad7150_show_threshold_mode,
- ad7150_store_threshold_mode);
-
-static ssize_t ad7150_show_ch1_threshold(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", chip->ch1_threshold);
-}
-
-static ssize_t ad7150_store_ch1_threshold(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x10000)) {
- ad7150_i2c_write(chip, AD7150_CH1_THR_HOLD_H, data >> 8);
- ad7150_i2c_write(chip, AD7150_CH1_THR_HOLD_L, data);
- chip->ch1_threshold = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH1_THRESHOLD(S_IRUGO | S_IWUSR,
- ad7150_show_ch1_threshold,
- ad7150_store_ch1_threshold);
-
-static ssize_t ad7150_show_ch2_threshold(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", chip->ch2_threshold);
-}
-
-static ssize_t ad7150_store_ch2_threshold(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x10000)) {
- ad7150_i2c_write(chip, AD7150_CH2_THR_HOLD_H, data >> 8);
- ad7150_i2c_write(chip, AD7150_CH2_THR_HOLD_L, data);
- chip->ch2_threshold = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH2_THRESHOLD(S_IRUGO | S_IWUSR,
- ad7150_show_ch2_threshold,
- ad7150_store_ch2_threshold);
-
-static ssize_t ad7150_show_ch1_sensitivity(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", chip->ch1_sensitivity);
-}
-
-static ssize_t ad7150_store_ch1_sensitivity(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x100)) {
- ad7150_i2c_write(chip, AD7150_CH1_SENSITIVITY, data);
- chip->ch1_sensitivity = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH1_SENSITIVITY(S_IRUGO | S_IWUSR,
- ad7150_show_ch1_sensitivity,
- ad7150_store_ch1_sensitivity);
-
-static ssize_t ad7150_show_ch2_sensitivity(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", chip->ch2_sensitivity);
-}
-
-static ssize_t ad7150_store_ch2_sensitivity(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x100)) {
- ad7150_i2c_write(chip, AD7150_CH2_SENSITIVITY, data);
- chip->ch2_sensitivity = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH2_SENSITIVITY(S_IRUGO | S_IWUSR,
- ad7150_show_ch2_sensitivity,
- ad7150_store_ch2_sensitivity);
-
-static ssize_t ad7150_show_ch1_timeout(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", chip->ch1_timeout);
-}
-
-static ssize_t ad7150_store_ch1_timeout(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x100)) {
- ad7150_i2c_write(chip, AD7150_CH1_TIMEOUT, data);
- chip->ch1_timeout = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH1_TIMEOUT(S_IRUGO | S_IWUSR,
- ad7150_show_ch1_timeout,
- ad7150_store_ch1_timeout);
-
-static ssize_t ad7150_show_ch2_timeout(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", chip->ch2_timeout);
-}
-
-static ssize_t ad7150_store_ch2_timeout(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x100)) {
- ad7150_i2c_write(chip, AD7150_CH2_TIMEOUT, data);
- chip->ch2_timeout = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH2_TIMEOUT(S_IRUGO | S_IWUSR,
- ad7150_show_ch2_timeout,
- ad7150_store_ch2_timeout);
-
-static ssize_t ad7150_show_ch1_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "0x%02x\n", chip->ch1_setup);
-}
-
-static ssize_t ad7150_store_ch1_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x100)) {
- ad7150_i2c_write(chip, AD7150_CH1_SETUP, data);
- chip->ch1_setup = data;
- return len;
- }
-
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH1_SETUP(S_IRUGO | S_IWUSR,
- ad7150_show_ch1_setup,
- ad7150_store_ch1_setup);
-
-static ssize_t ad7150_show_ch2_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "0x%02x\n", chip->ch2_setup);
-}
-
-static ssize_t ad7150_store_ch2_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x100)) {
- ad7150_i2c_write(chip, AD7150_CH2_SETUP, data);
- chip->ch2_setup = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH2_SETUP(S_IRUGO | S_IWUSR,
- ad7150_show_ch2_setup,
- ad7150_store_ch2_setup);
-
-static ssize_t ad7150_show_powerdown_timer(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "0x%02x\n", chip->powerdown_timer);
-}
-
-static ssize_t ad7150_store_powerdown_timer(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x40)) {
- chip->powerdown_timer = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_POWERDOWN_TIMER(S_IRUGO | S_IWUSR,
- ad7150_show_powerdown_timer,
- ad7150_store_powerdown_timer);
-
-static struct attribute *ad7150_attributes[] = {
- &iio_dev_attr_available_threshold_modes.dev_attr.attr,
- &iio_dev_attr_threshold_mode.dev_attr.attr,
- &iio_dev_attr_ch1_threshold.dev_attr.attr,
- &iio_dev_attr_ch2_threshold.dev_attr.attr,
- &iio_dev_attr_ch1_timeout.dev_attr.attr,
- &iio_dev_attr_ch2_timeout.dev_attr.attr,
- &iio_dev_attr_ch1_setup.dev_attr.attr,
- &iio_dev_attr_ch2_setup.dev_attr.attr,
- &iio_dev_attr_ch1_sensitivity.dev_attr.attr,
- &iio_dev_attr_ch2_sensitivity.dev_attr.attr,
- &iio_dev_attr_powerdown_timer.dev_attr.attr,
- &iio_dev_attr_ch1_value.dev_attr.attr,
- &iio_dev_attr_ch2_value.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad7150_attribute_group = {
- .attrs = ad7150_attributes,
-};
-
-/*
- * threshold events
- */
-
-static irqreturn_t ad7150_event_handler(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- struct ad7150_chip_info *chip = iio_priv(indio_dev);
- u8 int_status;
- s64 timestamp = iio_get_time_ns();
-
- ad7150_i2c_read(chip, AD7150_STATUS, &int_status, 1);
-
- if ((int_status & AD7150_STATUS_OUT1) && !(chip->old_state & AD7150_STATUS_OUT1))
- iio_push_event(indio_dev, 0,
- IIO_UNMOD_EVENT_CODE(IIO_EV_CLASS_IN,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- timestamp);
- else if ((!(int_status & AD7150_STATUS_OUT1)) && (chip->old_state & AD7150_STATUS_OUT1))
- iio_push_event(indio_dev, 0,
- IIO_UNMOD_EVENT_CODE(IIO_EV_CLASS_IN,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- timestamp);
-
- if ((int_status & AD7150_STATUS_OUT2) && !(chip->old_state & AD7150_STATUS_OUT2))
- iio_push_event(indio_dev, 0,
- IIO_UNMOD_EVENT_CODE(IIO_EV_CLASS_IN,
- 1,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- timestamp);
- else if ((!(int_status & AD7150_STATUS_OUT2)) && (chip->old_state & AD7150_STATUS_OUT2))
- iio_push_event(indio_dev, 0,
- IIO_UNMOD_EVENT_CODE(IIO_EV_CLASS_IN,
- 1,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- timestamp);
- return IRQ_HANDLED;
-}
-
-static IIO_CONST_ATTR(ch1_high_en, "1");
-static IIO_CONST_ATTR(ch2_high_en, "1");
-static IIO_CONST_ATTR(ch1_low_en, "1");
-static IIO_CONST_ATTR(ch2_low_en, "1");
-
-static struct attribute *ad7150_event_attributes[] = {
- &iio_const_attr_ch1_high_en.dev_attr.attr,
- &iio_const_attr_ch2_high_en.dev_attr.attr,
- &iio_const_attr_ch1_low_en.dev_attr.attr,
- &iio_const_attr_ch2_low_en.dev_attr.attr,
- NULL,
-};
-
-static struct attribute_group ad7150_event_attribute_group = {
- .attrs = ad7150_event_attributes,
-};
-
-static const struct iio_info ad7150_info = {
- .attrs = &ad7150_attribute_group,
- .num_interrupt_lines = 1,
- .event_attrs = &ad7150_event_attribute_group,
- .driver_module = THIS_MODULE,
-};
-/*
- * device probe and remove
- */
-
-static int __devinit ad7150_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int ret = 0, regdone = 0;
- struct ad7150_chip_info *chip;
- struct iio_dev *indio_dev;
-
- indio_dev = iio_allocate_device(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- chip = iio_priv(indio_dev);
- /* this is only used for device removal purposes */
- i2c_set_clientdata(client, indio_dev);
-
- chip->client = client;
-
- /* Establish that the iio_dev is a child of the i2c device */
- indio_dev->name = id->name;
- indio_dev->dev.parent = &client->dev;
-
- indio_dev->info = &ad7150_info;
-
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
- regdone = 1;
-
- if (client->irq) {
- ret = request_threaded_irq(client->irq,
- NULL,
- &ad7150_event_handler,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING,
- "ad7150",
- indio_dev);
- if (ret)
- goto error_free_dev;
- }
-
- dev_err(&client->dev, "%s capacitive sensor registered, irq: %d\n", id->name, client->irq);
-
- return 0;
-
-error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
-error_ret:
- return ret;
-}
-
-static int __devexit ad7150_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- if (client->irq)
- free_irq(client->irq, indio_dev);
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
-static const struct i2c_device_id ad7150_id[] = {
- { "ad7150", 0 },
- { "ad7151", 0 },
- { "ad7156", 0 },
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, ad7150_id);
-
-static struct i2c_driver ad7150_driver = {
- .driver = {
- .name = "ad7150",
- },
- .probe = ad7150_probe,
- .remove = __devexit_p(ad7150_remove),
- .id_table = ad7150_id,
-};
-
-static __init int ad7150_init(void)
-{
- return i2c_add_driver(&ad7150_driver);
-}
-
-static __exit void ad7150_exit(void)
-{
- i2c_del_driver(&ad7150_driver);
-}
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ad7150/1/6 capacitive sensor driver");
-MODULE_LICENSE("GPL v2");
-
-module_init(ad7150_init);
-module_exit(ad7150_exit);
diff --git a/drivers/staging/iio/adc/ad7152.c b/drivers/staging/iio/adc/ad7152.c
deleted file mode 100644
index 21f5f380fb5..00000000000
--- a/drivers/staging/iio/adc/ad7152.c
+++ /dev/null
@@ -1,586 +0,0 @@
-/*
- * AD7152 capacitive sensor driver supporting AD7152/3
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/i2c.h>
-
-#include "../iio.h"
-#include "../sysfs.h"
-
-/*
- * AD7152 registers definition
- */
-
-#define AD7152_STATUS 0
-#define AD7152_STATUS_RDY1 (1 << 0)
-#define AD7152_STATUS_RDY2 (1 << 1)
-#define AD7152_CH1_DATA_HIGH 1
-#define AD7152_CH1_DATA_LOW 2
-#define AD7152_CH2_DATA_HIGH 3
-#define AD7152_CH2_DATA_LOW 4
-#define AD7152_CH1_OFFS_HIGH 5
-#define AD7152_CH1_OFFS_LOW 6
-#define AD7152_CH2_OFFS_HIGH 7
-#define AD7152_CH2_OFFS_LOW 8
-#define AD7152_CH1_GAIN_HIGH 9
-#define AD7152_CH1_GAIN_LOW 10
-#define AD7152_CH1_SETUP 11
-#define AD7152_CH2_GAIN_HIGH 12
-#define AD7152_CH2_GAIN_LOW 13
-#define AD7152_CH2_SETUP 14
-#define AD7152_CFG 15
-#define AD7152_RESEVERD 16
-#define AD7152_CAPDAC_POS 17
-#define AD7152_CAPDAC_NEG 18
-#define AD7152_CFG2 26
-
-#define AD7152_MAX_CONV_MODE 6
-
-/*
- * struct ad7152_chip_info - chip specifc information
- */
-
-struct ad7152_chip_info {
- struct i2c_client *client;
- u16 ch1_offset; /* Channel 1 offset calibration coefficient */
- u16 ch1_gain; /* Channel 1 gain coefficient */
- u8 ch1_setup;
- u16 ch2_offset; /* Channel 2 offset calibration coefficient */
- u16 ch2_gain; /* Channel 1 gain coefficient */
- u8 ch2_setup;
- u8 filter_rate_setup; /* Capacitive channel digital filter setup; conversion time/update rate setup per channel */
- char *conversion_mode;
-};
-
-struct ad7152_conversion_mode {
- char *name;
- u8 reg_cfg;
-};
-
-static struct ad7152_conversion_mode
-ad7152_conv_mode_table[AD7152_MAX_CONV_MODE] = {
- { "idle", 0 },
- { "continuous-conversion", 1 },
- { "single-conversion", 2 },
- { "power-down", 3 },
- { "offset-calibration", 5 },
- { "gain-calibration", 6 },
-};
-
-/*
- * ad7152 register access by I2C
- */
-
-static int ad7152_i2c_read(struct ad7152_chip_info *chip, u8 reg, u8 *data, int len)
-{
- struct i2c_client *client = chip->client;
- int ret;
-
- ret = i2c_master_send(client, &reg, 1);
- if (ret < 0) {
- dev_err(&client->dev, "I2C write error\n");
- return ret;
- }
-
- ret = i2c_master_recv(client, data, len);
- if (ret < 0) {
- dev_err(&client->dev, "I2C read error\n");
- }
-
- return ret;
-}
-
-static int ad7152_i2c_write(struct ad7152_chip_info *chip, u8 reg, u8 data)
-{
- struct i2c_client *client = chip->client;
- int ret;
-
- u8 tx[2] = {
- reg,
- data,
- };
-
- ret = i2c_master_send(client, tx, 2);
- if (ret < 0)
- dev_err(&client->dev, "I2C write error\n");
-
- return ret;
-}
-
-/*
- * sysfs nodes
- */
-
-#define IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(_show) \
- IIO_DEVICE_ATTR(available_conversion_modes, S_IRUGO, _show, NULL, 0)
-#define IIO_DEV_ATTR_CONVERSION_MODE(_mode, _show, _store) \
- IIO_DEVICE_ATTR(conversion_mode, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH1_OFFSET(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch1_offset, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH2_OFFSET(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch2_offset, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH1_GAIN(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch1_gain, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH2_GAIN(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch2_gain, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH1_VALUE(_show) \
- IIO_DEVICE_ATTR(ch1_value, S_IRUGO, _show, NULL, 0)
-#define IIO_DEV_ATTR_CH2_VALUE(_show) \
- IIO_DEVICE_ATTR(ch2_value, S_IRUGO, _show, NULL, 0)
-#define IIO_DEV_ATTR_CH1_SETUP(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch1_setup, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CH2_SETUP(_mode, _show, _store) \
- IIO_DEVICE_ATTR(ch2_setup, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_FILTER_RATE_SETUP(_mode, _show, _store) \
- IIO_DEVICE_ATTR(filter_rate_setup, _mode, _show, _store, 0)
-
-static ssize_t ad7152_show_conversion_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int i;
- int len = 0;
-
- for (i = 0; i < AD7152_MAX_CONV_MODE; i++)
- len += sprintf(buf + len, "%s ", ad7152_conv_mode_table[i].name);
-
- len += sprintf(buf + len, "\n");
-
- return len;
-}
-
-static IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(ad7152_show_conversion_modes);
-
-static ssize_t ad7152_show_ch1_value(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
- u8 data[2];
-
- ad7152_i2c_read(chip, AD7152_CH1_DATA_HIGH, data, 2);
- return sprintf(buf, "%d\n", ((int)data[0] << 8) | data[1]);
-}
-
-static IIO_DEV_ATTR_CH1_VALUE(ad7152_show_ch1_value);
-
-static ssize_t ad7152_show_ch2_value(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
- u8 data[2];
-
- ad7152_i2c_read(chip, AD7152_CH2_DATA_HIGH, data, 2);
- return sprintf(buf, "%d\n", ((int)data[0] << 8) | data[1]);
-}
-
-static IIO_DEV_ATTR_CH2_VALUE(ad7152_show_ch2_value);
-
-static ssize_t ad7152_show_conversion_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%s\n", chip->conversion_mode);
-}
-
-static ssize_t ad7152_store_conversion_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
- u8 cfg;
- int i;
-
- ad7152_i2c_read(chip, AD7152_CFG, &cfg, 1);
-
- for (i = 0; i < AD7152_MAX_CONV_MODE; i++)
- if (strncmp(buf, ad7152_conv_mode_table[i].name,
- strlen(ad7152_conv_mode_table[i].name) - 1) == 0) {
- chip->conversion_mode = ad7152_conv_mode_table[i].name;
- cfg |= 0x18 | ad7152_conv_mode_table[i].reg_cfg;
- ad7152_i2c_write(chip, AD7152_CFG, cfg);
- return len;
- }
-
- dev_err(dev, "not supported conversion mode\n");
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CONVERSION_MODE(S_IRUGO | S_IWUSR,
- ad7152_show_conversion_mode,
- ad7152_store_conversion_mode);
-
-static ssize_t ad7152_show_ch1_offset(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", chip->ch1_offset);
-}
-
-static ssize_t ad7152_store_ch1_offset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x10000)) {
- ad7152_i2c_write(chip, AD7152_CH1_OFFS_HIGH, data >> 8);
- ad7152_i2c_write(chip, AD7152_CH1_OFFS_LOW, data);
- chip->ch1_offset = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH1_OFFSET(S_IRUGO | S_IWUSR,
- ad7152_show_ch1_offset,
- ad7152_store_ch1_offset);
-
-static ssize_t ad7152_show_ch2_offset(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", chip->ch2_offset);
-}
-
-static ssize_t ad7152_store_ch2_offset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x10000)) {
- ad7152_i2c_write(chip, AD7152_CH2_OFFS_HIGH, data >> 8);
- ad7152_i2c_write(chip, AD7152_CH2_OFFS_LOW, data);
- chip->ch2_offset = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH2_OFFSET(S_IRUGO | S_IWUSR,
- ad7152_show_ch2_offset,
- ad7152_store_ch2_offset);
-
-static ssize_t ad7152_show_ch1_gain(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", chip->ch1_gain);
-}
-
-static ssize_t ad7152_store_ch1_gain(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x10000)) {
- ad7152_i2c_write(chip, AD7152_CH1_GAIN_HIGH, data >> 8);
- ad7152_i2c_write(chip, AD7152_CH1_GAIN_LOW, data);
- chip->ch1_gain = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH1_GAIN(S_IRUGO | S_IWUSR,
- ad7152_show_ch1_gain,
- ad7152_store_ch1_gain);
-
-static ssize_t ad7152_show_ch2_gain(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", chip->ch2_gain);
-}
-
-static ssize_t ad7152_store_ch2_gain(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x10000)) {
- ad7152_i2c_write(chip, AD7152_CH2_GAIN_HIGH, data >> 8);
- ad7152_i2c_write(chip, AD7152_CH2_GAIN_LOW, data);
- chip->ch2_gain = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH2_GAIN(S_IRUGO | S_IWUSR,
- ad7152_show_ch2_gain,
- ad7152_store_ch2_gain);
-
-static ssize_t ad7152_show_ch1_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "0x%02x\n", chip->ch1_setup);
-}
-
-static ssize_t ad7152_store_ch1_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x100)) {
- ad7152_i2c_write(chip, AD7152_CH1_SETUP, data);
- chip->ch1_setup = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH1_SETUP(S_IRUGO | S_IWUSR,
- ad7152_show_ch1_setup,
- ad7152_store_ch1_setup);
-
-static ssize_t ad7152_show_ch2_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "0x%02x\n", chip->ch2_setup);
-}
-
-static ssize_t ad7152_store_ch2_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x100)) {
- ad7152_i2c_write(chip, AD7152_CH2_SETUP, data);
- chip->ch2_setup = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CH2_SETUP(S_IRUGO | S_IWUSR,
- ad7152_show_ch2_setup,
- ad7152_store_ch2_setup);
-
-static ssize_t ad7152_show_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "0x%02x\n", chip->filter_rate_setup);
-}
-
-static ssize_t ad7152_store_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x100)) {
- ad7152_i2c_write(chip, AD7152_CFG2, data);
- chip->filter_rate_setup = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_FILTER_RATE_SETUP(S_IRUGO | S_IWUSR,
- ad7152_show_filter_rate_setup,
- ad7152_store_filter_rate_setup);
-
-static struct attribute *ad7152_attributes[] = {
- &iio_dev_attr_available_conversion_modes.dev_attr.attr,
- &iio_dev_attr_conversion_mode.dev_attr.attr,
- &iio_dev_attr_ch1_gain.dev_attr.attr,
- &iio_dev_attr_ch2_gain.dev_attr.attr,
- &iio_dev_attr_ch1_offset.dev_attr.attr,
- &iio_dev_attr_ch2_offset.dev_attr.attr,
- &iio_dev_attr_ch1_value.dev_attr.attr,
- &iio_dev_attr_ch2_value.dev_attr.attr,
- &iio_dev_attr_ch1_setup.dev_attr.attr,
- &iio_dev_attr_ch2_setup.dev_attr.attr,
- &iio_dev_attr_filter_rate_setup.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad7152_attribute_group = {
- .attrs = ad7152_attributes,
-};
-
-static const struct iio_info ad7152_info = {
- .attrs = &ad7152_attribute_group,
- .driver_module = THIS_MODULE,
-};
-/*
- * device probe and remove
- */
-
-static int __devinit ad7152_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int ret = 0;
- struct ad7152_chip_info *chip;
- struct iio_dev *indio_dev;
-
- indio_dev = iio_allocate_device(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- chip = iio_priv(indio_dev);
- /* this is only used for device removal purposes */
- i2c_set_clientdata(client, indio_dev);
-
- chip->client = client;
-
- /* Echipabilish that the iio_dev is a child of the i2c device */
- indio_dev->name = id->name;
- indio_dev->dev.parent = &client->dev;
- indio_dev->info = &ad7152_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
-
- dev_err(&client->dev, "%s capacitive sensor registered\n", id->name);
-
- return 0;
-
-error_free_dev:
- iio_free_device(indio_dev);
-error_ret:
- return ret;
-}
-
-static int __devexit ad7152_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
-static const struct i2c_device_id ad7152_id[] = {
- { "ad7152", 0 },
- { "ad7153", 0 },
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, ad7152_id);
-
-static struct i2c_driver ad7152_driver = {
- .driver = {
- .name = "ad7152",
- },
- .probe = ad7152_probe,
- .remove = __devexit_p(ad7152_remove),
- .id_table = ad7152_id,
-};
-
-static __init int ad7152_init(void)
-{
- return i2c_add_driver(&ad7152_driver);
-}
-
-static __exit void ad7152_exit(void)
-{
- i2c_del_driver(&ad7152_driver);
-}
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ad7152/3 capacitive sensor driver");
-MODULE_LICENSE("GPL v2");
-
-module_init(ad7152_init);
-module_exit(ad7152_exit);
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
new file mode 100644
index 00000000000..31c376b9d5e
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -0,0 +1,1179 @@
+/*
+ * AD7190 AD7192 AD7195 SPI ADC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../buffer_generic.h"
+#include "../ring_sw.h"
+#include "../trigger.h"
+#include "../trigger_consumer.h"
+
+#include "ad7192.h"
+
+/* Registers */
+#define AD7192_REG_COMM 0 /* Communications Register (WO, 8-bit) */
+#define AD7192_REG_STAT 0 /* Status Register (RO, 8-bit) */
+#define AD7192_REG_MODE 1 /* Mode Register (RW, 24-bit */
+#define AD7192_REG_CONF 2 /* Configuration Register (RW, 24-bit) */
+#define AD7192_REG_DATA 3 /* Data Register (RO, 24/32-bit) */
+#define AD7192_REG_ID 4 /* ID Register (RO, 8-bit) */
+#define AD7192_REG_GPOCON 5 /* GPOCON Register (RO, 8-bit) */
+#define AD7192_REG_OFFSET 6 /* Offset Register (RW, 16-bit
+ * (AD7792)/24-bit (AD7192)) */
+#define AD7192_REG_FULLSALE 7 /* Full-Scale Register
+ * (RW, 16-bit (AD7792)/24-bit (AD7192)) */
+
+/* Communications Register Bit Designations (AD7192_REG_COMM) */
+#define AD7192_COMM_WEN (1 << 7) /* Write Enable */
+#define AD7192_COMM_WRITE (0 << 6) /* Write Operation */
+#define AD7192_COMM_READ (1 << 6) /* Read Operation */
+#define AD7192_COMM_ADDR(x) (((x) & 0x7) << 3) /* Register Address */
+#define AD7192_COMM_CREAD (1 << 2) /* Continuous Read of Data Register */
+
+/* Status Register Bit Designations (AD7192_REG_STAT) */
+#define AD7192_STAT_RDY (1 << 7) /* Ready */
+#define AD7192_STAT_ERR (1 << 6) /* Error (Overrange, Underrange) */
+#define AD7192_STAT_NOREF (1 << 5) /* Error no external reference */
+#define AD7192_STAT_PARITY (1 << 4) /* Parity */
+#define AD7192_STAT_CH3 (1 << 2) /* Channel 3 */
+#define AD7192_STAT_CH2 (1 << 1) /* Channel 2 */
+#define AD7192_STAT_CH1 (1 << 0) /* Channel 1 */
+
+/* Mode Register Bit Designations (AD7192_REG_MODE) */
+#define AD7192_MODE_SEL(x) (((x) & 0x7) << 21) /* Operation Mode Select */
+#define AD7192_MODE_DAT_STA (1 << 20) /* Status Register transmission */
+#define AD7192_MODE_CLKSRC(x) (((x) & 0x3) << 18) /* Clock Source Select */
+#define AD7192_MODE_SINC3 (1 << 15) /* SINC3 Filter Select */
+#define AD7192_MODE_ACX (1 << 14) /* AC excitation enable(AD7195 only)*/
+#define AD7192_MODE_ENPAR (1 << 13) /* Parity Enable */
+#define AD7192_MODE_CLKDIV (1 << 12) /* Clock divide by 2 (AD7190/2 only)*/
+#define AD7192_MODE_SCYCLE (1 << 11) /* Single cycle conversion */
+#define AD7192_MODE_REJ60 (1 << 10) /* 50/60Hz notch filter */
+#define AD7192_MODE_RATE(x) ((x) & 0x3FF) /* Filter Update Rate Select */
+
+/* Mode Register: AD7192_MODE_SEL options */
+#define AD7192_MODE_CONT 0 /* Continuous Conversion Mode */
+#define AD7192_MODE_SINGLE 1 /* Single Conversion Mode */
+#define AD7192_MODE_IDLE 2 /* Idle Mode */
+#define AD7192_MODE_PWRDN 3 /* Power-Down Mode */
+#define AD7192_MODE_CAL_INT_ZERO 4 /* Internal Zero-Scale Calibration */
+#define AD7192_MODE_CAL_INT_FULL 5 /* Internal Full-Scale Calibration */
+#define AD7192_MODE_CAL_SYS_ZERO 6 /* System Zero-Scale Calibration */
+#define AD7192_MODE_CAL_SYS_FULL 7 /* System Full-Scale Calibration */
+
+/* Mode Register: AD7192_MODE_CLKSRC options */
+#define AD7192_CLK_EXT_MCLK1_2 0 /* External 4.92 MHz Clock connected
+ * from MCLK1 to MCLK2 */
+#define AD7192_CLK_EXT_MCLK2 1 /* External Clock applied to MCLK2 */
+#define AD7192_CLK_INT 2 /* Internal 4.92 MHz Clock not
+ * available at the MCLK2 pin */
+#define AD7192_CLK_INT_CO 3 /* Internal 4.92 MHz Clock available
+ * at the MCLK2 pin */
+
+
+/* Configuration Register Bit Designations (AD7192_REG_CONF) */
+
+#define AD7192_CONF_CHOP (1 << 23) /* CHOP enable */
+#define AD7192_CONF_REFSEL (1 << 20) /* REFIN1/REFIN2 Reference Select */
+#define AD7192_CONF_CHAN(x) (((x) & 0xFF) << 8) /* Channel select */
+#define AD7192_CONF_BURN (1 << 7) /* Burnout current enable */
+#define AD7192_CONF_REFDET (1 << 6) /* Reference detect enable */
+#define AD7192_CONF_BUF (1 << 4) /* Buffered Mode Enable */
+#define AD7192_CONF_UNIPOLAR (1 << 3) /* Unipolar/Bipolar Enable */
+#define AD7192_CONF_GAIN(x) ((x) & 0x7) /* Gain Select */
+
+#define AD7192_CH_AIN1P_AIN2M 0 /* AIN1(+) - AIN2(-) */
+#define AD7192_CH_AIN3P_AIN4M 1 /* AIN3(+) - AIN4(-) */
+#define AD7192_CH_TEMP 2 /* Temp Sensor */
+#define AD7192_CH_AIN2P_AIN2M 3 /* AIN2(+) - AIN2(-) */
+#define AD7192_CH_AIN1 4 /* AIN1 - AINCOM */
+#define AD7192_CH_AIN2 5 /* AIN2 - AINCOM */
+#define AD7192_CH_AIN3 6 /* AIN3 - AINCOM */
+#define AD7192_CH_AIN4 7 /* AIN4 - AINCOM */
+
+/* ID Register Bit Designations (AD7192_REG_ID) */
+#define ID_AD7190 0x4
+#define ID_AD7192 0x0
+#define ID_AD7195 0x6
+#define AD7192_ID_MASK 0x0F
+
+/* GPOCON Register Bit Designations (AD7192_REG_GPOCON) */
+#define AD7192_GPOCON_BPDSW (1 << 6) /* Bridge power-down switch enable */
+#define AD7192_GPOCON_GP32EN (1 << 5) /* Digital Output P3 and P2 enable */
+#define AD7192_GPOCON_GP10EN (1 << 4) /* Digital Output P1 and P0 enable */
+#define AD7192_GPOCON_P3DAT (1 << 3) /* P3 state */
+#define AD7192_GPOCON_P2DAT (1 << 2) /* P2 state */
+#define AD7192_GPOCON_P1DAT (1 << 1) /* P1 state */
+#define AD7192_GPOCON_P0DAT (1 << 0) /* P0 state */
+
+#define AD7192_INT_FREQ_MHz 4915200
+
+/* NOTE:
+ * The AD7190/2/5 features a dual use data out ready DOUT/RDY output.
+ * In order to avoid contentions on the SPI bus, it's therefore necessary
+ * to use spi bus locking.
+ *
+ * The DOUT/RDY output must also be wired to an interrupt capable GPIO.
+ */
+
+struct ad7192_state {
+ struct spi_device *spi;
+ struct iio_trigger *trig;
+ struct regulator *reg;
+ struct ad7192_platform_data *pdata;
+ wait_queue_head_t wq_data_avail;
+ bool done;
+ bool irq_dis;
+ u16 int_vref_mv;
+ u32 mclk;
+ u32 f_order;
+ u32 mode;
+ u32 conf;
+ u32 scale_avail[8][2];
+ long available_scan_masks[9];
+ u8 gpocon;
+ u8 devid;
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ u8 data[4] ____cacheline_aligned;
+};
+
+static int __ad7192_write_reg(struct ad7192_state *st, bool locked,
+ bool cs_change, unsigned char reg,
+ unsigned size, unsigned val)
+{
+ u8 *data = st->data;
+ struct spi_transfer t = {
+ .tx_buf = data,
+ .len = size + 1,
+ .cs_change = cs_change,
+ };
+ struct spi_message m;
+
+ data[0] = AD7192_COMM_WRITE | AD7192_COMM_ADDR(reg);
+
+ switch (size) {
+ case 3:
+ data[1] = val >> 16;
+ data[2] = val >> 8;
+ data[3] = val;
+ break;
+ case 2:
+ data[1] = val >> 8;
+ data[2] = val;
+ break;
+ case 1:
+ data[1] = val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+
+ if (locked)
+ return spi_sync_locked(st->spi, &m);
+ else
+ return spi_sync(st->spi, &m);
+}
+
+static int ad7192_write_reg(struct ad7192_state *st,
+ unsigned reg, unsigned size, unsigned val)
+{
+ return __ad7192_write_reg(st, false, false, reg, size, val);
+}
+
+static int __ad7192_read_reg(struct ad7192_state *st, bool locked,
+ bool cs_change, unsigned char reg,
+ int *val, unsigned size)
+{
+ u8 *data = st->data;
+ int ret;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = data,
+ .len = 1,
+ }, {
+ .rx_buf = data,
+ .len = size,
+ .cs_change = cs_change,
+ },
+ };
+ struct spi_message m;
+
+ data[0] = AD7192_COMM_READ | AD7192_COMM_ADDR(reg);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ if (locked)
+ ret = spi_sync_locked(st->spi, &m);
+ else
+ ret = spi_sync(st->spi, &m);
+
+ if (ret < 0)
+ return ret;
+
+ switch (size) {
+ case 3:
+ *val = data[0] << 16 | data[1] << 8 | data[2];
+ break;
+ case 2:
+ *val = data[0] << 8 | data[1];
+ break;
+ case 1:
+ *val = data[0];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ad7192_read_reg(struct ad7192_state *st,
+ unsigned reg, int *val, unsigned size)
+{
+ return __ad7192_read_reg(st, 0, 0, reg, val, size);
+}
+
+static int ad7192_read(struct ad7192_state *st, unsigned ch,
+ unsigned len, int *val)
+{
+ int ret;
+ st->conf = (st->conf & ~AD7192_CONF_CHAN(-1)) |
+ AD7192_CONF_CHAN(1 << ch);
+ st->mode = (st->mode & ~AD7192_MODE_SEL(-1)) |
+ AD7192_MODE_SEL(AD7192_MODE_SINGLE);
+
+ ad7192_write_reg(st, AD7192_REG_CONF, 3, st->conf);
+
+ spi_bus_lock(st->spi->master);
+ st->done = false;
+
+ ret = __ad7192_write_reg(st, 1, 1, AD7192_REG_MODE, 3, st->mode);
+ if (ret < 0)
+ goto out;
+
+ st->irq_dis = false;
+ enable_irq(st->spi->irq);
+ wait_event_interruptible(st->wq_data_avail, st->done);
+
+ ret = __ad7192_read_reg(st, 1, 0, AD7192_REG_DATA, val, len);
+out:
+ spi_bus_unlock(st->spi->master);
+
+ return ret;
+}
+
+static int ad7192_calibrate(struct ad7192_state *st, unsigned mode, unsigned ch)
+{
+ int ret;
+
+ st->conf = (st->conf & ~AD7192_CONF_CHAN(-1)) |
+ AD7192_CONF_CHAN(1 << ch);
+ st->mode = (st->mode & ~AD7192_MODE_SEL(-1)) | AD7192_MODE_SEL(mode);
+
+ ad7192_write_reg(st, AD7192_REG_CONF, 3, st->conf);
+
+ spi_bus_lock(st->spi->master);
+ st->done = false;
+
+ ret = __ad7192_write_reg(st, 1, 1, AD7192_REG_MODE, 3,
+ (st->devid != ID_AD7195) ?
+ st->mode | AD7192_MODE_CLKDIV :
+ st->mode);
+ if (ret < 0)
+ goto out;
+
+ st->irq_dis = false;
+ enable_irq(st->spi->irq);
+ wait_event_interruptible(st->wq_data_avail, st->done);
+
+ st->mode = (st->mode & ~AD7192_MODE_SEL(-1)) |
+ AD7192_MODE_SEL(AD7192_MODE_IDLE);
+
+ ret = __ad7192_write_reg(st, 1, 0, AD7192_REG_MODE, 3, st->mode);
+out:
+ spi_bus_unlock(st->spi->master);
+
+ return ret;
+}
+
+static const u8 ad7192_calib_arr[8][2] = {
+ {AD7192_MODE_CAL_INT_ZERO, AD7192_CH_AIN1},
+ {AD7192_MODE_CAL_INT_FULL, AD7192_CH_AIN1},
+ {AD7192_MODE_CAL_INT_ZERO, AD7192_CH_AIN2},
+ {AD7192_MODE_CAL_INT_FULL, AD7192_CH_AIN2},
+ {AD7192_MODE_CAL_INT_ZERO, AD7192_CH_AIN3},
+ {AD7192_MODE_CAL_INT_FULL, AD7192_CH_AIN3},
+ {AD7192_MODE_CAL_INT_ZERO, AD7192_CH_AIN4},
+ {AD7192_MODE_CAL_INT_FULL, AD7192_CH_AIN4}
+};
+
+static int ad7192_calibrate_all(struct ad7192_state *st)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(ad7192_calib_arr); i++) {
+ ret = ad7192_calibrate(st, ad7192_calib_arr[i][0],
+ ad7192_calib_arr[i][1]);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_err(&st->spi->dev, "Calibration failed\n");
+ return ret;
+}
+
+static int ad7192_setup(struct ad7192_state *st)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(st->spi);
+ struct ad7192_platform_data *pdata = st->pdata;
+ unsigned long long scale_uv;
+ int i, ret, id;
+ u8 ones[6];
+
+ /* reset the serial interface */
+ memset(&ones, 0xFF, 6);
+ ret = spi_write(st->spi, &ones, 6);
+ if (ret < 0)
+ goto out;
+ msleep(1); /* Wait for at least 500us */
+
+ /* write/read test for device presence */
+ ret = ad7192_read_reg(st, AD7192_REG_ID, &id, 1);
+ if (ret)
+ goto out;
+
+ id &= AD7192_ID_MASK;
+
+ if (id != st->devid)
+ dev_warn(&st->spi->dev, "device ID query failed (0x%X)\n", id);
+
+ switch (pdata->clock_source_sel) {
+ case AD7192_CLK_EXT_MCLK1_2:
+ case AD7192_CLK_EXT_MCLK2:
+ st->mclk = AD7192_INT_FREQ_MHz;
+ break;
+ case AD7192_CLK_INT:
+ case AD7192_CLK_INT_CO:
+ if (pdata->ext_clk_Hz)
+ st->mclk = pdata->ext_clk_Hz;
+ else
+ st->mclk = AD7192_INT_FREQ_MHz;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ st->mode = AD7192_MODE_SEL(AD7192_MODE_IDLE) |
+ AD7192_MODE_CLKSRC(pdata->clock_source_sel) |
+ AD7192_MODE_RATE(480);
+
+ st->conf = AD7192_CONF_GAIN(0);
+
+ if (pdata->rej60_en)
+ st->mode |= AD7192_MODE_REJ60;
+
+ if (pdata->sinc3_en)
+ st->mode |= AD7192_MODE_SINC3;
+
+ if (pdata->refin2_en && (st->devid != ID_AD7195))
+ st->conf |= AD7192_CONF_REFSEL;
+
+ if (pdata->chop_en) {
+ st->conf |= AD7192_CONF_CHOP;
+ if (pdata->sinc3_en)
+ st->f_order = 3; /* SINC 3rd order */
+ else
+ st->f_order = 4; /* SINC 4th order */
+ } else {
+ st->f_order = 1;
+ }
+
+ if (pdata->buf_en)
+ st->conf |= AD7192_CONF_BUF;
+
+ if (pdata->unipolar_en)
+ st->conf |= AD7192_CONF_UNIPOLAR;
+
+ if (pdata->burnout_curr_en)
+ st->conf |= AD7192_CONF_BURN;
+
+ ret = ad7192_write_reg(st, AD7192_REG_MODE, 3, st->mode);
+ if (ret)
+ goto out;
+
+ ret = ad7192_write_reg(st, AD7192_REG_CONF, 3, st->conf);
+ if (ret)
+ goto out;
+
+ ret = ad7192_calibrate_all(st);
+ if (ret)
+ goto out;
+
+ /* Populate available ADC input ranges */
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) {
+ scale_uv = ((u64)st->int_vref_mv * 100000000)
+ >> (indio_dev->channels[0].scan_type.realbits -
+ ((st->conf & AD7192_CONF_UNIPOLAR) ? 0 : 1));
+ scale_uv >>= i;
+
+ st->scale_avail[i][1] = do_div(scale_uv, 100000000) * 10;
+ st->scale_avail[i][0] = scale_uv;
+ }
+
+ return 0;
+out:
+ dev_err(&st->spi->dev, "setup failed\n");
+ return ret;
+}
+
+static int ad7192_scan_from_ring(struct ad7192_state *st, unsigned ch, int *val)
+{
+ struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
+ int ret;
+ s64 dat64[2];
+ u32 *dat32 = (u32 *)dat64;
+
+ if (!(test_bit(ch, ring->scan_mask)))
+ return -EBUSY;
+
+ ret = ring->access->read_last(ring, (u8 *) &dat64);
+ if (ret)
+ return ret;
+
+ *val = *dat32;
+
+ return 0;
+}
+
+static int ad7192_ring_preenable(struct iio_dev *indio_dev)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+ struct iio_buffer *ring = indio_dev->buffer;
+ size_t d_size;
+ unsigned channel;
+
+ if (!ring->scan_count)
+ return -EINVAL;
+
+ channel = find_first_bit(ring->scan_mask, indio_dev->masklength);
+
+ d_size = ring->scan_count *
+ indio_dev->channels[0].scan_type.storagebits / 8;
+
+ if (ring->scan_timestamp) {
+ d_size += sizeof(s64);
+
+ if (d_size % sizeof(s64))
+ d_size += sizeof(s64) - (d_size % sizeof(s64));
+ }
+
+ if (indio_dev->buffer->access->set_bytes_per_datum)
+ indio_dev->buffer->access->
+ set_bytes_per_datum(indio_dev->buffer, d_size);
+
+ st->mode = (st->mode & ~AD7192_MODE_SEL(-1)) |
+ AD7192_MODE_SEL(AD7192_MODE_CONT);
+ st->conf = (st->conf & ~AD7192_CONF_CHAN(-1)) |
+ AD7192_CONF_CHAN(1 << indio_dev->channels[channel].address);
+
+ ad7192_write_reg(st, AD7192_REG_CONF, 3, st->conf);
+
+ spi_bus_lock(st->spi->master);
+ __ad7192_write_reg(st, 1, 1, AD7192_REG_MODE, 3, st->mode);
+
+ st->irq_dis = false;
+ enable_irq(st->spi->irq);
+
+ return 0;
+}
+
+static int ad7192_ring_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ st->mode = (st->mode & ~AD7192_MODE_SEL(-1)) |
+ AD7192_MODE_SEL(AD7192_MODE_IDLE);
+
+ st->done = false;
+ wait_event_interruptible(st->wq_data_avail, st->done);
+
+ if (!st->irq_dis)
+ disable_irq_nosync(st->spi->irq);
+
+ __ad7192_write_reg(st, 1, 0, AD7192_REG_MODE, 3, st->mode);
+
+ return spi_bus_unlock(st->spi->master);
+}
+
+/**
+ * ad7192_trigger_handler() bh of trigger launched polling to ring buffer
+ **/
+static irqreturn_t ad7192_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct iio_buffer *ring = indio_dev->buffer;
+ struct ad7192_state *st = iio_priv(indio_dev);
+ s64 dat64[2];
+ s32 *dat32 = (s32 *)dat64;
+
+ if (ring->scan_count)
+ __ad7192_read_reg(st, 1, 1, AD7192_REG_DATA,
+ dat32,
+ indio_dev->channels[0].scan_type.realbits/8);
+
+ /* Guaranteed to be aligned with 8 byte boundary */
+ if (ring->scan_timestamp)
+ dat64[1] = pf->timestamp;
+
+ ring->access->store_to(ring, (u8 *)dat64, pf->timestamp);
+
+ iio_trigger_notify_done(indio_dev->trig);
+ st->irq_dis = false;
+ enable_irq(st->spi->irq);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_buffer_setup_ops ad7192_ring_setup_ops = {
+ .preenable = &ad7192_ring_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
+ .postdisable = &ad7192_ring_postdisable,
+};
+
+static int ad7192_register_ring_funcs_and_init(struct iio_dev *indio_dev)
+{
+ int ret;
+
+ indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
+ if (!indio_dev->buffer) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* Effectively select the ring buffer implementation */
+ indio_dev->buffer->access = &ring_sw_access_funcs;
+ indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
+ &ad7192_trigger_handler,
+ IRQF_ONESHOT,
+ indio_dev,
+ "ad7192_consumer%d",
+ indio_dev->id);
+ if (indio_dev->pollfunc == NULL) {
+ ret = -ENOMEM;
+ goto error_deallocate_sw_rb;
+ }
+
+ /* Ring buffer functions - here trigger setup related */
+ indio_dev->buffer->setup_ops = &ad7192_ring_setup_ops;
+
+ /* Flag that polled ring buffering is possible */
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
+ return 0;
+
+error_deallocate_sw_rb:
+ iio_sw_rb_free(indio_dev->buffer);
+error_ret:
+ return ret;
+}
+
+static void ad7192_ring_cleanup(struct iio_dev *indio_dev)
+{
+ iio_dealloc_pollfunc(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->buffer);
+}
+
+/**
+ * ad7192_data_rdy_trig_poll() the event handler for the data rdy trig
+ **/
+static irqreturn_t ad7192_data_rdy_trig_poll(int irq, void *private)
+{
+ struct ad7192_state *st = iio_priv(private);
+
+ st->done = true;
+ wake_up_interruptible(&st->wq_data_avail);
+ disable_irq_nosync(irq);
+ st->irq_dis = true;
+ iio_trigger_poll(st->trig, iio_get_time_ns());
+
+ return IRQ_HANDLED;
+}
+
+static int ad7192_probe_trigger(struct iio_dev *indio_dev)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+ int ret;
+
+ st->trig = iio_allocate_trigger("%s-dev%d",
+ spi_get_device_id(st->spi)->name,
+ indio_dev->id);
+ if (st->trig == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ ret = request_irq(st->spi->irq,
+ ad7192_data_rdy_trig_poll,
+ IRQF_TRIGGER_LOW,
+ spi_get_device_id(st->spi)->name,
+ indio_dev);
+ if (ret)
+ goto error_free_trig;
+
+ disable_irq_nosync(st->spi->irq);
+ st->irq_dis = true;
+ st->trig->dev.parent = &st->spi->dev;
+ st->trig->owner = THIS_MODULE;
+ st->trig->private_data = indio_dev;
+
+ ret = iio_trigger_register(st->trig);
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+ if (ret)
+ goto error_free_irq;
+
+ return 0;
+
+error_free_irq:
+ free_irq(st->spi->irq, indio_dev);
+error_free_trig:
+ iio_free_trigger(st->trig);
+error_ret:
+ return ret;
+}
+
+static void ad7192_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ iio_trigger_unregister(st->trig);
+ free_irq(st->spi->irq, indio_dev);
+ iio_free_trigger(st->trig);
+}
+
+static ssize_t ad7192_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->mclk /
+ (st->f_order * 1024 * AD7192_MODE_RATE(st->mode)));
+}
+
+static ssize_t ad7192_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7192_state *st = iio_priv(indio_dev);
+ unsigned long lval;
+ int div, ret;
+
+ ret = strict_strtoul(buf, 10, &lval);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev)) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+
+ div = st->mclk / (lval * st->f_order * 1024);
+ if (div < 1 || div > 1023) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ st->mode &= ~AD7192_MODE_RATE(-1);
+ st->mode |= AD7192_MODE_RATE(div);
+ ad7192_write_reg(st, AD7192_REG_MODE, 3, st->mode);
+
+out:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ ad7192_read_frequency,
+ ad7192_write_frequency);
+
+
+static ssize_t ad7192_show_scale_available(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7192_state *st = iio_priv(indio_dev);
+ int i, len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
+ len += sprintf(buf + len, "%d.%09u ", st->scale_avail[i][0],
+ st->scale_avail[i][1]);
+
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR_NAMED(in_v_m_v_scale_available,
+ in_voltage-voltage_scale_available,
+ S_IRUGO, ad7192_show_scale_available, NULL, 0);
+
+static IIO_DEVICE_ATTR(in_voltage_scale_available, S_IRUGO,
+ ad7192_show_scale_available, NULL, 0);
+
+static ssize_t ad7192_show_ac_excitation(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", !!(st->mode & AD7192_MODE_ACX));
+}
+
+static ssize_t ad7192_show_bridge_switch(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", !!(st->gpocon & AD7192_GPOCON_BPDSW));
+}
+
+static ssize_t ad7192_set(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7192_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ bool val;
+
+ ret = strtobool(buf, &val);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev)) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+
+ switch (this_attr->address) {
+ case AD7192_REG_GPOCON:
+ if (val)
+ st->gpocon |= AD7192_GPOCON_BPDSW;
+ else
+ st->gpocon &= ~AD7192_GPOCON_BPDSW;
+
+ ad7192_write_reg(st, AD7192_REG_GPOCON, 1, st->gpocon);
+ break;
+ case AD7192_REG_MODE:
+ if (val)
+ st->mode |= AD7192_MODE_ACX;
+ else
+ st->mode &= ~AD7192_MODE_ACX;
+
+ ad7192_write_reg(st, AD7192_REG_GPOCON, 3, st->mode);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(bridge_switch_en, S_IRUGO | S_IWUSR,
+ ad7192_show_bridge_switch, ad7192_set,
+ AD7192_REG_GPOCON);
+
+static IIO_DEVICE_ATTR(ac_excitation_en, S_IRUGO | S_IWUSR,
+ ad7192_show_ac_excitation, ad7192_set,
+ AD7192_REG_MODE);
+
+static struct attribute *ad7192_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
+ &iio_dev_attr_bridge_switch_en.dev_attr.attr,
+ &iio_dev_attr_ac_excitation_en.dev_attr.attr,
+ NULL
+};
+
+static mode_t ad7192_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ mode_t mode = attr->mode;
+
+ if ((st->devid != ID_AD7195) &&
+ (attr == &iio_dev_attr_ac_excitation_en.dev_attr.attr))
+ mode = 0;
+
+ return mode;
+}
+
+static const struct attribute_group ad7192_attribute_group = {
+ .attrs = ad7192_attributes,
+ .is_visible = ad7192_attr_is_visible,
+};
+
+static int ad7192_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+ int ret, smpl = 0;
+ bool unipolar = !!(st->conf & AD7192_CONF_UNIPOLAR);
+
+ switch (m) {
+ case 0:
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev))
+ ret = ad7192_scan_from_ring(st,
+ chan->scan_index, &smpl);
+ else
+ ret = ad7192_read(st, chan->address,
+ chan->scan_type.realbits / 8, &smpl);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret < 0)
+ return ret;
+
+ *val = (smpl >> chan->scan_type.shift) &
+ ((1 << (chan->scan_type.realbits)) - 1);
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (!unipolar)
+ *val -= (1 << (chan->scan_type.realbits - 1));
+ break;
+ case IIO_TEMP:
+ *val -= 0x800000;
+ *val /= 2815; /* temp Kelvin */
+ *val -= 273; /* temp Celsius */
+ break;
+ default:
+ return -EINVAL;
+ }
+ return IIO_VAL_INT;
+
+ case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ mutex_lock(&indio_dev->mlock);
+ *val = st->scale_avail[AD7192_CONF_GAIN(st->conf)][0];
+ *val2 = st->scale_avail[AD7192_CONF_GAIN(st->conf)][1];
+ mutex_unlock(&indio_dev->mlock);
+
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ *val = 1000;
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static int ad7192_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+ int ret, i;
+ unsigned int tmp;
+
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev)) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+
+ switch (mask) {
+ case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
+ if (val2 == st->scale_avail[i][1]) {
+ tmp = st->conf;
+ st->conf &= ~AD7192_CONF_GAIN(-1);
+ st->conf |= AD7192_CONF_GAIN(i);
+
+ if (tmp != st->conf) {
+ ad7192_write_reg(st, AD7192_REG_CONF,
+ 3, st->conf);
+ ad7192_calibrate_all(st);
+ }
+ ret = 0;
+ }
+
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad7192_validate_trigger(struct iio_dev *indio_dev,
+ struct iio_trigger *trig)
+{
+ if (indio_dev->trig != trig)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ad7192_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static const struct iio_info ad7192_info = {
+ .read_raw = &ad7192_read_raw,
+ .write_raw = &ad7192_write_raw,
+ .write_raw_get_fmt = &ad7192_write_raw_get_fmt,
+ .attrs = &ad7192_attribute_group,
+ .validate_trigger = ad7192_validate_trigger,
+ .driver_module = THIS_MODULE,
+};
+
+#define AD7192_CHAN_DIFF(_chan, _chan2, _name, _address, _si) \
+ { .type = IIO_VOLTAGE, \
+ .differential = 1, \
+ .indexed = 1, \
+ .extend_name = _name, \
+ .channel = _chan, \
+ .channel2 = _chan2, \
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .address = _address, \
+ .scan_index = _si, \
+ .scan_type = IIO_ST('s', 24, 32, 0)}
+
+#define AD7192_CHAN(_chan, _address, _si) \
+ { .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _chan, \
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .address = _address, \
+ .scan_index = _si, \
+ .scan_type = IIO_ST('s', 24, 32, 0)}
+
+#define AD7192_CHAN_TEMP(_chan, _address, _si) \
+ { .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = _chan, \
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ .address = _address, \
+ .scan_index = _si, \
+ .scan_type = IIO_ST('s', 24, 32, 0)}
+
+static struct iio_chan_spec ad7192_channels[] = {
+ AD7192_CHAN_DIFF(1, 2, NULL, AD7192_CH_AIN1P_AIN2M, 0),
+ AD7192_CHAN_DIFF(3, 4, NULL, AD7192_CH_AIN3P_AIN4M, 1),
+ AD7192_CHAN_TEMP(0, AD7192_CH_TEMP, 2),
+ AD7192_CHAN_DIFF(2, 2, "shorted", AD7192_CH_AIN2P_AIN2M, 3),
+ AD7192_CHAN(1, AD7192_CH_AIN1, 4),
+ AD7192_CHAN(2, AD7192_CH_AIN2, 5),
+ AD7192_CHAN(3, AD7192_CH_AIN3, 6),
+ AD7192_CHAN(4, AD7192_CH_AIN4, 7),
+ IIO_CHAN_SOFT_TIMESTAMP(8),
+};
+
+static int __devinit ad7192_probe(struct spi_device *spi)
+{
+ struct ad7192_platform_data *pdata = spi->dev.platform_data;
+ struct ad7192_state *st;
+ struct iio_dev *indio_dev;
+ int ret, i , voltage_uv = 0;
+
+ if (!pdata) {
+ dev_err(&spi->dev, "no platform data?\n");
+ return -ENODEV;
+ }
+
+ if (!spi->irq) {
+ dev_err(&spi->dev, "no IRQ?\n");
+ return -ENODEV;
+ }
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ st->reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ goto error_put_reg;
+
+ voltage_uv = regulator_get_voltage(st->reg);
+ }
+
+ st->pdata = pdata;
+
+ if (pdata && pdata->vref_mv)
+ st->int_vref_mv = pdata->vref_mv;
+ else if (voltage_uv)
+ st->int_vref_mv = voltage_uv / 1000;
+ else
+ dev_warn(&spi->dev, "reference voltage undefined\n");
+
+ spi_set_drvdata(spi, indio_dev);
+ st->spi = spi;
+ st->devid = spi_get_device_id(spi)->driver_data;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad7192_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
+ indio_dev->available_scan_masks = st->available_scan_masks;
+ indio_dev->info = &ad7192_info;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ st->available_scan_masks[i] = (1 << i) | (1 <<
+ indio_dev->channels[indio_dev->num_channels - 1].
+ scan_index);
+
+ init_waitqueue_head(&st->wq_data_avail);
+
+ ret = ad7192_register_ring_funcs_and_init(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = ad7192_probe_trigger(indio_dev);
+ if (ret)
+ goto error_ring_cleanup;
+
+ ret = iio_buffer_register(indio_dev,
+ indio_dev->channels,
+ indio_dev->num_channels);
+ if (ret)
+ goto error_remove_trigger;
+
+ ret = ad7192_setup(st);
+ if (ret)
+ goto error_unreg_ring;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto error_unreg_ring;
+ return 0;
+
+error_unreg_ring:
+ iio_buffer_unregister(indio_dev);
+error_remove_trigger:
+ ad7192_remove_trigger(indio_dev);
+error_ring_cleanup:
+ ad7192_ring_cleanup(indio_dev);
+error_disable_reg:
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+error_put_reg:
+ if (!IS_ERR(st->reg))
+ regulator_put(st->reg);
+
+ iio_free_device(indio_dev);
+
+ return ret;
+}
+
+static int ad7192_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_buffer_unregister(indio_dev);
+ ad7192_remove_trigger(indio_dev);
+ ad7192_ring_cleanup(indio_dev);
+
+ if (!IS_ERR(st->reg)) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
+ }
+
+ return 0;
+}
+
+static const struct spi_device_id ad7192_id[] = {
+ {"ad7190", ID_AD7190},
+ {"ad7192", ID_AD7192},
+ {"ad7195", ID_AD7195},
+ {}
+};
+
+static struct spi_driver ad7192_driver = {
+ .driver = {
+ .name = "ad7192",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7192_probe,
+ .remove = __devexit_p(ad7192_remove),
+ .id_table = ad7192_id,
+};
+
+static int __init ad7192_init(void)
+{
+ return spi_register_driver(&ad7192_driver);
+}
+module_init(ad7192_init);
+
+static void __exit ad7192_exit(void)
+{
+ spi_unregister_driver(&ad7192_driver);
+}
+module_exit(ad7192_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD7190, AD7192, AD7195 ADC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/adc/ad7192.h b/drivers/staging/iio/adc/ad7192.h
new file mode 100644
index 00000000000..a0a5b61a41f
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7192.h
@@ -0,0 +1,47 @@
+/*
+ * AD7190 AD7192 AD7195 SPI ADC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+#ifndef IIO_ADC_AD7192_H_
+#define IIO_ADC_AD7192_H_
+
+/*
+ * TODO: struct ad7192_platform_data needs to go into include/linux/iio
+ */
+
+/**
+ * struct ad7192_platform_data - platform/board specific information
+ * @vref_mv: the external reference voltage in millivolt
+ * @clock_source_sel: [0..3]
+ * 0 External 4.92 MHz clock connected from MCLK1 to MCLK2
+ * 1 External Clock applied to MCLK2
+ * 2 Internal 4.92 MHz Clock not available at the MCLK2 pin
+ * 3 Internal 4.92 MHz Clock available at the MCLK2 pin
+ * @ext_clk_Hz: the external clock frequency in Hz, if not set
+ * the driver uses the internal clock (16.776 MHz)
+ * @refin2_en: REFIN1/REFIN2 Reference Select (AD7190/2 only)
+ * @rej60_en: 50/60Hz notch filter enable
+ * @sinc3_en: SINC3 filter enable (default SINC4)
+ * @chop_en: CHOP mode enable
+ * @buf_en: buffered input mode enable
+ * @unipolar_en: unipolar mode enable
+ * @burnout_curr_en: constant current generators on AIN(+|-) enable
+ */
+
+struct ad7192_platform_data {
+ u16 vref_mv;
+ u8 clock_source_sel;
+ u32 ext_clk_Hz;
+ bool refin2_en;
+ bool rej60_en;
+ bool sinc3_en;
+ bool chop_en;
+ bool buf_en;
+ bool unipolar_en;
+ bool burnout_curr_en;
+};
+
+#endif /* IIO_ADC_AD7192_H_ */
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
new file mode 100644
index 00000000000..372d059042f
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -0,0 +1,997 @@
+/*
+ * AD7280A Lithium Ion Battery Monitoring System
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#include "ad7280a.h"
+
+/* Registers */
+#define AD7280A_CELL_VOLTAGE_1 0x0 /* D11 to D0, Read only */
+#define AD7280A_CELL_VOLTAGE_2 0x1 /* D11 to D0, Read only */
+#define AD7280A_CELL_VOLTAGE_3 0x2 /* D11 to D0, Read only */
+#define AD7280A_CELL_VOLTAGE_4 0x3 /* D11 to D0, Read only */
+#define AD7280A_CELL_VOLTAGE_5 0x4 /* D11 to D0, Read only */
+#define AD7280A_CELL_VOLTAGE_6 0x5 /* D11 to D0, Read only */
+#define AD7280A_AUX_ADC_1 0x6 /* D11 to D0, Read only */
+#define AD7280A_AUX_ADC_2 0x7 /* D11 to D0, Read only */
+#define AD7280A_AUX_ADC_3 0x8 /* D11 to D0, Read only */
+#define AD7280A_AUX_ADC_4 0x9 /* D11 to D0, Read only */
+#define AD7280A_AUX_ADC_5 0xA /* D11 to D0, Read only */
+#define AD7280A_AUX_ADC_6 0xB /* D11 to D0, Read only */
+#define AD7280A_SELF_TEST 0xC /* D11 to D0, Read only */
+#define AD7280A_CONTROL_HB 0xD /* D15 to D8, Read/write */
+#define AD7280A_CONTROL_LB 0xE /* D7 to D0, Read/write */
+#define AD7280A_CELL_OVERVOLTAGE 0xF /* D7 to D0, Read/write */
+#define AD7280A_CELL_UNDERVOLTAGE 0x10 /* D7 to D0, Read/write */
+#define AD7280A_AUX_ADC_OVERVOLTAGE 0x11 /* D7 to D0, Read/write */
+#define AD7280A_AUX_ADC_UNDERVOLTAGE 0x12 /* D7 to D0, Read/write */
+#define AD7280A_ALERT 0x13 /* D7 to D0, Read/write */
+#define AD7280A_CELL_BALANCE 0x14 /* D7 to D0, Read/write */
+#define AD7280A_CB1_TIMER 0x15 /* D7 to D0, Read/write */
+#define AD7280A_CB2_TIMER 0x16 /* D7 to D0, Read/write */
+#define AD7280A_CB3_TIMER 0x17 /* D7 to D0, Read/write */
+#define AD7280A_CB4_TIMER 0x18 /* D7 to D0, Read/write */
+#define AD7280A_CB5_TIMER 0x19 /* D7 to D0, Read/write */
+#define AD7280A_CB6_TIMER 0x1A /* D7 to D0, Read/write */
+#define AD7280A_PD_TIMER 0x1B /* D7 to D0, Read/write */
+#define AD7280A_READ 0x1C /* D7 to D0, Read/write */
+#define AD7280A_CNVST_CONTROL 0x1D /* D7 to D0, Read/write */
+
+/* Bits and Masks */
+#define AD7280A_CTRL_HB_CONV_INPUT_ALL (0 << 6)
+#define AD7280A_CTRL_HB_CONV_INPUT_6CELL_AUX1_3_4 (1 << 6)
+#define AD7280A_CTRL_HB_CONV_INPUT_6CELL (2 << 6)
+#define AD7280A_CTRL_HB_CONV_INPUT_SELF_TEST (3 << 6)
+#define AD7280A_CTRL_HB_CONV_RES_READ_ALL (0 << 4)
+#define AD7280A_CTRL_HB_CONV_RES_READ_6CELL_AUX1_3_4 (1 << 4)
+#define AD7280A_CTRL_HB_CONV_RES_READ_6CELL (2 << 4)
+#define AD7280A_CTRL_HB_CONV_RES_READ_NO (3 << 4)
+#define AD7280A_CTRL_HB_CONV_START_CNVST (0 << 3)
+#define AD7280A_CTRL_HB_CONV_START_CS (1 << 3)
+#define AD7280A_CTRL_HB_CONV_AVG_DIS (0 << 1)
+#define AD7280A_CTRL_HB_CONV_AVG_2 (1 << 1)
+#define AD7280A_CTRL_HB_CONV_AVG_4 (2 << 1)
+#define AD7280A_CTRL_HB_CONV_AVG_8 (3 << 1)
+#define AD7280A_CTRL_HB_CONV_AVG(x) ((x) << 1)
+#define AD7280A_CTRL_HB_PWRDN_SW (1 << 0)
+
+#define AD7280A_CTRL_LB_SWRST (1 << 7)
+#define AD7280A_CTRL_LB_ACQ_TIME_400ns (0 << 5)
+#define AD7280A_CTRL_LB_ACQ_TIME_800ns (1 << 5)
+#define AD7280A_CTRL_LB_ACQ_TIME_1200ns (2 << 5)
+#define AD7280A_CTRL_LB_ACQ_TIME_1600ns (3 << 5)
+#define AD7280A_CTRL_LB_ACQ_TIME(x) ((x) << 5)
+#define AD7280A_CTRL_LB_MUST_SET (1 << 4)
+#define AD7280A_CTRL_LB_THERMISTOR_EN (1 << 3)
+#define AD7280A_CTRL_LB_LOCK_DEV_ADDR (1 << 2)
+#define AD7280A_CTRL_LB_INC_DEV_ADDR (1 << 1)
+#define AD7280A_CTRL_LB_DAISY_CHAIN_RB_EN (1 << 0)
+
+#define AD7280A_ALERT_GEN_STATIC_HIGH (1 << 6)
+#define AD7280A_ALERT_RELAY_SIG_CHAIN_DOWN (3 << 6)
+
+#define AD7280A_ALL_CELLS (0xAD << 16)
+
+#define AD7280A_MAX_SPI_CLK_Hz 700000 /* < 1MHz */
+#define AD7280A_MAX_CHAIN 8
+#define AD7280A_CELLS_PER_DEV 6
+#define AD7280A_BITS 12
+#define AD7280A_NUM_CH (AD7280A_AUX_ADC_6 - \
+ AD7280A_CELL_VOLTAGE_1 + 1)
+
+#define AD7280A_DEVADDR_MASTER 0
+#define AD7280A_DEVADDR_ALL 0x1F
+/* 5-bit device address is sent LSB first */
+#define AD7280A_DEVADDR(addr) (((addr & 0x1) << 4) | ((addr & 0x2) << 3) | \
+ (addr & 0x4) | ((addr & 0x8) >> 3) | \
+ ((addr & 0x10) >> 4))
+
+/* During a read a valid write is mandatory.
+ * So writing to the highest available address (Address 0x1F)
+ * and setting the address all parts bit to 0 is recommended
+ * So the TXVAL is AD7280A_DEVADDR_ALL + CRC
+ */
+#define AD7280A_READ_TXVAL 0xF800030A
+
+/*
+ * AD7280 CRC
+ *
+ * P(x) = x^8 + x^5 + x^3 + x^2 + x^1 + x^0 = 0b100101111 => 0x2F
+ */
+#define POLYNOM 0x2F
+#define POLYNOM_ORDER 8
+#define HIGHBIT 1 << (POLYNOM_ORDER - 1);
+
+struct ad7280_state {
+ struct spi_device *spi;
+ struct iio_chan_spec *channels;
+ struct iio_dev_attr *iio_attr;
+ int slave_num;
+ int scan_cnt;
+ int readback_delay_us;
+ unsigned char crc_tab[256];
+ unsigned char ctrl_hb;
+ unsigned char ctrl_lb;
+ unsigned char cell_threshhigh;
+ unsigned char cell_threshlow;
+ unsigned char aux_threshhigh;
+ unsigned char aux_threshlow;
+ unsigned char cb_mask[AD7280A_MAX_CHAIN];
+};
+
+static void ad7280_crc8_build_table(unsigned char *crc_tab)
+{
+ unsigned char bit, crc;
+ int cnt, i;
+
+ for (cnt = 0; cnt < 256; cnt++) {
+ crc = cnt;
+ for (i = 0; i < 8; i++) {
+ bit = crc & HIGHBIT;
+ crc <<= 1;
+ if (bit)
+ crc ^= POLYNOM;
+ }
+ crc_tab[cnt] = crc;
+ }
+}
+
+static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned val)
+{
+ unsigned char crc;
+
+ crc = crc_tab[val >> 16 & 0xFF];
+ crc = crc_tab[crc ^ (val >> 8 & 0xFF)];
+
+ return crc ^ (val & 0xFF);
+}
+
+static int ad7280_check_crc(struct ad7280_state *st, unsigned val)
+{
+ unsigned char crc = ad7280_calc_crc8(st->crc_tab, val >> 10);
+
+ if (crc != ((val >> 2) & 0xFF))
+ return -EIO;
+
+ return 0;
+}
+
+/* After initiating a conversion sequence we need to wait until the
+ * conversion is done. The delay is typically in the range of 15..30 us
+ * however depending an the number of devices in the daisy chain and the
+ * number of averages taken, conversion delays and acquisition time options
+ * it may take up to 250us, in this case we better sleep instead of busy
+ * wait.
+ */
+
+static void ad7280_delay(struct ad7280_state *st)
+{
+ if (st->readback_delay_us < 50)
+ udelay(st->readback_delay_us);
+ else
+ msleep(1);
+}
+
+static int __ad7280_read32(struct spi_device *spi, unsigned *val)
+{
+ unsigned rx_buf, tx_buf = cpu_to_be32(AD7280A_READ_TXVAL);
+ int ret;
+
+ struct spi_transfer t = {
+ .tx_buf = &tx_buf,
+ .rx_buf = &rx_buf,
+ .len = 4,
+ };
+ struct spi_message m;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+
+ ret = spi_sync(spi, &m);
+ if (ret)
+ return ret;
+
+ *val = be32_to_cpu(rx_buf);
+
+ return 0;
+}
+
+static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
+ unsigned addr, bool all, unsigned val)
+{
+ unsigned reg = (devaddr << 27 | addr << 21 |
+ (val & 0xFF) << 13 | all << 12);
+
+ reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2;
+ reg = cpu_to_be32(reg);
+
+ return spi_write(st->spi, &reg, 4);
+}
+
+static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
+ unsigned addr)
+{
+ int ret;
+ unsigned tmp;
+
+ /* turns off the read operation on all parts */
+ ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
+ AD7280A_CTRL_HB_CONV_INPUT_ALL |
+ AD7280A_CTRL_HB_CONV_RES_READ_NO |
+ st->ctrl_hb);
+ if (ret)
+ return ret;
+
+ /* turns on the read operation on the addressed part */
+ ret = ad7280_write(st, devaddr, AD7280A_CONTROL_HB, 0,
+ AD7280A_CTRL_HB_CONV_INPUT_ALL |
+ AD7280A_CTRL_HB_CONV_RES_READ_ALL |
+ st->ctrl_hb);
+ if (ret)
+ return ret;
+
+ /* Set register address on the part to be read from */
+ ret = ad7280_write(st, devaddr, AD7280A_READ, 0, addr << 2);
+ if (ret)
+ return ret;
+
+ __ad7280_read32(st->spi, &tmp);
+
+ if (ad7280_check_crc(st, tmp))
+ return -EIO;
+
+ if (((tmp >> 27) != devaddr) || (((tmp >> 21) & 0x3F) != addr))
+ return -EFAULT;
+
+ return (tmp >> 13) & 0xFF;
+}
+
+static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,
+ unsigned addr)
+{
+ int ret;
+ unsigned tmp;
+
+ ret = ad7280_write(st, devaddr, AD7280A_READ, 0, addr << 2);
+ if (ret)
+ return ret;
+
+ ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
+ AD7280A_CTRL_HB_CONV_INPUT_ALL |
+ AD7280A_CTRL_HB_CONV_RES_READ_NO |
+ st->ctrl_hb);
+ if (ret)
+ return ret;
+
+ ret = ad7280_write(st, devaddr, AD7280A_CONTROL_HB, 0,
+ AD7280A_CTRL_HB_CONV_INPUT_ALL |
+ AD7280A_CTRL_HB_CONV_RES_READ_ALL |
+ AD7280A_CTRL_HB_CONV_START_CS |
+ st->ctrl_hb);
+ if (ret)
+ return ret;
+
+ ad7280_delay(st);
+
+ __ad7280_read32(st->spi, &tmp);
+
+ if (ad7280_check_crc(st, tmp))
+ return -EIO;
+
+ if (((tmp >> 27) != devaddr) || (((tmp >> 23) & 0xF) != addr))
+ return -EFAULT;
+
+ return (tmp >> 11) & 0xFFF;
+}
+
+static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
+ unsigned *array)
+{
+ int i, ret;
+ unsigned tmp, sum = 0;
+
+ ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_READ, 1,
+ AD7280A_CELL_VOLTAGE_1 << 2);
+ if (ret)
+ return ret;
+
+ ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
+ AD7280A_CTRL_HB_CONV_INPUT_ALL |
+ AD7280A_CTRL_HB_CONV_RES_READ_ALL |
+ AD7280A_CTRL_HB_CONV_START_CS |
+ st->ctrl_hb);
+ if (ret)
+ return ret;
+
+ ad7280_delay(st);
+
+ for (i = 0; i < cnt; i++) {
+ __ad7280_read32(st->spi, &tmp);
+
+ if (ad7280_check_crc(st, tmp))
+ return -EIO;
+
+ if (array)
+ array[i] = tmp;
+ /* only sum cell voltages */
+ if (((tmp >> 23) & 0xF) <= AD7280A_CELL_VOLTAGE_6)
+ sum += ((tmp >> 11) & 0xFFF);
+ }
+
+ return sum;
+}
+
+static int ad7280_chain_setup(struct ad7280_state *st)
+{
+ unsigned val, n;
+ int ret;
+
+ ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_LB, 1,
+ AD7280A_CTRL_LB_DAISY_CHAIN_RB_EN |
+ AD7280A_CTRL_LB_LOCK_DEV_ADDR |
+ AD7280A_CTRL_LB_MUST_SET |
+ AD7280A_CTRL_LB_SWRST |
+ st->ctrl_lb);
+ if (ret)
+ return ret;
+
+ ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_LB, 1,
+ AD7280A_CTRL_LB_DAISY_CHAIN_RB_EN |
+ AD7280A_CTRL_LB_LOCK_DEV_ADDR |
+ AD7280A_CTRL_LB_MUST_SET |
+ st->ctrl_lb);
+ if (ret)
+ return ret;
+
+ ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_READ, 1,
+ AD7280A_CONTROL_LB << 2);
+ if (ret)
+ return ret;
+
+ for (n = 0; n <= AD7280A_MAX_CHAIN; n++) {
+ __ad7280_read32(st->spi, &val);
+ if (val == 0)
+ return n - 1;
+
+ if (ad7280_check_crc(st, val))
+ return -EIO;
+
+ if (n != AD7280A_DEVADDR(val >> 27))
+ return -EIO;
+ }
+
+ return -EFAULT;
+}
+
+static ssize_t ad7280_show_balance_sw(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7280_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ return sprintf(buf, "%d\n",
+ !!(st->cb_mask[this_attr->address >> 8] &
+ (1 << ((this_attr->address & 0xFF) + 2))));
+}
+
+static ssize_t ad7280_store_balance_sw(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7280_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ bool readin;
+ int ret;
+ unsigned devaddr, ch;
+
+ ret = strtobool(buf, &readin);
+ if (ret)
+ return ret;
+
+ devaddr = this_attr->address >> 8;
+ ch = this_attr->address & 0xFF;
+
+ mutex_lock(&indio_dev->mlock);
+ if (readin)
+ st->cb_mask[devaddr] |= 1 << (ch + 2);
+ else
+ st->cb_mask[devaddr] &= ~(1 << (ch + 2));
+
+ ret = ad7280_write(st, devaddr, AD7280A_CELL_BALANCE,
+ 0, st->cb_mask[devaddr]);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t ad7280_show_balance_timer(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7280_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ unsigned msecs;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad7280_read(st, this_attr->address >> 8,
+ this_attr->address & 0xFF);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret < 0)
+ return ret;
+
+ msecs = (ret >> 3) * 71500;
+
+ return sprintf(buf, "%d\n", msecs);
+}
+
+static ssize_t ad7280_store_balance_timer(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7280_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ val /= 71500;
+
+ if (val > 31)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad7280_write(st, this_attr->address >> 8,
+ this_attr->address & 0xFF,
+ 0, (val & 0x1F) << 3);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static struct attribute *ad7280_attributes[AD7280A_MAX_CHAIN *
+ AD7280A_CELLS_PER_DEV * 2 + 1];
+
+static struct attribute_group ad7280_attrs_group = {
+ .attrs = ad7280_attributes,
+};
+
+static int ad7280_channel_init(struct ad7280_state *st)
+{
+ int dev, ch, cnt;
+
+ st->channels = kzalloc(sizeof(*st->channels) *
+ ((st->slave_num + 1) * 12 + 2), GFP_KERNEL);
+ if (st->channels == NULL)
+ return -ENOMEM;
+
+ for (dev = 0, cnt = 0; dev <= st->slave_num; dev++)
+ for (ch = AD7280A_CELL_VOLTAGE_1; ch <= AD7280A_AUX_ADC_6; ch++,
+ cnt++) {
+ if (ch < AD7280A_AUX_ADC_1) {
+ st->channels[cnt].type = IIO_VOLTAGE;
+ st->channels[cnt].differential = 1;
+ st->channels[cnt].channel = (dev * 6) + ch;
+ st->channels[cnt].channel2 =
+ st->channels[cnt].channel + 1;
+ } else {
+ st->channels[cnt].type = IIO_TEMP;
+ st->channels[cnt].channel = (dev * 6) + ch - 6;
+ }
+ st->channels[cnt].indexed = 1;
+ st->channels[cnt].info_mask =
+ (1 << IIO_CHAN_INFO_SCALE_SHARED);
+ st->channels[cnt].address =
+ AD7280A_DEVADDR(dev) << 8 | ch;
+ st->channels[cnt].scan_index = cnt;
+ st->channels[cnt].scan_type.sign = 'u';
+ st->channels[cnt].scan_type.realbits = 12;
+ st->channels[cnt].scan_type.storagebits = 32;
+ st->channels[cnt].scan_type.shift = 0;
+ }
+
+ st->channels[cnt].type = IIO_VOLTAGE;
+ st->channels[cnt].differential = 1;
+ st->channels[cnt].channel = 0;
+ st->channels[cnt].channel2 = dev * 6;
+ st->channels[cnt].address = AD7280A_ALL_CELLS;
+ st->channels[cnt].indexed = 1;
+ st->channels[cnt].info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED);
+ st->channels[cnt].scan_index = cnt;
+ st->channels[cnt].scan_type.sign = 'u';
+ st->channels[cnt].scan_type.realbits = 32;
+ st->channels[cnt].scan_type.storagebits = 32;
+ st->channels[cnt].scan_type.shift = 0;
+ cnt++;
+ st->channels[cnt].type = IIO_TIMESTAMP;
+ st->channels[cnt].channel = -1;
+ st->channels[cnt].scan_index = cnt;
+ st->channels[cnt].scan_type.sign = 's';
+ st->channels[cnt].scan_type.realbits = 64;
+ st->channels[cnt].scan_type.storagebits = 64;
+ st->channels[cnt].scan_type.shift = 0;
+
+ return cnt + 1;
+}
+
+static int ad7280_attr_init(struct ad7280_state *st)
+{
+ int dev, ch, cnt;
+
+ st->iio_attr = kzalloc(sizeof(*st->iio_attr) * (st->slave_num + 1) *
+ AD7280A_CELLS_PER_DEV * 2, GFP_KERNEL);
+ if (st->iio_attr == NULL)
+ return -ENOMEM;
+
+ for (dev = 0, cnt = 0; dev <= st->slave_num; dev++)
+ for (ch = AD7280A_CELL_VOLTAGE_1; ch <= AD7280A_CELL_VOLTAGE_6;
+ ch++, cnt++) {
+ st->iio_attr[cnt].address =
+ AD7280A_DEVADDR(dev) << 8 | ch;
+ st->iio_attr[cnt].dev_attr.attr.mode =
+ S_IWUSR | S_IRUGO;
+ st->iio_attr[cnt].dev_attr.show =
+ ad7280_show_balance_sw;
+ st->iio_attr[cnt].dev_attr.store =
+ ad7280_store_balance_sw;
+ st->iio_attr[cnt].dev_attr.attr.name =
+ kasprintf(GFP_KERNEL,
+ "in%d-in%d_balance_switch_en",
+ (dev * AD7280A_CELLS_PER_DEV) + ch,
+ (dev * AD7280A_CELLS_PER_DEV) + ch + 1);
+ ad7280_attributes[cnt] =
+ &st->iio_attr[cnt].dev_attr.attr;
+ cnt++;
+ st->iio_attr[cnt].address =
+ AD7280A_DEVADDR(dev) << 8 |
+ (AD7280A_CB1_TIMER + ch);
+ st->iio_attr[cnt].dev_attr.attr.mode =
+ S_IWUSR | S_IRUGO;
+ st->iio_attr[cnt].dev_attr.show =
+ ad7280_show_balance_timer;
+ st->iio_attr[cnt].dev_attr.store =
+ ad7280_store_balance_timer;
+ st->iio_attr[cnt].dev_attr.attr.name =
+ kasprintf(GFP_KERNEL, "in%d-in%d_balance_timer",
+ (dev * AD7280A_CELLS_PER_DEV) + ch,
+ (dev * AD7280A_CELLS_PER_DEV) + ch + 1);
+ ad7280_attributes[cnt] =
+ &st->iio_attr[cnt].dev_attr.attr;
+ }
+
+ ad7280_attributes[cnt] = NULL;
+
+ return 0;
+}
+
+static ssize_t ad7280_read_channel_config(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7280_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned val;
+
+ switch (this_attr->address) {
+ case AD7280A_CELL_OVERVOLTAGE:
+ val = 1000 + (st->cell_threshhigh * 1568) / 100;
+ break;
+ case AD7280A_CELL_UNDERVOLTAGE:
+ val = 1000 + (st->cell_threshlow * 1568) / 100;
+ break;
+ case AD7280A_AUX_ADC_OVERVOLTAGE:
+ val = (st->aux_threshhigh * 196) / 10;
+ break;
+ case AD7280A_AUX_ADC_UNDERVOLTAGE:
+ val = (st->aux_threshlow * 196) / 10;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t ad7280_write_channel_config(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7280_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ long val;
+ int ret;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (this_attr->address) {
+ case AD7280A_CELL_OVERVOLTAGE:
+ case AD7280A_CELL_UNDERVOLTAGE:
+ val = ((val - 1000) * 100) / 1568; /* LSB 15.68mV */
+ break;
+ case AD7280A_AUX_ADC_OVERVOLTAGE:
+ case AD7280A_AUX_ADC_UNDERVOLTAGE:
+ val = (val * 10) / 196; /* LSB 19.6mV */
+ break;
+ default:
+ return -EFAULT;
+ }
+
+ val = clamp(val, 0L, 0xFFL);
+
+ mutex_lock(&indio_dev->mlock);
+ switch (this_attr->address) {
+ case AD7280A_CELL_OVERVOLTAGE:
+ st->cell_threshhigh = val;
+ break;
+ case AD7280A_CELL_UNDERVOLTAGE:
+ st->cell_threshlow = val;
+ break;
+ case AD7280A_AUX_ADC_OVERVOLTAGE:
+ st->aux_threshhigh = val;
+ break;
+ case AD7280A_AUX_ADC_UNDERVOLTAGE:
+ st->aux_threshlow = val;
+ break;
+ }
+
+ ret = ad7280_write(st, AD7280A_DEVADDR_MASTER,
+ this_attr->address, 1, val);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static irqreturn_t ad7280_event_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct ad7280_state *st = iio_priv(indio_dev);
+ unsigned *channels;
+ int i, ret;
+
+ channels = kzalloc(sizeof(*channels) * st->scan_cnt, GFP_KERNEL);
+ if (channels == NULL)
+ return IRQ_HANDLED;
+
+ ret = ad7280_read_all_channels(st, st->scan_cnt, channels);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ for (i = 0; i < st->scan_cnt; i++) {
+ if (((channels[i] >> 23) & 0xF) <= AD7280A_CELL_VOLTAGE_6) {
+ if (((channels[i] >> 11) & 0xFFF) >=
+ st->cell_threshhigh)
+ iio_push_event(indio_dev,
+ IIO_EVENT_CODE(IIO_VOLTAGE,
+ 1,
+ 0,
+ IIO_EV_DIR_RISING,
+ IIO_EV_TYPE_THRESH,
+ 0, 0, 0),
+ iio_get_time_ns());
+ else if (((channels[i] >> 11) & 0xFFF) <=
+ st->cell_threshlow)
+ iio_push_event(indio_dev,
+ IIO_EVENT_CODE(IIO_VOLTAGE,
+ 1,
+ 0,
+ IIO_EV_DIR_FALLING,
+ IIO_EV_TYPE_THRESH,
+ 0, 0, 0),
+ iio_get_time_ns());
+ } else {
+ if (((channels[i] >> 11) & 0xFFF) >= st->aux_threshhigh)
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_TEMP,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ else if (((channels[i] >> 11) & 0xFFF) <=
+ st->aux_threshlow)
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_TEMP,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns());
+ }
+ }
+
+ kfree(channels);
+
+ return IRQ_HANDLED;
+}
+
+static IIO_DEVICE_ATTR_NAMED(in_thresh_low_value,
+ in_voltage-voltage_thresh_low_value,
+ S_IRUGO | S_IWUSR,
+ ad7280_read_channel_config,
+ ad7280_write_channel_config,
+ AD7280A_CELL_UNDERVOLTAGE);
+
+static IIO_DEVICE_ATTR_NAMED(in_thresh_high_value,
+ in_voltage-voltage_thresh_high_value,
+ S_IRUGO | S_IWUSR,
+ ad7280_read_channel_config,
+ ad7280_write_channel_config,
+ AD7280A_CELL_OVERVOLTAGE);
+
+static IIO_DEVICE_ATTR(in_temp_thresh_low_value,
+ S_IRUGO | S_IWUSR,
+ ad7280_read_channel_config,
+ ad7280_write_channel_config,
+ AD7280A_AUX_ADC_UNDERVOLTAGE);
+
+static IIO_DEVICE_ATTR(in_temp_thresh_high_value,
+ S_IRUGO | S_IWUSR,
+ ad7280_read_channel_config,
+ ad7280_write_channel_config,
+ AD7280A_AUX_ADC_OVERVOLTAGE);
+
+
+static struct attribute *ad7280_event_attributes[] = {
+ &iio_dev_attr_in_thresh_low_value.dev_attr.attr,
+ &iio_dev_attr_in_thresh_high_value.dev_attr.attr,
+ &iio_dev_attr_in_temp_thresh_low_value.dev_attr.attr,
+ &iio_dev_attr_in_temp_thresh_high_value.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ad7280_event_attrs_group = {
+ .attrs = ad7280_event_attributes,
+};
+
+static int ad7280_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad7280_state *st = iio_priv(indio_dev);
+ unsigned int scale_uv;
+ int ret;
+
+ switch (m) {
+ case 0:
+ mutex_lock(&indio_dev->mlock);
+ if (chan->address == AD7280A_ALL_CELLS)
+ ret = ad7280_read_all_channels(st, st->scan_cnt, NULL);
+ else
+ ret = ad7280_read_channel(st, chan->address >> 8,
+ chan->address & 0xFF);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return IIO_VAL_INT;
+ case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ if ((chan->address & 0xFF) <= AD7280A_CELL_VOLTAGE_6)
+ scale_uv = (4000 * 1000) >> AD7280A_BITS;
+ else
+ scale_uv = (5000 * 1000) >> AD7280A_BITS;
+
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+}
+
+static const struct iio_info ad7280_info = {
+ .read_raw = &ad7280_read_raw,
+ .event_attrs = &ad7280_event_attrs_group,
+ .attrs = &ad7280_attrs_group,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct ad7280_platform_data ad7793_default_pdata = {
+ .acquisition_time = AD7280A_ACQ_TIME_400ns,
+ .conversion_averaging = AD7280A_CONV_AVG_DIS,
+ .thermistor_term_en = true,
+};
+
+static int __devinit ad7280_probe(struct spi_device *spi)
+{
+ const struct ad7280_platform_data *pdata = spi->dev.platform_data;
+ struct ad7280_state *st;
+ int ret;
+ const unsigned short tACQ_ns[4] = {465, 1010, 1460, 1890};
+ const unsigned short nAVG[4] = {1, 2, 4, 8};
+ struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));
+
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+ st->spi = spi;
+
+ if (!pdata)
+ pdata = &ad7793_default_pdata;
+
+ ad7280_crc8_build_table(st->crc_tab);
+
+ st->spi->max_speed_hz = AD7280A_MAX_SPI_CLK_Hz;
+ st->spi->mode = SPI_MODE_1;
+ spi_setup(st->spi);
+
+ st->ctrl_lb = AD7280A_CTRL_LB_ACQ_TIME(pdata->acquisition_time & 0x3);
+ st->ctrl_hb = AD7280A_CTRL_HB_CONV_AVG(pdata->conversion_averaging
+ & 0x3) | (pdata->thermistor_term_en ?
+ AD7280A_CTRL_LB_THERMISTOR_EN : 0);
+
+ ret = ad7280_chain_setup(st);
+ if (ret < 0)
+ goto error_free_device;
+
+ st->slave_num = ret;
+ st->scan_cnt = (st->slave_num + 1) * AD7280A_NUM_CH;
+ st->cell_threshhigh = 0xFF;
+ st->aux_threshhigh = 0xFF;
+
+ /*
+ * Total Conversion Time = ((tACQ + tCONV) *
+ * (Number of Conversions per Part)) −
+ * tACQ + ((N - 1) * tDELAY)
+ *
+ * Readback Delay = Total Conversion Time + tWAIT
+ */
+
+ st->readback_delay_us =
+ ((tACQ_ns[pdata->acquisition_time & 0x3] + 695) *
+ (AD7280A_NUM_CH * nAVG[pdata->conversion_averaging & 0x3]))
+ - tACQ_ns[pdata->acquisition_time & 0x3] +
+ st->slave_num * 250;
+
+ /* Convert to usecs */
+ st->readback_delay_us = DIV_ROUND_UP(st->readback_delay_us, 1000);
+ st->readback_delay_us += 5; /* Add tWAIT */
+
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = ad7280_channel_init(st);
+ if (ret < 0)
+ goto error_free_device;
+
+ indio_dev->num_channels = ret;
+ indio_dev->channels = st->channels;
+ indio_dev->info = &ad7280_info;
+
+ ret = ad7280_attr_init(st);
+ if (ret < 0)
+ goto error_free_channels;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_free_attr;
+
+ if (spi->irq > 0) {
+ ret = ad7280_write(st, AD7280A_DEVADDR_MASTER,
+ AD7280A_ALERT, 1,
+ AD7280A_ALERT_RELAY_SIG_CHAIN_DOWN);
+ if (ret)
+ goto error_unregister;
+
+ ret = ad7280_write(st, AD7280A_DEVADDR(st->slave_num),
+ AD7280A_ALERT, 0,
+ AD7280A_ALERT_GEN_STATIC_HIGH |
+ (pdata->chain_last_alert_ignore & 0xF));
+ if (ret)
+ goto error_unregister;
+
+ ret = request_threaded_irq(spi->irq,
+ NULL,
+ ad7280_event_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ indio_dev->name,
+ indio_dev);
+ if (ret)
+ goto error_unregister;
+ }
+
+ return 0;
+error_unregister:
+ iio_device_unregister(indio_dev);
+
+error_free_attr:
+ kfree(st->iio_attr);
+
+error_free_channels:
+ kfree(st->channels);
+
+error_free_device:
+ iio_free_device(indio_dev);
+
+ return ret;
+}
+
+static int __devexit ad7280_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad7280_state *st = iio_priv(indio_dev);
+
+ if (spi->irq > 0)
+ free_irq(spi->irq, indio_dev);
+ iio_device_unregister(indio_dev);
+
+ ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
+ AD7280A_CTRL_HB_PWRDN_SW | st->ctrl_hb);
+
+ kfree(st->channels);
+ kfree(st->iio_attr);
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id ad7280_id[] = {
+ {"ad7280a", 0},
+ {}
+};
+
+static struct spi_driver ad7280_driver = {
+ .driver = {
+ .name = "ad7280",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7280_probe,
+ .remove = __devexit_p(ad7280_remove),
+ .id_table = ad7280_id,
+};
+
+static int __init ad7280_init(void)
+{
+ return spi_register_driver(&ad7280_driver);
+}
+module_init(ad7280_init);
+
+static void __exit ad7280_exit(void)
+{
+ spi_unregister_driver(&ad7280_driver);
+}
+module_exit(ad7280_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD7280A");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/adc/ad7280a.h b/drivers/staging/iio/adc/ad7280a.h
new file mode 100644
index 00000000000..20400b0045e
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7280a.h
@@ -0,0 +1,38 @@
+/*
+ * AD7280A Lithium Ion Battery Monitoring System
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef IIO_ADC_AD7280_H_
+#define IIO_ADC_AD7280_H_
+
+/*
+ * TODO: struct ad7280_platform_data needs to go into include/linux/iio
+ */
+
+#define AD7280A_ACQ_TIME_400ns 0
+#define AD7280A_ACQ_TIME_800ns 1
+#define AD7280A_ACQ_TIME_1200ns 2
+#define AD7280A_ACQ_TIME_1600ns 3
+
+#define AD7280A_CONV_AVG_DIS 0
+#define AD7280A_CONV_AVG_2 1
+#define AD7280A_CONV_AVG_4 2
+#define AD7280A_CONV_AVG_8 3
+
+#define AD7280A_ALERT_REMOVE_VIN5 (1 << 2)
+#define AD7280A_ALERT_REMOVE_VIN4_VIN5 (2 << 2)
+#define AD7280A_ALERT_REMOVE_AUX5 (1 << 0)
+#define AD7280A_ALERT_REMOVE_AUX4_AUX5 (2 << 0)
+
+struct ad7280_platform_data {
+ unsigned acquisition_time;
+ unsigned conversion_averaging;
+ unsigned chain_last_alert_ignore;
+ bool thermistor_term_en;
+};
+
+#endif /* IIO_ADC_AD7280_H_ */
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index 96cbb17bc2c..10e79e8ee64 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -1,83 +1,110 @@
/*
- * AD7291 digital temperature sensor driver supporting AD7291
+ * AD7291 8-Channel, I2C, 12-Bit SAR ADC with Temperature Sensor
*
- * Copyright 2010 Analog Devices Inc.
+ * Copyright 2010-2011 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
#include "../iio.h"
#include "../sysfs.h"
/*
+ * Simplified handling
+ *
+ * If no events enabled - single polled channel read
+ * If event enabled direct reads disable unless channel
+ * is in the read mask.
+ *
+ * The noise-delayed bit as per datasheet suggestion is always enabled.
+ *
+ */
+
+/*
* AD7291 registers definition
*/
-#define AD7291_COMMAND 0
-#define AD7291_VOLTAGE 1
-#define AD7291_T_SENSE 2
-#define AD7291_T_AVERAGE 3
-#define AD7291_VOLTAGE_LIMIT_BASE 4
-#define AD7291_VOLTAGE_LIMIT_COUNT 8
-#define AD7291_T_SENSE_HIGH 0x1c
-#define AD7291_T_SENSE_LOW 0x1d
-#define AD7291_T_SENSE_HYST 0x1e
-#define AD7291_VOLTAGE_ALERT_STATUS 0x1f
+#define AD7291_COMMAND 0x00
+#define AD7291_VOLTAGE 0x01
+#define AD7291_T_SENSE 0x02
+#define AD7291_T_AVERAGE 0x03
+#define AD7291_CH0_DATA_HIGH 0x04
+#define AD7291_CH0_DATA_LOW 0x05
+#define AD7291_CH0_HYST 0x06
+#define AD7291_CH1_DATA_HIGH 0x07
+#define AD7291_CH1_DATA_LOW 0x08
+#define AD7291_CH1_HYST 0x09
+#define AD7291_CH2_DATA_HIGH 0x0A
+#define AD7291_CH2_DATA_LOW 0x0B
+#define AD7291_CH2_HYST 0x0C
+#define AD7291_CH3_DATA_HIGH 0x0D
+#define AD7291_CH3_DATA_LOW 0x0E
+#define AD7291_CH3_HYST 0x0F
+#define AD7291_CH4_DATA_HIGH 0x10
+#define AD7291_CH4_DATA_LOW 0x11
+#define AD7291_CH4_HYST 0x12
+#define AD7291_CH5_DATA_HIGH 0x13
+#define AD7291_CH5_DATA_LOW 0x14
+#define AD7291_CH5_HYST 0x15
+#define AD7291_CH6_DATA_HIGH 0x16
+#define AD7291_CH6_DATA_LOW 0x17
+#define AD7291_CH6_HYST 0x18
+#define AD7291_CH7_DATA_HIGH 0x19
+#define AD7291_CH7_DATA_LOW 0x1A
+#define AD7291_CH7_HYST 0x2B
+#define AD7291_T_SENSE_HIGH 0x1C
+#define AD7291_T_SENSE_LOW 0x1D
+#define AD7291_T_SENSE_HYST 0x1E
+#define AD7291_VOLTAGE_ALERT_STATUS 0x1F
#define AD7291_T_ALERT_STATUS 0x20
+#define AD7291_VOLTAGE_LIMIT_COUNT 8
+
+
/*
* AD7291 command
*/
-#define AD7291_AUTOCYCLE 0x1
-#define AD7291_RESET 0x2
-#define AD7291_ALART_CLEAR 0x4
-#define AD7291_ALART_POLARITY 0x8
-#define AD7291_EXT_REF 0x10
-#define AD7291_NOISE_DELAY 0x20
-#define AD7291_T_SENSE_MASK 0x40
-#define AD7291_VOLTAGE_MASK 0xff00
+#define AD7291_AUTOCYCLE (1 << 0)
+#define AD7291_RESET (1 << 1)
+#define AD7291_ALERT_CLEAR (1 << 2)
+#define AD7291_ALERT_POLARITY (1 << 3)
+#define AD7291_EXT_REF (1 << 4)
+#define AD7291_NOISE_DELAY (1 << 5)
+#define AD7291_T_SENSE_MASK (1 << 7)
+#define AD7291_VOLTAGE_MASK 0xFF00
#define AD7291_VOLTAGE_OFFSET 0x8
/*
* AD7291 value masks
*/
-#define AD7291_CHANNEL_MASK 0xf000
-#define AD7291_VALUE_MASK 0xfff
+#define AD7291_CHANNEL_MASK 0xF000
+#define AD7291_BITS 12
+#define AD7291_VALUE_MASK 0xFFF
#define AD7291_T_VALUE_SIGN 0x400
#define AD7291_T_VALUE_FLOAT_OFFSET 2
#define AD7291_T_VALUE_FLOAT_MASK 0x2
-/*
- * struct ad7291_chip_info - chip specifc information
- */
+#define AD7291_BITS 12
struct ad7291_chip_info {
- struct i2c_client *client;
- u16 command;
- u8 channels; /* Active voltage channels */
+ struct i2c_client *client;
+ struct regulator *reg;
+ u16 int_vref_mv;
+ u16 command;
+ u16 c_mask; /* Active voltage channels for events */
+ struct mutex state_lock;
};
-/*
- * struct ad7291_chip_info - chip specifc information
- */
-
-struct ad7291_limit_regs {
- u16 data_high;
- u16 data_low;
- u16 hysteresis;
-};
-
-/*
- * ad7291 register access by I2C
- */
static int ad7291_i2c_read(struct ad7291_chip_info *chip, u8 reg, u16 *data)
{
struct i2c_client *client = chip->client;
@@ -96,352 +123,25 @@ static int ad7291_i2c_read(struct ad7291_chip_info *chip, u8 reg, u16 *data)
static int ad7291_i2c_write(struct ad7291_chip_info *chip, u8 reg, u16 data)
{
- struct i2c_client *client = chip->client;
- int ret = 0;
-
- ret = i2c_smbus_write_word_data(client, reg, swab16(data));
- if (ret < 0)
- dev_err(&client->dev, "I2C write error\n");
-
- return ret;
-}
-
-/* Returns negative errno, or else the number of words read. */
-static int ad7291_i2c_read_data(struct ad7291_chip_info *chip, u8 reg, u16 *data)
-{
- struct i2c_client *client = chip->client;
- u8 commands[4];
- int ret = 0;
- int i, count;
-
- if (reg == AD7291_T_SENSE || reg == AD7291_T_AVERAGE)
- count = 2;
- else if (reg == AD7291_VOLTAGE) {
- if (!chip->channels) {
- dev_err(&client->dev, "No voltage channel is selected.\n");
- return -EINVAL;
- }
- count = 2 + chip->channels * 2;
- } else {
- dev_err(&client->dev, "I2C wrong data register\n");
- return -EINVAL;
- }
-
- commands[0] = 0;
- commands[1] = (chip->command >> 8) & 0xff;
- commands[2] = chip->command & 0xff;
- commands[3] = reg;
-
- ret = i2c_master_send(client, commands, 4);
- if (ret < 0) {
- dev_err(&client->dev, "I2C master send error\n");
- return ret;
- }
-
- ret = i2c_master_recv(client, (u8 *)data, count);
- if (ret < 0) {
- dev_err(&client->dev, "I2C master receive error\n");
- return ret;
- }
- ret >>= 2;
-
- for (i = 0; i < ret; i++)
- data[i] = swab16(data[i]);
-
- return ret;
-}
-
-static ssize_t ad7291_show_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
-
- if (chip->command & AD7291_AUTOCYCLE)
- return sprintf(buf, "autocycle\n");
- else
- return sprintf(buf, "command\n");
+ return i2c_smbus_write_word_data(chip->client, reg, swab16(data));
}
-static ssize_t ad7291_store_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
- u16 command;
- int ret;
-
- command = chip->command & (~AD7291_AUTOCYCLE);
- if (strcmp(buf, "autocycle"))
- command |= AD7291_AUTOCYCLE;
-
- ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
- if (ret)
- return -EIO;
-
- chip->command = command;
-
- return ret;
-}
-
-static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
- ad7291_show_mode,
- ad7291_store_mode,
- 0);
-
-static ssize_t ad7291_show_available_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "command\nautocycle\n");
-}
-
-static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7291_show_available_modes, NULL, 0);
-
static ssize_t ad7291_store_reset(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
- u16 command;
- int ret;
-
- command = chip->command | AD7291_RESET;
-
- ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
- if (ret)
- return -EIO;
-
- return ret;
-}
-
-static IIO_DEVICE_ATTR(reset, S_IWUSR,
- NULL,
- ad7291_store_reset,
- 0);
-
-static ssize_t ad7291_show_ext_ref(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", !!(chip->command & AD7291_EXT_REF));
-}
-
-static ssize_t ad7291_store_ext_ref(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
- u16 command;
- int ret;
-
- command = chip->command & (~AD7291_EXT_REF);
- if (strcmp(buf, "1"))
- command |= AD7291_EXT_REF;
-
- ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
- if (ret)
- return -EIO;
-
- chip->command = command;
-
- return ret;
-}
-
-static IIO_DEVICE_ATTR(ext_ref, S_IRUGO | S_IWUSR,
- ad7291_show_ext_ref,
- ad7291_store_ext_ref,
- 0);
-
-static ssize_t ad7291_show_noise_delay(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", !!(chip->command & AD7291_NOISE_DELAY));
-}
-
-static ssize_t ad7291_store_noise_delay(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
- u16 command;
- int ret;
-
- command = chip->command & (~AD7291_NOISE_DELAY);
- if (strcmp(buf, "1"))
- command |= AD7291_NOISE_DELAY;
-
- ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
- if (ret)
- return -EIO;
-
- chip->command = command;
-
- return ret;
-}
-
-static IIO_DEVICE_ATTR(noise_delay, S_IRUGO | S_IWUSR,
- ad7291_show_noise_delay,
- ad7291_store_noise_delay,
- 0);
-
-static ssize_t ad7291_show_t_sense(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
- u16 data;
- char sign = ' ';
- int ret;
-
- ret = ad7291_i2c_read_data(chip, AD7291_T_SENSE, &data);
- if (ret)
- return -EIO;
-
- if (data & AD7291_T_VALUE_SIGN) {
- /* convert supplement to positive value */
- data = (AD7291_T_VALUE_SIGN << 1) - data;
- sign = '-';
- }
-
- return sprintf(buf, "%c%d.%.2d\n", sign,
- (data >> AD7291_T_VALUE_FLOAT_OFFSET),
- (data & AD7291_T_VALUE_FLOAT_MASK) * 25);
-}
-
-static IIO_DEVICE_ATTR(t_sense, S_IRUGO, ad7291_show_t_sense, NULL, 0);
-
-static ssize_t ad7291_show_t_average(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
- u16 data;
- char sign = ' ';
- int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
- ret = ad7291_i2c_read_data(chip, AD7291_T_AVERAGE, &data);
- if (ret)
- return -EIO;
-
- if (data & AD7291_T_VALUE_SIGN) {
- /* convert supplement to positive value */
- data = (AD7291_T_VALUE_SIGN << 1) - data;
- sign = '-';
- }
-
- return sprintf(buf, "%c%d.%.2d\n", sign,
- (data >> AD7291_T_VALUE_FLOAT_OFFSET),
- (data & AD7291_T_VALUE_FLOAT_MASK) * 25);
+ return ad7291_i2c_write(chip, AD7291_COMMAND,
+ chip->command | AD7291_RESET);
}
-static IIO_DEVICE_ATTR(t_average, S_IRUGO, ad7291_show_t_average, NULL, 0);
-
-static ssize_t ad7291_show_voltage(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
- u16 data[AD7291_VOLTAGE_LIMIT_COUNT];
- int i, size, ret;
-
- ret = ad7291_i2c_read_data(chip, AD7291_VOLTAGE, data);
- if (ret)
- return -EIO;
-
- for (i = 0; i < AD7291_VOLTAGE_LIMIT_COUNT; i++) {
- if (chip->command & (AD7291_T_SENSE_MASK << i)) {
- ret = sprintf(buf, "channel[%d]=%d\n", i,
- data[i] & AD7291_VALUE_MASK);
- if (ret < 0)
- break;
- buf += ret;
- size += ret;
- }
- }
-
- return size;
-}
-
-static IIO_DEVICE_ATTR(voltage, S_IRUGO, ad7291_show_voltage, NULL, 0);
-
-static ssize_t ad7291_show_channel_mask(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "0x%x\n", (chip->command & AD7291_VOLTAGE_MASK) >>
- AD7291_VOLTAGE_OFFSET);
-}
-
-static ssize_t ad7291_store_channel_mask(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
- u16 command;
- unsigned long data;
- int i, ret;
-
- ret = strict_strtoul(buf, 16, &data);
- if (ret || data > 0xff)
- return -EINVAL;
-
- command = chip->command & (~AD7291_VOLTAGE_MASK);
- command |= data << AD7291_VOLTAGE_OFFSET;
-
- ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
- if (ret)
- return -EIO;
-
- chip->command = command;
-
- for (i = 0, chip->channels = 0; i < AD7291_VOLTAGE_LIMIT_COUNT; i++) {
- if (chip->command & (AD7291_T_SENSE_MASK << i))
- chip->channels++;
- }
-
- return ret;
-}
-
-static IIO_DEVICE_ATTR(channel_mask, S_IRUGO | S_IWUSR,
- ad7291_show_channel_mask,
- ad7291_store_channel_mask,
- 0);
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, ad7291_store_reset, 0);
static struct attribute *ad7291_attributes[] = {
- &iio_dev_attr_available_modes.dev_attr.attr,
- &iio_dev_attr_mode.dev_attr.attr,
&iio_dev_attr_reset.dev_attr.attr,
- &iio_dev_attr_ext_ref.dev_attr.attr,
- &iio_dev_attr_noise_delay.dev_attr.attr,
- &iio_dev_attr_t_sense.dev_attr.attr,
- &iio_dev_attr_t_average.dev_attr.attr,
- &iio_dev_attr_voltage.dev_attr.attr,
- &iio_dev_attr_channel_mask.dev_attr.attr,
NULL,
};
@@ -449,10 +149,6 @@ static const struct attribute_group ad7291_attribute_group = {
.attrs = ad7291_attributes,
};
-/*
- * temperature bound events
- */
-
static irqreturn_t ad7291_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
@@ -471,35 +167,22 @@ static irqreturn_t ad7291_event_handler(int irq, void *private)
if (!(t_status || v_status))
return IRQ_HANDLED;
- command = chip->command | AD7291_ALART_CLEAR;
+ command = chip->command | AD7291_ALERT_CLEAR;
ad7291_i2c_write(chip, AD7291_COMMAND, command);
- command = chip->command & ~AD7291_ALART_CLEAR;
+ command = chip->command & ~AD7291_ALERT_CLEAR;
ad7291_i2c_write(chip, AD7291_COMMAND, command);
- if (t_status & (1 << 0))
- iio_push_event(indio_dev, 0,
+ /* For now treat t_sense and t_sense_average the same */
+ if ((t_status & (1 << 0)) || (t_status & (1 << 2)))
+ iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP,
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
timestamp);
- if (t_status & (1 << 1))
- iio_push_event(indio_dev, 0,
- IIO_UNMOD_EVENT_CODE(IIO_TEMP,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- timestamp);
- if (t_status & (1 << 2))
- iio_push_event(indio_dev, 0,
- IIO_UNMOD_EVENT_CODE(IIO_TEMP,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- timestamp);
- if (t_status & (1 << 3))
- iio_push_event(indio_dev, 0,
+ if ((t_status & (1 << 1)) || (t_status & (1 << 3)))
+ iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP,
0,
IIO_EV_TYPE_THRESH,
@@ -508,15 +191,15 @@ static irqreturn_t ad7291_event_handler(int irq, void *private)
for (i = 0; i < AD7291_VOLTAGE_LIMIT_COUNT*2; i += 2) {
if (v_status & (1 << i))
- iio_push_event(indio_dev, 0,
- IIO_UNMOD_EVENT_CODE(IIO_IN,
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE,
i/2,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
timestamp);
if (v_status & (1 << (i + 1)))
- iio_push_event(indio_dev, 0,
- IIO_UNMOD_EVENT_CODE(IIO_IN,
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE,
i/2,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
@@ -526,251 +209,352 @@ static irqreturn_t ad7291_event_handler(int irq, void *private)
return IRQ_HANDLED;
}
-static inline ssize_t ad7291_show_t_bound(struct device *dev,
+static inline ssize_t ad7291_show_hyst(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
u16 data;
- char sign = ' ';
int ret;
ret = ad7291_i2c_read(chip, this_attr->address, &data);
- if (ret)
- return -EIO;
-
- data &= AD7291_VALUE_MASK;
- if (data & AD7291_T_VALUE_SIGN) {
- /* convert supplement to positive value */
- data = (AD7291_T_VALUE_SIGN << 1) - data;
- sign = '-';
- }
+ if (ret < 0)
+ return ret;
- return sprintf(buf, "%c%d.%.2d\n", sign,
- data >> AD7291_T_VALUE_FLOAT_OFFSET,
- (data & AD7291_T_VALUE_FLOAT_MASK) * 25);
+ return sprintf(buf, "%d\n", data & AD7291_VALUE_MASK);
}
-static inline ssize_t ad7291_set_t_bound(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+static inline ssize_t ad7291_set_hyst(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- long tmp1, tmp2;
u16 data;
- char *pos;
int ret;
- pos = strchr(buf, '.');
+ ret = kstrtou16(buf, 10, &data);
- ret = strict_strtol(buf, 10, &tmp1);
-
- if (ret || tmp1 > 127 || tmp1 < -128)
+ if (ret < 0)
+ return ret;
+ if (data > AD7291_VALUE_MASK)
return -EINVAL;
- if (pos) {
- len = strlen(pos);
- if (len > AD7291_T_VALUE_FLOAT_OFFSET)
- len = AD7291_T_VALUE_FLOAT_OFFSET;
- pos[len] = 0;
- ret = strict_strtol(pos, 10, &tmp2);
-
- if (!ret)
- tmp2 = (tmp2 / 25) * 25;
- }
-
- if (tmp1 < 0)
- data = (u16)(-tmp1);
- else
- data = (u16)tmp1;
- data = (data << AD7291_T_VALUE_FLOAT_OFFSET) |
- (tmp2 & AD7291_T_VALUE_FLOAT_MASK);
- if (tmp1 < 0)
- /* convert positive value to supplyment */
- data = (AD7291_T_VALUE_SIGN << 1) - data;
-
ret = ad7291_i2c_write(chip, this_attr->address, data);
- if (ret)
- return -EIO;
+ if (ret < 0)
+ return ret;
- return ret;
+ return len;
}
-static inline ssize_t ad7291_show_v_bound(struct device *dev,
- struct device_attribute *attr,
- u8 bound_reg,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
- u16 data;
- int ret;
+static IIO_DEVICE_ATTR(in_temp0_thresh_both_hyst_raw,
+ S_IRUGO | S_IWUSR,
+ ad7291_show_hyst, ad7291_set_hyst,
+ AD7291_T_SENSE_HYST);
+static IIO_DEVICE_ATTR(in_voltage0_thresh_both_hyst_raw,
+ S_IRUGO | S_IWUSR,
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_CH0_HYST);
+static IIO_DEVICE_ATTR(in_voltage1_thresh_both_hyst_raw,
+ S_IRUGO | S_IWUSR,
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_CH1_HYST);
+static IIO_DEVICE_ATTR(in_voltage2_thresh_both_hyst_raw,
+ S_IRUGO | S_IWUSR,
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_CH2_HYST);
+static IIO_DEVICE_ATTR(in_voltage3_thresh_both_hyst_raw,
+ S_IRUGO | S_IWUSR,
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_CH3_HYST);
+static IIO_DEVICE_ATTR(in_voltage4_thresh_both_hyst_raw,
+ S_IRUGO | S_IWUSR,
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_CH4_HYST);
+static IIO_DEVICE_ATTR(in_voltage5_thresh_both_hyst_raw,
+ S_IRUGO | S_IWUSR,
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_CH5_HYST);
+static IIO_DEVICE_ATTR(in_voltage6_thresh_both_hyst_raw,
+ S_IRUGO | S_IWUSR,
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_CH6_HYST);
+static IIO_DEVICE_ATTR(in_voltage7_thresh_both_hyst_raw,
+ S_IRUGO | S_IWUSR,
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_CH7_HYST);
- if (bound_reg < AD7291_VOLTAGE_LIMIT_BASE ||
- bound_reg >= AD7291_VOLTAGE_LIMIT_BASE +
- AD7291_VOLTAGE_LIMIT_COUNT)
- return -EINVAL;
+static struct attribute *ad7291_event_attributes[] = {
+ &iio_dev_attr_in_temp0_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage0_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage1_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage2_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage3_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage4_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage5_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage6_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage7_thresh_both_hyst_raw.dev_attr.attr,
+ NULL,
+};
- ret = ad7291_i2c_read(chip, bound_reg, &data);
- if (ret)
- return -EIO;
+/* high / low */
+static u8 ad7291_limit_regs[9][2] = {
+ { AD7291_CH0_DATA_HIGH, AD7291_CH0_DATA_LOW },
+ { AD7291_CH1_DATA_HIGH, AD7291_CH1_DATA_LOW },
+ { AD7291_CH2_DATA_HIGH, AD7291_CH2_DATA_LOW },
+ { AD7291_CH3_DATA_HIGH, AD7291_CH3_DATA_LOW }, /* FIXME: ? */
+ { AD7291_CH4_DATA_HIGH, AD7291_CH4_DATA_LOW },
+ { AD7291_CH5_DATA_HIGH, AD7291_CH5_DATA_LOW },
+ { AD7291_CH6_DATA_HIGH, AD7291_CH6_DATA_LOW },
+ { AD7291_CH7_DATA_HIGH, AD7291_CH7_DATA_LOW },
+ /* temp */
+ { AD7291_T_SENSE_HIGH, AD7291_T_SENSE_LOW },
+};
- data &= AD7291_VALUE_MASK;
+static int ad7291_read_event_value(struct iio_dev *indio_dev,
+ u64 event_code,
+ int *val)
+{
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
- return sprintf(buf, "%d\n", data);
+ int ret;
+ u8 reg;
+ u16 uval;
+ s16 signval;
+
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_VOLTAGE:
+ reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_NUM(event_code)]
+ [!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING)];
+
+ ret = ad7291_i2c_read(chip, reg, &uval);
+ if (ret < 0)
+ return ret;
+ *val = uval & AD7291_VALUE_MASK;
+ return 0;
+
+ case IIO_TEMP:
+ reg = ad7291_limit_regs[8]
+ [!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING)];
+
+ ret = ad7291_i2c_read(chip, reg, &signval);
+ if (ret < 0)
+ return ret;
+ signval = (s16)((signval & AD7291_VALUE_MASK) << 4) >> 4;
+ *val = signval;
+ return 0;
+ default:
+ return -EINVAL;
+ };
}
-static inline ssize_t ad7291_set_v_bound(struct device *dev,
- struct device_attribute *attr,
- u8 bound_reg,
- const char *buf,
- size_t len)
+static int ad7291_write_event_value(struct iio_dev *indio_dev,
+ u64 event_code,
+ int val)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = iio_priv(dev_info);
- unsigned long value;
- u16 data;
- int ret;
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
+ u8 reg;
+ s16 signval;
- if (bound_reg < AD7291_VOLTAGE_LIMIT_BASE ||
- bound_reg >= AD7291_VOLTAGE_LIMIT_BASE +
- AD7291_VOLTAGE_LIMIT_COUNT)
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_VOLTAGE:
+ if (val > AD7291_VALUE_MASK || val < 0)
+ return -EINVAL;
+ reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_NUM(event_code)]
+ [!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING)];
+ return ad7291_i2c_write(chip, reg, val);
+ case IIO_TEMP:
+ if (val > 2047 || val < -2048)
+ return -EINVAL;
+ reg = ad7291_limit_regs[8]
+ [!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING)];
+ signval = val;
+ return ad7291_i2c_write(chip, reg, *(u16 *)&signval);
+ default:
return -EINVAL;
+ };
+}
- ret = strict_strtoul(buf, 10, &value);
-
- if (ret || value >= 4096)
+static int ad7291_read_event_config(struct iio_dev *indio_dev,
+ u64 event_code)
+{
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
+ /* To be enabled the channel must simply be on. If any are enabled
+ we are in continuous sampling mode */
+
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_VOLTAGE:
+ if (chip->c_mask &
+ (1 << (15 - IIO_EVENT_CODE_EXTRACT_NUM(event_code))))
+ return 1;
+ else
+ return 0;
+ case IIO_TEMP:
+ /* always on */
+ return 1;
+ default:
return -EINVAL;
+ }
- data = (u16)value;
- ret = ad7291_i2c_write(chip, bound_reg, data);
- if (ret)
- return -EIO;
+}
+static int ad7291_write_event_config(struct iio_dev *indio_dev,
+ u64 event_code,
+ int state)
+{
+ int ret = 0;
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
+ u16 regval;
+
+ mutex_lock(&chip->state_lock);
+ regval = chip->command;
+ /*
+ * To be enabled the channel must simply be on. If any are enabled
+ * use continuous sampling mode.
+ * Possible to disable temp as well but that makes single read tricky.
+ */
+
+ switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ case IIO_VOLTAGE:
+ if ((!state) && (chip->c_mask & (1 << (15 -
+ IIO_EVENT_CODE_EXTRACT_NUM(event_code)))))
+ chip->c_mask &= ~(1 << (15 - IIO_EVENT_CODE_EXTRACT_NUM
+ (event_code)));
+ else if (state && (!(chip->c_mask & (1 << (15 -
+ IIO_EVENT_CODE_EXTRACT_NUM(event_code))))))
+ chip->c_mask |= (1 << (15 - IIO_EVENT_CODE_EXTRACT_NUM
+ (event_code)));
+ else
+ break;
+
+ regval &= ~AD7291_AUTOCYCLE;
+ regval |= chip->c_mask;
+ if (chip->c_mask) /* Enable autocycle? */
+ regval |= AD7291_AUTOCYCLE;
+
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, regval);
+ if (ret < 0)
+ goto error_ret;
+
+ chip->command = regval;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+error_ret:
+ mutex_unlock(&chip->state_lock);
return ret;
}
-static IIO_DEVICE_ATTR(t_sense_high_value,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound,
- AD7291_T_SENSE_HIGH);
-static IIO_DEVICE_ATTR(t_sense_low_value,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound,
- AD7291_T_SENSE_LOW);
-static IIO_DEVICE_ATTR(t_sense_hyst_value,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound,
- AD7291_T_SENSE_HYST);
-static IIO_DEVICE_ATTR(v0_high,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x04);
-static IIO_DEVICE_ATTR(v0_low,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x05);
-static IIO_DEVICE_ATTR(v0_hyst,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x06);
-static IIO_DEVICE_ATTR(v1_high,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x07);
-static IIO_DEVICE_ATTR(v1_low,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x08);
-static IIO_DEVICE_ATTR(v1_hyst,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x09);
-static IIO_DEVICE_ATTR(v2_high,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x0A);
-static IIO_DEVICE_ATTR(v2_low,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x0B);
-static IIO_DEVICE_ATTR(v2_hyst,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x0C);
-static IIO_DEVICE_ATTR(v3_high,
- S_IRUGO | S_IWUSR,
- /* Datasheet suggests this one and this one only
- has the registers in different order */
- ad7291_show_t_bound, ad7291_set_t_bound, 0x0E);
-static IIO_DEVICE_ATTR(v3_low,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x0D);
-static IIO_DEVICE_ATTR(v3_hyst,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x0F);
-static IIO_DEVICE_ATTR(v4_high,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x10);
-static IIO_DEVICE_ATTR(v4_low,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x11);
-static IIO_DEVICE_ATTR(v4_hyst,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x12);
-static IIO_DEVICE_ATTR(v5_high,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x13);
-static IIO_DEVICE_ATTR(v5_low,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x14);
-static IIO_DEVICE_ATTR(v5_hyst,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x15);
-static IIO_DEVICE_ATTR(v6_high,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x16);
-static IIO_DEVICE_ATTR(v6_low,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x17);
-static IIO_DEVICE_ATTR(v6_hyst,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x18);
-static IIO_DEVICE_ATTR(v7_high,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x19);
-static IIO_DEVICE_ATTR(v7_low,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x1A);
-static IIO_DEVICE_ATTR(v7_hyst,
- S_IRUGO | S_IWUSR,
- ad7291_show_t_bound, ad7291_set_t_bound, 0x1B);
+static int ad7291_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ int ret;
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
+ unsigned int scale_uv;
+ u16 regval;
+ s16 signval;
+
+ switch (mask) {
+ case 0:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ mutex_lock(&chip->state_lock);
+ /* If in autocycle mode drop through */
+ if (chip->command & AD7291_AUTOCYCLE) {
+ mutex_unlock(&chip->state_lock);
+ return -EBUSY;
+ }
+ /* Enable this channel alone */
+ regval = chip->command & (~AD7291_VOLTAGE_MASK);
+ regval |= 1 << (15 - chan->channel);
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, regval);
+ if (ret < 0) {
+ mutex_unlock(&chip->state_lock);
+ return ret;
+ }
+ /* Read voltage */
+ ret = i2c_smbus_read_word_data(chip->client,
+ AD7291_VOLTAGE);
+ if (ret < 0) {
+ mutex_unlock(&chip->state_lock);
+ return ret;
+ }
+ *val = swab16((u16)ret) & AD7291_VALUE_MASK;
+ mutex_unlock(&chip->state_lock);
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ /* Assumes tsense bit of command register always set */
+ ret = i2c_smbus_read_word_data(chip->client,
+ AD7291_T_SENSE);
+ if (ret < 0)
+ return ret;
+ signval = (s16)((swab16((u16)ret) &
+ AD7291_VALUE_MASK) << 4) >> 4;
+ *val = signval;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE):
+ ret = i2c_smbus_read_word_data(chip->client,
+ AD7291_T_AVERAGE);
+ if (ret < 0)
+ return ret;
+ signval = (s16)((swab16((u16)ret) &
+ AD7291_VALUE_MASK) << 4) >> 4;
+ *val = signval;
+ return IIO_VAL_INT;
+ case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ scale_uv = (chip->int_vref_mv * 1000) >> AD7291_BITS;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ /*
+ * One LSB of the ADC corresponds to 0.25 deg C.
+ * The temperature reading is in 12-bit twos complement format
+ */
+ *val = 250;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
-static struct attribute *ad7291_event_attributes[] = {
- &iio_dev_attr_t_sense_high_value.dev_attr.attr,
- &iio_dev_attr_t_sense_low_value.dev_attr.attr,
- &iio_dev_attr_t_sense_hyst_value.dev_attr.attr,
- &iio_dev_attr_v0_high.dev_attr.attr,
- &iio_dev_attr_v0_low.dev_attr.attr,
- &iio_dev_attr_v0_hyst.dev_attr.attr,
- &iio_dev_attr_v1_high.dev_attr.attr,
- &iio_dev_attr_v1_low.dev_attr.attr,
- &iio_dev_attr_v1_hyst.dev_attr.attr,
- &iio_dev_attr_v2_high.dev_attr.attr,
- &iio_dev_attr_v2_low.dev_attr.attr,
- &iio_dev_attr_v2_hyst.dev_attr.attr,
- &iio_dev_attr_v3_high.dev_attr.attr,
- &iio_dev_attr_v3_low.dev_attr.attr,
- &iio_dev_attr_v3_hyst.dev_attr.attr,
- &iio_dev_attr_v4_high.dev_attr.attr,
- &iio_dev_attr_v4_low.dev_attr.attr,
- &iio_dev_attr_v4_hyst.dev_attr.attr,
- &iio_dev_attr_v5_high.dev_attr.attr,
- &iio_dev_attr_v5_low.dev_attr.attr,
- &iio_dev_attr_v5_hyst.dev_attr.attr,
- &iio_dev_attr_v6_high.dev_attr.attr,
- &iio_dev_attr_v6_low.dev_attr.attr,
- &iio_dev_attr_v6_hyst.dev_attr.attr,
- &iio_dev_attr_v7_high.dev_attr.attr,
- &iio_dev_attr_v7_low.dev_attr.attr,
- &iio_dev_attr_v7_hyst.dev_attr.attr,
- NULL,
+#define AD7291_VOLTAGE_CHAN(_chan) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .indexed = 1, \
+ .channel = _chan, \
+ .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING)|\
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING) \
+}
+
+static const struct iio_chan_spec ad7291_channels[] = {
+ AD7291_VOLTAGE_CHAN(0),
+ AD7291_VOLTAGE_CHAN(1),
+ AD7291_VOLTAGE_CHAN(2),
+ AD7291_VOLTAGE_CHAN(3),
+ AD7291_VOLTAGE_CHAN(4),
+ AD7291_VOLTAGE_CHAN(5),
+ AD7291_VOLTAGE_CHAN(6),
+ AD7291_VOLTAGE_CHAN(7),
+ {
+ .type = IIO_TEMP,
+ .info_mask = (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .indexed = 1,
+ .channel = 0,
+ .event_mask =
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING)|
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING)
+ }
};
static struct attribute_group ad7291_event_attribute_group = {
@@ -779,20 +563,20 @@ static struct attribute_group ad7291_event_attribute_group = {
static const struct iio_info ad7291_info = {
.attrs = &ad7291_attribute_group,
- .num_interrupt_lines = 1,
+ .read_raw = &ad7291_read_raw,
+ .read_event_config = &ad7291_read_event_config,
+ .write_event_config = &ad7291_write_event_config,
+ .read_event_value = &ad7291_read_event_value,
+ .write_event_value = &ad7291_write_event_value,
.event_attrs = &ad7291_event_attribute_group,
};
-/*
- * device probe and remove
- */
-
static int __devinit ad7291_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ad7291_chip_info *chip;
struct iio_dev *indio_dev;
- int ret = 0;
+ int ret = 0, voltage_uv = 0;
indio_dev = iio_allocate_device(sizeof(*chip));
if (indio_dev == NULL) {
@@ -800,20 +584,51 @@ static int __devinit ad7291_probe(struct i2c_client *client,
goto error_ret;
}
chip = iio_priv(indio_dev);
+
+ chip->reg = regulator_get(&client->dev, "vcc");
+ if (!IS_ERR(chip->reg)) {
+ ret = regulator_enable(chip->reg);
+ if (ret)
+ goto error_put_reg;
+ voltage_uv = regulator_get_voltage(chip->reg);
+ }
+
+ mutex_init(&chip->state_lock);
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
chip->client = client;
- chip->command = AD7291_NOISE_DELAY | AD7291_T_SENSE_MASK;
+
+ chip->command = AD7291_NOISE_DELAY |
+ AD7291_T_SENSE_MASK | /* Tsense always enabled */
+ AD7291_ALERT_POLARITY; /* set irq polarity low level */
+
+ if (voltage_uv) {
+ chip->int_vref_mv = voltage_uv / 1000;
+ chip->command |= AD7291_EXT_REF;
+ } else {
+ chip->int_vref_mv = 2500; /* Build-in ref */
+ }
indio_dev->name = id->name;
+ indio_dev->channels = ad7291_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7291_channels);
+
indio_dev->dev.parent = &client->dev;
indio_dev->info = &ad7291_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, AD7291_RESET);
+ if (ret) {
+ ret = -EIO;
+ goto error_disable_reg;
+ }
+
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, chip->command);
+ if (ret) {
+ ret = -EIO;
+ goto error_disable_reg;
+ }
if (client->irq > 0) {
ret = request_threaded_irq(client->irq,
@@ -823,28 +638,28 @@ static int __devinit ad7291_probe(struct i2c_client *client,
id->name,
indio_dev);
if (ret)
- goto error_unreg_dev;
-
- /* set irq polarity low level */
- chip->command |= AD7291_ALART_POLARITY;
+ goto error_disable_reg;
}
- ret = ad7291_i2c_write(chip, AD7291_COMMAND, chip->command);
- if (ret) {
- ret = -EIO;
+ ret = iio_device_register(indio_dev);
+ if (ret)
goto error_unreg_irq;
- }
- dev_info(&client->dev, "%s temperature sensor registered.\n",
+ dev_info(&client->dev, "%s ADC registered.\n",
id->name);
return 0;
error_unreg_irq:
- free_irq(client->irq, indio_dev);
-error_unreg_dev:
- iio_device_unregister(indio_dev);
-error_free_dev:
+ if (client->irq)
+ free_irq(client->irq, indio_dev);
+error_disable_reg:
+ if (!IS_ERR(chip->reg))
+ regulator_disable(chip->reg);
+error_put_reg:
+ if (!IS_ERR(chip->reg))
+ regulator_put(chip->reg);
+
iio_free_device(indio_dev);
error_ret:
return ret;
@@ -853,10 +668,18 @@ error_ret:
static int __devexit ad7291_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
if (client->irq)
free_irq(client->irq, indio_dev);
- iio_device_unregister(indio_dev);
+
+ if (!IS_ERR(chip->reg)) {
+ regulator_disable(chip->reg);
+ regulator_put(chip->reg);
+ }
+
iio_free_device(indio_dev);
return 0;
@@ -871,7 +694,7 @@ MODULE_DEVICE_TABLE(i2c, ad7291_id);
static struct i2c_driver ad7291_driver = {
.driver = {
- .name = "ad7291",
+ .name = KBUILD_MODNAME,
},
.probe = ad7291_probe,
.remove = __devexit_p(ad7291_remove),
@@ -889,8 +712,7 @@ static __exit void ad7291_exit(void)
}
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
-MODULE_DESCRIPTION("Analog Devices AD7291 digital"
- " temperature sensor driver");
+MODULE_DESCRIPTION("Analog Devices AD7291 ADC driver");
MODULE_LICENSE("GPL v2");
module_init(ad7291_init);
diff --git a/drivers/staging/iio/adc/ad7298.h b/drivers/staging/iio/adc/ad7298.h
index 628f5adcf0c..9ddf5cf0e51 100644
--- a/drivers/staging/iio/adc/ad7298.h
+++ b/drivers/staging/iio/adc/ad7298.h
@@ -53,11 +53,11 @@ struct ad7298_state {
unsigned short tx_buf[2];
};
-#ifdef CONFIG_IIO_RING_BUFFER
+#ifdef CONFIG_IIO_BUFFER
int ad7298_scan_from_ring(struct iio_dev *indio_dev, long ch);
int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7298_ring_cleanup(struct iio_dev *indio_dev);
-#else /* CONFIG_IIO_RING_BUFFER */
+#else /* CONFIG_IIO_BUFFER */
static inline int ad7298_scan_from_ring(struct iio_dev *indio_dev, long ch)
{
return 0;
@@ -72,5 +72,5 @@ ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev)
static inline void ad7298_ring_cleanup(struct iio_dev *indio_dev)
{
}
-#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* CONFIG_IIO_BUFFER */
#endif /* IIO_ADC_AD7298_H_ */
diff --git a/drivers/staging/iio/adc/ad7298_core.c b/drivers/staging/iio/adc/ad7298_core.c
index b8e4ae29b0b..c1de73a1ca9 100644
--- a/drivers/staging/iio/adc/ad7298_core.c
+++ b/drivers/staging/iio/adc/ad7298_core.c
@@ -14,11 +14,11 @@
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
-#include "adc.h"
+#include "../buffer_generic.h"
#include "ad7298.h"
@@ -26,28 +26,28 @@ static struct iio_chan_spec ad7298_channels[] = {
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
9, AD7298_CH_TEMP, IIO_ST('s', 32, 32, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('u', 12, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
1, 1, IIO_ST('u', 12, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 2, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
2, 2, IIO_ST('u', 12, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 3, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
3, 3, IIO_ST('u', 12, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 4, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 4, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
4, 4, IIO_ST('u', 12, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 5, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 5, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
5, 5, IIO_ST('u', 12, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 6, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 6, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
6, 6, IIO_ST('u', 12, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 7, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 7, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
7, 7, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(8),
@@ -109,24 +109,24 @@ static int ad7298_scan_temp(struct ad7298_state *st, int *val)
return 0;
}
-static int ad7298_read_raw(struct iio_dev *dev_info,
+static int ad7298_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
long m)
{
int ret;
- struct ad7298_state *st = iio_priv(dev_info);
+ struct ad7298_state *st = iio_priv(indio_dev);
unsigned int scale_uv;
switch (m) {
case 0:
- mutex_lock(&dev_info->mlock);
- if (iio_ring_enabled(dev_info)) {
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev)) {
if (chan->address == AD7298_CH_TEMP)
ret = -ENODEV;
else
- ret = ad7298_scan_from_ring(dev_info,
+ ret = ad7298_scan_from_ring(indio_dev,
chan->address);
} else {
if (chan->address == AD7298_CH_TEMP)
@@ -134,7 +134,7 @@ static int ad7298_read_raw(struct iio_dev *dev_info,
else
ret = ad7298_scan_direct(st, chan->address);
}
- mutex_unlock(&dev_info->mlock);
+ mutex_unlock(&indio_dev->mlock);
if (ret < 0)
return ret;
@@ -165,7 +165,7 @@ static int __devinit ad7298_probe(struct spi_device *spi)
{
struct ad7298_platform_data *pdata = spi->dev.platform_data;
struct ad7298_state *st;
- int ret, regdone = 0;
+ int ret;
struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));
if (indio_dev == NULL)
@@ -218,19 +218,19 @@ static int __devinit ad7298_probe(struct spi_device *spi)
if (ret)
goto error_disable_reg;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_disable_reg;
- regdone = 1;
-
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- &ad7298_channels[1], /* skip temp0 */
- ARRAY_SIZE(ad7298_channels) - 1);
+ ret = iio_buffer_register(indio_dev,
+ &ad7298_channels[1], /* skip temp0 */
+ ARRAY_SIZE(ad7298_channels) - 1);
if (ret)
goto error_cleanup_ring;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unregister_ring;
return 0;
+error_unregister_ring:
+ iio_buffer_unregister(indio_dev);
error_cleanup_ring:
ad7298_ring_cleanup(indio_dev);
error_disable_reg:
@@ -239,11 +239,7 @@ error_disable_reg:
error_put_reg:
if (!IS_ERR(st->reg))
regulator_put(st->reg);
-
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
return ret;
}
@@ -253,14 +249,14 @@ static int __devexit ad7298_remove(struct spi_device *spi)
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7298_state *st = iio_priv(indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
- ad7298_ring_cleanup(indio_dev);
iio_device_unregister(indio_dev);
+ iio_buffer_unregister(indio_dev);
+ ad7298_ring_cleanup(indio_dev);
if (!IS_ERR(st->reg)) {
regulator_disable(st->reg);
regulator_put(st->reg);
}
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7298_ring.c b/drivers/staging/iio/adc/ad7298_ring.c
index a04c0335262..47630d506a6 100644
--- a/drivers/staging/iio/adc/ad7298_ring.c
+++ b/drivers/staging/iio/adc/ad7298_ring.c
@@ -7,27 +7,24 @@
*/
#include <linux/interrupt.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../ring_generic.h"
+#include "../buffer_generic.h"
#include "../ring_sw.h"
-#include "../trigger.h"
-#include "../sysfs.h"
+#include "../trigger_consumer.h"
#include "ad7298.h"
-int ad7298_scan_from_ring(struct iio_dev *dev_info, long ch)
+int ad7298_scan_from_ring(struct iio_dev *indio_dev, long ch)
{
- struct iio_ring_buffer *ring = dev_info->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
int ret;
u16 *ring_data;
- if (!(ring->scan_mask & (1 << ch))) {
+ if (!(test_bit(ch, ring->scan_mask))) {
ret = -EBUSY;
goto error_ret;
}
@@ -60,7 +57,7 @@ error_ret:
static int ad7298_ring_preenable(struct iio_dev *indio_dev)
{
struct ad7298_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
size_t d_size;
int i, m;
unsigned short command;
@@ -82,7 +79,7 @@ static int ad7298_ring_preenable(struct iio_dev *indio_dev)
command = AD7298_WRITE | st->ext_ref;
for (i = 0, m = AD7298_CH(0); i < AD7298_MAX_CHAN; i++, m >>= 1)
- if (ring->scan_mask & (1 << i))
+ if (test_bit(i, ring->scan_mask))
command |= m;
st->tx_buf[0] = cpu_to_be16(command);
@@ -120,9 +117,9 @@ static int ad7298_ring_preenable(struct iio_dev *indio_dev)
static irqreturn_t ad7298_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
+ struct iio_dev *indio_dev = pf->indio_dev;
struct ad7298_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
s64 time_ns;
__u16 buf[16];
int b_sent, i;
@@ -140,29 +137,29 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p)
for (i = 0; i < ring->scan_count; i++)
buf[i] = be16_to_cpu(st->rx_buf[i]);
- indio_dev->ring->access->store_to(ring, (u8 *)buf, time_ns);
+ indio_dev->buffer->access->store_to(ring, (u8 *)buf, time_ns);
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
}
-static const struct iio_ring_setup_ops ad7298_ring_setup_ops = {
+static const struct iio_buffer_setup_ops ad7298_ring_setup_ops = {
.preenable = &ad7298_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
};
int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
int ret;
- indio_dev->ring = iio_sw_rb_allocate(indio_dev);
- if (!indio_dev->ring) {
+ indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
+ if (!indio_dev->buffer) {
ret = -ENOMEM;
goto error_ret;
}
/* Effectively select the ring buffer implementation */
- indio_dev->ring->access = &ring_sw_access_funcs;
+ indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
&ad7298_trigger_handler,
@@ -177,26 +174,21 @@ int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->ring->setup_ops = &ad7298_ring_setup_ops;
- indio_dev->ring->scan_timestamp = true;
+ indio_dev->buffer->setup_ops = &ad7298_ring_setup_ops;
+ indio_dev->buffer->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
error_ret:
return ret;
}
void ad7298_ring_cleanup(struct iio_dev *indio_dev)
{
- if (indio_dev->trig) {
- iio_put_trigger(indio_dev->trig);
- iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc);
- }
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
diff --git a/drivers/staging/iio/adc/ad7314.c b/drivers/staging/iio/adc/ad7314.c
deleted file mode 100644
index 9070d9cac72..00000000000
--- a/drivers/staging/iio/adc/ad7314.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * AD7314 digital temperature sensor driver for AD7314, ADT7301 and ADT7302
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/spi/spi.h>
-
-#include "../iio.h"
-#include "../sysfs.h"
-
-/*
- * AD7314 power mode
- */
-#define AD7314_PD 0x2000
-
-/*
- * AD7314 temperature masks
- */
-#define AD7314_TEMP_SIGN 0x200
-#define AD7314_TEMP_MASK 0x7FE0
-#define AD7314_TEMP_OFFSET 5
-#define AD7314_TEMP_FLOAT_OFFSET 2
-#define AD7314_TEMP_FLOAT_MASK 0x3
-
-/*
- * ADT7301 and ADT7302 temperature masks
- */
-#define ADT7301_TEMP_SIGN 0x2000
-#define ADT7301_TEMP_MASK 0x2FFF
-#define ADT7301_TEMP_FLOAT_OFFSET 5
-#define ADT7301_TEMP_FLOAT_MASK 0x1F
-
-/*
- * struct ad7314_chip_info - chip specifc information
- */
-
-struct ad7314_chip_info {
- struct spi_device *spi_dev;
- s64 last_timestamp;
- u8 mode;
-};
-
-/*
- * ad7314 register access by SPI
- */
-
-static int ad7314_spi_read(struct ad7314_chip_info *chip, u16 *data)
-{
- struct spi_device *spi_dev = chip->spi_dev;
- int ret = 0;
- u16 value;
-
- ret = spi_read(spi_dev, (u8 *)&value, sizeof(value));
- if (ret < 0) {
- dev_err(&spi_dev->dev, "SPI read error\n");
- return ret;
- }
-
- *data = be16_to_cpu((u16)value);
-
- return ret;
-}
-
-static int ad7314_spi_write(struct ad7314_chip_info *chip, u16 data)
-{
- struct spi_device *spi_dev = chip->spi_dev;
- int ret = 0;
- u16 value = cpu_to_be16(data);
-
- ret = spi_write(spi_dev, (u8 *)&value, sizeof(value));
- if (ret < 0)
- dev_err(&spi_dev->dev, "SPI write error\n");
-
- return ret;
-}
-
-static ssize_t ad7314_show_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7314_chip_info *chip = iio_priv(dev_info);
-
- if (chip->mode)
- return sprintf(buf, "power-save\n");
- else
- return sprintf(buf, "full\n");
-}
-
-static ssize_t ad7314_store_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7314_chip_info *chip = iio_priv(dev_info);
- u16 mode = 0;
- int ret;
-
- if (!strcmp(buf, "full"))
- mode = AD7314_PD;
-
- ret = ad7314_spi_write(chip, mode);
- if (ret)
- return -EIO;
-
- chip->mode = mode;
-
- return len;
-}
-
-static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
- ad7314_show_mode,
- ad7314_store_mode,
- 0);
-
-static ssize_t ad7314_show_available_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "full\npower-save\n");
-}
-
-static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7314_show_available_modes, NULL, 0);
-
-static ssize_t ad7314_show_temperature(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7314_chip_info *chip = iio_priv(dev_info);
- u16 data;
- char sign = ' ';
- int ret;
-
- if (chip->mode) {
- ret = ad7314_spi_write(chip, 0);
- if (ret)
- return -EIO;
- }
-
- ret = ad7314_spi_read(chip, &data);
- if (ret)
- return -EIO;
-
- if (chip->mode)
- ad7314_spi_write(chip, chip->mode);
-
- if (strcmp(dev_info->name, "ad7314")) {
- data = (data & AD7314_TEMP_MASK) >>
- AD7314_TEMP_OFFSET;
- if (data & AD7314_TEMP_SIGN) {
- data = (AD7314_TEMP_SIGN << 1) - data;
- sign = '-';
- }
-
- return sprintf(buf, "%c%d.%.2d\n", sign,
- data >> AD7314_TEMP_FLOAT_OFFSET,
- (data & AD7314_TEMP_FLOAT_MASK) * 25);
- } else {
- data &= ADT7301_TEMP_MASK;
- if (data & ADT7301_TEMP_SIGN) {
- data = (ADT7301_TEMP_SIGN << 1) - data;
- sign = '-';
- }
-
- return sprintf(buf, "%c%d.%.5d\n", sign,
- data >> ADT7301_TEMP_FLOAT_OFFSET,
- (data & ADT7301_TEMP_FLOAT_MASK) * 3125);
- }
-}
-
-static IIO_DEVICE_ATTR(temperature, S_IRUGO, ad7314_show_temperature, NULL, 0);
-
-static struct attribute *ad7314_attributes[] = {
- &iio_dev_attr_available_modes.dev_attr.attr,
- &iio_dev_attr_mode.dev_attr.attr,
- &iio_dev_attr_temperature.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad7314_attribute_group = {
- .attrs = ad7314_attributes,
-};
-
-static const struct iio_info ad7314_info = {
- .attrs = &ad7314_attribute_group,
- .driver_module = THIS_MODULE,
-};
-/*
- * device probe and remove
- */
-
-static int __devinit ad7314_probe(struct spi_device *spi_dev)
-{
- struct ad7314_chip_info *chip;
- struct iio_dev *indio_dev;
- int ret = 0;
-
- indio_dev = iio_allocate_device(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- chip = iio_priv(indio_dev);
- /* this is only used for device removal purposes */
- dev_set_drvdata(&spi_dev->dev, chip);
-
- chip->spi_dev = spi_dev;
-
- indio_dev->name = spi_get_device_id(spi_dev)->name;
- indio_dev->dev.parent = &spi_dev->dev;
- indio_dev->info = &ad7314_info;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
-
- dev_info(&spi_dev->dev, "%s temperature sensor registered.\n",
- indio_dev->name);
-
- return 0;
-error_free_dev:
- iio_free_device(indio_dev);
-error_ret:
- return ret;
-}
-
-static int __devexit ad7314_remove(struct spi_device *spi_dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(&spi_dev->dev);
-
- dev_set_drvdata(&spi_dev->dev, NULL);
- iio_device_unregister(indio_dev);
- iio_free_device(indio_dev);
-
- return 0;
-}
-
-static const struct spi_device_id ad7314_id[] = {
- { "adt7301", 0 },
- { "adt7302", 0 },
- { "ad7314", 0 },
- {}
-};
-
-static struct spi_driver ad7314_driver = {
- .driver = {
- .name = "ad7314",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
- },
- .probe = ad7314_probe,
- .remove = __devexit_p(ad7314_remove),
- .id_table = ad7314_id,
-};
-
-static __init int ad7314_init(void)
-{
- return spi_register_driver(&ad7314_driver);
-}
-
-static __exit void ad7314_exit(void)
-{
- spi_unregister_driver(&ad7314_driver);
-}
-
-MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
-MODULE_DESCRIPTION("Analog Devices AD7314, ADT7301 and ADT7302 digital"
- " temperature sensor driver");
-MODULE_LICENSE("GPL v2");
-
-module_init(ad7314_init);
-module_exit(ad7314_exit);
diff --git a/drivers/staging/iio/adc/ad7476.h b/drivers/staging/iio/adc/ad7476.h
index 0d44976e846..60142926565 100644
--- a/drivers/staging/iio/adc/ad7476.h
+++ b/drivers/staging/iio/adc/ad7476.h
@@ -49,11 +49,11 @@ enum ad7476_supported_device_ids {
ID_AD7495
};
-#ifdef CONFIG_IIO_RING_BUFFER
+#ifdef CONFIG_IIO_BUFFER
int ad7476_scan_from_ring(struct iio_dev *indio_dev);
int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7476_ring_cleanup(struct iio_dev *indio_dev);
-#else /* CONFIG_IIO_RING_BUFFER */
+#else /* CONFIG_IIO_BUFFER */
static inline int ad7476_scan_from_ring(struct iio_dev *indio_dev)
{
return 0;
@@ -68,5 +68,5 @@ ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
static inline void ad7476_ring_cleanup(struct iio_dev *indio_dev)
{
}
-#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* CONFIG_IIO_BUFFER */
#endif /* IIO_ADC_AD7476_H_ */
diff --git a/drivers/staging/iio/adc/ad7476_core.c b/drivers/staging/iio/adc/ad7476_core.c
index c21089894d2..fd79facc6ca 100644
--- a/drivers/staging/iio/adc/ad7476_core.c
+++ b/drivers/staging/iio/adc/ad7476_core.c
@@ -13,11 +13,11 @@
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
-#include "adc.h"
+#include "../buffer_generic.h"
#include "ad7476.h"
@@ -32,24 +32,24 @@ static int ad7476_scan_direct(struct ad7476_state *st)
return (st->data[0] << 8) | st->data[1];
}
-static int ad7476_read_raw(struct iio_dev *dev_info,
+static int ad7476_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
long m)
{
int ret;
- struct ad7476_state *st = iio_priv(dev_info);
+ struct ad7476_state *st = iio_priv(indio_dev);
unsigned int scale_uv;
switch (m) {
case 0:
- mutex_lock(&dev_info->mlock);
- if (iio_ring_enabled(dev_info))
- ret = ad7476_scan_from_ring(dev_info);
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev))
+ ret = ad7476_scan_from_ring(indio_dev);
else
ret = ad7476_scan_direct(st);
- mutex_unlock(&dev_info->mlock);
+ mutex_unlock(&indio_dev->mlock);
if (ret < 0)
return ret;
@@ -68,49 +68,49 @@ static int ad7476_read_raw(struct iio_dev *dev_info,
static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
[ID_AD7466] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
+ .channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7467] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
+ .channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('u', 10, 16, 2), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7468] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1 , 0, NULL, 0, 0,
+ .channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1 , 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('u', 8, 16, 4), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7475] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
+ .channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7476] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
+ .channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7477] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
+ .channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('u', 10, 16, 2), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7478] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
+ .channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('u', 8, 16, 4), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7495] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
+ .channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
@@ -129,8 +129,6 @@ static int __devinit ad7476_probe(struct spi_device *spi)
struct ad7476_state *st;
struct iio_dev *indio_dev;
int ret, voltage_uv = 0;
- bool reg_done = false;
- struct regulator *reg;
indio_dev = iio_allocate_device(sizeof(*st));
if (indio_dev == NULL) {
@@ -138,15 +136,14 @@ static int __devinit ad7476_probe(struct spi_device *spi)
goto error_ret;
}
st = iio_priv(indio_dev);
- reg = regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(reg)) {
- ret = regulator_enable(reg);
+ st->reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(reg);
+ voltage_uv = regulator_get_voltage(st->reg);
}
- st->reg = reg;
st->chip_info =
&ad7476_chip_info_tbl[spi_get_device_id(spi)->driver_data];
@@ -182,28 +179,29 @@ static int __devinit ad7476_probe(struct spi_device *spi)
if (ret)
goto error_disable_reg;
- ret = iio_device_register(indio_dev);
+ ret = iio_buffer_register(indio_dev,
+ st->chip_info->channel,
+ ARRAY_SIZE(st->chip_info->channel));
if (ret)
- goto error_disable_reg;
+ goto error_cleanup_ring;
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- st->chip_info->channel,
- ARRAY_SIZE(st->chip_info->channel));
+ ret = iio_device_register(indio_dev);
if (ret)
- goto error_cleanup_ring;
+ goto error_ring_unregister;
return 0;
+error_ring_unregister:
+ iio_buffer_unregister(indio_dev);
error_cleanup_ring:
ad7476_ring_cleanup(indio_dev);
- iio_device_unregister(indio_dev);
error_disable_reg:
- if (!IS_ERR(reg))
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
error_put_reg:
- if (!IS_ERR(reg))
- regulator_put(reg);
- if (!reg_done)
- iio_free_device(indio_dev);
+ if (!IS_ERR(st->reg))
+ regulator_put(st->reg);
+ iio_free_device(indio_dev);
+
error_ret:
return ret;
}
@@ -212,16 +210,15 @@ static int ad7476_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7476_state *st = iio_priv(indio_dev);
- /* copy needed as st will have been freed */
- struct regulator *reg = st->reg;
- iio_ring_buffer_unregister(indio_dev->ring);
- ad7476_ring_cleanup(indio_dev);
iio_device_unregister(indio_dev);
- if (!IS_ERR(reg)) {
- regulator_disable(reg);
- regulator_put(reg);
+ iio_buffer_unregister(indio_dev);
+ ad7476_ring_cleanup(indio_dev);
+ if (!IS_ERR(st->reg)) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
}
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7476_ring.c b/drivers/staging/iio/adc/ad7476_ring.c
index a92fc5a1a60..e82c1a433f4 100644
--- a/drivers/staging/iio/adc/ad7476_ring.c
+++ b/drivers/staging/iio/adc/ad7476_ring.c
@@ -11,20 +11,18 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../ring_generic.h"
+#include "../buffer_generic.h"
#include "../ring_sw.h"
-#include "../trigger.h"
-#include "../sysfs.h"
+#include "../trigger_consumer.h"
#include "ad7476.h"
int ad7476_scan_from_ring(struct iio_dev *indio_dev)
{
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
int ret;
u8 *ring_data;
@@ -56,7 +54,7 @@ error_ret:
static int ad7476_ring_preenable(struct iio_dev *indio_dev)
{
struct ad7476_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
st->d_size = ring->scan_count *
st->chip_info->channel[0].scan_type.storagebits / 8;
@@ -68,9 +66,9 @@ static int ad7476_ring_preenable(struct iio_dev *indio_dev)
st->d_size += sizeof(s64) - (st->d_size % sizeof(s64));
}
- if (indio_dev->ring->access->set_bytes_per_datum)
- indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring,
- st->d_size);
+ if (indio_dev->buffer->access->set_bytes_per_datum)
+ indio_dev->buffer->access->
+ set_bytes_per_datum(indio_dev->buffer, st->d_size);
return 0;
}
@@ -78,7 +76,7 @@ static int ad7476_ring_preenable(struct iio_dev *indio_dev)
static irqreturn_t ad7476_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
+ struct iio_dev *indio_dev = pf->indio_dev;
struct ad7476_state *st = iio_priv(indio_dev);
s64 time_ns;
__u8 *rxbuf;
@@ -95,11 +93,11 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
time_ns = iio_get_time_ns();
- if (indio_dev->ring->scan_timestamp)
+ if (indio_dev->buffer->scan_timestamp)
memcpy(rxbuf + st->d_size - sizeof(s64),
&time_ns, sizeof(time_ns));
- indio_dev->ring->access->store_to(indio_dev->ring, rxbuf, time_ns);
+ indio_dev->buffer->access->store_to(indio_dev->buffer, rxbuf, time_ns);
done:
iio_trigger_notify_done(indio_dev->trig);
kfree(rxbuf);
@@ -107,10 +105,10 @@ done:
return IRQ_HANDLED;
}
-static const struct iio_ring_setup_ops ad7476_ring_setup_ops = {
+static const struct iio_buffer_setup_ops ad7476_ring_setup_ops = {
.preenable = &ad7476_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
};
int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
@@ -118,13 +116,13 @@ int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
struct ad7476_state *st = iio_priv(indio_dev);
int ret = 0;
- indio_dev->ring = iio_sw_rb_allocate(indio_dev);
- if (!indio_dev->ring) {
+ indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
+ if (!indio_dev->buffer) {
ret = -ENOMEM;
goto error_ret;
}
/* Effectively select the ring buffer implementation */
- indio_dev->ring->access = &ring_sw_access_funcs;
+ indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc
= iio_alloc_pollfunc(NULL,
&ad7476_trigger_handler,
@@ -139,27 +137,21 @@ int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->ring->setup_ops = &ad7476_ring_setup_ops;
- indio_dev->ring->scan_timestamp = true;
+ indio_dev->buffer->setup_ops = &ad7476_ring_setup_ops;
+ indio_dev->buffer->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
error_ret:
return ret;
}
void ad7476_ring_cleanup(struct iio_dev *indio_dev)
{
- /* ensure that the trigger has been detached */
- if (indio_dev->trig) {
- iio_put_trigger(indio_dev->trig);
- iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc);
- }
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index b8b3d8ef1ff..35018c3f518 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -50,8 +50,6 @@ struct ad7606_platform_data {
struct ad7606_chip_info {
const char *name;
- u8 bits;
- char sign;
u16 int_vref_mv;
struct iio_chan_spec *channels;
unsigned num_channels;
@@ -68,18 +66,10 @@ struct ad7606_state {
struct regulator *reg;
struct work_struct poll_work;
wait_queue_head_t wq_data_avail;
- size_t d_size;
const struct ad7606_bus_ops *bops;
- int irq;
- unsigned id;
unsigned range;
unsigned oversampling;
bool done;
- bool have_frstdata;
- bool have_os;
- bool have_stby;
- bool have_reset;
- bool have_range;
void __iomem *base_address;
/*
@@ -100,7 +90,7 @@ void ad7606_resume(struct iio_dev *indio_dev);
struct iio_dev *ad7606_probe(struct device *dev, int irq,
void __iomem *base_address, unsigned id,
const struct ad7606_bus_ops *bops);
-int ad7606_remove(struct iio_dev *indio_dev);
+int ad7606_remove(struct iio_dev *indio_dev, int irq);
int ad7606_reset(struct ad7606_state *st);
enum ad7606_supported_device_ids {
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 459371ae4dc..54423ab196f 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -16,17 +16,17 @@
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/sched.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
-#include "adc.h"
+#include "../buffer_generic.h"
#include "ad7606.h"
int ad7606_reset(struct ad7606_state *st)
{
- if (st->have_reset) {
+ if (gpio_is_valid(st->pdata->gpio_reset)) {
gpio_set_value(st->pdata->gpio_reset, 1);
ndelay(100); /* t_reset >= 100ns */
gpio_set_value(st->pdata->gpio_reset, 0);
@@ -48,7 +48,7 @@ static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned ch)
if (ret)
goto error_ret;
- if (st->have_frstdata) {
+ if (gpio_is_valid(st->pdata->gpio_frstdata)) {
ret = st->bops->read_block(st->dev, 1, st->data);
if (ret)
goto error_ret;
@@ -90,7 +90,7 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
switch (m) {
case 0:
mutex_lock(&indio_dev->mlock);
- if (iio_ring_enabled(indio_dev))
+ if (iio_buffer_enabled(indio_dev))
ret = ad7606_scan_from_ring(indio_dev, chan->address);
else
ret = ad7606_scan_direct(indio_dev, chan->address);
@@ -140,9 +140,9 @@ static ssize_t ad7606_store_range(struct device *dev,
return count;
}
-static IIO_DEVICE_ATTR(range, S_IRUGO | S_IWUSR, \
+static IIO_DEVICE_ATTR(in_voltage_range, S_IRUGO | S_IWUSR, \
ad7606_show_range, ad7606_store_range, 0);
-static IIO_CONST_ATTR(range_available, "5000 10000");
+static IIO_CONST_ATTR(in_voltage_range_available, "5000 10000");
static ssize_t ad7606_show_oversampling_ratio(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -198,8 +198,8 @@ static IIO_DEVICE_ATTR(oversampling_ratio, S_IRUGO | S_IWUSR,
static IIO_CONST_ATTR(oversampling_ratio_available, "0 2 4 8 16 32 64");
static struct attribute *ad7606_attributes[] = {
- &iio_dev_attr_range.dev_attr.attr,
- &iio_const_attr_range_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage_range.dev_attr.attr,
+ &iio_const_attr_in_voltage_range_available.dev_attr.attr,
&iio_dev_attr_oversampling_ratio.dev_attr.attr,
&iio_const_attr_oversampling_ratio_available.dev_attr.attr,
NULL,
@@ -214,15 +214,18 @@ static mode_t ad7606_attr_is_visible(struct kobject *kobj,
mode_t mode = attr->mode;
- if (!st->have_os &&
- (attr == &iio_dev_attr_oversampling_ratio.dev_attr.attr ||
- attr ==
- &iio_const_attr_oversampling_ratio_available.dev_attr.attr))
+ if (!(gpio_is_valid(st->pdata->gpio_os0) &&
+ gpio_is_valid(st->pdata->gpio_os1) &&
+ gpio_is_valid(st->pdata->gpio_os2)) &&
+ (attr == &iio_dev_attr_oversampling_ratio.dev_attr.attr ||
+ attr ==
+ &iio_const_attr_oversampling_ratio_available.dev_attr.attr))
+ mode = 0;
+ else if (!gpio_is_valid(st->pdata->gpio_range) &&
+ (attr == &iio_dev_attr_in_voltage_range.dev_attr.attr ||
+ attr ==
+ &iio_const_attr_in_voltage_range_available.dev_attr.attr))
mode = 0;
- else if (!st->have_range &&
- (attr == &iio_dev_attr_range.dev_attr.attr ||
- attr == &iio_const_attr_range_available.dev_attr.attr))
- mode = 0;
return mode;
}
@@ -232,69 +235,43 @@ static const struct attribute_group ad7606_attribute_group = {
.is_visible = ad7606_attr_is_visible,
};
+#define AD7606_CHANNEL(num) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = num, \
+ .address = num, \
+ .scan_index = num, \
+ .scan_type = IIO_ST('s', 16, 16, 0), \
+ }
+
static struct iio_chan_spec ad7606_8_channels[] = {
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, 0, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, 1, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 2, 2, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 3, 3, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 4, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 4, 4, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 5, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 5, 5, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 6, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 6, 6, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 7, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 7, 7, IIO_ST('s', 16, 16, 0), 0),
+ AD7606_CHANNEL(0),
+ AD7606_CHANNEL(1),
+ AD7606_CHANNEL(2),
+ AD7606_CHANNEL(3),
+ AD7606_CHANNEL(4),
+ AD7606_CHANNEL(5),
+ AD7606_CHANNEL(6),
+ AD7606_CHANNEL(7),
IIO_CHAN_SOFT_TIMESTAMP(8),
};
static struct iio_chan_spec ad7606_6_channels[] = {
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, 0, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, 1, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 2, 2, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 3, 3, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 4, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 4, 4, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 5, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 5, 5, IIO_ST('s', 16, 16, 0), 0),
+ AD7606_CHANNEL(0),
+ AD7606_CHANNEL(1),
+ AD7606_CHANNEL(2),
+ AD7606_CHANNEL(3),
+ AD7606_CHANNEL(4),
+ AD7606_CHANNEL(5),
IIO_CHAN_SOFT_TIMESTAMP(6),
};
static struct iio_chan_spec ad7606_4_channels[] = {
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, 0, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, 1, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 2, 2, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 3, 3, IIO_ST('s', 16, 16, 0), 0),
+ AD7606_CHANNEL(0),
+ AD7606_CHANNEL(1),
+ AD7606_CHANNEL(2),
+ AD7606_CHANNEL(3),
IIO_CHAN_SOFT_TIMESTAMP(4),
};
@@ -346,64 +323,96 @@ static int ad7606_request_gpios(struct ad7606_state *st)
};
int ret;
- ret = gpio_request_one(st->pdata->gpio_convst, GPIOF_OUT_INIT_LOW,
- "AD7606_CONVST");
- if (ret) {
- dev_err(st->dev, "failed to request GPIO CONVST\n");
- return ret;
+ if (gpio_is_valid(st->pdata->gpio_convst)) {
+ ret = gpio_request_one(st->pdata->gpio_convst,
+ GPIOF_OUT_INIT_LOW,
+ "AD7606_CONVST");
+ if (ret) {
+ dev_err(st->dev, "failed to request GPIO CONVST\n");
+ goto error_ret;
+ }
+ } else {
+ ret = -EIO;
+ goto error_ret;
}
- ret = gpio_request_array(gpio_array, ARRAY_SIZE(gpio_array));
- if (!ret) {
- st->have_os = true;
+ if (gpio_is_valid(st->pdata->gpio_os0) &&
+ gpio_is_valid(st->pdata->gpio_os1) &&
+ gpio_is_valid(st->pdata->gpio_os2)) {
+ ret = gpio_request_array(gpio_array, ARRAY_SIZE(gpio_array));
+ if (ret < 0)
+ goto error_free_convst;
}
- ret = gpio_request_one(st->pdata->gpio_reset, GPIOF_OUT_INIT_LOW,
- "AD7606_RESET");
- if (!ret)
- st->have_reset = true;
-
- ret = gpio_request_one(st->pdata->gpio_range, GPIOF_DIR_OUT |
- ((st->range == 10000) ? GPIOF_INIT_HIGH :
- GPIOF_INIT_LOW), "AD7606_RANGE");
- if (!ret)
- st->have_range = true;
+ if (gpio_is_valid(st->pdata->gpio_reset)) {
+ ret = gpio_request_one(st->pdata->gpio_reset,
+ GPIOF_OUT_INIT_LOW,
+ "AD7606_RESET");
+ if (ret < 0)
+ goto error_free_os;
+ }
- ret = gpio_request_one(st->pdata->gpio_stby, GPIOF_OUT_INIT_HIGH,
- "AD7606_STBY");
- if (!ret)
- st->have_stby = true;
+ if (gpio_is_valid(st->pdata->gpio_range)) {
+ ret = gpio_request_one(st->pdata->gpio_range, GPIOF_DIR_OUT |
+ ((st->range == 10000) ? GPIOF_INIT_HIGH :
+ GPIOF_INIT_LOW), "AD7606_RANGE");
+ if (ret < 0)
+ goto error_free_reset;
+ }
+ if (gpio_is_valid(st->pdata->gpio_stby)) {
+ ret = gpio_request_one(st->pdata->gpio_stby,
+ GPIOF_OUT_INIT_HIGH,
+ "AD7606_STBY");
+ if (ret < 0)
+ goto error_free_range;
+ }
if (gpio_is_valid(st->pdata->gpio_frstdata)) {
ret = gpio_request_one(st->pdata->gpio_frstdata, GPIOF_IN,
"AD7606_FRSTDATA");
- if (!ret)
- st->have_frstdata = true;
+ if (ret < 0)
+ goto error_free_stby;
}
return 0;
+
+error_free_stby:
+ if (gpio_is_valid(st->pdata->gpio_stby))
+ gpio_free(st->pdata->gpio_stby);
+error_free_range:
+ if (gpio_is_valid(st->pdata->gpio_range))
+ gpio_free(st->pdata->gpio_range);
+error_free_reset:
+ if (gpio_is_valid(st->pdata->gpio_reset))
+ gpio_free(st->pdata->gpio_reset);
+error_free_os:
+ if (gpio_is_valid(st->pdata->gpio_os0) &&
+ gpio_is_valid(st->pdata->gpio_os1) &&
+ gpio_is_valid(st->pdata->gpio_os2))
+ gpio_free_array(gpio_array, ARRAY_SIZE(gpio_array));
+error_free_convst:
+ gpio_free(st->pdata->gpio_convst);
+error_ret:
+ return ret;
}
static void ad7606_free_gpios(struct ad7606_state *st)
{
- if (st->have_range)
- gpio_free(st->pdata->gpio_range);
-
- if (st->have_stby)
+ if (gpio_is_valid(st->pdata->gpio_frstdata))
+ gpio_free(st->pdata->gpio_frstdata);
+ if (gpio_is_valid(st->pdata->gpio_stby))
gpio_free(st->pdata->gpio_stby);
-
- if (st->have_os) {
- gpio_free(st->pdata->gpio_os0);
- gpio_free(st->pdata->gpio_os1);
+ if (gpio_is_valid(st->pdata->gpio_range))
+ gpio_free(st->pdata->gpio_range);
+ if (gpio_is_valid(st->pdata->gpio_reset))
+ gpio_free(st->pdata->gpio_reset);
+ if (gpio_is_valid(st->pdata->gpio_os0) &&
+ gpio_is_valid(st->pdata->gpio_os1) &&
+ gpio_is_valid(st->pdata->gpio_os2)) {
gpio_free(st->pdata->gpio_os2);
+ gpio_free(st->pdata->gpio_os1);
+ gpio_free(st->pdata->gpio_os0);
}
-
- if (st->have_reset)
- gpio_free(st->pdata->gpio_reset);
-
- if (st->have_frstdata)
- gpio_free(st->pdata->gpio_frstdata);
-
gpio_free(st->pdata->gpio_convst);
}
@@ -415,7 +424,7 @@ static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
struct iio_dev *indio_dev = dev_id;
struct ad7606_state *st = iio_priv(indio_dev);
- if (iio_ring_enabled(indio_dev)) {
+ if (iio_buffer_enabled(indio_dev)) {
if (!work_pending(&st->poll_work))
schedule_work(&st->poll_work);
} else {
@@ -439,7 +448,7 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq,
{
struct ad7606_platform_data *pdata = dev->platform_data;
struct ad7606_state *st;
- int ret, regdone = 0;
+ int ret;
struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));
if (indio_dev == NULL) {
@@ -450,8 +459,6 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq,
st = iio_priv(indio_dev);
st->dev = dev;
- st->id = id;
- st->irq = irq;
st->bops = bops;
st->base_address = base_address;
st->range = pdata->default_range == 10000 ? 10000 : 5000;
@@ -492,7 +499,7 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq,
if (ret)
dev_warn(st->dev, "failed to RESET: no RESET GPIO specified\n");
- ret = request_irq(st->irq, ad7606_interrupt,
+ ret = request_irq(irq, ad7606_interrupt,
IRQF_TRIGGER_FALLING, st->chip_info->name, indio_dev);
if (ret)
goto error_free_gpios;
@@ -501,24 +508,24 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq,
if (ret)
goto error_free_irq;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_irq;
- regdone = 1;
-
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- indio_dev->channels,
- indio_dev->num_channels);
+ ret = iio_buffer_register(indio_dev,
+ indio_dev->channels,
+ indio_dev->num_channels);
if (ret)
goto error_cleanup_ring;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unregister_ring;
return indio_dev;
+error_unregister_ring:
+ iio_buffer_unregister(indio_dev);
error_cleanup_ring:
ad7606_ring_cleanup(indio_dev);
error_free_irq:
- free_irq(st->irq, indio_dev);
+ free_irq(irq, indio_dev);
error_free_gpios:
ad7606_free_gpios(st);
@@ -529,29 +536,27 @@ error_disable_reg:
error_put_reg:
if (!IS_ERR(st->reg))
regulator_put(st->reg);
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ERR_PTR(ret);
}
-int ad7606_remove(struct iio_dev *indio_dev)
+int ad7606_remove(struct iio_dev *indio_dev, int irq)
{
struct ad7606_state *st = iio_priv(indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_device_unregister(indio_dev);
+ iio_buffer_unregister(indio_dev);
ad7606_ring_cleanup(indio_dev);
- free_irq(st->irq, indio_dev);
+ free_irq(irq, indio_dev);
if (!IS_ERR(st->reg)) {
regulator_disable(st->reg);
regulator_put(st->reg);
}
ad7606_free_gpios(st);
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
@@ -560,8 +565,8 @@ void ad7606_suspend(struct iio_dev *indio_dev)
{
struct ad7606_state *st = iio_priv(indio_dev);
- if (st->have_stby) {
- if (st->have_range)
+ if (gpio_is_valid(st->pdata->gpio_stby)) {
+ if (gpio_is_valid(st->pdata->gpio_range))
gpio_set_value(st->pdata->gpio_range, 1);
gpio_set_value(st->pdata->gpio_stby, 0);
}
@@ -571,8 +576,8 @@ void ad7606_resume(struct iio_dev *indio_dev)
{
struct ad7606_state *st = iio_priv(indio_dev);
- if (st->have_stby) {
- if (st->have_range)
+ if (gpio_is_valid(st->pdata->gpio_stby)) {
+ if (gpio_is_valid(st->pdata->gpio_range))
gpio_set_value(st->pdata->gpio_range,
st->range == 10000);
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index d21218da923..688632edd3d 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -106,7 +106,7 @@ static int __devexit ad7606_par_remove(struct platform_device *pdev)
struct resource *res;
struct ad7606_state *st = iio_priv(indio_dev);
- ad7606_remove(indio_dev);
+ ad7606_remove(indio_dev, platform_get_irq(pdev, 0));
iounmap(st->base_address);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
index a199bf48396..20927fd5372 100644
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ b/drivers/staging/iio/adc/ad7606_ring.c
@@ -10,19 +10,17 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
#include "../iio.h"
-#include "../ring_generic.h"
+#include "../buffer_generic.h"
#include "../ring_sw.h"
-#include "../trigger.h"
-#include "../sysfs.h"
+#include "../trigger_consumer.h"
#include "ad7606.h"
int ad7606_scan_from_ring(struct iio_dev *indio_dev, unsigned ch)
{
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
int ret;
u16 *ring_data;
@@ -45,45 +43,13 @@ error_ret:
}
/**
- * ad7606_ring_preenable() setup the parameters of the ring before enabling
- *
- * The complex nature of the setting of the nuber of bytes per datum is due
- * to this driver currently ensuring that the timestamp is stored at an 8
- * byte boundary.
- **/
-static int ad7606_ring_preenable(struct iio_dev *indio_dev)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
- size_t d_size;
-
- d_size = st->chip_info->num_channels *
- st->chip_info->channels[0].scan_type.storagebits / 8;
-
- if (ring->scan_timestamp) {
- d_size += sizeof(s64);
-
- if (d_size % sizeof(s64))
- d_size += sizeof(s64) - (d_size % sizeof(s64));
- }
-
- if (ring->access->set_bytes_per_datum)
- ring->access->set_bytes_per_datum(ring, d_size);
-
- st->d_size = d_size;
-
- return 0;
-}
-
-/**
* ad7606_trigger_handler_th() th/bh of trigger launched polling to ring buffer
*
**/
static irqreturn_t ad7606_trigger_handler_th_bh(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
- struct ad7606_state *st = iio_priv(indio_dev);
+ struct ad7606_state *st = iio_priv(pf->indio_dev);
gpio_set_value(st->pdata->gpio_convst, 1);
@@ -104,16 +70,17 @@ static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
struct ad7606_state *st = container_of(work_s, struct ad7606_state,
poll_work);
struct iio_dev *indio_dev = iio_priv_to_dev(st);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
s64 time_ns;
__u8 *buf;
int ret;
- buf = kzalloc(st->d_size, GFP_KERNEL);
+ buf = kzalloc(ring->access->get_bytes_per_datum(ring),
+ GFP_KERNEL);
if (buf == NULL)
return;
- if (st->have_frstdata) {
+ if (gpio_is_valid(st->pdata->gpio_frstdata)) {
ret = st->bops->read_block(st->dev, 1, buf);
if (ret)
goto done;
@@ -140,20 +107,20 @@ static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
time_ns = iio_get_time_ns();
if (ring->scan_timestamp)
- memcpy(buf + st->d_size - sizeof(s64),
- &time_ns, sizeof(time_ns));
+ *((s64 *)(buf + ring->access->get_bytes_per_datum(ring) -
+ sizeof(s64))) = time_ns;
- ring->access->store_to(indio_dev->ring, buf, time_ns);
+ ring->access->store_to(indio_dev->buffer, buf, time_ns);
done:
gpio_set_value(st->pdata->gpio_convst, 0);
iio_trigger_notify_done(indio_dev->trig);
kfree(buf);
}
-static const struct iio_ring_setup_ops ad7606_ring_setup_ops = {
- .preenable = &ad7606_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+static const struct iio_buffer_setup_ops ad7606_ring_setup_ops = {
+ .preenable = &iio_sw_buffer_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
};
int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
@@ -161,14 +128,16 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
struct ad7606_state *st = iio_priv(indio_dev);
int ret;
- indio_dev->ring = iio_sw_rb_allocate(indio_dev);
- if (!indio_dev->ring) {
+ indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
+ if (!indio_dev->buffer) {
ret = -ENOMEM;
goto error_ret;
}
/* Effectively select the ring buffer implementation */
- indio_dev->ring->access = &ring_sw_access_funcs;
+ indio_dev->buffer->access = &ring_sw_access_funcs;
+ indio_dev->buffer->bpe =
+ st->chip_info->channels[0].scan_type.storagebits / 8;
indio_dev->pollfunc = iio_alloc_pollfunc(&ad7606_trigger_handler_th_bh,
&ad7606_trigger_handler_th_bh,
0,
@@ -183,28 +152,23 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
/* Ring buffer functions - here trigger setup related */
- indio_dev->ring->setup_ops = &ad7606_ring_setup_ops;
- indio_dev->ring->scan_timestamp = true ;
+ indio_dev->buffer->setup_ops = &ad7606_ring_setup_ops;
+ indio_dev->buffer->scan_timestamp = true ;
INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
error_ret:
return ret;
}
void ad7606_ring_cleanup(struct iio_dev *indio_dev)
{
- if (indio_dev->trig) {
- iio_put_trigger(indio_dev->trig);
- iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc);
- }
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index 0769c807d95..aede1ba5e04 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -59,7 +59,7 @@ static int __devexit ad7606_spi_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = dev_get_drvdata(&spi->dev);
- return ad7606_remove(indio_dev);
+ return ad7606_remove(indio_dev, spi->irq);
}
#ifdef CONFIG_PM
diff --git a/drivers/staging/iio/adc/ad7745.c b/drivers/staging/iio/adc/ad7745.c
deleted file mode 100644
index 4c13f26aa9a..00000000000
--- a/drivers/staging/iio/adc/ad7745.c
+++ /dev/null
@@ -1,674 +0,0 @@
-/*
- * AD774X capacitive sensor driver supporting AD7745/6/7
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/i2c.h>
-
-#include "../iio.h"
-#include "../sysfs.h"
-
-/*
- * AD774X registers definition
- */
-
-#define AD774X_STATUS 0
-#define AD774X_STATUS_RDY (1 << 2)
-#define AD774X_STATUS_RDYVT (1 << 1)
-#define AD774X_STATUS_RDYCAP (1 << 0)
-#define AD774X_CAP_DATA_HIGH 1
-#define AD774X_CAP_DATA_MID 2
-#define AD774X_CAP_DATA_LOW 3
-#define AD774X_VT_DATA_HIGH 4
-#define AD774X_VT_DATA_MID 5
-#define AD774X_VT_DATA_LOW 6
-#define AD774X_CAP_SETUP 7
-#define AD774X_VT_SETUP 8
-#define AD774X_EXEC_SETUP 9
-#define AD774X_CFG 10
-#define AD774X_CAPDACA 11
-#define AD774X_CAPDACB 12
-#define AD774X_CAPDAC_EN (1 << 7)
-#define AD774X_CAP_OFFH 13
-#define AD774X_CAP_OFFL 14
-#define AD774X_CAP_GAINH 15
-#define AD774X_CAP_GAINL 16
-#define AD774X_VOLT_GAINH 17
-#define AD774X_VOLT_GAINL 18
-
-#define AD774X_MAX_CONV_MODE 6
-
-/*
- * struct ad774x_chip_info - chip specifc information
- */
-
-struct ad774x_chip_info {
- struct i2c_client *client;
- bool inter;
- u16 cap_offs; /* Capacitive offset */
- u16 cap_gain; /* Capacitive gain calibration */
- u16 volt_gain; /* Voltage gain calibration */
- u8 cap_setup;
- u8 vt_setup;
- u8 exec_setup;
-
- char *conversion_mode;
-};
-
-struct ad774x_conversion_mode {
- char *name;
- u8 reg_cfg;
-};
-
-static struct ad774x_conversion_mode
-ad774x_conv_mode_table[AD774X_MAX_CONV_MODE] = {
- { "idle", 0 },
- { "continuous-conversion", 1 },
- { "single-conversion", 2 },
- { "power-down", 3 },
- { "offset-calibration", 5 },
- { "gain-calibration", 6 },
-};
-
-/*
- * ad774x register access by I2C
- */
-
-static int ad774x_i2c_read(struct ad774x_chip_info *chip, u8 reg, u8 *data, int len)
-{
- struct i2c_client *client = chip->client;
- int ret;
-
- ret = i2c_master_send(client, &reg, 1);
- if (ret < 0) {
- dev_err(&client->dev, "I2C write error\n");
- return ret;
- }
-
- ret = i2c_master_recv(client, data, len);
- if (ret < 0) {
- dev_err(&client->dev, "I2C read error\n");
- return ret;
- }
-
- return ret;
-}
-
-static int ad774x_i2c_write(struct ad774x_chip_info *chip, u8 reg, u8 data)
-{
- struct i2c_client *client = chip->client;
- int ret;
-
- u8 tx[2] = {
- reg,
- data,
- };
-
- ret = i2c_master_send(client, tx, 2);
- if (ret < 0)
- dev_err(&client->dev, "I2C write error\n");
-
- return ret;
-}
-
-/*
- * sysfs nodes
- */
-
-#define IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(_show) \
- IIO_DEVICE_ATTR(available_conversion_modes, S_IRUGO, _show, NULL, 0)
-#define IIO_DEV_ATTR_CONVERSION_MODE(_mode, _show, _store) \
- IIO_DEVICE_ATTR(conversion_mode, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CAP_SETUP(_mode, _show, _store) \
- IIO_DEVICE_ATTR(cap_setup, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_VT_SETUP(_mode, _show, _store) \
- IIO_DEVICE_ATTR(in0_setup, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_EXEC_SETUP(_mode, _show, _store) \
- IIO_DEVICE_ATTR(exec_setup, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_VOLT_GAIN(_mode, _show, _store) \
- IIO_DEVICE_ATTR(in0_gain, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CAP_OFFS(_mode, _show, _store) \
- IIO_DEVICE_ATTR(cap_offs, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CAP_GAIN(_mode, _show, _store) \
- IIO_DEVICE_ATTR(cap_gain, _mode, _show, _store, 0)
-#define IIO_DEV_ATTR_CAP_DATA(_show) \
- IIO_DEVICE_ATTR(cap0_raw, S_IRUGO, _show, NULL, 0)
-#define IIO_DEV_ATTR_VT_DATA(_show) \
- IIO_DEVICE_ATTR(in0_raw, S_IRUGO, _show, NULL, 0)
-
-static ssize_t ad774x_show_conversion_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int i;
- int len = 0;
-
- for (i = 0; i < AD774X_MAX_CONV_MODE; i++)
- len += sprintf(buf + len, "%s ", ad774x_conv_mode_table[i].name);
-
- len += sprintf(buf + len, "\n");
-
- return len;
-}
-
-static IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(ad774x_show_conversion_modes);
-
-static ssize_t ad774x_show_conversion_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%s\n", chip->conversion_mode);
-}
-
-static ssize_t ad774x_store_conversion_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
- u8 cfg;
- int i;
-
- ad774x_i2c_read(chip, AD774X_CFG, &cfg, 1);
-
- for (i = 0; i < AD774X_MAX_CONV_MODE; i++) {
- if (strncmp(buf, ad774x_conv_mode_table[i].name,
- strlen(ad774x_conv_mode_table[i].name) - 1) == 0) {
- chip->conversion_mode = ad774x_conv_mode_table[i].name;
- cfg |= 0x18 | ad774x_conv_mode_table[i].reg_cfg;
- ad774x_i2c_write(chip, AD774X_CFG, cfg);
- return len;
- }
- }
-
- dev_err(dev, "not supported conversion mode\n");
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CONVERSION_MODE(S_IRUGO | S_IWUSR,
- ad774x_show_conversion_mode,
- ad774x_store_conversion_mode);
-
-static ssize_t ad774x_show_dac_value(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- u8 data;
-
- ad774x_i2c_read(chip, this_attr->address, &data, 1);
-
- return sprintf(buf, "%02x\n", data & 0x7F);
-}
-
-static ssize_t ad774x_store_dac_value(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if (!ret) {
- ad774x_i2c_write(chip, this_attr->address,
- (data ? AD774X_CAPDAC_EN : 0) | (data & 0x7F));
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEVICE_ATTR(capdac0_raw, S_IRUGO | S_IWUSR,
- ad774x_show_dac_value,
- ad774x_store_dac_value,
- AD774X_CAPDACA);
-
-static IIO_DEVICE_ATTR(capdac1_raw, S_IRUGO | S_IWUSR,
- ad774x_show_dac_value,
- ad774x_store_dac_value,
- AD774X_CAPDACB);
-
-static ssize_t ad774x_show_cap_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "0x%02x\n", chip->cap_setup);
-}
-
-static ssize_t ad774x_store_cap_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x100)) {
- ad774x_i2c_write(chip, AD774X_CAP_SETUP, data);
- chip->cap_setup = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CAP_SETUP(S_IRUGO | S_IWUSR,
- ad774x_show_cap_setup,
- ad774x_store_cap_setup);
-
-static ssize_t ad774x_show_vt_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "0x%02x\n", chip->vt_setup);
-}
-
-static ssize_t ad774x_store_vt_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x100)) {
- ad774x_i2c_write(chip, AD774X_VT_SETUP, data);
- chip->vt_setup = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_VT_SETUP(S_IRUGO | S_IWUSR,
- ad774x_show_vt_setup,
- ad774x_store_vt_setup);
-
-static ssize_t ad774x_show_exec_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "0x%02x\n", chip->exec_setup);
-}
-
-static ssize_t ad774x_store_exec_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x100)) {
- ad774x_i2c_write(chip, AD774X_EXEC_SETUP, data);
- chip->exec_setup = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_EXEC_SETUP(S_IRUGO | S_IWUSR,
- ad774x_show_exec_setup,
- ad774x_store_exec_setup);
-
-static ssize_t ad774x_show_volt_gain(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", chip->volt_gain);
-}
-
-static ssize_t ad774x_store_volt_gain(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x10000)) {
- ad774x_i2c_write(chip, AD774X_VOLT_GAINH, data >> 8);
- ad774x_i2c_write(chip, AD774X_VOLT_GAINL, data);
- chip->volt_gain = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_VOLT_GAIN(S_IRUGO | S_IWUSR,
- ad774x_show_volt_gain,
- ad774x_store_volt_gain);
-
-static ssize_t ad774x_show_cap_data(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- char tmp[3];
-
- ad774x_i2c_read(chip, AD774X_CAP_DATA_HIGH, tmp, 3);
- data = ((int)tmp[0] << 16) | ((int)tmp[1] << 8) | (int)tmp[2];
-
- return sprintf(buf, "%ld\n", data);
-}
-
-static IIO_DEV_ATTR_CAP_DATA(ad774x_show_cap_data);
-
-static ssize_t ad774x_show_vt_data(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- char tmp[3];
-
- ad774x_i2c_read(chip, AD774X_VT_DATA_HIGH, tmp, 3);
- data = ((int)tmp[0] << 16) | ((int)tmp[1] << 8) | (int)tmp[2];
-
- return sprintf(buf, "%ld\n", data);
-}
-
-static IIO_DEV_ATTR_VT_DATA(ad774x_show_vt_data);
-
-static ssize_t ad774x_show_cap_offs(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", chip->cap_offs);
-}
-
-static ssize_t ad774x_store_cap_offs(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x10000)) {
- ad774x_i2c_write(chip, AD774X_CAP_OFFH, data >> 8);
- ad774x_i2c_write(chip, AD774X_CAP_OFFL, data);
- chip->cap_offs = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CAP_OFFS(S_IRUGO | S_IWUSR,
- ad774x_show_cap_offs,
- ad774x_store_cap_offs);
-
-static ssize_t ad774x_show_cap_gain(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
-
- return sprintf(buf, "%d\n", chip->cap_gain);
-}
-
-static ssize_t ad774x_store_cap_gain(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
-
- ret = strict_strtoul(buf, 10, &data);
-
- if ((!ret) && (data < 0x10000)) {
- ad774x_i2c_write(chip, AD774X_CAP_GAINH, data >> 8);
- ad774x_i2c_write(chip, AD774X_CAP_GAINL, data);
- chip->cap_gain = data;
- return len;
- }
-
- return -EINVAL;
-}
-
-static IIO_DEV_ATTR_CAP_GAIN(S_IRUGO | S_IWUSR,
- ad774x_show_cap_gain,
- ad774x_store_cap_gain);
-
-static struct attribute *ad774x_attributes[] = {
- &iio_dev_attr_available_conversion_modes.dev_attr.attr,
- &iio_dev_attr_conversion_mode.dev_attr.attr,
- &iio_dev_attr_cap_setup.dev_attr.attr,
- &iio_dev_attr_in0_setup.dev_attr.attr,
- &iio_dev_attr_exec_setup.dev_attr.attr,
- &iio_dev_attr_cap_offs.dev_attr.attr,
- &iio_dev_attr_cap_gain.dev_attr.attr,
- &iio_dev_attr_in0_gain.dev_attr.attr,
- &iio_dev_attr_in0_raw.dev_attr.attr,
- &iio_dev_attr_cap0_raw.dev_attr.attr,
- &iio_dev_attr_capdac0_raw.dev_attr.attr,
- &iio_dev_attr_capdac1_raw.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad774x_attribute_group = {
- .attrs = ad774x_attributes,
-};
-
-/*
- * data ready events
- */
-
-#define IIO_EVENT_CODE_CAP_RDY 0
-#define IIO_EVENT_CODE_VT_RDY 1
-
-#define IIO_EVENT_ATTR_CAP_RDY_SH(_evlist, _show, _store, _mask) \
- IIO_EVENT_ATTR_SH(cap_rdy, _evlist, _show, _store, _mask)
-
-#define IIO_EVENT_ATTR_VT_RDY_SH(_evlist, _show, _store, _mask) \
- IIO_EVENT_ATTR_SH(vt_rdy, _evlist, _show, _store, _mask)
-
-static irqreturn_t ad774x_event_handler(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- struct ad774x_chip_info *chip = iio_priv(indio_dev);
- u8 int_status;
-
- ad774x_i2c_read(chip, AD774X_STATUS, &int_status, 1);
-
- if (int_status & AD774X_STATUS_RDYCAP)
- iio_push_event(indio_dev, 0,
- IIO_EVENT_CODE_CAP_RDY,
- iio_get_time_ns());
-
- if (int_status & AD774X_STATUS_RDYVT)
- iio_push_event(indio_dev, 0,
- IIO_EVENT_CODE_VT_RDY,
- iio_get_time_ns());
-
- return IRQ_HANDLED;
-}
-
-static IIO_CONST_ATTR(cap_rdy_en, "1");
-static IIO_CONST_ATTR(vt_rdy_en, "1");
-
-static struct attribute *ad774x_event_attributes[] = {
- &iio_const_attr_cap_rdy_en.dev_attr.attr,
- &iio_const_attr_vt_rdy_en.dev_attr.attr,
- NULL,
-};
-
-static struct attribute_group ad774x_event_attribute_group = {
- .attrs = ad774x_event_attributes,
-};
-
-static const struct iio_info ad774x_info = {
- .attrs = &ad774x_event_attribute_group,
- .event_attrs = &ad774x_event_attribute_group,
- .num_interrupt_lines = 1,
- .driver_module = THIS_MODULE,
-};
-/*
- * device probe and remove
- */
-
-static int __devinit ad774x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int ret = 0, regdone = 0;
- struct ad774x_chip_info *chip;
- struct iio_dev *indio_dev;
-
- indio_dev = iio_allocate_device(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- chip = iio_priv(indio_dev);
- /* this is only used for device removal purposes */
- i2c_set_clientdata(client, indio_dev);
-
- chip->client = client;
-
- /* Establish that the iio_dev is a child of the i2c device */
- indio_dev->name = id->name;
- indio_dev->dev.parent = &client->dev;
- indio_dev->info = &ad774x_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
- regdone = 1;
-
- if (client->irq) {
- ret = request_threaded_irq(client->irq,
- NULL,
- &ad774x_event_handler,
- IRQF_TRIGGER_FALLING,
- "ad774x",
- indio_dev);
- if (ret)
- goto error_free_dev;
- }
-
- dev_err(&client->dev, "%s capacitive sensor registered, irq: %d\n", id->name, client->irq);
-
- return 0;
-
-error_free_dev:
- if (regdone)
- free_irq(client->irq, indio_dev);
- else
- iio_free_device(indio_dev);
-error_ret:
- return ret;
-}
-
-static int __devexit ad774x_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- if (client->irq)
- free_irq(client->irq, indio_dev);
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
-static const struct i2c_device_id ad774x_id[] = {
- { "ad7745", 0 },
- { "ad7746", 0 },
- { "ad7747", 0 },
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, ad774x_id);
-
-static struct i2c_driver ad774x_driver = {
- .driver = {
- .name = "ad774x",
- },
- .probe = ad774x_probe,
- .remove = __devexit_p(ad774x_remove),
- .id_table = ad774x_id,
-};
-
-static __init int ad774x_init(void)
-{
- return i2c_add_driver(&ad774x_driver);
-}
-
-static __exit void ad774x_exit(void)
-{
- i2c_del_driver(&ad774x_driver);
-}
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ad7745/6/7 capacitive sensor driver");
-MODULE_LICENSE("GPL v2");
-
-module_init(ad774x_init);
-module_exit(ad774x_exit);
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index e0c7b6cc05c..7a579a1fd69 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -16,11 +16,10 @@
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
-#include "adc.h"
#include "ad7780.h"
@@ -127,12 +126,12 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
[ID_AD7780] = {
- .channel = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
+ .channel = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('s', 24, 32, 8), 0),
},
[ID_AD7781] = {
- .channel = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
+ .channel = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('s', 20, 32, 12), 0),
},
@@ -256,13 +255,14 @@ static int ad7780_remove(struct spi_device *spi)
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7780_state *st = iio_priv(indio_dev);
+ iio_device_unregister(indio_dev);
free_irq(spi->irq, st);
gpio_free(st->pdata->gpio_pdrst);
if (!IS_ERR(st->reg)) {
regulator_disable(st->reg);
regulator_put(st->reg);
}
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c
index 90f6c039d6c..a831b92cd08 100644
--- a/drivers/staging/iio/adc/ad7793.c
+++ b/drivers/staging/iio/adc/ad7793.c
@@ -19,10 +19,10 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
+#include "../buffer_generic.h"
#include "../ring_sw.h"
#include "../trigger.h"
-#include "adc.h"
+#include "../trigger_consumer.h"
#include "ad7793.h"
@@ -51,7 +51,8 @@ struct ad7793_state {
u16 mode;
u16 conf;
u32 scale_avail[8][2];
- u32 available_scan_masks[7];
+ /* Note this uses fact that 8 the mask always fits in a long */
+ unsigned long available_scan_masks[7];
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
@@ -316,12 +317,12 @@ out:
static int ad7793_scan_from_ring(struct ad7793_state *st, unsigned ch, int *val)
{
- struct iio_ring_buffer *ring = iio_priv_to_dev(st)->ring;
+ struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
int ret;
s64 dat64[2];
u32 *dat32 = (u32 *)dat64;
- if (!(ring->scan_mask & (1 << ch)))
+ if (!(test_bit(ch, ring->scan_mask)))
return -EBUSY;
ret = ring->access->read_last(ring, (u8 *) &dat64);
@@ -336,14 +337,15 @@ static int ad7793_scan_from_ring(struct ad7793_state *st, unsigned ch, int *val)
static int ad7793_ring_preenable(struct iio_dev *indio_dev)
{
struct ad7793_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
size_t d_size;
unsigned channel;
if (!ring->scan_count)
return -EINVAL;
- channel = __ffs(ring->scan_mask);
+ channel = find_first_bit(ring->scan_mask,
+ indio_dev->masklength);
d_size = ring->scan_count *
indio_dev->channels[0].scan_type.storagebits / 8;
@@ -355,9 +357,9 @@ static int ad7793_ring_preenable(struct iio_dev *indio_dev)
d_size += sizeof(s64) - (d_size % sizeof(s64));
}
- if (indio_dev->ring->access->set_bytes_per_datum)
- indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring,
- d_size);
+ if (indio_dev->buffer->access->set_bytes_per_datum)
+ indio_dev->buffer->access->
+ set_bytes_per_datum(indio_dev->buffer, d_size);
st->mode = (st->mode & ~AD7793_MODE_SEL(-1)) |
AD7793_MODE_SEL(AD7793_MODE_CONT);
@@ -402,8 +404,8 @@ static int ad7793_ring_postdisable(struct iio_dev *indio_dev)
static irqreturn_t ad7793_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct iio_buffer *ring = indio_dev->buffer;
struct ad7793_state *st = iio_priv(indio_dev);
s64 dat64[2];
s32 *dat32 = (s32 *)dat64;
@@ -426,10 +428,10 @@ static irqreturn_t ad7793_trigger_handler(int irq, void *p)
return IRQ_HANDLED;
}
-static const struct iio_ring_setup_ops ad7793_ring_setup_ops = {
+static const struct iio_buffer_setup_ops ad7793_ring_setup_ops = {
.preenable = &ad7793_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
.postdisable = &ad7793_ring_postdisable,
};
@@ -437,13 +439,13 @@ static int ad7793_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
int ret;
- indio_dev->ring = iio_sw_rb_allocate(indio_dev);
- if (!indio_dev->ring) {
+ indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
+ if (!indio_dev->buffer) {
ret = -ENOMEM;
goto error_ret;
}
/* Effectively select the ring buffer implementation */
- indio_dev->ring->access = &ring_sw_access_funcs;
+ indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ad7793_trigger_handler,
IRQF_ONESHOT,
@@ -456,28 +458,22 @@ static int ad7793_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->ring->setup_ops = &ad7793_ring_setup_ops;
+ indio_dev->buffer->setup_ops = &ad7793_ring_setup_ops;
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
error_ret:
return ret;
}
static void ad7793_ring_cleanup(struct iio_dev *indio_dev)
{
- /* ensure that the trigger has been detached */
- if (indio_dev->trig) {
- iio_put_trigger(indio_dev->trig);
- iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc);
- }
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
/**
@@ -574,7 +570,7 @@ static ssize_t ad7793_write_frequency(struct device *dev,
int i, ret;
mutex_lock(&indio_dev->mlock);
- if (iio_ring_enabled(indio_dev)) {
+ if (iio_buffer_enabled(indio_dev)) {
mutex_unlock(&indio_dev->mlock);
return -EBUSY;
}
@@ -651,7 +647,7 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
switch (m) {
case 0:
mutex_lock(&indio_dev->mlock);
- if (iio_ring_enabled(indio_dev))
+ if (iio_buffer_enabled(indio_dev))
ret = ad7793_scan_from_ring(st,
chan->scan_index, &smpl);
else
@@ -678,7 +674,7 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
switch (chan->type) {
- case IIO_IN:
+ case IIO_VOLTAGE:
/* 1170mV / 2^23 * 6 */
scale_uv = (1170ULL * 100000000ULL * 6ULL)
>> (chan->scan_type.realbits -
@@ -713,7 +709,7 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
unsigned int tmp;
mutex_lock(&indio_dev->mlock);
- if (iio_ring_enabled(indio_dev)) {
+ if (iio_buffer_enabled(indio_dev)) {
mutex_unlock(&indio_dev->mlock);
return -EBUSY;
}
@@ -771,57 +767,137 @@ static const struct iio_info ad7793_info = {
static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
[ID_AD7793] = {
- .channel[0] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD7793_CH_AIN1P_AIN1M,
- 0, IIO_ST('s', 24, 32, 0), 0),
- .channel[1] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 1, 1,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD7793_CH_AIN2P_AIN2M,
- 1, IIO_ST('s', 24, 32, 0), 0),
- .channel[2] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 2, 2,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD7793_CH_AIN3P_AIN3M,
- 2, IIO_ST('s', 24, 32, 0), 0),
- .channel[3] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, "shorted", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD7793_CH_AIN1M_AIN1M,
- 3, IIO_ST('s', 24, 32, 0), 0),
- .channel[4] = IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- AD7793_CH_TEMP,
- 4, IIO_ST('s', 24, 32, 0), 0),
- .channel[5] = IIO_CHAN(IIO_IN, 0, 1, 0, "supply", 4, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- AD7793_CH_AVDD_MONITOR,
- 5, IIO_ST('s', 24, 32, 0), 0),
+ .channel[0] = {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 0,
+ .channel2 = 0,
+ .address = AD7793_CH_AIN1P_AIN1M,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .scan_index = 0,
+ .scan_type = IIO_ST('s', 24, 32, 0)
+ },
+ .channel[1] = {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 1,
+ .channel2 = 1,
+ .address = AD7793_CH_AIN2P_AIN2M,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .scan_index = 1,
+ .scan_type = IIO_ST('s', 24, 32, 0)
+ },
+ .channel[2] = {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 2,
+ .channel2 = 2,
+ .address = AD7793_CH_AIN3P_AIN3M,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .scan_index = 2,
+ .scan_type = IIO_ST('s', 24, 32, 0)
+ },
+ .channel[3] = {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ .extend_name = "shorted",
+ .indexed = 1,
+ .channel = 2,
+ .channel2 = 2,
+ .address = AD7793_CH_AIN1M_AIN1M,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .scan_index = 2,
+ .scan_type = IIO_ST('s', 24, 32, 0)
+ },
+ .channel[4] = {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .address = AD7793_CH_TEMP,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .scan_index = 4,
+ .scan_type = IIO_ST('s', 24, 32, 0),
+ },
+ .channel[5] = {
+ .type = IIO_VOLTAGE,
+ .extend_name = "supply",
+ .indexed = 1,
+ .channel = 4,
+ .address = AD7793_CH_AVDD_MONITOR,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .scan_index = 5,
+ .scan_type = IIO_ST('s', 24, 32, 0),
+ },
.channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6),
},
[ID_AD7792] = {
- .channel[0] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD7793_CH_AIN1P_AIN1M,
- 0, IIO_ST('s', 16, 32, 0), 0),
- .channel[1] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 1, 1,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD7793_CH_AIN2P_AIN2M,
- 1, IIO_ST('s', 16, 32, 0), 0),
- .channel[2] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 2, 2,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD7793_CH_AIN3P_AIN3M,
- 2, IIO_ST('s', 16, 32, 0), 0),
- .channel[3] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, "shorted", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD7793_CH_AIN1M_AIN1M,
- 3, IIO_ST('s', 16, 32, 0), 0),
- .channel[4] = IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- AD7793_CH_TEMP,
- 4, IIO_ST('s', 16, 32, 0), 0),
- .channel[5] = IIO_CHAN(IIO_IN, 0, 1, 0, "supply", 4, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- AD7793_CH_AVDD_MONITOR,
- 5, IIO_ST('s', 16, 32, 0), 0),
+ .channel[0] = {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 0,
+ .channel2 = 0,
+ .address = AD7793_CH_AIN1P_AIN1M,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .scan_index = 0,
+ .scan_type = IIO_ST('s', 16, 32, 0)
+ },
+ .channel[1] = {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 1,
+ .channel2 = 1,
+ .address = AD7793_CH_AIN2P_AIN2M,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .scan_index = 1,
+ .scan_type = IIO_ST('s', 16, 32, 0)
+ },
+ .channel[2] = {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 2,
+ .channel2 = 2,
+ .address = AD7793_CH_AIN3P_AIN3M,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .scan_index = 2,
+ .scan_type = IIO_ST('s', 16, 32, 0)
+ },
+ .channel[3] = {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ .extend_name = "shorted",
+ .indexed = 1,
+ .channel = 2,
+ .channel2 = 2,
+ .address = AD7793_CH_AIN1M_AIN1M,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .scan_index = 2,
+ .scan_type = IIO_ST('s', 16, 32, 0)
+ },
+ .channel[4] = {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .address = AD7793_CH_TEMP,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .scan_index = 4,
+ .scan_type = IIO_ST('s', 16, 32, 0),
+ },
+ .channel[5] = {
+ .type = IIO_VOLTAGE,
+ .extend_name = "supply",
+ .indexed = 1,
+ .channel = 4,
+ .address = AD7793_CH_AVDD_MONITOR,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .scan_index = 5,
+ .scan_type = IIO_ST('s', 16, 32, 0),
+ },
.channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6),
},
};
@@ -831,7 +907,7 @@ static int __devinit ad7793_probe(struct spi_device *spi)
struct ad7793_platform_data *pdata = spi->dev.platform_data;
struct ad7793_state *st;
struct iio_dev *indio_dev;
- int ret, i, voltage_uv = 0, regdone = 0;
+ int ret, i, voltage_uv = 0;
if (!pdata) {
dev_err(&spi->dev, "no platform data?\n");
@@ -881,10 +957,12 @@ static int __devinit ad7793_probe(struct spi_device *spi)
indio_dev->num_channels = 7;
indio_dev->info = &ad7793_info;
- for (i = 0; i < indio_dev->num_channels; i++)
- st->available_scan_masks[i] = (1 << i) | (1 <<
- indio_dev->channels[indio_dev->num_channels - 1].
- scan_index);
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ set_bit(i, &st->available_scan_masks[i]);
+ set_bit(indio_dev->
+ channels[indio_dev->num_channels - 1].scan_index,
+ &st->available_scan_masks[i]);
+ }
init_waitqueue_head(&st->wq_data_avail);
@@ -892,18 +970,13 @@ static int __devinit ad7793_probe(struct spi_device *spi)
if (ret)
goto error_disable_reg;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_ring;
- regdone = 1;
-
ret = ad7793_probe_trigger(indio_dev);
if (ret)
goto error_unreg_ring;
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- indio_dev->channels,
- indio_dev->num_channels);
+ ret = iio_buffer_register(indio_dev,
+ indio_dev->channels,
+ indio_dev->num_channels);
if (ret)
goto error_remove_trigger;
@@ -911,10 +984,14 @@ static int __devinit ad7793_probe(struct spi_device *spi)
if (ret)
goto error_uninitialize_ring;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_uninitialize_ring;
+
return 0;
error_uninitialize_ring:
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_buffer_unregister(indio_dev);
error_remove_trigger:
ad7793_remove_trigger(indio_dev);
error_unreg_ring:
@@ -926,10 +1003,7 @@ error_put_reg:
if (!IS_ERR(st->reg))
regulator_put(st->reg);
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
return ret;
}
@@ -939,7 +1013,8 @@ static int ad7793_remove(struct spi_device *spi)
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7793_state *st = iio_priv(indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_device_unregister(indio_dev);
+ iio_buffer_unregister(indio_dev);
ad7793_remove_trigger(indio_dev);
ad7793_ring_cleanup(indio_dev);
@@ -948,7 +1023,7 @@ static int ad7793_remove(struct spi_device *spi)
regulator_put(st->reg);
}
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 0c84217bde3..bdb90492b8a 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -14,6 +14,7 @@
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/spi/spi.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -111,8 +112,8 @@ static ssize_t ad7816_show_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = iio_priv(indio_dev);
if (chip->mode)
return sprintf(buf, "power-save\n");
@@ -125,8 +126,8 @@ static ssize_t ad7816_store_mode(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = iio_priv(indio_dev);
if (strcmp(buf, "full")) {
gpio_set_value(chip->rdwr_pin, 1);
@@ -157,8 +158,8 @@ static ssize_t ad7816_show_channel(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = iio_priv(indio_dev);
return sprintf(buf, "%d\n", chip->channel_id);
}
@@ -168,8 +169,8 @@ static ssize_t ad7816_store_channel(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = iio_priv(indio_dev);
unsigned long data;
int ret;
@@ -179,13 +180,13 @@ static ssize_t ad7816_store_channel(struct device *dev,
if (data > AD7816_CS_MAX && data != AD7816_CS_MASK) {
dev_err(&chip->spi_dev->dev, "Invalid channel id %lu for %s.\n",
- data, dev_info->name);
+ data, indio_dev->name);
return -EINVAL;
- } else if (strcmp(dev_info->name, "ad7818") == 0 && data > 1) {
+ } else if (strcmp(indio_dev->name, "ad7818") == 0 && data > 1) {
dev_err(&chip->spi_dev->dev,
"Invalid channel id %lu for ad7818.\n", data);
return -EINVAL;
- } else if (strcmp(dev_info->name, "ad7816") == 0 && data > 0) {
+ } else if (strcmp(indio_dev->name, "ad7816") == 0 && data > 0) {
dev_err(&chip->spi_dev->dev,
"Invalid channel id %lu for ad7816.\n", data);
return -EINVAL;
@@ -206,8 +207,8 @@ static ssize_t ad7816_show_value(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = iio_priv(indio_dev);
u16 data;
s8 value;
int ret;
@@ -246,16 +247,14 @@ static const struct attribute_group ad7816_attribute_group = {
* temperature bound events
*/
-#define IIO_EVENT_CODE_AD7816_OTI IIO_UNMOD_EVENT_CODE(IIO_EV_CLASS_TEMP, \
+#define IIO_EVENT_CODE_AD7816_OTI IIO_UNMOD_EVENT_CODE(IIO_TEMP, \
0, \
IIO_EV_TYPE_THRESH, \
IIO_EV_DIR_FALLING)
static irqreturn_t ad7816_event_handler(int irq, void *private)
{
- iio_push_event(private, 0,
- IIO_EVENT_CODE_AD7816_OTI,
- iio_get_time_ns());
+ iio_push_event(private, IIO_EVENT_CODE_AD7816_OTI, iio_get_time_ns());
return IRQ_HANDLED;
}
@@ -263,8 +262,8 @@ static ssize_t ad7816_show_oti(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = iio_priv(indio_dev);
int value;
if (chip->channel_id > AD7816_CS_MAX) {
@@ -284,8 +283,8 @@ static inline ssize_t ad7816_set_oti(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = iio_priv(indio_dev);
long value;
u8 data;
int ret;
@@ -328,11 +327,11 @@ static struct attribute *ad7816_event_attributes[] = {
static struct attribute_group ad7816_event_attribute_group = {
.attrs = ad7816_event_attributes,
+ .name = "events",
};
static const struct iio_info ad7816_info = {
.attrs = &ad7816_attribute_group,
- .num_interrupt_lines = 1,
.event_attrs = &ad7816_event_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -397,10 +396,6 @@ static int __devinit ad7816_probe(struct spi_device *spi_dev)
indio_dev->info = &ad7816_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_gpio;
-
if (spi_dev->irq) {
/* Only low trigger is supported in ad7816/7/8 */
ret = request_threaded_irq(spi_dev->irq,
@@ -410,16 +405,19 @@ static int __devinit ad7816_probe(struct spi_device *spi_dev)
indio_dev->name,
indio_dev);
if (ret)
- goto error_unreg_dev;
+ goto error_free_gpio;
}
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_free_irq;
+
dev_info(&spi_dev->dev, "%s temperature sensor and ADC registered.\n",
indio_dev->name);
return 0;
-
-error_unreg_dev:
- iio_device_unregister(indio_dev);
+error_free_irq:
+ free_irq(spi_dev->irq, indio_dev);
error_free_gpio:
gpio_free(chip->busy_pin);
error_free_gpio_convert:
@@ -437,13 +435,13 @@ static int __devexit ad7816_remove(struct spi_device *spi_dev)
struct iio_dev *indio_dev = dev_get_drvdata(&spi_dev->dev);
struct ad7816_chip_info *chip = iio_priv(indio_dev);
+ iio_device_unregister(indio_dev);
dev_set_drvdata(&spi_dev->dev, NULL);
if (spi_dev->irq)
free_irq(spi_dev->irq, indio_dev);
gpio_free(chip->busy_pin);
gpio_free(chip->convert_pin);
gpio_free(chip->rdwr_pin);
- iio_device_unregister(indio_dev);
iio_free_device(indio_dev);
return 0;
diff --git a/drivers/staging/iio/adc/ad7887.h b/drivers/staging/iio/adc/ad7887.h
index 837046c7b89..3452d181907 100644
--- a/drivers/staging/iio/adc/ad7887.h
+++ b/drivers/staging/iio/adc/ad7887.h
@@ -82,12 +82,12 @@ enum ad7887_supported_device_ids {
ID_AD7887
};
-#ifdef CONFIG_IIO_RING_BUFFER
-int ad7887_scan_from_ring(struct ad7887_state *st, long mask);
+#ifdef CONFIG_IIO_BUFFER
+int ad7887_scan_from_ring(struct ad7887_state *st, int channum);
int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7887_ring_cleanup(struct iio_dev *indio_dev);
-#else /* CONFIG_IIO_RING_BUFFER */
-static inline int ad7887_scan_from_ring(struct ad7887_state *st, long mask)
+#else /* CONFIG_IIO_BUFFER */
+static inline int ad7887_scan_from_ring(struct ad7887_state *st, int channum)
{
return 0;
}
@@ -101,5 +101,5 @@ ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
static inline void ad7887_ring_cleanup(struct iio_dev *indio_dev)
{
}
-#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* CONFIG_IIO_BUFFER */
#endif /* IIO_ADC_AD7887_H_ */
diff --git a/drivers/staging/iio/adc/ad7887_core.c b/drivers/staging/iio/adc/ad7887_core.c
index 3d9121e5c37..609dcd5f2dd 100644
--- a/drivers/staging/iio/adc/ad7887_core.c
+++ b/drivers/staging/iio/adc/ad7887_core.c
@@ -13,11 +13,12 @@
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
-#include "adc.h"
+#include "../buffer_generic.h"
+
#include "ad7887.h"
@@ -30,24 +31,24 @@ static int ad7887_scan_direct(struct ad7887_state *st, unsigned ch)
return (st->data[(ch * 2)] << 8) | st->data[(ch * 2) + 1];
}
-static int ad7887_read_raw(struct iio_dev *dev_info,
+static int ad7887_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
long m)
{
int ret;
- struct ad7887_state *st = iio_priv(dev_info);
+ struct ad7887_state *st = iio_priv(indio_dev);
unsigned int scale_uv;
switch (m) {
case 0:
- mutex_lock(&dev_info->mlock);
- if (iio_ring_enabled(dev_info))
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev))
ret = ad7887_scan_from_ring(st, 1 << chan->address);
else
ret = ad7887_scan_direct(st, chan->address);
- mutex_unlock(&dev_info->mlock);
+ mutex_unlock(&indio_dev->mlock);
if (ret < 0)
return ret;
@@ -70,14 +71,24 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
* More devices added in future
*/
[ID_AD7887] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, 1, IIO_ST('u', 12, 16, 0), 0),
-
- .channel[1] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, 0, IIO_ST('u', 12, 16, 0), 0),
-
+ .channel[0] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = 1,
+ .scan_index = 1,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ },
+ .channel[1] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = 0,
+ .scan_index = 0,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ },
.channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
.int_vref_mv = 2500,
},
@@ -92,7 +103,7 @@ static int __devinit ad7887_probe(struct spi_device *spi)
{
struct ad7887_platform_data *pdata = spi->dev.platform_data;
struct ad7887_state *st;
- int ret, voltage_uv = 0, regdone = 0;
+ int ret, voltage_uv = 0;
struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));
if (indio_dev == NULL)
@@ -188,18 +199,19 @@ static int __devinit ad7887_probe(struct spi_device *spi)
if (ret)
goto error_disable_reg;
- ret = iio_device_register(indio_dev);
+ ret = iio_buffer_register(indio_dev,
+ indio_dev->channels,
+ indio_dev->num_channels);
if (ret)
- goto error_disable_reg;
- regdone = 1;
+ goto error_cleanup_ring;
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- indio_dev->channels,
- indio_dev->num_channels);
+ ret = iio_device_register(indio_dev);
if (ret)
- goto error_cleanup_ring;
- return 0;
+ goto error_unregister_ring;
+ return 0;
+error_unregister_ring:
+ iio_buffer_unregister(indio_dev);
error_cleanup_ring:
ad7887_ring_cleanup(indio_dev);
error_disable_reg:
@@ -208,10 +220,7 @@ error_disable_reg:
error_put_reg:
if (!IS_ERR(st->reg))
regulator_put(st->reg);
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
return ret;
}
@@ -221,13 +230,14 @@ static int ad7887_remove(struct spi_device *spi)
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7887_state *st = iio_priv(indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_device_unregister(indio_dev);
+ iio_buffer_unregister(indio_dev);
ad7887_ring_cleanup(indio_dev);
if (!IS_ERR(st->reg)) {
regulator_disable(st->reg);
regulator_put(st->reg);
}
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7887_ring.c b/drivers/staging/iio/adc/ad7887_ring.c
index 0ac7c0b9d71..cb74cada561 100644
--- a/drivers/staging/iio/adc/ad7887_ring.c
+++ b/drivers/staging/iio/adc/ad7887_ring.c
@@ -8,27 +8,24 @@
*/
#include <linux/interrupt.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../ring_generic.h"
+#include "../buffer_generic.h"
#include "../ring_sw.h"
-#include "../trigger.h"
-#include "../sysfs.h"
+#include "../trigger_consumer.h"
#include "ad7887.h"
-int ad7887_scan_from_ring(struct ad7887_state *st, long mask)
+int ad7887_scan_from_ring(struct ad7887_state *st, int channum)
{
- struct iio_ring_buffer *ring = iio_priv_to_dev(st)->ring;
+ struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
int count = 0, ret;
u16 *ring_data;
- if (!(ring->scan_mask & mask)) {
+ if (!(test_bit(channum, ring->scan_mask))) {
ret = -EBUSY;
goto error_ret;
}
@@ -44,7 +41,8 @@ int ad7887_scan_from_ring(struct ad7887_state *st, long mask)
goto error_free_ring_data;
/* for single channel scan the result is stored with zero offset */
- if ((ring->scan_mask == ((1 << 1) | (1 << 0))) && (mask == (1 << 1)))
+ if ((test_bit(1, ring->scan_mask) || test_bit(0, ring->scan_mask)) &&
+ (channum == 1))
count = 1;
ret = be16_to_cpu(ring_data[count]);
@@ -65,7 +63,7 @@ error_ret:
static int ad7887_ring_preenable(struct iio_dev *indio_dev)
{
struct ad7887_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
st->d_size = ring->scan_count *
st->chip_info->channel[0].scan_type.storagebits / 8;
@@ -77,11 +75,12 @@ static int ad7887_ring_preenable(struct iio_dev *indio_dev)
st->d_size += sizeof(s64) - (st->d_size % sizeof(s64));
}
- if (indio_dev->ring->access->set_bytes_per_datum)
- indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring,
- st->d_size);
+ if (indio_dev->buffer->access->set_bytes_per_datum)
+ indio_dev->buffer->access->
+ set_bytes_per_datum(indio_dev->buffer, st->d_size);
- switch (ring->scan_mask) {
+ /* We know this is a single long so can 'cheat' */
+ switch (*ring->scan_mask) {
case (1 << 0):
st->ring_msg = &st->msg[AD7887_CH0];
break;
@@ -115,9 +114,9 @@ static int ad7887_ring_postdisable(struct iio_dev *indio_dev)
static irqreturn_t ad7887_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
+ struct iio_dev *indio_dev = pf->indio_dev;
struct ad7887_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
s64 time_ns;
__u8 *buf;
int b_sent;
@@ -140,7 +139,7 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p)
memcpy(buf + st->d_size - sizeof(s64),
&time_ns, sizeof(time_ns));
- indio_dev->ring->access->store_to(indio_dev->ring, buf, time_ns);
+ indio_dev->buffer->access->store_to(indio_dev->buffer, buf, time_ns);
done:
kfree(buf);
iio_trigger_notify_done(indio_dev->trig);
@@ -148,10 +147,10 @@ done:
return IRQ_HANDLED;
}
-static const struct iio_ring_setup_ops ad7887_ring_setup_ops = {
+static const struct iio_buffer_setup_ops ad7887_ring_setup_ops = {
.preenable = &ad7887_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
.postdisable = &ad7887_ring_postdisable,
};
@@ -159,13 +158,13 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
int ret;
- indio_dev->ring = iio_sw_rb_allocate(indio_dev);
- if (!indio_dev->ring) {
+ indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
+ if (!indio_dev->buffer) {
ret = -ENOMEM;
goto error_ret;
}
/* Effectively select the ring buffer implementation */
- indio_dev->ring->access = &ring_sw_access_funcs;
+ indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ad7887_trigger_handler,
IRQF_ONESHOT,
@@ -177,26 +176,20 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
goto error_deallocate_sw_rb;
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->ring->setup_ops = &ad7887_ring_setup_ops;
+ indio_dev->buffer->setup_ops = &ad7887_ring_setup_ops;
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
error_ret:
return ret;
}
void ad7887_ring_cleanup(struct iio_dev *indio_dev)
{
- /* ensure that the trigger has been detached */
- if (indio_dev->trig) {
- iio_put_trigger(indio_dev->trig);
- iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc);
- }
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
diff --git a/drivers/staging/iio/adc/ad799x.h b/drivers/staging/iio/adc/ad799x.h
index 0dc9b4c73a3..eda01d5bab7 100644
--- a/drivers/staging/iio/adc/ad799x.h
+++ b/drivers/staging/iio/adc/ad799x.h
@@ -124,11 +124,11 @@ struct ad799x_platform_data {
int ad7997_8_set_scan_mode(struct ad799x_state *st, unsigned mask);
#ifdef CONFIG_AD799X_RING_BUFFER
-int ad799x_single_channel_from_ring(struct ad799x_state *st, long mask);
+int ad799x_single_channel_from_ring(struct iio_dev *indio_dev, int channum);
int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad799x_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_AD799X_RING_BUFFER */
-int ad799x_single_channel_from_ring(struct ad799x_state *st, long mask)
+int ad799x_single_channel_from_ring(struct iio_dev *indio_dev, int channum)
{
return -EINVAL;
}
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 92cfe2e3ea4..ee6cd792bec 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -31,12 +31,12 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/err.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
+#include "../buffer_generic.h"
-#include "../ring_generic.h"
-#include "adc.h"
#include "ad799x.h"
/*
@@ -136,49 +136,56 @@ static int ad799x_scan_direct(struct ad799x_state *st, unsigned ch)
return rxbuf;
}
-static int ad799x_read_raw(struct iio_dev *dev_info,
+static int ad799x_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
long m)
{
int ret;
- struct ad799x_state *st = iio_priv(dev_info);
+ struct ad799x_state *st = iio_priv(indio_dev);
unsigned int scale_uv;
switch (m) {
case 0:
- mutex_lock(&dev_info->mlock);
- if (iio_ring_enabled(dev_info))
- ret = ad799x_single_channel_from_ring(st,
- 1 << chan->address);
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev))
+ ret = ad799x_single_channel_from_ring(indio_dev,
+ chan->scan_index);
else
- ret = ad799x_scan_direct(st, chan->address);
- mutex_unlock(&dev_info->mlock);
+ ret = ad799x_scan_direct(st, chan->scan_index);
+ mutex_unlock(&indio_dev->mlock);
if (ret < 0)
return ret;
- *val = (ret >> st->chip_info->channel[0].scan_type.shift) &
- RES_MASK(st->chip_info->channel[0].scan_type.realbits);
+ *val = (ret >> chan->scan_type.shift) &
+ RES_MASK(chan->scan_type.realbits);
return IIO_VAL_INT;
case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- scale_uv = (st->int_vref_mv * 1000)
- >> st->chip_info->channel[0].scan_type.realbits;
+ scale_uv = (st->int_vref_mv * 1000) >> chan->scan_type.realbits;
*val = scale_uv / 1000;
*val2 = (scale_uv % 1000) * 1000;
return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
}
-
+static const unsigned int ad7998_frequencies[] = {
+ [AD7998_CYC_DIS] = 0,
+ [AD7998_CYC_TCONF_32] = 15625,
+ [AD7998_CYC_TCONF_64] = 7812,
+ [AD7998_CYC_TCONF_128] = 3906,
+ [AD7998_CYC_TCONF_512] = 976,
+ [AD7998_CYC_TCONF_1024] = 488,
+ [AD7998_CYC_TCONF_2048] = 244,
+};
static ssize_t ad799x_read_frequency(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad799x_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad799x_state *st = iio_priv(indio_dev);
- int ret, len = 0;
+ int ret;
u8 val;
ret = ad799x_i2c_read8(st, AD7998_CYCLE_TMR_REG, &val);
if (ret)
@@ -186,33 +193,7 @@ static ssize_t ad799x_read_frequency(struct device *dev,
val &= AD7998_CYC_MASK;
- switch (val) {
- case AD7998_CYC_DIS:
- len = sprintf(buf, "0\n");
- break;
- case AD7998_CYC_TCONF_32:
- len = sprintf(buf, "15625\n");
- break;
- case AD7998_CYC_TCONF_64:
- len = sprintf(buf, "7812\n");
- break;
- case AD7998_CYC_TCONF_128:
- len = sprintf(buf, "3906\n");
- break;
- case AD7998_CYC_TCONF_256:
- len = sprintf(buf, "1953\n");
- break;
- case AD7998_CYC_TCONF_512:
- len = sprintf(buf, "976\n");
- break;
- case AD7998_CYC_TCONF_1024:
- len = sprintf(buf, "488\n");
- break;
- case AD7998_CYC_TCONF_2048:
- len = sprintf(buf, "244\n");
- break;
- }
- return len;
+ return sprintf(buf, "%u\n", ad7998_frequencies[val]);
}
static ssize_t ad799x_write_frequency(struct device *dev,
@@ -220,68 +201,101 @@ static ssize_t ad799x_write_frequency(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad799x_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad799x_state *st = iio_priv(indio_dev);
long val;
- int ret;
+ int ret, i;
u8 t;
ret = strict_strtol(buf, 10, &val);
if (ret)
return ret;
- mutex_lock(&dev_info->mlock);
+ mutex_lock(&indio_dev->mlock);
ret = ad799x_i2c_read8(st, AD7998_CYCLE_TMR_REG, &t);
if (ret)
goto error_ret_mutex;
/* Wipe the bits clean */
t &= ~AD7998_CYC_MASK;
- switch (val) {
- case 15625:
- t |= AD7998_CYC_TCONF_32;
- break;
- case 7812:
- t |= AD7998_CYC_TCONF_64;
- break;
- case 3906:
- t |= AD7998_CYC_TCONF_128;
- break;
- case 1953:
- t |= AD7998_CYC_TCONF_256;
- break;
- case 976:
- t |= AD7998_CYC_TCONF_512;
- break;
- case 488:
- t |= AD7998_CYC_TCONF_1024;
- break;
- case 244:
- t |= AD7998_CYC_TCONF_2048;
- break;
- case 0:
- t |= AD7998_CYC_DIS;
- break;
- default:
+ for (i = 0; i < ARRAY_SIZE(ad7998_frequencies); i++)
+ if (val == ad7998_frequencies[i])
+ break;
+ if (i == ARRAY_SIZE(ad7998_frequencies)) {
ret = -EINVAL;
goto error_ret_mutex;
}
-
+ t |= i;
ret = ad799x_i2c_write8(st, AD7998_CYCLE_TMR_REG, t);
error_ret_mutex:
- mutex_unlock(&dev_info->mlock);
+ mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
}
+static int ad799x_read_event_config(struct iio_dev *indio_dev,
+ u64 event_code)
+{
+ return 1;
+}
+
+static const u8 ad799x_threshold_addresses[][2] = {
+ { AD7998_DATALOW_CH1_REG, AD7998_DATAHIGH_CH1_REG },
+ { AD7998_DATALOW_CH2_REG, AD7998_DATAHIGH_CH2_REG },
+ { AD7998_DATALOW_CH3_REG, AD7998_DATAHIGH_CH3_REG },
+ { AD7998_DATALOW_CH4_REG, AD7998_DATAHIGH_CH4_REG },
+};
+
+static int ad799x_write_event_value(struct iio_dev *indio_dev,
+ u64 event_code,
+ int val)
+{
+ int ret;
+ struct ad799x_state *st = iio_priv(indio_dev);
+ int direction = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_FALLING);
+ int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad799x_i2c_write16(st,
+ ad799x_threshold_addresses[number][direction],
+ val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad799x_read_event_value(struct iio_dev *indio_dev,
+ u64 event_code,
+ int *val)
+{
+ int ret;
+ struct ad799x_state *st = iio_priv(indio_dev);
+ int direction = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_FALLING);
+ int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ u16 valin;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad799x_i2c_read16(st,
+ ad799x_threshold_addresses[number][direction],
+ &valin);
+ mutex_unlock(&indio_dev->mlock);
+ if (ret < 0)
+ return ret;
+ *val = valin;
+
+ return 0;
+}
+
static ssize_t ad799x_read_channel_config(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad799x_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad799x_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
@@ -298,8 +312,8 @@ static ssize_t ad799x_write_channel_config(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad799x_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad799x_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
long val;
@@ -309,9 +323,9 @@ static ssize_t ad799x_write_channel_config(struct device *dev,
if (ret)
return ret;
- mutex_lock(&dev_info->mlock);
+ mutex_lock(&indio_dev->mlock);
ret = ad799x_i2c_write16(st, this_attr->address, val);
- mutex_unlock(&dev_info->mlock);
+ mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
}
@@ -334,83 +348,41 @@ static irqreturn_t ad799x_event_handler(int irq, void *private)
for (i = 0; i < 8; i++) {
if (status & (1 << i))
- iio_push_event(indio_dev, 0,
+ iio_push_event(indio_dev,
i & 0x1 ?
- IIO_EVENT_CODE_IN_HIGH_THRESH(i >> 1) :
- IIO_EVENT_CODE_IN_LOW_THRESH(i >> 1),
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE,
+ (i >> 1),
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING) :
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE,
+ (i >> 1),
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
iio_get_time_ns());
}
return IRQ_HANDLED;
}
-static IIO_DEVICE_ATTR(in0_thresh_low_value,
- S_IRUGO | S_IWUSR,
- ad799x_read_channel_config,
- ad799x_write_channel_config,
- AD7998_DATALOW_CH1_REG);
-
-static IIO_DEVICE_ATTR(in0_thresh_high_value,
- S_IRUGO | S_IWUSR,
- ad799x_read_channel_config,
- ad799x_write_channel_config,
- AD7998_DATAHIGH_CH1_REG);
-
-static IIO_DEVICE_ATTR(in0_thresh_both_hyst_raw,
+static IIO_DEVICE_ATTR(in_voltage0_thresh_both_hyst_raw,
S_IRUGO | S_IWUSR,
ad799x_read_channel_config,
ad799x_write_channel_config,
AD7998_HYST_CH1_REG);
-static IIO_DEVICE_ATTR(in1_thresh_low_value,
- S_IRUGO | S_IWUSR,
- ad799x_read_channel_config,
- ad799x_write_channel_config,
- AD7998_DATALOW_CH2_REG);
-
-static IIO_DEVICE_ATTR(in1_thresh_high_value,
- S_IRUGO | S_IWUSR,
- ad799x_read_channel_config,
- ad799x_write_channel_config,
- AD7998_DATAHIGH_CH2_REG);
-
-static IIO_DEVICE_ATTR(in1_thresh_both_hyst_raw,
+static IIO_DEVICE_ATTR(in_voltage1_thresh_both_hyst_raw,
S_IRUGO | S_IWUSR,
ad799x_read_channel_config,
ad799x_write_channel_config,
AD7998_HYST_CH2_REG);
-static IIO_DEVICE_ATTR(in2_thresh_low_value,
- S_IRUGO | S_IWUSR,
- ad799x_read_channel_config,
- ad799x_write_channel_config,
- AD7998_DATALOW_CH3_REG);
-
-static IIO_DEVICE_ATTR(in2_thresh_high_value,
- S_IRUGO | S_IWUSR,
- ad799x_read_channel_config,
- ad799x_write_channel_config,
- AD7998_DATAHIGH_CH3_REG);
-
-static IIO_DEVICE_ATTR(in2_thresh_both_hyst_raw,
+static IIO_DEVICE_ATTR(in_voltage2_thresh_both_hyst_raw,
S_IRUGO | S_IWUSR,
ad799x_read_channel_config,
ad799x_write_channel_config,
AD7998_HYST_CH3_REG);
-static IIO_DEVICE_ATTR(in3_thresh_low_value,
- S_IRUGO | S_IWUSR,
- ad799x_read_channel_config,
- ad799x_write_channel_config,
- AD7998_DATALOW_CH4_REG);
-
-static IIO_DEVICE_ATTR(in3_thresh_high_value,
- S_IRUGO | S_IWUSR,
- ad799x_read_channel_config,
- ad799x_write_channel_config,
- AD7998_DATAHIGH_CH4_REG);
-
-static IIO_DEVICE_ATTR(in3_thresh_both_hyst_raw,
+static IIO_DEVICE_ATTR(in_voltage3_thresh_both_hyst_raw,
S_IRUGO | S_IWUSR,
ad799x_read_channel_config,
ad799x_write_channel_config,
@@ -422,18 +394,10 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("15625 7812 3906 1953 976 488 244 0");
static struct attribute *ad7993_4_7_8_event_attributes[] = {
- &iio_dev_attr_in0_thresh_low_value.dev_attr.attr,
- &iio_dev_attr_in0_thresh_high_value.dev_attr.attr,
- &iio_dev_attr_in0_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in1_thresh_low_value.dev_attr.attr,
- &iio_dev_attr_in1_thresh_high_value.dev_attr.attr,
- &iio_dev_attr_in1_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in2_thresh_low_value.dev_attr.attr,
- &iio_dev_attr_in2_thresh_high_value.dev_attr.attr,
- &iio_dev_attr_in2_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in3_thresh_low_value.dev_attr.attr,
- &iio_dev_attr_in3_thresh_high_value.dev_attr.attr,
- &iio_dev_attr_in3_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage0_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage1_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage2_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage3_thresh_both_hyst_raw.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
NULL,
@@ -441,15 +405,12 @@ static struct attribute *ad7993_4_7_8_event_attributes[] = {
static struct attribute_group ad7993_4_7_8_event_attrs_group = {
.attrs = ad7993_4_7_8_event_attributes,
+ .name = "events",
};
static struct attribute *ad7992_event_attributes[] = {
- &iio_dev_attr_in0_thresh_low_value.dev_attr.attr,
- &iio_dev_attr_in0_thresh_high_value.dev_attr.attr,
- &iio_dev_attr_in0_thresh_both_hyst_raw.dev_attr.attr,
- &iio_dev_attr_in1_thresh_low_value.dev_attr.attr,
- &iio_dev_attr_in1_thresh_high_value.dev_attr.attr,
- &iio_dev_attr_in1_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage0_thresh_both_hyst_raw.dev_attr.attr,
+ &iio_dev_attr_in_voltage1_thresh_both_hyst_raw.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
NULL,
@@ -457,6 +418,7 @@ static struct attribute *ad7992_event_attributes[] = {
static struct attribute_group ad7992_event_attrs_group = {
.attrs = ad7992_event_attributes,
+ .name = "events",
};
static const struct iio_info ad7991_info = {
@@ -466,181 +428,374 @@ static const struct iio_info ad7991_info = {
static const struct iio_info ad7992_info = {
.read_raw = &ad799x_read_raw,
- .num_interrupt_lines = 1,
.event_attrs = &ad7992_event_attrs_group,
+ .read_event_config = &ad799x_read_event_config,
+ .read_event_value = &ad799x_read_event_value,
+ .write_event_value = &ad799x_write_event_value,
.driver_module = THIS_MODULE,
};
static const struct iio_info ad7993_4_7_8_info = {
.read_raw = &ad799x_read_raw,
- .num_interrupt_lines = 1,
.event_attrs = &ad7993_4_7_8_event_attrs_group,
+ .read_event_config = &ad799x_read_event_config,
+ .read_event_value = &ad799x_read_event_value,
+ .write_event_value = &ad799x_write_event_value,
.driver_module = THIS_MODULE,
};
+#define AD799X_EV_MASK (IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) | \
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING))
+
static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
[ad7991] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, 0, IIO_ST('u', 12, 16, 0), 0),
- .channel[1] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, 1, IIO_ST('u', 12, 16, 0), 0),
- .channel[2] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 2, 2, IIO_ST('u', 12, 16, 0), 0),
- .channel[3] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 3, 3, IIO_ST('u', 12, 16, 0), 0),
- .channel[4] = IIO_CHAN_SOFT_TIMESTAMP(4),
+ .channel = {
+ [0] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .scan_index = 0,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ },
+ [1] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .scan_index = 1,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ },
+ [2] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 2,
+ .scan_index = 2,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ },
+ [3] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 3,
+ .scan_index = 3,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ },
+ [4] = IIO_CHAN_SOFT_TIMESTAMP(4),
+ },
.num_channels = 5,
.int_vref_mv = 4096,
.info = &ad7991_info,
},
[ad7995] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, 0, IIO_ST('u', 10, 16, 0), 0),
- .channel[1] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, 1, IIO_ST('u', 10, 16, 0), 0),
- .channel[2] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 2, 2, IIO_ST('u', 10, 16, 0), 0),
- .channel[3] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 3, 3, IIO_ST('u', 10, 16, 0), 0),
- .channel[4] = IIO_CHAN_SOFT_TIMESTAMP(4),
+ .channel = {
+ [0] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .scan_index = 0,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ },
+ [1] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .scan_index = 1,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ },
+ [2] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 2,
+ .scan_index = 2,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ },
+ [3] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 3,
+ .scan_index = 3,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ },
+ [4] = IIO_CHAN_SOFT_TIMESTAMP(4),
+ },
.num_channels = 5,
.int_vref_mv = 1024,
.info = &ad7991_info,
},
[ad7999] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, 0, IIO_ST('u', 10, 16, 0), 0),
- .channel[1] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, 1, IIO_ST('u', 10, 16, 0), 0),
- .channel[2] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 2, 2, IIO_ST('u', 10, 16, 0), 0),
- .channel[3] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 3, 3, IIO_ST('u', 10, 16, 0), 0),
- .channel[4] = IIO_CHAN_SOFT_TIMESTAMP(4),
+ .channel = {
+ [0] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .scan_index = 0,
+ .scan_type = IIO_ST('u', 8, 16, 4),
+ },
+ [1] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .scan_index = 1,
+ .scan_type = IIO_ST('u', 8, 16, 4),
+ },
+ [2] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 2,
+ .scan_index = 2,
+ .scan_type = IIO_ST('u', 8, 16, 4),
+ },
+ [3] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 3,
+ .scan_index = 3,
+ .scan_type = IIO_ST('u', 8, 16, 4),
+ },
+ [4] = IIO_CHAN_SOFT_TIMESTAMP(4),
+ },
.num_channels = 5,
.int_vref_mv = 1024,
.info = &ad7991_info,
},
[ad7992] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, 0, IIO_ST('u', 12, 16, 0), 0),
- .channel[1] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, 1, IIO_ST('u', 12, 16, 0), 0),
- .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
+ .channel = {
+ [0] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .scan_index = 0,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [1] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .scan_index = 1,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [2] = IIO_CHAN_SOFT_TIMESTAMP(2),
+ },
.num_channels = 3,
.int_vref_mv = 4096,
.default_config = AD7998_ALERT_EN,
.info = &ad7992_info,
},
[ad7993] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, 0, IIO_ST('u', 10, 16, 0), 0),
- .channel[1] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, 1, IIO_ST('u', 10, 16, 0), 0),
- .channel[2] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 2, 2, IIO_ST('u', 10, 16, 0), 0),
- .channel[3] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 3, 3, IIO_ST('u', 10, 16, 0), 0),
- .channel[4] = IIO_CHAN_SOFT_TIMESTAMP(4),
+ .channel = {
+ [0] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .scan_index = 0,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [1] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .scan_index = 1,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [2] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 2,
+ .scan_index = 2,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [3] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 3,
+ .scan_index = 3,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [4] = IIO_CHAN_SOFT_TIMESTAMP(4),
+ },
.num_channels = 5,
.int_vref_mv = 1024,
.default_config = AD7998_ALERT_EN,
.info = &ad7993_4_7_8_info,
},
[ad7994] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, 0, IIO_ST('u', 12, 16, 0), 0),
- .channel[1] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, 1, IIO_ST('u', 12, 16, 0), 0),
- .channel[2] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 2, 2, IIO_ST('u', 12, 16, 0), 0),
- .channel[3] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 3, 3, IIO_ST('u', 12, 16, 0), 0),
- .channel[4] = IIO_CHAN_SOFT_TIMESTAMP(4),
+ .channel = {
+ [0] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .scan_index = 0,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [1] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .scan_index = 1,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [2] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 2,
+ .scan_index = 2,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [3] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 3,
+ .scan_index = 3,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [4] = IIO_CHAN_SOFT_TIMESTAMP(4),
+ },
.num_channels = 5,
.int_vref_mv = 4096,
.default_config = AD7998_ALERT_EN,
.info = &ad7993_4_7_8_info,
},
[ad7997] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, 0, IIO_ST('u', 10, 16, 0), 0),
- .channel[1] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, 1, IIO_ST('u', 10, 16, 0), 0),
- .channel[2] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 2, 2, IIO_ST('u', 10, 16, 0), 0),
- .channel[3] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 3, 3, IIO_ST('u', 10, 16, 0), 0),
- .channel[4] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 4, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 4, 4, IIO_ST('u', 10, 16, 0), 0),
- .channel[5] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 5, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 5, 5, IIO_ST('u', 10, 16, 0), 0),
- .channel[6] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 6, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 6, 6, IIO_ST('u', 10, 16, 0), 0),
- .channel[7] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 7, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 7, 7, IIO_ST('u', 10, 16, 0), 0),
- .channel[8] = IIO_CHAN_SOFT_TIMESTAMP(8),
+ .channel = {
+ [0] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .scan_index = 0,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [1] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .scan_index = 1,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [2] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 2,
+ .scan_index = 2,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [3] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 3,
+ .scan_index = 3,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [4] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 4,
+ .scan_index = 4,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ },
+ [5] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 5,
+ .scan_index = 5,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ },
+ [6] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 6,
+ .scan_index = 6,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ },
+ [7] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 7,
+ .scan_index = 7,
+ .scan_type = IIO_ST('u', 10, 16, 2),
+ },
+ [8] = IIO_CHAN_SOFT_TIMESTAMP(8),
+ },
.num_channels = 9,
.int_vref_mv = 1024,
.default_config = AD7998_ALERT_EN,
.info = &ad7993_4_7_8_info,
},
[ad7998] = {
- .channel[0] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, 0, IIO_ST('u', 12, 16, 0), 0),
- .channel[1] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, 1, IIO_ST('u', 12, 16, 0), 0),
- .channel[2] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 2, 2, IIO_ST('u', 12, 16, 0), 0),
- .channel[3] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 3, 3, IIO_ST('u', 12, 16, 0), 0),
- .channel[4] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 4, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 4, 4, IIO_ST('u', 12, 16, 0), 0),
- .channel[5] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 5, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 5, 5, IIO_ST('u', 12, 16, 0), 0),
- .channel[6] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 6, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 6, 6, IIO_ST('u', 12, 16, 0), 0),
- .channel[7] = IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 7, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- 7, 7, IIO_ST('u', 12, 16, 0), 0),
- .channel[8] = IIO_CHAN_SOFT_TIMESTAMP(8),
+ .channel = {
+ [0] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .scan_index = 0,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [1] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .scan_index = 1,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [2] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 2,
+ .scan_index = 2,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [3] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 3,
+ .scan_index = 3,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ .event_mask = AD799X_EV_MASK,
+ },
+ [4] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 4,
+ .scan_index = 4,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ },
+ [5] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 5,
+ .scan_index = 5,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ },
+ [6] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 6,
+ .scan_index = 6,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ },
+ [7] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 7,
+ .scan_index = 7,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ },
+ [8] = IIO_CHAN_SOFT_TIMESTAMP(8),
+ },
.num_channels = 9,
.int_vref_mv = 4096,
.default_config = AD7998_ALERT_EN,
@@ -651,7 +806,7 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
static int __devinit ad799x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int ret, regdone = 0;
+ int ret;
struct ad799x_platform_data *pdata = client->dev.platform_data;
struct ad799x_state *st;
struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));
@@ -685,7 +840,6 @@ static int __devinit ad799x_probe(struct i2c_client *client,
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
indio_dev->info = st->chip_info->info;
- indio_dev->name = id->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->chip_info->channel;
@@ -695,14 +849,9 @@ static int __devinit ad799x_probe(struct i2c_client *client,
if (ret)
goto error_disable_reg;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_ring;
- regdone = 1;
-
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- indio_dev->channels,
- indio_dev->num_channels);
+ ret = iio_buffer_register(indio_dev,
+ indio_dev->channels,
+ indio_dev->num_channels);
if (ret)
goto error_cleanup_ring;
@@ -717,9 +866,14 @@ static int __devinit ad799x_probe(struct i2c_client *client,
if (ret)
goto error_cleanup_ring;
}
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_free_irq;
return 0;
+error_free_irq:
+ free_irq(client->irq, indio_dev);
error_cleanup_ring:
ad799x_ring_cleanup(indio_dev);
error_disable_reg:
@@ -728,10 +882,7 @@ error_disable_reg:
error_put_reg:
if (!IS_ERR(st->reg))
regulator_put(st->reg);
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
return ret;
}
@@ -741,16 +892,17 @@ static __devexit int ad799x_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ad799x_state *st = iio_priv(indio_dev);
+ iio_device_unregister(indio_dev);
if (client->irq > 0)
free_irq(client->irq, indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_buffer_unregister(indio_dev);
ad799x_ring_cleanup(indio_dev);
if (!IS_ERR(st->reg)) {
regulator_disable(st->reg);
regulator_put(st->reg);
}
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad799x_ring.c b/drivers/staging/iio/adc/ad799x_ring.c
index 0376a826c26..e3f4698d781 100644
--- a/drivers/staging/iio/adc/ad799x_ring.c
+++ b/drivers/staging/iio/adc/ad799x_ring.c
@@ -10,29 +10,26 @@
*/
#include <linux/interrupt.h>
-#include <linux/device.h>
#include <linux/slab.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/i2c.h>
#include <linux/bitops.h>
#include "../iio.h"
-#include "../ring_generic.h"
+#include "../buffer_generic.h"
#include "../ring_sw.h"
-#include "../trigger.h"
-#include "../sysfs.h"
+#include "../trigger_consumer.h"
#include "ad799x.h"
-int ad799x_single_channel_from_ring(struct ad799x_state *st, long mask)
+int ad799x_single_channel_from_ring(struct iio_dev *indio_dev, int channum)
{
- struct iio_ring_buffer *ring = iio_priv_to_dev(st)->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
int count = 0, ret;
u16 *ring_data;
- if (!(ring->scan_mask & mask)) {
+ if (!(test_bit(channum, ring->scan_mask))) {
ret = -EBUSY;
goto error_ret;
}
@@ -47,13 +44,7 @@ int ad799x_single_channel_from_ring(struct ad799x_state *st, long mask)
if (ret)
goto error_free_ring_data;
/* Need a count of channels prior to this one */
- mask >>= 1;
- while (mask) {
- if (mask & ring->scan_mask)
- count++;
- mask >>= 1;
- }
-
+ count = bitmap_weight(ring->scan_mask, channum);
ret = be16_to_cpu(ring_data[count]);
error_free_ring_data:
@@ -71,7 +62,7 @@ error_ret:
**/
static int ad799x_ring_preenable(struct iio_dev *indio_dev)
{
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
struct ad799x_state *st = iio_priv(indio_dev);
/*
@@ -80,7 +71,7 @@ static int ad799x_ring_preenable(struct iio_dev *indio_dev)
*/
if (st->id == ad7997 || st->id == ad7998)
- ad7997_8_set_scan_mode(st, ring->scan_mask);
+ ad7997_8_set_scan_mode(st, *ring->scan_mask);
st->d_size = ring->scan_count * 2;
@@ -91,9 +82,9 @@ static int ad799x_ring_preenable(struct iio_dev *indio_dev)
st->d_size += sizeof(s64) - (st->d_size % sizeof(s64));
}
- if (indio_dev->ring->access->set_bytes_per_datum)
- indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring,
- st->d_size);
+ if (indio_dev->buffer->access->set_bytes_per_datum)
+ indio_dev->buffer->access->
+ set_bytes_per_datum(indio_dev->buffer, st->d_size);
return 0;
}
@@ -108,9 +99,9 @@ static int ad799x_ring_preenable(struct iio_dev *indio_dev)
static irqreturn_t ad799x_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
+ struct iio_dev *indio_dev = pf->indio_dev;
struct ad799x_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
s64 time_ns;
__u8 *rxbuf;
int b_sent;
@@ -124,12 +115,12 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
case ad7991:
case ad7995:
case ad7999:
- cmd = st->config | (ring->scan_mask << AD799X_CHANNEL_SHIFT);
+ cmd = st->config | (*ring->scan_mask << AD799X_CHANNEL_SHIFT);
break;
case ad7992:
case ad7993:
case ad7994:
- cmd = (ring->scan_mask << AD799X_CHANNEL_SHIFT) |
+ cmd = (*ring->scan_mask << AD799X_CHANNEL_SHIFT) |
AD7998_CONV_RES_REG;
break;
case ad7997:
@@ -151,7 +142,7 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
memcpy(rxbuf + st->d_size - sizeof(s64),
&time_ns, sizeof(time_ns));
- ring->access->store_to(indio_dev->ring, rxbuf, time_ns);
+ ring->access->store_to(indio_dev->buffer, rxbuf, time_ns);
done:
kfree(rxbuf);
if (b_sent < 0)
@@ -162,23 +153,23 @@ out:
return IRQ_HANDLED;
}
-static const struct iio_ring_setup_ops ad799x_buf_setup_ops = {
+static const struct iio_buffer_setup_ops ad799x_buf_setup_ops = {
.preenable = &ad799x_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
};
int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
int ret = 0;
- indio_dev->ring = iio_sw_rb_allocate(indio_dev);
- if (!indio_dev->ring) {
+ indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
+ if (!indio_dev->buffer) {
ret = -ENOMEM;
goto error_ret;
}
/* Effectively select the ring buffer implementation */
- indio_dev->ring->access = &ring_sw_access_funcs;
+ indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
&ad799x_trigger_handler,
IRQF_ONESHOT,
@@ -192,27 +183,21 @@ int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->ring->setup_ops = &ad799x_buf_setup_ops;
- indio_dev->ring->scan_timestamp = true;
+ indio_dev->buffer->setup_ops = &ad799x_buf_setup_ops;
+ indio_dev->buffer->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
error_ret:
return ret;
}
void ad799x_ring_cleanup(struct iio_dev *indio_dev)
{
- /* ensure that the trigger has been detached */
- if (indio_dev->trig) {
- iio_put_trigger(indio_dev->trig);
- iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc);
- }
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
diff --git a/drivers/staging/iio/adc/adc.h b/drivers/staging/iio/adc/adc.h
deleted file mode 100644
index 40c5949880b..00000000000
--- a/drivers/staging/iio/adc/adc.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * adc.h - sysfs attributes associated with ADCs
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Copyright (c) 2008 Jonathan Cameron <jic23@cam.ac.uk>
- *
- */
-
-/* Deprecated */
-#define IIO_DEV_ATTR_ADC(_num, _show, _addr) \
- IIO_DEVICE_ATTR(adc_##_num, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_IN_RAW(_num, _show, _addr) \
- IIO_DEVICE_ATTR(in##_num##_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_IN_NAMED_RAW(_num, _name, _show, _addr) \
- IIO_DEVICE_ATTR(in##_num##_##_name##_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_IN_DIFF_RAW(_nump, _numn, _show, _addr) \
- IIO_DEVICE_ATTR_NAMED(in##_nump##min##_numn##_raw, \
- in##_nump-in##_numn##_raw, \
- S_IRUGO, \
- _show, \
- NULL, \
- _addr)
-
-
-#define IIO_CONST_ATTR_IN_NAMED_OFFSET(_num, _name, _string) \
- IIO_CONST_ATTR(in##_num##_##_name##_offset, _string)
-
-#define IIO_CONST_ATTR_IN_NAMED_SCALE(_num, _name, _string) \
- IIO_CONST_ATTR(in##_num##_##_name##_scale, _string)
-
-#define IIO_EVENT_CODE_IN_HIGH_THRESH(a) \
- IIO_UNMOD_EVENT_CODE(IIO_EV_CLASS_IN, a, IIO_EV_TYPE_THRESH, \
- IIO_EV_DIR_RISING)
-#define IIO_EVENT_CODE_IN_LOW_THRESH(a) \
- IIO_UNMOD_EVENT_CODE(IIO_EV_CLASS_IN, a, IIO_EV_TYPE_THRESH, \
- IIO_EV_DIR_FALLING)
diff --git a/drivers/staging/iio/adc/adt7310.c b/drivers/staging/iio/adc/adt7310.c
index 1a41b803440..c9e0be3b1cf 100644
--- a/drivers/staging/iio/adc/adt7310.c
+++ b/drivers/staging/iio/adc/adt7310.c
@@ -13,6 +13,7 @@
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/spi/spi.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -399,19 +400,19 @@ static irqreturn_t adt7310_event_handler(int irq, void *private)
return ret;
if (status & ADT7310_STAT_T_HIGH)
- iio_push_event(indio_dev, 0,
+ iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
timestamp);
if (status & ADT7310_STAT_T_LOW)
- iio_push_event(indio_dev, 0,
+ iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
timestamp);
if (status & ADT7310_STAT_T_CRIT)
- iio_push_event(indio_dev, 0,
+ iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
@@ -740,14 +741,15 @@ static struct attribute *adt7310_event_ct_attributes[] = {
static struct attribute_group adt7310_event_attribute_group[ADT7310_IRQS] = {
{
.attrs = adt7310_event_int_attributes,
+ .name = "events",
}, {
.attrs = adt7310_event_ct_attributes,
+ .name = "events",
}
};
static const struct iio_info adt7310_info = {
.attrs = &adt7310_attribute_group,
- .num_interrupt_lines = ADT7310_IRQS,
.event_attrs = adt7310_event_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -780,10 +782,6 @@ static int __devinit adt7310_probe(struct spi_device *spi_dev)
indio_dev->info = &adt7310_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
-
/* CT critcal temperature event. line 0 */
if (spi_dev->irq) {
if (adt7310_platform_data[2])
@@ -797,7 +795,7 @@ static int __devinit adt7310_probe(struct spi_device *spi_dev)
indio_dev->name,
indio_dev);
if (ret)
- goto error_unreg_dev;
+ goto error_free_dev;
}
/* INT bound temperature alarm event. line 1 */
@@ -834,6 +832,10 @@ static int __devinit adt7310_probe(struct spi_device *spi_dev)
}
}
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unreg_int_irq;
+
dev_info(&spi_dev->dev, "%s temperature sensor registered.\n",
indio_dev->name);
@@ -843,8 +845,6 @@ error_unreg_int_irq:
free_irq(adt7310_platform_data[0], indio_dev);
error_unreg_ct_irq:
free_irq(spi_dev->irq, indio_dev);
-error_unreg_dev:
- iio_device_unregister(indio_dev);
error_free_dev:
iio_free_device(indio_dev);
error_ret:
@@ -856,12 +856,12 @@ static int __devexit adt7310_remove(struct spi_device *spi_dev)
struct iio_dev *indio_dev = dev_get_drvdata(&spi_dev->dev);
unsigned long *adt7310_platform_data = spi_dev->dev.platform_data;
+ iio_device_unregister(indio_dev);
dev_set_drvdata(&spi_dev->dev, NULL);
if (adt7310_platform_data[0])
free_irq(adt7310_platform_data[0], indio_dev);
if (spi_dev->irq)
free_irq(spi_dev->irq, indio_dev);
- iio_device_unregister(indio_dev);
iio_free_device(indio_dev);
return 0;
diff --git a/drivers/staging/iio/adc/adt7410.c b/drivers/staging/iio/adc/adt7410.c
index 76aa0639a55..a289e429dc4 100644
--- a/drivers/staging/iio/adc/adt7410.c
+++ b/drivers/staging/iio/adc/adt7410.c
@@ -13,6 +13,7 @@
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/i2c.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -365,19 +366,19 @@ static irqreturn_t adt7410_event_handler(int irq, void *private)
return IRQ_HANDLED;
if (status & ADT7410_STAT_T_HIGH)
- iio_push_event(indio_dev, 0,
+ iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
timestamp);
if (status & ADT7410_STAT_T_LOW)
- iio_push_event(indio_dev, 0,
+ iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
timestamp);
if (status & ADT7410_STAT_T_CRIT)
- iio_push_event(indio_dev, 0,
+ iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
@@ -707,14 +708,15 @@ static struct attribute *adt7410_event_ct_attributes[] = {
static struct attribute_group adt7410_event_attribute_group[ADT7410_IRQS] = {
{
.attrs = adt7410_event_int_attributes,
+ .name = "events",
}, {
.attrs = adt7410_event_ct_attributes,
+ .name = "events",
}
};
static const struct iio_info adt7410_info = {
.attrs = &adt7410_attribute_group,
- .num_interrupt_lines = ADT7410_IRQS,
.event_attrs = adt7410_event_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -747,10 +749,6 @@ static int __devinit adt7410_probe(struct i2c_client *client,
indio_dev->info = &adt7410_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
-
/* CT critcal temperature event. line 0 */
if (client->irq) {
ret = request_threaded_irq(client->irq,
@@ -760,7 +758,7 @@ static int __devinit adt7410_probe(struct i2c_client *client,
id->name,
indio_dev);
if (ret)
- goto error_unreg_dev;
+ goto error_free_dev;
}
/* INT bound temperature alarm event. line 1 */
@@ -797,6 +795,9 @@ static int __devinit adt7410_probe(struct i2c_client *client,
goto error_unreg_int_irq;
}
}
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unreg_int_irq;
dev_info(&client->dev, "%s temperature sensor registered.\n",
id->name);
@@ -807,8 +808,6 @@ error_unreg_int_irq:
free_irq(adt7410_platform_data[0], indio_dev);
error_unreg_ct_irq:
free_irq(client->irq, indio_dev);
-error_unreg_dev:
- iio_device_unregister(indio_dev);
error_free_dev:
iio_free_device(indio_dev);
error_ret:
@@ -820,11 +819,12 @@ static int __devexit adt7410_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
unsigned long *adt7410_platform_data = client->dev.platform_data;
+ iio_device_unregister(indio_dev);
if (adt7410_platform_data[0])
free_irq(adt7410_platform_data[0], indio_dev);
if (client->irq)
free_irq(client->irq, indio_dev);
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/adt75.c b/drivers/staging/iio/adc/adt75.c
deleted file mode 100644
index 38f141de6a4..00000000000
--- a/drivers/staging/iio/adc/adt75.c
+++ /dev/null
@@ -1,657 +0,0 @@
-/*
- * ADT75 digital temperature sensor driver supporting ADT75
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/i2c.h>
-
-#include "../iio.h"
-#include "../sysfs.h"
-
-/*
- * ADT75 registers definition
- */
-
-#define ADT75_TEMPERATURE 0
-#define ADT75_CONFIG 1
-#define ADT75_T_HYST 2
-#define ADT75_T_OS 3
-#define ADT75_ONESHOT 4
-
-/*
- * ADT75 config
- */
-#define ADT75_PD 0x1
-#define ADT75_OS_INT 0x2
-#define ADT75_OS_POLARITY 0x4
-#define ADT75_FAULT_QUEUE_MASK 0x18
-#define ADT75_FAULT_QUEUE_OFFSET 3
-#define ADT75_SMBUS_ALART 0x8
-
-/*
- * ADT75 masks
- */
-#define ADT75_VALUE_SIGN 0x800
-#define ADT75_VALUE_OFFSET 4
-#define ADT75_VALUE_FLOAT_OFFSET 4
-#define ADT75_VALUE_FLOAT_MASK 0xF
-
-
-/*
- * struct adt75_chip_info - chip specifc information
- */
-
-struct adt75_chip_info {
- struct i2c_client *client;
- u8 config;
-};
-
-/*
- * adt75 register access by I2C
- */
-
-static int adt75_i2c_read(struct iio_dev *dev_info, u8 reg, u8 *data)
-{
- struct adt75_chip_info *chip = iio_priv(dev_info);
- struct i2c_client *client = chip->client;
- int ret = 0, len;
-
- ret = i2c_smbus_write_byte(client, reg);
- if (ret < 0) {
- dev_err(&client->dev, "I2C read register address error\n");
- return ret;
- }
-
- if (reg == ADT75_CONFIG || reg == ADT75_ONESHOT)
- len = 1;
- else
- len = 2;
-
- ret = i2c_master_recv(client, data, len);
- if (ret < 0) {
- dev_err(&client->dev, "I2C read error\n");
- return ret;
- }
-
- return ret;
-}
-
-static int adt75_i2c_write(struct iio_dev *dev_info, u8 reg, u8 data)
-{
- struct adt75_chip_info *chip = iio_priv(dev_info);
- struct i2c_client *client = chip->client;
- int ret = 0;
-
- if (reg == ADT75_CONFIG || reg == ADT75_ONESHOT)
- ret = i2c_smbus_write_byte_data(client, reg, data);
- else
- ret = i2c_smbus_write_word_data(client, reg, data);
-
- if (ret < 0)
- dev_err(&client->dev, "I2C write error\n");
-
- return ret;
-}
-
-static ssize_t adt75_show_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct adt75_chip_info *chip = iio_priv(dev_get_drvdata(dev));
-
- if (chip->config & ADT75_PD)
- return sprintf(buf, "power-save\n");
- else
- return sprintf(buf, "full\n");
-}
-
-static ssize_t adt75_store_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = iio_priv(dev_info);
- int ret;
- u8 config;
-
- ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
- if (ret)
- return -EIO;
-
- config = chip->config & ~ADT75_PD;
- if (!strcmp(buf, "full"))
- config |= ADT75_PD;
-
- ret = adt75_i2c_write(dev_info, ADT75_CONFIG, config);
- if (ret)
- return -EIO;
-
- chip->config = config;
-
- return ret;
-}
-
-static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
- adt75_show_mode,
- adt75_store_mode,
- 0);
-
-static ssize_t adt75_show_available_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "full\npower-down\n");
-}
-
-static IIO_DEVICE_ATTR(available_modes, S_IRUGO, adt75_show_available_modes, NULL, 0);
-
-static ssize_t adt75_show_oneshot(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct adt75_chip_info *chip = iio_priv(dev_get_drvdata(dev));
-
- return sprintf(buf, "%d\n", !!(chip->config & ADT75_ONESHOT));
-}
-
-static ssize_t adt75_store_oneshot(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = iio_priv(dev_info);
- unsigned long data = 0;
- int ret;
- u8 config;
-
- ret = strict_strtoul(buf, 10, &data);
- if (ret)
- return -EINVAL;
-
-
- ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
- if (ret)
- return -EIO;
-
- config = chip->config & ~ADT75_ONESHOT;
- if (data)
- config |= ADT75_ONESHOT;
-
- ret = adt75_i2c_write(dev_info, ADT75_CONFIG, config);
- if (ret)
- return -EIO;
-
- chip->config = config;
-
- return ret;
-}
-
-static IIO_DEVICE_ATTR(oneshot, S_IRUGO | S_IWUSR,
- adt75_show_oneshot,
- adt75_store_oneshot,
- 0);
-
-static ssize_t adt75_show_value(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = iio_priv(dev_info);
- u16 data;
- char sign = ' ';
- int ret;
-
- if (chip->config & ADT75_PD) {
- dev_err(dev, "Can't read value in power-down mode.\n");
- return -EIO;
- }
-
- if (chip->config & ADT75_ONESHOT) {
- /* write to active converter */
- ret = i2c_smbus_write_byte(chip->client, ADT75_ONESHOT);
- if (ret)
- return -EIO;
- }
-
- ret = adt75_i2c_read(dev_info, ADT75_TEMPERATURE, (u8 *)&data);
- if (ret)
- return -EIO;
-
- data = swab16(data) >> ADT75_VALUE_OFFSET;
- if (data & ADT75_VALUE_SIGN) {
- /* convert supplement to positive value */
- data = (ADT75_VALUE_SIGN << 1) - data;
- sign = '-';
- }
-
- return sprintf(buf, "%c%d.%.4d\n", sign,
- (data >> ADT75_VALUE_FLOAT_OFFSET),
- (data & ADT75_VALUE_FLOAT_MASK) * 625);
-}
-
-static IIO_DEVICE_ATTR(value, S_IRUGO, adt75_show_value, NULL, 0);
-
-static struct attribute *adt75_attributes[] = {
- &iio_dev_attr_available_modes.dev_attr.attr,
- &iio_dev_attr_mode.dev_attr.attr,
- &iio_dev_attr_oneshot.dev_attr.attr,
- &iio_dev_attr_value.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group adt75_attribute_group = {
- .attrs = adt75_attributes,
-};
-
-/*
- * temperature bound events
- */
-
-#define IIO_EVENT_CODE_ADT75_OTI IIO_UNMOD_EVENT_CODE(IIO_EV_CLASS_TEMP, \
- 0, \
- IIO_EV_TYPE_THRESH, \
- IIO_EV_DIR_FALLING)
-
-static irqreturn_t adt75_event_handler(int irq, void *private)
-{
- iio_push_event(private, 0,
- IIO_EVENT_CODE_ADT75_OTI,
- iio_get_time_ns());
-
- return IRQ_HANDLED;
-}
-
-static ssize_t adt75_show_oti_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = iio_priv(dev_info);
- int ret;
-
- /* retrive ALART status */
- ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
- if (ret)
- return -EIO;
-
- if (chip->config & ADT75_OS_INT)
- return sprintf(buf, "interrupt\n");
- else
- return sprintf(buf, "comparator\n");
-}
-
-static ssize_t adt75_set_oti_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = iio_priv(dev_info);
- int ret;
- u8 config;
-
- /* retrive ALART status */
- ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
- if (ret)
- return -EIO;
-
- config = chip->config & ~ADT75_OS_INT;
- if (strcmp(buf, "comparator") != 0)
- config |= ADT75_OS_INT;
-
- ret = adt75_i2c_write(dev_info, ADT75_CONFIG, config);
- if (ret)
- return -EIO;
-
- chip->config = config;
-
- return ret;
-}
-
-static ssize_t adt75_show_available_oti_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "comparator\ninterrupt\n");
-}
-
-static ssize_t adt75_show_smbus_alart(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = iio_priv(dev_info);
- int ret;
-
- /* retrive ALART status */
- ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
- if (ret)
- return -EIO;
-
- return sprintf(buf, "%d\n", !!(chip->config & ADT75_SMBUS_ALART));
-}
-
-static ssize_t adt75_set_smbus_alart(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = iio_priv(dev_info);
- unsigned long data = 0;
- int ret;
- u8 config;
-
- ret = strict_strtoul(buf, 10, &data);
- if (ret)
- return -EINVAL;
-
- /* retrive ALART status */
- ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
- if (ret)
- return -EIO;
-
- config = chip->config & ~ADT75_SMBUS_ALART;
- if (data)
- config |= ADT75_SMBUS_ALART;
-
- ret = adt75_i2c_write(dev_info, ADT75_CONFIG, config);
- if (ret)
- return -EIO;
-
- chip->config = config;
-
- return ret;
-}
-
-static ssize_t adt75_show_fault_queue(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = iio_priv(dev_info);
- int ret;
-
- /* retrive ALART status */
- ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
- if (ret)
- return -EIO;
-
- return sprintf(buf, "%d\n", (chip->config & ADT75_FAULT_QUEUE_MASK) >>
- ADT75_FAULT_QUEUE_OFFSET);
-}
-
-static ssize_t adt75_set_fault_queue(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = iio_priv(dev_info);
- unsigned long data;
- int ret;
- u8 config;
-
- ret = strict_strtoul(buf, 10, &data);
- if (ret || data > 3)
- return -EINVAL;
-
- /* retrive ALART status */
- ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
- if (ret)
- return -EIO;
-
- config = chip->config & ~ADT75_FAULT_QUEUE_MASK;
- config |= (data << ADT75_FAULT_QUEUE_OFFSET);
- ret = adt75_i2c_write(dev_info, ADT75_CONFIG, config);
- if (ret)
- return -EIO;
-
- chip->config = config;
-
- return ret;
-}
-static inline ssize_t adt75_show_t_bound(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- u16 data;
- char sign = ' ';
- int ret;
-
- ret = adt75_i2c_read(dev_info, this_attr->address, (u8 *)&data);
- if (ret)
- return -EIO;
-
- data = swab16(data) >> ADT75_VALUE_OFFSET;
- if (data & ADT75_VALUE_SIGN) {
- /* convert supplement to positive value */
- data = (ADT75_VALUE_SIGN << 1) - data;
- sign = '-';
- }
-
- return sprintf(buf, "%c%d.%.4d\n", sign,
- (data >> ADT75_VALUE_FLOAT_OFFSET),
- (data & ADT75_VALUE_FLOAT_MASK) * 625);
-}
-
-static inline ssize_t adt75_set_t_bound(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- long tmp1, tmp2;
- u16 data;
- char *pos;
- int ret;
-
- pos = strchr(buf, '.');
-
- ret = strict_strtol(buf, 10, &tmp1);
-
- if (ret || tmp1 > 127 || tmp1 < -128)
- return -EINVAL;
-
- if (pos) {
- len = strlen(pos);
- if (len > ADT75_VALUE_FLOAT_OFFSET)
- len = ADT75_VALUE_FLOAT_OFFSET;
- pos[len] = 0;
- ret = strict_strtol(pos, 10, &tmp2);
-
- if (!ret)
- tmp2 = (tmp2 / 625) * 625;
- }
-
- if (tmp1 < 0)
- data = (u16)(-tmp1);
- else
- data = (u16)tmp1;
- data = (data << ADT75_VALUE_FLOAT_OFFSET) | (tmp2 & ADT75_VALUE_FLOAT_MASK);
- if (tmp1 < 0)
- /* convert positive value to supplyment */
- data = (ADT75_VALUE_SIGN << 1) - data;
- data <<= ADT75_VALUE_OFFSET;
- data = swab16(data);
-
- ret = adt75_i2c_write(dev_info, this_attr->address, (u8)data);
- if (ret)
- return -EIO;
-
- return ret;
-}
-
-
-static IIO_DEVICE_ATTR(oti_mode,
- S_IRUGO | S_IWUSR,
- adt75_show_oti_mode, adt75_set_oti_mode, 0);
-static IIO_DEVICE_ATTR(available_oti_modes,
- S_IRUGO,
- adt75_show_available_oti_modes, NULL, 0);
-static IIO_DEVICE_ATTR(smbus_alart,
- S_IRUGO | S_IWUSR,
- adt75_show_smbus_alart, adt75_set_smbus_alart, 0);
-static IIO_DEVICE_ATTR(fault_queue,
- S_IRUGO | S_IWUSR,
- adt75_show_fault_queue, adt75_set_fault_queue, 0);
-static IIO_DEVICE_ATTR(t_os_value,
- S_IRUGO | S_IWUSR,
- adt75_show_t_bound, adt75_set_t_bound,
- ADT75_T_OS);
-static IIO_DEVICE_ATTR(t_hyst_value,
- S_IRUGO | S_IWUSR,
- adt75_show_t_bound, adt75_set_t_bound,
- ADT75_T_HYST);
-
-static struct attribute *adt75_event_attributes[] = {
- &iio_dev_attr_oti_mode.dev_attr.attr,
- &iio_dev_attr_available_oti_modes.dev_attr.attr,
- &iio_dev_attr_smbus_alart.dev_attr.attr,
- &iio_dev_attr_fault_queue.dev_attr.attr,
- &iio_dev_attr_t_os_value.dev_attr.attr,
- &iio_dev_attr_t_hyst_value.dev_attr.attr,
- NULL,
-};
-
-static struct attribute_group adt75_event_attribute_group = {
- .attrs = adt75_event_attributes,
-};
-
-static const struct iio_info adt75_info = {
- .attrs = &adt75_attribute_group,
- .num_interrupt_lines = 1,
- .event_attrs = &adt75_event_attribute_group,
- .driver_module = THIS_MODULE,
-};
-
-/*
- * device probe and remove
- */
-
-static int __devinit adt75_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct adt75_chip_info *chip;
- struct iio_dev *indio_dev;
- int ret = 0;
-
- indio_dev = iio_allocate_device(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- chip = iio_priv(indio_dev);
-
- /* this is only used for device removal purposes */
- i2c_set_clientdata(client, indio_dev);
-
- chip->client = client;
-
- indio_dev->name = id->name;
- indio_dev->dev.parent = &client->dev;
- indio_dev->info = &adt75_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
-
- if (client->irq > 0) {
- ret = request_threaded_irq(client->irq,
- NULL,
- &adt75_event_handler,
- IRQF_TRIGGER_LOW,
- indio_dev->name,
- indio_dev);
- if (ret)
- goto error_unreg_dev;
-
- ret = adt75_i2c_read(indio_dev, ADT75_CONFIG, &chip->config);
- if (ret) {
- ret = -EIO;
- goto error_unreg_irq;
- }
-
- /* set irq polarity low level */
- chip->config &= ~ADT75_OS_POLARITY;
-
- ret = adt75_i2c_write(indio_dev, ADT75_CONFIG, chip->config);
- if (ret) {
- ret = -EIO;
- goto error_unreg_irq;
- }
- }
-
- dev_info(&client->dev, "%s temperature sensor registered.\n",
- indio_dev->name);
-
- return 0;
-error_unreg_irq:
- free_irq(client->irq, indio_dev);
-error_unreg_dev:
- iio_device_unregister(indio_dev);
-error_free_dev:
- iio_free_device(indio_dev);
-error_ret:
- return ret;
-}
-
-static int __devexit adt75_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- if (client->irq)
- free_irq(client->irq, indio_dev);
- iio_device_unregister(indio_dev);
- iio_free_device(indio_dev);
-
- return 0;
-}
-
-static const struct i2c_device_id adt75_id[] = {
- { "adt75", 0 },
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, adt75_id);
-
-static struct i2c_driver adt75_driver = {
- .driver = {
- .name = "adt75",
- },
- .probe = adt75_probe,
- .remove = __devexit_p(adt75_remove),
- .id_table = adt75_id,
-};
-
-static __init int adt75_init(void)
-{
- return i2c_add_driver(&adt75_driver);
-}
-
-static __exit void adt75_exit(void)
-{
- i2c_del_driver(&adt75_driver);
-}
-
-MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
-MODULE_DESCRIPTION("Analog Devices ADT75 digital"
- " temperature sensor driver");
-MODULE_LICENSE("GPL v2");
-
-module_init(adt75_init);
-module_exit(adt75_exit);
diff --git a/drivers/staging/iio/adc/max1363.h b/drivers/staging/iio/adc/max1363.h
index 360bfc5398f..cbcb08a2cb5 100644
--- a/drivers/staging/iio/adc/max1363.h
+++ b/drivers/staging/iio/adc/max1363.h
@@ -57,6 +57,7 @@
#define MAX1363_SCAN_MASK 0x60
#define MAX1363_SE_DE_MASK 0x01
+#define MAX1363_MAX_CHANNELS 25
/**
* struct max1363_mode - scan mode information
* @conf: The corresponding value of the configuration register
@@ -64,7 +65,7 @@
*/
struct max1363_mode {
int8_t conf;
- long modemask;
+ DECLARE_BITMAP(modemask, MAX1363_MAX_CHANNELS);
};
/* This must be maintained along side the max1363_mode_table in max1363_core */
@@ -145,13 +146,14 @@ struct max1363_state {
};
const struct max1363_mode
-*max1363_match_mode(u32 mask, const struct max1363_chip_info *ci);
+*max1363_match_mode(unsigned long *mask, const struct max1363_chip_info *ci);
int max1363_set_scan_mode(struct max1363_state *st);
#ifdef CONFIG_MAX1363_RING_BUFFER
-int max1363_single_channel_from_ring(long mask, struct max1363_state *st);
+int max1363_single_channel_from_ring(const long *mask,
+ struct max1363_state *st);
int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void max1363_ring_cleanup(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
index 72b0917412e..eb699ade34b 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -30,26 +30,26 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
+#include "../buffer_generic.h"
-#include "../ring_generic.h"
-#include "adc.h"
#include "max1363.h"
#define MAX1363_MODE_SINGLE(_num, _mask) { \
.conf = MAX1363_CHANNEL_SEL(_num) \
| MAX1363_CONFIG_SCAN_SINGLE_1 \
| MAX1363_CONFIG_SE, \
- .modemask = _mask, \
+ .modemask[0] = _mask, \
}
#define MAX1363_MODE_SCAN_TO_CHANNEL(_num, _mask) { \
.conf = MAX1363_CHANNEL_SEL(_num) \
| MAX1363_CONFIG_SCAN_TO_CS \
| MAX1363_CONFIG_SE, \
- .modemask = _mask, \
+ .modemask[0] = _mask, \
}
/* note not available for max1363 hence naming */
@@ -57,14 +57,14 @@
.conf = MAX1363_CHANNEL_SEL(_num) \
| MAX1236_SCAN_MID_TO_CHANNEL \
| MAX1363_CONFIG_SE, \
- .modemask = _mask \
+ .modemask[0] = _mask \
}
#define MAX1363_MODE_DIFF_SINGLE(_nump, _numm, _mask) { \
.conf = MAX1363_CHANNEL_SEL(_nump) \
| MAX1363_CONFIG_SCAN_SINGLE_1 \
| MAX1363_CONFIG_DE, \
- .modemask = _mask \
+ .modemask[0] = _mask \
}
/* Can't think how to automate naming so specify for now */
@@ -72,7 +72,7 @@
.conf = MAX1363_CHANNEL_SEL(_num) \
| MAX1363_CONFIG_SCAN_TO_CS \
| MAX1363_CONFIG_DE, \
- .modemask = _mask \
+ .modemask[0] = _mask \
}
/* note only available for max1363 hence naming */
@@ -80,7 +80,7 @@
.conf = MAX1363_CHANNEL_SEL(_num) \
| MAX1236_SCAN_MID_TO_CHANNEL \
| MAX1363_CONFIG_SE, \
- .modemask = _mask \
+ .modemask[0] = _mask \
}
static const struct max1363_mode max1363_mode_table[] = {
@@ -147,13 +147,15 @@ static const struct max1363_mode max1363_mode_table[] = {
};
const struct max1363_mode
-*max1363_match_mode(u32 mask, const struct max1363_chip_info *ci)
+*max1363_match_mode(unsigned long *mask, const struct max1363_chip_info *ci)
{
int i;
if (mask)
for (i = 0; i < ci->num_modes; i++)
- if (!((~max1363_mode_table[ci->mode_list[i]].modemask) &
- mask))
+ if (bitmap_subset(mask,
+ max1363_mode_table[ci->mode_list[i]].
+ modemask,
+ MAX1363_MAX_CHANNELS))
return &max1363_mode_table[ci->mode_list[i]];
return NULL;
}
@@ -187,7 +189,7 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
int ret = 0;
s32 data;
char rxbuf[2];
- long mask;
+ const unsigned long *mask;
struct max1363_state *st = iio_priv(indio_dev);
struct i2c_client *client = st->client;
@@ -203,7 +205,7 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
}
/* If ring buffer capture is occurring, query the buffer */
- if (iio_ring_enabled(indio_dev)) {
+ if (iio_buffer_enabled(indio_dev)) {
mask = max1363_mode_table[chan->address].modemask;
data = max1363_single_channel_from_ring(mask, st);
if (data < 0) {
@@ -255,7 +257,7 @@ static int max1363_read_raw(struct iio_dev *indio_dev,
switch (m) {
case 0:
ret = max1363_read_single_chan(indio_dev, chan, val, m);
- if (ret)
+ if (ret < 0)
return ret;
return IIO_VAL_INT;
case (1 << IIO_CHAN_INFO_SCALE_SHARED):
@@ -287,72 +289,52 @@ static const enum max1363_modes max1363_mode_list[] = {
(IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) \
| IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING))
#define MAX1363_INFO_MASK (1 << IIO_CHAN_INFO_SCALE_SHARED)
+#define MAX1363_CHAN_U(num, addr, si, bits, evmask) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = num, \
+ .address = addr, \
+ .info_mask = MAX1363_INFO_MASK, \
+ .scan_type = IIO_ST('u', bits, (bits > 8) ? 16 : 8, 0), \
+ .scan_index = si, \
+ .event_mask = evmask, \
+ }
-static struct iio_chan_spec max1363_channels[] = {
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0, MAX1363_INFO_MASK,
- _s0, 0, IIO_ST('u', 12, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0, MAX1363_INFO_MASK,
- _s1, 1, IIO_ST('u', 12, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0, MAX1363_INFO_MASK,
- _s2, 2, IIO_ST('u', 12, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0, MAX1363_INFO_MASK,
- _s3, 3, IIO_ST('u', 12, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 0, 1, MAX1363_INFO_MASK,
- d0m1, 4, IIO_ST('s', 12, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 2, 3, MAX1363_INFO_MASK,
- d2m3, 5, IIO_ST('s', 12, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 1, 0, MAX1363_INFO_MASK,
- d1m0, 6, IIO_ST('s', 12, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 3, 2, MAX1363_INFO_MASK,
- d3m2, 7, IIO_ST('s', 12, 16, 0), MAX1363_EV_M),
- IIO_CHAN_SOFT_TIMESTAMP(8)
-};
-
-static struct iio_chan_spec max1361_channels[] = {
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0, MAX1363_INFO_MASK,
- _s0, 0, IIO_ST('u', 10, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0, MAX1363_INFO_MASK,
- _s1, 1, IIO_ST('u', 10, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0, MAX1363_INFO_MASK,
- _s2, 2, IIO_ST('u', 10, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0, MAX1363_INFO_MASK,
- _s3, 3, IIO_ST('u', 10, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 0, 1, MAX1363_INFO_MASK,
- d0m1, 4, IIO_ST('s', 10, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 2, 3, MAX1363_INFO_MASK,
- d2m3, 5, IIO_ST('s', 10, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 1, 0, MAX1363_INFO_MASK,
- d1m0, 6, IIO_ST('s', 10, 16, 0), MAX1363_EV_M),
- IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 3, 2, MAX1363_INFO_MASK,
- d3m2, 7, IIO_ST('s', 10, 16, 0), MAX1363_EV_M),
- IIO_CHAN_SOFT_TIMESTAMP(8)
-};
-
-#define MAX1363_CHAN_U(num, address, scan_index, bits) \
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, num, 0, MAX1363_INFO_MASK, \
- address, scan_index, IIO_ST('u', bits, \
- (bits == 8) ? 8 : 16, 0), 0)
/* bipolar channel */
-#define MAX1363_CHAN_B(num, num2, address, scan_index, bits) \
- IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, num, num2, MAX1363_INFO_MASK,\
- address, scan_index, IIO_ST('s', bits, \
- (bits == 8) ? 8 : 16, 0), 0)
-
-#define MAX1363_4X_CHANS(bits) { \
- MAX1363_CHAN_U(0, _s0, 0, bits), \
- MAX1363_CHAN_U(1, _s1, 1, bits), \
- MAX1363_CHAN_U(2, _s2, 2, bits), \
- MAX1363_CHAN_U(3, _s3, 3, bits), \
- MAX1363_CHAN_B(0, 1, d0m1, 4, bits), \
- MAX1363_CHAN_B(2, 3, d2m3, 5, bits), \
- MAX1363_CHAN_B(1, 0, d1m0, 6, bits), \
- MAX1363_CHAN_B(3, 2, d3m2, 7, bits), \
- IIO_CHAN_SOFT_TIMESTAMP(8) \
+#define MAX1363_CHAN_B(num, num2, addr, si, bits, evmask) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .differential = 1, \
+ .indexed = 1, \
+ .channel = num, \
+ .channel2 = num2, \
+ .address = addr, \
+ .info_mask = MAX1363_INFO_MASK, \
+ .scan_type = IIO_ST('u', bits, (bits > 8) ? 16 : 8, 0), \
+ .scan_index = si, \
+ .event_mask = evmask, \
}
-static struct iio_chan_spec max1036_channels[] = MAX1363_4X_CHANS(8);
-static struct iio_chan_spec max1136_channels[] = MAX1363_4X_CHANS(10);
-static struct iio_chan_spec max1236_channels[] = MAX1363_4X_CHANS(12);
+#define MAX1363_4X_CHANS(bits, em) { \
+ MAX1363_CHAN_U(0, _s0, 0, bits, em), \
+ MAX1363_CHAN_U(1, _s1, 1, bits, em), \
+ MAX1363_CHAN_U(2, _s2, 2, bits, em), \
+ MAX1363_CHAN_U(3, _s3, 3, bits, em), \
+ MAX1363_CHAN_B(0, 1, d0m1, 4, bits, em), \
+ MAX1363_CHAN_B(2, 3, d2m3, 5, bits, em), \
+ MAX1363_CHAN_B(1, 0, d1m0, 6, bits, em), \
+ MAX1363_CHAN_B(3, 2, d3m2, 7, bits, em), \
+ IIO_CHAN_SOFT_TIMESTAMP(8) \
+ }
+
+static struct iio_chan_spec max1036_channels[] = MAX1363_4X_CHANS(8, 0);
+static struct iio_chan_spec max1136_channels[] = MAX1363_4X_CHANS(10, 0);
+static struct iio_chan_spec max1236_channels[] = MAX1363_4X_CHANS(12, 0);
+static struct iio_chan_spec max1361_channels[] =
+ MAX1363_4X_CHANS(10, MAX1363_EV_M);
+static struct iio_chan_spec max1363_channels[] =
+ MAX1363_4X_CHANS(12, MAX1363_EV_M);
/* Appies to max1236, max1237 */
static const enum max1363_modes max1236_mode_list[] = {
@@ -377,30 +359,30 @@ static const enum max1363_modes max1238_mode_list[] = {
};
#define MAX1363_12X_CHANS(bits) { \
- MAX1363_CHAN_U(0, _s0, 0, bits), \
- MAX1363_CHAN_U(1, _s1, 1, bits), \
- MAX1363_CHAN_U(2, _s2, 2, bits), \
- MAX1363_CHAN_U(3, _s3, 3, bits), \
- MAX1363_CHAN_U(4, _s4, 4, bits), \
- MAX1363_CHAN_U(5, _s5, 5, bits), \
- MAX1363_CHAN_U(6, _s6, 6, bits), \
- MAX1363_CHAN_U(7, _s7, 7, bits), \
- MAX1363_CHAN_U(8, _s8, 8, bits), \
- MAX1363_CHAN_U(9, _s9, 9, bits), \
- MAX1363_CHAN_U(10, _s10, 10, bits), \
- MAX1363_CHAN_U(11, _s11, 11, bits), \
- MAX1363_CHAN_B(0, 1, d0m1, 12, bits), \
- MAX1363_CHAN_B(2, 3, d2m3, 13, bits), \
- MAX1363_CHAN_B(4, 5, d4m5, 14, bits), \
- MAX1363_CHAN_B(6, 7, d6m7, 15, bits), \
- MAX1363_CHAN_B(8, 9, d8m9, 16, bits), \
- MAX1363_CHAN_B(10, 11, d10m11, 17, bits), \
- MAX1363_CHAN_B(1, 0, d1m0, 18, bits), \
- MAX1363_CHAN_B(3, 2, d3m2, 19, bits), \
- MAX1363_CHAN_B(5, 4, d5m4, 20, bits), \
- MAX1363_CHAN_B(7, 6, d7m6, 21, bits), \
- MAX1363_CHAN_B(9, 8, d9m8, 22, bits), \
- MAX1363_CHAN_B(11, 10, d11m10, 23, bits), \
+ MAX1363_CHAN_U(0, _s0, 0, bits, 0), \
+ MAX1363_CHAN_U(1, _s1, 1, bits, 0), \
+ MAX1363_CHAN_U(2, _s2, 2, bits, 0), \
+ MAX1363_CHAN_U(3, _s3, 3, bits, 0), \
+ MAX1363_CHAN_U(4, _s4, 4, bits, 0), \
+ MAX1363_CHAN_U(5, _s5, 5, bits, 0), \
+ MAX1363_CHAN_U(6, _s6, 6, bits, 0), \
+ MAX1363_CHAN_U(7, _s7, 7, bits, 0), \
+ MAX1363_CHAN_U(8, _s8, 8, bits, 0), \
+ MAX1363_CHAN_U(9, _s9, 9, bits, 0), \
+ MAX1363_CHAN_U(10, _s10, 10, bits, 0), \
+ MAX1363_CHAN_U(11, _s11, 11, bits, 0), \
+ MAX1363_CHAN_B(0, 1, d0m1, 12, bits, 0), \
+ MAX1363_CHAN_B(2, 3, d2m3, 13, bits, 0), \
+ MAX1363_CHAN_B(4, 5, d4m5, 14, bits, 0), \
+ MAX1363_CHAN_B(6, 7, d6m7, 15, bits, 0), \
+ MAX1363_CHAN_B(8, 9, d8m9, 16, bits, 0), \
+ MAX1363_CHAN_B(10, 11, d10m11, 17, bits, 0), \
+ MAX1363_CHAN_B(1, 0, d1m0, 18, bits, 0), \
+ MAX1363_CHAN_B(3, 2, d3m2, 19, bits, 0), \
+ MAX1363_CHAN_B(5, 4, d5m4, 20, bits, 0), \
+ MAX1363_CHAN_B(7, 6, d7m6, 21, bits, 0), \
+ MAX1363_CHAN_B(9, 8, d9m8, 22, bits, 0), \
+ MAX1363_CHAN_B(11, 10, d11m10, 23, bits, 0), \
IIO_CHAN_SOFT_TIMESTAMP(24) \
}
static struct iio_chan_spec max1038_channels[] = MAX1363_12X_CHANS(8);
@@ -425,25 +407,25 @@ static const enum max1363_modes max11608_mode_list[] = {
d1m0to3m2, d1m0to5m4, d1m0to7m6,
};
-#define MAX1363_8X_CHANS(bits) { \
- MAX1363_CHAN_U(0, _s0, 0, bits), \
- MAX1363_CHAN_U(1, _s1, 1, bits), \
- MAX1363_CHAN_U(2, _s2, 2, bits), \
- MAX1363_CHAN_U(3, _s3, 3, bits), \
- MAX1363_CHAN_U(4, _s4, 4, bits), \
- MAX1363_CHAN_U(5, _s5, 5, bits), \
- MAX1363_CHAN_U(6, _s6, 6, bits), \
- MAX1363_CHAN_U(7, _s7, 7, bits), \
- MAX1363_CHAN_B(0, 1, d0m1, 8, bits), \
- MAX1363_CHAN_B(2, 3, d2m3, 9, bits), \
- MAX1363_CHAN_B(4, 5, d4m5, 10, bits), \
- MAX1363_CHAN_B(6, 7, d6m7, 11, bits), \
- MAX1363_CHAN_B(1, 0, d1m0, 12, bits), \
- MAX1363_CHAN_B(3, 2, d3m2, 13, bits), \
- MAX1363_CHAN_B(5, 4, d5m4, 14, bits), \
- MAX1363_CHAN_B(7, 6, d7m6, 15, bits), \
- IIO_CHAN_SOFT_TIMESTAMP(16) \
- }
+#define MAX1363_8X_CHANS(bits) { \
+ MAX1363_CHAN_U(0, _s0, 0, bits, 0), \
+ MAX1363_CHAN_U(1, _s1, 1, bits, 0), \
+ MAX1363_CHAN_U(2, _s2, 2, bits, 0), \
+ MAX1363_CHAN_U(3, _s3, 3, bits, 0), \
+ MAX1363_CHAN_U(4, _s4, 4, bits, 0), \
+ MAX1363_CHAN_U(5, _s5, 5, bits, 0), \
+ MAX1363_CHAN_U(6, _s6, 6, bits, 0), \
+ MAX1363_CHAN_U(7, _s7, 7, bits, 0), \
+ MAX1363_CHAN_B(0, 1, d0m1, 8, bits, 0), \
+ MAX1363_CHAN_B(2, 3, d2m3, 9, bits, 0), \
+ MAX1363_CHAN_B(4, 5, d4m5, 10, bits, 0), \
+ MAX1363_CHAN_B(6, 7, d6m7, 11, bits, 0), \
+ MAX1363_CHAN_B(1, 0, d1m0, 12, bits, 0), \
+ MAX1363_CHAN_B(3, 2, d3m2, 13, bits, 0), \
+ MAX1363_CHAN_B(5, 4, d5m4, 14, bits, 0), \
+ MAX1363_CHAN_B(7, 6, d7m6, 15, bits, 0), \
+ IIO_CHAN_SOFT_TIMESTAMP(16) \
+}
static struct iio_chan_spec max11602_channels[] = MAX1363_8X_CHANS(8);
static struct iio_chan_spec max11608_channels[] = MAX1363_8X_CHANS(10);
static struct iio_chan_spec max11614_channels[] = MAX1363_8X_CHANS(12);
@@ -453,10 +435,10 @@ static const enum max1363_modes max11644_mode_list[] = {
};
#define MAX1363_2X_CHANS(bits) { \
- MAX1363_CHAN_U(0, _s0, 0, bits), \
- MAX1363_CHAN_U(1, _s1, 1, bits), \
- MAX1363_CHAN_B(0, 1, d0m1, 2, bits), \
- MAX1363_CHAN_B(1, 0, d1m0, 3, bits), \
+ MAX1363_CHAN_U(0, _s0, 0, bits, 0), \
+ MAX1363_CHAN_U(1, _s1, 1, bits, 0), \
+ MAX1363_CHAN_B(0, 1, d0m1, 2, bits, 0), \
+ MAX1363_CHAN_B(1, 0, d1m0, 3, bits, 0), \
IIO_CHAN_SOFT_TIMESTAMP(4) \
}
@@ -551,7 +533,7 @@ static IIO_CONST_ATTR(sampling_frequency_available,
"133000 665000 33300 16600 8300 4200 2000 1000");
static int max1363_read_thresh(struct iio_dev *indio_dev,
- int event_code,
+ u64 event_code,
int *val)
{
struct max1363_state *st = iio_priv(indio_dev);
@@ -563,7 +545,7 @@ static int max1363_read_thresh(struct iio_dev *indio_dev,
}
static int max1363_write_thresh(struct iio_dev *indio_dev,
- int event_code,
+ u64 event_code,
int val)
{
struct max1363_state *st = iio_priv(indio_dev);
@@ -591,11 +573,23 @@ static int max1363_write_thresh(struct iio_dev *indio_dev,
return 0;
}
-static const int max1363_event_codes[] = {
- IIO_EVENT_CODE_IN_LOW_THRESH(3), IIO_EVENT_CODE_IN_HIGH_THRESH(3),
- IIO_EVENT_CODE_IN_LOW_THRESH(2), IIO_EVENT_CODE_IN_HIGH_THRESH(2),
- IIO_EVENT_CODE_IN_LOW_THRESH(1), IIO_EVENT_CODE_IN_HIGH_THRESH(1),
- IIO_EVENT_CODE_IN_LOW_THRESH(0), IIO_EVENT_CODE_IN_HIGH_THRESH(0)
+static const u64 max1363_event_codes[] = {
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 1,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 2,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 3,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 1,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 2,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 3,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
};
static irqreturn_t max1363_event_handler(int irq, void *private)
@@ -611,15 +605,14 @@ static irqreturn_t max1363_event_handler(int irq, void *private)
i2c_master_recv(st->client, &rx, 1);
mask = rx;
for_each_set_bit(loc, &mask, 8)
- iio_push_event(indio_dev, 0, max1363_event_codes[loc],
- timestamp);
+ iio_push_event(indio_dev, max1363_event_codes[loc], timestamp);
i2c_master_send(st->client, tx, 2);
return IRQ_HANDLED;
}
static int max1363_read_event_config(struct iio_dev *indio_dev,
- int event_code)
+ u64 event_code)
{
struct max1363_state *st = iio_priv(indio_dev);
@@ -641,7 +634,7 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
int ret, i = 3, j;
unsigned long numelements;
int len;
- long modemask;
+ const long *modemask;
if (!enabled) {
/* transition to ring capture is not currently supported */
@@ -669,7 +662,7 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
st->configbyte |= max1363_mode_table[d1m0to3m2].conf;
modemask = max1363_mode_table[d1m0to3m2].modemask;
}
- numelements = hweight_long(modemask);
+ numelements = bitmap_weight(modemask, MAX1363_MAX_CHANNELS);
len = 3 * numelements + 3;
tx_buf = kmalloc(len, GFP_KERNEL);
if (!tx_buf) {
@@ -685,7 +678,7 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
* setup to match what we need.
*/
for (j = 0; j < 8; j++)
- if (modemask & (1 << j)) {
+ if (test_bit(j, modemask)) {
/* Establish the mode is in the scan */
if (st->mask_low & (1 << j)) {
tx_buf[i] = (st->thresh_low[j] >> 4) & 0xFF;
@@ -771,7 +764,7 @@ error_ret:
}
static int max1363_write_event_config(struct iio_dev *indio_dev,
- int event_code,
+ u64 event_code,
int state)
{
int ret = 0;
@@ -823,6 +816,7 @@ static struct attribute *max1363_event_attributes[] = {
static struct attribute_group max1363_event_attribute_group = {
.attrs = max1363_event_attributes,
+ .name = "events",
};
#define MAX1363_EVENT_FUNCS \
@@ -840,7 +834,6 @@ static const struct iio_info max1363_info = {
.write_event_config = &max1363_write_event_config,
.read_raw = &max1363_read_raw,
.driver_module = THIS_MODULE,
- .num_interrupt_lines = 1,
.event_attrs = &max1363_event_attribute_group,
};
@@ -1249,7 +1242,7 @@ static int max1363_initial_setup(struct max1363_state *st)
static int __devinit max1363_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int ret, i, regdone = 0;
+ int ret, i;
struct max1363_state *st;
struct iio_dev *indio_dev;
struct regulator *reg;
@@ -1278,7 +1271,7 @@ static int __devinit max1363_probe(struct i2c_client *client,
st->client = client;
indio_dev->available_scan_masks
- = kzalloc(sizeof(*indio_dev->available_scan_masks)*
+ = kzalloc(BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*sizeof(long)*
(st->chip_info->num_modes + 1), GFP_KERNEL);
if (!indio_dev->available_scan_masks) {
ret = -ENOMEM;
@@ -1286,15 +1279,19 @@ static int __devinit max1363_probe(struct i2c_client *client,
}
for (i = 0; i < st->chip_info->num_modes; i++)
- indio_dev->available_scan_masks[i] =
- max1363_mode_table[st->chip_info->mode_list[i]]
- .modemask;
+ bitmap_copy(indio_dev->available_scan_masks +
+ BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*i,
+ max1363_mode_table[st->chip_info->mode_list[i]]
+ .modemask, MAX1363_MAX_CHANNELS);
/* Estabilish that the iio_dev is a child of the i2c device */
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
-
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
indio_dev->info = st->chip_info->info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
ret = max1363_initial_setup(st);
if (ret < 0)
goto error_free_available_scan_masks;
@@ -1303,13 +1300,9 @@ static int __devinit max1363_probe(struct i2c_client *client,
if (ret)
goto error_free_available_scan_masks;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_ring;
- regdone = 1;
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- st->chip_info->channels,
- st->chip_info->num_channels);
+ ret = iio_buffer_register(indio_dev,
+ st->chip_info->channels,
+ st->chip_info->num_channels);
if (ret)
goto error_cleanup_ring;
@@ -1325,19 +1318,21 @@ static int __devinit max1363_probe(struct i2c_client *client,
goto error_uninit_ring;
}
- return 0;
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto error_free_irq;
+ return 0;
+error_free_irq:
+ free_irq(st->client->irq, indio_dev);
error_uninit_ring:
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_buffer_unregister(indio_dev);
error_cleanup_ring:
max1363_ring_cleanup(indio_dev);
error_free_available_scan_masks:
kfree(indio_dev->available_scan_masks);
error_free_device:
- if (!regdone)
- iio_free_device(indio_dev);
- else
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
error_disable_reg:
regulator_disable(reg);
error_put_reg:
@@ -1352,16 +1347,17 @@ static int max1363_remove(struct i2c_client *client)
struct max1363_state *st = iio_priv(indio_dev);
struct regulator *reg = st->reg;
+ iio_device_unregister(indio_dev);
if (client->irq)
free_irq(st->client->irq, indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_buffer_unregister(indio_dev);
max1363_ring_cleanup(indio_dev);
kfree(indio_dev->available_scan_masks);
if (!IS_ERR(reg)) {
regulator_disable(reg);
regulator_put(reg);
}
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/max1363_ring.c b/drivers/staging/iio/adc/max1363_ring.c
index f43befd1f77..df6893e058c 100644
--- a/drivers/staging/iio/adc/max1363_ring.c
+++ b/drivers/staging/iio/adc/max1363_ring.c
@@ -9,28 +9,26 @@
*/
#include <linux/interrupt.h>
-#include <linux/device.h>
#include <linux/slab.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/i2c.h>
#include <linux/bitops.h>
#include "../iio.h"
-#include "../ring_generic.h"
+#include "../buffer_generic.h"
#include "../ring_sw.h"
-#include "../trigger.h"
-#include "../sysfs.h"
+#include "../trigger_consumer.h"
#include "max1363.h"
-int max1363_single_channel_from_ring(long mask, struct max1363_state *st)
+int max1363_single_channel_from_ring(const long *mask, struct max1363_state *st)
{
- struct iio_ring_buffer *ring = iio_priv_to_dev(st)->ring;
- int count = 0, ret;
+ struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
+ int count = 0, ret, index;
u8 *ring_data;
- if (!(st->current_mode->modemask & mask)) {
+ index = find_first_bit(mask, MAX1363_MAX_CHANNELS);
+
+ if (!(test_bit(index, st->current_mode->modemask))) {
ret = -EBUSY;
goto error_ret;
}
@@ -45,12 +43,8 @@ int max1363_single_channel_from_ring(long mask, struct max1363_state *st)
if (ret)
goto error_free_ring_data;
/* Need a count of channels prior to this one */
- mask >>= 1;
- while (mask) {
- if (mask & st->current_mode->modemask)
- count++;
- mask >>= 1;
- }
+
+ count = bitmap_weight(mask, index - 1);
if (st->chip_info->bits != 8)
ret = ((int)(ring_data[count*2 + 0] & 0x0F) << 8)
+ (int)(ring_data[count*2 + 1]);
@@ -74,7 +68,7 @@ error_ret:
static int max1363_ring_preenable(struct iio_dev *indio_dev)
{
struct max1363_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
size_t d_size = 0;
unsigned long numvals;
@@ -89,7 +83,8 @@ static int max1363_ring_preenable(struct iio_dev *indio_dev)
max1363_set_scan_mode(st);
- numvals = hweight_long(st->current_mode->modemask);
+ numvals = bitmap_weight(st->current_mode->modemask,
+ indio_dev->masklength);
if (ring->access->set_bytes_per_datum) {
if (ring->scan_timestamp)
d_size += sizeof(s64);
@@ -108,13 +103,14 @@ static int max1363_ring_preenable(struct iio_dev *indio_dev)
static irqreturn_t max1363_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
+ struct iio_dev *indio_dev = pf->indio_dev;
struct max1363_state *st = iio_priv(indio_dev);
s64 time_ns;
__u8 *rxbuf;
int b_sent;
size_t d_size;
- unsigned long numvals = hweight_long(st->current_mode->modemask);
+ unsigned long numvals = bitmap_weight(st->current_mode->modemask,
+ MAX1363_MAX_CHANNELS);
/* Ensure the timestamp is 8 byte aligned */
if (st->chip_info->bits != 8)
@@ -145,7 +141,7 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
- indio_dev->ring->access->store_to(indio_dev->ring, rxbuf, time_ns);
+ indio_dev->buffer->access->store_to(indio_dev->buffer, rxbuf, time_ns);
done:
iio_trigger_notify_done(indio_dev->trig);
kfree(rxbuf);
@@ -153,10 +149,10 @@ done:
return IRQ_HANDLED;
}
-static const struct iio_ring_setup_ops max1363_ring_setup_ops = {
- .postenable = &iio_triggered_ring_postenable,
+static const struct iio_buffer_setup_ops max1363_ring_setup_ops = {
+ .postenable = &iio_triggered_buffer_postenable,
.preenable = &max1363_ring_preenable,
- .predisable = &iio_triggered_ring_predisable,
+ .predisable = &iio_triggered_buffer_predisable,
};
int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
@@ -164,8 +160,8 @@ int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
struct max1363_state *st = iio_priv(indio_dev);
int ret = 0;
- indio_dev->ring = iio_sw_rb_allocate(indio_dev);
- if (!indio_dev->ring) {
+ indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
+ if (!indio_dev->buffer) {
ret = -ENOMEM;
goto error_ret;
}
@@ -181,17 +177,17 @@ int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
goto error_deallocate_sw_rb;
}
/* Effectively select the ring buffer implementation */
- indio_dev->ring->access = &ring_sw_access_funcs;
+ indio_dev->buffer->access = &ring_sw_access_funcs;
/* Ring buffer functions - here trigger setup related */
- indio_dev->ring->setup_ops = &max1363_ring_setup_ops;
+ indio_dev->buffer->setup_ops = &max1363_ring_setup_ops;
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
error_ret:
return ret;
}
@@ -199,11 +195,6 @@ error_ret:
void max1363_ring_cleanup(struct iio_dev *indio_dev)
{
/* ensure that the trigger has been detached */
- if (indio_dev->trig) {
- iio_put_trigger(indio_dev->trig);
- iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc);
- }
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
diff --git a/drivers/staging/iio/addac/Kconfig b/drivers/staging/iio/addac/Kconfig
index 9847baf0270..698a8970b37 100644
--- a/drivers/staging/iio/addac/Kconfig
+++ b/drivers/staging/iio/addac/Kconfig
@@ -1,10 +1,11 @@
#
# ADDAC drivers
#
-comment "Analog digital bi-direction convertors"
+menu "Analog digital bi-direction converters"
config ADT7316
tristate "Analog Devices ADT7316/7/8 ADT7516/7/9 temperature sensor, ADC and DAC driver"
+ depends on GENERIC_GPIO
help
Say yes here to build support for Analog Devices ADT7316, ADT7317, ADT7318
and ADT7516, ADT7517, ADT7519 temperature sensors, ADC and DAC.
@@ -23,3 +24,5 @@ config ADT7316_I2C
help
Say yes here to build I2C bus support for Analog Devices ADT7316/7/8
and ADT7516/7/9.
+
+endmenu
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 52d1ea34963..07d718e8fb4 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include "adt7316.h"
@@ -109,7 +110,7 @@ static int __devinit adt7316_i2c_probe(struct i2c_client *client,
static int __devexit adt7316_i2c_remove(struct i2c_client *client)
{
- return adt7316_remove(&client->dev);;
+ return adt7316_remove(&client->dev);
}
static const struct i2c_device_id adt7316_i2c_id[] = {
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 637316f79f7..8df24704ff2 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -17,6 +17,7 @@
#include <linux/list.h>
#include <linux/i2c.h>
#include <linux/rtc.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -203,7 +204,7 @@ struct adt7316_chip_info {
#define ADT7316_TEMP_INT_MASK 0x1F
#define ADT7516_AIN_INT_MASK 0xE0
#define ADT7316_TEMP_AIN_INT_MASK \
- (ADT7316_TEMP_INT_MASK | ADT7316_TEMP_INT_MASK)
+ (ADT7316_TEMP_INT_MASK)
/*
* struct adt7316_chip_info - chip specifc information
@@ -1776,44 +1777,44 @@ static irqreturn_t adt7316_event_handler(int irq, void *private)
time = iio_get_time_ns();
if (stat1 & (1 << 0))
- iio_push_event(indio_dev, 0,
+ iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
time);
if (stat1 & (1 << 1))
- iio_push_event(indio_dev, 0,
+ iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
time);
if (stat1 & (1 << 2))
- iio_push_event(indio_dev, 0,
+ iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 1,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
time);
if (stat1 & (1 << 3))
- iio_push_event(indio_dev, 0,
+ iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 1,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
time);
if (stat1 & (1 << 5))
- iio_push_event(indio_dev, 0,
- IIO_UNMOD_EVENT_CODE(IIO_IN, 1,
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 1,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
time);
if (stat1 & (1 << 6))
- iio_push_event(indio_dev, 0,
- IIO_UNMOD_EVENT_CODE(IIO_IN, 2,
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 2,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
time);
if (stat1 & (1 << 7))
- iio_push_event(indio_dev, 0,
- IIO_UNMOD_EVENT_CODE(IIO_IN, 3,
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 3,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
time);
@@ -1821,8 +1822,8 @@ static irqreturn_t adt7316_event_handler(int irq, void *private)
ret = chip->bus.read(chip->bus.client, ADT7316_INT_STAT2, &stat2);
if (!ret) {
if (stat2 & ADT7316_INT_MASK2_VDD)
- iio_push_event(indio_dev, 0,
- IIO_UNMOD_EVENT_CODE(IIO_IN,
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE,
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
@@ -2063,6 +2064,7 @@ static struct attribute *adt7316_event_attributes[] = {
static struct attribute_group adt7316_event_attribute_group = {
.attrs = adt7316_event_attributes,
+ .name = "events",
};
static struct attribute *adt7516_event_attributes[] = {
@@ -2083,6 +2085,7 @@ static struct attribute *adt7516_event_attributes[] = {
static struct attribute_group adt7516_event_attribute_group = {
.attrs = adt7516_event_attributes,
+ .name = "events",
};
#ifdef CONFIG_PM
@@ -2107,14 +2110,12 @@ EXPORT_SYMBOL(adt7316_enable);
static const struct iio_info adt7316_info = {
.attrs = &adt7316_attribute_group,
- .num_interrupt_lines = 1,
.event_attrs = &adt7316_event_attribute_group,
.driver_module = THIS_MODULE,
};
static const struct iio_info adt7516_info = {
.attrs = &adt7516_attribute_group,
- .num_interrupt_lines = 1,
.event_attrs = &adt7516_event_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -2166,10 +2167,6 @@ int __devinit adt7316_probe(struct device *dev, struct adt7316_bus *bus,
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
-
if (chip->bus.irq > 0) {
if (adt7316_platform_data[0])
chip->bus.irq_flags = adt7316_platform_data[0];
@@ -2181,7 +2178,7 @@ int __devinit adt7316_probe(struct device *dev, struct adt7316_bus *bus,
indio_dev->name,
indio_dev);
if (ret)
- goto error_unreg_dev;
+ goto error_free_dev;
if (chip->bus.irq_flags & IRQF_TRIGGER_HIGH)
chip->config1 |= ADT7316_INT_POLARITY;
@@ -2199,6 +2196,10 @@ int __devinit adt7316_probe(struct device *dev, struct adt7316_bus *bus,
goto error_unreg_irq;
}
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unreg_irq;
+
dev_info(dev, "%s temperature sensor, ADC and DAC registered.\n",
indio_dev->name);
@@ -2206,8 +2207,6 @@ int __devinit adt7316_probe(struct device *dev, struct adt7316_bus *bus,
error_unreg_irq:
free_irq(chip->bus.irq, indio_dev);
-error_unreg_dev:
- iio_device_unregister(indio_dev);
error_free_dev:
iio_free_device(indio_dev);
error_ret:
@@ -2220,10 +2219,9 @@ int __devexit adt7316_remove(struct device *dev)
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct adt7316_chip_info *chip = iio_priv(indio_dev);
- dev_set_drvdata(dev, NULL);
+ iio_device_unregister(indio_dev);
if (chip->bus.irq)
free_irq(chip->bus.irq, indio_dev);
- iio_device_unregister(indio_dev);
iio_free_device(indio_dev);
return 0;
diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
new file mode 100644
index 00000000000..9e8f0100997
--- /dev/null
+++ b/drivers/staging/iio/buffer_generic.h
@@ -0,0 +1,228 @@
+/* The industrial I/O core - generic buffer interfaces.
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_BUFFER_GENERIC_H_
+#define _IIO_BUFFER_GENERIC_H_
+#include <linux/sysfs.h>
+#include "iio.h"
+#include "chrdev.h"
+
+#ifdef CONFIG_IIO_BUFFER
+
+struct iio_buffer;
+
+/**
+ * struct iio_buffer_access_funcs - access functions for buffers.
+ * @mark_in_use: reference counting, typically to prevent module removal
+ * @unmark_in_use: reduce reference count when no longer using buffer
+ * @store_to: actually store stuff to the buffer
+ * @read_last: get the last element stored
+ * @read_first_n: try to get a specified number of elements (must exist)
+ * @mark_param_change: notify buffer that some relevant parameter has changed
+ * Often this means the underlying storage may need to
+ * change.
+ * @request_update: if a parameter change has been marked, update underlying
+ * storage.
+ * @get_bytes_per_datum:get current bytes per datum
+ * @set_bytes_per_datum:set number of bytes per datum
+ * @get_length: get number of datums in buffer
+ * @set_length: set number of datums in buffer
+ * @is_enabled: query if buffer is currently being used
+ * @enable: enable the buffer
+ *
+ * The purpose of this structure is to make the buffer element
+ * modular as event for a given driver, different usecases may require
+ * different buffer designs (space efficiency vs speed for example).
+ *
+ * It is worth noting that a given buffer implementation may only support a
+ * small proportion of these functions. The core code 'should' cope fine with
+ * any of them not existing.
+ **/
+struct iio_buffer_access_funcs {
+ void (*mark_in_use)(struct iio_buffer *buffer);
+ void (*unmark_in_use)(struct iio_buffer *buffer);
+
+ int (*store_to)(struct iio_buffer *buffer, u8 *data, s64 timestamp);
+ int (*read_last)(struct iio_buffer *buffer, u8 *data);
+ int (*read_first_n)(struct iio_buffer *buffer,
+ size_t n,
+ char __user *buf);
+
+ int (*mark_param_change)(struct iio_buffer *buffer);
+ int (*request_update)(struct iio_buffer *buffer);
+
+ int (*get_bytes_per_datum)(struct iio_buffer *buffer);
+ int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
+ int (*get_length)(struct iio_buffer *buffer);
+ int (*set_length)(struct iio_buffer *buffer, int length);
+
+ int (*is_enabled)(struct iio_buffer *buffer);
+ int (*enable)(struct iio_buffer *buffer);
+};
+
+/**
+ * struct iio_buffer_setup_ops - buffer setup related callbacks
+ * @preenable: [DRIVER] function to run prior to marking buffer enabled
+ * @postenable: [DRIVER] function to run after marking buffer enabled
+ * @predisable: [DRIVER] function to run prior to marking buffer
+ * disabled
+ * @postdisable: [DRIVER] function to run after marking buffer disabled
+ */
+struct iio_buffer_setup_ops {
+ int (*preenable)(struct iio_dev *);
+ int (*postenable)(struct iio_dev *);
+ int (*predisable)(struct iio_dev *);
+ int (*postdisable)(struct iio_dev *);
+};
+
+/**
+ * struct iio_buffer - general buffer structure
+ * @indio_dev: industrial I/O device structure
+ * @owner: module that owns the buffer (for ref counting)
+ * @length: [DEVICE] number of datums in buffer
+ * @bytes_per_datum: [DEVICE] size of individual datum including timestamp
+ * @bpe: [DEVICE] size of individual channel value
+ * @scan_el_attrs: [DRIVER] control of scan elements if that scan mode
+ * control method is used
+ * @scan_count: [INTERN] the number of elements in the current scan mode
+ * @scan_mask: [INTERN] bitmask used in masking scan mode elements
+ * @scan_timestamp: [INTERN] does the scan mode include a timestamp
+ * @access: [DRIVER] buffer access functions associated with the
+ * implementation.
+ * @flags: [INTERN] file ops related flags including busy flag.
+ **/
+struct iio_buffer {
+ struct iio_dev *indio_dev;
+ struct module *owner;
+ int length;
+ int bytes_per_datum;
+ int bpe;
+ struct attribute_group *scan_el_attrs;
+ int scan_count;
+ long *scan_mask;
+ bool scan_timestamp;
+ const struct iio_buffer_access_funcs *access;
+ const struct iio_buffer_setup_ops *setup_ops;
+ struct list_head scan_el_dev_attr_list;
+ struct attribute_group scan_el_group;
+ wait_queue_head_t pollq;
+ bool stufftoread;
+ unsigned long flags;
+ const struct attribute_group *attrs;
+};
+
+/**
+ * iio_buffer_init() - Initialize the buffer structure
+ * @buffer: buffer to be initialized
+ * @indio_dev: the iio device the buffer is assocated with
+ **/
+void iio_buffer_init(struct iio_buffer *buffer,
+ struct iio_dev *indio_dev);
+
+void iio_buffer_deinit(struct iio_buffer *buffer);
+
+/**
+ * __iio_update_buffer() - update common elements of buffers
+ * @buffer: buffer that is the event source
+ * @bytes_per_datum: size of individual datum including timestamp
+ * @length: number of datums in buffer
+ **/
+static inline void __iio_update_buffer(struct iio_buffer *buffer,
+ int bytes_per_datum, int length)
+{
+ buffer->bytes_per_datum = bytes_per_datum;
+ buffer->length = length;
+}
+
+int iio_scan_mask_query(struct iio_buffer *buffer, int bit);
+
+/**
+ * iio_scan_mask_set() - set particular bit in the scan mask
+ * @buffer: the buffer whose scan mask we are interested in
+ * @bit: the bit to be set.
+ **/
+int iio_scan_mask_set(struct iio_buffer *buffer, int bit);
+
+#define to_iio_buffer(d) \
+ container_of(d, struct iio_buffer, dev)
+
+/**
+ * iio_buffer_register() - register the buffer with IIO core
+ * @indio_dev: device with the buffer to be registered
+ **/
+int iio_buffer_register(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *channels,
+ int num_channels);
+
+/**
+ * iio_buffer_unregister() - unregister the buffer from IIO core
+ * @indio_dev: the device with the buffer to be unregistered
+ **/
+void iio_buffer_unregister(struct iio_dev *indio_dev);
+
+/**
+ * iio_buffer_read_length() - attr func to get number of datums in the buffer
+ **/
+ssize_t iio_buffer_read_length(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+/**
+ * iio_buffer_write_length() - attr func to set number of datums in the buffer
+ **/
+ssize_t iio_buffer_write_length(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len);
+/**
+ * iio_buffer_read_bytes_per_datum() - attr for number of bytes in whole datum
+ **/
+ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+/**
+ * iio_buffer_store_enable() - attr to turn the buffer on
+ **/
+ssize_t iio_buffer_store_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len);
+/**
+ * iio_buffer_show_enable() - attr to see if the buffer is on
+ **/
+ssize_t iio_buffer_show_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+#define IIO_BUFFER_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR, \
+ iio_buffer_read_length, \
+ iio_buffer_write_length)
+#define IIO_BUFFER_BYTES_PER_DATUM_ATTR \
+ DEVICE_ATTR(bytes_per_datum, S_IRUGO | S_IWUSR, \
+ iio_buffer_read_bytes_per_datum, NULL)
+
+#define IIO_BUFFER_ENABLE_ATTR DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, \
+ iio_buffer_show_enable, \
+ iio_buffer_store_enable)
+
+int iio_sw_buffer_preenable(struct iio_dev *indio_dev);
+
+#else /* CONFIG_IIO_BUFFER */
+
+static inline int iio_buffer_register(struct iio_dev *indio_dev,
+ struct iio_chan_spec *channels,
+ int num_channels)
+{
+ return 0;
+}
+
+static inline void iio_buffer_unregister(struct iio_dev *indio_dev)
+{};
+
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* _IIO_BUFFER_GENERIC_H_ */
diff --git a/drivers/staging/iio/cdc/Kconfig b/drivers/staging/iio/cdc/Kconfig
new file mode 100644
index 00000000000..80211df8c57
--- /dev/null
+++ b/drivers/staging/iio/cdc/Kconfig
@@ -0,0 +1,36 @@
+#
+# CDC drivers
+#
+menu "Capacitance to digital converters"
+
+config AD7150
+ tristate "Analog Devices ad7150/1/6 capacitive sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for Analog Devices capacitive sensors.
+ (ad7150, ad7151, ad7156) Provides direct access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7150.
+
+config AD7152
+ tristate "Analog Devices ad7152/3 capacitive sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for Analog Devices capacitive sensors.
+ (ad7152, ad7153) Provides direct access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7152.
+
+config AD7746
+ tristate "Analog Devices AD7745, AD7746 AD7747 capacitive sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for Analog Devices capacitive sensors.
+ (AD7745, AD7746, AD7747) Provides direct access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7746.
+
+endmenu
diff --git a/drivers/staging/iio/cdc/Makefile b/drivers/staging/iio/cdc/Makefile
new file mode 100644
index 00000000000..a5fbabf5c8b
--- /dev/null
+++ b/drivers/staging/iio/cdc/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for industrial I/O DAC drivers
+#
+
+obj-$(CONFIG_AD7150) += ad7150.o
+obj-$(CONFIG_AD7152) += ad7152.o
+obj-$(CONFIG_AD7746) += ad7746.o
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
new file mode 100644
index 00000000000..a761ca94a17
--- /dev/null
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -0,0 +1,676 @@
+/*
+ * AD7150 capacitive sensor driver supporting AD7150/1/6
+ *
+ * Copyright 2010-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD7150 registers definition
+ */
+
+#define AD7150_STATUS 0
+#define AD7150_STATUS_OUT1 (1 << 3)
+#define AD7150_STATUS_OUT2 (1 << 5)
+#define AD7150_CH1_DATA_HIGH 1
+#define AD7150_CH2_DATA_HIGH 3
+#define AD7150_CH1_AVG_HIGH 5
+#define AD7150_CH2_AVG_HIGH 7
+#define AD7150_CH1_SENSITIVITY 9
+#define AD7150_CH1_THR_HOLD_H 9
+#define AD7150_CH1_TIMEOUT 10
+#define AD7150_CH1_SETUP 11
+#define AD7150_CH2_SENSITIVITY 12
+#define AD7150_CH2_THR_HOLD_H 12
+#define AD7150_CH2_TIMEOUT 13
+#define AD7150_CH2_SETUP 14
+#define AD7150_CFG 15
+#define AD7150_CFG_FIX (1 << 7)
+#define AD7150_PD_TIMER 16
+#define AD7150_CH1_CAPDAC 17
+#define AD7150_CH2_CAPDAC 18
+#define AD7150_SN3 19
+#define AD7150_SN2 20
+#define AD7150_SN1 21
+#define AD7150_SN0 22
+#define AD7150_ID 23
+
+/**
+ * struct ad7150_chip_info - instance specific chip data
+ * @client: i2c client for this device
+ * @current_event: device always has one type of event enabled.
+ * This element stores the event code of the current one.
+ * @threshold: thresholds for simple capacitance value events
+ * @thresh_sensitivity: threshold for simple capacitance offset
+ * from 'average' value.
+ * @mag_sensitity: threshold for magnitude of capacitance offset from
+ * from 'average' value.
+ * @thresh_timeout: a timeout, in samples from the moment an
+ * adaptive threshold event occurs to when the average
+ * value jumps to current value.
+ * @mag_timeout: a timeout, in sample from the moment an
+ * adaptive magnitude event occurs to when the average
+ * value jumps to the current value.
+ * @old_state: store state from previous event, allowing confirmation
+ * of new condition.
+ * @conversion_mode: the current conversion mode.
+ * @state_lock: ensure consistent state of this structure wrt the
+ * hardware.
+ */
+struct ad7150_chip_info {
+ struct i2c_client *client;
+ u64 current_event;
+ u16 threshold[2][2];
+ u8 thresh_sensitivity[2][2];
+ u8 mag_sensitivity[2][2];
+ u8 thresh_timeout[2][2];
+ u8 mag_timeout[2][2];
+ int old_state;
+ char *conversion_mode;
+ struct mutex state_lock;
+};
+
+/*
+ * sysfs nodes
+ */
+
+static const u8 ad7150_addresses[][6] = {
+ { AD7150_CH1_DATA_HIGH, AD7150_CH1_AVG_HIGH,
+ AD7150_CH1_SETUP, AD7150_CH1_THR_HOLD_H,
+ AD7150_CH1_SENSITIVITY, AD7150_CH1_TIMEOUT },
+ { AD7150_CH2_DATA_HIGH, AD7150_CH2_AVG_HIGH,
+ AD7150_CH2_SETUP, AD7150_CH2_THR_HOLD_H,
+ AD7150_CH2_SENSITIVITY, AD7150_CH2_TIMEOUT },
+};
+
+static int ad7150_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ int ret;
+ struct ad7150_chip_info *chip = iio_priv(indio_dev);
+
+ switch (mask) {
+ case 0:
+ ret = i2c_smbus_read_word_data(chip->client,
+ ad7150_addresses[chan->channel][0]);
+ if (ret < 0)
+ return ret;
+ *val = swab16(ret);
+ return IIO_VAL_INT;
+ case (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE):
+ ret = i2c_smbus_read_word_data(chip->client,
+ ad7150_addresses[chan->channel][1]);
+ if (ret < 0)
+ return ret;
+ *val = swab16(ret);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7150_read_event_config(struct iio_dev *indio_dev, u64 event_code)
+{
+ int ret;
+ u8 threshtype;
+ bool adaptive;
+ struct ad7150_chip_info *chip = iio_priv(indio_dev);
+ int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING);
+
+ ret = i2c_smbus_read_byte_data(chip->client, AD7150_CFG);
+ if (ret < 0)
+ return ret;
+
+ threshtype = (ret >> 5) & 0x03;
+ adaptive = !!(ret & 0x80);
+
+ switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ if (rising)
+ return adaptive && (threshtype == 0x1);
+ else
+ return adaptive && (threshtype == 0x0);
+ case IIO_EV_TYPE_THRESH_ADAPTIVE:
+ if (rising)
+ return adaptive && (threshtype == 0x3);
+ else
+ return adaptive && (threshtype == 0x2);
+
+ case IIO_EV_TYPE_THRESH:
+ if (rising)
+ return !adaptive && (threshtype == 0x1);
+ else
+ return !adaptive && (threshtype == 0x0);
+ };
+ return -EINVAL;
+}
+
+/* lock should be held */
+static int ad7150_write_event_params(struct iio_dev *indio_dev, u64 event_code)
+{
+ int ret;
+ u16 value;
+ u8 sens, timeout;
+ struct ad7150_chip_info *chip = iio_priv(indio_dev);
+ int chan = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING);
+
+ if (event_code != chip->current_event)
+ return 0;
+
+ switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ /* Note completely different from the adaptive versions */
+ case IIO_EV_TYPE_THRESH:
+ value = chip->threshold[rising][chan];
+ ret = i2c_smbus_write_word_data(chip->client,
+ ad7150_addresses[chan][3],
+ swab16(value));
+ if (ret < 0)
+ return ret;
+ return 0;
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ sens = chip->mag_sensitivity[rising][chan];
+ timeout = chip->mag_timeout[rising][chan];
+ break;
+ case IIO_EV_TYPE_THRESH_ADAPTIVE:
+ sens = chip->thresh_sensitivity[rising][chan];
+ timeout = chip->thresh_timeout[rising][chan];
+ break;
+ default:
+ return -EINVAL;
+ };
+ ret = i2c_smbus_write_byte_data(chip->client,
+ ad7150_addresses[chan][4],
+ sens);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ ad7150_addresses[chan][5],
+ timeout);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int ad7150_write_event_config(struct iio_dev *indio_dev,
+ u64 event_code, int state)
+{
+ u8 thresh_type, cfg, adaptive;
+ int ret;
+ struct ad7150_chip_info *chip = iio_priv(indio_dev);
+ int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING);
+
+ /* Something must always be turned on */
+ if (state == 0)
+ return -EINVAL;
+
+ if (event_code == chip->current_event)
+ return 0;
+ mutex_lock(&chip->state_lock);
+ ret = i2c_smbus_read_byte_data(chip->client, AD7150_CFG);
+ if (ret < 0)
+ goto error_ret;
+
+ cfg = ret & ~((0x03 << 5) | (0x1 << 7));
+
+ switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ adaptive = 1;
+ if (rising)
+ thresh_type = 0x1;
+ else
+ thresh_type = 0x0;
+ break;
+ case IIO_EV_TYPE_THRESH_ADAPTIVE:
+ adaptive = 1;
+ if (rising)
+ thresh_type = 0x3;
+ else
+ thresh_type = 0x2;
+ break;
+ case IIO_EV_TYPE_THRESH:
+ adaptive = 0;
+ if (rising)
+ thresh_type = 0x1;
+ else
+ thresh_type = 0x0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error_ret;
+ };
+
+ cfg |= (!adaptive << 7) | (thresh_type << 5);
+
+ ret = i2c_smbus_write_byte_data(chip->client, AD7150_CFG, cfg);
+ if (ret < 0)
+ goto error_ret;
+
+ chip->current_event = event_code;
+
+ /* update control attributes */
+ ret = ad7150_write_event_params(indio_dev, event_code);
+error_ret:
+ mutex_unlock(&chip->state_lock);
+
+ return 0;
+}
+
+static int ad7150_read_event_value(struct iio_dev *indio_dev,
+ u64 event_code,
+ int *val)
+{
+ int chan = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ struct ad7150_chip_info *chip = iio_priv(indio_dev);
+ int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING);
+
+ /* Complex register sharing going on here */
+ switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ *val = chip->mag_sensitivity[rising][chan];
+ return 0;
+
+ case IIO_EV_TYPE_THRESH_ADAPTIVE:
+ *val = chip->thresh_sensitivity[rising][chan];
+ return 0;
+
+ case IIO_EV_TYPE_THRESH:
+ *val = chip->threshold[rising][chan];
+ return 0;
+
+ default:
+ return -EINVAL;
+ };
+}
+
+static int ad7150_write_event_value(struct iio_dev *indio_dev,
+ u64 event_code,
+ int val)
+{
+ int ret;
+ struct ad7150_chip_info *chip = iio_priv(indio_dev);
+ int chan = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING);
+
+ mutex_lock(&chip->state_lock);
+ switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ chip->mag_sensitivity[rising][chan] = val;
+ break;
+ case IIO_EV_TYPE_THRESH_ADAPTIVE:
+ chip->thresh_sensitivity[rising][chan] = val;
+ break;
+ case IIO_EV_TYPE_THRESH:
+ chip->threshold[rising][chan] = val;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error_ret;
+ };
+
+ /* write back if active */
+ ret = ad7150_write_event_params(indio_dev, event_code);
+
+error_ret:
+ mutex_unlock(&chip->state_lock);
+ return ret;
+}
+
+static ssize_t ad7150_show_timeout(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ u8 value;
+
+ /* use the event code for consistency reasons */
+ int chan = IIO_EVENT_CODE_EXTRACT_NUM(this_attr->address);
+ int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(this_attr->address)
+ == IIO_EV_DIR_RISING);
+
+ switch (IIO_EVENT_CODE_EXTRACT_TYPE(this_attr->address)) {
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ value = chip->mag_timeout[rising][chan];
+ break;
+ case IIO_EV_TYPE_THRESH_ADAPTIVE:
+ value = chip->thresh_timeout[rising][chan];
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t ad7150_store_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int chan = IIO_EVENT_CODE_EXTRACT_NUM(this_attr->address);
+ int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(this_attr->address) ==
+ IIO_EV_DIR_RISING);
+ u8 data;
+ int ret;
+
+ ret = kstrtou8(buf, 10, &data);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&chip->state_lock);
+ switch (IIO_EVENT_CODE_EXTRACT_TYPE(this_attr->address)) {
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ chip->mag_timeout[rising][chan] = data;
+ break;
+ case IIO_EV_TYPE_THRESH_ADAPTIVE:
+ chip->thresh_timeout[rising][chan] = data;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error_ret;
+ };
+
+ ret = ad7150_write_event_params(indio_dev, this_attr->address);
+error_ret:
+ mutex_unlock(&chip->state_lock);
+
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+#define AD7150_TIMEOUT(chan, type, dir, ev_type, ev_dir) \
+ IIO_DEVICE_ATTR(in_capacitance##chan##_##type##_##dir##_timeout, \
+ S_IRUGO | S_IWUSR, \
+ &ad7150_show_timeout, \
+ &ad7150_store_timeout, \
+ IIO_UNMOD_EVENT_CODE(IIO_CAPACITANCE, \
+ chan, \
+ IIO_EV_TYPE_##ev_type, \
+ IIO_EV_DIR_##ev_dir))
+static AD7150_TIMEOUT(0, mag_adaptive, rising, MAG_ADAPTIVE, RISING);
+static AD7150_TIMEOUT(0, mag_adaptive, falling, MAG_ADAPTIVE, FALLING);
+static AD7150_TIMEOUT(1, mag_adaptive, rising, MAG_ADAPTIVE, RISING);
+static AD7150_TIMEOUT(1, mag_adaptive, falling, MAG_ADAPTIVE, FALLING);
+static AD7150_TIMEOUT(0, thresh_adaptive, rising, THRESH_ADAPTIVE, RISING);
+static AD7150_TIMEOUT(0, thresh_adaptive, falling, THRESH_ADAPTIVE, FALLING);
+static AD7150_TIMEOUT(1, thresh_adaptive, rising, THRESH_ADAPTIVE, RISING);
+static AD7150_TIMEOUT(1, thresh_adaptive, falling, THRESH_ADAPTIVE, FALLING);
+
+static const struct iio_chan_spec ad7150_channels[] = {
+ {
+ .type = IIO_CAPACITANCE,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE),
+ .event_mask =
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING) |
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH_ADAPTIVE, IIO_EV_DIR_RISING) |
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH_ADAPTIVE, IIO_EV_DIR_FALLING) |
+ IIO_EV_BIT(IIO_EV_TYPE_MAG_ADAPTIVE, IIO_EV_DIR_RISING) |
+ IIO_EV_BIT(IIO_EV_TYPE_MAG_ADAPTIVE, IIO_EV_DIR_FALLING)
+ }, {
+ .type = IIO_CAPACITANCE,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask = (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE),
+ .event_mask =
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING) |
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH_ADAPTIVE, IIO_EV_DIR_RISING) |
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH_ADAPTIVE, IIO_EV_DIR_FALLING) |
+ IIO_EV_BIT(IIO_EV_TYPE_MAG_ADAPTIVE, IIO_EV_DIR_RISING) |
+ IIO_EV_BIT(IIO_EV_TYPE_MAG_ADAPTIVE, IIO_EV_DIR_FALLING)
+ },
+};
+
+/*
+ * threshold events
+ */
+
+static irqreturn_t ad7150_event_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct ad7150_chip_info *chip = iio_priv(indio_dev);
+ u8 int_status;
+ s64 timestamp = iio_get_time_ns();
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(chip->client, AD7150_STATUS);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ int_status = ret;
+
+ if ((int_status & AD7150_STATUS_OUT1) &&
+ !(chip->old_state & AD7150_STATUS_OUT1))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_CAPACITANCE,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ timestamp);
+ else if ((!(int_status & AD7150_STATUS_OUT1)) &&
+ (chip->old_state & AD7150_STATUS_OUT1))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_CAPACITANCE,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ timestamp);
+
+ if ((int_status & AD7150_STATUS_OUT2) &&
+ !(chip->old_state & AD7150_STATUS_OUT2))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_CAPACITANCE,
+ 1,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ timestamp);
+ else if ((!(int_status & AD7150_STATUS_OUT2)) &&
+ (chip->old_state & AD7150_STATUS_OUT2))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_CAPACITANCE,
+ 1,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ timestamp);
+ /* store the status to avoid repushing same events */
+ chip->old_state = int_status;
+
+ return IRQ_HANDLED;
+}
+
+/* Timeouts not currently handled by core */
+static struct attribute *ad7150_event_attributes[] = {
+ &iio_dev_attr_in_capacitance0_mag_adaptive_rising_timeout
+ .dev_attr.attr,
+ &iio_dev_attr_in_capacitance0_mag_adaptive_falling_timeout
+ .dev_attr.attr,
+ &iio_dev_attr_in_capacitance1_mag_adaptive_rising_timeout
+ .dev_attr.attr,
+ &iio_dev_attr_in_capacitance1_mag_adaptive_falling_timeout
+ .dev_attr.attr,
+ &iio_dev_attr_in_capacitance0_thresh_adaptive_rising_timeout
+ .dev_attr.attr,
+ &iio_dev_attr_in_capacitance0_thresh_adaptive_falling_timeout
+ .dev_attr.attr,
+ &iio_dev_attr_in_capacitance1_thresh_adaptive_rising_timeout
+ .dev_attr.attr,
+ &iio_dev_attr_in_capacitance1_thresh_adaptive_falling_timeout
+ .dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ad7150_event_attribute_group = {
+ .attrs = ad7150_event_attributes,
+ .name = "events",
+};
+
+static const struct iio_info ad7150_info = {
+ .event_attrs = &ad7150_event_attribute_group,
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7150_read_raw,
+ .read_event_config = &ad7150_read_event_config,
+ .write_event_config = &ad7150_write_event_config,
+ .read_event_value = &ad7150_read_event_value,
+ .write_event_value = &ad7150_write_event_value,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7150_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct ad7150_chip_info *chip;
+ struct iio_dev *indio_dev;
+
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ chip = iio_priv(indio_dev);
+ mutex_init(&chip->state_lock);
+ /* this is only used for device removal purposes */
+ i2c_set_clientdata(client, indio_dev);
+
+ chip->client = client;
+
+ indio_dev->name = id->name;
+ indio_dev->channels = ad7150_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7150_channels);
+ /* Establish that the iio_dev is a child of the i2c device */
+ indio_dev->dev.parent = &client->dev;
+
+ indio_dev->info = &ad7150_info;
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (client->irq) {
+ ret = request_threaded_irq(client->irq,
+ NULL,
+ &ad7150_event_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "ad7150_irq1",
+ indio_dev);
+ if (ret)
+ goto error_free_dev;
+ }
+
+ if (client->dev.platform_data) {
+ ret = request_threaded_irq(*(unsigned int *)
+ client->dev.platform_data,
+ NULL,
+ &ad7150_event_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "ad7150_irq2",
+ indio_dev);
+ if (ret)
+ goto error_free_irq;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_free_irq2;
+
+ dev_info(&client->dev, "%s capacitive sensor registered,irq: %d\n",
+ id->name, client->irq);
+
+ return 0;
+error_free_irq2:
+ if (client->dev.platform_data)
+ free_irq(*(unsigned int *)client->dev.platform_data,
+ indio_dev);
+error_free_irq:
+ if (client->irq)
+ free_irq(client->irq, indio_dev);
+error_free_dev:
+ iio_free_device(indio_dev);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad7150_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ if (client->irq)
+ free_irq(client->irq, indio_dev);
+
+ if (client->dev.platform_data)
+ free_irq(*(unsigned int *)client->dev.platform_data, indio_dev);
+
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad7150_id[] = {
+ { "ad7150", 0 },
+ { "ad7151", 0 },
+ { "ad7156", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad7150_id);
+
+static struct i2c_driver ad7150_driver = {
+ .driver = {
+ .name = "ad7150",
+ },
+ .probe = ad7150_probe,
+ .remove = __devexit_p(ad7150_remove),
+ .id_table = ad7150_id,
+};
+
+static __init int ad7150_init(void)
+{
+ return i2c_add_driver(&ad7150_driver);
+}
+
+static __exit void ad7150_exit(void)
+{
+ i2c_del_driver(&ad7150_driver);
+}
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD7150/1/6 capacitive sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7150_init);
+module_exit(ad7150_exit);
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
new file mode 100644
index 00000000000..662584d72a7
--- /dev/null
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -0,0 +1,559 @@
+/*
+ * AD7152 capacitive sensor driver supporting AD7152/3
+ *
+ * Copyright 2010-2011a Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * TODO: Check compliance of calibbias with abi (units)
+ */
+/*
+ * AD7152 registers definition
+ */
+
+#define AD7152_REG_STATUS 0
+#define AD7152_REG_CH1_DATA_HIGH 1
+#define AD7152_REG_CH2_DATA_HIGH 3
+#define AD7152_REG_CH1_OFFS_HIGH 5
+#define AD7152_REG_CH2_OFFS_HIGH 7
+#define AD7152_REG_CH1_GAIN_HIGH 9
+#define AD7152_REG_CH1_SETUP 11
+#define AD7152_REG_CH2_GAIN_HIGH 12
+#define AD7152_REG_CH2_SETUP 14
+#define AD7152_REG_CFG 15
+#define AD7152_REG_RESEVERD 16
+#define AD7152_REG_CAPDAC_POS 17
+#define AD7152_REG_CAPDAC_NEG 18
+#define AD7152_REG_CFG2 26
+
+/* Status Register Bit Designations (AD7152_REG_STATUS) */
+#define AD7152_STATUS_RDY1 (1 << 0)
+#define AD7152_STATUS_RDY2 (1 << 1)
+#define AD7152_STATUS_C1C2 (1 << 2)
+#define AD7152_STATUS_PWDN (1 << 7)
+
+/* Setup Register Bit Designations (AD7152_REG_CHx_SETUP) */
+#define AD7152_SETUP_CAPDIFF (1 << 5)
+#define AD7152_SETUP_RANGE_2pF (0 << 6)
+#define AD7152_SETUP_RANGE_0_5pF (1 << 6)
+#define AD7152_SETUP_RANGE_1pF (2 << 6)
+#define AD7152_SETUP_RANGE_4pF (3 << 6)
+#define AD7152_SETUP_RANGE(x) ((x) << 6)
+
+/* Config Register Bit Designations (AD7152_REG_CFG) */
+#define AD7152_CONF_CH2EN (1 << 3)
+#define AD7152_CONF_CH1EN (1 << 4)
+#define AD7152_CONF_MODE_IDLE (0 << 0)
+#define AD7152_CONF_MODE_CONT_CONV (1 << 0)
+#define AD7152_CONF_MODE_SINGLE_CONV (2 << 0)
+#define AD7152_CONF_MODE_OFFS_CAL (5 << 0)
+#define AD7152_CONF_MODE_GAIN_CAL (6 << 0)
+
+/* Capdac Register Bit Designations (AD7152_REG_CAPDAC_XXX) */
+#define AD7152_CAPDAC_DACEN (1 << 7)
+#define AD7152_CAPDAC_DACP(x) ((x) & 0x1F)
+
+/* CFG2 Register Bit Designations (AD7152_REG_CFG2) */
+#define AD7152_CFG2_OSR(x) (((x) & 0x3) << 4)
+
+enum {
+ AD7152_DATA,
+ AD7152_OFFS,
+ AD7152_GAIN,
+ AD7152_SETUP
+};
+
+/*
+ * struct ad7152_chip_info - chip specifc information
+ */
+
+struct ad7152_chip_info {
+ struct i2c_client *client;
+ /*
+ * Capacitive channel digital filter setup;
+ * conversion time/update rate setup per channel
+ */
+ u8 filter_rate_setup;
+ u8 setup[2];
+};
+
+static inline ssize_t ad7152_start_calib(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len,
+ u8 regval)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ bool doit;
+ int ret, timeout = 10;
+
+ ret = strtobool(buf, &doit);
+ if (ret < 0)
+ return ret;
+
+ if (!doit)
+ return 0;
+
+ if (this_attr->address == 0)
+ regval |= AD7152_CONF_CH1EN;
+ else
+ regval |= AD7152_CONF_CH2EN;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG, regval);
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+
+ do {
+ mdelay(20);
+ ret = i2c_smbus_read_byte_data(chip->client, AD7152_REG_CFG);
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ } while ((ret == regval) && timeout--);
+
+ mutex_unlock(&indio_dev->mlock);
+ return len;
+}
+static ssize_t ad7152_start_offset_calib(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return ad7152_start_calib(dev, attr, buf, len,
+ AD7152_CONF_MODE_OFFS_CAL);
+}
+static ssize_t ad7152_start_gain_calib(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return ad7152_start_calib(dev, attr, buf, len,
+ AD7152_CONF_MODE_GAIN_CAL);
+}
+
+static IIO_DEVICE_ATTR(in_capacitance0_calibbias_calibration,
+ S_IWUSR, NULL, ad7152_start_offset_calib, 0);
+static IIO_DEVICE_ATTR(in_capacitance1_calibbias_calibration,
+ S_IWUSR, NULL, ad7152_start_offset_calib, 1);
+static IIO_DEVICE_ATTR(in_capacitance0_calibscale_calibration,
+ S_IWUSR, NULL, ad7152_start_gain_calib, 0);
+static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration,
+ S_IWUSR, NULL, ad7152_start_gain_calib, 1);
+
+/* Values are Update Rate (Hz), Conversion Time (ms) + 1*/
+static const unsigned char ad7152_filter_rate_table[][2] = {
+ {200, 5 + 1}, {50, 20 + 1}, {20, 50 + 1}, {17, 60 + 1},
+};
+
+static ssize_t ad7152_show_filter_rate_setup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n",
+ ad7152_filter_rate_table[chip->filter_rate_setup][0]);
+}
+
+static ssize_t ad7152_store_filter_rate_setup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = iio_priv(indio_dev);
+ u8 data;
+ int ret, i;
+
+ ret = kstrtou8(buf, 10, &data);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(ad7152_filter_rate_table); i++)
+ if (data >= ad7152_filter_rate_table[i][0])
+ break;
+
+ if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
+ i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+
+ chip->filter_rate_setup = i;
+ mutex_unlock(&indio_dev->mlock);
+
+ return len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR,
+ ad7152_show_filter_rate_setup,
+ ad7152_store_filter_rate_setup);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("200 50 20 17");
+
+static IIO_CONST_ATTR(in_capacitance_scale_available,
+ "0.000061050 0.000030525 0.000015263 0.000007631");
+
+static struct attribute *ad7152_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
+ &iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
+ &iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
+ &iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr,
+ &iio_const_attr_in_capacitance_scale_available.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad7152_attribute_group = {
+ .attrs = ad7152_attributes,
+};
+
+static const u8 ad7152_addresses[][4] = {
+ { AD7152_REG_CH1_DATA_HIGH, AD7152_REG_CH1_OFFS_HIGH,
+ AD7152_REG_CH1_GAIN_HIGH, AD7152_REG_CH1_SETUP },
+ { AD7152_REG_CH2_DATA_HIGH, AD7152_REG_CH2_OFFS_HIGH,
+ AD7152_REG_CH2_GAIN_HIGH, AD7152_REG_CH2_SETUP },
+};
+
+/* Values are nano relative to pf base. */
+static const int ad7152_scale_table[] = {
+ 30525, 7631, 15263, 61050
+};
+
+static int ad7152_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad7152_chip_info *chip = iio_priv(indio_dev);
+ int ret, i;
+
+ mutex_lock(&indio_dev->mlock);
+
+ switch (mask) {
+ case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ if (val != 1) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ val = (val2 * 1024) / 15625;
+
+ ret = i2c_smbus_write_word_data(chip->client,
+ ad7152_addresses[chan->channel][AD7152_GAIN],
+ swab16(val));
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+ break;
+
+ case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ if ((val < 0) | (val > 0xFFFF)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = i2c_smbus_write_word_data(chip->client,
+ ad7152_addresses[chan->channel][AD7152_OFFS],
+ swab16(val));
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+ break;
+ case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ if (val != 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ for (i = 0; i < ARRAY_SIZE(ad7152_scale_table); i++)
+ if (val2 == ad7152_scale_table[i])
+ break;
+
+ chip->setup[chan->channel] &= ~AD7152_SETUP_RANGE_4pF;
+ chip->setup[chan->channel] |= AD7152_SETUP_RANGE(i);
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ ad7152_addresses[chan->channel][AD7152_SETUP],
+ chip->setup[chan->channel]);
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+static int ad7152_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct ad7152_chip_info *chip = iio_priv(indio_dev);
+ int ret;
+ u8 regval = 0;
+
+ mutex_lock(&indio_dev->mlock);
+
+ switch (mask) {
+ case 0:
+ /* First set whether in differential mode */
+
+ regval = chip->setup[chan->channel];
+
+ if (chan->differential)
+ chip->setup[chan->channel] |= AD7152_SETUP_CAPDIFF;
+ else
+ chip->setup[chan->channel] &= ~AD7152_SETUP_CAPDIFF;
+
+ if (regval != chip->setup[chan->channel]) {
+ ret = i2c_smbus_write_byte_data(chip->client,
+ ad7152_addresses[chan->channel][AD7152_SETUP],
+ chip->setup[chan->channel]);
+ if (ret < 0)
+ goto out;
+ }
+ /* Make sure the channel is enabled */
+ if (chan->channel == 0)
+ regval = AD7152_CONF_CH1EN;
+ else
+ regval = AD7152_CONF_CH2EN;
+
+ /* Trigger a single read */
+ regval |= AD7152_CONF_MODE_SINGLE_CONV;
+ ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG,
+ regval);
+ if (ret < 0)
+ goto out;
+
+ msleep(ad7152_filter_rate_table[chip->filter_rate_setup][1]);
+ /* Now read the actual register */
+ ret = i2c_smbus_read_word_data(chip->client,
+ ad7152_addresses[chan->channel][AD7152_DATA]);
+ if (ret < 0)
+ goto out;
+ *val = swab16(ret);
+
+ if (chan->differential)
+ *val -= 0x8000;
+
+ ret = IIO_VAL_INT;
+ break;
+ case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+
+ ret = i2c_smbus_read_word_data(chip->client,
+ ad7152_addresses[chan->channel][AD7152_GAIN]);
+ if (ret < 0)
+ goto out;
+ /* 1 + gain_val / 2^16 */
+ *val = 1;
+ *val2 = (15625 * swab16(ret)) / 1024;
+
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ ret = i2c_smbus_read_word_data(chip->client,
+ ad7152_addresses[chan->channel][AD7152_OFFS]);
+ if (ret < 0)
+ goto out;
+ *val = swab16(ret);
+
+ ret = IIO_VAL_INT;
+ break;
+ case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ ret = i2c_smbus_read_byte_data(chip->client,
+ ad7152_addresses[chan->channel][AD7152_SETUP]);
+ if (ret < 0)
+ goto out;
+ *val = 0;
+ *val2 = ad7152_scale_table[ret >> 6];
+
+ ret = IIO_VAL_INT_PLUS_NANO;
+ break;
+ default:
+ ret = -EINVAL;
+ };
+out:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static int ad7152_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+}
+
+static const struct iio_info ad7152_info = {
+ .attrs = &ad7152_attribute_group,
+ .read_raw = &ad7152_read_raw,
+ .write_raw = &ad7152_write_raw,
+ .write_raw_get_fmt = &ad7152_write_raw_get_fmt,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct iio_chan_spec ad7152_channels[] = {
+ {
+ .type = IIO_CAPACITANCE,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ }, {
+ .type = IIO_CAPACITANCE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 0,
+ .channel2 = 2,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ }, {
+ .type = IIO_CAPACITANCE,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ }, {
+ .type = IIO_CAPACITANCE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 1,
+ .channel2 = 3,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ }
+};
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7152_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct ad7152_chip_info *chip;
+ struct iio_dev *indio_dev;
+
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ chip = iio_priv(indio_dev);
+ /* this is only used for device removal purposes */
+ i2c_set_clientdata(client, indio_dev);
+
+ chip->client = client;
+
+ /* Establish that the iio_dev is a child of the i2c device */
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ad7152_info;
+ indio_dev->channels = ad7152_channels;
+ if (id->driver_data == 0)
+ indio_dev->num_channels = ARRAY_SIZE(ad7152_channels);
+ else
+ indio_dev->num_channels = 2;
+ indio_dev->num_channels = ARRAY_SIZE(ad7152_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ dev_err(&client->dev, "%s capacitive sensor registered\n", id->name);
+
+ return 0;
+
+error_free_dev:
+ iio_free_device(indio_dev);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad7152_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad7152_id[] = {
+ { "ad7152", 0 },
+ { "ad7153", 1 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad7152_id);
+
+static struct i2c_driver ad7152_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = ad7152_probe,
+ .remove = __devexit_p(ad7152_remove),
+ .id_table = ad7152_id,
+};
+
+static __init int ad7152_init(void)
+{
+ return i2c_add_driver(&ad7152_driver);
+}
+
+static __exit void ad7152_exit(void)
+{
+ i2c_del_driver(&ad7152_driver);
+}
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD7152/3 capacitive sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7152_init);
+module_exit(ad7152_exit);
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
new file mode 100644
index 00000000000..2867943309c
--- /dev/null
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -0,0 +1,807 @@
+/*
+ * AD7746 capacitive sensor driver supporting AD7745, AD7746 and AD7747
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#include "ad7746.h"
+
+/*
+ * AD7746 Register Definition
+ */
+
+#define AD7746_REG_STATUS 0
+#define AD7746_REG_CAP_DATA_HIGH 1
+#define AD7746_REG_CAP_DATA_MID 2
+#define AD7746_REG_CAP_DATA_LOW 3
+#define AD7746_REG_VT_DATA_HIGH 4
+#define AD7746_REG_VT_DATA_MID 5
+#define AD7746_REG_VT_DATA_LOW 6
+#define AD7746_REG_CAP_SETUP 7
+#define AD7746_REG_VT_SETUP 8
+#define AD7746_REG_EXC_SETUP 9
+#define AD7746_REG_CFG 10
+#define AD7746_REG_CAPDACA 11
+#define AD7746_REG_CAPDACB 12
+#define AD7746_REG_CAP_OFFH 13
+#define AD7746_REG_CAP_OFFL 14
+#define AD7746_REG_CAP_GAINH 15
+#define AD7746_REG_CAP_GAINL 16
+#define AD7746_REG_VOLT_GAINH 17
+#define AD7746_REG_VOLT_GAINL 18
+
+/* Status Register Bit Designations (AD7746_REG_STATUS) */
+#define AD7746_STATUS_EXCERR (1 << 3)
+#define AD7746_STATUS_RDY (1 << 2)
+#define AD7746_STATUS_RDYVT (1 << 1)
+#define AD7746_STATUS_RDYCAP (1 << 0)
+
+/* Capacitive Channel Setup Register Bit Designations (AD7746_REG_CAP_SETUP) */
+#define AD7746_CAPSETUP_CAPEN (1 << 7)
+#define AD7746_CAPSETUP_CIN2 (1 << 6) /* AD7746 only */
+#define AD7746_CAPSETUP_CAPDIFF (1 << 5)
+#define AD7746_CAPSETUP_CACHOP (1 << 0)
+
+/* Voltage/Temperature Setup Register Bit Designations (AD7746_REG_VT_SETUP) */
+#define AD7746_VTSETUP_VTEN (1 << 7)
+#define AD7746_VTSETUP_VTMD_INT_TEMP (0 << 5)
+#define AD7746_VTSETUP_VTMD_EXT_TEMP (1 << 5)
+#define AD7746_VTSETUP_VTMD_VDD_MON (2 << 5)
+#define AD7746_VTSETUP_VTMD_EXT_VIN (3 << 5)
+#define AD7746_VTSETUP_EXTREF (1 << 4)
+#define AD7746_VTSETUP_VTSHORT (1 << 1)
+#define AD7746_VTSETUP_VTCHOP (1 << 0)
+
+/* Excitation Setup Register Bit Designations (AD7746_REG_EXC_SETUP) */
+#define AD7746_EXCSETUP_CLKCTRL (1 << 7)
+#define AD7746_EXCSETUP_EXCON (1 << 6)
+#define AD7746_EXCSETUP_EXCB (1 << 5)
+#define AD7746_EXCSETUP_NEXCB (1 << 4)
+#define AD7746_EXCSETUP_EXCA (1 << 3)
+#define AD7746_EXCSETUP_NEXCA (1 << 2)
+#define AD7746_EXCSETUP_EXCLVL(x) (((x) & 0x3) << 0)
+
+/* Config Register Bit Designations (AD7746_REG_CFG) */
+#define AD7746_CONF_VTFS(x) ((x) << 6)
+#define AD7746_CONF_CAPFS(x) ((x) << 3)
+#define AD7746_CONF_MODE_IDLE (0 << 0)
+#define AD7746_CONF_MODE_CONT_CONV (1 << 0)
+#define AD7746_CONF_MODE_SINGLE_CONV (2 << 0)
+#define AD7746_CONF_MODE_PWRDN (3 << 0)
+#define AD7746_CONF_MODE_OFFS_CAL (5 << 0)
+#define AD7746_CONF_MODE_GAIN_CAL (6 << 0)
+
+/* CAPDAC Register Bit Designations (AD7746_REG_CAPDACx) */
+#define AD7746_CAPDAC_DACEN (1 << 7)
+#define AD7746_CAPDAC_DACP(x) ((x) & 0x7F)
+
+/*
+ * struct ad7746_chip_info - chip specifc information
+ */
+
+struct ad7746_chip_info {
+ struct i2c_client *client;
+ /*
+ * Capacitive channel digital filter setup;
+ * conversion time/update rate setup per channel
+ */
+ u8 config;
+ u8 cap_setup;
+ u8 vt_setup;
+ u8 capdac[2][2];
+ s8 capdac_set;
+};
+
+enum ad7746_chan {
+ VIN,
+ VIN_VDD,
+ TEMP_INT,
+ TEMP_EXT,
+ CIN1,
+ CIN1_DIFF,
+ CIN2,
+ CIN2_DIFF,
+};
+
+static const struct iio_chan_spec ad7746_channels[] = {
+ [VIN] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = AD7746_REG_VT_DATA_HIGH << 8 |
+ AD7746_VTSETUP_VTMD_EXT_VIN,
+ },
+ [VIN_VDD] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .extend_name = "supply",
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = AD7746_REG_VT_DATA_HIGH << 8 |
+ AD7746_VTSETUP_VTMD_VDD_MON,
+ },
+ [TEMP_INT] = {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .processed_val = IIO_PROCESSED,
+ .address = AD7746_REG_VT_DATA_HIGH << 8 |
+ AD7746_VTSETUP_VTMD_INT_TEMP,
+ },
+ [TEMP_EXT] = {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 1,
+ .processed_val = IIO_PROCESSED,
+ .address = AD7746_REG_VT_DATA_HIGH << 8 |
+ AD7746_VTSETUP_VTMD_EXT_TEMP,
+ },
+ [CIN1] = {
+ .type = IIO_CAPACITANCE,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
+ (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = AD7746_REG_CAP_DATA_HIGH << 8,
+ },
+ [CIN1_DIFF] = {
+ .type = IIO_CAPACITANCE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 0,
+ .channel2 = 2,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
+ (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = AD7746_REG_CAP_DATA_HIGH << 8 |
+ AD7746_CAPSETUP_CAPDIFF
+ },
+ [CIN2] = {
+ .type = IIO_CAPACITANCE,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
+ (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = AD7746_REG_CAP_DATA_HIGH << 8 |
+ AD7746_CAPSETUP_CIN2,
+ },
+ [CIN2_DIFF] = {
+ .type = IIO_CAPACITANCE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 1,
+ .channel2 = 3,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
+ (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = AD7746_REG_CAP_DATA_HIGH << 8 |
+ AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2,
+ }
+};
+
+/* Values are Update Rate (Hz), Conversion Time (ms) + 1*/
+static const unsigned char ad7746_vt_filter_rate_table[][2] = {
+ {50, 20 + 1}, {31, 32 + 1}, {16, 62 + 1}, {8, 122 + 1},
+};
+
+static const unsigned char ad7746_cap_filter_rate_table[][2] = {
+ {91, 11 + 1}, {84, 12 + 1}, {50, 20 + 1}, {26, 38 + 1},
+ {16, 62 + 1}, {13, 77 + 1}, {11, 92 + 1}, {9, 110 + 1},
+};
+
+static int ad7746_select_channel(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ int ret, delay;
+ u8 vt_setup, cap_setup;
+
+ switch (chan->type) {
+ case IIO_CAPACITANCE:
+ cap_setup = (chan->address & 0xFF) | AD7746_CAPSETUP_CAPEN;
+ vt_setup = chip->vt_setup & ~AD7746_VTSETUP_VTEN;
+ delay = ad7746_cap_filter_rate_table[(chip->config >> 3) &
+ 0x7][1];
+
+ if (chip->capdac_set != chan->channel) {
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_CAPDACA,
+ chip->capdac[chan->channel][0]);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_CAPDACB,
+ chip->capdac[chan->channel][1]);
+ if (ret < 0)
+ return ret;
+
+ chip->capdac_set = chan->channel;
+ }
+ break;
+ case IIO_VOLTAGE:
+ case IIO_TEMP:
+ vt_setup = (chan->address & 0xFF) | AD7746_VTSETUP_VTEN;
+ cap_setup = chip->cap_setup & ~AD7746_CAPSETUP_CAPEN;
+ delay = ad7746_cap_filter_rate_table[(chip->config >> 6) &
+ 0x3][1];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (chip->cap_setup != cap_setup) {
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_CAP_SETUP,
+ cap_setup);
+ if (ret < 0)
+ return ret;
+
+ chip->cap_setup = cap_setup;
+ }
+
+ if (chip->vt_setup != vt_setup) {
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_VT_SETUP,
+ vt_setup);
+ if (ret < 0)
+ return ret;
+
+ chip->vt_setup = vt_setup;
+ }
+
+ return delay;
+}
+
+static inline ssize_t ad7746_start_calib(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len,
+ u8 regval)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ bool doit;
+ int ret, timeout = 10;
+
+ ret = strtobool(buf, &doit);
+ if (ret < 0)
+ return ret;
+
+ if (!doit)
+ return 0;
+
+ mutex_lock(&indio_dev->mlock);
+ regval |= chip->config;
+ ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG, regval);
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+
+ do {
+ msleep(20);
+ ret = i2c_smbus_read_byte_data(chip->client, AD7746_REG_CFG);
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ } while ((ret == regval) && timeout--);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return len;
+}
+
+static ssize_t ad7746_start_offset_calib(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ int ret = ad7746_select_channel(indio_dev,
+ &ad7746_channels[to_iio_dev_attr(attr)->address]);
+ if (ret < 0)
+ return ret;
+
+ return ad7746_start_calib(dev, attr, buf, len,
+ AD7746_CONF_MODE_OFFS_CAL);
+}
+
+static ssize_t ad7746_start_gain_calib(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ int ret = ad7746_select_channel(indio_dev,
+ &ad7746_channels[to_iio_dev_attr(attr)->address]);
+ if (ret < 0)
+ return ret;
+
+ return ad7746_start_calib(dev, attr, buf, len,
+ AD7746_CONF_MODE_GAIN_CAL);
+}
+
+static IIO_DEVICE_ATTR(in_capacitance0_calibbias_calibration,
+ S_IWUSR, NULL, ad7746_start_offset_calib, CIN1);
+static IIO_DEVICE_ATTR(in_capacitance1_calibbias_calibration,
+ S_IWUSR, NULL, ad7746_start_offset_calib, CIN2);
+static IIO_DEVICE_ATTR(in_capacitance0_calibscale_calibration,
+ S_IWUSR, NULL, ad7746_start_gain_calib, CIN1);
+static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration,
+ S_IWUSR, NULL, ad7746_start_gain_calib, CIN2);
+static IIO_DEVICE_ATTR(in_voltage0_calibscale_calibration,
+ S_IWUSR, NULL, ad7746_start_gain_calib, VIN);
+
+static ssize_t ad7746_show_cap_filter_rate_setup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", ad7746_cap_filter_rate_table[
+ (chip->config >> 3) & 0x7][0]);
+}
+
+static ssize_t ad7746_store_cap_filter_rate_setup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ u8 data;
+ int ret, i;
+
+ ret = kstrtou8(buf, 10, &data);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(ad7746_cap_filter_rate_table); i++)
+ if (data >= ad7746_cap_filter_rate_table[i][0])
+ break;
+
+ if (i >= ARRAY_SIZE(ad7746_cap_filter_rate_table))
+ i = ARRAY_SIZE(ad7746_cap_filter_rate_table) - 1;
+
+ mutex_lock(&indio_dev->mlock);
+ chip->config &= ~AD7746_CONF_CAPFS(0x7);
+ chip->config |= AD7746_CONF_CAPFS(i);
+ mutex_unlock(&indio_dev->mlock);
+
+ return len;
+}
+
+static ssize_t ad7746_show_vt_filter_rate_setup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", ad7746_vt_filter_rate_table[
+ (chip->config >> 6) & 0x3][0]);
+}
+
+static ssize_t ad7746_store_vt_filter_rate_setup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ u8 data;
+ int ret, i;
+
+ ret = kstrtou8(buf, 10, &data);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(ad7746_vt_filter_rate_table); i++)
+ if (data >= ad7746_vt_filter_rate_table[i][0])
+ break;
+
+ if (i >= ARRAY_SIZE(ad7746_vt_filter_rate_table))
+ i = ARRAY_SIZE(ad7746_vt_filter_rate_table) - 1;
+
+ mutex_lock(&indio_dev->mlock);
+ chip->config &= ~AD7746_CONF_VTFS(0x3);
+ chip->config |= AD7746_CONF_VTFS(i);
+ mutex_unlock(&indio_dev->mlock);
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(in_capacitance_sampling_frequency,
+ S_IRUGO | S_IWUSR, ad7746_show_cap_filter_rate_setup,
+ ad7746_store_cap_filter_rate_setup, 0);
+
+static IIO_DEVICE_ATTR(in_voltage_sampling_frequency,
+ S_IRUGO | S_IWUSR, ad7746_show_vt_filter_rate_setup,
+ ad7746_store_vt_filter_rate_setup, 0);
+
+static IIO_CONST_ATTR(in_voltage_sampling_frequency_available, "50 31 16 8");
+static IIO_CONST_ATTR(in_capacitance_sampling_frequency_available,
+ "91 84 50 26 16 13 11 9");
+
+static struct attribute *ad7746_attributes[] = {
+ &iio_dev_attr_in_capacitance_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_in_voltage_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
+ &iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
+ &iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr,
+ &iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
+ &iio_dev_attr_in_voltage0_calibscale_calibration.dev_attr.attr,
+ &iio_const_attr_in_voltage_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_capacitance_sampling_frequency_available.
+ dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad7746_attribute_group = {
+ .attrs = ad7746_attributes,
+};
+
+static int ad7746_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ int ret, reg;
+
+ mutex_lock(&indio_dev->mlock);
+
+ switch (mask) {
+ case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ if (val != 1) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ val = (val2 * 1024) / 15625;
+
+ switch (chan->type) {
+ case IIO_CAPACITANCE:
+ reg = AD7746_REG_CAP_GAINH;
+ break;
+ case IIO_VOLTAGE:
+ reg = AD7746_REG_VOLT_GAINH;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = i2c_smbus_write_word_data(chip->client, reg, swab16(val));
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+ break;
+ case (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED):
+ if ((val < 0) | (val > 0xFFFF)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = i2c_smbus_write_word_data(chip->client,
+ AD7746_REG_CAP_OFFH, swab16(val));
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+ break;
+ case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ if ((val < 0) | (val > 43008000)) { /* 21pF */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* CAPDAC Scale = 21pF_typ / 127
+ * CIN Scale = 8.192pF / 2^24
+ * Offset Scale = CAPDAC Scale / CIN Scale = 338646
+ * */
+
+ val /= 338646;
+
+ chip->capdac[chan->channel][chan->differential] = (val > 0 ?
+ AD7746_CAPDAC_DACP(val) | AD7746_CAPDAC_DACEN : 0);
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_CAPDACA,
+ chip->capdac[chan->channel][0]);
+ if (ret < 0)
+ goto out;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_CAPDACB,
+ chip->capdac[chan->channel][1]);
+ if (ret < 0)
+ goto out;
+
+ chip->capdac_set = chan->channel;
+
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static int ad7746_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ int ret, delay;
+ u8 regval, reg;
+
+ union {
+ u32 d32;
+ u8 d8[4];
+ } data;
+
+ mutex_lock(&indio_dev->mlock);
+
+ switch (mask) {
+ case 0:
+ ret = ad7746_select_channel(indio_dev, chan);
+ if (ret < 0)
+ goto out;
+ delay = ret;
+
+ regval = chip->config | AD7746_CONF_MODE_SINGLE_CONV;
+ ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG,
+ regval);
+ if (ret < 0)
+ goto out;
+
+ msleep(delay);
+ /* Now read the actual register */
+
+ ret = i2c_smbus_read_i2c_block_data(chip->client,
+ chan->address >> 8, 3, &data.d8[1]);
+
+ if (ret < 0)
+ goto out;
+
+ *val = (be32_to_cpu(data.d32) & 0xFFFFFF) - 0x800000;
+
+ switch (chan->type) {
+ case IIO_TEMP:
+ /* temperature in milli degrees Celsius
+ * T = ((*val / 2048) - 4096) * 1000
+ */
+ *val = (*val * 125) / 256;
+ break;
+ case IIO_VOLTAGE:
+ if (chan->channel == 1) /* supply_raw*/
+ *val = *val * 6;
+ break;
+ default:
+ break;
+ }
+
+ ret = IIO_VAL_INT;
+ break;
+ case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ switch (chan->type) {
+ case IIO_CAPACITANCE:
+ reg = AD7746_REG_CAP_GAINH;
+ break;
+ case IIO_VOLTAGE:
+ reg = AD7746_REG_VOLT_GAINH;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = i2c_smbus_read_word_data(chip->client, reg);
+ if (ret < 0)
+ goto out;
+ /* 1 + gain_val / 2^16 */
+ *val = 1;
+ *val2 = (15625 * swab16(ret)) / 1024;
+
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED):
+ ret = i2c_smbus_read_word_data(chip->client,
+ AD7746_REG_CAP_OFFH);
+ if (ret < 0)
+ goto out;
+ *val = swab16(ret);
+
+ ret = IIO_VAL_INT;
+ break;
+ case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ *val = AD7746_CAPDAC_DACP(chip->capdac[chan->channel]
+ [chan->differential]) * 338646;
+
+ ret = IIO_VAL_INT;
+ break;
+ case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ switch (chan->type) {
+ case IIO_CAPACITANCE:
+ /* 8.192pf / 2^24 */
+ *val2 = 488;
+ *val = 0;
+ break;
+ case IIO_VOLTAGE:
+ /* 1170mV / 2^23 */
+ *val2 = 139475;
+ *val = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = IIO_VAL_INT_PLUS_NANO;
+ break;
+ default:
+ ret = -EINVAL;
+ };
+out:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static const struct iio_info ad7746_info = {
+ .attrs = &ad7746_attribute_group,
+ .read_raw = &ad7746_read_raw,
+ .write_raw = &ad7746_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7746_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ad7746_platform_data *pdata = client->dev.platform_data;
+ struct ad7746_chip_info *chip;
+ struct iio_dev *indio_dev;
+ int ret = 0;
+ unsigned char regval = 0;
+
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ chip = iio_priv(indio_dev);
+ /* this is only used for device removal purposes */
+ i2c_set_clientdata(client, indio_dev);
+
+ chip->client = client;
+ chip->capdac_set = -1;
+
+ /* Establish that the iio_dev is a child of the i2c device */
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ad7746_info;
+ indio_dev->channels = ad7746_channels;
+ if (id->driver_data == 7746)
+ indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
+ else
+ indio_dev->num_channels = ARRAY_SIZE(ad7746_channels) - 2;
+ indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (pdata) {
+ if (pdata->exca_en) {
+ if (pdata->exca_inv_en)
+ regval |= AD7746_EXCSETUP_NEXCA;
+ else
+ regval |= AD7746_EXCSETUP_EXCA;
+ }
+
+ if (pdata->excb_en) {
+ if (pdata->excb_inv_en)
+ regval |= AD7746_EXCSETUP_NEXCB;
+ else
+ regval |= AD7746_EXCSETUP_EXCB;
+ }
+
+ regval |= AD7746_EXCSETUP_EXCLVL(pdata->exclvl);
+ } else {
+ dev_warn(&client->dev, "No platform data? using default\n");
+ regval = AD7746_EXCSETUP_EXCA | AD7746_EXCSETUP_EXCB |
+ AD7746_EXCSETUP_EXCLVL(3);
+ }
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_EXC_SETUP, regval);
+ if (ret < 0)
+ goto error_free_dev;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ dev_info(&client->dev, "%s capacitive sensor registered\n", id->name);
+
+ return 0;
+
+error_free_dev:
+ iio_free_device(indio_dev);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad7746_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad7746_id[] = {
+ { "ad7745", 7745 },
+ { "ad7746", 7746 },
+ { "ad7747", 7747 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad7746_id);
+
+static struct i2c_driver ad7746_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = ad7746_probe,
+ .remove = __devexit_p(ad7746_remove),
+ .id_table = ad7746_id,
+};
+
+static __init int ad7746_init(void)
+{
+ return i2c_add_driver(&ad7746_driver);
+}
+
+static __exit void ad7746_exit(void)
+{
+ i2c_del_driver(&ad7746_driver);
+}
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD7746/5/7 capacitive sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7746_init);
+module_exit(ad7746_exit);
diff --git a/drivers/staging/iio/cdc/ad7746.h b/drivers/staging/iio/cdc/ad7746.h
new file mode 100644
index 00000000000..ea8572d1df0
--- /dev/null
+++ b/drivers/staging/iio/cdc/ad7746.h
@@ -0,0 +1,29 @@
+/*
+ * AD7746 capacitive sensor driver supporting AD7745, AD7746 and AD7747
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef IIO_CDC_AD7746_H_
+#define IIO_CDC_AD7746_H_
+
+/*
+ * TODO: struct ad7746_platform_data needs to go into include/linux/iio
+ */
+
+#define AD7466_EXCLVL_0 0 /* +-VDD/8 */
+#define AD7466_EXCLVL_1 1 /* +-VDD/4 */
+#define AD7466_EXCLVL_2 2 /* +-VDD * 3/8 */
+#define AD7466_EXCLVL_3 3 /* +-VDD/2 */
+
+struct ad7746_platform_data {
+ unsigned char exclvl; /*Excitation Voltage Level */
+ bool exca_en; /* enables EXCA pin as the excitation output */
+ bool exca_inv_en; /* enables /EXCA pin as the excitation output */
+ bool excb_en; /* enables EXCB pin as the excitation output */
+ bool excb_inv_en; /* enables /EXCB pin as the excitation output */
+};
+
+#endif /* IIO_CDC_AD7746_H_ */
diff --git a/drivers/staging/iio/chrdev.h b/drivers/staging/iio/chrdev.h
index 3e31ee6220e..d8e736f6052 100644
--- a/drivers/staging/iio/chrdev.h
+++ b/drivers/staging/iio/chrdev.h
@@ -9,26 +9,6 @@
#ifndef _IIO_CHRDEV_H_
#define _IIO_CHRDEV_H_
-struct iio_dev;
-
-/**
- * struct iio_handler - Structure used to specify file operations
- * for a particular chrdev
- * @chrdev: character device structure
- * @id: the location in the handler table - used for deallocation.
- * @flags: file operations related flags including busy flag.
- * @private: handler specific data used by the fileops registered with
- * the chrdev.
- */
-struct iio_handler {
- struct cdev chrdev;
- int id;
- unsigned long flags;
- void *private;
-};
-
-#define iio_cdev_to_handler(cd) \
- container_of(cd, struct iio_handler, chrdev)
/**
* struct iio_event_data - The actual event being pushed to userspace
@@ -37,39 +17,9 @@ struct iio_handler {
* the interrupt handler)
*/
struct iio_event_data {
- int id;
+ u64 id;
s64 timestamp;
};
-/**
- * struct iio_detected_event_list - list element for events that have occurred
- * @list: linked list header
- * @ev: the event itself
- */
-struct iio_detected_event_list {
- struct list_head list;
- struct iio_event_data ev;
-};
-
-/**
- * struct iio_event_interface - chrdev interface for an event line
- * @dev: device assocated with event interface
- * @handler: fileoperations and related control for the chrdev
- * @wait: wait queue to allow blocking reads of events
- * @event_list_lock: mutex to protect the list of detected events
- * @det_events: list of detected events
- * @max_events: maximum number of events before new ones are dropped
- * @current_events: number of events in detected list
- */
-struct iio_event_interface {
- struct device dev;
- struct iio_handler handler;
- wait_queue_head_t wait;
- struct mutex event_list_lock;
- struct list_head det_events;
- int max_events;
- int current_events;
- struct list_head dev_attr_list;
-};
-
+#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int)
#endif
diff --git a/drivers/staging/iio/dac/Kconfig b/drivers/staging/iio/dac/Kconfig
index 7ddae357f20..fac8549dc8e 100644
--- a/drivers/staging/iio/dac/Kconfig
+++ b/drivers/staging/iio/dac/Kconfig
@@ -1,14 +1,35 @@
#
# DAC drivers
#
-comment "Digital to analog convertors"
+menu "Digital to analog converters"
+
+config AD5064
+ tristate "Analog Devices AD5064/64-1/44/24 DAC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5064, AD5064-1,
+ AD5044, AD5024 Digital to Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5064.
+
+config AD5360
+ tristate "Analog Devices Analog Devices AD5360/61/62/63/70/71/73 DAC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5360, AD5361,
+ AD5362, AD5363, AD5370, AD5371, AD5373 multi-channel
+ Digital to Analog Converters (DAC).
+
+ To compile this driver as module choose M here: the module will be called
+ ad5360.
config AD5624R_SPI
tristate "Analog Devices AD5624/44/64R DAC spi driver"
depends on SPI
help
Say yes here to build support for Analog Devices AD5624R, AD5644R and
- AD5664R convertors (DAC). This driver uses the common SPI interface.
+ AD5664R converters (DAC). This driver uses the common SPI interface.
config AD5446
tristate "Analog Devices AD5444/6, AD5620/40/60 and AD5542A/12A DAC SPI driver"
@@ -62,3 +83,5 @@ config MAX517
This driver can also be built as a module. If so, the module
will be called max517.
+
+endmenu
diff --git a/drivers/staging/iio/dac/Makefile b/drivers/staging/iio/dac/Makefile
index 7f4f2ed031e..07b6f5ebdb5 100644
--- a/drivers/staging/iio/dac/Makefile
+++ b/drivers/staging/iio/dac/Makefile
@@ -2,7 +2,9 @@
# Makefile for industrial I/O DAC drivers
#
+obj-$(CONFIG_AD5360) += ad5360.o
obj-$(CONFIG_AD5624R_SPI) += ad5624r_spi.o
+obj-$(CONFIG_AD5064) += ad5064.o
obj-$(CONFIG_AD5504) += ad5504.o
obj-$(CONFIG_AD5446) += ad5446.o
obj-$(CONFIG_AD5791) += ad5791.o
diff --git a/drivers/staging/iio/dac/ad5064.c b/drivers/staging/iio/dac/ad5064.c
new file mode 100644
index 00000000000..24279f2ae41
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5064.c
@@ -0,0 +1,463 @@
+/*
+ * AD5064, AD5064-1, AD5044, AD5024 Digital to analog converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dac.h"
+
+#define AD5064_DAC_CHANNELS 4
+
+#define AD5064_ADDR(x) ((x) << 20)
+#define AD5064_CMD(x) ((x) << 24)
+
+#define AD5064_ADDR_DAC(chan) (chan)
+#define AD5064_ADDR_ALL_DAC 0xF
+
+#define AD5064_CMD_WRITE_INPUT_N 0x0
+#define AD5064_CMD_UPDATE_DAC_N 0x1
+#define AD5064_CMD_WRITE_INPUT_N_UPDATE_ALL 0x2
+#define AD5064_CMD_WRITE_INPUT_N_UPDATE_N 0x3
+#define AD5064_CMD_POWERDOWN_DAC 0x4
+#define AD5064_CMD_CLEAR 0x5
+#define AD5064_CMD_LDAC_MASK 0x6
+#define AD5064_CMD_RESET 0x7
+#define AD5064_CMD_DAISY_CHAIN_ENABLE 0x8
+
+#define AD5064_LDAC_PWRDN_NONE 0x0
+#define AD5064_LDAC_PWRDN_1K 0x1
+#define AD5064_LDAC_PWRDN_100K 0x2
+#define AD5064_LDAC_PWRDN_3STATE 0x3
+
+/**
+ * struct ad5064_chip_info - chip specific information
+ * @shared_vref: whether the vref supply is shared between channels
+ * @channel: channel specification
+*/
+
+struct ad5064_chip_info {
+ bool shared_vref;
+ struct iio_chan_spec channel[AD5064_DAC_CHANNELS];
+};
+
+/**
+ * struct ad5064_state - driver instance specific data
+ * @spi: spi_device
+ * @chip_info: chip model specific constants, available modes etc
+ * @vref_reg: vref supply regulators
+ * @pwr_down: whether channel is powered down
+ * @pwr_down_mode: channel's current power down mode
+ * @dac_cache: current DAC raw value (chip does not support readback)
+ * @data: spi transfer buffers
+ */
+
+struct ad5064_state {
+ struct spi_device *spi;
+ const struct ad5064_chip_info *chip_info;
+ struct regulator_bulk_data vref_reg[AD5064_DAC_CHANNELS];
+ bool pwr_down[AD5064_DAC_CHANNELS];
+ u8 pwr_down_mode[AD5064_DAC_CHANNELS];
+ unsigned int dac_cache[AD5064_DAC_CHANNELS];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ __be32 data ____cacheline_aligned;
+};
+
+enum ad5064_type {
+ ID_AD5024,
+ ID_AD5044,
+ ID_AD5064,
+ ID_AD5064_1,
+};
+
+#define AD5064_CHANNEL(chan, bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (chan), \
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ .address = AD5064_ADDR_DAC(chan), \
+ .scan_type = IIO_ST('u', (bits), 16, 20 - (bits)) \
+}
+
+static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
+ [ID_AD5024] = {
+ .shared_vref = false,
+ .channel[0] = AD5064_CHANNEL(0, 12),
+ .channel[1] = AD5064_CHANNEL(1, 12),
+ .channel[2] = AD5064_CHANNEL(2, 12),
+ .channel[3] = AD5064_CHANNEL(3, 12),
+ },
+ [ID_AD5044] = {
+ .shared_vref = false,
+ .channel[0] = AD5064_CHANNEL(0, 14),
+ .channel[1] = AD5064_CHANNEL(1, 14),
+ .channel[2] = AD5064_CHANNEL(2, 14),
+ .channel[3] = AD5064_CHANNEL(3, 14),
+ },
+ [ID_AD5064] = {
+ .shared_vref = false,
+ .channel[0] = AD5064_CHANNEL(0, 16),
+ .channel[1] = AD5064_CHANNEL(1, 16),
+ .channel[2] = AD5064_CHANNEL(2, 16),
+ .channel[3] = AD5064_CHANNEL(3, 16),
+ },
+ [ID_AD5064_1] = {
+ .shared_vref = true,
+ .channel[0] = AD5064_CHANNEL(0, 16),
+ .channel[1] = AD5064_CHANNEL(1, 16),
+ .channel[2] = AD5064_CHANNEL(2, 16),
+ .channel[3] = AD5064_CHANNEL(3, 16),
+ },
+};
+
+static int ad5064_spi_write(struct ad5064_state *st, unsigned int cmd,
+ unsigned int addr, unsigned int val, unsigned int shift)
+{
+ val <<= shift;
+
+ st->data = cpu_to_be32(AD5064_CMD(cmd) | AD5064_ADDR(addr) | val);
+
+ return spi_write(st->spi, &st->data, sizeof(st->data));
+}
+
+static int ad5064_sync_powerdown_mode(struct ad5064_state *st,
+ unsigned int channel)
+{
+ unsigned int val;
+ int ret;
+
+ val = (0x1 << channel);
+
+ if (st->pwr_down[channel])
+ val |= st->pwr_down_mode[channel] << 8;
+
+ ret = ad5064_spi_write(st, AD5064_CMD_POWERDOWN_DAC, 0, val, 0);
+
+ return ret;
+}
+
+static const char ad5064_powerdown_modes[][15] = {
+ [AD5064_LDAC_PWRDN_NONE] = "",
+ [AD5064_LDAC_PWRDN_1K] = "1kohm_to_gnd",
+ [AD5064_LDAC_PWRDN_100K] = "100kohm_to_gnd",
+ [AD5064_LDAC_PWRDN_3STATE] = "three_state",
+};
+
+static ssize_t ad5064_read_powerdown_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5064_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%s\n",
+ ad5064_powerdown_modes[st->pwr_down_mode[this_attr->address]]);
+}
+
+static ssize_t ad5064_write_powerdown_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5064_state *st = iio_priv(indio_dev);
+ unsigned int mode, i;
+ int ret;
+
+ mode = 0;
+
+ for (i = 1; i < ARRAY_SIZE(ad5064_powerdown_modes); ++i) {
+ if (sysfs_streq(buf, ad5064_powerdown_modes[i])) {
+ mode = i;
+ break;
+ }
+ }
+ if (mode == 0)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+ st->pwr_down_mode[this_attr->address] = mode;
+
+ ret = ad5064_sync_powerdown_mode(st, this_attr->address);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t ad5064_read_dac_powerdown(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5064_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ return sprintf(buf, "%d\n", st->pwr_down[this_attr->address]);
+}
+
+static ssize_t ad5064_write_dac_powerdown(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5064_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ bool pwr_down;
+ int ret;
+
+ ret = strtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+ st->pwr_down[this_attr->address] = pwr_down;
+
+ ret = ad5064_sync_powerdown_mode(st, this_attr->address);
+ mutex_unlock(&indio_dev->mlock);
+ return ret ? ret : len;
+}
+
+static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
+ "1kohm_to_gnd 100kohm_to_gnd three_state");
+
+#define IIO_DEV_ATTR_DAC_POWERDOWN_MODE(_chan) \
+ IIO_DEVICE_ATTR(out_voltage##_chan##_powerdown_mode, \
+ S_IRUGO | S_IWUSR, \
+ ad5064_read_powerdown_mode, \
+ ad5064_write_powerdown_mode, _chan);
+
+#define IIO_DEV_ATTR_DAC_POWERDOWN(_chan) \
+ IIO_DEVICE_ATTR(out_voltage##_chan##_powerdown, \
+ S_IRUGO | S_IWUSR, \
+ ad5064_read_dac_powerdown, \
+ ad5064_write_dac_powerdown, _chan)
+
+static IIO_DEV_ATTR_DAC_POWERDOWN(0);
+static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(0);
+static IIO_DEV_ATTR_DAC_POWERDOWN(1);
+static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(1);
+static IIO_DEV_ATTR_DAC_POWERDOWN(2);
+static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(2);
+static IIO_DEV_ATTR_DAC_POWERDOWN(3);
+static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(3);
+
+static struct attribute *ad5064_attributes[] = {
+ &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_powerdown_mode.dev_attr.attr,
+ &iio_dev_attr_out_voltage1_powerdown_mode.dev_attr.attr,
+ &iio_dev_attr_out_voltage2_powerdown_mode.dev_attr.attr,
+ &iio_dev_attr_out_voltage3_powerdown_mode.dev_attr.attr,
+ &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad5064_attribute_group = {
+ .attrs = ad5064_attributes,
+};
+
+static int ad5064_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5064_state *st = iio_priv(indio_dev);
+ unsigned long scale_uv;
+ unsigned int vref;
+
+ switch (m) {
+ case 0:
+ *val = st->dac_cache[chan->channel];
+ return IIO_VAL_INT;
+ case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ vref = st->chip_info->shared_vref ? 0 : chan->channel;
+ scale_uv = regulator_get_voltage(st->vref_reg[vref].consumer);
+ if (scale_uv < 0)
+ return scale_uv;
+
+ scale_uv = (scale_uv * 100) >> chan->scan_type.realbits;
+ *val = scale_uv / 100000;
+ *val2 = (scale_uv % 100000) * 10;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int ad5064_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct ad5064_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case 0:
+ if (val > (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5064_spi_write(st, AD5064_CMD_WRITE_INPUT_N_UPDATE_N,
+ chan->address, val, chan->scan_type.shift);
+ if (ret == 0)
+ st->dac_cache[chan->channel] = val;
+ mutex_unlock(&indio_dev->mlock);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info ad5064_info = {
+ .read_raw = ad5064_read_raw,
+ .write_raw = ad5064_write_raw,
+ .attrs = &ad5064_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static inline unsigned int ad5064_num_vref(struct ad5064_state *st)
+{
+ return st->chip_info->shared_vref ? 1 : AD5064_DAC_CHANNELS;
+}
+
+static const char * const ad5064_vref_names[] = {
+ "vrefA",
+ "vrefB",
+ "vrefC",
+ "vrefD",
+};
+
+static const char * const ad5064_vref_name(struct ad5064_state *st,
+ unsigned int vref)
+{
+ return st->chip_info->shared_vref ? "vref" : ad5064_vref_names[vref];
+}
+
+static int __devinit ad5064_probe(struct spi_device *spi)
+{
+ enum ad5064_type type = spi_get_device_id(spi)->driver_data;
+ struct iio_dev *indio_dev;
+ struct ad5064_state *st;
+ unsigned int i;
+ int ret;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->chip_info = &ad5064_chip_info_tbl[type];
+ st->spi = spi;
+
+ for (i = 0; i < ad5064_num_vref(st); ++i)
+ st->vref_reg[i].supply = ad5064_vref_name(st, i);
+
+ ret = regulator_bulk_get(&st->spi->dev, ad5064_num_vref(st),
+ st->vref_reg);
+ if (ret)
+ goto error_free;
+
+ ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg);
+ if (ret)
+ goto error_free_reg;
+
+ for (i = 0; i < AD5064_DAC_CHANNELS; ++i) {
+ st->pwr_down_mode[i] = AD5064_LDAC_PWRDN_1K;
+ st->dac_cache[i] = 0x8000;
+ }
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5064_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channel;
+ indio_dev->num_channels = AD5064_DAC_CHANNELS;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ return 0;
+
+error_disable_reg:
+ regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
+error_free_reg:
+ regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
+error_free:
+ iio_free_device(indio_dev);
+
+ return ret;
+}
+
+
+static int __devexit ad5064_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5064_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
+ regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
+
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5064_id[] = {
+ {"ad5024", ID_AD5024},
+ {"ad5044", ID_AD5044},
+ {"ad5064", ID_AD5064},
+ {"ad5064-1", ID_AD5064_1},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5064_id);
+
+static struct spi_driver ad5064_driver = {
+ .driver = {
+ .name = "ad5064",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5064_probe,
+ .remove = __devexit_p(ad5064_remove),
+ .id_table = ad5064_id,
+};
+
+static __init int ad5064_spi_init(void)
+{
+ return spi_register_driver(&ad5064_driver);
+}
+module_init(ad5064_spi_init);
+
+static __exit void ad5064_spi_exit(void)
+{
+ spi_unregister_driver(&ad5064_driver);
+}
+module_exit(ad5064_spi_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5064/64-1/44/24 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/ad5360.c b/drivers/staging/iio/dac/ad5360.c
new file mode 100644
index 00000000000..72d0f3f0d6a
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5360.c
@@ -0,0 +1,581 @@
+/*
+ * Analog devices AD5360, AD5361, AD5362, AD5363, AD5370, AD5371, AD5373
+ * multi-channel Digital to Analog Converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dac.h"
+
+#define AD5360_CMD(x) ((x) << 22)
+#define AD5360_ADDR(x) ((x) << 16)
+
+#define AD5360_READBACK_TYPE(x) ((x) << 13)
+#define AD5360_READBACK_ADDR(x) ((x) << 7)
+
+#define AD5360_CHAN_ADDR(chan) ((chan) + 0x8)
+
+#define AD5360_CMD_WRITE_DATA 0x3
+#define AD5360_CMD_WRITE_OFFSET 0x2
+#define AD5360_CMD_WRITE_GAIN 0x1
+#define AD5360_CMD_SPECIAL_FUNCTION 0x0
+
+/* Special function register addresses */
+#define AD5360_REG_SF_NOP 0x0
+#define AD5360_REG_SF_CTRL 0x1
+#define AD5360_REG_SF_OFS(x) (0x2 + (x))
+#define AD5360_REG_SF_READBACK 0x5
+
+#define AD5360_SF_CTRL_PWR_DOWN BIT(0)
+
+#define AD5360_READBACK_X1A 0x0
+#define AD5360_READBACK_X1B 0x1
+#define AD5360_READBACK_OFFSET 0x2
+#define AD5360_READBACK_GAIN 0x3
+#define AD5360_READBACK_SF 0x4
+
+
+/**
+ * struct ad5360_chip_info - chip specific information
+ * @channel_template: channel specification template
+ * @num_channels: number of channels
+ * @channels_per_group: number of channels per group
+ * @num_vrefs: number of vref supplies for the chip
+*/
+
+struct ad5360_chip_info {
+ struct iio_chan_spec channel_template;
+ unsigned int num_channels;
+ unsigned int channels_per_group;
+ unsigned int num_vrefs;
+};
+
+/**
+ * struct ad5360_state - driver instance specific data
+ * @spi: spi_device
+ * @chip_info: chip model specific constants, available modes etc
+ * @vref_reg: vref supply regulators
+ * @ctrl: control register cache
+ * @data: spi transfer buffers
+ */
+
+struct ad5360_state {
+ struct spi_device *spi;
+ const struct ad5360_chip_info *chip_info;
+ struct regulator_bulk_data vref_reg[3];
+ unsigned int ctrl;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data[2] ____cacheline_aligned;
+};
+
+enum ad5360_type {
+ ID_AD5360,
+ ID_AD5361,
+ ID_AD5362,
+ ID_AD5363,
+ ID_AD5370,
+ ID_AD5371,
+ ID_AD5372,
+ ID_AD5373,
+};
+
+#define AD5360_CHANNEL(bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE) | \
+ (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) | \
+ (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) | \
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE), \
+ .scan_type = IIO_ST('u', (bits), 16, 16 - (bits)) \
+}
+
+static const struct ad5360_chip_info ad5360_chip_info_tbl[] = {
+ [ID_AD5360] = {
+ .channel_template = AD5360_CHANNEL(16),
+ .num_channels = 16,
+ .channels_per_group = 8,
+ .num_vrefs = 2,
+ },
+ [ID_AD5361] = {
+ .channel_template = AD5360_CHANNEL(14),
+ .num_channels = 16,
+ .channels_per_group = 8,
+ .num_vrefs = 2,
+ },
+ [ID_AD5362] = {
+ .channel_template = AD5360_CHANNEL(16),
+ .num_channels = 8,
+ .channels_per_group = 4,
+ .num_vrefs = 2,
+ },
+ [ID_AD5363] = {
+ .channel_template = AD5360_CHANNEL(14),
+ .num_channels = 8,
+ .channels_per_group = 4,
+ .num_vrefs = 2,
+ },
+ [ID_AD5370] = {
+ .channel_template = AD5360_CHANNEL(16),
+ .num_channels = 40,
+ .channels_per_group = 8,
+ .num_vrefs = 2,
+ },
+ [ID_AD5371] = {
+ .channel_template = AD5360_CHANNEL(14),
+ .num_channels = 40,
+ .channels_per_group = 8,
+ .num_vrefs = 3,
+ },
+ [ID_AD5372] = {
+ .channel_template = AD5360_CHANNEL(16),
+ .num_channels = 32,
+ .channels_per_group = 8,
+ .num_vrefs = 2,
+ },
+ [ID_AD5373] = {
+ .channel_template = AD5360_CHANNEL(14),
+ .num_channels = 32,
+ .channels_per_group = 8,
+ .num_vrefs = 2,
+ },
+};
+
+static unsigned int ad5360_get_channel_vref_index(struct ad5360_state *st,
+ unsigned int channel)
+{
+ unsigned int i;
+
+ /* The first groups have their own vref, while the remaining groups
+ * share the last vref */
+ i = channel / st->chip_info->channels_per_group;
+ if (i >= st->chip_info->num_vrefs)
+ i = st->chip_info->num_vrefs - 1;
+
+ return i;
+}
+
+static int ad5360_get_channel_vref(struct ad5360_state *st,
+ unsigned int channel)
+{
+ unsigned int i = ad5360_get_channel_vref_index(st, channel);
+
+ return regulator_get_voltage(st->vref_reg[i].consumer);
+}
+
+
+static int ad5360_write_unlocked(struct iio_dev *indio_dev,
+ unsigned int cmd, unsigned int addr, unsigned int val,
+ unsigned int shift)
+{
+ struct ad5360_state *st = iio_priv(indio_dev);
+
+ val <<= shift;
+ val |= AD5360_CMD(cmd) | AD5360_ADDR(addr);
+ st->data[0].d32 = cpu_to_be32(val);
+
+ return spi_write(st->spi, &st->data[0].d8[1], 3);
+}
+
+static int ad5360_write(struct iio_dev *indio_dev, unsigned int cmd,
+ unsigned int addr, unsigned int val, unsigned int shift)
+{
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5360_write_unlocked(indio_dev, cmd, addr, val, shift);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
+ unsigned int addr)
+{
+ struct ad5360_state *st = iio_priv(indio_dev);
+ struct spi_message m;
+ int ret;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->data[1].d8[1],
+ .len = 3,
+ },
+ };
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&indio_dev->mlock);
+
+ st->data[0].d32 = cpu_to_be32(AD5360_CMD(AD5360_CMD_SPECIAL_FUNCTION) |
+ AD5360_ADDR(AD5360_REG_SF_READBACK) |
+ AD5360_READBACK_TYPE(type) |
+ AD5360_READBACK_ADDR(addr));
+
+ ret = spi_sync(st->spi, &m);
+ if (ret >= 0)
+ ret = be32_to_cpu(st->data[1].d32) & 0xffff;
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t ad5360_read_dac_powerdown(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5360_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", (bool)(st->ctrl & AD5360_SF_CTRL_PWR_DOWN));
+}
+
+static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
+ unsigned int clr)
+{
+ struct ad5360_state *st = iio_priv(indio_dev);
+ unsigned int ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ st->ctrl |= set;
+ st->ctrl &= ~clr;
+
+ ret = ad5360_write_unlocked(indio_dev, AD5360_CMD_SPECIAL_FUNCTION,
+ AD5360_REG_SF_CTRL, st->ctrl, 0);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t ad5360_write_dac_powerdown(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ bool pwr_down;
+ int ret;
+
+ ret = strtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ if (pwr_down)
+ ret = ad5360_update_ctrl(indio_dev, AD5360_SF_CTRL_PWR_DOWN, 0);
+ else
+ ret = ad5360_update_ctrl(indio_dev, 0, AD5360_SF_CTRL_PWR_DOWN);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(out_voltage_powerdown,
+ S_IRUGO | S_IWUSR,
+ ad5360_read_dac_powerdown,
+ ad5360_write_dac_powerdown, 0);
+
+static struct attribute *ad5360_attributes[] = {
+ &iio_dev_attr_out_voltage_powerdown.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad5360_attribute_group = {
+ .attrs = ad5360_attributes,
+};
+
+static int ad5360_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5360_state *st = iio_priv(indio_dev);
+ int max_val = (1 << chan->scan_type.realbits);
+ unsigned int ofs_index;
+
+ switch (mask) {
+ case 0:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return ad5360_write(indio_dev, AD5360_CMD_WRITE_DATA,
+ chan->address, val, chan->scan_type.shift);
+
+ case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return ad5360_write(indio_dev, AD5360_CMD_WRITE_OFFSET,
+ chan->address, val, chan->scan_type.shift);
+
+ case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return ad5360_write(indio_dev, AD5360_CMD_WRITE_GAIN,
+ chan->address, val, chan->scan_type.shift);
+
+ case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ if (val <= -max_val || val > 0)
+ return -EINVAL;
+
+ val = -val;
+
+ /* offset is supposed to have the same scale as raw, but it
+ * is always 14bits wide, so on a chip where the raw value has
+ * more bits, we need to shift offset. */
+ val >>= (chan->scan_type.realbits - 14);
+
+ /* There is one DAC offset register per vref. Changing one
+ * channels offset will also change the offset for all other
+ * channels which share the same vref supply. */
+ ofs_index = ad5360_get_channel_vref_index(st, chan->channel);
+ return ad5360_write(indio_dev, AD5360_CMD_SPECIAL_FUNCTION,
+ AD5360_REG_SF_OFS(ofs_index), val, 0);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int ad5360_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5360_state *st = iio_priv(indio_dev);
+ unsigned long scale_uv;
+ unsigned int ofs_index;
+ int ret;
+
+ switch (m) {
+ case 0:
+ ret = ad5360_read(indio_dev, AD5360_READBACK_X1A,
+ chan->address);
+ if (ret < 0)
+ return ret;
+ *val = ret >> chan->scan_type.shift;
+ return IIO_VAL_INT;
+ case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ /* vout = 4 * vref * dac_code */
+ scale_uv = ad5360_get_channel_vref(st, chan->channel) * 4 * 100;
+ if (scale_uv < 0)
+ return scale_uv;
+
+ scale_uv >>= (chan->scan_type.realbits);
+ *val = scale_uv / 100000;
+ *val2 = (scale_uv % 100000) * 10;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ ret = ad5360_read(indio_dev, AD5360_READBACK_OFFSET,
+ chan->address);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ ret = ad5360_read(indio_dev, AD5360_READBACK_GAIN,
+ chan->address);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ ofs_index = ad5360_get_channel_vref_index(st, chan->channel);
+ ret = ad5360_read(indio_dev, AD5360_READBACK_SF,
+ AD5360_REG_SF_OFS(ofs_index));
+ if (ret < 0)
+ return ret;
+
+ ret <<= (chan->scan_type.realbits - 14);
+ *val = -ret;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad5360_info = {
+ .read_raw = ad5360_read_raw,
+ .write_raw = ad5360_write_raw,
+ .attrs = &ad5360_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static const char * const ad5360_vref_name[] = {
+ "vref0", "vref1", "vref2"
+};
+
+static int __devinit ad5360_alloc_channels(struct iio_dev *indio_dev)
+{
+ struct ad5360_state *st = iio_priv(indio_dev);
+ struct iio_chan_spec *channels;
+ unsigned int i;
+
+ channels = kcalloc(sizeof(struct iio_chan_spec),
+ st->chip_info->num_channels, GFP_KERNEL);
+
+ if (!channels)
+ return -ENOMEM;
+
+ for (i = 0; i < st->chip_info->num_channels; ++i) {
+ channels[i] = st->chip_info->channel_template;
+ channels[i].channel = i;
+ channels[i].address = AD5360_CHAN_ADDR(i);
+ }
+
+ indio_dev->channels = channels;
+
+ return 0;
+}
+
+static int __devinit ad5360_probe(struct spi_device *spi)
+{
+ enum ad5360_type type = spi_get_device_id(spi)->driver_data;
+ struct iio_dev *indio_dev;
+ struct ad5360_state *st;
+ unsigned int i;
+ int ret;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ dev_err(&spi->dev, "Failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->chip_info = &ad5360_chip_info_tbl[type];
+ st->spi = spi;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5360_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = st->chip_info->num_channels;
+
+ ret = ad5360_alloc_channels(indio_dev);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to allocate channel spec: %d\n", ret);
+ goto error_free;
+ }
+
+ for (i = 0; i < st->chip_info->num_vrefs; ++i)
+ st->vref_reg[i].supply = ad5360_vref_name[i];
+
+ ret = regulator_bulk_get(&st->spi->dev, st->chip_info->num_vrefs,
+ st->vref_reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to request vref regulators: %d\n", ret);
+ goto error_free_channels;
+ }
+
+ ret = regulator_bulk_enable(st->chip_info->num_vrefs, st->vref_reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable vref regulators: %d\n", ret);
+ goto error_free_reg;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
+ goto error_disable_reg;
+ }
+
+ return 0;
+
+error_disable_reg:
+ regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg);
+error_free_reg:
+ regulator_bulk_free(st->chip_info->num_vrefs, st->vref_reg);
+error_free_channels:
+ kfree(indio_dev->channels);
+error_free:
+ iio_free_device(indio_dev);
+
+ return ret;
+}
+
+static int __devexit ad5360_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5360_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ kfree(indio_dev->channels);
+
+ regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg);
+ regulator_bulk_free(st->chip_info->num_vrefs, st->vref_reg);
+
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5360_ids[] = {
+ { "ad5360", ID_AD5360 },
+ { "ad5361", ID_AD5361 },
+ { "ad5362", ID_AD5362 },
+ { "ad5363", ID_AD5363 },
+ { "ad5370", ID_AD5370 },
+ { "ad5371", ID_AD5371 },
+ { "ad5372", ID_AD5372 },
+ { "ad5373", ID_AD5373 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5360_ids);
+
+static struct spi_driver ad5360_driver = {
+ .driver = {
+ .name = "ad5360",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5360_probe,
+ .remove = __devexit_p(ad5360_remove),
+ .id_table = ad5360_ids,
+};
+
+static __init int ad5360_spi_init(void)
+{
+ return spi_register_driver(&ad5360_driver);
+}
+module_init(ad5360_spi_init);
+
+static __exit void ad5360_spi_exit(void)
+{
+ spi_unregister_driver(&ad5360_driver);
+}
+module_exit(ad5360_spi_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5360/61/62/63/70/71/72/73 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
index e8a9d0bf1ed..e1c204d51d8 100644
--- a/drivers/staging/iio/dac/ad5446.c
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -16,6 +16,7 @@
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -67,8 +68,8 @@ static ssize_t ad5446_write(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5446_state *st = iio_priv(indio_dev);
int ret;
long val;
@@ -81,11 +82,11 @@ static ssize_t ad5446_write(struct device *dev,
goto error_ret;
}
- mutex_lock(&dev_info->mlock);
+ mutex_lock(&indio_dev->mlock);
st->cached_val = val;
st->chip_info->store_sample(st, val);
ret = spi_sync(st->spi, &st->msg);
- mutex_unlock(&dev_info->mlock);
+ mutex_unlock(&indio_dev->mlock);
error_ret:
return ret ? ret : len;
@@ -97,21 +98,21 @@ static ssize_t ad5446_show_scale(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5446_state *st = iio_priv(indio_dev);
/* Corresponds to Vref / 2^(bits) */
unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
}
-static IIO_DEVICE_ATTR(out_scale, S_IRUGO, ad5446_show_scale, NULL, 0);
+static IIO_DEVICE_ATTR(out_voltage_scale, S_IRUGO, ad5446_show_scale, NULL, 0);
static ssize_t ad5446_write_powerdown_mode(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5446_state *st = iio_priv(indio_dev);
if (sysfs_streq(buf, "1kohm_to_gnd"))
st->pwr_down_mode = MODE_PWRDWN_1k;
@@ -128,8 +129,8 @@ static ssize_t ad5446_write_powerdown_mode(struct device *dev,
static ssize_t ad5446_read_powerdown_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5446_state *st = iio_priv(indio_dev);
char mode[][15] = {"", "1kohm_to_gnd", "100kohm_to_gnd", "three_state"};
@@ -140,8 +141,8 @@ static ssize_t ad5446_read_dac_powerdown(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5446_state *st = iio_priv(indio_dev);
return sprintf(buf, "%d\n", st->pwr_down);
}
@@ -150,8 +151,8 @@ static ssize_t ad5446_write_dac_powerdown(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5446_state *st = iio_priv(indio_dev);
unsigned long readin;
int ret;
@@ -162,7 +163,7 @@ static ssize_t ad5446_write_dac_powerdown(struct device *dev,
if (readin > 1)
ret = -EINVAL;
- mutex_lock(&dev_info->mlock);
+ mutex_lock(&indio_dev->mlock);
st->pwr_down = readin;
if (st->pwr_down)
@@ -171,28 +172,28 @@ static ssize_t ad5446_write_dac_powerdown(struct device *dev,
st->chip_info->store_sample(st, st->cached_val);
ret = spi_sync(st->spi, &st->msg);
- mutex_unlock(&dev_info->mlock);
+ mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
}
-static IIO_DEVICE_ATTR(out_powerdown_mode, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(out_voltage_powerdown_mode, S_IRUGO | S_IWUSR,
ad5446_read_powerdown_mode,
ad5446_write_powerdown_mode, 0);
-static IIO_CONST_ATTR(out_powerdown_mode_available,
+static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
"1kohm_to_gnd 100kohm_to_gnd three_state");
-static IIO_DEVICE_ATTR(out0_powerdown, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(out_voltage0_powerdown, S_IRUGO | S_IWUSR,
ad5446_read_dac_powerdown,
ad5446_write_dac_powerdown, 0);
static struct attribute *ad5446_attributes[] = {
- &iio_dev_attr_out0_raw.dev_attr.attr,
- &iio_dev_attr_out_scale.dev_attr.attr,
- &iio_dev_attr_out0_powerdown.dev_attr.attr,
- &iio_dev_attr_out_powerdown_mode.dev_attr.attr,
- &iio_const_attr_out_powerdown_mode_available.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage_scale.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
+ &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
NULL,
};
@@ -200,16 +201,18 @@ static mode_t ad5446_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5446_state *st = iio_priv(indio_dev);
mode_t mode = attr->mode;
if (!st->chip_info->store_pwr_down &&
- (attr == &iio_dev_attr_out0_powerdown.dev_attr.attr ||
- attr == &iio_dev_attr_out_powerdown_mode.dev_attr.attr ||
+ (attr == &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr ||
+ attr == &iio_dev_attr_out_voltage_powerdown_mode.
+ dev_attr.attr ||
attr ==
- &iio_const_attr_out_powerdown_mode_available.dev_attr.attr))
+ &iio_const_attr_out_voltage_powerdown_mode_available.
+ dev_attr.attr))
mode = 0;
return mode;
@@ -421,13 +424,14 @@ static int ad5446_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5446_state *st = iio_priv(indio_dev);
- struct regulator *reg = st->reg;
iio_device_unregister(indio_dev);
- if (!IS_ERR(reg)) {
- regulator_disable(reg);
- regulator_put(reg);
+ if (!IS_ERR(st->reg)) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
}
+ iio_free_device(indio_dev);
+
return 0;
}
diff --git a/drivers/staging/iio/dac/ad5504.c b/drivers/staging/iio/dac/ad5504.c
index 1915f459868..60dd6404d68 100644
--- a/drivers/staging/iio/dac/ad5504.c
+++ b/drivers/staging/iio/dac/ad5504.c
@@ -7,7 +7,6 @@
*/
#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -15,6 +14,7 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -168,10 +168,10 @@ static ssize_t ad5504_show_scale(struct device *dev,
return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
}
-static IIO_DEVICE_ATTR(out_scale, S_IRUGO, ad5504_show_scale, NULL, 0);
+static IIO_DEVICE_ATTR(out_voltage_scale, S_IRUGO, ad5504_show_scale, NULL, 0);
#define IIO_DEV_ATTR_OUT_RW_RAW(_num, _show, _store, _addr) \
- IIO_DEVICE_ATTR(out##_num##_raw, \
+ IIO_DEVICE_ATTR(out_voltage##_num##_raw, \
S_IRUGO | S_IWUSR, _show, _store, _addr)
static IIO_DEV_ATTR_OUT_RW_RAW(0, ad5504_read_dac,
@@ -183,17 +183,16 @@ static IIO_DEV_ATTR_OUT_RW_RAW(2, ad5504_read_dac,
static IIO_DEV_ATTR_OUT_RW_RAW(3, ad5504_read_dac,
ad5504_write_dac, AD5504_ADDR_DAC3);
-static IIO_DEVICE_ATTR(out_powerdown_mode, S_IRUGO |
+static IIO_DEVICE_ATTR(out_voltage_powerdown_mode, S_IRUGO |
S_IWUSR, ad5504_read_powerdown_mode,
ad5504_write_powerdown_mode, 0);
-static IIO_CONST_ATTR(out_powerdown_mode_available,
+static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
"20kohm_to_gnd three_state");
#define IIO_DEV_ATTR_DAC_POWERDOWN(_num, _show, _store, _addr) \
- IIO_DEVICE_ATTR(out##_num##_powerdown, \
+ IIO_DEVICE_ATTR(out_voltage##_num##_powerdown, \
S_IRUGO | S_IWUSR, _show, _store, _addr)
-
static IIO_DEV_ATTR_DAC_POWERDOWN(0, ad5504_read_dac_powerdown,
ad5504_write_dac_powerdown, 0);
static IIO_DEV_ATTR_DAC_POWERDOWN(1, ad5504_read_dac_powerdown,
@@ -204,17 +203,17 @@ static IIO_DEV_ATTR_DAC_POWERDOWN(3, ad5504_read_dac_powerdown,
ad5504_write_dac_powerdown, 3);
static struct attribute *ad5504_attributes[] = {
- &iio_dev_attr_out0_raw.dev_attr.attr,
- &iio_dev_attr_out1_raw.dev_attr.attr,
- &iio_dev_attr_out2_raw.dev_attr.attr,
- &iio_dev_attr_out3_raw.dev_attr.attr,
- &iio_dev_attr_out0_powerdown.dev_attr.attr,
- &iio_dev_attr_out1_powerdown.dev_attr.attr,
- &iio_dev_attr_out2_powerdown.dev_attr.attr,
- &iio_dev_attr_out3_powerdown.dev_attr.attr,
- &iio_dev_attr_out_powerdown_mode.dev_attr.attr,
- &iio_const_attr_out_powerdown_mode_available.dev_attr.attr,
- &iio_dev_attr_out_scale.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage1_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage2_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage3_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
+ &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
+ &iio_dev_attr_out_voltage_scale.dev_attr.attr,
NULL,
};
@@ -223,11 +222,11 @@ static const struct attribute_group ad5504_attribute_group = {
};
static struct attribute *ad5501_attributes[] = {
- &iio_dev_attr_out0_raw.dev_attr.attr,
- &iio_dev_attr_out0_powerdown.dev_attr.attr,
- &iio_dev_attr_out_powerdown_mode.dev_attr.attr,
- &iio_const_attr_out_powerdown_mode_available.dev_attr.attr,
- &iio_dev_attr_out_scale.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
+ &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
+ &iio_dev_attr_out_voltage_scale.dev_attr.attr,
NULL,
};
@@ -246,12 +245,13 @@ static struct attribute *ad5504_ev_attributes[] = {
static struct attribute_group ad5504_ev_attribute_group = {
.attrs = ad5504_ev_attributes,
+ .name = "events",
};
static irqreturn_t ad5504_event_handler(int irq, void *private)
{
- iio_push_event(private, 0,
- IIO_UNMOD_EVENT_CODE(IIO_EV_CLASS_TEMP,
+ iio_push_event(private,
+ IIO_UNMOD_EVENT_CODE(IIO_TEMP,
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
@@ -262,14 +262,12 @@ static irqreturn_t ad5504_event_handler(int irq, void *private)
static const struct iio_info ad5504_info = {
.attrs = &ad5504_attribute_group,
- .num_interrupt_lines = 1,
.event_attrs = &ad5504_ev_attribute_group,
.driver_module = THIS_MODULE,
};
static const struct iio_info ad5501_info = {
.attrs = &ad5501_attribute_group,
- .num_interrupt_lines = 1,
.event_attrs = &ad5504_ev_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -282,6 +280,11 @@ static int __devinit ad5504_probe(struct spi_device *spi)
struct regulator *reg;
int ret, voltage_uv = 0;
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
reg = regulator_get(&spi->dev, "vcc");
if (!IS_ERR(reg)) {
ret = regulator_enable(reg);
@@ -291,11 +294,6 @@ static int __devinit ad5504_probe(struct spi_device *spi)
voltage_uv = regulator_get_voltage(reg);
}
- indio_dev = iio_allocate_device(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_disable_reg;
- }
spi_set_drvdata(spi, indio_dev);
st = iio_priv(indio_dev);
if (voltage_uv)
@@ -315,10 +313,6 @@ static int __devinit ad5504_probe(struct spi_device *spi)
indio_dev->info = &ad5504_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
-
if (spi->irq) {
ret = request_threaded_irq(spi->irq,
NULL,
@@ -327,22 +321,26 @@ static int __devinit ad5504_probe(struct spi_device *spi)
spi_get_device_id(st->spi)->name,
indio_dev);
if (ret)
- goto error_unreg_iio_device;
+ goto error_disable_reg;
}
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_free_irq;
+
return 0;
-error_unreg_iio_device:
- iio_device_unregister(indio_dev);
-error_free_dev:
- iio_free_device(indio_dev);
+error_free_irq:
+ free_irq(spi->irq, indio_dev);
error_disable_reg:
if (!IS_ERR(reg))
- regulator_disable(st->reg);
+ regulator_disable(reg);
error_put_reg:
if (!IS_ERR(reg))
regulator_put(reg);
+ iio_free_device(indio_dev);
+error_ret:
return ret;
}
@@ -350,16 +348,16 @@ static int __devexit ad5504_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5504_state *st = iio_priv(indio_dev);
- struct regulator *reg = st->reg;
- if (spi->irq)
- free_irq(spi->irq, indio_dev);
iio_device_unregister(indio_dev);
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
- if (!IS_ERR(reg)) {
- regulator_disable(reg);
- regulator_put(reg);
+ if (!IS_ERR(st->reg)) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
}
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/dac/ad5624r_spi.c b/drivers/staging/iio/dac/ad5624r_spi.c
index a5b3776718e..284d8790036 100644
--- a/drivers/staging/iio/dac/ad5624r_spi.c
+++ b/drivers/staging/iio/dac/ad5624r_spi.c
@@ -7,7 +7,6 @@
*/
#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -15,6 +14,7 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -172,22 +172,22 @@ static ssize_t ad5624r_show_scale(struct device *dev,
return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
}
-static IIO_DEVICE_ATTR(out_scale, S_IRUGO, ad5624r_show_scale, NULL, 0);
+static IIO_DEVICE_ATTR(out_voltage_scale, S_IRUGO, ad5624r_show_scale, NULL, 0);
static IIO_DEV_ATTR_OUT_RAW(0, ad5624r_write_dac, AD5624R_ADDR_DAC0);
static IIO_DEV_ATTR_OUT_RAW(1, ad5624r_write_dac, AD5624R_ADDR_DAC1);
static IIO_DEV_ATTR_OUT_RAW(2, ad5624r_write_dac, AD5624R_ADDR_DAC2);
static IIO_DEV_ATTR_OUT_RAW(3, ad5624r_write_dac, AD5624R_ADDR_DAC3);
-static IIO_DEVICE_ATTR(out_powerdown_mode, S_IRUGO |
+static IIO_DEVICE_ATTR(out_voltage_powerdown_mode, S_IRUGO |
S_IWUSR, ad5624r_read_powerdown_mode,
ad5624r_write_powerdown_mode, 0);
-static IIO_CONST_ATTR(out_powerdown_mode_available,
+static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
"1kohm_to_gnd 100kohm_to_gnd three_state");
#define IIO_DEV_ATTR_DAC_POWERDOWN(_num, _show, _store, _addr) \
- IIO_DEVICE_ATTR(out##_num##_powerdown, \
+ IIO_DEVICE_ATTR(out_voltage##_num##_powerdown, \
S_IRUGO | S_IWUSR, _show, _store, _addr)
static IIO_DEV_ATTR_DAC_POWERDOWN(0, ad5624r_read_dac_powerdown,
@@ -200,17 +200,17 @@ static IIO_DEV_ATTR_DAC_POWERDOWN(3, ad5624r_read_dac_powerdown,
ad5624r_write_dac_powerdown, 3);
static struct attribute *ad5624r_attributes[] = {
- &iio_dev_attr_out0_raw.dev_attr.attr,
- &iio_dev_attr_out1_raw.dev_attr.attr,
- &iio_dev_attr_out2_raw.dev_attr.attr,
- &iio_dev_attr_out3_raw.dev_attr.attr,
- &iio_dev_attr_out0_powerdown.dev_attr.attr,
- &iio_dev_attr_out1_powerdown.dev_attr.attr,
- &iio_dev_attr_out2_powerdown.dev_attr.attr,
- &iio_dev_attr_out3_powerdown.dev_attr.attr,
- &iio_dev_attr_out_powerdown_mode.dev_attr.attr,
- &iio_const_attr_out_powerdown_mode_available.dev_attr.attr,
- &iio_dev_attr_out_scale.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage1_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage2_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage3_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
+ &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
+ &iio_dev_attr_out_voltage_scale.dev_attr.attr,
NULL,
};
@@ -227,24 +227,23 @@ static int __devinit ad5624r_probe(struct spi_device *spi)
{
struct ad5624r_state *st;
struct iio_dev *indio_dev;
- struct regulator *reg;
int ret, voltage_uv = 0;
- reg = regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(reg)) {
- ret = regulator_enable(reg);
- if (ret)
- goto error_put_reg;
-
- voltage_uv = regulator_get_voltage(reg);
- }
indio_dev = iio_allocate_device(sizeof(*st));
if (indio_dev == NULL) {
ret = -ENOMEM;
- goto error_disable_reg;
+ goto error_ret;
}
st = iio_priv(indio_dev);
- st->reg = reg;
+ st->reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ goto error_put_reg;
+
+ voltage_uv = regulator_get_voltage(st->reg);
+ }
+
spi_set_drvdata(spi, indio_dev);
st->chip_info =
&ad5624r_chip_info_tbl[spi_get_device_id(spi)->driver_data];
@@ -261,25 +260,25 @@ static int __devinit ad5624r_probe(struct spi_device *spi)
indio_dev->info = &ad5624r_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
-
ret = ad5624r_spi_write(spi, AD5624R_CMD_INTERNAL_REFER_SETUP, 0,
!!voltage_uv, 16);
if (ret)
- goto error_free_dev;
+ goto error_disable_reg;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_disable_reg;
return 0;
-error_free_dev:
- iio_free_device(indio_dev);
error_disable_reg:
- if (!IS_ERR(reg))
- regulator_disable(reg);
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
error_put_reg:
- if (!IS_ERR(reg))
- regulator_put(reg);
+ if (!IS_ERR(st->reg))
+ regulator_put(st->reg);
+ iio_free_device(indio_dev);
+error_ret:
return ret;
}
@@ -288,13 +287,13 @@ static int __devexit ad5624r_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5624r_state *st = iio_priv(indio_dev);
- struct regulator *reg = st->reg;
iio_device_unregister(indio_dev);
- if (!IS_ERR(reg)) {
- regulator_disable(reg);
- regulator_put(reg);
+ if (!IS_ERR(st->reg)) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
}
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/dac/ad5686.c b/drivers/staging/iio/dac/ad5686.c
index fd67cfa5edb..48389e1c19f 100644
--- a/drivers/staging/iio/dac/ad5686.c
+++ b/drivers/staging/iio/dac/ad5686.c
@@ -7,9 +7,9 @@
*/
#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <linux/fs.h>
#include <linux/device.h>
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
@@ -25,10 +25,7 @@
#define AD5686_ADDR(x) ((x) << 16)
#define AD5686_CMD(x) ((x) << 20)
-#define AD5686_ADDR_DAC0 0x1
-#define AD5686_ADDR_DAC1 0x2
-#define AD5686_ADDR_DAC2 0x4
-#define AD5686_ADDR_DAC3 0x8
+#define AD5686_ADDR_DAC(chan) (0x1 << (chan))
#define AD5686_ADDR_ALL_DAC 0xF
#define AD5686_CMD_NOOP 0x0
@@ -96,63 +93,35 @@ enum ad5686_supported_device_ids {
ID_AD5685,
ID_AD5686,
};
-
+#define AD5868_CHANNEL(chan, bits, shift) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = chan, \
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .address = AD5686_ADDR_DAC(chan), \
+ .scan_type = IIO_ST('u', bits, 16, shift) \
+}
static const struct ad5686_chip_info ad5686_chip_info_tbl[] = {
[ID_AD5684] = {
- .channel[0] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD5686_ADDR_DAC0,
- 0, IIO_ST('u', 12, 16, 4), 0),
- .channel[1] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD5686_ADDR_DAC1,
- 1, IIO_ST('u', 12, 16, 4), 0),
- .channel[2] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD5686_ADDR_DAC2,
- 2, IIO_ST('u', 12, 16, 4), 0),
- .channel[3] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD5686_ADDR_DAC3,
- 3, IIO_ST('u', 12, 16, 4), 0),
+ .channel[0] = AD5868_CHANNEL(0, 12, 4),
+ .channel[1] = AD5868_CHANNEL(1, 12, 4),
+ .channel[2] = AD5868_CHANNEL(2, 12, 4),
+ .channel[3] = AD5868_CHANNEL(3, 12, 4),
.int_vref_mv = 2500,
},
[ID_AD5685] = {
- .channel[0] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD5686_ADDR_DAC0,
- 0, IIO_ST('u', 14, 16, 2), 0),
- .channel[1] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD5686_ADDR_DAC1,
- 1, IIO_ST('u', 14, 16, 2), 0),
- .channel[2] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD5686_ADDR_DAC2,
- 2, IIO_ST('u', 14, 16, 2), 0),
- .channel[3] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD5686_ADDR_DAC3,
- 3, IIO_ST('u', 14, 16, 2), 0),
+ .channel[0] = AD5868_CHANNEL(0, 14, 2),
+ .channel[1] = AD5868_CHANNEL(1, 14, 2),
+ .channel[2] = AD5868_CHANNEL(2, 14, 2),
+ .channel[3] = AD5868_CHANNEL(3, 14, 2),
.int_vref_mv = 2500,
},
[ID_AD5686] = {
- .channel[0] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD5686_ADDR_DAC0,
- 0, IIO_ST('u', 16, 16, 0), 0),
- .channel[1] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD5686_ADDR_DAC1,
- 1, IIO_ST('u', 16, 16, 0), 0),
- .channel[2] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD5686_ADDR_DAC2,
- 2, IIO_ST('u', 16, 16, 0), 0),
- .channel[3] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- AD5686_ADDR_DAC3,
- 3, IIO_ST('u', 16, 16, 0), 0),
+ .channel[0] = AD5868_CHANNEL(0, 16, 0),
+ .channel[1] = AD5868_CHANNEL(1, 16, 0),
+ .channel[2] = AD5868_CHANNEL(2, 16, 0),
+ .channel[3] = AD5868_CHANNEL(3, 16, 0),
.int_vref_mv = 2500,
},
};
@@ -274,11 +243,12 @@ static ssize_t ad5686_write_dac_powerdown(struct device *dev,
return ret ? ret : len;
}
-static IIO_CONST_ATTR(out_powerdown_mode_available,
+static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
"1kohm_to_gnd 100kohm_to_gnd three_state");
-#define IIO_DEV_ATTR_DAC_POWERDOWN_MODE(_num) \
- IIO_DEVICE_ATTR(out##_num##_powerdown_mode, S_IRUGO | S_IWUSR, \
+#define IIO_DEV_ATTR_DAC_POWERDOWN_MODE(_num) \
+ IIO_DEVICE_ATTR(out_voltage##_num##_powerdown_mode, \
+ S_IRUGO | S_IWUSR, \
ad5686_read_powerdown_mode, \
ad5686_write_powerdown_mode, _num)
@@ -287,8 +257,9 @@ static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(1);
static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(2);
static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(3);
-#define IIO_DEV_ATTR_DAC_POWERDOWN(_num) \
- IIO_DEVICE_ATTR(out##_num##_powerdown, S_IRUGO | S_IWUSR, \
+#define IIO_DEV_ATTR_DAC_POWERDOWN(_num) \
+ IIO_DEVICE_ATTR(out_voltage##_num##_powerdown, \
+ S_IRUGO | S_IWUSR, \
ad5686_read_dac_powerdown, \
ad5686_write_dac_powerdown, _num)
@@ -298,15 +269,15 @@ static IIO_DEV_ATTR_DAC_POWERDOWN(2);
static IIO_DEV_ATTR_DAC_POWERDOWN(3);
static struct attribute *ad5686_attributes[] = {
- &iio_dev_attr_out0_powerdown.dev_attr.attr,
- &iio_dev_attr_out1_powerdown.dev_attr.attr,
- &iio_dev_attr_out2_powerdown.dev_attr.attr,
- &iio_dev_attr_out3_powerdown.dev_attr.attr,
- &iio_dev_attr_out0_powerdown_mode.dev_attr.attr,
- &iio_dev_attr_out1_powerdown_mode.dev_attr.attr,
- &iio_dev_attr_out2_powerdown_mode.dev_attr.attr,
- &iio_dev_attr_out3_powerdown_mode.dev_attr.attr,
- &iio_const_attr_out_powerdown_mode_available.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_powerdown_mode.dev_attr.attr,
+ &iio_dev_attr_out_voltage1_powerdown_mode.dev_attr.attr,
+ &iio_dev_attr_out_voltage2_powerdown_mode.dev_attr.attr,
+ &iio_dev_attr_out_voltage3_powerdown_mode.dev_attr.attr,
+ &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
NULL,
};
@@ -356,7 +327,7 @@ static int ad5686_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case 0:
- if (val > (1 << chan->scan_type.realbits))
+ if (val > (1 << chan->scan_type.realbits) || val < 0)
return -EINVAL;
mutex_lock(&indio_dev->mlock);
@@ -420,16 +391,16 @@ static int __devinit ad5686_probe(struct spi_device *spi)
indio_dev->channels = st->chip_info->channel;
indio_dev->num_channels = AD5686_DAC_CHANNELS;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_disable_reg;
-
regdone = 1;
ret = ad5686_spi_write(st, AD5686_CMD_INTERNAL_REFER_SETUP, 0,
!!voltage_uv, 0);
if (ret)
goto error_disable_reg;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
return 0;
error_disable_reg:
@@ -439,10 +410,7 @@ error_put_reg:
if (!IS_ERR(st->reg))
regulator_put(st->reg);
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
return ret;
}
@@ -451,14 +419,13 @@ static int __devexit ad5686_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5686_state *st = iio_priv(indio_dev);
- struct regulator *reg = st->reg;
-
- if (!IS_ERR(reg)) {
- regulator_disable(reg);
- regulator_put(reg);
- }
iio_device_unregister(indio_dev);
+ if (!IS_ERR(st->reg)) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
+ }
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/dac/ad5791.c b/drivers/staging/iio/dac/ad5791.c
index 64770d2a1b4..6fbca8d9615 100644
--- a/drivers/staging/iio/dac/ad5791.c
+++ b/drivers/staging/iio/dac/ad5791.c
@@ -7,7 +7,6 @@
*/
#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -15,6 +14,7 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -71,48 +71,23 @@ static int ad5791_spi_read(struct spi_device *spi, u8 addr, u32 *val)
return ret;
}
-static ssize_t ad5791_write_dac(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5791_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- long readin;
- int ret;
-
- ret = strict_strtol(buf, 10, &readin);
- if (ret)
- return ret;
-
- readin += (1 << (st->chip_info->bits - 1));
- readin &= AD5791_RES_MASK(st->chip_info->bits);
- readin <<= st->chip_info->left_shift;
-
- ret = ad5791_spi_write(st->spi, this_attr->address, readin);
- return ret ? ret : len;
+#define AD5791_CHAN(bits, shift) { \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .address = AD5791_ADDR_DAC0, \
+ .channel = 0, \
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED) | \
+ (1 << IIO_CHAN_INFO_OFFSET_SHARED), \
+ .scan_type = IIO_ST('u', bits, 24, shift) \
}
-static ssize_t ad5791_read_dac(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5791_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- int val;
-
- ret = ad5791_spi_read(st->spi, this_attr->address, &val);
- if (ret)
- return ret;
-
- val &= AD5791_DAC_MASK;
- val >>= st->chip_info->left_shift;
- val -= (1 << (st->chip_info->bits - 1));
-
- return sprintf(buf, "%d\n", val);
-}
+static const struct iio_chan_spec ad5791_channels[] = {
+ [ID_AD5760] = AD5791_CHAN(16, 4),
+ [ID_AD5780] = AD5791_CHAN(18, 2),
+ [ID_AD5781] = AD5791_CHAN(18, 2),
+ [ID_AD5791] = AD5791_CHAN(20, 0)
+};
static ssize_t ad5791_read_powerdown_mode(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -183,58 +158,24 @@ static ssize_t ad5791_write_dac_powerdown(struct device *dev,
return ret ? ret : len;
}
-static ssize_t ad5791_show_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5791_state *st = iio_priv(indio_dev);
- /* Corresponds to Vref / 2^(bits) */
- unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
-
- return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
-}
-static IIO_DEVICE_ATTR(out_scale, S_IRUGO, ad5791_show_scale, NULL, 0);
-
-static ssize_t ad5791_show_name(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5791_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%s\n", spi_get_device_id(st->spi)->name);
-}
-static IIO_DEVICE_ATTR(name, S_IRUGO, ad5791_show_name, NULL, 0);
-
-#define IIO_DEV_ATTR_OUT_RW_RAW(_num, _show, _store, _addr) \
- IIO_DEVICE_ATTR(out##_num##_raw, \
- S_IRUGO | S_IWUSR, _show, _store, _addr)
-
-static IIO_DEV_ATTR_OUT_RW_RAW(0, ad5791_read_dac,
- ad5791_write_dac, AD5791_ADDR_DAC0);
-
-static IIO_DEVICE_ATTR(out_powerdown_mode, S_IRUGO |
+static IIO_DEVICE_ATTR(out_voltage_powerdown_mode, S_IRUGO |
S_IWUSR, ad5791_read_powerdown_mode,
ad5791_write_powerdown_mode, 0);
-static IIO_CONST_ATTR(out_powerdown_mode_available,
+static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
"6kohm_to_gnd three_state");
#define IIO_DEV_ATTR_DAC_POWERDOWN(_num, _show, _store, _addr) \
- IIO_DEVICE_ATTR(out##_num##_powerdown, \
+ IIO_DEVICE_ATTR(out_voltage##_num##_powerdown, \
S_IRUGO | S_IWUSR, _show, _store, _addr)
static IIO_DEV_ATTR_DAC_POWERDOWN(0, ad5791_read_dac_powerdown,
ad5791_write_dac_powerdown, 0);
static struct attribute *ad5791_attributes[] = {
- &iio_dev_attr_out0_raw.dev_attr.attr,
- &iio_dev_attr_out0_powerdown.dev_attr.attr,
- &iio_dev_attr_out_powerdown_mode.dev_attr.attr,
- &iio_const_attr_out_powerdown_mode_available.dev_attr.attr,
- &iio_dev_attr_out_scale.dev_attr.attr,
- &iio_dev_attr_name.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
+ &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
NULL,
};
@@ -263,31 +204,78 @@ static int ad5780_get_lin_comp(unsigned int span)
else
return AD5780_LINCOMP_10_20;
}
-
static const struct ad5791_chip_info ad5791_chip_info_tbl[] = {
[ID_AD5760] = {
- .bits = 16,
- .left_shift = 4,
.get_lin_comp = ad5780_get_lin_comp,
},
[ID_AD5780] = {
- .bits = 18,
- .left_shift = 2,
.get_lin_comp = ad5780_get_lin_comp,
},
[ID_AD5781] = {
- .bits = 18,
- .left_shift = 2,
.get_lin_comp = ad5791_get_lin_comp,
},
[ID_AD5791] = {
- .bits = 20,
- .left_shift = 0,
.get_lin_comp = ad5791_get_lin_comp,
},
};
+static int ad5791_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5791_state *st = iio_priv(indio_dev);
+ u64 val64;
+ int ret;
+
+ switch (m) {
+ case 0:
+ ret = ad5791_spi_read(st->spi, chan->address, val);
+ if (ret)
+ return ret;
+ *val &= AD5791_DAC_MASK;
+ *val >>= chan->scan_type.shift;
+ return IIO_VAL_INT;
+ case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ *val = 0;
+ *val2 = (((u64)st->vref_mv) * 1000000ULL) >> chan->scan_type.realbits;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case (1 << IIO_CHAN_INFO_OFFSET_SHARED):
+ val64 = (((u64)st->vref_neg_mv) << chan->scan_type.realbits);
+ do_div(val64, st->vref_mv);
+ *val = -val64;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+
+};
+
+
+static int ad5791_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5791_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case 0:
+ val &= AD5791_RES_MASK(chan->scan_type.realbits);
+ val <<= chan->scan_type.shift;
+
+ return ad5791_spi_write(st->spi, chan->address, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct iio_info ad5791_info = {
+ .read_raw = &ad5791_read_raw,
+ .write_raw = &ad5791_write_raw,
.attrs = &ad5791_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -296,50 +284,52 @@ static int __devinit ad5791_probe(struct spi_device *spi)
{
struct ad5791_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
- struct regulator *reg_vdd, *reg_vss;
struct ad5791_state *st;
int ret, pos_voltage_uv = 0, neg_voltage_uv = 0;
- reg_vdd = regulator_get(&spi->dev, "vdd");
- if (!IS_ERR(reg_vdd)) {
- ret = regulator_enable(reg_vdd);
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ st = iio_priv(indio_dev);
+ st->reg_vdd = regulator_get(&spi->dev, "vdd");
+ if (!IS_ERR(st->reg_vdd)) {
+ ret = regulator_enable(st->reg_vdd);
if (ret)
goto error_put_reg_pos;
- pos_voltage_uv = regulator_get_voltage(reg_vdd);
+ pos_voltage_uv = regulator_get_voltage(st->reg_vdd);
}
- reg_vss = regulator_get(&spi->dev, "vss");
- if (!IS_ERR(reg_vss)) {
- ret = regulator_enable(reg_vss);
+ st->reg_vss = regulator_get(&spi->dev, "vss");
+ if (!IS_ERR(st->reg_vss)) {
+ ret = regulator_enable(st->reg_vss);
if (ret)
goto error_put_reg_neg;
- neg_voltage_uv = regulator_get_voltage(reg_vss);
+ neg_voltage_uv = regulator_get_voltage(st->reg_vss);
}
- indio_dev = iio_allocate_device(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_disable_reg_neg;
- }
- st = iio_priv(indio_dev);
st->pwr_down = true;
st->spi = spi;
- if (!IS_ERR(reg_vss) && !IS_ERR(reg_vdd))
- st->vref_mv = (pos_voltage_uv - neg_voltage_uv) / 1000;
- else if (pdata)
- st->vref_mv = pdata->vref_pos_mv - pdata->vref_neg_mv;
- else
+ if (!IS_ERR(st->reg_vss) && !IS_ERR(st->reg_vdd)) {
+ st->vref_mv = (pos_voltage_uv + neg_voltage_uv) / 1000;
+ st->vref_neg_mv = neg_voltage_uv / 1000;
+ } else if (pdata) {
+ st->vref_mv = pdata->vref_pos_mv + pdata->vref_neg_mv;
+ st->vref_neg_mv = pdata->vref_neg_mv;
+ } else {
dev_warn(&spi->dev, "reference voltage unspecified\n");
+ }
ret = ad5791_spi_write(spi, AD5791_ADDR_SW_CTRL, AD5791_SWCTRL_RESET);
if (ret)
- goto error_free_dev;
+ goto error_disable_reg_neg;
- st->chip_info =
- &ad5791_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+ st->chip_info = &ad5791_chip_info_tbl[spi_get_device_id(spi)
+ ->driver_data];
st->ctrl = AD5761_CTRL_LINCOMP(st->chip_info->get_lin_comp(st->vref_mv))
@@ -349,39 +339,37 @@ static int __devinit ad5791_probe(struct spi_device *spi)
ret = ad5791_spi_write(spi, AD5791_ADDR_CTRL, st->ctrl |
AD5791_CTRL_OPGND | AD5791_CTRL_DACTRI);
if (ret)
- goto error_free_dev;
-
- st->reg_vdd = reg_vdd;
- st->reg_vss = reg_vss;
+ goto error_disable_reg_neg;
spi_set_drvdata(spi, indio_dev);
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &ad5791_info;
indio_dev->modes = INDIO_DIRECT_MODE;
-
+ indio_dev->channels
+ = &ad5791_channels[spi_get_device_id(spi)->driver_data];
+ indio_dev->num_channels = 1;
+ indio_dev->name = spi_get_device_id(st->spi)->name;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ goto error_disable_reg_neg;
return 0;
-error_free_dev:
- iio_free_device(indio_dev);
-
error_disable_reg_neg:
- if (!IS_ERR(reg_vss))
- regulator_disable(reg_vss);
+ if (!IS_ERR(st->reg_vss))
+ regulator_disable(st->reg_vss);
error_put_reg_neg:
- if (!IS_ERR(reg_vss))
- regulator_put(reg_vss);
+ if (!IS_ERR(st->reg_vss))
+ regulator_put(st->reg_vss);
- if (!IS_ERR(reg_vdd))
- regulator_disable(reg_vdd);
+ if (!IS_ERR(st->reg_vdd))
+ regulator_disable(st->reg_vdd);
error_put_reg_pos:
- if (!IS_ERR(reg_vdd))
- regulator_put(reg_vdd);
-
+ if (!IS_ERR(st->reg_vdd))
+ regulator_put(st->reg_vdd);
+ iio_free_device(indio_dev);
error_ret:
+
return ret;
}
@@ -389,20 +377,18 @@ static int __devexit ad5791_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5791_state *st = iio_priv(indio_dev);
- struct regulator *reg_vdd = st->reg_vdd;
- struct regulator *reg_vss = st->reg_vss;
iio_device_unregister(indio_dev);
-
if (!IS_ERR(st->reg_vdd)) {
- regulator_disable(reg_vdd);
- regulator_put(reg_vdd);
+ regulator_disable(st->reg_vdd);
+ regulator_put(st->reg_vdd);
}
if (!IS_ERR(st->reg_vss)) {
- regulator_disable(reg_vss);
- regulator_put(reg_vss);
+ regulator_disable(st->reg_vss);
+ regulator_put(st->reg_vss);
}
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/dac/ad5791.h b/drivers/staging/iio/dac/ad5791.h
index c807f26539d..fd7edbdb4ec 100644
--- a/drivers/staging/iio/dac/ad5791.h
+++ b/drivers/staging/iio/dac/ad5791.h
@@ -68,14 +68,10 @@ struct ad5791_platform_data {
/**
* struct ad5791_chip_info - chip specific information
- * @bits: accuracy of the DAC in bits
- * @left_shift: number of bits the datum must be shifted
* @get_lin_comp: function pointer to the device specific function
*/
struct ad5791_chip_info {
- u8 bits;
- u8 left_shift;
int (*get_lin_comp) (unsigned int span);
};
@@ -86,6 +82,7 @@ struct ad5791_chip_info {
* @reg_vss: negative supply regulator
* @chip_info: chip model specific constants
* @vref_mv: actual reference voltage used
+ * @vref_neg_mv: voltage of the negative supply
* @pwr_down_mode current power down mode
*/
@@ -95,6 +92,7 @@ struct ad5791_state {
struct regulator *reg_vss;
const struct ad5791_chip_info *chip_info;
unsigned short vref_mv;
+ unsigned int vref_neg_mv;
unsigned ctrl;
unsigned pwr_down_mode;
bool pwr_down;
diff --git a/drivers/staging/iio/dac/dac.h b/drivers/staging/iio/dac/dac.h
index 1d82f353241..0754d715cf9 100644
--- a/drivers/staging/iio/dac/dac.h
+++ b/drivers/staging/iio/dac/dac.h
@@ -3,4 +3,4 @@
*/
#define IIO_DEV_ATTR_OUT_RAW(_num, _store, _addr) \
- IIO_DEVICE_ATTR(out##_num##_raw, S_IWUSR, NULL, _store, _addr)
+ IIO_DEVICE_ATTR(out_voltage##_num##_raw, S_IWUSR, NULL, _store, _addr)
diff --git a/drivers/staging/iio/dac/max517.c b/drivers/staging/iio/dac/max517.c
index ed5d351b238..adfbd20f898 100644
--- a/drivers/staging/iio/dac/max517.c
+++ b/drivers/staging/iio/dac/max517.c
@@ -26,6 +26,7 @@
#include <linux/err.h>
#include "../iio.h"
+#include "../sysfs.h"
#include "dac.h"
#include "max517.h"
@@ -58,8 +59,8 @@ static ssize_t max517_set_value(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count, int channel)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct max517_data *data = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct max517_data *data = iio_priv(indio_dev);
struct i2c_client *client = data->client;
u8 outbuf[4]; /* 1x or 2x command + value */
int outbuf_size = 0;
@@ -119,15 +120,16 @@ static ssize_t max517_set_value_both(struct device *dev,
{
return max517_set_value(dev, attr, buf, count, 3);
}
-static IIO_DEVICE_ATTR_NAMED(out1and2_raw, out1&2_raw, S_IWUSR, NULL,
- max517_set_value_both, -1);
+static IIO_DEVICE_ATTR_NAMED(out_voltage1and2_raw,
+ out_voltage1&2_raw, S_IWUSR, NULL,
+ max517_set_value_both, -1);
static ssize_t max517_show_scale(struct device *dev,
struct device_attribute *attr,
char *buf, int channel)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct max517_data *data = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct max517_data *data = iio_priv(indio_dev);
/* Corresponds to Vref / 2^(bits) */
unsigned int scale_uv = (data->vref_mv[channel - 1] * 1000) >> 8;
@@ -140,7 +142,8 @@ static ssize_t max517_show_scale1(struct device *dev,
{
return max517_show_scale(dev, attr, buf, 1);
}
-static IIO_DEVICE_ATTR(out1_scale, S_IRUGO, max517_show_scale1, NULL, 0);
+static IIO_DEVICE_ATTR(out_voltage1_scale, S_IRUGO,
+ max517_show_scale1, NULL, 0);
static ssize_t max517_show_scale2(struct device *dev,
struct device_attribute *attr,
@@ -148,12 +151,13 @@ static ssize_t max517_show_scale2(struct device *dev,
{
return max517_show_scale(dev, attr, buf, 2);
}
-static IIO_DEVICE_ATTR(out2_scale, S_IRUGO, max517_show_scale2, NULL, 0);
+static IIO_DEVICE_ATTR(out_voltage2_scale, S_IRUGO,
+ max517_show_scale2, NULL, 0);
/* On MAX517 variant, we have one output */
static struct attribute *max517_attributes[] = {
- &iio_dev_attr_out1_raw.dev_attr.attr,
- &iio_dev_attr_out1_scale.dev_attr.attr,
+ &iio_dev_attr_out_voltage1_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage1_scale.dev_attr.attr,
NULL
};
@@ -163,11 +167,11 @@ static struct attribute_group max517_attribute_group = {
/* On MAX518 and MAX519 variant, we have two outputs */
static struct attribute *max518_attributes[] = {
- &iio_dev_attr_out1_raw.dev_attr.attr,
- &iio_dev_attr_out1_scale.dev_attr.attr,
- &iio_dev_attr_out2_raw.dev_attr.attr,
- &iio_dev_attr_out2_scale.dev_attr.attr,
- &iio_dev_attr_out1and2_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage1_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage1_scale.dev_attr.attr,
+ &iio_dev_attr_out_voltage2_raw.dev_attr.attr,
+ &iio_dev_attr_out_voltage2_scale.dev_attr.attr,
+ &iio_dev_attr_out_voltage1and2_raw.dev_attr.attr,
NULL
};
diff --git a/drivers/staging/iio/dds/Kconfig b/drivers/staging/iio/dds/Kconfig
index e07431d8009..93b7141b2c1 100644
--- a/drivers/staging/iio/dds/Kconfig
+++ b/drivers/staging/iio/dds/Kconfig
@@ -1,7 +1,7 @@
#
# Direct Digital Synthesis drivers
#
-comment "Direct Digital Synthesis"
+menu "Direct Digital Synthesis"
config AD5930
tristate "Analog Devices ad5930/5932 driver"
@@ -57,3 +57,5 @@ config AD9951
help
Say yes here to build support for Analog Devices DDS chip
ad9951, provides direct access via sysfs.
+
+endmenu
diff --git a/drivers/staging/iio/dds/ad5930.c b/drivers/staging/iio/dds/ad5930.c
index 0b2aa4cafdd..f5e368b5665 100644
--- a/drivers/staging/iio/dds/ad5930.c
+++ b/drivers/staging/iio/dds/ad5930.c
@@ -14,6 +14,7 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -129,6 +130,7 @@ error_ret:
static int __devexit ad5930_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
+ iio_free_device(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/dds/ad9832.c b/drivers/staging/iio/dds/ad9832.c
index e3e61a469bb..9b4ff606535 100644
--- a/drivers/staging/iio/dds/ad9832.c
+++ b/drivers/staging/iio/dds/ad9832.c
@@ -13,6 +13,7 @@
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <asm/div64.h>
#include "../iio.h"
@@ -52,7 +53,7 @@ static int ad9832_write_frequency(struct ad9832_state *st,
((addr - 3) << ADD_SHIFT) |
((regval >> 0) & 0xFF));
- return spi_sync(st->spi, &st->freq_msg);;
+ return spi_sync(st->spi, &st->freq_msg);
}
static int ad9832_write_phase(struct ad9832_state *st,
@@ -76,8 +77,8 @@ static ssize_t ad9832_write(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad9832_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad9832_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
long val;
@@ -86,7 +87,7 @@ static ssize_t ad9832_write(struct device *dev,
if (ret)
goto error_ret;
- mutex_lock(&dev_info->mlock);
+ mutex_lock(&indio_dev->mlock);
switch (this_attr->address) {
case AD9832_FREQ0HM:
case AD9832_FREQ1HM:
@@ -147,7 +148,7 @@ static ssize_t ad9832_write(struct device *dev,
default:
ret = -ENODEV;
}
- mutex_unlock(&dev_info->mlock);
+ mutex_unlock(&indio_dev->mlock);
error_ret:
return ret ? ret : len;
@@ -327,13 +328,14 @@ static int __devexit ad9832_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad9832_state *st = iio_priv(indio_dev);
- struct regulator *reg = st->reg;
iio_device_unregister(indio_dev);
- if (!IS_ERR(reg)) {
- regulator_disable(reg);
- regulator_put(reg);
+ if (!IS_ERR(st->reg)) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
}
+ iio_free_device(indio_dev);
+
return 0;
}
diff --git a/drivers/staging/iio/dds/ad9834.c b/drivers/staging/iio/dds/ad9834.c
index e6454d58fe4..c468f696fe2 100644
--- a/drivers/staging/iio/dds/ad9834.c
+++ b/drivers/staging/iio/dds/ad9834.c
@@ -16,6 +16,7 @@
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <asm/div64.h>
#include "../iio.h"
@@ -65,8 +66,8 @@ static ssize_t ad9834_write(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad9834_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad9834_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
long val;
@@ -75,7 +76,7 @@ static ssize_t ad9834_write(struct device *dev,
if (ret)
goto error_ret;
- mutex_lock(&dev_info->mlock);
+ mutex_lock(&indio_dev->mlock);
switch (this_attr->address) {
case AD9834_REG_FREQ0:
case AD9834_REG_FREQ1:
@@ -133,7 +134,7 @@ static ssize_t ad9834_write(struct device *dev,
default:
ret = -ENODEV;
}
- mutex_unlock(&dev_info->mlock);
+ mutex_unlock(&indio_dev->mlock);
error_ret:
return ret ? ret : len;
@@ -144,13 +145,13 @@ static ssize_t ad9834_store_wavetype(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad9834_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad9834_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret = 0;
bool is_ad9833_7 = (st->devid == ID_AD9833) || (st->devid == ID_AD9837);
- mutex_lock(&dev_info->mlock);
+ mutex_lock(&indio_dev->mlock);
switch (this_attr->address) {
case 0:
@@ -193,7 +194,7 @@ static ssize_t ad9834_store_wavetype(struct device *dev,
st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
ret = spi_sync(st->spi, &st->msg);
}
- mutex_unlock(&dev_info->mlock);
+ mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
}
@@ -202,8 +203,8 @@ static ssize_t ad9834_show_out0_wavetype_available(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad9834_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad9834_state *st = iio_priv(indio_dev);
char *str;
if ((st->devid == ID_AD9833) || (st->devid == ID_AD9837))
@@ -224,8 +225,8 @@ static ssize_t ad9834_show_out1_wavetype_available(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad9834_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad9834_state *st = iio_priv(indio_dev);
char *str;
if (st->control & AD9834_MODE)
@@ -284,8 +285,8 @@ static mode_t ad9834_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad9834_state *st = iio_priv(dev_info);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad9834_state *st = iio_priv(indio_dev);
mode_t mode = attr->mode;
@@ -416,13 +417,13 @@ static int __devexit ad9834_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad9834_state *st = iio_priv(indio_dev);
- struct regulator *reg = st->reg;
iio_device_unregister(indio_dev);
- if (!IS_ERR(reg)) {
- regulator_disable(reg);
- regulator_put(reg);
+ if (!IS_ERR(st->reg)) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
}
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/dds/ad9850.c b/drivers/staging/iio/dds/ad9850.c
index d7c9d05f635..a14771b24c5 100644
--- a/drivers/staging/iio/dds/ad9850.c
+++ b/drivers/staging/iio/dds/ad9850.c
@@ -14,6 +14,7 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -115,6 +116,7 @@ error_ret:
static int __devexit ad9850_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
+ iio_free_device(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/dds/ad9852.c b/drivers/staging/iio/dds/ad9852.c
index 0184585425d..cfceaa64a53 100644
--- a/drivers/staging/iio/dds/ad9852.c
+++ b/drivers/staging/iio/dds/ad9852.c
@@ -14,6 +14,7 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -217,7 +218,6 @@ static struct attribute *ad9852_attributes[] = {
};
static const struct attribute_group ad9852_attribute_group = {
- .name = DRV_NAME,
.attrs = ad9852_attributes,
};
@@ -267,6 +267,7 @@ error_ret:
static int __devexit ad9852_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
+ iio_free_device(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/dds/ad9910.c b/drivers/staging/iio/dds/ad9910.c
index 0fa217f7b90..da83d2b88c4 100644
--- a/drivers/staging/iio/dds/ad9910.c
+++ b/drivers/staging/iio/dds/ad9910.c
@@ -14,6 +14,7 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -352,7 +353,6 @@ static struct attribute *ad9910_attributes[] = {
};
static const struct attribute_group ad9910_attribute_group = {
- .name = DRV_NAME,
.attrs = ad9910_attributes,
};
@@ -400,6 +400,7 @@ error_ret:
static int __devexit ad9910_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
+ iio_free_device(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/dds/ad9951.c b/drivers/staging/iio/dds/ad9951.c
index d361d1f125d..20c182576de 100644
--- a/drivers/staging/iio/dds/ad9951.c
+++ b/drivers/staging/iio/dds/ad9951.c
@@ -14,6 +14,7 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -161,7 +162,6 @@ static struct attribute *ad9951_attributes[] = {
};
static const struct attribute_group ad9951_attribute_group = {
- .name = DRV_NAME,
.attrs = ad9951_attributes,
};
@@ -211,6 +211,7 @@ error_ret:
static int __devexit ad9951_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
+ iio_free_device(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
index ae2e7d3095a..22aea5b4e61 100644
--- a/drivers/staging/iio/gyro/Kconfig
+++ b/drivers/staging/iio/gyro/Kconfig
@@ -1,7 +1,7 @@
#
# IIO Digital Gyroscope Sensor drivers configuration
#
-comment "Digital gyroscope sensors"
+menu "Digital gyroscope sensors"
config ADIS16060
tristate "Analog Devices ADIS16060 Yaw Rate Gyroscope with SPI driver"
@@ -27,8 +27,8 @@ config ADIS16130
config ADIS16260
tristate "Analog Devices ADIS16260 Digital Gyroscope Sensor SPI driver"
depends on SPI
- select IIO_TRIGGER if IIO_RING_BUFFER
- select IIO_SW_RING if IIO_RING_BUFFER
+ select IIO_TRIGGER if IIO_BUFFER
+ select IIO_SW_RING if IIO_BUFFER
help
Say yes here to build support for Analog Devices ADIS16260 ADIS16265
ADIS16250 ADIS16255 and ADIS16251 programmable digital gyroscope sensors.
@@ -45,3 +45,5 @@ config ADXRS450
This driver can also be built as a module. If so, the module
will be called adxrs450.
+
+endmenu
diff --git a/drivers/staging/iio/gyro/Makefile b/drivers/staging/iio/gyro/Makefile
index 2212240f7de..9ba5ec15170 100644
--- a/drivers/staging/iio/gyro/Makefile
+++ b/drivers/staging/iio/gyro/Makefile
@@ -12,7 +12,7 @@ adis16130-y := adis16130_core.o
obj-$(CONFIG_ADIS16130) += adis16130.o
adis16260-y := adis16260_core.o
-adis16260-$(CONFIG_IIO_RING_BUFFER) += adis16260_ring.o adis16260_trigger.o
+adis16260-$(CONFIG_IIO_BUFFER) += adis16260_ring.o adis16260_trigger.o
obj-$(CONFIG_ADIS16260) += adis16260.o
adis16251-y := adis16251_core.o
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index afa52d1961a..38cf3f4bf72 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -13,12 +13,10 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
-
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "gyro.h"
-#include "../adc/adc.h"
#define ADIS16060_GYRO 0x20 /* Measure Angular Rate (Gyro) */
#define ADIS16060_TEMP_OUT 0x10 /* Measure Temperature */
@@ -42,11 +40,9 @@ struct adis16060_state {
static struct iio_dev *adis16060_iio_dev;
-static int adis16060_spi_write(struct device *dev,
- u8 val)
+static int adis16060_spi_write(struct iio_dev *indio_dev, u8 val)
{
int ret;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct adis16060_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
@@ -57,11 +53,9 @@ static int adis16060_spi_write(struct device *dev,
return ret;
}
-static int adis16060_spi_read(struct device *dev,
- u16 *val)
+static int adis16060_spi_read(struct iio_dev *indio_dev, u16 *val)
{
int ret;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct adis16060_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
@@ -82,63 +76,74 @@ static int adis16060_spi_read(struct device *dev,
return ret;
}
-static ssize_t adis16060_read(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int adis16060_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- u16 val = 0;
- ssize_t ret;
-
- /* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
+ u16 tval = 0;
+ int ret;
- ret = adis16060_spi_write(dev, this_attr->address);
- if (ret < 0)
- goto error_ret;
- ret = adis16060_spi_read(dev, &val);
-error_ret:
- mutex_unlock(&indio_dev->mlock);
+ switch (mask) {
+ case 0:
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16060_spi_write(indio_dev, chan->address);
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ ret = adis16060_spi_read(indio_dev, &tval);
+ mutex_unlock(&indio_dev->mlock);
+ *val = tval;
+ return IIO_VAL_INT;
+ case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ *val = -7;
+ *val2 = 461117;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ *val = 0;
+ *val2 = 34000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
- if (ret == 0)
- return sprintf(buf, "%d\n", val);
- else
- return ret;
+ return -EINVAL;
}
-static IIO_DEV_ATTR_GYRO_Z(adis16060_read, ADIS16060_GYRO);
-static IIO_DEVICE_ATTR(temp_raw, S_IRUGO, adis16060_read, NULL,
- ADIS16060_TEMP_OUT);
-static IIO_CONST_ATTR_TEMP_SCALE("34"); /* Milli degrees C */
-static IIO_CONST_ATTR_TEMP_OFFSET("-7461.117"); /* Milli degrees C */
-static IIO_DEV_ATTR_IN_RAW(0, adis16060_read, ADIS16060_AIN1);
-static IIO_DEV_ATTR_IN_RAW(1, adis16060_read, ADIS16060_AIN2);
-static IIO_CONST_ATTR(name, "adis16060");
-
-static struct attribute *adis16060_attributes[] = {
- &iio_dev_attr_gyro_z_raw.dev_attr.attr,
- &iio_dev_attr_temp_raw.dev_attr.attr,
- &iio_const_attr_temp_scale.dev_attr.attr,
- &iio_const_attr_temp_offset.dev_attr.attr,
- &iio_dev_attr_in0_raw.dev_attr.attr,
- &iio_dev_attr_in1_raw.dev_attr.attr,
- &iio_const_attr_name.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group adis16060_attribute_group = {
- .attrs = adis16060_attributes,
-};
-
static const struct iio_info adis16060_info = {
- .attrs = &adis16060_attribute_group,
+ .read_raw = &adis16060_read_raw,
.driver_module = THIS_MODULE,
};
+static const struct iio_chan_spec adis16060_channels[] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .address = ADIS16060_GYRO,
+ }, {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .address = ADIS16060_AIN1,
+ }, {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .address = ADIS16060_AIN2,
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = ADIS16060_TEMP_OUT,
+ }
+};
+
static int __devinit adis16060_r_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct adis16060_state *st;
struct iio_dev *indio_dev;
@@ -154,23 +159,22 @@ static int __devinit adis16060_r_probe(struct spi_device *spi)
st->us_r = spi;
mutex_init(&st->buf_lock);
+ indio_dev->name = spi->dev.driver->name;
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &adis16060_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = adis16060_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16060_channels);
ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
- regdone = 1;
adis16060_iio_dev = indio_dev;
return 0;
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -179,6 +183,7 @@ error_ret:
static int adis16060_r_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
+ iio_free_device(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/staging/iio/gyro/adis16080_core.c
index ad2db4d723d..5d7a906fec7 100644
--- a/drivers/staging/iio/gyro/adis16080_core.c
+++ b/drivers/staging/iio/gyro/adis16080_core.c
@@ -5,7 +5,6 @@
*
* Licensed under the GPL-2 or later.
*/
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
@@ -13,11 +12,10 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "gyro.h"
-#include "../adc/adc.h"
#define ADIS16080_DIN_GYRO (0 << 10) /* Gyroscope output */
#define ADIS16080_DIN_TEMP (1 << 10) /* Temperature output */
@@ -44,11 +42,10 @@ struct adis16080_state {
u8 buf[2] ____cacheline_aligned;
};
-static int adis16080_spi_write(struct device *dev,
+static int adis16080_spi_write(struct iio_dev *indio_dev,
u16 val)
{
int ret;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct adis16080_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
@@ -61,11 +58,10 @@ static int adis16080_spi_write(struct device *dev,
return ret;
}
-static int adis16080_spi_read(struct device *dev,
- u16 *val)
+static int adis16080_spi_read(struct iio_dev *indio_dev,
+ u16 *val)
{
int ret;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct adis16080_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
@@ -79,56 +75,68 @@ static int adis16080_spi_read(struct device *dev,
return ret;
}
-static ssize_t adis16080_read(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int adis16080_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- u16 val = 0;
- ssize_t ret;
-
+ int ret = -EINVAL;
+ u16 ut;
/* Take the iio_dev status lock */
+
mutex_lock(&indio_dev->mlock);
- ret = adis16080_spi_write(dev,
- this_attr->address | ADIS16080_DIN_WRITE);
- if (ret < 0)
- goto error_ret;
- ret = adis16080_spi_read(dev, &val);
-error_ret:
+ switch (mask) {
+ case 0:
+ ret = adis16080_spi_write(indio_dev,
+ chan->address |
+ ADIS16080_DIN_WRITE);
+ if (ret < 0)
+ break;
+ ret = adis16080_spi_read(indio_dev, &ut);
+ if (ret < 0)
+ break;
+ *val = ut;
+ ret = IIO_VAL_INT;
+ break;
+ }
mutex_unlock(&indio_dev->mlock);
- if (ret == 0)
- return sprintf(buf, "%d\n", val);
- else
- return ret;
+ return ret;
}
-static IIO_DEV_ATTR_GYRO_Z(adis16080_read, ADIS16080_DIN_GYRO);
-static IIO_DEVICE_ATTR(temp_raw, S_IRUGO, adis16080_read, NULL,
- ADIS16080_DIN_TEMP);
-static IIO_DEV_ATTR_IN_RAW(0, adis16080_read, ADIS16080_DIN_AIN1);
-static IIO_DEV_ATTR_IN_RAW(1, adis16080_read, ADIS16080_DIN_AIN2);
-
-static struct attribute *adis16080_attributes[] = {
- &iio_dev_attr_gyro_z_raw.dev_attr.attr,
- &iio_dev_attr_temp_raw.dev_attr.attr,
- &iio_dev_attr_in0_raw.dev_attr.attr,
- &iio_dev_attr_in1_raw.dev_attr.attr,
- NULL
-};
-static const struct attribute_group adis16080_attribute_group = {
- .attrs = adis16080_attributes,
+static const struct iio_chan_spec adis16080_channels[] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .address = ADIS16080_DIN_GYRO,
+ }, {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .address = ADIS16080_DIN_AIN1,
+ }, {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .address = ADIS16080_DIN_AIN2,
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .address = ADIS16080_DIN_TEMP,
+ }
};
static const struct iio_info adis16080_info = {
- .attrs = &adis16080_attribute_group,
+ .read_raw = &adis16080_read_raw,
.driver_module = THIS_MODULE,
};
static int __devinit adis16080_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct adis16080_state *st;
struct iio_dev *indio_dev;
@@ -147,6 +155,8 @@ static int __devinit adis16080_probe(struct spi_device *spi)
mutex_init(&st->buf_lock);
indio_dev->name = spi->dev.driver->name;
+ indio_dev->channels = adis16080_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16080_channels);
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &adis16080_info;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -154,15 +164,10 @@ static int __devinit adis16080_probe(struct spi_device *spi)
ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
- regdone = 1;
-
return 0;
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -171,6 +176,7 @@ error_ret:
static int adis16080_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
+ iio_free_device(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/gyro/adis16130_core.c b/drivers/staging/iio/gyro/adis16130_core.c
index c80e908d8ac..749240d0d8c 100644
--- a/drivers/staging/iio/gyro/adis16130_core.c
+++ b/drivers/staging/iio/gyro/adis16130_core.c
@@ -14,11 +14,10 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "gyro.h"
-#include "../adc/adc.h"
#define ADIS16130_CON 0x0
#define ADIS16130_CON_RD (1 << 6)
@@ -41,140 +40,77 @@
/**
* struct adis16130_state - device instance specific data
* @us: actual spi_device to write data
- * @mode: 24 bits (1) or 16 bits (0)
* @buf_lock: mutex to protect tx and rx
* @buf: unified tx/rx buffer
**/
struct adis16130_state {
struct spi_device *us;
- u32 mode;
struct mutex buf_lock;
u8 buf[4] ____cacheline_aligned;
};
-static int adis16130_spi_write(struct device *dev, u8 reg_addr,
- u8 val)
+static int adis16130_spi_read(struct iio_dev *indio_dev, u8 reg_addr, u32 *val)
{
int ret;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16130_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->buf[0] = reg_addr;
- st->buf[1] = val;
-
- ret = spi_write(st->us, st->buf, 2);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int adis16130_spi_read(struct device *dev, u8 reg_addr,
- u32 *val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct adis16130_state *st = iio_priv(indio_dev);
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .tx_buf = st->buf,
+ .rx_buf = st->buf,
+ .len = 4,
+ };
mutex_lock(&st->buf_lock);
st->buf[0] = ADIS16130_CON_RD | reg_addr;
- if (st->mode)
- ret = spi_read(st->us, st->buf, 4);
- else
- ret = spi_read(st->us, st->buf, 3);
-
- if (ret == 0) {
- if (st->mode)
- *val = (st->buf[1] << 16) |
- (st->buf[2] << 8) |
- st->buf[3];
- else
- *val = (st->buf[1] << 8) | st->buf[2];
- }
+ st->buf[1] = st->buf[2] = st->buf[3] = 0;
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->us, &msg);
+ ret = spi_read(st->us, st->buf, 4);
+
+ if (ret == 0)
+ *val = (st->buf[1] << 16) | (st->buf[2] << 8) | st->buf[3];
mutex_unlock(&st->buf_lock);
return ret;
}
-static ssize_t adis16130_val_read(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int adis16130_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- u32 val;
- ssize_t ret;
+ int ret;
+ u32 temp;
/* Take the iio_dev status lock */
mutex_lock(&indio_dev->mlock);
- ret = adis16130_spi_read(dev, this_attr->address, &val);
+ ret = adis16130_spi_read(indio_dev, chan->address, &temp);
mutex_unlock(&indio_dev->mlock);
-
- if (ret == 0)
- return sprintf(buf, "%d\n", val);
- else
+ if (ret)
return ret;
+ *val = temp;
+ return IIO_VAL_INT;
}
-static ssize_t adis16130_bitsmode_read(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16130_state *st = iio_priv(indio_dev);
-
- if (st->mode == 1)
- return sprintf(buf, "s24\n");
- else
- return sprintf(buf, "s16\n");
-}
-
-static ssize_t adis16130_bitsmode_write(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- int ret;
- u8 val;
-
- if (sysfs_streq(buf, "s16"))
- val = 0;
- else if (sysfs_streq(buf, "s24"))
- val = 1;
- else
- return -EINVAL;
-
- ret = adis16130_spi_write(dev, ADIS16130_MODE, val);
-
- return ret ? ret : len;
-}
-static IIO_DEVICE_ATTR(temp_raw, S_IRUGO, adis16130_val_read, NULL,
- ADIS16130_TEMPDATA);
-
-static IIO_DEV_ATTR_GYRO_Z(adis16130_val_read, ADIS16130_RATEDATA);
-
-static IIO_DEVICE_ATTR(gyro_z_type, S_IWUSR | S_IRUGO, adis16130_bitsmode_read,
- adis16130_bitsmode_write,
- ADIS16130_MODE);
-
-static IIO_CONST_ATTR(gyro_z_type_available, "s16 s24");
-
-static struct attribute *adis16130_attributes[] = {
- &iio_dev_attr_temp_raw.dev_attr.attr,
- &iio_dev_attr_gyro_z_raw.dev_attr.attr,
- &iio_dev_attr_gyro_z_type.dev_attr.attr,
- &iio_const_attr_gyro_z_type_available.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group adis16130_attribute_group = {
- .attrs = adis16130_attributes,
+static const struct iio_chan_spec adis16130_channels[] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .address = ADIS16130_RATEDATA,
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .address = ADIS16130_TEMPDATA,
+ }
};
static const struct iio_info adis16130_info = {
- .attrs = &adis16130_attribute_group,
+ .read_raw = &adis16130_read_raw,
.driver_module = THIS_MODULE,
};
@@ -196,10 +132,11 @@ static int __devinit adis16130_probe(struct spi_device *spi)
st->us = spi;
mutex_init(&st->buf_lock);
indio_dev->name = spi->dev.driver->name;
+ indio_dev->channels = adis16130_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16130_channels);
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &adis16130_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- st->mode = 1;
ret = iio_device_register(indio_dev);
if (ret)
@@ -218,6 +155,7 @@ error_ret:
static int adis16130_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
+ iio_free_device(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/gyro/adis16260.h b/drivers/staging/iio/gyro/adis16260.h
index 969b624be6d..4c4b25129c6 100644
--- a/drivers/staging/iio/gyro/adis16260.h
+++ b/drivers/staging/iio/gyro/adis16260.h
@@ -112,7 +112,7 @@ int adis16260_set_irq(struct iio_dev *indio_dev, bool enable);
#define ADIS16260_SCAN_TEMP 3
#define ADIS16260_SCAN_ANGL 4
-#ifdef CONFIG_IIO_RING_BUFFER
+#ifdef CONFIG_IIO_BUFFER
void adis16260_remove_trigger(struct iio_dev *indio_dev);
int adis16260_probe_trigger(struct iio_dev *indio_dev);
@@ -124,7 +124,7 @@ ssize_t adis16260_read_data_from_ring(struct device *dev,
int adis16260_configure_ring(struct iio_dev *indio_dev);
void adis16260_unconfigure_ring(struct iio_dev *indio_dev);
-#else /* CONFIG_IIO_RING_BUFFER */
+#else /* CONFIG_IIO_BUFFER */
static inline void adis16260_remove_trigger(struct iio_dev *indio_dev)
{
@@ -152,5 +152,5 @@ static inline void adis16260_unconfigure_ring(struct iio_dev *indio_dev)
{
}
-#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* CONFIG_IIO_BUFFER */
#endif /* SPI_ADIS16260_H_ */
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
index f2d43cfcc49..aaa3967f8c0 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -8,7 +8,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
@@ -17,12 +16,11 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
-#include "../adc/adc.h"
-#include "gyro.h"
+#include "../buffer_generic.h"
#include "adis16260.h"
@@ -391,7 +389,7 @@ enum adis16260_channel {
};
#define ADIS16260_GYRO_CHANNEL_SET(axis, mod) \
struct iio_chan_spec adis16260_channels_##axis[] = { \
- IIO_CHAN(IIO_GYRO, 1, 0, 0, NULL, 0, mod, \
+ IIO_CHAN(IIO_ANGL_VEL, 1, 0, 0, NULL, 0, mod, \
(1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) | \
(1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) | \
(1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
@@ -406,11 +404,11 @@ enum adis16260_channel {
(1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
temp, ADIS16260_SCAN_TEMP, \
IIO_ST('u', 12, 16, 0), 0), \
- IIO_CHAN(IIO_IN, 0, 1, 0, "supply", 0, 0, \
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0, \
(1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
in_supply, ADIS16260_SCAN_SUPPLY, \
IIO_ST('u', 12, 16, 0), 0), \
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0, \
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0, \
(1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
in_aux, ADIS16260_SCAN_AUX_ADC, \
IIO_ST('u', 12, 16, 0), 0), \
@@ -469,14 +467,14 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
case (1 << IIO_CHAN_INFO_SCALE_SHARED):
switch (chan->type) {
- case IIO_GYRO:
+ case IIO_ANGL_VEL:
*val = 0;
if (spi_get_device_id(st->us)->driver_data)
*val2 = 320;
else
*val2 = 1278;
return IIO_VAL_INT_PLUS_MICRO;
- case IIO_IN:
+ case IIO_VOLTAGE:
*val = 0;
if (chan->channel == 0)
*val2 = 18315;
@@ -496,7 +494,7 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
switch (chan->type) {
- case IIO_GYRO:
+ case IIO_ANGL_VEL:
bits = 12;
break;
default:
@@ -516,7 +514,7 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
switch (chan->type) {
- case IIO_GYRO:
+ case IIO_ANGL_VEL:
bits = 12;
break;
default:
@@ -578,7 +576,7 @@ static const struct iio_info adis16260_info = {
static int __devinit adis16260_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct adis16260_platform_data *pd = spi->dev.platform_data;
struct adis16260_state *st;
struct iio_dev *indio_dev;
@@ -626,18 +624,21 @@ static int __devinit adis16260_probe(struct spi_device *spi)
if (ret)
goto error_free_dev;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_ring_funcs;
- regdone = 1;
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- indio_dev->channels,
- ARRAY_SIZE(adis16260_channels_x));
+ ret = iio_buffer_register(indio_dev,
+ indio_dev->channels,
+ ARRAY_SIZE(adis16260_channels_x));
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
}
-
+ if (indio_dev->buffer) {
+ /* Set default scan mode */
+ iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_SUPPLY);
+ iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_GYRO);
+ iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_AUX_ADC);
+ iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_TEMP);
+ iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_ANGL);
+ }
if (spi->irq) {
ret = adis16260_probe_trigger(indio_dev);
if (ret)
@@ -648,19 +649,20 @@ static int __devinit adis16260_probe(struct spi_device *spi)
ret = adis16260_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_remove_trigger;
+
return 0;
error_remove_trigger:
adis16260_remove_trigger(indio_dev);
error_uninitialize_ring:
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_buffer_unregister(indio_dev);
error_unreg_ring_funcs:
adis16260_unconfigure_ring(indio_dev);
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -670,6 +672,8 @@ static int adis16260_remove(struct spi_device *spi)
int ret;
struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ iio_device_unregister(indio_dev);
+
ret = adis16260_stop_device(indio_dev);
if (ret)
goto err_ret;
@@ -677,9 +681,9 @@ static int adis16260_remove(struct spi_device *spi)
flush_scheduled_work();
adis16260_remove_trigger(indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
- iio_device_unregister(indio_dev);
+ iio_buffer_unregister(indio_dev);
adis16260_unconfigure_ring(indio_dev);
+ iio_free_device(indio_dev);
err_ret:
return ret;
diff --git a/drivers/staging/iio/gyro/adis16260_ring.c b/drivers/staging/iio/gyro/adis16260_ring.c
index a4df8b32251..679c1515571 100644
--- a/drivers/staging/iio/gyro/adis16260_ring.c
+++ b/drivers/staging/iio/gyro/adis16260_ring.c
@@ -1,20 +1,12 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../ring_sw.h"
-#include "../accel/accel.h"
-#include "../trigger.h"
+#include "../trigger_consumer.h"
#include "adis16260.h"
/**
@@ -68,9 +60,9 @@ static int adis16260_read_ring_data(struct device *dev, u8 *rx)
static irqreturn_t adis16260_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
+ struct iio_dev *indio_dev = pf->indio_dev;
struct adis16260_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
int i = 0;
s16 *data;
size_t datasize = ring->access->get_bytes_per_datum(ring);
@@ -101,26 +93,26 @@ static irqreturn_t adis16260_trigger_handler(int irq, void *p)
void adis16260_unconfigure_ring(struct iio_dev *indio_dev)
{
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
-static const struct iio_ring_setup_ops adis16260_ring_setup_ops = {
- .preenable = &iio_sw_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+static const struct iio_buffer_setup_ops adis16260_ring_setup_ops = {
+ .preenable = &iio_sw_buffer_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
};
int adis16260_configure_ring(struct iio_dev *indio_dev)
{
int ret = 0;
- struct iio_ring_buffer *ring;
+ struct iio_buffer *ring;
ring = iio_sw_rb_allocate(indio_dev);
if (!ring) {
ret = -ENOMEM;
return ret;
}
- indio_dev->ring = ring;
+ indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
ring->bpe = 2;
@@ -128,13 +120,6 @@ int adis16260_configure_ring(struct iio_dev *indio_dev)
ring->setup_ops = &adis16260_ring_setup_ops;
ring->owner = THIS_MODULE;
- /* Set default scan mode */
- iio_scan_mask_set(ring, ADIS16260_SCAN_SUPPLY);
- iio_scan_mask_set(ring, ADIS16260_SCAN_GYRO);
- iio_scan_mask_set(ring, ADIS16260_SCAN_AUX_ADC);
- iio_scan_mask_set(ring, ADIS16260_SCAN_TEMP);
- iio_scan_mask_set(ring, ADIS16260_SCAN_ANGL);
-
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16260_trigger_handler,
IRQF_ONESHOT,
@@ -146,10 +131,10 @@ int adis16260_configure_ring(struct iio_dev *indio_dev)
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
return ret;
}
diff --git a/drivers/staging/iio/gyro/adis16260_trigger.c b/drivers/staging/iio/gyro/adis16260_trigger.c
index 01094d0e714..2f2b2160f44 100644
--- a/drivers/staging/iio/gyro/adis16260_trigger.c
+++ b/drivers/staging/iio/gyro/adis16260_trigger.c
@@ -1,14 +1,8 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16260.h"
@@ -24,6 +18,11 @@ static int adis16260_data_rdy_trigger_set_state(struct iio_trigger *trig,
return adis16260_set_irq(indio_dev, state);
}
+static const struct iio_trigger_ops adis16260_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &adis16260_data_rdy_trigger_set_state,
+};
+
int adis16260_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
@@ -46,9 +45,8 @@ int adis16260_probe_trigger(struct iio_dev *indio_dev)
goto error_free_trig;
st->trig->dev.parent = &st->us->dev;
- st->trig->owner = THIS_MODULE;
+ st->trig->ops = &adis16260_trigger_ops;
st->trig->private_data = indio_dev;
- st->trig->set_trigger_state = &adis16260_data_rdy_trigger_set_state;
ret = iio_trigger_register(st->trig);
/* select default trigger */
diff --git a/drivers/staging/iio/gyro/adxrs450_core.c b/drivers/staging/iio/gyro/adxrs450_core.c
index 7502a264770..3c3ef796d48 100644
--- a/drivers/staging/iio/gyro/adxrs450_core.c
+++ b/drivers/staging/iio/gyro/adxrs450_core.c
@@ -8,7 +8,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
@@ -17,11 +16,10 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "gyro.h"
-#include "../adc/adc.h"
#include "adxrs450.h"
@@ -32,26 +30,12 @@
* Second register's address is reg_address + 1.
* @val: somewhere to pass back the value read
**/
-static int adxrs450_spi_read_reg_16(struct device *dev,
- u8 reg_address,
- u16 *val)
+static int adxrs450_spi_read_reg_16(struct iio_dev *indio_dev,
+ u8 reg_address,
+ u16 *val)
{
- struct spi_message msg;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct adxrs450_state *st = iio_priv(indio_dev);
int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 4,
- .cs_change = 1,
- }, {
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 4,
- },
- };
mutex_lock(&st->buf_lock);
st->tx[0] = ADXRS450_READ_DATA | (reg_address >> 7);
@@ -62,10 +46,13 @@ static int adxrs450_spi_read_reg_16(struct device *dev,
if (!(hweight32(be32_to_cpu(*(u32 *)st->tx)) & 1))
st->tx[3] |= ADXRS450_P;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_write(st->us, st->tx, 4);
+ if (ret) {
+ dev_err(&st->us->dev, "problem while reading 16 bit register 0x%02x\n",
+ reg_address);
+ goto error_ret;
+ }
+ ret = spi_read(st->us, st->rx, 4);
if (ret) {
dev_err(&st->us->dev, "problem while reading 16 bit register 0x%02x\n",
reg_address);
@@ -86,20 +73,12 @@ error_ret:
* Second register's address is reg_address + 1.
* @val: value to be written.
**/
-static int adxrs450_spi_write_reg_16(struct device *dev,
- u8 reg_address,
- u16 val)
+static int adxrs450_spi_write_reg_16(struct iio_dev *indio_dev,
+ u8 reg_address,
+ u16 val)
{
- struct spi_message msg;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct adxrs450_state *st = iio_priv(indio_dev);
int ret;
- struct spi_transfer xfers = {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 4,
- };
mutex_lock(&st->buf_lock);
st->tx[0] = ADXRS450_WRITE_DATA | reg_address >> 7;
@@ -108,14 +87,12 @@ static int adxrs450_spi_write_reg_16(struct device *dev,
st->tx[3] = val << 1;
if (!(hweight32(be32_to_cpu(*(u32 *)st->tx)) & 1))
- st->tx[3] |= ADXRS450_P;
+ st->tx[3] |= ADXRS450_P;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers, &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_write(st->us, st->tx, 4);
if (ret)
dev_err(&st->us->dev, "problem while writing 16 bit register 0x%02x\n",
- reg_address);
+ reg_address);
msleep(1); /* enforce sequential transfer delay 0.1ms */
mutex_unlock(&st->buf_lock);
return ret;
@@ -126,24 +103,10 @@ static int adxrs450_spi_write_reg_16(struct device *dev,
* @dev: device associated with child of actual iio_dev
* @val: somewhere to pass back the value read
**/
-static int adxrs450_spi_sensor_data(struct device *dev, s16 *val)
+static int adxrs450_spi_sensor_data(struct iio_dev *indio_dev, s16 *val)
{
- struct spi_message msg;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct adxrs450_state *st = iio_priv(indio_dev);
int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 4,
- .cs_change = 1,
- }, {
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 4,
- },
- };
mutex_lock(&st->buf_lock);
st->tx[0] = ADXRS450_SENSOR_DATA;
@@ -151,10 +114,13 @@ static int adxrs450_spi_sensor_data(struct device *dev, s16 *val)
st->tx[2] = 0;
st->tx[3] = 0;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_write(st->us, st->tx, 4);
+ if (ret) {
+ dev_err(&st->us->dev, "Problem while reading sensor data\n");
+ goto error_ret;
+ }
+
+ ret = spi_read(st->us, st->rx, 4);
if (ret) {
dev_err(&st->us->dev, "Problem while reading sensor data\n");
goto error_ret;
@@ -206,73 +172,12 @@ error_ret:
return ret;
}
-static ssize_t adxrs450_read_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 t;
- ret = adxrs450_spi_read_reg_16(dev,
- ADXRS450_TEMP1,
- &t);
- if (ret)
- return ret;
- return sprintf(buf, "%d\n", t >> 7);
-}
-
-static ssize_t adxrs450_read_quad(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- s16 t;
- ret = adxrs450_spi_read_reg_16(dev,
- ADXRS450_QUAD1,
- &t);
- if (ret)
- return ret;
- return sprintf(buf, "%d\n", t);
-}
-
-static ssize_t adxrs450_write_dnc(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- int ret;
- long val;
-
- ret = strict_strtol(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = adxrs450_spi_write_reg_16(dev,
- ADXRS450_DNC1,
- val & 0x3FF);
-error_ret:
- return ret ? ret : len;
-}
-
-static ssize_t adxrs450_read_sensor_data(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- s16 t;
-
- ret = adxrs450_spi_sensor_data(dev, &t);
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n", t);
-}
-
/* Recommended Startup Sequence by spec */
static int adxrs450_initial_setup(struct iio_dev *indio_dev)
{
u32 t;
u16 data;
int ret;
- struct device *dev = &indio_dev->dev;
struct adxrs450_state *st = iio_priv(indio_dev);
msleep(ADXRS450_STARTUP_DELAY*2);
@@ -305,23 +210,23 @@ static int adxrs450_initial_setup(struct iio_dev *indio_dev)
return -EIO;
}
- ret = adxrs450_spi_read_reg_16(dev, ADXRS450_FAULT1, &data);
+ ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_FAULT1, &data);
if (ret)
return ret;
if (data & 0x0fff) {
dev_err(&st->us->dev, "The device is not in normal status!\n");
return -EINVAL;
}
- ret = adxrs450_spi_read_reg_16(dev, ADXRS450_PID1, &data);
+ ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_PID1, &data);
if (ret)
return ret;
dev_info(&st->us->dev, "The Part ID is 0x%x\n", data);
- ret = adxrs450_spi_read_reg_16(dev, ADXRS450_SNL, &data);
+ ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_SNL, &data);
if (ret)
return ret;
t = data;
- ret = adxrs450_spi_read_reg_16(dev, ADXRS450_SNH, &data);
+ ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_SNH, &data);
if (ret)
return ret;
t |= data << 16;
@@ -330,34 +235,96 @@ static int adxrs450_initial_setup(struct iio_dev *indio_dev)
return 0;
}
-static IIO_DEV_ATTR_GYRO_Z(adxrs450_read_sensor_data, 0);
-static IIO_DEV_ATTR_TEMP_RAW(adxrs450_read_temp);
-static IIO_DEV_ATTR_GYRO_Z_QUADRATURE_CORRECTION(adxrs450_read_quad, 0);
-static IIO_DEV_ATTR_GYRO_Z_CALIBBIAS(S_IWUSR,
- NULL, adxrs450_write_dnc, 0);
-static IIO_CONST_ATTR(name, "adxrs450");
-
-static struct attribute *adxrs450_attributes[] = {
- &iio_dev_attr_gyro_z_raw.dev_attr.attr,
- &iio_dev_attr_temp_raw.dev_attr.attr,
- &iio_dev_attr_gyro_z_quadrature_correction_raw.dev_attr.attr,
- &iio_dev_attr_gyro_z_calibbias.dev_attr.attr,
- &iio_const_attr_name.dev_attr.attr,
- NULL
-};
+static int adxrs450_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ int ret;
+ switch (mask) {
+ case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ ret = adxrs450_spi_write_reg_16(indio_dev,
+ ADXRS450_DNC1,
+ val & 0x3FF);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
-static const struct attribute_group adxrs450_attribute_group = {
- .attrs = adxrs450_attributes,
+static int adxrs450_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ int ret;
+ s16 t;
+ u16 ut;
+ switch (mask) {
+ case 0:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ ret = adxrs450_spi_sensor_data(indio_dev, &t);
+ if (ret)
+ break;
+ *val = t;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_TEMP:
+ ret = adxrs450_spi_read_reg_16(indio_dev,
+ ADXRS450_TEMP1, &ut);
+ if (ret)
+ break;
+ *val = ut;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ case (1 << IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE):
+ ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_QUAD1, &t);
+ if (ret)
+ break;
+ *val = t;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct iio_chan_spec adxrs450_channels[] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE)
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ }
};
static const struct iio_info adxrs450_info = {
- .attrs = &adxrs450_attribute_group,
.driver_module = THIS_MODULE,
+ .read_raw = &adxrs450_read_raw,
+ .write_raw = &adxrs450_write_raw,
};
static int __devinit adxrs450_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct adxrs450_state *st;
struct iio_dev *indio_dev;
@@ -376,24 +343,23 @@ static int __devinit adxrs450_probe(struct spi_device *spi)
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &adxrs450_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = adxrs450_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adxrs450_channels);
+ indio_dev->name = spi->dev.driver->name;
ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
- regdone = 1;
/* Get the device into a sane initial state */
ret = adxrs450_initial_setup(indio_dev);
if (ret)
goto error_initial;
return 0;
-
error_initial:
+ iio_device_unregister(indio_dev);
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
@@ -402,6 +368,7 @@ error_ret:
static int adxrs450_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
+ iio_free_device(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/gyro/gyro.h b/drivers/staging/iio/gyro/gyro.h
deleted file mode 100644
index b5495613407..00000000000
--- a/drivers/staging/iio/gyro/gyro.h
+++ /dev/null
@@ -1,85 +0,0 @@
-
-#include "../sysfs.h"
-
-/* Gyroscope types of attribute */
-
-#define IIO_CONST_ATTR_GYRO_OFFSET(_string) \
- IIO_CONST_ATTR(gyro_offset, _string)
-
-#define IIO_DEV_ATTR_GYRO_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_X_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_x_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_Y_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_y_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_Z_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_z_offset, _mode, _show, _store, _addr)
-
-#define IIO_CONST_ATTR_GYRO_SCALE(_string) \
- IIO_CONST_ATTR(gyro_scale, _string)
-
-#define IIO_DEV_ATTR_GYRO_SCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_scale, S_IRUGO, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_X_SCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_x_scale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_Y_SCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_y_scale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_Z_SCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_z_scale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_CALIBBIAS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_calibbias, S_IRUGO, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_X_CALIBBIAS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_x_calibbias, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_Y_CALIBBIAS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_y_calibbias, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_Z_CALIBBIAS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_z_calibbias, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_CALIBSCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_calibscale, S_IRUGO, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_X_CALIBSCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_x_calibscale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_Y_CALIBSCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_y_calibscale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_Z_CALIBSCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(gyro_z_calibscale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_GYRO_Z_QUADRATURE_CORRECTION(_show, _addr) \
- IIO_DEVICE_ATTR(gyro_z_quadrature_correction_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_GYRO(_show, _addr) \
- IIO_DEVICE_ATTR(gyro_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_GYRO_X(_show, _addr) \
- IIO_DEVICE_ATTR(gyro_x_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_GYRO_Y(_show, _addr) \
- IIO_DEVICE_ATTR(gyro_y_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_GYRO_Z(_show, _addr) \
- IIO_DEVICE_ATTR(gyro_z_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ANGL(_show, _addr) \
- IIO_DEVICE_ATTR(angl_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ANGL_X(_show, _addr) \
- IIO_DEVICE_ATTR(angl_x_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ANGL_Y(_show, _addr) \
- IIO_DEVICE_ATTR(angl_y_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ANGL_Z(_show, _addr) \
- IIO_DEVICE_ATTR(angl_z_raw, S_IRUGO, _show, NULL, _addr)
diff --git a/drivers/staging/iio/iio.h b/drivers/staging/iio/iio.h
index 7a6ce4d0fb7..f3d88cd7e8a 100644
--- a/drivers/staging/iio/iio.h
+++ b/drivers/staging/iio/iio.h
@@ -1,3 +1,4 @@
+
/* The industrial I/O core
*
* Copyright (c) 2008 Jonathan Cameron
@@ -12,9 +13,6 @@
#include <linux/device.h>
#include <linux/cdev.h>
-#include <linux/irq.h>
-#include "sysfs.h"
-#include "chrdev.h"
/* IIO TODO LIST */
/*
@@ -22,20 +20,18 @@
* Currently assumes nano seconds.
*/
-/* Event interface flags */
-#define IIO_BUSY_BIT_POS 1
+enum iio_data_type {
+ IIO_RAW,
+ IIO_PROCESSED,
+};
-/* naughty temporary hack to match these against the event version
- - need to flattern these together */
enum iio_chan_type {
/* real channel types */
- IIO_IN,
- IIO_OUT,
+ IIO_VOLTAGE,
IIO_CURRENT,
IIO_POWER,
IIO_ACCEL,
- IIO_IN_DIFF,
- IIO_GYRO,
+ IIO_ANGL_VEL,
IIO_MAGN,
IIO_LIGHT,
IIO_INTENSITY,
@@ -45,21 +41,25 @@ enum iio_chan_type {
IIO_ROT,
IIO_ANGL,
IIO_TIMESTAMP,
+ IIO_CAPACITANCE,
};
-#define IIO_MOD_X 0
-#define IIO_MOD_LIGHT_BOTH 0
-#define IIO_MOD_Y 1
-#define IIO_MOD_LIGHT_IR 1
-#define IIO_MOD_Z 2
-#define IIO_MOD_X_AND_Y 3
-#define IIO_MOD_X_ANX_Z 4
-#define IIO_MOD_Y_AND_Z 5
-#define IIO_MOD_X_AND_Y_AND_Z 6
-#define IIO_MOD_X_OR_Y 7
-#define IIO_MOD_X_OR_Z 8
-#define IIO_MOD_Y_OR_Z 9
-#define IIO_MOD_X_OR_Y_OR_Z 10
+enum iio_modifier {
+ IIO_NO_MOD,
+ IIO_MOD_X,
+ IIO_MOD_Y,
+ IIO_MOD_Z,
+ IIO_MOD_X_AND_Y,
+ IIO_MOD_X_ANX_Z,
+ IIO_MOD_Y_AND_Z,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_MOD_X_OR_Y,
+ IIO_MOD_X_OR_Z,
+ IIO_MOD_Y_OR_Z,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_MOD_LIGHT_BOTH,
+ IIO_MOD_LIGHT_IR,
+};
/* Could add the raw attributes as well - allowing buffer only devices */
enum iio_chan_info_enum {
@@ -75,6 +75,16 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_PEAK_SEPARATE,
IIO_CHAN_INFO_PEAK_SCALE_SHARED,
IIO_CHAN_INFO_PEAK_SCALE_SEPARATE,
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED,
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE,
+ IIO_CHAN_INFO_AVERAGE_RAW_SHARED,
+ IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE,
+};
+
+enum iio_endian {
+ IIO_CPU,
+ IIO_BE,
+ IIO_LE,
};
/**
@@ -92,6 +102,7 @@ enum iio_chan_info_enum {
* storage_bits: Realbits + padding
* shift: Shift right by this before masking out
* realbits.
+ * endianness: little or big endian
* @info_mask: What information is to be exported about this channel.
* This includes calibbias, scale etc.
* @event_mask: What events can this channel produce.
@@ -108,6 +119,7 @@ enum iio_chan_info_enum {
* the value in channel will be suppressed for attribute
* but not for event codes. Typically set it to 0 when
* the index is false.
+ * @differential: Channel is differential.
*/
struct iio_chan_spec {
enum iio_chan_type type;
@@ -120,33 +132,26 @@ struct iio_chan_spec {
u8 realbits;
u8 storagebits;
u8 shift;
+ enum iio_endian endianness;
} scan_type;
- const long info_mask;
- const long event_mask;
- const char *extend_name;
+ long info_mask;
+ long event_mask;
+ char *extend_name;
unsigned processed_val:1;
unsigned modified:1;
unsigned indexed:1;
+ unsigned output:1;
+ unsigned differential:1;
};
-/* Meant for internal use only */
-void __iio_device_attr_deinit(struct device_attribute *dev_attr);
-int __iio_device_attr_init(struct device_attribute *dev_attr,
- const char *postfix,
- struct iio_chan_spec const *chan,
- ssize_t (*readfunc)(struct device *dev,
- struct device_attribute *attr,
- char *buf),
- ssize_t (*writefunc)(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len),
- bool generic);
+
#define IIO_ST(si, rb, sb, sh) \
{ .sign = si, .realbits = rb, .storagebits = sb, .shift = sh }
-#define IIO_CHAN(_type, _mod, _indexed, _proc, _name, _chan, _chan2, \
+/* Macro assumes input channels */
+#define IIO_CHAN(_type, _mod, _indexed, _proc, _name, _chan, _chan2, \
_inf_mask, _address, _si, _stype, _event_mask) \
{ .type = _type, \
+ .output = 0, \
.modified = _mod, \
.indexed = _indexed, \
.processed_val = _proc, \
@@ -163,20 +168,6 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
{ .type = IIO_TIMESTAMP, .channel = -1, \
.scan_index = _si, .scan_type = IIO_ST('s', 64, 64, 0) }
-int __iio_add_chan_devattr(const char *postfix,
- const char *group,
- struct iio_chan_spec const *chan,
- ssize_t (*func)(struct device *dev,
- struct device_attribute *attr,
- char *buf),
- ssize_t (*writefunc)(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len),
- int mask,
- bool generic,
- struct device *dev,
- struct list_head *attr_list);
/**
* iio_get_time_ns() - utility function to get a time stamp for events etc
**/
@@ -194,10 +185,11 @@ static inline s64 iio_get_time_ns(void)
/* Device operating modes */
#define INDIO_DIRECT_MODE 0x01
-#define INDIO_RING_TRIGGERED 0x02
-#define INDIO_RING_HARDWARE_BUFFER 0x08
+#define INDIO_BUFFER_TRIGGERED 0x02
+#define INDIO_BUFFER_HARDWARE 0x08
-#define INDIO_ALL_RING_MODES (INDIO_RING_TRIGGERED | INDIO_RING_HARDWARE_BUFFER)
+#define INDIO_ALL_BUFFER_MODES \
+ (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE)
/* Vast majority of this is set by the industrialio subsystem on a
* call to iio_device_register. */
@@ -206,12 +198,12 @@ static inline s64 iio_get_time_ns(void)
#define IIO_VAL_INT_PLUS_NANO 3
struct iio_trigger; /* forward declaration */
+struct iio_dev;
/**
* struct iio_info - constant information about device
* @driver_module: module structure used to ensure correct
* ownership of chrdevs etc
- * @num_interrupt_lines:number of physical interrupt lines from device
* @event_attrs: event control attributes
* @attrs: general purpose device attributes
* @read_raw: function to request a value from the device.
@@ -235,7 +227,6 @@ struct iio_trigger; /* forward declaration */
**/
struct iio_info {
struct module *driver_module;
- int num_interrupt_lines;
struct attribute_group *event_attrs;
const struct attribute_group *attrs;
@@ -256,17 +247,17 @@ struct iio_info {
long mask);
int (*read_event_config)(struct iio_dev *indio_dev,
- int event_code);
+ u64 event_code);
int (*write_event_config)(struct iio_dev *indio_dev,
- int event_code,
+ u64 event_code,
int state);
int (*read_event_value)(struct iio_dev *indio_dev,
- int event_code,
+ u64 event_code,
int *val);
int (*write_event_value)(struct iio_dev *indio_dev,
- int event_code,
+ u64 event_code,
int val);
int (*validate_trigger)(struct iio_dev *indio_dev,
struct iio_trigger *trig);
@@ -276,110 +267,92 @@ struct iio_info {
/**
* struct iio_dev - industrial I/O device
* @id: [INTERN] used to identify device internally
- * @dev_data: [DRIVER] device specific data
* @modes: [DRIVER] operating modes supported by device
* @currentmode: [DRIVER] current operating mode
* @dev: [DRIVER] device structure, should be assigned a parent
* and owner
- * @event_interfaces: [INTERN] event chrdevs associated with interrupt lines
- * @ring: [DRIVER] any ring buffer present
+ * @event_interface: [INTERN] event chrdevs associated with interrupt lines
+ * @buffer: [DRIVER] any buffer present
* @mlock: [INTERN] lock used to prevent simultaneous device state
* changes
* @available_scan_masks: [DRIVER] optional array of allowed bitmasks
- * @trig: [INTERN] current device trigger (ring buffer modes)
+ * @masklength: [INTERN] the length of the mask established from
+ * channels
+ * @trig: [INTERN] current device trigger (buffer modes)
* @pollfunc: [DRIVER] function run on trigger being received
* @channels: [DRIVER] channel specification structure table
* @num_channels: [DRIVER] number of chanels specified in @channels.
* @channel_attr_list: [INTERN] keep track of automatically created channel
- * attributes.
+ * attributes
+ * @chan_attr_group: [INTERN] group for all attrs in base directory
* @name: [DRIVER] name of the device.
+ * @info: [DRIVER] callbacks and constant info from driver
+ * @chrdev: [INTERN] associated character device
+ * @groups: [INTERN] attribute groups
+ * @groupcounter: [INTERN] index of next attribute group
**/
struct iio_dev {
int id;
- void *dev_data;
+
int modes;
int currentmode;
struct device dev;
- struct iio_event_interface *event_interfaces;
+ struct iio_event_interface *event_interface;
- struct iio_ring_buffer *ring;
+ struct iio_buffer *buffer;
struct mutex mlock;
- u32 *available_scan_masks;
+ unsigned long *available_scan_masks;
+ unsigned masklength;
struct iio_trigger *trig;
struct iio_poll_func *pollfunc;
- struct iio_chan_spec const *channels;
- int num_channels;
-
- struct list_head channel_attr_list;
- const char *name;
- const struct iio_info *info;
+ struct iio_chan_spec const *channels;
+ int num_channels;
+
+ struct list_head channel_attr_list;
+ struct attribute_group chan_attr_group;
+ const char *name;
+ const struct iio_info *info;
+ struct cdev chrdev;
+#define IIO_MAX_GROUPS 6
+ const struct attribute_group *groups[IIO_MAX_GROUPS + 1];
+ int groupcounter;
};
/**
* iio_device_register() - register a device with the IIO subsystem
- * @dev_info: Device structure filled by the device driver
+ * @indio_dev: Device structure filled by the device driver
**/
-int iio_device_register(struct iio_dev *dev_info);
+int iio_device_register(struct iio_dev *indio_dev);
/**
* iio_device_unregister() - unregister a device from the IIO subsystem
- * @dev_info: Device structure representing the device.
+ * @indio_dev: Device structure representing the device.
**/
-void iio_device_unregister(struct iio_dev *dev_info);
+void iio_device_unregister(struct iio_dev *indio_dev);
/**
* iio_push_event() - try to add event to the list for userspace reading
- * @dev_info: IIO device structure
- * @ev_line: Which event line (hardware interrupt)
+ * @indio_dev: IIO device structure
* @ev_code: What event
* @timestamp: When the event occurred
**/
-int iio_push_event(struct iio_dev *dev_info,
- int ev_line,
- int ev_code,
- s64 timestamp);
-
-/* Used to distinguish between bipolar and unipolar scan elemenents.
- * Whilst this may seem obvious, we may well want to change the representation
- * in the future!*/
-#define IIO_SIGNED(a) -(a)
-#define IIO_UNSIGNED(a) (a)
-
-extern dev_t iio_devt;
+int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
+
extern struct bus_type iio_bus_type;
/**
* iio_put_device() - reference counted deallocation of struct device
* @dev: the iio_device containing the device
**/
-static inline void iio_put_device(struct iio_dev *dev)
+static inline void iio_put_device(struct iio_dev *indio_dev)
{
- if (dev)
- put_device(&dev->dev);
+ if (indio_dev)
+ put_device(&indio_dev->dev);
};
-/**
- * to_iio_dev() - get iio_dev for which we have the struct device
- * @d: the struct device
- **/
-static inline struct iio_dev *to_iio_dev(struct device *d)
-{
- return container_of(d, struct iio_dev, dev);
-};
-
-/**
- * iio_dev_get_devdata() - helper function gets device specific data
- * @d: the iio_dev associated with the device
- **/
-static inline void *iio_dev_get_devdata(struct iio_dev *d)
-{
- return d->dev_data;
-}
-
-
/* Can we make this smaller? */
#define IIO_ALIGN L1_CACHE_BYTES
/**
@@ -388,9 +361,9 @@ static inline void *iio_dev_get_devdata(struct iio_dev *d)
**/
struct iio_dev *iio_allocate_device(int sizeof_priv);
-static inline void *iio_priv(const struct iio_dev *dev)
+static inline void *iio_priv(const struct iio_dev *indio_dev)
{
- return (char *)dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN);
+ return (char *)indio_dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN);
}
static inline struct iio_dev *iio_priv_to_dev(void *priv)
@@ -403,37 +376,16 @@ static inline struct iio_dev *iio_priv_to_dev(void *priv)
* iio_free_device() - free an iio_dev from a driver
* @dev: the iio_dev associated with the device
**/
-void iio_free_device(struct iio_dev *dev);
-
-/**
- * iio_put() - internal module reference count reduce
- **/
-void iio_put(void);
+void iio_free_device(struct iio_dev *indio_dev);
/**
- * iio_get() - internal module reference count increase
+ * iio_buffer_enabled() - helper function to test if the buffer is enabled
+ * @indio_dev: IIO device info structure for device
**/
-void iio_get(void);
-
-/**
- * iio_device_get_chrdev_minor() - get an unused minor number
- **/
-int iio_device_get_chrdev_minor(void);
-void iio_device_free_chrdev_minor(int val);
-
-/**
- * iio_ring_enabled() - helper function to test if any form of ring is enabled
- * @dev_info: IIO device info structure for device
- **/
-static inline bool iio_ring_enabled(struct iio_dev *dev_info)
+static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
{
- return dev_info->currentmode
- & (INDIO_RING_TRIGGERED
- | INDIO_RING_HARDWARE_BUFFER);
+ return indio_dev->currentmode
+ & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE);
};
-struct ida;
-
-int iio_get_new_ida_val(struct ida *this_ida);
-void iio_free_ida_val(struct ida *this_ida, int id);
#endif /* _INDUSTRIAL_IO_H_ */
diff --git a/drivers/staging/iio/iio_core.h b/drivers/staging/iio/iio_core.h
new file mode 100644
index 00000000000..36159e0dbfc
--- /dev/null
+++ b/drivers/staging/iio/iio_core.h
@@ -0,0 +1,63 @@
+/* The industrial I/O core function defs.
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * These definitions are meant for use only within the IIO core, not individual
+ * drivers.
+ */
+
+#ifndef _IIO_CORE_H_
+#define _IIO_CORE_H_
+
+int __iio_add_chan_devattr(const char *postfix,
+ struct iio_chan_spec const *chan,
+ ssize_t (*func)(struct device *dev,
+ struct device_attribute *attr,
+ char *buf),
+ ssize_t (*writefunc)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len),
+ u64 mask,
+ bool generic,
+ struct device *dev,
+ struct list_head *attr_list);
+
+/* Event interface flags */
+#define IIO_BUSY_BIT_POS 1
+
+#ifdef CONFIG_IIO_BUFFER
+struct poll_table_struct;
+
+int iio_chrdev_buffer_open(struct iio_dev *indio_dev);
+void iio_chrdev_buffer_release(struct iio_dev *indio_dev);
+
+unsigned int iio_buffer_poll(struct file *filp,
+ struct poll_table_struct *wait);
+ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
+ size_t n, loff_t *f_ps);
+
+
+#define iio_buffer_poll_addr (&iio_buffer_poll)
+#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
+
+#else
+
+static inline int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
+{
+ return -EINVAL;
+}
+
+static inline void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
+{}
+
+#define iio_buffer_poll_addr NULL
+#define iio_buffer_read_first_n_outer_addr NULL
+
+#endif
+
+#endif
diff --git a/drivers/staging/iio/iio_core_trigger.h b/drivers/staging/iio/iio_core_trigger.h
new file mode 100644
index 00000000000..523c288b776
--- /dev/null
+++ b/drivers/staging/iio/iio_core_trigger.h
@@ -0,0 +1,47 @@
+
+/* The industrial I/O core, trigger consumer handling functions
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifdef CONFIG_IIO_TRIGGER
+/**
+ * iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
+ * @indio_dev: iio_dev associated with the device that will consume the trigger
+ **/
+
+int iio_device_register_trigger_consumer(struct iio_dev *indio_dev);
+
+/**
+ * iio_device_unregister_trigger_consumer() - reverse the registration process
+ * @indio_dev: iio_dev associated with the device that consumed the trigger
+ **/
+void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev);
+
+#else
+
+/**
+ * iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
+ * @indio_dev: iio_dev associated with the device that will consume the trigger
+ **/
+static int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
+{
+ return 0;
+};
+
+/**
+ * iio_device_unregister_trigger_consumer() - reverse the registration process
+ * @indio_dev: iio_dev associated with the device that consumed the trigger
+ **/
+static void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
+{
+};
+
+#endif /* CONFIG_TRIGGER_CONSUMER */
+
+
+
diff --git a/drivers/staging/iio/iio_dummy_evgen.c b/drivers/staging/iio/iio_dummy_evgen.c
new file mode 100644
index 00000000000..da657d10471
--- /dev/null
+++ b/drivers/staging/iio/iio_dummy_evgen.c
@@ -0,0 +1,217 @@
+/**
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Companion module to the iio simple dummy example driver.
+ * The purpose of this is to generate 'fake' event interrupts thus
+ * allowing that driver's code to be as close as possible to that of
+ * a normal driver talking to hardware. The approach used here
+ * is not intended to be general and just happens to work for this
+ * particular use case.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/sysfs.h>
+
+#include "iio_dummy_evgen.h"
+#include "iio.h"
+#include "sysfs.h"
+
+/* Fiddly bit of faking and irq without hardware */
+#define IIO_EVENTGEN_NO 10
+/**
+ * struct iio_dummy_evgen - evgen state
+ * @chip: irq chip we are faking
+ * @base: base of irq range
+ * @enabled: mask of which irqs are enabled
+ * @inuse: mask of which irqs actually have anyone connected
+ * @lock: protect the evgen state
+ */
+struct iio_dummy_eventgen {
+ struct irq_chip chip;
+ int base;
+ bool enabled[IIO_EVENTGEN_NO];
+ bool inuse[IIO_EVENTGEN_NO];
+ struct mutex lock;
+};
+
+/* We can only ever have one instance of this 'device' */
+static struct iio_dummy_eventgen *iio_evgen;
+static const char *iio_evgen_name = "iio_dummy_evgen";
+
+static void iio_dummy_event_irqmask(struct irq_data *d)
+{
+ struct irq_chip *chip = irq_data_get_irq_chip(d);
+ struct iio_dummy_eventgen *evgen =
+ container_of(chip, struct iio_dummy_eventgen, chip);
+
+ evgen->enabled[d->irq - evgen->base] = false;
+}
+
+static void iio_dummy_event_irqunmask(struct irq_data *d)
+{
+ struct irq_chip *chip = irq_data_get_irq_chip(d);
+ struct iio_dummy_eventgen *evgen =
+ container_of(chip, struct iio_dummy_eventgen, chip);
+
+ evgen->enabled[d->irq - evgen->base] = true;
+}
+
+static int iio_dummy_evgen_create(void)
+{
+ int ret, i;
+
+ iio_evgen = kzalloc(sizeof(*iio_evgen), GFP_KERNEL);
+ if (iio_evgen == NULL)
+ return -ENOMEM;
+
+ iio_evgen->base = irq_alloc_descs(-1, 0, IIO_EVENTGEN_NO, 0);
+ if (iio_evgen->base < 0) {
+ ret = iio_evgen->base;
+ kfree(iio_evgen);
+ return ret;
+ }
+ iio_evgen->chip.name = iio_evgen_name;
+ iio_evgen->chip.irq_mask = &iio_dummy_event_irqmask;
+ iio_evgen->chip.irq_unmask = &iio_dummy_event_irqunmask;
+ for (i = 0; i < IIO_EVENTGEN_NO; i++) {
+ irq_set_chip(iio_evgen->base + i, &iio_evgen->chip);
+ irq_set_handler(iio_evgen->base + i, &handle_simple_irq);
+ irq_modify_status(iio_evgen->base + i,
+ IRQ_NOREQUEST | IRQ_NOAUTOEN,
+ IRQ_NOPROBE);
+ }
+ mutex_init(&iio_evgen->lock);
+ return 0;
+}
+
+/**
+ * iio_dummy_evgen_get_irq() - get an evgen provided irq for a device
+ *
+ * This function will give a free allocated irq to a client device.
+ * That irq can then be caused to 'fire' by using the associated sysfs file.
+ */
+int iio_dummy_evgen_get_irq(void)
+{
+ int i, ret = 0;
+ mutex_lock(&iio_evgen->lock);
+ for (i = 0; i < IIO_EVENTGEN_NO; i++)
+ if (iio_evgen->inuse[i] == false) {
+ ret = iio_evgen->base + i;
+ iio_evgen->inuse[i] = true;
+ break;
+ }
+ mutex_unlock(&iio_evgen->lock);
+ if (i == IIO_EVENTGEN_NO)
+ return -ENOMEM;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_irq);
+
+/**
+ * iio_dummy_evgen_release_irq() - give the irq back.
+ * @irq: irq being returned to the pool
+ *
+ * Used by client driver instances to give the irqs back when they disconnect
+ */
+int iio_dummy_evgen_release_irq(int irq)
+{
+ mutex_lock(&iio_evgen->lock);
+ iio_evgen->inuse[irq - iio_evgen->base] = false;
+ mutex_unlock(&iio_evgen->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_dummy_evgen_release_irq);
+
+static void iio_dummy_evgen_free(void)
+{
+ irq_free_descs(iio_evgen->base, IIO_EVENTGEN_NO);
+ kfree(iio_evgen);
+}
+
+static void iio_evgen_release(struct device *dev)
+{
+ iio_dummy_evgen_free();
+}
+
+static ssize_t iio_evgen_poke(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ if (iio_evgen->enabled[this_attr->address])
+ handle_nested_irq(iio_evgen->base + this_attr->address);
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(poke_ev0, S_IWUSR, NULL, &iio_evgen_poke, 0);
+static IIO_DEVICE_ATTR(poke_ev1, S_IWUSR, NULL, &iio_evgen_poke, 1);
+static IIO_DEVICE_ATTR(poke_ev2, S_IWUSR, NULL, &iio_evgen_poke, 2);
+static IIO_DEVICE_ATTR(poke_ev3, S_IWUSR, NULL, &iio_evgen_poke, 3);
+static IIO_DEVICE_ATTR(poke_ev4, S_IWUSR, NULL, &iio_evgen_poke, 4);
+static IIO_DEVICE_ATTR(poke_ev5, S_IWUSR, NULL, &iio_evgen_poke, 5);
+static IIO_DEVICE_ATTR(poke_ev6, S_IWUSR, NULL, &iio_evgen_poke, 6);
+static IIO_DEVICE_ATTR(poke_ev7, S_IWUSR, NULL, &iio_evgen_poke, 7);
+static IIO_DEVICE_ATTR(poke_ev8, S_IWUSR, NULL, &iio_evgen_poke, 8);
+static IIO_DEVICE_ATTR(poke_ev9, S_IWUSR, NULL, &iio_evgen_poke, 9);
+
+static struct attribute *iio_evgen_attrs[] = {
+ &iio_dev_attr_poke_ev0.dev_attr.attr,
+ &iio_dev_attr_poke_ev1.dev_attr.attr,
+ &iio_dev_attr_poke_ev2.dev_attr.attr,
+ &iio_dev_attr_poke_ev3.dev_attr.attr,
+ &iio_dev_attr_poke_ev4.dev_attr.attr,
+ &iio_dev_attr_poke_ev5.dev_attr.attr,
+ &iio_dev_attr_poke_ev6.dev_attr.attr,
+ &iio_dev_attr_poke_ev7.dev_attr.attr,
+ &iio_dev_attr_poke_ev8.dev_attr.attr,
+ &iio_dev_attr_poke_ev9.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group iio_evgen_group = {
+ .attrs = iio_evgen_attrs,
+};
+
+static const struct attribute_group *iio_evgen_groups[] = {
+ &iio_evgen_group,
+ NULL
+};
+
+static struct device iio_evgen_dev = {
+ .bus = &iio_bus_type,
+ .groups = iio_evgen_groups,
+ .release = &iio_evgen_release,
+};
+static __init int iio_dummy_evgen_init(void)
+{
+ int ret = iio_dummy_evgen_create();
+ if (ret < 0)
+ return ret;
+ device_initialize(&iio_evgen_dev);
+ dev_set_name(&iio_evgen_dev, "iio_evgen");
+ return device_add(&iio_evgen_dev);
+}
+module_init(iio_dummy_evgen_init);
+
+static __exit void iio_dummy_evgen_exit(void)
+{
+ device_unregister(&iio_evgen_dev);
+}
+module_exit(iio_dummy_evgen_exit);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
+MODULE_DESCRIPTION("IIO dummy driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/iio_dummy_evgen.h b/drivers/staging/iio/iio_dummy_evgen.h
new file mode 100644
index 00000000000..d8845e2288b
--- /dev/null
+++ b/drivers/staging/iio/iio_dummy_evgen.h
@@ -0,0 +1,2 @@
+int iio_dummy_evgen_get_irq(void);
+int iio_dummy_evgen_release_irq(int irq);
diff --git a/drivers/staging/iio/iio_simple_dummy.c b/drivers/staging/iio/iio_simple_dummy.c
new file mode 100644
index 00000000000..af0c99236d4
--- /dev/null
+++ b/drivers/staging/iio/iio_simple_dummy.c
@@ -0,0 +1,545 @@
+/**
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * A reference industrial I/O driver to illustrate the functionality available.
+ *
+ * There are numerous real drivers to illustrate the finer points.
+ * The purpose of this driver is to provide a driver with far more comments
+ * and explanatory notes than any 'real' driver would have.
+ * Anyone starting out writing an IIO driver should first make sure they
+ * understand all of this driver except those bits specifically marked
+ * as being present to allow us to 'fake' the presence of hardware.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include "iio.h"
+#include "sysfs.h"
+#include "buffer_generic.h"
+#include "iio_simple_dummy.h"
+
+/*
+ * A few elements needed to fake a bus for this driver
+ * Note instances parmeter controls how many of these
+ * dummy devices are registered.
+ */
+static unsigned instances = 1;
+module_param(instances, int, 0);
+
+/* Pointer array used to fake bus elements */
+static struct iio_dev **iio_dummy_devs;
+
+/* Fake a name for the part number, usually obtained from the id table */
+static const char *iio_dummy_part_number = "iio_dummy_part_no";
+
+/**
+ * struct iio_dummy_accel_calibscale - realworld to register mapping
+ * @val: first value in read_raw - here integer part.
+ * @val2: second value in read_raw etc - here micro part.
+ * @regval: register value - magic device specific numbers.
+ */
+struct iio_dummy_accel_calibscale {
+ int val;
+ int val2;
+ int regval; /* what would be written to hardware */
+};
+
+static const struct iio_dummy_accel_calibscale dummy_scales[] = {
+ { 0, 100, 0x8 }, /* 0.000100 */
+ { 0, 133, 0x7 }, /* 0.000133 */
+ { 733, 13, 0x9 }, /* 733.00013 */
+};
+
+/*
+ * iio_dummy_channels - Description of available channels
+ *
+ * This array of structures tells the IIO core about what the device
+ * actually provides for a given channel.
+ */
+static struct iio_chan_spec iio_dummy_channels[] = {
+ /* indexed ADC channel in_voltage0_raw etc */
+ {
+ .type = IIO_VOLTAGE,
+ /* Channel has a numeric index of 0 */
+ .indexed = 1,
+ .channel = 0,
+ /* What other information is available? */
+ .info_mask =
+ /*
+ * in_voltage0_offset
+ * Offset for userspace to apply prior to scale
+ * when converting to standard units (microvolts)
+ */
+ (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ /*
+ * in_voltage0_scale
+ * Multipler for userspace to apply post offset
+ * when converting to standard units (microvolts)
+ */
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ /* The ordering of elements in the buffer via an enum */
+ .scan_index = voltage0,
+ .scan_type = { /* Description of storage in buffer */
+ .sign = 'u', /* unsigned */
+ .realbits = 13, /* 13 bits */
+ .storagebits = 16, /* 16 bits used for storage */
+ .shift = 0, /* zero shift */
+ },
+#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
+ /*
+ * simple event - triggered when value rises above
+ * a threshold
+ */
+ .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
+ },
+ /* Differential ADC channel in_voltage1-voltage2_raw etc*/
+ {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ /*
+ * Indexing for differential channels uses channel
+ * for the positive part, channel2 for the negative.
+ */
+ .indexed = 1,
+ .channel = 1,
+ .channel2 = 2,
+ .info_mask =
+ /*
+ * in_voltage-voltage_scale
+ * Shared version of scale - shared by differential
+ * input channels of type IIO_VOLTAGE.
+ */
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .scan_index = diffvoltage1m2,
+ .scan_type = { /* Description of storage in buffer */
+ .sign = 's', /* signed */
+ .realbits = 12, /* 12 bits */
+ .storagebits = 16, /* 16 bits used for storage */
+ .shift = 0, /* zero shift */
+ },
+ },
+ /* Differential ADC channel in_voltage3-voltage4_raw etc*/
+ {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 3,
+ .channel2 = 4,
+ .info_mask =
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .scan_index = diffvoltage3m4,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 11,
+ .storagebits = 16,
+ .shift = 0,
+ },
+ },
+ /*
+ * 'modified' (i.e. axis specified) acceleration channel
+ * in_accel_z_raw
+ */
+ {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ /* Channel 2 is use for modifiers */
+ .channel2 = IIO_MOD_X,
+ .info_mask =
+ /*
+ * Internal bias correction value. Applied
+ * by the hardware or driver prior to userspace
+ * seeing the readings. Typically part of hardware
+ * calibration.
+ */
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ .scan_index = accelx,
+ .scan_type = { /* Description of storage in buffer */
+ .sign = 's', /* signed */
+ .realbits = 16, /* 12 bits */
+ .storagebits = 16, /* 16 bits used for storage */
+ .shift = 0, /* zero shift */
+ },
+ },
+ /*
+ * Convenience macro for timestamps. 4 is the index in
+ * the buffer.
+ */
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+ /* DAC channel out_voltage0_raw */
+ {
+ .type = IIO_VOLTAGE,
+ .output = 1,
+ .indexed = 1,
+ .channel = 0,
+ },
+};
+
+/**
+ * iio_dummy_read_raw() - data read function.
+ * @indio_dev: the struct iio_dev associated with this device instance
+ * @chan: the channel whose data is to be read
+ * @val: first element of returned value (typically INT)
+ * @val2: second element of returned value (typically MICRO)
+ * @mask: what we actually want to read. 0 is the channel, everything else
+ * is as per the info_mask in iio_chan_spec.
+ */
+static int iio_dummy_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ mutex_lock(&st->lock);
+ switch (mask) {
+ case 0: /* magic value - channel value read */
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output) {
+ /* Set integer part to cached value */
+ *val = st->dac_val;
+ ret = IIO_VAL_INT;
+ } else if (chan->differential) {
+ if (chan->channel == 1)
+ *val = st->differential_adc_val[0];
+ else
+ *val = st->differential_adc_val[1];
+ ret = IIO_VAL_INT;
+ } else {
+ *val = st->single_ended_adc_val;
+ ret = IIO_VAL_INT;
+ }
+ break;
+ case IIO_ACCEL:
+ *val = st->accel_val;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ break;
+ }
+ break;
+ case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ /* only single ended adc -> 7 */
+ *val = 7;
+ ret = IIO_VAL_INT;
+ break;
+ case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ /* only single ended adc -> 0.001333 */
+ *val = 0;
+ *val2 = 1333;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ /* all differential adc channels -> 0.000001344 */
+ *val = 0;
+ *val2 = 1344;
+ ret = IIO_VAL_INT_PLUS_NANO;
+ break;
+ case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ /* only the acceleration axis - read from cache */
+ *val = st->accel_calibbias;
+ ret = IIO_VAL_INT;
+ break;
+ case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ *val = st->accel_calibscale->val;
+ *val2 = st->accel_calibscale->val2;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+/**
+ * iio_dummy_write_raw() - data write function.
+ * @indio_dev: the struct iio_dev associated with this device instance
+ * @chan: the channel whose data is to be read
+ * @val: first element of returned value (typically INT)
+ * @val2: second element of returned value (typically MICRO)
+ * @mask: what we actually want to read. 0 is the channel, everything else
+ * is as per the info_mask in iio_chan_spec.
+ *
+ * Note that all raw writes are assumed IIO_VAL_INT and info mask elements
+ * are assumed to be IIO_INT_PLUS_MICRO unless the callback write_raw_get_fmt
+ * in struct iio_info is provided by the driver.
+ */
+static int iio_dummy_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ int i;
+ int ret = 0;
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case 0:
+ if (chan->output == 0)
+ return -EINVAL;
+
+ /* Locking not required as writing single value */
+ mutex_lock(&st->lock);
+ st->dac_val = val;
+ mutex_unlock(&st->lock);
+ return 0;
+ case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ mutex_lock(&st->lock);
+ /* Compare against table - hard matching here */
+ for (i = 0; i < ARRAY_SIZE(dummy_scales); i++)
+ if (val == dummy_scales[i].val &&
+ val2 == dummy_scales[i].val2)
+ break;
+ if (i == ARRAY_SIZE(dummy_scales))
+ ret = -EINVAL;
+ else
+ st->accel_calibscale = &dummy_scales[i];
+ mutex_unlock(&st->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Device type specific information.
+ */
+static const struct iio_info iio_dummy_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &iio_dummy_read_raw,
+ .write_raw = &iio_dummy_write_raw,
+#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
+ .read_event_config = &iio_simple_dummy_read_event_config,
+ .write_event_config = &iio_simple_dummy_write_event_config,
+ .read_event_value = &iio_simple_dummy_read_event_value,
+ .write_event_value = &iio_simple_dummy_write_event_value,
+#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
+};
+
+/**
+ * iio_dummy_init_device() - device instance specific init
+ * @indio_dev: the iio device structure
+ *
+ * Most drivers have one of these to set up default values,
+ * reset the device to known state etc.
+ */
+static int iio_dummy_init_device(struct iio_dev *indio_dev)
+{
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+
+ st->dac_val = 0;
+ st->single_ended_adc_val = 73;
+ st->differential_adc_val[0] = 33;
+ st->differential_adc_val[1] = -34;
+ st->accel_val = 34;
+ st->accel_calibbias = -7;
+ st->accel_calibscale = &dummy_scales[0];
+
+ return 0;
+}
+
+/**
+ * iio_dummy_probe() - device instance probe
+ * @index: an id number for this instance.
+ *
+ * Arguments are bus type specific.
+ * I2C: iio_dummy_probe(struct i2c_client *client,
+ * const struct i2c_device_id *id)
+ * SPI: iio_dummy_probe(struct spi_device *spi)
+ */
+static int __devinit iio_dummy_probe(int index)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct iio_dummy_state *st;
+
+ /*
+ * Allocate an IIO device.
+ *
+ * This structure contains all generic state
+ * information about the device instance.
+ * It also has a region (accessed by iio_priv()
+ * for chip specific state information.
+ */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
+
+ iio_dummy_init_device(indio_dev);
+ /*
+ * With hardware: Set the parent device.
+ * indio_dev->dev.parent = &spi->dev;
+ * indio_dev->dev.parent = &client->dev;
+ */
+
+ /*
+ * Make the iio_dev struct available to remove function.
+ * Bus equivalents
+ * i2c_set_clientdata(client, indio_dev);
+ * spi_set_drvdata(spi, indio_dev);
+ */
+ iio_dummy_devs[index] = indio_dev;
+
+
+ /*
+ * Set the device name.
+ *
+ * This is typically a part number and obtained from the module
+ * id table.
+ * e.g. for i2c and spi:
+ * indio_dev->name = id->name;
+ * indio_dev->name = spi_get_device_id(spi)->name;
+ */
+ indio_dev->name = iio_dummy_part_number;
+
+ /* Provide description of available channels */
+ indio_dev->channels = iio_dummy_channels;
+ indio_dev->num_channels = ARRAY_SIZE(iio_dummy_channels);
+
+ /*
+ * Provide device type specific interface functions and
+ * constant data.
+ */
+ indio_dev->info = &iio_dummy_info;
+
+ /* Specify that device provides sysfs type interfaces */
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_simple_dummy_events_register(indio_dev);
+ if (ret < 0)
+ goto error_free_device;
+
+ /* Configure buffered capture support. */
+ ret = iio_simple_dummy_configure_buffer(indio_dev);
+ if (ret < 0)
+ goto error_unregister_events;
+
+ /*
+ * Register the channels with the buffer, but avoid the output
+ * channel being registered by reducing the number of channels by 1.
+ */
+ ret = iio_buffer_register(indio_dev, iio_dummy_channels, 5);
+ if (ret < 0)
+ goto error_unconfigure_buffer;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto error_unregister_buffer;
+
+ return 0;
+error_unregister_buffer:
+ iio_buffer_unregister(indio_dev);
+error_unconfigure_buffer:
+ iio_simple_dummy_unconfigure_buffer(indio_dev);
+error_unregister_events:
+ iio_simple_dummy_events_unregister(indio_dev);
+error_free_device:
+ /* Note free device should only be called, before registration
+ * has succeeded. */
+ iio_free_device(indio_dev);
+error_ret:
+ return ret;
+}
+
+/**
+ * iio_dummy_remove() - device instance removal function
+ * @index: device index.
+ *
+ * Parameters follow those of iio_dummy_probe for buses.
+ */
+static int iio_dummy_remove(int index)
+{
+ int ret;
+ /*
+ * Get a pointer to the device instance iio_dev structure
+ * from the bus subsystem. E.g.
+ * struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ * struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ */
+ struct iio_dev *indio_dev = iio_dummy_devs[index];
+
+
+ /* Unregister the device */
+ iio_device_unregister(indio_dev);
+
+ /* Device specific code to power down etc */
+
+ /* Buffered capture related cleanup */
+ iio_buffer_unregister(indio_dev);
+ iio_simple_dummy_unconfigure_buffer(indio_dev);
+
+ ret = iio_simple_dummy_events_unregister(indio_dev);
+ if (ret)
+ goto error_ret;
+
+ /* Free all structures */
+ iio_free_device(indio_dev);
+
+error_ret:
+ return ret;
+}
+
+/**
+ * iio_dummy_init() - device driver registration
+ *
+ * Varies depending on bus type of the device. As there is no device
+ * here, call probe directly. For information on device registration
+ * i2c:
+ * Documentation/i2c/writing-clients
+ * spi:
+ * Documentation/spi/spi-summary
+ */
+static __init int iio_dummy_init(void)
+{
+ int i, ret;
+ if (instances > 10) {
+ instances = 1;
+ return -EINVAL;
+ }
+ /* Fake a bus */
+ iio_dummy_devs = kzalloc(sizeof(*iio_dummy_devs)*instances, GFP_KERNEL);
+ /* Here we have no actual device so call probe */
+ for (i = 0; i < instances; i++) {
+ ret = iio_dummy_probe(i);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+module_init(iio_dummy_init);
+
+/**
+ * iio_dummy_exit() - device driver removal
+ *
+ * Varies depending on bus type of the device.
+ * As there is no device here, call remove directly.
+ */
+static __exit void iio_dummy_exit(void)
+{
+ int i;
+ for (i = 0; i < instances; i++)
+ iio_dummy_remove(i);
+ kfree(iio_dummy_devs);
+}
+module_exit(iio_dummy_exit);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
+MODULE_DESCRIPTION("IIO dummy driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/iio_simple_dummy.h b/drivers/staging/iio/iio_simple_dummy.h
new file mode 100644
index 00000000000..53975d916fc
--- /dev/null
+++ b/drivers/staging/iio/iio_simple_dummy.h
@@ -0,0 +1,108 @@
+/**
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Join together the various functionality of iio_simple_dummy driver
+ */
+
+#include <linux/kernel.h>
+
+struct iio_dummy_accel_calibscale;
+
+/**
+ * struct iio_dummy_state - device instance specific state.
+ * @dac_val: cache for dac value
+ * @single_ended_adc_val: cache for single ended adc value
+ * @differential_adc_val: cache for differential adc value
+ * @accel_val: cache for acceleration value
+ * @accel_calibbias: cache for acceleration calibbias
+ * @accel_calibscale: cache for acceleration calibscale
+ * @lock: lock to ensure state is consistent
+ * @event_irq: irq number for event line (faked)
+ * @event_val: cache for event theshold value
+ * @event_en: cache of whether event is enabled
+ */
+struct iio_dummy_state {
+ int dac_val;
+ int single_ended_adc_val;
+ int differential_adc_val[2];
+ int accel_val;
+ int accel_calibbias;
+ const struct iio_dummy_accel_calibscale *accel_calibscale;
+ struct mutex lock;
+#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
+ int event_irq;
+ int event_val;
+ bool event_en;
+#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
+};
+
+#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
+
+struct iio_dev;
+
+int iio_simple_dummy_read_event_config(struct iio_dev *indio_dev,
+ u64 event_code);
+
+int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
+ u64 event_code,
+ int state);
+
+int iio_simple_dummy_read_event_value(struct iio_dev *indio_dev,
+ u64 event_code,
+ int *val);
+
+int iio_simple_dummy_write_event_value(struct iio_dev *indio_dev,
+ u64 event_code,
+ int val);
+
+int iio_simple_dummy_events_register(struct iio_dev *indio_dev);
+int iio_simple_dummy_events_unregister(struct iio_dev *indio_dev);
+
+#else /* Stubs for when events are disabled at compile time */
+
+static inline int
+iio_simple_dummy_events_register(struct iio_dev *indio_dev)
+{
+ return 0;
+};
+
+static inline int
+iio_simple_dummy_events_unregister(struct iio_dev *indio_dev)
+{
+ return 0;
+};
+
+#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS*/
+
+/**
+ * enum iio_simple_dummy_scan_elements - scan index enum
+ * @voltage0: the single ended voltage channel
+ * @diffvoltage1m2: first differential channel
+ * @diffvoltage3m4: second differenial channel
+ * @accelx: acceleration channel
+ *
+ * Enum provides convenient numbering for the scan index.
+ */
+enum iio_simple_dummy_scan_elements {
+ voltage0,
+ diffvoltage1m2,
+ diffvoltage3m4,
+ accelx,
+};
+
+#ifdef CONFIG_IIO_SIMPLE_DUMMY_BUFFER
+int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev);
+void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev);
+#else
+static inline int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
+{
+ return 0;
+};
+static inline
+void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev)
+{};
+#endif /* CONFIG_IIO_SIMPLE_DUMMY_BUFFER */
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
new file mode 100644
index 00000000000..f0b36d25414
--- /dev/null
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -0,0 +1,206 @@
+/**
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Buffer handling elements of industrial I/O reference driver.
+ * Uses the kfifo buffer.
+ *
+ * To test without hardware use the sysfs trigger.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/bitmap.h>
+
+#include "iio.h"
+#include "trigger_consumer.h"
+#include "kfifo_buf.h"
+
+#include "iio_simple_dummy.h"
+
+/* Some fake data */
+
+static const s16 fakedata[] = {
+ [voltage0] = 7,
+ [diffvoltage1m2] = -33,
+ [diffvoltage3m4] = -2,
+ [accelx] = 344,
+};
+/**
+ * iio_simple_dummy_trigger_h() - the trigger handler function
+ * @irq: the interrupt number
+ * @p: private data - always a pointer to the poll func.
+ *
+ * This is the guts of buffered capture. On a trigger event occuring,
+ * if the pollfunc is attached then this handler is called as a threaded
+ * interrupt (and hence may sleep). It is responsible for grabbing data
+ * from the device and pushing it into the associated buffer.
+ */
+static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct iio_buffer *buffer = indio_dev->buffer;
+ int len = 0;
+ /*
+ * The datasize is obtained from the buffer. It was stored when
+ * the preenable setup function was called.
+ */
+ size_t datasize = buffer->access->get_bytes_per_datum(buffer);
+ u16 *data = kmalloc(datasize, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (buffer->scan_count) {
+ /*
+ * Three common options here:
+ * hardware scans: certain combinations of channels make
+ * up a fast read. The capture will consist of all of them.
+ * Hence we just call the grab data function and fill the
+ * buffer without processing.
+ * sofware scans: can be considered to be random access
+ * so efficient reading is just a case of minimal bus
+ * transactions.
+ * software culled hardware scans:
+ * occasionally a driver may process the nearest hardware
+ * scan to avoid storing elements that are not desired. This
+ * is the fidliest option by far.
+ * Here lets pretend we have random access. And the values are
+ * in the constant table fakedata.
+ */
+ int i, j;
+ for (i = 0, j = 0; i < buffer->scan_count; i++) {
+ j = find_next_bit(buffer->scan_mask,
+ indio_dev->masklength, j + 1);
+ /* random access read form the 'device' */
+ data[i] = fakedata[j];
+ len += 2;
+ }
+ }
+ /* Store a timestampe at an 8 byte boundary */
+ if (buffer->scan_timestamp)
+ *(s64 *)(((phys_addr_t)data + len
+ + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
+ = iio_get_time_ns();
+ buffer->access->store_to(buffer, (u8 *)data, pf->timestamp);
+
+ kfree(data);
+
+ /*
+ * Tell the core we are done with this trigger and ready for the
+ * next one.
+ */
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_buffer_setup_ops iio_simple_dummy_buffer_setup_ops = {
+ /*
+ * iio_sw_buffer_preenable:
+ * Generic function for equal sized ring elements + 64 bit timestamp
+ * Assumes that any combination of channels can be enabled.
+ * Typically replaced to implement restrictions on what combinations
+ * can be captured (hardware scan modes).
+ */
+ .preenable = &iio_sw_buffer_preenable,
+ /*
+ * iio_triggered_buffer_postenable:
+ * Generic function that simply attaches the pollfunc to the trigger.
+ * Replace this to mess with hardware state before we attach the
+ * trigger.
+ */
+ .postenable = &iio_triggered_buffer_postenable,
+ /*
+ * iio_triggered_buffer_predisable:
+ * Generic function that simple detaches the pollfunc from the trigger.
+ * Replace this to put hardware state back again after the trigger is
+ * detached but before userspace knows we have disabled the ring.
+ */
+ .predisable = &iio_triggered_buffer_predisable,
+};
+
+int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct iio_buffer *buffer;
+
+ /* Allocate a buffer to use - here a kfifo */
+ buffer = iio_kfifo_allocate(indio_dev);
+ if (buffer == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ indio_dev->buffer = buffer;
+ /* Tell the core how to access the buffer */
+ buffer->access = &kfifo_access_funcs;
+
+ /* Number of bytes per element */
+ buffer->bpe = 2;
+ /* Enable timestamps by default */
+ buffer->scan_timestamp = true;
+
+ /*
+ * Tell the core what device type specific functions should
+ * be run on either side of buffer capture enable / disable.
+ */
+ buffer->setup_ops = &iio_simple_dummy_buffer_setup_ops;
+ buffer->owner = THIS_MODULE;
+
+ /*
+ * Configure a polling function.
+ * When a trigger event with this polling function connected
+ * occurs, this function is run. Typically this grabs data
+ * from the device.
+ *
+ * NULL for the top half. This is normally implemented only if we
+ * either want to ping a capture now pin (no sleeping) or grab
+ * a timestamp as close as possible to a data ready trigger firing.
+ *
+ * IRQF_ONESHOT ensures irqs are masked such that only one instance
+ * of the handler can run at a time.
+ *
+ * "iio_simple_dummy_consumer%d" formatting string for the irq 'name'
+ * as seen under /proc/interrupts. Remaining parameters as per printk.
+ */
+ indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
+ &iio_simple_dummy_trigger_h,
+ IRQF_ONESHOT,
+ indio_dev,
+ "iio_simple_dummy_consumer%d",
+ indio_dev->id);
+
+ if (indio_dev->pollfunc == NULL) {
+ ret = -ENOMEM;
+ goto error_free_buffer;
+ }
+
+ /*
+ * Notify the core that this device is capable of buffered capture
+ * driven by a trigger.
+ */
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
+ return 0;
+
+error_free_buffer:
+ iio_kfifo_free(indio_dev->buffer);
+error_ret:
+ return ret;
+
+}
+
+/**
+ * iio_simple_dummy_unconfigure_buffer() - release buffer resources
+ * @indo_dev: device instance state
+ */
+void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev)
+{
+ iio_dealloc_pollfunc(indio_dev->pollfunc);
+ iio_kfifo_free(indio_dev->buffer);
+}
diff --git a/drivers/staging/iio/iio_simple_dummy_events.c b/drivers/staging/iio/iio_simple_dummy_events.c
new file mode 100644
index 00000000000..9f00cff7ddd
--- /dev/null
+++ b/drivers/staging/iio/iio_simple_dummy_events.c
@@ -0,0 +1,190 @@
+/**
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Event handling elements of industrial I/O reference driver.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "iio.h"
+#include "sysfs.h"
+#include "iio_simple_dummy.h"
+
+/* Evgen 'fakes' interrupt events for this example */
+#include "iio_dummy_evgen.h"
+
+/**
+ * iio_simple_dummy_read_event_config() - is event enabled?
+ * @indio_dev: the device instance data
+ * @event_code: event code of the event being queried
+ *
+ * This function would normally query the relevant registers or a cache to
+ * discover if the event generation is enabled on the device.
+ */
+int iio_simple_dummy_read_event_config(struct iio_dev *indio_dev,
+ u64 event_code)
+{
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+
+ return st->event_en;
+}
+
+/**
+ * iio_simple_dummy_write_event_config() - set whether event is enabled
+ * @indio_dev: the device instance data
+ * @event_code: event code of event being enabled/disabled
+ * @state: whether to enable or disable the device.
+ *
+ * This function would normally set the relevant registers on the devices
+ * so that it generates the specified event. Here it just sets up a cached
+ * value.
+ */
+int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
+ u64 event_code,
+ int state)
+{
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+
+ /*
+ * Deliberately over the top code splitting to illustrate
+ * how this is done when multiple events exist.
+ */
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_VOLTAGE:
+ switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
+ case IIO_EV_TYPE_THRESH:
+ if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING)
+ st->event_en = state;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * iio_simple_dummy_read_event_value() - get value associated with event
+ * @indio_dev: device instance specific data
+ * @event_code: event code for the event whose value is being queried
+ * @val: value for the event code.
+ *
+ * Many devices provide a large set of events of which only a subset may
+ * be enabled at a time, with value registers whose meaning changes depending
+ * on the event enabled. This often means that the driver must cache the values
+ * associated with each possible events so that the right value is in place when
+ * the enabled event is changed.
+ */
+int iio_simple_dummy_read_event_value(struct iio_dev *indio_dev,
+ u64 event_code,
+ int *val)
+{
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+
+ *val = st->event_val;
+
+ return 0;
+}
+
+/**
+ * iio_simple_dummy_write_event_value() - set value associate with event
+ * @indio_dev: device instance specific data
+ * @event_code: event code for the event whose value is being set
+ * @val: the value to be set.
+ */
+int iio_simple_dummy_write_event_value(struct iio_dev *indio_dev,
+ u64 event_code,
+ int val)
+{
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+
+ st->event_val = val;
+
+ return 0;
+}
+
+/**
+ * iio_simple_dummy_event_handler() - identify and pass on event
+ * @irq: irq of event line
+ * @private: pointer to device instance state.
+ *
+ * This handler is responsible for querying the device to find out what
+ * event occured and for then pushing that event towards userspace.
+ * Here only one event occurs so we push that directly on with locally
+ * grabbed timestamp.
+ */
+static irqreturn_t iio_simple_dummy_event_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ iio_push_event(indio_dev,
+ IIO_EVENT_CODE(IIO_VOLTAGE, 0, 0,
+ IIO_EV_DIR_RISING,
+ IIO_EV_TYPE_THRESH, 0, 0, 0),
+ iio_get_time_ns());
+ return IRQ_HANDLED;
+}
+
+/**
+ * iio_simple_dummy_events_register() - setup interrupt handling for events
+ * @indio_dev: device instance data
+ *
+ * This function requests the threaded interrupt to handle the events.
+ * Normally the irq is a hardware interrupt and the number comes
+ * from board configuration files. Here we get it from a companion
+ * module that fakes the interrupt for us. Note that module in
+ * no way forms part of this example. Just assume that events magically
+ * appear via the provided interrupt.
+ */
+int iio_simple_dummy_events_register(struct iio_dev *indio_dev)
+{
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+ int ret;
+
+ /* Fire up event source - normally not present */
+ st->event_irq = iio_dummy_evgen_get_irq();
+ if (st->event_irq < 0) {
+ ret = st->event_irq;
+ goto error_ret;
+ }
+ ret = request_threaded_irq(st->event_irq,
+ NULL,
+ &iio_simple_dummy_event_handler,
+ IRQF_ONESHOT,
+ "iio_simple_event",
+ indio_dev);
+ if (ret < 0)
+ goto error_free_evgen;
+ return 0;
+
+error_free_evgen:
+ iio_dummy_evgen_release_irq(st->event_irq);
+error_ret:
+ return ret;
+}
+
+/**
+ * iio_simple_dummy_events_unregister() - tidy up interrupt handling on remove
+ * @indio_dev: device instance data
+ */
+int iio_simple_dummy_events_unregister(struct iio_dev *indio_dev)
+{
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+
+ free_irq(st->event_irq, indio_dev);
+ /* Not part of normal driver */
+ iio_dummy_evgen_release_irq(st->event_irq);
+
+ return 0;
+}
diff --git a/drivers/staging/iio/impedance-analyzer/Kconfig b/drivers/staging/iio/impedance-analyzer/Kconfig
new file mode 100644
index 00000000000..ad0ff765e4b
--- /dev/null
+++ b/drivers/staging/iio/impedance-analyzer/Kconfig
@@ -0,0 +1,18 @@
+#
+# Impedance Converter, Network Analyzer drivers
+#
+menu "Network Analyzer, Impedance Converters"
+
+config AD5933
+ tristate "Analog Devices AD5933, AD5934 driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_SW_RING
+ help
+ Say yes here to build support for Analog Devices Impedance Converter,
+ Network Analyzer, AD5933/4, provides direct access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5933.
+
+endmenu
diff --git a/drivers/staging/iio/impedance-analyzer/Makefile b/drivers/staging/iio/impedance-analyzer/Makefile
new file mode 100644
index 00000000000..7604d786583
--- /dev/null
+++ b/drivers/staging/iio/impedance-analyzer/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Impedance Converter, Network Analyzer drivers
+#
+
+obj-$(CONFIG_AD5933) += ad5933.o
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
new file mode 100644
index 00000000000..1086e0befc2
--- /dev/null
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -0,0 +1,814 @@
+/*
+ * AD5933 AD5934 Impedance Converter, Network Analyzer
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <asm/div64.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../buffer_generic.h"
+#include "../ring_sw.h"
+
+#include "ad5933.h"
+
+/* AD5933/AD5934 Registers */
+#define AD5933_REG_CONTROL_HB 0x80 /* R/W, 2 bytes */
+#define AD5933_REG_CONTROL_LB 0x81 /* R/W, 2 bytes */
+#define AD5933_REG_FREQ_START 0x82 /* R/W, 3 bytes */
+#define AD5933_REG_FREQ_INC 0x85 /* R/W, 3 bytes */
+#define AD5933_REG_INC_NUM 0x88 /* R/W, 2 bytes, 9 bit */
+#define AD5933_REG_SETTLING_CYCLES 0x8A /* R/W, 2 bytes */
+#define AD5933_REG_STATUS 0x8F /* R, 1 byte */
+#define AD5933_REG_TEMP_DATA 0x92 /* R, 2 bytes*/
+#define AD5933_REG_REAL_DATA 0x94 /* R, 2 bytes*/
+#define AD5933_REG_IMAG_DATA 0x96 /* R, 2 bytes*/
+
+/* AD5933_REG_CONTROL_HB Bits */
+#define AD5933_CTRL_INIT_START_FREQ (0x1 << 4)
+#define AD5933_CTRL_START_SWEEP (0x2 << 4)
+#define AD5933_CTRL_INC_FREQ (0x3 << 4)
+#define AD5933_CTRL_REPEAT_FREQ (0x4 << 4)
+#define AD5933_CTRL_MEASURE_TEMP (0x9 << 4)
+#define AD5933_CTRL_POWER_DOWN (0xA << 4)
+#define AD5933_CTRL_STANDBY (0xB << 4)
+
+#define AD5933_CTRL_RANGE_2000mVpp (0x0 << 1)
+#define AD5933_CTRL_RANGE_200mVpp (0x1 << 1)
+#define AD5933_CTRL_RANGE_400mVpp (0x2 << 1)
+#define AD5933_CTRL_RANGE_1000mVpp (0x3 << 1)
+#define AD5933_CTRL_RANGE(x) ((x) << 1)
+
+#define AD5933_CTRL_PGA_GAIN_1 (0x1 << 0)
+#define AD5933_CTRL_PGA_GAIN_5 (0x0 << 0)
+
+/* AD5933_REG_CONTROL_LB Bits */
+#define AD5933_CTRL_RESET (0x1 << 4)
+#define AD5933_CTRL_INT_SYSCLK (0x0 << 3)
+#define AD5933_CTRL_EXT_SYSCLK (0x1 << 3)
+
+/* AD5933_REG_STATUS Bits */
+#define AD5933_STAT_TEMP_VALID (0x1 << 0)
+#define AD5933_STAT_DATA_VALID (0x1 << 1)
+#define AD5933_STAT_SWEEP_DONE (0x1 << 2)
+
+/* I2C Block Commands */
+#define AD5933_I2C_BLOCK_WRITE 0xA0
+#define AD5933_I2C_BLOCK_READ 0xA1
+#define AD5933_I2C_ADDR_POINTER 0xB0
+
+/* Device Specs */
+#define AD5933_INT_OSC_FREQ_Hz 16776000
+#define AD5933_MAX_OUTPUT_FREQ_Hz 100000
+#define AD5933_MAX_RETRIES 100
+
+#define AD5933_OUT_RANGE 1
+#define AD5933_OUT_RANGE_AVAIL 2
+#define AD5933_OUT_SETTLING_CYCLES 3
+#define AD5933_IN_PGA_GAIN 4
+#define AD5933_IN_PGA_GAIN_AVAIL 5
+#define AD5933_FREQ_POINTS 6
+
+#define AD5933_POLL_TIME_ms 10
+#define AD5933_INIT_EXCITATION_TIME_ms 100
+
+struct ad5933_state {
+ struct i2c_client *client;
+ struct regulator *reg;
+ struct ad5933_platform_data *pdata;
+ struct delayed_work work;
+ unsigned long mclk_hz;
+ unsigned char ctrl_hb;
+ unsigned char ctrl_lb;
+ unsigned range_avail[4];
+ unsigned short vref_mv;
+ unsigned short settling_cycles;
+ unsigned short freq_points;
+ unsigned freq_start;
+ unsigned freq_inc;
+ unsigned state;
+ unsigned poll_time_jiffies;
+};
+
+static struct ad5933_platform_data ad5933_default_pdata = {
+ .vref_mv = 3300,
+};
+
+static struct iio_chan_spec ad5933_channels[] = {
+ IIO_CHAN(IIO_TEMP, 0, 1, 1, NULL, 0, 0, 0,
+ 0, AD5933_REG_TEMP_DATA, IIO_ST('s', 14, 16, 0), 0),
+ /* Ring Channels */
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "real_raw", 0, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ AD5933_REG_REAL_DATA, 0, IIO_ST('s', 16, 16, 0), 0),
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "imag_raw", 0, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ AD5933_REG_IMAG_DATA, 1, IIO_ST('s', 16, 16, 0), 0),
+};
+
+static int ad5933_i2c_write(struct i2c_client *client,
+ u8 reg, u8 len, u8 *data)
+{
+ int ret;
+
+ while (len--) {
+ ret = i2c_smbus_write_byte_data(client, reg++, *data++);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C write error\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int ad5933_i2c_read(struct i2c_client *client,
+ u8 reg, u8 len, u8 *data)
+{
+ int ret;
+
+ while (len--) {
+ ret = i2c_smbus_read_byte_data(client, reg++);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C read error\n");
+ return ret;
+ }
+ *data++ = ret;
+ }
+ return 0;
+}
+
+static int ad5933_cmd(struct ad5933_state *st, unsigned char cmd)
+{
+ unsigned char dat = st->ctrl_hb | cmd;
+
+ return ad5933_i2c_write(st->client,
+ AD5933_REG_CONTROL_HB, 1, &dat);
+}
+
+static int ad5933_reset(struct ad5933_state *st)
+{
+ unsigned char dat = st->ctrl_lb | AD5933_CTRL_RESET;
+ return ad5933_i2c_write(st->client,
+ AD5933_REG_CONTROL_LB, 1, &dat);
+}
+
+static int ad5933_wait_busy(struct ad5933_state *st, unsigned char event)
+{
+ unsigned char val, timeout = AD5933_MAX_RETRIES;
+ int ret;
+
+ while (timeout--) {
+ ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &val);
+ if (ret < 0)
+ return ret;
+ if (val & event)
+ return val;
+ cpu_relax();
+ mdelay(1);
+ }
+
+ return -EAGAIN;
+}
+
+static int ad5933_set_freq(struct ad5933_state *st,
+ unsigned reg, unsigned long freq)
+{
+ unsigned long long freqreg;
+ union {
+ u32 d32;
+ u8 d8[4];
+ } dat;
+
+ freqreg = (u64) freq * (u64) (1 << 27);
+ do_div(freqreg, st->mclk_hz / 4);
+
+ switch (reg) {
+ case AD5933_REG_FREQ_START:
+ st->freq_start = freq;
+ break;
+ case AD5933_REG_FREQ_INC:
+ st->freq_inc = freq;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dat.d32 = cpu_to_be32(freqreg);
+ return ad5933_i2c_write(st->client, reg, 3, &dat.d8[1]);
+}
+
+static int ad5933_setup(struct ad5933_state *st)
+{
+ unsigned short dat;
+ int ret;
+
+ ret = ad5933_reset(st);
+ if (ret < 0)
+ return ret;
+
+ ret = ad5933_set_freq(st, AD5933_REG_FREQ_START, 10000);
+ if (ret < 0)
+ return ret;
+
+ ret = ad5933_set_freq(st, AD5933_REG_FREQ_INC, 200);
+ if (ret < 0)
+ return ret;
+
+ st->settling_cycles = 10;
+ dat = cpu_to_be16(st->settling_cycles);
+
+ ret = ad5933_i2c_write(st->client,
+ AD5933_REG_SETTLING_CYCLES, 2, (u8 *)&dat);
+ if (ret < 0)
+ return ret;
+
+ st->freq_points = 100;
+ dat = cpu_to_be16(st->freq_points);
+
+ return ad5933_i2c_write(st->client, AD5933_REG_INC_NUM, 2, (u8 *)&dat);
+}
+
+static void ad5933_calc_out_ranges(struct ad5933_state *st)
+{
+ int i;
+ unsigned normalized_3v3[4] = {1980, 198, 383, 970};
+
+ for (i = 0; i < 4; i++)
+ st->range_avail[i] = normalized_3v3[i] * st->vref_mv / 3300;
+
+}
+
+/*
+ * handles: AD5933_REG_FREQ_START and AD5933_REG_FREQ_INC
+ */
+
+static ssize_t ad5933_show_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5933_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ unsigned long long freqreg;
+ union {
+ u32 d32;
+ u8 d8[4];
+ } dat;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5933_i2c_read(st->client, this_attr->address, 3, &dat.d8[1]);
+ mutex_unlock(&indio_dev->mlock);
+ if (ret < 0)
+ return ret;
+
+ freqreg = be32_to_cpu(dat.d32) & 0xFFFFFF;
+
+ freqreg = (u64) freqreg * (u64) (st->mclk_hz / 4);
+ do_div(freqreg, 1 << 27);
+
+ return sprintf(buf, "%d\n", (int) freqreg);
+}
+
+static ssize_t ad5933_store_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5933_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val > AD5933_MAX_OUTPUT_FREQ_Hz)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5933_set_freq(st, this_attr->address, val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(out_voltage0_freq_start, S_IRUGO | S_IWUSR,
+ ad5933_show_frequency,
+ ad5933_store_frequency,
+ AD5933_REG_FREQ_START);
+
+static IIO_DEVICE_ATTR(out_voltage0_freq_increment, S_IRUGO | S_IWUSR,
+ ad5933_show_frequency,
+ ad5933_store_frequency,
+ AD5933_REG_FREQ_INC);
+
+static ssize_t ad5933_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5933_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret = 0, len = 0;
+
+ mutex_lock(&indio_dev->mlock);
+ switch (this_attr->address) {
+ case AD5933_OUT_RANGE:
+ len = sprintf(buf, "%d\n",
+ st->range_avail[(st->ctrl_hb >> 1) & 0x3]);
+ break;
+ case AD5933_OUT_RANGE_AVAIL:
+ len = sprintf(buf, "%d %d %d %d\n", st->range_avail[0],
+ st->range_avail[3], st->range_avail[2],
+ st->range_avail[1]);
+ break;
+ case AD5933_OUT_SETTLING_CYCLES:
+ len = sprintf(buf, "%d\n", st->settling_cycles);
+ break;
+ case AD5933_IN_PGA_GAIN:
+ len = sprintf(buf, "%s\n",
+ (st->ctrl_hb & AD5933_CTRL_PGA_GAIN_1) ?
+ "1" : "0.2");
+ break;
+ case AD5933_IN_PGA_GAIN_AVAIL:
+ len = sprintf(buf, "1 0.2\n");
+ break;
+ case AD5933_FREQ_POINTS:
+ len = sprintf(buf, "%d\n", st->freq_points);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+ return ret ? ret : len;
+}
+
+static ssize_t ad5933_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5933_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ long val;
+ int i, ret = 0;
+ unsigned short dat;
+
+ if (this_attr->address != AD5933_IN_PGA_GAIN) {
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&indio_dev->mlock);
+ switch (this_attr->address) {
+ case AD5933_OUT_RANGE:
+ for (i = 0; i < 4; i++)
+ if (val == st->range_avail[i]) {
+ st->ctrl_hb &= ~AD5933_CTRL_RANGE(0x3);
+ st->ctrl_hb |= AD5933_CTRL_RANGE(i);
+ ret = ad5933_cmd(st, 0);
+ break;
+ }
+ ret = -EINVAL;
+ break;
+ case AD5933_IN_PGA_GAIN:
+ if (sysfs_streq(buf, "1")) {
+ st->ctrl_hb |= AD5933_CTRL_PGA_GAIN_1;
+ } else if (sysfs_streq(buf, "0.2")) {
+ st->ctrl_hb &= ~AD5933_CTRL_PGA_GAIN_1;
+ } else {
+ ret = -EINVAL;
+ break;
+ }
+ ret = ad5933_cmd(st, 0);
+ break;
+ case AD5933_OUT_SETTLING_CYCLES:
+ val = clamp(val, 0L, 0x7FFL);
+ st->settling_cycles = val;
+
+ /* 2x, 4x handling, see datasheet */
+ if (val > 511)
+ val = (val >> 1) | (1 << 9);
+ else if (val > 1022)
+ val = (val >> 2) | (3 << 9);
+
+ dat = cpu_to_be16(val);
+ ret = ad5933_i2c_write(st->client,
+ AD5933_REG_SETTLING_CYCLES, 2, (u8 *)&dat);
+ break;
+ case AD5933_FREQ_POINTS:
+ val = clamp(val, 0L, 511L);
+ st->freq_points = val;
+
+ dat = cpu_to_be16(val);
+ ret = ad5933_i2c_write(st->client, AD5933_REG_INC_NUM, 2,
+ (u8 *)&dat);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(out_voltage0_scale, S_IRUGO | S_IWUSR,
+ ad5933_show,
+ ad5933_store,
+ AD5933_OUT_RANGE);
+
+static IIO_DEVICE_ATTR(out_voltage0_scale_available, S_IRUGO,
+ ad5933_show,
+ NULL,
+ AD5933_OUT_RANGE_AVAIL);
+
+static IIO_DEVICE_ATTR(in_voltage0_scale, S_IRUGO | S_IWUSR,
+ ad5933_show,
+ ad5933_store,
+ AD5933_IN_PGA_GAIN);
+
+static IIO_DEVICE_ATTR(in_voltage0_scale_available, S_IRUGO,
+ ad5933_show,
+ NULL,
+ AD5933_IN_PGA_GAIN_AVAIL);
+
+static IIO_DEVICE_ATTR(out_voltage0_freq_points, S_IRUGO | S_IWUSR,
+ ad5933_show,
+ ad5933_store,
+ AD5933_FREQ_POINTS);
+
+static IIO_DEVICE_ATTR(out_voltage0_settling_cycles, S_IRUGO | S_IWUSR,
+ ad5933_show,
+ ad5933_store,
+ AD5933_OUT_SETTLING_CYCLES);
+
+/* note:
+ * ideally we would handle the scale attributes via the iio_info
+ * (read|write)_raw methods, however this part is a untypical since we
+ * don't create dedicated sysfs channel attributes for out0 and in0.
+ */
+static struct attribute *ad5933_attributes[] = {
+ &iio_dev_attr_out_voltage0_scale.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_scale_available.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_freq_start.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_freq_increment.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_freq_points.dev_attr.attr,
+ &iio_dev_attr_out_voltage0_settling_cycles.dev_attr.attr,
+ &iio_dev_attr_in_voltage0_scale.dev_attr.attr,
+ &iio_dev_attr_in_voltage0_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ad5933_attribute_group = {
+ .attrs = ad5933_attributes,
+};
+
+static int ad5933_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5933_state *st = iio_priv(indio_dev);
+ unsigned short dat;
+ int ret = -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+ switch (m) {
+ case 0:
+ if (iio_buffer_enabled(indio_dev)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ ret = ad5933_cmd(st, AD5933_CTRL_MEASURE_TEMP);
+ if (ret < 0)
+ goto out;
+ ret = ad5933_wait_busy(st, AD5933_STAT_TEMP_VALID);
+ if (ret < 0)
+ goto out;
+
+ ret = ad5933_i2c_read(st->client,
+ AD5933_REG_TEMP_DATA, 2,
+ (u8 *)&dat);
+ if (ret < 0)
+ goto out;
+ mutex_unlock(&indio_dev->mlock);
+ ret = be16_to_cpu(dat);
+ /* Temp in Milli degrees Celsius */
+ if (ret < 8192)
+ *val = ret * 1000 / 32;
+ else
+ *val = (ret - 16384) * 1000 / 32;
+
+ return IIO_VAL_INT;
+ }
+
+out:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static const struct iio_info ad5933_info = {
+ .read_raw = &ad5933_read_raw,
+ .attrs = &ad5933_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int ad5933_ring_preenable(struct iio_dev *indio_dev)
+{
+ struct ad5933_state *st = iio_priv(indio_dev);
+ struct iio_buffer *ring = indio_dev->buffer;
+ size_t d_size;
+ int ret;
+
+ if (!ring->scan_count)
+ return -EINVAL;
+
+ d_size = ring->scan_count *
+ ad5933_channels[1].scan_type.storagebits / 8;
+
+ if (indio_dev->buffer->access->set_bytes_per_datum)
+ indio_dev->buffer->access->
+ set_bytes_per_datum(indio_dev->buffer, d_size);
+
+ ret = ad5933_reset(st);
+ if (ret < 0)
+ return ret;
+
+ ret = ad5933_cmd(st, AD5933_CTRL_STANDBY);
+ if (ret < 0)
+ return ret;
+
+ ret = ad5933_cmd(st, AD5933_CTRL_INIT_START_FREQ);
+ if (ret < 0)
+ return ret;
+
+ st->state = AD5933_CTRL_INIT_START_FREQ;
+
+ return 0;
+}
+
+static int ad5933_ring_postenable(struct iio_dev *indio_dev)
+{
+ struct ad5933_state *st = iio_priv(indio_dev);
+
+ /* AD5933_CTRL_INIT_START_FREQ:
+ * High Q complex circuits require a long time to reach steady state.
+ * To facilitate the measurement of such impedances, this mode allows
+ * the user full control of the settling time requirement before
+ * entering start frequency sweep mode where the impedance measurement
+ * takes place. In this mode the impedance is excited with the
+ * programmed start frequency (ad5933_ring_preenable),
+ * but no measurement takes place.
+ */
+
+ schedule_delayed_work(&st->work,
+ msecs_to_jiffies(AD5933_INIT_EXCITATION_TIME_ms));
+ return 0;
+}
+
+static int ad5933_ring_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad5933_state *st = iio_priv(indio_dev);
+
+ cancel_delayed_work_sync(&st->work);
+ return ad5933_cmd(st, AD5933_CTRL_POWER_DOWN);
+}
+
+static const struct iio_buffer_setup_ops ad5933_ring_setup_ops = {
+ .preenable = &ad5933_ring_preenable,
+ .postenable = &ad5933_ring_postenable,
+ .postdisable = &ad5933_ring_postdisable,
+};
+
+static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
+{
+ indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
+ if (!indio_dev->buffer)
+ return -ENOMEM;
+
+ /* Effectively select the ring buffer implementation */
+ indio_dev->buffer->access = &ring_sw_access_funcs;
+
+ /* Ring buffer functions - here trigger setup related */
+ indio_dev->buffer->setup_ops = &ad5933_ring_setup_ops;
+
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+ return 0;
+}
+
+static void ad5933_work(struct work_struct *work)
+{
+ struct ad5933_state *st = container_of(work,
+ struct ad5933_state, work.work);
+ struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
+ struct iio_buffer *ring = indio_dev->buffer;
+ signed short buf[2];
+ unsigned char status;
+
+ mutex_lock(&indio_dev->mlock);
+ if (st->state == AD5933_CTRL_INIT_START_FREQ) {
+ /* start sweep */
+ ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
+ st->state = AD5933_CTRL_START_SWEEP;
+ schedule_delayed_work(&st->work, st->poll_time_jiffies);
+ mutex_unlock(&indio_dev->mlock);
+ return;
+ }
+
+ ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+
+ if (status & AD5933_STAT_DATA_VALID) {
+ ad5933_i2c_read(st->client,
+ test_bit(1, ring->scan_mask) ?
+ AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
+ ring->scan_count * 2, (u8 *)buf);
+
+ if (ring->scan_count == 2) {
+ buf[0] = be16_to_cpu(buf[0]);
+ buf[1] = be16_to_cpu(buf[1]);
+ } else {
+ buf[0] = be16_to_cpu(buf[0]);
+ }
+ /* save datum to the ring */
+ ring->access->store_to(ring, (u8 *)buf, iio_get_time_ns());
+ } else {
+ /* no data available - try again later */
+ schedule_delayed_work(&st->work, st->poll_time_jiffies);
+ mutex_unlock(&indio_dev->mlock);
+ return;
+ }
+
+ if (status & AD5933_STAT_SWEEP_DONE) {
+ /* last sample received - power down do nothing until
+ * the ring enable is toggled */
+ ad5933_cmd(st, AD5933_CTRL_POWER_DOWN);
+ } else {
+ /* we just received a valid datum, move on to the next */
+ ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
+ schedule_delayed_work(&st->work, st->poll_time_jiffies);
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+}
+
+static int __devinit ad5933_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret, voltage_uv = 0;
+ struct ad5933_platform_data *pdata = client->dev.platform_data;
+ struct ad5933_state *st;
+ struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ st->client = client;
+
+ if (!pdata)
+ st->pdata = &ad5933_default_pdata;
+ else
+ st->pdata = pdata;
+
+ st->reg = regulator_get(&client->dev, "vcc");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ goto error_put_reg;
+ voltage_uv = regulator_get_voltage(st->reg);
+ }
+
+ if (voltage_uv)
+ st->vref_mv = voltage_uv / 1000;
+ else
+ st->vref_mv = st->pdata->vref_mv;
+
+ if (st->pdata->ext_clk_Hz) {
+ st->mclk_hz = st->pdata->ext_clk_Hz;
+ st->ctrl_lb = AD5933_CTRL_EXT_SYSCLK;
+ } else {
+ st->mclk_hz = AD5933_INT_OSC_FREQ_Hz;
+ st->ctrl_lb = AD5933_CTRL_INT_SYSCLK;
+ }
+
+ ad5933_calc_out_ranges(st);
+ INIT_DELAYED_WORK(&st->work, ad5933_work);
+ st->poll_time_jiffies = msecs_to_jiffies(AD5933_POLL_TIME_ms);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ad5933_info;
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad5933_channels;
+ indio_dev->num_channels = 1; /* only register temp0_input */
+
+ ret = ad5933_register_ring_funcs_and_init(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ /* skip temp0_input, register in0_(real|imag)_raw */
+ ret = iio_buffer_register(indio_dev, &ad5933_channels[1], 2);
+ if (ret)
+ goto error_unreg_ring;
+
+ /* enable both REAL and IMAG channels by default */
+ iio_scan_mask_set(indio_dev->buffer, 0);
+ iio_scan_mask_set(indio_dev->buffer, 1);
+
+ ret = ad5933_setup(st);
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_uninitialize_ring;
+
+ return 0;
+
+error_uninitialize_ring:
+ iio_buffer_unregister(indio_dev);
+error_unreg_ring:
+ iio_sw_rb_free(indio_dev->buffer);
+error_disable_reg:
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+error_put_reg:
+ if (!IS_ERR(st->reg))
+ regulator_put(st->reg);
+
+ iio_free_device(indio_dev);
+
+ return ret;
+}
+
+static __devexit int ad5933_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ad5933_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_buffer_unregister(indio_dev);
+ iio_sw_rb_free(indio_dev->buffer);
+ if (!IS_ERR(st->reg)) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
+ }
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad5933_id[] = {
+ { "ad5933", 0 },
+ { "ad5934", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad5933_id);
+
+static struct i2c_driver ad5933_driver = {
+ .driver = {
+ .name = "ad5933",
+ },
+ .probe = ad5933_probe,
+ .remove = __devexit_p(ad5933_remove),
+ .id_table = ad5933_id,
+};
+
+static __init int ad5933_init(void)
+{
+ return i2c_add_driver(&ad5933_driver);
+}
+module_init(ad5933_init);
+
+static __exit void ad5933_exit(void)
+{
+ i2c_del_driver(&ad5933_driver);
+}
+module_exit(ad5933_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD5933 Impedance Conv. Network Analyzer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.h b/drivers/staging/iio/impedance-analyzer/ad5933.h
new file mode 100644
index 00000000000..b140e42d67c
--- /dev/null
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.h
@@ -0,0 +1,28 @@
+/*
+ * AD5933 AD5934 Impedance Converter, Network Analyzer
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef IIO_ADC_AD5933_H_
+#define IIO_ADC_AD5933_H_
+
+/*
+ * TODO: struct ad5933_platform_data needs to go into include/linux/iio
+ */
+
+/**
+ * struct ad5933_platform_data - platform specific data
+ * @ext_clk_Hz: the external clock frequency in Hz, if not set
+ * the driver uses the internal clock (16.776 MHz)
+ * @vref_mv: the external reference voltage in millivolt
+ */
+
+struct ad5933_platform_data {
+ unsigned long ext_clk_Hz;
+ unsigned short vref_mv;
+};
+
+#endif /* IIO_ADC_AD5933_H_ */
diff --git a/drivers/staging/iio/imu/Kconfig b/drivers/staging/iio/imu/Kconfig
index e0e01446117..2c2f47de263 100644
--- a/drivers/staging/iio/imu/Kconfig
+++ b/drivers/staging/iio/imu/Kconfig
@@ -1,15 +1,17 @@
#
# IIO imu drivers configuration
#
-comment "Inertial measurement units"
+menu "Inertial measurement units"
config ADIS16400
tristate "Analog Devices ADIS16400 and similar IMU SPI driver"
depends on SPI
- select IIO_SW_RING if IIO_RING_BUFFER
- select IIO_TRIGGER if IIO_RING_BUFFER
+ select IIO_SW_RING if IIO_BUFFER
+ select IIO_TRIGGER if IIO_BUFFER
help
- Say yes here to build support for Analog Devices adis16300, adis16350,
- adis16354, adis16355, adis16360, adis16362, adis16364, adis16365,
- adis16400 and adis16405 triaxial inertial sensors (adis16400 series
- also have magnetometers).
+ Say yes here to build support for Analog Devices adis16300, adis16344,
+ adis16350, adis16354, adis16355, adis16360, adis16362, adis16364,
+ adis16365, adis16400 and adis16405 triaxial inertial sensors
+ (adis16400 series also have magnetometers).
+
+endmenu
diff --git a/drivers/staging/iio/imu/Makefile b/drivers/staging/iio/imu/Makefile
index d46a691912e..3400a13d152 100644
--- a/drivers/staging/iio/imu/Makefile
+++ b/drivers/staging/iio/imu/Makefile
@@ -3,5 +3,5 @@
#
adis16400-y := adis16400_core.o
-adis16400-$(CONFIG_IIO_RING_BUFFER) += adis16400_ring.o adis16400_trigger.o
+adis16400-$(CONFIG_IIO_BUFFER) += adis16400_ring.o adis16400_trigger.o
obj-$(CONFIG_ADIS16400) += adis16400.o
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h
index 1f8f0c60c2c..f3546ee910a 100644
--- a/drivers/staging/iio/imu/adis16400.h
+++ b/drivers/staging/iio/imu/adis16400.h
@@ -184,7 +184,7 @@ int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
#define ADIS16300_SCAN_INCLI_X 12
#define ADIS16300_SCAN_INCLI_Y 13
-#ifdef CONFIG_IIO_RING_BUFFER
+#ifdef CONFIG_IIO_BUFFER
void adis16400_remove_trigger(struct iio_dev *indio_dev);
int adis16400_probe_trigger(struct iio_dev *indio_dev);
@@ -196,7 +196,7 @@ ssize_t adis16400_read_data_from_ring(struct device *dev,
int adis16400_configure_ring(struct iio_dev *indio_dev);
void adis16400_unconfigure_ring(struct iio_dev *indio_dev);
-#else /* CONFIG_IIO_RING_BUFFER */
+#else /* CONFIG_IIO_BUFFER */
static inline void adis16400_remove_trigger(struct iio_dev *indio_dev)
{
@@ -224,5 +224,5 @@ static inline void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
{
}
-#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* CONFIG_IIO_BUFFER */
#endif /* SPI_ADIS16400_H_ */
diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c
index a2c3b67dcbd..d082a37c4fb 100644
--- a/drivers/staging/iio/imu/adis16400_core.c
+++ b/drivers/staging/iio/imu/adis16400_core.c
@@ -16,7 +16,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
@@ -25,21 +24,16 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
-#include "../accel/accel.h"
-#include "../adc/adc.h"
-#include "../gyro/gyro.h"
-#include "../magnetometer/magnet.h"
-
+#include "../buffer_generic.h"
#include "adis16400.h"
-#define DRIVER_NAME "adis16400"
-
enum adis16400_chip_variant {
ADIS16300,
+ ADIS16334,
ADIS16350,
ADIS16360,
ADIS16362,
@@ -48,19 +42,12 @@ enum adis16400_chip_variant {
ADIS16400,
};
-static int adis16400_check_status(struct iio_dev *indio_dev);
-
-/* At the moment the spi framework doesn't allow global setting of cs_change.
- * It's in the likely to be added comment at the top of spi.h.
- * This means that use cannot be made of spi_write etc.
- */
-
/**
* adis16400_spi_write_reg_8() - write single byte to a register
* @dev: device associated with child of actual device (iio_dev or iio_trig)
* @reg_address: the address of the register to be written
* @val: the value to write
- **/
+ */
static int adis16400_spi_write_reg_8(struct iio_dev *indio_dev,
u8 reg_address,
u8 val)
@@ -84,7 +71,10 @@ static int adis16400_spi_write_reg_8(struct iio_dev *indio_dev,
* @reg_address: the address of the lower of the two registers. Second register
* is assumed to have address one greater.
* @val: value to be written
- **/
+ *
+ * At the moment the spi framework doesn't allow global setting of cs_change.
+ * This means that use cannot be made of spi_write.
+ */
static int adis16400_spi_write_reg_16(struct iio_dev *indio_dev,
u8 lower_reg_address,
u16 value)
@@ -126,6 +116,9 @@ static int adis16400_spi_write_reg_16(struct iio_dev *indio_dev,
* @reg_address: the address of the lower of the two registers. Second register
* is assumed to have address one greater.
* @val: somewhere to pass back the value read
+ *
+ * At the moment the spi framework doesn't allow global setting of cs_change.
+ * This means that use cannot be made of spi_read.
**/
static int adis16400_spi_read_reg_16(struct iio_dev *indio_dev,
u8 lower_reg_address,
@@ -150,8 +143,6 @@ static int adis16400_spi_read_reg_16(struct iio_dev *indio_dev,
mutex_lock(&st->buf_lock);
st->tx[0] = ADIS16400_READ_REG(lower_reg_address);
st->tx[1] = 0;
- st->tx[2] = 0;
- st->tx[3] = 0;
spi_message_init(&msg);
spi_message_add_tail(&xfers[0], &msg);
@@ -240,23 +231,26 @@ static ssize_t adis16400_write_reset(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ bool val;
+ int ret;
- if (len < 1)
- return -1;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return adis16400_reset(indio_dev);
+ ret = strtobool(buf, &val);
+ if (ret < 0)
+ return ret;
+ if (val) {
+ ret = adis16400_reset(dev_get_drvdata(dev));
+ if (ret < 0)
+ return ret;
}
- return -1;
+
+ return len;
}
int adis16400_set_irq(struct iio_dev *indio_dev, bool enable)
{
int ret;
u16 msc;
+
ret = adis16400_spi_read_reg_16(indio_dev, ADIS16400_MSC_CTRL, &msc);
if (ret)
goto error_ret;
@@ -289,24 +283,6 @@ static int adis16400_stop_device(struct iio_dev *indio_dev)
return ret;
}
-static int adis16400_self_test(struct iio_dev *indio_dev)
-{
- int ret;
- ret = adis16400_spi_write_reg_16(indio_dev,
- ADIS16400_MSC_CTRL,
- ADIS16400_MSC_CTRL_MEM_TEST);
- if (ret) {
- dev_err(&indio_dev->dev, "problem starting self test");
- goto err_ret;
- }
-
- msleep(ADIS16400_MTEST_DELAY);
- adis16400_check_status(indio_dev);
-
-err_ret:
- return ret;
-}
-
static int adis16400_check_status(struct iio_dev *indio_dev)
{
u16 status;
@@ -356,11 +332,28 @@ error_ret:
return ret;
}
+static int adis16400_self_test(struct iio_dev *indio_dev)
+{
+ int ret;
+ ret = adis16400_spi_write_reg_16(indio_dev,
+ ADIS16400_MSC_CTRL,
+ ADIS16400_MSC_CTRL_MEM_TEST);
+ if (ret) {
+ dev_err(&indio_dev->dev, "problem starting self test");
+ goto err_ret;
+ }
+
+ msleep(ADIS16400_MTEST_DELAY);
+ adis16400_check_status(indio_dev);
+
+err_ret:
+ return ret;
+}
+
static int adis16400_initial_setup(struct iio_dev *indio_dev)
{
int ret;
u16 prod_id, smp_prd;
- struct device *dev = &indio_dev->dev;
struct adis16400_state *st = iio_priv(indio_dev);
/* use low spi speed for init */
@@ -368,29 +361,26 @@ static int adis16400_initial_setup(struct iio_dev *indio_dev)
st->us->mode = SPI_MODE_3;
spi_setup(st->us);
- /* Disable IRQ */
ret = adis16400_set_irq(indio_dev, false);
if (ret) {
- dev_err(dev, "disable irq failed");
+ dev_err(&indio_dev->dev, "disable irq failed");
goto err_ret;
}
- /* Do self test */
ret = adis16400_self_test(indio_dev);
if (ret) {
- dev_err(dev, "self test failure");
+ dev_err(&indio_dev->dev, "self test failure");
goto err_ret;
}
- /* Read status register to check the result */
ret = adis16400_check_status(indio_dev);
if (ret) {
adis16400_reset(indio_dev);
- dev_err(dev, "device not playing ball -> reset");
+ dev_err(&indio_dev->dev, "device not playing ball -> reset");
msleep(ADIS16400_STARTUP_DELAY);
ret = adis16400_check_status(indio_dev);
if (ret) {
- dev_err(dev, "giving up");
+ dev_err(&indio_dev->dev, "giving up");
goto err_ret;
}
}
@@ -401,10 +391,11 @@ static int adis16400_initial_setup(struct iio_dev *indio_dev)
goto err_ret;
if ((prod_id & 0xF000) != st->variant->product_id)
- dev_warn(dev, "incorrect id");
+ dev_warn(&indio_dev->dev, "incorrect id");
- printk(KERN_INFO DRIVER_NAME ": prod_id 0x%04x at CS%d (irq %d)\n",
- prod_id, st->us->chip_select, st->us->irq);
+ dev_info(&indio_dev->dev, "%s: prod_id 0x%04x at CS%d (irq %d)\n",
+ indio_dev->name, prod_id,
+ st->us->chip_select, st->us->irq);
}
/* use high spi speed if possible */
ret = adis16400_spi_read_reg_16(indio_dev,
@@ -414,15 +405,13 @@ static int adis16400_initial_setup(struct iio_dev *indio_dev)
spi_setup(st->us);
}
-
err_ret:
-
return ret;
}
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- adis16400_read_frequency,
- adis16400_write_frequency);
+ adis16400_read_frequency,
+ adis16400_write_frequency);
static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16400_write_reset, 0);
@@ -447,23 +436,23 @@ enum adis16400_chan {
};
static u8 adis16400_addresses[17][2] = {
- [in_supply] = { ADIS16400_SUPPLY_OUT, 0 },
+ [in_supply] = { ADIS16400_SUPPLY_OUT },
[gyro_x] = { ADIS16400_XGYRO_OUT, ADIS16400_XGYRO_OFF },
[gyro_y] = { ADIS16400_YGYRO_OUT, ADIS16400_YGYRO_OFF },
[gyro_z] = { ADIS16400_ZGYRO_OUT, ADIS16400_ZGYRO_OFF },
[accel_x] = { ADIS16400_XACCL_OUT, ADIS16400_XACCL_OFF },
[accel_y] = { ADIS16400_YACCL_OUT, ADIS16400_YACCL_OFF },
[accel_z] = { ADIS16400_ZACCL_OUT, ADIS16400_ZACCL_OFF },
- [magn_x] = { ADIS16400_XMAGN_OUT, 0 },
- [magn_y] = { ADIS16400_YMAGN_OUT, 0 },
- [magn_z] = { ADIS16400_ZMAGN_OUT, 0 },
- [temp] = { ADIS16400_TEMP_OUT, 0 },
+ [magn_x] = { ADIS16400_XMAGN_OUT },
+ [magn_y] = { ADIS16400_YMAGN_OUT },
+ [magn_z] = { ADIS16400_ZMAGN_OUT },
+ [temp] = { ADIS16400_TEMP_OUT },
[temp0] = { ADIS16350_XTEMP_OUT },
[temp1] = { ADIS16350_YTEMP_OUT },
[temp2] = { ADIS16350_ZTEMP_OUT },
- [in1] = { ADIS16400_AUX_ADC, 0 },
- [incli_x] = { ADIS16300_PITCH_OUT, 0 },
- [incli_y] = { ADIS16300_ROLL_OUT, 0 }
+ [in1] = { ADIS16400_AUX_ADC },
+ [incli_x] = { ADIS16300_PITCH_OUT },
+ [incli_y] = { ADIS16300_ROLL_OUT }
};
static int adis16400_write_raw(struct iio_dev *indio_dev,
@@ -473,6 +462,7 @@ static int adis16400_write_raw(struct iio_dev *indio_dev,
long mask)
{
int ret;
+
switch (mask) {
case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
mutex_lock(&indio_dev->mlock);
@@ -493,9 +483,8 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct adis16400_state *st = iio_priv(indio_dev);
- int ret;
+ int ret, shift;
s16 val16;
- int shift;
switch (mask) {
case 0:
@@ -518,11 +507,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
case (1 << IIO_CHAN_INFO_SCALE_SHARED):
case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
switch (chan->type) {
- case IIO_GYRO:
+ case IIO_ANGL_VEL:
*val = 0;
*val2 = st->variant->gyro_scale_micro;
return IIO_VAL_INT_PLUS_MICRO;
- case IIO_IN:
+ case IIO_VOLTAGE:
*val = 0;
if (chan->channel == 0)
*val2 = 2418;
@@ -566,135 +555,371 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
}
static struct iio_chan_spec adis16400_channels[] = {
- IIO_CHAN(IIO_IN, 0, 1, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- in_supply, ADIS16400_SCAN_SUPPLY,
- IIO_ST('u', 14, 16, 0), 0),
- IIO_CHAN(IIO_GYRO, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- gyro_x, ADIS16400_SCAN_GYRO_X, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_GYRO, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- gyro_y, ADIS16400_SCAN_GYRO_Y, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_GYRO, 1, 0, 0, NULL, 0, IIO_MOD_Z,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- gyro_z, ADIS16400_SCAN_GYRO_Z, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- accel_x, ADIS16400_SCAN_ACC_X, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- accel_y, ADIS16400_SCAN_ACC_Y, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Z,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- accel_z, ADIS16400_SCAN_ACC_Z, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_MAGN, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- magn_x, ADIS16400_SCAN_MAGN_X, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_MAGN, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- magn_y, ADIS16400_SCAN_MAGN_Y, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_MAGN, 1, 0, 0, NULL, 0, IIO_MOD_Z,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- magn_z, ADIS16400_SCAN_MAGN_Z, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- temp, ADIS16400_SCAN_TEMP, IIO_ST('s', 12, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- in1, ADIS16400_SCAN_ADC_0, IIO_ST('s', 12, 16, 0), 0),
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .extend_name = "supply",
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = in_supply,
+ .scan_index = ADIS16400_SCAN_SUPPLY,
+ .scan_type = IIO_ST('u', 14, 16, 0)
+ }, {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = gyro_x,
+ .scan_index = ADIS16400_SCAN_GYRO_X,
+ .scan_type = IIO_ST('s', 14, 16, 0)
+ }, {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = gyro_y,
+ .scan_index = ADIS16400_SCAN_GYRO_Y,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = gyro_z,
+ .scan_index = ADIS16400_SCAN_GYRO_Z,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = accel_x,
+ .scan_index = ADIS16400_SCAN_ACC_X,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = accel_y,
+ .scan_index = ADIS16400_SCAN_ACC_Y,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = accel_z,
+ .scan_index = ADIS16400_SCAN_ACC_Z,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_MAGN,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = magn_x,
+ .scan_index = ADIS16400_SCAN_MAGN_X,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_MAGN,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = magn_y,
+ .scan_index = ADIS16400_SCAN_MAGN_Y,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_MAGN,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = magn_z,
+ .scan_index = ADIS16400_SCAN_MAGN_Z,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = temp,
+ .scan_index = ADIS16400_SCAN_TEMP,
+ .scan_type = IIO_ST('s', 12, 16, 0),
+ }, {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = in1,
+ .scan_index = ADIS16400_SCAN_ADC_0,
+ .scan_type = IIO_ST('s', 12, 16, 0),
+ },
IIO_CHAN_SOFT_TIMESTAMP(12)
};
static struct iio_chan_spec adis16350_channels[] = {
- IIO_CHAN(IIO_IN, 0, 1, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- in_supply, ADIS16400_SCAN_SUPPLY, IIO_ST('u', 12, 16, 0), 0),
- IIO_CHAN(IIO_GYRO, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- gyro_x, ADIS16400_SCAN_GYRO_X, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_GYRO, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- gyro_y, ADIS16400_SCAN_GYRO_Y, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_GYRO, 1, 0, 0, NULL, 0, IIO_MOD_Z,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- gyro_z, ADIS16400_SCAN_GYRO_Z, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- accel_x, ADIS16400_SCAN_ACC_X, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- accel_y, ADIS16400_SCAN_ACC_Y, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Z,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- accel_z, ADIS16400_SCAN_ACC_Z, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_TEMP, 0, 1, 0, "x", 0, 0,
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- temp0, ADIS16350_SCAN_TEMP_X, IIO_ST('s', 12, 16, 0), 0),
- IIO_CHAN(IIO_TEMP, 0, 1, 0, "y", 1, 0,
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- temp1, ADIS16350_SCAN_TEMP_Y, IIO_ST('s', 12, 16, 0), 0),
- IIO_CHAN(IIO_TEMP, 0, 1, 0, "z", 2, 0,
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- temp2, ADIS16350_SCAN_TEMP_Z, IIO_ST('s', 12, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- in1, ADIS16350_SCAN_ADC_0, IIO_ST('s', 12, 16, 0), 0),
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .extend_name = "supply",
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = in_supply,
+ .scan_index = ADIS16400_SCAN_SUPPLY,
+ .scan_type = IIO_ST('u', 12, 16, 0)
+ }, {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = gyro_x,
+ .scan_index = ADIS16400_SCAN_GYRO_X,
+ .scan_type = IIO_ST('s', 14, 16, 0)
+ }, {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = gyro_y,
+ .scan_index = ADIS16400_SCAN_GYRO_Y,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = gyro_z,
+ .scan_index = ADIS16400_SCAN_GYRO_Z,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = accel_x,
+ .scan_index = ADIS16400_SCAN_ACC_X,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = accel_y,
+ .scan_index = ADIS16400_SCAN_ACC_Y,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = accel_z,
+ .scan_index = ADIS16400_SCAN_ACC_Z,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .extend_name = "x",
+ .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = temp0,
+ .scan_index = ADIS16350_SCAN_TEMP_X,
+ .scan_type = IIO_ST('s', 12, 16, 0),
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 1,
+ .extend_name = "y",
+ .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = temp1,
+ .scan_index = ADIS16350_SCAN_TEMP_Y,
+ .scan_type = IIO_ST('s', 12, 16, 0),
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 2,
+ .extend_name = "z",
+ .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = temp2,
+ .scan_index = ADIS16350_SCAN_TEMP_Z,
+ .scan_type = IIO_ST('s', 12, 16, 0),
+ }, {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = in1,
+ .scan_index = ADIS16350_SCAN_ADC_0,
+ .scan_type = IIO_ST('s', 12, 16, 0),
+ },
IIO_CHAN_SOFT_TIMESTAMP(11)
};
static struct iio_chan_spec adis16300_channels[] = {
- IIO_CHAN(IIO_IN, 0, 1, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- in_supply, ADIS16400_SCAN_SUPPLY, IIO_ST('u', 12, 16, 0), 0),
- IIO_CHAN(IIO_GYRO, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- gyro_x, ADIS16400_SCAN_GYRO_X, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- accel_x, ADIS16400_SCAN_ACC_X, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- accel_y, ADIS16400_SCAN_ACC_Y, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Z,
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- accel_z, ADIS16400_SCAN_ACC_Z, IIO_ST('s', 14, 16, 0), 0),
- IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- temp, ADIS16400_SCAN_TEMP, IIO_ST('s', 12, 16, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- in1, ADIS16350_SCAN_ADC_0, IIO_ST('s', 12, 16, 0), 0),
- IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- incli_x, ADIS16300_SCAN_INCLI_X, IIO_ST('s', 13, 16, 0), 0),
- IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
- incli_y, ADIS16300_SCAN_INCLI_Y, IIO_ST('s', 13, 16, 0), 0),
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .extend_name = "supply",
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = in_supply,
+ .scan_index = ADIS16400_SCAN_SUPPLY,
+ .scan_type = IIO_ST('u', 12, 16, 0)
+ }, {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = gyro_x,
+ .scan_index = ADIS16400_SCAN_GYRO_X,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = accel_x,
+ .scan_index = ADIS16400_SCAN_ACC_X,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = accel_y,
+ .scan_index = ADIS16400_SCAN_ACC_Y,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = accel_z,
+ .scan_index = ADIS16400_SCAN_ACC_Z,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = temp,
+ .scan_index = ADIS16400_SCAN_TEMP,
+ .scan_type = IIO_ST('s', 12, 16, 0),
+ }, {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .address = in1,
+ .scan_index = ADIS16350_SCAN_ADC_0,
+ .scan_type = IIO_ST('s', 12, 16, 0),
+ }, {
+ .type = IIO_INCLI,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = incli_x,
+ .scan_index = ADIS16300_SCAN_INCLI_X,
+ .scan_type = IIO_ST('s', 13, 16, 0),
+ }, {
+ .type = IIO_INCLI,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = incli_y,
+ .scan_index = ADIS16300_SCAN_INCLI_Y,
+ .scan_type = IIO_ST('s', 13, 16, 0),
+ },
IIO_CHAN_SOFT_TIMESTAMP(14)
};
+static const struct iio_chan_spec adis16334_channels[] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = gyro_x,
+ .scan_index = ADIS16400_SCAN_GYRO_X,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = gyro_y,
+ .scan_index = ADIS16400_SCAN_GYRO_Y,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = gyro_z,
+ .scan_index = ADIS16400_SCAN_GYRO_Z,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = accel_x,
+ .scan_index = ADIS16400_SCAN_ACC_X,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = accel_y,
+ .scan_index = ADIS16400_SCAN_ACC_Y,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = accel_z,
+ .scan_index = ADIS16400_SCAN_ACC_Z,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = accel_z,
+ .scan_index = ADIS16400_SCAN_ACC_Z,
+ .scan_type = IIO_ST('s', 14, 16, 0),
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(12)
+};
+
static struct attribute *adis16400_attributes[] = {
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
@@ -719,6 +944,16 @@ static struct adis16400_chip_info adis16400_chips[] = {
(1 << ADIS16300_SCAN_INCLI_X) | (1 << ADIS16300_SCAN_INCLI_Y) |
(1 << 14),
},
+ [ADIS16334] = {
+ .channels = adis16334_channels,
+ .num_channels = ARRAY_SIZE(adis16334_channels),
+ .gyro_scale_micro = 873,
+ .accel_scale_micro = 981,
+ .default_scan_mask = (1 << ADIS16400_SCAN_GYRO_X) |
+ (1 << ADIS16400_SCAN_GYRO_Y) | (1 << ADIS16400_SCAN_GYRO_Z) |
+ (1 << ADIS16400_SCAN_ACC_X) | (1 << ADIS16400_SCAN_ACC_Y) |
+ (1 << ADIS16400_SCAN_ACC_Z),
+ },
[ADIS16350] = {
.channels = adis16350_channels,
.num_channels = ARRAY_SIZE(adis16350_channels),
@@ -783,7 +1018,7 @@ static const struct iio_info adis16400_info = {
static int __devinit adis16400_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct adis16400_state *st;
struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));
if (indio_dev == NULL) {
@@ -810,20 +1045,15 @@ static int __devinit adis16400_probe(struct spi_device *spi)
if (ret)
goto error_free_dev;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_ring_funcs;
- regdone = 1;
-
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- st->variant->channels,
- st->variant->num_channels);
+ ret = iio_buffer_register(indio_dev,
+ st->variant->channels,
+ st->variant->num_channels);
if (ret) {
dev_err(&spi->dev, "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
}
- if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+ if (spi->irq) {
ret = adis16400_probe_trigger(indio_dev);
if (ret)
goto error_uninitialize_ring;
@@ -833,20 +1063,21 @@ static int __devinit adis16400_probe(struct spi_device *spi)
ret = adis16400_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_remove_trigger;
+
return 0;
error_remove_trigger:
- if (indio_dev->modes & INDIO_RING_TRIGGERED)
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
adis16400_remove_trigger(indio_dev);
error_uninitialize_ring:
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_buffer_unregister(indio_dev);
error_unreg_ring_funcs:
adis16400_unconfigure_ring(indio_dev);
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -857,14 +1088,15 @@ static int adis16400_remove(struct spi_device *spi)
int ret;
struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ iio_device_unregister(indio_dev);
ret = adis16400_stop_device(indio_dev);
if (ret)
goto err_ret;
adis16400_remove_trigger(indio_dev);
- iio_ring_buffer_unregister(indio_dev->ring);
+ iio_buffer_unregister(indio_dev);
adis16400_unconfigure_ring(indio_dev);
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
@@ -874,6 +1106,7 @@ err_ret:
static const struct spi_device_id adis16400_id[] = {
{"adis16300", ADIS16300},
+ {"adis16334", ADIS16334},
{"adis16350", ADIS16350},
{"adis16354", ADIS16350},
{"adis16355", ADIS16350},
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index 3612373dded..c3682458d78 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -1,21 +1,13 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/bitops.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../ring_sw.h"
-#include "../accel/accel.h"
-#include "../trigger.h"
+#include "../trigger_consumer.h"
#include "adis16400.h"
/**
@@ -87,13 +79,13 @@ static int adis16350_spi_read_all(struct device *dev, u8 *rx)
int i, j = 0, ret;
struct spi_transfer *xfers;
- xfers = kzalloc(sizeof(*xfers)*indio_dev->ring->scan_count + 1,
+ xfers = kzalloc(sizeof(*xfers)*indio_dev->buffer->scan_count + 1,
GFP_KERNEL);
if (xfers == NULL)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(read_all_tx_array); i++)
- if (indio_dev->ring->scan_mask & (1 << i)) {
+ if (test_bit(i, indio_dev->buffer->scan_mask)) {
xfers[j].tx_buf = &read_all_tx_array[i];
xfers[j].bits_per_word = 16;
xfers[j].len = 2;
@@ -104,7 +96,7 @@ static int adis16350_spi_read_all(struct device *dev, u8 *rx)
xfers[j].len = 2;
spi_message_init(&msg);
- for (j = 0; j < indio_dev->ring->scan_count + 1; j++)
+ for (j = 0; j < indio_dev->buffer->scan_count + 1; j++)
spi_message_add_tail(&xfers[j], &msg);
ret = spi_sync(st->us, &msg);
@@ -119,13 +111,14 @@ static int adis16350_spi_read_all(struct device *dev, u8 *rx)
static irqreturn_t adis16400_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
+ struct iio_dev *indio_dev = pf->indio_dev;
struct adis16400_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
int i = 0, j, ret = 0;
s16 *data;
size_t datasize = ring->access->get_bytes_per_datum(ring);
- unsigned long mask = ring->scan_mask;
+ /* Asumption that long is enough for maximum channels */
+ unsigned long mask = *ring->scan_mask;
data = kmalloc(datasize , GFP_KERNEL);
if (data == NULL) {
@@ -144,7 +137,7 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx);
if (ret < 0)
goto err;
- for (; i < indio_dev->ring->scan_count; i++) {
+ for (; i < indio_dev->buffer->scan_count; i++) {
j = __ffs(mask);
mask &= ~(1 << j);
data[i] = be16_to_cpup(
@@ -155,7 +148,7 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
/* Guaranteed to be aligned with 8 byte boundary */
if (ring->scan_timestamp)
*((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;
- ring->access->store_to(indio_dev->ring, (u8 *) data, pf->timestamp);
+ ring->access->store_to(indio_dev->buffer, (u8 *) data, pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
@@ -170,36 +163,32 @@ err:
void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
{
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
-static const struct iio_ring_setup_ops adis16400_ring_setup_ops = {
- .preenable = &iio_sw_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+static const struct iio_buffer_setup_ops adis16400_ring_setup_ops = {
+ .preenable = &iio_sw_buffer_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
};
int adis16400_configure_ring(struct iio_dev *indio_dev)
{
int ret = 0;
- struct adis16400_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring;
+ struct iio_buffer *ring;
ring = iio_sw_rb_allocate(indio_dev);
if (!ring) {
ret = -ENOMEM;
return ret;
}
- indio_dev->ring = ring;
+ indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
ring->bpe = 2;
ring->scan_timestamp = true;
ring->setup_ops = &adis16400_ring_setup_ops;
ring->owner = THIS_MODULE;
- /* Set default scan mode */
- ring->scan_mask = st->variant->default_scan_mask;
- ring->scan_count = hweight_long(st->variant->default_scan_mask);
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16400_trigger_handler,
@@ -213,9 +202,9 @@ int adis16400_configure_ring(struct iio_dev *indio_dev)
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
return ret;
}
diff --git a/drivers/staging/iio/imu/adis16400_trigger.c b/drivers/staging/iio/imu/adis16400_trigger.c
index c6ec41a02a6..bf991531e0d 100644
--- a/drivers/staging/iio/imu/adis16400_trigger.c
+++ b/drivers/staging/iio/imu/adis16400_trigger.c
@@ -1,14 +1,8 @@
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16400.h"
@@ -24,13 +18,18 @@ static int adis16400_data_rdy_trigger_set_state(struct iio_trigger *trig,
return adis16400_set_irq(indio_dev, state);
}
+static const struct iio_trigger_ops adis16400_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &adis16400_data_rdy_trigger_set_state,
+};
+
int adis16400_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
struct adis16400_state *st = iio_priv(indio_dev);
st->trig = iio_allocate_trigger("%s-dev%d",
- spi_get_device_id(st->us)->name,
+ indio_dev->name,
indio_dev->id);
if (st->trig == NULL) {
ret = -ENOMEM;
@@ -45,9 +44,8 @@ int adis16400_probe_trigger(struct iio_dev *indio_dev)
if (ret)
goto error_free_trig;
st->trig->dev.parent = &st->us->dev;
- st->trig->owner = THIS_MODULE;
st->trig->private_data = indio_dev;
- st->trig->set_trigger_state = &adis16400_data_rdy_trigger_set_state;
+ st->trig->ops = &adis16400_trigger_ops;
ret = iio_trigger_register(st->trig);
/* select default trigger */
diff --git a/drivers/staging/iio/industrialio-buffer.c b/drivers/staging/iio/industrialio-buffer.c
new file mode 100644
index 00000000000..6dd5d7d629a
--- /dev/null
+++ b/drivers/staging/iio/industrialio-buffer.c
@@ -0,0 +1,635 @@
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Handling of buffer allocation / resizing.
+ *
+ *
+ * Things to look at here.
+ * - Better memory allocation techniques?
+ * - Alternative access techniques?
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+
+#include "iio.h"
+#include "iio_core.h"
+#include "sysfs.h"
+#include "buffer_generic.h"
+
+static const char * const iio_endian_prefix[] = {
+ [IIO_BE] = "be",
+ [IIO_LE] = "le",
+};
+
+/**
+ * iio_buffer_read_first_n_outer() - chrdev read for buffer access
+ *
+ * This function relies on all buffer implementations having an
+ * iio_buffer as their first element.
+ **/
+ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
+ size_t n, loff_t *f_ps)
+{
+ struct iio_dev *indio_dev = filp->private_data;
+ struct iio_buffer *rb = indio_dev->buffer;
+
+ if (!rb->access->read_first_n)
+ return -EINVAL;
+ return rb->access->read_first_n(rb, n, buf);
+}
+
+/**
+ * iio_buffer_poll() - poll the buffer to find out if it has data
+ */
+unsigned int iio_buffer_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct iio_dev *indio_dev = filp->private_data;
+ struct iio_buffer *rb = indio_dev->buffer;
+
+ poll_wait(filp, &rb->pollq, wait);
+ if (rb->stufftoread)
+ return POLLIN | POLLRDNORM;
+ /* need a way of knowing if there may be enough data... */
+ return 0;
+}
+
+int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *rb = indio_dev->buffer;
+ if (!rb)
+ return -EINVAL;
+ if (rb->access->mark_in_use)
+ rb->access->mark_in_use(rb);
+ return 0;
+}
+
+void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *rb = indio_dev->buffer;
+
+ clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
+ if (rb->access->unmark_in_use)
+ rb->access->unmark_in_use(rb);
+}
+
+void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *indio_dev)
+{
+ buffer->indio_dev = indio_dev;
+ init_waitqueue_head(&buffer->pollq);
+}
+EXPORT_SYMBOL(iio_buffer_init);
+
+static ssize_t iio_show_scan_index(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
+}
+
+static ssize_t iio_show_fixed_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ u8 type = this_attr->c->scan_type.endianness;
+
+ if (type == IIO_CPU) {
+#ifdef __LITTLE_ENDIAN
+ type = IIO_LE;
+#else
+ type = IIO_BE;
+#endif
+ }
+ return sprintf(buf, "%s:%c%d/%d>>%u\n",
+ iio_endian_prefix[type],
+ this_attr->c->scan_type.sign,
+ this_attr->c->scan_type.realbits,
+ this_attr->c->scan_type.storagebits,
+ this_attr->c->scan_type.shift);
+}
+
+static ssize_t iio_scan_el_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+ ret = iio_scan_mask_query(indio_dev->buffer,
+ to_iio_dev_attr(attr)->address);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", ret);
+}
+
+static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
+{
+ clear_bit(bit, buffer->scan_mask);
+ buffer->scan_count--;
+ return 0;
+}
+
+static ssize_t iio_scan_el_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret = 0;
+ bool state;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ state = !(buf[0] == '0');
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = -EBUSY;
+ goto error_ret;
+ }
+ ret = iio_scan_mask_query(buffer, this_attr->address);
+ if (ret < 0)
+ goto error_ret;
+ if (!state && ret) {
+ ret = iio_scan_mask_clear(buffer, this_attr->address);
+ if (ret)
+ goto error_ret;
+ } else if (state && !ret) {
+ ret = iio_scan_mask_set(buffer, this_attr->address);
+ if (ret)
+ goto error_ret;
+ }
+
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+
+}
+
+static ssize_t iio_scan_el_ts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
+}
+
+static ssize_t iio_scan_el_ts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ bool state;
+
+ state = !(buf[0] == '0');
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = -EBUSY;
+ goto error_ret;
+ }
+ indio_dev->buffer->scan_timestamp = state;
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ int ret, attrcount = 0;
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ ret = __iio_add_chan_devattr("index",
+ chan,
+ &iio_show_scan_index,
+ NULL,
+ 0,
+ 0,
+ &indio_dev->dev,
+ &buffer->scan_el_dev_attr_list);
+ if (ret)
+ goto error_ret;
+ attrcount++;
+ ret = __iio_add_chan_devattr("type",
+ chan,
+ &iio_show_fixed_type,
+ NULL,
+ 0,
+ 0,
+ &indio_dev->dev,
+ &buffer->scan_el_dev_attr_list);
+ if (ret)
+ goto error_ret;
+ attrcount++;
+ if (chan->type != IIO_TIMESTAMP)
+ ret = __iio_add_chan_devattr("en",
+ chan,
+ &iio_scan_el_show,
+ &iio_scan_el_store,
+ chan->scan_index,
+ 0,
+ &indio_dev->dev,
+ &buffer->scan_el_dev_attr_list);
+ else
+ ret = __iio_add_chan_devattr("en",
+ chan,
+ &iio_scan_el_ts_show,
+ &iio_scan_el_ts_store,
+ chan->scan_index,
+ 0,
+ &indio_dev->dev,
+ &buffer->scan_el_dev_attr_list);
+ attrcount++;
+ ret = attrcount;
+error_ret:
+ return ret;
+}
+
+static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
+ struct iio_dev_attr *p)
+{
+ kfree(p->dev_attr.attr.name);
+ kfree(p);
+}
+
+static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
+{
+ struct iio_dev_attr *p, *n;
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ list_for_each_entry_safe(p, n,
+ &buffer->scan_el_dev_attr_list, l)
+ iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
+}
+
+static const char * const iio_scan_elements_group_name = "scan_elements";
+
+int iio_buffer_register(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *channels,
+ int num_channels)
+{
+ struct iio_dev_attr *p;
+ struct attribute **attr;
+ struct iio_buffer *buffer = indio_dev->buffer;
+ int ret, i, attrn, attrcount, attrcount_orig = 0;
+
+ if (buffer->attrs)
+ indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
+
+ if (buffer->scan_el_attrs != NULL) {
+ attr = buffer->scan_el_attrs->attrs;
+ while (*attr++ != NULL)
+ attrcount_orig++;
+ }
+ attrcount = attrcount_orig;
+ INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
+ if (channels) {
+ /* new magic */
+ for (i = 0; i < num_channels; i++) {
+ /* Establish necessary mask length */
+ if (channels[i].scan_index >
+ (int)indio_dev->masklength - 1)
+ indio_dev->masklength
+ = indio_dev->channels[i].scan_index + 1;
+
+ ret = iio_buffer_add_channel_sysfs(indio_dev,
+ &channels[i]);
+ if (ret < 0)
+ goto error_cleanup_dynamic;
+ attrcount += ret;
+ }
+ if (indio_dev->masklength && buffer->scan_mask == NULL) {
+ buffer->scan_mask
+ = kzalloc(sizeof(*buffer->scan_mask)*
+ BITS_TO_LONGS(indio_dev->masklength),
+ GFP_KERNEL);
+ if (buffer->scan_mask == NULL) {
+ ret = -ENOMEM;
+ goto error_cleanup_dynamic;
+ }
+ }
+ }
+
+ buffer->scan_el_group.name = iio_scan_elements_group_name;
+
+ buffer->scan_el_group.attrs
+ = kzalloc(sizeof(buffer->scan_el_group.attrs[0])*
+ (attrcount + 1),
+ GFP_KERNEL);
+ if (buffer->scan_el_group.attrs == NULL) {
+ ret = -ENOMEM;
+ goto error_free_scan_mask;
+ }
+ if (buffer->scan_el_attrs)
+ memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
+ sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
+ attrn = attrcount_orig;
+
+ list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
+ buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
+ indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
+
+ return 0;
+
+error_free_scan_mask:
+ kfree(buffer->scan_mask);
+error_cleanup_dynamic:
+ __iio_buffer_attr_cleanup(indio_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(iio_buffer_register);
+
+void iio_buffer_unregister(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->buffer->scan_mask);
+ kfree(indio_dev->buffer->scan_el_group.attrs);
+ __iio_buffer_attr_cleanup(indio_dev);
+}
+EXPORT_SYMBOL(iio_buffer_unregister);
+
+ssize_t iio_buffer_read_length(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ if (buffer->access->get_length)
+ return sprintf(buf, "%d\n",
+ buffer->access->get_length(buffer));
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_buffer_read_length);
+
+ssize_t iio_buffer_write_length(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret;
+ ulong val;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (buffer->access->get_length)
+ if (val == buffer->access->get_length(buffer))
+ return len;
+
+ if (buffer->access->set_length) {
+ buffer->access->set_length(buffer, val);
+ if (buffer->access->mark_param_change)
+ buffer->access->mark_param_change(buffer);
+ }
+
+ return len;
+}
+EXPORT_SYMBOL(iio_buffer_write_length);
+
+ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ if (buffer->access->get_bytes_per_datum)
+ return sprintf(buf, "%d\n",
+ buffer->access->get_bytes_per_datum(buffer));
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum);
+
+ssize_t iio_buffer_store_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret;
+ bool requested_state, current_state;
+ int previous_mode;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ mutex_lock(&indio_dev->mlock);
+ previous_mode = indio_dev->currentmode;
+ requested_state = !(buf[0] == '0');
+ current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
+ if (current_state == requested_state) {
+ printk(KERN_INFO "iio-buffer, current state requested again\n");
+ goto done;
+ }
+ if (requested_state) {
+ if (buffer->setup_ops->preenable) {
+ ret = buffer->setup_ops->preenable(indio_dev);
+ if (ret) {
+ printk(KERN_ERR
+ "Buffer not started:"
+ "buffer preenable failed\n");
+ goto error_ret;
+ }
+ }
+ if (buffer->access->request_update) {
+ ret = buffer->access->request_update(buffer);
+ if (ret) {
+ printk(KERN_INFO
+ "Buffer not started:"
+ "buffer parameter update failed\n");
+ goto error_ret;
+ }
+ }
+ if (buffer->access->mark_in_use)
+ buffer->access->mark_in_use(buffer);
+ /* Definitely possible for devices to support both of these.*/
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
+ if (!indio_dev->trig) {
+ printk(KERN_INFO
+ "Buffer not started: no trigger\n");
+ ret = -EINVAL;
+ if (buffer->access->unmark_in_use)
+ buffer->access->unmark_in_use(buffer);
+ goto error_ret;
+ }
+ indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
+ } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE)
+ indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
+ else { /* should never be reached */
+ ret = -EINVAL;
+ goto error_ret;
+ }
+
+ if (buffer->setup_ops->postenable) {
+ ret = buffer->setup_ops->postenable(indio_dev);
+ if (ret) {
+ printk(KERN_INFO
+ "Buffer not started:"
+ "postenable failed\n");
+ if (buffer->access->unmark_in_use)
+ buffer->access->unmark_in_use(buffer);
+ indio_dev->currentmode = previous_mode;
+ if (buffer->setup_ops->postdisable)
+ buffer->setup_ops->
+ postdisable(indio_dev);
+ goto error_ret;
+ }
+ }
+ } else {
+ if (buffer->setup_ops->predisable) {
+ ret = buffer->setup_ops->predisable(indio_dev);
+ if (ret)
+ goto error_ret;
+ }
+ if (buffer->access->unmark_in_use)
+ buffer->access->unmark_in_use(buffer);
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
+ if (buffer->setup_ops->postdisable) {
+ ret = buffer->setup_ops->postdisable(indio_dev);
+ if (ret)
+ goto error_ret;
+ }
+ }
+done:
+ mutex_unlock(&indio_dev->mlock);
+ return len;
+
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+EXPORT_SYMBOL(iio_buffer_store_enable);
+
+ssize_t iio_buffer_show_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", !!(indio_dev->currentmode
+ & INDIO_ALL_BUFFER_MODES));
+}
+EXPORT_SYMBOL(iio_buffer_show_enable);
+
+int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer = indio_dev->buffer;
+ size_t size;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+ /* Check if there are any scan elements enabled, if not fail*/
+ if (!(buffer->scan_count || buffer->scan_timestamp))
+ return -EINVAL;
+ if (buffer->scan_timestamp)
+ if (buffer->scan_count)
+ /* Timestamp (aligned to s64) and data */
+ size = (((buffer->scan_count * buffer->bpe)
+ + sizeof(s64) - 1)
+ & ~(sizeof(s64) - 1))
+ + sizeof(s64);
+ else /* Timestamp only */
+ size = sizeof(s64);
+ else /* Data only */
+ size = buffer->scan_count * buffer->bpe;
+ buffer->access->set_bytes_per_datum(buffer, size);
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_sw_buffer_preenable);
+
+
+/* note NULL used as error indicator as it doesn't make sense. */
+static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
+ unsigned int masklength,
+ unsigned long *mask)
+{
+ if (bitmap_empty(mask, masklength))
+ return NULL;
+ while (*av_masks) {
+ if (bitmap_subset(mask, av_masks, masklength))
+ return av_masks;
+ av_masks += BITS_TO_LONGS(masklength);
+ }
+ return NULL;
+}
+
+/**
+ * iio_scan_mask_set() - set particular bit in the scan mask
+ * @buffer: the buffer whose scan mask we are interested in
+ * @bit: the bit to be set.
+ **/
+int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
+{
+ struct iio_dev *indio_dev = buffer->indio_dev;
+ unsigned long *mask;
+ unsigned long *trialmask;
+
+ trialmask = kmalloc(sizeof(*trialmask)*
+ BITS_TO_LONGS(indio_dev->masklength),
+ GFP_KERNEL);
+
+ if (trialmask == NULL)
+ return -ENOMEM;
+ if (!indio_dev->masklength) {
+ WARN_ON("trying to set scanmask prior to registering buffer\n");
+ kfree(trialmask);
+ return -EINVAL;
+ }
+ bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
+ set_bit(bit, trialmask);
+
+ if (indio_dev->available_scan_masks) {
+ mask = iio_scan_mask_match(indio_dev->available_scan_masks,
+ indio_dev->masklength,
+ trialmask);
+ if (!mask) {
+ kfree(trialmask);
+ return -EINVAL;
+ }
+ }
+ bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
+ buffer->scan_count++;
+
+ kfree(trialmask);
+
+ return 0;
+};
+EXPORT_SYMBOL_GPL(iio_scan_mask_set);
+
+int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
+{
+ struct iio_dev *indio_dev = buffer->indio_dev;
+ long *mask;
+
+ if (bit > indio_dev->masklength)
+ return -EINVAL;
+
+ if (!buffer->scan_mask)
+ return 0;
+ if (indio_dev->available_scan_masks)
+ mask = iio_scan_mask_match(indio_dev->available_scan_masks,
+ indio_dev->masklength,
+ buffer->scan_mask);
+ else
+ mask = buffer->scan_mask;
+ if (!mask)
+ return 0;
+
+ return test_bit(bit, mask);
+};
+EXPORT_SYMBOL_GPL(iio_scan_mask_query);
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 19819e7578c..326e967d54e 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -21,21 +21,17 @@
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
+#include <linux/anon_inodes.h>
#include "iio.h"
-#include "trigger_consumer.h"
+#include "iio_core.h"
+#include "iio_core_trigger.h"
+#include "chrdev.h"
+#include "sysfs.h"
-#define IIO_ID_PREFIX "device"
-#define IIO_ID_FORMAT IIO_ID_PREFIX "%d"
-
-/* IDR to assign each registered device a unique id*/
+/* IDA to assign each registered device a unique id*/
static DEFINE_IDA(iio_ida);
-/* IDR to allocate character device minor numbers */
-static DEFINE_IDA(iio_chrdev_ida);
-/* Lock used to protect both of the above */
-static DEFINE_SPINLOCK(iio_ida_lock);
-dev_t iio_devt;
-EXPORT_SYMBOL(iio_devt);
+static dev_t iio_devt;
#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
@@ -43,14 +39,22 @@ struct bus_type iio_bus_type = {
};
EXPORT_SYMBOL(iio_bus_type);
-static const char * const iio_chan_type_name_spec_shared[] = {
- [IIO_IN] = "in",
- [IIO_OUT] = "out",
+static const char * const iio_data_type_name[] = {
+ [IIO_RAW] = "raw",
+ [IIO_PROCESSED] = "input",
+};
+
+static const char * const iio_direction[] = {
+ [0] = "in",
+ [1] = "out",
+};
+
+static const char * const iio_chan_type_name_spec[] = {
+ [IIO_VOLTAGE] = "voltage",
[IIO_CURRENT] = "current",
[IIO_POWER] = "power",
[IIO_ACCEL] = "accel",
- [IIO_IN_DIFF] = "in-in",
- [IIO_GYRO] = "gyro",
+ [IIO_ANGL_VEL] = "anglvel",
[IIO_MAGN] = "magn",
[IIO_LIGHT] = "illuminance",
[IIO_INTENSITY] = "intensity",
@@ -60,21 +64,15 @@ static const char * const iio_chan_type_name_spec_shared[] = {
[IIO_ROT] = "rot",
[IIO_ANGL] = "angl",
[IIO_TIMESTAMP] = "timestamp",
+ [IIO_CAPACITANCE] = "capacitance",
};
-static const char * const iio_chan_type_name_spec_complex[] = {
- [IIO_IN_DIFF] = "in%d-in%d",
-};
-
-static const char * const iio_modifier_names_light[] = {
- [IIO_MOD_LIGHT_BOTH] = "both",
- [IIO_MOD_LIGHT_IR] = "ir",
-};
-
-static const char * const iio_modifier_names_axial[] = {
+static const char * const iio_modifier_names[] = {
[IIO_MOD_X] = "x",
[IIO_MOD_Y] = "y",
[IIO_MOD_Z] = "z",
+ [IIO_MOD_LIGHT_BOTH] = "both",
+ [IIO_MOD_LIGHT_IR] = "ir",
};
/* relies on pairs of these shared then separate */
@@ -85,21 +83,51 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_CALIBBIAS_SHARED/2] = "calibbias",
[IIO_CHAN_INFO_PEAK_SHARED/2] = "peak_raw",
[IIO_CHAN_INFO_PEAK_SCALE_SHARED/2] = "peak_scale",
+ [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED/2]
+ = "quadrature_correction_raw",
+ [IIO_CHAN_INFO_AVERAGE_RAW_SHARED/2] = "mean_raw",
};
-int iio_push_event(struct iio_dev *dev_info,
- int ev_line,
- int ev_code,
- s64 timestamp)
+/**
+ * struct iio_detected_event_list - list element for events that have occurred
+ * @list: linked list header
+ * @ev: the event itself
+ */
+struct iio_detected_event_list {
+ struct list_head list;
+ struct iio_event_data ev;
+};
+
+/**
+ * struct iio_event_interface - chrdev interface for an event line
+ * @dev: device assocated with event interface
+ * @wait: wait queue to allow blocking reads of events
+ * @event_list_lock: mutex to protect the list of detected events
+ * @det_events: list of detected events
+ * @max_events: maximum number of events before new ones are dropped
+ * @current_events: number of events in detected list
+ * @flags: file operations related flags including busy flag.
+ */
+struct iio_event_interface {
+ wait_queue_head_t wait;
+ struct mutex event_list_lock;
+ struct list_head det_events;
+ int max_events;
+ int current_events;
+ struct list_head dev_attr_list;
+ unsigned long flags;
+ struct attribute_group group;
+};
+
+int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
- struct iio_event_interface *ev_int
- = &dev_info->event_interfaces[ev_line];
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
struct iio_detected_event_list *ev;
int ret = 0;
/* Does anyone care? */
mutex_lock(&ev_int->event_list_lock);
- if (test_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags)) {
+ if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
if (ev_int->current_events == ev_int->max_events) {
mutex_unlock(&ev_int->event_list_lock);
return 0;
@@ -125,7 +153,6 @@ error_ret:
}
EXPORT_SYMBOL(iio_push_event);
-
/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
struct device_attribute *attr,
@@ -135,7 +162,6 @@ ssize_t iio_read_const_attr(struct device *dev,
}
EXPORT_SYMBOL(iio_read_const_attr);
-
static ssize_t iio_event_chrdev_read(struct file *filep,
char __user *buf,
size_t count,
@@ -187,12 +213,11 @@ error_ret:
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
- struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
- struct iio_event_interface *ev_int = hand->private;
+ struct iio_event_interface *ev_int = filep->private_data;
struct iio_detected_event_list *el, *t;
mutex_lock(&ev_int->event_list_lock);
- clear_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags);
+ clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
/*
* In order to maintain a clean state for reopening,
* clear out any awaiting events. The mask will prevent
@@ -202,23 +227,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
list_del(&el->list);
kfree(el);
}
- mutex_unlock(&ev_int->event_list_lock);
-
- return 0;
-}
-
-static int iio_event_chrdev_open(struct inode *inode, struct file *filep)
-{
- struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
- struct iio_event_interface *ev_int = hand->private;
-
- mutex_lock(&ev_int->event_list_lock);
- if (test_and_set_bit(IIO_BUSY_BIT_POS, &hand->flags)) {
- fops_put(filep->f_op);
- mutex_unlock(&ev_int->event_list_lock);
- return -EBUSY;
- }
- filep->private_data = hand->private;
+ ev_int->current_events = 0;
mutex_unlock(&ev_int->event_list_lock);
return 0;
@@ -227,124 +236,25 @@ static int iio_event_chrdev_open(struct inode *inode, struct file *filep)
static const struct file_operations iio_event_chrdev_fileops = {
.read = iio_event_chrdev_read,
.release = iio_event_chrdev_release,
- .open = iio_event_chrdev_open,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
-static void iio_event_dev_release(struct device *dev)
+static int iio_event_getfd(struct iio_dev *indio_dev)
{
- struct iio_event_interface *ev_int
- = container_of(dev, struct iio_event_interface, dev);
- cdev_del(&ev_int->handler.chrdev);
- iio_device_free_chrdev_minor(MINOR(dev->devt));
-};
-
-static struct device_type iio_event_type = {
- .release = iio_event_dev_release,
-};
-
-int iio_device_get_chrdev_minor(void)
-{
- int ret, val;
-
-ida_again:
- if (unlikely(ida_pre_get(&iio_chrdev_ida, GFP_KERNEL) == 0))
- return -ENOMEM;
- spin_lock(&iio_ida_lock);
- ret = ida_get_new(&iio_chrdev_ida, &val);
- spin_unlock(&iio_ida_lock);
- if (unlikely(ret == -EAGAIN))
- goto ida_again;
- else if (unlikely(ret))
- return ret;
- if (val > IIO_DEV_MAX)
- return -ENOMEM;
- return val;
-}
-
-void iio_device_free_chrdev_minor(int val)
-{
- spin_lock(&iio_ida_lock);
- ida_remove(&iio_chrdev_ida, val);
- spin_unlock(&iio_ida_lock);
-}
+ if (indio_dev->event_interface == NULL)
+ return -ENODEV;
-static int iio_setup_ev_int(struct iio_event_interface *ev_int,
- const char *dev_name,
- int index,
- struct module *owner,
- struct device *dev)
-{
- int ret, minor;
-
- ev_int->dev.bus = &iio_bus_type;
- ev_int->dev.parent = dev;
- ev_int->dev.type = &iio_event_type;
- device_initialize(&ev_int->dev);
-
- minor = iio_device_get_chrdev_minor();
- if (minor < 0) {
- ret = minor;
- goto error_device_put;
+ mutex_lock(&indio_dev->event_interface->event_list_lock);
+ if (test_and_set_bit(IIO_BUSY_BIT_POS,
+ &indio_dev->event_interface->flags)) {
+ mutex_unlock(&indio_dev->event_interface->event_list_lock);
+ return -EBUSY;
}
- ev_int->dev.devt = MKDEV(MAJOR(iio_devt), minor);
- dev_set_name(&ev_int->dev, "%s:event%d", dev_name, index);
-
- ret = device_add(&ev_int->dev);
- if (ret)
- goto error_free_minor;
-
- cdev_init(&ev_int->handler.chrdev, &iio_event_chrdev_fileops);
- ev_int->handler.chrdev.owner = owner;
-
- mutex_init(&ev_int->event_list_lock);
- /* discussion point - make this variable? */
- ev_int->max_events = 10;
- ev_int->current_events = 0;
- INIT_LIST_HEAD(&ev_int->det_events);
- init_waitqueue_head(&ev_int->wait);
- ev_int->handler.private = ev_int;
- ev_int->handler.flags = 0;
-
- ret = cdev_add(&ev_int->handler.chrdev, ev_int->dev.devt, 1);
- if (ret)
- goto error_unreg_device;
-
- return 0;
-
-error_unreg_device:
- device_unregister(&ev_int->dev);
-error_free_minor:
- iio_device_free_chrdev_minor(minor);
-error_device_put:
- put_device(&ev_int->dev);
-
- return ret;
-}
-
-static void iio_free_ev_int(struct iio_event_interface *ev_int)
-{
- device_unregister(&ev_int->dev);
- put_device(&ev_int->dev);
-}
-
-static int __init iio_dev_init(void)
-{
- int err;
-
- err = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
- if (err < 0)
- printk(KERN_ERR "%s: failed to allocate char dev region\n",
- __FILE__);
-
- return err;
-}
-
-static void __exit iio_dev_exit(void)
-{
- if (iio_devt)
- unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
+ mutex_unlock(&indio_dev->event_interface->event_list_lock);
+ return anon_inode_getfd("iio:event",
+ &iio_event_chrdev_fileops,
+ indio_dev->event_interface, O_RDONLY);
}
static int __init iio_init(void)
@@ -360,9 +270,12 @@ static int __init iio_init(void)
goto error_nothing;
}
- ret = iio_dev_init();
- if (ret < 0)
+ ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to allocate char dev region\n",
+ __FILE__);
goto error_unregister_bus_type;
+ }
return 0;
@@ -374,7 +287,8 @@ error_nothing:
static void __exit iio_exit(void)
{
- iio_dev_exit();
+ if (iio_devt)
+ unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
bus_unregister(&iio_bus_type);
}
@@ -476,54 +390,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
return len;
}
-static int __iio_build_postfix(struct iio_chan_spec const *chan,
- bool generic,
- const char *postfix,
- char **result)
-{
- char *all_post;
- /* 3 options - generic, extend_name, modified - if generic, extend_name
- * and modified cannot apply.*/
-
- if (generic || (!chan->modified && !chan->extend_name)) {
- all_post = kasprintf(GFP_KERNEL, "%s", postfix);
- } else if (chan->modified) {
- const char *intermediate;
- switch (chan->type) {
- case IIO_INTENSITY:
- intermediate
- = iio_modifier_names_light[chan->channel2];
- break;
- case IIO_ACCEL:
- case IIO_GYRO:
- case IIO_MAGN:
- case IIO_INCLI:
- case IIO_ROT:
- case IIO_ANGL:
- intermediate
- = iio_modifier_names_axial[chan->channel2];
- break;
- default:
- return -EINVAL;
- }
- if (chan->extend_name)
- all_post = kasprintf(GFP_KERNEL, "%s_%s_%s",
- intermediate,
- chan->extend_name,
- postfix);
- else
- all_post = kasprintf(GFP_KERNEL, "%s_%s",
- intermediate,
- postfix);
- } else
- all_post = kasprintf(GFP_KERNEL, "%s_%s", chan->extend_name,
- postfix);
- if (all_post == NULL)
- return -ENOMEM;
- *result = all_post;
- return 0;
-}
-
+static
int __iio_device_attr_init(struct device_attribute *dev_attr,
const char *postfix,
struct iio_chan_spec const *chan,
@@ -539,28 +406,77 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
int ret;
char *name_format, *full_postfix;
sysfs_attr_init(&dev_attr->attr);
- ret = __iio_build_postfix(chan, generic, postfix, &full_postfix);
- if (ret)
- goto error_ret;
- /* Special case for types that uses both channel numbers in naming */
- if (chan->type == IIO_IN_DIFF && !generic)
- name_format
- = kasprintf(GFP_KERNEL, "%s_%s",
- iio_chan_type_name_spec_complex[chan->type],
- full_postfix);
- else if (generic || !chan->indexed)
- name_format
- = kasprintf(GFP_KERNEL, "%s_%s",
- iio_chan_type_name_spec_shared[chan->type],
- full_postfix);
- else
- name_format
- = kasprintf(GFP_KERNEL, "%s%d_%s",
- iio_chan_type_name_spec_shared[chan->type],
- chan->channel,
- full_postfix);
+ /* Build up postfix of <extend_name>_<modifier>_postfix */
+ if (chan->modified) {
+ if (chan->extend_name)
+ full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ iio_modifier_names[chan
+ ->channel2],
+ chan->extend_name,
+ postfix);
+ else
+ full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
+ iio_modifier_names[chan
+ ->channel2],
+ postfix);
+ } else {
+ if (chan->extend_name == NULL)
+ full_postfix = kstrdup(postfix, GFP_KERNEL);
+ else
+ full_postfix = kasprintf(GFP_KERNEL,
+ "%s_%s",
+ chan->extend_name,
+ postfix);
+ }
+ if (full_postfix == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ if (chan->differential) { /* Differential can not have modifier */
+ if (generic)
+ name_format
+ = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
+ iio_direction[chan->output],
+ iio_chan_type_name_spec[chan->type],
+ iio_chan_type_name_spec[chan->type],
+ full_postfix);
+ else if (chan->indexed)
+ name_format
+ = kasprintf(GFP_KERNEL, "%s_%s%d-%s%d_%s",
+ iio_direction[chan->output],
+ iio_chan_type_name_spec[chan->type],
+ chan->channel,
+ iio_chan_type_name_spec[chan->type],
+ chan->channel2,
+ full_postfix);
+ else {
+ WARN_ON("Differential channels must be indexed\n");
+ ret = -EINVAL;
+ goto error_free_full_postfix;
+ }
+ } else { /* Single ended */
+ if (generic)
+ name_format
+ = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ iio_direction[chan->output],
+ iio_chan_type_name_spec[chan->type],
+ full_postfix);
+ else if (chan->indexed)
+ name_format
+ = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
+ iio_direction[chan->output],
+ iio_chan_type_name_spec[chan->type],
+ chan->channel,
+ full_postfix);
+ else
+ name_format
+ = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ iio_direction[chan->output],
+ iio_chan_type_name_spec[chan->type],
+ full_postfix);
+ }
if (name_format == NULL) {
ret = -ENOMEM;
goto error_free_full_postfix;
@@ -596,13 +512,12 @@ error_ret:
return ret;
}
-void __iio_device_attr_deinit(struct device_attribute *dev_attr)
+static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
kfree(dev_attr->attr.name);
}
int __iio_add_chan_devattr(const char *postfix,
- const char *group,
struct iio_chan_spec const *chan,
ssize_t (*readfunc)(struct device *dev,
struct device_attribute *attr,
@@ -611,7 +526,7 @@ int __iio_add_chan_devattr(const char *postfix,
struct device_attribute *attr,
const char *buf,
size_t len),
- int mask,
+ u64 mask,
bool generic,
struct device *dev,
struct list_head *attr_list)
@@ -640,12 +555,6 @@ int __iio_add_chan_devattr(const char *postfix,
ret = -EBUSY;
goto error_device_attr_deinit;
}
-
- ret = sysfs_add_file_to_group(&dev->kobj,
- &iio_attr->dev_attr.attr, group);
- if (ret < 0)
- goto error_device_attr_deinit;
-
list_add(&iio_attr->l, attr_list);
return 0;
@@ -658,59 +567,52 @@ error_ret:
return ret;
}
-static int iio_device_add_channel_sysfs(struct iio_dev *dev_info,
+static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
- int ret, i;
-
+ int ret, i, attrcount = 0;
if (chan->channel < 0)
return 0;
- if (chan->processed_val)
- ret = __iio_add_chan_devattr("input", NULL, chan,
- &iio_read_channel_info,
- NULL,
- 0,
- 0,
- &dev_info->dev,
- &dev_info->channel_attr_list);
- else
- ret = __iio_add_chan_devattr("raw", NULL, chan,
- &iio_read_channel_info,
- (chan->type == IIO_OUT ?
- &iio_write_channel_info : NULL),
- 0,
- 0,
- &dev_info->dev,
- &dev_info->channel_attr_list);
+
+ ret = __iio_add_chan_devattr(iio_data_type_name[chan->processed_val],
+ chan,
+ &iio_read_channel_info,
+ (chan->output ?
+ &iio_write_channel_info : NULL),
+ 0,
+ 0,
+ &indio_dev->dev,
+ &indio_dev->channel_attr_list);
if (ret)
goto error_ret;
+ attrcount++;
for_each_set_bit(i, &chan->info_mask, sizeof(long)*8) {
ret = __iio_add_chan_devattr(iio_chan_info_postfix[i/2],
- NULL, chan,
+ chan,
&iio_read_channel_info,
&iio_write_channel_info,
(1 << i),
!(i%2),
- &dev_info->dev,
- &dev_info->channel_attr_list);
+ &indio_dev->dev,
+ &indio_dev->channel_attr_list);
if (ret == -EBUSY && (i%2 == 0)) {
ret = 0;
continue;
}
if (ret < 0)
goto error_ret;
+ attrcount++;
}
+ ret = attrcount;
error_ret:
return ret;
}
-static void iio_device_remove_and_free_read_attr(struct iio_dev *dev_info,
+static void iio_device_remove_and_free_read_attr(struct iio_dev *indio_dev,
struct iio_dev_attr *p)
{
- sysfs_remove_file_from_group(&dev_info->dev.kobj,
- &p->dev_attr.attr, NULL);
kfree(p->dev_attr.attr.name);
kfree(p);
}
@@ -725,107 +627,91 @@ static ssize_t iio_show_dev_name(struct device *dev,
static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
-static int iio_device_register_sysfs(struct iio_dev *dev_info)
+static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
- int i, ret = 0;
+ int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
struct iio_dev_attr *p, *n;
+ struct attribute **attr;
- if (dev_info->info->attrs) {
- ret = sysfs_create_group(&dev_info->dev.kobj,
- dev_info->info->attrs);
- if (ret) {
- dev_err(dev_info->dev.parent,
- "Failed to register sysfs hooks\n");
- goto error_ret;
- }
+ /* First count elements in any existing group */
+ if (indio_dev->info->attrs) {
+ attr = indio_dev->info->attrs->attrs;
+ while (*attr++ != NULL)
+ attrcount_orig++;
}
-
+ attrcount = attrcount_orig;
/*
* New channel registration method - relies on the fact a group does
* not need to be initialized if it is name is NULL.
*/
- INIT_LIST_HEAD(&dev_info->channel_attr_list);
- if (dev_info->channels)
- for (i = 0; i < dev_info->num_channels; i++) {
- ret = iio_device_add_channel_sysfs(dev_info,
- &dev_info
+ INIT_LIST_HEAD(&indio_dev->channel_attr_list);
+ if (indio_dev->channels)
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ ret = iio_device_add_channel_sysfs(indio_dev,
+ &indio_dev
->channels[i]);
if (ret < 0)
goto error_clear_attrs;
+ attrcount += ret;
}
- if (dev_info->name) {
- ret = sysfs_add_file_to_group(&dev_info->dev.kobj,
- &dev_attr_name.attr,
- NULL);
- if (ret)
- goto error_clear_attrs;
+
+ if (indio_dev->name)
+ attrcount++;
+
+ indio_dev->chan_attr_group.attrs
+ = kzalloc(sizeof(indio_dev->chan_attr_group.attrs[0])*
+ (attrcount + 1),
+ GFP_KERNEL);
+ if (indio_dev->chan_attr_group.attrs == NULL) {
+ ret = -ENOMEM;
+ goto error_clear_attrs;
}
+ /* Copy across original attributes */
+ if (indio_dev->info->attrs)
+ memcpy(indio_dev->chan_attr_group.attrs,
+ indio_dev->info->attrs->attrs,
+ sizeof(indio_dev->chan_attr_group.attrs[0])
+ *attrcount_orig);
+ attrn = attrcount_orig;
+ /* Add all elements from the list. */
+ list_for_each_entry(p, &indio_dev->channel_attr_list, l)
+ indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
+ if (indio_dev->name)
+ indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
+
+ indio_dev->groups[indio_dev->groupcounter++] =
+ &indio_dev->chan_attr_group;
+
return 0;
error_clear_attrs:
list_for_each_entry_safe(p, n,
- &dev_info->channel_attr_list, l) {
+ &indio_dev->channel_attr_list, l) {
list_del(&p->l);
- iio_device_remove_and_free_read_attr(dev_info, p);
+ iio_device_remove_and_free_read_attr(indio_dev, p);
}
- if (dev_info->info->attrs)
- sysfs_remove_group(&dev_info->dev.kobj, dev_info->info->attrs);
-error_ret:
- return ret;
+ return ret;
}
-static void iio_device_unregister_sysfs(struct iio_dev *dev_info)
+static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{
struct iio_dev_attr *p, *n;
- if (dev_info->name)
- sysfs_remove_file_from_group(&dev_info->dev.kobj,
- &dev_attr_name.attr,
- NULL);
- list_for_each_entry_safe(p, n, &dev_info->channel_attr_list, l) {
+
+ list_for_each_entry_safe(p, n, &indio_dev->channel_attr_list, l) {
list_del(&p->l);
- iio_device_remove_and_free_read_attr(dev_info, p);
+ iio_device_remove_and_free_read_attr(indio_dev, p);
}
-
- if (dev_info->info->attrs)
- sysfs_remove_group(&dev_info->dev.kobj, dev_info->info->attrs);
-}
-
-/* Return a negative errno on failure */
-int iio_get_new_ida_val(struct ida *this_ida)
-{
- int ret;
- int val;
-
-ida_again:
- if (unlikely(ida_pre_get(this_ida, GFP_KERNEL) == 0))
- return -ENOMEM;
-
- spin_lock(&iio_ida_lock);
- ret = ida_get_new(this_ida, &val);
- spin_unlock(&iio_ida_lock);
- if (unlikely(ret == -EAGAIN))
- goto ida_again;
- else if (unlikely(ret))
- return ret;
-
- return val;
+ kfree(indio_dev->chan_attr_group.attrs);
}
-EXPORT_SYMBOL(iio_get_new_ida_val);
-
-void iio_free_ida_val(struct ida *this_ida, int id)
-{
- spin_lock(&iio_ida_lock);
- ida_remove(this_ida, id);
- spin_unlock(&iio_ida_lock);
-}
-EXPORT_SYMBOL(iio_free_ida_val);
static const char * const iio_ev_type_text[] = {
[IIO_EV_TYPE_THRESH] = "thresh",
[IIO_EV_TYPE_MAG] = "mag",
- [IIO_EV_TYPE_ROC] = "roc"
+ [IIO_EV_TYPE_ROC] = "roc",
+ [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
+ [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};
static const char * const iio_ev_dir_text[] = {
@@ -907,230 +793,214 @@ static ssize_t iio_ev_value_store(struct device *dev,
return len;
}
-static int iio_device_add_event_sysfs(struct iio_dev *dev_info,
+static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
-
- int ret = 0, i, mask;
+ int ret = 0, i, attrcount = 0;
+ u64 mask = 0;
char *postfix;
if (!chan->event_mask)
return 0;
for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
- iio_ev_type_text[i/IIO_EV_TYPE_MAX],
- iio_ev_dir_text[i%IIO_EV_TYPE_MAX]);
+ iio_ev_type_text[i/IIO_EV_DIR_MAX],
+ iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
if (postfix == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- switch (chan->type) {
- /* Switch this to a table at some point */
- case IIO_IN:
- mask = IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
- i/IIO_EV_TYPE_MAX,
- i%IIO_EV_TYPE_MAX);
- break;
- case IIO_ACCEL:
+ if (chan->modified)
mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
- i/IIO_EV_TYPE_MAX,
- i%IIO_EV_TYPE_MAX);
- break;
- case IIO_IN_DIFF:
- mask = IIO_MOD_EVENT_CODE(chan->type, chan->channel,
- chan->channel2,
- i/IIO_EV_TYPE_MAX,
- i%IIO_EV_TYPE_MAX);
- break;
- default:
- printk(KERN_INFO "currently unhandled type of event\n");
- }
+ i/IIO_EV_DIR_MAX,
+ i%IIO_EV_DIR_MAX);
+ else if (chan->differential)
+ mask = IIO_EVENT_CODE(chan->type,
+ 0, 0,
+ i%IIO_EV_DIR_MAX,
+ i/IIO_EV_DIR_MAX,
+ 0,
+ chan->channel,
+ chan->channel2);
+ else
+ mask = IIO_UNMOD_EVENT_CODE(chan->type,
+ chan->channel,
+ i/IIO_EV_DIR_MAX,
+ i%IIO_EV_DIR_MAX);
+
ret = __iio_add_chan_devattr(postfix,
- NULL,
chan,
&iio_ev_state_show,
iio_ev_state_store,
mask,
- /*HACK. - limits us to one
- event interface - fix by
- extending the bitmask - but
- how far*/
0,
- &dev_info->event_interfaces[0].dev,
- &dev_info->event_interfaces[0].
+ &indio_dev->dev,
+ &indio_dev->event_interface->
dev_attr_list);
kfree(postfix);
if (ret)
goto error_ret;
-
+ attrcount++;
postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
- iio_ev_type_text[i/IIO_EV_TYPE_MAX],
- iio_ev_dir_text[i%IIO_EV_TYPE_MAX]);
+ iio_ev_type_text[i/IIO_EV_DIR_MAX],
+ iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
if (postfix == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- ret = __iio_add_chan_devattr(postfix, NULL, chan,
+ ret = __iio_add_chan_devattr(postfix, chan,
iio_ev_value_show,
iio_ev_value_store,
mask,
0,
- &dev_info->event_interfaces[0]
- .dev,
- &dev_info->event_interfaces[0]
- .dev_attr_list);
+ &indio_dev->dev,
+ &indio_dev->event_interface->
+ dev_attr_list);
kfree(postfix);
if (ret)
goto error_ret;
-
+ attrcount++;
}
-
+ ret = attrcount;
error_ret:
return ret;
}
-static inline void __iio_remove_all_event_sysfs(struct iio_dev *dev_info,
- const char *groupname,
- int num)
+static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
{
struct iio_dev_attr *p, *n;
list_for_each_entry_safe(p, n,
- &dev_info->event_interfaces[num].
+ &indio_dev->event_interface->
dev_attr_list, l) {
- sysfs_remove_file_from_group(&dev_info
- ->event_interfaces[num].dev.kobj,
- &p->dev_attr.attr,
- groupname);
kfree(p->dev_attr.attr.name);
kfree(p);
}
}
-static inline int __iio_add_event_config_attrs(struct iio_dev *dev_info, int i)
+static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
- int j;
- int ret;
- INIT_LIST_HEAD(&dev_info->event_interfaces[0].dev_attr_list);
+ int j, ret, attrcount = 0;
+
+ INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
/* Dynically created from the channels array */
- if (dev_info->channels) {
- for (j = 0; j < dev_info->num_channels; j++) {
- ret = iio_device_add_event_sysfs(dev_info,
- &dev_info
- ->channels[j]);
- if (ret)
- goto error_clear_attrs;
- }
+ for (j = 0; j < indio_dev->num_channels; j++) {
+ ret = iio_device_add_event_sysfs(indio_dev,
+ &indio_dev->channels[j]);
+ if (ret < 0)
+ goto error_clear_attrs;
+ attrcount += ret;
}
- return 0;
+ return attrcount;
error_clear_attrs:
- __iio_remove_all_event_sysfs(dev_info, NULL, i);
+ __iio_remove_event_config_attrs(indio_dev);
return ret;
}
-static inline int __iio_remove_event_config_attrs(struct iio_dev *dev_info,
- int i)
+static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
- __iio_remove_all_event_sysfs(dev_info, NULL, i);
- return 0;
+ int j;
+
+ for (j = 0; j < indio_dev->num_channels; j++)
+ if (indio_dev->channels[j].event_mask != 0)
+ return true;
+ return false;
+}
+
+static void iio_setup_ev_int(struct iio_event_interface *ev_int)
+{
+ mutex_init(&ev_int->event_list_lock);
+ /* discussion point - make this variable? */
+ ev_int->max_events = 10;
+ ev_int->current_events = 0;
+ INIT_LIST_HEAD(&ev_int->det_events);
+ init_waitqueue_head(&ev_int->wait);
}
-static int iio_device_register_eventset(struct iio_dev *dev_info)
+static const char *iio_event_group_name = "events";
+static int iio_device_register_eventset(struct iio_dev *indio_dev)
{
- int ret = 0, i, j;
+ struct iio_dev_attr *p;
+ int ret = 0, attrcount_orig = 0, attrcount, attrn;
+ struct attribute **attr;
- if (dev_info->info->num_interrupt_lines == 0)
+ if (!(indio_dev->info->event_attrs ||
+ iio_check_for_dynamic_events(indio_dev)))
return 0;
- dev_info->event_interfaces =
- kzalloc(sizeof(struct iio_event_interface)
- *dev_info->info->num_interrupt_lines,
- GFP_KERNEL);
- if (dev_info->event_interfaces == NULL) {
+ indio_dev->event_interface =
+ kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
+ if (indio_dev->event_interface == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- for (i = 0; i < dev_info->info->num_interrupt_lines; i++) {
- ret = iio_setup_ev_int(&dev_info->event_interfaces[i],
- dev_name(&dev_info->dev),
- i,
- dev_info->info->driver_module,
- &dev_info->dev);
- if (ret) {
- dev_err(&dev_info->dev,
- "Could not get chrdev interface\n");
- goto error_free_setup_ev_ints;
- }
-
- dev_set_drvdata(&dev_info->event_interfaces[i].dev,
- (void *)dev_info);
-
- if (dev_info->info->event_attrs != NULL)
- ret = sysfs_create_group(&dev_info
- ->event_interfaces[i]
- .dev.kobj,
- &dev_info->info
- ->event_attrs[i]);
-
- if (ret) {
- dev_err(&dev_info->dev,
- "Failed to register sysfs for event attrs");
- goto error_remove_sysfs_interfaces;
- }
+ iio_setup_ev_int(indio_dev->event_interface);
+ if (indio_dev->info->event_attrs != NULL) {
+ attr = indio_dev->info->event_attrs->attrs;
+ while (*attr++ != NULL)
+ attrcount_orig++;
+ }
+ attrcount = attrcount_orig;
+ if (indio_dev->channels) {
+ ret = __iio_add_event_config_attrs(indio_dev);
+ if (ret < 0)
+ goto error_free_setup_event_lines;
+ attrcount += ret;
}
- for (i = 0; i < dev_info->info->num_interrupt_lines; i++) {
- ret = __iio_add_event_config_attrs(dev_info, i);
- if (ret)
- goto error_unregister_config_attrs;
+ indio_dev->event_interface->group.name = iio_event_group_name;
+ indio_dev->event_interface->group.attrs =
+ kzalloc(sizeof(indio_dev->event_interface->group.attrs[0])
+ *(attrcount + 1),
+ GFP_KERNEL);
+ if (indio_dev->event_interface->group.attrs == NULL) {
+ ret = -ENOMEM;
+ goto error_free_setup_event_lines;
}
+ if (indio_dev->info->event_attrs)
+ memcpy(indio_dev->event_interface->group.attrs,
+ indio_dev->info->event_attrs->attrs,
+ sizeof(indio_dev->event_interface->group.attrs[0])
+ *attrcount_orig);
+ attrn = attrcount_orig;
+ /* Add all elements from the list. */
+ list_for_each_entry(p,
+ &indio_dev->event_interface->dev_attr_list,
+ l)
+ indio_dev->event_interface->group.attrs[attrn++] =
+ &p->dev_attr.attr;
+ indio_dev->groups[indio_dev->groupcounter++] =
+ &indio_dev->event_interface->group;
return 0;
-error_unregister_config_attrs:
- for (j = 0; j < i; j++)
- __iio_remove_event_config_attrs(dev_info, i);
- i = dev_info->info->num_interrupt_lines - 1;
-error_remove_sysfs_interfaces:
- for (j = 0; j < i; j++)
- if (dev_info->info->event_attrs != NULL)
- sysfs_remove_group(&dev_info
- ->event_interfaces[j].dev.kobj,
- &dev_info->info->event_attrs[j]);
-error_free_setup_ev_ints:
- for (j = 0; j < i; j++)
- iio_free_ev_int(&dev_info->event_interfaces[j]);
- kfree(dev_info->event_interfaces);
+error_free_setup_event_lines:
+ __iio_remove_event_config_attrs(indio_dev);
+ kfree(indio_dev->event_interface);
error_ret:
return ret;
}
-static void iio_device_unregister_eventset(struct iio_dev *dev_info)
+static void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
- int i;
-
- if (dev_info->info->num_interrupt_lines == 0)
+ if (indio_dev->event_interface == NULL)
return;
- for (i = 0; i < dev_info->info->num_interrupt_lines; i++) {
- __iio_remove_event_config_attrs(dev_info, i);
- if (dev_info->info->event_attrs != NULL)
- sysfs_remove_group(&dev_info
- ->event_interfaces[i].dev.kobj,
- &dev_info->info->event_attrs[i]);
- }
-
- for (i = 0; i < dev_info->info->num_interrupt_lines; i++)
- iio_free_ev_int(&dev_info->event_interfaces[i]);
- kfree(dev_info->event_interfaces);
+ __iio_remove_event_config_attrs(indio_dev);
+ kfree(indio_dev->event_interface->group.attrs);
+ kfree(indio_dev->event_interface);
}
static void iio_dev_release(struct device *device)
{
- iio_put();
- kfree(to_iio_dev(device));
+ struct iio_dev *indio_dev = container_of(device, struct iio_dev, dev);
+ cdev_del(&indio_dev->chrdev);
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
+ iio_device_unregister_trigger_consumer(indio_dev);
+ iio_device_unregister_eventset(indio_dev);
+ iio_device_unregister_sysfs(indio_dev);
}
static struct device_type iio_dev_type = {
@@ -1154,12 +1024,21 @@ struct iio_dev *iio_allocate_device(int sizeof_priv)
dev = kzalloc(alloc_size, GFP_KERNEL);
if (dev) {
+ dev->dev.groups = dev->groups;
dev->dev.type = &iio_dev_type;
dev->dev.bus = &iio_bus_type;
device_initialize(&dev->dev);
dev_set_drvdata(&dev->dev, (void *)dev);
mutex_init(&dev->mlock);
- iio_get();
+
+ dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
+ if (dev->id < 0) {
+ /* cannot use a dev_err as the name isn't available */
+ printk(KERN_ERR "Failed to get id\n");
+ kfree(dev);
+ return NULL;
+ }
+ dev_set_name(&dev->dev, "iio:device%d", dev->id);
}
return dev;
@@ -1168,75 +1047,111 @@ EXPORT_SYMBOL(iio_allocate_device);
void iio_free_device(struct iio_dev *dev)
{
- if (dev)
- iio_put_device(dev);
+ if (dev) {
+ ida_simple_remove(&iio_ida, dev->id);
+ kfree(dev);
+ }
}
EXPORT_SYMBOL(iio_free_device);
-int iio_device_register(struct iio_dev *dev_info)
+/**
+ * iio_chrdev_open() - chrdev file open for buffer access and ioctls
+ **/
+static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
- int ret;
+ struct iio_dev *indio_dev = container_of(inode->i_cdev,
+ struct iio_dev, chrdev);
+ filp->private_data = indio_dev;
- dev_info->id = iio_get_new_ida_val(&iio_ida);
- if (dev_info->id < 0) {
- ret = dev_info->id;
- dev_err(&dev_info->dev, "Failed to get id\n");
- goto error_ret;
+ return iio_chrdev_buffer_open(indio_dev);
+}
+
+/**
+ * iio_chrdev_release() - chrdev file close buffer access and ioctls
+ **/
+static int iio_chrdev_release(struct inode *inode, struct file *filp)
+{
+ iio_chrdev_buffer_release(container_of(inode->i_cdev,
+ struct iio_dev, chrdev));
+ return 0;
+}
+
+/* Somewhat of a cross file organization violation - ioctls here are actually
+ * event related */
+static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct iio_dev *indio_dev = filp->private_data;
+ int __user *ip = (int __user *)arg;
+ int fd;
+
+ if (cmd == IIO_GET_EVENT_FD_IOCTL) {
+ fd = iio_event_getfd(indio_dev);
+ if (copy_to_user(ip, &fd, sizeof(fd)))
+ return -EFAULT;
+ return 0;
}
- dev_set_name(&dev_info->dev, "device%d", dev_info->id);
+ return -EINVAL;
+}
- ret = device_add(&dev_info->dev);
- if (ret)
- goto error_free_ida;
- ret = iio_device_register_sysfs(dev_info);
+static const struct file_operations iio_buffer_fileops = {
+ .read = iio_buffer_read_first_n_outer_addr,
+ .release = iio_chrdev_release,
+ .open = iio_chrdev_open,
+ .poll = iio_buffer_poll_addr,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = iio_ioctl,
+ .compat_ioctl = iio_ioctl,
+};
+
+int iio_device_register(struct iio_dev *indio_dev)
+{
+ int ret;
+
+ /* configure elements for the chrdev */
+ indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);
+
+ ret = iio_device_register_sysfs(indio_dev);
if (ret) {
- dev_err(dev_info->dev.parent,
+ dev_err(indio_dev->dev.parent,
"Failed to register sysfs interfaces\n");
- goto error_del_device;
+ goto error_ret;
}
- ret = iio_device_register_eventset(dev_info);
+ ret = iio_device_register_eventset(indio_dev);
if (ret) {
- dev_err(dev_info->dev.parent,
+ dev_err(indio_dev->dev.parent,
"Failed to register event set\n");
goto error_free_sysfs;
}
- if (dev_info->modes & INDIO_RING_TRIGGERED)
- iio_device_register_trigger_consumer(dev_info);
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
+ iio_device_register_trigger_consumer(indio_dev);
+ ret = device_add(&indio_dev->dev);
+ if (ret < 0)
+ goto error_unreg_eventset;
+ cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
+ indio_dev->chrdev.owner = indio_dev->info->driver_module;
+ ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
+ if (ret < 0)
+ goto error_del_device;
return 0;
-error_free_sysfs:
- iio_device_unregister_sysfs(dev_info);
error_del_device:
- device_del(&dev_info->dev);
-error_free_ida:
- iio_free_ida_val(&iio_ida, dev_info->id);
+ device_del(&indio_dev->dev);
+error_unreg_eventset:
+ iio_device_unregister_eventset(indio_dev);
+error_free_sysfs:
+ iio_device_unregister_sysfs(indio_dev);
error_ret:
return ret;
}
EXPORT_SYMBOL(iio_device_register);
-void iio_device_unregister(struct iio_dev *dev_info)
+void iio_device_unregister(struct iio_dev *indio_dev)
{
- if (dev_info->modes & INDIO_RING_TRIGGERED)
- iio_device_unregister_trigger_consumer(dev_info);
- iio_device_unregister_eventset(dev_info);
- iio_device_unregister_sysfs(dev_info);
- iio_free_ida_val(&iio_ida, dev_info->id);
- device_unregister(&dev_info->dev);
+ device_unregister(&indio_dev->dev);
}
EXPORT_SYMBOL(iio_device_unregister);
-
-void iio_put(void)
-{
- module_put(THIS_MODULE);
-}
-
-void iio_get(void)
-{
- __module_get(THIS_MODULE);
-}
-
subsys_initcall(iio_init);
module_exit(iio_exit);
diff --git a/drivers/staging/iio/industrialio-ring.c b/drivers/staging/iio/industrialio-ring.c
deleted file mode 100644
index 843eb82a69b..00000000000
--- a/drivers/staging/iio/industrialio-ring.c
+++ /dev/null
@@ -1,596 +0,0 @@
-/* The industrial I/O core
- *
- * Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Handling of ring allocation / resizing.
- *
- *
- * Things to look at here.
- * - Better memory allocation techniques?
- * - Alternative access techniques?
- */
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-
-#include "iio.h"
-#include "ring_generic.h"
-
-/**
- * iio_ring_open() - chrdev file open for ring buffer access
- *
- * This function relies on all ring buffer implementations having an
- * iio_ring_buffer as their first element.
- **/
-static int iio_ring_open(struct inode *inode, struct file *filp)
-{
- struct iio_handler *hand
- = container_of(inode->i_cdev, struct iio_handler, chrdev);
- struct iio_ring_buffer *rb = hand->private;
-
- filp->private_data = hand->private;
- if (rb->access->mark_in_use)
- rb->access->mark_in_use(rb);
-
- return 0;
-}
-
-/**
- * iio_ring_release() - chrdev file close ring buffer access
- *
- * This function relies on all ring buffer implementations having an
- * iio_ring_buffer as their first element.
- **/
-static int iio_ring_release(struct inode *inode, struct file *filp)
-{
- struct cdev *cd = inode->i_cdev;
- struct iio_handler *hand = iio_cdev_to_handler(cd);
- struct iio_ring_buffer *rb = hand->private;
-
- clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
- if (rb->access->unmark_in_use)
- rb->access->unmark_in_use(rb);
-
- return 0;
-}
-
-/**
- * iio_ring_read_first_n_outer() - chrdev read for ring buffer access
- *
- * This function relies on all ring buffer implementations having an
- * iio_ring _bufer as their first element.
- **/
-static ssize_t iio_ring_read_first_n_outer(struct file *filp, char __user *buf,
- size_t n, loff_t *f_ps)
-{
- struct iio_ring_buffer *rb = filp->private_data;
-
- if (!rb->access->read_first_n)
- return -EINVAL;
- return rb->access->read_first_n(rb, n, buf);
-}
-
-/**
- * iio_ring_poll() - poll the ring to find out if it has data
- */
-static unsigned int iio_ring_poll(struct file *filp,
- struct poll_table_struct *wait)
-{
- struct iio_ring_buffer *rb = filp->private_data;
-
- poll_wait(filp, &rb->pollq, wait);
- if (rb->stufftoread)
- return POLLIN | POLLRDNORM;
- /* need a way of knowing if there may be enough data... */
- return 0;
-}
-
-static const struct file_operations iio_ring_fileops = {
- .read = iio_ring_read_first_n_outer,
- .release = iio_ring_release,
- .open = iio_ring_open,
- .poll = iio_ring_poll,
- .owner = THIS_MODULE,
- .llseek = noop_llseek,
-};
-
-void iio_ring_access_release(struct device *dev)
-{
- struct iio_ring_buffer *buf
- = container_of(dev, struct iio_ring_buffer, dev);
- cdev_del(&buf->access_handler.chrdev);
- iio_device_free_chrdev_minor(MINOR(dev->devt));
-}
-EXPORT_SYMBOL(iio_ring_access_release);
-
-static inline int
-__iio_request_ring_buffer_chrdev(struct iio_ring_buffer *buf,
- struct module *owner,
- int id)
-{
- int ret;
-
- buf->access_handler.flags = 0;
- buf->dev.bus = &iio_bus_type;
- device_initialize(&buf->dev);
-
- ret = iio_device_get_chrdev_minor();
- if (ret < 0)
- goto error_device_put;
-
- buf->dev.devt = MKDEV(MAJOR(iio_devt), ret);
- dev_set_name(&buf->dev, "%s:buffer%d",
- dev_name(buf->dev.parent),
- id);
- ret = device_add(&buf->dev);
- if (ret < 0) {
- printk(KERN_ERR "failed to add the ring dev\n");
- goto error_device_put;
- }
- cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
- buf->access_handler.chrdev.owner = owner;
- ret = cdev_add(&buf->access_handler.chrdev, buf->dev.devt, 1);
- if (ret) {
- printk(KERN_ERR "failed to allocate ring chrdev\n");
- goto error_device_unregister;
- }
- return 0;
-
-error_device_unregister:
- device_unregister(&buf->dev);
-error_device_put:
- put_device(&buf->dev);
-
- return ret;
-}
-
-static void __iio_free_ring_buffer_chrdev(struct iio_ring_buffer *buf)
-{
- device_unregister(&buf->dev);
-}
-
-void iio_ring_buffer_init(struct iio_ring_buffer *ring,
- struct iio_dev *dev_info)
-{
- ring->indio_dev = dev_info;
- ring->access_handler.private = ring;
- init_waitqueue_head(&ring->pollq);
-}
-EXPORT_SYMBOL(iio_ring_buffer_init);
-
-static ssize_t iio_show_scan_index(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
-}
-
-static ssize_t iio_show_fixed_type(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- return sprintf(buf, "%c%d/%d>>%u\n",
- this_attr->c->scan_type.sign,
- this_attr->c->scan_type.realbits,
- this_attr->c->scan_type.storagebits,
- this_attr->c->scan_type.shift);
-}
-
-static ssize_t iio_scan_el_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
-
- ret = iio_scan_mask_query(ring, to_iio_dev_attr(attr)->address);
- if (ret < 0)
- return ret;
- return sprintf(buf, "%d\n", ret);
-}
-
-static int iio_scan_mask_clear(struct iio_ring_buffer *ring, int bit)
-{
- if (bit > IIO_MAX_SCAN_LENGTH)
- return -EINVAL;
- ring->scan_mask &= ~(1 << bit);
- ring->scan_count--;
- return 0;
-}
-
-static ssize_t iio_scan_el_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- int ret = 0;
- bool state;
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *indio_dev = ring->indio_dev;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- state = !(buf[0] == '0');
- mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
- ret = -EBUSY;
- goto error_ret;
- }
- ret = iio_scan_mask_query(ring, this_attr->address);
- if (ret < 0)
- goto error_ret;
- if (!state && ret) {
- ret = iio_scan_mask_clear(ring, this_attr->address);
- if (ret)
- goto error_ret;
- } else if (state && !ret) {
- ret = iio_scan_mask_set(ring, this_attr->address);
- if (ret)
- goto error_ret;
- }
-
-error_ret:
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-
-}
-
-static ssize_t iio_scan_el_ts_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", ring->scan_timestamp);
-}
-
-static ssize_t iio_scan_el_ts_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- int ret = 0;
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *indio_dev = ring->indio_dev;
- bool state;
- state = !(buf[0] == '0');
- mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
- ret = -EBUSY;
- goto error_ret;
- }
- ring->scan_timestamp = state;
-error_ret:
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-
-static int iio_ring_add_channel_sysfs(struct iio_ring_buffer *ring,
- const struct iio_chan_spec *chan)
-{
- int ret;
-
- ret = __iio_add_chan_devattr("index", "scan_elements",
- chan,
- &iio_show_scan_index,
- NULL,
- 0,
- 0,
- &ring->dev,
- &ring->scan_el_dev_attr_list);
- if (ret)
- goto error_ret;
-
- ret = __iio_add_chan_devattr("type", "scan_elements",
- chan,
- &iio_show_fixed_type,
- NULL,
- 0,
- 0,
- &ring->dev,
- &ring->scan_el_dev_attr_list);
- if (ret)
- goto error_ret;
-
- if (chan->type != IIO_TIMESTAMP)
- ret = __iio_add_chan_devattr("en", "scan_elements",
- chan,
- &iio_scan_el_show,
- &iio_scan_el_store,
- chan->scan_index,
- 0,
- &ring->dev,
- &ring->scan_el_dev_attr_list);
- else
- ret = __iio_add_chan_devattr("en", "scan_elements",
- chan,
- &iio_scan_el_ts_show,
- &iio_scan_el_ts_store,
- chan->scan_index,
- 0,
- &ring->dev,
- &ring->scan_el_dev_attr_list);
-error_ret:
- return ret;
-}
-
-static void iio_ring_remove_and_free_scan_dev_attr(struct iio_ring_buffer *ring,
- struct iio_dev_attr *p)
-{
- sysfs_remove_file_from_group(&ring->dev.kobj,
- &p->dev_attr.attr, "scan_elements");
- kfree(p->dev_attr.attr.name);
- kfree(p);
-}
-
-static struct attribute *iio_scan_el_dummy_attrs[] = {
- NULL
-};
-
-static struct attribute_group iio_scan_el_dummy_group = {
- .name = "scan_elements",
- .attrs = iio_scan_el_dummy_attrs
-};
-
-static void __iio_ring_attr_cleanup(struct iio_ring_buffer *ring)
-{
- struct iio_dev_attr *p, *n;
- int anydynamic = !list_empty(&ring->scan_el_dev_attr_list);
- list_for_each_entry_safe(p, n,
- &ring->scan_el_dev_attr_list, l)
- iio_ring_remove_and_free_scan_dev_attr(ring, p);
-
- if (ring->scan_el_attrs)
- sysfs_remove_group(&ring->dev.kobj,
- ring->scan_el_attrs);
- else if (anydynamic)
- sysfs_remove_group(&ring->dev.kobj,
- &iio_scan_el_dummy_group);
-}
-
-int iio_ring_buffer_register_ex(struct iio_ring_buffer *ring, int id,
- const struct iio_chan_spec *channels,
- int num_channels)
-{
- int ret, i;
-
- ret = __iio_request_ring_buffer_chrdev(ring, ring->owner, id);
- if (ret)
- goto error_ret;
-
- if (ring->scan_el_attrs) {
- ret = sysfs_create_group(&ring->dev.kobj,
- ring->scan_el_attrs);
- if (ret) {
- dev_err(&ring->dev,
- "Failed to add sysfs scan elements\n");
- goto error_free_ring_buffer_chrdev;
- }
- } else if (channels) {
- ret = sysfs_create_group(&ring->dev.kobj,
- &iio_scan_el_dummy_group);
- if (ret)
- goto error_free_ring_buffer_chrdev;
- }
-
- INIT_LIST_HEAD(&ring->scan_el_dev_attr_list);
- if (channels) {
- /* new magic */
- for (i = 0; i < num_channels; i++) {
- ret = iio_ring_add_channel_sysfs(ring, &channels[i]);
- if (ret < 0)
- goto error_cleanup_dynamic;
- }
- }
-
- return 0;
-error_cleanup_dynamic:
- __iio_ring_attr_cleanup(ring);
-error_free_ring_buffer_chrdev:
- __iio_free_ring_buffer_chrdev(ring);
-error_ret:
- return ret;
-}
-EXPORT_SYMBOL(iio_ring_buffer_register_ex);
-
-void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
-{
- __iio_ring_attr_cleanup(ring);
- __iio_free_ring_buffer_chrdev(ring);
-}
-EXPORT_SYMBOL(iio_ring_buffer_unregister);
-
-ssize_t iio_read_ring_length(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
-
- if (ring->access->get_length)
- return sprintf(buf, "%d\n",
- ring->access->get_length(ring));
-
- return 0;
-}
-EXPORT_SYMBOL(iio_read_ring_length);
-
-ssize_t iio_write_ring_length(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- int ret;
- ulong val;
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret)
- return ret;
-
- if (ring->access->get_length)
- if (val == ring->access->get_length(ring))
- return len;
-
- if (ring->access->set_length) {
- ring->access->set_length(ring, val);
- if (ring->access->mark_param_change)
- ring->access->mark_param_change(ring);
- }
-
- return len;
-}
-EXPORT_SYMBOL(iio_write_ring_length);
-
-ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
-
- if (ring->access->get_bytes_per_datum)
- return sprintf(buf, "%d\n",
- ring->access->get_bytes_per_datum(ring));
-
- return 0;
-}
-EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);
-
-ssize_t iio_store_ring_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- int ret;
- bool requested_state, current_state;
- int previous_mode;
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *dev_info = ring->indio_dev;
-
- mutex_lock(&dev_info->mlock);
- previous_mode = dev_info->currentmode;
- requested_state = !(buf[0] == '0');
- current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
- if (current_state == requested_state) {
- printk(KERN_INFO "iio-ring, current state requested again\n");
- goto done;
- }
- if (requested_state) {
- if (ring->setup_ops->preenable) {
- ret = ring->setup_ops->preenable(dev_info);
- if (ret) {
- printk(KERN_ERR
- "Buffer not started:"
- "ring preenable failed\n");
- goto error_ret;
- }
- }
- if (ring->access->request_update) {
- ret = ring->access->request_update(ring);
- if (ret) {
- printk(KERN_INFO
- "Buffer not started:"
- "ring parameter update failed\n");
- goto error_ret;
- }
- }
- if (ring->access->mark_in_use)
- ring->access->mark_in_use(ring);
- /* Definitely possible for devices to support both of these.*/
- if (dev_info->modes & INDIO_RING_TRIGGERED) {
- if (!dev_info->trig) {
- printk(KERN_INFO
- "Buffer not started: no trigger\n");
- ret = -EINVAL;
- if (ring->access->unmark_in_use)
- ring->access->unmark_in_use(ring);
- goto error_ret;
- }
- dev_info->currentmode = INDIO_RING_TRIGGERED;
- } else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER)
- dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
- else { /* should never be reached */
- ret = -EINVAL;
- goto error_ret;
- }
-
- if (ring->setup_ops->postenable) {
- ret = ring->setup_ops->postenable(dev_info);
- if (ret) {
- printk(KERN_INFO
- "Buffer not started:"
- "postenable failed\n");
- if (ring->access->unmark_in_use)
- ring->access->unmark_in_use(ring);
- dev_info->currentmode = previous_mode;
- if (ring->setup_ops->postdisable)
- ring->setup_ops->postdisable(dev_info);
- goto error_ret;
- }
- }
- } else {
- if (ring->setup_ops->predisable) {
- ret = ring->setup_ops->predisable(dev_info);
- if (ret)
- goto error_ret;
- }
- if (ring->access->unmark_in_use)
- ring->access->unmark_in_use(ring);
- dev_info->currentmode = INDIO_DIRECT_MODE;
- if (ring->setup_ops->postdisable) {
- ret = ring->setup_ops->postdisable(dev_info);
- if (ret)
- goto error_ret;
- }
- }
-done:
- mutex_unlock(&dev_info->mlock);
- return len;
-
-error_ret:
- mutex_unlock(&dev_info->mlock);
- return ret;
-}
-EXPORT_SYMBOL(iio_store_ring_enable);
-
-ssize_t iio_show_ring_enable(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_ring_buffer *ring = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
- & INDIO_ALL_RING_MODES));
-}
-EXPORT_SYMBOL(iio_show_ring_enable);
-
-int iio_sw_ring_preenable(struct iio_dev *indio_dev)
-{
- struct iio_ring_buffer *ring = indio_dev->ring;
- size_t size;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
- /* Check if there are any scan elements enabled, if not fail*/
- if (!(ring->scan_count || ring->scan_timestamp))
- return -EINVAL;
- if (ring->scan_timestamp)
- if (ring->scan_count)
- /* Timestamp (aligned to s64) and data */
- size = (((ring->scan_count * ring->bpe)
- + sizeof(s64) - 1)
- & ~(sizeof(s64) - 1))
- + sizeof(s64);
- else /* Timestamp only */
- size = sizeof(s64);
- else /* Data only */
- size = ring->scan_count * ring->bpe;
- ring->access->set_bytes_per_datum(ring, size);
-
- return 0;
-}
-EXPORT_SYMBOL(iio_sw_ring_preenable);
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
index 90ca2df23ea..2c626e0cb29 100644
--- a/drivers/staging/iio/industrialio-trigger.c
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -8,7 +8,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
@@ -18,6 +17,8 @@
#include "iio.h"
#include "trigger.h"
+#include "iio_core.h"
+#include "iio_core_trigger.h"
#include "trigger_consumer.h"
/* RFC - Question of approach
@@ -31,8 +32,7 @@
* Any other suggestions?
*/
-static DEFINE_IDR(iio_trigger_idr);
-static DEFINE_SPINLOCK(iio_trigger_idr_lock);
+static DEFINE_IDA(iio_trigger_ida);
/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
@@ -71,48 +71,15 @@ static void iio_trigger_unregister_sysfs(struct iio_trigger *trig_info)
NULL);
}
-
-/**
- * iio_trigger_register_id() - get a unique id for this trigger
- * @trig_info: the trigger
- **/
-static int iio_trigger_register_id(struct iio_trigger *trig_info)
-{
- int ret = 0;
-
-idr_again:
- if (unlikely(idr_pre_get(&iio_trigger_idr, GFP_KERNEL) == 0))
- return -ENOMEM;
-
- spin_lock(&iio_trigger_idr_lock);
- ret = idr_get_new(&iio_trigger_idr, NULL, &trig_info->id);
- spin_unlock(&iio_trigger_idr_lock);
- if (unlikely(ret == -EAGAIN))
- goto idr_again;
- else if (likely(!ret))
- trig_info->id = trig_info->id & MAX_ID_MASK;
-
- return ret;
-}
-
-/**
- * iio_trigger_unregister_id() - free up unique id for use by another trigger
- * @trig_info: the trigger
- **/
-static void iio_trigger_unregister_id(struct iio_trigger *trig_info)
-{
- spin_lock(&iio_trigger_idr_lock);
- idr_remove(&iio_trigger_idr, trig_info->id);
- spin_unlock(&iio_trigger_idr_lock);
-}
-
int iio_trigger_register(struct iio_trigger *trig_info)
{
int ret;
- ret = iio_trigger_register_id(trig_info);
- if (ret)
+ trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
+ if (trig_info->id < 0) {
+ ret = trig_info->id;
goto error_ret;
+ }
/* Set the name used for the sysfs directory etc */
dev_set_name(&trig_info->dev, "trigger%ld",
(unsigned long) trig_info->id);
@@ -135,7 +102,7 @@ int iio_trigger_register(struct iio_trigger *trig_info)
error_device_del:
device_del(&trig_info->dev);
error_unregister_id:
- iio_trigger_unregister_id(trig_info);
+ ida_simple_remove(&iio_trigger_ida, trig_info->id);
error_ret:
return ret;
}
@@ -148,7 +115,7 @@ void iio_trigger_unregister(struct iio_trigger *trig_info)
mutex_unlock(&iio_trigger_list_lock);
iio_trigger_unregister_sysfs(trig_info);
- iio_trigger_unregister_id(trig_info);
+ ida_simple_remove(&iio_trigger_ida, trig_info->id);
/* Possible issue in here */
device_unregister(&trig_info->dev);
}
@@ -173,13 +140,12 @@ static struct iio_trigger *iio_trigger_find_by_name(const char *name,
void iio_trigger_poll(struct iio_trigger *trig, s64 time)
{
int i;
- if (!trig->use_count) {
+ if (!trig->use_count)
for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
if (trig->subirqs[i].enabled) {
trig->use_count++;
generic_handle_irq(trig->subirq_base + i);
}
- }
}
EXPORT_SYMBOL(iio_trigger_poll);
@@ -206,8 +172,8 @@ EXPORT_SYMBOL(iio_trigger_poll_chained);
void iio_trigger_notify_done(struct iio_trigger *trig)
{
trig->use_count--;
- if (trig->use_count == 0 && trig->try_reenable)
- if (trig->try_reenable(trig)) {
+ if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
+ if (trig->ops->try_reenable(trig)) {
/* Missed and interrupt so launch new poll now */
iio_trigger_poll(trig, 0);
}
@@ -215,6 +181,26 @@ void iio_trigger_notify_done(struct iio_trigger *trig)
EXPORT_SYMBOL(iio_trigger_notify_done);
/* Trigger Consumer related functions */
+static int iio_trigger_get_irq(struct iio_trigger *trig)
+{
+ int ret;
+ mutex_lock(&trig->pool_lock);
+ ret = bitmap_find_free_region(trig->pool,
+ CONFIG_IIO_CONSUMERS_PER_TRIGGER,
+ ilog2(1));
+ mutex_unlock(&trig->pool_lock);
+ if (ret >= 0)
+ ret += trig->subirq_base;
+
+ return ret;
+}
+
+static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
+{
+ mutex_lock(&trig->pool_lock);
+ clear_bit(irq - trig->subirq_base, trig->pool);
+ mutex_unlock(&trig->pool_lock);
+}
/* Complexity in here. With certain triggers (datardy) an acknowledgement
* may be needed if the pollfuncs do not include the data read for the
@@ -223,44 +209,45 @@ EXPORT_SYMBOL(iio_trigger_notify_done);
* the relevant function is in there may be the best option.
*/
/* Worth protecting against double additions?*/
-int iio_trigger_attach_poll_func(struct iio_trigger *trig,
- struct iio_poll_func *pf)
+static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
+ struct iio_poll_func *pf)
{
int ret = 0;
bool notinuse
= bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+ /* Prevent the module being removed whilst attached to a trigger */
+ __module_get(pf->indio_dev->info->driver_module);
pf->irq = iio_trigger_get_irq(trig);
ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
pf->type, pf->name,
pf);
- if (trig->set_trigger_state && notinuse)
- ret = trig->set_trigger_state(trig, true);
+ if (trig->ops && trig->ops->set_trigger_state && notinuse)
+ ret = trig->ops->set_trigger_state(trig, true);
return ret;
}
-EXPORT_SYMBOL(iio_trigger_attach_poll_func);
-int iio_trigger_dettach_poll_func(struct iio_trigger *trig,
- struct iio_poll_func *pf)
+static int iio_trigger_dettach_poll_func(struct iio_trigger *trig,
+ struct iio_poll_func *pf)
{
int ret = 0;
bool no_other_users
= (bitmap_weight(trig->pool,
CONFIG_IIO_CONSUMERS_PER_TRIGGER)
== 1);
- if (trig->set_trigger_state && no_other_users) {
- ret = trig->set_trigger_state(trig, false);
+ if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
+ ret = trig->ops->set_trigger_state(trig, false);
if (ret)
goto error_ret;
}
iio_trigger_put_irq(trig, pf->irq);
free_irq(pf->irq, pf);
+ module_put(pf->indio_dev->info->driver_module);
error_ret:
return ret;
}
-EXPORT_SYMBOL(iio_trigger_dettach_poll_func);
irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
@@ -274,7 +261,7 @@ struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
int type,
- void *private,
+ struct iio_dev *indio_dev,
const char *fmt,
...)
{
@@ -294,7 +281,7 @@ struct iio_poll_func
pf->h = h;
pf->thread = thread;
pf->type = type;
- pf->private_data = private;
+ pf->indio_dev = indio_dev;
return pf;
}
@@ -317,13 +304,11 @@ static ssize_t iio_trigger_read_current(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- int len = 0;
- if (dev_info->trig)
- len = sprintf(buf,
- "%s\n",
- dev_info->trig->name);
- return len;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+ if (indio_dev->trig)
+ return sprintf(buf, "%s\n", indio_dev->trig->name);
+ return 0;
}
/**
@@ -338,38 +323,38 @@ static ssize_t iio_trigger_write_current(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct iio_trigger *oldtrig = dev_info->trig;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_trigger *oldtrig = indio_dev->trig;
struct iio_trigger *trig;
int ret;
- mutex_lock(&dev_info->mlock);
- if (dev_info->currentmode == INDIO_RING_TRIGGERED) {
- mutex_unlock(&dev_info->mlock);
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ mutex_unlock(&indio_dev->mlock);
return -EBUSY;
}
- mutex_unlock(&dev_info->mlock);
+ mutex_unlock(&indio_dev->mlock);
trig = iio_trigger_find_by_name(buf, len);
- if (trig && dev_info->info->validate_trigger) {
- ret = dev_info->info->validate_trigger(dev_info, trig);
+ if (trig && indio_dev->info->validate_trigger) {
+ ret = indio_dev->info->validate_trigger(indio_dev, trig);
if (ret)
return ret;
}
- if (trig && trig->validate_device) {
- ret = trig->validate_device(trig, dev_info);
+ if (trig && trig->ops && trig->ops->validate_device) {
+ ret = trig->ops->validate_device(trig, indio_dev);
if (ret)
return ret;
}
- dev_info->trig = trig;
+ indio_dev->trig = trig;
- if (oldtrig && dev_info->trig != oldtrig)
+ if (oldtrig && indio_dev->trig != oldtrig)
iio_put_trigger(oldtrig);
- if (dev_info->trig)
- iio_get_trigger(dev_info->trig);
+ if (indio_dev->trig)
+ iio_get_trigger(indio_dev->trig);
return len;
}
@@ -409,7 +394,6 @@ static void iio_trig_release(struct device *device)
}
kfree(trig->name);
kfree(trig);
- iio_put();
}
static struct device_type iio_trig_type = {
@@ -476,7 +460,7 @@ struct iio_trigger *iio_allocate_trigger(const char *fmt, ...)
IRQ_NOREQUEST | IRQ_NOAUTOEN,
IRQ_NOPROBE);
}
- iio_get();
+ get_device(&trig->dev);
}
return trig;
}
@@ -489,37 +473,35 @@ void iio_free_trigger(struct iio_trigger *trig)
}
EXPORT_SYMBOL(iio_free_trigger);
-int iio_device_register_trigger_consumer(struct iio_dev *dev_info)
+int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
- int ret;
- ret = sysfs_create_group(&dev_info->dev.kobj,
- &iio_trigger_consumer_attr_group);
- return ret;
+ indio_dev->groups[indio_dev->groupcounter++] =
+ &iio_trigger_consumer_attr_group;
+
+ return 0;
}
-EXPORT_SYMBOL(iio_device_register_trigger_consumer);
-int iio_device_unregister_trigger_consumer(struct iio_dev *dev_info)
+void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
- sysfs_remove_group(&dev_info->dev.kobj,
- &iio_trigger_consumer_attr_group);
- return 0;
+ /* Clean up and associated but not attached triggers references */
+ if (indio_dev->trig)
+ iio_put_trigger(indio_dev->trig);
}
-EXPORT_SYMBOL(iio_device_unregister_trigger_consumer);
-int iio_triggered_ring_postenable(struct iio_dev *indio_dev)
+int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
return indio_dev->trig
? iio_trigger_attach_poll_func(indio_dev->trig,
indio_dev->pollfunc)
: 0;
}
-EXPORT_SYMBOL(iio_triggered_ring_postenable);
+EXPORT_SYMBOL(iio_triggered_buffer_postenable);
-int iio_triggered_ring_predisable(struct iio_dev *indio_dev)
+int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
return indio_dev->trig
? iio_trigger_dettach_poll_func(indio_dev->trig,
indio_dev->pollfunc)
: 0;
}
-EXPORT_SYMBOL(iio_triggered_ring_predisable);
+EXPORT_SYMBOL(iio_triggered_buffer_predisable);
diff --git a/drivers/staging/iio/kfifo_buf.c b/drivers/staging/iio/kfifo_buf.c
index 6002368fdcf..e8c234bb18f 100644
--- a/drivers/staging/iio/kfifo_buf.c
+++ b/drivers/staging/iio/kfifo_buf.c
@@ -8,7 +8,15 @@
#include "kfifo_buf.h"
-#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, ring)
+struct iio_kfifo {
+ struct iio_buffer buffer;
+ struct kfifo kf;
+ int use_count;
+ int update_needed;
+ struct mutex use_lock;
+};
+
+#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
int bytes_per_datum, int length)
@@ -16,11 +24,11 @@ static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
if ((length == 0) || (bytes_per_datum == 0))
return -EINVAL;
- __iio_update_ring_buffer(&buf->ring, bytes_per_datum, length);
+ __iio_update_buffer(&buf->buffer, bytes_per_datum, length);
return kfifo_alloc(&buf->kf, bytes_per_datum*length, GFP_KERNEL);
}
-static int iio_request_update_kfifo(struct iio_ring_buffer *r)
+static int iio_request_update_kfifo(struct iio_buffer *r)
{
int ret = 0;
struct iio_kfifo *buf = iio_to_kfifo(r);
@@ -33,14 +41,14 @@ static int iio_request_update_kfifo(struct iio_ring_buffer *r)
goto error_ret;
}
kfifo_free(&buf->kf);
- ret = __iio_allocate_kfifo(buf, buf->ring.bytes_per_datum,
- buf->ring.length);
+ ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
+ buf->buffer.length);
error_ret:
mutex_unlock(&buf->use_lock);
return ret;
}
-static void iio_mark_kfifo_in_use(struct iio_ring_buffer *r)
+static void iio_mark_kfifo_in_use(struct iio_buffer *r)
{
struct iio_kfifo *buf = iio_to_kfifo(r);
mutex_lock(&buf->use_lock);
@@ -48,7 +56,7 @@ static void iio_mark_kfifo_in_use(struct iio_ring_buffer *r)
mutex_unlock(&buf->use_lock);
}
-static void iio_unmark_kfifo_in_use(struct iio_ring_buffer *r)
+static void iio_unmark_kfifo_in_use(struct iio_buffer *r)
{
struct iio_kfifo *buf = iio_to_kfifo(r);
mutex_lock(&buf->use_lock);
@@ -56,7 +64,7 @@ static void iio_unmark_kfifo_in_use(struct iio_ring_buffer *r)
mutex_unlock(&buf->use_lock);
}
-static int iio_get_length_kfifo(struct iio_ring_buffer *r)
+static int iio_get_length_kfifo(struct iio_buffer *r)
{
return r->length;
}
@@ -66,9 +74,9 @@ static inline void __iio_init_kfifo(struct iio_kfifo *kf)
mutex_init(&kf->use_lock);
}
-static IIO_RING_ENABLE_ATTR;
-static IIO_RING_BYTES_PER_DATUM_ATTR;
-static IIO_RING_LENGTH_ATTR;
+static IIO_BUFFER_ENABLE_ATTR;
+static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
+static IIO_BUFFER_LENGTH_ATTR;
static struct attribute *iio_kfifo_attributes[] = {
&dev_attr_length.attr,
@@ -79,27 +87,10 @@ static struct attribute *iio_kfifo_attributes[] = {
static struct attribute_group iio_kfifo_attribute_group = {
.attrs = iio_kfifo_attributes,
+ .name = "buffer",
};
-static const struct attribute_group *iio_kfifo_attribute_groups[] = {
- &iio_kfifo_attribute_group,
- NULL
-};
-
-static void iio_kfifo_release(struct device *dev)
-{
- struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
- struct iio_kfifo *kf = iio_to_kfifo(r);
- kfifo_free(&kf->kf);
- kfree(kf);
-}
-
-static struct device_type iio_kfifo_type = {
- .release = iio_kfifo_release,
- .groups = iio_kfifo_attribute_groups,
-};
-
-struct iio_ring_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
+struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
{
struct iio_kfifo *kf;
@@ -107,22 +98,20 @@ struct iio_ring_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
if (!kf)
return NULL;
kf->update_needed = true;
- iio_ring_buffer_init(&kf->ring, indio_dev);
+ iio_buffer_init(&kf->buffer, indio_dev);
+ kf->buffer.attrs = &iio_kfifo_attribute_group;
__iio_init_kfifo(kf);
- kf->ring.dev.type = &iio_kfifo_type;
- kf->ring.dev.parent = &indio_dev->dev;
- dev_set_drvdata(&kf->ring.dev, (void *)&(kf->ring));
- return &kf->ring;
+ return &kf->buffer;
}
EXPORT_SYMBOL(iio_kfifo_allocate);
-static int iio_get_bytes_per_datum_kfifo(struct iio_ring_buffer *r)
+static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r)
{
return r->bytes_per_datum;
}
-static int iio_set_bytes_per_datum_kfifo(struct iio_ring_buffer *r, size_t bpd)
+static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
{
if (r->bytes_per_datum != bpd) {
r->bytes_per_datum = bpd;
@@ -132,14 +121,14 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_ring_buffer *r, size_t bpd)
return 0;
}
-static int iio_mark_update_needed_kfifo(struct iio_ring_buffer *r)
+static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
{
struct iio_kfifo *kf = iio_to_kfifo(r);
kf->update_needed = true;
return 0;
}
-static int iio_set_length_kfifo(struct iio_ring_buffer *r, int length)
+static int iio_set_length_kfifo(struct iio_buffer *r, int length)
{
if (r->length != length) {
r->length = length;
@@ -149,14 +138,13 @@ static int iio_set_length_kfifo(struct iio_ring_buffer *r, int length)
return 0;
}
-void iio_kfifo_free(struct iio_ring_buffer *r)
+void iio_kfifo_free(struct iio_buffer *r)
{
- if (r)
- iio_put_ring_buffer(r);
+ kfree(iio_to_kfifo(r));
}
EXPORT_SYMBOL(iio_kfifo_free);
-static int iio_store_to_kfifo(struct iio_ring_buffer *r,
+static int iio_store_to_kfifo(struct iio_buffer *r,
u8 *data,
s64 timestamp)
{
@@ -175,7 +163,7 @@ static int iio_store_to_kfifo(struct iio_ring_buffer *r,
return 0;
}
-static int iio_read_first_n_kfifo(struct iio_ring_buffer *r,
+static int iio_read_first_n_kfifo(struct iio_buffer *r,
size_t n, char __user *buf)
{
int ret, copied;
@@ -186,7 +174,7 @@ static int iio_read_first_n_kfifo(struct iio_ring_buffer *r,
return copied;
}
-const struct iio_ring_access_funcs kfifo_access_funcs = {
+const struct iio_buffer_access_funcs kfifo_access_funcs = {
.mark_in_use = &iio_mark_kfifo_in_use,
.unmark_in_use = &iio_unmark_kfifo_in_use,
.store_to = &iio_store_to_kfifo,
diff --git a/drivers/staging/iio/kfifo_buf.h b/drivers/staging/iio/kfifo_buf.h
index aac30539b2c..a15598bb9fd 100644
--- a/drivers/staging/iio/kfifo_buf.h
+++ b/drivers/staging/iio/kfifo_buf.h
@@ -1,18 +1,10 @@
#include <linux/kfifo.h>
#include "iio.h"
-#include "ring_generic.h"
+#include "buffer_generic.h"
-struct iio_kfifo {
- struct iio_ring_buffer ring;
- struct kfifo kf;
- int use_count;
- int update_needed;
- struct mutex use_lock;
-};
+extern const struct iio_buffer_access_funcs kfifo_access_funcs;
-extern const struct iio_ring_access_funcs kfifo_access_funcs;
-
-struct iio_ring_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev);
-void iio_kfifo_free(struct iio_ring_buffer *r);
+struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev);
+void iio_kfifo_free(struct iio_buffer *r);
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
index 1ad2d56c8ba..e7e9159d989 100644
--- a/drivers/staging/iio/light/Kconfig
+++ b/drivers/staging/iio/light/Kconfig
@@ -1,7 +1,7 @@
#
# Light sensors
#
-comment "Light sensors"
+menu "Light sensors"
config SENSORS_ISL29018
tristate "ISL 29018 light and proximity sensor"
@@ -30,3 +30,5 @@ config TSL2583
help
Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
Access ALS data via iio, sysfs.
+
+endmenu
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 426b6af7080..9dc9e635839 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -28,7 +28,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include "../iio.h"
-
+#include "../sysfs.h"
#define CONVERSION_TIME_MS 100
#define ISL29018_REG_ADD_COMMAND1 0x00
@@ -51,7 +51,11 @@
#define ISL29018_REG_ADD_DATA_LSB 0x02
#define ISL29018_REG_ADD_DATA_MSB 0x03
-#define ISL29018_MAX_REGS ISL29018_REG_ADD_DATA_MSB
+#define ISL29018_MAX_REGS (ISL29018_REG_ADD_DATA_MSB+1)
+
+#define ISL29018_REG_TEST 0x08
+#define ISL29018_TEST_SHIFT 0
+#define ISL29018_TEST_MASK (0xFF << ISL29018_TEST_SHIFT)
struct isl29018_chip {
struct i2c_client *client;
@@ -66,22 +70,27 @@ struct isl29018_chip {
static int isl29018_write_data(struct i2c_client *client, u8 reg,
u8 val, u8 mask, u8 shift)
{
- u8 regval;
- int ret = 0;
+ u8 regval = val;
+ int ret;
struct isl29018_chip *chip = iio_priv(i2c_get_clientdata(client));
- regval = chip->reg_cache[reg];
- regval &= ~mask;
- regval |= val << shift;
+ /* don't cache or mask REG_TEST */
+ if (reg < ISL29018_MAX_REGS) {
+ regval = chip->reg_cache[reg];
+ regval &= ~mask;
+ regval |= val << shift;
+ }
ret = i2c_smbus_write_byte_data(client, reg, regval);
if (ret) {
dev_err(&client->dev, "Write to device fails status %x\n", ret);
- return ret;
+ } else {
+ /* don't update cache on err */
+ if (reg < ISL29018_MAX_REGS)
+ chip->reg_cache[reg] = regval;
}
- chip->reg_cache[reg] = regval;
- return 0;
+ return ret;
}
static int isl29018_set_range(struct i2c_client *client, unsigned long range,
@@ -411,7 +420,7 @@ static const struct iio_chan_spec isl29018_channels[] = {
.type = IIO_LIGHT,
.indexed = 1,
.channel = 0,
- .processed_val = 1,
+ .processed_val = IIO_PROCESSED,
.info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE),
}, {
.type = IIO_INTENSITY,
@@ -457,6 +466,48 @@ static int isl29018_chip_init(struct i2c_client *client)
memset(chip->reg_cache, 0, sizeof(chip->reg_cache));
+ /* Code added per Intersil Application Note 1534:
+ * When VDD sinks to approximately 1.8V or below, some of
+ * the part's registers may change their state. When VDD
+ * recovers to 2.25V (or greater), the part may thus be in an
+ * unknown mode of operation. The user can return the part to
+ * a known mode of operation either by (a) setting VDD = 0V for
+ * 1 second or more and then powering back up with a slew rate
+ * of 0.5V/ms or greater, or (b) via I2C disable all ALS/PROX
+ * conversions, clear the test registers, and then rewrite all
+ * registers to the desired values.
+ * ...
+ * FOR ISL29011, ISL29018, ISL29021, ISL29023
+ * 1. Write 0x00 to register 0x08 (TEST)
+ * 2. Write 0x00 to register 0x00 (CMD1)
+ * 3. Rewrite all registers to the desired values
+ *
+ * ISL29018 Data Sheet (FN6619.1, Feb 11, 2010) essentially says
+ * the same thing EXCEPT the data sheet asks for a 1ms delay after
+ * writing the CMD1 register.
+ */
+ status = isl29018_write_data(client, ISL29018_REG_TEST, 0,
+ ISL29018_TEST_MASK, ISL29018_TEST_SHIFT);
+ if (status < 0) {
+ dev_err(&client->dev, "Failed to clear isl29018 TEST reg."
+ "(%d)\n", status);
+ return status;
+ }
+
+ /* See Intersil AN1534 comments above.
+ * "Operating Mode" (COMMAND1) register is reprogrammed when
+ * data is read from the device.
+ */
+ status = isl29018_write_data(client, ISL29018_REG_ADD_COMMAND1, 0,
+ 0xff, 0);
+ if (status < 0) {
+ dev_err(&client->dev, "Failed to clear isl29018 CMD1 reg."
+ "(%d)\n", status);
+ return status;
+ }
+
+ msleep(1); /* per data sheet, page 10 */
+
/* set defaults */
status = isl29018_set_range(client, chip->range, &new_range);
if (status < 0) {
@@ -530,6 +581,7 @@ static int __devexit isl29018_remove(struct i2c_client *client)
dev_dbg(&client->dev, "%s()\n", __func__);
iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/staging/iio/light/tsl2563.c
index f25243b0847..7e984bcf8d7 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/staging/iio/light/tsl2563.c
@@ -31,13 +31,12 @@
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/delay.h>
-#include <linux/platform_device.h>
#include <linux/pm.h>
-#include <linux/hwmon.h>
#include <linux/err.h>
#include <linux/slab.h>
#include "../iio.h"
+#include "../sysfs.h"
#include "tsl2563.h"
/* Use this many bits for fraction part. */
@@ -519,7 +518,8 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto error_ret;
}
error_ret:
@@ -527,21 +527,31 @@ error_ret:
return ret;
}
-#define INFO_MASK (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE)
-#define EVENT_MASK (IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) | \
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING))
-#define IIO_CHAN_2563(type, mod, proc, chan, imask, emask) \
- IIO_CHAN(type, mod, 1, proc, NULL, chan, 0, imask, 0, 0, {}, emask)
-
static const struct iio_chan_spec tsl2563_channels[] = {
- IIO_CHAN_2563(IIO_LIGHT, 0, 1, 0, 0, 0),
- IIO_CHAN_2563(IIO_INTENSITY, 1, 0, 0, INFO_MASK, EVENT_MASK),
- IIO_CHAN_2563(IIO_INTENSITY, 1, 0, 1, INFO_MASK, 0),
+ {
+ .type = IIO_LIGHT,
+ .indexed = 1,
+ .channel = 0,
+ }, {
+ .type = IIO_INTENSITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE),
+ .event_mask = (IIO_EV_BIT(IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING) |
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING)),
+ }, {
+ .type = IIO_INTENSITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE),
+ }
};
static int tsl2563_read_thresh(struct iio_dev *indio_dev,
- int event_code,
- int *val)
+ u64 event_code,
+ int *val)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
@@ -559,8 +569,8 @@ static int tsl2563_read_thresh(struct iio_dev *indio_dev,
return 0;
}
-static ssize_t tsl2563_write_thresh(struct iio_dev *indio_dev,
- int event_code,
+static int tsl2563_write_thresh(struct iio_dev *indio_dev,
+ u64 event_code,
int val)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
@@ -595,8 +605,8 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
struct iio_dev *dev_info = private;
struct tsl2563_chip *chip = iio_priv(dev_info);
- iio_push_event(dev_info, 0,
- IIO_UNMOD_EVENT_CODE(IIO_EV_CLASS_LIGHT,
+ iio_push_event(dev_info,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
@@ -608,8 +618,8 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
}
static int tsl2563_write_interrupt_config(struct iio_dev *indio_dev,
- int event_code,
- int state)
+ u64 event_code,
+ int state)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
int ret = 0;
@@ -650,7 +660,7 @@ out:
}
static int tsl2563_read_interrupt_config(struct iio_dev *indio_dev,
- int event_code)
+ u64 event_code)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
int ret;
@@ -680,7 +690,6 @@ static const struct iio_info tsl2563_info_no_irq = {
static const struct iio_info tsl2563_info = {
.driver_module = THIS_MODULE,
- .num_interrupt_lines = 1,
.read_raw = &tsl2563_read_raw,
.write_raw = &tsl2563_write_raw,
.read_event_value = &tsl2563_read_thresh,
@@ -697,7 +706,7 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
struct tsl2563_platform_data *pdata = client->dev.platform_data;
int err = 0;
int ret;
- u8 id;
+ u8 id = 0;
indio_dev = iio_allocate_device(sizeof(*chip));
if (!indio_dev)
@@ -743,9 +752,6 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
indio_dev->info = &tsl2563_info;
else
indio_dev->info = &tsl2563_info_no_irq;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto fail1;
if (client->irq) {
ret = request_threaded_irq(client->irq,
NULL,
@@ -764,12 +770,16 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
/* The interrupt cannot yet be enabled so this is fine without lock */
schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto fail3;
+
return 0;
fail3:
if (client->irq)
free_irq(client->irq, indio_dev);
fail2:
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
fail1:
kfree(chip);
return err;
@@ -779,6 +789,8 @@ static int tsl2563_remove(struct i2c_client *client)
{
struct tsl2563_chip *chip = i2c_get_clientdata(client);
struct iio_dev *indio_dev = iio_priv_to_dev(chip);
+
+ iio_device_unregister(indio_dev);
if (!chip->int_enabled)
cancel_delayed_work(&chip->poweroff_work);
/* Ensure that interrupts are disabled - then flush any bottom halves */
@@ -789,7 +801,8 @@ static int tsl2563_remove(struct i2c_client *client)
tsl2563_set_power(chip, 0);
if (client->irq)
free_irq(client->irq, indio_dev);
- iio_device_unregister(indio_dev);
+
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index 5694610da1c..80f77cf8e9c 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -27,6 +27,7 @@
#include <linux/mutex.h>
#include <linux/unistd.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "../iio.h"
#define TSL258X_MAX_DEVICE_REGS 32
@@ -68,7 +69,7 @@ enum {
TSL258X_CHIP_UNKNOWN = 0,
TSL258X_CHIP_WORKING = 1,
TSL258X_CHIP_SUSPENDED = 2
-} TSL258X_CHIP_WORKING_STATUS;
+};
/* Per-device data */
struct taos_als_info {
@@ -87,7 +88,6 @@ struct taos_settings {
struct tsl2583_chip {
struct mutex als_mutex;
struct i2c_client *client;
- struct iio_dev *iio_dev;
struct taos_als_info als_cur_info;
struct taos_settings taos_settings;
int als_time_scale;
@@ -114,7 +114,7 @@ struct taos_lux {
/* This structure is intentionally large to accommodate updates via sysfs. */
/* Sized to 11 = max 10 segments + 1 termination segment */
/* Assumption is is one and only one type of glass used */
-struct taos_lux taos_device_lux[11] = {
+static struct taos_lux taos_device_lux[11] = {
{ 9830, 8520, 15729 },
{ 12452, 10807, 23344 },
{ 14746, 6383, 11705 },
@@ -159,8 +159,7 @@ static void taos_defaults(struct tsl2583_chip *chip)
static int
taos_i2c_read(struct i2c_client *client, u8 reg, u8 *val, unsigned int len)
{
- int ret;
- int i;
+ int i, ret;
for (i = 0; i < len; i++) {
/* select register to write */
@@ -191,47 +190,47 @@ taos_i2c_read(struct i2c_client *client, u8 reg, u8 *val, unsigned int len)
* the array are then used along with the time scale factor array values, to
* calculate the lux.
*/
-static int taos_get_lux(struct i2c_client *client)
+static int taos_get_lux(struct iio_dev *indio_dev)
{
u16 ch0, ch1; /* separated ch0/ch1 data from device */
u32 lux; /* raw lux calculated from device data */
u32 ratio;
u8 buf[5];
struct taos_lux *p;
- struct tsl2583_chip *chip = i2c_get_clientdata(client);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
int i, ret;
u32 ch0lux = 0;
u32 ch1lux = 0;
if (mutex_trylock(&chip->als_mutex) == 0) {
- dev_info(&client->dev, "taos_get_lux device is busy\n");
+ dev_info(&chip->client->dev, "taos_get_lux device is busy\n");
return chip->als_cur_info.lux; /* busy, so return LAST VALUE */
}
if (chip->taos_chip_status != TSL258X_CHIP_WORKING) {
/* device is not enabled */
- dev_err(&client->dev, "taos_get_lux device is not enabled\n");
+ dev_err(&chip->client->dev, "taos_get_lux device is not enabled\n");
ret = -EBUSY ;
goto out_unlock;
}
- ret = taos_i2c_read(client, (TSL258X_CMD_REG), &buf[0], 1);
+ ret = taos_i2c_read(chip->client, (TSL258X_CMD_REG), &buf[0], 1);
if (ret < 0) {
- dev_err(&client->dev, "taos_get_lux failed to read CMD_REG\n");
+ dev_err(&chip->client->dev, "taos_get_lux failed to read CMD_REG\n");
goto out_unlock;
}
/* is data new & valid */
if (!(buf[0] & TSL258X_STA_ADC_INTR)) {
- dev_err(&client->dev, "taos_get_lux data not valid\n");
+ dev_err(&chip->client->dev, "taos_get_lux data not valid\n");
ret = chip->als_cur_info.lux; /* return LAST VALUE */
goto out_unlock;
}
for (i = 0; i < 4; i++) {
int reg = TSL258X_CMD_REG | (TSL258X_ALS_CHAN0LO + i);
- ret = taos_i2c_read(client, reg, &buf[i], 1);
+ ret = taos_i2c_read(chip->client, reg, &buf[i], 1);
if (ret < 0) {
- dev_err(&client->dev, "taos_get_lux failed to read"
+ dev_err(&chip->client->dev, "taos_get_lux failed to read"
" register %x\n", reg);
goto out_unlock;
}
@@ -239,11 +238,12 @@ static int taos_get_lux(struct i2c_client *client)
/* clear status, really interrupt status (interrupts are off), but
* we use the bit anyway - don't forget 0x80 - this is a command*/
- ret = i2c_smbus_write_byte(client,
- (TSL258X_CMD_REG | TSL258X_CMD_SPL_FN | TSL258X_CMD_ALS_INT_CLR));
+ ret = i2c_smbus_write_byte(chip->client,
+ (TSL258X_CMD_REG | TSL258X_CMD_SPL_FN |
+ TSL258X_CMD_ALS_INT_CLR));
if (ret < 0) {
- dev_err(&client->dev,
+ dev_err(&chip->client->dev,
"taos_i2c_write_command failed in taos_get_lux, err = %d\n",
ret);
goto out_unlock; /* have no data, so return failure */
@@ -285,7 +285,7 @@ static int taos_get_lux(struct i2c_client *client)
/* note: lux is 31 bit max at this point */
if (ch1lux > ch0lux) {
- dev_dbg(&client->dev, "No Data - Return last value\n");
+ dev_dbg(&chip->client->dev, "No Data - Return last value\n");
ret = chip->als_cur_info.lux = 0;
goto out_unlock;
}
@@ -319,54 +319,56 @@ out_unlock:
* to derive actual lux).
* Return updated gain_trim value.
*/
-int taos_als_calibrate(struct i2c_client *client)
+static int taos_als_calibrate(struct iio_dev *indio_dev)
{
- struct tsl2583_chip *chip = i2c_get_clientdata(client);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
u8 reg_val;
unsigned int gain_trim_val;
int ret;
int lux_val;
- ret = i2c_smbus_write_byte(client, (TSL258X_CMD_REG | TSL258X_CNTRL));
+ ret = i2c_smbus_write_byte(chip->client,
+ (TSL258X_CMD_REG | TSL258X_CNTRL));
if (ret < 0) {
- dev_err(&client->dev,
+ dev_err(&chip->client->dev,
"taos_als_calibrate failed to reach the CNTRL register, ret=%d\n",
ret);
return ret;
}
- reg_val = i2c_smbus_read_byte(client);
+ reg_val = i2c_smbus_read_byte(chip->client);
if ((reg_val & (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON))
!= (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON)) {
- dev_err(&client->dev,
+ dev_err(&chip->client->dev,
"taos_als_calibrate failed: device not powered on with ADC enabled\n");
return -1;
}
- ret = i2c_smbus_write_byte(client, (TSL258X_CMD_REG | TSL258X_CNTRL));
+ ret = i2c_smbus_write_byte(chip->client,
+ (TSL258X_CMD_REG | TSL258X_CNTRL));
if (ret < 0) {
- dev_err(&client->dev,
+ dev_err(&chip->client->dev,
"taos_als_calibrate failed to reach the STATUS register, ret=%d\n",
ret);
return ret;
}
- reg_val = i2c_smbus_read_byte(client);
+ reg_val = i2c_smbus_read_byte(chip->client);
if ((reg_val & TSL258X_STA_ADC_VALID) != TSL258X_STA_ADC_VALID) {
- dev_err(&client->dev,
+ dev_err(&chip->client->dev,
"taos_als_calibrate failed: STATUS - ADC not valid.\n");
return -ENODATA;
}
- lux_val = taos_get_lux(client);
+ lux_val = taos_get_lux(indio_dev);
if (lux_val < 0) {
- dev_err(&client->dev, "taos_als_calibrate failed to get lux\n");
+ dev_err(&chip->client->dev, "taos_als_calibrate failed to get lux\n");
return lux_val;
}
gain_trim_val = (unsigned int) (((chip->taos_settings.als_cal_target)
* chip->taos_settings.als_gain_trim) / lux_val);
if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
- dev_err(&client->dev,
+ dev_err(&chip->client->dev,
"taos_als_calibrate failed: trim_val of %d is out of range\n",
gain_trim_val);
return -ENODATA;
@@ -380,21 +382,21 @@ int taos_als_calibrate(struct i2c_client *client)
* Turn the device on.
* Configuration must be set before calling this function.
*/
-static int taos_chip_on(struct i2c_client *client)
+static int taos_chip_on(struct iio_dev *indio_dev)
{
int i;
- int ret = 0;
+ int ret;
u8 *uP;
u8 utmp;
int als_count;
int als_time;
- struct tsl2583_chip *chip = i2c_get_clientdata(client);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
/* and make sure we're not already on */
if (chip->taos_chip_status == TSL258X_CHIP_WORKING) {
/* if forcing a register update - turn off, then on */
- dev_info(&client->dev, "device is already enabled\n");
- return -EINVAL;
+ dev_info(&chip->client->dev, "device is already enabled\n");
+ return -EINVAL;
}
/* determine als integration regster */
@@ -416,20 +418,21 @@ static int taos_chip_on(struct i2c_client *client)
/* TSL258x Specific power-on / adc enable sequence
* Power on the device 1st. */
utmp = TSL258X_CNTL_PWR_ON;
- ret = i2c_smbus_write_byte_data(client,
- TSL258X_CMD_REG | TSL258X_CNTRL, utmp);
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL258X_CMD_REG | TSL258X_CNTRL, utmp);
if (ret < 0) {
- dev_err(&client->dev, "taos_chip_on failed on CNTRL reg.\n");
+ dev_err(&chip->client->dev, "taos_chip_on failed on CNTRL reg.\n");
return -1;
}
/* Use the following shadow copy for our delay before enabling ADC.
* Write all the registers. */
for (i = 0, uP = chip->taos_config; i < TSL258X_REG_MAX; i++) {
- ret = i2c_smbus_write_byte_data(client, TSL258X_CMD_REG + i,
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL258X_CMD_REG + i,
*uP++);
if (ret < 0) {
- dev_err(&client->dev,
+ dev_err(&chip->client->dev,
"taos_chip_on failed on reg %d.\n", i);
return -1;
}
@@ -439,10 +442,11 @@ static int taos_chip_on(struct i2c_client *client)
/* NOW enable the ADC
* initialize the desired mode of operation */
utmp = TSL258X_CNTL_PWR_ON | TSL258X_CNTL_ADC_ENBL;
- ret = i2c_smbus_write_byte_data(client, TSL258X_CMD_REG | TSL258X_CNTRL,
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL258X_CMD_REG | TSL258X_CNTRL,
utmp);
if (ret < 0) {
- dev_err(&client->dev, "taos_chip_on failed on 2nd CTRL reg.\n");
+ dev_err(&chip->client->dev, "taos_chip_on failed on 2nd CTRL reg.\n");
return -1;
}
chip->taos_chip_status = TSL258X_CHIP_WORKING;
@@ -450,33 +454,26 @@ static int taos_chip_on(struct i2c_client *client)
return ret;
}
-static int taos_chip_off(struct i2c_client *client)
+static int taos_chip_off(struct iio_dev *indio_dev)
{
- struct tsl2583_chip *chip = i2c_get_clientdata(client);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
int ret;
/* turn device off */
chip->taos_chip_status = TSL258X_CHIP_SUSPENDED;
- ret = i2c_smbus_write_byte_data(client, TSL258X_CMD_REG | TSL258X_CNTRL,
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL258X_CMD_REG | TSL258X_CNTRL,
0x00);
return ret;
}
/* Sysfs Interface Functions */
-static ssize_t taos_device_id(struct device *dev,
-struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
-
- return sprintf(buf, "%s\n", chip->client->name);
-}
static ssize_t taos_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
return sprintf(buf, "%d\n", chip->taos_chip_status);
}
@@ -485,16 +482,15 @@ static ssize_t taos_power_state_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
unsigned long value;
if (strict_strtoul(buf, 0, &value))
return -EINVAL;
if (value == 0)
- taos_chip_off(chip->client);
+ taos_chip_off(indio_dev);
else
- taos_chip_on(chip->client);
+ taos_chip_on(indio_dev);
return len;
}
@@ -503,7 +499,7 @@ static ssize_t taos_gain_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
char gain[4] = {0};
switch (chip->taos_settings.als_gain) {
@@ -528,7 +524,7 @@ static ssize_t taos_gain_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
unsigned long value;
if (strict_strtoul(buf, 0, &value))
@@ -565,7 +561,7 @@ static ssize_t taos_als_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
return sprintf(buf, "%d\n", chip->taos_settings.als_time);
}
@@ -574,7 +570,7 @@ static ssize_t taos_als_time_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
unsigned long value;
if (strict_strtoul(buf, 0, &value))
@@ -586,7 +582,7 @@ static ssize_t taos_als_time_store(struct device *dev,
if (value % 50)
return -EINVAL;
- chip->taos_settings.als_time = value;
+ chip->taos_settings.als_time = value;
return len;
}
@@ -602,7 +598,7 @@ static ssize_t taos_als_trim_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
return sprintf(buf, "%d\n", chip->taos_settings.als_gain_trim);
}
@@ -611,7 +607,7 @@ static ssize_t taos_als_trim_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
unsigned long value;
if (strict_strtoul(buf, 0, &value))
@@ -627,7 +623,7 @@ static ssize_t taos_als_cal_target_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
return sprintf(buf, "%d\n", chip->taos_settings.als_cal_target);
}
@@ -636,7 +632,7 @@ static ssize_t taos_als_cal_target_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
unsigned long value;
if (strict_strtoul(buf, 0, &value))
@@ -651,27 +647,26 @@ static ssize_t taos_als_cal_target_store(struct device *dev,
static ssize_t taos_lux_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
- int lux;
+ int ret;
- lux = taos_get_lux(chip->client);
+ ret = taos_get_lux(dev_get_drvdata(dev));
+ if (ret < 0)
+ return ret;
- return sprintf(buf, "%d\n", lux);
+ return sprintf(buf, "%d\n", ret);
}
static ssize_t taos_do_calibrate(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
unsigned long value;
if (strict_strtoul(buf, 0, &value))
return -EINVAL;
if (value == 1)
- taos_als_calibrate(chip->client);
+ taos_als_calibrate(indio_dev);
return len;
}
@@ -703,8 +698,8 @@ static ssize_t taos_luxtable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2583_chip *chip = indio_dev->dev_data;
- int value[ARRAY_SIZE(taos_device_lux)];
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int value[ARRAY_SIZE(taos_device_lux)*3 + 1];
int n;
get_options(buf, ARRAY_SIZE(value), value);
@@ -725,18 +720,17 @@ static ssize_t taos_luxtable_store(struct device *dev,
}
if (chip->taos_chip_status == TSL258X_CHIP_WORKING)
- taos_chip_off(chip->client);
+ taos_chip_off(indio_dev);
/* Zero out the table */
memset(taos_device_lux, 0, sizeof(taos_device_lux));
memcpy(taos_device_lux, &value[1], (value[0] * 4));
- taos_chip_on(chip->client);
+ taos_chip_on(indio_dev);
return len;
}
-static DEVICE_ATTR(name, S_IRUGO, taos_device_id, NULL);
static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
taos_power_state_show, taos_power_state_store);
@@ -762,7 +756,6 @@ static DEVICE_ATTR(illuminance0_lux_table, S_IRUGO | S_IWUSR,
taos_luxtable_show, taos_luxtable_store);
static struct attribute *sysfs_attrs_ctrl[] = {
- &dev_attr_name.attr,
&dev_attr_power_state.attr,
&dev_attr_illuminance0_calibscale.attr, /* Gain */
&dev_attr_illuminance0_calibscale_available.attr,
@@ -798,9 +791,10 @@ static const struct iio_info tsl2583_info = {
static int __devinit taos_probe(struct i2c_client *clientp,
const struct i2c_device_id *idp)
{
- int i, ret = 0;
+ int i, ret;
unsigned char buf[TSL258X_MAX_DEVICE_REGS];
- static struct tsl2583_chip *chip;
+ struct tsl2583_chip *chip;
+ struct iio_dev *indio_dev;
if (!i2c_check_functionality(clientp->adapter,
I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -810,12 +804,15 @@ static int __devinit taos_probe(struct i2c_client *clientp,
return -EOPNOTSUPP;
}
- chip = kzalloc(sizeof(struct tsl2583_chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ dev_err(&clientp->dev, "iio allocation failed\n");
+ goto fail1;
+ }
+ chip = iio_priv(indio_dev);
chip->client = clientp;
- i2c_set_clientdata(clientp, chip);
+ i2c_set_clientdata(clientp, indio_dev);
mutex_init(&chip->als_mutex);
chip->taos_chip_status = TSL258X_CHIP_UNKNOWN;
@@ -827,14 +824,14 @@ static int __devinit taos_probe(struct i2c_client *clientp,
if (ret < 0) {
dev_err(&clientp->dev, "i2c_smbus_write_bytes() to cmd "
"reg failed in taos_probe(), err = %d\n", ret);
- goto fail1;
+ goto fail2;
}
ret = i2c_smbus_read_byte(clientp);
if (ret < 0) {
dev_err(&clientp->dev, "i2c_smbus_read_byte from "
"reg failed in taos_probe(), err = %d\n", ret);
- goto fail1;
+ goto fail2;
}
buf[i] = ret;
}
@@ -842,58 +839,50 @@ static int __devinit taos_probe(struct i2c_client *clientp,
if (!taos_tsl258x_device(buf)) {
dev_info(&clientp->dev, "i2c device found but does not match "
"expected id in taos_probe()\n");
- goto fail1;
+ goto fail2;
}
ret = i2c_smbus_write_byte(clientp, (TSL258X_CMD_REG | TSL258X_CNTRL));
if (ret < 0) {
dev_err(&clientp->dev, "i2c_smbus_write_byte() to cmd reg "
"failed in taos_probe(), err = %d\n", ret);
- goto fail1;
- }
-
- chip->iio_dev = iio_allocate_device(0);
- if (!chip->iio_dev) {
- ret = -ENOMEM;
- dev_err(&clientp->dev, "iio allocation failed\n");
- goto fail1;
+ goto fail2;
}
- chip->iio_dev->info = &tsl2583_info;
- chip->iio_dev->dev.parent = &clientp->dev;
- chip->iio_dev->dev_data = (void *)(chip);
- chip->iio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(chip->iio_dev);
+ indio_dev->info = &tsl2583_info;
+ indio_dev->dev.parent = &clientp->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = chip->client->name;
+ ret = iio_device_register(indio_dev);
if (ret) {
dev_err(&clientp->dev, "iio registration failed\n");
- goto fail1;
+ goto fail2;
}
/* Load up the V2 defaults (these are hard coded defaults for now) */
taos_defaults(chip);
/* Make sure the chip is on */
- taos_chip_on(clientp);
+ taos_chip_on(indio_dev);
dev_info(&clientp->dev, "Light sensor found.\n");
-
return 0;
-
fail1:
- kfree(chip);
-
+ iio_free_device(indio_dev);
+fail2:
return ret;
}
static int taos_suspend(struct i2c_client *client, pm_message_t state)
{
- struct tsl2583_chip *chip = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
int ret = 0;
mutex_lock(&chip->als_mutex);
if (chip->taos_chip_status == TSL258X_CHIP_WORKING) {
- ret = taos_chip_off(client);
+ ret = taos_chip_off(indio_dev);
chip->taos_chip_status = TSL258X_CHIP_SUSPENDED;
}
@@ -903,13 +892,14 @@ static int taos_suspend(struct i2c_client *client, pm_message_t state)
static int taos_resume(struct i2c_client *client)
{
- struct tsl2583_chip *chip = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
int ret = 0;
mutex_lock(&chip->als_mutex);
if (chip->taos_chip_status == TSL258X_CHIP_SUSPENDED)
- ret = taos_chip_on(client);
+ ret = taos_chip_on(indio_dev);
mutex_unlock(&chip->als_mutex);
return ret;
@@ -918,11 +908,9 @@ static int taos_resume(struct i2c_client *client)
static int __devexit taos_remove(struct i2c_client *client)
{
- struct tsl2583_chip *chip = i2c_get_clientdata(client);
-
- iio_device_unregister(chip->iio_dev);
+ iio_device_unregister(i2c_get_clientdata(client));
+ iio_free_device(i2c_get_clientdata(client));
- kfree(chip);
return 0;
}
diff --git a/drivers/staging/iio/magnetometer/Kconfig b/drivers/staging/iio/magnetometer/Kconfig
index 81b579d371d..722c4e13f71 100644
--- a/drivers/staging/iio/magnetometer/Kconfig
+++ b/drivers/staging/iio/magnetometer/Kconfig
@@ -1,11 +1,12 @@
#
# Magnetometer sensors
#
-comment "Magnetometer sensors"
+menu "Magnetometer sensors"
config SENSORS_AK8975
tristate "Asahi Kasei AK8975 3-Axis Magnetometer"
depends on I2C
+ depends on GENERIC_GPIO
help
Say yes here to build support for Asahi Kasei AK8975 3-Axis
Magnetometer.
@@ -23,3 +24,4 @@ config SENSORS_HMC5843
To compile this driver as a module, choose M here: the module
will be called hmc5843
+endmenu
diff --git a/drivers/staging/iio/magnetometer/ak8975.c b/drivers/staging/iio/magnetometer/ak8975.c
index 33919e87e7c..8b017127fd4 100644
--- a/drivers/staging/iio/magnetometer/ak8975.c
+++ b/drivers/staging/iio/magnetometer/ak8975.c
@@ -31,8 +31,7 @@
#include <linux/gpio.h>
#include "../iio.h"
-#include "magnet.h"
-
+#include "../sysfs.h"
/*
* Register definitions, as well as various shifts and masks to get at the
* individual fields of the registers.
@@ -93,38 +92,28 @@ struct ak8975_data {
struct mutex lock;
u8 asa[3];
long raw_to_gauss[3];
- unsigned long mode;
+ bool mode;
u8 reg_cache[AK8975_MAX_REGS];
int eoc_gpio;
int eoc_irq;
};
+static const int ak8975_index_to_reg[] = {
+ AK8975_REG_HXL, AK8975_REG_HYL, AK8975_REG_HZL,
+};
+
/*
* Helper function to write to the I2C device's registers.
*/
static int ak8975_write_data(struct i2c_client *client,
u8 reg, u8 val, u8 mask, u8 shift)
{
- u8 regval;
- struct i2c_msg msg;
- u8 w_data[2];
- int ret = 0;
-
struct ak8975_data *data = i2c_get_clientdata(client);
+ u8 regval;
+ int ret;
- regval = data->reg_cache[reg];
- regval &= ~mask;
- regval |= val << shift;
-
- w_data[0] = reg;
- w_data[1] = regval;
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = 2;
- msg.buf = w_data;
-
- ret = i2c_transfer(client->adapter, &msg, 1);
+ regval = (data->reg_cache[reg] & ~mask) | (val << shift);
+ ret = i2c_smbus_write_byte_data(client, reg, regval);
if (ret < 0) {
dev_err(&client->dev, "Write to device fails status %x\n", ret);
return ret;
@@ -140,21 +129,20 @@ static int ak8975_write_data(struct i2c_client *client,
static int ak8975_read_data(struct i2c_client *client,
u8 reg, u8 length, u8 *buffer)
{
- struct i2c_msg msg[2];
- u8 w_data[2];
int ret;
-
- w_data[0] = reg;
-
- msg[0].addr = client->addr;
- msg[0].flags = I2C_M_NOSTART; /* set repeated start and write */
- msg[0].len = 1;
- msg[0].buf = w_data;
-
- msg[1].addr = client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = length;
- msg[1].buf = buffer;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .flags = I2C_M_NOSTART,
+ .len = 1,
+ .buf = &reg,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = length,
+ .buf = buffer,
+ }
+ };
ret = i2c_transfer(client->adapter, msg, 2);
if (ret < 0) {
@@ -204,8 +192,41 @@ static int ak8975_setup(struct i2c_client *client)
return ret;
}
- /* Precalculate scale factor for each axis and
- store in the device data. */
+/*
+ * Precalculate scale factor (in Gauss units) for each axis and
+ * store in the device data.
+ *
+ * This scale factor is axis-dependent, and is derived from 3 calibration
+ * factors ASA(x), ASA(y), and ASA(z).
+ *
+ * These ASA values are read from the sensor device at start of day, and
+ * cached in the device context struct.
+ *
+ * Adjusting the flux value with the sensitivity adjustment value should be
+ * done via the following formula:
+ *
+ * Hadj = H * ( ( ( (ASA-128)*0.5 ) / 128 ) + 1 )
+ *
+ * where H is the raw value, ASA is the sensitivity adjustment, and Hadj
+ * is the resultant adjusted value.
+ *
+ * We reduce the formula to:
+ *
+ * Hadj = H * (ASA + 128) / 256
+ *
+ * H is in the range of -4096 to 4095. The magnetometer has a range of
+ * +-1229uT. To go from the raw value to uT is:
+ *
+ * HuT = H * 1229/4096, or roughly, 3/10.
+ *
+ * Since 1uT = 100 gauss, our final scale factor becomes:
+ *
+ * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100
+ * Hadj = H * ((ASA + 128) * 30 / 256
+ *
+ * Since ASA doesn't change, we cache the resultant scale factor into the
+ * device context in ak8975_setup().
+ */
data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8;
data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8;
data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8;
@@ -222,7 +243,7 @@ static ssize_t show_mode(struct device *dev, struct device_attribute *devattr,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ak8975_data *data = iio_priv(indio_dev);
- return sprintf(buf, "%lu\n", data->mode);
+ return sprintf(buf, "%u\n", data->mode);
}
/*
@@ -235,26 +256,22 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ak8975_data *data = iio_priv(indio_dev);
struct i2c_client *client = data->client;
- unsigned long oval;
+ bool value;
int ret;
/* Convert mode string and do some basic sanity checking on it.
only 0 or 1 are valid. */
- if (strict_strtoul(buf, 10, &oval))
- return -EINVAL;
-
- if (oval > 1) {
- dev_err(dev, "mode value is not supported\n");
- return -EINVAL;
- }
+ ret = strtobool(buf, &value);
+ if (ret < 0)
+ return ret;
mutex_lock(&data->lock);
/* Write the mode to the device. */
- if (data->mode != oval) {
+ if (data->mode != value) {
ret = ak8975_write_data(client,
AK8975_REG_CNTL,
- (u8)oval,
+ (u8)value,
AK8975_REG_CNTL_MODE_MASK,
AK8975_REG_CNTL_MODE_SHIFT);
@@ -263,7 +280,7 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
mutex_unlock(&data->lock);
return ret;
}
- data->mode = oval;
+ data->mode = value;
}
mutex_unlock(&data->lock);
@@ -271,50 +288,6 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
return count;
}
-/*
- * Emits the scale factor to bring the raw value into Gauss units.
- *
- * This scale factor is axis-dependent, and is derived from 3 calibration
- * factors ASA(x), ASA(y), and ASA(z).
- *
- * These ASA values are read from the sensor device at start of day, and
- * cached in the device context struct.
- *
- * Adjusting the flux value with the sensitivity adjustment value should be
- * done via the following formula:
- *
- * Hadj = H * ( ( ( (ASA-128)*0.5 ) / 128 ) + 1 )
- *
- * where H is the raw value, ASA is the sensitivity adjustment, and Hadj
- * is the resultant adjusted value.
- *
- * We reduce the formula to:
- *
- * Hadj = H * (ASA + 128) / 256
- *
- * H is in the range of -4096 to 4095. The magnetometer has a range of
- * +-1229uT. To go from the raw value to uT is:
- *
- * HuT = H * 1229/4096, or roughly, 3/10.
- *
- * Since 1uT = 100 gauss, our final scale factor becomes:
- *
- * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100
- * Hadj = H * ((ASA + 128) * 30 / 256
- *
- * Since ASA doesn't change, we cache the resultant scale factor into the
- * device context in ak8975_setup().
- */
-static ssize_t show_scale(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ak8975_data *data = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(devattr);
-
- return sprintf(buf, "%ld\n", data->raw_to_gauss[this_attr->address]);
-}
-
static int wait_conversion_complete_gpio(struct ak8975_data *data)
{
struct i2c_client *client = data->client;
@@ -371,13 +344,10 @@ static int wait_conversion_complete_polled(struct ak8975_data *data)
/*
* Emits the raw flux value for the x, y, or z axis.
*/
-static ssize_t show_raw(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ak8975_data *data = iio_priv(indio_dev);
struct i2c_client *client = data->client;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(devattr);
u16 meas_reg;
s16 raw;
u8 read_status;
@@ -403,7 +373,7 @@ static ssize_t show_raw(struct device *dev, struct device_attribute *devattr,
}
/* Wait for the conversion to complete. */
- if (data->eoc_gpio)
+ if (gpio_is_valid(data->eoc_gpio))
ret = wait_conversion_complete_gpio(data);
else
ret = wait_conversion_complete_polled(data);
@@ -429,7 +399,8 @@ static ssize_t show_raw(struct device *dev, struct device_attribute *devattr,
/* Read the flux value from the appropriate register
(the register is specified in the iio device attributes). */
- ret = ak8975_read_data(client, this_attr->address, 2, (u8 *)&meas_reg);
+ ret = ak8975_read_data(client, ak8975_index_to_reg[index],
+ 2, (u8 *)&meas_reg);
if (ret < 0) {
dev_err(&client->dev, "Read axis data fails\n");
goto exit;
@@ -442,30 +413,48 @@ static ssize_t show_raw(struct device *dev, struct device_attribute *devattr,
/* Clamp to valid range. */
raw = clamp_t(s16, raw, -4096, 4095);
-
- return sprintf(buf, "%d\n", raw);
+ *val = raw;
+ return IIO_VAL_INT;
exit:
mutex_unlock(&data->lock);
return ret;
}
+static int ak8975_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct ak8975_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case 0:
+ return ak8975_read_axis(indio_dev, chan->address, val);
+ case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ *val = data->raw_to_gauss[chan->address];
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
+
+#define AK8975_CHANNEL(axis, index) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ .address = index, \
+ }
+
+static const struct iio_chan_spec ak8975_channels[] = {
+ AK8975_CHANNEL(X, 0), AK8975_CHANNEL(Y, 1), AK8975_CHANNEL(Z, 2),
+};
+
static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, store_mode, 0);
-static IIO_DEV_ATTR_MAGN_X_SCALE(S_IRUGO, show_scale, NULL, 0);
-static IIO_DEV_ATTR_MAGN_Y_SCALE(S_IRUGO, show_scale, NULL, 1);
-static IIO_DEV_ATTR_MAGN_Z_SCALE(S_IRUGO, show_scale, NULL, 2);
-static IIO_DEV_ATTR_MAGN_X(show_raw, AK8975_REG_HXL);
-static IIO_DEV_ATTR_MAGN_Y(show_raw, AK8975_REG_HYL);
-static IIO_DEV_ATTR_MAGN_Z(show_raw, AK8975_REG_HZL);
static struct attribute *ak8975_attr[] = {
&iio_dev_attr_mode.dev_attr.attr,
- &iio_dev_attr_magn_x_scale.dev_attr.attr,
- &iio_dev_attr_magn_y_scale.dev_attr.attr,
- &iio_dev_attr_magn_z_scale.dev_attr.attr,
- &iio_dev_attr_magn_x_raw.dev_attr.attr,
- &iio_dev_attr_magn_y_raw.dev_attr.attr,
- &iio_dev_attr_magn_z_raw.dev_attr.attr,
NULL
};
@@ -475,6 +464,7 @@ static struct attribute_group ak8975_attr_group = {
static const struct iio_info ak8975_info = {
.attrs = &ak8975_attr_group,
+ .read_raw = &ak8975_read_raw,
.driver_module = THIS_MODULE,
};
@@ -487,11 +477,14 @@ static int ak8975_probe(struct i2c_client *client,
int err;
/* Grab and set up the supplied GPIO. */
- eoc_gpio = irq_to_gpio(client->irq);
+ if (client->dev.platform_data == NULL)
+ eoc_gpio = -1;
+ else
+ eoc_gpio = *(int *)(client->dev.platform_data);
/* We may not have a GPIO based IRQ to scan, that is fine, we will
poll if so */
- if (eoc_gpio > 0) {
+ if (gpio_is_valid(eoc_gpio)) {
err = gpio_request(eoc_gpio, "ak_8975");
if (err < 0) {
dev_err(&client->dev,
@@ -507,8 +500,7 @@ static int ak8975_probe(struct i2c_client *client,
eoc_gpio, err);
goto exit_gpio;
}
- } else
- eoc_gpio = 0; /* No GPIO available */
+ }
/* Register with IIO */
indio_dev = iio_allocate_device(sizeof(*data));
@@ -521,7 +513,7 @@ static int ak8975_probe(struct i2c_client *client,
err = ak8975_setup(client);
if (err < 0) {
dev_err(&client->dev, "AK8975 initialization fails\n");
- goto exit_gpio;
+ goto exit_free_iio;
}
i2c_set_clientdata(client, indio_dev);
@@ -530,6 +522,8 @@ static int ak8975_probe(struct i2c_client *client,
data->eoc_irq = client->irq;
data->eoc_gpio = eoc_gpio;
indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = ak8975_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak8975_channels);
indio_dev->info = &ak8975_info;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -542,7 +536,7 @@ static int ak8975_probe(struct i2c_client *client,
exit_free_iio:
iio_free_device(indio_dev);
exit_gpio:
- if (eoc_gpio)
+ if (gpio_is_valid(eoc_gpio))
gpio_free(eoc_gpio);
exit:
return err;
@@ -552,13 +546,13 @@ static int ak8975_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ak8975_data *data = iio_priv(indio_dev);
- int eoc_gpio = data->eoc_gpio;
iio_device_unregister(indio_dev);
- iio_free_device(indio_dev);
- if (eoc_gpio)
- gpio_free(eoc_gpio);
+ if (gpio_is_valid(data->eoc_gpio))
+ gpio_free(data->eoc_gpio);
+
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index b44c273a91a..fc9ee970888 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -24,7 +24,6 @@
#include <linux/types.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "magnet.h"
#define HMC5843_I2C_ADDRESS 0x1E
@@ -62,9 +61,9 @@
/*
* Device status
*/
-#define DATA_READY 0x01
-#define DATA_OUTPUT_LOCK 0x02
-#define VOLTAGE_REGULATOR_ENABLED 0x04
+#define DATA_READY 0x01
+#define DATA_OUTPUT_LOCK 0x02
+#define VOLTAGE_REGULATOR_ENABLED 0x04
/*
* Mode register configuration
@@ -89,22 +88,16 @@
/*
* Device Configutration
*/
-#define CONF_NORMAL 0x00
+#define CONF_NORMAL 0x00
#define CONF_POSITIVE_BIAS 0x01
#define CONF_NEGATIVE_BIAS 0x02
#define CONF_NOT_USED 0x03
#define MEAS_CONF_MASK 0x03
-static const char *regval_to_scale[] = {
- "0.0000006173",
- "0.0000007692",
- "0.0000010309",
- "0.0000012821",
- "0.0000018868",
- "0.0000021739",
- "0.0000025641",
- "0.0000035714",
+static int hmc5843_regval_to_nanoscale[] = {
+ 6173, 7692, 10309, 12821, 18868, 21739, 25641, 35714
};
+
static const int regval_to_input_field_mg[] = {
700,
1000,
@@ -115,7 +108,7 @@ static const int regval_to_input_field_mg[] = {
4500,
6500
};
-static const char *regval_to_samp_freq[] = {
+static const char * const regval_to_samp_freq[] = {
"0.5",
"1",
"2",
@@ -150,37 +143,28 @@ static s32 hmc5843_configure(struct i2c_client *client,
}
/* Return the measurement value from the specified channel */
-static ssize_t hmc5843_read_measurement(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int hmc5843_read_measurement(struct iio_dev *indio_dev,
+ int address,
+ int *val)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
- s16 coordinate_val;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct hmc5843_data *data = iio_priv(indio_dev);
s32 result;
mutex_lock(&data->lock);
-
result = i2c_smbus_read_byte_data(client, HMC5843_STATUS_REG);
while (!(result & DATA_READY))
result = i2c_smbus_read_byte_data(client, HMC5843_STATUS_REG);
- result = i2c_smbus_read_word_data(client, this_attr->address);
+ result = i2c_smbus_read_word_data(client, address);
mutex_unlock(&data->lock);
if (result < 0)
return -EINVAL;
- coordinate_val = (s16)swab16((u16)result);
- return sprintf(buf, "%d\n", coordinate_val);
+ *val = (s16)swab16((u16)result);
+ return IIO_VAL_INT;
}
-static IIO_DEV_ATTR_MAGN_X(hmc5843_read_measurement,
- HMC5843_DATA_OUT_X_MSB_REG);
-static IIO_DEV_ATTR_MAGN_Y(hmc5843_read_measurement,
- HMC5843_DATA_OUT_Y_MSB_REG);
-static IIO_DEV_ATTR_MAGN_Z(hmc5843_read_measurement,
- HMC5843_DATA_OUT_Z_MSB_REG);
+
/*
* From the datasheet
@@ -336,7 +320,7 @@ static s32 hmc5843_set_rate(struct i2c_client *client,
reg_val = (data->meas_conf) | (rate << RATE_OFFSET);
if (rate >= RATE_NOT_USED) {
dev_err(&client->dev,
- "This data output rate is not supported \n");
+ "This data output rate is not supported\n");
return -EINVAL;
}
return i2c_smbus_write_byte_data(client, HMC5843_CONFIG_REG_A, reg_val);
@@ -461,34 +445,52 @@ exit:
return count;
}
-static IIO_DEVICE_ATTR(magn_range,
+static IIO_DEVICE_ATTR(in_magn_range,
S_IWUSR | S_IRUGO,
show_range,
set_range,
HMC5843_CONFIG_REG_B);
-static ssize_t show_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int hmc5843_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct hmc5843_data *data = iio_priv(indio_dev);
- return strlen(strcpy(buf, regval_to_scale[data->range]));
+
+ switch (mask) {
+ case 0:
+ return hmc5843_read_measurement(indio_dev,
+ chan->address,
+ val);
+ case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ *val = 0;
+ *val2 = hmc5843_regval_to_nanoscale[data->range];
+ return IIO_VAL_INT_PLUS_NANO;
+ };
+ return -EINVAL;
}
-static IIO_DEVICE_ATTR(magn_scale,
- S_IRUGO,
- show_scale,
- NULL , 0);
+
+#define HMC5843_CHANNEL(axis, add) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .address = add \
+ }
+
+static const struct iio_chan_spec hmc5843_channels[] = {
+ HMC5843_CHANNEL(X, HMC5843_DATA_OUT_X_MSB_REG),
+ HMC5843_CHANNEL(Y, HMC5843_DATA_OUT_Y_MSB_REG),
+ HMC5843_CHANNEL(Z, HMC5843_DATA_OUT_Z_MSB_REG),
+};
static struct attribute *hmc5843_attributes[] = {
&iio_dev_attr_meas_conf.dev_attr.attr,
&iio_dev_attr_operating_mode.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_dev_attr_magn_range.dev_attr.attr,
- &iio_dev_attr_magn_scale.dev_attr.attr,
- &iio_dev_attr_magn_x_raw.dev_attr.attr,
- &iio_dev_attr_magn_y_raw.dev_attr.attr,
- &iio_dev_attr_magn_z_raw.dev_attr.attr,
+ &iio_dev_attr_in_magn_range.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
NULL
};
@@ -530,6 +532,7 @@ static void hmc5843_init_client(struct i2c_client *client)
static const struct iio_info hmc5843_info = {
.attrs = &hmc5843_group,
+ .read_raw = &hmc5843_read_raw,
.driver_module = THIS_MODULE,
};
@@ -558,6 +561,9 @@ static int hmc5843_probe(struct i2c_client *client,
hmc5843_init_client(client);
indio_dev->info = &hmc5843_info;
+ indio_dev->name = id->name;
+ indio_dev->channels = hmc5843_channels;
+ indio_dev->num_channels = ARRAY_SIZE(hmc5843_channels);
indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
err = iio_device_register(indio_dev);
@@ -573,9 +579,11 @@ exit:
static int hmc5843_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
/* sleep mode to save power */
hmc5843_configure(client, MODE_SLEEP);
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/magnetometer/magnet.h b/drivers/staging/iio/magnetometer/magnet.h
deleted file mode 100644
index 1260eb7bd41..00000000000
--- a/drivers/staging/iio/magnetometer/magnet.h
+++ /dev/null
@@ -1,31 +0,0 @@
-
-#include "../sysfs.h"
-
-/* Magnetometer types of attribute */
-
-#define IIO_DEV_ATTR_MAGN_X_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(magn_x_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_MAGN_Y_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(magn_y_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_MAGN_Z_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(magn_z_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_MAGN_X_SCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(magn_x_scale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_MAGN_Y_SCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(magn_y_scale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_MAGN_Z_SCALE(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(magn_z_scale, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_MAGN_X(_show, _addr) \
- IIO_DEVICE_ATTR(magn_x_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_MAGN_Y(_show, _addr) \
- IIO_DEVICE_ATTR(magn_y_raw, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_MAGN_Z(_show, _addr) \
- IIO_DEVICE_ATTR(magn_z_raw, S_IRUGO, _show, NULL, _addr)
diff --git a/drivers/staging/iio/meter/Kconfig b/drivers/staging/iio/meter/Kconfig
index 12e36e46069..d290d273841 100644
--- a/drivers/staging/iio/meter/Kconfig
+++ b/drivers/staging/iio/meter/Kconfig
@@ -1,7 +1,7 @@
#
# IIO meter drivers configuration
#
-comment "Active energy metering IC"
+menu "Active energy metering IC"
config ADE7753
tristate "Analog Devices ADE7753/6 Single-Phase Multifunction Metering IC Driver"
@@ -20,8 +20,8 @@ config ADE7754
config ADE7758
tristate "Analog Devices ADE7758 Poly Phase Multifunction Energy Metering IC Driver"
depends on SPI
- select IIO_TRIGGER if IIO_RING_BUFFER
- select IIO_SW_RING if IIO_RING_BUFFER
+ select IIO_TRIGGER if IIO_BUFFER
+ select IIO_SW_RING if IIO_BUFFER
help
Say yes here to build support for Analog Devices ADE7758 Polyphase
Multifunction Energy Metering IC with Per Phase Information Driver.
@@ -59,3 +59,5 @@ config ADE7854_SPI
To compile this driver as a module, choose M here: the
module will be called ade7854-spi.
+
+endmenu
diff --git a/drivers/staging/iio/meter/Makefile b/drivers/staging/iio/meter/Makefile
index 0cc7d5140df..de3863d6b07 100644
--- a/drivers/staging/iio/meter/Makefile
+++ b/drivers/staging/iio/meter/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_ADE7753) += ade7753.o
obj-$(CONFIG_ADE7754) += ade7754.o
ade7758-y := ade7758_core.o
-ade7758-$(CONFIG_IIO_RING_BUFFER) += ade7758_ring.o ade7758_trigger.o
+ade7758-$(CONFIG_IIO_BUFFER) += ade7758_ring.o ade7758_trigger.o
obj-$(CONFIG_ADE7758) += ade7758.o
obj-$(CONFIG_ADE7759) += ade7759.o
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 4d1bd42ff9e..940fef602b1 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -8,7 +8,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
@@ -17,6 +16,7 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -453,8 +453,8 @@ out:
}
static IIO_DEV_ATTR_TEMP_RAW(ade7753_read_8bit);
-static IIO_CONST_ATTR(temp_offset, "-25 C");
-static IIO_CONST_ATTR(temp_scale, "0.67 C");
+static IIO_CONST_ATTR(in_temp_offset, "-25 C");
+static IIO_CONST_ATTR(in_temp_scale, "0.67 C");
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ade7753_read_frequency,
@@ -465,9 +465,9 @@ static IIO_DEV_ATTR_RESET(ade7753_write_reset);
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
static struct attribute *ade7753_attributes[] = {
- &iio_dev_attr_temp_raw.dev_attr.attr,
- &iio_const_attr_temp_offset.dev_attr.attr,
- &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_in_temp_raw.dev_attr.attr,
+ &iio_const_attr_in_temp_offset.dev_attr.attr,
+ &iio_const_attr_in_temp_scale.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_reset.dev_attr.attr,
@@ -512,7 +512,7 @@ static const struct iio_info ade7753_info = {
static int __devinit ade7753_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct ade7753_state *st;
struct iio_dev *indio_dev;
@@ -534,22 +534,19 @@ static int __devinit ade7753_probe(struct spi_device *spi)
indio_dev->info = &ade7753_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
+ /* Get the device into a sane initial state */
+ ret = ade7753_initial_setup(indio_dev);
if (ret)
goto error_free_dev;
- regdone = 1;
- /* Get the device into a sane initial state */
- ret = ade7753_initial_setup(indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
+
return 0;
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
@@ -561,12 +558,13 @@ static int ade7753_remove(struct spi_device *spi)
int ret;
struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ iio_device_unregister(indio_dev);
+
ret = ade7753_stop_device(&(indio_dev->dev));
if (ret)
goto err_ret;
- iio_device_unregister(indio_dev);
-
+ iio_free_device(indio_dev);
err_ret:
return ret;
}
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index f4f85fd5619..33f0d327c9f 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -8,7 +8,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
@@ -17,6 +16,7 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -472,8 +472,8 @@ out:
return ret ? ret : len;
}
static IIO_DEV_ATTR_TEMP_RAW(ade7754_read_8bit);
-static IIO_CONST_ATTR(temp_offset, "129 C");
-static IIO_CONST_ATTR(temp_scale, "4 C");
+static IIO_CONST_ATTR(in_temp_offset, "129 C");
+static IIO_CONST_ATTR(in_temp_scale, "4 C");
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ade7754_read_frequency,
@@ -484,9 +484,9 @@ static IIO_DEV_ATTR_RESET(ade7754_write_reset);
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26000 13000 65000 33000");
static struct attribute *ade7754_attributes[] = {
- &iio_dev_attr_temp_raw.dev_attr.attr,
- &iio_const_attr_temp_offset.dev_attr.attr,
- &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_in_temp_raw.dev_attr.attr,
+ &iio_const_attr_in_temp_offset.dev_attr.attr,
+ &iio_const_attr_in_temp_scale.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_reset.dev_attr.attr,
@@ -535,7 +535,7 @@ static const struct iio_info ade7754_info = {
static int __devinit ade7754_probe(struct spi_device *spi)
{
- int ret, regdone = 0;
+ int ret;
struct ade7754_state *st;
struct iio_dev *indio_dev;
@@ -557,22 +557,18 @@ static int __devinit ade7754_probe(struct spi_device *spi)
indio_dev->info = &ade7754_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
- regdone = 1;
-
/* Get the device into a sane initial state */
ret = ade7754_initial_setup(indio_dev);
if (ret)
goto error_free_dev;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_free_dev;
+
return 0;
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
@@ -584,11 +580,12 @@ static int ade7754_remove(struct spi_device *spi)
int ret;
struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ iio_device_unregister(indio_dev);
ret = ade7754_stop_device(&(indio_dev->dev));
if (ret)
goto err_ret;
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
err_ret:
return ret;
diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
index fd74e156abf..bdd1b05bf7a 100644
--- a/drivers/staging/iio/meter/ade7758.h
+++ b/drivers/staging/iio/meter/ade7758.h
@@ -122,7 +122,7 @@ struct ade7758_state {
u8 *tx;
u8 *rx;
struct mutex buf_lock;
- u32 available_scan_masks[AD7758_NUM_WAVESRC];
+ unsigned long available_scan_masks[AD7758_NUM_WAVESRC];
struct iio_chan_spec *ade7758_ring_channels;
struct spi_transfer ring_xfer[4];
struct spi_message ring_msg;
@@ -134,7 +134,7 @@ struct ade7758_state {
unsigned char tx_buf[8];
};
-#ifdef CONFIG_IIO_RING_BUFFER
+#ifdef CONFIG_IIO_BUFFER
/* At the moment triggers are only used for ring buffer
* filling. This may change!
*/
@@ -150,8 +150,7 @@ ssize_t ade7758_read_data_from_ring(struct device *dev,
int ade7758_configure_ring(struct iio_dev *indio_dev);
void ade7758_unconfigure_ring(struct iio_dev *indio_dev);
-int ade7758_initialize_ring(struct iio_ring_buffer *ring);
-void ade7758_uninitialize_ring(struct iio_ring_buffer *ring);
+void ade7758_uninitialize_ring(struct iio_dev *indio_dev);
int ade7758_set_irq(struct device *dev, bool enable);
int ade7758_spi_write_reg_8(struct device *dev,
@@ -159,7 +158,7 @@ int ade7758_spi_write_reg_8(struct device *dev,
int ade7758_spi_read_reg_8(struct device *dev,
u8 reg_address, u8 *val);
-#else /* CONFIG_IIO_RING_BUFFER */
+#else /* CONFIG_IIO_BUFFER */
static inline void ade7758_remove_trigger(struct iio_dev *indio_dev)
{
@@ -180,9 +179,9 @@ static inline int ade7758_initialize_ring(struct iio_ring_buffer *ring)
{
return 0;
}
-static inline void ade7758_uninitialize_ring(struct iio_ring_buffer *ring)
+static inline void ade7758_uninitialize_ring(struct iio_dev *indio_dev)
{
}
-#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* CONFIG_IIO_BUFFER */
#endif
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 299b95434e2..c5dafbdf3bd 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -8,7 +8,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
@@ -17,10 +16,11 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../ring_generic.h"
+#include "../buffer_generic.h"
#include "meter.h"
#include "ade7758.h"
@@ -583,8 +583,8 @@ out:
}
static IIO_DEV_ATTR_TEMP_RAW(ade7758_read_8bit);
-static IIO_CONST_ATTR(temp_offset, "129 C");
-static IIO_CONST_ATTR(temp_scale, "4 C");
+static IIO_CONST_ATTR(in_temp_offset, "129 C");
+static IIO_CONST_ATTR(in_temp_scale, "4 C");
static IIO_DEV_ATTR_AWATTHR(ade7758_read_16bit,
ADE7758_AWATTHR);
@@ -614,9 +614,9 @@ static IIO_DEV_ATTR_RESET(ade7758_write_reset);
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26040 13020 6510 3255");
static struct attribute *ade7758_attributes[] = {
- &iio_dev_attr_temp_raw.dev_attr.attr,
- &iio_const_attr_temp_offset.dev_attr.attr,
- &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_in_temp_raw.dev_attr.attr,
+ &iio_const_attr_in_temp_offset.dev_attr.attr,
+ &iio_const_attr_in_temp_scale.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_reset.dev_attr.attr,
@@ -662,7 +662,7 @@ static const struct attribute_group ade7758_attribute_group = {
};
static struct iio_chan_spec ade7758_channels[] = {
- IIO_CHAN(IIO_IN, 0, 1, 0, "raw", 0, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
0, IIO_ST('s', 24, 32, 0), 0),
@@ -682,7 +682,7 @@ static struct iio_chan_spec ade7758_channels[] = {
(1 << IIO_CHAN_INFO_SCALE_SHARED),
AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
4, IIO_ST('s', 24, 32, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, "raw", 1, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 1, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
5, IIO_ST('s', 24, 32, 0), 0),
@@ -702,7 +702,7 @@ static struct iio_chan_spec ade7758_channels[] = {
(1 << IIO_CHAN_INFO_SCALE_SHARED),
AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
9, IIO_ST('s', 24, 32, 0), 0),
- IIO_CHAN(IIO_IN, 0, 1, 0, "raw", 2, 0,
+ IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 2, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
10, IIO_ST('s', 24, 32, 0), 0),
@@ -732,7 +732,7 @@ static const struct iio_info ade7758_info = {
static int __devinit ade7758_probe(struct spi_device *spi)
{
- int i, ret, regdone = 0;
+ int i, ret;
struct ade7758_state *st;
struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));
@@ -766,7 +766,7 @@ static int __devinit ade7758_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
for (i = 0; i < AD7758_NUM_WAVESRC; i++)
- st->available_scan_masks[i] = 1 << i;
+ set_bit(i, &st->available_scan_masks[i]);
indio_dev->available_scan_masks = st->available_scan_masks;
@@ -774,14 +774,9 @@ static int __devinit ade7758_probe(struct spi_device *spi)
if (ret)
goto error_free_tx;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_ring_funcs;
- regdone = 1;
-
- ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
- &ade7758_channels[0],
- ARRAY_SIZE(ade7758_channels));
+ ret = iio_buffer_register(indio_dev,
+ &ade7758_channels[0],
+ ARRAY_SIZE(ade7758_channels));
if (ret) {
dev_err(&spi->dev, "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
@@ -795,16 +790,20 @@ static int __devinit ade7758_probe(struct spi_device *spi)
if (spi->irq) {
ret = ade7758_probe_trigger(indio_dev);
if (ret)
- goto error_remove_trigger;
+ goto error_uninitialize_ring;
}
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_remove_trigger;
+
return 0;
error_remove_trigger:
- if (indio_dev->modes & INDIO_RING_TRIGGERED)
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
ade7758_remove_trigger(indio_dev);
error_uninitialize_ring:
- ade7758_uninitialize_ring(indio_dev->ring);
+ ade7758_uninitialize_ring(indio_dev);
error_unreg_ring_funcs:
ade7758_unconfigure_ring(indio_dev);
error_free_tx:
@@ -812,10 +811,7 @@ error_free_tx:
error_free_rx:
kfree(st->rx);
error_free_dev:
- if (regdone)
- iio_device_unregister(indio_dev);
- else
- iio_free_device(indio_dev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -826,18 +822,19 @@ static int ade7758_remove(struct spi_device *spi)
struct ade7758_state *st = iio_priv(indio_dev);
int ret;
+ iio_device_unregister(indio_dev);
ret = ade7758_stop_device(&indio_dev->dev);
if (ret)
goto err_ret;
ade7758_remove_trigger(indio_dev);
- ade7758_uninitialize_ring(indio_dev->ring);
+ ade7758_uninitialize_ring(indio_dev);
ade7758_unconfigure_ring(indio_dev);
kfree(st->tx);
kfree(st->rx);
- iio_device_unregister(indio_dev);
- return 0;
+ iio_free_device(indio_dev);
+
err_ret:
return ret;
}
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index b89b7f882e8..99ade658a2d 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -6,23 +6,14 @@
* Licensed under the GPL-2.
*/
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <asm/unaligned.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../ring_sw.h"
-#include "../accel/accel.h"
-#include "../trigger.h"
+#include "../trigger_consumer.h"
#include "ade7758.h"
/**
@@ -69,8 +60,8 @@ out:
static irqreturn_t ade7758_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->private_data;
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct iio_buffer *ring = indio_dev->buffer;
struct ade7758_state *st = iio_priv(indio_dev);
s64 dat64[2];
u32 *dat32 = (u32 *)dat64;
@@ -100,14 +91,14 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
static int ade7758_ring_preenable(struct iio_dev *indio_dev)
{
struct ade7758_state *st = iio_priv(indio_dev);
- struct iio_ring_buffer *ring = indio_dev->ring;
+ struct iio_buffer *ring = indio_dev->buffer;
size_t d_size;
unsigned channel;
if (!ring->scan_count)
return -EINVAL;
- channel = __ffs(ring->scan_mask);
+ channel = find_first_bit(ring->scan_mask, indio_dev->masklength);
d_size = st->ade7758_ring_channels[channel].scan_type.storagebits / 8;
@@ -118,9 +109,9 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
d_size += sizeof(s64) - (d_size % sizeof(s64));
}
- if (indio_dev->ring->access->set_bytes_per_datum)
- indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring,
- d_size);
+ if (indio_dev->buffer->access->set_bytes_per_datum)
+ indio_dev->buffer->access->
+ set_bytes_per_datum(indio_dev->buffer, d_size);
ade7758_write_waveform_type(&indio_dev->dev,
st->ade7758_ring_channels[channel].address);
@@ -128,22 +119,16 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
return 0;
}
-static const struct iio_ring_setup_ops ade7758_ring_setup_ops = {
+static const struct iio_buffer_setup_ops ade7758_ring_setup_ops = {
.preenable = &ade7758_ring_preenable,
- .postenable = &iio_triggered_ring_postenable,
- .predisable = &iio_triggered_ring_predisable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
};
void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
{
- /* ensure that the trigger has been detached */
- if (indio_dev->trig) {
- iio_put_trigger(indio_dev->trig);
- iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc);
- }
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
}
int ade7758_configure_ring(struct iio_dev *indio_dev)
@@ -151,16 +136,16 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
struct ade7758_state *st = iio_priv(indio_dev);
int ret = 0;
- indio_dev->ring = iio_sw_rb_allocate(indio_dev);
- if (!indio_dev->ring) {
+ indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
+ if (!indio_dev->buffer) {
ret = -ENOMEM;
return ret;
}
/* Effectively select the ring buffer implementation */
- indio_dev->ring->access = &ring_sw_access_funcs;
- indio_dev->ring->setup_ops = &ade7758_ring_setup_ops;
- indio_dev->ring->owner = THIS_MODULE;
+ indio_dev->buffer->access = &ring_sw_access_funcs;
+ indio_dev->buffer->setup_ops = &ade7758_ring_setup_ops;
+ indio_dev->buffer->owner = THIS_MODULE;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ade7758_trigger_handler,
@@ -173,7 +158,7 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
st->tx_buf[0] = ADE7758_READ_REG(ADE7758_RSTATUS);
st->tx_buf[1] = 0;
@@ -211,11 +196,11 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
return 0;
error_iio_sw_rb_free:
- iio_sw_rb_free(indio_dev->ring);
+ iio_sw_rb_free(indio_dev->buffer);
return ret;
}
-void ade7758_uninitialize_ring(struct iio_ring_buffer *ring)
+void ade7758_uninitialize_ring(struct iio_dev *indio_dev)
{
- iio_ring_buffer_unregister(ring);
+ iio_buffer_unregister(indio_dev);
}
diff --git a/drivers/staging/iio/meter/ade7758_trigger.c b/drivers/staging/iio/meter/ade7758_trigger.c
index a5c3248151e..392dfe30244 100644
--- a/drivers/staging/iio/meter/ade7758_trigger.c
+++ b/drivers/staging/iio/meter/ade7758_trigger.c
@@ -7,16 +7,10 @@
*/
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "ade7758.h"
@@ -57,6 +51,12 @@ static int ade7758_trig_try_reen(struct iio_trigger *trig)
return 0;
}
+static const struct iio_trigger_ops ade7758_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &ade7758_data_rdy_trigger_set_state,
+ .try_reenable = &ade7758_trig_try_reen,
+};
+
int ade7758_probe_trigger(struct iio_dev *indio_dev)
{
struct ade7758_state *st = iio_priv(indio_dev);
@@ -79,10 +79,8 @@ int ade7758_probe_trigger(struct iio_dev *indio_dev)
goto error_free_trig;
st->trig->dev.parent = &st->us->dev;
- st->trig->owner = THIS_MODULE;
+ st->trig->ops = &ade7758_trigger_ops;
st->trig->private_data = indio_dev;
- st->trig->set_trigger_state = &ade7758_data_rdy_trigger_set_state;
- st->trig->try_reenable = &ade7758_trig_try_reen;
ret = iio_trigger_register(st->trig);
/* select default trigger */
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index a51a64cad03..b691f10ce98 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -8,7 +8,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
@@ -17,6 +16,7 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -412,8 +412,8 @@ out:
return ret ? ret : len;
}
static IIO_DEV_ATTR_TEMP_RAW(ade7759_read_8bit);
-static IIO_CONST_ATTR(temp_offset, "70 C");
-static IIO_CONST_ATTR(temp_scale, "1 C");
+static IIO_CONST_ATTR(in_temp_offset, "70 C");
+static IIO_CONST_ATTR(in_temp_scale, "1 C");
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ade7759_read_frequency,
@@ -424,9 +424,9 @@ static IIO_DEV_ATTR_RESET(ade7759_write_reset);
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
static struct attribute *ade7759_attributes[] = {
- &iio_dev_attr_temp_raw.dev_attr.attr,
- &iio_const_attr_temp_offset.dev_attr.attr,
- &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_in_temp_raw.dev_attr.attr,
+ &iio_const_attr_in_temp_offset.dev_attr.attr,
+ &iio_const_attr_in_temp_scale.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_reset.dev_attr.attr,
@@ -479,19 +479,17 @@ static int __devinit ade7759_probe(struct spi_device *spi)
indio_dev->info = &ade7759_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
+ /* Get the device into a sane initial state */
+ ret = ade7759_initial_setup(indio_dev);
if (ret)
goto error_free_dev;
- /* Get the device into a sane initial state */
- ret = ade7759_initial_setup(indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
- goto error_unreg_dev;
- return 0;
+ goto error_free_dev;
+ return 0;
-error_unreg_dev:
- iio_device_unregister(indio_dev);
error_free_dev:
iio_free_device(indio_dev);
error_ret:
@@ -504,11 +502,12 @@ static int ade7759_remove(struct spi_device *spi)
int ret;
struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ iio_device_unregister(indio_dev);
ret = ade7759_stop_device(&(indio_dev->dev));
if (ret)
goto err_ret;
- iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
err_ret:
return ret;
diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c
index dd723435340..cbca3fd8fcd 100644
--- a/drivers/staging/iio/meter/ade7854-i2c.c
+++ b/drivers/staging/iio/meter/ade7854-i2c.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "../iio.h"
#include "ade7854.h"
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index e0d10865590..cfa23ba30ef 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "../iio.h"
#include "ade7854.h"
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index b82659f43bc..49c01c5c1b5 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -8,7 +8,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
@@ -16,6 +15,7 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -582,7 +582,7 @@ error_unreg_dev:
iio_device_unregister(indio_dev);
error_free_dev:
iio_free_device(indio_dev);
-error_ret:
+
return ret;
}
EXPORT_SYMBOL(ade7854_probe);
@@ -590,6 +590,7 @@ EXPORT_SYMBOL(ade7854_probe);
int ade7854_remove(struct iio_dev *indio_dev)
{
iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/resolver/Kconfig b/drivers/staging/iio/resolver/Kconfig
index 6ecd79e3003..49f69ef986f 100644
--- a/drivers/staging/iio/resolver/Kconfig
+++ b/drivers/staging/iio/resolver/Kconfig
@@ -1,7 +1,7 @@
#
# Resolver/Synchro drivers
#
-comment "Resolver to digital converters"
+menu "Resolver to digital converters"
config AD2S90
tristate "Analog Devices ad2s90 driver"
@@ -10,9 +10,10 @@ config AD2S90
Say yes here to build support for Analog Devices spi resolver
to digital converters, ad2s90, provides direct access via sysfs.
-config AD2S120X
- tristate "Analog Devices ad2s120x driver"
+config AD2S1200
+ tristate "Analog Devices ad2s1200/ad2s1205 driver"
depends on SPI
+ depends on GENERIC_GPIO
help
Say yes here to build support for Analog Devices spi resolver
to digital converters, ad2s1200 and ad2s1205, provides direct access
@@ -21,7 +22,9 @@ config AD2S120X
config AD2S1210
tristate "Analog Devices ad2s1210 driver"
depends on SPI
+ depends on GENERIC_GPIO
help
Say yes here to build support for Analog Devices spi resolver
to digital converters, ad2s1210, provides direct access via sysfs.
+endmenu
diff --git a/drivers/staging/iio/resolver/Makefile b/drivers/staging/iio/resolver/Makefile
index 0b84a89e6ca..14375e444eb 100644
--- a/drivers/staging/iio/resolver/Makefile
+++ b/drivers/staging/iio/resolver/Makefile
@@ -3,5 +3,5 @@
#
obj-$(CONFIG_AD2S90) += ad2s90.o
-obj-$(CONFIG_AD2S120X) += ad2s120x.o
+obj-$(CONFIG_AD2S1200) += ad2s1200.o
obj-$(CONFIG_AD2S1210) += ad2s1210.o
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
new file mode 100644
index 00000000000..d7ad46aed0f
--- /dev/null
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -0,0 +1,188 @@
+/*
+ * ad2s1200.c simple support for the ADI Resolver to Digital Converters:
+ * AD2S1200/1205
+ *
+ * Copyright (c) 2010-2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad2s1200"
+
+/* input pin sample and rdvel is controlled by driver */
+#define AD2S1200_PN 2
+
+/* input clock on serial interface */
+#define AD2S1200_HZ 8192000
+/* clock period in nano second */
+#define AD2S1200_TSCLK (1000000000/AD2S1200_HZ)
+
+struct ad2s1200_state {
+ struct mutex lock;
+ struct spi_device *sdev;
+ int sample;
+ int rdvel;
+ u8 rx[2] ____cacheline_aligned;
+};
+
+static int ad2s1200_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ int ret = 0;
+ s16 vel;
+ struct ad2s1200_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&st->lock);
+ gpio_set_value(st->sample, 0);
+ /* delay (6 * AD2S1200_TSCLK + 20) nano seconds */
+ udelay(1);
+ gpio_set_value(st->sample, 1);
+ gpio_set_value(st->rdvel, !!(chan->type == IIO_ANGL));
+ ret = spi_read(st->sdev, st->rx, 2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ switch (chan->type) {
+ case IIO_ANGL:
+ *val = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+ break;
+ case IIO_ANGL_VEL:
+ vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+ vel = (vel << 4) >> 4;
+ *val = vel;
+ default:
+ mutex_unlock(&st->lock);
+ return -EINVAL;
+ }
+ /* delay (2 * AD2S1200_TSCLK + 20) ns for sample pulse */
+ udelay(1);
+ mutex_unlock(&st->lock);
+ return IIO_VAL_INT;
+}
+
+static const struct iio_chan_spec ad2s1200_channels[] = {
+ {
+ .type = IIO_ANGL,
+ .indexed = 1,
+ .channel = 0,
+ }, {
+ .type = IIO_ANGL_VEL,
+ .indexed = 1,
+ .channel = 0,
+ }
+};
+
+static const struct iio_info ad2s1200_info = {
+ .read_raw = &ad2s1200_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int __devinit ad2s1200_probe(struct spi_device *spi)
+{
+ struct ad2s1200_state *st;
+ struct iio_dev *indio_dev;
+ int pn, ret = 0;
+ unsigned short *pins = spi->dev.platform_data;
+
+ for (pn = 0; pn < AD2S1200_PN; pn++)
+ if (gpio_request_one(pins[pn], GPIOF_DIR_OUT, DRV_NAME)) {
+ pr_err("%s: request gpio pin %d failed\n",
+ DRV_NAME, pins[pn]);
+ goto error_ret;
+ }
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ spi_set_drvdata(spi, indio_dev);
+ st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
+ st->sdev = spi;
+ st->sample = pins[0];
+ st->rdvel = pins[1];
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ad2s1200_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad2s1200_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad2s1200_channels);
+ indio_dev->name = spi_get_device_id(spi)->name;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ spi->max_speed_hz = AD2S1200_HZ;
+ spi->mode = SPI_MODE_3;
+ spi_setup(spi);
+
+ return 0;
+
+error_free_dev:
+ iio_free_device(indio_dev);
+error_ret:
+ for (--pn; pn >= 0; pn--)
+ gpio_free(pins[pn]);
+ return ret;
+}
+
+static int __devexit ad2s1200_remove(struct spi_device *spi)
+{
+ iio_device_unregister(spi_get_drvdata(spi));
+ iio_free_device(spi_get_drvdata(spi));
+
+ return 0;
+}
+
+static const struct spi_device_id ad2s1200_id[] = {
+ { "ad2s1200" },
+ { "ad2s1205" },
+ {}
+};
+
+static struct spi_driver ad2s1200_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad2s1200_probe,
+ .remove = __devexit_p(ad2s1200_remove),
+ .id_table = ad2s1200_id,
+};
+
+static __init int ad2s1200_spi_init(void)
+{
+ return spi_register_driver(&ad2s1200_driver);
+}
+module_init(ad2s1200_spi_init);
+
+static __exit void ad2s1200_spi_exit(void)
+{
+ spi_unregister_driver(&ad2s1200_driver);
+}
+module_exit(ad2s1200_spi_exit);
+
+MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD2S1200/1205 Resolver to Digital SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/resolver/ad2s120x.c b/drivers/staging/iio/resolver/ad2s120x.c
deleted file mode 100644
index bed4c725f2d..00000000000
--- a/drivers/staging/iio/resolver/ad2s120x.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * ad2s120x.c simple support for the ADI Resolver to Digital Converters: AD2S1200/1205
- *
- * Copyright (c) 2010-2010 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-
-#include "../iio.h"
-#include "../sysfs.h"
-
-#define DRV_NAME "ad2s120x"
-
-/* input pin sample and rdvel is controlled by driver */
-#define AD2S120X_PN 2
-
-/* input clock on serial interface */
-#define AD2S120X_HZ 8192000
-/* clock period in nano second */
-#define AD2S120X_TSCLK (1000000000/AD2S120X_HZ)
-
-struct ad2s120x_state {
- struct mutex lock;
- struct spi_device *sdev;
- int sample;
- int rdvel;
- u8 rx[2] ____cacheline_aligned;
-};
-
-static ssize_t ad2s120x_show_val(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int ret = 0;
- ssize_t len = 0;
- u16 pos;
- s16 vel;
- u8 status;
- struct ad2s120x_state *st = iio_priv(dev_get_drvdata(dev));
- struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
-
- mutex_lock(&st->lock);
-
- gpio_set_value(st->sample, 0);
- /* delay (6 * AD2S120X_TSCLK + 20) nano seconds */
- udelay(1);
- gpio_set_value(st->sample, 1);
- gpio_set_value(st->rdvel, iattr->address);
- ret = spi_read(st->sdev, st->rx, 2);
- if (ret < 0)
- goto error_ret;
- status = st->rx[1];
- if (iattr->address)
- pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
- else {
- vel = (st->rx[0] & 0x80) ? 0xf000 : 0;
- vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
- }
- len = sprintf(buf, "%d %c%c%c%c ", iattr->address ? pos : vel,
- (status & 0x8) ? 'P' : 'V',
- (status & 0x4) ? 'd' : '_',
- (status & 0x2) ? 'l' : '_',
- (status & 0x1) ? '1' : '0');
-error_ret:
- /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */
- udelay(1);
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-static IIO_DEVICE_ATTR(pos, S_IRUGO, ad2s120x_show_val, NULL, 1);
-static IIO_DEVICE_ATTR(vel, S_IRUGO, ad2s120x_show_val, NULL, 0);
-
-static struct attribute *ad2s120x_attributes[] = {
- &iio_dev_attr_pos.dev_attr.attr,
- &iio_dev_attr_vel.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad2s120x_attribute_group = {
- .attrs = ad2s120x_attributes,
-};
-
-static const struct iio_info ad2s120x_info = {
- .attrs = &ad2s120x_attribute_group,
- .driver_module = THIS_MODULE,
-};
-
-static int __devinit ad2s120x_probe(struct spi_device *spi)
-{
- struct ad2s120x_state *st;
- struct iio_dev *indio_dev;
- int pn, ret = 0;
- unsigned short *pins = spi->dev.platform_data;
-
- for (pn = 0; pn < AD2S120X_PN; pn++)
- if (gpio_request_one(pins[pn], GPIOF_DIR_OUT, DRV_NAME)) {
- pr_err("%s: request gpio pin %d failed\n",
- DRV_NAME, pins[pn]);
- goto error_ret;
- }
- indio_dev = iio_allocate_device(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- spi_set_drvdata(spi, indio_dev);
- st = iio_priv(indio_dev);
- mutex_init(&st->lock);
- st->sdev = spi;
- st->sample = pins[0];
- st->rdvel = pins[1];
-
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &ad2s120x_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
-
- spi->max_speed_hz = AD2S120X_HZ;
- spi->mode = SPI_MODE_3;
- spi_setup(spi);
-
- return 0;
-
-error_free_dev:
- iio_free_device(indio_dev);
-error_ret:
- for (--pn; pn >= 0; pn--)
- gpio_free(pins[pn]);
- return ret;
-}
-
-static int __devexit ad2s120x_remove(struct spi_device *spi)
-{
- iio_device_unregister(spi_get_drvdata(spi));
-
- return 0;
-}
-
-static struct spi_driver ad2s120x_driver = {
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- },
- .probe = ad2s120x_probe,
- .remove = __devexit_p(ad2s120x_remove),
-};
-
-static __init int ad2s120x_spi_init(void)
-{
- return spi_register_driver(&ad2s120x_driver);
-}
-module_init(ad2s120x_spi_init);
-
-static __exit void ad2s120x_spi_exit(void)
-{
- spi_unregister_driver(&ad2s120x_driver);
-}
-module_exit(ad2s120x_spi_exit);
-
-MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices AD2S1200/1205 Resolver to Digital SPI driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index ecaf7bb790f..6401a627362 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -16,6 +16,7 @@
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -194,47 +195,6 @@ static inline int ad2s1210_soft_reset(struct ad2s1210_state *st)
return ad2s1210_config_write(st, 0x0);
}
-
-/* return the OLD DATA since last spi bus write */
-static ssize_t ad2s1210_show_raw(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
- int ret = 0;
-
- mutex_lock(&st->lock);
- if (st->old_data) {
- ret = sprintf(buf, "0x%x\n", st->rx[0]);
- st->old_data = false;
- }
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-static ssize_t ad2s1210_store_raw(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
- unsigned long udata;
- unsigned char data;
- int ret;
-
- ret = strict_strtoul(buf, 16, &udata);
- if (ret)
- return -EINVAL;
-
- data = udata & 0xff;
- mutex_lock(&st->lock);
- ret = ad2s1210_config_write(st, data);
- mutex_unlock(&st->lock);
-
- return ret < 0 ? ret : len;
-}
-
static ssize_t ad2s1210_store_softreset(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -513,75 +473,72 @@ error_ret:
return ret < 0 ? ret : len;
}
-static ssize_t ad2s1210_show_pos(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int ad2s1210_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
{
+ struct ad2s1210_state *st = iio_priv(indio_dev);
+ bool negative;
int ret = 0;
- ssize_t len = 0;
u16 pos;
- struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
-
- mutex_lock(&st->lock);
- gpio_set_value(st->pdata->sample, 0);
- /* delay (6 * tck + 20) nano seconds */
- udelay(1);
-
- ad2s1210_set_mode(MOD_POS, st);
- ret = spi_read(st->sdev, st->rx, 2);
- if (ret)
- goto error_ret;
- pos = be16_to_cpup((u16 *)st->rx);
- if (st->hysteresis)
- pos >>= 16 - st->resolution;
- len = sprintf(buf, "%d\n", pos);
-error_ret:
- gpio_set_value(st->pdata->sample, 1);
- /* delay (2 * tck + 20) nano seconds */
- udelay(1);
- mutex_unlock(&st->lock);
-
- return ret < 0 ? ret : len;
-}
-
-static ssize_t ad2s1210_show_vel(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned short negative;
- int ret = 0;
- ssize_t len = 0;
s16 vel;
- struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
mutex_lock(&st->lock);
gpio_set_value(st->pdata->sample, 0);
/* delay (6 * tck + 20) nano seconds */
udelay(1);
- ad2s1210_set_mode(MOD_VEL, st);
+ switch (chan->type) {
+ case IIO_ANGL:
+ ad2s1210_set_mode(MOD_POS, st);
+ break;
+ case IIO_ANGL_VEL:
+ ad2s1210_set_mode(MOD_VEL, st);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret < 0)
+ goto error_ret;
ret = spi_read(st->sdev, st->rx, 2);
- if (ret)
+ if (ret < 0)
goto error_ret;
- negative = st->rx[0] & 0x80;
- vel = be16_to_cpup((s16 *)st->rx);
- vel >>= 16 - st->resolution;
- if (vel & 0x8000) {
- negative = (0xffff >> st->resolution) << st->resolution;
- vel |= negative;
+
+ switch (chan->type) {
+ case IIO_ANGL:
+ pos = be16_to_cpup((u16 *)st->rx);
+ if (st->hysteresis)
+ pos >>= 16 - st->resolution;
+ *val = pos;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_ANGL_VEL:
+ negative = st->rx[0] & 0x80;
+ vel = be16_to_cpup((s16 *)st->rx);
+ vel >>= 16 - st->resolution;
+ if (vel & 0x8000) {
+ negative = (0xffff >> st->resolution) << st->resolution;
+ vel |= negative;
+ }
+ *val = vel;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ mutex_unlock(&st->lock);
+ return -EINVAL;
}
- len = sprintf(buf, "%d\n", vel);
+
error_ret:
gpio_set_value(st->pdata->sample, 1);
/* delay (2 * tck + 20) nano seconds */
udelay(1);
mutex_unlock(&st->lock);
-
- return ret < 0 ? ret : len;
+ return ret;
}
-static IIO_DEVICE_ATTR(raw_io, S_IRUGO | S_IWUSR,
- ad2s1210_show_raw, ad2s1210_store_raw, 0);
static IIO_DEVICE_ATTR(reset, S_IWUSR,
NULL, ad2s1210_store_softreset, 0);
static IIO_DEVICE_ATTR(fclkin, S_IRUGO | S_IWUSR,
@@ -594,8 +551,7 @@ static IIO_DEVICE_ATTR(bits, S_IRUGO | S_IWUSR,
ad2s1210_show_resolution, ad2s1210_store_resolution, 0);
static IIO_DEVICE_ATTR(fault, S_IRUGO | S_IWUSR,
ad2s1210_show_fault, ad2s1210_clear_fault, 0);
-static IIO_DEVICE_ATTR(pos, S_IRUGO, ad2s1210_show_pos, NULL, 0);
-static IIO_DEVICE_ATTR(vel, S_IRUGO, ad2s1210_show_vel, NULL, 0);
+
static IIO_DEVICE_ATTR(los_thrd, S_IRUGO | S_IWUSR,
ad2s1210_show_reg, ad2s1210_store_reg,
AD2S1210_REG_LOS_THRD);
@@ -618,16 +574,26 @@ static IIO_DEVICE_ATTR(lot_low_thrd, S_IRUGO | S_IWUSR,
ad2s1210_show_reg, ad2s1210_store_reg,
AD2S1210_REG_LOT_LOW_THRD);
+
+static struct iio_chan_spec ad2s1210_channels[] = {
+ {
+ .type = IIO_ANGL,
+ .indexed = 1,
+ .channel = 0,
+ }, {
+ .type = IIO_ANGL_VEL,
+ .indexed = 1,
+ .channel = 0,
+ }
+};
+
static struct attribute *ad2s1210_attributes[] = {
- &iio_dev_attr_raw_io.dev_attr.attr,
&iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_fclkin.dev_attr.attr,
&iio_dev_attr_fexcit.dev_attr.attr,
&iio_dev_attr_control.dev_attr.attr,
&iio_dev_attr_bits.dev_attr.attr,
&iio_dev_attr_fault.dev_attr.attr,
- &iio_dev_attr_pos.dev_attr.attr,
- &iio_dev_attr_vel.dev_attr.attr,
&iio_dev_attr_los_thrd.dev_attr.attr,
&iio_dev_attr_dos_ovr_thrd.dev_attr.attr,
&iio_dev_attr_dos_mis_thrd.dev_attr.attr,
@@ -639,7 +605,6 @@ static struct attribute *ad2s1210_attributes[] = {
};
static const struct attribute_group ad2s1210_attribute_group = {
- .name = DRV_NAME,
.attrs = ad2s1210_attributes,
};
@@ -681,51 +646,37 @@ error_ret:
}
static const struct iio_info ad2s1210_info = {
+ .read_raw = &ad2s1210_read_raw,
.attrs = &ad2s1210_attribute_group,
.driver_module = THIS_MODULE,
};
static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
{
- int ret;
unsigned long flags = st->pdata->gpioin ? GPIOF_DIR_IN : GPIOF_DIR_OUT;
+ struct gpio ad2s1210_gpios[] = {
+ { st->pdata->sample, GPIOF_DIR_IN, "sample" },
+ { st->pdata->a[0], flags, "a0" },
+ { st->pdata->a[1], flags, "a1" },
+ { st->pdata->res[0], flags, "res0" },
+ { st->pdata->res[0], flags, "res1" },
+ };
- ret = gpio_request_one(st->pdata->sample, GPIOF_DIR_IN, "sample");
- if (ret < 0)
- goto error_ret;
- ret = gpio_request_one(st->pdata->a[0], flags, "a0");
- if (ret < 0)
- goto error_free_sample;
- ret = gpio_request_one(st->pdata->a[1], flags, "a1");
- if (ret < 0)
- goto error_free_a0;
- ret = gpio_request_one(st->pdata->res[1], flags, "res0");
- if (ret < 0)
- goto error_free_a1;
- ret = gpio_request_one(st->pdata->res[1], flags, "res1");
- if (ret < 0)
- goto error_free_res0;
-
- return 0;
-error_free_res0:
- gpio_free(st->pdata->res[0]);
-error_free_a1:
- gpio_free(st->pdata->a[1]);
-error_free_a0:
- gpio_free(st->pdata->a[0]);
-error_free_sample:
- gpio_free(st->pdata->sample);
-error_ret:
- return ret;
+ return gpio_request_array(ad2s1210_gpios, ARRAY_SIZE(ad2s1210_gpios));
}
static void ad2s1210_free_gpios(struct ad2s1210_state *st)
{
- gpio_free(st->pdata->res[1]);
- gpio_free(st->pdata->res[0]);
- gpio_free(st->pdata->a[1]);
- gpio_free(st->pdata->a[0]);
- gpio_free(st->pdata->sample);
+ unsigned long flags = st->pdata->gpioin ? GPIOF_DIR_IN : GPIOF_DIR_OUT;
+ struct gpio ad2s1210_gpios[] = {
+ { st->pdata->sample, GPIOF_DIR_IN, "sample" },
+ { st->pdata->a[0], flags, "a0" },
+ { st->pdata->a[1], flags, "a1" },
+ { st->pdata->res[0], flags, "res0" },
+ { st->pdata->res[0], flags, "res1" },
+ };
+
+ gpio_free_array(ad2s1210_gpios, ARRAY_SIZE(ad2s1210_gpios));
}
static int __devinit ad2s1210_probe(struct spi_device *spi)
@@ -760,6 +711,9 @@ static int __devinit ad2s1210_probe(struct spi_device *spi)
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &ad2s1210_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad2s1210_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad2s1210_channels);
+ indio_dev->name = spi_get_device_id(spi)->name;
ret = iio_device_register(indio_dev);
if (ret)
@@ -783,13 +737,19 @@ error_ret:
static int __devexit ad2s1210_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad2s1210_state *st = iio_priv(indio_dev);
+
iio_device_unregister(indio_dev);
- ad2s1210_free_gpios(st);
+ ad2s1210_free_gpios(iio_priv(indio_dev));
+ iio_free_device(indio_dev);
return 0;
}
+static const struct spi_device_id ad2s1210_id[] = {
+ { "ad2s1210" },
+ {}
+};
+
static struct spi_driver ad2s1210_driver = {
.driver = {
.name = DRV_NAME,
@@ -797,6 +757,7 @@ static struct spi_driver ad2s1210_driver = {
},
.probe = ad2s1210_probe,
.remove = __devexit_p(ad2s1210_remove),
+ .id_table = ad2s1210_id,
};
static __init int ad2s1210_spi_init(void)
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index 166e2414ac8..a9200d949dc 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -14,59 +14,49 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#define DRV_NAME "ad2s90"
-
struct ad2s90_state {
struct mutex lock;
- struct iio_dev *idev;
struct spi_device *sdev;
u8 rx[2] ____cacheline_aligned;
};
-static ssize_t ad2s90_show_angular(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int ad2s90_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
{
int ret;
- ssize_t len = 0;
- u16 val;
- struct ad2s90_state *st = iio_priv(dev_get_drvdata(dev));
+ struct ad2s90_state *st = iio_priv(indio_dev);
mutex_lock(&st->lock);
ret = spi_read(st->sdev, st->rx, 2);
if (ret)
goto error_ret;
- val = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
- len = sprintf(buf, "%d\n", val);
+ *val = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+
error_ret:
mutex_unlock(&st->lock);
- return ret ? ret : len;
+ return IIO_VAL_INT;
}
-#define IIO_DEV_ATTR_SIMPLE_RESOLVER(_show) \
- IIO_DEVICE_ATTR(angular, S_IRUGO, _show, NULL, 0)
-
-static IIO_DEV_ATTR_SIMPLE_RESOLVER(ad2s90_show_angular);
-
-static struct attribute *ad2s90_attributes[] = {
- &iio_dev_attr_angular.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad2s90_attribute_group = {
- .name = DRV_NAME,
- .attrs = ad2s90_attributes,
-};
-
static const struct iio_info ad2s90_info = {
- .attrs = &ad2s90_attribute_group,
+ .read_raw = &ad2s90_read_raw,
.driver_module = THIS_MODULE,
};
+static const struct iio_chan_spec ad2s90_chan = {
+ .type = IIO_ANGL,
+ .indexed = 1,
+ .channel = 0,
+};
+
static int __devinit ad2s90_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -86,8 +76,11 @@ static int __devinit ad2s90_probe(struct spi_device *spi)
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &ad2s90_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = &ad2s90_chan;
+ indio_dev->num_channels = 1;
+ indio_dev->name = spi_get_device_id(spi)->name;
- ret = iio_device_register(st->idev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
@@ -99,7 +92,7 @@ static int __devinit ad2s90_probe(struct spi_device *spi)
return 0;
error_free_dev:
- iio_free_device(st->idev);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -107,17 +100,24 @@ error_ret:
static int __devexit ad2s90_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
+ iio_free_device(spi_get_drvdata(spi));
return 0;
}
+static const struct spi_device_id ad2s90_id[] = {
+ { "ad2s90" },
+ {}
+};
+
static struct spi_driver ad2s90_driver = {
.driver = {
- .name = DRV_NAME,
+ .name = "ad2s90",
.owner = THIS_MODULE,
},
.probe = ad2s90_probe,
.remove = __devexit_p(ad2s90_remove),
+ .id_table = ad2s90_id,
};
static __init int ad2s90_spi_init(void)
diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
deleted file mode 100644
index 3f26f7175b6..00000000000
--- a/drivers/staging/iio/ring_generic.h
+++ /dev/null
@@ -1,288 +0,0 @@
-/* The industrial I/O core - generic ring buffer interfaces.
- *
- * Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#ifndef _IIO_RING_GENERIC_H_
-#define _IIO_RING_GENERIC_H_
-#include "iio.h"
-
-#ifdef CONFIG_IIO_RING_BUFFER
-
-struct iio_ring_buffer;
-
-/**
- * struct iio_ring_access_funcs - access functions for ring buffers.
- * @mark_in_use: reference counting, typically to prevent module removal
- * @unmark_in_use: reduce reference count when no longer using ring buffer
- * @store_to: actually store stuff to the ring buffer
- * @read_last: get the last element stored
- * @read_first_n: try to get a specified number of elements (must exist)
- * @mark_param_change: notify ring that some relevant parameter has changed
- * Often this means the underlying storage may need to
- * change.
- * @request_update: if a parameter change has been marked, update underlying
- * storage.
- * @get_bytes_per_datum:get current bytes per datum
- * @set_bytes_per_datum:set number of bytes per datum
- * @get_length: get number of datums in ring
- * @set_length: set number of datums in ring
- * @is_enabled: query if ring is currently being used
- * @enable: enable the ring
- *
- * The purpose of this structure is to make the ring buffer element
- * modular as event for a given driver, different usecases may require
- * different ring designs (space efficiency vs speed for example).
- *
- * It is worth noting that a given ring implementation may only support a small
- * proportion of these functions. The core code 'should' cope fine with any of
- * them not existing.
- **/
-struct iio_ring_access_funcs {
- void (*mark_in_use)(struct iio_ring_buffer *ring);
- void (*unmark_in_use)(struct iio_ring_buffer *ring);
-
- int (*store_to)(struct iio_ring_buffer *ring, u8 *data, s64 timestamp);
- int (*read_last)(struct iio_ring_buffer *ring, u8 *data);
- int (*read_first_n)(struct iio_ring_buffer *ring,
- size_t n,
- char __user *buf);
-
- int (*mark_param_change)(struct iio_ring_buffer *ring);
- int (*request_update)(struct iio_ring_buffer *ring);
-
- int (*get_bytes_per_datum)(struct iio_ring_buffer *ring);
- int (*set_bytes_per_datum)(struct iio_ring_buffer *ring, size_t bpd);
- int (*get_length)(struct iio_ring_buffer *ring);
- int (*set_length)(struct iio_ring_buffer *ring, int length);
-
- int (*is_enabled)(struct iio_ring_buffer *ring);
- int (*enable)(struct iio_ring_buffer *ring);
-};
-
-struct iio_ring_setup_ops {
- int (*preenable)(struct iio_dev *);
- int (*postenable)(struct iio_dev *);
- int (*predisable)(struct iio_dev *);
- int (*postdisable)(struct iio_dev *);
-};
-
-/**
- * struct iio_ring_buffer - general ring buffer structure
- * @dev: ring buffer device struct
- * @indio_dev: industrial I/O device structure
- * @owner: module that owns the ring buffer (for ref counting)
- * @length: [DEVICE] number of datums in ring
- * @bytes_per_datum: [DEVICE] size of individual datum including timestamp
- * @bpe: [DEVICE] size of individual channel value
- * @scan_el_attrs: [DRIVER] control of scan elements if that scan mode
- * control method is used
- * @scan_count: [INTERN] the number of elements in the current scan mode
- * @scan_mask: [INTERN] bitmask used in masking scan mode elements
- * @scan_timestamp: [INTERN] does the scan mode include a timestamp
- * @access_handler: [INTERN] chrdev access handling
- * @access: [DRIVER] ring access functions associated with the
- * implementation.
- * @preenable: [DRIVER] function to run prior to marking ring enabled
- * @postenable: [DRIVER] function to run after marking ring enabled
- * @predisable: [DRIVER] function to run prior to marking ring disabled
- * @postdisable: [DRIVER] function to run after marking ring disabled
- **/
-struct iio_ring_buffer {
- struct device dev;
- struct iio_dev *indio_dev;
- struct module *owner;
- int length;
- int bytes_per_datum;
- int bpe;
- struct attribute_group *scan_el_attrs;
- int scan_count;
- unsigned long scan_mask;
- bool scan_timestamp;
- struct iio_handler access_handler;
- const struct iio_ring_access_funcs *access;
- const struct iio_ring_setup_ops *setup_ops;
- struct list_head scan_el_dev_attr_list;
-
- wait_queue_head_t pollq;
- bool stufftoread;
-};
-
-/**
- * iio_ring_buffer_init() - Initialize the buffer structure
- * @ring: buffer to be initialized
- * @dev_info: the iio device the buffer is assocated with
- **/
-void iio_ring_buffer_init(struct iio_ring_buffer *ring,
- struct iio_dev *dev_info);
-
-/**
- * __iio_update_ring_buffer() - update common elements of ring buffers
- * @ring: ring buffer that is the event source
- * @bytes_per_datum: size of individual datum including timestamp
- * @length: number of datums in ring
- **/
-static inline void __iio_update_ring_buffer(struct iio_ring_buffer *ring,
- int bytes_per_datum, int length)
-{
- ring->bytes_per_datum = bytes_per_datum;
- ring->length = length;
-}
-
-/*
- * These are mainly provided to allow for a change of implementation if a device
- * has a large number of scan elements
- */
-#define IIO_MAX_SCAN_LENGTH 31
-
-/* note 0 used as error indicator as it doesn't make sense. */
-static inline u32 iio_scan_mask_match(u32 *av_masks, u32 mask)
-{
- while (*av_masks) {
- if (!(~*av_masks & mask))
- return *av_masks;
- av_masks++;
- }
- return 0;
-}
-
-static inline int iio_scan_mask_query(struct iio_ring_buffer *ring, int bit)
-{
- struct iio_dev *dev_info = ring->indio_dev;
- u32 mask;
-
- if (bit > IIO_MAX_SCAN_LENGTH)
- return -EINVAL;
-
- if (!ring->scan_mask)
- return 0;
-
- if (dev_info->available_scan_masks)
- mask = iio_scan_mask_match(dev_info->available_scan_masks,
- ring->scan_mask);
- else
- mask = ring->scan_mask;
-
- if (!mask)
- return -EINVAL;
-
- return !!(mask & (1 << bit));
-};
-
-/**
- * iio_scan_mask_set() - set particular bit in the scan mask
- * @ring: the ring buffer whose scan mask we are interested in
- * @bit: the bit to be set.
- **/
-static inline int iio_scan_mask_set(struct iio_ring_buffer *ring, int bit)
-{
- struct iio_dev *dev_info = ring->indio_dev;
- u32 mask;
- u32 trialmask = ring->scan_mask | (1 << bit);
-
- if (bit > IIO_MAX_SCAN_LENGTH)
- return -EINVAL;
- if (dev_info->available_scan_masks) {
- mask = iio_scan_mask_match(dev_info->available_scan_masks,
- trialmask);
- if (!mask)
- return -EINVAL;
- }
- ring->scan_mask = trialmask;
- ring->scan_count++;
-
- return 0;
-};
-
-/**
- * iio_put_ring_buffer() - notify done with buffer
- * @ring: the buffer we are done with.
- **/
-static inline void iio_put_ring_buffer(struct iio_ring_buffer *ring)
-{
- put_device(&ring->dev);
-};
-
-#define to_iio_ring_buffer(d) \
- container_of(d, struct iio_ring_buffer, dev)
-
-/**
- * iio_ring_buffer_register_ex() - register the buffer with IIO core
- * @ring: the buffer to be registered
- * @id: the id of the buffer (typically 0)
- **/
-int iio_ring_buffer_register_ex(struct iio_ring_buffer *ring, int id,
- const struct iio_chan_spec *channels,
- int num_channels);
-
-void iio_ring_access_release(struct device *dev);
-
-/**
- * iio_ring_buffer_unregister() - unregister the buffer from IIO core
- * @ring: the buffer to be unregistered
- **/
-void iio_ring_buffer_unregister(struct iio_ring_buffer *ring);
-
-/**
- * iio_read_ring_length() - attr func to get number of datums in the buffer
- **/
-ssize_t iio_read_ring_length(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-/**
- * iio_write_ring_length() - attr func to set number of datums in the buffer
- **/
-ssize_t iio_write_ring_length(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len);
-/**
- * iio_read_ring_bytes_per_datum() - attr for number of bytes in whole datum
- **/
-ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-/**
- * iio_store_ring_enable() - attr to turn the buffer on
- **/
-ssize_t iio_store_ring_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len);
-/**
- * iio_show_ring_enable() - attr to see if the buffer is on
- **/
-ssize_t iio_show_ring_enable(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-#define IIO_RING_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR, \
- iio_read_ring_length, \
- iio_write_ring_length)
-#define IIO_RING_BYTES_PER_DATUM_ATTR DEVICE_ATTR(bytes_per_datum, S_IRUGO | S_IWUSR, \
- iio_read_ring_bytes_per_datum, NULL)
-#define IIO_RING_ENABLE_ATTR DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, \
- iio_show_ring_enable, \
- iio_store_ring_enable)
-
-int iio_sw_ring_preenable(struct iio_dev *indio_dev);
-
-#else /* CONFIG_IIO_RING_BUFFER */
-
-static inline int iio_ring_buffer_register_ex(struct iio_ring_buffer *ring,
- int id,
- struct iio_chan_spec *channels,
- int num_channels)
-{
- return 0;
-}
-
-static inline void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
-{};
-
-#endif /* CONFIG_IIO_RING_BUFFER */
-
-#endif /* _IIO_RING_GENERIC_H_ */
diff --git a/drivers/staging/iio/ring_hw.h b/drivers/staging/iio/ring_hw.h
index bb8cfd28d45..cad8a2ed9b6 100644
--- a/drivers/staging/iio/ring_hw.h
+++ b/drivers/staging/iio/ring_hw.h
@@ -14,9 +14,9 @@
* @buf: generic ring buffer elements
* @private: device specific data
*/
-struct iio_hw_ring_buffer {
- struct iio_ring_buffer buf;
+struct iio_hw_buffer {
+ struct iio_buffer buf;
void *private;
};
-#define iio_to_hw_ring_buf(r) container_of(r, struct iio_hw_ring_buffer, buf)
+#define iio_to_hw_buf(r) container_of(r, struct iio_hw_buffer, buf)
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
index feb84e27c36..66a34addb75 100644
--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -30,10 +30,10 @@
* @use_lock: lock to prevent change in size when in use
*
* Note that the first element of all ring buffers must be a
- * struct iio_ring_buffer.
+ * struct iio_buffer.
**/
struct iio_sw_ring_buffer {
- struct iio_ring_buffer buf;
+ struct iio_buffer buf;
unsigned char *data;
unsigned char *read_p;
unsigned char *write_p;
@@ -52,7 +52,7 @@ static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
{
if ((length == 0) || (bytes_per_datum == 0))
return -EINVAL;
- __iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
+ __iio_update_buffer(&ring->buf, bytes_per_datum, length);
ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
ring->read_p = NULL;
ring->write_p = NULL;
@@ -71,7 +71,7 @@ static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
kfree(ring->data);
}
-static void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
+static void iio_mark_sw_rb_in_use(struct iio_buffer *r)
{
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
spin_lock(&ring->use_lock);
@@ -79,7 +79,7 @@ static void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
spin_unlock(&ring->use_lock);
}
-static void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
+static void iio_unmark_sw_rb_in_use(struct iio_buffer *r)
{
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
spin_lock(&ring->use_lock);
@@ -166,7 +166,7 @@ static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
return ret;
}
-static int iio_read_first_n_sw_rb(struct iio_ring_buffer *r,
+static int iio_read_first_n_sw_rb(struct iio_buffer *r,
size_t n, char __user *buf)
{
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
@@ -297,7 +297,7 @@ error_ret:
return ret;
}
-static int iio_store_to_sw_rb(struct iio_ring_buffer *r,
+static int iio_store_to_sw_rb(struct iio_buffer *r,
u8 *data,
s64 timestamp)
{
@@ -327,13 +327,13 @@ again:
return 0;
}
-static int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
+static int iio_read_last_from_sw_rb(struct iio_buffer *r,
unsigned char *data)
{
return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
-static int iio_request_update_sw_rb(struct iio_ring_buffer *r)
+static int iio_request_update_sw_rb(struct iio_buffer *r)
{
int ret = 0;
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
@@ -354,13 +354,13 @@ error_ret:
return ret;
}
-static int iio_get_bytes_per_datum_sw_rb(struct iio_ring_buffer *r)
+static int iio_get_bytes_per_datum_sw_rb(struct iio_buffer *r)
{
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
return ring->buf.bytes_per_datum;
}
-static int iio_set_bytes_per_datum_sw_rb(struct iio_ring_buffer *r, size_t bpd)
+static int iio_set_bytes_per_datum_sw_rb(struct iio_buffer *r, size_t bpd)
{
if (r->bytes_per_datum != bpd) {
r->bytes_per_datum = bpd;
@@ -370,12 +370,12 @@ static int iio_set_bytes_per_datum_sw_rb(struct iio_ring_buffer *r, size_t bpd)
return 0;
}
-static int iio_get_length_sw_rb(struct iio_ring_buffer *r)
+static int iio_get_length_sw_rb(struct iio_buffer *r)
{
return r->length;
}
-static int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
+static int iio_set_length_sw_rb(struct iio_buffer *r, int length)
{
if (r->length != length) {
r->length = length;
@@ -385,23 +385,16 @@ static int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
return 0;
}
-static int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
+static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
{
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
ring->update_needed = true;
return 0;
}
-static void iio_sw_rb_release(struct device *dev)
-{
- struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
- iio_ring_access_release(&r->dev);
- kfree(iio_to_sw_ring(r));
-}
-
-static IIO_RING_ENABLE_ATTR;
-static IIO_RING_BYTES_PER_DATUM_ATTR;
-static IIO_RING_LENGTH_ATTR;
+static IIO_BUFFER_ENABLE_ATTR;
+static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
+static IIO_BUFFER_LENGTH_ATTR;
/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
@@ -413,21 +406,12 @@ static struct attribute *iio_ring_attributes[] = {
static struct attribute_group iio_ring_attribute_group = {
.attrs = iio_ring_attributes,
+ .name = "buffer",
};
-static const struct attribute_group *iio_ring_attribute_groups[] = {
- &iio_ring_attribute_group,
- NULL
-};
-
-static struct device_type iio_sw_ring_type = {
- .release = iio_sw_rb_release,
- .groups = iio_ring_attribute_groups,
-};
-
-struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
+struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
- struct iio_ring_buffer *buf;
+ struct iio_buffer *buf;
struct iio_sw_ring_buffer *ring;
ring = kzalloc(sizeof *ring, GFP_KERNEL);
@@ -435,24 +419,21 @@ struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
return NULL;
ring->update_needed = true;
buf = &ring->buf;
- iio_ring_buffer_init(buf, indio_dev);
+ iio_buffer_init(buf, indio_dev);
__iio_init_sw_ring_buffer(ring);
- buf->dev.type = &iio_sw_ring_type;
- buf->dev.parent = &indio_dev->dev;
- dev_set_drvdata(&buf->dev, (void *)buf);
+ buf->attrs = &iio_ring_attribute_group;
return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);
-void iio_sw_rb_free(struct iio_ring_buffer *r)
+void iio_sw_rb_free(struct iio_buffer *r)
{
- if (r)
- iio_put_ring_buffer(r);
+ kfree(iio_to_sw_ring(r));
}
EXPORT_SYMBOL(iio_sw_rb_free);
-const struct iio_ring_access_funcs ring_sw_access_funcs = {
+const struct iio_buffer_access_funcs ring_sw_access_funcs = {
.mark_in_use = &iio_mark_sw_rb_in_use,
.unmark_in_use = &iio_unmark_sw_rb_in_use,
.store_to = &iio_store_to_sw_rb,
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
index 15271639534..a3e15784c22 100644
--- a/drivers/staging/iio/ring_sw.h
+++ b/drivers/staging/iio/ring_sw.h
@@ -23,13 +23,13 @@
#ifndef _IIO_RING_SW_H_
#define _IIO_RING_SW_H_
-#include "ring_generic.h"
+#include "buffer_generic.h"
/**
* ring_sw_access_funcs - access functions for a software ring buffer
**/
-extern const struct iio_ring_access_funcs ring_sw_access_funcs;
+extern const struct iio_buffer_access_funcs ring_sw_access_funcs;
-struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev);
-void iio_sw_rb_free(struct iio_ring_buffer *ring);
+struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev);
+void iio_sw_rb_free(struct iio_buffer *ring);
#endif /* _IIO_RING_SW_H_ */
diff --git a/drivers/staging/iio/sysfs.h b/drivers/staging/iio/sysfs.h
index dd79b584421..868952b5ba6 100644
--- a/drivers/staging/iio/sysfs.h
+++ b/drivers/staging/iio/sysfs.h
@@ -12,19 +12,17 @@
#ifndef _INDUSTRIAL_IO_SYSFS_H_
#define _INDUSTRIAL_IO_SYSFS_H_
-#include "iio.h"
+struct iio_chan_spec;
/**
* struct iio_dev_attr - iio specific device attribute
* @dev_attr: underlying device attribute
* @address: associated register address
- * @val2: secondary attribute value
* @l: list head for maintaining list of dynamically created attrs.
*/
struct iio_dev_attr {
struct device_attribute dev_attr;
- int address;
- int val2;
+ u64 address;
struct list_head l;
struct iio_chan_spec const *c;
};
@@ -64,10 +62,6 @@ struct iio_const_attr {
struct iio_dev_attr iio_dev_attr_##_vname \
= IIO_ATTR(_name, _mode, _show, _store, _addr)
-#define IIO_DEVICE_ATTR_2(_name, _mode, _show, _store, _addr, _val2) \
- struct iio_dev_attr iio_dev_attr_##_name \
- = IIO_ATTR_2(_name, _mode, _show, _store, _addr, _val2)
-
#define IIO_CONST_ATTR(_name, _string) \
struct iio_const_attr iio_const_attr_##_name \
= { .string = _string, \
@@ -77,17 +71,8 @@ struct iio_const_attr {
struct iio_const_attr iio_const_attr_##_vname \
= { .string = _string, \
.dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)}
-/* Generic attributes of onetype or another */
-
-/**
- * IIO_DEV_ATTR_REV - revision number for the device
- * @_show: output method for the attribute
- *
- * Very much device dependent.
- **/
-#define IIO_DEV_ATTR_REV(_show) \
- IIO_DEVICE_ATTR(revision, S_IRUGO, _show, NULL, 0)
+/* Generic attributes of onetype or another */
/**
* IIO_DEV_ATTR_RESET: resets the device
**/
@@ -95,13 +80,6 @@ struct iio_const_attr {
IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, _store, 0)
/**
- * IIO_CONST_ATTR_NAME - constant identifier
- * @_string: the name
- **/
-#define IIO_CONST_ATTR_NAME(_string) \
- IIO_CONST_ATTR(name, _string)
-
-/**
* IIO_DEV_ATTR_SAMP_FREQ - sets any internal clock frequency
* @_mode: sysfs file mode/permissions
* @_show: output method for the attribute
@@ -111,15 +89,11 @@ struct iio_const_attr {
IIO_DEVICE_ATTR(sampling_frequency, _mode, _show, _store, 0)
/**
- * IIO_DEV_ATTR_AVAIL_SAMP_FREQ - list available sampling frequencies
+ * IIO_DEV_ATTR_SAMP_FREQ_AVAIL - list available sampling frequencies
* @_show: output method for the attribute
*
* May be mode dependent on some devices
**/
-/* Deprecated */
-#define IIO_DEV_ATTR_AVAIL_SAMP_FREQ(_show) \
- IIO_DEVICE_ATTR(available_sampling_frequency, S_IRUGO, _show, NULL, 0)
-
#define IIO_DEV_ATTR_SAMP_FREQ_AVAIL(_show) \
IIO_DEVICE_ATTR(sampling_frequency_available, S_IRUGO, _show, NULL, 0)
/**
@@ -131,92 +105,56 @@ struct iio_const_attr {
#define IIO_CONST_ATTR_SAMP_FREQ_AVAIL(_string) \
IIO_CONST_ATTR(sampling_frequency_available, _string)
-/**
- * IIO_DEV_ATTR_SW_RING_ENABLE - enable software ring buffer
- * @_show: output method for the attribute
- * @_store: input method for the attribute
- *
- * Success may be dependent on attachment of trigger previously.
- **/
-#define IIO_DEV_ATTR_SW_RING_ENABLE(_show, _store) \
- IIO_DEVICE_ATTR(sw_ring_enable, S_IRUGO | S_IWUSR, _show, _store, 0)
-
-/**
- * IIO_DEV_ATTR_HW_RING_ENABLE - enable hardware ring buffer
- * @_show: output method for the attribute
- * @_store: input method for the attribute
- *
- * This is a different attribute from the software one as one can envision
- * schemes where a combination of the two may be used.
- **/
-#define IIO_DEV_ATTR_HW_RING_ENABLE(_show, _store) \
- IIO_DEVICE_ATTR(hw_ring_enable, S_IRUGO | S_IWUSR, _show, _store, 0)
-
#define IIO_DEV_ATTR_TEMP_RAW(_show) \
- IIO_DEVICE_ATTR(temp_raw, S_IRUGO, _show, NULL, 0)
+ IIO_DEVICE_ATTR(in_temp_raw, S_IRUGO, _show, NULL, 0)
#define IIO_CONST_ATTR_TEMP_OFFSET(_string) \
- IIO_CONST_ATTR(temp_offset, _string)
+ IIO_CONST_ATTR(in_temp_offset, _string)
#define IIO_CONST_ATTR_TEMP_SCALE(_string) \
- IIO_CONST_ATTR(temp_scale, _string)
-
-/* must match our channel defs */
-#define IIO_EV_CLASS_IN IIO_IN
-#define IIO_EV_CLASS_IN_DIFF IIO_IN_DIFF
-#define IIO_EV_CLASS_ACCEL IIO_ACCEL
-#define IIO_EV_CLASS_GYRO IIO_GYRO
-#define IIO_EV_CLASS_MAGN IIO_MAGN
-#define IIO_EV_CLASS_LIGHT IIO_LIGHT
-#define IIO_EV_CLASS_PROXIMITY IIO_PROXIMITY
-#define IIO_EV_CLASS_TEMP IIO_TEMP
-
-#define IIO_EV_MOD_X IIO_MOD_X
-#define IIO_EV_MOD_Y IIO_MOD_Y
-#define IIO_EV_MOD_Z IIO_MOD_Z
-#define IIO_EV_MOD_X_AND_Y IIO_MOD_X_AND_Y
-#define IIO_EV_MOD_X_ANX_Z IIO_MOD_X_AND_Z
-#define IIO_EV_MOD_Y_AND_Z IIO_MOD_Y_AND_Z
-#define IIO_EV_MOD_X_AND_Y_AND_Z IIO_MOD_X_AND_Y_AND_Z
-#define IIO_EV_MOD_X_OR_Y IIO_MOD_X_OR_Y
-#define IIO_EV_MOD_X_OR_Z IIO_MOD_X_OR_Z
-#define IIO_EV_MOD_Y_OR_Z IIO_MOD_Y_OR_Z
-#define IIO_EV_MOD_X_OR_Y_OR_Z IIO_MOD_X_OR_Y_OR_Z
-
-#define IIO_EV_TYPE_THRESH 0
-#define IIO_EV_TYPE_MAG 1
-#define IIO_EV_TYPE_ROC 2
-
-#define IIO_EV_DIR_EITHER 0
-#define IIO_EV_DIR_RISING 1
-#define IIO_EV_DIR_FALLING 2
-
-#define IIO_EV_TYPE_MAX 8
-#define IIO_EV_BIT(type, direction) \
- (1 << (type*IIO_EV_TYPE_MAX + direction))
+ IIO_CONST_ATTR(in_temp_scale, _string)
+
+enum iio_event_type {
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_TYPE_ROC,
+ IIO_EV_TYPE_THRESH_ADAPTIVE,
+ IIO_EV_TYPE_MAG_ADAPTIVE,
+};
-#define IIO_EVENT_CODE(channelclass, orient_bit, number, \
- modifier, type, direction) \
- (channelclass | (orient_bit << 8) | ((number) << 9) | \
- ((modifier) << 13) | ((type) << 16) | ((direction) << 24))
+enum iio_event_direction {
+ IIO_EV_DIR_EITHER,
+ IIO_EV_DIR_RISING,
+ IIO_EV_DIR_FALLING,
+};
+
+#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
+ type, chan, chan1, chan2) \
+ (((u64)type << 56) | ((u64)diff << 55) | \
+ ((u64)direction << 48) | ((u64)modifier << 40) | \
+ ((u64)chan_type << 32) | (chan2 << 16) | chan1 | chan)
+
+#define IIO_EV_DIR_MAX 4
+#define IIO_EV_BIT(type, direction) \
+ (1 << (type*IIO_EV_DIR_MAX + direction))
#define IIO_MOD_EVENT_CODE(channelclass, number, modifier, \
type, direction) \
- IIO_EVENT_CODE(channelclass, 1, number, modifier, type, direction)
+ IIO_EVENT_CODE(channelclass, 0, modifier, direction, type, number, 0, 0)
#define IIO_UNMOD_EVENT_CODE(channelclass, number, type, direction) \
- IIO_EVENT_CODE(channelclass, 0, number, 0, type, direction)
+ IIO_EVENT_CODE(channelclass, 0, 0, direction, type, number, 0, 0)
+#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
-#define IIO_BUFFER_EVENT_CODE(code) \
- (IIO_EV_CLASS_BUFFER | (code << 8))
+#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 24) & 0xf)
+#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
/* Event code number extraction depends on which type of event we have.
* Perhaps review this function in the future*/
-#define IIO_EVENT_CODE_EXTRACT_NUM(mask) ((mask >> 9) & 0x0f)
+#define IIO_EVENT_CODE_EXTRACT_NUM(mask) (mask & 0xFFFF)
-#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 13) & 0x7)
+#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 40) & 0xFF)
#endif /* _INDUSTRIAL_IO_SYSFS_H_ */
diff --git a/drivers/staging/iio/trigger.h b/drivers/staging/iio/trigger.h
index e0b58ed749b..598fcb3599f 100644
--- a/drivers/staging/iio/trigger.h
+++ b/drivers/staging/iio/trigger.h
@@ -7,6 +7,7 @@
* the Free Software Foundation.
*/
#include <linux/irq.h>
+#include <linux/module.h>
#ifndef _IIO_TRIGGER_H_
#define _IIO_TRIGGER_H_
@@ -16,6 +17,27 @@ struct iio_subirq {
};
/**
+ * struct iio_trigger_ops - operations structure for an iio_trigger.
+ * @owner: used to monitor usage count of the trigger.
+ * @set_trigger_state: switch on/off the trigger on demand
+ * @try_reenable: function to reenable the trigger when the
+ * use count is zero (may be NULL)
+ * @validate_device: function to validate the device when the
+ * current trigger gets changed.
+ *
+ * This is typically static const within a driver and shared by
+ * instances of a given device.
+ **/
+struct iio_trigger_ops {
+ struct module *owner;
+ int (*set_trigger_state)(struct iio_trigger *trig, bool state);
+ int (*try_reenable)(struct iio_trigger *trig);
+ int (*validate_device)(struct iio_trigger *trig,
+ struct iio_dev *indio_dev);
+};
+
+
+/**
* struct iio_trigger - industrial I/O trigger device
*
* @id: [INTERN] unique id number
@@ -26,11 +48,6 @@ struct iio_subirq {
* @alloc_list: [DRIVER] used for driver specific trigger list
* @owner: [DRIVER] used to monitor usage count of the trigger.
* @use_count: use count for the trigger
- * @set_trigger_state: [DRIVER] switch on/off the trigger on demand
- * @try_reenable: function to reenable the trigger when the
- * use count is zero (may be NULL)
- * @validate_device: function to validate the device when the
- * current trigger gets changed.
* @subirq_chip: [INTERN] associate 'virtual' irq chip.
* @subirq_base: [INTERN] base number for irqs provided by trigger.
* @subirqs: [INTERN] information about the 'child' irqs.
@@ -38,6 +55,7 @@ struct iio_subirq {
* @pool_lock: [INTERN] protection of the irq pool.
**/
struct iio_trigger {
+ const struct iio_trigger_ops *ops;
int id;
const char *name;
struct device dev;
@@ -48,11 +66,6 @@ struct iio_trigger {
struct module *owner;
int use_count;
- int (*set_trigger_state)(struct iio_trigger *trig, bool state);
- int (*try_reenable)(struct iio_trigger *trig);
- int (*validate_device)(struct iio_trigger *trig,
- struct iio_dev *indio_dev);
-
struct irq_chip subirq_chip;
int subirq_base;
@@ -61,29 +74,6 @@ struct iio_trigger {
struct mutex pool_lock;
};
-/**
- * struct iio_poll_func - poll function pair
- *
- * @private_data: data specific to device (passed into poll func)
- * @h: the function that is actually run on trigger
- * @thread: threaded interrupt part
- * @type: the type of interrupt (basically if oneshot)
- * @name: name used to identify the trigger consumer.
- * @irq: the corresponding irq as allocated from the
- * trigger pool
- * @timestamp: some devices need a timestamp grabbed as soon
- * as possible after the trigger - hence handler
- * passes it via here.
- **/
-struct iio_poll_func {
- void *private_data;
- irqreturn_t (*h)(int irq, void *p);
- irqreturn_t (*thread)(int irq, void *p);
- int type;
- char *name;
- int irq;
- s64 timestamp;
-};
static inline struct iio_trigger *to_iio_trigger(struct device *d)
{
@@ -92,14 +82,14 @@ static inline struct iio_trigger *to_iio_trigger(struct device *d)
static inline void iio_put_trigger(struct iio_trigger *trig)
{
+ module_put(trig->ops->owner);
put_device(&trig->dev);
- module_put(trig->owner);
};
static inline void iio_get_trigger(struct iio_trigger *trig)
{
- __module_get(trig->owner);
get_device(&trig->dev);
+ __module_get(trig->ops->owner);
};
/**
@@ -115,23 +105,6 @@ int iio_trigger_register(struct iio_trigger *trig_info);
void iio_trigger_unregister(struct iio_trigger *trig_info);
/**
- * iio_trigger_attach_poll_func() - add a function pair to be run on trigger
- * @trig: trigger to which the function pair are being added
- * @pf: poll function pair
- **/
-int iio_trigger_attach_poll_func(struct iio_trigger *trig,
- struct iio_poll_func *pf);
-
-/**
- * iio_trigger_dettach_poll_func() - remove function pair from those to be
- * run on trigger
- * @trig: trigger from which the function is being removed
- * @pf: poll function pair
- **/
-int iio_trigger_dettach_poll_func(struct iio_trigger *trig,
- struct iio_poll_func *pf);
-
-/**
* iio_trigger_poll() - called on a trigger occurring
* @trig: trigger which occurred
*
@@ -139,48 +112,9 @@ int iio_trigger_dettach_poll_func(struct iio_trigger *trig,
**/
void iio_trigger_poll(struct iio_trigger *trig, s64 time);
void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time);
-void iio_trigger_notify_done(struct iio_trigger *trig);
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private);
-static inline int iio_trigger_get_irq(struct iio_trigger *trig)
-{
- int ret;
- mutex_lock(&trig->pool_lock);
- ret = bitmap_find_free_region(trig->pool,
- CONFIG_IIO_CONSUMERS_PER_TRIGGER,
- ilog2(1));
- mutex_unlock(&trig->pool_lock);
- if (ret >= 0)
- ret += trig->subirq_base;
-
- return ret;
-};
-
-static inline void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
-{
- mutex_lock(&trig->pool_lock);
- clear_bit(irq - trig->subirq_base, trig->pool);
- mutex_unlock(&trig->pool_lock);
-};
-
-struct iio_poll_func
-*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
- irqreturn_t (*thread)(int irq, void *p),
- int type,
- void *private,
- const char *fmt,
- ...);
-void iio_dealloc_pollfunc(struct iio_poll_func *pf);
-irqreturn_t iio_pollfunc_store_time(int irq, void *p);
-
-/*
- * Two functions for common case where all that happens is a pollfunc
- * is attached and detached from a trigger
- */
-int iio_triggered_ring_postenable(struct iio_dev *indio_dev);
-int iio_triggered_ring_predisable(struct iio_dev *indio_dev);
-
struct iio_trigger *iio_allocate_trigger(const char *fmt, ...)
__attribute__((format(printf, 1, 2)));
void iio_free_trigger(struct iio_trigger *trig);
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 4f172956558..1cbb25dff8b 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -143,6 +143,10 @@ static int iio_bfin_tmr_get_number(int irq)
return -ENODEV;
}
+static const struct iio_trigger_ops iio_bfin_tmr_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
static int __devinit iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
{
struct bfin_tmr_state *st;
@@ -175,7 +179,7 @@ static int __devinit iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
}
st->trig->private_data = st;
- st->trig->owner = THIS_MODULE;
+ st->trig->ops = &iio_bfin_tmr_trigger_ops;
st->trig->dev.groups = iio_bfin_tmr_trigger_attr_groups;
ret = iio_trigger_register(st->trig);
if (ret)
diff --git a/drivers/staging/iio/trigger/iio-trig-gpio.c b/drivers/staging/iio/trigger/iio-trig-gpio.c
index f1fb795e641..f2a65598162 100644
--- a/drivers/staging/iio/trigger/iio-trig-gpio.c
+++ b/drivers/staging/iio/trigger/iio-trig-gpio.c
@@ -47,6 +47,10 @@ static irqreturn_t iio_gpio_trigger_poll(int irq, void *private)
return IRQ_HANDLED;
}
+static const struct iio_trigger_ops iio_gpio_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
static int iio_gpio_trigger_probe(struct platform_device *pdev)
{
struct iio_gpio_trigger_info *trig_info;
@@ -81,7 +85,7 @@ static int iio_gpio_trigger_probe(struct platform_device *pdev)
}
trig->private_data = trig_info;
trig_info->irq = irq;
- trig->owner = THIS_MODULE;
+ trig->ops = &iio_gpio_trigger_ops;
ret = request_irq(irq, iio_gpio_trigger_poll,
irqflags, trig->name, trig);
if (ret) {
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index 01cf7e20b51..d35d085da94 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -96,6 +96,11 @@ static void iio_prtc_trigger_poll(void *private_data)
iio_trigger_poll(private_data, 0);
}
+static const struct iio_trigger_ops iio_prtc_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &iio_trig_periodic_rtc_set_state,
+};
+
static int iio_trig_periodic_rtc_probe(struct platform_device *dev)
{
char **pdata = dev->dev.platform_data;
@@ -121,7 +126,7 @@ static int iio_trig_periodic_rtc_probe(struct platform_device *dev)
}
trig->private_data = trig_info;
trig->owner = THIS_MODULE;
- trig->set_trigger_state = &iio_trig_periodic_rtc_set_state;
+ trig->ops = &iio_prtc_trigger_ops;
/* RTC access */
trig_info->rtc
= rtc_class_open(pdata[i]);
diff --git a/drivers/staging/iio/trigger/iio-trig-sysfs.c b/drivers/staging/iio/trigger/iio-trig-sysfs.c
index 47248cd1fa0..174dc65709d 100644
--- a/drivers/staging/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/staging/iio/trigger/iio-trig-sysfs.c
@@ -77,9 +77,16 @@ static const struct attribute_group *iio_sysfs_trig_groups[] = {
NULL
};
+
+/* Nothing to actually do upon release */
+static void iio_trigger_sysfs_release(struct device *dev)
+{
+}
+
static struct device iio_sysfs_trig_dev = {
.bus = &iio_bus_type,
.groups = iio_sysfs_trig_groups,
+ .release = &iio_trigger_sysfs_release,
};
static ssize_t iio_sysfs_trigger_poll(struct device *dev,
@@ -107,6 +114,10 @@ static const struct attribute_group *iio_sysfs_trigger_attr_groups[] = {
NULL
};
+static const struct iio_trigger_ops iio_sysfs_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
static int iio_sysfs_trigger_probe(int id)
{
struct iio_sysfs_trig *t;
@@ -135,7 +146,7 @@ static int iio_sysfs_trigger_probe(int id)
}
t->trig->dev.groups = iio_sysfs_trigger_attr_groups;
- t->trig->owner = THIS_MODULE;
+ t->trig->ops = &iio_sysfs_trigger_ops;
t->trig->dev.parent = &iio_sysfs_trig_dev;
ret = iio_trigger_register(t->trig);
diff --git a/drivers/staging/iio/trigger_consumer.h b/drivers/staging/iio/trigger_consumer.h
index 9d52d963777..60d64b35694 100644
--- a/drivers/staging/iio/trigger_consumer.h
+++ b/drivers/staging/iio/trigger_consumer.h
@@ -1,47 +1,52 @@
-
-/* The industrial I/O core, trigger consumer handling functions
+/* The industrial I/O core, trigger consumer functions
*
- * Copyright (c) 2008 Jonathan Cameron
+ * Copyright (c) 2008-2011 Jonathan Cameron
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
-#ifdef CONFIG_IIO_TRIGGER
-/**
- * iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
- * @dev_info: iio_dev associated with the device that will consume the trigger
- **/
-int iio_device_register_trigger_consumer(struct iio_dev *dev_info);
-
-/**
- * iio_device_unregister_trigger_consumer() - reverse the registration process
- * @dev_info: iio_dev associated with the device that consumed the trigger
- **/
-int iio_device_unregister_trigger_consumer(struct iio_dev *dev_info);
-
-#else
-
/**
- * iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
- * @dev_info: iio_dev associated with the device that will consume the trigger
- **/
-static int iio_device_register_trigger_consumer(struct iio_dev *dev_info)
-{
- return 0;
-};
-
-/**
- * iio_device_unregister_trigger_consumer() - reverse the registration process
- * @dev_info: iio_dev associated with the device that consumed the trigger
+ * struct iio_poll_func - poll function pair
+ *
+ * @indio_dev: data specific to device (passed into poll func)
+ * @h: the function that is actually run on trigger
+ * @thread: threaded interrupt part
+ * @type: the type of interrupt (basically if oneshot)
+ * @name: name used to identify the trigger consumer.
+ * @irq: the corresponding irq as allocated from the
+ * trigger pool
+ * @timestamp: some devices need a timestamp grabbed as soon
+ * as possible after the trigger - hence handler
+ * passes it via here.
**/
-static int iio_device_unregister_trigger_consumer(struct iio_dev *dev_info)
-{
- return 0;
+struct iio_poll_func {
+ struct iio_dev *indio_dev;
+ irqreturn_t (*h)(int irq, void *p);
+ irqreturn_t (*thread)(int irq, void *p);
+ int type;
+ char *name;
+ int irq;
+ s64 timestamp;
};
-#endif /* CONFIG_TRIGGER_CONSUMER */
+struct iio_poll_func
+*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
+ int type,
+ struct iio_dev *indio_dev,
+ const char *fmt,
+ ...);
+void iio_dealloc_pollfunc(struct iio_poll_func *pf);
+irqreturn_t iio_pollfunc_store_time(int irq, void *p);
+void iio_trigger_notify_done(struct iio_trigger *trig);
+/*
+ * Two functions for common case where all that happens is a pollfunc
+ * is attached and detached from a trigger
+ */
+int iio_triggered_buffer_postenable(struct iio_dev *indio_dev);
+int iio_triggered_buffer_predisable(struct iio_dev *indio_dev);
diff --git a/drivers/staging/intel_sst/intel_sst.c b/drivers/staging/intel_sst/intel_sst.c
index d892861346f..c303d85011b 100644
--- a/drivers/staging/intel_sst/intel_sst.c
+++ b/drivers/staging/intel_sst/intel_sst.c
@@ -321,7 +321,7 @@ static int __devinit intel_sst_probe(struct pci_dev *pci,
if (ret) {
pr_err("couldn't register LPE device\n");
goto do_free_misc;
- }
+ }
} else if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID) {
u32 csr;
@@ -524,9 +524,11 @@ int intel_sst_resume(struct pci_dev *pci)
return 0;
}
-/* The runtime_suspend/resume is pretty much similar to the legacy suspend/resume with the noted exception below:
- * The PCI core takes care of taking the system through D3hot and restoring it back to D0 and so there is
- * no need to duplicate that here.
+/* The runtime_suspend/resume is pretty much similar to the legacy
+ * suspend/resume with the noted exception below:
+ * The PCI core takes care of taking the system through D3hot and
+ * restoring it back to D0 and so there is no need to duplicate
+ * that here.
*/
static int intel_sst_runtime_suspend(struct device *dev)
{
diff --git a/drivers/staging/intel_sst/intel_sst_dsp.c b/drivers/staging/intel_sst/intel_sst_dsp.c
index a89e1ade847..426d2b92073 100644
--- a/drivers/staging/intel_sst/intel_sst_dsp.c
+++ b/drivers/staging/intel_sst/intel_sst_dsp.c
@@ -104,7 +104,7 @@ static int sst_start_mrst(void)
/**
* sst_start_medfield - Start the SST DSP processor
*
- * This starts the DSP in MRST platfroms
+ * This starts the DSP in Medfield platfroms
*/
static int sst_start_medfield(void)
{
diff --git a/drivers/staging/intel_sst/intelmid.c b/drivers/staging/intel_sst/intelmid.c
index 25656ad2802..492b660246b 100644
--- a/drivers/staging/intel_sst/intelmid.c
+++ b/drivers/staging/intel_sst/intelmid.c
@@ -27,6 +27,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/platform_device.h>
diff --git a/drivers/staging/keucr/scsiglue.c b/drivers/staging/keucr/scsiglue.c
index 135f7f21dfd..e1f3931d41e 100644
--- a/drivers/staging/keucr/scsiglue.c
+++ b/drivers/staging/keucr/scsiglue.c
@@ -144,7 +144,7 @@ static int command_abort(struct scsi_cmnd *srb)
scsi_lock(us_to_host(us));
if (us->srb != srb) {
scsi_unlock(us_to_host(us));
- printk ("-- nothing to abort\n");
+ printk("-- nothing to abort\n");
return FAILED;
}
@@ -279,7 +279,8 @@ static int proc_info(struct Scsi_Host *host, char *buffer, char **start,
pos += sprintf(pos, " Quirks:");
#define US_FLAG(name, value) \
- if (us->fflags & value) pos += sprintf(pos, " " #name);
+ if (us->fflags & value)\
+ pos += sprintf(pos, " " #name);
US_DO_ALL_FLAGS
#undef US_FLAG
diff --git a/drivers/staging/keucr/smil.h b/drivers/staging/keucr/smil.h
index b5a8fa7c798..24a636a4aa1 100644
--- a/drivers/staging/keucr/smil.h
+++ b/drivers/staging/keucr/smil.h
@@ -177,8 +177,7 @@ struct SSFDCTYPE {
WORD MaxLogBlocks;
};
-typedef struct SSFDCTYPE_T
-{
+typedef struct SSFDCTYPE_T {
BYTE Model;
BYTE Attribute;
BYTE MaxZones;
@@ -194,8 +193,7 @@ struct ADDRESS {
WORD LogBlock; /* Logical Block Number of Zone */
};
-typedef struct ADDRESS_T
-{
+typedef struct ADDRESS_T {
BYTE Zone; /* Zone Number */
BYTE Sector; /* Sector(512byte) Number on Block */
WORD PhyBlock; /* Physical Block Number on Zone */
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index b315d5f8e19..d4dd5ed516c 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -2,42 +2,37 @@
#include "usb.h"
#include "scsiglue.h"
#include "transport.h"
-//#include "init.h"
-//#include "stdlib.h"
-//#include "EUCR6SK.h"
#include "smcommon.h"
#include "smil.h"
-void _Set_D_SsfdcRdCmd (BYTE);
-void _Set_D_SsfdcRdAddr (BYTE);
-void _Set_D_SsfdcRdChip (void);
-void _Set_D_SsfdcRdStandby (void);
-void _Start_D_SsfdcRdHwECC (void);
-void _Stop_D_SsfdcRdHwECC (void);
-void _Load_D_SsfdcRdHwECC (BYTE);
-void _Set_D_SsfdcWrCmd (BYTE);
-void _Set_D_SsfdcWrAddr (BYTE);
-void _Set_D_SsfdcWrBlock (void);
-void _Set_D_SsfdcWrStandby (void);
-void _Start_D_SsfdcWrHwECC (void);
-void _Load_D_SsfdcWrHwECC (BYTE);
-int _Check_D_SsfdcBusy (WORD);
-int _Check_D_SsfdcStatus (void);
-void _Reset_D_SsfdcErr (void);
-void _Read_D_SsfdcBuf (BYTE *);
-void _Write_D_SsfdcBuf (BYTE *);
-void _Read_D_SsfdcByte (BYTE *);
-void _ReadRedt_D_SsfdcBuf (BYTE *);
-void _WriteRedt_D_SsfdcBuf (BYTE *);
-BYTE _Check_D_DevCode (BYTE);
-
-void _Set_D_ECCdata (BYTE,BYTE *);
-void _Calc_D_ECCdata (BYTE *);
-
-//void SM_ReadDataWithDMA (PFDO_DEVICE_EXTENSION, BYTE *, WORD);
-//void SM_WriteDataWithDMA (PFDO_DEVICE_EXTENSION, BYTE *, WORD);
-//
+void _Set_D_SsfdcRdCmd(BYTE);
+void _Set_D_SsfdcRdAddr(BYTE);
+void _Set_D_SsfdcRdChip(void);
+void _Set_D_SsfdcRdStandby(void);
+void _Start_D_SsfdcRdHwECC(void);
+void _Stop_D_SsfdcRdHwECC(void);
+void _Load_D_SsfdcRdHwECC(BYTE);
+void _Set_D_SsfdcWrCmd(BYTE);
+void _Set_D_SsfdcWrAddr(BYTE);
+void _Set_D_SsfdcWrBlock(void);
+void _Set_D_SsfdcWrStandby(void);
+void _Start_D_SsfdcWrHwECC(void);
+void _Load_D_SsfdcWrHwECC(BYTE);
+int _Check_D_SsfdcBusy(WORD);
+int _Check_D_SsfdcStatus(void);
+void _Reset_D_SsfdcErr(void);
+void _Read_D_SsfdcBuf(BYTE *);
+void _Write_D_SsfdcBuf(BYTE *);
+void _Read_D_SsfdcByte(BYTE *);
+void _ReadRedt_D_SsfdcBuf(BYTE *);
+void _WriteRedt_D_SsfdcBuf(BYTE *);
+BYTE _Check_D_DevCode(BYTE);
+
+void _Set_D_ECCdata(BYTE, BYTE *);
+void _Calc_D_ECCdata(BYTE *);
+
+
struct SSFDCTYPE Ssfdc;
struct ADDRESS Media;
struct CIS_AREA CisArea;
@@ -49,221 +44,179 @@ extern DWORD ErrXDCode;
extern WORD ReadBlock;
extern WORD WriteBlock;
-//KEVENT SM_DMADoneEvent;
-#define EVEN 0 // Even Page for 256byte/page
-#define ODD 1 // Odd Page for 256byte/page
+#define EVEN 0 /* Even Page for 256byte/page */
+#define ODD 1 /* Odd Page for 256byte/page */
-//SmartMedia Redundant buffer data Control Subroutine
-//----- Check_D_DataBlank() --------------------------------------------
+
+/* SmartMedia Redundant buffer data Control Subroutine
+ *----- Check_D_DataBlank() --------------------------------------------
+ */
int Check_D_DataBlank(BYTE *redundant)
{
char i;
- for(i=0; i<REDTSIZE; i++)
- if (*redundant++!=0xFF)
- return(ERROR);
+ for (i = 0; i < REDTSIZE; i++)
+ if (*redundant++ != 0xFF)
+ return ERROR;
- return(SMSUCCESS);
+ return SMSUCCESS;
}
-//----- Check_D_FailBlock() --------------------------------------------
+/* ----- Check_D_FailBlock() -------------------------------------------- */
int Check_D_FailBlock(BYTE *redundant)
{
- redundant+=REDT_BLOCK;
+ redundant += REDT_BLOCK;
- if (*redundant==0xFF)
- return(SMSUCCESS);
+ if (*redundant == 0xFF)
+ return SMSUCCESS;
if (!*redundant)
- return(ERROR);
- if (hweight8(*redundant)<7)
- return(ERROR);
+ return ERROR;
+ if (hweight8(*redundant) < 7)
+ return ERROR;
- return(SMSUCCESS);
+ return SMSUCCESS;
}
-//----- Check_D_DataStatus() -------------------------------------------
+/* ----- Check_D_DataStatus() ------------------------------------------- */
int Check_D_DataStatus(BYTE *redundant)
{
- redundant+=REDT_DATA;
+ redundant += REDT_DATA;
- if (*redundant==0xFF)
- return(SMSUCCESS);
- if (!*redundant)
- {
+ if (*redundant == 0xFF)
+ return SMSUCCESS;
+ if (!*redundant) {
ErrXDCode = ERR_DataStatus;
- return(ERROR);
- }
- else
+ return ERROR;
+ } else
ErrXDCode = NO_ERROR;
- if (hweight8(*redundant)<5)
- return(ERROR);
+ if (hweight8(*redundant) < 5)
+ return ERROR;
- return(SMSUCCESS);
+ return SMSUCCESS;
}
-//----- Load_D_LogBlockAddr() ------------------------------------------
+/* ----- Load_D_LogBlockAddr() ------------------------------------------ */
int Load_D_LogBlockAddr(BYTE *redundant)
{
- WORD addr1,addr2;
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
+ WORD addr1, addr2;
- addr1=(WORD)*(redundant+REDT_ADDR1H)*0x0100+(WORD)*(redundant+REDT_ADDR1L);
- addr2=(WORD)*(redundant+REDT_ADDR2H)*0x0100+(WORD)*(redundant+REDT_ADDR2L);
+ addr1 = (WORD)*(redundant + REDT_ADDR1H)*0x0100 + (WORD)*(redundant + REDT_ADDR1L);
+ addr2 = (WORD)*(redundant + REDT_ADDR2H)*0x0100 + (WORD)*(redundant + REDT_ADDR2L);
- if (addr1==addr2)
- if ((addr1 &0xF000)==0x1000)
- { Media.LogBlock=(addr1 &0x0FFF)/2; return(SMSUCCESS); }
+ if (addr1 == addr2)
+ if ((addr1 & 0xF000) == 0x1000) {
+ Media.LogBlock = (addr1 & 0x0FFF) / 2;
+ return SMSUCCESS;
+ }
- if (hweight16((WORD)(addr1^addr2))!=0x01) return(ERROR);
+ if (hweight16((WORD)(addr1^addr2)) != 0x01)
+ return ERROR;
- if ((addr1 &0xF000)==0x1000)
- if (!(hweight16(addr1) &0x01))
- { Media.LogBlock=(addr1 &0x0FFF)/2; return(SMSUCCESS); }
+ if ((addr1 & 0xF000) == 0x1000)
+ if (!(hweight16(addr1) & 0x01)) {
+ Media.LogBlock = (addr1 & 0x0FFF) / 2;
+ return SMSUCCESS;
+ }
- if ((addr2 &0xF000)==0x1000)
- if (!(hweight16(addr2) &0x01))
- { Media.LogBlock=(addr2 &0x0FFF)/2; return(SMSUCCESS); }
+ if ((addr2 & 0xF000) == 0x1000)
+ if (!(hweight16(addr2) & 0x01)) {
+ Media.LogBlock = (addr2 & 0x0FFF) / 2;
+ return SMSUCCESS;
+ }
- return(ERROR);
+ return ERROR;
}
-//----- Clr_D_RedundantData() ------------------------------------------
+/* ----- Clr_D_RedundantData() ------------------------------------------ */
void Clr_D_RedundantData(BYTE *redundant)
{
char i;
- for(i=0; i<REDTSIZE; i++)
- *(redundant+i)=0xFF;
+ for (i = 0; i < REDTSIZE; i++)
+ *(redundant + i) = 0xFF;
}
-//----- Set_D_LogBlockAddr() -------------------------------------------
+/* ----- Set_D_LogBlockAddr() ------------------------------------------- */
void Set_D_LogBlockAddr(BYTE *redundant)
{
WORD addr;
- *(redundant+REDT_BLOCK)=0xFF;
- *(redundant+REDT_DATA) =0xFF;
- addr=Media.LogBlock*2+0x1000;
+ *(redundant + REDT_BLOCK) = 0xFF;
+ *(redundant + REDT_DATA) = 0xFF;
+ addr = Media.LogBlock*2 + 0x1000;
- if ((hweight16(addr)%2))
+ if ((hweight16(addr) % 2))
addr++;
- *(redundant+REDT_ADDR1H)=*(redundant+REDT_ADDR2H)=(BYTE)(addr/0x0100);
- *(redundant+REDT_ADDR1L)=*(redundant+REDT_ADDR2L)=(BYTE)addr;
+ *(redundant + REDT_ADDR1H) = *(redundant + REDT_ADDR2H) = (BYTE)(addr / 0x0100);
+ *(redundant + REDT_ADDR1L) = *(redundant + REDT_ADDR2L) = (BYTE)addr;
}
-//----- Set_D_FailBlock() ----------------------------------------------
+/*----- Set_D_FailBlock() ---------------------------------------------- */
void Set_D_FailBlock(BYTE *redundant)
{
- char i;
-
- for(i=0; i<REDTSIZE; i++)
- *redundant++=(BYTE)((i==REDT_BLOCK)?0xF0:0xFF);
+ char i;
+ for (i = 0; i < REDTSIZE; i++)
+ *redundant++ = (BYTE)((i == REDT_BLOCK) ? 0xF0 : 0xFF);
}
-//----- Set_D_DataStaus() ----------------------------------------------
+/* ----- Set_D_DataStaus() ---------------------------------------------- */
void Set_D_DataStaus(BYTE *redundant)
{
- redundant+=REDT_DATA;
- *redundant=0x00;
+ redundant += REDT_DATA;
+ *redundant = 0x00;
}
-//SmartMedia Function Command Subroutine
-// 6250 CMD 6
-//----- Ssfdc_D_Reset() ------------------------------------------------
+/* SmartMedia Function Command Subroutine
+ * 6250 CMD 6
+ */
+/* ----- Ssfdc_D_Reset() ------------------------------------------------ */
void Ssfdc_D_Reset(struct us_data *us)
{
- //NTSTATUS ntStatus = STATUS_SUCCESS;
- //PBULK_CBW pBulkCbw = fdoExt->pBulkCbw;
- //BYTE buf[0x200];
-
- //printk("Ssfdc_D_Reset --- But do nothing !!\n");
return;
-/* RtlZeroMemory(pBulkCbw, sizeof(struct _BULK_CBW));
- pBulkCbw->dCBWSignature = CBW_SIGNTURE;
- pBulkCbw->bCBWLun = CBW_LUN;
- //pBulkCbw->dCBWDataTransferLength = 0x200;
- pBulkCbw->bmCBWFlags = 0x80;
- pBulkCbw->CBWCb[0] = 0xF2;
- pBulkCbw->CBWCb[1] = 0x07;
-
- ntStatus = ENE_SendScsiCmd(fdoExt, FDIR_READ, NULL);
-
- if (!NT_SUCCESS(ntStatus))
- {
- ENE_Print("Ssfdc_D_Reset Fail !!\n");
- //return ntStatus;
- }*/
}
-//----- Ssfdc_D_ReadCisSect() ------------------------------------------
-int Ssfdc_D_ReadCisSect(struct us_data *us, BYTE *buf,BYTE *redundant)
+/* ----- Ssfdc_D_ReadCisSect() ------------------------------------------ */
+int Ssfdc_D_ReadCisSect(struct us_data *us, BYTE *buf, BYTE *redundant)
{
- BYTE zone,sector;
+ BYTE zone, sector;
WORD block;
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
-
- zone=Media.Zone; block=Media.PhyBlock; sector=Media.Sector;
- Media.Zone=0;
- Media.PhyBlock=CisArea.PhyBlock;
- Media.Sector=CisArea.Sector;
-
- if (Ssfdc_D_ReadSect(us,buf,redundant))
- {
- Media.Zone=zone; Media.PhyBlock=block; Media.Sector=sector;
- return(ERROR);
+
+ zone = Media.Zone; block = Media.PhyBlock; sector = Media.Sector;
+ Media.Zone = 0;
+ Media.PhyBlock = CisArea.PhyBlock;
+ Media.Sector = CisArea.Sector;
+
+ if (Ssfdc_D_ReadSect(us, buf, redundant)) {
+ Media.Zone = zone; Media.PhyBlock = block; Media.Sector = sector;
+ return ERROR;
}
- Media.Zone=zone; Media.PhyBlock=block; Media.Sector=sector;
- return(SMSUCCESS);
+ Media.Zone = zone; Media.PhyBlock = block; Media.Sector = sector;
+ return SMSUCCESS;
}
-/*
-////----- Ssfdc_D_WriteRedtMode() ----------------------------------------
-//void Ssfdc_D_WriteRedtMode(void)
-//{
-// _Set_D_SsfdcRdCmd (RST_CHIP);
-// _Check_D_SsfdcBusy (BUSY_RESET);
-// _Set_D_SsfdcRdCmd (READ_REDT);
-// _Check_D_SsfdcBusy (BUSY_READ);
-// _Set_D_SsfdcRdStandby ();
-//}
-//
-////----- Ssfdc_D_ReadID() -----------------------------------------------
-//void Ssfdc_D_ReadID(BYTE *buf, BYTE ReadID)
-//{
-// _Set_D_SsfdcRdCmd (ReadID);
-// _Set_D_SsfdcRdChip ();
-// _Read_D_SsfdcByte (buf++);
-// _Read_D_SsfdcByte (buf++);
-// _Read_D_SsfdcByte (buf++);
-// _Read_D_SsfdcByte (buf);
-// _Set_D_SsfdcRdStandby ();
-//}
-*/
-// 6250 CMD 1
-//----- Ssfdc_D_ReadSect() ---------------------------------------------
-int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant)
+
+/* 6250 CMD 1 */
+/* ----- Ssfdc_D_ReadSect() --------------------------------------------- */
+int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf, BYTE *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
WORD addr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- {
+ if (result != USB_STOR_XFER_GOOD) {
printk("Load SM RW Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
+ addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
- // Read sect data
+ /* Read sect data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x200;
@@ -271,14 +224,14 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant)
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x02;
bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr/0x0100);
- bcb->CDB[2] = Media.Zone/2;
+ bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[2] = Media.Zone / 2;
result = ENE_SendScsiCmd(us, FDIR_READ, buf, 0);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
- // Read redundant
+ /* Read redundant */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x10;
@@ -286,8 +239,8 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant)
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x03;
bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr/0x0100);
- bcb->CDB[2] = Media.Zone/2;
+ bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[2] = Media.Zone / 2;
bcb->CDB[8] = 0;
bcb->CDB[9] = 1;
@@ -298,25 +251,23 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant)
return USB_STOR_TRANSPORT_GOOD;
}
-//----- Ssfdc_D_ReadBlock() ---------------------------------------------
-int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
+/* ----- Ssfdc_D_ReadBlock() --------------------------------------------- */
+int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf, BYTE *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
WORD addr;
- //printk("Ssfdc_D_ReadBlock\n");
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- {
+ if (result != USB_STOR_XFER_GOOD) {
printk("Load SM RW Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
+ addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
- // Read sect data
+ /* Read sect data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x200*count;
@@ -324,14 +275,14 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x02;
bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr/0x0100);
- bcb->CDB[2] = Media.Zone/2;
+ bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[2] = Media.Zone / 2;
result = ENE_SendScsiCmd(us, FDIR_READ, buf, 0);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
- // Read redundant
+ /* Read redundant */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x10;
@@ -339,8 +290,8 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x03;
bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr/0x0100);
- bcb->CDB[2] = Media.Zone/2;
+ bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[2] = Media.Zone / 2;
bcb->CDB[8] = 0;
bcb->CDB[9] = 1;
@@ -350,190 +301,27 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
return USB_STOR_TRANSPORT_GOOD;
}
-/*
-////----- Ssfdc_D_ReadSect_DMA() ---------------------------------------------
-//int Ssfdc_D_ReadSect_DMA(PFDO_DEVICE_EXTENSION fdoExt, BYTE *buf,BYTE *redundant)
-//{
-// WORD SectByteCount, addr;
-// DWORD Buffer[4];
-// WORD len;
-//
-// if (!_Hw_D_ChkCardIn())
-// return(ERROR);
-// addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
-// addr=addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
-// // cycle starting address
-// SM_STARTADDR_LSB = 0x00;
-// SM_STARTADDR_IISB = (BYTE)addr;
-// SM_STARTADDR_IIISB = (BYTE)(addr/0x0100);
-// SM_STARTADDR_MSB = Media.Zone/2;
-//
-// //Sector byte count = 0x200(DMA)
-// SectByteCount = 0x20f;
-// SM_BYTECNT_LO = (BYTE)SectByteCount;
-// SM_CMD_CTRL3 = (SM_CMD_CTRL3 & 0xFC) | (BYTE)(SectByteCount/0x0100);
-// if ( ((fdoExt->ChipID==READER_CB712)&&(fdoExt->RevID==CHIP_A)) || fdoExt->IsHibernate )
-// SM_FIFO_CTRL = (SM_APB08_MASK | SM_DMAEN_MASK | SM_DMA_UPSTREAM_MASK | SM_FIFOSHLDVLU_8_MASK);
-// else
-// SM_FIFO_CTRL = (SM_APB32_MASK | SM_DMAEN_MASK | SM_DMA_UPSTREAM_MASK | SM_FIFOSHLDVLU_8_MASK);
-//
-// _Hw_D_EccRdReset();
-// _Hw_D_EccRdStart();
-//
-// SM_CMD_CTRL1 = (SM_CMD_READ_1);
-// SM_CMD_CTRL1 = (SM_CMD_READ_1 | SM_CMD_START_BIT);
-//
-// SectByteCount = 0x1ff;
-// //SM_ReadDataWithDMA(fdoExt, buf, SectByteCount);
-// //_ReadRedt_D_SsfdcBuf(redundant);
-// len = 0x1000 - ((WORD)(buf) & 0x0FFF);
-// if (len < 0x200)
-// {
-// SM_ReadDataWithDMA(fdoExt, buf, len-1);
-// SM_ReadDataWithDMA(fdoExt, buf+len, SectByteCount-len);
-// //ENE_Print("Read DMA !!! buf1 = %p, len = %x, buf2 = %p\n", buf, len, buf+len);
-// }
-// else
-// SM_ReadDataWithDMA(fdoExt, buf, SectByteCount);
-//
-// if ( ((fdoExt->ChipID==READER_CB712)&&(fdoExt->RevID==CHIP_A)) || fdoExt->IsHibernate )
-// {
-// _ReadRedt_D_SsfdcBuf(redundant);
-// }
-// else
-// {
-// Buffer[0] = READ_PORT_DWORD(SM_REG_DATA);
-// Buffer[1] = READ_PORT_DWORD(SM_REG_DATA);
-// Buffer[2] = READ_PORT_DWORD(SM_REG_DATA);
-// Buffer[3] = READ_PORT_DWORD(SM_REG_DATA);
-// memcpy(redundant, Buffer, 0x10);
-// }
-//
-// while ( _Hw_D_ChkCardIn() )
-// {
-// if((READ_PORT_BYTE(SM_REG_INT_STATUS) & 0x10))
-// {
-// WRITE_PORT_BYTE(SM_REG_INT_STATUS, 0x10);
-// break;
-// }
-// }
-// _Hw_D_EccRdStop();
-// _Hw_D_SetRdStandby();
-// _Load_D_SsfdcRdHwECC(EVEN);
-//
-// _Calc_D_ECCdata(buf);
-// _Set_D_SsfdcRdStandby();
-//
-// if (!_Hw_D_ChkCardIn())
-// return(ERROR);
-// return(SMSUCCESS);
-//}
-//
-////----- Ssfdc_D_ReadSect_PIO() ---------------------------------------------
-//int Ssfdc_D_ReadSect_PIO(PFDO_DEVICE_EXTENSION fdoExt, BYTE *buf,BYTE *redundant)
-//{
-// _Set_D_SsfdcRdCmd(READ);
-// _Set_D_SsfdcRdAddr(EVEN);
-//
-// if (_Check_D_SsfdcBusy(BUSY_READ))
-// { _Reset_D_SsfdcErr(); return(ERROR); }
-//
-// _Start_D_SsfdcRdHwECC();
-// _Read_D_SsfdcBuf(buf);
-// _Stop_D_SsfdcRdHwECC();
-// _ReadRedt_D_SsfdcBuf(redundant);
-// _Load_D_SsfdcRdHwECC(EVEN);
-//
-// if (_Check_D_SsfdcBusy(BUSY_READ))
-// { _Reset_D_SsfdcErr(); return(ERROR); }
-//
-// _Calc_D_ECCdata(buf);
-// _Set_D_SsfdcRdStandby();
-// return(SMSUCCESS);
-//}
-
-// 6250 CMD 3
-//----- Ssfdc_D_WriteSect() --------------------------------------------
-int Ssfdc_D_WriteSect(PFDO_DEVICE_EXTENSION fdoExt, BYTE *buf,BYTE *redundant)
-{
- PBULK_CBW pBulkCbw = fdoExt->pBulkCbw;
- NTSTATUS ntStatus;
- WORD addr;
-
- //ENE_Print("SMILSUB --- Ssfdc_D_WriteSect\n");
- ENE_LoadBinCode(fdoExt, SM_RW_PATTERN);
-
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
-
- // Write sect data
- RtlZeroMemory(pBulkCbw, sizeof(struct _BULK_CBW));
- pBulkCbw->dCBWSignature = CBW_SIGNTURE;
- pBulkCbw->bCBWLun = CBW_LUN;
- pBulkCbw->dCBWDataTransferLength = 0x200;
- pBulkCbw->bmCBWFlags = 0x00;
- pBulkCbw->CBWCb[0] = 0xF0;
- pBulkCbw->CBWCb[1] = 0x04;
- //pBulkCbw->CBWCb[4] = (BYTE)addr;
- //pBulkCbw->CBWCb[3] = (BYTE)(addr/0x0100);
- //pBulkCbw->CBWCb[2] = Media.Zone/2;
- //pBulkCbw->CBWCb[5] = *(redundant+REDT_ADDR1H);
- //pBulkCbw->CBWCb[6] = *(redundant+REDT_ADDR1L);
- pBulkCbw->CBWCb[7] = (BYTE)addr;
- pBulkCbw->CBWCb[6] = (BYTE)(addr/0x0100);
- pBulkCbw->CBWCb[5] = Media.Zone/2;
- pBulkCbw->CBWCb[8] = *(redundant+REDT_ADDR1H);
- pBulkCbw->CBWCb[9] = *(redundant+REDT_ADDR1L);
-
- ntStatus = ENE_SendScsiCmd(fdoExt, FDIR_WRITE, buf);
-
- if (!NT_SUCCESS(ntStatus))
- return(ERROR);
-
-// // For Test
-// {
-// BYTE bf[0x200], rdd[0x10];
-// ULONG i;
-//
-// RtlZeroMemory(bf, 0x200);
-// RtlZeroMemory(rdd, 0x10);
-// ntStatus = SM_ReadBlock(fdoExt, bf, rdd);
-// for (i=0; i<0x200; i++)
-// {
-// if (buf[i] != bf[i])
-// ENE_Print("buf[%x] = %x, bf[%x] = %x\n", buf, bf);
-// }
-// if (!NT_SUCCESS(ntStatus))
-// ENE_Print("Error\n");
-// }
-
- return(SMSUCCESS);
-}
-*/
-//----- Ssfdc_D_CopyBlock() --------------------------------------------
-int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
+
+
+/* ----- Ssfdc_D_CopyBlock() -------------------------------------------- */
+int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf, BYTE *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- //PBULK_CBW pBulkCbw = fdoExt->pBulkCbw;
- //NTSTATUS ntStatus;
WORD ReadAddr, WriteAddr;
- //printk("Ssfdc_D_WriteSect --- ZONE = %x, ReadBlock = %x, WriteBlock = %x\n", Media.Zone, ReadBlock, WriteBlock);
-
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- {
+ if (result != USB_STOR_XFER_GOOD) {
printk("Load SM RW Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
- ReadAddr = (WORD)Media.Zone*Ssfdc.MaxBlocks+ReadBlock;
+ ReadAddr = (WORD)Media.Zone*Ssfdc.MaxBlocks + ReadBlock;
ReadAddr = ReadAddr*(WORD)Ssfdc.MaxSectors;
- WriteAddr = (WORD)Media.Zone*Ssfdc.MaxBlocks+WriteBlock;
+ WriteAddr = (WORD)Media.Zone*Ssfdc.MaxBlocks + WriteBlock;
WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors;
- // Write sect data
+ /* Write sect data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x200*count;
@@ -541,19 +329,17 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
bcb->CDB[0] = 0xF0;
bcb->CDB[1] = 0x08;
bcb->CDB[7] = (BYTE)WriteAddr;
- bcb->CDB[6] = (BYTE)(WriteAddr/0x0100);
- bcb->CDB[5] = Media.Zone/2;
- bcb->CDB[8] = *(redundant+REDT_ADDR1H);
- bcb->CDB[9] = *(redundant+REDT_ADDR1L);
+ bcb->CDB[6] = (BYTE)(WriteAddr / 0x0100);
+ bcb->CDB[5] = Media.Zone / 2;
+ bcb->CDB[8] = *(redundant + REDT_ADDR1H);
+ bcb->CDB[9] = *(redundant + REDT_ADDR1L);
bcb->CDB[10] = Media.Sector;
- if (ReadBlock != NO_ASSIGN)
- {
+ if (ReadBlock != NO_ASSIGN) {
bcb->CDB[4] = (BYTE)ReadAddr;
- bcb->CDB[3] = (BYTE)(ReadAddr/0x0100);
- bcb->CDB[2] = Media.Zone/2;
- }
- else
+ bcb->CDB[3] = (BYTE)(ReadAddr / 0x0100);
+ bcb->CDB[2] = Media.Zone / 2;
+ } else
bcb->CDB[11] = 1;
result = ENE_SendScsiCmd(us, FDIR_WRITE, buf, 0);
@@ -562,196 +348,25 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
return USB_STOR_TRANSPORT_GOOD;
}
-/*
-//----- Ssfdc_D_WriteBlock() --------------------------------------------
-int Ssfdc_D_WriteBlock(PFDO_DEVICE_EXTENSION fdoExt, WORD count, BYTE *buf,BYTE *redundant)
-{
- PBULK_CBW pBulkCbw = fdoExt->pBulkCbw;
- NTSTATUS ntStatus;
- WORD addr;
-
- //ENE_Print("SMILSUB --- Ssfdc_D_WriteSect\n");
- ENE_LoadBinCode(fdoExt, SM_RW_PATTERN);
-
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
-
- // Write sect data
- RtlZeroMemory(pBulkCbw, sizeof(struct _BULK_CBW));
- pBulkCbw->dCBWSignature = CBW_SIGNTURE;
- pBulkCbw->bCBWLun = CBW_LUN;
- pBulkCbw->dCBWDataTransferLength = 0x200*count;
- pBulkCbw->bmCBWFlags = 0x00;
- pBulkCbw->CBWCb[0] = 0xF0;
- pBulkCbw->CBWCb[1] = 0x04;
- pBulkCbw->CBWCb[7] = (BYTE)addr;
- pBulkCbw->CBWCb[6] = (BYTE)(addr/0x0100);
- pBulkCbw->CBWCb[5] = Media.Zone/2;
- pBulkCbw->CBWCb[8] = *(redundant+REDT_ADDR1H);
- pBulkCbw->CBWCb[9] = *(redundant+REDT_ADDR1L);
-
- ntStatus = ENE_SendScsiCmd(fdoExt, FDIR_WRITE, buf);
-
- if (!NT_SUCCESS(ntStatus))
- return(ERROR);
-
-// // For Test
-// {
-// BYTE bf[0x200], rdd[0x10];
-// ULONG i;
-//
-// RtlZeroMemory(bf, 0x200);
-// RtlZeroMemory(rdd, 0x10);
-// ntStatus = SM_ReadBlock(fdoExt, bf, rdd);
-// for (i=0; i<0x200; i++)
-// {
-// if (buf[i] != bf[i])
-// ENE_Print("buf[%x] = %x, bf[%x] = %x\n", buf, bf);
-// }
-// if (!NT_SUCCESS(ntStatus))
-// ENE_Print("Error\n");
-// }
-
- return(SMSUCCESS);
-}
-//
-////----- Ssfdc_D_WriteSect_DMA() --------------------------------------------
-//int Ssfdc_D_WriteSect_DMA(PFDO_DEVICE_EXTENSION fdoExt, BYTE *buf,BYTE *redundant)
-//{
-// WORD SectByteCount, addr;
-// DWORD Buffer[4];
-// WORD len;
-//
-// if (!_Hw_D_ChkCardIn())
-// return(ERROR);
-// addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
-// addr=addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
-// // cycle starting address
-// SM_STARTADDR_LSB = 0x00;
-// SM_STARTADDR_IISB = (BYTE)addr;
-// SM_STARTADDR_IIISB = (BYTE)(addr/0x0100);
-// SM_STARTADDR_MSB = Media.Zone/2;
-//
-// //Sector byte count (DMA)
-// SectByteCount = 0x20f;
-// SM_BYTECNT_LO = (BYTE)SectByteCount;
-// SM_CMD_CTRL3 = (SM_CMD_CTRL3 & 0xFC) | 0x20 | (BYTE)(SectByteCount/0x0100);
-// if ( ((fdoExt->ChipID==READER_CB712)&&(fdoExt->RevID==CHIP_A)) || fdoExt->IsHibernate )
-// SM_FIFO_CTRL = (SM_APB08_MASK | SM_DMAEN_MASK | SM_DMA_DOWNSTREAM_MASK | SM_FIFOSHLDVLU_8_MASK);
-// else
-// SM_FIFO_CTRL = (SM_APB32_MASK | SM_DMAEN_MASK | SM_DMA_DOWNSTREAM_MASK | SM_FIFOSHLDVLU_8_MASK);
-//
-// _Hw_D_EccRdReset();
-// _Hw_D_EccRdStart();
-//
-// SM_CMD_CTRL1 = SM_CMD_PAGPRGM_TRUE;
-// SM_CMD_CTRL1 = (SM_CMD_PAGPRGM_TRUE | SM_CMD_START_BIT);
-//
-// SectByteCount = 0x1ff;
-// //SM_WriteDataWithDMA(fdoExt, buf, SectByteCount);
-// //_WriteRedt_D_SsfdcBuf(redundant);
-// len = 0x1000 - ((WORD)(buf) & 0x0FFF);
-// if (len < 0x200)
-// {
-// SM_WriteDataWithDMA(fdoExt, buf, len-1);
-// SM_WriteDataWithDMA(fdoExt, buf+len, SectByteCount-len);
-// //ENE_Print("Read DMA !!! buf1 = %p, len = %x, buf2 = %p\n", buf, len, buf+len);
-// }
-// else
-// SM_WriteDataWithDMA(fdoExt, buf, SectByteCount);
-//
-// //T1 = (ULONGLONG)buf & 0xFFFFFFFFFFFFF000;
-// //T2 = ((ULONGLONG)buf + 0x1FF) & 0xFFFFFFFFFFFFF000;
-// //if (T1 != T2)
-// // ENE_Print("Ssfdc_D_WriteSect_DMA !!! buf = %p, T1 = %p, T2 = %p\n", buf, T1, T2);
-// //if (T2-T1)
-// //{
-// // l1 = (WORD)(T2 - (ULONGLONG)buf);
-// // SM_WriteDataWithDMA(fdoExt, buf, l1-1);
-// // SM_WriteDataWithDMA(fdoExt, (PBYTE)T2, SectByteCount-l1);
-// //}
-// //else
-// // SM_WriteDataWithDMA(fdoExt, buf, SectByteCount);
-//
-// if ( ((fdoExt->ChipID==READER_CB712)&&(fdoExt->RevID==CHIP_A)) || fdoExt->IsHibernate )
-// {
-// _WriteRedt_D_SsfdcBuf(redundant);
-// }
-// else
-// {
-// memcpy(Buffer, redundant, 0x10);
-// WRITE_PORT_DWORD(SM_REG_DATA, Buffer[0]);
-// WRITE_PORT_DWORD(SM_REG_DATA, Buffer[1]);
-// WRITE_PORT_DWORD(SM_REG_DATA, Buffer[2]);
-// WRITE_PORT_DWORD(SM_REG_DATA, Buffer[3]);
-// }
-//
-// while ( _Hw_D_ChkCardIn() )
-// {
-// if ((READ_PORT_BYTE(SM_REG_INT_STATUS) & 0x10))
-// {
-// WRITE_PORT_BYTE(SM_REG_INT_STATUS, 0x10);
-// break;
-// }
-// }
-// _Hw_D_EccRdStop();
-// _Hw_D_SetRdStandby();
-//
-// _Set_D_SsfdcWrStandby();
-// _Set_D_SsfdcRdStandby();
-// if (!_Hw_D_ChkCardIn())
-// return(ERROR);
-//
-// return(SMSUCCESS);
-//}
-//
-////----- Ssfdc_D_WriteSect_PIO() --------------------------------------------
-//int Ssfdc_D_WriteSect_PIO(PFDO_DEVICE_EXTENSION fdoExt, BYTE *buf,BYTE *redundant)
-//{
-// _Calc_D_ECCdata(buf);
-// _Set_D_SsfdcWrCmd(WRDATA);
-// _Set_D_SsfdcWrAddr(EVEN);
-// _Start_D_SsfdcWrHwECC();
-//
-// _Write_D_SsfdcBuf(buf);
-//
-// _Load_D_SsfdcWrHwECC(EVEN);
-// _Set_D_ECCdata(EVEN,redundant);
-//
-// _WriteRedt_D_SsfdcBuf(redundant);
-//
-// _Set_D_SsfdcWrCmd(WRITE);
-//
-// if (_Check_D_SsfdcBusy(BUSY_PROG))
-// { _Reset_D_SsfdcErr(); return(ERROR); }
-//
-// _Set_D_SsfdcWrStandby();
-// _Set_D_SsfdcRdStandby();
-// return(SMSUCCESS);
-//}
-*/
-//----- Ssfdc_D_WriteSectForCopy() -------------------------------------
+
+/* ----- Ssfdc_D_WriteSectForCopy() ------------------------------------- */
int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- //PBULK_CBW pBulkCbw = fdoExt->pBulkCbw;
- //NTSTATUS ntStatus;
- WORD addr;
+ WORD addr;
- //printk("SMILSUB --- Ssfdc_D_WriteSectForCopy\n");
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- {
+ if (result != USB_STOR_XFER_GOOD) {
printk("Load SM RW Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
+ addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
- // Write sect data
+ /* Write sect data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x200;
@@ -759,10 +374,10 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
bcb->CDB[0] = 0xF0;
bcb->CDB[1] = 0x04;
bcb->CDB[7] = (BYTE)addr;
- bcb->CDB[6] = (BYTE)(addr/0x0100);
- bcb->CDB[5] = Media.Zone/2;
- bcb->CDB[8] = *(redundant+REDT_ADDR1H);
- bcb->CDB[9] = *(redundant+REDT_ADDR1L);
+ bcb->CDB[6] = (BYTE)(addr / 0x0100);
+ bcb->CDB[5] = Media.Zone / 2;
+ bcb->CDB[8] = *(redundant + REDT_ADDR1H);
+ bcb->CDB[9] = *(redundant + REDT_ADDR1L);
result = ENE_SendScsiCmd(us, FDIR_WRITE, buf, 0);
if (result != USB_STOR_XFER_GOOD)
@@ -771,8 +386,8 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
return USB_STOR_TRANSPORT_GOOD;
}
-// 6250 CMD 5
-//----- Ssfdc_D_EraseBlock() -------------------------------------------
+/* 6250 CMD 5 */
+/* ----- Ssfdc_D_EraseBlock() ------------------------------------------- */
int Ssfdc_D_EraseBlock(struct us_data *us)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
@@ -780,14 +395,13 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
WORD addr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- {
+ if (result != USB_STOR_XFER_GOOD) {
printk("Load SM RW Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
- addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
- addr=addr*(WORD)Ssfdc.MaxSectors;
+ addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(WORD)Ssfdc.MaxSectors;
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -796,8 +410,8 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
bcb->CDB[0] = 0xF2;
bcb->CDB[1] = 0x06;
bcb->CDB[7] = (BYTE)addr;
- bcb->CDB[6] = (BYTE)(addr/0x0100);
- bcb->CDB[5] = Media.Zone/2;
+ bcb->CDB[6] = (BYTE)(addr / 0x0100);
+ bcb->CDB[5] = Media.Zone / 2;
result = ENE_SendScsiCmd(us, FDIR_READ, NULL, 0);
if (result != USB_STOR_XFER_GOOD)
@@ -806,24 +420,23 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
return USB_STOR_TRANSPORT_GOOD;
}
-// 6250 CMD 2
-//----- Ssfdc_D_ReadRedtData() -----------------------------------------
+/* 6250 CMD 2 */
+/*----- Ssfdc_D_ReadRedtData() ----------------------------------------- */
int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
WORD addr;
- BYTE *buf;
+ BYTE *buf;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- {
+ if (result != USB_STOR_XFER_GOOD) {
printk("Load SM RW Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
+ addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -832,13 +445,12 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x03;
bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr/0x0100);
- bcb->CDB[2] = Media.Zone/2;
+ bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[2] = Media.Zone / 2;
bcb->CDB[8] = 0;
bcb->CDB[9] = 1;
buf = kmalloc(0x10, GFP_KERNEL);
- //result = ENE_SendScsiCmd(us, FDIR_READ, redundant, 0);
result = ENE_SendScsiCmd(us, FDIR_READ, buf, 0);
memcpy(redundant, buf, 0x10);
kfree(buf);
@@ -848,25 +460,22 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
return USB_STOR_TRANSPORT_GOOD;
}
-// 6250 CMD 4
-//----- Ssfdc_D_WriteRedtData() ----------------------------------------
+/* 6250 CMD 4 */
+/* ----- Ssfdc_D_WriteRedtData() ---------------------------------------- */
int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- //PBULK_CBW pBulkCbw = fdoExt->pBulkCbw;
- //NTSTATUS ntStatus;
WORD addr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- {
+ if (result != USB_STOR_XFER_GOOD) {
printk("Load SM RW Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
+ addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -875,10 +484,10 @@ int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
bcb->CDB[0] = 0xF2;
bcb->CDB[1] = 0x05;
bcb->CDB[7] = (BYTE)addr;
- bcb->CDB[6] = (BYTE)(addr/0x0100);
- bcb->CDB[5] = Media.Zone/2;
- bcb->CDB[8] = *(redundant+REDT_ADDR1H);
- bcb->CDB[9] = *(redundant+REDT_ADDR1L);
+ bcb->CDB[6] = (BYTE)(addr / 0x0100);
+ bcb->CDB[5] = Media.Zone / 2;
+ bcb->CDB[8] = *(redundant + REDT_ADDR1H);
+ bcb->CDB[9] = *(redundant + REDT_ADDR1L);
result = ENE_SendScsiCmd(us, FDIR_READ, NULL, 0);
if (result != USB_STOR_XFER_GOOD)
@@ -887,609 +496,167 @@ int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
return USB_STOR_TRANSPORT_GOOD;
}
-//----- Ssfdc_D_CheckStatus() ------------------------------------------
+/* ----- Ssfdc_D_CheckStatus() ------------------------------------------ */
int Ssfdc_D_CheckStatus(void)
{
- // Driver ¤£°µ
- return(SMSUCCESS);
- //_Set_D_SsfdcRdCmd(RDSTATUS);
- //
- //if (_Check_D_SsfdcStatus())
- //{ _Set_D_SsfdcRdStandby(); return(ERROR); }
- //
- //_Set_D_SsfdcRdStandby();
- //return(SMSUCCESS);
+ return SMSUCCESS;
}
-/*
-////NAND Memory (SmartMedia) Control Subroutine for Read Data
-////----- _Set_D_SsfdcRdCmd() --------------------------------------------
-//void _Set_D_SsfdcRdCmd(BYTE cmd)
-//{
-// _Hw_D_SetRdCmd();
-// _Hw_D_OutData(cmd);
-// _Hw_D_SetRdData();
-//}
-//
-////----- _Set_D_SsfdcRdAddr() -------------------------------------------
-//void _Set_D_SsfdcRdAddr(BYTE add)
-//{
-// WORD addr;
-// SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
-// ADDRESS_T bb = (ADDRESS_T) &Media;
-//
-// addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
-// addr=addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
-//
-// //if ((Ssfdc.Attribute &MPS)==PS256) // for 256byte/page
-// // addr=addr*2+(WORD)add;
-//
-// _Hw_D_SetRdAddr();
-// _Hw_D_OutData(0x00);
-// _Hw_D_OutData((BYTE)addr);
-// _Hw_D_OutData((BYTE)(addr/0x0100));
-//
-// if ((Ssfdc.Attribute &MADC)==AD4CYC)
-// _Hw_D_OutData((BYTE)(Media.Zone/2)); // Patch
-//
-// _Hw_D_SetRdData();
-//}
-//
-////----- _Set_D_SsfdcRdChip() -------------------------------------------
-//void _Set_D_SsfdcRdChip(void)
-//{
-// _Hw_D_SetRdAddr();
-// _Hw_D_OutData(0x00);
-// _Hw_D_SetRdData();
-//}
-//
-////----- _Set_D_SsfdcRdStandby() ----------------------------------------
-//void _Set_D_SsfdcRdStandby(void)
-//{
-// _Hw_D_SetRdStandby();
-//}
-//
-////----- _Start_D_SsfdcRdHwECC() ----------------------------------------
-//void _Start_D_SsfdcRdHwECC(void)
-//{
-//#ifdef HW_ECC_SUPPORTED
-// _Hw_D_EccRdReset();
-// _Hw_D_InData();
-// _Hw_D_EccRdStart();
-//#endif
-//}
-//
-////----- _Stop_D_SsfdcRdHwECC() -----------------------------------------
-//void _Stop_D_SsfdcRdHwECC(void)
-//{
-//#ifdef HW_ECC_SUPPORTED
-// _Hw_D_EccRdStop();
-//#endif
-//}
-//
-////----- _Load_D_SsfdcRdHwECC() -----------------------------------------
-//void _Load_D_SsfdcRdHwECC(BYTE add)
-//{
-//#ifdef HW_ECC_SUPPORTED
-// _Hw_D_EccRdRead();
-// //if (!(add==ODD && (Ssfdc.Attribute &MPS)==PS256))
-// {
-// EccBuf[0]=_Hw_D_InData();
-// EccBuf[1]=_Hw_D_InData();
-// EccBuf[2]=_Hw_D_InData();
-// }
-//
-// //if (!(add==EVEN && (Ssfdc.Attribute &MPS)==PS256))
-// {
-// EccBuf[3]=_Hw_D_InData();
-// EccBuf[4]=_Hw_D_InData();
-// EccBuf[5]=_Hw_D_InData();
-// }
-//
-// _Hw_D_EccRdStop();
-//#endif
-//}
-//
-////NAND Memory (SmartMedia) Control Subroutine for Write Data
-//
-////----- _Set_D_SsfdcWrCmd() -----------------------------------------
-//void _Set_D_SsfdcWrCmd(BYTE cmd)
-//{
-// _Hw_D_SetWrCmd();
-// _Hw_D_OutData(cmd);
-// _Hw_D_SetWrData();
-//}
-//
-////----- _Set_D_SsfdcWrAddr() -----------------------------------------
-//void _Set_D_SsfdcWrAddr(BYTE add)
-//{
-// WORD addr;
-// SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
-// ADDRESS_T bb = (ADDRESS_T) &Media;
-//
-// addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
-// addr=addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
-//
-// //if ((Ssfdc.Attribute &MPS)==PS256) // for 256byte/page
-// // addr=addr*2+(WORD)add;
-//
-// _Hw_D_SetWrAddr();
-// _Hw_D_OutData(0x00);
-// _Hw_D_OutData((BYTE)addr);
-// _Hw_D_OutData((BYTE)(addr/0x0100));
-//
-// if ((Ssfdc.Attribute &MADC)==AD4CYC)
-// _Hw_D_OutData((BYTE)(Media.Zone/2)); // Patch
-//
-// _Hw_D_SetWrData();
-//}
-//
-////----- _Set_D_SsfdcWrBlock() -----------------------------------------
-//void _Set_D_SsfdcWrBlock(void)
-//{
-// WORD addr;
-// SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
-// ADDRESS_T bb = (ADDRESS_T) &Media;
-//
-// addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
-// addr=addr*(WORD)Ssfdc.MaxSectors;
-//
-// //if ((Ssfdc.Attribute &MPS)==PS256) // for 256byte/page
-// // addr=addr*2;
-//
-// _Hw_D_SetWrAddr();
-// _Hw_D_OutData((BYTE)addr);
-// _Hw_D_OutData((BYTE)(addr/0x0100));
-//
-// if ((Ssfdc.Attribute &MADC)==AD4CYC)
-// _Hw_D_OutData((BYTE)(Media.Zone/2)); // Patch
-//
-// _Hw_D_SetWrData();
-//}
-//
-////----- _Set_D_SsfdcWrStandby() -----------------------------------------
-//void _Set_D_SsfdcWrStandby(void)
-//{
-// _Hw_D_SetWrStandby();
-//}
-//
-////----- _Start_D_SsfdcWrHwECC() -----------------------------------------
-//void _Start_D_SsfdcWrHwECC(void)
-//{
-//#ifdef HW_ECC_SUPPORTED
-// _Hw_D_EccWrReset();
-// _Hw_D_InData();
-// _Hw_D_EccWrStart();
-//#endif
-//}
-//
-////----- _Load_D_SsfdcWrHwECC() -----------------------------------------
-//void _Load_D_SsfdcWrHwECC(BYTE add)
-//{
-//#ifdef HW_ECC_SUPPORTED
-// _Hw_D_EccWrRead();
-// //if (!(add==ODD && (Ssfdc.Attribute &MPS)==PS256))
-// {
-// EccBuf[0]=_Hw_D_InData();
-// EccBuf[1]=_Hw_D_InData();
-// EccBuf[2]=_Hw_D_InData();
-// }
-//
-// //if (!(add==EVEN && (Ssfdc.Attribute &MPS)==PS256))
-// {
-// EccBuf[3]=_Hw_D_InData();
-// EccBuf[4]=_Hw_D_InData();
-// EccBuf[5]=_Hw_D_InData();
-// }
-//
-// _Hw_D_EccWrStop();
-//#endif
-//}
-//
-////NAND Memory (SmartMedia) Control Subroutine
-////----- _Check_D_SsfdcBusy() -------------------------------------------
-//int _Check_D_SsfdcBusy(WORD time)
-//{
-// WORD count = 0;
-//
-// do {
-// if (!_Hw_D_ChkBusy())
-// return(SMSUCCESS);
-// EDelay(100);
-// count++;
-// } while (count<=time);
-//
-// return(ERROR);
-//}
-//
-////----- _Check_D_SsfdcStatus() -----------------------------------------
-//int _Check_D_SsfdcStatus(void)
-//{
-// if (_Hw_D_InData() & WR_FAIL)
-// return(ERROR);
-//
-// return(SMSUCCESS);
-//}
-//
-//// For 712
-////----- _Reset_D_SsfdcErr() -----------------------------------------
-//void _Reset_D_SsfdcErr(void)
-//{
-// WORD count = 0;
-//
-// _Hw_D_SetRdCmd();
-// _Hw_D_OutData(RST_CHIP);
-// _Hw_D_SetRdData();
-//
-// do {
-// if (!_Hw_D_ChkBusy())
-// break;
-// EDelay(100);
-// count++;
-// } while (count<=BUSY_RESET);
-//
-// _Hw_D_SetRdStandby();
-//}
-//
-////NAND Memory (SmartMedia) Buffer Data Xfer Subroutine
-////----- SM_ReadDataWithDMA() -----------------------------------------
-//void SM_ReadDataWithDMA(PFDO_DEVICE_EXTENSION fdoExt, BYTE *databuf, WORD SectByteCount)
-//{
-// PHYSICAL_ADDRESS Addr;
-// LARGE_INTEGER ptimeout ;
-//
-// KeClearEvent(&fdoExt->SM_DMADoneEvent);
-//
-// Addr = MmGetPhysicalAddress(databuf);
-//
-// WRITE_PORT_DWORD(SM_DMA_ADDR_REG, (DWORD)Addr.LowPart);
-// WRITE_PORT_BYTE(SM_DMA_DATA_CTRL, 0);
-// WRITE_PORT_WORD(SM_DMA_BYTE_COUNT_REG, SectByteCount);
-//
-// while ( _Hw_D_ChkCardIn() )
-// {
-// if ((READ_PORT_BYTE(SM_REG_FIFO_STATUS) & 0x80))
-// break;
-// }
-// if (!_Hw_D_ChkCardIn()) return;
-// WRITE_PORT_BYTE(SM_DMA_DATA_CTRL, 0x01);
-//
-// ptimeout.QuadPart = 2000 * (-10000); // 2 sec
-// KeWaitForSingleObject(&fdoExt->SM_DMADoneEvent, Executive, KernelMode, FALSE, &ptimeout);
-// _Hw_D_SetDMAIntMask();
-//}
-//
-////----- SM_WriteDataWithDMA() -----------------------------------------
-//void SM_WriteDataWithDMA(PFDO_DEVICE_EXTENSION fdoExt, BYTE *databuf, WORD SectByteCount)
-//{
-// PHYSICAL_ADDRESS Addr;
-// LARGE_INTEGER ptimeout ;
-//
-// KeClearEvent(&fdoExt->SM_DMADoneEvent);
-//
-// Addr = MmGetPhysicalAddress(databuf);
-//
-// WRITE_PORT_DWORD(SM_DMA_ADDR_REG, (DWORD)Addr.LowPart);
-// WRITE_PORT_BYTE(SM_DMA_DATA_CTRL, 2);
-// WRITE_PORT_WORD(SM_DMA_BYTE_COUNT_REG, SectByteCount);
-//
-// while ( _Hw_D_ChkCardIn() )
-// {
-// if ((READ_PORT_BYTE(SM_REG_FIFO_STATUS) & 0x40))
-// break;
-// }
-// if (!_Hw_D_ChkCardIn()) return;
-// WRITE_PORT_BYTE(SM_DMA_DATA_CTRL, 0x03);
-//
-// ptimeout.QuadPart = 2000 * (-10000); // 2 sec
-// KeWaitForSingleObject(&fdoExt->SM_DMADoneEvent, Executive, KernelMode, FALSE, &ptimeout);
-// _Hw_D_SetDMAIntMask();
-//}
-//
-////----- _Read_D_SsfdcBuf() -----------------------------------------
-//void _Read_D_SsfdcBuf(BYTE *databuf)
-//{
-// int i;
-//
-// //for(i=0x0000;i<(((Ssfdc.Attribute &MPS)==PS256)?0x0100:0x0200);i++)
-// for(i=0; i<0x200; i++)
-// *databuf++ =_Hw_D_InData();
-//}
-//
-////----- _Write_D_SsfdcBuf() -----------------------------------------
-//void _Write_D_SsfdcBuf(BYTE *databuf)
-//{
-// int i;
-//
-// //for(i=0x0000;i<(((Ssfdc.Attribute &MPS)==PS256)?0x0100:0x0200);i++)
-// for(i=0; i<0x200; i++)
-// _Hw_D_OutData(*databuf++);
-//}
-//
-////----- _Read_D_SsfdcByte() -----------------------------------------
-//void _Read_D_SsfdcByte(BYTE *databuf)
-//{
-// *databuf=(BYTE)_Hw_D_InData();
-//}
-//
-////----- _ReadRedt_D_SsfdcBuf() -----------------------------------------
-//void _ReadRedt_D_SsfdcBuf(BYTE *redundant)
-//{
-// char i;
-//
-// //for(i=0x00;i<(((Ssfdc.Attribute &MPS)==PS256)?0x08:0x10);i++)
-// for(i=0; i<0x10; i++)
-// redundant[i] =_Hw_D_InData();
-//}
-//
-////----- _WriteRedt_D_SsfdcBuf() -----------------------------------------
-//void _WriteRedt_D_SsfdcBuf(BYTE *redundant)
-//{
-// char i;
-//
-// //for(i=0x00;i<(((Ssfdc.Attribute &MPS)==PS256)?0x08:0x10);i++)
-// for(i=0; i<0x10; i++)
-// _Hw_D_OutData(*redundant++);
-//}
-*/
-//SmartMedia ID Code Check & Mode Set Subroutine
-//----- Set_D_SsfdcModel() ---------------------------------------------
+
+
+
+/* SmartMedia ID Code Check & Mode Set Subroutine
+ * ----- Set_D_SsfdcModel() ---------------------------------------------
+ */
int Set_D_SsfdcModel(BYTE dcode)
{
- switch (_Check_D_DevCode(dcode)) {
- case SSFDC1MB:
- Ssfdc.Model = SSFDC1MB;
- Ssfdc.Attribute = FLASH | AD3CYC | BS16 | PS256;
- Ssfdc.MaxZones = 1;
- Ssfdc.MaxBlocks = 256;
- Ssfdc.MaxLogBlocks = 250;
- Ssfdc.MaxSectors = 8;
- break;
- case SSFDC2MB:
- Ssfdc.Model = SSFDC2MB;
- Ssfdc.Attribute = FLASH | AD3CYC | BS16 | PS256;
- Ssfdc.MaxZones = 1;
- Ssfdc.MaxBlocks = 512;
- Ssfdc.MaxLogBlocks = 500;
- Ssfdc.MaxSectors = 8;
- break;
- case SSFDC4MB:
- Ssfdc.Model = SSFDC4MB;
- Ssfdc.Attribute = FLASH | AD3CYC | BS16 | PS512;
- Ssfdc.MaxZones = 1;
- Ssfdc.MaxBlocks = 512;
- Ssfdc.MaxLogBlocks = 500;
- Ssfdc.MaxSectors = 16;
- break;
- case SSFDC8MB:
- Ssfdc.Model = SSFDC8MB;
- Ssfdc.Attribute = FLASH | AD3CYC | BS16 | PS512;
- Ssfdc.MaxZones = 1;
- Ssfdc.MaxBlocks = 1024;
- Ssfdc.MaxLogBlocks = 1000;
- Ssfdc.MaxSectors = 16;
- break;
- case SSFDC16MB:
- Ssfdc.Model = SSFDC16MB;
- Ssfdc.Attribute = FLASH | AD3CYC | BS32 | PS512;
- Ssfdc.MaxZones = 1;
- Ssfdc.MaxBlocks = 1024;
- Ssfdc.MaxLogBlocks = 1000;
- Ssfdc.MaxSectors = 32;
- break;
- case SSFDC32MB:
- Ssfdc.Model = SSFDC32MB;
- Ssfdc.Attribute = FLASH | AD3CYC | BS32 | PS512;
- Ssfdc.MaxZones = 2;
- Ssfdc.MaxBlocks = 1024;
- Ssfdc.MaxLogBlocks = 1000;
- Ssfdc.MaxSectors = 32;
- break;
- case SSFDC64MB:
- Ssfdc.Model = SSFDC64MB;
- Ssfdc.Attribute = FLASH | AD4CYC | BS32 | PS512;
- Ssfdc.MaxZones = 4;
- Ssfdc.MaxBlocks = 1024;
- Ssfdc.MaxLogBlocks = 1000;
- Ssfdc.MaxSectors = 32;
- break;
- case SSFDC128MB:
- Ssfdc.Model = SSFDC128MB;
- Ssfdc.Attribute = FLASH | AD4CYC | BS32 | PS512;
- Ssfdc.MaxZones = 8;
- Ssfdc.MaxBlocks = 1024;
- Ssfdc.MaxLogBlocks = 1000;
- Ssfdc.MaxSectors = 32;
- break;
- case SSFDC256MB:
- Ssfdc.Model = SSFDC256MB;
- Ssfdc.Attribute = FLASH | AD4CYC | BS32 | PS512;
- Ssfdc.MaxZones = 16;
- Ssfdc.MaxBlocks = 1024;
- Ssfdc.MaxLogBlocks = 1000;
- Ssfdc.MaxSectors = 32;
- break;
- case SSFDC512MB:
- Ssfdc.Model = SSFDC512MB;
- Ssfdc.Attribute = FLASH | AD4CYC | BS32 | PS512;
- Ssfdc.MaxZones = 32;
- Ssfdc.MaxBlocks = 1024;
- Ssfdc.MaxLogBlocks = 1000;
- Ssfdc.MaxSectors = 32;
- break;
- case SSFDC1GB:
- Ssfdc.Model = SSFDC1GB;
- Ssfdc.Attribute = FLASH | AD4CYC | BS32 | PS512;
- Ssfdc.MaxZones = 64;
- Ssfdc.MaxBlocks = 1024;
- Ssfdc.MaxLogBlocks = 1000;
- Ssfdc.MaxSectors = 32;
- break;
- case SSFDC2GB:
- Ssfdc.Model = SSFDC2GB;
- Ssfdc.Attribute = FLASH | AD4CYC | BS32 | PS512;
- Ssfdc.MaxZones = 128;
- Ssfdc.MaxBlocks = 1024;
- Ssfdc.MaxLogBlocks = 1000;
- Ssfdc.MaxSectors = 32;
- break;
- default:
- Ssfdc.Model = NOSSFDC;
- return(ERROR);
- }
-
- return(SMSUCCESS);
+ switch (_Check_D_DevCode(dcode)) {
+ case SSFDC1MB:
+ Ssfdc.Model = SSFDC1MB;
+ Ssfdc.Attribute = FLASH | AD3CYC | BS16 | PS256;
+ Ssfdc.MaxZones = 1;
+ Ssfdc.MaxBlocks = 256;
+ Ssfdc.MaxLogBlocks = 250;
+ Ssfdc.MaxSectors = 8;
+ break;
+ case SSFDC2MB:
+ Ssfdc.Model = SSFDC2MB;
+ Ssfdc.Attribute = FLASH | AD3CYC | BS16 | PS256;
+ Ssfdc.MaxZones = 1;
+ Ssfdc.MaxBlocks = 512;
+ Ssfdc.MaxLogBlocks = 500;
+ Ssfdc.MaxSectors = 8;
+ break;
+ case SSFDC4MB:
+ Ssfdc.Model = SSFDC4MB;
+ Ssfdc.Attribute = FLASH | AD3CYC | BS16 | PS512;
+ Ssfdc.MaxZones = 1;
+ Ssfdc.MaxBlocks = 512;
+ Ssfdc.MaxLogBlocks = 500;
+ Ssfdc.MaxSectors = 16;
+ break;
+ case SSFDC8MB:
+ Ssfdc.Model = SSFDC8MB;
+ Ssfdc.Attribute = FLASH | AD3CYC | BS16 | PS512;
+ Ssfdc.MaxZones = 1;
+ Ssfdc.MaxBlocks = 1024;
+ Ssfdc.MaxLogBlocks = 1000;
+ Ssfdc.MaxSectors = 16;
+ break;
+ case SSFDC16MB:
+ Ssfdc.Model = SSFDC16MB;
+ Ssfdc.Attribute = FLASH | AD3CYC | BS32 | PS512;
+ Ssfdc.MaxZones = 1;
+ Ssfdc.MaxBlocks = 1024;
+ Ssfdc.MaxLogBlocks = 1000;
+ Ssfdc.MaxSectors = 32;
+ break;
+ case SSFDC32MB:
+ Ssfdc.Model = SSFDC32MB;
+ Ssfdc.Attribute = FLASH | AD3CYC | BS32 | PS512;
+ Ssfdc.MaxZones = 2;
+ Ssfdc.MaxBlocks = 1024;
+ Ssfdc.MaxLogBlocks = 1000;
+ Ssfdc.MaxSectors = 32;
+ break;
+ case SSFDC64MB:
+ Ssfdc.Model = SSFDC64MB;
+ Ssfdc.Attribute = FLASH | AD4CYC | BS32 | PS512;
+ Ssfdc.MaxZones = 4;
+ Ssfdc.MaxBlocks = 1024;
+ Ssfdc.MaxLogBlocks = 1000;
+ Ssfdc.MaxSectors = 32;
+ break;
+ case SSFDC128MB:
+ Ssfdc.Model = SSFDC128MB;
+ Ssfdc.Attribute = FLASH | AD4CYC | BS32 | PS512;
+ Ssfdc.MaxZones = 8;
+ Ssfdc.MaxBlocks = 1024;
+ Ssfdc.MaxLogBlocks = 1000;
+ Ssfdc.MaxSectors = 32;
+ break;
+ case SSFDC256MB:
+ Ssfdc.Model = SSFDC256MB;
+ Ssfdc.Attribute = FLASH | AD4CYC | BS32 | PS512;
+ Ssfdc.MaxZones = 16;
+ Ssfdc.MaxBlocks = 1024;
+ Ssfdc.MaxLogBlocks = 1000;
+ Ssfdc.MaxSectors = 32;
+ break;
+ case SSFDC512MB:
+ Ssfdc.Model = SSFDC512MB;
+ Ssfdc.Attribute = FLASH | AD4CYC | BS32 | PS512;
+ Ssfdc.MaxZones = 32;
+ Ssfdc.MaxBlocks = 1024;
+ Ssfdc.MaxLogBlocks = 1000;
+ Ssfdc.MaxSectors = 32;
+ break;
+ case SSFDC1GB:
+ Ssfdc.Model = SSFDC1GB;
+ Ssfdc.Attribute = FLASH | AD4CYC | BS32 | PS512;
+ Ssfdc.MaxZones = 64;
+ Ssfdc.MaxBlocks = 1024;
+ Ssfdc.MaxLogBlocks = 1000;
+ Ssfdc.MaxSectors = 32;
+ break;
+ case SSFDC2GB:
+ Ssfdc.Model = SSFDC2GB;
+ Ssfdc.Attribute = FLASH | AD4CYC | BS32 | PS512;
+ Ssfdc.MaxZones = 128;
+ Ssfdc.MaxBlocks = 1024;
+ Ssfdc.MaxLogBlocks = 1000;
+ Ssfdc.MaxSectors = 32;
+ break;
+ default:
+ Ssfdc.Model = NOSSFDC;
+ return ERROR;
+ }
+
+ return SMSUCCESS;
}
-//----- _Check_D_DevCode() ---------------------------------------------
+/* ----- _Check_D_DevCode() --------------------------------------------- */
BYTE _Check_D_DevCode(BYTE dcode)
{
- switch(dcode){
- case 0x6E:
- case 0xE8:
- case 0xEC: return(SSFDC1MB); // 8Mbit (1M) NAND
- case 0x64:
- case 0xEA: return(SSFDC2MB); // 16Mbit (2M) NAND
- case 0x6B:
- case 0xE3:
- case 0xE5: return(SSFDC4MB); // 32Mbit (4M) NAND
- case 0xE6: return(SSFDC8MB); // 64Mbit (8M) NAND
- case 0x73: return(SSFDC16MB); // 128Mbit (16M)NAND
- case 0x75: return(SSFDC32MB); // 256Mbit (32M)NAND
- case 0x76: return(SSFDC64MB); // 512Mbit (64M)NAND
- case 0x79: return(SSFDC128MB); // 1Gbit(128M)NAND
- case 0x71: return(SSFDC256MB);
- case 0xDC: return(SSFDC512MB);
- case 0xD3: return(SSFDC1GB);
- case 0xD5: return(SSFDC2GB);
- default: return(NOSSFDC);
- }
+ switch (dcode) {
+ case 0x6E:
+ case 0xE8:
+ case 0xEC: return SSFDC1MB; /* 8Mbit (1M) NAND */
+ case 0x64:
+ case 0xEA: return SSFDC2MB; /* 16Mbit (2M) NAND */
+ case 0x6B:
+ case 0xE3:
+ case 0xE5: return SSFDC4MB; /* 32Mbit (4M) NAND */
+ case 0xE6: return SSFDC8MB; /* 64Mbit (8M) NAND */
+ case 0x73: return SSFDC16MB; /* 128Mbit (16M)NAND */
+ case 0x75: return SSFDC32MB; /* 256Mbit (32M)NAND */
+ case 0x76: return SSFDC64MB; /* 512Mbit (64M)NAND */
+ case 0x79: return SSFDC128MB; /* 1Gbit(128M)NAND */
+ case 0x71: return SSFDC256MB;
+ case 0xDC: return SSFDC512MB;
+ case 0xD3: return SSFDC1GB;
+ case 0xD5: return SSFDC2GB;
+ default: return NOSSFDC;
+ }
}
-/*
-////SmartMedia Power Control Subroutine
-////----- Cnt_D_Reset() ----------------------------------------------
-//void Cnt_D_Reset(void)
-//{
-// _Hw_D_LedOff();
-// _Hw_D_SetRdStandby();
-// _Hw_D_VccOff();
-//}
-//
-////----- Cnt_D_PowerOn() ----------------------------------------------
-//int Cnt_D_PowerOn(void)
-//{
-// // No support 5V.
-// _Hw_D_EnableVcc3VOn(); // Set SM_REG_CTRL_5 Reg. to 3V
-// _Hw_D_VccOn();
-// _Hw_D_SetRdStandby();
-// _Wait_D_Timer(TIME_PON);
-//
-// if (_Hw_D_ChkPower())
-// {
-// _Hw_D_EnableOB(); // Set SM_REG_CTRL_5 Reg. to 0x83
-// return(SMSUCCESS);
-// }
-//
-// _Hw_D_SetVccOff();
-// return(ERROR);
-//}
-//
-////----- Cnt_D_PowerOff() ----------------------------------------------
-//void Cnt_D_PowerOff(void)
-//{
-// _Hw_D_SetRdStandby();
-// _Hw_D_SetVccOff();
-// _Hw_D_VccOff();
-//}
-//
-////----- Cnt_D_LedOn() ----------------------------------------------
-//void Cnt_D_LedOn(void)
-//{
-// _Hw_D_LedOn();
-//}
-//
-////----- Cnt_D_LedOff() ----------------------------------------------
-//void Cnt_D_LedOff(void)
-//{
-// _Hw_D_LedOff();
-//}
-//
-////----- Check_D_CntPower() ----------------------------------------------
-//int Check_D_CntPower(void)
-//{
-// if (_Hw_D_ChkPower())
-// return(SMSUCCESS); // Power On
-//
-// return(ERROR); // Power Off
-//}
-//
-////----- Check_D_CardExist() ----------------------------------------------
-//int Check_D_CardExist(void)
-//{
-// char i,j,k;
-//
-// if (!_Hw_D_ChkStatus()) // Not Status Change
-// if (_Hw_D_ChkCardIn())
-// return(SMSUCCESS); // Card exist in Slot
-//
-// for(i=0,j=0,k=0; i<16; i++) {
-// if (_Hw_D_ChkCardIn()) // Status Change
-// {
-// j++; k=0;
-// }
-// else
-// {
-// j=0; k++;
-// }
-//
-// if (j>3)
-// return(SMSUCCESS); // Card exist in Slot
-// if (k>3)
-// return(ERROR); // NO Card exist in Slot
-//
-// _Wait_D_Timer(TIME_CDCHK);
-// }
-//
-// return(ERROR);
-//}
-//
-////----- Check_D_CardStsChg() ----------------------------------------------
-//int Check_D_CardStsChg(void)
-//{
-// if (_Hw_D_ChkStatus())
-// return(ERROR); // Status Change
-//
-// return(SMSUCCESS); // Not Status Change
-//}
-//
-////----- Check_D_SsfdcWP() ----------------------------------------------
-//int Check_D_SsfdcWP(void)
-//{ // ERROR: WP, SMSUCCESS: Not WP
-// char i;
-//
-// for(i=0; i<8; i++) {
-// if (_Hw_D_ChkWP())
-// return(ERROR);
-// _Wait_D_Timer(TIME_WPCHK);
-// }
-//
-// return(SMSUCCESS);
-//}
-//
-*/
-//SmartMedia ECC Control Subroutine
-//----- Check_D_ReadError() ----------------------------------------------
+
+
+
+
+/* SmartMedia ECC Control Subroutine
+ * ----- Check_D_ReadError() ----------------------------------------------
+ */
int Check_D_ReadError(BYTE *redundant)
{
return SMSUCCESS;
}
-//----- Check_D_Correct() ----------------------------------------------
-int Check_D_Correct(BYTE *buf,BYTE *redundant)
+/* ----- Check_D_Correct() ---------------------------------------------- */
+int Check_D_Correct(BYTE *buf, BYTE *redundant)
{
return SMSUCCESS;
}
-//----- Check_D_CISdata() ----------------------------------------------
+/* ----- Check_D_CISdata() ---------------------------------------------- */
int Check_D_CISdata(BYTE *buf, BYTE *redundant)
{
BYTE cis[] = {0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02,
@@ -1516,86 +683,11 @@ int Check_D_CISdata(BYTE *buf, BYTE *redundant)
return ERROR;
}
-//----- Set_D_RightECC() ----------------------------------------------
+/* ----- Set_D_RightECC() ---------------------------------------------- */
void Set_D_RightECC(BYTE *redundant)
{
- // Driver ¤£°µ ECC Check
+ /* Driver ECC Check */
return;
- //StringCopy((char *)(redundant+0x0D),(char *)EccBuf,3);
- //StringCopy((char *)(redundant+0x08),(char *)(EccBuf+0x03),3);
}
-/*
-////----- _Calc_D_ECCdata() ----------------------------------------------
-//void _Calc_D_ECCdata(BYTE *buf)
-//{
-//#ifdef HW_ECC_SUPPORTED
-//#else
-// _Calculate_D_SwECC(buf,EccBuf);
-// buf+=0x0100;
-// _Calculate_D_SwECC(buf,EccBuf+0x03);
-//#endif
-//}
-//
-////----- _Set_D_ECCdata() ----------------------------------------------
-//void _Set_D_ECCdata(BYTE add,BYTE *redundant)
-//{
-// //if (add==EVEN && (Ssfdc.Attribute &MPS)==PS256)
-// // return;
-//
-// // for 256byte/page
-// StringCopy((char *)(redundant+0x0D),(char *)EccBuf,3);
-// StringCopy((char *)(redundant+0x08),(char *)(EccBuf+0x03),3);
-//}
-*/
-
-/*
-//----- SM_ReadBlock() ---------------------------------------------
-int SM_ReadBlock(PFDO_DEVICE_EXTENSION fdoExt, BYTE *buf,BYTE *redundant)
-{
- PBULK_CBW pBulkCbw = fdoExt->pBulkCbw;
- NTSTATUS ntStatus;
- WORD addr;
-
- ENE_LoadBinCode(fdoExt, SM_RW_PATTERN);
-
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
-
- // Read sect data
- RtlZeroMemory(pBulkCbw, sizeof(struct _BULK_CBW));
- pBulkCbw->dCBWSignature = CBW_SIGNTURE;
- pBulkCbw->bCBWLun = CBW_LUN;
- pBulkCbw->dCBWDataTransferLength = 0x200;
- pBulkCbw->bmCBWFlags = 0x80;
- pBulkCbw->CBWCb[0] = 0xF1;
- pBulkCbw->CBWCb[1] = 0x02;
- pBulkCbw->CBWCb[4] = (BYTE)addr;
- pBulkCbw->CBWCb[3] = (BYTE)(addr/0x0100);
- pBulkCbw->CBWCb[2] = Media.Zone/2;
-
- ntStatus = ENE_SendScsiCmd(fdoExt, FDIR_READ, buf);
-
- if (!NT_SUCCESS(ntStatus))
- return(ERROR);
-
- // Read redundant
- RtlZeroMemory(pBulkCbw, sizeof(struct _BULK_CBW));
- pBulkCbw->dCBWSignature = CBW_SIGNTURE;
- pBulkCbw->bCBWLun = CBW_LUN;
- pBulkCbw->dCBWDataTransferLength = 0x10;
- pBulkCbw->bmCBWFlags = 0x80;
- pBulkCbw->CBWCb[0] = 0xF1;
- pBulkCbw->CBWCb[1] = 0x03;
- pBulkCbw->CBWCb[4] = (BYTE)addr;
- pBulkCbw->CBWCb[3] = (BYTE)(addr/0x0100);
- pBulkCbw->CBWCb[2] = Media.Zone/2;
- pBulkCbw->CBWCb[5] = 0;
- pBulkCbw->CBWCb[6] = 1;
-
- ntStatus = ENE_SendScsiCmd(fdoExt, FDIR_READ, redundant);
-
- if (!NT_SUCCESS(ntStatus))
- return(ERROR);
-
- return(SMSUCCESS);
-}*/
+
+
diff --git a/drivers/staging/keucr/smscsi.c b/drivers/staging/keucr/smscsi.c
index a6fa77f9c48..58b55557118 100644
--- a/drivers/staging/keucr/smscsi.c
+++ b/drivers/staging/keucr/smscsi.c
@@ -9,36 +9,46 @@
#include "usb.h"
#include "scsiglue.h"
#include "transport.h"
-//#include "smcommon.h"
#include "smil.h"
-int SM_SCSI_Test_Unit_Ready (struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Inquiry (struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Mode_Sense (struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Start_Stop (struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Read_Capacity (struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Read (struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Write (struct us_data *us, struct scsi_cmnd *srb);
+int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb);
+int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb);
+int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb);
+int SM_SCSI_Start_Stop(struct us_data *us, struct scsi_cmnd *srb);
+int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb);
+int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb);
+int SM_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb);
extern PBYTE SMHostAddr;
extern DWORD ErrXDCode;
-//----- SM_SCSIIrp() --------------------------------------------------
+/* ----- SM_SCSIIrp() -------------------------------------------------- */
int SM_SCSIIrp(struct us_data *us, struct scsi_cmnd *srb)
{
int result;
us->SrbStatus = SS_SUCCESS;
- switch (srb->cmnd[0])
- {
- case TEST_UNIT_READY : result = SM_SCSI_Test_Unit_Ready (us, srb); break; //0x00
- case INQUIRY : result = SM_SCSI_Inquiry (us, srb); break; //0x12
- case MODE_SENSE : result = SM_SCSI_Mode_Sense (us, srb); break; //0x1A
- case READ_CAPACITY : result = SM_SCSI_Read_Capacity (us, srb); break; //0x25
- case READ_10 : result = SM_SCSI_Read (us, srb); break; //0x28
- case WRITE_10 : result = SM_SCSI_Write (us, srb); break; //0x2A
-
- default:
+ switch (srb->cmnd[0]) {
+ case TEST_UNIT_READY:
+ result = SM_SCSI_Test_Unit_Ready(us, srb);
+ break; /* 0x00 */
+ case INQUIRY:
+ result = SM_SCSI_Inquiry(us, srb);
+ break; /* 0x12 */
+ case MODE_SENSE:
+ result = SM_SCSI_Mode_Sense(us, srb);
+ break; /* 0x1A */
+ case READ_CAPACITY:
+ result = SM_SCSI_Read_Capacity(us, srb);
+ break; /* 0x25 */
+ case READ_10:
+ result = SM_SCSI_Read(us, srb);
+ break; /* 0x28 */
+ case WRITE_10:
+ result = SM_SCSI_Write(us, srb);
+ break; /* 0x2A */
+
+ default:
us->SrbStatus = SS_ILLEGAL_REQUEST;
result = USB_STOR_TRANSPORT_FAILED;
break;
@@ -46,25 +56,22 @@ int SM_SCSIIrp(struct us_data *us, struct scsi_cmnd *srb)
return result;
}
-//----- SM_SCSI_Test_Unit_Ready() --------------------------------------------------
+/* ----- SM_SCSI_Test_Unit_Ready() -------------------------------------------------- */
int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb)
{
- //printk("SM_SCSI_Test_Unit_Ready\n");
if (us->SM_Status.Insert && us->SM_Status.Ready)
return USB_STOR_TRANSPORT_GOOD;
- else
- {
+ else {
ENE_SMInit(us);
return USB_STOR_TRANSPORT_GOOD;
}
-
+
return USB_STOR_TRANSPORT_GOOD;
}
-//----- SM_SCSI_Inquiry() --------------------------------------------------
+/* ----- SM_SCSI_Inquiry() -------------------------------------------------- */
int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
{
- //printk("SM_SCSI_Inquiry\n");
BYTE data_ptr[36] = {0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x55, 0x53, 0x42, 0x32, 0x2E, 0x30, 0x20, 0x20, 0x43, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x31, 0x30, 0x30};
usb_stor_set_xfer_buf(us, data_ptr, 36, srb, TO_XFER_BUF);
@@ -72,11 +79,11 @@ int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
}
-//----- SM_SCSI_Mode_Sense() --------------------------------------------------
+/* ----- SM_SCSI_Mode_Sense() -------------------------------------------------- */
int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb)
{
- BYTE mediaNoWP[12] = {0x0b,0x00,0x00,0x08,0x00,0x00,0x71,0xc0,0x00,0x00,0x02,0x00};
- BYTE mediaWP[12] = {0x0b,0x00,0x80,0x08,0x00,0x00,0x71,0xc0,0x00,0x00,0x02,0x00};
+ BYTE mediaNoWP[12] = {0x0b, 0x00, 0x00, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
+ BYTE mediaWP[12] = {0x0b, 0x00, 0x80, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
if (us->SM_Status.WtP)
usb_stor_set_xfer_buf(us, mediaWP, 12, srb, TO_XFER_BUF);
@@ -87,7 +94,7 @@ int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb)
return USB_STOR_TRANSPORT_GOOD;
}
-//----- SM_SCSI_Read_Capacity() --------------------------------------------------
+/* ----- SM_SCSI_Read_Capacity() -------------------------------------------------- */
int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
{
unsigned int offset = 0;
@@ -100,45 +107,37 @@ int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
bl_len = 0x200;
bl_num = Ssfdc.MaxLogBlocks * Ssfdc.MaxSectors * Ssfdc.MaxZones - 1;
- //printk("MaxLogBlocks = %x\n", Ssfdc.MaxLogBlocks);
- //printk("MaxSectors = %x\n", Ssfdc.MaxSectors);
- //printk("MaxZones = %x\n", Ssfdc.MaxZones);
- //printk("bl_num = %x\n", bl_num);
us->bl_num = bl_num;
printk("bl_len = %x\n", bl_len);
printk("bl_num = %x\n", bl_num);
- //srb->request_bufflen = 8;
- buf[0] = (bl_num>>24) & 0xff;
- buf[1] = (bl_num>>16) & 0xff;
- buf[2] = (bl_num>> 8) & 0xff;
- buf[3] = (bl_num>> 0) & 0xff;
- buf[4] = (bl_len>>24) & 0xff;
- buf[5] = (bl_len>>16) & 0xff;
- buf[6] = (bl_len>> 8) & 0xff;
- buf[7] = (bl_len>> 0) & 0xff;
-
+ buf[0] = (bl_num >> 24) & 0xff;
+ buf[1] = (bl_num >> 16) & 0xff;
+ buf[2] = (bl_num >> 8) & 0xff;
+ buf[3] = (bl_num >> 0) & 0xff;
+ buf[4] = (bl_len >> 24) & 0xff;
+ buf[5] = (bl_len >> 16) & 0xff;
+ buf[6] = (bl_len >> 8) & 0xff;
+ buf[7] = (bl_len >> 0) & 0xff;
+
usb_stor_access_xfer_buf(us, buf, 8, srb, &sg, &offset, TO_XFER_BUF);
- //usb_stor_set_xfer_buf(us, buf, srb->request_bufflen, srb, TO_XFER_BUF);
return USB_STOR_TRANSPORT_GOOD;
}
-//----- SM_SCSI_Read() --------------------------------------------------
+/* ----- SM_SCSI_Read() -------------------------------------------------- */
int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
{
- //struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
- int result=0;
+ int result = 0;
PBYTE Cdb = srb->cmnd;
- DWORD bn = ((Cdb[2]<<24) & 0xff000000) | ((Cdb[3]<<16) & 0x00ff0000) |
- ((Cdb[4]<< 8) & 0x0000ff00) | ((Cdb[5]<< 0) & 0x000000ff);
- WORD blen = ((Cdb[7]<< 8) & 0xff00) | ((Cdb[8]<< 0) & 0x00ff);
+ DWORD bn = ((Cdb[2] << 24) & 0xff000000) | ((Cdb[3] << 16) & 0x00ff0000) |
+ ((Cdb[4] << 8) & 0x0000ff00) | ((Cdb[5] << 0) & 0x000000ff);
+ WORD blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
DWORD blenByte = blen * 0x200;
void *buf;
- //printk("SCSIOP_READ --- bn = %X, blen = %X, srb->use_sg = %X\n", bn, blen, srb->use_sg);
-
+
if (bn > us->bl_num)
return USB_STOR_TRANSPORT_ERROR;
@@ -157,19 +156,17 @@ int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
return USB_STOR_TRANSPORT_GOOD;
}
-//----- SM_SCSI_Write() --------------------------------------------------
+/* ----- SM_SCSI_Write() -------------------------------------------------- */
int SM_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
{
- //struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
- int result=0;
+ int result = 0;
PBYTE Cdb = srb->cmnd;
- DWORD bn = ((Cdb[2]<<24) & 0xff000000) | ((Cdb[3]<<16) & 0x00ff0000) |
- ((Cdb[4]<< 8) & 0x0000ff00) | ((Cdb[5]<< 0) & 0x000000ff);
- WORD blen = ((Cdb[7]<< 8) & 0xff00) | ((Cdb[8]<< 0) & 0x00ff);
+ DWORD bn = ((Cdb[2] << 24) & 0xff000000) | ((Cdb[3] << 16) & 0x00ff0000) |
+ ((Cdb[4] << 8) & 0x0000ff00) | ((Cdb[5] << 0) & 0x000000ff);
+ WORD blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
DWORD blenByte = blen * 0x200;
void *buf;
- //printk("SCSIOP_Write --- bn = %X, blen = %X, srb->use_sg = %X\n", bn, blen, srb->use_sg);
if (bn > us->bl_num)
return USB_STOR_TRANSPORT_ERROR;
diff --git a/drivers/staging/lirc/lirc_imon.c b/drivers/staging/lirc/lirc_imon.c
index 4a9e563f40f..f5308d5929c 100644
--- a/drivers/staging/lirc/lirc_imon.c
+++ b/drivers/staging/lirc/lirc_imon.c
@@ -727,6 +727,9 @@ static int imon_probe(struct usb_interface *interface,
int i;
u16 vendor, product;
+ /* prevent races probing devices w/multiple interfaces */
+ mutex_lock(&driver_lock);
+
context = kzalloc(sizeof(struct imon_context), GFP_KERNEL);
if (!context) {
err("%s: kzalloc failed for context", __func__);
@@ -753,9 +756,6 @@ static int imon_probe(struct usb_interface *interface,
dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n",
__func__, vendor, product, ifnum);
- /* prevent races probing devices w/multiple interfaces */
- mutex_lock(&driver_lock);
-
/*
* Scan the endpoint list and set:
* first input endpoint = IR endpoint
diff --git a/drivers/staging/lirc/lirc_sasem.c b/drivers/staging/lirc/lirc_sasem.c
index 7080cdeab5a..a2d18b0aa04 100644
--- a/drivers/staging/lirc/lirc_sasem.c
+++ b/drivers/staging/lirc/lirc_sasem.c
@@ -814,29 +814,6 @@ static int sasem_probe(struct usb_interface *interface,
printk(KERN_INFO "%s: Registered Sasem driver (minor:%d)\n",
__func__, lirc_minor);
-alloc_status_switch:
-
- switch (alloc_status) {
-
- case 7:
- if (vfd_ep_found)
- usb_free_urb(tx_urb);
- case 6:
- usb_free_urb(rx_urb);
- case 5:
- lirc_buffer_free(rbuf);
- case 4:
- kfree(rbuf);
- case 3:
- kfree(driver);
- case 2:
- kfree(context);
- context = NULL;
- case 1:
- retval = -ENOMEM;
- goto unlock;
- }
-
/* Needed while unregistering! */
driver->minor = lirc_minor;
@@ -867,6 +844,29 @@ alloc_status_switch:
__func__, dev->bus->busnum, dev->devnum);
unlock:
mutex_unlock(&context->ctx_lock);
+
+alloc_status_switch:
+ switch (alloc_status) {
+
+ case 7:
+ if (vfd_ep_found)
+ usb_free_urb(tx_urb);
+ case 6:
+ usb_free_urb(rx_urb);
+ case 5:
+ lirc_buffer_free(rbuf);
+ case 4:
+ kfree(rbuf);
+ case 3:
+ kfree(driver);
+ case 2:
+ kfree(context);
+ context = NULL;
+ case 1:
+ if (retval == 0)
+ retval = -ENOMEM;
+ }
+
exit:
return retval;
}
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c
index 805df913bb6..8a060a8a722 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/lirc/lirc_serial.c
@@ -841,7 +841,7 @@ static int init_port(void)
int i, nlow, nhigh, result;
result = request_irq(irq, irq_handler,
- IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0),
+ (share_irq ? IRQF_SHARED : 0),
LIRC_DRIVER_NAME, (void *)&hardware);
switch (result) {
diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c
index 0d3864594b1..6903d3992ec 100644
--- a/drivers/staging/lirc/lirc_sir.c
+++ b/drivers/staging/lirc/lirc_sir.c
@@ -967,7 +967,7 @@ static int init_port(void)
return -EBUSY;
}
#endif
- retval = request_irq(irq, sir_interrupt, IRQF_DISABLED,
+ retval = request_irq(irq, sir_interrupt, 0,
LIRC_DRIVER_NAME, NULL);
if (retval < 0) {
# ifndef LIRC_ON_SA1100
diff --git a/drivers/staging/mei/Kconfig b/drivers/staging/mei/Kconfig
index 3f3f170890e..47d78a72db2 100644
--- a/drivers/staging/mei/Kconfig
+++ b/drivers/staging/mei/Kconfig
@@ -1,6 +1,6 @@
config INTEL_MEI
tristate "Intel Management Engine Interface (Intel MEI)"
- depends on X86 && PCI && EXPERIMENTAL
+ depends on X86 && PCI && EXPERIMENTAL && WATCHDOG_CORE
help
The Intel Management Engine (Intel ME) provides Manageability,
Security and Media services for system containing Intel chipsets.
diff --git a/drivers/staging/mei/TODO b/drivers/staging/mei/TODO
index 3b6a667a580..7d9a13b0f2d 100644
--- a/drivers/staging/mei/TODO
+++ b/drivers/staging/mei/TODO
@@ -1,14 +1,4 @@
TODO:
- - Create in-kernel Client API. Examples of in-kernel clients are watchdog and AMTHI.
- - ME Watchdog Driver to expose standard Linux watchdog interface
- - Rewrite AMTHI to use in-kernel client interface
- - Cleanup init and probe functions
- - Review BUG/BUG_ON usage
- - Cleanup and reorganize header files
- - Rewrite client data structure
- - Make state machine more readable
- - Add mei.txt with driver explanation and it's driver
- - Fix Kconfig
- Cleanup and split the timer function
Upon Unstaging:
- move mei.h to include/linux/mei.h
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index 0fa8216fd0e..8bf34794489 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -133,6 +133,7 @@ struct mei_device *mei_device_init(struct pci_dev *pdev)
init_waitqueue_head(&dev->wait_stop_wd);
dev->mei_state = MEI_INITIALIZING;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
+ dev->wd_interface_reg = false;
mei_io_list_init(&dev->read_list);
@@ -469,9 +470,12 @@ void mei_allocate_me_clients_storage(struct mei_device *dev)
*
* @dev: the device structure
*
- * returns none.
+ * returns:
+ * < 0 - Error.
+ * = 0 - no more clients.
+ * = 1 - still have clients to send properties request.
*/
-void mei_host_client_properties(struct mei_device *dev)
+int mei_host_client_properties(struct mei_device *dev)
{
struct mei_msg_hdr *mei_header;
struct hbm_props_request *host_cli_req;
@@ -503,26 +507,15 @@ void mei_host_client_properties(struct mei_device *dev)
dev->mei_state = MEI_RESETING;
dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
mei_reset(dev, 1);
- return;
+ return -EIO;
}
dev->init_clients_timer = INIT_CLIENTS_TIMEOUT;
dev->me_client_index = b;
- return;
+ return 1;
}
-
- /*
- * Clear Map for indicating now ME clients
- * with associated host client
- */
- bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
- dev->open_handle_count = 0;
- bitmap_set(dev->host_clients_map, 0, 3);
- dev->mei_state = MEI_ENABLED;
-
- mei_wd_host_init(dev);
- return;
+ return 0;
}
/**
diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
index cfec92dfc1c..a65dacf0021 100644
--- a/drivers/staging/mei/interface.c
+++ b/drivers/staging/mei/interface.c
@@ -332,7 +332,7 @@ int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
mei_hdr->reserved = 0;
mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1];
- memset(mei_flow_control, 0, sizeof(mei_flow_control));
+ memset(mei_flow_control, 0, sizeof(*mei_flow_control));
mei_flow_control->host_addr = cl->host_client_id;
mei_flow_control->me_addr = cl->me_client_id;
mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD;
@@ -396,7 +396,7 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
mei_cli_disconnect =
(struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1];
- memset(mei_cli_disconnect, 0, sizeof(mei_cli_disconnect));
+ memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
mei_cli_disconnect->host_addr = cl->host_client_id;
mei_cli_disconnect->me_addr = cl->me_client_id;
mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD;
diff --git a/drivers/staging/mei/interface.h b/drivers/staging/mei/interface.h
index d0bf5cf4f3e..7bd38ae2c23 100644
--- a/drivers/staging/mei/interface.h
+++ b/drivers/staging/mei/interface.h
@@ -23,7 +23,9 @@
#include "mei_dev.h"
-#define AMT_WD_VALUE 120 /* seconds */
+#define AMT_WD_DEFAULT_TIMEOUT 120 /* seconds */
+#define AMT_WD_MIN_TIMEOUT 120 /* seconds */
+#define AMT_WD_MAX_TIMEOUT 65535 /* seconds */
#define MEI_WATCHDOG_DATA_SIZE 16
#define MEI_START_WD_DATA_SIZE 20
@@ -48,8 +50,8 @@ int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl);
int mei_wd_send(struct mei_device *dev);
int mei_wd_stop(struct mei_device *dev, bool preserve);
-void mei_wd_host_init(struct mei_device *dev);
-void mei_wd_start_setup(struct mei_device *dev);
+bool mei_wd_host_init(struct mei_device *dev);
+void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout);
int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl);
diff --git a/drivers/staging/mei/interrupt.c b/drivers/staging/mei/interrupt.c
index 9cb186bf187..882d106d54e 100644
--- a/drivers/staging/mei/interrupt.c
+++ b/drivers/staging/mei/interrupt.c
@@ -396,6 +396,18 @@ static void mei_client_connect_response(struct mei_device *dev,
dev->wd_due_counter = (dev->wd_timeout) ? 1 : 0;
dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
+
+ /* Registering watchdog interface device once we got connection
+ to the WD Client
+ */
+ if (watchdog_register_device(&amt_wd_dev)) {
+ printk(KERN_ERR "mei: unable to register watchdog device.\n");
+ dev->wd_interface_reg = false;
+ } else {
+ dev_dbg(&dev->pdev->dev, "successfully register watchdog interface.\n");
+ dev->wd_interface_reg = true;
+ }
+
mei_host_init_iamthif(dev);
return;
}
@@ -641,6 +653,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
struct hbm_host_enum_response *enum_res;
struct hbm_client_disconnect_request *disconnect_req;
struct hbm_host_stop_request *host_stop_req;
+ int res;
unsigned char *buffer;
@@ -734,7 +747,38 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
MEI_CLIENT_PROPERTIES_MESSAGE) {
dev->me_client_index++;
dev->me_client_presentation_num++;
- mei_host_client_properties(dev);
+
+ /** Send Client Propeties request **/
+ res = mei_host_client_properties(dev);
+ if (res < 0) {
+ dev_dbg(&dev->pdev->dev, "mei_host_client_properties() failed");
+ return;
+ } else if (!res) {
+ /*
+ * No more clients to send to.
+ * Clear Map for indicating now ME clients
+ * with associated host client
+ */
+ bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
+ dev->open_handle_count = 0;
+
+ /*
+ * Reserving the first three client IDs
+ * Client Id 0 - Reserved for MEI Bus Message communications
+ * Client Id 1 - Reserved for Watchdog
+ * Client ID 2 - Reserved for AMTHI
+ */
+ bitmap_set(dev->host_clients_map, 0, 3);
+ dev->mei_state = MEI_ENABLED;
+
+ /* if wd initialization fails, initialization the AMTHI client,
+ * otherwise the AMTHI client will be initialized after the WD client connect response
+ * will be received
+ */
+ if (mei_wd_host_init(dev))
+ mei_host_init_iamthif(dev);
+ }
+
} else {
dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message");
mei_reset(dev, 1);
@@ -1381,7 +1425,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
*
* NOTE: This function is called by timer interrupt work
*/
-void mei_wd_timer(struct work_struct *work)
+void mei_timer(struct work_struct *work)
{
unsigned long timeout;
struct mei_cl *cl_pos = NULL;
@@ -1391,7 +1435,7 @@ void mei_wd_timer(struct work_struct *work)
struct mei_cl_cb *cb_next = NULL;
struct mei_device *dev = container_of(work,
- struct mei_device, wd_work.work);
+ struct mei_device, timer_work.work);
mutex_lock(&dev->device_lock);
@@ -1418,33 +1462,6 @@ void mei_wd_timer(struct work_struct *work)
}
}
- if (dev->wd_cl.state != MEI_FILE_CONNECTED)
- goto out;
-
- /* Watchdog */
- if (dev->wd_due_counter && !dev->wd_bypass) {
- if (--dev->wd_due_counter == 0) {
- if (dev->mei_host_buffer_is_empty &&
- mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
- dev->mei_host_buffer_is_empty = false;
- dev_dbg(&dev->pdev->dev, "send watchdog.\n");
-
- if (mei_wd_send(dev))
- dev_dbg(&dev->pdev->dev, "wd send failed.\n");
- else
- if (mei_flow_ctrl_reduce(dev, &dev->wd_cl))
- goto out;
-
- if (dev->wd_timeout)
- dev->wd_due_counter = 2;
- else
- dev->wd_due_counter = 0;
-
- } else
- dev->wd_pending = true;
-
- }
- }
if (dev->iamthif_stall_timer) {
if (--dev->iamthif_stall_timer == 0) {
dev_dbg(&dev->pdev->dev, "reseting because of hang to amthi.\n");
@@ -1510,7 +1527,7 @@ void mei_wd_timer(struct work_struct *work)
}
}
out:
- schedule_delayed_work(&dev->wd_work, 2 * HZ);
+ schedule_delayed_work(&dev->timer_work, 2 * HZ);
mutex_unlock(&dev->device_lock);
}
@@ -1540,6 +1557,12 @@ irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
mutex_lock(&dev->device_lock);
mei_io_list_init(&complete_list);
dev->host_hw_state = mei_hcsr_read(dev);
+
+ /* Ack the interrupt here
+ * In case of MSI we don't go throuhg the quick handler */
+ if (pci_dev_msi_enabled(dev->pdev))
+ mei_reg_write(dev, H_CSR, dev->host_hw_state);
+
dev->me_hw_state = mei_mecsr_read(dev);
/* check if ME wants a reset */
diff --git a/drivers/staging/mei/main.c b/drivers/staging/mei/main.c
index de8825fcd8c..eb05c36f45d 100644
--- a/drivers/staging/mei/main.c
+++ b/drivers/staging/mei/main.c
@@ -58,7 +58,7 @@ static struct cdev mei_cdev;
static int mei_major;
/* The device pointer */
/* Currently this driver works as long as there is only a single AMT device. */
-static struct pci_dev *mei_device;
+struct pci_dev *mei_device;
static struct class *mei_class;
@@ -151,17 +151,26 @@ static int __devinit mei_probe(struct pci_dev *pdev,
err = -ENOMEM;
goto free_device;
}
- /* request and enable interrupt */
- err = request_threaded_irq(pdev->irq,
+ pci_enable_msi(pdev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_interrupt_thread_handler,
+ 0, mei_driver_name, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
mei_interrupt_quick_handler,
mei_interrupt_thread_handler,
IRQF_SHARED, mei_driver_name, dev);
+
if (err) {
printk(KERN_ERR "mei: request_threaded_irq failure. irq = %d\n",
pdev->irq);
goto unmap_memory;
}
- INIT_DELAYED_WORK(&dev->wd_work, mei_wd_timer);
+ INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
if (mei_hw_init(dev)) {
printk(KERN_ERR "mei: Init hw failure.\n");
err = -ENODEV;
@@ -169,7 +178,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
}
mei_device = pdev;
pci_set_drvdata(pdev, dev);
- schedule_delayed_work(&dev->wd_work, HZ);
+ schedule_delayed_work(&dev->timer_work, HZ);
mutex_unlock(&mei_mutex);
@@ -183,6 +192,7 @@ release_irq:
mei_disable_interrupts(dev);
flush_scheduled_work();
free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
unmap_memory:
pci_iounmap(pdev, dev->mem_addr);
free_device:
@@ -231,6 +241,10 @@ static void __devexit mei_remove(struct pci_dev *pdev)
mei_disconnect_host_client(dev, &dev->wd_cl);
}
+ /* Unregistering watchdog device */
+ if (dev->wd_interface_reg)
+ watchdog_unregister_device(&amt_wd_dev);
+
/* remove entry if already in list */
dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
@@ -247,6 +261,7 @@ static void __devexit mei_remove(struct pci_dev *pdev)
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
pci_set_drvdata(pdev, NULL);
if (dev->mem_addr)
@@ -402,7 +417,7 @@ static int mei_open(struct inode *inode, struct file *file)
err = -ENOMEM;
cl = mei_cl_allocate(dev);
if (!cl)
- goto out;
+ goto out_unlock;
err = -ENODEV;
if (dev->mei_state != MEI_ENABLED) {
@@ -1096,7 +1111,7 @@ static int mei_pci_suspend(struct device *device)
mutex_unlock(&dev->device_lock);
free_irq(pdev->irq, dev);
-
+ pci_disable_msi(pdev);
return err;
}
@@ -1111,11 +1126,20 @@ static int mei_pci_resume(struct device *device)
if (!dev)
return -ENODEV;
- /* request and enable interrupt */
- err = request_threaded_irq(pdev->irq,
+ pci_enable_msi(pdev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_interrupt_thread_handler,
+ 0, mei_driver_name, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
mei_interrupt_quick_handler,
mei_interrupt_thread_handler,
IRQF_SHARED, mei_driver_name, dev);
+
if (err) {
printk(KERN_ERR "mei: Request_irq failure. irq = %d\n",
pdev->irq);
@@ -1127,12 +1151,9 @@ static int mei_pci_resume(struct device *device)
mei_reset(dev, 1);
mutex_unlock(&dev->device_lock);
- /* Start watchdog if stopped in suspend */
- if (dev->wd_timeout) {
- mei_wd_start_setup(dev);
- dev->wd_due_counter = 1;
- schedule_delayed_work(&dev->wd_work, HZ);
- }
+ /* Start timer if stopped in suspend */
+ schedule_delayed_work(&dev->timer_work, HZ);
+
return err;
}
static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
diff --git a/drivers/staging/mei/mei_dev.h b/drivers/staging/mei/mei_dev.h
index d7bc10c612b..af4b1af9eea 100644
--- a/drivers/staging/mei/mei_dev.h
+++ b/drivers/staging/mei/mei_dev.h
@@ -18,6 +18,7 @@
#define _MEI_DEV_H_
#include <linux/types.h>
+#include <linux/watchdog.h>
#include "mei.h"
#include "hw.h"
@@ -37,6 +38,17 @@
#define MEI_WD_STATE_INDEPENDENCE_MSG_SENT (1 << 0)
/*
+ * MEI PCI Device object
+ */
+extern struct pci_dev *mei_device;
+
+/*
+ * AMT Watchdog Device
+ */
+#define INTEL_AMT_WATCHDOG_ID "INTCAMT"
+extern struct watchdog_device amt_wd_dev;
+
+/*
* AMTHI Client UUID
*/
extern const uuid_le mei_amthi_guid;
@@ -197,7 +209,7 @@ struct mei_device {
* lock for the device
*/
struct mutex device_lock; /* device lock */
- struct delayed_work wd_work; /* watch dog deleye work */
+ struct delayed_work timer_work; /* MEI timer delayed work (timeouts) */
bool recvd_msg;
/*
* hw states of host and fw(ME)
@@ -258,6 +270,8 @@ struct mei_device {
bool iamthif_flow_control_pending;
bool iamthif_ioctl;
bool iamthif_canceled;
+
+ bool wd_interface_reg;
};
@@ -315,14 +329,14 @@ static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
*/
void mei_host_start_message(struct mei_device *dev);
void mei_host_enum_clients_message(struct mei_device *dev);
-void mei_host_client_properties(struct mei_device *dev);
+int mei_host_client_properties(struct mei_device *dev);
/*
* MEI interrupt functions prototype
*/
irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id);
irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id);
-void mei_wd_timer(struct work_struct *work);
+void mei_timer(struct work_struct *work);
/*
* MEI input output function prototype
@@ -356,7 +370,7 @@ int mei_find_me_client_index(const struct mei_device *dev, uuid_le cuuid);
* @dev: the device structure
* @offset: offset from which to read the data
*
- * returns the byte read.
+ * returns register value (u32)
*/
static inline u32 mei_reg_read(struct mei_device *dev, unsigned long offset)
{
@@ -368,7 +382,7 @@ static inline u32 mei_reg_read(struct mei_device *dev, unsigned long offset)
*
* @dev: the device structure
* @offset: offset from which to write the data
- * @value: the byte to write
+ * @value: register value to write (u32)
*/
static inline void mei_reg_write(struct mei_device *dev,
unsigned long offset, u32 value)
diff --git a/drivers/staging/mei/wd.c b/drivers/staging/mei/wd.c
index 42f04efc90e..ffca7ca3265 100644
--- a/drivers/staging/mei/wd.c
+++ b/drivers/staging/mei/wd.c
@@ -19,22 +19,13 @@
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/sched.h>
+#include <linux/watchdog.h>
#include "mei_dev.h"
#include "hw.h"
#include "interface.h"
#include "mei.h"
-/*
- * MEI Watchdog Module Parameters
- */
-static u16 watchdog_timeout = AMT_WD_VALUE;
-module_param(watchdog_timeout, ushort, 0);
-MODULE_PARM_DESC(watchdog_timeout,
- "Intel(R) AMT Watchdog timeout value in seconds. (default="
- __MODULE_STRING(AMT_WD_VALUE)
- ", disable=0)");
-
static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 };
@@ -50,12 +41,12 @@ const uuid_le mei_wd_guid = UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, 0x89,
0x32, 0xAB);
-void mei_wd_start_setup(struct mei_device *dev)
+void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
{
- dev_dbg(&dev->pdev->dev, "dev->wd_timeout=%d.\n", dev->wd_timeout);
+ dev_dbg(&dev->pdev->dev, "timeout=%d.\n", timeout);
memcpy(dev->wd_data, mei_start_wd_params, MEI_WD_PARAMS_SIZE);
memcpy(dev->wd_data + MEI_WD_PARAMS_SIZE,
- &dev->wd_timeout, sizeof(u16));
+ &timeout, sizeof(u16));
}
/**
@@ -63,39 +54,39 @@ void mei_wd_start_setup(struct mei_device *dev)
*
* @dev: the device structure
*/
-void mei_wd_host_init(struct mei_device *dev)
+bool mei_wd_host_init(struct mei_device *dev)
{
+ bool ret = false;
+
mei_cl_init(&dev->wd_cl, dev);
/* look for WD client and connect to it */
dev->wd_cl.state = MEI_FILE_DISCONNECTED;
- dev->wd_timeout = watchdog_timeout;
-
- if (dev->wd_timeout > 0) {
- mei_wd_start_setup(dev);
- /* find ME WD client */
- mei_find_me_client_update_filext(dev, &dev->wd_cl,
- &mei_wd_guid, MEI_WD_HOST_CLIENT_ID);
-
- dev_dbg(&dev->pdev->dev, "check wd_cl\n");
- if (MEI_FILE_CONNECTING == dev->wd_cl.state) {
- if (!mei_connect(dev, &dev->wd_cl)) {
- dev_dbg(&dev->pdev->dev, "Failed to connect to WD client\n");
- dev->wd_cl.state = MEI_FILE_DISCONNECTED;
- dev->wd_cl.host_client_id = 0;
- mei_host_init_iamthif(dev) ;
- } else {
- dev->wd_cl.timer_count = CONNECT_TIMEOUT;
- }
+ dev->wd_timeout = AMT_WD_DEFAULT_TIMEOUT;
+
+ /* find ME WD client */
+ mei_find_me_client_update_filext(dev, &dev->wd_cl,
+ &mei_wd_guid, MEI_WD_HOST_CLIENT_ID);
+
+ dev_dbg(&dev->pdev->dev, "check wd_cl\n");
+ if (MEI_FILE_CONNECTING == dev->wd_cl.state) {
+ if (!mei_connect(dev, &dev->wd_cl)) {
+ dev_dbg(&dev->pdev->dev, "Failed to connect to WD client\n");
+ dev->wd_cl.state = MEI_FILE_DISCONNECTED;
+ dev->wd_cl.host_client_id = 0;
+ ret = false;
+ goto end;
} else {
- dev_dbg(&dev->pdev->dev, "Failed to find WD client\n");
- mei_host_init_iamthif(dev) ;
+ dev->wd_cl.timer_count = CONNECT_TIMEOUT;
}
} else {
- dev->wd_bypass = true;
- dev_dbg(&dev->pdev->dev, "WD requested to be disabled\n");
- mei_host_init_iamthif(dev) ;
+ dev_dbg(&dev->pdev->dev, "Failed to find WD client\n");
+ ret = false;
+ goto end;
}
+
+end:
+ return ret;
}
/**
@@ -129,12 +120,22 @@ int mei_wd_send(struct mei_device *dev)
return -EIO;
}
+/**
+ * mei_wd_stop - sends watchdog stop message to fw.
+ *
+ * @dev: the device structure
+ * @preserve: indicate if to keep the timeout value
+ *
+ * returns 0 if success,
+ * -EIO when message send fails
+ * -EINVAL when invalid message is to be sent
+ */
int mei_wd_stop(struct mei_device *dev, bool preserve)
{
int ret;
u16 wd_timeout = dev->wd_timeout;
- cancel_delayed_work(&dev->wd_work);
+ cancel_delayed_work(&dev->timer_work);
if (dev->wd_cl.state != MEI_FILE_CONNECTED || !dev->wd_timeout)
return 0;
@@ -186,3 +187,168 @@ out:
return ret;
}
+/*
+ * mei_wd_ops_start - wd start command from the watchdog core.
+ *
+ * @wd_dev - watchdog device struct
+ *
+ * returns 0 if success, negative errno code for failure
+ */
+static int mei_wd_ops_start(struct watchdog_device *wd_dev)
+{
+ int err = -ENODEV;
+ struct mei_device *dev;
+
+ dev = pci_get_drvdata(mei_device);
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->device_lock);
+
+ if (dev->mei_state != MEI_ENABLED) {
+ dev_dbg(&dev->pdev->dev, "mei_state != MEI_ENABLED mei_state= %d\n",
+ dev->mei_state);
+ goto end_unlock;
+ }
+
+ if (dev->wd_cl.state != MEI_FILE_CONNECTED) {
+ dev_dbg(&dev->pdev->dev, "MEI Driver is not connected to Watchdog Client\n");
+ goto end_unlock;
+ }
+
+ mei_wd_set_start_timeout(dev, dev->wd_timeout);
+
+ err = 0;
+end_unlock:
+ mutex_unlock(&dev->device_lock);
+ return err;
+}
+
+/*
+ * mei_wd_ops_stop - wd stop command from the watchdog core.
+ *
+ * @wd_dev - watchdog device struct
+ *
+ * returns 0 if success, negative errno code for failure
+ */
+static int mei_wd_ops_stop(struct watchdog_device *wd_dev)
+{
+ struct mei_device *dev;
+ dev = pci_get_drvdata(mei_device);
+
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->device_lock);
+ mei_wd_stop(dev, false);
+ mutex_unlock(&dev->device_lock);
+
+ return 0;
+}
+
+/*
+ * mei_wd_ops_ping - wd ping command from the watchdog core.
+ *
+ * @wd_dev - watchdog device struct
+ *
+ * returns 0 if success, negative errno code for failure
+ */
+static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
+{
+ int ret = 0;
+ struct mei_device *dev;
+ dev = pci_get_drvdata(mei_device);
+
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->device_lock);
+
+ if (dev->wd_cl.state != MEI_FILE_CONNECTED) {
+ dev_dbg(&dev->pdev->dev, "wd is not connected.\n");
+ ret = -ENODEV;
+ goto end;
+ }
+
+ /* Check if we can send the ping to HW*/
+ if (dev->mei_host_buffer_is_empty &&
+ mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
+
+ dev->mei_host_buffer_is_empty = false;
+ dev_dbg(&dev->pdev->dev, "sending watchdog ping\n");
+
+ if (mei_wd_send(dev)) {
+ dev_dbg(&dev->pdev->dev, "wd send failed.\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ if (mei_flow_ctrl_reduce(dev, &dev->wd_cl)) {
+ dev_dbg(&dev->pdev->dev, "mei_flow_ctrl_reduce() failed.\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ } else {
+ dev->wd_pending = true;
+ }
+
+end:
+ mutex_unlock(&dev->device_lock);
+ return ret;
+}
+
+/*
+ * mei_wd_ops_set_timeout - wd set timeout command from the watchdog core.
+ *
+ * @wd_dev - watchdog device struct
+ * @timeout - timeout value to set
+ *
+ * returns 0 if success, negative errno code for failure
+ */
+static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev, unsigned int timeout)
+{
+ struct mei_device *dev;
+ dev = pci_get_drvdata(mei_device);
+
+ if (!dev)
+ return -ENODEV;
+
+ /* Check Timeout value */
+ if (timeout < AMT_WD_MIN_TIMEOUT || timeout > AMT_WD_MAX_TIMEOUT)
+ return -EINVAL;
+
+ mutex_lock(&dev->device_lock);
+
+ dev->wd_timeout = timeout;
+ mei_wd_set_start_timeout(dev, dev->wd_timeout);
+
+ mutex_unlock(&dev->device_lock);
+
+ return 0;
+}
+
+/*
+ * Watchdog Device structs
+ */
+const struct watchdog_ops wd_ops = {
+ .owner = THIS_MODULE,
+ .start = mei_wd_ops_start,
+ .stop = mei_wd_ops_stop,
+ .ping = mei_wd_ops_ping,
+ .set_timeout = mei_wd_ops_set_timeout,
+};
+const struct watchdog_info wd_info = {
+ .identity = INTEL_AMT_WATCHDOG_ID,
+ .options = WDIOF_KEEPALIVEPING,
+};
+
+struct watchdog_device amt_wd_dev = {
+ .info = &wd_info,
+ .ops = &wd_ops,
+ .timeout = AMT_WD_DEFAULT_TIMEOUT,
+ .min_timeout = AMT_WD_MIN_TIMEOUT,
+ .max_timeout = AMT_WD_MAX_TIMEOUT,
+};
+
+
diff --git a/drivers/staging/nvec/Kconfig b/drivers/staging/nvec/Kconfig
index 987ad48ff93..86a8b8c418c 100644
--- a/drivers/staging/nvec/Kconfig
+++ b/drivers/staging/nvec/Kconfig
@@ -7,21 +7,27 @@ config MFD_NVEC
config KEYBOARD_NVEC
bool "Keyboard on nVidia compliant EC"
- depends on MFD_NVEC
+ depends on MFD_NVEC && INPUT=y
help
Say Y here to enable support for a keyboard connected to
a nVidia compliant embedded controller.
config SERIO_NVEC_PS2
bool "PS2 on nVidia EC"
- depends on MFD_NVEC
+ depends on MFD_NVEC && MOUSE_PS2
help
Say Y here to enable support for a Touchpad / Mouse connected
to a nVidia compliant embedded controller.
config NVEC_POWER
bool "NVEC charger and battery"
- depends on MFD_NVEC
+ depends on MFD_NVEC && POWER_SUPPLY=y
help
Say Y to enable support for battery and charger interface for
nVidia compliant embedded controllers.
+
+config NVEC_LEDS
+ bool "NVEC leds"
+ depends on MFD_NVEC && LEDS_CLASS
+ help
+ Say Y to enable yellow side leds on AC100 or other nVidia tegra nvec leds
diff --git a/drivers/staging/nvec/Makefile b/drivers/staging/nvec/Makefile
index 4b5fcec1a10..b844d604e3a 100644
--- a/drivers/staging/nvec/Makefile
+++ b/drivers/staging/nvec/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_SERIO_NVEC_PS2) += nvec_ps2.o
obj-$(CONFIG_MFD_NVEC) += nvec.o
obj-$(CONFIG_NVEC_POWER) += nvec_power.o
obj-$(CONFIG_KEYBOARD_NVEC) += nvec_kbd.o
+obj-$(CONFIG_NVEC_LEDS) += nvec_leds.o
diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO
index 649d6b70dea..f950ab890e2 100644
--- a/drivers/staging/nvec/TODO
+++ b/drivers/staging/nvec/TODO
@@ -1,10 +1,12 @@
ToDo list (incomplete, unordered)
- - convert mouse, keyboard, and power to platform devices
- - add copyright / driver author / license
- add compile as module support
- - move nvec devices to mfd cells?
- - adjust to kernel style
- fix clk usage
should not be using clk_get_sys(), but clk_get(&pdev->dev, conn)
where conn is either NULL if the device only has one clock, or
the device specific name if it has multiple clocks.
+ - move half of the nvec init stuff to i2c-tegra.c
+ - move event handling to nvec_events
+ - finish suspend/resume support
+ - modifiy the sync_write method to return the received
+ message in a variable (and return the error code).
+ - add support for more device implementations
diff --git a/drivers/staging/nvec/nvec-keytable.h b/drivers/staging/nvec/nvec-keytable.h
index 6a1c4f7f460..1dc22cb8812 100644
--- a/drivers/staging/nvec/nvec-keytable.h
+++ b/drivers/staging/nvec/nvec-keytable.h
@@ -22,7 +22,8 @@
*/
static unsigned short code_tab_102us[] = {
- KEY_GRAVE, // 0x00
+ /* 0x00 */
+ KEY_GRAVE,
KEY_ESC,
KEY_1,
KEY_2,
@@ -38,7 +39,8 @@ static unsigned short code_tab_102us[] = {
KEY_EQUAL,
KEY_BACKSPACE,
KEY_TAB,
- KEY_Q, // 0x10
+ /* 0x10 */
+ KEY_Q,
KEY_W,
KEY_E,
KEY_R,
@@ -54,7 +56,8 @@ static unsigned short code_tab_102us[] = {
KEY_LEFTCTRL,
KEY_A,
KEY_S,
- KEY_D, // 0x20
+ /* 0x20 */
+ KEY_D,
KEY_F,
KEY_G,
KEY_H,
@@ -70,7 +73,8 @@ static unsigned short code_tab_102us[] = {
KEY_X,
KEY_C,
KEY_V,
- KEY_B, // 0x30
+ /* 0x30 */
+ KEY_B,
KEY_N,
KEY_M,
KEY_COMMA,
@@ -86,13 +90,15 @@ static unsigned short code_tab_102us[] = {
KEY_F3,
KEY_F4,
KEY_F5,
- KEY_F6, // 0x40
+ /* 0x40 */
+ KEY_F6,
KEY_F7,
KEY_F8,
KEY_F9,
KEY_F10,
KEY_FN,
- 0, //VK_SCROLL
+ /* VK_SCROLL */
+ 0,
KEY_KP7,
KEY_KP8,
KEY_KP9,
@@ -102,52 +108,57 @@ static unsigned short code_tab_102us[] = {
KEY_KP6,
KEY_KPPLUS,
KEY_KP1,
- KEY_KP2, // 0x50
+ /* 0x50 */
+ KEY_KP2,
KEY_KP3,
KEY_KP0,
KEY_KPDOT,
- KEY_MENU, //VK_SNAPSHOT
+ /* VK_SNAPSHOT */
+ KEY_MENU,
KEY_POWER,
- KEY_102ND, //VK_OEM_102 henry+ 0x2B (43) BACKSLASH have been used,change to use 0X56 (86)
- KEY_F11, //VK_F11
- KEY_F12, //VK_F12
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0, // 60
- 0,
- 0,
- KEY_SEARCH, // add search key map
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0, // 70
- 0,
- 0,
- KEY_KP5, //73 for JP keyboard '\' key, report 0x4c
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- KEY_KP9, //7d for JP keyboard '|' key, report 0x49
+ /* VK_OEM_102 */
+ KEY_102ND,
+ KEY_F11,
+ KEY_F12,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ /* 0x60 */
+ 0,
+ 0,
+ 0,
+ KEY_SEARCH,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ /* 0x70 */
+ 0,
+ 0,
+ 0,
+ KEY_KP5,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ KEY_KP9,
};
static unsigned short extcode_tab_us102[] = {
@@ -167,27 +178,35 @@ static unsigned short extcode_tab_us102[] = {
0,
0,
0,
- 0, // 0xE0 0x10
+ /* 0x10 */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
0,
0,
+ /* VK_MEDIA_NEXT_TRACK */
0,
0,
0,
+ /* VK_RETURN */
0,
+ KEY_RIGHTCTRL,
0,
0,
- 0, //VK_MEDIA_NEXT_TRACK,
+ /* 0x20 */
+ KEY_MUTE,
+ /* VK_LAUNCH_APP1 */
0,
+ /* VK_MEDIA_PLAY_PAUSE */
0,
- 0, //VK_RETURN,
- KEY_RIGHTCTRL, //VK_RCONTROL,
0,
+ /* VK_MEDIA_STOP */
0,
- KEY_MUTE, // 0xE0 0x20
- 0, //VK_LAUNCH_APP1
- 0, //VK_MEDIA_PLAY_PAUSE
0,
- 0, //VK_MEDIA_STOP
0,
0,
0,
@@ -198,41 +217,54 @@ static unsigned short extcode_tab_us102[] = {
0,
0,
0,
+ /* 0x30 */
+ KEY_VOLUMEUP,
0,
- KEY_VOLUMEUP, // 0xE0 0x30
+ /* VK_BROWSER_HOME */
+ 0,
+ 0,
+ 0,
+ /* VK_DIVIDE */
+ KEY_KPSLASH,
+ 0,
+ /* VK_SNAPSHOT */
+ KEY_SYSRQ,
+ /* VK_RMENU */
+ KEY_RIGHTALT,
+ /* VK_OEM_NV_BACKLIGHT_UP */
+ 0,
+ /* VK_OEM_NV_BACKLIGHT_DN */
+ 0,
+ /* VK_OEM_NV_BACKLIGHT_AUTOTOGGLE */
+ 0,
+ /* VK_OEM_NV_POWER_INFO */
+ 0,
+ /* VK_OEM_NV_WIFI_TOGGLE */
+ 0,
+ /* VK_OEM_NV_DISPLAY_SELECT */
+ 0,
+ /* VK_OEM_NV_AIRPLANE_TOGGLE */
+ 0,
+ /* 0x40 */
+ 0,
+ KEY_LEFT,
0,
- 0, //VK_BROWSER_HOME
0,
0,
- KEY_KPSLASH, //VK_DIVIDE
0,
- KEY_SYSRQ, //VK_SNAPSHOT
- KEY_RIGHTALT, //VK_RMENU
- 0, //VK_OEM_NV_BACKLIGHT_UP
- 0, //VK_OEM_NV_BACKLIGHT_DN
- 0, //VK_OEM_NV_BACKLIGHT_AUTOTOGGLE
- 0, //VK_OEM_NV_POWER_INFO
- 0, //VK_OEM_NV_WIFI_TOGGLE
- 0, //VK_OEM_NV_DISPLAY_SELECT
- 0, //VK_OEM_NV_AIRPLANE_TOGGLE
- 0, //0xE0 0x40
- KEY_LEFT, //VK_OEM_NV_RESERVED henry+ for JP keyboard
- 0, //VK_OEM_NV_RESERVED
- 0, //VK_OEM_NV_RESERVED
- 0, //VK_OEM_NV_RESERVED
- 0, //VK_OEM_NV_RESERVED
KEY_CANCEL,
KEY_HOME,
KEY_UP,
- KEY_PAGEUP, //VK_PRIOR
+ KEY_PAGEUP,
0,
KEY_LEFT,
0,
KEY_RIGHT,
0,
KEY_END,
- KEY_DOWN, // 0xE0 0x50
- KEY_PAGEDOWN, //VK_NEXT
+ /* 0x50 */
+ KEY_DOWN,
+ KEY_PAGEDOWN,
KEY_INSERT,
KEY_DELETE,
0,
@@ -242,25 +274,34 @@ static unsigned short extcode_tab_us102[] = {
0,
0,
0,
- KEY_LEFTMETA, //VK_LWIN
- 0, //VK_RWIN
- KEY_ESC, //VK_APPS
- KEY_KPMINUS, //for power button workaround
- 0,
+ KEY_LEFTMETA,
+ 0,
+ KEY_ESC,
+ KEY_KPMINUS,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ /* VK_BROWSER_SEARCH */
+ 0,
+ /* VK_BROWSER_FAVORITES */
+ 0,
+ /* VK_BROWSER_REFRESH */
+ 0,
+ /* VK_BROWSER_STOP */
+ 0,
+ /* VK_BROWSER_FORWARD */
0,
+ /* VK_BROWSER_BACK */
0,
+ /* VK_LAUNCH_APP2 */
0,
+ /* VK_LAUNCH_MAIL */
0,
+ /* VK_LAUNCH_MEDIA_SELECT */
0,
- 0, //VK_BROWSER_SEARCH
- 0, //VK_BROWSER_FAVORITES
- 0, //VK_BROWSER_REFRESH
- 0, //VK_BROWSER_STOP
- 0, //VK_BROWSER_FORWARD
- 0, //VK_BROWSER_BACK
- 0, //VK_LAUNCH_APP2
- 0, //VK_LAUNCH_MAIL
- 0, //VK_LAUNCH_MEDIA_SELECT
};
-static unsigned short* code_tabs[] = {code_tab_102us, extcode_tab_us102 };
+static unsigned short *code_tabs[] = { code_tab_102us, extcode_tab_us102 };
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 72258e8c64c..e06b867d1e0 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -1,294 +1,706 @@
-// #define DEBUG
-
-/* ToDo list (incomplete, unorderd)
- - convert mouse, keyboard, and power to platform devices
-*/
-
-#include <asm/io.h>
-#include <asm/irq.h>
+/*
+ * NVEC: NVIDIA compliant embedded controller interface
+ *
+ * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
+ *
+ * Authors: Pierre-Hugues Husson <phhusson@free.fr>
+ * Ilya Petrov <ilya.muromec@gmail.com>
+ * Marc Dietrich <marvin24@gmx.de>
+ * Julian Andres Klode <jak@jak-linux.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+/* #define DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/atomic.h>
+#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/serio.h>
-#include <linux/delay.h>
-#include <linux/input.h>
-#include <linux/workqueue.h>
-#include <linux/clk.h>
-#include <mach/iomap.h>
-#include <mach/clk.h>
-#include <linux/semaphore.h>
#include <linux/list.h>
+#include <linux/mfd/core.h>
+#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <mach/clk.h>
+#include <mach/iomap.h>
+
#include "nvec.h"
-static unsigned char EC_DISABLE_EVENT_REPORTING[] = {'\x04','\x00','\x00'};
-static unsigned char EC_ENABLE_EVENT_REPORTING[] = {'\x04','\x00','\x01'};
-static unsigned char EC_GET_FIRMWARE_VERSION[] = {'\x07','\x15'};
+#define I2C_CNFG 0x00
+#define I2C_CNFG_PACKET_MODE_EN (1<<10)
+#define I2C_CNFG_NEW_MASTER_SFM (1<<11)
+#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
+
+#define I2C_SL_CNFG 0x20
+#define I2C_SL_NEWL (1<<2)
+#define I2C_SL_NACK (1<<1)
+#define I2C_SL_RESP (1<<0)
+#define I2C_SL_IRQ (1<<3)
+#define END_TRANS (1<<4)
+#define RCVD (1<<2)
+#define RNW (1<<1)
+
+#define I2C_SL_RCVD 0x24
+#define I2C_SL_STATUS 0x28
+#define I2C_SL_ADDR1 0x2c
+#define I2C_SL_ADDR2 0x30
+#define I2C_SL_DELAY_COUNT 0x3c
+
+/**
+ * enum nvec_msg_category - Message categories for nvec_msg_alloc()
+ * @NVEC_MSG_RX: The message is an incoming message (from EC)
+ * @NVEC_MSG_TX: The message is an outgoing message (to EC)
+ */
+enum nvec_msg_category {
+ NVEC_MSG_RX,
+ NVEC_MSG_TX,
+};
+
+static const unsigned char EC_DISABLE_EVENT_REPORTING[3] = "\x04\x00\x00";
+static const unsigned char EC_ENABLE_EVENT_REPORTING[3] = "\x04\x00\x01";
+static const unsigned char EC_GET_FIRMWARE_VERSION[2] = "\x07\x15";
static struct nvec_chip *nvec_power_handle;
+static struct mfd_cell nvec_devices[] = {
+ {
+ .name = "nvec-kbd",
+ .id = 1,
+ },
+ {
+ .name = "nvec-mouse",
+ .id = 1,
+ },
+ {
+ .name = "nvec-power",
+ .id = 1,
+ },
+ {
+ .name = "nvec-power",
+ .id = 2,
+ },
+ {
+ .name = "nvec-leds",
+ .id = 1,
+ },
+};
+
+/**
+ * nvec_register_notifier - Register a notifier with nvec
+ * @nvec: A &struct nvec_chip
+ * @nb: The notifier block to register
+ *
+ * Registers a notifier with @nvec. The notifier will be added to an atomic
+ * notifier chain that is called for all received messages except those that
+ * correspond to a request initiated by nvec_write_sync().
+ */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
- unsigned int events)
+ unsigned int events)
{
return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);
-static int nvec_status_notifier(struct notifier_block *nb, unsigned long event_type,
- void *data)
+/**
+ * nvec_status_notifier - The final notifier
+ *
+ * Prints a message about control events not handled in the notifier
+ * chain.
+ */
+static int nvec_status_notifier(struct notifier_block *nb,
+ unsigned long event_type, void *data)
{
unsigned char *msg = (unsigned char *)data;
- int i;
- if(event_type != NVEC_CNTL)
+ if (event_type != NVEC_CNTL)
return NOTIFY_DONE;
- printk("unhandled msg type %ld, payload: ", event_type);
- for (i = 0; i < msg[1]; i++)
- printk("%0x ", msg[i+2]);
- printk("\n");
+ printk(KERN_WARNING "unhandled msg type %ld\n", event_type);
+ print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
+ msg, msg[1] + 2, true);
return NOTIFY_OK;
}
-void nvec_write_async(struct nvec_chip *nvec, unsigned char *data, short size)
+/**
+ * nvec_msg_alloc:
+ * @nvec: A &struct nvec_chip
+ * @category: Pool category, see &enum nvec_msg_category
+ *
+ * Allocate a single &struct nvec_msg object from the message pool of
+ * @nvec. The result shall be passed to nvec_msg_free() if no longer
+ * used.
+ *
+ * Outgoing messages are placed in the upper 75% of the pool, keeping the
+ * lower 25% available for RX buffers only. The reason is to prevent a
+ * situation where all buffers are full and a message is thus endlessly
+ * retried because the response could never be processed.
+ */
+static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
+ enum nvec_msg_category category)
+{
+ int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;
+
+ for (; i < NVEC_POOL_SIZE; i++) {
+ if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
+ dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
+ return &nvec->msg_pool[i];
+ }
+ }
+
+ dev_err(nvec->dev, "could not allocate %s buffer\n",
+ (category == NVEC_MSG_TX) ? "TX" : "RX");
+
+ return NULL;
+}
+
+/**
+ * nvec_msg_free:
+ * @nvec: A &struct nvec_chip
+ * @msg: A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
+ *
+ * Free the given message
+ */
+inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
+{
+ if (msg != &nvec->tx_scratch)
+ dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
+ atomic_set(&msg->used, 0);
+}
+EXPORT_SYMBOL_GPL(nvec_msg_free);
+
+/**
+ * nvec_msg_is_event - Return %true if @msg is an event
+ * @msg: A message
+ */
+static bool nvec_msg_is_event(struct nvec_msg *msg)
+{
+ return msg->data[0] >> 7;
+}
+
+/**
+ * nvec_msg_size - Get the size of a message
+ * @msg: The message to get the size for
+ *
+ * This only works for received messages, not for outgoing messages.
+ */
+static size_t nvec_msg_size(struct nvec_msg *msg)
+{
+ bool is_event = nvec_msg_is_event(msg);
+ int event_length = (msg->data[0] & 0x60) >> 5;
+
+ /* for variable size, payload size in byte 1 + count (1) + cmd (1) */
+ if (!is_event || event_length == NVEC_VAR_SIZE)
+ return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
+ else if (event_length == NVEC_2BYTES)
+ return 2;
+ else if (event_length == NVEC_3BYTES)
+ return 3;
+ else
+ return 0;
+}
+
+/**
+ * nvec_gpio_set_value - Set the GPIO value
+ * @nvec: A &struct nvec_chip
+ * @value: The value to write (0 or 1)
+ *
+ * Like gpio_set_value(), but generating debugging information
+ */
+static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
+{
+ dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
+ gpio_get_value(nvec->gpio), value);
+ gpio_set_value(nvec->gpio, value);
+}
+
+/**
+ * nvec_write_async - Asynchronously write a message to NVEC
+ * @nvec: An nvec_chip instance
+ * @data: The message data, starting with the request type
+ * @size: The size of @data
+ *
+ * Queue a single message to be transferred to the embedded controller
+ * and return immediately.
+ *
+ * Returns: 0 on success, a negative error code on failure. If a failure
+ * occured, the nvec driver may print an error.
+ */
+int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
+ short size)
{
- struct nvec_msg *msg = kzalloc(sizeof(struct nvec_msg), GFP_NOWAIT);
+ struct nvec_msg *msg;
+ unsigned long flags;
+
+ msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
+
+ if (msg == NULL)
+ return -ENOMEM;
- msg->data = kzalloc(size, GFP_NOWAIT);
msg->data[0] = size;
memcpy(msg->data + 1, data, size);
msg->size = size + 1;
- msg->pos = 0;
- INIT_LIST_HEAD(&msg->node);
+ spin_lock_irqsave(&nvec->tx_lock, flags);
list_add_tail(&msg->node, &nvec->tx_data);
+ spin_unlock_irqrestore(&nvec->tx_lock, flags);
+
+ queue_work(nvec->wq, &nvec->tx_work);
- gpio_set_value(nvec->gpio, 0);
+ return 0;
}
EXPORT_SYMBOL(nvec_write_async);
+/**
+ * nvec_write_sync - Write a message to nvec and read the response
+ * @nvec: An &struct nvec_chip
+ * @data: The data to write
+ * @size: The size of @data
+ *
+ * This is similar to nvec_write_async(), but waits for the
+ * request to be answered before returning. This function
+ * uses a mutex and can thus not be called from e.g.
+ * interrupt handlers.
+ *
+ * Returns: A pointer to the response message on success,
+ * %NULL on failure. Free with nvec_msg_free() once no longer
+ * used.
+ */
+struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
+ const unsigned char *data, short size)
+{
+ struct nvec_msg *msg;
+
+ mutex_lock(&nvec->sync_write_mutex);
+
+ nvec->sync_write_pending = (data[1] << 8) + data[0];
+
+ if (nvec_write_async(nvec, data, size) < 0)
+ return NULL;
+
+ dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
+ nvec->sync_write_pending);
+ if (!(wait_for_completion_timeout(&nvec->sync_write,
+ msecs_to_jiffies(2000)))) {
+ dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
+ mutex_unlock(&nvec->sync_write_mutex);
+ return NULL;
+ }
+
+ dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");
+
+ msg = nvec->last_sync_msg;
+
+ mutex_unlock(&nvec->sync_write_mutex);
+
+ return msg;
+}
+EXPORT_SYMBOL(nvec_write_sync);
+
+/**
+ * nvec_request_master - Process outgoing messages
+ * @work: A &struct work_struct (the tx_worker member of &struct nvec_chip)
+ *
+ * Processes all outgoing requests by sending the request and awaiting the
+ * response, then continuing with the next request. Once a request has a
+ * matching response, it will be freed and removed from the list.
+ */
static void nvec_request_master(struct work_struct *work)
{
struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
+ unsigned long flags;
+ long err;
+ struct nvec_msg *msg;
+
+ spin_lock_irqsave(&nvec->tx_lock, flags);
+ while (!list_empty(&nvec->tx_data)) {
+ msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
+ spin_unlock_irqrestore(&nvec->tx_lock, flags);
+ nvec_gpio_set_value(nvec, 0);
+ err = wait_for_completion_interruptible_timeout(
+ &nvec->ec_transfer, msecs_to_jiffies(5000));
+
+ if (err == 0) {
+ dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
+ nvec_gpio_set_value(nvec, 1);
+ msg->pos = 0;
+ }
- if(!list_empty(&nvec->tx_data)) {
- gpio_set_value(nvec->gpio, 0);
+ spin_lock_irqsave(&nvec->tx_lock, flags);
+
+ if (err > 0) {
+ list_del_init(&msg->node);
+ nvec_msg_free(nvec, msg);
+ }
}
+ spin_unlock_irqrestore(&nvec->tx_lock, flags);
}
+/**
+ * parse_msg - Print some information and call the notifiers on an RX message
+ * @nvec: A &struct nvec_chip
+ * @msg: A message received by @nvec
+ *
+ * Paarse some pieces of the message and then call the chain of notifiers
+ * registered via nvec_register_notifier.
+ */
static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
{
- int i;
-
- if((msg->data[0] & 1<<7) == 0 && msg->data[3]) {
- dev_err(nvec->dev, "ec responded %02x %02x %02x %02x\n", msg->data[0],
- msg->data[1], msg->data[2], msg->data[3]);
+ if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
+ dev_err(nvec->dev, "ec responded %02x %02x %02x %02x\n",
+ msg->data[0], msg->data[1], msg->data[2], msg->data[3]);
return -EINVAL;
}
- if ((msg->data[0] >> 7 ) == 1 && (msg->data[0] & 0x0f) == 5)
- {
- dev_warn(nvec->dev, "ec system event ");
- for (i=0; i < msg->data[1]; i++)
- dev_warn(nvec->dev, "%02x ", msg->data[2+i]);
- dev_warn(nvec->dev, "\n");
- }
+ if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
+ print_hex_dump(KERN_WARNING, "ec system event ",
+ DUMP_PREFIX_NONE, 16, 1, msg->data,
+ msg->data[1] + 2, true);
- atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f, msg->data);
+ atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
+ msg->data);
return 0;
}
-static struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec, unsigned char *data, short size)
-{
- down(&nvec->sync_write_mutex);
-
- nvec->sync_write_pending = (data[1] << 8) + data[0];
- nvec_write_async(nvec, data, size);
-
- dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n", nvec->sync_write_pending);
- wait_for_completion(&nvec->sync_write);
- dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");
-
- up(&nvec->sync_write_mutex);
-
- return nvec->last_sync_msg;
-}
-
-/* RX worker */
+/**
+ * nvec_dispatch - Process messages received from the EC
+ * @work: A &struct work_struct (the tx_worker member of &struct nvec_chip)
+ *
+ * Process messages previously received from the EC and put into the RX
+ * queue of the &struct nvec_chip instance associated with @work.
+ */
static void nvec_dispatch(struct work_struct *work)
{
struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
+ unsigned long flags;
struct nvec_msg *msg;
- while(!list_empty(&nvec->rx_data))
- {
+ spin_lock_irqsave(&nvec->rx_lock, flags);
+ while (!list_empty(&nvec->rx_data)) {
msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
list_del_init(&msg->node);
+ spin_unlock_irqrestore(&nvec->rx_lock, flags);
- if(nvec->sync_write_pending == (msg->data[2] << 8) + msg->data[0])
- {
+ if (nvec->sync_write_pending ==
+ (msg->data[2] << 8) + msg->data[0]) {
dev_dbg(nvec->dev, "sync write completed!\n");
nvec->sync_write_pending = 0;
nvec->last_sync_msg = msg;
complete(&nvec->sync_write);
} else {
parse_msg(nvec, msg);
- if((!msg) || (!msg->data))
- dev_warn(nvec->dev, "attempt access zero pointer");
- else {
- kfree(msg->data);
- kfree(msg);
- }
+ nvec_msg_free(nvec, msg);
}
+ spin_lock_irqsave(&nvec->rx_lock, flags);
+ }
+ spin_unlock_irqrestore(&nvec->rx_lock, flags);
+}
+
+/**
+ * nvec_tx_completed - Complete the current transfer
+ * @nvec: A &struct nvec_chip
+ *
+ * This is called when we have received an END_TRANS on a TX transfer.
+ */
+static void nvec_tx_completed(struct nvec_chip *nvec)
+{
+ /* We got an END_TRANS, let's skip this, maybe there's an event */
+ if (nvec->tx->pos != nvec->tx->size) {
+ dev_err(nvec->dev, "premature END_TRANS, resending\n");
+ nvec->tx->pos = 0;
+ nvec_gpio_set_value(nvec, 0);
+ } else {
+ nvec->state = 0;
+ }
+}
+
+/**
+ * nvec_rx_completed - Complete the current transfer
+ * @nvec: A &struct nvec_chip
+ *
+ * This is called when we have received an END_TRANS on a RX transfer.
+ */
+static void nvec_rx_completed(struct nvec_chip *nvec)
+{
+ if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
+ dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
+ (uint) nvec_msg_size(nvec->rx),
+ (uint) nvec->rx->pos);
+
+ nvec_msg_free(nvec, nvec->rx);
+ nvec->state = 0;
+
+ /* Battery quirk - Often incomplete, and likes to crash */
+ if (nvec->rx->data[0] == NVEC_BAT)
+ complete(&nvec->ec_transfer);
+
+ return;
+ }
+
+ spin_lock(&nvec->rx_lock);
+
+ /* add the received data to the work list
+ and move the ring buffer pointer to the next entry */
+ list_add_tail(&nvec->rx->node, &nvec->rx_data);
+
+ spin_unlock(&nvec->rx_lock);
+
+ nvec->state = 0;
+
+ if (!nvec_msg_is_event(nvec->rx))
+ complete(&nvec->ec_transfer);
+
+ queue_work(nvec->wq, &nvec->rx_work);
+}
+
+/**
+ * nvec_invalid_flags - Send an error message about invalid flags and jump
+ * @nvec: The nvec device
+ * @status: The status flags
+ * @reset: Whether we shall jump to state 0.
+ */
+static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
+ bool reset)
+{
+ dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
+ status, nvec->state);
+ if (reset)
+ nvec->state = 0;
+}
+
+/**
+ * nvec_tx_set - Set the message to transfer (nvec->tx)
+ * @nvec: A &struct nvec_chip
+ *
+ * Gets the first entry from the tx_data list of @nvec and sets the
+ * tx member to it. If the tx_data list is empty, this uses the
+ * tx_scratch message to send a no operation message.
+ */
+static void nvec_tx_set(struct nvec_chip *nvec)
+{
+ spin_lock(&nvec->tx_lock);
+ if (list_empty(&nvec->tx_data)) {
+ dev_err(nvec->dev, "empty tx - sending no-op\n");
+ memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
+ nvec->tx_scratch.size = 3;
+ nvec->tx_scratch.pos = 0;
+ nvec->tx = &nvec->tx_scratch;
+ list_add_tail(&nvec->tx->node, &nvec->tx_data);
+ } else {
+ nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
+ node);
+ nvec->tx->pos = 0;
}
+ spin_unlock(&nvec->tx_lock);
+
+ dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
+ (uint)nvec->tx->size, nvec->tx->data[1]);
}
-static irqreturn_t i2c_interrupt(int irq, void *dev)
+/**
+ * nvec_interrupt - Interrupt handler
+ * @irq: The IRQ
+ * @dev: The nvec device
+ *
+ * Interrupt handler that fills our RX buffers and empties our TX
+ * buffers. This uses a finite state machine with ridiculous amounts
+ * of error checking, in order to be fairly reliable.
+ */
+static irqreturn_t nvec_interrupt(int irq, void *dev)
{
unsigned long status;
- unsigned long received;
- unsigned char to_send;
- struct nvec_msg *msg;
- struct nvec_chip *nvec = (struct nvec_chip *)dev;
- unsigned char *i2c_regs = nvec->i2c_regs;
+ unsigned int received = 0;
+ unsigned char to_send = 0xff;
+ const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
+ struct nvec_chip *nvec = dev;
+ unsigned int state = nvec->state;
- status = readl(i2c_regs + I2C_SL_STATUS);
+ status = readl(nvec->base + I2C_SL_STATUS);
- if(!(status & I2C_SL_IRQ))
- {
- dev_warn(nvec->dev, "nvec Spurious IRQ\n");
- //Yup, handled. ahum.
- goto handled;
+ /* Filter out some errors */
+ if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
+ dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
+ return IRQ_HANDLED;
}
- if(status & END_TRANS && !(status & RCVD))
- {
- //Reenable IRQ only when even has been sent
- //printk("Write sequence ended !\n");
- //parse_msg(nvec);
- nvec->state = NVEC_WAIT;
- if(nvec->rx->size > 1)
- {
- list_add_tail(&nvec->rx->node, &nvec->rx_data);
- schedule_work(&nvec->rx_work);
+ if ((status & I2C_SL_IRQ) == 0) {
+ dev_err(nvec->dev, "Spurious IRQ\n");
+ return IRQ_HANDLED;
+ }
+
+ /* The EC did not request a read, so it send us something, read it */
+ if ((status & RNW) == 0) {
+ received = readl(nvec->base + I2C_SL_RCVD);
+ if (status & RCVD)
+ writel(0, nvec->base + I2C_SL_RCVD);
+ }
+
+ if (status == (I2C_SL_IRQ | RCVD))
+ nvec->state = 0;
+
+ switch (nvec->state) {
+ case 0: /* Verify that its a transfer start, the rest later */
+ if (status != (I2C_SL_IRQ | RCVD))
+ nvec_invalid_flags(nvec, status, false);
+ break;
+ case 1: /* command byte */
+ if (status != I2C_SL_IRQ) {
+ nvec_invalid_flags(nvec, status, true);
} else {
- kfree(nvec->rx->data);
- kfree(nvec->rx);
+ nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
+ /* Should not happen in a normal world */
+ if (unlikely(nvec->rx == NULL)) {
+ nvec->state = 0;
+ break;
+ }
+ nvec->rx->data[0] = received;
+ nvec->rx->pos = 1;
+ nvec->state = 2;
}
- return IRQ_HANDLED;
- } else if(status & RNW)
- {
- // Work around for AP20 New Slave Hw Bug. Give 1us extra.
- // nvec/smbus/nvec_i2c_transport.c in NV`s crap for reference
- if(status & RCVD)
- udelay(3);
-
- if(status & RCVD)
- {
- nvec->state = NVEC_WRITE;
- //Master wants something from us. New communication
-// dev_dbg(nvec->dev, "New read comm!\n");
+ break;
+ case 2: /* first byte after command */
+ if (status == (I2C_SL_IRQ | RNW | RCVD)) {
+ udelay(33);
+ if (nvec->rx->data[0] != 0x01) {
+ dev_err(nvec->dev,
+ "Read without prior read command\n");
+ nvec->state = 0;
+ break;
+ }
+ nvec_msg_free(nvec, nvec->rx);
+ nvec->state = 3;
+ nvec_tx_set(nvec);
+ BUG_ON(nvec->tx->size < 1);
+ to_send = nvec->tx->data[0];
+ nvec->tx->pos = 1;
+ } else if (status == (I2C_SL_IRQ)) {
+ BUG_ON(nvec->rx == NULL);
+ nvec->rx->data[1] = received;
+ nvec->rx->pos = 2;
+ nvec->state = 4;
} else {
- //Master wants something from us from a communication we've already started
-// dev_dbg(nvec->dev, "Read comm cont !\n");
+ nvec_invalid_flags(nvec, status, true);
}
- //if(msg_pos<msg_size) {
- if(list_empty(&nvec->tx_data))
- {
- dev_err(nvec->dev, "nvec empty tx - sending no-op\n");
- to_send = 0x8a;
- nvec_write_async(nvec, "\x07\x02", 2);
-// to_send = 0x01;
+ break;
+ case 3: /* EC does a block read, we transmit data */
+ if (status & END_TRANS) {
+ nvec_tx_completed(nvec);
+ } else if ((status & RNW) == 0 || (status & RCVD)) {
+ nvec_invalid_flags(nvec, status, true);
+ } else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
+ to_send = nvec->tx->data[nvec->tx->pos++];
} else {
- msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
- if(msg->pos < msg->size) {
- to_send = msg->data[msg->pos];
- msg->pos++;
- } else {
- dev_err(nvec->dev, "nvec crap! %d\n", msg->size);
- to_send = 0x01;
- }
-
- if(msg->pos >= msg->size)
- {
- list_del_init(&msg->node);
- kfree(msg->data);
- kfree(msg);
- schedule_work(&nvec->tx_work);
- nvec->state = NVEC_WAIT;
- }
+ dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
+ nvec->tx,
+ (uint) (nvec->tx ? nvec->tx->pos : 0),
+ (uint) (nvec->tx ? nvec->tx->size : 0));
+ nvec->state = 0;
}
- writel(to_send, i2c_regs + I2C_SL_RCVD);
-
- gpio_set_value(nvec->gpio, 1);
+ break;
+ case 4: /* EC does some write, we read the data */
+ if ((status & (END_TRANS | RNW)) == END_TRANS)
+ nvec_rx_completed(nvec);
+ else if (status & (RNW | RCVD))
+ nvec_invalid_flags(nvec, status, true);
+ else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
+ nvec->rx->data[nvec->rx->pos++] = received;
+ else
+ dev_err(nvec->dev,
+ "RX buffer overflow on %p: "
+ "Trying to write byte %u of %u\n",
+ nvec->rx, nvec->rx->pos, NVEC_MSG_SIZE);
+ break;
+ default:
+ nvec->state = 0;
+ }
- dev_dbg(nvec->dev, "nvec sent %x\n", to_send);
+ /* If we are told that a new transfer starts, verify it */
+ if ((status & (RCVD | RNW)) == RCVD) {
+ if (received != nvec->i2c_addr)
+ dev_err(nvec->dev,
+ "received address 0x%02x, expected 0x%02x\n",
+ received, nvec->i2c_addr);
+ nvec->state = 1;
+ }
- goto handled;
- } else {
- received = readl(i2c_regs + I2C_SL_RCVD);
- //Workaround?
- if(status & RCVD) {
- writel(0, i2c_regs + I2C_SL_RCVD);
- goto handled;
- }
+ /* Send data if requested, but not on end of transmission */
+ if ((status & (RNW | END_TRANS)) == RNW)
+ writel(to_send, nvec->base + I2C_SL_RCVD);
+
+ /* If we have send the first byte */
+ if (status == (I2C_SL_IRQ | RNW | RCVD))
+ nvec_gpio_set_value(nvec, 1);
+
+ dev_dbg(nvec->dev,
+ "Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
+ (status & RNW) == 0 ? "received" : "R=",
+ received,
+ (status & (RNW | END_TRANS)) ? "sent" : "S=",
+ to_send,
+ state,
+ status & END_TRANS ? " END_TRANS" : "",
+ status & RCVD ? " RCVD" : "",
+ status & RNW ? " RNW" : "");
+
+
+ /*
+ * TODO: A correct fix needs to be found for this.
+ *
+ * We experience less incomplete messages with this delay than without
+ * it, but we don't know why. Help is appreciated.
+ */
+ udelay(100);
- if (nvec->state == NVEC_WAIT)
- {
- nvec->state = NVEC_READ;
- msg = kzalloc(sizeof(struct nvec_msg), GFP_NOWAIT);
- msg->data = kzalloc(32, GFP_NOWAIT);
- INIT_LIST_HEAD(&msg->node);
- nvec->rx = msg;
- } else
- msg = nvec->rx;
-
- BUG_ON(msg->pos > 32);
-
- msg->data[msg->pos] = received;
- msg->pos++;
- msg->size = msg->pos;
- dev_dbg(nvec->dev, "Got %02lx from Master (pos: %d)!\n", received, msg->pos);
- }
-handled:
return IRQ_HANDLED;
}
-static int __devinit nvec_add_subdev(struct nvec_chip *nvec, struct nvec_subdev *subdev)
+static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
- struct platform_device *pdev;
+ u32 val;
- pdev = platform_device_alloc(subdev->name, subdev->id);
- pdev->dev.parent = nvec->dev;
- pdev->dev.platform_data = subdev->platform_data;
+ clk_enable(nvec->i2c_clk);
- return platform_device_add(pdev);
-}
+ tegra_periph_reset_assert(nvec->i2c_clk);
+ udelay(2);
+ tegra_periph_reset_deassert(nvec->i2c_clk);
-static void tegra_init_i2c_slave(struct nvec_platform_data *pdata, unsigned char *i2c_regs,
- struct clk *i2c_clk)
-{
- u32 val;
+ val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
+ (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
+ writel(val, nvec->base + I2C_CNFG);
- clk_enable(i2c_clk);
- tegra_periph_reset_assert(i2c_clk);
- udelay(2);
- tegra_periph_reset_deassert(i2c_clk);
+ clk_set_rate(nvec->i2c_clk, 8 * 80000);
- writel(pdata->i2c_addr>>1, i2c_regs + I2C_SL_ADDR1);
- writel(0, i2c_regs + I2C_SL_ADDR2);
+ writel(I2C_SL_NEWL, nvec->base + I2C_SL_CNFG);
+ writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);
- writel(0x1E, i2c_regs + I2C_SL_DELAY_COUNT);
- val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
- (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
- writel(val, i2c_regs + I2C_CNFG);
- writel(I2C_SL_NEWL, i2c_regs + I2C_SL_CNFG);
+ writel(nvec->i2c_addr>>1, nvec->base + I2C_SL_ADDR1);
+ writel(0, nvec->base + I2C_SL_ADDR2);
+
+ enable_irq(nvec->irq);
- clk_disable(i2c_clk);
+ clk_disable(nvec->i2c_clk);
+}
+
+static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
+{
+ disable_irq(nvec->irq);
+ writel(I2C_SL_NEWL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
+ clk_disable(nvec->i2c_clk);
}
static void nvec_power_off(void)
@@ -299,86 +711,96 @@ static void nvec_power_off(void)
static int __devinit tegra_nvec_probe(struct platform_device *pdev)
{
- int err, i, ret;
+ int err, ret;
struct clk *i2c_clk;
struct nvec_platform_data *pdata = pdev->dev.platform_data;
struct nvec_chip *nvec;
struct nvec_msg *msg;
- unsigned char *i2c_regs;
+ struct resource *res;
+ struct resource *iomem;
+ void __iomem *base;
nvec = kzalloc(sizeof(struct nvec_chip), GFP_KERNEL);
- if(nvec == NULL) {
+ if (nvec == NULL) {
dev_err(&pdev->dev, "failed to reserve memory\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, nvec);
nvec->dev = &pdev->dev;
nvec->gpio = pdata->gpio;
- nvec->irq = pdata->irq;
+ nvec->i2c_addr = pdata->i2c_addr;
-/*
- i2c_clk=clk_get_sys(NULL, "i2c");
- if(IS_ERR_OR_NULL(i2c_clk))
- printk(KERN_ERR"No such clock tegra-i2c.2\n");
- else
- clk_enable(i2c_clk);
-*/
- i2c_regs = ioremap(pdata->base, pdata->size);
- if(!i2c_regs) {
- dev_err(nvec->dev, "failed to ioremap registers\n");
- goto failed;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -ENODEV;
}
- nvec->i2c_regs = i2c_regs;
+ iomem = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!iomem) {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ return -EBUSY;
+ }
- i2c_clk = clk_get_sys(pdata->clock, NULL);
- if(IS_ERR_OR_NULL(i2c_clk)) {
- dev_err(nvec->dev, "failed to get clock tegra-i2c.2\n");
- goto failed;
+ base = ioremap(iomem->start, resource_size(iomem));
+ if (!base) {
+ dev_err(&pdev->dev, "Can't ioremap I2C region\n");
+ return -ENOMEM;
}
- tegra_init_i2c_slave(pdata, i2c_regs, i2c_clk);
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ ret = -ENODEV;
+ goto err_iounmap;
+ }
- err = request_irq(nvec->irq, i2c_interrupt, IRQF_DISABLED, "nvec", nvec);
- if(err) {
- dev_err(nvec->dev, "couldn't request irq");
- goto failed;
+ i2c_clk = clk_get_sys("tegra-i2c.2", NULL);
+ if (IS_ERR(i2c_clk)) {
+ dev_err(nvec->dev, "failed to get controller clock\n");
+ goto err_iounmap;
}
- clk_enable(i2c_clk);
- clk_set_rate(i2c_clk, 8*80000);
+ nvec->base = base;
+ nvec->irq = res->start;
+ nvec->i2c_clk = i2c_clk;
+ nvec->rx = &nvec->msg_pool[0];
/* Set the gpio to low when we've got something to say */
err = gpio_request(nvec->gpio, "nvec gpio");
- if(err < 0)
+ if (err < 0)
dev_err(nvec->dev, "couldn't request gpio\n");
- tegra_gpio_enable(nvec->gpio);
- gpio_direction_output(nvec->gpio, 1);
- gpio_set_value(nvec->gpio, 1);
-
ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);
init_completion(&nvec->sync_write);
- sema_init(&nvec->sync_write_mutex, 1);
- INIT_LIST_HEAD(&nvec->tx_data);
+ init_completion(&nvec->ec_transfer);
+ mutex_init(&nvec->sync_write_mutex);
+ spin_lock_init(&nvec->tx_lock);
+ spin_lock_init(&nvec->rx_lock);
INIT_LIST_HEAD(&nvec->rx_data);
+ INIT_LIST_HEAD(&nvec->tx_data);
INIT_WORK(&nvec->rx_work, nvec_dispatch);
INIT_WORK(&nvec->tx_work, nvec_request_master);
+ nvec->wq = alloc_workqueue("nvec", WQ_NON_REENTRANT, 2);
- /* enable event reporting */
- nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
- sizeof(EC_ENABLE_EVENT_REPORTING));
+ err = request_irq(nvec->irq, nvec_interrupt, 0, "nvec", nvec);
+ if (err) {
+ dev_err(nvec->dev, "couldn't request irq\n");
+ goto failed;
+ }
+ disable_irq(nvec->irq);
- nvec_kbd_init(nvec);
-#ifdef CONFIG_SERIO_NVEC_PS2
- nvec_ps2(nvec);
-#endif
+ tegra_init_i2c_slave(nvec);
- /* setup subdevs */
- for (i = 0; i < pdata->num_subdevs; i++) {
- ret = nvec_add_subdev(nvec, &pdata->subdevs[i]);
- }
+ clk_enable(i2c_clk);
+
+ gpio_direction_output(nvec->gpio, 1);
+ gpio_set_value(nvec->gpio, 1);
+
+ /* enable event reporting */
+ nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
+ sizeof(EC_ENABLE_EVENT_REPORTING));
nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);
@@ -390,14 +812,20 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
msg = nvec_write_sync(nvec, EC_GET_FIRMWARE_VERSION,
sizeof(EC_GET_FIRMWARE_VERSION));
- dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
+ if (msg) {
+ dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
msg->data[4], msg->data[5], msg->data[6], msg->data[7]);
- kfree(msg->data);
- kfree(msg);
+ nvec_msg_free(nvec, msg);
+ }
+
+ ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
+ ARRAY_SIZE(nvec_devices), base, 0);
+ if (ret)
+ dev_err(nvec->dev, "error adding subdevices\n");
/* unmute speakers? */
- nvec_write_async(nvec, "\x0d\x10\x59\x94", 4);
+ nvec_write_async(nvec, "\x0d\x10\x59\x95", 4);
/* enable lid switch event */
nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x02\x00", 7);
@@ -407,6 +835,8 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
return 0;
+err_iounmap:
+ iounmap(base);
failed:
kfree(nvec);
return -ENOMEM;
@@ -414,7 +844,16 @@ failed:
static int __devexit tegra_nvec_remove(struct platform_device *pdev)
{
- // TODO: unregister
+ struct nvec_chip *nvec = platform_get_drvdata(pdev);
+
+ nvec_write_async(nvec, EC_DISABLE_EVENT_REPORTING, 3);
+ mfd_remove_devices(nvec->dev);
+ free_irq(nvec->irq, &nvec_interrupt);
+ iounmap(nvec->base);
+ gpio_free(nvec->gpio);
+ destroy_workqueue(nvec->wq);
+ kfree(nvec);
+
return 0;
}
@@ -423,19 +862,27 @@ static int __devexit tegra_nvec_remove(struct platform_device *pdev)
static int tegra_nvec_suspend(struct platform_device *pdev, pm_message_t state)
{
struct nvec_chip *nvec = platform_get_drvdata(pdev);
+ struct nvec_msg *msg;
dev_dbg(nvec->dev, "suspending\n");
- nvec_write_async(nvec, EC_DISABLE_EVENT_REPORTING, 3);
- nvec_write_async(nvec, "\x04\x02", 2);
+
+ /* keep these sync or you'll break suspend */
+ msg = nvec_write_sync(nvec, EC_DISABLE_EVENT_REPORTING, 3);
+ nvec_msg_free(nvec, msg);
+ msg = nvec_write_sync(nvec, "\x04\x02", 2);
+ nvec_msg_free(nvec, msg);
+
+ nvec_disable_i2c_slave(nvec);
return 0;
}
-static int tegra_nvec_resume(struct platform_device *pdev) {
-
+static int tegra_nvec_resume(struct platform_device *pdev)
+{
struct nvec_chip *nvec = platform_get_drvdata(pdev);
dev_dbg(nvec->dev, "resuming\n");
+ tegra_init_i2c_slave(nvec);
nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING, 3);
return 0;
@@ -446,13 +893,12 @@ static int tegra_nvec_resume(struct platform_device *pdev) {
#define tegra_nvec_resume NULL
#endif
-static struct platform_driver nvec_device_driver =
-{
- .probe = tegra_nvec_probe,
- .remove = __devexit_p(tegra_nvec_remove),
+static struct platform_driver nvec_device_driver = {
+ .probe = tegra_nvec_probe,
+ .remove = __devexit_p(tegra_nvec_remove),
.suspend = tegra_nvec_suspend,
- .resume = tegra_nvec_resume,
- .driver = {
+ .resume = tegra_nvec_resume,
+ .driver = {
.name = "nvec",
.owner = THIS_MODULE,
}
@@ -464,4 +910,8 @@ static int __init tegra_nvec_init(void)
}
module_init(tegra_nvec_init);
+
MODULE_ALIAS("platform:nvec");
+MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
+MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h
index a2d82dce62d..a4c17b0e10c 100644
--- a/drivers/staging/nvec/nvec.h
+++ b/drivers/staging/nvec/nvec.h
@@ -1,110 +1,203 @@
+/*
+ * NVEC: NVIDIA compliant embedded controller interface
+ *
+ * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.launchpad.net>
+ *
+ * Authors: Pierre-Hugues Husson <phhusson@free.fr>
+ * Ilya Petrov <ilya.muromec@gmail.com>
+ * Marc Dietrich <marvin24@gmx.de>
+ * Julian Andres Klode <jak@jak-linux.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
#ifndef __LINUX_MFD_NVEC
#define __LINUX_MFD_NVEC
-#include <linux/semaphore.h>
-
-typedef enum {
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+/* NVEC_POOL_SIZE - Size of the pool in &struct nvec_msg */
+#define NVEC_POOL_SIZE 64
+
+/*
+ * NVEC_MSG_SIZE - Maximum size of the data field of &struct nvec_msg.
+ *
+ * A message must store up to a SMBus block operation which consists of
+ * one command byte, one count byte, and up to 32 payload bytes = 34
+ * byte.
+ */
+#define NVEC_MSG_SIZE 34
+
+/**
+ * enum nvec_event_size - The size of an event message
+ * @NVEC_2BYTES: The message has one command byte and one data byte
+ * @NVEC_3BYTES: The message has one command byte and two data bytes
+ * @NVEC_VAR_SIZE: The message has one command byte, one count byte, and as
+ * up to as many bytes as the number in the count byte. The
+ * maximum is 32
+ *
+ * Events can be fixed or variable sized. This is useless on other message
+ * types, which are always variable sized.
+ */
+enum nvec_event_size {
NVEC_2BYTES,
NVEC_3BYTES,
- NVEC_VAR_SIZE
-} nvec_size;
-
-typedef enum {
- NOT_REALLY,
- YES,
- NOT_AT_ALL,
-} how_care;
+ NVEC_VAR_SIZE,
+};
-typedef enum {
- NVEC_SYS=1,
+/**
+ * enum nvec_msg_type - The type of a message
+ * @NVEC_SYS: A system request/response
+ * @NVEC_BAT: A battery request/response
+ * @NVEC_KBD: A keyboard request/response
+ * @NVEC_PS2: A mouse request/response
+ * @NVEC_CNTL: A EC control request/response
+ * @NVEC_KB_EVT: An event from the keyboard
+ * @NVEC_PS2_EVT: An event from the mouse
+ *
+ * Events can be fixed or variable sized. This is useless on other message
+ * types, which are always variable sized.
+ */
+enum nvec_msg_type {
+ NVEC_SYS = 1,
NVEC_BAT,
NVEC_KBD = 5,
NVEC_PS2,
NVEC_CNTL,
NVEC_KB_EVT = 0x80,
- NVEC_PS2_EVT
-} nvec_event;
-
-typedef enum {
- NVEC_WAIT,
- NVEC_READ,
- NVEC_WRITE
-} nvec_state;
+ NVEC_PS2_EVT,
+};
+/**
+ * struct nvec_msg - A buffer for a single message
+ * @node: Messages are part of various lists in a &struct nvec_chip
+ * @data: The data of the message
+ * @size: For TX messages, the number of bytes used in @data
+ * @pos: For RX messages, the current position to write to. For TX messages,
+ * the position to read from.
+ * @used: Used for the message pool to mark a message as free/allocated.
+ *
+ * This structure is used to hold outgoing and incoming messages. Outgoing
+ * messages have a different format than incoming messages, and that is not
+ * documented yet.
+ */
struct nvec_msg {
- unsigned char *data;
+ struct list_head node;
+ unsigned char data[NVEC_MSG_SIZE];
unsigned short size;
unsigned short pos;
- struct list_head node;
+ atomic_t used;
};
+/**
+ * struct nvec_subdev - A subdevice of nvec, such as nvec_kbd
+ * @name: The name of the sub device
+ * @platform_data: Platform data
+ * @id: Identifier of the sub device
+ */
struct nvec_subdev {
const char *name;
void *platform_data;
int id;
};
+/**
+ * struct nvec_platform_data - platform data for a tegra slave controller
+ * @i2c_addr: number of i2c slave adapter the ec is connected to
+ * @gpio: gpio number for the ec request line
+ *
+ * Platform data, to be used in board definitions. For an example, take a
+ * look at the paz00 board in arch/arm/mach-tegra/board-paz00.c
+ */
struct nvec_platform_data {
- int num_subdevs;
int i2c_addr;
int gpio;
- int irq;
- int base;
- int size;
- char clock[16];
- struct nvec_subdev *subdevs;
};
+/**
+ * struct nvec_chip - A single connection to an NVIDIA Embedded controller
+ * @dev: The device
+ * @gpio: The same as for &struct nvec_platform_data
+ * @irq: The IRQ of the I2C device
+ * @i2c_addr: The address of the I2C slave
+ * @base: The base of the memory mapped region of the I2C device
+ * @clk: The clock of the I2C device
+ * @notifier_list: Notifiers to be called on received messages, see
+ * nvec_register_notifier()
+ * @rx_data: Received messages that have to be processed
+ * @tx_data: Messages waiting to be sent to the controller
+ * @nvec_status_notifier: Internal notifier (see nvec_status_notifier())
+ * @rx_work: A work structure for the RX worker nvec_dispatch()
+ * @tx_work: A work structure for the TX worker nvec_request_master()
+ * @wq: The work queue in which @rx_work and @tx_work are executed
+ * @rx: The message currently being retrieved or %NULL
+ * @msg_pool: A pool of messages for allocation
+ * @tx: The message currently being transferred
+ * @tx_scratch: Used for building pseudo messages
+ * @ec_transfer: A completion that will be completed once a message has been
+ * received (see nvec_rx_completed())
+ * @tx_lock: Spinlock for modifications on @tx_data
+ * @rx_lock: Spinlock for modifications on @rx_data
+ * @sync_write_mutex: A mutex for nvec_write_sync()
+ * @sync_write: A completion to signal that a synchronous message is complete
+ * @sync_write_pending: The first two bytes of the request (type and subtype)
+ * @last_sync_msg: The last synchronous message.
+ * @state: State of our finite state machine used in nvec_interrupt()
+ */
struct nvec_chip {
struct device *dev;
int gpio;
int irq;
- unsigned char *i2c_regs;
- nvec_state state;
+ int i2c_addr;
+ void __iomem *base;
+ struct clk *i2c_clk;
struct atomic_notifier_head notifier_list;
struct list_head rx_data, tx_data;
struct notifier_block nvec_status_notifier;
struct work_struct rx_work, tx_work;
- struct nvec_msg *rx, *tx;
+ struct workqueue_struct *wq;
+ struct nvec_msg msg_pool[NVEC_POOL_SIZE];
+ struct nvec_msg *rx;
+
+ struct nvec_msg *tx;
+ struct nvec_msg tx_scratch;
+ struct completion ec_transfer;
+
+ spinlock_t tx_lock, rx_lock;
-/* sync write stuff */
- struct semaphore sync_write_mutex;
+ /* sync write stuff */
+ struct mutex sync_write_mutex;
struct completion sync_write;
u16 sync_write_pending;
struct nvec_msg *last_sync_msg;
+
+ int state;
};
-extern void nvec_write_async(struct nvec_chip *nvec, unsigned char *data, short size);
+extern int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
+ short size);
+
+extern struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
+ const unsigned char *data, short size);
extern int nvec_register_notifier(struct nvec_chip *nvec,
- struct notifier_block *nb, unsigned int events);
+ struct notifier_block *nb,
+ unsigned int events);
extern int nvec_unregister_notifier(struct device *dev,
- struct notifier_block *nb, unsigned int events);
-
-const char *nvec_send_msg(unsigned char *src, unsigned char *dst_size, how_care care_resp, void (*rt_handler)(unsigned char *data));
-
-extern int nvec_ps2(struct nvec_chip *nvec);
-extern int nvec_kbd_init(struct nvec_chip *nvec);
-
-#define I2C_CNFG 0x00
-#define I2C_CNFG_PACKET_MODE_EN (1<<10)
-#define I2C_CNFG_NEW_MASTER_SFM (1<<11)
-#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
-
-#define I2C_SL_CNFG 0x20
-#define I2C_SL_NEWL (1<<2)
-#define I2C_SL_NACK (1<<1)
-#define I2C_SL_RESP (1<<0)
-#define I2C_SL_IRQ (1<<3)
-#define END_TRANS (1<<4)
-#define RCVD (1<<2)
-#define RNW (1<<1)
-
-#define I2C_SL_RCVD 0x24
-#define I2C_SL_STATUS 0x28
-#define I2C_SL_ADDR1 0x2c
-#define I2C_SL_ADDR2 0x30
-#define I2C_SL_DELAY_COUNT 0x3c
+ struct notifier_block *nb,
+ unsigned int events);
+
+extern void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg);
#endif
diff --git a/drivers/staging/nvec/nvec_kbd.c b/drivers/staging/nvec/nvec_kbd.c
index 9a9850725b5..a4ce5a740e2 100644
--- a/drivers/staging/nvec/nvec_kbd.c
+++ b/drivers/staging/nvec/nvec_kbd.c
@@ -1,42 +1,76 @@
+/*
+ * nvec_kbd: keyboard driver for a NVIDIA compliant embedded controller
+ *
+ * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.launchpad.net>
+ *
+ * Authors: Pierre-Hugues Husson <phhusson@free.fr>
+ * Marc Dietrich <marvin24@gmx.de>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/delay.h>
+#include <linux/platform_device.h>
+
#include "nvec-keytable.h"
#include "nvec.h"
-#define ACK_KBD_EVENT {'\x05','\xed','\x01'}
+#define ACK_KBD_EVENT {'\x05', '\xed', '\x01'}
+static const char led_on[3] = "\x05\xed\x07";
+static const char led_off[3] = "\x05\xed\x00";
static unsigned char keycodes[ARRAY_SIZE(code_tab_102us)
- + ARRAY_SIZE(extcode_tab_us102)];
+ + ARRAY_SIZE(extcode_tab_us102)];
struct nvec_keys {
struct input_dev *input;
struct notifier_block notifier;
struct nvec_chip *nvec;
+ bool caps_lock;
};
static struct nvec_keys keys_dev;
+static void nvec_kbd_toggle_led(void)
+{
+ keys_dev.caps_lock = !keys_dev.caps_lock;
+
+ if (keys_dev.caps_lock)
+ nvec_write_async(keys_dev.nvec, led_on, sizeof(led_on));
+ else
+ nvec_write_async(keys_dev.nvec, led_off, sizeof(led_off));
+}
+
static int nvec_keys_notifier(struct notifier_block *nb,
- unsigned long event_type, void *data)
+ unsigned long event_type, void *data)
{
int code, state;
unsigned char *msg = (unsigned char *)data;
if (event_type == NVEC_KB_EVT) {
- nvec_size _size = (msg[0] & (3 << 5)) >> 5;
+ int _size = (msg[0] & (3 << 5)) >> 5;
/* power on/off button */
- if(_size == NVEC_VAR_SIZE)
+ if (_size == NVEC_VAR_SIZE)
return NOTIFY_STOP;
- if(_size == NVEC_3BYTES)
+ if (_size == NVEC_3BYTES)
msg++;
code = msg[1] & 0x7f;
state = msg[1] & 0x80;
- input_report_key(keys_dev.input, code_tabs[_size][code], !state);
+ if (code_tabs[_size][code] == KEY_CAPSLOCK && state)
+ nvec_kbd_toggle_led();
+
+ input_report_key(keys_dev.input, code_tabs[_size][code],
+ !state);
input_sync(keys_dev.input);
return NOTIFY_STOP;
@@ -46,18 +80,18 @@ static int nvec_keys_notifier(struct notifier_block *nb,
}
static int nvec_kbd_event(struct input_dev *dev, unsigned int type,
- unsigned int code, int value)
+ unsigned int code, int value)
{
unsigned char buf[] = ACK_KBD_EVENT;
struct nvec_chip *nvec = keys_dev.nvec;
- if(type==EV_REP)
+ if (type == EV_REP)
return 0;
- if(type!=EV_LED)
+ if (type != EV_LED)
return -1;
- if(code!=LED_CAPSL)
+ if (code != LED_CAPSL)
return -1;
buf[2] = !!value;
@@ -66,22 +100,23 @@ static int nvec_kbd_event(struct input_dev *dev, unsigned int type,
return 0;
}
-int __init nvec_kbd_init(struct nvec_chip *nvec)
+static int __devinit nvec_kbd_probe(struct platform_device *pdev)
{
+ struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
int i, j, err;
struct input_dev *idev;
j = 0;
- for(i = 0; i < ARRAY_SIZE(code_tab_102us); ++i)
+ for (i = 0; i < ARRAY_SIZE(code_tab_102us); ++i)
keycodes[j++] = code_tab_102us[i];
- for(i = 0; i < ARRAY_SIZE(extcode_tab_us102); ++i)
- keycodes[j++]=extcode_tab_us102[i];
+ for (i = 0; i < ARRAY_SIZE(extcode_tab_us102); ++i)
+ keycodes[j++] = extcode_tab_us102[i];
idev = input_allocate_device();
- idev->name = "Tegra nvec keyboard";
- idev->phys = "i2c3_slave/nvec";
+ idev->name = "nvec keyboard";
+ idev->phys = "nvec";
idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_LED);
idev->ledbit[0] = BIT_MASK(LED_CAPSL);
idev->event = nvec_kbd_event;
@@ -89,12 +124,12 @@ int __init nvec_kbd_init(struct nvec_chip *nvec)
idev->keycodesize = sizeof(unsigned char);
idev->keycodemax = ARRAY_SIZE(keycodes);
- for( i = 0; i < ARRAY_SIZE(keycodes); ++i)
+ for (i = 0; i < ARRAY_SIZE(keycodes); ++i)
set_bit(keycodes[i], idev->keybit);
clear_bit(0, idev->keybit);
err = input_register_device(idev);
- if(err)
+ if (err)
goto fail;
keys_dev.input = idev;
@@ -114,9 +149,31 @@ int __init nvec_kbd_init(struct nvec_chip *nvec)
or until we have a sync write */
mdelay(1000);
+ /* Disable caps lock LED */
+ nvec_write_async(nvec, led_off, sizeof(led_off));
+
return 0;
fail:
input_free_device(idev);
return err;
}
+
+static struct platform_driver nvec_kbd_driver = {
+ .probe = nvec_kbd_probe,
+ .driver = {
+ .name = "nvec-kbd",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init nvec_kbd_init(void)
+{
+ return platform_driver_register(&nvec_kbd_driver);
+}
+
+module_init(nvec_kbd_init);
+
+MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
+MODULE_DESCRIPTION("NVEC keyboard driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/nvec/nvec_leds.c b/drivers/staging/nvec/nvec_leds.c
new file mode 100644
index 00000000000..f4cbcd62500
--- /dev/null
+++ b/drivers/staging/nvec/nvec_leds.c
@@ -0,0 +1,114 @@
+/*
+ * nvec_leds: LED driver for a NVIDIA compliant embedded controller
+ *
+ * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.launchpad.net>
+ *
+ * Authors: Ilya Petrov <ilya.muromec@gmail.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include "nvec.h"
+
+#define to_nvec_led(led_cdev) \
+ container_of(led_cdev, struct nvec_led, cdev)
+
+#define NVEC_LED_REQ {'\x0d', '\x10', '\x45', '\x10', '\x00'}
+
+#define NVEC_LED_MAX 8
+
+struct nvec_led {
+ struct led_classdev cdev;
+ struct nvec_chip *nvec;
+};
+
+static void nvec_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct nvec_led *led = to_nvec_led(led_cdev);
+ unsigned char buf[] = NVEC_LED_REQ;
+ buf[4] = value;
+
+ nvec_write_async(led->nvec, buf, sizeof(buf));
+
+ led->cdev.brightness = value;
+
+}
+
+static int __devinit nvec_led_probe(struct platform_device *pdev)
+{
+ struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+ struct nvec_led *led;
+ int ret = 0;
+
+ led = kzalloc(sizeof(*led), GFP_KERNEL);
+ if (led == NULL)
+ return -ENOMEM;
+
+ led->cdev.max_brightness = NVEC_LED_MAX;
+
+ led->cdev.brightness_set = nvec_led_brightness_set;
+ led->cdev.name = "nvec-led";
+ led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ led->nvec = nvec;
+
+ platform_set_drvdata(pdev, led);
+
+ ret = led_classdev_register(&pdev->dev, &led->cdev);
+ if (ret < 0)
+ goto err_led;
+
+ /* to expose the default value to userspace */
+ led->cdev.brightness = 0;
+
+ return 0;
+
+err_led:
+ kfree(led);
+ return ret;
+}
+
+static int __devexit nvec_led_remove(struct platform_device *pdev)
+{
+ struct nvec_led *led = platform_get_drvdata(pdev);
+
+ led_classdev_unregister(&led->cdev);
+ kfree(led);
+ return 0;
+}
+
+static struct platform_driver nvec_led_driver = {
+ .probe = nvec_led_probe,
+ .remove = __devexit_p(nvec_led_remove),
+ .driver = {
+ .name = "nvec-leds",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init nvec_led_init(void)
+{
+ return platform_driver_register(&nvec_led_driver);
+}
+
+module_init(nvec_led_init);
+
+static void __exit nvec_led_exit(void)
+{
+ platform_driver_unregister(&nvec_led_driver);
+}
+
+module_exit(nvec_led_exit);
+
+MODULE_AUTHOR("Ilya Petrov <ilya.muromec@gmail.com>");
+MODULE_DESCRIPTION("Tegra NVEC LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nvec-leds");
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
index df164add837..dfa966f6189 100644
--- a/drivers/staging/nvec/nvec_power.c
+++ b/drivers/staging/nvec/nvec_power.c
@@ -1,3 +1,17 @@
+/*
+ * nvec_power: power supply driver for a NVIDIA compliant embedded controller
+ *
+ * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.launchpad.net>
+ *
+ * Authors: Ilya Petrov <ilya.muromec@gmail.com>
+ * Marc Dietrich <marvin24@gmx.de>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
@@ -5,10 +19,10 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
+
#include "nvec.h"
-struct nvec_power
-{
+struct nvec_power {
struct notifier_block notifier;
struct delayed_work poller;
struct nvec_chip *nvec;
@@ -58,7 +72,8 @@ struct bat_response {
u8 length;
u8 sub_type;
u8 status;
- union { /* payload */
+ /* payload */
+ union {
char plc[30];
u16 plu;
s16 pls;
@@ -69,18 +84,17 @@ static struct power_supply nvec_bat_psy;
static struct power_supply nvec_psy;
static int nvec_power_notifier(struct notifier_block *nb,
- unsigned long event_type, void *data)
+ unsigned long event_type, void *data)
{
- struct nvec_power *power = container_of(nb, struct nvec_power, notifier);
+ struct nvec_power *power =
+ container_of(nb, struct nvec_power, notifier);
struct bat_response *res = (struct bat_response *)data;
if (event_type != NVEC_SYS)
return NOTIFY_DONE;
- if(res->sub_type == 0)
- {
- if (power->on != res->plu)
- {
+ if (res->sub_type == 0) {
+ if (power->on != res->plu) {
power->on = res->plu;
power_supply_changed(&nvec_psy);
}
@@ -89,8 +103,7 @@ static int nvec_power_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static const int bat_init[] =
-{
+static const int bat_init[] = {
LAST_FULL_CHARGE_CAPACITY, DESIGN_CAPACITY, CRITICAL_CAPACITY,
MANUFACTURER, MODEL, TYPE,
};
@@ -100,116 +113,115 @@ static void get_bat_mfg_data(struct nvec_power *power)
int i;
char buf[] = { '\x02', '\x00' };
- for (i = 0; i < ARRAY_SIZE(bat_init); i++)
- {
+ for (i = 0; i < ARRAY_SIZE(bat_init); i++) {
buf[1] = bat_init[i];
nvec_write_async(power->nvec, buf, 2);
}
}
static int nvec_power_bat_notifier(struct notifier_block *nb,
- unsigned long event_type, void *data)
+ unsigned long event_type, void *data)
{
- struct nvec_power *power = container_of(nb, struct nvec_power, notifier);
+ struct nvec_power *power =
+ container_of(nb, struct nvec_power, notifier);
struct bat_response *res = (struct bat_response *)data;
int status_changed = 0;
if (event_type != NVEC_BAT)
return NOTIFY_DONE;
- switch(res->sub_type)
- {
- case SLOT_STATUS:
- if (res->plc[0] & 1)
- {
- if (power->bat_present == 0)
- {
- status_changed = 1;
- get_bat_mfg_data(power);
- }
-
- power->bat_present = 1;
-
- switch ((res->plc[0] >> 1) & 3)
- {
- case 0:
- power->bat_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- break;
- case 1:
- power->bat_status = POWER_SUPPLY_STATUS_CHARGING;
- break;
- case 2:
- power->bat_status = POWER_SUPPLY_STATUS_DISCHARGING;
- break;
- default:
- power->bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
- }
- } else {
- if (power->bat_present == 1)
- status_changed = 1;
-
- power->bat_present = 0;
+ switch (res->sub_type) {
+ case SLOT_STATUS:
+ if (res->plc[0] & 1) {
+ if (power->bat_present == 0) {
+ status_changed = 1;
+ get_bat_mfg_data(power);
+ }
+
+ power->bat_present = 1;
+
+ switch ((res->plc[0] >> 1) & 3) {
+ case 0:
+ power->bat_status =
+ POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case 1:
+ power->bat_status =
+ POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 2:
+ power->bat_status =
+ POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ default:
power->bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
}
- power->bat_cap = res->plc[1];
- if (status_changed)
- power_supply_changed(&nvec_bat_psy);
- break;
- case VOLTAGE:
- power->bat_voltage_now = res->plu * 1000;
- break;
- case TIME_REMAINING:
- power->time_remain = res->plu * 3600;
- break;
- case CURRENT:
- power->bat_current_now = res->pls * 1000;
- break;
- case AVERAGE_CURRENT:
- power->bat_current_avg = res->pls * 1000;
- break;
- case CAPACITY_REMAINING:
- power->capacity_remain = res->plu * 1000;
- break;
- case LAST_FULL_CHARGE_CAPACITY:
- power->charge_last_full = res->plu * 1000;
- break;
- case DESIGN_CAPACITY:
- power->charge_full_design = res->plu * 1000;
- break;
- case CRITICAL_CAPACITY:
- power->critical_capacity = res->plu * 1000;
- break;
- case TEMPERATURE:
- power->bat_temperature = res->plu - 2732;
- break;
- case MANUFACTURER:
- memcpy(power->bat_manu, &res->plc, res->length-2);
- power->bat_model[res->length-2] = '\0';
- break;
- case MODEL:
- memcpy(power->bat_model, &res->plc, res->length-2);
- power->bat_model[res->length-2] = '\0';
- break;
- case TYPE:
- memcpy(power->bat_type, &res->plc, res->length-2);
- power->bat_type[res->length-2] = '\0';
- /* this differs a little from the spec
- fill in more if you find some */
- if (!strncmp(power->bat_type, "Li", 30))
- power->bat_type_enum = POWER_SUPPLY_TECHNOLOGY_LION;
- else
- power->bat_type_enum = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
- break;
- default:
- return NOTIFY_STOP;
+ } else {
+ if (power->bat_present == 1)
+ status_changed = 1;
+
+ power->bat_present = 0;
+ power->bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+ power->bat_cap = res->plc[1];
+ if (status_changed)
+ power_supply_changed(&nvec_bat_psy);
+ break;
+ case VOLTAGE:
+ power->bat_voltage_now = res->plu * 1000;
+ break;
+ case TIME_REMAINING:
+ power->time_remain = res->plu * 3600;
+ break;
+ case CURRENT:
+ power->bat_current_now = res->pls * 1000;
+ break;
+ case AVERAGE_CURRENT:
+ power->bat_current_avg = res->pls * 1000;
+ break;
+ case CAPACITY_REMAINING:
+ power->capacity_remain = res->plu * 1000;
+ break;
+ case LAST_FULL_CHARGE_CAPACITY:
+ power->charge_last_full = res->plu * 1000;
+ break;
+ case DESIGN_CAPACITY:
+ power->charge_full_design = res->plu * 1000;
+ break;
+ case CRITICAL_CAPACITY:
+ power->critical_capacity = res->plu * 1000;
+ break;
+ case TEMPERATURE:
+ power->bat_temperature = res->plu - 2732;
+ break;
+ case MANUFACTURER:
+ memcpy(power->bat_manu, &res->plc, res->length - 2);
+ power->bat_model[res->length - 2] = '\0';
+ break;
+ case MODEL:
+ memcpy(power->bat_model, &res->plc, res->length - 2);
+ power->bat_model[res->length - 2] = '\0';
+ break;
+ case TYPE:
+ memcpy(power->bat_type, &res->plc, res->length - 2);
+ power->bat_type[res->length - 2] = '\0';
+ /* this differs a little from the spec
+ fill in more if you find some */
+ if (!strncmp(power->bat_type, "Li", 30))
+ power->bat_type_enum = POWER_SUPPLY_TECHNOLOGY_LION;
+ else
+ power->bat_type_enum = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+ break;
+ default:
+ return NOTIFY_STOP;
}
return NOTIFY_STOP;
}
static int nvec_power_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
+ enum power_supply_property psp,
+ union power_supply_propval *val)
{
struct nvec_power *power = dev_get_drvdata(psy->dev->parent);
switch (psp) {
@@ -223,61 +235,60 @@ static int nvec_power_get_property(struct power_supply *psy,
}
static int nvec_battery_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
+ enum power_supply_property psp,
+ union power_supply_propval *val)
{
struct nvec_power *power = dev_get_drvdata(psy->dev->parent);
- switch(psp)
- {
- case POWER_SUPPLY_PROP_STATUS:
- val->intval = power->bat_status;
- break;
- case POWER_SUPPLY_PROP_CAPACITY:
- val->intval = power->bat_cap;
- break;
- case POWER_SUPPLY_PROP_PRESENT:
- val->intval = power->bat_present;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- val->intval = power->bat_voltage_now;
- break;
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- val->intval = power->bat_current_now;
- break;
- case POWER_SUPPLY_PROP_CURRENT_AVG:
- val->intval = power->bat_current_avg;
- break;
- case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
- val->intval = power->time_remain;
- break;
- case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- val->intval = power->charge_full_design;
- break;
- case POWER_SUPPLY_PROP_CHARGE_FULL:
- val->intval = power->charge_last_full;
- break;
- case POWER_SUPPLY_PROP_CHARGE_EMPTY:
- val->intval = power->critical_capacity;
- break;
- case POWER_SUPPLY_PROP_CHARGE_NOW:
- val->intval = power->capacity_remain;
- break;
- case POWER_SUPPLY_PROP_TEMP:
- val->intval = power->bat_temperature;
- break;
- case POWER_SUPPLY_PROP_MANUFACTURER:
- val->strval = power->bat_manu;
- break;
- case POWER_SUPPLY_PROP_MODEL_NAME:
- val->strval = power->bat_model;
- break;
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = power->bat_type_enum;
- break;
- default:
- return -EINVAL;
- }
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = power->bat_status;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = power->bat_cap;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = power->bat_present;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = power->bat_voltage_now;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = power->bat_current_now;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = power->bat_current_avg;
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ val->intval = power->time_remain;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = power->charge_full_design;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = power->charge_last_full;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_EMPTY:
+ val->intval = power->critical_capacity;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = power->capacity_remain;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = power->bat_temperature;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = power->bat_manu;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = power->bat_model;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = power->bat_type_enum;
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
@@ -310,11 +321,11 @@ static char *nvec_power_supplied_to[] = {
};
static struct power_supply nvec_bat_psy = {
- .name = "battery",
- .type = POWER_SUPPLY_TYPE_BATTERY,
- .properties = nvec_battery_props,
- .num_properties = ARRAY_SIZE(nvec_battery_props),
- .get_property = nvec_battery_get_property,
+ .name = "battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = nvec_battery_props,
+ .num_properties = ARRAY_SIZE(nvec_battery_props),
+ .get_property = nvec_battery_get_property,
};
static struct power_supply nvec_psy = {
@@ -327,9 +338,8 @@ static struct power_supply nvec_psy = {
.get_property = nvec_power_get_property,
};
-static int counter = 0;
-static int const bat_iter[] =
-{
+static int counter;
+static int const bat_iter[] = {
SLOT_STATUS, VOLTAGE, CURRENT, CAPACITY_REMAINING,
#ifdef EC_FULL_DIAG
AVERAGE_CURRENT, TEMPERATURE, TIME_REMAINING,
@@ -340,7 +350,7 @@ static void nvec_power_poll(struct work_struct *work)
{
char buf[] = { '\x01', '\x00' };
struct nvec_power *power = container_of(work, struct nvec_power,
- poller.work);
+ poller.work);
if (counter >= ARRAY_SIZE(bat_iter))
counter = 0;
@@ -351,19 +361,18 @@ static void nvec_power_poll(struct work_struct *work)
/* select a battery request function via round robin
doing it all at once seems to overload the power supply */
- buf[0] = '\x02'; /* battery */
- buf[1] = bat_iter[counter++];
+ buf[0] = '\x02'; /* battery */
+ buf[1] = bat_iter[counter++];
nvec_write_async(power->nvec, buf, 2);
-// printk("%02x %02x\n", buf[0], buf[1]);
-
schedule_delayed_work(to_delayed_work(work), msecs_to_jiffies(5000));
};
static int __devinit nvec_power_probe(struct platform_device *pdev)
{
struct power_supply *psy;
- struct nvec_power *power = kzalloc(sizeof(struct nvec_power), GFP_NOWAIT);
+ struct nvec_power *power =
+ kzalloc(sizeof(struct nvec_power), GFP_NOWAIT);
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
dev_set_drvdata(&pdev->dev, power);
@@ -381,7 +390,7 @@ static int __devinit nvec_power_probe(struct platform_device *pdev)
case BAT:
psy = &nvec_bat_psy;
- power->notifier.notifier_call = nvec_power_bat_notifier;
+ power->notifier.notifier_call = nvec_power_bat_notifier;
break;
default:
kfree(power);
@@ -398,14 +407,13 @@ static int __devinit nvec_power_probe(struct platform_device *pdev)
static struct platform_driver nvec_power_driver = {
.probe = nvec_power_probe,
-// .remove = __devexit_p(nvec_power_remove),
.driver = {
- .name = "nvec-power",
- .owner = THIS_MODULE,
- }
+ .name = "nvec-power",
+ .owner = THIS_MODULE,
+ }
};
-static int __init nvec_power_init(void)
+static int __init nvec_power_init(void)
{
return platform_driver_register(&nvec_power_driver);
}
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index 6bb9430f352..742f5ccfe76 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -1,14 +1,33 @@
+/*
+ * nvec_ps2: mouse driver for a NVIDIA compliant embedded controller
+ *
+ * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.launchpad.net>
+ *
+ * Authors: Pierre-Hugues Husson <phhusson@free.fr>
+ * Ilya Petrov <ilya.muromec@gmail.com>
+ * Marc Dietrich <marvin24@gmx.de>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/serio.h>
#include <linux/delay.h>
+#include <linux/platform_device.h>
+
#include "nvec.h"
-#define START_STREAMING {'\x06','\x03','\x01'}
-#define STOP_STREAMING {'\x06','\x04'}
-#define SEND_COMMAND {'\x06','\x01','\xf4','\x01'}
+#define START_STREAMING {'\x06', '\x03', '\x04'}
+#define STOP_STREAMING {'\x06', '\x04'}
+#define SEND_COMMAND {'\x06', '\x01', '\xf4', '\x01'}
-struct nvec_ps2
-{
+static const unsigned char MOUSE_RESET[] = {'\x06', '\x01', '\xff', '\x03'};
+
+struct nvec_ps2 {
struct serio *ser_dev;
struct notifier_block notifier;
struct nvec_chip *nvec;
@@ -19,8 +38,7 @@ static struct nvec_ps2 ps2_dev;
static int ps2_startstreaming(struct serio *ser_dev)
{
unsigned char buf[] = START_STREAMING;
- nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
- return 0;
+ return nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
}
static void ps2_stopstreaming(struct serio *ser_dev)
@@ -29,12 +47,6 @@ static void ps2_stopstreaming(struct serio *ser_dev)
nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
}
-/* is this really needed?
-static void nvec_resp_handler(unsigned char *data) {
- serio_interrupt(ser_dev, data[4], 0);
-}
-*/
-
static int ps2_sendcommand(struct serio *ser_dev, unsigned char cmd)
{
unsigned char buf[] = SEND_COMMAND;
@@ -42,52 +54,49 @@ static int ps2_sendcommand(struct serio *ser_dev, unsigned char cmd)
buf[2] = cmd & 0xff;
dev_dbg(&ser_dev->dev, "Sending ps2 cmd %02x\n", cmd);
- nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
-
- return 0;
+ return nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
}
static int nvec_ps2_notifier(struct notifier_block *nb,
- unsigned long event_type, void *data)
+ unsigned long event_type, void *data)
{
int i;
unsigned char *msg = (unsigned char *)data;
switch (event_type) {
- case NVEC_PS2_EVT:
- serio_interrupt(ps2_dev.ser_dev, msg[2], 0);
- return NOTIFY_STOP;
-
- case NVEC_PS2:
- if (msg[2] == 1)
- for(i = 0; i < (msg[1] - 2); i++)
- serio_interrupt(ps2_dev.ser_dev, msg[i+4], 0);
- else if (msg[1] != 2) /* !ack */
- {
- printk("nvec_ps2: unhandled mouse event ");
- for(i = 0; i <= (msg[1]+1); i++)
- printk("%02x ", msg[i]);
- printk(".\n");
- }
-
- return NOTIFY_STOP;
+ case NVEC_PS2_EVT:
+ for (i = 0; i < msg[1]; i++)
+ serio_interrupt(ps2_dev.ser_dev, msg[2 + i], 0);
+ return NOTIFY_STOP;
+
+ case NVEC_PS2:
+ if (msg[2] == 1)
+ for (i = 0; i < (msg[1] - 2); i++)
+ serio_interrupt(ps2_dev.ser_dev, msg[i + 4], 0);
+ else if (msg[1] != 2) { /* !ack */
+ print_hex_dump(KERN_WARNING, "unhandled mouse event: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ msg, msg[1] + 2, true);
+ }
+
+ return NOTIFY_STOP;
}
return NOTIFY_DONE;
}
-
-int __init nvec_ps2(struct nvec_chip *nvec)
+static int __devinit nvec_mouse_probe(struct platform_device *pdev)
{
+ struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
struct serio *ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
- ser_dev->id.type=SERIO_8042;
- ser_dev->write=ps2_sendcommand;
- ser_dev->open=ps2_startstreaming;
- ser_dev->close=ps2_stopstreaming;
+ ser_dev->id.type = SERIO_8042;
+ ser_dev->write = ps2_sendcommand;
+ ser_dev->open = ps2_startstreaming;
+ ser_dev->close = ps2_stopstreaming;
- strlcpy(ser_dev->name, "NVEC PS2", sizeof(ser_dev->name));
- strlcpy(ser_dev->phys, "NVEC I2C slave", sizeof(ser_dev->phys));
+ strlcpy(ser_dev->name, "nvec mouse", sizeof(ser_dev->name));
+ strlcpy(ser_dev->phys, "nvec", sizeof(ser_dev->phys));
ps2_dev.ser_dev = ser_dev;
ps2_dev.notifier.notifier_call = nvec_ps2_notifier;
@@ -97,7 +106,26 @@ int __init nvec_ps2(struct nvec_chip *nvec)
serio_register_port(ser_dev);
/* mouse reset */
- nvec_write_async(nvec, "\x06\x01\xff\x03", 4);
+ nvec_write_async(nvec, MOUSE_RESET, 4);
return 0;
}
+
+static struct platform_driver nvec_mouse_driver = {
+ .probe = nvec_mouse_probe,
+ .driver = {
+ .name = "nvec-mouse",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init nvec_mouse_init(void)
+{
+ return platform_driver_register(&nvec_mouse_driver);
+}
+
+module_init(nvec_mouse_init);
+
+MODULE_DESCRIPTION("NVEC mouse driver");
+MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 1a7c19ae766..8b307b42879 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -411,7 +411,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
skb->protocol = eth_type_trans(skb, dev);
skb->dev = dev;
- if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
+ if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc ||
+ work->word2.s.L4_error || !work->word2.s.tcp_or_udp))
skb->ip_summed = CHECKSUM_NONE;
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index a8f780e95e0..076f86675ce 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -512,7 +512,7 @@ static const struct net_device_ops cvm_oct_npi_netdev_ops = {
.ndo_init = cvm_oct_common_init,
.ndo_uninit = cvm_oct_common_uninit,
.ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
.ndo_do_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
@@ -527,7 +527,7 @@ static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
.ndo_open = cvm_oct_xaui_open,
.ndo_stop = cvm_oct_xaui_stop,
.ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
.ndo_do_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
@@ -542,7 +542,7 @@ static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
.ndo_open = cvm_oct_sgmii_open,
.ndo_stop = cvm_oct_sgmii_stop,
.ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
.ndo_do_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
@@ -555,7 +555,7 @@ static const struct net_device_ops cvm_oct_spi_netdev_ops = {
.ndo_init = cvm_oct_spi_init,
.ndo_uninit = cvm_oct_spi_uninit,
.ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
.ndo_do_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
@@ -570,7 +570,7 @@ static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
.ndo_open = cvm_oct_rgmii_open,
.ndo_stop = cvm_oct_rgmii_stop,
.ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
.ndo_do_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
@@ -582,7 +582,7 @@ static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
static const struct net_device_ops cvm_oct_pow_netdev_ops = {
.ndo_init = cvm_oct_common_init,
.ndo_start_xmit = cvm_oct_xmit_pow,
- .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
.ndo_do_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 750fe5045ef..af24ddfb58c 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -21,6 +21,7 @@
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/backlight.h>
#include <linux/device.h>
#include <linux/uaccess.h>
@@ -36,9 +37,6 @@
static int resumeline = 898;
module_param(resumeline, int, 0444);
-static int noinit;
-module_param(noinit, int, 0444);
-
/* Default off since it doesn't work on DCON ASIC in B-test OLPC board */
static int useaa = 1;
module_param(useaa, int, 0444);
@@ -68,11 +66,10 @@ static s32 dcon_read(struct dcon_priv *dcon, u8 reg)
static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
{
- struct i2c_client *client = dcon->client;
uint16_t ver;
int rc = 0;
- ver = i2c_smbus_read_word_data(client, DCON_REG_ID);
+ ver = dcon_read(dcon, DCON_REG_ID);
if ((ver >> 8) != 0xDC) {
printk(KERN_ERR "olpc-dcon: DCON ID not 0xDCxx: 0x%04x "
"instead.\n", ver);
@@ -90,30 +87,19 @@ static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
}
}
- if (ver < 0xdc02 && !noinit) {
- /* Initialize the DCON registers */
-
- /* Start with work-arounds for DCON ASIC */
- i2c_smbus_write_word_data(client, 0x4b, 0x00cc);
- i2c_smbus_write_word_data(client, 0x4b, 0x00cc);
- i2c_smbus_write_word_data(client, 0x4b, 0x00cc);
- i2c_smbus_write_word_data(client, 0x0b, 0x007a);
- i2c_smbus_write_word_data(client, 0x36, 0x025c);
- i2c_smbus_write_word_data(client, 0x37, 0x025e);
-
- /* Initialise SDRAM */
-
- i2c_smbus_write_word_data(client, 0x3b, 0x002b);
- i2c_smbus_write_word_data(client, 0x41, 0x0101);
- i2c_smbus_write_word_data(client, 0x42, 0x0101);
- } else if (!noinit) {
- /* SDRAM setup/hold time */
- i2c_smbus_write_word_data(client, 0x3a, 0xc040);
- i2c_smbus_write_word_data(client, 0x41, 0x0000);
- i2c_smbus_write_word_data(client, 0x41, 0x0101);
- i2c_smbus_write_word_data(client, 0x42, 0x0101);
+ if (ver < 0xdc02) {
+ dev_err(&dcon->client->dev,
+ "DCON v1 is unsupported, giving up..\n");
+ rc = -ENODEV;
+ goto err;
}
+ /* SDRAM setup/hold time */
+ dcon_write(dcon, 0x3a, 0xc040);
+ dcon_write(dcon, 0x41, 0x0000);
+ dcon_write(dcon, 0x41, 0x0101);
+ dcon_write(dcon, 0x42, 0x0101);
+
/* Colour swizzle, AA, no passthrough, backlight */
if (is_init) {
dcon->disp_mode = MODE_PASSTHRU | MODE_BL_ENABLE |
@@ -121,11 +107,11 @@ static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
if (useaa)
dcon->disp_mode |= MODE_COL_AA;
}
- i2c_smbus_write_word_data(client, DCON_REG_MODE, dcon->disp_mode);
+ dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
/* Set the scanline to interrupt on during resume */
- i2c_smbus_write_word_data(client, DCON_REG_SCAN_INT, resumeline);
+ dcon_write(dcon, DCON_REG_SCAN_INT, resumeline);
err:
return rc;
@@ -712,7 +698,6 @@ static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
eirq:
free_irq(DCON_IRQ, dcon);
einit:
- i2c_set_clientdata(client, NULL);
kfree(dcon);
return rc;
}
@@ -721,8 +706,6 @@ static int dcon_remove(struct i2c_client *client)
{
struct dcon_priv *dcon = i2c_get_clientdata(client);
- i2c_set_clientdata(client, NULL);
-
fb_unregister_client(&dcon->fbevent_nb);
unregister_reboot_notifier(&dcon->reboot_nb);
atomic_notifier_chain_unregister(&panic_notifier_list, &dcon_panic_nb);
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index b303b7e42b6..4683d5f355c 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -1179,16 +1179,14 @@ static inline int handle_lcd_special_code(void)
break;
while (*esc) {
- char *endp;
-
if (*esc == 'x') {
esc++;
- lcd_addr_x = simple_strtoul(esc, &endp, 10);
- esc = endp;
+ if (kstrtoul(esc, 10, &lcd_addr_x) < 0)
+ break;
} else if (*esc == 'y') {
esc++;
- lcd_addr_y = simple_strtoul(esc, &endp, 10);
- esc = endp;
+ if (kstrtoul(esc, 10, &lcd_addr_y) < 0)
+ break;
} else
break;
}
diff --git a/drivers/staging/pohmelfs/Kconfig b/drivers/staging/pohmelfs/Kconfig
index 58158b8780f..8d53b1a1e71 100644
--- a/drivers/staging/pohmelfs/Kconfig
+++ b/drivers/staging/pohmelfs/Kconfig
@@ -18,11 +18,3 @@ config POHMELFS_DEBUG
Turns on excessive POHMELFS debugging facilities.
You usually do not want to slow things down noticeably and get really
lots of kernel messages in syslog.
-
-config POHMELFS_CRYPTO
- bool "POHMELFS crypto support"
- depends on POHMELFS
- help
- This option allows to encrypt and/or protect with strong
- cryptographic hash all dataflow between server and clients.
- Each config group can have its own keys.
diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
index 36a25358256..06c1a7451b1 100644
--- a/drivers/staging/pohmelfs/trans.c
+++ b/drivers/staging/pohmelfs/trans.c
@@ -50,7 +50,7 @@ static int netfs_trans_send_pages(struct netfs_trans *t, struct netfs_state *st)
int err = 0;
unsigned int i, attached_pages = t->attached_pages, ci;
struct msghdr msg;
- struct page **pages = (t->eng)?t->eng->pages:t->pages;
+ struct page **pages = (t->eng) ? t->eng->pages : t->pages;
struct page *p;
unsigned int size;
@@ -61,7 +61,7 @@ static int netfs_trans_send_pages(struct netfs_trans *t, struct netfs_state *st)
msg.msg_flags = MSG_WAITALL | MSG_MORE;
ci = 0;
- for (i=0; i<t->page_num; ++i) {
+ for (i = 0; i < t->page_num; ++i) {
struct page *page = pages[ci];
struct netfs_cmd cmd;
struct iovec io;
@@ -169,7 +169,7 @@ int netfs_trans_send(struct netfs_trans *t, struct netfs_state *st)
}
dprintk("%s: sent %s transaction: t: %p, gen: %u, size: %zu, page_num: %u.\n",
- __func__, (t->page_num)?"partial":"full",
+ __func__, (t->page_num) ? "partial" : "full",
t, t->gen, t->iovec.iov_len, t->page_num);
err = 0;
diff --git a/drivers/staging/quatech_usb2/quatech_usb2.c b/drivers/staging/quatech_usb2/quatech_usb2.c
index ca098cabc2b..02fafecd477 100644
--- a/drivers/staging/quatech_usb2/quatech_usb2.c
+++ b/drivers/staging/quatech_usb2/quatech_usb2.c
@@ -916,9 +916,10 @@ static int qt2_ioctl(struct tty_struct *tty,
dbg("%s() port %d, cmd == TIOCMIWAIT enter",
__func__, port->number);
prev_msr_value = port_extra->shadowMSR & QT2_SERIAL_MSR_MASK;
+ barrier();
+ __set_current_state(TASK_INTERRUPTIBLE);
while (1) {
add_wait_queue(&port_extra->wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
schedule();
dbg("%s(): port %d, cmd == TIOCMIWAIT here\n",
__func__, port->number);
@@ -926,9 +927,12 @@ static int qt2_ioctl(struct tty_struct *tty,
/* see if a signal woke us up */
if (signal_pending(current))
return -ERESTARTSYS;
+ set_current_state(TASK_INTERRUPTIBLE);
msr_value = port_extra->shadowMSR & QT2_SERIAL_MSR_MASK;
- if (msr_value == prev_msr_value)
+ if (msr_value == prev_msr_value) {
+ __set_current_state(TASK_RUNNING);
return -EIO; /* no change - error */
+ }
if ((arg & TIOCM_RNG &&
((prev_msr_value & QT2_SERIAL_MSR_RI) ==
(msr_value & QT2_SERIAL_MSR_RI))) ||
@@ -941,6 +945,7 @@ static int qt2_ioctl(struct tty_struct *tty,
(arg & TIOCM_CTS &&
((prev_msr_value & QT2_SERIAL_MSR_CTS) ==
(msr_value & QT2_SERIAL_MSR_CTS)))) {
+ __set_current_state(TASK_RUNNING);
return 0;
}
} /* end inifinite while */
diff --git a/drivers/staging/rtl8187se/Makefile b/drivers/staging/rtl8187se/Makefile
index 11a92262c66..72db504b23b 100644
--- a/drivers/staging/rtl8187se/Makefile
+++ b/drivers/staging/rtl8187se/Makefile
@@ -1,7 +1,7 @@
-#EXTRA_CFLAGS += -DCONFIG_IEEE80211_NOWEP=y
-#EXTRA_CFLAGS += -std=gnu89
-#EXTRA_CFLAGS += -O2
+#ccflags-y += -DCONFIG_IEEE80211_NOWEP=y
+#ccflags-y += -std=gnu89
+#ccflags-y += -O2
#CC = gcc
ccflags-y := -DSW_ANTE
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index e79a7e21297..40dd715d9df 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -1339,8 +1339,8 @@ int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len);
/* ieee80211_softmac.c */
-extern short ieee80211_is_54g(struct ieee80211_network net);
-extern short ieee80211_is_shortslot(struct ieee80211_network net);
+extern short ieee80211_is_54g(const struct ieee80211_network *net);
+extern short ieee80211_is_shortslot(const struct ieee80211_network *net);
extern int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats, u16 type,
u16 stype);
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index 38e67f0bf62..26bacb96d24 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -32,14 +32,14 @@ u8 rsn_authen_cipher_suite[16][4] = {
{0x00,0x0F,0xAC,0x05}, //WEP-104
};
-short ieee80211_is_54g(struct ieee80211_network net)
+short ieee80211_is_54g(const struct ieee80211_network *net)
{
- return ((net.rates_ex_len > 0) || (net.rates_len > 4));
+ return (net->rates_ex_len > 0) || (net->rates_len > 4);
}
-short ieee80211_is_shortslot(struct ieee80211_network net)
+short ieee80211_is_shortslot(const struct ieee80211_network *net)
{
- return (net.capability & WLAN_CAPABILITY_SHORT_SLOT);
+ return net->capability & WLAN_CAPABILITY_SHORT_SLOT;
}
/* returns the total length needed for pleacing the RATE MFIE
@@ -789,7 +789,7 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
else
atim_len = 0;
- if(ieee80211_is_54g(ieee->current_network))
+ if(ieee80211_is_54g(&ieee->current_network))
erp_len = 3;
else
erp_len = 0;
@@ -1258,7 +1258,7 @@ void ieee80211_associate_complete_wq(struct work_struct *work)
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
printk(KERN_INFO "Associated successfully\n");
- if(ieee80211_is_54g(ieee->current_network) &&
+ if(ieee80211_is_54g(&ieee->current_network) &&
(ieee->modulation & IEEE80211_OFDM_MODULATION)){
ieee->rate = 540;
@@ -1379,7 +1379,7 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
ieee->beinretry = false;
queue_work(ieee->wq, &ieee->associate_procedure_wq);
}else{
- if(ieee80211_is_54g(ieee->current_network) &&
+ if(ieee80211_is_54g(&ieee->current_network) &&
(ieee->modulation & IEEE80211_OFDM_MODULATION)){
ieee->rate = 540;
printk(KERN_INFO"Using G rates\n");
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 4c6651aac30..04c23919f4d 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -3534,7 +3534,7 @@ static const struct net_device_ops rtl8180_netdev_ops = {
.ndo_get_stats = rtl8180_stats,
.ndo_tx_timeout = rtl8180_restart,
.ndo_do_ioctl = rtl8180_ioctl,
- .ndo_set_multicast_list = r8180_set_multicast,
+ .ndo_set_rx_mode = r8180_set_multicast,
.ndo_set_mac_address = r8180_set_mac_adr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225z2.c b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
index 3f09f76080a..ee5b867fd0d 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225z2.c
+++ b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
@@ -596,7 +596,7 @@ static void rtl8225_rf_set_chan(struct net_device *dev, short ch)
{
struct r8180_priv *priv = ieee80211_priv(dev);
short gset = (priv->ieee80211->state == IEEE80211_LINKED &&
- ieee80211_is_54g(priv->ieee80211->current_network)) ||
+ ieee80211_is_54g(&priv->ieee80211->current_network)) ||
priv->ieee80211->iw_mode == IW_MODE_MONITOR;
rtl8225_SetTXPowerLevel(dev, ch);
@@ -615,7 +615,7 @@ static void rtl8225_rf_set_chan(struct net_device *dev, short ch)
}
if (priv->ieee80211->state == IEEE80211_LINKED &&
- ieee80211_is_shortslot(priv->ieee80211->current_network))
+ ieee80211_is_shortslot(&priv->ieee80211->current_network))
write_nic_byte(dev, SLOT, 0x9);
else
write_nic_byte(dev, SLOT, 0x14);
diff --git a/drivers/staging/rtl8192e/Makefile b/drivers/staging/rtl8192e/Makefile
index 6e410674359..a66a9ad3368 100644
--- a/drivers/staging/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/Makefile
@@ -1,38 +1,41 @@
-NIC_SELECT = RTL8192E
-
-ccflags-y := -DRTL8192E
-ccflags-y += -std=gnu89
-ccflags-y += -O2
-ccflags-y += -DTHOMAS_TURBO
+ccflags-y += -DUSE_FW_SOURCE_IMG_FILE
+ccflags-y += -DCONFIG_PM_RTL
+ccflags-y += -DCONFIG_PM
+ccflags-y += -DHAVE_NET_DEVICE_OPS
ccflags-y += -DENABLE_DOT11D
-ccflags-y += -DENABLE_IPS
-ccflags-y += -DENABLE_LPS
-
r8192e_pci-objs := \
- r8192E_core.o \
- r8180_93cx6.o \
- r8192E_wx.o \
- r8190_rtl8256.o \
- r819xE_phy.o \
- r819xE_firmware.o \
- r819xE_cmdpkt.o \
- r8192E_dm.o \
- r8192_pm.o \
- ieee80211/ieee80211_rx.o \
- ieee80211/ieee80211_softmac.o \
- ieee80211/ieee80211_tx.o \
- ieee80211/ieee80211_wx.o \
- ieee80211/ieee80211_module.o \
- ieee80211/ieee80211_softmac_wx.o \
- ieee80211/rtl819x_HTProc.o \
- ieee80211/rtl819x_TSProc.o \
- ieee80211/rtl819x_BAProc.o \
- ieee80211/dot11d.o \
- ieee80211/ieee80211_crypt.o \
- ieee80211/ieee80211_crypt_tkip.o \
- ieee80211/ieee80211_crypt_ccmp.o \
- ieee80211/ieee80211_crypt_wep.o
+ rtl_core.o \
+ rtl_eeprom.o \
+ rtl_ps.o \
+ rtl_wx.o \
+ rtl_cam.o \
+ rtl_dm.o \
+ rtl_pm.o \
+ rtl_pci.o \
+ rtl_debug.o \
+ rtl_ethtool.o \
+ r8192E_dev.o \
+ r8192E_phy.o \
+ r8192E_firmware.o \
+ r8192E_cmdpkt.o \
+ r8192E_hwimg.o \
+ r8190P_rtl8256.o \
+ rtllib_rx.o \
+ rtllib_softmac.o \
+ rtllib_tx.o \
+ rtllib_wx.o \
+ rtllib_module.o \
+ rtllib_softmac_wx.o \
+ rtl819x_HTProc.o \
+ rtl819x_TSProc.o \
+ rtl819x_BAProc.o \
+ dot11d.o \
+ rtllib_crypt.o \
+ rtllib_crypt_tkip.o \
+ rtllib_crypt_ccmp.o \
+ rtllib_crypt_wep.o
obj-$(CONFIG_RTL8192E) += r8192e_pci.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8192e/TODO b/drivers/staging/rtl8192e/TODO
new file mode 100644
index 00000000000..d51f159d1ad
--- /dev/null
+++ b/drivers/staging/rtl8192e/TODO
@@ -0,0 +1,2 @@
+* merge into drivers/net/wireless/rtllib/rtl8192e
+* clean up function naming
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
new file mode 100644
index 00000000000..ee0381e0a81
--- /dev/null
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -0,0 +1,216 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#include "dot11d.h"
+
+struct channel_list {
+ u8 Channel[32];
+ u8 Len;
+};
+
+static struct channel_list ChannelPlan[] = {
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64,
+ 149, 153, 157, 161, 165}, 24},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56,
+ 60, 64}, 21},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
+ 56, 60, 64}, 22},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
+ 56, 60, 64}, 22},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
+ 56, 60, 64}, 22},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
+ 56, 60, 64}, 22},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52,
+ 56, 60, 64}, 21}
+};
+
+void Dot11d_Init(struct rtllib_device *ieee)
+{
+ struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(ieee);
+ pDot11dInfo->bEnabled = false;
+
+ pDot11dInfo->State = DOT11D_STATE_NONE;
+ pDot11dInfo->CountryIeLen = 0;
+ memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
+ memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
+ RESET_CIE_WATCHDOG(ieee);
+
+}
+
+void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee)
+{
+ int i, max_chan = 14, min_chan = 1;
+
+ ieee->bGlobalDomain = false;
+
+ if (ChannelPlan[channel_plan].Len != 0) {
+ memset(GET_DOT11D_INFO(ieee)->channel_map, 0,
+ sizeof(GET_DOT11D_INFO(ieee)->channel_map));
+ for (i = 0; i < ChannelPlan[channel_plan].Len; i++) {
+ if (ChannelPlan[channel_plan].Channel[i] < min_chan ||
+ ChannelPlan[channel_plan].Channel[i] > max_chan)
+ break;
+ GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan
+ [channel_plan].Channel[i]] = 1;
+ }
+ }
+
+ switch (channel_plan) {
+ case COUNTRY_CODE_GLOBAL_DOMAIN:
+ ieee->bGlobalDomain = true;
+ for (i = 12; i <= 14; i++)
+ GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
+ ieee->IbssStartChnl = 10;
+ ieee->ibss_maxjoin_chal = 11;
+ break;
+
+ case COUNTRY_CODE_WORLD_WIDE_13:
+ for (i = 12; i <= 13; i++)
+ GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
+ ieee->IbssStartChnl = 10;
+ ieee->ibss_maxjoin_chal = 11;
+ break;
+
+ default:
+ ieee->IbssStartChnl = 1;
+ ieee->ibss_maxjoin_chal = 14;
+ break;
+ }
+}
+
+
+void Dot11d_Reset(struct rtllib_device *ieee)
+{
+ struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(ieee);
+ u32 i;
+
+ memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
+ memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
+ for (i = 1; i <= 11; i++)
+ (pDot11dInfo->channel_map)[i] = 1;
+ for (i = 12; i <= 14; i++)
+ (pDot11dInfo->channel_map)[i] = 2;
+ pDot11dInfo->State = DOT11D_STATE_NONE;
+ pDot11dInfo->CountryIeLen = 0;
+ RESET_CIE_WATCHDOG(ieee);
+}
+
+void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
+ u16 CoutryIeLen, u8 *pCoutryIe)
+{
+ struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
+ u8 i, j, NumTriples, MaxChnlNum;
+ struct chnl_txpow_triple *pTriple;
+
+ memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
+ memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
+ MaxChnlNum = 0;
+ NumTriples = (CoutryIeLen - 3) / 3;
+ pTriple = (struct chnl_txpow_triple *)(pCoutryIe + 3);
+ for (i = 0; i < NumTriples; i++) {
+ if (MaxChnlNum >= pTriple->FirstChnl) {
+ printk(KERN_INFO "Dot11d_UpdateCountryIe(): Invalid"
+ " country IE, skip it........1\n");
+ return;
+ }
+ if (MAX_CHANNEL_NUMBER < (pTriple->FirstChnl +
+ pTriple->NumChnls)) {
+ printk(KERN_INFO "Dot11d_UpdateCountryIe(): Invalid "
+ "country IE, skip it........2\n");
+ return;
+ }
+
+ for (j = 0 ; j < pTriple->NumChnls; j++) {
+ pDot11dInfo->channel_map[pTriple->FirstChnl + j] = 1;
+ pDot11dInfo->MaxTxPwrDbmList[pTriple->FirstChnl + j] =
+ pTriple->MaxTxPowerInDbm;
+ MaxChnlNum = pTriple->FirstChnl + j;
+ }
+
+ pTriple = (struct chnl_txpow_triple *)((u8*)pTriple + 3);
+ }
+
+ UPDATE_CIE_SRC(dev, pTaddr);
+
+ pDot11dInfo->CountryIeLen = CoutryIeLen;
+ memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe, CoutryIeLen);
+ pDot11dInfo->State = DOT11D_STATE_LEARNED;
+}
+
+u8 DOT11D_GetMaxTxPwrInDbm(struct rtllib_device *dev, u8 Channel)
+{
+ struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
+ u8 MaxTxPwrInDbm = 255;
+
+ if (MAX_CHANNEL_NUMBER < Channel) {
+ printk(KERN_INFO "DOT11D_GetMaxTxPwrInDbm(): Invalid "
+ "Channel\n");
+ return MaxTxPwrInDbm;
+ }
+ if (pDot11dInfo->channel_map[Channel])
+ MaxTxPwrInDbm = pDot11dInfo->MaxTxPwrDbmList[Channel];
+
+ return MaxTxPwrInDbm;
+}
+
+void DOT11D_ScanComplete(struct rtllib_device *dev)
+{
+ struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
+
+ switch (pDot11dInfo->State) {
+ case DOT11D_STATE_LEARNED:
+ pDot11dInfo->State = DOT11D_STATE_DONE;
+ break;
+ case DOT11D_STATE_DONE:
+ Dot11d_Reset(dev);
+ break;
+ case DOT11D_STATE_NONE:
+ break;
+ }
+}
+
+int ToLegalChannel(struct rtllib_device *dev, u8 channel)
+{
+ struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
+ u8 default_chn = 0;
+ u32 i;
+
+ for (i = 1; i <= MAX_CHANNEL_NUMBER; i++) {
+ if (pDot11dInfo->channel_map[i] > 0) {
+ default_chn = i;
+ break;
+ }
+ }
+
+ if (MAX_CHANNEL_NUMBER < channel) {
+ printk(KERN_ERR "%s(): Invalid Channel\n", __func__);
+ return default_chn;
+ }
+
+ if (pDot11dInfo->channel_map[channel] > 0)
+ return channel;
+
+ return default_chn;
+}
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index 8e644614f21..032f7004a7f 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -1,24 +1,42 @@
-#ifndef INC_DOT11D_H
-#define INC_DOT11D_H
-
-#ifdef ENABLE_DOT11D
-#include "ieee80211.h"
-
-struct _CHNL_TXPOWER_TRIPLE {
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef __INC_DOT11D_H
+#define __INC_DOT11D_H
+
+#include "rtllib.h"
+
+
+
+struct chnl_txpow_triple {
u8 FirstChnl;
u8 NumChnls;
u8 MaxTxPowerInDbm;
};
-enum _DOT11D_STATE {
+enum dot11d_state {
DOT11D_STATE_NONE = 0,
DOT11D_STATE_LEARNED,
DOT11D_STATE_DONE,
};
/**
- * struct _RT_DOT11D_INFO
- * @CountryIeLen: value greater than 0 if @CountryIeBuf contains
+ * struct rt_dot11d_info * @CountryIeLen: value greater than 0 if @CountryIeBuf contains
* valid country information element.
* @chanell_map: holds channel values
* 0 - invalid,
@@ -27,18 +45,19 @@ enum _DOT11D_STATE {
* @CountryIeSrcAddr - Source AP of the country IE
*/
-struct _RT_DOT11D_INFO {
+struct rt_dot11d_info {
+
bool bEnabled;
u16 CountryIeLen;
- u8 CountryIeBuf[MAX_IE_LEN];
- u8 CountryIeSrcAddr[6];
- u8 CountryIeWatchdog;
+ u8 CountryIeBuf[MAX_IE_LEN];
+ u8 CountryIeSrcAddr[6];
+ u8 CountryIeWatchdog;
- u8 channel_map[MAX_CHANNEL_NUMBER+1];
- u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
+ u8 channel_map[MAX_CHANNEL_NUMBER+1];
+ u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
- DOT11D_STATE State;
+ enum dot11d_state State;
};
static inline void cpMacAddr(unsigned char *des, unsigned char *src)
@@ -46,49 +65,41 @@ static inline void cpMacAddr(unsigned char *des, unsigned char *src)
memcpy(des, src, 6);
}
-#define GET_DOT11D_INFO(__pIeeeDev) ((PRT_DOT11D_INFO) \
- ((__pIeeeDev)->pDot11dInfo))
+#define GET_DOT11D_INFO(__pIeeeDev) \
+ ((struct rt_dot11d_info *)((__pIeeeDev)->pDot11dInfo))
-#define IS_DOT11D_ENABLE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->bEnabled)
-#define IS_COUNTRY_IE_VALID(__pIeeeDev) \
- (GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen > 0)
+#define IS_DOT11D_ENABLE(__pIeeeDev) \
+ (GET_DOT11D_INFO(__pIeeeDev)->bEnabled)
+#define IS_COUNTRY_IE_VALID(__pIeeeDev) \
+ (GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen > 0)
-#define IS_EQUAL_CIE_SRC(__pIeeeDev, __pTa) \
- eqMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
-
-#define UPDATE_CIE_SRC(__pIeeeDev, __pTa) \
- cpMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
+#define IS_EQUAL_CIE_SRC(__pIeeeDev, __pTa) \
+ eqMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
+#define UPDATE_CIE_SRC(__pIeeeDev, __pTa) \
+ cpMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
#define IS_COUNTRY_IE_CHANGED(__pIeeeDev, __Ie) \
- (((__Ie).Length == 0 || (__Ie).Length != \
- GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen) ? FALSE : \
- (!memcmp(GET_DOT11D_INFO(__pIeeeDev)->CountryIeBuf, \
+ (((__Ie).Length == 0 || (__Ie).Length != \
+ GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen) ? \
+ false : (!memcmp(GET_DOT11D_INFO(__pIeeeDev)->CountryIeBuf, \
(__Ie).Octet, (__Ie).Length)))
#define CIE_WATCHDOG_TH 1
-#define GET_CIE_WATCHDOG(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)-> \
- CountryIeWatchdog)
+#define GET_CIE_WATCHDOG(__pIeeeDev) \
+ (GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog)
#define RESET_CIE_WATCHDOG(__pIeeeDev) GET_CIE_WATCHDOG(__pIeeeDev) = 0
#define UPDATE_CIE_WATCHDOG(__pIeeeDev) (++GET_CIE_WATCHDOG(__pIeeeDev))
-#define IS_DOT11D_STATE_DONE(__pIeeeDev) \
- (GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
-
-
-void Dot11d_Init(struct ieee80211_device *dev);
-
-void Dot11d_Reset(struct ieee80211_device *dev);
-
-void Dot11d_UpdateCountryIe(struct ieee80211_device *dev, u8 *pTaddr,
- u16 CoutryIeLen, u8 *pCoutryIe);
-
-u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 channel);
-
-void DOT11D_ScanComplete(struct ieee80211_device *dev);
-
-int IsLegalChannel(struct ieee80211_device *dev, u8 channel);
+#define IS_DOT11D_STATE_DONE(__pIeeeDev) \
+ (GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
-int ToLegalChannel(struct ieee80211_device *dev, u8 channel);
+void Dot11d_Init(struct rtllib_device *dev);
+void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee);
+void Dot11d_Reset(struct rtllib_device *dev);
+void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
+ u16 CoutryIeLen, u8 *pCoutryIe);
+u8 DOT11D_GetMaxTxPwrInDbm(struct rtllib_device *dev, u8 Channel);
+void DOT11D_ScanComplete(struct rtllib_device *dev);
+int ToLegalChannel(struct rtllib_device *dev, u8 channel);
-#endif /* ENABLE_DOT11D */
-#endif /* INC_DOT11D_H */
+#endif
diff --git a/drivers/staging/rtl8192e/ieee80211/dot11d.c b/drivers/staging/rtl8192e/ieee80211/dot11d.c
deleted file mode 100644
index 98e46487dc0..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/dot11d.c
+++ /dev/null
@@ -1,218 +0,0 @@
-#ifdef ENABLE_DOT11D
-//-----------------------------------------------------------------------------
-// File:
-// Dot11d.c
-//
-// Description:
-// Implement 802.11d.
-//
-//-----------------------------------------------------------------------------
-
-#include "dot11d.h"
-
-void
-Dot11d_Init(struct ieee80211_device *ieee)
-{
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
-
- pDot11dInfo->bEnabled = 0;
-
- pDot11dInfo->State = DOT11D_STATE_NONE;
- pDot11dInfo->CountryIeLen = 0;
- memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
- memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
- RESET_CIE_WATCHDOG(ieee);
-
- printk("Dot11d_Init()\n");
-}
-
-//
-// Description:
-// Reset to the state as we are just entering a regulatory domain.
-//
-void
-Dot11d_Reset(struct ieee80211_device *ieee)
-{
- u32 i;
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
-#if 0
- if(!pDot11dInfo->bEnabled)
- return;
-#endif
- // Clear old channel map
- memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
- memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
- // Set new channel map
- for (i=1; i<=11; i++) {
- (pDot11dInfo->channel_map)[i] = 1;
- }
- for (i=12; i<=14; i++) {
- (pDot11dInfo->channel_map)[i] = 2;
- }
-
- pDot11dInfo->State = DOT11D_STATE_NONE;
- pDot11dInfo->CountryIeLen = 0;
- RESET_CIE_WATCHDOG(ieee);
-}
-
-//
-// Description:
-// Update country IE from Beacon or Probe Resopnse
-// and configure PHY for operation in the regulatory domain.
-//
-// TODO:
-// Configure Tx power.
-//
-// Assumption:
-// 1. IS_DOT11D_ENABLE() is TRUE.
-// 2. Input IE is an valid one.
-//
-void
-Dot11d_UpdateCountryIe(
- struct ieee80211_device *dev,
- u8 * pTaddr,
- u16 CoutryIeLen,
- u8 * pCoutryIe
- )
-{
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
- u8 i, j, NumTriples, MaxChnlNum;
- PCHNL_TXPOWER_TRIPLE pTriple;
-
- memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
- memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
- MaxChnlNum = 0;
- NumTriples = (CoutryIeLen - 3) / 3; // skip 3-byte country string.
- pTriple = (PCHNL_TXPOWER_TRIPLE)(pCoutryIe + 3);
- for(i = 0; i < NumTriples; i++)
- {
- if(MaxChnlNum >= pTriple->FirstChnl)
- { // It is not in a monotonically increasing order, so stop processing.
- printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
- return;
- }
- if(MAX_CHANNEL_NUMBER < (pTriple->FirstChnl + pTriple->NumChnls))
- { // It is not a valid set of channel id, so stop processing.
- printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
- return;
- }
-
- for(j = 0 ; j < pTriple->NumChnls; j++)
- {
- pDot11dInfo->channel_map[pTriple->FirstChnl + j] = 1;
- pDot11dInfo->MaxTxPwrDbmList[pTriple->FirstChnl + j] = pTriple->MaxTxPowerInDbm;
- MaxChnlNum = pTriple->FirstChnl + j;
- }
-
- pTriple = (PCHNL_TXPOWER_TRIPLE)((u8*)pTriple + 3);
- }
-#if 1
- printk("Channel List:");
- for(i=1; i<= MAX_CHANNEL_NUMBER; i++)
- if(pDot11dInfo->channel_map[i] > 0)
- printk(" %d", i);
- printk("\n");
-#endif
-
- UPDATE_CIE_SRC(dev, pTaddr);
-
- pDot11dInfo->CountryIeLen = CoutryIeLen;
- memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe,CoutryIeLen);
- pDot11dInfo->State = DOT11D_STATE_LEARNED;
-}
-
-
-u8
-DOT11D_GetMaxTxPwrInDbm(
- struct ieee80211_device *dev,
- u8 Channel
- )
-{
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
- u8 MaxTxPwrInDbm = 255;
-
- if(MAX_CHANNEL_NUMBER < Channel)
- {
- printk("DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
- return MaxTxPwrInDbm;
- }
- if(pDot11dInfo->channel_map[Channel])
- {
- MaxTxPwrInDbm = pDot11dInfo->MaxTxPwrDbmList[Channel];
- }
-
- return MaxTxPwrInDbm;
-}
-
-
-void
-DOT11D_ScanComplete(
- struct ieee80211_device * dev
- )
-{
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
-
- switch(pDot11dInfo->State)
- {
- case DOT11D_STATE_LEARNED:
- pDot11dInfo->State = DOT11D_STATE_DONE;
- break;
-
- case DOT11D_STATE_DONE:
- if( GET_CIE_WATCHDOG(dev) == 0 )
- { // Reset country IE if previous one is gone.
- Dot11d_Reset(dev);
- }
- break;
- case DOT11D_STATE_NONE:
- break;
- }
-}
-
-int IsLegalChannel(
- struct ieee80211_device * dev,
- u8 channel
-)
-{
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
-
- if(MAX_CHANNEL_NUMBER < channel)
- {
- printk("IsLegalChannel(): Invalid Channel\n");
- return 0;
- }
- if(pDot11dInfo->channel_map[channel] > 0)
- return 1;
- return 0;
-}
-
-int ToLegalChannel(
- struct ieee80211_device * dev,
- u8 channel
-)
-{
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
- u8 default_chn = 0;
- u32 i = 0;
-
- for (i=1; i<= MAX_CHANNEL_NUMBER; i++)
- {
- if(pDot11dInfo->channel_map[i] > 0)
- {
- default_chn = i;
- break;
- }
- }
-
- if(MAX_CHANNEL_NUMBER < channel)
- {
- printk("IsLegalChannel(): Invalid Channel\n");
- return default_chn;
- }
-
- if(pDot11dInfo->channel_map[channel] > 0)
- return channel;
-
- return default_chn;
-}
-#endif
diff --git a/drivers/staging/rtl8192e/ieee80211/dot11d.h b/drivers/staging/rtl8192e/ieee80211/dot11d.h
deleted file mode 100644
index 15b7a4ba37b..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/dot11d.h
+++ /dev/null
@@ -1,102 +0,0 @@
-#ifndef __INC_DOT11D_H
-#define __INC_DOT11D_H
-
-#ifdef ENABLE_DOT11D
-#include "ieee80211.h"
-
-//#define ENABLE_DOT11D
-
-//#define DOT11D_MAX_CHNL_NUM 83
-
-typedef struct _CHNL_TXPOWER_TRIPLE {
- u8 FirstChnl;
- u8 NumChnls;
- u8 MaxTxPowerInDbm;
-}CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;
-
-typedef enum _DOT11D_STATE {
- DOT11D_STATE_NONE = 0,
- DOT11D_STATE_LEARNED,
- DOT11D_STATE_DONE,
-}DOT11D_STATE;
-
-typedef struct _RT_DOT11D_INFO {
- //DECLARE_RT_OBJECT(RT_DOT11D_INFO);
-
- bool bEnabled; // dot11MultiDomainCapabilityEnabled
-
- u16 CountryIeLen; // > 0 if CountryIeBuf[] contains valid country information element.
- u8 CountryIeBuf[MAX_IE_LEN];
- u8 CountryIeSrcAddr[6]; // Source AP of the country IE.
- u8 CountryIeWatchdog;
-
- u8 channel_map[MAX_CHANNEL_NUMBER+1]; //!!!Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan)
- //u8 ChnlListLen; // #Bytes valid in ChnlList[].
- //u8 ChnlList[DOT11D_MAX_CHNL_NUM];
- u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
-
- DOT11D_STATE State;
-}RT_DOT11D_INFO, *PRT_DOT11D_INFO;
-#define eqMacAddr(a,b) ( ((a)[0]==(b)[0] && (a)[1]==(b)[1] && (a)[2]==(b)[2] && (a)[3]==(b)[3] && (a)[4]==(b)[4] && (a)[5]==(b)[5]) ? 1:0 )
-#define cpMacAddr(des,src) ((des)[0]=(src)[0],(des)[1]=(src)[1],(des)[2]=(src)[2],(des)[3]=(src)[3],(des)[4]=(src)[4],(des)[5]=(src)[5])
-#define GET_DOT11D_INFO(__pIeeeDev) ((PRT_DOT11D_INFO)((__pIeeeDev)->pDot11dInfo))
-
-#define IS_DOT11D_ENABLE(__pIeeeDev) GET_DOT11D_INFO(__pIeeeDev)->bEnabled
-#define IS_COUNTRY_IE_VALID(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen > 0)
-
-#define IS_EQUAL_CIE_SRC(__pIeeeDev, __pTa) eqMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
-#define UPDATE_CIE_SRC(__pIeeeDev, __pTa) cpMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
-
-#define IS_COUNTRY_IE_CHANGED(__pIeeeDev, __Ie) \
- (((__Ie).Length == 0 || (__Ie).Length != GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen) ? \
- FALSE : \
- (!memcmp(GET_DOT11D_INFO(__pIeeeDev)->CountryIeBuf, (__Ie).Octet, (__Ie).Length)))
-
-#define CIE_WATCHDOG_TH 1
-#define GET_CIE_WATCHDOG(__pIeeeDev) GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog
-#define RESET_CIE_WATCHDOG(__pIeeeDev) GET_CIE_WATCHDOG(__pIeeeDev) = 0
-#define UPDATE_CIE_WATCHDOG(__pIeeeDev) ++GET_CIE_WATCHDOG(__pIeeeDev)
-
-#define IS_DOT11D_STATE_DONE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
-
-
-void
-Dot11d_Init(
- struct ieee80211_device *dev
- );
-
-void
-Dot11d_Reset(
- struct ieee80211_device *dev
- );
-
-void
-Dot11d_UpdateCountryIe(
- struct ieee80211_device *dev,
- u8 * pTaddr,
- u16 CoutryIeLen,
- u8 * pCoutryIe
- );
-
-u8
-DOT11D_GetMaxTxPwrInDbm(
- struct ieee80211_device *dev,
- u8 Channel
- );
-
-void
-DOT11D_ScanComplete(
- struct ieee80211_device * dev
- );
-
-int IsLegalChannel(
- struct ieee80211_device * dev,
- u8 channel
-);
-
-int ToLegalChannel(
- struct ieee80211_device * dev,
- u8 channel
-);
-#endif //ENABLE_DOT11D
-#endif // #ifndef __INC_DOT11D_H
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211.h b/drivers/staging/rtl8192e/ieee80211/ieee80211.h
deleted file mode 100644
index 6d7963e5b6a..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211.h
+++ /dev/null
@@ -1,2636 +0,0 @@
-/*
- * Merged with mainline ieee80211.h in Aug 2004. Original ieee802_11
- * remains copyright by the original authors
- *
- * Portions of the merged code are based on Host AP (software wireless
- * LAN access point) driver for Intersil Prism2/2.5/3.
- *
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <jkmaline@cc.hut.fi>
- * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * Adaption to a generic IEEE 802.11 stack by James Ketrenos
- * <jketreno@linux.intel.com>
- * Copyright (c) 2004, Intel Corporation
- *
- * Modified for Realtek's wi-fi cards by Andrea Merello
- * <andreamrl@tiscali.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
- */
-#ifndef IEEE80211_H
-#define IEEE80211_H
-#include <linux/if_ether.h> /* ETH_ALEN */
-#include <linux/kernel.h> /* ARRAY_SIZE */
-#include <linux/module.h>
-#include <linux/jiffies.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-#include <linux/semaphore.h>
-#include <linux/interrupt.h>
-
-#include <linux/delay.h>
-#include <linux/wireless.h>
-
-#include "rtl819x_HT.h"
-#include "rtl819x_BA.h"
-#include "rtl819x_TS.h"
-
-#ifndef IW_MODE_MONITOR
-#define IW_MODE_MONITOR 6
-#endif
-
-#ifndef IWEVCUSTOM
-#define IWEVCUSTOM 0x8c02
-#endif
-
-#ifndef container_of
-/**
- * container_of - cast a member of a structure out to the containing structure
- *
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({ \
- const typeof( ((type *)0)->member ) *__mptr = (ptr); \
- (type *)( (char *)__mptr - offsetof(type,member) );})
-#endif
-
-#define KEY_TYPE_NA 0x0
-#define KEY_TYPE_WEP40 0x1
-#define KEY_TYPE_TKIP 0x2
-#define KEY_TYPE_CCMP 0x4
-#define KEY_TYPE_WEP104 0x5
-
-/* added for rtl819x tx procedure */
-#define MAX_QUEUE_SIZE 0x10
-
-//
-// 8190 queue mapping
-//
-#define BK_QUEUE 0
-#define BE_QUEUE 1
-#define VI_QUEUE 2
-#define VO_QUEUE 3
-#define HCCA_QUEUE 4
-#define TXCMD_QUEUE 5
-#define MGNT_QUEUE 6
-#define HIGH_QUEUE 7
-#define BEACON_QUEUE 8
-
-#define LOW_QUEUE BE_QUEUE
-#define NORMAL_QUEUE MGNT_QUEUE
-
-//added by amy for ps
-#define SWRF_TIMEOUT 50
-
-//added by amy for LEAP related
-#define IE_CISCO_FLAG_POSITION 0x08 // Flag byte: byte 8, numbered from 0.
-#define SUPPORT_CKIP_MIC 0x08 // bit3
-#define SUPPORT_CKIP_PK 0x10 // bit4
-/* defined for skb cb field */
-/* At most 28 byte */
-typedef struct cb_desc {
- /* Tx Desc Related flags (8-9) */
- u8 bLastIniPkt:1;
- u8 bCmdOrInit:1;
- u8 bFirstSeg:1;
- u8 bLastSeg:1;
- u8 bEncrypt:1;
- u8 bTxDisableRateFallBack:1;
- u8 bTxUseDriverAssingedRate:1;
- u8 bHwSec:1; //indicate whether use Hw security. WB
-
- u8 reserved1;
-
- /* Tx Firmware Relaged flags (10-11)*/
- u8 bCTSEnable:1;
- u8 bRTSEnable:1;
- u8 bUseShortGI:1;
- u8 bUseShortPreamble:1;
- u8 bTxEnableFwCalcDur:1;
- u8 bAMPDUEnable:1;
- u8 bRTSSTBC:1;
- u8 RTSSC:1;
-
- u8 bRTSBW:1;
- u8 bPacketBW:1;
- u8 bRTSUseShortPreamble:1;
- u8 bRTSUseShortGI:1;
- u8 bMulticast:1;
- u8 bBroadcast:1;
- //u8 reserved2:2;
- u8 drv_agg_enable:1;
- u8 reserved2:1;
-
- /* Tx Desc related element(12-19) */
- u8 rata_index;
- u8 queue_index;
- //u8 reserved3;
- //u8 reserved4;
- u16 txbuf_size;
- //u8 reserved5;
- u8 RATRIndex;
- u8 reserved6;
- u8 reserved7;
- u8 reserved8;
-
- /* Tx firmware related element(20-27) */
- u8 data_rate;
- u8 rts_rate;
- u8 ampdu_factor;
- u8 ampdu_density;
- //u8 reserved9;
- //u8 reserved10;
- //u8 reserved11;
- u8 DrvAggrNum;
- u16 pkt_size;
- u8 reserved12;
-
- u8 bdhcp;
-}cb_desc, *pcb_desc;
-
-/*--------------------------Define -------------------------------------------*/
-#define MGN_1M 0x02
-#define MGN_2M 0x04
-#define MGN_5_5M 0x0b
-#define MGN_11M 0x16
-
-#define MGN_6M 0x0c
-#define MGN_9M 0x12
-#define MGN_12M 0x18
-#define MGN_18M 0x24
-#define MGN_24M 0x30
-#define MGN_36M 0x48
-#define MGN_48M 0x60
-#define MGN_54M 0x6c
-
-#define MGN_MCS0 0x80
-#define MGN_MCS1 0x81
-#define MGN_MCS2 0x82
-#define MGN_MCS3 0x83
-#define MGN_MCS4 0x84
-#define MGN_MCS5 0x85
-#define MGN_MCS6 0x86
-#define MGN_MCS7 0x87
-#define MGN_MCS8 0x88
-#define MGN_MCS9 0x89
-#define MGN_MCS10 0x8a
-#define MGN_MCS11 0x8b
-#define MGN_MCS12 0x8c
-#define MGN_MCS13 0x8d
-#define MGN_MCS14 0x8e
-#define MGN_MCS15 0x8f
-
-//----------------------------------------------------------------------------
-// 802.11 Management frame Reason Code field
-//----------------------------------------------------------------------------
-enum _ReasonCode{
- unspec_reason = 0x1,
- auth_not_valid = 0x2,
- deauth_lv_ss = 0x3,
- inactivity = 0x4,
- ap_overload = 0x5,
- class2_err = 0x6,
- class3_err = 0x7,
- disas_lv_ss = 0x8,
- asoc_not_auth = 0x9,
-
- //----MIC_CHECK
- mic_failure = 0xe,
- //----END MIC_CHECK
-
- // Reason code defined in 802.11i D10.0 p.28.
- invalid_IE = 0x0d,
- four_way_tmout = 0x0f,
- two_way_tmout = 0x10,
- IE_dismatch = 0x11,
- invalid_Gcipher = 0x12,
- invalid_Pcipher = 0x13,
- invalid_AKMP = 0x14,
- unsup_RSNIEver = 0x15,
- invalid_RSNIE = 0x16,
- auth_802_1x_fail= 0x17,
- ciper_reject = 0x18,
-
- // Reason code defined in 7.3.1.7, 802.1e D13.0, p.42. Added by Annie, 2005-11-15.
- QoS_unspec = 0x20, // 32
- QAP_bandwidth = 0x21, // 33
- poor_condition = 0x22, // 34
- no_facility = 0x23, // 35
- // Where is 36???
- req_declined = 0x25, // 37
- invalid_param = 0x26, // 38
- req_not_honored= 0x27, // 39
- TS_not_created = 0x2F, // 47
- DL_not_allowed = 0x30, // 48
- dest_not_exist = 0x31, // 49
- dest_not_QSTA = 0x32, // 50
-};
-
-
-
-#define aSifsTime (((priv->ieee80211->current_network.mode == IEEE_A)||(priv->ieee80211->current_network.mode == IEEE_N_24G)||(priv->ieee80211->current_network.mode == IEEE_N_5G))? 16 : 10)
-
-#define MGMT_QUEUE_NUM 5
-
-#define IEEE_CMD_SET_WPA_PARAM 1
-#define IEEE_CMD_SET_WPA_IE 2
-#define IEEE_CMD_SET_ENCRYPTION 3
-#define IEEE_CMD_MLME 4
-
-#define IEEE_PARAM_WPA_ENABLED 1
-#define IEEE_PARAM_TKIP_COUNTERMEASURES 2
-#define IEEE_PARAM_DROP_UNENCRYPTED 3
-#define IEEE_PARAM_PRIVACY_INVOKED 4
-#define IEEE_PARAM_AUTH_ALGS 5
-#define IEEE_PARAM_IEEE_802_1X 6
-//It should consistent with the driver_XXX.c
-// David, 2006.9.26
-#define IEEE_PARAM_WPAX_SELECT 7
-//Added for notify the encryption type selection
-// David, 2006.9.26
-#define IEEE_PROTO_WPA 1
-#define IEEE_PROTO_RSN 2
-//Added for notify the encryption type selection
-// David, 2006.9.26
-#define IEEE_WPAX_USEGROUP 0
-#define IEEE_WPAX_WEP40 1
-#define IEEE_WPAX_TKIP 2
-#define IEEE_WPAX_WRAP 3
-#define IEEE_WPAX_CCMP 4
-#define IEEE_WPAX_WEP104 5
-
-#define IEEE_KEY_MGMT_IEEE8021X 1
-#define IEEE_KEY_MGMT_PSK 2
-
-#define IEEE_MLME_STA_DEAUTH 1
-#define IEEE_MLME_STA_DISASSOC 2
-
-
-#define IEEE_CRYPT_ERR_UNKNOWN_ALG 2
-#define IEEE_CRYPT_ERR_UNKNOWN_ADDR 3
-#define IEEE_CRYPT_ERR_CRYPT_INIT_FAILED 4
-#define IEEE_CRYPT_ERR_KEY_SET_FAILED 5
-#define IEEE_CRYPT_ERR_TX_KEY_SET_FAILED 6
-#define IEEE_CRYPT_ERR_CARD_CONF_FAILED 7
-
-
-#define IEEE_CRYPT_ALG_NAME_LEN 16
-
-#define MAX_IE_LEN 0xff
-
-// added for kernel conflict
-#define ieee80211_crypt_deinit_entries ieee80211_crypt_deinit_entries_rsl
-#define ieee80211_crypt_deinit_handler ieee80211_crypt_deinit_handler_rsl
-#define ieee80211_crypt_delayed_deinit ieee80211_crypt_delayed_deinit_rsl
-#define ieee80211_register_crypto_ops ieee80211_register_crypto_ops_rsl
-#define ieee80211_unregister_crypto_ops ieee80211_unregister_crypto_ops_rsl
-#define ieee80211_get_crypto_ops ieee80211_get_crypto_ops_rsl
-
-#define ieee80211_ccmp_null ieee80211_ccmp_null_rsl
-
-#define ieee80211_tkip_null ieee80211_tkip_null_rsl
-
-#define ieee80211_wep_null ieee80211_wep_null_rsl
-
-#define free_ieee80211 free_ieee80211_rsl
-#define alloc_ieee80211 alloc_ieee80211_rsl
-
-#define ieee80211_rx ieee80211_rx_rsl
-#define ieee80211_rx_mgt ieee80211_rx_mgt_rsl
-
-#define ieee80211_get_beacon ieee80211_get_beacon_rsl
-#define ieee80211_rtl_wake_queue ieee80211_rtl_wake_queue_rsl
-#define ieee80211_rtl_stop_queue ieee80211_rtl_stop_queue_rsl
-#define ieee80211_reset_queue ieee80211_reset_queue_rsl
-#define ieee80211_softmac_stop_protocol ieee80211_softmac_stop_protocol_rsl
-#define ieee80211_softmac_start_protocol ieee80211_softmac_start_protocol_rsl
-#define ieee80211_is_shortslot ieee80211_is_shortslot_rsl
-#define ieee80211_is_54g ieee80211_is_54g_rsl
-#define ieee80211_wpa_supplicant_ioctl ieee80211_wpa_supplicant_ioctl_rsl
-#define ieee80211_ps_tx_ack ieee80211_ps_tx_ack_rsl
-#define ieee80211_softmac_xmit ieee80211_softmac_xmit_rsl
-#define ieee80211_stop_send_beacons ieee80211_stop_send_beacons_rsl
-#define notify_wx_assoc_event notify_wx_assoc_event_rsl
-#define SendDisassociation SendDisassociation_rsl
-#define ieee80211_disassociate ieee80211_disassociate_rsl
-#define ieee80211_start_send_beacons ieee80211_start_send_beacons_rsl
-#define ieee80211_stop_scan ieee80211_stop_scan_rsl
-#define ieee80211_send_probe_requests ieee80211_send_probe_requests_rsl
-#define ieee80211_softmac_scan_syncro ieee80211_softmac_scan_syncro_rsl
-#define ieee80211_start_scan_syncro ieee80211_start_scan_syncro_rsl
-
-#define ieee80211_wx_get_essid ieee80211_wx_get_essid_rsl
-#define ieee80211_wx_set_essid ieee80211_wx_set_essid_rsl
-#define ieee80211_wx_set_rate ieee80211_wx_set_rate_rsl
-#define ieee80211_wx_get_rate ieee80211_wx_get_rate_rsl
-#define ieee80211_wx_set_wap ieee80211_wx_set_wap_rsl
-#define ieee80211_wx_get_wap ieee80211_wx_get_wap_rsl
-#define ieee80211_wx_set_mode ieee80211_wx_set_mode_rsl
-#define ieee80211_wx_get_mode ieee80211_wx_get_mode_rsl
-#define ieee80211_wx_set_scan ieee80211_wx_set_scan_rsl
-#define ieee80211_wx_get_freq ieee80211_wx_get_freq_rsl
-#define ieee80211_wx_set_freq ieee80211_wx_set_freq_rsl
-#define ieee80211_wx_set_rawtx ieee80211_wx_set_rawtx_rsl
-#define ieee80211_wx_get_name ieee80211_wx_get_name_rsl
-#define ieee80211_wx_set_power ieee80211_wx_set_power_rsl
-#define ieee80211_wx_get_power ieee80211_wx_get_power_rsl
-#define ieee80211_wlan_frequencies ieee80211_wlan_frequencies_rsl
-#define ieee80211_wx_set_rts ieee80211_wx_set_rts_rsl
-#define ieee80211_wx_get_rts ieee80211_wx_get_rts_rsl
-
-#define ieee80211_txb_free ieee80211_txb_free_rsl
-
-#define ieee80211_wx_set_gen_ie ieee80211_wx_set_gen_ie_rsl
-#define ieee80211_wx_get_scan ieee80211_wx_get_scan_rsl
-#define ieee80211_wx_set_encode ieee80211_wx_set_encode_rsl
-#define ieee80211_wx_get_encode ieee80211_wx_get_encode_rsl
-#define ieee80211_wx_set_mlme ieee80211_wx_set_mlme_rsl
-#define ieee80211_wx_set_auth ieee80211_wx_set_auth_rsl
-#define ieee80211_wx_set_encode_ext ieee80211_wx_set_encode_ext_rsl
-#define ieee80211_wx_get_encode_ext ieee80211_wx_get_encode_ext_rsl
-
-
-typedef struct ieee_param {
- u32 cmd;
- u8 sta_addr[ETH_ALEN];
- union {
- struct {
- u8 name;
- u32 value;
- } wpa_param;
- struct {
- u32 len;
- u8 reserved[32];
- u8 data[0];
- } wpa_ie;
- struct{
- int command;
- int reason_code;
- } mlme;
- struct {
- u8 alg[IEEE_CRYPT_ALG_NAME_LEN];
- u8 set_tx;
- u32 err;
- u8 idx;
- u8 seq[8]; /* sequence counter (set: RX, get: TX) */
- u16 key_len;
- u8 key[0];
- } crypt;
- } u;
-}ieee_param;
-
-
-// linux under 2.6.9 release may not support it, so modify it for common use
-#define MSECS(t) msecs_to_jiffies(t)
-#define msleep_interruptible_rsl msleep_interruptible
-
-#define IEEE80211_DATA_LEN 2304
-/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
- 6.2.1.1.2.
-
- The figure in section 7.1.2 suggests a body size of up to 2312
- bytes is allowed, which is a bit confusing, I suspect this
- represents the 2304 bytes of real data, plus a possible 8 bytes of
- WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
-#define IEEE80211_1ADDR_LEN 10
-#define IEEE80211_2ADDR_LEN 16
-#define IEEE80211_3ADDR_LEN 24
-#define IEEE80211_4ADDR_LEN 30
-#define IEEE80211_FCS_LEN 4
-#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-#define IEEE80211_MGMT_HDR_LEN 24
-#define IEEE80211_DATA_HDR3_LEN 24
-#define IEEE80211_DATA_HDR4_LEN 30
-
-#define MIN_FRAG_THRESHOLD 256U
-#define MAX_FRAG_THRESHOLD 2346U
-
-
-/* Frame control field constants */
-#define IEEE80211_FCTL_VERS 0x0003
-#define IEEE80211_FCTL_FTYPE 0x000c
-#define IEEE80211_FCTL_STYPE 0x00f0
-#define IEEE80211_FCTL_FRAMETYPE 0x00fc
-#define IEEE80211_FCTL_TODS 0x0100
-#define IEEE80211_FCTL_FROMDS 0x0200
-#define IEEE80211_FCTL_DSTODS 0x0300 //added by david
-#define IEEE80211_FCTL_MOREFRAGS 0x0400
-#define IEEE80211_FCTL_RETRY 0x0800
-#define IEEE80211_FCTL_PM 0x1000
-#define IEEE80211_FCTL_MOREDATA 0x2000
-#define IEEE80211_FCTL_WEP 0x4000
-#define IEEE80211_FCTL_ORDER 0x8000
-
-#define IEEE80211_FTYPE_MGMT 0x0000
-#define IEEE80211_FTYPE_CTL 0x0004
-#define IEEE80211_FTYPE_DATA 0x0008
-
-/* management */
-#define IEEE80211_STYPE_ASSOC_REQ 0x0000
-#define IEEE80211_STYPE_ASSOC_RESP 0x0010
-#define IEEE80211_STYPE_REASSOC_REQ 0x0020
-#define IEEE80211_STYPE_REASSOC_RESP 0x0030
-#define IEEE80211_STYPE_PROBE_REQ 0x0040
-#define IEEE80211_STYPE_PROBE_RESP 0x0050
-#define IEEE80211_STYPE_BEACON 0x0080
-#define IEEE80211_STYPE_ATIM 0x0090
-#define IEEE80211_STYPE_DISASSOC 0x00A0
-#define IEEE80211_STYPE_AUTH 0x00B0
-#define IEEE80211_STYPE_DEAUTH 0x00C0
-#define IEEE80211_STYPE_MANAGE_ACT 0x00D0
-
-/* control */
-#define IEEE80211_STYPE_PSPOLL 0x00A0
-#define IEEE80211_STYPE_RTS 0x00B0
-#define IEEE80211_STYPE_CTS 0x00C0
-#define IEEE80211_STYPE_ACK 0x00D0
-#define IEEE80211_STYPE_CFEND 0x00E0
-#define IEEE80211_STYPE_CFENDACK 0x00F0
-#define IEEE80211_STYPE_BLOCKACK 0x0094
-
-/* data */
-#define IEEE80211_STYPE_DATA 0x0000
-#define IEEE80211_STYPE_DATA_CFACK 0x0010
-#define IEEE80211_STYPE_DATA_CFPOLL 0x0020
-#define IEEE80211_STYPE_DATA_CFACKPOLL 0x0030
-#define IEEE80211_STYPE_NULLFUNC 0x0040
-#define IEEE80211_STYPE_CFACK 0x0050
-#define IEEE80211_STYPE_CFPOLL 0x0060
-#define IEEE80211_STYPE_CFACKPOLL 0x0070
-#define IEEE80211_STYPE_QOS_DATA 0x0080 //added for WMM 2006/8/2
-#define IEEE80211_STYPE_QOS_NULL 0x00C0
-
-#define IEEE80211_SCTL_FRAG 0x000F
-#define IEEE80211_SCTL_SEQ 0xFFF0
-
-/* QOS control */
-#define IEEE80211_QCTL_TID 0x000F
-
-#define FC_QOS_BIT BIT7
-#define IsDataFrame(pdu) ( ((pdu[0] & 0x0C)==0x08) ? true : false )
-#define IsLegacyDataFrame(pdu) (IsDataFrame(pdu) && (!(pdu[0]&FC_QOS_BIT)) )
-//added by wb. Is this right?
-#define IsQoSDataFrame(pframe) ((*(u16*)pframe&(IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA)) == (IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA))
-#define Frame_Order(pframe) (*(u16*)pframe&IEEE80211_FCTL_ORDER)
-#define SN_LESS(a, b) (((a-b)&0x800)!=0)
-#define SN_EQUAL(a, b) (a == b)
-#define MAX_DEV_ADDR_SIZE 8
-typedef enum _ACT_CATEGORY{
- ACT_CAT_QOS = 1,
- ACT_CAT_DLS = 2,
- ACT_CAT_BA = 3,
- ACT_CAT_HT = 7,
- ACT_CAT_WMM = 17,
-} ACT_CATEGORY, *PACT_CATEGORY;
-
-typedef enum _TS_ACTION{
- ACT_ADDTSREQ = 0,
- ACT_ADDTSRSP = 1,
- ACT_DELTS = 2,
- ACT_SCHEDULE = 3,
-} TS_ACTION, *PTS_ACTION;
-
-typedef enum _BA_ACTION{
- ACT_ADDBAREQ = 0,
- ACT_ADDBARSP = 1,
- ACT_DELBA = 2,
-} BA_ACTION, *PBA_ACTION;
-
-typedef enum _InitialGainOpType{
- IG_Backup=0,
- IG_Restore,
- IG_Max
-}InitialGainOpType;
-
-/* debug macros */
-#define CONFIG_IEEE80211_DEBUG
-#ifdef CONFIG_IEEE80211_DEBUG
-extern u32 ieee80211_debug_level;
-#define IEEE80211_DEBUG(level, fmt, args...) \
-do { if (ieee80211_debug_level & (level)) \
- printk(KERN_DEBUG "ieee80211: " fmt, ## args); } while (0)
-//wb added to debug out data buf
-//if you want print DATA buffer related BA, please set ieee80211_debug_level to DATA|BA
-#define IEEE80211_DEBUG_DATA(level, data, datalen) \
- do{ if ((ieee80211_debug_level & (level)) == (level)) \
- { \
- int i; \
- u8* pdata = (u8*) data; \
- printk(KERN_DEBUG "ieee80211: %s()\n", __FUNCTION__); \
- for(i=0; i<(int)(datalen); i++) \
- { \
- printk("%2x ", pdata[i]); \
- if ((i+1)%16 == 0) printk("\n"); \
- } \
- printk("\n"); \
- } \
- } while (0)
-#else
-#define IEEE80211_DEBUG(level, fmt, args...) do {} while (0)
-#define IEEE80211_DEBUG_DATA(level, data, datalen) do {} while(0)
-#endif /* CONFIG_IEEE80211_DEBUG */
-
-/* debug macros not dependent on CONFIG_IEEE80211_DEBUG */
-
-/*
- * To use the debug system;
- *
- * If you are defining a new debug classification, simply add it to the #define
- * list here in the form of:
- *
- * #define IEEE80211_DL_xxxx VALUE
- *
- * shifting value to the left one bit from the previous entry. xxxx should be
- * the name of the classification (for example, WEP)
- *
- * You then need to either add a IEEE80211_xxxx_DEBUG() macro definition for your
- * classification, or use IEEE80211_DEBUG(IEEE80211_DL_xxxx, ...) whenever you want
- * to send output to that classification.
- *
- * To add your debug level to the list of levels seen when you perform
- *
- * % cat /proc/net/ipw/debug_level
- *
- * you simply need to add your entry to the ipw_debug_levels array.
- *
- * If you do not see debug_level in /proc/net/ipw then you do not have
- * CONFIG_IEEE80211_DEBUG defined in your kernel configuration
- *
- */
-
-#define IEEE80211_DL_INFO (1<<0)
-#define IEEE80211_DL_WX (1<<1)
-#define IEEE80211_DL_SCAN (1<<2)
-#define IEEE80211_DL_STATE (1<<3)
-#define IEEE80211_DL_MGMT (1<<4)
-#define IEEE80211_DL_FRAG (1<<5)
-#define IEEE80211_DL_EAP (1<<6)
-#define IEEE80211_DL_DROP (1<<7)
-
-#define IEEE80211_DL_TX (1<<8)
-#define IEEE80211_DL_RX (1<<9)
-
-#define IEEE80211_DL_HT (1<<10) //HT
-#define IEEE80211_DL_BA (1<<11) //ba
-#define IEEE80211_DL_TS (1<<12) //TS
-#define IEEE80211_DL_QOS (1<<13)
-#define IEEE80211_DL_REORDER (1<<14)
-#define IEEE80211_DL_IOT (1<<15)
-#define IEEE80211_DL_IPS (1<<16)
-#define IEEE80211_DL_TRACE (1<<29) //trace function, need to user net_ratelimit() together in order not to print too much to the screen
-#define IEEE80211_DL_DATA (1<<30) //use this flag to control whether print data buf out.
-#define IEEE80211_DL_ERR (1<<31) //always open
-#define IEEE80211_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a)
-#define IEEE80211_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a)
-#define IEEE80211_DEBUG_INFO(f, a...) IEEE80211_DEBUG(IEEE80211_DL_INFO, f, ## a)
-
-#define IEEE80211_DEBUG_WX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_WX, f, ## a)
-#define IEEE80211_DEBUG_SCAN(f, a...) IEEE80211_DEBUG(IEEE80211_DL_SCAN, f, ## a)
-#define IEEE80211_DEBUG_STATE(f, a...) IEEE80211_DEBUG(IEEE80211_DL_STATE, f, ## a)
-#define IEEE80211_DEBUG_MGMT(f, a...) IEEE80211_DEBUG(IEEE80211_DL_MGMT, f, ## a)
-#define IEEE80211_DEBUG_FRAG(f, a...) IEEE80211_DEBUG(IEEE80211_DL_FRAG, f, ## a)
-#define IEEE80211_DEBUG_EAP(f, a...) IEEE80211_DEBUG(IEEE80211_DL_EAP, f, ## a)
-#define IEEE80211_DEBUG_DROP(f, a...) IEEE80211_DEBUG(IEEE80211_DL_DROP, f, ## a)
-#define IEEE80211_DEBUG_TX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_TX, f, ## a)
-#define IEEE80211_DEBUG_RX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_RX, f, ## a)
-#define IEEE80211_DEBUG_QOS(f, a...) IEEE80211_DEBUG(IEEE80211_DL_QOS, f, ## a)
-
-#ifdef CONFIG_IEEE80211_DEBUG
-/* Added by Annie, 2005-11-22. */
-#define MAX_STR_LEN 64
-/* I want to see ASCII 33 to 126 only. Otherwise, I print '?'. Annie, 2005-11-22.*/
-#define PRINTABLE(_ch) (_ch>'!' && _ch<'~')
-#define IEEE80211_PRINT_STR(_Comp, _TitleString, _Ptr, _Len) \
- if((_Comp) & level) \
- { \
- int __i; \
- u8 buffer[MAX_STR_LEN]; \
- int length = (_Len<MAX_STR_LEN)? _Len : (MAX_STR_LEN-1) ; \
- memset(buffer, 0, MAX_STR_LEN); \
- memcpy(buffer, (u8 *)_Ptr, length ); \
- for( __i=0; __i<MAX_STR_LEN; __i++ ) \
- { \
- if( !PRINTABLE(buffer[__i]) ) buffer[__i] = '?'; \
- } \
- buffer[length] = '\0'; \
- printk("Rtl819x: "); \
- printk(_TitleString); \
- printk(": %d, <%s>\n", _Len, buffer); \
- }
-#else
-#define IEEE80211_PRINT_STR(_Comp, _TitleString, _Ptr, _Len) do {} while (0)
-#endif
-
-#include <linux/netdevice.h>
-#include <linux/if_arp.h> /* ARPHRD_ETHER */
-
-#ifndef WIRELESS_SPY
-#define WIRELESS_SPY // enable iwspy support
-#endif
-#include <net/iw_handler.h> // new driver API
-
-#ifndef ETH_P_PAE
-#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
-#endif /* ETH_P_PAE */
-
-#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */
-
-#ifndef ETH_P_80211_RAW
-#define ETH_P_80211_RAW (ETH_P_ECONET + 1)
-#endif
-
-/* IEEE 802.11 defines */
-
-#define P80211_OUI_LEN 3
-
-struct ieee80211_snap_hdr {
-
- u8 dsap; /* always 0xAA */
- u8 ssap; /* always 0xAA */
- u8 ctrl; /* always 0x03 */
- u8 oui[P80211_OUI_LEN]; /* organizational universal id */
-
-} __attribute__ ((packed));
-
-#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
-
-#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS)
-#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
-#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
-#define WLAN_FC_MORE_DATA(fc) ((fc) & IEEE80211_FCTL_MOREDATA)
-
-
-#define WLAN_FC_GET_FRAMETYPE(fc) ((fc) & IEEE80211_FCTL_FRAMETYPE)
-#define WLAN_GET_SEQ_FRAG(seq) ((seq) & IEEE80211_SCTL_FRAG)
-#define WLAN_GET_SEQ_SEQ(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-
-/* Authentication algorithms */
-#define WLAN_AUTH_OPEN 0
-#define WLAN_AUTH_SHARED_KEY 1
-#define WLAN_AUTH_LEAP 2
-
-#define WLAN_AUTH_CHALLENGE_LEN 128
-
-#define WLAN_CAPABILITY_BSS (1<<0)
-#define WLAN_CAPABILITY_IBSS (1<<1)
-#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
-#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
-#define WLAN_CAPABILITY_PRIVACY (1<<4)
-#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
-#define WLAN_CAPABILITY_PBCC (1<<6)
-#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
-#define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8)
-#define WLAN_CAPABILITY_QOS (1<<9)
-#define WLAN_CAPABILITY_SHORT_SLOT (1<<10)
-#define WLAN_CAPABILITY_DSSS_OFDM (1<<13)
-
-/* 802.11g ERP information element */
-#define WLAN_ERP_NON_ERP_PRESENT (1<<0)
-#define WLAN_ERP_USE_PROTECTION (1<<1)
-#define WLAN_ERP_BARKER_PREAMBLE (1<<2)
-
-/* Status codes */
-enum ieee80211_statuscode {
- WLAN_STATUS_SUCCESS = 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE = 1,
- WLAN_STATUS_CAPS_UNSUPPORTED = 10,
- WLAN_STATUS_REASSOC_NO_ASSOC = 11,
- WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12,
- WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13,
- WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14,
- WLAN_STATUS_CHALLENGE_FAIL = 15,
- WLAN_STATUS_AUTH_TIMEOUT = 16,
- WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17,
- WLAN_STATUS_ASSOC_DENIED_RATES = 18,
- /* 802.11b */
- WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19,
- WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20,
- WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21,
- /* 802.11h */
- WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22,
- WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23,
- WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24,
- /* 802.11g */
- WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25,
- WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26,
- /* 802.11i */
- WLAN_STATUS_INVALID_IE = 40,
- WLAN_STATUS_INVALID_GROUP_CIPHER = 41,
- WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42,
- WLAN_STATUS_INVALID_AKMP = 43,
- WLAN_STATUS_UNSUPP_RSN_VERSION = 44,
- WLAN_STATUS_INVALID_RSN_IE_CAP = 45,
- WLAN_STATUS_CIPHER_SUITE_REJECTED = 46,
-};
-
-/* Reason codes */
-enum ieee80211_reasoncode {
- WLAN_REASON_UNSPECIFIED = 1,
- WLAN_REASON_PREV_AUTH_NOT_VALID = 2,
- WLAN_REASON_DEAUTH_LEAVING = 3,
- WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4,
- WLAN_REASON_DISASSOC_AP_BUSY = 5,
- WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6,
- WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7,
- WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8,
- WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9,
- /* 802.11h */
- WLAN_REASON_DISASSOC_BAD_POWER = 10,
- WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11,
- /* 802.11i */
- WLAN_REASON_INVALID_IE = 13,
- WLAN_REASON_MIC_FAILURE = 14,
- WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15,
- WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16,
- WLAN_REASON_IE_DIFFERENT = 17,
- WLAN_REASON_INVALID_GROUP_CIPHER = 18,
- WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19,
- WLAN_REASON_INVALID_AKMP = 20,
- WLAN_REASON_UNSUPP_RSN_VERSION = 21,
- WLAN_REASON_INVALID_RSN_IE_CAP = 22,
- WLAN_REASON_IEEE8021X_FAILED = 23,
- WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
-};
-
-#define IEEE80211_STATMASK_SIGNAL (1<<0)
-#define IEEE80211_STATMASK_RSSI (1<<1)
-#define IEEE80211_STATMASK_NOISE (1<<2)
-#define IEEE80211_STATMASK_RATE (1<<3)
-#define IEEE80211_STATMASK_WEMASK 0x7
-
-#define IEEE80211_CCK_MODULATION (1<<0)
-#define IEEE80211_OFDM_MODULATION (1<<1)
-
-#define IEEE80211_24GHZ_BAND (1<<0)
-#define IEEE80211_52GHZ_BAND (1<<1)
-
-#define IEEE80211_CCK_RATE_LEN 4
-#define IEEE80211_CCK_RATE_1MB 0x02
-#define IEEE80211_CCK_RATE_2MB 0x04
-#define IEEE80211_CCK_RATE_5MB 0x0B
-#define IEEE80211_CCK_RATE_11MB 0x16
-#define IEEE80211_OFDM_RATE_LEN 8
-#define IEEE80211_OFDM_RATE_6MB 0x0C
-#define IEEE80211_OFDM_RATE_9MB 0x12
-#define IEEE80211_OFDM_RATE_12MB 0x18
-#define IEEE80211_OFDM_RATE_18MB 0x24
-#define IEEE80211_OFDM_RATE_24MB 0x30
-#define IEEE80211_OFDM_RATE_36MB 0x48
-#define IEEE80211_OFDM_RATE_48MB 0x60
-#define IEEE80211_OFDM_RATE_54MB 0x6C
-#define IEEE80211_BASIC_RATE_MASK 0x80
-
-#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
-#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
-#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
-#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
-#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
-#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
-#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
-#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
-#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
-#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
-#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
-#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
-
-#define IEEE80211_CCK_RATES_MASK 0x0000000F
-#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
- IEEE80211_CCK_RATE_2MB_MASK)
-#define IEEE80211_CCK_DEFAULT_RATES_MASK (IEEE80211_CCK_BASIC_RATES_MASK | \
- IEEE80211_CCK_RATE_5MB_MASK | \
- IEEE80211_CCK_RATE_11MB_MASK)
-
-#define IEEE80211_OFDM_RATES_MASK 0x00000FF0
-#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \
- IEEE80211_OFDM_RATE_12MB_MASK | \
- IEEE80211_OFDM_RATE_24MB_MASK)
-#define IEEE80211_OFDM_DEFAULT_RATES_MASK (IEEE80211_OFDM_BASIC_RATES_MASK | \
- IEEE80211_OFDM_RATE_9MB_MASK | \
- IEEE80211_OFDM_RATE_18MB_MASK | \
- IEEE80211_OFDM_RATE_36MB_MASK | \
- IEEE80211_OFDM_RATE_48MB_MASK | \
- IEEE80211_OFDM_RATE_54MB_MASK)
-#define IEEE80211_DEFAULT_RATES_MASK (IEEE80211_OFDM_DEFAULT_RATES_MASK | \
- IEEE80211_CCK_DEFAULT_RATES_MASK)
-
-#define IEEE80211_NUM_OFDM_RATES 8
-#define IEEE80211_NUM_CCK_RATES 4
-#define IEEE80211_OFDM_SHIFT_MASK_A 4
-
-
-/* this is stolen and modified from the madwifi driver*/
-#define IEEE80211_FC0_TYPE_MASK 0x0c
-#define IEEE80211_FC0_TYPE_DATA 0x08
-#define IEEE80211_FC0_SUBTYPE_MASK 0xB0
-#define IEEE80211_FC0_SUBTYPE_QOS 0x80
-
-#define IEEE80211_QOS_HAS_SEQ(fc) \
- (((fc) & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == \
- (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))
-
-/* this is stolen from ipw2200 driver */
-#define IEEE_IBSS_MAC_HASH_SIZE 31
-struct ieee_ibss_seq {
- u8 mac[ETH_ALEN];
- u16 seq_num[17];
- u16 frag_num[17];
- unsigned long packet_time[17];
- struct list_head list;
-};
-
-/* NOTE: This data is for statistical purposes; not all hardware provides this
- * information for frames received. Not setting these will not cause
- * any adverse affects. */
-struct ieee80211_rx_stats {
- u32 mac_time[2];
- s8 rssi;
- u8 signal;
- u8 noise;
- u16 rate; /* in 100 kbps */
- u8 received_channel;
- u8 control;
- u8 mask;
- u8 freq;
- u16 len;
- u64 tsf;
- u32 beacon_time;
- u8 nic_type;
- u16 Length;
- u8 SignalQuality; // in 0-100 index.
- s32 RecvSignalPower; // Real power in dBm for this packet, no beautification and aggregation.
- s8 RxPower; // in dBm Translate from PWdB
- u8 SignalStrength; // in 0-100 index.
- u16 bHwError:1;
- u16 bCRC:1;
- u16 bICV:1;
- u16 bShortPreamble:1;
- u16 Antenna:1; //for rtl8185
- u16 Decrypted:1; //for rtl8185, rtl8187
- u16 Wakeup:1; //for rtl8185
- u16 Reserved0:1; //for rtl8185
- u8 AGC;
- u32 TimeStampLow;
- u32 TimeStampHigh;
- bool bShift;
- bool bIsQosData; // Added by Annie, 2005-12-22.
- u8 UserPriority;
-
- //1!!!!!!!!!!!!!!!!!!!!!!!!!!!
- //1Attention Please!!!<11n or 8190 specific code should be put below this line>
- //1!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- u8 RxDrvInfoSize;
- u8 RxBufShift;
- bool bIsAMPDU;
- bool bFirstMPDU;
- bool bContainHTC;
- u32 RxPWDBAll;
- u8 RxMIMOSignalStrength[4]; // in 0~100 index
- s8 RxMIMOSignalQuality[2];
- bool bPacketMatchBSSID;
- bool bIsCCK;
- bool bPacketToSelf;
- u8* virtual_address;
- bool bPacketBeacon; //cosa add for rssi
- bool bToSelfBA; //cosa add for rssi
- char cck_adc_pwdb[4]; //cosa add for rx path selection
-};
-
-/* IEEE 802.11 requires that STA supports concurrent reception of at least
- * three fragmented frames. This define can be increased to support more
- * concurrent frames, but it should be noted that each entry can consume about
- * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
-#define IEEE80211_FRAG_CACHE_LEN 4
-
-struct ieee80211_frag_entry {
- unsigned long first_frag_time;
- unsigned int seq;
- unsigned int last_frag;
- struct sk_buff *skb;
- u8 src_addr[ETH_ALEN];
- u8 dst_addr[ETH_ALEN];
-};
-
-struct ieee80211_stats {
- unsigned int tx_unicast_frames;
- unsigned int tx_multicast_frames;
- unsigned int tx_fragments;
- unsigned int tx_unicast_octets;
- unsigned int tx_multicast_octets;
- unsigned int tx_deferred_transmissions;
- unsigned int tx_single_retry_frames;
- unsigned int tx_multiple_retry_frames;
- unsigned int tx_retry_limit_exceeded;
- unsigned int tx_discards;
- unsigned int rx_unicast_frames;
- unsigned int rx_multicast_frames;
- unsigned int rx_fragments;
- unsigned int rx_unicast_octets;
- unsigned int rx_multicast_octets;
- unsigned int rx_fcs_errors;
- unsigned int rx_discards_no_buffer;
- unsigned int tx_discards_wrong_sa;
- unsigned int rx_discards_undecryptable;
- unsigned int rx_message_in_msg_fragments;
- unsigned int rx_message_in_bad_msg_fragments;
-};
-
-struct ieee80211_device;
-
-#include "ieee80211_crypt.h"
-
-#define SEC_KEY_1 (1<<0)
-#define SEC_KEY_2 (1<<1)
-#define SEC_KEY_3 (1<<2)
-#define SEC_KEY_4 (1<<3)
-#define SEC_ACTIVE_KEY (1<<4)
-#define SEC_AUTH_MODE (1<<5)
-#define SEC_UNICAST_GROUP (1<<6)
-#define SEC_LEVEL (1<<7)
-#define SEC_ENABLED (1<<8)
-#define SEC_ENCRYPT (1<<9)
-
-#define SEC_LEVEL_0 0 /* None */
-#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
-#define SEC_LEVEL_2 2 /* Level 1 + TKIP */
-#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */
-#define SEC_LEVEL_3 4 /* Level 2 + CCMP */
-
-#define SEC_ALG_NONE 0
-#define SEC_ALG_WEP 1
-#define SEC_ALG_TKIP 2
-#define SEC_ALG_CCMP 4
-
-#define WEP_KEYS 4
-#define WEP_KEY_LEN 13
-#define SCM_KEY_LEN 32
-#define SCM_TEMPORAL_KEY_LENGTH 16
-
-struct ieee80211_security {
- u16 active_key:2,
- enabled:1,
- auth_mode:2,
- auth_algo:4,
- unicast_uses_group:1,
- encrypt:1;
- u8 key_sizes[WEP_KEYS];
- u8 keys[WEP_KEYS][SCM_KEY_LEN];
- u8 level;
- u16 flags;
-} __attribute__ ((packed));
-
-
-/*
- 802.11 data frame from AP
- ,-------------------------------------------------------------------.
-Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
- |------|------|---------|---------|---------|------|---------|------|
-Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | frame | fcs |
- | | tion | (BSSID) | | | ence | data | |
- `-------------------------------------------------------------------'
-Total: 28-2340 bytes
-*/
-
-/* Management Frame Information Element Types */
-enum ieee80211_mfie {
- MFIE_TYPE_SSID = 0,
- MFIE_TYPE_RATES = 1,
- MFIE_TYPE_FH_SET = 2,
- MFIE_TYPE_DS_SET = 3,
- MFIE_TYPE_CF_SET = 4,
- MFIE_TYPE_TIM = 5,
- MFIE_TYPE_IBSS_SET = 6,
- MFIE_TYPE_COUNTRY = 7,
- MFIE_TYPE_HOP_PARAMS = 8,
- MFIE_TYPE_HOP_TABLE = 9,
- MFIE_TYPE_REQUEST = 10,
- MFIE_TYPE_CHALLENGE = 16,
- MFIE_TYPE_POWER_CONSTRAINT = 32,
- MFIE_TYPE_POWER_CAPABILITY = 33,
- MFIE_TYPE_TPC_REQUEST = 34,
- MFIE_TYPE_TPC_REPORT = 35,
- MFIE_TYPE_SUPP_CHANNELS = 36,
- MFIE_TYPE_CSA = 37,
- MFIE_TYPE_MEASURE_REQUEST = 38,
- MFIE_TYPE_MEASURE_REPORT = 39,
- MFIE_TYPE_QUIET = 40,
- MFIE_TYPE_IBSS_DFS = 41,
- MFIE_TYPE_ERP = 42,
- MFIE_TYPE_RSN = 48,
- MFIE_TYPE_RATES_EX = 50,
- MFIE_TYPE_HT_CAP= 45,
- MFIE_TYPE_HT_INFO= 61,
- MFIE_TYPE_AIRONET=133,
- MFIE_TYPE_GENERIC = 221,
- MFIE_TYPE_QOS_PARAMETER = 222,
-};
-
-/* Minimal header; can be used for passing 802.11 frames with sufficient
- * information to determine what type of underlying data type is actually
- * stored in the data. */
- struct ieee80211_pspoll_hdr {
- __le16 frame_ctl;
- __le16 aid;
- u8 bssid[ETH_ALEN];
- u8 ta[ETH_ALEN];
- //u8 payload[0];
-} __attribute__ ((packed));
-
-struct ieee80211_hdr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 payload[0];
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_1addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 payload[0];
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_2addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 payload[0];
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 payload[0];
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_4addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 addr4[ETH_ALEN];
- u8 payload[0];
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_3addrqos {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 payload[0];
- __le16 qos_ctl;
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_4addrqos {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 addr4[ETH_ALEN];
- u8 payload[0];
- __le16 qos_ctl;
-} __attribute__ ((packed));
-
-struct ieee80211_info_element {
- u8 id;
- u8 len;
- u8 data[0];
-} __attribute__ ((packed));
-
-struct ieee80211_authentication {
- struct ieee80211_hdr_3addr header;
- __le16 algorithm;
- __le16 transaction;
- __le16 status;
- /*challenge*/
- struct ieee80211_info_element info_element[0];
-} __attribute__ ((packed));
-
-struct ieee80211_disassoc {
- struct ieee80211_hdr_3addr header;
- __le16 reason;
-} __attribute__ ((packed));
-
-struct ieee80211_probe_request {
- struct ieee80211_hdr_3addr header;
- /* SSID, supported rates */
- struct ieee80211_info_element info_element[0];
-} __attribute__ ((packed));
-
-struct ieee80211_probe_response {
- struct ieee80211_hdr_3addr header;
- u32 time_stamp[2];
- __le16 beacon_interval;
- __le16 capability;
- /* SSID, supported rates, FH params, DS params,
- * CF params, IBSS params, TIM (if beacon), RSN */
- struct ieee80211_info_element info_element[0];
-} __attribute__ ((packed));
-
-/* Alias beacon for probe_response */
-#define ieee80211_beacon ieee80211_probe_response
-
-struct ieee80211_assoc_request_frame {
- struct ieee80211_hdr_3addr header;
- __le16 capability;
- __le16 listen_interval;
- /* SSID, supported rates, RSN */
- struct ieee80211_info_element info_element[0];
-} __attribute__ ((packed));
-
-struct ieee80211_reassoc_request_frame {
- struct ieee80211_hdr_3addr header;
- __le16 capability;
- __le16 listen_interval;
- u8 current_ap[ETH_ALEN];
- /* SSID, supported rates, RSN */
- struct ieee80211_info_element info_element[0];
-} __attribute__ ((packed));
-
-struct ieee80211_assoc_response_frame {
- struct ieee80211_hdr_3addr header;
- __le16 capability;
- __le16 status;
- __le16 aid;
- struct ieee80211_info_element info_element[0]; /* supported rates */
-} __attribute__ ((packed));
-
-struct ieee80211_txb {
- u8 nr_frags;
- u8 encrypted;
- u8 queue_index;
- u8 rts_included;
- u16 reserved;
- __le16 frag_size;
- __le16 payload_size;
- struct sk_buff *fragments[0];
-};
-
-#define MAX_TX_AGG_COUNT 16
-struct ieee80211_drv_agg_txb {
- u8 nr_drv_agg_frames;
- struct sk_buff *tx_agg_frames[MAX_TX_AGG_COUNT];
-}__attribute__((packed));
-
-#define MAX_SUBFRAME_COUNT 64
-struct ieee80211_rxb {
- u8 nr_subframes;
- struct sk_buff *subframes[MAX_SUBFRAME_COUNT];
- u8 dst[ETH_ALEN];
- u8 src[ETH_ALEN];
-}__attribute__((packed));
-
-typedef union _frameqos {
- u16 shortdata;
- u8 chardata[2];
- struct {
- u16 tid:4;
- u16 eosp:1;
- u16 ack_policy:2;
- u16 reserved:1;
- u16 txop:8;
- }field;
-}frameqos,*pframeqos;
-
-/* SWEEP TABLE ENTRIES NUMBER*/
-#define MAX_SWEEP_TAB_ENTRIES 42
-#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7
-/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs
- * only use 8, and then use extended rates for the remaining supported
- * rates. Other APs, however, stick all of their supported rates on the
- * main rates information element... */
-#define MAX_RATES_LENGTH ((u8)12)
-#define MAX_RATES_EX_LENGTH ((u8)16)
-#define MAX_NETWORK_COUNT 128
-
-#define MAX_CHANNEL_NUMBER 161
-#define IEEE80211_SOFTMAC_SCAN_TIME 100
-//(HZ / 2)
-#define IEEE80211_SOFTMAC_ASSOC_RETRY_TIME (HZ * 2)
-
-#define CRC_LENGTH 4U
-
-#define MAX_WPA_IE_LEN 64
-
-#define NETWORK_EMPTY_ESSID (1<<0)
-#define NETWORK_HAS_OFDM (1<<1)
-#define NETWORK_HAS_CCK (1<<2)
-
-/* QoS structure */
-#define NETWORK_HAS_QOS_PARAMETERS (1<<3)
-#define NETWORK_HAS_QOS_INFORMATION (1<<4)
-#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \
- NETWORK_HAS_QOS_INFORMATION)
-/* 802.11h */
-#define NETWORK_HAS_POWER_CONSTRAINT (1<<5)
-#define NETWORK_HAS_CSA (1<<6)
-#define NETWORK_HAS_QUIET (1<<7)
-#define NETWORK_HAS_IBSS_DFS (1<<8)
-#define NETWORK_HAS_TPC_REPORT (1<<9)
-
-#define NETWORK_HAS_ERP_VALUE (1<<10)
-
-#define QOS_QUEUE_NUM 4
-#define QOS_OUI_LEN 3
-#define QOS_OUI_TYPE 2
-#define QOS_ELEMENT_ID 221
-#define QOS_OUI_INFO_SUB_TYPE 0
-#define QOS_OUI_PARAM_SUB_TYPE 1
-#define QOS_VERSION_1 1
-#define QOS_AIFSN_MIN_VALUE 2
-
-struct ieee80211_qos_information_element {
- u8 elementID;
- u8 length;
- u8 qui[QOS_OUI_LEN];
- u8 qui_type;
- u8 qui_subtype;
- u8 version;
- u8 ac_info;
-} __attribute__ ((packed));
-
-struct ieee80211_qos_ac_parameter {
- u8 aci_aifsn;
- u8 ecw_min_max;
- __le16 tx_op_limit;
-} __attribute__ ((packed));
-
-struct ieee80211_qos_parameter_info {
- struct ieee80211_qos_information_element info_element;
- u8 reserved;
- struct ieee80211_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM];
-} __attribute__ ((packed));
-
-struct ieee80211_qos_parameters {
- __le16 cw_min[QOS_QUEUE_NUM];
- __le16 cw_max[QOS_QUEUE_NUM];
- u8 aifs[QOS_QUEUE_NUM];
- u8 flag[QOS_QUEUE_NUM];
- __le16 tx_op_limit[QOS_QUEUE_NUM];
-} __attribute__ ((packed));
-
-struct ieee80211_qos_data {
- struct ieee80211_qos_parameters parameters;
- int active;
- int supported;
- u8 param_count;
- u8 old_param_count;
-};
-
-struct ieee80211_tim_parameters {
- u8 tim_count;
- u8 tim_period;
-} __attribute__ ((packed));
-
-//#else
-struct ieee80211_wmm_ac_param {
- u8 ac_aci_acm_aifsn;
- u8 ac_ecwmin_ecwmax;
- u16 ac_txop_limit;
-};
-
-struct ieee80211_wmm_ts_info {
- u8 ac_dir_tid;
- u8 ac_up_psb;
- u8 reserved;
-} __attribute__ ((packed));
-
-struct ieee80211_wmm_tspec_elem {
- struct ieee80211_wmm_ts_info ts_info;
- u16 norm_msdu_size;
- u16 max_msdu_size;
- u32 min_serv_inter;
- u32 max_serv_inter;
- u32 inact_inter;
- u32 suspen_inter;
- u32 serv_start_time;
- u32 min_data_rate;
- u32 mean_data_rate;
- u32 peak_data_rate;
- u32 max_burst_size;
- u32 delay_bound;
- u32 min_phy_rate;
- u16 surp_band_allow;
- u16 medium_time;
-}__attribute__((packed));
-
-enum eap_type {
- EAP_PACKET = 0,
- EAPOL_START,
- EAPOL_LOGOFF,
- EAPOL_KEY,
- EAPOL_ENCAP_ASF_ALERT
-};
-
-static const char *eap_types[] = {
- [EAP_PACKET] = "EAP-Packet",
- [EAPOL_START] = "EAPOL-Start",
- [EAPOL_LOGOFF] = "EAPOL-Logoff",
- [EAPOL_KEY] = "EAPOL-Key",
- [EAPOL_ENCAP_ASF_ALERT] = "EAPOL-Encap-ASF-Alert"
-};
-
-static inline const char *eap_get_type(int type)
-{
- return ((u32)type >= ARRAY_SIZE(eap_types)) ? "Unknown" : eap_types[type];
-}
-//added by amy for reorder
-static inline u8 Frame_QoSTID(u8* buf)
-{
- struct ieee80211_hdr_3addr *hdr;
- u16 fc;
- hdr = (struct ieee80211_hdr_3addr *)buf;
- fc = le16_to_cpu(hdr->frame_ctl);
- return (u8)((frameqos*)(buf + (((fc & IEEE80211_FCTL_TODS)&&(fc & IEEE80211_FCTL_FROMDS))? 30 : 24)))->field.tid;
-}
-
-//added by amy for reorder
-
-struct eapol {
- u8 snap[6];
- u16 ethertype;
- u8 version;
- u8 type;
- u16 length;
-} __attribute__ ((packed));
-
-struct ieee80211_softmac_stats{
- unsigned int rx_ass_ok;
- unsigned int rx_ass_err;
- unsigned int rx_probe_rq;
- unsigned int tx_probe_rs;
- unsigned int tx_beacons;
- unsigned int rx_auth_rq;
- unsigned int rx_auth_rs_ok;
- unsigned int rx_auth_rs_err;
- unsigned int tx_auth_rq;
- unsigned int no_auth_rs;
- unsigned int no_ass_rs;
- unsigned int tx_ass_rq;
- unsigned int rx_ass_rq;
- unsigned int tx_probe_rq;
- unsigned int reassoc;
- unsigned int swtxstop;
- unsigned int swtxawake;
- unsigned char CurrentShowTxate;
- unsigned char last_packet_rate;
- unsigned int txretrycount;
-};
-
-#define BEACON_PROBE_SSID_ID_POSITION 12
-
-struct ieee80211_info_element_hdr {
- u8 id;
- u8 len;
-} __attribute__ ((packed));
-
-/*
- * These are the data types that can make up management packets
- *
- u16 auth_algorithm;
- u16 auth_sequence;
- u16 beacon_interval;
- u16 capability;
- u8 current_ap[ETH_ALEN];
- u16 listen_interval;
- struct {
- u16 association_id:14, reserved:2;
- } __attribute__ ((packed));
- u32 time_stamp[2];
- u16 reason;
- u16 status;
-*/
-
-#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
-#define IEEE80211_DEFAULT_BASIC_RATE 2 //1Mbps
-
-enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
-#define MAX_SP_Len (WMM_all_frame << 4)
-#define IEEE80211_QOS_TID 0x0f
-#define QOS_CTL_NOTCONTAIN_ACK (0x01 << 5)
-
-#define IEEE80211_DTIM_MBCAST 4
-#define IEEE80211_DTIM_UCAST 2
-#define IEEE80211_DTIM_VALID 1
-#define IEEE80211_DTIM_INVALID 0
-
-#define IEEE80211_PS_DISABLED 0
-#define IEEE80211_PS_UNICAST IEEE80211_DTIM_UCAST
-#define IEEE80211_PS_MBCAST IEEE80211_DTIM_MBCAST
-
-//added by David for QoS 2006/6/30
-//#define WMM_Hang_8187
-#ifdef WMM_Hang_8187
-#undef WMM_Hang_8187
-#endif
-
-#define WME_AC_BK 0x00
-#define WME_AC_BE 0x01
-#define WME_AC_VI 0x02
-#define WME_AC_VO 0x03
-#define WME_ACI_MASK 0x03
-#define WME_AIFSN_MASK 0x03
-#define WME_AC_PRAM_LEN 16
-
-#define MAX_RECEIVE_BUFFER_SIZE 9100
-
-//UP Mapping to AC, using in MgntQuery_SequenceNumber() and maybe for DSCP
-#define UP2AC(up) ( \
- ((up) < 1) ? WME_AC_BE : \
- ((up) < 3) ? WME_AC_BK : \
- ((up) < 4) ? WME_AC_BE : \
- ((up) < 6) ? WME_AC_VI : \
- WME_AC_VO)
-
-//AC Mapping to UP, using in Tx part for selecting the corresponding TX queue
-#define AC2UP(_ac) ( \
- ((_ac) == WME_AC_VO) ? 6 : \
- ((_ac) == WME_AC_VI) ? 5 : \
- ((_ac) == WME_AC_BK) ? 1 : \
- 0)
-
-#define ETHER_ADDR_LEN 6 /* length of an Ethernet address */
-#define ETHERNET_HEADER_SIZE 14 /* length of two Ethernet address plus ether type*/
-
-struct ether_header {
- u8 ether_dhost[ETHER_ADDR_LEN];
- u8 ether_shost[ETHER_ADDR_LEN];
- u16 ether_type;
-} __attribute__((packed));
-
-#ifndef ETHERTYPE_PAE
-#define ETHERTYPE_PAE 0x888e /* EAPOL PAE/802.1x */
-#endif
-#ifndef ETHERTYPE_IP
-#define ETHERTYPE_IP 0x0800 /* IP protocol */
-#endif
-
-typedef struct _bss_ht{
-
- bool support_ht;
-
- // HT related elements
- u8 ht_cap_buf[32];
- u16 ht_cap_len;
- u8 ht_info_buf[32];
- u16 ht_info_len;
-
- HT_SPEC_VER ht_spec_ver;
-
- bool aggregation;
- bool long_slot_time;
-}bss_ht, *pbss_ht;
-
-typedef enum _erp_t{
- ERP_NonERPpresent = 0x01,
- ERP_UseProtection = 0x02,
- ERP_BarkerPreambleMode = 0x04,
-} erp_t;
-
-
-struct ieee80211_network {
- /* These entries are used to identify a unique network */
- u8 bssid[ETH_ALEN];
- u8 channel;
- /* Ensure null-terminated for any debug msgs */
- u8 ssid[IW_ESSID_MAX_SIZE + 1];
- u8 ssid_len;
- struct ieee80211_qos_data qos_data;
-
- //added by amy for LEAP
- bool bWithAironetIE;
- bool bCkipSupported;
- bool bCcxRmEnable;
- u16 CcxRmState[2];
- // CCXv4 S59, MBSSID.
- bool bMBssidValid;
- u8 MBssidMask;
- u8 MBssid[6];
- // CCX 2 S38, WLAN Device Version Number element. Annie, 2006-08-20.
- bool bWithCcxVerNum;
- u8 BssCcxVerNumber;
- /* These are network statistics */
- struct ieee80211_rx_stats stats;
- u16 capability;
- u8 rates[MAX_RATES_LENGTH];
- u8 rates_len;
- u8 rates_ex[MAX_RATES_EX_LENGTH];
- u8 rates_ex_len;
- unsigned long last_scanned;
- u8 mode;
- u32 flags;
- u32 last_associate;
- u32 time_stamp[2];
- u16 beacon_interval;
- u16 listen_interval;
- u16 atim_window;
- u8 erp_value;
- u8 wpa_ie[MAX_WPA_IE_LEN];
- size_t wpa_ie_len;
- u8 rsn_ie[MAX_WPA_IE_LEN];
- size_t rsn_ie_len;
-
- struct ieee80211_tim_parameters tim;
- u8 dtim_period;
- u8 dtim_data;
- u32 last_dtim_sta_time[2];
-
- //appeded for QoS
- u8 wmm_info;
- struct ieee80211_wmm_ac_param wmm_param[4];
- u8 QoS_Enable;
-#ifdef THOMAS_TURBO
- u8 Turbo_Enable;//enable turbo mode, added by thomas
-#endif
-#ifdef ENABLE_DOT11D
- u16 CountryIeLen;
- u8 CountryIeBuf[MAX_IE_LEN];
-#endif
- // HT Related, by amy, 2008.04.29
- BSS_HT bssht;
- // Add to handle broadcom AP management frame CCK rate.
- bool broadcom_cap_exist;
- bool ralink_cap_exist;
- bool atheros_cap_exist;
- bool cisco_cap_exist;
- bool marvell_cap_exist;
- bool unknown_cap_exist;
-// u8 berp_info;
- bool berp_info_valid;
- bool buseprotection;
- //put at the end of the structure.
- struct list_head list;
-};
-
-enum ieee80211_state {
-
- /* the card is not linked at all */
- IEEE80211_NOLINK = 0,
-
- /* IEEE80211_ASSOCIATING* are for BSS client mode
- * the driver shall not perform RX filtering unless
- * the state is LINKED.
- * The driver shall just check for the state LINKED and
- * defaults to NOLINK for ALL the other states (including
- * LINKED_SCANNING)
- */
-
- /* the association procedure will start (wq scheduling)*/
- IEEE80211_ASSOCIATING,
- IEEE80211_ASSOCIATING_RETRY,
-
- /* the association procedure is sending AUTH request*/
- IEEE80211_ASSOCIATING_AUTHENTICATING,
-
- /* the association procedure has successfully authentcated
- * and is sending association request
- */
- IEEE80211_ASSOCIATING_AUTHENTICATED,
-
- /* the link is ok. the card associated to a BSS or linked
- * to a ibss cell or acting as an AP and creating the bss
- */
- IEEE80211_LINKED,
-
- /* same as LINKED, but the driver shall apply RX filter
- * rules as we are in NO_LINK mode. As the card is still
- * logically linked, but it is doing a syncro site survey
- * then it will be back to LINKED state.
- */
- IEEE80211_LINKED_SCANNING,
-
-};
-
-#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
-#define DEFAULT_FTS 2346
-
-#define CFG_IEEE80211_RESERVE_FCS (1<<0)
-#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
-#define CFG_IEEE80211_RTS (1<<2)
-
-#define IEEE80211_24GHZ_MIN_CHANNEL 1
-#define IEEE80211_24GHZ_MAX_CHANNEL 14
-#define IEEE80211_24GHZ_CHANNELS (IEEE80211_24GHZ_MAX_CHANNEL - \
- IEEE80211_24GHZ_MIN_CHANNEL + 1)
-
-#define IEEE80211_52GHZ_MIN_CHANNEL 34
-#define IEEE80211_52GHZ_MAX_CHANNEL 165
-#define IEEE80211_52GHZ_CHANNELS (IEEE80211_52GHZ_MAX_CHANNEL - \
- IEEE80211_52GHZ_MIN_CHANNEL + 1)
-
-typedef struct tx_pending_t{
- int frag;
- struct ieee80211_txb *txb;
-}tx_pending_t;
-
-typedef struct _bandwidth_autoswitch
-{
- long threshold_20Mhzto40Mhz;
- long threshold_40Mhzto20Mhz;
- bool bforced_tx20Mhz;
- bool bautoswitch_enable;
-}bandwidth_autoswitch,*pbandwidth_autoswitch;
-
-
-//added by amy for order
-
-#define REORDER_WIN_SIZE 128
-#define REORDER_ENTRY_NUM 128
-typedef struct _RX_REORDER_ENTRY
-{
- struct list_head List;
- u16 SeqNum;
- struct ieee80211_rxb* prxb;
-} RX_REORDER_ENTRY, *PRX_REORDER_ENTRY;
-//added by amy for order
-typedef enum _Fsync_State{
- Default_Fsync,
- HW_Fsync,
- SW_Fsync
-}Fsync_State;
-
-typedef struct _IbssParms{
- u16 atimWin;
-}IbssParms, *PIbssParms;
-#define MAX_NUM_RATES 264 // Max num of support rates element: 8, Max num of ext. support rate: 255. 061122, by rcnjko.
-
-#ifdef ENABLE_DOT11D
-typedef enum
-{
- COUNTRY_CODE_FCC = 0,
- COUNTRY_CODE_IC = 1,
- COUNTRY_CODE_ETSI = 2,
- COUNTRY_CODE_SPAIN = 3,
- COUNTRY_CODE_FRANCE = 4,
- COUNTRY_CODE_MKK = 5,
- COUNTRY_CODE_MKK1 = 6,
- COUNTRY_CODE_ISRAEL = 7,
- COUNTRY_CODE_TELEC,
- COUNTRY_CODE_MIC,
- COUNTRY_CODE_GLOBAL_DOMAIN
-}country_code_type_t;
-#endif
-
-#define RT_MAX_LD_SLOT_NUM 10
-typedef struct _RT_LINK_DETECT_T{
-
- u32 NumRecvBcnInPeriod;
- u32 NumRecvDataInPeriod;
-
- u32 RxBcnNum[RT_MAX_LD_SLOT_NUM]; // number of Rx beacon / CheckForHang_period to determine link status
- u32 RxDataNum[RT_MAX_LD_SLOT_NUM]; // number of Rx data / CheckForHang_period to determine link status
- u16 SlotNum; // number of CheckForHang period to determine link status
- u16 SlotIndex;
-
- u32 NumTxOkInPeriod;
- u32 NumRxOkInPeriod;
- u32 NumRxUnicastOkInPeriod;
- bool bBusyTraffic;
-}RT_LINK_DETECT_T, *PRT_LINK_DETECT_T;
-
-//added by amy 090330
-typedef enum _HW_VARIABLES{
- HW_VAR_ETHER_ADDR,
- HW_VAR_MULTICAST_REG,
- HW_VAR_BASIC_RATE,
- HW_VAR_BSSID,
- HW_VAR_MEDIA_STATUS,
- HW_VAR_SECURITY_CONF,
- HW_VAR_BEACON_INTERVAL,
- HW_VAR_ATIM_WINDOW,
- HW_VAR_LISTEN_INTERVAL,
- HW_VAR_CS_COUNTER,
- HW_VAR_DEFAULTKEY0,
- HW_VAR_DEFAULTKEY1,
- HW_VAR_DEFAULTKEY2,
- HW_VAR_DEFAULTKEY3,
- HW_VAR_SIFS,
- HW_VAR_DIFS,
- HW_VAR_EIFS,
- HW_VAR_SLOT_TIME,
- HW_VAR_ACK_PREAMBLE,
- HW_VAR_CW_CONFIG,
- HW_VAR_CW_VALUES,
- HW_VAR_RATE_FALLBACK_CONTROL,
- HW_VAR_CONTENTION_WINDOW,
- HW_VAR_RETRY_COUNT,
- HW_VAR_TR_SWITCH,
- HW_VAR_COMMAND, // For Command Register, Annie, 2006-04-07.
- HW_VAR_WPA_CONFIG, //2004/08/23, kcwu, for 8187 Security config
- HW_VAR_AMPDU_MIN_SPACE, // The spacing between sub-frame. Roger, 2008.07.04.
- HW_VAR_SHORTGI_DENSITY, // The density for shortGI. Roger, 2008.07.04.
- HW_VAR_AMPDU_FACTOR,
- HW_VAR_MCS_RATE_AVAILABLE,
- HW_VAR_AC_PARAM, // For AC Parameters, 2005.12.01, by rcnjko.
- HW_VAR_ACM_CTRL, // For ACM Control, Annie, 2005-12-13.
- HW_VAR_DIS_Req_Qsize, // For DIS_Reg_Qsize, Joseph
- HW_VAR_CCX_CHNL_LOAD, // For CCX 2 channel load request, 2006.05.04.
- HW_VAR_CCX_NOISE_HISTOGRAM, // For CCX 2 noise histogram request, 2006.05.04.
- HW_VAR_CCX_CLM_NHM, // For CCX 2 parallel channel load request and noise histogram request, 2006.05.12.
- HW_VAR_TxOPLimit, // For turbo mode related settings, added by Roger, 2006.12.07
- HW_VAR_TURBO_MODE, // For turbo mode related settings, added by Roger, 2006.12.15.
- HW_VAR_RF_STATE, // For change or query RF power state, 061214, rcnjko.
- HW_VAR_RF_OFF_BY_HW, // For UI to query if external HW signal disable RF, 061229, rcnjko.
- HW_VAR_BUS_SPEED, // In unit of bps. 2006.07.03, by rcnjko.
- HW_VAR_SET_DEV_POWER, // Set to low power, added by LanHsin, 2007.
-
- //1!!!!!!!!!!!!!!!!!!!!!!!!!!!
- //1Attention Please!!!<11n or 8190 specific code should be put below this line>
- //1!!!!!!!!!!!!!!!!!!!!!!!!!!!
- HW_VAR_RCR, //for RCR, David 2006,05,11
- HW_VAR_RATR_0,
- HW_VAR_RRSR,
- HW_VAR_CPU_RST,
- HW_VAR_CHECK_BSSID,
- HW_VAR_LBK_MODE, // Set lookback mode, 2008.06.11. added by Roger.
- // Set HW related setting for 11N AES bug.
- HW_VAR_AES_11N_FIX,
- // Set Usb Rx Aggregation
- HW_VAR_USB_RX_AGGR,
- HW_VAR_USER_CONTROL_TURBO_MODE,
- HW_VAR_RETRY_LIMIT,
-#ifndef _RTL8192_EXT_PATCH_
- HW_VAR_INIT_TX_RATE, //Get Current Tx rate register. 2008.12.10. Added by tynli
-#endif
- HW_VAR_TX_RATE_REG, //Get Current Tx rate register. 2008.12.10. Added by tynli
- HW_VAR_EFUSE_USAGE, //Get current EFUSE utilization. 2008.12.19. Added by Roger.
- HW_VAR_EFUSE_BYTES,
- HW_VAR_AUTOLOAD_STATUS, //Get current autoload status, 0: autoload success, 1: autoload fail. 2008.12.19. Added by Roger.
- HW_VAR_RF_2R_DISABLE, // 2R disable
- HW_VAR_SET_RPWM,
- HW_VAR_H2C_FW_PWRMODE, // For setting FW related H2C cmd structure. by tynli. 2009.2.18
- HW_VAR_H2C_FW_JOINBSSRPT, // For setting FW related H2C cmd structure. by tynli. 2009.2.18
- HW_VAR_1X1_RECV_COMBINE, // For 1T2R but only 1SS, Add by hpfan 2009.04.16 hpfan
- HW_VAR_STOP_SEND_BEACON,
- HW_VAR_TSF_TIMER, // Read from TSF register to get the current TSF timer, by Bruce, 2009-07-22.
- HW_VAR_IO_CMD,
- HW_VAR_HANDLE_FW_C2H, //Added by tynli. For handling FW C2H command. 2009.10.07.
- HW_VAR_DL_FW_RSVD_PAGE, //Added by tynli. Download the packets that FW will use to RSVD page. 2009.10.14.
- HW_VAR_AID, //Added by tynli.
- HW_VAR_HW_SEQ_ENABLE, //Added by tynli. 2009.10.20.
- HW_VAR_UPDATE_TSF, //Added by tynli. 2009.10.22. For Hw count TBTT time.
- HW_VAR_BCN_VALID, //Added by tynli.
- HW_VAR_FWLPS_RF_ON //Added by tynli. 2009.11.09. For checking if Fw finishs RF on sequence.
-}HW_VARIABLES;
-
-#define RT_CHECK_FOR_HANG_PERIOD 2
-
-struct ieee80211_device {
- struct net_device *dev;
- struct ieee80211_security sec;
-
- bool need_sw_enc;
-#ifdef ENABLE_LPS
- bool bAwakePktSent;
- u8 LPSDelayCnt;
- bool bIsAggregateFrame;
- bool polling;
- void (*LeisurePSLeave)(struct ieee80211_device *ieee);
-#endif
-
-#ifdef ENABLE_IPS
- bool proto_stoppping;
- bool wx_set_enc;
- struct semaphore ips_sem;
- struct work_struct ips_leave_wq;
- void (*ieee80211_ips_leave_wq) (struct ieee80211_device *ieee);
- void (*ieee80211_ips_leave)(struct ieee80211_device *ieee);
-#endif
- void (*SetHwRegHandler)(struct ieee80211_device *ieee, u8 variable, u8 *val);
- u8 (*rtllib_ap_sec_type)(struct ieee80211_device *ieee);
-
- //hw security related
- u8 hwsec_active; //hw security active.
- bool is_silent_reset;
- bool is_roaming;
- bool ieee_up;
- bool bSupportRemoteWakeUp;
- bool actscanning;
- bool beinretry;
- bool is_set_key;
- //11n spec related I wonder if These info structure need to be moved out of ieee80211_device
-
- //11n HT below
- PRT_HIGH_THROUGHPUT pHTInfo;
- spinlock_t bw_spinlock;
-
- spinlock_t reorder_spinlock;
- // for HT operation rate set. we use this one for HT data rate to separate different descriptors
- //the way fill this is the same as in the IE
- u8 Regdot11HTOperationalRateSet[16]; //use RATR format
- u8 dot11HTOperationalRateSet[16]; //use RATR format
- u8 RegHTSuppRateSet[16];
- u8 HTCurrentOperaRate;
- u8 HTHighestOperaRate;
- //wb added for rate operation mode to firmware
- u8 bTxDisableRateFallBack;
- u8 bTxUseDriverAssingedRate;
- atomic_t atm_chnlop;
- atomic_t atm_swbw;
-
- // 802.11e and WMM Traffic Stream Info (TX)
- struct list_head Tx_TS_Admit_List;
- struct list_head Tx_TS_Pending_List;
- struct list_head Tx_TS_Unused_List;
- TX_TS_RECORD TxTsRecord[TOTAL_TS_NUM];
- // 802.11e and WMM Traffic Stream Info (RX)
- struct list_head Rx_TS_Admit_List;
- struct list_head Rx_TS_Pending_List;
- struct list_head Rx_TS_Unused_List;
- RX_TS_RECORD RxTsRecord[TOTAL_TS_NUM];
- RX_REORDER_ENTRY RxReorderEntry[128];
- struct list_head RxReorder_Unused_List;
- u8 ForcedPriority; // Force per-packet priority 1~7. (default: 0, not to force it.)
-
-
- /* Bookkeeping structures */
- struct net_device_stats stats;
- struct ieee80211_stats ieee_stats;
- struct ieee80211_softmac_stats softmac_stats;
-
- /* Probe / Beacon management */
- struct list_head network_free_list;
- struct list_head network_list;
- struct ieee80211_network *networks;
- int scans;
- int scan_age;
-
- int iw_mode; /* operating mode (IW_MODE_*) */
- struct iw_spy_data spy_data;
-
- spinlock_t lock;
- spinlock_t wpax_suitlist_lock;
-
- int tx_headroom; /* Set to size of any additional room needed at front
- * of allocated Tx SKBs */
- u32 config;
-
- /* WEP and other encryption related settings at the device level */
- int open_wep; /* Set to 1 to allow unencrypted frames */
- int auth_mode;
- int reset_on_keychange; /* Set to 1 if the HW needs to be reset on
- * WEP key changes */
-
- /* If the host performs {en,de}cryption, then set to 1 */
- int host_encrypt;
- int host_encrypt_msdu;
- int host_decrypt;
- /* host performs multicast decryption */
- int host_mc_decrypt;
-
- /* host should strip IV and ICV from protected frames */
- /* meaningful only when hardware decryption is being used */
- int host_strip_iv_icv;
-
- int host_open_frag;
- int host_build_iv;
- int ieee802_1x; /* is IEEE 802.1X used */
-
- /* WPA data */
- bool bHalfWirelessN24GMode;
- int wpa_enabled;
- int drop_unencrypted;
- int tkip_countermeasures;
- int privacy_invoked;
- size_t wpa_ie_len;
- u8 *wpa_ie;
- u8 ap_mac_addr[6];
- u16 pairwise_key_type;
- u16 group_key_type;
- struct list_head crypt_deinit_list;
- struct ieee80211_crypt_data *crypt[WEP_KEYS];
- int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
- struct timer_list crypt_deinit_timer;
- int crypt_quiesced;
-
- int bcrx_sta_key; /* use individual keys to override default keys even
- * with RX of broad/multicast frames */
-
- /* Fragmentation structures */
- // each streaming contain a entry
- struct ieee80211_frag_entry frag_cache[17][IEEE80211_FRAG_CACHE_LEN];
- unsigned int frag_next_idx[17];
- u16 fts; /* Fragmentation Threshold */
-#define DEFAULT_RTS_THRESHOLD 2346U
-#define MIN_RTS_THRESHOLD 1
-#define MAX_RTS_THRESHOLD 2346U
- u16 rts; /* RTS threshold */
-
- /* Association info */
- u8 bssid[ETH_ALEN];
-
- /* This stores infos for the current network.
- * Either the network we are associated in INFRASTRUCTURE
- * or the network that we are creating in MASTER mode.
- * ad-hoc is a mixture ;-).
- * Note that in infrastructure mode, even when not associated,
- * fields bssid and essid may be valid (if wpa_set and essid_set
- * are true) as thy carry the value set by the user via iwconfig
- */
- struct ieee80211_network current_network;
-
- enum ieee80211_state state;
-
- int short_slot;
- int reg_mode;
- int mode; /* A, B, G */
- int modulation; /* CCK, OFDM */
- int freq_band; /* 2.4Ghz, 5.2Ghz, Mixed */
- int abg_true; /* ABG flag */
-
- /* used for forcing the ibss workqueue to terminate
- * without wait for the syncro scan to terminate
- */
- short sync_scan_hurryup;
-
- int perfect_rssi;
- int worst_rssi;
-
- u16 prev_seq_ctl; /* used to drop duplicate frames */
-
- /* map of allowed channels. 0 is dummy */
- // FIXME: remember to default to a basic channel plan depending of the PHY type
-#ifdef ENABLE_DOT11D
- void* pDot11dInfo;
- bool bGlobalDomain;
-#else
- int channel_map[MAX_CHANNEL_NUMBER+1];
-#endif
- int rate; /* current rate */
- int basic_rate;
- //FIXME: pleace callback, see if redundant with softmac_features
- short active_scan;
-
- /* this contains flags for selectively enable softmac support */
- u16 softmac_features;
-
- /* if the sequence control field is not filled by HW */
- u16 seq_ctrl[5];
-
- /* association procedure transaction sequence number */
- u16 associate_seq;
-
- /* AID for RTXed association responses */
- u16 assoc_id;
-
- /* power save mode related*/
- u8 ack_tx_to_ieee;
- short ps;
- short sta_sleep;
- int ps_timeout;
- int ps_period;
- struct tasklet_struct ps_task;
- u32 ps_th;
- u32 ps_tl;
-
- short raw_tx;
- /* used if IEEE_SOFTMAC_TX_QUEUE is set */
- short queue_stop;
- short scanning;
- short proto_started;
-
- struct semaphore wx_sem;
- struct semaphore scan_sem;
-
- spinlock_t mgmt_tx_lock;
- spinlock_t beacon_lock;
-
- short beacon_txing;
-
- short wap_set;
- short ssid_set;
-
- u8 wpax_type_set; //{added by David, 2006.9.28}
- u32 wpax_type_notify; //{added by David, 2006.9.26}
-
- /* QoS related flag */
- char init_wmmparam_flag;
- /* set on initialization */
- u8 qos_support;
-
- /* for discarding duplicated packets in IBSS */
- struct list_head ibss_mac_hash[IEEE_IBSS_MAC_HASH_SIZE];
-
- /* for discarding duplicated packets in BSS */
- u16 last_rxseq_num[17]; /* rx seq previous per-tid */
- u16 last_rxfrag_num[17];/* tx frag previous per-tid */
- unsigned long last_packet_time[17];
-
- /* for PS mode */
- unsigned long last_rx_ps_time;
- u8 LPSAwakeIntvl;
- u8 RegMaxLPSAwakeIntvl;
-
- /* used if IEEE_SOFTMAC_SINGLE_QUEUE is set */
- struct sk_buff *mgmt_queue_ring[MGMT_QUEUE_NUM];
- int mgmt_queue_head;
- int mgmt_queue_tail;
-#define IEEE80211_QUEUE_LIMIT 128
- u8 AsocRetryCount;
- unsigned int hw_header;
- struct sk_buff_head skb_waitQ[MAX_QUEUE_SIZE];
- struct sk_buff_head skb_aggQ[MAX_QUEUE_SIZE];
- struct sk_buff_head skb_drv_aggQ[MAX_QUEUE_SIZE];
- u32 sta_edca_param[4];
- bool aggregation;
- // Enable/Disable Rx immediate BA capability.
- bool enable_rx_imm_BA;
- bool bibsscoordinator;
-
- //+by amy for DM ,080515
- //Dynamic Tx power for near/far range enable/Disable , by amy , 2008-05-15
- bool bdynamic_txpower_enable;
-
- bool bCTSToSelfEnable;
- u8 CTSToSelfTH;
-
- u32 fsync_time_interval;
- u32 fsync_rate_bitmap;
- u8 fsync_rssi_threshold;
- bool bfsync_enable;
-
- u8 fsync_multiple_timeinterval; // FsyncMultipleTimeInterval * FsyncTimeInterval
- u32 fsync_firstdiff_ratethreshold; // low threshold
- u32 fsync_seconddiff_ratethreshold; // decrease threshold
- Fsync_State fsync_state;
- bool bis_any_nonbepkts;
- //20Mhz 40Mhz AutoSwitch Threshold
- bandwidth_autoswitch bandwidth_auto_switch;
- //for txpower tracking
- bool FwRWRF;
-
- //added by amy for AP roaming
- RT_LINK_DETECT_T LinkDetectInfo;
-
- /* used if IEEE_SOFTMAC_TX_QUEUE is set */
- struct tx_pending_t tx_pending;
-
- /* used if IEEE_SOFTMAC_ASSOCIATE is set */
- struct timer_list associate_timer;
-
- /* used if IEEE_SOFTMAC_BEACONS is set */
- struct timer_list beacon_timer;
- struct work_struct associate_complete_wq;
- struct work_struct associate_procedure_wq;
- struct delayed_work softmac_scan_wq;
- struct delayed_work associate_retry_wq;
- struct delayed_work start_ibss_wq;
- struct delayed_work hw_wakeup_wq;
-
- struct work_struct wx_sync_scan_wq;
- struct workqueue_struct *wq;
-
- /* Callback functions */
- void (*set_security)(struct ieee80211_device *ieee,
- struct ieee80211_security *sec);
-
- /* Used to TX data frame by using txb structs.
- * this is not used if in the softmac_features
- * is set the flag IEEE_SOFTMAC_TX_QUEUE
- */
- int (*hard_start_xmit)(struct ieee80211_txb *txb,
- struct ieee80211_device *ieee);
-
- int (*reset_port)(struct ieee80211_device *ieee);
- int (*is_queue_full) (struct ieee80211_device *ieee, int pri);
-
- int (*handle_management) (struct ieee80211_device *ieee,
- struct ieee80211_network * network, u16 type);
- int (*is_qos_active) (struct ieee80211_device *ieee, struct sk_buff *skb);
-
- /* Softmac-generated frames (mamagement) are TXed via this
- * callback if the flag IEEE_SOFTMAC_SINGLE_QUEUE is
- * not set. As some cards may have different HW queues that
- * one might want to use for data and management frames
- * the option to have two callbacks might be useful.
- * This function can't sleep.
- */
- int (*softmac_hard_start_xmit)(struct sk_buff *skb,
- struct ieee80211_device *ieee80211);
-
- /* used instead of hard_start_xmit (not softmac_hard_start_xmit)
- * if the IEEE_SOFTMAC_TX_QUEUE feature is used to TX data
- * frames. I the option IEEE_SOFTMAC_SINGLE_QUEUE is also set
- * then also management frames are sent via this callback.
- * This function can't sleep.
- */
- void (*softmac_data_hard_start_xmit)(struct sk_buff *skb,
- struct ieee80211_device *ieee80211, int rate);
-
- /* stops the HW queue for DATA frames. Useful to avoid
- * waste time to TX data frame when we are reassociating
- * This function can sleep.
- */
- void (*data_hard_stop)(struct ieee80211_device *ieee80211);
-
- /* OK this is complementar to data_poll_hard_stop */
- void (*data_hard_resume)(struct ieee80211_device *ieee80211);
-
- /* ask to the driver to retune the radio .
- * This function can sleep. the driver should ensure
- * the radio has been swithced before return.
- */
- void (*set_chan)(struct ieee80211_device *ieee80211, short ch);
-
- /* These are not used if the ieee stack takes care of
- * scanning (IEEE_SOFTMAC_SCAN feature set).
- * In this case only the set_chan is used.
- *
- * The syncro version is similar to the start_scan but
- * does not return until all channels has been scanned.
- * this is called in user context and should sleep,
- * it is called in a work_queue when swithcing to ad-hoc mode
- * or in behalf of iwlist scan when the card is associated
- * and root user ask for a scan.
- * the function stop_scan should stop both the syncro and
- * background scanning and can sleep.
- * The function start_scan should initiate the background
- * scanning and can't sleep.
- */
- void (*scan_syncro)(struct ieee80211_device *ieee80211);
- void (*start_scan)(struct ieee80211_device *ieee80211);
- void (*stop_scan)(struct ieee80211_device *ieee80211);
-
- /* indicate the driver that the link state is changed
- * for example it may indicate the card is associated now.
- * Driver might be interested in this to apply RX filter
- * rules or simply light the LINK led
- */
- void (*link_change)(struct ieee80211_device *ieee80211);
-
- /* these two function indicates to the HW when to start
- * and stop to send beacons. This is used when the
- * IEEE_SOFTMAC_BEACONS is not set. For now the
- * stop_send_bacons is NOT guaranteed to be called only
- * after start_send_beacons.
- */
- void (*start_send_beacons) (struct ieee80211_device *dev);
- void (*stop_send_beacons) (struct ieee80211_device *dev);
-
- /* power save mode related */
- void (*sta_wake_up) (struct ieee80211_device *ieee80211);
- void (*enter_sleep_state) (struct ieee80211_device *ieee80211, u32 th, u32 tl);
- short (*ps_is_queue_empty) (struct ieee80211_device *ieee80211);
- int (*handle_beacon) (struct ieee80211_device *ieee80211, struct ieee80211_beacon *beacon, struct ieee80211_network *network);
- int (*handle_assoc_response) (struct ieee80211_device *ieee80211, struct ieee80211_assoc_response_frame *resp, struct ieee80211_network *network);
-
- /* check whether Tx hw resouce available */
- short (*check_nic_enough_desc)(struct ieee80211_device *ieee80211, int queue_index);
- //added by wb for HT related
- void (*SetBWModeHandler)(struct ieee80211_device *ieee80211, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
- bool (*GetNmodeSupportBySecCfg)(struct ieee80211_device *ieee80211);
- void (*SetWirelessMode)(struct ieee80211_device *ieee80211, u8 wireless_mode);
- bool (*GetHalfNmodeSupportByAPsHandler)(struct ieee80211_device *ieee80211);
- void (*InitialGainHandler)(struct ieee80211_device *ieee80211, u8 Operation);
-
- /* This must be the last item so that it points to the data
- * allocated beyond this structure by alloc_ieee80211 */
- u8 priv[0];
-};
-
-#define RT_RF_OFF_LEVL_ASPM BIT0 // PCI ASPM
-#define RT_RF_OFF_LEVL_CLK_REQ BIT1 // PCI clock request
-#define RT_RF_OFF_LEVL_PCI_D3 BIT2 // PCI D3 mode
-#define RT_RF_OFF_LEVL_HALT_NIC BIT3 // NIC halt, re-initialize hw parameters
-#define RT_RF_OFF_LEVL_FREE_FW BIT4 // FW free, re-download the FW
-#define RT_RF_OFF_LEVL_FW_32K BIT5 // FW in 32k
-#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT6 // Always enable ASPM and Clock Req in initialization.
-#define RT_RF_LPS_DISALBE_2R BIT30 // When LPS is on, disable 2R if no packet is received or transmittd.
-#define RT_RF_LPS_LEVEL_ASPM BIT31 // LPS with ASPM
-#define RT_IN_PS_LEVEL(pPSC, _PS_FLAG) ((pPSC->CurPsLevel & _PS_FLAG) ? true : false)
-#define RT_CLEAR_PS_LEVEL(pPSC, _PS_FLAG) (pPSC->CurPsLevel &= (~(_PS_FLAG)))
-#define RT_SET_PS_LEVEL(pPSC, _PS_FLAG) (pPSC->CurPsLevel |= _PS_FLAG)
-
-#define IEEE_A (1<<0)
-#define IEEE_B (1<<1)
-#define IEEE_G (1<<2)
-#define IEEE_N_24G (1<<4)
-#define IEEE_N_5G (1<<5)
-#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
-
-/* Generate a 802.11 header */
-
-/* Uses the channel change callback directly
- * instead of [start/stop] scan callbacks
- */
-#define IEEE_SOFTMAC_SCAN (1<<2)
-
-/* Perform authentication and association handshake */
-#define IEEE_SOFTMAC_ASSOCIATE (1<<3)
-
-/* Generate probe requests */
-#define IEEE_SOFTMAC_PROBERQ (1<<4)
-
-/* Generate respones to probe requests */
-#define IEEE_SOFTMAC_PROBERS (1<<5)
-
-/* The ieee802.11 stack will manages the netif queue
- * wake/stop for the driver, taking care of 802.11
- * fragmentation. See softmac.c for details. */
-#define IEEE_SOFTMAC_TX_QUEUE (1<<7)
-
-/* Uses only the softmac_data_hard_start_xmit
- * even for TX management frames.
- */
-#define IEEE_SOFTMAC_SINGLE_QUEUE (1<<8)
-
-/* Generate beacons. The stack will enqueue beacons
- * to the card
- */
-#define IEEE_SOFTMAC_BEACONS (1<<6)
-
-static inline void *ieee80211_priv(struct net_device *dev)
-{
- return ((struct ieee80211_device *)netdev_priv(dev))->priv;
-}
-
-extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
-{
- /* Single white space is for Linksys APs */
- if (essid_len == 1 && essid[0] == ' ')
- return 1;
-
- /* Otherwise, if the entire essid is 0, we assume it is hidden */
- while (essid_len) {
- essid_len--;
- if (essid[essid_len] != '\0')
- return 0;
- }
-
- return 1;
-}
-
-extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode)
-{
- /*
- * It is possible for both access points and our device to support
- * combinations of modes, so as long as there is one valid combination
- * of ap/device supported modes, then return success
- *
- */
- if ((mode & IEEE_A) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION) &&
- (ieee->freq_band & IEEE80211_52GHZ_BAND))
- return 1;
-
- if ((mode & IEEE_G) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION) &&
- (ieee->freq_band & IEEE80211_24GHZ_BAND))
- return 1;
-
- if ((mode & IEEE_B) &&
- (ieee->modulation & IEEE80211_CCK_MODULATION) &&
- (ieee->freq_band & IEEE80211_24GHZ_BAND))
- return 1;
-
- return 0;
-}
-
-extern inline int ieee80211_get_hdrlen(u16 fc)
-{
- int hdrlen = IEEE80211_3ADDR_LEN;
-
- switch (WLAN_FC_GET_TYPE(fc)) {
- case IEEE80211_FTYPE_DATA:
- if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
- hdrlen = IEEE80211_4ADDR_LEN; /* Addr4 */
- if(IEEE80211_QOS_HAS_SEQ(fc))
- hdrlen += 2; /* QOS ctrl*/
- break;
- case IEEE80211_FTYPE_CTL:
- switch (WLAN_FC_GET_STYPE(fc)) {
- case IEEE80211_STYPE_CTS:
- case IEEE80211_STYPE_ACK:
- hdrlen = IEEE80211_1ADDR_LEN;
- break;
- default:
- hdrlen = IEEE80211_2ADDR_LEN;
- break;
- }
- break;
- }
-
- return hdrlen;
-}
-
-static inline u8 *ieee80211_get_payload(struct ieee80211_hdr *hdr)
-{
- switch (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl))) {
- case IEEE80211_1ADDR_LEN:
- return ((struct ieee80211_hdr_1addr *)hdr)->payload;
- case IEEE80211_2ADDR_LEN:
- return ((struct ieee80211_hdr_2addr *)hdr)->payload;
- case IEEE80211_3ADDR_LEN:
- return ((struct ieee80211_hdr_3addr *)hdr)->payload;
- case IEEE80211_4ADDR_LEN:
- return ((struct ieee80211_hdr_4addr *)hdr)->payload;
- }
- return NULL;
-}
-
-static inline int ieee80211_is_ofdm_rate(u8 rate)
-{
- switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
- case IEEE80211_OFDM_RATE_6MB:
- case IEEE80211_OFDM_RATE_9MB:
- case IEEE80211_OFDM_RATE_12MB:
- case IEEE80211_OFDM_RATE_18MB:
- case IEEE80211_OFDM_RATE_24MB:
- case IEEE80211_OFDM_RATE_36MB:
- case IEEE80211_OFDM_RATE_48MB:
- case IEEE80211_OFDM_RATE_54MB:
- return 1;
- }
- return 0;
-}
-
-static inline int ieee80211_is_cck_rate(u8 rate)
-{
- switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
- case IEEE80211_CCK_RATE_1MB:
- case IEEE80211_CCK_RATE_2MB:
- case IEEE80211_CCK_RATE_5MB:
- case IEEE80211_CCK_RATE_11MB:
- return 1;
- }
- return 0;
-}
-
-
-/* ieee80211.c */
-void free_ieee80211(struct net_device *dev);
-struct net_device *alloc_ieee80211(int sizeof_priv);
-
-int ieee80211_set_encryption(struct ieee80211_device *ieee);
-
-/* ieee80211_tx.c */
-
-int ieee80211_encrypt_fragment(
- struct ieee80211_device *ieee,
- struct sk_buff *frag,
- int hdr_len);
-
-int ieee80211_rtl_xmit(struct sk_buff *skb,
- struct net_device *dev);
-void ieee80211_txb_free(struct ieee80211_txb *);
-
-
-/* ieee80211_rx.c */
-int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats);
-void ieee80211_rx_mgt(struct ieee80211_device *ieee,
- struct ieee80211_hdr_4addr *header,
- struct ieee80211_rx_stats *stats);
-
-/* ieee80211_wx.c */
-int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data* wrqu, char *extra);
-int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data* wrqu, char *extra);
-int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- struct iw_param *data, char *extra);
-int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len);
-
-/* ieee80211_softmac.c */
-short ieee80211_is_54g(struct ieee80211_network net);
-short ieee80211_is_shortslot(struct ieee80211_network net);
-int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats, u16 type,
- u16 stype);
-void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net);
-
-void SendDisassociation(struct ieee80211_device *ieee, u8* asSta, u8 asRsn);
-void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee);
-
-void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
-void notify_wx_assoc_event(struct ieee80211_device *ieee);
-void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee);
-void ieee80211_start_bss(struct ieee80211_device *ieee);
-void ieee80211_start_master_bss(struct ieee80211_device *ieee);
-void ieee80211_start_ibss(struct ieee80211_device *ieee);
-void ieee80211_softmac_init(struct ieee80211_device *ieee);
-void ieee80211_softmac_free(struct ieee80211_device *ieee);
-void ieee80211_associate_abort(struct ieee80211_device *ieee);
-void ieee80211_disassociate(struct ieee80211_device *ieee);
-void ieee80211_stop_scan(struct ieee80211_device *ieee);
-void ieee80211_start_scan_syncro(struct ieee80211_device *ieee);
-void ieee80211_check_all_nets(struct ieee80211_device *ieee);
-void ieee80211_start_protocol(struct ieee80211_device *ieee);
-void ieee80211_stop_protocol(struct ieee80211_device *ieee,u8 shutdown);
-void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
-void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee,u8 shutdown);
-void ieee80211_reset_queue(struct ieee80211_device *ieee);
-void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee);
-void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
-struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
-void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
-void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
-int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_point *p);
-void notify_wx_assoc_event(struct ieee80211_device *ieee);
-void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
-
-void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee);
-
-/* ieee80211_crypt_ccmp&tkip&wep.c */
-void ieee80211_tkip_null(void);
-void ieee80211_wep_null(void);
-void ieee80211_ccmp_null(void);
-
-/* ieee80211_softmac_wx.c */
-
-int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *ext);
-
-int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra);
-
-int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a,union iwreq_data *wrqu,char *b);
-
-int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-int ieee80211_wx_get_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-void ieee80211_wx_sync_scan_wq(struct work_struct *work);
-
-int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_get_name(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_set_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_get_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_set_rts(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int ieee80211_wx_get_rts(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-//HT
-#define MAX_RECEIVE_BUFFER_SIZE 9100
-void HTDebugHTCapability(u8 *CapIE, u8 *TitleString );
-void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString);
-
-void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
-void HTUpdateDefaultSetting(struct ieee80211_device *ieee);
-void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u8 *len, u8 isEncrypt);
-void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo, u8 *len, u8 isEncrypt);
-void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg, u8 *len);
-void HTOnAssocRsp(struct ieee80211_device *ieee);
-void HTInitializeHTInfo(struct ieee80211_device *ieee);
-void HTInitializeBssDesc(PBSS_HT pBssHT);
-void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork);
-void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork);
-u8 HTGetHighestMCSRate(struct ieee80211_device *ieee, u8 *pMCSRateSet, u8 *pMCSFilter);
-extern u8 MCS_FILTER_ALL[];
-extern u16 MCS_DATA_RATE[2][2][77] ;
-
-u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame);
-void HTResetIOTSetting(PRT_HIGH_THROUGHPUT pHTInfo);
-bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee);
-u16 HTHalfMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate);
-u16 HTMcsToDataRate( struct ieee80211_device *ieee, u8 nMcsRate);
-u16 TxCountToDataRate( struct ieee80211_device *ieee, u8 nDataRate);
-int ieee80211_rx_ADDBAReq( struct ieee80211_device *ieee, struct sk_buff *skb);
-int ieee80211_rx_ADDBARsp( struct ieee80211_device *ieee, struct sk_buff *skb);
-int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb);
-void TsInitAddBA( struct ieee80211_device *ieee, PTX_TS_RECORD pTS, u8 Policy, u8 bOverwritePending);
-void TsInitDelBA( struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect);
-void BaSetupTimeOut(unsigned long data);
-void TxBaInactTimeout(unsigned long data);
-void RxBaInactTimeout(unsigned long data);
-void ResetBaEntry( PBA_RECORD pBA);
-//function in TS.c
-bool GetTs(
- struct ieee80211_device* ieee,
- PTS_COMMON_INFO *ppTS,
- u8* Addr,
- u8 TID,
- TR_SELECT TxRxSelect, //Rx:1, Tx:0
- bool bAddNewTs
- );
-void TSInitialize(struct ieee80211_device *ieee);
-void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTS);
-void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr);
-void RemoveAllTS(struct ieee80211_device *ieee);
-void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee);
-
-extern const long ieee80211_wlan_frequencies[];
-
-extern inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
-{
- ieee->scans++;
-}
-
-extern inline int ieee80211_get_scans(struct ieee80211_device *ieee)
-{
- return ieee->scans;
-}
-
-static inline const char *escape_essid(const char *essid, u8 essid_len) {
- static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
- const char *s = essid;
- char *d = escaped;
-
- if (ieee80211_is_empty_essid(essid, essid_len)) {
- memcpy(escaped, "<hidden>", sizeof("<hidden>"));
- return escaped;
- }
-
- essid_len = min(essid_len, (u8)IW_ESSID_MAX_SIZE);
- while (essid_len--) {
- if (*s == '\0') {
- *d++ = '\\';
- *d++ = '0';
- s++;
- } else {
- *d++ = *s++;
- }
- }
- *d = '\0';
- return escaped;
-}
-
-/* For the function is more related to hardware setting, it's better to use the
- * ieee handler to refer to it.
- */
-int ieee80211_data_xmit(struct sk_buff *skb, struct net_device *dev);
-int ieee80211_parse_info_param(struct ieee80211_device *ieee,
- struct ieee80211_info_element *info_element,
- u16 length,
- struct ieee80211_network *network,
- struct ieee80211_rx_stats *stats);
-
-void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb** prxbIndicateArray,u8 index);
-void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr);
-void ieee80211_sta_ps_send_pspoll_frame(struct ieee80211_device *ieee);
-#define RT_ASOC_RETRY_LIMIT 5
-#endif /* IEEE80211_H */
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
deleted file mode 100644
index 61fd4ced452..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Host AP crypto routines
- *
- * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- * Portions Copyright (C) 2004, Intel Corporation <jketreno@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
- *
- */
-
-//#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <asm/string.h>
-#include <asm/errno.h>
-
-#include "ieee80211.h"
-
-//MODULE_AUTHOR("Jouni Malinen");
-//MODULE_DESCRIPTION("HostAP crypto");
-//MODULE_LICENSE("GPL");
-
-struct ieee80211_crypto_alg {
- struct list_head list;
- struct ieee80211_crypto_ops *ops;
-};
-
-
-struct ieee80211_crypto {
- struct list_head algs;
- spinlock_t lock;
-};
-
-static struct ieee80211_crypto *hcrypt;
-
-void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee,
- int force)
-{
- struct list_head *ptr, *n;
- struct ieee80211_crypt_data *entry;
-
- for (ptr = ieee->crypt_deinit_list.next, n = ptr->next;
- ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) {
- entry = list_entry(ptr, struct ieee80211_crypt_data, list);
-
- if (atomic_read(&entry->refcnt) != 0 && !force)
- continue;
-
- list_del(ptr);
-
- if (entry->ops)
- entry->ops->deinit(entry->priv);
- kfree(entry);
- }
-}
-
-void ieee80211_crypt_deinit_handler(unsigned long data)
-{
- struct ieee80211_device *ieee = (struct ieee80211_device *)data;
- unsigned long flags;
-
- spin_lock_irqsave(&ieee->lock, flags);
- ieee80211_crypt_deinit_entries(ieee, 0);
- if (!list_empty(&ieee->crypt_deinit_list)) {
- printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
- "deletion list\n", ieee->dev->name);
- ieee->crypt_deinit_timer.expires = jiffies + HZ;
- add_timer(&ieee->crypt_deinit_timer);
- }
- spin_unlock_irqrestore(&ieee->lock, flags);
-
-}
-
-void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
- struct ieee80211_crypt_data **crypt)
-{
- struct ieee80211_crypt_data *tmp;
- unsigned long flags;
-
- if (*crypt == NULL)
- return;
-
- tmp = *crypt;
- *crypt = NULL;
-
- /* must not run ops->deinit() while there may be pending encrypt or
- * decrypt operations. Use a list of delayed deinits to avoid needing
- * locking. */
-
- spin_lock_irqsave(&ieee->lock, flags);
- list_add(&tmp->list, &ieee->crypt_deinit_list);
- if (!timer_pending(&ieee->crypt_deinit_timer)) {
- ieee->crypt_deinit_timer.expires = jiffies + HZ;
- add_timer(&ieee->crypt_deinit_timer);
- }
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-
-int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
-{
- unsigned long flags;
- struct ieee80211_crypto_alg *alg;
-
- if (hcrypt == NULL)
- return -1;
-
- alg = kzalloc(sizeof(*alg), GFP_KERNEL);
- if (alg == NULL)
- return -ENOMEM;
-
- alg->ops = ops;
-
- spin_lock_irqsave(&hcrypt->lock, flags);
- list_add(&alg->list, &hcrypt->algs);
- spin_unlock_irqrestore(&hcrypt->lock, flags);
-
- printk(KERN_DEBUG "ieee80211_crypt: registered algorithm '%s'\n",
- ops->name);
-
- return 0;
-}
-
-int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops)
-{
- unsigned long flags;
- struct list_head *ptr;
- struct ieee80211_crypto_alg *del_alg = NULL;
-
- if (hcrypt == NULL)
- return -1;
-
- spin_lock_irqsave(&hcrypt->lock, flags);
- for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
- struct ieee80211_crypto_alg *alg =
- (struct ieee80211_crypto_alg *) ptr;
- if (alg->ops == ops) {
- list_del(&alg->list);
- del_alg = alg;
- break;
- }
- }
- spin_unlock_irqrestore(&hcrypt->lock, flags);
-
- if (del_alg) {
- printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm "
- "'%s'\n", ops->name);
- kfree(del_alg);
- }
-
- return del_alg ? 0 : -1;
-}
-
-
-struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name)
-{
- unsigned long flags;
- struct list_head *ptr;
- struct ieee80211_crypto_alg *found_alg = NULL;
-
- if (hcrypt == NULL)
- return NULL;
-
- spin_lock_irqsave(&hcrypt->lock, flags);
- for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
- struct ieee80211_crypto_alg *alg =
- (struct ieee80211_crypto_alg *) ptr;
- if (strcmp(alg->ops->name, name) == 0) {
- found_alg = alg;
- break;
- }
- }
- spin_unlock_irqrestore(&hcrypt->lock, flags);
-
- if (found_alg)
- return found_alg->ops;
- else
- return NULL;
-}
-
-
-static void * ieee80211_crypt_null_init(int keyidx) { return (void *) 1; }
-static void ieee80211_crypt_null_deinit(void *priv) {}
-
-static struct ieee80211_crypto_ops ieee80211_crypt_null = {
- .name = "NULL",
- .init = ieee80211_crypt_null_init,
- .deinit = ieee80211_crypt_null_deinit,
- .encrypt_mpdu = NULL,
- .decrypt_mpdu = NULL,
- .encrypt_msdu = NULL,
- .decrypt_msdu = NULL,
- .set_key = NULL,
- .get_key = NULL,
- .extra_prefix_len = 0,
- .extra_postfix_len = 0,
- .owner = THIS_MODULE,
-};
-
-
-int __init ieee80211_crypto_init(void)
-{
- int ret = -ENOMEM;
-
- hcrypt = kzalloc(sizeof(*hcrypt), GFP_KERNEL);
- if (!hcrypt)
- goto out;
-
- INIT_LIST_HEAD(&hcrypt->algs);
- spin_lock_init(&hcrypt->lock);
-
- ret = ieee80211_register_crypto_ops(&ieee80211_crypt_null);
- if (ret < 0) {
- kfree(hcrypt);
- hcrypt = NULL;
- }
-out:
- return ret;
-}
-
-
-void ieee80211_crypto_deinit(void)
-{
- struct list_head *ptr, *n;
-
- if (hcrypt == NULL)
- return;
-
- for (ptr = hcrypt->algs.next, n = ptr->next; ptr != &hcrypt->algs;
- ptr = n, n = ptr->next) {
- struct ieee80211_crypto_alg *alg =
- (struct ieee80211_crypto_alg *) ptr;
- list_del(ptr);
- printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm "
- "'%s' (deinit)\n", alg->ops->name);
- kfree(alg);
- }
-
- kfree(hcrypt);
-}
-
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.h b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.h
deleted file mode 100644
index ca7dd0dda82..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Original code based on Host AP (software wireless LAN access point) driver
- * for Intersil Prism2/2.5/3.
- *
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <jkmaline@cc.hut.fi>
- * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * Adaption to a generic IEEE 802.11 stack by James Ketrenos
- * <jketreno@linux.intel.com>
- *
- * Copyright (c) 2004, Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
- */
-
-/*
- * This file defines the interface to the ieee80211 crypto module.
- */
-#ifndef IEEE80211_CRYPT_H
-#define IEEE80211_CRYPT_H
-
-#include <linux/skbuff.h>
-
-struct ieee80211_crypto_ops {
- const char *name;
-
- /* init new crypto context (e.g., allocate private data space,
- * select IV, etc.); returns NULL on failure or pointer to allocated
- * private data on success */
- void * (*init)(int keyidx);
-
- /* deinitialize crypto context and free allocated private data */
- void (*deinit)(void *priv);
-
- /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
- * value from decrypt_mpdu is passed as the keyidx value for
- * decrypt_msdu. skb must have enough head and tail room for the
- * encryption; if not, error will be returned; these functions are
- * called for all MPDUs (i.e., fragments).
- */
- int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
-
- /* These functions are called for full MSDUs, i.e. full frames.
- * These can be NULL if full MSDU operations are not needed. */
- int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
- void *priv);
-
- int (*set_key)(void *key, int len, u8 *seq, void *priv);
- int (*get_key)(void *key, int len, u8 *seq, void *priv);
-
- /* procfs handler for printing out key information and possible
- * statistics */
- char * (*print_stats)(char *p, void *priv);
-
- /* maximum number of bytes added by encryption; encrypt buf is
- * allocated with extra_prefix_len bytes, copy of in_buf, and
- * extra_postfix_len; encrypt need not use all this space, but
- * the result must start at the beginning of the buffer and correct
- * length must be returned */
- int extra_prefix_len, extra_postfix_len;
-
- struct module *owner;
-};
-
-struct ieee80211_crypt_data {
- struct list_head list; /* delayed deletion list */
- struct ieee80211_crypto_ops *ops;
- void *priv;
- atomic_t refcnt;
-};
-
-int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops);
-int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops);
-struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name);
-void ieee80211_crypt_deinit_entries(struct ieee80211_device *, int);
-void ieee80211_crypt_deinit_handler(unsigned long);
-void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
- struct ieee80211_crypt_data **crypt);
-#endif
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c
deleted file mode 100644
index 48267a058d1..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
- * Host AP crypt: host-based CCMP encryption implementation for Host AP driver
- *
- * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
- */
-
-//#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_ether.h>
-#include <linux/if_arp.h>
-#include <asm/string.h>
-#include <linux/wireless.h>
-
-#include "ieee80211.h"
-
-#include <linux/crypto.h>
-
- #include <linux/scatterlist.h>
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("Host AP crypt: CCMP");
-MODULE_LICENSE("GPL");
-
-#ifndef OPENSUSE_SLED
-#define OPENSUSE_SLED 0
-#endif
-
-#define AES_BLOCK_LEN 16
-#define CCMP_HDR_LEN 8
-#define CCMP_MIC_LEN 8
-#define CCMP_TK_LEN 16
-#define CCMP_PN_LEN 6
-
-struct ieee80211_ccmp_data {
- u8 key[CCMP_TK_LEN];
- int key_set;
-
- u8 tx_pn[CCMP_PN_LEN];
- u8 rx_pn[CCMP_PN_LEN];
-
- u32 dot11RSNAStatsCCMPFormatErrors;
- u32 dot11RSNAStatsCCMPReplays;
- u32 dot11RSNAStatsCCMPDecryptErrors;
-
- int key_idx;
-
- struct crypto_tfm *tfm;
-
- /* scratch buffers for virt_to_page() (crypto API) */
- u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
- tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN];
- u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
-};
-
-void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
- const u8 pt[16], u8 ct[16])
-{
- crypto_cipher_encrypt_one((void*)tfm, ct, pt);
-}
-
-static void * ieee80211_ccmp_init(int key_idx)
-{
- struct ieee80211_ccmp_data *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
- if (priv == NULL)
- goto fail;
- priv->key_idx = key_idx;
-
- priv->tfm = (void*)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(priv->tfm)) {
- printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate "
- "crypto API aes\n");
- priv->tfm = NULL;
- goto fail;
- }
- return priv;
-
-fail:
- if (priv) {
- if (priv->tfm)
- crypto_free_cipher((void*)priv->tfm);
- kfree(priv);
- }
-
- return NULL;
-}
-
-
-static void ieee80211_ccmp_deinit(void *priv)
-{
- struct ieee80211_ccmp_data *_priv = priv;
- if (_priv && _priv->tfm)
- crypto_free_cipher((void*)_priv->tfm);
- kfree(priv);
-}
-
-
-static inline void xor_block(u8 *b, u8 *a, size_t len)
-{
- int i;
- for (i = 0; i < len; i++)
- b[i] ^= a[i];
-}
-
-
-
-static void ccmp_init_blocks(struct crypto_tfm *tfm,
- struct ieee80211_hdr_4addr *hdr,
- u8 *pn, size_t dlen, u8 *b0, u8 *auth,
- u8 *s0)
-{
- u8 *pos, qc = 0;
- size_t aad_len;
- u16 fc;
- int a4_included, qc_included;
- u8 aad[2 * AES_BLOCK_LEN];
-
- fc = le16_to_cpu(hdr->frame_ctl);
- a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS));
- /*
- qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
- (WLAN_FC_GET_STYPE(fc) & 0x08));
- */
- // fixed by David :2006.9.6
- qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
- (WLAN_FC_GET_STYPE(fc) & 0x80));
- aad_len = 22;
- if (a4_included)
- aad_len += 6;
- if (qc_included) {
- pos = (u8 *) &hdr->addr4;
- if (a4_included)
- pos += 6;
- qc = *pos & 0x0f;
- aad_len += 2;
- }
- /* CCM Initial Block:
- * Flag (Include authentication header, M=3 (8-octet MIC),
- * L=1 (2-octet Dlen))
- * Nonce: 0x00 | A2 | PN
- * Dlen */
- b0[0] = 0x59;
- b0[1] = qc;
- memcpy(b0 + 2, hdr->addr2, ETH_ALEN);
- memcpy(b0 + 8, pn, CCMP_PN_LEN);
- b0[14] = (dlen >> 8) & 0xff;
- b0[15] = dlen & 0xff;
-
- /* AAD:
- * FC with bits 4..6 and 11..13 masked to zero; 14 is always one
- * A1 | A2 | A3
- * SC with bits 4..15 (seq#) masked to zero
- * A4 (if present)
- * QC (if present)
- */
- pos = (u8 *) hdr;
- aad[0] = 0; /* aad_len >> 8 */
- aad[1] = aad_len & 0xff;
- aad[2] = pos[0] & 0x8f;
- aad[3] = pos[1] & 0xc7;
- memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN);
- pos = (u8 *) &hdr->seq_ctl;
- aad[22] = pos[0] & 0x0f;
- aad[23] = 0; /* all bits masked */
- memset(aad + 24, 0, 8);
- if (a4_included)
- memcpy(aad + 24, hdr->addr4, ETH_ALEN);
- if (qc_included) {
- aad[a4_included ? 30 : 24] = qc;
- /* rest of QC masked */
- }
-
- /* Start with the first block and AAD */
- ieee80211_ccmp_aes_encrypt(tfm, b0, auth);
- xor_block(auth, aad, AES_BLOCK_LEN);
- ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
- xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN);
- ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
- b0[0] &= 0x07;
- b0[14] = b0[15] = 0;
- ieee80211_ccmp_aes_encrypt(tfm, b0, s0);
-}
-
-
-
-static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct ieee80211_ccmp_data *key = priv;
- int data_len, i;
- u8 *pos;
- struct ieee80211_hdr_4addr *hdr;
- cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-
- if (skb_headroom(skb) < CCMP_HDR_LEN ||
- skb_tailroom(skb) < CCMP_MIC_LEN ||
- skb->len < hdr_len)
- return -1;
-
- data_len = skb->len - hdr_len;
- pos = skb_push(skb, CCMP_HDR_LEN);
- memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
- pos += hdr_len;
-// mic = skb_put(skb, CCMP_MIC_LEN);
-
- i = CCMP_PN_LEN - 1;
- while (i >= 0) {
- key->tx_pn[i]++;
- if (key->tx_pn[i] != 0)
- break;
- i--;
- }
-
- *pos++ = key->tx_pn[5];
- *pos++ = key->tx_pn[4];
- *pos++ = 0;
- *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */;
- *pos++ = key->tx_pn[3];
- *pos++ = key->tx_pn[2];
- *pos++ = key->tx_pn[1];
- *pos++ = key->tx_pn[0];
-
-
- hdr = (struct ieee80211_hdr_4addr *) skb->data;
- if (!tcb_desc->bHwSec)
- {
- int blocks, last, len;
- u8 *mic;
- u8 *b0 = key->tx_b0;
- u8 *b = key->tx_b;
- u8 *e = key->tx_e;
- u8 *s0 = key->tx_s0;
-
- //mic is moved to here by john
- mic = skb_put(skb, CCMP_MIC_LEN);
-
- ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
-
- blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
- last = data_len % AES_BLOCK_LEN;
-
- for (i = 1; i <= blocks; i++) {
- len = (i == blocks && last) ? last : AES_BLOCK_LEN;
- /* Authentication */
- xor_block(b, pos, len);
- ieee80211_ccmp_aes_encrypt(key->tfm, b, b);
- /* Encryption, with counter */
- b0[14] = (i >> 8) & 0xff;
- b0[15] = i & 0xff;
- ieee80211_ccmp_aes_encrypt(key->tfm, b0, e);
- xor_block(pos, e, len);
- pos += len;
- }
-
- for (i = 0; i < CCMP_MIC_LEN; i++)
- mic[i] = b[i] ^ s0[i];
- }
- return 0;
-}
-
-
-static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct ieee80211_ccmp_data *key = priv;
- u8 keyidx, *pos;
- struct ieee80211_hdr_4addr *hdr;
- cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- u8 pn[6];
-
- if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) {
- key->dot11RSNAStatsCCMPFormatErrors++;
- return -1;
- }
-
- hdr = (struct ieee80211_hdr_4addr *) skb->data;
- pos = skb->data + hdr_len;
- keyidx = pos[3];
- if (!(keyidx & (1 << 5))) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "CCMP: received packet without ExtIV"
- " flag from %pM\n", hdr->addr2);
- }
- key->dot11RSNAStatsCCMPFormatErrors++;
- return -2;
- }
- keyidx >>= 6;
- if (key->key_idx != keyidx) {
- printk(KERN_DEBUG "CCMP: RX tkey->key_idx=%d frame "
- "keyidx=%d priv=%p\n", key->key_idx, keyidx, priv);
- return -6;
- }
- if (!key->key_set) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "CCMP: received packet from %pM"
- " with keyid=%d that does not have a configured"
- " key\n", hdr->addr2, keyidx);
- }
- return -3;
- }
-
- pn[0] = pos[7];
- pn[1] = pos[6];
- pn[2] = pos[5];
- pn[3] = pos[4];
- pn[4] = pos[1];
- pn[5] = pos[0];
- pos += 8;
-
- if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) {
- key->dot11RSNAStatsCCMPReplays++;
- return -4;
- }
- if (!tcb_desc->bHwSec)
- {
- size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN;
- u8 *mic = skb->data + skb->len - CCMP_MIC_LEN;
- u8 *b0 = key->rx_b0;
- u8 *b = key->rx_b;
- u8 *a = key->rx_a;
- int i, blocks, last, len;
-
-
- ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
- xor_block(mic, b, CCMP_MIC_LEN);
-
- blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
- last = data_len % AES_BLOCK_LEN;
-
- for (i = 1; i <= blocks; i++) {
- len = (i == blocks && last) ? last : AES_BLOCK_LEN;
- /* Decrypt, with counter */
- b0[14] = (i >> 8) & 0xff;
- b0[15] = i & 0xff;
- ieee80211_ccmp_aes_encrypt(key->tfm, b0, b);
- xor_block(pos, b, len);
- /* Authentication */
- xor_block(a, pos, len);
- ieee80211_ccmp_aes_encrypt(key->tfm, a, a);
- pos += len;
- }
-
- if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "CCMP: decrypt failed: STA="
- "%pM\n", hdr->addr2);
- }
- key->dot11RSNAStatsCCMPDecryptErrors++;
- return -5;
- }
-
- memcpy(key->rx_pn, pn, CCMP_PN_LEN);
- }
- /* Remove hdr and MIC */
- memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
- skb_pull(skb, CCMP_HDR_LEN);
- skb_trim(skb, skb->len - CCMP_MIC_LEN);
-
- return keyidx;
-}
-
-
-static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
-{
- struct ieee80211_ccmp_data *data = priv;
- int keyidx;
- struct crypto_tfm *tfm = data->tfm;
-
- keyidx = data->key_idx;
- memset(data, 0, sizeof(*data));
- data->key_idx = keyidx;
- data->tfm = tfm;
- if (len == CCMP_TK_LEN) {
- memcpy(data->key, key, CCMP_TK_LEN);
- data->key_set = 1;
- if (seq) {
- data->rx_pn[0] = seq[5];
- data->rx_pn[1] = seq[4];
- data->rx_pn[2] = seq[3];
- data->rx_pn[3] = seq[2];
- data->rx_pn[4] = seq[1];
- data->rx_pn[5] = seq[0];
- }
- crypto_cipher_setkey((void*)data->tfm, data->key, CCMP_TK_LEN);
- } else if (len == 0)
- data->key_set = 0;
- else
- return -1;
-
- return 0;
-}
-
-
-static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
-{
- struct ieee80211_ccmp_data *data = priv;
-
- if (len < CCMP_TK_LEN)
- return -1;
-
- if (!data->key_set)
- return 0;
- memcpy(key, data->key, CCMP_TK_LEN);
-
- if (seq) {
- seq[0] = data->tx_pn[5];
- seq[1] = data->tx_pn[4];
- seq[2] = data->tx_pn[3];
- seq[3] = data->tx_pn[2];
- seq[4] = data->tx_pn[1];
- seq[5] = data->tx_pn[0];
- }
-
- return CCMP_TK_LEN;
-}
-
-
-static char * ieee80211_ccmp_print_stats(char *p, void *priv)
-{
- struct ieee80211_ccmp_data *ccmp = priv;
- int i;
-
- p += sprintf(p, "key[%d] alg=CCMP key_set=%d tx_pn=",
- ccmp->key_idx, ccmp->key_set);
-
- for (i = 0; i < ARRAY_SIZE(ccmp->tx_pn); i++)
- p += sprintf(p, "%02x", ccmp->tx_pn[i]);
-
- sprintf(p, " rx_pn=");
- for (i = 0; i < ARRAY_SIZE(ccmp->rx_pn); i++)
- p += sprintf(p, "%02x", ccmp->tx_pn[i]);
-
- p += sprintf(p, " format_errors=%d replays=%d decrypt_errors=%d\n",
- ccmp->dot11RSNAStatsCCMPFormatErrors,
- ccmp->dot11RSNAStatsCCMPReplays,
- ccmp->dot11RSNAStatsCCMPDecryptErrors);
-
- return p;
-}
-
-void ieee80211_ccmp_null(void)
-{
- return;
-}
-
-static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
- .name = "CCMP",
- .init = ieee80211_ccmp_init,
- .deinit = ieee80211_ccmp_deinit,
- .encrypt_mpdu = ieee80211_ccmp_encrypt,
- .decrypt_mpdu = ieee80211_ccmp_decrypt,
- .encrypt_msdu = NULL,
- .decrypt_msdu = NULL,
- .set_key = ieee80211_ccmp_set_key,
- .get_key = ieee80211_ccmp_get_key,
- .print_stats = ieee80211_ccmp_print_stats,
- .extra_prefix_len = CCMP_HDR_LEN,
- .extra_postfix_len = CCMP_MIC_LEN,
- .owner = THIS_MODULE,
-};
-
-
-int __init ieee80211_crypto_ccmp_init(void)
-{
- return ieee80211_register_crypto_ops(&ieee80211_crypt_ccmp);
-}
-
-
-void ieee80211_crypto_ccmp_exit(void)
-{
- ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp);
-}
-
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c
deleted file mode 100644
index ed623a911e4..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c
+++ /dev/null
@@ -1,809 +0,0 @@
-/*
- * Host AP crypt: host-based TKIP encryption implementation for Host AP driver
- *
- * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
- */
-
-//#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_ether.h>
-#include <linux/if_arp.h>
-#include <asm/string.h>
-
-#include "ieee80211.h"
-
-
-#include <linux/crypto.h>
-#include <linux/scatterlist.h>
-
-#include <linux/crc32.h>
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("Host AP crypt: TKIP");
-MODULE_LICENSE("GPL");
-
-#ifndef OPENSUSE_SLED
-#define OPENSUSE_SLED 0
-#endif
-
-struct ieee80211_tkip_data {
-#define TKIP_KEY_LEN 32
- u8 key[TKIP_KEY_LEN];
- int key_set;
-
- u32 tx_iv32;
- u16 tx_iv16;
- u16 tx_ttak[5];
- int tx_phase1_done;
-
- u32 rx_iv32;
- u16 rx_iv16;
- u16 rx_ttak[5];
- int rx_phase1_done;
- u32 rx_iv32_new;
- u16 rx_iv16_new;
-
- u32 dot11RSNAStatsTKIPReplays;
- u32 dot11RSNAStatsTKIPICVErrors;
- u32 dot11RSNAStatsTKIPLocalMICFailures;
-
- int key_idx;
- struct crypto_blkcipher *rx_tfm_arc4;
- struct crypto_hash *rx_tfm_michael;
- struct crypto_blkcipher *tx_tfm_arc4;
- struct crypto_hash *tx_tfm_michael;
- /* scratch buffers for virt_to_page() (crypto API) */
- u8 rx_hdr[16], tx_hdr[16];
-};
-
-static void * ieee80211_tkip_init(int key_idx)
-{
- struct ieee80211_tkip_data *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
- if (priv == NULL)
- goto fail;
- priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(priv->tx_tfm_arc4)) {
- printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
- "crypto API arc4\n");
- priv->tx_tfm_arc4 = NULL;
- goto fail;
- }
-
- priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(priv->tx_tfm_michael)) {
- printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
- "crypto API michael_mic\n");
- priv->tx_tfm_michael = NULL;
- goto fail;
- }
-
- priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(priv->rx_tfm_arc4)) {
- printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
- "crypto API arc4\n");
- priv->rx_tfm_arc4 = NULL;
- goto fail;
- }
-
- priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(priv->rx_tfm_michael)) {
- printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
- "crypto API michael_mic\n");
- priv->rx_tfm_michael = NULL;
- goto fail;
- }
- return priv;
-
-fail:
- if (priv) {
- if (priv->tx_tfm_michael)
- crypto_free_hash(priv->tx_tfm_michael);
- if (priv->tx_tfm_arc4)
- crypto_free_blkcipher(priv->tx_tfm_arc4);
- if (priv->rx_tfm_michael)
- crypto_free_hash(priv->rx_tfm_michael);
- if (priv->rx_tfm_arc4)
- crypto_free_blkcipher(priv->rx_tfm_arc4);
- kfree(priv);
- }
-
- return NULL;
-}
-
-
-static void ieee80211_tkip_deinit(void *priv)
-{
- struct ieee80211_tkip_data *_priv = priv;
- if (_priv) {
- if (_priv->tx_tfm_michael)
- crypto_free_hash(_priv->tx_tfm_michael);
- if (_priv->tx_tfm_arc4)
- crypto_free_blkcipher(_priv->tx_tfm_arc4);
- if (_priv->rx_tfm_michael)
- crypto_free_hash(_priv->rx_tfm_michael);
- if (_priv->rx_tfm_arc4)
- crypto_free_blkcipher(_priv->rx_tfm_arc4);
- }
- kfree(priv);
-}
-
-
-static inline u16 RotR1(u16 val)
-{
- return (val >> 1) | (val << 15);
-}
-
-
-static inline u8 Lo8(u16 val)
-{
- return val & 0xff;
-}
-
-
-static inline u8 Hi8(u16 val)
-{
- return val >> 8;
-}
-
-
-static inline u16 Lo16(u32 val)
-{
- return val & 0xffff;
-}
-
-
-static inline u16 Hi16(u32 val)
-{
- return val >> 16;
-}
-
-
-static inline u16 Mk16(u8 hi, u8 lo)
-{
- return lo | (((u16) hi) << 8);
-}
-
-
-static inline u16 Mk16_le(u16 *v)
-{
- return le16_to_cpu(*v);
-}
-
-
-static const u16 Sbox[256] =
-{
- 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
- 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
- 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
- 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
- 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
- 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
- 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
- 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
- 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
- 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
- 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
- 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
- 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
- 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
- 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
- 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
- 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
- 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
- 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
- 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
- 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
- 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
- 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
- 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
- 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
- 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
- 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
- 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
- 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
- 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
- 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
- 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
-};
-
-
-static inline u16 _S_(u16 v)
-{
- u16 t = Sbox[Hi8(v)];
- return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
-}
-
-
-#define PHASE1_LOOP_COUNT 8
-
-
-static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32)
-{
- int i, j;
-
- /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */
- TTAK[0] = Lo16(IV32);
- TTAK[1] = Hi16(IV32);
- TTAK[2] = Mk16(TA[1], TA[0]);
- TTAK[3] = Mk16(TA[3], TA[2]);
- TTAK[4] = Mk16(TA[5], TA[4]);
-
- for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
- j = 2 * (i & 1);
- TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j]));
- TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j]));
- TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j]));
- TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j]));
- TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i;
- }
-}
-
-
-static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
- u16 IV16)
-{
- /* Make temporary area overlap WEP seed so that the final copy can be
- * avoided on little endian hosts. */
- u16 *PPK = (u16 *) &WEPSeed[4];
-
- /* Step 1 - make copy of TTAK and bring in TSC */
- PPK[0] = TTAK[0];
- PPK[1] = TTAK[1];
- PPK[2] = TTAK[2];
- PPK[3] = TTAK[3];
- PPK[4] = TTAK[4];
- PPK[5] = TTAK[4] + IV16;
-
- /* Step 2 - 96-bit bijective mixing using S-box */
- PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0]));
- PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2]));
- PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4]));
- PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6]));
- PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8]));
- PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10]));
-
- PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12]));
- PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14]));
- PPK[2] += RotR1(PPK[1]);
- PPK[3] += RotR1(PPK[2]);
- PPK[4] += RotR1(PPK[3]);
- PPK[5] += RotR1(PPK[4]);
-
- /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value
- * WEPSeed[0..2] is transmitted as WEP IV */
- WEPSeed[0] = Hi8(IV16);
- WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
- WEPSeed[2] = Lo8(IV16);
- WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1);
-
-#ifdef __BIG_ENDIAN
- {
- int i;
- for (i = 0; i < 6; i++)
- PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8);
- }
-#endif
-}
-
-
-static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct ieee80211_tkip_data *tkey = priv;
- int len;
- u8 *pos;
- struct ieee80211_hdr_4addr *hdr;
- cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-
- struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
- int ret = 0;
- u8 rc4key[16], *icv;
- u32 crc;
- struct scatterlist sg;
-
- if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
- skb->len < hdr_len)
- return -1;
-
- hdr = (struct ieee80211_hdr_4addr *) skb->data;
-
- if (!tcb_desc->bHwSec)
- {
- if (!tkey->tx_phase1_done) {
- tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
- tkey->tx_iv32);
- tkey->tx_phase1_done = 1;
- }
- tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
- }
- else
- tkey->tx_phase1_done = 1;
-
-
- len = skb->len - hdr_len;
- pos = skb_push(skb, 8);
- memmove(pos, pos + 8, hdr_len);
- pos += hdr_len;
-
- if (tcb_desc->bHwSec)
- {
- *pos++ = Hi8(tkey->tx_iv16);
- *pos++ = (Hi8(tkey->tx_iv16) | 0x20) & 0x7F;
- *pos++ = Lo8(tkey->tx_iv16);
- }
- else
- {
- *pos++ = rc4key[0];
- *pos++ = rc4key[1];
- *pos++ = rc4key[2];
- }
-
- *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */;
- *pos++ = tkey->tx_iv32 & 0xff;
- *pos++ = (tkey->tx_iv32 >> 8) & 0xff;
- *pos++ = (tkey->tx_iv32 >> 16) & 0xff;
- *pos++ = (tkey->tx_iv32 >> 24) & 0xff;
-
- if (!tcb_desc->bHwSec)
- {
- icv = skb_put(skb, 4);
- crc = ~crc32_le(~0, pos, len);
- icv[0] = crc;
- icv[1] = crc >> 8;
- icv[2] = crc >> 16;
- icv[3] = crc >> 24;
- crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
- sg_init_one(&sg, pos, len+4);
- ret= crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
-
- }
-
- tkey->tx_iv16++;
- if (tkey->tx_iv16 == 0) {
- tkey->tx_phase1_done = 0;
- tkey->tx_iv32++;
- }
-
- if (!tcb_desc->bHwSec)
- return ret;
- else
- return 0;
-
-
-}
-
-static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct ieee80211_tkip_data *tkey = priv;
- u8 keyidx, *pos;
- u32 iv32;
- u16 iv16;
- struct ieee80211_hdr_4addr *hdr;
- cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- struct blkcipher_desc desc = {.tfm = tkey->rx_tfm_arc4};
- u8 rc4key[16];
- u8 icv[4];
- u32 crc;
- struct scatterlist sg;
- int plen;
- if (skb->len < hdr_len + 8 + 4)
- return -1;
-
- hdr = (struct ieee80211_hdr_4addr *) skb->data;
- pos = skb->data + hdr_len;
- keyidx = pos[3];
- if (!(keyidx & (1 << 5))) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: received packet without ExtIV"
- " flag from %pM\n", hdr->addr2);
- }
- return -2;
- }
- keyidx >>= 6;
- if (tkey->key_idx != keyidx) {
- printk(KERN_DEBUG "TKIP: RX tkey->key_idx=%d frame "
- "keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv);
- return -6;
- }
- if (!tkey->key_set) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: received packet from %pM"
- " with keyid=%d that does not have a configured"
- " key\n", hdr->addr2, keyidx);
- }
- return -3;
- }
- iv16 = (pos[0] << 8) | pos[2];
- iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
- pos += 8;
-
- if (!tcb_desc->bHwSec)
- {
- if (iv32 < tkey->rx_iv32 ||
- (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: replay detected: STA=%pM"
- " previous TSC %08x%04x received TSC "
- "%08x%04x\n", hdr->addr2,
- tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
- }
- tkey->dot11RSNAStatsTKIPReplays++;
- return -4;
- }
-
- if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) {
- tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32);
- tkey->rx_phase1_done = 1;
- }
- tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16);
-
- plen = skb->len - hdr_len - 12;
-
- crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- sg_init_one(&sg, pos, plen+4);
- if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG ": TKIP: failed to decrypt "
- "received packet from %pM\n",
- hdr->addr2);
- }
- return -7;
- }
-
- crc = ~crc32_le(~0, pos, plen);
- icv[0] = crc;
- icv[1] = crc >> 8;
- icv[2] = crc >> 16;
- icv[3] = crc >> 24;
-
- if (memcmp(icv, pos + plen, 4) != 0) {
- if (iv32 != tkey->rx_iv32) {
- /* Previously cached Phase1 result was already lost, so
- * it needs to be recalculated for the next packet. */
- tkey->rx_phase1_done = 0;
- }
- if (net_ratelimit()) {
- printk(KERN_DEBUG
- "TKIP: ICV error detected: STA=%pM\n",
- hdr->addr2);
- }
- tkey->dot11RSNAStatsTKIPICVErrors++;
- return -5;
- }
-
- }
-
- /* Update real counters only after Michael MIC verification has
- * completed */
- tkey->rx_iv32_new = iv32;
- tkey->rx_iv16_new = iv16;
-
- /* Remove IV and ICV */
- memmove(skb->data + 8, skb->data, hdr_len);
- skb_pull(skb, 8);
- skb_trim(skb, skb->len - 4);
-
- return keyidx;
-}
-
-
-static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
- u8 * data, size_t data_len, u8 * mic)
-{
- struct hash_desc desc;
- struct scatterlist sg[2];
-
- if (tfm_michael == NULL) {
- printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
- return -1;
- }
- sg_init_table(sg, 2);
- sg_set_buf(&sg[0], hdr, 16);
- sg_set_buf(&sg[1], data, data_len);
-
- if (crypto_hash_setkey(tfm_michael, key, 8))
- return -1;
-
- desc.tfm = tfm_michael;
- desc.flags = 0;
- return crypto_hash_digest(&desc, sg, data_len + 16, mic);
-}
-
-
-
-static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
-{
- struct ieee80211_hdr_4addr *hdr11;
-
- hdr11 = (struct ieee80211_hdr_4addr *) skb->data;
- switch (le16_to_cpu(hdr11->frame_ctl) &
- (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
- case IEEE80211_FCTL_TODS:
- memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
- memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
- break;
- case IEEE80211_FCTL_FROMDS:
- memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
- memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */
- break;
- case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
- memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
- memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
- break;
- case 0:
- memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
- memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
- break;
- }
-
- hdr[12] = 0; /* priority */
-
- hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
-}
-
-
-static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct ieee80211_tkip_data *tkey = priv;
- u8 *pos;
- struct ieee80211_hdr_4addr *hdr;
-
- hdr = (struct ieee80211_hdr_4addr *) skb->data;
-
- if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
- printk(KERN_DEBUG "Invalid packet for Michael MIC add "
- "(tailroom=%d hdr_len=%d skb->len=%d)\n",
- skb_tailroom(skb), hdr_len, skb->len);
- return -1;
- }
-
- michael_mic_hdr(skb, tkey->tx_hdr);
-
- // { david, 2006.9.1
- // fix the wpa process with wmm enabled.
- if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
- tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
- }
- // }
- pos = skb_put(skb, 8);
- if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
- skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
- return -1;
-
- return 0;
-}
-
-
-#if WIRELESS_EXT >= 18
-static void ieee80211_michael_mic_failure(struct net_device *dev,
- struct ieee80211_hdr_4addr *hdr,
- int keyidx)
-{
- union iwreq_data wrqu;
- struct iw_michaelmicfailure ev;
-
- /* TODO: needed parameters: count, keyid, key type, TSC */
- memset(&ev, 0, sizeof(ev));
- ev.flags = keyidx & IW_MICFAILURE_KEY_ID;
- if (hdr->addr1[0] & 0x01)
- ev.flags |= IW_MICFAILURE_GROUP;
- else
- ev.flags |= IW_MICFAILURE_PAIRWISE;
- ev.src_addr.sa_family = ARPHRD_ETHER;
- memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = sizeof(ev);
- wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev);
-}
-#elif WIRELESS_EXT >= 15
-static void ieee80211_michael_mic_failure(struct net_device *dev,
- struct ieee80211_hdr_4addr *hdr,
- int keyidx)
-{
- union iwreq_data wrqu;
- char buf[128];
-
- /* TODO: needed parameters: count, keyid, key type, TSC */
- sprintf(buf, "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr="
- "%pM)", keyidx, hdr->addr1[0] & 0x01 ? "broad" : "uni",
- hdr->addr2);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
- wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
-}
-#else /* WIRELESS_EXT >= 15 */
-static inline void ieee80211_michael_mic_failure(struct net_device *dev,
- struct ieee80211_hdr_4addr *hdr,
- int keyidx)
-{
-}
-#endif /* WIRELESS_EXT >= 15 */
-
-static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
- int hdr_len, void *priv)
-{
- struct ieee80211_tkip_data *tkey = priv;
- u8 mic[8];
- struct ieee80211_hdr_4addr *hdr;
-
- hdr = (struct ieee80211_hdr_4addr *) skb->data;
-
- if (!tkey->key_set)
- return -1;
-
- michael_mic_hdr(skb, tkey->rx_hdr);
- // { david, 2006.9.1
- // fix the wpa process with wmm enabled.
- if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
- tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
- }
- // }
-
- if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
- skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
- return -1;
- if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
- struct ieee80211_hdr_4addr *hdr;
- hdr = (struct ieee80211_hdr_4addr *) skb->data;
- printk(KERN_DEBUG "%s: Michael MIC verification failed for "
- "MSDU from %pM keyidx=%d\n",
- skb->dev ? skb->dev->name : "N/A", hdr->addr2,
- keyidx);
- if (skb->dev)
- ieee80211_michael_mic_failure(skb->dev, hdr, keyidx);
- tkey->dot11RSNAStatsTKIPLocalMICFailures++;
- return -1;
- }
-
- /* Update TSC counters for RX now that the packet verification has
- * completed. */
- tkey->rx_iv32 = tkey->rx_iv32_new;
- tkey->rx_iv16 = tkey->rx_iv16_new;
-
- skb_trim(skb, skb->len - 8);
-
- return 0;
-}
-
-
-static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
-{
- struct ieee80211_tkip_data *tkey = priv;
- int keyidx;
- struct crypto_hash *tfm = tkey->tx_tfm_michael;
- struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
- struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
-
- keyidx = tkey->key_idx;
- memset(tkey, 0, sizeof(*tkey));
- tkey->key_idx = keyidx;
- tkey->tx_tfm_michael = tfm;
- tkey->tx_tfm_arc4 = tfm2;
- tkey->rx_tfm_michael = tfm3;
- tkey->rx_tfm_arc4 = tfm4;
-
- if (len == TKIP_KEY_LEN) {
- memcpy(tkey->key, key, TKIP_KEY_LEN);
- tkey->key_set = 1;
- tkey->tx_iv16 = 1; /* TSC is initialized to 1 */
- if (seq) {
- tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) |
- (seq[3] << 8) | seq[2];
- tkey->rx_iv16 = (seq[1] << 8) | seq[0];
- }
- } else if (len == 0)
- tkey->key_set = 0;
- else
- return -1;
-
- return 0;
-}
-
-
-static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
-{
- struct ieee80211_tkip_data *tkey = priv;
-
- if (len < TKIP_KEY_LEN)
- return -1;
-
- if (!tkey->key_set)
- return 0;
- memcpy(key, tkey->key, TKIP_KEY_LEN);
-
- if (seq) {
- /* Return the sequence number of the last transmitted frame. */
- u16 iv16 = tkey->tx_iv16;
- u32 iv32 = tkey->tx_iv32;
- if (iv16 == 0)
- iv32--;
- iv16--;
- seq[0] = tkey->tx_iv16;
- seq[1] = tkey->tx_iv16 >> 8;
- seq[2] = tkey->tx_iv32;
- seq[3] = tkey->tx_iv32 >> 8;
- seq[4] = tkey->tx_iv32 >> 16;
- seq[5] = tkey->tx_iv32 >> 24;
- }
-
- return TKIP_KEY_LEN;
-}
-
-
-static char * ieee80211_tkip_print_stats(char *p, void *priv)
-{
- struct ieee80211_tkip_data *tkip = priv;
- p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
- "tx_pn=%02x%02x%02x%02x%02x%02x "
- "rx_pn=%02x%02x%02x%02x%02x%02x "
- "replays=%d icv_errors=%d local_mic_failures=%d\n",
- tkip->key_idx, tkip->key_set,
- (tkip->tx_iv32 >> 24) & 0xff,
- (tkip->tx_iv32 >> 16) & 0xff,
- (tkip->tx_iv32 >> 8) & 0xff,
- tkip->tx_iv32 & 0xff,
- (tkip->tx_iv16 >> 8) & 0xff,
- tkip->tx_iv16 & 0xff,
- (tkip->rx_iv32 >> 24) & 0xff,
- (tkip->rx_iv32 >> 16) & 0xff,
- (tkip->rx_iv32 >> 8) & 0xff,
- tkip->rx_iv32 & 0xff,
- (tkip->rx_iv16 >> 8) & 0xff,
- tkip->rx_iv16 & 0xff,
- tkip->dot11RSNAStatsTKIPReplays,
- tkip->dot11RSNAStatsTKIPICVErrors,
- tkip->dot11RSNAStatsTKIPLocalMICFailures);
- return p;
-}
-
-
-static struct ieee80211_crypto_ops ieee80211_crypt_tkip = {
- .name = "TKIP",
- .init = ieee80211_tkip_init,
- .deinit = ieee80211_tkip_deinit,
- .encrypt_mpdu = ieee80211_tkip_encrypt,
- .decrypt_mpdu = ieee80211_tkip_decrypt,
- .encrypt_msdu = ieee80211_michael_mic_add,
- .decrypt_msdu = ieee80211_michael_mic_verify,
- .set_key = ieee80211_tkip_set_key,
- .get_key = ieee80211_tkip_get_key,
- .print_stats = ieee80211_tkip_print_stats,
- .extra_prefix_len = 4 + 4, /* IV + ExtIV */
- .extra_postfix_len = 8 + 4, /* MIC + ICV */
- .owner = THIS_MODULE,
-};
-
-
-int __init ieee80211_crypto_tkip_init(void)
-{
- return ieee80211_register_crypto_ops(&ieee80211_crypt_tkip);
-}
-
-
-void ieee80211_crypto_tkip_exit(void)
-{
- ieee80211_unregister_crypto_ops(&ieee80211_crypt_tkip);
-}
-
-void ieee80211_tkip_null(void)
-{
- return;
-}
-
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c
deleted file mode 100644
index 55043913afc..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- * Host AP crypt: host-based WEP encryption implementation for Host AP driver
- *
- * Copyright (c) 2002-2004, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/skbuff.h>
-#include <asm/string.h>
-
-#include "ieee80211.h"
-
-
-#include <linux/crypto.h>
-
-#include <linux/scatterlist.h>
-#include <linux/crc32.h>
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("Host AP crypt: WEP");
-MODULE_LICENSE("GPL");
-#ifndef OPENSUSE_SLED
-#define OPENSUSE_SLED 0
-#endif
-
-struct prism2_wep_data {
- u32 iv;
-#define WEP_KEY_LEN 13
- u8 key[WEP_KEY_LEN + 1];
- u8 key_len;
- u8 key_idx;
- struct crypto_blkcipher *tx_tfm;
- struct crypto_blkcipher *rx_tfm;
-};
-
-
-static void * prism2_wep_init(int keyidx)
-{
- struct prism2_wep_data *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
- if (priv == NULL)
- goto fail;
- priv->key_idx = keyidx;
-
- priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(priv->tx_tfm)) {
- printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
- "crypto API arc4\n");
- priv->tx_tfm = NULL;
- goto fail;
- }
- priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(priv->rx_tfm)) {
- printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
- "crypto API arc4\n");
- priv->rx_tfm = NULL;
- goto fail;
- }
-
- /* start WEP IV from a random value */
- get_random_bytes(&priv->iv, 4);
-
- return priv;
-
-fail:
- if (priv) {
- if (priv->tx_tfm)
- crypto_free_blkcipher(priv->tx_tfm);
- if (priv->rx_tfm)
- crypto_free_blkcipher(priv->rx_tfm);
- kfree(priv);
- }
- return NULL;
-}
-
-
-static void prism2_wep_deinit(void *priv)
-{
- struct prism2_wep_data *_priv = priv;
- if (_priv) {
- if (_priv->tx_tfm)
- crypto_free_blkcipher(_priv->tx_tfm);
- if (_priv->rx_tfm)
- crypto_free_blkcipher(_priv->rx_tfm);
- }
- kfree(priv);
-}
-
-/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
- * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
- * so the payload length increases with 8 bytes.
- *
- * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
- */
-static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct prism2_wep_data *wep = priv;
- u32 klen, len;
- u8 key[WEP_KEY_LEN + 3];
- u8 *pos;
- cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
- u32 crc;
- u8 *icv;
- struct scatterlist sg;
- if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
- skb->len < hdr_len)
- return -1;
-
- len = skb->len - hdr_len;
- pos = skb_push(skb, 4);
- memmove(pos, pos + 4, hdr_len);
- pos += hdr_len;
-
- klen = 3 + wep->key_len;
-
- wep->iv++;
-
- /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
- * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
- * can be used to speedup attacks, so avoid using them. */
- if ((wep->iv & 0xff00) == 0xff00) {
- u8 B = (wep->iv >> 16) & 0xff;
- if (B >= 3 && B < klen)
- wep->iv += 0x0100;
- }
-
- /* Prepend 24-bit IV to RC4 key and TX frame */
- *pos++ = key[0] = (wep->iv >> 16) & 0xff;
- *pos++ = key[1] = (wep->iv >> 8) & 0xff;
- *pos++ = key[2] = wep->iv & 0xff;
- *pos++ = wep->key_idx << 6;
-
- /* Copy rest of the WEP key (the secret part) */
- memcpy(key + 3, wep->key, wep->key_len);
-
- if (!tcb_desc->bHwSec)
- {
-
- /* Append little-endian CRC32 and encrypt it to produce ICV */
- crc = ~crc32_le(~0, pos, len);
- icv = skb_put(skb, 4);
- icv[0] = crc;
- icv[1] = crc >> 8;
- icv[2] = crc >> 16;
- icv[3] = crc >> 24;
-
- crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
- sg_init_one(&sg, pos, len+4);
- return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
- }
-
- return 0;
-}
-
-
-/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
- * the frame: IV (4 bytes), encrypted payload (including SNAP header),
- * ICV (4 bytes). len includes both IV and ICV.
- *
- * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
- * failure. If frame is OK, IV and ICV will be removed.
- */
-static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct prism2_wep_data *wep = priv;
- u32 klen, plen;
- u8 key[WEP_KEY_LEN + 3];
- u8 keyidx, *pos;
- cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- struct blkcipher_desc desc = {.tfm = wep->rx_tfm};
- u32 crc;
- u8 icv[4];
- struct scatterlist sg;
- if (skb->len < hdr_len + 8)
- return -1;
-
- pos = skb->data + hdr_len;
- key[0] = *pos++;
- key[1] = *pos++;
- key[2] = *pos++;
- keyidx = *pos++ >> 6;
- if (keyidx != wep->key_idx)
- return -1;
-
- klen = 3 + wep->key_len;
-
- /* Copy rest of the WEP key (the secret part) */
- memcpy(key + 3, wep->key, wep->key_len);
-
- /* Apply RC4 to data and compute CRC32 over decrypted data */
- plen = skb->len - hdr_len - 8;
-
- if (!tcb_desc->bHwSec)
- {
- crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
- sg_init_one(&sg, pos, plen+4);
- if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
- return -7;
- crc = ~crc32_le(~0, pos, plen);
- icv[0] = crc;
- icv[1] = crc >> 8;
- icv[2] = crc >> 16;
- icv[3] = crc >> 24;
- if (memcmp(icv, pos + plen, 4) != 0) {
- /* ICV mismatch - drop frame */
- return -2;
- }
- }
- /* Remove IV and ICV */
- memmove(skb->data + 4, skb->data, hdr_len);
- skb_pull(skb, 4);
- skb_trim(skb, skb->len - 4);
-
- return 0;
-}
-
-
-static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
-{
- struct prism2_wep_data *wep = priv;
-
- if (len < 0 || len > WEP_KEY_LEN)
- return -1;
-
- memcpy(wep->key, key, len);
- wep->key_len = len;
-
- return 0;
-}
-
-
-static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
-{
- struct prism2_wep_data *wep = priv;
-
- if (len < wep->key_len)
- return -1;
-
- memcpy(key, wep->key, wep->key_len);
-
- return wep->key_len;
-}
-
-
-static char * prism2_wep_print_stats(char *p, void *priv)
-{
- struct prism2_wep_data *wep = priv;
- p += sprintf(p, "key[%d] alg=WEP len=%d\n",
- wep->key_idx, wep->key_len);
- return p;
-}
-
-
-static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
- .name = "WEP",
- .init = prism2_wep_init,
- .deinit = prism2_wep_deinit,
- .encrypt_mpdu = prism2_wep_encrypt,
- .decrypt_mpdu = prism2_wep_decrypt,
- .encrypt_msdu = NULL,
- .decrypt_msdu = NULL,
- .set_key = prism2_wep_set_key,
- .get_key = prism2_wep_get_key,
- .print_stats = prism2_wep_print_stats,
- .extra_prefix_len = 4, /* IV */
- .extra_postfix_len = 4, /* ICV */
- .owner = THIS_MODULE,
-};
-
-
-int __init ieee80211_crypto_wep_init(void)
-{
- return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
-}
-
-
-void __exit ieee80211_crypto_wep_exit(void)
-{
- ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
-}
-
-void ieee80211_wep_null(void)
-{
- return;
-}
-
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
deleted file mode 100644
index 37a65ff4b12..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
+++ /dev/null
@@ -1,352 +0,0 @@
-/*******************************************************************************
-
- Copyright(c) 2004 Intel Corporation. All rights reserved.
-
- Portions of this file are based on the WEP enablement code provided by the
- Host AP project hostap-drivers v0.1.3
- Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- <jkmaline@cc.hut.fi>
- Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc., 59
- Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
- The full GNU General Public License is included in this distribution in the
- file called LICENSE.
-
- Contact Information:
- James P. Ketrenos <ipw2100-admin@linux.intel.com>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/in6.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/tcp.h>
-#include <linux/types.h>
-#include <linux/wireless.h>
-#include <linux/etherdevice.h>
-#include <asm/uaccess.h>
-#include <net/arp.h>
-
-#include "ieee80211.h"
-
-MODULE_DESCRIPTION("802.11 data/management/control stack");
-MODULE_AUTHOR("Copyright (C) 2004 Intel Corporation <jketreno@linux.intel.com>");
-MODULE_LICENSE("GPL");
-
-#define DRV_NAME "ieee80211"
-
-static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
-{
- if (ieee->networks)
- return 0;
-
- ieee->networks = kcalloc(
- MAX_NETWORK_COUNT, sizeof(struct ieee80211_network),
- GFP_KERNEL);
- if (!ieee->networks) {
- printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
- ieee->dev->name);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static inline void ieee80211_networks_free(struct ieee80211_device *ieee)
-{
- if (!ieee->networks)
- return;
- kfree(ieee->networks);
- ieee->networks = NULL;
-}
-
-static inline void ieee80211_networks_initialize(struct ieee80211_device *ieee)
-{
- int i;
-
- INIT_LIST_HEAD(&ieee->network_free_list);
- INIT_LIST_HEAD(&ieee->network_list);
- for (i = 0; i < MAX_NETWORK_COUNT; i++)
- list_add_tail(&ieee->networks[i].list, &ieee->network_free_list);
-}
-
-
-struct net_device *alloc_ieee80211(int sizeof_priv)
-{
- struct ieee80211_device *ieee;
- struct net_device *dev;
- int i, err;
-
- IEEE80211_DEBUG_INFO("Initializing...\n");
-
- dev = alloc_etherdev(sizeof(struct ieee80211_device) + sizeof_priv);
- if (!dev) {
- IEEE80211_ERROR("Unable to network device.\n");
- goto failed;
- }
-
- ieee = netdev_priv(dev);
-
- memset(ieee, 0, sizeof(struct ieee80211_device) + sizeof_priv);
- ieee->dev = dev;
-
- err = ieee80211_networks_allocate(ieee);
- if (err) {
- IEEE80211_ERROR("Unable to allocate beacon storage: %d\n",
- err);
- goto failed;
- }
- ieee80211_networks_initialize(ieee);
-
-
- /* Default fragmentation threshold is maximum payload size */
- ieee->fts = DEFAULT_FTS;
- ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
- ieee->open_wep = 1;
-
- /* Default to enabling full open WEP with host based encrypt/decrypt */
- ieee->host_encrypt = 1;
- ieee->host_decrypt = 1;
- ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
-
- INIT_LIST_HEAD(&ieee->crypt_deinit_list);
- init_timer(&ieee->crypt_deinit_timer);
- ieee->crypt_deinit_timer.data = (unsigned long)ieee;
- ieee->crypt_deinit_timer.function = ieee80211_crypt_deinit_handler;
-
- spin_lock_init(&ieee->lock);
- spin_lock_init(&ieee->wpax_suitlist_lock);
- spin_lock_init(&ieee->bw_spinlock);
- spin_lock_init(&ieee->reorder_spinlock);
-
- /* added by WB */
- atomic_set(&(ieee->atm_chnlop), 0);
- atomic_set(&(ieee->atm_swbw), 0);
-
- ieee->wpax_type_set = 0;
- ieee->wpa_enabled = 0;
- ieee->tkip_countermeasures = 0;
- ieee->drop_unencrypted = 0;
- ieee->privacy_invoked = 0;
- ieee->ieee802_1x = 1;
- ieee->raw_tx = 0;
- /* ieee->hwsec_support = 1; default support hw security: use module_param instead */
- ieee->hwsec_active = 0; /* disable hwsec, switch it on when necessary */
-
- ieee80211_softmac_init(ieee);
-
- ieee->pHTInfo = kzalloc(sizeof(RT_HIGH_THROUGHPUT), GFP_KERNEL);
- if (ieee->pHTInfo == NULL)
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for HTInfo\n");
- return NULL;
- }
- HTUpdateDefaultSetting(ieee);
- HTInitializeHTInfo(ieee); /* may move to other place */
- TSInitialize(ieee);
- for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++)
- INIT_LIST_HEAD(&ieee->ibss_mac_hash[i]);
-
- for (i = 0; i < 17; i++) {
- ieee->last_rxseq_num[i] = -1;
- ieee->last_rxfrag_num[i] = -1;
- ieee->last_packet_time[i] = 0;
- }
-
- /* Functions to load crypt module automatically */
- ieee80211_tkip_null();
- ieee80211_wep_null();
- ieee80211_ccmp_null();
-
- return dev;
-
-failed:
- if (dev)
- free_netdev(dev);
- return NULL;
-}
-
-
-void free_ieee80211(struct net_device *dev)
-{
- struct ieee80211_device *ieee = netdev_priv(dev);
- int i;
- kfree(ieee->pHTInfo);
- ieee->pHTInfo = NULL;
- RemoveAllTS(ieee);
- ieee80211_softmac_free(ieee);
- del_timer_sync(&ieee->crypt_deinit_timer);
- ieee80211_crypt_deinit_entries(ieee, 1);
-
- for (i = 0; i < WEP_KEYS; i++) {
- struct ieee80211_crypt_data *crypt = ieee->crypt[i];
- if (crypt) {
- if (crypt->ops)
- crypt->ops->deinit(crypt->priv);
- kfree(crypt);
- ieee->crypt[i] = NULL;
- }
- }
-
- ieee80211_networks_free(ieee);
- free_netdev(dev);
-}
-
-#ifdef CONFIG_IEEE80211_DEBUG
-
-u32 ieee80211_debug_level = 0;
-static int debug =
- /* IEEE80211_DL_INFO | */
- /* IEEE80211_DL_WX | */
- /* IEEE80211_DL_SCAN | */
- /* IEEE80211_DL_STATE | */
- /* IEEE80211_DL_MGMT | */
- /* IEEE80211_DL_FRAG | */
- /* IEEE80211_DL_EAP | */
- /* IEEE80211_DL_DROP | */
- /* IEEE80211_DL_TX | */
- /* IEEE80211_DL_RX | */
- /* IEEE80211_DL_QOS | */
- /* IEEE80211_DL_HT | */
- /* IEEE80211_DL_TS | */
- /* IEEE80211_DL_BA | */
- /* IEEE80211_DL_REORDER | */
- /* IEEE80211_DL_TRACE | */
- /* IEEE80211_DL_DATA | */
- IEEE80211_DL_ERR /* always open this flag to show error out */
- ;
-struct proc_dir_entry *ieee80211_proc = NULL;
-
-static int show_debug_level(char *page, char **start, off_t offset,
- int count, int *eof, void *data)
-{
- return snprintf(page, count, "0x%08X\n", ieee80211_debug_level);
-}
-
-static int store_debug_level(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- char buf[] = "0x00000000";
- unsigned long len = min(sizeof(buf) - 1, (u32)count);
- char *p = (char *)buf;
- unsigned long val;
-
- if (copy_from_user(buf, buffer, len))
- return count;
- buf[len] = 0;
- if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
- p++;
- if (p[0] == 'x' || p[0] == 'X')
- p++;
- val = simple_strtoul(p, &p, 16);
- } else
- val = simple_strtoul(p, &p, 10);
- if (p == buf)
- printk(KERN_INFO DRV_NAME
- ": %s is not in hex or decimal form.\n", buf);
- else
- ieee80211_debug_level = val;
-
- return strnlen(buf, count);
-}
-
-extern int ieee80211_crypto_init(void);
-extern void ieee80211_crypto_deinit(void);
-extern int ieee80211_crypto_tkip_init(void);
-extern void ieee80211_crypto_tkip_exit(void);
-extern int ieee80211_crypto_ccmp_init(void);
-extern void ieee80211_crypto_ccmp_exit(void);
-extern int ieee80211_crypto_wep_init(void);
-extern void ieee80211_crypto_wep_exit(void);
-
-int __init ieee80211_rtl_init(void)
-{
- struct proc_dir_entry *e;
- int retval;
-
- retval = ieee80211_crypto_init();
- if (retval)
- return retval;
- retval = ieee80211_crypto_tkip_init();
- if (retval) {
- ieee80211_crypto_deinit();
- return retval;
- }
- retval = ieee80211_crypto_ccmp_init();
- if (retval) {
- ieee80211_crypto_tkip_exit();
- ieee80211_crypto_deinit();
- return retval;
- }
- retval = ieee80211_crypto_wep_init();
- if (retval) {
- ieee80211_crypto_ccmp_exit();
- ieee80211_crypto_tkip_exit();
- ieee80211_crypto_deinit();
- return retval;
- }
-
- ieee80211_debug_level = debug;
- ieee80211_proc = proc_mkdir(DRV_NAME, init_net.proc_net);
- if (ieee80211_proc == NULL) {
- IEEE80211_ERROR("Unable to create " DRV_NAME
- " proc directory\n");
- return -EIO;
- }
- e = create_proc_entry("debug_level", S_IFREG | S_IRUGO | S_IWUSR,
- ieee80211_proc);
- if (!e) {
- remove_proc_entry(DRV_NAME, init_net.proc_net);
- ieee80211_proc = NULL;
- return -EIO;
- }
- e->read_proc = show_debug_level;
- e->write_proc = store_debug_level;
- e->data = NULL;
-
- return 0;
-}
-
-void __exit ieee80211_rtl_exit(void)
-{
- if (ieee80211_proc) {
- remove_proc_entry("debug_level", ieee80211_proc);
- remove_proc_entry(DRV_NAME, init_net.proc_net);
- ieee80211_proc = NULL;
- }
- ieee80211_crypto_wep_exit();
- ieee80211_crypto_ccmp_exit();
- ieee80211_crypto_tkip_exit();
- ieee80211_crypto_deinit();
-}
-
-#include <linux/moduleparam.h>
-module_param(debug, int, 0444);
-MODULE_PARM_DESC(debug, "debug output mask");
-
-
-#endif
-
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
deleted file mode 100644
index 022086d2a3f..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
+++ /dev/null
@@ -1,2676 +0,0 @@
-/*
- * Original code based Host AP (software wireless LAN access point) driver
- * for Intersil Prism2/2.5/3 - hostap.o module, common routines
- *
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <jkmaline@cc.hut.fi>
- * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- * Copyright (c) 2004, Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
- ******************************************************************************
-
- Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
-
- A special thanks goes to Realtek for their support !
-
-******************************************************************************/
-
-
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/in6.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/tcp.h>
-#include <linux/types.h>
-#include <linux/wireless.h>
-#include <linux/etherdevice.h>
-#include <asm/uaccess.h>
-#include <linux/ctype.h>
-
-#include "ieee80211.h"
-#ifdef ENABLE_DOT11D
-#include "dot11d.h"
-#endif
-static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee,
- struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats)
-{
- struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *)skb->data;
- u16 fc = le16_to_cpu(hdr->frame_ctl);
-
- skb->dev = ieee->dev;
- skb_reset_mac_header(skb);
-
- skb_pull(skb, ieee80211_get_hdrlen(fc));
- skb->pkt_type = PACKET_OTHERHOST;
- skb->protocol = __constant_htons(ETH_P_80211_RAW);
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx(skb);
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static struct ieee80211_frag_entry *
-ieee80211_frag_cache_find(struct ieee80211_device *ieee, unsigned int seq,
- unsigned int frag, u8 tid,u8 *src, u8 *dst)
-{
- struct ieee80211_frag_entry *entry;
- int i;
-
- for (i = 0; i < IEEE80211_FRAG_CACHE_LEN; i++) {
- entry = &ieee->frag_cache[tid][i];
- if (entry->skb != NULL &&
- time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
- IEEE80211_DEBUG_FRAG(
- "expiring fragment cache entry "
- "seq=%u last_frag=%u\n",
- entry->seq, entry->last_frag);
- dev_kfree_skb_any(entry->skb);
- entry->skb = NULL;
- }
-
- if (entry->skb != NULL && entry->seq == seq &&
- (entry->last_frag + 1 == frag || frag == -1) &&
- memcmp(entry->src_addr, src, ETH_ALEN) == 0 &&
- memcmp(entry->dst_addr, dst, ETH_ALEN) == 0)
- return entry;
- }
-
- return NULL;
-}
-
-/* Called only as a tasklet (software IRQ) */
-static struct sk_buff *
-ieee80211_frag_cache_get(struct ieee80211_device *ieee,
- struct ieee80211_hdr_4addr *hdr)
-{
- struct sk_buff *skb = NULL;
- u16 fc = le16_to_cpu(hdr->frame_ctl);
- u16 sc = le16_to_cpu(hdr->seq_ctl);
- unsigned int frag = WLAN_GET_SEQ_FRAG(sc);
- unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
- struct ieee80211_frag_entry *entry;
- struct ieee80211_hdr_3addrqos *hdr_3addrqos;
- struct ieee80211_hdr_4addrqos *hdr_4addrqos;
- u8 tid;
-
- if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)hdr;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid ++;
- } else if (IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_3addrqos = (struct ieee80211_hdr_3addrqos *)hdr;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid ++;
- } else {
- tid = 0;
- }
-
- if (frag == 0) {
- /* Reserve enough space to fit maximum frame length */
- skb = dev_alloc_skb(ieee->dev->mtu +
- sizeof(struct ieee80211_hdr_4addr) +
- 8 /* LLC */ +
- 2 /* alignment */ +
- 8 /* WEP */ +
- ETH_ALEN /* WDS */ +
- (IEEE80211_QOS_HAS_SEQ(fc)?2:0) /* QOS Control */);
- if (skb == NULL)
- return NULL;
-
- entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
- ieee->frag_next_idx[tid]++;
- if (ieee->frag_next_idx[tid] >= IEEE80211_FRAG_CACHE_LEN)
- ieee->frag_next_idx[tid] = 0;
-
- if (entry->skb != NULL)
- dev_kfree_skb_any(entry->skb);
-
- entry->first_frag_time = jiffies;
- entry->seq = seq;
- entry->last_frag = frag;
- entry->skb = skb;
- memcpy(entry->src_addr, hdr->addr2, ETH_ALEN);
- memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN);
- } else {
- /* received a fragment of a frame for which the head fragment
- * should have already been received */
- entry = ieee80211_frag_cache_find(ieee, seq, frag, tid,hdr->addr2,
- hdr->addr1);
- if (entry != NULL) {
- entry->last_frag = frag;
- skb = entry->skb;
- }
- }
-
- return skb;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
- struct ieee80211_hdr_4addr *hdr)
-{
- u16 fc = le16_to_cpu(hdr->frame_ctl);
- u16 sc = le16_to_cpu(hdr->seq_ctl);
- unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
- struct ieee80211_frag_entry *entry;
- struct ieee80211_hdr_3addrqos *hdr_3addrqos;
- struct ieee80211_hdr_4addrqos *hdr_4addrqos;
- u8 tid;
-
- if(((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)hdr;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid ++;
- } else if (IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_3addrqos = (struct ieee80211_hdr_3addrqos *)hdr;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid ++;
- } else {
- tid = 0;
- }
-
- entry = ieee80211_frag_cache_find(ieee, seq, -1, tid,hdr->addr2,
- hdr->addr1);
-
- if (entry == NULL) {
- IEEE80211_DEBUG_FRAG(
- "could not invalidate fragment cache "
- "entry (seq=%u)\n", seq);
- return -1;
- }
-
- entry->skb = NULL;
- return 0;
-}
-
-
-
-/* ieee80211_rx_frame_mgtmt
- *
- * Responsible for handling management control frames
- *
- * Called by ieee80211_rx */
-static inline int
-ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats, u16 type,
- u16 stype)
-{
- /* On the struct stats definition there is written that
- * this is not mandatory.... but seems that the probe
- * response parser uses it
- */
- struct ieee80211_hdr_3addr * hdr = (struct ieee80211_hdr_3addr *)skb->data;
-
- rx_stats->len = skb->len;
- ieee80211_rx_mgt(ieee,(struct ieee80211_hdr_4addr *)skb->data,rx_stats);
- if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN)))//use ADDR1 to perform address matching for Management frames
- {
- dev_kfree_skb_any(skb);
- return 0;
- }
-
- ieee80211_rx_frame_softmac(ieee, skb, rx_stats, type, stype);
-
- dev_kfree_skb_any(skb);
-
- return 0;
-
- #ifdef NOT_YET
- if (ieee->iw_mode == IW_MODE_MASTER) {
- printk(KERN_DEBUG "%s: Master mode not yet suppported.\n",
- ieee->dev->name);
- return 0;
- }
-
- if (ieee->hostapd && type == IEEE80211_TYPE_MGMT) {
- if (stype == WLAN_FC_STYPE_BEACON &&
- ieee->iw_mode == IW_MODE_MASTER) {
- struct sk_buff *skb2;
- /* Process beacon frames also in kernel driver to
- * update STA(AP) table statistics */
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2)
- hostap_rx(skb2->dev, skb2, rx_stats);
- }
-
- /* send management frames to the user space daemon for
- * processing */
- ieee->apdevstats.rx_packets++;
- ieee->apdevstats.rx_bytes += skb->len;
- prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT);
- return 0;
- }
-
- if (ieee->iw_mode == IW_MODE_MASTER) {
- if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) {
- printk(KERN_DEBUG "%s: unknown management frame "
- "(type=0x%02x, stype=0x%02x) dropped\n",
- skb->dev->name, type, stype);
- return -1;
- }
-
- hostap_rx(skb->dev, skb, rx_stats);
- return 0;
- }
-
- printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: management frame "
- "received in non-Host AP mode\n", skb->dev->name);
- return -1;
- #endif
-}
-
-
-
-/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
-/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
-static unsigned char rfc1042_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-static unsigned char bridge_tunnel_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-/* No encapsulation header if EtherType < 0x600 (=length) */
-
-/* Called by ieee80211_rx_frame_decrypt */
-static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
- struct sk_buff *skb, size_t hdrlen)
-{
- struct net_device *dev = ieee->dev;
- u16 fc, ethertype;
- struct ieee80211_hdr_4addr *hdr;
- u8 *pos;
-
- if (skb->len < 24)
- return 0;
-
- if (ieee->hwsec_active)
- {
- cb_desc *tcb_desc = (cb_desc *)(skb->cb+ MAX_DEV_ADDR_SIZE);
- tcb_desc->bHwSec = 1;
-
- if(ieee->need_sw_enc)
- tcb_desc->bHwSec = 0;
- }
-
- hdr = (struct ieee80211_hdr_4addr *) skb->data;
- fc = le16_to_cpu(hdr->frame_ctl);
-
- /* check that the frame is unicast frame to us */
- if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
- IEEE80211_FCTL_TODS &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
- memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
- /* ToDS frame with own addr BSSID and DA */
- } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
- IEEE80211_FCTL_FROMDS &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
- /* FromDS frame with own addr as DA */
- } else
- return 0;
-
- if (skb->len < 24 + 8)
- return 0;
-
- /* check for port access entity Ethernet type */
- pos = skb->data + hdrlen;
- ethertype = (pos[6] << 8) | pos[7];
- if (ethertype == ETH_P_PAE)
- return 1;
-
- return 0;
-}
-
-/* Called only as a tasklet (software IRQ), by ieee80211_rx */
-static inline int
-ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb,
- struct ieee80211_crypt_data *crypt)
-{
- struct ieee80211_hdr_4addr *hdr;
- int res, hdrlen;
-
- if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
- return 0;
-
- if (ieee->hwsec_active)
- {
- cb_desc *tcb_desc = (cb_desc *)(skb->cb+ MAX_DEV_ADDR_SIZE);
- tcb_desc->bHwSec = 1;
- }
-
- hdr = (struct ieee80211_hdr_4addr *) skb->data;
- hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
-
-#ifdef CONFIG_IEEE80211_CRYPT_TKIP
- if (ieee->tkip_countermeasures &&
- strcmp(crypt->ops->name, "TKIP") == 0) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
- "received packet from %pM\n",
- ieee->dev->name, hdr->addr2);
- }
- return -1;
- }
-#endif
-
- atomic_inc(&crypt->refcnt);
- res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
- atomic_dec(&crypt->refcnt);
- if (res < 0) {
- IEEE80211_DEBUG_DROP(
- "decryption failed (SA=%pM"
- ") res=%d\n", hdr->addr2, res);
- if (res == -2)
- IEEE80211_DEBUG_DROP("Decryption failed ICV "
- "mismatch (key %d)\n",
- skb->data[hdrlen + 3] >> 6);
- ieee->ieee_stats.rx_discards_undecryptable++;
- return -1;
- }
-
- return res;
-}
-
-
-/* Called only as a tasklet (software IRQ), by ieee80211_rx */
-static inline int
-ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device* ieee, struct sk_buff *skb,
- int keyidx, struct ieee80211_crypt_data *crypt)
-{
- struct ieee80211_hdr_4addr *hdr;
- int res, hdrlen;
-
- if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
- return 0;
- if (ieee->hwsec_active)
- {
- cb_desc *tcb_desc = (cb_desc *)(skb->cb+ MAX_DEV_ADDR_SIZE);
- tcb_desc->bHwSec = 1;
-
- if(ieee->need_sw_enc)
- tcb_desc->bHwSec = 0;
-
- }
-
- hdr = (struct ieee80211_hdr_4addr *) skb->data;
- hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
-
- atomic_inc(&crypt->refcnt);
- res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
- atomic_dec(&crypt->refcnt);
- if (res < 0) {
- printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
- " (SA=%pM keyidx=%d)\n",
- ieee->dev->name, hdr->addr2, keyidx);
- return -1;
- }
-
- return 0;
-}
-
-
-/* this function is stolen from ipw2200 driver*/
-#define IEEE_PACKET_RETRY_TIME (5*HZ)
-static int is_duplicate_packet(struct ieee80211_device *ieee,
- struct ieee80211_hdr_4addr *header)
-{
- u16 fc = le16_to_cpu(header->frame_ctl);
- u16 sc = le16_to_cpu(header->seq_ctl);
- u16 seq = WLAN_GET_SEQ_SEQ(sc);
- u16 frag = WLAN_GET_SEQ_FRAG(sc);
- u16 *last_seq, *last_frag;
- unsigned long *last_time;
- struct ieee80211_hdr_3addrqos *hdr_3addrqos;
- struct ieee80211_hdr_4addrqos *hdr_4addrqos;
- u8 tid;
-
-
- //TO2DS and QoS
- if(((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)header;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid ++;
- } else if(IEEE80211_QOS_HAS_SEQ(fc)) { //QoS
- hdr_3addrqos = (struct ieee80211_hdr_3addrqos*)header;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid ++;
- } else { // no QoS
- tid = 0;
- }
-
- switch (ieee->iw_mode) {
- case IW_MODE_ADHOC:
- {
- struct list_head *p;
- struct ieee_ibss_seq *entry = NULL;
- u8 *mac = header->addr2;
- int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE;
-
- list_for_each(p, &ieee->ibss_mac_hash[index]) {
- entry = list_entry(p, struct ieee_ibss_seq, list);
- if (!memcmp(entry->mac, mac, ETH_ALEN))
- break;
- }
-
- if (p == &ieee->ibss_mac_hash[index]) {
- entry = kmalloc(sizeof(struct ieee_ibss_seq), GFP_ATOMIC);
- if (!entry) {
- printk(KERN_WARNING "Cannot malloc new mac entry\n");
- return 0;
- }
- memcpy(entry->mac, mac, ETH_ALEN);
- entry->seq_num[tid] = seq;
- entry->frag_num[tid] = frag;
- entry->packet_time[tid] = jiffies;
- list_add(&entry->list, &ieee->ibss_mac_hash[index]);
- return 0;
- }
- last_seq = &entry->seq_num[tid];
- last_frag = &entry->frag_num[tid];
- last_time = &entry->packet_time[tid];
- break;
- }
-
- case IW_MODE_INFRA:
- last_seq = &ieee->last_rxseq_num[tid];
- last_frag = &ieee->last_rxfrag_num[tid];
- last_time = &ieee->last_packet_time[tid];
-
- break;
- default:
- return 0;
- }
-
- if ((*last_seq == seq) &&
- time_after(*last_time + IEEE_PACKET_RETRY_TIME, jiffies)) {
- if (*last_frag == frag){
- goto drop;
-
- }
- if (*last_frag + 1 != frag)
- /* out-of-order fragment */
- goto drop;
- } else
- *last_seq = seq;
-
- *last_frag = frag;
- *last_time = jiffies;
- return 0;
-
-drop:
- return 1;
-}
-bool
-AddReorderEntry(
- PRX_TS_RECORD pTS,
- PRX_REORDER_ENTRY pReorderEntry
- )
-{
- struct list_head *pList = &pTS->RxPendingPktList;
-
- while(pList->next != &pTS->RxPendingPktList)
- {
- if( SN_LESS(pReorderEntry->SeqNum, ((PRX_REORDER_ENTRY)list_entry(pList->next,RX_REORDER_ENTRY,List))->SeqNum) )
- {
- pList = pList->next;
- }
- else if( SN_EQUAL(pReorderEntry->SeqNum, ((PRX_REORDER_ENTRY)list_entry(pList->next,RX_REORDER_ENTRY,List))->SeqNum) )
- {
- return false;
- }
- else
- {
- break;
- }
- }
-
- pReorderEntry->List.next = pList->next;
- pReorderEntry->List.next->prev = &pReorderEntry->List;
- pReorderEntry->List.prev = pList;
- pList->next = &pReorderEntry->List;
-
- return true;
-}
-
-void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb** prxbIndicateArray,u8 index)
-{
- u8 i = 0 , j=0;
- u16 ethertype;
-
- for(j = 0; j<index; j++)
- {
-//added by amy for reorder
- struct ieee80211_rxb* prxb = prxbIndicateArray[j];
- for(i = 0; i<prxb->nr_subframes; i++) {
- struct sk_buff *sub_skb = prxb->subframes[i];
-
- /* convert hdr + possible LLC headers into Ethernet header */
- ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
- if (sub_skb->len >= 8 &&
- ((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 &&
- ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) {
- /* remove RFC1042 or Bridge-Tunnel encapsulation and
- * replace EtherType */
- skb_pull(sub_skb, SNAP_SIZE);
- memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
- } else {
- u16 len;
- /* Leave Ethernet header part of hdr and full payload */
- len = htons(sub_skb->len);
- memcpy(skb_push(sub_skb, 2), &len, 2);
- memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
- }
-
- /* Indicat the packets to upper layer */
- if (sub_skb) {
- sub_skb->protocol = eth_type_trans(sub_skb, ieee->dev);
- memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
- sub_skb->dev = ieee->dev;
- sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
- ieee->last_rx_ps_time = jiffies;
- netif_rx(sub_skb);
- }
- }
- kfree(prxb);
- prxb = NULL;
- }
-}
-
-
-void RxReorderIndicatePacket( struct ieee80211_device *ieee,
- struct ieee80211_rxb* prxb,
- PRX_TS_RECORD pTS,
- u16 SeqNum)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- PRX_REORDER_ENTRY pReorderEntry = NULL;
- struct ieee80211_rxb* prxbIndicateArray[REORDER_WIN_SIZE];
- u8 WinSize = pHTInfo->RxReorderWinSize;
- u16 WinEnd = (pTS->RxIndicateSeq + WinSize -1)%4096;
- u8 index = 0;
- bool bMatchWinStart = false, bPktInBuf = false;
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): Seq is %d,pTS->RxIndicateSeq is %d, WinSize is %d\n",__FUNCTION__,SeqNum,pTS->RxIndicateSeq,WinSize);
-
- /* Rx Reorder initialize condition.*/
- if(pTS->RxIndicateSeq == 0xffff) {
- pTS->RxIndicateSeq = SeqNum;
- }
-
- /* Drop out the packet which SeqNum is smaller than WinStart */
- if(SN_LESS(SeqNum, pTS->RxIndicateSeq)) {
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
- pTS->RxIndicateSeq, SeqNum);
- pHTInfo->RxReorderDropCounter++;
- {
- int i;
- for(i =0; i < prxb->nr_subframes; i++) {
- dev_kfree_skb(prxb->subframes[i]);
- }
- kfree(prxb);
- prxb = NULL;
- }
- return;
- }
-
- /*
- * Sliding window manipulation. Conditions includes:
- * 1. Incoming SeqNum is equal to WinStart =>Window shift 1
- * 2. Incoming SeqNum is larger than the WinEnd => Window shift N
- */
- if(SN_EQUAL(SeqNum, pTS->RxIndicateSeq)) {
- pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096;
- bMatchWinStart = true;
- } else if(SN_LESS(WinEnd, SeqNum)) {
- if(SeqNum >= (WinSize - 1)) {
- pTS->RxIndicateSeq = SeqNum + 1 -WinSize;
- } else {
- pTS->RxIndicateSeq = 4095 - (WinSize - (SeqNum +1)) + 1;
- }
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Window Shift! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum);
- }
-
- /*
- * Indication process.
- * After Packet dropping and Sliding Window shifting as above, we can now just indicate the packets
- * with the SeqNum smaller than latest WinStart and buffer other packets.
- */
- /* For Rx Reorder condition:
- * 1. All packets with SeqNum smaller than WinStart => Indicate
- * 2. All packets with SeqNum larger than or equal to WinStart => Buffer it.
- */
- if(bMatchWinStart) {
- /* Current packet is going to be indicated.*/
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Packets indication!! IndicateSeq: %d, NewSeq: %d\n",\
- pTS->RxIndicateSeq, SeqNum);
- prxbIndicateArray[0] = prxb;
- index = 1;
- } else {
- /* Current packet is going to be inserted into pending list.*/
- if(!list_empty(&ieee->RxReorder_Unused_List)) {
- pReorderEntry = (PRX_REORDER_ENTRY)list_entry(ieee->RxReorder_Unused_List.next,RX_REORDER_ENTRY,List);
- list_del_init(&pReorderEntry->List);
-
- /* Make a reorder entry and insert into a the packet list.*/
- pReorderEntry->SeqNum = SeqNum;
- pReorderEntry->prxb = prxb;
-
- if(!AddReorderEntry(pTS, pReorderEntry)) {
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): Duplicate packet is dropped!! IndicateSeq: %d, NewSeq: %d\n",
- __FUNCTION__, pTS->RxIndicateSeq, SeqNum);
- list_add_tail(&pReorderEntry->List,&ieee->RxReorder_Unused_List);
- {
- int i;
- for(i =0; i < prxb->nr_subframes; i++) {
- dev_kfree_skb(prxb->subframes[i]);
- }
- kfree(prxb);
- prxb = NULL;
- }
- } else {
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,
- "Pkt insert into buffer!! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum);
- }
- }
- else {
- /*
- * Packets are dropped if there is not enough reorder entries.
- * This part shall be modified!! We can just indicate all the
- * packets in buffer and get reorder entries.
- */
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): There is no reorder entry!! Packet is dropped!!\n");
- {
- int i;
- for(i =0; i < prxb->nr_subframes; i++) {
- dev_kfree_skb(prxb->subframes[i]);
- }
- kfree(prxb);
- prxb = NULL;
- }
- }
- }
-
- /* Check if there is any packet need indicate.*/
- while(!list_empty(&pTS->RxPendingPktList)) {
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): start RREORDER indicate\n",__FUNCTION__);
-#if 1
- pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
- if( SN_LESS(pReorderEntry->SeqNum, pTS->RxIndicateSeq) ||
- SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq))
- {
- /* This protect buffer from overflow. */
- if(index >= REORDER_WIN_SIZE) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Buffer overflow!! \n");
- bPktInBuf = true;
- break;
- }
-
- list_del_init(&pReorderEntry->List);
-
- if(SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq))
- pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096;
-
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packets indication!! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum);
- prxbIndicateArray[index] = pReorderEntry->prxb;
- index++;
-
- list_add_tail(&pReorderEntry->List,&ieee->RxReorder_Unused_List);
- } else {
- bPktInBuf = true;
- break;
- }
-#endif
- }
-
- /* Handling pending timer. Set this timer to prevent from long time Rx buffering.*/
- if(index>0) {
- // Cancel previous pending timer.
- if (timer_pending(&pTS->RxPktPendingTimer))
- del_timer_sync(&pTS->RxPktPendingTimer);
- pTS->RxTimeoutIndicateSeq = 0xffff;
-
- // Indicate packets
- if(index>REORDER_WIN_SIZE){
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! \n");
- return;
- }
- ieee80211_indicate_packets(ieee, prxbIndicateArray, index);
- bPktInBuf = false;
- }
-
- if(bPktInBuf && pTS->RxTimeoutIndicateSeq==0xffff) {
- // Set new pending timer.
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): SET rx timeout timer\n", __FUNCTION__);
- pTS->RxTimeoutIndicateSeq = pTS->RxIndicateSeq;
-
- mod_timer(&pTS->RxPktPendingTimer, jiffies + MSECS(pHTInfo->RxReorderPendingTime));
- }
-}
-
-u8 parse_subframe(struct ieee80211_device* ieee,struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats,
- struct ieee80211_rxb *rxb,u8* src,u8* dst)
-{
- struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr* )skb->data;
- u16 fc = le16_to_cpu(hdr->frame_ctl);
-
- u16 LLCOffset= sizeof(struct ieee80211_hdr_3addr);
- u16 ChkLength;
- bool bIsAggregateFrame = false;
- u16 nSubframe_Length;
- u8 nPadding_Length = 0;
- u16 SeqNum=0;
-
- struct sk_buff *sub_skb;
- u8 *data_ptr;
- /* just for debug purpose */
- SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl));
-
- if((IEEE80211_QOS_HAS_SEQ(fc))&&\
- (((frameqos *)(skb->data + IEEE80211_3ADDR_LEN))->field.reserved)) {
- bIsAggregateFrame = true;
- }
-
- if(IEEE80211_QOS_HAS_SEQ(fc)) {
- LLCOffset += 2;
- }
-
- if(rx_stats->bContainHTC) {
- LLCOffset += sHTCLng;
- }
- // Null packet, don't indicate it to upper layer
- ChkLength = LLCOffset;/* + (Frame_WEP(frame)!=0 ?Adapter->MgntInfo.SecurityInfo.EncryptionHeadOverhead:0);*/
-
- if( skb->len <= ChkLength ) {
- return 0;
- }
-
- skb_pull(skb, LLCOffset);
- ieee->bIsAggregateFrame = bIsAggregateFrame;//added by amy for Leisure PS
-
- if(!bIsAggregateFrame) {
- rxb->nr_subframes = 1;
-#ifdef JOHN_NOCPY
- rxb->subframes[0] = skb;
-#else
- rxb->subframes[0] = skb_copy(skb, GFP_ATOMIC);
-#endif
-
- memcpy(rxb->src,src,ETH_ALEN);
- memcpy(rxb->dst,dst,ETH_ALEN);
- //IEEE80211_DEBUG_DATA(IEEE80211_DL_RX,skb->data,skb->len);
- return 1;
- } else {
- rxb->nr_subframes = 0;
- memcpy(rxb->src,src,ETH_ALEN);
- memcpy(rxb->dst,dst,ETH_ALEN);
- while(skb->len > ETHERNET_HEADER_SIZE) {
- /* Offset 12 denote 2 mac address */
- nSubframe_Length = *((u16*)(skb->data + 12));
- //==m==>change the length order
- nSubframe_Length = (nSubframe_Length>>8) + (nSubframe_Length<<8);
-
- if(skb->len<(ETHERNET_HEADER_SIZE + nSubframe_Length)) {
- printk("%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",\
- __FUNCTION__,rxb->nr_subframes);
- printk("%s: A-MSDU parse error!! Subframe Length: %d\n",__FUNCTION__, nSubframe_Length);
- printk("nRemain_Length is %d and nSubframe_Length is : %d\n",skb->len,nSubframe_Length);
- printk("The Packet SeqNum is %d\n",SeqNum);
- return 0;
- }
-
- /* move the data point to data content */
- skb_pull(skb, ETHERNET_HEADER_SIZE);
-
-#ifdef JOHN_NOCPY
- sub_skb = skb_clone(skb, GFP_ATOMIC);
- sub_skb->len = nSubframe_Length;
- sub_skb->tail = sub_skb->data + nSubframe_Length;
-#else
- /* Allocate new skb for releasing to upper layer */
- sub_skb = dev_alloc_skb(nSubframe_Length + 12);
- skb_reserve(sub_skb, 12);
- data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
- memcpy(data_ptr,skb->data,nSubframe_Length);
-#endif
- rxb->subframes[rxb->nr_subframes++] = sub_skb;
- if(rxb->nr_subframes >= MAX_SUBFRAME_COUNT) {
- IEEE80211_DEBUG_RX("ParseSubframe(): Too many Subframes! Packets dropped!\n");
- break;
- }
- skb_pull(skb,nSubframe_Length);
-
- if(skb->len != 0) {
- nPadding_Length = 4 - ((nSubframe_Length + ETHERNET_HEADER_SIZE) % 4);
- if(nPadding_Length == 4) {
- nPadding_Length = 0;
- }
-
- if(skb->len < nPadding_Length) {
- return 0;
- }
-
- skb_pull(skb,nPadding_Length);
- }
- }
-#ifdef JOHN_NOCPY
- dev_kfree_skb(skb);
-#endif
- return rxb->nr_subframes;
- }
-}
-
-/* All received frames are sent to this function. @skb contains the frame in
- * IEEE 802.11 format, i.e., in the format it was sent over air.
- * This function is called only as a tasklet (software IRQ). */
-int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats)
-{
- struct net_device *dev = ieee->dev;
- struct ieee80211_hdr_4addr *hdr;
-
- size_t hdrlen;
- u16 fc, type, stype, sc;
- struct net_device_stats *stats;
- unsigned int frag;
- u8 *payload;
- u16 ethertype;
- //added by amy for reorder
- u8 TID = 0;
- u16 SeqNum = 0;
- PRX_TS_RECORD pTS = NULL;
- bool unicast_packet = false;
- //added by amy for reorder
-#ifdef NOT_YET
- struct net_device *wds = NULL;
- struct sk_buff *skb2 = NULL;
- struct net_device *wds = NULL;
- int frame_authorized = 0;
- int from_assoc_ap = 0;
- void *sta = NULL;
-#endif
- u8 dst[ETH_ALEN];
- u8 src[ETH_ALEN];
- u8 bssid[ETH_ALEN];
- struct ieee80211_crypt_data *crypt = NULL;
- int keyidx = 0;
-
- int i;
- struct ieee80211_rxb* rxb = NULL;
- // cheat the the hdr type
- hdr = (struct ieee80211_hdr_4addr *)skb->data;
- stats = &ieee->stats;
-
- if (skb->len < 10) {
- printk(KERN_INFO "%s: SKB length < 10\n",
- dev->name);
- goto rx_dropped;
- }
-
- fc = le16_to_cpu(hdr->frame_ctl);
- type = WLAN_FC_GET_TYPE(fc);
- stype = WLAN_FC_GET_STYPE(fc);
- sc = le16_to_cpu(hdr->seq_ctl);
-
- frag = WLAN_GET_SEQ_FRAG(sc);
- hdrlen = ieee80211_get_hdrlen(fc);
-
- if(HTCCheck(ieee, skb->data))
- {
- if(net_ratelimit())
- printk("find HTCControl\n");
- hdrlen += 4;
- rx_stats->bContainHTC = 1;
- }
-
-#ifdef NOT_YET
-#if WIRELESS_EXT > 15
- /* Put this code here so that we avoid duplicating it in all
- * Rx paths. - Jean II */
-#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
- /* If spy monitoring on */
- if (iface->spy_data.spy_number > 0) {
- struct iw_quality wstats;
- wstats.level = rx_stats->rssi;
- wstats.noise = rx_stats->noise;
- wstats.updated = 6; /* No qual value */
- /* Update spy records */
- wireless_spy_update(dev, hdr->addr2, &wstats);
- }
-#endif /* IW_WIRELESS_SPY */
-#endif /* WIRELESS_EXT > 15 */
- hostap_update_rx_stats(local->ap, hdr, rx_stats);
-#endif
-
-#if WIRELESS_EXT > 15
- if (ieee->iw_mode == IW_MODE_MONITOR) {
- ieee80211_monitor_rx(ieee, skb, rx_stats);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- return 1;
- }
-#endif
- if (ieee->host_decrypt) {
- int idx = 0;
- if (skb->len >= hdrlen + 3)
- idx = skb->data[hdrlen + 3] >> 6;
- crypt = ieee->crypt[idx];
-#ifdef NOT_YET
- sta = NULL;
-
- /* Use station specific key to override default keys if the
- * receiver address is a unicast address ("individual RA"). If
- * bcrx_sta_key parameter is set, station specific key is used
- * even with broad/multicast targets (this is against IEEE
- * 802.11, but makes it easier to use different keys with
- * stations that do not support WEP key mapping). */
-
- if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
- (void) hostap_handle_sta_crypto(local, hdr, &crypt,
- &sta);
-#endif
-
- /* allow NULL decrypt to indicate an station specific override
- * for default encryption */
- if (crypt && (crypt->ops == NULL ||
- crypt->ops->decrypt_mpdu == NULL))
- crypt = NULL;
-
- if (!crypt && (fc & IEEE80211_FCTL_WEP)) {
- /* This seems to be triggered by some (multicast?)
- * frames from other than current BSS, so just drop the
- * frames silently instead of filling system log with
- * these reports. */
- IEEE80211_DEBUG_DROP("Decryption failed (not set)"
- " (SA=%pM)\n",
- hdr->addr2);
- ieee->ieee_stats.rx_discards_undecryptable++;
- goto rx_dropped;
- }
- }
-
- if (skb->len < IEEE80211_DATA_HDR3_LEN)
- goto rx_dropped;
-
- // if QoS enabled, should check the sequence for each of the AC
- if( (ieee->pHTInfo->bCurRxReorderEnable == false) || !ieee->current_network.qos_data.active|| !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)){
- if (is_duplicate_packet(ieee, hdr))
- goto rx_dropped;
-
- }
- else
- {
- PRX_TS_RECORD pRxTS = NULL;
-
- if(GetTs(
- ieee,
- (PTS_COMMON_INFO*) &pRxTS,
- hdr->addr2,
- (u8)Frame_QoSTID((u8*)(skb->data)),
- RX_DIR,
- true))
- {
-
- if( (fc & (1<<11)) &&
- (frag == pRxTS->RxLastFragNum) &&
- (WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum) )
- {
- goto rx_dropped;
- }
- else
- {
- pRxTS->RxLastFragNum = frag;
- pRxTS->RxLastSeqNum = WLAN_GET_SEQ_SEQ(sc);
- }
- }
- else
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s(): No TS!! Skip the check!!\n",__FUNCTION__);
- goto rx_dropped;
- }
- }
-
- if (type == IEEE80211_FTYPE_MGMT) {
-
- if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
- goto rx_dropped;
- else
- goto rx_exit;
- }
-
- /* Data frame - extract src/dst addresses */
- switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
- case IEEE80211_FCTL_FROMDS:
- memcpy(dst, hdr->addr1, ETH_ALEN);
- memcpy(src, hdr->addr3, ETH_ALEN);
- memcpy(bssid, hdr->addr2, ETH_ALEN);
- break;
- case IEEE80211_FCTL_TODS:
- memcpy(dst, hdr->addr3, ETH_ALEN);
- memcpy(src, hdr->addr2, ETH_ALEN);
- memcpy(bssid, hdr->addr1, ETH_ALEN);
- break;
- case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
- if (skb->len < IEEE80211_DATA_HDR4_LEN)
- goto rx_dropped;
- memcpy(dst, hdr->addr3, ETH_ALEN);
- memcpy(src, hdr->addr4, ETH_ALEN);
- memcpy(bssid, ieee->current_network.bssid, ETH_ALEN);
- break;
- case 0:
- memcpy(dst, hdr->addr1, ETH_ALEN);
- memcpy(src, hdr->addr2, ETH_ALEN);
- memcpy(bssid, hdr->addr3, ETH_ALEN);
- break;
- }
-
-#ifdef NOT_YET
- if (hostap_rx_frame_wds(ieee, hdr, fc, &wds))
- goto rx_dropped;
- if (wds) {
- skb->dev = dev = wds;
- stats = hostap_get_stats(dev);
- }
-
- if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
- (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS &&
- ieee->stadev &&
- memcmp(hdr->addr2, ieee->assoc_ap_addr, ETH_ALEN) == 0) {
- /* Frame from BSSID of the AP for which we are a client */
- skb->dev = dev = ieee->stadev;
- stats = hostap_get_stats(dev);
- from_assoc_ap = 1;
- }
-#endif
-
- dev->last_rx = jiffies;
-
-#ifdef NOT_YET
- if ((ieee->iw_mode == IW_MODE_MASTER ||
- ieee->iw_mode == IW_MODE_REPEAT) &&
- !from_assoc_ap) {
- switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats,
- wds != NULL)) {
- case AP_RX_CONTINUE_NOT_AUTHORIZED:
- frame_authorized = 0;
- break;
- case AP_RX_CONTINUE:
- frame_authorized = 1;
- break;
- case AP_RX_DROP:
- goto rx_dropped;
- case AP_RX_EXIT:
- goto rx_exit;
- }
- }
-#endif
- /* Nullfunc frames may have PS-bit set, so they must be passed to
- * hostap_handle_sta_rx() before being dropped here. */
- if (stype != IEEE80211_STYPE_DATA &&
- stype != IEEE80211_STYPE_DATA_CFACK &&
- stype != IEEE80211_STYPE_DATA_CFPOLL &&
- stype != IEEE80211_STYPE_DATA_CFACKPOLL&&
- stype != IEEE80211_STYPE_QOS_DATA//add by David,2006.8.4
- ) {
- if (stype != IEEE80211_STYPE_NULLFUNC)
- IEEE80211_DEBUG_DROP(
- "RX: dropped data frame "
- "with no data (type=0x%02x, "
- "subtype=0x%02x, len=%d)\n",
- type, stype, skb->len);
- goto rx_dropped;
- }
- if (memcmp(bssid, ieee->current_network.bssid, ETH_ALEN))
- goto rx_dropped;
-
-#ifdef ENABLE_LPS
- if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->sta_sleep == 1)
- && (ieee->polling)) {
- if (WLAN_FC_MORE_DATA(fc)) {
- /* more data bit is set, let's request a new frame from the AP */
- ieee80211_sta_ps_send_pspoll_frame(ieee);
- } else {
- ieee->polling = false;
- }
- }
-#endif
-
- ieee->need_sw_enc = 0;
-
- if((!rx_stats->Decrypted)){
- ieee->need_sw_enc = 1;
- }
-
- /* skb: hdr + (possibly fragmented, possibly encrypted) payload */
-
- if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
- (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
- {
- printk("decrypt frame error\n");
- goto rx_dropped;
- }
-
-
- hdr = (struct ieee80211_hdr_4addr *) skb->data;
-
- /* skb: hdr + (possibly fragmented) plaintext payload */
- // PR: FIXME: hostap has additional conditions in the "if" below:
- // ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
- if ((frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) {
- int flen;
- struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr);
- IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);
-
- if (!frag_skb) {
- IEEE80211_DEBUG(IEEE80211_DL_RX | IEEE80211_DL_FRAG,
- "Rx cannot get skb from fragment "
- "cache (morefrag=%d seq=%u frag=%u)\n",
- (fc & IEEE80211_FCTL_MOREFRAGS) != 0,
- WLAN_GET_SEQ_SEQ(sc), frag);
- goto rx_dropped;
- }
- flen = skb->len;
- if (frag != 0)
- flen -= hdrlen;
-
- if (frag_skb->tail + flen > frag_skb->end) {
- printk(KERN_WARNING "%s: host decrypted and "
- "reassembled frame did not fit skb\n",
- dev->name);
- ieee80211_frag_cache_invalidate(ieee, hdr);
- goto rx_dropped;
- }
-
- if (frag == 0) {
- /* copy first fragment (including full headers) into
- * beginning of the fragment cache skb */
- memcpy(skb_put(frag_skb, flen), skb->data, flen);
- } else {
- /* append frame payload to the end of the fragment
- * cache skb */
- memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
- flen);
- }
- dev_kfree_skb_any(skb);
- skb = NULL;
-
- if (fc & IEEE80211_FCTL_MOREFRAGS) {
- /* more fragments expected - leave the skb in fragment
- * cache for now; it will be delivered to upper layers
- * after all fragments have been received */
- goto rx_exit;
- }
-
- /* this was the last fragment and the frame will be
- * delivered, so remove skb from fragment cache */
- skb = frag_skb;
- hdr = (struct ieee80211_hdr_4addr *) skb->data;
- ieee80211_frag_cache_invalidate(ieee, hdr);
- }
-
- /* skb: hdr + (possible reassembled) full MSDU payload; possibly still
- * encrypted/authenticated */
- if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
- ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
- {
- printk("==>decrypt msdu error\n");
- goto rx_dropped;
- }
-
- //added by amy for AP roaming
- ieee->LinkDetectInfo.NumRecvDataInPeriod++;
- ieee->LinkDetectInfo.NumRxOkInPeriod++;
-
- hdr = (struct ieee80211_hdr_4addr *) skb->data;
- if((!is_multicast_ether_addr(hdr->addr1)) && (!is_broadcast_ether_addr(hdr->addr1)))
- unicast_packet = true;
-
- if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep) {
- if (/*ieee->ieee802_1x &&*/
- ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
-
-#ifdef CONFIG_IEEE80211_DEBUG
- /* pass unencrypted EAPOL frames even if encryption is
- * configured */
- struct eapol *eap = (struct eapol *)(skb->data +
- 24);
- IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
- eap_get_type(eap->type));
-#endif
- } else {
- IEEE80211_DEBUG_DROP(
- "encryption configured, but RX "
- "frame not encrypted (SA=%pM)\n",
- hdr->addr2);
- goto rx_dropped;
- }
- }
-
-#ifdef CONFIG_IEEE80211_DEBUG
- if (crypt && !(fc & IEEE80211_FCTL_WEP) &&
- ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
- struct eapol *eap = (struct eapol *)(skb->data +
- 24);
- IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
- eap_get_type(eap->type));
- }
-#endif
-
- if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep &&
- !ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
- IEEE80211_DEBUG_DROP(
- "dropped unencrypted RX data "
- "frame from %pM"
- " (drop_unencrypted=1)\n",
- hdr->addr2);
- goto rx_dropped;
- }
-//added by amy for reorder
- if(ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
- && !is_multicast_ether_addr(hdr->addr1) && !is_broadcast_ether_addr(hdr->addr1))
- {
- TID = Frame_QoSTID(skb->data);
- SeqNum = WLAN_GET_SEQ_SEQ(sc);
- GetTs(ieee,(PTS_COMMON_INFO*) &pTS,hdr->addr2,TID,RX_DIR,true);
- if(TID !=0 && TID !=3)
- {
- ieee->bis_any_nonbepkts = true;
- }
- }
-
-//added by amy for reorder
- /* skb: hdr + (possible reassembled) full plaintext payload */
- payload = skb->data + hdrlen;
-
- rxb = kmalloc(sizeof(struct ieee80211_rxb), GFP_ATOMIC);
- if(rxb == NULL)
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR,"%s(): kmalloc rxb error\n",__FUNCTION__);
- goto rx_dropped;
- }
- /* to parse amsdu packets */
- /* qos data packets & reserved bit is 1 */
- if(parse_subframe(ieee, skb,rx_stats,rxb,src,dst) == 0) {
- /* only to free rxb, and not submit the packets to upper layer */
- for(i =0; i < rxb->nr_subframes; i++) {
- dev_kfree_skb(rxb->subframes[i]);
- }
- kfree(rxb);
- rxb = NULL;
- goto rx_dropped;
- }
-
-#ifdef ENABLE_LPS
- if(unicast_packet)
- {
- if (type == IEEE80211_FTYPE_DATA)
- {
-
- if(ieee->bIsAggregateFrame)
- ieee->LinkDetectInfo.NumRxUnicastOkInPeriod+=rxb->nr_subframes;
- else
- ieee->LinkDetectInfo.NumRxUnicastOkInPeriod++;
-
- // 2009.03.03 Leave DC mode immediately when detect high traffic
- if((ieee->state == IEEE80211_LINKED) /*&& !MgntInitAdapterInProgress(pMgntInfo)*/)
- {
- if( ((ieee->LinkDetectInfo.NumRxUnicastOkInPeriod +ieee->LinkDetectInfo.NumTxOkInPeriod) > 8 ) ||
- (ieee->LinkDetectInfo.NumRxUnicastOkInPeriod > 2) )
- {
- if(ieee->LeisurePSLeave)
- ieee->LeisurePSLeave(ieee);
- }
- }
- }
- }
-#endif
-
- ieee->last_rx_ps_time = jiffies;
-//added by amy for reorder
- if(ieee->pHTInfo->bCurRxReorderEnable == false ||pTS == NULL){
-//added by amy for reorder
- for(i = 0; i<rxb->nr_subframes; i++) {
- struct sk_buff *sub_skb = rxb->subframes[i];
-
- if (sub_skb) {
- /* convert hdr + possible LLC headers into Ethernet header */
- ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
- if (sub_skb->len >= 8 &&
- ((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 &&
- ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) {
- /* remove RFC1042 or Bridge-Tunnel encapsulation and
- * replace EtherType */
- skb_pull(sub_skb, SNAP_SIZE);
- memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
- } else {
- u16 len;
- /* Leave Ethernet header part of hdr and full payload */
- len = htons(sub_skb->len);
- memcpy(skb_push(sub_skb, 2), &len, 2);
- memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
- }
-
- stats->rx_packets++;
- stats->rx_bytes += sub_skb->len;
- if(is_multicast_ether_addr(dst)) {
- stats->multicast++;
- }
-
- /* Indicat the packets to upper layer */
- sub_skb->protocol = eth_type_trans(sub_skb, dev);
- memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
- sub_skb->dev = dev;
- sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
- netif_rx(sub_skb);
- }
- }
- kfree(rxb);
- rxb = NULL;
-
- }
- else
- {
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): REORDER ENABLE AND PTS not NULL, and we will enter RxReorderIndicatePacket()\n",__FUNCTION__);
- RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);
- }
-#ifndef JOHN_NOCPY
- dev_kfree_skb(skb);
-#endif
-
- rx_exit:
-#ifdef NOT_YET
- if (sta)
- hostap_handle_sta_release(sta);
-#endif
- return 1;
-
- rx_dropped:
- kfree(rxb);
- rxb = NULL;
- stats->rx_dropped++;
-
- /* Returning 0 indicates to caller that we have not handled the SKB--
- * so it is still allocated and can be used again by underlying
- * hardware as a DMA target */
- return 0;
-}
-
-#define MGMT_FRAME_FIXED_PART_LENGTH 0x24
-
-static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
-
-/*
-* Make the structure we read from the beacon packet to have
-* the right values
-*/
-static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element
- *info_element, int sub_type)
-{
-
- if (info_element->qui_subtype != sub_type)
- return -1;
- if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN))
- return -1;
- if (info_element->qui_type != QOS_OUI_TYPE)
- return -1;
- if (info_element->version != QOS_VERSION_1)
- return -1;
-
- return 0;
-}
-
-
-/*
- * Parse a QoS parameter element
- */
-static int ieee80211_read_qos_param_element(struct ieee80211_qos_parameter_info
- *element_param, struct ieee80211_info_element
- *info_element)
-{
- int ret = 0;
- u16 size = sizeof(struct ieee80211_qos_parameter_info) - 2;
-
- if ((info_element == NULL) || (element_param == NULL))
- return -1;
-
- if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) {
- memcpy(element_param->info_element.qui, info_element->data,
- info_element->len);
- element_param->info_element.elementID = info_element->id;
- element_param->info_element.length = info_element->len;
- } else
- ret = -1;
- if (ret == 0)
- ret = ieee80211_verify_qos_info(&element_param->info_element,
- QOS_OUI_PARAM_SUB_TYPE);
- return ret;
-}
-
-/*
- * Parse a QoS information element
- */
-static int ieee80211_read_qos_info_element(struct
- ieee80211_qos_information_element
- *element_info, struct ieee80211_info_element
- *info_element)
-{
- int ret = 0;
- u16 size = sizeof(struct ieee80211_qos_information_element) - 2;
-
- if (element_info == NULL)
- return -1;
- if (info_element == NULL)
- return -1;
-
- if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) {
- memcpy(element_info->qui, info_element->data,
- info_element->len);
- element_info->elementID = info_element->id;
- element_info->length = info_element->len;
- } else
- ret = -1;
-
- if (ret == 0)
- ret = ieee80211_verify_qos_info(element_info,
- QOS_OUI_INFO_SUB_TYPE);
- return ret;
-}
-
-
-/*
- * Write QoS parameters from the ac parameters.
- */
-static int ieee80211_qos_convert_ac_to_parameters(struct
- ieee80211_qos_parameter_info
- *param_elm, struct
- ieee80211_qos_parameters
- *qos_param)
-{
- int rc = 0;
- int i;
- struct ieee80211_qos_ac_parameter *ac_params;
- u8 aci;
-
- for (i = 0; i < QOS_QUEUE_NUM; i++) {
- ac_params = &(param_elm->ac_params_record[i]);
-
- aci = (ac_params->aci_aifsn & 0x60) >> 5;
-
- if(aci >= QOS_QUEUE_NUM)
- continue;
- qos_param->aifs[aci] = (ac_params->aci_aifsn) & 0x0f;
-
- /* WMM spec P.11: The minimum value for AIFSN shall be 2 */
- qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2:qos_param->aifs[aci];
-
- qos_param->cw_min[aci] = ac_params->ecw_min_max & 0x0F;
-
- qos_param->cw_max[aci] = (ac_params->ecw_min_max & 0xF0) >> 4;
-
- qos_param->flag[aci] =
- (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
- qos_param->tx_op_limit[aci] = le16_to_cpu(ac_params->tx_op_limit);
- }
- return rc;
-}
-
-/*
- * we have a generic data element which it may contain QoS information or
- * parameters element. check the information element length to decide
- * which type to read
- */
-static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element
- *info_element,
- struct ieee80211_network *network)
-{
- int rc = 0;
- struct ieee80211_qos_parameters *qos_param = NULL;
- struct ieee80211_qos_information_element qos_info_element;
-
- rc = ieee80211_read_qos_info_element(&qos_info_element, info_element);
-
- if (rc == 0) {
- network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
- network->flags |= NETWORK_HAS_QOS_INFORMATION;
- } else {
- struct ieee80211_qos_parameter_info param_element;
-
- rc = ieee80211_read_qos_param_element(&param_element,
- info_element);
- if (rc == 0) {
- qos_param = &(network->qos_data.parameters);
- ieee80211_qos_convert_ac_to_parameters(&param_element,
- qos_param);
- network->flags |= NETWORK_HAS_QOS_PARAMETERS;
- network->qos_data.param_count =
- param_element.info_element.ac_info & 0x0F;
- }
- }
-
- if (rc == 0) {
- IEEE80211_DEBUG_QOS("QoS is supported\n");
- network->qos_data.supported = 1;
- }
- return rc;
-}
-
-#ifdef CONFIG_IEEE80211_DEBUG
-#define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x
-
-static const char *get_info_element_string(u16 id)
-{
- switch (id) {
- MFIE_STRING(SSID);
- MFIE_STRING(RATES);
- MFIE_STRING(FH_SET);
- MFIE_STRING(DS_SET);
- MFIE_STRING(CF_SET);
- MFIE_STRING(TIM);
- MFIE_STRING(IBSS_SET);
- MFIE_STRING(COUNTRY);
- MFIE_STRING(HOP_PARAMS);
- MFIE_STRING(HOP_TABLE);
- MFIE_STRING(REQUEST);
- MFIE_STRING(CHALLENGE);
- MFIE_STRING(POWER_CONSTRAINT);
- MFIE_STRING(POWER_CAPABILITY);
- MFIE_STRING(TPC_REQUEST);
- MFIE_STRING(TPC_REPORT);
- MFIE_STRING(SUPP_CHANNELS);
- MFIE_STRING(CSA);
- MFIE_STRING(MEASURE_REQUEST);
- MFIE_STRING(MEASURE_REPORT);
- MFIE_STRING(QUIET);
- MFIE_STRING(IBSS_DFS);
- // MFIE_STRING(ERP_INFO);
- MFIE_STRING(RSN);
- MFIE_STRING(RATES_EX);
- MFIE_STRING(GENERIC);
- MFIE_STRING(QOS_PARAMETER);
- default:
- return "UNKNOWN";
- }
-}
-#endif
-
-#ifdef ENABLE_DOT11D
-static inline void ieee80211_extract_country_ie(
- struct ieee80211_device *ieee,
- struct ieee80211_info_element *info_element,
- struct ieee80211_network *network,
- u8 * addr2
-)
-{
- if(IS_DOT11D_ENABLE(ieee))
- {
- if(info_element->len!= 0)
- {
- memcpy(network->CountryIeBuf, info_element->data, info_element->len);
- network->CountryIeLen = info_element->len;
-
- if(!IS_COUNTRY_IE_VALID(ieee))
- {
- Dot11d_UpdateCountryIe(ieee, addr2, info_element->len, info_element->data);
- }
- }
-
- //
- // 070305, rcnjko: I update country IE watch dog here because
- // some AP (e.g. Cisco 1242) don't include country IE in their
- // probe response frame.
- //
- if(IS_EQUAL_CIE_SRC(ieee, addr2) )
- {
- UPDATE_CIE_WATCHDOG(ieee);
- }
- }
-
-}
-#endif
-
-int ieee80211_parse_info_param(struct ieee80211_device *ieee,
- struct ieee80211_info_element *info_element,
- u16 length,
- struct ieee80211_network *network,
- struct ieee80211_rx_stats *stats)
-{
- u8 i;
- short offset;
- u16 tmp_htcap_len=0;
- u16 tmp_htinfo_len=0;
- u16 ht_realtek_agg_len=0;
- u8 ht_realtek_agg_buf[MAX_IE_LEN];
-#ifdef CONFIG_IEEE80211_DEBUG
- char rates_str[64];
- char *p;
-#endif
-
- while (length >= sizeof(*info_element)) {
- if (sizeof(*info_element) + info_element->len > length) {
- IEEE80211_DEBUG_MGMT("Info elem: parse failed: "
- "info_element->len + 2 > left : "
- "info_element->len+2=%zd left=%d, id=%d.\n",
- info_element->len +
- sizeof(*info_element),
- length, info_element->id);
- /* We stop processing but don't return an error here
- * because some misbehaviour APs break this rule. ie.
- * Orinoco AP1000. */
- break;
- }
-
- switch (info_element->id) {
- case MFIE_TYPE_SSID:
- if (ieee80211_is_empty_essid(info_element->data,
- info_element->len)) {
- network->flags |= NETWORK_EMPTY_ESSID;
- break;
- }
-
- network->ssid_len = min(info_element->len,
- (u8) IW_ESSID_MAX_SIZE);
- memcpy(network->ssid, info_element->data, network->ssid_len);
- if (network->ssid_len < IW_ESSID_MAX_SIZE)
- memset(network->ssid + network->ssid_len, 0,
- IW_ESSID_MAX_SIZE - network->ssid_len);
-
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n",
- network->ssid, network->ssid_len);
- break;
-
- case MFIE_TYPE_RATES:
-#ifdef CONFIG_IEEE80211_DEBUG
- p = rates_str;
-#endif
- network->rates_len = min(info_element->len,
- MAX_RATES_LENGTH);
- for (i = 0; i < network->rates_len; i++) {
- network->rates[i] = info_element->data[i];
-#ifdef CONFIG_IEEE80211_DEBUG
- p += snprintf(p, sizeof(rates_str) -
- (p - rates_str), "%02X ",
- network->rates[i]);
-#endif
- if (ieee80211_is_ofdm_rate
- (info_element->data[i])) {
- network->flags |= NETWORK_HAS_OFDM;
- if (info_element->data[i] &
- IEEE80211_BASIC_RATE_MASK)
- network->flags &=
- ~NETWORK_HAS_CCK;
- }
- }
-
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n",
- rates_str, network->rates_len);
- break;
-
- case MFIE_TYPE_RATES_EX:
-#ifdef CONFIG_IEEE80211_DEBUG
- p = rates_str;
-#endif
- network->rates_ex_len = min(info_element->len,
- MAX_RATES_EX_LENGTH);
- for (i = 0; i < network->rates_ex_len; i++) {
- network->rates_ex[i] = info_element->data[i];
-#ifdef CONFIG_IEEE80211_DEBUG
- p += snprintf(p, sizeof(rates_str) -
- (p - rates_str), "%02X ",
- network->rates[i]);
-#endif
- if (ieee80211_is_ofdm_rate
- (info_element->data[i])) {
- network->flags |= NETWORK_HAS_OFDM;
- if (info_element->data[i] &
- IEEE80211_BASIC_RATE_MASK)
- network->flags &=
- ~NETWORK_HAS_CCK;
- }
- }
-
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n",
- rates_str, network->rates_ex_len);
- break;
-
- case MFIE_TYPE_DS_SET:
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n",
- info_element->data[0]);
- network->channel = info_element->data[0];
- break;
-
- case MFIE_TYPE_FH_SET:
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n");
- break;
-
- case MFIE_TYPE_CF_SET:
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_CF_SET: ignored\n");
- break;
-
- case MFIE_TYPE_TIM:
- if(info_element->len < 4)
- break;
-
- network->tim.tim_count = info_element->data[0];
- network->tim.tim_period = info_element->data[1];
-
- network->dtim_period = info_element->data[1];
- if(ieee->state != IEEE80211_LINKED)
- break;
- //we use jiffies for legacy Power save
- network->last_dtim_sta_time[0] = jiffies;
- network->last_dtim_sta_time[1] = stats->mac_time[1];
-
- network->dtim_data = IEEE80211_DTIM_VALID;
-
- if(info_element->data[0] != 0)
- break;
-
- if(info_element->data[2] & 1)
- network->dtim_data |= IEEE80211_DTIM_MBCAST;
-
- offset = (info_element->data[2] >> 1)*2;
-
- if(ieee->assoc_id < 8*offset ||
- ieee->assoc_id > 8*(offset + info_element->len -3))
-
- break;
-
- offset = (ieee->assoc_id / 8) - offset;// + ((aid % 8)? 0 : 1) ;
-
- if(info_element->data[3+offset] & (1<<(ieee->assoc_id%8)))
- network->dtim_data |= IEEE80211_DTIM_UCAST;
-
- break;
-
- case MFIE_TYPE_ERP:
- network->erp_value = info_element->data[0];
- network->flags |= NETWORK_HAS_ERP_VALUE;
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n",
- network->erp_value);
- break;
- case MFIE_TYPE_IBSS_SET:
- network->atim_window = info_element->data[0];
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n",
- network->atim_window);
- break;
-
- case MFIE_TYPE_CHALLENGE:
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n");
- break;
-
- case MFIE_TYPE_GENERIC:
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n",
- info_element->len);
- if (!ieee80211_parse_qos_info_param_IE(info_element,
- network))
- break;
-
- if (info_element->len >= 4 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x50 &&
- info_element->data[2] == 0xf2 &&
- info_element->data[3] == 0x01) {
- network->wpa_ie_len = min(info_element->len + 2,
- MAX_WPA_IE_LEN);
- memcpy(network->wpa_ie, info_element,
- network->wpa_ie_len);
- break;
- }
-
-#ifdef THOMAS_TURBO
- if (info_element->len == 7 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0xe0 &&
- info_element->data[2] == 0x4c &&
- info_element->data[3] == 0x01 &&
- info_element->data[4] == 0x02) {
- network->Turbo_Enable = 1;
- }
-#endif
-
- //for HTcap and HTinfo parameters
- if(tmp_htcap_len == 0){
- if(info_element->len >= 4 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x90 &&
- info_element->data[2] == 0x4c &&
- info_element->data[3] == 0x033){
-
- tmp_htcap_len = min(info_element->len,(u8)MAX_IE_LEN);
- if(tmp_htcap_len != 0){
- network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
- network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf)?\
- sizeof(network->bssht.bdHTCapBuf):tmp_htcap_len;
- memcpy(network->bssht.bdHTCapBuf,info_element->data,network->bssht.bdHTCapLen);
- }
- }
- if(tmp_htcap_len != 0)
- network->bssht.bdSupportHT = true;
- else
- network->bssht.bdSupportHT = false;
- }
-
-
- if(tmp_htinfo_len == 0){
- if(info_element->len >= 4 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x90 &&
- info_element->data[2] == 0x4c &&
- info_element->data[3] == 0x034){
-
- tmp_htinfo_len = min(info_element->len,(u8)MAX_IE_LEN);
- if(tmp_htinfo_len != 0){
- network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
- if(tmp_htinfo_len){
- network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf)?\
- sizeof(network->bssht.bdHTInfoBuf):tmp_htinfo_len;
- memcpy(network->bssht.bdHTInfoBuf,info_element->data,network->bssht.bdHTInfoLen);
- }
-
- }
-
- }
- }
-
- if(ieee->aggregation){
- if(network->bssht.bdSupportHT){
- if(info_element->len >= 4 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0xe0 &&
- info_element->data[2] == 0x4c &&
- info_element->data[3] == 0x02){
-
- ht_realtek_agg_len = min(info_element->len,(u8)MAX_IE_LEN);
- memcpy(ht_realtek_agg_buf,info_element->data,info_element->len);
-
- }
- if(ht_realtek_agg_len >= 5){
- network->bssht.bdRT2RTAggregation = true;
-
- if((ht_realtek_agg_buf[4] == 1) && (ht_realtek_agg_buf[5] & 0x02))
- network->bssht.bdRT2RTLongSlotTime = true;
- }
- }
-
- }
-
- if((info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x05 &&
- info_element->data[2] == 0xb5) ||
- (info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x0a &&
- info_element->data[2] == 0xf7) ||
- (info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x10 &&
- info_element->data[2] == 0x18)){
-
- network->broadcom_cap_exist = true;
-
- }
-
- if(info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x0c &&
- info_element->data[2] == 0x43)
- {
- network->ralink_cap_exist = true;
- }
- else
- network->ralink_cap_exist = false;
- //added by amy for atheros AP
- if((info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x03 &&
- info_element->data[2] == 0x7f) ||
- (info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x13 &&
- info_element->data[2] == 0x74))
- {
- network->atheros_cap_exist = true;
- }
- else
- network->atheros_cap_exist = false;
-
- if ((info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x50 &&
- info_element->data[2] == 0x43) )
- {
- network->marvell_cap_exist = true;
- }
-
-
- if(info_element->len >= 3 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x40 &&
- info_element->data[2] == 0x96)
- {
- network->cisco_cap_exist = true;
- }
- else
- network->cisco_cap_exist = false;
- //added by amy for LEAP of cisco
- if(info_element->len > 4 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x40 &&
- info_element->data[2] == 0x96 &&
- info_element->data[3] == 0x01)
- {
- if(info_element->len == 6)
- {
- memcpy(network->CcxRmState, &info_element[4], 2);
- if(network->CcxRmState[0] != 0)
- {
- network->bCcxRmEnable = true;
- }
- else
- network->bCcxRmEnable = false;
- //
- // CCXv4 Table 59-1 MBSSID Masks.
- //
- network->MBssidMask = network->CcxRmState[1] & 0x07;
- if(network->MBssidMask != 0)
- {
- network->bMBssidValid = true;
- network->MBssidMask = 0xff << (network->MBssidMask);
- cpMacAddr(network->MBssid, network->bssid);
- network->MBssid[5] &= network->MBssidMask;
- }
- else
- {
- network->bMBssidValid = false;
- }
- }
- else
- {
- network->bCcxRmEnable = false;
- }
- }
- if(info_element->len > 4 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x40 &&
- info_element->data[2] == 0x96 &&
- info_element->data[3] == 0x03)
- {
- if(info_element->len == 5)
- {
- network->bWithCcxVerNum = true;
- network->BssCcxVerNumber = info_element->data[4];
- }
- else
- {
- network->bWithCcxVerNum = false;
- network->BssCcxVerNumber = 0;
- }
- }
- break;
-
- case MFIE_TYPE_RSN:
- IEEE80211_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n",
- info_element->len);
- network->rsn_ie_len = min(info_element->len + 2,
- MAX_WPA_IE_LEN);
- memcpy(network->rsn_ie, info_element,
- network->rsn_ie_len);
- break;
-
- //HT related element.
- case MFIE_TYPE_HT_CAP:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_CAP: %d bytes\n",
- info_element->len);
- tmp_htcap_len = min(info_element->len,(u8)MAX_IE_LEN);
- if(tmp_htcap_len != 0){
- network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
- network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf)?\
- sizeof(network->bssht.bdHTCapBuf):tmp_htcap_len;
- memcpy(network->bssht.bdHTCapBuf,info_element->data,network->bssht.bdHTCapLen);
-
- //If peer is HT, but not WMM, call QosSetLegacyWMMParamWithHT()
- // windows driver will update WMM parameters each beacon received once connected
- // Linux driver is a bit different.
- network->bssht.bdSupportHT = true;
- }
- else
- network->bssht.bdSupportHT = false;
- break;
-
-
- case MFIE_TYPE_HT_INFO:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_INFO: %d bytes\n",
- info_element->len);
- tmp_htinfo_len = min(info_element->len,(u8)MAX_IE_LEN);
- if(tmp_htinfo_len){
- network->bssht.bdHTSpecVer = HT_SPEC_VER_IEEE;
- network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf)?\
- sizeof(network->bssht.bdHTInfoBuf):tmp_htinfo_len;
- memcpy(network->bssht.bdHTInfoBuf,info_element->data,network->bssht.bdHTInfoLen);
- }
- break;
-
- case MFIE_TYPE_AIRONET:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_AIRONET: %d bytes\n",
- info_element->len);
- if(info_element->len >IE_CISCO_FLAG_POSITION)
- {
- network->bWithAironetIE = true;
-
- // CCX 1 spec v1.13, A01.1 CKIP Negotiation (page23):
- // "A Cisco access point advertises support for CKIP in beacon and probe response packets,
- // by adding an Aironet element and setting one or both of the CKIP negotiation bits."
- if( (info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_MIC) ||
- (info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_PK) )
- {
- network->bCkipSupported = true;
- }
- else
- {
- network->bCkipSupported = false;
- }
- }
- else
- {
- network->bWithAironetIE = false;
- network->bCkipSupported = false;
- }
- break;
- case MFIE_TYPE_QOS_PARAMETER:
- printk(KERN_ERR
- "QoS Error need to parse QOS_PARAMETER IE\n");
- break;
-
-#ifdef ENABLE_DOT11D
- case MFIE_TYPE_COUNTRY:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_COUNTRY: %d bytes\n",
- info_element->len);
- ieee80211_extract_country_ie(ieee, info_element, network, network->bssid);//addr2 is same as addr3 when from an AP
- break;
-#endif
-
- default:
- IEEE80211_DEBUG_MGMT
- ("Unsupported info element: %s (%d)\n",
- get_info_element_string(info_element->id),
- info_element->id);
- break;
- }
-
- length -= sizeof(*info_element) + info_element->len;
- info_element =
- (struct ieee80211_info_element *)&info_element->
- data[info_element->len];
- }
-
- if(!network->atheros_cap_exist && !network->broadcom_cap_exist &&
- !network->cisco_cap_exist && !network->ralink_cap_exist && !network->bssht.bdRT2RTAggregation &&
- !network->marvell_cap_exist)
- {
- network->unknown_cap_exist = true;
- }
- else
- {
- network->unknown_cap_exist = false;
- }
- return 0;
-}
-
-static inline u8 ieee80211_SignalStrengthTranslate(
- u8 CurrSS
- )
-{
- u8 RetSS;
-
- // Step 1. Scale mapping.
- if(CurrSS >= 71 && CurrSS <= 100)
- {
- RetSS = 90 + ((CurrSS - 70) / 3);
- }
- else if(CurrSS >= 41 && CurrSS <= 70)
- {
- RetSS = 78 + ((CurrSS - 40) / 3);
- }
- else if(CurrSS >= 31 && CurrSS <= 40)
- {
- RetSS = 66 + (CurrSS - 30);
- }
- else if(CurrSS >= 21 && CurrSS <= 30)
- {
- RetSS = 54 + (CurrSS - 20);
- }
- else if(CurrSS >= 5 && CurrSS <= 20)
- {
- RetSS = 42 + (((CurrSS - 5) * 2) / 3);
- }
- else if(CurrSS == 4)
- {
- RetSS = 36;
- }
- else if(CurrSS == 3)
- {
- RetSS = 27;
- }
- else if(CurrSS == 2)
- {
- RetSS = 18;
- }
- else if(CurrSS == 1)
- {
- RetSS = 9;
- }
- else
- {
- RetSS = CurrSS;
- }
-
- return RetSS;
-}
-
-long ieee80211_translate_todbm(u8 signal_strength_index )// 0-100 index.
-{
- long signal_power; // in dBm.
-
- // Translate to dBm (x=0.5y-95).
- signal_power = (long)((signal_strength_index + 1) >> 1);
- signal_power -= 95;
-
- return signal_power;
-}
-
-static inline int ieee80211_network_init(
- struct ieee80211_device *ieee,
- struct ieee80211_probe_response *beacon,
- struct ieee80211_network *network,
- struct ieee80211_rx_stats *stats)
-{
- network->qos_data.active = 0;
- network->qos_data.supported = 0;
- network->qos_data.param_count = 0;
- network->qos_data.old_param_count = 0;
-
- /* Pull out fixed field data */
- memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
- network->capability = le16_to_cpu(beacon->capability);
- network->last_scanned = jiffies;
- network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]);
- network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]);
- network->beacon_interval = le32_to_cpu(beacon->beacon_interval);
- /* Where to pull this? beacon->listen_interval;*/
- network->listen_interval = 0x0A;
- network->rates_len = network->rates_ex_len = 0;
- network->last_associate = 0;
- network->ssid_len = 0;
- network->flags = 0;
- network->atim_window = 0;
- network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ?
- 0x3 : 0x0;
- network->berp_info_valid = false;
- network->broadcom_cap_exist = false;
- network->ralink_cap_exist = false;
- network->atheros_cap_exist = false;
- network->marvell_cap_exist = false;
- network->cisco_cap_exist = false;
- network->unknown_cap_exist = false;
-#ifdef THOMAS_TURBO
- network->Turbo_Enable = 0;
-#endif
-#ifdef ENABLE_DOT11D
- network->CountryIeLen = 0;
- memset(network->CountryIeBuf, 0, MAX_IE_LEN);
-#endif
-//Initialize HT parameters
- HTInitializeBssDesc(&network->bssht);
- if (stats->freq == IEEE80211_52GHZ_BAND) {
- /* for A band (No DS info) */
- network->channel = stats->received_channel;
- } else
- network->flags |= NETWORK_HAS_CCK;
-
- network->wpa_ie_len = 0;
- network->rsn_ie_len = 0;
-
- if (ieee80211_parse_info_param
- (ieee,beacon->info_element, stats->len - sizeof(*beacon), network, stats))
- return 1;
-
- network->mode = 0;
- if (stats->freq == IEEE80211_52GHZ_BAND)
- network->mode = IEEE_A;
- else {
- if (network->flags & NETWORK_HAS_OFDM)
- network->mode |= IEEE_G;
- if (network->flags & NETWORK_HAS_CCK)
- network->mode |= IEEE_B;
- }
-
- if (network->mode == 0) {
- IEEE80211_DEBUG_SCAN("Filtered out '%s (%pM)' "
- "network.\n",
- escape_essid(network->ssid,
- network->ssid_len),
- network->bssid);
- return 1;
- }
-
- if(network->bssht.bdSupportHT){
- if(network->mode == IEEE_A)
- network->mode = IEEE_N_5G;
- else if(network->mode & (IEEE_G | IEEE_B))
- network->mode = IEEE_N_24G;
- }
- if (ieee80211_is_empty_essid(network->ssid, network->ssid_len))
- network->flags |= NETWORK_EMPTY_ESSID;
-
- stats->signal = 30 + (stats->SignalStrength * 70) / 100;
- stats->noise = ieee80211_translate_todbm((u8)(100-stats->signal)) -25;
-
- memcpy(&network->stats, stats, sizeof(network->stats));
-
- return 0;
-}
-
-static inline int is_same_network(struct ieee80211_network *src,
- struct ieee80211_network *dst, struct ieee80211_device* ieee)
-{
- /* A network is only a duplicate if the channel, BSSID, ESSID
- * and the capability field (in particular IBSS and BSS) all match.
- * We treat all <hidden> with the same BSSID and channel
- * as one network */
- return (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) &&
- (src->channel == dst->channel) &&
- !memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
- (!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) &&
- ((src->capability & WLAN_CAPABILITY_IBSS) ==
- (dst->capability & WLAN_CAPABILITY_IBSS)) &&
- ((src->capability & WLAN_CAPABILITY_BSS) ==
- (dst->capability & WLAN_CAPABILITY_BSS)));
-}
-
-static inline void update_network(struct ieee80211_network *dst,
- struct ieee80211_network *src)
-{
- int qos_active;
- u8 old_param;
-
- memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats));
- dst->capability = src->capability;
- memcpy(dst->rates, src->rates, src->rates_len);
- dst->rates_len = src->rates_len;
- memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
- dst->rates_ex_len = src->rates_ex_len;
- if(src->ssid_len > 0)
- {
- memset(dst->ssid, 0, dst->ssid_len);
- dst->ssid_len = src->ssid_len;
- memcpy(dst->ssid, src->ssid, src->ssid_len);
- }
- dst->mode = src->mode;
- dst->flags = src->flags;
- dst->time_stamp[0] = src->time_stamp[0];
- dst->time_stamp[1] = src->time_stamp[1];
- if (src->flags & NETWORK_HAS_ERP_VALUE)
- {
- dst->erp_value = src->erp_value;
- dst->berp_info_valid = src->berp_info_valid = true;
- }
- dst->beacon_interval = src->beacon_interval;
- dst->listen_interval = src->listen_interval;
- dst->atim_window = src->atim_window;
- dst->dtim_period = src->dtim_period;
- dst->dtim_data = src->dtim_data;
- dst->last_dtim_sta_time[0] = src->last_dtim_sta_time[0];
- dst->last_dtim_sta_time[1] = src->last_dtim_sta_time[1];
- memcpy(&dst->tim, &src->tim, sizeof(struct ieee80211_tim_parameters));
-
- dst->bssht.bdSupportHT = src->bssht.bdSupportHT;
- dst->bssht.bdRT2RTAggregation = src->bssht.bdRT2RTAggregation;
- dst->bssht.bdHTCapLen= src->bssht.bdHTCapLen;
- memcpy(dst->bssht.bdHTCapBuf,src->bssht.bdHTCapBuf,src->bssht.bdHTCapLen);
- dst->bssht.bdHTInfoLen= src->bssht.bdHTInfoLen;
- memcpy(dst->bssht.bdHTInfoBuf,src->bssht.bdHTInfoBuf,src->bssht.bdHTInfoLen);
- dst->bssht.bdHTSpecVer = src->bssht.bdHTSpecVer;
- dst->bssht.bdRT2RTLongSlotTime = src->bssht.bdRT2RTLongSlotTime;
- dst->broadcom_cap_exist = src->broadcom_cap_exist;
- dst->ralink_cap_exist = src->ralink_cap_exist;
- dst->atheros_cap_exist = src->atheros_cap_exist;
- dst->marvell_cap_exist = src->marvell_cap_exist;
- dst->cisco_cap_exist = src->cisco_cap_exist;
- dst->unknown_cap_exist = src->unknown_cap_exist;
- memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
- dst->wpa_ie_len = src->wpa_ie_len;
- memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len);
- dst->rsn_ie_len = src->rsn_ie_len;
-
- dst->last_scanned = jiffies;
- /* qos related parameters */
- qos_active = dst->qos_data.active;
- old_param = dst->qos_data.param_count;
- if(dst->flags & NETWORK_HAS_QOS_MASK){
- //not update QOS paramter in beacon, as most AP will set all these parameter to 0.//WB
- }
- else {
- dst->qos_data.supported = src->qos_data.supported;
- dst->qos_data.param_count = src->qos_data.param_count;
- }
-
- if(dst->qos_data.supported == 1) {
- dst->QoS_Enable = 1;
- if(dst->ssid_len)
- IEEE80211_DEBUG_QOS
- ("QoS the network %s is QoS supported\n",
- dst->ssid);
- else
- IEEE80211_DEBUG_QOS
- ("QoS the network is QoS supported\n");
- }
- dst->qos_data.active = qos_active;
- dst->qos_data.old_param_count = old_param;
-
- /* dst->last_associate is not overwritten */
- dst->wmm_info = src->wmm_info; //sure to exist in beacon or probe response frame.
- if(src->wmm_param[0].ac_aci_acm_aifsn|| \
- src->wmm_param[1].ac_aci_acm_aifsn|| \
- src->wmm_param[2].ac_aci_acm_aifsn|| \
- src->wmm_param[3].ac_aci_acm_aifsn) {
- memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
- }
-#ifdef THOMAS_TURBO
- dst->Turbo_Enable = src->Turbo_Enable;
-#endif
-
-#ifdef ENABLE_DOT11D
- dst->CountryIeLen = src->CountryIeLen;
- memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen);
-#endif
-
- //added by amy for LEAP
- dst->bWithAironetIE = src->bWithAironetIE;
- dst->bCkipSupported = src->bCkipSupported;
- memcpy(dst->CcxRmState,src->CcxRmState,2);
- dst->bCcxRmEnable = src->bCcxRmEnable;
- dst->MBssidMask = src->MBssidMask;
- dst->bMBssidValid = src->bMBssidValid;
- memcpy(dst->MBssid,src->MBssid,6);
- dst->bWithCcxVerNum = src->bWithCcxVerNum;
- dst->BssCcxVerNumber = src->BssCcxVerNumber;
-
-}
-
-static inline int is_beacon(__le16 fc)
-{
- return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON);
-}
-
-static inline void ieee80211_process_probe_response(
- struct ieee80211_device *ieee,
- struct ieee80211_probe_response *beacon,
- struct ieee80211_rx_stats *stats)
-{
- struct ieee80211_network network;
- struct ieee80211_network *target;
- struct ieee80211_network *oldest = NULL;
-#ifdef CONFIG_IEEE80211_DEBUG
- struct ieee80211_info_element *info_element = &beacon->info_element[0];
-#endif
- unsigned long flags;
- short renew;
-
- memset(&network, 0, sizeof(struct ieee80211_network));
- IEEE80211_DEBUG_SCAN(
- "'%s' (%pM): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
- escape_essid(info_element->data, info_element->len),
- beacon->header.addr3,
- (beacon->capability & (1<<0xf)) ? '1' : '0',
- (beacon->capability & (1<<0xe)) ? '1' : '0',
- (beacon->capability & (1<<0xd)) ? '1' : '0',
- (beacon->capability & (1<<0xc)) ? '1' : '0',
- (beacon->capability & (1<<0xb)) ? '1' : '0',
- (beacon->capability & (1<<0xa)) ? '1' : '0',
- (beacon->capability & (1<<0x9)) ? '1' : '0',
- (beacon->capability & (1<<0x8)) ? '1' : '0',
- (beacon->capability & (1<<0x7)) ? '1' : '0',
- (beacon->capability & (1<<0x6)) ? '1' : '0',
- (beacon->capability & (1<<0x5)) ? '1' : '0',
- (beacon->capability & (1<<0x4)) ? '1' : '0',
- (beacon->capability & (1<<0x3)) ? '1' : '0',
- (beacon->capability & (1<<0x2)) ? '1' : '0',
- (beacon->capability & (1<<0x1)) ? '1' : '0',
- (beacon->capability & (1<<0x0)) ? '1' : '0');
-
- if (ieee80211_network_init(ieee, beacon, &network, stats)) {
- IEEE80211_DEBUG_SCAN("Dropped '%s' (%pM) via %s.\n",
- escape_essid(info_element->data,
- info_element->len),
- beacon->header.addr3,
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
- IEEE80211_STYPE_PROBE_RESP ?
- "PROBE RESPONSE" : "BEACON");
- return;
- }
-
-#ifdef ENABLE_DOT11D
- // For Asus EeePc request,
- // (1) if wireless adapter receive get any 802.11d country code in AP beacon,
- // wireless adapter should follow the country code.
- // (2) If there is no any country code in beacon,
- // then wireless adapter should do active scan from ch1~11 and
- // passive scan from ch12~14
-
- if( !IsLegalChannel(ieee, network.channel) )
- return;
- if(ieee->bGlobalDomain)
- {
- if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP)
- {
- // Case 1: Country code
- if(IS_COUNTRY_IE_VALID(ieee) )
- {
- if( !IsLegalChannel(ieee, network.channel) )
- {
- printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network.channel);
- return;
- }
- }
- // Case 2: No any country code.
- else
- {
- // Filter over channel ch12~14
- if(network.channel > 11)
- {
- printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network.channel);
- return;
- }
- }
- }
- else
- {
- // Case 1: Country code
- if(IS_COUNTRY_IE_VALID(ieee) )
- {
- if( !IsLegalChannel(ieee, network.channel) )
- {
- printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n",network.channel);
- return;
- }
- }
- // Case 2: No any country code.
- else
- {
- // Filter over channel ch12~14
- if(network.channel > 14)
- {
- printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n",network.channel);
- return;
- }
- }
- }
- }
-#endif
-
- /* The network parsed correctly -- so now we scan our known networks
- * to see if we can find it in our list.
- *
- * NOTE: This search is definitely not optimized. Once its doing
- * the "right thing" we'll optimize it for efficiency if
- * necessary */
-
- /* Search for this entry in the list and update it if it is
- * already there. */
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- if(is_same_network(&ieee->current_network, &network, ieee)) {
- update_network(&ieee->current_network, &network);
- if((ieee->current_network.mode == IEEE_N_24G || ieee->current_network.mode == IEEE_G)
- && ieee->current_network.berp_info_valid){
- if(ieee->current_network.erp_value& ERP_UseProtection)
- ieee->current_network.buseprotection = true;
- else
- ieee->current_network.buseprotection = false;
- }
- if(is_beacon(beacon->header.frame_ctl))
- {
- if(ieee->state == IEEE80211_LINKED)
- ieee->LinkDetectInfo.NumRecvBcnInPeriod++;
- }
- else //hidden AP
- network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & ieee->current_network.flags);
- }
-
- list_for_each_entry(target, &ieee->network_list, list) {
- if (is_same_network(target, &network, ieee))
- break;
- if ((oldest == NULL) ||
- (target->last_scanned < oldest->last_scanned))
- oldest = target;
- }
-
- /* If we didn't find a match, then get a new network slot to initialize
- * with this beacon's information */
- if (&target->list == &ieee->network_list) {
- if (list_empty(&ieee->network_free_list)) {
- /* If there are no more slots, expire the oldest */
- list_del(&oldest->list);
- target = oldest;
- IEEE80211_DEBUG_SCAN("Expired '%s' (%pM) from "
- "network list.\n",
- escape_essid(target->ssid,
- target->ssid_len),
- target->bssid);
- } else {
- /* Otherwise just pull from the free list */
- target = list_entry(ieee->network_free_list.next,
- struct ieee80211_network, list);
- list_del(ieee->network_free_list.next);
- }
-
-
-#ifdef CONFIG_IEEE80211_DEBUG
- IEEE80211_DEBUG_SCAN("Adding '%s' (%pM) via %s.\n",
- escape_essid(network.ssid,
- network.ssid_len),
- network.bssid,
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
- IEEE80211_STYPE_PROBE_RESP ?
- "PROBE RESPONSE" : "BEACON");
-#endif
- memcpy(target, &network, sizeof(*target));
- list_add_tail(&target->list, &ieee->network_list);
- } else {
- IEEE80211_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n",
- escape_essid(target->ssid,
- target->ssid_len),
- target->bssid,
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
- IEEE80211_STYPE_PROBE_RESP ?
- "PROBE RESPONSE" : "BEACON");
-
- /* we have an entry and we are going to update it. But this entry may
- * be already expired. In this case we do the same as we found a new
- * net and call the new_net handler
- */
- renew = !time_after(target->last_scanned + ieee->scan_age, jiffies);
- //YJ,add,080819,for hidden ap
- if(is_beacon(beacon->header.frame_ctl) == 0)
- network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & target->flags);
- if(((network.flags & NETWORK_EMPTY_ESSID) == NETWORK_EMPTY_ESSID) \
- && (((network.ssid_len > 0) && (strncmp(target->ssid, network.ssid, network.ssid_len)))\
- ||((ieee->current_network.ssid_len == network.ssid_len)&&(strncmp(ieee->current_network.ssid, network.ssid, network.ssid_len) == 0)&&(ieee->state == IEEE80211_NOLINK))))
- renew = 1;
- //YJ,add,080819,for hidden ap,end
-
- update_network(target, &network);
- }
-
- spin_unlock_irqrestore(&ieee->lock, flags);
- if (is_beacon(beacon->header.frame_ctl)&&is_same_network(&ieee->current_network, &network, ieee)&&\
- (ieee->state == IEEE80211_LINKED)) {
- if(ieee->handle_beacon != NULL) {
- ieee->handle_beacon(ieee, beacon, &ieee->current_network);
- }
- }
-}
-
-void ieee80211_rx_mgt(struct ieee80211_device *ieee,
- struct ieee80211_hdr_4addr *header,
- struct ieee80211_rx_stats *stats)
-{
- if(WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_PROBE_RESP &&
- WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_BEACON)
- ieee->last_rx_ps_time = jiffies;
-
- switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
-
- case IEEE80211_STYPE_BEACON:
- IEEE80211_DEBUG_MGMT("received BEACON (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
- IEEE80211_DEBUG_SCAN("Beacon\n");
- ieee80211_process_probe_response(
- ieee, (struct ieee80211_probe_response *)header, stats);
-
- if(ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
- ieee->iw_mode == IW_MODE_INFRA &&
- ieee->state == IEEE80211_LINKED))
- {
- tasklet_schedule(&ieee->ps_task);
- }
-
- break;
-
- case IEEE80211_STYPE_PROBE_RESP:
- IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
- IEEE80211_DEBUG_SCAN("Probe response\n");
- ieee80211_process_probe_response(
- ieee, (struct ieee80211_probe_response *)header, stats);
- break;
-
- }
-}
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
deleted file mode 100644
index 60e9a09d933..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
+++ /dev/null
@@ -1,3278 +0,0 @@
-/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
- *
- * Mostly extracted from the rtl8180-sa2400 driver for the
- * in-kernel generic ieee802.11 stack.
- *
- * Few lines might be stolen from other part of the ieee80211
- * stack. Copyright who own it's copyright
- *
- * WPA code stolen from the ipw2200 driver.
- * Copyright who own it's copyright.
- *
- * released under the GPL
- */
-
-
-#include "ieee80211.h"
-
-#include <linux/random.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <asm/uaccess.h>
-#ifdef ENABLE_DOT11D
-#include "dot11d.h"
-#endif
-
-u8 rsn_authen_cipher_suite[16][4] = {
- {0x00,0x0F,0xAC,0x00}, //Use group key, //Reserved
- {0x00,0x0F,0xAC,0x01}, //WEP-40 //RSNA default
- {0x00,0x0F,0xAC,0x02}, //TKIP //NONE //{used just as default}
- {0x00,0x0F,0xAC,0x03}, //WRAP-historical
- {0x00,0x0F,0xAC,0x04}, //CCMP
- {0x00,0x0F,0xAC,0x05}, //WEP-104
-};
-
-short ieee80211_is_54g(struct ieee80211_network net)
-{
- return ((net.rates_ex_len > 0) || (net.rates_len > 4));
-}
-
-short ieee80211_is_shortslot(struct ieee80211_network net)
-{
- return (net.capability & WLAN_CAPABILITY_SHORT_SLOT);
-}
-
-/* returns the total length needed for pleacing the RATE MFIE
- * tag and the EXTENDED RATE MFIE tag if needed.
- * It encludes two bytes per tag for the tag itself and its len
- */
-unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
-{
- unsigned int rate_len = 0;
-
- if (ieee->modulation & IEEE80211_CCK_MODULATION)
- rate_len = IEEE80211_CCK_RATE_LEN + 2;
-
- if (ieee->modulation & IEEE80211_OFDM_MODULATION)
-
- rate_len += IEEE80211_OFDM_RATE_LEN + 2;
-
- return rate_len;
-}
-
-/* pleace the MFIE rate, tag to the memory (double) poined.
- * Then it updates the pointer so that
- * it points after the new MFIE tag added.
- */
-void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
-{
- u8 *tag = *tag_p;
-
- if (ieee->modulation & IEEE80211_CCK_MODULATION){
- *tag++ = MFIE_TYPE_RATES;
- *tag++ = 4;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
- }
-
- /* We may add an option for custom rates that specific HW might support */
- *tag_p = tag;
-}
-
-void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
-{
- u8 *tag = *tag_p;
-
- if (ieee->modulation & IEEE80211_OFDM_MODULATION){
-
- *tag++ = MFIE_TYPE_RATES_EX;
- *tag++ = 8;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
- *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
-
- }
-
- /* We may add an option for custom rates that specific HW might support */
- *tag_p = tag;
-}
-
-
-void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p) {
- u8 *tag = *tag_p;
-
- *tag++ = MFIE_TYPE_GENERIC; //0
- *tag++ = 7;
- *tag++ = 0x00;
- *tag++ = 0x50;
- *tag++ = 0xf2;
- *tag++ = 0x02;//5
- *tag++ = 0x00;
- *tag++ = 0x01;
-#ifdef SUPPORT_USPD
- if(ieee->current_network.wmm_info & 0x80) {
- *tag++ = 0x0f|MAX_SP_Len;
- } else {
- *tag++ = MAX_SP_Len;
- }
-#else
- *tag++ = MAX_SP_Len;
-#endif
- *tag_p = tag;
-}
-
-#ifdef THOMAS_TURBO
-void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p) {
- u8 *tag = *tag_p;
-
- *tag++ = MFIE_TYPE_GENERIC; //0
- *tag++ = 7;
- *tag++ = 0x00;
- *tag++ = 0xe0;
- *tag++ = 0x4c;
- *tag++ = 0x01;//5
- *tag++ = 0x02;
- *tag++ = 0x11;
- *tag++ = 0x00;
-
- *tag_p = tag;
- printk(KERN_ALERT "This is enable turbo mode IE process\n");
-}
-#endif
-
-void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
-{
- int nh;
- nh = (ieee->mgmt_queue_head +1) % MGMT_QUEUE_NUM;
-
-/*
- * if the queue is full but we have newer frames then
- * just overwrites the oldest.
- *
- * if (nh == ieee->mgmt_queue_tail)
- * return -1;
- */
- ieee->mgmt_queue_head = nh;
- ieee->mgmt_queue_ring[nh] = skb;
-}
-
-struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
-{
- struct sk_buff *ret;
-
- if(ieee->mgmt_queue_tail == ieee->mgmt_queue_head)
- return NULL;
-
- ret = ieee->mgmt_queue_ring[ieee->mgmt_queue_tail];
-
- ieee->mgmt_queue_tail =
- (ieee->mgmt_queue_tail+1) % MGMT_QUEUE_NUM;
-
- return ret;
-}
-
-void init_mgmt_queue(struct ieee80211_device *ieee)
-{
- ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
-}
-
-u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- u8 rate;
-
- // 2008/01/25 MH For broadcom, MGNT frame set as OFDM 6M.
- if(pHTInfo->IOTAction & HT_IOT_ACT_MGNT_USE_CCK_6M)
- rate = 0x0c;
- else
- rate = ieee->basic_rate & 0x7f;
-
- if(rate == 0){
- // 2005.01.26, by rcnjko.
- if(ieee->mode == IEEE_A||
- ieee->mode== IEEE_N_5G||
- (ieee->mode== IEEE_N_24G&&!pHTInfo->bCurSuppCCK))
- rate = 0x0c;
- else
- rate = 0x02;
- }
-
- return rate;
-}
-
-
-void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl);
-
-inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
-{
- unsigned long flags;
- short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
- struct ieee80211_hdr_3addr *header=
- (struct ieee80211_hdr_3addr *) skb->data;
-
- cb_desc *tcb_desc = (cb_desc *)(skb->cb + 8);
- spin_lock_irqsave(&ieee->lock, flags);
-
- /* called with 2nd param 0, no mgmt lock required */
- ieee80211_sta_wakeup(ieee,0);
-
- tcb_desc->queue_index = MGNT_QUEUE;
- tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
- tcb_desc->RATRIndex = 7;
- tcb_desc->bTxDisableRateFallBack = 1;
- tcb_desc->bTxUseDriverAssingedRate = 1;
-
- if(single){
- if(ieee->queue_stop){
- enqueue_mgmt(ieee,skb);
- }else{
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
-
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
-
- /* avoid watchdog triggers */
- ieee->softmac_data_hard_start_xmit(skb, ieee, ieee->basic_rate);
- }
-
- spin_unlock_irqrestore(&ieee->lock, flags);
- }else{
- spin_unlock_irqrestore(&ieee->lock, flags);
- spin_lock(&ieee->mgmt_tx_lock);
-
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
-
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
-
- /* check wether the managed packet queued greater than 5 */
- if(!ieee->check_nic_enough_desc(ieee, tcb_desc->queue_index)||
- (skb_queue_len(&ieee->skb_waitQ[tcb_desc->queue_index]) != 0)||
- (ieee->queue_stop) ) {
- /* insert the skb packet to the management queue */
- /* as for the completion function, it does not need
- * to check it any more.
- * */
- skb_queue_tail(&ieee->skb_waitQ[tcb_desc->queue_index], skb);
- } else {
- ieee->softmac_hard_start_xmit(skb, ieee);
- }
- spin_unlock(&ieee->mgmt_tx_lock);
- }
-}
-
-inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
-{
-
- short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
- struct ieee80211_hdr_3addr *header =
- (struct ieee80211_hdr_3addr *) skb->data;
- cb_desc *tcb_desc = (cb_desc *)(skb->cb + 8);
-
- tcb_desc->queue_index = MGNT_QUEUE;
- tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
- tcb_desc->RATRIndex = 7;
- tcb_desc->bTxDisableRateFallBack = 1;
- tcb_desc->bTxUseDriverAssingedRate = 1;
- if(single){
-
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
-
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
-
- /* avoid watchdog triggers */
- ieee->softmac_data_hard_start_xmit(skb, ieee, ieee->basic_rate);
-
- }else{
-
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
-
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
-
- ieee->softmac_hard_start_xmit(skb, ieee);
-
- }
-}
-
-inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
-{
- unsigned int len,rate_len;
- u8 *tag;
- struct sk_buff *skb;
- struct ieee80211_probe_request *req;
-
- len = ieee->current_network.ssid_len;
-
- rate_len = ieee80211_MFIE_rate_len(ieee);
-
- skb = dev_alloc_skb(sizeof(struct ieee80211_probe_request) +
- 2 + len + rate_len + ieee->tx_headroom);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, ieee->tx_headroom);
-
- req = (struct ieee80211_probe_request *) skb_put(skb,sizeof(struct ieee80211_probe_request));
- req->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- req->header.duration_id = 0; //FIXME: is this OK ?
-
- memset(req->header.addr1, 0xff, ETH_ALEN);
- memcpy(req->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memset(req->header.addr3, 0xff, ETH_ALEN);
-
- tag = (u8 *) skb_put(skb,len+2+rate_len);
-
- *tag++ = MFIE_TYPE_SSID;
- *tag++ = len;
- memcpy(tag, ieee->current_network.ssid, len);
- tag += len;
-
- ieee80211_MFIE_Brate(ieee,&tag);
- ieee80211_MFIE_Grate(ieee,&tag);
- return skb;
-}
-
-struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee);
-void ieee80211_send_beacon(struct ieee80211_device *ieee)
-{
- struct sk_buff *skb;
- if(!ieee->ieee_up)
- return;
-
- skb = ieee80211_get_beacon_(ieee);
-
- if (skb){
- softmac_mgmt_xmit(skb, ieee);
- ieee->softmac_stats.tx_beacons++;
- }
-
- if(ieee->beacon_txing && ieee->ieee_up){
- mod_timer(&ieee->beacon_timer,jiffies+(MSECS(ieee->current_network.beacon_interval-5)));
- }
-}
-
-
-void ieee80211_send_beacon_cb(unsigned long _ieee)
-{
- struct ieee80211_device *ieee =
- (struct ieee80211_device *) _ieee;
- unsigned long flags;
-
- spin_lock_irqsave(&ieee->beacon_lock, flags);
- ieee80211_send_beacon(ieee);
- spin_unlock_irqrestore(&ieee->beacon_lock, flags);
-}
-
-
-void ieee80211_send_probe(struct ieee80211_device *ieee)
-{
- struct sk_buff *skb;
-
- skb = ieee80211_probe_req(ieee);
- if (skb){
- softmac_mgmt_xmit(skb, ieee);
- ieee->softmac_stats.tx_probe_rq++;
- }
-}
-
-void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
-{
- if (ieee->active_scan && (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ)){
- ieee80211_send_probe(ieee);
- ieee80211_send_probe(ieee);
- }
-}
-
-/* this performs syncro scan blocking the caller until all channels
- * in the allowed channel map has been checked.
- */
-void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
-{
- short ch = 0;
-#ifdef ENABLE_DOT11D
- u8 channel_map[MAX_CHANNEL_NUMBER+1];
- memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
-#endif
- down(&ieee->scan_sem);
-
- while(1)
- {
-
- do{
- ch++;
- if (ch > MAX_CHANNEL_NUMBER)
- goto out; /* scan completed */
-#ifdef ENABLE_DOT11D
- }while(!channel_map[ch]);
-#else
- }while(!ieee->channel_map[ch]);
-#endif
-
- /* this function can be called in two situations
- * 1- We have switched to ad-hoc mode and we are
- * performing a complete syncro scan before conclude
- * there are no interesting cell and to create a
- * new one. In this case the link state is
- * IEEE80211_NOLINK until we found an interesting cell.
- * If so the ieee8021_new_net, called by the RX path
- * will set the state to IEEE80211_LINKED, so we stop
- * scanning
- * 2- We are linked and the root uses run iwlist scan.
- * So we switch to IEEE80211_LINKED_SCANNING to remember
- * that we are still logically linked (not interested in
- * new network events, despite for updating the net list,
- * but we are temporarly 'unlinked' as the driver shall
- * not filter RX frames and the channel is changing.
- * So the only situation in witch are interested is to check
- * if the state become LINKED because of the #1 situation
- */
-
- if (ieee->state == IEEE80211_LINKED)
- goto out;
- ieee->set_chan(ieee, ch);
-#ifdef ENABLE_DOT11D
- if(channel_map[ch] == 1)
-#endif
- ieee80211_send_probe_requests(ieee);
-
- /* this prevent excessive time wait when we
- * need to wait for a syncro scan to end..
- */
- if(ieee->state < IEEE80211_LINKED)
- ;
- else
- if (ieee->sync_scan_hurryup)
- goto out;
-
-
- msleep_interruptible_rsl(IEEE80211_SOFTMAC_SCAN_TIME);
-
- }
-out:
- if(ieee->state < IEEE80211_LINKED){
- ieee->actscanning = false;
- up(&ieee->scan_sem);
- }
- else{
- ieee->sync_scan_hurryup = 0;
-#ifdef ENABLE_DOT11D
- if(IS_DOT11D_ENABLE(ieee))
- DOT11D_ScanComplete(ieee);
-#endif
- up(&ieee->scan_sem);
-}
-}
-
-
-void ieee80211_softmac_scan_wq(struct work_struct *work)
-{
- struct delayed_work *dwork = container_of(work, struct delayed_work, work);
- struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
- static short watchdog = 0;
- u8 last_channel = ieee->current_network.channel;
-#ifdef ENABLE_DOT11D
- u8 channel_map[MAX_CHANNEL_NUMBER+1];
- memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
-#endif
- if(!ieee->ieee_up)
- return;
- down(&ieee->scan_sem);
- do{
- ieee->current_network.channel =
- (ieee->current_network.channel + 1) % MAX_CHANNEL_NUMBER;
- if (watchdog++ > MAX_CHANNEL_NUMBER) {
- /* if current channel is not in channel map, set to default channel. */
-#ifdef ENABLE_DOT11D
- if (!channel_map[ieee->current_network.channel]) {
-#else
- if (!ieee->channel_map[ieee->current_network.channel]) {
-#endif
- ieee->current_network.channel = 6;
- goto out; /* no good chans */
- }
- }
-#ifdef ENABLE_DOT11D
- }while(!channel_map[ieee->current_network.channel]);
-#else
- }while(!ieee->channel_map[ieee->current_network.channel]);
-#endif
- if (ieee->scanning == 0 )
- goto out;
- ieee->set_chan(ieee, ieee->current_network.channel);
-#ifdef ENABLE_DOT11D
- if(channel_map[ieee->current_network.channel] == 1)
-#endif
- ieee80211_send_probe_requests(ieee);
-
-
- queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
-
- up(&ieee->scan_sem);
- return;
-out:
-#ifdef ENABLE_DOT11D
- if(IS_DOT11D_ENABLE(ieee))
- DOT11D_ScanComplete(ieee);
-#endif
- ieee->current_network.channel = last_channel;
- ieee->actscanning = false;
- watchdog = 0;
- ieee->scanning = 0;
- up(&ieee->scan_sem);
-}
-
-void ieee80211_beacons_start(struct ieee80211_device *ieee)
-{
- unsigned long flags;
- spin_lock_irqsave(&ieee->beacon_lock,flags);
-
- ieee->beacon_txing = 1;
- ieee80211_send_beacon(ieee);
-
- spin_unlock_irqrestore(&ieee->beacon_lock,flags);
-}
-
-void ieee80211_beacons_stop(struct ieee80211_device *ieee)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ieee->beacon_lock,flags);
-
- ieee->beacon_txing = 0;
- del_timer_sync(&ieee->beacon_timer);
-
- spin_unlock_irqrestore(&ieee->beacon_lock,flags);
-
-}
-
-
-void ieee80211_stop_send_beacons(struct ieee80211_device *ieee)
-{
- if(ieee->stop_send_beacons)
- ieee->stop_send_beacons(ieee);
- if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
- ieee80211_beacons_stop(ieee);
-}
-
-
-void ieee80211_start_send_beacons(struct ieee80211_device *ieee)
-{
- if(ieee->start_send_beacons)
- ieee->start_send_beacons(ieee);
- if(ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
- ieee80211_beacons_start(ieee);
-}
-
-
-void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
-{
- down(&ieee->scan_sem);
-
- if (ieee->scanning == 1){
- ieee->scanning = 0;
-
- cancel_delayed_work(&ieee->softmac_scan_wq);
- }
-
- up(&ieee->scan_sem);
-}
-
-void ieee80211_stop_scan(struct ieee80211_device *ieee)
-{
- if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
- ieee80211_softmac_stop_scan(ieee);
- else
- ieee->stop_scan(ieee);
-}
-
-/* called with ieee->lock held */
-void ieee80211_rtl_start_scan(struct ieee80211_device *ieee)
-{
-#ifdef ENABLE_IPS
- if(ieee->ieee80211_ips_leave_wq != NULL)
- ieee->ieee80211_ips_leave_wq(ieee);
-#endif
-
-#ifdef ENABLE_DOT11D
- if(IS_DOT11D_ENABLE(ieee) )
- {
- if(IS_COUNTRY_IE_VALID(ieee))
- {
- RESET_CIE_WATCHDOG(ieee);
- }
- }
-#endif
- if (ieee->softmac_features & IEEE_SOFTMAC_SCAN){
- if (ieee->scanning == 0){
- ieee->scanning = 1;
- queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, 0);
- }
- }else
- ieee->start_scan(ieee);
-
-}
-
-/* called with wx_sem held */
-void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
-{
-#ifdef ENABLE_DOT11D
- if(IS_DOT11D_ENABLE(ieee) )
- {
- if(IS_COUNTRY_IE_VALID(ieee))
- {
- RESET_CIE_WATCHDOG(ieee);
- }
- }
-#endif
- ieee->sync_scan_hurryup = 0;
- if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
- ieee80211_softmac_scan_syncro(ieee);
- else
- ieee->scan_syncro(ieee);
-
-}
-
-inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *beacon,
- struct ieee80211_device *ieee, int challengelen)
-{
- struct sk_buff *skb;
- struct ieee80211_authentication *auth;
- int len = sizeof(struct ieee80211_authentication) + challengelen + ieee->tx_headroom;
-
-
- skb = dev_alloc_skb(len);
- if (!skb) return NULL;
-
- skb_reserve(skb, ieee->tx_headroom);
- auth = (struct ieee80211_authentication *)
- skb_put(skb, sizeof(struct ieee80211_authentication));
-
- auth->header.frame_ctl = IEEE80211_STYPE_AUTH;
- if (challengelen) auth->header.frame_ctl |= IEEE80211_FCTL_WEP;
-
- auth->header.duration_id = 0x013a; //FIXME
-
- memcpy(auth->header.addr1, beacon->bssid, ETH_ALEN);
- memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(auth->header.addr3, beacon->bssid, ETH_ALEN);
-
- if(ieee->auth_mode == 0)
- auth->algorithm = WLAN_AUTH_OPEN;
- else if(ieee->auth_mode == 1)
- auth->algorithm = WLAN_AUTH_SHARED_KEY;
- else if(ieee->auth_mode == 2)
- auth->algorithm = WLAN_AUTH_OPEN;//0x80;
- printk("=================>%s():auth->algorithm is %d\n",__FUNCTION__,auth->algorithm);
- auth->transaction = cpu_to_le16(ieee->associate_seq);
- ieee->associate_seq++;
-
- auth->status = cpu_to_le16(WLAN_STATUS_SUCCESS);
-
- return skb;
-
-}
-
-
-static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *dest)
-{
- u8 *tag;
- int beacon_size;
- struct ieee80211_probe_response *beacon_buf;
- struct sk_buff *skb = NULL;
- int encrypt;
- int atim_len,erp_len;
- struct ieee80211_crypt_data* crypt;
-
- char *ssid = ieee->current_network.ssid;
- int ssid_len = ieee->current_network.ssid_len;
- int rate_len = ieee->current_network.rates_len+2;
- int rate_ex_len = ieee->current_network.rates_ex_len;
- int wpa_ie_len = ieee->wpa_ie_len;
- u8 erpinfo_content = 0;
-
- u8* tmp_ht_cap_buf;
- u8 tmp_ht_cap_len=0;
- u8* tmp_ht_info_buf;
- u8 tmp_ht_info_len=0;
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- u8* tmp_generic_ie_buf=NULL;
- u8 tmp_generic_ie_len=0;
-
- if(rate_ex_len > 0) rate_ex_len+=2;
-
- if(ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
- atim_len = 4;
- else
- atim_len = 0;
-
- if(ieee80211_is_54g(ieee->current_network))
- erp_len = 3;
- else
- erp_len = 0;
-
- crypt = ieee->crypt[ieee->tx_keyidx];
-
-
- encrypt = ieee->host_encrypt && crypt && crypt->ops &&
- ((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len));
- //HT ralated element
-
- tmp_ht_cap_buf =(u8*) &(ieee->pHTInfo->SelfHTCap);
- tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
- tmp_ht_info_buf =(u8*) &(ieee->pHTInfo->SelfHTInfo);
- tmp_ht_info_len = sizeof(ieee->pHTInfo->SelfHTInfo);
- HTConstructCapabilityElement(ieee, tmp_ht_cap_buf, &tmp_ht_cap_len,encrypt);
- HTConstructInfoElement(ieee,tmp_ht_info_buf,&tmp_ht_info_len, encrypt);
-
-
- if(pHTInfo->bRegRT2RTAggregation)
- {
- tmp_generic_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
- tmp_generic_ie_len = sizeof(ieee->pHTInfo->szRT2RTAggBuffer);
- HTConstructRT2RTAggElement(ieee, tmp_generic_ie_buf, &tmp_generic_ie_len);
- }
-
- beacon_size = sizeof(struct ieee80211_probe_response)+2+
- ssid_len
- +3 //channel
- +rate_len
- +rate_ex_len
- +atim_len
- +erp_len
- +wpa_ie_len
- +ieee->tx_headroom;
- skb = dev_alloc_skb(beacon_size);
- if (!skb)
- return NULL;
- skb_reserve(skb, ieee->tx_headroom);
- beacon_buf = (struct ieee80211_probe_response*) skb_put(skb, (beacon_size - ieee->tx_headroom));
- memcpy (beacon_buf->header.addr1, dest,ETH_ALEN);
- memcpy (beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy (beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
-
- beacon_buf->header.duration_id = 0; //FIXME
- beacon_buf->beacon_interval =
- cpu_to_le16(ieee->current_network.beacon_interval);
- beacon_buf->capability =
- cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_IBSS);
- beacon_buf->capability |=
- cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE); //add short preamble here
-
- if(ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
- beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
-
- crypt = ieee->crypt[ieee->tx_keyidx];
- if (encrypt)
- beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
-
-
- beacon_buf->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_RESP);
- beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
- beacon_buf->info_element[0].len = ssid_len;
-
- tag = (u8*) beacon_buf->info_element[0].data;
-
- memcpy(tag, ssid, ssid_len);
-
- tag += ssid_len;
-
- *(tag++) = MFIE_TYPE_RATES;
- *(tag++) = rate_len-2;
- memcpy(tag,ieee->current_network.rates,rate_len-2);
- tag+=rate_len-2;
-
- *(tag++) = MFIE_TYPE_DS_SET;
- *(tag++) = 1;
- *(tag++) = ieee->current_network.channel;
-
- if(atim_len){
- u16 val16;
- *(tag++) = MFIE_TYPE_IBSS_SET;
- *(tag++) = 2;
- val16 = cpu_to_le16(ieee->current_network.atim_window);
- memcpy((u8 *)tag, (u8 *)&val16, 2);
- tag+=2;
- }
-
- if(erp_len){
- *(tag++) = MFIE_TYPE_ERP;
- *(tag++) = 1;
- *(tag++) = erpinfo_content;
- }
- if(rate_ex_len){
- *(tag++) = MFIE_TYPE_RATES_EX;
- *(tag++) = rate_ex_len-2;
- memcpy(tag,ieee->current_network.rates_ex,rate_ex_len-2);
- tag+=rate_ex_len-2;
- }
-
- if (wpa_ie_len)
- {
- if (ieee->iw_mode == IW_MODE_ADHOC)
- {//as Windows will set pairwise key same as the group key which is not allowed in Linux, so set this for IOT issue. WB 2008.07.07
- memcpy(&ieee->wpa_ie[14], &ieee->wpa_ie[8], 4);
- }
- memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
- tag += wpa_ie_len;
- }
-
- return skb;
-}
-
-
-struct sk_buff* ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
-{
- struct sk_buff *skb;
- u8* tag;
-
- struct ieee80211_crypt_data* crypt;
- struct ieee80211_assoc_response_frame *assoc;
- short encrypt;
-
- unsigned int rate_len = ieee80211_MFIE_rate_len(ieee);
- int len = sizeof(struct ieee80211_assoc_response_frame) + rate_len + ieee->tx_headroom;
-
- skb = dev_alloc_skb(len);
-
- if (!skb)
- return NULL;
-
- skb_reserve(skb, ieee->tx_headroom);
-
- assoc = (struct ieee80211_assoc_response_frame *)
- skb_put(skb,sizeof(struct ieee80211_assoc_response_frame));
-
- assoc->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
- memcpy(assoc->header.addr1, dest,ETH_ALEN);
- memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- assoc->capability = cpu_to_le16(ieee->iw_mode == IW_MODE_MASTER ?
- WLAN_CAPABILITY_BSS : WLAN_CAPABILITY_IBSS);
-
-
- if(ieee->short_slot)
- assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
-
- if (ieee->host_encrypt)
- crypt = ieee->crypt[ieee->tx_keyidx];
- else crypt = NULL;
-
- encrypt = ( crypt && crypt->ops);
-
- if (encrypt)
- assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
-
- assoc->status = 0;
- assoc->aid = cpu_to_le16(ieee->assoc_id);
- if (ieee->assoc_id == 0x2007) ieee->assoc_id=0;
- else ieee->assoc_id++;
-
- tag = (u8*) skb_put(skb, rate_len);
-
- ieee80211_MFIE_Brate(ieee, &tag);
- ieee80211_MFIE_Grate(ieee, &tag);
-
- return skb;
-}
-
-struct sk_buff* ieee80211_auth_resp(struct ieee80211_device *ieee,int status, u8 *dest)
-{
- struct sk_buff *skb;
- struct ieee80211_authentication *auth;
- int len = ieee->tx_headroom + sizeof(struct ieee80211_authentication)+1;
-
- skb = dev_alloc_skb(len);
-
- if (!skb)
- return NULL;
-
- skb->len = sizeof(struct ieee80211_authentication);
-
- auth = (struct ieee80211_authentication *)skb->data;
-
- auth->status = cpu_to_le16(status);
- auth->transaction = cpu_to_le16(2);
- auth->algorithm = cpu_to_le16(WLAN_AUTH_OPEN);
-
- memcpy(auth->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(auth->header.addr1, dest, ETH_ALEN);
- auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH);
- return skb;
-
-
-}
-
-struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
-{
- struct sk_buff *skb;
- struct ieee80211_hdr_3addr* hdr;
-
- skb = dev_alloc_skb(sizeof(struct ieee80211_hdr_3addr));
-
- if (!skb)
- return NULL;
-
- hdr = (struct ieee80211_hdr_3addr*)skb_put(skb,sizeof(struct ieee80211_hdr_3addr));
-
- memcpy(hdr->addr1, ieee->current_network.bssid, ETH_ALEN);
- memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN);
-
- hdr->frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS |
- (pwr ? IEEE80211_FCTL_PM:0));
-
- return skb;
-
-
-}
-
-struct sk_buff* ieee80211_pspoll_func(struct ieee80211_device *ieee)
-{
- struct sk_buff *skb;
- struct ieee80211_pspoll_hdr* hdr;
-
-#ifdef USB_USE_ALIGNMENT
- u32 Tmpaddr=0;
- int alignment=0;
- skb = dev_alloc_skb(sizeof(struct ieee80211_pspoll_hdr) + ieee->tx_headroom + USB_512B_ALIGNMENT_SIZE);
-#else
- skb = dev_alloc_skb(sizeof(struct ieee80211_pspoll_hdr)+ieee->tx_headroom);
-#endif
- if (!skb)
- return NULL;
-
-#ifdef USB_USE_ALIGNMENT
- Tmpaddr = (u32)skb->data;
- alignment = Tmpaddr & 0x1ff;
- skb_reserve(skb,(USB_512B_ALIGNMENT_SIZE - alignment));
-#endif
- skb_reserve(skb, ieee->tx_headroom);
-
- hdr = (struct ieee80211_pspoll_hdr*)skb_put(skb,sizeof(struct ieee80211_pspoll_hdr));
-
- memcpy(hdr->bssid, ieee->current_network.bssid, ETH_ALEN);
- memcpy(hdr->ta, ieee->dev->dev_addr, ETH_ALEN);
-
- hdr->aid = cpu_to_le16(ieee->assoc_id | 0xc000);
- hdr->frame_ctl = cpu_to_le16(IEEE80211_FTYPE_CTL |IEEE80211_STYPE_PSPOLL | IEEE80211_FCTL_PM);
-
- return skb;
-
-}
-
-
-void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8* dest)
-{
- struct sk_buff *buf = ieee80211_assoc_resp(ieee, dest);
-
- if (buf)
- softmac_mgmt_xmit(buf, ieee);
-}
-
-
-void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8* dest)
-{
- struct sk_buff *buf = ieee80211_auth_resp(ieee, s, dest);
-
- if (buf)
- softmac_mgmt_xmit(buf, ieee);
-}
-
-
-void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
-{
-
-
- struct sk_buff *buf = ieee80211_probe_resp(ieee, dest);
- if (buf)
- softmac_mgmt_xmit(buf, ieee);
-}
-
-
-inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beacon,struct ieee80211_device *ieee)
-{
- struct sk_buff *skb;
- struct ieee80211_assoc_request_frame *hdr;
- u8 *tag;
- u8* ht_cap_buf = NULL;
- u8 ht_cap_len=0;
- u8* realtek_ie_buf=NULL;
- u8 realtek_ie_len=0;
- int wpa_ie_len= ieee->wpa_ie_len;
- unsigned int ckip_ie_len=0;
- unsigned int ccxrm_ie_len=0;
- unsigned int cxvernum_ie_len=0;
- struct ieee80211_crypt_data* crypt;
- int encrypt;
-
- unsigned int rate_len = ieee80211_MFIE_rate_len(ieee);
- unsigned int wmm_info_len = beacon->qos_data.supported?9:0;
-#ifdef THOMAS_TURBO
- unsigned int turbo_info_len = beacon->Turbo_Enable?9:0;
-#endif
-
- int len = 0;
-
- crypt = ieee->crypt[ieee->tx_keyidx];
- encrypt = ieee->host_encrypt && crypt && crypt->ops && ((0 == strcmp(crypt->ops->name,"WEP") || wpa_ie_len));
-
- //Include High Throuput capability && Realtek proprietary
- if(ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT)
- {
- ht_cap_buf = (u8*)&(ieee->pHTInfo->SelfHTCap);
- ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
- HTConstructCapabilityElement(ieee, ht_cap_buf, &ht_cap_len, encrypt);
- if(ieee->pHTInfo->bCurrentRT2RTAggregation)
- {
- realtek_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
- realtek_ie_len = sizeof( ieee->pHTInfo->szRT2RTAggBuffer);
- HTConstructRT2RTAggElement(ieee, realtek_ie_buf, &realtek_ie_len);
-
- }
- }
- if(ieee->qos_support){
- wmm_info_len = beacon->qos_data.supported?9:0;
- }
-
-
- if(beacon->bCkipSupported)
- {
- ckip_ie_len = 30+2;
- }
- if(beacon->bCcxRmEnable)
- {
- ccxrm_ie_len = 6+2;
- }
- if( beacon->BssCcxVerNumber >= 2 )
- {
- cxvernum_ie_len = 5+2;
- }
-#ifdef THOMAS_TURBO
- len = sizeof(struct ieee80211_assoc_request_frame)+ 2
- + beacon->ssid_len//essid tagged val
- + rate_len//rates tagged val
- + wpa_ie_len
- + wmm_info_len
- + turbo_info_len
- + ht_cap_len
- + realtek_ie_len
- + ckip_ie_len
- + ccxrm_ie_len
- + cxvernum_ie_len
- + ieee->tx_headroom;
-#else
- len = sizeof(struct ieee80211_assoc_request_frame)+ 2
- + beacon->ssid_len//essid tagged val
- + rate_len//rates tagged val
- + wpa_ie_len
- + wmm_info_len
- + ht_cap_len
- + realtek_ie_len
- + ckip_ie_len
- + ccxrm_ie_len
- + cxvernum_ie_len
- + ieee->tx_headroom;
-#endif
-
- skb = dev_alloc_skb(len);
-
- if (!skb)
- return NULL;
-
- skb_reserve(skb, ieee->tx_headroom);
-
- hdr = (struct ieee80211_assoc_request_frame *)
- skb_put(skb, sizeof(struct ieee80211_assoc_request_frame)+2);
-
-
- hdr->header.frame_ctl = IEEE80211_STYPE_ASSOC_REQ;
- hdr->header.duration_id= 37; //FIXME
- memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
- memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(hdr->header.addr3, beacon->bssid, ETH_ALEN);
-
- memcpy(ieee->ap_mac_addr, beacon->bssid, ETH_ALEN);//for HW security, John
-
- hdr->capability = cpu_to_le16(WLAN_CAPABILITY_BSS);
- if (beacon->capability & WLAN_CAPABILITY_PRIVACY )
- hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
-
- if (beacon->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
- hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE); //add short_preamble here
-
- if(ieee->short_slot)
- hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
- if (wmm_info_len) //QOS
- hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_QOS);
-
- hdr->listen_interval = 0xa; //FIXME
-
- hdr->info_element[0].id = MFIE_TYPE_SSID;
-
- hdr->info_element[0].len = beacon->ssid_len;
- tag = skb_put(skb, beacon->ssid_len);
- memcpy(tag, beacon->ssid, beacon->ssid_len);
-
- tag = skb_put(skb, rate_len);
-
- ieee80211_MFIE_Brate(ieee, &tag);
- ieee80211_MFIE_Grate(ieee, &tag);
- // For CCX 1 S13, CKIP. Added by Annie, 2006-08-14.
- if( beacon->bCkipSupported )
- {
- static u8 AironetIeOui[] = {0x00, 0x01, 0x66}; // "4500-client"
- u8 CcxAironetBuf[30];
- OCTET_STRING osCcxAironetIE;
-
- memset(CcxAironetBuf, 0,30);
- osCcxAironetIE.Octet = CcxAironetBuf;
- osCcxAironetIE.Length = sizeof(CcxAironetBuf);
- //
- // Ref. CCX test plan v3.61, 3.2.3.1 step 13.
- // We want to make the device type as "4500-client". 060926, by CCW.
- //
- memcpy(osCcxAironetIE.Octet, AironetIeOui, sizeof(AironetIeOui));
-
- // CCX1 spec V1.13, A01.1 CKIP Negotiation (page23):
- // "The CKIP negotiation is started with the associate request from the client to the access point,
- // containing an Aironet element with both the MIC and KP bits set."
- osCcxAironetIE.Octet[IE_CISCO_FLAG_POSITION] |= (SUPPORT_CKIP_PK|SUPPORT_CKIP_MIC) ;
- tag = skb_put(skb, ckip_ie_len);
- *tag++ = MFIE_TYPE_AIRONET;
- *tag++ = osCcxAironetIE.Length;
- memcpy(tag,osCcxAironetIE.Octet,osCcxAironetIE.Length);
- tag += osCcxAironetIE.Length;
- }
-
- if(beacon->bCcxRmEnable)
- {
- static u8 CcxRmCapBuf[] = {0x00, 0x40, 0x96, 0x01, 0x01, 0x00};
- OCTET_STRING osCcxRmCap;
-
- osCcxRmCap.Octet = CcxRmCapBuf;
- osCcxRmCap.Length = sizeof(CcxRmCapBuf);
- tag = skb_put(skb,ccxrm_ie_len);
- *tag++ = MFIE_TYPE_GENERIC;
- *tag++ = osCcxRmCap.Length;
- memcpy(tag,osCcxRmCap.Octet,osCcxRmCap.Length);
- tag += osCcxRmCap.Length;
- }
-
- if( beacon->BssCcxVerNumber >= 2 )
- {
- u8 CcxVerNumBuf[] = {0x00, 0x40, 0x96, 0x03, 0x00};
- OCTET_STRING osCcxVerNum;
- CcxVerNumBuf[4] = beacon->BssCcxVerNumber;
- osCcxVerNum.Octet = CcxVerNumBuf;
- osCcxVerNum.Length = sizeof(CcxVerNumBuf);
- tag = skb_put(skb,cxvernum_ie_len);
- *tag++ = MFIE_TYPE_GENERIC;
- *tag++ = osCcxVerNum.Length;
- memcpy(tag,osCcxVerNum.Octet,osCcxVerNum.Length);
- tag += osCcxVerNum.Length;
- }
- //HT cap element
- if(ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT){
- if(ieee->pHTInfo->ePeerHTSpecVer != HT_SPEC_VER_EWC)
- {
- tag = skb_put(skb, ht_cap_len);
- *tag++ = MFIE_TYPE_HT_CAP;
- *tag++ = ht_cap_len - 2;
- memcpy(tag, ht_cap_buf,ht_cap_len -2);
- tag += ht_cap_len -2;
- }
- }
-
-
- //choose what wpa_supplicant gives to associate.
- tag = skb_put(skb, wpa_ie_len);
- if (wpa_ie_len){
- memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
- }
-
- tag = skb_put(skb,wmm_info_len);
- if(wmm_info_len) {
- ieee80211_WMM_Info(ieee, &tag);
- }
-#ifdef THOMAS_TURBO
- tag = skb_put(skb,turbo_info_len);
- if(turbo_info_len) {
- ieee80211_TURBO_Info(ieee, &tag);
- }
-#endif
-
- if(ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT){
- if(ieee->pHTInfo->ePeerHTSpecVer == HT_SPEC_VER_EWC)
- {
- tag = skb_put(skb, ht_cap_len);
- *tag++ = MFIE_TYPE_GENERIC;
- *tag++ = ht_cap_len - 2;
- memcpy(tag, ht_cap_buf,ht_cap_len - 2);
- tag += ht_cap_len -2;
- }
-
- if(ieee->pHTInfo->bCurrentRT2RTAggregation){
- tag = skb_put(skb, realtek_ie_len);
- *tag++ = MFIE_TYPE_GENERIC;
- *tag++ = realtek_ie_len - 2;
- memcpy(tag, realtek_ie_buf,realtek_ie_len -2 );
- }
- }
- return skb;
-}
-
-void ieee80211_associate_abort(struct ieee80211_device *ieee)
-{
-
- unsigned long flags;
- spin_lock_irqsave(&ieee->lock, flags);
-
- ieee->associate_seq++;
-
- /* don't scan, and avoid to have the RX path possibily
- * try again to associate. Even do not react to AUTH or
- * ASSOC response. Just wait for the retry wq to be scheduled.
- * Here we will check if there are good nets to associate
- * with, so we retry or just get back to NO_LINK and scanning
- */
- if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING){
- IEEE80211_DEBUG_MGMT("Authentication failed\n");
- ieee->softmac_stats.no_auth_rs++;
- }else{
- IEEE80211_DEBUG_MGMT("Association failed\n");
- ieee->softmac_stats.no_ass_rs++;
- }
-
- ieee->state = IEEE80211_ASSOCIATING_RETRY;
-
- queue_delayed_work(ieee->wq, &ieee->associate_retry_wq, \
- IEEE80211_SOFTMAC_ASSOC_RETRY_TIME);
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-
-void ieee80211_associate_abort_cb(unsigned long dev)
-{
- ieee80211_associate_abort((struct ieee80211_device *) dev);
-}
-
-
-void ieee80211_associate_step1(struct ieee80211_device *ieee)
-{
- struct ieee80211_network *beacon = &ieee->current_network;
- struct sk_buff *skb;
-
- IEEE80211_DEBUG_MGMT("Stopping scan\n");
-
- ieee->softmac_stats.tx_auth_rq++;
- skb=ieee80211_authentication_req(beacon, ieee, 0);
-
- if (!skb)
- ieee80211_associate_abort(ieee);
- else{
- ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATING ;
- IEEE80211_DEBUG_MGMT("Sending authentication request\n");
- softmac_mgmt_xmit(skb, ieee);
- //BUGON when you try to add_timer twice, using mod_timer may be better, john0709
- if(!timer_pending(&ieee->associate_timer)){
- ieee->associate_timer.expires = jiffies + (HZ / 2);
- add_timer(&ieee->associate_timer);
- }
- }
-}
-
-void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
-{
- u8 *c;
- struct sk_buff *skb;
- struct ieee80211_network *beacon = &ieee->current_network;
-
- ieee->associate_seq++;
- ieee->softmac_stats.tx_auth_rq++;
-
- skb = ieee80211_authentication_req(beacon, ieee, chlen+2);
- if (!skb)
- ieee80211_associate_abort(ieee);
- else{
- c = skb_put(skb, chlen+2);
- *(c++) = MFIE_TYPE_CHALLENGE;
- *(c++) = chlen;
- memcpy(c, challenge, chlen);
-
- IEEE80211_DEBUG_MGMT("Sending authentication challenge response\n");
-
- ieee80211_encrypt_fragment(ieee, skb, sizeof(struct ieee80211_hdr_3addr ));
-
- softmac_mgmt_xmit(skb, ieee);
- mod_timer(&ieee->associate_timer, jiffies + (HZ/2));
- }
- kfree(challenge);
-}
-
-void ieee80211_associate_step2(struct ieee80211_device *ieee)
-{
- struct sk_buff* skb;
- struct ieee80211_network *beacon = &ieee->current_network;
-
- del_timer_sync(&ieee->associate_timer);
-
- IEEE80211_DEBUG_MGMT("Sending association request\n");
-
- ieee->softmac_stats.tx_ass_rq++;
- skb=ieee80211_association_req(beacon, ieee);
- if (!skb)
- ieee80211_associate_abort(ieee);
- else{
- softmac_mgmt_xmit(skb, ieee);
- mod_timer(&ieee->associate_timer, jiffies + (HZ/2));
- }
-}
-void ieee80211_associate_complete_wq(struct work_struct *work)
-{
- struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
- printk(KERN_INFO "Associated successfully\n");
- ieee->is_roaming = false;
- if(ieee80211_is_54g(ieee->current_network) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION)){
-
- ieee->rate = 108;
- printk(KERN_INFO"Using G rates:%d\n", ieee->rate);
- }else{
- ieee->rate = 22;
- printk(KERN_INFO"Using B rates:%d\n", ieee->rate);
- }
- if (ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT)
- {
- printk("Successfully associated, ht enabled\n");
- HTOnAssocRsp(ieee);
- }
- else
- {
- printk("Successfully associated, ht not enabled(%d, %d)\n", ieee->pHTInfo->bCurrentHTSupport, ieee->pHTInfo->bEnableHT);
- memset(ieee->dot11HTOperationalRateSet, 0, 16);
- }
- ieee->LinkDetectInfo.SlotNum = 2 * (1 + ieee->current_network.beacon_interval/500);
- // To prevent the immediately calling watch_dog after association.
- if(ieee->LinkDetectInfo.NumRecvBcnInPeriod==0||ieee->LinkDetectInfo.NumRecvDataInPeriod==0 )
- {
- ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1;
- ieee->LinkDetectInfo.NumRecvDataInPeriod= 1;
- }
- ieee->link_change(ieee);
- if(ieee->is_silent_reset == 0){
- printk("============>normal associate\n");
- notify_wx_assoc_event(ieee);
- }
- else if(ieee->is_silent_reset == 1)
- {
- printk("==================>silent reset associate\n");
- ieee->is_silent_reset = 0;
- }
-
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee);
- netif_carrier_on(ieee->dev);
-}
-
-void ieee80211_associate_complete(struct ieee80211_device *ieee)
-{
- del_timer_sync(&ieee->associate_timer);
-
- ieee->state = IEEE80211_LINKED;
- queue_work(ieee->wq, &ieee->associate_complete_wq);
-}
-
-void ieee80211_associate_procedure_wq(struct work_struct *work)
-{
- struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq);
- ieee->sync_scan_hurryup = 1;
-#ifdef ENABLE_IPS
- if(ieee->ieee80211_ips_leave != NULL)
- ieee->ieee80211_ips_leave(ieee);
-#endif
-
- down(&ieee->wx_sem);
-
- if (ieee->data_hard_stop)
- ieee->data_hard_stop(ieee);
-
- ieee80211_stop_scan(ieee);
- printk("===>%s(), chan:%d\n", __FUNCTION__, ieee->current_network.channel);
- HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
-
- ieee->associate_seq = 1;
- ieee80211_associate_step1(ieee);
-
- up(&ieee->wx_sem);
-}
-
-inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net)
-{
- u8 tmp_ssid[IW_ESSID_MAX_SIZE+1];
- int tmp_ssid_len = 0;
-
- short apset,ssidset,ssidbroad,apmatch,ssidmatch;
-
- /* we are interested in new new only if we are not associated
- * and we are not associating / authenticating
- */
- if (ieee->state != IEEE80211_NOLINK)
- return;
-
- if ((ieee->iw_mode == IW_MODE_INFRA) && !(net->capability & WLAN_CAPABILITY_BSS))
- return;
-
- if ((ieee->iw_mode == IW_MODE_ADHOC) && !(net->capability & WLAN_CAPABILITY_IBSS))
- return;
-
-
- if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC){
- /* if the user specified the AP MAC, we need also the essid
- * This could be obtained by beacons or, if the network does not
- * broadcast it, it can be put manually.
- */
- apset = ieee->wap_set;
- ssidset = ieee->ssid_set;
- ssidbroad = !(net->ssid_len == 0 || net->ssid[0]== '\0');
- apmatch = (memcmp(ieee->current_network.bssid, net->bssid, ETH_ALEN)==0);
- ssidmatch = (ieee->current_network.ssid_len == net->ssid_len)&&\
- (!strncmp(ieee->current_network.ssid, net->ssid, net->ssid_len));
-
-
- if ( /* if the user set the AP check if match.
- * if the network does not broadcast essid we check the user supplyed ANY essid
- * if the network does broadcast and the user does not set essid it is OK
- * if the network does broadcast and the user did set essid chech if essid match
- */
- ( apset && apmatch &&
- ((ssidset && ssidbroad && ssidmatch) || (ssidbroad && !ssidset) || (!ssidbroad && ssidset)) ) ||
- /* if the ap is not set, check that the user set the bssid
- * and the network does bradcast and that those two bssid matches
- */
- (!apset && ssidset && ssidbroad && ssidmatch)
- ){
- /* if the essid is hidden replace it with the
- * essid provided by the user.
- */
- if (!ssidbroad){
- strncpy(tmp_ssid, ieee->current_network.ssid, IW_ESSID_MAX_SIZE);
- tmp_ssid_len = ieee->current_network.ssid_len;
- }
- memcpy(&ieee->current_network, net, sizeof(struct ieee80211_network));
-
- if (!ssidbroad){
- strncpy(ieee->current_network.ssid, tmp_ssid, IW_ESSID_MAX_SIZE);
- ieee->current_network.ssid_len = tmp_ssid_len;
- }
- printk(KERN_INFO"Linking with %s,channel:%d, qos:%d, myHT:%d, networkHT:%d\n",ieee->current_network.ssid,ieee->current_network.channel, ieee->current_network.qos_data.supported, ieee->pHTInfo->bEnableHT, ieee->current_network.bssht.bdSupportHT);
-
- HTResetIOTSetting(ieee->pHTInfo);
- if (ieee->iw_mode == IW_MODE_INFRA){
- /* Join the network for the first time */
- ieee->AsocRetryCount = 0;
- //for HT by amy 080514
- if((ieee->current_network.qos_data.supported == 1) &&
- ieee->current_network.bssht.bdSupportHT)
-/*WB, 2008.09.09:bCurrentHTSupport and bEnableHT two flags are going to put together to check whether we are in HT now, so needn't to check bEnableHT flags here. That's is to say we will set to HT support whenever joined AP has the ability to support HT. And whether we are in HT or not, please check bCurrentHTSupport&&bEnableHT now please.*/
- {
- HTResetSelfAndSavePeerSetting(ieee, &(ieee->current_network));
- }
- else
- {
- ieee->pHTInfo->bCurrentHTSupport = false;
- }
-
- ieee->state = IEEE80211_ASSOCIATING;
- queue_work(ieee->wq, &ieee->associate_procedure_wq);
- }else{
- if(ieee80211_is_54g(ieee->current_network) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION)){
- ieee->rate = 108;
- ieee->SetWirelessMode(ieee, IEEE_G);
- printk(KERN_INFO"Using G rates\n");
- }else{
- ieee->rate = 22;
- ieee->SetWirelessMode(ieee, IEEE_B);
- printk(KERN_INFO"Using B rates\n");
- }
- memset(ieee->dot11HTOperationalRateSet, 0, 16);
- ieee->state = IEEE80211_LINKED;
- }
-
- }
- }
-
-}
-
-void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
-{
- unsigned long flags;
- struct ieee80211_network *target;
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- list_for_each_entry(target, &ieee->network_list, list) {
-
- /* if the state become different that NOLINK means
- * we had found what we are searching for
- */
-
- if (ieee->state != IEEE80211_NOLINK)
- break;
-
- if (ieee->scan_age == 0 || time_after(target->last_scanned + ieee->scan_age, jiffies))
- ieee80211_softmac_new_net(ieee, target);
- }
-
- spin_unlock_irqrestore(&ieee->lock, flags);
-
-}
-
-
-static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
-{
- struct ieee80211_authentication *a;
- u8 *t;
- if (skb->len < (sizeof(struct ieee80211_authentication)-sizeof(struct ieee80211_info_element))){
- IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n",skb->len);
- return 0xcafe;
- }
- *challenge = NULL;
- a = (struct ieee80211_authentication*) skb->data;
- if(skb->len > (sizeof(struct ieee80211_authentication) +3)){
- t = skb->data + sizeof(struct ieee80211_authentication);
-
- if(*(t++) == MFIE_TYPE_CHALLENGE){
- *chlen = *(t++);
- *challenge = kmemdup(t, *chlen, GFP_ATOMIC);
- if (!*challenge)
- return -ENOMEM;
- }
- }
-
- return cpu_to_le16(a->status);
-
-}
-
-
-int auth_rq_parse(struct sk_buff *skb,u8* dest)
-{
- struct ieee80211_authentication *a;
-
- if (skb->len < (sizeof(struct ieee80211_authentication)-sizeof(struct ieee80211_info_element))){
- IEEE80211_DEBUG_MGMT("invalid len in auth request: %d\n",skb->len);
- return -1;
- }
- a = (struct ieee80211_authentication*) skb->data;
-
- memcpy(dest,a->header.addr2, ETH_ALEN);
-
- if (le16_to_cpu(a->algorithm) != WLAN_AUTH_OPEN)
- return WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
-
- return WLAN_STATUS_SUCCESS;
-}
-
-static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb, u8 *src)
-{
- u8 *tag;
- u8 *skbend;
- u8 *ssid=NULL;
- u8 ssidlen = 0;
-
- struct ieee80211_hdr_3addr *header =
- (struct ieee80211_hdr_3addr *) skb->data;
-
- if (skb->len < sizeof (struct ieee80211_hdr_3addr ))
- return -1; /* corrupted */
-
- memcpy(src,header->addr2, ETH_ALEN);
-
- skbend = (u8*)skb->data + skb->len;
-
- tag = skb->data + sizeof (struct ieee80211_hdr_3addr );
-
- while (tag+1 < skbend){
- if (*tag == 0){
- ssid = tag+2;
- ssidlen = *(tag+1);
- break;
- }
- tag++; /* point to the len field */
- tag = tag + *(tag); /* point to the last data byte of the tag */
- tag++; /* point to the next tag */
- }
-
- if (ssidlen == 0) return 1;
-
- if (!ssid) return 1; /* ssid not found in tagged param */
- return (!strncmp(ssid, ieee->current_network.ssid, ssidlen));
-
-}
-
-int assoc_rq_parse(struct sk_buff *skb,u8* dest)
-{
- struct ieee80211_assoc_request_frame *a;
-
- if (skb->len < (sizeof(struct ieee80211_assoc_request_frame) -
- sizeof(struct ieee80211_info_element))) {
-
- IEEE80211_DEBUG_MGMT("invalid len in auth request:%d \n", skb->len);
- return -1;
- }
-
- a = (struct ieee80211_assoc_request_frame*) skb->data;
-
- memcpy(dest,a->header.addr2,ETH_ALEN);
-
- return 0;
-}
-
-static inline u16 assoc_parse(struct ieee80211_device *ieee, struct sk_buff *skb, int *aid)
-{
- struct ieee80211_assoc_response_frame *response_head;
- u16 status_code;
-
- if (skb->len < sizeof(struct ieee80211_assoc_response_frame)){
- IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
- return 0xcafe;
- }
-
- response_head = (struct ieee80211_assoc_response_frame*) skb->data;
- *aid = le16_to_cpu(response_head->aid) & 0x3fff;
-
- status_code = le16_to_cpu(response_head->status);
- if((status_code==WLAN_STATUS_ASSOC_DENIED_RATES || \
- status_code==WLAN_STATUS_CAPS_UNSUPPORTED)&&
- ((ieee->mode == IEEE_G) &&
- (ieee->current_network.mode == IEEE_N_24G) &&
- (ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT-1)))) {
- ieee->pHTInfo->IOTAction |= HT_IOT_ACT_PURE_N_MODE;
- }else {
- ieee->AsocRetryCount = 0;
- }
-
- return le16_to_cpu(response_head->status);
-}
-
-static inline void
-ieee80211_rx_probe_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
-{
- u8 dest[ETH_ALEN];
-
- ieee->softmac_stats.rx_probe_rq++;
- if (probe_rq_parse(ieee, skb, dest)){
- ieee->softmac_stats.tx_probe_rs++;
- ieee80211_resp_to_probe(ieee, dest);
- }
-}
-
-static inline void
-ieee80211_rx_auth_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
-{
- u8 dest[ETH_ALEN];
- int status;
- ieee->softmac_stats.rx_auth_rq++;
-
- status = auth_rq_parse(skb, dest);
- if (status != -1) {
- ieee80211_resp_to_auth(ieee, status, dest);
- }
-}
-
-static inline void
-ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
-{
- u8 dest[ETH_ALEN];
-
- ieee->softmac_stats.rx_ass_rq++;
- if (assoc_rq_parse(skb,dest) != -1){
- ieee80211_resp_to_assoc_rq(ieee, dest);
- }
-
- printk(KERN_INFO"New client associated: %pM\n", dest);
-}
-
-
-
-void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr)
-{
-
- struct sk_buff *buf = ieee80211_null_func(ieee, pwr);
-
- if (buf)
- softmac_ps_mgmt_xmit(buf, ieee);
-
-}
-
-void ieee80211_sta_ps_send_pspoll_frame(struct ieee80211_device *ieee)
-{
-
- struct sk_buff *buf = ieee80211_pspoll_func(ieee);
-
- if (buf)
- softmac_ps_mgmt_xmit(buf, ieee);
-
-}
-
-short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, u32 *time_l)
-{
- int timeout = ieee->ps_timeout;
- u8 dtim;
-
- if(ieee->LPSDelayCnt)
- {
- ieee->LPSDelayCnt --;
- return 0;
- }
-
- dtim = ieee->current_network.dtim_data;
- if(!(dtim & IEEE80211_DTIM_VALID))
- return 0;
- timeout = ieee->current_network.beacon_interval; //should we use ps_timeout value or beacon_interval
- ieee->current_network.dtim_data = IEEE80211_DTIM_INVALID;
- /* there's no need to nofity AP that I find you buffered with broadcast packet */
- if(dtim & (IEEE80211_DTIM_UCAST & ieee->ps))
- return 2;
-
- if(!time_after(jiffies, ieee->dev->trans_start + MSECS(timeout))){
- return 0;
- }
- if(!time_after(jiffies, ieee->last_rx_ps_time + MSECS(timeout))){
- return 0;
- }
- if((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE ) &&
- (ieee->mgmt_queue_tail != ieee->mgmt_queue_head))
- return 0;
-
- if(time_l){
- if(ieee->bAwakePktSent == true) {
- ieee->LPSAwakeIntvl = 1;//tx wake one beacon
- } else {
- u8 MaxPeriod = 1;
-
- if(ieee->LPSAwakeIntvl == 0)
- ieee->LPSAwakeIntvl = 1;
- if(ieee->RegMaxLPSAwakeIntvl == 0) // Default (0x0 - eFastPs, 0xFF -DTIM, 0xNN - 0xNN * BeaconIntvl)
- MaxPeriod = 1; // 1 Beacon interval
- else if(ieee->RegMaxLPSAwakeIntvl == 0xFF) // DTIM
- MaxPeriod = ieee->current_network.dtim_period;
- else
- MaxPeriod = ieee->RegMaxLPSAwakeIntvl;
- ieee->LPSAwakeIntvl = (ieee->LPSAwakeIntvl >= MaxPeriod) ? MaxPeriod : (ieee->LPSAwakeIntvl + 1);
- }
- {
- u8 LPSAwakeIntvl_tmp = 0;
- u8 period = ieee->current_network.dtim_period;
- u8 count = ieee->current_network.tim.tim_count;
- if(count == 0 ) {
- if(ieee->LPSAwakeIntvl > period)
- LPSAwakeIntvl_tmp = period + (ieee->LPSAwakeIntvl - period) -((ieee->LPSAwakeIntvl-period)%period);
- else
- LPSAwakeIntvl_tmp = ieee->LPSAwakeIntvl;
-
- } else {
- if(ieee->LPSAwakeIntvl > ieee->current_network.tim.tim_count)
- LPSAwakeIntvl_tmp = count + (ieee->LPSAwakeIntvl - count) -((ieee->LPSAwakeIntvl-count)%period);
- else
- LPSAwakeIntvl_tmp = ieee->LPSAwakeIntvl;
- }
-
- *time_l = ieee->current_network.last_dtim_sta_time[0]
- + MSECS(ieee->current_network.beacon_interval * LPSAwakeIntvl_tmp);
- }
- }
-
- if(time_h){
- *time_h = ieee->current_network.last_dtim_sta_time[1];
- if(time_l && *time_l < ieee->current_network.last_dtim_sta_time[0])
- *time_h += 1;
- }
-
- return 1;
-
-
-}
-
-inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
-{
-
- u32 th,tl;
- short sleep;
- unsigned long flags;
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- if((ieee->ps == IEEE80211_PS_DISABLED ||
- ieee->iw_mode != IW_MODE_INFRA ||
- ieee->state != IEEE80211_LINKED)){
-
- // #warning CHECK_LOCK_HERE
- printk("=====>%s(): no need to ps,wake up!! ieee->ps is %d,ieee->iw_mode is %d,ieee->state is %d\n",
- __FUNCTION__,ieee->ps,ieee->iw_mode,ieee->state);
- spin_lock(&ieee->mgmt_tx_lock);
-
- ieee80211_sta_wakeup(ieee, 1);
-
- spin_unlock(&ieee->mgmt_tx_lock);
- }
-
- sleep = ieee80211_sta_ps_sleep(ieee,&th, &tl);
- /* 2 wake, 1 sleep, 0 do nothing */
- if(sleep == 0)//it is not time out or dtim is not valid
- {
- goto out;
- }
- if(sleep == 1){
- if(ieee->sta_sleep == 1){
- ieee->enter_sleep_state(ieee, th, tl);
- }
-
- else if(ieee->sta_sleep == 0){
- spin_lock(&ieee->mgmt_tx_lock);
-
- if (ieee->ps_is_queue_empty(ieee)) {
- ieee->sta_sleep = 2;
- ieee->ack_tx_to_ieee = 1;
- ieee80211_sta_ps_send_null_frame(ieee,1);
- ieee->ps_th = th;
- ieee->ps_tl = tl;
- }
- spin_unlock(&ieee->mgmt_tx_lock);
-
- }
-
- ieee->bAwakePktSent = false;//after null to power save we set it to false. not listen every beacon.
-
- }else if(sleep == 2){
- spin_lock(&ieee->mgmt_tx_lock);
-
- ieee80211_sta_wakeup(ieee,1);
-
- spin_unlock(&ieee->mgmt_tx_lock);
- }
-
-out:
- spin_unlock_irqrestore(&ieee->lock, flags);
-
-}
-
-void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl)
-{
- if(ieee->sta_sleep == 0){
- if(nl){
- if(ieee->pHTInfo->IOTAction & HT_IOT_ACT_NULL_DATA_POWER_SAVING)
- {
- ieee->ack_tx_to_ieee = 1;
- ieee80211_sta_ps_send_null_frame(ieee, 0);
- }
- else
- {
- ieee->ack_tx_to_ieee = 1;
- ieee80211_sta_ps_send_pspoll_frame(ieee);
- }
- }
- return;
-
- }
-
- if(ieee->sta_sleep == 1)
- ieee->sta_wake_up(ieee);
- if(nl){
-
- if(ieee->pHTInfo->IOTAction & HT_IOT_ACT_NULL_DATA_POWER_SAVING)
- {
- ieee->ack_tx_to_ieee = 1;
- ieee80211_sta_ps_send_null_frame(ieee, 0);
- }
- else
- {
- ieee->ack_tx_to_ieee = 1;
- ieee->polling = true;
- //ieee80211_sta_ps_send_null_frame(ieee, 0);
- ieee80211_sta_ps_send_pspoll_frame(ieee);
- }
-
- } else {
- ieee->sta_sleep = 0;
- ieee->polling = false;
- }
-}
-
-void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- if(ieee->sta_sleep == 2){
- /* Null frame with PS bit set */
- if(success){
- ieee->sta_sleep = 1;
- ieee->enter_sleep_state(ieee, ieee->ps_th, ieee->ps_tl);
- }
- } else {/* 21112005 - tx again null without PS bit if lost */
-
- if((ieee->sta_sleep == 0) && !success){
- spin_lock(&ieee->mgmt_tx_lock);
- //ieee80211_sta_ps_send_null_frame(ieee, 0);
- if(ieee->pHTInfo->IOTAction & HT_IOT_ACT_NULL_DATA_POWER_SAVING)
- {
- ieee80211_sta_ps_send_null_frame(ieee, 0);
- }
- else
- {
- ieee80211_sta_ps_send_pspoll_frame(ieee);
- }
- spin_unlock(&ieee->mgmt_tx_lock);
- }
- }
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-
-void ieee80211_process_action(struct ieee80211_device* ieee, struct sk_buff* skb)
-{
- struct ieee80211_hdr* header = (struct ieee80211_hdr*)skb->data;
- u8* act = ieee80211_get_payload(header);
- u8 tmp = 0;
-
- if (act == NULL)
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "error to get payload of action frame\n");
- return;
- }
- tmp = *act;
- act ++;
- switch (tmp)
- {
- case ACT_CAT_BA:
- if (*act == ACT_ADDBAREQ)
- ieee80211_rx_ADDBAReq(ieee, skb);
- else if (*act == ACT_ADDBARSP)
- ieee80211_rx_ADDBARsp(ieee, skb);
- else if (*act == ACT_DELBA)
- ieee80211_rx_DELBA(ieee, skb);
- break;
- default:
- break;
- }
- return;
-
-}
-inline int
-ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats, u16 type,
- u16 stype)
-{
- struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *) skb->data;
- u16 errcode;
- u8* challenge;
- int chlen=0;
- int aid;
- struct ieee80211_assoc_response_frame *assoc_resp;
- bool bSupportNmode = true, bHalfSupportNmode = false; //default support N mode, disable halfNmode
-
- if(!ieee->proto_started)
- return 0;
-
- switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
-
- case IEEE80211_STYPE_ASSOC_RESP:
- case IEEE80211_STYPE_REASSOC_RESP:
-
- IEEE80211_DEBUG_MGMT("received [RE]ASSOCIATION RESPONSE (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
- if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATED &&
- ieee->iw_mode == IW_MODE_INFRA){
- struct ieee80211_network network_resp;
- struct ieee80211_network *network = &network_resp;
-
- if (0 == (errcode=assoc_parse(ieee,skb, &aid))){
- ieee->state=IEEE80211_LINKED;
- ieee->assoc_id = aid;
- ieee->softmac_stats.rx_ass_ok++;
- /* station support qos */
- /* Let the register setting defaultly with Legacy station */
- if(ieee->qos_support) {
- assoc_resp = (struct ieee80211_assoc_response_frame*)skb->data;
- memset(network, 0, sizeof(*network));
- if (ieee80211_parse_info_param(ieee,assoc_resp->info_element,\
- rx_stats->len - sizeof(*assoc_resp),\
- network,rx_stats)){
- return 1;
- }
- else
- { //filling the PeerHTCap. //maybe not necessary as we can get its info from current_network.
- memcpy(ieee->pHTInfo->PeerHTCapBuf, network->bssht.bdHTCapBuf, network->bssht.bdHTCapLen);
- memcpy(ieee->pHTInfo->PeerHTInfoBuf, network->bssht.bdHTInfoBuf, network->bssht.bdHTInfoLen);
- }
- if (ieee->handle_assoc_response != NULL)
- ieee->handle_assoc_response(ieee, (struct ieee80211_assoc_response_frame*)header, network);
- }
- ieee80211_associate_complete(ieee);
- } else {
- /* aid could not been allocated */
- ieee->softmac_stats.rx_ass_err++;
- printk(
- "Association response status code 0x%x\n",
- errcode);
- IEEE80211_DEBUG_MGMT(
- "Association response status code 0x%x\n",
- errcode);
- if(ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT) {
- queue_work(ieee->wq, &ieee->associate_procedure_wq);
- } else {
- ieee80211_associate_abort(ieee);
- }
- }
- }
- break;
-
- case IEEE80211_STYPE_ASSOC_REQ:
- case IEEE80211_STYPE_REASSOC_REQ:
-
- if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->iw_mode == IW_MODE_MASTER)
-
- ieee80211_rx_assoc_rq(ieee, skb);
- break;
-
- case IEEE80211_STYPE_AUTH:
-
- if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE){
- if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING &&
- ieee->iw_mode == IW_MODE_INFRA){
-
- IEEE80211_DEBUG_MGMT("Received authentication response");
-
- if (0 == (errcode=auth_parse(skb, &challenge, &chlen))){
- if(ieee->open_wep || !challenge){
- ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATED;
- ieee->softmac_stats.rx_auth_rs_ok++;
- if(!(ieee->pHTInfo->IOTAction&HT_IOT_ACT_PURE_N_MODE))
- {
- if (!ieee->GetNmodeSupportBySecCfg(ieee))
- {
- // WEP or TKIP encryption
- if(IsHTHalfNmodeAPs(ieee))
- {
- bSupportNmode = true;
- bHalfSupportNmode = true;
- }
- else
- {
- bSupportNmode = false;
- bHalfSupportNmode = false;
- }
- printk("==========>to link with AP using SEC(%d, %d)\n", bSupportNmode, bHalfSupportNmode);
- }
- }
- /* Dummy wirless mode setting to avoid encryption issue */
- if(bSupportNmode) {
- //N mode setting
- ieee->SetWirelessMode(ieee,
- ieee->current_network.mode);
- }else{
- //b/g mode setting
- /*TODO*/
- ieee->SetWirelessMode(ieee, IEEE_G);
- }
-
- if (ieee->current_network.mode == IEEE_N_24G && bHalfSupportNmode == true)
- {
- printk("===============>entern half N mode\n");
- ieee->bHalfWirelessN24GMode = true;
- }
- else
- ieee->bHalfWirelessN24GMode = false;
-
- ieee80211_associate_step2(ieee);
- }else{
- ieee80211_rtl_auth_challenge(ieee, challenge, chlen);
- }
- }else{
- ieee->softmac_stats.rx_auth_rs_err++;
- IEEE80211_DEBUG_MGMT("Authentication respose status code 0x%x",errcode);
-
- printk("Authentication respose status code 0x%x",errcode);
- ieee80211_associate_abort(ieee);
- }
-
- }else if (ieee->iw_mode == IW_MODE_MASTER){
- ieee80211_rx_auth_rq(ieee, skb);
- }
- }
- break;
-
- case IEEE80211_STYPE_PROBE_REQ:
-
- if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
- ((ieee->iw_mode == IW_MODE_ADHOC ||
- ieee->iw_mode == IW_MODE_MASTER) &&
- ieee->state == IEEE80211_LINKED)){
- ieee80211_rx_probe_rq(ieee, skb);
- }
- break;
-
- case IEEE80211_STYPE_DISASSOC:
- case IEEE80211_STYPE_DEAUTH:
- /* FIXME for now repeat all the association procedure
- * both for disassociation and deauthentication
- */
- if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->state == IEEE80211_LINKED &&
- ieee->iw_mode == IW_MODE_INFRA){
-
- ieee->state = IEEE80211_ASSOCIATING;
- ieee->softmac_stats.reassoc++;
- ieee->is_roaming = true;
- ieee80211_disassociate(ieee);
- RemovePeerTS(ieee, header->addr2);
- queue_work(ieee->wq, &ieee->associate_procedure_wq);
- }
- break;
- case IEEE80211_STYPE_MANAGE_ACT:
- ieee80211_process_action(ieee,skb);
- break;
- default:
- return -1;
- break;
- }
-
- return 0;
-}
-
-/* following are for a simpler TX queue management.
- * Instead of using netif_[stop/wake]_queue the driver
- * will uses these two function (plus a reset one), that
- * will internally uses the kernel netif_* and takes
- * care of the ieee802.11 fragmentation.
- * So the driver receives a fragment per time and might
- * call the stop function when it want without take care
- * to have enough room to TX an entire packet.
- * This might be useful if each fragment need it's own
- * descriptor, thus just keep a total free memory > than
- * the max fragmentation threshold is not enough.. If the
- * ieee802.11 stack passed a TXB struct then you needed
- * to keep N free descriptors where
- * N = MAX_PACKET_SIZE / MIN_FRAG_TRESHOLD
- * In this way you need just one and the 802.11 stack
- * will take care of buffering fragments and pass them to
- * to the driver later, when it wakes the queue.
- */
-void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee)
-{
-
- unsigned int queue_index = txb->queue_index;
- unsigned long flags;
- int i;
- cb_desc *tcb_desc = NULL;
-
- spin_lock_irqsave(&ieee->lock,flags);
-
- /* called with 2nd parm 0, no tx mgmt lock required */
- ieee80211_sta_wakeup(ieee,0);
-
- /* update the tx status */
- tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
- if(tcb_desc->bMulticast) {
- ieee->stats.multicast++;
- }
-
- /* if xmit available, just xmit it immediately, else just insert it to the wait queue */
- for(i = 0; i < txb->nr_frags; i++) {
-#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- if ((skb_queue_len(&ieee->skb_drv_aggQ[queue_index]) != 0) ||
-#else
- if ((skb_queue_len(&ieee->skb_waitQ[queue_index]) != 0) ||
-#endif
- (!ieee->check_nic_enough_desc(ieee, queue_index))||
- (ieee->queue_stop)) {
- /* insert the skb packet to the wait queue */
- /* as for the completion function, it does not need
- * to check it any more.
- * */
-#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- skb_queue_tail(&ieee->skb_drv_aggQ[queue_index], txb->fragments[i]);
-#else
- skb_queue_tail(&ieee->skb_waitQ[queue_index], txb->fragments[i]);
-#endif
- }else{
- ieee->softmac_data_hard_start_xmit(
- txb->fragments[i],
- ieee, ieee->rate);
- }
- }
-
- ieee80211_txb_free(txb);
-
- spin_unlock_irqrestore(&ieee->lock,flags);
-
-}
-
-/* called with ieee->lock acquired */
-void ieee80211_resume_tx(struct ieee80211_device *ieee)
-{
- int i;
- for(i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {
-
- if (ieee->queue_stop){
- ieee->tx_pending.frag = i;
- return;
- }else{
-
- ieee->softmac_data_hard_start_xmit(
- ieee->tx_pending.txb->fragments[i],
- ieee, ieee->rate);
- ieee->stats.tx_packets++;
- }
- }
-
-
- ieee80211_txb_free(ieee->tx_pending.txb);
- ieee->tx_pending.txb = NULL;
-}
-
-
-void ieee80211_reset_queue(struct ieee80211_device *ieee)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ieee->lock,flags);
- init_mgmt_queue(ieee);
- if (ieee->tx_pending.txb){
- ieee80211_txb_free(ieee->tx_pending.txb);
- ieee->tx_pending.txb = NULL;
- }
- ieee->queue_stop = 0;
- spin_unlock_irqrestore(&ieee->lock,flags);
-
-}
-
-void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
-{
-
- unsigned long flags;
- struct sk_buff *skb;
- struct ieee80211_hdr_3addr *header;
-
- spin_lock_irqsave(&ieee->lock,flags);
- if (! ieee->queue_stop) goto exit;
-
- ieee->queue_stop = 0;
-
- if(ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE){
- while (!ieee->queue_stop && (skb = dequeue_mgmt(ieee))){
-
- header = (struct ieee80211_hdr_3addr *) skb->data;
-
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
-
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
-
- ieee->softmac_data_hard_start_xmit(skb, ieee, ieee->basic_rate);
- }
- }
- if (!ieee->queue_stop && ieee->tx_pending.txb)
- ieee80211_resume_tx(ieee);
-
- if (!ieee->queue_stop && netif_queue_stopped(ieee->dev)){
- ieee->softmac_stats.swtxawake++;
- netif_wake_queue(ieee->dev);
- }
-
-exit :
- spin_unlock_irqrestore(&ieee->lock,flags);
-}
-
-
-void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee)
-{
- if (! netif_queue_stopped(ieee->dev)){
- netif_stop_queue(ieee->dev);
- ieee->softmac_stats.swtxstop++;
- }
- ieee->queue_stop = 1;
-}
-
-
-inline void ieee80211_randomize_cell(struct ieee80211_device *ieee)
-{
-
- get_random_bytes(ieee->current_network.bssid, ETH_ALEN);
-
- /* an IBSS cell address must have the two less significant
- * bits of the first byte = 2
- */
- ieee->current_network.bssid[0] &= ~0x01;
- ieee->current_network.bssid[0] |= 0x02;
-}
-
-/* called in user context only */
-void ieee80211_start_master_bss(struct ieee80211_device *ieee)
-{
- ieee->assoc_id = 1;
-
- if (ieee->current_network.ssid_len == 0){
- strncpy(ieee->current_network.ssid,
- IEEE80211_DEFAULT_TX_ESSID,
- IW_ESSID_MAX_SIZE);
-
- ieee->current_network.ssid_len = strlen(IEEE80211_DEFAULT_TX_ESSID);
- ieee->ssid_set = 1;
- }
-
- memcpy(ieee->current_network.bssid, ieee->dev->dev_addr, ETH_ALEN);
-
- ieee->set_chan(ieee, ieee->current_network.channel);
- ieee->state = IEEE80211_LINKED;
- ieee->link_change(ieee);
- notify_wx_assoc_event(ieee);
-
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee);
-
- netif_carrier_on(ieee->dev);
-}
-
-void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
-{
- if(ieee->raw_tx){
-
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee);
-
- netif_carrier_on(ieee->dev);
- }
-}
-void ieee80211_start_ibss_wq(struct work_struct *work)
-{
-
- struct delayed_work *dwork = container_of(work, struct delayed_work, work);
- struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
- /* iwconfig mode ad-hoc will schedule this and return
- * on the other hand this will block further iwconfig SET
- * operations because of the wx_sem hold.
- * Anyway some most set operations set a flag to speed-up
- * (abort) this wq (when syncro scanning) before sleeping
- * on the semaphore
- */
- if(!ieee->proto_started){
- printk("==========oh driver down return\n");
- return;
- }
- down(&ieee->wx_sem);
-
- if (ieee->current_network.ssid_len == 0){
- strcpy(ieee->current_network.ssid,IEEE80211_DEFAULT_TX_ESSID);
- ieee->current_network.ssid_len = strlen(IEEE80211_DEFAULT_TX_ESSID);
- ieee->ssid_set = 1;
- }
-
- ieee->state = IEEE80211_NOLINK;
- /* check if we have this cell in our network list */
- ieee80211_softmac_check_all_nets(ieee);
-
-
-#ifdef ENABLE_DOT11D //if creating an ad-hoc, set its channel to 10 temporarily--this is the requirement for ASUS, not 11D, so disable 11d.
- if (ieee->state == IEEE80211_NOLINK)
- ieee->current_network.channel = 6;
-#endif
- /* if not then the state is not linked. Maybe the user swithced to
- * ad-hoc mode just after being in monitor mode, or just after
- * being very few time in managed mode (so the card have had no
- * time to scan all the chans..) or we have just run up the iface
- * after setting ad-hoc mode. So we have to give another try..
- * Here, in ibss mode, should be safe to do this without extra care
- * (in bss mode we had to make sure no-one tryed to associate when
- * we had just checked the ieee->state and we was going to start the
- * scan) beacause in ibss mode the ieee80211_new_net function, when
- * finds a good net, just set the ieee->state to IEEE80211_LINKED,
- * so, at worst, we waste a bit of time to initiate an unneeded syncro
- * scan, that will stop at the first round because it sees the state
- * associated.
- */
- if (ieee->state == IEEE80211_NOLINK)
- ieee80211_start_scan_syncro(ieee);
-
- /* the network definitively is not here.. create a new cell */
- if (ieee->state == IEEE80211_NOLINK){
- printk("creating new IBSS cell\n");
- if(!ieee->wap_set)
- ieee80211_randomize_cell(ieee);
-
- if(ieee->modulation & IEEE80211_CCK_MODULATION){
-
- ieee->current_network.rates_len = 4;
-
- ieee->current_network.rates[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
- ieee->current_network.rates[1] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
- ieee->current_network.rates[2] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
- ieee->current_network.rates[3] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
-
- }else
- ieee->current_network.rates_len = 0;
-
- if(ieee->modulation & IEEE80211_OFDM_MODULATION){
- ieee->current_network.rates_ex_len = 8;
-
- ieee->current_network.rates_ex[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
- ieee->current_network.rates_ex[1] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
- ieee->current_network.rates_ex[2] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
- ieee->current_network.rates_ex[3] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
- ieee->current_network.rates_ex[4] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
- ieee->current_network.rates_ex[5] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
- ieee->current_network.rates_ex[6] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
- ieee->current_network.rates_ex[7] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
-
- ieee->rate = 108;
- }else{
- ieee->current_network.rates_ex_len = 0;
- ieee->rate = 22;
- }
-
- // By default, WMM function will be disabled in IBSS mode
- ieee->current_network.QoS_Enable = 0;
- ieee->SetWirelessMode(ieee, IEEE_G);
- ieee->current_network.atim_window = 0;
- ieee->current_network.capability = WLAN_CAPABILITY_IBSS;
- if(ieee->short_slot)
- ieee->current_network.capability |= WLAN_CAPABILITY_SHORT_SLOT;
-
- }
-
- ieee->state = IEEE80211_LINKED;
-
- ieee->set_chan(ieee, ieee->current_network.channel);
- ieee->link_change(ieee);
-
- notify_wx_assoc_event(ieee);
-
- ieee80211_start_send_beacons(ieee);
-
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee);
- netif_carrier_on(ieee->dev);
-
- up(&ieee->wx_sem);
-}
-
-inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
-{
- queue_delayed_work(ieee->wq, &ieee->start_ibss_wq, 150);
-}
-
-/* this is called only in user context, with wx_sem held */
-void ieee80211_start_bss(struct ieee80211_device *ieee)
-{
- unsigned long flags;
-#ifdef ENABLE_DOT11D
- //
- // Ref: 802.11d 11.1.3.3
- // STA shall not start a BSS unless properly formed Beacon frame including a Country IE.
- //
- if(IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee))
- {
- if(! ieee->bGlobalDomain)
- {
- return;
- }
- }
-#endif
- /* check if we have already found the net we
- * are interested in (if any).
- * if not (we are disassociated and we are not
- * in associating / authenticating phase) start the background scanning.
- */
- ieee80211_softmac_check_all_nets(ieee);
-
- /* ensure no-one start an associating process (thus setting
- * the ieee->state to ieee80211_ASSOCIATING) while we
- * have just cheked it and we are going to enable scan.
- * The ieee80211_new_net function is always called with
- * lock held (from both ieee80211_softmac_check_all_nets and
- * the rx path), so we cannot be in the middle of such function
- */
- spin_lock_irqsave(&ieee->lock, flags);
-
- if (ieee->state == IEEE80211_NOLINK){
-#ifdef ENABLE_IPS
- if(ieee->ieee80211_ips_leave_wq != NULL)
- ieee->ieee80211_ips_leave_wq(ieee);
-#endif
- ieee->actscanning = true;
- ieee80211_rtl_start_scan(ieee);
- }
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-
-/* called only in userspace context */
-void ieee80211_disassociate(struct ieee80211_device *ieee)
-{
-
-
- netif_carrier_off(ieee->dev);
- if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)
- ieee80211_reset_queue(ieee);
-
- if (ieee->data_hard_stop)
- ieee->data_hard_stop(ieee);
-#ifdef ENABLE_DOT11D
- if(IS_DOT11D_ENABLE(ieee))
- Dot11d_Reset(ieee);
-#endif
- ieee->is_set_key = false;
- ieee->link_change(ieee);
- if (ieee->state == IEEE80211_LINKED ||
- ieee->state == IEEE80211_ASSOCIATING) {
- ieee->state = IEEE80211_NOLINK;
- notify_wx_assoc_event(ieee);
- }
-
- ieee->state = IEEE80211_NOLINK;
-
-}
-void ieee80211_associate_retry_wq(struct work_struct *work)
-{
- struct delayed_work *dwork = container_of(work, struct delayed_work, work);
- struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
- unsigned long flags;
-
- down(&ieee->wx_sem);
- if(!ieee->proto_started)
- goto exit;
-
- if(ieee->state != IEEE80211_ASSOCIATING_RETRY)
- goto exit;
-
- /* until we do not set the state to IEEE80211_NOLINK
- * there are no possibility to have someone else trying
- * to start an association procdure (we get here with
- * ieee->state = IEEE80211_ASSOCIATING).
- * When we set the state to IEEE80211_NOLINK it is possible
- * that the RX path run an attempt to associate, but
- * both ieee80211_softmac_check_all_nets and the
- * RX path works with ieee->lock held so there are no
- * problems. If we are still disassociated then start a scan.
- * the lock here is necessary to ensure no one try to start
- * an association procedure when we have just checked the
- * state and we are going to start the scan.
- */
- ieee->beinretry = true;
- ieee->state = IEEE80211_NOLINK;
-
- ieee80211_softmac_check_all_nets(ieee);
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- if(ieee->state == IEEE80211_NOLINK)
- {
- ieee->is_roaming= false;
- ieee->actscanning = true;
- ieee80211_rtl_start_scan(ieee);
- }
- spin_unlock_irqrestore(&ieee->lock, flags);
-
- ieee->beinretry = false;
-exit:
- up(&ieee->wx_sem);
-}
-
-struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee)
-{
- u8 broadcast_addr[] = {0xff,0xff,0xff,0xff,0xff,0xff};
-
- struct sk_buff *skb;
- struct ieee80211_probe_response *b;
-
- skb = ieee80211_probe_resp(ieee, broadcast_addr);
-
- if (!skb)
- return NULL;
-
- b = (struct ieee80211_probe_response *) skb->data;
- b->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_BEACON);
-
- return skb;
-
-}
-
-struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee)
-{
- struct sk_buff *skb;
- struct ieee80211_probe_response *b;
-
- skb = ieee80211_get_beacon_(ieee);
- if(!skb)
- return NULL;
-
- b = (struct ieee80211_probe_response *) skb->data;
- b->header.seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
-
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
-
- return skb;
-}
-
-void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee, u8 shutdown)
-{
- ieee->sync_scan_hurryup = 1;
- down(&ieee->wx_sem);
- ieee80211_stop_protocol(ieee, shutdown);
- up(&ieee->wx_sem);
-}
-
-
-void ieee80211_stop_protocol(struct ieee80211_device *ieee, u8 shutdown)
-{
- if (!ieee->proto_started)
- return;
-
- if(shutdown)
- ieee->proto_started = 0;
- ieee->proto_stoppping = 1;
-
- ieee80211_stop_send_beacons(ieee);
- del_timer_sync(&ieee->associate_timer);
- cancel_delayed_work(&ieee->associate_retry_wq);
- cancel_delayed_work(&ieee->start_ibss_wq);
- ieee80211_stop_scan(ieee);
-
- ieee80211_disassociate(ieee);
- RemoveAllTS(ieee); //added as we disconnect from the previous BSS, Remove all TS
-
- ieee->proto_stoppping = 0;
-}
-
-void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee)
-{
- ieee->sync_scan_hurryup = 0;
- down(&ieee->wx_sem);
- ieee80211_start_protocol(ieee);
- up(&ieee->wx_sem);
-}
-
-void ieee80211_start_protocol(struct ieee80211_device *ieee)
-{
- short ch = 0;
- int i = 0;
- if (ieee->proto_started)
- return;
-
- ieee->proto_started = 1;
-
- if (ieee->current_network.channel == 0){
- do{
- ch++;
- if (ch > MAX_CHANNEL_NUMBER)
- return; /* no channel found */
-#ifdef ENABLE_DOT11D
- }while(!GET_DOT11D_INFO(ieee)->channel_map[ch]);
-#else
- }while(!ieee->channel_map[ch]);
-#endif
- ieee->current_network.channel = ch;
- }
-
- if (ieee->current_network.beacon_interval == 0)
- ieee->current_network.beacon_interval = 100;
-
- for(i = 0; i < 17; i++) {
- ieee->last_rxseq_num[i] = -1;
- ieee->last_rxfrag_num[i] = -1;
- ieee->last_packet_time[i] = 0;
- }
-
- ieee->init_wmmparam_flag = 0;//reinitialize AC_xx_PARAM registers.
-
- ieee->state = IEEE80211_NOLINK;
-
-
- /* if the user set the MAC of the ad-hoc cell and then
- * switch to managed mode, shall we make sure that association
- * attempts does not fail just because the user provide the essid
- * and the nic is still checking for the AP MAC ??
- */
- if (ieee->iw_mode == IW_MODE_INFRA)
- ieee80211_start_bss(ieee);
-
- else if (ieee->iw_mode == IW_MODE_ADHOC)
- ieee80211_start_ibss(ieee);
-
- else if (ieee->iw_mode == IW_MODE_MASTER)
- ieee80211_start_master_bss(ieee);
-
- else if(ieee->iw_mode == IW_MODE_MONITOR)
- ieee80211_start_monitor_mode(ieee);
-}
-
-
-#define DRV_NAME "Ieee80211"
-void ieee80211_softmac_init(struct ieee80211_device *ieee)
-{
- int i;
- memset(&ieee->current_network, 0, sizeof(struct ieee80211_network));
-
- ieee->state = IEEE80211_NOLINK;
- ieee->sync_scan_hurryup = 0;
- for(i = 0; i < 5; i++) {
- ieee->seq_ctrl[i] = 0;
- }
-#ifdef ENABLE_DOT11D
- ieee->pDot11dInfo = kzalloc(sizeof(RT_DOT11D_INFO), GFP_ATOMIC);
- if (!ieee->pDot11dInfo)
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for DOT11D\n");
-#endif
- //added for AP roaming
- ieee->LinkDetectInfo.SlotNum = 2;
- ieee->LinkDetectInfo.NumRecvBcnInPeriod=0;
- ieee->LinkDetectInfo.NumRecvDataInPeriod=0;
-
- ieee->assoc_id = 0;
- ieee->queue_stop = 0;
- ieee->scanning = 0;
- ieee->softmac_features = 0; //so IEEE2100-like driver are happy
- ieee->wap_set = 0;
- ieee->ssid_set = 0;
- ieee->proto_started = 0;
- ieee->basic_rate = IEEE80211_DEFAULT_BASIC_RATE;
- ieee->rate = 22;
- ieee->ps = IEEE80211_PS_DISABLED;
- ieee->sta_sleep = 0;
- ieee->Regdot11HTOperationalRateSet[0]= 0xff;//support MCS 0~7
- ieee->Regdot11HTOperationalRateSet[1]= 0xff;//support MCS 8~15
- ieee->Regdot11HTOperationalRateSet[4]= 0x01;
- //added by amy
- ieee->actscanning = false;
- ieee->beinretry = false;
- ieee->is_set_key = false;
- init_mgmt_queue(ieee);
-
- ieee->sta_edca_param[0] = 0x0000A403;
- ieee->sta_edca_param[1] = 0x0000A427;
- ieee->sta_edca_param[2] = 0x005E4342;
- ieee->sta_edca_param[3] = 0x002F3262;
- ieee->aggregation = true;
- ieee->enable_rx_imm_BA = 1;
- ieee->tx_pending.txb = NULL;
-
- init_timer(&ieee->associate_timer);
- ieee->associate_timer.data = (unsigned long)ieee;
- ieee->associate_timer.function = ieee80211_associate_abort_cb;
-
- init_timer(&ieee->beacon_timer);
- ieee->beacon_timer.data = (unsigned long) ieee;
- ieee->beacon_timer.function = ieee80211_send_beacon_cb;
-
- ieee->wq = create_workqueue(DRV_NAME);
-
- INIT_DELAYED_WORK(&ieee->start_ibss_wq,ieee80211_start_ibss_wq);
- INIT_WORK(&ieee->associate_complete_wq, ieee80211_associate_complete_wq);
- INIT_WORK(&ieee->associate_procedure_wq, ieee80211_associate_procedure_wq);
- INIT_DELAYED_WORK(&ieee->softmac_scan_wq,ieee80211_softmac_scan_wq);
- INIT_DELAYED_WORK(&ieee->associate_retry_wq, ieee80211_associate_retry_wq);
- INIT_WORK(&ieee->wx_sync_scan_wq,ieee80211_wx_sync_scan_wq);
-
- sema_init(&ieee->wx_sem, 1);
- sema_init(&ieee->scan_sem, 1);
-#ifdef ENABLE_IPS
- sema_init(&ieee->ips_sem,1);
-#endif
- spin_lock_init(&ieee->mgmt_tx_lock);
- spin_lock_init(&ieee->beacon_lock);
-
- tasklet_init(&ieee->ps_task,
- (void(*)(unsigned long)) ieee80211_sta_ps,
- (unsigned long)ieee);
-
-}
-
-void ieee80211_softmac_free(struct ieee80211_device *ieee)
-{
- down(&ieee->wx_sem);
-#ifdef ENABLE_DOT11D
- kfree(ieee->pDot11dInfo);
- ieee->pDot11dInfo = NULL;
-#endif
- del_timer_sync(&ieee->associate_timer);
-
- cancel_delayed_work(&ieee->associate_retry_wq);
- destroy_workqueue(ieee->wq);
-
- up(&ieee->wx_sem);
-}
-
-/********************************************************
- * Start of WPA code. *
- * this is stolen from the ipw2200 driver *
- ********************************************************/
-
-
-static int ieee80211_wpa_enable(struct ieee80211_device *ieee, int value)
-{
- /* This is called when wpa_supplicant loads and closes the driver
- * interface. */
- printk("%s WPA\n",value ? "enabling" : "disabling");
- ieee->wpa_enabled = value;
- return 0;
-}
-
-
-void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie, int wpa_ie_len)
-{
- /* make sure WPA is enabled */
- ieee80211_wpa_enable(ieee, 1);
-
- ieee80211_disassociate(ieee);
-}
-
-
-static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command, int reason)
-{
-
- int ret = 0;
-
- switch (command) {
- case IEEE_MLME_STA_DEAUTH:
- // silently ignore
- break;
-
- case IEEE_MLME_STA_DISASSOC:
- ieee80211_disassociate(ieee);
- break;
-
- default:
- printk("Unknown MLME request: %d\n", command);
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-
-static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
- struct ieee_param *param, int plen)
-{
- u8 *buf;
-
- if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
- (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
- return -EINVAL;
-
- if (param->u.wpa_ie.len) {
- buf = kmemdup(param->u.wpa_ie.data, param->u.wpa_ie.len,
- GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- kfree(ieee->wpa_ie);
- ieee->wpa_ie = buf;
- ieee->wpa_ie_len = param->u.wpa_ie.len;
- } else {
- kfree(ieee->wpa_ie);
- ieee->wpa_ie = NULL;
- ieee->wpa_ie_len = 0;
- }
-
- ieee80211_wpa_assoc_frame(ieee, ieee->wpa_ie, ieee->wpa_ie_len);
- return 0;
-}
-
-#define AUTH_ALG_OPEN_SYSTEM 0x1
-#define AUTH_ALG_SHARED_KEY 0x2
-
-static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value)
-{
-
- struct ieee80211_security sec = {
- .flags = SEC_AUTH_MODE,
- };
- int ret = 0;
-
- if (value & AUTH_ALG_SHARED_KEY) {
- sec.auth_mode = WLAN_AUTH_SHARED_KEY;
- ieee->open_wep = 0;
- ieee->auth_mode = 1;
- } else if (value & AUTH_ALG_OPEN_SYSTEM){
- sec.auth_mode = WLAN_AUTH_OPEN;
- ieee->open_wep = 1;
- ieee->auth_mode = 0;
- }
- else if (value & IW_AUTH_ALG_LEAP){
- sec.auth_mode = WLAN_AUTH_LEAP;
- ieee->open_wep = 1;
- ieee->auth_mode = 2;
- }
-
-
- if (ieee->set_security)
- ieee->set_security(ieee, &sec);
-
- return ret;
-}
-
-static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name, u32 value)
-{
- int ret=0;
- unsigned long flags;
-
- switch (name) {
- case IEEE_PARAM_WPA_ENABLED:
- ret = ieee80211_wpa_enable(ieee, value);
- break;
-
- case IEEE_PARAM_TKIP_COUNTERMEASURES:
- ieee->tkip_countermeasures=value;
- break;
-
- case IEEE_PARAM_DROP_UNENCRYPTED: {
- /* HACK:
- *
- * wpa_supplicant calls set_wpa_enabled when the driver
- * is loaded and unloaded, regardless of if WPA is being
- * used. No other calls are made which can be used to
- * determine if encryption will be used or not prior to
- * association being expected. If encryption is not being
- * used, drop_unencrypted is set to false, else true -- we
- * can use this to determine if the CAP_PRIVACY_ON bit should
- * be set.
- */
- struct ieee80211_security sec = {
- .flags = SEC_ENABLED,
- .enabled = value,
- };
- ieee->drop_unencrypted = value;
- /* We only change SEC_LEVEL for open mode. Others
- * are set by ipw_wpa_set_encryption.
- */
- if (!value) {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_0;
- }
- else {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_1;
- }
- if (ieee->set_security)
- ieee->set_security(ieee, &sec);
- break;
- }
-
- case IEEE_PARAM_PRIVACY_INVOKED:
- ieee->privacy_invoked=value;
- break;
-
- case IEEE_PARAM_AUTH_ALGS:
- ret = ieee80211_wpa_set_auth_algs(ieee, value);
- break;
-
- case IEEE_PARAM_IEEE_802_1X:
- ieee->ieee802_1x=value;
- break;
- case IEEE_PARAM_WPAX_SELECT:
- // added for WPA2 mixed mode
- spin_lock_irqsave(&ieee->wpax_suitlist_lock,flags);
- ieee->wpax_type_set = 1;
- ieee->wpax_type_notify = value;
- spin_unlock_irqrestore(&ieee->wpax_suitlist_lock,flags);
- break;
-
- default:
- printk("Unknown WPA param: %d\n",name);
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-/* implementation borrowed from hostap driver */
-
-static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
- struct ieee_param *param, int param_len)
-{
- int ret = 0;
-
- struct ieee80211_crypto_ops *ops;
- struct ieee80211_crypt_data **crypt;
-
- struct ieee80211_security sec = {
- .flags = 0,
- };
-
- param->u.crypt.err = 0;
- param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
-
- if (param_len !=
- (int) ((char *) param->u.crypt.key - (char *) param) +
- param->u.crypt.key_len) {
- printk("Len mismatch %d, %d\n", param_len,
- param->u.crypt.key_len);
- return -EINVAL;
- }
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- if (param->u.crypt.idx >= WEP_KEYS)
- return -EINVAL;
- crypt = &ieee->crypt[param->u.crypt.idx];
- } else {
- return -EINVAL;
- }
-
- if (strcmp(param->u.crypt.alg, "none") == 0) {
- if (crypt) {
- sec.enabled = 0;
- // FIXME FIXME
- //sec.encrypt = 0;
- sec.level = SEC_LEVEL_0;
- sec.flags |= SEC_ENABLED | SEC_LEVEL;
- ieee80211_crypt_delayed_deinit(ieee, crypt);
- }
- goto done;
- }
- sec.enabled = 1;
-// FIXME FIXME
-// sec.encrypt = 1;
- sec.flags |= SEC_ENABLED;
-
- /* IPW HW cannot build TKIP MIC, host decryption still needed. */
- if (!(ieee->host_encrypt || ieee->host_decrypt) &&
- strcmp(param->u.crypt.alg, "TKIP"))
- goto skip_host_crypt;
-
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0)
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- /* set WEP40 first, it will be modified according to WEP104 or
- * WEP40 at other place */
- else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0)
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0)
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- if (ops == NULL) {
- printk("unknown crypto alg '%s'\n", param->u.crypt.alg);
- param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG;
- ret = -EINVAL;
- goto done;
- }
-
- if (*crypt == NULL || (*crypt)->ops != ops) {
- struct ieee80211_crypt_data *new_crypt;
-
- ieee80211_crypt_delayed_deinit(ieee, crypt);
-
- new_crypt = kmalloc(sizeof(*new_crypt), GFP_KERNEL);
- if (new_crypt == NULL) {
- ret = -ENOMEM;
- goto done;
- }
- memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
- new_crypt->ops = ops;
- if (new_crypt->ops)
- new_crypt->priv =
- new_crypt->ops->init(param->u.crypt.idx);
-
- if (new_crypt->priv == NULL) {
- kfree(new_crypt);
- param->u.crypt.err = IEEE_CRYPT_ERR_CRYPT_INIT_FAILED;
- ret = -EINVAL;
- goto done;
- }
-
- *crypt = new_crypt;
- }
-
- if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
- (*crypt)->ops->set_key(param->u.crypt.key,
- param->u.crypt.key_len, param->u.crypt.seq,
- (*crypt)->priv) < 0) {
- printk("key setting failed\n");
- param->u.crypt.err = IEEE_CRYPT_ERR_KEY_SET_FAILED;
- ret = -EINVAL;
- goto done;
- }
-
- skip_host_crypt:
- if (param->u.crypt.set_tx) {
- ieee->tx_keyidx = param->u.crypt.idx;
- sec.active_key = param->u.crypt.idx;
- sec.flags |= SEC_ACTIVE_KEY;
- } else
- sec.flags &= ~SEC_ACTIVE_KEY;
-
- if (param->u.crypt.alg != NULL) {
- memcpy(sec.keys[param->u.crypt.idx],
- param->u.crypt.key,
- param->u.crypt.key_len);
- sec.key_sizes[param->u.crypt.idx] = param->u.crypt.key_len;
- sec.flags |= (1 << param->u.crypt.idx);
-
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_1;
- } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_2;
- } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_3;
- }
- }
- done:
- if (ieee->set_security)
- ieee->set_security(ieee, &sec);
-
- /* Do not reset port if card is in Managed mode since resetting will
- * generate new IEEE 802.11 authentication which may end up in looping
- * with IEEE 802.1X. If your hardware requires a reset after WEP
- * configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack. */
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port &&
- ieee->reset_port(ieee)) {
- printk("reset_port failed\n");
- param->u.crypt.err = IEEE_CRYPT_ERR_CARD_CONF_FAILED;
- return -EINVAL;
- }
-
- return ret;
-}
-
-inline struct sk_buff *ieee80211_disassociate_skb(
- struct ieee80211_network *beacon,
- struct ieee80211_device *ieee,
- u8 asRsn)
-{
- struct sk_buff *skb;
- struct ieee80211_disassoc *disass;
-
- skb = dev_alloc_skb(sizeof(struct ieee80211_disassoc));
- if (!skb)
- return NULL;
-
- disass = (struct ieee80211_disassoc *) skb_put(skb,sizeof(struct ieee80211_disassoc));
- disass->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
- disass->header.duration_id = 0;
-
- memcpy(disass->header.addr1, beacon->bssid, ETH_ALEN);
- memcpy(disass->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(disass->header.addr3, beacon->bssid, ETH_ALEN);
-
- disass->reason = asRsn;
- return skb;
-}
-
-
-void
-SendDisassociation(
- struct ieee80211_device *ieee,
- u8* asSta,
- u8 asRsn
-)
-{
- struct ieee80211_network *beacon = &ieee->current_network;
- struct sk_buff *skb;
- skb = ieee80211_disassociate_skb(beacon,ieee,asRsn);
- if (skb){
- softmac_mgmt_xmit(skb, ieee);
- //dev_kfree_skb_any(skb);//edit by thomas
- }
-}
-
-int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_point *p)
-{
- struct ieee_param *param;
- int ret=0;
-
- down(&ieee->wx_sem);
- //IEEE_DEBUG_INFO("wpa_supplicant: len=%d\n", p->length);
-
- if (p->length < sizeof(struct ieee_param) || !p->pointer){
- ret = -EINVAL;
- goto out;
- }
-
- param = kmalloc(p->length, GFP_KERNEL);
- if (param == NULL){
- ret = -ENOMEM;
- goto out;
- }
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- ret = -EFAULT;
- goto out;
- }
-
- switch (param->cmd) {
-
- case IEEE_CMD_SET_WPA_PARAM:
- ret = ieee80211_wpa_set_param(ieee, param->u.wpa_param.name,
- param->u.wpa_param.value);
- break;
-
- case IEEE_CMD_SET_WPA_IE:
- ret = ieee80211_wpa_set_wpa_ie(ieee, param, p->length);
- break;
-
- case IEEE_CMD_SET_ENCRYPTION:
- ret = ieee80211_wpa_set_encryption(ieee, param, p->length);
- break;
-
- case IEEE_CMD_MLME:
- ret = ieee80211_wpa_mlme(ieee, param->u.mlme.command,
- param->u.mlme.reason_code);
- break;
-
- default:
- printk("Unknown WPA supplicant request: %d\n",param->cmd);
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (ret == 0 && copy_to_user(p->pointer, param, p->length))
- ret = -EFAULT;
-
- kfree(param);
-out:
- up(&ieee->wx_sem);
-
- return ret;
-}
-
-void notify_wx_assoc_event(struct ieee80211_device *ieee)
-{
- union iwreq_data wrqu;
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- if (ieee->state == IEEE80211_LINKED)
- memcpy(wrqu.ap_addr.sa_data, ieee->current_network.bssid, ETH_ALEN);
- else
- memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
- wireless_send_event(ieee->dev, SIOCGIWAP, &wrqu, NULL);
-}
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c
deleted file mode 100644
index d8a068e32e5..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c
+++ /dev/null
@@ -1,600 +0,0 @@
-/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
- *
- * Mostly extracted from the rtl8180-sa2400 driver for the
- * in-kernel generic ieee802.11 stack.
- *
- * Some pieces of code might be stolen from ipw2100 driver
- * copyright of who own it's copyright ;-)
- *
- * PS wx handler mostly stolen from hostap, copyright who
- * own it's copyright ;-)
- *
- * released under the GPL
- */
-
-
-#include "ieee80211.h"
-#ifdef ENABLE_DOT11D
-#include "dot11d.h"
-#endif
-/* FIXME: add A freqs */
-
-const long ieee80211_wlan_frequencies[] = {
- 2412, 2417, 2422, 2427,
- 2432, 2437, 2442, 2447,
- 2452, 2457, 2462, 2467,
- 2472, 2484
-};
-
-
-int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- int ret;
- struct iw_freq *fwrq = & wrqu->freq;
-
- down(&ieee->wx_sem);
-
- if(ieee->iw_mode == IW_MODE_INFRA){
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- /* if setting by freq convert to channel */
- if (fwrq->e == 1) {
- if ((fwrq->m >= (int) 2.412e8 &&
- fwrq->m <= (int) 2.487e8)) {
- int f = fwrq->m / 100000;
- int c = 0;
-
- while ((c < 14) && (f != ieee80211_wlan_frequencies[c]))
- c++;
-
- /* hack to fall through */
- fwrq->e = 0;
- fwrq->m = c + 1;
- }
- }
-
- if (fwrq->e > 0 || fwrq->m > 14 || fwrq->m < 1 ){
- ret = -EOPNOTSUPP;
- goto out;
-
- }else { /* Set the channel */
-
-#ifdef ENABLE_DOT11D
- if (!(GET_DOT11D_INFO(ieee)->channel_map)[fwrq->m]) {
- ret = -EINVAL;
- goto out;
- }
-#endif
- ieee->current_network.channel = fwrq->m;
- ieee->set_chan(ieee, ieee->current_network.channel);
-
- if(ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER)
- if(ieee->state == IEEE80211_LINKED){
-
- ieee80211_stop_send_beacons(ieee);
- ieee80211_start_send_beacons(ieee);
- }
- }
-
- ret = 0;
-out:
- up(&ieee->wx_sem);
- return ret;
-}
-
-
-int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct iw_freq *fwrq = & wrqu->freq;
-
- if (ieee->current_network.channel == 0)
- return -1;
- //NM 0.7.0 will not accept channel any more.
- fwrq->m = ieee80211_wlan_frequencies[ieee->current_network.channel-1] * 100000;
- fwrq->e = 1;
-
- return 0;
-}
-
-int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- unsigned long flags;
- wrqu->ap_addr.sa_family = ARPHRD_ETHER;
-
- if (ieee->iw_mode == IW_MODE_MONITOR)
- return -1;
-
- /* We want avoid to give to the user inconsistent infos*/
- spin_lock_irqsave(&ieee->lock, flags);
-
- if (ieee->state != IEEE80211_LINKED &&
- ieee->state != IEEE80211_LINKED_SCANNING &&
- ieee->wap_set == 0)
-
- memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
- else
- memcpy(wrqu->ap_addr.sa_data,
- ieee->current_network.bssid, ETH_ALEN);
-
- spin_unlock_irqrestore(&ieee->lock, flags);
-
- return 0;
-}
-
-
-int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra)
-{
-
- int ret = 0;
- u8 zero[] = {0,0,0,0,0,0};
- unsigned long flags;
-
- short ifup = ieee->proto_started;//dev->flags & IFF_UP;
- struct sockaddr *temp = (struct sockaddr *)awrq;
-
- ieee->sync_scan_hurryup = 1;
-
- down(&ieee->wx_sem);
- /* use ifconfig hw ether */
- if (ieee->iw_mode == IW_MODE_MASTER){
- ret = -1;
- goto out;
- }
-
- if (temp->sa_family != ARPHRD_ETHER){
- ret = -EINVAL;
- goto out;
- }
-
- if (ifup)
- ieee80211_stop_protocol(ieee,true);
-
- /* just to avoid to give inconsistent infos in the
- * get wx method. not really needed otherwise
- */
- spin_lock_irqsave(&ieee->lock, flags);
-
- memcpy(ieee->current_network.bssid, temp->sa_data, ETH_ALEN);
- ieee->wap_set = memcmp(temp->sa_data, zero,ETH_ALEN)!=0;
-
- spin_unlock_irqrestore(&ieee->lock, flags);
-
- if (ifup)
- ieee80211_start_protocol(ieee);
-out:
- up(&ieee->wx_sem);
- return ret;
-}
-
- int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a,union iwreq_data *wrqu,char *b)
-{
- int len,ret = 0;
- unsigned long flags;
-
- if (ieee->iw_mode == IW_MODE_MONITOR)
- return -1;
-
- /* We want avoid to give to the user inconsistent infos*/
- spin_lock_irqsave(&ieee->lock, flags);
-
- if (ieee->current_network.ssid[0] == '\0' ||
- ieee->current_network.ssid_len == 0){
- ret = -1;
- goto out;
- }
-
- if (ieee->state != IEEE80211_LINKED &&
- ieee->state != IEEE80211_LINKED_SCANNING &&
- ieee->ssid_set == 0){
- ret = -1;
- goto out;
- }
- len = ieee->current_network.ssid_len;
- wrqu->essid.length = len;
- strncpy(b,ieee->current_network.ssid,len);
- wrqu->essid.flags = 1;
-
-out:
- spin_unlock_irqrestore(&ieee->lock, flags);
-
- return ret;
-
-}
-
-int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
-
- u32 target_rate = wrqu->bitrate.value;
-
- ieee->rate = target_rate/100000;
- //FIXME: we might want to limit rate also in management protocols.
- return 0;
-}
-
-
-
-int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- u32 tmp_rate;
- tmp_rate = TxCountToDataRate(ieee, ieee->softmac_stats.CurrentShowTxate);
-
- wrqu->bitrate.value = tmp_rate * 500000;
-
- return 0;
-}
-
-
-int ieee80211_wx_set_rts(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- if (wrqu->rts.disabled || !wrqu->rts.fixed)
- ieee->rts = DEFAULT_RTS_THRESHOLD;
- else
- {
- if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
- wrqu->rts.value > MAX_RTS_THRESHOLD)
- return -EINVAL;
- ieee->rts = wrqu->rts.value;
- }
- return 0;
-}
-
-int ieee80211_wx_get_rts(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- wrqu->rts.value = ieee->rts;
- wrqu->rts.fixed = 0; /* no auto select */
- wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
- return 0;
-}
-int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
-
- ieee->sync_scan_hurryup = 1;
-
- down(&ieee->wx_sem);
-
- if (wrqu->mode == ieee->iw_mode)
- goto out;
-
- if (wrqu->mode == IW_MODE_MONITOR){
-
- ieee->dev->type = ARPHRD_IEEE80211;
- }else{
- ieee->dev->type = ARPHRD_ETHER;
- }
-
- if (!ieee->proto_started){
- ieee->iw_mode = wrqu->mode;
- }else{
- ieee80211_stop_protocol(ieee,true);
- ieee->iw_mode = wrqu->mode;
- ieee80211_start_protocol(ieee);
- }
-
-out:
- up(&ieee->wx_sem);
- return 0;
-}
-
-void ieee80211_wx_sync_scan_wq(struct work_struct *work)
-{
- struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wx_sync_scan_wq);
- short chan;
- HT_EXTCHNL_OFFSET chan_offset=0;
- HT_CHANNEL_WIDTH bandwidth=0;
- int b40M = 0;
- static int count = 0;
- chan = ieee->current_network.channel;
-
-#ifdef ENABLE_LPS
- if (ieee->LeisurePSLeave) {
- ieee->LeisurePSLeave(ieee);
- }
-
- /* notify AP to be in PS mode */
- ieee80211_sta_ps_send_null_frame(ieee, 1);
- ieee80211_sta_ps_send_null_frame(ieee, 1);
-#endif
-
- if (ieee->data_hard_stop)
- ieee->data_hard_stop(ieee);
-
- ieee80211_stop_send_beacons(ieee);
-
- ieee->state = IEEE80211_LINKED_SCANNING;
- ieee->link_change(ieee);
- ieee->InitialGainHandler(ieee, IG_Backup);
- if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT && ieee->pHTInfo->bCurBW40MHz) {
- b40M = 1;
- chan_offset = ieee->pHTInfo->CurSTAExtChnlOffset;
- bandwidth = (HT_CHANNEL_WIDTH)ieee->pHTInfo->bCurBW40MHz;
- printk("Scan in 40M, force to 20M first:%d, %d\n", chan_offset, bandwidth);
- ieee->SetBWModeHandler(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- }
- ieee80211_start_scan_syncro(ieee);
- if (b40M) {
- printk("Scan in 20M, back to 40M\n");
- if (chan_offset == HT_EXTCHNL_OFFSET_UPPER)
- ieee->set_chan(ieee, chan + 2);
- else if (chan_offset == HT_EXTCHNL_OFFSET_LOWER)
- ieee->set_chan(ieee, chan - 2);
- else
- ieee->set_chan(ieee, chan);
- ieee->SetBWModeHandler(ieee, bandwidth, chan_offset);
- } else {
- ieee->set_chan(ieee, chan);
- }
-
- ieee->InitialGainHandler(ieee, IG_Restore);
- ieee->state = IEEE80211_LINKED;
- ieee->link_change(ieee);
-
-#ifdef ENABLE_LPS
- /* Notify AP that I wake up again */
- ieee80211_sta_ps_send_null_frame(ieee, 0);
-#endif
-
- // To prevent the immediately calling watch_dog after scan.
- if(ieee->LinkDetectInfo.NumRecvBcnInPeriod==0||ieee->LinkDetectInfo.NumRecvDataInPeriod==0 )
- {
- ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1;
- ieee->LinkDetectInfo.NumRecvDataInPeriod= 1;
- }
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee);
-
- if(ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER)
- ieee80211_start_send_beacons(ieee);
-
- count = 0;
- up(&ieee->wx_sem);
-
-}
-
-int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- int ret = 0;
-
- down(&ieee->wx_sem);
-
- if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)){
- ret = -1;
- goto out;
- }
-
- if ( ieee->state == IEEE80211_LINKED){
- queue_work(ieee->wq, &ieee->wx_sync_scan_wq);
- /* intentionally forget to up sem */
- return 0;
- }
-
-out:
- up(&ieee->wx_sem);
- return ret;
-}
-
-int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
-{
-
- int ret=0,len;
- short proto_started;
- unsigned long flags;
-
- ieee->sync_scan_hurryup = 1;
- down(&ieee->wx_sem);
-
- proto_started = ieee->proto_started;
-
- if (wrqu->essid.length > IW_ESSID_MAX_SIZE){
- ret= -E2BIG;
- goto out;
- }
-
- if (ieee->iw_mode == IW_MODE_MONITOR){
- ret= -1;
- goto out;
- }
-
- if(proto_started){
- ieee80211_stop_protocol(ieee,true);
- }
-
-
- /* this is just to be sure that the GET wx callback
- * has consisten infos. not needed otherwise
- */
- spin_lock_irqsave(&ieee->lock, flags);
-
- if (wrqu->essid.flags && wrqu->essid.length) {
- //first flush current network.ssid
- len = ((wrqu->essid.length-1) < IW_ESSID_MAX_SIZE) ? (wrqu->essid.length-1) : IW_ESSID_MAX_SIZE;
- strncpy(ieee->current_network.ssid, extra, len+1);
- ieee->current_network.ssid_len = len+1;
- ieee->ssid_set = 1;
- }
- else{
- ieee->ssid_set = 0;
- ieee->current_network.ssid[0] = '\0';
- ieee->current_network.ssid_len = 0;
- }
- spin_unlock_irqrestore(&ieee->lock, flags);
-
- if (proto_started)
- ieee80211_start_protocol(ieee);
-out:
- up(&ieee->wx_sem);
- return ret;
-}
-
- int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
-
- wrqu->mode = ieee->iw_mode;
- return 0;
-}
-
- int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
-
- int *parms = (int *)extra;
- int enable = (parms[0] > 0);
- short prev = ieee->raw_tx;
-
- down(&ieee->wx_sem);
-
- if(enable)
- ieee->raw_tx = 1;
- else
- ieee->raw_tx = 0;
-
- printk(KERN_INFO"raw TX is %s\n",
- ieee->raw_tx ? "enabled" : "disabled");
-
- if(ieee->iw_mode == IW_MODE_MONITOR)
- {
- if(prev == 0 && ieee->raw_tx){
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee);
-
- netif_carrier_on(ieee->dev);
- }
-
- if(prev && ieee->raw_tx == 1)
- netif_carrier_off(ieee->dev);
- }
-
- up(&ieee->wx_sem);
-
- return 0;
-}
-
-int ieee80211_wx_get_name(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- strcpy(wrqu->name, "802.11");
- if(ieee->modulation & IEEE80211_CCK_MODULATION)
- strcat(wrqu->name, "b");
- if(ieee->modulation & IEEE80211_OFDM_MODULATION)
- strcat(wrqu->name, "g");
- if (ieee->mode & (IEEE_N_24G | IEEE_N_5G))
- strcat(wrqu->name, "n");
- return 0;
-}
-
-
-/* this is mostly stolen from hostap */
-int ieee80211_wx_set_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
-
- if(
- (!ieee->sta_wake_up) ||
- (!ieee->enter_sleep_state) ||
- (!ieee->ps_is_queue_empty)){
-
- return -1;
- }
-
- down(&ieee->wx_sem);
-
- if (wrqu->power.disabled){
- ieee->ps = IEEE80211_PS_DISABLED;
- goto exit;
- }
- if (wrqu->power.flags & IW_POWER_TIMEOUT) {
- ieee->ps_timeout = wrqu->power.value / 1000;
- }
-
- if (wrqu->power.flags & IW_POWER_PERIOD) {
- ieee->ps_period = wrqu->power.value / 1000;
- }
- switch (wrqu->power.flags & IW_POWER_MODE) {
- case IW_POWER_UNICAST_R:
- ieee->ps = IEEE80211_PS_UNICAST;
- break;
- case IW_POWER_MULTICAST_R:
- ieee->ps = IEEE80211_PS_MBCAST;
- break;
- case IW_POWER_ALL_R:
- ieee->ps = IEEE80211_PS_UNICAST | IEEE80211_PS_MBCAST;
- break;
-
- case IW_POWER_ON:
- break;
-
- default:
- ret = -EINVAL;
- goto exit;
-
- }
-exit:
- up(&ieee->wx_sem);
- return ret;
-
-}
-
-/* this is stolen from hostap */
-int ieee80211_wx_get_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret =0;
-
- down(&ieee->wx_sem);
-
- if(ieee->ps == IEEE80211_PS_DISABLED){
- wrqu->power.disabled = 1;
- goto exit;
- }
-
- wrqu->power.disabled = 0;
-
- if ((wrqu->power.flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
- wrqu->power.flags = IW_POWER_TIMEOUT;
- wrqu->power.value = ieee->ps_timeout * 1000;
- } else {
- wrqu->power.flags = IW_POWER_PERIOD;
- wrqu->power.value = ieee->ps_period * 1000;
- }
-
- if ((ieee->ps & (IEEE80211_PS_MBCAST | IEEE80211_PS_UNICAST)) == (IEEE80211_PS_MBCAST | IEEE80211_PS_UNICAST))
- wrqu->power.flags |= IW_POWER_ALL_R;
- else if (ieee->ps & IEEE80211_PS_MBCAST)
- wrqu->power.flags |= IW_POWER_MULTICAST_R;
- else
- wrqu->power.flags |= IW_POWER_UNICAST_R;
-
-exit:
- up(&ieee->wx_sem);
- return ret;
-
-}
-
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
deleted file mode 100644
index 424dd48da66..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
+++ /dev/null
@@ -1,955 +0,0 @@
-/******************************************************************************
-
- Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc., 59
- Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
- The full GNU General Public License is included in this distribution in the
- file called LICENSE.
-
- Contact Information:
- James P. Ketrenos <ipw2100-admin@linux.intel.com>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************
-
- Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
-
- A special thanks goes to Realtek for their support !
-
-******************************************************************************/
-
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/in6.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/tcp.h>
-#include <linux/types.h>
-#include <linux/wireless.h>
-#include <linux/etherdevice.h>
-#include <asm/uaccess.h>
-#include <linux/if_vlan.h>
-
-#include "ieee80211.h"
-
-
-/*
-
-
-802.11 Data Frame
-
-
-802.11 frame_contorl for data frames - 2 bytes
- ,-----------------------------------------------------------------------------------------.
-bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e |
- |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
-val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x |
- |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
-desc | ^-ver-^ | ^type-^ | ^-----subtype-----^ | to |from |more |retry| pwr |more |wep |
- | | | x=0 data,x=1 data+ack | DS | DS |frag | | mgm |data | |
- '-----------------------------------------------------------------------------------------'
- /\
- |
-802.11 Data Frame |
- ,--------- 'ctrl' expands to >-----------'
- |
- ,--'---,-------------------------------------------------------------.
-Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
- |------|------|---------|---------|---------|------|---------|------|
-Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
- | | tion | (BSSID) | | | ence | data | |
- `--------------------------------------------------| |------'
-Total: 28 non-data bytes `----.----'
- |
- .- 'Frame data' expands to <---------------------------'
- |
- V
- ,---------------------------------------------------.
-Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
- |------|------|---------|----------|------|---------|
-Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP |
- | DSAP | SSAP | | | | Packet |
- | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | |
- `-----------------------------------------| |
-Total: 8 non-data bytes `----.----'
- |
- .- 'IP Packet' expands, if WEP enabled, to <--'
- |
- V
- ,-----------------------.
-Bytes | 4 | 0-2296 | 4 |
- |-----|-----------|-----|
-Desc. | IV | Encrypted | ICV |
- | | IP Packet | |
- `-----------------------'
-Total: 8 non-data bytes
-
-
-802.3 Ethernet Data Frame
-
- ,-----------------------------------------.
-Bytes | 6 | 6 | 2 | Variable | 4 |
- |-------|-------|------|-----------|------|
-Desc. | Dest. | Source| Type | IP Packet | fcs |
- | MAC | MAC | | | |
- `-----------------------------------------'
-Total: 18 non-data bytes
-
-In the event that fragmentation is required, the incoming payload is split into
-N parts of size ieee->fts. The first fragment contains the SNAP header and the
-remaining packets are just data.
-
-If encryption is enabled, each fragment payload size is reduced by enough space
-to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP)
-So if you have 1500 bytes of payload with ieee->fts set to 500 without
-encryption it will take 3 frames. With WEP it will take 4 frames as the
-payload of each frame is reduced to 492 bytes.
-
-* SKB visualization
-*
-* ,- skb->data
-* |
-* | ETHERNET HEADER ,-<-- PAYLOAD
-* | | 14 bytes from skb->data
-* | 2 bytes for Type --> ,T. | (sizeof ethhdr)
-* | | | |
-* |,-Dest.--. ,--Src.---. | | |
-* | 6 bytes| | 6 bytes | | | |
-* v | | | | | |
-* 0 | v 1 | v | v 2
-* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
-* ^ | ^ | ^ |
-* | | | | | |
-* | | | | `T' <---- 2 bytes for Type
-* | | | |
-* | | '---SNAP--' <-------- 6 bytes for SNAP
-* | |
-* `-IV--' <-------------------- 4 bytes for IV (WEP)
-*
-* SNAP HEADER
-*
-*/
-
-static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
-static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
-
-static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
-{
- struct ieee80211_snap_hdr *snap;
- u8 *oui;
-
- snap = (struct ieee80211_snap_hdr *)data;
- snap->dsap = 0xaa;
- snap->ssap = 0xaa;
- snap->ctrl = 0x03;
-
- if (h_proto == 0x8137 || h_proto == 0x80f3)
- oui = P802_1H_OUI;
- else
- oui = RFC1042_OUI;
- snap->oui[0] = oui[0];
- snap->oui[1] = oui[1];
- snap->oui[2] = oui[2];
-
- *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
-
- return SNAP_SIZE + sizeof(u16);
-}
-
-int ieee80211_encrypt_fragment(
- struct ieee80211_device *ieee,
- struct sk_buff *frag,
- int hdr_len)
-{
- struct ieee80211_crypt_data* crypt = ieee->crypt[ieee->tx_keyidx];
- int res;
-
- if (!(crypt && crypt->ops))
- {
- printk("=========>%s(), crypt is null\n", __FUNCTION__);
- return -1;
- }
-#ifdef CONFIG_IEEE80211_CRYPT_TKIP
- struct ieee80211_hdr *header;
-
- if (ieee->tkip_countermeasures &&
- crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
- header = (struct ieee80211_hdr *) frag->data;
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
- "TX packet to %pM\n",
- ieee->dev->name, header->addr1);
- }
- return -1;
- }
-#endif
- /* To encrypt, frame format is:
- * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
-
- // PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption.
- /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
- * call both MSDU and MPDU encryption functions from here. */
- atomic_inc(&crypt->refcnt);
- res = 0;
- if (crypt->ops->encrypt_msdu)
- res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
- if (res == 0 && crypt->ops->encrypt_mpdu)
- res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
-
- atomic_dec(&crypt->refcnt);
- if (res < 0) {
- printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
- ieee->dev->name, frag->len);
- ieee->ieee_stats.tx_discards++;
- return -1;
- }
-
- return 0;
-}
-
-
-void ieee80211_txb_free(struct ieee80211_txb *txb) {
- if (unlikely(!txb))
- return;
- kfree(txb);
-}
-
-struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
- int gfp_mask)
-{
- struct ieee80211_txb *txb;
- int i;
- txb = kmalloc(
- sizeof(struct ieee80211_txb) + (sizeof(u8*) * nr_frags),
- gfp_mask);
- if (!txb)
- return NULL;
-
- memset(txb, 0, sizeof(struct ieee80211_txb));
- txb->nr_frags = nr_frags;
- txb->frag_size = txb_size;
-
- for (i = 0; i < nr_frags; i++) {
- txb->fragments[i] = dev_alloc_skb(txb_size);
- if (unlikely(!txb->fragments[i])) {
- i--;
- break;
- }
- memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
- }
- if (unlikely(i != nr_frags)) {
- while (i >= 0)
- dev_kfree_skb_any(txb->fragments[i--]);
- kfree(txb);
- return NULL;
- }
- return txb;
-}
-
-// Classify the to-be send data packet
-// Need to acquire the sent queue index.
-static int
-ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
-{
- struct ethhdr *eth;
- struct iphdr *ip;
- eth = (struct ethhdr *)skb->data;
- if (eth->h_proto != htons(ETH_P_IP))
- return 0;
-
- ip = ip_hdr(skb);
- switch (ip->tos & 0xfc) {
- case 0x20:
- return 2;
- case 0x40:
- return 1;
- case 0x60:
- return 3;
- case 0x80:
- return 4;
- case 0xa0:
- return 5;
- case 0xc0:
- return 6;
- case 0xe0:
- return 7;
- default:
- return 0;
- }
-}
-
-#define SN_LESS(a, b) (((a-b)&0x800)!=0)
-void ieee80211_tx_query_agg_cap(struct ieee80211_device* ieee, struct sk_buff* skb, cb_desc* tcb_desc)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- PTX_TS_RECORD pTxTs = NULL;
- struct ieee80211_hdr_1addr* hdr = (struct ieee80211_hdr_1addr*)skb->data;
-
- if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
- return;
- if (!IsQoSDataFrame(skb->data))
- return;
-
- if (is_multicast_ether_addr(hdr->addr1) || is_broadcast_ether_addr(hdr->addr1))
- return;
- //check packet and mode later
-#ifdef TO_DO_LIST
- if(pTcb->PacketLength >= 4096)
- return;
- // For RTL819X, if pairwisekey = wep/tkip, we don't aggrregation.
- if(!Adapter->HalFunc.GetNmodeSupportBySecCfgHandler(Adapter))
- return;
-#endif
-
- if(tcb_desc->bdhcp)// || ieee->CntAfterLink<2)
- {
- return;
- }
-
-
-#if 1
- if (!ieee->GetNmodeSupportBySecCfg(ieee))
- {
- return;
- }
-#endif
- if(pHTInfo->bCurrentAMPDUEnable)
- {
- if (!GetTs(ieee, (PTS_COMMON_INFO*)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true))
- {
- printk("===>can't get TS\n");
- return;
- }
- if (pTxTs->TxAdmittedBARecord.bValid == false)
- {
- //as some AP will refuse our action frame until key handshake has been finished. WB
- if (ieee->wpa_ie_len && (ieee->pairwise_key_type == KEY_TYPE_NA))
- ;
- else
- TsStartAddBaProcess(ieee, pTxTs);
- goto FORCED_AGG_SETTING;
- }
- else if (pTxTs->bUsingBa == false)
- {
- if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum, (pTxTs->TxCurSeq+1)%4096))
- pTxTs->bUsingBa = true;
- else
- goto FORCED_AGG_SETTING;
- }
-
- if (ieee->iw_mode == IW_MODE_INFRA)
- {
- tcb_desc->bAMPDUEnable = true;
- tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
- tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
- }
- }
-FORCED_AGG_SETTING:
- switch(pHTInfo->ForcedAMPDUMode )
- {
- case HT_AGG_AUTO:
- break;
-
- case HT_AGG_FORCE_ENABLE:
- tcb_desc->bAMPDUEnable = true;
- tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
- tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
- break;
-
- case HT_AGG_FORCE_DISABLE:
- tcb_desc->bAMPDUEnable = false;
- tcb_desc->ampdu_density = 0;
- tcb_desc->ampdu_factor = 0;
- break;
-
- }
- return;
-}
-
-extern void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device* ieee, cb_desc* tcb_desc)
-{
- tcb_desc->bUseShortPreamble = false;
- if (tcb_desc->data_rate == 2)
- {//// 1M can only use Long Preamble. 11B spec
- return;
- }
- else if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
- {
- tcb_desc->bUseShortPreamble = true;
- }
- return;
-}
-extern void
-ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, cb_desc *tcb_desc)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-
- tcb_desc->bUseShortGI = false;
-
- if(!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
- return;
-
- if(pHTInfo->bForcedShortGI)
- {
- tcb_desc->bUseShortGI = true;
- return;
- }
-
- if((pHTInfo->bCurBW40MHz==true) && pHTInfo->bCurShortGI40MHz)
- tcb_desc->bUseShortGI = true;
- else if((pHTInfo->bCurBW40MHz==false) && pHTInfo->bCurShortGI20MHz)
- tcb_desc->bUseShortGI = true;
-}
-
-void ieee80211_query_BandwidthMode(struct ieee80211_device* ieee, cb_desc *tcb_desc)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-
- tcb_desc->bPacketBW = false;
-
- if(!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
- return;
-
- if(tcb_desc->bMulticast || tcb_desc->bBroadcast)
- return;
-
- if((tcb_desc->data_rate & 0x80)==0) // If using legacy rate, it shall use 20MHz channel.
- return;
- //BandWidthAutoSwitch is for auto switch to 20 or 40 in long distance
- if(pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
- tcb_desc->bPacketBW = true;
- return;
-}
-
-void ieee80211_query_protectionmode(struct ieee80211_device* ieee, cb_desc* tcb_desc, struct sk_buff* skb)
-{
- // Common Settings
- tcb_desc->bRTSSTBC = false;
- tcb_desc->bRTSUseShortGI = false; // Since protection frames are always sent by legacy rate, ShortGI will never be used.
- tcb_desc->bCTSEnable = false; // Most of protection using RTS/CTS
- tcb_desc->RTSSC = 0; // 20MHz: Don't care; 40MHz: Duplicate.
- tcb_desc->bRTSBW = false; // RTS frame bandwidth is always 20MHz
-
- if(tcb_desc->bBroadcast || tcb_desc->bMulticast)//only unicast frame will use rts/cts
- return;
-
- if (is_broadcast_ether_addr(skb->data+16)) //check addr3 as infrastructure add3 is DA.
- return;
-
- if (ieee->mode < IEEE_N_24G) //b, g mode
- {
- // (1) RTS_Threshold is compared to the MPDU, not MSDU.
- // (2) If there are more than one frag in this MSDU, only the first frag uses protection frame.
- // Other fragments are protected by previous fragment.
- // So we only need to check the length of first fragment.
- if (skb->len > ieee->rts)
- {
- tcb_desc->bRTSEnable = true;
- tcb_desc->rts_rate = MGN_24M;
- }
- else if (ieee->current_network.buseprotection)
- {
- // Use CTS-to-SELF in protection mode.
- tcb_desc->bRTSEnable = true;
- tcb_desc->bCTSEnable = true;
- tcb_desc->rts_rate = MGN_24M;
- }
- //otherwise return;
- return;
- }
- else
- {// 11n High throughput case.
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- while (true)
- {
- //check ERP protection
- if (ieee->current_network.buseprotection)
- {// CTS-to-SELF
- tcb_desc->bRTSEnable = true;
- tcb_desc->bCTSEnable = true;
- tcb_desc->rts_rate = MGN_24M;
- break;
- }
- //check HT op mode
- if(pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT)
- {
- u8 HTOpMode = pHTInfo->CurrentOpMode;
- if((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) ||
- (!pHTInfo->bCurBW40MHz && HTOpMode == 3) )
- {
- tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
- tcb_desc->bRTSEnable = true;
- break;
- }
- }
- //check rts
- if (skb->len > ieee->rts)
- {
- tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
- tcb_desc->bRTSEnable = true;
- break;
- }
- //to do list: check MIMO power save condition.
- //check AMPDU aggregation for TXOP
- if(tcb_desc->bAMPDUEnable)
- {
- tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
- // According to 8190 design, firmware sends CF-End only if RTS/CTS is enabled. However, it degrads
- // throughput around 10M, so we disable of this mechanism. 2007.08.03 by Emily
- tcb_desc->bRTSEnable = false;
- break;
- }
- //check IOT action
- if(pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF)
- {
- tcb_desc->bCTSEnable = true;
- tcb_desc->rts_rate = MGN_24M;
- tcb_desc->bRTSEnable = true;
- break;
- }
- // Totally no protection case!!
- goto NO_PROTECTION;
- }
- }
- // For test , CTS replace with RTS
- if( 0 )
- {
- tcb_desc->bCTSEnable = true;
- tcb_desc->rts_rate = MGN_24M;
- tcb_desc->bRTSEnable = true;
- }
- if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
- tcb_desc->bUseShortPreamble = true;
- if (ieee->mode == IW_MODE_MASTER)
- goto NO_PROTECTION;
- return;
-NO_PROTECTION:
- tcb_desc->bRTSEnable = false;
- tcb_desc->bCTSEnable = false;
- tcb_desc->rts_rate = 0;
- tcb_desc->RTSSC = 0;
- tcb_desc->bRTSBW = false;
-}
-
-
-void ieee80211_txrate_selectmode(struct ieee80211_device* ieee, cb_desc* tcb_desc)
-{
-#ifdef TO_DO_LIST
- if(!IsDataFrame(pFrame))
- {
- pTcb->bTxDisableRateFallBack = TRUE;
- pTcb->bTxUseDriverAssingedRate = TRUE;
- pTcb->RATRIndex = 7;
- return;
- }
-
- if(pMgntInfo->ForcedDataRate!= 0)
- {
- pTcb->bTxDisableRateFallBack = TRUE;
- pTcb->bTxUseDriverAssingedRate = TRUE;
- return;
- }
-#endif
- if(ieee->bTxDisableRateFallBack)
- tcb_desc->bTxDisableRateFallBack = true;
-
- if(ieee->bTxUseDriverAssingedRate)
- tcb_desc->bTxUseDriverAssingedRate = true;
- if(!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate)
- {
- if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
- tcb_desc->RATRIndex = 0;
- }
-}
-
-void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u8* dst)
-{
- if (is_multicast_ether_addr(dst) || is_broadcast_ether_addr(dst))
- return;
- if (IsQoSDataFrame(skb->data)) //we deal qos data only
- {
- PTX_TS_RECORD pTS = NULL;
- if (!GetTs(ieee, (PTS_COMMON_INFO*)(&pTS), dst, skb->priority, TX_DIR, true))
- {
- return;
- }
- pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
- }
-}
-
-int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct ieee80211_device *ieee = netdev_priv(dev);
- struct ieee80211_txb *txb = NULL;
- struct ieee80211_hdr_3addrqos *frag_hdr;
- int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
- unsigned long flags;
- struct net_device_stats *stats = &ieee->stats;
- int ether_type = 0, encrypt;
- int bytes, fc, qos_ctl = 0, hdr_len;
- struct sk_buff *skb_frag;
- struct ieee80211_hdr_3addrqos header = { /* Ensure zero initialized */
- .duration_id = 0,
- .seq_ctl = 0,
- .qos_ctl = 0
- };
- u8 dest[ETH_ALEN], src[ETH_ALEN];
- int qos_actived = ieee->current_network.qos_data.active;
-
- struct ieee80211_crypt_data* crypt;
- bool bdhcp =false;
-
- cb_desc *tcb_desc;
-
- spin_lock_irqsave(&ieee->lock, flags);
-
- /* If there is no driver handler to take the TXB, dont' bother
- * creating it... */
- if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
- ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
- printk(KERN_WARNING "%s: No xmit handler.\n",
- ieee->dev->name);
- goto success;
- }
-
-
- if(likely(ieee->raw_tx == 0)){
- if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
- printk(KERN_WARNING "%s: skb too small (%d).\n",
- ieee->dev->name, skb->len);
- goto success;
- }
-
- memset(skb->cb, 0, sizeof(skb->cb));
- ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
-
- crypt = ieee->crypt[ieee->tx_keyidx];
-
- encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
- ieee->host_encrypt && crypt && crypt->ops;
-
- if (!encrypt && ieee->ieee802_1x &&
- ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
- stats->tx_dropped++;
- goto success;
- }
- #ifdef CONFIG_IEEE80211_DEBUG
- if (crypt && !encrypt && ether_type == ETH_P_PAE) {
- struct eapol *eap = (struct eapol *)(skb->data +
- sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
- IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
- eap_get_type(eap->type));
- }
- #endif
-
- // The following is for DHCP and ARP packet, we use cck1M to tx these packets and let LPS awake some time
- // to prevent DHCP protocol fail
- if (skb->len > 282){//MINIMUM_DHCP_PACKET_SIZE) {
- if (ETH_P_IP == ether_type) {// IP header
- const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
- if (IPPROTO_UDP == ip->protocol) {//FIXME windows is 11 but here UDP in linux kernel is 17.
- struct udphdr *udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
- if(((((u8 *)udp)[1] == 68) && (((u8 *)udp)[3] == 67)) ||
- ((((u8 *)udp)[1] == 67) && (((u8 *)udp)[3] == 68))) {
- // 68 : UDP BOOTP client
- // 67 : UDP BOOTP server
- printk("DHCP pkt src port:%d, dest port:%d!!\n", ((u8 *)udp)[1],((u8 *)udp)[3]);
-
- bdhcp = true;
-#ifdef _RTL8192_EXT_PATCH_
- ieee->LPSDelayCnt = 100;//pPSC->LPSAwakeIntvl*2; //AMY,090701
-#else
- ieee->LPSDelayCnt = 100;//pPSC->LPSAwakeIntvl*2;
-#endif
- }
- }
- }else if(ETH_P_ARP == ether_type){// IP ARP packet
- printk("=================>DHCP Protocol start tx ARP pkt!!\n");
- bdhcp = true;
- ieee->LPSDelayCnt = ieee->current_network.tim.tim_count;
-
- }
- }
-
- /* Save source and destination addresses */
- memcpy(&dest, skb->data, ETH_ALEN);
- memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);
-
- /* Advance the SKB to the start of the payload */
- skb_pull(skb, sizeof(struct ethhdr));
-
- /* Determine total amount of storage required for TXB packets */
- bytes = skb->len + SNAP_SIZE + sizeof(u16);
-
- if (encrypt)
- fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
- else
-
- fc = IEEE80211_FTYPE_DATA;
-
- if(qos_actived)
- fc |= IEEE80211_STYPE_QOS_DATA;
- else
- fc |= IEEE80211_STYPE_DATA;
-
- if (ieee->iw_mode == IW_MODE_INFRA) {
- fc |= IEEE80211_FCTL_TODS;
- /* To DS: Addr1 = BSSID, Addr2 = SA,
- Addr3 = DA */
- memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
- memcpy(&header.addr2, &src, ETH_ALEN);
- memcpy(&header.addr3, &dest, ETH_ALEN);
- } else if (ieee->iw_mode == IW_MODE_ADHOC) {
- /* not From/To DS: Addr1 = DA, Addr2 = SA,
- Addr3 = BSSID */
- memcpy(&header.addr1, dest, ETH_ALEN);
- memcpy(&header.addr2, src, ETH_ALEN);
- memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
- }
-
- header.frame_ctl = cpu_to_le16(fc);
-
- /* Determine fragmentation size based on destination (multicast
- * and broadcast are not fragmented) */
- if (is_multicast_ether_addr(header.addr1) ||
- is_broadcast_ether_addr(header.addr1)) {
- frag_size = MAX_FRAG_THRESHOLD;
- qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
- }
- else {
- frag_size = ieee->fts;//default:392
- qos_ctl = 0;
- }
-
- if(qos_actived)
- {
- hdr_len = IEEE80211_3ADDR_LEN + 2;
-
- skb->priority = ieee80211_classify(skb, &ieee->current_network);
- qos_ctl |= skb->priority; //set in the ieee80211_classify
- header.qos_ctl = cpu_to_le16(qos_ctl & IEEE80211_QOS_TID);
- } else {
- hdr_len = IEEE80211_3ADDR_LEN;
- }
- /* Determine amount of payload per fragment. Regardless of if
- * this stack is providing the full 802.11 header, one will
- * eventually be affixed to this fragment -- so we must account for
- * it when determining the amount of payload space. */
- bytes_per_frag = frag_size - hdr_len;
- if (ieee->config &
- (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
- bytes_per_frag -= IEEE80211_FCS_LEN;
-
- /* Each fragment may need to have room for encryption pre/postfix */
- if (encrypt)
- bytes_per_frag -= crypt->ops->extra_prefix_len +
- crypt->ops->extra_postfix_len;
-
- /* Number of fragments is the total bytes_per_frag /
- * payload_per_fragment */
- nr_frags = bytes / bytes_per_frag;
- bytes_last_frag = bytes % bytes_per_frag;
- if (bytes_last_frag)
- nr_frags++;
- else
- bytes_last_frag = bytes_per_frag;
-
- /* When we allocate the TXB we allocate enough space for the reserve
- * and full fragment bytes (bytes_per_frag doesn't include prefix,
- * postfix, header, FCS, etc.) */
- txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
- if (unlikely(!txb)) {
- printk(KERN_WARNING "%s: Could not allocate TXB\n",
- ieee->dev->name);
- goto failed;
- }
- txb->encrypted = encrypt;
- txb->payload_size = bytes;
-
- if(qos_actived)
- {
- txb->queue_index = UP2AC(skb->priority);
- } else {
- txb->queue_index = WME_AC_BK;
- }
-
-
-
- for (i = 0; i < nr_frags; i++) {
- skb_frag = txb->fragments[i];
- tcb_desc = (cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
- if(qos_actived){
- skb_frag->priority = skb->priority;//UP2AC(skb->priority);
- tcb_desc->queue_index = UP2AC(skb->priority);
- } else {
- skb_frag->priority = WME_AC_BK;
- tcb_desc->queue_index = WME_AC_BK;
- }
- skb_reserve(skb_frag, ieee->tx_headroom);
-
- if (encrypt){
- if (ieee->hwsec_active)
- tcb_desc->bHwSec = 1;
- else
- tcb_desc->bHwSec = 0;
- skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
- }
- else
- {
- tcb_desc->bHwSec = 0;
- }
- frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
- memcpy(frag_hdr, &header, hdr_len);
-
- /* If this is not the last fragment, then add the MOREFRAGS
- * bit to the frame control */
- if (i != nr_frags - 1) {
- frag_hdr->frame_ctl = cpu_to_le16(
- fc | IEEE80211_FCTL_MOREFRAGS);
- bytes = bytes_per_frag;
-
- } else {
- /* The last fragment takes the remaining length */
- bytes = bytes_last_frag;
- }
-
- if(qos_actived)
- {
- // add 1 only indicate to corresponding seq number control 2006/7/12
- frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
- } else {
- frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
- }
-
- /* Put a SNAP header on the first fragment */
- if (i == 0) {
- ieee80211_put_snap(
- skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
- ether_type);
- bytes -= SNAP_SIZE + sizeof(u16);
- }
-
- memcpy(skb_put(skb_frag, bytes), skb->data, bytes);
-
- /* Advance the SKB... */
- skb_pull(skb, bytes);
-
- /* Encryption routine will move the header forward in order
- * to insert the IV between the header and the payload */
- if (encrypt)
- ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
- if (ieee->config &
- (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
- skb_put(skb_frag, 4);
- }
-
- if(qos_actived)
- {
- if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
- ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
- else
- ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
- } else {
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
- }
- }else{
- if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
- printk(KERN_WARNING "%s: skb too small (%d).\n",
- ieee->dev->name, skb->len);
- goto success;
- }
-
- txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
- if(!txb){
- printk(KERN_WARNING "%s: Could not allocate TXB\n",
- ieee->dev->name);
- goto failed;
- }
-
- txb->encrypted = 0;
- txb->payload_size = skb->len;
- memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
- }
-
- success:
-//WB add to fill data tcb_desc here. only first fragment is considered, need to change, and you may remove to other place.
- if (txb)
- {
- cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
- tcb_desc->bTxEnableFwCalcDur = 1;
- if (is_multicast_ether_addr(header.addr1))
- tcb_desc->bMulticast = 1;
- if (is_broadcast_ether_addr(header.addr1))
- tcb_desc->bBroadcast = 1;
- ieee80211_txrate_selectmode(ieee, tcb_desc);
- if ( tcb_desc->bMulticast || tcb_desc->bBroadcast)
- tcb_desc->data_rate = ieee->basic_rate;
- else
- tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);
-
- if(bdhcp == true){
- tcb_desc->data_rate = MGN_1M;
- tcb_desc->bTxDisableRateFallBack = 1;
-
- tcb_desc->RATRIndex = 7;
- tcb_desc->bTxUseDriverAssingedRate = 1;
- tcb_desc->bdhcp = 1;
- }
-
-
- ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
- ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
- ieee80211_query_HTCapShortGI(ieee, tcb_desc);
- ieee80211_query_BandwidthMode(ieee, tcb_desc);
- ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
- ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);
- }
- spin_unlock_irqrestore(&ieee->lock, flags);
- dev_kfree_skb_any(skb);
- if (txb) {
- if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
- ieee80211_softmac_xmit(txb, ieee);
- }else{
- if ((*ieee->hard_start_xmit)(txb, ieee) == 0) {
- stats->tx_packets++;
- stats->tx_bytes += txb->payload_size;
- return 0;
- }
- ieee80211_txb_free(txb);
- }
- }
-
- return 0;
-
- failed:
- spin_unlock_irqrestore(&ieee->lock, flags);
- netif_stop_queue(dev);
- stats->tx_errors++;
- return 1;
-
-}
-
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
deleted file mode 100644
index 6530d9b6829..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
+++ /dev/null
@@ -1,872 +0,0 @@
-/******************************************************************************
-
- Copyright(c) 2004 Intel Corporation. All rights reserved.
-
- Portions of this file are based on the WEP enablement code provided by the
- Host AP project hostap-drivers v0.1.3
- Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- <jkmaline@cc.hut.fi>
- Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc., 59
- Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
- The full GNU General Public License is included in this distribution in the
- file called LICENSE.
-
- Contact Information:
- James P. Ketrenos <ipw2100-admin@linux.intel.com>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************/
-#include <linux/wireless.h>
-#include <linux/version.h>
-#include <linux/kmod.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#include "ieee80211.h"
-
-struct modes_unit {
- char *mode_string;
- int mode_size;
-};
-struct modes_unit ieee80211_modes[] = {
- {"a",1},
- {"b",1},
- {"g",1},
- {"?",1},
- {"N-24G",5},
- {"N-5G",4},
-};
-
-#define iwe_stream_add_event_rsl iwe_stream_add_event
-
-#define MAX_CUSTOM_LEN 64
-static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
- char *start, char *stop,
- struct ieee80211_network *network,
- struct iw_request_info *info)
-{
- char custom[MAX_CUSTOM_LEN];
- char proto_name[IFNAMSIZ];
- char *pname = proto_name;
- char *p;
- struct iw_event iwe;
- int i, j;
- u16 max_rate, rate;
- static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
-
- /* First entry *MUST* be the AP MAC address */
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
- start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_ADDR_LEN);
- /* Remaining entries will be displayed in the order we provide them */
-
- /* Add the ESSID */
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.flags = 1;
-// if (network->flags & NETWORK_EMPTY_ESSID) {
- if (network->ssid_len == 0) {
- iwe.u.data.length = sizeof("<hidden>");
- start = iwe_stream_add_point(info, start, stop, &iwe, "<hidden>");
- } else {
- iwe.u.data.length = min(network->ssid_len, (u8)32);
- start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
- }
- /* Add the protocol name */
- iwe.cmd = SIOCGIWNAME;
- for(i=0; i<ARRAY_SIZE(ieee80211_modes); i++) {
- if(network->mode&(1<<i)) {
- sprintf(pname,ieee80211_modes[i].mode_string,ieee80211_modes[i].mode_size);
- pname +=ieee80211_modes[i].mode_size;
- }
- }
- *pname = '\0';
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE802.11%s", proto_name);
- start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_CHAR_LEN);
- /* Add mode */
- iwe.cmd = SIOCGIWMODE;
- if (network->capability &
- (WLAN_CAPABILITY_BSS | WLAN_CAPABILITY_IBSS)) {
- if (network->capability & WLAN_CAPABILITY_BSS)
- iwe.u.mode = IW_MODE_MASTER;
- else
- iwe.u.mode = IW_MODE_ADHOC;
- start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_UINT_LEN);
- }
-
- /* Add frequency/channel */
- iwe.cmd = SIOCGIWFREQ;
-/* iwe.u.freq.m = ieee80211_frequency(network->channel, network->mode);
- iwe.u.freq.e = 3; */
- iwe.u.freq.m = network->channel;
- iwe.u.freq.e = 0;
- iwe.u.freq.i = 0;
- start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_FREQ_LEN);
- /* Add encryption capability */
- iwe.cmd = SIOCGIWENCODE;
- if (network->capability & WLAN_CAPABILITY_PRIVACY)
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
- /* Add basic and extended rates */
- max_rate = 0;
- p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
- for (i = 0, j = 0; i < network->rates_len; ) {
- if (j < network->rates_ex_len &&
- ((network->rates_ex[j] & 0x7F) <
- (network->rates[i] & 0x7F)))
- rate = network->rates_ex[j++] & 0x7F;
- else
- rate = network->rates[i++] & 0x7F;
- if (rate > max_rate)
- max_rate = rate;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
- "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
- }
- for (; j < network->rates_ex_len; j++) {
- rate = network->rates_ex[j] & 0x7F;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
- "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
- if (rate > max_rate)
- max_rate = rate;
- }
-
- if (network->mode >= IEEE_N_24G)//add N rate here;
- {
- PHT_CAPABILITY_ELE ht_cap = NULL;
- bool is40M = false, isShortGI = false;
- u8 max_mcs = 0;
- if (!memcmp(network->bssht.bdHTCapBuf, EWC11NHTCap, 4))
- ht_cap = (PHT_CAPABILITY_ELE)&network->bssht.bdHTCapBuf[4];
- else
- ht_cap = (PHT_CAPABILITY_ELE)&network->bssht.bdHTCapBuf[0];
- is40M = (ht_cap->ChlWidth)?1:0;
- isShortGI = (ht_cap->ChlWidth)?
- ((ht_cap->ShortGI40Mhz)?1:0):
- ((ht_cap->ShortGI20Mhz)?1:0);
-
- max_mcs = HTGetHighestMCSRate(ieee, ht_cap->MCS, MCS_FILTER_ALL);
- rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs&0x7f];
- if (rate > max_rate)
- max_rate = rate;
- }
- iwe.cmd = SIOCGIWRATE;
- iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
- iwe.u.bitrate.value = max_rate * 500000;
- start = iwe_stream_add_event_rsl(info, start, stop, &iwe,
- IW_EV_PARAM_LEN);
- iwe.cmd = IWEVCUSTOM;
- iwe.u.data.length = p - custom;
- if (iwe.u.data.length)
- start = iwe_stream_add_point(info, start, stop, &iwe, custom);
- /* Add quality statistics */
- /* TODO: Fix these values... */
- iwe.cmd = IWEVQUAL;
- iwe.u.qual.qual = network->stats.signal;
- iwe.u.qual.level = network->stats.rssi;
- iwe.u.qual.noise = network->stats.noise;
- iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK;
- if (!(network->stats.mask & IEEE80211_STATMASK_RSSI))
- iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
- if (!(network->stats.mask & IEEE80211_STATMASK_NOISE))
- iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
- if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL))
- iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID;
- iwe.u.qual.updated = 7;
- start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_QUAL_LEN);
- iwe.cmd = IWEVCUSTOM;
- p = custom;
-
- iwe.u.data.length = p - custom;
- if (iwe.u.data.length)
- start = iwe_stream_add_point(info, start, stop, &iwe, custom);
-#if (WIRELESS_EXT < 18)
- if (ieee->wpa_enabled && network->wpa_ie_len){
- char buf[MAX_WPA_IE_LEN * 2 + 30];
- u8 *p = buf;
- p += sprintf(p, "wpa_ie=");
- for (i = 0; i < network->wpa_ie_len; i++) {
- p += sprintf(p, "%02x", network->wpa_ie[i]);
- }
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- iwe.u.data.length = strlen(buf);
- start = iwe_stream_add_point(info, start, stop, &iwe, buf);
- }
-
- if (ieee->wpa_enabled && network->rsn_ie_len){
- char buf[MAX_WPA_IE_LEN * 2 + 30];
-
- u8 *p = buf;
- p += sprintf(p, "rsn_ie=");
- for (i = 0; i < network->rsn_ie_len; i++) {
- p += sprintf(p, "%02x", network->rsn_ie[i]);
- }
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- iwe.u.data.length = strlen(buf);
- start = iwe_stream_add_point(info, start, stop, &iwe, buf);
- }
-#else
- memset(&iwe, 0, sizeof(iwe));
- if (network->wpa_ie_len)
- {
- char buf[MAX_WPA_IE_LEN];
- memcpy(buf, network->wpa_ie, network->wpa_ie_len);
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = network->wpa_ie_len;
- start = iwe_stream_add_point(info, start, stop, &iwe, buf);
- }
- memset(&iwe, 0, sizeof(iwe));
- if (network->rsn_ie_len)
- {
- char buf[MAX_WPA_IE_LEN];
- memcpy(buf, network->rsn_ie, network->rsn_ie_len);
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = network->rsn_ie_len;
- start = iwe_stream_add_point(info, start, stop, &iwe, buf);
- }
-#endif
-
-
- /* Add EXTRA: Age to display seconds since last beacon/probe response
- * for given network. */
- iwe.cmd = IWEVCUSTOM;
- p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
- " Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100));
- iwe.u.data.length = p - custom;
- if (iwe.u.data.length)
- start = iwe_stream_add_point(info, start, stop, &iwe, custom);
-
- return start;
-}
-
-int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct ieee80211_network *network;
- unsigned long flags;
-
- char *ev = extra;
-// char *stop = ev + IW_SCAN_MAX_DATA;
- char *stop = ev + wrqu->data.length;//IW_SCAN_MAX_DATA;
- //char *stop = ev + IW_SCAN_MAX_DATA;
- int i = 0;
- int err = 0;
- IEEE80211_DEBUG_WX("Getting scan\n");
- down(&ieee->wx_sem);
- spin_lock_irqsave(&ieee->lock, flags);
-
- list_for_each_entry(network, &ieee->network_list, list) {
- i++;
- if((stop-ev)<200)
- {
- err = -E2BIG;
- break;
- }
- if (ieee->scan_age == 0 ||
- time_after(network->last_scanned + ieee->scan_age, jiffies))
- ev = rtl819x_translate_scan(ieee, ev, stop, network, info);
- else
- IEEE80211_DEBUG_SCAN(
- "Not showing network '%s ("
- "%pM)' due to age (%lums).\n",
- escape_essid(network->ssid,
- network->ssid_len),
- network->bssid,
- (jiffies - network->last_scanned) / (HZ / 100));
- }
-
- spin_unlock_irqrestore(&ieee->lock, flags);
- up(&ieee->wx_sem);
- wrqu->data.length = ev - extra;
- wrqu->data.flags = 0;
-
- IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);
-
- return err;
-}
-
-int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *keybuf)
-{
- struct iw_point *erq = &(wrqu->encoding);
- struct net_device *dev = ieee->dev;
- struct ieee80211_security sec = {
- .flags = 0
- };
- int i, key, key_provided, len;
- struct ieee80211_crypt_data **crypt;
-
- IEEE80211_DEBUG_WX("SET_ENCODE\n");
-
- key = erq->flags & IW_ENCODE_INDEX;
- if (key) {
- if (key > WEP_KEYS)
- return -EINVAL;
- key--;
- key_provided = 1;
- } else {
- key_provided = 0;
- key = ieee->tx_keyidx;
- }
-
- IEEE80211_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
- "provided" : "default");
- crypt = &ieee->crypt[key];
-
- if (erq->flags & IW_ENCODE_DISABLED) {
- if (key_provided && *crypt) {
- IEEE80211_DEBUG_WX("Disabling encryption on key %d.\n",
- key);
- ieee80211_crypt_delayed_deinit(ieee, crypt);
- } else
- IEEE80211_DEBUG_WX("Disabling encryption.\n");
-
- /* Check all the keys to see if any are still configured,
- * and if no key index was provided, de-init them all */
- for (i = 0; i < WEP_KEYS; i++) {
- if (ieee->crypt[i] != NULL) {
- if (key_provided)
- break;
- ieee80211_crypt_delayed_deinit(
- ieee, &ieee->crypt[i]);
- }
- }
-
- if (i == WEP_KEYS) {
- sec.enabled = 0;
- sec.level = SEC_LEVEL_0;
- sec.flags |= SEC_ENABLED | SEC_LEVEL;
- }
-
- goto done;
- }
-
-
-
- sec.enabled = 1;
- sec.flags |= SEC_ENABLED;
-
- if (*crypt != NULL && (*crypt)->ops != NULL &&
- strcmp((*crypt)->ops->name, "WEP") != 0) {
- /* changing to use WEP; deinit previously used algorithm
- * on this key */
- ieee80211_crypt_delayed_deinit(ieee, crypt);
- }
-
- if (*crypt == NULL) {
- struct ieee80211_crypt_data *new_crypt;
-
- /* take WEP into use */
- new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
- GFP_KERNEL);
- if (new_crypt == NULL)
- return -ENOMEM;
- new_crypt->ops = ieee80211_get_crypto_ops("WEP");
- if (!new_crypt->ops)
- new_crypt->ops = ieee80211_get_crypto_ops("WEP");
- if (new_crypt->ops)
- new_crypt->priv = new_crypt->ops->init(key);
-
- if (!new_crypt->ops || !new_crypt->priv) {
- kfree(new_crypt);
- new_crypt = NULL;
-
- printk(KERN_WARNING "%s: could not initialize WEP: "
- "load module ieee80211_crypt_wep\n",
- dev->name);
- return -EOPNOTSUPP;
- }
- *crypt = new_crypt;
- }
-
- /* If a new key was provided, set it up */
- if (erq->length > 0) {
- len = erq->length <= 5 ? 5 : 13;
- memcpy(sec.keys[key], keybuf, erq->length);
- if (len > erq->length)
- memset(sec.keys[key] + erq->length, 0,
- len - erq->length);
- IEEE80211_DEBUG_WX("Setting key %d to '%s' (%d:%d bytes)\n",
- key, escape_essid(sec.keys[key], len),
- erq->length, len);
- sec.key_sizes[key] = len;
- (*crypt)->ops->set_key(sec.keys[key], len, NULL,
- (*crypt)->priv);
- sec.flags |= (1 << key);
- /* This ensures a key will be activated if no key is
- * explicitely set */
- if (key == sec.active_key)
- sec.flags |= SEC_ACTIVE_KEY;
- ieee->tx_keyidx = key;
-
- } else {
- len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
- NULL, (*crypt)->priv);
- if (len == 0) {
- /* Set a default key of all 0 */
- printk("Setting key %d to all zero.\n",
- key);
-
- IEEE80211_DEBUG_WX("Setting key %d to all zero.\n",
- key);
- memset(sec.keys[key], 0, 13);
- (*crypt)->ops->set_key(sec.keys[key], 13, NULL,
- (*crypt)->priv);
- sec.key_sizes[key] = 13;
- sec.flags |= (1 << key);
- }
-
- /* No key data - just set the default TX key index */
- if (key_provided) {
- IEEE80211_DEBUG_WX(
- "Setting key %d to default Tx key.\n", key);
- ieee->tx_keyidx = key;
- sec.active_key = key;
- sec.flags |= SEC_ACTIVE_KEY;
- }
- }
-
- done:
- ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
- ieee->auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
- sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
- sec.flags |= SEC_AUTH_MODE;
- IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ?
- "OPEN" : "SHARED KEY");
-
- /* For now we just support WEP, so only set that security level...
- * TODO: When WPA is added this is one place that needs to change */
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
-
- if (ieee->set_security)
- ieee->set_security(ieee, &sec);
-
- /* Do not reset port if card is in Managed mode since resetting will
- * generate new IEEE 802.11 authentication which may end up in looping
- * with IEEE 802.1X. If your hardware requires a reset after WEP
- * configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack. */
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(ieee)) {
- printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
- return -EINVAL;
- }
- return 0;
-}
-
-int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *keybuf)
-{
- struct iw_point *erq = &(wrqu->encoding);
- int len, key;
- struct ieee80211_crypt_data *crypt;
-
- IEEE80211_DEBUG_WX("GET_ENCODE\n");
-
- if(ieee->iw_mode == IW_MODE_MONITOR)
- return -1;
-
- key = erq->flags & IW_ENCODE_INDEX;
- if (key) {
- if (key > WEP_KEYS)
- return -EINVAL;
- key--;
- } else
- key = ieee->tx_keyidx;
-
- crypt = ieee->crypt[key];
- erq->flags = key + 1;
-
- if (crypt == NULL || crypt->ops == NULL) {
- erq->length = 0;
- erq->flags |= IW_ENCODE_DISABLED;
- return 0;
- }
-#if 0
- if (strcmp(crypt->ops->name, "WEP") != 0) {
- /* only WEP is supported with wireless extensions, so just
- * report that encryption is used */
- erq->length = 0;
- erq->flags |= IW_ENCODE_ENABLED;
- return 0;
- }
-#endif
- len = crypt->ops->get_key(keybuf, SCM_KEY_LEN, NULL, crypt->priv);
- erq->length = (len >= 0 ? len : 0);
-
- erq->flags |= IW_ENCODE_ENABLED;
-
- if (ieee->open_wep)
- erq->flags |= IW_ENCODE_OPEN;
- else
- erq->flags |= IW_ENCODE_RESTRICTED;
-
- return 0;
-}
-#if (WIRELESS_EXT >= 18)
-int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- struct net_device *dev = ieee->dev;
- struct iw_point *encoding = &wrqu->encoding;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int i, idx;
- int group_key = 0;
- const char *alg;
- struct ieee80211_crypto_ops *ops;
- struct ieee80211_crypt_data **crypt;
-
- struct ieee80211_security sec = {
- .flags = 0,
- };
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx) {
- if (idx < 1 || idx > WEP_KEYS)
- return -EINVAL;
- idx--;
- } else
- idx = ieee->tx_keyidx;
-
- if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
-
- crypt = &ieee->crypt[idx];
-
- group_key = 1;
- } else {
- /* some Cisco APs use idx>0 for unicast in dynamic WEP */
- if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
- return -EINVAL;
- if (ieee->iw_mode == IW_MODE_INFRA)
-
- crypt = &ieee->crypt[idx];
-
- else
- return -EINVAL;
- }
-
- sec.flags |= SEC_ENABLED;// | SEC_ENCRYPT;
- if ((encoding->flags & IW_ENCODE_DISABLED) ||
- ext->alg == IW_ENCODE_ALG_NONE) {
- if (*crypt)
- ieee80211_crypt_delayed_deinit(ieee, crypt);
-
- for (i = 0; i < WEP_KEYS; i++)
-
- if (ieee->crypt[i] != NULL)
-
- break;
-
- if (i == WEP_KEYS) {
- sec.enabled = 0;
- // sec.encrypt = 0;
- sec.level = SEC_LEVEL_0;
- sec.flags |= SEC_LEVEL;
- }
- goto done;
- }
-
- sec.enabled = 1;
- // sec.encrypt = 1;
-#if 0
- if (group_key ? !ieee->host_mc_decrypt :
- !(ieee->host_encrypt || ieee->host_decrypt ||
- ieee->host_encrypt_msdu))
- goto skip_host_crypt;
-#endif
- switch (ext->alg) {
- case IW_ENCODE_ALG_WEP:
- alg = "WEP";
- break;
- case IW_ENCODE_ALG_TKIP:
- alg = "TKIP";
- break;
- case IW_ENCODE_ALG_CCMP:
- alg = "CCMP";
- break;
- default:
- IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
- dev->name, ext->alg);
- ret = -EINVAL;
- goto done;
- }
- printk("alg name:%s\n",alg);
-
- ops = ieee80211_get_crypto_ops(alg);
- if (ops == NULL)
- ops = ieee80211_get_crypto_ops(alg);
- if (ops == NULL) {
- IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
- dev->name, ext->alg);
- printk("========>unknown crypto alg %d\n", ext->alg);
- ret = -EINVAL;
- goto done;
- }
-
- if (*crypt == NULL || (*crypt)->ops != ops) {
- struct ieee80211_crypt_data *new_crypt;
-
- ieee80211_crypt_delayed_deinit(ieee, crypt);
-
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
- new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
-#else
- new_crypt = kmalloc(sizeof(*new_crypt), GFP_KERNEL);
- memset(new_crypt,0,sizeof(*new_crypt));
-#endif
- if (new_crypt == NULL) {
- ret = -ENOMEM;
- goto done;
- }
- new_crypt->ops = ops;
- if (new_crypt->ops)
- new_crypt->priv = new_crypt->ops->init(idx);
- if (new_crypt->priv == NULL) {
- kfree(new_crypt);
- ret = -EINVAL;
- goto done;
- }
- *crypt = new_crypt;
-
- }
-
- if (ext->key_len > 0 && (*crypt)->ops->set_key &&
- (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
- (*crypt)->priv) < 0) {
- IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name);
- printk("key setting failed\n");
- ret = -EINVAL;
- goto done;
- }
-#if 1
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- ieee->tx_keyidx = idx;
- sec.active_key = idx;
- sec.flags |= SEC_ACTIVE_KEY;
- }
-
- if (ext->alg != IW_ENCODE_ALG_NONE) {
- //memcpy(sec.keys[idx], ext->key, ext->key_len);
- sec.key_sizes[idx] = ext->key_len;
- sec.flags |= (1 << idx);
- if (ext->alg == IW_ENCODE_ALG_WEP) {
- // sec.encode_alg[idx] = SEC_ALG_WEP;
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_1;
- } else if (ext->alg == IW_ENCODE_ALG_TKIP) {
- // sec.encode_alg[idx] = SEC_ALG_TKIP;
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_2;
- } else if (ext->alg == IW_ENCODE_ALG_CCMP) {
- // sec.encode_alg[idx] = SEC_ALG_CCMP;
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_3;
- }
- /* Don't set sec level for group keys. */
- if (group_key)
- sec.flags &= ~SEC_LEVEL;
- }
-#endif
-done:
- if (ieee->set_security)
- ieee->set_security(ieee, &sec);
-
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(ieee)) {
- IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
- return -EINVAL;
- }
- return ret;
-}
-
-int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_point *encoding = &wrqu->encoding;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- struct ieee80211_crypt_data *crypt;
- int idx, max_key_len;
-
- max_key_len = encoding->length - sizeof(*ext);
- if (max_key_len < 0)
- return -EINVAL;
-
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx) {
- if (idx < 1 || idx > WEP_KEYS)
- return -EINVAL;
- idx--;
- } else
- idx = ieee->tx_keyidx;
-
- if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) &&
- ext->alg != IW_ENCODE_ALG_WEP)
- if (idx != 0 || ieee->iw_mode != IW_MODE_INFRA)
- return -EINVAL;
-
- crypt = ieee->crypt[idx];
- encoding->flags = idx + 1;
- memset(ext, 0, sizeof(*ext));
-
- if (crypt == NULL || crypt->ops == NULL ) {
- ext->alg = IW_ENCODE_ALG_NONE;
- ext->key_len = 0;
- encoding->flags |= IW_ENCODE_DISABLED;
- } else {
- if (strcmp(crypt->ops->name, "WEP") == 0 )
- ext->alg = IW_ENCODE_ALG_WEP;
- else if (strcmp(crypt->ops->name, "TKIP"))
- ext->alg = IW_ENCODE_ALG_TKIP;
- else if (strcmp(crypt->ops->name, "CCMP"))
- ext->alg = IW_ENCODE_ALG_CCMP;
- else
- return -EINVAL;
- ext->key_len = crypt->ops->get_key(ext->key, SCM_KEY_LEN, NULL, crypt->priv);
- encoding->flags |= IW_ENCODE_ENABLED;
- if (ext->key_len &&
- (ext->alg == IW_ENCODE_ALG_TKIP ||
- ext->alg == IW_ENCODE_ALG_CCMP))
- ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID;
-
- }
-
- return 0;
-}
-
-int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_mlme *mlme = (struct iw_mlme *) extra;
- switch (mlme->cmd) {
- case IW_MLME_DEAUTH:
- case IW_MLME_DISASSOC:
- ieee80211_disassociate(ieee);
- break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- struct iw_param *data, char *extra)
-{
- switch (data->flags & IW_AUTH_INDEX) {
- case IW_AUTH_WPA_VERSION:
- /*need to support wpa2 here*/
- break;
- case IW_AUTH_CIPHER_PAIRWISE:
- case IW_AUTH_CIPHER_GROUP:
- case IW_AUTH_KEY_MGMT:
- /*
- * * Host AP driver does not use these parameters and allows
- * * wpa_supplicant to control them internally.
- * */
- break;
- case IW_AUTH_TKIP_COUNTERMEASURES:
- ieee->tkip_countermeasures = data->value;
- break;
- case IW_AUTH_DROP_UNENCRYPTED:
- ieee->drop_unencrypted = data->value;
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- if(data->value & IW_AUTH_ALG_SHARED_KEY){
- ieee->open_wep = 0;
- ieee->auth_mode = 1;
- }
- else if(data->value & IW_AUTH_ALG_OPEN_SYSTEM){
- ieee->open_wep = 1;
- ieee->auth_mode = 0;
- }
- else if(data->value & IW_AUTH_ALG_LEAP){
- ieee->open_wep = 1;
- ieee->auth_mode = 2;
- }
- else
- return -EINVAL;
- break;
-
-#if 1
- case IW_AUTH_WPA_ENABLED:
- ieee->wpa_enabled = (data->value)?1:0;
- break;
-
-#endif
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- ieee->ieee802_1x = data->value;
- break;
- case IW_AUTH_PRIVACY_INVOKED:
- ieee->privacy_invoked = data->value;
- break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-#endif
-#if 1
-int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
-{
- u8 *buf;
-
- if (len>MAX_WPA_IE_LEN || (len && ie == NULL))
- {
- return -EINVAL;
- }
-
-
- if (len)
- {
- if (len != ie[1]+2)
- {
- printk("len:%zu, ie:%d\n", len, ie[1]);
- return -EINVAL;
- }
- buf = kmemdup(ie, len, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
- kfree(ieee->wpa_ie);
- ieee->wpa_ie = buf;
- ieee->wpa_ie_len = len;
- }
- else{
- kfree(ieee->wpa_ie);
- ieee->wpa_ie = NULL;
- ieee->wpa_ie_len = 0;
- }
- return 0;
-
-}
-#endif
diff --git a/drivers/staging/rtl8192e/ieee80211/rtl819x_BA.h b/drivers/staging/rtl8192e/ieee80211/rtl819x_BA.h
deleted file mode 100644
index 8ddc8bf9dc2..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/rtl819x_BA.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#ifndef _BATYPE_H_
-#define _BATYPE_H_
-
-#define TOTAL_TXBA_NUM 16
-#define TOTAL_RXBA_NUM 16
-
-#define BA_SETUP_TIMEOUT 200
-#define BA_INACT_TIMEOUT 60000
-
-#define BA_POLICY_DELAYED 0
-#define BA_POLICY_IMMEDIATE 1
-
-#define ADDBA_STATUS_SUCCESS 0
-#define ADDBA_STATUS_REFUSED 37
-#define ADDBA_STATUS_INVALID_PARAM 38
-
-#define DELBA_REASON_QSTA_LEAVING 36
-#define DELBA_REASON_END_BA 37
-#define DELBA_REASON_UNKNOWN_BA 38
-#define DELBA_REASON_TIMEOUT 39
-/* whether need define BA Action frames here?
-struct ieee80211_ADDBA_Req{
- struct ieee80211_header_data header;
- u8 category;
- u8
-} __attribute__ ((packed));
-*/
-//Is this need?I put here just to make it easier to define structure BA_RECORD //WB
-typedef union _SEQUENCE_CONTROL{
- u16 ShortData;
- struct
- {
- u16 FragNum:4;
- u16 SeqNum:12;
- }field;
-}SEQUENCE_CONTROL, *PSEQUENCE_CONTROL;
-
-typedef union _BA_PARAM_SET {
- u8 charData[2];
- u16 shortData;
- struct {
- u16 AMSDU_Support:1;
- u16 BAPolicy:1;
- u16 TID:4;
- u16 BufferSize:10;
- } field;
-} BA_PARAM_SET, *PBA_PARAM_SET;
-
-typedef union _DELBA_PARAM_SET {
- u8 charData[2];
- u16 shortData;
- struct {
- u16 Reserved:11;
- u16 Initiator:1;
- u16 TID:4;
- } field;
-} DELBA_PARAM_SET, *PDELBA_PARAM_SET;
-
-typedef struct _BA_RECORD {
- struct timer_list Timer;
- u8 bValid;
- u8 DialogToken;
- BA_PARAM_SET BaParamSet;
- u16 BaTimeoutValue;
- SEQUENCE_CONTROL BaStartSeqCtrl;
-} BA_RECORD, *PBA_RECORD;
-
-#endif //end _BATYPE_H_
-
diff --git a/drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c
deleted file mode 100644
index b690cbc5136..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c
+++ /dev/null
@@ -1,676 +0,0 @@
-/*
- * This file is created to process BA Action Frame. According to 802.11 spec,
- * there are 3 BA action types at all. And as BA is related to TS, this part
- * need some struture defined in QOS side code. Also TX RX is going to be
- * resturctured, so how to send ADDBAREQ ADDBARSP and DELBA packet is still
- * on consideration. Temporarily use MANAGE QUEUE instead of Normal Queue.
- */
-#include "ieee80211.h"
-#include "rtl819x_BA.h"
-
-/*
- * Activate BA entry. And if Time is nozero, start timer.
- */
-void ActivateBAEntry(struct ieee80211_device* ieee, PBA_RECORD pBA, u16 Time)
-{
- pBA->bValid = true;
- if(Time != 0)
- mod_timer(&pBA->Timer, jiffies + MSECS(Time));
-}
-
-/*
- * deactivate BA entry, including its timer.
- */
-void DeActivateBAEntry( struct ieee80211_device* ieee, PBA_RECORD pBA)
-{
- pBA->bValid = false;
- del_timer_sync(&pBA->Timer);
-}
-
-/*
- * deactivete BA entry in Tx Ts, and send DELBA.
- */
-u8 TxTsDeleteBA( struct ieee80211_device* ieee, PTX_TS_RECORD pTxTs)
-{
- PBA_RECORD pAdmittedBa = &pTxTs->TxAdmittedBARecord; //These two BA entries must exist in TS structure
- PBA_RECORD pPendingBa = &pTxTs->TxPendingBARecord;
- u8 bSendDELBA = false;
-
- // Delete pending BA
- if(pPendingBa->bValid)
- {
- DeActivateBAEntry(ieee, pPendingBa);
- bSendDELBA = true;
- }
-
- // Delete admitted BA
- if(pAdmittedBa->bValid)
- {
- DeActivateBAEntry(ieee, pAdmittedBa);
- bSendDELBA = true;
- }
-
- return bSendDELBA;
-}
-
-/*
- * deactivete BA entry in Tx Ts, and send DELBA.
- */
-u8 RxTsDeleteBA( struct ieee80211_device* ieee, PRX_TS_RECORD pRxTs)
-{
- PBA_RECORD pBa = &pRxTs->RxAdmittedBARecord;
- u8 bSendDELBA = false;
-
- if(pBa->bValid)
- {
- DeActivateBAEntry(ieee, pBa);
- bSendDELBA = true;
- }
-
- return bSendDELBA;
-}
-
-/*
- * reset BA entry
- */
-void ResetBaEntry( PBA_RECORD pBA)
-{
- pBA->bValid = false;
- pBA->BaParamSet.shortData = 0;
- pBA->BaTimeoutValue = 0;
- pBA->DialogToken = 0;
- pBA->BaStartSeqCtrl.ShortData = 0;
-}
-
-/*
- * construct ADDBAREQ and ADDBARSP frame here together.
- * return constructed skb to xmit
- */
-static struct sk_buff* ieee80211_ADDBA(struct ieee80211_device* ieee, u8* Dst, PBA_RECORD pBA, u16 StatusCode, u8 type)
-{
- struct sk_buff *skb = NULL;
- struct ieee80211_hdr_3addr* BAReq = NULL;
- u8* tag = NULL;
- u16 tmp = 0;
- u16 len = ieee->tx_headroom + 9;
- //category(1) + action field(1) + Dialog Token(1) + BA Parameter Set(2) + BA Timeout Value(2) + BA Start SeqCtrl(2)(or StatusCode(2))
- IEEE80211_DEBUG(IEEE80211_DL_TRACE | IEEE80211_DL_BA, "========>%s(), frame(%d) sentd to:%pM, ieee->dev:%p\n", __FUNCTION__, type, Dst, ieee->dev);
- if (pBA == NULL||ieee == NULL)
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "pBA(%p) is NULL or ieee(%p) is NULL\n", pBA, ieee);
- return NULL;
- }
- skb = dev_alloc_skb(len + sizeof( struct ieee80211_hdr_3addr)); //need to add something others? FIXME
- if (skb == NULL)
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc skb for ADDBA_REQ\n");
- return NULL;
- }
-
- memset(skb->data, 0, sizeof( struct ieee80211_hdr_3addr)); //I wonder whether it's necessary. Apparently kernel will not do it when alloc a skb.
- skb_reserve(skb, ieee->tx_headroom);
-
- BAReq = ( struct ieee80211_hdr_3addr *) skb_put(skb,sizeof( struct ieee80211_hdr_3addr));
-
- memcpy(BAReq->addr1, Dst, ETH_ALEN);
- memcpy(BAReq->addr2, ieee->dev->dev_addr, ETH_ALEN);
-
- memcpy(BAReq->addr3, ieee->current_network.bssid, ETH_ALEN);
-
- BAReq->frame_ctl = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame
-
- //tag += sizeof( struct ieee80211_hdr_3addr); //move to action field
- tag = (u8*)skb_put(skb, 9);
- *tag ++= ACT_CAT_BA;
- *tag ++= type;
- // Dialog Token
- *tag ++= pBA->DialogToken;
-
- if (ACT_ADDBARSP == type)
- {
- // Status Code
- printk("=====>to send ADDBARSP\n");
- tmp = cpu_to_le16(StatusCode);
- memcpy(tag, (u8*)&tmp, 2);
- tag += 2;
- }
- // BA Parameter Set
- tmp = cpu_to_le16(pBA->BaParamSet.shortData);
- memcpy(tag, (u8*)&tmp, 2);
- tag += 2;
- // BA Timeout Value
- tmp = cpu_to_le16(pBA->BaTimeoutValue);
- memcpy(tag, (u8*)&tmp, 2);
- tag += 2;
-
- if (ACT_ADDBAREQ == type)
- {
- // BA Start SeqCtrl
- memcpy(tag,(u8*)&(pBA->BaStartSeqCtrl), 2);
- tag += 2;
- }
-
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
- return skb;
- //return NULL;
-}
-
-/*
- * construct DELBA frame
- */
-static struct sk_buff* ieee80211_DELBA(
- struct ieee80211_device* ieee,
- u8* dst,
- PBA_RECORD pBA,
- TR_SELECT TxRxSelect,
- u16 ReasonCode
- )
-{
- DELBA_PARAM_SET DelbaParamSet;
- struct sk_buff *skb = NULL;
- struct ieee80211_hdr_3addr* Delba = NULL;
- u8* tag = NULL;
- u16 tmp = 0;
- //len = head len + DELBA Parameter Set(2) + Reason Code(2)
- u16 len = 6 + ieee->tx_headroom;
-
- if (net_ratelimit())
- IEEE80211_DEBUG(IEEE80211_DL_TRACE | IEEE80211_DL_BA, "========>%s(), ReasonCode(%d) sentd to:%pM\n", __FUNCTION__, ReasonCode, dst);
-
- memset(&DelbaParamSet, 0, 2);
-
- DelbaParamSet.field.Initiator = (TxRxSelect==TX_DIR)?1:0;
- DelbaParamSet.field.TID = pBA->BaParamSet.field.TID;
-
- skb = dev_alloc_skb(len + sizeof( struct ieee80211_hdr_3addr)); //need to add something others? FIXME
- if (skb == NULL)
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc skb for ADDBA_REQ\n");
- return NULL;
- }
-// memset(skb->data, 0, len+sizeof( struct ieee80211_hdr_3addr));
- skb_reserve(skb, ieee->tx_headroom);
-
- Delba = ( struct ieee80211_hdr_3addr *) skb_put(skb,sizeof( struct ieee80211_hdr_3addr));
-
- memcpy(Delba->addr1, dst, ETH_ALEN);
- memcpy(Delba->addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy(Delba->addr3, ieee->current_network.bssid, ETH_ALEN);
- Delba->frame_ctl = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame
-
- tag = (u8*)skb_put(skb, 6);
-
- *tag ++= ACT_CAT_BA;
- *tag ++= ACT_DELBA;
-
- // DELBA Parameter Set
- tmp = cpu_to_le16(DelbaParamSet.shortData);
- memcpy(tag, (u8*)&tmp, 2);
- tag += 2;
- // Reason Code
- tmp = cpu_to_le16(ReasonCode);
- memcpy(tag, (u8*)&tmp, 2);
- tag += 2;
-
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
- if (net_ratelimit())
- IEEE80211_DEBUG(IEEE80211_DL_TRACE | IEEE80211_DL_BA, "<=====%s()\n", __FUNCTION__);
- return skb;
-}
-
-/*
- * send ADDBAReq frame out
- * If any possible, please hide pBA in ieee.
- * And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
- */
-void ieee80211_send_ADDBAReq(struct ieee80211_device* ieee, u8* dst, PBA_RECORD pBA)
-{
- struct sk_buff *skb = NULL;
- skb = ieee80211_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ); //construct ACT_ADDBAREQ frames so set statuscode zero.
-
- if (skb)
- {
- softmac_mgmt_xmit(skb, ieee);
- //add statistic needed here.
- //and skb will be freed in softmac_mgmt_xmit(), so omit all dev_kfree_skb_any() outside softmac_mgmt_xmit()
- //WB
- }
- else
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "alloc skb error in function %s()\n", __FUNCTION__);
- }
-}
-
-/*
- * send ADDBARSP frame out
- * If any possible, please hide pBA in ieee.
- * And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
- */
-void ieee80211_send_ADDBARsp(struct ieee80211_device* ieee, u8* dst, PBA_RECORD pBA, u16 StatusCode)
-{
- struct sk_buff *skb = NULL;
- skb = ieee80211_ADDBA(ieee, dst, pBA, StatusCode, ACT_ADDBARSP); //construct ACT_ADDBARSP frames
- if (skb)
- {
- softmac_mgmt_xmit(skb, ieee);
- //same above
- }
- else
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "alloc skb error in function %s()\n", __FUNCTION__);
- }
-}
-
-/*
- * send ADDBARSP frame out
- * If any possible, please hide pBA in ieee.
- * And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
- */
-void ieee80211_send_DELBA(struct ieee80211_device* ieee, u8* dst, PBA_RECORD pBA, TR_SELECT TxRxSelect, u16 ReasonCode)
-{
- struct sk_buff *skb = NULL;
- skb = ieee80211_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode); //construct ACT_ADDBARSP frames
- if (skb)
- {
- softmac_mgmt_xmit(skb, ieee);
- //same above
- }
- else
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "alloc skb error in function %s()\n", __FUNCTION__);
- }
- return ;
-}
-
-int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb)
-{
- struct ieee80211_hdr_3addr* req = NULL;
- u16 rc = 0;
- u8 * dst = NULL, *pDialogToken = NULL, *tag = NULL;
- PBA_RECORD pBA = NULL;
- PBA_PARAM_SET pBaParamSet = NULL;
- u16* pBaTimeoutVal = NULL;
- PSEQUENCE_CONTROL pBaStartSeqCtrl = NULL;
- PRX_TS_RECORD pTS = NULL;
-
- if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BAREQ(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
- return -1;
- }
-
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
-
- req = ( struct ieee80211_hdr_3addr*) skb->data;
- tag = (u8*)req;
- dst = (u8*)(&req->addr2[0]);
- tag += sizeof( struct ieee80211_hdr_3addr);
- pDialogToken = tag + 2; //category+action
- pBaParamSet = (PBA_PARAM_SET)(tag + 3); //+DialogToken
- pBaTimeoutVal = (u16*)(tag + 5);
- pBaStartSeqCtrl = (PSEQUENCE_CONTROL)(req + 7);
-
- printk("====================>rx ADDBAREQ from :%pM\n", dst);
-//some other capability is not ready now.
- if( (ieee->current_network.qos_data.active == 0) ||
- (ieee->pHTInfo->bCurrentHTSupport == false)) //||
- // (ieee->pStaQos->bEnableRxImmBA == false) )
- {
- rc = ADDBA_STATUS_REFUSED;
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "Failed to reply on ADDBA_REQ as some capability is not ready(%d, %d)\n", ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport);
- goto OnADDBAReq_Fail;
- }
- // Search for related traffic stream.
- // If there is no matched TS, reject the ADDBA request.
- if( !GetTs(
- ieee,
- (PTS_COMMON_INFO*)(&pTS),
- dst,
- (u8)(pBaParamSet->field.TID),
- RX_DIR,
- true) )
- {
- rc = ADDBA_STATUS_REFUSED;
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't get TS in %s()\n", __FUNCTION__);
- goto OnADDBAReq_Fail;
- }
- pBA = &pTS->RxAdmittedBARecord;
- // To Determine the ADDBA Req content
- // We can do much more check here, including BufferSize, AMSDU_Support, Policy, StartSeqCtrl...
- // I want to check StartSeqCtrl to make sure when we start aggregation!!!
- //
- if(pBaParamSet->field.BAPolicy == BA_POLICY_DELAYED)
- {
- rc = ADDBA_STATUS_INVALID_PARAM;
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "BA Policy is not correct in %s()\n", __FUNCTION__);
- goto OnADDBAReq_Fail;
- }
- // Admit the ADDBA Request
- //
- DeActivateBAEntry(ieee, pBA);
- pBA->DialogToken = *pDialogToken;
- pBA->BaParamSet = *pBaParamSet;
- pBA->BaTimeoutValue = *pBaTimeoutVal;
- pBA->BaStartSeqCtrl = *pBaStartSeqCtrl;
- //for half N mode we only aggregate 1 frame
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee))
- pBA->BaParamSet.field.BufferSize = 1;
- else
- pBA->BaParamSet.field.BufferSize = 32;
- ActivateBAEntry(ieee, pBA, pBA->BaTimeoutValue);
- ieee80211_send_ADDBARsp(ieee, dst, pBA, ADDBA_STATUS_SUCCESS);
-
- // End of procedure.
- return 0;
-
-OnADDBAReq_Fail:
- {
- BA_RECORD BA;
- BA.BaParamSet = *pBaParamSet;
- BA.BaTimeoutValue = *pBaTimeoutVal;
- BA.DialogToken = *pDialogToken;
- BA.BaParamSet.field.BAPolicy = BA_POLICY_IMMEDIATE;
- ieee80211_send_ADDBARsp(ieee, dst, &BA, rc);
- return 0; //we send RSP out.
- }
-
-}
-
-int ieee80211_rx_ADDBARsp( struct ieee80211_device* ieee, struct sk_buff *skb)
-{
- struct ieee80211_hdr_3addr* rsp = NULL;
- PBA_RECORD pPendingBA, pAdmittedBA;
- PTX_TS_RECORD pTS = NULL;
- u8* dst = NULL, *pDialogToken = NULL, *tag = NULL;
- u16* pStatusCode = NULL, *pBaTimeoutVal = NULL;
- PBA_PARAM_SET pBaParamSet = NULL;
- u16 ReasonCode;
-
- if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BARSP(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
- return -1;
- }
- rsp = ( struct ieee80211_hdr_3addr*)skb->data;
- tag = (u8*)rsp;
- dst = (u8*)(&rsp->addr2[0]);
- tag += sizeof( struct ieee80211_hdr_3addr);
- pDialogToken = tag + 2;
- pStatusCode = (u16*)(tag + 3);
- pBaParamSet = (PBA_PARAM_SET)(tag + 5);
- pBaTimeoutVal = (u16*)(tag + 7);
-
- // Check the capability
- // Since we can always receive A-MPDU, we just check if it is under HT mode.
- if( ieee->current_network.qos_data.active == 0 ||
- ieee->pHTInfo->bCurrentHTSupport == false ||
- ieee->pHTInfo->bCurrentAMPDUEnable == false )
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "reject to ADDBA_RSP as some capability is not ready(%d, %d, %d)\n",ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport, ieee->pHTInfo->bCurrentAMPDUEnable);
- ReasonCode = DELBA_REASON_UNKNOWN_BA;
- goto OnADDBARsp_Reject;
- }
-
-
- //
- // Search for related TS.
- // If there is no TS found, we wil reject ADDBA Rsp by sending DELBA frame.
- //
- if (!GetTs(
- ieee,
- (PTS_COMMON_INFO*)(&pTS),
- dst,
- (u8)(pBaParamSet->field.TID),
- TX_DIR,
- false) )
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't get TS in %s()\n", __FUNCTION__);
- ReasonCode = DELBA_REASON_UNKNOWN_BA;
- goto OnADDBARsp_Reject;
- }
-
- pTS->bAddBaReqInProgress = false;
- pPendingBA = &pTS->TxPendingBARecord;
- pAdmittedBA = &pTS->TxAdmittedBARecord;
-
-
- //
- // Check if related BA is waiting for setup.
- // If not, reject by sending DELBA frame.
- //
- if((pAdmittedBA->bValid==true))
- {
- // Since BA is already setup, we ignore all other ADDBA Response.
- IEEE80211_DEBUG(IEEE80211_DL_BA, "OnADDBARsp(): Recv ADDBA Rsp. Drop because already admit it! \n");
- return -1;
- }
- else if((pPendingBA->bValid == false) ||(*pDialogToken != pPendingBA->DialogToken))
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "OnADDBARsp(): Recv ADDBA Rsp. BA invalid, DELBA! \n");
- ReasonCode = DELBA_REASON_UNKNOWN_BA;
- goto OnADDBARsp_Reject;
- }
- else
- {
- IEEE80211_DEBUG(IEEE80211_DL_BA, "OnADDBARsp(): Recv ADDBA Rsp. BA is admitted! Status code:%X\n", *pStatusCode);
- DeActivateBAEntry(ieee, pPendingBA);
- }
-
-
- if(*pStatusCode == ADDBA_STATUS_SUCCESS)
- {
- //
- // Determine ADDBA Rsp content here.
- // We can compare the value of BA parameter set that Peer returned and Self sent.
- // If it is OK, then admitted. Or we can send DELBA to cancel BA mechanism.
- //
- if(pBaParamSet->field.BAPolicy == BA_POLICY_DELAYED)
- {
- // Since this is a kind of ADDBA failed, we delay next ADDBA process.
- pTS->bAddBaReqDelayed = true;
- DeActivateBAEntry(ieee, pAdmittedBA);
- ReasonCode = DELBA_REASON_END_BA;
- goto OnADDBARsp_Reject;
- }
-
-
- //
- // Admitted condition
- //
- pAdmittedBA->DialogToken = *pDialogToken;
- pAdmittedBA->BaTimeoutValue = *pBaTimeoutVal;
- pAdmittedBA->BaStartSeqCtrl = pPendingBA->BaStartSeqCtrl;
- pAdmittedBA->BaParamSet = *pBaParamSet;
- DeActivateBAEntry(ieee, pAdmittedBA);
- ActivateBAEntry(ieee, pAdmittedBA, *pBaTimeoutVal);
- }
- else
- {
- // Delay next ADDBA process.
- pTS->bAddBaReqDelayed = true;
- }
-
- // End of procedure
- return 0;
-
-OnADDBARsp_Reject:
- {
- BA_RECORD BA;
- BA.BaParamSet = *pBaParamSet;
- ieee80211_send_DELBA(ieee, dst, &BA, TX_DIR, ReasonCode);
- return 0;
- }
-
-}
-
-int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb)
-{
- struct ieee80211_hdr_3addr* delba = NULL;
- PDELBA_PARAM_SET pDelBaParamSet = NULL;
- u16* pReasonCode = NULL;
- u8* dst = NULL;
-
- if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 6)
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in DELBA(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 6));
- return -1;
- }
-
- if(ieee->current_network.qos_data.active == 0 ||
- ieee->pHTInfo->bCurrentHTSupport == false )
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "received DELBA while QOS or HT is not supported(%d, %d)\n",ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport);
- return -1;
- }
-
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
- delba = ( struct ieee80211_hdr_3addr*)skb->data;
- dst = (u8*)(&delba->addr2[0]);
- delba += sizeof( struct ieee80211_hdr_3addr);
- pDelBaParamSet = (PDELBA_PARAM_SET)(delba+2);
- pReasonCode = (u16*)(delba+4);
-
- if(pDelBaParamSet->field.Initiator == 1)
- {
- PRX_TS_RECORD pRxTs;
-
- if( !GetTs(
- ieee,
- (PTS_COMMON_INFO*)&pRxTs,
- dst,
- (u8)pDelBaParamSet->field.TID,
- RX_DIR,
- false) )
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't get TS for RXTS in %s()\n", __FUNCTION__);
- return -1;
- }
-
- RxTsDeleteBA(ieee, pRxTs);
- }
- else
- {
- PTX_TS_RECORD pTxTs;
-
- if(!GetTs(
- ieee,
- (PTS_COMMON_INFO*)&pTxTs,
- dst,
- (u8)pDelBaParamSet->field.TID,
- TX_DIR,
- false) )
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't get TS for TXTS in %s()\n", __FUNCTION__);
- return -1;
- }
-
- pTxTs->bUsingBa = false;
- pTxTs->bAddBaReqInProgress = false;
- pTxTs->bAddBaReqDelayed = false;
- del_timer_sync(&pTxTs->TsAddBaTimer);
- //PlatformCancelTimer(Adapter, &pTxTs->TsAddBaTimer);
- TxTsDeleteBA(ieee, pTxTs);
- }
- return 0;
-}
-
-/* ADDBA initiate. This can only be called by TX side. */
-void
-TsInitAddBA(
- struct ieee80211_device* ieee,
- PTX_TS_RECORD pTS,
- u8 Policy,
- u8 bOverwritePending
- )
-{
- PBA_RECORD pBA = &pTS->TxPendingBARecord;
-
- if(pBA->bValid==true && bOverwritePending==false)
- return;
-
- // Set parameters to "Pending" variable set
- DeActivateBAEntry(ieee, pBA);
-
- pBA->DialogToken++; // DialogToken: Only keep the latest dialog token
- pBA->BaParamSet.field.AMSDU_Support = 0; // Do not support A-MSDU with A-MPDU now!!
- pBA->BaParamSet.field.BAPolicy = Policy; // Policy: Delayed or Immediate
- pBA->BaParamSet.field.TID = pTS->TsCommonInfo.TSpec.f.TSInfo.field.ucTSID; // TID
- // BufferSize: This need to be set according to A-MPDU vector
- pBA->BaParamSet.field.BufferSize = 32; // BufferSize: This need to be set according to A-MPDU vector
- pBA->BaTimeoutValue = 0; // Timeout value: Set 0 to disable Timer
- pBA->BaStartSeqCtrl.field.SeqNum = (pTS->TxCurSeq + 3) % 4096; // Block Ack will start after 3 packets later.
-
- ActivateBAEntry(ieee, pBA, BA_SETUP_TIMEOUT);
-
- ieee80211_send_ADDBAReq(ieee, pTS->TsCommonInfo.Addr, pBA);
-}
-
-void
-TsInitDelBA( struct ieee80211_device* ieee, PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect)
-{
-
- if(TxRxSelect == TX_DIR)
- {
- PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)pTsCommonInfo;
-
- if(TxTsDeleteBA(ieee, pTxTs))
- ieee80211_send_DELBA(
- ieee,
- pTsCommonInfo->Addr,
- (pTxTs->TxAdmittedBARecord.bValid)?(&pTxTs->TxAdmittedBARecord):(&pTxTs->TxPendingBARecord),
- TxRxSelect,
- DELBA_REASON_END_BA);
- }
- else if(TxRxSelect == RX_DIR)
- {
- PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)pTsCommonInfo;
- if(RxTsDeleteBA(ieee, pRxTs))
- ieee80211_send_DELBA(
- ieee,
- pTsCommonInfo->Addr,
- &pRxTs->RxAdmittedBARecord,
- TxRxSelect,
- DELBA_REASON_END_BA );
- }
-}
-
-/*
- * BA setup timer
- * acturally we send TX_TS_RECORD or RX_TS_RECORD to these timer
- */
-void BaSetupTimeOut(unsigned long data)
-{
- PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)data;
-
- pTxTs->bAddBaReqInProgress = false;
- pTxTs->bAddBaReqDelayed = true;
- pTxTs->TxPendingBARecord.bValid = false;
-}
-
-void TxBaInactTimeout(unsigned long data)
-{
- PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)data;
- struct ieee80211_device *ieee = container_of(pTxTs, struct ieee80211_device, TxTsRecord[pTxTs->num]);
- TxTsDeleteBA(ieee, pTxTs);
- ieee80211_send_DELBA(
- ieee,
- pTxTs->TsCommonInfo.Addr,
- &pTxTs->TxAdmittedBARecord,
- TX_DIR,
- DELBA_REASON_TIMEOUT);
-}
-
-void RxBaInactTimeout(unsigned long data)
-{
- PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)data;
- struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
-
- RxTsDeleteBA(ieee, pRxTs);
- ieee80211_send_DELBA(
- ieee,
- pRxTs->TsCommonInfo.Addr,
- &pRxTs->RxAdmittedBARecord,
- RX_DIR,
- DELBA_REASON_TIMEOUT);
-}
-
diff --git a/drivers/staging/rtl8192e/ieee80211/rtl819x_HT.h b/drivers/staging/rtl8192e/ieee80211/rtl819x_HT.h
deleted file mode 100644
index 56a120cf629..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/rtl819x_HT.h
+++ /dev/null
@@ -1,483 +0,0 @@
-#ifndef _RTL819XU_HTTYPE_H_
-#define _RTL819XU_HTTYPE_H_
-
-//------------------------------------------------------------
-// The HT Capability element is present in beacons, association request,
-// reassociation request and probe response frames
-//------------------------------------------------------------
-
-//
-// Operation mode value
-//
-#define HT_OPMODE_NO_PROTECT 0
-#define HT_OPMODE_OPTIONAL 1
-#define HT_OPMODE_40MHZ_PROTECT 2
-#define HT_OPMODE_MIXED 3
-
-//
-// MIMO Power Save Setings
-//
-#define MIMO_PS_STATIC 0
-#define MIMO_PS_DYNAMIC 1
-#define MIMO_PS_NOLIMIT 3
-
-
-//
-// There should be 128 bits to cover all of the MCS rates. However, since
-// 8190 does not support too much rates, one integer is quite enough.
-//
-
-#define sHTCLng 4
-
-
-#define HT_SUPPORTED_MCS_1SS_BITMAP 0x000000ff
-#define HT_SUPPORTED_MCS_2SS_BITMAP 0x0000ff00
-#define HT_SUPPORTED_MCS_1SS_2SS_BITMAP HT_MCS_1SS_BITMAP|HT_MCS_1SS_2SS_BITMAP
-
-
-typedef enum _HT_MCS_RATE{
- HT_MCS0 = 0x00000001,
- HT_MCS1 = 0x00000002,
- HT_MCS2 = 0x00000004,
- HT_MCS3 = 0x00000008,
- HT_MCS4 = 0x00000010,
- HT_MCS5 = 0x00000020,
- HT_MCS6 = 0x00000040,
- HT_MCS7 = 0x00000080,
- HT_MCS8 = 0x00000100,
- HT_MCS9 = 0x00000200,
- HT_MCS10 = 0x00000400,
- HT_MCS11 = 0x00000800,
- HT_MCS12 = 0x00001000,
- HT_MCS13 = 0x00002000,
- HT_MCS14 = 0x00004000,
- HT_MCS15 = 0x00008000,
- // Do not define MCS32 here although 8190 support MCS32
-}HT_MCS_RATE,*PHT_MCS_RATE;
-
-//
-// Represent Channel Width in HT Capabilities
-//
-typedef enum _HT_CHANNEL_WIDTH{
- HT_CHANNEL_WIDTH_20 = 0,
- HT_CHANNEL_WIDTH_20_40 = 1,
-}HT_CHANNEL_WIDTH, *PHT_CHANNEL_WIDTH;
-
-//
-// Represent Extension Channel Offset in HT Capabilities
-// This is available only in 40Mhz mode.
-//
-typedef enum _HT_EXTCHNL_OFFSET{
- HT_EXTCHNL_OFFSET_NO_EXT = 0,
- HT_EXTCHNL_OFFSET_UPPER = 1,
- HT_EXTCHNL_OFFSET_NO_DEF = 2,
- HT_EXTCHNL_OFFSET_LOWER = 3,
-}HT_EXTCHNL_OFFSET, *PHT_EXTCHNL_OFFSET;
-
-typedef enum _CHNLOP{
- CHNLOP_NONE = 0, // No Action now
- CHNLOP_SCAN = 1, // Scan in progress
- CHNLOP_SWBW = 2, // Bandwidth switching in progress
- CHNLOP_SWCHNL = 3, // Software Channel switching in progress
-} CHNLOP, *PCHNLOP;
-
-// Determine if the Channel Operation is in progress
-#define CHHLOP_IN_PROGRESS(_pHTInfo) \
- ((_pHTInfo)->ChnlOp > CHNLOP_NONE) ? TRUE : FALSE
-
-/*
-typedef union _HT_CAPABILITY{
- u16 ShortData;
- u8 CharData[2];
- struct
- {
- u16 AdvCoding:1;
- u16 ChlWidth:1;
- u16 MimoPwrSave:2;
- u16 GreenField:1;
- u16 ShortGI20Mhz:1;
- u16 ShortGI40Mhz:1;
- u16 STBC:1;
- u16 BeamForm:1;
- u16 DelayBA:1;
- u16 MaxAMSDUSize:1;
- u16 DssCCk:1;
- u16 PSMP:1;
- u16 Rsvd:3;
- }Field;
-}HT_CAPABILITY, *PHT_CAPABILITY;
-
-typedef union _HT_CAPABILITY_MACPARA{
- u8 ShortData;
- u8 CharData[1];
- struct
- {
- u8 MaxRxAMPDU:2;
- u8 MPDUDensity:2;
- u8 Rsvd:4;
- }Field;
-}HT_CAPABILITY_MACPARA, *PHT_CAPABILITY_MACPARA;
-*/
-
-typedef enum _HT_ACTION{
- ACT_RECOMMAND_WIDTH = 0,
- ACT_MIMO_PWR_SAVE = 1,
- ACT_PSMP = 2,
- ACT_SET_PCO_PHASE = 3,
- ACT_MIMO_CHL_MEASURE = 4,
- ACT_RECIPROCITY_CORRECT = 5,
- ACT_MIMO_CSI_MATRICS = 6,
- ACT_MIMO_NOCOMPR_STEER = 7,
- ACT_MIMO_COMPR_STEER = 8,
- ACT_ANTENNA_SELECT = 9,
-} HT_ACTION, *PHT_ACTION;
-
-
-/* 2007/06/07 MH Define sub-carrier mode for 40MHZ. */
-typedef enum _HT_Bandwidth_40MHZ_Sub_Carrier{
- SC_MODE_DUPLICATE = 0,
- SC_MODE_LOWER = 1,
- SC_MODE_UPPER = 2,
- SC_MODE_FULL40MHZ = 3,
-}HT_BW40_SC_E;
-
-typedef struct _HT_CAPABILITY_ELE{
-
- //HT capability info
- u8 AdvCoding:1;
- u8 ChlWidth:1;
- u8 MimoPwrSave:2;
- u8 GreenField:1;
- u8 ShortGI20Mhz:1;
- u8 ShortGI40Mhz:1;
- u8 TxSTBC:1;
- u8 RxSTBC:2;
- u8 DelayBA:1;
- u8 MaxAMSDUSize:1;
- u8 DssCCk:1;
- u8 PSMP:1;
- u8 Rsvd1:1;
- u8 LSigTxopProtect:1;
-
- //MAC HT parameters info
- u8 MaxRxAMPDUFactor:2;
- u8 MPDUDensity:3;
- u8 Rsvd2:3;
-
- //Supported MCS set
- u8 MCS[16];
-
-
- //Extended HT Capability Info
- u16 ExtHTCapInfo;
-
- //TXBF Capabilities
- u8 TxBFCap[4];
-
- //Antenna Selection Capabilities
- u8 ASCap;
-
-} __attribute__ ((packed)) HT_CAPABILITY_ELE, *PHT_CAPABILITY_ELE;
-
-//------------------------------------------------------------
-// The HT Information element is present in beacons
-// Only AP is required to include this element
-//------------------------------------------------------------
-
-typedef struct _HT_INFORMATION_ELE{
- u8 ControlChl;
-
- u8 ExtChlOffset:2;
- u8 RecommemdedTxWidth:1;
- u8 RIFS:1;
- u8 PSMPAccessOnly:1;
- u8 SrvIntGranularity:3;
-
- u8 OptMode:2;
- u8 NonGFDevPresent:1;
- u8 Revd1:5;
- u8 Revd2:8;
-
- u8 Rsvd3:6;
- u8 DualBeacon:1;
- u8 DualCTSProtect:1;
-
- u8 SecondaryBeacon:1;
- u8 LSigTxopProtectFull:1;
- u8 PcoActive:1;
- u8 PcoPhase:1;
- u8 Rsvd4:4;
-
- u8 BasicMSC[16];
-} __attribute__ ((packed)) HT_INFORMATION_ELE, *PHT_INFORMATION_ELE;
-
-//
-// MIMO Power Save control field.
-// This is appear in MIMO Power Save Action Frame
-//
-typedef struct _MIMOPS_CTRL{
- u8 MimoPsEnable:1;
- u8 MimoPsMode:1;
- u8 Reserved:6;
-} MIMOPS_CTRL, *PMIMOPS_CTRL;
-
-typedef enum _HT_SPEC_VER{
- HT_SPEC_VER_IEEE = 0,
- HT_SPEC_VER_EWC = 1,
-}HT_SPEC_VER, *PHT_SPEC_VER;
-
-typedef enum _HT_AGGRE_MODE_E{
- HT_AGG_AUTO = 0,
- HT_AGG_FORCE_ENABLE = 1,
- HT_AGG_FORCE_DISABLE = 2,
-}HT_AGGRE_MODE_E, *PHT_AGGRE_MODE_E;
-
-//------------------------------------------------------------
-// The Data structure is used to keep HT related variables when card is
-// configured as non-AP STA mode. **Note** Current_xxx should be set
-// to default value in HTInitializeHTInfo()
-//------------------------------------------------------------
-
-typedef struct _RT_HIGH_THROUGHPUT{
- u8 bEnableHT;
- u8 bCurrentHTSupport;
-
- u8 bRegBW40MHz; // Tx 40MHz channel capablity
- u8 bCurBW40MHz; // Tx 40MHz channel capability
-
- u8 bRegShortGI40MHz; // Tx Short GI for 40Mhz
- u8 bCurShortGI40MHz; // Tx Short GI for 40MHz
-
- u8 bRegShortGI20MHz; // Tx Short GI for 20MHz
- u8 bCurShortGI20MHz; // Tx Short GI for 20MHz
-
- u8 bRegSuppCCK; // Tx CCK rate capability
- u8 bCurSuppCCK; // Tx CCK rate capability
-
- // 802.11n spec version for "peer"
- HT_SPEC_VER ePeerHTSpecVer;
-
-
- // HT related information for "Self"
- HT_CAPABILITY_ELE SelfHTCap; // This is HT cap element sent to peer STA, which also indicate HT Rx capabilities.
- HT_INFORMATION_ELE SelfHTInfo; // This is HT info element sent to peer STA, which also indicate HT Rx capabilities.
-
- // HT related information for "Peer"
- u8 PeerHTCapBuf[32];
- u8 PeerHTInfoBuf[32];
-
-
- // A-MSDU related
- u8 bAMSDU_Support; // This indicates Tx A-MSDU capability
- u16 nAMSDU_MaxSize; // This indicates Tx A-MSDU capability
- u8 bCurrent_AMSDU_Support; // This indicates Tx A-MSDU capability
- u16 nCurrent_AMSDU_MaxSize; // This indicates Tx A-MSDU capability
-
-
- // AMPDU related <2006.08.10 Emily>
- u8 bAMPDUEnable; // This indicate Tx A-MPDU capability
- u8 bCurrentAMPDUEnable; // This indicate Tx A-MPDU capability
- u8 AMPDU_Factor; // This indicate Tx A-MPDU capability
- u8 CurrentAMPDUFactor; // This indicate Tx A-MPDU capability
- u8 MPDU_Density; // This indicate Tx A-MPDU capability
- u8 CurrentMPDUDensity; // This indicate Tx A-MPDU capability
-
- // Forced A-MPDU enable
- HT_AGGRE_MODE_E ForcedAMPDUMode;
- u8 ForcedAMPDUFactor;
- u8 ForcedMPDUDensity;
-
- // Forced A-MSDU enable
- HT_AGGRE_MODE_E ForcedAMSDUMode;
- u16 ForcedAMSDUMaxSize;
-
- u8 bForcedShortGI;
-
- u8 CurrentOpMode;
-
- // MIMO PS related
- u8 SelfMimoPs;
- u8 PeerMimoPs;
-
- // 40MHz Channel Offset settings.
- HT_EXTCHNL_OFFSET CurSTAExtChnlOffset;
- u8 bCurTxBW40MHz; // If we use 40 MHz to Tx
- u8 PeerBandwidth;
-
- // For Bandwidth Switching
- u8 bSwBwInProgress;
- CHNLOP ChnlOp; // software switching channel in progress. By Bruce, 2008-02-15.
- u8 SwBwStep;
- //struct timer_list SwBwTimer; //moved to ieee80211_device. as timer_list need include some header file here.
-
- // For Realtek proprietary A-MPDU factor for aggregation
- u8 bRegRT2RTAggregation;
- u8 bCurrentRT2RTAggregation;
- u8 bCurrentRT2RTLongSlotTime;
- u8 szRT2RTAggBuffer[10];
-
- // Rx Reorder control
- u8 bRegRxReorderEnable;
- u8 bCurRxReorderEnable;
- u8 RxReorderWinSize;
- u8 RxReorderPendingTime;
- u16 RxReorderDropCounter;
-
-#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- u8 UsbTxAggrNum;
-#endif
-#ifdef USB_RX_AGGREGATION_SUPPORT
- u8 UsbRxFwAggrEn;
- u8 UsbRxFwAggrPageNum;
- u8 UsbRxFwAggrPacketNum;
- u8 UsbRxFwAggrTimeout;
-#endif
-
- // Add for Broadcom(Linksys) IOT. Joseph
- u8 bIsPeerBcm;
-
- // For IOT issue.
- u8 IOTPeer;
- u32 IOTAction;
-} __attribute__ ((packed)) RT_HIGH_THROUGHPUT, *PRT_HIGH_THROUGHPUT;
-
-
-//------------------------------------------------------------
-// The Data structure is used to keep HT related variable for "each Sta"
-// when card is configured as "AP mode"
-//------------------------------------------------------------
-
-typedef struct _RT_HTINFO_STA_ENTRY{
- u8 bEnableHT;
-
- u8 bSupportCck;
-
- u16 AMSDU_MaxSize;
-
- u8 AMPDU_Factor;
- u8 MPDU_Density;
-
- u8 HTHighestOperaRate;
-
- u8 bBw40MHz;
-
- u8 MimoPs;
-
- u8 McsRateSet[16];
-
-
-}RT_HTINFO_STA_ENTRY, *PRT_HTINFO_STA_ENTRY;
-
-
-
-
-
-//------------------------------------------------------------
-// The Data structure is used to keep HT related variable for "each AP"
-// when card is configured as "STA mode"
-//------------------------------------------------------------
-
-typedef struct _BSS_HT{
-
- u8 bdSupportHT;
-
- // HT related elements
- u8 bdHTCapBuf[32];
- u16 bdHTCapLen;
- u8 bdHTInfoBuf[32];
- u16 bdHTInfoLen;
-
- HT_SPEC_VER bdHTSpecVer;
- //HT_CAPABILITY_ELE bdHTCapEle;
- //HT_INFORMATION_ELE bdHTInfoEle;
-
- u8 bdRT2RTAggregation;
- u8 bdRT2RTLongSlotTime;
-} __attribute__ ((packed)) BSS_HT, *PBSS_HT;
-
-typedef struct _MIMO_RSSI{
- u32 EnableAntenna;
- u32 AntennaA;
- u32 AntennaB;
- u32 AntennaC;
- u32 AntennaD;
- u32 Average;
-}MIMO_RSSI, *PMIMO_RSSI;
-
-typedef struct _MIMO_EVM{
- u32 EVM1;
- u32 EVM2;
-}MIMO_EVM, *PMIMO_EVM;
-
-typedef struct _FALSE_ALARM_STATISTICS{
- u32 Cnt_Parity_Fail;
- u32 Cnt_Rate_Illegal;
- u32 Cnt_Crc8_fail;
- u32 Cnt_all;
-}FALSE_ALARM_STATISTICS, *PFALSE_ALARM_STATISTICS;
-
-
-extern u8 MCS_FILTER_ALL[16];
-extern u8 MCS_FILTER_1SS[16];
-
-/* 2007/07/11 MH Modify the macro. Becaus STA may link with a N-AP. If we set
- STA in A/B/G mode and AP is still in N mode. The macro will be wrong. We have
- to add a macro to judge wireless mode. */
-#define PICK_RATE(_nLegacyRate, _nMcsRate) \
- (_nMcsRate==0)?(_nLegacyRate&0x7f):(_nMcsRate)
-/* 2007/07/12 MH We only define legacy and HT wireless mode now. */
-#define LEGACY_WIRELESS_MODE IEEE_MODE_MASK
-
-#define CURRENT_RATE(WirelessMode, LegacyRate, HTRate) \
- ((WirelessMode & (LEGACY_WIRELESS_MODE))!=0)?\
- (LegacyRate):\
- (PICK_RATE(LegacyRate, HTRate))
-
-
-
-// MCS Bw 40 {1~7, 12~15,32}
-#define RATE_ADPT_1SS_MASK 0xFF
-#define RATE_ADPT_2SS_MASK 0xF0 //Skip MCS8~11 because mcs7 > mcs6, 9, 10, 11. 2007.01.16 by Emily
-#define RATE_ADPT_MCS32_MASK 0x01
-
-#define IS_11N_MCS_RATE(rate) (rate&0x80)
-
-typedef enum _HT_AGGRE_SIZE{
- HT_AGG_SIZE_8K = 0,
- HT_AGG_SIZE_16K = 1,
- HT_AGG_SIZE_32K = 2,
- HT_AGG_SIZE_64K = 3,
-}HT_AGGRE_SIZE_E, *PHT_AGGRE_SIZE_E;
-
-/* Indicate different AP vendor for IOT issue */
-typedef enum _HT_IOT_PEER
-{
- HT_IOT_PEER_UNKNOWN = 0,
- HT_IOT_PEER_REALTEK = 1,
- HT_IOT_PEER_BROADCOM = 2,
- HT_IOT_PEER_RALINK = 3,
- HT_IOT_PEER_ATHEROS = 4,
- HT_IOT_PEER_CISCO= 5,
- HT_IOT_PEER_MARVELL=6,
- HT_IOT_PEER_MAX = 7
-}HT_IOT_PEER_E, *PHTIOT_PEER_E;
-
-//
-// IOT Action for different AP
-//
-typedef enum _HT_IOT_ACTION{
- HT_IOT_ACT_TX_USE_AMSDU_4K = 0x00000001,
- HT_IOT_ACT_TX_USE_AMSDU_8K = 0x00000002,
- HT_IOT_ACT_DISABLE_MCS14 = 0x00000004,
- HT_IOT_ACT_DISABLE_MCS15 = 0x00000008,
- HT_IOT_ACT_DISABLE_ALL_2SS = 0x00000010,
- HT_IOT_ACT_DISABLE_EDCA_TURBO = 0x00000020,
- HT_IOT_ACT_MGNT_USE_CCK_6M = 0x00000040,
- HT_IOT_ACT_CDD_FSYNC = 0x00000080,
- HT_IOT_ACT_PURE_N_MODE = 0x00000100,
- HT_IOT_ACT_FORCED_CTS2SELF = 0x00000200,
- HT_IOT_ACT_NULL_DATA_POWER_SAVING = 0x00800000,
-}HT_IOT_ACTION_E, *PHT_IOT_ACTION_E;
-
-#endif //_RTL819XU_HTTYPE_H_
-
diff --git a/drivers/staging/rtl8192e/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192e/ieee80211/rtl819x_HTProc.c
deleted file mode 100644
index f7a9da3ec82..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/rtl819x_HTProc.c
+++ /dev/null
@@ -1,1732 +0,0 @@
-
-//As this function is mainly ported from Windows driver, so leave the name little changed. If any confusion caused, tell me. Created by WB. 2008.05.08
-#include "ieee80211.h"
-#include "rtl819x_HT.h"
-u8 MCS_FILTER_ALL[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
-u8 MCS_FILTER_1SS[16] = {0xff, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
-u16 MCS_DATA_RATE[2][2][77] =
- { { {13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78 ,104, 156, 208, 234, 260,
- 39, 78, 117, 234, 312, 351, 390, 52, 104, 156, 208, 312, 416, 468, 520,
- 0, 78, 104, 130, 117, 156, 195, 104, 130, 130, 156, 182, 182, 208, 156, 195,
- 195, 234, 273, 273, 312, 130, 156, 181, 156, 181, 208, 234, 208, 234, 260, 260,
- 286, 195, 234, 273, 234, 273, 312, 351, 312, 351, 390, 390, 429}, // Long GI, 20MHz
- {14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289,
- 43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520, 578,
- 0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231, 173, 217,
- 217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260, 231, 260, 289, 289,
- 318, 217, 260, 303, 260, 303, 347, 390, 347, 390, 433, 433, 477} }, // Short GI, 20MHz
- { {27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486, 540,
- 81, 162, 243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648, 864, 972, 1080,
- 12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324, 378, 378, 432, 324, 405,
- 405, 486, 567, 567, 648, 270, 324, 378, 324, 378, 432, 486, 432, 486, 540, 540,
- 594, 405, 486, 567, 486, 567, 648, 729, 648, 729, 810, 810, 891}, // Long GI, 40MHz
- {30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540, 600,
- 90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720, 960, 1080, 1200,
- 13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360, 420, 420, 480, 360, 450,
- 450, 540, 630, 630, 720, 300, 360, 420, 360, 420, 480, 540, 480, 540, 600, 600,
- 660, 450, 540, 630, 540, 630, 720, 810, 720, 810, 900, 900, 990} } // Short GI, 40MHz
- };
-
-static const u8 UNKNOWN_BORADCOM[3] = {0x00, 0x14, 0xbf};
-static const u8 LINKSYSWRT330_LINKSYSWRT300_BROADCOM[3] = {0x00, 0x1a, 0x70};
-static const u8 LINKSYSWRT350_LINKSYSWRT150_BROADCOM[3] = {0x00, 0x1d, 0x7e};
-//static u8 NETGEAR834Bv2_BROADCOM[3] = {0x00, 0x1b, 0x2f};
-static const u8 BELKINF5D8233V1_RALINK[3] = {0x00, 0x17, 0x3f};
-static const u8 BELKINF5D82334V3_RALINK[3] = {0x00, 0x1c, 0xdf};
-static const u8 PCI_RALINK[3] = {0x00, 0x90, 0xcc};
-static const u8 EDIMAX_RALINK[3] = {0x00, 0x0e, 0x2e};
-static const u8 AIRLINK_RALINK[3] = {0x00, 0x18, 0x02};
-static const u8 DLINK_ATHEROS[3] = {0x00, 0x1c, 0xf0};
-static const u8 CISCO_BROADCOM[3] = {0x00, 0x17, 0x94};
-static const u8 LINKSYS_MARVELL_4400N[3] = {0x00, 0x14, 0xa4};
-
-// 2008/04/01 MH For Cisco G mode RX TP We need to change FW duration. Should we put the
-// code in other place??
-//static u8 WIFI_CISCO_G_AP[3] = {0x00, 0x40, 0x96};
-/********************************************************************************************************************
- *function: This function update default settings in pHTInfo structure
- * input: PRT_HIGH_THROUGHPUT pHTInfo
- * output: none
- * return: none
- * notice: These value need be modified if any changes.
- * *****************************************************************************************************************/
-void HTUpdateDefaultSetting(struct ieee80211_device* ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-
- // ShortGI support
- pHTInfo->bRegShortGI20MHz= 1;
- pHTInfo->bRegShortGI40MHz= 1;
-
- // 40MHz channel support
- pHTInfo->bRegBW40MHz = 1;
-
- // CCK rate support in 40MHz channel
- if(pHTInfo->bRegBW40MHz)
- pHTInfo->bRegSuppCCK = 1;
- else
- pHTInfo->bRegSuppCCK = true;
-
- // AMSDU related
- pHTInfo->nAMSDU_MaxSize = 7935UL;
- pHTInfo->bAMSDU_Support = 0;
-
- // AMPDU related
- pHTInfo->bAMPDUEnable = 1;
- pHTInfo->AMPDU_Factor = 2; //// 0: 2n13(8K), 1:2n14(16K), 2:2n15(32K), 3:2n16(64k)
- pHTInfo->MPDU_Density = 0;// 0: No restriction, 1: 1/8usec, 2: 1/4usec, 3: 1/2usec, 4: 1usec, 5: 2usec, 6: 4usec, 7:8usec
-
- // MIMO Power Save
- pHTInfo->SelfMimoPs = 3;// 0: Static Mimo Ps, 1: Dynamic Mimo Ps, 3: No Limitation, 2: Reserved(Set to 3 automatically.)
- if(pHTInfo->SelfMimoPs == 2)
- pHTInfo->SelfMimoPs = 3;
- // 8190 only. Assign rate operation mode to firmware
- ieee->bTxDisableRateFallBack = 0;
- ieee->bTxUseDriverAssingedRate = 0;
-
-#ifdef TO_DO_LIST
- // 8190 only. Assign duration operation mode to firmware
- pMgntInfo->bTxEnableFwCalcDur = (BOOLEAN)pNdisCommon->bRegTxEnableFwCalcDur;
-#endif
- // 8190 only, Realtek proprietary aggregation mode
- // Set MPDUDensity=2, 1: Set MPDUDensity=2(32k) for Realtek AP and set MPDUDensity=0(8k) for others
- pHTInfo->bRegRT2RTAggregation = 1;//0: Set MPDUDensity=2, 1: Set MPDUDensity=2(32k) for Realtek AP and set MPDUDensity=0(8k) for others
-
- // For Rx Reorder Control
- pHTInfo->bRegRxReorderEnable = 1;
- pHTInfo->RxReorderWinSize = 64;
- pHTInfo->RxReorderPendingTime = 30;
-
-#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- pHTInfo->UsbTxAggrNum = 4;
-#endif
-#ifdef USB_RX_AGGREGATION_SUPPORT
- pHTInfo->UsbRxFwAggrEn = 1;
- pHTInfo->UsbRxFwAggrPageNum = 24;
- pHTInfo->UsbRxFwAggrPacketNum = 8;
- pHTInfo->UsbRxFwAggrTimeout = 16; ////usb rx FW aggregation timeout threshold.It's in units of 64us
-#endif
-
-
-}
-/********************************************************************************************************************
- *function: This function print out each field on HT capability IE mainly from (Beacon/ProbeRsp/AssocReq)
- * input: u8* CapIE //Capability IE to be printed out
- * u8* TitleString //mainly print out caller function
- * output: none
- * return: none
- * notice: Driver should not print out this message by default.
- * *****************************************************************************************************************/
-void HTDebugHTCapability(u8* CapIE, u8* TitleString )
-{
-
- static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
- PHT_CAPABILITY_ELE pCapELE;
-
- if(!memcmp(CapIE, EWC11NHTCap, sizeof(EWC11NHTCap)))
- {
- //EWC IE
- IEEE80211_DEBUG(IEEE80211_DL_HT, "EWC IE in %s()\n", __FUNCTION__);
- pCapELE = (PHT_CAPABILITY_ELE)(&CapIE[4]);
- }else
- pCapELE = (PHT_CAPABILITY_ELE)(&CapIE[0]);
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Capability>. Called by %s\n", TitleString );
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupported Channel Width = %s\n", (pCapELE->ChlWidth)?"20MHz": "20/40MHz");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 20M = %s\n", (pCapELE->ShortGI20Mhz)?"YES": "NO");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 40M = %s\n", (pCapELE->ShortGI40Mhz)?"YES": "NO");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport TX STBC = %s\n", (pCapELE->TxSTBC)?"YES": "NO");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMax AMSDU Size = %s\n", (pCapELE->MaxAMSDUSize)?"3839": "7935");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport CCK in 20/40 mode = %s\n", (pCapELE->DssCCk)?"YES": "NO");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMax AMPDU Factor = %d\n", pCapELE->MaxRxAMPDUFactor);
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMPDU Density = %d\n", pCapELE->MPDUDensity);
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMCS Rate Set = [%x][%x][%x][%x][%x]\n", pCapELE->MCS[0],\
- pCapELE->MCS[1], pCapELE->MCS[2], pCapELE->MCS[3], pCapELE->MCS[4]);
-}
-/********************************************************************************************************************
- *function: This function print out each field on HT Information IE mainly from (Beacon/ProbeRsp)
- * input: u8* InfoIE //Capability IE to be printed out
- * u8* TitleString //mainly print out caller function
- * output: none
- * return: none
- * notice: Driver should not print out this message by default.
- * *****************************************************************************************************************/
-void HTDebugHTInfo(u8* InfoIE, u8* TitleString)
-{
-
- static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
- PHT_INFORMATION_ELE pHTInfoEle;
-
- if(!memcmp(InfoIE, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
- {
- // Not EWC IE
- IEEE80211_DEBUG(IEEE80211_DL_HT, "EWC IE in %s()\n", __FUNCTION__);
- pHTInfoEle = (PHT_INFORMATION_ELE)(&InfoIE[4]);
- }else
- pHTInfoEle = (PHT_INFORMATION_ELE)(&InfoIE[0]);
-
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Information Element>. Called by %s\n", TitleString);
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tPrimary channel = %d\n", pHTInfoEle->ControlChl);
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSenondary channel =");
- switch(pHTInfoEle->ExtChlOffset)
- {
- case 0:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Not Present\n");
- break;
- case 1:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Upper channel\n");
- break;
- case 2:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Reserved. Eooro!!!\n");
- break;
- case 3:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Lower Channel\n");
- break;
- }
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tRecommended channel width = %s\n", (pHTInfoEle->RecommemdedTxWidth)?"20Mhz": "40Mhz");
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tOperation mode for protection = ");
- switch(pHTInfoEle->OptMode)
- {
- case 0:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "No Protection\n");
- break;
- case 1:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "HT non-member protection mode\n");
- break;
- case 2:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Suggest to open protection\n");
- break;
- case 3:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "HT mixed mode\n");
- break;
- }
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tBasic MCS Rate Set = [%x][%x][%x][%x][%x]\n", pHTInfoEle->BasicMSC[0],\
- pHTInfoEle->BasicMSC[1], pHTInfoEle->BasicMSC[2], pHTInfoEle->BasicMSC[3], pHTInfoEle->BasicMSC[4]);
-}
-
-/*
-* Return: true if station in half n mode and AP supports 40 bw
-*/
-bool IsHTHalfNmode40Bandwidth(struct ieee80211_device* ieee)
-{
- bool retValue = false;
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-
- if(pHTInfo->bCurrentHTSupport == false ) // wireless is n mode
- retValue = false;
- else if(pHTInfo->bRegBW40MHz == false) // station supports 40 bw
- retValue = false;
- else if (!ieee->GetHalfNmodeSupportByAPsHandler(ieee)) // station in half n mode
- retValue = false;
- else if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ChlWidth) // ap support 40 bw
- retValue = true;
- else
- retValue = false;
-
- return retValue;
-}
-
-bool IsHTHalfNmodeSGI(struct ieee80211_device* ieee, bool is40MHz)
-{
- bool retValue = false;
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-
- if(pHTInfo->bCurrentHTSupport == false ) // wireless is n mode
- retValue = false;
- else if (!ieee->GetHalfNmodeSupportByAPsHandler(ieee)) // station in half n mode
- retValue = false;
- else if(is40MHz) // ap support 40 bw
- {
- if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ShortGI40Mhz) // ap support 40 bw short GI
- retValue = true;
- else
- retValue = false;
- }
- else
- {
- if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ShortGI20Mhz) // ap support 40 bw short GI
- retValue = true;
- else
- retValue = false;
- }
-
- return retValue;
-}
-
-u16 HTHalfMcsToDataRate(struct ieee80211_device* ieee, u8 nMcsRate)
-{
-
- u8 is40MHz;
- u8 isShortGI;
-
- is40MHz = (IsHTHalfNmode40Bandwidth(ieee))?1:0;
- isShortGI = (IsHTHalfNmodeSGI(ieee, is40MHz))? 1:0;
-
- return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate&0x7f)];
-}
-
-
-u16 HTMcsToDataRate( struct ieee80211_device* ieee, u8 nMcsRate)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-
- u8 is40MHz = (pHTInfo->bCurBW40MHz)?1:0;
- u8 isShortGI = (pHTInfo->bCurBW40MHz)?
- ((pHTInfo->bCurShortGI40MHz)?1:0):
- ((pHTInfo->bCurShortGI20MHz)?1:0);
- return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate&0x7f)];
-}
-
-/********************************************************************************************************************
- *function: This function returns current datarate.
- * input: struct ieee80211_device* ieee
- * u8 nDataRate
- * output: none
- * return: tx rate
- * notice: quite unsure about how to use this function //wb
- * *****************************************************************************************************************/
-u16 TxCountToDataRate( struct ieee80211_device* ieee, u8 nDataRate)
-{
- //PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- u16 CCKOFDMRate[12] = {0x02 , 0x04 , 0x0b , 0x16 , 0x0c , 0x12 , 0x18 , 0x24 , 0x30 , 0x48 , 0x60 , 0x6c};
- u8 is40MHz = 0;
- u8 isShortGI = 0;
-
- if(nDataRate < 12)
- {
- return CCKOFDMRate[nDataRate];
- }
- else
- {
- if (nDataRate >= 0x10 && nDataRate <= 0x1f)//if(nDataRate > 11 && nDataRate < 28 )
- {
- is40MHz = 0;
- isShortGI = 0;
-
- // nDataRate = nDataRate - 12;
- }
- else if(nDataRate >=0x20 && nDataRate <= 0x2f ) //(27, 44)
- {
- is40MHz = 1;
- isShortGI = 0;
-
- //nDataRate = nDataRate - 28;
- }
- else if(nDataRate >= 0x30 && nDataRate <= 0x3f ) //(43, 60)
- {
- is40MHz = 0;
- isShortGI = 1;
-
- //nDataRate = nDataRate - 44;
- }
- else if(nDataRate >= 0x40 && nDataRate <= 0x4f ) //(59, 76)
- {
- is40MHz = 1;
- isShortGI = 1;
-
- //nDataRate = nDataRate - 60;
- }
- return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate&0xf];
- }
-}
-
-
-
-bool IsHTHalfNmodeAPs(struct ieee80211_device* ieee)
-{
- bool retValue = false;
- struct ieee80211_network* net = &ieee->current_network;
-#if 0
- if(ieee->bHalfNMode == false)
- retValue = false;
- else
-#endif
- if((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3)==0) ||
- (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3)==0) ||
- (memcmp(net->bssid, PCI_RALINK, 3)==0) ||
- (memcmp(net->bssid, EDIMAX_RALINK, 3)==0) ||
- (memcmp(net->bssid, AIRLINK_RALINK, 3)==0) ||
- (net->ralink_cap_exist))
- retValue = true;
- else if((memcmp(net->bssid, UNKNOWN_BORADCOM, 3)==0) ||
- (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)||
- (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)||
- //(memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3)==0) ||
- (net->broadcom_cap_exist))
- retValue = true;
- else if(net->bssht.bdRT2RTAggregation)
- retValue = true;
- else
- retValue = false;
-
- return retValue;
-}
-
-/********************************************************************************************************************
- *function: This function returns peer IOT.
- * input: struct ieee80211_device* ieee
- * output: none
- * return:
- * notice:
- * *****************************************************************************************************************/
-void HTIOTPeerDetermine(struct ieee80211_device* ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- struct ieee80211_network* net = &ieee->current_network;
- if(net->bssht.bdRT2RTAggregation)
- pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK;
- else if(net->broadcom_cap_exist){
- pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
- }
- else if((memcmp(net->bssid, UNKNOWN_BORADCOM, 3)==0) ||
- (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)||
- (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)){//||
- //(memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3)==0) ){
- pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
- }
- else if((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3)==0) ||
- (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3)==0) ||
- (memcmp(net->bssid, PCI_RALINK, 3)==0) ||
- (memcmp(net->bssid, EDIMAX_RALINK, 3)==0) ||
- (memcmp(net->bssid, AIRLINK_RALINK, 3)==0) ||
- net->ralink_cap_exist)
- pHTInfo->IOTPeer = HT_IOT_PEER_RALINK;
- else if((net->atheros_cap_exist )|| (memcmp(net->bssid, DLINK_ATHEROS, 3) == 0))
- pHTInfo->IOTPeer = HT_IOT_PEER_ATHEROS;
- else if(memcmp(net->bssid, CISCO_BROADCOM, 3)==0)
- pHTInfo->IOTPeer = HT_IOT_PEER_CISCO;
- else if ((memcmp(net->bssid, LINKSYS_MARVELL_4400N, 3) == 0) ||
- net->marvell_cap_exist){
- pHTInfo->IOTPeer = HT_IOT_PEER_MARVELL;
- }
- else
- pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
-
- IEEE80211_DEBUG(IEEE80211_DL_IOT, "Joseph debug!! IOTPEER: %x\n", pHTInfo->IOTPeer);
-}
-/********************************************************************************************************************
- *function: Check whether driver should declare received rate up to MCS13 only since some chipset is not good
- * at receiving MCS14~15 frame from some AP.
- * input: struct ieee80211_device* ieee
- * u8 * PeerMacAddr
- * output: none
- * return: return 1 if driver should declare MCS13 only(otherwise return 0)
- * *****************************************************************************************************************/
-u8 HTIOTActIsDisableMCS14(struct ieee80211_device* ieee, u8* PeerMacAddr)
-{
- u8 ret = 0;
-#if 0
- // Apply for 819u only
-#if (HAL_CODE_BASE==RTL8192 && DEV_BUS_TYPE==USB_INTERFACE)
- if((memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3)==0) ||
- (memcmp(PeerMacAddr, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)
- )
- {
- ret = 1;
- }
-
-
- if(pHTInfo->bCurrentRT2RTAggregation)
- {
- // The parameter of pHTInfo->bCurrentRT2RTAggregation must be decided previously
- ret = 1;
- }
-#endif
-#endif
- return ret;
- }
-
-u8 HTIOTActIsForcedCTS2Self(struct ieee80211_device *ieee, struct ieee80211_network *network)
-{
- u8 retValue = 0;
- //if(network->marvell_cap_exist)
- if(ieee->pHTInfo->IOTPeer == HT_IOT_PEER_MARVELL)
- {
- retValue = 1;
- }
-
- return retValue;
-}
-
-
-/**
-* Function: HTIOTActIsDisableMCS15
-*
-* Overview: Check whether driver should declare capability of receiving MCS15
-*
-* Input:
-* PADAPTER Adapter,
-*
-* Output: None
-* Return: true if driver should disable MCS15
-* 2008.04.15 Emily
-*/
-bool HTIOTActIsDisableMCS15(struct ieee80211_device* ieee)
-{
- bool retValue = false;
-
-#ifdef TODO
- // Apply for 819u only
-#if (HAL_CODE_BASE==RTL8192)
-
-#if (DEV_BUS_TYPE == USB_INTERFACE)
- // Alway disable MCS15 by Jerry Chang's request.by Emily, 2008.04.15
- retValue = true;
-#elif (DEV_BUS_TYPE == PCI_INTERFACE)
- // Enable MCS15 if the peer is Cisco AP. by Emily, 2008.05.12
-// if(pBssDesc->bCiscoCapExist)
-// retValue = false;
-// else
- retValue = false;
-#endif
-#endif
-#endif
- // Jerry Chang suggest that 8190 1x2 does not need to disable MCS15
-
- return retValue;
-}
-
-/**
-* Function: HTIOTActIsDisableMCSTwoSpatialStream
-*
-* Overview: Check whether driver should declare capability of receiving All 2 ss packets
-*
-* Input:
-* PADAPTER Adapter,
-*
-* Output: None
-* Return: true if driver should disable all two spatial stream packet
-* 2008.04.21 Emily
-*/
-bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device* ieee, u8 *PeerMacAddr)
-{
- bool retValue = false;
-
-#ifdef TODO
- // Apply for 819u only
-//#if (HAL_CODE_BASE==RTL8192)
-
- //This rule only apply to Belkin(Ralink) AP
- if(IS_UNDER_11N_AES_MODE(Adapter))
- {
- if((PlatformCompareMemory(PeerMacAddr, BELKINF5D8233V1_RALINK, 3)==0) ||
- (PlatformCompareMemory(PeerMacAddr, PCI_RALINK, 3)==0) ||
- (PlatformCompareMemory(PeerMacAddr, EDIMAX_RALINK, 3)==0))
- {
- //Set True to disable this function. Disable by default, Emily, 2008.04.23
- retValue = false;
- }
- }
-
-//#endif
-#endif
- return retValue;
-}
-
-/********************************************************************************************************************
- *function: Check whether driver should disable EDCA turbo mode
- * input: struct ieee80211_device* ieee
- * u8* PeerMacAddr
- * output: none
- * return: return 1 if driver should disable EDCA turbo mode(otherwise return 0)
- * *****************************************************************************************************************/
-u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device* ieee, u8* PeerMacAddr)
-{
- u8 retValue = false; // default enable EDCA Turbo mode.
- // Set specific EDCA parameter for different AP in DM handler.
-
- return retValue;
-#if 0
- if((memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3)==0)||
- (memcmp(PeerMacAddr, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)||
- (memcmp(PeerMacAddr, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)||
- (memcmp(PeerMacAddr, NETGEAR834Bv2_BROADCOM, 3)==0))
-
- {
- retValue = 1; //Linksys disable EDCA turbo mode
- }
-
- return retValue;
-#endif
-}
-
-/********************************************************************************************************************
- *function: Check whether we need to use OFDM to sned MGNT frame for broadcom AP
- * input: struct ieee80211_network *network //current network we live
- * output: none
- * return: return 1 if true
- * *****************************************************************************************************************/
-u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
-{
- u8 retValue = 0;
-
- // 2008/01/25 MH Judeg if we need to use OFDM to sned MGNT frame for broadcom AP.
- // 2008/01/28 MH We must prevent that we select null bssid to link.
-
- if(network->broadcom_cap_exist)
- {
- retValue = 1;
- }
-
- return retValue;
-}
-
-u8 HTIOTActIsCCDFsync(u8* PeerMacAddr)
-{
- u8 retValue = 0;
- if( (memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3)==0) ||
- (memcmp(PeerMacAddr, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0) ||
- (memcmp(PeerMacAddr, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) ==0))
- {
- retValue = 1;
- }
-
- return retValue;
-}
-
-//
-// Send null data for to tell AP that we are awake.
-//
-bool
-HTIOTActIsNullDataPowerSaving(struct ieee80211_device* ieee,struct ieee80211_network *network)
-{
- bool retValue = false;
-
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- {
- if(pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) // ||(pBssDesc->Vender == HT_IOT_PEER_ATHEROS && pBssDesc->SubTypeOfVender == HT_IOT_PEER_ATHEROS_DIR635))
- return true;
-
- }
- return retValue;
-}
-
-void HTResetIOTSetting(
- PRT_HIGH_THROUGHPUT pHTInfo
-)
-{
- pHTInfo->IOTAction = 0;
- pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
-}
-
-
-/********************************************************************************************************************
- *function: Construct Capablility Element in Beacon... if HTEnable is turned on
- * input: struct ieee80211_device* ieee
- * u8* posHTCap //pointer to store Capability Ele
- * u8* len //store length of CE
- * u8 IsEncrypt //whether encrypt, needed further
- * output: none
- * return: none
- * notice: posHTCap can't be null and should be initialized before.
- * *****************************************************************************************************************/
-void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u8* len, u8 IsEncrypt)
-{
- PRT_HIGH_THROUGHPUT pHT = ieee->pHTInfo;
- PHT_CAPABILITY_ELE pCapELE = NULL;
- //u8 bIsDeclareMCS13;
-
- if ((posHTCap == NULL) || (pHT == NULL))
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "posHTCap or pHTInfo can't be null in HTConstructCapabilityElement()\n");
- return;
- }
- memset(posHTCap, 0, *len);
- if(pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)
- {
- u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
- memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
- pCapELE = (PHT_CAPABILITY_ELE)&(posHTCap[4]);
- }else
- {
- pCapELE = (PHT_CAPABILITY_ELE)posHTCap;
- }
-
-
- //HT capability info
- pCapELE->AdvCoding = 0; // This feature is not supported now!!
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee))
- {
- pCapELE->ChlWidth = 0;
- }
- else
- {
- pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
- }
-
-// pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
- pCapELE->MimoPwrSave = pHT->SelfMimoPs;
- pCapELE->GreenField = 0; // This feature is not supported now!!
- pCapELE->ShortGI20Mhz = 1; // We can receive Short GI!!
- pCapELE->ShortGI40Mhz = 1; // We can receive Short GI!!
- //DbgPrint("TX HT cap/info ele BW=%d SG20=%d SG40=%d\n\r",
- //pCapELE->ChlWidth, pCapELE->ShortGI20Mhz, pCapELE->ShortGI40Mhz);
- pCapELE->TxSTBC = 1;
- pCapELE->RxSTBC = 0;
- pCapELE->DelayBA = 0; // Do not support now!!
- pCapELE->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE>=7935)?1:0;
- pCapELE->DssCCk = ((pHT->bRegBW40MHz)?(pHT->bRegSuppCCK?1:0):0);
- pCapELE->PSMP = 0; // Do not support now!!
- pCapELE->LSigTxopProtect = 0; // Do not support now!!
-
-
- //MAC HT parameters info
- // TODO: Nedd to take care of this part
- IEEE80211_DEBUG(IEEE80211_DL_HT, "TX HT cap/info ele BW=%d MaxAMSDUSize:%d DssCCk:%d\n", pCapELE->ChlWidth, pCapELE->MaxAMSDUSize, pCapELE->DssCCk);
-
- if( IsEncrypt)
- {
- pCapELE->MPDUDensity = 7; // 8us
- pCapELE->MaxRxAMPDUFactor = 2; // 2 is for 32 K and 3 is 64K
- }
- else
- {
- pCapELE->MaxRxAMPDUFactor = 3; // 2 is for 32 K and 3 is 64K
- pCapELE->MPDUDensity = 0; // no density
- }
-
- //Supported MCS set
- memcpy(pCapELE->MCS, ieee->Regdot11HTOperationalRateSet, 16);
- if(pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS15)
- pCapELE->MCS[1] &= 0x7f;
-
- if(pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS14)
- pCapELE->MCS[1] &= 0xbf;
-
- if(pHT->IOTAction & HT_IOT_ACT_DISABLE_ALL_2SS)
- pCapELE->MCS[1] &= 0x00;
-
- // 2008.06.12
- // For RTL819X, if pairwisekey = wep/tkip, ap is ralink, we support only MCS0~7.
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee))
- {
- int i;
- for(i = 1; i< 16; i++)
- pCapELE->MCS[i] = 0;
- }
-
- //Extended HT Capability Info
- memset(&pCapELE->ExtHTCapInfo, 0, 2);
-
-
- //TXBF Capabilities
- memset(pCapELE->TxBFCap, 0, 4);
-
- //Antenna Selection Capabilities
- pCapELE->ASCap = 0;
-//add 2 to give space for element ID and len when construct frames
- if(pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)
- *len = 30 + 2;
- else
- *len = 26 + 2;
-}
-/********************************************************************************************************************
- *function: Construct Information Element in Beacon... if HTEnable is turned on
- * input: struct ieee80211_device* ieee
- * u8* posHTCap //pointer to store Information Ele
- * u8* len //store len of
- * u8 IsEncrypt //whether encrypt, needed further
- * output: none
- * return: none
- * notice: posHTCap can't be null and be initialized before. only AP and IBSS sta should do this
- * *****************************************************************************************************************/
-void HTConstructInfoElement(struct ieee80211_device* ieee, u8* posHTInfo, u8* len, u8 IsEncrypt)
-{
- PRT_HIGH_THROUGHPUT pHT = ieee->pHTInfo;
- PHT_INFORMATION_ELE pHTInfoEle = (PHT_INFORMATION_ELE)posHTInfo;
- if ((posHTInfo == NULL) || (pHTInfoEle == NULL))
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "posHTInfo or pHTInfoEle can't be null in HTConstructInfoElement()\n");
- return;
- }
-
- memset(posHTInfo, 0, *len);
- if ( (ieee->iw_mode == IW_MODE_ADHOC) || (ieee->iw_mode == IW_MODE_MASTER)) //ap mode is not currently supported
- {
- pHTInfoEle->ControlChl = ieee->current_network.channel;
- pHTInfoEle->ExtChlOffset = ((pHT->bRegBW40MHz == false)?HT_EXTCHNL_OFFSET_NO_EXT:
- (ieee->current_network.channel<=6)?
- HT_EXTCHNL_OFFSET_UPPER:HT_EXTCHNL_OFFSET_LOWER);
- pHTInfoEle->RecommemdedTxWidth = pHT->bRegBW40MHz;
- pHTInfoEle->RIFS = 0;
- pHTInfoEle->PSMPAccessOnly = 0;
- pHTInfoEle->SrvIntGranularity = 0;
- pHTInfoEle->OptMode = pHT->CurrentOpMode;
- pHTInfoEle->NonGFDevPresent = 0;
- pHTInfoEle->DualBeacon = 0;
- pHTInfoEle->SecondaryBeacon = 0;
- pHTInfoEle->LSigTxopProtectFull = 0;
- pHTInfoEle->PcoActive = 0;
- pHTInfoEle->PcoPhase = 0;
-
- memset(pHTInfoEle->BasicMSC, 0, 16);
-
-
- *len = 22 + 2; //same above
-
- }
- else
- {
- //STA should not generate High Throughput Information Element
- *len = 0;
- }
-}
-
-/*
- * According to experiment, Realtek AP to STA (based on rtl8190) may achieve best performance
- * if both STA and AP set limitation of aggregation size to 32K, that is, set AMPDU density to 2
- * (Ref: IEEE 11n specification). However, if Realtek STA associates to other AP, STA should set
- * limitation of aggregation size to 8K, otherwise, performance of traffic stream from STA to AP
- * will be much less than the traffic stream from AP to STA if both of the stream runs concurrently
- * at the same time.
- *
- * Frame Format
- * Element ID Length OUI Type1 Reserved
- * 1 byte 1 byte 3 bytes 1 byte 1 byte
- *
- * OUI = 0x00, 0xe0, 0x4c,
- * Type = 0x02
- * Reserved = 0x00
- *
- * 2007.8.21 by Emily
-*/
-/********************************************************************************************************************
- *function: Construct Information Element in Beacon... in RT2RT condition
- * input: struct ieee80211_device* ieee
- * u8* posRT2RTAgg //pointer to store Information Ele
- * u8* len //store len
- * output: none
- * return: none
- * notice:
- * *****************************************************************************************************************/
-void HTConstructRT2RTAggElement(struct ieee80211_device* ieee, u8* posRT2RTAgg, u8* len)
-{
- if (posRT2RTAgg == NULL) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "posRT2RTAgg can't be null in HTConstructRT2RTAggElement()\n");
- return;
- }
- memset(posRT2RTAgg, 0, *len);
- *posRT2RTAgg++ = 0x00;
- *posRT2RTAgg++ = 0xe0;
- *posRT2RTAgg++ = 0x4c;
- *posRT2RTAgg++ = 0x02;
- *posRT2RTAgg++ = 0x01;
- *posRT2RTAgg = 0x10;//*posRT2RTAgg = 0x02;
-
- if(ieee->bSupportRemoteWakeUp) {
- *posRT2RTAgg |= 0x08;//RT_HT_CAP_USE_WOW;
- }
-
- *len = 6 + 2;
- return;
-#ifdef TODO
-#if(HAL_CODE_BASE == RTL8192 && DEV_BUS_TYPE == USB_INTERFACE)
- /*
- //Emily. If it is required to Ask Realtek AP to send AMPDU during AES mode, enable this
- section of code.
- if(IS_UNDER_11N_AES_MODE(Adapter))
- {
- posRT2RTAgg->Octet[5] |=RT_HT_CAP_USE_AMPDU;
- }else
- {
- posRT2RTAgg->Octet[5] &= 0xfb;
- }
- */
-
-#else
- // Do Nothing
-#endif
-
- posRT2RTAgg->Length = 6;
-#endif
-
-
-
-
-}
-
-
-/********************************************************************************************************************
- *function: Pick the right Rate Adaptive table to use
- * input: struct ieee80211_device* ieee
- * u8* pOperateMCS //A pointer to MCS rate bitmap
- * return: always we return true
- * notice:
- * *****************************************************************************************************************/
-u8 HT_PickMCSRate(struct ieee80211_device* ieee, u8* pOperateMCS)
-{
- u8 i;
- if (pOperateMCS == NULL)
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "pOperateMCS can't be null in HT_PickMCSRate()\n");
- return false;
- }
-
- switch(ieee->mode)
- {
- case IEEE_A:
- case IEEE_B:
- case IEEE_G:
- //legacy rate routine handled at selectedrate
-
- //no MCS rate
- for(i=0;i<=15;i++){
- pOperateMCS[i] = 0;
- }
- break;
-
- case IEEE_N_24G: //assume CCK rate ok
- case IEEE_N_5G:
- // Legacy part we only use 6, 5.5,2,1 for N_24G and 6 for N_5G.
- // Legacy part shall be handled at SelectRateSet().
-
- //HT part
- // TODO: may be different if we have different number of antenna
- pOperateMCS[0] &=RATE_ADPT_1SS_MASK; //support MCS 0~7
- pOperateMCS[1] &=RATE_ADPT_2SS_MASK;
- pOperateMCS[3] &=RATE_ADPT_MCS32_MASK;
- break;
-
- //should never reach here
- default:
-
- break;
-
- }
-
- return true;
-}
-
-/*
-* Description:
-* This function will get the highest speed rate in input MCS set.
-*
-* /param Adapter Pionter to Adapter entity
-* pMCSRateSet Pointer to MCS rate bitmap
-* pMCSFilter Pointer to MCS rate filter
-*
-* /return Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter.
-*
-*/
-/********************************************************************************************************************
- *function: This function will get the highest speed rate in input MCS set.
- * input: struct ieee80211_device* ieee
- * u8* pMCSRateSet //Pointer to MCS rate bitmap
- * u8* pMCSFilter //Pointer to MCS rate filter
- * return: Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter
- * notice:
- * *****************************************************************************************************************/
-u8 HTGetHighestMCSRate(struct ieee80211_device* ieee, u8* pMCSRateSet, u8* pMCSFilter)
-{
- u8 i, j;
- u8 bitMap;
- u8 mcsRate = 0;
- u8 availableMcsRate[16];
- if (pMCSRateSet == NULL || pMCSFilter == NULL)
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "pMCSRateSet or pMCSFilter can't be null in HTGetHighestMCSRate()\n");
- return false;
- }
- for(i=0; i<16; i++)
- availableMcsRate[i] = pMCSRateSet[i] & pMCSFilter[i];
-
- for(i = 0; i < 16; i++)
- {
- if(availableMcsRate[i] != 0)
- break;
- }
- if(i == 16)
- return false;
-
- for(i = 0; i < 16; i++)
- {
- if(availableMcsRate[i] != 0)
- {
- bitMap = availableMcsRate[i];
- for(j = 0; j < 8; j++)
- {
- if((bitMap%2) != 0)
- {
- if(HTMcsToDataRate(ieee, (8*i+j)) > HTMcsToDataRate(ieee, mcsRate))
- mcsRate = (8*i+j);
- }
- bitMap = bitMap>>1;
- }
- }
- }
- return (mcsRate|0x80);
-}
-
-
-
-/*
-**
-**1.Filter our operation rate set with AP's rate set
-**2.shall reference channel bandwidth, STBC, Antenna number
-**3.generate rate adative table for firmware
-**David 20060906
-**
-** \pHTSupportedCap: the connected STA's supported rate Capability element
-*/
-u8 HTFilterMCSRate( struct ieee80211_device* ieee, u8* pSupportMCS, u8* pOperateMCS)
-{
-
- u8 i=0;
-
- // filter out operational rate set not supported by AP, the lenth of it is 16
- for(i=0;i<=15;i++){
- pOperateMCS[i] = ieee->Regdot11HTOperationalRateSet[i]&pSupportMCS[i];
- }
-
-
- // TODO: adjust our operational rate set according to our channel bandwidth, STBC and Antenna number
-
- // TODO: fill suggested rate adaptive rate index and give firmware info using Tx command packet
- // we also shall suggested the first start rate set according to our singal strength
- HT_PickMCSRate(ieee, pOperateMCS);
-
- // For RTL819X, if pairwisekey = wep/tkip, we support only MCS0~7.
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee))
- pOperateMCS[1] = 0;
-
- //
- // For RTL819X, we support only MCS0~15.
- // And also, we do not know how to use MCS32 now.
- //
- for(i=2; i<=15; i++)
- pOperateMCS[i] = 0;
-
- return true;
-}
-void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
-void HTOnAssocRsp(struct ieee80211_device *ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- PHT_CAPABILITY_ELE pPeerHTCap = NULL;
- PHT_INFORMATION_ELE pPeerHTInfo = NULL;
- u16 nMaxAMSDUSize = 0;
- u8* pMcsFilter = NULL;
-
- static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
- static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
-
- if( pHTInfo->bCurrentHTSupport == false )
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "<=== HTOnAssocRsp(): HT_DISABLE\n");
- return;
- }
- IEEE80211_DEBUG(IEEE80211_DL_HT, "===> HTOnAssocRsp_wq(): HT_ENABLE\n");
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, pHTInfo->PeerHTCapBuf, sizeof(HT_CAPABILITY_ELE));
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, pHTInfo->PeerHTInfoBuf, sizeof(HT_INFORMATION_ELE));
-
-// HTDebugHTCapability(pHTInfo->PeerHTCapBuf,"HTOnAssocRsp_wq");
-// HTDebugHTInfo(pHTInfo->PeerHTInfoBuf,"HTOnAssocRsp_wq");
- //
- if(!memcmp(pHTInfo->PeerHTCapBuf,EWC11NHTCap, sizeof(EWC11NHTCap)))
- pPeerHTCap = (PHT_CAPABILITY_ELE)(&pHTInfo->PeerHTCapBuf[4]);
- else
- pPeerHTCap = (PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf);
-
- if(!memcmp(pHTInfo->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
- pPeerHTInfo = (PHT_INFORMATION_ELE)(&pHTInfo->PeerHTInfoBuf[4]);
- else
- pPeerHTInfo = (PHT_INFORMATION_ELE)(pHTInfo->PeerHTInfoBuf);
-
-
- ////////////////////////////////////////////////////////
- // Configurations:
- ////////////////////////////////////////////////////////
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_HT, pPeerHTCap, sizeof(HT_CAPABILITY_ELE));
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_HT, pPeerHTInfo, sizeof(HT_INFORMATION_ELE));
- // Config Supported Channel Width setting
- //
- HTSetConnectBwMode(ieee, (HT_CHANNEL_WIDTH)(pPeerHTCap->ChlWidth), (HT_EXTCHNL_OFFSET)(pPeerHTInfo->ExtChlOffset));
-
-// if(pHTInfo->bCurBW40MHz == true)
- pHTInfo->bCurTxBW40MHz = ((pPeerHTInfo->RecommemdedTxWidth == 1)?true:false);
-
- //
- // Update short GI/ long GI setting
- //
- // TODO:
- pHTInfo->bCurShortGI20MHz=
- ((pHTInfo->bRegShortGI20MHz)?((pPeerHTCap->ShortGI20Mhz==1)?true:false):false);
- pHTInfo->bCurShortGI40MHz=
- ((pHTInfo->bRegShortGI40MHz)?((pPeerHTCap->ShortGI40Mhz==1)?true:false):false);
-
- //
- // Config TX STBC setting
- //
- // TODO:
-
- //
- // Config DSSS/CCK mode in 40MHz mode
- //
- // TODO:
- pHTInfo->bCurSuppCCK =
- ((pHTInfo->bRegSuppCCK)?((pPeerHTCap->DssCCk==1)?true:false):false);
-
-
- //
- // Config and configure A-MSDU setting
- //
- pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;
- if (ieee->rtllib_ap_sec_type &&
- (ieee->rtllib_ap_sec_type(ieee)&(SEC_ALG_WEP|SEC_ALG_TKIP))){
- if( (pHTInfo->IOTPeer== HT_IOT_PEER_ATHEROS) ||
- (pHTInfo->IOTPeer == HT_IOT_PEER_UNKNOWN) )
- pHTInfo->bCurrentAMPDUEnable = false;
- }
-
-
- nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize==0)?3839:7935;
-
- if(pHTInfo->nAMSDU_MaxSize > nMaxAMSDUSize )
- pHTInfo->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize;
- else
- pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
-
-
- //
- // Config A-MPDU setting
- //
- pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
-
- // <1> Decide AMPDU Factor
-
- // By Emily
- if(!pHTInfo->bRegRT2RTAggregation)
- {
- // Decide AMPDU Factor according to protocol handshake
- if(pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
- pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
- else
- pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
-
- }else
- {
- // Set MPDU density to 2 to Realtek AP, and set it to 0 for others
- // Replace MPDU factor declared in original association response frame format. 2007.08.20 by Emily
-#if 0
- osTmp= PacketGetElement( asocpdu, EID_Vendor, OUI_SUB_REALTEK_AGG, OUI_SUBTYPE_DONT_CARE);
- if(osTmp.Length >= 5) //00:e0:4c:02:00
-#endif
- if (ieee->current_network.bssht.bdRT2RTAggregation)
- {
- if( ieee->pairwise_key_type != KEY_TYPE_NA)
- // Realtek may set 32k in security mode and 64k for others
- pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
- else
- pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
- }else
- {
- if(pPeerHTCap->MaxRxAMPDUFactor < HT_AGG_SIZE_32K)
- pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
- else
- pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_32K;
- }
- }
-
- // <2> Set AMPDU Minimum MPDU Start Spacing
- // 802.11n 3.0 section 9.7d.3
-#if 1
- if(pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
- pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
- else
- pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
- if(ieee->pairwise_key_type != KEY_TYPE_NA )
- pHTInfo->CurrentMPDUDensity = 7; // 8us
-#else
- if(pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
- pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
- else
- pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
-#endif
- // Force TX AMSDU
-
- // Lanhsin: mark for tmp to avoid deauth by ap from s3
- //if(memcmp(pMgntInfo->Bssid, NETGEAR834Bv2_BROADCOM, 3)==0)
- if(0)
- {
-
- pHTInfo->bCurrentAMPDUEnable = false;
- pHTInfo->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE;
- pHTInfo->ForcedAMSDUMaxSize = 7935;
-
- pHTInfo->IOTAction |= HT_IOT_ACT_TX_USE_AMSDU_8K;
- }
-
- // Rx Reorder Setting
- pHTInfo->bCurRxReorderEnable = pHTInfo->bRegRxReorderEnable;
-
- //
- // Filter out unsupported HT rate for this AP
- // Update RATR table
- // This is only for 8190 ,8192 or later product which using firmware to handle rate adaptive mechanism.
- //
-
- // Handle Ralink AP bad MCS rate set condition. Joseph.
- // This fix the bug of Ralink AP. This may be removed in the future.
- if(pPeerHTCap->MCS[0] == 0)
- pPeerHTCap->MCS[0] = 0xff;
-
- HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11HTOperationalRateSet);
-
- //
- // Config MIMO Power Save setting
- //
- pHTInfo->PeerMimoPs = pPeerHTCap->MimoPwrSave;
- if(pHTInfo->PeerMimoPs == MIMO_PS_STATIC)
- pMcsFilter = MCS_FILTER_1SS;
- else
- pMcsFilter = MCS_FILTER_ALL;
- //WB add for MCS8 bug
-// pMcsFilter = MCS_FILTER_1SS;
- ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, pMcsFilter);
- ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
-
- //
- // Config current operation mode.
- //
- pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
-
-
-
-}
-
-void HTSetConnectBwModeCallback(struct ieee80211_device* ieee);
-/********************************************************************************************************************
- *function: initialize HT info(struct PRT_HIGH_THROUGHPUT)
- * input: struct ieee80211_device* ieee
- * output: none
- * return: none
- * notice: This function is called when * (1) MPInitialization Phase * (2) Receiving of Deauthentication from AP
-********************************************************************************************************************/
-// TODO: Should this funciton be called when receiving of Disassociation?
-void HTInitializeHTInfo(struct ieee80211_device* ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-
- //
- // These parameters will be reset when receiving deauthentication packet
- //
- IEEE80211_DEBUG(IEEE80211_DL_HT, "===========>%s()\n", __FUNCTION__);
- pHTInfo->bCurrentHTSupport = false;
-
- // 40MHz channel support
- pHTInfo->bCurBW40MHz = false;
- pHTInfo->bCurTxBW40MHz = false;
-
- // Short GI support
- pHTInfo->bCurShortGI20MHz = false;
- pHTInfo->bCurShortGI40MHz = false;
- pHTInfo->bForcedShortGI = false;
-
- // CCK rate support
- // This flag is set to true to support CCK rate by default.
- // It will be affected by "pHTInfo->bRegSuppCCK" and AP capabilities only when associate to
- // 11N BSS.
- pHTInfo->bCurSuppCCK = true;
-
- // AMSDU related
- pHTInfo->bCurrent_AMSDU_Support = false;
- pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
-
- // AMPUD related
- pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
- pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
-
-
-
- // Initialize all of the parameters related to 11n
- memset((void*)(&(pHTInfo->SelfHTCap)), 0, sizeof(pHTInfo->SelfHTCap));
- memset((void*)(&(pHTInfo->SelfHTInfo)), 0, sizeof(pHTInfo->SelfHTInfo));
- memset((void*)(&(pHTInfo->PeerHTCapBuf)), 0, sizeof(pHTInfo->PeerHTCapBuf));
- memset((void*)(&(pHTInfo->PeerHTInfoBuf)), 0, sizeof(pHTInfo->PeerHTInfoBuf));
-
- pHTInfo->bSwBwInProgress = false;
- pHTInfo->ChnlOp = CHNLOP_NONE;
-
- // Set default IEEE spec for Draft N
- pHTInfo->ePeerHTSpecVer = HT_SPEC_VER_IEEE;
-
- // Realtek proprietary aggregation mode
- pHTInfo->bCurrentRT2RTAggregation = false;
- pHTInfo->bCurrentRT2RTLongSlotTime = false;
- pHTInfo->IOTPeer = 0;
- pHTInfo->IOTAction = 0;
-
- //MCS rate initialized here
- {
- u8* RegHTSuppRateSets = &(ieee->RegHTSuppRateSet[0]);
- RegHTSuppRateSets[0] = 0xFF; //support MCS 0~7
- RegHTSuppRateSets[1] = 0xFF; //support MCS 8~15
- RegHTSuppRateSets[4] = 0x01; //support MCS 32
- }
-}
-/********************************************************************************************************************
- *function: initialize Bss HT structure(struct PBSS_HT)
- * input: PBSS_HT pBssHT //to be initialized
- * output: none
- * return: none
- * notice: This function is called when initialize network structure
-********************************************************************************************************************/
-void HTInitializeBssDesc(PBSS_HT pBssHT)
-{
-
- pBssHT->bdSupportHT = false;
- memset(pBssHT->bdHTCapBuf, 0, sizeof(pBssHT->bdHTCapBuf));
- pBssHT->bdHTCapLen = 0;
- memset(pBssHT->bdHTInfoBuf, 0, sizeof(pBssHT->bdHTInfoBuf));
- pBssHT->bdHTInfoLen = 0;
-
- pBssHT->bdHTSpecVer= HT_SPEC_VER_IEEE;
-
- pBssHT->bdRT2RTAggregation = false;
- pBssHT->bdRT2RTLongSlotTime = false;
-}
-#if 0
-//below function has merged into ieee80211_network_init() in ieee80211_rx.c
-void
-HTParsingHTCapElement(
- IN PADAPTER Adapter,
- IN OCTET_STRING HTCapIE,
- OUT PRT_WLAN_BSS pBssDesc
-)
-{
- PMGNT_INFO pMgntInfo = &Adapter->MgntInfo;
-
- if( HTCapIE.Length > sizeof(pBssDesc->BssHT.bdHTCapBuf) )
- {
- RT_TRACE( COMP_HT, DBG_LOUD, ("HTParsingHTCapElement(): HT Capability Element length is too long!\n") );
- return;
- }
-
- // TODO: Check the correctness of HT Cap
- //Print each field in detail. Driver should not print out this message by default
- if(!pMgntInfo->mActingAsAp && !pMgntInfo->mAssoc)
- HTDebugHTCapability(DBG_TRACE, Adapter, &HTCapIE, (pu8)"HTParsingHTCapElement()");
-
- HTCapIE.Length = HTCapIE.Length > sizeof(pBssDesc->BssHT.bdHTCapBuf)?\
- sizeof(pBssDesc->BssHT.bdHTCapBuf):HTCapIE.Length; //prevent from overflow
-
- CopyMem(pBssDesc->BssHT.bdHTCapBuf, HTCapIE.Octet, HTCapIE.Length);
- pBssDesc->BssHT.bdHTCapLen = HTCapIE.Length;
-
-}
-
-
-void
-HTParsingHTInfoElement(
- PADAPTER Adapter,
- OCTET_STRING HTInfoIE,
- PRT_WLAN_BSS pBssDesc
-)
-{
- PMGNT_INFO pMgntInfo = &Adapter->MgntInfo;
-
- if( HTInfoIE.Length > sizeof(pBssDesc->BssHT.bdHTInfoBuf))
- {
- RT_TRACE( COMP_HT, DBG_LOUD, ("HTParsingHTInfoElement(): HT Information Element length is too long!\n") );
- return;
- }
-
- // TODO: Check the correctness of HT Info
- //Print each field in detail. Driver should not print out this message by default
- if(!pMgntInfo->mActingAsAp && !pMgntInfo->mAssoc)
- HTDebugHTInfo(DBG_TRACE, Adapter, &HTInfoIE, (pu8)"HTParsingHTInfoElement()");
-
- HTInfoIE.Length = HTInfoIE.Length > sizeof(pBssDesc->BssHT.bdHTInfoBuf)?\
- sizeof(pBssDesc->BssHT.bdHTInfoBuf):HTInfoIE.Length; //prevent from overflow
-
- CopyMem( pBssDesc->BssHT.bdHTInfoBuf, HTInfoIE.Octet, HTInfoIE.Length);
- pBssDesc->BssHT.bdHTInfoLen = HTInfoIE.Length;
-}
-
-/*
- * Get HT related information from beacon and save it in BssDesc
- *
- * (1) Parse HTCap, and HTInfo, and record whether it is 11n AP
- * (2) If peer is HT, but not WMM, call QosSetLegacyWMMParamWithHT()
- * (3) Check whether peer is Realtek AP (for Realtek proprietary aggregation mode).
- * Input:
- * PADAPTER Adapter
- *
- * Output:
- * PRT_TCB BssDesc
- *
-*/
-void HTGetValueFromBeaconOrProbeRsp(
- PADAPTER Adapter,
- POCTET_STRING pSRCmmpdu,
- PRT_WLAN_BSS bssDesc
-)
-{
- PMGNT_INFO pMgntInfo = &Adapter->MgntInfo;
- PRT_HIGH_THROUGHPUT pHTInfo = GET_HT_INFO(pMgntInfo);
- OCTET_STRING HTCapIE, HTInfoIE, HTRealtekAgg, mmpdu;
- OCTET_STRING BroadcomElement, CiscoElement;
-
- mmpdu.Octet = pSRCmmpdu->Octet;
- mmpdu.Length = pSRCmmpdu->Length;
-
- //2Note:
- // Mark for IOT testing using Linksys WRT350N, This AP does not contain WMM IE when
- // it is configured at pure-N mode.
- // if(bssDesc->BssQos.bdQoSMode & QOS_WMM)
- //
-
- HTInitializeBssDesc (&bssDesc->BssHT);
-
- //2<1> Parse HTCap, and HTInfo
- // Get HT Capability IE: (1) Get IEEE Draft N IE or (2) Get EWC IE
- HTCapIE = PacketGetElement(mmpdu, EID_HTCapability, OUI_SUB_DONT_CARE, OUI_SUBTYPE_DONT_CARE);
- if(HTCapIE.Length == 0)
- {
- HTCapIE = PacketGetElement(mmpdu, EID_Vendor, OUI_SUB_11N_EWC_HT_CAP, OUI_SUBTYPE_DONT_CARE);
- if(HTCapIE.Length != 0)
- bssDesc->BssHT.bdHTSpecVer= HT_SPEC_VER_EWC;
- }
- if(HTCapIE.Length != 0)
- HTParsingHTCapElement(Adapter, HTCapIE, bssDesc);
-
- // Get HT Information IE: (1) Get IEEE Draft N IE or (2) Get EWC IE
- HTInfoIE = PacketGetElement(mmpdu, EID_HTInfo, OUI_SUB_DONT_CARE, OUI_SUBTYPE_DONT_CARE);
- if(HTInfoIE.Length == 0)
- {
- HTInfoIE = PacketGetElement(mmpdu, EID_Vendor, OUI_SUB_11N_EWC_HT_INFO, OUI_SUBTYPE_DONT_CARE);
- if(HTInfoIE.Length != 0)
- bssDesc->BssHT.bdHTSpecVer = HT_SPEC_VER_EWC;
- }
- if(HTInfoIE.Length != 0)
- HTParsingHTInfoElement(Adapter, HTInfoIE, bssDesc);
-
- //2<2>If peer is HT, but not WMM, call QosSetLegacyWMMParamWithHT()
- if(HTCapIE.Length != 0)
- {
- bssDesc->BssHT.bdSupportHT = true;
- if(bssDesc->BssQos.bdQoSMode == QOS_DISABLE)
- QosSetLegacyWMMParamWithHT(Adapter, bssDesc);
- }
- else
- {
- bssDesc->BssHT.bdSupportHT = false;
- }
-
- //2<3>Check whether the peer is Realtek AP/STA
- if(pHTInfo->bRegRT2RTAggregation)
- {
- if(bssDesc->BssHT.bdSupportHT)
- {
- HTRealtekAgg = PacketGetElement(mmpdu, EID_Vendor, OUI_SUB_REALTEK_AGG, OUI_SUBTYPE_DONT_CARE);
- if(HTRealtekAgg.Length >=5 )
- {
- bssDesc->BssHT.bdRT2RTAggregation = true;
-
- if((HTRealtekAgg.Octet[4]==1) && (HTRealtekAgg.Octet[5] & 0x02))
- bssDesc->BssHT.bdRT2RTLongSlotTime = true;
- }
- }
- }
-
- //
- // 2008/01/25 MH Get Broadcom AP IE for manamgent frame CCK rate problem.
- // AP can not receive CCK managemtn from from 92E.
- //
-
- // Initialize every new bss broadcom cap exist as false..
- bssDesc->bBroadcomCapExist= false;
-
- if(HTCapIE.Length != 0 || HTInfoIE.Length != 0)
- {
- u4Byte Length = 0;
-
- FillOctetString(BroadcomElement, NULL, 0);
-
- BroadcomElement = PacketGetElement( mmpdu, EID_Vendor, OUI_SUB_BROADCOM_IE_1, OUI_SUBTYPE_DONT_CARE);
- Length += BroadcomElement.Length;
- BroadcomElement = PacketGetElement( mmpdu, EID_Vendor, OUI_SUB_BROADCOM_IE_2, OUI_SUBTYPE_DONT_CARE);
- Length += BroadcomElement.Length;
- BroadcomElement = PacketGetElement( mmpdu, EID_Vendor, OUI_SUB_BROADCOM_IE_3, OUI_SUBTYPE_DONT_CARE);
- Length += BroadcomElement.Length;
-
- if(Length > 0)
- bssDesc->bBroadcomCapExist = true;
- }
-
-
- // For Cisco IOT issue
- CiscoElement = PacketGetElement( mmpdu, EID_Vendor, OUI_SUB_CISCO_IE, OUI_SUBTYPE_DONT_CARE);
- if(CiscoElement.Length != 0){ // 3: 0x00, 0x40, 0x96 ....
- bssDesc->bCiscoCapExist = true;
- }else{
- bssDesc->bCiscoCapExist = false;
- }
-}
-
-
-#endif
-/********************************************************************************************************************
- *function: initialize Bss HT structure(struct PBSS_HT)
- * input: struct ieee80211_device *ieee
- * struct ieee80211_network *pNetwork //usually current network we are live in
- * output: none
- * return: none
- * notice: This function should ONLY be called before association
-********************************************************************************************************************/
-void HTResetSelfAndSavePeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-// u16 nMaxAMSDUSize;
-// PHT_CAPABILITY_ELE pPeerHTCap = (PHT_CAPABILITY_ELE)pNetwork->bssht.bdHTCapBuf;
-// PHT_INFORMATION_ELE pPeerHTInfo = (PHT_INFORMATION_ELE)pNetwork->bssht.bdHTInfoBuf;
-// u8* pMcsFilter;
- u8 bIOTAction = 0;
-
- //
- // Save Peer Setting before Association
- //
- IEEE80211_DEBUG(IEEE80211_DL_HT, "==============>%s()\n", __FUNCTION__);
- /*unmark bEnableHT flag here is the same reason why unmarked in function ieee80211_softmac_new_net. WB 2008.09.10*/
-// if( pHTInfo->bEnableHT && pNetwork->bssht.bdSupportHT)
- if (pNetwork->bssht.bdSupportHT)
- {
- pHTInfo->bCurrentHTSupport = true;
- pHTInfo->ePeerHTSpecVer = pNetwork->bssht.bdHTSpecVer;
-
- // Save HTCap and HTInfo information Element
- if(pNetwork->bssht.bdHTCapLen > 0 && pNetwork->bssht.bdHTCapLen <= sizeof(pHTInfo->PeerHTCapBuf))
- memcpy(pHTInfo->PeerHTCapBuf, pNetwork->bssht.bdHTCapBuf, pNetwork->bssht.bdHTCapLen);
-
- if(pNetwork->bssht.bdHTInfoLen > 0 && pNetwork->bssht.bdHTInfoLen <= sizeof(pHTInfo->PeerHTInfoBuf))
- memcpy(pHTInfo->PeerHTInfoBuf, pNetwork->bssht.bdHTInfoBuf, pNetwork->bssht.bdHTInfoLen);
-
- // Check whether RT to RT aggregation mode is enabled
- if(pHTInfo->bRegRT2RTAggregation)
- {
- pHTInfo->bCurrentRT2RTAggregation = pNetwork->bssht.bdRT2RTAggregation;
- pHTInfo->bCurrentRT2RTLongSlotTime = pNetwork->bssht.bdRT2RTLongSlotTime;
- }
- else
- {
- pHTInfo->bCurrentRT2RTAggregation = false;
- pHTInfo->bCurrentRT2RTLongSlotTime = false;
- }
-
- // Determine the IOT Peer Vendor.
- HTIOTPeerDetermine(ieee);
-
- // Decide IOT Action
- // Must be called after the parameter of pHTInfo->bCurrentRT2RTAggregation is decided
- pHTInfo->IOTAction = 0;
- bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid);
- if(bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS14;
- bIOTAction = HTIOTActIsForcedCTS2Self(ieee, pNetwork);
- if(bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_CTS2SELF;
-
- bIOTAction = HTIOTActIsDisableMCS15(ieee);
- if(bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS15;
-
- bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee, pNetwork->bssid);
- if(bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_ALL_2SS;
-
-
- bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid);
- if(bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_EDCA_TURBO;
-
- bIOTAction = HTIOTActIsMgntUseCCK6M(pNetwork);
- if(bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_MGNT_USE_CCK_6M;
-
- bIOTAction = HTIOTActIsCCDFsync(pNetwork->bssid);
- if(bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_CDD_FSYNC;
-
- bIOTAction = HTIOTActIsNullDataPowerSaving(ieee, pNetwork);
- if(bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_NULL_DATA_POWER_SAVING;
-
- }
- else
- {
- pHTInfo->bCurrentHTSupport = false;
- pHTInfo->bCurrentRT2RTAggregation = false;
- pHTInfo->bCurrentRT2RTLongSlotTime = false;
-
- pHTInfo->IOTAction = 0;
- }
-
-}
-
-void HTUpdateSelfAndPeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-// PHT_CAPABILITY_ELE pPeerHTCap = (PHT_CAPABILITY_ELE)pNetwork->bssht.bdHTCapBuf;
- PHT_INFORMATION_ELE pPeerHTInfo = (PHT_INFORMATION_ELE)pNetwork->bssht.bdHTInfoBuf;
-
- if(pHTInfo->bCurrentHTSupport)
- {
- //
- // Config current operation mode.
- //
- if(pNetwork->bssht.bdHTInfoLen != 0)
- pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
-
- //
- // <TODO: Config according to OBSS non-HT STA present!!>
- //
- }
-}
-
-void HTUseDefaultSetting(struct ieee80211_device* ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-// u8 regBwOpMode;
-
- if(pHTInfo->bEnableHT)
- {
- pHTInfo->bCurrentHTSupport = true;
-
- pHTInfo->bCurSuppCCK = pHTInfo->bRegSuppCCK;
-
- pHTInfo->bCurBW40MHz = pHTInfo->bRegBW40MHz;
-
- pHTInfo->bCurShortGI20MHz= pHTInfo->bRegShortGI20MHz;
-
- pHTInfo->bCurShortGI40MHz= pHTInfo->bRegShortGI40MHz;
-
- pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;
-
- pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
-
- pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
-
- pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
-
- pHTInfo->CurrentMPDUDensity = pHTInfo->CurrentMPDUDensity;
-
- // Set BWOpMode register
-
- //update RATR index0
- HTFilterMCSRate(ieee, ieee->Regdot11HTOperationalRateSet, ieee->dot11HTOperationalRateSet);
- //function below is not implemented at all. WB
-#ifdef TODO
- Adapter->HalFunc.InitHalRATRTableHandler( Adapter, &pMgntInfo->dot11OperationalRateSet, pMgntInfo->dot11HTOperationalRateSet);
-#endif
- ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, MCS_FILTER_ALL);
- ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
-
- }
- else
- {
- pHTInfo->bCurrentHTSupport = false;
- }
-}
-/********************************************************************************************************************
- *function: check whether HT control field exists
- * input: struct ieee80211_device *ieee
- * u8* pFrame //coming skb->data
- * output: none
- * return: return true if HT control field exists(false otherwise)
- * notice:
-********************************************************************************************************************/
-u8 HTCCheck(struct ieee80211_device* ieee, u8* pFrame)
-{
- if(ieee->pHTInfo->bCurrentHTSupport)
- {
- if( (IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1)
- {
- IEEE80211_DEBUG(IEEE80211_DL_HT, "HT CONTROL FILED EXIST!!\n");
- return true;
- }
- }
- return false;
-}
-
-//
-// This function set bandwidth mode in protocol layer.
-//
-void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-// u32 flags = 0;
-
- if(pHTInfo->bRegBW40MHz == false)
- return;
-
-
-
- // To reduce dummy operation
-// if((pHTInfo->bCurBW40MHz==false && Bandwidth==HT_CHANNEL_WIDTH_20) ||
-// (pHTInfo->bCurBW40MHz==true && Bandwidth==HT_CHANNEL_WIDTH_20_40 && Offset==pHTInfo->CurSTAExtChnlOffset))
-// return;
-
-// spin_lock_irqsave(&(ieee->bw_spinlock), flags);
- if(pHTInfo->bSwBwInProgress) {
-// spin_unlock_irqrestore(&(ieee->bw_spinlock), flags);
- return;
- }
- //if in half N mode, set to 20M bandwidth please 09.08.2008 WB.
- if (Bandwidth==HT_CHANNEL_WIDTH_20_40 && (!ieee->GetHalfNmodeSupportByAPsHandler(ieee)))
- {
- // Handle Illegal extension channel offset!!
- if(ieee->current_network.channel<2 && Offset==HT_EXTCHNL_OFFSET_LOWER)
- Offset = HT_EXTCHNL_OFFSET_NO_EXT;
- if(Offset==HT_EXTCHNL_OFFSET_UPPER || Offset==HT_EXTCHNL_OFFSET_LOWER) {
- pHTInfo->bCurBW40MHz = true;
- pHTInfo->CurSTAExtChnlOffset = Offset;
- } else {
- pHTInfo->bCurBW40MHz = false;
- pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
- }
- } else {
- pHTInfo->bCurBW40MHz = false;
- pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
- }
-
- pHTInfo->bSwBwInProgress = true;
-
- // TODO: 2007.7.13 by Emily Wait 2000ms in order to guarantee that switching
- // bandwidth is executed after scan is finished. It is a temporal solution
- // because software should ganrantee the last operation of switching bandwidth
- // is executed properlly.
- HTSetConnectBwModeCallback(ieee);
-
-// spin_unlock_irqrestore(&(ieee->bw_spinlock), flags);
-}
-
-void HTSetConnectBwModeCallback(struct ieee80211_device* ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "======>%s()\n", __FUNCTION__);
-
- if(pHTInfo->bCurBW40MHz)
- {
- if(pHTInfo->CurSTAExtChnlOffset==HT_EXTCHNL_OFFSET_UPPER)
- ieee->set_chan(ieee, ieee->current_network.channel+2);
- else if(pHTInfo->CurSTAExtChnlOffset==HT_EXTCHNL_OFFSET_LOWER)
- ieee->set_chan(ieee, ieee->current_network.channel-2);
- else
- ieee->set_chan(ieee, ieee->current_network.channel);
-
- ieee->SetBWModeHandler(ieee, HT_CHANNEL_WIDTH_20_40, pHTInfo->CurSTAExtChnlOffset);
- } else {
- ieee->set_chan(ieee, ieee->current_network.channel);
- ieee->SetBWModeHandler(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- }
-
- pHTInfo->bSwBwInProgress = false;
-}
diff --git a/drivers/staging/rtl8192e/ieee80211/rtl819x_Qos.h b/drivers/staging/rtl8192e/ieee80211/rtl819x_Qos.h
deleted file mode 100644
index d4565ecc7ab..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/rtl819x_Qos.h
+++ /dev/null
@@ -1,582 +0,0 @@
-#ifndef __INC_QOS_TYPE_H
-#define __INC_QOS_TYPE_H
-
-#define BIT0 0x00000001
-#define BIT1 0x00000002
-#define BIT2 0x00000004
-#define BIT3 0x00000008
-#define BIT4 0x00000010
-#define BIT5 0x00000020
-#define BIT6 0x00000040
-#define BIT7 0x00000080
-#define BIT8 0x00000100
-#define BIT9 0x00000200
-#define BIT10 0x00000400
-#define BIT11 0x00000800
-#define BIT12 0x00001000
-#define BIT13 0x00002000
-#define BIT14 0x00004000
-#define BIT15 0x00008000
-#define BIT16 0x00010000
-#define BIT17 0x00020000
-#define BIT18 0x00040000
-#define BIT19 0x00080000
-#define BIT20 0x00100000
-#define BIT21 0x00200000
-#define BIT22 0x00400000
-#define BIT23 0x00800000
-#define BIT24 0x01000000
-#define BIT25 0x02000000
-#define BIT26 0x04000000
-#define BIT27 0x08000000
-#define BIT28 0x10000000
-#define BIT29 0x20000000
-#define BIT30 0x40000000
-#define BIT31 0x80000000
-
-#define MAX_WMMELE_LENGTH 64
-
-//
-// QoS mode.
-// enum 0, 1, 2, 4: since we can use the OR(|) operation.
-//
-// QOS_MODE is redefined for enum can't be ++, | under C++ compiler, 2006.05.17, by rcnjko.
-//typedef enum _QOS_MODE{
-// QOS_DISABLE = 0,
-// QOS_WMM = 1,
-// QOS_EDCA = 2,
-// QOS_HCCA = 4,
-//}QOS_MODE,*PQOS_MODE;
-//
-typedef u32 QOS_MODE, *PQOS_MODE;
-#define QOS_DISABLE 0
-#define QOS_WMM 1
-#define QOS_WMMSA 2
-#define QOS_EDCA 4
-#define QOS_HCCA 8
-#define QOS_WMM_UAPSD 16 //WMM Power Save, 2006-06-14 Isaiah
-
-#define AC_PARAM_SIZE 4
-#define WMM_PARAM_ELE_BODY_LEN 18
-
-//
-// QoS ACK Policy Field Values
-// Ref: WMM spec 2.1.6: QoS Control Field, p.10.
-//
-typedef enum _ACK_POLICY{
- eAckPlc0_ACK = 0x00,
- eAckPlc1_NoACK = 0x01,
-}ACK_POLICY,*PACK_POLICY;
-
-#define WMM_PARAM_ELEMENT_SIZE (8+(4*AC_PARAM_SIZE))
-
-//
-// QoS Control Field
-// Ref:
-// 1. WMM spec 2.1.6: QoS Control Field, p.9.
-// 2. 802.11e/D13.0 7.1.3.5, p.26.
-//
-typedef union _QOS_CTRL_FIELD{
- u8 charData[2];
- u16 shortData;
-
- // WMM spec
- struct
- {
- u8 UP:3;
- u8 usRsvd1:1;
- u8 EOSP:1;
- u8 AckPolicy:2;
- u8 usRsvd2:1;
- u8 ucRsvdByte;
- }WMM;
-
- // 802.11e: QoS data type frame sent by non-AP QSTAs.
- struct
- {
- u8 TID:4;
- u8 bIsQsize:1;// 0: BIT[8:15] is TXOP Duration Requested, 1: BIT[8:15] is Queue Size.
- u8 AckPolicy:2;
- u8 usRsvd:1;
- u8 TxopOrQsize; // (BIT4=0)TXOP Duration Requested or (BIT4=1)Queue Size.
- }BySta;
-
- // 802.11e: QoS data, QoS Null, and QoS Data+CF-Ack frames sent by HC.
- struct
- {
- u8 TID:4;
- u8 EOSP:1;
- u8 AckPolicy:2;
- u8 usRsvd:1;
- u8 PSBufState; // QAP PS Buffer State.
- }ByHc_Data;
-
- // 802.11e: QoS (+) CF-Poll frames sent by HC.
- struct
- {
- u8 TID:4;
- u8 EOSP:1;
- u8 AckPolicy:2;
- u8 usRsvd:1;
- u8 TxopLimit; // TXOP Limit.
- }ByHc_CFP;
-
-}QOS_CTRL_FIELD, *PQOS_CTRL_FIELD;
-
-
-//
-// QoS Info Field
-// Ref:
-// 1. WMM spec 2.2.1: WME Information Element, p.11.
-// 2. 8185 QoS code: QOS_INFO [def. in QoS_mp.h]
-//
-typedef union _QOS_INFO_FIELD{
- u8 charData;
-
- struct
- {
- u8 ucParameterSetCount:4;
- u8 ucReserved:4;
- }WMM;
-
- struct
- {
- //Ref WMM_Specification_1-1.pdf, 2006-06-13 Isaiah
- u8 ucAC_VO_UAPSD:1;
- u8 ucAC_VI_UAPSD:1;
- u8 ucAC_BE_UAPSD:1;
- u8 ucAC_BK_UAPSD:1;
- u8 ucReserved1:1;
- u8 ucMaxSPLen:2;
- u8 ucReserved2:1;
-
- }ByWmmPsSta;
-
- struct
- {
- //Ref WMM_Specification_1-1.pdf, 2006-06-13 Isaiah
- u8 ucParameterSetCount:4;
- u8 ucReserved:3;
- u8 ucApUapsd:1;
- }ByWmmPsAp;
-
- struct
- {
- u8 ucAC3_UAPSD:1;
- u8 ucAC2_UAPSD:1;
- u8 ucAC1_UAPSD:1;
- u8 ucAC0_UAPSD:1;
- u8 ucQAck:1;
- u8 ucMaxSPLen:2;
- u8 ucMoreDataAck:1;
- } By11eSta;
-
- struct
- {
- u8 ucParameterSetCount:4;
- u8 ucQAck:1;
- u8 ucQueueReq:1;
- u8 ucTXOPReq:1;
- u8 ucReserved:1;
- } By11eAp;
-
- struct
- {
- u8 ucReserved1:4;
- u8 ucQAck:1;
- u8 ucReserved2:2;
- u8 ucMoreDataAck:1;
- } ByWmmsaSta;
-
- struct
- {
- u8 ucReserved1:4;
- u8 ucQAck:1;
- u8 ucQueueReq:1;
- u8 ucTXOPReq:1;
- u8 ucReserved2:1;
- } ByWmmsaAp;
-
- struct
- {
- u8 ucAC3_UAPSD:1;
- u8 ucAC2_UAPSD:1;
- u8 ucAC1_UAPSD:1;
- u8 ucAC0_UAPSD:1;
- u8 ucQAck:1;
- u8 ucMaxSPLen:2;
- u8 ucMoreDataAck:1;
- } ByAllSta;
-
- struct
- {
- u8 ucParameterSetCount:4;
- u8 ucQAck:1;
- u8 ucQueueReq:1;
- u8 ucTXOPReq:1;
- u8 ucApUapsd:1;
- } ByAllAp;
-
-}QOS_INFO_FIELD, *PQOS_INFO_FIELD;
-
-//
-// ACI to AC coding.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.13.
-//
-// AC_CODING is redefined for enum can't be ++, | under C++ compiler, 2006.05.17, by rcnjko.
-//typedef enum _AC_CODING{
-// AC0_BE = 0, // ACI: 0x00 // Best Effort
-// AC1_BK = 1, // ACI: 0x01 // Background
-// AC2_VI = 2, // ACI: 0x10 // Video
-// AC3_VO = 3, // ACI: 0x11 // Voice
-// AC_MAX = 4, // Max: define total number; Should not to be used as a real enum.
-//}AC_CODING,*PAC_CODING;
-//
-typedef u32 AC_CODING;
-#define AC0_BE 0 // ACI: 0x00 // Best Effort
-#define AC1_BK 1 // ACI: 0x01 // Background
-#define AC2_VI 2 // ACI: 0x10 // Video
-#define AC3_VO 3 // ACI: 0x11 // Voice
-#define AC_MAX 4 // Max: define total number; Should not to be used as a real enum.
-
-//
-// ACI/AIFSN Field.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
-//
-typedef union _ACI_AIFSN{
- u8 charData;
-
- struct
- {
- u8 AIFSN:4;
- u8 ACM:1;
- u8 ACI:2;
- u8 Reserved:1;
- }f; // Field
-}ACI_AIFSN, *PACI_AIFSN;
-
-//
-// ECWmin/ECWmax field.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.13.
-//
-typedef union _ECW{
- u8 charData;
- struct
- {
- u8 ECWmin:4;
- u8 ECWmax:4;
- }f; // Field
-}ECW, *PECW;
-
-//
-// AC Parameters Record Format.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
-//
-typedef union _AC_PARAM{
- u32 longData;
- u8 charData[4];
-
- struct
- {
- ACI_AIFSN AciAifsn;
- ECW Ecw;
- u16 TXOPLimit;
- }f; // Field
-}AC_PARAM, *PAC_PARAM;
-
-
-
-//
-// QoS element subtype
-//
-typedef enum _QOS_ELE_SUBTYPE{
- QOSELE_TYPE_INFO = 0x00, // 0x00: Information element
- QOSELE_TYPE_PARAM = 0x01, // 0x01: parameter element
-}QOS_ELE_SUBTYPE,*PQOS_ELE_SUBTYPE;
-
-
-//
-// Direction Field Values.
-// Ref: WMM spec 2.2.11: WME TSPEC Element, p.18.
-//
-typedef enum _DIRECTION_VALUE{
- DIR_UP = 0, // 0x00 // UpLink
- DIR_DOWN = 1, // 0x01 // DownLink
- DIR_DIRECT = 2, // 0x10 // DirectLink
- DIR_BI_DIR = 3, // 0x11 // Bi-Direction
-}DIRECTION_VALUE,*PDIRECTION_VALUE;
-
-
-//
-// TS Info field in WMM TSPEC Element.
-// Ref:
-// 1. WMM spec 2.2.11: WME TSPEC Element, p.18.
-// 2. 8185 QoS code: QOS_TSINFO [def. in QoS_mp.h]
-//
-typedef union _QOS_TSINFO{
- u8 charData[3];
- struct {
- u8 ucTrafficType:1; //WMM is reserved
- u8 ucTSID:4;
- u8 ucDirection:2;
- u8 ucAccessPolicy:2; //WMM: bit8=0, bit7=1
- u8 ucAggregation:1; //WMM is reserved
- u8 ucPSB:1; //WMMSA is APSD
- u8 ucUP:3;
- u8 ucTSInfoAckPolicy:2; //WMM is reserved
- u8 ucSchedule:1; //WMM is reserved
- u8 ucReserved:7;
- }field;
-}QOS_TSINFO, *PQOS_TSINFO;
-
-//
-// WMM TSPEC Body.
-// Ref: WMM spec 2.2.11: WME TSPEC Element, p.16.
-//
-typedef union _TSPEC_BODY{
- u8 charData[55];
-
- struct
- {
- QOS_TSINFO TSInfo; //u8 TSInfo[3];
- u16 NominalMSDUsize;
- u16 MaxMSDUsize;
- u32 MinServiceItv;
- u32 MaxServiceItv;
- u32 InactivityItv;
- u32 SuspenItv;
- u32 ServiceStartTime;
- u32 MinDataRate;
- u32 MeanDataRate;
- u32 PeakDataRate;
- u32 MaxBurstSize;
- u32 DelayBound;
- u32 MinPhyRate;
- u16 SurplusBandwidthAllowance;
- u16 MediumTime;
- } f; // Field
-}TSPEC_BODY, *PTSPEC_BODY;
-
-
-//
-// WMM TSPEC Element.
-// Ref: WMM spec 2.2.11: WME TSPEC Element, p.16.
-//
-typedef struct _WMM_TSPEC{
- u8 ID;
- u8 Length;
- u8 OUI[3];
- u8 OUI_Type;
- u8 OUI_SubType;
- u8 Version;
- TSPEC_BODY Body;
-} WMM_TSPEC, *PWMM_TSPEC;
-
-//
-// ACM implementation method.
-// Annie, 2005-12-13.
-//
-typedef enum _ACM_METHOD{
- eAcmWay0_SwAndHw = 0, // By SW and HW.
- eAcmWay1_HW = 1, // By HW.
- eAcmWay2_SW = 2, // By SW.
-}ACM_METHOD,*PACM_METHOD;
-
-
-typedef struct _ACM{
-// u8 RegEnableACM;
- u64 UsedTime;
- u64 MediumTime;
- u8 HwAcmCtl; // TRUE: UsedTime exceed => Do NOT USE this AC. It wll be written to ACM_CONTROL(0xBF BIT 0/1/2 in 8185B).
-}ACM, *PACM;
-
-typedef u8 AC_UAPSD, *PAC_UAPSD;
-
-#define GET_VO_UAPSD(_apsd) ((_apsd) & BIT0)
-#define SET_VO_UAPSD(_apsd) ((_apsd) |= BIT0)
-
-#define GET_VI_UAPSD(_apsd) ((_apsd) & BIT1)
-#define SET_VI_UAPSD(_apsd) ((_apsd) |= BIT1)
-
-#define GET_BK_UAPSD(_apsd) ((_apsd) & BIT2)
-#define SET_BK_UAPSD(_apsd) ((_apsd) |= BIT2)
-
-#define GET_BE_UAPSD(_apsd) ((_apsd) & BIT3)
-#define SET_BE_UAPSD(_apsd) ((_apsd) |= BIT3)
-
-
-//typedef struct _TCLASS{
-// TODO
-//} TCLASS, *PTCLASS;
-typedef union _QOS_TCLAS{
-
- struct _TYPE_GENERAL{
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- } TYPE_GENERAL;
-
- struct _TYPE0_ETH{
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u8 SrcAddr[6];
- u8 DstAddr[6];
- u16 Type;
- } TYPE0_ETH;
-
- struct _TYPE1_IPV4{
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u8 Version;
- u8 SrcIP[4];
- u8 DstIP[4];
- u16 SrcPort;
- u16 DstPort;
- u8 DSCP;
- u8 Protocol;
- u8 Reserved;
- } TYPE1_IPV4;
-
- struct _TYPE1_IPV6{
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u8 Version;
- u8 SrcIP[16];
- u8 DstIP[16];
- u16 SrcPort;
- u16 DstPort;
- u8 FlowLabel[3];
- } TYPE1_IPV6;
-
- struct _TYPE2_8021Q{
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u16 TagType;
- } TYPE2_8021Q;
-} QOS_TCLAS, *PQOS_TCLAS;
-
-//typedef struct _WMM_TSTREAM{
-//
-//- TSPEC
-//- AC (which to mapping)
-//} WMM_TSTREAM, *PWMM_TSTREAM;
-typedef struct _QOS_TSTREAM{
- u8 AC;
- WMM_TSPEC TSpec;
- QOS_TCLAS TClass;
-} QOS_TSTREAM, *PQOS_TSTREAM;
-
-//typedef struct _U_APSD{
-//- TriggerEnable [4]
-//- MaxSPLength
-//- HighestAcBuffered
-//} U_APSD, *PU_APSD;
-
-//joseph TODO:
-// UAPSD function should be implemented by 2 data structure
-// "Qos control field" and "Qos info field"
-//typedef struct _QOS_UAPSD{
-// u8 bTriggerEnable[4];
-// u8 MaxSPLength;
-// u8 HighestBufAC;
-//} QOS_UAPSD, *PQOS_APSD;
-
-//----------------------------------------------------------------------------
-// 802.11 Management frame Status Code field
-//----------------------------------------------------------------------------
-typedef struct _OCTET_STRING{
- u8 *Octet;
- u16 Length;
-}OCTET_STRING, *POCTET_STRING;
-
-//
-// STA QoS data.
-// Ref: DOT11_QOS in 8185 code. [def. in QoS_mp.h]
-//
-typedef struct _STA_QOS{
- //DECLARE_RT_OBJECT(STA_QOS);
- u8 WMMIEBuf[MAX_WMMELE_LENGTH];
- u8* WMMIE;
-
- // Part 1. Self QoS Mode.
- QOS_MODE QosCapability; //QoS Capability, 2006-06-14 Isaiah
- QOS_MODE CurrentQosMode;
-
- // For WMM Power Save Mode :
- // ACs are trigger/delivery enabled or legacy power save enabled. 2006-06-13 Isaiah
- AC_UAPSD b4ac_Uapsd; //VoUapsd(bit0), ViUapsd(bit1), BkUapsd(bit2), BeUapsd(bit3),
- AC_UAPSD Curr4acUapsd;
- u8 bInServicePeriod;
- u8 MaxSPLength;
- int NumBcnBeforeTrigger;
-
- // Part 2. EDCA Parameter (perAC)
- u8 * pWMMInfoEle;
- u8 WMMParamEle[WMM_PARAM_ELEMENT_SIZE];
- u8 WMMPELength;
-
- // <Bruce_Note>
- //2 ToDo: remove the Qos Info Field and replace it by the above WMM Info element.
- // By Bruce, 2008-01-30.
- // Part 2. EDCA Parameter (perAC)
- QOS_INFO_FIELD QosInfoField_STA; // Maintained by STA
- QOS_INFO_FIELD QosInfoField_AP; // Retrieved from AP
-
- AC_PARAM CurAcParameters[4];
-
- // Part 3. ACM
- ACM acm[4];
- ACM_METHOD AcmMethod;
-
- // Part 4. Per TID (Part 5: TCLASS will be described by TStream)
- QOS_TSTREAM TStream[16];
- WMM_TSPEC TSpec;
-
- u32 QBssWirelessMode;
-
- // No Ack Setting
- u8 bNoAck;
-
- // Enable/Disable Rx immediate BA capability.
- u8 bEnableRxImmBA;
-
-}STA_QOS, *PSTA_QOS;
-
-//
-// BSS QOS data.
-// Ref: BssDscr in 8185 code. [def. in BssDscr.h]
-//
-typedef struct _BSS_QOS{
- QOS_MODE bdQoSMode;
-
- u8 bdWMMIEBuf[MAX_WMMELE_LENGTH];
- u8* bdWMMIE;
-
- QOS_ELE_SUBTYPE EleSubType;
-
- u8 * pWMMInfoEle;
- u8 * pWMMParamEle;
-
- QOS_INFO_FIELD QosInfoField;
- AC_PARAM AcParameter[4];
-}BSS_QOS, *PBSS_QOS;
-
-
-//
-// Ref: sQoSCtlLng and QoSCtl definition in 8185 QoS code.
-//#define QoSCtl (( (Adapter->bRegQoS) && (Adapter->dot11QoS.QoSMode &(QOS_EDCA|QOS_HCCA)) ) ?sQoSCtlLng:0)
-//
-#define sQoSCtlLng 2
-#define QOS_CTRL_LEN(_QosMode) ((_QosMode > QOS_DISABLE)? sQoSCtlLng : 0)
-
-
-//Added by joseph
-//UP Mapping to AC, using in MgntQuery_SequenceNumber() and maybe for DSCP
-//#define UP2AC(up) ((up<3)?((up==0)?1:0):(up>>1))
-#define IsACValid(ac) ((ac<=7 )?true:false )
-
-#endif // #ifndef __INC_QOS_TYPE_H
diff --git a/drivers/staging/rtl8192e/ieee80211/rtl819x_TS.h b/drivers/staging/rtl8192e/ieee80211/rtl819x_TS.h
deleted file mode 100644
index e7e26fd9639..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/rtl819x_TS.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef _TSTYPE_H_
-#define _TSTYPE_H_
-#include "rtl819x_Qos.h"
-#define TS_SETUP_TIMEOUT 60 // In millisecond
-#define TS_INACT_TIMEOUT 60
-#define TS_ADDBA_DELAY 60
-
-#define TOTAL_TS_NUM 16
-#define TCLAS_NUM 4
-
-// This define the Tx/Rx directions
-typedef enum _TR_SELECT {
- TX_DIR = 0,
- RX_DIR = 1,
-} TR_SELECT, *PTR_SELECT;
-
-typedef struct _TS_COMMON_INFO{
- struct list_head List;
- struct timer_list SetupTimer;
- struct timer_list InactTimer;
- u8 Addr[6];
- TSPEC_BODY TSpec;
- QOS_TCLAS TClass[TCLAS_NUM];
- u8 TClasProc;
- u8 TClasNum;
-} TS_COMMON_INFO, *PTS_COMMON_INFO;
-
-typedef struct _TX_TS_RECORD{
- TS_COMMON_INFO TsCommonInfo;
- u16 TxCurSeq;
- BA_RECORD TxPendingBARecord; // For BA Originator
- BA_RECORD TxAdmittedBARecord; // For BA Originator
-// QOS_DL_RECORD DLRecord;
- u8 bAddBaReqInProgress;
- u8 bAddBaReqDelayed;
- u8 bUsingBa;
- struct timer_list TsAddBaTimer;
- u8 num;
-} TX_TS_RECORD, *PTX_TS_RECORD;
-
-typedef struct _RX_TS_RECORD {
- TS_COMMON_INFO TsCommonInfo;
- u16 RxIndicateSeq;
- u16 RxTimeoutIndicateSeq;
- struct list_head RxPendingPktList;
- struct timer_list RxPktPendingTimer;
- BA_RECORD RxAdmittedBARecord; // For BA Recipient
- u16 RxLastSeqNum;
- u8 RxLastFragNum;
- u8 num;
-// QOS_DL_RECORD DLRecord;
-} RX_TS_RECORD, *PRX_TS_RECORD;
-
-
-#endif
-
diff --git a/drivers/staging/rtl8192e/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192e/ieee80211/rtl819x_TSProc.c
deleted file mode 100644
index ad6872dcf1c..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/rtl819x_TSProc.c
+++ /dev/null
@@ -1,627 +0,0 @@
-#include "ieee80211.h"
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include "rtl819x_TS.h"
-
-void TsSetupTimeOut(unsigned long data)
-{
- // Not implement yet
- // This is used for WMMSA and ACM , that would send ADDTSReq frame.
-}
-
-void TsInactTimeout(unsigned long data)
-{
- // Not implement yet
- // This is used for WMMSA and ACM.
- // This function would be call when TS is no Tx/Rx for some period of time.
-}
-
-/********************************************************************************************************************
- *function: I still not understand this function, so wait for further implementation
- * input: unsigned long data //acturally we send TX_TS_RECORD or RX_TS_RECORD to these timer
- * return: NULL
- * notice:
-********************************************************************************************************************/
-void RxPktPendingTimeout(unsigned long data)
-{
- PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)data;
- struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
-
- PRX_REORDER_ENTRY pReorderEntry = NULL;
-
- //u32 flags = 0;
- unsigned long flags = 0;
- struct ieee80211_rxb *stats_IndicateArray[REORDER_WIN_SIZE];
- u8 index = 0;
- bool bPktInBuf = false;
-
-
- spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
- //PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK);
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"==================>%s()\n",__FUNCTION__);
- if(pRxTs->RxTimeoutIndicateSeq != 0xffff)
- {
- // Indicate the pending packets sequentially according to SeqNum until meet the gap.
- while(!list_empty(&pRxTs->RxPendingPktList))
- {
- pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTs->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
- if(index == 0)
- pRxTs->RxIndicateSeq = pReorderEntry->SeqNum;
-
- if( SN_LESS(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq) ||
- SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq) )
- {
- list_del_init(&pReorderEntry->List);
-
- if(SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq))
- pRxTs->RxIndicateSeq = (pRxTs->RxIndicateSeq + 1) % 4096;
-
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"RxPktPendingTimeout(): IndicateSeq: %d\n", pReorderEntry->SeqNum);
- stats_IndicateArray[index] = pReorderEntry->prxb;
- index++;
-
- list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List);
- }
- else
- {
- bPktInBuf = true;
- break;
- }
- }
- }
-
- if(index>0)
- {
- // Set RxTimeoutIndicateSeq to 0xffff to indicate no pending packets in buffer now.
- pRxTs->RxTimeoutIndicateSeq = 0xffff;
-
- // Indicate packets
- if(index > REORDER_WIN_SIZE){
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! \n");
- spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
- return;
- }
- ieee80211_indicate_packets(ieee, stats_IndicateArray, index);
- }
-
- if(bPktInBuf && (pRxTs->RxTimeoutIndicateSeq==0xffff))
- {
- pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq;
- mod_timer(&pRxTs->RxPktPendingTimer, jiffies + MSECS(ieee->pHTInfo->RxReorderPendingTime));
- }
- spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
- //PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
-}
-
-/********************************************************************************************************************
- *function: Add BA timer function
- * input: unsigned long data //acturally we send TX_TS_RECORD or RX_TS_RECORD to these timer
- * return: NULL
- * notice:
-********************************************************************************************************************/
-void TsAddBaProcess(unsigned long data)
-{
- PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)data;
- u8 num = pTxTs->num;
- struct ieee80211_device *ieee = container_of(pTxTs, struct ieee80211_device, TxTsRecord[num]);
-
- TsInitAddBA(ieee, pTxTs, BA_POLICY_IMMEDIATE, false);
- IEEE80211_DEBUG(IEEE80211_DL_BA, "TsAddBaProcess(): ADDBA Req is started!! \n");
-}
-
-
-void ResetTsCommonInfo(PTS_COMMON_INFO pTsCommonInfo)
-{
- memset(pTsCommonInfo->Addr, 0, 6);
- memset(&pTsCommonInfo->TSpec, 0, sizeof(TSPEC_BODY));
- memset(&pTsCommonInfo->TClass, 0, sizeof(QOS_TCLAS)*TCLAS_NUM);
- pTsCommonInfo->TClasProc = 0;
- pTsCommonInfo->TClasNum = 0;
-}
-
-void ResetTxTsEntry(PTX_TS_RECORD pTS)
-{
- ResetTsCommonInfo(&pTS->TsCommonInfo);
- pTS->TxCurSeq = 0;
- pTS->bAddBaReqInProgress = false;
- pTS->bAddBaReqDelayed = false;
- pTS->bUsingBa = false;
- ResetBaEntry(&pTS->TxAdmittedBARecord); //For BA Originator
- ResetBaEntry(&pTS->TxPendingBARecord);
-}
-
-void ResetRxTsEntry(PRX_TS_RECORD pTS)
-{
- ResetTsCommonInfo(&pTS->TsCommonInfo);
- pTS->RxIndicateSeq = 0xffff; // This indicate the RxIndicateSeq is not used now!!
- pTS->RxTimeoutIndicateSeq = 0xffff; // This indicate the RxTimeoutIndicateSeq is not used now!!
- ResetBaEntry(&pTS->RxAdmittedBARecord); // For BA Recipient
-}
-
-void TSInitialize(struct ieee80211_device *ieee)
-{
- PTX_TS_RECORD pTxTS = ieee->TxTsRecord;
- PRX_TS_RECORD pRxTS = ieee->RxTsRecord;
- PRX_REORDER_ENTRY pRxReorderEntry = ieee->RxReorderEntry;
- u8 count = 0;
- IEEE80211_DEBUG(IEEE80211_DL_TS, "==========>%s()\n", __FUNCTION__);
- // Initialize Tx TS related info.
- INIT_LIST_HEAD(&ieee->Tx_TS_Admit_List);
- INIT_LIST_HEAD(&ieee->Tx_TS_Pending_List);
- INIT_LIST_HEAD(&ieee->Tx_TS_Unused_List);
-
- for(count = 0; count < TOTAL_TS_NUM; count++)
- {
- //
- pTxTS->num = count;
- // The timers for the operation of Traffic Stream and Block Ack.
- // DLS related timer will be add here in the future!!
- init_timer(&pTxTS->TsCommonInfo.SetupTimer);
- pTxTS->TsCommonInfo.SetupTimer.data = (unsigned long)pTxTS;
- pTxTS->TsCommonInfo.SetupTimer.function = TsSetupTimeOut;
-
- init_timer(&pTxTS->TsCommonInfo.InactTimer);
- pTxTS->TsCommonInfo.InactTimer.data = (unsigned long)pTxTS;
- pTxTS->TsCommonInfo.InactTimer.function = TsInactTimeout;
-
- init_timer(&pTxTS->TsAddBaTimer);
- pTxTS->TsAddBaTimer.data = (unsigned long)pTxTS;
- pTxTS->TsAddBaTimer.function = TsAddBaProcess;
-
- init_timer(&pTxTS->TxPendingBARecord.Timer);
- pTxTS->TxPendingBARecord.Timer.data = (unsigned long)pTxTS;
- pTxTS->TxPendingBARecord.Timer.function = BaSetupTimeOut;
-
- init_timer(&pTxTS->TxAdmittedBARecord.Timer);
- pTxTS->TxAdmittedBARecord.Timer.data = (unsigned long)pTxTS;
- pTxTS->TxAdmittedBARecord.Timer.function = TxBaInactTimeout;
-
- ResetTxTsEntry(pTxTS);
- list_add_tail(&pTxTS->TsCommonInfo.List, &ieee->Tx_TS_Unused_List);
- pTxTS++;
- }
-
- // Initialize Rx TS related info.
- INIT_LIST_HEAD(&ieee->Rx_TS_Admit_List);
- INIT_LIST_HEAD(&ieee->Rx_TS_Pending_List);
- INIT_LIST_HEAD(&ieee->Rx_TS_Unused_List);
- for(count = 0; count < TOTAL_TS_NUM; count++)
- {
- pRxTS->num = count;
- INIT_LIST_HEAD(&pRxTS->RxPendingPktList);
-
- init_timer(&pRxTS->TsCommonInfo.SetupTimer);
- pRxTS->TsCommonInfo.SetupTimer.data = (unsigned long)pRxTS;
- pRxTS->TsCommonInfo.SetupTimer.function = TsSetupTimeOut;
-
- init_timer(&pRxTS->TsCommonInfo.InactTimer);
- pRxTS->TsCommonInfo.InactTimer.data = (unsigned long)pRxTS;
- pRxTS->TsCommonInfo.InactTimer.function = TsInactTimeout;
-
- init_timer(&pRxTS->RxAdmittedBARecord.Timer);
- pRxTS->RxAdmittedBARecord.Timer.data = (unsigned long)pRxTS;
- pRxTS->RxAdmittedBARecord.Timer.function = RxBaInactTimeout;
-
- init_timer(&pRxTS->RxPktPendingTimer);
- pRxTS->RxPktPendingTimer.data = (unsigned long)pRxTS;
- pRxTS->RxPktPendingTimer.function = RxPktPendingTimeout;
-
- ResetRxTsEntry(pRxTS);
- list_add_tail(&pRxTS->TsCommonInfo.List, &ieee->Rx_TS_Unused_List);
- pRxTS++;
- }
- // Initialize unused Rx Reorder List.
- INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
-//#ifdef TO_DO_LIST
- for(count = 0; count < REORDER_ENTRY_NUM; count++)
- {
- list_add_tail( &pRxReorderEntry->List,&ieee->RxReorder_Unused_List);
- if(count == (REORDER_ENTRY_NUM-1))
- break;
- pRxReorderEntry = &ieee->RxReorderEntry[count+1];
- }
-//#endif
-
-}
-
-void AdmitTS(struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, u32 InactTime)
-{
- del_timer_sync(&pTsCommonInfo->SetupTimer);
- del_timer_sync(&pTsCommonInfo->InactTimer);
-
- if(InactTime!=0)
- mod_timer(&pTsCommonInfo->InactTimer, jiffies + MSECS(InactTime));
-}
-
-
-PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8* Addr, u8 TID, TR_SELECT TxRxSelect)
-{
- //DIRECTION_VALUE dir;
- u8 dir;
- bool search_dir[4] = {0, 0, 0, 0};
- struct list_head* psearch_list; //FIXME
- PTS_COMMON_INFO pRet = NULL;
- if(ieee->iw_mode == IW_MODE_MASTER) //ap mode
- {
- if(TxRxSelect == TX_DIR)
- {
- search_dir[DIR_DOWN] = true;
- search_dir[DIR_BI_DIR]= true;
- }
- else
- {
- search_dir[DIR_UP] = true;
- search_dir[DIR_BI_DIR]= true;
- }
- }
- else if(ieee->iw_mode == IW_MODE_ADHOC)
- {
- if(TxRxSelect == TX_DIR)
- search_dir[DIR_UP] = true;
- else
- search_dir[DIR_DOWN] = true;
- }
- else
- {
- if(TxRxSelect == TX_DIR)
- {
- search_dir[DIR_UP] = true;
- search_dir[DIR_BI_DIR]= true;
- search_dir[DIR_DIRECT]= true;
- }
- else
- {
- search_dir[DIR_DOWN] = true;
- search_dir[DIR_BI_DIR]= true;
- search_dir[DIR_DIRECT]= true;
- }
- }
-
- if(TxRxSelect == TX_DIR)
- psearch_list = &ieee->Tx_TS_Admit_List;
- else
- psearch_list = &ieee->Rx_TS_Admit_List;
-
- //for(dir = DIR_UP; dir <= DIR_BI_DIR; dir++)
- for(dir = 0; dir <= DIR_BI_DIR; dir++)
- {
- if(search_dir[dir] ==false )
- continue;
- list_for_each_entry(pRet, psearch_list, List){
- // IEEE80211_DEBUG(IEEE80211_DL_TS, "ADD:%pM, TID:%d, dir:%d\n", pRet->Addr, pRet->TSpec.f.TSInfo.field.ucTSID, pRet->TSpec.f.TSInfo.field.ucDirection);
- if (memcmp(pRet->Addr, Addr, 6) == 0)
- if (pRet->TSpec.f.TSInfo.field.ucTSID == TID)
- if(pRet->TSpec.f.TSInfo.field.ucDirection == dir)
- {
- break;
- }
-
- }
- if(&pRet->List != psearch_list)
- break;
- }
-
- if(&pRet->List != psearch_list){
- return pRet ;
- }
- else
- return NULL;
-}
-
-void MakeTSEntry(
- PTS_COMMON_INFO pTsCommonInfo,
- u8* Addr,
- PTSPEC_BODY pTSPEC,
- PQOS_TCLAS pTCLAS,
- u8 TCLAS_Num,
- u8 TCLAS_Proc
- )
-{
- u8 count;
-
- if(pTsCommonInfo == NULL)
- return;
-
- memcpy(pTsCommonInfo->Addr, Addr, 6);
-
- if(pTSPEC != NULL)
- memcpy((u8*)(&(pTsCommonInfo->TSpec)), (u8*)pTSPEC, sizeof(TSPEC_BODY));
-
- for(count = 0; count < TCLAS_Num; count++)
- memcpy((u8*)(&(pTsCommonInfo->TClass[count])), (u8*)pTCLAS, sizeof(QOS_TCLAS));
-
- pTsCommonInfo->TClasProc = TCLAS_Proc;
- pTsCommonInfo->TClasNum = TCLAS_Num;
-}
-
-
-bool GetTs(
- struct ieee80211_device* ieee,
- PTS_COMMON_INFO *ppTS,
- u8* Addr,
- u8 TID,
- TR_SELECT TxRxSelect, //Rx:1, Tx:0
- bool bAddNewTs
- )
-{
- u8 UP = 0;
- //
- // We do not build any TS for Broadcast or Multicast stream.
- // So reject these kinds of search here.
- //
- if(is_broadcast_ether_addr(Addr) || is_multicast_ether_addr(Addr))
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "get TS for Broadcast or Multicast\n");
- return false;
- }
-
- if (ieee->current_network.qos_data.supported == 0)
- UP = 0;
- else
- {
- // In WMM case: we use 4 TID only
- if (!IsACValid(TID))
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " in %s(), TID(%d) is not valid\n", __FUNCTION__, TID);
- return false;
- }
-
- switch(TID)
- {
- case 0:
- case 3:
- UP = 0;
- break;
-
- case 1:
- case 2:
- UP = 2;
- break;
-
- case 4:
- case 5:
- UP = 5;
- break;
-
- case 6:
- case 7:
- UP = 7;
- break;
- }
- }
-
- *ppTS = SearchAdmitTRStream(
- ieee,
- Addr,
- UP,
- TxRxSelect);
- if(*ppTS != NULL)
- {
- return true;
- }
- else
- {
- if(bAddNewTs == false)
- {
- IEEE80211_DEBUG(IEEE80211_DL_TS, "add new TS failed(tid:%d)\n", UP);
- return false;
- }
- else
- {
- //
- // Create a new Traffic stream for current Tx/Rx
- // This is for EDCA and WMM to add a new TS.
- // For HCCA or WMMSA, TS cannot be addmit without negotiation.
- //
- TSPEC_BODY TSpec;
- PQOS_TSINFO pTSInfo = &TSpec.f.TSInfo;
- struct list_head* pUnusedList =
- (TxRxSelect == TX_DIR)?
- (&ieee->Tx_TS_Unused_List):
- (&ieee->Rx_TS_Unused_List);
-
- struct list_head* pAddmitList =
- (TxRxSelect == TX_DIR)?
- (&ieee->Tx_TS_Admit_List):
- (&ieee->Rx_TS_Admit_List);
-
- DIRECTION_VALUE Dir = (ieee->iw_mode == IW_MODE_MASTER)?
- ((TxRxSelect==TX_DIR)?DIR_DOWN:DIR_UP):
- ((TxRxSelect==TX_DIR)?DIR_UP:DIR_DOWN);
- IEEE80211_DEBUG(IEEE80211_DL_TS, "to add Ts\n");
- if(!list_empty(pUnusedList))
- {
- (*ppTS) = list_entry(pUnusedList->next, TS_COMMON_INFO, List);
- list_del_init(&(*ppTS)->List);
- if(TxRxSelect==TX_DIR)
- {
- PTX_TS_RECORD tmp = container_of(*ppTS, TX_TS_RECORD, TsCommonInfo);
- ResetTxTsEntry(tmp);
- }
- else{
- PRX_TS_RECORD tmp = container_of(*ppTS, RX_TS_RECORD, TsCommonInfo);
- ResetRxTsEntry(tmp);
- }
-
- IEEE80211_DEBUG(IEEE80211_DL_TS, "to init current TS, UP:%d, Dir:%d, addr:%pM\n", UP, Dir, Addr);
- // Prepare TS Info releated field
- pTSInfo->field.ucTrafficType = 0; // Traffic type: WMM is reserved in this field
- pTSInfo->field.ucTSID = UP; // TSID
- pTSInfo->field.ucDirection = Dir; // Direction: if there is DirectLink, this need additional consideration.
- pTSInfo->field.ucAccessPolicy = 1; // Access policy
- pTSInfo->field.ucAggregation = 0; // Aggregation
- pTSInfo->field.ucPSB = 0; // Aggregation
- pTSInfo->field.ucUP = UP; // User priority
- pTSInfo->field.ucTSInfoAckPolicy = 0; // Ack policy
- pTSInfo->field.ucSchedule = 0; // Schedule
-
- MakeTSEntry(*ppTS, Addr, &TSpec, NULL, 0, 0);
- AdmitTS(ieee, *ppTS, 0);
- list_add_tail(&((*ppTS)->List), pAddmitList);
- // if there is DirectLink, we need to do additional operation here!!
-
- return true;
- }
- else
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "in function %s() There is not enough TS record to be used!!", __FUNCTION__);
- return false;
- }
- }
- }
-}
-
-void RemoveTsEntry(
- struct ieee80211_device* ieee,
- PTS_COMMON_INFO pTs,
- TR_SELECT TxRxSelect
- )
-{
- //u32 flags = 0;
- unsigned long flags = 0;
- del_timer_sync(&pTs->SetupTimer);
- del_timer_sync(&pTs->InactTimer);
- TsInitDelBA(ieee, pTs, TxRxSelect);
-
- if(TxRxSelect == RX_DIR)
- {
-//#ifdef TO_DO_LIST
- PRX_REORDER_ENTRY pRxReorderEntry;
- PRX_TS_RECORD pRxTS = (PRX_TS_RECORD)pTs;
- if(timer_pending(&pRxTS->RxPktPendingTimer))
- del_timer_sync(&pRxTS->RxPktPendingTimer);
-
- while(!list_empty(&pRxTS->RxPendingPktList))
- {
- // PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK);
- spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
- //pRxReorderEntry = list_entry(&pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
- pRxReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
- list_del_init(&pRxReorderEntry->List);
- {
- int i = 0;
- struct ieee80211_rxb * prxb = pRxReorderEntry->prxb;
- if (unlikely(!prxb))
- {
- spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
- return;
- }
- for(i =0; i < prxb->nr_subframes; i++) {
- dev_kfree_skb(prxb->subframes[i]);
- }
- kfree(prxb);
- prxb = NULL;
- }
- list_add_tail(&pRxReorderEntry->List,&ieee->RxReorder_Unused_List);
- //PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
- spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
- }
-
-//#endif
- }
- else
- {
- PTX_TS_RECORD pTxTS = (PTX_TS_RECORD)pTs;
- del_timer_sync(&pTxTS->TsAddBaTimer);
- }
-}
-
-void RemovePeerTS(struct ieee80211_device* ieee, u8* Addr)
-{
- PTS_COMMON_INFO pTS, pTmpTS;
-
- printk("===========>RemovePeerTS,%pM\n", Addr);
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List)
- {
- if (memcmp(pTS->Addr, Addr, 6) == 0)
- {
- RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
- }
- }
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List)
- {
- if (memcmp(pTS->Addr, Addr, 6) == 0)
- {
- printk("====>remove Tx_TS_admin_list\n");
- RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
- }
- }
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List)
- {
- if (memcmp(pTS->Addr, Addr, 6) == 0)
- {
- RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
- }
- }
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List)
- {
- if (memcmp(pTS->Addr, Addr, 6) == 0)
- {
- RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
- }
- }
-}
-
-void RemoveAllTS(struct ieee80211_device* ieee)
-{
- PTS_COMMON_INFO pTS, pTmpTS;
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List)
- {
- RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
- }
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List)
- {
- RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
- }
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List)
- {
- RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
- }
-
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List)
- {
- RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
- }
-}
-
-void TsStartAddBaProcess(struct ieee80211_device* ieee, PTX_TS_RECORD pTxTS)
-{
- if(pTxTS->bAddBaReqInProgress == false)
- {
- pTxTS->bAddBaReqInProgress = true;
- if(pTxTS->bAddBaReqDelayed)
- {
- IEEE80211_DEBUG(IEEE80211_DL_BA, "TsStartAddBaProcess(): Delayed Start ADDBA after 60 sec!!\n");
- mod_timer(&pTxTS->TsAddBaTimer, jiffies + MSECS(TS_ADDBA_DELAY));
- }
- else
- {
- IEEE80211_DEBUG(IEEE80211_DL_BA,"TsStartAddBaProcess(): Immediately Start ADDBA now!!\n");
- mod_timer(&pTxTS->TsAddBaTimer, jiffies+10); //set 10 ticks
- }
- }
- else
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s()==>BA timer is already added\n", __FUNCTION__);
-}
diff --git a/drivers/staging/rtl8192e/ieee80211/rtl_crypto.h b/drivers/staging/rtl8192e/ieee80211/rtl_crypto.h
deleted file mode 100644
index ccf6ae76357..00000000000
--- a/drivers/staging/rtl8192e/ieee80211/rtl_crypto.h
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * Scatterlist Cryptographic API.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * Copyright (c) 2002 David S. Miller (davem@redhat.com)
- *
- * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
- * and Nettle, by Niels Mé°ˆler.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#ifndef _LINUX_CRYPTO_H
-#define _LINUX_CRYPTO_H
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/string.h>
-#include <asm/page.h>
-#include <asm/errno.h>
-
-#define crypto_register_alg crypto_register_alg_rsl
-#define crypto_unregister_alg crypto_unregister_alg_rsl
-#define crypto_alloc_tfm crypto_alloc_tfm_rsl
-#define crypto_free_tfm crypto_free_tfm_rsl
-#define crypto_alg_available crypto_alg_available_rsl
-
-/*
- * Algorithm masks and types.
- */
-#define CRYPTO_ALG_TYPE_MASK 0x000000ff
-#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
-#define CRYPTO_ALG_TYPE_DIGEST 0x00000002
-#define CRYPTO_ALG_TYPE_COMPRESS 0x00000004
-
-/*
- * Transform masks and values (for crt_flags).
- */
-#define CRYPTO_TFM_MODE_MASK 0x000000ff
-#define CRYPTO_TFM_REQ_MASK 0x000fff00
-#define CRYPTO_TFM_RES_MASK 0xfff00000
-
-#define CRYPTO_TFM_MODE_ECB 0x00000001
-#define CRYPTO_TFM_MODE_CBC 0x00000002
-#define CRYPTO_TFM_MODE_CFB 0x00000004
-#define CRYPTO_TFM_MODE_CTR 0x00000008
-
-#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
-#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
-#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
-#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
-#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
-#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
-
-/*
- * Miscellaneous stuff.
- */
-#define CRYPTO_UNSPEC 0
-#define CRYPTO_MAX_ALG_NAME 64
-
-struct scatterlist;
-
-/*
- * Algorithms: modular crypto algorithm implementations, managed
- * via crypto_register_alg() and crypto_unregister_alg().
- */
-struct cipher_alg {
- unsigned int cia_min_keysize;
- unsigned int cia_max_keysize;
- int (*cia_setkey)(void *ctx, const u8 *key,
- unsigned int keylen, u32 *flags);
- void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src);
- void (*cia_decrypt)(void *ctx, u8 *dst, const u8 *src);
-};
-
-struct digest_alg {
- unsigned int dia_digestsize;
- void (*dia_init)(void *ctx);
- void (*dia_update)(void *ctx, const u8 *data, unsigned int len);
- void (*dia_final)(void *ctx, u8 *out);
- int (*dia_setkey)(void *ctx, const u8 *key,
- unsigned int keylen, u32 *flags);
-};
-
-struct compress_alg {
- int (*coa_init)(void *ctx);
- void (*coa_exit)(void *ctx);
- int (*coa_compress)(void *ctx, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
- int (*coa_decompress)(void *ctx, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-};
-
-#define cra_cipher cra_u.cipher
-#define cra_digest cra_u.digest
-#define cra_compress cra_u.compress
-
-struct crypto_alg {
- struct list_head cra_list;
- u32 cra_flags;
- unsigned int cra_blocksize;
- unsigned int cra_ctxsize;
- const char cra_name[CRYPTO_MAX_ALG_NAME];
-
- union {
- struct cipher_alg cipher;
- struct digest_alg digest;
- struct compress_alg compress;
- } cra_u;
-
- struct module *cra_module;
-};
-
-/*
- * Algorithm registration interface.
- */
-int crypto_register_alg(struct crypto_alg *alg);
-int crypto_unregister_alg(struct crypto_alg *alg);
-
-/*
- * Algorithm query interface.
- */
-int crypto_alg_available(const char *name, u32 flags);
-
-/*
- * Transforms: user-instantiated objects which encapsulate algorithms
- * and core processing logic. Managed via crypto_alloc_tfm() and
- * crypto_free_tfm(), as well as the various helpers below.
- */
-struct crypto_tfm;
-
-struct cipher_tfm {
- void *cit_iv;
- unsigned int cit_ivsize;
- u32 cit_mode;
- int (*cit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
- int (*cit_encrypt)(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes);
- int (*cit_encrypt_iv)(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv);
- int (*cit_decrypt)(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes);
- int (*cit_decrypt_iv)(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv);
- void (*cit_xor_block)(u8 *dst, const u8 *src);
-};
-
-struct digest_tfm {
- void (*dit_init)(struct crypto_tfm *tfm);
- void (*dit_update)(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg);
- void (*dit_final)(struct crypto_tfm *tfm, u8 *out);
- void (*dit_digest)(struct crypto_tfm *tfm, struct scatterlist *sg,
- unsigned int nsg, u8 *out);
- int (*dit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
-#ifdef CONFIG_CRYPTO_HMAC
- void *dit_hmac_block;
-#endif
-};
-
-struct compress_tfm {
- int (*cot_compress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
- int (*cot_decompress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-};
-
-#define crt_cipher crt_u.cipher
-#define crt_digest crt_u.digest
-#define crt_compress crt_u.compress
-
-struct crypto_tfm {
-
- u32 crt_flags;
-
- union {
- struct cipher_tfm cipher;
- struct digest_tfm digest;
- struct compress_tfm compress;
- } crt_u;
-
- struct crypto_alg *__crt_alg;
-};
-
-/*
- * Transform user interface.
- */
-
-/*
- * crypto_alloc_tfm() will first attempt to locate an already loaded algorithm.
- * If that fails and the kernel supports dynamically loadable modules, it
- * will then attempt to load a module of the same name or alias. A refcount
- * is grabbed on the algorithm which is then associated with the new transform.
- *
- * crypto_free_tfm() frees up the transform and any associated resources,
- * then drops the refcount on the associated algorithm.
- */
-struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
-void crypto_free_tfm(struct crypto_tfm *tfm);
-
-/*
- * Transform helpers which query the underlying algorithm.
- */
-static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_name;
-}
-
-static inline const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
-
- if (alg->cra_module)
- return alg->cra_module->name;
- else
- return NULL;
-}
-
-static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
-}
-
-static inline unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->__crt_alg->cra_cipher.cia_min_keysize;
-}
-
-static inline unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->__crt_alg->cra_cipher.cia_max_keysize;
-}
-
-static inline unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_ivsize;
-}
-
-static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_blocksize;
-}
-
-static inline unsigned int crypto_tfm_alg_digestsize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- return tfm->__crt_alg->cra_digest.dia_digestsize;
-}
-
-/*
- * API wrappers.
- */
-static inline void crypto_digest_init(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- tfm->crt_digest.dit_init(tfm);
-}
-
-static inline void crypto_digest_update(struct crypto_tfm *tfm,
- struct scatterlist *sg,
- unsigned int nsg)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- tfm->crt_digest.dit_update(tfm, sg, nsg);
-}
-
-static inline void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- tfm->crt_digest.dit_final(tfm, out);
-}
-
-static inline void crypto_digest_digest(struct crypto_tfm *tfm,
- struct scatterlist *sg,
- unsigned int nsg, u8 *out)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- tfm->crt_digest.dit_digest(tfm, sg, nsg, out);
-}
-
-static inline int crypto_digest_setkey(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- if (tfm->crt_digest.dit_setkey == NULL)
- return -ENOSYS;
- return tfm->crt_digest.dit_setkey(tfm, key, keylen);
-}
-
-static inline int crypto_cipher_setkey(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_setkey(tfm, key, keylen);
-}
-
-static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
-}
-
-static inline int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
- return tfm->crt_cipher.cit_encrypt_iv(tfm, dst, src, nbytes, iv);
-}
-
-static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
-}
-
-static inline int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
- return tfm->crt_cipher.cit_decrypt_iv(tfm, dst, src, nbytes, iv);
-}
-
-static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
- const u8 *src, unsigned int len)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- memcpy(tfm->crt_cipher.cit_iv, src, len);
-}
-
-static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm,
- u8 *dst, unsigned int len)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- memcpy(dst, tfm->crt_cipher.cit_iv, len);
-}
-
-static inline int crypto_comp_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS);
- return tfm->crt_compress.cot_compress(tfm, src, slen, dst, dlen);
-}
-
-static inline int crypto_comp_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS);
- return tfm->crt_compress.cot_decompress(tfm, src, slen, dst, dlen);
-}
-
-/*
- * HMAC support.
- */
-#ifdef CONFIG_CRYPTO_HMAC
-void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen);
-void crypto_hmac_update(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg);
-void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
- unsigned int *keylen, u8 *out);
-void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen,
- struct scatterlist *sg, unsigned int nsg, u8 *out);
-#endif /* CONFIG_CRYPTO_HMAC */
-
-#endif /* _LINUX_CRYPTO_H */
-
diff --git a/drivers/staging/rtl8192e/license b/drivers/staging/rtl8192e/license
new file mode 100644
index 00000000000..4bea9fa60da
--- /dev/null
+++ b/drivers/staging/rtl8192e/license
@@ -0,0 +1,339 @@
+
+"This software program is licensed subject to the GNU General Public License
+(GPL). Version 2, June 1991, available at
+<http:
+
+GNU General Public License
+
+Version 2, June 1991
+
+Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+
+Everyone is permitted to copy and distribute verbatim copies of this license
+document, but changing it is not allowed.
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to
+share and change it. By contrast, the GNU General Public License is intended
+to guarantee your freedom to share and change free software--to make sure
+the software is free for all its users. This General Public License applies
+to most of the Free Software Foundation's software and to any other program
+whose authors commit to using it. (Some other Free Software Foundation
+software is covered by the GNU Library General Public License instead.) You
+can apply it to your programs, too.
+
+When we speak of free software, we are referring to freedom, not price. Our
+General Public Licenses are designed to make sure that you have the freedom
+to distribute copies of free software (and charge for this service if you
+wish), that you receive source code or can get it if you want it, that you
+can change the software or use pieces of it in new free programs; and that
+you know you can do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone to
+deny you these rights or to ask you to surrender the rights. These
+restrictions translate to certain responsibilities for you if you distribute
+copies of the software, or if you modify it.
+
+For example, if you distribute copies of such a program, whether gratis or
+for a fee, you must give the recipients all the rights that you have. You
+must make sure that they, too, receive or can get the source code. And you
+must show them these terms so they know their rights.
+
+We protect your rights with two steps: (1) copyright the software, and (2)
+offer you this license which gives you legal permission to copy, distribute
+and/or modify the software.
+
+Also, for each author's protection and ours, we want to make certain that
+everyone understands that there is no warranty for this free software. If
+the software is modified by someone else and passed on, we want its
+recipients to know that what they have is not the original, so that any
+problems introduced by others will not reflect on the original authors'
+reputations.
+
+Finally, any free program is threatened constantly by software patents. We
+wish to avoid the danger that redistributors of a free program will
+individually obtain patent licenses, in effect making the program
+proprietary. To prevent this, we have made it clear that any patent must be
+licensed for everyone's free use or not licensed at all.
+
+The precise terms and conditions for copying, distribution and modification
+follow.
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License applies to any program or other work which contains a notice
+ placed by the copyright holder saying it may be distributed under the
+ terms of this General Public License. The "Program", below, refers to any
+ such program or work, and a "work based on the Program" means either the
+ Program or any derivative work under copyright law: that is to say, a
+ work containing the Program or a portion of it, either verbatim or with
+ modifications and/or translated into another language. (Hereinafter,
+ translation is included without limitation in the term "modification".)
+ Each licensee is addressed as "you".
+
+ Activities other than copying, distribution and modification are not
+ covered by this License; they are outside its scope. The act of running
+ the Program is not restricted, and the output from the Program is covered
+ only if its contents constitute a work based on the Program (independent
+ of having been made by running the Program). Whether that is true depends
+ on what the Program does.
+
+1. You may copy and distribute verbatim copies of the Program's source code
+ as you receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice and
+ disclaimer of warranty; keep intact all the notices that refer to this
+ License and to the absence of any warranty; and give any other recipients
+ of the Program a copy of this License along with the Program.
+
+ You may charge a fee for the physical act of transferring a copy, and you
+ may at your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Program or any portion of it,
+ thus forming a work based on the Program, and copy and distribute such
+ modifications or work under the terms of Section 1 above, provided that
+ you also meet all of these conditions:
+
+ * a) You must cause the modified files to carry prominent notices stating
+ that you changed the files and the date of any change.
+
+ * b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any part
+ thereof, to be licensed as a whole at no charge to all third parties
+ under the terms of this License.
+
+ * c) If the modified program normally reads commands interactively when
+ run, you must cause it, when started running for such interactive
+ use in the most ordinary way, to print or display an announcement
+ including an appropriate copyright notice and a notice that there is
+ no warranty (or else, saying that you provide a warranty) and that
+ users may redistribute the program under these conditions, and
+ telling the user how to view a copy of this License. (Exception: if
+ the Program itself is interactive but does not normally print such
+ an announcement, your work based on the Program is not required to
+ print an announcement.)
+
+ These requirements apply to the modified work as a whole. If identifiable
+ sections of that work are not derived from the Program, and can be
+ reasonably considered independent and separate works in themselves, then
+ this License, and its terms, do not apply to those sections when you
+ distribute them as separate works. But when you distribute the same
+ sections as part of a whole which is a work based on the Program, the
+ distribution of the whole must be on the terms of this License, whose
+ permissions for other licensees extend to the entire whole, and thus to
+ each and every part regardless of who wrote it.
+
+ Thus, it is not the intent of this section to claim rights or contest
+ your rights to work written entirely by you; rather, the intent is to
+ exercise the right to control the distribution of derivative or
+ collective works based on the Program.
+
+ In addition, mere aggregation of another work not based on the Program
+ with the Program (or with a work based on the Program) on a volume of a
+ storage or distribution medium does not bring the other work under the
+ scope of this License.
+
+3. You may copy and distribute the Program (or a work based on it, under
+ Section 2) in object code or executable form under the terms of Sections
+ 1 and 2 above provided that you also do one of the following:
+
+ * a) Accompany it with the complete corresponding machine-readable source
+ code, which must be distributed under the terms of Sections 1 and 2
+ above on a medium customarily used for software interchange; or,
+
+ * b) Accompany it with a written offer, valid for at least three years,
+ to give any third party, for a charge no more than your cost of
+ physically performing source distribution, a complete machine-
+ readable copy of the corresponding source code, to be distributed
+ under the terms of Sections 1 and 2 above on a medium customarily
+ used for software interchange; or,
+
+ * c) Accompany it with the information you received as to the offer to
+ distribute corresponding source code. (This alternative is allowed
+ only for noncommercial distribution and only if you received the
+ program in object code or executable form with such an offer, in
+ accord with Subsection b above.)
+
+ The source code for a work means the preferred form of the work for
+ making modifications to it. For an executable work, complete source code
+ means all the source code for all modules it contains, plus any
+ associated interface definition files, plus the scripts used to control
+ compilation and installation of the executable. However, as a special
+ exception, the source code distributed need not include anything that is
+ normally distributed (in either source or binary form) with the major
+ components (compiler, kernel, and so on) of the operating system on which
+ the executable runs, unless that component itself accompanies the
+ executable.
+
+ If distribution of executable or object code is made by offering access
+ to copy from a designated place, then offering equivalent access to copy
+ the source code from the same place counts as distribution of the source
+ code, even though third parties are not compelled to copy the source
+ along with the object code.
+
+4. You may not copy, modify, sublicense, or distribute the Program except as
+ expressly provided under this License. Any attempt otherwise to copy,
+ modify, sublicense or distribute the Program is void, and will
+ automatically terminate your rights under this License. However, parties
+ who have received copies, or rights, from you under this License will not
+ have their licenses terminated so long as such parties remain in full
+ compliance.
+
+5. You are not required to accept this License, since you have not signed
+ it. However, nothing else grants you permission to modify or distribute
+ the Program or its derivative works. These actions are prohibited by law
+ if you do not accept this License. Therefore, by modifying or
+ distributing the Program (or any work based on the Program), you
+ indicate your acceptance of this License to do so, and all its terms and
+ conditions for copying, distributing or modifying the Program or works
+ based on it.
+
+6. Each time you redistribute the Program (or any work based on the
+ Program), the recipient automatically receives a license from the
+ original licensor to copy, distribute or modify the Program subject to
+ these terms and conditions. You may not impose any further restrictions
+ on the recipients' exercise of the rights granted herein. You are not
+ responsible for enforcing compliance by third parties to this License.
+
+7. If, as a consequence of a court judgment or allegation of patent
+ infringement or for any other reason (not limited to patent issues),
+ conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot distribute
+ so as to satisfy simultaneously your obligations under this License and
+ any other pertinent obligations, then as a consequence you may not
+ distribute the Program at all. For example, if a patent license would
+ not permit royalty-free redistribution of the Program by all those who
+ receive copies directly or indirectly through you, then the only way you
+ could satisfy both it and this License would be to refrain entirely from
+ distribution of the Program.
+
+ If any portion of this section is held invalid or unenforceable under any
+ particular circumstance, the balance of the section is intended to apply
+ and the section as a whole is intended to apply in other circumstances.
+
+ It is not the purpose of this section to induce you to infringe any
+ patents or other property right claims or to contest validity of any
+ such claims; this section has the sole purpose of protecting the
+ integrity of the free software distribution system, which is implemented
+ by public license practices. Many people have made generous contributions
+ to the wide range of software distributed through that system in
+ reliance on consistent application of that system; it is up to the
+ author/donor to decide if he or she is willing to distribute software
+ through any other system and a licensee cannot impose that choice.
+
+ This section is intended to make thoroughly clear what is believed to be
+ a consequence of the rest of this License.
+
+8. If the distribution and/or use of the Program is restricted in certain
+ countries either by patents or by copyrighted interfaces, the original
+ copyright holder who places the Program under this License may add an
+ explicit geographical distribution limitation excluding those countries,
+ so that distribution is permitted only in or among countries not thus
+ excluded. In such case, this License incorporates the limitation as if
+ written in the body of this License.
+
+9. The Free Software Foundation may publish revised and/or new versions of
+ the General Public License from time to time. Such new versions will be
+ similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the Program
+ specifies a version number of this License which applies to it and "any
+ later version", you have the option of following the terms and
+ conditions either of that version or of any later version published by
+ the Free Software Foundation. If the Program does not specify a version
+ number of this License, you may choose any version ever published by the
+ Free Software Foundation.
+
+10. If you wish to incorporate parts of the Program into other free programs
+ whose distribution conditions are different, write to the author to ask
+ for permission. For software which is copyrighted by the Free Software
+ Foundation, write to the Free Software Foundation; we sometimes make
+ exceptions for this. Our decision will be guided by the two goals of
+ preserving the free status of all derivatives of our free software and
+ of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+ FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+ OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+ PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
+ EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
+ YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
+ NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+ REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
+ DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
+ DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
+ (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
+ INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
+ THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
+ OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it free
+software which everyone can redistribute and change under these terms.
+
+To do so, attach the following notices to the program. It is safest to
+attach them to the start of each source file to most effectively convey the
+exclusion of warranty; and each file should have at least the "copyright"
+line and a pointer to where the full notice is found.
+
+one line to give the program's name and an idea of what it does.
+Copyright (C) yyyy name of author
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59
+Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this when
+it starts in an interactive mode:
+
+Gnomovision version 69, Copyright (C) year name of author Gnomovision comes
+with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free
+software, and you are welcome to redistribute it under certain conditions;
+type 'show c' for details.
+
+The hypothetical commands 'show w' and 'show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may be
+called something other than 'show w' and 'show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+'Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+signature of Ty Coon, 1 April 1989
+Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General Public
+License instead of this License.
diff --git a/drivers/staging/rtl8192e/r8180_93cx6.c b/drivers/staging/rtl8192e/r8180_93cx6.c
deleted file mode 100644
index 55d4f56dc42..00000000000
--- a/drivers/staging/rtl8192e/r8180_93cx6.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- This files contains card eeprom (93c46 or 93c56) programming routines,
- memory is addressed by 16 bits words.
-
- This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
- Released under the terms of GPL (General Public Licence)
-
- Parts of this driver are based on the GPL part of the
- official realtek driver.
-
- Parts of this driver are based on the rtl8180 driver skeleton
- from Patric Schenke & Andres Salomon.
-
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
-
- We want to tanks the Authors of those projects and the Ndiswrapper
- project Authors.
-*/
-
-#include "r8180_93cx6.h"
-
-static void eprom_cs(struct r8192_priv *priv, short bit)
-{
- if (bit)
- write_nic_byte(priv, EPROM_CMD,
- (1<<EPROM_CS_SHIFT) |
- read_nic_byte(priv, EPROM_CMD)); //enable EPROM
- else
- write_nic_byte(priv, EPROM_CMD, read_nic_byte(priv, EPROM_CMD)
- &~(1<<EPROM_CS_SHIFT)); //disable EPROM
-
- udelay(EPROM_DELAY);
-}
-
-
-static void eprom_ck_cycle(struct r8192_priv *priv)
-{
- write_nic_byte(priv, EPROM_CMD,
- (1<<EPROM_CK_SHIFT) | read_nic_byte(priv, EPROM_CMD));
- udelay(EPROM_DELAY);
- write_nic_byte(priv, EPROM_CMD,
- read_nic_byte(priv, EPROM_CMD) & ~(1<<EPROM_CK_SHIFT));
- udelay(EPROM_DELAY);
-}
-
-
-static void eprom_w(struct r8192_priv *priv, short bit)
-{
- if (bit)
- write_nic_byte(priv, EPROM_CMD, (1<<EPROM_W_SHIFT) |
- read_nic_byte(priv, EPROM_CMD));
- else
- write_nic_byte(priv, EPROM_CMD, read_nic_byte(priv, EPROM_CMD)
- &~(1<<EPROM_W_SHIFT));
-
- udelay(EPROM_DELAY);
-}
-
-
-static short eprom_r(struct r8192_priv *priv)
-{
- short bit;
-
- bit = (read_nic_byte(priv, EPROM_CMD) & (1<<EPROM_R_SHIFT));
- udelay(EPROM_DELAY);
-
- if (bit)
- return 1;
- return 0;
-}
-
-
-static void eprom_send_bits_string(struct r8192_priv *priv, short b[], int len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- eprom_w(priv, b[i]);
- eprom_ck_cycle(priv);
- }
-}
-
-
-u32 eprom_read(struct r8192_priv *priv, u32 addr)
-{
- short read_cmd[] = {1, 1, 0};
- short addr_str[8];
- int i;
- int addr_len;
- u32 ret;
-
- ret = 0;
- //enable EPROM programming
- write_nic_byte(priv, EPROM_CMD,
- (EPROM_CMD_PROGRAM<<EPROM_CMD_OPERATING_MODE_SHIFT));
- udelay(EPROM_DELAY);
-
- if (priv->epromtype == EPROM_93c56) {
- addr_str[7] = addr & 1;
- addr_str[6] = addr & (1<<1);
- addr_str[5] = addr & (1<<2);
- addr_str[4] = addr & (1<<3);
- addr_str[3] = addr & (1<<4);
- addr_str[2] = addr & (1<<5);
- addr_str[1] = addr & (1<<6);
- addr_str[0] = addr & (1<<7);
- addr_len = 8;
- } else {
- addr_str[5] = addr & 1;
- addr_str[4] = addr & (1<<1);
- addr_str[3] = addr & (1<<2);
- addr_str[2] = addr & (1<<3);
- addr_str[1] = addr & (1<<4);
- addr_str[0] = addr & (1<<5);
- addr_len = 6;
- }
- eprom_cs(priv, 1);
- eprom_ck_cycle(priv);
- eprom_send_bits_string(priv, read_cmd, 3);
- eprom_send_bits_string(priv, addr_str, addr_len);
-
- //keep chip pin D to low state while reading.
- //I'm unsure if it is necessary, but anyway shouldn't hurt
- eprom_w(priv, 0);
-
- for (i = 0; i < 16; i++) {
- //eeprom needs a clk cycle between writing opcode&adr
- //and reading data. (eeprom outs a dummy 0)
- eprom_ck_cycle(priv);
- ret |= (eprom_r(priv)<<(15-i));
- }
-
- eprom_cs(priv, 0);
- eprom_ck_cycle(priv);
-
- //disable EPROM programming
- write_nic_byte(priv, EPROM_CMD,
- (EPROM_CMD_NORMAL<<EPROM_CMD_OPERATING_MODE_SHIFT));
- return ret;
-}
diff --git a/drivers/staging/rtl8192e/r8180_93cx6.h b/drivers/staging/rtl8192e/r8180_93cx6.h
deleted file mode 100644
index 55d20544a9c..00000000000
--- a/drivers/staging/rtl8192e/r8180_93cx6.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* r8180_93cx6.h - 93c46 or 93c56 eeprom card programming routines
- *
- * This is part of rtl8187 OpenSource driver
- * Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
- * Released under the terms of GPL (General Public Licence)
- * Parts of this driver are based on the GPL part of the official realtek driver
- *
- * Parts of this driver are based on the rtl8180 driver skeleton from
- * Patric Schenke & Andres Salomon.
- *
- * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
- *
- * We want to thank the authors of the above mentioned projects and to
- * the authors of the Ndiswrapper project.
- */
-
-#include "r8192E.h"
-#include "r8192E_hw.h"
-
-#define EPROM_DELAY 10
-
-#define EPROM_ANAPARAM_ADDRLWORD 0xd
-#define EPROM_ANAPARAM_ADDRHWORD 0xe
-
-#define EPROM_RFCHIPID 0x6
-#define EPROM_TXPW_BASE 0x05
-#define EPROM_RFCHIPID_RTL8225U 5
-#define EPROM_RF_PARAM 0x4
-#define EPROM_CONFIG2 0xc
-
-#define EPROM_VERSION 0x1E
-#define MAC_ADR 0x7
-
-#define CIS 0x18
-
-#define EPROM_TXPW0 0x16
-#define EPROM_TXPW2 0x1b
-#define EPROM_TXPW1 0x3d
-
-/* Reads a 16 bits word. */
-u32 eprom_read(struct r8192_priv *priv, u32 addr);
diff --git a/drivers/staging/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/r8190P_def.h
new file mode 100644
index 00000000000..b7bb71fa9ec
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8190P_def.h
@@ -0,0 +1,410 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+
+
+#ifndef R8190P_DEF_H
+#define R8190P_DEF_H
+
+#include <linux/types.h>
+
+#define MAX_SILENT_RESET_RX_SLOT_NUM 10
+
+#define RX_MPDU_QUEUE 0
+#define RX_CMD_QUEUE 1
+
+
+enum rtl819x_loopback {
+ RTL819X_NO_LOOPBACK = 0,
+ RTL819X_MAC_LOOPBACK = 1,
+ RTL819X_DMA_LOOPBACK = 2,
+ RTL819X_CCK_LOOPBACK = 3,
+};
+
+
+#define RESET_DELAY_8185 20
+
+#define RT_IBSS_INT_MASKS (IMR_BcnInt | IMR_BcnInt | IMR_TBDOK | IMR_TBDER)
+
+#define DESC90_RATE1M 0x00
+#define DESC90_RATE2M 0x01
+#define DESC90_RATE5_5M 0x02
+#define DESC90_RATE11M 0x03
+#define DESC90_RATE6M 0x04
+#define DESC90_RATE9M 0x05
+#define DESC90_RATE12M 0x06
+#define DESC90_RATE18M 0x07
+#define DESC90_RATE24M 0x08
+#define DESC90_RATE36M 0x09
+#define DESC90_RATE48M 0x0a
+#define DESC90_RATE54M 0x0b
+#define DESC90_RATEMCS0 0x00
+#define DESC90_RATEMCS1 0x01
+#define DESC90_RATEMCS2 0x02
+#define DESC90_RATEMCS3 0x03
+#define DESC90_RATEMCS4 0x04
+#define DESC90_RATEMCS5 0x05
+#define DESC90_RATEMCS6 0x06
+#define DESC90_RATEMCS7 0x07
+#define DESC90_RATEMCS8 0x08
+#define DESC90_RATEMCS9 0x09
+#define DESC90_RATEMCS10 0x0a
+#define DESC90_RATEMCS11 0x0b
+#define DESC90_RATEMCS12 0x0c
+#define DESC90_RATEMCS13 0x0d
+#define DESC90_RATEMCS14 0x0e
+#define DESC90_RATEMCS15 0x0f
+#define DESC90_RATEMCS32 0x20
+
+#define SHORT_SLOT_TIME 9
+#define NON_SHORT_SLOT_TIME 20
+
+
+#define MAX_LINES_HWCONFIG_TXT 1000
+#define MAX_BYTES_LINE_HWCONFIG_TXT 128
+
+#define SW_THREE_WIRE 0
+#define HW_THREE_WIRE 2
+
+#define BT_DEMO_BOARD 0
+#define BT_QA_BOARD 1
+#define BT_FPGA 2
+
+#define RX_SMOOTH 20
+
+#define QSLT_BK 0x1
+#define QSLT_BE 0x0
+#define QSLT_VI 0x4
+#define QSLT_VO 0x6
+#define QSLT_BEACON 0x10
+#define QSLT_HIGH 0x11
+#define QSLT_MGNT 0x12
+#define QSLT_CMD 0x13
+
+#define NUM_OF_FIRMWARE_QUEUE 10
+#define NUM_OF_PAGES_IN_FW 0x100
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x007
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x0aa
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x024
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x007
+#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0
+#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x2
+#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x10
+#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0
+#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x4
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xd
+
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
+
+#define APPLIED_RESERVED_QUEUE_IN_FW 0x80000000
+#define RSVD_FW_QUEUE_PAGE_BK_SHIFT 0x00
+#define RSVD_FW_QUEUE_PAGE_BE_SHIFT 0x08
+#define RSVD_FW_QUEUE_PAGE_VI_SHIFT 0x10
+#define RSVD_FW_QUEUE_PAGE_VO_SHIFT 0x18
+#define RSVD_FW_QUEUE_PAGE_MGNT_SHIFT 0x10
+#define RSVD_FW_QUEUE_PAGE_BCN_SHIFT 0x00
+#define RSVD_FW_QUEUE_PAGE_PUB_SHIFT 0x08
+
+#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
+#define HAL_PRIME_CHNL_OFFSET_LOWER 1
+#define HAL_PRIME_CHNL_OFFSET_UPPER 2
+
+
+enum version_8190_loopback {
+ VERSION_8190_BD = 0x3,
+ VERSION_8190_BE
+};
+
+#define IC_VersionCut_C 0x2
+#define IC_VersionCut_D 0x3
+#define IC_VersionCut_E 0x4
+
+enum rf_optype {
+ RF_OP_By_SW_3wire = 0,
+ RF_OP_By_FW,
+ RF_OP_MAX
+};
+
+
+enum power_save_mode {
+ POWER_SAVE_MODE_ACTIVE,
+ POWER_SAVE_MODE_SAVE,
+};
+
+enum interface_select_8190pci {
+ INTF_SEL1_MINICARD = 0,
+ INTF_SEL0_PCIE = 1,
+ INTF_SEL2_RSV = 2,
+ INTF_SEL3_RSV = 3,
+};
+
+struct bb_reg_definition {
+ u32 rfintfs;
+ u32 rfintfi;
+ u32 rfintfo;
+ u32 rfintfe;
+ u32 rf3wireOffset;
+ u32 rfLSSI_Select;
+ u32 rfTxGainStage;
+ u32 rfHSSIPara1;
+ u32 rfHSSIPara2;
+ u32 rfSwitchControl;
+ u32 rfAGCControl1;
+ u32 rfAGCControl2;
+ u32 rfRxIQImbalance;
+ u32 rfRxAFE;
+ u32 rfTxIQImbalance;
+ u32 rfTxAFE;
+ u32 rfLSSIReadBack;
+ u32 rfLSSIReadBackPi;
+};
+
+struct tx_fwinfo {
+ u8 TxRate:7;
+ u8 CtsEnable:1;
+ u8 RtsRate:7;
+ u8 RtsEnable:1;
+ u8 TxHT:1;
+ u8 Short:1;
+ u8 TxBandwidth:1;
+ u8 TxSubCarrier:2;
+ u8 STBC:2;
+ u8 AllowAggregation:1;
+ u8 RtsHT:1;
+ u8 RtsShort:1;
+ u8 RtsBandwidth:1;
+ u8 RtsSubcarrier:2;
+ u8 RtsSTBC:2;
+ u8 EnableCPUDur:1;
+
+ u32 RxMF:2;
+ u32 RxAMD:3;
+ u32 Reserved1:3;
+ u32 TxAGCOffset:4;
+ u32 TxAGCSign:1;
+ u32 Tx_INFO_RSVD:6;
+ u32 PacketID:13;
+};
+
+struct tx_fwinfo_8190pci {
+ u8 TxRate:7;
+ u8 CtsEnable:1;
+ u8 RtsRate:7;
+ u8 RtsEnable:1;
+ u8 TxHT:1;
+ u8 Short:1;
+ u8 TxBandwidth:1;
+ u8 TxSubCarrier:2;
+ u8 STBC:2;
+ u8 AllowAggregation:1;
+ u8 RtsHT:1;
+ u8 RtsShort:1;
+ u8 RtsBandwidth:1;
+ u8 RtsSubcarrier:2;
+ u8 RtsSTBC:2;
+ u8 EnableCPUDur:1;
+
+ u32 RxMF:2;
+ u32 RxAMD:3;
+ u32 TxPerPktInfoFeedback:1;
+ u32 Reserved1:2;
+ u32 TxAGCOffset:4;
+ u32 TxAGCSign:1;
+ u32 RAW_TXD:1;
+ u32 Retry_Limit:4;
+ u32 Reserved2:1;
+ u32 PacketID:13;
+
+
+};
+
+
+#define TX_DESC_SIZE 32
+
+#define TX_DESC_CMD_SIZE 32
+
+
+#define TX_STATUS_DESC_SIZE 32
+
+#define TX_FWINFO_SIZE 8
+
+
+#define RX_DESC_SIZE 16
+
+#define RX_STATUS_DESC_SIZE 16
+
+#define RX_DRIVER_INFO_SIZE 8
+
+struct log_int_8190 {
+ u32 nIMR_COMDOK;
+ u32 nIMR_MGNTDOK;
+ u32 nIMR_HIGH;
+ u32 nIMR_VODOK;
+ u32 nIMR_VIDOK;
+ u32 nIMR_BEDOK;
+ u32 nIMR_BKDOK;
+ u32 nIMR_ROK;
+ u32 nIMR_RCOK;
+ u32 nIMR_TBDOK;
+ u32 nIMR_BDOK;
+ u32 nIMR_RXFOVW;
+};
+
+struct phy_ofdm_rx_status_rxsc_sgien_exintfflag {
+ u8 reserved:4;
+ u8 rxsc:2;
+ u8 sgi_en:1;
+ u8 ex_intf_flag:1;
+};
+
+struct phy_sts_ofdm_819xpci {
+ u8 trsw_gain_X[4];
+ u8 pwdb_all;
+ u8 cfosho_X[4];
+ u8 cfotail_X[4];
+ u8 rxevm_X[2];
+ u8 rxsnr_X[4];
+ u8 pdsnr_X[2];
+ u8 csi_current_X[2];
+ u8 csi_target_X[2];
+ u8 sigevm;
+ u8 max_ex_pwr;
+ u8 sgi_en;
+ u8 rxsc_sgien_exflg;
+};
+
+struct phy_sts_cck_819xpci {
+ u8 adc_pwdb_X[4];
+ u8 sq_rpt;
+ u8 cck_agc_rpt;
+};
+
+
+#define PHY_RSSI_SLID_WIN_MAX 100
+#define PHY_Beacon_RSSI_SLID_WIN_MAX 10
+
+struct tx_desc {
+ u16 PktSize;
+ u8 Offset;
+ u8 Reserved1:3;
+ u8 CmdInit:1;
+ u8 LastSeg:1;
+ u8 FirstSeg:1;
+ u8 LINIP:1;
+ u8 OWN:1;
+
+ u8 TxFWInfoSize;
+ u8 RATid:3;
+ u8 DISFB:1;
+ u8 USERATE:1;
+ u8 MOREFRAG:1;
+ u8 NoEnc:1;
+ u8 PIFS:1;
+ u8 QueueSelect:5;
+ u8 NoACM:1;
+ u8 Resv:2;
+ u8 SecCAMID:5;
+ u8 SecDescAssign:1;
+ u8 SecType:2;
+
+ u16 TxBufferSize;
+ u8 PktId:7;
+ u8 Resv1:1;
+ u8 Reserved2;
+
+ u32 TxBuffAddr;
+
+ u32 NextDescAddress;
+
+ u32 Reserved5;
+ u32 Reserved6;
+ u32 Reserved7;
+};
+
+
+struct tx_desc_cmd {
+ u16 PktSize;
+ u8 Reserved1;
+ u8 CmdType:3;
+ u8 CmdInit:1;
+ u8 LastSeg:1;
+ u8 FirstSeg:1;
+ u8 LINIP:1;
+ u8 OWN:1;
+
+ u16 ElementReport;
+ u16 Reserved2;
+
+ u16 TxBufferSize;
+ u16 Reserved3;
+
+ u32 TxBuffAddr;
+ u32 NextDescAddress;
+ u32 Reserved4;
+ u32 Reserved5;
+ u32 Reserved6;
+};
+
+struct rx_desc {
+ u16 Length:14;
+ u16 CRC32:1;
+ u16 ICV:1;
+ u8 RxDrvInfoSize;
+ u8 Shift:2;
+ u8 PHYStatus:1;
+ u8 SWDec:1;
+ u8 LastSeg:1;
+ u8 FirstSeg:1;
+ u8 EOR:1;
+ u8 OWN:1;
+
+ u32 Reserved2;
+
+ u32 Reserved3;
+
+ u32 BufferAddress;
+
+};
+
+
+struct rx_fwinfo {
+ u16 Reserved1:12;
+ u16 PartAggr:1;
+ u16 FirstAGGR:1;
+ u16 Reserved2:2;
+
+ u8 RxRate:7;
+ u8 RxHT:1;
+
+ u8 BW:1;
+ u8 SPLCP:1;
+ u8 Reserved3:2;
+ u8 PAM:1;
+ u8 Mcast:1;
+ u8 Bcast:1;
+ u8 Reserved4:1;
+
+ u32 TSFL;
+
+};
+
+#endif
diff --git a/drivers/staging/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/r8190P_rtl8256.c
new file mode 100644
index 00000000000..0da56c80f08
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8190P_rtl8256.c
@@ -0,0 +1,306 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+
+#include "rtl_core.h"
+#include "r8192E_phyreg.h"
+#include "r8192E_phy.h"
+#include "r8190P_rtl8256.h"
+
+void PHY_SetRF8256Bandwidth(struct net_device *dev,
+ enum ht_channel_width Bandwidth)
+{
+ u8 eRFPath;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ for (eRFPath = 0; eRFPath < priv->NumTotalRFPath; eRFPath++) {
+ if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+ continue;
+
+ switch (Bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ if (priv->card_8192_version == VERSION_8190_BD ||
+ priv->card_8192_version == VERSION_8190_BE) {
+ rtl8192_phy_SetRFReg(dev,
+ (enum rf90_radio_path)eRFPath,
+ 0x0b, bMask12Bits, 0x100);
+ rtl8192_phy_SetRFReg(dev,
+ (enum rf90_radio_path)eRFPath,
+ 0x2c, bMask12Bits, 0x3d7);
+ rtl8192_phy_SetRFReg(dev,
+ (enum rf90_radio_path)eRFPath,
+ 0x0e, bMask12Bits, 0x021);
+
+ } else {
+ RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): "
+ "unknown hardware version\n");
+ }
+
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ if (priv->card_8192_version == VERSION_8190_BD ||
+ priv->card_8192_version == VERSION_8190_BE) {
+ rtl8192_phy_SetRFReg(dev,
+ (enum rf90_radio_path)eRFPath,
+ 0x0b, bMask12Bits, 0x300);
+ rtl8192_phy_SetRFReg(dev,
+ (enum rf90_radio_path)eRFPath,
+ 0x2c, bMask12Bits, 0x3ff);
+ rtl8192_phy_SetRFReg(dev,
+ (enum rf90_radio_path)eRFPath,
+ 0x0e, bMask12Bits, 0x0e1);
+
+ } else {
+ RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): "
+ "unknown hardware version\n");
+ }
+
+
+ break;
+ default:
+ RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown "
+ "Bandwidth: %#X\n", Bandwidth);
+ break;
+
+ }
+ }
+ return;
+}
+
+bool PHY_RF8256_Config(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ bool rtStatus = true;
+ priv->NumTotalRFPath = RTL819X_TOTAL_RF_PATH;
+ rtStatus = phy_RF8256_Config_ParaFile(dev);
+
+ return rtStatus;
+}
+
+bool phy_RF8256_Config_ParaFile(struct net_device *dev)
+{
+ u32 u4RegValue = 0;
+ u8 eRFPath;
+ bool rtStatus = true;
+ struct bb_reg_definition *pPhyReg;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 RegOffSetToBeCheck = 0x3;
+ u32 RegValueToBeCheck = 0x7f1;
+ u32 RF3_Final_Value = 0;
+ u8 ConstRetryTimes = 5, RetryTimes = 5;
+ u8 ret = 0;
+
+ for (eRFPath = (enum rf90_radio_path)RF90_PATH_A;
+ eRFPath < priv->NumTotalRFPath; eRFPath++) {
+ if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+ continue;
+
+ pPhyReg = &priv->PHYRegDef[eRFPath];
+
+
+ switch (eRFPath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs,
+ bRFSI_RFENV);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs,
+ bRFSI_RFENV<<16);
+ break;
+ }
+
+ rtl8192_setBBreg(dev, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
+
+ rtl8192_setBBreg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
+
+ rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2,
+ b3WireAddressLength, 0x0);
+ rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2,
+ b3WireDataLength, 0x0);
+
+ rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path) eRFPath, 0x0,
+ bMask12Bits, 0xbf);
+
+ rtStatus = rtl8192_phy_checkBBAndRF(dev, HW90_BLOCK_RF,
+ (enum rf90_radio_path)eRFPath);
+ if (rtStatus != true) {
+ RT_TRACE(COMP_ERR, "PHY_RF8256_Config():Check "
+ "Radio[%d] Fail!!\n", eRFPath);
+ goto phy_RF8256_Config_ParaFile_Fail;
+ }
+
+ RetryTimes = ConstRetryTimes;
+ RF3_Final_Value = 0;
+ switch (eRFPath) {
+ case RF90_PATH_A:
+ while (RF3_Final_Value != RegValueToBeCheck &&
+ RetryTimes != 0) {
+ ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,
+ (enum rf90_radio_path)eRFPath);
+ RF3_Final_Value = rtl8192_phy_QueryRFReg(dev,
+ (enum rf90_radio_path)eRFPath,
+ RegOffSetToBeCheck,
+ bMask12Bits);
+ RT_TRACE(COMP_RF, "RF %d %d register final "
+ "value: %x\n", eRFPath,
+ RegOffSetToBeCheck, RF3_Final_Value);
+ RetryTimes--;
+ }
+ break;
+ case RF90_PATH_B:
+ while (RF3_Final_Value != RegValueToBeCheck &&
+ RetryTimes != 0) {
+ ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,
+ (enum rf90_radio_path)eRFPath);
+ RF3_Final_Value = rtl8192_phy_QueryRFReg(dev,
+ (enum rf90_radio_path)eRFPath,
+ RegOffSetToBeCheck,
+ bMask12Bits);
+ RT_TRACE(COMP_RF, "RF %d %d register final "
+ "value: %x\n", eRFPath,
+ RegOffSetToBeCheck, RF3_Final_Value);
+ RetryTimes--;
+ }
+ break;
+ case RF90_PATH_C:
+ while (RF3_Final_Value != RegValueToBeCheck &&
+ RetryTimes != 0) {
+ ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,
+ (enum rf90_radio_path)eRFPath);
+ RF3_Final_Value = rtl8192_phy_QueryRFReg(dev,
+ (enum rf90_radio_path)eRFPath,
+ RegOffSetToBeCheck,
+ bMask12Bits);
+ RT_TRACE(COMP_RF, "RF %d %d register final "
+ "value: %x\n", eRFPath,
+ RegOffSetToBeCheck, RF3_Final_Value);
+ RetryTimes--;
+ }
+ break;
+ case RF90_PATH_D:
+ while (RF3_Final_Value != RegValueToBeCheck &&
+ RetryTimes != 0) {
+ ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,
+ (enum rf90_radio_path)eRFPath);
+ RF3_Final_Value = rtl8192_phy_QueryRFReg(dev,
+ (enum rf90_radio_path)eRFPath,
+ RegOffSetToBeCheck, bMask12Bits);
+ RT_TRACE(COMP_RF, "RF %d %d register final "
+ "value: %x\n", eRFPath,
+ RegOffSetToBeCheck, RF3_Final_Value);
+ RetryTimes--;
+ }
+ break;
+ }
+
+ switch (eRFPath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV,
+ u4RegValue);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV<<16,
+ u4RegValue);
+ break;
+ }
+
+ if (ret) {
+ RT_TRACE(COMP_ERR, "phy_RF8256_Config_ParaFile():"
+ "Radio[%d] Fail!!", eRFPath);
+ goto phy_RF8256_Config_ParaFile_Fail;
+ }
+
+ }
+
+ RT_TRACE(COMP_PHY, "PHY Initialization Success\n") ;
+ return true;
+
+phy_RF8256_Config_ParaFile_Fail:
+ RT_TRACE(COMP_ERR, "PHY Initialization failed\n") ;
+ return false;
+}
+
+void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8 powerlevel)
+{
+ u32 TxAGC = 0;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ TxAGC = powerlevel;
+ if (priv->bDynamicTxLowPower == true) {
+ if (priv->CustomerID == RT_CID_819x_Netcore)
+ TxAGC = 0x22;
+ else
+ TxAGC += priv->CckPwEnl;
+ }
+ if (TxAGC > 0x24)
+ TxAGC = 0x24;
+ rtl8192_setBBreg(dev, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, TxAGC);
+}
+
+
+void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 writeVal, powerBase0, powerBase1, writeVal_tmp;
+ u8 index = 0;
+ u16 RegOffset[6] = {0xe00, 0xe04, 0xe10, 0xe14, 0xe18, 0xe1c};
+ u8 byte0, byte1, byte2, byte3;
+
+ powerBase0 = powerlevel + priv->LegacyHTTxPowerDiff;
+ powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
+ (powerBase0 << 8) | powerBase0;
+ powerBase1 = powerlevel;
+ powerBase1 = (powerBase1 << 24) | (powerBase1 << 16) |
+ (powerBase1 << 8) | powerBase1;
+
+ for (index = 0; index < 6; index++) {
+ writeVal = (u32)(priv->MCSTxPowerLevelOriginalOffset[index] +
+ ((index < 2) ? powerBase0 : powerBase1));
+ byte0 = (u8)(writeVal & 0x7f);
+ byte1 = (u8)((writeVal & 0x7f00)>>8);
+ byte2 = (u8)((writeVal & 0x7f0000)>>16);
+ byte3 = (u8)((writeVal & 0x7f000000)>>24);
+ if (byte0 > 0x24)
+ byte0 = 0x24;
+ if (byte1 > 0x24)
+ byte1 = 0x24;
+ if (byte2 > 0x24)
+ byte2 = 0x24;
+ if (byte3 > 0x24)
+ byte3 = 0x24;
+
+ if (index == 3) {
+ writeVal_tmp = (byte3 << 24) | (byte2 << 16) |
+ (byte1 << 8) | byte0;
+ priv->Pwr_Track = writeVal_tmp;
+ }
+
+ if (priv->bDynamicTxHighPower == true)
+ writeVal = 0x03030303;
+ else
+ writeVal = (byte3 << 24) | (byte2 << 16) |
+ (byte1 << 8) | byte0;
+ rtl8192_setBBreg(dev, RegOffset[index], 0x7f7f7f7f, writeVal);
+ }
+
+ return;
+}
diff --git a/drivers/staging/rtl8192e/r8190P_rtl8256.h b/drivers/staging/rtl8192e/r8190P_rtl8256.h
new file mode 100644
index 00000000000..64e831d2f4e
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8190P_rtl8256.h
@@ -0,0 +1,31 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+
+#ifndef RTL8225H
+#define RTL8225H
+
+#define RTL819X_TOTAL_RF_PATH 2
+extern void PHY_SetRF8256Bandwidth(struct net_device *dev,
+ enum ht_channel_width Bandwidth);
+extern bool PHY_RF8256_Config(struct net_device *dev);
+extern bool phy_RF8256_Config_ParaFile(struct net_device *dev);
+extern void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8 powerlevel);
+extern void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel);
+
+#endif
diff --git a/drivers/staging/rtl8192e/r8190_rtl8256.c b/drivers/staging/rtl8192e/r8190_rtl8256.c
deleted file mode 100644
index 286462cc819..00000000000
--- a/drivers/staging/rtl8192e/r8190_rtl8256.c
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- This is part of the rtl8192 driver
- released under the GPL (See file COPYING for details).
-
- This files contains programming code for the rtl8256
- radio frontend.
-
- *Many* thanks to Realtek Corp. for their great support!
-
-*/
-
-#include "r8192E.h"
-#include "r8192E_hw.h"
-#include "r819xE_phyreg.h"
-#include "r819xE_phy.h"
-#include "r8190_rtl8256.h"
-
-/*--------------------------------------------------------------------------
- * Overview: set RF band width (20M or 40M)
- * Input: struct net_device* dev
- * WIRELESS_BANDWIDTH_E Bandwidth //20M or 40M
- * Output: NONE
- * Return: NONE
- * Note: 8226 support both 20M and 40 MHz
- *---------------------------------------------------------------------------*/
-void PHY_SetRF8256Bandwidth(struct r8192_priv *priv, HT_CHANNEL_WIDTH Bandwidth) //20M or 40M
-{
- u8 eRFPath;
-
- //for(eRFPath = RF90_PATH_A; eRFPath <pHalData->NumTotalRFPath; eRFPath++)
- for(eRFPath = 0; eRFPath <priv->NumTotalRFPath; eRFPath++)
- {
- if (!rtl8192_phy_CheckIsLegalRFPath(priv, eRFPath))
- continue;
-
- switch(Bandwidth)
- {
- case HT_CHANNEL_WIDTH_20:
- if(priv->card_8192_version == VERSION_8190_BD || priv->card_8192_version == VERSION_8190_BE)// 8256 D-cut, E-cut, xiong: consider it later!
- {
- rtl8192_phy_SetRFReg(priv, (RF90_RADIO_PATH_E)eRFPath, 0x0b, bMask12Bits, 0x100); //phy para:1ba
- rtl8192_phy_SetRFReg(priv, (RF90_RADIO_PATH_E)eRFPath, 0x2c, bMask12Bits, 0x3d7);
- rtl8192_phy_SetRFReg(priv, (RF90_RADIO_PATH_E)eRFPath, 0x0e, bMask12Bits, 0x021);
-
- //cosa add for sd3's request 01/23/2008
- //rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x14, bMask12Bits, 0x5ab);
- }
- else
- {
- RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown hardware version\n");
- }
-
- break;
- case HT_CHANNEL_WIDTH_20_40:
- if(priv->card_8192_version == VERSION_8190_BD ||priv->card_8192_version == VERSION_8190_BE)// 8256 D-cut, E-cut, xiong: consider it later!
- {
- rtl8192_phy_SetRFReg(priv, (RF90_RADIO_PATH_E)eRFPath, 0x0b, bMask12Bits, 0x300); //phy para:3ba
- rtl8192_phy_SetRFReg(priv, (RF90_RADIO_PATH_E)eRFPath, 0x2c, bMask12Bits, 0x3ff);
- rtl8192_phy_SetRFReg(priv, (RF90_RADIO_PATH_E)eRFPath, 0x0e, bMask12Bits, 0x0e1);
-
- }
- else
- {
- RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown hardware version\n");
- }
-
-
- break;
- default:
- RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown Bandwidth: %#X\n",Bandwidth );
- break;
-
- }
- }
-}
-/*--------------------------------------------------------------------------
- * Overview: Interface to config 8256
- * Input: struct net_device* dev
- * Output: NONE
- * Return: NONE
- *---------------------------------------------------------------------------*/
-RT_STATUS PHY_RF8256_Config(struct r8192_priv *priv)
-{
- // Initialize general global value
- //
- RT_STATUS rtStatus = RT_STATUS_SUCCESS;
- // TODO: Extend RF_PATH_C and RF_PATH_D in the future
- priv->NumTotalRFPath = RTL819X_TOTAL_RF_PATH;
- // Config BB and RF
- rtStatus = phy_RF8256_Config_ParaFile(priv);
-
- return rtStatus;
-}
-
-/*--------------------------------------------------------------------------
- * Overview: Interface to config 8256
- * Input: struct net_device* dev
- * Output: NONE
- * Return: NONE
- *---------------------------------------------------------------------------*/
-RT_STATUS phy_RF8256_Config_ParaFile(struct r8192_priv *priv)
-{
- u32 u4RegValue = 0;
- u8 eRFPath;
- RT_STATUS rtStatus = RT_STATUS_SUCCESS;
- BB_REGISTER_DEFINITION_T *pPhyReg;
- u32 RegOffSetToBeCheck = 0x3;
- u32 RegValueToBeCheck = 0x7f1;
- u32 RF3_Final_Value = 0;
- u8 ConstRetryTimes = 5, RetryTimes = 5;
- u8 ret = 0;
-
- //3//-----------------------------------------------------------------
- //3// <2> Initialize RF
- //3//-----------------------------------------------------------------
- for(eRFPath = (RF90_RADIO_PATH_E)RF90_PATH_A; eRFPath <priv->NumTotalRFPath; eRFPath++)
- {
- if (!rtl8192_phy_CheckIsLegalRFPath(priv, eRFPath))
- continue;
-
- pPhyReg = &priv->PHYRegDef[eRFPath];
-
- /*----Store original RFENV control type----*/
- switch(eRFPath)
- {
- case RF90_PATH_A:
- case RF90_PATH_C:
- u4RegValue = rtl8192_QueryBBReg(priv, pPhyReg->rfintfs, bRFSI_RFENV);
- break;
- case RF90_PATH_B :
- case RF90_PATH_D:
- u4RegValue = rtl8192_QueryBBReg(priv, pPhyReg->rfintfs, bRFSI_RFENV<<16);
- break;
- }
-
- /*----Set RF_ENV enable----*/
- rtl8192_setBBreg(priv, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
-
- /*----Set RF_ENV output high----*/
- rtl8192_setBBreg(priv, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
-
- /* Set bit number of Address and Data for RF register */
- rtl8192_setBBreg(priv, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); // Set 0 to 4 bits for Z-serial and set 1 to 6 bits for 8258
- rtl8192_setBBreg(priv, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); // Set 0 to 12 bits for Z-serial and 8258, and set 1 to 14 bits for ???
-
- rtl8192_phy_SetRFReg(priv, (RF90_RADIO_PATH_E) eRFPath, 0x0, bMask12Bits, 0xbf);
-
- /*----Check RF block (for FPGA platform only)----*/
- // TODO: this function should be removed on ASIC , Emily 2007.2.2
- rtStatus = rtl8192_phy_checkBBAndRF(priv, HW90_BLOCK_RF, (RF90_RADIO_PATH_E)eRFPath);
- if(rtStatus!= RT_STATUS_SUCCESS)
- {
- RT_TRACE(COMP_ERR, "PHY_RF8256_Config():Check Radio[%d] Fail!!\n", eRFPath);
- goto phy_RF8256_Config_ParaFile_Fail;
- }
-
- RetryTimes = ConstRetryTimes;
- RF3_Final_Value = 0;
- /*----Initialize RF fom connfiguration file----*/
- switch(eRFPath)
- {
- case RF90_PATH_A:
- while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0)
- {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(priv,(RF90_RADIO_PATH_E)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(priv, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits);
- RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
- RetryTimes--;
- }
- break;
- case RF90_PATH_B:
- while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0)
- {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(priv,(RF90_RADIO_PATH_E)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(priv, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits);
- RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
- RetryTimes--;
- }
- break;
- case RF90_PATH_C:
- while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0)
- {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(priv,(RF90_RADIO_PATH_E)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(priv, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits);
- RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
- RetryTimes--;
- }
- break;
- case RF90_PATH_D:
- while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0)
- {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(priv,(RF90_RADIO_PATH_E)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(priv, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits);
- RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
- RetryTimes--;
- }
- break;
- }
-
- /*----Restore RFENV control type----*/;
- switch(eRFPath)
- {
- case RF90_PATH_A:
- case RF90_PATH_C:
- rtl8192_setBBreg(priv, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
- break;
- case RF90_PATH_B :
- case RF90_PATH_D:
- rtl8192_setBBreg(priv, pPhyReg->rfintfs, bRFSI_RFENV<<16, u4RegValue);
- break;
- }
-
- if(ret){
- RT_TRACE(COMP_ERR, "phy_RF8256_Config_ParaFile():Radio[%d] Fail!!", eRFPath);
- goto phy_RF8256_Config_ParaFile_Fail;
- }
-
- }
-
- RT_TRACE(COMP_PHY, "PHY Initialization Success\n") ;
- return RT_STATUS_SUCCESS;
-
-phy_RF8256_Config_ParaFile_Fail:
- RT_TRACE(COMP_ERR, "PHY Initialization failed\n") ;
- return RT_STATUS_FAILURE;
-}
-
-
-void PHY_SetRF8256CCKTxPower(struct r8192_priv *priv, u8 powerlevel)
-{
- u32 TxAGC=0;
-
- TxAGC = powerlevel;
- if(priv->bDynamicTxLowPower == true)//cosa 04282008 for cck long range
- {
- if(priv->CustomerID == RT_CID_819x_Netcore)
- TxAGC = 0x22;
- else
- TxAGC += priv->CckPwEnl;
- }
- if(TxAGC > 0x24)
- TxAGC = 0x24;
- rtl8192_setBBreg(priv, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, TxAGC);
-}
-
-
-void PHY_SetRF8256OFDMTxPower(struct r8192_priv *priv, u8 powerlevel)
-{
-
- u32 writeVal, powerBase0, powerBase1, writeVal_tmp;
- u8 index = 0;
- u16 RegOffset[6] = {0xe00, 0xe04, 0xe10, 0xe14, 0xe18, 0xe1c};
- u8 byte0, byte1, byte2, byte3;
-
- powerBase0 = powerlevel + priv->LegacyHTTxPowerDiff; //OFDM rates
- powerBase0 = (powerBase0<<24) | (powerBase0<<16) |(powerBase0<<8) |powerBase0;
- powerBase1 = powerlevel; //MCS rates
- powerBase1 = (powerBase1<<24) | (powerBase1<<16) |(powerBase1<<8) |powerBase1;
-
- for(index=0; index<6; index++)
- {
- writeVal = priv->MCSTxPowerLevelOriginalOffset[index] + ((index<2)?powerBase0:powerBase1);
- byte0 = (u8)(writeVal & 0x7f);
- byte1 = (u8)((writeVal & 0x7f00)>>8);
- byte2 = (u8)((writeVal & 0x7f0000)>>16);
- byte3 = (u8)((writeVal & 0x7f000000)>>24);
- if(byte0 > 0x24) // Max power index = 0x24
- byte0 = 0x24;
- if(byte1 > 0x24)
- byte1 = 0x24;
- if(byte2 > 0x24)
- byte2 = 0x24;
- if(byte3 > 0x24)
- byte3 = 0x24;
-
- if(index == 3)
- {
- writeVal_tmp = (byte3<<24) | (byte2<<16) |(byte1<<8) |byte0;
- priv->Pwr_Track = writeVal_tmp;
- }
-
- if(priv->bDynamicTxHighPower == true) //Add by Jacken 2008/03/06 //when DM implement, add this
- {
- writeVal = 0x03030303;
- }
- else
- {
- writeVal = (byte3<<24) | (byte2<<16) |(byte1<<8) |byte0;
- }
- rtl8192_setBBreg(priv, RegOffset[index], 0x7f7f7f7f, writeVal);
- }
-}
-
-#define MAX_DOZE_WAITING_TIMES_9x 64
-static void r8192e_drain_tx_queues(struct r8192_priv *priv)
-{
- u8 i, QueueID;
-
- for (QueueID = 0, i = 0; QueueID < MAX_TX_QUEUE; )
- {
- struct rtl8192_tx_ring *ring = &priv->tx_ring[QueueID];
-
- if(skb_queue_len(&ring->queue) == 0)
- {
- QueueID++;
- continue;
- }
-
- udelay(10);
- i++;
-
- if (i >= MAX_DOZE_WAITING_TIMES_9x)
- {
- RT_TRACE(COMP_POWER, "r8192e_drain_tx_queues() timeout queue %d\n", QueueID);
- break;
- }
- }
-}
-
-static bool SetRFPowerState8190(struct r8192_priv *priv,
- RT_RF_POWER_STATE eRFPowerState)
-{
- PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
- bool bResult = true;
-
- if (eRFPowerState == priv->eRFPowerState &&
- priv->bHwRfOffAction == 0) {
- bResult = false;
- goto out;
- }
-
- switch( eRFPowerState )
- {
- case eRfOn:
-
- // turn on RF
- if ((priv->eRFPowerState == eRfOff) &&
- RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC))
- {
- /*
- * The current RF state is OFF and the RF OFF level
- * is halting the NIC, re-initialize the NIC.
- */
- if (!NicIFEnableNIC(priv)) {
- RT_TRACE(COMP_ERR, "%s(): NicIFEnableNIC failed\n",__FUNCTION__);
- bResult = false;
- goto out;
- }
-
- RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
- } else {
- write_nic_byte(priv, ANAPAR, 0x37);//160MHz
- mdelay(1);
- //enable clock 80/88 MHz
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter1, 0x4, 0x1); // 0x880[2]
- priv->bHwRfOffAction = 0;
-
- //RF-A, RF-B
- //enable RF-Chip A/B
- rtl8192_setBBreg(priv, rFPGA0_XA_RFInterfaceOE, BIT4, 0x1); // 0x860[4]
- //analog to digital on
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter4, 0x300, 0x3);// 0x88c[9:8]
- //digital to analog on
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter1, 0x18, 0x3); // 0x880[4:3]
- //rx antenna on
- rtl8192_setBBreg(priv, rOFDM0_TRxPathEnable, 0x3, 0x3);// 0xc04[1:0]
- //rx antenna on
- rtl8192_setBBreg(priv, rOFDM1_TRxPathEnable, 0x3, 0x3);// 0xd04[1:0]
- //analog to digital part2 on
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter1, 0x60, 0x3); // 0x880[6:5]
-
- }
-
- break;
-
- //
- // In current solution, RFSleep=RFOff in order to save power under 802.11 power save.
- // By Bruce, 2008-01-16.
- //
- case eRfSleep:
-
- // HW setting had been configured with deeper mode.
- if(priv->eRFPowerState == eRfOff)
- break;
-
- r8192e_drain_tx_queues(priv);
-
- PHY_SetRtl8192eRfOff(priv);
-
- break;
-
- case eRfOff:
-
- //
- // Disconnect with Any AP or STA.
- //
- r8192e_drain_tx_queues(priv);
-
-
- if (pPSC->RegRfPsLevel & RT_RF_OFF_LEVL_HALT_NIC && !RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC))
- {
- /* Disable all components. */
- NicIFDisableNIC(priv);
- RT_SET_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
- }
- else if (!(pPSC->RegRfPsLevel & RT_RF_OFF_LEVL_HALT_NIC))
- {
- /* Normal case - IPS should go to this. */
- PHY_SetRtl8192eRfOff(priv);
- }
- break;
-
- default:
- bResult = false;
- RT_TRACE(COMP_ERR, "SetRFPowerState8190(): unknow state to set: 0x%X!!!\n", eRFPowerState);
- break;
- }
-
- if(bResult)
- {
- // Update current RF state variable.
- priv->eRFPowerState = eRFPowerState;
- }
-
-out:
- return bResult;
-}
-
-
-
-
-
-static void MgntDisconnectIBSS(struct r8192_priv *priv)
-{
- u8 i;
- bool bFilterOutNonAssociatedBSSID = false;
-
- priv->ieee80211->state = IEEE80211_NOLINK;
-
- for(i=0;i<6;i++) priv->ieee80211->current_network.bssid[i]= 0x55;
- priv->OpMode = RT_OP_MODE_NO_LINK;
- write_nic_word(priv, BSSIDR, ((u16*)priv->ieee80211->current_network.bssid)[0]);
- write_nic_dword(priv, BSSIDR+2, ((u32*)(priv->ieee80211->current_network.bssid+2))[0]);
- {
- RT_OP_MODE OpMode = priv->OpMode;
- u8 btMsr = read_nic_byte(priv, MSR);
-
- btMsr &= 0xfc;
-
- switch(OpMode)
- {
- case RT_OP_MODE_INFRASTRUCTURE:
- btMsr |= MSR_LINK_MANAGED;
- break;
-
- case RT_OP_MODE_IBSS:
- btMsr |= MSR_LINK_ADHOC;
- // led link set separate
- break;
-
- case RT_OP_MODE_AP:
- btMsr |= MSR_LINK_MASTER;
- break;
-
- default:
- btMsr |= MSR_LINK_NONE;
- break;
- }
-
- write_nic_byte(priv, MSR, btMsr);
- }
- ieee80211_stop_send_beacons(priv->ieee80211);
-
- // If disconnect, clear RCR CBSSID bit
- bFilterOutNonAssociatedBSSID = false;
- {
- u32 RegRCR, Type;
- Type = bFilterOutNonAssociatedBSSID;
- RegRCR = read_nic_dword(priv, RCR);
- priv->ReceiveConfig = RegRCR;
- if (Type == true)
- RegRCR |= (RCR_CBSSID);
- else if (Type == false)
- RegRCR &= (~RCR_CBSSID);
-
- {
- write_nic_dword(priv, RCR, RegRCR);
- priv->ReceiveConfig = RegRCR;
- }
-
- }
- notify_wx_assoc_event(priv->ieee80211);
-
-}
-
-static void MlmeDisassociateRequest(struct r8192_priv *priv, u8 *asSta,
- u8 asRsn)
-{
- u8 i;
-
- RemovePeerTS(priv->ieee80211, asSta);
-
- SendDisassociation( priv->ieee80211, asSta, asRsn );
-
- if(memcpy(priv->ieee80211->current_network.bssid,asSta,6) == NULL)
- {
- //ShuChen TODO: change media status.
- //ShuChen TODO: What to do when disassociate.
- priv->ieee80211->state = IEEE80211_NOLINK;
- for(i=0;i<6;i++) priv->ieee80211->current_network.bssid[i] = 0x22;
- priv->OpMode = RT_OP_MODE_NO_LINK;
- {
- RT_OP_MODE OpMode = priv->OpMode;
- u8 btMsr = read_nic_byte(priv, MSR);
-
- btMsr &= 0xfc;
-
- switch(OpMode)
- {
- case RT_OP_MODE_INFRASTRUCTURE:
- btMsr |= MSR_LINK_MANAGED;
- break;
-
- case RT_OP_MODE_IBSS:
- btMsr |= MSR_LINK_ADHOC;
- // led link set separate
- break;
-
- case RT_OP_MODE_AP:
- btMsr |= MSR_LINK_MASTER;
- break;
-
- default:
- btMsr |= MSR_LINK_NONE;
- break;
- }
-
- write_nic_byte(priv, MSR, btMsr);
- }
- ieee80211_disassociate(priv->ieee80211);
-
- write_nic_word(priv, BSSIDR, ((u16*)priv->ieee80211->current_network.bssid)[0]);
- write_nic_dword(priv, BSSIDR+2, ((u32*)(priv->ieee80211->current_network.bssid+2))[0]);
-
- }
-
-}
-
-
-static void MgntDisconnectAP(struct r8192_priv *priv, u8 asRsn)
-{
- bool bFilterOutNonAssociatedBSSID = false;
- u32 RegRCR, Type;
-
- /* If disconnect, clear RCR CBSSID bit */
- bFilterOutNonAssociatedBSSID = false;
-
- Type = bFilterOutNonAssociatedBSSID;
- RegRCR = read_nic_dword(priv, RCR);
- priv->ReceiveConfig = RegRCR;
-
- if (Type == true)
- RegRCR |= (RCR_CBSSID);
- else if (Type == false)
- RegRCR &= (~RCR_CBSSID);
-
- write_nic_dword(priv, RCR, RegRCR);
- priv->ReceiveConfig = RegRCR;
-
- MlmeDisassociateRequest(priv, priv->ieee80211->current_network.bssid, asRsn);
-
- priv->ieee80211->state = IEEE80211_NOLINK;
-}
-
-
-static bool MgntDisconnect(struct r8192_priv *priv, u8 asRsn)
-{
- // In adhoc mode, update beacon frame.
- if( priv->ieee80211->state == IEEE80211_LINKED )
- {
- if( priv->ieee80211->iw_mode == IW_MODE_ADHOC )
- {
- MgntDisconnectIBSS(priv);
- }
- if( priv->ieee80211->iw_mode == IW_MODE_INFRA )
- {
- // We clear key here instead of MgntDisconnectAP() because that
- // MgntActSet_802_11_DISASSOCIATE() is an interface called by OS,
- // e.g. OID_802_11_DISASSOCIATE in Windows while as MgntDisconnectAP() is
- // used to handle disassociation related things to AP, e.g. send Disassoc
- // frame to AP. 2005.01.27, by rcnjko.
- MgntDisconnectAP(priv, asRsn);
- }
- }
-
- return true;
-}
-
-//
-// Description:
-// Chang RF Power State.
-// Note that, only MgntActSet_RF_State() is allowed to set HW_VAR_RF_STATE.
-//
-// Assumption:
-// PASSIVE LEVEL.
-//
-bool MgntActSet_RF_State(struct r8192_priv *priv, RT_RF_POWER_STATE StateToSet,
- RT_RF_CHANGE_SOURCE ChangeSource)
-{
- bool bActionAllowed = false;
- bool bConnectBySSID = false;
- RT_RF_POWER_STATE rtState;
-
- RT_TRACE(COMP_POWER, "===>MgntActSet_RF_State(): StateToSet(%d)\n",StateToSet);
-
- spin_lock(&priv->rf_ps_lock);
-
- rtState = priv->eRFPowerState;
-
- switch(StateToSet)
- {
- case eRfOn:
- priv->RfOffReason &= (~ChangeSource);
-
- if (!priv->RfOffReason)
- {
- priv->RfOffReason = 0;
- bActionAllowed = true;
-
-
- if(rtState == eRfOff && ChangeSource >=RF_CHANGE_BY_HW )
- {
- bConnectBySSID = true;
- }
- }
- else
- RT_TRACE(COMP_POWER, "MgntActSet_RF_State - eRfon reject pMgntInfo->RfOffReason= 0x%x, ChangeSource=0x%X\n", priv->RfOffReason, ChangeSource);
-
- break;
-
- case eRfOff:
-
- if (priv->RfOffReason > RF_CHANGE_BY_IPS)
- {
- // Disconnect to current BSS when radio off. Asked by QuanTa.
- MgntDisconnect(priv, disas_lv_ss);
- }
-
- priv->RfOffReason |= ChangeSource;
- bActionAllowed = true;
- break;
-
- case eRfSleep:
- priv->RfOffReason |= ChangeSource;
- bActionAllowed = true;
- break;
- }
-
- if (bActionAllowed)
- {
- RT_TRACE(COMP_POWER, "MgntActSet_RF_State(): Action is allowed.... StateToSet(%d), RfOffReason(%#X)\n", StateToSet, priv->RfOffReason);
- // Config HW to the specified mode.
- SetRFPowerState8190(priv, StateToSet);
- }
- else
- {
- RT_TRACE(COMP_POWER, "MgntActSet_RF_State(): Action is rejected.... StateToSet(%d), ChangeSource(%#X), RfOffReason(%#X)\n", StateToSet, ChangeSource, priv->RfOffReason);
- }
-
- // Release RF spinlock
- spin_unlock(&priv->rf_ps_lock);
-
- RT_TRACE(COMP_POWER, "<===MgntActSet_RF_State()\n");
- return bActionAllowed;
-}
-
-
diff --git a/drivers/staging/rtl8192e/r8190_rtl8256.h b/drivers/staging/rtl8192e/r8190_rtl8256.h
deleted file mode 100644
index 58f92903fca..00000000000
--- a/drivers/staging/rtl8192e/r8190_rtl8256.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* r8190_rtl8256.h - rtl8256 radio frontend
- *
- * This is part of the rtl8180-sa2400 driver
- * released under the GPL (See file COPYING for details).
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
- *
- * Many thanks to Realtek Corp. for their great support!
- */
-
-#ifndef RTL8225_H
-#define RTL8225_H
-
-#define RTL819X_TOTAL_RF_PATH 2 /* for 8192E */
-
-void PHY_SetRF8256Bandwidth(struct r8192_priv *priv,
- HT_CHANNEL_WIDTH Bandwidth);
-
-RT_STATUS PHY_RF8256_Config(struct r8192_priv *priv);
-
-RT_STATUS phy_RF8256_Config_ParaFile(struct r8192_priv *priv);
-
-void PHY_SetRF8256CCKTxPower(struct r8192_priv *priv, u8 powerlevel);
-void PHY_SetRF8256OFDMTxPower(struct r8192_priv *priv, u8 powerlevel);
-
-bool MgntActSet_RF_State(struct r8192_priv *priv,
- RT_RF_POWER_STATE StateToSet,
- RT_RF_CHANGE_SOURCE ChangeSource);
-
-#endif /* RTL8225_H */
diff --git a/drivers/staging/rtl8192e/r8192E.h b/drivers/staging/rtl8192e/r8192E.h
deleted file mode 100644
index 137f66b034b..00000000000
--- a/drivers/staging/rtl8192e/r8192E.h
+++ /dev/null
@@ -1,1148 +0,0 @@
-/*
- This is part of rtl8187 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
- Released under the terms of GPL (General Public Licence)
-
- Parts of this driver are based on the GPL part of the
- official realtek driver
-
- Parts of this driver are based on the rtl8192 driver skeleton
- from Patric Schenke & Andres Salomon
-
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
-
- We want to tanks the Authors of those projects and the Ndiswrapper
- project Authors.
-*/
-
-#ifndef R819xU_H
-#define R819xU_H
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/rtnetlink.h> //for rtnl_lock()
-#include <linux/wireless.h>
-#include <linux/timer.h>
-#include <linux/proc_fs.h> // Necessary because we use the proc fs
-#include <linux/if_arp.h>
-#include <linux/random.h>
-#include <linux/interrupt.h>
-#include <asm/io.h>
-#include "ieee80211/rtl819x_HT.h"
-#include "ieee80211/ieee80211.h"
-
-
-
-
-#define RTL819xE_MODULE_NAME "rtl819xE"
-
-#define FALSE 0
-#define TRUE 1
-#define MAX_KEY_LEN 61
-#define KEY_BUF_SIZE 5
-
-#define BIT0 0x00000001
-#define BIT1 0x00000002
-#define BIT2 0x00000004
-#define BIT3 0x00000008
-#define BIT4 0x00000010
-#define BIT5 0x00000020
-#define BIT6 0x00000040
-#define BIT7 0x00000080
-#define BIT8 0x00000100
-#define BIT9 0x00000200
-#define BIT10 0x00000400
-#define BIT11 0x00000800
-#define BIT12 0x00001000
-#define BIT13 0x00002000
-#define BIT14 0x00004000
-#define BIT15 0x00008000
-#define BIT16 0x00010000
-#define BIT17 0x00020000
-#define BIT18 0x00040000
-#define BIT19 0x00080000
-#define BIT20 0x00100000
-#define BIT21 0x00200000
-#define BIT22 0x00400000
-#define BIT23 0x00800000
-#define BIT24 0x01000000
-#define BIT25 0x02000000
-#define BIT26 0x04000000
-#define BIT27 0x08000000
-#define BIT28 0x10000000
-#define BIT29 0x20000000
-#define BIT30 0x40000000
-#define BIT31 0x80000000
-// Rx smooth factor
-#define Rx_Smooth_Factor 20
-/* 2007/06/04 MH Define sliding window for RSSI history. */
-#define PHY_RSSI_SLID_WIN_MAX 100
-#define PHY_Beacon_RSSI_SLID_WIN_MAX 10
-
-#define IC_VersionCut_D 0x3
-#define IC_VersionCut_E 0x4
-
-#if 0 //we need to use RT_TRACE instead DMESG as RT_TRACE will clearly show debug level wb.
-#define DMESG(x,a...) printk(KERN_INFO RTL819xE_MODULE_NAME ": " x "\n", ## a)
-#else
-#define DMESG(x,a...)
-extern u32 rt_global_debug_component;
-#define RT_TRACE(component, x, args...) \
-do { if(rt_global_debug_component & component) \
- printk(KERN_DEBUG RTL819xE_MODULE_NAME ":" x , \
- ##args);\
-}while(0);
-
-#define COMP_TRACE BIT0 // For function call tracing.
-#define COMP_DBG BIT1 // Only for temporary debug message.
-#define COMP_INIT BIT2 // during driver initialization / halt / reset.
-
-
-#define COMP_RECV BIT3 // Reveive part data path.
-#define COMP_SEND BIT4 // Send part path.
-#define COMP_IO BIT5 // I/O Related. Added by Annie, 2006-03-02.
-#define COMP_POWER BIT6 // 802.11 Power Save mode or System/Device Power state related.
-#define COMP_EPROM BIT7 // 802.11 link related: join/start BSS, leave BSS.
-#define COMP_SWBW BIT8 // For bandwidth switch.
-#define COMP_SEC BIT9// For Security.
-
-
-#define COMP_TURBO BIT10 // For Turbo Mode related. By Annie, 2005-10-21.
-#define COMP_QOS BIT11 // For QoS.
-
-#define COMP_RATE BIT12 // For Rate Adaptive mechanism, 2006.07.02, by rcnjko. #define COMP_EVENTS 0x00000080 // Event handling
-#define COMP_RXDESC BIT13 // Show Rx desc information for SD3 debug. Added by Annie, 2006-07-15.
-#define COMP_PHY BIT14
-#define COMP_DIG BIT15 // For DIG, 2006.09.25, by rcnjko.
-#define COMP_TXAGC BIT16 // For Tx power, 060928, by rcnjko.
-#define COMP_HALDM BIT17 // For HW Dynamic Mechanism, 061010, by rcnjko.
-#define COMP_POWER_TRACKING BIT18 //FOR 8190 TX POWER TRACKING
-#define COMP_EVENTS BIT19 // Event handling
-
-#define COMP_RF BIT20 // For RF.
-
-/* 11n or 8190 specific code should be put below this line */
-
-
-#define COMP_FIRMWARE BIT21 //for firmware downloading
-#define COMP_HT BIT22 // For 802.11n HT related information. by Emily 2006-8-11
-
-#define COMP_RESET BIT23
-#define COMP_CMDPKT BIT24
-#define COMP_SCAN BIT25
-#define COMP_IPS BIT26
-#define COMP_DOWN BIT27 // for rm driver module
-#define COMP_INTR BIT28 // for interrupt
-#define COMP_ERR BIT31 // for error out, always on
-#endif
-
-
-//
-// Queue Select Value in TxDesc
-//
-#define QSLT_BK 0x1
-#define QSLT_BE 0x0
-#define QSLT_VI 0x4
-#define QSLT_VO 0x6
-#define QSLT_BEACON 0x10
-#define QSLT_HIGH 0x11
-#define QSLT_MGNT 0x12
-#define QSLT_CMD 0x13
-
-#define DESC90_RATE1M 0x00
-#define DESC90_RATE2M 0x01
-#define DESC90_RATE5_5M 0x02
-#define DESC90_RATE11M 0x03
-#define DESC90_RATE6M 0x04
-#define DESC90_RATE9M 0x05
-#define DESC90_RATE12M 0x06
-#define DESC90_RATE18M 0x07
-#define DESC90_RATE24M 0x08
-#define DESC90_RATE36M 0x09
-#define DESC90_RATE48M 0x0a
-#define DESC90_RATE54M 0x0b
-#define DESC90_RATEMCS0 0x00
-#define DESC90_RATEMCS1 0x01
-#define DESC90_RATEMCS2 0x02
-#define DESC90_RATEMCS3 0x03
-#define DESC90_RATEMCS4 0x04
-#define DESC90_RATEMCS5 0x05
-#define DESC90_RATEMCS6 0x06
-#define DESC90_RATEMCS7 0x07
-#define DESC90_RATEMCS8 0x08
-#define DESC90_RATEMCS9 0x09
-#define DESC90_RATEMCS10 0x0a
-#define DESC90_RATEMCS11 0x0b
-#define DESC90_RATEMCS12 0x0c
-#define DESC90_RATEMCS13 0x0d
-#define DESC90_RATEMCS14 0x0e
-#define DESC90_RATEMCS15 0x0f
-#define DESC90_RATEMCS32 0x20
-
-#define RTL819X_DEFAULT_RF_TYPE RF_1T2R
-#define EEPROM_Default_LegacyHTTxPowerDiff 0x4
-#define IEEE80211_WATCH_DOG_TIME 2000
-
-typedef u32 RT_RF_CHANGE_SOURCE;
-#define RF_CHANGE_BY_SW BIT31
-#define RF_CHANGE_BY_HW BIT30
-#define RF_CHANGE_BY_PS BIT29
-#define RF_CHANGE_BY_IPS BIT28
-#define RF_CHANGE_BY_INIT 0 // Do not change the RFOff reason. Defined by Bruce, 2008-01-17.
-
-// RF state.
-typedef enum _RT_RF_POWER_STATE {
- eRfOn,
- eRfSleep,
- eRfOff
-} RT_RF_POWER_STATE;
-
-typedef enum _RT_JOIN_ACTION {
- RT_JOIN_INFRA = 1,
- RT_JOIN_IBSS = 2,
- RT_START_IBSS = 3,
- RT_NO_ACTION = 4,
-} RT_JOIN_ACTION;
-
-typedef enum _IPS_CALLBACK_FUNCION {
- IPS_CALLBACK_NONE = 0,
- IPS_CALLBACK_MGNT_LINK_REQUEST = 1,
- IPS_CALLBACK_JOIN_REQUEST = 2,
-} IPS_CALLBACK_FUNCION;
-
-typedef struct _RT_POWER_SAVE_CONTROL {
- /* Inactive Power Save(IPS) : Disable RF when disconnected */
- bool bInactivePs;
- bool bIPSModeBackup;
- bool bSwRfProcessing;
- RT_RF_POWER_STATE eInactivePowerState;
- struct work_struct InactivePsWorkItem;
- struct timer_list InactivePsTimer;
-
- /* Return point for join action */
- IPS_CALLBACK_FUNCION ReturnPoint;
-
- /* Recored Parameters for rescheduled JoinRequest */
- bool bTmpBssDesc;
- RT_JOIN_ACTION tmpJoinAction;
- struct ieee80211_network tmpBssDesc;
-
- /* Recored Parameters for rescheduled MgntLinkRequest */
- bool bTmpScanOnly;
- bool bTmpActiveScan;
- bool bTmpFilterHiddenAP;
- bool bTmpUpdateParms;
- u8 tmpSsidBuf[33];
- OCTET_STRING tmpSsid2Scan;
- bool bTmpSsid2Scan;
- u8 tmpNetworkType;
- u8 tmpChannelNumber;
- u16 tmpBcnPeriod;
- u8 tmpDtimPeriod;
- u16 tmpmCap;
- OCTET_STRING tmpSuppRateSet;
- u8 tmpSuppRateBuf[MAX_NUM_RATES];
- bool bTmpSuppRate;
- IbssParms tmpIbpm;
- bool bTmpIbpm;
-
- /*
- * Leisure Power Save:
- * Disable RF if connected but traffic is not busy
- */
- bool bLeisurePs;
- u32 PowerProfile;
- u8 LpsIdleCount;
-
- u32 CurPsLevel;
- u32 RegRfPsLevel;
-
- bool bFwCtrlLPS;
- u8 FWCtrlPSMode;
-
- bool LinkReqInIPSRFOffPgs;
- bool BufConnectinfoBefore;
-} RT_POWER_SAVE_CONTROL, *PRT_POWER_SAVE_CONTROL;
-
-/* For rtl819x */
-typedef struct _tx_desc_819x_pci {
- //DWORD 0
- u16 PktSize;
- u8 Offset;
- u8 Reserved1:3;
- u8 CmdInit:1;
- u8 LastSeg:1;
- u8 FirstSeg:1;
- u8 LINIP:1;
- u8 OWN:1;
-
- //DWORD 1
- u8 TxFWInfoSize;
- u8 RATid:3;
- u8 DISFB:1;
- u8 USERATE:1;
- u8 MOREFRAG:1;
- u8 NoEnc:1;
- u8 PIFS:1;
- u8 QueueSelect:5;
- u8 NoACM:1;
- u8 Resv:2;
- u8 SecCAMID:5;
- u8 SecDescAssign:1;
- u8 SecType:2;
-
- //DWORD 2
- u16 TxBufferSize;
- u8 PktId:7;
- u8 Resv1:1;
- u8 Reserved2;
-
- //DWORD 3
- u32 TxBuffAddr;
-
- //DWORD 4
- u32 NextDescAddress;
-
- //DWORD 5,6,7
- u32 Reserved5;
- u32 Reserved6;
- u32 Reserved7;
-}tx_desc_819x_pci, *ptx_desc_819x_pci;
-
-
-typedef struct _tx_desc_cmd_819x_pci {
- //DWORD 0
- u16 PktSize;
- u8 Reserved1;
- u8 CmdType:3;
- u8 CmdInit:1;
- u8 LastSeg:1;
- u8 FirstSeg:1;
- u8 LINIP:1;
- u8 OWN:1;
-
- //DOWRD 1
- u16 ElementReport;
- u16 Reserved2;
-
- //DOWRD 2
- u16 TxBufferSize;
- u16 Reserved3;
-
- //DWORD 3,4,5
- u32 TxBufferAddr;
- u32 NextDescAddress;
- u32 Reserved4;
- u32 Reserved5;
- u32 Reserved6;
-}tx_desc_cmd_819x_pci, *ptx_desc_cmd_819x_pci;
-
-
-typedef struct _tx_fwinfo_819x_pci {
- //DOWRD 0
- u8 TxRate:7;
- u8 CtsEnable:1;
- u8 RtsRate:7;
- u8 RtsEnable:1;
- u8 TxHT:1;
- u8 Short:1; //Short PLCP for CCK, or short GI for 11n MCS
- u8 TxBandwidth:1; // This is used for HT MCS rate only.
- u8 TxSubCarrier:2; // This is used for legacy OFDM rate only.
- u8 STBC:2;
- u8 AllowAggregation:1;
- u8 RtsHT:1; //Interpre RtsRate field as high throughput data rate
- u8 RtsShort:1; //Short PLCP for CCK, or short GI for 11n MCS
- u8 RtsBandwidth:1; // This is used for HT MCS rate only.
- u8 RtsSubcarrier:2; // This is used for legacy OFDM rate only.
- u8 RtsSTBC:2;
- u8 EnableCPUDur:1; //Enable firmware to recalculate and assign packet duration
-
- //DWORD 1
- u8 RxMF:2;
- u8 RxAMD:3;
- u8 Reserved1:3;
- u8 Reserved2;
- u8 Reserved3;
- u8 Reserved4;
-
- //u32 Reserved;
-}tx_fwinfo_819x_pci, *ptx_fwinfo_819x_pci;
-
-typedef struct _rx_desc_819x_pci{
- //DOWRD 0
- u16 Length:14;
- u16 CRC32:1;
- u16 ICV:1;
- u8 RxDrvInfoSize;
- u8 Shift:2;
- u8 PHYStatus:1;
- u8 SWDec:1;
- u8 LastSeg:1;
- u8 FirstSeg:1;
- u8 EOR:1;
- u8 OWN:1;
-
- //DWORD 1
- u32 Reserved2;
-
- //DWORD 2
- u32 Reserved3;
-
- //DWORD 3
- u32 BufferAddress;
-
-}rx_desc_819x_pci, *prx_desc_819x_pci;
-
-typedef struct _rx_fwinfo_819x_pci{
- //DWORD 0
- u16 Reserved1:12;
- u16 PartAggr:1;
- u16 FirstAGGR:1;
- u16 Reserved2:2;
-
- u8 RxRate:7;
- u8 RxHT:1;
-
- u8 BW:1;
- u8 SPLCP:1;
- u8 Reserved3:2;
- u8 PAM:1;
- u8 Mcast:1;
- u8 Bcast:1;
- u8 Reserved4:1;
-
- //DWORD 1
- u32 TSFL;
-
-}rx_fwinfo_819x_pci, *prx_fwinfo_819x_pci;
-
-#define MAX_DEV_ADDR_SIZE 8 /* support till 64 bit bus width OS */
-#define MAX_FIRMWARE_INFORMATION_SIZE 32 /*2006/04/30 by Emily forRTL8190*/
-#define MAX_802_11_HEADER_LENGTH (40 + MAX_FIRMWARE_INFORMATION_SIZE)
-#define ENCRYPTION_MAX_OVERHEAD 128
-#define MAX_FRAGMENT_COUNT 8
-#define MAX_TRANSMIT_BUFFER_SIZE (1600+(MAX_802_11_HEADER_LENGTH+ENCRYPTION_MAX_OVERHEAD)*MAX_FRAGMENT_COUNT)
-
-#define scrclng 4 // octets for crc32 (FCS, ICV)
-/* 8190 Loopback Mode definition */
-typedef enum _rtl819x_loopback{
- RTL819X_NO_LOOPBACK = 0,
- RTL819X_MAC_LOOPBACK = 1,
- RTL819X_DMA_LOOPBACK = 2,
- RTL819X_CCK_LOOPBACK = 3,
-}rtl819x_loopback_e;
-
-/* due to rtl8192 firmware */
-typedef enum _desc_packet_type_e{
- DESC_PACKET_TYPE_INIT = 0,
- DESC_PACKET_TYPE_NORMAL = 1,
-}desc_packet_type_e;
-
-typedef enum _firmware_status{
- FW_STATUS_0_INIT = 0,
- FW_STATUS_1_MOVE_BOOT_CODE = 1,
- FW_STATUS_2_MOVE_MAIN_CODE = 2,
- FW_STATUS_3_TURNON_CPU = 3,
- FW_STATUS_4_MOVE_DATA_CODE = 4,
- FW_STATUS_5_READY = 5,
-}firmware_status_e;
-
-typedef struct _rt_firmware{
- firmware_status_e firmware_status;
- u16 cmdpacket_frag_thresold;
-#define RTL8190_MAX_FIRMWARE_CODE_SIZE 64000 //64k
-#define MAX_FW_INIT_STEP 3
- u8 firmware_buf[MAX_FW_INIT_STEP][RTL8190_MAX_FIRMWARE_CODE_SIZE];
- u16 firmware_buf_size[MAX_FW_INIT_STEP];
-}rt_firmware, *prt_firmware;
-
-#define MAX_RECEIVE_BUFFER_SIZE 9100 // Add this to 9100 bytes to receive A-MSDU from RT-AP
-
-/* Firmware Queue Layout */
-#define NUM_OF_FIRMWARE_QUEUE 10
-#define NUM_OF_PAGES_IN_FW 0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x0aa
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x007
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x024
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x007
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x10
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x4
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xd
-#define APPLIED_RESERVED_QUEUE_IN_FW 0x80000000
-#define RSVD_FW_QUEUE_PAGE_BK_SHIFT 0x00
-#define RSVD_FW_QUEUE_PAGE_BE_SHIFT 0x08
-#define RSVD_FW_QUEUE_PAGE_VI_SHIFT 0x10
-#define RSVD_FW_QUEUE_PAGE_VO_SHIFT 0x18
-#define RSVD_FW_QUEUE_PAGE_MGNT_SHIFT 0x10
-#define RSVD_FW_QUEUE_PAGE_CMD_SHIFT 0x08
-#define RSVD_FW_QUEUE_PAGE_BCN_SHIFT 0x00
-#define RSVD_FW_QUEUE_PAGE_PUB_SHIFT 0x08
-
-#define DCAM 0xAC // Debug CAM Interface
-#define AESMSK_FC 0xB2 // AES Mask register for frame control (0xB2~0xB3). Added by Annie, 2006-03-06.
-
-
-#define CAM_CONTENT_COUNT 8
-#define CFG_VALID BIT15
-#define EPROM_93c46 0
-#define EPROM_93c56 1
-
-#define DEFAULT_FRAG_THRESHOLD 2342U
-#define MIN_FRAG_THRESHOLD 256U
-#define DEFAULT_BEACONINTERVAL 0x64U
-
-#define DEFAULT_RETRY_RTS 7
-#define DEFAULT_RETRY_DATA 7
-
-#define PHY_RSSI_SLID_WIN_MAX 100
-
-
-typedef enum _WIRELESS_MODE {
- WIRELESS_MODE_UNKNOWN = 0x00,
- WIRELESS_MODE_A = 0x01,
- WIRELESS_MODE_B = 0x02,
- WIRELESS_MODE_G = 0x04,
- WIRELESS_MODE_AUTO = 0x08,
- WIRELESS_MODE_N_24G = 0x10,
- WIRELESS_MODE_N_5G = 0x20
-} WIRELESS_MODE;
-
-#define RTL_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30
-
-typedef struct buffer
-{
- struct buffer *next;
- u32 *buf;
- dma_addr_t dma;
-
-} buffer;
-
-typedef struct _rt_9x_tx_rate_history {
- u32 cck[4];
- u32 ofdm[8];
- // HT_MCS[0][]: BW=0 SG=0
- // HT_MCS[1][]: BW=1 SG=0
- // HT_MCS[2][]: BW=0 SG=1
- // HT_MCS[3][]: BW=1 SG=1
- u32 ht_mcs[4][16];
-}rt_tx_rahis_t, *prt_tx_rahis_t;
-
-typedef struct _RT_SMOOTH_DATA_4RF {
- char elements[4][100];//array to store values
- u32 index; //index to current array to store
- u32 TotalNum; //num of valid elements
- u32 TotalVal[4]; //sum of valid elements
-}RT_SMOOTH_DATA_4RF, *PRT_SMOOTH_DATA_4RF;
-
-typedef enum _tag_TxCmd_Config_Index{
- TXCMD_TXRA_HISTORY_CTRL = 0xFF900000,
- TXCMD_RESET_TX_PKT_BUFF = 0xFF900001,
- TXCMD_RESET_RX_PKT_BUFF = 0xFF900002,
- TXCMD_SET_TX_DURATION = 0xFF900003,
- TXCMD_SET_RX_RSSI = 0xFF900004,
- TXCMD_SET_TX_PWR_TRACKING = 0xFF900005,
- TXCMD_XXXX_CTRL,
-}DCMD_TXCMD_OP;
-
-typedef struct Stats
-{
- unsigned long rxrdu;
- unsigned long rxok;
- unsigned long received_rate_histogram[4][32]; //0: Total, 1:OK, 2:CRC, 3:ICV
- unsigned long rxoverflow;
- unsigned long rxint;
- unsigned long txoverflow;
- unsigned long txbeokint;
- unsigned long txbkokint;
- unsigned long txviokint;
- unsigned long txvookint;
- unsigned long txbeaconokint;
- unsigned long txbeaconerr;
- unsigned long txmanageokint;
- unsigned long txcmdpktokint;
- unsigned long txfeedback;
- unsigned long txfeedbackok;
- unsigned long txoktotal;
- unsigned long txbytesunicast;
- unsigned long rxbytesunicast;
-
- unsigned long slide_signal_strength[100];
- unsigned long slide_evm[100];
- unsigned long slide_rssi_total; // For recording sliding window's RSSI value
- unsigned long slide_evm_total; // For recording sliding window's EVM value
- long signal_strength; // Transformed, in dbm. Beautified signal strength for UI, not correct.
- u8 rx_rssi_percentage[4];
- u8 rx_evm_percentage[2];
- u32 Slide_Beacon_pwdb[100];
- u32 Slide_Beacon_Total;
- RT_SMOOTH_DATA_4RF cck_adc_pwdb;
-} Stats;
-
-
-// Bandwidth Offset
-#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
-#define HAL_PRIME_CHNL_OFFSET_LOWER 1
-#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-
-typedef struct ChnlAccessSetting {
- u16 SIFS_Timer;
- u16 DIFS_Timer;
- u16 SlotTimeTimer;
- u16 EIFS_Timer;
- u16 CWminIndex;
- u16 CWmaxIndex;
-}*PCHANNEL_ACCESS_SETTING,CHANNEL_ACCESS_SETTING;
-
-typedef struct _BB_REGISTER_DEFINITION{
- u32 rfintfs; // set software control: // 0x870~0x877[8 bytes]
- u32 rfintfi; // readback data: // 0x8e0~0x8e7[8 bytes]
- u32 rfintfo; // output data: // 0x860~0x86f [16 bytes]
- u32 rfintfe; // output enable: // 0x860~0x86f [16 bytes]
- u32 rf3wireOffset; // LSSI data: // 0x840~0x84f [16 bytes]
- u32 rfLSSI_Select; // BB Band Select: // 0x878~0x87f [8 bytes]
- u32 rfTxGainStage; // Tx gain stage: // 0x80c~0x80f [4 bytes]
- u32 rfHSSIPara1; // wire parameter control1 : // 0x820~0x823,0x828~0x82b, 0x830~0x833, 0x838~0x83b [16 bytes]
- u32 rfHSSIPara2; // wire parameter control2 : // 0x824~0x827,0x82c~0x82f, 0x834~0x837, 0x83c~0x83f [16 bytes]
- u32 rfSwitchControl; //Tx Rx antenna control : // 0x858~0x85f [16 bytes]
- u32 rfAGCControl1; //AGC parameter control1 : // 0xc50~0xc53,0xc58~0xc5b, 0xc60~0xc63, 0xc68~0xc6b [16 bytes]
- u32 rfAGCControl2; //AGC parameter control2 : // 0xc54~0xc57,0xc5c~0xc5f, 0xc64~0xc67, 0xc6c~0xc6f [16 bytes]
- u32 rfRxIQImbalance; //OFDM Rx IQ imbalance matrix : // 0xc14~0xc17,0xc1c~0xc1f, 0xc24~0xc27, 0xc2c~0xc2f [16 bytes]
- u32 rfRxAFE; //Rx IQ DC ofset and Rx digital filter, Rx DC notch filter : // 0xc10~0xc13,0xc18~0xc1b, 0xc20~0xc23, 0xc28~0xc2b [16 bytes]
- u32 rfTxIQImbalance; //OFDM Tx IQ imbalance matrix // 0xc80~0xc83,0xc88~0xc8b, 0xc90~0xc93, 0xc98~0xc9b [16 bytes]
- u32 rfTxAFE; //Tx IQ DC Offset and Tx DFIR type // 0xc84~0xc87,0xc8c~0xc8f, 0xc94~0xc97, 0xc9c~0xc9f [16 bytes]
- u32 rfLSSIReadBack; //LSSI RF readback data // 0x8a0~0x8af [16 bytes]
-}BB_REGISTER_DEFINITION_T, *PBB_REGISTER_DEFINITION_T;
-
-typedef struct _rate_adaptive
-{
- u8 rate_adaptive_disabled;
- u8 ratr_state;
- u16 reserve;
-
- u32 high_rssi_thresh_for_ra;
- u32 high2low_rssi_thresh_for_ra;
- u8 low2high_rssi_thresh_for_ra40M;
- u32 low_rssi_thresh_for_ra40M;
- u8 low2high_rssi_thresh_for_ra20M;
- u32 low_rssi_thresh_for_ra20M;
- u32 upper_rssi_threshold_ratr;
- u32 middle_rssi_threshold_ratr;
- u32 low_rssi_threshold_ratr;
- u32 low_rssi_threshold_ratr_40M;
- u32 low_rssi_threshold_ratr_20M;
- u8 ping_rssi_enable; //cosa add for test
- u32 ping_rssi_ratr; //cosa add for test
- u32 ping_rssi_thresh_for_ra;//cosa add for test
- u32 last_ratr;
-
-} rate_adaptive, *prate_adaptive;
-#define TxBBGainTableLength 37
-#define CCKTxBBGainTableLength 23
-typedef struct _txbbgain_struct
-{
- long txbb_iq_amplifygain;
- u32 txbbgain_value;
-} txbbgain_struct, *ptxbbgain_struct;
-
-typedef struct _ccktxbbgain_struct
-{
- //The Value is from a22 to a29 one Byte one time is much Safer
- u8 ccktxbb_valuearray[8];
-} ccktxbbgain_struct,*pccktxbbgain_struct;
-
-
-typedef struct _init_gain
-{
- u8 xaagccore1;
- u8 xbagccore1;
- u8 xcagccore1;
- u8 xdagccore1;
- u8 cca;
-
-} init_gain, *pinit_gain;
-
-/* 2007/11/02 MH Define RF mode temporarily for test. */
-typedef enum tag_Rf_Operatetion_State
-{
- RF_STEP_INIT = 0,
- RF_STEP_NORMAL,
- RF_STEP_MAX
-}RF_STEP_E;
-
-typedef enum _RT_STATUS{
- RT_STATUS_SUCCESS,
- RT_STATUS_FAILURE,
- RT_STATUS_PENDING,
- RT_STATUS_RESOURCE
-}RT_STATUS,*PRT_STATUS;
-
-typedef enum _RT_CUSTOMER_ID
-{
- RT_CID_DEFAULT = 0,
- RT_CID_8187_ALPHA0 = 1,
- RT_CID_8187_SERCOMM_PS = 2,
- RT_CID_8187_HW_LED = 3,
- RT_CID_8187_NETGEAR = 4,
- RT_CID_WHQL = 5,
- RT_CID_819x_CAMEO = 6,
- RT_CID_819x_RUNTOP = 7,
- RT_CID_819x_Senao = 8,
- RT_CID_TOSHIBA = 9, // Merge by Jacken, 2008/01/31.
- RT_CID_819x_Netcore = 10,
- RT_CID_Nettronix = 11,
- RT_CID_DLINK = 12,
- RT_CID_PRONET = 13,
- RT_CID_COREGA = 14,
-}RT_CUSTOMER_ID, *PRT_CUSTOMER_ID;
-
-/* LED customization. */
-
-typedef enum _LED_STRATEGY_8190{
- SW_LED_MODE0, // SW control 1 LED via GPIO0. It is default option.
- SW_LED_MODE1, // SW control for PCI Express
- SW_LED_MODE2, // SW control for Cameo.
- SW_LED_MODE3, // SW contorl for RunTop.
- SW_LED_MODE4, // SW control for Netcore
- SW_LED_MODE5, //added by vivi, for led new mode, DLINK
- SW_LED_MODE6, //added by vivi, for led new mode, PRONET
- HW_LED, // HW control 2 LEDs, LED0 and LED1 (there are 4 different control modes)
-}LED_STRATEGY_8190, *PLED_STRATEGY_8190;
-
-#define CHANNEL_PLAN_LEN 10
-
-#define sCrcLng 4
-
-typedef struct _TX_FWINFO_STRUCUTRE{
- //DOWRD 0
- u8 TxRate:7;
- u8 CtsEnable:1;
- u8 RtsRate:7;
- u8 RtsEnable:1;
- u8 TxHT:1;
- u8 Short:1;
- u8 TxBandwidth:1;
- u8 TxSubCarrier:2;
- u8 STBC:2;
- u8 AllowAggregation:1;
- u8 RtsHT:1;
- u8 RtsShort:1;
- u8 RtsBandwidth:1;
- u8 RtsSubcarrier:2;
- u8 RtsSTBC:2;
- u8 EnableCPUDur:1;
-
- //DWORD 1
- u32 RxMF:2;
- u32 RxAMD:3;
- u32 Reserved1:3;
- u32 TxAGCOffset:4;
- u32 TxAGCSign:1;
- u32 Tx_INFO_RSVD:6;
- u32 PacketID:13;
-}TX_FWINFO_T;
-
-
-typedef struct _TX_FWINFO_8190PCI{
- //DOWRD 0
- u8 TxRate:7;
- u8 CtsEnable:1;
- u8 RtsRate:7;
- u8 RtsEnable:1;
- u8 TxHT:1;
- u8 Short:1; //Short PLCP for CCK, or short GI for 11n MCS
- u8 TxBandwidth:1; // This is used for HT MCS rate only.
- u8 TxSubCarrier:2; // This is used for legacy OFDM rate only.
- u8 STBC:2;
- u8 AllowAggregation:1;
- u8 RtsHT:1; //Interpre RtsRate field as high throughput data rate
- u8 RtsShort:1; //Short PLCP for CCK, or short GI for 11n MCS
- u8 RtsBandwidth:1; // This is used for HT MCS rate only.
- u8 RtsSubcarrier:2; // This is used for legacy OFDM rate only.
- u8 RtsSTBC:2;
- u8 EnableCPUDur:1; //Enable firmware to recalculate and assign packet duration
-
- //DWORD 1
- u32 RxMF:2;
- u32 RxAMD:3;
- u32 TxPerPktInfoFeedback:1; // 1: indicate that the transimission info of this packet should be gathered by Firmware and retured by Rx Cmd.
- u32 Reserved1:2;
- u32 TxAGCOffset:4; // Only 90 support
- u32 TxAGCSign:1; // Only 90 support
- u32 RAW_TXD:1; // MAC will send data in txpktbuffer without any processing,such as CRC check
- u32 Retry_Limit:4; // CCX Support relative retry limit FW page only support 4 bits now.
- u32 Reserved2:1;
- u32 PacketID:13;
-
- // DW 2
-
-}TX_FWINFO_8190PCI, *PTX_FWINFO_8190PCI;
-
-typedef struct _phy_ofdm_rx_status_report_819xpci
-{
- u8 trsw_gain_X[4];
- u8 pwdb_all;
- u8 cfosho_X[4];
- u8 cfotail_X[4];
- u8 rxevm_X[2];
- u8 rxsnr_X[4];
- u8 pdsnr_X[2];
- u8 csi_current_X[2];
- u8 csi_target_X[2];
- u8 sigevm;
- u8 max_ex_pwr;
- u8 sgi_en;
- u8 rxsc_sgien_exflg;
-}phy_sts_ofdm_819xpci_t;
-
-typedef struct _phy_cck_rx_status_report_819xpci
-{
- /* For CCK rate descriptor. This is a unsigned 8:1 variable. LSB bit presend
- 0.5. And MSB 7 bts presend a signed value. Range from -64~+63.5. */
- u8 adc_pwdb_X[4];
- u8 sq_rpt;
- u8 cck_agc_rpt;
-}phy_sts_cck_819xpci_t;
-
-typedef struct _phy_ofdm_rx_status_rxsc_sgien_exintfflag{
- u8 reserved:4;
- u8 rxsc:2;
- u8 sgi_en:1;
- u8 ex_intf_flag:1;
-}phy_ofdm_rx_status_rxsc_sgien_exintfflag;
-
-typedef enum _RT_OP_MODE{
- RT_OP_MODE_AP,
- RT_OP_MODE_INFRASTRUCTURE,
- RT_OP_MODE_IBSS,
- RT_OP_MODE_NO_LINK,
-}RT_OP_MODE, *PRT_OP_MODE;
-
-
-/* 2007/11/02 MH Define RF mode temporarily for test. */
-typedef enum tag_Rf_OpType
-{
- RF_OP_By_SW_3wire = 0,
- RF_OP_By_FW,
- RF_OP_MAX
-}RF_OpType_E;
-
-typedef enum _RESET_TYPE {
- RESET_TYPE_NORESET = 0x00,
- RESET_TYPE_NORMAL = 0x01,
- RESET_TYPE_SILENT = 0x02
-} RESET_TYPE;
-
-typedef struct _tx_ring{
- u32 * desc;
- u8 nStuckCount;
- struct _tx_ring * next;
-}__attribute__ ((packed)) tx_ring, * ptx_ring;
-
-struct rtl8192_tx_ring {
- tx_desc_819x_pci *desc;
- dma_addr_t dma;
- unsigned int idx;
- unsigned int entries;
- struct sk_buff_head queue;
-};
-
-#define NIC_SEND_HANG_THRESHOLD_NORMAL 4
-#define NIC_SEND_HANG_THRESHOLD_POWERSAVE 8
-#define MAX_TX_QUEUE 9 // BK, BE, VI, VO, HCCA, MANAGEMENT, COMMAND, HIGH, BEACON.
-
-#define MAX_RX_COUNT 64
-#define MAX_TX_QUEUE_COUNT 9
-
-typedef struct r8192_priv
-{
- struct pci_dev *pdev;
- u8 *mem_start;
-
- /* maintain info from eeprom */
- short epromtype;
- u16 eeprom_vid;
- u16 eeprom_did;
- u8 eeprom_CustomerID;
- u16 eeprom_ChannelPlan;
- RT_CUSTOMER_ID CustomerID;
- u8 IC_Cut;
- int irq;
- struct ieee80211_device *ieee80211;
-#ifdef ENABLE_LPS
- bool ps_force;
- bool force_lps;
- bool bdisable_nic;
-#endif
- bool being_init_adapter;
- u8 Rf_Mode;
- u8 card_8192_version; /* if TCR reports card V B/C this discriminates */
- spinlock_t irq_th_lock;
- spinlock_t rf_ps_lock;
- struct mutex mutex;
-
- short chan;
- short sens;
- /* RX stuff */
- rx_desc_819x_pci *rx_ring;
- dma_addr_t rx_ring_dma;
- unsigned int rx_idx;
- struct sk_buff *rx_buf[MAX_RX_COUNT];
- int rxringcount;
- u16 rxbuffersize;
-
- /* TX stuff */
- struct rtl8192_tx_ring tx_ring[MAX_TX_QUEUE_COUNT];
- int txringcount;
-
- struct tasklet_struct irq_rx_tasklet;
- struct tasklet_struct irq_tx_tasklet;
- struct tasklet_struct irq_prepare_beacon_tasklet;
-
- short up;
- short crcmon; //if 1 allow bad crc frame reception in monitor mode
- struct semaphore wx_sem;
- struct semaphore rf_sem; //used to lock rf write operation added by wb, modified by david
- u8 rf_type; /* 0 means 1T2R, 1 means 2T4R */
-
- short (*rf_set_sens)(struct net_device *dev, short sens);
- u8 (*rf_set_chan)(struct ieee80211_device *ieee80211, u8 ch);
- short promisc;
- /* stats */
- struct Stats stats;
- struct iw_statistics wstats;
- struct proc_dir_entry *dir_dev;
- struct ieee80211_rx_stats previous_stats;
-
- /* RX stuff */
- struct sk_buff_head skb_queue;
- struct work_struct qos_activate;
-
- //2 Tx Related variables
- u16 ShortRetryLimit;
- u16 LongRetryLimit;
-
- u32 LastRxDescTSFHigh;
- u32 LastRxDescTSFLow;
-
-
- //2 Rx Related variables
- u32 ReceiveConfig;
-
- u8 retry_data;
- u8 retry_rts;
-
- struct work_struct reset_wq;
- u8 rx_chk_cnt;
-
-//for rtl819xPci
- // Data Rate Config. Added by Annie, 2006-04-13.
- u16 basic_rate;
- u8 short_preamble;
- u8 slot_time;
- u16 SifsTime;
-/* WirelessMode*/
- u8 RegWirelessMode;
-/*Firmware*/
- prt_firmware pFirmware;
- rtl819x_loopback_e LoopbackMode;
- bool AutoloadFailFlag;
- u16 EEPROMAntPwDiff; // Antenna gain offset from B/C/D to A
- u8 EEPROMThermalMeter;
- u8 EEPROMCrystalCap;
- u8 EEPROMTxPowerLevelCCK[14];// CCK channel 1~14
- // The following definition is for eeprom 93c56
- u8 EEPROMRfACCKChnl1TxPwLevel[3]; //RF-A CCK Tx Power Level at channel 7
- u8 EEPROMRfAOfdmChnlTxPwLevel[3];//RF-A CCK Tx Power Level at [0],[1],[2] = channel 1,7,13
- u8 EEPROMRfCCCKChnl1TxPwLevel[3]; //RF-C CCK Tx Power Level at channel 7
- u8 EEPROMRfCOfdmChnlTxPwLevel[3];//RF-C CCK Tx Power Level at [0],[1],[2] = channel 1,7,13
- u8 EEPROMTxPowerLevelOFDM24G[14]; // OFDM 2.4G channel 1~14
- u8 EEPROMLegacyHTTxPowerDiff; // Legacy to HT rate power diff
- bool bTXPowerDataReadFromEEPORM;
-/*channel plan*/
- u16 RegChannelPlan; // Channel Plan specifed by user, 15: following setting of EEPROM, 0-14: default channel plan index specified by user.
- u16 ChannelPlan;
-/*PS related*/
- // Rf off action for power save
- u8 bHwRfOffAction; //0:No action, 1:By GPIO, 2:By Disable
-/*PHY related*/
- BB_REGISTER_DEFINITION_T PHYRegDef[4]; //Radio A/B/C/D
- // Read/write are allow for following hardware information variables
- u32 MCSTxPowerLevelOriginalOffset[6];
- u32 CCKTxPowerLevelOriginalOffset;
- u8 TxPowerLevelCCK[14]; // CCK channel 1~14
- u8 TxPowerLevelCCK_A[14]; // RF-A, CCK channel 1~14
- u8 TxPowerLevelCCK_C[14];
- u8 TxPowerLevelOFDM24G[14]; // OFDM 2.4G channel 1~14
- u8 TxPowerLevelOFDM5G[14]; // OFDM 5G
- u8 TxPowerLevelOFDM24G_A[14]; // RF-A, OFDM 2.4G channel 1~14
- u8 TxPowerLevelOFDM24G_C[14]; // RF-C, OFDM 2.4G channel 1~14
- u8 LegacyHTTxPowerDiff; // Legacy to HT rate power diff
- u8 AntennaTxPwDiff[3]; // Antenna gain offset, index 0 for B, 1 for C, and 2 for D
- u8 CrystalCap; // CrystalCap.
- u8 ThermalMeter[2]; // ThermalMeter, index 0 for RFIC0, and 1 for RFIC1
- //05/27/2008 cck power enlarge
- u8 CckPwEnl;
- u16 TSSI_13dBm;
- u32 Pwr_Track;
- u8 CCKPresentAttentuation_20Mdefault;
- u8 CCKPresentAttentuation_40Mdefault;
- char CCKPresentAttentuation_difference;
- char CCKPresentAttentuation;
- // Use to calculate PWBD.
- RT_RF_POWER_STATE eRFPowerState;
- RT_RF_CHANGE_SOURCE RfOffReason;
- RT_POWER_SAVE_CONTROL PowerSaveControl;
- u8 bCckHighPower;
- long undecorated_smoothed_pwdb;
- long undecorated_smoothed_cck_adc_pwdb[4];
- //for set channel
- u8 SwChnlInProgress;
- u8 SwChnlStage;
- u8 SwChnlStep;
- u8 SetBWModeInProgress;
- HT_CHANNEL_WIDTH CurrentChannelBW;
-
- // 8190 40MHz mode
- //
- u8 nCur40MhzPrimeSC; // Control channel sub-carrier
- // Joseph test for shorten RF configuration time.
- // We save RF reg0 in this variable to reduce RF reading.
- //
- u32 RfReg0Value[4];
- u8 NumTotalRFPath;
- bool brfpath_rxenable[4];
-//+by amy 080507
- struct timer_list watch_dog_timer;
- u8 watchdog_last_time;
- u8 watchdog_check_reset_cnt;
-
-//+by amy 080515 for dynamic mechenism
- //Add by amy Tx Power Control for Near/Far Range 2008/05/15
- bool bDynamicTxHighPower; // Tx high power state
- bool bDynamicTxLowPower; // Tx low power state
- bool bLastDTPFlag_High;
- bool bLastDTPFlag_Low;
-
- /* OFDM RSSI. For high power or not */
- u8 phy_check_reg824;
- u32 phy_reg824_bit9;
-
- //Add by amy for Rate Adaptive
- rate_adaptive rate_adaptive;
- //Add by amy for TX power tracking
- //2008/05/15 Mars OPEN/CLOSE TX POWER TRACKING
- const txbbgain_struct * txbbgain_table;
- u8 txpower_count;//For 6 sec do tracking again
- bool btxpower_trackingInit;
- u8 OFDM_index;
- u8 CCK_index;
- u8 Record_CCK_20Mindex;
- u8 Record_CCK_40Mindex;
- //2007/09/10 Mars Add CCK TX Power Tracking
- const ccktxbbgain_struct *cck_txbbgain_table;
- const ccktxbbgain_struct *cck_txbbgain_ch14_table;
- u8 rfa_txpowertrackingindex;
- u8 rfa_txpowertrackingindex_real;
- u8 rfa_txpowertracking_default;
- u8 rfc_txpowertrackingindex;
- u8 rfc_txpowertrackingindex_real;
- u8 rfc_txpowertracking_default;
- bool btxpower_tracking;
- bool bcck_in_ch14;
-
- //For Backup Initial Gain
- init_gain initgain_backup;
- u8 DefaultInitialGain[4];
- // For EDCA Turbo mode, Added by amy 080515.
- bool bis_any_nonbepkts;
- bool bcurrent_turbo_EDCA;
-
- bool bis_cur_rdlstate;
- struct timer_list fsync_timer;
- u32 rate_record;
- u32 rateCountDiffRecord;
- u32 ContiuneDiffCount;
- bool bswitch_fsync;
-
- u8 framesync;
- u32 framesyncC34;
- u8 framesyncMonitor;
-
- //by amy for gpio
- bool bHwRadioOff;
- //by amy for ps
- RT_OP_MODE OpMode;
- //by amy for reset_count
- u32 reset_count;
-
- //by amy for silent reset
- RESET_TYPE ResetProgress;
- bool bForcedSilentReset;
- bool bDisableNormalResetCheck;
- u16 TxCounter;
- u16 RxCounter;
- int IrpPendingCount;
- bool bResetInProgress;
- bool force_reset;
- u8 InitialGainOperateType;
-
- //define work item by amy 080526
- struct delayed_work update_beacon_wq;
- struct delayed_work watch_dog_wq;
- struct delayed_work txpower_tracking_wq;
- struct delayed_work rfpath_check_wq;
- struct delayed_work gpio_change_rf_wq;
- struct delayed_work initialgain_operate_wq;
- struct workqueue_struct *priv_wq;
-}r8192_priv;
-
-bool init_firmware(struct r8192_priv *priv);
-u32 read_cam(struct r8192_priv *priv, u8 addr);
-void write_cam(struct r8192_priv *priv, u8 addr, u32 data);
-u8 read_nic_byte(struct r8192_priv *priv, int x);
-u32 read_nic_dword(struct r8192_priv *priv, int x);
-u16 read_nic_word(struct r8192_priv *priv, int x) ;
-void write_nic_byte(struct r8192_priv *priv, int x,u8 y);
-void write_nic_word(struct r8192_priv *priv, int x,u16 y);
-void write_nic_dword(struct r8192_priv *priv, int x,u32 y);
-
-int rtl8192_down(struct net_device *dev);
-int rtl8192_up(struct net_device *dev);
-void rtl8192_commit(struct r8192_priv *priv);
-void write_phy(struct net_device *dev, u8 adr, u8 data);
-void CamResetAllEntry(struct r8192_priv *priv);
-void EnableHWSecurityConfig8192(struct r8192_priv *priv);
-void setKey(struct r8192_priv *priv, u8 EntryNo, u8 KeyIndex, u16 KeyType,
- const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent);
-void firmware_init_param(struct r8192_priv *priv);
-RT_STATUS cmpk_message_handle_tx(struct r8192_priv *priv, u8 *codevirtualaddress, u32 packettype, u32 buffer_len);
-
-#ifdef ENABLE_IPS
-void IPSEnter(struct r8192_priv *priv);
-void IPSLeave(struct r8192_priv *priv);
-void IPSLeave_wq(struct work_struct *work);
-void ieee80211_ips_leave_wq(struct ieee80211_device *ieee80211);
-void ieee80211_ips_leave(struct ieee80211_device *ieee80211);
-#endif
-#ifdef ENABLE_LPS
-void LeisurePSEnter(struct ieee80211_device *ieee80211);
-void LeisurePSLeave(struct ieee80211_device *ieee80211);
-#endif
-
-bool NicIFEnableNIC(struct r8192_priv *priv);
-bool NicIFDisableNIC(struct r8192_priv *priv);
-
-void PHY_SetRtl8192eRfOff(struct r8192_priv *priv);
-#endif
diff --git a/drivers/staging/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/r8192E_cmdpkt.c
new file mode 100644
index 00000000000..58d044ea552
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8192E_cmdpkt.c
@@ -0,0 +1,418 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+
+#include "rtl_core.h"
+#include "r8192E_hw.h"
+#include "r8192E_cmdpkt.h"
+/*---------------------------Define Local Constant---------------------------*/
+/* Debug constant*/
+#define CMPK_DEBOUNCE_CNT 1
+#define CMPK_PRINT(Address)\
+{\
+ unsigned char i;\
+ u32 temp[10];\
+ \
+ memcpy(temp, Address, 40);\
+ for (i = 0; i < 40; i += 4)\
+ printk(KERN_INFO "\r\n %08x", temp[i]);\
+}
+
+/*---------------------------Define functions---------------------------------*/
+bool cmpk_message_handle_tx(
+ struct net_device *dev,
+ u8 *code_virtual_address,
+ u32 packettype,
+ u32 buffer_len)
+{
+
+ bool rt_status = true;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u16 frag_threshold;
+ u16 frag_length = 0, frag_offset = 0;
+ struct rt_firmware *pfirmware = priv->pFirmware;
+ struct sk_buff *skb;
+ unsigned char *seg_ptr;
+ struct cb_desc *tcb_desc;
+ u8 bLastIniPkt;
+
+ struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
+
+ RT_TRACE(COMP_CMDPKT, "%s(),buffer_len is %d\n", __func__, buffer_len);
+ firmware_init_param(dev);
+ frag_threshold = pfirmware->cmdpacket_frag_thresold;
+
+ do {
+ if ((buffer_len - frag_offset) > frag_threshold) {
+ frag_length = frag_threshold ;
+ bLastIniPkt = 0;
+
+ } else {
+ frag_length = (u16)(buffer_len - frag_offset);
+ bLastIniPkt = 1;
+ }
+
+ skb = dev_alloc_skb(frag_length +
+ priv->rtllib->tx_headroom + 4);
+
+ if (skb == NULL) {
+ rt_status = false;
+ goto Failed;
+ }
+
+ memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
+ tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
+ tcb_desc->queue_index = TXCMD_QUEUE;
+ tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_NORMAL;
+ tcb_desc->bLastIniPkt = bLastIniPkt;
+ tcb_desc->pkt_size = frag_length;
+
+ seg_ptr = skb_put(skb, priv->rtllib->tx_headroom);
+ pTxFwInfo = (struct tx_fwinfo_8190pci *)seg_ptr;
+ memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
+ memset(pTxFwInfo, 0x12, 8);
+
+ seg_ptr = skb_put(skb, frag_length);
+ memcpy(seg_ptr, code_virtual_address, (u32)frag_length);
+
+ priv->rtllib->softmac_hard_start_xmit(skb, dev);
+
+ code_virtual_address += frag_length;
+ frag_offset += frag_length;
+
+ } while (frag_offset < buffer_len);
+
+ write_nic_byte(dev, TPPoll, TPPoll_CQ);
+Failed:
+ return rt_status;
+} /* CMPK_Message_Handle_Tx */
+
+static void
+cmpk_count_txstatistic(
+ struct net_device *dev,
+ struct cmpk_txfb *pstx_fb)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+#ifdef ENABLE_PS
+ enum rt_rf_power_state rtState;
+
+ pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE,
+ (pu1Byte)(&rtState));
+
+ if (rtState == eRfOff)
+ return;
+#endif
+
+ if (pstx_fb->tok) {
+ priv->stats.txfeedbackok++;
+ priv->stats.txoktotal++;
+ priv->stats.txokbytestotal += pstx_fb->pkt_length;
+ priv->stats.txokinperiod++;
+
+ if (pstx_fb->pkt_type == PACKET_MULTICAST) {
+ priv->stats.txmulticast++;
+ priv->stats.txbytesmulticast += pstx_fb->pkt_length;
+ } else if (pstx_fb->pkt_type == PACKET_BROADCAST) {
+ priv->stats.txbroadcast++;
+ priv->stats.txbytesbroadcast += pstx_fb->pkt_length;
+ } else {
+ priv->stats.txunicast++;
+ priv->stats.txbytesunicast += pstx_fb->pkt_length;
+ }
+ } else {
+ priv->stats.txfeedbackfail++;
+ priv->stats.txerrtotal++;
+ priv->stats.txerrbytestotal += pstx_fb->pkt_length;
+
+ if (pstx_fb->pkt_type == PACKET_MULTICAST)
+ priv->stats.txerrmulticast++;
+ else if (pstx_fb->pkt_type == PACKET_BROADCAST)
+ priv->stats.txerrbroadcast++;
+ else
+ priv->stats.txerrunicast++;
+ }
+
+ priv->stats.txretrycount += pstx_fb->retry_cnt;
+ priv->stats.txfeedbackretry += pstx_fb->retry_cnt;
+
+} /* cmpk_CountTxStatistic */
+
+
+
+static void cmpk_handle_tx_feedback(struct net_device *dev, u8 *pmsg)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct cmpk_txfb rx_tx_fb; /* */
+
+ priv->stats.txfeedback++;
+
+
+ memcpy((u8 *)&rx_tx_fb, pmsg, sizeof(struct cmpk_txfb));
+ cmpk_count_txstatistic(dev, &rx_tx_fb);
+
+} /* cmpk_Handle_Tx_Feedback */
+
+static void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u16 tx_rate;
+
+ if ((priv->rtllib->current_network.mode == IEEE_A) ||
+ (priv->rtllib->current_network.mode == IEEE_N_5G) ||
+ ((priv->rtllib->current_network.mode == IEEE_N_24G) &&
+ (!priv->rtllib->pHTInfo->bCurSuppCCK))) {
+ tx_rate = 60;
+ DMESG("send beacon frame tx rate is 6Mbpm\n");
+ } else {
+ tx_rate = 10;
+ DMESG("send beacon frame tx rate is 1Mbpm\n");
+ }
+
+}
+
+static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
+{
+ struct cmpk_intr_sta rx_intr_status; /* */
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ DMESG("---> cmpk_Handle_Interrupt_Status()\n");
+
+
+ rx_intr_status.length = pmsg[1];
+ if (rx_intr_status.length != (sizeof(struct cmpk_intr_sta) - 2)) {
+ DMESG("cmpk_Handle_Interrupt_Status: wrong length!\n");
+ return;
+ }
+
+
+ if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
+ rx_intr_status.interrupt_status = *((u32 *)(pmsg + 4));
+
+ DMESG("interrupt status = 0x%x\n",
+ rx_intr_status.interrupt_status);
+
+ if (rx_intr_status.interrupt_status & ISR_TxBcnOk) {
+ priv->rtllib->bibsscoordinator = true;
+ priv->stats.txbeaconokint++;
+ } else if (rx_intr_status.interrupt_status & ISR_TxBcnErr) {
+ priv->rtllib->bibsscoordinator = false;
+ priv->stats.txbeaconerr++;
+ }
+
+ if (rx_intr_status.interrupt_status & ISR_BcnTimerIntr)
+ cmdpkt_beacontimerinterrupt_819xusb(dev);
+ }
+
+ DMESG("<---- cmpk_handle_interrupt_status()\n");
+
+} /* cmpk_handle_interrupt_status */
+
+
+static void cmpk_handle_query_config_rx(struct net_device *dev, u8 *pmsg)
+{
+ cmpk_query_cfg_t rx_query_cfg; /* */
+
+
+ rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000)>>31;
+ rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5;
+ rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3;
+ rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0;
+ rx_query_cfg.cfg_offset = pmsg[7];
+ rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) |
+ (pmsg[10] << 8) | (pmsg[11] << 0);
+ rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) |
+ (pmsg[14] << 8) | (pmsg[15] << 0);
+
+} /* cmpk_Handle_Query_Config_Rx */
+
+
+static void cmpk_count_tx_status(struct net_device *dev,
+ struct cmpk_tx_status *pstx_status)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+#ifdef ENABLE_PS
+
+ enum rt_rf_power_state rtstate;
+
+ pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE,
+ (pu1Byte)(&rtState));
+
+ if (rtState == eRfOff)
+ return;
+#endif
+
+ priv->stats.txfeedbackok += pstx_status->txok;
+ priv->stats.txoktotal += pstx_status->txok;
+
+ priv->stats.txfeedbackfail += pstx_status->txfail;
+ priv->stats.txerrtotal += pstx_status->txfail;
+
+ priv->stats.txretrycount += pstx_status->txretry;
+ priv->stats.txfeedbackretry += pstx_status->txretry;
+
+
+ priv->stats.txmulticast += pstx_status->txmcok;
+ priv->stats.txbroadcast += pstx_status->txbcok;
+ priv->stats.txunicast += pstx_status->txucok;
+
+ priv->stats.txerrmulticast += pstx_status->txmcfail;
+ priv->stats.txerrbroadcast += pstx_status->txbcfail;
+ priv->stats.txerrunicast += pstx_status->txucfail;
+
+ priv->stats.txbytesmulticast += pstx_status->txmclength;
+ priv->stats.txbytesbroadcast += pstx_status->txbclength;
+ priv->stats.txbytesunicast += pstx_status->txuclength;
+
+ priv->stats.last_packet_rate = pstx_status->rate;
+} /* cmpk_CountTxStatus */
+
+
+
+static void cmpk_handle_tx_status(struct net_device *dev, u8 *pmsg)
+{
+ struct cmpk_tx_status rx_tx_sts; /* */
+
+ memcpy((void *)&rx_tx_sts, (void *)pmsg, sizeof(struct cmpk_tx_status));
+ cmpk_count_tx_status(dev, &rx_tx_sts);
+}
+
+static void cmpk_handle_tx_rate_history(struct net_device *dev, u8 *pmsg)
+{
+ struct cmpk_tx_rahis *ptxrate;
+ u8 i, j;
+ u16 length = sizeof(struct cmpk_tx_rahis);
+ u32 *ptemp;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+
+#ifdef ENABLE_PS
+ pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE,
+ (pu1Byte)(&rtState));
+
+ if (rtState == eRfOff)
+ return;
+#endif
+
+ ptemp = (u32 *)pmsg;
+
+ for (i = 0; i < (length / 4); i++) {
+ u16 temp1, temp2;
+
+ temp1 = ptemp[i] & 0x0000FFFF;
+ temp2 = ptemp[i] >> 16;
+ ptemp[i] = (temp1 << 16) | temp2;
+ }
+
+ ptxrate = (struct cmpk_tx_rahis *)pmsg;
+
+ if (ptxrate == NULL)
+ return;
+
+ for (i = 0; i < 16; i++) {
+ if (i < 4)
+ priv->stats.txrate.cck[i] += ptxrate->cck[i];
+
+ if (i < 8)
+ priv->stats.txrate.ofdm[i] += ptxrate->ofdm[i];
+
+ for (j = 0; j < 4; j++)
+ priv->stats.txrate.ht_mcs[j][i] +=
+ ptxrate->ht_mcs[j][i];
+ }
+
+}
+
+
+u32 cmpk_message_handle_rx(struct net_device *dev,
+ struct rtllib_rx_stats *pstats)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ int total_length;
+ u8 cmd_length, exe_cnt = 0;
+ u8 element_id;
+ u8 *pcmd_buff;
+
+ RT_TRACE(COMP_CMDPKT, "---->cmpk_message_handle_rx()\n");
+
+ if (pstats == NULL) {
+ /* Print error message. */
+ /*RT_TRACE(COMP_SEND, DebugLevel,
+ ("\n\r[CMPK]-->Err queue id or pointer"));*/
+ return 0;
+ }
+
+ total_length = pstats->Length;
+
+ pcmd_buff = pstats->virtual_address;
+
+ element_id = pcmd_buff[0];
+
+ while (total_length > 0 || exe_cnt++ > 100) {
+ element_id = pcmd_buff[0];
+
+ switch (element_id) {
+ case RX_TX_FEEDBACK:
+ RT_TRACE(COMP_CMDPKT, "---->cmpk_message_handle_rx():"
+ "RX_TX_FEEDBACK\n");
+ cmpk_handle_tx_feedback(dev, pcmd_buff);
+ cmd_length = CMPK_RX_TX_FB_SIZE;
+ break;
+ case RX_INTERRUPT_STATUS:
+ RT_TRACE(COMP_CMDPKT, "---->cmpk_message_handle_rx():"
+ "RX_INTERRUPT_STATUS\n");
+ cmpk_handle_interrupt_status(dev, pcmd_buff);
+ cmd_length = sizeof(struct cmpk_intr_sta);
+ break;
+ case BOTH_QUERY_CONFIG:
+ RT_TRACE(COMP_CMDPKT, "---->cmpk_message_handle_rx():"
+ "BOTH_QUERY_CONFIG\n");
+ cmpk_handle_query_config_rx(dev, pcmd_buff);
+ cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE;
+ break;
+ case RX_TX_STATUS:
+ RT_TRACE(COMP_CMDPKT, "---->cmpk_message_handle_rx():"
+ "RX_TX_STATUS\n");
+ cmpk_handle_tx_status(dev, pcmd_buff);
+ cmd_length = CMPK_RX_TX_STS_SIZE;
+ break;
+ case RX_TX_PER_PKT_FEEDBACK:
+ RT_TRACE(COMP_CMDPKT, "---->cmpk_message_handle_rx():"
+ "RX_TX_PER_PKT_FEEDBACK\n");
+ cmd_length = CMPK_RX_TX_FB_SIZE;
+ break;
+ case RX_TX_RATE_HISTORY:
+ RT_TRACE(COMP_CMDPKT, "---->cmpk_message_handle_rx():"
+ "RX_TX_HISTORY\n");
+ cmpk_handle_tx_rate_history(dev, pcmd_buff);
+ cmd_length = CMPK_TX_RAHIS_SIZE;
+ break;
+ default:
+
+ RT_TRACE(COMP_CMDPKT, "---->cmpk_message_handle_rx():"
+ "unknow CMD Element\n");
+ return 1;
+ }
+
+ priv->stats.rxcmdpkt[element_id]++;
+
+ total_length -= cmd_length;
+ pcmd_buff += cmd_length;
+ }
+ return 1;
+}
diff --git a/drivers/staging/rtl8192e/r8192E_cmdpkt.h b/drivers/staging/rtl8192e/r8192E_cmdpkt.h
new file mode 100644
index 00000000000..23219e17e5a
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8192E_cmdpkt.h
@@ -0,0 +1,159 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef R819XUSB_CMDPKT_H
+#define R819XUSB_CMDPKT_H
+#define CMPK_RX_TX_FB_SIZE sizeof(struct cmpk_txfb)
+#define CMPK_TX_SET_CONFIG_SIZE sizeof(struct cmpk_set_cfg)
+#define CMPK_BOTH_QUERY_CONFIG_SIZE sizeof(struct cmpk_set_cfg)
+#define CMPK_RX_TX_STS_SIZE sizeof(struct cmpk_tx_status)
+#define CMPK_RX_DBG_MSG_SIZE sizeof(struct cmpk_rx_dbginfo)
+#define CMPK_TX_RAHIS_SIZE sizeof(struct cmpk_tx_rahis)
+
+#define ISR_TxBcnOk BIT27
+#define ISR_TxBcnErr BIT26
+#define ISR_BcnTimerIntr BIT13
+
+
+struct cmpk_txfb {
+ u8 element_id;
+ u8 length;
+ u8 TID:4; /* */
+ u8 fail_reason:3; /* */
+ u8 tok:1;
+ u8 reserve1:4; /* */
+ u8 pkt_type:2; /* */
+ u8 bandwidth:1; /* */
+ u8 qos_pkt:1; /* */
+
+ u8 reserve2; /* */
+ u8 retry_cnt; /* */
+ u16 pkt_id; /* */
+
+ u16 seq_num; /* */
+ u8 s_rate;
+ u8 f_rate;
+
+ u8 s_rts_rate; /* */
+ u8 f_rts_rate; /* */
+ u16 pkt_length; /* */
+
+ u16 reserve3; /* */
+ u16 duration; /* */
+};
+
+struct cmpk_intr_sta {
+ u8 element_id;
+ u8 length;
+ u16 reserve;
+ u32 interrupt_status;
+};
+
+
+struct cmpk_set_cfg {
+ u8 element_id;
+ u8 length;
+ u16 reserve1;
+ u8 cfg_reserve1:3;
+ u8 cfg_size:2;
+ u8 cfg_type:2;
+ u8 cfg_action:1;
+ u8 cfg_reserve2;
+ u8 cfg_page:4;
+ u8 cfg_reserve3:4;
+ u8 cfg_offset;
+ u32 value;
+ u32 mask;
+};
+
+#define cmpk_query_cfg_t struct cmpk_set_cfg
+
+struct cmpk_tx_status {
+ u16 reserve1;
+ u8 length;
+ u8 element_id;
+
+ u16 txfail;
+ u16 txok;
+
+ u16 txmcok;
+ u16 txretry;
+
+ u16 txucok;
+ u16 txbcok;
+
+ u16 txbcfail;
+ u16 txmcfail;
+
+ u16 reserve2;
+ u16 txucfail;
+
+ u32 txmclength;
+ u32 txbclength;
+ u32 txuclength;
+
+ u16 reserve3_23;
+ u8 reserve3_1;
+ u8 rate;
+} __packed;
+
+struct cmpk_rx_dbginfo {
+ u16 reserve1;
+ u8 length;
+ u8 element_id;
+
+
+};
+
+struct cmpk_tx_rahis {
+ u8 element_id;
+ u8 length;
+ u16 reserved1;
+
+ u16 cck[4];
+
+ u16 ofdm[8];
+
+
+
+
+
+ u16 ht_mcs[4][16];
+
+} __packed;
+
+enum cmpk_element {
+ RX_TX_FEEDBACK = 0,
+ RX_INTERRUPT_STATUS = 1,
+ TX_SET_CONFIG = 2,
+ BOTH_QUERY_CONFIG = 3,
+ RX_TX_STATUS = 4,
+ RX_DBGINFO_FEEDBACK = 5,
+ RX_TX_PER_PKT_FEEDBACK = 6,
+ RX_TX_RATE_HISTORY = 7,
+ RX_CMD_ELE_MAX
+};
+
+extern u32 cmpk_message_handle_rx(struct net_device *dev,
+ struct rtllib_rx_stats *pstats);
+extern bool cmpk_message_handle_tx(struct net_device *dev,
+ u8 *codevirtualaddress, u32 packettype,
+ u32 buffer_len);
+
+
+#endif
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
deleted file mode 100644
index 94d9c8d5d09..00000000000
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ /dev/null
@@ -1,5039 +0,0 @@
-/******************************************************************************
- * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
- * Linux device driver for RTL8192E
- *
- * Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Jerry chuang <wlanfae@realtek.com>
- */
-
-
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/hardirq.h>
-#include <asm/uaccess.h>
-#include "r8192E_hw.h"
-#include "r8192E.h"
-#include "r8190_rtl8256.h" /* RTL8225 Radio frontend */
-#include "r8180_93cx6.h" /* Card EEPROM */
-#include "r8192E_wx.h"
-#include "r819xE_phy.h" //added by WB 4.30.2008
-#include "r819xE_phyreg.h"
-#include "r819xE_cmdpkt.h"
-#include "r8192E_dm.h"
-
-#ifdef CONFIG_PM
-#include "r8192_pm.h"
-#endif
-
-#ifdef ENABLE_DOT11D
-#include "ieee80211/dot11d.h"
-#endif
-
-//set here to open your trace code. //WB
-u32 rt_global_debug_component = COMP_ERR ; //always open err flags on
-
-static DEFINE_PCI_DEVICE_TABLE(rtl8192_pci_id_tbl) = {
- /* Realtek */
- { PCI_DEVICE(0x10ec, 0x8192) },
-
- /* Corega */
- { PCI_DEVICE(0x07aa, 0x0044) },
- { PCI_DEVICE(0x07aa, 0x0047) },
- {}
-};
-
-static char ifname[IFNAMSIZ] = "wlan%d";
-static int hwwep = 1; //default use hw. set 0 to use software security
-static int channels = 0x3fff;
-
-MODULE_LICENSE("GPL");
-MODULE_VERSION("V 1.1");
-MODULE_DEVICE_TABLE(pci, rtl8192_pci_id_tbl);
-//MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
-MODULE_DESCRIPTION("Linux driver for Realtek RTL819x WiFi cards");
-
-
-module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO|S_IWUSR);
-module_param(hwwep,int, S_IRUGO|S_IWUSR);
-module_param(channels,int, S_IRUGO|S_IWUSR);
-
-MODULE_PARM_DESC(ifname," Net interface name, wlan%d=default");
-MODULE_PARM_DESC(hwwep," Try to use hardware WEP support. Still broken and not available on all cards");
-MODULE_PARM_DESC(channels," Channel bitmask for specific locales. NYI");
-
-static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id);
-static void __devexit rtl8192_pci_disconnect(struct pci_dev *pdev);
-
-static struct pci_driver rtl8192_pci_driver = {
- .name = RTL819xE_MODULE_NAME, /* Driver name */
- .id_table = rtl8192_pci_id_tbl, /* PCI_ID table */
- .probe = rtl8192_pci_probe, /* probe fn */
- .remove = __devexit_p(rtl8192_pci_disconnect), /* remove fn */
-#ifdef CONFIG_PM
- .suspend = rtl8192E_suspend, /* PM suspend fn */
- .resume = rtl8192E_resume, /* PM resume fn */
-#else
- .suspend = NULL, /* PM suspend fn */
- .resume = NULL, /* PM resume fn */
-#endif
-};
-
-static void rtl8192_start_beacon(struct ieee80211_device *ieee80211);
-static void rtl8192_stop_beacon(struct ieee80211_device *ieee80211);
-static void rtl819x_watchdog_wqcallback(struct work_struct *work);
-static void rtl8192_irq_rx_tasklet(unsigned long arg);
-static void rtl8192_irq_tx_tasklet(unsigned long arg);
-static void rtl8192_prepare_beacon(unsigned long arg);
-static irqreturn_t rtl8192_interrupt(int irq, void *param);
-static void rtl819xE_tx_cmd(struct r8192_priv *priv, struct sk_buff *skb);
-static void rtl8192_update_ratr_table(struct r8192_priv *priv);
-static void rtl8192_restart(struct work_struct *work);
-static void watch_dog_timer_callback(unsigned long data);
-static int _rtl8192_up(struct r8192_priv *priv);
-static void rtl8192_cancel_deferred_work(struct r8192_priv* priv);
-static short rtl8192_tx(struct r8192_priv *priv, struct sk_buff* skb);
-
-#ifdef ENABLE_DOT11D
-
-typedef struct _CHANNEL_LIST
-{
- u8 Channel[32];
- u8 Len;
-}CHANNEL_LIST, *PCHANNEL_LIST;
-
-static const CHANNEL_LIST ChannelPlan[] = {
- {{1,2,3,4,5,6,7,8,9,10,11,36,40,44,48,52,56,60,64,149,153,157,161,165},24}, //FCC
- {{1,2,3,4,5,6,7,8,9,10,11},11}, //IC
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //ETSI
- {{1,2,3,4,5,6,7,8,9,10,11,12,13},13}, //Spain. Change to ETSI.
- {{1,2,3,4,5,6,7,8,9,10,11,12,13},13}, //France. Change to ETSI.
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64},22}, //MKK //MKK
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64},22},//MKK1
- {{1,2,3,4,5,6,7,8,9,10,11,12,13},13}, //Israel.
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64},22}, // For 11a , TELEC
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64}, 22}, //MIC
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14} //For Global Domain. 1-11:active scan, 12-14 passive scan. //+YJ, 080626
-};
-
-static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv* priv)
-{
- int i, max_chan=-1, min_chan=-1;
- struct ieee80211_device* ieee = priv->ieee80211;
- switch (channel_plan)
- {
- case COUNTRY_CODE_FCC:
- case COUNTRY_CODE_IC:
- case COUNTRY_CODE_ETSI:
- case COUNTRY_CODE_SPAIN:
- case COUNTRY_CODE_FRANCE:
- case COUNTRY_CODE_MKK:
- case COUNTRY_CODE_MKK1:
- case COUNTRY_CODE_ISRAEL:
- case COUNTRY_CODE_TELEC:
- case COUNTRY_CODE_MIC:
- {
- Dot11d_Init(ieee);
- ieee->bGlobalDomain = false;
- //acturally 8225 & 8256 rf chip only support B,G,24N mode
- min_chan = 1;
- max_chan = 14;
-
- if (ChannelPlan[channel_plan].Len != 0){
- // Clear old channel map
- memset(GET_DOT11D_INFO(ieee)->channel_map, 0, sizeof(GET_DOT11D_INFO(ieee)->channel_map));
- // Set new channel map
- for (i=0;i<ChannelPlan[channel_plan].Len;i++)
- {
- if (ChannelPlan[channel_plan].Channel[i] < min_chan || ChannelPlan[channel_plan].Channel[i] > max_chan)
- break;
- GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan[channel_plan].Channel[i]] = 1;
- }
- }
- break;
- }
- case COUNTRY_CODE_GLOBAL_DOMAIN:
- {
- GET_DOT11D_INFO(ieee)->bEnabled = 0; //this flag enabled to follow 11d country IE setting, otherwise, it shall follow global domain setting
- Dot11d_Reset(ieee);
- ieee->bGlobalDomain = true;
- break;
- }
- default:
- break;
- }
-}
-#endif
-
-static inline bool rx_hal_is_cck_rate(prx_fwinfo_819x_pci pdrvinfo)
-{
- return (pdrvinfo->RxRate == DESC90_RATE1M ||
- pdrvinfo->RxRate == DESC90_RATE2M ||
- pdrvinfo->RxRate == DESC90_RATE5_5M ||
- pdrvinfo->RxRate == DESC90_RATE11M) &&
- !pdrvinfo->RxHT;
-}
-
-void CamResetAllEntry(struct r8192_priv* priv)
-{
- write_nic_dword(priv, RWCAM, BIT31|BIT30);
-}
-
-void write_cam(struct r8192_priv *priv, u8 addr, u32 data)
-{
- write_nic_dword(priv, WCAMI, data);
- write_nic_dword(priv, RWCAM, BIT31|BIT16|(addr&0xff) );
-}
-
-u32 read_cam(struct r8192_priv *priv, u8 addr)
-{
- write_nic_dword(priv, RWCAM, 0x80000000|(addr&0xff) );
- return read_nic_dword(priv, 0xa8);
-}
-
-u8 read_nic_byte(struct r8192_priv *priv, int x)
-{
- return 0xff & readb(priv->mem_start + x);
-}
-
-u32 read_nic_dword(struct r8192_priv *priv, int x)
-{
- return readl(priv->mem_start + x);
-}
-
-u16 read_nic_word(struct r8192_priv *priv, int x)
-{
- return readw(priv->mem_start + x);
-}
-
-void write_nic_byte(struct r8192_priv *priv, int x,u8 y)
-{
- writeb(y, priv->mem_start + x);
- udelay(20);
-}
-
-void write_nic_dword(struct r8192_priv *priv, int x,u32 y)
-{
- writel(y, priv->mem_start + x);
- udelay(20);
-}
-
-void write_nic_word(struct r8192_priv *priv, int x,u16 y)
-{
- writew(y, priv->mem_start + x);
- udelay(20);
-}
-
-u8 rtl8192e_ap_sec_type(struct ieee80211_device *ieee)
-{
- static const u8 ccmp_ie[4] = {0x00,0x50,0xf2,0x04};
- static const u8 ccmp_rsn_ie[4] = {0x00, 0x0f, 0xac, 0x04};
- int wpa_ie_len= ieee->wpa_ie_len;
- struct ieee80211_crypt_data* crypt;
- int encrypt;
-
- crypt = ieee->crypt[ieee->tx_keyidx];
-
- encrypt = (ieee->current_network.capability & WLAN_CAPABILITY_PRIVACY) ||
- (ieee->host_encrypt && crypt && crypt->ops &&
- (0 == strcmp(crypt->ops->name,"WEP")));
-
- /* simply judge */
- if(encrypt && (wpa_ie_len == 0)) {
- // wep encryption, no N mode setting */
- return SEC_ALG_WEP;
- } else if((wpa_ie_len != 0)) {
- // parse pairwise key type */
- if (((ieee->wpa_ie[0] == 0xdd) && (!memcmp(&(ieee->wpa_ie[14]),ccmp_ie,4))) ||
- ((ieee->wpa_ie[0] == 0x30) && (!memcmp(&ieee->wpa_ie[10],ccmp_rsn_ie, 4))))
- return SEC_ALG_CCMP;
- else
- return SEC_ALG_TKIP;
- } else {
- return SEC_ALG_NONE;
- }
-}
-
-void rtl8192e_SetHwReg(struct ieee80211_device *ieee80211, u8 variable, u8 *val)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
-
- switch(variable)
- {
-
- case HW_VAR_BSSID:
- write_nic_dword(priv, BSSIDR, ((u32*)(val))[0]);
- write_nic_word(priv, BSSIDR+2, ((u16*)(val+2))[0]);
- break;
-
- case HW_VAR_MEDIA_STATUS:
- {
- RT_OP_MODE OpMode = *((RT_OP_MODE *)(val));
- u8 btMsr = read_nic_byte(priv, MSR);
-
- btMsr &= 0xfc;
-
- switch(OpMode)
- {
- case RT_OP_MODE_INFRASTRUCTURE:
- btMsr |= MSR_INFRA;
- break;
-
- case RT_OP_MODE_IBSS:
- btMsr |= MSR_ADHOC;
- break;
-
- case RT_OP_MODE_AP:
- btMsr |= MSR_AP;
- break;
-
- default:
- btMsr |= MSR_NOLINK;
- break;
- }
-
- write_nic_byte(priv, MSR, btMsr);
- }
- break;
-
- case HW_VAR_CHECK_BSSID:
- {
- u32 RegRCR, Type;
-
- Type = ((u8*)(val))[0];
- RegRCR = read_nic_dword(priv, RCR);
- priv->ReceiveConfig = RegRCR;
-
- if (Type == true)
- RegRCR |= (RCR_CBSSID);
- else if (Type == false)
- RegRCR &= (~RCR_CBSSID);
-
- write_nic_dword(priv, RCR,RegRCR);
- priv->ReceiveConfig = RegRCR;
-
- }
- break;
-
- case HW_VAR_SLOT_TIME:
- {
- priv->slot_time = val[0];
- write_nic_byte(priv, SLOT_TIME, val[0]);
-
- }
- break;
-
- case HW_VAR_ACK_PREAMBLE:
- {
- u32 regTmp = 0;
- priv->short_preamble = (bool)(*(u8*)val );
- regTmp = priv->basic_rate;
- if (priv->short_preamble)
- regTmp |= BRSR_AckShortPmb;
- write_nic_dword(priv, RRSR, regTmp);
- }
- break;
-
- case HW_VAR_CPU_RST:
- write_nic_dword(priv, CPU_GEN, ((u32*)(val))[0]);
- break;
-
- default:
- break;
- }
-
-}
-
-static struct proc_dir_entry *rtl8192_proc = NULL;
-
-static int proc_get_stats_ap(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct r8192_priv *priv = data;
- struct ieee80211_device *ieee = priv->ieee80211;
- struct ieee80211_network *target;
- int len = 0;
-
- list_for_each_entry(target, &ieee->network_list, list) {
-
- len += snprintf(page + len, count - len,
- "%s ", target->ssid);
-
- if(target->wpa_ie_len>0 || target->rsn_ie_len>0){
- len += snprintf(page + len, count - len,
- "WPA\n");
- }
- else{
- len += snprintf(page + len, count - len,
- "non_WPA\n");
- }
-
- }
-
- *eof = 1;
- return len;
-}
-
-static int proc_get_registers(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct r8192_priv *priv = data;
- int len = 0;
- int i,n;
- int max=0xff;
-
- /* This dump the current register page */
- len += snprintf(page + len, count - len,
- "\n####################page 0##################\n ");
-
- for(n=0;n<=max;)
- {
- len += snprintf(page + len, count - len,
- "\nD: %2x > ",n);
-
- for(i=0;i<16 && n<=max;i++,n++)
- len += snprintf(page + len, count - len,
- "%2x ",read_nic_byte(priv,n));
- }
- len += snprintf(page + len, count - len,"\n");
- len += snprintf(page + len, count - len,
- "\n####################page 1##################\n ");
- for(n=0;n<=max;)
- {
- len += snprintf(page + len, count - len,
- "\nD: %2x > ",n);
-
- for(i=0;i<16 && n<=max;i++,n++)
- len += snprintf(page + len, count - len,
- "%2x ",read_nic_byte(priv,0x100|n));
- }
-
- len += snprintf(page + len, count - len,
- "\n####################page 3##################\n ");
- for(n=0;n<=max;)
- {
- len += snprintf(page + len, count - len,
- "\nD: %2x > ",n);
-
- for(i=0;i<16 && n<=max;i++,n++)
- len += snprintf(page + len, count - len,
- "%2x ",read_nic_byte(priv,0x300|n));
- }
-
- *eof = 1;
- return len;
-
-}
-
-static int proc_get_stats_tx(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct r8192_priv *priv = data;
-
- int len = 0;
-
- len += snprintf(page + len, count - len,
- "TX VI priority ok int: %lu\n"
- "TX VO priority ok int: %lu\n"
- "TX BE priority ok int: %lu\n"
- "TX BK priority ok int: %lu\n"
- "TX MANAGE priority ok int: %lu\n"
- "TX BEACON priority ok int: %lu\n"
- "TX BEACON priority error int: %lu\n"
- "TX CMDPKT priority ok int: %lu\n"
- "TX queue stopped?: %d\n"
- "TX fifo overflow: %lu\n"
- "TX total data packets %lu\n"
- "TX total data bytes :%lu\n",
- priv->stats.txviokint,
- priv->stats.txvookint,
- priv->stats.txbeokint,
- priv->stats.txbkokint,
- priv->stats.txmanageokint,
- priv->stats.txbeaconokint,
- priv->stats.txbeaconerr,
- priv->stats.txcmdpktokint,
- netif_queue_stopped(priv->ieee80211->dev),
- priv->stats.txoverflow,
- priv->ieee80211->stats.tx_packets,
- priv->ieee80211->stats.tx_bytes);
-
- *eof = 1;
- return len;
-}
-
-
-
-static int proc_get_stats_rx(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct r8192_priv *priv = data;
- int len = 0;
-
- len += snprintf(page + len, count - len,
- "RX packets: %lu\n"
- "RX desc err: %lu\n"
- "RX rx overflow error: %lu\n",
- priv->stats.rxint,
- priv->stats.rxrdu,
- priv->stats.rxoverflow);
-
- *eof = 1;
- return len;
-}
-
-static void rtl8192_proc_module_init(void)
-{
- RT_TRACE(COMP_INIT, "Initializing proc filesystem\n");
- rtl8192_proc = proc_mkdir(RTL819xE_MODULE_NAME, init_net.proc_net);
-}
-
-
-static void rtl8192_proc_module_remove(void)
-{
- remove_proc_entry(RTL819xE_MODULE_NAME, init_net.proc_net);
-}
-
-
-static void rtl8192_proc_remove_one(struct r8192_priv *priv)
-{
- struct net_device *dev = priv->ieee80211->dev;
-
- printk("dev name=======> %s\n",dev->name);
-
- if (priv->dir_dev) {
- remove_proc_entry("stats-tx", priv->dir_dev);
- remove_proc_entry("stats-rx", priv->dir_dev);
- remove_proc_entry("stats-ap", priv->dir_dev);
- remove_proc_entry("registers", priv->dir_dev);
- remove_proc_entry("wlan0", rtl8192_proc);
- priv->dir_dev = NULL;
- }
-}
-
-
-static void rtl8192_proc_init_one(struct r8192_priv *priv)
-{
- struct net_device *dev = priv->ieee80211->dev;
- struct proc_dir_entry *e;
-
- priv->dir_dev = proc_mkdir(dev->name, rtl8192_proc);
- if (!priv->dir_dev) {
- RT_TRACE(COMP_ERR, "Unable to initialize /proc/net/rtl8192/%s\n",
- dev->name);
- return;
- }
- e = create_proc_read_entry("stats-rx", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_stats_rx, priv);
-
- if (!e) {
- RT_TRACE(COMP_ERR,"Unable to initialize "
- "/proc/net/rtl8192/%s/stats-rx\n",
- dev->name);
- }
-
-
- e = create_proc_read_entry("stats-tx", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_stats_tx, priv);
-
- if (!e) {
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/stats-tx\n",
- dev->name);
- }
-
- e = create_proc_read_entry("stats-ap", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_stats_ap, priv);
-
- if (!e) {
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/stats-ap\n",
- dev->name);
- }
-
- e = create_proc_read_entry("registers", S_IFREG | S_IRUGO,
- priv->dir_dev, proc_get_registers, priv);
- if (!e) {
- RT_TRACE(COMP_ERR, "Unable to initialize "
- "/proc/net/rtl8192/%s/registers\n",
- dev->name);
- }
-}
-
-static short check_nic_enough_desc(struct ieee80211_device *ieee, int prio)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee->dev);
- struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
-
- /* for now we reserve two free descriptor as a safety boundary
- * between the tail and the head
- */
- return (ring->entries - skb_queue_len(&ring->queue) >= 2);
-}
-
-static void tx_timeout(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- schedule_work(&priv->reset_wq);
- printk("TXTIMEOUT");
-}
-
-static void rtl8192_irq_enable(struct r8192_priv *priv)
-{
- u32 mask;
-
- mask = IMR_ROK | IMR_VODOK | IMR_VIDOK | IMR_BEDOK | IMR_BKDOK |
- IMR_HCCADOK | IMR_MGNTDOK | IMR_COMDOK | IMR_HIGHDOK |
- IMR_BDOK | IMR_RXCMDOK | IMR_TIMEOUT0 | IMR_RDU | IMR_RXFOVW |
- IMR_TXFOVW | IMR_BcnInt | IMR_TBDOK | IMR_TBDER;
-
- write_nic_dword(priv, INTA_MASK, mask);
-}
-
-static void rtl8192_irq_disable(struct r8192_priv *priv)
-{
- write_nic_dword(priv, INTA_MASK, 0);
- synchronize_irq(priv->irq);
-}
-
-static void rtl8192_update_msr(struct r8192_priv *priv)
-{
- u8 msr;
-
- msr = read_nic_byte(priv, MSR);
- msr &= ~ MSR_LINK_MASK;
-
- /* do not change in link_state != WLAN_LINK_ASSOCIATED.
- * msr must be updated if the state is ASSOCIATING.
- * this is intentional and make sense for ad-hoc and
- * master (see the create BSS/IBSS func)
- */
- if (priv->ieee80211->state == IEEE80211_LINKED){
-
- if (priv->ieee80211->iw_mode == IW_MODE_INFRA)
- msr |= (MSR_LINK_MANAGED<<MSR_LINK_SHIFT);
- else if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- msr |= (MSR_LINK_ADHOC<<MSR_LINK_SHIFT);
- else if (priv->ieee80211->iw_mode == IW_MODE_MASTER)
- msr |= (MSR_LINK_MASTER<<MSR_LINK_SHIFT);
-
- }else
- msr |= (MSR_LINK_NONE<<MSR_LINK_SHIFT);
-
- write_nic_byte(priv, MSR, msr);
-}
-
-static void rtl8192_set_chan(struct ieee80211_device *ieee80211, short ch)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
-
- priv->chan = ch;
-
- /* need to implement rf set channel here WB */
-
- if (priv->rf_set_chan)
- priv->rf_set_chan(ieee80211, priv->chan);
-}
-
-static void rtl8192_rx_enable(struct r8192_priv *priv)
-{
- write_nic_dword(priv, RDQDA, priv->rx_ring_dma);
-}
-
-/* the TX_DESC_BASE setting is according to the following queue index
- * BK_QUEUE ===> 0
- * BE_QUEUE ===> 1
- * VI_QUEUE ===> 2
- * VO_QUEUE ===> 3
- * HCCA_QUEUE ===> 4
- * TXCMD_QUEUE ===> 5
- * MGNT_QUEUE ===> 6
- * HIGH_QUEUE ===> 7
- * BEACON_QUEUE ===> 8
- * */
-static const u32 TX_DESC_BASE[] = {BKQDA, BEQDA, VIQDA, VOQDA, HCCAQDA, CQDA, MQDA, HQDA, BQDA};
-static void rtl8192_tx_enable(struct r8192_priv *priv)
-{
- u32 i;
-
- for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
- write_nic_dword(priv, TX_DESC_BASE[i], priv->tx_ring[i].dma);
-
- ieee80211_reset_queue(priv->ieee80211);
-}
-
-
-static void rtl8192_free_rx_ring(struct r8192_priv *priv)
-{
- int i;
-
- for (i = 0; i < priv->rxringcount; i++) {
- struct sk_buff *skb = priv->rx_buf[i];
- if (!skb)
- continue;
-
- pci_unmap_single(priv->pdev,
- *((dma_addr_t *)skb->cb),
- priv->rxbuffersize, PCI_DMA_FROMDEVICE);
- kfree_skb(skb);
- }
-
- pci_free_consistent(priv->pdev, sizeof(*priv->rx_ring) * priv->rxringcount,
- priv->rx_ring, priv->rx_ring_dma);
- priv->rx_ring = NULL;
-}
-
-static void rtl8192_free_tx_ring(struct r8192_priv *priv, unsigned int prio)
-{
- struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
-
- while (skb_queue_len(&ring->queue)) {
- tx_desc_819x_pci *entry = &ring->desc[ring->idx];
- struct sk_buff *skb = __skb_dequeue(&ring->queue);
-
- pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr),
- skb->len, PCI_DMA_TODEVICE);
- kfree_skb(skb);
- ring->idx = (ring->idx + 1) % ring->entries;
- }
-
- pci_free_consistent(priv->pdev, sizeof(*ring->desc)*ring->entries,
- ring->desc, ring->dma);
- ring->desc = NULL;
-}
-
-void PHY_SetRtl8192eRfOff(struct r8192_priv *priv)
-{
- //disable RF-Chip A/B
- rtl8192_setBBreg(priv, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0);
- //analog to digital off, for power save
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter4, 0x300, 0x0);
- //digital to analog off, for power save
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter1, 0x18, 0x0);
- //rx antenna off
- rtl8192_setBBreg(priv, rOFDM0_TRxPathEnable, 0xf, 0x0);
- //rx antenna off
- rtl8192_setBBreg(priv, rOFDM1_TRxPathEnable, 0xf, 0x0);
- //analog to digital part2 off, for power save
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter1, 0x60, 0x0);
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter1, 0x4, 0x0);
- // Analog parameter!!Change bias and Lbus control.
- write_nic_byte(priv, ANAPAR_FOR_8192PciE, 0x07);
-}
-
-static void rtl8192_halt_adapter(struct r8192_priv *priv, bool reset)
-{
- int i;
- u8 OpMode;
- u32 ulRegRead;
-
- OpMode = RT_OP_MODE_NO_LINK;
- priv->ieee80211->SetHwRegHandler(priv->ieee80211, HW_VAR_MEDIA_STATUS, &OpMode);
-
- if (!priv->ieee80211->bSupportRemoteWakeUp) {
- /*
- * disable tx/rx. In 8185 we write 0x10 (Reset bit),
- * but here we make reference to WMAC and wirte 0x0
- */
- write_nic_byte(priv, CMDR, 0);
- }
-
- mdelay(20);
-
- if (!reset) {
- mdelay(150);
-
- priv->bHwRfOffAction = 2;
-
- /*
- * Call MgntActSet_RF_State instead to
- * prevent RF config race condition.
- */
- if (!priv->ieee80211->bSupportRemoteWakeUp) {
- PHY_SetRtl8192eRfOff(priv);
- ulRegRead = read_nic_dword(priv, CPU_GEN);
- ulRegRead |= CPU_GEN_SYSTEM_RESET;
- write_nic_dword(priv,CPU_GEN, ulRegRead);
- } else {
- /* for WOL */
- write_nic_dword(priv, WFCRC0, 0xffffffff);
- write_nic_dword(priv, WFCRC1, 0xffffffff);
- write_nic_dword(priv, WFCRC2, 0xffffffff);
-
- /* Write PMR register */
- write_nic_byte(priv, PMR, 0x5);
- /* Disable tx, enanble rx */
- write_nic_byte(priv, MacBlkCtrl, 0xa);
- }
- }
-
- for(i = 0; i < MAX_QUEUE_SIZE; i++) {
- skb_queue_purge(&priv->ieee80211->skb_waitQ [i]);
- }
- for(i = 0; i < MAX_QUEUE_SIZE; i++) {
- skb_queue_purge(&priv->ieee80211->skb_aggQ [i]);
- }
-
- skb_queue_purge(&priv->skb_queue);
-}
-
-static void rtl8192_data_hard_stop(struct ieee80211_device *ieee80211)
-{
-}
-
-static void rtl8192_data_hard_resume(struct ieee80211_device *ieee80211)
-{
-}
-
-/*
- * this function TX data frames when the ieee80211 stack requires this.
- * It checks also if we need to stop the ieee tx queue, eventually do it
- */
-static void rtl8192_hard_data_xmit(struct sk_buff *skb,
- struct ieee80211_device *ieee80211, int rate)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
- int ret;
- cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- u8 queue_index = tcb_desc->queue_index;
-
- /* shall not be referred by command packet */
- BUG_ON(queue_index == TXCMD_QUEUE);
-
- if (priv->bHwRadioOff || (!priv->up))
- {
- kfree_skb(skb);
- return;
- }
-
- skb_push(skb, priv->ieee80211->tx_headroom);
- ret = rtl8192_tx(priv, skb);
- if (ret != 0) {
- kfree_skb(skb);
- }
-
- if (queue_index != MGNT_QUEUE) {
- priv->ieee80211->stats.tx_bytes += (skb->len - priv->ieee80211->tx_headroom);
- priv->ieee80211->stats.tx_packets++;
- }
-}
-
-/*
- * This is a rough attempt to TX a frame
- * This is called by the ieee 80211 stack to TX management frames.
- * If the ring is full packet are dropped (for data frame the queue
- * is stopped before this can happen).
- */
-static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct ieee80211_device *ieee80211)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
- int ret;
- cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- u8 queue_index = tcb_desc->queue_index;
-
- if (queue_index != TXCMD_QUEUE) {
- if (priv->bHwRadioOff || (!priv->up))
- {
- kfree_skb(skb);
- return 0;
- }
- }
-
- if (queue_index == TXCMD_QUEUE) {
- rtl819xE_tx_cmd(priv, skb);
- ret = 0;
- return ret;
- } else {
- tcb_desc->RATRIndex = 7;
- tcb_desc->bTxDisableRateFallBack = 1;
- tcb_desc->bTxUseDriverAssingedRate = 1;
- tcb_desc->bTxEnableFwCalcDur = 1;
- skb_push(skb, ieee80211->tx_headroom);
- ret = rtl8192_tx(priv, skb);
- if (ret != 0) {
- kfree_skb(skb);
- }
- }
-
- return ret;
-}
-
-
-static void rtl8192_tx_isr(struct r8192_priv *priv, int prio)
-{
- struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
-
- while (skb_queue_len(&ring->queue)) {
- tx_desc_819x_pci *entry = &ring->desc[ring->idx];
- struct sk_buff *skb;
-
- /*
- * beacon packet will only use the first descriptor defaultly,
- * and the OWN may not be cleared by the hardware
- */
- if (prio != BEACON_QUEUE) {
- if (entry->OWN)
- return;
- ring->idx = (ring->idx + 1) % ring->entries;
- }
-
- skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr),
- skb->len, PCI_DMA_TODEVICE);
-
- kfree_skb(skb);
- }
-
- if (prio != BEACON_QUEUE) {
- /* try to deal with the pending packets */
- tasklet_schedule(&priv->irq_tx_tasklet);
- }
-}
-
-static void rtl8192_stop_beacon(struct ieee80211_device *ieee80211)
-{
-}
-
-static void rtl8192_config_rate(struct r8192_priv *priv, u16* rate_config)
-{
- struct ieee80211_network *net;
- u8 i=0, basic_rate = 0;
- net = & priv->ieee80211->current_network;
-
- for (i=0; i<net->rates_len; i++)
- {
- basic_rate = net->rates[i]&0x7f;
- switch(basic_rate)
- {
- case MGN_1M: *rate_config |= RRSR_1M; break;
- case MGN_2M: *rate_config |= RRSR_2M; break;
- case MGN_5_5M: *rate_config |= RRSR_5_5M; break;
- case MGN_11M: *rate_config |= RRSR_11M; break;
- case MGN_6M: *rate_config |= RRSR_6M; break;
- case MGN_9M: *rate_config |= RRSR_9M; break;
- case MGN_12M: *rate_config |= RRSR_12M; break;
- case MGN_18M: *rate_config |= RRSR_18M; break;
- case MGN_24M: *rate_config |= RRSR_24M; break;
- case MGN_36M: *rate_config |= RRSR_36M; break;
- case MGN_48M: *rate_config |= RRSR_48M; break;
- case MGN_54M: *rate_config |= RRSR_54M; break;
- }
- }
- for (i=0; i<net->rates_ex_len; i++)
- {
- basic_rate = net->rates_ex[i]&0x7f;
- switch(basic_rate)
- {
- case MGN_1M: *rate_config |= RRSR_1M; break;
- case MGN_2M: *rate_config |= RRSR_2M; break;
- case MGN_5_5M: *rate_config |= RRSR_5_5M; break;
- case MGN_11M: *rate_config |= RRSR_11M; break;
- case MGN_6M: *rate_config |= RRSR_6M; break;
- case MGN_9M: *rate_config |= RRSR_9M; break;
- case MGN_12M: *rate_config |= RRSR_12M; break;
- case MGN_18M: *rate_config |= RRSR_18M; break;
- case MGN_24M: *rate_config |= RRSR_24M; break;
- case MGN_36M: *rate_config |= RRSR_36M; break;
- case MGN_48M: *rate_config |= RRSR_48M; break;
- case MGN_54M: *rate_config |= RRSR_54M; break;
- }
- }
-}
-
-
-#define SHORT_SLOT_TIME 9
-#define NON_SHORT_SLOT_TIME 20
-
-static void rtl8192_update_cap(struct r8192_priv *priv, u16 cap)
-{
- u32 tmp = 0;
- struct ieee80211_network *net = &priv->ieee80211->current_network;
-
- priv->short_preamble = cap & WLAN_CAPABILITY_SHORT_PREAMBLE;
- tmp = priv->basic_rate;
- if (priv->short_preamble)
- tmp |= BRSR_AckShortPmb;
- write_nic_dword(priv, RRSR, tmp);
-
- if (net->mode & (IEEE_G|IEEE_N_24G))
- {
- u8 slot_time = 0;
- if ((cap & WLAN_CAPABILITY_SHORT_SLOT)&&(!priv->ieee80211->pHTInfo->bCurrentRT2RTLongSlotTime))
- {//short slot time
- slot_time = SHORT_SLOT_TIME;
- }
- else //long slot time
- slot_time = NON_SHORT_SLOT_TIME;
- priv->slot_time = slot_time;
- write_nic_byte(priv, SLOT_TIME, slot_time);
- }
-
-}
-
-static void rtl8192_net_update(struct r8192_priv *priv)
-{
- struct ieee80211_network *net;
- u16 BcnTimeCfg = 0, BcnCW = 6, BcnIFS = 0xf;
- u16 rate_config = 0;
- net = &priv->ieee80211->current_network;
-
- /* update Basic rate: RR, BRSR */
- rtl8192_config_rate(priv, &rate_config);
-
- /*
- * Select RRSR (in Legacy-OFDM and CCK)
- * For 8190, we select only 24M, 12M, 6M, 11M, 5.5M,
- * 2M, and 1M from the Basic rate.
- * We do not use other rates.
- */
- priv->basic_rate = rate_config &= 0x15f;
-
- /* BSSID */
- write_nic_dword(priv, BSSIDR, ((u32 *)net->bssid)[0]);
- write_nic_word(priv, BSSIDR+4, ((u16 *)net->bssid)[2]);
-
- if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- {
- write_nic_word(priv, ATIMWND, 2);
- write_nic_word(priv, BCN_DMATIME, 256);
- write_nic_word(priv, BCN_INTERVAL, net->beacon_interval);
- /*
- * BIT15 of BCN_DRV_EARLY_INT will indicate
- * whether software beacon or hw beacon is applied.
- */
- write_nic_word(priv, BCN_DRV_EARLY_INT, 10);
- write_nic_byte(priv, BCN_ERR_THRESH, 100);
-
- BcnTimeCfg |= (BcnCW<<BCN_TCFG_CW_SHIFT);
- /* TODO: BcnIFS may required to be changed on ASIC */
- BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
- write_nic_word(priv, BCN_TCFG, BcnTimeCfg);
- }
-}
-
-static void rtl819xE_tx_cmd(struct r8192_priv *priv, struct sk_buff *skb)
-{
- struct rtl8192_tx_ring *ring;
- tx_desc_819x_pci *entry;
- unsigned int idx;
- dma_addr_t mapping;
- cb_desc *tcb_desc;
- unsigned long flags;
-
- ring = &priv->tx_ring[TXCMD_QUEUE];
- mapping = pci_map_single(priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-
- spin_lock_irqsave(&priv->irq_th_lock,flags);
- idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
- entry = &ring->desc[idx];
-
- tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- memset(entry,0,12);
- entry->LINIP = tcb_desc->bLastIniPkt;
- entry->FirstSeg = 1;//first segment
- entry->LastSeg = 1; //last segment
- if(tcb_desc->bCmdOrInit == DESC_PACKET_TYPE_INIT) {
- entry->CmdInit = DESC_PACKET_TYPE_INIT;
- } else {
- entry->CmdInit = DESC_PACKET_TYPE_NORMAL;
- entry->Offset = sizeof(TX_FWINFO_8190PCI) + 8;
- entry->PktSize = (u16)(tcb_desc->pkt_size + entry->Offset);
- entry->QueueSelect = QSLT_CMD;
- entry->TxFWInfoSize = 0x08;
- entry->RATid = (u8)DESC_PACKET_TYPE_INIT;
- }
- entry->TxBufferSize = skb->len;
- entry->TxBuffAddr = cpu_to_le32(mapping);
- entry->OWN = 1;
-
- __skb_queue_tail(&ring->queue, skb);
- spin_unlock_irqrestore(&priv->irq_th_lock,flags);
-
- write_nic_byte(priv, TPPoll, TPPoll_CQ);
-
- return;
-}
-
-/*
- * Mapping Software/Hardware descriptor queue id to "Queue Select Field"
- * in TxFwInfo data structure
- */
-static u8 MapHwQueueToFirmwareQueue(u8 QueueID)
-{
- u8 QueueSelect = 0;
-
- switch (QueueID) {
- case BE_QUEUE:
- QueueSelect = QSLT_BE;
- break;
-
- case BK_QUEUE:
- QueueSelect = QSLT_BK;
- break;
-
- case VO_QUEUE:
- QueueSelect = QSLT_VO;
- break;
-
- case VI_QUEUE:
- QueueSelect = QSLT_VI;
- break;
-
- case MGNT_QUEUE:
- QueueSelect = QSLT_MGNT;
- break;
-
- case BEACON_QUEUE:
- QueueSelect = QSLT_BEACON;
- break;
-
- case TXCMD_QUEUE:
- QueueSelect = QSLT_CMD;
- break;
-
- case HIGH_QUEUE:
- default:
- RT_TRACE(COMP_ERR, "Impossible Queue Selection: %d\n", QueueID);
- break;
- }
- return QueueSelect;
-}
-
-static u8 MRateToHwRate8190Pci(u8 rate)
-{
- u8 ret = DESC90_RATE1M;
-
- switch(rate) {
- case MGN_1M: ret = DESC90_RATE1M; break;
- case MGN_2M: ret = DESC90_RATE2M; break;
- case MGN_5_5M: ret = DESC90_RATE5_5M; break;
- case MGN_11M: ret = DESC90_RATE11M; break;
- case MGN_6M: ret = DESC90_RATE6M; break;
- case MGN_9M: ret = DESC90_RATE9M; break;
- case MGN_12M: ret = DESC90_RATE12M; break;
- case MGN_18M: ret = DESC90_RATE18M; break;
- case MGN_24M: ret = DESC90_RATE24M; break;
- case MGN_36M: ret = DESC90_RATE36M; break;
- case MGN_48M: ret = DESC90_RATE48M; break;
- case MGN_54M: ret = DESC90_RATE54M; break;
-
- // HT rate since here
- case MGN_MCS0: ret = DESC90_RATEMCS0; break;
- case MGN_MCS1: ret = DESC90_RATEMCS1; break;
- case MGN_MCS2: ret = DESC90_RATEMCS2; break;
- case MGN_MCS3: ret = DESC90_RATEMCS3; break;
- case MGN_MCS4: ret = DESC90_RATEMCS4; break;
- case MGN_MCS5: ret = DESC90_RATEMCS5; break;
- case MGN_MCS6: ret = DESC90_RATEMCS6; break;
- case MGN_MCS7: ret = DESC90_RATEMCS7; break;
- case MGN_MCS8: ret = DESC90_RATEMCS8; break;
- case MGN_MCS9: ret = DESC90_RATEMCS9; break;
- case MGN_MCS10: ret = DESC90_RATEMCS10; break;
- case MGN_MCS11: ret = DESC90_RATEMCS11; break;
- case MGN_MCS12: ret = DESC90_RATEMCS12; break;
- case MGN_MCS13: ret = DESC90_RATEMCS13; break;
- case MGN_MCS14: ret = DESC90_RATEMCS14; break;
- case MGN_MCS15: ret = DESC90_RATEMCS15; break;
- case (0x80|0x20): ret = DESC90_RATEMCS32; break;
-
- default: break;
- }
- return ret;
-}
-
-
-static u8 QueryIsShort(u8 TxHT, u8 TxRate, cb_desc *tcb_desc)
-{
- u8 tmp_Short;
-
- tmp_Short = (TxHT==1)?((tcb_desc->bUseShortGI)?1:0):((tcb_desc->bUseShortPreamble)?1:0);
-
- if(TxHT==1 && TxRate != DESC90_RATEMCS15)
- tmp_Short = 0;
-
- return tmp_Short;
-}
-
-/*
- * The tx procedure is just as following,
- * skb->cb will contain all the following information,
- * priority, morefrag, rate, &dev.
- */
-static short rtl8192_tx(struct r8192_priv *priv, struct sk_buff* skb)
-{
- struct rtl8192_tx_ring *ring;
- unsigned long flags;
- cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- tx_desc_819x_pci *pdesc = NULL;
- TX_FWINFO_8190PCI *pTxFwInfo = NULL;
- dma_addr_t mapping;
- bool multi_addr = false, broad_addr = false, uni_addr = false;
- u8 *pda_addr = NULL;
- int idx;
-
- if (priv->bdisable_nic) {
- RT_TRACE(COMP_ERR, "Nic is disabled! Can't tx packet len=%d qidx=%d!!!\n",
- skb->len, tcb_desc->queue_index);
- return skb->len;
- }
-
-#ifdef ENABLE_LPS
- priv->ieee80211->bAwakePktSent = true;
-#endif
-
- mapping = pci_map_single(priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-
- /* collect the tx packets statitcs */
- pda_addr = ((u8 *)skb->data) + sizeof(TX_FWINFO_8190PCI);
- if (is_multicast_ether_addr(pda_addr))
- multi_addr = true;
- else if (is_broadcast_ether_addr(pda_addr))
- broad_addr = true;
- else
- uni_addr = true;
-
- if (uni_addr)
- priv->stats.txbytesunicast += (u8)(skb->len) - sizeof(TX_FWINFO_8190PCI);
-
- /* fill tx firmware */
- pTxFwInfo = (PTX_FWINFO_8190PCI)skb->data;
- memset(pTxFwInfo, 0, sizeof(TX_FWINFO_8190PCI));
- pTxFwInfo->TxHT = (tcb_desc->data_rate&0x80) ? 1 : 0;
- pTxFwInfo->TxRate = MRateToHwRate8190Pci((u8)tcb_desc->data_rate);
- pTxFwInfo->EnableCPUDur = tcb_desc->bTxEnableFwCalcDur;
- pTxFwInfo->Short = QueryIsShort(pTxFwInfo->TxHT, pTxFwInfo->TxRate, tcb_desc);
-
- /* Aggregation related */
- if (tcb_desc->bAMPDUEnable) {
- pTxFwInfo->AllowAggregation = 1;
- pTxFwInfo->RxMF = tcb_desc->ampdu_factor;
- pTxFwInfo->RxAMD = tcb_desc->ampdu_density;
- } else {
- pTxFwInfo->AllowAggregation = 0;
- pTxFwInfo->RxMF = 0;
- pTxFwInfo->RxAMD = 0;
- }
-
- /* Protection mode related */
- pTxFwInfo->RtsEnable = (tcb_desc->bRTSEnable) ? 1 : 0;
- pTxFwInfo->CtsEnable = (tcb_desc->bCTSEnable) ? 1 : 0;
- pTxFwInfo->RtsSTBC = (tcb_desc->bRTSSTBC) ? 1 : 0;
- pTxFwInfo->RtsHT = (tcb_desc->rts_rate&0x80) ? 1 : 0;
- pTxFwInfo->RtsRate = MRateToHwRate8190Pci((u8)tcb_desc->rts_rate);
- pTxFwInfo->RtsBandwidth = 0;
- pTxFwInfo->RtsSubcarrier = tcb_desc->RTSSC;
- pTxFwInfo->RtsShort = (pTxFwInfo->RtsHT == 0) ? (tcb_desc->bRTSUseShortPreamble ? 1 : 0) : (tcb_desc->bRTSUseShortGI? 1 : 0);
-
- /* Set Bandwidth and sub-channel settings. */
- if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20_40) {
- if (tcb_desc->bPacketBW) {
- pTxFwInfo->TxBandwidth = 1;
- /* use duplicated mode */
- pTxFwInfo->TxSubCarrier = 0;
- } else {
- pTxFwInfo->TxBandwidth = 0;
- pTxFwInfo->TxSubCarrier = priv->nCur40MhzPrimeSC;
- }
- } else {
- pTxFwInfo->TxBandwidth = 0;
- pTxFwInfo->TxSubCarrier = 0;
- }
-
- spin_lock_irqsave(&priv->irq_th_lock, flags);
- ring = &priv->tx_ring[tcb_desc->queue_index];
- if (tcb_desc->queue_index != BEACON_QUEUE)
- idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
- else
- idx = 0;
-
- pdesc = &ring->desc[idx];
- if ((pdesc->OWN == 1) && (tcb_desc->queue_index != BEACON_QUEUE)) {
- RT_TRACE(COMP_ERR, "No more TX desc@%d, ring->idx = %d,idx = %d,%x\n",
- tcb_desc->queue_index, ring->idx, idx, skb->len);
- spin_unlock_irqrestore(&priv->irq_th_lock, flags);
- return skb->len;
- }
-
- /* fill tx descriptor */
- memset(pdesc, 0, 12);
-
- /*DWORD 0*/
- pdesc->LINIP = 0;
- pdesc->CmdInit = 1;
- pdesc->Offset = sizeof(TX_FWINFO_8190PCI) + 8; /* We must add 8!! */
- pdesc->PktSize = (u16)skb->len-sizeof(TX_FWINFO_8190PCI);
-
- /*DWORD 1*/
- pdesc->SecCAMID = 0;
- pdesc->RATid = tcb_desc->RATRIndex;
-
- pdesc->NoEnc = 1;
- pdesc->SecType = 0x0;
- if (tcb_desc->bHwSec) {
- switch (priv->ieee80211->pairwise_key_type) {
- case KEY_TYPE_WEP40:
- case KEY_TYPE_WEP104:
- pdesc->SecType = 0x1;
- pdesc->NoEnc = 0;
- break;
- case KEY_TYPE_TKIP:
- pdesc->SecType = 0x2;
- pdesc->NoEnc = 0;
- break;
- case KEY_TYPE_CCMP:
- pdesc->SecType = 0x3;
- pdesc->NoEnc = 0;
- break;
- case KEY_TYPE_NA:
- pdesc->SecType = 0x0;
- pdesc->NoEnc = 1;
- break;
- }
- }
-
- /* Set Packet ID */
- pdesc->PktId = 0x0;
-
- pdesc->QueueSelect = MapHwQueueToFirmwareQueue(tcb_desc->queue_index);
- pdesc->TxFWInfoSize = sizeof(TX_FWINFO_8190PCI);
-
- pdesc->DISFB = tcb_desc->bTxDisableRateFallBack;
- pdesc->USERATE = tcb_desc->bTxUseDriverAssingedRate;
-
- pdesc->FirstSeg = 1;
- pdesc->LastSeg = 1;
- pdesc->TxBufferSize = skb->len;
-
- pdesc->TxBuffAddr = cpu_to_le32(mapping);
- __skb_queue_tail(&ring->queue, skb);
- pdesc->OWN = 1;
- spin_unlock_irqrestore(&priv->irq_th_lock, flags);
- priv->ieee80211->dev->trans_start = jiffies;
- write_nic_word(priv, TPPoll, 0x01<<tcb_desc->queue_index);
- return 0;
-}
-
-static short rtl8192_alloc_rx_desc_ring(struct r8192_priv *priv)
-{
- rx_desc_819x_pci *entry = NULL;
- int i;
-
- priv->rx_ring = pci_alloc_consistent(priv->pdev,
- sizeof(*priv->rx_ring) * priv->rxringcount, &priv->rx_ring_dma);
-
- if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
- RT_TRACE(COMP_ERR,"Cannot allocate RX ring\n");
- return -ENOMEM;
- }
-
- memset(priv->rx_ring, 0, sizeof(*priv->rx_ring) * priv->rxringcount);
- priv->rx_idx = 0;
-
- for (i = 0; i < priv->rxringcount; i++) {
- struct sk_buff *skb = dev_alloc_skb(priv->rxbuffersize);
- dma_addr_t *mapping;
- entry = &priv->rx_ring[i];
- if (!skb)
- return 0;
- priv->rx_buf[i] = skb;
- mapping = (dma_addr_t *)skb->cb;
- *mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
- priv->rxbuffersize, PCI_DMA_FROMDEVICE);
-
- entry->BufferAddress = cpu_to_le32(*mapping);
-
- entry->Length = priv->rxbuffersize;
- entry->OWN = 1;
- }
-
- entry->EOR = 1;
- return 0;
-}
-
-static int rtl8192_alloc_tx_desc_ring(struct r8192_priv *priv,
- unsigned int prio, unsigned int entries)
-{
- tx_desc_819x_pci *ring;
- dma_addr_t dma;
- int i;
-
- ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
- if (!ring || (unsigned long)ring & 0xFF) {
- RT_TRACE(COMP_ERR, "Cannot allocate TX ring (prio = %d)\n", prio);
- return -ENOMEM;
- }
-
- memset(ring, 0, sizeof(*ring)*entries);
- priv->tx_ring[prio].desc = ring;
- priv->tx_ring[prio].dma = dma;
- priv->tx_ring[prio].idx = 0;
- priv->tx_ring[prio].entries = entries;
- skb_queue_head_init(&priv->tx_ring[prio].queue);
-
- for (i = 0; i < entries; i++)
- ring[i].NextDescAddress =
- cpu_to_le32((u32)dma + ((i + 1) % entries) * sizeof(*ring));
-
- return 0;
-}
-
-static short rtl8192_pci_initdescring(struct r8192_priv *priv)
-{
- u32 ret;
- int i;
-
- ret = rtl8192_alloc_rx_desc_ring(priv);
- if (ret)
- return ret;
-
- /* general process for other queue */
- for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) {
- ret = rtl8192_alloc_tx_desc_ring(priv, i, priv->txringcount);
- if (ret)
- goto err_free_rings;
- }
-
- return 0;
-
-err_free_rings:
- rtl8192_free_rx_ring(priv);
- for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
- if (priv->tx_ring[i].desc)
- rtl8192_free_tx_ring(priv, i);
- return 1;
-}
-
-static void rtl8192_pci_resetdescring(struct r8192_priv *priv)
-{
- int i;
-
- /* force the rx_idx to the first one */
- if(priv->rx_ring) {
- rx_desc_819x_pci *entry = NULL;
- for (i = 0; i < priv->rxringcount; i++) {
- entry = &priv->rx_ring[i];
- entry->OWN = 1;
- }
- priv->rx_idx = 0;
- }
-
- /* after reset, release previous pending packet, and force the
- * tx idx to the first one */
- for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) {
- if (priv->tx_ring[i].desc) {
- struct rtl8192_tx_ring *ring = &priv->tx_ring[i];
-
- while (skb_queue_len(&ring->queue)) {
- tx_desc_819x_pci *entry = &ring->desc[ring->idx];
- struct sk_buff *skb = __skb_dequeue(&ring->queue);
-
- pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr),
- skb->len, PCI_DMA_TODEVICE);
- kfree_skb(skb);
- ring->idx = (ring->idx + 1) % ring->entries;
- }
- ring->idx = 0;
- }
- }
-}
-
-static void rtl8192_link_change(struct ieee80211_device *ieee)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee->dev);
-
- if (ieee->state == IEEE80211_LINKED)
- {
- rtl8192_net_update(priv);
- rtl8192_update_ratr_table(priv);
-
- //add this as in pure N mode, wep encryption will use software way, but there is no chance to set this as wep will not set group key in wext. WB.2008.07.08
- if ((KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->pairwise_key_type))
- EnableHWSecurityConfig8192(priv);
- }
- else
- {
- write_nic_byte(priv, 0x173, 0);
- }
-
- rtl8192_update_msr(priv);
-
- // 2007/10/16 MH MAC Will update TSF according to all received beacon, so we have
- // // To set CBSSID bit when link with any AP or STA.
- if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
- {
- u32 reg = 0;
- reg = read_nic_dword(priv, RCR);
- if (priv->ieee80211->state == IEEE80211_LINKED)
- priv->ReceiveConfig = reg |= RCR_CBSSID;
- else
- priv->ReceiveConfig = reg &= ~RCR_CBSSID;
- write_nic_dword(priv, RCR, reg);
- }
-}
-
-
-static const struct ieee80211_qos_parameters def_qos_parameters = {
- {3,3,3,3},/* cw_min */
- {7,7,7,7},/* cw_max */
- {2,2,2,2},/* aifs */
- {0,0,0,0},/* flags */
- {0,0,0,0} /* tx_op_limit */
-};
-
-static void rtl8192_update_beacon(struct work_struct * work)
-{
- struct r8192_priv *priv = container_of(work, struct r8192_priv, update_beacon_wq.work);
- struct ieee80211_device* ieee = priv->ieee80211;
- struct ieee80211_network* net = &ieee->current_network;
-
- if (ieee->pHTInfo->bCurrentHTSupport)
- HTUpdateSelfAndPeerSetting(ieee, net);
- ieee->pHTInfo->bCurrentRT2RTLongSlotTime = net->bssht.bdRT2RTLongSlotTime;
- rtl8192_update_cap(priv, net->capability);
-}
-
-/*
-* background support to run QoS activate functionality
-*/
-static const int WDCAPARA_ADD[] = {EDCAPARA_BE,EDCAPARA_BK,EDCAPARA_VI,EDCAPARA_VO};
-static void rtl8192_qos_activate(struct work_struct * work)
-{
- struct r8192_priv *priv = container_of(work, struct r8192_priv, qos_activate);
- struct ieee80211_qos_parameters *qos_parameters = &priv->ieee80211->current_network.qos_data.parameters;
- u8 mode = priv->ieee80211->current_network.mode;
- u8 u1bAIFS;
- u32 u4bAcParam;
- int i;
-
- mutex_lock(&priv->mutex);
- if(priv->ieee80211->state != IEEE80211_LINKED)
- goto success;
- RT_TRACE(COMP_QOS,"qos active process with associate response received\n");
- /* It better set slot time at first */
- /* For we just support b/g mode at present, let the slot time at 9/20 selection */
- /* update the ac parameter to related registers */
- for(i = 0; i < QOS_QUEUE_NUM; i++) {
- //Mode G/A: slotTimeTimer = 9; Mode B: 20
- u1bAIFS = qos_parameters->aifs[i] * ((mode&(IEEE_G|IEEE_N_24G)) ?9:20) + aSifsTime;
- u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[i]))<< AC_PARAM_TXOP_LIMIT_OFFSET)|
- (((u32)(qos_parameters->cw_max[i]))<< AC_PARAM_ECW_MAX_OFFSET)|
- (((u32)(qos_parameters->cw_min[i]))<< AC_PARAM_ECW_MIN_OFFSET)|
- ((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
- write_nic_dword(priv, WDCAPARA_ADD[i], u4bAcParam);
- }
-
-success:
- mutex_unlock(&priv->mutex);
-}
-
-static int rtl8192_qos_handle_probe_response(struct r8192_priv *priv,
- int active_network,
- struct ieee80211_network *network)
-{
- int ret = 0;
- u32 size = sizeof(struct ieee80211_qos_parameters);
-
- if(priv->ieee80211->state !=IEEE80211_LINKED)
- return ret;
-
- if ((priv->ieee80211->iw_mode != IW_MODE_INFRA))
- return ret;
-
- if (network->flags & NETWORK_HAS_QOS_MASK) {
- if (active_network &&
- (network->flags & NETWORK_HAS_QOS_PARAMETERS))
- network->qos_data.active = network->qos_data.supported;
-
- if ((network->qos_data.active == 1) && (active_network == 1) &&
- (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
- (network->qos_data.old_param_count !=
- network->qos_data.param_count)) {
- network->qos_data.old_param_count =
- network->qos_data.param_count;
- queue_work(priv->priv_wq, &priv->qos_activate);
- RT_TRACE (COMP_QOS, "QoS parameters change call "
- "qos_activate\n");
- }
- } else {
- memcpy(&priv->ieee80211->current_network.qos_data.parameters,
- &def_qos_parameters, size);
-
- if ((network->qos_data.active == 1) && (active_network == 1)) {
- queue_work(priv->priv_wq, &priv->qos_activate);
- RT_TRACE(COMP_QOS, "QoS was disabled call qos_activate\n");
- }
- network->qos_data.active = 0;
- network->qos_data.supported = 0;
- }
-
- return 0;
-}
-
-/* handle manage frame frame beacon and probe response */
-static int rtl8192_handle_beacon(struct ieee80211_device *ieee,
- struct ieee80211_beacon * beacon,
- struct ieee80211_network * network)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee->dev);
-
- rtl8192_qos_handle_probe_response(priv,1,network);
-
- queue_delayed_work(priv->priv_wq, &priv->update_beacon_wq, 0);
- return 0;
-
-}
-
-/*
- * handling the beaconing responses. if we get different QoS setting
- * off the network from the associated setting, adjust the QoS setting
- */
-static int rtl8192_qos_association_resp(struct r8192_priv *priv,
- struct ieee80211_network *network)
-{
- int ret = 0;
- unsigned long flags;
- u32 size = sizeof(struct ieee80211_qos_parameters);
- int set_qos_param = 0;
-
- if ((priv == NULL) || (network == NULL))
- return ret;
-
- if (priv->ieee80211->state != IEEE80211_LINKED)
- return ret;
-
- if ((priv->ieee80211->iw_mode != IW_MODE_INFRA))
- return ret;
-
- spin_lock_irqsave(&priv->ieee80211->lock, flags);
- if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
- memcpy(&priv->ieee80211->current_network.qos_data.parameters,
- &network->qos_data.parameters,
- sizeof(struct ieee80211_qos_parameters));
- priv->ieee80211->current_network.qos_data.active = 1;
- set_qos_param = 1;
- /* update qos parameter for current network */
- priv->ieee80211->current_network.qos_data.old_param_count =
- priv->ieee80211->current_network.qos_data.param_count;
- priv->ieee80211->current_network.qos_data.param_count =
- network->qos_data.param_count;
-
- } else {
- memcpy(&priv->ieee80211->current_network.qos_data.parameters,
- &def_qos_parameters, size);
- priv->ieee80211->current_network.qos_data.active = 0;
- priv->ieee80211->current_network.qos_data.supported = 0;
- set_qos_param = 1;
- }
-
- spin_unlock_irqrestore(&priv->ieee80211->lock, flags);
-
- RT_TRACE(COMP_QOS, "%s: network->flags = %d,%d\n", __FUNCTION__,
- network->flags, priv->ieee80211->current_network.qos_data.active);
- if (set_qos_param == 1)
- queue_work(priv->priv_wq, &priv->qos_activate);
-
- return ret;
-}
-
-
-static int rtl8192_handle_assoc_response(struct ieee80211_device *ieee,
- struct ieee80211_assoc_response_frame *resp,
- struct ieee80211_network *network)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee->dev);
- rtl8192_qos_association_resp(priv, network);
- return 0;
-}
-
-
-/* updateRATRTabel for MCS only. Basic rate is not implemented. */
-static void rtl8192_update_ratr_table(struct r8192_priv* priv)
-{
- struct ieee80211_device* ieee = priv->ieee80211;
- u8* pMcsRate = ieee->dot11HTOperationalRateSet;
- u32 ratr_value = 0;
- u8 rate_index = 0;
-
- rtl8192_config_rate(priv, (u16*)(&ratr_value));
- ratr_value |= (*(u16*)(pMcsRate)) << 12;
-
- switch (ieee->mode)
- {
- case IEEE_A:
- ratr_value &= 0x00000FF0;
- break;
- case IEEE_B:
- ratr_value &= 0x0000000F;
- break;
- case IEEE_G:
- ratr_value &= 0x00000FF7;
- break;
- case IEEE_N_24G:
- case IEEE_N_5G:
- if (ieee->pHTInfo->PeerMimoPs == 0) //MIMO_PS_STATIC
- ratr_value &= 0x0007F007;
- else{
- if (priv->rf_type == RF_1T2R)
- ratr_value &= 0x000FF007;
- else
- ratr_value &= 0x0F81F007;
- }
- break;
- default:
- break;
- }
- ratr_value &= 0x0FFFFFFF;
- if(ieee->pHTInfo->bCurTxBW40MHz && ieee->pHTInfo->bCurShortGI40MHz){
- ratr_value |= 0x80000000;
- }else if(!ieee->pHTInfo->bCurTxBW40MHz && ieee->pHTInfo->bCurShortGI20MHz){
- ratr_value |= 0x80000000;
- }
- write_nic_dword(priv, RATR0+rate_index*4, ratr_value);
- write_nic_byte(priv, UFWP, 1);
-}
-
-static bool GetNmodeSupportBySecCfg8190Pci(struct ieee80211_device *ieee)
-{
- return !(ieee->rtllib_ap_sec_type &&
- (ieee->rtllib_ap_sec_type(ieee)&(SEC_ALG_WEP|SEC_ALG_TKIP)));
-}
-
-static void rtl8192_refresh_supportrate(struct r8192_priv* priv)
-{
- struct ieee80211_device* ieee = priv->ieee80211;
- //we donot consider set support rate for ABG mode, only HT MCS rate is set here.
- if (ieee->mode == WIRELESS_MODE_N_24G || ieee->mode == WIRELESS_MODE_N_5G)
- {
- memcpy(ieee->Regdot11HTOperationalRateSet, ieee->RegHTSuppRateSet, 16);
- }
- else
- memset(ieee->Regdot11HTOperationalRateSet, 0, 16);
-}
-
-static u8 rtl8192_getSupportedWireleeMode(void)
-{
- return (WIRELESS_MODE_N_24G|WIRELESS_MODE_G|WIRELESS_MODE_B);
-}
-
-static void rtl8192_SetWirelessMode(struct ieee80211_device *ieee, u8 wireless_mode)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee->dev);
- u8 bSupportMode = rtl8192_getSupportedWireleeMode();
-
- if ((wireless_mode == WIRELESS_MODE_AUTO) || ((wireless_mode&bSupportMode)==0))
- {
- if(bSupportMode & WIRELESS_MODE_N_24G)
- {
- wireless_mode = WIRELESS_MODE_N_24G;
- }
- else if(bSupportMode & WIRELESS_MODE_N_5G)
- {
- wireless_mode = WIRELESS_MODE_N_5G;
- }
- else if((bSupportMode & WIRELESS_MODE_A))
- {
- wireless_mode = WIRELESS_MODE_A;
- }
- else if((bSupportMode & WIRELESS_MODE_G))
- {
- wireless_mode = WIRELESS_MODE_G;
- }
- else if((bSupportMode & WIRELESS_MODE_B))
- {
- wireless_mode = WIRELESS_MODE_B;
- }
- else{
- RT_TRACE(COMP_ERR, "%s(), No valid wireless mode supported, SupportedWirelessMode(%x)!!!\n", __FUNCTION__,bSupportMode);
- wireless_mode = WIRELESS_MODE_B;
- }
- }
- priv->ieee80211->mode = wireless_mode;
-
- if ((wireless_mode == WIRELESS_MODE_N_24G) || (wireless_mode == WIRELESS_MODE_N_5G))
- priv->ieee80211->pHTInfo->bEnableHT = 1;
- else
- priv->ieee80211->pHTInfo->bEnableHT = 0;
- RT_TRACE(COMP_INIT, "Current Wireless Mode is %x\n", wireless_mode);
- rtl8192_refresh_supportrate(priv);
-}
-
-static bool GetHalfNmodeSupportByAPs819xPci(struct ieee80211_device* ieee)
-{
- return ieee->bHalfWirelessN24GMode;
-}
-
-static short rtl8192_is_tx_queue_empty(struct ieee80211_device *ieee)
-{
- int i=0;
- struct r8192_priv *priv = ieee80211_priv(ieee->dev);
-
- for (i=0; i<=MGNT_QUEUE; i++)
- {
- if ((i== TXCMD_QUEUE) || (i == HCCA_QUEUE) )
- continue;
- if (skb_queue_len(&(&priv->tx_ring[i])->queue) > 0){
- printk("===>tx queue is not empty:%d, %d\n", i, skb_queue_len(&(&priv->tx_ring[i])->queue));
- return 0;
- }
- }
- return 1;
-}
-
-static void rtl8192_hw_sleep_down(struct r8192_priv *priv)
-{
- MgntActSet_RF_State(priv, eRfSleep, RF_CHANGE_BY_PS);
-}
-
-static void rtl8192_hw_wakeup(struct ieee80211_device *ieee)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee->dev);
- MgntActSet_RF_State(priv, eRfOn, RF_CHANGE_BY_PS);
-}
-
-static void rtl8192_hw_wakeup_wq (struct work_struct *work)
-{
- struct delayed_work *dwork = container_of(work,struct delayed_work,work);
- struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_wakeup_wq);
-
- rtl8192_hw_wakeup(ieee);
-}
-
-#define MIN_SLEEP_TIME 50
-#define MAX_SLEEP_TIME 10000
-static void rtl8192_hw_to_sleep(struct ieee80211_device *ieee, u32 th, u32 tl)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee->dev);
- u32 tmp;
- u32 rb = jiffies;
-
- // Writing HW register with 0 equals to disable
- // the timer, that is not really what we want
- //
- tl -= MSECS(8+16+7);
-
- // If the interval in witch we are requested to sleep is too
- // short then give up and remain awake
- // when we sleep after send null frame, the timer will be too short to sleep.
- //
- if(((tl>=rb)&& (tl-rb) <= MSECS(MIN_SLEEP_TIME))
- ||((rb>tl)&& (rb-tl) < MSECS(MIN_SLEEP_TIME))) {
- printk("too short to sleep::%x, %x, %lx\n",tl, rb, MSECS(MIN_SLEEP_TIME));
- return;
- }
-
- if(((tl > rb) && ((tl-rb) > MSECS(MAX_SLEEP_TIME)))||
- ((tl < rb) && (tl>MSECS(69)) && ((rb-tl) > MSECS(MAX_SLEEP_TIME)))||
- ((tl<rb)&&(tl<MSECS(69))&&((tl+0xffffffff-rb)>MSECS(MAX_SLEEP_TIME)))) {
- printk("========>too long to sleep:%x, %x, %lx\n", tl, rb, MSECS(MAX_SLEEP_TIME));
- return;
- }
-
- tmp = (tl>rb)?(tl-rb):(rb-tl);
- queue_delayed_work(priv->ieee80211->wq,
- &priv->ieee80211->hw_wakeup_wq,tmp);
-
- rtl8192_hw_sleep_down(priv);
-}
-
-static void rtl8192_init_priv_variable(struct r8192_priv *priv)
-{
- u8 i;
- PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
-
- // Default Halt the NIC if RF is OFF.
- pPSC->RegRfPsLevel |= RT_RF_OFF_LEVL_HALT_NIC;
- pPSC->RegRfPsLevel |= RT_RF_OFF_LEVL_CLK_REQ;
- pPSC->RegRfPsLevel |= RT_RF_OFF_LEVL_ASPM;
- pPSC->RegRfPsLevel |= RT_RF_LPS_LEVEL_ASPM;
- pPSC->bLeisurePs = true;
- priv->ieee80211->RegMaxLPSAwakeIntvl = 5;
- priv->bHwRadioOff = false;
-
- priv->being_init_adapter = false;
- priv->txringcount = 64;//32;
- priv->rxbuffersize = 9100;//2048;//1024;
- priv->rxringcount = MAX_RX_COUNT;//64;
- priv->chan = 1; //set to channel 1
- priv->RegWirelessMode = WIRELESS_MODE_AUTO;
- priv->RegChannelPlan = 0xf;
- priv->ieee80211->mode = WIRELESS_MODE_AUTO; //SET AUTO
- priv->ieee80211->iw_mode = IW_MODE_INFRA;
- priv->ieee80211->ieee_up=0;
- priv->retry_rts = DEFAULT_RETRY_RTS;
- priv->retry_data = DEFAULT_RETRY_DATA;
- priv->ieee80211->rts = DEFAULT_RTS_THRESHOLD;
- priv->ieee80211->rate = 110; //11 mbps
- priv->ieee80211->short_slot = 1;
- priv->promisc = (priv->ieee80211->dev->flags & IFF_PROMISC) ? 1:0;
- priv->bcck_in_ch14 = false;
- priv->CCKPresentAttentuation = 0;
- priv->rfa_txpowertrackingindex = 0;
- priv->rfc_txpowertrackingindex = 0;
- priv->CckPwEnl = 6;
- //added by amy for silent reset
- priv->ResetProgress = RESET_TYPE_NORESET;
- priv->bForcedSilentReset = 0;
- priv->bDisableNormalResetCheck = false;
- priv->force_reset = false;
- //added by amy for power save
- priv->RfOffReason = 0;
- priv->bHwRfOffAction = 0;
- priv->PowerSaveControl.bInactivePs = true;
- priv->PowerSaveControl.bIPSModeBackup = false;
-
- priv->ieee80211->current_network.beacon_interval = DEFAULT_BEACONINTERVAL;
- priv->ieee80211->iw_mode = IW_MODE_INFRA;
- priv->ieee80211->softmac_features = IEEE_SOFTMAC_SCAN |
- IEEE_SOFTMAC_ASSOCIATE | IEEE_SOFTMAC_PROBERQ |
- IEEE_SOFTMAC_PROBERS | IEEE_SOFTMAC_TX_QUEUE;/* |
- IEEE_SOFTMAC_BEACONS;*///added by amy 080604 //| //IEEE_SOFTMAC_SINGLE_QUEUE;
-
- priv->ieee80211->active_scan = 1;
- priv->ieee80211->modulation = IEEE80211_CCK_MODULATION | IEEE80211_OFDM_MODULATION;
- priv->ieee80211->host_encrypt = 1;
- priv->ieee80211->host_decrypt = 1;
- priv->ieee80211->start_send_beacons = rtl8192_start_beacon;
- priv->ieee80211->stop_send_beacons = rtl8192_stop_beacon;
- priv->ieee80211->softmac_hard_start_xmit = rtl8192_hard_start_xmit;
- priv->ieee80211->set_chan = rtl8192_set_chan;
- priv->ieee80211->link_change = rtl8192_link_change;
- priv->ieee80211->softmac_data_hard_start_xmit = rtl8192_hard_data_xmit;
- priv->ieee80211->data_hard_stop = rtl8192_data_hard_stop;
- priv->ieee80211->data_hard_resume = rtl8192_data_hard_resume;
- priv->ieee80211->init_wmmparam_flag = 0;
- priv->ieee80211->fts = DEFAULT_FRAG_THRESHOLD;
- priv->ieee80211->check_nic_enough_desc = check_nic_enough_desc;
- priv->ieee80211->tx_headroom = sizeof(TX_FWINFO_8190PCI);
- priv->ieee80211->qos_support = 1;
- priv->ieee80211->SetBWModeHandler = rtl8192_SetBWMode;
- priv->ieee80211->handle_assoc_response = rtl8192_handle_assoc_response;
- priv->ieee80211->handle_beacon = rtl8192_handle_beacon;
-
- priv->ieee80211->sta_wake_up = rtl8192_hw_wakeup;
- priv->ieee80211->enter_sleep_state = rtl8192_hw_to_sleep;
- priv->ieee80211->ps_is_queue_empty = rtl8192_is_tx_queue_empty;
- priv->ieee80211->GetNmodeSupportBySecCfg = GetNmodeSupportBySecCfg8190Pci;
- priv->ieee80211->SetWirelessMode = rtl8192_SetWirelessMode;
- priv->ieee80211->GetHalfNmodeSupportByAPsHandler = GetHalfNmodeSupportByAPs819xPci;
-
- priv->ieee80211->InitialGainHandler = InitialGain819xPci;
-
-#ifdef ENABLE_IPS
- priv->ieee80211->ieee80211_ips_leave_wq = ieee80211_ips_leave_wq;
- priv->ieee80211->ieee80211_ips_leave = ieee80211_ips_leave;
-#endif
-#ifdef ENABLE_LPS
- priv->ieee80211->LeisurePSLeave = LeisurePSLeave;
-#endif
-
- priv->ieee80211->SetHwRegHandler = rtl8192e_SetHwReg;
- priv->ieee80211->rtllib_ap_sec_type = rtl8192e_ap_sec_type;
-
- priv->ShortRetryLimit = 0x30;
- priv->LongRetryLimit = 0x30;
-
- priv->ReceiveConfig = RCR_ADD3 |
- RCR_AMF | RCR_ADF | //accept management/data
- RCR_AICV | //accept control frame for SW AP needs PS-poll, 2005.07.07, by rcnjko.
- RCR_AB | RCR_AM | RCR_APM | //accept BC/MC/UC
- RCR_AAP | ((u32)7<<RCR_MXDMA_OFFSET) |
- ((u32)7 << RCR_FIFO_OFFSET) | RCR_ONLYERLPKT;
-
- priv->pFirmware = vzalloc(sizeof(rt_firmware));
-
- /* rx related queue */
- skb_queue_head_init(&priv->skb_queue);
-
- /* Tx related queue */
- for(i = 0; i < MAX_QUEUE_SIZE; i++) {
- skb_queue_head_init(&priv->ieee80211->skb_waitQ [i]);
- }
- for(i = 0; i < MAX_QUEUE_SIZE; i++) {
- skb_queue_head_init(&priv->ieee80211->skb_aggQ [i]);
- }
- priv->rf_set_chan = rtl8192_phy_SwChnl;
-}
-
-static void rtl8192_init_priv_lock(struct r8192_priv* priv)
-{
- spin_lock_init(&priv->irq_th_lock);
- spin_lock_init(&priv->rf_ps_lock);
- sema_init(&priv->wx_sem,1);
- sema_init(&priv->rf_sem,1);
- mutex_init(&priv->mutex);
-}
-
-/* init tasklet and wait_queue here */
-#define DRV_NAME "wlan0"
-static void rtl8192_init_priv_task(struct r8192_priv *priv)
-{
- priv->priv_wq = create_workqueue(DRV_NAME);
-
-#ifdef ENABLE_IPS
- INIT_WORK(&priv->ieee80211->ips_leave_wq, IPSLeave_wq);
-#endif
-
- INIT_WORK(&priv->reset_wq, rtl8192_restart);
- INIT_DELAYED_WORK(&priv->watch_dog_wq, rtl819x_watchdog_wqcallback);
- INIT_DELAYED_WORK(&priv->txpower_tracking_wq, dm_txpower_trackingcallback);
- INIT_DELAYED_WORK(&priv->rfpath_check_wq, dm_rf_pathcheck_workitemcallback);
- INIT_DELAYED_WORK(&priv->update_beacon_wq, rtl8192_update_beacon);
- INIT_WORK(&priv->qos_activate, rtl8192_qos_activate);
- INIT_DELAYED_WORK(&priv->ieee80211->hw_wakeup_wq, rtl8192_hw_wakeup_wq);
-
- tasklet_init(&priv->irq_rx_tasklet, rtl8192_irq_rx_tasklet,
- (unsigned long) priv);
- tasklet_init(&priv->irq_tx_tasklet, rtl8192_irq_tx_tasklet,
- (unsigned long) priv);
- tasklet_init(&priv->irq_prepare_beacon_tasklet, rtl8192_prepare_beacon,
- (unsigned long) priv);
-}
-
-static void rtl8192_get_eeprom_size(struct r8192_priv *priv)
-{
- u16 curCR = 0;
- RT_TRACE(COMP_INIT, "===========>%s()\n", __FUNCTION__);
- curCR = read_nic_dword(priv, EPROM_CMD);
- RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD, curCR);
- //whether need I consider BIT5?
- priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EPROM_93c56 : EPROM_93c46;
- RT_TRACE(COMP_INIT, "<===========%s(), epromtype:%d\n", __FUNCTION__, priv->epromtype);
-}
-
-/*
- * Adapter->EEPROMAddressSize should be set before this function call.
- * EEPROM address size can be got through GetEEPROMSize8185()
- */
-static void rtl8192_read_eeprom_info(struct r8192_priv *priv)
-{
- struct net_device *dev = priv->ieee80211->dev;
- u8 tempval;
- u8 ICVer8192, ICVer8256;
- u16 i,usValue, IC_Version;
- u16 EEPROMId;
- u8 bMac_Tmp_Addr[6] = {0x00, 0xe0, 0x4c, 0x00, 0x00, 0x01};
- RT_TRACE(COMP_INIT, "====> rtl8192_read_eeprom_info\n");
-
-
- // TODO: I don't know if we need to apply EF function to EEPROM read function
-
- //2 Read EEPROM ID to make sure autoload is success
- EEPROMId = eprom_read(priv, 0);
- if( EEPROMId != RTL8190_EEPROM_ID )
- {
- RT_TRACE(COMP_ERR, "EEPROM ID is invalid:%x, %x\n", EEPROMId, RTL8190_EEPROM_ID);
- priv->AutoloadFailFlag=true;
- }
- else
- {
- priv->AutoloadFailFlag=false;
- }
-
- //
- // Assign Chip Version ID
- //
- // Read IC Version && Channel Plan
- if(!priv->AutoloadFailFlag)
- {
- // VID, PID
- priv->eeprom_vid = eprom_read(priv, (EEPROM_VID >> 1));
- priv->eeprom_did = eprom_read(priv, (EEPROM_DID >> 1));
-
- usValue = eprom_read(priv, (u16)(EEPROM_Customer_ID>>1)) >> 8 ;
- priv->eeprom_CustomerID = (u8)( usValue & 0xff);
- usValue = eprom_read(priv, (EEPROM_ICVersion_ChannelPlan>>1));
- priv->eeprom_ChannelPlan = usValue&0xff;
- IC_Version = ((usValue&0xff00)>>8);
-
- ICVer8192 = (IC_Version&0xf); //bit0~3; 1:A cut, 2:B cut, 3:C cut...
- ICVer8256 = ((IC_Version&0xf0)>>4);//bit4~6, bit7 reserved for other RF chip; 1:A cut, 2:B cut, 3:C cut...
- RT_TRACE(COMP_INIT, "ICVer8192 = 0x%x\n", ICVer8192);
- RT_TRACE(COMP_INIT, "ICVer8256 = 0x%x\n", ICVer8256);
- if(ICVer8192 == 0x2) //B-cut
- {
- if(ICVer8256 == 0x5) //E-cut
- priv->card_8192_version= VERSION_8190_BE;
- }
-
- switch(priv->card_8192_version)
- {
- case VERSION_8190_BD:
- case VERSION_8190_BE:
- break;
- default:
- priv->card_8192_version = VERSION_8190_BD;
- break;
- }
- RT_TRACE(COMP_INIT, "\nIC Version = 0x%x\n", priv->card_8192_version);
- }
- else
- {
- priv->card_8192_version = VERSION_8190_BD;
- priv->eeprom_vid = 0;
- priv->eeprom_did = 0;
- priv->eeprom_CustomerID = 0;
- priv->eeprom_ChannelPlan = 0;
- RT_TRACE(COMP_INIT, "IC Version = 0x%x\n", 0xff);
- }
-
- RT_TRACE(COMP_INIT, "EEPROM VID = 0x%4x\n", priv->eeprom_vid);
- RT_TRACE(COMP_INIT, "EEPROM DID = 0x%4x\n", priv->eeprom_did);
- RT_TRACE(COMP_INIT,"EEPROM Customer ID: 0x%2x\n", priv->eeprom_CustomerID);
-
- //2 Read Permanent MAC address
- if(!priv->AutoloadFailFlag)
- {
- for(i = 0; i < 6; i += 2)
- {
- usValue = eprom_read(priv, (u16) ((EEPROM_NODE_ADDRESS_BYTE_0+i)>>1));
- *(u16*)(&dev->dev_addr[i]) = usValue;
- }
- } else {
- // when auto load failed, the last address byte set to be a random one.
- // added by david woo.2007/11/7
- memcpy(dev->dev_addr, bMac_Tmp_Addr, 6);
- }
-
- RT_TRACE(COMP_INIT, "Permanent Address = %pM\n", dev->dev_addr);
-
- //2 TX Power Check EEPROM Fail or not
- if(priv->card_8192_version > VERSION_8190_BD) {
- priv->bTXPowerDataReadFromEEPORM = true;
- } else {
- priv->bTXPowerDataReadFromEEPORM = false;
- }
-
- // 2007/11/15 MH 8190PCI Default=2T4R, 8192PCIE default=1T2R
- priv->rf_type = RTL819X_DEFAULT_RF_TYPE;
-
- if(priv->card_8192_version > VERSION_8190_BD)
- {
- // Read RF-indication and Tx Power gain index diff of legacy to HT OFDM rate.
- if(!priv->AutoloadFailFlag)
- {
- tempval = (eprom_read(priv, (EEPROM_RFInd_PowerDiff>>1))) & 0xff;
- priv->EEPROMLegacyHTTxPowerDiff = tempval & 0xf; // bit[3:0]
-
- if (tempval&0x80) //RF-indication, bit[7]
- priv->rf_type = RF_1T2R;
- else
- priv->rf_type = RF_2T4R;
- }
- else
- {
- priv->EEPROMLegacyHTTxPowerDiff = EEPROM_Default_LegacyHTTxPowerDiff;
- }
- RT_TRACE(COMP_INIT, "EEPROMLegacyHTTxPowerDiff = %d\n",
- priv->EEPROMLegacyHTTxPowerDiff);
-
- // Read ThermalMeter from EEPROM
- if(!priv->AutoloadFailFlag)
- {
- priv->EEPROMThermalMeter = (u8)(((eprom_read(priv, (EEPROM_ThermalMeter>>1))) & 0xff00)>>8);
- }
- else
- {
- priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
- }
- RT_TRACE(COMP_INIT, "ThermalMeter = %d\n", priv->EEPROMThermalMeter);
- //vivi, for tx power track
- priv->TSSI_13dBm = priv->EEPROMThermalMeter *100;
-
- if(priv->epromtype == EPROM_93c46)
- {
- // Read antenna tx power offset of B/C/D to A and CrystalCap from EEPROM
- if(!priv->AutoloadFailFlag)
- {
- usValue = eprom_read(priv, (EEPROM_TxPwDiff_CrystalCap>>1));
- priv->EEPROMAntPwDiff = (usValue&0x0fff);
- priv->EEPROMCrystalCap = (u8)((usValue&0xf000)>>12);
- }
- else
- {
- priv->EEPROMAntPwDiff = EEPROM_Default_AntTxPowerDiff;
- priv->EEPROMCrystalCap = EEPROM_Default_TxPwDiff_CrystalCap;
- }
- RT_TRACE(COMP_INIT, "EEPROMAntPwDiff = %d\n", priv->EEPROMAntPwDiff);
- RT_TRACE(COMP_INIT, "EEPROMCrystalCap = %d\n", priv->EEPROMCrystalCap);
-
- //
- // Get per-channel Tx Power Level
- //
- for(i=0; i<14; i+=2)
- {
- if(!priv->AutoloadFailFlag)
- {
- usValue = eprom_read(priv, (u16) ((EEPROM_TxPwIndex_CCK+i)>>1) );
- }
- else
- {
- usValue = EEPROM_Default_TxPower;
- }
- *((u16*)(&priv->EEPROMTxPowerLevelCCK[i])) = usValue;
- RT_TRACE(COMP_INIT,"CCK Tx Power Level, Index %d = 0x%02x\n", i, priv->EEPROMTxPowerLevelCCK[i]);
- RT_TRACE(COMP_INIT, "CCK Tx Power Level, Index %d = 0x%02x\n", i+1, priv->EEPROMTxPowerLevelCCK[i+1]);
- }
- for(i=0; i<14; i+=2)
- {
- if(!priv->AutoloadFailFlag)
- {
- usValue = eprom_read(priv, (u16) ((EEPROM_TxPwIndex_OFDM_24G+i)>>1) );
- }
- else
- {
- usValue = EEPROM_Default_TxPower;
- }
- *((u16*)(&priv->EEPROMTxPowerLevelOFDM24G[i])) = usValue;
- RT_TRACE(COMP_INIT, "OFDM 2.4G Tx Power Level, Index %d = 0x%02x\n", i, priv->EEPROMTxPowerLevelOFDM24G[i]);
- RT_TRACE(COMP_INIT, "OFDM 2.4G Tx Power Level, Index %d = 0x%02x\n", i+1, priv->EEPROMTxPowerLevelOFDM24G[i+1]);
- }
- }
-
- //
- // Update HAL variables.
- //
- if(priv->epromtype == EPROM_93c46)
- {
- for(i=0; i<14; i++)
- {
- priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK[i];
- priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[i];
- }
- priv->LegacyHTTxPowerDiff = priv->EEPROMLegacyHTTxPowerDiff;
- // Antenna B gain offset to antenna A, bit0~3
- priv->AntennaTxPwDiff[0] = (priv->EEPROMAntPwDiff & 0xf);
- // Antenna C gain offset to antenna A, bit4~7
- priv->AntennaTxPwDiff[1] = ((priv->EEPROMAntPwDiff & 0xf0)>>4);
- // Antenna D gain offset to antenna A, bit8~11
- priv->AntennaTxPwDiff[2] = ((priv->EEPROMAntPwDiff & 0xf00)>>8);
- // CrystalCap, bit12~15
- priv->CrystalCap = priv->EEPROMCrystalCap;
- // ThermalMeter, bit0~3 for RFIC1, bit4~7 for RFIC2
- priv->ThermalMeter[0] = (priv->EEPROMThermalMeter & 0xf);
- priv->ThermalMeter[1] = ((priv->EEPROMThermalMeter & 0xf0)>>4);
- }
- else if(priv->epromtype == EPROM_93c56)
- {
- for(i=0; i<3; i++) // channel 1~3 use the same Tx Power Level.
- {
- priv->TxPowerLevelCCK_A[i] = priv->EEPROMRfACCKChnl1TxPwLevel[0];
- priv->TxPowerLevelOFDM24G_A[i] = priv->EEPROMRfAOfdmChnlTxPwLevel[0];
- priv->TxPowerLevelCCK_C[i] = priv->EEPROMRfCCCKChnl1TxPwLevel[0];
- priv->TxPowerLevelOFDM24G_C[i] = priv->EEPROMRfCOfdmChnlTxPwLevel[0];
- }
- for(i=3; i<9; i++) // channel 4~9 use the same Tx Power Level
- {
- priv->TxPowerLevelCCK_A[i] = priv->EEPROMRfACCKChnl1TxPwLevel[1];
- priv->TxPowerLevelOFDM24G_A[i] = priv->EEPROMRfAOfdmChnlTxPwLevel[1];
- priv->TxPowerLevelCCK_C[i] = priv->EEPROMRfCCCKChnl1TxPwLevel[1];
- priv->TxPowerLevelOFDM24G_C[i] = priv->EEPROMRfCOfdmChnlTxPwLevel[1];
- }
- for(i=9; i<14; i++) // channel 10~14 use the same Tx Power Level
- {
- priv->TxPowerLevelCCK_A[i] = priv->EEPROMRfACCKChnl1TxPwLevel[2];
- priv->TxPowerLevelOFDM24G_A[i] = priv->EEPROMRfAOfdmChnlTxPwLevel[2];
- priv->TxPowerLevelCCK_C[i] = priv->EEPROMRfCCCKChnl1TxPwLevel[2];
- priv->TxPowerLevelOFDM24G_C[i] = priv->EEPROMRfCOfdmChnlTxPwLevel[2];
- }
- for(i=0; i<14; i++)
- RT_TRACE(COMP_INIT, "priv->TxPowerLevelCCK_A[%d] = 0x%x\n", i, priv->TxPowerLevelCCK_A[i]);
- for(i=0; i<14; i++)
- RT_TRACE(COMP_INIT,"priv->TxPowerLevelOFDM24G_A[%d] = 0x%x\n", i, priv->TxPowerLevelOFDM24G_A[i]);
- for(i=0; i<14; i++)
- RT_TRACE(COMP_INIT, "priv->TxPowerLevelCCK_C[%d] = 0x%x\n", i, priv->TxPowerLevelCCK_C[i]);
- for(i=0; i<14; i++)
- RT_TRACE(COMP_INIT, "priv->TxPowerLevelOFDM24G_C[%d] = 0x%x\n", i, priv->TxPowerLevelOFDM24G_C[i]);
- priv->LegacyHTTxPowerDiff = priv->EEPROMLegacyHTTxPowerDiff;
- priv->AntennaTxPwDiff[0] = 0;
- priv->AntennaTxPwDiff[1] = 0;
- priv->AntennaTxPwDiff[2] = 0;
- priv->CrystalCap = priv->EEPROMCrystalCap;
- // ThermalMeter, bit0~3 for RFIC1, bit4~7 for RFIC2
- priv->ThermalMeter[0] = (priv->EEPROMThermalMeter & 0xf);
- priv->ThermalMeter[1] = ((priv->EEPROMThermalMeter & 0xf0)>>4);
- }
- }
-
- if(priv->rf_type == RF_1T2R)
- {
- RT_TRACE(COMP_INIT, "1T2R config\n");
- }
- else if (priv->rf_type == RF_2T4R)
- {
- RT_TRACE(COMP_INIT, "2T4R config\n");
- }
-
- // 2008/01/16 MH We can only know RF type in the function. So we have to init
- // DIG RATR table again.
- init_rate_adaptive(priv);
-
- //1 Make a copy for following variables and we can change them if we want
-
- if(priv->RegChannelPlan == 0xf)
- {
- priv->ChannelPlan = priv->eeprom_ChannelPlan;
- }
- else
- {
- priv->ChannelPlan = priv->RegChannelPlan;
- }
-
- //
- // Used PID and DID to Set CustomerID
- //
- if( priv->eeprom_vid == 0x1186 && priv->eeprom_did == 0x3304 )
- {
- priv->CustomerID = RT_CID_DLINK;
- }
-
- switch(priv->eeprom_CustomerID)
- {
- case EEPROM_CID_DEFAULT:
- priv->CustomerID = RT_CID_DEFAULT;
- break;
- case EEPROM_CID_CAMEO:
- priv->CustomerID = RT_CID_819x_CAMEO;
- break;
- case EEPROM_CID_RUNTOP:
- priv->CustomerID = RT_CID_819x_RUNTOP;
- break;
- case EEPROM_CID_NetCore:
- priv->CustomerID = RT_CID_819x_Netcore;
- break;
- case EEPROM_CID_TOSHIBA: // Merge by Jacken, 2008/01/31
- priv->CustomerID = RT_CID_TOSHIBA;
- if(priv->eeprom_ChannelPlan&0x80)
- priv->ChannelPlan = priv->eeprom_ChannelPlan&0x7f;
- else
- priv->ChannelPlan = 0x0;
- RT_TRACE(COMP_INIT, "Toshiba ChannelPlan = 0x%x\n",
- priv->ChannelPlan);
- break;
- case EEPROM_CID_Nettronix:
- priv->CustomerID = RT_CID_Nettronix;
- break;
- case EEPROM_CID_Pronet:
- priv->CustomerID = RT_CID_PRONET;
- break;
- case EEPROM_CID_DLINK:
- priv->CustomerID = RT_CID_DLINK;
- break;
-
- case EEPROM_CID_WHQL:
- break;
- default:
- // value from RegCustomerID
- break;
- }
-
- //Avoid the channel plan array overflow, by Bruce, 2007-08-27.
- if(priv->ChannelPlan > CHANNEL_PLAN_LEN - 1)
- priv->ChannelPlan = 0; //FCC
-
- if( priv->eeprom_vid == 0x1186 && priv->eeprom_did == 0x3304)
- priv->ieee80211->bSupportRemoteWakeUp = true;
- else
- priv->ieee80211->bSupportRemoteWakeUp = false;
-
-
- RT_TRACE(COMP_INIT, "RegChannelPlan(%d)\n", priv->RegChannelPlan);
- RT_TRACE(COMP_INIT, "ChannelPlan = %d\n", priv->ChannelPlan);
- RT_TRACE(COMP_TRACE, "<==== ReadAdapterInfo\n");
-}
-
-
-static short rtl8192_get_channel_map(struct r8192_priv *priv)
-{
-#ifdef ENABLE_DOT11D
- if(priv->ChannelPlan> COUNTRY_CODE_GLOBAL_DOMAIN){
- printk("rtl8180_init:Error channel plan! Set to default.\n");
- priv->ChannelPlan= 0;
- }
- RT_TRACE(COMP_INIT, "Channel plan is %d\n",priv->ChannelPlan);
-
- rtl819x_set_channel_map(priv->ChannelPlan, priv);
-#else
- int ch,i;
- //Set Default Channel Plan
- if(!channels){
- DMESG("No channels, aborting");
- return -1;
- }
- ch=channels;
- priv->ChannelPlan= 0;//hikaru
- // set channels 1..14 allowed in given locale
- for (i=1; i<=14; i++) {
- (priv->ieee80211->channel_map)[i] = (u8)(ch & 0x01);
- ch >>= 1;
- }
-#endif
- return 0;
-}
-
-static short rtl8192_init(struct r8192_priv *priv)
-{
- struct net_device *dev = priv->ieee80211->dev;
-
- memset(&(priv->stats),0,sizeof(struct Stats));
- rtl8192_init_priv_variable(priv);
- rtl8192_init_priv_lock(priv);
- rtl8192_init_priv_task(priv);
- rtl8192_get_eeprom_size(priv);
- rtl8192_read_eeprom_info(priv);
- rtl8192_get_channel_map(priv);
- init_hal_dm(priv);
- init_timer(&priv->watch_dog_timer);
- priv->watch_dog_timer.data = (unsigned long)priv;
- priv->watch_dog_timer.function = watch_dog_timer_callback;
- if (request_irq(dev->irq, rtl8192_interrupt, IRQF_SHARED, dev->name, priv)) {
- printk("Error allocating IRQ %d",dev->irq);
- return -1;
- }else{
- priv->irq=dev->irq;
- printk("IRQ %d",dev->irq);
- }
- if (rtl8192_pci_initdescring(priv) != 0){
- printk("Endopoints initialization failed");
- return -1;
- }
-
- return 0;
-}
-
-/*
- * Actually only set RRSR, RATR and BW_OPMODE registers
- * not to do all the hw config as its name says
- * This part need to modified according to the rate set we filtered
- */
-static void rtl8192_hwconfig(struct r8192_priv *priv)
-{
- u32 regRATR = 0, regRRSR = 0;
- u8 regBwOpMode = 0, regTmp = 0;
-
-// Set RRSR, RATR, and BW_OPMODE registers
- //
- switch (priv->ieee80211->mode)
- {
- case WIRELESS_MODE_B:
- regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK;
- regRRSR = RATE_ALL_CCK;
- break;
- case WIRELESS_MODE_A:
- regBwOpMode = BW_OPMODE_5G |BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_OFDM_AG;
- regRRSR = RATE_ALL_OFDM_AG;
- break;
- case WIRELESS_MODE_G:
- regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- break;
- case WIRELESS_MODE_AUTO:
- case WIRELESS_MODE_N_24G:
- // It support CCK rate by default.
- // CCK rate will be filtered out only when associated AP does not support it.
- regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
- regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- break;
- case WIRELESS_MODE_N_5G:
- regBwOpMode = BW_OPMODE_5G;
- regRATR = RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
- regRRSR = RATE_ALL_OFDM_AG;
- break;
- }
-
- write_nic_byte(priv, BW_OPMODE, regBwOpMode);
- {
- u32 ratr_value = 0;
- ratr_value = regRATR;
- if (priv->rf_type == RF_1T2R)
- {
- ratr_value &= ~(RATE_ALL_OFDM_2SS);
- }
- write_nic_dword(priv, RATR0, ratr_value);
- write_nic_byte(priv, UFWP, 1);
- }
- regTmp = read_nic_byte(priv, 0x313);
- regRRSR = ((regTmp) << 24) | (regRRSR & 0x00ffffff);
- write_nic_dword(priv, RRSR, regRRSR);
-
- //
- // Set Retry Limit here
- //
- write_nic_word(priv, RETRY_LIMIT,
- priv->ShortRetryLimit << RETRY_LIMIT_SHORT_SHIFT |
- priv->LongRetryLimit << RETRY_LIMIT_LONG_SHIFT);
- // Set Contention Window here
-
- // Set Tx AGC
-
- // Set Tx Antenna including Feedback control
-
- // Set Auto Rate fallback control
-
-
-}
-
-
-static RT_STATUS rtl8192_adapter_start(struct r8192_priv *priv)
-{
- struct net_device *dev = priv->ieee80211->dev;
- u32 ulRegRead;
- RT_STATUS rtStatus = RT_STATUS_SUCCESS;
- u8 tmpvalue;
- u8 ICVersion,SwitchingRegulatorOutput;
- bool bfirmwareok = true;
- u32 tmpRegA, tmpRegC, TempCCk;
- int i =0;
-
- RT_TRACE(COMP_INIT, "====>%s()\n", __FUNCTION__);
- priv->being_init_adapter = true;
- rtl8192_pci_resetdescring(priv);
- // 2007/11/02 MH Before initalizing RF. We can not use FW to do RF-R/W.
- priv->Rf_Mode = RF_OP_By_SW_3wire;
-
- //dPLL on
- if(priv->ResetProgress == RESET_TYPE_NORESET)
- {
- write_nic_byte(priv, ANAPAR, 0x37);
- // Accordign to designer's explain, LBUS active will never > 10ms. We delay 10ms
- // Joseph increae the time to prevent firmware download fail
- mdelay(500);
- }
-
- //PlatformSleepUs(10000);
- // For any kind of InitializeAdapter process, we shall use system now!!
- priv->pFirmware->firmware_status = FW_STATUS_0_INIT;
-
- //
- //3 //Config CPUReset Register
- //3//
- //3 Firmware Reset Or Not
- ulRegRead = read_nic_dword(priv, CPU_GEN);
- if(priv->pFirmware->firmware_status == FW_STATUS_0_INIT)
- { //called from MPInitialized. do nothing
- ulRegRead |= CPU_GEN_SYSTEM_RESET;
- }else if(priv->pFirmware->firmware_status == FW_STATUS_5_READY)
- ulRegRead |= CPU_GEN_FIRMWARE_RESET; // Called from MPReset
- else
- RT_TRACE(COMP_ERR, "ERROR in %s(): undefined firmware state(%d)\n", __FUNCTION__, priv->pFirmware->firmware_status);
-
- write_nic_dword(priv, CPU_GEN, ulRegRead);
-
- //3//
- //3 //Fix the issue of E-cut high temperature issue
- //3//
- // TODO: E cut only
- ICVersion = read_nic_byte(priv, IC_VERRSION);
- if(ICVersion >= 0x4) //E-cut only
- {
- // HW SD suggest that we should not wirte this register too often, so driver
- // should readback this register. This register will be modified only when
- // power on reset
- SwitchingRegulatorOutput = read_nic_byte(priv, SWREGULATOR);
- if(SwitchingRegulatorOutput != 0xb8)
- {
- write_nic_byte(priv, SWREGULATOR, 0xa8);
- mdelay(1);
- write_nic_byte(priv, SWREGULATOR, 0xb8);
- }
- }
-
- //3//
- //3// Initialize BB before MAC
- //3//
- RT_TRACE(COMP_INIT, "BB Config Start!\n");
- rtStatus = rtl8192_BBConfig(priv);
- if(rtStatus != RT_STATUS_SUCCESS)
- {
- RT_TRACE(COMP_ERR, "BB Config failed\n");
- return rtStatus;
- }
- RT_TRACE(COMP_INIT,"BB Config Finished!\n");
-
- //3//Set Loopback mode or Normal mode
- //3//
- //2006.12.13 by emily. Note!We should not merge these two CPU_GEN register writings
- // because setting of System_Reset bit reset MAC to default transmission mode.
- //Loopback mode or not
- priv->LoopbackMode = RTL819X_NO_LOOPBACK;
- if(priv->ResetProgress == RESET_TYPE_NORESET)
- {
- ulRegRead = read_nic_dword(priv, CPU_GEN);
- if(priv->LoopbackMode == RTL819X_NO_LOOPBACK)
- {
- ulRegRead = ((ulRegRead & CPU_GEN_NO_LOOPBACK_MSK) | CPU_GEN_NO_LOOPBACK_SET);
- }
- else if (priv->LoopbackMode == RTL819X_MAC_LOOPBACK )
- {
- ulRegRead |= CPU_CCK_LOOPBACK;
- }
- else
- {
- RT_TRACE(COMP_ERR,"Serious error: wrong loopback mode setting\n");
- }
-
- //2008.06.03, for WOL
- //ulRegRead &= (~(CPU_GEN_GPIO_UART));
- write_nic_dword(priv, CPU_GEN, ulRegRead);
-
- // 2006.11.29. After reset cpu, we sholud wait for a second, otherwise, it may fail to write registers. Emily
- udelay(500);
- }
- //3Set Hardware(Do nothing now)
- rtl8192_hwconfig(priv);
- //2=======================================================
- // Common Setting for all of the FPGA platform. (part 1)
- //2=======================================================
- // If there is changes, please make sure it applies to all of the FPGA version
- //3 Turn on Tx/Rx
- write_nic_byte(priv, CMDR, CR_RE|CR_TE);
-
- //2Set Tx dma burst
- write_nic_byte(priv, PCIF, ((MXDMA2_NoLimit<<MXDMA2_RX_SHIFT) |
- (MXDMA2_NoLimit<<MXDMA2_TX_SHIFT) ));
-
- //set IDR0 here
- write_nic_dword(priv, MAC0, ((u32*)dev->dev_addr)[0]);
- write_nic_word(priv, MAC4, ((u16*)(dev->dev_addr + 4))[0]);
- //set RCR
- write_nic_dword(priv, RCR, priv->ReceiveConfig);
-
- //3 Initialize Number of Reserved Pages in Firmware Queue
- write_nic_dword(priv, RQPN1, NUM_OF_PAGE_IN_FW_QUEUE_BK << RSVD_FW_QUEUE_PAGE_BK_SHIFT |
- NUM_OF_PAGE_IN_FW_QUEUE_BE << RSVD_FW_QUEUE_PAGE_BE_SHIFT |
- NUM_OF_PAGE_IN_FW_QUEUE_VI << RSVD_FW_QUEUE_PAGE_VI_SHIFT |
- NUM_OF_PAGE_IN_FW_QUEUE_VO <<RSVD_FW_QUEUE_PAGE_VO_SHIFT);
- write_nic_dword(priv, RQPN2, NUM_OF_PAGE_IN_FW_QUEUE_MGNT << RSVD_FW_QUEUE_PAGE_MGNT_SHIFT);
- write_nic_dword(priv, RQPN3, APPLIED_RESERVED_QUEUE_IN_FW|
- NUM_OF_PAGE_IN_FW_QUEUE_BCN<<RSVD_FW_QUEUE_PAGE_BCN_SHIFT|
- NUM_OF_PAGE_IN_FW_QUEUE_PUB<<RSVD_FW_QUEUE_PAGE_PUB_SHIFT);
-
- rtl8192_tx_enable(priv);
- rtl8192_rx_enable(priv);
- //3Set Response Rate Setting Register
- // CCK rate is supported by default.
- // CCK rate will be filtered out only when associated AP does not support it.
- ulRegRead = (0xFFF00000 & read_nic_dword(priv, RRSR)) | RATE_ALL_OFDM_AG | RATE_ALL_CCK;
- write_nic_dword(priv, RRSR, ulRegRead);
- write_nic_dword(priv, RATR0+4*7, (RATE_ALL_OFDM_AG | RATE_ALL_CCK));
-
- //2Set AckTimeout
- // TODO: (it value is only for FPGA version). need to be changed!!2006.12.18, by Emily
- write_nic_byte(priv, ACK_TIMEOUT, 0x30);
-
- if(priv->ResetProgress == RESET_TYPE_NORESET)
- rtl8192_SetWirelessMode(priv->ieee80211, priv->ieee80211->mode);
- //-----------------------------------------------------------------------------
- // Set up security related. 070106, by rcnjko:
- // 1. Clear all H/W keys.
- // 2. Enable H/W encryption/decryption.
- //-----------------------------------------------------------------------------
- CamResetAllEntry(priv);
- {
- u8 SECR_value = 0x0;
- SECR_value |= SCR_TxEncEnable;
- SECR_value |= SCR_RxDecEnable;
- SECR_value |= SCR_NoSKMC;
- write_nic_byte(priv, SECR, SECR_value);
- }
- //3Beacon related
- write_nic_word(priv, ATIMWND, 2);
- write_nic_word(priv, BCN_INTERVAL, 100);
- for (i=0; i<QOS_QUEUE_NUM; i++)
- write_nic_dword(priv, WDCAPARA_ADD[i], 0x005e4332);
- //
- // Switching regulator controller: This is set temporarily.
- // It's not sure if this can be removed in the future.
- // PJ advised to leave it by default.
- //
- write_nic_byte(priv, 0xbe, 0xc0);
-
- //2=======================================================
- // Set PHY related configuration defined in MAC register bank
- //2=======================================================
- rtl8192_phy_configmac(priv);
-
- if (priv->card_8192_version > (u8) VERSION_8190_BD) {
- rtl8192_phy_getTxPower(priv);
- rtl8192_phy_setTxPower(priv, priv->chan);
- }
-
- //if D or C cut
- tmpvalue = read_nic_byte(priv, IC_VERRSION);
- priv->IC_Cut = tmpvalue;
- RT_TRACE(COMP_INIT, "priv->IC_Cut = 0x%x\n", priv->IC_Cut);
- if(priv->IC_Cut >= IC_VersionCut_D)
- {
- //pHalData->bDcut = TRUE;
- if(priv->IC_Cut == IC_VersionCut_D)
- RT_TRACE(COMP_INIT, "D-cut\n");
- if(priv->IC_Cut == IC_VersionCut_E)
- {
- RT_TRACE(COMP_INIT, "E-cut\n");
- // HW SD suggest that we should not wirte this register too often, so driver
- // should readback this register. This register will be modified only when
- // power on reset
- }
- }
- else
- {
- //pHalData->bDcut = FALSE;
- RT_TRACE(COMP_INIT, "Before C-cut\n");
- }
-
- //Firmware download
- RT_TRACE(COMP_INIT, "Load Firmware!\n");
- bfirmwareok = init_firmware(priv);
- if(bfirmwareok != true) {
- rtStatus = RT_STATUS_FAILURE;
- return rtStatus;
- }
- RT_TRACE(COMP_INIT, "Load Firmware finished!\n");
-
- //RF config
- if(priv->ResetProgress == RESET_TYPE_NORESET)
- {
- RT_TRACE(COMP_INIT, "RF Config Started!\n");
- rtStatus = rtl8192_phy_RFConfig(priv);
- if(rtStatus != RT_STATUS_SUCCESS)
- {
- RT_TRACE(COMP_ERR, "RF Config failed\n");
- return rtStatus;
- }
- RT_TRACE(COMP_INIT, "RF Config Finished!\n");
- }
- rtl8192_phy_updateInitGain(priv);
-
- /*---- Set CCK and OFDM Block "ON"----*/
- rtl8192_setBBreg(priv, rFPGA0_RFMOD, bCCKEn, 0x1);
- rtl8192_setBBreg(priv, rFPGA0_RFMOD, bOFDMEn, 0x1);
-
- //Enable Led
- write_nic_byte(priv, 0x87, 0x0);
-
- //2=======================================================
- // RF Power Save
- //2=======================================================
-#ifdef ENABLE_IPS
-
-{
- if(priv->RfOffReason > RF_CHANGE_BY_PS)
- { // H/W or S/W RF OFF before sleep.
- RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER), "%s(): Turn off RF for RfOffReason(%d)\n", __FUNCTION__,priv->RfOffReason);
- MgntActSet_RF_State(priv, eRfOff, priv->RfOffReason);
- }
- else if(priv->RfOffReason >= RF_CHANGE_BY_IPS)
- { // H/W or S/W RF OFF before sleep.
- RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER), "%s(): Turn off RF for RfOffReason(%d)\n", __FUNCTION__, priv->RfOffReason);
- MgntActSet_RF_State(priv, eRfOff, priv->RfOffReason);
- }
- else
- {
- RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER), "%s(): RF-ON \n",__FUNCTION__);
- priv->eRFPowerState = eRfOn;
- priv->RfOffReason = 0;
- }
-}
-#endif
- // We can force firmware to do RF-R/W
- if(priv->ieee80211->FwRWRF)
- priv->Rf_Mode = RF_OP_By_FW;
- else
- priv->Rf_Mode = RF_OP_By_SW_3wire;
-
- if(priv->ResetProgress == RESET_TYPE_NORESET)
- {
- dm_initialize_txpower_tracking(priv);
-
- if(priv->IC_Cut >= IC_VersionCut_D)
- {
- tmpRegA = rtl8192_QueryBBReg(priv, rOFDM0_XATxIQImbalance, bMaskDWord);
- tmpRegC = rtl8192_QueryBBReg(priv, rOFDM0_XCTxIQImbalance, bMaskDWord);
- for(i = 0; i<TxBBGainTableLength; i++)
- {
- if(tmpRegA == priv->txbbgain_table[i].txbbgain_value)
- {
- priv->rfa_txpowertrackingindex= (u8)i;
- priv->rfa_txpowertrackingindex_real= (u8)i;
- priv->rfa_txpowertracking_default = priv->rfa_txpowertrackingindex;
- break;
- }
- }
-
- TempCCk = rtl8192_QueryBBReg(priv, rCCK0_TxFilter1, bMaskByte2);
-
- for(i=0 ; i<CCKTxBBGainTableLength ; i++)
- {
- if(TempCCk == priv->cck_txbbgain_table[i].ccktxbb_valuearray[0])
- {
- priv->CCKPresentAttentuation_20Mdefault =(u8) i;
- break;
- }
- }
- priv->CCKPresentAttentuation_40Mdefault = 0;
- priv->CCKPresentAttentuation_difference = 0;
- priv->CCKPresentAttentuation = priv->CCKPresentAttentuation_20Mdefault;
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_initial = %d\n", priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real__initial = %d\n", priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresentAttentuation_difference_initial = %d\n", priv->CCKPresentAttentuation_difference);
- RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresentAttentuation_initial = %d\n", priv->CCKPresentAttentuation);
- priv->btxpower_tracking = FALSE;//TEMPLY DISABLE
- }
- }
-
- rtl8192_irq_enable(priv);
- priv->being_init_adapter = false;
- return rtStatus;
-
-}
-
-static void rtl8192_prepare_beacon(unsigned long arg)
-{
- struct r8192_priv *priv = (struct r8192_priv*) arg;
- struct sk_buff *skb;
- cb_desc *tcb_desc;
-
- skb = ieee80211_get_beacon(priv->ieee80211);
- tcb_desc = (cb_desc *)(skb->cb + 8);
- /* prepare misc info for the beacon xmit */
- tcb_desc->queue_index = BEACON_QUEUE;
- /* IBSS does not support HT yet, use 1M defaultly */
- tcb_desc->data_rate = 2;
- tcb_desc->RATRIndex = 7;
- tcb_desc->bTxDisableRateFallBack = 1;
- tcb_desc->bTxUseDriverAssingedRate = 1;
-
- skb_push(skb, priv->ieee80211->tx_headroom);
- if(skb){
- rtl8192_tx(priv, skb);
- }
-}
-
-
-/*
- * configure registers for beacon tx and enables it via
- * rtl8192_beacon_tx_enable(). rtl8192_beacon_tx_disable() might
- * be used to stop beacon transmission
- */
-static void rtl8192_start_beacon(struct ieee80211_device *ieee80211)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
- struct ieee80211_network *net = &priv->ieee80211->current_network;
- u16 BcnTimeCfg = 0;
- u16 BcnCW = 6;
- u16 BcnIFS = 0xf;
-
- DMESG("Enabling beacon TX");
- rtl8192_irq_disable(priv);
- //rtl8192_beacon_tx_enable(dev);
-
- /* ATIM window */
- write_nic_word(priv, ATIMWND, 2);
-
- /* Beacon interval (in unit of TU) */
- write_nic_word(priv, BCN_INTERVAL, net->beacon_interval);
-
- /*
- * DrvErlyInt (in unit of TU).
- * (Time to send interrupt to notify driver to c
- * hange beacon content)
- * */
- write_nic_word(priv, BCN_DRV_EARLY_INT, 10);
-
- /*
- * BcnDMATIM(in unit of us).
- * Indicates the time before TBTT to perform beacon queue DMA
- * */
- write_nic_word(priv, BCN_DMATIME, 256);
-
- /*
- * Force beacon frame transmission even after receiving
- * beacon frame from other ad hoc STA
- * */
- write_nic_byte(priv, BCN_ERR_THRESH, 100);
-
- /* Set CW and IFS */
- BcnTimeCfg |= BcnCW<<BCN_TCFG_CW_SHIFT;
- BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
- write_nic_word(priv, BCN_TCFG, BcnTimeCfg);
-
-
- /* enable the interrupt for ad-hoc process */
- rtl8192_irq_enable(priv);
-}
-
-static bool HalRxCheckStuck8190Pci(struct r8192_priv *priv)
-{
- u16 RegRxCounter = read_nic_word(priv, 0x130);
- bool bStuck = FALSE;
-
- RT_TRACE(COMP_RESET,"%s(): RegRxCounter is %d,RxCounter is %d\n",__FUNCTION__,RegRxCounter,priv->RxCounter);
- // If rssi is small, we should check rx for long time because of bad rx.
- // or maybe it will continuous silent reset every 2 seconds.
- priv->rx_chk_cnt++;
- if(priv->undecorated_smoothed_pwdb >= (RateAdaptiveTH_High+5))
- {
- priv->rx_chk_cnt = 0; /* high rssi, check rx stuck right now. */
- }
- else if(priv->undecorated_smoothed_pwdb < (RateAdaptiveTH_High+5) &&
- ((priv->CurrentChannelBW!=HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb>=RateAdaptiveTH_Low_40M) ||
- (priv->CurrentChannelBW==HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb>=RateAdaptiveTH_Low_20M)) )
-
- {
- if(priv->rx_chk_cnt < 2)
- {
- return bStuck;
- }
- else
- {
- priv->rx_chk_cnt = 0;
- }
- }
- else if(((priv->CurrentChannelBW!=HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb<RateAdaptiveTH_Low_40M) ||
- (priv->CurrentChannelBW==HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb<RateAdaptiveTH_Low_20M)) &&
- priv->undecorated_smoothed_pwdb >= VeryLowRSSI)
- {
- if(priv->rx_chk_cnt < 4)
- {
- return bStuck;
- }
- else
- {
- priv->rx_chk_cnt = 0;
- }
- }
- else
- {
- if(priv->rx_chk_cnt < 8)
- {
- return bStuck;
- }
- else
- {
- priv->rx_chk_cnt = 0;
- }
- }
- if(priv->RxCounter==RegRxCounter)
- bStuck = TRUE;
-
- priv->RxCounter = RegRxCounter;
-
- return bStuck;
-}
-
-static RESET_TYPE RxCheckStuck(struct r8192_priv *priv)
-{
-
- if(HalRxCheckStuck8190Pci(priv))
- {
- RT_TRACE(COMP_RESET, "RxStuck Condition\n");
- return RESET_TYPE_SILENT;
- }
-
- return RESET_TYPE_NORESET;
-}
-
-static RESET_TYPE rtl819x_check_reset(struct r8192_priv *priv)
-{
- RESET_TYPE RxResetType = RESET_TYPE_NORESET;
- RT_RF_POWER_STATE rfState;
-
- rfState = priv->eRFPowerState;
-
- if (rfState != eRfOff && (priv->ieee80211->iw_mode != IW_MODE_ADHOC)) {
- /*
- * If driver is in the status of firmware download failure,
- * driver skips RF initialization and RF is in turned off state.
- * Driver should check whether Rx stuck and do silent reset. And
- * if driver is in firmware download failure status, driver
- * should initialize RF in the following silent reset procedure
- *
- * Driver should not check RX stuck in IBSS mode because it is
- * required to set Check BSSID in order to send beacon, however,
- * if check BSSID is set, STA cannot hear any packet a all.
- */
- RxResetType = RxCheckStuck(priv);
- }
-
- RT_TRACE(COMP_RESET, "%s(): RxResetType is %d\n", __FUNCTION__, RxResetType);
-
- return RxResetType;
-}
-
-#ifdef ENABLE_IPS
-static void InactivePsWorkItemCallback(struct r8192_priv *priv)
-{
- PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
-
- RT_TRACE(COMP_POWER, "InactivePsWorkItemCallback() --------->\n");
- //
- // This flag "bSwRfProcessing", indicates the status of IPS procedure, should be set if the IPS workitem
- // is really scheduled.
- // The old code, sets this flag before scheduling the IPS workitem and however, at the same time the
- // previous IPS workitem did not end yet, fails to schedule the current workitem. Thus, bSwRfProcessing
- // blocks the IPS procedure of switching RF.
- // By Bruce, 2007-12-25.
- //
- pPSC->bSwRfProcessing = TRUE;
-
- RT_TRACE(COMP_RF, "InactivePsWorkItemCallback(): Set RF to %s.\n",
- pPSC->eInactivePowerState == eRfOff?"OFF":"ON");
-
-
- MgntActSet_RF_State(priv, pPSC->eInactivePowerState, RF_CHANGE_BY_IPS);
-
- //
- // To solve CAM values miss in RF OFF, rewrite CAM values after RF ON. By Bruce, 2007-09-20.
- //
- pPSC->bSwRfProcessing = FALSE;
- RT_TRACE(COMP_POWER, "InactivePsWorkItemCallback() <---------\n");
-}
-
-#ifdef ENABLE_LPS
-/* Change current and default preamble mode. */
-bool MgntActSet_802_11_PowerSaveMode(struct r8192_priv *priv, u8 rtPsMode)
-{
-
- // Currently, we do not change power save mode on IBSS mode.
- if(priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- {
- return false;
- }
-
- //
- // <RJ_NOTE> If we make HW to fill up the PwrMgt bit for us,
- // some AP will not response to our mgnt frames with PwrMgt bit set,
- // e.g. cannot associate the AP.
- // So I commented out it. 2005.02.16, by rcnjko.
- //
-// // Change device's power save mode.
-// Adapter->HalFunc.SetPSModeHandler( Adapter, rtPsMode );
-
- // Update power save mode configured.
- //RT_TRACE(COMP_LPS,"%s(): set ieee->ps = %x\n",__FUNCTION__,rtPsMode);
- if(!priv->ps_force) {
- priv->ieee80211->ps = rtPsMode;
- }
-
- // Awake immediately
- if(priv->ieee80211->sta_sleep != 0 && rtPsMode == IEEE80211_PS_DISABLED)
- {
- // Notify the AP we awke.
- rtl8192_hw_wakeup(priv->ieee80211);
- priv->ieee80211->sta_sleep = 0;
-
- spin_lock(&priv->ieee80211->mgmt_tx_lock);
- printk("LPS leave: notify AP we are awaked ++++++++++ SendNullFunctionData\n");
- ieee80211_sta_ps_send_null_frame(priv->ieee80211, 0);
- spin_unlock(&priv->ieee80211->mgmt_tx_lock);
- }
-
- return true;
-}
-
-/* Enter the leisure power save mode. */
-void LeisurePSEnter(struct ieee80211_device *ieee80211)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
- PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
-
- if(!((priv->ieee80211->iw_mode == IW_MODE_INFRA) &&
- (priv->ieee80211->state == IEEE80211_LINKED)) ||
- (priv->ieee80211->iw_mode == IW_MODE_ADHOC) ||
- (priv->ieee80211->iw_mode == IW_MODE_MASTER))
- return;
-
- if (pPSC->bLeisurePs)
- {
- // Idle for a while if we connect to AP a while ago.
- if(pPSC->LpsIdleCount >= RT_CHECK_FOR_HANG_PERIOD) // 4 Sec
- {
-
- if(priv->ieee80211->ps == IEEE80211_PS_DISABLED)
- {
- MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST);
-
- }
- }
- else
- pPSC->LpsIdleCount++;
- }
-}
-
-
-/* Leave leisure power save mode. */
-void LeisurePSLeave(struct ieee80211_device *ieee80211)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
- PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
-
- if (pPSC->bLeisurePs)
- {
- if(priv->ieee80211->ps != IEEE80211_PS_DISABLED)
- {
- // move to lps_wakecomplete()
- MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_DISABLED);
-
- }
- }
-}
-#endif
-
-
-/* Enter the inactive power save mode. RF will be off */
-void IPSEnter(struct r8192_priv *priv)
-{
- PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
- RT_RF_POWER_STATE rtState;
-
- if (pPSC->bInactivePs)
- {
- rtState = priv->eRFPowerState;
- //
- // Added by Bruce, 2007-12-25.
- // Do not enter IPS in the following conditions:
- // (1) RF is already OFF or Sleep
- // (2) bSwRfProcessing (indicates the IPS is still under going)
- // (3) Connectted (only disconnected can trigger IPS)
- // (4) IBSS (send Beacon)
- // (5) AP mode (send Beacon)
- //
- if (rtState == eRfOn && !pPSC->bSwRfProcessing
- && (priv->ieee80211->state != IEEE80211_LINKED) )
- {
- RT_TRACE(COMP_RF,"IPSEnter(): Turn off RF.\n");
- pPSC->eInactivePowerState = eRfOff;
-// queue_work(priv->priv_wq,&(pPSC->InactivePsWorkItem));
- InactivePsWorkItemCallback(priv);
- }
- }
-}
-
-//
-// Description:
-// Leave the inactive power save mode, RF will be on.
-// 2007.08.17, by shien chang.
-//
-void IPSLeave(struct r8192_priv *priv)
-{
- PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
- RT_RF_POWER_STATE rtState;
-
- if (pPSC->bInactivePs)
- {
- rtState = priv->eRFPowerState;
- if (rtState != eRfOn && !pPSC->bSwRfProcessing && priv->RfOffReason <= RF_CHANGE_BY_IPS)
- {
- RT_TRACE(COMP_POWER, "IPSLeave(): Turn on RF.\n");
- pPSC->eInactivePowerState = eRfOn;
- InactivePsWorkItemCallback(priv);
- }
- }
-}
-
-void IPSLeave_wq(struct work_struct *work)
-{
- struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, ips_leave_wq);
- struct net_device *dev = ieee->dev;
-
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- down(&priv->ieee80211->ips_sem);
- IPSLeave(priv);
- up(&priv->ieee80211->ips_sem);
-}
-
-void ieee80211_ips_leave_wq(struct ieee80211_device *ieee80211)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
- RT_RF_POWER_STATE rtState;
- rtState = priv->eRFPowerState;
-
- if (priv->PowerSaveControl.bInactivePs){
- if(rtState == eRfOff){
- if(priv->RfOffReason > RF_CHANGE_BY_IPS)
- {
- RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n",__FUNCTION__);
- return;
- }
- else{
- printk("=========>%s(): IPSLeave\n",__FUNCTION__);
- queue_work(priv->ieee80211->wq,&priv->ieee80211->ips_leave_wq);
- }
- }
- }
-}
-//added by amy 090331 end
-void ieee80211_ips_leave(struct ieee80211_device *ieee80211)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
- down(&ieee80211->ips_sem);
- IPSLeave(priv);
- up(&ieee80211->ips_sem);
-}
-#endif
-
-static void rtl819x_update_rxcounts(
- struct r8192_priv *priv,
- u32* TotalRxBcnNum,
- u32* TotalRxDataNum
-)
-{
- u16 SlotIndex;
- u8 i;
-
- *TotalRxBcnNum = 0;
- *TotalRxDataNum = 0;
-
- SlotIndex = (priv->ieee80211->LinkDetectInfo.SlotIndex++)%(priv->ieee80211->LinkDetectInfo.SlotNum);
- priv->ieee80211->LinkDetectInfo.RxBcnNum[SlotIndex] = priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod;
- priv->ieee80211->LinkDetectInfo.RxDataNum[SlotIndex] = priv->ieee80211->LinkDetectInfo.NumRecvDataInPeriod;
- for( i=0; i<priv->ieee80211->LinkDetectInfo.SlotNum; i++ ){
- *TotalRxBcnNum += priv->ieee80211->LinkDetectInfo.RxBcnNum[i];
- *TotalRxDataNum += priv->ieee80211->LinkDetectInfo.RxDataNum[i];
- }
-}
-
-
-static void rtl819x_watchdog_wqcallback(struct work_struct *work)
-{
- struct delayed_work *dwork = container_of(work,struct delayed_work,work);
- struct r8192_priv *priv = container_of(dwork,struct r8192_priv,watch_dog_wq);
- struct ieee80211_device* ieee = priv->ieee80211;
- RESET_TYPE ResetType = RESET_TYPE_NORESET;
- bool bBusyTraffic = false;
- bool bEnterPS = false;
-
- if ((!priv->up) || priv->bHwRadioOff)
- return;
-
- if(!priv->up)
- return;
- hal_dm_watchdog(priv);
-#ifdef ENABLE_IPS
- if(ieee->actscanning == false){
- if((ieee->iw_mode == IW_MODE_INFRA) && (ieee->state == IEEE80211_NOLINK) &&
- (priv->eRFPowerState == eRfOn) && !ieee->is_set_key &&
- (!ieee->proto_stoppping) && !ieee->wx_set_enc){
- if (priv->PowerSaveControl.ReturnPoint == IPS_CALLBACK_NONE){
- IPSEnter(priv);
- }
- }
- }
-#endif
- {//to get busy traffic condition
- if(ieee->state == IEEE80211_LINKED)
- {
- if( ieee->LinkDetectInfo.NumRxOkInPeriod> 100 ||
- ieee->LinkDetectInfo.NumTxOkInPeriod> 100 ) {
- bBusyTraffic = true;
- }
-
-#ifdef ENABLE_LPS
- //added by amy for Leisure PS
- if( ((ieee->LinkDetectInfo.NumRxUnicastOkInPeriod + ieee->LinkDetectInfo.NumTxOkInPeriod) > 8 ) ||
- (ieee->LinkDetectInfo.NumRxUnicastOkInPeriod > 2) )
- {
- bEnterPS= false;
- }
- else
- {
- bEnterPS= true;
- }
-
- // LeisurePS only work in infra mode.
- if(bEnterPS)
- {
- LeisurePSEnter(priv->ieee80211);
- }
- else
- {
- LeisurePSLeave(priv->ieee80211);
- }
-#endif
-
- }
- else
- {
-#ifdef ENABLE_LPS
- LeisurePSLeave(priv->ieee80211);
-#endif
- }
-
- ieee->LinkDetectInfo.NumRxOkInPeriod = 0;
- ieee->LinkDetectInfo.NumTxOkInPeriod = 0;
- ieee->LinkDetectInfo.NumRxUnicastOkInPeriod = 0;
- ieee->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
- }
-
-
- //added by amy for AP roaming
- if(ieee->state == IEEE80211_LINKED && ieee->iw_mode == IW_MODE_INFRA)
- {
- u32 TotalRxBcnNum = 0;
- u32 TotalRxDataNum = 0;
-
- rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
- if((TotalRxBcnNum+TotalRxDataNum) == 0)
- {
- if (priv->eRFPowerState == eRfOff)
- RT_TRACE(COMP_ERR,"========>%s()\n",__FUNCTION__);
- printk("===>%s(): AP is power off,connect another one\n",__FUNCTION__);
- // Dot11d_Reset(dev);
- ieee->state = IEEE80211_ASSOCIATING;
- notify_wx_assoc_event(priv->ieee80211);
- RemovePeerTS(priv->ieee80211,priv->ieee80211->current_network.bssid);
- ieee->is_roaming = true;
- ieee->is_set_key = false;
- ieee->link_change(ieee);
- queue_work(ieee->wq, &ieee->associate_procedure_wq);
- }
- }
- ieee->LinkDetectInfo.NumRecvBcnInPeriod=0;
- ieee->LinkDetectInfo.NumRecvDataInPeriod=0;
-
- //check if reset the driver
- if (priv->watchdog_check_reset_cnt++ >= 3 && !ieee->is_roaming &&
- priv->watchdog_last_time != 1)
- {
- ResetType = rtl819x_check_reset(priv);
- priv->watchdog_check_reset_cnt = 3;
- }
- if(!priv->bDisableNormalResetCheck && ResetType == RESET_TYPE_NORMAL)
- {
- priv->ResetProgress = RESET_TYPE_NORMAL;
- RT_TRACE(COMP_RESET,"%s(): NOMAL RESET\n",__FUNCTION__);
- return;
- }
- /* disable silent reset temply 2008.9.11*/
-
- if( ((priv->force_reset) || (!priv->bDisableNormalResetCheck && ResetType==RESET_TYPE_SILENT))) // This is control by OID set in Pomelo
- {
- priv->watchdog_last_time = 1;
- }
- else
- priv->watchdog_last_time = 0;
-
- priv->force_reset = false;
- priv->bForcedSilentReset = false;
- priv->bResetInProgress = false;
- RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
-
-}
-
-void watch_dog_timer_callback(unsigned long data)
-{
- struct r8192_priv *priv = (struct r8192_priv *) data;
- queue_delayed_work(priv->priv_wq,&priv->watch_dog_wq,0);
- mod_timer(&priv->watch_dog_timer, jiffies + MSECS(IEEE80211_WATCH_DOG_TIME));
-
-}
-
-static int _rtl8192_up(struct r8192_priv *priv)
-{
- RT_STATUS init_status = RT_STATUS_SUCCESS;
- struct net_device *dev = priv->ieee80211->dev;
-
- priv->up=1;
- priv->ieee80211->ieee_up=1;
- priv->bdisable_nic = false; //YJ,add,091111
- RT_TRACE(COMP_INIT, "Bringing up iface\n");
-
- init_status = rtl8192_adapter_start(priv);
- if(init_status != RT_STATUS_SUCCESS)
- {
- RT_TRACE(COMP_ERR,"ERR!!! %s(): initialization is failed!\n",__FUNCTION__);
- return -1;
- }
- RT_TRACE(COMP_INIT, "start adapter finished\n");
-
- if (priv->eRFPowerState != eRfOn)
- MgntActSet_RF_State(priv, eRfOn, priv->RfOffReason);
-
- if(priv->ieee80211->state != IEEE80211_LINKED)
- ieee80211_softmac_start_protocol(priv->ieee80211);
- ieee80211_reset_queue(priv->ieee80211);
- watch_dog_timer_callback((unsigned long) priv);
- if(!netif_queue_stopped(dev))
- netif_start_queue(dev);
- else
- netif_wake_queue(dev);
-
- return 0;
-}
-
-
-static int rtl8192_open(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int ret;
-
- down(&priv->wx_sem);
- ret = rtl8192_up(dev);
- up(&priv->wx_sem);
- return ret;
-
-}
-
-
-int rtl8192_up(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->up == 1) return -1;
-
- return _rtl8192_up(priv);
-}
-
-
-static int rtl8192_close(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int ret;
-
- down(&priv->wx_sem);
-
- ret = rtl8192_down(dev);
-
- up(&priv->wx_sem);
-
- return ret;
-
-}
-
-int rtl8192_down(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->up == 0) return -1;
-
-#ifdef ENABLE_LPS
- //LZM for PS-Poll AID issue. 090429
- if(priv->ieee80211->state == IEEE80211_LINKED)
- LeisurePSLeave(priv->ieee80211);
-#endif
-
- priv->up=0;
- priv->ieee80211->ieee_up = 0;
- RT_TRACE(COMP_DOWN, "==========>%s()\n", __FUNCTION__);
-/* FIXME */
- if (!netif_queue_stopped(dev))
- netif_stop_queue(dev);
-
- rtl8192_irq_disable(priv);
- rtl8192_cancel_deferred_work(priv);
- deinit_hal_dm(priv);
- del_timer_sync(&priv->watch_dog_timer);
-
- ieee80211_softmac_stop_protocol(priv->ieee80211,true);
-
- rtl8192_halt_adapter(priv, false);
- memset(&priv->ieee80211->current_network, 0 , offsetof(struct ieee80211_network, list));
-
- RT_TRACE(COMP_DOWN, "<==========%s()\n", __FUNCTION__);
-
- return 0;
-}
-
-
-void rtl8192_commit(struct r8192_priv *priv)
-{
- if (priv->up == 0) return ;
-
-
- ieee80211_softmac_stop_protocol(priv->ieee80211,true);
-
- rtl8192_irq_disable(priv);
- rtl8192_halt_adapter(priv, true);
- _rtl8192_up(priv);
-}
-
-static void rtl8192_restart(struct work_struct *work)
-{
- struct r8192_priv *priv = container_of(work, struct r8192_priv, reset_wq);
-
- down(&priv->wx_sem);
-
- rtl8192_commit(priv);
-
- up(&priv->wx_sem);
-}
-
-static void r8192_set_multicast(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- priv->promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
-}
-
-
-static int r8192_set_mac_adr(struct net_device *dev, void *mac)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct sockaddr *addr = mac;
-
- down(&priv->wx_sem);
-
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-
- schedule_work(&priv->reset_wq);
- up(&priv->wx_sem);
-
- return 0;
-}
-
-static void r8192e_set_hw_key(struct r8192_priv *priv, struct ieee_param *ipw)
-{
- struct ieee80211_device *ieee = priv->ieee80211;
- u8 broadcast_addr[6] = {0xff,0xff,0xff,0xff,0xff,0xff};
- u32 key[4];
-
- if (ipw->u.crypt.set_tx) {
- if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
- ieee->pairwise_key_type = KEY_TYPE_CCMP;
- else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
- ieee->pairwise_key_type = KEY_TYPE_TKIP;
- else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
- if (ipw->u.crypt.key_len == 13)
- ieee->pairwise_key_type = KEY_TYPE_WEP104;
- else if (ipw->u.crypt.key_len == 5)
- ieee->pairwise_key_type = KEY_TYPE_WEP40;
- } else
- ieee->pairwise_key_type = KEY_TYPE_NA;
-
- if (ieee->pairwise_key_type) {
- memcpy(key, ipw->u.crypt.key, 16);
- EnableHWSecurityConfig8192(priv);
- /*
- * We fill both index entry and 4th entry for pairwise
- * key as in IPW interface, adhoc will only get here,
- * so we need index entry for its default key serching!
- */
- setKey(priv, 4, ipw->u.crypt.idx,
- ieee->pairwise_key_type,
- (u8*)ieee->ap_mac_addr, 0, key);
-
- /* LEAP WEP will never set this. */
- if (ieee->auth_mode != 2)
- setKey(priv, ipw->u.crypt.idx, ipw->u.crypt.idx,
- ieee->pairwise_key_type,
- (u8*)ieee->ap_mac_addr, 0, key);
- }
- if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) &&
- ieee->pHTInfo->bCurrentHTSupport) {
- write_nic_byte(priv, 0x173, 1); /* fix aes bug */
- }
- } else {
- memcpy(key, ipw->u.crypt.key, 16);
- if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
- ieee->group_key_type= KEY_TYPE_CCMP;
- else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
- ieee->group_key_type = KEY_TYPE_TKIP;
- else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
- if (ipw->u.crypt.key_len == 13)
- ieee->group_key_type = KEY_TYPE_WEP104;
- else if (ipw->u.crypt.key_len == 5)
- ieee->group_key_type = KEY_TYPE_WEP40;
- } else
- ieee->group_key_type = KEY_TYPE_NA;
-
- if (ieee->group_key_type) {
- setKey(priv, ipw->u.crypt.idx, ipw->u.crypt.idx,
- ieee->group_key_type, broadcast_addr, 0, key);
- }
- }
-}
-
-/* based on ipw2200 driver */
-static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct iwreq *wrq = (struct iwreq *)rq;
- int ret=-1;
- struct iw_point *p = &wrq->u.data;
- struct ieee_param *ipw = NULL;//(struct ieee_param *)wrq->u.data.pointer;
-
- down(&priv->wx_sem);
-
-
- if (p->length < sizeof(struct ieee_param) || !p->pointer){
- ret = -EINVAL;
- goto out;
- }
-
- ipw = kmalloc(p->length, GFP_KERNEL);
- if (ipw == NULL){
- ret = -ENOMEM;
- goto out;
- }
- if (copy_from_user(ipw, p->pointer, p->length)) {
- kfree(ipw);
- ret = -EFAULT;
- goto out;
- }
-
- switch (cmd) {
- case RTL_IOCTL_WPA_SUPPLICANT:
- /* parse here for HW security */
- if (ipw->cmd == IEEE_CMD_SET_ENCRYPTION)
- r8192e_set_hw_key(priv, ipw);
- ret = ieee80211_wpa_supplicant_ioctl(priv->ieee80211, &wrq->u.data);
- break;
-
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- kfree(ipw);
-out:
- up(&priv->wx_sem);
-
- return ret;
-}
-
-static u8 HwRateToMRate90(bool bIsHT, u8 rate)
-{
- u8 ret_rate = 0x02;
-
- if(!bIsHT) {
- switch(rate) {
- case DESC90_RATE1M: ret_rate = MGN_1M; break;
- case DESC90_RATE2M: ret_rate = MGN_2M; break;
- case DESC90_RATE5_5M: ret_rate = MGN_5_5M; break;
- case DESC90_RATE11M: ret_rate = MGN_11M; break;
- case DESC90_RATE6M: ret_rate = MGN_6M; break;
- case DESC90_RATE9M: ret_rate = MGN_9M; break;
- case DESC90_RATE12M: ret_rate = MGN_12M; break;
- case DESC90_RATE18M: ret_rate = MGN_18M; break;
- case DESC90_RATE24M: ret_rate = MGN_24M; break;
- case DESC90_RATE36M: ret_rate = MGN_36M; break;
- case DESC90_RATE48M: ret_rate = MGN_48M; break;
- case DESC90_RATE54M: ret_rate = MGN_54M; break;
-
- default:
- RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n", rate, bIsHT);
- break;
- }
-
- } else {
- switch(rate) {
- case DESC90_RATEMCS0: ret_rate = MGN_MCS0; break;
- case DESC90_RATEMCS1: ret_rate = MGN_MCS1; break;
- case DESC90_RATEMCS2: ret_rate = MGN_MCS2; break;
- case DESC90_RATEMCS3: ret_rate = MGN_MCS3; break;
- case DESC90_RATEMCS4: ret_rate = MGN_MCS4; break;
- case DESC90_RATEMCS5: ret_rate = MGN_MCS5; break;
- case DESC90_RATEMCS6: ret_rate = MGN_MCS6; break;
- case DESC90_RATEMCS7: ret_rate = MGN_MCS7; break;
- case DESC90_RATEMCS8: ret_rate = MGN_MCS8; break;
- case DESC90_RATEMCS9: ret_rate = MGN_MCS9; break;
- case DESC90_RATEMCS10: ret_rate = MGN_MCS10; break;
- case DESC90_RATEMCS11: ret_rate = MGN_MCS11; break;
- case DESC90_RATEMCS12: ret_rate = MGN_MCS12; break;
- case DESC90_RATEMCS13: ret_rate = MGN_MCS13; break;
- case DESC90_RATEMCS14: ret_rate = MGN_MCS14; break;
- case DESC90_RATEMCS15: ret_rate = MGN_MCS15; break;
- case DESC90_RATEMCS32: ret_rate = (0x80|0x20); break;
-
- default:
- RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n",rate, bIsHT);
- break;
- }
- }
-
- return ret_rate;
-}
-
-/* Record the TSF time stamp when receiving a packet */
-static void UpdateRxPktTimeStamp8190(struct r8192_priv *priv, struct ieee80211_rx_stats *stats)
-{
-
- if(stats->bIsAMPDU && !stats->bFirstMPDU) {
- stats->mac_time[0] = priv->LastRxDescTSFLow;
- stats->mac_time[1] = priv->LastRxDescTSFHigh;
- } else {
- priv->LastRxDescTSFLow = stats->mac_time[0];
- priv->LastRxDescTSFHigh = stats->mac_time[1];
- }
-}
-
-static long rtl819x_translate_todbm(u8 signal_strength_index)// 0-100 index.
-{
- long signal_power; // in dBm.
-
- // Translate to dBm (x=0.5y-95).
- signal_power = (long)((signal_strength_index + 1) >> 1);
- signal_power -= 95;
-
- return signal_power;
-}
-
-/* 2008/01/22 MH We can not delcare RSSI/EVM total value of sliding window to
- be a local static. Otherwise, it may increase when we return from S3/S4. The
- value will be kept in memory or disk. We must delcare the value in adapter
- and it will be reinitialized when return from S3/S4. */
-static void rtl8192_process_phyinfo(struct r8192_priv * priv, u8* buffer,struct ieee80211_rx_stats * pprevious_stats, struct ieee80211_rx_stats * pcurrent_stats)
-{
- bool bcheck = false;
- u8 rfpath;
- u32 nspatial_stream, tmp_val;
- static u32 slide_rssi_index=0, slide_rssi_statistics=0;
- static u32 slide_evm_index=0, slide_evm_statistics=0;
- static u32 last_rssi=0, last_evm=0;
- //cosa add for beacon rssi smoothing
- static u32 slide_beacon_adc_pwdb_index=0, slide_beacon_adc_pwdb_statistics=0;
- static u32 last_beacon_adc_pwdb=0;
-
- struct ieee80211_hdr_3addr *hdr;
- u16 sc ;
- unsigned int frag,seq;
- hdr = (struct ieee80211_hdr_3addr *)buffer;
- sc = le16_to_cpu(hdr->seq_ctl);
- frag = WLAN_GET_SEQ_FRAG(sc);
- seq = WLAN_GET_SEQ_SEQ(sc);
-
- //
- // Check whether we should take the previous packet into accounting
- //
- if(!pprevious_stats->bIsAMPDU)
- {
- // if previous packet is not aggregated packet
- bcheck = true;
- }
-
- if(slide_rssi_statistics++ >= PHY_RSSI_SLID_WIN_MAX)
- {
- slide_rssi_statistics = PHY_RSSI_SLID_WIN_MAX;
- last_rssi = priv->stats.slide_signal_strength[slide_rssi_index];
- priv->stats.slide_rssi_total -= last_rssi;
- }
- priv->stats.slide_rssi_total += pprevious_stats->SignalStrength;
-
- priv->stats.slide_signal_strength[slide_rssi_index++] = pprevious_stats->SignalStrength;
- if(slide_rssi_index >= PHY_RSSI_SLID_WIN_MAX)
- slide_rssi_index = 0;
-
- // <1> Showed on UI for user, in dbm
- tmp_val = priv->stats.slide_rssi_total/slide_rssi_statistics;
- priv->stats.signal_strength = rtl819x_translate_todbm((u8)tmp_val);
- pcurrent_stats->rssi = priv->stats.signal_strength;
- //
- // If the previous packet does not match the criteria, neglect it
- //
- if(!pprevious_stats->bPacketMatchBSSID)
- {
- if(!pprevious_stats->bToSelfBA)
- return;
- }
-
- if(!bcheck)
- return;
-
- // <2> Showed on UI for engineering
- // hardware does not provide rssi information for each rf path in CCK
- if(!pprevious_stats->bIsCCK && pprevious_stats->bPacketToSelf)
- {
- for (rfpath = RF90_PATH_A; rfpath < RF90_PATH_C; rfpath++)
- {
- if (!rtl8192_phy_CheckIsLegalRFPath(priv, rfpath))
- continue;
- RT_TRACE(COMP_DBG, "pPreviousstats->RxMIMOSignalStrength[rfpath] = %d\n", pprevious_stats->RxMIMOSignalStrength[rfpath]);
- //Fixed by Jacken 2008-03-20
- if(priv->stats.rx_rssi_percentage[rfpath] == 0)
- {
- priv->stats.rx_rssi_percentage[rfpath] = pprevious_stats->RxMIMOSignalStrength[rfpath];
- }
- if(pprevious_stats->RxMIMOSignalStrength[rfpath] > priv->stats.rx_rssi_percentage[rfpath])
- {
- priv->stats.rx_rssi_percentage[rfpath] =
- ( (priv->stats.rx_rssi_percentage[rfpath]*(Rx_Smooth_Factor-1)) +
- (pprevious_stats->RxMIMOSignalStrength[rfpath])) /(Rx_Smooth_Factor);
- priv->stats.rx_rssi_percentage[rfpath] = priv->stats.rx_rssi_percentage[rfpath] + 1;
- }
- else
- {
- priv->stats.rx_rssi_percentage[rfpath] =
- ( (priv->stats.rx_rssi_percentage[rfpath]*(Rx_Smooth_Factor-1)) +
- (pprevious_stats->RxMIMOSignalStrength[rfpath])) /(Rx_Smooth_Factor);
- }
- RT_TRACE(COMP_DBG, "priv->RxStats.RxRSSIPercentage[rfPath] = %d \n" , priv->stats.rx_rssi_percentage[rfpath]);
- }
- }
-
-
- //
- // Check PWDB.
- //
- //cosa add for beacon rssi smoothing by average.
- if(pprevious_stats->bPacketBeacon)
- {
- /* record the beacon pwdb to the sliding window. */
- if(slide_beacon_adc_pwdb_statistics++ >= PHY_Beacon_RSSI_SLID_WIN_MAX)
- {
- slide_beacon_adc_pwdb_statistics = PHY_Beacon_RSSI_SLID_WIN_MAX;
- last_beacon_adc_pwdb = priv->stats.Slide_Beacon_pwdb[slide_beacon_adc_pwdb_index];
- priv->stats.Slide_Beacon_Total -= last_beacon_adc_pwdb;
- // slide_beacon_adc_pwdb_index, last_beacon_adc_pwdb, Adapter->RxStats.Slide_Beacon_Total);
- }
- priv->stats.Slide_Beacon_Total += pprevious_stats->RxPWDBAll;
- priv->stats.Slide_Beacon_pwdb[slide_beacon_adc_pwdb_index] = pprevious_stats->RxPWDBAll;
- slide_beacon_adc_pwdb_index++;
- if(slide_beacon_adc_pwdb_index >= PHY_Beacon_RSSI_SLID_WIN_MAX)
- slide_beacon_adc_pwdb_index = 0;
- pprevious_stats->RxPWDBAll = priv->stats.Slide_Beacon_Total/slide_beacon_adc_pwdb_statistics;
- if(pprevious_stats->RxPWDBAll >= 3)
- pprevious_stats->RxPWDBAll -= 3;
- }
-
- RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
- pprevious_stats->bIsCCK? "CCK": "OFDM",
- pprevious_stats->RxPWDBAll);
-
- if(pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA)
- {
- if(priv->undecorated_smoothed_pwdb < 0) // initialize
- {
- priv->undecorated_smoothed_pwdb = pprevious_stats->RxPWDBAll;
- }
-
- if(pprevious_stats->RxPWDBAll > (u32)priv->undecorated_smoothed_pwdb)
- {
- priv->undecorated_smoothed_pwdb =
- ( ((priv->undecorated_smoothed_pwdb)*(Rx_Smooth_Factor-1)) +
- (pprevious_stats->RxPWDBAll)) /(Rx_Smooth_Factor);
- priv->undecorated_smoothed_pwdb = priv->undecorated_smoothed_pwdb + 1;
- }
- else
- {
- priv->undecorated_smoothed_pwdb =
- ( ((priv->undecorated_smoothed_pwdb)*(Rx_Smooth_Factor-1)) +
- (pprevious_stats->RxPWDBAll)) /(Rx_Smooth_Factor);
- }
- }
-
- //
- // Check EVM
- //
- /* record the general EVM to the sliding window. */
- if(pprevious_stats->SignalQuality == 0)
- {
- }
- else
- {
- if(pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA){
- if(slide_evm_statistics++ >= PHY_RSSI_SLID_WIN_MAX){
- slide_evm_statistics = PHY_RSSI_SLID_WIN_MAX;
- last_evm = priv->stats.slide_evm[slide_evm_index];
- priv->stats.slide_evm_total -= last_evm;
- }
-
- priv->stats.slide_evm_total += pprevious_stats->SignalQuality;
-
- priv->stats.slide_evm[slide_evm_index++] = pprevious_stats->SignalQuality;
- if(slide_evm_index >= PHY_RSSI_SLID_WIN_MAX)
- slide_evm_index = 0;
-
- // <1> Showed on UI for user, in percentage.
- tmp_val = priv->stats.slide_evm_total/slide_evm_statistics;
- //cosa add 10/11/2007, Showed on UI for user in Windows Vista, for Link quality.
- }
-
- // <2> Showed on UI for engineering
- if(pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA)
- {
- for(nspatial_stream = 0; nspatial_stream<2 ; nspatial_stream++) // 2 spatial stream
- {
- if(pprevious_stats->RxMIMOSignalQuality[nspatial_stream] != -1)
- {
- if(priv->stats.rx_evm_percentage[nspatial_stream] == 0) // initialize
- {
- priv->stats.rx_evm_percentage[nspatial_stream] = pprevious_stats->RxMIMOSignalQuality[nspatial_stream];
- }
- priv->stats.rx_evm_percentage[nspatial_stream] =
- ( (priv->stats.rx_evm_percentage[nspatial_stream]* (Rx_Smooth_Factor-1)) +
- (pprevious_stats->RxMIMOSignalQuality[nspatial_stream]* 1)) / (Rx_Smooth_Factor);
- }
- }
- }
- }
-
-}
-
-static u8 rtl819x_query_rxpwrpercentage(
- char antpower
- )
-{
- if ((antpower <= -100) || (antpower >= 20))
- {
- return 0;
- }
- else if (antpower >= 0)
- {
- return 100;
- }
- else
- {
- return (100+antpower);
- }
-
-}
-
-static u8
-rtl819x_evm_dbtopercentage(
- char value
- )
-{
- char ret_val;
-
- ret_val = value;
-
- if(ret_val >= 0)
- ret_val = 0;
- if(ret_val <= -33)
- ret_val = -33;
- ret_val = 0 - ret_val;
- ret_val*=3;
- if(ret_val == 99)
- ret_val = 100;
- return ret_val;
-}
-
-/* We want good-looking for signal strength/quality */
-static long rtl819x_signal_scale_mapping(long currsig)
-{
- long retsig;
-
- // Step 1. Scale mapping.
- if(currsig >= 61 && currsig <= 100)
- {
- retsig = 90 + ((currsig - 60) / 4);
- }
- else if(currsig >= 41 && currsig <= 60)
- {
- retsig = 78 + ((currsig - 40) / 2);
- }
- else if(currsig >= 31 && currsig <= 40)
- {
- retsig = 66 + (currsig - 30);
- }
- else if(currsig >= 21 && currsig <= 30)
- {
- retsig = 54 + (currsig - 20);
- }
- else if(currsig >= 5 && currsig <= 20)
- {
- retsig = 42 + (((currsig - 5) * 2) / 3);
- }
- else if(currsig == 4)
- {
- retsig = 36;
- }
- else if(currsig == 3)
- {
- retsig = 27;
- }
- else if(currsig == 2)
- {
- retsig = 18;
- }
- else if(currsig == 1)
- {
- retsig = 9;
- }
- else
- {
- retsig = currsig;
- }
-
- return retsig;
-}
-
-static void rtl8192_query_rxphystatus(
- struct r8192_priv * priv,
- struct ieee80211_rx_stats * pstats,
- prx_desc_819x_pci pdesc,
- prx_fwinfo_819x_pci pdrvinfo,
- struct ieee80211_rx_stats * precord_stats,
- bool bpacket_match_bssid,
- bool bpacket_toself,
- bool bPacketBeacon,
- bool bToSelfBA
- )
-{
- //PRT_RFD_STATUS pRtRfdStatus = &(pRfd->Status);
- phy_sts_ofdm_819xpci_t* pofdm_buf;
- phy_sts_cck_819xpci_t * pcck_buf;
- phy_ofdm_rx_status_rxsc_sgien_exintfflag* prxsc;
- u8 *prxpkt;
- u8 i,max_spatial_stream, tmp_rxsnr, tmp_rxevm, rxsc_sgien_exflg;
- char rx_pwr[4], rx_pwr_all=0;
- //long rx_avg_pwr = 0;
- char rx_snrX, rx_evmX;
- u8 evm, pwdb_all;
- u32 RSSI, total_rssi=0;//, total_evm=0;
-// long signal_strength_index = 0;
- u8 is_cck_rate=0;
- u8 rf_rx_num = 0;
-
- is_cck_rate = rx_hal_is_cck_rate(pdrvinfo);
-
- // Record it for next packet processing
- memset(precord_stats, 0, sizeof(struct ieee80211_rx_stats));
- pstats->bPacketMatchBSSID = precord_stats->bPacketMatchBSSID = bpacket_match_bssid;
- pstats->bPacketToSelf = precord_stats->bPacketToSelf = bpacket_toself;
- pstats->bIsCCK = precord_stats->bIsCCK = is_cck_rate;//RX_HAL_IS_CCK_RATE(pDrvInfo);
- pstats->bPacketBeacon = precord_stats->bPacketBeacon = bPacketBeacon;
- pstats->bToSelfBA = precord_stats->bToSelfBA = bToSelfBA;
- /*2007.08.30 requested by SD3 Jerry */
- if (priv->phy_check_reg824 == 0)
- {
- priv->phy_reg824_bit9 = rtl8192_QueryBBReg(priv, rFPGA0_XA_HSSIParameter2, 0x200);
- priv->phy_check_reg824 = 1;
- }
-
-
- prxpkt = (u8*)pdrvinfo;
-
- /* Move pointer to the 16th bytes. Phy status start address. */
- prxpkt += sizeof(rx_fwinfo_819x_pci);
-
- /* Initial the cck and ofdm buffer pointer */
- pcck_buf = (phy_sts_cck_819xpci_t *)prxpkt;
- pofdm_buf = (phy_sts_ofdm_819xpci_t *)prxpkt;
-
- pstats->RxMIMOSignalQuality[0] = -1;
- pstats->RxMIMOSignalQuality[1] = -1;
- precord_stats->RxMIMOSignalQuality[0] = -1;
- precord_stats->RxMIMOSignalQuality[1] = -1;
-
- if(is_cck_rate)
- {
- //
- // (1)Hardware does not provide RSSI for CCK
- //
-
- //
- // (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive)
- //
- u8 report;//, cck_agc_rpt;
-
- if (!priv->phy_reg824_bit9)
- {
- report = pcck_buf->cck_agc_rpt & 0xc0;
- report = report>>6;
- switch(report)
- {
- //Fixed by Jacken from Bryant 2008-03-20
- //Original value is -38 , -26 , -14 , -2
- //Fixed value is -35 , -23 , -11 , 6
- case 0x3:
- rx_pwr_all = -35 - (pcck_buf->cck_agc_rpt & 0x3e);
- break;
- case 0x2:
- rx_pwr_all = -23 - (pcck_buf->cck_agc_rpt & 0x3e);
- break;
- case 0x1:
- rx_pwr_all = -11 - (pcck_buf->cck_agc_rpt & 0x3e);
- break;
- case 0x0:
- rx_pwr_all = 8 - (pcck_buf->cck_agc_rpt & 0x3e);
- break;
- }
- }
- else
- {
- report = pcck_buf->cck_agc_rpt & 0x60;
- report = report>>5;
- switch(report)
- {
- case 0x3:
- rx_pwr_all = -35 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1) ;
- break;
- case 0x2:
- rx_pwr_all = -23 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1);
- break;
- case 0x1:
- rx_pwr_all = -11 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1) ;
- break;
- case 0x0:
- rx_pwr_all = -8 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1) ;
- break;
- }
- }
-
- pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
- pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
- pstats->RecvSignalPower = rx_pwr_all;
-
- //
- // (3) Get Signal Quality (EVM)
- //
- if(bpacket_match_bssid)
- {
- u8 sq;
-
- if(pstats->RxPWDBAll > 40)
- {
- sq = 100;
- }else
- {
- sq = pcck_buf->sq_rpt;
-
- if(pcck_buf->sq_rpt > 64)
- sq = 0;
- else if (pcck_buf->sq_rpt < 20)
- sq = 100;
- else
- sq = ((64-sq) * 100) / 44;
- }
- pstats->SignalQuality = precord_stats->SignalQuality = sq;
- pstats->RxMIMOSignalQuality[0] = precord_stats->RxMIMOSignalQuality[0] = sq;
- pstats->RxMIMOSignalQuality[1] = precord_stats->RxMIMOSignalQuality[1] = -1;
- }
- }
- else
- {
- //
- // (1)Get RSSI for HT rate
- //
- for(i=RF90_PATH_A; i<RF90_PATH_MAX; i++)
- {
- // 2008/01/30 MH we will judge RF RX path now.
- if (priv->brfpath_rxenable[i])
- rf_rx_num++;
- //else
- //continue;
-
- //Fixed by Jacken from Bryant 2008-03-20
- //Original value is 106
- rx_pwr[i] = ((pofdm_buf->trsw_gain_X[i]&0x3F)*2) - 110;
-
- //Get Rx snr value in DB
- tmp_rxsnr = pofdm_buf->rxsnr_X[i];
- rx_snrX = (char)(tmp_rxsnr);
- rx_snrX /= 2;
-
- /* Translate DBM to percentage. */
- RSSI = rtl819x_query_rxpwrpercentage(rx_pwr[i]);
- if (priv->brfpath_rxenable[i])
- total_rssi += RSSI;
-
- /* Record Signal Strength for next packet */
- if(bpacket_match_bssid)
- {
- pstats->RxMIMOSignalStrength[i] =(u8) RSSI;
- precord_stats->RxMIMOSignalStrength[i] =(u8) RSSI;
- }
- }
-
-
- //
- // (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive)
- //
- //Fixed by Jacken from Bryant 2008-03-20
- //Original value is 106
- rx_pwr_all = (((pofdm_buf->pwdb_all ) >> 1 )& 0x7f) -106;
- pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
-
- pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
- pstats->RxPower = precord_stats->RxPower = rx_pwr_all;
- pstats->RecvSignalPower = rx_pwr_all;
- //
- // (3)EVM of HT rate
- //
- if(pdrvinfo->RxHT && pdrvinfo->RxRate>=DESC90_RATEMCS8 &&
- pdrvinfo->RxRate<=DESC90_RATEMCS15)
- max_spatial_stream = 2; //both spatial stream make sense
- else
- max_spatial_stream = 1; //only spatial stream 1 makes sense
-
- for(i=0; i<max_spatial_stream; i++)
- {
- tmp_rxevm = pofdm_buf->rxevm_X[i];
- rx_evmX = (char)(tmp_rxevm);
-
- // Do not use shift operation like "rx_evmX >>= 1" because the compilor of free build environment
- // fill most significant bit to "zero" when doing shifting operation which may change a negative
- // value to positive one, then the dbm value (which is supposed to be negative) is not correct anymore.
- rx_evmX /= 2; //dbm
-
- evm = rtl819x_evm_dbtopercentage(rx_evmX);
- if(bpacket_match_bssid)
- {
- if(i==0) // Fill value in RFD, Get the first spatial stream only
- pstats->SignalQuality = precord_stats->SignalQuality = (u8)(evm & 0xff);
- pstats->RxMIMOSignalQuality[i] = precord_stats->RxMIMOSignalQuality[i] = (u8)(evm & 0xff);
- }
- }
-
-
- /* record rx statistics for debug */
- rxsc_sgien_exflg = pofdm_buf->rxsc_sgien_exflg;
- prxsc = (phy_ofdm_rx_status_rxsc_sgien_exintfflag *)&rxsc_sgien_exflg;
- }
-
- //UI BSS List signal strength(in percentage), make it good looking, from 0~100.
- //It is assigned to the BSS List in GetValueFromBeaconOrProbeRsp().
- if(is_cck_rate)
- {
- pstats->SignalStrength = precord_stats->SignalStrength = (u8)(rtl819x_signal_scale_mapping((long)pwdb_all));//PWDB_ALL;
-
- }
- else
- {
- //pRfd->Status.SignalStrength = pRecordRfd->Status.SignalStrength = (u1Byte)(SignalScaleMapping(total_rssi/=RF90_PATH_MAX));//(u1Byte)(total_rssi/=RF90_PATH_MAX);
- // We can judge RX path number now.
- if (rf_rx_num != 0)
- pstats->SignalStrength = precord_stats->SignalStrength = (u8)(rtl819x_signal_scale_mapping((long)(total_rssi/=rf_rx_num)));
- }
-}
-
-static void
-rtl8192_record_rxdesc_forlateruse(
- struct ieee80211_rx_stats * psrc_stats,
- struct ieee80211_rx_stats * ptarget_stats
-)
-{
- ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
- ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
-}
-
-
-
-static void TranslateRxSignalStuff819xpci(struct r8192_priv *priv,
- struct sk_buff *skb,
- struct ieee80211_rx_stats * pstats,
- prx_desc_819x_pci pdesc,
- prx_fwinfo_819x_pci pdrvinfo)
-{
- // TODO: We must only check packet for current MAC address. Not finish
- bool bpacket_match_bssid, bpacket_toself;
- bool bPacketBeacon=false, bToSelfBA=false;
- struct ieee80211_hdr_3addr *hdr;
- u16 fc,type;
-
- // Get Signal Quality for only RX data queue (but not command queue)
-
- u8* tmp_buf;
- u8 *praddr;
-
- /* Get MAC frame start address. */
- tmp_buf = skb->data;
-
- hdr = (struct ieee80211_hdr_3addr *)tmp_buf;
- fc = le16_to_cpu(hdr->frame_ctl);
- type = WLAN_FC_GET_TYPE(fc);
- praddr = hdr->addr1;
-
- /* Check if the received packet is acceptabe. */
- bpacket_match_bssid = ((IEEE80211_FTYPE_CTL != type) &&
- (!compare_ether_addr(priv->ieee80211->current_network.bssid, (fc & IEEE80211_FCTL_TODS)? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS )? hdr->addr2 : hdr->addr3))
- && (!pstats->bHwError) && (!pstats->bCRC)&& (!pstats->bICV));
- bpacket_toself = bpacket_match_bssid & (!compare_ether_addr(praddr, priv->ieee80211->dev->dev_addr));
-
- if(WLAN_FC_GET_FRAMETYPE(fc)== IEEE80211_STYPE_BEACON)
- {
- bPacketBeacon = true;
- }
- if(WLAN_FC_GET_FRAMETYPE(fc) == IEEE80211_STYPE_BLOCKACK)
- {
- if (!compare_ether_addr(praddr, priv->ieee80211->dev->dev_addr))
- bToSelfBA = true;
- }
-
- //
- // Process PHY information for previous packet (RSSI/PWDB/EVM)
- //
- // Because phy information is contained in the last packet of AMPDU only, so driver
- // should process phy information of previous packet
- rtl8192_process_phyinfo(priv, tmp_buf, &priv->previous_stats, pstats);
- rtl8192_query_rxphystatus(priv, pstats, pdesc, pdrvinfo, &priv->previous_stats, bpacket_match_bssid,
- bpacket_toself ,bPacketBeacon, bToSelfBA);
- rtl8192_record_rxdesc_forlateruse(pstats, &priv->previous_stats);
-
-}
-
-
-static void rtl8192_tx_resume(struct r8192_priv *priv)
-{
- struct ieee80211_device *ieee = priv->ieee80211;
- struct sk_buff *skb;
- int i;
-
- for (i = BK_QUEUE; i < TXCMD_QUEUE; i++) {
- while ((!skb_queue_empty(&ieee->skb_waitQ[i])) &&
- (priv->ieee80211->check_nic_enough_desc(ieee, i) > 0)) {
- /* 1. dequeue the packet from the wait queue */
- skb = skb_dequeue(&ieee->skb_waitQ[i]);
- /* 2. tx the packet directly */
- ieee->softmac_data_hard_start_xmit(skb, ieee, 0);
- }
- }
-}
-
-static void rtl8192_irq_tx_tasklet(unsigned long arg)
-{
- struct r8192_priv *priv = (struct r8192_priv*) arg;
- struct rtl8192_tx_ring *mgnt_ring = &priv->tx_ring[MGNT_QUEUE];
- unsigned long flags;
-
- /* check if we need to report that the management queue is drained */
- spin_lock_irqsave(&priv->irq_th_lock, flags);
-
- if (!skb_queue_len(&mgnt_ring->queue) &&
- priv->ieee80211->ack_tx_to_ieee &&
- rtl8192_is_tx_queue_empty(priv->ieee80211)) {
- priv->ieee80211->ack_tx_to_ieee = 0;
- ieee80211_ps_tx_ack(priv->ieee80211, 1);
- }
-
- spin_unlock_irqrestore(&priv->irq_th_lock, flags);
-
- rtl8192_tx_resume(priv);
-}
-
-/* Record the received data rate */
-static void UpdateReceivedRateHistogramStatistics8190(
- struct r8192_priv *priv,
- struct ieee80211_rx_stats* pstats
- )
-{
- u32 rcvType=1; //0: Total, 1:OK, 2:CRC, 3:ICV
- u32 rateIndex;
- u32 preamble_guardinterval; //1: short preamble/GI, 0: long preamble/GI
-
- if(pstats->bCRC)
- rcvType = 2;
- else if(pstats->bICV)
- rcvType = 3;
-
- if(pstats->bShortPreamble)
- preamble_guardinterval = 1;// short
- else
- preamble_guardinterval = 0;// long
-
- switch(pstats->rate)
- {
- //
- // CCK rate
- //
- case MGN_1M: rateIndex = 0; break;
- case MGN_2M: rateIndex = 1; break;
- case MGN_5_5M: rateIndex = 2; break;
- case MGN_11M: rateIndex = 3; break;
- //
- // Legacy OFDM rate
- //
- case MGN_6M: rateIndex = 4; break;
- case MGN_9M: rateIndex = 5; break;
- case MGN_12M: rateIndex = 6; break;
- case MGN_18M: rateIndex = 7; break;
- case MGN_24M: rateIndex = 8; break;
- case MGN_36M: rateIndex = 9; break;
- case MGN_48M: rateIndex = 10; break;
- case MGN_54M: rateIndex = 11; break;
- //
- // 11n High throughput rate
- //
- case MGN_MCS0: rateIndex = 12; break;
- case MGN_MCS1: rateIndex = 13; break;
- case MGN_MCS2: rateIndex = 14; break;
- case MGN_MCS3: rateIndex = 15; break;
- case MGN_MCS4: rateIndex = 16; break;
- case MGN_MCS5: rateIndex = 17; break;
- case MGN_MCS6: rateIndex = 18; break;
- case MGN_MCS7: rateIndex = 19; break;
- case MGN_MCS8: rateIndex = 20; break;
- case MGN_MCS9: rateIndex = 21; break;
- case MGN_MCS10: rateIndex = 22; break;
- case MGN_MCS11: rateIndex = 23; break;
- case MGN_MCS12: rateIndex = 24; break;
- case MGN_MCS13: rateIndex = 25; break;
- case MGN_MCS14: rateIndex = 26; break;
- case MGN_MCS15: rateIndex = 27; break;
- default: rateIndex = 28; break;
- }
- priv->stats.received_rate_histogram[0][rateIndex]++; //total
- priv->stats.received_rate_histogram[rcvType][rateIndex]++;
-}
-
-static void rtl8192_rx(struct r8192_priv *priv)
-{
- struct ieee80211_hdr_1addr *ieee80211_hdr = NULL;
- bool unicast_packet = false;
- struct ieee80211_rx_stats stats = {
- .signal = 0,
- .noise = -98,
- .rate = 0,
- .freq = IEEE80211_24GHZ_BAND,
- };
- unsigned int count = priv->rxringcount;
- prx_fwinfo_819x_pci pDrvInfo = NULL;
- struct sk_buff *new_skb;
-
- while (count--) {
- rx_desc_819x_pci *pdesc = &priv->rx_ring[priv->rx_idx];//rx descriptor
- struct sk_buff *skb = priv->rx_buf[priv->rx_idx];//rx pkt
-
- if (pdesc->OWN)
- /* wait data to be filled by hardware */
- return;
-
- stats.bICV = pdesc->ICV;
- stats.bCRC = pdesc->CRC32;
- stats.bHwError = pdesc->CRC32 | pdesc->ICV;
-
- stats.Length = pdesc->Length;
- if(stats.Length < 24)
- stats.bHwError |= 1;
-
- if(stats.bHwError) {
- stats.bShift = false;
- goto done;
- }
- pDrvInfo = NULL;
- new_skb = dev_alloc_skb(priv->rxbuffersize);
-
- if (unlikely(!new_skb))
- goto done;
-
- stats.RxDrvInfoSize = pdesc->RxDrvInfoSize;
- stats.RxBufShift = ((pdesc->Shift)&0x03);
- stats.Decrypted = !pdesc->SWDec;
-
- pci_dma_sync_single_for_cpu(priv->pdev,
- *((dma_addr_t *)skb->cb),
- priv->rxbuffersize,
- PCI_DMA_FROMDEVICE);
- skb_put(skb, pdesc->Length);
- pDrvInfo = (rx_fwinfo_819x_pci *)(skb->data + stats.RxBufShift);
- skb_reserve(skb, stats.RxDrvInfoSize + stats.RxBufShift);
-
- stats.rate = HwRateToMRate90((bool)pDrvInfo->RxHT, (u8)pDrvInfo->RxRate);
- stats.bShortPreamble = pDrvInfo->SPLCP;
-
- /* it is debug only. It should be disabled in released driver.
- * 2007.1.11 by Emily
- * */
- UpdateReceivedRateHistogramStatistics8190(priv, &stats);
-
- stats.bIsAMPDU = (pDrvInfo->PartAggr==1);
- stats.bFirstMPDU = (pDrvInfo->PartAggr==1) && (pDrvInfo->FirstAGGR==1);
-
- stats.TimeStampLow = pDrvInfo->TSFL;
- stats.TimeStampHigh = read_nic_dword(priv, TSFR+4);
-
- UpdateRxPktTimeStamp8190(priv, &stats);
-
- //
- // Get Total offset of MPDU Frame Body
- //
- if((stats.RxBufShift + stats.RxDrvInfoSize) > 0)
- stats.bShift = 1;
-
- /* ???? */
- TranslateRxSignalStuff819xpci(priv, skb, &stats, pdesc, pDrvInfo);
-
- /* Rx A-MPDU */
- if(pDrvInfo->FirstAGGR==1 || pDrvInfo->PartAggr == 1)
- RT_TRACE(COMP_RXDESC, "pDrvInfo->FirstAGGR = %d, pDrvInfo->PartAggr = %d\n",
- pDrvInfo->FirstAGGR, pDrvInfo->PartAggr);
- skb_trim(skb, skb->len - 4/*sCrcLng*/);
- /* rx packets statistics */
- ieee80211_hdr = (struct ieee80211_hdr_1addr *)skb->data;
- unicast_packet = false;
-
- if(is_broadcast_ether_addr(ieee80211_hdr->addr1)) {
- //TODO
- }else if(is_multicast_ether_addr(ieee80211_hdr->addr1)){
- //TODO
- }else {
- /* unicast packet */
- unicast_packet = true;
- }
-
- if(!ieee80211_rtl_rx(priv->ieee80211, skb, &stats)){
- dev_kfree_skb_any(skb);
- } else {
- priv->stats.rxok++;
- if(unicast_packet) {
- priv->stats.rxbytesunicast += skb->len;
- }
- }
-
- pci_unmap_single(priv->pdev, *((dma_addr_t *) skb->cb),
- priv->rxbuffersize, PCI_DMA_FROMDEVICE);
-
- skb = new_skb;
- priv->rx_buf[priv->rx_idx] = skb;
- *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE);
-
-done:
- pdesc->BufferAddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
- pdesc->OWN = 1;
- pdesc->Length = priv->rxbuffersize;
- if (priv->rx_idx == priv->rxringcount-1)
- pdesc->EOR = 1;
- priv->rx_idx = (priv->rx_idx + 1) % priv->rxringcount;
- }
-
-}
-
-static void rtl8192_irq_rx_tasklet(unsigned long arg)
-{
- struct r8192_priv *priv = (struct r8192_priv*) arg;
- rtl8192_rx(priv);
- /* unmask RDU */
- write_nic_dword(priv, INTA_MASK, read_nic_dword(priv, INTA_MASK) | IMR_RDU);
-}
-
-static const struct net_device_ops rtl8192_netdev_ops = {
- .ndo_open = rtl8192_open,
- .ndo_stop = rtl8192_close,
- .ndo_tx_timeout = tx_timeout,
- .ndo_do_ioctl = rtl8192_ioctl,
- .ndo_set_multicast_list = r8192_set_multicast,
- .ndo_set_mac_address = r8192_set_mac_adr,
- .ndo_start_xmit = ieee80211_rtl_xmit,
-};
-
-static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- struct net_device *dev = NULL;
- struct r8192_priv *priv= NULL;
- u8 unit = 0;
- int ret = -ENODEV;
- unsigned long pmem_start, pmem_len, pmem_flags;
- u8 revisionid;
-
- RT_TRACE(COMP_INIT,"Configuring chip resources\n");
-
- if( pci_enable_device (pdev) ){
- RT_TRACE(COMP_ERR,"Failed to enable PCI device");
- return -EIO;
- }
-
- pci_set_master(pdev);
- //pci_set_wmi(pdev);
- pci_set_dma_mask(pdev, 0xffffff00ULL);
- pci_set_consistent_dma_mask(pdev,0xffffff00ULL);
- dev = alloc_ieee80211(sizeof(struct r8192_priv));
- if (!dev) {
- ret = -ENOMEM;
- goto fail_free;
- }
-
- pci_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
- priv = ieee80211_priv(dev);
- priv->ieee80211 = netdev_priv(dev);
- priv->pdev=pdev;
- if((pdev->subsystem_vendor == PCI_VENDOR_ID_DLINK)&&(pdev->subsystem_device == 0x3304)){
- priv->ieee80211->bSupportRemoteWakeUp = 1;
- } else
- {
- priv->ieee80211->bSupportRemoteWakeUp = 0;
- }
-
- pmem_start = pci_resource_start(pdev, 1);
- pmem_len = pci_resource_len(pdev, 1);
- pmem_flags = pci_resource_flags (pdev, 1);
-
- if (!(pmem_flags & IORESOURCE_MEM)) {
- RT_TRACE(COMP_ERR, "region #1 not a MMIO resource, aborting\n");
- goto fail;
- }
-
- //DMESG("Memory mapped space @ 0x%08lx ", pmem_start);
- if( ! request_mem_region(pmem_start, pmem_len, RTL819xE_MODULE_NAME)) {
- RT_TRACE(COMP_ERR,"request_mem_region failed!\n");
- goto fail;
- }
-
- priv->mem_start = ioremap_nocache(pmem_start, pmem_len);
- if (!priv->mem_start) {
- RT_TRACE(COMP_ERR,"ioremap failed!\n");
- goto fail1;
- }
-
- dev->mem_start = (unsigned long) priv->mem_start;
- dev->mem_end = (unsigned long) (priv->mem_start +
- pci_resource_len(pdev, 0));
-
- /* We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state */
- pci_write_config_byte(pdev, 0x41, 0x00);
-
-
- pci_read_config_byte(pdev, 0x08, &revisionid);
- /* If the revisionid is 0x10, the device uses rtl8192se. */
- if (pdev->device == 0x8192 && revisionid == 0x10)
- goto fail1;
-
- pci_read_config_byte(pdev, 0x05, &unit);
- pci_write_config_byte(pdev, 0x05, unit & (~0x04));
-
- dev->irq = pdev->irq;
- priv->irq = 0;
-
- dev->netdev_ops = &rtl8192_netdev_ops;
-
- dev->wireless_handlers = &r8192_wx_handlers_def;
- dev->type=ARPHRD_ETHER;
-
- dev->watchdog_timeo = HZ*3;
-
- if (dev_alloc_name(dev, ifname) < 0){
- RT_TRACE(COMP_INIT, "Oops: devname already taken! Trying wlan%%d...\n");
- strcpy(ifname, "wlan%d");
- dev_alloc_name(dev, ifname);
- }
-
- RT_TRACE(COMP_INIT, "Driver probe completed1\n");
- if (rtl8192_init(priv)!=0) {
- RT_TRACE(COMP_ERR, "Initialization failed\n");
- goto fail;
- }
-
- register_netdev(dev);
- RT_TRACE(COMP_INIT, "dev name=======> %s\n",dev->name);
- rtl8192_proc_init_one(priv);
-
-
- RT_TRACE(COMP_INIT, "Driver probe completed\n");
- return 0;
-
-fail1:
-
- if (priv->mem_start) {
- iounmap(priv->mem_start);
- release_mem_region( pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1) );
- }
-
-fail:
- if(dev){
-
- if (priv->irq) {
- free_irq(priv->irq, priv);
- priv->irq = 0;
- }
- free_ieee80211(dev);
- }
-
-fail_free:
- pci_disable_device(pdev);
-
- DMESG("wlan driver load failed\n");
- pci_set_drvdata(pdev, NULL);
- return ret;
-
-}
-
-/* detach all the work and timer structure declared or inititialized
- * in r8192_init function.
- * */
-static void rtl8192_cancel_deferred_work(struct r8192_priv* priv)
-{
- /* call cancel_work_sync instead of cancel_delayed_work if and only if Linux_version_code
- * is or is newer than 2.6.20 and work structure is defined to be struct work_struct.
- * Otherwise call cancel_delayed_work is enough.
- * FIXME (2.6.20 should 2.6.22, work_struct should not cancel)
- * */
- cancel_delayed_work(&priv->watch_dog_wq);
- cancel_delayed_work(&priv->update_beacon_wq);
- cancel_delayed_work(&priv->ieee80211->hw_wakeup_wq);
- cancel_delayed_work(&priv->gpio_change_rf_wq);
- cancel_work_sync(&priv->reset_wq);
- cancel_work_sync(&priv->qos_activate);
-}
-
-
-static void __devexit rtl8192_pci_disconnect(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct r8192_priv *priv ;
- u32 i;
-
- if (dev) {
-
- unregister_netdev(dev);
-
- priv = ieee80211_priv(dev);
-
- rtl8192_proc_remove_one(priv);
-
- rtl8192_down(dev);
- if (priv->pFirmware)
- {
- vfree(priv->pFirmware);
- priv->pFirmware = NULL;
- }
- destroy_workqueue(priv->priv_wq);
-
- /* free tx/rx rings */
- rtl8192_free_rx_ring(priv);
- for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
- rtl8192_free_tx_ring(priv, i);
-
- if (priv->irq) {
- printk("Freeing irq %d\n", priv->irq);
- free_irq(priv->irq, priv);
- priv->irq = 0;
- }
-
- if (priv->mem_start) {
- iounmap(priv->mem_start);
- release_mem_region( pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1) );
- }
-
- free_ieee80211(dev);
- }
-
- pci_disable_device(pdev);
- RT_TRACE(COMP_DOWN, "wlan driver removed\n");
-}
-
-extern int ieee80211_rtl_init(void);
-extern void ieee80211_rtl_exit(void);
-
-static int __init rtl8192_pci_module_init(void)
-{
- int retval;
-
- retval = ieee80211_rtl_init();
- if (retval)
- return retval;
-
- printk(KERN_INFO "\nLinux kernel driver for RTL8192 based WLAN cards\n");
- printk(KERN_INFO "Copyright (c) 2007-2008, Realsil Wlan\n");
- RT_TRACE(COMP_INIT, "Initializing module\n");
- rtl8192_proc_module_init();
- if(0!=pci_register_driver(&rtl8192_pci_driver))
- {
- DMESG("No device found");
- /*pci_unregister_driver (&rtl8192_pci_driver);*/
- return -ENODEV;
- }
- return 0;
-}
-
-
-static void __exit rtl8192_pci_module_exit(void)
-{
- pci_unregister_driver(&rtl8192_pci_driver);
-
- RT_TRACE(COMP_DOWN, "Exiting\n");
- rtl8192_proc_module_remove();
- ieee80211_rtl_exit();
-}
-
-static irqreturn_t rtl8192_interrupt(int irq, void *param)
-{
- struct r8192_priv *priv = param;
- struct net_device *dev = priv->ieee80211->dev;
- unsigned long flags;
- u32 inta;
- irqreturn_t ret = IRQ_HANDLED;
-
- spin_lock_irqsave(&priv->irq_th_lock, flags);
-
- /* ISR: 4bytes */
-
- inta = read_nic_dword(priv, ISR); /* & priv->IntrMask; */
- write_nic_dword(priv, ISR, inta); /* reset int situation */
-
- if (!inta) {
- /*
- * most probably we can safely return IRQ_NONE,
- * but for now is better to avoid problems
- */
- goto out_unlock;
- }
-
- if (inta == 0xffff) {
- /* HW disappared */
- goto out_unlock;
- }
-
- if (!netif_running(dev))
- goto out_unlock;
-
- if (inta & IMR_TBDOK) {
- RT_TRACE(COMP_INTR, "beacon ok interrupt!\n");
- rtl8192_tx_isr(priv, BEACON_QUEUE);
- priv->stats.txbeaconokint++;
- }
-
- if (inta & IMR_TBDER) {
- RT_TRACE(COMP_INTR, "beacon ok interrupt!\n");
- rtl8192_tx_isr(priv, BEACON_QUEUE);
- priv->stats.txbeaconerr++;
- }
-
- if (inta & IMR_MGNTDOK ) {
- RT_TRACE(COMP_INTR, "Manage ok interrupt!\n");
- priv->stats.txmanageokint++;
- rtl8192_tx_isr(priv, MGNT_QUEUE);
- }
-
- if (inta & IMR_COMDOK)
- {
- priv->stats.txcmdpktokint++;
- rtl8192_tx_isr(priv, TXCMD_QUEUE);
- }
-
- if (inta & IMR_ROK) {
- priv->stats.rxint++;
- tasklet_schedule(&priv->irq_rx_tasklet);
- }
-
- if (inta & IMR_BcnInt) {
- RT_TRACE(COMP_INTR, "prepare beacon for interrupt!\n");
- tasklet_schedule(&priv->irq_prepare_beacon_tasklet);
- }
-
- if (inta & IMR_RDU) {
- RT_TRACE(COMP_INTR, "rx descriptor unavailable!\n");
- priv->stats.rxrdu++;
- /* reset int situation */
- write_nic_dword(priv, INTA_MASK, read_nic_dword(priv, INTA_MASK) & ~IMR_RDU);
- tasklet_schedule(&priv->irq_rx_tasklet);
- }
-
- if (inta & IMR_RXFOVW) {
- RT_TRACE(COMP_INTR, "rx overflow !\n");
- priv->stats.rxoverflow++;
- tasklet_schedule(&priv->irq_rx_tasklet);
- }
-
- if (inta & IMR_TXFOVW)
- priv->stats.txoverflow++;
-
- if (inta & IMR_BKDOK) {
- RT_TRACE(COMP_INTR, "BK Tx OK interrupt!\n");
- priv->stats.txbkokint++;
- priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
- rtl8192_tx_isr(priv, BK_QUEUE);
- }
-
- if (inta & IMR_BEDOK) {
- RT_TRACE(COMP_INTR, "BE TX OK interrupt!\n");
- priv->stats.txbeokint++;
- priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
- rtl8192_tx_isr(priv, BE_QUEUE);
- }
-
- if (inta & IMR_VIDOK) {
- RT_TRACE(COMP_INTR, "VI TX OK interrupt!\n");
- priv->stats.txviokint++;
- priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
- rtl8192_tx_isr(priv, VI_QUEUE);
- }
-
- if (inta & IMR_VODOK) {
- priv->stats.txvookint++;
- priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
- rtl8192_tx_isr(priv, VO_QUEUE);
- }
-
-out_unlock:
- spin_unlock_irqrestore(&priv->irq_th_lock, flags);
-
- return ret;
-}
-
-void EnableHWSecurityConfig8192(struct r8192_priv *priv)
-{
- u8 SECR_value = 0x0;
- struct ieee80211_device* ieee = priv->ieee80211;
-
- SECR_value = SCR_TxEncEnable | SCR_RxDecEnable;
-
- if (((KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->pairwise_key_type)) && (priv->ieee80211->auth_mode != 2))
- {
- SECR_value |= SCR_RxUseDK;
- SECR_value |= SCR_TxUseDK;
- }
- else if ((ieee->iw_mode == IW_MODE_ADHOC) && (ieee->pairwise_key_type & (KEY_TYPE_CCMP | KEY_TYPE_TKIP)))
- {
- SECR_value |= SCR_RxUseDK;
- SECR_value |= SCR_TxUseDK;
- }
-
- //add HWSec active enable here.
-//default using hwsec. when peer AP is in N mode only and pairwise_key_type is none_aes(which HT_IOT_ACT_PURE_N_MODE indicates it), use software security. when peer AP is in b,g,n mode mixed and pairwise_key_type is none_aes, use g mode hw security. WB on 2008.7.4
- ieee->hwsec_active = 1;
-
- if ((ieee->pHTInfo->IOTAction&HT_IOT_ACT_PURE_N_MODE) || !hwwep)//!ieee->hwsec_support) //add hwsec_support flag to totol control hw_sec on/off
- {
- ieee->hwsec_active = 0;
- SECR_value &= ~SCR_RxDecEnable;
- }
-
- RT_TRACE(COMP_SEC,"%s:, hwsec:%d, pairwise_key:%d, SECR_value:%x\n", __FUNCTION__,
- ieee->hwsec_active, ieee->pairwise_key_type, SECR_value);
- {
- write_nic_byte(priv, SECR, SECR_value);//SECR_value | SCR_UseDK );
- }
-
-}
-#define TOTAL_CAM_ENTRY 32
-//#define CAM_CONTENT_COUNT 8
-void setKey(struct r8192_priv *priv, u8 EntryNo, u8 KeyIndex, u16 KeyType,
- const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent)
-{
- u32 TargetCommand = 0;
- u32 TargetContent = 0;
- u16 usConfig = 0;
- u8 i;
-#ifdef ENABLE_IPS
- RT_RF_POWER_STATE rtState;
-
- rtState = priv->eRFPowerState;
- if (priv->PowerSaveControl.bInactivePs){
- if(rtState == eRfOff){
- if(priv->RfOffReason > RF_CHANGE_BY_IPS)
- {
- RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n",__FUNCTION__);
- //up(&priv->wx_sem);
- return ;
- }
- else{
- down(&priv->ieee80211->ips_sem);
- IPSLeave(priv);
- up(&priv->ieee80211->ips_sem);
- }
- }
- }
- priv->ieee80211->is_set_key = true;
-#endif
- if (EntryNo >= TOTAL_CAM_ENTRY)
- RT_TRACE(COMP_ERR, "cam entry exceeds in setKey()\n");
-
- RT_TRACE(COMP_SEC, "====>to setKey(), priv:%p, EntryNo:%d, KeyIndex:%d, KeyType:%d, MacAddr%pM\n", priv, EntryNo, KeyIndex, KeyType, MacAddr);
-
- if (DefaultKey)
- usConfig |= BIT15 | (KeyType<<2);
- else
- usConfig |= BIT15 | (KeyType<<2) | KeyIndex;
-// usConfig |= BIT15 | (KeyType<<2) | (DefaultKey<<5) | KeyIndex;
-
-
- for(i=0 ; i<CAM_CONTENT_COUNT; i++){
- TargetCommand = i+CAM_CONTENT_COUNT*EntryNo;
- TargetCommand |= BIT31|BIT16;
-
- if(i==0){//MAC|Config
- TargetContent = (u32)(*(MacAddr+0)) << 16|
- (u32)(*(MacAddr+1)) << 24|
- (u32)usConfig;
-
- write_nic_dword(priv, WCAMI, TargetContent);
- write_nic_dword(priv, RWCAM, TargetCommand);
- }
- else if(i==1){//MAC
- TargetContent = (u32)(*(MacAddr+2)) |
- (u32)(*(MacAddr+3)) << 8|
- (u32)(*(MacAddr+4)) << 16|
- (u32)(*(MacAddr+5)) << 24;
- write_nic_dword(priv, WCAMI, TargetContent);
- write_nic_dword(priv, RWCAM, TargetCommand);
- }
- else { //Key Material
- if(KeyContent != NULL)
- {
- write_nic_dword(priv, WCAMI, (u32)(*(KeyContent+i-2)) );
- write_nic_dword(priv, RWCAM, TargetCommand);
- }
- }
- }
- RT_TRACE(COMP_SEC,"=========>after set key, usconfig:%x\n", usConfig);
-}
-
-bool NicIFEnableNIC(struct r8192_priv *priv)
-{
- RT_STATUS init_status = RT_STATUS_SUCCESS;
- PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
-
- //YJ,add,091109
- if (priv->up == 0){
- RT_TRACE(COMP_ERR, "ERR!!! %s(): Driver is already down!\n",__FUNCTION__);
- priv->bdisable_nic = false; //YJ,add,091111
- return false;
- }
- // <1> Reset memory: descriptor, buffer,..
- //NicIFResetMemory(Adapter);
-
- // <2> Enable Adapter
- //priv->bfirst_init = true;
- init_status = rtl8192_adapter_start(priv);
- if (init_status != RT_STATUS_SUCCESS) {
- RT_TRACE(COMP_ERR,"ERR!!! %s(): initialization is failed!\n",__FUNCTION__);
- priv->bdisable_nic = false; //YJ,add,091111
- return -1;
- }
- RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
- //priv->bfirst_init = false;
-
- // <3> Enable Interrupt
- rtl8192_irq_enable(priv);
- priv->bdisable_nic = false;
-
- return (init_status == RT_STATUS_SUCCESS);
-}
-
-bool NicIFDisableNIC(struct r8192_priv *priv)
-{
- bool status = true;
- u8 tmp_state = 0;
- // <1> Disable Interrupt
-
- priv->bdisable_nic = true; //YJ,move,091109
- tmp_state = priv->ieee80211->state;
-
- ieee80211_softmac_stop_protocol(priv->ieee80211, false);
-
- priv->ieee80211->state = tmp_state;
- rtl8192_cancel_deferred_work(priv);
- rtl8192_irq_disable(priv);
- // <2> Stop all timer
-
- // <3> Disable Adapter
- rtl8192_halt_adapter(priv, false);
-// priv->bdisable_nic = true;
-
- return status;
-}
-
-module_init(rtl8192_pci_module_init);
-module_exit(rtl8192_pci_module_exit);
diff --git a/drivers/staging/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/r8192E_dev.c
new file mode 100644
index 00000000000..808aab6fa5e
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8192E_dev.c
@@ -0,0 +1,2395 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#include "rtl_core.h"
+#include "r8192E_phy.h"
+#include "r8192E_phyreg.h"
+#include "r8190P_rtl8256.h"
+#include "r8192E_cmdpkt.h"
+#include "rtl_dm.h"
+#include "rtl_wx.h"
+
+extern int WDCAPARA_ADD[];
+
+void rtl8192e_start_beacon(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct rtllib_network *net = &priv->rtllib->current_network;
+ u16 BcnTimeCfg = 0;
+ u16 BcnCW = 6;
+ u16 BcnIFS = 0xf;
+
+ DMESG("Enabling beacon TX");
+ rtl8192_irq_disable(dev);
+
+ write_nic_word(dev, ATIMWND, 2);
+
+ write_nic_word(dev, BCN_INTERVAL, net->beacon_interval);
+ write_nic_word(dev, BCN_DRV_EARLY_INT, 10);
+ write_nic_word(dev, BCN_DMATIME, 256);
+
+ write_nic_byte(dev, BCN_ERR_THRESH, 100);
+
+ BcnTimeCfg |= BcnCW<<BCN_TCFG_CW_SHIFT;
+ BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
+ write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
+ rtl8192_irq_enable(dev);
+}
+
+static void rtl8192e_update_msr(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 msr;
+ enum led_ctl_mode LedAction = LED_CTL_NO_LINK;
+ msr = read_nic_byte(dev, MSR);
+ msr &= ~MSR_LINK_MASK;
+
+ switch (priv->rtllib->iw_mode) {
+ case IW_MODE_INFRA:
+ if (priv->rtllib->state == RTLLIB_LINKED)
+ msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT);
+ else
+ msr |= (MSR_LINK_NONE << MSR_LINK_SHIFT);
+ LedAction = LED_CTL_LINK;
+ break;
+ case IW_MODE_ADHOC:
+ if (priv->rtllib->state == RTLLIB_LINKED)
+ msr |= (MSR_LINK_ADHOC << MSR_LINK_SHIFT);
+ else
+ msr |= (MSR_LINK_NONE << MSR_LINK_SHIFT);
+ break;
+ case IW_MODE_MASTER:
+ if (priv->rtllib->state == RTLLIB_LINKED)
+ msr |= (MSR_LINK_MASTER << MSR_LINK_SHIFT);
+ else
+ msr |= (MSR_LINK_NONE << MSR_LINK_SHIFT);
+ break;
+ default:
+ break;
+ }
+
+ write_nic_byte(dev, MSR, msr);
+ if (priv->rtllib->LedControlHandler)
+ priv->rtllib->LedControlHandler(dev, LedAction);
+}
+
+void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ switch (variable) {
+ case HW_VAR_BSSID:
+ write_nic_dword(dev, BSSIDR, ((u32 *)(val))[0]);
+ write_nic_word(dev, BSSIDR+2, ((u16 *)(val+2))[0]);
+ break;
+
+ case HW_VAR_MEDIA_STATUS:
+ {
+ enum rt_op_mode OpMode = *((enum rt_op_mode *)(val));
+ enum led_ctl_mode LedAction = LED_CTL_NO_LINK;
+ u8 btMsr = read_nic_byte(dev, MSR);
+
+ btMsr &= 0xfc;
+
+ switch (OpMode) {
+ case RT_OP_MODE_INFRASTRUCTURE:
+ btMsr |= MSR_INFRA;
+ LedAction = LED_CTL_LINK;
+ break;
+
+ case RT_OP_MODE_IBSS:
+ btMsr |= MSR_ADHOC;
+ break;
+
+ case RT_OP_MODE_AP:
+ btMsr |= MSR_AP;
+ LedAction = LED_CTL_LINK;
+ break;
+
+ default:
+ btMsr |= MSR_NOLINK;
+ break;
+ }
+
+ write_nic_byte(dev, MSR, btMsr);
+
+ }
+ break;
+
+ case HW_VAR_CECHK_BSSID:
+ {
+ u32 RegRCR, Type;
+
+ Type = ((u8 *)(val))[0];
+ RegRCR = read_nic_dword(dev, RCR);
+ priv->ReceiveConfig = RegRCR;
+
+ if (Type == true)
+ RegRCR |= (RCR_CBSSID);
+ else if (Type == false)
+ RegRCR &= (~RCR_CBSSID);
+
+ write_nic_dword(dev, RCR, RegRCR);
+ priv->ReceiveConfig = RegRCR;
+
+ }
+ break;
+
+ case HW_VAR_SLOT_TIME:
+
+ priv->slot_time = val[0];
+ write_nic_byte(dev, SLOT_TIME, val[0]);
+
+ break;
+
+ case HW_VAR_ACK_PREAMBLE:
+ {
+ u32 regTmp;
+ priv->short_preamble = (bool)(*(u8 *)val);
+ regTmp = priv->basic_rate;
+ if (priv->short_preamble)
+ regTmp |= BRSR_AckShortPmb;
+ write_nic_dword(dev, RRSR, regTmp);
+ break;
+ }
+
+ case HW_VAR_CPU_RST:
+ write_nic_dword(dev, CPU_GEN, ((u32 *)(val))[0]);
+ break;
+
+ case HW_VAR_AC_PARAM:
+ {
+ u8 pAcParam = *((u8 *)val);
+ u32 eACI = pAcParam;
+ u8 u1bAIFS;
+ u32 u4bAcParam;
+ u8 mode = priv->rtllib->mode;
+ struct rtllib_qos_parameters *qos_parameters =
+ &priv->rtllib->current_network.qos_data.parameters;
+
+ u1bAIFS = qos_parameters->aifs[pAcParam] *
+ ((mode&(IEEE_G|IEEE_N_24G)) ? 9 : 20) + aSifsTime;
+
+ dm_init_edca_turbo(dev);
+
+ u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[pAcParam])) <<
+ AC_PARAM_TXOP_LIMIT_OFFSET) |
+ (((u32)(qos_parameters->cw_max[pAcParam])) <<
+ AC_PARAM_ECW_MAX_OFFSET) |
+ (((u32)(qos_parameters->cw_min[pAcParam])) <<
+ AC_PARAM_ECW_MIN_OFFSET) |
+ (((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET));
+
+ RT_TRACE(COMP_DBG, "%s():HW_VAR_AC_PARAM eACI:%x:%x\n",
+ __func__, eACI, u4bAcParam);
+ switch (eACI) {
+ case AC1_BK:
+ write_nic_dword(dev, EDCAPARA_BK, u4bAcParam);
+ break;
+
+ case AC0_BE:
+ write_nic_dword(dev, EDCAPARA_BE, u4bAcParam);
+ break;
+
+ case AC2_VI:
+ write_nic_dword(dev, EDCAPARA_VI, u4bAcParam);
+ break;
+
+ case AC3_VO:
+ write_nic_dword(dev, EDCAPARA_VO, u4bAcParam);
+ break;
+
+ default:
+ printk(KERN_INFO "SetHwReg8185(): invalid ACI: %d !\n",
+ eACI);
+ break;
+ }
+ priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACM_CTRL,
+ (u8 *)(&pAcParam));
+ break;
+ }
+
+ case HW_VAR_ACM_CTRL:
+ {
+ struct rtllib_qos_parameters *qos_parameters =
+ &priv->rtllib->current_network.qos_data.parameters;
+ u8 pAcParam = *((u8 *)val);
+ u32 eACI = pAcParam;
+ union aci_aifsn *pAciAifsn = (union aci_aifsn *) &
+ (qos_parameters->aifs[0]);
+ u8 acm = pAciAifsn->f.acm;
+ u8 AcmCtrl = read_nic_byte(dev, AcmHwCtrl);
+
+ RT_TRACE(COMP_DBG, "===========>%s():HW_VAR_ACM_CTRL:%x\n",
+ __func__, eACI);
+ AcmCtrl = AcmCtrl | ((priv->AcmMethod == 2) ? 0x0 : 0x1);
+
+ if (acm) {
+ switch (eACI) {
+ case AC0_BE:
+ AcmCtrl |= AcmHw_BeqEn;
+ break;
+
+ case AC2_VI:
+ AcmCtrl |= AcmHw_ViqEn;
+ break;
+
+ case AC3_VO:
+ AcmCtrl |= AcmHw_VoqEn;
+ break;
+
+ default:
+ RT_TRACE(COMP_QOS, "SetHwReg8185(): [HW_VAR_"
+ "ACM_CTRL] acm set failed: eACI is "
+ "%d\n", eACI);
+ break;
+ }
+ } else {
+ switch (eACI) {
+ case AC0_BE:
+ AcmCtrl &= (~AcmHw_BeqEn);
+ break;
+
+ case AC2_VI:
+ AcmCtrl &= (~AcmHw_ViqEn);
+ break;
+
+ case AC3_VO:
+ AcmCtrl &= (~AcmHw_BeqEn);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ RT_TRACE(COMP_QOS, "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write"
+ " 0x%X\n", AcmCtrl);
+ write_nic_byte(dev, AcmHwCtrl, AcmCtrl);
+ break;
+ }
+
+ case HW_VAR_SIFS:
+ write_nic_byte(dev, SIFS, val[0]);
+ write_nic_byte(dev, SIFS+1, val[0]);
+ break;
+
+ case HW_VAR_RF_TIMING:
+ {
+ u8 Rf_Timing = *((u8 *)val);
+ write_nic_byte(dev, rFPGA0_RFTiming1, Rf_Timing);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+}
+
+static void rtl8192_read_eeprom_info(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ u8 tempval;
+ u8 ICVer8192, ICVer8256;
+ u16 i, usValue, IC_Version;
+ u16 EEPROMId;
+ u8 bMac_Tmp_Addr[6] = {0x00, 0xe0, 0x4c, 0x00, 0x00, 0x01};
+ RT_TRACE(COMP_INIT, "====> rtl8192_read_eeprom_info\n");
+
+ EEPROMId = eprom_read(dev, 0);
+ if (EEPROMId != RTL8190_EEPROM_ID) {
+ RT_TRACE(COMP_ERR, "EEPROM ID is invalid:%x, %x\n",
+ EEPROMId, RTL8190_EEPROM_ID);
+ priv->AutoloadFailFlag = true;
+ } else {
+ priv->AutoloadFailFlag = false;
+ }
+
+ if (!priv->AutoloadFailFlag) {
+ priv->eeprom_vid = eprom_read(dev, (EEPROM_VID >> 1));
+ priv->eeprom_did = eprom_read(dev, (EEPROM_DID >> 1));
+
+ usValue = eprom_read(dev, (u16)(EEPROM_Customer_ID>>1)) >> 8;
+ priv->eeprom_CustomerID = (u8)(usValue & 0xff);
+ usValue = eprom_read(dev, (EEPROM_ICVersion_ChannelPlan>>1));
+ priv->eeprom_ChannelPlan = usValue&0xff;
+ IC_Version = ((usValue&0xff00)>>8);
+
+ ICVer8192 = (IC_Version&0xf);
+ ICVer8256 = ((IC_Version&0xf0)>>4);
+ RT_TRACE(COMP_INIT, "\nICVer8192 = 0x%x\n", ICVer8192);
+ RT_TRACE(COMP_INIT, "\nICVer8256 = 0x%x\n", ICVer8256);
+ if (ICVer8192 == 0x2) {
+ if (ICVer8256 == 0x5)
+ priv->card_8192_version = VERSION_8190_BE;
+ }
+ switch (priv->card_8192_version) {
+ case VERSION_8190_BD:
+ case VERSION_8190_BE:
+ break;
+ default:
+ priv->card_8192_version = VERSION_8190_BD;
+ break;
+ }
+ RT_TRACE(COMP_INIT, "\nIC Version = 0x%x\n",
+ priv->card_8192_version);
+ } else {
+ priv->card_8192_version = VERSION_8190_BD;
+ priv->eeprom_vid = 0;
+ priv->eeprom_did = 0;
+ priv->eeprom_CustomerID = 0;
+ priv->eeprom_ChannelPlan = 0;
+ RT_TRACE(COMP_INIT, "\nIC Version = 0x%x\n", 0xff);
+ }
+
+ RT_TRACE(COMP_INIT, "EEPROM VID = 0x%4x\n", priv->eeprom_vid);
+ RT_TRACE(COMP_INIT, "EEPROM DID = 0x%4x\n", priv->eeprom_did);
+ RT_TRACE(COMP_INIT, "EEPROM Customer ID: 0x%2x\n",
+ priv->eeprom_CustomerID);
+
+ if (!priv->AutoloadFailFlag) {
+ for (i = 0; i < 6; i += 2) {
+ usValue = eprom_read(dev,
+ (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1));
+ *(u16 *)(&dev->dev_addr[i]) = usValue;
+ }
+ } else {
+ memcpy(dev->dev_addr, bMac_Tmp_Addr, 6);
+ }
+
+ RT_TRACE(COMP_INIT, "Permanent Address = %pM\n",
+ dev->dev_addr);
+
+ if (priv->card_8192_version > VERSION_8190_BD)
+ priv->bTXPowerDataReadFromEEPORM = true;
+ else
+ priv->bTXPowerDataReadFromEEPORM = false;
+
+ priv->rf_type = RTL819X_DEFAULT_RF_TYPE;
+
+ if (priv->card_8192_version > VERSION_8190_BD) {
+ if (!priv->AutoloadFailFlag) {
+ tempval = (eprom_read(dev, (EEPROM_RFInd_PowerDiff >>
+ 1))) & 0xff;
+ priv->EEPROMLegacyHTTxPowerDiff = tempval & 0xf;
+
+ if (tempval&0x80)
+ priv->rf_type = RF_1T2R;
+ else
+ priv->rf_type = RF_2T4R;
+ } else {
+ priv->EEPROMLegacyHTTxPowerDiff = 0x04;
+ }
+ RT_TRACE(COMP_INIT, "EEPROMLegacyHTTxPowerDiff = %d\n",
+ priv->EEPROMLegacyHTTxPowerDiff);
+
+ if (!priv->AutoloadFailFlag)
+ priv->EEPROMThermalMeter = (u8)(((eprom_read(dev,
+ (EEPROM_ThermalMeter>>1))) &
+ 0xff00)>>8);
+ else
+ priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
+ RT_TRACE(COMP_INIT, "ThermalMeter = %d\n",
+ priv->EEPROMThermalMeter);
+ priv->TSSI_13dBm = priv->EEPROMThermalMeter * 100;
+
+ if (priv->epromtype == EEPROM_93C46) {
+ if (!priv->AutoloadFailFlag) {
+ usValue = eprom_read(dev,
+ (EEPROM_TxPwDiff_CrystalCap >> 1));
+ priv->EEPROMAntPwDiff = (usValue&0x0fff);
+ priv->EEPROMCrystalCap = (u8)((usValue & 0xf000)
+ >> 12);
+ } else {
+ priv->EEPROMAntPwDiff =
+ EEPROM_Default_AntTxPowerDiff;
+ priv->EEPROMCrystalCap =
+ EEPROM_Default_TxPwDiff_CrystalCap;
+ }
+ RT_TRACE(COMP_INIT, "EEPROMAntPwDiff = %d\n",
+ priv->EEPROMAntPwDiff);
+ RT_TRACE(COMP_INIT, "EEPROMCrystalCap = %d\n",
+ priv->EEPROMCrystalCap);
+
+ for (i = 0; i < 14; i += 2) {
+ if (!priv->AutoloadFailFlag)
+ usValue = eprom_read(dev,
+ (u16)((EEPROM_TxPwIndex_CCK +
+ i) >> 1));
+ else
+ usValue = EEPROM_Default_TxPower;
+ *((u16 *)(&priv->EEPROMTxPowerLevelCCK[i])) =
+ usValue;
+ RT_TRACE(COMP_INIT, "CCK Tx Power Level, Index"
+ " %d = 0x%02x\n", i,
+ priv->EEPROMTxPowerLevelCCK[i]);
+ RT_TRACE(COMP_INIT, "CCK Tx Power Level, Index"
+ " %d = 0x%02x\n", i+1,
+ priv->EEPROMTxPowerLevelCCK[i+1]);
+ }
+ for (i = 0; i < 14; i += 2) {
+ if (!priv->AutoloadFailFlag)
+ usValue = eprom_read(dev,
+ (u16)((EEPROM_TxPwIndex_OFDM_24G
+ + i) >> 1));
+ else
+ usValue = EEPROM_Default_TxPower;
+ *((u16 *)(&priv->EEPROMTxPowerLevelOFDM24G[i]))
+ = usValue;
+ RT_TRACE(COMP_INIT, "OFDM 2.4G Tx Power Level,"
+ " Index %d = 0x%02x\n", i,
+ priv->EEPROMTxPowerLevelOFDM24G[i]);
+ RT_TRACE(COMP_INIT, "OFDM 2.4G Tx Power Level,"
+ " Index %d = 0x%02x\n", i + 1,
+ priv->EEPROMTxPowerLevelOFDM24G[i+1]);
+ }
+ }
+ if (priv->epromtype == EEPROM_93C46) {
+ for (i = 0; i < 14; i++) {
+ priv->TxPowerLevelCCK[i] =
+ priv->EEPROMTxPowerLevelCCK[i];
+ priv->TxPowerLevelOFDM24G[i] =
+ priv->EEPROMTxPowerLevelOFDM24G[i];
+ }
+ priv->LegacyHTTxPowerDiff =
+ priv->EEPROMLegacyHTTxPowerDiff;
+ priv->AntennaTxPwDiff[0] = (priv->EEPROMAntPwDiff &
+ 0xf);
+ priv->AntennaTxPwDiff[1] = ((priv->EEPROMAntPwDiff &
+ 0xf0)>>4);
+ priv->AntennaTxPwDiff[2] = ((priv->EEPROMAntPwDiff &
+ 0xf00)>>8);
+ priv->CrystalCap = priv->EEPROMCrystalCap;
+ priv->ThermalMeter[0] = (priv->EEPROMThermalMeter &
+ 0xf);
+ priv->ThermalMeter[1] = ((priv->EEPROMThermalMeter &
+ 0xf0)>>4);
+ } else if (priv->epromtype == EEPROM_93C56) {
+
+ for (i = 0; i < 3; i++) {
+ priv->TxPowerLevelCCK_A[i] =
+ priv->EEPROMRfACCKChnl1TxPwLevel[0];
+ priv->TxPowerLevelOFDM24G_A[i] =
+ priv->EEPROMRfAOfdmChnlTxPwLevel[0];
+ priv->TxPowerLevelCCK_C[i] =
+ priv->EEPROMRfCCCKChnl1TxPwLevel[0];
+ priv->TxPowerLevelOFDM24G_C[i] =
+ priv->EEPROMRfCOfdmChnlTxPwLevel[0];
+ }
+ for (i = 3; i < 9; i++) {
+ priv->TxPowerLevelCCK_A[i] =
+ priv->EEPROMRfACCKChnl1TxPwLevel[1];
+ priv->TxPowerLevelOFDM24G_A[i] =
+ priv->EEPROMRfAOfdmChnlTxPwLevel[1];
+ priv->TxPowerLevelCCK_C[i] =
+ priv->EEPROMRfCCCKChnl1TxPwLevel[1];
+ priv->TxPowerLevelOFDM24G_C[i] =
+ priv->EEPROMRfCOfdmChnlTxPwLevel[1];
+ }
+ for (i = 9; i < 14; i++) {
+ priv->TxPowerLevelCCK_A[i] =
+ priv->EEPROMRfACCKChnl1TxPwLevel[2];
+ priv->TxPowerLevelOFDM24G_A[i] =
+ priv->EEPROMRfAOfdmChnlTxPwLevel[2];
+ priv->TxPowerLevelCCK_C[i] =
+ priv->EEPROMRfCCCKChnl1TxPwLevel[2];
+ priv->TxPowerLevelOFDM24G_C[i] =
+ priv->EEPROMRfCOfdmChnlTxPwLevel[2];
+ }
+ for (i = 0; i < 14; i++)
+ RT_TRACE(COMP_INIT, "priv->TxPowerLevelCCK_A"
+ "[%d] = 0x%x\n", i,
+ priv->TxPowerLevelCCK_A[i]);
+ for (i = 0; i < 14; i++)
+ RT_TRACE(COMP_INIT, "priv->TxPowerLevelOFDM"
+ "24G_A[%d] = 0x%x\n", i,
+ priv->TxPowerLevelOFDM24G_A[i]);
+ for (i = 0; i < 14; i++)
+ RT_TRACE(COMP_INIT, "priv->TxPowerLevelCCK_C"
+ "[%d] = 0x%x\n", i,
+ priv->TxPowerLevelCCK_C[i]);
+ for (i = 0; i < 14; i++)
+ RT_TRACE(COMP_INIT, "priv->TxPowerLevelOFDM"
+ "24G_C[%d] = 0x%x\n", i,
+ priv->TxPowerLevelOFDM24G_C[i]);
+ priv->LegacyHTTxPowerDiff =
+ priv->EEPROMLegacyHTTxPowerDiff;
+ priv->AntennaTxPwDiff[0] = 0;
+ priv->AntennaTxPwDiff[1] = 0;
+ priv->AntennaTxPwDiff[2] = 0;
+ priv->CrystalCap = priv->EEPROMCrystalCap;
+ priv->ThermalMeter[0] = (priv->EEPROMThermalMeter &
+ 0xf);
+ priv->ThermalMeter[1] = ((priv->EEPROMThermalMeter &
+ 0xf0)>>4);
+ }
+ }
+
+ if (priv->rf_type == RF_1T2R) {
+ /* no matter what checkpatch says, the braces are needed */
+ RT_TRACE(COMP_INIT, "\n1T2R config\n");
+ } else if (priv->rf_type == RF_2T4R) {
+ RT_TRACE(COMP_INIT, "\n2T4R config\n");
+ }
+
+ init_rate_adaptive(dev);
+
+ priv->rf_chip = RF_8256;
+
+ if (priv->RegChannelPlan == 0xf)
+ priv->ChannelPlan = priv->eeprom_ChannelPlan;
+ else
+ priv->ChannelPlan = priv->RegChannelPlan;
+
+ if (priv->eeprom_vid == 0x1186 && priv->eeprom_did == 0x3304)
+ priv->CustomerID = RT_CID_DLINK;
+
+ switch (priv->eeprom_CustomerID) {
+ case EEPROM_CID_DEFAULT:
+ priv->CustomerID = RT_CID_DEFAULT;
+ break;
+ case EEPROM_CID_CAMEO:
+ priv->CustomerID = RT_CID_819x_CAMEO;
+ break;
+ case EEPROM_CID_RUNTOP:
+ priv->CustomerID = RT_CID_819x_RUNTOP;
+ break;
+ case EEPROM_CID_NetCore:
+ priv->CustomerID = RT_CID_819x_Netcore;
+ break;
+ case EEPROM_CID_TOSHIBA:
+ priv->CustomerID = RT_CID_TOSHIBA;
+ if (priv->eeprom_ChannelPlan&0x80)
+ priv->ChannelPlan = priv->eeprom_ChannelPlan&0x7f;
+ else
+ priv->ChannelPlan = 0x0;
+ RT_TRACE(COMP_INIT, "Toshiba ChannelPlan = 0x%x\n",
+ priv->ChannelPlan);
+ break;
+ case EEPROM_CID_Nettronix:
+ priv->ScanDelay = 100;
+ priv->CustomerID = RT_CID_Nettronix;
+ break;
+ case EEPROM_CID_Pronet:
+ priv->CustomerID = RT_CID_PRONET;
+ break;
+ case EEPROM_CID_DLINK:
+ priv->CustomerID = RT_CID_DLINK;
+ break;
+
+ case EEPROM_CID_WHQL:
+ break;
+ default:
+ break;
+ }
+
+ if (priv->ChannelPlan > CHANNEL_PLAN_LEN - 1)
+ priv->ChannelPlan = 0;
+ priv->ChannelPlan = COUNTRY_CODE_WORLD_WIDE_13;
+
+ if (priv->eeprom_vid == 0x1186 && priv->eeprom_did == 0x3304)
+ priv->rtllib->bSupportRemoteWakeUp = true;
+ else
+ priv->rtllib->bSupportRemoteWakeUp = false;
+
+ RT_TRACE(COMP_INIT, "RegChannelPlan(%d)\n", priv->RegChannelPlan);
+ RT_TRACE(COMP_INIT, "ChannelPlan = %d\n", priv->ChannelPlan);
+ RT_TRACE(COMP_TRACE, "<==== ReadAdapterInfo\n");
+}
+
+void rtl8192_get_eeprom_size(struct net_device *dev)
+{
+ u16 curCR;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
+ curCR = read_nic_dword(dev, EPROM_CMD);
+ RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
+ curCR);
+ priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
+ EEPROM_93C46;
+ RT_TRACE(COMP_INIT, "<===========%s(), epromtype:%d\n", __func__,
+ priv->epromtype);
+ rtl8192_read_eeprom_info(dev);
+}
+
+static void rtl8192_hwconfig(struct net_device *dev)
+{
+ u32 regRATR = 0, regRRSR = 0;
+ u8 regBwOpMode = 0, regTmp = 0;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ switch (priv->rtllib->mode) {
+ case WIRELESS_MODE_B:
+ regBwOpMode = BW_OPMODE_20MHZ;
+ regRATR = RATE_ALL_CCK;
+ regRRSR = RATE_ALL_CCK;
+ break;
+ case WIRELESS_MODE_A:
+ regBwOpMode = BW_OPMODE_5G | BW_OPMODE_20MHZ;
+ regRATR = RATE_ALL_OFDM_AG;
+ regRRSR = RATE_ALL_OFDM_AG;
+ break;
+ case WIRELESS_MODE_G:
+ regBwOpMode = BW_OPMODE_20MHZ;
+ regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ break;
+ case WIRELESS_MODE_AUTO:
+ case WIRELESS_MODE_N_24G:
+ regBwOpMode = BW_OPMODE_20MHZ;
+ regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
+ RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+ regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ break;
+ case WIRELESS_MODE_N_5G:
+ regBwOpMode = BW_OPMODE_5G;
+ regRATR = RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS |
+ RATE_ALL_OFDM_2SS;
+ regRRSR = RATE_ALL_OFDM_AG;
+ break;
+ default:
+ regBwOpMode = BW_OPMODE_20MHZ;
+ regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ break;
+ }
+
+ write_nic_byte(dev, BW_OPMODE, regBwOpMode);
+ {
+ u32 ratr_value = 0;
+ ratr_value = regRATR;
+ if (priv->rf_type == RF_1T2R)
+ ratr_value &= ~(RATE_ALL_OFDM_2SS);
+ write_nic_dword(dev, RATR0, ratr_value);
+ write_nic_byte(dev, UFWP, 1);
+ }
+ regTmp = read_nic_byte(dev, 0x313);
+ regRRSR = ((regTmp) << 24) | (regRRSR & 0x00ffffff);
+ write_nic_dword(dev, RRSR, regRRSR);
+
+ write_nic_word(dev, RETRY_LIMIT,
+ priv->ShortRetryLimit << RETRY_LIMIT_SHORT_SHIFT |
+ priv->LongRetryLimit << RETRY_LIMIT_LONG_SHIFT);
+}
+
+bool rtl8192_adapter_start(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 ulRegRead;
+ bool rtStatus = true;
+ u8 tmpvalue;
+ u8 ICVersion, SwitchingRegulatorOutput;
+ bool bfirmwareok = true;
+ u32 tmpRegA, tmpRegC, TempCCk;
+ int i = 0;
+ u32 retry_times = 0;
+
+ RT_TRACE(COMP_INIT, "====>%s()\n", __func__);
+ priv->being_init_adapter = true;
+
+start:
+ rtl8192_pci_resetdescring(dev);
+ priv->Rf_Mode = RF_OP_By_SW_3wire;
+ if (priv->ResetProgress == RESET_TYPE_NORESET) {
+ write_nic_byte(dev, ANAPAR, 0x37);
+ mdelay(500);
+ }
+ priv->pFirmware->firmware_status = FW_STATUS_0_INIT;
+
+ if (priv->RegRfOff == true)
+ priv->rtllib->eRFPowerState = eRfOff;
+
+ ulRegRead = read_nic_dword(dev, CPU_GEN);
+ if (priv->pFirmware->firmware_status == FW_STATUS_0_INIT)
+ ulRegRead |= CPU_GEN_SYSTEM_RESET;
+ else if (priv->pFirmware->firmware_status == FW_STATUS_5_READY)
+ ulRegRead |= CPU_GEN_FIRMWARE_RESET;
+ else
+ RT_TRACE(COMP_ERR, "ERROR in %s(): undefined firmware state(%d)"
+ "\n", __func__, priv->pFirmware->firmware_status);
+
+ write_nic_dword(dev, CPU_GEN, ulRegRead);
+
+ ICVersion = read_nic_byte(dev, IC_VERRSION);
+ if (ICVersion >= 0x4) {
+ SwitchingRegulatorOutput = read_nic_byte(dev, SWREGULATOR);
+ if (SwitchingRegulatorOutput != 0xb8) {
+ write_nic_byte(dev, SWREGULATOR, 0xa8);
+ mdelay(1);
+ write_nic_byte(dev, SWREGULATOR, 0xb8);
+ }
+ }
+ RT_TRACE(COMP_INIT, "BB Config Start!\n");
+ rtStatus = rtl8192_BBConfig(dev);
+ if (rtStatus != true) {
+ RT_TRACE(COMP_ERR, "BB Config failed\n");
+ return rtStatus;
+ }
+ RT_TRACE(COMP_INIT, "BB Config Finished!\n");
+
+ priv->LoopbackMode = RTL819X_NO_LOOPBACK;
+ if (priv->ResetProgress == RESET_TYPE_NORESET) {
+ ulRegRead = read_nic_dword(dev, CPU_GEN);
+ if (priv->LoopbackMode == RTL819X_NO_LOOPBACK)
+ ulRegRead = ((ulRegRead & CPU_GEN_NO_LOOPBACK_MSK) |
+ CPU_GEN_NO_LOOPBACK_SET);
+ else if (priv->LoopbackMode == RTL819X_MAC_LOOPBACK)
+ ulRegRead |= CPU_CCK_LOOPBACK;
+ else
+ RT_TRACE(COMP_ERR, "Serious error: wrong loopback"
+ " mode setting\n");
+
+ write_nic_dword(dev, CPU_GEN, ulRegRead);
+
+ udelay(500);
+ }
+ rtl8192_hwconfig(dev);
+ write_nic_byte(dev, CMDR, CR_RE | CR_TE);
+
+ write_nic_byte(dev, PCIF, ((MXDMA2_NoLimit<<MXDMA2_RX_SHIFT) |
+ (MXDMA2_NoLimit<<MXDMA2_TX_SHIFT)));
+ write_nic_dword(dev, MAC0, ((u32 *)dev->dev_addr)[0]);
+ write_nic_word(dev, MAC4, ((u16 *)(dev->dev_addr + 4))[0]);
+ write_nic_dword(dev, RCR, priv->ReceiveConfig);
+
+ write_nic_dword(dev, RQPN1, NUM_OF_PAGE_IN_FW_QUEUE_BK <<
+ RSVD_FW_QUEUE_PAGE_BK_SHIFT |
+ NUM_OF_PAGE_IN_FW_QUEUE_BE <<
+ RSVD_FW_QUEUE_PAGE_BE_SHIFT |
+ NUM_OF_PAGE_IN_FW_QUEUE_VI <<
+ RSVD_FW_QUEUE_PAGE_VI_SHIFT |
+ NUM_OF_PAGE_IN_FW_QUEUE_VO <<
+ RSVD_FW_QUEUE_PAGE_VO_SHIFT);
+ write_nic_dword(dev, RQPN2, NUM_OF_PAGE_IN_FW_QUEUE_MGNT <<
+ RSVD_FW_QUEUE_PAGE_MGNT_SHIFT);
+ write_nic_dword(dev, RQPN3, APPLIED_RESERVED_QUEUE_IN_FW |
+ NUM_OF_PAGE_IN_FW_QUEUE_BCN <<
+ RSVD_FW_QUEUE_PAGE_BCN_SHIFT|
+ NUM_OF_PAGE_IN_FW_QUEUE_PUB <<
+ RSVD_FW_QUEUE_PAGE_PUB_SHIFT);
+
+ rtl8192_tx_enable(dev);
+ rtl8192_rx_enable(dev);
+ ulRegRead = (0xFFF00000 & read_nic_dword(dev, RRSR)) |
+ RATE_ALL_OFDM_AG | RATE_ALL_CCK;
+ write_nic_dword(dev, RRSR, ulRegRead);
+ write_nic_dword(dev, RATR0+4*7, (RATE_ALL_OFDM_AG | RATE_ALL_CCK));
+
+ write_nic_byte(dev, ACK_TIMEOUT, 0x30);
+
+ if (priv->ResetProgress == RESET_TYPE_NORESET)
+ rtl8192_SetWirelessMode(dev, priv->rtllib->mode);
+ CamResetAllEntry(dev);
+ {
+ u8 SECR_value = 0x0;
+ SECR_value |= SCR_TxEncEnable;
+ SECR_value |= SCR_RxDecEnable;
+ SECR_value |= SCR_NoSKMC;
+ write_nic_byte(dev, SECR, SECR_value);
+ }
+ write_nic_word(dev, ATIMWND, 2);
+ write_nic_word(dev, BCN_INTERVAL, 100);
+ {
+ int i;
+ for (i = 0; i < QOS_QUEUE_NUM; i++)
+ write_nic_dword(dev, WDCAPARA_ADD[i], 0x005e4332);
+ }
+ write_nic_byte(dev, 0xbe, 0xc0);
+
+ rtl8192_phy_configmac(dev);
+
+ if (priv->card_8192_version > (u8) VERSION_8190_BD) {
+ rtl8192_phy_getTxPower(dev);
+ rtl8192_phy_setTxPower(dev, priv->chan);
+ }
+
+ tmpvalue = read_nic_byte(dev, IC_VERRSION);
+ priv->IC_Cut = tmpvalue;
+ RT_TRACE(COMP_INIT, "priv->IC_Cut= 0x%x\n", priv->IC_Cut);
+ if (priv->IC_Cut >= IC_VersionCut_D) {
+ if (priv->IC_Cut == IC_VersionCut_D) {
+ /* no matter what checkpatch says, braces are needed */
+ RT_TRACE(COMP_INIT, "D-cut\n");
+ } else if (priv->IC_Cut == IC_VersionCut_E) {
+ RT_TRACE(COMP_INIT, "E-cut\n");
+ }
+ } else {
+ RT_TRACE(COMP_INIT, "Before C-cut\n");
+ }
+
+ RT_TRACE(COMP_INIT, "Load Firmware!\n");
+ bfirmwareok = init_firmware(dev);
+ if (!bfirmwareok) {
+ if (retry_times < 10) {
+ retry_times++;
+ goto start;
+ } else {
+ rtStatus = false;
+ goto end;
+ }
+ }
+ RT_TRACE(COMP_INIT, "Load Firmware finished!\n");
+ if (priv->ResetProgress == RESET_TYPE_NORESET) {
+ RT_TRACE(COMP_INIT, "RF Config Started!\n");
+ rtStatus = rtl8192_phy_RFConfig(dev);
+ if (rtStatus != true) {
+ RT_TRACE(COMP_ERR, "RF Config failed\n");
+ return rtStatus;
+ }
+ RT_TRACE(COMP_INIT, "RF Config Finished!\n");
+ }
+ rtl8192_phy_updateInitGain(dev);
+
+ rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
+ rtl8192_setBBreg(dev, rFPGA0_RFMOD, bOFDMEn, 0x1);
+
+ write_nic_byte(dev, 0x87, 0x0);
+
+ if (priv->RegRfOff == true) {
+ RT_TRACE((COMP_INIT | COMP_RF | COMP_POWER),
+ "%s(): Turn off RF for RegRfOff ----------\n",
+ __func__);
+ MgntActSet_RF_State(dev, eRfOff, RF_CHANGE_BY_SW, true);
+ } else if (priv->rtllib->RfOffReason > RF_CHANGE_BY_PS) {
+ RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER), "%s(): Turn off RF for"
+ " RfOffReason(%d) ----------\n", __func__,
+ priv->rtllib->RfOffReason);
+ MgntActSet_RF_State(dev, eRfOff, priv->rtllib->RfOffReason,
+ true);
+ } else if (priv->rtllib->RfOffReason >= RF_CHANGE_BY_IPS) {
+ RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER), "%s(): Turn off RF for"
+ " RfOffReason(%d) ----------\n", __func__,
+ priv->rtllib->RfOffReason);
+ MgntActSet_RF_State(dev, eRfOff, priv->rtllib->RfOffReason,
+ true);
+ } else {
+ RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER), "%s(): RF-ON\n",
+ __func__);
+ priv->rtllib->eRFPowerState = eRfOn;
+ priv->rtllib->RfOffReason = 0;
+ }
+
+ if (priv->rtllib->FwRWRF)
+ priv->Rf_Mode = RF_OP_By_FW;
+ else
+ priv->Rf_Mode = RF_OP_By_SW_3wire;
+
+ if (priv->ResetProgress == RESET_TYPE_NORESET) {
+ dm_initialize_txpower_tracking(dev);
+
+ if (priv->IC_Cut >= IC_VersionCut_D) {
+ tmpRegA = rtl8192_QueryBBReg(dev,
+ rOFDM0_XATxIQImbalance, bMaskDWord);
+ tmpRegC = rtl8192_QueryBBReg(dev,
+ rOFDM0_XCTxIQImbalance, bMaskDWord);
+ for (i = 0; i < TxBBGainTableLength; i++) {
+ if (tmpRegA ==
+ priv->txbbgain_table[i].txbbgain_value) {
+ priv->rfa_txpowertrackingindex = (u8)i;
+ priv->rfa_txpowertrackingindex_real =
+ (u8)i;
+ priv->rfa_txpowertracking_default =
+ priv->rfa_txpowertrackingindex;
+ break;
+ }
+ }
+
+ TempCCk = rtl8192_QueryBBReg(dev,
+ rCCK0_TxFilter1, bMaskByte2);
+
+ for (i = 0; i < CCKTxBBGainTableLength; i++) {
+ if (TempCCk == priv->cck_txbbgain_table[i].ccktxbb_valuearray[0]) {
+ priv->CCKPresentAttentuation_20Mdefault = (u8)i;
+ break;
+ }
+ }
+ priv->CCKPresentAttentuation_40Mdefault = 0;
+ priv->CCKPresentAttentuation_difference = 0;
+ priv->CCKPresentAttentuation =
+ priv->CCKPresentAttentuation_20Mdefault;
+ RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpower"
+ "trackingindex_initial = %d\n",
+ priv->rfa_txpowertrackingindex);
+ RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpower"
+ "trackingindex_real__initial = %d\n",
+ priv->rfa_txpowertrackingindex_real);
+ RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresent"
+ "Attentuation_difference_initial = %d\n",
+ priv->CCKPresentAttentuation_difference);
+ RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresent"
+ "Attentuation_initial = %d\n",
+ priv->CCKPresentAttentuation);
+ priv->btxpower_tracking = false;
+ }
+ }
+ rtl8192_irq_enable(dev);
+end:
+ priv->being_init_adapter = false;
+ return rtStatus;
+}
+
+static void rtl8192_net_update(struct net_device *dev)
+{
+
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_network *net;
+ u16 BcnTimeCfg = 0, BcnCW = 6, BcnIFS = 0xf;
+ u16 rate_config = 0;
+
+ net = &priv->rtllib->current_network;
+ rtl8192_config_rate(dev, &rate_config);
+ priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
+ priv->basic_rate = rate_config &= 0x15f;
+ write_nic_dword(dev, BSSIDR, ((u32 *)net->bssid)[0]);
+ write_nic_word(dev, BSSIDR+4, ((u16 *)net->bssid)[2]);
+
+ if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
+ write_nic_word(dev, ATIMWND, 2);
+ write_nic_word(dev, BCN_DMATIME, 256);
+ write_nic_word(dev, BCN_INTERVAL, net->beacon_interval);
+ write_nic_word(dev, BCN_DRV_EARLY_INT, 10);
+ write_nic_byte(dev, BCN_ERR_THRESH, 100);
+
+ BcnTimeCfg |= (BcnCW<<BCN_TCFG_CW_SHIFT);
+ BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
+
+ write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
+ }
+}
+
+void rtl8192_link_change(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+
+ if (!priv->up)
+ return;
+
+ if (ieee->state == RTLLIB_LINKED) {
+ rtl8192_net_update(dev);
+ priv->ops->update_ratr_table(dev);
+ if ((KEY_TYPE_WEP40 == ieee->pairwise_key_type) ||
+ (KEY_TYPE_WEP104 == ieee->pairwise_key_type))
+ EnableHWSecurityConfig8192(dev);
+ } else {
+ write_nic_byte(dev, 0x173, 0);
+ }
+ rtl8192e_update_msr(dev);
+
+ if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
+ u32 reg = 0;
+ reg = read_nic_dword(dev, RCR);
+ if (priv->rtllib->state == RTLLIB_LINKED) {
+ if (ieee->IntelPromiscuousModeInfo.bPromiscuousOn)
+ ;
+ else
+ priv->ReceiveConfig = reg |= RCR_CBSSID;
+ } else
+ priv->ReceiveConfig = reg &= ~RCR_CBSSID;
+
+ write_nic_dword(dev, RCR, reg);
+ }
+}
+
+void rtl8192_AllowAllDestAddr(struct net_device *dev,
+ bool bAllowAllDA, bool WriteIntoReg)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (bAllowAllDA)
+ priv->ReceiveConfig |= RCR_AAP;
+ else
+ priv->ReceiveConfig &= ~RCR_AAP;
+
+ if (WriteIntoReg)
+ write_nic_dword(dev, RCR, priv->ReceiveConfig);
+}
+
+static u8 MRateToHwRate8190Pci(u8 rate)
+{
+ u8 ret = DESC90_RATE1M;
+
+ switch (rate) {
+ case MGN_1M:
+ ret = DESC90_RATE1M;
+ break;
+ case MGN_2M:
+ ret = DESC90_RATE2M;
+ break;
+ case MGN_5_5M:
+ ret = DESC90_RATE5_5M;
+ break;
+ case MGN_11M:
+ ret = DESC90_RATE11M;
+ break;
+ case MGN_6M:
+ ret = DESC90_RATE6M;
+ break;
+ case MGN_9M:
+ ret = DESC90_RATE9M;
+ break;
+ case MGN_12M:
+ ret = DESC90_RATE12M;
+ break;
+ case MGN_18M:
+ ret = DESC90_RATE18M;
+ break;
+ case MGN_24M:
+ ret = DESC90_RATE24M;
+ break;
+ case MGN_36M:
+ ret = DESC90_RATE36M;
+ break;
+ case MGN_48M:
+ ret = DESC90_RATE48M;
+ break;
+ case MGN_54M:
+ ret = DESC90_RATE54M;
+ break;
+ case MGN_MCS0:
+ ret = DESC90_RATEMCS0;
+ break;
+ case MGN_MCS1:
+ ret = DESC90_RATEMCS1;
+ break;
+ case MGN_MCS2:
+ ret = DESC90_RATEMCS2;
+ break;
+ case MGN_MCS3:
+ ret = DESC90_RATEMCS3;
+ break;
+ case MGN_MCS4:
+ ret = DESC90_RATEMCS4;
+ break;
+ case MGN_MCS5:
+ ret = DESC90_RATEMCS5;
+ break;
+ case MGN_MCS6:
+ ret = DESC90_RATEMCS6;
+ break;
+ case MGN_MCS7:
+ ret = DESC90_RATEMCS7;
+ break;
+ case MGN_MCS8:
+ ret = DESC90_RATEMCS8;
+ break;
+ case MGN_MCS9:
+ ret = DESC90_RATEMCS9;
+ break;
+ case MGN_MCS10:
+ ret = DESC90_RATEMCS10;
+ break;
+ case MGN_MCS11:
+ ret = DESC90_RATEMCS11;
+ break;
+ case MGN_MCS12:
+ ret = DESC90_RATEMCS12;
+ break;
+ case MGN_MCS13:
+ ret = DESC90_RATEMCS13;
+ break;
+ case MGN_MCS14:
+ ret = DESC90_RATEMCS14;
+ break;
+ case MGN_MCS15:
+ ret = DESC90_RATEMCS15;
+ break;
+ case (0x80|0x20):
+ ret = DESC90_RATEMCS32;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static u8 rtl8192_MapHwQueueToFirmwareQueue(u8 QueueID, u8 priority)
+{
+ u8 QueueSelect = 0x0;
+
+ switch (QueueID) {
+ case BE_QUEUE:
+ QueueSelect = QSLT_BE;
+ break;
+
+ case BK_QUEUE:
+ QueueSelect = QSLT_BK;
+ break;
+
+ case VO_QUEUE:
+ QueueSelect = QSLT_VO;
+ break;
+
+ case VI_QUEUE:
+ QueueSelect = QSLT_VI;
+ break;
+ case MGNT_QUEUE:
+ QueueSelect = QSLT_MGNT;
+ break;
+ case BEACON_QUEUE:
+ QueueSelect = QSLT_BEACON;
+ break;
+ case TXCMD_QUEUE:
+ QueueSelect = QSLT_CMD;
+ break;
+ case HIGH_QUEUE:
+ QueueSelect = QSLT_HIGH;
+ break;
+ default:
+ RT_TRACE(COMP_ERR, "TransmitTCB(): Impossible Queue Selection:"
+ " %d\n", QueueID);
+ break;
+ }
+ return QueueSelect;
+}
+
+void rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
+ struct cb_desc *cb_desc, struct sk_buff *skb)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
+ pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
+ memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
+ pTxFwInfo->TxHT = (cb_desc->data_rate & 0x80) ? 1 : 0;
+ pTxFwInfo->TxRate = MRateToHwRate8190Pci((u8)cb_desc->data_rate);
+ pTxFwInfo->EnableCPUDur = cb_desc->bTxEnableFwCalcDur;
+ pTxFwInfo->Short = rtl8192_QueryIsShort(pTxFwInfo->TxHT,
+ pTxFwInfo->TxRate,
+ cb_desc);
+
+ if (cb_desc->bAMPDUEnable) {
+ pTxFwInfo->AllowAggregation = 1;
+ pTxFwInfo->RxMF = cb_desc->ampdu_factor;
+ pTxFwInfo->RxAMD = cb_desc->ampdu_density;
+ } else {
+ pTxFwInfo->AllowAggregation = 0;
+ pTxFwInfo->RxMF = 0;
+ pTxFwInfo->RxAMD = 0;
+ }
+
+ pTxFwInfo->RtsEnable = (cb_desc->bRTSEnable) ? 1 : 0;
+ pTxFwInfo->CtsEnable = (cb_desc->bCTSEnable) ? 1 : 0;
+ pTxFwInfo->RtsSTBC = (cb_desc->bRTSSTBC) ? 1 : 0;
+ pTxFwInfo->RtsHT = (cb_desc->rts_rate&0x80) ? 1 : 0;
+ pTxFwInfo->RtsRate = MRateToHwRate8190Pci((u8)cb_desc->rts_rate);
+ pTxFwInfo->RtsBandwidth = 0;
+ pTxFwInfo->RtsSubcarrier = cb_desc->RTSSC;
+ pTxFwInfo->RtsShort = (pTxFwInfo->RtsHT == 0) ?
+ (cb_desc->bRTSUseShortPreamble ? 1 : 0) :
+ (cb_desc->bRTSUseShortGI ? 1 : 0);
+ if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20_40) {
+ if (cb_desc->bPacketBW) {
+ pTxFwInfo->TxBandwidth = 1;
+ pTxFwInfo->TxSubCarrier = 0;
+ } else {
+ pTxFwInfo->TxBandwidth = 0;
+ pTxFwInfo->TxSubCarrier = priv->nCur40MhzPrimeSC;
+ }
+ } else {
+ pTxFwInfo->TxBandwidth = 0;
+ pTxFwInfo->TxSubCarrier = 0;
+ }
+
+ memset((u8 *)pdesc, 0, 12);
+ pdesc->LINIP = 0;
+ pdesc->CmdInit = 1;
+ pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
+ pdesc->PktSize = (u16)skb->len-sizeof(struct tx_fwinfo_8190pci);
+
+ pdesc->SecCAMID = 0;
+ pdesc->RATid = cb_desc->RATRIndex;
+
+
+ pdesc->NoEnc = 1;
+ pdesc->SecType = 0x0;
+ if (cb_desc->bHwSec) {
+ static u8 tmp;
+ if (!tmp) {
+ RT_TRACE(COMP_DBG, "==>================hw sec\n");
+ tmp = 1;
+ }
+ switch (priv->rtllib->pairwise_key_type) {
+ case KEY_TYPE_WEP40:
+ case KEY_TYPE_WEP104:
+ pdesc->SecType = 0x1;
+ pdesc->NoEnc = 0;
+ break;
+ case KEY_TYPE_TKIP:
+ pdesc->SecType = 0x2;
+ pdesc->NoEnc = 0;
+ break;
+ case KEY_TYPE_CCMP:
+ pdesc->SecType = 0x3;
+ pdesc->NoEnc = 0;
+ break;
+ case KEY_TYPE_NA:
+ pdesc->SecType = 0x0;
+ pdesc->NoEnc = 1;
+ break;
+ }
+ }
+
+ pdesc->PktId = 0x0;
+
+ pdesc->QueueSelect = rtl8192_MapHwQueueToFirmwareQueue(
+ cb_desc->queue_index,
+ cb_desc->priority);
+ pdesc->TxFWInfoSize = sizeof(struct tx_fwinfo_8190pci);
+
+ pdesc->DISFB = cb_desc->bTxDisableRateFallBack;
+ pdesc->USERATE = cb_desc->bTxUseDriverAssingedRate;
+
+ pdesc->FirstSeg = 1;
+ pdesc->LastSeg = 1;
+ pdesc->TxBufferSize = skb->len;
+
+ pdesc->TxBuffAddr = cpu_to_le32(mapping);
+}
+
+void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
+ struct tx_desc_cmd *entry,
+ struct cb_desc *cb_desc, struct sk_buff* skb)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+
+ memset(entry, 0, 12);
+ entry->LINIP = cb_desc->bLastIniPkt;
+ entry->FirstSeg = 1;
+ entry->LastSeg = 1;
+ if (cb_desc->bCmdOrInit == DESC_PACKET_TYPE_INIT) {
+ entry->CmdInit = DESC_PACKET_TYPE_INIT;
+ } else {
+ struct tx_desc * entry_tmp = (struct tx_desc *)entry;
+ entry_tmp->CmdInit = DESC_PACKET_TYPE_NORMAL;
+ entry_tmp->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
+ entry_tmp->PktSize = (u16)(cb_desc->pkt_size +
+ entry_tmp->Offset);
+ entry_tmp->QueueSelect = QSLT_CMD;
+ entry_tmp->TxFWInfoSize = 0x08;
+ entry_tmp->RATid = (u8)DESC_PACKET_TYPE_INIT;
+ }
+ entry->TxBufferSize = skb->len;
+ entry->TxBuffAddr = cpu_to_le32(mapping);
+ entry->OWN = 1;
+}
+
+static u8 HwRateToMRate90(bool bIsHT, u8 rate)
+{
+ u8 ret_rate = 0x02;
+
+ if (!bIsHT) {
+ switch (rate) {
+ case DESC90_RATE1M:
+ ret_rate = MGN_1M;
+ break;
+ case DESC90_RATE2M:
+ ret_rate = MGN_2M;
+ break;
+ case DESC90_RATE5_5M:
+ ret_rate = MGN_5_5M;
+ break;
+ case DESC90_RATE11M:
+ ret_rate = MGN_11M;
+ break;
+ case DESC90_RATE6M:
+ ret_rate = MGN_6M;
+ break;
+ case DESC90_RATE9M:
+ ret_rate = MGN_9M;
+ break;
+ case DESC90_RATE12M:
+ ret_rate = MGN_12M;
+ break;
+ case DESC90_RATE18M:
+ ret_rate = MGN_18M;
+ break;
+ case DESC90_RATE24M:
+ ret_rate = MGN_24M;
+ break;
+ case DESC90_RATE36M:
+ ret_rate = MGN_36M;
+ break;
+ case DESC90_RATE48M:
+ ret_rate = MGN_48M;
+ break;
+ case DESC90_RATE54M:
+ ret_rate = MGN_54M;
+ break;
+
+ default:
+ RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported"
+ "Rate [%x], bIsHT = %d!!!\n", rate, bIsHT);
+ break;
+ }
+
+ } else {
+ switch (rate) {
+ case DESC90_RATEMCS0:
+ ret_rate = MGN_MCS0;
+ break;
+ case DESC90_RATEMCS1:
+ ret_rate = MGN_MCS1;
+ break;
+ case DESC90_RATEMCS2:
+ ret_rate = MGN_MCS2;
+ break;
+ case DESC90_RATEMCS3:
+ ret_rate = MGN_MCS3;
+ break;
+ case DESC90_RATEMCS4:
+ ret_rate = MGN_MCS4;
+ break;
+ case DESC90_RATEMCS5:
+ ret_rate = MGN_MCS5;
+ break;
+ case DESC90_RATEMCS6:
+ ret_rate = MGN_MCS6;
+ break;
+ case DESC90_RATEMCS7:
+ ret_rate = MGN_MCS7;
+ break;
+ case DESC90_RATEMCS8:
+ ret_rate = MGN_MCS8;
+ break;
+ case DESC90_RATEMCS9:
+ ret_rate = MGN_MCS9;
+ break;
+ case DESC90_RATEMCS10:
+ ret_rate = MGN_MCS10;
+ break;
+ case DESC90_RATEMCS11:
+ ret_rate = MGN_MCS11;
+ break;
+ case DESC90_RATEMCS12:
+ ret_rate = MGN_MCS12;
+ break;
+ case DESC90_RATEMCS13:
+ ret_rate = MGN_MCS13;
+ break;
+ case DESC90_RATEMCS14:
+ ret_rate = MGN_MCS14;
+ break;
+ case DESC90_RATEMCS15:
+ ret_rate = MGN_MCS15;
+ break;
+ case DESC90_RATEMCS32:
+ ret_rate = (0x80|0x20);
+ break;
+
+ default:
+ RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported "
+ "Rate [%x], bIsHT = %d!!!\n", rate, bIsHT);
+ break;
+ }
+ }
+
+ return ret_rate;
+}
+
+static long rtl8192_signal_scale_mapping(struct r8192_priv *priv, long currsig)
+{
+ long retsig;
+
+ if (currsig >= 61 && currsig <= 100)
+ retsig = 90 + ((currsig - 60) / 4);
+ else if (currsig >= 41 && currsig <= 60)
+ retsig = 78 + ((currsig - 40) / 2);
+ else if (currsig >= 31 && currsig <= 40)
+ retsig = 66 + (currsig - 30);
+ else if (currsig >= 21 && currsig <= 30)
+ retsig = 54 + (currsig - 20);
+ else if (currsig >= 5 && currsig <= 20)
+ retsig = 42 + (((currsig - 5) * 2) / 3);
+ else if (currsig == 4)
+ retsig = 36;
+ else if (currsig == 3)
+ retsig = 27;
+ else if (currsig == 2)
+ retsig = 18;
+ else if (currsig == 1)
+ retsig = 9;
+ else
+ retsig = currsig;
+
+ return retsig;
+}
+
+
+#define rx_hal_is_cck_rate(_pdrvinfo)\
+ ((_pdrvinfo->RxRate == DESC90_RATE1M ||\
+ _pdrvinfo->RxRate == DESC90_RATE2M ||\
+ _pdrvinfo->RxRate == DESC90_RATE5_5M ||\
+ _pdrvinfo->RxRate == DESC90_RATE11M) &&\
+ !_pdrvinfo->RxHT)
+
+static void rtl8192_query_rxphystatus(
+ struct r8192_priv *priv,
+ struct rtllib_rx_stats *pstats,
+ struct rx_desc *pdesc,
+ struct rx_fwinfo *pdrvinfo,
+ struct rtllib_rx_stats *precord_stats,
+ bool bpacket_match_bssid,
+ bool bpacket_toself,
+ bool bPacketBeacon,
+ bool bToSelfBA
+ )
+{
+ struct phy_sts_ofdm_819xpci *pofdm_buf;
+ struct phy_sts_cck_819xpci *pcck_buf;
+ struct phy_ofdm_rx_status_rxsc_sgien_exintfflag *prxsc;
+ u8 *prxpkt;
+ u8 i, max_spatial_stream, tmp_rxsnr, tmp_rxevm, rxsc_sgien_exflg;
+ char rx_pwr[4], rx_pwr_all = 0;
+ char rx_snrX, rx_evmX;
+ u8 evm, pwdb_all;
+ u32 RSSI, total_rssi = 0;
+ u8 is_cck_rate = 0;
+ u8 rf_rx_num = 0;
+ static u8 check_reg824;
+ static u32 reg824_bit9;
+
+ priv->stats.numqry_phystatus++;
+
+ is_cck_rate = rx_hal_is_cck_rate(pdrvinfo);
+ memset(precord_stats, 0, sizeof(struct rtllib_rx_stats));
+ pstats->bPacketMatchBSSID = precord_stats->bPacketMatchBSSID =
+ bpacket_match_bssid;
+ pstats->bPacketToSelf = precord_stats->bPacketToSelf = bpacket_toself;
+ pstats->bIsCCK = precord_stats->bIsCCK = is_cck_rate;
+ pstats->bPacketBeacon = precord_stats->bPacketBeacon = bPacketBeacon;
+ pstats->bToSelfBA = precord_stats->bToSelfBA = bToSelfBA;
+ if (check_reg824 == 0) {
+ reg824_bit9 = rtl8192_QueryBBReg(priv->rtllib->dev,
+ rFPGA0_XA_HSSIParameter2, 0x200);
+ check_reg824 = 1;
+ }
+
+
+ prxpkt = (u8 *)pdrvinfo;
+
+ prxpkt += sizeof(struct rx_fwinfo);
+
+ pcck_buf = (struct phy_sts_cck_819xpci *)prxpkt;
+ pofdm_buf = (struct phy_sts_ofdm_819xpci *)prxpkt;
+
+ pstats->RxMIMOSignalQuality[0] = -1;
+ pstats->RxMIMOSignalQuality[1] = -1;
+ precord_stats->RxMIMOSignalQuality[0] = -1;
+ precord_stats->RxMIMOSignalQuality[1] = -1;
+
+ if (is_cck_rate) {
+ u8 report;
+
+ priv->stats.numqry_phystatusCCK++;
+ if (!reg824_bit9) {
+ report = pcck_buf->cck_agc_rpt & 0xc0;
+ report = report>>6;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -35 - (pcck_buf->cck_agc_rpt &
+ 0x3e);
+ break;
+ case 0x2:
+ rx_pwr_all = -23 - (pcck_buf->cck_agc_rpt &
+ 0x3e);
+ break;
+ case 0x1:
+ rx_pwr_all = -11 - (pcck_buf->cck_agc_rpt &
+ 0x3e);
+ break;
+ case 0x0:
+ rx_pwr_all = 8 - (pcck_buf->cck_agc_rpt & 0x3e);
+ break;
+ }
+ } else {
+ report = pcck_buf->cck_agc_rpt & 0x60;
+ report = report>>5;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -35 -
+ ((pcck_buf->cck_agc_rpt &
+ 0x1f) << 1);
+ break;
+ case 0x2:
+ rx_pwr_all = -23 -
+ ((pcck_buf->cck_agc_rpt &
+ 0x1f) << 1);
+ break;
+ case 0x1:
+ rx_pwr_all = -11 -
+ ((pcck_buf->cck_agc_rpt &
+ 0x1f) << 1);
+ break;
+ case 0x0:
+ rx_pwr_all = -8 -
+ ((pcck_buf->cck_agc_rpt &
+ 0x1f) << 1);
+ break;
+ }
+ }
+
+ pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
+ pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
+ pstats->RecvSignalPower = rx_pwr_all;
+
+ if (bpacket_match_bssid) {
+ u8 sq;
+
+ if (pstats->RxPWDBAll > 40) {
+ sq = 100;
+ } else {
+ sq = pcck_buf->sq_rpt;
+
+ if (pcck_buf->sq_rpt > 64)
+ sq = 0;
+ else if (pcck_buf->sq_rpt < 20)
+ sq = 100;
+ else
+ sq = ((64-sq) * 100) / 44;
+ }
+ pstats->SignalQuality = sq;
+ precord_stats->SignalQuality = sq;
+ pstats->RxMIMOSignalQuality[0] = sq;
+ precord_stats->RxMIMOSignalQuality[0] = sq;
+ pstats->RxMIMOSignalQuality[1] = -1;
+ precord_stats->RxMIMOSignalQuality[1] = -1;
+ }
+ } else {
+ priv->stats.numqry_phystatusHT++;
+ for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
+ if (priv->brfpath_rxenable[i])
+ rf_rx_num++;
+
+ rx_pwr[i] = ((pofdm_buf->trsw_gain_X[i] & 0x3F) *
+ 2) - 110;
+
+ tmp_rxsnr = pofdm_buf->rxsnr_X[i];
+ rx_snrX = (char)(tmp_rxsnr);
+ rx_snrX /= 2;
+ priv->stats.rxSNRdB[i] = (long)rx_snrX;
+
+ RSSI = rtl819x_query_rxpwrpercentage(rx_pwr[i]);
+ if (priv->brfpath_rxenable[i])
+ total_rssi += RSSI;
+
+ if (bpacket_match_bssid) {
+ pstats->RxMIMOSignalStrength[i] = (u8) RSSI;
+ precord_stats->RxMIMOSignalStrength[i] =
+ (u8) RSSI;
+ }
+ }
+
+
+ rx_pwr_all = (((pofdm_buf->pwdb_all) >> 1) & 0x7f) - 106;
+ pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
+
+ pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
+ pstats->RxPower = precord_stats->RxPower = rx_pwr_all;
+ pstats->RecvSignalPower = rx_pwr_all;
+ if (pdrvinfo->RxHT && pdrvinfo->RxRate >= DESC90_RATEMCS8 &&
+ pdrvinfo->RxRate <= DESC90_RATEMCS15)
+ max_spatial_stream = 2;
+ else
+ max_spatial_stream = 1;
+
+ for (i = 0; i < max_spatial_stream; i++) {
+ tmp_rxevm = pofdm_buf->rxevm_X[i];
+ rx_evmX = (char)(tmp_rxevm);
+
+ rx_evmX /= 2;
+
+ evm = rtl819x_evm_dbtopercentage(rx_evmX);
+ if (bpacket_match_bssid) {
+ if (i == 0) {
+ pstats->SignalQuality = (u8)(evm &
+ 0xff);
+ precord_stats->SignalQuality = (u8)(evm
+ & 0xff);
+ }
+ pstats->RxMIMOSignalQuality[i] = (u8)(evm &
+ 0xff);
+ precord_stats->RxMIMOSignalQuality[i] = (u8)(evm
+ & 0xff);
+ }
+ }
+
+
+ rxsc_sgien_exflg = pofdm_buf->rxsc_sgien_exflg;
+ prxsc = (struct phy_ofdm_rx_status_rxsc_sgien_exintfflag *)
+ &rxsc_sgien_exflg;
+ if (pdrvinfo->BW)
+ priv->stats.received_bwtype[1+prxsc->rxsc]++;
+ else
+ priv->stats.received_bwtype[0]++;
+ }
+
+ if (is_cck_rate) {
+ pstats->SignalStrength = precord_stats->SignalStrength =
+ (u8)(rtl8192_signal_scale_mapping(priv,
+ (long)pwdb_all));
+
+ } else {
+ if (rf_rx_num != 0)
+ pstats->SignalStrength = precord_stats->SignalStrength =
+ (u8)(rtl8192_signal_scale_mapping(priv,
+ (long)(total_rssi /= rf_rx_num)));
+ }
+}
+
+static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
+ struct rtllib_rx_stats *prev_st,
+ struct rtllib_rx_stats *curr_st)
+{
+ bool bcheck = false;
+ u8 rfpath;
+ u32 ij, tmp_val;
+ static u32 slide_rssi_index, slide_rssi_statistics;
+ static u32 slide_evm_index, slide_evm_statistics;
+ static u32 last_rssi, last_evm;
+ static u32 slide_beacon_adc_pwdb_index;
+ static u32 slide_beacon_adc_pwdb_statistics;
+ static u32 last_beacon_adc_pwdb;
+ struct rtllib_hdr_3addr *hdr;
+ u16 sc;
+ unsigned int frag, seq;
+
+ hdr = (struct rtllib_hdr_3addr *)buffer;
+ sc = le16_to_cpu(hdr->seq_ctl);
+ frag = WLAN_GET_SEQ_FRAG(sc);
+ seq = WLAN_GET_SEQ_SEQ(sc);
+ curr_st->Seq_Num = seq;
+ if (!prev_st->bIsAMPDU)
+ bcheck = true;
+
+ if (slide_rssi_statistics++ >= PHY_RSSI_SLID_WIN_MAX) {
+ slide_rssi_statistics = PHY_RSSI_SLID_WIN_MAX;
+ last_rssi = priv->stats.slide_signal_strength[slide_rssi_index];
+ priv->stats.slide_rssi_total -= last_rssi;
+ }
+ priv->stats.slide_rssi_total += prev_st->SignalStrength;
+
+ priv->stats.slide_signal_strength[slide_rssi_index++] =
+ prev_st->SignalStrength;
+ if (slide_rssi_index >= PHY_RSSI_SLID_WIN_MAX)
+ slide_rssi_index = 0;
+
+ tmp_val = priv->stats.slide_rssi_total/slide_rssi_statistics;
+ priv->stats.signal_strength = rtl819x_translate_todbm(priv,
+ (u8)tmp_val);
+ curr_st->rssi = priv->stats.signal_strength;
+ if (!prev_st->bPacketMatchBSSID) {
+ if (!prev_st->bToSelfBA)
+ return;
+ }
+
+ if (!bcheck)
+ return;
+
+ rtl819x_process_cck_rxpathsel(priv, prev_st);
+
+ priv->stats.num_process_phyinfo++;
+ if (!prev_st->bIsCCK && prev_st->bPacketToSelf) {
+ for (rfpath = RF90_PATH_A; rfpath < RF90_PATH_C; rfpath++) {
+ if (!rtl8192_phy_CheckIsLegalRFPath(priv->rtllib->dev,
+ rfpath))
+ continue;
+ RT_TRACE(COMP_DBG, "Jacken -> pPreviousstats->RxMIMO"
+ "SignalStrength[rfpath] = %d\n",
+ prev_st->RxMIMOSignalStrength[rfpath]);
+ if (priv->stats.rx_rssi_percentage[rfpath] == 0) {
+ priv->stats.rx_rssi_percentage[rfpath] =
+ prev_st->RxMIMOSignalStrength[rfpath];
+ }
+ if (prev_st->RxMIMOSignalStrength[rfpath] >
+ priv->stats.rx_rssi_percentage[rfpath]) {
+ priv->stats.rx_rssi_percentage[rfpath] =
+ ((priv->stats.rx_rssi_percentage[rfpath]
+ * (RX_SMOOTH - 1)) +
+ (prev_st->RxMIMOSignalStrength
+ [rfpath])) / (RX_SMOOTH);
+ priv->stats.rx_rssi_percentage[rfpath] =
+ priv->stats.rx_rssi_percentage[rfpath]
+ + 1;
+ } else {
+ priv->stats.rx_rssi_percentage[rfpath] =
+ ((priv->stats.rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH-1)) +
+ (prev_st->RxMIMOSignalStrength[rfpath])) /
+ (RX_SMOOTH);
+ }
+ RT_TRACE(COMP_DBG, "Jacken -> priv->RxStats.RxRSSI"
+ "Percentage[rfPath] = %d\n",
+ priv->stats.rx_rssi_percentage[rfpath]);
+ }
+ }
+
+
+ if (prev_st->bPacketBeacon) {
+ if (slide_beacon_adc_pwdb_statistics++ >=
+ PHY_Beacon_RSSI_SLID_WIN_MAX) {
+ slide_beacon_adc_pwdb_statistics =
+ PHY_Beacon_RSSI_SLID_WIN_MAX;
+ last_beacon_adc_pwdb = priv->stats.Slide_Beacon_pwdb
+ [slide_beacon_adc_pwdb_index];
+ priv->stats.Slide_Beacon_Total -= last_beacon_adc_pwdb;
+ }
+ priv->stats.Slide_Beacon_Total += prev_st->RxPWDBAll;
+ priv->stats.Slide_Beacon_pwdb[slide_beacon_adc_pwdb_index] =
+ prev_st->RxPWDBAll;
+ slide_beacon_adc_pwdb_index++;
+ if (slide_beacon_adc_pwdb_index >= PHY_Beacon_RSSI_SLID_WIN_MAX)
+ slide_beacon_adc_pwdb_index = 0;
+ prev_st->RxPWDBAll = priv->stats.Slide_Beacon_Total /
+ slide_beacon_adc_pwdb_statistics;
+ if (prev_st->RxPWDBAll >= 3)
+ prev_st->RxPWDBAll -= 3;
+ }
+
+ RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
+ prev_st->bIsCCK ? "CCK" : "OFDM",
+ prev_st->RxPWDBAll);
+
+ if (prev_st->bPacketToSelf || prev_st->bPacketBeacon ||
+ prev_st->bToSelfBA) {
+ if (priv->undecorated_smoothed_pwdb < 0)
+ priv->undecorated_smoothed_pwdb = prev_st->RxPWDBAll;
+ if (prev_st->RxPWDBAll > (u32)priv->undecorated_smoothed_pwdb) {
+ priv->undecorated_smoothed_pwdb =
+ (((priv->undecorated_smoothed_pwdb) *
+ (RX_SMOOTH-1)) +
+ (prev_st->RxPWDBAll)) / (RX_SMOOTH);
+ priv->undecorated_smoothed_pwdb =
+ priv->undecorated_smoothed_pwdb + 1;
+ } else {
+ priv->undecorated_smoothed_pwdb =
+ (((priv->undecorated_smoothed_pwdb) *
+ (RX_SMOOTH-1)) +
+ (prev_st->RxPWDBAll)) / (RX_SMOOTH);
+ }
+ rtl819x_update_rxsignalstatistics8190pci(priv, prev_st);
+ }
+
+ if (prev_st->SignalQuality != 0) {
+ if (prev_st->bPacketToSelf || prev_st->bPacketBeacon ||
+ prev_st->bToSelfBA) {
+ if (slide_evm_statistics++ >= PHY_RSSI_SLID_WIN_MAX) {
+ slide_evm_statistics = PHY_RSSI_SLID_WIN_MAX;
+ last_evm =
+ priv->stats.slide_evm[slide_evm_index];
+ priv->stats.slide_evm_total -= last_evm;
+ }
+
+ priv->stats.slide_evm_total += prev_st->SignalQuality;
+
+ priv->stats.slide_evm[slide_evm_index++] =
+ prev_st->SignalQuality;
+ if (slide_evm_index >= PHY_RSSI_SLID_WIN_MAX)
+ slide_evm_index = 0;
+
+ tmp_val = priv->stats.slide_evm_total /
+ slide_evm_statistics;
+ priv->stats.signal_quality = tmp_val;
+ priv->stats.last_signal_strength_inpercent = tmp_val;
+ }
+
+ if (prev_st->bPacketToSelf ||
+ prev_st->bPacketBeacon ||
+ prev_st->bToSelfBA) {
+ for (ij = 0; ij < 2; ij++) {
+ if (prev_st->RxMIMOSignalQuality[ij] != -1) {
+ if (priv->stats.rx_evm_percentage[ij] == 0)
+ priv->stats.rx_evm_percentage[ij] =
+ prev_st->RxMIMOSignalQuality[ij];
+ priv->stats.rx_evm_percentage[ij] =
+ ((priv->stats.rx_evm_percentage[ij] *
+ (RX_SMOOTH - 1)) +
+ (prev_st->RxMIMOSignalQuality[ij])) /
+ (RX_SMOOTH);
+ }
+ }
+ }
+ }
+}
+
+static void rtl8192_TranslateRxSignalStuff(struct net_device *dev,
+ struct sk_buff *skb,
+ struct rtllib_rx_stats *pstats,
+ struct rx_desc *pdesc,
+ struct rx_fwinfo *pdrvinfo)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ bool bpacket_match_bssid, bpacket_toself;
+ bool bPacketBeacon = false;
+ struct rtllib_hdr_3addr *hdr;
+ bool bToSelfBA = false;
+ static struct rtllib_rx_stats previous_stats;
+ u16 fc, type;
+ u8 *tmp_buf;
+ u8 *praddr;
+
+ tmp_buf = skb->data + pstats->RxDrvInfoSize + pstats->RxBufShift;
+
+ hdr = (struct rtllib_hdr_3addr *)tmp_buf;
+ fc = le16_to_cpu(hdr->frame_ctl);
+ type = WLAN_FC_GET_TYPE(fc);
+ praddr = hdr->addr1;
+
+ bpacket_match_bssid = ((RTLLIB_FTYPE_CTL != type) &&
+ (!compare_ether_addr(priv->rtllib->
+ current_network.bssid,
+ (fc & RTLLIB_FCTL_TODS) ? hdr->addr1 :
+ (fc & RTLLIB_FCTL_FROMDS) ? hdr->addr2 : hdr->addr3))
+ && (!pstats->bHwError) && (!pstats->bCRC) && (!pstats->bICV));
+ bpacket_toself = bpacket_match_bssid && /* check this */
+ (!compare_ether_addr(praddr,
+ priv->rtllib->dev->dev_addr));
+ if (WLAN_FC_GET_FRAMETYPE(fc) == RTLLIB_STYPE_BEACON)
+ bPacketBeacon = true;
+ if (bpacket_match_bssid)
+ priv->stats.numpacket_matchbssid++;
+ if (bpacket_toself)
+ priv->stats.numpacket_toself++;
+ rtl8192_process_phyinfo(priv, tmp_buf, &previous_stats, pstats);
+ rtl8192_query_rxphystatus(priv, pstats, pdesc, pdrvinfo,
+ &previous_stats, bpacket_match_bssid,
+ bpacket_toself, bPacketBeacon, bToSelfBA);
+ rtl8192_record_rxdesc_forlateruse(pstats, &previous_stats);
+}
+
+static void rtl8192_UpdateReceivedRateHistogramStatistics(
+ struct net_device *dev,
+ struct rtllib_rx_stats *pstats)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ u32 rcvType = 1;
+ u32 rateIndex;
+ u32 preamble_guardinterval;
+
+ if (pstats->bCRC)
+ rcvType = 2;
+ else if (pstats->bICV)
+ rcvType = 3;
+
+ if (pstats->bShortPreamble)
+ preamble_guardinterval = 1;
+ else
+ preamble_guardinterval = 0;
+
+ switch (pstats->rate) {
+ case MGN_1M:
+ rateIndex = 0;
+ break;
+ case MGN_2M:
+ rateIndex = 1;
+ break;
+ case MGN_5_5M:
+ rateIndex = 2;
+ break;
+ case MGN_11M:
+ rateIndex = 3;
+ break;
+ case MGN_6M:
+ rateIndex = 4;
+ break;
+ case MGN_9M:
+ rateIndex = 5;
+ break;
+ case MGN_12M:
+ rateIndex = 6;
+ break;
+ case MGN_18M:
+ rateIndex = 7;
+ break;
+ case MGN_24M:
+ rateIndex = 8;
+ break;
+ case MGN_36M:
+ rateIndex = 9;
+ break;
+ case MGN_48M:
+ rateIndex = 10;
+ break;
+ case MGN_54M:
+ rateIndex = 11;
+ break;
+ case MGN_MCS0:
+ rateIndex = 12;
+ break;
+ case MGN_MCS1:
+ rateIndex = 13;
+ break;
+ case MGN_MCS2:
+ rateIndex = 14;
+ break;
+ case MGN_MCS3:
+ rateIndex = 15;
+ break;
+ case MGN_MCS4:
+ rateIndex = 16;
+ break;
+ case MGN_MCS5:
+ rateIndex = 17;
+ break;
+ case MGN_MCS6:
+ rateIndex = 18;
+ break;
+ case MGN_MCS7:
+ rateIndex = 19;
+ break;
+ case MGN_MCS8:
+ rateIndex = 20;
+ break;
+ case MGN_MCS9:
+ rateIndex = 21;
+ break;
+ case MGN_MCS10:
+ rateIndex = 22;
+ break;
+ case MGN_MCS11:
+ rateIndex = 23;
+ break;
+ case MGN_MCS12:
+ rateIndex = 24;
+ break;
+ case MGN_MCS13:
+ rateIndex = 25;
+ break;
+ case MGN_MCS14:
+ rateIndex = 26;
+ break;
+ case MGN_MCS15:
+ rateIndex = 27;
+ break;
+ default:
+ rateIndex = 28;
+ break;
+ }
+ priv->stats.received_preamble_GI[preamble_guardinterval][rateIndex]++;
+ priv->stats.received_rate_histogram[0][rateIndex]++;
+ priv->stats.received_rate_histogram[rcvType][rateIndex]++;
+}
+
+bool rtl8192_rx_query_status_desc(struct net_device *dev,
+ struct rtllib_rx_stats *stats,
+ struct rx_desc *pdesc,
+ struct sk_buff *skb)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ stats->bICV = pdesc->ICV;
+ stats->bCRC = pdesc->CRC32;
+ stats->bHwError = pdesc->CRC32 | pdesc->ICV;
+
+ stats->Length = pdesc->Length;
+ if (stats->Length < 24)
+ stats->bHwError |= 1;
+
+ if (stats->bHwError) {
+ stats->bShift = false;
+
+ if (pdesc->CRC32) {
+ if (pdesc->Length < 500)
+ priv->stats.rxcrcerrmin++;
+ else if (pdesc->Length > 1000)
+ priv->stats.rxcrcerrmax++;
+ else
+ priv->stats.rxcrcerrmid++;
+ }
+ return false;
+ } else {
+ struct rx_fwinfo *pDrvInfo = NULL;
+ stats->RxDrvInfoSize = pdesc->RxDrvInfoSize;
+ stats->RxBufShift = ((pdesc->Shift)&0x03);
+ stats->Decrypted = !pdesc->SWDec;
+
+ pDrvInfo = (struct rx_fwinfo *)(skb->data + stats->RxBufShift);
+
+ stats->rate = HwRateToMRate90((bool)pDrvInfo->RxHT,
+ (u8)pDrvInfo->RxRate);
+ stats->bShortPreamble = pDrvInfo->SPLCP;
+
+ rtl8192_UpdateReceivedRateHistogramStatistics(dev, stats);
+
+ stats->bIsAMPDU = (pDrvInfo->PartAggr == 1);
+ stats->bFirstMPDU = (pDrvInfo->PartAggr == 1) &&
+ (pDrvInfo->FirstAGGR == 1);
+
+ stats->TimeStampLow = pDrvInfo->TSFL;
+ stats->TimeStampHigh = read_nic_dword(dev, TSFR+4);
+
+ rtl819x_UpdateRxPktTimeStamp(dev, stats);
+
+ if ((stats->RxBufShift + stats->RxDrvInfoSize) > 0)
+ stats->bShift = 1;
+
+ stats->RxIs40MHzPacket = pDrvInfo->BW;
+
+ rtl8192_TranslateRxSignalStuff(dev, skb, stats, pdesc,
+ pDrvInfo);
+
+ if (pDrvInfo->FirstAGGR == 1 || pDrvInfo->PartAggr == 1)
+ RT_TRACE(COMP_RXDESC, "pDrvInfo->FirstAGGR = %d,"
+ " pDrvInfo->PartAggr = %d\n",
+ pDrvInfo->FirstAGGR, pDrvInfo->PartAggr);
+ skb_trim(skb, skb->len - 4/*sCrcLng*/);
+
+
+ stats->packetlength = stats->Length-4;
+ stats->fraglength = stats->packetlength;
+ stats->fragoffset = 0;
+ stats->ntotalfrag = 1;
+ return true;
+ }
+}
+
+void rtl8192_halt_adapter(struct net_device *dev, bool reset)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ int i;
+ u8 OpMode;
+ u8 u1bTmp;
+ u32 ulRegRead;
+
+ OpMode = RT_OP_MODE_NO_LINK;
+ priv->rtllib->SetHwRegHandler(dev, HW_VAR_MEDIA_STATUS, &OpMode);
+
+ if (!priv->rtllib->bSupportRemoteWakeUp) {
+ u1bTmp = 0x0;
+ write_nic_byte(dev, CMDR, u1bTmp);
+ }
+
+ mdelay(20);
+
+ if (!reset) {
+ mdelay(150);
+
+ priv->bHwRfOffAction = 2;
+
+ if (!priv->rtllib->bSupportRemoteWakeUp) {
+ PHY_SetRtl8192eRfOff(dev);
+ ulRegRead = read_nic_dword(dev, CPU_GEN);
+ ulRegRead |= CPU_GEN_SYSTEM_RESET;
+ write_nic_dword(dev, CPU_GEN, ulRegRead);
+ } else {
+ write_nic_dword(dev, WFCRC0, 0xffffffff);
+ write_nic_dword(dev, WFCRC1, 0xffffffff);
+ write_nic_dword(dev, WFCRC2, 0xffffffff);
+
+
+ write_nic_byte(dev, PMR, 0x5);
+ write_nic_byte(dev, MacBlkCtrl, 0xa);
+ }
+ }
+
+ for (i = 0; i < MAX_QUEUE_SIZE; i++)
+ skb_queue_purge(&priv->rtllib->skb_waitQ[i]);
+ for (i = 0; i < MAX_QUEUE_SIZE; i++)
+ skb_queue_purge(&priv->rtllib->skb_aggQ[i]);
+
+ skb_queue_purge(&priv->skb_queue);
+ return;
+}
+
+void rtl8192_update_ratr_table(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+ u8 *pMcsRate = ieee->dot11HTOperationalRateSet;
+ u32 ratr_value = 0;
+ u8 rate_index = 0;
+
+ rtl8192_config_rate(dev, (u16 *)(&ratr_value));
+ ratr_value |= (*(u16 *)(pMcsRate)) << 12;
+ switch (ieee->mode) {
+ case IEEE_A:
+ ratr_value &= 0x00000FF0;
+ break;
+ case IEEE_B:
+ ratr_value &= 0x0000000F;
+ break;
+ case IEEE_G:
+ case IEEE_G|IEEE_B:
+ ratr_value &= 0x00000FF7;
+ break;
+ case IEEE_N_24G:
+ case IEEE_N_5G:
+ if (ieee->pHTInfo->PeerMimoPs == 0) {
+ ratr_value &= 0x0007F007;
+ } else {
+ if (priv->rf_type == RF_1T2R)
+ ratr_value &= 0x000FF007;
+ else
+ ratr_value &= 0x0F81F007;
+ }
+ break;
+ default:
+ break;
+ }
+ ratr_value &= 0x0FFFFFFF;
+ if (ieee->pHTInfo->bCurTxBW40MHz &&
+ ieee->pHTInfo->bCurShortGI40MHz)
+ ratr_value |= 0x80000000;
+ else if (!ieee->pHTInfo->bCurTxBW40MHz &&
+ ieee->pHTInfo->bCurShortGI20MHz)
+ ratr_value |= 0x80000000;
+ write_nic_dword(dev, RATR0+rate_index*4, ratr_value);
+ write_nic_byte(dev, UFWP, 1);
+}
+
+void
+rtl8192_InitializeVariables(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ strcpy(priv->nick, "rtl8192E");
+
+ priv->rtllib->softmac_features = IEEE_SOFTMAC_SCAN |
+ IEEE_SOFTMAC_ASSOCIATE | IEEE_SOFTMAC_PROBERQ |
+ IEEE_SOFTMAC_PROBERS | IEEE_SOFTMAC_TX_QUEUE /* |
+ IEEE_SOFTMAC_BEACONS*/;
+
+ priv->rtllib->tx_headroom = sizeof(struct tx_fwinfo_8190pci);
+
+ priv->ShortRetryLimit = 0x30;
+ priv->LongRetryLimit = 0x30;
+
+ priv->EarlyRxThreshold = 7;
+ priv->pwrGroupCnt = 0;
+
+ priv->bIgnoreSilentReset = false;
+ priv->enable_gpio0 = 0;
+
+ priv->TransmitConfig = 0;
+
+ priv->ReceiveConfig = RCR_ADD3 |
+ RCR_AMF | RCR_ADF |
+ RCR_AICV |
+ RCR_AB | RCR_AM | RCR_APM |
+ RCR_AAP | ((u32)7<<RCR_MXDMA_OFFSET) |
+ ((u32)7 << RCR_FIFO_OFFSET) | RCR_ONLYERLPKT;
+
+ priv->irq_mask[0] = (u32)(IMR_ROK | IMR_VODOK | IMR_VIDOK |
+ IMR_BEDOK | IMR_BKDOK | IMR_HCCADOK |
+ IMR_MGNTDOK | IMR_COMDOK | IMR_HIGHDOK |
+ IMR_BDOK | IMR_RXCMDOK | IMR_TIMEOUT0 |
+ IMR_RDU | IMR_RXFOVW | IMR_TXFOVW |
+ IMR_BcnInt | IMR_TBDOK | IMR_TBDER);
+
+
+ priv->MidHighPwrTHR_L1 = 0x3B;
+ priv->MidHighPwrTHR_L2 = 0x40;
+ priv->PwrDomainProtect = false;
+
+ priv->bfirst_after_down = 0;
+}
+
+void rtl8192_EnableInterrupt(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ priv->irq_enabled = 1;
+
+ write_nic_dword(dev, INTA_MASK, priv->irq_mask[0]);
+
+}
+
+void rtl8192_DisableInterrupt(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+
+ write_nic_dword(dev, INTA_MASK, 0);
+
+ priv->irq_enabled = 0;
+}
+
+void rtl8192_ClearInterrupt(struct net_device *dev)
+{
+ u32 tmp = 0;
+ tmp = read_nic_dword(dev, ISR);
+ write_nic_dword(dev, ISR, tmp);
+}
+
+
+void rtl8192_enable_rx(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ write_nic_dword(dev, RDQDA, priv->rx_ring_dma[RX_MPDU_QUEUE]);
+}
+
+static const u32 TX_DESC_BASE[] = {
+ BKQDA, BEQDA, VIQDA, VOQDA, HCCAQDA, CQDA, MQDA, HQDA, BQDA
+};
+
+void rtl8192_enable_tx(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ u32 i;
+
+ for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
+ write_nic_dword(dev, TX_DESC_BASE[i], priv->tx_ring[i].dma);
+}
+
+
+void rtl8192_interrupt_recognized(struct net_device *dev, u32 *p_inta,
+ u32 *p_intb)
+{
+ *p_inta = read_nic_dword(dev, ISR);
+ write_nic_dword(dev, ISR, *p_inta);
+}
+
+bool rtl8192_HalRxCheckStuck(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u16 RegRxCounter = read_nic_word(dev, 0x130);
+ bool bStuck = false;
+ static u8 rx_chk_cnt;
+ u32 SlotIndex = 0, TotalRxStuckCount = 0;
+ u8 i;
+ u8 SilentResetRxSoltNum = 4;
+
+ RT_TRACE(COMP_RESET, "%s(): RegRxCounter is %d, RxCounter is %d\n",
+ __func__, RegRxCounter, priv->RxCounter);
+
+ rx_chk_cnt++;
+ if (priv->undecorated_smoothed_pwdb >= (RateAdaptiveTH_High+5)) {
+ rx_chk_cnt = 0;
+ } else if ((priv->undecorated_smoothed_pwdb < (RateAdaptiveTH_High + 5))
+ && (((priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) &&
+ (priv->undecorated_smoothed_pwdb >= RateAdaptiveTH_Low_40M))
+ || ((priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20) &&
+ (priv->undecorated_smoothed_pwdb >= RateAdaptiveTH_Low_20M)))) {
+ if (rx_chk_cnt < 2)
+ return bStuck;
+ else
+ rx_chk_cnt = 0;
+ } else if ((((priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) &&
+ (priv->undecorated_smoothed_pwdb < RateAdaptiveTH_Low_40M)) ||
+ ((priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20) &&
+ (priv->undecorated_smoothed_pwdb < RateAdaptiveTH_Low_20M))) &&
+ priv->undecorated_smoothed_pwdb >= VeryLowRSSI) {
+ if (rx_chk_cnt < 4)
+ return bStuck;
+ else
+ rx_chk_cnt = 0;
+ } else {
+ if (rx_chk_cnt < 8)
+ return bStuck;
+ else
+ rx_chk_cnt = 0;
+ }
+
+
+ SlotIndex = (priv->SilentResetRxSlotIndex++)%SilentResetRxSoltNum;
+
+ if (priv->RxCounter == RegRxCounter) {
+ priv->SilentResetRxStuckEvent[SlotIndex] = 1;
+
+ for (i = 0; i < SilentResetRxSoltNum; i++)
+ TotalRxStuckCount += priv->SilentResetRxStuckEvent[i];
+
+ if (TotalRxStuckCount == SilentResetRxSoltNum) {
+ bStuck = true;
+ for (i = 0; i < SilentResetRxSoltNum; i++)
+ TotalRxStuckCount +=
+ priv->SilentResetRxStuckEvent[i];
+ }
+
+
+ } else {
+ priv->SilentResetRxStuckEvent[SlotIndex] = 0;
+ }
+
+ priv->RxCounter = RegRxCounter;
+
+ return bStuck;
+}
+
+bool rtl8192_HalTxCheckStuck(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ bool bStuck = false;
+ u16 RegTxCounter = read_nic_word(dev, 0x128);
+
+ RT_TRACE(COMP_RESET, "%s():RegTxCounter is %d,TxCounter is %d\n",
+ __func__, RegTxCounter, priv->TxCounter);
+
+ if (priv->TxCounter == RegTxCounter)
+ bStuck = true;
+
+ priv->TxCounter = RegTxCounter;
+
+ return bStuck;
+}
+
+bool rtl8192_GetNmodeSupportBySecCfg(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+ if (ieee->rtllib_ap_sec_type &&
+ (ieee->rtllib_ap_sec_type(priv->rtllib)&(SEC_ALG_WEP |
+ SEC_ALG_TKIP))) {
+ return false;
+ } else {
+ return true;
+ }
+}
+
+bool rtl8192_GetHalfNmodeSupportByAPs(struct net_device *dev)
+{
+ bool Reval;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+
+ if (ieee->bHalfWirelessN24GMode == true)
+ Reval = true;
+ else
+ Reval = false;
+
+ return Reval;
+}
+
+u8 rtl8192_QueryIsShort(u8 TxHT, u8 TxRate, struct cb_desc *tcb_desc)
+{
+ u8 tmp_Short;
+
+ tmp_Short = (TxHT == 1) ? ((tcb_desc->bUseShortGI) ? 1 : 0) :
+ ((tcb_desc->bUseShortPreamble) ? 1 : 0);
+ if (TxHT == 1 && TxRate != DESC90_RATEMCS15)
+ tmp_Short = 0;
+
+ return tmp_Short;
+}
+
+void ActUpdateChannelAccessSetting(struct net_device *dev,
+ enum wireless_mode WirelessMode,
+ struct channel_access_setting *ChnlAccessSetting)
+{
+ return;
+}
diff --git a/drivers/staging/rtl8192e/r8192E_dev.h b/drivers/staging/rtl8192e/r8192E_dev.h
new file mode 100644
index 00000000000..b9b3b52f912
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8192E_dev.h
@@ -0,0 +1,62 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef _RTL8192E_H
+#define _RTL8192E_H
+
+#include "r8190P_def.h"
+
+u8 rtl8192_QueryIsShort(u8 TxHT, u8 TxRate, struct cb_desc *tcb_desc);
+bool rtl8192_GetHalfNmodeSupportByAPs(struct net_device *dev);
+bool rtl8192_GetNmodeSupportBySecCfg(struct net_device *dev);
+bool rtl8192_HalTxCheckStuck(struct net_device *dev);
+bool rtl8192_HalRxCheckStuck(struct net_device *dev);
+void rtl8192_interrupt_recognized(struct net_device *dev, u32 *p_inta,
+ u32 *p_intb);
+void rtl8192_enable_rx(struct net_device *dev);
+void rtl8192_enable_tx(struct net_device *dev);
+void rtl8192_EnableInterrupt(struct net_device *dev);
+void rtl8192_DisableInterrupt(struct net_device *dev);
+void rtl8192_ClearInterrupt(struct net_device *dev);
+void rtl8192_InitializeVariables(struct net_device *dev);
+void rtl8192e_start_beacon(struct net_device *dev);
+void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val);
+void rtl8192_get_eeprom_size(struct net_device *dev);
+bool rtl8192_adapter_start(struct net_device *dev);
+void rtl8192_link_change(struct net_device *dev);
+void rtl8192_AllowAllDestAddr(struct net_device *dev, bool bAllowAllDA,
+ bool WriteIntoReg);
+void rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
+ struct cb_desc *cb_desc,
+ struct sk_buff *skb);
+void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
+ struct tx_desc_cmd *entry,
+ struct cb_desc *cb_desc, struct sk_buff *skb);
+bool rtl8192_rx_query_status_desc(struct net_device *dev,
+ struct rtllib_rx_stats *stats,
+ struct rx_desc *pdesc,
+ struct sk_buff *skb);
+void rtl8192_halt_adapter(struct net_device *dev, bool reset);
+void rtl8192_update_ratr_table(struct net_device *dev);
+#endif
diff --git a/drivers/staging/rtl8192e/r8192E_dm.c b/drivers/staging/rtl8192e/r8192E_dm.c
deleted file mode 100644
index 688d29b5588..00000000000
--- a/drivers/staging/rtl8192e/r8192E_dm.c
+++ /dev/null
@@ -1,2554 +0,0 @@
-/*++
-Copyright-c Realtek Semiconductor Corp. All rights reserved.
-
-Module Name:
- r8192U_dm.c
-
-Abstract:
- HW dynamic mechanism.
-
-Major Change History:
- When Who What
- ---------- --------------- -------------------------------
- 2008-05-14 amy create version 0 porting from windows code.
-
---*/
-#include "r8192E.h"
-#include "r8192E_dm.h"
-#include "r8192E_hw.h"
-#include "r819xE_phy.h"
-#include "r819xE_phyreg.h"
-#include "r8190_rtl8256.h"
-
-#define DRV_NAME "rtl819xE"
-
-//
-// Indicate different AP vendor for IOT issue.
-//
-static const u32 edca_setting_DL[HT_IOT_PEER_MAX] =
-{ 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0xa44f, 0x5e4322, 0x5e4322};
-static const u32 edca_setting_UL[HT_IOT_PEER_MAX] =
-{ 0x5e4322, 0xa44f, 0x5e4322, 0x604322, 0x5e4322, 0x5e4322, 0x5e4322};
-
-#define RTK_UL_EDCA 0xa44f
-#define RTK_DL_EDCA 0x5e4322
-
-
-dig_t dm_digtable;
-// For Dynamic Rx Path Selection by Signal Strength
-DRxPathSel DM_RxPathSelTable;
-
-void dm_gpio_change_rf_callback(struct work_struct *work);
-
-// DM --> Rate Adaptive
-static void dm_check_rate_adaptive(struct r8192_priv *priv);
-
-// DM --> Bandwidth switch
-static void dm_init_bandwidth_autoswitch(struct r8192_priv *priv);
-static void dm_bandwidth_autoswitch(struct r8192_priv *priv);
-
-// DM --> TX power control
-static void dm_check_txpower_tracking(struct r8192_priv *priv);
-
-// DM --> Dynamic Init Gain by RSSI
-static void dm_dig_init(struct r8192_priv *priv);
-static void dm_ctrl_initgain_byrssi(struct r8192_priv *priv);
-static void dm_ctrl_initgain_byrssi_highpwr(struct r8192_priv *priv);
-static void dm_ctrl_initgain_byrssi_by_driverrssi(struct r8192_priv *priv);
-static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(struct r8192_priv *priv);
-static void dm_initial_gain(struct r8192_priv *priv);
-static void dm_pd_th(struct r8192_priv *priv);
-static void dm_cs_ratio(struct r8192_priv *priv);
-
-static void dm_init_ctstoself(struct r8192_priv *priv);
-// DM --> EDCA turboe mode control
-static void dm_check_edca_turbo(struct r8192_priv *priv);
-static void dm_init_edca_turbo(struct r8192_priv *priv);
-
-// DM --> HW RF control
-static void dm_check_rfctrl_gpio(struct r8192_priv *priv);
-
-// DM --> Check current RX RF path state
-static void dm_check_rx_path_selection(struct r8192_priv *priv);
-static void dm_init_rxpath_selection(struct r8192_priv *priv);
-static void dm_rxpath_sel_byrssi(struct r8192_priv *priv);
-
-// DM --> Fsync for broadcom ap
-static void dm_init_fsync(struct r8192_priv *priv);
-static void dm_deInit_fsync(struct r8192_priv *priv);
-
-static void dm_check_txrateandretrycount(struct r8192_priv *priv);
-static void dm_check_fsync(struct r8192_priv *priv);
-
-
-/*---------------------Define of Tx Power Control For Near/Far Range --------*/ //Add by Jacken 2008/02/18
-static void dm_init_dynamic_txpower(struct r8192_priv *priv);
-static void dm_dynamic_txpower(struct r8192_priv *priv);
-
-// DM --> For rate adaptive and DIG, we must send RSSI to firmware
-static void dm_send_rssi_tofw(struct r8192_priv *priv);
-static void dm_ctstoself(struct r8192_priv *priv);
-
-static void dm_fsync_timer_callback(unsigned long data);
-
-/*
- * Prepare SW resource for HW dynamic mechanism.
- * This function is only invoked at driver intialization once.
- */
-void init_hal_dm(struct r8192_priv *priv)
-{
- // Undecorated Smoothed Signal Strength, it can utilized to dynamic mechanism.
- priv->undecorated_smoothed_pwdb = -1;
-
- //Initial TX Power Control for near/far range , add by amy 2008/05/15, porting from windows code.
- dm_init_dynamic_txpower(priv);
- init_rate_adaptive(priv);
- //dm_initialize_txpower_tracking(dev);
- dm_dig_init(priv);
- dm_init_edca_turbo(priv);
- dm_init_bandwidth_autoswitch(priv);
- dm_init_fsync(priv);
- dm_init_rxpath_selection(priv);
- dm_init_ctstoself(priv);
- INIT_DELAYED_WORK(&priv->gpio_change_rf_wq, dm_gpio_change_rf_callback);
-
-}
-
-void deinit_hal_dm(struct r8192_priv *priv)
-{
- dm_deInit_fsync(priv);
-}
-
-void hal_dm_watchdog(struct r8192_priv *priv)
-{
-
- /*Add by amy 2008/05/15 ,porting from windows code.*/
- dm_check_rate_adaptive(priv);
- dm_dynamic_txpower(priv);
- dm_check_txrateandretrycount(priv);
-
- dm_check_txpower_tracking(priv);
-
- dm_ctrl_initgain_byrssi(priv);
- dm_check_edca_turbo(priv);
- dm_bandwidth_autoswitch(priv);
-
- dm_check_rfctrl_gpio(priv);
- dm_check_rx_path_selection(priv);
- dm_check_fsync(priv);
-
- // Add by amy 2008-05-15 porting from windows code.
- dm_send_rssi_tofw(priv);
- dm_ctstoself(priv);
-}
-
-
-/*
- * Decide Rate Adaptive Set according to distance (signal strength)
- * 01/11/2008 MHC Modify input arguments and RATR table level.
- * 01/16/2008 MHC RF_Type is assigned in ReadAdapterInfo(). We must call
- * the function after making sure RF_Type.
- */
-void init_rate_adaptive(struct r8192_priv *priv)
-{
- prate_adaptive pra = &priv->rate_adaptive;
-
- pra->ratr_state = DM_RATR_STA_MAX;
- pra->high2low_rssi_thresh_for_ra = RateAdaptiveTH_High;
- pra->low2high_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M+5;
- pra->low2high_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M+5;
-
- pra->high_rssi_thresh_for_ra = RateAdaptiveTH_High+5;
- pra->low_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M;
- pra->low_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M;
-
- if(priv->CustomerID == RT_CID_819x_Netcore)
- pra->ping_rssi_enable = 1;
- else
- pra->ping_rssi_enable = 0;
- pra->ping_rssi_thresh_for_ra = 15;
-
-
- if (priv->rf_type == RF_2T4R)
- {
- // 07/10/08 MH Modify for RA smooth scheme.
- /* 2008/01/11 MH Modify 2T RATR table for different RSSI. 080515 porting by amy from windows code.*/
- pra->upper_rssi_threshold_ratr = 0x8f0f0000;
- pra->middle_rssi_threshold_ratr = 0x8f0ff000;
- pra->low_rssi_threshold_ratr = 0x8f0ff001;
- pra->low_rssi_threshold_ratr_40M = 0x8f0ff005;
- pra->low_rssi_threshold_ratr_20M = 0x8f0ff001;
- pra->ping_rssi_ratr = 0x0000000d;//cosa add for test
- }
- else if (priv->rf_type == RF_1T2R)
- {
- pra->upper_rssi_threshold_ratr = 0x000f0000;
- pra->middle_rssi_threshold_ratr = 0x000ff000;
- pra->low_rssi_threshold_ratr = 0x000ff001;
- pra->low_rssi_threshold_ratr_40M = 0x000ff005;
- pra->low_rssi_threshold_ratr_20M = 0x000ff001;
- pra->ping_rssi_ratr = 0x0000000d;//cosa add for test
- }
-
-}
-
-
-static void dm_check_rate_adaptive(struct r8192_priv *priv)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
- prate_adaptive pra = (prate_adaptive)&priv->rate_adaptive;
- u32 currentRATR, targetRATR = 0;
- u32 LowRSSIThreshForRA = 0, HighRSSIThreshForRA = 0;
- bool bshort_gi_enabled = false;
- static u8 ping_rssi_state=0;
-
-
- if(!priv->up)
- {
- RT_TRACE(COMP_RATE, "<---- dm_check_rate_adaptive(): driver is going to unload\n");
- return;
- }
-
- if(pra->rate_adaptive_disabled)//this variable is set by ioctl.
- return;
-
- // TODO: Only 11n mode is implemented currently,
- if( !(priv->ieee80211->mode == WIRELESS_MODE_N_24G ||
- priv->ieee80211->mode == WIRELESS_MODE_N_5G))
- return;
-
- if( priv->ieee80211->state == IEEE80211_LINKED )
- {
- // RT_TRACE(COMP_RATE, "dm_CheckRateAdaptive(): \t");
-
- //
- // Check whether Short GI is enabled
- //
- bshort_gi_enabled = (pHTInfo->bCurTxBW40MHz && pHTInfo->bCurShortGI40MHz) ||
- (!pHTInfo->bCurTxBW40MHz && pHTInfo->bCurShortGI20MHz);
-
-
- pra->upper_rssi_threshold_ratr =
- (pra->upper_rssi_threshold_ratr & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
-
- pra->middle_rssi_threshold_ratr =
- (pra->middle_rssi_threshold_ratr & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
-
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
- pra->low_rssi_threshold_ratr =
- (pra->low_rssi_threshold_ratr_40M & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
- }
- else
- {
- pra->low_rssi_threshold_ratr =
- (pra->low_rssi_threshold_ratr_20M & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
- }
- //cosa add for test
- pra->ping_rssi_ratr =
- (pra->ping_rssi_ratr & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
-
- /* 2007/10/08 MH We support RA smooth scheme now. When it is the first
- time to link with AP. We will not change upper/lower threshold. If
- STA stay in high or low level, we must change two different threshold
- to prevent jumping frequently. */
- if (pra->ratr_state == DM_RATR_STA_HIGH)
- {
- HighRSSIThreshForRA = pra->high2low_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
- (pra->low_rssi_thresh_for_ra40M):(pra->low_rssi_thresh_for_ra20M);
- }
- else if (pra->ratr_state == DM_RATR_STA_LOW)
- {
- HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
- (pra->low2high_rssi_thresh_for_ra40M):(pra->low2high_rssi_thresh_for_ra20M);
- }
- else
- {
- HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
- (pra->low_rssi_thresh_for_ra40M):(pra->low_rssi_thresh_for_ra20M);
- }
-
- if(priv->undecorated_smoothed_pwdb >= (long)HighRSSIThreshForRA)
- {
- pra->ratr_state = DM_RATR_STA_HIGH;
- targetRATR = pra->upper_rssi_threshold_ratr;
- }else if(priv->undecorated_smoothed_pwdb >= (long)LowRSSIThreshForRA)
- {
- pra->ratr_state = DM_RATR_STA_MIDDLE;
- targetRATR = pra->middle_rssi_threshold_ratr;
- }else
- {
- pra->ratr_state = DM_RATR_STA_LOW;
- targetRATR = pra->low_rssi_threshold_ratr;
- }
-
- //cosa add for test
- if(pra->ping_rssi_enable)
- {
- //pHalData->UndecoratedSmoothedPWDB = 19;
- if(priv->undecorated_smoothed_pwdb < (long)(pra->ping_rssi_thresh_for_ra+5))
- {
- if( (priv->undecorated_smoothed_pwdb < (long)pra->ping_rssi_thresh_for_ra) ||
- ping_rssi_state )
- {
- pra->ratr_state = DM_RATR_STA_LOW;
- targetRATR = pra->ping_rssi_ratr;
- ping_rssi_state = 1;
- }
- }
- else
- {
- ping_rssi_state = 0;
- }
- }
-
- // For RTL819X, if pairwisekey = wep/tkip, we support only MCS0~7.
- if(priv->ieee80211->GetHalfNmodeSupportByAPsHandler(priv->ieee80211))
- targetRATR &= 0xf00fffff;
-
- //
- // Check whether updating of RATR0 is required
- //
- currentRATR = read_nic_dword(priv, RATR0);
- if( targetRATR != currentRATR )
- {
- u32 ratr_value;
- ratr_value = targetRATR;
- RT_TRACE(COMP_RATE,"currentRATR = %x, targetRATR = %x\n", currentRATR, targetRATR);
- if(priv->rf_type == RF_1T2R)
- {
- ratr_value &= ~(RATE_ALL_OFDM_2SS);
- }
- write_nic_dword(priv, RATR0, ratr_value);
- write_nic_byte(priv, UFWP, 1);
-
- pra->last_ratr = targetRATR;
- }
-
- }
- else
- {
- pra->ratr_state = DM_RATR_STA_MAX;
- }
-
-}
-
-
-static void dm_init_bandwidth_autoswitch(struct r8192_priv *priv)
-{
- priv->ieee80211->bandwidth_auto_switch.threshold_20Mhzto40Mhz = BW_AUTO_SWITCH_LOW_HIGH;
- priv->ieee80211->bandwidth_auto_switch.threshold_40Mhzto20Mhz = BW_AUTO_SWITCH_HIGH_LOW;
- priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = false;
- priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable = false;
-
-}
-
-
-static void dm_bandwidth_autoswitch(struct r8192_priv *priv)
-{
- if(priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ||!priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable){
- return;
- }else{
- if(priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz == false){//If send packets in 40 Mhz in 20/40
- if(priv->undecorated_smoothed_pwdb <= priv->ieee80211->bandwidth_auto_switch.threshold_40Mhzto20Mhz)
- priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = true;
- }else{//in force send packets in 20 Mhz in 20/40
- if(priv->undecorated_smoothed_pwdb >= priv->ieee80211->bandwidth_auto_switch.threshold_20Mhzto40Mhz)
- priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = false;
-
- }
- }
-}
-
-//OFDM default at 0db, index=6.
-static const u32 OFDMSwingTable[OFDM_Table_Length] = {
- 0x7f8001fe, // 0, +6db
- 0x71c001c7, // 1, +5db
- 0x65400195, // 2, +4db
- 0x5a400169, // 3, +3db
- 0x50800142, // 4, +2db
- 0x47c0011f, // 5, +1db
- 0x40000100, // 6, +0db ===> default, upper for higher temperature, lower for low temperature
- 0x390000e4, // 7, -1db
- 0x32c000cb, // 8, -2db
- 0x2d4000b5, // 9, -3db
- 0x288000a2, // 10, -4db
- 0x24000090, // 11, -5db
- 0x20000080, // 12, -6db
- 0x1c800072, // 13, -7db
- 0x19800066, // 14, -8db
- 0x26c0005b, // 15, -9db
- 0x24400051, // 16, -10db
- 0x12000048, // 17, -11db
- 0x10000040 // 18, -12db
-};
-static const u8 CCKSwingTable_Ch1_Ch13[CCK_Table_length][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, // 0, +0db ===> CCK40M default
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, // 1, -1db
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, // 2, -2db
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, // 3, -3db
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, // 4, -4db
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, // 5, -5db
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, // 6, -6db ===> CCK20M default
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, // 7, -7db
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, // 8, -8db
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, // 9, -9db
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, // 10, -10db
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01} // 11, -11db
-};
-
-static const u8 CCKSwingTable_Ch14[CCK_Table_length][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, // 0, +0db ===> CCK40M default
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, // 1, -1db
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, // 2, -2db
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, // 3, -3db
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, // 4, -4db
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, // 5, -5db
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, // 6, -6db ===> CCK20M default
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, // 7, -7db
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, // 8, -8db
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, // 9, -9db
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, // 10, -10db
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00} // 11, -11db
-};
-
-#define Pw_Track_Flag 0x11d
-#define Tssi_Mea_Value 0x13c
-#define Tssi_Report_Value1 0x134
-#define Tssi_Report_Value2 0x13e
-#define FW_Busy_Flag 0x13f
-static void dm_TXPowerTrackingCallback_TSSI(struct r8192_priv *priv)
-{
- bool bHighpowerstate, viviflag = FALSE;
- DCMD_TXCMD_T tx_cmd;
- u8 powerlevelOFDM24G;
- int i =0, j = 0, k = 0;
- u8 RF_Type, tmp_report[5]={0, 0, 0, 0, 0};
- u32 Value;
- u8 Pwr_Flag;
- u16 Avg_TSSI_Meas, TSSI_13dBm, Avg_TSSI_Meas_from_driver=0;
-// bool rtStatus = true;
- u32 delta=0;
- RT_TRACE(COMP_POWER_TRACKING,"%s()\n",__FUNCTION__);
-// write_nic_byte(priv, 0x1ba, 0);
- write_nic_byte(priv, Pw_Track_Flag, 0);
- write_nic_byte(priv, FW_Busy_Flag, 0);
- priv->ieee80211->bdynamic_txpower_enable = false;
- bHighpowerstate = priv->bDynamicTxHighPower;
-
- powerlevelOFDM24G = (u8)(priv->Pwr_Track>>24);
- RF_Type = priv->rf_type;
- Value = (RF_Type<<8) | powerlevelOFDM24G;
-
- RT_TRACE(COMP_POWER_TRACKING, "powerlevelOFDM24G = %x\n", powerlevelOFDM24G);
-
- for(j = 0; j<=30; j++)
-{ //fill tx_cmd
-
- tx_cmd.Op = TXCMD_SET_TX_PWR_TRACKING;
- tx_cmd.Length = 4;
- tx_cmd.Value = Value;
- cmpk_message_handle_tx(priv, (u8*)&tx_cmd, DESC_PACKET_TYPE_INIT, sizeof(DCMD_TXCMD_T));
- mdelay(1);
-
- for(i = 0;i <= 30; i++)
- {
- Pwr_Flag = read_nic_byte(priv, Pw_Track_Flag);
-
- if (Pwr_Flag == 0)
- {
- mdelay(1);
- continue;
- }
-
- Avg_TSSI_Meas = read_nic_word(priv, Tssi_Mea_Value);
-
- if(Avg_TSSI_Meas == 0)
- {
- write_nic_byte(priv, Pw_Track_Flag, 0);
- write_nic_byte(priv, FW_Busy_Flag, 0);
- return;
- }
-
- for(k = 0;k < 5; k++)
- {
- if(k !=4)
- tmp_report[k] = read_nic_byte(priv, Tssi_Report_Value1+k);
- else
- tmp_report[k] = read_nic_byte(priv, Tssi_Report_Value2);
-
- RT_TRACE(COMP_POWER_TRACKING, "TSSI_report_value = %d\n", tmp_report[k]);
- }
-
- //check if the report value is right
- for(k = 0;k < 5; k++)
- {
- if(tmp_report[k] <= 20)
- {
- viviflag =TRUE;
- break;
- }
- }
- if(viviflag ==TRUE)
- {
- write_nic_byte(priv, Pw_Track_Flag, 0);
- viviflag = FALSE;
- RT_TRACE(COMP_POWER_TRACKING, "we filted this data\n");
- for(k = 0;k < 5; k++)
- tmp_report[k] = 0;
- break;
- }
-
- for(k = 0;k < 5; k++)
- {
- Avg_TSSI_Meas_from_driver += tmp_report[k];
- }
-
- Avg_TSSI_Meas_from_driver = Avg_TSSI_Meas_from_driver*100/5;
- RT_TRACE(COMP_POWER_TRACKING, "Avg_TSSI_Meas_from_driver = %d\n", Avg_TSSI_Meas_from_driver);
- TSSI_13dBm = priv->TSSI_13dBm;
- RT_TRACE(COMP_POWER_TRACKING, "TSSI_13dBm = %d\n", TSSI_13dBm);
-
- //if(abs(Avg_TSSI_Meas_from_driver - TSSI_13dBm) <= E_FOR_TX_POWER_TRACK)
- // For MacOS-compatible
- if(Avg_TSSI_Meas_from_driver > TSSI_13dBm)
- delta = Avg_TSSI_Meas_from_driver - TSSI_13dBm;
- else
- delta = TSSI_13dBm - Avg_TSSI_Meas_from_driver;
-
- if(delta <= E_FOR_TX_POWER_TRACK)
- {
- priv->ieee80211->bdynamic_txpower_enable = TRUE;
- write_nic_byte(priv, Pw_Track_Flag, 0);
- write_nic_byte(priv, FW_Busy_Flag, 0);
- RT_TRACE(COMP_POWER_TRACKING, "tx power track is done\n");
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresentAttentuation_difference = %d\n", priv->CCKPresentAttentuation_difference);
- RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresentAttentuation = %d\n", priv->CCKPresentAttentuation);
- return;
- }
- else
- {
- if(Avg_TSSI_Meas_from_driver < TSSI_13dBm - E_FOR_TX_POWER_TRACK)
- {
- if (RF_Type == RF_2T4R)
- {
-
- if((priv->rfa_txpowertrackingindex > 0) &&(priv->rfc_txpowertrackingindex > 0))
- {
- priv->rfa_txpowertrackingindex--;
- if(priv->rfa_txpowertrackingindex_real > 4)
- {
- priv->rfa_txpowertrackingindex_real--;
- rtl8192_setBBreg(priv, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
- }
-
- priv->rfc_txpowertrackingindex--;
- if(priv->rfc_txpowertrackingindex_real > 4)
- {
- priv->rfc_txpowertrackingindex_real--;
- rtl8192_setBBreg(priv, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value);
- }
- }
- else
- {
- rtl8192_setBBreg(priv, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[4].txbbgain_value);
- rtl8192_setBBreg(priv, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[4].txbbgain_value);
- }
- }
- else
- {
- if(priv->rfc_txpowertrackingindex > 0)
- {
- priv->rfc_txpowertrackingindex--;
- if(priv->rfc_txpowertrackingindex_real > 4)
- {
- priv->rfc_txpowertrackingindex_real--;
- rtl8192_setBBreg(priv, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value);
- }
- }
- else
- rtl8192_setBBreg(priv, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[4].txbbgain_value);
- }
- }
- else
- {
- if (RF_Type == RF_2T4R)
- {
- if((priv->rfa_txpowertrackingindex < TxBBGainTableLength - 1) &&(priv->rfc_txpowertrackingindex < TxBBGainTableLength - 1))
- {
- priv->rfa_txpowertrackingindex++;
- priv->rfa_txpowertrackingindex_real++;
- rtl8192_setBBreg(priv, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
- priv->rfc_txpowertrackingindex++;
- priv->rfc_txpowertrackingindex_real++;
- rtl8192_setBBreg(priv, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value);
- }
- else
- {
- rtl8192_setBBreg(priv, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[TxBBGainTableLength - 1].txbbgain_value);
- rtl8192_setBBreg(priv, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[TxBBGainTableLength - 1].txbbgain_value);
- }
- }
- else
- {
- if(priv->rfc_txpowertrackingindex < (TxBBGainTableLength - 1))
- {
- priv->rfc_txpowertrackingindex++;
- priv->rfc_txpowertrackingindex_real++;
- rtl8192_setBBreg(priv, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value);
- }
- else
- rtl8192_setBBreg(priv, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[TxBBGainTableLength - 1].txbbgain_value);
- }
- }
- if (RF_Type == RF_2T4R)
- priv->CCKPresentAttentuation_difference
- = priv->rfa_txpowertrackingindex - priv->rfa_txpowertracking_default;
- else
- priv->CCKPresentAttentuation_difference
- = priv->rfc_txpowertrackingindex - priv->rfc_txpowertracking_default;
-
- if(priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
- priv->CCKPresentAttentuation
- = priv->CCKPresentAttentuation_20Mdefault + priv->CCKPresentAttentuation_difference;
- else
- priv->CCKPresentAttentuation
- = priv->CCKPresentAttentuation_40Mdefault + priv->CCKPresentAttentuation_difference;
-
- if(priv->CCKPresentAttentuation > (CCKTxBBGainTableLength-1))
- priv->CCKPresentAttentuation = CCKTxBBGainTableLength-1;
- if(priv->CCKPresentAttentuation < 0)
- priv->CCKPresentAttentuation = 0;
-
- if(1)
- {
- if(priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14)
- {
- priv->bcck_in_ch14 = TRUE;
- dm_cck_txpower_adjust(priv, priv->bcck_in_ch14);
- }
- else if(priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14)
- {
- priv->bcck_in_ch14 = FALSE;
- dm_cck_txpower_adjust(priv, priv->bcck_in_ch14);
- }
- else
- dm_cck_txpower_adjust(priv, priv->bcck_in_ch14);
- }
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresentAttentuation_difference = %d\n", priv->CCKPresentAttentuation_difference);
- RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresentAttentuation = %d\n", priv->CCKPresentAttentuation);
-
- if (priv->CCKPresentAttentuation_difference <= -12||priv->CCKPresentAttentuation_difference >= 24)
- {
- priv->ieee80211->bdynamic_txpower_enable = TRUE;
- write_nic_byte(priv, Pw_Track_Flag, 0);
- write_nic_byte(priv, FW_Busy_Flag, 0);
- RT_TRACE(COMP_POWER_TRACKING, "tx power track--->limited\n");
- return;
- }
-
-
- }
- write_nic_byte(priv, Pw_Track_Flag, 0);
- Avg_TSSI_Meas_from_driver = 0;
- for(k = 0;k < 5; k++)
- tmp_report[k] = 0;
- break;
- }
- write_nic_byte(priv, FW_Busy_Flag, 0);
-}
- priv->ieee80211->bdynamic_txpower_enable = TRUE;
- write_nic_byte(priv, Pw_Track_Flag, 0);
-}
-
-static void dm_TXPowerTrackingCallback_ThermalMeter(struct r8192_priv *priv)
-{
-#define ThermalMeterVal 9
- u32 tmpRegA, TempCCk;
- u8 tmpOFDMindex, tmpCCKindex, tmpCCK20Mindex, tmpCCK40Mindex, tmpval;
- int i =0, CCKSwingNeedUpdate=0;
-
- if(!priv->btxpower_trackingInit)
- {
- //Query OFDM default setting
- tmpRegA = rtl8192_QueryBBReg(priv, rOFDM0_XATxIQImbalance, bMaskDWord);
- for(i=0; i<OFDM_Table_Length; i++) //find the index
- {
- if(tmpRegA == OFDMSwingTable[i])
- {
- priv->OFDM_index= (u8)i;
- RT_TRACE(COMP_POWER_TRACKING, "Initial reg0x%x = 0x%x, OFDM_index=0x%x\n",
- rOFDM0_XATxIQImbalance, tmpRegA, priv->OFDM_index);
- }
- }
-
- //Query CCK default setting From 0xa22
- TempCCk = rtl8192_QueryBBReg(priv, rCCK0_TxFilter1, bMaskByte2);
- for(i=0 ; i<CCK_Table_length ; i++)
- {
- if(TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0])
- {
- priv->CCK_index =(u8) i;
- RT_TRACE(COMP_POWER_TRACKING, "Initial reg0x%x = 0x%x, CCK_index=0x%x\n",
- rCCK0_TxFilter1, TempCCk, priv->CCK_index);
- break;
- }
- }
- priv->btxpower_trackingInit = TRUE;
- //pHalData->TXPowercount = 0;
- return;
- }
-
- // read and filter out unreasonable value
- tmpRegA = rtl8192_phy_QueryRFReg(priv, RF90_PATH_A, 0x12, 0x078); // 0x12: RF Reg[10:7]
- RT_TRACE(COMP_POWER_TRACKING, "Readback ThermalMeterA = %d\n", tmpRegA);
- if(tmpRegA < 3 || tmpRegA > 13)
- return;
- if(tmpRegA >= 12) // if over 12, TP will be bad when high temperature
- tmpRegA = 12;
- RT_TRACE(COMP_POWER_TRACKING, "Valid ThermalMeterA = %d\n", tmpRegA);
- priv->ThermalMeter[0] = ThermalMeterVal; //We use fixed value by Bryant's suggestion
- priv->ThermalMeter[1] = ThermalMeterVal; //We use fixed value by Bryant's suggestion
-
- //Get current RF-A temperature index
- if(priv->ThermalMeter[0] >= (u8)tmpRegA) //lower temperature
- {
- tmpOFDMindex = tmpCCK20Mindex = 6+(priv->ThermalMeter[0]-(u8)tmpRegA);
- tmpCCK40Mindex = tmpCCK20Mindex - 6;
- if(tmpOFDMindex >= OFDM_Table_Length)
- tmpOFDMindex = OFDM_Table_Length-1;
- if(tmpCCK20Mindex >= CCK_Table_length)
- tmpCCK20Mindex = CCK_Table_length-1;
- if(tmpCCK40Mindex >= CCK_Table_length)
- tmpCCK40Mindex = CCK_Table_length-1;
- }
- else
- {
- tmpval = ((u8)tmpRegA - priv->ThermalMeter[0]);
- if(tmpval >= 6) // higher temperature
- tmpOFDMindex = tmpCCK20Mindex = 0; // max to +6dB
- else
- tmpOFDMindex = tmpCCK20Mindex = 6 - tmpval;
- tmpCCK40Mindex = 0;
- }
-
- if(priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) //40M
- tmpCCKindex = tmpCCK40Mindex;
- else
- tmpCCKindex = tmpCCK20Mindex;
-
- //record for bandwidth swith
- priv->Record_CCK_20Mindex = tmpCCK20Mindex;
- priv->Record_CCK_40Mindex = tmpCCK40Mindex;
- RT_TRACE(COMP_POWER_TRACKING, "Record_CCK_20Mindex / Record_CCK_40Mindex = %d / %d.\n",
- priv->Record_CCK_20Mindex, priv->Record_CCK_40Mindex);
-
- if(priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14)
- {
- priv->bcck_in_ch14 = TRUE;
- CCKSwingNeedUpdate = 1;
- }
- else if(priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14)
- {
- priv->bcck_in_ch14 = FALSE;
- CCKSwingNeedUpdate = 1;
- }
-
- if(priv->CCK_index != tmpCCKindex)
-{
- priv->CCK_index = tmpCCKindex;
- CCKSwingNeedUpdate = 1;
- }
-
- if(CCKSwingNeedUpdate)
- {
- dm_cck_txpower_adjust(priv, priv->bcck_in_ch14);
- }
- if(priv->OFDM_index != tmpOFDMindex)
- {
- priv->OFDM_index = tmpOFDMindex;
- rtl8192_setBBreg(priv, rOFDM0_XATxIQImbalance, bMaskDWord, OFDMSwingTable[priv->OFDM_index]);
- RT_TRACE(COMP_POWER_TRACKING, "Update OFDMSwing[%d] = 0x%x\n",
- priv->OFDM_index, OFDMSwingTable[priv->OFDM_index]);
- }
- priv->txpower_count = 0;
-}
-
-void dm_txpower_trackingcallback(struct work_struct *work)
-{
- struct delayed_work *dwork = container_of(work,struct delayed_work,work);
- struct r8192_priv *priv = container_of(dwork,struct r8192_priv,txpower_tracking_wq);
-
- if(priv->IC_Cut >= IC_VersionCut_D)
- dm_TXPowerTrackingCallback_TSSI(priv);
- else
- dm_TXPowerTrackingCallback_ThermalMeter(priv);
-}
-
-
-static const txbbgain_struct rtl8192_txbbgain_table[] = {
- { 12, 0x7f8001fe },
- { 11, 0x788001e2 },
- { 10, 0x71c001c7 },
- { 9, 0x6b8001ae },
- { 8, 0x65400195 },
- { 7, 0x5fc0017f },
- { 6, 0x5a400169 },
- { 5, 0x55400155 },
- { 4, 0x50800142 },
- { 3, 0x4c000130 },
- { 2, 0x47c0011f },
- { 1, 0x43c0010f },
- { 0, 0x40000100 },
- { -1, 0x3c8000f2 },
- { -2, 0x390000e4 },
- { -3, 0x35c000d7 },
- { -4, 0x32c000cb },
- { -5, 0x300000c0 },
- { -6, 0x2d4000b5 },
- { -7, 0x2ac000ab },
- { -8, 0x288000a2 },
- { -9, 0x26000098 },
- { -10, 0x24000090 },
- { -11, 0x22000088 },
- { -12, 0x20000080 },
- { -13, 0x1a00006c },
- { -14, 0x1c800072 },
- { -15, 0x18000060 },
- { -16, 0x19800066 },
- { -17, 0x15800056 },
- { -18, 0x26c0005b },
- { -19, 0x14400051 },
- { -20, 0x24400051 },
- { -21, 0x1300004c },
- { -22, 0x12000048 },
- { -23, 0x11000044 },
- { -24, 0x10000040 },
-};
-
-/*
- * ccktxbb_valuearray[0] is 0xA22 [1] is 0xA24 ...[7] is 0xA29
- * This Table is for CH1~CH13
- */
-static const ccktxbbgain_struct rtl8192_cck_txbbgain_table[] = {
- {{ 0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04 }},
- {{ 0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04 }},
- {{ 0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03 }},
- {{ 0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03 }},
- {{ 0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03 }},
- {{ 0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03 }},
- {{ 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03 }},
- {{ 0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03 }},
- {{ 0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02 }},
- {{ 0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02 }},
- {{ 0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02 }},
- {{ 0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02 }},
- {{ 0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02 }},
- {{ 0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02 }},
- {{ 0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02 }},
- {{ 0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02 }},
- {{ 0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01 }},
- {{ 0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02 }},
- {{ 0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01 }},
- {{ 0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01 }},
- {{ 0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01 }},
- {{ 0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01 }},
- {{ 0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01 }},
-};
-
-/*
- * ccktxbb_valuearray[0] is 0xA22 [1] is 0xA24 ...[7] is 0xA29
- * This Table is for CH14
- */
-static const ccktxbbgain_struct rtl8192_cck_txbbgain_ch14_table[] = {
- {{ 0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x2d, 0x2d, 0x27, 0x17, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x28, 0x28, 0x22, 0x14, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00 }},
- {{ 0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00 }},
-};
-
-static void dm_InitializeTXPowerTracking_TSSI(struct r8192_priv *priv)
-{
- priv->txbbgain_table = rtl8192_txbbgain_table;
- priv->cck_txbbgain_table = rtl8192_cck_txbbgain_table;
- priv->cck_txbbgain_ch14_table = rtl8192_cck_txbbgain_ch14_table;
-
- priv->btxpower_tracking = TRUE;
- priv->txpower_count = 0;
- priv->btxpower_trackingInit = FALSE;
-
-}
-
-static void dm_InitializeTXPowerTracking_ThermalMeter(struct r8192_priv *priv)
-{
- // Tx Power tracking by Theremal Meter require Firmware R/W 3-wire. This mechanism
- // can be enabled only when Firmware R/W 3-wire is enabled. Otherwise, frequent r/w
- // 3-wire by driver cause RF goes into wrong state.
- if(priv->ieee80211->FwRWRF)
- priv->btxpower_tracking = TRUE;
- else
- priv->btxpower_tracking = FALSE;
- priv->txpower_count = 0;
- priv->btxpower_trackingInit = FALSE;
-}
-
-void dm_initialize_txpower_tracking(struct r8192_priv *priv)
-{
- if(priv->IC_Cut >= IC_VersionCut_D)
- dm_InitializeTXPowerTracking_TSSI(priv);
- else
- dm_InitializeTXPowerTracking_ThermalMeter(priv);
-}
-
-
-static void dm_CheckTXPowerTracking_TSSI(struct r8192_priv *priv)
-{
- static u32 tx_power_track_counter = 0;
- RT_TRACE(COMP_POWER_TRACKING,"%s()\n",__FUNCTION__);
- if(read_nic_byte(priv, 0x11e) ==1)
- return;
- if(!priv->btxpower_tracking)
- return;
- tx_power_track_counter++;
-
- if (tx_power_track_counter > 90) {
- queue_delayed_work(priv->priv_wq,&priv->txpower_tracking_wq,0);
- tx_power_track_counter =0;
- }
-}
-
-static void dm_CheckTXPowerTracking_ThermalMeter(struct r8192_priv *priv)
-{
- static u8 TM_Trigger=0;
-
- if(!priv->btxpower_tracking)
- return;
- else
- {
- if(priv->txpower_count <= 2)
- {
- priv->txpower_count++;
- return;
- }
- }
-
- if(!TM_Trigger)
- {
- //Attention!! You have to wirte all 12bits data to RF, or it may cause RF to crash
- //actually write reg0x02 bit1=0, then bit1=1.
- rtl8192_phy_SetRFReg(priv, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
- rtl8192_phy_SetRFReg(priv, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
- rtl8192_phy_SetRFReg(priv, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
- rtl8192_phy_SetRFReg(priv, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
- TM_Trigger = 1;
- return;
- }
- else {
- queue_delayed_work(priv->priv_wq,&priv->txpower_tracking_wq,0);
- TM_Trigger = 0;
- }
-}
-
-static void dm_check_txpower_tracking(struct r8192_priv *priv)
-{
- if(priv->IC_Cut >= IC_VersionCut_D)
- dm_CheckTXPowerTracking_TSSI(priv);
- else
- dm_CheckTXPowerTracking_ThermalMeter(priv);
-}
-
-
-static void dm_CCKTxPowerAdjust_TSSI(struct r8192_priv *priv, bool bInCH14)
-{
- u32 TempVal;
- //Write 0xa22 0xa23
- TempVal = 0;
- if(!bInCH14){
- //Write 0xa22 0xa23
- TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[0] +
- (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[1]<<8)) ;
-
- rtl8192_setBBreg(priv, rCCK0_TxFilter1, bMaskHWord, TempVal);
- //Write 0xa24 ~ 0xa27
- TempVal = 0;
- TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[2] +
- (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[3]<<8) +
- (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[4]<<16 )+
- (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[5]<<24));
- rtl8192_setBBreg(priv, rCCK0_TxFilter2, bMaskDWord, TempVal);
- //Write 0xa28 0xa29
- TempVal = 0;
- TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[6] +
- (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[7]<<8)) ;
-
- rtl8192_setBBreg(priv, rCCK0_DebugPort, bMaskLWord, TempVal);
- }
- else
- {
- TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[0] +
- (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[1]<<8)) ;
-
- rtl8192_setBBreg(priv, rCCK0_TxFilter1, bMaskHWord, TempVal);
- //Write 0xa24 ~ 0xa27
- TempVal = 0;
- TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[2] +
- (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[3]<<8) +
- (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[4]<<16 )+
- (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[5]<<24));
- rtl8192_setBBreg(priv, rCCK0_TxFilter2, bMaskDWord, TempVal);
- //Write 0xa28 0xa29
- TempVal = 0;
- TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[6] +
- (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[7]<<8)) ;
-
- rtl8192_setBBreg(priv, rCCK0_DebugPort, bMaskLWord, TempVal);
- }
-
-
-}
-
-static void dm_CCKTxPowerAdjust_ThermalMeter(struct r8192_priv *priv,
- bool bInCH14)
-{
- u32 TempVal;
-
- TempVal = 0;
- if(!bInCH14)
- {
- //Write 0xa22 0xa23
- TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][1]<<8) ;
- rtl8192_setBBreg(priv, rCCK0_TxFilter1, bMaskHWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter1, TempVal);
- //Write 0xa24 ~ 0xa27
- TempVal = 0;
- TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][3]<<8) +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16 )+
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][5]<<24);
- rtl8192_setBBreg(priv, rCCK0_TxFilter2, bMaskDWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter2, TempVal);
- //Write 0xa28 0xa29
- TempVal = 0;
- TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][7]<<8) ;
-
- rtl8192_setBBreg(priv, rCCK0_DebugPort, bMaskLWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_DebugPort, TempVal);
- }
- else
- {
-// priv->CCKTxPowerAdjustCntNotCh14++; //cosa add for debug.
- //Write 0xa22 0xa23
- TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
- (CCKSwingTable_Ch14[priv->CCK_index][1]<<8) ;
-
- rtl8192_setBBreg(priv, rCCK0_TxFilter1, bMaskHWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter1, TempVal);
- //Write 0xa24 ~ 0xa27
- TempVal = 0;
- TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
- (CCKSwingTable_Ch14[priv->CCK_index][3]<<8) +
- (CCKSwingTable_Ch14[priv->CCK_index][4]<<16 )+
- (CCKSwingTable_Ch14[priv->CCK_index][5]<<24);
- rtl8192_setBBreg(priv, rCCK0_TxFilter2, bMaskDWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter2, TempVal);
- //Write 0xa28 0xa29
- TempVal = 0;
- TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
- (CCKSwingTable_Ch14[priv->CCK_index][7]<<8) ;
-
- rtl8192_setBBreg(priv, rCCK0_DebugPort, bMaskLWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING,"CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_DebugPort, TempVal);
- }
-}
-
-void dm_cck_txpower_adjust(struct r8192_priv *priv, bool binch14)
-{
- if(priv->IC_Cut >= IC_VersionCut_D)
- dm_CCKTxPowerAdjust_TSSI(priv, binch14);
- else
- dm_CCKTxPowerAdjust_ThermalMeter(priv, binch14);
-}
-
-/* Set DIG scheme init value. */
-static void dm_dig_init(struct r8192_priv *priv)
-{
- /* 2007/10/05 MH Disable DIG scheme now. Not tested. */
- dm_digtable.dig_enable_flag = true;
- dm_digtable.dig_algorithm = DIG_ALGO_BY_RSSI;
- dm_digtable.dbg_mode = DM_DBG_OFF; //off=by real rssi value, on=by DM_DigTable.Rssi_val for new dig
- dm_digtable.dig_algorithm_switch = 0;
-
- /* 2007/10/04 MH Define init gain threshold. */
- dm_digtable.dig_state = DM_STA_DIG_MAX;
- dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
- dm_digtable.initialgain_lowerbound_state = false;
-
- dm_digtable.rssi_low_thresh = DM_DIG_THRESH_LOW;
- dm_digtable.rssi_high_thresh = DM_DIG_THRESH_HIGH;
-
- dm_digtable.rssi_high_power_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
- dm_digtable.rssi_high_power_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;
-
- dm_digtable.rssi_val = 50; //for new dig debug rssi value
- dm_digtable.backoff_val = DM_DIG_BACKOFF;
- dm_digtable.rx_gain_range_max = DM_DIG_MAX;
- if(priv->CustomerID == RT_CID_819x_Netcore)
- dm_digtable.rx_gain_range_min = DM_DIG_MIN_Netcore;
- else
- dm_digtable.rx_gain_range_min = DM_DIG_MIN;
-
-}
-
-
-/*
- * Driver must monitor RSSI and notify firmware to change initial
- * gain according to different threshold. BB team provide the
- * suggested solution.
- */
-static void dm_ctrl_initgain_byrssi(struct r8192_priv *priv)
-{
- if (dm_digtable.dig_enable_flag == false)
- return;
-
- if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
- dm_ctrl_initgain_byrssi_by_fwfalse_alarm(priv);
- else if(dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
- dm_ctrl_initgain_byrssi_by_driverrssi(priv);
-}
-
-
-static void dm_ctrl_initgain_byrssi_by_driverrssi(struct r8192_priv *priv)
-{
- u8 i;
- static u8 fw_dig=0;
-
- if (dm_digtable.dig_enable_flag == false)
- return;
-
- if(dm_digtable.dig_algorithm_switch) // if swithed algorithm, we have to disable FW Dig.
- fw_dig = 0;
- if(fw_dig <= 3) // execute several times to make sure the FW Dig is disabled
- {// FW DIG Off
- for(i=0; i<3; i++)
- rtl8192_setBBreg(priv, UFWP, bMaskByte1, 0x8); // Only clear byte 1 and rewrite.
- fw_dig++;
- dm_digtable.dig_state = DM_STA_DIG_OFF; //fw dig off.
- }
-
- if(priv->ieee80211->state == IEEE80211_LINKED)
- dm_digtable.cur_connect_state = DIG_CONNECT;
- else
- dm_digtable.cur_connect_state = DIG_DISCONNECT;
-
- if(dm_digtable.dbg_mode == DM_DBG_OFF)
- dm_digtable.rssi_val = priv->undecorated_smoothed_pwdb;
-
- dm_initial_gain(priv);
- dm_pd_th(priv);
- dm_cs_ratio(priv);
- if(dm_digtable.dig_algorithm_switch)
- dm_digtable.dig_algorithm_switch = 0;
- dm_digtable.pre_connect_state = dm_digtable.cur_connect_state;
-
-}
-
-static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(struct r8192_priv *priv)
-{
- static u32 reset_cnt = 0;
- u8 i;
-
- if (dm_digtable.dig_enable_flag == false)
- return;
-
- if(dm_digtable.dig_algorithm_switch)
- {
- dm_digtable.dig_state = DM_STA_DIG_MAX;
- // Fw DIG On.
- for(i=0; i<3; i++)
- rtl8192_setBBreg(priv, UFWP, bMaskByte1, 0x1); // Only clear byte 1 and rewrite.
- dm_digtable.dig_algorithm_switch = 0;
- }
-
- if (priv->ieee80211->state != IEEE80211_LINKED)
- return;
-
- // For smooth, we can not change DIG state.
- if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_low_thresh) &&
- (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_thresh))
- {
- return;
- }
-
- /* 1. When RSSI decrease, We have to judge if it is smaller than a threshold
- and then execute below step. */
- if ((priv->undecorated_smoothed_pwdb <= dm_digtable.rssi_low_thresh))
- {
- /* 2008/02/05 MH When we execute silent reset, the DIG PHY parameters
- will be reset to init value. We must prevent the condition. */
- if (dm_digtable.dig_state == DM_STA_DIG_OFF &&
- (priv->reset_count == reset_cnt))
- {
- return;
- }
- else
- {
- reset_cnt = priv->reset_count;
- }
-
- // If DIG is off, DIG high power state must reset.
- dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
- dm_digtable.dig_state = DM_STA_DIG_OFF;
-
- // 1.1 DIG Off.
- rtl8192_setBBreg(priv, UFWP, bMaskByte1, 0x8); // Only clear byte 1 and rewrite.
-
- // 1.2 Set initial gain.
- write_nic_byte(priv, rOFDM0_XAAGCCore1, 0x17);
- write_nic_byte(priv, rOFDM0_XBAGCCore1, 0x17);
- write_nic_byte(priv, rOFDM0_XCAGCCore1, 0x17);
- write_nic_byte(priv, rOFDM0_XDAGCCore1, 0x17);
-
- // 1.3 Lower PD_TH for OFDM.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
- /* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
- // 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
- write_nic_byte(priv, (rOFDM0_XATxAFE+3), 0x00);
- }
- else
- write_nic_byte(priv, rOFDM0_RxDetector1, 0x42);
-
- // 1.4 Lower CS ratio for CCK.
- write_nic_byte(priv, 0xa0a, 0x08);
-
- // 1.5 Higher EDCCA.
- //PlatformEFIOWrite4Byte(pAdapter, rOFDM0_ECCAThreshold, 0x325);
- return;
-
- }
-
- /* 2. When RSSI increase, We have to judge if it is larger than a threshold
- and then execute below step. */
- if ((priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh) )
- {
- u8 reset_flag = 0;
-
- if (dm_digtable.dig_state == DM_STA_DIG_ON &&
- (priv->reset_count == reset_cnt))
- {
- dm_ctrl_initgain_byrssi_highpwr(priv);
- return;
- }
- else
- {
- if (priv->reset_count != reset_cnt)
- reset_flag = 1;
-
- reset_cnt = priv->reset_count;
- }
-
- dm_digtable.dig_state = DM_STA_DIG_ON;
-
- // 2.1 Set initial gain.
- // 2008/02/26 MH SD3-Jerry suggest to prevent dirty environment.
- if (reset_flag == 1)
- {
- write_nic_byte(priv, rOFDM0_XAAGCCore1, 0x2c);
- write_nic_byte(priv, rOFDM0_XBAGCCore1, 0x2c);
- write_nic_byte(priv, rOFDM0_XCAGCCore1, 0x2c);
- write_nic_byte(priv, rOFDM0_XDAGCCore1, 0x2c);
- }
- else
- {
- write_nic_byte(priv, rOFDM0_XAAGCCore1, 0x20);
- write_nic_byte(priv, rOFDM0_XBAGCCore1, 0x20);
- write_nic_byte(priv, rOFDM0_XCAGCCore1, 0x20);
- write_nic_byte(priv, rOFDM0_XDAGCCore1, 0x20);
- }
-
- // 2.2 Higher PD_TH for OFDM.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
- /* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
- // 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
- write_nic_byte(priv, (rOFDM0_XATxAFE+3), 0x20);
- }
- else
- write_nic_byte(priv, rOFDM0_RxDetector1, 0x44);
-
- // 2.3 Higher CS ratio for CCK.
- write_nic_byte(priv, 0xa0a, 0xcd);
-
- // 2.4 Lower EDCCA.
- /* 2008/01/11 MH 90/92 series are the same. */
- //PlatformEFIOWrite4Byte(pAdapter, rOFDM0_ECCAThreshold, 0x346);
-
- // 2.5 DIG On.
- rtl8192_setBBreg(priv, UFWP, bMaskByte1, 0x1); // Only clear byte 1 and rewrite.
-
- }
-
- dm_ctrl_initgain_byrssi_highpwr(priv);
-
-}
-
-static void dm_ctrl_initgain_byrssi_highpwr(struct r8192_priv *priv)
-{
- static u32 reset_cnt_highpwr = 0;
-
- // For smooth, we can not change high power DIG state in the range.
- if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_high_power_lowthresh) &&
- (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_highthresh))
- {
- return;
- }
-
- /* 3. When RSSI >75% or <70%, it is a high power issue. We have to judge if
- it is larger than a threshold and then execute below step. */
- // 2008/02/05 MH SD3-Jerry Modify PD_TH for high power issue.
- if (priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_power_highthresh)
- {
- if (dm_digtable.dig_highpwr_state == DM_STA_DIG_ON &&
- (priv->reset_count == reset_cnt_highpwr))
- return;
- else
- dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;
-
- // 3.1 Higher PD_TH for OFDM for high power state.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
- write_nic_byte(priv, (rOFDM0_XATxAFE+3), 0x10);
- }
- else
- write_nic_byte(priv, rOFDM0_RxDetector1, 0x43);
- }
- else
- {
- if (dm_digtable.dig_highpwr_state == DM_STA_DIG_OFF&&
- (priv->reset_count == reset_cnt_highpwr))
- return;
- else
- dm_digtable.dig_highpwr_state = DM_STA_DIG_OFF;
-
- if (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_lowthresh &&
- priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh)
- {
- // 3.2 Recover PD_TH for OFDM for normal power region.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
- write_nic_byte(priv, (rOFDM0_XATxAFE+3), 0x20);
- }
- else
- write_nic_byte(priv, rOFDM0_RxDetector1, 0x44);
- }
- }
-
- reset_cnt_highpwr = priv->reset_count;
-
-}
-
-
-static void dm_initial_gain(struct r8192_priv *priv)
-{
- u8 initial_gain=0;
- static u8 initialized=0, force_write=0;
- static u32 reset_cnt=0;
-
- if(dm_digtable.dig_algorithm_switch)
- {
- initialized = 0;
- reset_cnt = 0;
- }
-
- if(dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
- {
- if(dm_digtable.cur_connect_state == DIG_CONNECT)
- {
- if((dm_digtable.rssi_val+10-dm_digtable.backoff_val) > dm_digtable.rx_gain_range_max)
- dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_max;
- else if((dm_digtable.rssi_val+10-dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
- dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_min;
- else
- dm_digtable.cur_ig_value = dm_digtable.rssi_val+10-dm_digtable.backoff_val;
- }
- else //current state is disconnected
- {
- if(dm_digtable.cur_ig_value == 0)
- dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
- else
- dm_digtable.cur_ig_value = dm_digtable.pre_ig_value;
- }
- }
- else // disconnected -> connected or connected -> disconnected
- {
- dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
- dm_digtable.pre_ig_value = 0;
- }
-
- // if silent reset happened, we should rewrite the values back
- if(priv->reset_count != reset_cnt)
- {
- force_write = 1;
- reset_cnt = priv->reset_count;
- }
-
- if(dm_digtable.pre_ig_value != read_nic_byte(priv, rOFDM0_XAAGCCore1))
- force_write = 1;
-
- {
- if((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value)
- || !initialized || force_write)
- {
- initial_gain = (u8)dm_digtable.cur_ig_value;
- // Set initial gain.
- write_nic_byte(priv, rOFDM0_XAAGCCore1, initial_gain);
- write_nic_byte(priv, rOFDM0_XBAGCCore1, initial_gain);
- write_nic_byte(priv, rOFDM0_XCAGCCore1, initial_gain);
- write_nic_byte(priv, rOFDM0_XDAGCCore1, initial_gain);
- dm_digtable.pre_ig_value = dm_digtable.cur_ig_value;
- initialized = 1;
- force_write = 0;
- }
- }
-}
-
-static void dm_pd_th(struct r8192_priv *priv)
-{
- static u8 initialized=0, force_write=0;
- static u32 reset_cnt = 0;
-
- if(dm_digtable.dig_algorithm_switch)
- {
- initialized = 0;
- reset_cnt = 0;
- }
-
- if(dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
- {
- if(dm_digtable.cur_connect_state == DIG_CONNECT)
- {
- if (dm_digtable.rssi_val >= dm_digtable.rssi_high_power_highthresh)
- dm_digtable.curpd_thstate = DIG_PD_AT_HIGH_POWER;
- else if ((dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh))
- dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
- else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh) &&
- (dm_digtable.rssi_val < dm_digtable.rssi_high_power_lowthresh))
- dm_digtable.curpd_thstate = DIG_PD_AT_NORMAL_POWER;
- else
- dm_digtable.curpd_thstate = dm_digtable.prepd_thstate;
- }
- else
- {
- dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
- }
- }
- else // disconnected -> connected or connected -> disconnected
- {
- dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
- }
-
- // if silent reset happened, we should rewrite the values back
- if(priv->reset_count != reset_cnt)
- {
- force_write = 1;
- reset_cnt = priv->reset_count;
- }
-
- {
- if((dm_digtable.prepd_thstate != dm_digtable.curpd_thstate) ||
- (initialized<=3) || force_write)
- {
- if(dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER)
- {
- // Lower PD_TH for OFDM.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
- /* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
- // 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
- write_nic_byte(priv, (rOFDM0_XATxAFE+3), 0x00);
- }
- else
- write_nic_byte(priv, rOFDM0_RxDetector1, 0x42);
- }
- else if(dm_digtable.curpd_thstate == DIG_PD_AT_NORMAL_POWER)
- {
- // Higher PD_TH for OFDM.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
- /* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
- // 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
- write_nic_byte(priv, (rOFDM0_XATxAFE+3), 0x20);
- }
- else
- write_nic_byte(priv, rOFDM0_RxDetector1, 0x44);
- }
- else if(dm_digtable.curpd_thstate == DIG_PD_AT_HIGH_POWER)
- {
- // Higher PD_TH for OFDM for high power state.
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- {
- write_nic_byte(priv, (rOFDM0_XATxAFE+3), 0x10);
- }
- else
- write_nic_byte(priv, rOFDM0_RxDetector1, 0x43);
- }
- dm_digtable.prepd_thstate = dm_digtable.curpd_thstate;
- if(initialized <= 3)
- initialized++;
- force_write = 0;
- }
- }
-}
-
-static void dm_cs_ratio(struct r8192_priv *priv)
-{
- static u8 initialized=0,force_write=0;
- static u32 reset_cnt = 0;
-
- if(dm_digtable.dig_algorithm_switch)
- {
- initialized = 0;
- reset_cnt = 0;
- }
-
- if(dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
- {
- if(dm_digtable.cur_connect_state == DIG_CONNECT)
- {
- if ((dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh))
- dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
- else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh) )
- dm_digtable.curcs_ratio_state = DIG_CS_RATIO_HIGHER;
- else
- dm_digtable.curcs_ratio_state = dm_digtable.precs_ratio_state;
- }
- else
- {
- dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
- }
- }
- else // disconnected -> connected or connected -> disconnected
- {
- dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
- }
-
- // if silent reset happened, we should rewrite the values back
- if(priv->reset_count != reset_cnt)
- {
- force_write = 1;
- reset_cnt = priv->reset_count;
- }
-
-
- if((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
- !initialized || force_write)
- {
- if(dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER)
- {
- // Lower CS ratio for CCK.
- write_nic_byte(priv, 0xa0a, 0x08);
- }
- else if(dm_digtable.curcs_ratio_state == DIG_CS_RATIO_HIGHER)
- {
- // Higher CS ratio for CCK.
- write_nic_byte(priv, 0xa0a, 0xcd);
- }
- dm_digtable.precs_ratio_state = dm_digtable.curcs_ratio_state;
- initialized = 1;
- force_write = 0;
- }
-}
-
-void dm_init_edca_turbo(struct r8192_priv *priv)
-{
-
- priv->bcurrent_turbo_EDCA = false;
- priv->ieee80211->bis_any_nonbepkts = false;
- priv->bis_cur_rdlstate = false;
-}
-
-static void dm_check_edca_turbo(struct r8192_priv *priv)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
- //PSTA_QOS pStaQos = pMgntInfo->pStaQos;
-
- // Keep past Tx/Rx packet count for RT-to-RT EDCA turbo.
- static unsigned long lastTxOkCnt = 0;
- static unsigned long lastRxOkCnt = 0;
- unsigned long curTxOkCnt = 0;
- unsigned long curRxOkCnt = 0;
-
- //
- // Do not be Turbo if it's under WiFi config and Qos Enabled, because the EDCA parameters
- // should follow the settings from QAP. By Bruce, 2007-12-07.
- //
- if(priv->ieee80211->state != IEEE80211_LINKED)
- goto dm_CheckEdcaTurbo_EXIT;
- // We do not turn on EDCA turbo mode for some AP that has IOT issue
- if(priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_EDCA_TURBO)
- goto dm_CheckEdcaTurbo_EXIT;
-
- // Check the status for current condition.
- if(!priv->ieee80211->bis_any_nonbepkts)
- {
- curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
- curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
- // For RT-AP, we needs to turn it on when Rx>Tx
- if(curRxOkCnt > 4*curTxOkCnt)
- {
- if(!priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA)
- {
- write_nic_dword(priv, EDCAPARA_BE, edca_setting_DL[pHTInfo->IOTPeer]);
- priv->bis_cur_rdlstate = true;
- }
- }
- else
- {
- if(priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA)
- {
- write_nic_dword(priv, EDCAPARA_BE, edca_setting_UL[pHTInfo->IOTPeer]);
- priv->bis_cur_rdlstate = false;
- }
-
- }
-
- priv->bcurrent_turbo_EDCA = true;
- }
- else
- {
- //
- // Turn Off EDCA turbo here.
- // Restore original EDCA according to the declaration of AP.
- //
- if(priv->bcurrent_turbo_EDCA)
- {
-
- {
- u8 u1bAIFS;
- u32 u4bAcParam;
- struct ieee80211_qos_parameters *qos_parameters = &priv->ieee80211->current_network.qos_data.parameters;
- u8 mode = priv->ieee80211->mode;
-
- // For Each time updating EDCA parameter, reset EDCA turbo mode status.
- dm_init_edca_turbo(priv);
- u1bAIFS = qos_parameters->aifs[0] * ((mode&(IEEE_G|IEEE_N_24G)) ?9:20) + aSifsTime;
- u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[0]))<< AC_PARAM_TXOP_LIMIT_OFFSET)|
- (((u32)(qos_parameters->cw_max[0]))<< AC_PARAM_ECW_MAX_OFFSET)|
- (((u32)(qos_parameters->cw_min[0]))<< AC_PARAM_ECW_MIN_OFFSET)|
- ((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
- printk("===>u4bAcParam:%x, ", u4bAcParam);
- //write_nic_dword(dev, WDCAPARA_ADD[i], u4bAcParam);
- write_nic_dword(priv, EDCAPARA_BE, u4bAcParam);
-
- // Check ACM bit.
- // If it is set, immediately set ACM control bit to downgrading AC for passing WMM testplan. Annie, 2005-12-13.
- {
- // TODO: Modified this part and try to set acm control in only 1 IO processing!!
-
- PACI_AIFSN pAciAifsn = (PACI_AIFSN)&(qos_parameters->aifs[0]);
- u8 AcmCtrl = read_nic_byte(priv, AcmHwCtrl );
- if( pAciAifsn->f.ACM )
- { // ACM bit is 1.
- AcmCtrl |= AcmHw_BeqEn;
- }
- else
- { // ACM bit is 0.
- AcmCtrl &= (~AcmHw_BeqEn);
- }
-
- RT_TRACE( COMP_QOS,"SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl ) ;
- write_nic_byte(priv, AcmHwCtrl, AcmCtrl );
- }
- }
- priv->bcurrent_turbo_EDCA = false;
- }
- }
-
-
-dm_CheckEdcaTurbo_EXIT:
- // Set variables for next time.
- priv->ieee80211->bis_any_nonbepkts = false;
- lastTxOkCnt = priv->stats.txbytesunicast;
- lastRxOkCnt = priv->stats.rxbytesunicast;
-}
-
-static void dm_init_ctstoself(struct r8192_priv *priv)
-{
- priv->ieee80211->bCTSToSelfEnable = TRUE;
- priv->ieee80211->CTSToSelfTH = CTSToSelfTHVal;
-}
-
-static void dm_ctstoself(struct r8192_priv *priv)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
- static unsigned long lastTxOkCnt = 0;
- static unsigned long lastRxOkCnt = 0;
- unsigned long curTxOkCnt = 0;
- unsigned long curRxOkCnt = 0;
-
- if(priv->ieee80211->bCTSToSelfEnable != TRUE)
- {
- pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
- return;
- }
- /*
- 1. Uplink
- 2. Linksys350/Linksys300N
- 3. <50 disable, >55 enable
- */
-
- if(pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM)
- {
- curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
- curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
- if(curRxOkCnt > 4*curTxOkCnt) //downlink, disable CTS to self
- {
- pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
- }
- else //uplink
- {
- pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_CTS2SELF;
- }
-
- lastTxOkCnt = priv->stats.txbytesunicast;
- lastRxOkCnt = priv->stats.rxbytesunicast;
- }
-}
-
-
-
-/* Copy 8187B template for 9xseries */
-static void dm_check_rfctrl_gpio(struct r8192_priv *priv)
-{
-
- // Walk around for DTM test, we will not enable HW - radio on/off because r/w
- // page 1 register before Lextra bus is enabled cause system fails when resuming
- // from S4. 20080218, Emily
-
- // Stop to execute workitem to prevent S3/S4 bug.
- queue_delayed_work(priv->priv_wq,&priv->gpio_change_rf_wq,0);
-}
-
-/* PCI will not support workitem call back HW radio on-off control. */
-void dm_gpio_change_rf_callback(struct work_struct *work)
-{
- struct delayed_work *dwork = container_of(work,struct delayed_work,work);
- struct r8192_priv *priv = container_of(dwork,struct r8192_priv,gpio_change_rf_wq);
- u8 tmp1byte;
- RT_RF_POWER_STATE eRfPowerStateToSet;
- bool bActuallySet = false;
-
- if (!priv->up) {
- RT_TRACE((COMP_INIT | COMP_POWER | COMP_RF),"dm_gpio_change_rf_callback(): Callback function breaks out!!\n");
- } else {
- // 0x108 GPIO input register is read only
- //set 0x108 B1= 1: RF-ON; 0: RF-OFF.
- tmp1byte = read_nic_byte(priv, GPI);
-
- eRfPowerStateToSet = (tmp1byte&BIT1) ? eRfOn : eRfOff;
-
- if (priv->bHwRadioOff && (eRfPowerStateToSet == eRfOn)) {
- RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio ON\n");
-
- priv->bHwRadioOff = false;
- bActuallySet = true;
- } else if ((!priv->bHwRadioOff) && (eRfPowerStateToSet == eRfOff)) {
- RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio OFF\n");
- priv->bHwRadioOff = true;
- bActuallySet = true;
- }
-
- if (bActuallySet) {
- priv->bHwRfOffAction = 1;
- MgntActSet_RF_State(priv, eRfPowerStateToSet, RF_CHANGE_BY_HW);
- //DrvIFIndicateCurrentPhyStatus(pAdapter);
- } else {
- msleep(2000);
- }
- }
-}
-
-/* Check if Current RF RX path is enabled */
-void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
-{
- struct delayed_work *dwork = container_of(work,struct delayed_work,work);
- struct r8192_priv *priv = container_of(dwork,struct r8192_priv,rfpath_check_wq);
- u8 rfpath = 0, i;
-
-
- /* 2008/01/30 MH After discussing with SD3 Jerry, 0xc04/0xd04 register will
- always be the same. We only read 0xc04 now. */
- rfpath = read_nic_byte(priv, 0xc04);
-
- // Check Bit 0-3, it means if RF A-D is enabled.
- for (i = 0; i < RF90_PATH_MAX; i++)
- {
- if (rfpath & (0x01<<i))
- priv->brfpath_rxenable[i] = 1;
- else
- priv->brfpath_rxenable[i] = 0;
- }
- if(!DM_RxPathSelTable.Enable)
- return;
-
- dm_rxpath_sel_byrssi(priv);
-}
-
-static void dm_init_rxpath_selection(struct r8192_priv *priv)
-{
- u8 i;
-
- DM_RxPathSelTable.Enable = 1; //default enabled
- DM_RxPathSelTable.SS_TH_low = RxPathSelection_SS_TH_low;
- DM_RxPathSelTable.diff_TH = RxPathSelection_diff_TH;
- if(priv->CustomerID == RT_CID_819x_Netcore)
- DM_RxPathSelTable.cck_method = CCK_Rx_Version_2;
- else
- DM_RxPathSelTable.cck_method = CCK_Rx_Version_1;
- DM_RxPathSelTable.DbgMode = DM_DBG_OFF;
- DM_RxPathSelTable.disabledRF = 0;
- for(i=0; i<4; i++)
- {
- DM_RxPathSelTable.rf_rssi[i] = 50;
- DM_RxPathSelTable.cck_pwdb_sta[i] = -64;
- DM_RxPathSelTable.rf_enable_rssi_th[i] = 100;
- }
-}
-
-static void dm_rxpath_sel_byrssi(struct r8192_priv *priv)
-{
- u8 i, max_rssi_index=0, min_rssi_index=0, sec_rssi_index=0, rf_num=0;
- u8 tmp_max_rssi=0, tmp_min_rssi=0, tmp_sec_rssi=0;
- u8 cck_default_Rx=0x2; //RF-C
- u8 cck_optional_Rx=0x3;//RF-D
- long tmp_cck_max_pwdb=0, tmp_cck_min_pwdb=0, tmp_cck_sec_pwdb=0;
- u8 cck_rx_ver2_max_index=0, cck_rx_ver2_min_index=0, cck_rx_ver2_sec_index=0;
- u8 cur_rf_rssi;
- long cur_cck_pwdb;
- static u8 disabled_rf_cnt=0, cck_Rx_Path_initialized=0;
- u8 update_cck_rx_path;
-
- if(priv->rf_type != RF_2T4R)
- return;
-
- if(!cck_Rx_Path_initialized)
- {
- DM_RxPathSelTable.cck_Rx_path = (read_nic_byte(priv, 0xa07)&0xf);
- cck_Rx_Path_initialized = 1;
- }
-
- DM_RxPathSelTable.disabledRF = 0xf;
- DM_RxPathSelTable.disabledRF &=~ (read_nic_byte(priv, 0xc04));
-
- if(priv->ieee80211->mode == WIRELESS_MODE_B)
- {
- DM_RxPathSelTable.cck_method = CCK_Rx_Version_2; //pure B mode, fixed cck version2
- }
-
- //decide max/sec/min rssi index
- for (i=0; i<RF90_PATH_MAX; i++)
- {
- if(!DM_RxPathSelTable.DbgMode)
- DM_RxPathSelTable.rf_rssi[i] = priv->stats.rx_rssi_percentage[i];
-
- if(priv->brfpath_rxenable[i])
- {
- rf_num++;
- cur_rf_rssi = DM_RxPathSelTable.rf_rssi[i];
-
- if(rf_num == 1) // find first enabled rf path and the rssi values
- { //initialize, set all rssi index to the same one
- max_rssi_index = min_rssi_index = sec_rssi_index = i;
- tmp_max_rssi = tmp_min_rssi = tmp_sec_rssi = cur_rf_rssi;
- }
- else if(rf_num == 2)
- { // we pick up the max index first, and let sec and min to be the same one
- if(cur_rf_rssi >= tmp_max_rssi)
- {
- tmp_max_rssi = cur_rf_rssi;
- max_rssi_index = i;
- }
- else
- {
- tmp_sec_rssi = tmp_min_rssi = cur_rf_rssi;
- sec_rssi_index = min_rssi_index = i;
- }
- }
- else
- {
- if(cur_rf_rssi > tmp_max_rssi)
- {
- tmp_sec_rssi = tmp_max_rssi;
- sec_rssi_index = max_rssi_index;
- tmp_max_rssi = cur_rf_rssi;
- max_rssi_index = i;
- }
- else if(cur_rf_rssi == tmp_max_rssi)
- { // let sec and min point to the different index
- tmp_sec_rssi = cur_rf_rssi;
- sec_rssi_index = i;
- }
- else if((cur_rf_rssi < tmp_max_rssi) &&(cur_rf_rssi > tmp_sec_rssi))
- {
- tmp_sec_rssi = cur_rf_rssi;
- sec_rssi_index = i;
- }
- else if(cur_rf_rssi == tmp_sec_rssi)
- {
- if(tmp_sec_rssi == tmp_min_rssi)
- { // let sec and min point to the different index
- tmp_sec_rssi = cur_rf_rssi;
- sec_rssi_index = i;
- }
- else
- {
- // This case we don't need to set any index
- }
- }
- else if((cur_rf_rssi < tmp_sec_rssi) && (cur_rf_rssi > tmp_min_rssi))
- {
- // This case we don't need to set any index
- }
- else if(cur_rf_rssi == tmp_min_rssi)
- {
- if(tmp_sec_rssi == tmp_min_rssi)
- { // let sec and min point to the different index
- tmp_min_rssi = cur_rf_rssi;
- min_rssi_index = i;
- }
- else
- {
- // This case we don't need to set any index
- }
- }
- else if(cur_rf_rssi < tmp_min_rssi)
- {
- tmp_min_rssi = cur_rf_rssi;
- min_rssi_index = i;
- }
- }
- }
- }
-
- rf_num = 0;
- // decide max/sec/min cck pwdb index
- if(DM_RxPathSelTable.cck_method == CCK_Rx_Version_2)
- {
- for (i=0; i<RF90_PATH_MAX; i++)
- {
- if(priv->brfpath_rxenable[i])
- {
- rf_num++;
- cur_cck_pwdb = DM_RxPathSelTable.cck_pwdb_sta[i];
-
- if(rf_num == 1) // find first enabled rf path and the rssi values
- { //initialize, set all rssi index to the same one
- cck_rx_ver2_max_index = cck_rx_ver2_min_index = cck_rx_ver2_sec_index = i;
- tmp_cck_max_pwdb = tmp_cck_min_pwdb = tmp_cck_sec_pwdb = cur_cck_pwdb;
- }
- else if(rf_num == 2)
- { // we pick up the max index first, and let sec and min to be the same one
- if(cur_cck_pwdb >= tmp_cck_max_pwdb)
- {
- tmp_cck_max_pwdb = cur_cck_pwdb;
- cck_rx_ver2_max_index = i;
- }
- else
- {
- tmp_cck_sec_pwdb = tmp_cck_min_pwdb = cur_cck_pwdb;
- cck_rx_ver2_sec_index = cck_rx_ver2_min_index = i;
- }
- }
- else
- {
- if(cur_cck_pwdb > tmp_cck_max_pwdb)
- {
- tmp_cck_sec_pwdb = tmp_cck_max_pwdb;
- cck_rx_ver2_sec_index = cck_rx_ver2_max_index;
- tmp_cck_max_pwdb = cur_cck_pwdb;
- cck_rx_ver2_max_index = i;
- }
- else if(cur_cck_pwdb == tmp_cck_max_pwdb)
- { // let sec and min point to the different index
- tmp_cck_sec_pwdb = cur_cck_pwdb;
- cck_rx_ver2_sec_index = i;
- }
- else if((cur_cck_pwdb < tmp_cck_max_pwdb) &&(cur_cck_pwdb > tmp_cck_sec_pwdb))
- {
- tmp_cck_sec_pwdb = cur_cck_pwdb;
- cck_rx_ver2_sec_index = i;
- }
- else if(cur_cck_pwdb == tmp_cck_sec_pwdb)
- {
- if(tmp_cck_sec_pwdb == tmp_cck_min_pwdb)
- { // let sec and min point to the different index
- tmp_cck_sec_pwdb = cur_cck_pwdb;
- cck_rx_ver2_sec_index = i;
- }
- else
- {
- // This case we don't need to set any index
- }
- }
- else if((cur_cck_pwdb < tmp_cck_sec_pwdb) && (cur_cck_pwdb > tmp_cck_min_pwdb))
- {
- // This case we don't need to set any index
- }
- else if(cur_cck_pwdb == tmp_cck_min_pwdb)
- {
- if(tmp_cck_sec_pwdb == tmp_cck_min_pwdb)
- { // let sec and min point to the different index
- tmp_cck_min_pwdb = cur_cck_pwdb;
- cck_rx_ver2_min_index = i;
- }
- else
- {
- // This case we don't need to set any index
- }
- }
- else if(cur_cck_pwdb < tmp_cck_min_pwdb)
- {
- tmp_cck_min_pwdb = cur_cck_pwdb;
- cck_rx_ver2_min_index = i;
- }
- }
-
- }
- }
- }
-
-
- // Set CCK Rx path
- // reg0xA07[3:2]=cck default rx path, reg0xa07[1:0]=cck optional rx path.
- update_cck_rx_path = 0;
- if(DM_RxPathSelTable.cck_method == CCK_Rx_Version_2)
- {
- cck_default_Rx = cck_rx_ver2_max_index;
- cck_optional_Rx = cck_rx_ver2_sec_index;
- if(tmp_cck_max_pwdb != -64)
- update_cck_rx_path = 1;
- }
-
- if(tmp_min_rssi < DM_RxPathSelTable.SS_TH_low && disabled_rf_cnt < 2)
- {
- if((tmp_max_rssi - tmp_min_rssi) >= DM_RxPathSelTable.diff_TH)
- {
- //record the enabled rssi threshold
- DM_RxPathSelTable.rf_enable_rssi_th[min_rssi_index] = tmp_max_rssi+5;
- //disable the BB Rx path, OFDM
- rtl8192_setBBreg(priv, rOFDM0_TRxPathEnable, 0x1<<min_rssi_index, 0x0); // 0xc04[3:0]
- rtl8192_setBBreg(priv, rOFDM1_TRxPathEnable, 0x1<<min_rssi_index, 0x0); // 0xd04[3:0]
- disabled_rf_cnt++;
- }
- if(DM_RxPathSelTable.cck_method == CCK_Rx_Version_1)
- {
- cck_default_Rx = max_rssi_index;
- cck_optional_Rx = sec_rssi_index;
- if(tmp_max_rssi)
- update_cck_rx_path = 1;
- }
- }
-
- if(update_cck_rx_path)
- {
- DM_RxPathSelTable.cck_Rx_path = (cck_default_Rx<<2)|(cck_optional_Rx);
- rtl8192_setBBreg(priv, rCCK0_AFESetting, 0x0f000000, DM_RxPathSelTable.cck_Rx_path);
- }
-
- if(DM_RxPathSelTable.disabledRF)
- {
- for(i=0; i<4; i++)
- {
- if((DM_RxPathSelTable.disabledRF>>i) & 0x1) //disabled rf
- {
- if(tmp_max_rssi >= DM_RxPathSelTable.rf_enable_rssi_th[i])
- {
- //enable the BB Rx path
- rtl8192_setBBreg(priv, rOFDM0_TRxPathEnable, 0x1<<i, 0x1); // 0xc04[3:0]
- rtl8192_setBBreg(priv, rOFDM1_TRxPathEnable, 0x1<<i, 0x1); // 0xd04[3:0]
- DM_RxPathSelTable.rf_enable_rssi_th[i] = 100;
- disabled_rf_cnt--;
- }
- }
- }
- }
-}
-
-/*
- * Call a workitem to check current RXRF path and Rx Path selection by RSSI.
- */
-static void dm_check_rx_path_selection(struct r8192_priv *priv)
-{
- queue_delayed_work(priv->priv_wq,&priv->rfpath_check_wq,0);
-}
-
-static void dm_init_fsync(struct r8192_priv *priv)
-{
- priv->ieee80211->fsync_time_interval = 500;
- priv->ieee80211->fsync_rate_bitmap = 0x0f000800;
- priv->ieee80211->fsync_rssi_threshold = 30;
- priv->ieee80211->bfsync_enable = false;
- priv->ieee80211->fsync_multiple_timeinterval = 3;
- priv->ieee80211->fsync_firstdiff_ratethreshold= 100;
- priv->ieee80211->fsync_seconddiff_ratethreshold= 200;
- priv->ieee80211->fsync_state = Default_Fsync;
- priv->framesyncMonitor = 1; // current default 0xc38 monitor on
-
- init_timer(&priv->fsync_timer);
- priv->fsync_timer.data = (unsigned long)priv;
- priv->fsync_timer.function = dm_fsync_timer_callback;
-}
-
-
-static void dm_deInit_fsync(struct r8192_priv *priv)
-{
- del_timer_sync(&priv->fsync_timer);
-}
-
-static void dm_fsync_timer_callback(unsigned long data)
-{
- struct r8192_priv *priv = (struct r8192_priv *)data;
- u32 rate_index, rate_count = 0, rate_count_diff=0;
- bool bSwitchFromCountDiff = false;
- bool bDoubleTimeInterval = false;
-
- if( priv->ieee80211->state == IEEE80211_LINKED &&
- priv->ieee80211->bfsync_enable &&
- (priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC))
- {
- // Count rate 54, MCS [7], [12, 13, 14, 15]
- u32 rate_bitmap;
- for(rate_index = 0; rate_index <= 27; rate_index++)
- {
- rate_bitmap = 1 << rate_index;
- if(priv->ieee80211->fsync_rate_bitmap & rate_bitmap)
- rate_count+= priv->stats.received_rate_histogram[1][rate_index];
- }
-
- if(rate_count < priv->rate_record)
- rate_count_diff = 0xffffffff - rate_count + priv->rate_record;
- else
- rate_count_diff = rate_count - priv->rate_record;
- if(rate_count_diff < priv->rateCountDiffRecord)
- {
-
- u32 DiffNum = priv->rateCountDiffRecord - rate_count_diff;
- // Contiune count
- if(DiffNum >= priv->ieee80211->fsync_seconddiff_ratethreshold)
- priv->ContiuneDiffCount++;
- else
- priv->ContiuneDiffCount = 0;
-
- // Contiune count over
- if(priv->ContiuneDiffCount >=2)
- {
- bSwitchFromCountDiff = true;
- priv->ContiuneDiffCount = 0;
- }
- }
- else
- {
- // Stop contiune count
- priv->ContiuneDiffCount = 0;
- }
-
- //If Count diff <= FsyncRateCountThreshold
- if(rate_count_diff <= priv->ieee80211->fsync_firstdiff_ratethreshold)
- {
- bSwitchFromCountDiff = true;
- priv->ContiuneDiffCount = 0;
- }
- priv->rate_record = rate_count;
- priv->rateCountDiffRecord = rate_count_diff;
- RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff , priv->bswitch_fsync);
- // if we never receive those mcs rate and rssi > 30 % then switch fsyn
- if(priv->undecorated_smoothed_pwdb > priv->ieee80211->fsync_rssi_threshold && bSwitchFromCountDiff)
- {
- bDoubleTimeInterval = true;
- priv->bswitch_fsync = !priv->bswitch_fsync;
- if(priv->bswitch_fsync)
- {
- write_nic_byte(priv,0xC36, 0x1c);
- write_nic_byte(priv, 0xC3e, 0x90);
- }
- else
- {
- write_nic_byte(priv, 0xC36, 0x5c);
- write_nic_byte(priv, 0xC3e, 0x96);
- }
- }
- else if(priv->undecorated_smoothed_pwdb <= priv->ieee80211->fsync_rssi_threshold)
- {
- if(priv->bswitch_fsync)
- {
- priv->bswitch_fsync = false;
- write_nic_byte(priv, 0xC36, 0x5c);
- write_nic_byte(priv, 0xC3e, 0x96);
- }
- }
- if(bDoubleTimeInterval){
- if(timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval);
- add_timer(&priv->fsync_timer);
- }
- else{
- if(timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval);
- add_timer(&priv->fsync_timer);
- }
- }
- else
- {
- // Let Register return to default value;
- if(priv->bswitch_fsync)
- {
- priv->bswitch_fsync = false;
- write_nic_byte(priv, 0xC36, 0x5c);
- write_nic_byte(priv, 0xC3e, 0x96);
- }
- priv->ContiuneDiffCount = 0;
- write_nic_dword(priv, rOFDM0_RxDetector2, 0x465c52cd);
- }
- RT_TRACE(COMP_HALDM, "ContiuneDiffCount %d\n", priv->ContiuneDiffCount);
- RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff , priv->bswitch_fsync);
-}
-
-static void dm_StartHWFsync(struct r8192_priv *priv)
-{
- RT_TRACE(COMP_HALDM, "%s\n", __FUNCTION__);
- write_nic_dword(priv, rOFDM0_RxDetector2, 0x465c12cf);
- write_nic_byte(priv, 0xc3b, 0x41);
-}
-
-static void dm_EndSWFsync(struct r8192_priv *priv)
-{
- RT_TRACE(COMP_HALDM, "%s\n", __FUNCTION__);
- del_timer_sync(&(priv->fsync_timer));
-
- // Let Register return to default value;
- if(priv->bswitch_fsync)
- {
- priv->bswitch_fsync = false;
-
- write_nic_byte(priv, 0xC36, 0x40);
-
- write_nic_byte(priv, 0xC3e, 0x96);
- }
-
- priv->ContiuneDiffCount = 0;
-
- write_nic_dword(priv, rOFDM0_RxDetector2, 0x465c52cd);
-}
-
-static void dm_StartSWFsync(struct r8192_priv *priv)
-{
- u32 rateIndex;
- u32 rateBitmap;
-
- RT_TRACE(COMP_HALDM,"%s\n", __FUNCTION__);
- // Initial rate record to zero, start to record.
- priv->rate_record = 0;
- // Initial contiune diff count to zero, start to record.
- priv->ContiuneDiffCount = 0;
- priv->rateCountDiffRecord = 0;
- priv->bswitch_fsync = false;
-
- if(priv->ieee80211->mode == WIRELESS_MODE_N_24G)
- {
- priv->ieee80211->fsync_firstdiff_ratethreshold= 600;
- priv->ieee80211->fsync_seconddiff_ratethreshold = 0xffff;
- }
- else
- {
- priv->ieee80211->fsync_firstdiff_ratethreshold= 200;
- priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
- }
- for(rateIndex = 0; rateIndex <= 27; rateIndex++)
- {
- rateBitmap = 1 << rateIndex;
- if(priv->ieee80211->fsync_rate_bitmap & rateBitmap)
- priv->rate_record += priv->stats.received_rate_histogram[1][rateIndex];
- }
- if(timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval);
- add_timer(&priv->fsync_timer);
-
- write_nic_dword(priv, rOFDM0_RxDetector2, 0x465c12cd);
-}
-
-static void dm_EndHWFsync(struct r8192_priv *priv)
-{
- RT_TRACE(COMP_HALDM,"%s\n", __FUNCTION__);
- write_nic_dword(priv, rOFDM0_RxDetector2, 0x465c52cd);
- write_nic_byte(priv, 0xc3b, 0x49);
-}
-
-static void dm_check_fsync(struct r8192_priv *priv)
-{
-#define RegC38_Default 0
-#define RegC38_NonFsync_Other_AP 1
-#define RegC38_Fsync_AP_BCM 2
- //u32 framesyncC34;
- static u8 reg_c38_State=RegC38_Default;
- static u32 reset_cnt=0;
-
- RT_TRACE(COMP_HALDM, "RSSI %d TimeInterval %d MultipleTimeInterval %d\n", priv->ieee80211->fsync_rssi_threshold, priv->ieee80211->fsync_time_interval, priv->ieee80211->fsync_multiple_timeinterval);
- RT_TRACE(COMP_HALDM, "RateBitmap 0x%x FirstDiffRateThreshold %d SecondDiffRateThreshold %d\n", priv->ieee80211->fsync_rate_bitmap, priv->ieee80211->fsync_firstdiff_ratethreshold, priv->ieee80211->fsync_seconddiff_ratethreshold);
-
- if( priv->ieee80211->state == IEEE80211_LINKED &&
- (priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC))
- {
- if(priv->ieee80211->bfsync_enable == 0)
- {
- switch(priv->ieee80211->fsync_state)
- {
- case Default_Fsync:
- dm_StartHWFsync(priv);
- priv->ieee80211->fsync_state = HW_Fsync;
- break;
- case SW_Fsync:
- dm_EndSWFsync(priv);
- dm_StartHWFsync(priv);
- priv->ieee80211->fsync_state = HW_Fsync;
- break;
- case HW_Fsync:
- default:
- break;
- }
- }
- else
- {
- switch(priv->ieee80211->fsync_state)
- {
- case Default_Fsync:
- dm_StartSWFsync(priv);
- priv->ieee80211->fsync_state = SW_Fsync;
- break;
- case HW_Fsync:
- dm_EndHWFsync(priv);
- dm_StartSWFsync(priv);
- priv->ieee80211->fsync_state = SW_Fsync;
- break;
- case SW_Fsync:
- default:
- break;
-
- }
- }
- if(priv->framesyncMonitor)
- {
- if(reg_c38_State != RegC38_Fsync_AP_BCM)
- { //For broadcom AP we write different default value
- write_nic_byte(priv, rOFDM0_RxDetector3, 0x95);
-
- reg_c38_State = RegC38_Fsync_AP_BCM;
- }
- }
- }
- else
- {
- switch(priv->ieee80211->fsync_state)
- {
- case HW_Fsync:
- dm_EndHWFsync(priv);
- priv->ieee80211->fsync_state = Default_Fsync;
- break;
- case SW_Fsync:
- dm_EndSWFsync(priv);
- priv->ieee80211->fsync_state = Default_Fsync;
- break;
- case Default_Fsync:
- default:
- break;
- }
-
- if(priv->framesyncMonitor)
- {
- if(priv->ieee80211->state == IEEE80211_LINKED)
- {
- if(priv->undecorated_smoothed_pwdb <= RegC38_TH)
- {
- if(reg_c38_State != RegC38_NonFsync_Other_AP)
- {
- write_nic_byte(priv, rOFDM0_RxDetector3, 0x90);
-
- reg_c38_State = RegC38_NonFsync_Other_AP;
- }
- }
- else if(priv->undecorated_smoothed_pwdb >= (RegC38_TH+5))
- {
- if(reg_c38_State)
- {
- write_nic_byte(priv, rOFDM0_RxDetector3, priv->framesync);
- reg_c38_State = RegC38_Default;
- }
- }
- }
- else
- {
- if(reg_c38_State)
- {
- write_nic_byte(priv, rOFDM0_RxDetector3, priv->framesync);
- reg_c38_State = RegC38_Default;
- }
- }
- }
- }
- if(priv->framesyncMonitor)
- {
- if(priv->reset_count != reset_cnt)
- { //After silent reset, the reg_c38_State will be returned to default value
- write_nic_byte(priv, rOFDM0_RxDetector3, priv->framesync);
- reg_c38_State = RegC38_Default;
- reset_cnt = priv->reset_count;
- }
- }
- else
- {
- if(reg_c38_State)
- {
- write_nic_byte(priv, rOFDM0_RxDetector3, priv->framesync);
- reg_c38_State = RegC38_Default;
- }
- }
-}
-
-/*
- * Detect Signal strength to control TX Registry
- * Tx Power Control For Near/Far Range
- */
-static void dm_init_dynamic_txpower(struct r8192_priv *priv)
-{
- //Initial TX Power Control for near/far range , add by amy 2008/05/15, porting from windows code.
- priv->ieee80211->bdynamic_txpower_enable = true; //Default to enable Tx Power Control
- priv->bLastDTPFlag_High = false;
- priv->bLastDTPFlag_Low = false;
- priv->bDynamicTxHighPower = false;
- priv->bDynamicTxLowPower = false;
-}
-
-static void dm_dynamic_txpower(struct r8192_priv *priv)
-{
- unsigned int txhipower_threshhold=0;
- unsigned int txlowpower_threshold=0;
- if(priv->ieee80211->bdynamic_txpower_enable != true)
- {
- priv->bDynamicTxHighPower = false;
- priv->bDynamicTxLowPower = false;
- return;
- }
- if((priv->ieee80211->current_network.atheros_cap_exist ) && (priv->ieee80211->mode == IEEE_G)){
- txhipower_threshhold = TX_POWER_ATHEROAP_THRESH_HIGH;
- txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW;
- }
- else
- {
- txhipower_threshhold = TX_POWER_NEAR_FIELD_THRESH_HIGH;
- txlowpower_threshold = TX_POWER_NEAR_FIELD_THRESH_LOW;
- }
-
- RT_TRACE(COMP_TXAGC, "priv->undecorated_smoothed_pwdb = %ld\n" , priv->undecorated_smoothed_pwdb);
-
- if(priv->ieee80211->state == IEEE80211_LINKED)
- {
- if(priv->undecorated_smoothed_pwdb >= txhipower_threshhold)
- {
- priv->bDynamicTxHighPower = true;
- priv->bDynamicTxLowPower = false;
- }
- else
- {
- // high power state check
- if(priv->undecorated_smoothed_pwdb < txlowpower_threshold && priv->bDynamicTxHighPower == true)
- {
- priv->bDynamicTxHighPower = false;
- }
- // low power state check
- if(priv->undecorated_smoothed_pwdb < 35)
- {
- priv->bDynamicTxLowPower = true;
- }
- else if(priv->undecorated_smoothed_pwdb >= 40)
- {
- priv->bDynamicTxLowPower = false;
- }
- }
- }
- else
- {
- //pHalData->bTXPowerCtrlforNearFarRange = !pHalData->bTXPowerCtrlforNearFarRange;
- priv->bDynamicTxHighPower = false;
- priv->bDynamicTxLowPower = false;
- }
-
- if( (priv->bDynamicTxHighPower != priv->bLastDTPFlag_High ) ||
- (priv->bDynamicTxLowPower != priv->bLastDTPFlag_Low ) )
- {
- RT_TRACE(COMP_TXAGC, "SetTxPowerLevel8190() channel = %d\n", priv->ieee80211->current_network.channel);
-
-
- rtl8192_phy_setTxPower(priv, priv->ieee80211->current_network.channel);
-
- }
- priv->bLastDTPFlag_High = priv->bDynamicTxHighPower;
- priv->bLastDTPFlag_Low = priv->bDynamicTxLowPower;
-
-}
-
-//added by vivi, for read tx rate and retrycount
-static void dm_check_txrateandretrycount(struct r8192_priv *priv)
-{
- struct ieee80211_device* ieee = priv->ieee80211;
-
- //for initial tx rate
- ieee->softmac_stats.last_packet_rate = read_nic_byte(priv ,Initial_Tx_Rate_Reg);
- //for tx tx retry count
- ieee->softmac_stats.txretrycount = read_nic_dword(priv, Tx_Retry_Count_Reg);
-}
-
-static void dm_send_rssi_tofw(struct r8192_priv *priv)
-{
- // If we test chariot, we should stop the TX command ?
- // Because 92E will always silent reset when we send tx command. We use register
- // 0x1e0(byte) to botify driver.
- write_nic_byte(priv, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
- return;
-}
-
diff --git a/drivers/staging/rtl8192e/r8192E_dm.h b/drivers/staging/rtl8192e/r8192E_dm.h
deleted file mode 100644
index b5b34eaaee9..00000000000
--- a/drivers/staging/rtl8192e/r8192E_dm.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/*****************************************************************************
- * Copyright(c) 2007, RealTEK Technology Inc. All Right Reserved.
- *
- * Module: Hal819xUsbDM.h (RTL8192 Header H File)
- *
- *
- * Note: For dynamic control definition constant structure.
- *
- *
- * Export:
- *
- * Abbrev:
- *
- * History:
- * Data Who Remark
- * 10/04/2007 MHC Create initial version.
- *
- *****************************************************************************/
-
-#ifndef __R8192UDM_H__
-#define __R8192UDM_H__
-
-#define OFDM_Table_Length 19
-#define CCK_Table_length 12
-
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_DIG_HIGH_PWR_THRESH_HIGH 75
-#define DM_DIG_HIGH_PWR_THRESH_LOW 70
-
-#define BW_AUTO_SWITCH_HIGH_LOW 25
-#define BW_AUTO_SWITCH_LOW_HIGH 30
-
-#define DM_check_fsync_time_interval 500
-
-
-#define DM_DIG_BACKOFF 12
-#define DM_DIG_MAX 0x36
-#define DM_DIG_MIN 0x1c
-#define DM_DIG_MIN_Netcore 0x12
-
-#define RxPathSelection_SS_TH_low 30
-#define RxPathSelection_diff_TH 18
-
-#define RateAdaptiveTH_High 50
-#define RateAdaptiveTH_Low_20M 30
-#define RateAdaptiveTH_Low_40M 10
-#define VeryLowRSSI 15
-#define CTSToSelfTHVal 35
-
-//defined by vivi, for tx power track
-#define E_FOR_TX_POWER_TRACK 300
-//Dynamic Tx Power Control Threshold
-#define TX_POWER_NEAR_FIELD_THRESH_HIGH 68
-#define TX_POWER_NEAR_FIELD_THRESH_LOW 62
-//added by amy for atheros AP
-#define TX_POWER_ATHEROAP_THRESH_HIGH 78
-#define TX_POWER_ATHEROAP_THRESH_LOW 72
-
-//defined by vivi, for showing on UI. Newer firmware has changed to 0x1e0
-#define Current_Tx_Rate_Reg 0x1e0//0x1b8
-#define Initial_Tx_Rate_Reg 0x1e1 //0x1b9
-#define Tx_Retry_Count_Reg 0x1ac
-#define RegC38_TH 20
-
-/* 2007/10/04 MH Define upper and lower threshold of DIG enable or disable. */
-typedef struct _dynamic_initial_gain_threshold_
-{
- u8 dig_enable_flag;
- u8 dig_algorithm;
- u8 dbg_mode;
- u8 dig_algorithm_switch;
-
- long rssi_low_thresh;
- long rssi_high_thresh;
-
- long rssi_high_power_lowthresh;
- long rssi_high_power_highthresh;
-
- u8 dig_state;
- u8 dig_highpwr_state;
- u8 cur_connect_state;
- u8 pre_connect_state;
-
- u8 curpd_thstate;
- u8 prepd_thstate;
- u8 curcs_ratio_state;
- u8 precs_ratio_state;
-
- u32 pre_ig_value;
- u32 cur_ig_value;
-
- u8 backoff_val;
- u8 rx_gain_range_max;
- u8 rx_gain_range_min;
- bool initialgain_lowerbound_state;
-
- long rssi_val;
-}dig_t;
-
-typedef enum tag_dynamic_init_gain_state_definition
-{
- DM_STA_DIG_OFF = 0,
- DM_STA_DIG_ON,
- DM_STA_DIG_MAX
-}dm_dig_sta_e;
-
-
-/* 2007/10/08 MH Define RATR state. */
-typedef enum tag_dynamic_ratr_state_definition
-{
- DM_RATR_STA_HIGH = 0,
- DM_RATR_STA_MIDDLE = 1,
- DM_RATR_STA_LOW = 2,
- DM_RATR_STA_MAX
-}dm_ratr_sta_e;
-
-/* 2007/10/11 MH Define DIG operation type. */
-typedef enum tag_dynamic_init_gain_operation_type_definition
-{
- DIG_TYPE_THRESH_HIGH = 0,
- DIG_TYPE_THRESH_LOW = 1,
- DIG_TYPE_THRESH_HIGHPWR_HIGH = 2,
- DIG_TYPE_THRESH_HIGHPWR_LOW = 3,
- DIG_TYPE_DBG_MODE = 4,
- DIG_TYPE_RSSI = 5,
- DIG_TYPE_ALGORITHM = 6,
- DIG_TYPE_BACKOFF = 7,
- DIG_TYPE_PWDB_FACTOR = 8,
- DIG_TYPE_RX_GAIN_MIN = 9,
- DIG_TYPE_RX_GAIN_MAX = 10,
- DIG_TYPE_ENABLE = 20,
- DIG_TYPE_DISABLE = 30,
- DIG_OP_TYPE_MAX
-}dm_dig_op_e;
-
-typedef enum tag_dig_algorithm_definition
-{
- DIG_ALGO_BY_FALSE_ALARM = 0,
- DIG_ALGO_BY_RSSI = 1,
- DIG_ALGO_MAX
-}dm_dig_alg_e;
-
-typedef enum tag_dig_dbgmode_definition
-{
- DIG_DBG_OFF = 0,
- DIG_DBG_ON = 1,
- DIG_DBG_MAX
-}dm_dig_dbg_e;
-
-typedef enum tag_dig_connect_definition
-{
- DIG_DISCONNECT = 0,
- DIG_CONNECT = 1,
- DIG_CONNECT_MAX
-}dm_dig_connect_e;
-
-typedef enum tag_dig_packetdetection_threshold_definition
-{
- DIG_PD_AT_LOW_POWER = 0,
- DIG_PD_AT_NORMAL_POWER = 1,
- DIG_PD_AT_HIGH_POWER = 2,
- DIG_PD_MAX
-}dm_dig_pd_th_e;
-
-typedef enum tag_dig_cck_cs_ratio_state_definition
-{
- DIG_CS_RATIO_LOWER = 0,
- DIG_CS_RATIO_HIGHER = 1,
- DIG_CS_MAX
-}dm_dig_cs_ratio_e;
-typedef struct _Dynamic_Rx_Path_Selection_
-{
- u8 Enable;
- u8 DbgMode;
- u8 cck_method;
- u8 cck_Rx_path;
-
- u8 SS_TH_low;
- u8 diff_TH;
- u8 disabledRF;
- u8 reserved;
-
- u8 rf_rssi[4];
- u8 rf_enable_rssi_th[4];
- long cck_pwdb_sta[4];
-}DRxPathSel;
-
-typedef enum tag_CCK_Rx_Path_Method_Definition
-{
- CCK_Rx_Version_1 = 0,
- CCK_Rx_Version_2= 1,
- CCK_Rx_Version_MAX
-}DM_CCK_Rx_Path_Method;
-
-typedef enum tag_DM_DbgMode_Definition
-{
- DM_DBG_OFF = 0,
- DM_DBG_ON = 1,
- DM_DBG_MAX
-}DM_DBG_E;
-
-typedef struct tag_Tx_Config_Cmd_Format
-{
- u32 Op; /* Command packet type. */
- u32 Length; /* Command packet length. */
- u32 Value;
-}DCMD_TXCMD_T, *PDCMD_TXCMD_T;
-
-
-extern dig_t dm_digtable;
-extern DRxPathSel DM_RxPathSelTable;
-
-void init_hal_dm(struct r8192_priv *priv);
-void deinit_hal_dm(struct r8192_priv *priv);
-
-void hal_dm_watchdog(struct r8192_priv *priv);
-
-void init_rate_adaptive(struct r8192_priv *priv);
-void dm_txpower_trackingcallback(struct work_struct *work);
-void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-void dm_initialize_txpower_tracking(struct r8192_priv *priv);
-void dm_cck_txpower_adjust(struct r8192_priv *priv, bool binch14);
-
-
-#endif /*__R8192UDM_H__ */
-
diff --git a/drivers/staging/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/r8192E_firmware.c
new file mode 100644
index 00000000000..37719859bda
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8192E_firmware.c
@@ -0,0 +1,348 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+
+#include "rtl_core.h"
+#include "r8192E_hw.h"
+#include "r8192E_hwimg.h"
+#include "r8192E_firmware.h"
+#include <linux/firmware.h>
+
+void firmware_init_param(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_firmware *pfirmware = priv->pFirmware;
+
+ pfirmware->cmdpacket_frag_thresold = GET_COMMAND_PACKET_FRAG_THRESHOLD(
+ MAX_TRANSMIT_BUFFER_SIZE);
+}
+
+static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
+ u32 buffer_len)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ bool rt_status = true;
+ u16 frag_threshold;
+ u16 frag_length, frag_offset = 0;
+ int i;
+
+ struct rt_firmware *pfirmware = priv->pFirmware;
+ struct sk_buff *skb;
+ unsigned char *seg_ptr;
+ struct cb_desc *tcb_desc;
+ u8 bLastIniPkt;
+
+ firmware_init_param(dev);
+ frag_threshold = pfirmware->cmdpacket_frag_thresold;
+ do {
+ if ((buffer_len - frag_offset) > frag_threshold) {
+ frag_length = frag_threshold ;
+ bLastIniPkt = 0;
+
+ } else {
+ frag_length = buffer_len - frag_offset;
+ bLastIniPkt = 1;
+
+ }
+
+ skb = dev_alloc_skb(frag_length + 4);
+ memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
+ tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
+ tcb_desc->queue_index = TXCMD_QUEUE;
+ tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
+ tcb_desc->bLastIniPkt = bLastIniPkt;
+
+ seg_ptr = skb->data;
+ for (i = 0; i < frag_length; i += 4) {
+ *seg_ptr++ = ((i+0) < frag_length) ?
+ code_virtual_address[i+3] : 0;
+ *seg_ptr++ = ((i+1) < frag_length) ?
+ code_virtual_address[i+2] : 0;
+ *seg_ptr++ = ((i+2) < frag_length) ?
+ code_virtual_address[i+1] : 0;
+ *seg_ptr++ = ((i+3) < frag_length) ?
+ code_virtual_address[i+0] : 0;
+ }
+ tcb_desc->txbuf_size = (u16)i;
+ skb_put(skb, i);
+
+ if (!priv->rtllib->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
+ (!skb_queue_empty(&priv->rtllib->skb_waitQ[tcb_desc->queue_index])) ||
+ (priv->rtllib->queue_stop)) {
+ RT_TRACE(COMP_FIRMWARE, "===================> tx "
+ "full!\n");
+ skb_queue_tail(&priv->rtllib->skb_waitQ
+ [tcb_desc->queue_index], skb);
+ } else {
+ priv->rtllib->softmac_hard_start_xmit(skb, dev);
+ }
+
+ code_virtual_address += frag_length;
+ frag_offset += frag_length;
+
+ } while (frag_offset < buffer_len);
+
+ write_nic_byte(dev, TPPoll, TPPoll_CQ);
+
+ return rt_status;
+}
+
+static bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev)
+{
+ bool rt_status = true;
+ u32 CPU_status = 0;
+ unsigned long timeout;
+
+ timeout = jiffies + MSECS(200);
+ while (time_before(jiffies, timeout)) {
+ CPU_status = read_nic_dword(dev, CPU_GEN);
+ if (CPU_status & CPU_GEN_PUT_CODE_OK)
+ break;
+ mdelay(2);
+ }
+
+ if (!(CPU_status&CPU_GEN_PUT_CODE_OK)) {
+ RT_TRACE(COMP_ERR, "Download Firmware: Put code fail!\n");
+ goto CPUCheckMainCodeOKAndTurnOnCPU_Fail;
+ } else {
+ RT_TRACE(COMP_FIRMWARE, "Download Firmware: Put code ok!\n");
+ }
+
+ CPU_status = read_nic_dword(dev, CPU_GEN);
+ write_nic_byte(dev, CPU_GEN,
+ (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff));
+ mdelay(1);
+
+ timeout = jiffies + MSECS(200);
+ while (time_before(jiffies, timeout)) {
+ CPU_status = read_nic_dword(dev, CPU_GEN);
+ if (CPU_status&CPU_GEN_BOOT_RDY)
+ break;
+ mdelay(2);
+ }
+
+ if (!(CPU_status&CPU_GEN_BOOT_RDY))
+ goto CPUCheckMainCodeOKAndTurnOnCPU_Fail;
+ else
+ RT_TRACE(COMP_FIRMWARE, "Download Firmware: Boot ready!\n");
+
+ return rt_status;
+
+CPUCheckMainCodeOKAndTurnOnCPU_Fail:
+ RT_TRACE(COMP_ERR, "ERR in %s()\n", __func__);
+ rt_status = false;
+ return rt_status;
+}
+
+static bool CPUcheck_firmware_ready(struct net_device *dev)
+{
+
+ bool rt_status = true;
+ u32 CPU_status = 0;
+ unsigned long timeout;
+
+ timeout = jiffies + MSECS(20);
+ while (time_before(jiffies, timeout)) {
+ CPU_status = read_nic_dword(dev, CPU_GEN);
+ if (CPU_status&CPU_GEN_FIRM_RDY)
+ break;
+ mdelay(2);
+ }
+
+ if (!(CPU_status&CPU_GEN_FIRM_RDY))
+ goto CPUCheckFirmwareReady_Fail;
+ else
+ RT_TRACE(COMP_FIRMWARE, "Download Firmware: Firmware ready!\n");
+
+ return rt_status;
+
+CPUCheckFirmwareReady_Fail:
+ RT_TRACE(COMP_ERR, "ERR in %s()\n", __func__);
+ rt_status = false;
+ return rt_status;
+
+}
+
+static bool firmware_check_ready(struct net_device *dev,
+ u8 load_fw_status)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_firmware *pfirmware = priv->pFirmware;
+ bool rt_status = true;
+
+ switch (load_fw_status) {
+ case FW_INIT_STEP0_BOOT:
+ pfirmware->firmware_status = FW_STATUS_1_MOVE_BOOT_CODE;
+ break;
+
+ case FW_INIT_STEP1_MAIN:
+ pfirmware->firmware_status = FW_STATUS_2_MOVE_MAIN_CODE;
+
+ rt_status = CPUcheck_maincodeok_turnonCPU(dev);
+ if (rt_status)
+ pfirmware->firmware_status = FW_STATUS_3_TURNON_CPU;
+ else
+ RT_TRACE(COMP_FIRMWARE, "CPUcheck_maincodeok_turnon"
+ "CPU fail!\n");
+
+ break;
+
+ case FW_INIT_STEP2_DATA:
+ pfirmware->firmware_status = FW_STATUS_4_MOVE_DATA_CODE;
+ mdelay(1);
+
+ rt_status = CPUcheck_firmware_ready(dev);
+ if (rt_status)
+ pfirmware->firmware_status = FW_STATUS_5_READY;
+ else
+ RT_TRACE(COMP_FIRMWARE, "CPUcheck_firmware_ready fail"
+ "(%d)!\n", rt_status);
+
+ break;
+ default:
+ rt_status = false;
+ RT_TRACE(COMP_FIRMWARE, "Unknown firware status");
+ break;
+ }
+
+ return rt_status;
+}
+
+bool init_firmware(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ bool rt_status = true;
+
+ u8 *firmware_img_buf[3] = { &Rtl8192PciEFwBootArray[0],
+ &Rtl8192PciEFwMainArray[0],
+ &Rtl8192PciEFwDataArray[0]};
+
+ u32 firmware_img_len[3] = { sizeof(Rtl8192PciEFwBootArray),
+ sizeof(Rtl8192PciEFwMainArray),
+ sizeof(Rtl8192PciEFwDataArray)};
+ u32 file_length = 0;
+ u8 *mapped_file = NULL;
+ u8 init_step = 0;
+ enum opt_rst_type rst_opt = OPT_SYSTEM_RESET;
+ enum firmware_init_step starting_state = FW_INIT_STEP0_BOOT;
+
+ struct rt_firmware *pfirmware = priv->pFirmware;
+
+ RT_TRACE(COMP_FIRMWARE, " PlatformInitFirmware()==>\n");
+
+ if (pfirmware->firmware_status == FW_STATUS_0_INIT) {
+ rst_opt = OPT_SYSTEM_RESET;
+ starting_state = FW_INIT_STEP0_BOOT;
+
+ } else if (pfirmware->firmware_status == FW_STATUS_5_READY) {
+ rst_opt = OPT_FIRMWARE_RESET;
+ starting_state = FW_INIT_STEP2_DATA;
+ } else {
+ RT_TRACE(COMP_FIRMWARE, "PlatformInitFirmware: undefined"
+ " firmware state\n");
+ }
+
+ priv->firmware_source = FW_SOURCE_IMG_FILE;
+ for (init_step = starting_state; init_step <= FW_INIT_STEP2_DATA;
+ init_step++) {
+ if (rst_opt == OPT_SYSTEM_RESET) {
+ switch (priv->firmware_source) {
+ case FW_SOURCE_IMG_FILE:
+ {
+ if (pfirmware->firmware_buf_size[init_step] == 0) {
+ const char *fw_name[3] = { "RTL8192E/boot.img",
+ "RTL8192E/main.img",
+ "RTL8192E/data.img"
+ };
+ const struct firmware *fw_entry;
+ int rc;
+ rc = request_firmware(&fw_entry,
+ fw_name[init_step], &priv->pdev->dev);
+ if (rc < 0) {
+ RT_TRACE(COMP_FIRMWARE, "request firm"
+ "ware fail!\n");
+ goto download_firmware_fail;
+ }
+ if (fw_entry->size >
+ sizeof(pfirmware->firmware_buf[init_step])) {
+ RT_TRACE(COMP_FIRMWARE, "img file size "
+ "exceed the container struct "
+ "buffer fail!\n");
+ goto download_firmware_fail;
+ }
+
+ if (init_step != FW_INIT_STEP1_MAIN) {
+ memcpy(pfirmware->firmware_buf[init_step],
+ fw_entry->data, fw_entry->size);
+ pfirmware->firmware_buf_size[init_step] =
+ fw_entry->size;
+
+ } else {
+ memset(pfirmware->firmware_buf[init_step],
+ 0, 128);
+ memcpy(&pfirmware->firmware_buf[init_step][128],
+ fw_entry->data, fw_entry->size);
+ pfirmware->firmware_buf_size[init_step] =
+ fw_entry->size + 128;
+ }
+
+ if (rst_opt == OPT_SYSTEM_RESET)
+ release_firmware(fw_entry);
+ }
+ mapped_file = pfirmware->firmware_buf[init_step];
+ file_length = pfirmware->firmware_buf_size[init_step];
+ break;
+ }
+ case FW_SOURCE_HEADER_FILE:
+ mapped_file = firmware_img_buf[init_step];
+ file_length = firmware_img_len[init_step];
+ if (init_step == FW_INIT_STEP2_DATA) {
+ memcpy(pfirmware->firmware_buf[init_step], mapped_file, file_length);
+ pfirmware->firmware_buf_size[init_step] = file_length;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+
+ } else if (rst_opt == OPT_FIRMWARE_RESET) {
+ mapped_file = pfirmware->firmware_buf[init_step];
+ file_length = pfirmware->firmware_buf_size[init_step];
+ }
+
+ rt_status = fw_download_code(dev, mapped_file, file_length);
+ if (rt_status != true) {
+ goto download_firmware_fail;
+ }
+
+ if (!firmware_check_ready(dev, init_step)) {
+ goto download_firmware_fail;
+ }
+ }
+
+ RT_TRACE(COMP_FIRMWARE, "Firmware Download Success\n");
+ return rt_status;
+
+download_firmware_fail:
+ RT_TRACE(COMP_ERR, "ERR in %s()\n", __func__);
+ rt_status = false;
+ return rt_status;
+
+}
diff --git a/drivers/staging/rtl8192e/r8192E_firmware.h b/drivers/staging/rtl8192e/r8192E_firmware.h
new file mode 100644
index 00000000000..caa87883310
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8192E_firmware.h
@@ -0,0 +1,73 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef __INC_FIRMWARE_H
+#define __INC_FIRMWARE_H
+
+#define RTL8190_CPU_START_OFFSET 0x80
+
+#define GET_COMMAND_PACKET_FRAG_THRESHOLD(v) (4*(v/4) - 8)
+
+enum firmware_init_step {
+ FW_INIT_STEP0_BOOT = 0,
+ FW_INIT_STEP1_MAIN = 1,
+ FW_INIT_STEP2_DATA = 2,
+};
+
+enum opt_rst_type {
+ OPT_SYSTEM_RESET = 0,
+ OPT_FIRMWARE_RESET = 1,
+};
+
+enum desc_packet_type {
+ DESC_PACKET_TYPE_INIT = 0,
+ DESC_PACKET_TYPE_NORMAL = 1,
+};
+
+enum firmware_source {
+ FW_SOURCE_IMG_FILE = 0,
+ FW_SOURCE_HEADER_FILE = 1,
+};
+
+enum firmware_status {
+ FW_STATUS_0_INIT = 0,
+ FW_STATUS_1_MOVE_BOOT_CODE = 1,
+ FW_STATUS_2_MOVE_MAIN_CODE = 2,
+ FW_STATUS_3_TURNON_CPU = 3,
+ FW_STATUS_4_MOVE_DATA_CODE = 4,
+ FW_STATUS_5_READY = 5,
+};
+
+struct fw_seg_container {
+ u16 seg_size;
+ u8 *seg_ptr;
+};
+
+struct rt_firmware {
+ enum firmware_status firmware_status;
+ u16 cmdpacket_frag_thresold;
+#define RTL8190_MAX_FIRMWARE_CODE_SIZE 64000
+#define MAX_FW_INIT_STEP 3
+ u8 firmware_buf[MAX_FW_INIT_STEP][RTL8190_MAX_FIRMWARE_CODE_SIZE];
+ u16 firmware_buf_size[MAX_FW_INIT_STEP];
+};
+
+bool init_firmware(struct net_device *dev);
+extern void firmware_init_param(struct net_device *dev);
+
+#endif
diff --git a/drivers/staging/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/r8192E_hw.h
index 24e7303e56a..43c3fb859d1 100644
--- a/drivers/staging/rtl8192e/r8192E_hw.h
+++ b/drivers/staging/rtl8192e/r8192E_hw.h
@@ -1,49 +1,36 @@
-/*
- This is part of rtl8187 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
- Released under the terms of GPL (General Public Licence)
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
- Parts of this driver are based on the GPL part of the
- official Realtek driver.
- Parts of this driver are based on the rtl8180 driver skeleton
- from Patric Schenke & Andres Salomon.
- Parts of this driver are based on the Intel Pro Wireless
- 2100 GPL driver.
- We want to tanks the Authors of those projects
- and the Ndiswrapper project Authors.
-*/
-
-/* Mariusz Matuszek added full registers definition with Realtek's name */
-
-/* this file contains register definitions for the rtl8187 MAC controller */
#ifndef R8180_HW
#define R8180_HW
-typedef enum _VERSION_8190{
- VERSION_8190_BD=0x3,
- VERSION_8190_BE
-}VERSION_8190,*PVERSION_8190;
-//added for different RF type
-typedef enum _RT_RF_TYPE_DEF
-{
- RF_1T2R = 0,
- RF_2T4R,
-
- RF_819X_MAX_TYPE
-}RT_RF_TYPE_DEF;
-
-typedef enum _BaseBand_Config_Type{
- BaseBand_Config_PHY_REG = 0, //Radio Path A
- BaseBand_Config_AGC_TAB = 1, //Radio Path B
-}BaseBand_Config_Type, *PBaseBand_Config_Type;
+enum baseband_config {
+ BaseBand_Config_PHY_REG = 0,
+ BaseBand_Config_AGC_TAB = 1,
+};
#define RTL8187_REQT_READ 0xc0
#define RTL8187_REQT_WRITE 0x40
#define RTL8187_REQ_GET_REGS 0x05
#define RTL8187_REQ_SET_REGS 0x05
-#define R8180_MAX_RETRY 255
#define MAX_TX_URB 5
#define MAX_RX_URB 16
#define RX_URB_SIZE 9100
@@ -65,14 +52,14 @@ typedef enum _BaseBand_Config_Type{
#define EEPROM_TxPowerDiff 0x1F
-#define EEPROM_PwDiff 0x21 //0x21
-#define EEPROM_CrystalCap 0x22 //0x22
+#define EEPROM_PwDiff 0x21
+#define EEPROM_CrystalCap 0x22
-#define EEPROM_TxPwIndex_CCK_V1 0x29 //0x29~0x2B
-#define EEPROM_TxPwIndex_OFDM_24G_V1 0x2C //0x2C~0x2E
-#define EEPROM_TxPwIndex_Ver 0x27 //0x27
+#define EEPROM_TxPwIndex_CCK_V1 0x29
+#define EEPROM_TxPwIndex_OFDM_24G_V1 0x2C
+#define EEPROM_TxPwIndex_Ver 0x27
#define EEPROM_Default_TxPowerDiff 0x0
#define EEPROM_Default_ThermalMeter 0x77
@@ -81,42 +68,35 @@ typedef enum _BaseBand_Config_Type{
#define EEPROM_Default_PwDiff 0x4
#define EEPROM_Default_CrystalCap 0x5
#define EEPROM_Default_TxPower 0x1010
-#define EEPROM_ICVersion_ChannelPlan 0x7C //0x7C:ChannelPlan, 0x7D:IC_Version
-#define EEPROM_Customer_ID 0x7B //0x7B:CustomerID
-
+#define EEPROM_ICVersion_ChannelPlan 0x7C
+#define EEPROM_Customer_ID 0x7B
#define EEPROM_RFInd_PowerDiff 0x28
#define EEPROM_ThermalMeter 0x29
-#define EEPROM_TxPwDiff_CrystalCap 0x2A //0x2A~0x2B
-#define EEPROM_TxPwIndex_CCK 0x2C //0x23
-#define EEPROM_TxPwIndex_OFDM_24G 0x3A //0x24~0x26
-
+#define EEPROM_TxPwDiff_CrystalCap 0x2A
+#define EEPROM_TxPwIndex_CCK 0x2C
+#define EEPROM_TxPwIndex_OFDM_24G 0x3A
#define EEPROM_Default_TxPowerLevel 0x10
-
-#define EEPROM_IC_VER 0x7d //0x7D
-#define EEPROM_CRC 0x7e //0x7E~0x7F
+#define EEPROM_IC_VER 0x7d
+#define EEPROM_CRC 0x7e
#define EEPROM_CID_DEFAULT 0x0
#define EEPROM_CID_CAMEO 0x1
#define EEPROM_CID_RUNTOP 0x2
#define EEPROM_CID_Senao 0x3
-#define EEPROM_CID_TOSHIBA 0x4 // Toshiba setting, Merge by Jacken, 2008/01/31
+#define EEPROM_CID_TOSHIBA 0x4
#define EEPROM_CID_NetCore 0x5
#define EEPROM_CID_Nettronix 0x6
#define EEPROM_CID_Pronet 0x7
#define EEPROM_CID_DLINK 0x8
-#define EEPROM_CID_WHQL 0xFE //added by sherry for dtm, 20080728
-
+#define EEPROM_CID_WHQL 0xFE
enum _RTL8192Pci_HW {
- MAC0 = 0x000,
- MAC1 = 0x001,
- MAC2 = 0x002,
- MAC3 = 0x003,
- MAC4 = 0x004,
- MAC5 = 0x005,
- PCIF = 0x009, // PCI Function Register 0x0009h~0x000bh
-//----------------------------------------------------------------------------
-// 8190 PCIF bits (Offset 0x009-000b, 24bit)
-//----------------------------------------------------------------------------
+ MAC0 = 0x000,
+ MAC1 = 0x001,
+ MAC2 = 0x002,
+ MAC3 = 0x003,
+ MAC4 = 0x004,
+ MAC5 = 0x005,
+ PCIF = 0x009,
#define MXDMA2_16bytes 0x000
#define MXDMA2_32bytes 0x001
#define MXDMA2_64bytes 0x010
@@ -129,8 +109,8 @@ enum _RTL8192Pci_HW {
#define MULRW_SHIFT 3
#define MXDMA2_RX_SHIFT 4
#define MXDMA2_TX_SHIFT 0
- PMR = 0x00c, // Power management register
- EPROM_CMD = 0x00e,
+ PMR = 0x00c,
+ EPROM_CMD = 0x00e,
#define EPROM_CMD_RESERVED_MASK BIT5
#define EPROM_CMD_9356SEL BIT4
#define EPROM_CMD_OPERATING_MODE_SHIFT 6
@@ -151,71 +131,66 @@ enum _RTL8192Pci_HW {
ANAPAR = 0x17,
#define BB_GLOBAL_RESET_BIT 0x1
- BB_GLOBAL_RESET = 0x020, // BasebandGlobal Reset Register
- BSSIDR = 0x02E, // BSSID Register
- CMDR = 0x037, // Command register
-#define CR_RST 0x10
-#define CR_RE 0x08
-#define CR_TE 0x04
-#define CR_MulRW 0x01
- SIFS = 0x03E, // SIFS register
- TCR = 0x040, // Transmit Configuration Register
- RCR = 0x044, // Receive Configuration Register
-//----------------------------------------------------------------------------
-//// 8190 (RCR) Receive Configuration Register (Offset 0x44~47, 32 bit)
-////----------------------------------------------------------------------------
-#define RCR_FILTER_MASK (BIT0|BIT1|BIT2|BIT3|BIT5|BIT12|BIT18|BIT19|BIT20|BIT21|BIT22|BIT23)
-#define RCR_ONLYERLPKT BIT31 // Early Receiving based on Packet Size.
-#define RCR_ENCS2 BIT30 // Enable Carrier Sense Detection Method 2
-#define RCR_ENCS1 BIT29 // Enable Carrier Sense Detection Method 1
-#define RCR_ENMBID BIT27 // Enable Multiple BssId.
-#define RCR_ACKTXBW (BIT24|BIT25) // TXBW Setting of ACK frames
-#define RCR_CBSSID BIT23 // Accept BSSID match packet
-#define RCR_APWRMGT BIT22 // Accept power management packet
-#define RCR_ADD3 BIT21 // Accept address 3 match packet
-#define RCR_AMF BIT20 // Accept management type frame
-#define RCR_ACF BIT19 // Accept control type frame
-#define RCR_ADF BIT18 // Accept data type frame
-#define RCR_RXFTH BIT13 // Rx FIFO Threshold
-#define RCR_AICV BIT12 // Accept ICV error packet
-#define RCR_ACRC32 BIT5 // Accept CRC32 error packet
-#define RCR_AB BIT3 // Accept broadcast packet
-#define RCR_AM BIT2 // Accept multicast packet
-#define RCR_APM BIT1 // Accept physical match packet
-#define RCR_AAP BIT0 // Accept all unicast packet
+ BB_GLOBAL_RESET = 0x020,
+ BSSIDR = 0x02E,
+ CMDR = 0x037,
+#define CR_RST 0x10
+#define CR_RE 0x08
+#define CR_TE 0x04
+#define CR_MulRW 0x01
+ SIFS = 0x03E,
+ TCR = 0x040,
+ RCR = 0x044,
+#define RCR_FILTER_MASK (BIT0 | BIT1 | BIT2 | BIT3 | BIT5 | BIT12 | \
+ BIT18 | BIT19 | BIT20 | BIT21 | BIT22 | BIT23)
+#define RCR_ONLYERLPKT BIT31
+#define RCR_ENCS2 BIT30
+#define RCR_ENCS1 BIT29
+#define RCR_ENMBID BIT27
+#define RCR_ACKTXBW (BIT24|BIT25)
+#define RCR_CBSSID BIT23
+#define RCR_APWRMGT BIT22
+#define RCR_ADD3 BIT21
+#define RCR_AMF BIT20
+#define RCR_ACF BIT19
+#define RCR_ADF BIT18
+#define RCR_RXFTH BIT13
+#define RCR_AICV BIT12
+#define RCR_ACRC32 BIT5
+#define RCR_AB BIT3
+#define RCR_AM BIT2
+#define RCR_APM BIT1
+#define RCR_AAP BIT0
#define RCR_MXDMA_OFFSET 8
#define RCR_FIFO_OFFSET 13
- SLOT_TIME = 0x049, // Slot Time Register
- ACK_TIMEOUT = 0x04c, // Ack Timeout Register
- PIFS_TIME = 0x04d, // PIFS time
- USTIME = 0x04e, // Microsecond Tuning Register, Sets the microsecond time unit used by MAC clock.
- EDCAPARA_BE = 0x050, // EDCA Parameter of AC BE
- EDCAPARA_BK = 0x054, // EDCA Parameter of AC BK
- EDCAPARA_VO = 0x058, // EDCA Parameter of AC VO
- EDCAPARA_VI = 0x05C, // EDCA Parameter of AC VI
+ SLOT_TIME = 0x049,
+ ACK_TIMEOUT = 0x04c,
+ PIFS_TIME = 0x04d,
+ USTIME = 0x04e,
+ EDCAPARA_BE = 0x050,
+ EDCAPARA_BK = 0x054,
+ EDCAPARA_VO = 0x058,
+ EDCAPARA_VI = 0x05C,
#define AC_PARAM_TXOP_LIMIT_OFFSET 16
#define AC_PARAM_ECW_MAX_OFFSET 12
#define AC_PARAM_ECW_MIN_OFFSET 8
#define AC_PARAM_AIFS_OFFSET 0
- RFPC = 0x05F, // Rx FIFO Packet Count
- CWRR = 0x060, // Contention Window Report Register
- BCN_TCFG = 0x062, // Beacon Time Configuration
+ RFPC = 0x05F,
+ CWRR = 0x060,
+ BCN_TCFG = 0x062,
#define BCN_TCFG_CW_SHIFT 8
#define BCN_TCFG_IFS 0
- BCN_INTERVAL = 0x070, // Beacon Interval (TU)
- ATIMWND = 0x072, // ATIM Window Size (TU)
- BCN_DRV_EARLY_INT = 0x074, // Driver Early Interrupt Time (TU). Time to send interrupt to notify to change beacon content before TBTT
+ BCN_INTERVAL = 0x070,
+ ATIMWND = 0x072,
+ BCN_DRV_EARLY_INT = 0x074,
#define BCN_DRV_EARLY_INT_SWBCN_SHIFT 8
#define BCN_DRV_EARLY_INT_TIME_SHIFT 0
- BCN_DMATIME = 0x076, // Beacon DMA and ATIM interrupt time (US). Indicates the time before TBTT to perform beacon queue DMA
- BCN_ERR_THRESH = 0x078, // Beacon Error Threshold
- RWCAM = 0x0A0, //IN 8190 Data Sheet is called CAMcmd
- //----------------------------------------------------------------------------
- //// 8190 CAM Command Register (offset 0xA0, 4 byte)
- ////----------------------------------------------------------------------------
-#define CAM_CM_SecCAMPolling BIT31 //Security CAM Polling
-#define CAM_CM_SecCAMClr BIT30 //Clear all bits in CAM
-#define CAM_CM_SecCAMWE BIT16 //Security CAM enable
+ BCN_DMATIME = 0x076,
+ BCN_ERR_THRESH = 0x078,
+ RWCAM = 0x0A0,
+#define CAM_CM_SecCAMPolling BIT31
+#define CAM_CM_SecCAMClr BIT30
+#define CAM_CM_SecCAMWE BIT16
#define CAM_VALID BIT15
#define CAM_NOTVALID 0x0000
#define CAM_USEDK BIT5
@@ -234,68 +209,62 @@ enum _RTL8192Pci_HW {
#define CAM_READ 0x00000000
#define CAM_POLLINIG BIT31
#define SCR_UseDK 0x01
- WCAMI = 0x0A4, // Software write CAM input content
- RCAMO = 0x0A8, // Software read/write CAM config
- SECR = 0x0B0, //Security Configuration Register
-#define SCR_TxUseDK BIT0 //Force Tx Use Default Key
-#define SCR_RxUseDK BIT1 //Force Rx Use Default Key
-#define SCR_TxEncEnable BIT2 //Enable Tx Encryption
-#define SCR_RxDecEnable BIT3 //Enable Rx Decryption
-#define SCR_SKByA2 BIT4 //Search kEY BY A2
-#define SCR_NoSKMC BIT5 //No Key Search for Multicast
- SWREGULATOR = 0x0BD, // Switching Regulator
- INTA_MASK = 0x0f4,
-//----------------------------------------------------------------------------
-// 8190 IMR/ISR bits (offset 0xfd, 8bits)
-//----------------------------------------------------------------------------
+ WCAMI = 0x0A4,
+ RCAMO = 0x0A8,
+ SECR = 0x0B0,
+#define SCR_TxUseDK BIT0
+#define SCR_RxUseDK BIT1
+#define SCR_TxEncEnable BIT2
+#define SCR_RxDecEnable BIT3
+#define SCR_SKByA2 BIT4
+#define SCR_NoSKMC BIT5
+ SWREGULATOR = 0x0BD,
+ INTA_MASK = 0x0f4,
#define IMR8190_DISABLED 0x0
-#define IMR_ATIMEND BIT28 // ATIM Window End Interrupt
-#define IMR_TBDOK BIT27 // Transmit Beacon OK Interrupt
-#define IMR_TBDER BIT26 // Transmit Beacon Error Interrupt
-#define IMR_TXFOVW BIT15 // Transmit FIFO Overflow
-#define IMR_TIMEOUT0 BIT14 // TimeOut0
-#define IMR_BcnInt BIT13 // Beacon DMA Interrupt 0
-#define IMR_RXFOVW BIT12 // Receive FIFO Overflow
-#define IMR_RDU BIT11 // Receive Descriptor Unavailable
-#define IMR_RXCMDOK BIT10 // Receive Command Packet OK
-#define IMR_BDOK BIT9 // Beacon Queue DMA OK Interrup
-#define IMR_HIGHDOK BIT8 // High Queue DMA OK Interrupt
-#define IMR_COMDOK BIT7 // Command Queue DMA OK Interrupt
-#define IMR_MGNTDOK BIT6 // Management Queue DMA OK Interrupt
-#define IMR_HCCADOK BIT5 // HCCA Queue DMA OK Interrupt
-#define IMR_BKDOK BIT4 // AC_BK DMA OK Interrupt
-#define IMR_BEDOK BIT3 // AC_BE DMA OK Interrupt
-#define IMR_VIDOK BIT2 // AC_VI DMA OK Interrupt
-#define IMR_VODOK BIT1 // AC_VO DMA Interrupt
-#define IMR_ROK BIT0 // Receive DMA OK Interrupt
- ISR = 0x0f8, // Interrupt Status Register
- TPPoll = 0x0fd, // Transmit priority polling register
-#define TPPoll_BKQ BIT0 // BK queue polling
-#define TPPoll_BEQ BIT1 // BE queue polling
-#define TPPoll_VIQ BIT2 // VI queue polling
-#define TPPoll_VOQ BIT3 // VO queue polling
-#define TPPoll_BQ BIT4 // Beacon queue polling
-#define TPPoll_CQ BIT5 // Command queue polling
-#define TPPoll_MQ BIT6 // Management queue polling
-#define TPPoll_HQ BIT7 // High queue polling
-#define TPPoll_HCCAQ BIT8 // HCCA queue polling
-#define TPPoll_StopBK BIT9 // Stop BK queue
-#define TPPoll_StopBE BIT10 // Stop BE queue
-#define TPPoll_StopVI BIT11 // Stop VI queue
-#define TPPoll_StopVO BIT12 // Stop VO queue
-#define TPPoll_StopMgt BIT13 // Stop Mgnt queue
-#define TPPoll_StopHigh BIT14 // Stop High queue
-#define TPPoll_StopHCCA BIT15 // Stop HCCA queue
-#define TPPoll_SHIFT 8 // Queue ID mapping
-
- PSR = 0x0ff, // Page Select Register
-#define PSR_GEN 0x0 // Page 0 register general MAC Control
-#define PSR_CPU 0x1 // Page 1 register for CPU
- CPU_GEN = 0x100, // CPU Reset Register
- BB_RESET = 0x101, // Baseband Reset
-//----------------------------------------------------------------------------
-// 8190 CPU General Register (offset 0x100, 4 byte)
-//----------------------------------------------------------------------------
+#define IMR_ATIMEND BIT28
+#define IMR_TBDOK BIT27
+#define IMR_TBDER BIT26
+#define IMR_TXFOVW BIT15
+#define IMR_TIMEOUT0 BIT14
+#define IMR_BcnInt BIT13
+#define IMR_RXFOVW BIT12
+#define IMR_RDU BIT11
+#define IMR_RXCMDOK BIT10
+#define IMR_BDOK BIT9
+#define IMR_HIGHDOK BIT8
+#define IMR_COMDOK BIT7
+#define IMR_MGNTDOK BIT6
+#define IMR_HCCADOK BIT5
+#define IMR_BKDOK BIT4
+#define IMR_BEDOK BIT3
+#define IMR_VIDOK BIT2
+#define IMR_VODOK BIT1
+#define IMR_ROK BIT0
+ ISR = 0x0f8,
+ TPPoll = 0x0fd,
+#define TPPoll_BKQ BIT0
+#define TPPoll_BEQ BIT1
+#define TPPoll_VIQ BIT2
+#define TPPoll_VOQ BIT3
+#define TPPoll_BQ BIT4
+#define TPPoll_CQ BIT5
+#define TPPoll_MQ BIT6
+#define TPPoll_HQ BIT7
+#define TPPoll_HCCAQ BIT8
+#define TPPoll_StopBK BIT9
+#define TPPoll_StopBE BIT10
+#define TPPoll_StopVI BIT11
+#define TPPoll_StopVO BIT12
+#define TPPoll_StopMgt BIT13
+#define TPPoll_StopHigh BIT14
+#define TPPoll_StopHCCA BIT15
+#define TPPoll_SHIFT 8
+
+ PSR = 0x0ff,
+#define PSR_GEN 0x0
+#define PSR_CPU 0x1
+ CPU_GEN = 0x100,
+ BB_RESET = 0x101,
#define CPU_CCK_LOOPBACK 0x00030000
#define CPU_GEN_SYSTEM_RESET 0x00000001
#define CPU_GEN_FIRMWARE_RESET 0x00000008
@@ -304,19 +273,15 @@ enum _RTL8192Pci_HW {
#define CPU_GEN_PUT_CODE_OK 0x00000080
#define CPU_GEN_BB_RST 0x00000100
#define CPU_GEN_PWR_STB_CPU 0x00000004
-#define CPU_GEN_NO_LOOPBACK_MSK 0xFFF8FFFF // Set bit18,17,16 to 0. Set bit19
-#define CPU_GEN_NO_LOOPBACK_SET 0x00080000 // Set BIT19 to 1
+#define CPU_GEN_NO_LOOPBACK_MSK 0xFFF8FFFF
+#define CPU_GEN_NO_LOOPBACK_SET 0x00080000
#define CPU_GEN_GPIO_UART 0x00007000
- LED1Cfg = 0x154,// LED1 Configuration Register
- LED0Cfg = 0x155,// LED0 Configuration Register
+ LED1Cfg = 0x154,
+ LED0Cfg = 0x155,
- AcmAvg = 0x170, // ACM Average Period Register
- AcmHwCtrl = 0x171, // ACM Hardware Control Register
-//----------------------------------------------------------------------------
-//
-// 8190 AcmHwCtrl bits (offset 0x171, 1 byte)
-//----------------------------------------------------------------------------
+ AcmAvg = 0x170,
+ AcmHwCtrl = 0x171,
#define AcmHw_HwEn BIT0
#define AcmHw_BeqEn BIT1
#define AcmHw_ViqEn BIT2
@@ -324,67 +289,65 @@ enum _RTL8192Pci_HW {
#define AcmHw_BeqStatus BIT4
#define AcmHw_ViqStatus BIT5
#define AcmHw_VoqStatus BIT6
- AcmFwCtrl = 0x172, // ACM Firmware Control Register
+ AcmFwCtrl = 0x172,
#define AcmFw_BeqStatus BIT0
#define AcmFw_ViqStatus BIT1
#define AcmFw_VoqStatus BIT2
- VOAdmTime = 0x174, // VO Queue Admitted Time Register
- VIAdmTime = 0x178, // VI Queue Admitted Time Register
- BEAdmTime = 0x17C, // BE Queue Admitted Time Register
- RQPN1 = 0x180, // Reserved Queue Page Number , Vo Vi, Be, Bk
- RQPN2 = 0x184, // Reserved Queue Page Number, HCCA, Cmd, Mgnt, High
- RQPN3 = 0x188, // Reserved Queue Page Number, Bcn, Public,
- QPRR = 0x1E0, // Queue Page Report per TID
- QPNR = 0x1F0, // Queue Packet Number report per TID
-/* there's 9 tx descriptor base address available */
- BQDA = 0x200, // Beacon Queue Descriptor Address
- HQDA = 0x204, // High Priority Queue Descriptor Address
- CQDA = 0x208, // Command Queue Descriptor Address
- MQDA = 0x20C, // Management Queue Descriptor Address
- HCCAQDA = 0x210, // HCCA Queue Descriptor Address
- VOQDA = 0x214, // VO Queue Descriptor Address
- VIQDA = 0x218, // VI Queue Descriptor Address
- BEQDA = 0x21C, // BE Queue Descriptor Address
- BKQDA = 0x220, // BK Queue Descriptor Address
-/* there's 2 rx descriptor base address availalbe */
- RCQDA = 0x224, // Receive command Queue Descriptor Address
- RDQDA = 0x228, // Receive Queue Descriptor Start Address
-
- MAR0 = 0x240, // Multicast filter.
+ VOAdmTime = 0x174,
+ VIAdmTime = 0x178,
+ BEAdmTime = 0x17C,
+ RQPN1 = 0x180,
+ RQPN2 = 0x184,
+ RQPN3 = 0x188,
+ QPRR = 0x1E0,
+ QPNR = 0x1F0,
+ BQDA = 0x200,
+ HQDA = 0x204,
+ CQDA = 0x208,
+ MQDA = 0x20C,
+ HCCAQDA = 0x210,
+ VOQDA = 0x214,
+ VIQDA = 0x218,
+ BEQDA = 0x21C,
+ BKQDA = 0x220,
+ RCQDA = 0x224,
+ RDQDA = 0x228,
+
+ MAR0 = 0x240,
MAR4 = 0x244,
- CCX_PERIOD = 0x250, // CCX Measurement Period Register, in unit of TU.
- CLM_RESULT = 0x251, // CCA Busy fraction register.
- NHM_PERIOD = 0x252, // NHM Measurement Period register, in unit of TU.
-
- NHM_THRESHOLD0 = 0x253, // Noise Histogram Meashorement0.
- NHM_THRESHOLD1 = 0x254, // Noise Histogram Meashorement1.
- NHM_THRESHOLD2 = 0x255, // Noise Histogram Meashorement2.
- NHM_THRESHOLD3 = 0x256, // Noise Histogram Meashorement3.
- NHM_THRESHOLD4 = 0x257, // Noise Histogram Meashorement4.
- NHM_THRESHOLD5 = 0x258, // Noise Histogram Meashorement5.
- NHM_THRESHOLD6 = 0x259, // Noise Histogram Meashorement6
-
- MCTRL = 0x25A, // Measurement Control
-
- NHM_RPI_COUNTER0 = 0x264, // Noise Histogram RPI counter0, the fraction of signal strength < NHM_THRESHOLD0.
- NHM_RPI_COUNTER1 = 0x265, // Noise Histogram RPI counter1, the fraction of signal strength in (NHM_THRESHOLD0, NHM_THRESHOLD1].
- NHM_RPI_COUNTER2 = 0x266, // Noise Histogram RPI counter2, the fraction of signal strength in (NHM_THRESHOLD1, NHM_THRESHOLD2].
- NHM_RPI_COUNTER3 = 0x267, // Noise Histogram RPI counter3, the fraction of signal strength in (NHM_THRESHOLD2, NHM_THRESHOLD3].
- NHM_RPI_COUNTER4 = 0x268, // Noise Histogram RPI counter4, the fraction of signal strength in (NHM_THRESHOLD3, NHM_THRESHOLD4].
- NHM_RPI_COUNTER5 = 0x269, // Noise Histogram RPI counter5, the fraction of signal strength in (NHM_THRESHOLD4, NHM_THRESHOLD5].
- NHM_RPI_COUNTER6 = 0x26A, // Noise Histogram RPI counter6, the fraction of signal strength in (NHM_THRESHOLD5, NHM_THRESHOLD6].
- NHM_RPI_COUNTER7 = 0x26B, // Noise Histogram RPI counter7, the fraction of signal strength in (NHM_THRESHOLD6, NHM_THRESHOLD7].
- WFCRC0 = 0x2f0,
- WFCRC1 = 0x2f4,
- WFCRC2 = 0x2f8,
-
- BW_OPMODE = 0x300, // Bandwidth operation mode
+ CCX_PERIOD = 0x250,
+ CLM_RESULT = 0x251,
+ NHM_PERIOD = 0x252,
+
+ NHM_THRESHOLD0 = 0x253,
+ NHM_THRESHOLD1 = 0x254,
+ NHM_THRESHOLD2 = 0x255,
+ NHM_THRESHOLD3 = 0x256,
+ NHM_THRESHOLD4 = 0x257,
+ NHM_THRESHOLD5 = 0x258,
+ NHM_THRESHOLD6 = 0x259,
+
+ MCTRL = 0x25A,
+
+ NHM_RPI_COUNTER0 = 0x264,
+ NHM_RPI_COUNTER1 = 0x265,
+ NHM_RPI_COUNTER2 = 0x266,
+ NHM_RPI_COUNTER3 = 0x267,
+ NHM_RPI_COUNTER4 = 0x268,
+ NHM_RPI_COUNTER5 = 0x269,
+ NHM_RPI_COUNTER6 = 0x26A,
+ NHM_RPI_COUNTER7 = 0x26B,
+ WFCRC0 = 0x2f0,
+ WFCRC1 = 0x2f4,
+ WFCRC2 = 0x2f8,
+
+ BW_OPMODE = 0x300,
#define BW_OPMODE_11J BIT0
#define BW_OPMODE_5G BIT1
#define BW_OPMODE_20MHZ BIT2
- IC_VERRSION = 0x301, //IC_VERSION
- MSR = 0x303, // Media Status register
+ IC_VERRSION = 0x301,
+ MSR = 0x303,
#define MSR_LINK_MASK ((1<<0)|(1<<1))
#define MSR_LINK_MANAGED 2
#define MSR_LINK_NONE 0
@@ -392,49 +355,50 @@ enum _RTL8192Pci_HW {
#define MSR_LINK_ADHOC 1
#define MSR_LINK_MASTER 3
#define MSR_LINK_ENEDCA (1<<4)
- RETRY_LIMIT = 0x304, // Retry Limit [15:8]-short, [7:0]-long
+
+#define MSR_NOLINK 0x00
+#define MSR_ADHOC 0x01
+#define MSR_INFRA 0x02
+#define MSR_AP 0x03
+
+ RETRY_LIMIT = 0x304,
#define RETRY_LIMIT_SHORT_SHIFT 8
#define RETRY_LIMIT_LONG_SHIFT 0
TSFR = 0x308,
- RRSR = 0x310, // Response Rate Set
-#define RRSR_RSC_OFFSET 21
+ RRSR = 0x310,
+#define RRSR_RSC_OFFSET 21
#define RRSR_SHORT_OFFSET 23
#define RRSR_RSC_DUPLICATE 0x600000
#define RRSR_RSC_UPSUBCHNL 0x400000
-#define RRSR_RSC_LOWSUBCHNL 0x200000
-#define RRSR_SHORT 0x800000
-#define RRSR_1M BIT0
-#define RRSR_2M BIT1
-#define RRSR_5_5M BIT2
-#define RRSR_11M BIT3
-#define RRSR_6M BIT4
-#define RRSR_9M BIT5
-#define RRSR_12M BIT6
-#define RRSR_18M BIT7
-#define RRSR_24M BIT8
-#define RRSR_36M BIT9
-#define RRSR_48M BIT10
-#define RRSR_54M BIT11
-#define RRSR_MCS0 BIT12
-#define RRSR_MCS1 BIT13
-#define RRSR_MCS2 BIT14
-#define RRSR_MCS3 BIT15
-#define RRSR_MCS4 BIT16
-#define RRSR_MCS5 BIT17
-#define RRSR_MCS6 BIT18
-#define RRSR_MCS7 BIT19
-#define BRSR_AckShortPmb BIT23 // CCK ACK: use Short Preamble or not
+#define RRSR_RSC_LOWSUBCHNL 0x200000
+#define RRSR_SHORT 0x800000
+#define RRSR_1M BIT0
+#define RRSR_2M BIT1
+#define RRSR_5_5M BIT2
+#define RRSR_11M BIT3
+#define RRSR_6M BIT4
+#define RRSR_9M BIT5
+#define RRSR_12M BIT6
+#define RRSR_18M BIT7
+#define RRSR_24M BIT8
+#define RRSR_36M BIT9
+#define RRSR_48M BIT10
+#define RRSR_54M BIT11
+#define RRSR_MCS0 BIT12
+#define RRSR_MCS1 BIT13
+#define RRSR_MCS2 BIT14
+#define RRSR_MCS3 BIT15
+#define RRSR_MCS4 BIT16
+#define RRSR_MCS5 BIT17
+#define RRSR_MCS6 BIT18
+#define RRSR_MCS7 BIT19
+#define BRSR_AckShortPmb BIT23
UFWP = 0x318,
- RATR0 = 0x320, // Rate Adaptive Table register1
-//----------------------------------------------------------------------------
-// 8190 Rate Adaptive Table Register (offset 0x320, 4 byte)
-//----------------------------------------------------------------------------
-//CCK
+ RATR0 = 0x320,
#define RATR_1M 0x00000001
#define RATR_2M 0x00000002
#define RATR_55M 0x00000004
#define RATR_11M 0x00000008
-//OFDM
#define RATR_6M 0x00000010
#define RATR_9M 0x00000020
#define RATR_12M 0x00000040
@@ -443,7 +407,6 @@ enum _RTL8192Pci_HW {
#define RATR_36M 0x00000200
#define RATR_48M 0x00000400
#define RATR_54M 0x00000800
-//MCS 1 Spatial Stream
#define RATR_MCS0 0x00001000
#define RATR_MCS1 0x00002000
#define RATR_MCS2 0x00004000
@@ -452,7 +415,6 @@ enum _RTL8192Pci_HW {
#define RATR_MCS5 0x00020000
#define RATR_MCS6 0x00040000
#define RATR_MCS7 0x00080000
-//MCS 2 Spatial Stream
#define RATR_MCS8 0x00100000
#define RATR_MCS9 0x00200000
#define RATR_MCS10 0x00400000
@@ -461,31 +423,31 @@ enum _RTL8192Pci_HW {
#define RATR_MCS13 0x02000000
#define RATR_MCS14 0x04000000
#define RATR_MCS15 0x08000000
-// ALL CCK Rate
-#define RATE_ALL_CCK RATR_1M|RATR_2M|RATR_55M|RATR_11M
-#define RATE_ALL_OFDM_AG RATR_6M|RATR_9M|RATR_12M|RATR_18M|RATR_24M|RATR_36M|RATR_48M|RATR_54M
-#define RATE_ALL_OFDM_1SS RATR_MCS0|RATR_MCS1|RATR_MCS2|RATR_MCS3 | \
- RATR_MCS4|RATR_MCS5|RATR_MCS6 |RATR_MCS7
-#define RATE_ALL_OFDM_2SS RATR_MCS8|RATR_MCS9 |RATR_MCS10|RATR_MCS11| \
- RATR_MCS12|RATR_MCS13|RATR_MCS14|RATR_MCS15
+#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
+#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M | \
+ RATR_24M | RATR_36M | RATR_48M | RATR_54M)
+#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | \
+ RATR_MCS3 | RATR_MCS4 | RATR_MCS5 | \
+ RATR_MCS6 | RATR_MCS7)
+#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | \
+ RATR_MCS11 | RATR_MCS12 | RATR_MCS13 | \
+ RATR_MCS14|RATR_MCS15)
- DRIVER_RSSI = 0x32c, // Driver tell Firmware current RSSI
- MCS_TXAGC = 0x340, // MCS AGC
- CCK_TXAGC = 0x348, // CCK AGC
- MacBlkCtrl = 0x403, // Mac block on/off control register
+ DRIVER_RSSI = 0x32c,
+ MCS_TXAGC = 0x340,
+ CCK_TXAGC = 0x348,
+ MacBlkCtrl = 0x403,
-};
+}
+;
#define GPI 0x108
#define GPO 0x109
#define GPE 0x10a
-#define ANAPAR_FOR_8192PciE 0x17 // Analog parameter register
+#define HWSET_MAX_SIZE_92S 128
-#define MSR_NOLINK 0x00
-#define MSR_ADHOC 0x01
-#define MSR_INFRA 0x02
-#define MSR_AP 0x03
+#define ANAPAR_FOR_8192PciE 0x17
#endif
diff --git a/drivers/staging/rtl8192e/r8192E_hwimg.c b/drivers/staging/rtl8192e/r8192E_hwimg.c
new file mode 100644
index 00000000000..08e7dbb6694
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8192E_hwimg.c
@@ -0,0 +1,3336 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+/*Created on 2008/11/18, 3: 7*/
+
+#include "r8192E_hwimg.h"
+
+u8 Rtl8192PciEFwBootArray[BootArrayLengthPciE] = {
+0x10,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x3c,0x08,0xbf,0xc0,0x25,0x08,0x00,0x08,
+0x3c,0x09,0xb0,0x03,0xad,0x28,0x00,0x20,0x40,0x80,0x68,0x00,0x00,0x00,0x00,0x00,
+0x3c,0x0a,0xd0,0x00,0x40,0x8a,0x60,0x00,0x00,0x00,0x00,0x00,0x3c,0x08,0x80,0x01,
+0x25,0x08,0xa8,0x04,0x24,0x09,0x00,0x01,0x3c,0x01,0x7f,0xff,0x34,0x21,0xff,0xff,
+0x01,0x01,0x50,0x24,0x00,0x09,0x48,0x40,0x35,0x29,0x00,0x01,0x01,0x2a,0x10,0x2b,
+0x14,0x40,0xff,0xfc,0x00,0x00,0x00,0x00,0x3c,0x0a,0x00,0x00,0x25,0x4a,0x00,0x00,
+0x4c,0x8a,0x00,0x00,0x4c,0x89,0x08,0x00,0x00,0x00,0x00,0x00,0x3c,0x08,0x80,0x01,
+0x25,0x08,0xa8,0x04,0x3c,0x01,0x80,0x00,0x01,0x21,0x48,0x25,0x3c,0x0a,0xbf,0xc0,
+0x25,0x4a,0x00,0x7c,0x3c,0x0b,0xb0,0x03,0xad,0x6a,0x00,0x20,0xad,0x00,0x00,0x00,
+0x21,0x08,0x00,0x04,0x01,0x09,0x10,0x2b,0x14,0x40,0xff,0xf8,0x00,0x00,0x00,0x00,
+0x3c,0x08,0x80,0x01,0x25,0x08,0x7f,0xff,0x24,0x09,0x00,0x01,0x3c,0x01,0x7f,0xff,
+0x34,0x21,0xff,0xff,0x01,0x01,0x50,0x24,0x00,0x09,0x48,0x40,0x35,0x29,0x00,0x01,
+0x01,0x2a,0x10,0x2b,0x14,0x40,0xff,0xfc,0x00,0x00,0x00,0x00,0x3c,0x0a,0x80,0x01,
+0x25,0x4a,0x00,0x00,0x3c,0x01,0x7f,0xff,0x34,0x21,0xff,0xff,0x01,0x41,0x50,0x24,
+0x3c,0x09,0x00,0x01,0x35,0x29,0x7f,0xff,0x4c,0x8a,0x20,0x00,0x4c,0x89,0x28,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x24,0x08,0x04,0x10,
+0x00,0x00,0x00,0x00,0x40,0x88,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x3c,0x08,0xbf,0xc0,0x00,0x00,0x00,0x00,0x8d,0x09,0x00,0x00,0x00,0x00,0x00,0x00,
+0x3c,0x0a,0xbf,0xc0,0x25,0x4a,0x01,0x20,0x3c,0x0b,0xb0,0x03,0xad,0x6a,0x00,0x20,
+0x3c,0x08,0xb0,0x03,0x8d,0x09,0x00,0x00,0x00,0x00,0x00,0x00,0x35,0x29,0x00,0x10,
+0xad,0x09,0x00,0x00,0x00,0x00,0x00,0x00,0x3c,0x08,0x80,0x00,0x25,0x08,0x4b,0x94,
+0x01,0x00,0x00,0x08,0x00,0x00,0x00,0x00,};
+
+u8 Rtl8192PciEFwMainArray[MainArrayLengthPciE] = {
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x40,0x04,0x68,0x00,0x40,0x05,0x70,0x00,0x40,0x06,0x40,0x00,0x0c,0x00,0x12,0x98,
+0x00,0x00,0x00,0x00,0x40,0x1a,0x68,0x00,0x33,0x5b,0x00,0x3c,0x17,0x60,0x00,0x09,
+0x00,0x00,0x00,0x00,0x40,0x1b,0x60,0x00,0x00,0x00,0x00,0x00,0x03,0x5b,0xd0,0x24,
+0x40,0x1a,0x70,0x00,0x03,0x40,0x00,0x08,0x42,0x00,0x00,0x10,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x3c,0x02,0xff,0xff,0x34,0x42,0xff,0xff,0x8c,0x43,0x00,0x00,
+0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20,0x24,0x42,0x00,0xd0,
+0xac,0x62,0x00,0x00,0x00,0x00,0x20,0x21,0x27,0x85,0x8b,0x70,0x00,0x85,0x18,0x21,
+0x24,0x84,0x00,0x01,0x28,0x82,0x00,0x0a,0x14,0x40,0xff,0xfc,0xa0,0x60,0x00,0x00,
+0x27,0x82,0x8b,0x7a,0x24,0x04,0x00,0x06,0x24,0x84,0xff,0xff,0xa4,0x40,0x00,0x00,
+0x04,0x81,0xff,0xfd,0x24,0x42,0x00,0x02,0x24,0x02,0x00,0x03,0xa3,0x82,0x8b,0x70,
+0x24,0x02,0x00,0x0a,0x24,0x03,0x09,0xc4,0xa3,0x82,0x8b,0x72,0x24,0x02,0x00,0x04,
+0x24,0x04,0x00,0x01,0x24,0x05,0x00,0x02,0xa7,0x83,0x8b,0x86,0xa3,0x82,0x8b,0x78,
+0x24,0x03,0x04,0x00,0x24,0x02,0x02,0x00,0xaf,0x83,0x8b,0x8c,0xa3,0x85,0x8b,0x79,
+0xa7,0x82,0x8b,0x7a,0xa7,0x84,0x8b,0x7c,0xaf,0x84,0x8b,0x88,0xa3,0x84,0x8b,0x71,
+0xa3,0x80,0x8b,0x73,0xa3,0x80,0x8b,0x74,0xa3,0x80,0x8b,0x75,0xa3,0x84,0x8b,0x76,
+0xa3,0x85,0x8b,0x77,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,
+0x3c,0x02,0x80,0x00,0x24,0x42,0x01,0x7c,0x34,0x63,0x00,0x20,0xac,0x62,0x00,0x00,
+0x27,0x84,0x8b,0x98,0x00,0x00,0x10,0x21,0x24,0x42,0x00,0x01,0x00,0x02,0x16,0x00,
+0x00,0x02,0x16,0x03,0x28,0x43,0x00,0x03,0xac,0x80,0xff,0xfc,0xa0,0x80,0x00,0x00,
+0x14,0x60,0xff,0xf9,0x24,0x84,0x00,0x0c,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,
+0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20,0x24,0x42,0x01,0xc0,
+0x3c,0x08,0xb0,0x03,0xac,0x62,0x00,0x00,0x35,0x08,0x00,0x70,0x8d,0x02,0x00,0x00,
+0x00,0xa0,0x48,0x21,0x00,0x04,0x26,0x00,0x00,0x02,0x2a,0x43,0x00,0x06,0x36,0x00,
+0x00,0x07,0x3e,0x00,0x00,0x02,0x12,0x03,0x29,0x23,0x00,0x03,0x00,0x04,0x56,0x03,
+0x00,0x06,0x36,0x03,0x00,0x07,0x3e,0x03,0x30,0x48,0x00,0x01,0x10,0x60,0x00,0x11,
+0x30,0xa5,0x00,0x07,0x24,0x02,0x00,0x02,0x00,0x49,0x10,0x23,0x00,0x45,0x10,0x07,
+0x30,0x42,0x00,0x01,0x10,0x40,0x00,0x66,0x00,0x00,0x00,0x00,0x8f,0xa2,0x00,0x10,
+0x00,0x00,0x00,0x00,0x00,0x02,0x21,0x43,0x11,0x00,0x00,0x10,0x00,0x07,0x20,0x0b,
+0x15,0x20,0x00,0x06,0x24,0x02,0x00,0x01,0x3c,0x02,0xb0,0x05,0x34,0x42,0x01,0x20,
+0xa4,0x44,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x11,0x22,0x00,0x04,
+0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x05,0x08,0x00,0x00,0x94,0x34,0x42,0x01,0x24,
+0x3c,0x02,0xb0,0x05,0x08,0x00,0x00,0x94,0x34,0x42,0x01,0x22,0x15,0x20,0x00,0x54,
+0x24,0x02,0x00,0x01,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x74,0x90,0x43,0x00,0x00,
+0x00,0x00,0x00,0x00,0xaf,0x83,0x8b,0x94,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x70,
+0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x6b,0x00,0x08,0x11,0x60,0x00,0x18,
+0x00,0x09,0x28,0x40,0x00,0x00,0x40,0x21,0x27,0x85,0x8b,0x90,0x8c,0xa3,0x00,0x00,
+0x8c,0xa2,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x62,0x38,0x23,0x00,0x43,0x10,0x2a,
+0x10,0x40,0x00,0x3d,0x00,0x00,0x00,0x00,0xac,0xa7,0x00,0x00,0x25,0x02,0x00,0x01,
+0x00,0x02,0x16,0x00,0x00,0x02,0x46,0x03,0x29,0x03,0x00,0x03,0x14,0x60,0xff,0xf3,
+0x24,0xa5,0x00,0x0c,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0x70,0x90,0x62,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x4b,0x10,0x23,0xa0,0x62,0x00,0x00,0x00,0x09,0x28,0x40,
+0x00,0xa9,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x83,0x8b,0x98,0x00,0x0a,0x20,0x0b,
+0x00,0x43,0x18,0x21,0x10,0xc0,0x00,0x05,0x00,0x00,0x38,0x21,0x80,0x62,0x00,0x01,
+0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x05,0x00,0x00,0x00,0x00,0x80,0x62,0x00,0x00,
+0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x03,0x00,0xa9,0x10,0x21,0x24,0x07,0x00,0x01,
+0x00,0xa9,0x10,0x21,0x00,0x02,0x30,0x80,0x27,0x82,0x8b,0x98,0xa0,0x67,0x00,0x01,
+0x00,0xc2,0x38,0x21,0x80,0xe3,0x00,0x01,0x00,0x00,0x00,0x00,0x10,0x60,0x00,0x07,
+0x00,0x00,0x00,0x00,0x27,0x83,0x8b,0x90,0x00,0xc3,0x18,0x21,0x8c,0x62,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x44,0x10,0x21,0xac,0x62,0x00,0x00,0x27,0x85,0x8b,0x94,
+0x27,0x82,0x8b,0x90,0x00,0xc5,0x28,0x21,0x00,0xc2,0x10,0x21,0x8c,0x43,0x00,0x00,
+0x8c,0xa4,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x64,0x18,0x2a,0x14,0x60,0x00,0x03,
+0x24,0x02,0x00,0x01,0x03,0xe0,0x00,0x08,0xa0,0xe2,0x00,0x00,0xa0,0xe0,0x00,0x00,
+0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x08,0x00,0x00,0xb7,0xac,0xa0,0x00,0x00,
+0x11,0x22,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x7c,
+0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0xaf,0x83,0x8b,0xac,0x08,0x00,0x00,0xa7,
+0x3c,0x02,0xb0,0x03,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x78,0x90,0x43,0x00,0x00,
+0x00,0x00,0x00,0x00,0xaf,0x83,0x8b,0xa0,0x08,0x00,0x00,0xa7,0x3c,0x02,0xb0,0x03,
+0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20,0x24,0x42,0x04,0x10,
+0x3c,0x05,0xb0,0x03,0xac,0x62,0x00,0x00,0x34,0xa5,0x00,0x70,0x8c,0xa2,0x00,0x00,
+0x90,0x84,0x00,0x08,0x3c,0x06,0xb0,0x03,0x00,0x02,0x16,0x00,0x2c,0x83,0x00,0x03,
+0x34,0xc6,0x00,0x72,0x24,0x07,0x00,0x01,0x10,0x60,0x00,0x11,0x00,0x02,0x2f,0xc2,
+0x90,0xc2,0x00,0x00,0x00,0x00,0x18,0x21,0x00,0x02,0x16,0x00,0x10,0xa7,0x00,0x09,
+0x00,0x02,0x16,0x03,0x14,0x80,0x00,0x0c,0x30,0x43,0x00,0x03,0x83,0x82,0x8b,0x98,
+0x00,0x00,0x00,0x00,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x00,0x02,0x16,0x00,
+0x00,0x02,0x1e,0x03,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x72,0xa0,0x43,0x00,0x00,
+0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x30,0x45,0x00,0x05,0x10,0x87,0x00,0x04,
+0x30,0x43,0x00,0x06,0x93,0x82,0x8b,0xb0,0x08,0x00,0x01,0x1f,0x00,0x43,0x10,0x21,
+0x83,0x82,0x8b,0xa4,0x00,0x00,0x00,0x00,0x00,0x02,0x10,0x40,0x08,0x00,0x01,0x1f,
+0x00,0x45,0x10,0x21,0x10,0x80,0x00,0x05,0x00,0x00,0x18,0x21,0x24,0x63,0x00,0x01,
+0x00,0x64,0x10,0x2b,0x14,0x40,0xff,0xfd,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,
+0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x24,0x42,0x04,0xe4,
+0x3c,0x04,0xb0,0x02,0x34,0x63,0x00,0x20,0xac,0x62,0x00,0x00,0x34,0x84,0x00,0x08,
+0x24,0x02,0x00,0x01,0xaf,0x84,0x8b,0xc0,0xa3,0x82,0x8b,0xd0,0xa7,0x80,0x8b,0xc4,
+0xa7,0x80,0x8b,0xc6,0xaf,0x80,0x8b,0xc8,0xaf,0x80,0x8b,0xcc,0x03,0xe0,0x00,0x08,
+0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20,
+0x24,0x42,0x05,0x24,0x3c,0x04,0xb0,0x03,0xac,0x62,0x00,0x00,0x34,0x84,0x00,0xac,
+0x80,0xa2,0x00,0x15,0x8c,0x83,0x00,0x00,0x27,0xbd,0xff,0xf0,0x00,0x43,0x10,0x21,
+0xac,0x82,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x10,0x3c,0x02,0xb0,0x03,
+0x3c,0x03,0x80,0x00,0x34,0x42,0x00,0x20,0x24,0x63,0x05,0x5c,0x27,0xbd,0xff,0xe0,
+0xac,0x43,0x00,0x00,0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x18,
+0x8f,0x90,0x8b,0xc0,0x0c,0x00,0x02,0x98,0x00,0x80,0x88,0x21,0x14,0x40,0x00,0x2a,
+0x3c,0x02,0x00,0x80,0x16,0x20,0x00,0x02,0x34,0x42,0x02,0x01,0x24,0x02,0x02,0x01,
+0xae,0x02,0x00,0x00,0x97,0x84,0x8b,0xc4,0x97,0x82,0x8b,0xc6,0x3c,0x03,0xb0,0x02,
+0x00,0x83,0x20,0x21,0x24,0x42,0x00,0x04,0xa7,0x82,0x8b,0xc6,0xa4,0x82,0x00,0x00,
+0x8f,0x84,0x8b,0xc8,0x8f,0x82,0x8b,0xc0,0x93,0x85,0x8b,0x72,0x24,0x84,0x00,0x01,
+0x24,0x42,0x00,0x04,0x24,0x03,0x8f,0xff,0x3c,0x07,0xb0,0x06,0x3c,0x06,0xb0,0x03,
+0x00,0x43,0x10,0x24,0x00,0x85,0x28,0x2a,0x34,0xe7,0x80,0x18,0xaf,0x82,0x8b,0xc0,
+0xaf,0x84,0x8b,0xc8,0x10,0xa0,0x00,0x08,0x34,0xc6,0x01,0x08,0x8f,0x83,0x8b,0xcc,
+0x8f,0x84,0x8b,0x8c,0x8c,0xc2,0x00,0x00,0x00,0x64,0x18,0x21,0x00,0x43,0x10,0x2b,
+0x14,0x40,0x00,0x09,0x00,0x00,0x00,0x00,0x8c,0xe2,0x00,0x00,0x3c,0x03,0x0f,0x00,
+0x3c,0x04,0x04,0x00,0x00,0x43,0x10,0x24,0x10,0x44,0x00,0x03,0x00,0x00,0x00,0x00,
+0x0c,0x00,0x04,0x96,0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x18,0x7b,0xb0,0x00,0xbc,
+0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x27,0xbd,0xff,0xd8,0x3c,0x02,0xb0,0x03,
+0x3c,0x03,0x80,0x00,0x24,0x63,0x06,0x48,0xaf,0xb0,0x00,0x10,0x34,0x42,0x00,0x20,
+0x8f,0x90,0x8b,0xc0,0xac,0x43,0x00,0x00,0xaf,0xb3,0x00,0x1c,0xaf,0xb2,0x00,0x18,
+0xaf,0xb1,0x00,0x14,0xaf,0xbf,0x00,0x20,0x00,0x80,0x88,0x21,0x00,0xa0,0x90,0x21,
+0x0c,0x00,0x02,0x98,0x00,0xc0,0x98,0x21,0x24,0x07,0x8f,0xff,0x14,0x40,0x00,0x19,
+0x26,0x03,0x00,0x04,0x24,0x02,0x0e,0x03,0xae,0x02,0x00,0x00,0x00,0x67,0x80,0x24,
+0x26,0x02,0x00,0x04,0xae,0x11,0x00,0x00,0x00,0x47,0x80,0x24,0x97,0x86,0x8b,0xc4,
+0x26,0x03,0x00,0x04,0xae,0x12,0x00,0x00,0x00,0x67,0x80,0x24,0xae,0x13,0x00,0x00,
+0x8f,0x84,0x8b,0xc0,0x3c,0x02,0xb0,0x02,0x97,0x85,0x8b,0xc6,0x00,0xc2,0x30,0x21,
+0x8f,0x82,0x8b,0xc8,0x24,0x84,0x00,0x10,0x24,0xa5,0x00,0x10,0x00,0x87,0x20,0x24,
+0x24,0x42,0x00,0x01,0xa7,0x85,0x8b,0xc6,0xaf,0x84,0x8b,0xc0,0xaf,0x82,0x8b,0xc8,
+0xa4,0xc5,0x00,0x00,0x8f,0xbf,0x00,0x20,0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc,
+0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x28,0x27,0xbd,0xff,0xe8,0xaf,0xbf,0x00,0x10,
+0x94,0x82,0x00,0x04,0x00,0x00,0x00,0x00,0x30,0x42,0xe0,0x00,0x14,0x40,0x00,0x14,
+0x00,0x00,0x00,0x00,0x90,0x82,0x00,0x02,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xfc,
+0x00,0x82,0x28,0x21,0x8c,0xa4,0x00,0x00,0x3c,0x02,0x00,0x70,0x8c,0xa6,0x00,0x08,
+0x00,0x82,0x10,0x21,0x2c,0x43,0x00,0x06,0x10,0x60,0x00,0x09,0x3c,0x03,0x80,0x01,
+0x00,0x02,0x10,0x80,0x24,0x63,0x01,0xe8,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x80,0x00,0x08,0x00,0x00,0x00,0x00,0xaf,0x86,0x80,0x14,
+0x8f,0xbf,0x00,0x10,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,
+0x8c,0xa4,0x00,0x00,0x0c,0x00,0x17,0x84,0x00,0x00,0x00,0x00,0x08,0x00,0x01,0xdc,
+0x00,0x00,0x00,0x00,0x0c,0x00,0x24,0x49,0x00,0xc0,0x20,0x21,0x08,0x00,0x01,0xdc,
+0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x34,0x42,0x01,0x08,0x8c,0x44,0x00,0x00,
+0x8f,0x82,0x80,0x18,0x3c,0x03,0x00,0x0f,0x34,0x63,0x42,0x40,0x00,0x43,0x10,0x21,
+0x00,0x82,0x20,0x2b,0x10,0x80,0x00,0x09,0x24,0x03,0x00,0x05,0x8f,0x82,0x83,0x60,
+0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01,0xaf,0x82,0x83,0x60,0x10,0x43,0x00,0x03,
+0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,
+0x8c,0x63,0x01,0x08,0x24,0x02,0x00,0x01,0xa3,0x82,0x80,0x11,0xaf,0x80,0x83,0x60,
+0xaf,0x83,0x80,0x18,0x08,0x00,0x01,0xf9,0x00,0x00,0x00,0x00,0x30,0x84,0x00,0xff,
+0x14,0x80,0x00,0x2f,0x00,0x00,0x00,0x00,0x8f,0x82,0x80,0x14,0xa3,0x85,0x83,0x93,
+0x10,0x40,0x00,0x2b,0x2c,0xa2,0x00,0x04,0x14,0x40,0x00,0x06,0x00,0x05,0x10,0x40,
+0x24,0xa2,0xff,0xfc,0x2c,0x42,0x00,0x08,0x10,0x40,0x00,0x09,0x24,0xa2,0xff,0xf0,
+0x00,0x05,0x10,0x40,0x27,0x84,0x83,0x9c,0x00,0x44,0x10,0x21,0x94,0x43,0x00,0x00,
+0x00,0x00,0x00,0x00,0x24,0x63,0x00,0x01,0x03,0xe0,0x00,0x08,0xa4,0x43,0x00,0x00,
+0x2c,0x42,0x00,0x10,0x14,0x40,0x00,0x0a,0x00,0x05,0x10,0x40,0x24,0xa2,0xff,0xe0,
+0x2c,0x42,0x00,0x10,0x14,0x40,0x00,0x06,0x00,0x05,0x10,0x40,0x24,0xa2,0xff,0xd0,
+0x2c,0x42,0x00,0x10,0x10,0x40,0x00,0x09,0x24,0xa2,0xff,0xc0,0x00,0x05,0x10,0x40,
+0x27,0x84,0x83,0x9c,0x00,0x44,0x10,0x21,0x94,0x43,0xff,0xf8,0x00,0x00,0x00,0x00,
+0x24,0x63,0x00,0x01,0x03,0xe0,0x00,0x08,0xa4,0x43,0xff,0xf8,0x2c,0x42,0x00,0x10,
+0x10,0x40,0x00,0x07,0x00,0x05,0x10,0x40,0x27,0x84,0x83,0x9c,0x00,0x44,0x10,0x21,
+0x94,0x43,0xff,0xf8,0x00,0x00,0x00,0x00,0x24,0x63,0x00,0x01,0xa4,0x43,0xff,0xf8,
+0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x8f,0x86,0x8b,0xc0,0x8f,0x82,0x80,0x14,
+0x27,0xbd,0xff,0xe8,0xaf,0xbf,0x00,0x10,0x10,0x40,0x00,0x2a,0x00,0xc0,0x38,0x21,
+0x24,0x02,0x00,0x07,0x24,0x03,0xff,0x9c,0xa3,0x82,0x83,0x9b,0xa3,0x83,0x83,0x9a,
+0x27,0x8a,0x83,0x98,0x00,0x00,0x20,0x21,0x24,0x09,0x8f,0xff,0x00,0x04,0x10,0x80,
+0x00,0x4a,0x28,0x21,0x8c,0xa2,0x00,0x00,0x24,0xe3,0x00,0x04,0x24,0x88,0x00,0x01,
+0xac,0xe2,0x00,0x00,0x10,0x80,0x00,0x02,0x00,0x69,0x38,0x24,0xac,0xa0,0x00,0x00,
+0x31,0x04,0x00,0xff,0x2c,0x82,0x00,0x27,0x14,0x40,0xff,0xf5,0x00,0x04,0x10,0x80,
+0x97,0x83,0x8b,0xc6,0x97,0x85,0x8b,0xc4,0x3c,0x02,0xb0,0x02,0x24,0x63,0x00,0x9c,
+0x00,0xa2,0x28,0x21,0x3c,0x04,0xb0,0x06,0xa7,0x83,0x8b,0xc6,0x34,0x84,0x80,0x18,
+0xa4,0xa3,0x00,0x00,0x8c,0x85,0x00,0x00,0x24,0x02,0x8f,0xff,0x24,0xc6,0x00,0x9c,
+0x3c,0x03,0x0f,0x00,0x00,0xc2,0x30,0x24,0x00,0xa3,0x28,0x24,0x3c,0x02,0x04,0x00,
+0xaf,0x86,0x8b,0xc0,0x10,0xa2,0x00,0x03,0x00,0x00,0x00,0x00,0x0c,0x00,0x04,0x96,
+0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x10,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x18,0x8f,0x86,0x8b,0xc0,0x27,0xbd,0xff,0xc8,0x24,0x02,0x00,0x08,
+0x24,0x03,0x00,0x20,0xaf,0xbf,0x00,0x30,0xa3,0xa2,0x00,0x13,0xa3,0xa3,0x00,0x12,
+0xa7,0xa4,0x00,0x10,0x00,0xc0,0x28,0x21,0x27,0xa9,0x00,0x10,0x00,0x00,0x38,0x21,
+0x24,0x08,0x8f,0xff,0x00,0x07,0x10,0x80,0x00,0x49,0x10,0x21,0x8c,0x44,0x00,0x00,
+0x24,0xe3,0x00,0x01,0x30,0x67,0x00,0xff,0x24,0xa2,0x00,0x04,0x2c,0xe3,0x00,0x08,
+0xac,0xa4,0x00,0x00,0x14,0x60,0xff,0xf7,0x00,0x48,0x28,0x24,0x97,0x83,0x8b,0xc6,
+0x97,0x85,0x8b,0xc4,0x3c,0x02,0xb0,0x02,0x24,0x63,0x00,0x20,0x00,0xa2,0x28,0x21,
+0x3c,0x04,0xb0,0x06,0xa7,0x83,0x8b,0xc6,0x34,0x84,0x80,0x18,0xa4,0xa3,0x00,0x00,
+0x8c,0x85,0x00,0x00,0x24,0x02,0x8f,0xff,0x24,0xc6,0x00,0x20,0x3c,0x03,0x0f,0x00,
+0x00,0xc2,0x30,0x24,0x00,0xa3,0x28,0x24,0x3c,0x02,0x04,0x00,0xaf,0x86,0x8b,0xc0,
+0x10,0xa2,0x00,0x03,0x00,0x00,0x00,0x00,0x0c,0x00,0x04,0x96,0x00,0x00,0x00,0x00,
+0x8f,0xbf,0x00,0x30,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x38,
+0x93,0x82,0x8b,0xd0,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x11,0x24,0x06,0x00,0x01,
+0x8f,0x82,0x8b,0xc8,0x3c,0x05,0xb0,0x06,0x3c,0x04,0xb0,0x03,0x34,0xa5,0x80,0x18,
+0x34,0x84,0x01,0x08,0x14,0x40,0x00,0x09,0x00,0x00,0x30,0x21,0x97,0x82,0x8b,0xc4,
+0x8c,0x84,0x00,0x00,0x3c,0x03,0xb0,0x02,0x00,0x43,0x10,0x21,0xaf,0x84,0x8b,0xcc,
+0xa7,0x80,0x8b,0xc6,0xac,0x40,0x00,0x00,0xac,0x40,0x00,0x04,0x8c,0xa2,0x00,0x00,
+0x03,0xe0,0x00,0x08,0x00,0xc0,0x10,0x21,0x8f,0x86,0x8b,0xc0,0x8f,0x82,0x8b,0xc8,
+0x27,0xbd,0xff,0xe8,0xaf,0xbf,0x00,0x10,0x00,0xc0,0x40,0x21,0x14,0x40,0x00,0x0a,
+0x00,0x40,0x50,0x21,0x00,0x00,0x38,0x21,0x27,0x89,0x83,0x68,0x24,0xe2,0x00,0x01,
+0x00,0x07,0x18,0x80,0x30,0x47,0x00,0xff,0x00,0x69,0x18,0x21,0x2c,0xe2,0x00,0x0a,
+0x14,0x40,0xff,0xfa,0xac,0x60,0x00,0x00,0x3c,0x02,0x00,0x80,0x10,0x82,0x00,0x6f,
+0x00,0x00,0x00,0x00,0x97,0x82,0x83,0x6e,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01,
+0xa7,0x82,0x83,0x6e,0x90,0xa3,0x00,0x15,0x97,0x82,0x83,0x70,0x00,0x03,0x1e,0x00,
+0x00,0x03,0x1e,0x03,0x00,0x43,0x10,0x21,0xa7,0x82,0x83,0x70,0x8c,0xa4,0x00,0x20,
+0x3c,0x02,0x00,0x60,0x3c,0x03,0x00,0x20,0x00,0x82,0x20,0x24,0x10,0x83,0x00,0x54,
+0x00,0x00,0x00,0x00,0x14,0x80,0x00,0x47,0x00,0x00,0x00,0x00,0x97,0x82,0x83,0x74,
+0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01,0xa7,0x82,0x83,0x74,0x84,0xa3,0x00,0x06,
+0x8f,0x82,0x83,0x84,0x00,0x00,0x00,0x00,0x00,0x43,0x10,0x21,0xaf,0x82,0x83,0x84,
+0x25,0x42,0x00,0x01,0x28,0x43,0x27,0x10,0xaf,0x82,0x8b,0xc8,0x10,0x60,0x00,0x09,
+0x24,0x02,0x00,0x04,0x93,0x83,0x80,0x11,0x24,0x02,0x00,0x01,0x10,0x62,0x00,0x05,
+0x24,0x02,0x00,0x04,0x8f,0xbf,0x00,0x10,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x18,0x24,0x03,0x00,0x28,0xa3,0x83,0x83,0x6a,0xa3,0x82,0x83,0x6b,
+0x90,0xa2,0x00,0x18,0x93,0x83,0x83,0x93,0x00,0x00,0x38,0x21,0x00,0x02,0x16,0x00,
+0x00,0x02,0x16,0x03,0xa7,0x82,0x83,0x7e,0xa3,0x83,0x83,0x8c,0x27,0x89,0x83,0x68,
+0x24,0x05,0x8f,0xff,0x00,0x07,0x10,0x80,0x00,0x49,0x10,0x21,0x8c,0x44,0x00,0x00,
+0x24,0xe3,0x00,0x01,0x30,0x67,0x00,0xff,0x25,0x02,0x00,0x04,0x2c,0xe3,0x00,0x0a,
+0xad,0x04,0x00,0x00,0x14,0x60,0xff,0xf7,0x00,0x45,0x40,0x24,0x97,0x83,0x8b,0xc6,
+0x97,0x85,0x8b,0xc4,0x3c,0x02,0xb0,0x02,0x24,0x63,0x00,0x28,0x00,0xa2,0x28,0x21,
+0x3c,0x04,0xb0,0x06,0xa7,0x83,0x8b,0xc6,0x34,0x84,0x80,0x18,0xa4,0xa3,0x00,0x00,
+0x8c,0x85,0x00,0x00,0x24,0x02,0x8f,0xff,0x24,0xc6,0x00,0x28,0x3c,0x03,0x0f,0x00,
+0x00,0xc2,0x30,0x24,0x00,0xa3,0x28,0x24,0x3c,0x02,0x04,0x00,0xaf,0x86,0x8b,0xc0,
+0x10,0xa2,0x00,0x03,0x00,0x00,0x00,0x00,0x0c,0x00,0x04,0x96,0x00,0x00,0x00,0x00,
+0x0c,0x00,0x02,0x36,0x00,0x00,0x00,0x00,0xa3,0x80,0x80,0x11,0x08,0x00,0x02,0xe5,
+0x00,0x00,0x00,0x00,0x97,0x82,0x83,0x76,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01,
+0xa7,0x82,0x83,0x76,0x84,0xa3,0x00,0x06,0x8f,0x82,0x83,0x88,0x00,0x00,0x00,0x00,
+0x00,0x43,0x10,0x21,0xaf,0x82,0x83,0x88,0x08,0x00,0x02,0xdd,0x25,0x42,0x00,0x01,
+0x97,0x82,0x83,0x72,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01,0xa7,0x82,0x83,0x72,
+0x84,0xa3,0x00,0x06,0x8f,0x82,0x83,0x80,0x00,0x00,0x00,0x00,0x00,0x43,0x10,0x21,
+0xaf,0x82,0x83,0x80,0x08,0x00,0x02,0xdd,0x25,0x42,0x00,0x01,0x97,0x82,0x83,0x6c,
+0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01,0xa7,0x82,0x83,0x6c,0x08,0x00,0x02,0xc5,
+0x00,0x00,0x00,0x00,0x27,0xbd,0xff,0xd0,0xaf,0xbf,0x00,0x28,0x8c,0xa3,0x00,0x20,
+0x8f,0x8a,0x8b,0xc0,0x3c,0x02,0x00,0x10,0x00,0x62,0x10,0x24,0x00,0xa0,0x38,0x21,
+0x01,0x40,0x48,0x21,0x10,0x40,0x00,0x3d,0x00,0x80,0x28,0x21,0x8c,0xe4,0x00,0x1c,
+0x34,0xa5,0x12,0x06,0xaf,0xa5,0x00,0x10,0x8c,0x82,0x00,0x08,0x00,0x03,0x1c,0x42,
+0x30,0x63,0x00,0x30,0x00,0x02,0x13,0x02,0x30,0x42,0x00,0x40,0x00,0x43,0x10,0x25,
+0x90,0xe6,0x00,0x10,0x90,0xe4,0x00,0x13,0x94,0xe8,0x00,0x0c,0x94,0xe3,0x00,0x1a,
+0x00,0x02,0x16,0x00,0x90,0xe7,0x00,0x12,0x00,0xa2,0x28,0x25,0x24,0x02,0x12,0x34,
+0xa7,0xa2,0x00,0x1c,0x24,0x02,0x56,0x78,0xaf,0xa5,0x00,0x10,0xa3,0xa6,0x00,0x18,
+0xa3,0xa7,0x00,0x1f,0xa7,0xa3,0x00,0x1a,0xa3,0xa4,0x00,0x19,0xa7,0xa8,0x00,0x20,
+0xa7,0xa2,0x00,0x22,0x00,0x00,0x28,0x21,0x27,0xa7,0x00,0x10,0x24,0x06,0x8f,0xff,
+0x00,0x05,0x10,0x80,0x00,0x47,0x10,0x21,0x8c,0x44,0x00,0x00,0x24,0xa3,0x00,0x01,
+0x30,0x65,0x00,0xff,0x25,0x22,0x00,0x04,0x2c,0xa3,0x00,0x05,0xad,0x24,0x00,0x00,
+0x14,0x60,0xff,0xf7,0x00,0x46,0x48,0x24,0x97,0x83,0x8b,0xc6,0x97,0x85,0x8b,0xc4,
+0x3c,0x02,0xb0,0x02,0x24,0x63,0x00,0x14,0x00,0xa2,0x28,0x21,0x3c,0x04,0xb0,0x06,
+0xa7,0x83,0x8b,0xc6,0x34,0x84,0x80,0x18,0xa4,0xa3,0x00,0x00,0x8c,0x85,0x00,0x00,
+0x24,0x02,0x8f,0xff,0x25,0x46,0x00,0x14,0x3c,0x03,0x0f,0x00,0x00,0xc2,0x50,0x24,
+0x00,0xa3,0x28,0x24,0x3c,0x02,0x04,0x00,0xaf,0x8a,0x8b,0xc0,0x10,0xa2,0x00,0x03,
+0x00,0x00,0x00,0x00,0x0c,0x00,0x04,0x96,0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x28,
+0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x30,0x3c,0x05,0xb0,0x03,
+0x3c,0x02,0x80,0x00,0x27,0xbd,0xff,0xc8,0x00,0x04,0x22,0x00,0x34,0xa5,0x00,0x20,
+0x24,0x42,0x0d,0xfc,0x3c,0x03,0xb0,0x00,0xaf,0xb5,0x00,0x24,0xaf,0xb4,0x00,0x20,
+0xaf,0xb2,0x00,0x18,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x30,0x00,0x83,0x80,0x21,
+0xaf,0xb7,0x00,0x2c,0xaf,0xb6,0x00,0x28,0xaf,0xb3,0x00,0x1c,0xaf,0xb1,0x00,0x14,
+0xac,0xa2,0x00,0x00,0x8e,0x09,0x00,0x00,0x00,0x00,0x90,0x21,0x26,0x10,0x00,0x08,
+0x00,0x09,0xa6,0x02,0x12,0x80,0x00,0x13,0x00,0x00,0xa8,0x21,0x24,0x13,0x00,0x02,
+0x3c,0x16,0x00,0xff,0x3c,0x17,0xff,0x00,0x8e,0x09,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x09,0x12,0x02,0x24,0x42,0x00,0x02,0x31,0x25,0x00,0xff,0x10,0xb3,0x00,0x76,
+0x30,0x51,0x00,0xff,0x24,0x02,0x00,0x03,0x10,0xa2,0x00,0x18,0x00,0x00,0x00,0x00,
+0x02,0x51,0x10,0x21,0x30,0x52,0xff,0xff,0x02,0x54,0x18,0x2b,0x14,0x60,0xff,0xf2,
+0x02,0x11,0x80,0x21,0x12,0xa0,0x00,0x0a,0x3c,0x02,0xb0,0x06,0x34,0x42,0x80,0x18,
+0x8c,0x43,0x00,0x00,0x3c,0x04,0x0f,0x00,0x3c,0x02,0x04,0x00,0x00,0x64,0x18,0x24,
+0x10,0x62,0x00,0x03,0x00,0x00,0x00,0x00,0x0c,0x00,0x04,0x96,0x00,0x00,0x00,0x00,
+0x8f,0xbf,0x00,0x30,0x7b,0xb6,0x01,0x7c,0x7b,0xb4,0x01,0x3c,0x7b,0xb2,0x00,0xfc,
+0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x38,0x8e,0x09,0x00,0x04,
+0x24,0x15,0x00,0x01,0x8e,0x06,0x00,0x0c,0x00,0x09,0x11,0x42,0x00,0x09,0x18,0xc2,
+0x30,0x48,0x00,0x03,0x00,0x09,0x14,0x02,0x30,0x6c,0x00,0x03,0x00,0x09,0x26,0x02,
+0x11,0x15,0x00,0x45,0x30,0x43,0x00,0x0f,0x29,0x02,0x00,0x02,0x14,0x40,0x00,0x26,
+0x00,0x00,0x00,0x00,0x11,0x13,0x00,0x0f,0x00,0x00,0x38,0x21,0x00,0x07,0x22,0x02,
+0x30,0x84,0xff,0x00,0x3c,0x03,0x00,0xff,0x00,0x07,0x2e,0x02,0x00,0x07,0x12,0x00,
+0x00,0x43,0x10,0x24,0x00,0xa4,0x28,0x25,0x00,0xa2,0x28,0x25,0x00,0x07,0x1e,0x00,
+0x00,0xa3,0x28,0x25,0x0c,0x00,0x01,0x92,0x01,0x20,0x20,0x21,0x08,0x00,0x03,0xa5,
+0x02,0x51,0x10,0x21,0x11,0x95,0x00,0x0f,0x00,0x00,0x00,0x00,0x11,0x88,0x00,0x07,
+0x00,0x00,0x00,0x00,0x00,0x04,0x10,0x80,0x27,0x83,0x8b,0x70,0x00,0x43,0x10,0x21,
+0x8c,0x47,0x00,0x18,0x08,0x00,0x03,0xcc,0x00,0x07,0x22,0x02,0x00,0x04,0x10,0x40,
+0x27,0x83,0x8b,0x78,0x00,0x43,0x10,0x21,0x94,0x47,0x00,0x02,0x08,0x00,0x03,0xcc,
+0x00,0x07,0x22,0x02,0x27,0x82,0x8b,0x70,0x00,0x82,0x10,0x21,0x90,0x47,0x00,0x00,
+0x08,0x00,0x03,0xcc,0x00,0x07,0x22,0x02,0x15,0x00,0xff,0xdc,0x00,0x00,0x38,0x21,
+0x10,0x75,0x00,0x05,0x00,0x80,0x38,0x21,0x00,0x65,0x18,0x26,0x24,0x82,0x01,0x00,
+0x00,0x00,0x38,0x21,0x00,0x43,0x38,0x0a,0x24,0x02,0x00,0x01,0x11,0x82,0x00,0x0e,
+0x3c,0x02,0xb0,0x03,0x24,0x02,0x00,0x02,0x11,0x82,0x00,0x06,0x00,0x00,0x00,0x00,
+0x3c,0x02,0xb0,0x03,0x00,0xe2,0x10,0x21,0x8c,0x47,0x00,0x00,0x08,0x00,0x03,0xcc,
+0x00,0x07,0x22,0x02,0x3c,0x02,0xb0,0x03,0x00,0xe2,0x10,0x21,0x94,0x43,0x00,0x00,
+0x08,0x00,0x03,0xcb,0x30,0x67,0xff,0xff,0x00,0xe2,0x10,0x21,0x90,0x43,0x00,0x00,
+0x08,0x00,0x03,0xcb,0x30,0x67,0x00,0xff,0x30,0x62,0x00,0x03,0x00,0x02,0x12,0x00,
+0x11,0x95,0x00,0x07,0x00,0x44,0x38,0x21,0x11,0x93,0x00,0x03,0x00,0x00,0x00,0x00,
+0x08,0x00,0x03,0xfd,0x3c,0x02,0xb0,0x0a,0x08,0x00,0x04,0x02,0x3c,0x02,0xb0,0x0a,
+0x08,0x00,0x04,0x06,0x3c,0x02,0xb0,0x0a,0x8e,0x09,0x00,0x04,0x8e,0x02,0x00,0x08,
+0x8e,0x03,0x00,0x0c,0x00,0x09,0x41,0x42,0x00,0x02,0x22,0x02,0x00,0x03,0x3a,0x02,
+0x30,0x84,0xff,0x00,0x30,0xe7,0xff,0x00,0x00,0x02,0x5e,0x02,0x00,0x02,0x32,0x00,
+0x00,0x03,0x56,0x02,0x00,0x03,0x2a,0x00,0x01,0x64,0x58,0x25,0x00,0xd6,0x30,0x24,
+0x01,0x47,0x50,0x25,0x00,0x02,0x16,0x00,0x00,0xb6,0x28,0x24,0x00,0x03,0x1e,0x00,
+0x01,0x66,0x58,0x25,0x01,0x45,0x50,0x25,0x00,0x57,0x10,0x24,0x00,0x77,0x18,0x24,
+0x01,0x62,0x38,0x25,0x01,0x43,0x30,0x25,0x00,0x09,0x10,0xc2,0x00,0x09,0x1c,0x02,
+0x31,0x08,0x00,0x03,0x30,0x4c,0x00,0x03,0x30,0x63,0x00,0x0f,0x00,0x09,0x26,0x02,
+0x00,0xe0,0x58,0x21,0x15,0x00,0x00,0x28,0x00,0xc0,0x50,0x21,0x24,0x02,0x00,0x01,
+0x10,0x62,0x00,0x06,0x00,0x80,0x28,0x21,0x24,0x02,0x00,0x03,0x14,0x62,0xff,0x69,
+0x02,0x51,0x10,0x21,0x24,0x85,0x01,0x00,0x24,0x02,0x00,0x01,0x11,0x82,0x00,0x15,
+0x24,0x02,0x00,0x02,0x11,0x82,0x00,0x0a,0x3c,0x03,0xb0,0x03,0x00,0xa3,0x18,0x21,
+0x8c,0x62,0x00,0x00,0x00,0x0a,0x20,0x27,0x01,0x6a,0x28,0x24,0x00,0x44,0x10,0x24,
+0x00,0x45,0x10,0x25,0xac,0x62,0x00,0x00,0x08,0x00,0x03,0xa5,0x02,0x51,0x10,0x21,
+0x00,0xa3,0x18,0x21,0x94,0x62,0x00,0x00,0x00,0x0a,0x20,0x27,0x01,0x6a,0x28,0x24,
+0x00,0x44,0x10,0x24,0x00,0x45,0x10,0x25,0xa4,0x62,0x00,0x00,0x08,0x00,0x03,0xa5,
+0x02,0x51,0x10,0x21,0x3c,0x03,0xb0,0x03,0x00,0xa3,0x18,0x21,0x90,0x62,0x00,0x00,
+0x00,0x0a,0x20,0x27,0x01,0x6a,0x28,0x24,0x00,0x44,0x10,0x24,0x00,0x45,0x10,0x25,
+0x08,0x00,0x03,0xa4,0xa0,0x62,0x00,0x00,0x24,0x02,0x00,0x01,0x11,0x02,0x00,0x21,
+0x00,0x00,0x00,0x00,0x15,0x13,0xff,0x42,0x00,0x00,0x00,0x00,0x11,0x82,0x00,0x17,
+0x00,0x00,0x00,0x00,0x11,0x88,0x00,0x0b,0x00,0x00,0x00,0x00,0x27,0x83,0x8b,0x70,
+0x00,0x04,0x20,0x80,0x00,0x83,0x20,0x21,0x8c,0x82,0x00,0x18,0x00,0x06,0x18,0x27,
+0x00,0xe6,0x28,0x24,0x00,0x43,0x10,0x24,0x00,0x45,0x10,0x25,0x08,0x00,0x03,0xa4,
+0xac,0x82,0x00,0x18,0x27,0x83,0x8b,0x78,0x00,0x04,0x20,0x40,0x00,0x83,0x20,0x21,
+0x94,0x82,0x00,0x02,0x00,0x06,0x18,0x27,0x00,0xe6,0x28,0x24,0x00,0x43,0x10,0x24,
+0x00,0x45,0x10,0x25,0x08,0x00,0x03,0xa4,0xa4,0x82,0x00,0x02,0x27,0x83,0x8b,0x70,
+0x00,0x83,0x18,0x21,0x90,0x62,0x00,0x00,0x00,0x06,0x20,0x27,0x08,0x00,0x04,0x5a,
+0x00,0xe6,0x28,0x24,0x30,0x62,0x00,0x07,0x00,0x02,0x12,0x00,0x11,0x88,0x00,0x0f,
+0x00,0x44,0x10,0x21,0x11,0x93,0x00,0x07,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x0a,
+0x00,0x43,0x18,0x21,0x8c,0x62,0x00,0x00,0x00,0x06,0x20,0x27,0x08,0x00,0x04,0x47,
+0x00,0xe6,0x28,0x24,0x3c,0x03,0xb0,0x0a,0x00,0x43,0x18,0x21,0x94,0x62,0x00,0x00,
+0x00,0x06,0x20,0x27,0x08,0x00,0x04,0x50,0x00,0xe6,0x28,0x24,0x3c,0x03,0xb0,0x0a,
+0x08,0x00,0x04,0x7d,0x00,0x43,0x18,0x21,0x97,0x85,0x8b,0xc4,0x3c,0x07,0xb0,0x02,
+0x3c,0x04,0xb0,0x03,0x3c,0x02,0x80,0x00,0x00,0xa7,0x28,0x21,0x34,0x84,0x00,0x20,
+0x24,0x42,0x12,0x58,0x24,0x03,0xff,0x80,0xac,0x82,0x00,0x00,0xa0,0xa3,0x00,0x07,
+0x97,0x82,0x8b,0xc6,0x97,0x85,0x8b,0xc4,0x3c,0x06,0xb0,0x06,0x30,0x42,0xff,0xf8,
+0x24,0x42,0x00,0x10,0x00,0xa2,0x10,0x21,0x30,0x42,0x0f,0xff,0x24,0x44,0x00,0x08,
+0x30,0x84,0x0f,0xff,0x00,0x05,0x28,0xc2,0x3c,0x03,0x00,0x40,0x00,0xa3,0x28,0x25,
+0x00,0x87,0x20,0x21,0x34,0xc6,0x80,0x18,0xac,0xc5,0x00,0x00,0xaf,0x84,0x8b,0xc0,
+0xa7,0x82,0x8b,0xc4,0xa7,0x80,0x8b,0xc6,0xaf,0x80,0x8b,0xc8,0x03,0xe0,0x00,0x08,
+0x00,0x00,0x00,0x00,0x30,0xa5,0x00,0xff,0x30,0x84,0x00,0xff,0x24,0x02,0x00,0x01,
+0x00,0xe0,0x48,0x21,0x30,0xc6,0x00,0xff,0x8f,0xa7,0x00,0x10,0x10,0x82,0x00,0x07,
+0x00,0xa0,0x40,0x21,0x24,0x02,0x00,0x03,0x10,0x82,0x00,0x03,0x00,0x00,0x00,0x00,
+0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x24,0xa8,0x01,0x00,0x3c,0x03,0xb0,0x03,
+0x24,0x02,0x00,0x01,0x00,0x07,0x20,0x27,0x01,0x27,0x28,0x24,0x10,0xc2,0x00,0x14,
+0x01,0x03,0x18,0x21,0x24,0x02,0x00,0x02,0x10,0xc2,0x00,0x09,0x00,0x07,0x50,0x27,
+0x3c,0x03,0xb0,0x03,0x01,0x03,0x18,0x21,0x8c,0x62,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x4a,0x10,0x24,0x00,0x45,0x10,0x25,0x08,0x00,0x04,0xe1,0xac,0x62,0x00,0x00,
+0x3c,0x03,0xb0,0x03,0x01,0x03,0x18,0x21,0x94,0x62,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x4a,0x10,0x24,0x00,0x45,0x10,0x25,0x03,0xe0,0x00,0x08,0xa4,0x62,0x00,0x00,
+0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x44,0x10,0x24,0x00,0x45,0x10,0x25,
+0xa0,0x62,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x30,0x84,0x00,0x07,
+0x00,0x04,0x22,0x00,0x30,0xa5,0x00,0xff,0x00,0x85,0x28,0x21,0x3c,0x02,0xb0,0x0a,
+0x00,0xa2,0x40,0x21,0x30,0xc6,0x00,0xff,0x24,0x02,0x00,0x01,0x8f,0xa4,0x00,0x10,
+0x10,0xc2,0x00,0x14,0x24,0x02,0x00,0x02,0x00,0x04,0x50,0x27,0x10,0xc2,0x00,0x09,
+0x00,0xe4,0x48,0x24,0x3c,0x03,0xb0,0x0a,0x00,0xa3,0x18,0x21,0x8c,0x62,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x4a,0x10,0x24,0x00,0x49,0x10,0x25,0x03,0xe0,0x00,0x08,
+0xac,0x62,0x00,0x00,0x3c,0x03,0xb0,0x0a,0x00,0xa3,0x18,0x21,0x94,0x62,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x4a,0x10,0x24,0x00,0x49,0x10,0x25,0x03,0xe0,0x00,0x08,
+0xa4,0x62,0x00,0x00,0x91,0x02,0x00,0x00,0x00,0x04,0x18,0x27,0x00,0xe4,0x20,0x24,
+0x00,0x43,0x10,0x24,0x00,0x44,0x10,0x25,0x03,0xe0,0x00,0x08,0xa1,0x02,0x00,0x00,
+0x30,0xa9,0x00,0xff,0x27,0x83,0x8b,0x70,0x30,0x85,0x00,0xff,0x24,0x02,0x00,0x01,
+0x00,0x07,0x50,0x27,0x00,0xc7,0x40,0x24,0x11,0x22,0x00,0x17,0x00,0xa3,0x18,0x21,
+0x00,0x05,0x20,0x40,0x27,0x82,0x8b,0x70,0x00,0x05,0x28,0x80,0x27,0x83,0x8b,0x78,
+0x00,0x83,0x50,0x21,0x00,0xa2,0x20,0x21,0x24,0x02,0x00,0x02,0x00,0x07,0x40,0x27,
+0x11,0x22,0x00,0x07,0x00,0xc7,0x28,0x24,0x8c,0x82,0x00,0x18,0x00,0x00,0x00,0x00,
+0x00,0x48,0x10,0x24,0x00,0x45,0x10,0x25,0x03,0xe0,0x00,0x08,0xac,0x82,0x00,0x18,
+0x95,0x42,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x48,0x10,0x24,0x00,0x45,0x10,0x25,
+0x03,0xe0,0x00,0x08,0xa5,0x42,0x00,0x02,0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x4a,0x10,0x24,0x00,0x48,0x10,0x25,0x03,0xe0,0x00,0x08,0xa0,0x62,0x00,0x00,
+0x00,0x04,0x32,0x02,0x30,0xc6,0xff,0x00,0x00,0x04,0x16,0x02,0x00,0x04,0x1a,0x00,
+0x3c,0x05,0x00,0xff,0x00,0x65,0x18,0x24,0x00,0x46,0x10,0x25,0x00,0x43,0x10,0x25,
+0x00,0x04,0x26,0x00,0x03,0xe0,0x00,0x08,0x00,0x44,0x10,0x25,0x3c,0x03,0xb0,0x03,
+0x3c,0x02,0x80,0x00,0x27,0xbd,0xff,0xe8,0x34,0x63,0x00,0x20,0x24,0x42,0x14,0xdc,
+0x3c,0x04,0xb0,0x03,0xaf,0xbf,0x00,0x14,0xac,0x62,0x00,0x00,0xaf,0xb0,0x00,0x10,
+0x34,0x84,0x00,0x2c,0x8c,0x83,0x00,0x00,0xa7,0x80,0xbc,0x00,0x00,0x03,0x12,0x02,
+0x00,0x03,0x2d,0x02,0x30,0x42,0x0f,0xff,0xa3,0x83,0xbc,0x08,0xa7,0x85,0xbc,0x0c,
+0xa7,0x82,0xbc,0x0a,0xa7,0x80,0xbc,0x02,0xa7,0x80,0xbc,0x04,0xa7,0x80,0xbc,0x06,
+0x0c,0x00,0x06,0xd1,0x24,0x04,0x05,0x00,0x3c,0x05,0x08,0x00,0x00,0x45,0x28,0x25,
+0x24,0x04,0x05,0x00,0x0c,0x00,0x06,0xbf,0x00,0x40,0x80,0x21,0x3c,0x02,0xf7,0xff,
+0x34,0x42,0xff,0xff,0x02,0x02,0x80,0x24,0x02,0x00,0x28,0x21,0x0c,0x00,0x06,0xbf,
+0x24,0x04,0x05,0x00,0x3c,0x02,0xb0,0x03,0x3c,0x03,0xb0,0x03,0x34,0x42,0x01,0x08,
+0x34,0x63,0x01,0x18,0x8c,0x45,0x00,0x00,0x8c,0x64,0x00,0x00,0x3c,0x02,0x00,0x0f,
+0x3c,0x03,0x00,0x4c,0x30,0x84,0x02,0x00,0x34,0x63,0x4b,0x40,0xaf,0x85,0xbc,0x10,
+0x10,0x80,0x00,0x06,0x34,0x42,0x42,0x40,0xaf,0x83,0xbc,0x14,0x8f,0xbf,0x00,0x14,
+0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0xaf,0x82,0xbc,0x14,
+0x08,0x00,0x05,0x67,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,
+0x27,0xbd,0xff,0xc8,0x34,0x63,0x00,0x20,0x24,0x42,0x15,0xb8,0x30,0x84,0x00,0xff,
+0xaf,0xbf,0x00,0x30,0xaf,0xb7,0x00,0x2c,0xaf,0xb6,0x00,0x28,0xaf,0xb5,0x00,0x24,
+0xaf,0xb4,0x00,0x20,0xaf,0xb3,0x00,0x1c,0xaf,0xb2,0x00,0x18,0xaf,0xb1,0x00,0x14,
+0xaf,0xb0,0x00,0x10,0xac,0x62,0x00,0x00,0x10,0x80,0x00,0x1c,0x24,0x02,0x00,0x02,
+0x10,0x82,0x00,0x08,0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x30,0x7b,0xb6,0x01,0x7c,
+0x7b,0xb4,0x01,0x3c,0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x38,0xa7,0x80,0xbc,0x00,0xa7,0x80,0xbc,0x02,0xa7,0x80,0xbc,0x04,
+0xa7,0x80,0xbc,0x06,0x0c,0x00,0x06,0xd1,0x24,0x04,0x05,0x00,0x3c,0x05,0x08,0x00,
+0x00,0x45,0x28,0x25,0x24,0x04,0x05,0x00,0x0c,0x00,0x06,0xbf,0x00,0x40,0x80,0x21,
+0x3c,0x05,0xf7,0xff,0x34,0xa5,0xff,0xff,0x02,0x05,0x28,0x24,0x0c,0x00,0x06,0xbf,
+0x24,0x04,0x05,0x00,0x08,0x00,0x05,0x82,0x00,0x00,0x00,0x00,0x0c,0x00,0x06,0xd1,
+0x24,0x04,0x05,0xa0,0x24,0x04,0x05,0xa4,0x0c,0x00,0x06,0xd1,0x00,0x02,0xbc,0x02,
+0x24,0x04,0x05,0xa8,0x00,0x02,0xb4,0x02,0x0c,0x00,0x06,0xd1,0x30,0x55,0xff,0xff,
+0x00,0x40,0x80,0x21,0x97,0x84,0xbc,0x00,0x97,0x82,0xbc,0x02,0x97,0x83,0xbc,0x06,
+0x02,0xe4,0x20,0x23,0x02,0xa2,0x10,0x23,0x00,0x82,0x20,0x21,0x97,0x82,0xbc,0x04,
+0x32,0x14,0xff,0xff,0x02,0x83,0x18,0x23,0x02,0xc2,0x10,0x23,0x00,0x82,0x20,0x21,
+0x93,0x82,0xbc,0x08,0x00,0x83,0x20,0x21,0x30,0x84,0xff,0xff,0x00,0x82,0x10,0x2b,
+0x14,0x40,0x00,0xaa,0x00,0x00,0x00,0x00,0x97,0x82,0xbc,0x0c,0x00,0x00,0x00,0x00,
+0x00,0x44,0x10,0x2b,0x14,0x40,0x00,0x7f,0x00,0x00,0x00,0x00,0x97,0x82,0xbc,0x0a,
+0x00,0x00,0x00,0x00,0x00,0x44,0x10,0x2b,0x10,0x40,0x00,0x3a,0x00,0x00,0x00,0x00,
+0x0c,0x00,0x06,0xd1,0x24,0x04,0x04,0x50,0x30,0x51,0x00,0x7f,0x00,0x40,0x80,0x21,
+0x2e,0x22,0x00,0x32,0x10,0x40,0x00,0x13,0x24,0x02,0x00,0x20,0x12,0x22,0x00,0x17,
+0x24,0x02,0xff,0x80,0x02,0x02,0x10,0x24,0x26,0x31,0x00,0x01,0x00,0x51,0x80,0x25,
+0x02,0x00,0x28,0x21,0x0c,0x00,0x06,0xbf,0x24,0x04,0x04,0x50,0x02,0x00,0x28,0x21,
+0x0c,0x00,0x06,0xbf,0x24,0x04,0x04,0x58,0x02,0x00,0x28,0x21,0x0c,0x00,0x06,0xbf,
+0x24,0x04,0x04,0x60,0x02,0x00,0x28,0x21,0x24,0x04,0x04,0x68,0x0c,0x00,0x06,0xbf,
+0x00,0x00,0x00,0x00,0xa7,0x97,0xbc,0x00,0xa7,0x95,0xbc,0x02,0xa7,0x96,0xbc,0x04,
+0xa7,0x94,0xbc,0x06,0x08,0x00,0x05,0x82,0x00,0x00,0x00,0x00,0x0c,0x00,0x06,0xd1,
+0x24,0x04,0x02,0x08,0x3c,0x04,0x00,0xc0,0x00,0x40,0x28,0x21,0x00,0x44,0x10,0x24,
+0x00,0x02,0x15,0x82,0x24,0x03,0x00,0x03,0x10,0x43,0x00,0x07,0x00,0x00,0x00,0x00,
+0x3c,0x02,0xff,0x3f,0x34,0x42,0xff,0xff,0x00,0xa2,0x10,0x24,0x00,0x44,0x28,0x25,
+0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x08,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x2c,
+0x00,0x40,0x90,0x21,0x3c,0x02,0xff,0xff,0x34,0x42,0x3f,0xff,0x02,0x42,0x90,0x24,
+0x02,0x40,0x28,0x21,0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x2c,0x08,0x00,0x05,0xc9,
+0x24,0x02,0xff,0x80,0x0c,0x00,0x06,0xd1,0x24,0x04,0x04,0x50,0x30,0x51,0x00,0x7f,
+0x24,0x02,0x00,0x20,0x16,0x22,0xff,0xdb,0x00,0x00,0x00,0x00,0x0c,0x00,0x06,0xd1,
+0x24,0x04,0x02,0x2c,0x34,0x52,0x40,0x00,0x02,0x40,0x28,0x21,0x0c,0x00,0x06,0xbf,
+0x24,0x04,0x02,0x2c,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x58,0x24,0x04,0x02,0x5c,
+0x0c,0x00,0x06,0xd1,0x00,0x02,0x9e,0x02,0x30,0x43,0x00,0xff,0x00,0x13,0x12,0x00,
+0x00,0x43,0x10,0x25,0x2c,0x43,0x00,0x04,0x14,0x60,0x00,0x1d,0x2c,0x42,0x00,0x11,
+0x10,0x40,0x00,0x0b,0x00,0x00,0x00,0x00,0x3c,0x02,0xff,0xff,0x34,0x42,0x3f,0xff,
+0x02,0x42,0x90,0x24,0x02,0x40,0x28,0x21,0x24,0x04,0x02,0x2c,0x0c,0x00,0x06,0xbf,
+0x36,0x52,0x80,0x00,0x02,0x40,0x28,0x21,0x08,0x00,0x05,0xd7,0x24,0x04,0x02,0x2c,
+0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x08,0x3c,0x04,0x00,0xc0,0x00,0x40,0x28,0x21,
+0x00,0x44,0x10,0x24,0x00,0x02,0x15,0x82,0x24,0x03,0x00,0x02,0x14,0x43,0xff,0xee,
+0x3c,0x02,0xff,0x3f,0x34,0x42,0xff,0xff,0x00,0xa2,0x10,0x24,0x00,0x44,0x28,0x25,
+0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x08,0x08,0x00,0x06,0x13,0x3c,0x02,0xff,0xff,
+0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x08,0x00,0x40,0x28,0x21,0x00,0x02,0x15,0x82,
+0x30,0x42,0x00,0x03,0x24,0x03,0x00,0x03,0x14,0x43,0xff,0xdf,0x3c,0x02,0xff,0x3f,
+0x34,0x42,0xff,0xff,0x00,0xa2,0x10,0x24,0x3c,0x03,0x00,0x80,0x08,0x00,0x06,0x28,
+0x00,0x43,0x28,0x25,0x0c,0x00,0x06,0xd1,0x24,0x04,0x04,0x50,0x30,0x51,0x00,0x7f,
+0x00,0x40,0x80,0x21,0x2e,0x22,0x00,0x32,0x10,0x40,0xff,0x9a,0x24,0x02,0x00,0x20,
+0x12,0x22,0x00,0x04,0x24,0x02,0xff,0x80,0x02,0x02,0x10,0x24,0x08,0x00,0x05,0xcb,
+0x26,0x31,0x00,0x02,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x08,0x3c,0x04,0x00,0xc0,
+0x00,0x40,0x28,0x21,0x00,0x44,0x10,0x24,0x00,0x02,0x15,0x82,0x24,0x03,0x00,0x03,
+0x10,0x43,0x00,0x07,0x00,0x00,0x00,0x00,0x3c,0x02,0xff,0x3f,0x34,0x42,0xff,0xff,
+0x00,0xa2,0x10,0x24,0x00,0x44,0x28,0x25,0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x08,
+0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x2c,0x00,0x40,0x90,0x21,0x3c,0x02,0xff,0xff,
+0x34,0x42,0x3f,0xff,0x02,0x42,0x90,0x24,0x02,0x40,0x28,0x21,0x0c,0x00,0x06,0xbf,
+0x24,0x04,0x02,0x2c,0x08,0x00,0x06,0x42,0x24,0x02,0xff,0x80,0x0c,0x00,0x06,0xd1,
+0x24,0x04,0x04,0x50,0x00,0x40,0x80,0x21,0x30,0x51,0x00,0x7f,0x24,0x02,0x00,0x20,
+0x12,0x22,0x00,0x1d,0x2e,0x22,0x00,0x21,0x14,0x40,0xff,0x72,0x24,0x02,0xff,0x80,
+0x02,0x02,0x10,0x24,0x26,0x31,0xff,0xff,0x00,0x51,0x80,0x25,0x24,0x04,0x04,0x50,
+0x0c,0x00,0x06,0xbf,0x02,0x00,0x28,0x21,0x24,0x04,0x04,0x58,0x0c,0x00,0x06,0xbf,
+0x02,0x00,0x28,0x21,0x24,0x04,0x04,0x60,0x0c,0x00,0x06,0xbf,0x02,0x00,0x28,0x21,
+0x02,0x00,0x28,0x21,0x0c,0x00,0x06,0xbf,0x24,0x04,0x04,0x68,0x24,0x02,0x00,0x20,
+0x16,0x22,0xff,0x60,0x00,0x00,0x00,0x00,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x2c,
+0x00,0x40,0x90,0x21,0x3c,0x02,0xff,0xff,0x34,0x42,0x3f,0xff,0x02,0x42,0x10,0x24,
+0x08,0x00,0x06,0x19,0x34,0x52,0x80,0x00,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x2c,
+0x34,0x52,0x40,0x00,0x02,0x40,0x28,0x21,0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x2c,
+0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x58,0x24,0x04,0x02,0x5c,0x0c,0x00,0x06,0xd1,
+0x00,0x02,0x9e,0x02,0x30,0x43,0x00,0xff,0x00,0x13,0x12,0x00,0x00,0x43,0x10,0x25,
+0x2c,0x43,0x00,0x04,0x14,0x60,0x00,0x20,0x2c,0x42,0x00,0x11,0x10,0x40,0x00,0x0d,
+0x00,0x00,0x00,0x00,0x3c,0x02,0xff,0xff,0x34,0x42,0x3f,0xff,0x02,0x42,0x90,0x24,
+0x02,0x40,0x28,0x21,0x24,0x04,0x02,0x2c,0x0c,0x00,0x06,0xbf,0x36,0x52,0x80,0x00,
+0x02,0x40,0x28,0x21,0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x2c,0x08,0x00,0x06,0x66,
+0x2e,0x22,0x00,0x21,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x08,0x3c,0x04,0x00,0xc0,
+0x00,0x40,0x28,0x21,0x00,0x44,0x10,0x24,0x00,0x02,0x15,0x82,0x24,0x03,0x00,0x02,
+0x14,0x43,0xff,0xec,0x00,0x00,0x00,0x00,0x3c,0x02,0xff,0x3f,0x34,0x42,0xff,0xff,
+0x00,0xa2,0x10,0x24,0x00,0x44,0x28,0x25,0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x08,
+0x08,0x00,0x06,0x96,0x3c,0x02,0xff,0xff,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x08,
+0x00,0x40,0x28,0x21,0x00,0x02,0x15,0x82,0x30,0x42,0x00,0x03,0x24,0x03,0x00,0x03,
+0x14,0x43,0xff,0xdc,0x3c,0x03,0x00,0x80,0x3c,0x02,0xff,0x3f,0x34,0x42,0xff,0xff,
+0x00,0xa2,0x10,0x24,0x08,0x00,0x06,0xae,0x00,0x43,0x28,0x25,0x30,0x83,0x00,0x03,
+0x00,0x04,0x20,0x40,0x00,0x83,0x20,0x23,0x3c,0x02,0xb0,0x0a,0x00,0x82,0x20,0x21,
+0x3c,0x06,0x00,0x01,0xac,0x85,0x00,0x00,0x24,0x07,0x00,0x01,0x00,0x00,0x28,0x21,
+0x34,0xc6,0x86,0x9f,0x8c,0x82,0x10,0x00,0x24,0xa5,0x00,0x01,0x10,0x47,0x00,0x03,
+0x00,0xc5,0x18,0x2b,0x10,0x60,0xff,0xfb,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,
+0x00,0x00,0x00,0x00,0x30,0x83,0x00,0x03,0x00,0x04,0x20,0x40,0x3c,0x02,0xb0,0x0a,
+0x00,0x83,0x20,0x23,0x00,0x82,0x20,0x21,0x3c,0x06,0x00,0x01,0x24,0x02,0xff,0xff,
+0xac,0x82,0x10,0x00,0x00,0x00,0x28,0x21,0x24,0x07,0x00,0x01,0x34,0xc6,0x86,0x9f,
+0x8c,0x82,0x10,0x00,0x24,0xa5,0x00,0x01,0x10,0x47,0x00,0x03,0x00,0xc5,0x18,0x2b,
+0x10,0x60,0xff,0xfb,0x00,0x00,0x00,0x00,0x8c,0x82,0x00,0x00,0x03,0xe0,0x00,0x08,
+0x00,0x00,0x00,0x00,0x3c,0x05,0xb0,0x03,0x3c,0x02,0x80,0x00,0x24,0x42,0x1b,0x94,
+0x24,0x03,0x00,0x01,0x34,0xa5,0x00,0x20,0x3c,0x06,0xb0,0x03,0xac,0xa2,0x00,0x00,
+0x34,0xc6,0x01,0x04,0xa0,0x83,0x00,0x48,0xa0,0x80,0x00,0x04,0xa0,0x80,0x00,0x05,
+0xa0,0x80,0x00,0x06,0xa0,0x80,0x00,0x07,0xa0,0x80,0x00,0x08,0xa0,0x80,0x00,0x09,
+0xa0,0x80,0x00,0x0a,0xa0,0x80,0x00,0x11,0xa0,0x80,0x00,0x13,0xa0,0x80,0x00,0x49,
+0x94,0xc2,0x00,0x00,0xac,0x80,0x00,0x00,0xa0,0x80,0x00,0x4e,0x00,0x02,0x14,0x00,
+0x00,0x02,0x14,0x03,0x30,0x43,0x00,0xff,0x30,0x42,0xff,0x00,0xa4,0x82,0x00,0x44,
+0xa4,0x83,0x00,0x46,0xac,0x80,0x00,0x24,0xac,0x80,0x00,0x28,0xac,0x80,0x00,0x2c,
+0xac,0x80,0x00,0x30,0xac,0x80,0x00,0x34,0xac,0x80,0x00,0x38,0xac,0x80,0x00,0x3c,
+0x03,0xe0,0x00,0x08,0xac,0x80,0x00,0x40,0x84,0x83,0x00,0x0c,0x3c,0x07,0xb0,0x03,
+0x34,0xe7,0x00,0x20,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,
+0x27,0x83,0x90,0x04,0x00,0x43,0x10,0x21,0x8c,0x48,0x00,0x18,0x3c,0x02,0x80,0x00,
+0x24,0x42,0x1c,0x28,0xac,0xe2,0x00,0x00,0x8d,0x03,0x00,0x08,0x80,0x82,0x00,0x13,
+0x00,0x05,0x2c,0x00,0x00,0x03,0x1e,0x02,0x00,0x02,0x12,0x00,0x30,0x63,0x00,0x7e,
+0x00,0x62,0x18,0x21,0x00,0x65,0x18,0x21,0x3c,0x02,0xc0,0x00,0x3c,0x05,0xb0,0x05,
+0x34,0x42,0x04,0x00,0x24,0x63,0x00,0x01,0x3c,0x07,0xb0,0x05,0x3c,0x08,0xb0,0x05,
+0x34,0xa5,0x04,0x20,0xac,0xa3,0x00,0x00,0x00,0xc2,0x30,0x21,0x34,0xe7,0x04,0x24,
+0x35,0x08,0x02,0x28,0x24,0x02,0x00,0x01,0x24,0x03,0x00,0x20,0xac,0xe6,0x00,0x00,
+0xac,0x82,0x00,0x3c,0x03,0xe0,0x00,0x08,0xa1,0x03,0x00,0x00,0x27,0xbd,0xff,0xa8,
+0x00,0x07,0x60,0x80,0x27,0x82,0xb4,0x00,0xaf,0xbe,0x00,0x50,0xaf,0xb7,0x00,0x4c,
+0xaf,0xb5,0x00,0x44,0xaf,0xb4,0x00,0x40,0xaf,0xbf,0x00,0x54,0xaf,0xb6,0x00,0x48,
+0xaf,0xb3,0x00,0x3c,0xaf,0xb2,0x00,0x38,0xaf,0xb1,0x00,0x34,0xaf,0xb0,0x00,0x30,
+0x01,0x82,0x10,0x21,0x8c,0x43,0x00,0x00,0x00,0xe0,0x70,0x21,0x3c,0x02,0x80,0x00,
+0x94,0x73,0x00,0x14,0x3c,0x07,0xb0,0x03,0x34,0xe7,0x00,0x20,0x24,0x42,0x1c,0xbc,
+0x3c,0x03,0xb0,0x05,0xac,0xe2,0x00,0x00,0x34,0x63,0x01,0x28,0x90,0x67,0x00,0x00,
+0x00,0x13,0xa8,0xc0,0x02,0xb3,0x18,0x21,0x27,0x82,0x90,0x04,0x00,0x03,0x18,0x80,
+0x00,0x62,0x18,0x21,0x00,0x05,0x2c,0x00,0x00,0x07,0x3e,0x00,0x28,0xc2,0x00,0x03,
+0x00,0xc0,0xa0,0x21,0x00,0x80,0x78,0x21,0x00,0x05,0xbc,0x03,0x8c,0x68,0x00,0x18,
+0x02,0xa0,0x58,0x21,0x10,0x40,0x01,0x81,0x00,0x07,0xf6,0x03,0x00,0xde,0x10,0x07,
+0x30,0x5e,0x00,0x01,0x01,0x73,0x10,0x21,0x27,0x83,0x90,0x08,0x00,0x02,0x10,0x80,
+0x00,0x43,0x10,0x21,0x80,0x4d,0x00,0x06,0x8d,0x03,0x00,0x00,0x8d,0x02,0x00,0x04,
+0x8d,0x0a,0x00,0x08,0x8d,0x03,0x00,0x0c,0xaf,0xa2,0x00,0x20,0x11,0xa0,0x01,0x71,
+0xaf,0xa3,0x00,0x18,0x27,0x82,0xb4,0x00,0x01,0x82,0x10,0x21,0x8c,0x44,0x00,0x00,
+0x00,0x00,0x00,0x00,0x90,0x83,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x04,
+0x14,0x60,0x00,0x12,0x00,0x00,0xb0,0x21,0x3c,0x02,0xb0,0x09,0x34,0x42,0x01,0x46,
+0x90,0x43,0x00,0x00,0x2a,0x84,0x00,0x04,0x10,0x80,0x01,0x56,0x30,0x65,0x00,0x01,
+0x91,0xe2,0x00,0x09,0x00,0x00,0x00,0x00,0x12,0x82,0x00,0x02,0x00,0x00,0x00,0x00,
+0x00,0x00,0x28,0x21,0x14,0xa0,0x00,0x03,0x00,0x00,0x38,0x21,0x13,0xc0,0x00,0x03,
+0x38,0xf6,0x00,0x01,0x24,0x07,0x00,0x01,0x38,0xf6,0x00,0x01,0x01,0x73,0x10,0x21,
+0x00,0x02,0x30,0x80,0x27,0x83,0x90,0x10,0x00,0xc3,0x48,0x21,0x91,0x25,0x00,0x00,
+0x8f,0xa4,0x00,0x20,0x2c,0xa3,0x00,0x04,0x00,0x04,0x11,0xc3,0x30,0x42,0x00,0x01,
+0x00,0x03,0xb0,0x0b,0x12,0xc0,0x00,0xd8,0xaf,0xa2,0x00,0x24,0x93,0x90,0xbb,0xea,
+0x00,0x0a,0x16,0x42,0x30,0x52,0x00,0x3f,0x2e,0x06,0x00,0x0c,0x10,0xc0,0x00,0xc0,
+0x00,0xa0,0x20,0x21,0x2c,0xa2,0x00,0x10,0x14,0x40,0x00,0x04,0x00,0x90,0x10,0x2b,
+0x30,0xa2,0x00,0x07,0x24,0x44,0x00,0x04,0x00,0x90,0x10,0x2b,0x10,0x40,0x00,0x0b,
+0x01,0x73,0x10,0x21,0x27,0x85,0xbb,0x1c,0x00,0x10,0x10,0x40,0x00,0x50,0x10,0x21,
+0x00,0x45,0x10,0x21,0x90,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x90,0x18,0x2b,
+0x14,0x60,0xff,0xfa,0x00,0x10,0x10,0x40,0x01,0x73,0x10,0x21,0x00,0x02,0x10,0x80,
+0x27,0x83,0x90,0x08,0x00,0x43,0x10,0x21,0x31,0xa4,0x00,0x01,0x10,0x80,0x00,0xa5,
+0xa0,0x50,0x00,0x07,0x3c,0x04,0xb0,0x05,0x34,0x84,0x00,0x08,0x24,0x02,0x00,0x01,
+0x3c,0x03,0x80,0x00,0xa1,0xe2,0x00,0x4e,0xac,0x83,0x00,0x00,0x8c,0x85,0x00,0x00,
+0x3c,0x02,0x00,0xf0,0x3c,0x03,0x40,0xf0,0x34,0x42,0xf0,0x00,0x34,0x63,0xf0,0x00,
+0x24,0x17,0x00,0x0e,0x24,0x13,0x01,0x06,0xac,0x82,0x00,0x00,0xac,0x83,0x00,0x00,
+0x27,0x82,0xb4,0x00,0x01,0x82,0x10,0x21,0x8c,0x43,0x00,0x00,0x24,0x05,0x00,0x01,
+0xaf,0xa5,0x00,0x1c,0x90,0x62,0x00,0x16,0x00,0x13,0xa8,0xc0,0x32,0x51,0x00,0x02,
+0x34,0x42,0x00,0x04,0xa0,0x62,0x00,0x16,0x8f,0xa3,0x00,0x20,0x8f,0xa4,0x00,0x18,
+0x00,0x03,0x13,0x43,0x00,0x04,0x1a,0x02,0x30,0x47,0x00,0x01,0x12,0x20,0x00,0x04,
+0x30,0x64,0x07,0xff,0x2e,0x03,0x00,0x04,0x32,0x42,0x00,0x33,0x00,0x43,0x90,0x0b,
+0x8f,0xa5,0x00,0x24,0x8f,0xa6,0x00,0x1c,0x00,0x12,0x10,0x40,0x00,0x05,0x19,0xc0,
+0x00,0x47,0x10,0x21,0x00,0x06,0x2a,0x80,0x00,0x43,0x10,0x21,0x00,0x10,0x32,0x00,
+0x00,0x04,0x24,0x80,0x02,0x65,0x28,0x21,0x00,0xa4,0x28,0x21,0x00,0x46,0x10,0x21,
+0x00,0x17,0x1c,0x00,0x3c,0x04,0xc0,0x00,0x00,0x43,0x30,0x21,0x16,0x80,0x00,0x29,
+0x00,0xa4,0x28,0x21,0x3c,0x02,0xb0,0x05,0x34,0x42,0x04,0x00,0x3c,0x03,0xb0,0x05,
+0x3c,0x04,0xb0,0x05,0xac,0x46,0x00,0x00,0x34,0x63,0x04,0x04,0x34,0x84,0x02,0x28,
+0x24,0x02,0x00,0x01,0xac,0x65,0x00,0x00,0xa0,0x82,0x00,0x00,0x3c,0x02,0xb0,0x09,
+0x34,0x42,0x01,0x46,0x90,0x44,0x00,0x00,0x91,0xe3,0x00,0x09,0x30,0x86,0x00,0x01,
+0x02,0x83,0x18,0x26,0x00,0x03,0x30,0x0b,0x14,0xc0,0x00,0x03,0x00,0x00,0x28,0x21,
+0x13,0xc0,0x00,0x03,0x02,0xb3,0x10,0x21,0x24,0x05,0x00,0x01,0x02,0xb3,0x10,0x21,
+0x27,0x83,0x90,0x08,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x84,0x48,0x00,0x04,
+0x00,0xa0,0x30,0x21,0x00,0xe0,0x20,0x21,0x02,0x80,0x28,0x21,0x02,0xc0,0x38,0x21,
+0x0c,0x00,0x00,0x70,0xaf,0xa8,0x00,0x10,0x7b,0xbe,0x02,0xbc,0x7b,0xb6,0x02,0x7c,
+0x7b,0xb4,0x02,0x3c,0x7b,0xb2,0x01,0xfc,0x7b,0xb0,0x01,0xbc,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x58,0x24,0x02,0x00,0x01,0x12,0x82,0x00,0x3d,0x3c,0x02,0xb0,0x05,
+0x24,0x02,0x00,0x02,0x12,0x82,0x00,0x31,0x3c,0x02,0xb0,0x05,0x24,0x02,0x00,0x03,
+0x12,0x82,0x00,0x25,0x3c,0x02,0xb0,0x05,0x24,0x02,0x00,0x10,0x12,0x82,0x00,0x19,
+0x3c,0x02,0xb0,0x05,0x24,0x02,0x00,0x11,0x12,0x82,0x00,0x0d,0x3c,0x02,0xb0,0x05,
+0x24,0x02,0x00,0x12,0x16,0x82,0xff,0xd1,0x3c,0x02,0xb0,0x05,0x3c,0x03,0xb0,0x05,
+0x34,0x42,0x04,0x20,0x3c,0x04,0xb0,0x05,0x34,0x63,0x04,0x24,0xac,0x46,0x00,0x00,
+0x34,0x84,0x02,0x28,0xac,0x65,0x00,0x00,0x08,0x00,0x07,0xe6,0x24,0x02,0x00,0x20,
+0x34,0x42,0x04,0x40,0x3c,0x03,0xb0,0x05,0x3c,0x04,0xb0,0x05,0xac,0x46,0x00,0x00,
+0x34,0x63,0x04,0x44,0x34,0x84,0x02,0x28,0x24,0x02,0x00,0x40,0x08,0x00,0x07,0xe6,
+0xac,0x65,0x00,0x00,0x34,0x42,0x04,0x28,0x3c,0x03,0xb0,0x05,0x3c,0x04,0xb0,0x05,
+0xac,0x46,0x00,0x00,0x34,0x63,0x04,0x2c,0x34,0x84,0x02,0x28,0x24,0x02,0xff,0x80,
+0x08,0x00,0x07,0xe6,0xac,0x65,0x00,0x00,0x34,0x42,0x04,0x18,0x3c,0x03,0xb0,0x05,
+0x3c,0x04,0xb0,0x05,0xac,0x46,0x00,0x00,0x34,0x63,0x04,0x1c,0x34,0x84,0x02,0x28,
+0x24,0x02,0x00,0x08,0x08,0x00,0x07,0xe6,0xac,0x65,0x00,0x00,0x34,0x42,0x04,0x10,
+0x3c,0x03,0xb0,0x05,0x3c,0x04,0xb0,0x05,0xac,0x46,0x00,0x00,0x34,0x63,0x04,0x14,
+0x34,0x84,0x02,0x28,0x24,0x02,0x00,0x04,0x08,0x00,0x07,0xe6,0xac,0x65,0x00,0x00,
+0x34,0x42,0x04,0x08,0x3c,0x03,0xb0,0x05,0x3c,0x04,0xb0,0x05,0xac,0x46,0x00,0x00,
+0x34,0x63,0x04,0x0c,0x34,0x84,0x02,0x28,0x24,0x02,0x00,0x02,0x08,0x00,0x07,0xe6,
+0xac,0x65,0x00,0x00,0x24,0x17,0x00,0x14,0x08,0x00,0x07,0xb8,0x24,0x13,0x01,0x02,
+0x30,0xa2,0x00,0x07,0x24,0x44,0x00,0x0c,0x00,0x90,0x18,0x2b,0x10,0x60,0x00,0x0c,
+0x26,0x02,0x00,0x04,0x27,0x85,0xbb,0x1c,0x00,0x10,0x10,0x40,0x00,0x50,0x10,0x21,
+0x00,0x45,0x10,0x21,0x90,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x90,0x18,0x2b,
+0x14,0x60,0xff,0xfa,0x00,0x10,0x10,0x40,0x2e,0x06,0x00,0x0c,0x26,0x02,0x00,0x04,
+0x08,0x00,0x07,0xa2,0x00,0x46,0x80,0x0a,0x27,0x82,0xb4,0x00,0x01,0x82,0x20,0x21,
+0x8c,0x87,0x00,0x00,0x00,0x00,0x00,0x00,0x90,0xe2,0x00,0x19,0x00,0x00,0x00,0x00,
+0x14,0x40,0x00,0x07,0x00,0x00,0x00,0x00,0x27,0x82,0x90,0x20,0x00,0xc2,0x10,0x21,
+0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x60,0x00,0x14,0x00,0x00,0x00,0x00,
+0x90,0xe3,0x00,0x16,0x27,0x82,0x90,0x08,0x00,0xc2,0x10,0x21,0x34,0x63,0x00,0x20,
+0x90,0x50,0x00,0x07,0xa0,0xe3,0x00,0x16,0x8c,0x84,0x00,0x00,0x00,0x0a,0x1e,0x42,
+0x24,0x06,0x00,0x01,0x90,0x82,0x00,0x16,0x30,0x71,0x00,0x02,0x30,0x72,0x00,0x3f,
+0x30,0x42,0x00,0xfb,0x24,0x17,0x00,0x18,0x24,0x13,0x01,0x03,0x24,0x15,0x08,0x18,
+0xaf,0xa6,0x00,0x1c,0x08,0x00,0x07,0xc2,0xa0,0x82,0x00,0x16,0x8d,0x02,0x00,0x04,
+0x00,0x0a,0x1c,0x42,0x30,0x42,0x00,0x10,0x14,0x40,0x00,0x15,0x30,0x72,0x00,0x3f,
+0x81,0x22,0x00,0x05,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x11,0x30,0x72,0x00,0x3e,
+0x27,0x83,0x90,0x18,0x00,0xc3,0x18,0x21,0x80,0x64,0x00,0x00,0x27,0x83,0xb5,0x78,
+0x00,0x04,0x11,0x00,0x00,0x44,0x10,0x23,0x00,0x02,0x10,0x80,0x00,0x44,0x10,0x23,
+0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x90,0x44,0x00,0x05,0x90,0x43,0x00,0x04,
+0x00,0x00,0x00,0x00,0x00,0x64,0x18,0x24,0x30,0x63,0x00,0x01,0x02,0x43,0x90,0x25,
+0x27,0x85,0xb4,0x00,0x01,0x85,0x28,0x21,0x8c,0xa6,0x00,0x00,0x01,0x73,0x10,0x21,
+0x27,0x83,0x90,0x10,0x90,0xc4,0x00,0x16,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,
+0x30,0x84,0x00,0xdf,0x90,0x50,0x00,0x00,0xa0,0xc4,0x00,0x16,0x80,0xc6,0x00,0x12,
+0x8c,0xa3,0x00,0x00,0x2d,0xc4,0x00,0x02,0xaf,0xa6,0x00,0x1c,0x90,0x62,0x00,0x16,
+0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xfb,0x14,0x80,0x00,0x06,0xa0,0x62,0x00,0x16,
+0x24,0x02,0x00,0x06,0x11,0xc2,0x00,0x03,0x24,0x02,0x00,0x04,0x15,0xc2,0xff,0x0e,
+0x32,0x51,0x00,0x02,0x32,0x51,0x00,0x02,0x2e,0x02,0x00,0x0c,0x14,0x40,0x00,0x0f,
+0x00,0x11,0x18,0x2b,0x32,0x02,0x00,0x0f,0x34,0x42,0x00,0x10,0x00,0x03,0x19,0x00,
+0x00,0x43,0x18,0x21,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0xe0,0xa0,0x43,0x00,0x00,
+0x00,0x00,0x20,0x21,0x02,0x00,0x28,0x21,0x0c,0x00,0x02,0x03,0xaf,0xaf,0x00,0x28,
+0x8f,0xaf,0x00,0x28,0x08,0x00,0x07,0xc2,0x00,0x00,0x00,0x00,0x08,0x00,0x08,0xbd,
+0x32,0x03,0x00,0xff,0x3c,0x03,0xb0,0x05,0x34,0x63,0x02,0x42,0x90,0x62,0x00,0x00,
+0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x0f,0x14,0x40,0xfe,0xaa,0x00,0x00,0x00,0x00,
+0x91,0xe2,0x00,0x09,0x00,0x00,0x00,0x00,0x02,0x82,0x10,0x26,0x08,0x00,0x07,0x79,
+0x00,0x02,0x28,0x0b,0x08,0x00,0x07,0x7f,0x00,0x00,0xb0,0x21,0x24,0x02,0x00,0x10,
+0x10,0xc2,0x00,0x08,0x24,0x02,0x00,0x11,0x10,0xc2,0xfe,0x7d,0x00,0x07,0x17,0x83,
+0x24,0x02,0x00,0x12,0x14,0xc2,0xfe,0x7b,0x00,0x07,0x17,0x43,0x08,0x00,0x07,0x59,
+0x30,0x5e,0x00,0x01,0x08,0x00,0x07,0x59,0x00,0x07,0xf7,0xc2,0x00,0x04,0x10,0x40,
+0x27,0x83,0x80,0x1c,0x00,0x43,0x10,0x21,0x00,0x80,0x40,0x21,0x94,0x44,0x00,0x00,
+0x2d,0x07,0x00,0x04,0x24,0xc2,0x00,0x03,0x00,0x47,0x30,0x0a,0x00,0x86,0x00,0x18,
+0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20,0x24,0x42,0x23,0x8c,
+0xac,0x62,0x00,0x00,0x2d,0x06,0x00,0x10,0x00,0x00,0x20,0x12,0x00,0x04,0x22,0x42,
+0x24,0x84,0x00,0x01,0x24,0x83,0x00,0xc0,0x10,0xe0,0x00,0x0b,0x24,0x82,0x00,0x60,
+0x00,0x40,0x20,0x21,0x00,0x65,0x20,0x0a,0x3c,0x03,0xb0,0x03,0x34,0x63,0x01,0x00,
+0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x01,0x00,0x44,0x20,0x04,
+0x03,0xe0,0x00,0x08,0x00,0x80,0x10,0x21,0x24,0x85,0x00,0x28,0x24,0x83,0x00,0x24,
+0x31,0x02,0x00,0x08,0x14,0xc0,0xff,0xf4,0x24,0x84,0x00,0x14,0x00,0x60,0x20,0x21,
+0x08,0x00,0x08,0xfa,0x00,0xa2,0x20,0x0b,0x27,0xbd,0xff,0xe0,0x3c,0x03,0xb0,0x03,
+0x3c,0x02,0x80,0x00,0xaf,0xb0,0x00,0x10,0x24,0x42,0x24,0x28,0x00,0x80,0x80,0x21,
+0x34,0x63,0x00,0x20,0x3c,0x04,0xb0,0x03,0xaf,0xb2,0x00,0x18,0xaf,0xb1,0x00,0x14,
+0xaf,0xbf,0x00,0x1c,0x83,0xb1,0x00,0x33,0x83,0xa8,0x00,0x37,0x34,0x84,0x01,0x10,
+0xac,0x62,0x00,0x00,0x2e,0x02,0x00,0x10,0x00,0xe0,0x90,0x21,0x8c,0x87,0x00,0x00,
+0x14,0x40,0x00,0x0c,0x2e,0x02,0x00,0x0c,0x3c,0x02,0x00,0x0f,0x34,0x42,0xf0,0x00,
+0x00,0xe2,0x10,0x24,0x14,0x40,0x00,0x37,0x32,0x02,0x00,0x08,0x32,0x02,0x00,0x07,
+0x27,0x83,0x80,0xcc,0x00,0x43,0x10,0x21,0x90,0x50,0x00,0x00,0x00,0x00,0x00,0x00,
+0x2e,0x02,0x00,0x0c,0x14,0x40,0x00,0x03,0x02,0x00,0x20,0x21,0x32,0x02,0x00,0x0f,
+0x24,0x44,0x00,0x0c,0x00,0x87,0x10,0x06,0x30,0x42,0x00,0x01,0x14,0x40,0x00,0x07,
+0x2c,0x82,0x00,0x0c,0x00,0x04,0x10,0x80,0x27,0x83,0xb4,0x50,0x00,0x43,0x10,0x21,
+0x8c,0x44,0x00,0x00,0x00,0x00,0x00,0x00,0x2c,0x82,0x00,0x0c,0x14,0x40,0x00,0x05,
+0x00,0x05,0x10,0x40,0x00,0x46,0x10,0x21,0x00,0x02,0x11,0x00,0x00,0x82,0x10,0x21,
+0x24,0x44,0x00,0x04,0x15,0x00,0x00,0x02,0x24,0x06,0x00,0x20,0x24,0x06,0x00,0x0e,
+0x0c,0x00,0x08,0xe3,0x00,0x00,0x00,0x00,0x00,0x40,0x30,0x21,0x3c,0x02,0xb0,0x03,
+0x34,0x42,0x01,0x00,0x90,0x43,0x00,0x00,0x2e,0x04,0x00,0x04,0x24,0x02,0x00,0x10,
+0x24,0x05,0x00,0x0a,0x00,0x44,0x28,0x0a,0x30,0x63,0x00,0x01,0x14,0x60,0x00,0x02,
+0x00,0x05,0x10,0x40,0x00,0xa0,0x10,0x21,0x30,0x45,0x00,0xff,0x00,0xc5,0x10,0x21,
+0x24,0x46,0x00,0x46,0x02,0x26,0x18,0x04,0xa6,0x43,0x00,0x00,0x8f,0xbf,0x00,0x1c,
+0x8f,0xb2,0x00,0x18,0x7b,0xb0,0x00,0xbc,0x00,0xc0,0x10,0x21,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x20,0x10,0x40,0xff,0xcf,0x2e,0x02,0x00,0x0c,0x32,0x02,0x00,0x07,
+0x27,0x83,0x80,0xc4,0x00,0x43,0x10,0x21,0x90,0x44,0x00,0x00,0x08,0x00,0x09,0x28,
+0x02,0x04,0x80,0x23,0x27,0xbd,0xff,0xb8,0x00,0x05,0x38,0x80,0x27,0x82,0xb4,0x00,
+0xaf,0xbe,0x00,0x40,0xaf,0xb6,0x00,0x38,0xaf,0xb3,0x00,0x2c,0xaf,0xbf,0x00,0x44,
+0xaf,0xb7,0x00,0x3c,0xaf,0xb5,0x00,0x34,0xaf,0xb4,0x00,0x30,0xaf,0xb2,0x00,0x28,
+0xaf,0xb1,0x00,0x24,0xaf,0xb0,0x00,0x20,0x00,0xe2,0x38,0x21,0x8c,0xe6,0x00,0x00,
+0xaf,0xa5,0x00,0x4c,0x3c,0x02,0x80,0x00,0x3c,0x05,0xb0,0x03,0x34,0xa5,0x00,0x20,
+0x24,0x42,0x25,0x84,0x24,0x03,0x00,0x01,0xac,0xa2,0x00,0x00,0xa0,0xc3,0x00,0x12,
+0x8c,0xe5,0x00,0x00,0x94,0xc3,0x00,0x06,0x90,0xa2,0x00,0x16,0xa4,0xc3,0x00,0x14,
+0x27,0x83,0x90,0x00,0x34,0x42,0x00,0x08,0xa0,0xa2,0x00,0x16,0x8c,0xe8,0x00,0x00,
+0xaf,0xa4,0x00,0x48,0x27,0x82,0x90,0x04,0x95,0x11,0x00,0x14,0x00,0x00,0x00,0x00,
+0x00,0x11,0x98,0xc0,0x02,0x71,0x20,0x21,0x00,0x04,0x20,0x80,0x00,0x82,0x10,0x21,
+0x8c,0x52,0x00,0x18,0x00,0x83,0x18,0x21,0x84,0x75,0x00,0x06,0x8e,0x45,0x00,0x08,
+0x8e,0x46,0x00,0x04,0x8e,0x47,0x00,0x04,0x00,0x05,0x1c,0x82,0x00,0x06,0x31,0x42,
+0x27,0x82,0x90,0x10,0x30,0x63,0x00,0x01,0x30,0xc6,0x00,0x01,0x00,0x82,0x20,0x21,
+0xa5,0x15,0x00,0x1a,0x00,0x05,0x14,0x42,0xaf,0xa3,0x00,0x18,0xaf,0xa6,0x00,0x1c,
+0x30,0xe7,0x00,0x10,0x30,0x56,0x00,0x01,0x80,0x97,0x00,0x06,0x14,0xe0,0x00,0x47,
+0x00,0x05,0xf7,0xc2,0x80,0x82,0x00,0x05,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x44,
+0x02,0x71,0x10,0x21,0x93,0x90,0xbb,0xe9,0x00,0x00,0x00,0x00,0x2e,0x02,0x00,0x0c,
+0x14,0x40,0x00,0x06,0x02,0x00,0x20,0x21,0x00,0x16,0x10,0x40,0x00,0x43,0x10,0x21,
+0x00,0x02,0x11,0x00,0x02,0x02,0x10,0x21,0x24,0x44,0x00,0x04,0x02,0x71,0x10,0x21,
+0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21,0x00,0x80,0x80,0x21,
+0xa0,0x44,0x00,0x03,0xa0,0x44,0x00,0x00,0x02,0x00,0x20,0x21,0x02,0xc0,0x28,0x21,
+0x0c,0x00,0x08,0xe3,0x02,0xa0,0x30,0x21,0x02,0x71,0x18,0x21,0x00,0x03,0x88,0x80,
+0x00,0x40,0xa0,0x21,0x27,0x82,0x90,0x20,0x02,0x22,0x10,0x21,0x8c,0x44,0x00,0x00,
+0x26,0xe3,0x00,0x02,0x00,0x03,0x17,0xc2,0x00,0x62,0x18,0x21,0x00,0x04,0x25,0xc2,
+0x00,0x03,0x18,0x43,0x30,0x84,0x00,0x01,0x00,0x03,0x18,0x40,0x03,0xc4,0x20,0x24,
+0x14,0x80,0x00,0x15,0x02,0x43,0x38,0x21,0x3c,0x08,0xb0,0x03,0x35,0x08,0x00,0x28,
+0x8d,0x03,0x00,0x00,0x8f,0xa6,0x00,0x4c,0x8f,0xa4,0x00,0x48,0x27,0x82,0x90,0x08,
+0x02,0x22,0x10,0x21,0x24,0x63,0x00,0x01,0x02,0xa0,0x28,0x21,0xa4,0x54,0x00,0x04,
+0x00,0xc0,0x38,0x21,0x0c,0x00,0x07,0x2f,0xad,0x03,0x00,0x00,0x7b,0xbe,0x02,0x3c,
+0x7b,0xb6,0x01,0xfc,0x7b,0xb4,0x01,0xbc,0x7b,0xb2,0x01,0x7c,0x7b,0xb0,0x01,0x3c,
+0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x48,0x8f,0xa2,0x00,0x1c,0x8f,0xa6,0x00,0x18,
+0x02,0x00,0x20,0x21,0x02,0xc0,0x28,0x21,0xaf,0xa2,0x00,0x10,0x0c,0x00,0x09,0x0a,
+0xaf,0xa0,0x00,0x14,0x08,0x00,0x09,0xc6,0x02,0x82,0xa0,0x21,0x02,0x71,0x10,0x21,
+0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21,0x90,0x50,0x00,0x00,
+0x08,0x00,0x09,0xb2,0xa0,0x50,0x00,0x03,0x27,0xbd,0xff,0xb8,0xaf,0xb1,0x00,0x24,
+0x8f,0xb1,0x00,0x5c,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20,
+0x24,0x42,0x27,0xa8,0xaf,0xbe,0x00,0x40,0xaf,0xb7,0x00,0x3c,0xaf,0xb6,0x00,0x38,
+0xaf,0xb5,0x00,0x34,0xaf,0xb4,0x00,0x30,0xaf,0xa5,0x00,0x4c,0x8f,0xb5,0x00,0x58,
+0xaf,0xbf,0x00,0x44,0xaf,0xb3,0x00,0x2c,0xaf,0xb2,0x00,0x28,0xaf,0xb0,0x00,0x20,
+0x00,0xe0,0xb0,0x21,0xac,0x62,0x00,0x00,0x00,0x80,0xf0,0x21,0x00,0x00,0xb8,0x21,
+0x16,0x20,0x00,0x2b,0x00,0x00,0xa0,0x21,0x27,0x85,0xb4,0x00,0x00,0x07,0x10,0x80,
+0x00,0x45,0x10,0x21,0x8c,0x53,0x00,0x00,0x00,0x15,0x18,0x80,0x00,0x65,0x18,0x21,
+0x92,0x62,0x00,0x16,0x8c,0x72,0x00,0x00,0x30,0x42,0x00,0x03,0x14,0x40,0x00,0x2d,
+0x00,0x00,0x00,0x00,0x92,0x42,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x03,
+0x14,0x40,0x00,0x28,0x00,0x00,0x00,0x00,0x8c,0x82,0x00,0x34,0x00,0x00,0x00,0x00,
+0x14,0x40,0x00,0x18,0x02,0x20,0x10,0x21,0x8c,0x82,0x00,0x38,0x00,0x00,0x00,0x00,
+0x14,0x40,0x00,0x14,0x02,0x20,0x10,0x21,0x8c,0x82,0x00,0x3c,0x00,0x00,0x00,0x00,
+0x14,0x40,0x00,0x0f,0x3c,0x03,0xb0,0x09,0x3c,0x05,0xb0,0x05,0x34,0x63,0x01,0x44,
+0x34,0xa5,0x02,0x52,0x94,0x66,0x00,0x00,0x90,0xa2,0x00,0x00,0x8f,0xa3,0x00,0x4c,
+0x00,0x00,0x00,0x00,0x00,0x62,0x10,0x06,0x30,0x42,0x00,0x01,0x10,0x40,0x00,0x04,
+0x30,0xc6,0xff,0xff,0x2c,0xc2,0x00,0x41,0x10,0x40,0x00,0x09,0x24,0x05,0x00,0x14,
+0x02,0x20,0x10,0x21,0x7b,0xbe,0x02,0x3c,0x7b,0xb6,0x01,0xfc,0x7b,0xb4,0x01,0xbc,
+0x7b,0xb2,0x01,0x7c,0x7b,0xb0,0x01,0x3c,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x48,
+0x0c,0x00,0x07,0x0a,0x24,0x06,0x01,0x07,0x24,0x02,0x00,0x01,0x08,0x00,0x0a,0x2c,
+0xa3,0xc2,0x00,0x11,0x10,0xc0,0x00,0x1c,0x24,0x02,0x00,0x01,0x10,0xc2,0x00,0x17,
+0x00,0xc0,0x88,0x21,0x96,0x54,0x00,0x1a,0x02,0xa0,0xb8,0x21,0x12,0x20,0xff,0xed,
+0x02,0x20,0x10,0x21,0x27,0x83,0xb4,0x00,0x00,0x17,0x10,0x80,0x00,0x43,0x10,0x21,
+0x8c,0x44,0x00,0x00,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0x28,0x80,0x86,0x00,0x12,
+0x8c,0x62,0x00,0x00,0x00,0x14,0x2c,0x00,0x00,0x05,0x2c,0x03,0x00,0x46,0x10,0x21,
+0x8f,0xa6,0x00,0x4c,0x02,0xe0,0x38,0x21,0x03,0xc0,0x20,0x21,0x0c,0x00,0x07,0x2f,
+0xac,0x62,0x00,0x00,0x08,0x00,0x0a,0x2c,0xaf,0xd1,0x00,0x40,0x96,0x74,0x00,0x1a,
+0x08,0x00,0x0a,0x3f,0x02,0xc0,0xb8,0x21,0x3c,0x02,0xb0,0x03,0x34,0x42,0x01,0x08,
+0x8c,0x50,0x00,0x00,0x02,0x60,0x20,0x21,0x0c,0x00,0x1e,0xf3,0x02,0x00,0x28,0x21,
+0x30,0x42,0x00,0xff,0x02,0x00,0x28,0x21,0x02,0x40,0x20,0x21,0x0c,0x00,0x1e,0xf3,
+0xaf,0xa2,0x00,0x18,0x8f,0xa4,0x00,0x18,0x00,0x00,0x00,0x00,0x10,0x80,0x00,0xed,
+0x30,0x50,0x00,0xff,0x12,0x00,0x00,0x18,0x24,0x11,0x00,0x01,0x96,0x63,0x00,0x14,
+0x96,0x44,0x00,0x14,0x27,0x85,0x90,0x00,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,
+0x00,0x02,0x10,0x80,0x00,0x45,0x10,0x21,0x00,0x04,0x18,0xc0,0x8c,0x46,0x00,0x08,
+0x00,0x64,0x18,0x21,0x00,0x03,0x18,0x80,0x00,0x65,0x18,0x21,0x00,0x06,0x17,0x02,
+0x24,0x04,0x00,0xff,0x8c,0x63,0x00,0x08,0x10,0x44,0x00,0xd6,0x00,0x03,0x17,0x02,
+0x10,0x44,0x00,0xd5,0x3c,0x02,0x80,0x00,0x00,0x66,0x18,0x2b,0x24,0x11,0x00,0x02,
+0x24,0x02,0x00,0x01,0x00,0x43,0x88,0x0a,0x24,0x02,0x00,0x01,0x12,0x22,0x00,0x5a,
+0x24,0x02,0x00,0x02,0x16,0x22,0xff,0xbd,0x00,0x00,0x00,0x00,0x96,0x49,0x00,0x14,
+0x27,0x82,0x90,0x04,0x02,0xa0,0xb8,0x21,0x00,0x09,0x50,0xc0,0x01,0x49,0x18,0x21,
+0x00,0x03,0x40,0x80,0x01,0x02,0x10,0x21,0x8c,0x43,0x00,0x18,0x00,0x00,0x00,0x00,
+0x8c,0x65,0x00,0x08,0x8c,0x62,0x00,0x0c,0x8c,0x62,0x00,0x04,0x00,0x05,0x24,0x42,
+0x00,0x05,0x1c,0x82,0x30,0x42,0x00,0x10,0x30,0x66,0x00,0x01,0x14,0x40,0x00,0x41,
+0x30,0x87,0x00,0x01,0x27,0x82,0x90,0x18,0x01,0x02,0x10,0x21,0x80,0x44,0x00,0x00,
+0x27,0x82,0xb5,0x78,0x00,0x04,0x19,0x00,0x00,0x64,0x18,0x23,0x00,0x03,0x18,0x80,
+0x00,0x64,0x18,0x23,0x00,0x03,0x18,0x80,0x00,0x62,0x10,0x21,0x90,0x45,0x00,0x05,
+0x27,0x84,0xb4,0xa0,0x00,0x64,0x18,0x21,0x90,0x63,0x00,0x00,0x10,0xa0,0x00,0x2b,
+0x2c,0x64,0x00,0x0c,0x14,0x80,0x00,0x04,0x00,0x60,0x10,0x21,0x00,0x06,0x11,0x00,
+0x00,0x62,0x10,0x21,0x24,0x42,0x00,0x24,0x3c,0x01,0xb0,0x03,0xa0,0x22,0x00,0xe1,
+0x14,0x80,0x00,0x06,0x00,0x60,0x28,0x21,0x00,0x07,0x10,0x40,0x00,0x46,0x10,0x21,
+0x00,0x02,0x11,0x00,0x00,0x62,0x10,0x21,0x24,0x45,0x00,0x04,0x01,0x49,0x10,0x21,
+0x27,0x83,0x90,0x10,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x00,0xa0,0x18,0x21,
+0xa0,0x45,0x00,0x03,0xa0,0x45,0x00,0x00,0x24,0x02,0x00,0x08,0x12,0x02,0x00,0x0b,
+0x24,0x02,0x00,0x01,0x00,0x60,0x28,0x21,0x02,0x40,0x20,0x21,0x0c,0x00,0x1f,0x6f,
+0xaf,0xa2,0x00,0x10,0x30,0x54,0xff,0xff,0x92,0x42,0x00,0x16,0x00,0x00,0x00,0x00,
+0x02,0x02,0x10,0x25,0x08,0x00,0x0a,0x3f,0xa2,0x42,0x00,0x16,0x00,0x60,0x28,0x21,
+0x02,0x40,0x20,0x21,0x0c,0x00,0x1f,0x20,0xaf,0xa0,0x00,0x10,0x08,0x00,0x0a,0xc2,
+0x30,0x54,0xff,0xff,0x08,0x00,0x0a,0xaa,0x00,0x60,0x10,0x21,0x14,0x80,0xff,0xfd,
+0x00,0x00,0x00,0x00,0x00,0x06,0x11,0x00,0x00,0x62,0x10,0x21,0x08,0x00,0x0a,0xaa,
+0x24,0x42,0x00,0x04,0x27,0x82,0x90,0x10,0x01,0x02,0x10,0x21,0x90,0x43,0x00,0x00,
+0x08,0x00,0x0a,0xba,0xa0,0x43,0x00,0x03,0x96,0x69,0x00,0x14,0x02,0xc0,0xb8,0x21,
+0x24,0x0b,0x00,0x01,0x00,0x09,0x10,0xc0,0x00,0x49,0x18,0x21,0x00,0x03,0x40,0x80,
+0x00,0x40,0x50,0x21,0x27,0x82,0x90,0x04,0x01,0x02,0x10,0x21,0x8c,0x43,0x00,0x18,
+0x00,0x00,0x00,0x00,0x8c,0x65,0x00,0x08,0x8c,0x62,0x00,0x0c,0x8c,0x62,0x00,0x04,
+0x00,0x05,0x24,0x42,0x00,0x05,0x1c,0x82,0x30,0x42,0x00,0x10,0x30,0x66,0x00,0x01,
+0x10,0x40,0x00,0x0d,0x30,0x87,0x00,0x01,0x27,0x82,0x90,0x18,0x01,0x02,0x10,0x21,
+0x80,0x43,0x00,0x00,0x00,0x00,0x58,0x21,0x00,0x03,0x11,0x00,0x00,0x43,0x10,0x23,
+0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x23,0x00,0x02,0x10,0x80,0x27,0x83,0xb5,0x70,
+0x00,0x43,0x10,0x21,0xa0,0x40,0x00,0x04,0x11,0x60,0x00,0x4f,0x00,0x00,0x00,0x00,
+0x01,0x49,0x10,0x21,0x00,0x02,0x20,0x80,0x27,0x85,0x90,0x10,0x00,0x85,0x10,0x21,
+0x80,0x43,0x00,0x05,0x00,0x00,0x00,0x00,0x14,0x60,0x00,0x42,0x01,0x49,0x10,0x21,
+0x27,0x82,0x90,0x18,0x00,0x82,0x10,0x21,0x80,0x44,0x00,0x00,0x27,0x82,0xb5,0x78,
+0x00,0x04,0x19,0x00,0x00,0x64,0x18,0x23,0x00,0x03,0x18,0x80,0x00,0x64,0x18,0x23,
+0x00,0x03,0x18,0x80,0x00,0x62,0x10,0x21,0x90,0x45,0x00,0x05,0x27,0x84,0xb4,0xa0,
+0x00,0x64,0x18,0x21,0x90,0x63,0x00,0x00,0x10,0xa0,0x00,0x2c,0x2c,0x64,0x00,0x0c,
+0x14,0x80,0x00,0x04,0x00,0x60,0x10,0x21,0x00,0x06,0x11,0x00,0x00,0x62,0x10,0x21,
+0x24,0x42,0x00,0x24,0x3c,0x01,0xb0,0x03,0xa0,0x22,0x00,0xe1,0x14,0x80,0x00,0x06,
+0x00,0x60,0x28,0x21,0x00,0x07,0x10,0x40,0x00,0x46,0x10,0x21,0x00,0x02,0x11,0x00,
+0x00,0x62,0x10,0x21,0x24,0x45,0x00,0x04,0x01,0x49,0x10,0x21,0x27,0x83,0x90,0x10,
+0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x00,0xa0,0x18,0x21,0xa0,0x45,0x00,0x03,
+0xa0,0x45,0x00,0x00,0x8f,0xa4,0x00,0x18,0x24,0x02,0x00,0x08,0x10,0x82,0x00,0x0c,
+0x00,0x60,0x28,0x21,0x24,0x02,0x00,0x01,0x02,0x60,0x20,0x21,0x0c,0x00,0x1f,0x6f,
+0xaf,0xa2,0x00,0x10,0x8f,0xa3,0x00,0x18,0x30,0x54,0xff,0xff,0x92,0x62,0x00,0x16,
+0x00,0x00,0x00,0x00,0x00,0x62,0x10,0x25,0x08,0x00,0x0a,0x3f,0xa2,0x62,0x00,0x16,
+0x02,0x60,0x20,0x21,0x0c,0x00,0x1f,0x20,0xaf,0xa0,0x00,0x10,0x08,0x00,0x0b,0x31,
+0x00,0x00,0x00,0x00,0x08,0x00,0x0b,0x19,0x00,0x60,0x10,0x21,0x14,0x80,0xff,0xfd,
+0x00,0x00,0x00,0x00,0x00,0x06,0x11,0x00,0x00,0x62,0x10,0x21,0x08,0x00,0x0b,0x19,
+0x24,0x42,0x00,0x04,0x00,0x02,0x10,0x80,0x00,0x45,0x10,0x21,0x90,0x43,0x00,0x00,
+0x08,0x00,0x0b,0x29,0xa0,0x43,0x00,0x03,0x27,0x85,0x90,0x10,0x08,0x00,0x0b,0x45,
+0x01,0x49,0x10,0x21,0x3c,0x02,0x80,0x00,0x00,0x62,0x18,0x26,0x08,0x00,0x0a,0x7a,
+0x00,0xc2,0x30,0x26,0x12,0x00,0xff,0x2d,0x24,0x02,0x00,0x01,0x08,0x00,0x0a,0x7f,
+0x24,0x11,0x00,0x02,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x27,0xbd,0xff,0xd0,
+0x24,0x42,0x2d,0x54,0x34,0x63,0x00,0x20,0x3c,0x05,0xb0,0x05,0xaf,0xb3,0x00,0x24,
+0xaf,0xb2,0x00,0x20,0xaf,0xb1,0x00,0x1c,0xaf,0xbf,0x00,0x28,0xaf,0xb0,0x00,0x18,
+0xac,0x62,0x00,0x00,0x34,0xa5,0x02,0x42,0x90,0xa2,0x00,0x00,0x00,0x80,0x90,0x21,
+0x24,0x11,0x00,0x10,0x30,0x53,0x00,0xff,0x24,0x02,0x00,0x10,0x12,0x22,0x00,0xcf,
+0x00,0x00,0x18,0x21,0x24,0x02,0x00,0x11,0x12,0x22,0x00,0xc1,0x24,0x02,0x00,0x12,
+0x12,0x22,0x00,0xb4,0x00,0x00,0x00,0x00,0x14,0x60,0x00,0xad,0xae,0x43,0x00,0x40,
+0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2c,0x8c,0x44,0x00,0x00,0x3c,0x03,0x00,0x02,
+0x34,0x63,0x00,0xff,0x00,0x83,0x80,0x24,0x00,0x10,0x14,0x43,0x10,0x40,0x00,0x05,
+0x00,0x00,0x00,0x00,0x8e,0x42,0x00,0x34,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x92,
+0x00,0x00,0x00,0x00,0x93,0x83,0x8b,0x71,0x00,0x00,0x00,0x00,0x30,0x62,0x00,0x02,
+0x10,0x40,0x00,0x04,0x32,0x10,0x00,0xff,0x00,0x10,0x11,0xc3,0x14,0x40,0x00,0x86,
+0x00,0x00,0x00,0x00,0x16,0x00,0x00,0x15,0x02,0x00,0x10,0x21,0x26,0x22,0x00,0x01,
+0x30,0x51,0x00,0xff,0x2e,0x23,0x00,0x13,0x14,0x60,0xff,0xdb,0x24,0x03,0x00,0x02,
+0x12,0x63,0x00,0x73,0x24,0x02,0x00,0x05,0x2a,0x62,0x00,0x03,0x10,0x40,0x00,0x58,
+0x24,0x02,0x00,0x04,0x24,0x02,0x00,0x01,0x12,0x62,0x00,0x4b,0x02,0x40,0x20,0x21,
+0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2c,0x8c,0x43,0x00,0x00,0x00,0x00,0x00,0x00,
+0x30,0x70,0x00,0xff,0x12,0x00,0x00,0x06,0x02,0x00,0x10,0x21,0x8f,0xbf,0x00,0x28,
+0x7b,0xb2,0x01,0x3c,0x7b,0xb0,0x00,0xfc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x30,
+0x92,0x46,0x00,0x04,0x8e,0x43,0x00,0x24,0x24,0x02,0x00,0x07,0x02,0x40,0x20,0x21,
+0x00,0x00,0x28,0x21,0x24,0x07,0x00,0x06,0xaf,0xa2,0x00,0x10,0x0c,0x00,0x09,0xea,
+0xaf,0xa3,0x00,0x14,0xae,0x42,0x00,0x24,0x3c,0x02,0xb0,0x05,0x8c,0x42,0x02,0x2c,
+0x00,0x00,0x00,0x00,0x30,0x50,0x00,0xff,0x16,0x00,0xff,0xec,0x02,0x00,0x10,0x21,
+0x92,0x46,0x00,0x05,0x8e,0x43,0x00,0x28,0x24,0x02,0x00,0x05,0x02,0x40,0x20,0x21,
+0x24,0x05,0x00,0x01,0x24,0x07,0x00,0x04,0xaf,0xa2,0x00,0x10,0x0c,0x00,0x09,0xea,
+0xaf,0xa3,0x00,0x14,0xae,0x42,0x00,0x28,0x3c,0x02,0xb0,0x05,0x8c,0x42,0x02,0x2c,
+0x00,0x00,0x00,0x00,0x30,0x50,0x00,0xff,0x16,0x00,0xff,0xdc,0x02,0x00,0x10,0x21,
+0x92,0x46,0x00,0x06,0x8e,0x43,0x00,0x2c,0x24,0x02,0x00,0x03,0x02,0x40,0x20,0x21,
+0x24,0x05,0x00,0x02,0x00,0x00,0x38,0x21,0xaf,0xa2,0x00,0x10,0x0c,0x00,0x09,0xea,
+0xaf,0xa3,0x00,0x14,0xae,0x42,0x00,0x2c,0x3c,0x02,0xb0,0x05,0x8c,0x42,0x02,0x2c,
+0x00,0x00,0x00,0x00,0x30,0x50,0x00,0xff,0x16,0x00,0xff,0xcc,0x02,0x00,0x10,0x21,
+0x92,0x46,0x00,0x07,0x8e,0x43,0x00,0x30,0x24,0x02,0x00,0x02,0x02,0x40,0x20,0x21,
+0x24,0x05,0x00,0x03,0x24,0x07,0x00,0x01,0xaf,0xa2,0x00,0x10,0x0c,0x00,0x09,0xea,
+0xaf,0xa3,0x00,0x14,0xae,0x42,0x00,0x30,0x3c,0x02,0xb0,0x05,0x8c,0x42,0x02,0x2c,
+0x08,0x00,0x0b,0x9b,0x30,0x42,0x00,0xff,0x92,0x46,0x00,0x04,0x8e,0x43,0x00,0x24,
+0x24,0x02,0x00,0x07,0x00,0x00,0x28,0x21,0x24,0x07,0x00,0x06,0xaf,0xa2,0x00,0x10,
+0x0c,0x00,0x09,0xea,0xaf,0xa3,0x00,0x14,0x08,0x00,0x0b,0x94,0xae,0x42,0x00,0x24,
+0x12,0x62,0x00,0x0d,0x24,0x02,0x00,0x03,0x24,0x02,0x00,0x08,0x16,0x62,0xff,0xa8,
+0x02,0x40,0x20,0x21,0x92,0x46,0x00,0x07,0x8e,0x42,0x00,0x30,0x24,0x05,0x00,0x03,
+0x24,0x07,0x00,0x01,0xaf,0xa3,0x00,0x10,0x0c,0x00,0x09,0xea,0xaf,0xa2,0x00,0x14,
+0x08,0x00,0x0b,0x94,0xae,0x42,0x00,0x30,0x92,0x46,0x00,0x06,0x8e,0x43,0x00,0x2c,
+0x02,0x40,0x20,0x21,0x24,0x05,0x00,0x02,0x00,0x00,0x38,0x21,0xaf,0xa2,0x00,0x10,
+0x0c,0x00,0x09,0xea,0xaf,0xa3,0x00,0x14,0x08,0x00,0x0b,0x94,0xae,0x42,0x00,0x2c,
+0x92,0x46,0x00,0x05,0x8e,0x43,0x00,0x28,0x02,0x40,0x20,0x21,0x24,0x05,0x00,0x01,
+0x24,0x07,0x00,0x04,0xaf,0xa2,0x00,0x10,0x0c,0x00,0x09,0xea,0xaf,0xa3,0x00,0x14,
+0x08,0x00,0x0b,0x94,0xae,0x42,0x00,0x28,0x0c,0x00,0x01,0x57,0x24,0x04,0x00,0x01,
+0x08,0x00,0x0b,0x85,0x00,0x00,0x00,0x00,0x8f,0x84,0xb4,0x40,0xae,0x40,0x00,0x34,
+0x94,0x85,0x00,0x14,0x0c,0x00,0x1b,0x66,0x00,0x00,0x00,0x00,0x93,0x83,0x8b,0x71,
+0x00,0x00,0x00,0x00,0x30,0x62,0x00,0x02,0x10,0x40,0xff,0x69,0x00,0x00,0x00,0x00,
+0x0c,0x00,0x01,0x57,0x00,0x00,0x20,0x21,0x08,0x00,0x0b,0x7d,0x00,0x00,0x00,0x00,
+0x02,0x40,0x20,0x21,0x0c,0x00,0x09,0x61,0x02,0x20,0x28,0x21,0x08,0x00,0x0b,0x71,
+0x3c,0x02,0xb0,0x05,0x8e,0x42,0x00,0x3c,0x00,0x00,0x00,0x00,0x14,0x40,0xff,0x4a,
+0x00,0x00,0x00,0x00,0x8f,0x82,0xb4,0x48,0x00,0x00,0x00,0x00,0x90,0x42,0x00,0x0a,
+0x00,0x00,0x00,0x00,0x00,0x02,0x18,0x2b,0x08,0x00,0x0b,0x6e,0xae,0x43,0x00,0x3c,
+0x8e,0x42,0x00,0x38,0x00,0x00,0x00,0x00,0x14,0x40,0xff,0x3d,0x24,0x02,0x00,0x12,
+0x8f,0x82,0xb4,0x44,0x00,0x00,0x00,0x00,0x90,0x42,0x00,0x0a,0x00,0x00,0x00,0x00,
+0x00,0x02,0x18,0x2b,0x08,0x00,0x0b,0x6e,0xae,0x43,0x00,0x38,0x8e,0x42,0x00,0x34,
+0x00,0x00,0x00,0x00,0x14,0x40,0xff,0x30,0x24,0x02,0x00,0x11,0x8f,0x82,0xb4,0x40,
+0x00,0x00,0x00,0x00,0x90,0x42,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x02,0x18,0x2b,
+0x08,0x00,0x0b,0x6e,0xae,0x43,0x00,0x34,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,
+0x27,0xbd,0xff,0xe0,0x34,0x63,0x00,0x20,0x24,0x42,0x31,0x08,0x3c,0x08,0xb0,0x03,
+0xaf,0xb1,0x00,0x14,0xac,0x62,0x00,0x00,0x35,0x08,0x01,0x00,0xaf,0xbf,0x00,0x18,
+0xaf,0xb0,0x00,0x10,0x91,0x03,0x00,0x00,0x00,0xa0,0x48,0x21,0x24,0x11,0x00,0x0a,
+0x2c,0xa5,0x00,0x04,0x24,0x02,0x00,0x10,0x00,0x45,0x88,0x0a,0x30,0x63,0x00,0x01,
+0x00,0xc0,0x28,0x21,0x14,0x60,0x00,0x02,0x00,0x11,0x40,0x40,0x02,0x20,0x40,0x21,
+0x84,0x83,0x00,0x0c,0x31,0x11,0x00,0xff,0x01,0x20,0x20,0x21,0x00,0x03,0x10,0xc0,
+0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x08,0x00,0x43,0x10,0x21,
+0x84,0x43,0x00,0x04,0x24,0x06,0x00,0x0e,0x10,0xe0,0x00,0x06,0x02,0x23,0x80,0x21,
+0x02,0x00,0x10,0x21,0x8f,0xbf,0x00,0x18,0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x20,0x0c,0x00,0x08,0xe3,0x00,0x00,0x00,0x00,0x02,0x11,0x18,0x21,
+0x08,0x00,0x0c,0x64,0x00,0x62,0x80,0x21,0x27,0xbd,0xff,0xd0,0xaf,0xbf,0x00,0x28,
+0xaf,0xb4,0x00,0x20,0xaf,0xb3,0x00,0x1c,0xaf,0xb2,0x00,0x18,0xaf,0xb5,0x00,0x24,
+0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0x84,0x82,0x00,0x0c,0x3c,0x06,0xb0,0x03,
+0x34,0xc6,0x00,0x20,0x00,0x02,0x18,0xc0,0x00,0x62,0x18,0x21,0x00,0x03,0x18,0x80,
+0x27,0x82,0x90,0x04,0x00,0x62,0x10,0x21,0x8c,0x55,0x00,0x18,0x3c,0x02,0x80,0x00,
+0x24,0x42,0x31,0xb8,0xac,0xc2,0x00,0x00,0x8e,0xb0,0x00,0x08,0x27,0x82,0x90,0x08,
+0x00,0x62,0x18,0x21,0x90,0x71,0x00,0x07,0x00,0x10,0x86,0x43,0x32,0x10,0x00,0x01,
+0x00,0xa0,0x38,0x21,0x02,0x00,0x30,0x21,0x00,0xa0,0x98,0x21,0x02,0x20,0x28,0x21,
+0x0c,0x00,0x0c,0x42,0x00,0x80,0x90,0x21,0x02,0x20,0x20,0x21,0x02,0x00,0x28,0x21,
+0x24,0x06,0x00,0x14,0x0c,0x00,0x08,0xe3,0x00,0x40,0xa0,0x21,0x86,0x43,0x00,0x0c,
+0x3c,0x09,0xb0,0x09,0x3c,0x08,0xb0,0x09,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,
+0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21,0x80,0x43,0x00,0x06,
+0x3c,0x07,0xb0,0x09,0x3c,0x05,0xb0,0x09,0x28,0x62,0x00,0x00,0x24,0x64,0x00,0x03,
+0x00,0x82,0x18,0x0b,0x00,0x03,0x18,0x83,0x3c,0x02,0xb0,0x09,0x00,0x03,0x18,0x80,
+0x34,0x42,0x01,0x02,0x35,0x29,0x01,0x10,0x35,0x08,0x01,0x14,0x34,0xe7,0x01,0x20,
+0x34,0xa5,0x01,0x24,0xa4,0x54,0x00,0x00,0x12,0x60,0x00,0x11,0x02,0xa3,0xa8,0x21,
+0x8e,0xa2,0x00,0x0c,0x8e,0xa3,0x00,0x08,0x00,0x02,0x14,0x00,0x00,0x03,0x1c,0x02,
+0x00,0x43,0x10,0x21,0xad,0x22,0x00,0x00,0x8e,0xa3,0x00,0x0c,0x00,0x00,0x00,0x00,
+0x00,0x03,0x1c,0x02,0xa5,0x03,0x00,0x00,0x8f,0xbf,0x00,0x28,0x7b,0xb4,0x01,0x3c,
+0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x30,
+0x8e,0xa2,0x00,0x04,0x00,0x00,0x00,0x00,0xad,0x22,0x00,0x00,0x8e,0xa4,0x00,0x08,
+0x00,0x00,0x00,0x00,0xa5,0x04,0x00,0x00,0x7a,0xa2,0x00,0x7c,0x00,0x00,0x00,0x00,
+0x00,0x03,0x1c,0x00,0x00,0x02,0x14,0x02,0x00,0x62,0x18,0x21,0xac,0xe3,0x00,0x00,
+0x8e,0xa2,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x02,0x14,0x02,0x08,0x00,0x0c,0xb6,
+0xa4,0xa2,0x00,0x00,0x27,0xbd,0xff,0xe0,0xaf,0xb2,0x00,0x18,0xaf,0xb0,0x00,0x10,
+0xaf,0xbf,0x00,0x1c,0xaf,0xb1,0x00,0x14,0x84,0x82,0x00,0x0c,0x00,0x80,0x90,0x21,
+0x3c,0x05,0xb0,0x03,0x00,0x02,0x20,0xc0,0x00,0x82,0x20,0x21,0x00,0x04,0x20,0x80,
+0x27,0x82,0x90,0x04,0x00,0x82,0x10,0x21,0x8c,0x51,0x00,0x18,0x3c,0x02,0x80,0x00,
+0x34,0xa5,0x00,0x20,0x24,0x42,0x33,0x34,0x27,0x83,0x90,0x08,0xac,0xa2,0x00,0x00,
+0x00,0x83,0x20,0x21,0x3c,0x02,0xb0,0x03,0x90,0x86,0x00,0x07,0x34,0x42,0x01,0x00,
+0x8e,0x23,0x00,0x08,0x90,0x44,0x00,0x00,0x2c,0xc5,0x00,0x04,0x24,0x02,0x00,0x10,
+0x24,0x10,0x00,0x0a,0x00,0x45,0x80,0x0a,0x00,0x03,0x1e,0x43,0x30,0x84,0x00,0x01,
+0x30,0x65,0x00,0x01,0x14,0x80,0x00,0x02,0x00,0x10,0x10,0x40,0x02,0x00,0x10,0x21,
+0x00,0xc0,0x20,0x21,0x24,0x06,0x00,0x20,0x0c,0x00,0x08,0xe3,0x30,0x50,0x00,0xff,
+0x86,0x44,0x00,0x0c,0x27,0x85,0x90,0x10,0x3c,0x06,0xb0,0x09,0x00,0x04,0x18,0xc0,
+0x00,0x64,0x18,0x21,0x00,0x03,0x18,0x80,0x00,0x65,0x18,0x21,0x80,0x64,0x00,0x06,
+0x00,0x50,0x10,0x21,0x34,0xc6,0x01,0x02,0x24,0x85,0x00,0x03,0x28,0x83,0x00,0x00,
+0x00,0xa3,0x20,0x0b,0x00,0x04,0x20,0x83,0x00,0x04,0x20,0x80,0xa4,0xc2,0x00,0x00,
+0x02,0x24,0x20,0x21,0x8c,0x83,0x00,0x04,0x3c,0x02,0xb0,0x09,0x34,0x42,0x01,0x10,
+0xac,0x43,0x00,0x00,0x8c,0x86,0x00,0x08,0x3c,0x02,0xb0,0x09,0x34,0x42,0x01,0x14,
+0xa4,0x46,0x00,0x00,0x8c,0x85,0x00,0x0c,0x8c,0x82,0x00,0x08,0x3c,0x06,0xb0,0x09,
+0x00,0x05,0x2c,0x00,0x00,0x02,0x14,0x02,0x00,0xa2,0x28,0x21,0x34,0xc6,0x01,0x20,
+0xac,0xc5,0x00,0x00,0x8c,0x83,0x00,0x0c,0x3c,0x05,0xb0,0x09,0x34,0xa5,0x01,0x24,
+0x00,0x03,0x1c,0x02,0xa4,0xa3,0x00,0x00,0x92,0x42,0x00,0x0a,0x3c,0x03,0xb0,0x09,
+0x34,0x63,0x01,0x30,0x00,0x02,0x13,0x00,0x24,0x42,0x00,0x04,0x30,0x42,0xff,0xff,
+0xa4,0x62,0x00,0x00,0x86,0x44,0x00,0x0c,0x27,0x83,0x90,0x18,0x8f,0xbf,0x00,0x1c,
+0x00,0x04,0x10,0xc0,0x00,0x44,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,
+0x94,0x44,0x00,0x02,0x8f,0xb2,0x00,0x18,0x7b,0xb0,0x00,0xbc,0x3c,0x05,0xb0,0x09,
+0x34,0xa5,0x01,0x32,0xa4,0xa4,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,
+0x27,0xbd,0xff,0xe0,0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x00,0xaf,0xb0,0x00,0x10,
+0x34,0x42,0x00,0x20,0x00,0xa0,0x80,0x21,0x24,0x63,0x34,0xc0,0x00,0x05,0x2c,0x43,
+0xaf,0xb1,0x00,0x14,0xaf,0xbf,0x00,0x18,0xac,0x43,0x00,0x00,0x10,0xa0,0x00,0x05,
+0x00,0x80,0x88,0x21,0x8c,0x82,0x00,0x34,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0xb6,
+0x00,0x00,0x00,0x00,0x32,0x10,0x00,0xff,0x12,0x00,0x00,0x4c,0x00,0x00,0x10,0x21,
+0x24,0x02,0x00,0x08,0x12,0x02,0x00,0xa3,0x2a,0x02,0x00,0x09,0x10,0x40,0x00,0x89,
+0x24,0x02,0x00,0x40,0x24,0x04,0x00,0x02,0x12,0x04,0x00,0x79,0x2a,0x02,0x00,0x03,
+0x10,0x40,0x00,0x69,0x24,0x02,0x00,0x04,0x24,0x02,0x00,0x01,0x12,0x02,0x00,0x5a,
+0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x05,0x34,0x42,0x00,0x08,0x3c,0x03,0x80,0x00,
+0xa2,0x20,0x00,0x4e,0xac,0x43,0x00,0x00,0x82,0x24,0x00,0x11,0x92,0x27,0x00,0x11,
+0x10,0x80,0x00,0x4e,0x00,0x00,0x00,0x00,0x92,0x26,0x00,0x0a,0x24,0x02,0x00,0x12,
+0x10,0x46,0x00,0x09,0x30,0xc2,0x00,0xff,0x27,0x83,0xb4,0x00,0x00,0x02,0x10,0x80,
+0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x00,0x00,0x00,0x00,0x00,0x94,0x83,0x00,0x14,
+0x00,0x00,0x00,0x00,0xa6,0x23,0x00,0x0c,0x3c,0x02,0xb0,0x09,0x34,0x42,0x00,0x40,
+0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x03,0xa2,0x23,0x00,0x10,
+0x14,0x60,0x00,0x2b,0x30,0x65,0x00,0x01,0x30,0xc2,0x00,0xff,0x27,0x83,0xb4,0x00,
+0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x00,0x82,0x23,0x00,0x12,
+0x90,0x82,0x00,0x16,0x00,0x00,0x00,0x00,0x00,0x02,0x11,0x42,0x30,0x42,0x00,0x01,
+0x00,0x62,0x18,0x21,0x00,0x03,0x26,0x00,0x14,0x80,0x00,0x18,0xa2,0x23,0x00,0x12,
+0x00,0x07,0x16,0x00,0x14,0x40,0x00,0x11,0x24,0x02,0x00,0x01,0x96,0x23,0x00,0x0c,
+0x27,0x84,0x90,0x10,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,
+0x00,0x44,0x10,0x21,0x80,0x45,0x00,0x06,0x00,0x03,0x1a,0x00,0x3c,0x02,0xb0,0x00,
+0x00,0x65,0x18,0x21,0x00,0x62,0x18,0x21,0x90,0x64,0x00,0x00,0x90,0x62,0x00,0x04,
+0xa2,0x20,0x00,0x15,0xa3,0x80,0x8b,0xd4,0x24,0x02,0x00,0x01,0x8f,0xbf,0x00,0x18,
+0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x0c,0x00,0x0c,0xcd,
+0x02,0x20,0x20,0x21,0x92,0x27,0x00,0x11,0x08,0x00,0x0d,0x7d,0x00,0x07,0x16,0x00,
+0x0c,0x00,0x0c,0x6e,0x02,0x20,0x20,0x21,0x86,0x23,0x00,0x0c,0x27,0x84,0x90,0x08,
+0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x44,0x20,0x21,
+0x90,0x85,0x00,0x07,0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21,0xa2,0x25,0x00,0x13,
+0x90,0x83,0x00,0x07,0x08,0x00,0x0d,0x95,0xa0,0x43,0x00,0x02,0x92,0x26,0x00,0x0a,
+0x08,0x00,0x0d,0x5e,0x30,0xc2,0x00,0xff,0x8e,0x22,0x00,0x24,0x00,0x00,0x00,0x00,
+0x10,0x50,0x00,0x07,0xa2,0x20,0x00,0x08,0x24,0x02,0x00,0x07,0xa2,0x22,0x00,0x0a,
+0x92,0x22,0x00,0x27,0xae,0x20,0x00,0x24,0x08,0x00,0x0d,0x51,0xa2,0x22,0x00,0x04,
+0x08,0x00,0x0d,0xaf,0x24,0x02,0x00,0x06,0x16,0x02,0xff,0x9b,0x3c,0x02,0xb0,0x05,
+0x8e,0x23,0x00,0x2c,0x24,0x02,0x00,0x01,0x10,0x62,0x00,0x07,0xa2,0x24,0x00,0x08,
+0x24,0x02,0x00,0x03,0xa2,0x22,0x00,0x0a,0x92,0x22,0x00,0x2f,0xae,0x20,0x00,0x2c,
+0x08,0x00,0x0d,0x51,0xa2,0x22,0x00,0x06,0x08,0x00,0x0d,0xbe,0xa2,0x20,0x00,0x0a,
+0x8e,0x22,0x00,0x28,0x24,0x03,0x00,0x01,0x24,0x04,0x00,0x01,0x10,0x44,0x00,0x07,
+0xa2,0x23,0x00,0x08,0x24,0x02,0x00,0x05,0xa2,0x22,0x00,0x0a,0x92,0x22,0x00,0x2b,
+0xae,0x20,0x00,0x28,0x08,0x00,0x0d,0x51,0xa2,0x22,0x00,0x05,0x08,0x00,0x0d,0xca,
+0x24,0x02,0x00,0x04,0x12,0x02,0x00,0x12,0x2a,0x02,0x00,0x41,0x10,0x40,0x00,0x09,
+0x24,0x02,0x00,0x80,0x24,0x02,0x00,0x20,0x16,0x02,0xff,0x7b,0x3c,0x02,0xb0,0x05,
+0x24,0x02,0x00,0x12,0xa2,0x22,0x00,0x0a,0xa2,0x22,0x00,0x08,0x08,0x00,0x0d,0x51,
+0xae,0x20,0x00,0x3c,0x16,0x02,0xff,0x74,0x3c,0x02,0xb0,0x05,0x24,0x02,0x00,0x10,
+0xa2,0x22,0x00,0x0a,0xa2,0x22,0x00,0x08,0x08,0x00,0x0d,0x51,0xae,0x20,0x00,0x34,
+0x24,0x02,0x00,0x11,0xa2,0x22,0x00,0x0a,0xa2,0x22,0x00,0x08,0x08,0x00,0x0d,0x51,
+0xae,0x20,0x00,0x38,0x8e,0x24,0x00,0x30,0x24,0x02,0x00,0x03,0x24,0x03,0x00,0x01,
+0x10,0x83,0x00,0x07,0xa2,0x22,0x00,0x08,0x24,0x02,0x00,0x02,0xa2,0x22,0x00,0x0a,
+0x92,0x22,0x00,0x33,0xae,0x20,0x00,0x30,0x08,0x00,0x0d,0x51,0xa2,0x22,0x00,0x07,
+0x08,0x00,0x0d,0xf0,0xa2,0x24,0x00,0x0a,0x8f,0x84,0xb4,0x40,0xae,0x20,0x00,0x34,
+0x94,0x85,0x00,0x14,0x0c,0x00,0x1b,0x66,0x32,0x10,0x00,0xff,0x08,0x00,0x0d,0x42,
+0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x24,0x42,0x37,0xf4,
+0x34,0x63,0x00,0x20,0xac,0x62,0x00,0x00,0x80,0xa2,0x00,0x15,0x3c,0x06,0xb0,0x05,
+0x10,0x40,0x00,0x0a,0x34,0xc6,0x02,0x54,0x83,0x83,0x8b,0xd4,0x00,0x00,0x00,0x00,
+0xac,0x83,0x00,0x24,0x8c,0xc2,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x17,0x42,
+0x30,0x42,0x00,0x01,0x03,0xe0,0x00,0x08,0xac,0x82,0x00,0x28,0x8c,0x82,0x00,0x2c,
+0x3c,0x06,0xb0,0x05,0x34,0xc6,0x04,0x50,0x00,0x02,0x18,0x43,0x30,0x63,0x00,0x01,
+0x10,0x40,0x00,0x04,0x30,0x45,0x00,0x01,0xac,0x83,0x00,0x28,0x03,0xe0,0x00,0x08,
+0xac,0x85,0x00,0x24,0x90,0xc2,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff,
+0x30,0x43,0x00,0x02,0x30,0x42,0x00,0x01,0xac,0x83,0x00,0x28,0x03,0xe0,0x00,0x08,
+0xac,0x82,0x00,0x24,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x27,0xbd,0xff,0xd8,
+0x34,0x63,0x00,0x20,0x24,0x42,0x38,0x84,0xac,0x62,0x00,0x00,0xaf,0xb1,0x00,0x1c,
+0xaf,0xbf,0x00,0x20,0xaf,0xb0,0x00,0x18,0x90,0xa6,0x00,0x0a,0x27,0x83,0xb4,0x00,
+0x00,0xa0,0x88,0x21,0x00,0x06,0x10,0x80,0x00,0x43,0x10,0x21,0x8c,0x50,0x00,0x00,
+0x80,0xa5,0x00,0x11,0x92,0x03,0x00,0x12,0x10,0xa0,0x00,0x04,0xa2,0x20,0x00,0x15,
+0x24,0x02,0x00,0x12,0x10,0xc2,0x00,0xda,0x00,0x00,0x00,0x00,0x82,0x22,0x00,0x12,
+0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x67,0x00,0x00,0x00,0x00,0xa2,0x20,0x00,0x12,
+0xa2,0x00,0x00,0x19,0x86,0x23,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x03,0x10,0xc0,
+0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x20,0x00,0x43,0x10,0x21,
+0xa0,0x40,0x00,0x00,0x92,0x03,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0xdf,
+0xa2,0x03,0x00,0x16,0x82,0x02,0x00,0x12,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x20,
+0x00,0x00,0x00,0x00,0x92,0x23,0x00,0x08,0x00,0x00,0x00,0x00,0x14,0x60,0x00,0x45,
+0x24,0x02,0x00,0x01,0xa2,0x20,0x00,0x04,0x92,0x08,0x00,0x04,0x00,0x00,0x00,0x00,
+0x15,0x00,0x00,0x1e,0x24,0x02,0x00,0x01,0x92,0x07,0x00,0x0a,0xa2,0x02,0x00,0x17,
+0x92,0x02,0x00,0x16,0x30,0xe3,0x00,0xff,0x30,0x42,0x00,0xe4,0x10,0x60,0x00,0x03,
+0xa2,0x02,0x00,0x16,0x34,0x42,0x00,0x01,0xa2,0x02,0x00,0x16,0x11,0x00,0x00,0x05,
+0x00,0x00,0x00,0x00,0x92,0x02,0x00,0x16,0x00,0x00,0x00,0x00,0x34,0x42,0x00,0x02,
+0xa2,0x02,0x00,0x16,0x92,0x02,0x00,0x17,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x08,
+0x00,0x00,0x00,0x00,0x96,0x02,0x00,0x06,0x00,0x00,0x00,0x00,0xa6,0x02,0x00,0x14,
+0x8f,0xbf,0x00,0x20,0x7b,0xb0,0x00,0xfc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x28,
+0x96,0x02,0x00,0x00,0x08,0x00,0x0e,0x6c,0xa6,0x02,0x00,0x14,0x92,0x07,0x00,0x0a,
+0x00,0x00,0x00,0x00,0x14,0xe0,0x00,0x03,0x00,0x00,0x00,0x00,0x08,0x00,0x0e,0x58,
+0xa2,0x00,0x00,0x17,0x96,0x04,0x00,0x00,0x96,0x05,0x00,0x06,0x27,0x86,0x90,0x00,
+0x00,0x04,0x18,0xc0,0x00,0x64,0x18,0x21,0x00,0x05,0x10,0xc0,0x00,0x45,0x10,0x21,
+0x00,0x03,0x18,0x80,0x00,0x66,0x18,0x21,0x00,0x02,0x10,0x80,0x00,0x46,0x10,0x21,
+0x8c,0x66,0x00,0x08,0x8c,0x45,0x00,0x08,0x3c,0x03,0x80,0x00,0x00,0xc3,0x20,0x24,
+0x10,0x80,0x00,0x08,0x00,0xa3,0x10,0x24,0x10,0x40,0x00,0x04,0x00,0x00,0x18,0x21,
+0x10,0x80,0x00,0x02,0x24,0x03,0x00,0x01,0x00,0xa6,0x18,0x2b,0x08,0x00,0x0e,0x58,
+0xa2,0x03,0x00,0x17,0x10,0x40,0xff,0xfd,0x00,0xa6,0x18,0x2b,0x08,0x00,0x0e,0x8c,
+0x00,0x00,0x00,0x00,0x10,0x62,0x00,0x09,0x24,0x02,0x00,0x02,0x10,0x62,0x00,0x05,
+0x24,0x02,0x00,0x03,0x14,0x62,0xff,0xb8,0x00,0x00,0x00,0x00,0x08,0x00,0x0e,0x52,
+0xa2,0x20,0x00,0x07,0x08,0x00,0x0e,0x52,0xa2,0x20,0x00,0x06,0x08,0x00,0x0e,0x52,
+0xa2,0x20,0x00,0x05,0x82,0x22,0x00,0x10,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x69,
+0x2c,0x62,0x00,0x02,0x10,0x40,0x00,0x49,0x3c,0x02,0xb0,0x09,0x92,0x25,0x00,0x08,
+0x00,0x00,0x00,0x00,0x30,0xa6,0x00,0xff,0x2c,0xc2,0x00,0x04,0x10,0x40,0x00,0x3b,
+0x2c,0xc2,0x00,0x10,0x3c,0x04,0xb0,0x05,0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00,
+0x24,0x02,0x00,0x01,0x00,0xc2,0x10,0x04,0x00,0x02,0x10,0x27,0x00,0x62,0x18,0x24,
+0xa0,0x83,0x00,0x00,0x86,0x23,0x00,0x0c,0x96,0x26,0x00,0x0c,0x00,0x03,0x10,0xc0,
+0x00,0x43,0x10,0x21,0x00,0x02,0x28,0x80,0x27,0x83,0x90,0x04,0x00,0xa3,0x18,0x21,
+0x8c,0x64,0x00,0x18,0x00,0x00,0x00,0x00,0x8c,0x82,0x00,0x04,0x00,0x00,0x00,0x00,
+0x30,0x42,0x00,0x10,0x10,0x40,0x00,0x18,0x24,0x07,0x00,0x01,0x93,0x82,0x8b,0x71,
+0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x01,0x14,0x40,0x00,0x0a,0x24,0x05,0x00,0x24,
+0x00,0x06,0x2c,0x00,0x00,0x05,0x2c,0x03,0x0c,0x00,0x1b,0x66,0x02,0x00,0x20,0x21,
+0x92,0x02,0x00,0x16,0xa2,0x00,0x00,0x12,0x30,0x42,0x00,0xe7,0x08,0x00,0x0e,0x49,
+0xa2,0x02,0x00,0x16,0xf0,0xc5,0x00,0x06,0x00,0x00,0x28,0x12,0x27,0x82,0x90,0x00,
+0x00,0xa2,0x28,0x21,0x0c,0x00,0x01,0x49,0x3c,0x04,0x00,0x80,0x96,0x26,0x00,0x0c,
+0x08,0x00,0x0e,0xc9,0x00,0x06,0x2c,0x00,0x27,0x83,0x90,0x10,0x27,0x82,0x90,0x18,
+0x00,0xa2,0x10,0x21,0x00,0xa3,0x18,0x21,0x90,0x44,0x00,0x00,0x90,0x65,0x00,0x05,
+0x93,0x82,0x80,0x10,0x00,0x00,0x30,0x21,0x0c,0x00,0x21,0x9a,0xaf,0xa2,0x00,0x10,
+0x96,0x26,0x00,0x0c,0x08,0x00,0x0e,0xc3,0x00,0x00,0x00,0x00,0x14,0x40,0xff,0xcd,
+0x3c,0x04,0xb0,0x05,0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00,0x30,0xa5,0x00,0x0f,
+0x24,0x02,0x00,0x80,0x08,0x00,0x0e,0xb2,0x00,0xa2,0x10,0x07,0x86,0x26,0x00,0x0c,
+0x3c,0x03,0xb0,0x09,0x34,0x42,0x01,0x72,0x34,0x63,0x01,0x78,0x94,0x47,0x00,0x00,
+0x8c,0x65,0x00,0x00,0x00,0x06,0x10,0xc0,0x00,0x46,0x10,0x21,0x3c,0x04,0xb0,0x09,
+0xae,0x25,0x00,0x1c,0x34,0x84,0x01,0x7c,0x27,0x83,0x90,0x04,0x00,0x02,0x10,0x80,
+0x8c,0x85,0x00,0x00,0x00,0x43,0x10,0x21,0x8c,0x43,0x00,0x18,0xae,0x25,0x00,0x20,
+0xa6,0x27,0x00,0x18,0x8c,0x66,0x00,0x08,0x02,0x20,0x20,0x21,0x0c,0x00,0x0f,0x19,
+0x00,0x00,0x28,0x21,0x86,0x25,0x00,0x18,0x8e,0x26,0x00,0x1c,0x8e,0x27,0x00,0x20,
+0x02,0x20,0x20,0x21,0x0c,0x00,0x1c,0x68,0xaf,0xa2,0x00,0x10,0x08,0x00,0x0e,0x49,
+0xa2,0x02,0x00,0x12,0x92,0x22,0x00,0x08,0x08,0x00,0x0e,0x49,0xa2,0x22,0x00,0x09,
+0xa2,0x20,0x00,0x11,0x80,0x82,0x00,0x50,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x03,
+0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0xd0,0xac,0x40,0x00,0x00,0x08,0x00,0x0e,0x49,
+0xa0,0x80,0x00,0x50,0x94,0x8a,0x00,0x0c,0x24,0x03,0x00,0x24,0x00,0x80,0x70,0x21,
+0x3c,0x02,0x80,0x00,0x3c,0x04,0xb0,0x03,0x24,0x42,0x3c,0x64,0xf1,0x43,0x00,0x06,
+0x34,0x84,0x00,0x20,0x00,0x00,0x18,0x12,0x00,0xa0,0x68,0x21,0xac,0x82,0x00,0x00,
+0x27,0x85,0x90,0x10,0x27,0x82,0x90,0x0f,0x27,0xbd,0xff,0xf8,0x00,0x62,0x60,0x21,
+0x00,0x65,0x58,0x21,0x00,0x00,0xc0,0x21,0x11,0xa0,0x00,0xcc,0x00,0x00,0x78,0x21,
+0x00,0x0a,0x1c,0x00,0x00,0x03,0x1c,0x03,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,
+0x00,0x02,0x10,0x80,0x00,0x45,0x10,0x21,0x91,0x87,0x00,0x00,0x80,0x48,0x00,0x04,
+0x03,0xa0,0x60,0x21,0x00,0x0a,0x1c,0x00,0x00,0x03,0x1c,0x03,0x00,0x03,0x10,0xc0,
+0x00,0x43,0x10,0x21,0x00,0x02,0x48,0x80,0x27,0x83,0x90,0x04,0xa3,0xa7,0x00,0x00,
+0x01,0x23,0x18,0x21,0x8c,0x64,0x00,0x18,0x25,0x02,0xff,0xff,0x00,0x48,0x40,0x0b,
+0x8c,0x83,0x00,0x04,0x2d,0x05,0x00,0x07,0x24,0x02,0x00,0x06,0x30,0x63,0x00,0x08,
+0x14,0x60,0x00,0x35,0x00,0x45,0x40,0x0a,0x93,0xa7,0x00,0x00,0x27,0x82,0x90,0x18,
+0x01,0x22,0x10,0x21,0x30,0xe3,0x00,0xf0,0x38,0x63,0x00,0x50,0x30,0xe5,0x00,0xff,
+0x00,0x05,0x20,0x2b,0x00,0x03,0x18,0x2b,0x00,0x64,0x18,0x24,0x90,0x49,0x00,0x00,
+0x10,0x60,0x00,0x16,0x30,0xe4,0x00,0x0f,0x24,0x02,0x00,0x04,0x10,0xa2,0x00,0x9d,
+0x00,0x00,0x00,0x00,0x11,0xa0,0x00,0x3a,0x2c,0xa2,0x00,0x0c,0x10,0x40,0x00,0x02,
+0x24,0x84,0x00,0x0c,0x00,0xe0,0x20,0x21,0x30,0x84,0x00,0xff,0x00,0x04,0x10,0x40,
+0x27,0x83,0xbb,0x1c,0x00,0x44,0x10,0x21,0x00,0x43,0x10,0x21,0x90,0x47,0x00,0x00,
+0x00,0x00,0x00,0x00,0x2c,0xe3,0x00,0x0c,0xa3,0xa7,0x00,0x00,0x10,0x60,0x00,0x02,
+0x24,0xe2,0x00,0x04,0x00,0xe0,0x10,0x21,0xa3,0xa2,0x00,0x00,0x91,0x65,0x00,0x00,
+0x91,0x82,0x00,0x00,0x30,0xa3,0x00,0xff,0x00,0x62,0x10,0x2b,0x10,0x40,0x00,0x0e,
+0x2c,0x62,0x00,0x0c,0x14,0x40,0x00,0x03,0x00,0x60,0x20,0x21,0x30,0xa2,0x00,0x0f,
+0x24,0x44,0x00,0x0c,0x00,0x04,0x10,0x40,0x00,0x44,0x20,0x21,0x27,0x83,0xbb,0x1c,
+0x00,0x83,0x18,0x21,0x90,0x62,0x00,0x02,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x05,
+0x00,0x09,0x11,0x00,0xa1,0x85,0x00,0x00,0x93,0xa2,0x00,0x00,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x08,0x00,0x49,0x10,0x23,0x00,0x02,0x10,0x80,0x00,0x49,0x10,0x23,
+0x00,0x02,0x10,0x80,0x00,0x44,0x10,0x21,0x27,0x83,0xb4,0xa8,0x00,0x43,0x10,0x21,
+0x90,0x44,0x00,0x00,0x00,0x00,0x00,0x00,0x2c,0x83,0x00,0x0c,0x14,0x60,0x00,0x06,
+0x00,0x80,0x10,0x21,0x00,0x18,0x10,0x40,0x00,0x4f,0x10,0x21,0x00,0x02,0x11,0x00,
+0x00,0x82,0x10,0x21,0x24,0x42,0x00,0x04,0x08,0x00,0x0f,0x7a,0xa1,0x82,0x00,0x00,
+0x8f,0x8d,0x81,0x5c,0x00,0x00,0x00,0x00,0x01,0xa8,0x10,0x21,0x90,0x43,0x00,0x00,
+0x00,0x00,0x00,0x00,0x10,0x60,0xff,0xd1,0x00,0x00,0x28,0x21,0x00,0x06,0x74,0x82,
+0x30,0xe2,0x00,0xff,0x2c,0x42,0x00,0x0c,0x14,0x40,0x00,0x03,0x00,0xe0,0x10,0x21,
+0x30,0xe2,0x00,0x0f,0x24,0x42,0x00,0x0c,0x30,0x44,0x00,0xff,0xa3,0xa2,0x00,0x00,
+0x24,0x02,0x00,0x0c,0x10,0x82,0x00,0x0d,0x00,0x09,0x11,0x00,0x00,0x49,0x10,0x23,
+0x00,0x02,0x10,0x80,0x00,0x04,0x18,0x40,0x00,0x49,0x10,0x23,0x00,0x64,0x18,0x21,
+0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x27,0x84,0xb4,0xa8,0x00,0x44,0x10,0x21,
+0x90,0x47,0x00,0x00,0x00,0x00,0x00,0x00,0xa3,0xa7,0x00,0x00,0x00,0x0a,0x1c,0x00,
+0x00,0x03,0x1c,0x03,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,
+0x27,0x83,0x90,0x04,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x18,0x00,0x00,0x00,0x00,
+0x8c,0x83,0x00,0x04,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x10,0x14,0x60,0x00,0x33,
+0x00,0x06,0x14,0x42,0x00,0x09,0x11,0x00,0x00,0x49,0x10,0x23,0x00,0x02,0x10,0x80,
+0x00,0x49,0x10,0x23,0x27,0x83,0xb5,0x78,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,
+0x90,0x44,0x00,0x04,0x90,0x43,0x00,0x05,0x00,0x00,0x00,0x00,0x00,0x64,0xc0,0x24,
+0x93,0xa7,0x00,0x00,0x00,0x00,0x00,0x00,0x2c,0xe2,0x00,0x0f,0x10,0x40,0x00,0x0f,
+0x31,0xcf,0x00,0x01,0x00,0x0a,0x1c,0x00,0x00,0x03,0x1c,0x03,0x00,0x03,0x10,0xc0,
+0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x84,0x90,0x00,0x00,0x44,0x10,0x21,
+0x84,0x43,0x00,0x06,0x00,0x00,0x00,0x00,0x28,0x63,0x06,0x41,0x14,0x60,0x00,0x04,
+0x30,0xe2,0x00,0xff,0x24,0x07,0x00,0x0f,0xa3,0xa7,0x00,0x00,0x30,0xe2,0x00,0xff,
+0x2c,0x42,0x00,0x0c,0x14,0x40,0x00,0x06,0x00,0xe0,0x10,0x21,0x00,0x18,0x10,0x40,
+0x00,0x4f,0x10,0x21,0x00,0x02,0x11,0x00,0x00,0x47,0x10,0x21,0x24,0x42,0x00,0x04,
+0xa3,0xa2,0x00,0x00,0x00,0x40,0x38,0x21,0x01,0xa8,0x10,0x21,0x90,0x43,0x00,0x00,
+0x24,0xa4,0x00,0x01,0x30,0x85,0xff,0xff,0x00,0xa3,0x18,0x2b,0x14,0x60,0xff,0xad,
+0x30,0xe2,0x00,0xff,0x08,0x00,0x0f,0x67,0x00,0x00,0x00,0x00,0x08,0x00,0x0f,0xc8,
+0x30,0x58,0x00,0x01,0x81,0xc2,0x00,0x48,0x00,0x00,0x00,0x00,0x10,0x40,0xff,0x73,
+0x00,0x00,0x00,0x00,0x08,0x00,0x0f,0x55,0x00,0x00,0x00,0x00,0x00,0x0a,0x1c,0x00,
+0x00,0x03,0x1c,0x03,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,
+0x00,0x45,0x10,0x21,0x80,0x48,0x00,0x05,0x91,0x67,0x00,0x00,0x08,0x00,0x0f,0x35,
+0x03,0xa0,0x58,0x21,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20,
+0x24,0x42,0x40,0x04,0x03,0xe0,0x00,0x08,0xac,0x62,0x00,0x00,0x27,0xbd,0xff,0xc0,
+0xaf,0xb7,0x00,0x34,0xaf,0xb6,0x00,0x30,0xaf,0xb5,0x00,0x2c,0xaf,0xb4,0x00,0x28,
+0xaf,0xb3,0x00,0x24,0xaf,0xb2,0x00,0x20,0xaf,0xbf,0x00,0x3c,0xaf,0xbe,0x00,0x38,
+0xaf,0xb1,0x00,0x1c,0xaf,0xb0,0x00,0x18,0x84,0x82,0x00,0x0c,0x27,0x93,0x90,0x04,
+0x3c,0x05,0xb0,0x03,0x00,0x02,0x18,0xc0,0x00,0x62,0x18,0x21,0x00,0x03,0x18,0x80,
+0x00,0x73,0x10,0x21,0x8c,0x5e,0x00,0x18,0x3c,0x02,0x80,0x00,0x34,0xa5,0x00,0x20,
+0x24,0x42,0x40,0x1c,0xac,0xa2,0x00,0x00,0x8f,0xd0,0x00,0x08,0x27,0x95,0x90,0x10,
+0x00,0x75,0x18,0x21,0x00,0x00,0x28,0x21,0x02,0x00,0x30,0x21,0x90,0x71,0x00,0x00,
+0x0c,0x00,0x0f,0x19,0x00,0x80,0xb0,0x21,0x00,0x40,0x90,0x21,0x00,0x10,0x14,0x42,
+0x30,0x54,0x00,0x01,0x02,0x40,0x20,0x21,0x00,0x10,0x14,0x82,0x02,0x80,0x28,0x21,
+0x12,0x51,0x00,0x23,0x00,0x10,0xbf,0xc2,0x86,0xc3,0x00,0x0c,0x30,0x50,0x00,0x01,
+0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x55,0x10,0x21,
+0xa0,0x52,0x00,0x00,0x86,0xc3,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x03,0x10,0xc0,
+0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x53,0x30,0x21,0x8c,0xc7,0x00,0x18,
+0x27,0x83,0x90,0x00,0x00,0x43,0x10,0x21,0x8c,0xe3,0x00,0x04,0x84,0x46,0x00,0x06,
+0x00,0x03,0x19,0x42,0x0c,0x00,0x08,0xe3,0x30,0x73,0x00,0x01,0x00,0x40,0x88,0x21,
+0x02,0x40,0x20,0x21,0x02,0x80,0x28,0x21,0x16,0xe0,0x00,0x10,0x02,0x00,0x30,0x21,
+0x86,0xc2,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x02,0x18,0xc0,0x00,0x62,0x18,0x21,
+0x00,0x03,0x18,0x80,0x27,0x82,0x90,0x08,0x00,0x62,0x18,0x21,0xa4,0x71,0x00,0x04,
+0x7b,0xbe,0x01,0xfc,0x7b,0xb6,0x01,0xbc,0x7b,0xb4,0x01,0x7c,0x7b,0xb2,0x01,0x3c,
+0x7b,0xb0,0x00,0xfc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x40,0x86,0xc3,0x00,0x0c,
+0xaf,0xb3,0x00,0x10,0xaf,0xa0,0x00,0x14,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,
+0x00,0x02,0x10,0x80,0x00,0x55,0x10,0x21,0x80,0x47,0x00,0x06,0x00,0x00,0x00,0x00,
+0x24,0xe7,0x00,0x02,0x00,0x07,0x17,0xc2,0x00,0xe2,0x38,0x21,0x00,0x07,0x38,0x43,
+0x00,0x07,0x38,0x40,0x0c,0x00,0x09,0x0a,0x03,0xc7,0x38,0x21,0x08,0x00,0x10,0x48,
+0x02,0x22,0x88,0x21,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x27,0xbd,0xff,0xd0,
+0x34,0x63,0x00,0x20,0x24,0x42,0x41,0xa4,0xaf,0xb2,0x00,0x20,0xac,0x62,0x00,0x00,
+0xaf,0xbf,0x00,0x28,0xaf,0xb3,0x00,0x24,0xaf,0xb1,0x00,0x1c,0xaf,0xb0,0x00,0x18,
+0x3c,0x02,0xb0,0x03,0x90,0x83,0x00,0x0a,0x34,0x42,0x01,0x04,0x94,0x45,0x00,0x00,
+0x00,0x03,0x18,0x80,0x27,0x82,0xb4,0x00,0x00,0x62,0x18,0x21,0x30,0xa6,0xff,0xff,
+0x8c,0x71,0x00,0x00,0x80,0x85,0x00,0x12,0x30,0xc9,0x00,0xff,0x00,0x06,0x32,0x02,
+0xa4,0x86,0x00,0x44,0xa4,0x89,0x00,0x46,0x82,0x22,0x00,0x12,0x00,0x80,0x90,0x21,
+0x10,0xa0,0x00,0x1b,0xa0,0x80,0x00,0x15,0x00,0xc5,0x10,0x2a,0x10,0x40,0x00,0x14,
+0x00,0x00,0x00,0x00,0xa2,0x20,0x00,0x19,0x84,0x83,0x00,0x0c,0x00,0x00,0x00,0x00,
+0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x20,
+0x00,0x43,0x10,0x21,0xa0,0x40,0x00,0x00,0xa0,0x80,0x00,0x12,0x92,0x22,0x00,0x16,
+0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xdf,0xa2,0x22,0x00,0x16,0x8f,0xbf,0x00,0x28,
+0x7b,0xb2,0x01,0x3c,0x7b,0xb0,0x00,0xfc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x30,
+0x0c,0x00,0x10,0x01,0x00,0x00,0x00,0x00,0x08,0x00,0x10,0x97,0x00,0x00,0x00,0x00,
+0x28,0x42,0x00,0x02,0x10,0x40,0x01,0x76,0x00,0x00,0x28,0x21,0x94,0x87,0x00,0x0c,
+0x00,0x00,0x00,0x00,0x00,0xe0,0x10,0x21,0x00,0x02,0x14,0x00,0x00,0x02,0x14,0x03,
+0x00,0x07,0x24,0x00,0x00,0x04,0x24,0x03,0x00,0x02,0x18,0xc0,0x00,0x62,0x18,0x21,
+0x00,0x04,0x28,0xc0,0x00,0xa4,0x28,0x21,0x27,0x82,0x90,0x20,0x00,0x03,0x18,0x80,
+0x00,0x62,0x18,0x21,0x00,0x05,0x28,0x80,0x27,0x82,0x90,0x08,0x00,0xa2,0x10,0x21,
+0x8c,0x68,0x00,0x00,0x80,0x44,0x00,0x06,0x27,0x82,0x90,0x10,0x00,0x08,0x1d,0x02,
+0x00,0xa2,0x28,0x21,0x38,0x84,0x00,0x00,0x30,0x63,0x00,0x01,0x01,0x24,0x30,0x0b,
+0x80,0xaa,0x00,0x04,0x80,0xa9,0x00,0x05,0x10,0x60,0x00,0x02,0x00,0x08,0x14,0x02,
+0x30,0x46,0x00,0x0f,0x15,0x20,0x00,0x28,0x01,0x49,0x10,0x21,0x15,0x40,0x00,0x11,
+0x30,0xe3,0xff,0xff,0x92,0x45,0x00,0x08,0x00,0x00,0x00,0x00,0x30,0xa8,0x00,0xff,
+0x2d,0x02,0x00,0x04,0x10,0x40,0x01,0x46,0x2d,0x02,0x00,0x10,0x3c,0x04,0xb0,0x05,
+0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00,0x24,0x02,0x00,0x01,0x01,0x02,0x10,0x04,
+0x00,0x62,0x18,0x25,0xa0,0x83,0x00,0x00,0x96,0x47,0x00,0x0c,0x00,0x00,0x00,0x00,
+0x30,0xe3,0xff,0xff,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x27,0x84,0x90,0x10,
+0x00,0x02,0x10,0x80,0x00,0x44,0x10,0x21,0x80,0x45,0x00,0x06,0x00,0x03,0x1a,0x00,
+0x3c,0x04,0xb0,0x00,0x00,0x65,0x18,0x21,0x00,0x64,0x20,0x21,0x94,0x82,0x00,0x00,
+0x82,0x43,0x00,0x10,0x00,0x02,0x14,0x00,0x14,0x60,0x00,0x06,0x00,0x02,0x3c,0x03,
+0x30,0xe2,0x00,0x04,0x14,0x40,0x00,0x04,0x01,0x49,0x10,0x21,0x34,0xe2,0x08,0x00,
+0xa4,0x82,0x00,0x00,0x01,0x49,0x10,0x21,0x00,0x02,0x16,0x00,0x00,0x02,0x16,0x03,
+0x00,0x46,0x10,0x2a,0x10,0x40,0x00,0x7c,0x00,0x00,0x00,0x00,0x82,0x42,0x00,0x10,
+0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x0e,0x00,0x00,0x00,0x00,0x86,0x43,0x00,0x0c,
+0x25,0x44,0x00,0x01,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,
+0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21,0xa0,0x44,0x00,0x04,0x92,0x23,0x00,0x16,
+0x02,0x40,0x20,0x21,0x30,0x63,0x00,0xfb,0x08,0x00,0x10,0x9c,0xa2,0x23,0x00,0x16,
+0x86,0x43,0x00,0x0c,0x25,0x24,0x00,0x01,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,
+0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21,0xa0,0x44,0x00,0x05,
+0x86,0x45,0x00,0x0c,0x0c,0x00,0x1e,0xea,0x02,0x20,0x20,0x21,0x10,0x40,0x00,0x5a,
+0x00,0x00,0x00,0x00,0x92,0x45,0x00,0x08,0x00,0x00,0x00,0x00,0x30,0xa6,0x00,0xff,
+0x2c,0xc2,0x00,0x04,0x10,0x40,0x00,0x4c,0x2c,0xc2,0x00,0x10,0x3c,0x04,0xb0,0x05,
+0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00,0x24,0x02,0x00,0x01,0x00,0xc2,0x10,0x04,
+0x00,0x02,0x10,0x27,0x00,0x62,0x18,0x24,0xa0,0x83,0x00,0x00,0x92,0x45,0x00,0x08,
+0x00,0x00,0x00,0x00,0x30,0xa5,0x00,0xff,0x14,0xa0,0x00,0x33,0x24,0x02,0x00,0x01,
+0xa2,0x40,0x00,0x04,0x92,0x22,0x00,0x04,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x0c,
+0x24,0x02,0x00,0x01,0xa2,0x22,0x00,0x17,0x92,0x22,0x00,0x17,0x00,0x00,0x00,0x00,
+0x10,0x40,0x00,0x04,0x00,0x00,0x00,0x00,0x96,0x22,0x00,0x06,0x08,0x00,0x10,0x97,
+0xa6,0x22,0x00,0x14,0x96,0x22,0x00,0x00,0x08,0x00,0x10,0x97,0xa6,0x22,0x00,0x14,
+0x92,0x22,0x00,0x0a,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x03,0x00,0x00,0x00,0x00,
+0x08,0x00,0x11,0x26,0xa2,0x20,0x00,0x17,0x96,0x24,0x00,0x00,0x96,0x25,0x00,0x06,
+0x27,0x86,0x90,0x00,0x00,0x04,0x18,0xc0,0x00,0x64,0x18,0x21,0x00,0x05,0x10,0xc0,
+0x00,0x45,0x10,0x21,0x00,0x03,0x18,0x80,0x00,0x66,0x18,0x21,0x00,0x02,0x10,0x80,
+0x00,0x46,0x10,0x21,0x8c,0x65,0x00,0x08,0x8c,0x44,0x00,0x08,0x3c,0x03,0x80,0x00,
+0x00,0xa3,0x30,0x24,0x10,0xc0,0x00,0x08,0x00,0x83,0x10,0x24,0x10,0x40,0x00,0x04,
+0x00,0x00,0x18,0x21,0x10,0xc0,0x00,0x02,0x24,0x03,0x00,0x01,0x00,0x85,0x18,0x2b,
+0x08,0x00,0x11,0x26,0xa2,0x23,0x00,0x17,0x10,0x40,0xff,0xfd,0x00,0x85,0x18,0x2b,
+0x08,0x00,0x11,0x49,0x00,0x00,0x00,0x00,0x10,0xa2,0x00,0x09,0x24,0x02,0x00,0x02,
+0x10,0xa2,0x00,0x05,0x24,0x02,0x00,0x03,0x14,0xa2,0xff,0xca,0x00,0x00,0x00,0x00,
+0x08,0x00,0x11,0x21,0xa2,0x40,0x00,0x07,0x08,0x00,0x11,0x21,0xa2,0x40,0x00,0x06,
+0x08,0x00,0x11,0x21,0xa2,0x40,0x00,0x05,0x14,0x40,0xff,0xbe,0x3c,0x04,0xb0,0x05,
+0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00,0x30,0xa5,0x00,0x0f,0x24,0x02,0x00,0x80,
+0x08,0x00,0x11,0x18,0x00,0xa2,0x10,0x07,0x0c,0x00,0x10,0x07,0x02,0x40,0x20,0x21,
+0x08,0x00,0x10,0x97,0x00,0x00,0x00,0x00,0x92,0x45,0x00,0x08,0x00,0x00,0x00,0x00,
+0x30,0xa6,0x00,0xff,0x2c,0xc2,0x00,0x04,0x10,0x40,0x00,0x99,0x2c,0xc2,0x00,0x10,
+0x3c,0x04,0xb0,0x05,0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00,0x24,0x02,0x00,0x01,
+0x00,0xc2,0x10,0x04,0x00,0x02,0x10,0x27,0x00,0x62,0x18,0x24,0xa0,0x83,0x00,0x00,
+0x92,0x45,0x00,0x08,0x00,0x00,0x00,0x00,0x30,0xa5,0x00,0xff,0x14,0xa0,0x00,0x80,
+0x24,0x02,0x00,0x01,0xa2,0x40,0x00,0x04,0x86,0x43,0x00,0x0c,0x27,0x93,0x90,0x04,
+0x96,0x47,0x00,0x0c,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x28,0x80,
+0x00,0xb3,0x18,0x21,0x8c,0x64,0x00,0x18,0x00,0x00,0x00,0x00,0x8c,0x82,0x00,0x04,
+0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x10,0x10,0x40,0x00,0x64,0x00,0x00,0x30,0x21,
+0x00,0x07,0x1c,0x00,0x00,0x03,0x1c,0x03,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,
+0x00,0x02,0x10,0x80,0x00,0x53,0x10,0x21,0x8c,0x43,0x00,0x18,0x93,0x82,0x8b,0x71,
+0x8c,0x64,0x00,0x04,0x30,0x42,0x00,0x01,0x00,0x04,0x21,0x42,0x14,0x40,0x00,0x4d,
+0x30,0x90,0x00,0x01,0x00,0x07,0x2c,0x00,0x00,0x05,0x2c,0x03,0x0c,0x00,0x1b,0x66,
+0x02,0x20,0x20,0x21,0x96,0x26,0x00,0x06,0x12,0x00,0x00,0x14,0x30,0xc5,0xff,0xff,
+0x02,0x60,0x90,0x21,0x00,0x05,0x10,0xc0,0x00,0x45,0x10,0x21,0x00,0x02,0x10,0x80,
+0x00,0x52,0x18,0x21,0x92,0x22,0x00,0x0a,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x0b,
+0x02,0x20,0x20,0x21,0x8c,0x63,0x00,0x18,0x00,0x00,0x00,0x00,0x8c,0x62,0x00,0x04,
+0x00,0x00,0x00,0x00,0x00,0x02,0x11,0x42,0x0c,0x00,0x1b,0x66,0x30,0x50,0x00,0x01,
+0x96,0x26,0x00,0x06,0x16,0x00,0xff,0xef,0x30,0xc5,0xff,0xff,0x92,0x22,0x00,0x04,
+0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x0d,0x24,0x02,0x00,0x01,0xa2,0x22,0x00,0x17,
+0x92,0x22,0x00,0x17,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x05,0x00,0x00,0x00,0x00,
+0xa6,0x26,0x00,0x14,0x92,0x22,0x00,0x16,0x08,0x00,0x10,0x96,0x30,0x42,0x00,0xc3,
+0x96,0x22,0x00,0x00,0x08,0x00,0x11,0xbd,0xa6,0x22,0x00,0x14,0x92,0x22,0x00,0x0a,
+0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x03,0x00,0x00,0x00,0x00,0x08,0x00,0x11,0xb8,
+0xa2,0x20,0x00,0x17,0x96,0x24,0x00,0x00,0x30,0xc5,0xff,0xff,0x00,0x05,0x18,0xc0,
+0x00,0x04,0x10,0xc0,0x00,0x44,0x10,0x21,0x00,0x65,0x18,0x21,0x27,0x84,0x90,0x00,
+0x00,0x02,0x10,0x80,0x00,0x44,0x10,0x21,0x00,0x03,0x18,0x80,0x8c,0x45,0x00,0x08,
+0x00,0x64,0x18,0x21,0x8c,0x64,0x00,0x08,0x3c,0x02,0x80,0x00,0x00,0xa2,0x38,0x24,
+0x10,0xe0,0x00,0x08,0x00,0x82,0x10,0x24,0x10,0x40,0x00,0x04,0x00,0x00,0x18,0x21,
+0x10,0xe0,0x00,0x02,0x24,0x03,0x00,0x01,0x00,0x85,0x18,0x2b,0x08,0x00,0x11,0xb8,
+0xa2,0x23,0x00,0x17,0x10,0x40,0xff,0xfd,0x00,0x85,0x18,0x2b,0x08,0x00,0x11,0xdc,
+0x00,0x00,0x00,0x00,0x24,0x05,0x00,0x24,0xf0,0xe5,0x00,0x06,0x00,0x00,0x28,0x12,
+0x27,0x82,0x90,0x00,0x00,0xa2,0x28,0x21,0x0c,0x00,0x01,0x49,0x00,0x00,0x20,0x21,
+0x96,0x47,0x00,0x0c,0x08,0x00,0x11,0x9a,0x00,0x07,0x2c,0x00,0x27,0x83,0x90,0x10,
+0x27,0x82,0x90,0x18,0x00,0xa2,0x10,0x21,0x00,0xa3,0x18,0x21,0x90,0x44,0x00,0x00,
+0x90,0x65,0x00,0x05,0x93,0x82,0x80,0x10,0x24,0x07,0x00,0x01,0x0c,0x00,0x21,0x9a,
+0xaf,0xa2,0x00,0x10,0x96,0x47,0x00,0x0c,0x08,0x00,0x11,0x8d,0x00,0x07,0x1c,0x00,
+0x10,0xa2,0x00,0x09,0x24,0x02,0x00,0x02,0x10,0xa2,0x00,0x05,0x24,0x02,0x00,0x03,
+0x14,0xa2,0xff,0x7d,0x00,0x00,0x00,0x00,0x08,0x00,0x11,0x7e,0xa2,0x40,0x00,0x07,
+0x08,0x00,0x11,0x7e,0xa2,0x40,0x00,0x06,0x08,0x00,0x11,0x7e,0xa2,0x40,0x00,0x05,
+0x14,0x40,0xff,0x71,0x3c,0x04,0xb0,0x05,0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00,
+0x30,0xa5,0x00,0x0f,0x24,0x02,0x00,0x80,0x08,0x00,0x11,0x75,0x00,0xa2,0x10,0x07,
+0x14,0x40,0xfe,0xc3,0x3c,0x04,0xb0,0x05,0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00,
+0x30,0xa5,0x00,0x0f,0x24,0x02,0x00,0x80,0x08,0x00,0x10,0xd0,0x00,0xa2,0x10,0x07,
+0x84,0x83,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,
+0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x04,0x00,0x43,0x10,0x21,0x8c,0x47,0x00,0x18,
+0x00,0x00,0x00,0x00,0x8c,0xe6,0x00,0x08,0x0c,0x00,0x0f,0x19,0x00,0x00,0x00,0x00,
+0x02,0x40,0x20,0x21,0x00,0x00,0x28,0x21,0x00,0x00,0x30,0x21,0x00,0x00,0x38,0x21,
+0x0c,0x00,0x1c,0x68,0xaf,0xa2,0x00,0x10,0x00,0x02,0x1e,0x00,0x14,0x60,0xfe,0x6b,
+0xa2,0x22,0x00,0x12,0x92,0x43,0x00,0x08,0x00,0x00,0x00,0x00,0x14,0x60,0x00,0x40,
+0x24,0x02,0x00,0x01,0xa2,0x40,0x00,0x04,0x92,0x28,0x00,0x04,0x00,0x00,0x00,0x00,
+0x15,0x00,0x00,0x19,0x24,0x02,0x00,0x01,0x92,0x27,0x00,0x0a,0xa2,0x22,0x00,0x17,
+0x92,0x22,0x00,0x17,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x10,0x00,0x00,0x00,0x00,
+0x96,0x22,0x00,0x06,0x00,0x00,0x00,0x00,0xa6,0x22,0x00,0x14,0x92,0x22,0x00,0x16,
+0x30,0xe3,0x00,0xff,0x30,0x42,0x00,0xc0,0x10,0x60,0x00,0x03,0xa2,0x22,0x00,0x16,
+0x34,0x42,0x00,0x01,0xa2,0x22,0x00,0x16,0x11,0x00,0xfe,0x50,0x00,0x00,0x00,0x00,
+0x92,0x22,0x00,0x16,0x08,0x00,0x10,0x96,0x34,0x42,0x00,0x02,0x96,0x22,0x00,0x00,
+0x08,0x00,0x12,0x3f,0xa6,0x22,0x00,0x14,0x92,0x27,0x00,0x0a,0x00,0x00,0x00,0x00,
+0x14,0xe0,0x00,0x03,0x00,0x00,0x00,0x00,0x08,0x00,0x12,0x38,0xa2,0x20,0x00,0x17,
+0x96,0x24,0x00,0x00,0x96,0x25,0x00,0x06,0x27,0x86,0x90,0x00,0x00,0x04,0x18,0xc0,
+0x00,0x64,0x18,0x21,0x00,0x05,0x10,0xc0,0x00,0x45,0x10,0x21,0x00,0x03,0x18,0x80,
+0x00,0x66,0x18,0x21,0x00,0x02,0x10,0x80,0x00,0x46,0x10,0x21,0x8c,0x65,0x00,0x08,
+0x8c,0x44,0x00,0x08,0x3c,0x03,0x80,0x00,0x00,0xa3,0x30,0x24,0x10,0xc0,0x00,0x08,
+0x00,0x83,0x10,0x24,0x10,0x40,0x00,0x04,0x00,0x00,0x18,0x21,0x10,0xc0,0x00,0x02,
+0x24,0x03,0x00,0x01,0x00,0x85,0x18,0x2b,0x08,0x00,0x12,0x38,0xa2,0x23,0x00,0x17,
+0x10,0x40,0xff,0xfd,0x00,0x85,0x18,0x2b,0x08,0x00,0x12,0x67,0x00,0x00,0x00,0x00,
+0x10,0x62,0x00,0x09,0x24,0x02,0x00,0x02,0x10,0x62,0x00,0x05,0x24,0x02,0x00,0x03,
+0x14,0x62,0xff,0xbd,0x00,0x00,0x00,0x00,0x08,0x00,0x12,0x32,0xa2,0x40,0x00,0x07,
+0x08,0x00,0x12,0x32,0xa2,0x40,0x00,0x06,0x08,0x00,0x12,0x32,0xa2,0x40,0x00,0x05,
+0x3c,0x02,0x80,0x00,0x00,0x82,0x30,0x24,0x10,0xc0,0x00,0x08,0x00,0xa2,0x18,0x24,
+0x10,0x60,0x00,0x04,0x00,0x00,0x10,0x21,0x10,0xc0,0x00,0x02,0x24,0x02,0x00,0x01,
+0x00,0xa4,0x10,0x2b,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x10,0x60,0xff,0xfd,
+0x00,0xa4,0x10,0x2b,0x08,0x00,0x12,0x82,0x00,0x00,0x00,0x00,0x30,0x82,0xff,0xff,
+0x00,0x02,0x18,0xc0,0x00,0x62,0x18,0x21,0x27,0x84,0x90,0x10,0x00,0x03,0x18,0x80,
+0x00,0x64,0x18,0x21,0x80,0x66,0x00,0x06,0x00,0x02,0x12,0x00,0x3c,0x03,0xb0,0x00,
+0x00,0x46,0x10,0x21,0x00,0x45,0x10,0x21,0x03,0xe0,0x00,0x08,0x00,0x43,0x10,0x21,
+0x27,0xbd,0xff,0xe0,0x30,0x82,0x00,0x7c,0x30,0x84,0xff,0x00,0xaf,0xbf,0x00,0x1c,
+0xaf,0xb2,0x00,0x18,0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0x14,0x40,0x00,0x41,
+0x00,0x04,0x22,0x03,0x24,0x02,0x00,0x04,0x3c,0x10,0xb0,0x03,0x8e,0x10,0x00,0x00,
+0x10,0x82,0x00,0x32,0x24,0x02,0x00,0x08,0x10,0x82,0x00,0x03,0x32,0x02,0x00,0x20,
+0x08,0x00,0x12,0xa8,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x17,0x3c,0x02,0xb0,0x06,
+0x34,0x42,0x80,0x24,0x8c,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x67,0x00,0xff,
+0x10,0xe0,0x00,0x23,0x00,0x00,0x88,0x21,0x8f,0x85,0x8f,0xe0,0x00,0x40,0x30,0x21,
+0x94,0xa2,0x00,0x08,0x8c,0xc3,0x00,0x00,0x26,0x31,0x00,0x01,0x24,0x42,0x00,0x02,
+0x30,0x42,0x01,0xff,0x34,0x63,0x01,0x00,0x02,0x27,0x20,0x2a,0xa4,0xa2,0x00,0x08,
+0x14,0x80,0xff,0xf7,0xac,0xc3,0x00,0x00,0x84,0xa3,0x00,0x08,0x3c,0x02,0xb0,0x03,
+0x34,0x42,0x00,0x30,0xac,0x43,0x00,0x00,0x27,0x92,0xb4,0x00,0x24,0x11,0x00,0x12,
+0x8e,0x44,0x00,0x00,0x26,0x31,0xff,0xff,0x90,0x82,0x00,0x10,0x00,0x00,0x00,0x00,
+0x10,0x40,0x00,0x03,0x26,0x52,0x00,0x04,0x0c,0x00,0x18,0xd0,0x00,0x00,0x00,0x00,
+0x06,0x21,0xff,0xf7,0x24,0x02,0xff,0xdf,0x02,0x02,0x80,0x24,0x3c,0x01,0xb0,0x03,
+0x0c,0x00,0x13,0x1c,0xac,0x30,0x00,0x00,0x08,0x00,0x12,0xa8,0x00,0x00,0x00,0x00,
+0x8f,0x85,0x8f,0xe0,0x08,0x00,0x12,0xbe,0x00,0x00,0x00,0x00,0x24,0x02,0xff,0x95,
+0x3c,0x03,0xb0,0x03,0x02,0x02,0x80,0x24,0x34,0x63,0x00,0x30,0x3c,0x01,0xb0,0x03,
+0xac,0x30,0x00,0x00,0x0c,0x00,0x12,0xe5,0xac,0x60,0x00,0x00,0x08,0x00,0x12,0xa8,
+0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x50,0x08,0x00,0x12,0xa8,
+0xac,0x46,0x00,0x00,0x3c,0x0a,0x80,0x00,0x25,0x4a,0x4b,0x94,0x3c,0x0b,0xb0,0x03,
+0xad,0x6a,0x00,0x20,0x3c,0x08,0x80,0x01,0x25,0x08,0x00,0x00,0x3c,0x09,0x80,0x01,
+0x25,0x29,0x03,0x50,0x11,0x09,0x00,0x10,0x00,0x00,0x00,0x00,0x3c,0x0a,0x80,0x00,
+0x25,0x4a,0x4b,0xbc,0x3c,0x0b,0xb0,0x03,0xad,0x6a,0x00,0x20,0x3c,0x08,0xb0,0x06,
+0x35,0x08,0x80,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x8d,0x09,0x00,0x00,
+0x00,0x00,0x00,0x00,0x31,0x29,0x00,0x01,0x00,0x00,0x00,0x00,0x24,0x01,0x00,0x01,
+0x15,0x21,0xff,0xf2,0x00,0x00,0x00,0x00,0x3c,0x0a,0x80,0x00,0x25,0x4a,0x4b,0xf8,
+0x3c,0x0b,0xb0,0x03,0xad,0x6a,0x00,0x20,0x3c,0x02,0xb0,0x03,0x8c,0x43,0x00,0x00,
+0x00,0x00,0x00,0x00,0x34,0x63,0x00,0x40,0x00,0x00,0x00,0x00,0xac,0x43,0x00,0x00,
+0x00,0x00,0x00,0x00,0x3c,0x0a,0x80,0x00,0x25,0x4a,0x4c,0x24,0x3c,0x0b,0xb0,0x03,
+0xad,0x6a,0x00,0x20,0x3c,0x02,0x80,0x01,0x24,0x42,0x00,0x00,0x3c,0x03,0x80,0x01,
+0x24,0x63,0x03,0x50,0x3c,0x04,0xb0,0x00,0x8c,0x85,0x00,0x00,0x00,0x00,0x00,0x00,
+0xac,0x45,0x00,0x00,0x24,0x42,0x00,0x04,0x24,0x84,0x00,0x04,0x00,0x43,0x08,0x2a,
+0x14,0x20,0xff,0xf9,0x00,0x00,0x00,0x00,0x0c,0x00,0x13,0x1c,0x00,0x00,0x00,0x00,
+0x3c,0x0a,0x80,0x00,0x25,0x4a,0x4c,0x70,0x3c,0x0b,0xb0,0x03,0xad,0x6a,0x00,0x20,
+0x3c,0x02,0x80,0x01,0x24,0x42,0x03,0x50,0x3c,0x03,0x80,0x01,0x24,0x63,0x3f,0x24,
+0xac,0x40,0x00,0x00,0xac,0x40,0x00,0x04,0xac,0x40,0x00,0x08,0xac,0x40,0x00,0x0c,
+0x24,0x42,0x00,0x10,0x00,0x43,0x08,0x2a,0x14,0x20,0xff,0xf9,0x00,0x00,0x00,0x00,
+0x3c,0x0a,0x80,0x00,0x25,0x4a,0x4c,0xb0,0x3c,0x0b,0xb0,0x03,0xad,0x6a,0x00,0x20,
+0x3c,0x1c,0x80,0x01,0x27,0x9c,0x7f,0xf0,0x27,0x9d,0x8b,0xe0,0x00,0x00,0x00,0x00,
+0x27,0x9d,0x8f,0xc8,0x3c,0x0a,0x80,0x00,0x25,0x4a,0x4c,0xd4,0x3c,0x0b,0xb0,0x03,
+0xad,0x6a,0x00,0x20,0x40,0x80,0x68,0x00,0x40,0x08,0x60,0x00,0x00,0x00,0x00,0x00,
+0x35,0x08,0xff,0x01,0x40,0x88,0x60,0x00,0x00,0x00,0x00,0x00,0x0c,0x00,0x15,0x62,
+0x00,0x00,0x00,0x00,0x24,0x84,0xf8,0x00,0x30,0x87,0x00,0x03,0x00,0x04,0x30,0x40,
+0x00,0xc7,0x20,0x23,0x3c,0x02,0xb0,0x0a,0x27,0xbd,0xff,0xe0,0x24,0x03,0xff,0xff,
+0x00,0x82,0x20,0x21,0xaf,0xb1,0x00,0x14,0xac,0x83,0x10,0x00,0xaf,0xbf,0x00,0x18,
+0xaf,0xb0,0x00,0x10,0x00,0xa0,0x88,0x21,0x24,0x03,0x00,0x01,0x8c,0x82,0x10,0x00,
+0x00,0x00,0x00,0x00,0x14,0x43,0xff,0xfd,0x00,0xc7,0x10,0x23,0x3c,0x03,0xb0,0x0a,
+0x00,0x43,0x10,0x21,0x8c,0x50,0x00,0x00,0x0c,0x00,0x13,0x99,0x02,0x20,0x20,0x21,
+0x02,0x11,0x80,0x24,0x00,0x50,0x80,0x06,0x02,0x00,0x10,0x21,0x8f,0xbf,0x00,0x18,
+0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x27,0xbd,0xff,0xd8,
+0xaf,0xb2,0x00,0x18,0x00,0xa0,0x90,0x21,0x24,0x05,0xff,0xff,0xaf,0xb3,0x00,0x1c,
+0xaf,0xbf,0x00,0x20,0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0x00,0xc0,0x98,0x21,
+0x12,0x45,0x00,0x23,0x24,0x84,0xf8,0x00,0x30,0x83,0x00,0x03,0x00,0x04,0x10,0x40,
+0x00,0x40,0x88,0x21,0x00,0x60,0x20,0x21,0x00,0x43,0x10,0x23,0x3c,0x03,0xb0,0x0a,
+0x00,0x43,0x10,0x21,0xac,0x45,0x10,0x00,0x00,0x40,0x18,0x21,0x24,0x05,0x00,0x01,
+0x8c,0x62,0x10,0x00,0x00,0x00,0x00,0x00,0x14,0x45,0xff,0xfd,0x3c,0x02,0xb0,0x0a,
+0x02,0x24,0x88,0x23,0x02,0x22,0x88,0x21,0x8e,0x30,0x00,0x00,0x0c,0x00,0x13,0x99,
+0x02,0x40,0x20,0x21,0x00,0x12,0x18,0x27,0x02,0x03,0x80,0x24,0x00,0x53,0x10,0x04,
+0x02,0x02,0x80,0x25,0xae,0x30,0x00,0x00,0x24,0x03,0x00,0x01,0x8e,0x22,0x10,0x00,
+0x00,0x00,0x00,0x00,0x14,0x43,0xff,0xfd,0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x20,
+0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x28,
+0x30,0x82,0x00,0x03,0x00,0x04,0x18,0x40,0x00,0x62,0x18,0x23,0x3c,0x04,0xb0,0x0a,
+0x00,0x64,0x18,0x21,0xac,0x66,0x00,0x00,0x24,0x04,0x00,0x01,0x8c,0x62,0x10,0x00,
+0x00,0x00,0x00,0x00,0x14,0x44,0xff,0xfd,0x00,0x00,0x00,0x00,0x08,0x00,0x13,0x87,
+0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x21,0x00,0x64,0x10,0x06,0x30,0x42,0x00,0x01,
+0x14,0x40,0x00,0x05,0x00,0x00,0x00,0x00,0x24,0x63,0x00,0x01,0x2c,0x62,0x00,0x20,
+0x14,0x40,0xff,0xf9,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x60,0x10,0x21,
+0x27,0xbd,0xff,0xe0,0x3c,0x03,0xb0,0x05,0xaf,0xb2,0x00,0x18,0xaf,0xb1,0x00,0x14,
+0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x1c,0x00,0x80,0x90,0x21,0x00,0xa0,0x80,0x21,
+0x00,0xc0,0x88,0x21,0x34,0x63,0x02,0x2e,0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00,
+0x30,0x42,0x00,0x01,0x14,0x40,0xff,0xfc,0x24,0x04,0x08,0x24,0x3c,0x05,0x00,0xc0,
+0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x03,0x24,0x04,0x08,0x34,0x3c,0x05,0x00,0xc0,
+0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x03,0x3c,0x02,0xc0,0x00,0x00,0x10,0x1c,0x00,
+0x34,0x42,0x04,0x00,0x3c,0x04,0xb0,0x05,0x3c,0x05,0xb0,0x05,0x24,0x63,0x16,0x09,
+0x02,0x22,0x10,0x21,0x34,0x84,0x04,0x20,0x34,0xa5,0x04,0x24,0x3c,0x06,0xb0,0x05,
+0xac,0x83,0x00,0x00,0x24,0x07,0x00,0x01,0xac,0xa2,0x00,0x00,0x34,0xc6,0x02,0x28,
+0x24,0x02,0x00,0x20,0xae,0x47,0x00,0x3c,0x24,0x04,0x08,0x24,0xa0,0xc2,0x00,0x00,
+0x3c,0x05,0x00,0xc0,0xa2,0x47,0x00,0x11,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x01,
+0x24,0x04,0x08,0x34,0x3c,0x05,0x00,0xc0,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x01,
+0x8f,0xbf,0x00,0x1c,0x8f,0xb2,0x00,0x18,0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x20,0x24,0x02,0x00,0x06,0xac,0x82,0x00,0x0c,0xa0,0x80,0x00,0x50,
+0xac,0x80,0x00,0x00,0xac,0x80,0x00,0x04,0xac,0x80,0x00,0x08,0xac,0x80,0x00,0x14,
+0xac,0x80,0x00,0x18,0xac,0x80,0x00,0x1c,0xa4,0x80,0x00,0x20,0xac,0x80,0x00,0x24,
+0xac,0x80,0x00,0x28,0xac,0x80,0x00,0x2c,0xa0,0x80,0x00,0x30,0xa0,0x80,0x00,0x31,
+0xac,0x80,0x00,0x34,0xac,0x80,0x00,0x38,0xa0,0x80,0x00,0x3c,0xac,0x82,0x00,0x10,
+0xa0,0x80,0x00,0x44,0xac,0x80,0x00,0x48,0x03,0xe0,0x00,0x08,0xac,0x80,0x00,0x4c,
+0x3c,0x04,0xb0,0x06,0x34,0x84,0x80,0x00,0x8c,0x83,0x00,0x00,0x3c,0x02,0x12,0x00,
+0x3c,0x05,0xb0,0x03,0x00,0x62,0x18,0x25,0x34,0xa5,0x00,0x8b,0x24,0x02,0xff,0x80,
+0xac,0x83,0x00,0x00,0x03,0xe0,0x00,0x08,0xa0,0xa2,0x00,0x00,0x3c,0x04,0xb0,0x03,
+0x34,0x84,0x00,0x0b,0x24,0x02,0x00,0x22,0x3c,0x05,0xb0,0x01,0x3c,0x06,0x45,0x67,
+0x3c,0x0a,0xb0,0x09,0xa0,0x82,0x00,0x00,0x34,0xa5,0x00,0x04,0x34,0xc6,0x89,0xaa,
+0x35,0x4a,0x00,0x04,0x24,0x02,0x01,0x23,0x3c,0x0b,0xb0,0x09,0x3c,0x07,0x01,0x23,
+0x3c,0x0c,0xb0,0x09,0x3c,0x01,0xb0,0x01,0xac,0x20,0x00,0x00,0x27,0xbd,0xff,0xe0,
+0xac,0xa0,0x00,0x00,0x35,0x6b,0x00,0x08,0x3c,0x01,0xb0,0x09,0xac,0x26,0x00,0x00,
+0x34,0xe7,0x45,0x66,0xa5,0x42,0x00,0x00,0x35,0x8c,0x00,0x0c,0x24,0x02,0xcd,0xef,
+0x3c,0x0d,0xb0,0x09,0x3c,0x08,0xcd,0xef,0x3c,0x0e,0xb0,0x09,0xad,0x67,0x00,0x00,
+0xaf,0xb7,0x00,0x1c,0xa5,0x82,0x00,0x00,0xaf,0xb6,0x00,0x18,0xaf,0xb5,0x00,0x14,
+0xaf,0xb4,0x00,0x10,0xaf,0xb3,0x00,0x0c,0xaf,0xb2,0x00,0x08,0xaf,0xb1,0x00,0x04,
+0xaf,0xb0,0x00,0x00,0x35,0xad,0x00,0x10,0x35,0x08,0x01,0x22,0x35,0xce,0x00,0x14,
+0x24,0x02,0x89,0xab,0x3c,0x0f,0xb0,0x09,0x3c,0x09,0x89,0xab,0x3c,0x10,0xb0,0x09,
+0x3c,0x11,0xb0,0x09,0x3c,0x12,0xb0,0x09,0x3c,0x13,0xb0,0x09,0x3c,0x14,0xb0,0x09,
+0x3c,0x15,0xb0,0x09,0x3c,0x16,0xb0,0x09,0x3c,0x17,0xb0,0x09,0xad,0xa8,0x00,0x00,
+0x24,0x03,0xff,0xff,0xa5,0xc2,0x00,0x00,0x35,0xef,0x00,0x18,0x35,0x29,0xcd,0xee,
+0x36,0x10,0x00,0x1c,0x36,0x31,0x00,0x20,0x36,0x52,0x00,0x24,0x36,0x73,0x00,0x28,
+0x36,0x94,0x00,0x2c,0x36,0xb5,0x00,0x30,0x36,0xd6,0x00,0x34,0x36,0xf7,0x00,0x38,
+0x24,0x02,0x45,0x67,0xad,0xe9,0x00,0x00,0xa6,0x02,0x00,0x00,0xae,0x23,0x00,0x00,
+0x8f,0xb0,0x00,0x00,0xa6,0x43,0x00,0x00,0x8f,0xb1,0x00,0x04,0xae,0x63,0x00,0x00,
+0x8f,0xb2,0x00,0x08,0xa6,0x83,0x00,0x00,0x8f,0xb3,0x00,0x0c,0xae,0xa3,0x00,0x00,
+0x8f,0xb4,0x00,0x10,0xa6,0xc3,0x00,0x00,0x8f,0xb5,0x00,0x14,0xae,0xe3,0x00,0x00,
+0x7b,0xb6,0x00,0xfc,0x3c,0x18,0xb0,0x09,0x37,0x18,0x00,0x3c,0xa7,0x03,0x00,0x00,
+0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,
+0x34,0x63,0x00,0x20,0x24,0x42,0x51,0x48,0xac,0x62,0x00,0x00,0x8c,0x83,0x00,0x34,
+0x34,0x02,0xff,0xff,0x00,0x43,0x10,0x2a,0x14,0x40,0x01,0x04,0x00,0x80,0x28,0x21,
+0x8c,0x86,0x00,0x08,0x24,0x02,0x00,0x03,0x10,0xc2,0x00,0xf7,0x00,0x00,0x00,0x00,
+0x8c,0xa2,0x00,0x2c,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x4f,0x24,0x02,0x00,0x06,
+0x3c,0x03,0xb0,0x05,0x34,0x63,0x04,0x50,0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00,
+0x30,0x42,0x00,0xff,0x14,0x40,0x00,0xdd,0xac,0xa2,0x00,0x2c,0x24,0x02,0x00,0x01,
+0x10,0xc2,0x00,0xdc,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xc2,0x00,0xca,
+0x00,0x00,0x00,0x00,0x8c,0xa7,0x00,0x04,0x24,0x02,0x00,0x02,0x10,0xe2,0x00,0xc0,
+0x00,0x00,0x00,0x00,0x8c,0xa2,0x00,0x14,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x09,
+0x24,0x02,0x00,0x01,0x3c,0x03,0xb0,0x09,0x34,0x63,0x01,0x60,0x90,0x62,0x00,0x00,
+0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff,0x10,0x40,0x00,0x05,0xac,0xa2,0x00,0x14,
+0x24,0x02,0x00,0x01,0xac,0xa2,0x00,0x00,0x03,0xe0,0x00,0x08,0xac,0xa0,0x00,0x14,
+0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0xd0,0x8c,0x43,0x00,0x00,0x00,0x00,0x00,0x00,
+0x04,0x61,0x00,0x19,0x3c,0x02,0xb0,0x03,0x3c,0x03,0xb0,0x05,0x34,0x63,0x02,0x2e,
+0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x01,0x14,0x40,0x00,0x12,
+0x3c,0x02,0xb0,0x03,0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x42,0x90,0x43,0x00,0x00,
+0x00,0x00,0x00,0x00,0x14,0x60,0x00,0x0c,0x3c,0x02,0xb0,0x03,0x80,0xa2,0x00,0x50,
+0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x08,0x3c,0x02,0xb0,0x03,0x14,0xc0,0x00,0x07,
+0x34,0x42,0x00,0x3f,0x24,0x02,0x00,0x0e,0x24,0x03,0x00,0x01,0xac,0xa2,0x00,0x00,
+0x03,0xe0,0x00,0x08,0xa0,0xa3,0x00,0x50,0x34,0x42,0x00,0x3f,0x90,0x44,0x00,0x00,
+0x24,0x03,0x00,0x01,0x10,0x64,0x00,0x7f,0x3c,0x03,0xb0,0x05,0x80,0xa2,0x00,0x31,
+0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x0a,0x3c,0x02,0xb0,0x06,0x34,0x42,0x80,0x18,
+0x8c,0x43,0x00,0x00,0x3c,0x04,0xf0,0x00,0x3c,0x02,0x80,0x00,0x00,0x64,0x18,0x24,
+0x10,0x62,0x00,0x03,0x24,0x02,0x00,0x09,0x03,0xe0,0x00,0x08,0xac,0xa2,0x00,0x00,
+0x8c,0xa2,0x00,0x40,0x00,0x00,0x00,0x00,0x8c,0x43,0x00,0x00,0x00,0x00,0x00,0x00,
+0x10,0x60,0x00,0x09,0x3c,0x03,0xb0,0x03,0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2c,
+0x8c,0x43,0x00,0x00,0x3c,0x04,0x00,0x02,0x00,0x64,0x18,0x24,0x14,0x60,0xff,0xf2,
+0x24,0x02,0x00,0x10,0x3c,0x03,0xb0,0x03,0x34,0x63,0x02,0x01,0x90,0x62,0x00,0x00,
+0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x80,0x10,0x40,0x00,0x0e,0x00,0x00,0x00,0x00,
+0x8c,0xa3,0x00,0x0c,0x00,0x00,0x00,0x00,0xac,0xa3,0x00,0x10,0x3c,0x02,0xb0,0x03,
+0x90,0x42,0x02,0x01,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x0f,0xac,0xa2,0x00,0x0c,
+0x90,0xa3,0x00,0x0f,0x24,0x02,0x00,0x0d,0x3c,0x01,0xb0,0x03,0x08,0x00,0x14,0xb2,
+0xa0,0x23,0x02,0x01,0x3c,0x02,0xb0,0x09,0x34,0x42,0x01,0x80,0x90,0x44,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x04,0x1e,0x00,0x00,0x03,0x1e,0x03,0x10,0x60,0x00,0x15,
+0xa0,0xa4,0x00,0x44,0x24,0x02,0x00,0x01,0x10,0x62,0x00,0x0b,0x24,0x02,0x00,0x02,
+0x10,0x62,0x00,0x03,0x24,0x03,0x00,0x0d,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,
+0x8c,0xa2,0x00,0x0c,0xac,0xa3,0x00,0x00,0x24,0x03,0x00,0x04,0xac,0xa2,0x00,0x10,
+0x03,0xe0,0x00,0x08,0xac,0xa3,0x00,0x0c,0x24,0x02,0x00,0x0d,0xac,0xa2,0x00,0x00,
+0x24,0x03,0x00,0x04,0x24,0x02,0x00,0x06,0xac,0xa3,0x00,0x10,0x03,0xe0,0x00,0x08,
+0xac,0xa2,0x00,0x0c,0x8c,0xa3,0x00,0x38,0x24,0x04,0x00,0x01,0x10,0x64,0x00,0x2d,
+0x24,0x02,0x00,0x02,0x10,0x60,0x00,0x19,0x00,0x00,0x00,0x00,0x10,0x62,0x00,0x10,
+0x24,0x02,0x00,0x04,0x10,0x62,0x00,0x04,0x00,0x00,0x00,0x00,0xac,0xa0,0x00,0x38,
+0x03,0xe0,0x00,0x08,0xac,0xa0,0x00,0x00,0x10,0xe4,0x00,0x07,0x24,0x02,0x00,0x03,
+0x80,0xa2,0x00,0x30,0x00,0x00,0x00,0x00,0x00,0x02,0x18,0x0b,0xac,0xa3,0x00,0x00,
+0x03,0xe0,0x00,0x08,0xac,0xa0,0x00,0x38,0x08,0x00,0x15,0x04,0xac,0xa2,0x00,0x00,
+0x10,0xe4,0x00,0x02,0x24,0x02,0x00,0x03,0x24,0x02,0x00,0x0c,0xac,0xa2,0x00,0x00,
+0x24,0x02,0x00,0x04,0x03,0xe0,0x00,0x08,0xac,0xa2,0x00,0x38,0x10,0xe4,0x00,0x0e,
+0x3c,0x03,0xb0,0x06,0x34,0x63,0x80,0x24,0x8c,0x62,0x00,0x00,0x00,0x00,0x00,0x00,
+0x30,0x42,0x00,0xff,0x10,0x40,0x00,0x06,0xac,0xa2,0x00,0x18,0x24,0x02,0x00,0x02,
+0xac,0xa2,0x00,0x00,0xac,0xa0,0x00,0x18,0x08,0x00,0x15,0x0d,0x24,0x02,0x00,0x01,
+0x08,0x00,0x15,0x1a,0xac,0xa0,0x00,0x00,0x24,0x02,0x00,0x03,0x08,0x00,0x15,0x1a,
+0xac,0xa2,0x00,0x00,0x24,0x03,0x00,0x0b,0xac,0xa2,0x00,0x38,0x03,0xe0,0x00,0x08,
+0xac,0xa3,0x00,0x00,0x34,0x63,0x02,0x2e,0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00,
+0x30,0x42,0x00,0x01,0x14,0x40,0xff,0x7d,0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x42,
+0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x14,0x60,0xff,0x78,0x00,0x00,0x00,0x00,
+0x10,0xc0,0xff,0x81,0x24,0x02,0x00,0x0e,0x08,0x00,0x14,0xa7,0x00,0x00,0x00,0x00,
+0x80,0xa2,0x00,0x30,0x00,0x00,0x00,0x00,0x14,0x40,0xff,0x3e,0x24,0x02,0x00,0x04,
+0x08,0x00,0x14,0xb2,0x00,0x00,0x00,0x00,0x84,0xa2,0x00,0x20,0x00,0x00,0x00,0x00,
+0x10,0x40,0xff,0x75,0x24,0x02,0x00,0x06,0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2e,
+0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x01,0x30,0x63,0x00,0xff,
+0x00,0x60,0x10,0x21,0x14,0x40,0xff,0x2b,0xa4,0xa3,0x00,0x20,0x08,0x00,0x14,0xb2,
+0x24,0x02,0x00,0x06,0x8c,0xa2,0x00,0x1c,0x00,0x00,0x00,0x00,0x14,0x40,0xff,0x66,
+0x24,0x02,0x00,0x05,0x3c,0x03,0xb0,0x05,0x34,0x63,0x02,0x2c,0x8c,0x62,0x00,0x00,
+0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff,0x10,0x40,0xff,0x1b,0xac,0xa2,0x00,0x1c,
+0x08,0x00,0x14,0xb2,0x24,0x02,0x00,0x05,0x3c,0x02,0xb0,0x05,0x8c,0x42,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x02,0x17,0x42,0x30,0x42,0x00,0x01,0x14,0x40,0xff,0x56,
+0x24,0x02,0x00,0x06,0x08,0x00,0x14,0x60,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x0a,
+0x03,0xe0,0x00,0x08,0xac,0x82,0x00,0x00,0x27,0xbd,0xff,0xd8,0xaf,0xb0,0x00,0x10,
+0x27,0x90,0x86,0x58,0xaf,0xbf,0x00,0x20,0xaf,0xb3,0x00,0x1c,0xaf,0xb2,0x00,0x18,
+0x0c,0x00,0x29,0xd5,0xaf,0xb1,0x00,0x14,0xaf,0x90,0x8f,0xe0,0x48,0x02,0x00,0x00,
+0x0c,0x00,0x13,0xf0,0x00,0x00,0x00,0x00,0x0c,0x00,0x18,0x1f,0x02,0x00,0x20,0x21,
+0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x3a,0x94,0x43,0x00,0x00,0x00,0x00,0x00,0x00,
+0xa3,0x83,0x8f,0xe4,0x0c,0x00,0x00,0x34,0x00,0x00,0x00,0x00,0x0c,0x00,0x13,0xfb,
+0x00,0x00,0x00,0x00,0x27,0x84,0x84,0x98,0x0c,0x00,0x27,0x59,0x00,0x00,0x00,0x00,
+0x93,0x84,0x80,0x10,0x0c,0x00,0x21,0x3f,0x00,0x00,0x00,0x00,0x27,0x84,0x89,0x18,
+0x0c,0x00,0x06,0xe5,0x00,0x00,0x00,0x00,0x0c,0x00,0x01,0x39,0x00,0x00,0x00,0x00,
+0x27,0x84,0x84,0x40,0x0c,0x00,0x13,0xd9,0x00,0x00,0x00,0x00,0x27,0x82,0x89,0x4c,
+0xaf,0x82,0x84,0x80,0x0c,0x00,0x00,0x5f,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,
+0x34,0x63,0x01,0x08,0x3c,0x04,0xb0,0x09,0x3c,0x05,0xb0,0x09,0x8c,0x66,0x00,0x00,
+0x34,0x84,0x01,0x68,0x34,0xa5,0x01,0x40,0x24,0x02,0xc8,0x80,0x24,0x03,0x00,0x0a,
+0xa4,0x82,0x00,0x00,0xa4,0xa3,0x00,0x00,0x3c,0x04,0xb0,0x03,0x8c,0x82,0x00,0x00,
+0x8f,0x85,0x84,0x40,0xaf,0x86,0x84,0x38,0x34,0x42,0x00,0x20,0xac,0x82,0x00,0x00,
+0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x58,0x8c,0x43,0x00,0x00,0x2c,0xa4,0x00,0x11,
+0x34,0x63,0x01,0x00,0xac,0x43,0x00,0x00,0x10,0x80,0xff,0xfa,0x3c,0x02,0xb0,0x03,
+0x3c,0x03,0x80,0x01,0x00,0x05,0x10,0x80,0x24,0x63,0x02,0x00,0x00,0x43,0x10,0x21,
+0x8c,0x44,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x00,0x08,0x00,0x00,0x00,0x00,
+0x27,0x84,0x84,0x98,0x0c,0x00,0x26,0x8e,0x00,0x00,0x00,0x00,0x27,0x84,0x84,0x40,
+0x0c,0x00,0x14,0x52,0x00,0x00,0x00,0x00,0x93,0x83,0x81,0xf1,0x24,0x02,0x00,0x01,
+0x10,0x62,0x00,0x08,0x00,0x00,0x00,0x00,0x8f,0x85,0x84,0x40,0x8f,0x82,0x84,0x74,
+0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01,0xaf,0x82,0x84,0x74,0x08,0x00,0x15,0x9d,
+0x3c,0x02,0xb0,0x03,0x27,0x84,0x84,0x98,0x0c,0x00,0x27,0x0d,0x00,0x00,0x00,0x00,
+0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00,0x27,0x84,0x84,0x98,0x0c,0x00,0x28,0xdd,
+0x00,0x00,0x00,0x00,0xa3,0x82,0x84,0x71,0x8f,0x82,0x84,0x74,0xaf,0x80,0x84,0x40,
+0x24,0x42,0x00,0x01,0xaf,0x82,0x84,0x74,0x08,0x00,0x15,0x9c,0x00,0x00,0x28,0x21,
+0x27,0x84,0x86,0x58,0x0c,0x00,0x19,0x5b,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff,
+0x14,0x40,0x00,0x05,0x3c,0x03,0xb0,0x05,0xaf,0x80,0x84,0x40,0xaf,0x80,0x84,0x44,
+0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00,0x34,0x63,0x04,0x50,0x90,0x62,0x00,0x00,
+0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff,0xaf,0x82,0x84,0x6c,0x14,0x40,0x00,0x20,
+0x24,0x02,0x00,0x01,0x8f,0x84,0x84,0x48,0x00,0x00,0x00,0x00,0x10,0x82,0x00,0x20,
+0x3c,0x03,0xb0,0x09,0x34,0x63,0x01,0x60,0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00,
+0x30,0x42,0x00,0xff,0xaf,0x82,0x84,0x54,0x14,0x40,0x00,0x15,0x24,0x02,0x00,0x01,
+0x24,0x02,0x00,0x02,0x10,0x82,0x00,0x07,0x00,0x00,0x00,0x00,0x24,0x05,0x00,0x03,
+0x24,0x02,0x00,0x01,0xaf,0x82,0x84,0x44,0xaf,0x85,0x84,0x40,0x08,0x00,0x15,0xb6,
+0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2e,0x90,0x43,0x00,0x00,
+0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x01,0x30,0x63,0x00,0xff,0x00,0x60,0x10,0x21,
+0xa7,0x83,0x84,0x60,0x14,0x40,0xff,0xf1,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,
+0xaf,0x82,0x84,0x44,0xaf,0x80,0x84,0x40,0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00,
+0x3c,0x03,0xb0,0x05,0x34,0x63,0x02,0x2c,0x8c,0x62,0x00,0x00,0x00,0x00,0x00,0x00,
+0x30,0x42,0x00,0xff,0xaf,0x82,0x84,0x5c,0x14,0x40,0xff,0xf5,0x24,0x02,0x00,0x01,
+0x08,0x00,0x15,0xe1,0x3c,0x03,0xb0,0x09,0x27,0x84,0x86,0x58,0x0c,0x00,0x1a,0xd1,
+0x00,0x00,0x00,0x00,0x83,0x82,0x84,0x70,0x00,0x00,0x00,0x00,0x14,0x40,0xff,0xec,
+0x24,0x02,0x00,0x02,0x3c,0x03,0xb0,0x05,0x34,0x63,0x04,0x50,0x90,0x62,0x00,0x00,
+0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff,0xaf,0x82,0x84,0x6c,0x14,0x40,0xff,0xe4,
+0x24,0x02,0x00,0x02,0x8f,0x84,0x84,0x48,0x24,0x02,0x00,0x01,0x10,0x82,0x00,0x12,
+0x24,0x02,0x00,0x02,0x10,0x82,0x00,0x04,0x00,0x00,0x00,0x00,0x24,0x05,0x00,0x04,
+0x08,0x00,0x15,0xed,0x24,0x02,0x00,0x02,0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2e,
+0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x01,0x30,0x63,0x00,0xff,
+0x00,0x60,0x10,0x21,0xa7,0x83,0x84,0x60,0x14,0x40,0xff,0xf4,0x00,0x00,0x00,0x00,
+0x08,0x00,0x15,0xfc,0x24,0x02,0x00,0x02,0x3c,0x03,0xb0,0x05,0x34,0x63,0x02,0x2c,
+0x8c,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff,0xaf,0x82,0x84,0x5c,
+0x14,0x40,0xff,0xf7,0x00,0x00,0x00,0x00,0x08,0x00,0x16,0x1d,0x24,0x02,0x00,0x02,
+0x27,0x84,0x89,0x18,0x0c,0x00,0x0b,0x55,0x00,0x00,0x00,0x00,0x8f,0x83,0x84,0x44,
+0xaf,0x82,0x84,0x5c,0x38,0x64,0x00,0x02,0x00,0x04,0x18,0x0a,0xaf,0x83,0x84,0x44,
+0x14,0x40,0xff,0xad,0x24,0x05,0x00,0x05,0x8f,0x82,0x89,0x58,0xaf,0x80,0x84,0x40,
+0x10,0x40,0x00,0x02,0x24,0x04,0x00,0x01,0xaf,0x84,0x84,0x48,0x93,0x82,0x89,0x66,
+0x00,0x00,0x00,0x00,0x10,0x40,0xff,0x6c,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x05,
+0x34,0x42,0x00,0x08,0x8c,0x43,0x00,0x00,0x3c,0x04,0x20,0x00,0x00,0x64,0x18,0x24,
+0x10,0x60,0xff,0x65,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0xa0,
+0x8c,0x43,0x00,0x00,0x3c,0x04,0x80,0x00,0xaf,0x80,0x89,0x40,0x24,0x63,0x00,0x01,
+0xac,0x43,0x00,0x00,0x3c,0x01,0xb0,0x05,0xac,0x24,0x00,0x08,0xaf,0x80,0x89,0x3c,
+0xaf,0x80,0x89,0x44,0xaf,0x80,0x89,0x48,0xaf,0x80,0x89,0x54,0xaf,0x80,0x89,0x4c,
+0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00,0x83,0x82,0x84,0x90,0x00,0x00,0x00,0x00,
+0x10,0x40,0x00,0x02,0x24,0x02,0x00,0x20,0xaf,0x82,0x84,0x5c,0x8f,0x85,0x84,0x5c,
+0x27,0x84,0x89,0x18,0x0c,0x00,0x0d,0x30,0x00,0x00,0x00,0x00,0x00,0x02,0x1e,0x00,
+0xa3,0x82,0x84,0x70,0xaf,0x80,0x84,0x5c,0x10,0x60,0xff,0x8e,0x00,0x00,0x00,0x00,
+0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2e,0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,
+0x30,0x63,0x00,0x01,0x30,0x63,0x00,0xff,0x00,0x60,0x10,0x21,0xa7,0x83,0x84,0x60,
+0x10,0x40,0x00,0x04,0x24,0x04,0x00,0x02,0xaf,0x84,0x84,0x48,0x08,0x00,0x15,0xfd,
+0x00,0x00,0x00,0x00,0x08,0x00,0x15,0xee,0x24,0x05,0x00,0x06,0x27,0x84,0x84,0x40,
+0x27,0x85,0x89,0x18,0x0c,0x00,0x0d,0xfd,0x00,0x00,0x00,0x00,0x8f,0x82,0x84,0x64,
+0xaf,0x80,0x84,0x6c,0x14,0x40,0x00,0x19,0x00,0x40,0x18,0x21,0x8f,0x82,0x84,0x68,
+0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x15,0x24,0x02,0x00,0x02,0x8f,0x83,0x84,0x48,
+0x00,0x00,0x00,0x00,0x10,0x62,0x00,0x0b,0x3c,0x02,0x40,0x00,0x8f,0x83,0x84,0x44,
+0x24,0x02,0x00,0x01,0x10,0x62,0x00,0x02,0x24,0x05,0x00,0x03,0x24,0x05,0x00,0x06,
+0xaf,0x85,0x84,0x40,0x24,0x04,0x00,0x03,0xaf,0x84,0x84,0x48,0x08,0x00,0x15,0xb6,
+0x00,0x00,0x00,0x00,0x34,0x42,0x00,0x14,0x3c,0x01,0xb0,0x05,0xac,0x22,0x00,0x00,
+0xaf,0x80,0x84,0x40,0x08,0x00,0x16,0x96,0x24,0x04,0x00,0x03,0x10,0x60,0x00,0x10,
+0x00,0x00,0x00,0x00,0x27,0x85,0x89,0x18,0x27,0x84,0x84,0x40,0x0c,0x00,0x0e,0x21,
+0x00,0x00,0x00,0x00,0x8f,0x83,0x84,0x44,0x24,0x02,0x00,0x01,0xa3,0x80,0x84,0x70,
+0xaf,0x80,0x84,0x48,0x10,0x62,0x00,0x02,0x24,0x05,0x00,0x03,0x24,0x05,0x00,0x04,
+0xaf,0x85,0x84,0x40,0xaf,0x80,0x84,0x64,0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00,
+0x83,0x82,0x84,0x90,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x04,0x00,0x00,0x00,0x00,
+0x27,0x84,0x89,0x18,0x0c,0x00,0x10,0x69,0x00,0x00,0x00,0x00,0x8f,0x82,0x84,0x44,
+0xa3,0x80,0x84,0x70,0xaf,0x80,0x84,0x40,0xaf,0x80,0x84,0x48,0x14,0x40,0x00,0x03,
+0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0xaf,0x82,0x84,0x44,0xaf,0x80,0x84,0x68,
+0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00,0x27,0x84,0x84,0x40,0x27,0x85,0x89,0x18,
+0x0c,0x00,0x0e,0x21,0x00,0x00,0x00,0x00,0x8f,0x82,0x84,0x44,0xa3,0x80,0x84,0x70,
+0xaf,0x80,0x84,0x40,0xaf,0x80,0x84,0x48,0x14,0x40,0xfe,0xeb,0x00,0x00,0x00,0x00,
+0x24,0x02,0x00,0x02,0xaf,0x82,0x84,0x44,0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00,
+0x27,0x84,0x89,0x18,0x0c,0x00,0x10,0x69,0x00,0x00,0x00,0x00,0x08,0x00,0x16,0xc6,
+0x00,0x00,0x00,0x00,0x27,0x84,0x84,0x98,0x0c,0x00,0x29,0x73,0x00,0x00,0x00,0x00,
+0x08,0x00,0x15,0xc5,0x00,0x00,0x00,0x00,0x0c,0x00,0x24,0x05,0x00,0x00,0x00,0x00,
+0x0c,0x00,0x26,0xff,0x00,0x00,0x00,0x00,0x0c,0x00,0x18,0x11,0x00,0x00,0x00,0x00,
+0x93,0x83,0xbc,0x18,0x00,0x00,0x00,0x00,0x14,0x60,0x00,0x2b,0x3c,0x02,0xb0,0x03,
+0x34,0x42,0x01,0x08,0x8c,0x44,0x00,0x00,0x8f,0x83,0xbc,0x10,0x8f,0x82,0xbc,0x14,
+0x00,0x83,0x18,0x23,0x00,0x43,0x10,0x2b,0x10,0x40,0x00,0x23,0x3c,0x02,0xb0,0x03,
+0x24,0x04,0x05,0xa0,0x34,0x42,0x01,0x18,0x8c,0x42,0x00,0x00,0x0c,0x00,0x06,0xd1,
+0x00,0x00,0x00,0x00,0x24,0x04,0x05,0xa4,0x0c,0x00,0x06,0xd1,0x00,0x02,0x84,0x02,
+0x30,0x51,0xff,0xff,0x24,0x04,0x05,0xa8,0x00,0x02,0x94,0x02,0x0c,0x00,0x06,0xd1,
+0x3a,0x10,0xff,0xff,0x3a,0x31,0xff,0xff,0x30,0x42,0xff,0xff,0x2e,0x10,0x00,0x01,
+0x2e,0x31,0x00,0x01,0x3a,0x52,0xff,0xff,0x02,0x11,0x80,0x25,0x2e,0x52,0x00,0x01,
+0x38,0x42,0xff,0xff,0x02,0x12,0x80,0x25,0x2c,0x42,0x00,0x01,0x02,0x02,0x80,0x25,
+0x16,0x00,0x00,0x02,0x24,0x04,0x00,0x02,0x00,0x00,0x20,0x21,0x0c,0x00,0x05,0x6e,
+0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x34,0x42,0x01,0x08,0x8c,0x43,0x00,0x00,
+0x00,0x00,0x00,0x00,0xaf,0x83,0xbc,0x10,0x0c,0x00,0x01,0xe9,0x00,0x00,0x00,0x00,
+0xaf,0x80,0x84,0x40,0xaf,0x80,0x84,0x74,0x08,0x00,0x15,0x9c,0x00,0x00,0x28,0x21,
+0x27,0x90,0xb4,0x00,0x24,0x11,0x00,0x12,0x8e,0x04,0x00,0x00,0x00,0x00,0x00,0x00,
+0x90,0x82,0x00,0x10,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x03,0x00,0x00,0x00,0x00,
+0x0c,0x00,0x18,0xd0,0x00,0x00,0x00,0x00,0x26,0x31,0xff,0xff,0x06,0x21,0xff,0xf6,
+0x26,0x10,0x00,0x04,0xaf,0x80,0x84,0x40,0x08,0x00,0x15,0xb7,0x00,0x00,0x28,0x21,
+0x3c,0x02,0xb0,0x03,0x34,0x42,0x01,0x08,0x8c,0x44,0x00,0x00,0x8f,0x82,0x84,0x38,
+0x00,0x04,0x19,0xc2,0x00,0x02,0x11,0xc2,0x10,0x62,0xff,0xf6,0x00,0x00,0x00,0x00,
+0x3c,0x02,0xb0,0x03,0x34,0x42,0x01,0x02,0x90,0x43,0x00,0x00,0x3c,0x12,0xb0,0x05,
+0xaf,0x84,0x84,0x38,0x30,0x63,0x00,0xff,0x00,0x03,0x11,0x40,0x00,0x43,0x10,0x23,
+0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x00,0x02,0x99,0x00,0x00,0x00,0x88,0x21,
+0x36,0x52,0x02,0x2c,0x27,0x90,0xb4,0x00,0x8e,0x04,0x00,0x00,0x00,0x00,0x00,0x00,
+0x90,0x83,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x62,0x00,0x03,0x10,0x40,0x00,0x06,
+0x30,0x62,0x00,0x1c,0x14,0x40,0x00,0x04,0x00,0x00,0x00,0x00,0x8f,0x85,0x84,0x38,
+0x0c,0x00,0x1e,0x94,0x02,0x60,0x30,0x21,0x8e,0x42,0x00,0x00,0x00,0x00,0x00,0x00,
+0x30,0x42,0x00,0xff,0x14,0x40,0xff,0xd7,0x00,0x00,0x00,0x00,0x26,0x31,0x00,0x01,
+0x2a,0x22,0x00,0x13,0x14,0x40,0xff,0xec,0x26,0x10,0x00,0x04,0x08,0x00,0x17,0x21,
+0x00,0x00,0x00,0x00,0x8f,0x84,0x84,0x4c,0x27,0x85,0x89,0x18,0x0c,0x00,0x17,0xa4,
+0x00,0x00,0x00,0x00,0x8f,0x83,0x84,0x4c,0x24,0x02,0x00,0x04,0x14,0x62,0xfe,0xa5,
+0x00,0x00,0x00,0x00,0x08,0x00,0x15,0xee,0x24,0x05,0x00,0x05,0x3c,0x02,0xb0,0x03,
+0x34,0x42,0x00,0x3f,0x90,0x44,0x00,0x00,0x24,0x03,0x00,0x01,0x10,0x64,0x00,0x08,
+0x00,0x00,0x00,0x00,0x27,0x84,0x89,0x18,0x0c,0x00,0x24,0x2c,0x00,0x00,0x00,0x00,
+0x24,0x05,0x00,0x05,0xaf,0x85,0x84,0x40,0x08,0x00,0x15,0xb7,0x00,0x00,0x00,0x00,
+0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x14,0x8c,0x44,0x00,0x00,0x0c,0x00,0x24,0x49,
+0x00,0x00,0x00,0x00,0x08,0x00,0x17,0x65,0x24,0x05,0x00,0x05,0x8f,0x82,0x89,0x4c,
+0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x0d,0x00,0x00,0x00,0x00,0x8f,0x84,0xb4,0x40,
+0xaf,0x80,0x89,0x4c,0x94,0x85,0x00,0x14,0x0c,0x00,0x1b,0x66,0x00,0x00,0x00,0x00,
+0x93,0x82,0x8b,0x71,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x02,0x10,0x40,0x00,0x03,
+0x00,0x00,0x00,0x00,0x0c,0x00,0x01,0x57,0x00,0x00,0x20,0x21,0x8f,0x84,0xb4,0x40,
+0x0c,0x00,0x18,0xd0,0x00,0x00,0x00,0x00,0x08,0x00,0x17,0x21,0x00,0x00,0x00,0x00,
+0x3c,0x02,0xff,0x90,0x27,0xbd,0xff,0xe8,0x00,0x80,0x18,0x21,0x34,0x42,0x00,0x01,
+0x27,0x84,0x89,0x18,0x10,0x62,0x00,0x05,0xaf,0xbf,0x00,0x10,0x8f,0xbf,0x00,0x10,
+0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x0c,0x00,0x06,0xe5,
+0x00,0x00,0x00,0x00,0x27,0x84,0x86,0x58,0x0c,0x00,0x18,0x1f,0x00,0x00,0x00,0x00,
+0x27,0x84,0x84,0x40,0x0c,0x00,0x13,0xd9,0x00,0x00,0x00,0x00,0x08,0x00,0x17,0x8b,
+0x00,0x00,0x00,0x00,0x8f,0x82,0x89,0x58,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x05,
+0x00,0x00,0x18,0x21,0x8f,0x82,0x84,0x48,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x02,
+0x00,0x00,0x00,0x00,0x24,0x03,0x00,0x01,0x03,0xe0,0x00,0x08,0x00,0x60,0x10,0x21,
+0x27,0xbd,0xff,0xe0,0x3c,0x06,0xb0,0x03,0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,
+0x34,0xc6,0x00,0x5f,0xaf,0xbf,0x00,0x18,0x90,0xc3,0x00,0x00,0x3c,0x07,0xb0,0x03,
+0x34,0xe7,0x00,0x5d,0x34,0x63,0x00,0x01,0x3c,0x09,0xb0,0x03,0x24,0x02,0x00,0x01,
+0xa0,0xc3,0x00,0x00,0x00,0x80,0x80,0x21,0xa0,0xe2,0x00,0x00,0x00,0xa0,0x88,0x21,
+0x35,0x29,0x00,0x5e,0x00,0xe0,0x40,0x21,0x24,0x04,0x00,0x01,0x91,0x22,0x00,0x00,
+0x91,0x03,0x00,0x00,0x30,0x42,0x00,0x01,0x14,0x83,0x00,0x03,0x30,0x42,0x00,0x01,
+0x14,0x40,0xff,0xfa,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x04,0x12,0x02,0x00,0x2c,
+0x24,0x05,0x0f,0x00,0x24,0x02,0x00,0x06,0x12,0x02,0x00,0x08,0x24,0x05,0x00,0x0f,
+0x3c,0x02,0xb0,0x03,0x34,0x42,0x02,0x00,0xa0,0x50,0x00,0x00,0x8f,0xbf,0x00,0x18,
+0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x24,0x04,0x0c,0x04,
+0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x0f,0x24,0x04,0x0d,0x04,0x24,0x05,0x00,0x0f,
+0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x0f,0x24,0x04,0x08,0x80,0x24,0x05,0x1e,0x00,
+0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x0f,0x24,0x04,0x08,0x8c,0x24,0x05,0x0f,0x00,
+0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x0f,0x24,0x04,0x08,0x24,0x3c,0x05,0x00,0x30,
+0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x02,0x24,0x04,0x08,0x2c,0x3c,0x05,0x00,0x30,
+0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x02,0x24,0x04,0x08,0x34,0x3c,0x05,0x00,0x30,
+0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x02,0x24,0x04,0x08,0x3c,0x3c,0x05,0x00,0x30,
+0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x02,0x08,0x00,0x17,0xc5,0x3c,0x02,0xb0,0x03,
+0x24,0x04,0x08,0x8c,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x04,0x24,0x04,0x08,0x80,
+0x24,0x05,0x1e,0x00,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x04,0x24,0x04,0x0c,0x04,
+0x24,0x05,0x00,0x0f,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x04,0x24,0x04,0x0d,0x04,
+0x24,0x05,0x00,0x0f,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x04,0x24,0x04,0x08,0x24,
+0x3c,0x05,0x00,0x30,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x03,0x24,0x04,0x08,0x2c,
+0x3c,0x05,0x00,0x30,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x03,0x24,0x04,0x08,0x34,
+0x3c,0x05,0x00,0x30,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x02,0x3c,0x05,0x00,0x30,
+0x24,0x06,0x00,0x03,0x0c,0x00,0x13,0x5f,0x24,0x04,0x08,0x3c,0x02,0x20,0x20,0x21,
+0x24,0x05,0x00,0x14,0x0c,0x00,0x13,0xa4,0x24,0x06,0x01,0x07,0x08,0x00,0x17,0xc5,
+0x3c,0x02,0xb0,0x03,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0x73,0x90,0x62,0x00,0x00,
+0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x02,0x14,0x40,0x00,0x04,0x00,0x00,0x00,0x00,
+0xa3,0x80,0x81,0x58,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,
+0xa3,0x82,0x81,0x58,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,
+0x3c,0x02,0x80,0x00,0x00,0x80,0x70,0x21,0x34,0x63,0x00,0x20,0x24,0x42,0x60,0x7c,
+0x3c,0x04,0xb0,0x03,0xac,0x62,0x00,0x00,0x34,0x84,0x00,0x30,0xad,0xc0,0x02,0xb8,
+0x8c,0x83,0x00,0x00,0x24,0x02,0x00,0xff,0xa5,0xc0,0x00,0x0a,0x00,0x00,0x30,0x21,
+0xa7,0x82,0x8f,0xf0,0x27,0x88,0x90,0x00,0xa5,0xc3,0x00,0x08,0x3c,0x07,0xb0,0x08,
+0x30,0xc2,0xff,0xff,0x00,0x02,0x20,0xc0,0x24,0xc3,0x00,0x01,0x00,0x82,0x10,0x21,
+0x00,0x60,0x30,0x21,0x00,0x02,0x10,0x80,0x30,0x63,0xff,0xff,0x00,0x48,0x10,0x21,
+0x00,0x87,0x20,0x21,0x28,0xc5,0x00,0xff,0xac,0x83,0x00,0x00,0x14,0xa0,0xff,0xf4,
+0xa4,0x43,0x00,0x00,0x3c,0x02,0xb0,0x08,0x34,0x03,0xff,0xff,0x25,0xc4,0x00,0x0c,
+0x24,0x0a,0x00,0x02,0x34,0x42,0x07,0xf8,0x3c,0x06,0xb0,0x03,0xa7,0x83,0xb3,0xdc,
+0xac,0x43,0x00,0x00,0xaf,0x84,0xb4,0x00,0x34,0xc6,0x00,0x64,0xa0,0x8a,0x00,0x18,
+0x94,0xc5,0x00,0x00,0x8f,0x82,0xb4,0x00,0x25,0xc4,0x00,0x30,0x24,0x08,0x00,0x03,
+0x3c,0x03,0xb0,0x03,0xa0,0x45,0x00,0x21,0x34,0x63,0x00,0x66,0xaf,0x84,0xb4,0x04,
+0xa0,0x88,0x00,0x18,0x94,0x65,0x00,0x00,0x8f,0x82,0xb4,0x04,0x25,0xc4,0x00,0x54,
+0x25,0xc7,0x00,0x78,0xa0,0x45,0x00,0x21,0xaf,0x84,0xb4,0x08,0xa0,0x88,0x00,0x18,
+0x94,0x65,0x00,0x00,0x8f,0x82,0xb4,0x08,0x25,0xc8,0x00,0x9c,0x24,0x09,0x00,0x01,
+0xa0,0x45,0x00,0x21,0xaf,0x87,0xb4,0x0c,0xa0,0xea,0x00,0x18,0x94,0xc4,0x00,0x00,
+0x8f,0x82,0xb4,0x0c,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0x62,0xa0,0x44,0x00,0x21,
+0xaf,0x88,0xb4,0x10,0xa1,0x09,0x00,0x18,0x94,0x65,0x00,0x00,0x8f,0x82,0xb4,0x10,
+0x25,0xc4,0x00,0xc0,0x3c,0x06,0xb0,0x03,0xa0,0x45,0x00,0x21,0xaf,0x84,0xb4,0x14,
+0xa0,0x89,0x00,0x18,0x94,0x65,0x00,0x00,0x8f,0x82,0xb4,0x14,0x25,0xc4,0x00,0xe4,
+0x34,0xc6,0x00,0x60,0xa0,0x45,0x00,0x21,0xaf,0x84,0xb4,0x18,0xa0,0x80,0x00,0x18,
+0x94,0xc5,0x00,0x00,0x8f,0x82,0xb4,0x18,0x25,0xc3,0x01,0x08,0x25,0xc7,0x01,0x2c,
+0xa0,0x45,0x00,0x21,0xaf,0x83,0xb4,0x1c,0xa0,0x60,0x00,0x18,0x94,0xc8,0x00,0x00,
+0x8f,0x82,0xb4,0x1c,0x25,0xc4,0x01,0x50,0x25,0xc5,0x01,0x74,0xa0,0x48,0x00,0x21,
+0x25,0xc6,0x01,0x98,0x25,0xc9,0x01,0xbc,0x25,0xca,0x01,0xe0,0x25,0xcb,0x02,0x04,
+0x25,0xcc,0x02,0x28,0x25,0xcd,0x02,0x4c,0x24,0x02,0x00,0x10,0x3c,0x03,0xb0,0x03,
+0xaf,0x87,0xb4,0x20,0x34,0x63,0x00,0x38,0xa0,0xe0,0x00,0x18,0xaf,0x84,0xb4,0x24,
+0xa0,0x80,0x00,0x18,0xaf,0x85,0xb4,0x28,0xa0,0xa0,0x00,0x18,0xaf,0x86,0xb4,0x2c,
+0xa0,0xc0,0x00,0x18,0xaf,0x89,0xb4,0x30,0xa1,0x20,0x00,0x18,0xaf,0x8a,0xb4,0x34,
+0xa1,0x40,0x00,0x18,0xaf,0x8b,0xb4,0x38,0xa1,0x60,0x00,0x18,0xaf,0x8c,0xb4,0x3c,
+0xa1,0x80,0x00,0x18,0xaf,0x8d,0xb4,0x40,0xa1,0xa2,0x00,0x18,0x94,0x64,0x00,0x00,
+0x8f,0x82,0xb4,0x40,0x25,0xc5,0x02,0x70,0x3c,0x03,0xb0,0x03,0xa0,0x44,0x00,0x21,
+0x24,0x02,0x00,0x11,0xaf,0x85,0xb4,0x44,0x34,0x63,0x00,0x6e,0xa0,0xa2,0x00,0x18,
+0x94,0x64,0x00,0x00,0x8f,0x82,0xb4,0x44,0x25,0xc5,0x02,0x94,0x3c,0x03,0xb0,0x03,
+0xa0,0x44,0x00,0x21,0x24,0x02,0x00,0x12,0xaf,0x85,0xb4,0x48,0x34,0x63,0x00,0x6c,
+0xa0,0xa2,0x00,0x18,0x94,0x64,0x00,0x00,0x8f,0x82,0xb4,0x48,0x24,0x05,0xff,0xff,
+0x24,0x07,0x00,0x01,0xa0,0x44,0x00,0x21,0x24,0x06,0x00,0x12,0x27,0x84,0xb4,0x00,
+0x8c,0x82,0x00,0x00,0x24,0xc6,0xff,0xff,0xa0,0x40,0x00,0x04,0x8c,0x83,0x00,0x00,
+0xa4,0x45,0x00,0x00,0xa4,0x45,0x00,0x02,0xa0,0x60,0x00,0x0a,0x8c,0x82,0x00,0x00,
+0xa4,0x65,0x00,0x06,0xa4,0x65,0x00,0x08,0xa0,0x40,0x00,0x10,0x8c,0x83,0x00,0x00,
+0xa4,0x45,0x00,0x0c,0xa4,0x45,0x00,0x0e,0xa0,0x60,0x00,0x12,0x8c,0x82,0x00,0x00,
+0x00,0x00,0x00,0x00,0xa0,0x40,0x00,0x16,0x8c,0x83,0x00,0x00,0xa4,0x45,0x00,0x14,
+0xa0,0x67,0x00,0x17,0x8c,0x82,0x00,0x00,0x24,0x84,0x00,0x04,0xa0,0x40,0x00,0x20,
+0x04,0xc1,0xff,0xe7,0xac,0x40,0x00,0x1c,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,
+0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x00,0x34,0x42,0x00,0x20,0x24,0x63,0x63,0x40,
+0xac,0x43,0x00,0x00,0x90,0x82,0x00,0x10,0x00,0x80,0x60,0x21,0x10,0x40,0x00,0x56,
+0x00,0x00,0x70,0x21,0x97,0x82,0x8f,0xf0,0x94,0x8a,0x00,0x0c,0x27,0x87,0x90,0x00,
+0x00,0x02,0x40,0xc0,0x01,0x02,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x47,0x10,0x21,
+0x90,0x8b,0x00,0x18,0xa4,0x4a,0x00,0x00,0x94,0x83,0x00,0x0e,0x39,0x64,0x00,0x10,
+0x2c,0x84,0x00,0x01,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x34,0x85,0x00,0x02,
+0x39,0x63,0x00,0x11,0x00,0x83,0x28,0x0b,0x34,0xa3,0x00,0x08,0x39,0x64,0x00,0x12,
+0x00,0x02,0x10,0x80,0x00,0xa4,0x18,0x0b,0x00,0x47,0x10,0x21,0x94,0x49,0x00,0x04,
+0x34,0x64,0x00,0x20,0x00,0x6b,0x20,0x0b,0x34,0x83,0x00,0x40,0x39,0x62,0x00,0x01,
+0x00,0x82,0x18,0x0b,0x00,0x09,0x30,0xc0,0x34,0x64,0x00,0x80,0x00,0xc9,0x28,0x21,
+0x39,0x62,0x00,0x02,0x00,0x60,0x68,0x21,0x00,0x82,0x68,0x0a,0x00,0x05,0x28,0x80,
+0x3c,0x02,0xb0,0x08,0x00,0xa7,0x28,0x21,0x00,0xc2,0x30,0x21,0x01,0x02,0x40,0x21,
+0x34,0x03,0xff,0xff,0x35,0xa4,0x01,0x00,0x39,0x62,0x00,0x03,0x2d,0x67,0x00,0x13,
+0xad,0x0a,0x00,0x00,0xa4,0xa3,0x00,0x00,0xac,0xc3,0x00,0x00,0xa7,0x89,0x8f,0xf0,
+0x10,0xe0,0x00,0x0f,0x00,0x82,0x68,0x0a,0x3c,0x03,0x80,0x01,0x00,0x0b,0x10,0x80,
+0x24,0x63,0x02,0x44,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x80,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0x60,
+0x94,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x14,0x00,0x00,0x02,0x74,0x03,
+0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x3a,0x94,0x44,0x00,0x00,0x93,0x83,0x8f,0xe4,
+0x91,0x82,0x00,0x21,0x01,0xc4,0x20,0x21,0x91,0x85,0x00,0x10,0x00,0x04,0x24,0x00,
+0x00,0x62,0x18,0x21,0x00,0x04,0x74,0x03,0x00,0x6e,0x18,0x23,0x00,0x65,0x10,0x2a,
+0x00,0xa2,0x18,0x0a,0x00,0x0d,0x24,0x00,0x3c,0x02,0xb0,0x06,0x24,0x05,0xff,0xff,
+0x00,0x64,0x18,0x25,0x34,0x42,0x80,0x20,0xac,0x43,0x00,0x00,0xa5,0x85,0x00,0x0e,
+0xa1,0x80,0x00,0x10,0xa5,0x85,0x00,0x0c,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,
+0x3c,0x03,0xb0,0x03,0x08,0x00,0x19,0x14,0x34,0x63,0x00,0x62,0x3c,0x03,0xb0,0x03,
+0x08,0x00,0x19,0x14,0x34,0x63,0x00,0x64,0x3c,0x03,0xb0,0x03,0x08,0x00,0x19,0x14,
+0x34,0x63,0x00,0x66,0x3c,0x03,0xb0,0x03,0x08,0x00,0x19,0x14,0x34,0x63,0x00,0x38,
+0x3c,0x03,0xb0,0x03,0x08,0x00,0x19,0x14,0x34,0x63,0x00,0x6e,0x3c,0x03,0xb0,0x03,
+0x08,0x00,0x19,0x14,0x34,0x63,0x00,0x6c,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,
+0x34,0x63,0x00,0x20,0x24,0x42,0x65,0x08,0x00,0x05,0x28,0x40,0xac,0x62,0x00,0x00,
+0x00,0xa6,0x28,0x21,0x2c,0xe2,0x00,0x10,0x14,0x80,0x00,0x06,0x00,0x00,0x18,0x21,
+0x10,0x40,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0xe0,0x18,0x21,0x03,0xe0,0x00,0x08,
+0x00,0x60,0x10,0x21,0x24,0x02,0x00,0x20,0x10,0xe2,0x00,0x06,0x2c,0xe4,0x00,0x10,
+0x24,0xa2,0x00,0x01,0x10,0x80,0xff,0xf9,0x00,0x02,0x11,0x00,0x08,0x00,0x19,0x4f,
+0x00,0x47,0x18,0x21,0x08,0x00,0x19,0x4f,0x24,0xa3,0x00,0x50,0x27,0xbd,0xff,0xc8,
+0xaf,0xb3,0x00,0x1c,0xaf,0xb2,0x00,0x18,0xaf,0xb1,0x00,0x14,0xaf,0xbf,0x00,0x30,
+0xaf,0xb7,0x00,0x2c,0xaf,0xb6,0x00,0x28,0xaf,0xb5,0x00,0x24,0xaf,0xb4,0x00,0x20,
+0xaf,0xb0,0x00,0x10,0x00,0x80,0x88,0x21,0x84,0x84,0x00,0x08,0x3c,0x05,0xb0,0x03,
+0x3c,0x02,0x80,0x00,0x34,0xa5,0x00,0x20,0x24,0x42,0x65,0x6c,0x3c,0x03,0xb0,0x06,
+0x00,0x04,0x20,0x80,0xac,0xa2,0x00,0x00,0x00,0x83,0x20,0x21,0x3c,0x06,0xb0,0x06,
+0x8c,0x82,0x00,0x00,0x34,0xc6,0x80,0x24,0x8c,0x88,0x00,0x00,0x8c,0xc4,0x00,0x00,
+0x96,0x25,0x00,0x08,0x30,0x52,0xff,0xff,0x00,0x08,0x44,0x02,0x34,0x84,0x01,0x00,
+0x3c,0x02,0xb0,0x00,0x00,0x08,0x18,0xc0,0x00,0x12,0x3a,0x00,0xac,0xc4,0x00,0x00,
+0x00,0xe2,0x38,0x21,0xae,0x32,0x02,0xb8,0x00,0x68,0x18,0x21,0x24,0xa5,0x00,0x02,
+0x8c,0xf6,0x00,0x00,0x30,0xa5,0x01,0xff,0x8c,0xf4,0x00,0x04,0x27,0x86,0x90,0x00,
+0x00,0x03,0x18,0x80,0x00,0x12,0x98,0xc0,0xa6,0x25,0x00,0x08,0x00,0x66,0x18,0x21,
+0x02,0x72,0x10,0x21,0x94,0x65,0x00,0x00,0x00,0x02,0x48,0x80,0x01,0x26,0x30,0x21,
+0x24,0x02,0xff,0xff,0x00,0x14,0x1a,0x02,0x27,0x84,0x90,0x10,0xa4,0xc2,0x00,0x02,
+0x30,0x63,0x00,0x1f,0x24,0x02,0x00,0x10,0x01,0x24,0x20,0x21,0xa4,0xc8,0x00,0x04,
+0x8c,0xf0,0x00,0x08,0xa6,0x23,0x00,0x06,0xa6,0x25,0x00,0x0a,0xa0,0x82,0x00,0x06,
+0x86,0x25,0x00,0x06,0x27,0x82,0x90,0x04,0x01,0x22,0x10,0x21,0x24,0x03,0x00,0x13,
+0x10,0xa3,0x00,0xee,0xac,0x47,0x00,0x18,0x3c,0x03,0xb0,0x03,0x34,0x63,0x01,0x00,
+0xa6,0x20,0x00,0x02,0x3c,0x02,0xb0,0x03,0x90,0x64,0x00,0x00,0x34,0x42,0x01,0x08,
+0x8c,0x45,0x00,0x00,0x00,0x10,0x1b,0xc2,0x00,0x04,0x20,0x82,0x30,0x63,0x00,0x01,
+0xac,0xc5,0x00,0x08,0x10,0x60,0x00,0xc7,0x30,0x97,0x00,0x01,0x00,0x10,0x16,0x82,
+0x30,0x46,0x00,0x01,0x00,0x10,0x12,0x02,0x00,0x10,0x19,0xc2,0x00,0x10,0x26,0x02,
+0x00,0x10,0x2e,0x42,0x30,0x48,0x00,0x7f,0x24,0x02,0x00,0x01,0x30,0x75,0x00,0x01,
+0x30,0x84,0x00,0x01,0x10,0xc2,0x00,0xb3,0x30,0xa3,0x00,0x01,0x00,0x60,0x28,0x21,
+0x0c,0x00,0x19,0x42,0x01,0x00,0x38,0x21,0x02,0x72,0x18,0x21,0x00,0x03,0x18,0x80,
+0x2c,0x46,0x00,0x54,0x27,0x85,0x90,0x10,0x27,0x84,0x90,0x08,0x00,0x06,0x10,0x0a,
+0x00,0x65,0x28,0x21,0x26,0xa6,0x00,0x02,0x00,0x64,0x18,0x21,0xa0,0xa2,0x00,0x02,
+0xa0,0x66,0x00,0x06,0xa0,0x62,0x00,0x07,0xa0,0xa2,0x00,0x01,0x02,0x72,0x28,0x21,
+0x00,0x05,0x28,0x80,0x27,0x82,0x90,0x04,0x00,0xa2,0x58,0x21,0x8d,0x64,0x00,0x18,
+0x00,0x10,0x15,0xc2,0x30,0x42,0x00,0x01,0x8c,0x83,0x00,0x0c,0x27,0x84,0x90,0x20,
+0x00,0xa4,0x48,0x21,0xa6,0x22,0x00,0x00,0xa6,0x36,0x00,0x04,0x8d,0x26,0x00,0x00,
+0x00,0x03,0x19,0x42,0x3c,0x02,0xff,0xef,0x34,0x42,0xff,0xff,0x30,0x63,0x00,0x01,
+0x00,0xc2,0x40,0x24,0x00,0x03,0x1d,0x00,0x01,0x03,0x40,0x25,0x00,0x08,0x15,0x02,
+0x00,0x14,0x19,0x82,0x00,0x14,0x25,0x82,0x00,0x10,0x34,0x42,0x00,0x10,0x3c,0x82,
+0x00,0x10,0x2c,0x02,0x30,0x42,0x00,0x01,0x30,0xcd,0x00,0x01,0x30,0x6c,0x00,0x01,
+0x30,0xe6,0x00,0x01,0x30,0x8a,0x00,0x03,0x32,0x94,0x00,0x07,0x30,0xa5,0x00,0x01,
+0xad,0x28,0x00,0x00,0x10,0x40,0x00,0x0b,0x32,0x07,0x00,0x7f,0x8d,0x64,0x00,0x18,
+0x3c,0x03,0xff,0xf0,0x34,0x63,0xff,0xff,0x8c,0x82,0x00,0x0c,0x01,0x03,0x18,0x24,
+0x00,0x02,0x13,0x82,0x30,0x42,0x00,0x0f,0x00,0x02,0x14,0x00,0x00,0x62,0x18,0x25,
+0xad,0x23,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xc2,0x00,0x6a,0x00,0x00,0x00,0x00,
+0x15,0x80,0x00,0x03,0x00,0x00,0x00,0x00,0x15,0x40,0x00,0x5b,0x24,0x02,0x00,0x01,
+0x96,0x22,0x00,0x04,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x04,0xa6,0x22,0x00,0x04,
+0x00,0xa0,0x20,0x21,0x0c,0x00,0x19,0x42,0x01,0xa0,0x28,0x21,0x02,0x72,0x18,0x21,
+0x00,0x03,0x40,0x80,0x2c,0x45,0x00,0x54,0x27,0x84,0x90,0x10,0x01,0x04,0x20,0x21,
+0x00,0x05,0x10,0x0a,0xa0,0x82,0x00,0x00,0xa0,0x80,0x00,0x04,0xa0,0x80,0x00,0x05,
+0x96,0x23,0x00,0x04,0x27,0x82,0x90,0x00,0x01,0x02,0x10,0x21,0xa4,0x43,0x00,0x06,
+0x27,0x82,0x90,0x04,0x92,0x26,0x00,0x01,0x01,0x02,0x10,0x21,0x8c,0x45,0x00,0x18,
+0x27,0x83,0x90,0x20,0x01,0x03,0x18,0x21,0xa0,0x60,0x00,0x00,0xa0,0x86,0x00,0x07,
+0x94,0xa2,0x00,0x10,0x24,0x03,0x00,0x04,0x30,0x42,0x00,0x0f,0x10,0x43,0x00,0x36,
+0x24,0xa5,0x00,0x10,0x94,0xa3,0x00,0x16,0x27,0x87,0x90,0x18,0x01,0x07,0x10,0x21,
+0xa4,0x43,0x00,0x02,0x94,0xa2,0x00,0x04,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x01,
+0x14,0x40,0x00,0x24,0x02,0x72,0x20,0x21,0x94,0xa2,0x00,0x00,0x24,0x03,0x00,0xa4,
+0x30,0x42,0x00,0xff,0x10,0x43,0x00,0x1f,0x00,0x00,0x00,0x00,0x94,0xa2,0x00,0x00,
+0x24,0x03,0x00,0x88,0x30,0x42,0x00,0x88,0x10,0x43,0x00,0x14,0x02,0x72,0x18,0x21,
+0x27,0x84,0x90,0x20,0x00,0x03,0x18,0x80,0x00,0x64,0x18,0x21,0x8c,0x62,0x00,0x00,
+0x3c,0x04,0x00,0x80,0x00,0x44,0x10,0x25,0xac,0x62,0x00,0x00,0x02,0x72,0x10,0x21,
+0x00,0x02,0x10,0x80,0x00,0x47,0x10,0x21,0xa0,0x54,0x00,0x00,0x8f,0xbf,0x00,0x30,
+0x7b,0xb6,0x01,0x7c,0x7b,0xb4,0x01,0x3c,0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc,
+0x24,0x02,0x00,0x01,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x38,0x94,0xa2,0x00,0x18,
+0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x60,0x10,0x40,0xff,0xe9,0x02,0x72,0x18,0x21,
+0x02,0x72,0x20,0x21,0x27,0x82,0x90,0x20,0x00,0x04,0x20,0x80,0x00,0x82,0x20,0x21,
+0x8c,0x83,0x00,0x00,0x3c,0x02,0xff,0x7f,0x34,0x42,0xff,0xff,0x00,0x62,0x18,0x24,
+0x08,0x00,0x1a,0x37,0xac,0x83,0x00,0x00,0x27,0x87,0x90,0x18,0x01,0x07,0x10,0x21,
+0x08,0x00,0x1a,0x21,0xa4,0x40,0x00,0x02,0x11,0x42,0x00,0x07,0x00,0x00,0x00,0x00,
+0x2d,0x42,0x00,0x02,0x14,0x40,0xff,0xa7,0x00,0xa0,0x20,0x21,0x96,0x22,0x00,0x04,
+0x08,0x00,0x19,0xff,0x24,0x42,0x00,0x0c,0x96,0x22,0x00,0x04,0x08,0x00,0x19,0xff,
+0x24,0x42,0x00,0x08,0x16,0xe6,0xff,0x96,0x3c,0x02,0xff,0xfb,0x8d,0x63,0x00,0x18,
+0x34,0x42,0xff,0xff,0x02,0x02,0x10,0x24,0xac,0x62,0x00,0x08,0x08,0x00,0x19,0xf8,
+0x00,0x00,0x30,0x21,0x16,0xe6,0xff,0x4e,0x00,0x60,0x28,0x21,0x3c,0x02,0xfb,0xff,
+0x34,0x42,0xff,0xff,0x02,0x02,0x10,0x24,0xac,0xe2,0x00,0x08,0x08,0x00,0x19,0xb7,
+0x00,0x00,0x30,0x21,0x93,0x87,0xbb,0x14,0x00,0x10,0x1e,0x42,0x00,0x10,0x26,0x82,
+0x27,0x82,0x90,0x08,0x2c,0xe5,0x00,0x0c,0x01,0x22,0x48,0x21,0x30,0x63,0x00,0x01,
+0x30,0x86,0x00,0x01,0x14,0xa0,0x00,0x06,0x00,0xe0,0x40,0x21,0x00,0x03,0x10,0x40,
+0x00,0x46,0x10,0x21,0x00,0x02,0x11,0x00,0x00,0xe2,0x10,0x21,0x24,0x48,0x00,0x04,
+0x02,0x72,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x84,0x90,0x10,0x27,0x83,0x90,0x08,
+0x00,0x44,0x20,0x21,0x00,0x43,0x10,0x21,0xa1,0x28,0x00,0x07,0xa0,0x40,0x00,0x06,
+0xa0,0x80,0x00,0x02,0x08,0x00,0x19,0xc7,0xa0,0x80,0x00,0x01,0x24,0x02,0x00,0x01,
+0xa6,0x22,0x00,0x02,0x0c,0x00,0x01,0xc2,0x00,0xe0,0x20,0x21,0x08,0x00,0x1a,0x3b,
+0x00,0x00,0x00,0x00,0x30,0xa7,0xff,0xff,0x00,0x07,0x18,0xc0,0x00,0x67,0x18,0x21,
+0x3c,0x06,0xb0,0x03,0x3c,0x02,0x80,0x00,0x24,0x42,0x6a,0x44,0x27,0x85,0x90,0x10,
+0x00,0x03,0x18,0x80,0x34,0xc6,0x00,0x20,0x00,0x65,0x18,0x21,0xac,0xc2,0x00,0x00,
+0x80,0x62,0x00,0x07,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x29,0x00,0x80,0x28,0x21,
+0x90,0x82,0x00,0x16,0x00,0x00,0x00,0x00,0x34,0x42,0x00,0x02,0x30,0x43,0x00,0x01,
+0x14,0x60,0x00,0x02,0xa0,0x82,0x00,0x16,0xa0,0x80,0x00,0x17,0x90,0xa2,0x00,0x04,
+0x3c,0x03,0xb0,0x03,0x27,0x86,0x90,0x00,0x14,0x40,0x00,0x06,0x34,0x63,0x00,0x20,
+0x24,0x02,0x00,0x01,0xa0,0xa2,0x00,0x04,0xa4,0xa7,0x00,0x02,0x03,0xe0,0x00,0x08,
+0xa4,0xa7,0x00,0x00,0x94,0xa4,0x00,0x02,0x3c,0x02,0x80,0x01,0x24,0x42,0x82,0x6c,
+0xac,0x62,0x00,0x00,0x00,0x04,0x18,0xc0,0x00,0x64,0x18,0x21,0x00,0x03,0x18,0x80,
+0x00,0x66,0x18,0x21,0x94,0x62,0x00,0x04,0xa4,0x67,0x00,0x02,0x3c,0x03,0xb0,0x08,
+0x00,0x02,0x20,0xc0,0x00,0x82,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x46,0x10,0x21,
+0x00,0x83,0x20,0x21,0xa4,0x47,0x00,0x00,0xac,0x87,0x00,0x00,0x90,0xa2,0x00,0x04,
+0xa4,0xa7,0x00,0x02,0x24,0x42,0x00,0x01,0x03,0xe0,0x00,0x08,0xa0,0xa2,0x00,0x04,
+0x90,0x82,0x00,0x16,0x24,0x85,0x00,0x06,0x34,0x42,0x00,0x01,0x30,0x43,0x00,0x02,
+0x14,0x60,0xff,0xda,0xa0,0x82,0x00,0x16,0x24,0x02,0x00,0x01,0x08,0x00,0x1a,0xa7,
+0xa0,0x82,0x00,0x17,0x27,0xbd,0xff,0xe8,0xaf,0xbf,0x00,0x10,0x00,0x80,0x38,0x21,
+0x84,0x84,0x00,0x02,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x3c,0x0a,0xb0,0x06,
+0x34,0x63,0x00,0x20,0x24,0x42,0x6b,0x44,0x3c,0x0b,0xb0,0x08,0x27,0x89,0x90,0x00,
+0x34,0x0c,0xff,0xff,0x35,0x4a,0x80,0x20,0x10,0x80,0x00,0x30,0xac,0x62,0x00,0x00,
+0x97,0x82,0x8f,0xf0,0x94,0xe6,0x02,0xba,0x00,0x02,0x18,0xc0,0x00,0x6b,0x28,0x21,
+0xac,0xa6,0x00,0x00,0x8c,0xe4,0x02,0xb8,0x00,0x62,0x18,0x21,0x00,0x03,0x18,0x80,
+0x00,0x04,0x10,0xc0,0x00,0x44,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x49,0x10,0x21,
+0x94,0x48,0x00,0x04,0x00,0x69,0x18,0x21,0xa4,0x66,0x00,0x00,0x00,0x08,0x28,0xc0,
+0x00,0xab,0x10,0x21,0xac,0x4c,0x00,0x00,0x8c,0xe4,0x02,0xb8,0x27,0x82,0x90,0x04,
+0x00,0xa8,0x28,0x21,0x00,0x04,0x18,0xc0,0x00,0x64,0x18,0x21,0x00,0x03,0x18,0x80,
+0x00,0x62,0x10,0x21,0x8c,0x46,0x00,0x18,0x27,0x84,0x90,0x10,0x00,0x64,0x18,0x21,
+0x8c,0xc2,0x00,0x00,0x80,0x67,0x00,0x06,0x00,0x05,0x28,0x80,0x30,0x42,0xff,0xff,
+0x00,0x47,0x10,0x21,0x30,0x43,0x00,0xff,0x00,0x03,0x18,0x2b,0x00,0x02,0x12,0x02,
+0x00,0x43,0x10,0x21,0x3c,0x04,0x00,0x04,0x00,0xa9,0x28,0x21,0x00,0x44,0x10,0x25,
+0xa4,0xac,0x00,0x00,0xad,0x42,0x00,0x00,0xa7,0x88,0x8f,0xf0,0x8f,0xbf,0x00,0x10,
+0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x84,0xe3,0x00,0x06,
+0x27,0x82,0xb4,0x00,0x94,0xe5,0x02,0xba,0x00,0x03,0x18,0x80,0x00,0x62,0x18,0x21,
+0x8c,0x64,0x00,0x00,0x0c,0x00,0x1a,0x91,0x00,0x00,0x00,0x00,0x08,0x00,0x1b,0x0b,
+0x00,0x00,0x00,0x00,0x94,0x88,0x00,0x00,0x00,0x80,0x58,0x21,0x27,0x8a,0x90,0x00,
+0x00,0x08,0x18,0xc0,0x00,0x68,0x18,0x21,0x3c,0x04,0xb0,0x03,0x00,0x03,0x18,0x80,
+0x3c,0x02,0x80,0x00,0x00,0x6a,0x18,0x21,0x34,0x84,0x00,0x20,0x24,0x42,0x6c,0x64,
+0x30,0xa5,0xff,0xff,0xac,0x82,0x00,0x00,0x94,0x67,0x00,0x02,0x11,0x05,0x00,0x35,
+0x24,0x04,0x00,0x01,0x91,0x66,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x86,0x10,0x2a,
+0x10,0x40,0x00,0x10,0x00,0xc0,0x48,0x21,0x3c,0x0d,0xb0,0x03,0x01,0x40,0x60,0x21,
+0x35,0xad,0x00,0x20,0x10,0xe5,0x00,0x0d,0x24,0x84,0x00,0x01,0x00,0x07,0x10,0xc0,
+0x00,0x47,0x10,0x21,0x00,0x02,0x10,0x80,0x01,0x20,0x30,0x21,0x00,0x4a,0x10,0x21,
+0x00,0x86,0x18,0x2a,0x00,0xe0,0x40,0x21,0x94,0x47,0x00,0x02,0x14,0x60,0xff,0xf5,
+0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x00,0x10,0x21,0x00,0x08,0x20,0xc0,
+0x00,0x88,0x20,0x21,0x24,0xc2,0xff,0xff,0x00,0x04,0x20,0x80,0xa1,0x62,0x00,0x04,
+0x00,0x8c,0x20,0x21,0x94,0x83,0x00,0x04,0x00,0x07,0x10,0xc0,0x00,0x47,0x10,0x21,
+0x00,0x02,0x10,0x80,0x00,0x4c,0x10,0x21,0x00,0x03,0x28,0xc0,0x94,0x46,0x00,0x02,
+0x00,0xa3,0x18,0x21,0x00,0x03,0x18,0x80,0x00,0x6c,0x18,0x21,0xa4,0x66,0x00,0x00,
+0xa4,0x86,0x00,0x02,0x95,0x64,0x00,0x02,0x3c,0x03,0xb0,0x08,0x3c,0x02,0x80,0x01,
+0x00,0xa3,0x28,0x21,0x24,0x42,0x82,0x6c,0xad,0xa2,0x00,0x00,0x10,0x87,0x00,0x03,
+0xac,0xa6,0x00,0x00,0x03,0xe0,0x00,0x08,0x24,0x02,0x00,0x01,0x08,0x00,0x1b,0x59,
+0xa5,0x68,0x00,0x02,0x91,0x62,0x00,0x04,0xa5,0x67,0x00,0x00,0x24,0x42,0xff,0xff,
+0x30,0x43,0x00,0xff,0x14,0x60,0xff,0xf7,0xa1,0x62,0x00,0x04,0x24,0x02,0xff,0xff,
+0x08,0x00,0x1b,0x59,0xa5,0x62,0x00,0x02,0x00,0x05,0x40,0xc0,0x01,0x05,0x30,0x21,
+0x27,0xbd,0xff,0xd8,0x00,0x06,0x30,0x80,0x27,0x82,0x90,0x04,0xaf,0xb2,0x00,0x18,
+0xaf,0xb1,0x00,0x14,0xaf,0xbf,0x00,0x20,0xaf,0xb3,0x00,0x1c,0xaf,0xb0,0x00,0x10,
+0x00,0xc2,0x10,0x21,0x8c,0x47,0x00,0x18,0x00,0xa0,0x90,0x21,0x3c,0x02,0x80,0x00,
+0x3c,0x05,0xb0,0x03,0x34,0xa5,0x00,0x20,0x24,0x42,0x6d,0x98,0xac,0xa2,0x00,0x00,
+0x27,0x83,0x90,0x10,0x00,0xc3,0x30,0x21,0x8c,0xe2,0x00,0x00,0x80,0xc5,0x00,0x06,
+0x00,0x80,0x88,0x21,0x30,0x42,0xff,0xff,0x00,0x45,0x10,0x21,0x30,0x43,0x00,0xff,
+0x10,0x60,0x00,0x02,0x00,0x02,0x12,0x02,0x24,0x42,0x00,0x01,0x30,0x53,0x00,0xff,
+0x01,0x12,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21,
+0x80,0x44,0x00,0x07,0x00,0x00,0x00,0x00,0x10,0x80,0x00,0x4b,0x26,0x24,0x00,0x06,
+0x32,0x50,0xff,0xff,0x02,0x20,0x20,0x21,0x0c,0x00,0x1b,0x19,0x02,0x00,0x28,0x21,
+0x92,0x22,0x00,0x10,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x2e,0x3c,0x03,0xb0,0x08,
+0x3c,0x09,0x80,0x01,0x27,0x88,0x90,0x00,0xa6,0x32,0x00,0x0c,0x00,0x10,0x20,0xc0,
+0x00,0x90,0x20,0x21,0x00,0x04,0x20,0x80,0x00,0x88,0x20,0x21,0x94,0x82,0x00,0x04,
+0x3c,0x03,0xb0,0x08,0x3c,0x07,0xb0,0x03,0x00,0x02,0x28,0xc0,0x00,0xa2,0x10,0x21,
+0x00,0x02,0x10,0x80,0x00,0x48,0x10,0x21,0x00,0xa3,0x28,0x21,0x25,0x26,0x82,0x6c,
+0x34,0x03,0xff,0xff,0x34,0xe7,0x00,0x20,0xac,0xe6,0x00,0x00,0xa4,0x83,0x00,0x02,
+0xa4,0x43,0x00,0x00,0xac,0xa3,0x00,0x00,0x92,0x22,0x00,0x10,0x92,0x23,0x00,0x0a,
+0xa6,0x32,0x00,0x0e,0x02,0x62,0x10,0x21,0x14,0x60,0x00,0x05,0xa2,0x22,0x00,0x10,
+0x92,0x22,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xfe,0xa2,0x22,0x00,0x16,
+0x92,0x22,0x00,0x04,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x05,0x00,0x00,0x00,0x00,
+0x92,0x22,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xfd,0xa2,0x22,0x00,0x16,
+0x8f,0xbf,0x00,0x20,0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x28,0x96,0x22,0x00,0x0e,0x27,0x88,0x90,0x00,0x00,0x02,0x20,0xc0,
+0x00,0x82,0x20,0x21,0x00,0x04,0x20,0x80,0x00,0x88,0x20,0x21,0x94,0x82,0x00,0x04,
+0x3c,0x06,0xb0,0x03,0x3c,0x09,0x80,0x01,0x00,0x02,0x28,0xc0,0x00,0xa2,0x10,0x21,
+0x00,0x02,0x10,0x80,0x00,0xa3,0x28,0x21,0x00,0x48,0x10,0x21,0x34,0xc6,0x00,0x20,
+0x25,0x23,0x82,0x6c,0xac,0xc3,0x00,0x00,0xa4,0x50,0x00,0x00,0xac,0xb0,0x00,0x00,
+0x08,0x00,0x1b,0x97,0xa4,0x90,0x00,0x02,0x08,0x00,0x1b,0x8e,0x32,0x50,0xff,0xff,
+0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x24,0x42,0x6f,0x60,0x34,0x63,0x00,0x20,
+0xac,0x62,0x00,0x00,0x90,0x82,0x00,0x04,0x97,0xaa,0x00,0x12,0x00,0x80,0x60,0x21,
+0x30,0xa8,0xff,0xff,0x00,0x4a,0x20,0x23,0x34,0x09,0xff,0xff,0x30,0xcf,0xff,0xff,
+0x30,0xee,0xff,0xff,0x11,0x09,0x00,0x73,0xa1,0x84,0x00,0x04,0x00,0x0e,0xc0,0xc0,
+0x00,0x08,0x10,0xc0,0x00,0x48,0x10,0x21,0x03,0x0e,0x20,0x21,0x27,0x8d,0x90,0x00,
+0x00,0x04,0x20,0x80,0x00,0x02,0x10,0x80,0x00,0x4d,0x10,0x21,0x00,0x8d,0x20,0x21,
+0x94,0x86,0x00,0x02,0x94,0x43,0x00,0x04,0x3c,0x19,0x80,0x01,0xa4,0x46,0x00,0x02,
+0x00,0x03,0x28,0xc0,0x00,0xa3,0x18,0x21,0x94,0x87,0x00,0x02,0x3c,0x02,0xb0,0x08,
+0x00,0x03,0x18,0x80,0x00,0xa2,0x28,0x21,0x00,0x6d,0x18,0x21,0x27,0x22,0x82,0x6c,
+0x3c,0x01,0xb0,0x03,0xac,0x22,0x00,0x20,0xa4,0x66,0x00,0x00,0x10,0xe9,0x00,0x57,
+0xac,0xa6,0x00,0x00,0x01,0xe0,0x30,0x21,0x11,0x40,0x00,0x1d,0x00,0x00,0x48,0x21,
+0x01,0x40,0x38,0x21,0x27,0x8b,0x90,0x04,0x27,0x8a,0x90,0x10,0x00,0x06,0x40,0xc0,
+0x01,0x06,0x18,0x21,0x00,0x03,0x18,0x80,0x00,0x6b,0x10,0x21,0x8c,0x44,0x00,0x18,
+0x00,0x6a,0x18,0x21,0x80,0x65,0x00,0x06,0x8c,0x82,0x00,0x00,0x00,0x00,0x00,0x00,
+0x30,0x42,0xff,0xff,0x00,0x45,0x10,0x21,0x30,0x44,0x00,0xff,0x00,0x02,0x12,0x02,
+0x01,0x22,0x18,0x21,0x24,0x62,0x00,0x01,0x14,0x80,0x00,0x02,0x30,0x49,0x00,0xff,
+0x30,0x69,0x00,0xff,0x01,0x06,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x4d,0x10,0x21,
+0x24,0xe7,0xff,0xff,0x94,0x46,0x00,0x02,0x14,0xe0,0xff,0xe9,0x00,0x06,0x40,0xc0,
+0x91,0x82,0x00,0x10,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x20,0x3c,0x06,0xb0,0x03,
+0xa5,0x8f,0x00,0x0c,0x03,0x0e,0x20,0x21,0x00,0x04,0x20,0x80,0x00,0x8d,0x20,0x21,
+0x94,0x82,0x00,0x04,0x3c,0x03,0xb0,0x08,0x3c,0x07,0xb0,0x03,0x00,0x02,0x28,0xc0,
+0x00,0xa2,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x4d,0x10,0x21,0x00,0xa3,0x28,0x21,
+0x27,0x26,0x82,0x6c,0x34,0x03,0xff,0xff,0x34,0xe7,0x00,0x20,0xac,0xe6,0x00,0x00,
+0xa4,0x83,0x00,0x02,0xa4,0x43,0x00,0x00,0xac,0xa3,0x00,0x00,0x91,0x82,0x00,0x10,
+0x91,0x83,0x00,0x04,0xa5,0x8e,0x00,0x0e,0x01,0x22,0x10,0x21,0x14,0x60,0x00,0x05,
+0xa1,0x82,0x00,0x10,0x91,0x82,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xfd,
+0xa1,0x82,0x00,0x16,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x95,0x82,0x00,0x0e,
+0x3c,0x03,0xb0,0x08,0x00,0x02,0x20,0xc0,0x00,0x82,0x20,0x21,0x00,0x04,0x20,0x80,
+0x00,0x8d,0x20,0x21,0x94,0x82,0x00,0x04,0x34,0xc6,0x00,0x20,0x27,0x27,0x82,0x6c,
+0x00,0x02,0x28,0xc0,0x00,0xa2,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0xa3,0x28,0x21,
+0x00,0x4d,0x10,0x21,0xac,0xc7,0x00,0x00,0xa4,0x8f,0x00,0x02,0xa4,0x4f,0x00,0x00,
+0xac,0xaf,0x00,0x00,0x08,0x00,0x1c,0x26,0x03,0x0e,0x20,0x21,0x08,0x00,0x1c,0x01,
+0xa5,0x88,0x00,0x02,0x00,0x0e,0xc0,0xc0,0x03,0x0e,0x10,0x21,0x00,0x02,0x10,0x80,
+0x27,0x8d,0x90,0x00,0x00,0x4d,0x10,0x21,0x94,0x43,0x00,0x02,0x30,0x84,0x00,0xff,
+0x14,0x80,0x00,0x05,0xa5,0x83,0x00,0x00,0x24,0x02,0xff,0xff,0x3c,0x19,0x80,0x01,
+0x08,0x00,0x1c,0x01,0xa5,0x82,0x00,0x02,0x08,0x00,0x1c,0x01,0x3c,0x19,0x80,0x01,
+0x3c,0x08,0xb0,0x03,0x3c,0x02,0x80,0x00,0x27,0xbd,0xff,0x78,0x35,0x08,0x00,0x20,
+0x24,0x42,0x71,0xa0,0xaf,0xb2,0x00,0x68,0xaf,0xb1,0x00,0x64,0xaf,0xb0,0x00,0x60,
+0xad,0x02,0x00,0x00,0xaf,0xbf,0x00,0x84,0xaf,0xbe,0x00,0x80,0xaf,0xb7,0x00,0x7c,
+0xaf,0xb6,0x00,0x78,0xaf,0xb5,0x00,0x74,0xaf,0xb4,0x00,0x70,0xaf,0xb3,0x00,0x6c,
+0xaf,0xa4,0x00,0x88,0x90,0x83,0x00,0x0a,0x27,0x82,0xb4,0x00,0xaf,0xa6,0x00,0x90,
+0x00,0x03,0x18,0x80,0x00,0x62,0x18,0x21,0x8c,0x63,0x00,0x00,0xaf,0xa7,0x00,0x94,
+0x27,0x86,0x90,0x04,0xaf,0xa3,0x00,0x1c,0x94,0x63,0x00,0x14,0x30,0xb1,0xff,0xff,
+0x24,0x08,0x00,0x01,0x00,0x03,0x20,0xc0,0xaf,0xa3,0x00,0x18,0x00,0x83,0x18,0x21,
+0xaf,0xa4,0x00,0x54,0x00,0x03,0x18,0x80,0x27,0x84,0x90,0x10,0x00,0x64,0x20,0x21,
+0x80,0x82,0x00,0x06,0x00,0x66,0x18,0x21,0x8c,0x66,0x00,0x18,0x24,0x42,0x00,0x02,
+0x00,0x02,0x1f,0xc2,0x8c,0xc4,0x00,0x08,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x43,
+0x00,0x02,0x10,0x40,0x00,0x04,0x2f,0xc2,0x00,0x04,0x1c,0x82,0x00,0xc2,0x38,0x21,
+0x00,0x04,0x24,0x42,0x8f,0xa2,0x00,0x1c,0x30,0x63,0x00,0x01,0x30,0x84,0x00,0x01,
+0xaf,0xa5,0x00,0x3c,0xaf,0xa3,0x00,0x34,0xaf,0xa4,0x00,0x38,0xaf,0xa0,0x00,0x40,
+0xaf,0xa0,0x00,0x44,0xaf,0xa0,0x00,0x50,0xaf,0xa8,0x00,0x20,0x80,0x42,0x00,0x12,
+0x8f,0xb2,0x00,0x18,0xaf,0xa2,0x00,0x28,0x8c,0xd0,0x00,0x0c,0x14,0xa0,0x01,0xe4,
+0x00,0x60,0x30,0x21,0x00,0x10,0x10,0x82,0x30,0x45,0x00,0x07,0x10,0xa0,0x00,0x11,
+0xaf,0xa0,0x00,0x30,0x8f,0xa4,0x00,0x98,0x27,0x82,0x80,0x1c,0x00,0x04,0x18,0x40,
+0x00,0x62,0x18,0x21,0x24,0xa2,0x00,0x06,0x8f,0xa5,0x00,0x20,0x94,0x64,0x00,0x00,
+0x00,0x45,0x10,0x04,0x00,0x44,0x00,0x1a,0x14,0x80,0x00,0x02,0x00,0x00,0x00,0x00,
+0x00,0x07,0x00,0x0d,0x00,0x00,0x10,0x12,0x24,0x42,0x00,0x20,0x30,0x42,0xff,0xfc,
+0xaf,0xa2,0x00,0x30,0x8f,0xa3,0x00,0x18,0x8f,0xa4,0x00,0x28,0x34,0x02,0xff,0xff,
+0xaf,0xa0,0x00,0x2c,0xaf,0xa2,0x00,0x48,0xaf,0xa3,0x00,0x4c,0x00,0x60,0xf0,0x21,
+0x00,0x00,0xb8,0x21,0x18,0x80,0x00,0x48,0xaf,0xa0,0x00,0x24,0x00,0x11,0x89,0x02,
+0xaf,0xb1,0x00,0x58,0x00,0x80,0xa8,0x21,0x00,0x12,0x10,0xc0,0x00,0x52,0x18,0x21,
+0x00,0x03,0x80,0x80,0x27,0x85,0x90,0x00,0x02,0x40,0x20,0x21,0x00,0x40,0xa0,0x21,
+0x02,0x05,0x10,0x21,0x94,0x56,0x00,0x02,0x0c,0x00,0x12,0x8b,0x00,0x00,0x28,0x21,
+0x90,0x42,0x00,0x00,0x24,0x03,0x00,0x08,0x30,0x42,0x00,0x0c,0x10,0x43,0x01,0x9e,
+0x24,0x04,0x00,0x01,0x24,0x02,0x00,0x01,0x10,0x82,0x01,0x7c,0x3c,0x02,0xb0,0x03,
+0x8f,0xa6,0x00,0x88,0x34,0x42,0x01,0x04,0x84,0xc5,0x00,0x0c,0x02,0x92,0x18,0x21,
+0x94,0x46,0x00,0x00,0x00,0x05,0x20,0xc0,0x00,0x85,0x20,0x21,0x00,0x03,0x18,0x80,
+0x27,0x82,0x90,0x10,0x27,0x85,0x90,0x08,0x00,0x65,0x28,0x21,0x00,0x62,0x18,0x21,
+0x80,0x71,0x00,0x05,0x80,0x73,0x00,0x04,0x8f,0xa3,0x00,0x88,0x30,0xd0,0xff,0xff,
+0x00,0x10,0x3a,0x03,0x32,0x08,0x00,0xff,0x27,0x82,0x90,0x20,0x00,0x04,0x20,0x80,
+0x80,0xa6,0x00,0x06,0x00,0x82,0x20,0x21,0xa4,0x67,0x00,0x44,0xa4,0x68,0x00,0x46,
+0x8c,0x84,0x00,0x00,0x38,0xc6,0x00,0x00,0x01,0x00,0x80,0x21,0x00,0x04,0x15,0x02,
+0x30,0x42,0x00,0x01,0x10,0x40,0x00,0x03,0x00,0xe6,0x80,0x0a,0x00,0x04,0x14,0x02,
+0x30,0x50,0x00,0x0f,0x12,0x20,0x01,0x50,0x02,0x40,0x20,0x21,0x02,0x71,0x10,0x21,
+0x00,0x50,0x10,0x2a,0x14,0x40,0x00,0xed,0x02,0x92,0x10,0x21,0x93,0x82,0x8b,0x71,
+0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x01,0x14,0x40,0x00,0xe0,0x02,0x92,0x28,0x21,
+0x26,0xe2,0x00,0x01,0x30,0x57,0xff,0xff,0x02,0x40,0xf0,0x21,0x26,0xb5,0xff,0xff,
+0x16,0xa0,0xff,0xbd,0x02,0xc0,0x90,0x21,0x16,0xe0,0x00,0xd0,0x00,0x00,0x00,0x00,
+0x8f,0xa3,0x00,0x98,0x00,0x00,0x00,0x00,0x2c,0x62,0x00,0x10,0x10,0x40,0x00,0x2e,
+0x00,0x00,0x00,0x00,0x8f,0xa4,0x00,0x24,0x00,0x00,0x00,0x00,0x18,0x80,0x00,0x2a,
+0x24,0x03,0x00,0x01,0x8f,0xa5,0x00,0x1c,0x27,0x84,0x90,0x04,0x94,0xb2,0x00,0x14,
+0xa0,0xa3,0x00,0x12,0x8f,0xa6,0x00,0x3c,0x00,0x12,0x10,0xc0,0x00,0x52,0x10,0x21,
+0x00,0x02,0x80,0x80,0x27,0x82,0x90,0x10,0x02,0x02,0x10,0x21,0x80,0x43,0x00,0x06,
+0x02,0x04,0x20,0x21,0x8c,0x85,0x00,0x18,0x24,0x63,0x00,0x02,0x00,0x03,0x17,0xc2,
+0x00,0x62,0x18,0x21,0x00,0x03,0x18,0x43,0x00,0x03,0x18,0x40,0x14,0xc0,0x00,0x0e,
+0x00,0xa3,0x38,0x21,0x27,0x82,0x90,0x00,0x02,0x02,0x10,0x21,0x94,0x43,0x00,0x06,
+0x8f,0xa8,0x00,0x1c,0x24,0x02,0x00,0x01,0xa5,0x03,0x00,0x1a,0x7b,0xbe,0x04,0x3c,
+0x7b,0xb6,0x03,0xfc,0x7b,0xb4,0x03,0xbc,0x7b,0xb2,0x03,0x7c,0x7b,0xb0,0x03,0x3c,
+0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x88,0x8f,0xa4,0x00,0x98,0x8f,0xa5,0x00,0x38,
+0x8f,0xa6,0x00,0x34,0xaf,0xa0,0x00,0x10,0x0c,0x00,0x09,0x0a,0xaf,0xa0,0x00,0x14,
+0x08,0x00,0x1d,0x2d,0x00,0x00,0x00,0x00,0x8f,0xa3,0x00,0x44,0x93,0x82,0x81,0x58,
+0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x61,0x30,0x69,0x00,0x03,0x8f,0xa4,0x00,0x24,
+0x8f,0xa5,0x00,0x28,0x00,0x00,0x00,0x00,0x00,0x85,0x10,0x2a,0x10,0x40,0x00,0x8f,
+0x00,0x00,0x00,0x00,0x8f,0xa6,0x00,0x1c,0x00,0x00,0x00,0x00,0x90,0xc4,0x00,0x04,
+0x00,0x00,0x00,0x00,0x30,0x83,0x00,0xff,0x00,0xa3,0x10,0x2a,0x10,0x40,0x00,0x87,
+0x00,0x00,0x00,0x00,0x8f,0xa8,0x00,0x24,0x00,0x00,0x00,0x00,0x11,0x00,0x00,0x83,
+0x00,0x65,0x10,0x23,0x00,0xa8,0x18,0x23,0x00,0x62,0x10,0x2a,0x14,0x40,0x00,0x7d,
+0x30,0x63,0x00,0xff,0x00,0x85,0x10,0x23,0x30,0x42,0x00,0xff,0xaf,0xa2,0x00,0x50,
+0x8f,0xa2,0x00,0x50,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x73,0x00,0x00,0xa8,0x21,
+0x27,0x8c,0x90,0x00,0x3c,0x0b,0x80,0xff,0x24,0x10,0x00,0x04,0x27,0x91,0x90,0x04,
+0x35,0x6b,0xff,0xff,0x3c,0x0d,0x7f,0x00,0x27,0x8e,0x90,0x10,0x01,0x80,0x78,0x21,
+0x00,0x12,0x30,0xc0,0x00,0xd2,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x4c,0x10,0x21,
+0x94,0x42,0x00,0x06,0x8f,0xa3,0x00,0x2c,0x8f,0xa4,0x00,0x30,0xaf,0xa2,0x00,0x44,
+0x8f,0xa5,0x00,0x44,0x30,0x49,0x00,0x03,0x02,0x09,0x10,0x23,0x30,0x42,0x00,0x03,
+0x00,0xa2,0x10,0x21,0x8f,0xa8,0x00,0x30,0x24,0x42,0x00,0x04,0x30,0x42,0xff,0xff,
+0x00,0x64,0x38,0x21,0x01,0x02,0x28,0x23,0x00,0x62,0x18,0x21,0x00,0x48,0x10,0x2b,
+0x10,0x40,0x00,0x52,0x00,0x00,0x20,0x21,0x30,0xe7,0xff,0xff,0x30,0xa4,0xff,0xff,
+0xaf,0xa7,0x00,0x2c,0x00,0xd2,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x51,0x18,0x21,
+0x8c,0x65,0x00,0x18,0x00,0x04,0x25,0x40,0x00,0x8d,0x20,0x24,0x8c,0xa8,0x00,0x04,
+0x00,0x4e,0x18,0x21,0x00,0x4f,0x50,0x21,0x01,0x0b,0x40,0x24,0x01,0x04,0x40,0x25,
+0xac,0xa8,0x00,0x04,0x8f,0xa4,0x00,0x98,0x8f,0xa2,0x00,0x50,0x26,0xb5,0x00,0x01,
+0xa0,0x64,0x00,0x00,0x8c,0xa4,0x00,0x08,0x00,0x00,0x00,0x00,0x04,0x81,0x00,0x0c,
+0x02,0xa2,0x30,0x2a,0x80,0x62,0x00,0x06,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x02,
+0x00,0x02,0x1f,0xc2,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x43,0x00,0x02,0x10,0x40,
+0x00,0xa2,0x38,0x21,0x8f,0xa5,0x00,0x40,0x00,0x00,0x00,0x00,0xa4,0xe5,0x00,0x00,
+0x95,0x52,0x00,0x02,0x14,0xc0,0xff,0xc7,0x00,0x12,0x30,0xc0,0x8f,0xa4,0x00,0x24,
+0x8f,0xa5,0x00,0x50,0x8f,0xa6,0x00,0x1c,0x8f,0xa3,0x00,0x2c,0x00,0x85,0x80,0x21,
+0xa0,0xd0,0x00,0x12,0x00,0x09,0x10,0x23,0x30,0x42,0x00,0x03,0x8f,0xa8,0x00,0x88,
+0x00,0x62,0x10,0x23,0xa4,0xc2,0x00,0x1a,0x85,0x03,0x00,0x0c,0x00,0x00,0x00,0x00,
+0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x04,
+0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x18,0x00,0x00,0x00,0x00,0x8c,0x83,0x00,0x04,
+0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x10,0x14,0x60,0xff,0x74,0x02,0x00,0x10,0x21,
+0x8f,0xa3,0x00,0x54,0x8f,0xa4,0x00,0x18,0x8f,0xa5,0x00,0x24,0x00,0x64,0x10,0x21,
+0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x18,0x00,0x43,0x10,0x21,0x90,0x44,0x00,0x00,
+0x10,0xa0,0x00,0x03,0x00,0x00,0x30,0x21,0x08,0x00,0x1d,0x33,0x02,0x00,0x10,0x21,
+0x93,0x82,0x80,0x10,0x00,0x00,0x28,0x21,0x00,0x00,0x38,0x21,0x0c,0x00,0x21,0x9a,
+0xaf,0xa2,0x00,0x10,0x08,0x00,0x1d,0x33,0x02,0x00,0x10,0x21,0x30,0x63,0xff,0xff,
+0x08,0x00,0x1d,0x85,0xaf,0xa3,0x00,0x2c,0x8f,0xa8,0x00,0x44,0x08,0x00,0x1d,0xa7,
+0x31,0x09,0x00,0x03,0x08,0x00,0x1d,0x60,0xaf,0xa3,0x00,0x50,0x8f,0xa6,0x00,0x44,
+0xaf,0xa0,0x00,0x50,0x08,0x00,0x1d,0xa7,0x30,0xc9,0x00,0x03,0x8f,0xa5,0x00,0x48,
+0x8f,0xa6,0x00,0x4c,0x8f,0xa4,0x00,0x1c,0x03,0xc0,0x38,0x21,0x0c,0x00,0x1b,0xd8,
+0xaf,0xb7,0x00,0x10,0x08,0x00,0x1d,0x10,0x00,0x00,0x00,0x00,0x00,0x05,0x28,0x80,
+0x27,0x82,0x90,0x00,0x00,0xa2,0x28,0x21,0x00,0x00,0x20,0x21,0x0c,0x00,0x01,0x49,
+0x00,0x00,0x00,0x00,0x08,0x00,0x1d,0x09,0x26,0xe2,0x00,0x01,0x00,0x02,0x80,0x80,
+0x27,0x83,0x90,0x10,0x8f,0xa4,0x00,0x1c,0x02,0x03,0x18,0x21,0x26,0x31,0x00,0x01,
+0x02,0x40,0x28,0x21,0x0c,0x00,0x1e,0xea,0xa0,0x71,0x00,0x05,0x14,0x40,0xff,0x13,
+0x00,0x00,0x00,0x00,0x16,0xe0,0x00,0x4d,0x03,0xc0,0x38,0x21,0x8f,0xa4,0x00,0x24,
+0x8f,0xa5,0x00,0x20,0x24,0x02,0x00,0x01,0x24,0x84,0x00,0x01,0xaf,0xb2,0x00,0x48,
+0xaf,0xb6,0x00,0x4c,0x02,0xc0,0xf0,0x21,0x10,0xa2,0x00,0x41,0xaf,0xa4,0x00,0x24,
+0x27,0x82,0x90,0x00,0x02,0x02,0x10,0x21,0x94,0x42,0x00,0x06,0x8f,0xa4,0x00,0x30,
+0xaf,0xa0,0x00,0x20,0xaf,0xa2,0x00,0x44,0x30,0x49,0x00,0x03,0x8f,0xa8,0x00,0x44,
+0x00,0x09,0x10,0x23,0x30,0x42,0x00,0x03,0x01,0x02,0x10,0x21,0x24,0x42,0x00,0x04,
+0x30,0x42,0xff,0xff,0x00,0x44,0x18,0x2b,0x10,0x60,0x00,0x2b,0x00,0x00,0x00,0x00,
+0x8f,0xa5,0x00,0x2c,0x00,0x82,0x10,0x23,0x00,0xa4,0x18,0x21,0x30,0x63,0xff,0xff,
+0x30,0x44,0xff,0xff,0xaf,0xa3,0x00,0x2c,0x02,0x92,0x28,0x21,0x00,0x05,0x28,0x80,
+0x27,0x82,0x90,0x04,0x00,0xa2,0x10,0x21,0x8c,0x46,0x00,0x18,0x3c,0x03,0x80,0xff,
+0x3c,0x02,0x7f,0x00,0x8c,0xc8,0x00,0x04,0x00,0x04,0x25,0x40,0x34,0x63,0xff,0xff,
+0x00,0x82,0x20,0x24,0x01,0x03,0x40,0x24,0x01,0x04,0x40,0x25,0xac,0xc8,0x00,0x04,
+0x8f,0xa8,0x00,0x98,0x27,0x82,0x90,0x10,0x00,0xa2,0x10,0x21,0xa0,0x48,0x00,0x00,
+0x8c,0xc4,0x00,0x08,0x00,0x00,0x00,0x00,0x00,0x04,0x27,0xc2,0x10,0x80,0xfe,0xdb,
+0xaf,0xa4,0x00,0x3c,0x80,0x42,0x00,0x06,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x02,
+0x00,0x02,0x1f,0xc2,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x43,0x00,0x02,0x10,0x40,
+0x00,0xc2,0x38,0x21,0x8f,0xa2,0x00,0x40,0x00,0x00,0x00,0x00,0xa4,0xe2,0x00,0x00,
+0x08,0x00,0x1d,0x0c,0x26,0xb5,0xff,0xff,0x8f,0xa6,0x00,0x2c,0x00,0x00,0x20,0x21,
+0x00,0xc2,0x10,0x21,0x30,0x42,0xff,0xff,0x08,0x00,0x1e,0x1a,0xaf,0xa2,0x00,0x2c,
+0x8f,0xa6,0x00,0x1c,0x08,0x00,0x1e,0x04,0xa4,0xd2,0x00,0x14,0x8f,0xa5,0x00,0x48,
+0x8f,0xa6,0x00,0x4c,0x8f,0xa4,0x00,0x1c,0x0c,0x00,0x1b,0xd8,0xaf,0xb7,0x00,0x10,
+0x08,0x00,0x1d,0xfb,0x00,0x00,0xb8,0x21,0x0c,0x00,0x12,0x8b,0x00,0x00,0x28,0x21,
+0x00,0x40,0x18,0x21,0x94,0x42,0x00,0x00,0x00,0x00,0x00,0x00,0x34,0x42,0x08,0x00,
+0xa4,0x62,0x00,0x00,0x08,0x00,0x1d,0x00,0x02,0x71,0x10,0x21,0x02,0x92,0x18,0x21,
+0x00,0x03,0x80,0x80,0x27,0x82,0x90,0x04,0x02,0x02,0x10,0x21,0x8c,0x44,0x00,0x18,
+0x00,0x00,0x00,0x00,0x8c,0x83,0x00,0x04,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x10,
+0x10,0x60,0x00,0x09,0x24,0x06,0x00,0x01,0x93,0x82,0x8b,0x71,0x00,0x00,0x00,0x00,
+0x30,0x42,0x00,0x01,0x10,0x40,0xfe,0xa2,0x3c,0x04,0x00,0x80,0x27,0x85,0x90,0x00,
+0x08,0x00,0x1d,0xeb,0x02,0x05,0x28,0x21,0x27,0x83,0x90,0x18,0x27,0x82,0x90,0x10,
+0x02,0x03,0x18,0x21,0x02,0x02,0x10,0x21,0x90,0x64,0x00,0x00,0x90,0x45,0x00,0x05,
+0x93,0x83,0x80,0x10,0x00,0x00,0x38,0x21,0x0c,0x00,0x21,0x9a,0xaf,0xa3,0x00,0x10,
+0x08,0x00,0x1e,0x62,0x00,0x00,0x00,0x00,0x27,0x82,0x90,0x18,0x02,0x02,0x10,0x21,
+0x94,0x43,0x00,0x02,0x8f,0xa6,0x00,0x58,0x00,0x03,0x19,0x02,0x00,0x66,0x18,0x23,
+0x30,0x63,0x0f,0xff,0x28,0x62,0x00,0x20,0x10,0x40,0x00,0x06,0x28,0x62,0x00,0x40,
+0x8f,0xa8,0x00,0x90,0x00,0x00,0x00,0x00,0x00,0x68,0x10,0x06,0x08,0x00,0x1c,0xd9,
+0x30,0x44,0x00,0x01,0x10,0x40,0x00,0x04,0x00,0x00,0x00,0x00,0x8f,0xa4,0x00,0x94,
+0x08,0x00,0x1e,0x83,0x00,0x64,0x10,0x06,0x08,0x00,0x1c,0xd9,0x00,0x00,0x20,0x21,
+0x8f,0xa4,0x00,0x98,0x8f,0xa5,0x00,0x38,0xaf,0xa0,0x00,0x10,0x0c,0x00,0x09,0x0a,
+0xaf,0xa8,0x00,0x14,0x30,0x42,0xff,0xff,0x08,0x00,0x1c,0xa9,0xaf,0xa2,0x00,0x40,
+0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x00,0x27,0xbd,0xff,0xe0,0x34,0x42,0x00,0x20,
+0x24,0x63,0x7a,0x50,0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x18,
+0xac,0x43,0x00,0x00,0x90,0x82,0x00,0x0a,0x00,0x80,0x80,0x21,0x14,0x40,0x00,0x45,
+0x00,0x00,0x88,0x21,0x92,0x02,0x00,0x04,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x3c,
+0x00,0x00,0x00,0x00,0x12,0x20,0x00,0x18,0x00,0x00,0x00,0x00,0x92,0x02,0x00,0x16,
+0x92,0x05,0x00,0x0a,0x30,0x42,0x00,0xfc,0x10,0xa0,0x00,0x03,0xa2,0x02,0x00,0x16,
+0x34,0x42,0x00,0x01,0xa2,0x02,0x00,0x16,0x92,0x04,0x00,0x04,0x00,0x00,0x00,0x00,
+0x30,0x83,0x00,0xff,0x10,0x60,0x00,0x05,0x00,0x00,0x00,0x00,0x92,0x02,0x00,0x16,
+0x00,0x00,0x00,0x00,0x34,0x42,0x00,0x02,0xa2,0x02,0x00,0x16,0x10,0x60,0x00,0x0a,
+0x00,0x00,0x00,0x00,0x14,0xa0,0x00,0x08,0x00,0x00,0x00,0x00,0x96,0x02,0x00,0x00,
+0xa2,0x00,0x00,0x17,0xa6,0x02,0x00,0x14,0x8f,0xbf,0x00,0x18,0x7b,0xb0,0x00,0xbc,
+0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x14,0x80,0x00,0x05,0x24,0x02,0x00,0x01,
+0x96,0x03,0x00,0x06,0xa2,0x02,0x00,0x17,0x08,0x00,0x1e,0xbe,0xa6,0x03,0x00,0x14,
+0x96,0x04,0x00,0x00,0x96,0x05,0x00,0x06,0x27,0x86,0x90,0x00,0x00,0x04,0x10,0xc0,
+0x00,0x05,0x18,0xc0,0x00,0x44,0x10,0x21,0x00,0x65,0x18,0x21,0x00,0x02,0x10,0x80,
+0x00,0x03,0x18,0x80,0x00,0x66,0x18,0x21,0x00,0x46,0x10,0x21,0x8c,0x65,0x00,0x08,
+0x8c,0x44,0x00,0x08,0x0c,0x00,0x12,0x7c,0x00,0x00,0x00,0x00,0x30,0x43,0x00,0xff,
+0x10,0x60,0x00,0x04,0xa2,0x02,0x00,0x17,0x96,0x02,0x00,0x06,0x08,0x00,0x1e,0xbe,
+0xa6,0x02,0x00,0x14,0x96,0x02,0x00,0x00,0x08,0x00,0x1e,0xbe,0xa6,0x02,0x00,0x14,
+0x96,0x05,0x00,0x00,0x0c,0x00,0x1e,0xea,0x02,0x00,0x20,0x21,0x08,0x00,0x1e,0xa5,
+0x02,0x22,0x88,0x21,0x94,0x85,0x00,0x06,0x0c,0x00,0x1e,0xea,0x00,0x00,0x00,0x00,
+0x08,0x00,0x1e,0xa1,0x00,0x40,0x88,0x21,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,
+0x34,0x63,0x00,0x20,0x24,0x42,0x7b,0xa8,0x27,0xbd,0xff,0xf0,0xac,0x62,0x00,0x00,
+0x00,0x00,0x10,0x21,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x10,0x3c,0x03,0xb0,0x03,
+0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20,0x24,0x42,0x7b,0xcc,0xac,0x62,0x00,0x00,
+0x90,0x89,0x00,0x0a,0x00,0x80,0x30,0x21,0x11,0x20,0x00,0x05,0x00,0xa0,0x50,0x21,
+0x90,0x82,0x00,0x17,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x1b,0x00,0x00,0x00,0x00,
+0x90,0xc7,0x00,0x04,0x00,0x00,0x00,0x00,0x10,0xe0,0x00,0x1b,0x00,0x00,0x00,0x00,
+0x94,0xc8,0x00,0x00,0x27,0x83,0x90,0x00,0x93,0x85,0x8b,0x70,0x00,0x08,0x10,0xc0,
+0x00,0x48,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x08,
+0x00,0xe5,0x28,0x2b,0x10,0xa0,0x00,0x06,0x01,0x44,0x18,0x23,0x8f,0x82,0x8b,0x88,
+0x00,0x00,0x00,0x00,0x00,0x43,0x10,0x2b,0x10,0x40,0x00,0x05,0x00,0x00,0x00,0x00,
+0x24,0x03,0x00,0x10,0xa4,0xc8,0x00,0x14,0x03,0xe0,0x00,0x08,0x00,0x60,0x10,0x21,
+0x11,0x20,0x00,0x05,0x00,0x00,0x00,0x00,0x94,0xc2,0x00,0x06,0x24,0x03,0x00,0x08,
+0x08,0x00,0x1f,0x16,0xa4,0xc2,0x00,0x14,0x08,0x00,0x1f,0x16,0x00,0x00,0x18,0x21,
+0x27,0xbd,0xff,0xc8,0xaf,0xb5,0x00,0x2c,0xaf,0xb4,0x00,0x28,0xaf,0xb3,0x00,0x24,
+0xaf,0xb0,0x00,0x18,0xaf,0xbf,0x00,0x30,0xaf,0xb2,0x00,0x20,0xaf,0xb1,0x00,0x1c,
+0x94,0x91,0x00,0x06,0x00,0x80,0xa0,0x21,0x3c,0x02,0x80,0x00,0x3c,0x04,0xb0,0x03,
+0x00,0x11,0xa8,0xc0,0x34,0x84,0x00,0x20,0x24,0x42,0x7c,0x80,0x02,0xb1,0x48,0x21,
+0xac,0x82,0x00,0x00,0x00,0x09,0x48,0x80,0x24,0x03,0x00,0x01,0x27,0x82,0x90,0x10,
+0xa2,0x83,0x00,0x12,0x01,0x22,0x10,0x21,0x27,0x84,0x90,0x04,0x01,0x24,0x20,0x21,
+0x80,0x48,0x00,0x06,0x8c,0x8a,0x00,0x18,0x27,0x83,0x90,0x20,0x01,0x23,0x48,0x21,
+0x8d,0x24,0x00,0x00,0x25,0x08,0x00,0x02,0x8d,0x42,0x00,0x00,0x8d,0x49,0x00,0x04,
+0x00,0x08,0x17,0xc2,0x8d,0x43,0x00,0x08,0x01,0x02,0x40,0x21,0x00,0x04,0x25,0xc2,
+0x00,0x08,0x40,0x43,0x30,0x84,0x00,0x01,0x00,0x03,0x1f,0xc2,0x00,0x08,0x40,0x40,
+0x00,0xe0,0x80,0x21,0x00,0x64,0x18,0x24,0x00,0x09,0x49,0x42,0x01,0x48,0x10,0x21,
+0x00,0xa0,0x98,0x21,0x00,0xa0,0x20,0x21,0x00,0x40,0x38,0x21,0x02,0x00,0x28,0x21,
+0x14,0x60,0x00,0x19,0x31,0x29,0x00,0x01,0x94,0x42,0x00,0x00,0x02,0xb1,0x88,0x21,
+0x02,0x00,0x28,0x21,0x00,0x11,0x88,0x80,0x27,0x90,0x90,0x00,0x02,0x30,0x80,0x21,
+0x96,0x03,0x00,0x06,0x30,0x52,0xff,0xff,0x02,0x60,0x20,0x21,0x00,0x60,0x30,0x21,
+0xa6,0x83,0x00,0x1a,0x27,0x82,0x90,0x08,0x0c,0x00,0x08,0xe3,0x02,0x22,0x88,0x21,
+0x00,0x52,0x10,0x21,0x96,0x03,0x00,0x06,0xa6,0x22,0x00,0x04,0x8f,0xbf,0x00,0x30,
+0x7b,0xb4,0x01,0x7c,0x7b,0xb2,0x01,0x3c,0x7b,0xb0,0x00,0xfc,0x00,0x60,0x10,0x21,
+0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x38,0xaf,0xa9,0x00,0x10,0x0c,0x00,0x09,0x0a,
+0xaf,0xa0,0x00,0x14,0x08,0x00,0x1f,0x54,0x02,0xb1,0x88,0x21,0x27,0xbd,0xff,0xc0,
+0xaf,0xbe,0x00,0x38,0xaf,0xb7,0x00,0x34,0xaf,0xb6,0x00,0x30,0xaf,0xb5,0x00,0x2c,
+0xaf,0xb3,0x00,0x24,0xaf,0xb1,0x00,0x1c,0xaf,0xbf,0x00,0x3c,0xaf,0xb4,0x00,0x28,
+0xaf,0xb2,0x00,0x20,0xaf,0xb0,0x00,0x18,0x94,0x90,0x00,0x00,0x3c,0x08,0xb0,0x03,
+0x35,0x08,0x00,0x20,0x00,0x10,0x10,0xc0,0x00,0x50,0x18,0x21,0x00,0x40,0x88,0x21,
+0x3c,0x02,0x80,0x00,0x00,0x03,0x48,0x80,0x24,0x42,0x7d,0xbc,0x00,0x80,0x98,0x21,
+0x27,0x84,0x90,0x10,0x01,0x24,0x20,0x21,0x93,0xb7,0x00,0x53,0xad,0x02,0x00,0x00,
+0x80,0x83,0x00,0x06,0x27,0x82,0x90,0x04,0x01,0x22,0x10,0x21,0x8c,0x44,0x00,0x18,
+0x24,0x63,0x00,0x02,0x00,0x03,0x17,0xc2,0x8c,0x88,0x00,0x08,0x00,0x62,0x18,0x21,
+0x00,0x03,0x18,0x43,0x00,0x03,0x18,0x40,0xaf,0xa7,0x00,0x4c,0x2c,0xa2,0x00,0x10,
+0x00,0xa0,0xa8,0x21,0x00,0x83,0x50,0x21,0x00,0x08,0x47,0xc2,0x00,0xc0,0x58,0x21,
+0x00,0x00,0xb0,0x21,0x8c,0x92,0x00,0x0c,0x14,0x40,0x00,0x13,0x00,0x00,0xf0,0x21,
+0x92,0x67,0x00,0x04,0x24,0x14,0x00,0x01,0x12,0x87,0x00,0x10,0x02,0x30,0x10,0x21,
+0x27,0x83,0x90,0x18,0x01,0x23,0x18,0x21,0x80,0x64,0x00,0x00,0x27,0x83,0xb5,0x70,
+0x00,0x04,0x11,0x00,0x00,0x44,0x10,0x23,0x00,0x02,0x10,0x80,0x00,0x44,0x10,0x23,
+0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x90,0x44,0x00,0x04,0x00,0x00,0x00,0x00,
+0x10,0x80,0x00,0x23,0x00,0x00,0x00,0x00,0x02,0x30,0x10,0x21,0x00,0x02,0x80,0x80,
+0x24,0x04,0x00,0x01,0x27,0x83,0x90,0x20,0xa2,0x64,0x00,0x12,0x02,0x03,0x18,0x21,
+0x8c,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x15,0xc2,0x30,0x42,0x00,0x01,
+0x01,0x02,0x10,0x24,0x14,0x40,0x00,0x0e,0x02,0xa0,0x20,0x21,0x27,0x82,0x90,0x00,
+0x02,0x02,0x10,0x21,0x94,0x43,0x00,0x06,0x00,0x00,0x00,0x00,0xa6,0x63,0x00,0x1a,
+0x94,0x42,0x00,0x06,0x7b,0xbe,0x01,0xfc,0x7b,0xb6,0x01,0xbc,0x7b,0xb4,0x01,0x7c,
+0x7b,0xb2,0x01,0x3c,0x7b,0xb0,0x00,0xfc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x40,
+0x8f,0xa5,0x00,0x4c,0x01,0x60,0x30,0x21,0x01,0x40,0x38,0x21,0xaf,0xa0,0x00,0x10,
+0x0c,0x00,0x09,0x0a,0xaf,0xa0,0x00,0x14,0x08,0x00,0x1f,0xbb,0x00,0x00,0x00,0x00,
+0x27,0x83,0x90,0x20,0x01,0x23,0x18,0x21,0x8c,0x62,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x02,0x15,0xc2,0x30,0x42,0x00,0x01,0x01,0x02,0x10,0x24,0x14,0x40,0x00,0xaf,
+0x00,0xa0,0x20,0x21,0x32,0x4f,0x00,0x03,0x00,0x12,0x10,0x82,0x25,0xe3,0x00,0x0d,
+0x30,0x45,0x00,0x07,0x00,0x74,0x78,0x04,0x10,0xa0,0x00,0x0e,0x00,0x00,0x90,0x21,
+0x27,0x82,0x80,0x1c,0x00,0x15,0x18,0x40,0x00,0x62,0x18,0x21,0x94,0x64,0x00,0x00,
+0x24,0xa2,0x00,0x06,0x00,0x54,0x10,0x04,0x00,0x44,0x00,0x1a,0x14,0x80,0x00,0x02,
+0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x0d,0x00,0x00,0x10,0x12,0x24,0x42,0x00,0x20,
+0x30,0x52,0xff,0xfc,0x02,0x30,0x10,0x21,0x27,0x83,0x90,0x10,0x00,0x02,0x10,0x80,
+0x00,0x43,0x10,0x21,0x90,0x44,0x00,0x03,0x00,0x00,0x00,0x00,0x30,0x83,0x00,0xff,
+0x2c,0x62,0x00,0x0c,0x14,0x40,0x00,0x04,0x2c,0x62,0x00,0x19,0x30,0x82,0x00,0x0f,
+0x24,0x43,0x00,0x0c,0x2c,0x62,0x00,0x19,0x10,0x40,0x00,0x19,0x24,0x0e,0x00,0x20,
+0x24,0x62,0xff,0xe9,0x2c,0x42,0x00,0x02,0x14,0x40,0x00,0x15,0x24,0x0e,0x00,0x10,
+0x24,0x62,0xff,0xeb,0x2c,0x42,0x00,0x02,0x14,0x40,0x00,0x11,0x24,0x0e,0x00,0x08,
+0x24,0x02,0x00,0x14,0x10,0x62,0x00,0x0e,0x24,0x0e,0x00,0x02,0x24,0x62,0xff,0xef,
+0x2c,0x42,0x00,0x03,0x14,0x40,0x00,0x0a,0x24,0x0e,0x00,0x10,0x24,0x62,0xff,0xf1,
+0x2c,0x42,0x00,0x02,0x14,0x40,0x00,0x06,0x24,0x0e,0x00,0x08,0x24,0x62,0xff,0xf3,
+0x2c,0x42,0x00,0x02,0x24,0x0e,0x00,0x04,0x24,0x03,0x00,0x02,0x00,0x62,0x70,0x0a,
+0x30,0xe2,0x00,0xff,0x00,0x00,0x48,0x21,0x00,0x00,0x68,0x21,0x10,0x40,0x00,0x6d,
+0x00,0x00,0x58,0x21,0x3c,0x14,0x80,0xff,0x27,0x99,0x90,0x00,0x01,0xf2,0xc0,0x23,
+0x36,0x94,0xff,0xff,0x01,0xc9,0x10,0x2a,0x14,0x40,0x00,0x64,0x24,0x03,0x00,0x04,
+0x00,0x10,0x28,0xc0,0x00,0xb0,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x59,0x10,0x21,
+0x94,0x56,0x00,0x06,0x00,0x00,0x00,0x00,0x32,0xcc,0x00,0x03,0x00,0x6c,0x10,0x23,
+0x30,0x42,0x00,0x03,0x02,0xc2,0x10,0x21,0x24,0x42,0x00,0x04,0x30,0x51,0xff,0xff,
+0x02,0x32,0x18,0x2b,0x10,0x60,0x00,0x4d,0x01,0xf1,0x10,0x23,0x02,0x51,0x10,0x23,
+0x01,0x78,0x18,0x2b,0x10,0x60,0x00,0x34,0x30,0x44,0xff,0xff,0x29,0x22,0x00,0x40,
+0x10,0x40,0x00,0x31,0x01,0x72,0x18,0x21,0x25,0x22,0x00,0x01,0x00,0x02,0x16,0x00,
+0x00,0x02,0x4e,0x03,0x00,0xb0,0x10,0x21,0x00,0x02,0x30,0x80,0x27,0x82,0x90,0x04,
+0x30,0x6b,0xff,0xff,0x00,0xc2,0x18,0x21,0x8c,0x67,0x00,0x18,0x00,0x04,0x25,0x40,
+0x3c,0x03,0x7f,0x00,0x8c,0xe2,0x00,0x04,0x00,0x83,0x20,0x24,0x27,0x83,0x90,0x10,
+0x00,0x54,0x10,0x24,0x00,0xc3,0x28,0x21,0x00,0x44,0x10,0x25,0xac,0xe2,0x00,0x04,
+0x16,0xe0,0x00,0x02,0xa0,0xb5,0x00,0x00,0xa0,0xb5,0x00,0x03,0x27,0x84,0x90,0x20,
+0x00,0xc4,0x18,0x21,0x8c,0x62,0x00,0x00,0x8c,0xe8,0x00,0x08,0x00,0x02,0x15,0xc2,
+0x00,0x08,0x47,0xc2,0x30,0x42,0x00,0x01,0x01,0x02,0x10,0x24,0x10,0x40,0x00,0x0a,
+0x00,0x00,0x00,0x00,0x80,0xa2,0x00,0x06,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x02,
+0x00,0x02,0x1f,0xc2,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x43,0x00,0x02,0x10,0x40,
+0x00,0xe2,0x50,0x21,0xa5,0x5e,0x00,0x00,0x92,0x62,0x00,0x04,0x25,0xad,0x00,0x01,
+0x27,0x84,0x90,0x00,0x00,0xc4,0x18,0x21,0x01,0xa2,0x10,0x2a,0x94,0x70,0x00,0x02,
+0x14,0x40,0xff,0xb8,0x00,0x00,0x00,0x00,0x96,0x63,0x00,0x14,0x00,0x0c,0x10,0x23,
+0xa2,0x69,0x00,0x12,0x30,0x42,0x00,0x03,0x01,0x62,0x10,0x23,0x00,0x03,0x80,0xc0,
+0x8f,0xa5,0x00,0x4c,0x30,0x4b,0xff,0xff,0x02,0x03,0x80,0x21,0x27,0x82,0x90,0x08,
+0x00,0x10,0x80,0x80,0xa6,0x6b,0x00,0x1a,0x02,0xa0,0x20,0x21,0x01,0x60,0x30,0x21,
+0x01,0x60,0x88,0x21,0x0c,0x00,0x08,0xe3,0x02,0x02,0x80,0x21,0x00,0x5e,0x10,0x21,
+0xa6,0x02,0x00,0x04,0x08,0x00,0x1f,0xc1,0x02,0x20,0x10,0x21,0x01,0x62,0x10,0x2b,
+0x10,0x40,0xff,0xe9,0x00,0x00,0x20,0x21,0x29,0x22,0x00,0x40,0x10,0x40,0xff,0xe6,
+0x01,0x71,0x18,0x21,0x08,0x00,0x20,0x37,0x25,0x22,0x00,0x01,0x08,0x00,0x20,0x66,
+0x32,0xcc,0x00,0x03,0x08,0x00,0x20,0x66,0x00,0x00,0x60,0x21,0x8f,0xa5,0x00,0x4c,
+0x01,0x40,0x38,0x21,0xaf,0xa0,0x00,0x10,0x0c,0x00,0x09,0x0a,0xaf,0xb4,0x00,0x14,
+0x92,0x67,0x00,0x04,0x08,0x00,0x1f,0xd9,0x30,0x5e,0xff,0xff,0x30,0x84,0xff,0xff,
+0x00,0x04,0x30,0xc0,0x00,0xc4,0x20,0x21,0x00,0x04,0x20,0x80,0x27,0x82,0x90,0x00,
+0x3c,0x03,0xb0,0x08,0x30,0xa5,0xff,0xff,0x00,0x82,0x20,0x21,0x00,0xc3,0x30,0x21,
+0xac,0xc5,0x00,0x00,0x03,0xe0,0x00,0x08,0xa4,0x85,0x00,0x00,0x30,0x84,0xff,0xff,
+0x00,0x04,0x30,0xc0,0x00,0xc4,0x30,0x21,0x27,0x88,0x90,0x00,0x00,0x06,0x30,0x80,
+0x00,0xc8,0x30,0x21,0x94,0xc3,0x00,0x04,0x3c,0x02,0xb0,0x08,0x3c,0x07,0xb0,0x03,
+0x00,0x03,0x20,0xc0,0x00,0x83,0x18,0x21,0x00,0x03,0x18,0x80,0x00,0x82,0x20,0x21,
+0x3c,0x02,0x80,0x01,0x30,0xa5,0xff,0xff,0x00,0x68,0x18,0x21,0x34,0xe7,0x00,0x20,
+0x24,0x42,0x82,0x6c,0xac,0xe2,0x00,0x00,0xa4,0xc5,0x00,0x02,0xa4,0x65,0x00,0x00,
+0x03,0xe0,0x00,0x08,0xac,0x85,0x00,0x00,0x30,0x84,0xff,0xff,0x00,0x04,0x10,0xc0,
+0x00,0x44,0x10,0x21,0x27,0x89,0x90,0x00,0x00,0x02,0x10,0x80,0x00,0x49,0x10,0x21,
+0x97,0x83,0x8f,0xf0,0x94,0x4a,0x00,0x04,0x3c,0x02,0xb0,0x08,0x00,0x03,0x38,0xc0,
+0x00,0x0a,0x40,0xc0,0x00,0xe3,0x18,0x21,0x01,0x0a,0x28,0x21,0x00,0xe2,0x38,0x21,
+0x01,0x02,0x40,0x21,0x00,0x03,0x18,0x80,0x00,0x05,0x28,0x80,0x3c,0x06,0xb0,0x03,
+0x3c,0x02,0x80,0x01,0x00,0xa9,0x28,0x21,0x00,0x69,0x18,0x21,0x34,0xc6,0x00,0x20,
+0x34,0x09,0xff,0xff,0x24,0x42,0x82,0xc8,0xac,0xc2,0x00,0x00,0xa4,0x64,0x00,0x00,
+0xac,0xe4,0x00,0x00,0xa4,0xa9,0x00,0x00,0xad,0x09,0x00,0x00,0xa7,0x8a,0x8f,0xf0,
+0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x01,
+0x34,0x63,0x00,0x20,0x24,0x42,0x83,0x48,0x3c,0x04,0xb0,0x03,0xac,0x62,0x00,0x00,
+0x34,0x84,0x01,0x10,0x8c,0x82,0x00,0x00,0x97,0x83,0x81,0x60,0x30,0x42,0xff,0xff,
+0x10,0x62,0x00,0x16,0x24,0x0a,0x00,0x01,0xa7,0x82,0x81,0x60,0xaf,0x80,0xb4,0x50,
+0x00,0x40,0x28,0x21,0x24,0x06,0x00,0x01,0x27,0x84,0xb4,0x54,0x25,0x43,0xff,0xff,
+0x00,0x66,0x10,0x04,0x00,0xa2,0x10,0x24,0x14,0x40,0x00,0x07,0x00,0x00,0x00,0x00,
+0x8c,0x83,0xff,0xfc,0x00,0x00,0x00,0x00,0x00,0x66,0x10,0x04,0x00,0xa2,0x10,0x24,
+0x38,0x42,0x00,0x00,0x01,0x42,0x18,0x0a,0x25,0x4a,0x00,0x01,0x2d,0x42,0x00,0x14,
+0xac,0x83,0x00,0x00,0x14,0x40,0xff,0xf1,0x24,0x84,0x00,0x04,0x3c,0x0b,0xb0,0x03,
+0x00,0x00,0x50,0x21,0x3c,0x0c,0x80,0x00,0x27,0x89,0xb4,0xa0,0x35,0x6b,0x01,0x20,
+0x8d,0x68,0x00,0x00,0x8d,0x23,0x00,0x04,0x01,0x0c,0x10,0x24,0x00,0x02,0x17,0xc2,
+0x11,0x03,0x00,0x37,0xa1,0x22,0x00,0xdc,0xa1,0x20,0x00,0xd5,0xa1,0x20,0x00,0xd6,
+0x01,0x20,0x30,0x21,0x00,0x00,0x38,0x21,0x00,0x00,0x28,0x21,0x01,0x20,0x20,0x21,
+0x00,0xa8,0x10,0x06,0x30,0x42,0x00,0x01,0x10,0xe0,0x00,0x10,0xa0,0x82,0x00,0x0a,
+0x90,0x82,0x00,0x07,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x31,0x24,0xa2,0xff,0xff,
+0xa0,0x82,0x00,0x08,0x90,0x82,0x00,0x0a,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x09,
+0x00,0x00,0x00,0x00,0x90,0x83,0x00,0x08,0x00,0x00,0x00,0x00,0x00,0x03,0x10,0x40,
+0x00,0x43,0x10,0x21,0x00,0x46,0x10,0x21,0xa0,0x45,0x00,0x09,0x90,0x82,0x00,0x0a,
+0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x07,0x00,0x00,0x00,0x00,0x14,0xe0,0x00,0x04,
+0x00,0x00,0x00,0x00,0xa0,0xc5,0x00,0xd5,0x24,0x07,0x00,0x01,0xa0,0x85,0x00,0x08,
+0xa0,0xc5,0x00,0xd6,0x24,0xa5,0x00,0x01,0x2c,0xa2,0x00,0x1c,0x14,0x40,0xff,0xe0,
+0x24,0x84,0x00,0x03,0x90,0xc4,0x00,0xd5,0x00,0x00,0x28,0x21,0x00,0xa4,0x10,0x2b,
+0x10,0x40,0x00,0x0b,0x00,0x00,0x00,0x00,0x00,0xc0,0x18,0x21,0xa0,0x64,0x00,0x08,
+0x90,0xc2,0x00,0xd5,0x24,0xa5,0x00,0x01,0xa0,0x62,0x00,0x09,0x90,0xc4,0x00,0xd5,
+0x00,0x00,0x00,0x00,0x00,0xa4,0x10,0x2b,0x14,0x40,0xff,0xf8,0x24,0x63,0x00,0x03,
+0x25,0x4a,0x00,0x01,0x2d,0x42,0x00,0x08,0xad,0x28,0x00,0x04,0x25,0x6b,0x00,0x04,
+0x14,0x40,0xff,0xbf,0x25,0x29,0x00,0xec,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,
+0x90,0x82,0x00,0x05,0x08,0x00,0x21,0x0d,0xa0,0x82,0x00,0x08,0x97,0x85,0x8b,0x7a,
+0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x01,0x27,0xbd,0xff,0xe8,0x34,0x63,0x00,0x20,
+0x24,0x42,0x84,0xfc,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x14,0xac,0x62,0x00,0x00,
+0x30,0x90,0x00,0xff,0x00,0x05,0x28,0x42,0x00,0x00,0x48,0x21,0x27,0x8f,0xb4,0xa4,
+0x00,0x00,0x50,0x21,0x00,0x00,0x58,0x21,0x27,0x98,0xb5,0x84,0x27,0x99,0xb5,0x80,
+0x27,0x8e,0xb5,0x7e,0x27,0x8c,0xb4,0xa8,0x27,0x8d,0xb5,0x00,0x27,0x88,0xb5,0x78,
+0x00,0x0a,0x18,0x80,0x01,0x6f,0x10,0x21,0xac,0x40,0x00,0x00,0xac,0x45,0x00,0x58,
+0x00,0x6e,0x20,0x21,0x00,0x78,0x10,0x21,0xa1,0x00,0xff,0xfc,0xad,0x00,0x00,0x00,
+0xa1,0x00,0x00,0x04,0xa1,0x00,0x00,0x05,0xad,0x00,0xff,0xf8,0x00,0x79,0x18,0x21,
+0x24,0x06,0x00,0x01,0x24,0xc6,0xff,0xff,0xa0,0x80,0x00,0x00,0xa4,0x60,0x00,0x00,
+0xac,0x40,0x00,0x00,0x24,0x63,0x00,0x02,0x24,0x42,0x00,0x04,0x04,0xc1,0xff,0xf9,
+0x24,0x84,0x00,0x01,0x00,0x0a,0x10,0x80,0x00,0x4d,0x20,0x21,0x00,0x00,0x30,0x21,
+0x00,0x4c,0x18,0x21,0x27,0x87,0x81,0x64,0x8c,0xe2,0x00,0x00,0x24,0xe7,0x00,0x04,
+0xac,0x82,0x00,0x00,0xa0,0x66,0x00,0x00,0xa0,0x66,0x00,0x01,0x24,0xc6,0x00,0x01,
+0x28,0xc2,0x00,0x1c,0xa0,0x60,0x00,0x02,0x24,0x84,0x00,0x04,0x14,0x40,0xff,0xf6,
+0x24,0x63,0x00,0x03,0x25,0x29,0x00,0x01,0x29,0x22,0x00,0x08,0x25,0x4a,0x00,0x3b,
+0x25,0x08,0x00,0xec,0x14,0x40,0xff,0xd6,0x25,0x6b,0x00,0xec,0xa7,0x80,0x81,0x60,
+0x00,0x00,0x48,0x21,0x27,0x83,0xb4,0x50,0xac,0x69,0x00,0x00,0x25,0x29,0x00,0x01,
+0x29,0x22,0x00,0x0c,0x14,0x40,0xff,0xfc,0x24,0x63,0x00,0x04,0x0c,0x00,0x20,0xd2,
+0x00,0x00,0x00,0x00,0x2e,0x04,0x00,0x14,0x27,0x83,0xb4,0xa0,0x24,0x09,0x00,0x07,
+0x10,0x80,0x00,0x0a,0x00,0x00,0x00,0x00,0x90,0x62,0x00,0xd5,0x25,0x29,0xff,0xff,
+0xa0,0x62,0x00,0x00,0x05,0x21,0xff,0xfa,0x24,0x63,0x00,0xec,0x8f,0xbf,0x00,0x14,
+0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x90,0x62,0x00,0xd6,
+0x08,0x00,0x21,0x90,0x25,0x29,0xff,0xff,0x30,0x84,0x00,0xff,0x00,0x04,0x11,0x00,
+0x00,0x44,0x10,0x23,0x00,0x02,0x10,0x80,0x00,0x44,0x10,0x23,0x00,0x02,0x10,0x80,
+0x27,0x83,0xb4,0xa0,0x00,0x43,0x60,0x21,0x3c,0x04,0xb0,0x03,0x3c,0x02,0x80,0x01,
+0x34,0x84,0x00,0x20,0x24,0x42,0x86,0x68,0x30,0xc6,0x00,0xff,0x93,0xaa,0x00,0x13,
+0x30,0xa5,0x00,0xff,0x30,0xe7,0x00,0xff,0xac,0x82,0x00,0x00,0x10,0xc0,0x00,0xe8,
+0x25,0x8f,0x00,0xd0,0x91,0x82,0x00,0x00,0x00,0x00,0x00,0x00,0x24,0x42,0xff,0xfc,
+0x2c,0x43,0x00,0x18,0x10,0x60,0x00,0xc7,0x3c,0x03,0x80,0x01,0x00,0x02,0x10,0x80,
+0x24,0x63,0x02,0x90,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x80,0x00,0x08,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x30,0x14,0x40,0x00,0x1c,
+0x00,0x00,0x00,0x00,0x10,0xa0,0x00,0x17,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,
+0x10,0xa2,0x00,0x11,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0x00,0x0c,
+0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x03,0x10,0xa2,0x00,0x06,0x00,0x00,0x00,0x00,
+0x8d,0x82,0x00,0xd0,0x00,0x00,0x00,0x00,0x24,0x42,0xff,0xe0,0x03,0xe0,0x00,0x08,
+0xad,0x82,0x00,0xd0,0x8d,0x82,0x00,0xd0,0x08,0x00,0x21,0xcb,0x24,0x42,0xff,0xe8,
+0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0,0x08,0x00,0x21,0xcb,
+0x24,0x42,0x00,0x01,0x8d,0x82,0x00,0xd0,0x08,0x00,0x21,0xcb,0x24,0x42,0x00,0x02,
+0x10,0xa0,0xff,0xf9,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0x00,0x0a,
+0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0xe9,0x00,0x00,0x00,0x00,
+0x24,0x02,0x00,0x03,0x10,0xa2,0xff,0xe6,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0,
+0x08,0x00,0x21,0xcb,0x24,0x42,0xff,0xd0,0x8d,0x82,0x00,0xd0,0x08,0x00,0x21,0xcb,
+0x24,0x42,0xff,0xfc,0x10,0xa0,0xff,0xeb,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,
+0x10,0xa2,0xff,0xe5,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0xe0,
+0x24,0x02,0x00,0x03,0x14,0xa2,0xff,0xdb,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0,
+0x08,0x00,0x21,0xcb,0x24,0x42,0xff,0xf8,0x2d,0x42,0x00,0x19,0x14,0x40,0xff,0xc5,
+0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xdb,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,
+0x10,0xa2,0xff,0xd5,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0xd0,
+0x24,0x02,0x00,0x03,0x10,0xa2,0xff,0xf1,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0,
+0x08,0x00,0x21,0xcb,0x24,0x42,0xff,0xf0,0x2d,0x42,0x00,0x1b,0x10,0x40,0xff,0xf1,
+0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xcb,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,
+0x10,0xa2,0xff,0xc5,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x14,0xa2,0xff,0xb5,
+0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0,0x08,0x00,0x21,0xcb,0x24,0x42,0xff,0xf4,
+0x2d,0x42,0x00,0x1e,0x10,0x40,0xff,0xe3,0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xbd,
+0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0xb5,0x24,0x02,0x00,0x02,
+0x10,0xa2,0xff,0xd6,0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xc6,0x24,0x02,0x00,0x03,
+0x2d,0x42,0x00,0x23,0x10,0x40,0xff,0xd7,0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xae,
+0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0xa9,0x24,0x02,0x00,0x02,
+0x14,0xa2,0xff,0xb7,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0x03,0x00,0x00,0x00,0x00,
+0x2d,0x42,0x00,0x25,0x10,0x40,0xff,0xcb,0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xd8,
+0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x16,0x14,0x40,0x00,0x0e,0x00,0x00,0x00,0x00,
+0x10,0xa0,0xff,0xa0,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x9a,
+0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x95,0x24,0x02,0x00,0x03,
+0x14,0xa2,0xff,0xb6,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0,0x08,0x00,0x21,0xcb,
+0x24,0x42,0xff,0xfa,0x10,0xa0,0xff,0x93,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,
+0x10,0xa2,0xff,0x8d,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x88,
+0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xf3,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x17,
+0x14,0x40,0xff,0xac,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0x34,0x00,0x00,0x00,0x00,
+0x2d,0x42,0x00,0x19,0x10,0x40,0xff,0xe2,0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0x81,
+0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x7b,0x00,0x00,0x00,0x00,
+0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x76,0x24,0x02,0x00,0x03,0x10,0xa2,0xff,0x97,
+0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xc8,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0x51,
+0x2d,0x42,0x00,0x1b,0x2d,0x42,0x00,0x1e,0x10,0x40,0xff,0xde,0x00,0x00,0x00,0x00,
+0x10,0xa0,0xff,0x70,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x6a,
+0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x65,0x24,0x02,0x00,0x03,
+0x10,0xa2,0xff,0x96,0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xc8,0x00,0x00,0x00,0x00,
+0x2d,0x42,0x00,0x23,0x14,0x40,0xff,0xf2,0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xf9,
+0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xf7,0x2d,0x42,0x00,0x25,0x08,0x00,0x22,0x2d,
+0x2d,0x42,0x00,0x27,0x10,0xa0,0xff,0x5b,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,
+0x10,0xa2,0xff,0x55,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x50,
+0x24,0x02,0x00,0x03,0x14,0xa2,0xff,0x71,0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xe6,
+0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x27,0x14,0x40,0xff,0xad,0x00,0x00,0x00,0x00,
+0x08,0x00,0x22,0x79,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x2a,0x14,0x40,0xff,0xd8,
+0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xe9,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x2c,
+0x14,0x40,0xff,0x78,0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xbd,0x00,0x00,0x00,0x00,
+0x91,0x86,0x00,0x00,0x91,0x83,0x00,0xd4,0x25,0x8d,0x00,0x5c,0x30,0xc4,0x00,0xff,
+0x00,0x04,0x10,0x40,0x00,0x44,0x10,0x21,0x00,0x04,0x48,0x80,0x01,0x82,0x58,0x21,
+0x01,0x89,0x40,0x21,0x25,0x78,0x00,0x08,0x10,0x60,0x00,0x37,0x25,0x0e,0x00,0x60,
+0x2c,0xa2,0x00,0x03,0x14,0x40,0x00,0x25,0x00,0x00,0x00,0x00,0x91,0x82,0x00,0xdd,
+0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x1e,0x00,0x00,0x00,0x00,0x27,0x87,0x81,0x64,
+0x01,0x27,0x10,0x21,0x8c,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0xad,0x03,0x00,0x60,
+0x91,0x62,0x00,0x08,0x00,0x00,0x00,0x00,0x00,0x40,0x30,0x21,0xa1,0x82,0x00,0x00,
+0x30,0xc2,0x00,0xff,0x00,0x02,0x10,0x80,0x00,0x47,0x10,0x21,0x8c,0x43,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x03,0x18,0x42,0xad,0xa3,0x00,0x00,0x91,0x84,0x00,0x00,
+0x8d,0xc5,0x00,0x00,0x00,0x04,0x20,0x80,0x00,0x87,0x10,0x21,0x8c,0x43,0x00,0x00,
+0x00,0x05,0x28,0x40,0x00,0x8c,0x20,0x21,0x00,0x03,0x18,0x80,0x00,0xa3,0x10,0x2b,
+0x00,0x62,0x28,0x0a,0xac,0x85,0x00,0x60,0x03,0xe0,0x00,0x08,0xa1,0x80,0x00,0xd4,
+0x27,0x87,0x81,0x64,0x08,0x00,0x22,0xb0,0xa1,0x80,0x00,0xdd,0x27,0x82,0x81,0xd4,
+0x8d,0x83,0x00,0xd8,0x00,0x82,0x10,0x21,0x90,0x44,0x00,0x00,0x24,0x63,0x00,0x01,
+0x00,0x64,0x20,0x2b,0x14,0x80,0xff,0x02,0xad,0x83,0x00,0xd8,0x8d,0x02,0x00,0x60,
+0xa1,0x80,0x00,0xd4,0x00,0x02,0x1f,0xc2,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x43,
+0x03,0xe0,0x00,0x08,0xad,0x82,0x00,0x5c,0x10,0xe0,0x00,0x1d,0x24,0x83,0xff,0xfc,
+0x2c,0x62,0x00,0x18,0x10,0x40,0x01,0x10,0x00,0x03,0x10,0x80,0x3c,0x03,0x80,0x01,
+0x24,0x63,0x02,0xf0,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x80,0x00,0x08,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x30,0x14,0x40,0x00,0x65,
+0x00,0x00,0x00,0x00,0x10,0xa0,0x00,0x60,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,
+0x10,0xa2,0x00,0x5a,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0x00,0x08,
+0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x03,0x10,0xa2,0x00,0x51,0x00,0x00,0x00,0x00,
+0x8d,0x82,0x00,0xd0,0x00,0x00,0x00,0x00,0x24,0x42,0xff,0xe0,0xad,0x82,0x00,0xd0,
+0x8d,0xe3,0x00,0x00,0x8d,0xa2,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x43,0x10,0x21,
+0xad,0xa2,0x00,0x00,0xad,0xe0,0x00,0x00,0x8d,0xa3,0x00,0x00,0x8d,0xc4,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x83,0x10,0x2a,0x10,0x40,0x00,0x22,0x00,0x00,0x00,0x00,
+0x93,0x05,0x00,0x01,0x91,0x82,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x45,0x00,0x05,
+0x24,0x02,0x00,0x01,0xa1,0x85,0x00,0x00,0xa1,0x82,0x00,0xd4,0x03,0xe0,0x00,0x08,
+0xad,0x80,0x00,0xd8,0x91,0x82,0x00,0xdd,0x24,0x03,0x00,0x01,0x10,0x43,0x00,0x05,
+0x00,0x00,0x00,0x00,0xa1,0x83,0x00,0xd4,0xad,0x80,0x00,0xd8,0x03,0xe0,0x00,0x08,
+0xa1,0x83,0x00,0xdd,0x00,0x04,0x17,0xc2,0x00,0x82,0x10,0x21,0x00,0x02,0x10,0x43,
+0xad,0xa2,0x00,0x00,0x91,0x83,0x00,0x00,0x27,0x82,0x81,0x64,0x8d,0xc5,0x00,0x00,
+0x00,0x03,0x18,0x80,0x00,0x62,0x18,0x21,0x8c,0x64,0x00,0x00,0x00,0x05,0x28,0x40,
+0x00,0x04,0x18,0x80,0x00,0xa3,0x10,0x2b,0x00,0x62,0x28,0x0a,0x08,0x00,0x22,0xc2,
+0xad,0xc5,0x00,0x00,0x97,0x82,0x8b,0x7c,0x00,0x00,0x00,0x00,0x00,0x62,0x10,0x2a,
+0x10,0x40,0xfe,0xab,0x00,0x00,0x00,0x00,0x91,0x82,0x00,0xdd,0x00,0x00,0x00,0x00,
+0x14,0x40,0x00,0x15,0x00,0x00,0x00,0x00,0x91,0x83,0x00,0x00,0x27,0x82,0x81,0x64,
+0x00,0x03,0x18,0x80,0x00,0x62,0x10,0x21,0x8c,0x44,0x00,0x00,0x00,0x6c,0x18,0x21,
+0xac,0x64,0x00,0x60,0x93,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x10,0x80,
+0x01,0x82,0x10,0x21,0x24,0x4e,0x00,0x60,0xa1,0x85,0x00,0x00,0x8d,0xc2,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x02,0x1f,0xc2,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x43,
+0x03,0xe0,0x00,0x08,0xad,0xa2,0x00,0x00,0x08,0x00,0x23,0x37,0xa1,0x80,0x00,0xdd,
+0x8d,0x82,0x00,0xd0,0x08,0x00,0x22,0xf3,0x24,0x42,0xff,0xe8,0x8d,0x82,0x00,0xd0,
+0x08,0x00,0x22,0xf3,0x24,0x42,0x00,0x01,0x8d,0x82,0x00,0xd0,0x08,0x00,0x22,0xf3,
+0x24,0x42,0x00,0x02,0x10,0xa0,0xff,0xf9,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,
+0x10,0xa2,0x00,0x0a,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0xa0,
+0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x03,0x10,0xa2,0xff,0x9d,0x00,0x00,0x00,0x00,
+0x8d,0x82,0x00,0xd0,0x08,0x00,0x22,0xf3,0x24,0x42,0xff,0xd0,0x8d,0x82,0x00,0xd0,
+0x08,0x00,0x22,0xf3,0x24,0x42,0xff,0xfc,0x10,0xa0,0xff,0xeb,0x00,0x00,0x00,0x00,
+0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0xe5,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,
+0x10,0xa2,0xff,0x93,0x24,0x02,0x00,0x03,0x14,0xa2,0xff,0xdd,0x00,0x00,0x00,0x00,
+0x8d,0x82,0x00,0xd0,0x08,0x00,0x22,0xf3,0x24,0x42,0xff,0xf8,0x2d,0x42,0x00,0x19,
+0x14,0x40,0xff,0x7c,0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xdb,0x00,0x00,0x00,0x00,
+0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0xd5,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,
+0x10,0xa2,0xff,0x83,0x24,0x02,0x00,0x03,0x10,0xa2,0xff,0xf1,0x00,0x00,0x00,0x00,
+0x8d,0x82,0x00,0xd0,0x08,0x00,0x22,0xf3,0x24,0x42,0xff,0xf0,0x2d,0x42,0x00,0x1b,
+0x10,0x40,0xff,0xf1,0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xcb,0x00,0x00,0x00,0x00,
+0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0xc5,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,
+0x14,0xa2,0xff,0x6c,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0,0x08,0x00,0x22,0xf3,
+0x24,0x42,0xff,0xf4,0x2d,0x42,0x00,0x1e,0x10,0x40,0xff,0xe3,0x00,0x00,0x00,0x00,
+0x10,0xa0,0xff,0xbd,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x68,
+0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0xd6,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0xee,
+0x24,0x02,0x00,0x03,0x2d,0x42,0x00,0x23,0x10,0x40,0xff,0xd7,0x00,0x00,0x00,0x00,
+0x10,0xa0,0xff,0xae,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x5c,
+0x24,0x02,0x00,0x02,0x14,0xa2,0xff,0xb7,0x00,0x00,0x00,0x00,0x08,0x00,0x23,0x74,
+0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x25,0x10,0x40,0xff,0xcb,0x00,0x00,0x00,0x00,
+0x08,0x00,0x23,0x49,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x16,0x14,0x40,0x00,0x0e,
+0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xa0,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,
+0x10,0xa2,0xff,0x9a,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x48,
+0x24,0x02,0x00,0x03,0x14,0xa2,0xff,0xb6,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0,
+0x08,0x00,0x22,0xf3,0x24,0x42,0xff,0xfa,0x10,0xa0,0xff,0x93,0x00,0x00,0x00,0x00,
+0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x8d,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,
+0x10,0xa2,0xff,0x3b,0x00,0x00,0x00,0x00,0x08,0x00,0x23,0x64,0x00,0x00,0x00,0x00,
+0x2d,0x42,0x00,0x17,0x14,0x40,0xff,0xac,0x00,0x00,0x00,0x00,0x08,0x00,0x23,0xa5,
+0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x19,0x10,0x40,0xff,0xe2,0x00,0x00,0x00,0x00,
+0x10,0xa0,0xff,0x81,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x7b,
+0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x29,0x24,0x02,0x00,0x03,
+0x10,0xa2,0xff,0x97,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0xf0,0x00,0x00,0x00,0x00,
+0x08,0x00,0x23,0xc2,0x2d,0x42,0x00,0x1b,0x2d,0x42,0x00,0x1e,0x10,0x40,0xff,0xde,
+0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0x70,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,
+0x10,0xa2,0xff,0x6a,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x18,
+0x24,0x02,0x00,0x03,0x10,0xa2,0xff,0x96,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0xf0,
+0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x23,0x14,0x40,0xff,0xf2,0x00,0x00,0x00,0x00,
+0x08,0x00,0x23,0x6a,0x00,0x00,0x00,0x00,0x08,0x00,0x23,0x68,0x2d,0x42,0x00,0x25,
+0x08,0x00,0x23,0x9e,0x2d,0x42,0x00,0x27,0x10,0xa0,0xff,0x5b,0x00,0x00,0x00,0x00,
+0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x55,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,
+0x10,0xa2,0xff,0x03,0x24,0x02,0x00,0x03,0x14,0xa2,0xff,0x71,0x00,0x00,0x00,0x00,
+0x08,0x00,0x23,0x57,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x27,0x14,0x40,0xff,0xad,
+0x00,0x00,0x00,0x00,0x08,0x00,0x23,0xea,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x2a,
+0x14,0x40,0xff,0xd8,0x00,0x00,0x00,0x00,0x08,0x00,0x23,0x5a,0x00,0x00,0x00,0x00,
+0x2d,0x42,0x00,0x2c,0x14,0x40,0xff,0x78,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0xe5,
+0x00,0x00,0x00,0x00,0x27,0xbd,0xff,0xe8,0x3c,0x02,0xb0,0x03,0xaf,0xbf,0x00,0x14,
+0xaf,0xb0,0x00,0x10,0x34,0x42,0x01,0x18,0x3c,0x03,0xb0,0x03,0x8c,0x50,0x00,0x00,
+0x34,0x63,0x01,0x2c,0x90,0x62,0x00,0x00,0x32,0x05,0x00,0x01,0xa3,0x82,0x80,0x10,
+0x14,0xa0,0x00,0x14,0x30,0x44,0x00,0xff,0x32,0x02,0x01,0x00,0x14,0x40,0x00,0x09,
+0x00,0x00,0x00,0x00,0x32,0x02,0x08,0x00,0x10,0x40,0x00,0x02,0x24,0x02,0x00,0x01,
+0xa3,0x82,0xbc,0x18,0x8f,0xbf,0x00,0x14,0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x18,0x0c,0x00,0x05,0x37,0x00,0x00,0x00,0x00,0x26,0x02,0xff,0x00,
+0xa3,0x80,0xbc,0x18,0x3c,0x01,0xb0,0x03,0xac,0x22,0x01,0x18,0x08,0x00,0x24,0x16,
+0x32,0x02,0x08,0x00,0x0c,0x00,0x21,0x3f,0x00,0x00,0x00,0x00,0x26,0x02,0xff,0xff,
+0x3c,0x01,0xb0,0x03,0xac,0x22,0x01,0x18,0x08,0x00,0x24,0x13,0x32,0x02,0x01,0x00,
+0x27,0xbd,0xff,0xe0,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0xd0,0xaf,0xbf,0x00,0x18,
+0x8c,0x43,0x00,0x00,0x3c,0x02,0x00,0x40,0x24,0x07,0x0f,0xff,0x00,0x03,0x33,0x02,
+0x00,0x03,0x2d,0x02,0x00,0x03,0x43,0x02,0x30,0x69,0x0f,0xff,0x00,0x62,0x18,0x24,
+0x30,0xa5,0x00,0x03,0x30,0xc6,0x00,0xff,0x10,0x60,0x00,0x08,0x31,0x08,0x00,0xff,
+0x01,0x00,0x30,0x21,0x0c,0x00,0x24,0xdf,0xaf,0xa9,0x00,0x10,0x8f,0xbf,0x00,0x18,
+0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x0c,0x00,0x25,0x31,
+0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0xd4,0x08,0x00,0x24,0x3f,
+0xac,0x62,0x00,0x00,0x27,0xbd,0xff,0xc0,0x3c,0x02,0xb0,0x03,0xaf,0xbe,0x00,0x38,
+0xaf,0xb5,0x00,0x2c,0xaf,0xb1,0x00,0x1c,0xaf,0xb0,0x00,0x18,0xaf,0xbf,0x00,0x3c,
+0xaf,0xb7,0x00,0x34,0xaf,0xb6,0x00,0x30,0xaf,0xb4,0x00,0x28,0xaf,0xb3,0x00,0x24,
+0xaf,0xb2,0x00,0x20,0x34,0x42,0x00,0x3f,0x90,0x43,0x00,0x00,0x00,0x80,0x80,0x21,
+0x00,0x00,0xf0,0x21,0x00,0x00,0x88,0x21,0x10,0x60,0x00,0x76,0x00,0x00,0xa8,0x21,
+0x3c,0x01,0xb0,0x03,0xa0,0x20,0x00,0x3f,0x00,0x10,0x12,0x02,0x24,0x04,0x06,0x14,
+0x0c,0x00,0x06,0xd1,0x30,0x54,0x00,0x0f,0x24,0x04,0x06,0x14,0x0c,0x00,0x06,0xd1,
+0xaf,0xa2,0x00,0x10,0x3c,0x03,0x00,0xff,0x34,0x63,0xff,0xff,0x32,0x10,0x00,0x7f,
+0x00,0x43,0x10,0x24,0x00,0x10,0x86,0x00,0x02,0x02,0x80,0x25,0x02,0x00,0x28,0x21,
+0x24,0x04,0x06,0x14,0x3c,0x13,0xbf,0xff,0x0c,0x00,0x06,0xbf,0x3c,0x16,0xb0,0x03,
+0x00,0x00,0x90,0x21,0x3c,0x17,0x40,0x00,0x36,0x73,0xff,0xff,0x36,0xd6,0x00,0x3e,
+0x0c,0x00,0x06,0xd1,0x24,0x04,0x04,0x00,0x00,0x57,0x10,0x25,0x00,0x40,0x28,0x21,
+0x0c,0x00,0x06,0xbf,0x24,0x04,0x04,0x00,0x00,0x00,0x80,0x21,0x0c,0x00,0x25,0xf9,
+0x00,0x00,0x00,0x00,0x26,0x03,0x00,0x01,0x10,0x40,0x00,0x46,0x30,0x70,0x00,0xff,
+0x12,0x00,0xff,0xfa,0x00,0x00,0x00,0x00,0x0c,0x00,0x06,0xd1,0x24,0x04,0x04,0x00,
+0x00,0x53,0x10,0x24,0x00,0x40,0x28,0x21,0x0c,0x00,0x06,0xbf,0x24,0x04,0x04,0x00,
+0x24,0x02,0x00,0x01,0x12,0x82,0x00,0x37,0x00,0x00,0x00,0x00,0x12,0x80,0x00,0x35,
+0x00,0x00,0x00,0x00,0x32,0x31,0x00,0x7f,0x12,0x20,0x00,0x04,0x24,0x03,0x00,0x04,
+0x27,0xc2,0x00,0x01,0x30,0x5e,0x00,0xff,0x02,0xb1,0xa8,0x21,0x12,0x43,0x00,0x2a,
+0x3c,0x03,0xb0,0x03,0x02,0x43,0x10,0x21,0xa0,0x51,0x00,0x34,0x26,0x42,0x00,0x01,
+0x30,0x52,0x00,0xff,0x2e,0x43,0x00,0x05,0x14,0x60,0xff,0xd9,0x00,0x00,0x00,0x00,
+0x8f,0xa5,0x00,0x10,0x0c,0x00,0x06,0xbf,0x24,0x04,0x06,0x14,0x12,0xa0,0x00,0x0e,
+0x3c,0x02,0xb0,0x03,0x13,0xc0,0x00,0x0d,0x34,0x42,0x00,0x3c,0x00,0x15,0x10,0x40,
+0x00,0x55,0x10,0x21,0x00,0x02,0x10,0xc0,0x00,0x55,0x10,0x21,0x00,0x02,0xa8,0x80,
+0x02,0xbe,0x00,0x1b,0x17,0xc0,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x0d,
+0x00,0x00,0xa8,0x12,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x3c,0x3c,0x03,0xb0,0x03,
+0x3c,0x04,0xb0,0x03,0xa4,0x55,0x00,0x00,0x34,0x63,0x00,0x1c,0x34,0x84,0x00,0x1d,
+0x24,0x02,0x00,0x01,0xa0,0x60,0x00,0x00,0xa0,0x82,0x00,0x00,0x7b,0xbe,0x01,0xfc,
+0x7b,0xb6,0x01,0xbc,0x7b,0xb4,0x01,0x7c,0x7b,0xb2,0x01,0x3c,0x7b,0xb0,0x00,0xfc,
+0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x40,0xa2,0xd1,0x00,0x00,0x08,0x00,0x24,0x98,
+0x26,0x42,0x00,0x01,0x0c,0x00,0x06,0xd1,0x24,0x04,0x04,0xfc,0x08,0x00,0x24,0x8d,
+0x00,0x40,0x88,0x21,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0x3c,0x3c,0x04,0xb0,0x03,
+0x3c,0x05,0xb0,0x03,0xa4,0x60,0x00,0x00,0x34,0x84,0x00,0x1c,0x34,0xa5,0x00,0x1d,
+0x24,0x02,0x00,0x02,0x24,0x03,0x00,0x01,0xa0,0x82,0x00,0x00,0x08,0x00,0x24,0xb7,
+0xa0,0xa3,0x00,0x00,0x0c,0x00,0x17,0x99,0x00,0x00,0x00,0x00,0x10,0x40,0xff,0x8b,
+0x00,0x10,0x12,0x02,0x3c,0x02,0xb0,0x03,0x3c,0x04,0xb0,0x03,0x34,0x42,0x00,0x3c,
+0x34,0x84,0x00,0x14,0x24,0x03,0x00,0x01,0xa4,0x40,0x00,0x00,0x3c,0x01,0xb0,0x03,
+0xa0,0x23,0x00,0x3f,0x08,0x00,0x24,0xb7,0xac,0x90,0x00,0x00,0x27,0xbd,0xff,0xd8,
+0xaf,0xb0,0x00,0x10,0x30,0xd0,0x00,0xff,0x2e,0x02,0x00,0x2e,0xaf,0xb2,0x00,0x18,
+0xaf,0xb1,0x00,0x14,0xaf,0xbf,0x00,0x20,0xaf,0xb3,0x00,0x1c,0x30,0xb1,0x00,0xff,
+0x14,0x40,0x00,0x06,0x00,0x80,0x90,0x21,0x8f,0xbf,0x00,0x20,0x7b,0xb2,0x00,0xfc,
+0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x28,0x2e,0x13,0x00,0x10,
+0x24,0x05,0x00,0x14,0x0c,0x00,0x13,0xa4,0x24,0x06,0x01,0x07,0x12,0x60,0x00,0x38,
+0x02,0x00,0x30,0x21,0x8f,0xa2,0x00,0x38,0x30,0xc3,0x00,0x3f,0x3c,0x04,0xb0,0x09,
+0x00,0x02,0x14,0x00,0x00,0x43,0x30,0x25,0x34,0x84,0x01,0x60,0x90,0x82,0x00,0x00,
+0x00,0x00,0x00,0x00,0x14,0x40,0xff,0xfd,0x24,0x02,0x00,0x01,0x12,0x22,0x00,0x2a,
+0x2a,0x22,0x00,0x02,0x14,0x40,0x00,0x24,0x24,0x02,0x00,0x02,0x12,0x22,0x00,0x20,
+0x24,0x02,0x00,0x03,0x12,0x22,0x00,0x19,0x00,0x00,0x00,0x00,0x16,0x60,0xff,0xe2,
+0x24,0x02,0x00,0x01,0x12,0x22,0x00,0x13,0x2a,0x22,0x00,0x02,0x14,0x40,0x00,0x0d,
+0x24,0x02,0x00,0x02,0x12,0x22,0x00,0x09,0x24,0x02,0x00,0x03,0x16,0x22,0xff,0xda,
+0x00,0x00,0x00,0x00,0x24,0x04,0x08,0x4c,0x24,0x05,0xff,0xff,0x0c,0x00,0x13,0x5f,
+0x3c,0x06,0x0c,0xb8,0x08,0x00,0x24,0xea,0x00,0x00,0x00,0x00,0x08,0x00,0x25,0x12,
+0x24,0x04,0x08,0x48,0x16,0x20,0xff,0xd0,0x00,0x00,0x00,0x00,0x08,0x00,0x25,0x12,
+0x24,0x04,0x08,0x40,0x08,0x00,0x25,0x12,0x24,0x04,0x08,0x44,0x24,0x04,0x08,0x4c,
+0x0c,0x00,0x13,0x5f,0x24,0x05,0xff,0xff,0x08,0x00,0x25,0x07,0x00,0x00,0x00,0x00,
+0x08,0x00,0x25,0x20,0x24,0x04,0x08,0x48,0x16,0x20,0xff,0xe0,0x00,0x00,0x00,0x00,
+0x08,0x00,0x25,0x20,0x24,0x04,0x08,0x40,0x08,0x00,0x25,0x20,0x24,0x04,0x08,0x44,
+0x02,0x40,0x20,0x21,0x0c,0x00,0x25,0x71,0x02,0x20,0x28,0x21,0x08,0x00,0x24,0xf5,
+0x00,0x40,0x30,0x21,0x27,0xbd,0xff,0xd8,0x2c,0xc2,0x00,0x2e,0xaf,0xb2,0x00,0x18,
+0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x20,0xaf,0xb3,0x00,0x1c,
+0x00,0xc0,0x80,0x21,0x30,0xb1,0x00,0xff,0x00,0x80,0x90,0x21,0x14,0x40,0x00,0x07,
+0x00,0x00,0x18,0x21,0x8f,0xbf,0x00,0x20,0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc,
+0x00,0x60,0x10,0x21,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x28,0x2e,0x13,0x00,0x10,
+0x24,0x05,0x00,0x14,0x0c,0x00,0x13,0xa4,0x24,0x06,0x01,0x07,0x12,0x60,0x00,0x24,
+0x02,0x00,0x30,0x21,0x3c,0x03,0xb0,0x09,0x34,0x63,0x01,0x60,0x90,0x62,0x00,0x00,
+0x00,0x00,0x00,0x00,0x14,0x40,0xff,0xfd,0x30,0xc5,0x00,0x3f,0x0c,0x00,0x25,0xae,
+0x02,0x20,0x20,0x21,0x16,0x60,0x00,0x0a,0x00,0x40,0x80,0x21,0x24,0x02,0x00,0x01,
+0x12,0x22,0x00,0x15,0x2a,0x22,0x00,0x02,0x14,0x40,0x00,0x0f,0x24,0x02,0x00,0x02,
+0x12,0x22,0x00,0x0b,0x24,0x02,0x00,0x03,0x12,0x22,0x00,0x03,0x00,0x00,0x00,0x00,
+0x08,0x00,0x25,0x3d,0x02,0x00,0x18,0x21,0x24,0x04,0x08,0x4c,0x24,0x05,0xff,0xff,
+0x0c,0x00,0x13,0x5f,0x3c,0x06,0x0c,0xb8,0x08,0x00,0x25,0x3d,0x02,0x00,0x18,0x21,
+0x08,0x00,0x25,0x5f,0x24,0x04,0x08,0x48,0x16,0x20,0xff,0xf5,0x00,0x00,0x00,0x00,
+0x08,0x00,0x25,0x5f,0x24,0x04,0x08,0x40,0x08,0x00,0x25,0x5f,0x24,0x04,0x08,0x44,
+0x02,0x40,0x20,0x21,0x0c,0x00,0x25,0x71,0x02,0x20,0x28,0x21,0x08,0x00,0x25,0x49,
+0x00,0x40,0x30,0x21,0x27,0xbd,0xff,0xe8,0x2c,0xc2,0x00,0x1f,0xaf,0xb0,0x00,0x10,
+0xaf,0xbf,0x00,0x14,0x00,0xc0,0x80,0x21,0x14,0x40,0x00,0x1d,0x30,0xa5,0x00,0xff,
+0x24,0x02,0x00,0x01,0x10,0xa2,0x00,0x18,0x28,0xa2,0x00,0x02,0x14,0x40,0x00,0x12,
+0x24,0x02,0x00,0x02,0x10,0xa2,0x00,0x0e,0x24,0x02,0x00,0x03,0x10,0xa2,0x00,0x07,
+0x24,0x04,0x08,0x4c,0x26,0x10,0xff,0xe2,0x02,0x00,0x10,0x21,0x8f,0xbf,0x00,0x14,
+0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x24,0x05,0xff,0xff,
+0x0c,0x00,0x13,0x5f,0x3c,0x06,0x0d,0xf8,0x08,0x00,0x25,0x82,0x26,0x10,0xff,0xe2,
+0x08,0x00,0x25,0x87,0x24,0x04,0x08,0x48,0x14,0xa0,0xff,0xf2,0x24,0x04,0x08,0x40,
+0x08,0x00,0x25,0x88,0x24,0x05,0xff,0xff,0x08,0x00,0x25,0x87,0x24,0x04,0x08,0x44,
+0x2c,0xc2,0x00,0x10,0x14,0x40,0xff,0xec,0x24,0x02,0x00,0x01,0x10,0xa2,0x00,0x14,
+0x28,0xa2,0x00,0x02,0x14,0x40,0x00,0x0e,0x24,0x02,0x00,0x02,0x10,0xa2,0x00,0x0a,
+0x24,0x02,0x00,0x03,0x10,0xa2,0x00,0x03,0x24,0x04,0x08,0x4c,0x08,0x00,0x25,0x82,
+0x26,0x10,0xff,0xf1,0x24,0x05,0xff,0xff,0x0c,0x00,0x13,0x5f,0x3c,0x06,0x0d,0xb8,
+0x08,0x00,0x25,0x82,0x26,0x10,0xff,0xf1,0x08,0x00,0x25,0xa1,0x24,0x04,0x08,0x48,
+0x14,0xa0,0xff,0xf6,0x24,0x04,0x08,0x40,0x08,0x00,0x25,0xa2,0x24,0x05,0xff,0xff,
+0x08,0x00,0x25,0xa1,0x24,0x04,0x08,0x44,0x27,0xbd,0xff,0xe8,0x30,0x84,0x00,0xff,
+0x24,0x02,0x00,0x01,0x10,0x82,0x00,0x39,0xaf,0xbf,0x00,0x10,0x28,0x82,0x00,0x02,
+0x14,0x40,0x00,0x27,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0x82,0x00,0x17,
+0x00,0xa0,0x30,0x21,0x24,0x02,0x00,0x03,0x10,0x82,0x00,0x05,0x24,0x04,0x08,0x3c,
+0x8f,0xbf,0x00,0x10,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,
+0x0c,0x00,0x13,0x5f,0x3c,0x05,0x3f,0x00,0x24,0x04,0x08,0x3c,0x3c,0x05,0x80,0x00,
+0x0c,0x00,0x13,0x5f,0x00,0x00,0x30,0x21,0x24,0x04,0x08,0x3c,0x3c,0x05,0x80,0x00,
+0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x01,0x24,0x04,0x08,0xac,0x0c,0x00,0x13,0x41,
+0x24,0x05,0x0f,0xff,0x08,0x00,0x25,0xbc,0x00,0x00,0x00,0x00,0x24,0x04,0x08,0x34,
+0x0c,0x00,0x13,0x5f,0x3c,0x05,0x3f,0x00,0x24,0x04,0x08,0x34,0x3c,0x05,0x80,0x00,
+0x0c,0x00,0x13,0x5f,0x00,0x00,0x30,0x21,0x24,0x04,0x08,0x34,0x3c,0x05,0x80,0x00,
+0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x01,0x08,0x00,0x25,0xcb,0x24,0x04,0x08,0xa8,
+0x14,0x80,0xff,0xdf,0x00,0xa0,0x30,0x21,0x24,0x04,0x08,0x24,0x0c,0x00,0x13,0x5f,
+0x3c,0x05,0x3f,0x00,0x24,0x04,0x08,0x24,0x3c,0x05,0x80,0x00,0x0c,0x00,0x13,0x5f,
+0x00,0x00,0x30,0x21,0x24,0x04,0x08,0x24,0x3c,0x05,0x80,0x00,0x0c,0x00,0x13,0x5f,
+0x24,0x06,0x00,0x01,0x08,0x00,0x25,0xcb,0x24,0x04,0x08,0xa0,0x00,0xa0,0x30,0x21,
+0x24,0x04,0x08,0x2c,0x0c,0x00,0x13,0x5f,0x3c,0x05,0x3f,0x00,0x24,0x04,0x08,0x2c,
+0x3c,0x05,0x80,0x00,0x0c,0x00,0x13,0x5f,0x00,0x00,0x30,0x21,0x24,0x04,0x08,0x2c,
+0x3c,0x05,0x80,0x00,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x01,0x08,0x00,0x25,0xcb,
+0x24,0x04,0x08,0xa4,0x3c,0x05,0x00,0x14,0x3c,0x02,0xb0,0x05,0x34,0x42,0x04,0x20,
+0x3c,0x06,0xc0,0x00,0x3c,0x03,0xb0,0x05,0x3c,0x04,0xb0,0x05,0x34,0xa5,0x17,0x09,
+0xac,0x45,0x00,0x00,0x34,0xc6,0x05,0x07,0x34,0x63,0x04,0x24,0x34,0x84,0x02,0x28,
+0x3c,0x07,0xb0,0x05,0x24,0x02,0x00,0x20,0xac,0x66,0x00,0x00,0x34,0xe7,0x04,0x50,
+0xa0,0x82,0x00,0x00,0x90,0xe2,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x03,
+0x10,0x40,0xff,0xfc,0x24,0x02,0x00,0x01,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,
+0x93,0x85,0x81,0xf1,0x24,0x02,0x00,0x01,0x14,0xa2,0x00,0x53,0x00,0x80,0x40,0x21,
+0x8c,0x89,0x00,0x04,0x3c,0x02,0xb0,0x01,0x01,0x22,0x30,0x21,0x8c,0xc3,0x00,0x04,
+0x3c,0x02,0x01,0x00,0x00,0x62,0x10,0x24,0x10,0x40,0x00,0x4b,0x30,0x62,0x00,0x08,
+0x10,0x45,0x00,0x59,0x00,0x00,0x00,0x00,0x94,0xc2,0x00,0x38,0x24,0x03,0x00,0xb4,
+0x30,0x44,0x00,0xff,0x10,0x83,0x00,0x61,0x24,0x02,0x00,0xc4,0x10,0x82,0x00,0x54,
+0x24,0x02,0x00,0x94,0x10,0x82,0x00,0x45,0x00,0x00,0x00,0x00,0x94,0xc2,0x00,0x38,
+0x00,0x00,0x00,0x00,0x30,0x47,0xff,0xff,0x30,0xe3,0x40,0xff,0x24,0x02,0x40,0x88,
+0x14,0x62,0x00,0x39,0x30,0xe3,0x03,0x00,0x24,0x02,0x03,0x00,0x10,0x62,0x00,0x38,
+0x00,0x00,0x00,0x00,0x94,0xc2,0x00,0x56,0x00,0x00,0x00,0x00,0x30,0x47,0xff,0xff,
+0x30,0xe2,0x00,0x80,0x14,0x40,0x00,0x30,0x3c,0x02,0xb0,0x01,0x01,0x22,0x30,0x21,
+0x94,0xc3,0x00,0x60,0x24,0x02,0x00,0x08,0x14,0x43,0x00,0x3b,0x00,0x00,0x00,0x00,
+0x90,0xc2,0x00,0x62,0x24,0x03,0x00,0x04,0x00,0x02,0x39,0x02,0x10,0xe3,0x00,0x15,
+0x24,0x02,0x00,0x06,0x14,0xe2,0x00,0x34,0x00,0x00,0x00,0x00,0x8d,0x05,0x01,0xac,
+0x94,0xc4,0x00,0x66,0x27,0x82,0x89,0x68,0x00,0x05,0x28,0x80,0x30,0x87,0xff,0xff,
+0x00,0xa2,0x28,0x21,0x00,0x07,0x1a,0x00,0x8c,0xa4,0x00,0x00,0x00,0x07,0x12,0x02,
+0x00,0x43,0x10,0x25,0x24,0x42,0x00,0x5e,0x24,0x03,0xc0,0x00,0x30,0x47,0xff,0xff,
+0x00,0x83,0x20,0x24,0x00,0x87,0x20,0x25,0xac,0xa4,0x00,0x00,0x08,0x00,0x26,0x76,
+0xad,0x07,0x00,0x10,0x8d,0x05,0x01,0xac,0x94,0xc4,0x00,0x64,0x27,0x82,0x89,0x68,
+0x00,0x05,0x28,0x80,0x30,0x87,0xff,0xff,0x00,0xa2,0x28,0x21,0x00,0x07,0x1a,0x00,
+0x8c,0xa4,0x00,0x00,0x00,0x07,0x12,0x02,0x00,0x43,0x10,0x25,0x24,0x42,0x00,0x36,
+0x3c,0x03,0xff,0xff,0x30,0x47,0xff,0xff,0x00,0x83,0x20,0x24,0x00,0x87,0x20,0x25,
+0xac,0xa4,0x00,0x00,0xad,0x07,0x00,0x10,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,
+0x94,0xc2,0x00,0x50,0x08,0x00,0x26,0x34,0x30,0x47,0xff,0xff,0x8d,0x04,0x01,0xac,
+0x27,0x83,0x89,0x68,0x00,0x04,0x20,0x80,0x00,0x83,0x20,0x21,0x8c,0x82,0x00,0x00,
+0x3c,0x03,0xff,0xff,0x00,0x43,0x10,0x24,0x34,0x42,0x00,0x2e,0xac,0x82,0x00,0x00,
+0x24,0x03,0x00,0x2e,0xad,0x03,0x00,0x10,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,
+0x8d,0x04,0x01,0xac,0x27,0x83,0x89,0x68,0x00,0x04,0x20,0x80,0x00,0x83,0x20,0x21,
+0x8c,0x82,0x00,0x00,0x3c,0x03,0xff,0xff,0x00,0x43,0x10,0x24,0x34,0x42,0x00,0x0e,
+0x24,0x03,0x00,0x0e,0x08,0x00,0x26,0x75,0xac,0x82,0x00,0x00,0x8d,0x04,0x01,0xac,
+0x27,0x83,0x89,0x68,0x00,0x04,0x20,0x80,0x00,0x83,0x20,0x21,0x8c,0x82,0x00,0x00,
+0x3c,0x03,0xff,0xff,0x00,0x43,0x10,0x24,0x34,0x42,0x00,0x14,0x24,0x03,0x00,0x14,
+0x08,0x00,0x26,0x75,0xac,0x82,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,
+0x30,0xc6,0x00,0xff,0x00,0x06,0x48,0x40,0x01,0x26,0x10,0x21,0x00,0x02,0x10,0x80,
+0x27,0x8b,0xbc,0x30,0x27,0x83,0xbc,0x36,0x00,0x4b,0x40,0x21,0x00,0x43,0x10,0x21,
+0x94,0x47,0x00,0x00,0x30,0xa2,0x3f,0xff,0x10,0xe2,0x00,0x29,0x30,0x8a,0xff,0xff,
+0x95,0x02,0x00,0x02,0x24,0x03,0x00,0x01,0x00,0x02,0x11,0x82,0x30,0x42,0x00,0x01,
+0x10,0x43,0x00,0x18,0x00,0x00,0x00,0x00,0x01,0x26,0x10,0x21,0x00,0x02,0x10,0x80,
+0x00,0x4b,0x30,0x21,0x94,0xc4,0x00,0x02,0x27,0x83,0xbc,0x36,0x27,0x85,0xbc,0x34,
+0x00,0x45,0x28,0x21,0x30,0x84,0xff,0xdf,0x00,0x43,0x10,0x21,0xa4,0xc4,0x00,0x02,
+0xa4,0x40,0x00,0x00,0xa4,0xa0,0x00,0x00,0x94,0xc3,0x00,0x02,0x3c,0x04,0xb0,0x01,
+0x01,0x44,0x20,0x21,0x30,0x63,0xff,0xbf,0xa4,0xc3,0x00,0x02,0xa0,0xc0,0x00,0x00,
+0x8c,0x82,0x00,0x04,0x24,0x03,0xf0,0xff,0x00,0x43,0x10,0x24,0x03,0xe0,0x00,0x08,
+0xac,0x82,0x00,0x04,0x24,0x02,0xc0,0x00,0x91,0x04,0x00,0x01,0x00,0xa2,0x10,0x24,
+0x00,0x47,0x28,0x25,0x3c,0x03,0xb0,0x01,0x24,0x02,0x00,0x02,0x14,0x82,0xff,0xe2,
+0x01,0x43,0x18,0x21,0xac,0x65,0x00,0x00,0x08,0x00,0x26,0xa3,0x01,0x26,0x10,0x21,
+0x08,0x00,0x26,0xa3,0x01,0x26,0x10,0x21,0x93,0x83,0x81,0xf1,0x24,0x02,0x00,0x01,
+0x14,0x62,0x00,0x0d,0x3c,0x02,0xb0,0x01,0x8c,0x84,0x00,0x04,0x3c,0x06,0xb0,0x09,
+0x00,0x82,0x20,0x21,0x8c,0x85,0x00,0x08,0x8c,0x83,0x00,0x04,0x3c,0x02,0x01,0x00,
+0x34,0xc6,0x01,0x00,0x00,0x62,0x18,0x24,0x14,0x60,0x00,0x05,0x30,0xa5,0x20,0x00,
+0x24,0x02,0x00,0x06,0xa0,0xc2,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,
+0x3c,0x03,0xb0,0x09,0x10,0xa0,0xff,0xfc,0x34,0x63,0x01,0x00,0x24,0x02,0x00,0x0e,
+0x08,0x00,0x26,0xd6,0xa0,0x62,0x00,0x00,0x3c,0x02,0xb0,0x01,0x30,0xa5,0xff,0xff,
+0x00,0xa2,0x28,0x21,0x8c,0xa3,0x00,0x00,0x3c,0x02,0x10,0x00,0x00,0x80,0x30,0x21,
+0x00,0x62,0x18,0x24,0x8c,0xa2,0x00,0x04,0x10,0x60,0x00,0x04,0x00,0x00,0x00,0x00,
+0x30,0x42,0x80,0x00,0x10,0x40,0x00,0x13,0x00,0x00,0x00,0x00,0x8c,0xc2,0x01,0xa8,
+0x00,0x00,0x00,0x00,0x24,0x44,0x00,0x01,0x28,0x83,0x00,0x00,0x24,0x42,0x00,0x40,
+0x00,0x83,0x10,0x0a,0x93,0x83,0x81,0xf0,0x00,0x02,0x11,0x83,0x00,0x02,0x11,0x80,
+0x00,0x82,0x20,0x23,0x24,0x63,0xff,0xff,0xac,0xc4,0x01,0xa8,0xa3,0x83,0x81,0xf0,
+0x8c,0xc4,0x01,0xac,0x8c,0xc2,0x01,0xa8,0x00,0x00,0x00,0x00,0x00,0x44,0x10,0x26,
+0x00,0x02,0x10,0x2b,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,
+0x34,0x63,0x00,0x73,0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x01,
+0x14,0x40,0x00,0x04,0x00,0x00,0x00,0x00,0xa3,0x80,0x81,0xf1,0x03,0xe0,0x00,0x08,
+0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0xa3,0x82,0x81,0xf1,0x03,0xe0,0x00,0x08,
+0x00,0x00,0x00,0x00,0x8c,0x82,0x00,0x04,0x3c,0x05,0xb0,0x01,0x00,0x80,0x50,0x21,
+0x00,0x45,0x10,0x21,0x8c,0x43,0x00,0x04,0x24,0x02,0x00,0x05,0x00,0x03,0x1a,0x02,
+0x30,0x69,0x00,0x0f,0x11,0x22,0x00,0x0b,0x24,0x02,0x00,0x07,0x11,0x22,0x00,0x09,
+0x24,0x02,0x00,0x0a,0x11,0x22,0x00,0x07,0x24,0x02,0x00,0x0b,0x11,0x22,0x00,0x05,
+0x24,0x02,0x00,0x01,0x93,0x83,0x81,0xf0,0x3c,0x04,0xb0,0x06,0x10,0x62,0x00,0x03,
+0x34,0x84,0x80,0x18,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x8c,0x82,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x02,0x17,0x02,0x14,0x40,0xff,0xfa,0x00,0x00,0x00,0x00,
+0x8d,0x43,0x01,0xa8,0x27,0x82,0x89,0x68,0x00,0x03,0x18,0x80,0x00,0x6a,0x20,0x21,
+0x8c,0x87,0x00,0xa8,0x00,0x62,0x18,0x21,0x8c,0x68,0x00,0x00,0x00,0xe5,0x28,0x21,
+0x8c,0xa9,0x00,0x00,0x3c,0x02,0xff,0xff,0x27,0x83,0x8a,0x68,0x01,0x22,0x10,0x24,
+0x00,0x48,0x10,0x25,0xac,0xa2,0x00,0x00,0x8d,0x44,0x01,0xa8,0x00,0x07,0x30,0xc2,
+0x3c,0x02,0x00,0x80,0x00,0x04,0x20,0x80,0x00,0x83,0x20,0x21,0x00,0x06,0x32,0x00,
+0x8c,0xa9,0x00,0x04,0x00,0xc2,0x30,0x25,0x8c,0x82,0x00,0x00,0x3c,0x03,0x80,0x00,
+0x01,0x22,0x10,0x25,0x00,0x43,0x10,0x25,0xac,0xa2,0x00,0x04,0xaf,0x87,0xbc,0x20,
+0x8c,0xa2,0x00,0x00,0x00,0x00,0x00,0x00,0xaf,0x82,0xbc,0x28,0x8c,0xa3,0x00,0x04,
+0x3c,0x01,0xb0,0x07,0xac,0x26,0x80,0x18,0x8d,0x42,0x01,0xa8,0xaf,0x83,0xbc,0x24,
+0x93,0x85,0x81,0xf0,0x24,0x44,0x00,0x01,0x28,0x83,0x00,0x00,0x24,0x42,0x00,0x40,
+0x00,0x83,0x10,0x0a,0x00,0x02,0x11,0x83,0x00,0x02,0x11,0x80,0x24,0xa5,0xff,0xff,
+0x00,0x82,0x20,0x23,0xad,0x44,0x01,0xa8,0xa3,0x85,0x81,0xf0,0x08,0x00,0x27,0x21,
+0x00,0x00,0x00,0x00,0x3c,0x05,0xb0,0x03,0x3c,0x02,0x80,0x01,0x34,0xa5,0x00,0x20,
+0x24,0x42,0x9d,0x64,0xac,0xa2,0x00,0x00,0x24,0x02,0x00,0x02,0x24,0x03,0x00,0x20,
+0xac,0x82,0x00,0x64,0x3c,0x02,0x80,0x01,0xac,0x83,0x00,0x60,0xac,0x80,0x00,0x00,
+0xac,0x80,0x00,0x04,0xac,0x80,0x00,0x08,0xac,0x80,0x00,0x4c,0xac,0x80,0x00,0x50,
+0xac,0x80,0x00,0x54,0xac,0x80,0x00,0x0c,0xac,0x80,0x00,0x58,0xa0,0x80,0x00,0x5c,
+0x24,0x42,0x9e,0x28,0x24,0x83,0x00,0x68,0x24,0x05,0x00,0x0f,0x24,0xa5,0xff,0xff,
+0xac,0x62,0x00,0x00,0x04,0xa1,0xff,0xfd,0x24,0x63,0x00,0x04,0x3c,0x02,0x80,0x01,
+0x24,0x42,0x9f,0x10,0xac,0x82,0x00,0x78,0x3c,0x03,0x80,0x01,0x3c,0x02,0x80,0x01,
+0x24,0x63,0xa0,0x9c,0x24,0x42,0xa0,0x08,0xac,0x83,0x00,0x88,0xac,0x82,0x00,0x98,
+0x3c,0x03,0x80,0x01,0x3c,0x02,0x80,0x01,0x24,0x63,0xa1,0x44,0x24,0x42,0xa2,0x5c,
+0xac,0x83,0x00,0xa0,0xac,0x82,0x00,0xa4,0xa0,0x80,0x01,0xba,0xac,0x80,0x01,0xa8,
+0xac,0x80,0x01,0xac,0xac,0x80,0x01,0xb0,0xac,0x80,0x01,0xb4,0xa0,0x80,0x01,0xb8,
+0x03,0xe0,0x00,0x08,0xa0,0x80,0x01,0xb9,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x01,
+0x34,0x63,0x00,0x20,0x24,0x42,0x9e,0x28,0x03,0xe0,0x00,0x08,0xac,0x62,0x00,0x00,
+0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x01,0x34,0x42,0x00,0x20,0x24,0x63,0x9e,0x40,
+0xac,0x43,0x00,0x00,0x8c,0x82,0x00,0x10,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x11,
+0x00,0x80,0x28,0x21,0x8c,0x82,0x00,0x14,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x0d,
+0x00,0x00,0x00,0x00,0x8c,0x84,0x00,0x10,0x8c,0xa3,0x00,0x14,0x8c,0xa2,0x00,0x04,
+0x00,0x83,0x20,0x21,0x00,0x44,0x10,0x21,0x30,0x43,0x00,0xff,0x00,0x03,0x18,0x2b,
+0x00,0x02,0x12,0x02,0x00,0x43,0x10,0x21,0x00,0x02,0x12,0x00,0x30,0x42,0x3f,0xff,
+0xac,0xa2,0x00,0x04,0xac,0xa0,0x00,0x00,0xac,0xa0,0x00,0x4c,0xac,0xa0,0x00,0x50,
+0xac,0xa0,0x00,0x54,0x03,0xe0,0x00,0x08,0xac,0xa0,0x00,0x0c,0x3c,0x03,0xb0,0x03,
+0x3c,0x02,0x80,0x01,0x34,0x63,0x00,0x20,0x24,0x42,0x9e,0xbc,0xac,0x62,0x00,0x00,
+0x8c,0x86,0x00,0x04,0x3c,0x02,0xb0,0x01,0x24,0x03,0x00,0x01,0x00,0xc2,0x10,0x21,
+0x8c,0x45,0x00,0x00,0xac,0x83,0x00,0x4c,0x00,0x05,0x14,0x02,0x30,0xa3,0x3f,0xff,
+0x30,0x42,0x00,0xff,0xac,0x83,0x00,0x10,0xac,0x82,0x00,0x14,0x8c,0x83,0x00,0x14,
+0xac,0x85,0x00,0x40,0x00,0xc3,0x30,0x21,0x03,0xe0,0x00,0x08,0xac,0x86,0x00,0x08,
+0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x01,0x27,0xbd,0xff,0xe8,0x34,0x42,0x00,0x20,
+0x24,0x63,0x9f,0x10,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x14,0xac,0x43,0x00,0x00,
+0x8c,0x82,0x00,0x4c,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x0a,0x00,0x80,0x80,0x21,
+0xae,0x00,0x00,0x00,0xae,0x00,0x00,0x4c,0xae,0x00,0x00,0x50,0xae,0x00,0x00,0x54,
+0xae,0x00,0x00,0x0c,0x8f,0xbf,0x00,0x14,0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x18,0x0c,0x00,0x27,0xaf,0x00,0x00,0x00,0x00,0x08,0x00,0x27,0xd1,
+0xae,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x01,0x27,0xbd,0xff,0xe8,
+0x34,0x42,0x00,0x20,0x24,0x63,0x9f,0x74,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x14,
+0xac,0x43,0x00,0x00,0x8c,0x82,0x00,0x4c,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x16,
+0x00,0x80,0x80,0x21,0x8e,0x03,0x00,0x08,0x3c,0x02,0xb0,0x01,0x8e,0x04,0x00,0x44,
+0x00,0x62,0x18,0x21,0x90,0x65,0x00,0x00,0x24,0x02,0x00,0x01,0xae,0x02,0x00,0x50,
+0x30,0xa3,0x00,0xff,0x00,0x03,0x10,0x82,0x00,0x04,0x23,0x02,0x30,0x84,0x00,0x0f,
+0x30,0x42,0x00,0x03,0x00,0x03,0x19,0x02,0xae,0x04,0x00,0x34,0xae,0x02,0x00,0x2c,
+0xae,0x03,0x00,0x30,0xa2,0x05,0x00,0x48,0x8f,0xbf,0x00,0x14,0x8f,0xb0,0x00,0x10,
+0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x0c,0x00,0x27,0xaf,0x00,0x00,0x00,0x00,
+0x08,0x00,0x27,0xe9,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x01,
+0x27,0xbd,0xff,0xe8,0x34,0x42,0x00,0x20,0x24,0x63,0xa0,0x08,0xaf,0xb0,0x00,0x10,
+0xaf,0xbf,0x00,0x14,0xac,0x43,0x00,0x00,0x8c,0x82,0x00,0x50,0x00,0x00,0x00,0x00,
+0x10,0x40,0x00,0x16,0x00,0x80,0x80,0x21,0x92,0x03,0x00,0x44,0x8e,0x02,0x00,0x40,
+0x83,0x85,0x8b,0xd4,0x92,0x04,0x00,0x41,0x30,0x63,0x00,0x01,0x00,0x02,0x16,0x02,
+0xae,0x04,0x00,0x14,0x00,0x00,0x30,0x21,0xae,0x02,0x00,0x18,0x10,0xa0,0x00,0x04,
+0xae,0x03,0x00,0x3c,0x10,0x60,0x00,0x03,0x24,0x02,0x00,0x01,0x24,0x06,0x00,0x01,
+0x24,0x02,0x00,0x01,0xa3,0x86,0x8b,0xd4,0x8f,0xbf,0x00,0x14,0xae,0x02,0x00,0x54,
+0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x0c,0x00,0x27,0xdd,
+0x00,0x00,0x00,0x00,0x08,0x00,0x28,0x0e,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,
+0x3c,0x03,0x80,0x01,0x27,0xbd,0xff,0xe8,0x34,0x42,0x00,0x20,0x24,0x63,0xa0,0x9c,
+0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x14,0xac,0x43,0x00,0x00,0x8c,0x82,0x00,0x50,
+0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x1b,0x00,0x80,0x80,0x21,0x3c,0x02,0xb0,0x03,
+0x8c,0x42,0x00,0x00,0x92,0x04,0x00,0x44,0x8e,0x03,0x00,0x40,0x83,0x86,0x8b,0xd4,
+0x92,0x05,0x00,0x41,0x30,0x42,0x08,0x00,0x30,0x84,0x00,0x01,0x00,0x02,0x12,0xc2,
+0x00,0x03,0x1e,0x02,0x00,0x82,0x20,0x25,0xae,0x05,0x00,0x14,0x00,0x00,0x38,0x21,
+0xae,0x03,0x00,0x18,0x10,0xc0,0x00,0x04,0xae,0x04,0x00,0x3c,0x10,0x80,0x00,0x03,
+0x24,0x02,0x00,0x01,0x24,0x07,0x00,0x01,0x24,0x02,0x00,0x01,0xa3,0x87,0x8b,0xd4,
+0x8f,0xbf,0x00,0x14,0xae,0x02,0x00,0x54,0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x18,0x0c,0x00,0x27,0xdd,0x00,0x00,0x00,0x00,0x08,0x00,0x28,0x33,
+0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x01,0x27,0xbd,0xff,0xe8,
+0x34,0x42,0x00,0x20,0x24,0x63,0xa1,0x44,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x14,
+0xac,0x43,0x00,0x00,0x8c,0x82,0x00,0x54,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x37,
+0x00,0x80,0x80,0x21,0x8e,0x04,0x00,0x04,0x8e,0x03,0x00,0x44,0x3c,0x02,0x80,0x00,
+0x3c,0x05,0xb0,0x01,0x34,0x42,0x00,0x10,0x00,0x85,0x20,0x21,0x00,0x62,0x18,0x25,
+0xac,0x83,0x00,0x04,0x8e,0x02,0x00,0x04,0x8e,0x03,0x01,0xac,0x02,0x00,0x20,0x21,
+0x00,0x45,0x10,0x21,0x8c,0x46,0x00,0x00,0x00,0x03,0x18,0x80,0x27,0x82,0x89,0x68,
+0x00,0x62,0x18,0x21,0xac,0x66,0x00,0x00,0x8e,0x02,0x00,0x04,0x8e,0x03,0x01,0xac,
+0x00,0x45,0x10,0x21,0x8c,0x46,0x00,0x04,0x00,0x03,0x18,0x80,0x27,0x82,0x8a,0x68,
+0x00,0x62,0x18,0x21,0x0c,0x00,0x26,0x10,0xac,0x66,0x00,0x00,0x8e,0x03,0x01,0xac,
+0x8e,0x07,0x00,0x04,0x3c,0x06,0xb0,0x03,0x24,0x65,0x00,0x01,0x28,0xa4,0x00,0x00,
+0x24,0x62,0x00,0x40,0x00,0xa4,0x10,0x0a,0x00,0x02,0x11,0x83,0x00,0x02,0x11,0x80,
+0x00,0x03,0x18,0x80,0x00,0xa2,0x28,0x23,0x00,0x70,0x18,0x21,0xae,0x05,0x01,0xac,
+0xac,0x67,0x00,0xa8,0x34,0xc6,0x00,0x30,0x8c,0xc3,0x00,0x00,0x93,0x82,0x81,0xf0,
+0x02,0x00,0x20,0x21,0x24,0x63,0x00,0x01,0x24,0x42,0x00,0x01,0xac,0xc3,0x00,0x00,
+0xa3,0x82,0x81,0xf0,0x0c,0x00,0x27,0x90,0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x14,
+0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x0c,0x00,0x28,0x27,
+0x00,0x00,0x00,0x00,0x08,0x00,0x28,0x5d,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,
+0x3c,0x03,0x80,0x01,0x27,0xbd,0xff,0xe8,0x34,0x42,0x00,0x20,0x24,0x63,0xa2,0x5c,
+0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x14,0xac,0x43,0x00,0x00,0x8c,0x82,0x00,0x54,
+0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x37,0x00,0x80,0x80,0x21,0x8e,0x04,0x00,0x04,
+0x8e,0x03,0x00,0x44,0x3c,0x02,0x80,0x00,0x3c,0x05,0xb0,0x01,0x34,0x42,0x00,0x10,
+0x00,0x85,0x20,0x21,0x00,0x62,0x18,0x25,0xac,0x83,0x00,0x04,0x8e,0x02,0x00,0x04,
+0x8e,0x03,0x01,0xac,0x02,0x00,0x20,0x21,0x00,0x45,0x10,0x21,0x8c,0x46,0x00,0x00,
+0x00,0x03,0x18,0x80,0x27,0x82,0x89,0x68,0x00,0x62,0x18,0x21,0xac,0x66,0x00,0x00,
+0x8e,0x02,0x00,0x04,0x8e,0x03,0x01,0xac,0x00,0x45,0x10,0x21,0x8c,0x46,0x00,0x04,
+0x00,0x03,0x18,0x80,0x27,0x82,0x8a,0x68,0x00,0x62,0x18,0x21,0x0c,0x00,0x26,0x10,
+0xac,0x66,0x00,0x00,0x8e,0x03,0x01,0xac,0x8e,0x07,0x00,0x04,0x3c,0x06,0xb0,0x03,
+0x24,0x65,0x00,0x01,0x28,0xa4,0x00,0x00,0x24,0x62,0x00,0x40,0x00,0xa4,0x10,0x0a,
+0x00,0x02,0x11,0x83,0x00,0x02,0x11,0x80,0x00,0x03,0x18,0x80,0x00,0xa2,0x28,0x23,
+0x00,0x70,0x18,0x21,0xae,0x05,0x01,0xac,0xac,0x67,0x00,0xa8,0x34,0xc6,0x00,0x30,
+0x8c,0xc3,0x00,0x00,0x93,0x82,0x81,0xf0,0x02,0x00,0x20,0x21,0x24,0x63,0x00,0x01,
+0x24,0x42,0x00,0x01,0xac,0xc3,0x00,0x00,0xa3,0x82,0x81,0xf0,0x0c,0x00,0x27,0x90,
+0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x14,0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x18,0x0c,0x00,0x28,0x27,0x00,0x00,0x00,0x00,0x08,0x00,0x28,0xa3,
+0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x01,0x34,0x42,0x00,0x20,
+0x24,0x63,0xa3,0x74,0x27,0xbd,0xff,0xe0,0xac,0x43,0x00,0x00,0x3c,0x02,0x80,0x01,
+0xaf,0xb2,0x00,0x18,0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x1c,
+0x00,0x80,0x80,0x21,0x24,0x52,0x9e,0x28,0x00,0x00,0x88,0x21,0x3c,0x03,0xb0,0x09,
+0x34,0x63,0x00,0x06,0x8e,0x06,0x00,0x04,0x90,0x62,0x00,0x00,0x00,0x06,0x22,0x02,
+0x00,0x44,0x10,0x23,0x24,0x44,0x00,0x40,0x28,0x83,0x00,0x00,0x24,0x42,0x00,0x7f,
+0x00,0x83,0x10,0x0a,0x00,0x02,0x11,0x83,0x00,0x02,0x11,0x80,0x24,0x84,0xff,0xff,
+0x10,0x44,0x00,0x68,0x00,0x00,0x28,0x21,0x3c,0x02,0xb0,0x01,0x00,0xc2,0x10,0x21,
+0x8c,0x44,0x00,0x04,0x3c,0x03,0x7c,0x00,0x34,0x63,0x00,0xf0,0x00,0x83,0x18,0x24,
+0xae,0x04,0x00,0x44,0x8c,0x44,0x00,0x00,0x10,0x60,0x00,0x69,0x00,0x00,0x38,0x21,
+0x3c,0x09,0xb0,0x03,0x3c,0x06,0x7c,0x00,0x35,0x29,0x00,0x99,0x3c,0x0a,0xb0,0x01,
+0x24,0x08,0x00,0x40,0x34,0xc6,0x00,0xf0,0x3c,0x0b,0xff,0xff,0x3c,0x0c,0x28,0x38,
+0x16,0x20,0x00,0x06,0x24,0xa5,0x00,0x01,0x93,0x82,0x81,0xf6,0x24,0x11,0x00,0x01,
+0x24,0x42,0x00,0x01,0xa1,0x22,0x00,0x00,0xa3,0x82,0x81,0xf6,0x8e,0x02,0x00,0x04,
+0x24,0x07,0x00,0x01,0x24,0x42,0x01,0x00,0x30,0x42,0x3f,0xff,0xae,0x02,0x00,0x04,
+0x00,0x4a,0x10,0x21,0x8c,0x43,0x00,0x04,0x00,0x00,0x00,0x00,0xae,0x03,0x00,0x44,
+0x8c,0x44,0x00,0x00,0x10,0xa8,0x00,0x2d,0x00,0x66,0x18,0x24,0x14,0x60,0xff,0xec,
+0x00,0x8b,0x10,0x24,0x14,0x4c,0xff,0xea,0x24,0x02,0x00,0x01,0x10,0xe2,0x00,0x2f,
+0x3c,0x03,0xb0,0x09,0x8e,0x02,0x00,0x44,0x8e,0x04,0x00,0x60,0x00,0x02,0x1e,0x42,
+0x00,0x02,0x12,0x02,0x30,0x42,0x00,0x0f,0x30,0x63,0x00,0x01,0xae,0x02,0x00,0x00,
+0x10,0x44,0x00,0x1a,0xae,0x03,0x00,0x58,0x8e,0x02,0x00,0x64,0x8e,0x04,0x00,0x58,
+0x00,0x00,0x00,0x00,0x10,0x82,0x00,0x05,0x00,0x00,0x00,0x00,0xae,0x00,0x00,0x4c,
+0xae,0x00,0x00,0x50,0xae,0x00,0x00,0x54,0xae,0x00,0x00,0x0c,0x8e,0x03,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x03,0x10,0x80,0x00,0x50,0x10,0x21,0x8c,0x42,0x00,0x68,
+0x00,0x00,0x00,0x00,0x10,0x52,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x40,0xf8,0x09,
+0x02,0x00,0x20,0x21,0x8e,0x04,0x00,0x58,0x8e,0x03,0x00,0x00,0x00,0x00,0x00,0x00,
+0xae,0x03,0x00,0x60,0x08,0x00,0x28,0xeb,0xae,0x04,0x00,0x64,0x8e,0x02,0x00,0x64,
+0x00,0x00,0x00,0x00,0x14,0x62,0xff,0xe5,0x00,0x00,0x00,0x00,0x7a,0x02,0x0d,0x7c,
+0x8f,0xbf,0x00,0x1c,0x8f,0xb2,0x00,0x18,0x7b,0xb0,0x00,0xbc,0x00,0x43,0x10,0x26,
+0x00,0x02,0x10,0x2b,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x34,0x63,0x00,0x06,
+0x8e,0x04,0x00,0x04,0x90,0x62,0x00,0x00,0x00,0x04,0x22,0x02,0x00,0x44,0x10,0x23,
+0x24,0x44,0x00,0x40,0x28,0x83,0x00,0x00,0x24,0x42,0x00,0x7f,0x00,0x83,0x10,0x0a,
+0x00,0x02,0x11,0x83,0x00,0x02,0x11,0x80,0x00,0x82,0x20,0x23,0x14,0x87,0xff,0xc5,
+0x00,0x00,0x00,0x00,0x8e,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x2c,0x62,0x00,0x03,
+0x14,0x40,0x00,0x05,0x24,0x02,0x00,0x0d,0x10,0x62,0x00,0x03,0x24,0x02,0x00,0x01,
+0x08,0x00,0x29,0x4b,0xa2,0x02,0x00,0x5c,0x08,0x00,0x29,0x4b,0xa2,0x00,0x00,0x5c,
+0x3c,0x02,0xff,0xff,0x00,0x82,0x10,0x24,0x3c,0x03,0x28,0x38,0x14,0x43,0xff,0x94,
+0x24,0x02,0x00,0x01,0x08,0x00,0x29,0x23,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,
+0x3c,0x03,0x80,0x01,0x34,0x42,0x00,0x20,0x24,0x63,0xa5,0xcc,0xac,0x43,0x00,0x00,
+0x8c,0x83,0x01,0xa8,0x8c,0x82,0x01,0xac,0x00,0x80,0x40,0x21,0x10,0x62,0x00,0x20,
+0x00,0x00,0x20,0x21,0x93,0x82,0x81,0xf1,0x00,0x03,0x28,0x80,0x3c,0x07,0xb0,0x06,
+0x00,0xa8,0x18,0x21,0x24,0x04,0x00,0x01,0x8c,0x66,0x00,0xa8,0x10,0x44,0x00,0x1c,
+0x34,0xe7,0x80,0x18,0x3c,0x05,0xb0,0x01,0xaf,0x86,0xbc,0x20,0x00,0xc5,0x28,0x21,
+0x8c,0xa3,0x00,0x00,0x00,0x06,0x20,0xc2,0x3c,0x02,0x00,0x80,0x00,0x04,0x22,0x00,
+0x00,0x82,0x20,0x25,0xaf,0x83,0xbc,0x28,0x8c,0xa2,0x00,0x04,0xac,0xe4,0x00,0x00,
+0x8d,0x03,0x01,0xa8,0xaf,0x82,0xbc,0x24,0x24,0x64,0x00,0x01,0x04,0x80,0x00,0x0a,
+0x00,0x80,0x10,0x21,0x00,0x02,0x11,0x83,0x8d,0x03,0x01,0xac,0x00,0x02,0x11,0x80,
+0x00,0x82,0x10,0x23,0x00,0x43,0x18,0x26,0xad,0x02,0x01,0xa8,0x00,0x03,0x20,0x2b,
+0x03,0xe0,0x00,0x08,0x00,0x80,0x10,0x21,0x08,0x00,0x29,0x95,0x24,0x62,0x00,0x40,
+0x27,0x82,0x89,0x68,0x00,0x06,0x20,0xc2,0x00,0x04,0x22,0x00,0x00,0xa2,0x48,0x21,
+0x3c,0x02,0x00,0x80,0x00,0x82,0x58,0x25,0x93,0x82,0x81,0xf0,0x3c,0x0a,0xb0,0x06,
+0x3c,0x03,0xb0,0x01,0x2c,0x42,0x00,0x02,0x00,0xc3,0x38,0x21,0x35,0x4a,0x80,0x18,
+0x14,0x40,0xff,0xef,0x00,0x00,0x20,0x21,0x8c,0xe5,0x00,0x00,0x8d,0x23,0x00,0x00,
+0x24,0x02,0xc0,0x00,0x00,0xa2,0x10,0x24,0x00,0x43,0x10,0x25,0xac,0xe2,0x00,0x00,
+0x8d,0x04,0x01,0xa8,0x27,0x83,0x8a,0x68,0x8c,0xe5,0x00,0x04,0x00,0x04,0x20,0x80,
+0x00,0x83,0x20,0x21,0x8c,0x82,0x00,0x00,0x3c,0x03,0x80,0x00,0x00,0xa2,0x10,0x25,
+0x00,0x43,0x10,0x25,0xac,0xe2,0x00,0x04,0xaf,0x86,0xbc,0x20,0x8c,0xe2,0x00,0x00,
+0x93,0x85,0x81,0xf0,0xaf,0x82,0xbc,0x28,0x8c,0xe3,0x00,0x04,0xad,0x4b,0x00,0x00,
+0x8d,0x02,0x01,0xa8,0xaf,0x83,0xbc,0x24,0x24,0xa5,0xff,0xff,0x24,0x44,0x00,0x01,
+0x28,0x83,0x00,0x00,0x24,0x42,0x00,0x40,0x00,0x83,0x10,0x0a,0x00,0x02,0x11,0x83,
+0x00,0x02,0x11,0x80,0x00,0x82,0x20,0x23,0xad,0x04,0x01,0xa8,0xa3,0x85,0x81,0xf0,
+0x79,0x02,0x0d,0x7c,0x00,0x00,0x00,0x00,0x00,0x43,0x10,0x26,0x08,0x00,0x29,0x9c,
+0x00,0x02,0x20,0x2b,0x3c,0x04,0xb0,0x03,0x3c,0x06,0xb0,0x07,0x3c,0x02,0x80,0x01,
+0x34,0xc6,0x00,0x18,0x34,0x84,0x00,0x20,0x24,0x42,0xa7,0x54,0x24,0x03,0xff,0x83,
+0xac,0x82,0x00,0x00,0xa0,0xc3,0x00,0x00,0x90,0xc4,0x00,0x00,0x27,0xbd,0xff,0xf8,
+0x3c,0x03,0xb0,0x07,0x24,0x02,0xff,0x82,0xa3,0xa4,0x00,0x00,0xa0,0x62,0x00,0x00,
+0x90,0x64,0x00,0x00,0x3c,0x02,0xb0,0x07,0x34,0x42,0x00,0x08,0xa3,0xa4,0x00,0x01,
+0xa0,0x40,0x00,0x00,0x90,0x43,0x00,0x00,0x24,0x02,0x00,0x03,0x3c,0x05,0xb0,0x07,
+0xa3,0xa3,0x00,0x00,0xa0,0xc2,0x00,0x00,0x90,0xc4,0x00,0x00,0x34,0xa5,0x00,0x10,
+0x24,0x02,0x00,0x06,0x3c,0x03,0xb0,0x07,0xa3,0xa4,0x00,0x00,0x34,0x63,0x00,0x38,
+0xa0,0xa2,0x00,0x00,0x90,0x64,0x00,0x00,0x3c,0x02,0xb0,0x07,0x34,0x42,0x00,0x20,
+0xa3,0xa4,0x00,0x00,0xa0,0xa0,0x00,0x00,0x90,0xa3,0x00,0x00,0xaf,0x82,0xbf,0x30,
+0xa3,0xa3,0x00,0x00,0xa0,0x40,0x00,0x00,0x90,0x43,0x00,0x00,0x03,0xe0,0x00,0x08,
+0x27,0xbd,0x00,0x08,};
+
+u8 Rtl8192PciEFwDataArray[DataArrayLengthPciE] = {
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x10,0x00,0x08,0x00,
+0x02,0xe9,0x01,0x74,0x02,0xab,0x01,0xc7,0x01,0x55,0x00,0xe4,0x00,0xab,0x00,0x72,
+0x00,0x55,0x00,0x4c,0x00,0x4c,0x00,0x4c,0x00,0x4c,0x00,0x4c,0x02,0x76,0x01,0x3b,
+0x00,0xd2,0x00,0x9e,0x00,0x69,0x00,0x4f,0x00,0x46,0x00,0x3f,0x01,0x3b,0x00,0x9e,
+0x00,0x69,0x00,0x4f,0x00,0x35,0x00,0x27,0x00,0x23,0x00,0x20,0x01,0x2f,0x00,0x98,
+0x00,0x65,0x00,0x4c,0x00,0x33,0x00,0x26,0x00,0x22,0x00,0x1e,0x00,0x98,0x00,0x4c,
+0x00,0x33,0x00,0x26,0x00,0x19,0x00,0x13,0x00,0x11,0x00,0x0f,0x02,0x39,0x01,0x1c,
+0x00,0xbd,0x00,0x8e,0x00,0x5f,0x00,0x47,0x00,0x3f,0x00,0x39,0x01,0x1c,0x00,0x8e,
+0x00,0x5f,0x00,0x47,0x00,0x2f,0x00,0x23,0x00,0x20,0x00,0x1c,0x01,0x11,0x00,0x89,
+0x00,0x5b,0x00,0x44,0x00,0x2e,0x00,0x22,0x00,0x1e,0x00,0x1b,0x00,0x89,0x00,0x44,
+0x00,0x2e,0x00,0x22,0x00,0x17,0x00,0x11,0x00,0x0f,0x00,0x0e,0x02,0xab,0x02,0xab,
+0x02,0x66,0x02,0x66,0x07,0x06,0x06,0x06,0x05,0x06,0x07,0x08,0x04,0x06,0x07,0x08,
+0x09,0x0a,0x0b,0x0b,0x49,0x6e,0x74,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x4c,
+0x42,0x4d,0x4f,0x44,0x00,0x00,0x00,0x00,0x54,0x4c,0x42,0x4c,0x5f,0x64,0x61,0x74,
+0x61,0x00,0x54,0x4c,0x42,0x53,0x00,0x00,0x00,0x00,0x00,0x00,0x41,0x64,0x45,0x4c,
+0x5f,0x64,0x61,0x74,0x61,0x00,0x41,0x64,0x45,0x53,0x00,0x00,0x00,0x00,0x00,0x00,
+0x45,0x78,0x63,0x43,0x6f,0x64,0x65,0x36,0x00,0x00,0x45,0x78,0x63,0x43,0x6f,0x64,
+0x65,0x37,0x00,0x00,0x53,0x79,0x73,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x42,0x70,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x52,0x49,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x43,0x70,0x55,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x4f,0x76,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x01,0x0b,0x63,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x2c,
+0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x30,0x00,0x00,0x00,0x48,0x00,0x00,0x00,0x60,
+0x00,0x00,0x00,0x90,0x00,0x00,0x00,0xc0,0x00,0x00,0x01,0x20,0x00,0x00,0x01,0x80,
+0x00,0x00,0x01,0xb0,0x00,0x00,0x00,0x34,0x00,0x00,0x00,0x68,0x00,0x00,0x00,0x9c,
+0x00,0x00,0x00,0xd0,0x00,0x00,0x01,0x38,0x00,0x00,0x01,0xa0,0x00,0x00,0x01,0xd4,
+0x00,0x00,0x02,0x08,0x00,0x00,0x00,0x68,0x00,0x00,0x00,0xd0,0x00,0x00,0x01,0x38,
+0x00,0x00,0x01,0xa0,0x00,0x00,0x02,0x6f,0x00,0x00,0x03,0x40,0x00,0x00,0x03,0xa8,
+0x00,0x00,0x04,0x10,0x01,0x01,0x01,0x02,0x01,0x01,0x02,0x02,0x03,0x03,0x04,0x04,
+0x01,0x01,0x02,0x02,0x03,0x03,0x04,0x04,0x02,0x03,0x03,0x04,0x05,0x06,0x07,0x08,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x00,0x07,0x6c,0x80,0x00,0x07,0x80,
+0x80,0x00,0x07,0x80,0x80,0x00,0x07,0x70,0x80,0x00,0x07,0x70,0x80,0x00,0x07,0x94,
+0x80,0x00,0x56,0xb0,0x80,0x00,0x57,0x08,0x80,0x00,0x57,0x30,0x80,0x00,0x58,0x28,
+0x80,0x00,0x58,0xe0,0x80,0x00,0x59,0x88,0x80,0x00,0x59,0xfc,0x80,0x00,0x5b,0x08,
+0x80,0x00,0x5b,0x40,0x80,0x00,0x5b,0x54,0x80,0x00,0x5b,0x68,0x80,0x00,0x5c,0x50,
+0x80,0x00,0x5c,0x90,0x80,0x00,0x5d,0x44,0x80,0x00,0x5d,0x6c,0x80,0x00,0x56,0x70,
+0x80,0x00,0x5d,0xbc,0x80,0x00,0x64,0x48,0x80,0x00,0x64,0xc0,0x80,0x00,0x64,0xcc,
+0x80,0x00,0x64,0xd8,0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60,
+0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60,
+0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60,
+0x80,0x00,0x64,0x60,0x80,0x00,0x64,0xe4,0x80,0x00,0x64,0xf0,0x80,0x00,0x64,0xfc,
+0x80,0x00,0x87,0xa4,0x80,0x00,0x87,0xa4,0x80,0x00,0x87,0xa4,0x80,0x00,0x87,0xd8,
+0x80,0x00,0x88,0x18,0x80,0x00,0x88,0x50,0x80,0x00,0x88,0x80,0x80,0x00,0x88,0xb0,
+0x80,0x00,0x88,0xc4,0x80,0x00,0x89,0x2c,0x80,0x00,0x89,0x40,0x80,0x00,0x89,0x7c,
+0x80,0x00,0x89,0x84,0x80,0x00,0x89,0xc0,0x80,0x00,0x89,0xd4,0x80,0x00,0x89,0xdc,
+0x80,0x00,0x89,0xe4,0x80,0x00,0x89,0xe4,0x80,0x00,0x89,0xe4,0x80,0x00,0x89,0xe4,
+0x80,0x00,0x8a,0x14,0x80,0x00,0x8a,0x28,0x80,0x00,0x8a,0x3c,0x80,0x00,0x86,0xe8,
+0x80,0x00,0x8d,0x68,0x80,0x00,0x8d,0x68,0x80,0x00,0x8d,0x68,0x80,0x00,0x8d,0x9c,
+0x80,0x00,0x8d,0xdc,0x80,0x00,0x8e,0x14,0x80,0x00,0x8e,0x44,0x80,0x00,0x8e,0x74,
+0x80,0x00,0x8e,0x88,0x80,0x00,0x8e,0xf0,0x80,0x00,0x8f,0x04,0x80,0x00,0x8f,0x40,
+0x80,0x00,0x8f,0x48,0x80,0x00,0x8f,0x84,0x80,0x00,0x8f,0x98,0x80,0x00,0x8f,0xa0,
+0x80,0x00,0x8f,0xa8,0x80,0x00,0x8f,0xa8,0x80,0x00,0x8f,0xa8,0x80,0x00,0x8f,0xa8,
+0x80,0x00,0x8f,0xd8,0x80,0x00,0x8f,0xec,0x80,0x00,0x90,0x00,0x80,0x00,0x8b,0x88,
+};
+
+u32 Rtl8192PciEPHY_REGArray[PHY_REGArrayLengthPciE] = {0x0,};
+
+u32 Rtl8192PciEPHY_REG_1T2RArray[PHY_REG_1T2RArrayLengthPciE] = {
+ 0x800, 0x00000000,
+ 0x804, 0x00000001,
+ 0x808, 0x0000fc00,
+ 0x80c, 0x0000001c,
+ 0x810, 0x801010aa,
+ 0x814, 0x008514d0,
+ 0x818, 0x00000040,
+ 0x81c, 0x00000000,
+ 0x820, 0x00000004,
+ 0x824, 0x00690000,
+ 0x828, 0x00000004,
+ 0x82c, 0x00e90000,
+ 0x830, 0x00000004,
+ 0x834, 0x00690000,
+ 0x838, 0x00000004,
+ 0x83c, 0x00e90000,
+ 0x840, 0x00000000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84c, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x65a965a9,
+ 0x85c, 0x65a965a9,
+ 0x860, 0x001f0010,
+ 0x864, 0x007f0010,
+ 0x868, 0x001f0010,
+ 0x86c, 0x007f0010,
+ 0x870, 0x0f100f70,
+ 0x874, 0x0f100f70,
+ 0x878, 0x00000000,
+ 0x87c, 0x00000000,
+ 0x880, 0x6870e36c,
+ 0x884, 0xe3573600,
+ 0x888, 0x4260c340,
+ 0x88c, 0x0000ff00,
+ 0x890, 0x00000000,
+ 0x894, 0xfffffffe,
+ 0x898, 0x4c42382f,
+ 0x89c, 0x00656056,
+ 0x8b0, 0x00000000,
+ 0x8e0, 0x00000000,
+ 0x8e4, 0x00000000,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90c, 0x31121311,
+ 0xa00, 0x00d0c7d8,
+ 0xa04, 0x811f0008,
+ 0xa08, 0x80cd8300,
+ 0xa0c, 0x2e62740f,
+ 0xa10, 0x95009b78,
+ 0xa14, 0x11145008,
+ 0xa18, 0x00881117,
+ 0xa1c, 0x89140fa0,
+ 0xa20, 0x1a1b0000,
+ 0xa24, 0x090e1317,
+ 0xa28, 0x00000204,
+ 0xa2c, 0x00000000,
+ 0xc00, 0x00000040,
+ 0xc04, 0x00005433,
+ 0xc08, 0x000000e4,
+ 0xc0c, 0x6c6c6c6c,
+ 0xc10, 0x08800000,
+ 0xc14, 0x40000100,
+ 0xc18, 0x08000000,
+ 0xc1c, 0x40000100,
+ 0xc20, 0x08000000,
+ 0xc24, 0x40000100,
+ 0xc28, 0x08000000,
+ 0xc2c, 0x40000100,
+ 0xc30, 0x6de9ac44,
+ 0xc34, 0x465c52cd,
+ 0xc38, 0x497f5994,
+ 0xc3c, 0x0a969764,
+ 0xc40, 0x1f7c403f,
+ 0xc44, 0x000100b7,
+ 0xc48, 0xec020000,
+ 0xc4c, 0x00000300,
+ 0xc50, 0x69543420,
+ 0xc54, 0x433c0094,
+ 0xc58, 0x69543420,
+ 0xc5c, 0x433c0094,
+ 0xc60, 0x69543420,
+ 0xc64, 0x433c0094,
+ 0xc68, 0x69543420,
+ 0xc6c, 0x433c0094,
+ 0xc70, 0x2c7f000d,
+ 0xc74, 0x0186175b,
+ 0xc78, 0x0000001f,
+ 0xc7c, 0x00b91612,
+ 0xc80, 0x40000100,
+ 0xc84, 0x20000000,
+ 0xc88, 0x40000100,
+ 0xc8c, 0x20200000,
+ 0xc90, 0x40000100,
+ 0xc94, 0x00000000,
+ 0xc98, 0x40000100,
+ 0xc9c, 0x00000000,
+ 0xca0, 0x00492492,
+ 0xca4, 0x00000000,
+ 0xca8, 0x00000000,
+ 0xcac, 0x00000000,
+ 0xcb0, 0x00000000,
+ 0xcb4, 0x00000000,
+ 0xcb8, 0x00000000,
+ 0xcbc, 0x00492492,
+ 0xcc0, 0x00000000,
+ 0xcc4, 0x00000000,
+ 0xcc8, 0x00000000,
+ 0xccc, 0x00000000,
+ 0xcd0, 0x00000000,
+ 0xcd4, 0x00000000,
+ 0xcd8, 0x64b22427,
+ 0xcdc, 0x00766932,
+ 0xce0, 0x00222222,
+ 0xd00, 0x00000750,
+ 0xd04, 0x00000403,
+ 0xd08, 0x0000907f,
+ 0xd0c, 0x00000001,
+ 0xd10, 0xa0633333,
+ 0xd14, 0x33333c63,
+ 0xd18, 0x6a8f5b6b,
+ 0xd1c, 0x00000000,
+ 0xd20, 0x00000000,
+ 0xd24, 0x00000000,
+ 0xd28, 0x00000000,
+ 0xd2c, 0xcc979975,
+ 0xd30, 0x00000000,
+ 0xd34, 0x00000000,
+ 0xd38, 0x00000000,
+ 0xd3c, 0x00027293,
+ 0xd40, 0x00000000,
+ 0xd44, 0x00000000,
+ 0xd48, 0x00000000,
+ 0xd4c, 0x00000000,
+ 0xd50, 0x6437140a,
+ 0xd54, 0x024dbd02,
+ 0xd58, 0x00000000,
+ 0xd5c, 0x04032064,
+ 0xe00, 0x161a1a1a,
+ 0xe04, 0x12121416,
+ 0xe08, 0x00001800,
+ 0xe0c, 0x00000000,
+ 0xe10, 0x161a1a1a,
+ 0xe14, 0x12121416,
+ 0xe18, 0x161a1a1a,
+ 0xe1c, 0x12121416,
+};
+
+u32 Rtl8192PciERadioA_Array[RadioA_ArrayLengthPciE] = {
+ 0x019, 0x00000003,
+ 0x000, 0x000000bf,
+ 0x001, 0x00000ee0,
+ 0x002, 0x0000004c,
+ 0x003, 0x000007f1,
+ 0x004, 0x00000975,
+ 0x005, 0x00000c58,
+ 0x006, 0x00000ae6,
+ 0x007, 0x000000ca,
+ 0x008, 0x00000e1c,
+ 0x009, 0x000007f0,
+ 0x00a, 0x000009d0,
+ 0x00b, 0x000001ba,
+ 0x00c, 0x00000240,
+ 0x00e, 0x00000020,
+ 0x00f, 0x00000990,
+ 0x012, 0x00000806,
+ 0x014, 0x000005ab,
+ 0x015, 0x00000f80,
+ 0x016, 0x00000020,
+ 0x017, 0x00000597,
+ 0x018, 0x0000050a,
+ 0x01a, 0x00000f80,
+ 0x01b, 0x00000f5e,
+ 0x01c, 0x00000008,
+ 0x01d, 0x00000607,
+ 0x01e, 0x000006cc,
+ 0x01f, 0x00000000,
+ 0x020, 0x000001a5,
+ 0x01f, 0x00000001,
+ 0x020, 0x00000165,
+ 0x01f, 0x00000002,
+ 0x020, 0x000000c6,
+ 0x01f, 0x00000003,
+ 0x020, 0x00000086,
+ 0x01f, 0x00000004,
+ 0x020, 0x00000046,
+ 0x01f, 0x00000005,
+ 0x020, 0x000001e6,
+ 0x01f, 0x00000006,
+ 0x020, 0x000001a6,
+ 0x01f, 0x00000007,
+ 0x020, 0x00000166,
+ 0x01f, 0x00000008,
+ 0x020, 0x000000c7,
+ 0x01f, 0x00000009,
+ 0x020, 0x00000087,
+ 0x01f, 0x0000000a,
+ 0x020, 0x000000f7,
+ 0x01f, 0x0000000b,
+ 0x020, 0x000000d7,
+ 0x01f, 0x0000000c,
+ 0x020, 0x000000b7,
+ 0x01f, 0x0000000d,
+ 0x020, 0x00000097,
+ 0x01f, 0x0000000e,
+ 0x020, 0x00000077,
+ 0x01f, 0x0000000f,
+ 0x020, 0x00000057,
+ 0x01f, 0x00000010,
+ 0x020, 0x00000037,
+ 0x01f, 0x00000011,
+ 0x020, 0x000000fb,
+ 0x01f, 0x00000012,
+ 0x020, 0x000000db,
+ 0x01f, 0x00000013,
+ 0x020, 0x000000bb,
+ 0x01f, 0x00000014,
+ 0x020, 0x000000ff,
+ 0x01f, 0x00000015,
+ 0x020, 0x000000e3,
+ 0x01f, 0x00000016,
+ 0x020, 0x000000c3,
+ 0x01f, 0x00000017,
+ 0x020, 0x000000a3,
+ 0x01f, 0x00000018,
+ 0x020, 0x00000083,
+ 0x01f, 0x00000019,
+ 0x020, 0x00000063,
+ 0x01f, 0x0000001a,
+ 0x020, 0x00000043,
+ 0x01f, 0x0000001b,
+ 0x020, 0x00000023,
+ 0x01f, 0x0000001c,
+ 0x020, 0x00000003,
+ 0x01f, 0x0000001d,
+ 0x020, 0x000001e3,
+ 0x01f, 0x0000001e,
+ 0x020, 0x000001c3,
+ 0x01f, 0x0000001f,
+ 0x020, 0x000001a3,
+ 0x01f, 0x00000020,
+ 0x020, 0x00000183,
+ 0x01f, 0x00000021,
+ 0x020, 0x00000163,
+ 0x01f, 0x00000022,
+ 0x020, 0x00000143,
+ 0x01f, 0x00000023,
+ 0x020, 0x00000123,
+ 0x01f, 0x00000024,
+ 0x020, 0x00000103,
+ 0x023, 0x00000203,
+ 0x024, 0x00000100,
+ 0x00b, 0x000001ba,
+ 0x02c, 0x000003d7,
+ 0x02d, 0x00000ff0,
+ 0x000, 0x00000037,
+ 0x004, 0x00000160,
+ 0x007, 0x00000080,
+ 0x002, 0x0000088d,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x016, 0x00000200,
+ 0x016, 0x00000380,
+ 0x016, 0x00000020,
+ 0x016, 0x000001a0,
+ 0x000, 0x000000bf,
+ 0x00d, 0x0000001f,
+ 0x00d, 0x00000c9f,
+ 0x002, 0x0000004d,
+ 0x000, 0x00000cbf,
+ 0x004, 0x00000975,
+ 0x007, 0x00000700,
+};
+
+u32 Rtl8192PciERadioB_Array[RadioB_ArrayLengthPciE] = {
+ 0x019, 0x00000003,
+ 0x000, 0x000000bf,
+ 0x001, 0x000006e0,
+ 0x002, 0x0000004c,
+ 0x003, 0x000007f1,
+ 0x004, 0x00000975,
+ 0x005, 0x00000c58,
+ 0x006, 0x00000ae6,
+ 0x007, 0x000000ca,
+ 0x008, 0x00000e1c,
+ 0x000, 0x000000b7,
+ 0x00a, 0x00000850,
+ 0x000, 0x000000bf,
+ 0x00b, 0x000001ba,
+ 0x00c, 0x00000240,
+ 0x00e, 0x00000020,
+ 0x015, 0x00000f80,
+ 0x016, 0x00000020,
+ 0x017, 0x00000597,
+ 0x018, 0x0000050a,
+ 0x01a, 0x00000e00,
+ 0x01b, 0x00000f5e,
+ 0x01d, 0x00000607,
+ 0x01e, 0x000006cc,
+ 0x00b, 0x000001ba,
+ 0x023, 0x00000203,
+ 0x024, 0x00000100,
+ 0x000, 0x00000037,
+ 0x004, 0x00000160,
+ 0x016, 0x00000200,
+ 0x016, 0x00000380,
+ 0x016, 0x00000020,
+ 0x016, 0x000001a0,
+ 0x00d, 0x00000ccc,
+ 0x000, 0x000000bf,
+ 0x002, 0x0000004d,
+ 0x000, 0x00000cbf,
+ 0x004, 0x00000975,
+ 0x007, 0x00000700,
+};
+
+u32 Rtl8192PciERadioC_Array[RadioC_ArrayLengthPciE] = {
+ 0x0, };
+
+u32 Rtl8192PciERadioD_Array[RadioD_ArrayLengthPciE] = {
+ 0x0, };
+
+u32 Rtl8192PciEMACPHY_Array[] = {
+ 0x03c, 0xffff0000, 0x00000f0f,
+ 0x340, 0xffffffff, 0x161a1a1a,
+ 0x344, 0xffffffff, 0x12121416,
+ 0x348, 0x0000ffff, 0x00001818,
+ 0x12c, 0xffffffff, 0x04000802,
+ 0x318, 0x00000fff, 0x00000100,
+};
+
+u32 Rtl8192PciEMACPHY_Array_PG[] = {
+ 0x03c, 0xffff0000, 0x00000f0f,
+ 0xe00, 0xffffffff, 0x06090909,
+ 0xe04, 0xffffffff, 0x00030306,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x0a0c0d0f,
+ 0xe14, 0xffffffff, 0x06070809,
+ 0xe18, 0xffffffff, 0x0a0c0d0f,
+ 0xe1c, 0xffffffff, 0x06070809,
+ 0x12c, 0xffffffff, 0x04000802,
+ 0x318, 0x00000fff, 0x00000800,
+};
+
+u32 Rtl8192PciEAGCTAB_Array[AGCTAB_ArrayLengthPciE] = {
+ 0xc78, 0x7d000001,
+ 0xc78, 0x7d010001,
+ 0xc78, 0x7d020001,
+ 0xc78, 0x7d030001,
+ 0xc78, 0x7d040001,
+ 0xc78, 0x7d050001,
+ 0xc78, 0x7c060001,
+ 0xc78, 0x7b070001,
+ 0xc78, 0x7a080001,
+ 0xc78, 0x79090001,
+ 0xc78, 0x780a0001,
+ 0xc78, 0x770b0001,
+ 0xc78, 0x760c0001,
+ 0xc78, 0x750d0001,
+ 0xc78, 0x740e0001,
+ 0xc78, 0x730f0001,
+ 0xc78, 0x72100001,
+ 0xc78, 0x71110001,
+ 0xc78, 0x70120001,
+ 0xc78, 0x6f130001,
+ 0xc78, 0x6e140001,
+ 0xc78, 0x6d150001,
+ 0xc78, 0x6c160001,
+ 0xc78, 0x6b170001,
+ 0xc78, 0x6a180001,
+ 0xc78, 0x69190001,
+ 0xc78, 0x681a0001,
+ 0xc78, 0x671b0001,
+ 0xc78, 0x661c0001,
+ 0xc78, 0x651d0001,
+ 0xc78, 0x641e0001,
+ 0xc78, 0x491f0001,
+ 0xc78, 0x48200001,
+ 0xc78, 0x47210001,
+ 0xc78, 0x46220001,
+ 0xc78, 0x45230001,
+ 0xc78, 0x44240001,
+ 0xc78, 0x43250001,
+ 0xc78, 0x28260001,
+ 0xc78, 0x27270001,
+ 0xc78, 0x26280001,
+ 0xc78, 0x25290001,
+ 0xc78, 0x242a0001,
+ 0xc78, 0x232b0001,
+ 0xc78, 0x222c0001,
+ 0xc78, 0x212d0001,
+ 0xc78, 0x202e0001,
+ 0xc78, 0x0a2f0001,
+ 0xc78, 0x08300001,
+ 0xc78, 0x06310001,
+ 0xc78, 0x05320001,
+ 0xc78, 0x04330001,
+ 0xc78, 0x03340001,
+ 0xc78, 0x02350001,
+ 0xc78, 0x01360001,
+ 0xc78, 0x00370001,
+ 0xc78, 0x00380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x7d400001,
+ 0xc78, 0x7d410001,
+ 0xc78, 0x7d420001,
+ 0xc78, 0x7d430001,
+ 0xc78, 0x7d440001,
+ 0xc78, 0x7d450001,
+ 0xc78, 0x7c460001,
+ 0xc78, 0x7b470001,
+ 0xc78, 0x7a480001,
+ 0xc78, 0x79490001,
+ 0xc78, 0x784a0001,
+ 0xc78, 0x774b0001,
+ 0xc78, 0x764c0001,
+ 0xc78, 0x754d0001,
+ 0xc78, 0x744e0001,
+ 0xc78, 0x734f0001,
+ 0xc78, 0x72500001,
+ 0xc78, 0x71510001,
+ 0xc78, 0x70520001,
+ 0xc78, 0x6f530001,
+ 0xc78, 0x6e540001,
+ 0xc78, 0x6d550001,
+ 0xc78, 0x6c560001,
+ 0xc78, 0x6b570001,
+ 0xc78, 0x6a580001,
+ 0xc78, 0x69590001,
+ 0xc78, 0x685a0001,
+ 0xc78, 0x675b0001,
+ 0xc78, 0x665c0001,
+ 0xc78, 0x655d0001,
+ 0xc78, 0x645e0001,
+ 0xc78, 0x495f0001,
+ 0xc78, 0x48600001,
+ 0xc78, 0x47610001,
+ 0xc78, 0x46620001,
+ 0xc78, 0x45630001,
+ 0xc78, 0x44640001,
+ 0xc78, 0x43650001,
+ 0xc78, 0x28660001,
+ 0xc78, 0x27670001,
+ 0xc78, 0x26680001,
+ 0xc78, 0x25690001,
+ 0xc78, 0x246a0001,
+ 0xc78, 0x236b0001,
+ 0xc78, 0x226c0001,
+ 0xc78, 0x216d0001,
+ 0xc78, 0x206e0001,
+ 0xc78, 0x0a6f0001,
+ 0xc78, 0x08700001,
+ 0xc78, 0x06710001,
+ 0xc78, 0x05720001,
+ 0xc78, 0x04730001,
+ 0xc78, 0x03740001,
+ 0xc78, 0x02750001,
+ 0xc78, 0x01760001,
+ 0xc78, 0x00770001,
+ 0xc78, 0x00780001,
+ 0xc78, 0x00790001,
+ 0xc78, 0x007a0001,
+ 0xc78, 0x007b0001,
+ 0xc78, 0x007c0001,
+ 0xc78, 0x007d0001,
+ 0xc78, 0x007e0001,
+ 0xc78, 0x007f0001,
+ 0xc78, 0x2e00001e,
+ 0xc78, 0x2e01001e,
+ 0xc78, 0x2e02001e,
+ 0xc78, 0x2e03001e,
+ 0xc78, 0x2e04001e,
+ 0xc78, 0x2e05001e,
+ 0xc78, 0x3006001e,
+ 0xc78, 0x3407001e,
+ 0xc78, 0x3908001e,
+ 0xc78, 0x3c09001e,
+ 0xc78, 0x3f0a001e,
+ 0xc78, 0x420b001e,
+ 0xc78, 0x440c001e,
+ 0xc78, 0x450d001e,
+ 0xc78, 0x460e001e,
+ 0xc78, 0x460f001e,
+ 0xc78, 0x4710001e,
+ 0xc78, 0x4811001e,
+ 0xc78, 0x4912001e,
+ 0xc78, 0x4a13001e,
+ 0xc78, 0x4b14001e,
+ 0xc78, 0x4b15001e,
+ 0xc78, 0x4c16001e,
+ 0xc78, 0x4d17001e,
+ 0xc78, 0x4e18001e,
+ 0xc78, 0x4f19001e,
+ 0xc78, 0x4f1a001e,
+ 0xc78, 0x501b001e,
+ 0xc78, 0x511c001e,
+ 0xc78, 0x521d001e,
+ 0xc78, 0x521e001e,
+ 0xc78, 0x531f001e,
+ 0xc78, 0x5320001e,
+ 0xc78, 0x5421001e,
+ 0xc78, 0x5522001e,
+ 0xc78, 0x5523001e,
+ 0xc78, 0x5624001e,
+ 0xc78, 0x5725001e,
+ 0xc78, 0x5726001e,
+ 0xc78, 0x5827001e,
+ 0xc78, 0x5828001e,
+ 0xc78, 0x5929001e,
+ 0xc78, 0x592a001e,
+ 0xc78, 0x5a2b001e,
+ 0xc78, 0x5b2c001e,
+ 0xc78, 0x5c2d001e,
+ 0xc78, 0x5c2e001e,
+ 0xc78, 0x5d2f001e,
+ 0xc78, 0x5e30001e,
+ 0xc78, 0x5f31001e,
+ 0xc78, 0x6032001e,
+ 0xc78, 0x6033001e,
+ 0xc78, 0x6134001e,
+ 0xc78, 0x6235001e,
+ 0xc78, 0x6336001e,
+ 0xc78, 0x6437001e,
+ 0xc78, 0x6438001e,
+ 0xc78, 0x6539001e,
+ 0xc78, 0x663a001e,
+ 0xc78, 0x673b001e,
+ 0xc78, 0x673c001e,
+ 0xc78, 0x683d001e,
+ 0xc78, 0x693e001e,
+ 0xc78, 0x6a3f001e,
+};
diff --git a/drivers/staging/rtl8192e/r8192E_hwimg.h b/drivers/staging/rtl8192e/r8192E_hwimg.h
new file mode 100644
index 00000000000..019836bb36c
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8192E_hwimg.h
@@ -0,0 +1,51 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef __INC_HAL8192PciE_FW_IMG_H
+#define __INC_HAL8192PciE_FW_IMG_H
+
+/*Created on 2008/11/18, 3: 7*/
+
+#include <linux/types.h>
+
+#define BootArrayLengthPciE 344
+extern u8 Rtl8192PciEFwBootArray[BootArrayLengthPciE];
+#define MainArrayLengthPciE 43012
+extern u8 Rtl8192PciEFwMainArray[MainArrayLengthPciE];
+#define DataArrayLengthPciE 848
+extern u8 Rtl8192PciEFwDataArray[DataArrayLengthPciE];
+#define PHY_REGArrayLengthPciE 1
+extern u32 Rtl8192PciEPHY_REGArray[PHY_REGArrayLengthPciE];
+#define PHY_REG_1T2RArrayLengthPciE 296
+extern u32 Rtl8192PciEPHY_REG_1T2RArray[PHY_REG_1T2RArrayLengthPciE];
+#define RadioA_ArrayLengthPciE 246
+extern u32 Rtl8192PciERadioA_Array[RadioA_ArrayLengthPciE] ;
+#define RadioB_ArrayLengthPciE 78
+extern u32 Rtl8192PciERadioB_Array[RadioB_ArrayLengthPciE] ;
+#define RadioC_ArrayLengthPciE 2
+extern u32 Rtl8192PciERadioC_Array[RadioC_ArrayLengthPciE] ;
+#define RadioD_ArrayLengthPciE 2
+extern u32 Rtl8192PciERadioD_Array[RadioD_ArrayLengthPciE] ;
+#define MACPHY_ArrayLengthPciE 18
+extern u32 Rtl8192PciEMACPHY_Array[MACPHY_ArrayLengthPciE] ;
+#define MACPHY_Array_PGLengthPciE 30
+extern u32 Rtl8192PciEMACPHY_Array_PG[MACPHY_Array_PGLengthPciE] ;
+#define AGCTAB_ArrayLengthPciE 384
+extern u32 Rtl8192PciEAGCTAB_Array[AGCTAB_ArrayLengthPciE] ;
+
+#endif
diff --git a/drivers/staging/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/r8192E_phy.c
new file mode 100644
index 00000000000..7fe69a3348d
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8192E_phy.c
@@ -0,0 +1,1637 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+
+#include "rtl_core.h"
+#include "r8192E_hw.h"
+#include "r8192E_phyreg.h"
+#include "r8190P_rtl8256.h"
+#include "r8192E_phy.h"
+#include "rtl_dm.h"
+#include "dot11d.h"
+
+#include "r8192E_hwimg.h"
+
+static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
+ 0,
+ 0x085c,
+ 0x08dc,
+ 0x095c,
+ 0x09dc,
+ 0x0a5c,
+ 0x0adc,
+ 0x0b5c,
+ 0x0bdc,
+ 0x0c5c,
+ 0x0cdc,
+ 0x0d5c,
+ 0x0ddc,
+ 0x0e5c,
+ 0x0f72,
+};
+
+/*************************Define local function prototype**********************/
+
+static u32 phy_FwRFSerialRead(struct net_device *dev,
+ enum rf90_radio_path eRFPath,
+ u32 Offset);
+static void phy_FwRFSerialWrite(struct net_device *dev,
+ enum rf90_radio_path eRFPath,
+ u32 Offset, u32 Data);
+
+static u32 rtl8192_CalculateBitShift(u32 dwBitMask)
+{
+ u32 i;
+ for (i = 0; i <= 31; i++) {
+ if (((dwBitMask >> i) & 0x1) == 1)
+ break;
+ }
+ return i;
+}
+
+u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath)
+{
+ u8 ret = 1;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ if (priv->rf_type == RF_2T4R)
+ ret = 0;
+ else if (priv->rf_type == RF_1T2R) {
+ if (eRFPath == RF90_PATH_A || eRFPath == RF90_PATH_B)
+ ret = 1;
+ else if (eRFPath == RF90_PATH_C || eRFPath == RF90_PATH_D)
+ ret = 0;
+ }
+ return ret;
+}
+
+void rtl8192_setBBreg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask,
+ u32 dwData)
+{
+
+ u32 OriginalValue, BitShift, NewValue;
+
+ if (dwBitMask != bMaskDWord) {
+ OriginalValue = read_nic_dword(dev, dwRegAddr);
+ BitShift = rtl8192_CalculateBitShift(dwBitMask);
+ NewValue = (((OriginalValue) & (~dwBitMask)) |
+ (dwData << BitShift));
+ write_nic_dword(dev, dwRegAddr, NewValue);
+ } else
+ write_nic_dword(dev, dwRegAddr, dwData);
+ return;
+}
+
+u32 rtl8192_QueryBBReg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask)
+{
+ u32 Ret = 0, OriginalValue, BitShift;
+
+ OriginalValue = read_nic_dword(dev, dwRegAddr);
+ BitShift = rtl8192_CalculateBitShift(dwBitMask);
+ Ret = (OriginalValue & dwBitMask) >> BitShift;
+
+ return Ret;
+}
+static u32 rtl8192_phy_RFSerialRead(struct net_device *dev,
+ enum rf90_radio_path eRFPath, u32 Offset)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 ret = 0;
+ u32 NewOffset = 0;
+ struct bb_reg_definition *pPhyReg = &priv->PHYRegDef[eRFPath];
+ Offset &= 0x3f;
+
+ if (priv->rf_chip == RF_8256) {
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
+ if (Offset >= 31) {
+ priv->RfReg0Value[eRFPath] |= 0x140;
+ rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ (priv->RfReg0Value[eRFPath]<<16));
+ NewOffset = Offset - 30;
+ } else if (Offset >= 16) {
+ priv->RfReg0Value[eRFPath] |= 0x100;
+ priv->RfReg0Value[eRFPath] &= (~0x40);
+ rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ (priv->RfReg0Value[eRFPath]<<16));
+
+ NewOffset = Offset - 15;
+ } else
+ NewOffset = Offset;
+ } else {
+ RT_TRACE((COMP_PHY|COMP_ERR), "check RF type here, need"
+ " to be 8256\n");
+ NewOffset = Offset;
+ }
+ rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress,
+ NewOffset);
+ rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x0);
+ rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x1);
+
+ mdelay(1);
+
+ ret = rtl8192_QueryBBReg(dev, pPhyReg->rfLSSIReadBack,
+ bLSSIReadBackData);
+
+ if (priv->rf_chip == RF_8256) {
+ priv->RfReg0Value[eRFPath] &= 0xebf;
+
+ rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord,
+ (priv->RfReg0Value[eRFPath] << 16));
+
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
+ }
+
+
+ return ret;
+
+}
+
+static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
+ enum rf90_radio_path eRFPath, u32 Offset,
+ u32 Data)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 DataAndAddr = 0, NewOffset = 0;
+ struct bb_reg_definition *pPhyReg = &priv->PHYRegDef[eRFPath];
+
+ Offset &= 0x3f;
+ if (priv->rf_chip == RF_8256) {
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
+
+ if (Offset >= 31) {
+ priv->RfReg0Value[eRFPath] |= 0x140;
+ rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ (priv->RfReg0Value[eRFPath] << 16));
+ NewOffset = Offset - 30;
+ } else if (Offset >= 16) {
+ priv->RfReg0Value[eRFPath] |= 0x100;
+ priv->RfReg0Value[eRFPath] &= (~0x40);
+ rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ (priv->RfReg0Value[eRFPath] << 16));
+ NewOffset = Offset - 15;
+ } else
+ NewOffset = Offset;
+ } else {
+ RT_TRACE((COMP_PHY|COMP_ERR), "check RF type here, need to be"
+ " 8256\n");
+ NewOffset = Offset;
+ }
+
+ DataAndAddr = (Data<<16) | (NewOffset&0x3f);
+
+ rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
+
+ if (Offset == 0x0)
+ priv->RfReg0Value[eRFPath] = Data;
+
+ if (priv->rf_chip == RF_8256) {
+ if (Offset != 0) {
+ priv->RfReg0Value[eRFPath] &= 0xebf;
+ rtl8192_setBBreg(
+ dev,
+ pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ (priv->RfReg0Value[eRFPath] << 16));
+ }
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
+ }
+ return;
+}
+
+void rtl8192_phy_SetRFReg(struct net_device *dev, enum rf90_radio_path eRFPath,
+ u32 RegAddr, u32 BitMask, u32 Data)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 Original_Value, BitShift, New_Value;
+
+ if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+ return;
+ if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter)
+ return;
+
+ RT_TRACE(COMP_PHY, "FW RF CTRL is not ready now\n");
+ if (priv->Rf_Mode == RF_OP_By_FW) {
+ if (BitMask != bMask12Bits) {
+ Original_Value = phy_FwRFSerialRead(dev, eRFPath,
+ RegAddr);
+ BitShift = rtl8192_CalculateBitShift(BitMask);
+ New_Value = (((Original_Value) & (~BitMask)) |
+ (Data << BitShift));
+
+ phy_FwRFSerialWrite(dev, eRFPath, RegAddr, New_Value);
+ } else
+ phy_FwRFSerialWrite(dev, eRFPath, RegAddr, Data);
+ udelay(200);
+
+ } else {
+ if (BitMask != bMask12Bits) {
+ Original_Value = rtl8192_phy_RFSerialRead(dev, eRFPath,
+ RegAddr);
+ BitShift = rtl8192_CalculateBitShift(BitMask);
+ New_Value = (((Original_Value) & (~BitMask)) |
+ (Data << BitShift));
+
+ rtl8192_phy_RFSerialWrite(dev, eRFPath, RegAddr,
+ New_Value);
+ } else
+ rtl8192_phy_RFSerialWrite(dev, eRFPath, RegAddr, Data);
+ }
+ return;
+}
+
+u32 rtl8192_phy_QueryRFReg(struct net_device *dev, enum rf90_radio_path eRFPath,
+ u32 RegAddr, u32 BitMask)
+{
+ u32 Original_Value, Readback_Value, BitShift;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+ return 0;
+ if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter)
+ return 0;
+ down(&priv->rf_sem);
+ if (priv->Rf_Mode == RF_OP_By_FW) {
+ Original_Value = phy_FwRFSerialRead(dev, eRFPath, RegAddr);
+ udelay(200);
+ } else {
+ Original_Value = rtl8192_phy_RFSerialRead(dev, eRFPath,
+ RegAddr);
+ }
+ BitShift = rtl8192_CalculateBitShift(BitMask);
+ Readback_Value = (Original_Value & BitMask) >> BitShift;
+ up(&priv->rf_sem);
+ return Readback_Value;
+}
+
+static u32 phy_FwRFSerialRead(struct net_device *dev,
+ enum rf90_radio_path eRFPath, u32 Offset)
+{
+ u32 retValue = 0;
+ u32 Data = 0;
+ u8 time = 0;
+ Data |= ((Offset & 0xFF) << 12);
+ Data |= ((eRFPath & 0x3) << 20);
+ Data |= 0x80000000;
+ while (read_nic_dword(dev, QPNR)&0x80000000) {
+ if (time++ < 100)
+ udelay(10);
+ else
+ break;
+ }
+ write_nic_dword(dev, QPNR, Data);
+ while (read_nic_dword(dev, QPNR) & 0x80000000) {
+ if (time++ < 100)
+ udelay(10);
+ else
+ return 0;
+ }
+ retValue = read_nic_dword(dev, RF_DATA);
+
+ return retValue;
+
+} /* phy_FwRFSerialRead */
+
+static void phy_FwRFSerialWrite(struct net_device *dev,
+ enum rf90_radio_path eRFPath,
+ u32 Offset, u32 Data)
+{
+ u8 time = 0;
+
+ Data |= ((Offset & 0xFF) << 12);
+ Data |= ((eRFPath & 0x3) << 20);
+ Data |= 0x400000;
+ Data |= 0x80000000;
+
+ while (read_nic_dword(dev, QPNR) & 0x80000000) {
+ if (time++ < 100)
+ udelay(10);
+ else
+ break;
+ }
+ write_nic_dword(dev, QPNR, Data);
+
+} /* phy_FwRFSerialWrite */
+
+
+void rtl8192_phy_configmac(struct net_device *dev)
+{
+ u32 dwArrayLen = 0, i = 0;
+ u32 *pdwArray = NULL;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->bTXPowerDataReadFromEEPORM) {
+ RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array_PG\n");
+ dwArrayLen = MACPHY_Array_PGLength;
+ pdwArray = Rtl819XMACPHY_Array_PG;
+
+ } else {
+ RT_TRACE(COMP_PHY, "Read rtl819XMACPHY_Array\n");
+ dwArrayLen = MACPHY_ArrayLength;
+ pdwArray = Rtl819XMACPHY_Array;
+ }
+ for (i = 0; i < dwArrayLen; i += 3) {
+ RT_TRACE(COMP_DBG, "The Rtl8190MACPHY_Array[0] is %x Rtl8190MAC"
+ "PHY_Array[1] is %x Rtl8190MACPHY_Array[2] is %x\n",
+ pdwArray[i], pdwArray[i+1], pdwArray[i+2]);
+ if (pdwArray[i] == 0x318)
+ pdwArray[i+2] = 0x00000800;
+ rtl8192_setBBreg(dev, pdwArray[i], pdwArray[i+1],
+ pdwArray[i+2]);
+ }
+ return;
+
+}
+
+void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType)
+{
+ int i;
+ u32 *Rtl819XPHY_REGArray_Table = NULL;
+ u32 *Rtl819XAGCTAB_Array_Table = NULL;
+ u16 AGCTAB_ArrayLen, PHY_REGArrayLen = 0;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ AGCTAB_ArrayLen = AGCTAB_ArrayLength;
+ Rtl819XAGCTAB_Array_Table = Rtl819XAGCTAB_Array;
+ if (priv->rf_type == RF_2T4R) {
+ PHY_REGArrayLen = PHY_REGArrayLength;
+ Rtl819XPHY_REGArray_Table = Rtl819XPHY_REGArray;
+ } else if (priv->rf_type == RF_1T2R) {
+ PHY_REGArrayLen = PHY_REG_1T2RArrayLength;
+ Rtl819XPHY_REGArray_Table = Rtl819XPHY_REG_1T2RArray;
+ }
+
+ if (ConfigType == BaseBand_Config_PHY_REG) {
+ for (i = 0; i < PHY_REGArrayLen; i += 2) {
+ rtl8192_setBBreg(dev, Rtl819XPHY_REGArray_Table[i],
+ bMaskDWord,
+ Rtl819XPHY_REGArray_Table[i+1]);
+ RT_TRACE(COMP_DBG, "i: %x, The Rtl819xUsbPHY_REGArray"
+ "[0] is %x Rtl819xUsbPHY_REGArray[1] is %x\n",
+ i, Rtl819XPHY_REGArray_Table[i],
+ Rtl819XPHY_REGArray_Table[i+1]);
+ }
+ } else if (ConfigType == BaseBand_Config_AGC_TAB) {
+ for (i = 0; i < AGCTAB_ArrayLen; i += 2) {
+ rtl8192_setBBreg(dev, Rtl819XAGCTAB_Array_Table[i],
+ bMaskDWord,
+ Rtl819XAGCTAB_Array_Table[i+1]);
+ RT_TRACE(COMP_DBG, "i:%x, The rtl819XAGCTAB_Array[0] "
+ "is %x rtl819XAGCTAB_Array[1] is %x\n", i,
+ Rtl819XAGCTAB_Array_Table[i],
+ Rtl819XAGCTAB_Array_Table[i+1]);
+ }
+ }
+ return;
+}
+
+static void rtl8192_InitBBRFRegDef(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ priv->PHYRegDef[RF90_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW;
+ priv->PHYRegDef[RF90_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW;
+ priv->PHYRegDef[RF90_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW;
+ priv->PHYRegDef[RF90_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW;
+
+ priv->PHYRegDef[RF90_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB;
+ priv->PHYRegDef[RF90_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;
+ priv->PHYRegDef[RF90_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB;
+ priv->PHYRegDef[RF90_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB;
+
+ priv->PHYRegDef[RF90_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE;
+ priv->PHYRegDef[RF90_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE;
+ priv->PHYRegDef[RF90_PATH_C].rfintfo = rFPGA0_XC_RFInterfaceOE;
+ priv->PHYRegDef[RF90_PATH_D].rfintfo = rFPGA0_XD_RFInterfaceOE;
+
+ priv->PHYRegDef[RF90_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE;
+ priv->PHYRegDef[RF90_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE;
+ priv->PHYRegDef[RF90_PATH_C].rfintfe = rFPGA0_XC_RFInterfaceOE;
+ priv->PHYRegDef[RF90_PATH_D].rfintfe = rFPGA0_XD_RFInterfaceOE;
+
+ priv->PHYRegDef[RF90_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter;
+ priv->PHYRegDef[RF90_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter;
+ priv->PHYRegDef[RF90_PATH_C].rf3wireOffset = rFPGA0_XC_LSSIParameter;
+ priv->PHYRegDef[RF90_PATH_D].rf3wireOffset = rFPGA0_XD_LSSIParameter;
+
+ priv->PHYRegDef[RF90_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter;
+ priv->PHYRegDef[RF90_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter;
+ priv->PHYRegDef[RF90_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter;
+ priv->PHYRegDef[RF90_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter;
+
+ priv->PHYRegDef[RF90_PATH_A].rfTxGainStage = rFPGA0_TxGainStage;
+ priv->PHYRegDef[RF90_PATH_B].rfTxGainStage = rFPGA0_TxGainStage;
+ priv->PHYRegDef[RF90_PATH_C].rfTxGainStage = rFPGA0_TxGainStage;
+ priv->PHYRegDef[RF90_PATH_D].rfTxGainStage = rFPGA0_TxGainStage;
+
+ priv->PHYRegDef[RF90_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1;
+ priv->PHYRegDef[RF90_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1;
+ priv->PHYRegDef[RF90_PATH_C].rfHSSIPara1 = rFPGA0_XC_HSSIParameter1;
+ priv->PHYRegDef[RF90_PATH_D].rfHSSIPara1 = rFPGA0_XD_HSSIParameter1;
+
+ priv->PHYRegDef[RF90_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2;
+ priv->PHYRegDef[RF90_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2;
+ priv->PHYRegDef[RF90_PATH_C].rfHSSIPara2 = rFPGA0_XC_HSSIParameter2;
+ priv->PHYRegDef[RF90_PATH_D].rfHSSIPara2 = rFPGA0_XD_HSSIParameter2;
+
+ priv->PHYRegDef[RF90_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl;
+ priv->PHYRegDef[RF90_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl;
+ priv->PHYRegDef[RF90_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl;
+ priv->PHYRegDef[RF90_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl;
+
+ priv->PHYRegDef[RF90_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1;
+ priv->PHYRegDef[RF90_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1;
+ priv->PHYRegDef[RF90_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1;
+ priv->PHYRegDef[RF90_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1;
+
+ priv->PHYRegDef[RF90_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2;
+ priv->PHYRegDef[RF90_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2;
+ priv->PHYRegDef[RF90_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2;
+ priv->PHYRegDef[RF90_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2;
+
+ priv->PHYRegDef[RF90_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance;
+ priv->PHYRegDef[RF90_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
+ priv->PHYRegDef[RF90_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance;
+ priv->PHYRegDef[RF90_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance;
+
+ priv->PHYRegDef[RF90_PATH_A].rfRxAFE = rOFDM0_XARxAFE;
+ priv->PHYRegDef[RF90_PATH_B].rfRxAFE = rOFDM0_XBRxAFE;
+ priv->PHYRegDef[RF90_PATH_C].rfRxAFE = rOFDM0_XCRxAFE;
+ priv->PHYRegDef[RF90_PATH_D].rfRxAFE = rOFDM0_XDRxAFE;
+
+ priv->PHYRegDef[RF90_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance;
+ priv->PHYRegDef[RF90_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
+ priv->PHYRegDef[RF90_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance;
+ priv->PHYRegDef[RF90_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance;
+
+ priv->PHYRegDef[RF90_PATH_A].rfTxAFE = rOFDM0_XATxAFE;
+ priv->PHYRegDef[RF90_PATH_B].rfTxAFE = rOFDM0_XBTxAFE;
+ priv->PHYRegDef[RF90_PATH_C].rfTxAFE = rOFDM0_XCTxAFE;
+ priv->PHYRegDef[RF90_PATH_D].rfTxAFE = rOFDM0_XDTxAFE;
+
+ priv->PHYRegDef[RF90_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
+ priv->PHYRegDef[RF90_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
+ priv->PHYRegDef[RF90_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack;
+ priv->PHYRegDef[RF90_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack;
+
+}
+
+bool rtl8192_phy_checkBBAndRF(struct net_device *dev,
+ enum hw90_block CheckBlock,
+ enum rf90_radio_path eRFPath)
+{
+ bool ret = true;
+ u32 i, CheckTimes = 4, dwRegRead = 0;
+ u32 WriteAddr[4];
+ u32 WriteData[] = {0xfffff027, 0xaa55a02f, 0x00000027, 0x55aa502f};
+
+ WriteAddr[HW90_BLOCK_MAC] = 0x100;
+ WriteAddr[HW90_BLOCK_PHY0] = 0x900;
+ WriteAddr[HW90_BLOCK_PHY1] = 0x800;
+ WriteAddr[HW90_BLOCK_RF] = 0x3;
+ RT_TRACE(COMP_PHY, "=======>%s(), CheckBlock:%d\n", __func__,
+ CheckBlock);
+ for (i = 0; i < CheckTimes; i++) {
+ switch (CheckBlock) {
+ case HW90_BLOCK_MAC:
+ RT_TRACE(COMP_ERR, "PHY_CheckBBRFOK(): Never Write "
+ "0x100 here!");
+ break;
+
+ case HW90_BLOCK_PHY0:
+ case HW90_BLOCK_PHY1:
+ write_nic_dword(dev, WriteAddr[CheckBlock],
+ WriteData[i]);
+ dwRegRead = read_nic_dword(dev, WriteAddr[CheckBlock]);
+ break;
+
+ case HW90_BLOCK_RF:
+ WriteData[i] &= 0xfff;
+ rtl8192_phy_SetRFReg(dev, eRFPath,
+ WriteAddr[HW90_BLOCK_RF],
+ bMask12Bits, WriteData[i]);
+ mdelay(10);
+ dwRegRead = rtl8192_phy_QueryRFReg(dev, eRFPath,
+ WriteAddr[HW90_BLOCK_RF],
+ bMaskDWord);
+ mdelay(10);
+ break;
+
+ default:
+ ret = false;
+ break;
+ }
+
+
+ if (dwRegRead != WriteData[i]) {
+ RT_TRACE(COMP_ERR, "====>error=====dwRegRead: %x, "
+ "WriteData: %x\n", dwRegRead, WriteData[i]);
+ ret = false;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static bool rtl8192_BB_Config_ParaFile(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ bool rtStatus = true;
+ u8 bRegValue = 0, eCheckItem = 0;
+ u32 dwRegValue = 0;
+
+ bRegValue = read_nic_byte(dev, BB_GLOBAL_RESET);
+ write_nic_byte(dev, BB_GLOBAL_RESET, (bRegValue|BB_GLOBAL_RESET_BIT));
+
+ dwRegValue = read_nic_dword(dev, CPU_GEN);
+ write_nic_dword(dev, CPU_GEN, (dwRegValue&(~CPU_GEN_BB_RST)));
+
+ for (eCheckItem = (enum hw90_block)HW90_BLOCK_PHY0;
+ eCheckItem <= HW90_BLOCK_PHY1; eCheckItem++) {
+ rtStatus = rtl8192_phy_checkBBAndRF(dev,
+ (enum hw90_block)eCheckItem,
+ (enum rf90_radio_path)0);
+ if (rtStatus != true) {
+ RT_TRACE((COMP_ERR | COMP_PHY), "PHY_RF8256_Config():"
+ "Check PHY%d Fail!!\n", eCheckItem-1);
+ return rtStatus;
+ }
+ }
+ rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0);
+ rtl8192_phyConfigBB(dev, BaseBand_Config_PHY_REG);
+
+ dwRegValue = read_nic_dword(dev, CPU_GEN);
+ write_nic_dword(dev, CPU_GEN, (dwRegValue|CPU_GEN_BB_RST));
+
+ rtl8192_phyConfigBB(dev, BaseBand_Config_AGC_TAB);
+
+ if (priv->IC_Cut > VERSION_8190_BD) {
+ if (priv->rf_type == RF_2T4R)
+ dwRegValue = (priv->AntennaTxPwDiff[2]<<8 |
+ priv->AntennaTxPwDiff[1]<<4 |
+ priv->AntennaTxPwDiff[0]);
+ else
+ dwRegValue = 0x0;
+ rtl8192_setBBreg(dev, rFPGA0_TxGainStage,
+ (bXBTxAGC|bXCTxAGC|bXDTxAGC), dwRegValue);
+
+
+ dwRegValue = priv->CrystalCap;
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, bXtalCap92x,
+ dwRegValue);
+ }
+
+ return rtStatus;
+}
+bool rtl8192_BBConfig(struct net_device *dev)
+{
+ bool rtStatus = true;
+
+ rtl8192_InitBBRFRegDef(dev);
+ rtStatus = rtl8192_BB_Config_ParaFile(dev);
+ return rtStatus;
+}
+
+void rtl8192_phy_getTxPower(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ priv->MCSTxPowerLevelOriginalOffset[0] =
+ read_nic_dword(dev, rTxAGC_Rate18_06);
+ priv->MCSTxPowerLevelOriginalOffset[1] =
+ read_nic_dword(dev, rTxAGC_Rate54_24);
+ priv->MCSTxPowerLevelOriginalOffset[2] =
+ read_nic_dword(dev, rTxAGC_Mcs03_Mcs00);
+ priv->MCSTxPowerLevelOriginalOffset[3] =
+ read_nic_dword(dev, rTxAGC_Mcs07_Mcs04);
+ priv->MCSTxPowerLevelOriginalOffset[4] =
+ read_nic_dword(dev, rTxAGC_Mcs11_Mcs08);
+ priv->MCSTxPowerLevelOriginalOffset[5] =
+ read_nic_dword(dev, rTxAGC_Mcs15_Mcs12);
+
+ priv->DefaultInitialGain[0] = read_nic_byte(dev, rOFDM0_XAAGCCore1);
+ priv->DefaultInitialGain[1] = read_nic_byte(dev, rOFDM0_XBAGCCore1);
+ priv->DefaultInitialGain[2] = read_nic_byte(dev, rOFDM0_XCAGCCore1);
+ priv->DefaultInitialGain[3] = read_nic_byte(dev, rOFDM0_XDAGCCore1);
+ RT_TRACE(COMP_INIT, "Default initial gain (c50=0x%x, c58=0x%x, "
+ "c60=0x%x, c68=0x%x)\n",
+ priv->DefaultInitialGain[0], priv->DefaultInitialGain[1],
+ priv->DefaultInitialGain[2], priv->DefaultInitialGain[3]);
+
+ priv->framesync = read_nic_byte(dev, rOFDM0_RxDetector3);
+ priv->framesyncC34 = read_nic_dword(dev, rOFDM0_RxDetector2);
+ RT_TRACE(COMP_INIT, "Default framesync (0x%x) = 0x%x\n",
+ rOFDM0_RxDetector3, priv->framesync);
+ priv->SifsTime = read_nic_word(dev, SIFS);
+ return;
+}
+
+void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 powerlevel = 0, powerlevelOFDM24G = 0;
+ char ant_pwr_diff;
+ u32 u4RegValue;
+
+ if (priv->epromtype == EEPROM_93C46) {
+ powerlevel = priv->TxPowerLevelCCK[channel-1];
+ powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1];
+ } else if (priv->epromtype == EEPROM_93C56) {
+ if (priv->rf_type == RF_1T2R) {
+ powerlevel = priv->TxPowerLevelCCK_C[channel-1];
+ powerlevelOFDM24G = priv->TxPowerLevelOFDM24G_C[channel-1];
+ } else if (priv->rf_type == RF_2T4R) {
+ powerlevel = priv->TxPowerLevelCCK_A[channel-1];
+ powerlevelOFDM24G = priv->TxPowerLevelOFDM24G_A[channel-1];
+
+ ant_pwr_diff = priv->TxPowerLevelOFDM24G_C[channel-1]
+ - priv->TxPowerLevelOFDM24G_A[channel-1];
+
+ priv->RF_C_TxPwDiff = ant_pwr_diff;
+
+ ant_pwr_diff &= 0xf;
+
+ priv->AntennaTxPwDiff[2] = 0;
+ priv->AntennaTxPwDiff[1] = (u8)(ant_pwr_diff);
+ priv->AntennaTxPwDiff[0] = 0;
+
+ u4RegValue = (priv->AntennaTxPwDiff[2]<<8 |
+ priv->AntennaTxPwDiff[1]<<4 |
+ priv->AntennaTxPwDiff[0]);
+
+ rtl8192_setBBreg(dev, rFPGA0_TxGainStage,
+ (bXBTxAGC|bXCTxAGC|bXDTxAGC), u4RegValue);
+ }
+ }
+ switch (priv->rf_chip) {
+ case RF_8225:
+ break;
+ case RF_8256:
+ PHY_SetRF8256CCKTxPower(dev, powerlevel);
+ PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G);
+ break;
+ case RF_8258:
+ break;
+ default:
+ RT_TRACE(COMP_ERR, "unknown rf chip in funtion %s()\n",
+ __func__);
+ break;
+ }
+ return;
+}
+
+bool rtl8192_phy_RFConfig(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ bool rtStatus = true;
+ switch (priv->rf_chip) {
+ case RF_8225:
+ break;
+ case RF_8256:
+ rtStatus = PHY_RF8256_Config(dev);
+ break;
+
+ case RF_8258:
+ break;
+ case RF_PSEUDO_11N:
+ break;
+
+ default:
+ RT_TRACE(COMP_ERR, "error chip id\n");
+ break;
+ }
+ return rtStatus;
+}
+
+void rtl8192_phy_updateInitGain(struct net_device *dev)
+{
+ return;
+}
+
+u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
+ enum rf90_radio_path eRFPath)
+{
+
+ int i;
+ u8 ret = 0;
+
+ switch (eRFPath) {
+ case RF90_PATH_A:
+ for (i = 0; i < RadioA_ArrayLength; i += 2) {
+ if (Rtl819XRadioA_Array[i] == 0xfe) {
+ msleep(100);
+ continue;
+ }
+ rtl8192_phy_SetRFReg(dev, eRFPath,
+ Rtl819XRadioA_Array[i],
+ bMask12Bits,
+ Rtl819XRadioA_Array[i+1]);
+
+ }
+ break;
+ case RF90_PATH_B:
+ for (i = 0; i < RadioB_ArrayLength; i += 2) {
+ if (Rtl819XRadioB_Array[i] == 0xfe) {
+ msleep(100);
+ continue;
+ }
+ rtl8192_phy_SetRFReg(dev, eRFPath,
+ Rtl819XRadioB_Array[i],
+ bMask12Bits,
+ Rtl819XRadioB_Array[i+1]);
+
+ }
+ break;
+ case RF90_PATH_C:
+ for (i = 0; i < RadioC_ArrayLength; i += 2) {
+ if (Rtl819XRadioC_Array[i] == 0xfe) {
+ msleep(100);
+ continue;
+ }
+ rtl8192_phy_SetRFReg(dev, eRFPath,
+ Rtl819XRadioC_Array[i],
+ bMask12Bits,
+ Rtl819XRadioC_Array[i+1]);
+
+ }
+ break;
+ case RF90_PATH_D:
+ for (i = 0; i < RadioD_ArrayLength; i += 2) {
+ if (Rtl819XRadioD_Array[i] == 0xfe) {
+ msleep(100);
+ continue;
+ }
+ rtl8192_phy_SetRFReg(dev, eRFPath,
+ Rtl819XRadioD_Array[i], bMask12Bits,
+ Rtl819XRadioD_Array[i+1]);
+
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+
+}
+static void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 powerlevel = priv->TxPowerLevelCCK[channel-1];
+ u8 powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1];
+
+ switch (priv->rf_chip) {
+ case RF_8225:
+ break;
+
+ case RF_8256:
+ PHY_SetRF8256CCKTxPower(dev, powerlevel);
+ PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G);
+ break;
+
+ case RF_8258:
+ break;
+ default:
+ RT_TRACE(COMP_ERR, "unknown rf chip ID in rtl8192_SetTxPower"
+ "Level()\n");
+ break;
+ }
+ return;
+}
+
+static u8 rtl8192_phy_SetSwChnlCmdArray(struct sw_chnl_cmd *CmdTable,
+ u32 CmdTableIdx, u32 CmdTableSz,
+ enum sw_chnl_cmd_id CmdID,
+ u32 Para1, u32 Para2, u32 msDelay)
+{
+ struct sw_chnl_cmd *pCmd;
+
+ if (CmdTable == NULL) {
+ RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): CmdTable cannot "
+ "be NULL.\n");
+ return false;
+ }
+ if (CmdTableIdx >= CmdTableSz) {
+ RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): Access invalid"
+ " index, please check size of the table, CmdTableIdx:"
+ "%d, CmdTableSz:%d\n",
+ CmdTableIdx, CmdTableSz);
+ return false;
+ }
+
+ pCmd = CmdTable + CmdTableIdx;
+ pCmd->CmdID = CmdID;
+ pCmd->Para1 = Para1;
+ pCmd->Para2 = Para2;
+ pCmd->msDelay = msDelay;
+
+ return true;
+}
+
+static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
+ u8 *stage, u8 *step, u32 *delay)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+ u32 PreCommonCmdCnt;
+ u32 PostCommonCmdCnt;
+ u32 RfDependCmdCnt;
+ struct sw_chnl_cmd *CurrentCmd = NULL;
+ u8 eRFPath;
+
+ RT_TRACE(COMP_TRACE, "====>%s()====stage:%d, step:%d, channel:%d\n",
+ __func__, *stage, *step, channel);
+
+ if (!IsLegalChannel(priv->rtllib, channel)) {
+ RT_TRACE(COMP_ERR, "=============>set to illegal channel:%d\n",
+ channel);
+ return true;
+ }
+
+ {
+ PreCommonCmdCnt = 0;
+ rtl8192_phy_SetSwChnlCmdArray(ieee->PreCommonCmd,
+ PreCommonCmdCnt++,
+ MAX_PRECMD_CNT, CmdID_SetTxPowerLevel,
+ 0, 0, 0);
+ rtl8192_phy_SetSwChnlCmdArray(ieee->PreCommonCmd,
+ PreCommonCmdCnt++,
+ MAX_PRECMD_CNT, CmdID_End, 0, 0, 0);
+
+ PostCommonCmdCnt = 0;
+
+ rtl8192_phy_SetSwChnlCmdArray(ieee->PostCommonCmd,
+ PostCommonCmdCnt++,
+ MAX_POSTCMD_CNT, CmdID_End, 0, 0, 0);
+
+ RfDependCmdCnt = 0;
+ switch (priv->rf_chip) {
+ case RF_8225:
+ if (!(channel >= 1 && channel <= 14)) {
+ RT_TRACE(COMP_ERR, "illegal channel for Zebra "
+ "8225: %d\n", channel);
+ return false;
+ }
+ rtl8192_phy_SetSwChnlCmdArray(ieee->RfDependCmd,
+ RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT,
+ CmdID_RF_WriteReg, rZebra1_Channel,
+ RF_CHANNEL_TABLE_ZEBRA[channel], 10);
+ rtl8192_phy_SetSwChnlCmdArray(ieee->RfDependCmd,
+ RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT,
+ CmdID_End, 0, 0, 0);
+ break;
+
+ case RF_8256:
+ if (!(channel >= 1 && channel <= 14)) {
+ RT_TRACE(COMP_ERR, "illegal channel for Zebra"
+ " 8256: %d\n", channel);
+ return false;
+ }
+ rtl8192_phy_SetSwChnlCmdArray(ieee->RfDependCmd,
+ RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT,
+ CmdID_RF_WriteReg, rZebra1_Channel, channel,
+ 10);
+ rtl8192_phy_SetSwChnlCmdArray(ieee->RfDependCmd,
+
+ RfDependCmdCnt++,
+ MAX_RFDEPENDCMD_CNT,
+ CmdID_End, 0, 0, 0);
+ break;
+
+ case RF_8258:
+ break;
+
+ default:
+ RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n",
+ priv->rf_chip);
+ return false;
+ break;
+ }
+
+
+ do {
+ switch (*stage) {
+ case 0:
+ CurrentCmd = &ieee->PreCommonCmd[*step];
+ break;
+ case 1:
+ CurrentCmd = &ieee->RfDependCmd[*step];
+ break;
+ case 2:
+ CurrentCmd = &ieee->PostCommonCmd[*step];
+ break;
+ }
+
+ if (CurrentCmd && CurrentCmd->CmdID == CmdID_End) {
+ if ((*stage) == 2) {
+ return true;
+ } else {
+ (*stage)++;
+ (*step) = 0;
+ continue;
+ }
+ }
+
+ if (!CurrentCmd)
+ continue;
+ switch (CurrentCmd->CmdID) {
+ case CmdID_SetTxPowerLevel:
+ if (priv->IC_Cut > (u8)VERSION_8190_BD)
+ rtl8192_SetTxPowerLevel(dev, channel);
+ break;
+ case CmdID_WritePortUlong:
+ write_nic_dword(dev, CurrentCmd->Para1,
+ CurrentCmd->Para2);
+ break;
+ case CmdID_WritePortUshort:
+ write_nic_word(dev, CurrentCmd->Para1,
+ (u16)CurrentCmd->Para2);
+ break;
+ case CmdID_WritePortUchar:
+ write_nic_byte(dev, CurrentCmd->Para1,
+ (u8)CurrentCmd->Para2);
+ break;
+ case CmdID_RF_WriteReg:
+ for (eRFPath = 0; eRFPath <
+ priv->NumTotalRFPath; eRFPath++)
+ rtl8192_phy_SetRFReg(dev,
+ (enum rf90_radio_path)eRFPath,
+ CurrentCmd->Para1, bMask12Bits,
+ CurrentCmd->Para2<<7);
+ break;
+ default:
+ break;
+ }
+
+ break;
+ } while (true);
+ } /*for (Number of RF paths)*/
+
+ (*delay) = CurrentCmd->msDelay;
+ (*step)++;
+ return false;
+}
+
+static void rtl8192_phy_FinishSwChnlNow(struct net_device *dev, u8 channel)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 delay = 0;
+
+ while (!rtl8192_phy_SwChnlStepByStep(dev, channel, &priv->SwChnlStage,
+ &priv->SwChnlStep, &delay)) {
+ if (delay > 0)
+ msleep(delay);
+ if (IS_NIC_DOWN(priv))
+ break;
+ }
+}
+void rtl8192_SwChnl_WorkItem(struct net_device *dev)
+{
+
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ RT_TRACE(COMP_TRACE, "==> SwChnlCallback819xUsbWorkItem()\n");
+
+ RT_TRACE(COMP_TRACE, "=====>--%s(), set chan:%d, priv:%p\n", __func__,
+ priv->chan, priv);
+
+ rtl8192_phy_FinishSwChnlNow(dev , priv->chan);
+
+ RT_TRACE(COMP_TRACE, "<== SwChnlCallback819xUsbWorkItem()\n");
+}
+
+u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ RT_TRACE(COMP_PHY, "=====>%s()\n", __func__);
+ if (IS_NIC_DOWN(priv)) {
+ RT_TRACE(COMP_ERR, "%s(): ERR !! driver is not up\n", __func__);
+ return false;
+ }
+ if (priv->SwChnlInProgress)
+ return false;
+
+
+ switch (priv->rtllib->mode) {
+ case WIRELESS_MODE_A:
+ case WIRELESS_MODE_N_5G:
+ if (channel <= 14) {
+ RT_TRACE(COMP_ERR, "WIRELESS_MODE_A but channel<=14");
+ return false;
+ }
+ break;
+ case WIRELESS_MODE_B:
+ if (channel > 14) {
+ RT_TRACE(COMP_ERR, "WIRELESS_MODE_B but channel>14");
+ return false;
+ }
+ break;
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_N_24G:
+ if (channel > 14) {
+ RT_TRACE(COMP_ERR, "WIRELESS_MODE_G but channel>14");
+ return false;
+ }
+ break;
+ }
+
+ priv->SwChnlInProgress = true;
+ if (channel == 0)
+ channel = 1;
+
+ priv->chan = channel;
+
+ priv->SwChnlStage = 0;
+ priv->SwChnlStep = 0;
+
+ if (!IS_NIC_DOWN(priv))
+ rtl8192_SwChnl_WorkItem(dev);
+ priv->SwChnlInProgress = false;
+ return true;
+}
+
+static void CCK_Tx_Power_Track_BW_Switch_TSSI(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ switch (priv->CurrentChannelBW) {
+ case HT_CHANNEL_WIDTH_20:
+ priv->CCKPresentAttentuation =
+ priv->CCKPresentAttentuation_20Mdefault +
+ priv->CCKPresentAttentuation_difference;
+
+ if (priv->CCKPresentAttentuation >
+ (CCKTxBBGainTableLength-1))
+ priv->CCKPresentAttentuation =
+ CCKTxBBGainTableLength-1;
+ if (priv->CCKPresentAttentuation < 0)
+ priv->CCKPresentAttentuation = 0;
+
+ RT_TRACE(COMP_POWER_TRACKING, "20M, priv->CCKPresent"
+ "Attentuation = %d\n",
+ priv->CCKPresentAttentuation);
+
+ if (priv->rtllib->current_network.channel == 14 &&
+ !priv->bcck_in_ch14) {
+ priv->bcck_in_ch14 = true;
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ } else if (priv->rtllib->current_network.channel !=
+ 14 && priv->bcck_in_ch14) {
+ priv->bcck_in_ch14 = false;
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ } else {
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ }
+ break;
+
+ case HT_CHANNEL_WIDTH_20_40:
+ priv->CCKPresentAttentuation =
+ priv->CCKPresentAttentuation_40Mdefault +
+ priv->CCKPresentAttentuation_difference;
+
+ RT_TRACE(COMP_POWER_TRACKING, "40M, priv->CCKPresent"
+ "Attentuation = %d\n",
+ priv->CCKPresentAttentuation);
+ if (priv->CCKPresentAttentuation >
+ (CCKTxBBGainTableLength - 1))
+ priv->CCKPresentAttentuation =
+ CCKTxBBGainTableLength-1;
+ if (priv->CCKPresentAttentuation < 0)
+ priv->CCKPresentAttentuation = 0;
+
+ if (priv->rtllib->current_network.channel == 14 &&
+ !priv->bcck_in_ch14) {
+ priv->bcck_in_ch14 = true;
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ } else if (priv->rtllib->current_network.channel != 14
+ && priv->bcck_in_ch14) {
+ priv->bcck_in_ch14 = false;
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ } else {
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ }
+ break;
+ }
+}
+
+static void CCK_Tx_Power_Track_BW_Switch_ThermalMeter(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->rtllib->current_network.channel == 14 &&
+ !priv->bcck_in_ch14)
+ priv->bcck_in_ch14 = true;
+ else if (priv->rtllib->current_network.channel != 14 &&
+ priv->bcck_in_ch14)
+ priv->bcck_in_ch14 = false;
+
+ switch (priv->CurrentChannelBW) {
+ case HT_CHANNEL_WIDTH_20:
+ if (priv->Record_CCK_20Mindex == 0)
+ priv->Record_CCK_20Mindex = 6;
+ priv->CCK_index = priv->Record_CCK_20Mindex;
+ RT_TRACE(COMP_POWER_TRACKING, "20MHz, CCK_Tx_Power_Track_BW_"
+ "Switch_ThermalMeter(),CCK_index = %d\n",
+ priv->CCK_index);
+ break;
+
+ case HT_CHANNEL_WIDTH_20_40:
+ priv->CCK_index = priv->Record_CCK_40Mindex;
+ RT_TRACE(COMP_POWER_TRACKING, "40MHz, CCK_Tx_Power_Track_BW_"
+ "Switch_ThermalMeter(), CCK_index = %d\n",
+ priv->CCK_index);
+ break;
+ }
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+}
+
+static void CCK_Tx_Power_Track_BW_Switch(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->IC_Cut >= IC_VersionCut_D)
+ CCK_Tx_Power_Track_BW_Switch_TSSI(dev);
+ else
+ CCK_Tx_Power_Track_BW_Switch_ThermalMeter(dev);
+}
+
+void rtl8192_SetBWModeWorkItem(struct net_device *dev)
+{
+
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 regBwOpMode;
+
+ RT_TRACE(COMP_SWBW, "==>rtl8192_SetBWModeWorkItem() Switch to %s "
+ "bandwidth\n", priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz")
+
+
+ if (priv->rf_chip == RF_PSEUDO_11N) {
+ priv->SetBWModeInProgress = false;
+ return;
+ }
+ if (IS_NIC_DOWN(priv)) {
+ RT_TRACE(COMP_ERR, "%s(): ERR!! driver is not up\n", __func__);
+ return;
+ }
+ regBwOpMode = read_nic_byte(dev, BW_OPMODE);
+
+ switch (priv->CurrentChannelBW) {
+ case HT_CHANNEL_WIDTH_20:
+ regBwOpMode |= BW_OPMODE_20MHZ;
+ write_nic_byte(dev, BW_OPMODE, regBwOpMode);
+ break;
+
+ case HT_CHANNEL_WIDTH_20_40:
+ regBwOpMode &= ~BW_OPMODE_20MHZ;
+ write_nic_byte(dev, BW_OPMODE, regBwOpMode);
+ break;
+
+ default:
+ RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown "
+ "Bandwidth: %#X\n", priv->CurrentChannelBW);
+ break;
+ }
+
+ switch (priv->CurrentChannelBW) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x0);
+ rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x0);
+
+ if (!priv->btxpower_tracking) {
+ write_nic_dword(dev, rCCK0_TxFilter1, 0x1a1b0000);
+ write_nic_dword(dev, rCCK0_TxFilter2, 0x090e1317);
+ write_nic_dword(dev, rCCK0_DebugPort, 0x00000204);
+ } else {
+ CCK_Tx_Power_Track_BW_Switch(dev);
+ }
+
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 1);
+
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1);
+ rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1);
+
+ if (!priv->btxpower_tracking) {
+ write_nic_dword(dev, rCCK0_TxFilter1, 0x35360000);
+ write_nic_dword(dev, rCCK0_TxFilter2, 0x121c252e);
+ write_nic_dword(dev, rCCK0_DebugPort, 0x00000409);
+ } else {
+ CCK_Tx_Power_Track_BW_Switch(dev);
+ }
+
+ rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand,
+ (priv->nCur40MhzPrimeSC>>1));
+ rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00,
+ priv->nCur40MhzPrimeSC);
+
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown "
+ "Bandwidth: %#X\n", priv->CurrentChannelBW);
+ break;
+
+ }
+
+ switch (priv->rf_chip) {
+ case RF_8225:
+ break;
+
+ case RF_8256:
+ PHY_SetRF8256Bandwidth(dev, priv->CurrentChannelBW);
+ break;
+
+ case RF_8258:
+ break;
+
+ case RF_PSEUDO_11N:
+ break;
+
+ default:
+ RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip);
+ break;
+ }
+
+ atomic_dec(&(priv->rtllib->atm_swbw));
+ priv->SetBWModeInProgress = false;
+
+ RT_TRACE(COMP_SWBW, "<==SetBWMode819xUsb()");
+}
+
+void rtl8192_SetBWMode(struct net_device *dev, enum ht_channel_width Bandwidth,
+ enum ht_extchnl_offset Offset)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+
+ if (priv->SetBWModeInProgress)
+ return;
+
+ atomic_inc(&(priv->rtllib->atm_swbw));
+ priv->SetBWModeInProgress = true;
+
+ priv->CurrentChannelBW = Bandwidth;
+
+ if (Offset == HT_EXTCHNL_OFFSET_LOWER)
+ priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_UPPER;
+ else if (Offset == HT_EXTCHNL_OFFSET_UPPER)
+ priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_LOWER;
+ else
+ priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ rtl8192_SetBWModeWorkItem(dev);
+
+}
+
+void InitialGain819xPci(struct net_device *dev, u8 Operation)
+{
+#define SCAN_RX_INITIAL_GAIN 0x17
+#define POWER_DETECTION_TH 0x08
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 BitMask;
+ u8 initial_gain;
+
+ if (!IS_NIC_DOWN(priv)) {
+ switch (Operation) {
+ case IG_Backup:
+ RT_TRACE(COMP_SCAN, "IG_Backup, backup the initial"
+ " gain.\n");
+ initial_gain = SCAN_RX_INITIAL_GAIN;
+ BitMask = bMaskByte0;
+ if (dm_digtable.dig_algorithm ==
+ DIG_ALGO_BY_FALSE_ALARM)
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+ priv->initgain_backup.xaagccore1 =
+ (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1,
+ BitMask);
+ priv->initgain_backup.xbagccore1 =
+ (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1,
+ BitMask);
+ priv->initgain_backup.xcagccore1 =
+ (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1,
+ BitMask);
+ priv->initgain_backup.xdagccore1 =
+ (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1,
+ BitMask);
+ BitMask = bMaskByte2;
+ priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev,
+ rCCK0_CCA, BitMask);
+
+ RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc50 is"
+ " %x\n", priv->initgain_backup.xaagccore1);
+ RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc58 is"
+ " %x\n", priv->initgain_backup.xbagccore1);
+ RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc60 is"
+ " %x\n", priv->initgain_backup.xcagccore1);
+ RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc68 is"
+ " %x\n", priv->initgain_backup.xdagccore1);
+ RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xa0a is"
+ " %x\n", priv->initgain_backup.cca);
+
+ RT_TRACE(COMP_SCAN, "Write scan initial gain = 0x%x\n",
+ initial_gain);
+ write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain);
+ write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain);
+ write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain);
+ write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain);
+ RT_TRACE(COMP_SCAN, "Write scan 0xa0a = 0x%x\n",
+ POWER_DETECTION_TH);
+ write_nic_byte(dev, 0xa0a, POWER_DETECTION_TH);
+ break;
+ case IG_Restore:
+ RT_TRACE(COMP_SCAN, "IG_Restore, restore the initial "
+ "gain.\n");
+ BitMask = 0x7f;
+ if (dm_digtable.dig_algorithm ==
+ DIG_ALGO_BY_FALSE_ALARM)
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+
+ rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, BitMask,
+ (u32)priv->initgain_backup.xaagccore1);
+ rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, BitMask,
+ (u32)priv->initgain_backup.xbagccore1);
+ rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, BitMask,
+ (u32)priv->initgain_backup.xcagccore1);
+ rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, BitMask,
+ (u32)priv->initgain_backup.xdagccore1);
+ BitMask = bMaskByte2;
+ rtl8192_setBBreg(dev, rCCK0_CCA, BitMask,
+ (u32)priv->initgain_backup.cca);
+
+ RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc50"
+ " is %x\n", priv->initgain_backup.xaagccore1);
+ RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc58"
+ " is %x\n", priv->initgain_backup.xbagccore1);
+ RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc60"
+ " is %x\n", priv->initgain_backup.xcagccore1);
+ RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc68"
+ " is %x\n", priv->initgain_backup.xdagccore1);
+ RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xa0a"
+ " is %x\n", priv->initgain_backup.cca);
+
+ rtl8192_phy_setTxPower(dev,
+ priv->rtllib->current_network.channel);
+
+ if (dm_digtable.dig_algorithm ==
+ DIG_ALGO_BY_FALSE_ALARM)
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
+ break;
+ default:
+ RT_TRACE(COMP_SCAN, "Unknown IG Operation.\n");
+ break;
+ }
+ }
+}
+
+void PHY_SetRtl8192eRfOff(struct net_device *dev)
+{
+
+ rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0);
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x0);
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x18, 0x0);
+ rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0xf, 0x0);
+ rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0xf, 0x0);
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x60, 0x0);
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x4, 0x0);
+ write_nic_byte(dev, ANAPAR_FOR_8192PciE, 0x07);
+
+}
+
+static bool SetRFPowerState8190(struct net_device *dev,
+ enum rt_rf_power_state eRFPowerState)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ (&(priv->rtllib->PowerSaveControl));
+ bool bResult = true;
+ u8 i = 0, QueueID = 0;
+ struct rtl8192_tx_ring *ring = NULL;
+
+ if (priv->SetRFPowerStateInProgress == true)
+ return false;
+ RT_TRACE(COMP_PS, "===========> SetRFPowerState8190()!\n");
+ priv->SetRFPowerStateInProgress = true;
+
+ switch (priv->rf_chip) {
+ case RF_8256:
+ switch (eRFPowerState) {
+ case eRfOn:
+ RT_TRACE(COMP_PS, "SetRFPowerState8190() eRfOn!\n");
+ if ((priv->rtllib->eRFPowerState == eRfOff) &&
+ RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) {
+ bool rtstatus = true;
+ u32 InitilizeCount = 3;
+ do {
+ InitilizeCount--;
+ priv->RegRfOff = false;
+ rtstatus = NicIFEnableNIC(dev);
+ } while ((rtstatus != true) &&
+ (InitilizeCount > 0));
+
+ if (rtstatus != true) {
+ RT_TRACE(COMP_ERR, "%s():Initialize Ada"
+ "pter fail,return\n",
+ __func__);
+ priv->SetRFPowerStateInProgress = false;
+ return false;
+ }
+
+ RT_CLEAR_PS_LEVEL(pPSC,
+ RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ write_nic_byte(dev, ANAPAR, 0x37);
+ mdelay(1);
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1,
+ 0x4, 0x1);
+ priv->bHwRfOffAction = 0;
+
+ rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE,
+ BIT4, 0x1);
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4,
+ 0x300, 0x3);
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1,
+ 0x18, 0x3);
+ rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x3,
+ 0x3);
+ rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x3,
+ 0x3);
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1,
+ 0x60, 0x3);
+
+ }
+
+ break;
+
+ case eRfSleep:
+ if (priv->rtllib->eRFPowerState == eRfOff)
+ break;
+
+
+ for (QueueID = 0, i = 0; QueueID < MAX_TX_QUEUE; ) {
+ ring = &priv->tx_ring[QueueID];
+
+ if (skb_queue_len(&ring->queue) == 0) {
+ QueueID++;
+ continue;
+ } else {
+ RT_TRACE((COMP_POWER|COMP_RF), "eRf Off"
+ "/Sleep: %d times TcbBusyQueue"
+ "[%d] !=0 before doze!\n",
+ (i+1), QueueID);
+ udelay(10);
+ i++;
+ }
+
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(COMP_POWER, "\n\n\n TimeOut!! "
+ "SetRFPowerState8190(): eRfOff"
+ ": %d times TcbBusyQueue[%d] "
+ "!= 0 !!!\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ QueueID);
+ break;
+ }
+ }
+ PHY_SetRtl8192eRfOff(dev);
+ break;
+
+ case eRfOff:
+ RT_TRACE(COMP_PS, "SetRFPowerState8190() eRfOff/"
+ "Sleep !\n");
+
+ for (QueueID = 0, i = 0; QueueID < MAX_TX_QUEUE; ) {
+ ring = &priv->tx_ring[QueueID];
+
+ if (skb_queue_len(&ring->queue) == 0) {
+ QueueID++;
+ continue;
+ } else {
+ RT_TRACE(COMP_POWER, "eRf Off/Sleep: %d"
+ " times TcbBusyQueue[%d] !=0 b"
+ "efore doze!\n", (i+1),
+ QueueID);
+ udelay(10);
+ i++;
+ }
+
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(COMP_POWER, "\n\n\n SetZebra: "
+ "RFPowerState8185B(): eRfOff:"
+ " %d times TcbBusyQueue[%d] "
+ "!= 0 !!!\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ QueueID);
+ break;
+ }
+ }
+
+ if (pPSC->RegRfPsLevel & RT_RF_OFF_LEVL_HALT_NIC &&
+ !RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) {
+ NicIFDisableNIC(dev);
+ RT_SET_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
+ } else if (!(pPSC->RegRfPsLevel &
+ RT_RF_OFF_LEVL_HALT_NIC)) {
+ PHY_SetRtl8192eRfOff(dev);
+ }
+
+ break;
+
+ default:
+ bResult = false;
+ RT_TRACE(COMP_ERR, "SetRFPowerState8190(): unknow state"
+ " to set: 0x%X!!!\n", eRFPowerState);
+ break;
+ }
+
+ break;
+
+ default:
+ RT_TRACE(COMP_ERR, "SetRFPowerState8190(): Unknown RF type\n");
+ break;
+ }
+
+ if (bResult) {
+ priv->rtllib->eRFPowerState = eRFPowerState;
+
+ switch (priv->rf_chip) {
+ case RF_8256:
+ break;
+
+ default:
+ RT_TRACE(COMP_ERR, "SetRFPowerState8190(): Unknown "
+ "RF type\n");
+ break;
+ }
+ }
+
+ priv->SetRFPowerStateInProgress = false;
+ RT_TRACE(COMP_PS, "<=========== SetRFPowerState8190() bResult = %d!\n",
+ bResult);
+ return bResult;
+}
+
+bool SetRFPowerState(struct net_device *dev,
+ enum rt_rf_power_state eRFPowerState)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ bool bResult = false;
+
+ RT_TRACE(COMP_PS, "---------> SetRFPowerState(): eRFPowerState(%d)\n",
+ eRFPowerState);
+ if (eRFPowerState == priv->rtllib->eRFPowerState &&
+ priv->bHwRfOffAction == 0) {
+ RT_TRACE(COMP_PS, "<--------- SetRFPowerState(): discard the "
+ "request for eRFPowerState(%d) is the same.\n",
+ eRFPowerState);
+ return bResult;
+ }
+
+ bResult = SetRFPowerState8190(dev, eRFPowerState);
+
+ RT_TRACE(COMP_PS, "<--------- SetRFPowerState(): bResult(%d)\n",
+ bResult);
+
+ return bResult;
+}
+
+void PHY_ScanOperationBackup8192(struct net_device *dev, u8 Operation)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->up) {
+ switch (Operation) {
+ case SCAN_OPT_BACKUP:
+ priv->rtllib->InitialGainHandler(dev, IG_Backup);
+ break;
+
+ case SCAN_OPT_RESTORE:
+ priv->rtllib->InitialGainHandler(dev, IG_Restore);
+ break;
+
+ default:
+ RT_TRACE(COMP_SCAN, "Unknown Scan Backup Operation.\n");
+ break;
+ }
+ }
+
+}
diff --git a/drivers/staging/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/r8192E_phy.h
new file mode 100644
index 00000000000..7318f8857af
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8192E_phy.h
@@ -0,0 +1,120 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef _R819XU_PHY_H
+#define _R819XU_PHY_H
+
+#define MAX_DOZE_WAITING_TIMES_9x 64
+
+#define AGCTAB_ArrayLength AGCTAB_ArrayLengthPciE
+#define MACPHY_ArrayLength MACPHY_ArrayLengthPciE
+#define RadioA_ArrayLength RadioA_ArrayLengthPciE
+#define RadioB_ArrayLength RadioB_ArrayLengthPciE
+#define MACPHY_Array_PGLength MACPHY_Array_PGLengthPciE
+#define RadioC_ArrayLength RadioC_ArrayLengthPciE
+#define RadioD_ArrayLength RadioD_ArrayLengthPciE
+#define PHY_REGArrayLength PHY_REGArrayLengthPciE
+#define PHY_REG_1T2RArrayLength PHY_REG_1T2RArrayLengthPciE
+
+#define Rtl819XMACPHY_Array_PG Rtl8192PciEMACPHY_Array_PG
+#define Rtl819XMACPHY_Array Rtl8192PciEMACPHY_Array
+#define Rtl819XRadioA_Array Rtl8192PciERadioA_Array
+#define Rtl819XRadioB_Array Rtl8192PciERadioB_Array
+#define Rtl819XRadioC_Array Rtl8192PciERadioC_Array
+#define Rtl819XRadioD_Array Rtl8192PciERadioD_Array
+#define Rtl819XAGCTAB_Array Rtl8192PciEAGCTAB_Array
+#define Rtl819XPHY_REGArray Rtl8192PciEPHY_REGArray
+#define Rtl819XPHY_REG_1T2RArray Rtl8192PciEPHY_REG_1T2RArray
+
+extern u32 rtl819XMACPHY_Array_PG[];
+extern u32 rtl819XPHY_REG_1T2RArray[];
+extern u32 rtl819XAGCTAB_Array[];
+extern u32 rtl819XRadioA_Array[];
+extern u32 rtl819XRadioB_Array[];
+extern u32 rtl819XRadioC_Array[];
+extern u32 rtl819XRadioD_Array[];
+
+enum hw90_block {
+ HW90_BLOCK_MAC = 0,
+ HW90_BLOCK_PHY0 = 1,
+ HW90_BLOCK_PHY1 = 2,
+ HW90_BLOCK_RF = 3,
+ HW90_BLOCK_MAXIMUM = 4,
+};
+
+enum rf90_radio_path {
+ RF90_PATH_A = 0,
+ RF90_PATH_B = 1,
+ RF90_PATH_C = 2,
+ RF90_PATH_D = 3,
+ RF90_PATH_MAX
+};
+
+#define bMaskByte0 0xff
+#define bMaskByte1 0xff00
+#define bMaskByte2 0xff0000
+#define bMaskByte3 0xff000000
+#define bMaskHWord 0xffff0000
+#define bMaskLWord 0x0000ffff
+#define bMaskDWord 0xffffffff
+
+extern u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev,
+ u32 eRFPath);
+extern void rtl8192_setBBreg(struct net_device *dev, u32 dwRegAddr,
+ u32 dwBitMask, u32 dwData);
+extern u32 rtl8192_QueryBBReg(struct net_device *dev, u32 dwRegAddr,
+ u32 dwBitMask);
+extern void rtl8192_phy_SetRFReg(struct net_device *dev,
+ enum rf90_radio_path eRFPath,
+ u32 RegAddr, u32 BitMask, u32 Data);
+extern u32 rtl8192_phy_QueryRFReg(struct net_device *dev,
+ enum rf90_radio_path eRFPath,
+ u32 RegAddr, u32 BitMask);
+extern void rtl8192_phy_configmac(struct net_device *dev);
+extern void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType);
+extern bool rtl8192_phy_checkBBAndRF(struct net_device *dev,
+ enum hw90_block CheckBlock,
+ enum rf90_radio_path eRFPath);
+extern bool rtl8192_BBConfig(struct net_device *dev);
+extern void rtl8192_phy_getTxPower(struct net_device *dev);
+extern void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel);
+extern bool rtl8192_phy_RFConfig(struct net_device *dev);
+extern void rtl8192_phy_updateInitGain(struct net_device *dev);
+extern u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
+ enum rf90_radio_path eRFPath);
+
+extern u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel);
+extern void rtl8192_SetBWMode(struct net_device *dev,
+ enum ht_channel_width Bandwidth,
+ enum ht_extchnl_offset Offset);
+extern void rtl8192_SwChnl_WorkItem(struct net_device *dev);
+extern void rtl8192_SetBWModeWorkItem(struct net_device *dev);
+extern void InitialGain819xPci(struct net_device *dev, u8 Operation);
+
+extern void PHY_SetRtl8192eRfOff(struct net_device *dev);
+
+bool
+SetRFPowerState(
+ struct net_device *dev,
+ enum rt_rf_power_state eRFPowerState
+ );
+#define PHY_SetRFPowerState SetRFPowerState
+
+extern void PHY_ScanOperationBackup8192(struct net_device *dev, u8 Operation);
+
+#endif
diff --git a/drivers/staging/rtl8192e/r8192E_phyreg.h b/drivers/staging/rtl8192e/r8192E_phyreg.h
new file mode 100644
index 00000000000..7899dd538dc
--- /dev/null
+++ b/drivers/staging/rtl8192e/r8192E_phyreg.h
@@ -0,0 +1,852 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef _R819XU_PHYREG_H
+#define _R819XU_PHYREG_H
+
+
+#define RF_DATA 0x1d4
+
+#define rPMAC_Reset 0x100
+#define rPMAC_TxStart 0x104
+#define rPMAC_TxLegacySIG 0x108
+#define rPMAC_TxHTSIG1 0x10c
+#define rPMAC_TxHTSIG2 0x110
+#define rPMAC_PHYDebug 0x114
+#define rPMAC_TxPacketNum 0x118
+#define rPMAC_TxIdle 0x11c
+#define rPMAC_TxMACHeader0 0x120
+#define rPMAC_TxMACHeader1 0x124
+#define rPMAC_TxMACHeader2 0x128
+#define rPMAC_TxMACHeader3 0x12c
+#define rPMAC_TxMACHeader4 0x130
+#define rPMAC_TxMACHeader5 0x134
+#define rPMAC_TxDataType 0x138
+#define rPMAC_TxRandomSeed 0x13c
+#define rPMAC_CCKPLCPPreamble 0x140
+#define rPMAC_CCKPLCPHeader 0x144
+#define rPMAC_CCKCRC16 0x148
+#define rPMAC_OFDMRxCRC32OK 0x170
+#define rPMAC_OFDMRxCRC32Er 0x174
+#define rPMAC_OFDMRxParityEr 0x178
+#define rPMAC_OFDMRxCRC8Er 0x17c
+#define rPMAC_CCKCRxRC16Er 0x180
+#define rPMAC_CCKCRxRC32Er 0x184
+#define rPMAC_CCKCRxRC32OK 0x188
+#define rPMAC_TxStatus 0x18c
+
+#define MCS_TXAGC 0x340
+#define CCK_TXAGC 0x348
+
+/*---------------------0x400~0x4ff----------------------*/
+#define MacBlkCtrl 0x403
+
+#define rFPGA0_RFMOD 0x800
+#define rFPGA0_TxInfo 0x804
+#define rFPGA0_PSDFunction 0x808
+#define rFPGA0_TxGainStage 0x80c
+#define rFPGA0_RFTiming1 0x810
+#define rFPGA0_RFTiming2 0x814
+#define rFPGA0_XA_HSSIParameter1 0x820
+#define rFPGA0_XA_HSSIParameter2 0x824
+#define rFPGA0_XB_HSSIParameter1 0x828
+#define rFPGA0_XB_HSSIParameter2 0x82c
+#define rFPGA0_XC_HSSIParameter1 0x830
+#define rFPGA0_XC_HSSIParameter2 0x834
+#define rFPGA0_XD_HSSIParameter1 0x838
+#define rFPGA0_XD_HSSIParameter2 0x83c
+#define rFPGA0_XA_LSSIParameter 0x840
+#define rFPGA0_XB_LSSIParameter 0x844
+#define rFPGA0_XC_LSSIParameter 0x848
+#define rFPGA0_XD_LSSIParameter 0x84c
+#define rFPGA0_RFWakeUpParameter 0x850
+#define rFPGA0_RFSleepUpParameter 0x854
+#define rFPGA0_XAB_SwitchControl 0x858
+#define rFPGA0_XCD_SwitchControl 0x85c
+#define rFPGA0_XA_RFInterfaceOE 0x860
+#define rFPGA0_XB_RFInterfaceOE 0x864
+#define rFPGA0_XC_RFInterfaceOE 0x868
+#define rFPGA0_XD_RFInterfaceOE 0x86c
+#define rFPGA0_XAB_RFInterfaceSW 0x870
+#define rFPGA0_XCD_RFInterfaceSW 0x874
+#define rFPGA0_XAB_RFParameter 0x878
+#define rFPGA0_XCD_RFParameter 0x87c
+#define rFPGA0_AnalogParameter1 0x880
+#define rFPGA0_AnalogParameter2 0x884
+#define rFPGA0_AnalogParameter3 0x888
+#define rFPGA0_AnalogParameter4 0x88c
+#define rFPGA0_XA_LSSIReadBack 0x8a0
+#define rFPGA0_XB_LSSIReadBack 0x8a4
+#define rFPGA0_XC_LSSIReadBack 0x8a8
+#define rFPGA0_XD_LSSIReadBack 0x8ac
+#define rFPGA0_PSDReport 0x8b4
+#define rFPGA0_XAB_RFInterfaceRB 0x8e0
+#define rFPGA0_XCD_RFInterfaceRB 0x8e4
+
+#define rFPGA1_RFMOD 0x900
+#define rFPGA1_TxBlock 0x904
+#define rFPGA1_DebugSelect 0x908
+#define rFPGA1_TxInfo 0x90c
+
+#define rCCK0_System 0xa00
+#define rCCK0_AFESetting 0xa04
+#define rCCK0_CCA 0xa08
+#define rCCK0_RxAGC1 0xa0c
+#define rCCK0_RxAGC2 0xa10
+#define rCCK0_RxHP 0xa14
+#define rCCK0_DSPParameter1 0xa18
+#define rCCK0_DSPParameter2 0xa1c
+#define rCCK0_TxFilter1 0xa20
+#define rCCK0_TxFilter2 0xa24
+#define rCCK0_DebugPort 0xa28
+#define rCCK0_FalseAlarmReport 0xa2c
+#define rCCK0_TRSSIReport 0xa50
+#define rCCK0_RxReport 0xa54
+#define rCCK0_FACounterLower 0xa5c
+#define rCCK0_FACounterUpper 0xa58
+
+#define rOFDM0_LSTF 0xc00
+#define rOFDM0_TRxPathEnable 0xc04
+#define rOFDM0_TRMuxPar 0xc08
+#define rOFDM0_TRSWIsolation 0xc0c
+#define rOFDM0_XARxAFE 0xc10
+#define rOFDM0_XARxIQImbalance 0xc14
+#define rOFDM0_XBRxAFE 0xc18
+#define rOFDM0_XBRxIQImbalance 0xc1c
+#define rOFDM0_XCRxAFE 0xc20
+#define rOFDM0_XCRxIQImbalance 0xc24
+#define rOFDM0_XDRxAFE 0xc28
+#define rOFDM0_XDRxIQImbalance 0xc2c
+#define rOFDM0_RxDetector1 0xc30
+#define rOFDM0_RxDetector2 0xc34
+#define rOFDM0_RxDetector3 0xc38
+#define rOFDM0_RxDetector4 0xc3c
+#define rOFDM0_RxDSP 0xc40
+#define rOFDM0_CFOandDAGC 0xc44
+#define rOFDM0_CCADropThreshold 0xc48
+#define rOFDM0_ECCAThreshold 0xc4c
+#define rOFDM0_XAAGCCore1 0xc50
+#define rOFDM0_XAAGCCore2 0xc54
+#define rOFDM0_XBAGCCore1 0xc58
+#define rOFDM0_XBAGCCore2 0xc5c
+#define rOFDM0_XCAGCCore1 0xc60
+#define rOFDM0_XCAGCCore2 0xc64
+#define rOFDM0_XDAGCCore1 0xc68
+#define rOFDM0_XDAGCCore2 0xc6c
+#define rOFDM0_AGCParameter1 0xc70
+#define rOFDM0_AGCParameter2 0xc74
+#define rOFDM0_AGCRSSITable 0xc78
+#define rOFDM0_HTSTFAGC 0xc7c
+#define rOFDM0_XATxIQImbalance 0xc80
+#define rOFDM0_XATxAFE 0xc84
+#define rOFDM0_XBTxIQImbalance 0xc88
+#define rOFDM0_XBTxAFE 0xc8c
+#define rOFDM0_XCTxIQImbalance 0xc90
+#define rOFDM0_XCTxAFE 0xc94
+#define rOFDM0_XDTxIQImbalance 0xc98
+#define rOFDM0_XDTxAFE 0xc9c
+#define rOFDM0_RxHPParameter 0xce0
+#define rOFDM0_TxPseudoNoiseWgt 0xce4
+#define rOFDM0_FrameSync 0xcf0
+#define rOFDM0_DFSReport 0xcf4
+#define rOFDM0_TxCoeff1 0xca4
+#define rOFDM0_TxCoeff2 0xca8
+#define rOFDM0_TxCoeff3 0xcac
+#define rOFDM0_TxCoeff4 0xcb0
+#define rOFDM0_TxCoeff5 0xcb4
+#define rOFDM0_TxCoeff6 0xcb8
+
+
+#define rOFDM1_LSTF 0xd00
+#define rOFDM1_TRxPathEnable 0xd04
+#define rOFDM1_CFO 0xd08
+#define rOFDM1_CSI1 0xd10
+#define rOFDM1_SBD 0xd14
+#define rOFDM1_CSI2 0xd18
+#define rOFDM1_CFOTracking 0xd2c
+#define rOFDM1_TRxMesaure1 0xd34
+#define rOFDM1_IntfDet 0xd3c
+#define rOFDM1_PseudoNoiseStateAB 0xd50
+#define rOFDM1_PseudoNoiseStateCD 0xd54
+#define rOFDM1_RxPseudoNoiseWgt 0xd58
+#define rOFDM_PHYCounter1 0xda0
+#define rOFDM_PHYCounter2 0xda4
+#define rOFDM_PHYCounter3 0xda8
+#define rOFDM_ShortCFOAB 0xdac
+#define rOFDM_ShortCFOCD 0xdb0
+#define rOFDM_LongCFOAB 0xdb4
+#define rOFDM_LongCFOCD 0xdb8
+#define rOFDM_TailCFOAB 0xdbc
+#define rOFDM_TailCFOCD 0xdc0
+#define rOFDM_PWMeasure1 0xdc4
+#define rOFDM_PWMeasure2 0xdc8
+#define rOFDM_BWReport 0xdcc
+#define rOFDM_AGCReport 0xdd0
+#define rOFDM_RxSNR 0xdd4
+#define rOFDM_RxEVMCSI 0xdd8
+#define rOFDM_SIGReport 0xddc
+
+#define rTxAGC_Rate18_06 0xe00
+#define rTxAGC_Rate54_24 0xe04
+#define rTxAGC_CCK_Mcs32 0xe08
+#define rTxAGC_Mcs03_Mcs00 0xe10
+#define rTxAGC_Mcs07_Mcs04 0xe14
+#define rTxAGC_Mcs11_Mcs08 0xe18
+#define rTxAGC_Mcs15_Mcs12 0xe1c
+
+
+#define rZebra1_HSSIEnable 0x0
+#define rZebra1_TRxEnable1 0x1
+#define rZebra1_TRxEnable2 0x2
+#define rZebra1_AGC 0x4
+#define rZebra1_ChargePump 0x5
+#define rZebra1_Channel 0x7
+#define rZebra1_TxGain 0x8
+#define rZebra1_TxLPF 0x9
+#define rZebra1_RxLPF 0xb
+#define rZebra1_RxHPFCorner 0xc
+
+#define rGlobalCtrl 0
+#define rRTL8256_TxLPF 19
+#define rRTL8256_RxLPF 11
+
+#define rRTL8258_TxLPF 0x11
+#define rRTL8258_RxLPF 0x13
+#define rRTL8258_RSSILPF 0xa
+
+#define bBBResetB 0x100
+#define bGlobalResetB 0x200
+#define bOFDMTxStart 0x4
+#define bCCKTxStart 0x8
+#define bCRC32Debug 0x100
+#define bPMACLoopback 0x10
+#define bTxLSIG 0xffffff
+#define bOFDMTxRate 0xf
+#define bOFDMTxReserved 0x10
+#define bOFDMTxLength 0x1ffe0
+#define bOFDMTxParity 0x20000
+#define bTxHTSIG1 0xffffff
+#define bTxHTMCSRate 0x7f
+#define bTxHTBW 0x80
+#define bTxHTLength 0xffff00
+#define bTxHTSIG2 0xffffff
+#define bTxHTSmoothing 0x1
+#define bTxHTSounding 0x2
+#define bTxHTReserved 0x4
+#define bTxHTAggreation 0x8
+#define bTxHTSTBC 0x30
+#define bTxHTAdvanceCoding 0x40
+#define bTxHTShortGI 0x80
+#define bTxHTNumberHT_LTF 0x300
+#define bTxHTCRC8 0x3fc00
+#define bCounterReset 0x10000
+#define bNumOfOFDMTx 0xffff
+#define bNumOfCCKTx 0xffff0000
+#define bTxIdleInterval 0xffff
+#define bOFDMService 0xffff0000
+#define bTxMACHeader 0xffffffff
+#define bTxDataInit 0xff
+#define bTxHTMode 0x100
+#define bTxDataType 0x30000
+#define bTxRandomSeed 0xffffffff
+#define bCCKTxPreamble 0x1
+#define bCCKTxSFD 0xffff0000
+#define bCCKTxSIG 0xff
+#define bCCKTxService 0xff00
+#define bCCKLengthExt 0x8000
+#define bCCKTxLength 0xffff0000
+#define bCCKTxCRC16 0xffff
+#define bCCKTxStatus 0x1
+#define bOFDMTxStatus 0x2
+
+#define bRFMOD 0x1
+#define bJapanMode 0x2
+#define bCCKTxSC 0x30
+#define bCCKEn 0x1000000
+#define bOFDMEn 0x2000000
+#define bOFDMRxADCPhase 0x10000
+#define bOFDMTxDACPhase 0x40000
+#define bXATxAGC 0x3f
+#define bXBTxAGC 0xf00
+#define bXCTxAGC 0xf000
+#define bXDTxAGC 0xf0000
+#define bPAStart 0xf0000000
+#define bTRStart 0x00f00000
+#define bRFStart 0x0000f000
+#define bBBStart 0x000000f0
+#define bBBCCKStart 0x0000000f
+#define bPAEnd 0xf
+#define bTREnd 0x0f000000
+#define bRFEnd 0x000f0000
+#define bCCAMask 0x000000f0
+#define bR2RCCAMask 0x00000f00
+#define bHSSI_R2TDelay 0xf8000000
+#define bHSSI_T2RDelay 0xf80000
+#define bContTxHSSI 0x400
+#define bIGFromCCK 0x200
+#define bAGCAddress 0x3f
+#define bRxHPTx 0x7000
+#define bRxHPT2R 0x38000
+#define bRxHPCCKIni 0xc0000
+#define bAGCTxCode 0xc00000
+#define bAGCRxCode 0x300000
+#define b3WireDataLength 0x800
+#define b3WireAddressLength 0x400
+#define b3WireRFPowerDown 0x1
+#define b5GPAPEPolarity 0x40000000
+#define b2GPAPEPolarity 0x80000000
+#define bRFSW_TxDefaultAnt 0x3
+#define bRFSW_TxOptionAnt 0x30
+#define bRFSW_RxDefaultAnt 0x300
+#define bRFSW_RxOptionAnt 0x3000
+#define bRFSI_3WireData 0x1
+#define bRFSI_3WireClock 0x2
+#define bRFSI_3WireLoad 0x4
+#define bRFSI_3WireRW 0x8
+#define bRFSI_3Wire 0xf
+#define bRFSI_RFENV 0x10
+#define bRFSI_TRSW 0x20
+#define bRFSI_TRSWB 0x40
+#define bRFSI_ANTSW 0x100
+#define bRFSI_ANTSWB 0x200
+#define bRFSI_PAPE 0x400
+#define bRFSI_PAPE5G 0x800
+#define bBandSelect 0x1
+#define bHTSIG2_GI 0x80
+#define bHTSIG2_Smoothing 0x01
+#define bHTSIG2_Sounding 0x02
+#define bHTSIG2_Aggreaton 0x08
+#define bHTSIG2_STBC 0x30
+#define bHTSIG2_AdvCoding 0x40
+#define bHTSIG2_NumOfHTLTF 0x300
+#define bHTSIG2_CRC8 0x3fc
+#define bHTSIG1_MCS 0x7f
+#define bHTSIG1_BandWidth 0x80
+#define bHTSIG1_HTLength 0xffff
+#define bLSIG_Rate 0xf
+#define bLSIG_Reserved 0x10
+#define bLSIG_Length 0x1fffe
+#define bLSIG_Parity 0x20
+#define bCCKRxPhase 0x4
+#define bLSSIReadAddress 0x3f000000
+#define bLSSIReadEdge 0x80000000
+#define bLSSIReadBackData 0xfff
+#define bLSSIReadOKFlag 0x1000
+#define bCCKSampleRate 0x8
+
+#define bRegulator0Standby 0x1
+#define bRegulatorPLLStandby 0x2
+#define bRegulator1Standby 0x4
+#define bPLLPowerUp 0x8
+#define bDPLLPowerUp 0x10
+#define bDA10PowerUp 0x20
+#define bAD7PowerUp 0x200
+#define bDA6PowerUp 0x2000
+#define bXtalPowerUp 0x4000
+#define b40MDClkPowerUP 0x8000
+#define bDA6DebugMode 0x20000
+#define bDA6Swing 0x380000
+#define bADClkPhase 0x4000000
+#define b80MClkDelay 0x18000000
+#define bAFEWatchDogEnable 0x20000000
+#define bXtalCap 0x0f000000
+#define bXtalCap01 0xc0000000
+#define bXtalCap23 0x3
+#define bXtalCap92x 0x0f000000
+#define bIntDifClkEnable 0x400
+#define bExtSigClkEnable 0x800
+#define bBandgapMbiasPowerUp 0x10000
+#define bAD11SHGain 0xc0000
+#define bAD11InputRange 0x700000
+#define bAD11OPCurrent 0x3800000
+#define bIPathLoopback 0x4000000
+#define bQPathLoopback 0x8000000
+#define bAFELoopback 0x10000000
+#define bDA10Swing 0x7e0
+#define bDA10Reverse 0x800
+#define bDAClkSource 0x1000
+#define bAD7InputRange 0x6000
+#define bAD7Gain 0x38000
+#define bAD7OutputCMMode 0x40000
+#define bAD7InputCMMode 0x380000
+#define bAD7Current 0xc00000
+#define bRegulatorAdjust 0x7000000
+#define bAD11PowerUpAtTx 0x1
+#define bDA10PSAtTx 0x10
+#define bAD11PowerUpAtRx 0x100
+#define bDA10PSAtRx 0x1000
+
+#define bCCKRxAGCFormat 0x200
+
+#define bPSDFFTSamplepPoint 0xc000
+#define bPSDAverageNum 0x3000
+#define bIQPathControl 0xc00
+#define bPSDFreq 0x3ff
+#define bPSDAntennaPath 0x30
+#define bPSDIQSwitch 0x40
+#define bPSDRxTrigger 0x400000
+#define bPSDTxTrigger 0x80000000
+#define bPSDSineToneScale 0x7f000000
+#define bPSDReport 0xffff
+
+#define bOFDMTxSC 0x30000000
+#define bCCKTxOn 0x1
+#define bOFDMTxOn 0x2
+#define bDebugPage 0xfff
+#define bDebugItem 0xff
+#define bAntL 0x10
+#define bAntNonHT 0x100
+#define bAntHT1 0x1000
+#define bAntHT2 0x10000
+#define bAntHT1S1 0x100000
+#define bAntNonHTS1 0x1000000
+
+#define bCCKBBMode 0x3
+#define bCCKTxPowerSaving 0x80
+#define bCCKRxPowerSaving 0x40
+#define bCCKSideBand 0x10
+#define bCCKScramble 0x8
+#define bCCKAntDiversity 0x8000
+#define bCCKCarrierRecovery 0x4000
+#define bCCKTxRate 0x3000
+#define bCCKDCCancel 0x0800
+#define bCCKISICancel 0x0400
+#define bCCKMatchFilter 0x0200
+#define bCCKEqualizer 0x0100
+#define bCCKPreambleDetect 0x800000
+#define bCCKFastFalseCCA 0x400000
+#define bCCKChEstStart 0x300000
+#define bCCKCCACount 0x080000
+#define bCCKcs_lim 0x070000
+#define bCCKBistMode 0x80000000
+#define bCCKCCAMask 0x40000000
+#define bCCKTxDACPhase 0x4
+#define bCCKRxADCPhase 0x20000000
+#define bCCKr_cp_mode0 0x0100
+#define bCCKTxDCOffset 0xf0
+#define bCCKRxDCOffset 0xf
+#define bCCKCCAMode 0xc000
+#define bCCKFalseCS_lim 0x3f00
+#define bCCKCS_ratio 0xc00000
+#define bCCKCorgBit_sel 0x300000
+#define bCCKPD_lim 0x0f0000
+#define bCCKNewCCA 0x80000000
+#define bCCKRxHPofIG 0x8000
+#define bCCKRxIG 0x7f00
+#define bCCKLNAPolarity 0x800000
+#define bCCKRx1stGain 0x7f0000
+#define bCCKRFExtend 0x20000000
+#define bCCKRxAGCSatLevel 0x1f000000
+#define bCCKRxAGCSatCount 0xe0
+#define bCCKRxRFSettle 0x1f
+#define bCCKFixedRxAGC 0x8000
+#define bCCKAntennaPolarity 0x2000
+#define bCCKTxFilterType 0x0c00
+#define bCCKRxAGCReportType 0x0300
+#define bCCKRxDAGCEn 0x80000000
+#define bCCKRxDAGCPeriod 0x20000000
+#define bCCKRxDAGCSatLevel 0x1f000000
+#define bCCKTimingRecovery 0x800000
+#define bCCKTxC0 0x3f0000
+#define bCCKTxC1 0x3f000000
+#define bCCKTxC2 0x3f
+#define bCCKTxC3 0x3f00
+#define bCCKTxC4 0x3f0000
+#define bCCKTxC5 0x3f000000
+#define bCCKTxC6 0x3f
+#define bCCKTxC7 0x3f00
+#define bCCKDebugPort 0xff0000
+#define bCCKDACDebug 0x0f000000
+#define bCCKFalseAlarmEnable 0x8000
+#define bCCKFalseAlarmRead 0x4000
+#define bCCKTRSSI 0x7f
+#define bCCKRxAGCReport 0xfe
+#define bCCKRxReport_AntSel 0x80000000
+#define bCCKRxReport_MFOff 0x40000000
+#define bCCKRxRxReport_SQLoss 0x20000000
+#define bCCKRxReport_Pktloss 0x10000000
+#define bCCKRxReport_Lockedbit 0x08000000
+#define bCCKRxReport_RateError 0x04000000
+#define bCCKRxReport_RxRate 0x03000000
+#define bCCKRxFACounterLower 0xff
+#define bCCKRxFACounterUpper 0xff000000
+#define bCCKRxHPAGCStart 0xe000
+#define bCCKRxHPAGCFinal 0x1c00
+
+#define bCCKRxFalseAlarmEnable 0x8000
+#define bCCKFACounterFreeze 0x4000
+
+#define bCCKTxPathSel 0x10000000
+#define bCCKDefaultRxPath 0xc000000
+#define bCCKOptionRxPath 0x3000000
+
+#define bNumOfSTF 0x3
+#define bShift_L 0xc0
+#define bGI_TH 0xc
+#define bRxPathA 0x1
+#define bRxPathB 0x2
+#define bRxPathC 0x4
+#define bRxPathD 0x8
+#define bTxPathA 0x1
+#define bTxPathB 0x2
+#define bTxPathC 0x4
+#define bTxPathD 0x8
+#define bTRSSIFreq 0x200
+#define bADCBackoff 0x3000
+#define bDFIRBackoff 0xc000
+#define bTRSSILatchPhase 0x10000
+#define bRxIDCOffset 0xff
+#define bRxQDCOffset 0xff00
+#define bRxDFIRMode 0x1800000
+#define bRxDCNFType 0xe000000
+#define bRXIQImb_A 0x3ff
+#define bRXIQImb_B 0xfc00
+#define bRXIQImb_C 0x3f0000
+#define bRXIQImb_D 0xffc00000
+#define bDC_dc_Notch 0x60000
+#define bRxNBINotch 0x1f000000
+#define bPD_TH 0xf
+#define bPD_TH_Opt2 0xc000
+#define bPWED_TH 0x700
+#define bIfMF_Win_L 0x800
+#define bPD_Option 0x1000
+#define bMF_Win_L 0xe000
+#define bBW_Search_L 0x30000
+#define bwin_enh_L 0xc0000
+#define bBW_TH 0x700000
+#define bED_TH2 0x3800000
+#define bBW_option 0x4000000
+#define bRatio_TH 0x18000000
+#define bWindow_L 0xe0000000
+#define bSBD_Option 0x1
+#define bFrame_TH 0x1c
+#define bFS_Option 0x60
+#define bDC_Slope_check 0x80
+#define bFGuard_Counter_DC_L 0xe00
+#define bFrame_Weight_Short 0x7000
+#define bSub_Tune 0xe00000
+#define bFrame_DC_Length 0xe000000
+#define bSBD_start_offset 0x30000000
+#define bFrame_TH_2 0x7
+#define bFrame_GI2_TH 0x38
+#define bGI2_Sync_en 0x40
+#define bSarch_Short_Early 0x300
+#define bSarch_Short_Late 0xc00
+#define bSarch_GI2_Late 0x70000
+#define bCFOAntSum 0x1
+#define bCFOAcc 0x2
+#define bCFOStartOffset 0xc
+#define bCFOLookBack 0x70
+#define bCFOSumWeight 0x80
+#define bDAGCEnable 0x10000
+#define bTXIQImb_A 0x3ff
+#define bTXIQImb_B 0xfc00
+#define bTXIQImb_C 0x3f0000
+#define bTXIQImb_D 0xffc00000
+#define bTxIDCOffset 0xff
+#define bTxQDCOffset 0xff00
+#define bTxDFIRMode 0x10000
+#define bTxPesudoNoiseOn 0x4000000
+#define bTxPesudoNoise_A 0xff
+#define bTxPesudoNoise_B 0xff00
+#define bTxPesudoNoise_C 0xff0000
+#define bTxPesudoNoise_D 0xff000000
+#define bCCADropOption 0x20000
+#define bCCADropThres 0xfff00000
+#define bEDCCA_H 0xf
+#define bEDCCA_L 0xf0
+#define bLambda_ED 0x300
+#define bRxInitialGain 0x7f
+#define bRxAntDivEn 0x80
+#define bRxAGCAddressForLNA 0x7f00
+#define bRxHighPowerFlow 0x8000
+#define bRxAGCFreezeThres 0xc0000
+#define bRxFreezeStep_AGC1 0x300000
+#define bRxFreezeStep_AGC2 0xc00000
+#define bRxFreezeStep_AGC3 0x3000000
+#define bRxFreezeStep_AGC0 0xc000000
+#define bRxRssi_Cmp_En 0x10000000
+#define bRxQuickAGCEn 0x20000000
+#define bRxAGCFreezeThresMode 0x40000000
+#define bRxOverFlowCheckType 0x80000000
+#define bRxAGCShift 0x7f
+#define bTRSW_Tri_Only 0x80
+#define bPowerThres 0x300
+#define bRxAGCEn 0x1
+#define bRxAGCTogetherEn 0x2
+#define bRxAGCMin 0x4
+#define bRxHP_Ini 0x7
+#define bRxHP_TRLNA 0x70
+#define bRxHP_RSSI 0x700
+#define bRxHP_BBP1 0x7000
+#define bRxHP_BBP2 0x70000
+#define bRxHP_BBP3 0x700000
+#define bRSSI_H 0x7f0000
+#define bRSSI_Gen 0x7f000000
+#define bRxSettle_TRSW 0x7
+#define bRxSettle_LNA 0x38
+#define bRxSettle_RSSI 0x1c0
+#define bRxSettle_BBP 0xe00
+#define bRxSettle_RxHP 0x7000
+#define bRxSettle_AntSW_RSSI 0x38000
+#define bRxSettle_AntSW 0xc0000
+#define bRxProcessTime_DAGC 0x300000
+#define bRxSettle_HSSI 0x400000
+#define bRxProcessTime_BBPPW 0x800000
+#define bRxAntennaPowerShift 0x3000000
+#define bRSSITableSelect 0xc000000
+#define bRxHP_Final 0x7000000
+#define bRxHTSettle_BBP 0x7
+#define bRxHTSettle_HSSI 0x8
+#define bRxHTSettle_RxHP 0x70
+#define bRxHTSettle_BBPPW 0x80
+#define bRxHTSettle_Idle 0x300
+#define bRxHTSettle_Reserved 0x1c00
+#define bRxHTRxHPEn 0x8000
+#define bRxHTAGCFreezeThres 0x30000
+#define bRxHTAGCTogetherEn 0x40000
+#define bRxHTAGCMin 0x80000
+#define bRxHTAGCEn 0x100000
+#define bRxHTDAGCEn 0x200000
+#define bRxHTRxHP_BBP 0x1c00000
+#define bRxHTRxHP_Final 0xe0000000
+#define bRxPWRatioTH 0x3
+#define bRxPWRatioEn 0x4
+#define bRxMFHold 0x3800
+#define bRxPD_Delay_TH1 0x38
+#define bRxPD_Delay_TH2 0x1c0
+#define bRxPD_DC_COUNT_MAX 0x600
+#define bRxPD_Delay_TH 0x8000
+#define bRxProcess_Delay 0xf0000
+#define bRxSearchrange_GI2_Early 0x700000
+#define bRxFrame_Guard_Counter_L 0x3800000
+#define bRxSGI_Guard_L 0xc000000
+#define bRxSGI_Search_L 0x30000000
+#define bRxSGI_TH 0xc0000000
+#define bDFSCnt0 0xff
+#define bDFSCnt1 0xff00
+#define bDFSFlag 0xf0000
+
+#define bMFWeightSum 0x300000
+#define bMinIdxTH 0x7f000000
+
+#define bDAFormat 0x40000
+
+#define bTxChEmuEnable 0x01000000
+
+#define bTRSWIsolation_A 0x7f
+#define bTRSWIsolation_B 0x7f00
+#define bTRSWIsolation_C 0x7f0000
+#define bTRSWIsolation_D 0x7f000000
+
+#define bExtLNAGain 0x7c00
+
+#define bSTBCEn 0x4
+#define bAntennaMapping 0x10
+#define bNss 0x20
+#define bCFOAntSumD 0x200
+#define bPHYCounterReset 0x8000000
+#define bCFOReportGet 0x4000000
+#define bOFDMContinueTx 0x10000000
+#define bOFDMSingleCarrier 0x20000000
+#define bOFDMSingleTone 0x40000000
+#define bHTDetect 0x100
+#define bCFOEn 0x10000
+#define bCFOValue 0xfff00000
+#define bSigTone_Re 0x3f
+#define bSigTone_Im 0x7f00
+#define bCounter_CCA 0xffff
+#define bCounter_ParityFail 0xffff0000
+#define bCounter_RateIllegal 0xffff
+#define bCounter_CRC8Fail 0xffff0000
+#define bCounter_MCSNoSupport 0xffff
+#define bCounter_FastSync 0xffff
+#define bShortCFO 0xfff
+#define bShortCFOTLength 12
+#define bShortCFOFLength 11
+#define bLongCFO 0x7ff
+#define bLongCFOTLength 11
+#define bLongCFOFLength 11
+#define bTailCFO 0x1fff
+#define bTailCFOTLength 13
+#define bTailCFOFLength 12
+
+#define bmax_en_pwdB 0xffff
+#define bCC_power_dB 0xffff0000
+#define bnoise_pwdB 0xffff
+#define bPowerMeasTLength 10
+#define bPowerMeasFLength 3
+#define bRx_HT_BW 0x1
+#define bRxSC 0x6
+#define bRx_HT 0x8
+
+#define bNB_intf_det_on 0x1
+#define bIntf_win_len_cfg 0x30
+#define bNB_Intf_TH_cfg 0x1c0
+
+#define bRFGain 0x3f
+#define bTableSel 0x40
+#define bTRSW 0x80
+
+#define bRxSNR_A 0xff
+#define bRxSNR_B 0xff00
+#define bRxSNR_C 0xff0000
+#define bRxSNR_D 0xff000000
+#define bSNREVMTLength 8
+#define bSNREVMFLength 1
+
+#define bCSI1st 0xff
+#define bCSI2nd 0xff00
+#define bRxEVM1st 0xff0000
+#define bRxEVM2nd 0xff000000
+
+#define bSIGEVM 0xff
+#define bPWDB 0xff00
+#define bSGIEN 0x10000
+
+#define bSFactorQAM1 0xf
+#define bSFactorQAM2 0xf0
+#define bSFactorQAM3 0xf00
+#define bSFactorQAM4 0xf000
+#define bSFactorQAM5 0xf0000
+#define bSFactorQAM6 0xf0000
+#define bSFactorQAM7 0xf00000
+#define bSFactorQAM8 0xf000000
+#define bSFactorQAM9 0xf0000000
+#define bCSIScheme 0x100000
+
+#define bNoiseLvlTopSet 0x3
+#define bChSmooth 0x4
+#define bChSmoothCfg1 0x38
+#define bChSmoothCfg2 0x1c0
+#define bChSmoothCfg3 0xe00
+#define bChSmoothCfg4 0x7000
+#define bMRCMode 0x800000
+#define bTHEVMCfg 0x7000000
+
+#define bLoopFitType 0x1
+#define bUpdCFO 0x40
+#define bUpdCFOOffData 0x80
+#define bAdvUpdCFO 0x100
+#define bAdvTimeCtrl 0x800
+#define bUpdClko 0x1000
+#define bFC 0x6000
+#define bTrackingMode 0x8000
+#define bPhCmpEnable 0x10000
+#define bUpdClkoLTF 0x20000
+#define bComChCFO 0x40000
+#define bCSIEstiMode 0x80000
+#define bAdvUpdEqz 0x100000
+#define bUChCfg 0x7000000
+#define bUpdEqz 0x8000000
+
+#define bTxAGCRate18_06 0x7f7f7f7f
+#define bTxAGCRate54_24 0x7f7f7f7f
+#define bTxAGCRateMCS32 0x7f
+#define bTxAGCRateCCK 0x7f00
+#define bTxAGCRateMCS3_MCS0 0x7f7f7f7f
+#define bTxAGCRateMCS7_MCS4 0x7f7f7f7f
+#define bTxAGCRateMCS11_MCS8 0x7f7f7f7f
+#define bTxAGCRateMCS15_MCS12 0x7f7f7f7f
+
+
+#define bRxPesudoNoiseOn 0x20000000
+#define bRxPesudoNoise_A 0xff
+#define bRxPesudoNoise_B 0xff00
+#define bRxPesudoNoise_C 0xff0000
+#define bRxPesudoNoise_D 0xff000000
+#define bPesudoNoiseState_A 0xffff
+#define bPesudoNoiseState_B 0xffff0000
+#define bPesudoNoiseState_C 0xffff
+#define bPesudoNoiseState_D 0xffff0000
+
+#define bZebra1_HSSIEnable 0x8
+#define bZebra1_TRxControl 0xc00
+#define bZebra1_TRxGainSetting 0x07f
+#define bZebra1_RxCorner 0xc00
+#define bZebra1_TxChargePump 0x38
+#define bZebra1_RxChargePump 0x7
+#define bZebra1_ChannelNum 0xf80
+#define bZebra1_TxLPFBW 0x400
+#define bZebra1_RxLPFBW 0x600
+
+#define bRTL8256RegModeCtrl1 0x100
+#define bRTL8256RegModeCtrl0 0x40
+#define bRTL8256_TxLPFBW 0x18
+#define bRTL8256_RxLPFBW 0x600
+
+#define bRTL8258_TxLPFBW 0xc
+#define bRTL8258_RxLPFBW 0xc00
+#define bRTL8258_RSSILPFBW 0xc0
+
+#define bByte0 0x1
+#define bByte1 0x2
+#define bByte2 0x4
+#define bByte3 0x8
+#define bWord0 0x3
+#define bWord1 0xc
+#define bDWord 0xf
+
+#define bMaskByte0 0xff
+#define bMaskByte1 0xff00
+#define bMaskByte2 0xff0000
+#define bMaskByte3 0xff000000
+#define bMaskHWord 0xffff0000
+#define bMaskLWord 0x0000ffff
+#define bMaskDWord 0xffffffff
+
+#define bMask12Bits 0xfff
+
+#define bEnable 0x1
+#define bDisable 0x0
+
+#define LeftAntenna 0x0
+#define RightAntenna 0x1
+
+#define tCheckTxStatus 500
+#define tUpdateRxCounter 100
+
+#define rateCCK 0
+#define rateOFDM 1
+#define rateHT 2
+
+#define bPMAC_End 0x1ff
+#define bFPGAPHY0_End 0x8ff
+#define bFPGAPHY1_End 0x9ff
+#define bCCKPHY0_End 0xaff
+#define bOFDMPHY0_End 0xcff
+#define bOFDMPHY1_End 0xdff
+
+
+#define bPMACControl 0x0
+#define bWMACControl 0x1
+#define bWNICControl 0x2
+
+#define PathA 0x0
+#define PathB 0x1
+#define PathC 0x2
+#define PathD 0x3
+
+#define rRTL8256RxMixerPole 0xb
+#define bZebraRxMixerPole 0x6
+#define rRTL8256TxBBOPBias 0x9
+#define bRTL8256TxBBOPBias 0x400
+#define rRTL8256TxBBBW 19
+#define bRTL8256TxBBBW 0x18
+
+#endif
diff --git a/drivers/staging/rtl8192e/r8192E_wx.c b/drivers/staging/rtl8192e/r8192E_wx.c
deleted file mode 100644
index adad91b0b6b..00000000000
--- a/drivers/staging/rtl8192e/r8192E_wx.c
+++ /dev/null
@@ -1,1163 +0,0 @@
-/*
- This file contains wireless extension handlers.
-
- This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
- Released under the terms of GPL (General Public Licence)
-
- Parts of this driver are based on the GPL part
- of the official realtek driver.
-
- Parts of this driver are based on the rtl8180 driver skeleton
- from Patric Schenke & Andres Salomon.
-
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
-
- We want to tanks the Authors of those projects and the Ndiswrapper
- project Authors.
-*/
-
-#include <linux/string.h>
-#include "r8192E.h"
-#include "r8192E_hw.h"
-#include "r8192E_wx.h"
-#ifdef ENABLE_DOT11D
-#include "ieee80211/dot11d.h"
-#endif
-
-#define RATE_COUNT 12
-static const u32 rtl8180_rates[] = {1000000,2000000,5500000,11000000,
- 6000000,9000000,12000000,18000000,24000000,36000000,48000000,54000000};
-
-
-#ifndef ENETDOWN
-#define ENETDOWN 1
-#endif
-static int r8192_wx_get_freq(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- return ieee80211_wx_get_freq(priv->ieee80211,a,wrqu,b);
-}
-
-
-static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct r8192_priv *priv=ieee80211_priv(dev);
-
- return ieee80211_wx_get_mode(priv->ieee80211,a,wrqu,b);
-}
-
-
-
-static int r8192_wx_get_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- return ieee80211_wx_get_rate(priv->ieee80211,info,wrqu,extra);
-}
-
-
-
-static int r8192_wx_set_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->bHwRadioOff)
- return 0;
-
- down(&priv->wx_sem);
-
- ret = ieee80211_wx_set_rate(priv->ieee80211,info,wrqu,extra);
-
- up(&priv->wx_sem);
-
- return ret;
-}
-
-
-static int r8192_wx_set_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->bHwRadioOff)
- return 0;
-
- down(&priv->wx_sem);
-
- ret = ieee80211_wx_set_rts(priv->ieee80211,info,wrqu,extra);
-
- up(&priv->wx_sem);
-
- return ret;
-}
-
-static int r8192_wx_get_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- return ieee80211_wx_get_rts(priv->ieee80211,info,wrqu,extra);
-}
-
-static int r8192_wx_set_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->bHwRadioOff)
- return 0;
-
- down(&priv->wx_sem);
-
- ret = ieee80211_wx_set_power(priv->ieee80211,info,wrqu,extra);
-
- up(&priv->wx_sem);
-
- return ret;
-}
-
-static int r8192_wx_get_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- return ieee80211_wx_get_power(priv->ieee80211,info,wrqu,extra);
-}
-
-static int r8192_wx_set_rawtx(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int ret;
-
- if (priv->bHwRadioOff)
- return 0;
-
- down(&priv->wx_sem);
-
- ret = ieee80211_wx_set_rawtx(priv->ieee80211, info, wrqu, extra);
-
- up(&priv->wx_sem);
-
- return ret;
-
-}
-
-static int r8192_wx_force_reset(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- down(&priv->wx_sem);
-
- printk("%s(): force reset ! extra is %d\n",__FUNCTION__, *extra);
- priv->force_reset = *extra;
- up(&priv->wx_sem);
- return 0;
-
-}
-
-
-static int r8192_wx_set_crcmon(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int *parms = (int *)extra;
- int enable = (parms[0] > 0);
- short prev = priv->crcmon;
-
- if (priv->bHwRadioOff)
- return 0;
-
- down(&priv->wx_sem);
-
- if(enable)
- priv->crcmon=1;
- else
- priv->crcmon=0;
-
- DMESG("bad CRC in monitor mode are %s",
- priv->crcmon ? "accepted" : "rejected");
-
- if(prev != priv->crcmon && priv->up){
- //rtl8180_down(dev);
- //rtl8180_up(dev);
- }
-
- up(&priv->wx_sem);
-
- return 0;
-}
-
-static int r8192_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- RT_RF_POWER_STATE rtState;
- int ret;
-
- if (priv->bHwRadioOff)
- return 0;
-
- rtState = priv->eRFPowerState;
- down(&priv->wx_sem);
-#ifdef ENABLE_IPS
- if(wrqu->mode == IW_MODE_ADHOC){
-
- if (priv->PowerSaveControl.bInactivePs) {
- if(rtState == eRfOff){
- if(priv->RfOffReason > RF_CHANGE_BY_IPS)
- {
- RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n",__FUNCTION__);
- up(&priv->wx_sem);
- return -1;
- }
- else{
- RT_TRACE(COMP_ERR, "%s(): IPSLeave\n",__FUNCTION__);
- down(&priv->ieee80211->ips_sem);
- IPSLeave(priv);
- up(&priv->ieee80211->ips_sem);
- }
- }
- }
- }
-#endif
- ret = ieee80211_wx_set_mode(priv->ieee80211,a,wrqu,b);
-
- //rtl8187_set_rxconf(dev);
-
- up(&priv->wx_sem);
- return ret;
-}
-
-struct iw_range_with_scan_capa
-{
- /* Informative stuff (to choose between different interface) */
- __u32 throughput; /* To give an idea... */
- /* In theory this value should be the maximum benchmarked
- * TCP/IP throughput, because with most of these devices the
- * bit rate is meaningless (overhead an co) to estimate how
- * fast the connection will go and pick the fastest one.
- * I suggest people to play with Netperf or any benchmark...
- */
-
- /* NWID (or domain id) */
- __u32 min_nwid; /* Minimal NWID we are able to set */
- __u32 max_nwid; /* Maximal NWID we are able to set */
-
- /* Old Frequency (backward compat - moved lower ) */
- __u16 old_num_channels;
- __u8 old_num_frequency;
-
- /* Scan capabilities */
- __u8 scan_capa;
-};
-static int rtl8180_wx_get_range(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_range *range = (struct iw_range *)extra;
- struct iw_range_with_scan_capa* tmp = (struct iw_range_with_scan_capa*)range;
- struct r8192_priv *priv = ieee80211_priv(dev);
- u16 val;
- int i;
-
- wrqu->data.length = sizeof(*range);
- memset(range, 0, sizeof(*range));
-
- /* Let's try to keep this struct in the same order as in
- * linux/include/wireless.h
- */
-
- /* TODO: See what values we can set, and remove the ones we can't
- * set, or fill them with some default data.
- */
-
- /* ~5 Mb/s real (802.11b) */
- range->throughput = 130 * 1000 * 1000;
-
- // TODO: Not used in 802.11b?
-// range->min_nwid; /* Minimal NWID we are able to set */
- // TODO: Not used in 802.11b?
-// range->max_nwid; /* Maximal NWID we are able to set */
-
- /* Old Frequency (backward compat - moved lower ) */
-// range->old_num_channels;
-// range->old_num_frequency;
-// range->old_freq[6]; /* Filler to keep "version" at the same offset */
-
- range->max_qual.qual = 100;
- /* TODO: Find real max RSSI and stick here */
- range->max_qual.level = 0;
- range->max_qual.noise = -98;
- range->max_qual.updated = 7; /* Updated all three */
-
- range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
- /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
- range->avg_qual.level = 20 + -98;
- range->avg_qual.noise = 0;
- range->avg_qual.updated = 7; /* Updated all three */
-
- range->num_bitrates = RATE_COUNT;
-
- for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) {
- range->bitrate[i] = rtl8180_rates[i];
- }
-
- range->min_frag = MIN_FRAG_THRESHOLD;
- range->max_frag = MAX_FRAG_THRESHOLD;
-
- range->min_pmp=0;
- range->max_pmp = 5000000;
- range->min_pmt = 0;
- range->max_pmt = 65535*1000;
- range->pmp_flags = IW_POWER_PERIOD;
- range->pmt_flags = IW_POWER_TIMEOUT;
- range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
- range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = 18;
-
-// range->retry_capa; /* What retry options are supported */
-// range->retry_flags; /* How to decode max/min retry limit */
-// range->r_time_flags; /* How to decode max/min retry life */
-// range->min_retry; /* Minimal number of retries */
-// range->max_retry; /* Maximal number of retries */
-// range->min_r_time; /* Minimal retry lifetime */
-// range->max_r_time; /* Maximal retry lifetime */
-
-
- for (i = 0, val = 0; i < 14; i++) {
-
- // Include only legal frequencies for some countries
-#ifdef ENABLE_DOT11D
- if ((GET_DOT11D_INFO(priv->ieee80211)->channel_map)[i+1]) {
-#else
- if ((priv->ieee80211->channel_map)[i+1]) {
-#endif
- range->freq[val].i = i + 1;
- range->freq[val].m = ieee80211_wlan_frequencies[i] * 100000;
- range->freq[val].e = 1;
- val++;
- } else {
- // FIXME: do we need to set anything for channels
- // we don't use ?
- }
-
- if (val == IW_MAX_FREQUENCIES)
- break;
- }
- range->num_frequency = val;
- range->num_channels = val;
-
- range->enc_capa = IW_ENC_CAPA_WPA|IW_ENC_CAPA_WPA2|
- IW_ENC_CAPA_CIPHER_TKIP|IW_ENC_CAPA_CIPHER_CCMP;
-
- tmp->scan_capa = 0x01;
- return 0;
-}
-
-
-static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
- RT_RF_POWER_STATE rtState;
- int ret;
-
- if (priv->bHwRadioOff)
- return 0;
-
- rtState = priv->eRFPowerState;
-
- if(!priv->up) return -ENETDOWN;
- if (priv->ieee80211->LinkDetectInfo.bBusyTraffic == true)
- return -EAGAIN;
-
- if (wrqu->data.flags & IW_SCAN_THIS_ESSID)
- {
- struct iw_scan_req* req = (struct iw_scan_req*)b;
- if (req->essid_len)
- {
- //printk("==**&*&*&**===>scan set ssid:%s\n", req->essid);
- ieee->current_network.ssid_len = req->essid_len;
- memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
- //printk("=====>network ssid:%s\n", ieee->current_network.ssid);
- }
- }
-
- down(&priv->wx_sem);
-#ifdef ENABLE_IPS
- priv->ieee80211->actscanning = true;
- if(priv->ieee80211->state != IEEE80211_LINKED){
- if (priv->PowerSaveControl.bInactivePs) {
- if(rtState == eRfOff){
- if(priv->RfOffReason > RF_CHANGE_BY_IPS)
- {
- RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n",__FUNCTION__);
- up(&priv->wx_sem);
- return -1;
- }
- else{
- //RT_TRACE(COMP_PS, "%s(): IPSLeave\n",__FUNCTION__);
- down(&priv->ieee80211->ips_sem);
- IPSLeave(priv);
- up(&priv->ieee80211->ips_sem);
- }
- }
- }
- priv->ieee80211->scanning = 0;
- ieee80211_softmac_scan_syncro(priv->ieee80211);
- ret = 0;
- }
- else
-#else
-
- if(priv->ieee80211->state != IEEE80211_LINKED){
- priv->ieee80211->scanning = 0;
- ieee80211_softmac_scan_syncro(priv->ieee80211);
- ret = 0;
- }
- else
-#endif
- ret = ieee80211_wx_set_scan(priv->ieee80211,a,wrqu,b);
-
- up(&priv->wx_sem);
- return ret;
-}
-
-
-static int r8192_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
-
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->bHwRadioOff)
- return 0;
-
- if(!priv->up) return -ENETDOWN;
-
- down(&priv->wx_sem);
-
- ret = ieee80211_wx_get_scan(priv->ieee80211,a,wrqu,b);
-
- up(&priv->wx_sem);
-
- return ret;
-}
-
-static int r8192_wx_set_essid(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- RT_RF_POWER_STATE rtState;
- int ret;
-
- if (priv->bHwRadioOff)
- return 0;
-
- rtState = priv->eRFPowerState;
- down(&priv->wx_sem);
-
-#ifdef ENABLE_IPS
- down(&priv->ieee80211->ips_sem);
- IPSLeave(priv);
- up(&priv->ieee80211->ips_sem);
-#endif
- ret = ieee80211_wx_set_essid(priv->ieee80211,a,wrqu,b);
-
- up(&priv->wx_sem);
-
- return ret;
-}
-
-
-
-
-static int r8192_wx_get_essid(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- down(&priv->wx_sem);
-
- ret = ieee80211_wx_get_essid(priv->ieee80211, a, wrqu, b);
-
- up(&priv->wx_sem);
-
- return ret;
-}
-
-
-static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->bHwRadioOff)
- return 0;
-
- down(&priv->wx_sem);
-
- ret = ieee80211_wx_set_freq(priv->ieee80211, a, wrqu, b);
-
- up(&priv->wx_sem);
- return ret;
-}
-
-static int r8192_wx_get_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- return ieee80211_wx_get_name(priv->ieee80211, info, wrqu, extra);
-}
-
-
-static int r8192_wx_set_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->bHwRadioOff)
- return 0;
-
- if (wrqu->frag.disabled)
- priv->ieee80211->fts = DEFAULT_FRAG_THRESHOLD;
- else {
- if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
- wrqu->frag.value > MAX_FRAG_THRESHOLD)
- return -EINVAL;
-
- priv->ieee80211->fts = wrqu->frag.value & ~0x1;
- }
-
- return 0;
-}
-
-
-static int r8192_wx_get_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- wrqu->frag.value = priv->ieee80211->fts;
- wrqu->frag.fixed = 0; /* no auto select */
- wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FRAG_THRESHOLD);
-
- return 0;
-}
-
-
-static int r8192_wx_set_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra)
-{
-
- int ret;
- struct r8192_priv *priv = ieee80211_priv(dev);
-// struct sockaddr *temp = (struct sockaddr *)awrq;
-
- if (priv->bHwRadioOff)
- return 0;
-
- down(&priv->wx_sem);
-
-#ifdef ENABLE_IPS
- down(&priv->ieee80211->ips_sem);
- IPSLeave(priv);
- up(&priv->ieee80211->ips_sem);
-#endif
- ret = ieee80211_wx_set_wap(priv->ieee80211,info,awrq,extra);
-
- up(&priv->wx_sem);
-
- return ret;
-
-}
-
-
-static int r8192_wx_get_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- return ieee80211_wx_get_wap(priv->ieee80211,info,wrqu,extra);
-}
-
-
-static int r8192_wx_get_enc(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- return ieee80211_wx_get_encode(priv->ieee80211, info, wrqu, key);
-}
-
-static int r8192_wx_set_enc(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int ret;
-
- struct ieee80211_device *ieee = priv->ieee80211;
- //u32 TargetContent;
- u32 hwkey[4]={0,0,0,0};
- u8 mask=0xff;
- u32 key_idx=0;
- u8 zero_addr[4][6] ={{0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x01},
- {0x00,0x00,0x00,0x00,0x00,0x02},
- {0x00,0x00,0x00,0x00,0x00,0x03} };
- int i;
-
- if (priv->bHwRadioOff)
- return 0;
-
- if(!priv->up) return -ENETDOWN;
-
- priv->ieee80211->wx_set_enc = 1;
-#ifdef ENABLE_IPS
- down(&priv->ieee80211->ips_sem);
- IPSLeave(priv);
- up(&priv->ieee80211->ips_sem);
-#endif
-
- down(&priv->wx_sem);
-
- RT_TRACE(COMP_SEC, "Setting SW wep key\n");
- ret = ieee80211_wx_set_encode(priv->ieee80211,info,wrqu,key);
-
- up(&priv->wx_sem);
-
- //sometimes, the length is zero while we do not type key value
- if(wrqu->encoding.length!=0){
-
- for(i=0 ; i<4 ; i++){
- hwkey[i] |= key[4*i+0]&mask;
- if(i==1&&(4*i+1)==wrqu->encoding.length) mask=0x00;
- if(i==3&&(4*i+1)==wrqu->encoding.length) mask=0x00;
- hwkey[i] |= (key[4*i+1]&mask)<<8;
- hwkey[i] |= (key[4*i+2]&mask)<<16;
- hwkey[i] |= (key[4*i+3]&mask)<<24;
- }
-
- #define CONF_WEP40 0x4
- #define CONF_WEP104 0x14
-
- switch(wrqu->encoding.flags & IW_ENCODE_INDEX){
- case 0: key_idx = ieee->tx_keyidx; break;
- case 1: key_idx = 0; break;
- case 2: key_idx = 1; break;
- case 3: key_idx = 2; break;
- case 4: key_idx = 3; break;
- default: break;
- }
-
- //printk("-------====>length:%d, key_idx:%d, flag:%x\n", wrqu->encoding.length, key_idx, wrqu->encoding.flags);
- if(wrqu->encoding.length==0x5){
- ieee->pairwise_key_type = KEY_TYPE_WEP40;
- EnableHWSecurityConfig8192(priv);
- setKey(priv, key_idx, key_idx, KEY_TYPE_WEP40,
- zero_addr[key_idx], 0, hwkey);
- }
-
- else if(wrqu->encoding.length==0xd){
- ieee->pairwise_key_type = KEY_TYPE_WEP104;
- EnableHWSecurityConfig8192(priv);
- setKey(priv, key_idx, key_idx, KEY_TYPE_WEP104,
- zero_addr[key_idx], 0, hwkey);
- }
- else printk("wrong type in WEP, not WEP40 and WEP104\n");
- }
-
- priv->ieee80211->wx_set_enc = 0;
-
- return ret;
-}
-
-
-static int r8192_wx_set_scan_type(struct net_device *dev, struct iw_request_info *aa, union
- iwreq_data *wrqu, char *p){
-
- struct r8192_priv *priv = ieee80211_priv(dev);
- int *parms=(int*)p;
- int mode=parms[0];
-
- priv->ieee80211->active_scan = mode;
-
- return 1;
-}
-
-
-
-static int r8192_wx_set_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- int err = 0;
-
- if (priv->bHwRadioOff)
- return 0;
-
- down(&priv->wx_sem);
-
- if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
- wrqu->retry.disabled){
- err = -EINVAL;
- goto exit;
- }
- if (!(wrqu->retry.flags & IW_RETRY_LIMIT)){
- err = -EINVAL;
- goto exit;
- }
-
- if(wrqu->retry.value > R8180_MAX_RETRY){
- err= -EINVAL;
- goto exit;
- }
- if (wrqu->retry.flags & IW_RETRY_MAX) {
- priv->retry_rts = wrqu->retry.value;
- DMESG("Setting retry for RTS/CTS data to %d", wrqu->retry.value);
-
- }else {
- priv->retry_data = wrqu->retry.value;
- DMESG("Setting retry for non RTS/CTS data to %d", wrqu->retry.value);
- }
-
- /* FIXME !
- * We might try to write directly the TX config register
- * or to restart just the (R)TX process.
- * I'm unsure if whole reset is really needed
- */
-
- rtl8192_commit(priv);
- /*
- if(priv->up){
- rtl8180_rtx_disable(dev);
- rtl8180_rx_enable(dev);
- rtl8180_tx_enable(dev);
-
- }
- */
-exit:
- up(&priv->wx_sem);
-
- return err;
-}
-
-static int r8192_wx_get_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-
-
- wrqu->retry.disabled = 0; /* can't be disabled */
-
- if ((wrqu->retry.flags & IW_RETRY_TYPE) ==
- IW_RETRY_LIFETIME)
- return -EINVAL;
-
- if (wrqu->retry.flags & IW_RETRY_MAX) {
- wrqu->retry.flags = IW_RETRY_LIMIT & IW_RETRY_MAX;
- wrqu->retry.value = priv->retry_rts;
- } else {
- wrqu->retry.flags = IW_RETRY_LIMIT & IW_RETRY_MIN;
- wrqu->retry.value = priv->retry_data;
- }
- //DMESG("returning %d",wrqu->retry.value);
-
-
- return 0;
-}
-
-static int r8192_wx_get_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- if(priv->rf_set_sens == NULL)
- return -1; /* we have not this support for this radio */
- wrqu->sens.value = priv->sens;
- return 0;
-}
-
-
-static int r8192_wx_set_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
-
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- short err = 0;
-
- if (priv->bHwRadioOff)
- return 0;
-
- down(&priv->wx_sem);
- //DMESG("attempt to set sensivity to %ddb",wrqu->sens.value);
- if(priv->rf_set_sens == NULL) {
- err= -1; /* we have not this support for this radio */
- goto exit;
- }
- if(priv->rf_set_sens(dev, wrqu->sens.value) == 0)
- priv->sens = wrqu->sens.value;
- else
- err= -EINVAL;
-
-exit:
- up(&priv->wx_sem);
-
- return err;
-}
-
-static int r8192_wx_set_enc_ext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret=0;
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
-
- if (priv->bHwRadioOff)
- return 0;
-
- down(&priv->wx_sem);
-
- priv->ieee80211->wx_set_enc = 1;
-
-#ifdef ENABLE_IPS
- down(&priv->ieee80211->ips_sem);
- IPSLeave(priv);
- up(&priv->ieee80211->ips_sem);
-#endif
-
- ret = ieee80211_wx_set_encode_ext(ieee, info, wrqu, extra);
-
- {
- u8 broadcast_addr[6] = {0xff,0xff,0xff,0xff,0xff,0xff};
- u8 zero[6] = {0};
- u32 key[4] = {0};
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- struct iw_point *encoding = &wrqu->encoding;
- u8 idx = 0, alg = 0, group = 0;
-
- if ((encoding->flags & IW_ENCODE_DISABLED) ||
- ext->alg == IW_ENCODE_ALG_NONE) //none is not allowed to use hwsec WB 2008.07.01
- {
- ieee->pairwise_key_type = ieee->group_key_type = KEY_TYPE_NA;
- CamResetAllEntry(priv);
- goto end_hw_sec;
- }
- alg = (ext->alg == IW_ENCODE_ALG_CCMP)?KEY_TYPE_CCMP:ext->alg; // as IW_ENCODE_ALG_CCMP is defined to be 3 and KEY_TYPE_CCMP is defined to 4;
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx)
- idx --;
- group = ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY;
-
- if ((!group) || (IW_MODE_ADHOC == ieee->iw_mode) || (alg == KEY_TYPE_WEP40))
- {
- if ((ext->key_len == 13) && (alg == KEY_TYPE_WEP40) )
- alg = KEY_TYPE_WEP104;
- ieee->pairwise_key_type = alg;
- EnableHWSecurityConfig8192(priv);
- }
- memcpy((u8*)key, ext->key, 16); //we only get 16 bytes key.why? WB 2008.7.1
-
- if ((alg & KEY_TYPE_WEP40) && (ieee->auth_mode !=2) )
- {
- if (ext->key_len == 13)
- ieee->pairwise_key_type = alg = KEY_TYPE_WEP104;
- setKey(priv, idx, idx, alg, zero, 0, key);
- }
- else if (group)
- {
- ieee->group_key_type = alg;
- setKey(priv, idx, idx, alg, broadcast_addr, 0, key);
- }
- else //pairwise key
- {
- if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) && ieee->pHTInfo->bCurrentHTSupport){
- write_nic_byte(priv, 0x173, 1); //fix aes bug
- }
- setKey(priv, 4, idx, alg,
- (u8*)ieee->ap_mac_addr, 0, key);
- }
-
-
- }
-
-end_hw_sec:
- priv->ieee80211->wx_set_enc = 0;
- up(&priv->wx_sem);
- return ret;
-
-}
-static int r8192_wx_set_auth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
-{
- int ret=0;
- //printk("====>%s()\n", __FUNCTION__);
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->bHwRadioOff)
- return 0;
-
- down(&priv->wx_sem);
- ret = ieee80211_wx_set_auth(priv->ieee80211, info, &(data->param), extra);
- up(&priv->wx_sem);
- return ret;
-}
-
-static int r8192_wx_set_mlme(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- //printk("====>%s()\n", __FUNCTION__);
-
- int ret=0;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->bHwRadioOff)
- return 0;
-
- down(&priv->wx_sem);
- ret = ieee80211_wx_set_mlme(priv->ieee80211, info, wrqu, extra);
- up(&priv->wx_sem);
- return ret;
-}
-
-static int r8192_wx_set_gen_ie(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
-{
- //printk("====>%s(), len:%d\n", __FUNCTION__, data->length);
- int ret=0;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- if (priv->bHwRadioOff)
- return 0;
-
- down(&priv->wx_sem);
- ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, data->data.length);
- up(&priv->wx_sem);
- //printk("<======%s(), ret:%d\n", __FUNCTION__, ret);
- return ret;
-}
-
-static int dummy(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu,char *b)
-{
- return -1;
-}
-
-// check ac/dc status with the help of user space application */
-static int r8192_wx_adapter_power_status(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-#ifdef ENABLE_LPS
- PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
- struct ieee80211_device* ieee = priv->ieee80211;
-#endif
- down(&priv->wx_sem);
-
-#ifdef ENABLE_LPS
- RT_TRACE(COMP_POWER, "%s(): %s\n",__FUNCTION__, (*extra == 6)?"DC power":"AC power");
- // ieee->ps shall not be set under DC mode, otherwise it conflict
- // with Leisure power save mode setting.
- //
- if(*extra || priv->force_lps) {
- priv->ps_force = false;
- pPSC->bLeisurePs = true;
- } else {
- //LZM for PS-Poll AID issue. 090429
- if(priv->ieee80211->state == IEEE80211_LINKED)
- LeisurePSLeave(priv->ieee80211);
-
- priv->ps_force = true;
- pPSC->bLeisurePs = false;
- ieee->ps = *extra;
- }
-
-#endif
- up(&priv->wx_sem);
- return 0;
-
-}
-
-
-static iw_handler r8192_wx_handlers[] =
-{
- NULL, /* SIOCSIWCOMMIT */
- r8192_wx_get_name, /* SIOCGIWNAME */
- dummy, /* SIOCSIWNWID */
- dummy, /* SIOCGIWNWID */
- r8192_wx_set_freq, /* SIOCSIWFREQ */
- r8192_wx_get_freq, /* SIOCGIWFREQ */
- r8192_wx_set_mode, /* SIOCSIWMODE */
- r8192_wx_get_mode, /* SIOCGIWMODE */
- r8192_wx_set_sens, /* SIOCSIWSENS */
- r8192_wx_get_sens, /* SIOCGIWSENS */
- NULL, /* SIOCSIWRANGE */
- rtl8180_wx_get_range, /* SIOCGIWRANGE */
- NULL, /* SIOCSIWPRIV */
- NULL, /* SIOCGIWPRIV */
- NULL, /* SIOCSIWSTATS */
- NULL, /* SIOCGIWSTATS */
- dummy, /* SIOCSIWSPY */
- dummy, /* SIOCGIWSPY */
- NULL, /* SIOCGIWTHRSPY */
- NULL, /* SIOCWIWTHRSPY */
- r8192_wx_set_wap, /* SIOCSIWAP */
- r8192_wx_get_wap, /* SIOCGIWAP */
- r8192_wx_set_mlme, /* MLME-- */
- dummy, /* SIOCGIWAPLIST -- depricated */
- r8192_wx_set_scan, /* SIOCSIWSCAN */
- r8192_wx_get_scan, /* SIOCGIWSCAN */
- r8192_wx_set_essid, /* SIOCSIWESSID */
- r8192_wx_get_essid, /* SIOCGIWESSID */
- dummy, /* SIOCSIWNICKN */
- dummy, /* SIOCGIWNICKN */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- r8192_wx_set_rate, /* SIOCSIWRATE */
- r8192_wx_get_rate, /* SIOCGIWRATE */
- r8192_wx_set_rts, /* SIOCSIWRTS */
- r8192_wx_get_rts, /* SIOCGIWRTS */
- r8192_wx_set_frag, /* SIOCSIWFRAG */
- r8192_wx_get_frag, /* SIOCGIWFRAG */
- dummy, /* SIOCSIWTXPOW */
- dummy, /* SIOCGIWTXPOW */
- r8192_wx_set_retry, /* SIOCSIWRETRY */
- r8192_wx_get_retry, /* SIOCGIWRETRY */
- r8192_wx_set_enc, /* SIOCSIWENCODE */
- r8192_wx_get_enc, /* SIOCGIWENCODE */
- r8192_wx_set_power, /* SIOCSIWPOWER */
- r8192_wx_get_power, /* SIOCGIWPOWER */
- NULL, /*---hole---*/
- NULL, /*---hole---*/
- r8192_wx_set_gen_ie,//NULL, /* SIOCSIWGENIE */
- NULL, /* SIOCSIWGENIE */
- r8192_wx_set_auth,//NULL, /* SIOCSIWAUTH */
- NULL,//r8192_wx_get_auth,//NULL, /* SIOCSIWAUTH */
- r8192_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
- NULL,//r8192_wx_get_enc_ext,//NULL, /* SIOCSIWENCODEEXT */
- NULL, /* SIOCSIWPMKSA */
- NULL, /*---hole---*/
-
-};
-
-
-static const struct iw_priv_args r8192_private_args[] = {
-
- {
- SIOCIWFIRSTPRIV + 0x0,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "badcrc"
- },
-
- {
- SIOCIWFIRSTPRIV + 0x1,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan"
-
- },
- {
- SIOCIWFIRSTPRIV + 0x2,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx"
- }
- ,
- {
- SIOCIWFIRSTPRIV + 0x3,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "forcereset"
-
- }
- ,
- {
- SIOCIWFIRSTPRIV + 0x4,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE,
- "set_power"
- }
-
-};
-
-
-static iw_handler r8192_private_handler[] = {
- r8192_wx_set_crcmon, /*SIOCIWSECONDPRIV*/
- r8192_wx_set_scan_type,
- r8192_wx_set_rawtx,
- r8192_wx_force_reset,
- r8192_wx_adapter_power_status,
-};
-
-static struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
- struct iw_statistics* wstats = &priv->wstats;
- int tmp_level = 0;
- int tmp_qual = 0;
- int tmp_noise = 0;
- if(ieee->state < IEEE80211_LINKED)
- {
- wstats->qual.qual = 0;
- wstats->qual.level = 0;
- wstats->qual.noise = 0;
- wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
- return wstats;
- }
-
- tmp_level = (&ieee->current_network)->stats.rssi;
- tmp_qual = (&ieee->current_network)->stats.signal;
- tmp_noise = (&ieee->current_network)->stats.noise;
- //printk("level:%d, qual:%d, noise:%d\n", tmp_level, tmp_qual, tmp_noise);
-
- wstats->qual.level = tmp_level;
- wstats->qual.qual = tmp_qual;
- wstats->qual.noise = tmp_noise;
- wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
- return wstats;
-}
-
-
-struct iw_handler_def r8192_wx_handlers_def={
- .standard = r8192_wx_handlers,
- .num_standard = sizeof(r8192_wx_handlers) / sizeof(iw_handler),
- .private = r8192_private_handler,
- .num_private = sizeof(r8192_private_handler) / sizeof(iw_handler),
- .num_private_args = sizeof(r8192_private_args) / sizeof(struct iw_priv_args),
- .get_wireless_stats = r8192_get_wireless_stats,
- .private_args = (struct iw_priv_args *)r8192_private_args,
-};
diff --git a/drivers/staging/rtl8192e/r8192E_wx.h b/drivers/staging/rtl8192e/r8192E_wx.h
deleted file mode 100644
index 25f06c165d4..00000000000
--- a/drivers/staging/rtl8192e/r8192E_wx.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- This is part of rtl8180 OpenSource driver - v 0.3
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
- Released under the terms of GPL (General Public Licence)
-
- Parts of this driver are based on the GPL part of the official realtek driver
- Parts of this driver are based on the rtl8180 driver skeleton from Patric Schenke & Andres Salomon
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
-
- We want to tanks the Authors of such projects and the Ndiswrapper project Authors.
-*/
-
-/* this file (will) contains wireless extension handlers*/
-
-#ifndef R8180_WX_H
-#define R8180_WX_H
-extern struct iw_handler_def r8192_wx_handlers_def;
-#endif
diff --git a/drivers/staging/rtl8192e/r8192_pm.c b/drivers/staging/rtl8192e/r8192_pm.c
deleted file mode 100644
index 7bcc4a35099..00000000000
--- a/drivers/staging/rtl8192e/r8192_pm.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- Power management interface routines.
- Written by Mariusz Matuszek.
- This code is currently just a placeholder for later work and
- does not do anything useful.
-
- This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
- Released under the terms of GPL (General Public Licence)
-*/
-
-#include "r8192E.h"
-#include "r8192E_hw.h"
-#include "r8192_pm.h"
-#include "r8190_rtl8256.h"
-
-int rtl8192E_save_state (struct pci_dev *dev, pm_message_t state)
-{
- printk(KERN_NOTICE "r8192E save state call (state %u).\n", state.event);
- return -EAGAIN;
-}
-
-
-int rtl8192E_suspend (struct pci_dev *pdev, pm_message_t state)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 ulRegRead;
-
- RT_TRACE(COMP_POWER, "============> r8192E suspend call.\n");
- if (!netif_running(dev))
- goto out_pci_suspend;
-
- if (dev->netdev_ops->ndo_stop)
- dev->netdev_ops->ndo_stop(dev);
-
- // Call MgntActSet_RF_State instead to prevent RF config race condition.
- if(!priv->ieee80211->bSupportRemoteWakeUp) {
- MgntActSet_RF_State(priv, eRfOff, RF_CHANGE_BY_INIT);
- // 2006.11.30. System reset bit
- ulRegRead = read_nic_dword(priv, CPU_GEN);
- ulRegRead|=CPU_GEN_SYSTEM_RESET;
- write_nic_dword(priv, CPU_GEN, ulRegRead);
- } else {
- //2008.06.03 for WOL
- write_nic_dword(priv, WFCRC0, 0xffffffff);
- write_nic_dword(priv, WFCRC1, 0xffffffff);
- write_nic_dword(priv, WFCRC2, 0xffffffff);
- //Write PMR register
- write_nic_byte(priv, PMR, 0x5);
- //Disable tx, enanble rx
- write_nic_byte(priv, MacBlkCtrl, 0xa);
- }
-
-out_pci_suspend:
- RT_TRACE(COMP_POWER, "r8192E support WOL call??????????????????????\n");
- if(priv->ieee80211->bSupportRemoteWakeUp) {
- RT_TRACE(COMP_POWER, "r8192E support WOL call!!!!!!!!!!!!!!!!!!.\n");
- }
- netif_device_detach(dev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev,state),
- priv->ieee80211->bSupportRemoteWakeUp?1:0);
- pci_set_power_state(pdev,pci_choose_state(pdev,state));
-
- return 0;
-}
-
-int rtl8192E_resume (struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- int err;
- u32 val;
-
- RT_TRACE(COMP_POWER, "================>r8192E resume call.\n");
-
- pci_set_power_state(pdev, PCI_D0);
-
- err = pci_enable_device(pdev);
- if(err) {
- printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
- dev->name);
- return err;
- }
-
- pci_restore_state(pdev);
-
- /*
- * Suspend/Resume resets the PCI configuration space, so we have to
- * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
- * from interfering with C3 CPU state. pci_restore_state won't help
- * here since it only restores the first 64 bytes pci config header.
- */
- pci_read_config_dword(pdev, 0x40, &val);
- if ((val & 0x0000ff00) != 0) {
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- }
-
-
-
- pci_enable_wake(pdev, PCI_D0, 0);
-
- if(!netif_running(dev))
- goto out;
-
- netif_device_attach(dev);
-
- if (dev->netdev_ops->ndo_open)
- dev->netdev_ops->ndo_open(dev);
-
-out:
- RT_TRACE(COMP_POWER, "<================r8192E resume call.\n");
- return 0;
-}
-
-
-int rtl8192E_enable_wake (struct pci_dev *dev, pm_message_t state, int enable)
-{
- printk(KERN_NOTICE "r8192E enable wake call (state %u, enable %d).\n",
- state.event, enable);
- return -EAGAIN;
-}
diff --git a/drivers/staging/rtl8192e/r8192_pm.h b/drivers/staging/rtl8192e/r8192_pm.h
deleted file mode 100644
index 4da9b464b41..00000000000
--- a/drivers/staging/rtl8192e/r8192_pm.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- Power management interface routines.
- Written by Mariusz Matuszek.
- This code is currently just a placeholder for later work and
- does not do anything useful.
-
- This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
- Released under the terms of GPL (General Public Licence)
-
-*/
-
-#ifndef R8192E_PM_H
-#define R8192E_PM_H
-
-#include <linux/types.h>
-#include <linux/pci.h>
-
-int rtl8192E_save_state (struct pci_dev *dev, pm_message_t state);
-int rtl8192E_suspend (struct pci_dev *dev, pm_message_t state);
-int rtl8192E_resume (struct pci_dev *dev);
-int rtl8192E_enable_wake (struct pci_dev *dev, pm_message_t state, int enable);
-
-#endif //R8192E_PM_H
diff --git a/drivers/staging/rtl8192e/r819xE_cmdpkt.c b/drivers/staging/rtl8192e/r819xE_cmdpkt.c
deleted file mode 100644
index 756e0660bbe..00000000000
--- a/drivers/staging/rtl8192e/r819xE_cmdpkt.c
+++ /dev/null
@@ -1,444 +0,0 @@
-/******************************************************************************
-
- (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
-
- Module: r819xusb_cmdpkt.c (RTL8190 TX/RX command packet handler Source C File)
-
- Note: The module is responsible for handling TX and RX command packet.
- 1. TX : Send set and query configuration command packet.
- 2. RX : Receive tx feedback, beacon state, query configuration
- command packet.
-
- Function:
-
- Export:
-
- Abbrev:
-
- History:
- Data Who Remark
-
- 05/06/2008 amy Create initial version porting from windows driver.
-
-******************************************************************************/
-#include "r8192E.h"
-#include "r8192E_hw.h"
-#include "r819xE_cmdpkt.h"
-
-/*
- * Driver internal module can call the API to send message to
- * firmware side. For example, you can send a debug command packet.
- * Or you can send a request for FW to modify RLX4181 LBUS HW bank.
- * Otherwise, you can change MAC/PHT/RF register by firmware at
- * run time. We do not support message more than one segment now.
- */
-RT_STATUS cmpk_message_handle_tx(
- struct r8192_priv *priv,
- u8* code_virtual_address,
- u32 packettype,
- u32 buffer_len)
-{
- RT_STATUS rt_status = RT_STATUS_SUCCESS;
- u16 frag_threshold;
- u16 frag_length = 0, frag_offset = 0;
- rt_firmware *pfirmware = priv->pFirmware;
- struct sk_buff *skb;
- unsigned char *seg_ptr;
- cb_desc *tcb_desc;
- u8 bLastIniPkt;
-
- PTX_FWINFO_8190PCI pTxFwInfo = NULL;
- int i;
-
- RT_TRACE(COMP_CMDPKT,"%s(),buffer_len is %d\n",__FUNCTION__,buffer_len);
- firmware_init_param(priv);
- //Fragmentation might be required
- frag_threshold = pfirmware->cmdpacket_frag_thresold;
- do {
- if((buffer_len - frag_offset) > frag_threshold) {
- frag_length = frag_threshold ;
- bLastIniPkt = 0;
-
- } else {
- frag_length =(u16)(buffer_len - frag_offset);
- bLastIniPkt = 1;
-
- }
-
- /* Allocate skb buffer to contain firmware info and tx descriptor info
- * add 4 to avoid packet appending overflow.
- * */
- skb = dev_alloc_skb(frag_length + priv->ieee80211->tx_headroom + 4);
- if(skb == NULL) {
- rt_status = RT_STATUS_FAILURE;
- goto Failed;
- }
-
- tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE);
- tcb_desc->queue_index = TXCMD_QUEUE;
- tcb_desc->bCmdOrInit = packettype;
- tcb_desc->bLastIniPkt = bLastIniPkt;
- tcb_desc->pkt_size = frag_length;
-
- //seg_ptr = skb_put(skb, frag_length + priv->ieee80211->tx_headroom);
- seg_ptr = skb_put(skb, priv->ieee80211->tx_headroom);
-
- pTxFwInfo = (PTX_FWINFO_8190PCI)seg_ptr;
- memset(pTxFwInfo,0,sizeof(TX_FWINFO_8190PCI));
- memset(pTxFwInfo,0x12,8);
-
- seg_ptr +=sizeof(TX_FWINFO_8190PCI);
-
- /*
- * Transform from little endian to big endian
- * and pending zero
- */
- seg_ptr = skb_tail_pointer(skb);
- for(i=0 ; i < frag_length; i+=4) {
- *seg_ptr++ = ((i+0)<frag_length)?code_virtual_address[i+3]:0;
- *seg_ptr++ = ((i+1)<frag_length)?code_virtual_address[i+2]:0;
- *seg_ptr++ = ((i+2)<frag_length)?code_virtual_address[i+1]:0;
- *seg_ptr++ = ((i+3)<frag_length)?code_virtual_address[i+0]:0;
- }
- skb_put(skb, i);
- priv->ieee80211->softmac_hard_start_xmit(skb, priv->ieee80211);
-
- code_virtual_address += frag_length;
- frag_offset += frag_length;
-
- }while(frag_offset < buffer_len);
-
-Failed:
- return rt_status;
-}
-
-static void cmpk_count_txstatistic(struct r8192_priv *priv, cmpk_txfb_t *pstx_fb)
-{
-#ifdef ENABLE_PS
- RT_RF_POWER_STATE rtState;
-
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
-
- // When RF is off, we should not count the packet for hw/sw synchronize
- // reason, ie. there may be a duration while sw switch is changed and hw
- // switch is being changed. 2006.12.04, by shien chang.
- if (rtState == eRfOff)
- {
- return;
- }
-#endif
-
-#ifdef TODO
- if(pAdapter->bInHctTest)
- return;
-#endif
- /* We can not know the packet length and transmit type: broadcast or uni
- or multicast. So the relative statistics must be collected in tx
- feedback info. */
- if (pstx_fb->tok)
- {
- priv->stats.txoktotal++;
-
- /* We can not make sure broadcast/multicast or unicast mode. */
- if (pstx_fb->pkt_type != PACKET_MULTICAST &&
- pstx_fb->pkt_type != PACKET_BROADCAST) {
- priv->stats.txbytesunicast += pstx_fb->pkt_length;
- }
- }
-}
-
-
-
-/*
- * The function is responsible for extract the message inside TX
- * feedbck message from firmware. It will contain dedicated info in
- * ws-06-0063-rtl8190-command-packet-specification. Please
- * refer to chapter "TX Feedback Element". We have to read 20 bytes
- * in the command packet.
- */
-static void cmpk_handle_tx_feedback(struct r8192_priv *priv, u8 *pmsg)
-{
- cmpk_txfb_t rx_tx_fb; /* */
-
- priv->stats.txfeedback++;
-
- memcpy((u8*)&rx_tx_fb, pmsg, sizeof(cmpk_txfb_t));
- /* Use tx feedback info to count TX statistics. */
- cmpk_count_txstatistic(priv, &rx_tx_fb);
-}
-
-
-/*
- * The function is responsible for extract the message from
- * firmware. It will contain dedicated info in
- * ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc.
- * Please refer to chapter "Interrupt Status Element".
- */
-static void cmpk_handle_interrupt_status(struct r8192_priv *priv, u8 *pmsg)
-{
- cmpk_intr_sta_t rx_intr_status; /* */
-
- DMESG("---> cmpk_Handle_Interrupt_Status()\n");
-
- /* 1. Extract TX feedback info from RFD to temp structure buffer. */
- /* It seems that FW use big endian(MIPS) and DRV use little endian in
- windows OS. So we have to read the content byte by byte or transfer
- endian type before copy the message copy. */
- //rx_bcn_state.Element_ID = pMsg[0];
- //rx_bcn_state.Length = pMsg[1];
- rx_intr_status.length = pmsg[1];
- if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2))
- {
- DMESG("cmpk_Handle_Interrupt_Status: wrong length!\n");
- return;
- }
-
-
- // Statistics of beacon for ad-hoc mode.
- if( priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- {
- //2 maybe need endian transform?
- rx_intr_status.interrupt_status = *((u32 *)(pmsg + 4));
- //rx_intr_status.InterruptStatus = N2H4BYTE(*((UINT32 *)(pMsg + 4)));
-
- DMESG("interrupt status = 0x%x\n", rx_intr_status.interrupt_status);
-
- if (rx_intr_status.interrupt_status & ISR_TxBcnOk)
- {
- priv->ieee80211->bibsscoordinator = true;
- priv->stats.txbeaconokint++;
- }
- else if (rx_intr_status.interrupt_status & ISR_TxBcnErr)
- {
- priv->ieee80211->bibsscoordinator = false;
- priv->stats.txbeaconerr++;
- }
- }
-
- // Other informations in interrupt status we need?
-
-
- DMESG("<---- cmpk_handle_interrupt_status()\n");
-
-}
-
-
-/*
- * The function is responsible for extract the message from
- * firmware. It will contain dedicated info in
- * ws-06-0063-rtl8190-command-packet-specification. Please
- * refer to chapter "Beacon State Element".
- */
-static void cmpk_handle_query_config_rx(struct r8192_priv *priv, u8 *pmsg)
-{
- cmpk_query_cfg_t rx_query_cfg; /* */
-
- /* 0. Display received message. */
- //cmpk_Display_Message(CMPK_RX_BEACON_STATE_SIZE, pMsg);
-
- /* 1. Extract TX feedback info from RFD to temp structure buffer. */
- /* It seems that FW use big endian(MIPS) and DRV use little endian in
- windows OS. So we have to read the content byte by byte or transfer
- endian type before copy the message copy. */
- //rx_query_cfg.Element_ID = pMsg[0];
- //rx_query_cfg.Length = pMsg[1];
- rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000)>>31;
- rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5;
- rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3;
- rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0;
- rx_query_cfg.cfg_offset = pmsg[7];
- rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) |
- (pmsg[10] << 8) | (pmsg[11] << 0);
- rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) |
- (pmsg[14] << 8) | (pmsg[15] << 0);
-
-}
-
-
-/*
- * Count aggregated tx status from firmwar of one type rx command
- * packet element id = RX_TX_STATUS.
- */
-static void cmpk_count_tx_status(struct r8192_priv *priv, cmpk_tx_status_t *pstx_status)
-{
-
-#ifdef ENABLE_PS
-
- RT_RF_POWER_STATE rtstate;
-
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
-
- // When RF is off, we should not count the packet for hw/sw synchronize
- // reason, ie. there may be a duration while sw switch is changed and hw
- // switch is being changed. 2006.12.04, by shien chang.
- if (rtState == eRfOff)
- {
- return;
- }
-#endif
-
- priv->stats.txfeedbackok += pstx_status->txok;
- priv->stats.txoktotal += pstx_status->txok;
-
- priv->stats.txbytesunicast += pstx_status->txuclength;
-}
-
-
-
-/*
- * Firmware add a new tx feedback status to reduce rx command
- * packet buffer operation load.
- */
-static void cmpk_handle_tx_status(struct r8192_priv *priv, u8 *pmsg)
-{
- cmpk_tx_status_t rx_tx_sts; /* */
-
- memcpy((void*)&rx_tx_sts, (void*)pmsg, sizeof(cmpk_tx_status_t));
- /* 2. Use tx feedback info to count TX statistics. */
- cmpk_count_tx_status(priv, &rx_tx_sts);
-
-}
-
-
-/* Firmware add a new tx rate history */
-static void cmpk_handle_tx_rate_history(struct r8192_priv *priv, u8 *pmsg)
-{
- u8 i;
- u16 length = sizeof(cmpk_tx_rahis_t);
- u32 *ptemp;
-
-#ifdef ENABLE_PS
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
-
- // When RF is off, we should not count the packet for hw/sw synchronize
- // reason, ie. there may be a duration while sw switch is changed and hw
- // switch is being changed. 2006.12.04, by shien chang.
- if (rtState == eRfOff)
- {
- return;
- }
-#endif
-
- ptemp = (u32 *)pmsg;
-
- //
- // Do endian transfer to word alignment(16 bits) for windows system.
- // You must do different endian transfer for linux and MAC OS
- //
- for (i = 0; i < (length/4); i++)
- {
- u16 temp1, temp2;
-
- temp1 = ptemp[i]&0x0000FFFF;
- temp2 = ptemp[i]>>16;
- ptemp[i] = (temp1<<16)|temp2;
- }
-}
-
-
-/*
- * In the function, we will capture different RX command packet
- * info. Every RX command packet element has different message
- * length and meaning in content. We only support three type of RX
- * command packet now. Please refer to document
- * ws-06-0063-rtl8190-command-packet-specification.
- */
-u32 cmpk_message_handle_rx(struct r8192_priv *priv, struct ieee80211_rx_stats *pstats)
-{
-// u32 debug_level = DBG_LOUD;
- int total_length;
- u8 cmd_length, exe_cnt = 0;
- u8 element_id;
- u8 *pcmd_buff;
-
- RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx()\n");
-
- /* 0. Check inpt arguments. If is is a command queue message or pointer is
- null. */
- if (/*(prfd->queue_id != CMPK_RX_QUEUE_ID) || */(pstats== NULL))
- {
- /* Print error message. */
- /*RT_TRACE(COMP_SEND, DebugLevel,
- ("\n\r[CMPK]-->Err queue id or pointer"));*/
- return 0; /* This is not a command packet. */
- }
-
- /* 1. Read received command packet message length from RFD. */
- total_length = pstats->Length;
-
- /* 2. Read virtual address from RFD. */
- pcmd_buff = pstats->virtual_address;
-
- /* 3. Read command pakcet element id and length. */
- element_id = pcmd_buff[0];
- /*RT_TRACE(COMP_SEND, DebugLevel,
- ("\n\r[CMPK]-->element ID=%d Len=%d", element_id, total_length));*/
-
- /* 4. Check every received command packet conent according to different
- element type. Because FW may aggregate RX command packet to minimize
- transmit time between DRV and FW.*/
- // Add a counter to prevent to locked in the loop too long
- while (total_length > 0 || exe_cnt++ >100)
- {
- /* 2007/01/17 MH We support aggregation of different cmd in the same packet. */
- element_id = pcmd_buff[0];
-
- switch(element_id)
- {
- case RX_TX_FEEDBACK:
-
- RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_TX_FEEDBACK\n");
- cmpk_handle_tx_feedback(priv, pcmd_buff);
- cmd_length = CMPK_RX_TX_FB_SIZE;
- break;
-
- case RX_INTERRUPT_STATUS:
-
- RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_INTERRUPT_STATUS\n");
- cmpk_handle_interrupt_status(priv, pcmd_buff);
- cmd_length = sizeof(cmpk_intr_sta_t);
- break;
-
- case BOTH_QUERY_CONFIG:
-
- RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():BOTH_QUERY_CONFIG\n");
- cmpk_handle_query_config_rx(priv, pcmd_buff);
- cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE;
- break;
-
- case RX_TX_STATUS:
-
- RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_TX_STATUS\n");
- cmpk_handle_tx_status(priv, pcmd_buff);
- cmd_length = CMPK_RX_TX_STS_SIZE;
- break;
-
- case RX_TX_PER_PKT_FEEDBACK:
- // You must at lease add a switch case element here,
- // Otherwise, we will jump to default case.
- //DbgPrint("CCX Test\r\n");
- RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_TX_PER_PKT_FEEDBACK\n");
- cmd_length = CMPK_RX_TX_FB_SIZE;
- break;
-
- case RX_TX_RATE_HISTORY:
- //DbgPrint(" rx tx rate history\r\n");
-
- RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_TX_HISTORY\n");
- cmpk_handle_tx_rate_history(priv, pcmd_buff);
- cmd_length = CMPK_TX_RAHIS_SIZE;
- break;
-
- default:
-
- RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():unknown CMD Element\n");
- return 1; /* This is a command packet. */
- }
-
- total_length -= cmd_length;
- pcmd_buff += cmd_length;
- } /* while (total_length > 0) */
- return 1; /* This is a command packet. */
-
- RT_TRACE(COMP_EVENTS, "<----cmpk_message_handle_rx()\n");
-}
diff --git a/drivers/staging/rtl8192e/r819xE_cmdpkt.h b/drivers/staging/rtl8192e/r819xE_cmdpkt.h
deleted file mode 100644
index 312e4f84ded..00000000000
--- a/drivers/staging/rtl8192e/r819xE_cmdpkt.h
+++ /dev/null
@@ -1,207 +0,0 @@
-#ifndef R819XUSB_CMDPKT_H
-#define R819XUSB_CMDPKT_H
-/* Different command packet have dedicated message length and definition. */
-#define CMPK_RX_TX_FB_SIZE sizeof(cmpk_txfb_t) //20
-#define CMPK_TX_SET_CONFIG_SIZE sizeof(cmpk_set_cfg_t) //16
-#define CMPK_BOTH_QUERY_CONFIG_SIZE sizeof(cmpk_set_cfg_t) //16
-#define CMPK_RX_TX_STS_SIZE sizeof(cmpk_tx_status_t)//
-#define CMPK_RX_DBG_MSG_SIZE sizeof(cmpk_rx_dbginfo_t)//
-#define CMPK_TX_RAHIS_SIZE sizeof(cmpk_tx_rahis_t)
-
-/* 2008/05/08 amy For USB constant. */
-#define ISR_TxBcnOk BIT27 // Transmit Beacon OK
-#define ISR_TxBcnErr BIT26 // Transmit Beacon Error
-#define ISR_BcnTimerIntr BIT13 // Beacon Timer Interrupt
-
-#if 0
-/* Define packet type. */
-typedef enum tag_packet_type
-{
- PACKET_BROADCAST,
- PACKET_MULTICAST,
- PACKET_UNICAST,
- PACKET_TYPE_MAX
-}cmpk_pkt_type_e;
-#endif
-
-/* Define element ID of command packet. */
-
-/*------------------------------Define structure----------------------------*/
-/* Define different command packet structure. */
-/* 1. RX side: TX feedback packet. */
-typedef struct tag_cmd_pkt_tx_feedback
-{
- // DWORD 0
- u8 element_id; /* Command packet type. */
- u8 length; /* Command packet length. */
- /* 2007/07/05 MH Change tx feedback info field. */
- /*------TX Feedback Info Field */
- u8 TID:4; /* */
- u8 fail_reason:3; /* */
- u8 tok:1; /* Transmit ok. */
- u8 reserve1:4; /* */
- u8 pkt_type:2; /* */
- u8 bandwidth:1; /* */
- u8 qos_pkt:1; /* */
-
- // DWORD 1
- u8 reserve2; /* */
- /*------TX Feedback Info Field */
- u8 retry_cnt; /* */
- u16 pkt_id; /* */
-
- // DWORD 3
- u16 seq_num; /* */
- u8 s_rate; /* Start rate. */
- u8 f_rate; /* Final rate. */
-
- // DWORD 4
- u8 s_rts_rate; /* */
- u8 f_rts_rate; /* */
- u16 pkt_length; /* */
-
- // DWORD 5
- u16 reserve3; /* */
- u16 duration; /* */
-}cmpk_txfb_t;
-
-/* 2. RX side: Interrupt status packet. It includes Beacon State,
- Beacon Timer Interrupt and other useful informations in MAC ISR Reg. */
-typedef struct tag_cmd_pkt_interrupt_status
-{
- u8 element_id; /* Command packet type. */
- u8 length; /* Command packet length. */
- u16 reserve;
- u32 interrupt_status; /* Interrupt Status. */
-}cmpk_intr_sta_t;
-
-
-/* 3. TX side: Set configuration packet. */
-typedef struct tag_cmd_pkt_set_configuration
-{
- u8 element_id; /* Command packet type. */
- u8 length; /* Command packet length. */
- u16 reserve1; /* */
- u8 cfg_reserve1:3;
- u8 cfg_size:2; /* Configuration info. */
- u8 cfg_type:2; /* Configuration info. */
- u8 cfg_action:1; /* Configuration info. */
- u8 cfg_reserve2; /* Configuration info. */
- u8 cfg_page:4; /* Configuration info. */
- u8 cfg_reserve3:4; /* Configuration info. */
- u8 cfg_offset; /* Configuration info. */
- u32 value; /* */
- u32 mask; /* */
-}cmpk_set_cfg_t;
-
-/* 4. Both side : TX/RX query configuraton packet. The query structure is the
- same as set configuration. */
-#define cmpk_query_cfg_t cmpk_set_cfg_t
-
-/* 5. Multi packet feedback status. */
-typedef struct tag_tx_stats_feedback // PJ quick rxcmd 09042007
-{
- // For endian transfer --> Driver will not the same as firmware structure.
- // DW 0
- u16 reserve1;
- u8 length; // Command packet length
- u8 element_id; // Command packet type
-
- // DW 1
- u16 txfail; // Tx Fail count
- u16 txok; // Tx ok count
-
- // DW 2
- u16 txmcok; // tx multicast
- u16 txretry; // Tx Retry count
-
- // DW 3
- u16 txucok; // tx unicast
- u16 txbcok; // tx broadcast
-
- // DW 4
- u16 txbcfail; //
- u16 txmcfail; //
-
- // DW 5
- u16 reserve2; //
- u16 txucfail; //
-
- // DW 6-8
- u32 txmclength;
- u32 txbclength;
- u32 txuclength;
-
- // DW 9
- u16 reserve3_23;
- u8 reserve3_1;
- u8 rate;
-}__attribute__((packed)) cmpk_tx_status_t;
-
-/* 6. Debug feedback message. */
-/* 2007/10/23 MH Define RX debug message */
-typedef struct tag_rx_debug_message_feedback
-{
- // For endian transfer --> for driver
- // DW 0
- u16 reserve1;
- u8 length; // Command packet length
- u8 element_id; // Command packet type
-
- // DW 1-??
- // Variable debug message.
-
-}cmpk_rx_dbginfo_t;
-
-/* 2008/03/20 MH Define transmit rate history. For big endian format. */
-typedef struct tag_tx_rate_history
-{
- // For endian transfer --> for driver
- // DW 0
- u8 element_id; // Command packet type
- u8 length; // Command packet length
- u16 reserved1;
-
- // DW 1-2 CCK rate counter
- u16 cck[4];
-
- // DW 3-6
- u16 ofdm[8];
-
- // DW 7-14
- //UINT16 MCS_BW0_SG0[16];
-
- // DW 15-22
- //UINT16 MCS_BW1_SG0[16];
-
- // DW 23-30
- //UINT16 MCS_BW0_SG1[16];
-
- // DW 31-38
- //UINT16 MCS_BW1_SG1[16];
-
- // DW 7-14 BW=0 SG=0
- // DW 15-22 BW=1 SG=0
- // DW 23-30 BW=0 SG=1
- // DW 31-38 BW=1 SG=1
- u16 ht_mcs[4][16];
-
-}__attribute__((packed)) cmpk_tx_rahis_t;
-
-typedef enum tag_command_packet_directories
-{
- RX_TX_FEEDBACK = 0,
- RX_INTERRUPT_STATUS = 1,
- TX_SET_CONFIG = 2,
- BOTH_QUERY_CONFIG = 3,
- RX_TX_STATUS = 4,
- RX_DBGINFO_FEEDBACK = 5,
- RX_TX_PER_PKT_FEEDBACK = 6,
- RX_TX_RATE_HISTORY = 7,
- RX_CMD_ELE_MAX
-}cmpk_element_e;
-
-u32 cmpk_message_handle_rx(struct r8192_priv *priv, struct ieee80211_rx_stats *pstats);
-
-
-#endif
diff --git a/drivers/staging/rtl8192e/r819xE_firmware.c b/drivers/staging/rtl8192e/r819xE_firmware.c
deleted file mode 100644
index d9e8b5a0890..00000000000
--- a/drivers/staging/rtl8192e/r819xE_firmware.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Procedure: Init boot code/firmware code/data session
- *
- * Description: This routine will initialize firmware. If any error occurs
- * during the initialization process, the routine shall terminate
- * immediately and return fail.
- */
-
-#include "r8192E.h"
-#include "r8192E_hw.h"
-
-#include <linux/firmware.h>
-
-/* It should be double word alignment */
-#define GET_COMMAND_PACKET_FRAG_THRESHOLD(v) (4 * (v / 4) - 8)
-
-enum firmware_init_step {
- FW_INIT_STEP0_BOOT = 0,
- FW_INIT_STEP1_MAIN = 1,
- FW_INIT_STEP2_DATA = 2,
-};
-
-enum opt_rst_type {
- OPT_SYSTEM_RESET = 0,
- OPT_FIRMWARE_RESET = 1,
-};
-
-void firmware_init_param(struct r8192_priv *priv)
-{
- rt_firmware *pfirmware = priv->pFirmware;
-
- pfirmware->cmdpacket_frag_thresold =
- GET_COMMAND_PACKET_FRAG_THRESHOLD(MAX_TRANSMIT_BUFFER_SIZE);
-}
-
-/*
- * segment the img and use the ptr and length to remember info on each segment
- */
-static bool fw_download_code(struct r8192_priv *priv, u8 *code_virtual_address,
- u32 buffer_len)
-{
- bool rt_status = true;
- u16 frag_threshold;
- u16 frag_length, frag_offset = 0;
- int i;
-
- rt_firmware *pfirmware = priv->pFirmware;
- struct sk_buff *skb;
- unsigned char *seg_ptr;
- cb_desc *tcb_desc;
- u8 bLastIniPkt;
-
- firmware_init_param(priv);
-
- /* Fragmentation might be required */
- frag_threshold = pfirmware->cmdpacket_frag_thresold;
- do {
- if ((buffer_len - frag_offset) > frag_threshold) {
- frag_length = frag_threshold ;
- bLastIniPkt = 0;
- } else {
- frag_length = buffer_len - frag_offset;
- bLastIniPkt = 1;
- }
-
- /*
- * Allocate skb buffer to contain firmware info and tx
- * descriptor info add 4 to avoid packet appending overflow.
- */
- skb = dev_alloc_skb(frag_length + 4);
- tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- tcb_desc->queue_index = TXCMD_QUEUE;
- tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
- tcb_desc->bLastIniPkt = bLastIniPkt;
-
- seg_ptr = skb->data;
-
- /*
- * Transform from little endian to big endian and pending zero
- */
- for (i = 0; i < frag_length; i += 4) {
- *seg_ptr++ = ((i+0) < frag_length) ?
- code_virtual_address[i+3] : 0;
-
- *seg_ptr++ = ((i+1) < frag_length) ?
- code_virtual_address[i+2] : 0;
-
- *seg_ptr++ = ((i+2) < frag_length) ?
- code_virtual_address[i+1] : 0;
-
- *seg_ptr++ = ((i+3) < frag_length) ?
- code_virtual_address[i+0] : 0;
- }
- tcb_desc->txbuf_size = (u16)i;
- skb_put(skb, i);
- priv->ieee80211->softmac_hard_start_xmit(skb, priv->ieee80211);
-
- code_virtual_address += frag_length;
- frag_offset += frag_length;
-
- } while (frag_offset < buffer_len);
-
- return rt_status;
-}
-
-/*
- * Check whether main code is download OK. If OK, turn on CPU
- *
- * CPU register locates in different page against general
- * register. Switch to CPU register in the begin and switch
- * back before return
- */
-static bool CPUcheck_maincodeok_turnonCPU(struct r8192_priv *priv)
-{
- unsigned long timeout;
- bool rt_status = true;
- u32 CPU_status = 0;
-
- /* Check whether put code OK */
- timeout = jiffies + msecs_to_jiffies(20);
- while (time_before(jiffies, timeout)) {
- CPU_status = read_nic_dword(priv, CPU_GEN);
-
- if (CPU_status & CPU_GEN_PUT_CODE_OK)
- break;
- msleep(2);
- }
-
- if (!(CPU_status & CPU_GEN_PUT_CODE_OK)) {
- RT_TRACE(COMP_ERR, "Download Firmware: Put code fail!\n");
- goto CPUCheckMainCodeOKAndTurnOnCPU_Fail;
- } else {
- RT_TRACE(COMP_FIRMWARE, "Download Firmware: Put code ok!\n");
- }
-
- /* Turn On CPU */
- CPU_status = read_nic_dword(priv, CPU_GEN);
- write_nic_byte(priv, CPU_GEN,
- (u8)((CPU_status | CPU_GEN_PWR_STB_CPU) & 0xff));
- mdelay(1);
-
- /* Check whether CPU boot OK */
- timeout = jiffies + msecs_to_jiffies(20);
- while (time_before(jiffies, timeout)) {
- CPU_status = read_nic_dword(priv, CPU_GEN);
-
- if (CPU_status & CPU_GEN_BOOT_RDY)
- break;
- msleep(2);
- }
-
- if (!(CPU_status & CPU_GEN_BOOT_RDY))
- goto CPUCheckMainCodeOKAndTurnOnCPU_Fail;
- else
- RT_TRACE(COMP_FIRMWARE, "Download Firmware: Boot ready!\n");
-
- return rt_status;
-
-CPUCheckMainCodeOKAndTurnOnCPU_Fail:
- RT_TRACE(COMP_ERR, "ERR in %s()\n", __func__);
- rt_status = FALSE;
- return rt_status;
-}
-
-static bool CPUcheck_firmware_ready(struct r8192_priv *priv)
-{
- unsigned long timeout;
- bool rt_status = true;
- u32 CPU_status = 0;
-
- /* Check Firmware Ready */
- timeout = jiffies + msecs_to_jiffies(20);
- while (time_before(jiffies, timeout)) {
- CPU_status = read_nic_dword(priv, CPU_GEN);
-
- if (CPU_status & CPU_GEN_FIRM_RDY)
- break;
- msleep(2);
- }
-
- if (!(CPU_status & CPU_GEN_FIRM_RDY))
- goto CPUCheckFirmwareReady_Fail;
- else
- RT_TRACE(COMP_FIRMWARE, "Download Firmware: Firmware ready!\n");
-
- return rt_status;
-
-CPUCheckFirmwareReady_Fail:
- RT_TRACE(COMP_ERR, "ERR in %s()\n", __func__);
- rt_status = false;
- return rt_status;
-
-}
-
-bool init_firmware(struct r8192_priv *priv)
-{
- bool rt_status = true;
- u32 file_length = 0;
- u8 *mapped_file = NULL;
- u32 init_step = 0;
- enum opt_rst_type rst_opt = OPT_SYSTEM_RESET;
- enum firmware_init_step starting_state = FW_INIT_STEP0_BOOT;
-
- rt_firmware *pfirmware = priv->pFirmware;
- const struct firmware *fw_entry;
- const char *fw_name[3] = { "RTL8192E/boot.img",
- "RTL8192E/main.img",
- "RTL8192E/data.img"};
- int rc;
-
- RT_TRACE(COMP_FIRMWARE, " PlatformInitFirmware()==>\n");
-
- if (pfirmware->firmware_status == FW_STATUS_0_INIT) {
- /* it is called by reset */
- rst_opt = OPT_SYSTEM_RESET;
- starting_state = FW_INIT_STEP0_BOOT;
- /* TODO: system reset */
-
- } else if (pfirmware->firmware_status == FW_STATUS_5_READY) {
- /* it is called by Initialize */
- rst_opt = OPT_FIRMWARE_RESET;
- starting_state = FW_INIT_STEP2_DATA;
- } else {
- RT_TRACE(COMP_FIRMWARE,
- "PlatformInitFirmware: undefined firmware state\n");
- }
-
- /*
- * Download boot, main, and data image for System reset.
- * Download data image for firmware reseta
- */
- for (init_step = starting_state; init_step <= FW_INIT_STEP2_DATA;
- init_step++) {
- /*
- * Open Image file, and map file to contineous memory if open file success.
- * or read image file from array. Default load from IMG file
- */
- if (rst_opt == OPT_SYSTEM_RESET) {
- if (pfirmware->firmware_buf_size[init_step] == 0) {
- rc = request_firmware(&fw_entry,
- fw_name[init_step], &priv->pdev->dev);
-
- if (rc < 0) {
- RT_TRACE(COMP_FIRMWARE, "request firmware fail!\n");
- goto download_firmware_fail;
- }
-
- if (fw_entry->size > sizeof(pfirmware->firmware_buf[init_step])) {
- RT_TRACE(COMP_FIRMWARE,
- "img file size exceed the container buffer fail!\n");
- goto download_firmware_fail;
- }
-
- if (init_step != FW_INIT_STEP1_MAIN) {
- memcpy(pfirmware->firmware_buf[init_step],
- fw_entry->data, fw_entry->size);
- pfirmware->firmware_buf_size[init_step] = fw_entry->size;
-
- } else {
- memset(pfirmware->firmware_buf[init_step], 0, 128);
- memcpy(&pfirmware->firmware_buf[init_step][128], fw_entry->data,
- fw_entry->size);
- pfirmware->firmware_buf_size[init_step] = fw_entry->size+128;
- }
-
- if (rst_opt == OPT_SYSTEM_RESET)
- release_firmware(fw_entry);
- }
- mapped_file = pfirmware->firmware_buf[init_step];
- file_length = pfirmware->firmware_buf_size[init_step];
-
- } else if (rst_opt == OPT_FIRMWARE_RESET) {
- /* we only need to download data.img here */
- mapped_file = pfirmware->firmware_buf[init_step];
- file_length = pfirmware->firmware_buf_size[init_step];
- }
-
- /* Download image file */
- /* The firmware download process is just as following,
- * 1. that is each packet will be segmented and inserted to the
- * wait queue.
- * 2. each packet segment will be put in the skb_buff packet.
- * 3. each skb_buff packet data content will already include
- * the firmware info and Tx descriptor info
- */
- rt_status = fw_download_code(priv, mapped_file, file_length);
- if (rt_status != TRUE)
- goto download_firmware_fail;
-
- switch (init_step) {
- case FW_INIT_STEP0_BOOT:
- /* Download boot
- * initialize command descriptor.
- * will set polling bit when firmware code is also
- * configured
- */
- pfirmware->firmware_status = FW_STATUS_1_MOVE_BOOT_CODE;
- /* mdelay(1000); */
- /*
- * To initialize IMEM, CPU move code from 0x80000080,
- * hence, we send 0x80 byte packet
- */
- break;
-
- case FW_INIT_STEP1_MAIN:
- /* Download firmware code.
- * Wait until Boot Ready and Turn on CPU */
- pfirmware->firmware_status = FW_STATUS_2_MOVE_MAIN_CODE;
-
- /* Check Put Code OK and Turn On CPU */
- rt_status = CPUcheck_maincodeok_turnonCPU(priv);
- if (rt_status != TRUE) {
- RT_TRACE(COMP_FIRMWARE,
- "CPUcheck_maincodeok_turnonCPU fail!\n");
- goto download_firmware_fail;
- }
-
- pfirmware->firmware_status = FW_STATUS_3_TURNON_CPU;
- break;
-
- case FW_INIT_STEP2_DATA:
- /* download initial data code */
- pfirmware->firmware_status = FW_STATUS_4_MOVE_DATA_CODE;
- mdelay(1);
-
- rt_status = CPUcheck_firmware_ready(priv);
- if (rt_status != TRUE) {
- RT_TRACE(COMP_FIRMWARE,
- "CPUcheck_firmware_ready fail(%d)!\n",
- rt_status);
- goto download_firmware_fail;
- }
-
- /* wait until data code is initialized ready.*/
- pfirmware->firmware_status = FW_STATUS_5_READY;
- break;
- }
- }
-
- RT_TRACE(COMP_FIRMWARE, "Firmware Download Success\n");
- return rt_status;
-
-download_firmware_fail:
- RT_TRACE(COMP_ERR, "ERR in %s() step %d\n", __func__, init_step);
- rt_status = false;
- return rt_status;
-}
-
-MODULE_FIRMWARE("RTL8192E/boot.img");
-MODULE_FIRMWARE("RTL8192E/main.img");
-MODULE_FIRMWARE("RTL8192E/data.img");
diff --git a/drivers/staging/rtl8192e/r819xE_phy.c b/drivers/staging/rtl8192e/r819xE_phy.c
deleted file mode 100644
index 9e7828ef1cb..00000000000
--- a/drivers/staging/rtl8192e/r819xE_phy.c
+++ /dev/null
@@ -1,2225 +0,0 @@
-#include "r8192E.h"
-#include "r8192E_hw.h"
-#include "r819xE_phyreg.h"
-#include "r8190_rtl8256.h"
-#include "r819xE_phy.h"
-#include "r8192E_dm.h"
-#ifdef ENABLE_DOT11D
-#include "ieee80211/dot11d.h"
-#endif
-static const u32 RF_CHANNEL_TABLE_ZEBRA[] = {
- 0,
- 0x085c, //2412 1
- 0x08dc, //2417 2
- 0x095c, //2422 3
- 0x09dc, //2427 4
- 0x0a5c, //2432 5
- 0x0adc, //2437 6
- 0x0b5c, //2442 7
- 0x0bdc, //2447 8
- 0x0c5c, //2452 9
- 0x0cdc, //2457 10
- 0x0d5c, //2462 11
- 0x0ddc, //2467 12
- 0x0e5c, //2472 13
- 0x0f72, //2484
-};
-
-static u32 Rtl8192PciEMACPHY_Array[] = {
-0x03c,0xffff0000,0x00000f0f,
-0x340,0xffffffff,0x161a1a1a,
-0x344,0xffffffff,0x12121416,
-0x348,0x0000ffff,0x00001818,
-0x12c,0xffffffff,0x04000802,
-0x318,0x00000fff,0x00000100,
-};
-static u32 Rtl8192PciEMACPHY_Array_PG[] = {
-0x03c,0xffff0000,0x00000f0f,
-0xe00,0xffffffff,0x06090909,
-0xe04,0xffffffff,0x00030306,
-0xe08,0x0000ff00,0x00000000,
-0xe10,0xffffffff,0x0a0c0d0f,
-0xe14,0xffffffff,0x06070809,
-0xe18,0xffffffff,0x0a0c0d0f,
-0xe1c,0xffffffff,0x06070809,
-0x12c,0xffffffff,0x04000802,
-0x318,0x00000fff,0x00000800,
-};
-static u32 Rtl8192PciEAGCTAB_Array[AGCTAB_ArrayLength] = {
-0xc78,0x7d000001,
-0xc78,0x7d010001,
-0xc78,0x7d020001,
-0xc78,0x7d030001,
-0xc78,0x7d040001,
-0xc78,0x7d050001,
-0xc78,0x7c060001,
-0xc78,0x7b070001,
-0xc78,0x7a080001,
-0xc78,0x79090001,
-0xc78,0x780a0001,
-0xc78,0x770b0001,
-0xc78,0x760c0001,
-0xc78,0x750d0001,
-0xc78,0x740e0001,
-0xc78,0x730f0001,
-0xc78,0x72100001,
-0xc78,0x71110001,
-0xc78,0x70120001,
-0xc78,0x6f130001,
-0xc78,0x6e140001,
-0xc78,0x6d150001,
-0xc78,0x6c160001,
-0xc78,0x6b170001,
-0xc78,0x6a180001,
-0xc78,0x69190001,
-0xc78,0x681a0001,
-0xc78,0x671b0001,
-0xc78,0x661c0001,
-0xc78,0x651d0001,
-0xc78,0x641e0001,
-0xc78,0x491f0001,
-0xc78,0x48200001,
-0xc78,0x47210001,
-0xc78,0x46220001,
-0xc78,0x45230001,
-0xc78,0x44240001,
-0xc78,0x43250001,
-0xc78,0x28260001,
-0xc78,0x27270001,
-0xc78,0x26280001,
-0xc78,0x25290001,
-0xc78,0x242a0001,
-0xc78,0x232b0001,
-0xc78,0x222c0001,
-0xc78,0x212d0001,
-0xc78,0x202e0001,
-0xc78,0x0a2f0001,
-0xc78,0x08300001,
-0xc78,0x06310001,
-0xc78,0x05320001,
-0xc78,0x04330001,
-0xc78,0x03340001,
-0xc78,0x02350001,
-0xc78,0x01360001,
-0xc78,0x00370001,
-0xc78,0x00380001,
-0xc78,0x00390001,
-0xc78,0x003a0001,
-0xc78,0x003b0001,
-0xc78,0x003c0001,
-0xc78,0x003d0001,
-0xc78,0x003e0001,
-0xc78,0x003f0001,
-0xc78,0x7d400001,
-0xc78,0x7d410001,
-0xc78,0x7d420001,
-0xc78,0x7d430001,
-0xc78,0x7d440001,
-0xc78,0x7d450001,
-0xc78,0x7c460001,
-0xc78,0x7b470001,
-0xc78,0x7a480001,
-0xc78,0x79490001,
-0xc78,0x784a0001,
-0xc78,0x774b0001,
-0xc78,0x764c0001,
-0xc78,0x754d0001,
-0xc78,0x744e0001,
-0xc78,0x734f0001,
-0xc78,0x72500001,
-0xc78,0x71510001,
-0xc78,0x70520001,
-0xc78,0x6f530001,
-0xc78,0x6e540001,
-0xc78,0x6d550001,
-0xc78,0x6c560001,
-0xc78,0x6b570001,
-0xc78,0x6a580001,
-0xc78,0x69590001,
-0xc78,0x685a0001,
-0xc78,0x675b0001,
-0xc78,0x665c0001,
-0xc78,0x655d0001,
-0xc78,0x645e0001,
-0xc78,0x495f0001,
-0xc78,0x48600001,
-0xc78,0x47610001,
-0xc78,0x46620001,
-0xc78,0x45630001,
-0xc78,0x44640001,
-0xc78,0x43650001,
-0xc78,0x28660001,
-0xc78,0x27670001,
-0xc78,0x26680001,
-0xc78,0x25690001,
-0xc78,0x246a0001,
-0xc78,0x236b0001,
-0xc78,0x226c0001,
-0xc78,0x216d0001,
-0xc78,0x206e0001,
-0xc78,0x0a6f0001,
-0xc78,0x08700001,
-0xc78,0x06710001,
-0xc78,0x05720001,
-0xc78,0x04730001,
-0xc78,0x03740001,
-0xc78,0x02750001,
-0xc78,0x01760001,
-0xc78,0x00770001,
-0xc78,0x00780001,
-0xc78,0x00790001,
-0xc78,0x007a0001,
-0xc78,0x007b0001,
-0xc78,0x007c0001,
-0xc78,0x007d0001,
-0xc78,0x007e0001,
-0xc78,0x007f0001,
-0xc78,0x2e00001e,
-0xc78,0x2e01001e,
-0xc78,0x2e02001e,
-0xc78,0x2e03001e,
-0xc78,0x2e04001e,
-0xc78,0x2e05001e,
-0xc78,0x3006001e,
-0xc78,0x3407001e,
-0xc78,0x3908001e,
-0xc78,0x3c09001e,
-0xc78,0x3f0a001e,
-0xc78,0x420b001e,
-0xc78,0x440c001e,
-0xc78,0x450d001e,
-0xc78,0x460e001e,
-0xc78,0x460f001e,
-0xc78,0x4710001e,
-0xc78,0x4811001e,
-0xc78,0x4912001e,
-0xc78,0x4a13001e,
-0xc78,0x4b14001e,
-0xc78,0x4b15001e,
-0xc78,0x4c16001e,
-0xc78,0x4d17001e,
-0xc78,0x4e18001e,
-0xc78,0x4f19001e,
-0xc78,0x4f1a001e,
-0xc78,0x501b001e,
-0xc78,0x511c001e,
-0xc78,0x521d001e,
-0xc78,0x521e001e,
-0xc78,0x531f001e,
-0xc78,0x5320001e,
-0xc78,0x5421001e,
-0xc78,0x5522001e,
-0xc78,0x5523001e,
-0xc78,0x5624001e,
-0xc78,0x5725001e,
-0xc78,0x5726001e,
-0xc78,0x5827001e,
-0xc78,0x5828001e,
-0xc78,0x5929001e,
-0xc78,0x592a001e,
-0xc78,0x5a2b001e,
-0xc78,0x5b2c001e,
-0xc78,0x5c2d001e,
-0xc78,0x5c2e001e,
-0xc78,0x5d2f001e,
-0xc78,0x5e30001e,
-0xc78,0x5f31001e,
-0xc78,0x6032001e,
-0xc78,0x6033001e,
-0xc78,0x6134001e,
-0xc78,0x6235001e,
-0xc78,0x6336001e,
-0xc78,0x6437001e,
-0xc78,0x6438001e,
-0xc78,0x6539001e,
-0xc78,0x663a001e,
-0xc78,0x673b001e,
-0xc78,0x673c001e,
-0xc78,0x683d001e,
-0xc78,0x693e001e,
-0xc78,0x6a3f001e,
-};
-static u32 Rtl8192PciEPHY_REGArray[PHY_REGArrayLength] = {
-0x0, };
-static u32 Rtl8192PciEPHY_REG_1T2RArray[PHY_REG_1T2RArrayLength] = {
-0x800,0x00000000,
-0x804,0x00000001,
-0x808,0x0000fc00,
-0x80c,0x0000001c,
-0x810,0x801010aa,
-0x814,0x008514d0,
-0x818,0x00000040,
-0x81c,0x00000000,
-0x820,0x00000004,
-0x824,0x00690000,
-0x828,0x00000004,
-0x82c,0x00e90000,
-0x830,0x00000004,
-0x834,0x00690000,
-0x838,0x00000004,
-0x83c,0x00e90000,
-0x840,0x00000000,
-0x844,0x00000000,
-0x848,0x00000000,
-0x84c,0x00000000,
-0x850,0x00000000,
-0x854,0x00000000,
-0x858,0x65a965a9,
-0x85c,0x65a965a9,
-0x860,0x001f0010,
-0x864,0x007f0010,
-0x868,0x001f0010,
-0x86c,0x007f0010,
-0x870,0x0f100f70,
-0x874,0x0f100f70,
-0x878,0x00000000,
-0x87c,0x00000000,
-0x880,0x6870e36c,
-0x884,0xe3573600,
-0x888,0x4260c340,
-0x88c,0x0000ff00,
-0x890,0x00000000,
-0x894,0xfffffffe,
-0x898,0x4c42382f,
-0x89c,0x00656056,
-0x8b0,0x00000000,
-0x8e0,0x00000000,
-0x8e4,0x00000000,
-0x900,0x00000000,
-0x904,0x00000023,
-0x908,0x00000000,
-0x90c,0x31121311,
-0xa00,0x00d0c7d8,
-0xa04,0x811f0008,
-0xa08,0x80cd8300,
-0xa0c,0x2e62740f,
-0xa10,0x95009b78,
-0xa14,0x11145008,
-0xa18,0x00881117,
-0xa1c,0x89140fa0,
-0xa20,0x1a1b0000,
-0xa24,0x090e1317,
-0xa28,0x00000204,
-0xa2c,0x00000000,
-0xc00,0x00000040,
-0xc04,0x00005433,
-0xc08,0x000000e4,
-0xc0c,0x6c6c6c6c,
-0xc10,0x08800000,
-0xc14,0x40000100,
-0xc18,0x08000000,
-0xc1c,0x40000100,
-0xc20,0x08000000,
-0xc24,0x40000100,
-0xc28,0x08000000,
-0xc2c,0x40000100,
-0xc30,0x6de9ac44,
-0xc34,0x465c52cd,
-0xc38,0x497f5994,
-0xc3c,0x0a969764,
-0xc40,0x1f7c403f,
-0xc44,0x000100b7,
-0xc48,0xec020000,
-0xc4c,0x00000300,
-0xc50,0x69543420,
-0xc54,0x433c0094,
-0xc58,0x69543420,
-0xc5c,0x433c0094,
-0xc60,0x69543420,
-0xc64,0x433c0094,
-0xc68,0x69543420,
-0xc6c,0x433c0094,
-0xc70,0x2c7f000d,
-0xc74,0x0186175b,
-0xc78,0x0000001f,
-0xc7c,0x00b91612,
-0xc80,0x40000100,
-0xc84,0x20000000,
-0xc88,0x40000100,
-0xc8c,0x20200000,
-0xc90,0x40000100,
-0xc94,0x00000000,
-0xc98,0x40000100,
-0xc9c,0x00000000,
-0xca0,0x00492492,
-0xca4,0x00000000,
-0xca8,0x00000000,
-0xcac,0x00000000,
-0xcb0,0x00000000,
-0xcb4,0x00000000,
-0xcb8,0x00000000,
-0xcbc,0x00492492,
-0xcc0,0x00000000,
-0xcc4,0x00000000,
-0xcc8,0x00000000,
-0xccc,0x00000000,
-0xcd0,0x00000000,
-0xcd4,0x00000000,
-0xcd8,0x64b22427,
-0xcdc,0x00766932,
-0xce0,0x00222222,
-0xd00,0x00000750,
-0xd04,0x00000403,
-0xd08,0x0000907f,
-0xd0c,0x00000001,
-0xd10,0xa0633333,
-0xd14,0x33333c63,
-0xd18,0x6a8f5b6b,
-0xd1c,0x00000000,
-0xd20,0x00000000,
-0xd24,0x00000000,
-0xd28,0x00000000,
-0xd2c,0xcc979975,
-0xd30,0x00000000,
-0xd34,0x00000000,
-0xd38,0x00000000,
-0xd3c,0x00027293,
-0xd40,0x00000000,
-0xd44,0x00000000,
-0xd48,0x00000000,
-0xd4c,0x00000000,
-0xd50,0x6437140a,
-0xd54,0x024dbd02,
-0xd58,0x00000000,
-0xd5c,0x04032064,
-0xe00,0x161a1a1a,
-0xe04,0x12121416,
-0xe08,0x00001800,
-0xe0c,0x00000000,
-0xe10,0x161a1a1a,
-0xe14,0x12121416,
-0xe18,0x161a1a1a,
-0xe1c,0x12121416,
-};
-static u32 Rtl8192PciERadioA_Array[RadioA_ArrayLength] = {
-0x019,0x00000003,
-0x000,0x000000bf,
-0x001,0x00000ee0,
-0x002,0x0000004c,
-0x003,0x000007f1,
-0x004,0x00000975,
-0x005,0x00000c58,
-0x006,0x00000ae6,
-0x007,0x000000ca,
-0x008,0x00000e1c,
-0x009,0x000007f0,
-0x00a,0x000009d0,
-0x00b,0x000001ba,
-0x00c,0x00000240,
-0x00e,0x00000020,
-0x00f,0x00000990,
-0x012,0x00000806,
-0x014,0x000005ab,
-0x015,0x00000f80,
-0x016,0x00000020,
-0x017,0x00000597,
-0x018,0x0000050a,
-0x01a,0x00000f80,
-0x01b,0x00000f5e,
-0x01c,0x00000008,
-0x01d,0x00000607,
-0x01e,0x000006cc,
-0x01f,0x00000000,
-0x020,0x000001a5,
-0x01f,0x00000001,
-0x020,0x00000165,
-0x01f,0x00000002,
-0x020,0x000000c6,
-0x01f,0x00000003,
-0x020,0x00000086,
-0x01f,0x00000004,
-0x020,0x00000046,
-0x01f,0x00000005,
-0x020,0x000001e6,
-0x01f,0x00000006,
-0x020,0x000001a6,
-0x01f,0x00000007,
-0x020,0x00000166,
-0x01f,0x00000008,
-0x020,0x000000c7,
-0x01f,0x00000009,
-0x020,0x00000087,
-0x01f,0x0000000a,
-0x020,0x000000f7,
-0x01f,0x0000000b,
-0x020,0x000000d7,
-0x01f,0x0000000c,
-0x020,0x000000b7,
-0x01f,0x0000000d,
-0x020,0x00000097,
-0x01f,0x0000000e,
-0x020,0x00000077,
-0x01f,0x0000000f,
-0x020,0x00000057,
-0x01f,0x00000010,
-0x020,0x00000037,
-0x01f,0x00000011,
-0x020,0x000000fb,
-0x01f,0x00000012,
-0x020,0x000000db,
-0x01f,0x00000013,
-0x020,0x000000bb,
-0x01f,0x00000014,
-0x020,0x000000ff,
-0x01f,0x00000015,
-0x020,0x000000e3,
-0x01f,0x00000016,
-0x020,0x000000c3,
-0x01f,0x00000017,
-0x020,0x000000a3,
-0x01f,0x00000018,
-0x020,0x00000083,
-0x01f,0x00000019,
-0x020,0x00000063,
-0x01f,0x0000001a,
-0x020,0x00000043,
-0x01f,0x0000001b,
-0x020,0x00000023,
-0x01f,0x0000001c,
-0x020,0x00000003,
-0x01f,0x0000001d,
-0x020,0x000001e3,
-0x01f,0x0000001e,
-0x020,0x000001c3,
-0x01f,0x0000001f,
-0x020,0x000001a3,
-0x01f,0x00000020,
-0x020,0x00000183,
-0x01f,0x00000021,
-0x020,0x00000163,
-0x01f,0x00000022,
-0x020,0x00000143,
-0x01f,0x00000023,
-0x020,0x00000123,
-0x01f,0x00000024,
-0x020,0x00000103,
-0x023,0x00000203,
-0x024,0x00000100,
-0x00b,0x000001ba,
-0x02c,0x000003d7,
-0x02d,0x00000ff0,
-0x000,0x00000037,
-0x004,0x00000160,
-0x007,0x00000080,
-0x002,0x0000088d,
-0x0fe,0x00000000,
-0x0fe,0x00000000,
-0x016,0x00000200,
-0x016,0x00000380,
-0x016,0x00000020,
-0x016,0x000001a0,
-0x000,0x000000bf,
-0x00d,0x0000001f,
-0x00d,0x00000c9f,
-0x002,0x0000004d,
-0x000,0x00000cbf,
-0x004,0x00000975,
-0x007,0x00000700,
-};
-static u32 Rtl8192PciERadioB_Array[RadioB_ArrayLength] = {
-0x019,0x00000003,
-0x000,0x000000bf,
-0x001,0x000006e0,
-0x002,0x0000004c,
-0x003,0x000007f1,
-0x004,0x00000975,
-0x005,0x00000c58,
-0x006,0x00000ae6,
-0x007,0x000000ca,
-0x008,0x00000e1c,
-0x000,0x000000b7,
-0x00a,0x00000850,
-0x000,0x000000bf,
-0x00b,0x000001ba,
-0x00c,0x00000240,
-0x00e,0x00000020,
-0x015,0x00000f80,
-0x016,0x00000020,
-0x017,0x00000597,
-0x018,0x0000050a,
-0x01a,0x00000e00,
-0x01b,0x00000f5e,
-0x01d,0x00000607,
-0x01e,0x000006cc,
-0x00b,0x000001ba,
-0x023,0x00000203,
-0x024,0x00000100,
-0x000,0x00000037,
-0x004,0x00000160,
-0x016,0x00000200,
-0x016,0x00000380,
-0x016,0x00000020,
-0x016,0x000001a0,
-0x00d,0x00000ccc,
-0x000,0x000000bf,
-0x002,0x0000004d,
-0x000,0x00000cbf,
-0x004,0x00000975,
-0x007,0x00000700,
-};
-static u32 Rtl8192PciERadioC_Array[RadioC_ArrayLength] = {
-0x0, };
-static u32 Rtl8192PciERadioD_Array[RadioD_ArrayLength] = {
-0x0, };
-
-/*************************Define local function prototype**********************/
-
-static u32 phy_FwRFSerialRead(struct r8192_priv *priv, RF90_RADIO_PATH_E eRFPath, u32 Offset);
-static void phy_FwRFSerialWrite(struct r8192_priv *priv, RF90_RADIO_PATH_E eRFPath, u32 Offset, u32 Data);
-
-/*************************Define local function prototype**********************/
-/******************************************************************************
- *function: This function read BB parameters from Header file we gen,
- * and do register read/write
- * input: u32 dwBitMask //taget bit pos in the addr to be modified
- * output: none
- * return: u32 return the shift bit bit position of the mask
- * ****************************************************************************/
-static u32 rtl8192_CalculateBitShift(u32 dwBitMask)
-{
- u32 i;
- for (i=0; i<=31; i++)
- {
- if (((dwBitMask>>i)&0x1) == 1)
- break;
- }
- return i;
-}
-/******************************************************************************
- *function: This function check different RF type to execute legal judgement. If RF Path is illegal, we will return false.
- * input: none
- * output: none
- * return: 0(illegal, false), 1(legal,true)
- * ***************************************************************************/
-u8 rtl8192_phy_CheckIsLegalRFPath(struct r8192_priv *priv, u32 eRFPath)
-{
- u8 ret = 1;
-
- if (priv->rf_type == RF_2T4R)
- ret = 0;
- else if (priv->rf_type == RF_1T2R)
- {
- if (eRFPath == RF90_PATH_A || eRFPath == RF90_PATH_B)
- ret = 1;
- else if (eRFPath == RF90_PATH_C || eRFPath == RF90_PATH_D)
- ret = 0;
- }
-
- return ret;
-}
-/******************************************************************************
- *function: This function set specific bits to BB register
- * input: net_device dev
- * u32 dwRegAddr //target addr to be modified
- * u32 dwBitMask //taget bit pos in the addr to be modified
- * u32 dwData //value to be write
- * output: none
- * return: none
- * notice:
- * ****************************************************************************/
-void rtl8192_setBBreg(struct r8192_priv *priv, u32 dwRegAddr, u32 dwBitMask, u32 dwData)
-{
- u32 OriginalValue, BitShift, NewValue;
-
- if(dwBitMask!= bMaskDWord)
- {//if not "double word" write
- OriginalValue = read_nic_dword(priv, dwRegAddr);
- BitShift = rtl8192_CalculateBitShift(dwBitMask);
- NewValue = (((OriginalValue) & (~dwBitMask)) | (dwData << BitShift));
- write_nic_dword(priv, dwRegAddr, NewValue);
- }else
- write_nic_dword(priv, dwRegAddr, dwData);
-}
-/******************************************************************************
- *function: This function reads specific bits from BB register
- * input: net_device dev
- * u32 dwRegAddr //target addr to be readback
- * u32 dwBitMask //taget bit pos in the addr to be readback
- * output: none
- * return: u32 Data //the readback register value
- * notice:
- * ****************************************************************************/
-u32 rtl8192_QueryBBReg(struct r8192_priv *priv, u32 dwRegAddr, u32 dwBitMask)
-{
- u32 OriginalValue, BitShift;
-
- OriginalValue = read_nic_dword(priv, dwRegAddr);
- BitShift = rtl8192_CalculateBitShift(dwBitMask);
- return (OriginalValue & dwBitMask) >> BitShift;
-}
-/******************************************************************************
- *function: This function read register from RF chip
- * input: net_device dev
- * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
- * u32 Offset //target address to be read
- * output: none
- * return: u32 readback value
- * notice: There are three types of serial operations:(1) Software serial write.(2)Hardware LSSI-Low Speed Serial Interface.(3)Hardware HSSI-High speed serial write. Driver here need to implement (1) and (2)---need more spec for this information.
- * ****************************************************************************/
-static u32 rtl8192_phy_RFSerialRead(struct r8192_priv *priv,
- RF90_RADIO_PATH_E eRFPath, u32 Offset)
-{
- u32 ret = 0;
- u32 NewOffset = 0;
- BB_REGISTER_DEFINITION_T* pPhyReg = &priv->PHYRegDef[eRFPath];
- //rtl8192_setBBreg(dev, pPhyReg->rfLSSIReadBack, bLSSIReadBackData, 0);
- //make sure RF register offset is correct
- Offset &= 0x3f;
-
- //switch page for 8256 RF IC
- //analog to digital off, for protection
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter4, 0xf00, 0x0);// 0x88c[11:8]
- if (Offset >= 31)
- {
- priv->RfReg0Value[eRFPath] |= 0x140;
- //Switch to Reg_Mode2 for Reg 31-45
- rtl8192_setBBreg(priv, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16) );
- //modify offset
- NewOffset = Offset -30;
- }
- else if (Offset >= 16)
- {
- priv->RfReg0Value[eRFPath] |= 0x100;
- priv->RfReg0Value[eRFPath] &= (~0x40);
- //Switch to Reg_Mode 1 for Reg16-30
- rtl8192_setBBreg(priv, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16) );
-
- NewOffset = Offset - 15;
- }
- else
- NewOffset = Offset;
-
- //put desired read addr to LSSI control Register
- rtl8192_setBBreg(priv, pPhyReg->rfHSSIPara2, bLSSIReadAddress, NewOffset);
- //Issue a posedge trigger
- //
- rtl8192_setBBreg(priv, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x0);
- rtl8192_setBBreg(priv, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x1);
-
-
- // TODO: we should not delay such a long time. Ask help from SD3
- msleep(1);
-
- ret = rtl8192_QueryBBReg(priv, pPhyReg->rfLSSIReadBack, bLSSIReadBackData);
-
-
- // Switch back to Reg_Mode0;
- priv->RfReg0Value[eRFPath] &= 0xebf;
-
- rtl8192_setBBreg(
- priv,
- pPhyReg->rf3wireOffset,
- bMaskDWord,
- (priv->RfReg0Value[eRFPath] << 16));
-
- //analog to digital on
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter4, 0x300, 0x3);// 0x88c[9:8]
-
- return ret;
-}
-
-/******************************************************************************
- *function: This function write data to RF register
- * input: net_device dev
- * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
- * u32 Offset //target address to be written
- * u32 Data //The new register data to be written
- * output: none
- * return: none
- * notice: For RF8256 only.
- ===========================================================
- *Reg Mode RegCTL[1] RegCTL[0] Note
- * (Reg00[12]) (Reg00[10])
- *===========================================================
- *Reg_Mode0 0 x Reg 0 ~15(0x0 ~ 0xf)
- *------------------------------------------------------------------
- *Reg_Mode1 1 0 Reg 16 ~30(0x1 ~ 0xf)
- *------------------------------------------------------------------
- * Reg_Mode2 1 1 Reg 31 ~ 45(0x1 ~ 0xf)
- *------------------------------------------------------------------
- * ****************************************************************************/
-static void rtl8192_phy_RFSerialWrite(struct r8192_priv *priv,
- RF90_RADIO_PATH_E eRFPath, u32 Offset,
- u32 Data)
-{
- u32 DataAndAddr = 0, NewOffset = 0;
- BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[eRFPath];
-
- Offset &= 0x3f;
-
- //analog to digital off, for protection
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter4, 0xf00, 0x0);// 0x88c[11:8]
-
- if (Offset >= 31)
- {
- priv->RfReg0Value[eRFPath] |= 0x140;
- rtl8192_setBBreg(priv, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath] << 16));
- NewOffset = Offset - 30;
- }
- else if (Offset >= 16)
- {
- priv->RfReg0Value[eRFPath] |= 0x100;
- priv->RfReg0Value[eRFPath] &= (~0x40);
- rtl8192_setBBreg(priv, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16));
- NewOffset = Offset - 15;
- }
- else
- NewOffset = Offset;
-
- // Put write addr in [5:0] and write data in [31:16]
- DataAndAddr = (Data<<16) | (NewOffset&0x3f);
-
- // Write Operation
- rtl8192_setBBreg(priv, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
-
-
- if(Offset==0x0)
- priv->RfReg0Value[eRFPath] = Data;
-
- // Switch back to Reg_Mode0;
- if(Offset != 0)
- {
- priv->RfReg0Value[eRFPath] &= 0xebf;
- rtl8192_setBBreg(
- priv,
- pPhyReg->rf3wireOffset,
- bMaskDWord,
- (priv->RfReg0Value[eRFPath] << 16));
- }
- //analog to digital on
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter4, 0x300, 0x3);// 0x88c[9:8]
-}
-
-/******************************************************************************
- *function: This function set specific bits to RF register
- * input: RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
- * u32 RegAddr //target addr to be modified
- * u32 BitMask //taget bit pos in the addr to be modified
- * u32 Data //value to be write
- * output: none
- * return: none
- * notice:
- * ****************************************************************************/
-void rtl8192_phy_SetRFReg(struct r8192_priv *priv, RF90_RADIO_PATH_E eRFPath,
- u32 RegAddr, u32 BitMask, u32 Data)
-{
- u32 Original_Value, BitShift, New_Value;
-// u8 time = 0;
-
- if (!rtl8192_phy_CheckIsLegalRFPath(priv, eRFPath))
- return;
- if (priv->eRFPowerState != eRfOn && !priv->being_init_adapter)
- return;
- //down(&priv->rf_sem);
-
- RT_TRACE(COMP_PHY, "FW RF CTRL is not ready now\n");
- if (priv->Rf_Mode == RF_OP_By_FW)
- {
- if (BitMask != bMask12Bits) // RF data is 12 bits only
- {
- Original_Value = phy_FwRFSerialRead(priv, eRFPath, RegAddr);
- BitShift = rtl8192_CalculateBitShift(BitMask);
- New_Value = (((Original_Value) & (~BitMask)) | (Data<< BitShift));
-
- phy_FwRFSerialWrite(priv, eRFPath, RegAddr, New_Value);
- }else
- phy_FwRFSerialWrite(priv, eRFPath, RegAddr, Data);
- udelay(200);
-
- }
- else
- {
- if (BitMask != bMask12Bits) // RF data is 12 bits only
- {
- Original_Value = rtl8192_phy_RFSerialRead(priv, eRFPath, RegAddr);
- BitShift = rtl8192_CalculateBitShift(BitMask);
- New_Value = (((Original_Value) & (~BitMask)) | (Data<< BitShift));
-
- rtl8192_phy_RFSerialWrite(priv, eRFPath, RegAddr, New_Value);
- }else
- rtl8192_phy_RFSerialWrite(priv, eRFPath, RegAddr, Data);
- }
- //up(&priv->rf_sem);
-}
-
-/******************************************************************************
- *function: This function reads specific bits from RF register
- * input: net_device dev
- * u32 RegAddr //target addr to be readback
- * u32 BitMask //taget bit pos in the addr to be readback
- * output: none
- * return: u32 Data //the readback register value
- * notice:
- * ****************************************************************************/
-u32 rtl8192_phy_QueryRFReg(struct r8192_priv *priv, RF90_RADIO_PATH_E eRFPath,
- u32 RegAddr, u32 BitMask)
-{
- u32 Original_Value, Readback_Value, BitShift;
-
- if (!rtl8192_phy_CheckIsLegalRFPath(priv, eRFPath))
- return 0;
- if (priv->eRFPowerState != eRfOn && !priv->being_init_adapter)
- return 0;
- down(&priv->rf_sem);
- if (priv->Rf_Mode == RF_OP_By_FW)
- {
- Original_Value = phy_FwRFSerialRead(priv, eRFPath, RegAddr);
- udelay(200);
- }
- else
- {
- Original_Value = rtl8192_phy_RFSerialRead(priv, eRFPath, RegAddr);
-
- }
- BitShift = rtl8192_CalculateBitShift(BitMask);
- Readback_Value = (Original_Value & BitMask) >> BitShift;
- up(&priv->rf_sem);
-// udelay(200);
- return Readback_Value;
-}
-
-/******************************************************************************
- *function: We support firmware to execute RF-R/W.
- * input: dev
- * output: none
- * return: none
- * notice:
- * ***************************************************************************/
-static u32 phy_FwRFSerialRead(struct r8192_priv *priv,
- RF90_RADIO_PATH_E eRFPath, u32 Offset)
-{
- u32 Data = 0;
- u8 time = 0;
- //DbgPrint("FW RF CTRL\n\r");
- /* 2007/11/02 MH Firmware RF Write control. By Francis' suggestion, we can
- not execute the scheme in the initial step. Otherwise, RF-R/W will waste
- much time. This is only for site survey. */
- // 1. Read operation need not insert data. bit 0-11
- //Data &= bMask12Bits;
- // 2. Write RF register address. Bit 12-19
- Data |= ((Offset&0xFF)<<12);
- // 3. Write RF path. bit 20-21
- Data |= ((eRFPath&0x3)<<20);
- // 4. Set RF read indicator. bit 22=0
- //Data |= 0x00000;
- // 5. Trigger Fw to operate the command. bit 31
- Data |= 0x80000000;
- // 6. We can not execute read operation if bit 31 is 1.
- while (read_nic_dword(priv, QPNR)&0x80000000)
- {
- // If FW can not finish RF-R/W for more than ?? times. We must reset FW.
- if (time++ < 100)
- {
- //DbgPrint("FW not finish RF-R Time=%d\n\r", time);
- udelay(10);
- }
- else
- break;
- }
- // 7. Execute read operation.
- write_nic_dword(priv, QPNR, Data);
- // 8. Check if firmawre send back RF content.
- while (read_nic_dword(priv, QPNR)&0x80000000)
- {
- // If FW can not finish RF-R/W for more than ?? times. We must reset FW.
- if (time++ < 100)
- {
- //DbgPrint("FW not finish RF-W Time=%d\n\r", time);
- udelay(10);
- }
- else
- return 0;
- }
- return read_nic_dword(priv, RF_DATA);
-}
-
-/******************************************************************************
- *function: We support firmware to execute RF-R/W.
- * input: dev
- * output: none
- * return: none
- * notice:
- * ***************************************************************************/
-static void phy_FwRFSerialWrite(struct r8192_priv *priv,
- RF90_RADIO_PATH_E eRFPath, u32 Offset, u32 Data)
-{
- u8 time = 0;
-
- //DbgPrint("N FW RF CTRL RF-%d OF%02x DATA=%03x\n\r", eRFPath, Offset, Data);
- /* 2007/11/02 MH Firmware RF Write control. By Francis' suggestion, we can
- not execute the scheme in the initial step. Otherwise, RF-R/W will waste
- much time. This is only for site survey. */
-
- // 1. Set driver write bit and 12 bit data. bit 0-11
- //Data &= bMask12Bits; // Done by uper layer.
- // 2. Write RF register address. bit 12-19
- Data |= ((Offset&0xFF)<<12);
- // 3. Write RF path. bit 20-21
- Data |= ((eRFPath&0x3)<<20);
- // 4. Set RF write indicator. bit 22=1
- Data |= 0x400000;
- // 5. Trigger Fw to operate the command. bit 31=1
- Data |= 0x80000000;
-
- // 6. Write operation. We can not write if bit 31 is 1.
- while (read_nic_dword(priv, QPNR)&0x80000000)
- {
- // If FW can not finish RF-R/W for more than ?? times. We must reset FW.
- if (time++ < 100)
- {
- //DbgPrint("FW not finish RF-W Time=%d\n\r", time);
- udelay(10);
- }
- else
- break;
- }
- // 7. No matter check bit. We always force the write. Because FW will
- // not accept the command.
- write_nic_dword(priv, QPNR, Data);
- /* 2007/11/02 MH Acoording to test, we must delay 20us to wait firmware
- to finish RF write operation. */
- /* 2008/01/17 MH We support delay in firmware side now. */
- //delay_us(20);
-
-}
-
-
-/******************************************************************************
- *function: This function read BB parameters from Header file we gen,
- * and do register read/write
- * input: dev
- * output: none
- * return: none
- * notice: BB parameters may change all the time, so please make
- * sure it has been synced with the newest.
- * ***************************************************************************/
-void rtl8192_phy_configmac(struct r8192_priv *priv)
-{
- u32 dwArrayLen = 0, i = 0;
- u32* pdwArray = NULL;
-#ifdef TO_DO_LIST
-if(Adapter->bInHctTest)
- {
- RT_TRACE(COMP_PHY, "Rtl819XMACPHY_ArrayDTM\n");
- dwArrayLen = MACPHY_ArrayLengthDTM;
- pdwArray = Rtl819XMACPHY_ArrayDTM;
- }
- else if(priv->bTXPowerDataReadFromEEPORM)
-#endif
- if(priv->bTXPowerDataReadFromEEPORM)
- {
- RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array_PG\n");
- dwArrayLen = MACPHY_Array_PGLength;
- pdwArray = Rtl819XMACPHY_Array_PG;
-
- }
- else
- {
- RT_TRACE(COMP_PHY,"Read rtl819XMACPHY_Array\n");
- dwArrayLen = MACPHY_ArrayLength;
- pdwArray = Rtl819XMACPHY_Array;
- }
- for(i = 0; i<dwArrayLen; i=i+3){
- RT_TRACE(COMP_DBG, "The Rtl8190MACPHY_Array[0] is %x Rtl8190MACPHY_Array[1] is %x Rtl8190MACPHY_Array[2] is %x\n",
- pdwArray[i], pdwArray[i+1], pdwArray[i+2]);
- if(pdwArray[i] == 0x318)
- {
- pdwArray[i+2] = 0x00000800;
- //DbgPrint("ptrArray[i], ptrArray[i+1], ptrArray[i+2] = %x, %x, %x\n",
- // ptrArray[i], ptrArray[i+1], ptrArray[i+2]);
- }
- rtl8192_setBBreg(priv, pdwArray[i], pdwArray[i+1], pdwArray[i+2]);
- }
-}
-
-/******************************************************************************
- *function: This function do dirty work
- * input: dev
- * output: none
- * return: none
- * notice: BB parameters may change all the time, so please make
- * sure it has been synced with the newest.
- * ***************************************************************************/
-
-void rtl8192_phyConfigBB(struct r8192_priv *priv, u8 ConfigType)
-{
- int i;
- //u8 ArrayLength;
- u32* Rtl819XPHY_REGArray_Table = NULL;
- u32* Rtl819XAGCTAB_Array_Table = NULL;
- u16 AGCTAB_ArrayLen, PHY_REGArrayLen = 0;
-#ifdef TO_DO_LIST
- u32 *rtl8192PhyRegArrayTable = NULL, *rtl8192AgcTabArrayTable = NULL;
- if(Adapter->bInHctTest)
- {
- AGCTAB_ArrayLen = AGCTAB_ArrayLengthDTM;
- Rtl819XAGCTAB_Array_Table = Rtl819XAGCTAB_ArrayDTM;
-
- if(priv->RF_Type == RF_2T4R)
- {
- PHY_REGArrayLen = PHY_REGArrayLengthDTM;
- Rtl819XPHY_REGArray_Table = Rtl819XPHY_REGArrayDTM;
- }
- else if (priv->RF_Type == RF_1T2R)
- {
- PHY_REGArrayLen = PHY_REG_1T2RArrayLengthDTM;
- Rtl819XPHY_REGArray_Table = Rtl819XPHY_REG_1T2RArrayDTM;
- }
- }
- else
-#endif
- {
- AGCTAB_ArrayLen = AGCTAB_ArrayLength;
- Rtl819XAGCTAB_Array_Table = Rtl819XAGCTAB_Array;
- if(priv->rf_type == RF_2T4R)
- {
- PHY_REGArrayLen = PHY_REGArrayLength;
- Rtl819XPHY_REGArray_Table = Rtl819XPHY_REGArray;
- }
- else if (priv->rf_type == RF_1T2R)
- {
- PHY_REGArrayLen = PHY_REG_1T2RArrayLength;
- Rtl819XPHY_REGArray_Table = Rtl819XPHY_REG_1T2RArray;
- }
- }
-
- if (ConfigType == BaseBand_Config_PHY_REG)
- {
- for (i=0; i<PHY_REGArrayLen; i+=2)
- {
- rtl8192_setBBreg(priv, Rtl819XPHY_REGArray_Table[i], bMaskDWord, Rtl819XPHY_REGArray_Table[i+1]);
- RT_TRACE(COMP_DBG, "i: %x, The Rtl819xUsbPHY_REGArray[0] is %x Rtl819xUsbPHY_REGArray[1] is %x\n",i, Rtl819XPHY_REGArray_Table[i], Rtl819XPHY_REGArray_Table[i+1]);
- }
- }
- else if (ConfigType == BaseBand_Config_AGC_TAB)
- {
- for (i=0; i<AGCTAB_ArrayLen; i+=2)
- {
- rtl8192_setBBreg(priv, Rtl819XAGCTAB_Array_Table[i], bMaskDWord, Rtl819XAGCTAB_Array_Table[i+1]);
- RT_TRACE(COMP_DBG, "i:%x, The rtl819XAGCTAB_Array[0] is %x rtl819XAGCTAB_Array[1] is %x\n",i, Rtl819XAGCTAB_Array_Table[i], Rtl819XAGCTAB_Array_Table[i+1]);
- }
- }
-}
-/******************************************************************************
- *function: This function initialize Register definition offset for Radio Path
- * A/B/C/D
- * input: net_device dev
- * output: none
- * return: none
- * notice: Initialization value here is constant and it should never be changed
- * ***************************************************************************/
-static void rtl8192_InitBBRFRegDef(struct r8192_priv *priv)
-{
-// RF Interface Sowrtware Control
- priv->PHYRegDef[RF90_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW; // 16 LSBs if read 32-bit from 0x870
- priv->PHYRegDef[RF90_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW; // 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872)
- priv->PHYRegDef[RF90_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW;// 16 LSBs if read 32-bit from 0x874
- priv->PHYRegDef[RF90_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW;// 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876)
-
- // RF Interface Readback Value
- priv->PHYRegDef[RF90_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB; // 16 LSBs if read 32-bit from 0x8E0
- priv->PHYRegDef[RF90_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;// 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2)
- priv->PHYRegDef[RF90_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB;// 16 LSBs if read 32-bit from 0x8E4
- priv->PHYRegDef[RF90_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB;// 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6)
-
- // RF Interface Output (and Enable)
- priv->PHYRegDef[RF90_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE; // 16 LSBs if read 32-bit from 0x860
- priv->PHYRegDef[RF90_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE; // 16 LSBs if read 32-bit from 0x864
- priv->PHYRegDef[RF90_PATH_C].rfintfo = rFPGA0_XC_RFInterfaceOE;// 16 LSBs if read 32-bit from 0x868
- priv->PHYRegDef[RF90_PATH_D].rfintfo = rFPGA0_XD_RFInterfaceOE;// 16 LSBs if read 32-bit from 0x86C
-
- // RF Interface (Output and) Enable
- priv->PHYRegDef[RF90_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE; // 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862)
- priv->PHYRegDef[RF90_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE; // 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866)
- priv->PHYRegDef[RF90_PATH_C].rfintfe = rFPGA0_XC_RFInterfaceOE;// 16 MSBs if read 32-bit from 0x86A (16-bit for 0x86A)
- priv->PHYRegDef[RF90_PATH_D].rfintfe = rFPGA0_XD_RFInterfaceOE;// 16 MSBs if read 32-bit from 0x86C (16-bit for 0x86E)
-
- //Addr of LSSI. Wirte RF register by driver
- priv->PHYRegDef[RF90_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter; //LSSI Parameter
- priv->PHYRegDef[RF90_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter;
- priv->PHYRegDef[RF90_PATH_C].rf3wireOffset = rFPGA0_XC_LSSIParameter;
- priv->PHYRegDef[RF90_PATH_D].rf3wireOffset = rFPGA0_XD_LSSIParameter;
-
- // RF parameter
- priv->PHYRegDef[RF90_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter; //BB Band Select
- priv->PHYRegDef[RF90_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter;
- priv->PHYRegDef[RF90_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter;
- priv->PHYRegDef[RF90_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter;
-
- // Tx AGC Gain Stage (same for all path. Should we remove this?)
- priv->PHYRegDef[RF90_PATH_A].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage
- priv->PHYRegDef[RF90_PATH_B].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage
- priv->PHYRegDef[RF90_PATH_C].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage
- priv->PHYRegDef[RF90_PATH_D].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage
-
- // Tranceiver A~D HSSI Parameter-1
- priv->PHYRegDef[RF90_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; //wire control parameter1
- priv->PHYRegDef[RF90_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1; //wire control parameter1
- priv->PHYRegDef[RF90_PATH_C].rfHSSIPara1 = rFPGA0_XC_HSSIParameter1; //wire control parameter1
- priv->PHYRegDef[RF90_PATH_D].rfHSSIPara1 = rFPGA0_XD_HSSIParameter1; //wire control parameter1
-
- // Tranceiver A~D HSSI Parameter-2
- priv->PHYRegDef[RF90_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2; //wire control parameter2
- priv->PHYRegDef[RF90_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2; //wire control parameter2
- priv->PHYRegDef[RF90_PATH_C].rfHSSIPara2 = rFPGA0_XC_HSSIParameter2; //wire control parameter2
- priv->PHYRegDef[RF90_PATH_D].rfHSSIPara2 = rFPGA0_XD_HSSIParameter2; //wire control parameter1
-
- // RF switch Control
- priv->PHYRegDef[RF90_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl; //TR/Ant switch control
- priv->PHYRegDef[RF90_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl;
- priv->PHYRegDef[RF90_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl;
- priv->PHYRegDef[RF90_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl;
-
- // AGC control 1
- priv->PHYRegDef[RF90_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1;
- priv->PHYRegDef[RF90_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1;
- priv->PHYRegDef[RF90_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1;
- priv->PHYRegDef[RF90_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1;
-
- // AGC control 2
- priv->PHYRegDef[RF90_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2;
- priv->PHYRegDef[RF90_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2;
- priv->PHYRegDef[RF90_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2;
- priv->PHYRegDef[RF90_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2;
-
- // RX AFE control 1
- priv->PHYRegDef[RF90_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance;
- priv->PHYRegDef[RF90_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
- priv->PHYRegDef[RF90_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance;
- priv->PHYRegDef[RF90_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance;
-
- // RX AFE control 1
- priv->PHYRegDef[RF90_PATH_A].rfRxAFE = rOFDM0_XARxAFE;
- priv->PHYRegDef[RF90_PATH_B].rfRxAFE = rOFDM0_XBRxAFE;
- priv->PHYRegDef[RF90_PATH_C].rfRxAFE = rOFDM0_XCRxAFE;
- priv->PHYRegDef[RF90_PATH_D].rfRxAFE = rOFDM0_XDRxAFE;
-
- // Tx AFE control 1
- priv->PHYRegDef[RF90_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance;
- priv->PHYRegDef[RF90_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
- priv->PHYRegDef[RF90_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance;
- priv->PHYRegDef[RF90_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance;
-
- // Tx AFE control 2
- priv->PHYRegDef[RF90_PATH_A].rfTxAFE = rOFDM0_XATxAFE;
- priv->PHYRegDef[RF90_PATH_B].rfTxAFE = rOFDM0_XBTxAFE;
- priv->PHYRegDef[RF90_PATH_C].rfTxAFE = rOFDM0_XCTxAFE;
- priv->PHYRegDef[RF90_PATH_D].rfTxAFE = rOFDM0_XDTxAFE;
-
- // Tranceiver LSSI Readback
- priv->PHYRegDef[RF90_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
- priv->PHYRegDef[RF90_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
- priv->PHYRegDef[RF90_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack;
- priv->PHYRegDef[RF90_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack;
-
-}
-/******************************************************************************
- *function: This function is to write register and then readback to make sure whether BB and RF is OK
- * input: net_device dev
- * HW90_BLOCK_E CheckBlock
- * RF90_RADIO_PATH_E eRFPath //only used when checkblock is HW90_BLOCK_RF
- * output: none
- * return: return whether BB and RF is ok(0:OK; 1:Fail)
- * notice: This function may be removed in the ASIC
- * ***************************************************************************/
-RT_STATUS rtl8192_phy_checkBBAndRF(struct r8192_priv *priv,
- HW90_BLOCK_E CheckBlock,
- RF90_RADIO_PATH_E eRFPath)
-{
-// BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[eRFPath];
- RT_STATUS ret = RT_STATUS_SUCCESS;
- u32 i, CheckTimes = 4, dwRegRead = 0;
- u32 WriteAddr[4];
- u32 WriteData[] = {0xfffff027, 0xaa55a02f, 0x00000027, 0x55aa502f};
- // Initialize register address offset to be checked
- WriteAddr[HW90_BLOCK_MAC] = 0x100;
- WriteAddr[HW90_BLOCK_PHY0] = 0x900;
- WriteAddr[HW90_BLOCK_PHY1] = 0x800;
- WriteAddr[HW90_BLOCK_RF] = 0x3;
- RT_TRACE(COMP_PHY, "=======>%s(), CheckBlock:%d\n", __FUNCTION__, CheckBlock);
- for(i=0 ; i < CheckTimes ; i++)
- {
-
- //
- // Write Data to register and readback
- //
- switch(CheckBlock)
- {
- case HW90_BLOCK_MAC:
- RT_TRACE(COMP_ERR, "PHY_CheckBBRFOK(): Never Write 0x100 here!\n");
- break;
-
- case HW90_BLOCK_PHY0:
- case HW90_BLOCK_PHY1:
- write_nic_dword(priv, WriteAddr[CheckBlock], WriteData[i]);
- dwRegRead = read_nic_dword(priv, WriteAddr[CheckBlock]);
- break;
-
- case HW90_BLOCK_RF:
- WriteData[i] &= 0xfff;
- rtl8192_phy_SetRFReg(priv, eRFPath, WriteAddr[HW90_BLOCK_RF], bMask12Bits, WriteData[i]);
- // TODO: we should not delay for such a long time. Ask SD3
- mdelay(10);
- dwRegRead = rtl8192_phy_QueryRFReg(priv, eRFPath, WriteAddr[HW90_BLOCK_RF], bMaskDWord);
- mdelay(10);
- break;
-
- default:
- ret = RT_STATUS_FAILURE;
- break;
- }
-
-
- //
- // Check whether readback data is correct
- //
- if(dwRegRead != WriteData[i])
- {
- RT_TRACE(COMP_ERR, "====>error=====dwRegRead: %x, WriteData: %x\n", dwRegRead, WriteData[i]);
- ret = RT_STATUS_FAILURE;
- break;
- }
- }
-
- return ret;
-}
-
-
-/******************************************************************************
- *function: This function initialize BB&RF
- * input: net_device dev
- * output: none
- * return: none
- * notice: Initialization value may change all the time, so please make
- * sure it has been synced with the newest.
- * ***************************************************************************/
-static RT_STATUS rtl8192_BB_Config_ParaFile(struct r8192_priv *priv)
-{
- RT_STATUS rtStatus = RT_STATUS_SUCCESS;
-
- u8 bRegValue = 0, eCheckItem = 0;
- u32 dwRegValue = 0;
- /**************************************
- //<1>Initialize BaseBand
- **************************************/
-
- /*--set BB Global Reset--*/
- bRegValue = read_nic_byte(priv, BB_GLOBAL_RESET);
- write_nic_byte(priv, BB_GLOBAL_RESET,(bRegValue|BB_GLOBAL_RESET_BIT));
-
- /*---set BB reset Active---*/
- dwRegValue = read_nic_dword(priv, CPU_GEN);
- write_nic_dword(priv, CPU_GEN, (dwRegValue&(~CPU_GEN_BB_RST)));
-
- /*----Ckeck FPGAPHY0 and PHY1 board is OK----*/
- // TODO: this function should be removed on ASIC , Emily 2007.2.2
- for(eCheckItem=(HW90_BLOCK_E)HW90_BLOCK_PHY0; eCheckItem<=HW90_BLOCK_PHY1; eCheckItem++)
- {
- rtStatus = rtl8192_phy_checkBBAndRF(priv, (HW90_BLOCK_E)eCheckItem, (RF90_RADIO_PATH_E)0); //don't care RF path
- if(rtStatus != RT_STATUS_SUCCESS)
- {
- RT_TRACE((COMP_ERR | COMP_PHY), "PHY_RF8256_Config():Check PHY%d Fail!!\n", eCheckItem-1);
- return rtStatus;
- }
- }
- /*---- Set CCK and OFDM Block "OFF"----*/
- rtl8192_setBBreg(priv, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0);
- /*----BB Register Initilazation----*/
- //==m==>Set PHY REG From Header<==m==
- rtl8192_phyConfigBB(priv, BaseBand_Config_PHY_REG);
-
- /*----Set BB reset de-Active----*/
- dwRegValue = read_nic_dword(priv, CPU_GEN);
- write_nic_dword(priv, CPU_GEN, (dwRegValue|CPU_GEN_BB_RST));
-
- /*----BB AGC table Initialization----*/
- //==m==>Set PHY REG From Header<==m==
- rtl8192_phyConfigBB(priv, BaseBand_Config_AGC_TAB);
-
- if (priv->card_8192_version > VERSION_8190_BD)
- {
- if(priv->rf_type == RF_2T4R)
- {
- // Antenna gain offset from B/C/D to A
- dwRegValue = ( priv->AntennaTxPwDiff[2]<<8 |
- priv->AntennaTxPwDiff[1]<<4 |
- priv->AntennaTxPwDiff[0]);
- }
- else
- dwRegValue = 0x0; //Antenna gain offset doesn't make sense in RF 1T2R.
- rtl8192_setBBreg(priv, rFPGA0_TxGainStage,
- (bXBTxAGC|bXCTxAGC|bXDTxAGC), dwRegValue);
-
-
- //XSTALLCap
- dwRegValue = priv->CrystalCap;
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter1, bXtalCap92x, dwRegValue);
- }
-
- // Check if the CCK HighPower is turned ON.
- // This is used to calculate PWDB.
-// priv->bCckHighPower = (u8)(rtl8192_QueryBBReg(dev, rFPGA0_XA_HSSIParameter2, 0x200));
- return rtStatus;
-}
-/******************************************************************************
- *function: This function initialize BB&RF
- * input: net_device dev
- * output: none
- * return: none
- * notice: Initialization value may change all the time, so please make
- * sure it has been synced with the newest.
- * ***************************************************************************/
-RT_STATUS rtl8192_BBConfig(struct r8192_priv *priv)
-{
- rtl8192_InitBBRFRegDef(priv);
- //config BB&RF. As hardCode based initialization has not been well
- //implemented, so use file first.FIXME:should implement it for hardcode?
- return rtl8192_BB_Config_ParaFile(priv);
-}
-
-/******************************************************************************
- *function: This function obtains the initialization value of Tx power Level offset
- * input: net_device dev
- * output: none
- * return: none
- * ***************************************************************************/
-void rtl8192_phy_getTxPower(struct r8192_priv *priv)
-{
- priv->MCSTxPowerLevelOriginalOffset[0] =
- read_nic_dword(priv, rTxAGC_Rate18_06);
- priv->MCSTxPowerLevelOriginalOffset[1] =
- read_nic_dword(priv, rTxAGC_Rate54_24);
- priv->MCSTxPowerLevelOriginalOffset[2] =
- read_nic_dword(priv, rTxAGC_Mcs03_Mcs00);
- priv->MCSTxPowerLevelOriginalOffset[3] =
- read_nic_dword(priv, rTxAGC_Mcs07_Mcs04);
- priv->MCSTxPowerLevelOriginalOffset[4] =
- read_nic_dword(priv, rTxAGC_Mcs11_Mcs08);
- priv->MCSTxPowerLevelOriginalOffset[5] =
- read_nic_dword(priv, rTxAGC_Mcs15_Mcs12);
-
- // read rx initial gain
- priv->DefaultInitialGain[0] = read_nic_byte(priv, rOFDM0_XAAGCCore1);
- priv->DefaultInitialGain[1] = read_nic_byte(priv, rOFDM0_XBAGCCore1);
- priv->DefaultInitialGain[2] = read_nic_byte(priv, rOFDM0_XCAGCCore1);
- priv->DefaultInitialGain[3] = read_nic_byte(priv, rOFDM0_XDAGCCore1);
- RT_TRACE(COMP_INIT, "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
- priv->DefaultInitialGain[0], priv->DefaultInitialGain[1],
- priv->DefaultInitialGain[2], priv->DefaultInitialGain[3]);
-
- // read framesync
- priv->framesync = read_nic_byte(priv, rOFDM0_RxDetector3);
- priv->framesyncC34 = read_nic_dword(priv, rOFDM0_RxDetector2);
- RT_TRACE(COMP_INIT, "Default framesync (0x%x) = 0x%x\n",
- rOFDM0_RxDetector3, priv->framesync);
- // read SIFS (save the value read fome MACPHY_REG.txt)
- priv->SifsTime = read_nic_word(priv, SIFS);
-}
-
-/******************************************************************************
- *function: This function obtains the initialization value of Tx power Level offset
- * input: net_device dev
- * output: none
- * return: none
- * ***************************************************************************/
-void rtl8192_phy_setTxPower(struct r8192_priv *priv, u8 channel)
-{
- u8 powerlevel = 0,powerlevelOFDM24G = 0;
- char ant_pwr_diff;
- u32 u4RegValue;
-
- if(priv->epromtype == EPROM_93c46)
- {
- powerlevel = priv->TxPowerLevelCCK[channel-1];
- powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1];
- }
- else if(priv->epromtype == EPROM_93c56)
- {
- if(priv->rf_type == RF_1T2R)
- {
- powerlevel = priv->TxPowerLevelCCK_C[channel-1];
- powerlevelOFDM24G = priv->TxPowerLevelOFDM24G_C[channel-1];
- }
- else if(priv->rf_type == RF_2T4R)
- {
- // Mainly we use RF-A Tx Power to write the Tx Power registers, but the RF-C Tx
- // Power must be calculated by the antenna diff.
- // So we have to rewrite Antenna gain offset register here.
- powerlevel = priv->TxPowerLevelCCK_A[channel-1];
- powerlevelOFDM24G = priv->TxPowerLevelOFDM24G_A[channel-1];
-
- ant_pwr_diff = priv->TxPowerLevelOFDM24G_C[channel-1]
- -priv->TxPowerLevelOFDM24G_A[channel-1];
- ant_pwr_diff &= 0xf;
-
- priv->AntennaTxPwDiff[2] = 0;// RF-D, don't care
- priv->AntennaTxPwDiff[1] = (u8)(ant_pwr_diff);// RF-C
- priv->AntennaTxPwDiff[0] = 0;// RF-B, don't care
-
- // Antenna gain offset from B/C/D to A
- u4RegValue = ( priv->AntennaTxPwDiff[2]<<8 |
- priv->AntennaTxPwDiff[1]<<4 |
- priv->AntennaTxPwDiff[0]);
-
- rtl8192_setBBreg(priv, rFPGA0_TxGainStage,
- (bXBTxAGC|bXCTxAGC|bXDTxAGC), u4RegValue);
- }
- }
-#ifdef TODO
- //
- // CCX 2 S31, AP control of client transmit power:
- // 1. We shall not exceed Cell Power Limit as possible as we can.
- // 2. Tolerance is +/- 5dB.
- // 3. 802.11h Power Contraint takes higher precedence over CCX Cell Power Limit.
- //
- // TODO:
- // 1. 802.11h power contraint
- //
- // 071011, by rcnjko.
- //
- if( pMgntInfo->OpMode == RT_OP_MODE_INFRASTRUCTURE &&
- pMgntInfo->bWithCcxCellPwr &&
- channel == pMgntInfo->dot11CurrentChannelNumber)
- {
- u8 CckCellPwrIdx = DbmToTxPwrIdx(Adapter, WIRELESS_MODE_B, pMgntInfo->CcxCellPwr);
- u8 LegacyOfdmCellPwrIdx = DbmToTxPwrIdx(Adapter, WIRELESS_MODE_G, pMgntInfo->CcxCellPwr);
- u8 OfdmCellPwrIdx = DbmToTxPwrIdx(Adapter, WIRELESS_MODE_N_24G, pMgntInfo->CcxCellPwr);
-
- RT_TRACE(COMP_TXAGC, DBG_LOUD,
- ("CCX Cell Limit: %d dbm => CCK Tx power index : %d, Legacy OFDM Tx power index : %d, OFDM Tx power index: %d\n",
- pMgntInfo->CcxCellPwr, CckCellPwrIdx, LegacyOfdmCellPwrIdx, OfdmCellPwrIdx));
- RT_TRACE(COMP_TXAGC, DBG_LOUD,
- ("EEPROM channel(%d) => CCK Tx power index: %d, Legacy OFDM Tx power index : %d, OFDM Tx power index: %d\n",
- channel, powerlevel, powerlevelOFDM24G + pHalData->LegacyHTTxPowerDiff, powerlevelOFDM24G));
-
- // CCK
- if(powerlevel > CckCellPwrIdx)
- powerlevel = CckCellPwrIdx;
- // Legacy OFDM, HT OFDM
- if(powerlevelOFDM24G + pHalData->LegacyHTTxPowerDiff > OfdmCellPwrIdx)
- {
- if((OfdmCellPwrIdx - pHalData->LegacyHTTxPowerDiff) > 0)
- {
- powerlevelOFDM24G = OfdmCellPwrIdx - pHalData->LegacyHTTxPowerDiff;
- }
- else
- {
- LegacyOfdmCellPwrIdx = 0;
- }
- }
-
- RT_TRACE(COMP_TXAGC, DBG_LOUD,
- ("Altered CCK Tx power index : %d, Legacy OFDM Tx power index: %d, OFDM Tx power index: %d\n",
- powerlevel, powerlevelOFDM24G + pHalData->LegacyHTTxPowerDiff, powerlevelOFDM24G));
- }
-
- pHalData->CurrentCckTxPwrIdx = powerlevel;
- pHalData->CurrentOfdm24GTxPwrIdx = powerlevelOFDM24G;
-#endif
- PHY_SetRF8256CCKTxPower(priv, powerlevel); //need further implement
- PHY_SetRF8256OFDMTxPower(priv, powerlevelOFDM24G);
-}
-
-/******************************************************************************
- *function: This function check Rf chip to do RF config
- * input: net_device dev
- * output: none
- * return: only 8256 is supported
- * ***************************************************************************/
-RT_STATUS rtl8192_phy_RFConfig(struct r8192_priv *priv)
-{
- return PHY_RF8256_Config(priv);
-}
-
-/******************************************************************************
- *function: This function update Initial gain
- * input: net_device dev
- * output: none
- * return: As Windows has not implemented this, wait for complement
- * ***************************************************************************/
-void rtl8192_phy_updateInitGain(struct r8192_priv *priv)
-{
-}
-
-/******************************************************************************
- *function: This function read RF parameters from general head file, and do RF 3-wire
- * input: net_device dev
- * output: none
- * return: return code show if RF configuration is successful(0:pass, 1:fail)
- * Note: Delay may be required for RF configuration
- * ***************************************************************************/
-u8 rtl8192_phy_ConfigRFWithHeaderFile(struct r8192_priv *priv,
- RF90_RADIO_PATH_E eRFPath)
-{
-
- int i;
- //u32* pRFArray;
- u8 ret = 0;
-
- switch(eRFPath){
- case RF90_PATH_A:
- for(i = 0;i<RadioA_ArrayLength; i=i+2){
-
- if(Rtl819XRadioA_Array[i] == 0xfe){
- msleep(100);
- continue;
- }
- rtl8192_phy_SetRFReg(priv, eRFPath, Rtl819XRadioA_Array[i], bMask12Bits, Rtl819XRadioA_Array[i+1]);
- //msleep(1);
-
- }
- break;
- case RF90_PATH_B:
- for(i = 0;i<RadioB_ArrayLength; i=i+2){
-
- if(Rtl819XRadioB_Array[i] == 0xfe){
- msleep(100);
- continue;
- }
- rtl8192_phy_SetRFReg(priv, eRFPath, Rtl819XRadioB_Array[i], bMask12Bits, Rtl819XRadioB_Array[i+1]);
- //msleep(1);
-
- }
- break;
- case RF90_PATH_C:
- for(i = 0;i<RadioC_ArrayLength; i=i+2){
-
- if(Rtl819XRadioC_Array[i] == 0xfe){
- msleep(100);
- continue;
- }
- rtl8192_phy_SetRFReg(priv, eRFPath, Rtl819XRadioC_Array[i], bMask12Bits, Rtl819XRadioC_Array[i+1]);
- //msleep(1);
-
- }
- break;
- case RF90_PATH_D:
- for(i = 0;i<RadioD_ArrayLength; i=i+2){
-
- if(Rtl819XRadioD_Array[i] == 0xfe){
- msleep(100);
- continue;
- }
- rtl8192_phy_SetRFReg(priv, eRFPath, Rtl819XRadioD_Array[i], bMask12Bits, Rtl819XRadioD_Array[i+1]);
- //msleep(1);
-
- }
- break;
- default:
- break;
- }
-
- return ret;
-
-}
-/******************************************************************************
- *function: This function set Tx Power of the channel
- * input: struct net_device *dev
- * u8 channel
- * output: none
- * return: none
- * Note:
- * ***************************************************************************/
-static void rtl8192_SetTxPowerLevel(struct r8192_priv *priv, u8 channel)
-{
- u8 powerlevel = priv->TxPowerLevelCCK[channel-1];
- u8 powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1];
-
- PHY_SetRF8256CCKTxPower(priv, powerlevel);
- PHY_SetRF8256OFDMTxPower(priv, powerlevelOFDM24G);
-}
-
-/****************************************************************************************
- *function: This function set command table variable(struct SwChnlCmd).
- * input: SwChnlCmd* CmdTable //table to be set.
- * u32 CmdTableIdx //variable index in table to be set
- * u32 CmdTableSz //table size.
- * SwChnlCmdID CmdID //command ID to set.
- * u32 Para1
- * u32 Para2
- * u32 msDelay
- * output:
- * return: true if finished, false otherwise
- * Note:
- * ************************************************************************************/
-static u8 rtl8192_phy_SetSwChnlCmdArray(
- SwChnlCmd* CmdTable,
- u32 CmdTableIdx,
- u32 CmdTableSz,
- SwChnlCmdID CmdID,
- u32 Para1,
- u32 Para2,
- u32 msDelay
- )
-{
- SwChnlCmd* pCmd;
-
- if(CmdTable == NULL)
- {
- RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): CmdTable cannot be NULL.\n");
- return false;
- }
- if(CmdTableIdx >= CmdTableSz)
- {
- RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): Access invalid index, please check size of the table, CmdTableIdx:%d, CmdTableSz:%d\n",
- CmdTableIdx, CmdTableSz);
- return false;
- }
-
- pCmd = CmdTable + CmdTableIdx;
- pCmd->CmdID = CmdID;
- pCmd->Para1 = Para1;
- pCmd->Para2 = Para2;
- pCmd->msDelay = msDelay;
-
- return true;
-}
-/******************************************************************************
- *function: This function set channel step by step
- * input: struct net_device *dev
- * u8 channel
- * u8* stage //3 stages
- * u8* step //
- * u32* delay //whether need to delay
- * output: store new stage, step and delay for next step(combine with function above)
- * return: true if finished, false otherwise
- * Note: Wait for simpler function to replace it //wb
- * ***************************************************************************/
-static u8 rtl8192_phy_SwChnlStepByStep(struct r8192_priv *priv, u8 channel,
- u8* stage, u8* step, u32* delay)
-{
-// PCHANNEL_ACCESS_SETTING pChnlAccessSetting;
- SwChnlCmd PreCommonCmd[MAX_PRECMD_CNT];
- u32 PreCommonCmdCnt;
- SwChnlCmd PostCommonCmd[MAX_POSTCMD_CNT];
- u32 PostCommonCmdCnt;
- SwChnlCmd RfDependCmd[MAX_RFDEPENDCMD_CNT];
- u32 RfDependCmdCnt;
- SwChnlCmd *CurrentCmd = NULL;
- //RF90_RADIO_PATH_E eRFPath;
- u8 eRFPath;
-// u32 RfRetVal;
-// u8 RetryCnt;
-
- RT_TRACE(COMP_TRACE, "====>%s()====stage:%d, step:%d, channel:%d\n", __FUNCTION__, *stage, *step, channel);
-// RT_ASSERT(IsLegalChannel(Adapter, channel), ("illegal channel: %d\n", channel));
-
-#ifdef ENABLE_DOT11D
- if (!IsLegalChannel(priv->ieee80211, channel))
- {
- RT_TRACE(COMP_ERR, "=============>set to illegal channel:%d\n", channel);
- return true; //return true to tell upper caller function this channel setting is finished! Or it will in while loop.
- }
-#endif
-
- //for(eRFPath = RF90_PATH_A; eRFPath <pHalData->NumTotalRFPath; eRFPath++)
- //for(eRFPath = 0; eRFPath <RF90_PATH_MAX; eRFPath++)
- {
- //if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
- // return false;
- // <1> Fill up pre common command.
- PreCommonCmdCnt = 0;
- rtl8192_phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT,
- CmdID_SetTxPowerLevel, 0, 0, 0);
- rtl8192_phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT,
- CmdID_End, 0, 0, 0);
-
- // <2> Fill up post common command.
- PostCommonCmdCnt = 0;
-
- rtl8192_phy_SetSwChnlCmdArray(PostCommonCmd, PostCommonCmdCnt++, MAX_POSTCMD_CNT,
- CmdID_End, 0, 0, 0);
-
- // <3> Fill up RF dependent command.
- RfDependCmdCnt = 0;
-
- // TEST!! This is not the table for 8256!!
- if (!(channel >= 1 && channel <= 14))
- {
- RT_TRACE(COMP_ERR, "illegal channel for Zebra 8256: %d\n", channel);
- return false;
- }
- rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT,
- CmdID_RF_WriteReg, rZebra1_Channel, channel, 10);
- rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT,
- CmdID_End, 0, 0, 0);
-
- do{
- switch(*stage)
- {
- case 0:
- CurrentCmd=&PreCommonCmd[*step];
- break;
- case 1:
- CurrentCmd=&RfDependCmd[*step];
- break;
- case 2:
- CurrentCmd=&PostCommonCmd[*step];
- break;
- }
-
- if(CurrentCmd->CmdID==CmdID_End)
- {
- if((*stage)==2)
- {
- return true;
- }
- else
- {
- (*stage)++;
- (*step)=0;
- continue;
- }
- }
-
- switch(CurrentCmd->CmdID)
- {
- case CmdID_SetTxPowerLevel:
- if(priv->card_8192_version > (u8)VERSION_8190_BD) //xiong: consider it later!
- rtl8192_SetTxPowerLevel(priv, channel);
- break;
- case CmdID_WritePortUlong:
- write_nic_dword(priv, CurrentCmd->Para1, CurrentCmd->Para2);
- break;
- case CmdID_WritePortUshort:
- write_nic_word(priv, CurrentCmd->Para1, (u16)CurrentCmd->Para2);
- break;
- case CmdID_WritePortUchar:
- write_nic_byte(priv, CurrentCmd->Para1, (u8)CurrentCmd->Para2);
- break;
- case CmdID_RF_WriteReg:
- for(eRFPath = 0; eRFPath <priv->NumTotalRFPath; eRFPath++)
- rtl8192_phy_SetRFReg(priv, (RF90_RADIO_PATH_E)eRFPath, CurrentCmd->Para1, bMask12Bits, CurrentCmd->Para2<<7);
- break;
- default:
- break;
- }
-
- break;
- }while(true);
- }/*for(Number of RF paths)*/
-
- (*delay)=CurrentCmd->msDelay;
- (*step)++;
- return false;
-}
-
-/******************************************************************************
- *function: This function does acturally set channel work
- * input: struct net_device *dev
- * u8 channel
- * output: none
- * return: noin
- * Note: We should not call this function directly
- * ***************************************************************************/
-static void rtl8192_phy_FinishSwChnlNow(struct r8192_priv *priv, u8 channel)
-{
- u32 delay = 0;
-
- while (!rtl8192_phy_SwChnlStepByStep(priv, channel, &priv->SwChnlStage, &priv->SwChnlStep, &delay))
- {
- if(delay>0)
- msleep(delay);//or mdelay? need further consideration
- if(!priv->up)
- break;
- }
-}
-/******************************************************************************
- *function: Callback routine of the work item for switch channel.
- * input:
- *
- * output: none
- * return: noin
- * ***************************************************************************/
-void rtl8192_SwChnl_WorkItem(struct r8192_priv *priv)
-{
- RT_TRACE(COMP_TRACE, "==> SwChnlCallback819xUsbWorkItem()\n");
-
- RT_TRACE(COMP_TRACE, "=====>--%s(), set chan:%d, priv:%p\n", __FUNCTION__, priv->chan, priv);
-
- rtl8192_phy_FinishSwChnlNow(priv, priv->chan);
-
- RT_TRACE(COMP_TRACE, "<== SwChnlCallback819xUsbWorkItem()\n");
-}
-
-/******************************************************************************
- *function: This function scheduled actural workitem to set channel
- * input: net_device dev
- * u8 channel //channel to set
- * output: none
- * return: return code show if workitem is scheduled(1:pass, 0:fail)
- * Note: Delay may be required for RF configuration
- * ***************************************************************************/
-u8 rtl8192_phy_SwChnl(struct ieee80211_device *ieee80211, u8 channel)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
-
- RT_TRACE(COMP_PHY, "=====>%s()\n", __FUNCTION__);
- if(!priv->up)
- return false;
- if(priv->SwChnlInProgress)
- return false;
-
-// if(pHalData->SetBWModeInProgress)
-// return;
-
- //--------------------------------------------
- switch(priv->ieee80211->mode)
- {
- case WIRELESS_MODE_A:
- case WIRELESS_MODE_N_5G:
- if (channel<=14){
- RT_TRACE(COMP_ERR, "WIRELESS_MODE_A but channel<=14\n");
- return false;
- }
- break;
- case WIRELESS_MODE_B:
- if (channel>14){
- RT_TRACE(COMP_ERR, "WIRELESS_MODE_B but channel>14\n");
- return false;
- }
- break;
- case WIRELESS_MODE_G:
- case WIRELESS_MODE_N_24G:
- if (channel>14){
- RT_TRACE(COMP_ERR, "WIRELESS_MODE_G but channel>14\n");
- return false;
- }
- break;
- }
- //--------------------------------------------
-
- priv->SwChnlInProgress = true;
- if(channel == 0)
- channel = 1;
-
- priv->chan=channel;
-
- priv->SwChnlStage=0;
- priv->SwChnlStep=0;
- if (priv->up)
- rtl8192_SwChnl_WorkItem(priv);
-
- priv->SwChnlInProgress = false;
- return true;
-}
-
-static void CCK_Tx_Power_Track_BW_Switch_TSSI(struct r8192_priv *priv)
-{
- switch(priv->CurrentChannelBW)
- {
- /* 20 MHz channel*/
- case HT_CHANNEL_WIDTH_20:
- //added by vivi, cck,tx power track, 20080703
- priv->CCKPresentAttentuation =
- priv->CCKPresentAttentuation_20Mdefault + priv->CCKPresentAttentuation_difference;
-
- if(priv->CCKPresentAttentuation > (CCKTxBBGainTableLength-1))
- priv->CCKPresentAttentuation = CCKTxBBGainTableLength-1;
- if(priv->CCKPresentAttentuation < 0)
- priv->CCKPresentAttentuation = 0;
-
- RT_TRACE(COMP_POWER_TRACKING, "20M, priv->CCKPresentAttentuation = %d\n", priv->CCKPresentAttentuation);
-
- if(priv->ieee80211->current_network.channel== 14 && !priv->bcck_in_ch14)
- {
- priv->bcck_in_ch14 = TRUE;
- dm_cck_txpower_adjust(priv, priv->bcck_in_ch14);
- }
- else if(priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14)
- {
- priv->bcck_in_ch14 = FALSE;
- dm_cck_txpower_adjust(priv, priv->bcck_in_ch14);
- }
- else
- dm_cck_txpower_adjust(priv, priv->bcck_in_ch14);
- break;
-
- /* 40 MHz channel*/
- case HT_CHANNEL_WIDTH_20_40:
- //added by vivi, cck,tx power track, 20080703
- priv->CCKPresentAttentuation =
- priv->CCKPresentAttentuation_40Mdefault + priv->CCKPresentAttentuation_difference;
-
- RT_TRACE(COMP_POWER_TRACKING, "40M, priv->CCKPresentAttentuation = %d\n", priv->CCKPresentAttentuation);
- if(priv->CCKPresentAttentuation > (CCKTxBBGainTableLength-1))
- priv->CCKPresentAttentuation = CCKTxBBGainTableLength-1;
- if(priv->CCKPresentAttentuation < 0)
- priv->CCKPresentAttentuation = 0;
-
- if(priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14)
- {
- priv->bcck_in_ch14 = TRUE;
- dm_cck_txpower_adjust(priv, priv->bcck_in_ch14);
- }
- else if(priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14)
- {
- priv->bcck_in_ch14 = FALSE;
- dm_cck_txpower_adjust(priv, priv->bcck_in_ch14);
- }
- else
- dm_cck_txpower_adjust(priv, priv->bcck_in_ch14);
- break;
- }
-}
-
-static void CCK_Tx_Power_Track_BW_Switch_ThermalMeter(struct r8192_priv *priv)
-{
- if(priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14)
- priv->bcck_in_ch14 = TRUE;
- else if(priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14)
- priv->bcck_in_ch14 = FALSE;
-
- //write to default index and tx power track will be done in dm.
- switch(priv->CurrentChannelBW)
- {
- /* 20 MHz channel*/
- case HT_CHANNEL_WIDTH_20:
- if(priv->Record_CCK_20Mindex == 0)
- priv->Record_CCK_20Mindex = 6; //set default value.
- priv->CCK_index = priv->Record_CCK_20Mindex;//6;
- RT_TRACE(COMP_POWER_TRACKING, "20MHz, CCK_Tx_Power_Track_BW_Switch_ThermalMeter(),CCK_index = %d\n", priv->CCK_index);
- break;
-
- /* 40 MHz channel*/
- case HT_CHANNEL_WIDTH_20_40:
- priv->CCK_index = priv->Record_CCK_40Mindex;//0;
- RT_TRACE(COMP_POWER_TRACKING, "40MHz, CCK_Tx_Power_Track_BW_Switch_ThermalMeter(), CCK_index = %d\n", priv->CCK_index);
- break;
- }
- dm_cck_txpower_adjust(priv, priv->bcck_in_ch14);
-}
-
-static void CCK_Tx_Power_Track_BW_Switch(struct r8192_priv *priv)
-{
-
- //if(pHalData->bDcut == TRUE)
- if(priv->IC_Cut >= IC_VersionCut_D)
- CCK_Tx_Power_Track_BW_Switch_TSSI(priv);
- else
- CCK_Tx_Power_Track_BW_Switch_ThermalMeter(priv);
-}
-
-
-//
-/******************************************************************************
- *function: Callback routine of the work item for set bandwidth mode.
- * input: struct net_device *dev
- * HT_CHANNEL_WIDTH Bandwidth //20M or 40M
- * HT_EXTCHNL_OFFSET Offset //Upper, Lower, or Don't care
- * output: none
- * return: none
- * Note: I doubt whether SetBWModeInProgress flag is necessary as we can
- * test whether current work in the queue or not.//do I?
- * ***************************************************************************/
-void rtl8192_SetBWModeWorkItem(struct r8192_priv *priv)
-{
- u8 regBwOpMode;
-
- RT_TRACE(COMP_SWBW, "==>rtl8192_SetBWModeWorkItem() Switch to %s bandwidth\n",
- priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20?"20MHz":"40MHz")
-
-
- if(!priv->up)
- {
- priv->SetBWModeInProgress= false;
- return;
- }
- //<1>Set MAC register
- regBwOpMode = read_nic_byte(priv, BW_OPMODE);
-
- switch(priv->CurrentChannelBW)
- {
- case HT_CHANNEL_WIDTH_20:
- regBwOpMode |= BW_OPMODE_20MHZ;
- // 2007/02/07 Mark by Emily because we have not verify whether this register works
- write_nic_byte(priv, BW_OPMODE, regBwOpMode);
- break;
-
- case HT_CHANNEL_WIDTH_20_40:
- regBwOpMode &= ~BW_OPMODE_20MHZ;
- // 2007/02/07 Mark by Emily because we have not verify whether this register works
- write_nic_byte(priv, BW_OPMODE, regBwOpMode);
- break;
-
- default:
- RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown Bandwidth: %#X\n",priv->CurrentChannelBW);
- break;
- }
-
- //<2>Set PHY related register
- switch(priv->CurrentChannelBW)
- {
- case HT_CHANNEL_WIDTH_20:
- // Add by Vivi 20071119
- rtl8192_setBBreg(priv, rFPGA0_RFMOD, bRFMOD, 0x0);
- rtl8192_setBBreg(priv, rFPGA1_RFMOD, bRFMOD, 0x0);
-// rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 1);
-
- // Correct the tx power for CCK rate in 20M. Suggest by YN, 20071207
-// write_nic_dword(dev, rCCK0_TxFilter1, 0x1a1b0000);
-// write_nic_dword(dev, rCCK0_TxFilter2, 0x090e1317);
-// write_nic_dword(dev, rCCK0_DebugPort, 0x00000204);
- if(!priv->btxpower_tracking)
- {
- write_nic_dword(priv, rCCK0_TxFilter1, 0x1a1b0000);
- write_nic_dword(priv, rCCK0_TxFilter2, 0x090e1317);
- write_nic_dword(priv, rCCK0_DebugPort, 0x00000204);
- }
- else
- CCK_Tx_Power_Track_BW_Switch(priv);
-
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter1, 0x00100000, 1);
- break;
- case HT_CHANNEL_WIDTH_20_40:
- // Add by Vivi 20071119
- rtl8192_setBBreg(priv, rFPGA0_RFMOD, bRFMOD, 0x1);
- rtl8192_setBBreg(priv, rFPGA1_RFMOD, bRFMOD, 0x1);
- //rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand, (priv->nCur40MhzPrimeSC>>1));
- //rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0);
- //rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00, priv->nCur40MhzPrimeSC);
-
- // Correct the tx power for CCK rate in 40M. Suggest by YN, 20071207
- //write_nic_dword(dev, rCCK0_TxFilter1, 0x35360000);
- //write_nic_dword(dev, rCCK0_TxFilter2, 0x121c252e);
- //write_nic_dword(dev, rCCK0_DebugPort, 0x00000409);
- if(!priv->btxpower_tracking)
- {
- write_nic_dword(priv, rCCK0_TxFilter1, 0x35360000);
- write_nic_dword(priv, rCCK0_TxFilter2, 0x121c252e);
- write_nic_dword(priv, rCCK0_DebugPort, 0x00000409);
- }
- else
- CCK_Tx_Power_Track_BW_Switch(priv);
-
- // Set Control channel to upper or lower. These settings are required only for 40MHz
- rtl8192_setBBreg(priv, rCCK0_System, bCCKSideBand, (priv->nCur40MhzPrimeSC>>1));
- rtl8192_setBBreg(priv, rOFDM1_LSTF, 0xC00, priv->nCur40MhzPrimeSC);
-
-
- rtl8192_setBBreg(priv, rFPGA0_AnalogParameter1, 0x00100000, 0);
- break;
- default:
- RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown Bandwidth: %#X\n" ,priv->CurrentChannelBW);
- break;
-
- }
- //Skip over setting of J-mode in BB register here. Default value is "None J mode". Emily 20070315
-
- //<3>Set RF related register
- PHY_SetRF8256Bandwidth(priv, priv->CurrentChannelBW);
-
- atomic_dec(&(priv->ieee80211->atm_swbw));
- priv->SetBWModeInProgress= false;
-
- RT_TRACE(COMP_SWBW, "<==SetBWMode819xUsb()\n");
-}
-
-/******************************************************************************
- *function: This function schedules bandwidth switch work.
- * input: struct net_device *dev
- * HT_CHANNEL_WIDTH Bandwidth //20M or 40M
- * HT_EXTCHNL_OFFSET Offset //Upper, Lower, or Don't care
- * output: none
- * return: none
- * Note: I doubt whether SetBWModeInProgress flag is necessary as we can
- * test whether current work in the queue or not.//do I?
- * ***************************************************************************/
-void rtl8192_SetBWMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset)
-{
- struct r8192_priv *priv = ieee80211_priv(ieee->dev);
-
-
- if(priv->SetBWModeInProgress)
- return;
-
- atomic_inc(&(priv->ieee80211->atm_swbw));
- priv->SetBWModeInProgress= true;
-
- priv->CurrentChannelBW = Bandwidth;
-
- if(Offset==HT_EXTCHNL_OFFSET_LOWER)
- priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_UPPER;
- else if(Offset==HT_EXTCHNL_OFFSET_UPPER)
- priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_LOWER;
- else
- priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-
- //queue_work(priv->priv_wq, &(priv->SetBWModeWorkItem));
- // schedule_work(&(priv->SetBWModeWorkItem));
- rtl8192_SetBWModeWorkItem(priv);
-
-}
-
-
-void InitialGain819xPci(struct ieee80211_device *ieee, u8 Operation)
-{
-#define SCAN_RX_INITIAL_GAIN 0x17
-#define POWER_DETECTION_TH 0x08
- struct r8192_priv *priv = ieee80211_priv(ieee->dev);
- u32 BitMask;
- u8 initial_gain;
-
- if(priv->up)
- {
- switch(Operation)
- {
- case IG_Backup:
- RT_TRACE(COMP_SCAN, "IG_Backup, backup the initial gain.\n");
- initial_gain = SCAN_RX_INITIAL_GAIN;//pHalData->DefaultInitialGain[0];//
- BitMask = bMaskByte0;
- if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
- rtl8192_setBBreg(priv, UFWP, bMaskByte1, 0x8); // FW DIG OFF
- priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(priv, rOFDM0_XAAGCCore1, BitMask);
- priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(priv, rOFDM0_XBAGCCore1, BitMask);
- priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(priv, rOFDM0_XCAGCCore1, BitMask);
- priv->initgain_backup.xdagccore1 = (u8)rtl8192_QueryBBReg(priv, rOFDM0_XDAGCCore1, BitMask);
- BitMask = bMaskByte2;
- priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(priv, rCCK0_CCA, BitMask);
-
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc50 is %x\n",priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc58 is %x\n",priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc60 is %x\n",priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc68 is %x\n",priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xa0a is %x\n",priv->initgain_backup.cca);
-
- RT_TRACE(COMP_SCAN, "Write scan initial gain = 0x%x \n", initial_gain);
- write_nic_byte(priv, rOFDM0_XAAGCCore1, initial_gain);
- write_nic_byte(priv, rOFDM0_XBAGCCore1, initial_gain);
- write_nic_byte(priv, rOFDM0_XCAGCCore1, initial_gain);
- write_nic_byte(priv, rOFDM0_XDAGCCore1, initial_gain);
- RT_TRACE(COMP_SCAN, "Write scan 0xa0a = 0x%x \n", POWER_DETECTION_TH);
- write_nic_byte(priv, 0xa0a, POWER_DETECTION_TH);
- break;
- case IG_Restore:
- RT_TRACE(COMP_SCAN, "IG_Restore, restore the initial gain.\n");
- BitMask = 0x7f; //Bit0~ Bit6
- if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
- rtl8192_setBBreg(priv, UFWP, bMaskByte1, 0x8); // FW DIG OFF
-
- rtl8192_setBBreg(priv, rOFDM0_XAAGCCore1, BitMask, (u32)priv->initgain_backup.xaagccore1);
- rtl8192_setBBreg(priv, rOFDM0_XBAGCCore1, BitMask, (u32)priv->initgain_backup.xbagccore1);
- rtl8192_setBBreg(priv, rOFDM0_XCAGCCore1, BitMask, (u32)priv->initgain_backup.xcagccore1);
- rtl8192_setBBreg(priv, rOFDM0_XDAGCCore1, BitMask, (u32)priv->initgain_backup.xdagccore1);
- BitMask = bMaskByte2;
- rtl8192_setBBreg(priv, rCCK0_CCA, BitMask, (u32)priv->initgain_backup.cca);
-
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc50 is %x\n",priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc58 is %x\n",priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc60 is %x\n",priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc68 is %x\n",priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xa0a is %x\n",priv->initgain_backup.cca);
-
- rtl8192_phy_setTxPower(priv, priv->ieee80211->current_network.channel);
-
-
- if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
- rtl8192_setBBreg(priv, UFWP, bMaskByte1, 0x1); // FW DIG ON
- break;
- default:
- RT_TRACE(COMP_SCAN, "Unknown IG Operation.\n");
- break;
- }
- }
-}
-
diff --git a/drivers/staging/rtl8192e/r819xE_phy.h b/drivers/staging/rtl8192e/r819xE_phy.h
deleted file mode 100644
index 496e76fbadb..00000000000
--- a/drivers/staging/rtl8192e/r819xE_phy.h
+++ /dev/null
@@ -1,131 +0,0 @@
-#ifndef _R819XU_PHY_H
-#define _R819XU_PHY_H
-
-/* Channel switch: the size of command tables for switch channel */
-#define MAX_PRECMD_CNT 16
-#define MAX_RFDEPENDCMD_CNT 16
-#define MAX_POSTCMD_CNT 16
-
-#define MACPHY_Array_PGLength 30
-#define Rtl819XMACPHY_Array_PG Rtl8192PciEMACPHY_Array_PG
-#define Rtl819XMACPHY_Array Rtl8192PciEMACPHY_Array
-#define RadioC_ArrayLength 1
-#define RadioD_ArrayLength 1
-#define Rtl819XRadioA_Array Rtl8192PciERadioA_Array
-#define Rtl819XRadioB_Array Rtl8192PciERadioB_Array
-#define Rtl819XRadioC_Array Rtl8192PciERadioC_Array
-#define Rtl819XRadioD_Array Rtl8192PciERadioD_Array
-#define Rtl819XAGCTAB_Array Rtl8192PciEAGCTAB_Array
-#define PHY_REGArrayLength 1
-#define Rtl819XPHY_REGArray Rtl8192PciEPHY_REGArray
-#define PHY_REG_1T2RArrayLength 296
-#define Rtl819XPHY_REG_1T2RArray Rtl8192PciEPHY_REG_1T2RArray
-
-#define AGCTAB_ArrayLength 384
-#define MACPHY_ArrayLength 18
-
-#define RadioA_ArrayLength 246
-#define RadioB_ArrayLength 78
-
-
-typedef enum _SwChnlCmdID {
- CmdID_End,
- CmdID_SetTxPowerLevel,
- CmdID_BBRegWrite10,
- CmdID_WritePortUlong,
- CmdID_WritePortUshort,
- CmdID_WritePortUchar,
- CmdID_RF_WriteReg,
-} SwChnlCmdID;
-
-/* switch channel data structure */
-typedef struct _SwChnlCmd {
- SwChnlCmdID CmdID;
- u32 Para1;
- u32 Para2;
- u32 msDelay;
-} __attribute__ ((packed)) SwChnlCmd;
-
-extern u32 rtl819XMACPHY_Array_PG[];
-extern u32 rtl819XPHY_REG_1T2RArray[];
-extern u32 rtl819XAGCTAB_Array[];
-extern u32 rtl819XRadioA_Array[];
-extern u32 rtl819XRadioB_Array[];
-extern u32 rtl819XRadioC_Array[];
-extern u32 rtl819XRadioD_Array[];
-
-typedef enum _HW90_BLOCK {
- HW90_BLOCK_MAC = 0,
- HW90_BLOCK_PHY0 = 1,
- HW90_BLOCK_PHY1 = 2,
- HW90_BLOCK_RF = 3,
- /* Don't ever use this. */
- HW90_BLOCK_MAXIMUM = 4,
-} HW90_BLOCK_E, *PHW90_BLOCK_E;
-
-typedef enum _RF90_RADIO_PATH {
- /* Radio paths */
- RF90_PATH_A = 0,
- RF90_PATH_B = 1,
- RF90_PATH_C = 2,
- RF90_PATH_D = 3,
-
- /* Max RF number 92 support */
- RF90_PATH_MAX
-} RF90_RADIO_PATH_E, *PRF90_RADIO_PATH_E;
-
-#define bMaskByte0 0xff
-#define bMaskByte1 0xff00
-#define bMaskByte2 0xff0000
-#define bMaskByte3 0xff000000
-#define bMaskHWord 0xffff0000
-#define bMaskLWord 0x0000ffff
-#define bMaskDWord 0xffffffff
-
-u8 rtl8192_phy_CheckIsLegalRFPath(struct r8192_priv *priv, u32 eRFPath);
-
-void rtl8192_setBBreg(struct r8192_priv *priv, u32 dwRegAddr,
- u32 dwBitMask, u32 dwData);
-
-u32 rtl8192_QueryBBReg(struct r8192_priv *priv, u32 dwRegAddr,
- u32 dwBitMask);
-
-void rtl8192_phy_SetRFReg(struct r8192_priv *priv,
- RF90_RADIO_PATH_E eRFPath, u32 RegAddr,
- u32 BitMask, u32 Data);
-
-u32 rtl8192_phy_QueryRFReg(struct r8192_priv *priv,
- RF90_RADIO_PATH_E eRFPath, u32 RegAddr, u32 BitMask);
-
-void rtl8192_phy_configmac(struct r8192_priv *priv);
-
-void rtl8192_phyConfigBB(struct r8192_priv *priv, u8 ConfigType);
-
-RT_STATUS rtl8192_phy_checkBBAndRF(struct r8192_priv *priv,
- HW90_BLOCK_E CheckBlock, RF90_RADIO_PATH_E eRFPath);
-
-RT_STATUS rtl8192_BBConfig(struct r8192_priv *priv);
-
-void rtl8192_phy_getTxPower(struct r8192_priv *priv);
-
-void rtl8192_phy_setTxPower(struct r8192_priv *priv, u8 channel);
-
-RT_STATUS rtl8192_phy_RFConfig(struct r8192_priv *priv);
-
-void rtl8192_phy_updateInitGain(struct r8192_priv *priv);
-
-u8 rtl8192_phy_ConfigRFWithHeaderFile(struct r8192_priv *priv,
- RF90_RADIO_PATH_E eRFPath);
-
-u8 rtl8192_phy_SwChnl(struct ieee80211_device *ieee80211, u8 channel);
-
-void rtl8192_SetBWMode(struct ieee80211_device *ieee80211,
- HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
-
-void rtl8192_SwChnl_WorkItem(struct r8192_priv *priv);
-
-void rtl8192_SetBWModeWorkItem(struct r8192_priv *priv);
-
-void InitialGain819xPci(struct ieee80211_device *ieee, u8 Operation);
-
-#endif /* _R819XU_PHY_H */
diff --git a/drivers/staging/rtl8192e/rtl819x_BA.h b/drivers/staging/rtl8192e/rtl819x_BA.h
new file mode 100644
index 00000000000..613e14c12df
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl819x_BA.h
@@ -0,0 +1,77 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef _BATYPE_H_
+#define _BATYPE_H_
+
+#define TOTAL_TXBA_NUM 16
+#define TOTAL_RXBA_NUM 16
+
+#define BA_SETUP_TIMEOUT 200
+#define BA_INACT_TIMEOUT 60000
+
+#define BA_POLICY_DELAYED 0
+#define BA_POLICY_IMMEDIATE 1
+
+#define ADDBA_STATUS_SUCCESS 0
+#define ADDBA_STATUS_REFUSED 37
+#define ADDBA_STATUS_INVALID_PARAM 38
+
+#define DELBA_REASON_QSTA_LEAVING 36
+#define DELBA_REASON_END_BA 37
+#define DELBA_REASON_UNKNOWN_BA 38
+#define DELBA_REASON_TIMEOUT 39
+union sequence_control {
+ u16 ShortData;
+ struct {
+ u16 FragNum:4;
+ u16 SeqNum:12;
+ } field;
+};
+
+union ba_param_set {
+ u8 charData[2];
+ u16 shortData;
+ struct {
+ u16 AMSDU_Support:1;
+ u16 BAPolicy:1;
+ u16 TID:4;
+ u16 BufferSize:10;
+ } field;
+};
+
+union delba_param_set {
+ u8 charData[2];
+ u16 shortData;
+ struct {
+ u16 Reserved:11;
+ u16 Initiator:1;
+ u16 TID:4;
+ } field;
+};
+
+struct ba_record {
+ struct timer_list Timer;
+ u8 bValid;
+ u8 DialogToken;
+ union ba_param_set BaParamSet;
+ u16 BaTimeoutValue;
+ union sequence_control BaStartSeqCtrl;
+};
+
+#endif
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
new file mode 100644
index 00000000000..8b9d85c48be
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -0,0 +1,566 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#include "rtllib.h"
+#include "rtl819x_BA.h"
+#include "rtl_core.h"
+
+static void ActivateBAEntry(struct rtllib_device *ieee, struct ba_record *pBA,
+ u16 Time)
+{
+ pBA->bValid = true;
+ if (Time != 0)
+ mod_timer(&pBA->Timer, jiffies + MSECS(Time));
+}
+
+static void DeActivateBAEntry(struct rtllib_device *ieee, struct ba_record *pBA)
+{
+ pBA->bValid = false;
+ del_timer_sync(&pBA->Timer);
+}
+
+static u8 TxTsDeleteBA(struct rtllib_device *ieee, struct tx_ts_record *pTxTs)
+{
+ struct ba_record *pAdmittedBa = &pTxTs->TxAdmittedBARecord;
+ struct ba_record *pPendingBa = &pTxTs->TxPendingBARecord;
+ u8 bSendDELBA = false;
+
+ if (pPendingBa->bValid) {
+ DeActivateBAEntry(ieee, pPendingBa);
+ bSendDELBA = true;
+ }
+
+ if (pAdmittedBa->bValid) {
+ DeActivateBAEntry(ieee, pAdmittedBa);
+ bSendDELBA = true;
+ }
+ return bSendDELBA;
+}
+
+static u8 RxTsDeleteBA(struct rtllib_device *ieee, struct rx_ts_record *pRxTs)
+{
+ struct ba_record *pBa = &pRxTs->RxAdmittedBARecord;
+ u8 bSendDELBA = false;
+
+ if (pBa->bValid) {
+ DeActivateBAEntry(ieee, pBa);
+ bSendDELBA = true;
+ }
+
+ return bSendDELBA;
+}
+
+void ResetBaEntry(struct ba_record *pBA)
+{
+ pBA->bValid = false;
+ pBA->BaParamSet.shortData = 0;
+ pBA->BaTimeoutValue = 0;
+ pBA->DialogToken = 0;
+ pBA->BaStartSeqCtrl.ShortData = 0;
+}
+static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
+ struct ba_record *pBA,
+ u16 StatusCode, u8 type)
+{
+ struct sk_buff *skb = NULL;
+ struct rtllib_hdr_3addr *BAReq = NULL;
+ u8 *tag = NULL;
+ u16 tmp = 0;
+ u16 len = ieee->tx_headroom + 9;
+
+ RTLLIB_DEBUG(RTLLIB_DL_TRACE | RTLLIB_DL_BA, "========>%s(), frame(%d)"
+ " sentd to: %pM, ieee->dev:%p\n", __func__,
+ type, Dst, ieee->dev);
+ if (pBA == NULL) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "pBA is NULL\n");
+ return NULL;
+ }
+ skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
+ if (skb == NULL) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "can't alloc skb for ADDBA_REQ\n");
+ return NULL;
+ }
+
+ memset(skb->data, 0, sizeof(struct rtllib_hdr_3addr));
+
+ skb_reserve(skb, ieee->tx_headroom);
+
+ BAReq = (struct rtllib_hdr_3addr *)skb_put(skb,
+ sizeof(struct rtllib_hdr_3addr));
+
+ memcpy(BAReq->addr1, Dst, ETH_ALEN);
+ memcpy(BAReq->addr2, ieee->dev->dev_addr, ETH_ALEN);
+
+ memcpy(BAReq->addr3, ieee->current_network.bssid, ETH_ALEN);
+ BAReq->frame_ctl = cpu_to_le16(RTLLIB_STYPE_MANAGE_ACT);
+
+ tag = (u8 *)skb_put(skb, 9);
+ *tag ++= ACT_CAT_BA;
+ *tag ++= type;
+ *tag ++= pBA->DialogToken;
+
+ if (ACT_ADDBARSP == type) {
+ RT_TRACE(COMP_DBG, "====>to send ADDBARSP\n");
+ tmp = cpu_to_le16(StatusCode);
+ memcpy(tag, (u8 *)&tmp, 2);
+ tag += 2;
+ }
+ tmp = cpu_to_le16(pBA->BaParamSet.shortData);
+ memcpy(tag, (u8 *)&tmp, 2);
+ tag += 2;
+ tmp = cpu_to_le16(pBA->BaTimeoutValue);
+ memcpy(tag, (u8 *)&tmp, 2);
+ tag += 2;
+
+ if (ACT_ADDBAREQ == type) {
+ memcpy(tag, (u8 *)&(pBA->BaStartSeqCtrl), 2);
+ tag += 2;
+ }
+
+ RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA|RTLLIB_DL_BA, skb->data, skb->len);
+ return skb;
+}
+
+static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
+ struct ba_record *pBA,
+ enum tr_select TxRxSelect, u16 ReasonCode)
+{
+ union delba_param_set DelbaParamSet;
+ struct sk_buff *skb = NULL;
+ struct rtllib_hdr_3addr *Delba = NULL;
+ u8 *tag = NULL;
+ u16 tmp = 0;
+ u16 len = 6 + ieee->tx_headroom;
+
+ if (net_ratelimit())
+ RTLLIB_DEBUG(RTLLIB_DL_TRACE | RTLLIB_DL_BA,
+ "========>%s(), Reason"
+ "Code(%d) sentd to: %pM\n", __func__,
+ ReasonCode, dst);
+
+ memset(&DelbaParamSet, 0, 2);
+
+ DelbaParamSet.field.Initiator = (TxRxSelect == TX_DIR) ? 1 : 0;
+ DelbaParamSet.field.TID = pBA->BaParamSet.field.TID;
+
+ skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
+ if (skb == NULL) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "can't alloc skb for ADDBA_REQ\n");
+ return NULL;
+ }
+
+ skb_reserve(skb, ieee->tx_headroom);
+
+ Delba = (struct rtllib_hdr_3addr *) skb_put(skb,
+ sizeof(struct rtllib_hdr_3addr));
+
+ memcpy(Delba->addr1, dst, ETH_ALEN);
+ memcpy(Delba->addr2, ieee->dev->dev_addr, ETH_ALEN);
+ memcpy(Delba->addr3, ieee->current_network.bssid, ETH_ALEN);
+ Delba->frame_ctl = cpu_to_le16(RTLLIB_STYPE_MANAGE_ACT);
+
+ tag = (u8 *)skb_put(skb, 6);
+
+ *tag ++= ACT_CAT_BA;
+ *tag ++= ACT_DELBA;
+
+ tmp = cpu_to_le16(DelbaParamSet.shortData);
+ memcpy(tag, (u8 *)&tmp, 2);
+ tag += 2;
+ tmp = cpu_to_le16(ReasonCode);
+ memcpy(tag, (u8 *)&tmp, 2);
+ tag += 2;
+
+ RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA|RTLLIB_DL_BA, skb->data, skb->len);
+ if (net_ratelimit())
+ RTLLIB_DEBUG(RTLLIB_DL_TRACE | RTLLIB_DL_BA, "<=====%s()\n",
+ __func__);
+ return skb;
+}
+
+static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst,
+ struct ba_record *pBA)
+{
+ struct sk_buff *skb = NULL;
+ skb = rtllib_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ);
+
+ if (skb) {
+ RT_TRACE(COMP_DBG, "====>to send ADDBAREQ!!!!!\n");
+ softmac_mgmt_xmit(skb, ieee);
+ } else {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "alloc skb error in function"
+ " %s()\n", __func__);
+ }
+ return;
+}
+
+static void rtllib_send_ADDBARsp(struct rtllib_device *ieee, u8 *dst,
+ struct ba_record *pBA, u16 StatusCode)
+{
+ struct sk_buff *skb = NULL;
+ skb = rtllib_ADDBA(ieee, dst, pBA, StatusCode, ACT_ADDBARSP);
+ if (skb)
+ softmac_mgmt_xmit(skb, ieee);
+ else
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "alloc skb error in function"
+ " %s()\n", __func__);
+ return;
+}
+
+static void rtllib_send_DELBA(struct rtllib_device *ieee, u8 *dst,
+ struct ba_record *pBA, enum tr_select TxRxSelect,
+ u16 ReasonCode)
+{
+ struct sk_buff *skb = NULL;
+ skb = rtllib_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode);
+ if (skb)
+ softmac_mgmt_xmit(skb, ieee);
+ else
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "alloc skb error in func"
+ "tion %s()\n", __func__);
+ return ;
+}
+
+int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
+{
+ struct rtllib_hdr_3addr *req = NULL;
+ u16 rc = 0;
+ u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
+ struct ba_record *pBA = NULL;
+ union ba_param_set *pBaParamSet = NULL;
+ u16 *pBaTimeoutVal = NULL;
+ union sequence_control *pBaStartSeqCtrl = NULL;
+ struct rx_ts_record *pTS = NULL;
+
+ if (skb->len < sizeof(struct rtllib_hdr_3addr) + 9) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, " Invalid skb len in BAREQ(%d / "
+ "%d)\n", (int)skb->len,
+ (int)(sizeof(struct rtllib_hdr_3addr) + 9));
+ return -1;
+ }
+
+ RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA|RTLLIB_DL_BA, skb->data, skb->len);
+
+ req = (struct rtllib_hdr_3addr *) skb->data;
+ tag = (u8 *)req;
+ dst = (u8 *)(&req->addr2[0]);
+ tag += sizeof(struct rtllib_hdr_3addr);
+ pDialogToken = tag + 2;
+ pBaParamSet = (union ba_param_set *)(tag + 3);
+ pBaTimeoutVal = (u16 *)(tag + 5);
+ pBaStartSeqCtrl = (union sequence_control *)(req + 7);
+
+ RT_TRACE(COMP_DBG, "====>rx ADDBAREQ from : %pM\n", dst);
+ if (ieee->current_network.qos_data.active == 0 ||
+ (ieee->pHTInfo->bCurrentHTSupport == false) ||
+ (ieee->pHTInfo->IOTAction & HT_IOT_ACT_REJECT_ADDBA_REQ)) {
+ rc = ADDBA_STATUS_REFUSED;
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "Failed to reply on ADDBA_REQ as "
+ "some capability is not ready(%d, %d)\n",
+ ieee->current_network.qos_data.active,
+ ieee->pHTInfo->bCurrentHTSupport);
+ goto OnADDBAReq_Fail;
+ }
+ if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
+ (u8)(pBaParamSet->field.TID), RX_DIR, true)) {
+ rc = ADDBA_STATUS_REFUSED;
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "can't get TS in %s()\n", __func__);
+ goto OnADDBAReq_Fail;
+ }
+ pBA = &pTS->RxAdmittedBARecord;
+
+ if (pBaParamSet->field.BAPolicy == BA_POLICY_DELAYED) {
+ rc = ADDBA_STATUS_INVALID_PARAM;
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "BA Policy is not correct in "
+ "%s()\n", __func__);
+ goto OnADDBAReq_Fail;
+ }
+
+ rtllib_FlushRxTsPendingPkts(ieee, pTS);
+
+ DeActivateBAEntry(ieee, pBA);
+ pBA->DialogToken = *pDialogToken;
+ pBA->BaParamSet = *pBaParamSet;
+ pBA->BaTimeoutValue = *pBaTimeoutVal;
+ pBA->BaStartSeqCtrl = *pBaStartSeqCtrl;
+
+ if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev) ||
+ (ieee->pHTInfo->IOTAction & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT))
+ pBA->BaParamSet.field.BufferSize = 1;
+ else
+ pBA->BaParamSet.field.BufferSize = 32;
+
+ ActivateBAEntry(ieee, pBA, 0);
+ rtllib_send_ADDBARsp(ieee, dst, pBA, ADDBA_STATUS_SUCCESS);
+
+ return 0;
+
+OnADDBAReq_Fail:
+ {
+ struct ba_record BA;
+ BA.BaParamSet = *pBaParamSet;
+ BA.BaTimeoutValue = *pBaTimeoutVal;
+ BA.DialogToken = *pDialogToken;
+ BA.BaParamSet.field.BAPolicy = BA_POLICY_IMMEDIATE;
+ rtllib_send_ADDBARsp(ieee, dst, &BA, rc);
+ return 0;
+ }
+}
+
+int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
+{
+ struct rtllib_hdr_3addr *rsp = NULL;
+ struct ba_record *pPendingBA, *pAdmittedBA;
+ struct tx_ts_record *pTS = NULL;
+ u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
+ u16 *pStatusCode = NULL, *pBaTimeoutVal = NULL;
+ union ba_param_set *pBaParamSet = NULL;
+ u16 ReasonCode;
+
+ if (skb->len < sizeof(struct rtllib_hdr_3addr) + 9) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, " Invalid skb len in BARSP(%d / "
+ "%d)\n", (int)skb->len,
+ (int)(sizeof(struct rtllib_hdr_3addr) + 9));
+ return -1;
+ }
+ rsp = (struct rtllib_hdr_3addr *)skb->data;
+ tag = (u8 *)rsp;
+ dst = (u8 *)(&rsp->addr2[0]);
+ tag += sizeof(struct rtllib_hdr_3addr);
+ pDialogToken = tag + 2;
+ pStatusCode = (u16 *)(tag + 3);
+ pBaParamSet = (union ba_param_set *)(tag + 5);
+ pBaTimeoutVal = (u16 *)(tag + 7);
+
+ RT_TRACE(COMP_DBG, "====>rx ADDBARSP from : %pM\n", dst);
+ if (ieee->current_network.qos_data.active == 0 ||
+ ieee->pHTInfo->bCurrentHTSupport == false ||
+ ieee->pHTInfo->bCurrentAMPDUEnable == false) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "reject to ADDBA_RSP as some capab"
+ "ility is not ready(%d, %d, %d)\n",
+ ieee->current_network.qos_data.active,
+ ieee->pHTInfo->bCurrentHTSupport,
+ ieee->pHTInfo->bCurrentAMPDUEnable);
+ ReasonCode = DELBA_REASON_UNKNOWN_BA;
+ goto OnADDBARsp_Reject;
+ }
+
+
+ if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
+ (u8)(pBaParamSet->field.TID), TX_DIR, false)) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "can't get TS in %s()\n", __func__);
+ ReasonCode = DELBA_REASON_UNKNOWN_BA;
+ goto OnADDBARsp_Reject;
+ }
+
+ pTS->bAddBaReqInProgress = false;
+ pPendingBA = &pTS->TxPendingBARecord;
+ pAdmittedBA = &pTS->TxAdmittedBARecord;
+
+
+ if ((pAdmittedBA->bValid == true)) {
+ RTLLIB_DEBUG(RTLLIB_DL_BA, "OnADDBARsp(): Recv ADDBA Rsp."
+ " Drop because already admit it!\n");
+ return -1;
+ } else if ((pPendingBA->bValid == false) ||
+ (*pDialogToken != pPendingBA->DialogToken)) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "OnADDBARsp(): Recv ADDBA Rsp. "
+ "BA invalid, DELBA!\n");
+ ReasonCode = DELBA_REASON_UNKNOWN_BA;
+ goto OnADDBARsp_Reject;
+ } else {
+ RTLLIB_DEBUG(RTLLIB_DL_BA, "OnADDBARsp(): Recv ADDBA Rsp. BA "
+ "is admitted! Status code:%X\n", *pStatusCode);
+ DeActivateBAEntry(ieee, pPendingBA);
+ }
+
+
+ if (*pStatusCode == ADDBA_STATUS_SUCCESS) {
+ if (pBaParamSet->field.BAPolicy == BA_POLICY_DELAYED) {
+ pTS->bAddBaReqDelayed = true;
+ DeActivateBAEntry(ieee, pAdmittedBA);
+ ReasonCode = DELBA_REASON_END_BA;
+ goto OnADDBARsp_Reject;
+ }
+
+
+ pAdmittedBA->DialogToken = *pDialogToken;
+ pAdmittedBA->BaTimeoutValue = *pBaTimeoutVal;
+ pAdmittedBA->BaStartSeqCtrl = pPendingBA->BaStartSeqCtrl;
+ pAdmittedBA->BaParamSet = *pBaParamSet;
+ DeActivateBAEntry(ieee, pAdmittedBA);
+ ActivateBAEntry(ieee, pAdmittedBA, *pBaTimeoutVal);
+ } else {
+ pTS->bAddBaReqDelayed = true;
+ pTS->bDisable_AddBa = true;
+ ReasonCode = DELBA_REASON_END_BA;
+ goto OnADDBARsp_Reject;
+ }
+
+ return 0;
+
+OnADDBARsp_Reject:
+ {
+ struct ba_record BA;
+ BA.BaParamSet = *pBaParamSet;
+ rtllib_send_DELBA(ieee, dst, &BA, TX_DIR, ReasonCode);
+ return 0;
+ }
+}
+
+int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
+{
+ struct rtllib_hdr_3addr *delba = NULL;
+ union delba_param_set *pDelBaParamSet = NULL;
+ u16 *pReasonCode = NULL;
+ u8 *dst = NULL;
+
+ if (skb->len < sizeof(struct rtllib_hdr_3addr) + 6) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, " Invalid skb len in DELBA(%d /"
+ " %d)\n", (int)skb->len,
+ (int)(sizeof(struct rtllib_hdr_3addr) + 6));
+ return -1;
+ }
+
+ if (ieee->current_network.qos_data.active == 0 ||
+ ieee->pHTInfo->bCurrentHTSupport == false) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "received DELBA while QOS or HT "
+ "is not supported(%d, %d)\n",
+ ieee->current_network. qos_data.active,
+ ieee->pHTInfo->bCurrentHTSupport);
+ return -1;
+ }
+
+ RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA|RTLLIB_DL_BA, skb->data, skb->len);
+ delba = (struct rtllib_hdr_3addr *)skb->data;
+ dst = (u8 *)(&delba->addr2[0]);
+ delba += sizeof(struct rtllib_hdr_3addr);
+ pDelBaParamSet = (union delba_param_set *)(delba+2);
+ pReasonCode = (u16 *)(delba+4);
+
+ if (pDelBaParamSet->field.Initiator == 1) {
+ struct rx_ts_record *pRxTs;
+
+ if (!GetTs(ieee, (struct ts_common_info **)&pRxTs, dst,
+ (u8)pDelBaParamSet->field.TID, RX_DIR, false)) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "can't get TS for RXTS in "
+ "%s().dst: %pM TID:%d\n", __func__, dst,
+ (u8)pDelBaParamSet->field.TID);
+ return -1;
+ }
+
+ RxTsDeleteBA(ieee, pRxTs);
+ } else {
+ struct tx_ts_record *pTxTs;
+
+ if (!GetTs(ieee, (struct ts_common_info **)&pTxTs, dst,
+ (u8)pDelBaParamSet->field.TID, TX_DIR, false)) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "can't get TS for TXTS in "
+ "%s()\n", __func__);
+ return -1;
+ }
+
+ pTxTs->bUsingBa = false;
+ pTxTs->bAddBaReqInProgress = false;
+ pTxTs->bAddBaReqDelayed = false;
+ del_timer_sync(&pTxTs->TsAddBaTimer);
+ TxTsDeleteBA(ieee, pTxTs);
+ }
+ return 0;
+}
+
+void TsInitAddBA(struct rtllib_device *ieee, struct tx_ts_record *pTS,
+ u8 Policy, u8 bOverwritePending)
+{
+ struct ba_record *pBA = &pTS->TxPendingBARecord;
+
+ if (pBA->bValid == true && bOverwritePending == false)
+ return;
+
+ DeActivateBAEntry(ieee, pBA);
+
+ pBA->DialogToken++;
+ pBA->BaParamSet.field.AMSDU_Support = 0;
+ pBA->BaParamSet.field.BAPolicy = Policy;
+ pBA->BaParamSet.field.TID =
+ pTS->TsCommonInfo.TSpec.f.TSInfo.field.ucTSID;
+ pBA->BaParamSet.field.BufferSize = 32;
+ pBA->BaTimeoutValue = 0;
+ pBA->BaStartSeqCtrl.field.SeqNum = (pTS->TxCurSeq + 3) % 4096;
+
+ ActivateBAEntry(ieee, pBA, BA_SETUP_TIMEOUT);
+
+ rtllib_send_ADDBAReq(ieee, pTS->TsCommonInfo.Addr, pBA);
+}
+
+void TsInitDelBA(struct rtllib_device *ieee,
+ struct ts_common_info *pTsCommonInfo,
+ enum tr_select TxRxSelect)
+{
+ if (TxRxSelect == TX_DIR) {
+ struct tx_ts_record *pTxTs =
+ (struct tx_ts_record *)pTsCommonInfo;
+
+ if (TxTsDeleteBA(ieee, pTxTs))
+ rtllib_send_DELBA(ieee, pTsCommonInfo->Addr,
+ (pTxTs->TxAdmittedBARecord.bValid) ?
+ (&pTxTs->TxAdmittedBARecord) :
+ (&pTxTs->TxPendingBARecord),
+ TxRxSelect, DELBA_REASON_END_BA);
+ } else if (TxRxSelect == RX_DIR) {
+ struct rx_ts_record *pRxTs =
+ (struct rx_ts_record *)pTsCommonInfo;
+ if (RxTsDeleteBA(ieee, pRxTs))
+ rtllib_send_DELBA(ieee, pTsCommonInfo->Addr,
+ &pRxTs->RxAdmittedBARecord,
+ TxRxSelect, DELBA_REASON_END_BA);
+ }
+}
+
+void BaSetupTimeOut(unsigned long data)
+{
+ struct tx_ts_record *pTxTs = (struct tx_ts_record *)data;
+
+ pTxTs->bAddBaReqInProgress = false;
+ pTxTs->bAddBaReqDelayed = true;
+ pTxTs->TxPendingBARecord.bValid = false;
+}
+
+void TxBaInactTimeout(unsigned long data)
+{
+ struct tx_ts_record *pTxTs = (struct tx_ts_record *)data;
+ struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
+ TxTsRecord[pTxTs->num]);
+ TxTsDeleteBA(ieee, pTxTs);
+ rtllib_send_DELBA(ieee, pTxTs->TsCommonInfo.Addr,
+ &pTxTs->TxAdmittedBARecord, TX_DIR,
+ DELBA_REASON_TIMEOUT);
+}
+
+void RxBaInactTimeout(unsigned long data)
+{
+ struct rx_ts_record *pRxTs = (struct rx_ts_record *)data;
+ struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device,
+ RxTsRecord[pRxTs->num]);
+
+ RxTsDeleteBA(ieee, pRxTs);
+ rtllib_send_DELBA(ieee, pRxTs->TsCommonInfo.Addr,
+ &pRxTs->RxAdmittedBARecord, RX_DIR,
+ DELBA_REASON_TIMEOUT);
+ return ;
+}
diff --git a/drivers/staging/rtl8192e/rtl819x_HT.h b/drivers/staging/rtl8192e/rtl819x_HT.h
new file mode 100644
index 00000000000..13f41057461
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl819x_HT.h
@@ -0,0 +1,475 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef _RTL819XU_HTTYPE_H_
+#define _RTL819XU_HTTYPE_H_
+
+
+#define HT_OPMODE_NO_PROTECT 0
+#define HT_OPMODE_OPTIONAL 1
+#define HT_OPMODE_40MHZ_PROTECT 2
+#define HT_OPMODE_MIXED 3
+
+#define MIMO_PS_STATIC 0
+#define MIMO_PS_DYNAMIC 1
+#define MIMO_PS_NOLIMIT 3
+
+
+
+#define sHTCLng 4
+
+
+#define HT_SUPPORTED_MCS_1SS_BITMAP 0x000000ff
+#define HT_SUPPORTED_MCS_2SS_BITMAP 0x0000ff00
+#define HT_SUPPORTED_MCS_1SS_2SS_BITMAP \
+ (HT_MCS_1SS_BITMAP | HT_MCS_1SS_2SS_BITMAP)
+
+enum ht_mcs_rate {
+ HT_MCS0 = 0x00000001,
+ HT_MCS1 = 0x00000002,
+ HT_MCS2 = 0x00000004,
+ HT_MCS3 = 0x00000008,
+ HT_MCS4 = 0x00000010,
+ HT_MCS5 = 0x00000020,
+ HT_MCS6 = 0x00000040,
+ HT_MCS7 = 0x00000080,
+ HT_MCS8 = 0x00000100,
+ HT_MCS9 = 0x00000200,
+ HT_MCS10 = 0x00000400,
+ HT_MCS11 = 0x00000800,
+ HT_MCS12 = 0x00001000,
+ HT_MCS13 = 0x00002000,
+ HT_MCS14 = 0x00004000,
+ HT_MCS15 = 0x00008000,
+};
+
+enum ht_channel_width {
+ HT_CHANNEL_WIDTH_20 = 0,
+ HT_CHANNEL_WIDTH_20_40 = 1,
+};
+
+enum ht_extchnl_offset {
+ HT_EXTCHNL_OFFSET_NO_EXT = 0,
+ HT_EXTCHNL_OFFSET_UPPER = 1,
+ HT_EXTCHNL_OFFSET_NO_DEF = 2,
+ HT_EXTCHNL_OFFSET_LOWER = 3,
+};
+
+enum chnl_op {
+ CHNLOP_NONE = 0,
+ CHNLOP_SCAN = 1,
+ CHNLOP_SWBW = 2,
+ CHNLOP_SWCHNL = 3,
+};
+
+#define CHHLOP_IN_PROGRESS(_pHTInfo) \
+ ((_pHTInfo)->ChnlOp > CHNLOP_NONE) ? true : false
+
+/*
+union ht_capability {
+ u16 ShortData;
+ u8 CharData[2];
+ struct
+ {
+ u16 AdvCoding:1;
+ u16 ChlWidth:1;
+ u16 MimoPwrSave:2;
+ u16 GreenField:1;
+ u16 ShortGI20Mhz:1;
+ u16 ShortGI40Mhz:1;
+ u16 STBC:1;
+ u16 BeamForm:1;
+ u16 DelayBA:1;
+ u16 MaxAMSDUSize:1;
+ u16 DssCCk:1;
+ u16 PSMP:1;
+ u16 Rsvd:3;
+ }Field;
+};
+
+union ht_capability_macpara {
+ u8 ShortData;
+ u8 CharData[1];
+ struct
+ {
+ u8 MaxRxAMPDU:2;
+ u8 MPDUDensity:2;
+ u8 Rsvd:4;
+ }Field;
+};
+*/
+
+enum ht_action {
+ ACT_RECOMMAND_WIDTH = 0,
+ ACT_MIMO_PWR_SAVE = 1,
+ ACT_PSMP = 2,
+ ACT_SET_PCO_PHASE = 3,
+ ACT_MIMO_CHL_MEASURE = 4,
+ ACT_RECIPROCITY_CORRECT = 5,
+ ACT_MIMO_CSI_MATRICS = 6,
+ ACT_MIMO_NOCOMPR_STEER = 7,
+ ACT_MIMO_COMPR_STEER = 8,
+ ACT_ANTENNA_SELECT = 9,
+};
+
+
+enum ht_bw40_sc {
+ SC_MODE_DUPLICATE = 0,
+ SC_MODE_LOWER = 1,
+ SC_MODE_UPPER = 2,
+ SC_MODE_FULL40MHZ = 3,
+};
+
+struct ht_capab_ele {
+
+ u8 AdvCoding:1;
+ u8 ChlWidth:1;
+ u8 MimoPwrSave:2;
+ u8 GreenField:1;
+ u8 ShortGI20Mhz:1;
+ u8 ShortGI40Mhz:1;
+ u8 TxSTBC:1;
+ u8 RxSTBC:2;
+ u8 DelayBA:1;
+ u8 MaxAMSDUSize:1;
+ u8 DssCCk:1;
+ u8 PSMP:1;
+ u8 Rsvd1:1;
+ u8 LSigTxopProtect:1;
+
+ u8 MaxRxAMPDUFactor:2;
+ u8 MPDUDensity:3;
+ u8 Rsvd2:3;
+
+ u8 MCS[16];
+
+
+ u16 ExtHTCapInfo;
+
+ u8 TxBFCap[4];
+
+ u8 ASCap;
+
+} __packed;
+
+
+struct ht_info_ele {
+ u8 ControlChl;
+
+ u8 ExtChlOffset:2;
+ u8 RecommemdedTxWidth:1;
+ u8 RIFS:1;
+ u8 PSMPAccessOnly:1;
+ u8 SrvIntGranularity:3;
+
+ u8 OptMode:2;
+ u8 NonGFDevPresent:1;
+ u8 Revd1:5;
+ u8 Revd2:8;
+
+ u8 Rsvd3:6;
+ u8 DualBeacon:1;
+ u8 DualCTSProtect:1;
+
+ u8 SecondaryBeacon:1;
+ u8 LSigTxopProtectFull:1;
+ u8 PcoActive:1;
+ u8 PcoPhase:1;
+ u8 Rsvd4:4;
+
+ u8 BasicMSC[16];
+} __packed;
+
+struct mimops_ctrl {
+ u8 MimoPsEnable:1;
+ u8 MimoPsMode:1;
+ u8 Reserved:6;
+};
+
+enum ht_spec_ver {
+ HT_SPEC_VER_IEEE = 0,
+ HT_SPEC_VER_EWC = 1,
+};
+
+enum ht_aggre_mode {
+ HT_AGG_AUTO = 0,
+ HT_AGG_FORCE_ENABLE = 1,
+ HT_AGG_FORCE_DISABLE = 2,
+};
+
+
+struct rt_hi_throughput {
+ u8 bEnableHT;
+ u8 bCurrentHTSupport;
+
+ u8 bRegBW40MHz;
+ u8 bCurBW40MHz;
+
+ u8 bRegShortGI40MHz;
+ u8 bCurShortGI40MHz;
+
+ u8 bRegShortGI20MHz;
+ u8 bCurShortGI20MHz;
+
+ u8 bRegSuppCCK;
+ u8 bCurSuppCCK;
+
+ enum ht_spec_ver ePeerHTSpecVer;
+
+
+ struct ht_capab_ele SelfHTCap;
+ struct ht_info_ele SelfHTInfo;
+
+ u8 PeerHTCapBuf[32];
+ u8 PeerHTInfoBuf[32];
+
+
+ u8 bAMSDU_Support;
+ u16 nAMSDU_MaxSize;
+ u8 bCurrent_AMSDU_Support;
+ u16 nCurrent_AMSDU_MaxSize;
+
+ u8 bAMPDUEnable;
+ u8 bCurrentAMPDUEnable;
+ u8 AMPDU_Factor;
+ u8 CurrentAMPDUFactor;
+ u8 MPDU_Density;
+ u8 CurrentMPDUDensity;
+
+ enum ht_aggre_mode ForcedAMPDUMode;
+ u8 ForcedAMPDUFactor;
+ u8 ForcedMPDUDensity;
+
+ enum ht_aggre_mode ForcedAMSDUMode;
+ u16 ForcedAMSDUMaxSize;
+
+ u8 bForcedShortGI;
+
+ u8 CurrentOpMode;
+
+ u8 SelfMimoPs;
+ u8 PeerMimoPs;
+
+ enum ht_extchnl_offset CurSTAExtChnlOffset;
+ u8 bCurTxBW40MHz;
+ u8 PeerBandwidth;
+
+ u8 bSwBwInProgress;
+ enum chnl_op ChnlOp;
+ u8 SwBwStep;
+
+ u8 bRegRT2RTAggregation;
+ u8 RT2RT_HT_Mode;
+ u8 bCurrentRT2RTAggregation;
+ u8 bCurrentRT2RTLongSlotTime;
+ u8 szRT2RTAggBuffer[10];
+
+ u8 bRegRxReorderEnable;
+ u8 bCurRxReorderEnable;
+ u8 RxReorderWinSize;
+ u8 RxReorderPendingTime;
+ u16 RxReorderDropCounter;
+
+ u8 bIsPeerBcm;
+
+ u8 IOTPeer;
+ u32 IOTAction;
+ u8 IOTRaFunc;
+
+ u8 bWAIotBroadcom;
+ u8 WAIotTH;
+
+ u8 bAcceptAddbaReq;
+} __packed;
+
+
+
+struct rt_htinfo_sta_entry {
+ u8 bEnableHT;
+
+ u8 bSupportCck;
+
+ u16 AMSDU_MaxSize;
+
+ u8 AMPDU_Factor;
+ u8 MPDU_Density;
+
+ u8 HTHighestOperaRate;
+
+ u8 bBw40MHz;
+
+ u8 bCurTxBW40MHz;
+
+ u8 bCurShortGI20MHz;
+
+ u8 bCurShortGI40MHz;
+
+ u8 MimoPs;
+
+ u8 McsRateSet[16];
+
+ u8 bCurRxReorderEnable;
+
+ u16 nAMSDU_MaxSize;
+
+};
+
+
+
+
+
+
+struct bss_ht {
+
+ u8 bdSupportHT;
+
+ u8 bdHTCapBuf[32];
+ u16 bdHTCapLen;
+ u8 bdHTInfoBuf[32];
+ u16 bdHTInfoLen;
+
+ enum ht_spec_ver bdHTSpecVer;
+ enum ht_channel_width bdBandWidth;
+
+ u8 bdRT2RTAggregation;
+ u8 bdRT2RTLongSlotTime;
+ u8 RT2RT_HT_Mode;
+ u8 bdHT1R;
+};
+
+struct mimo_rssi {
+ u32 EnableAntenna;
+ u32 AntennaA;
+ u32 AntennaB;
+ u32 AntennaC;
+ u32 AntennaD;
+ u32 Average;
+};
+
+struct mimo_evm {
+ u32 EVM1;
+ u32 EVM2;
+};
+
+struct false_alarm_stats {
+ u32 Cnt_Parity_Fail;
+ u32 Cnt_Rate_Illegal;
+ u32 Cnt_Crc8_fail;
+ u32 Cnt_Mcs_fail;
+ u32 Cnt_Ofdm_fail;
+ u32 Cnt_Cck_fail;
+ u32 Cnt_all;
+};
+
+
+extern u8 MCS_FILTER_ALL[16];
+extern u8 MCS_FILTER_1SS[16];
+
+#define PICK_RATE(_nLegacyRate, _nMcsRate) \
+ ((_nMcsRate == 0) ? (_nLegacyRate&0x7f) : (_nMcsRate))
+#define LEGACY_WIRELESS_MODE IEEE_MODE_MASK
+
+#define CURRENT_RATE(WirelessMode, LegacyRate, HTRate) \
+ ((WirelessMode & (LEGACY_WIRELESS_MODE)) != 0) ? \
+ (LegacyRate) : (PICK_RATE(LegacyRate, HTRate))
+
+
+
+#define RATE_ADPT_1SS_MASK 0xFF
+#define RATE_ADPT_2SS_MASK 0xF0
+#define RATE_ADPT_MCS32_MASK 0x01
+
+#define IS_11N_MCS_RATE(rate) (rate&0x80)
+
+enum ht_aggre_size {
+ HT_AGG_SIZE_8K = 0,
+ HT_AGG_SIZE_16K = 1,
+ HT_AGG_SIZE_32K = 2,
+ HT_AGG_SIZE_64K = 3,
+};
+
+enum ht_iot_peer {
+ HT_IOT_PEER_UNKNOWN = 0,
+ HT_IOT_PEER_REALTEK = 1,
+ HT_IOT_PEER_REALTEK_92SE = 2,
+ HT_IOT_PEER_BROADCOM = 3,
+ HT_IOT_PEER_RALINK = 4,
+ HT_IOT_PEER_ATHEROS = 5,
+ HT_IOT_PEER_CISCO = 6,
+ HT_IOT_PEER_MARVELL = 7,
+ HT_IOT_PEER_92U_SOFTAP = 8,
+ HT_IOT_PEER_SELF_SOFTAP = 9,
+ HT_IOT_PEER_AIRGO = 10,
+ HT_IOT_PEER_MAX = 11,
+};
+
+enum ht_iot_peer_subtype {
+ HT_IOT_PEER_ATHEROS_DIR635 = 0,
+};
+
+enum ht_iot_action {
+ HT_IOT_ACT_TX_USE_AMSDU_4K = 0x00000001,
+ HT_IOT_ACT_TX_USE_AMSDU_8K = 0x00000002,
+ HT_IOT_ACT_DISABLE_MCS14 = 0x00000004,
+ HT_IOT_ACT_DISABLE_MCS15 = 0x00000008,
+ HT_IOT_ACT_DISABLE_ALL_2SS = 0x00000010,
+ HT_IOT_ACT_DISABLE_EDCA_TURBO = 0x00000020,
+ HT_IOT_ACT_MGNT_USE_CCK_6M = 0x00000040,
+ HT_IOT_ACT_CDD_FSYNC = 0x00000080,
+ HT_IOT_ACT_PURE_N_MODE = 0x00000100,
+ HT_IOT_ACT_FORCED_CTS2SELF = 0x00000200,
+ HT_IOT_ACT_FORCED_RTS = 0x00000400,
+ HT_IOT_ACT_AMSDU_ENABLE = 0x00000800,
+ HT_IOT_ACT_REJECT_ADDBA_REQ = 0x00001000,
+ HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT = 0x00002000,
+ HT_IOT_ACT_EDCA_BIAS_ON_RX = 0x00004000,
+
+ HT_IOT_ACT_HYBRID_AGGREGATION = 0x00010000,
+ HT_IOT_ACT_DISABLE_SHORT_GI = 0x00020000,
+ HT_IOT_ACT_DISABLE_HIGH_POWER = 0x00040000,
+ HT_IOT_ACT_DISABLE_TX_40_MHZ = 0x00080000,
+ HT_IOT_ACT_TX_NO_AGGREGATION = 0x00100000,
+ HT_IOT_ACT_DISABLE_TX_2SS = 0x00200000,
+
+ HT_IOT_ACT_MID_HIGHPOWER = 0x00400000,
+ HT_IOT_ACT_NULL_DATA_POWER_SAVING = 0x00800000,
+
+ HT_IOT_ACT_DISABLE_CCK_RATE = 0x01000000,
+ HT_IOT_ACT_FORCED_ENABLE_BE_TXOP = 0x02000000,
+ HT_IOT_ACT_WA_IOT_Broadcom = 0x04000000,
+
+ HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI = 0x08000000,
+
+};
+
+enum ht_iot_rafunc {
+ HT_IOT_RAFUNC_DISABLE_ALL = 0x00,
+ HT_IOT_RAFUNC_PEER_1R = 0x01,
+ HT_IOT_RAFUNC_TX_AMSDU = 0x02,
+};
+
+enum rt_ht_capability {
+ RT_HT_CAP_USE_TURBO_AGGR = 0x01,
+ RT_HT_CAP_USE_LONG_PREAMBLE = 0x02,
+ RT_HT_CAP_USE_AMPDU = 0x04,
+ RT_HT_CAP_USE_WOW = 0x8,
+ RT_HT_CAP_USE_SOFTAP = 0x10,
+ RT_HT_CAP_USE_92SE = 0x20,
+};
+
+#endif
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
new file mode 100644
index 00000000000..b1c0c566882
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -0,0 +1,1075 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#include "rtllib.h"
+#include "rtl819x_HT.h"
+u8 MCS_FILTER_ALL[16] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+u8 MCS_FILTER_1SS[16] = {
+ 0xff, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+;
+
+u16 MCS_DATA_RATE[2][2][77] = {
+ {{13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78, 104, 156, 208, 234,
+ 260, 39, 78, 117, 234, 312, 351, 390, 52, 104, 156, 208, 312, 416,
+ 468, 520, 0, 78, 104, 130, 117, 156, 195, 104, 130, 130, 156, 182,
+ 182, 208, 156, 195, 195, 234, 273, 273, 312, 130, 156, 181, 156,
+ 181, 208, 234, 208, 234, 260, 260, 286, 195, 234, 273, 234, 273,
+ 312, 351, 312, 351, 390, 390, 429} ,
+ {14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289,
+ 43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520,
+ 578, 0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231,
+ 173, 217, 217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260,
+ 231, 260, 289, 289, 318, 217, 260, 303, 260, 303, 347, 390, 347, 390,
+ 433, 433, 477} } ,
+ {{27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486,
+ 540, 81, 162, 243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648,
+ 864, 972, 1080, 12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324,
+ 378, 378, 432, 324, 405, 405, 486, 567, 567, 648, 270, 324, 378, 324,
+ 378, 432, 486, 432, 486, 540, 540, 594, 405, 486, 567, 486, 567, 648,
+ 729, 648, 729, 810, 810, 891},
+ {30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540,
+ 600, 90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720,
+ 960, 1080, 1200, 13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360,
+ 420, 420, 480, 360, 450, 450, 540, 630, 630, 720, 300, 360, 420, 360,
+ 420, 480, 540, 480, 540, 600, 600, 660, 450, 540, 630, 540, 630, 720,
+ 810, 720, 810, 900, 900, 990} }
+};
+
+static u8 UNKNOWN_BORADCOM[3] = {0x00, 0x14, 0xbf};
+
+static u8 LINKSYSWRT330_LINKSYSWRT300_BROADCOM[3] = {0x00, 0x1a, 0x70};
+
+static u8 LINKSYSWRT350_LINKSYSWRT150_BROADCOM[3] = {0x00, 0x1d, 0x7e};
+
+static u8 BELKINF5D8233V1_RALINK[3] = {0x00, 0x17, 0x3f};
+
+static u8 BELKINF5D82334V3_RALINK[3] = {0x00, 0x1c, 0xdf};
+
+static u8 PCI_RALINK[3] = {0x00, 0x90, 0xcc};
+
+static u8 EDIMAX_RALINK[3] = {0x00, 0x0e, 0x2e};
+
+static u8 AIRLINK_RALINK[3] = {0x00, 0x18, 0x02};
+
+static u8 DLINK_ATHEROS_1[3] = {0x00, 0x1c, 0xf0};
+
+static u8 DLINK_ATHEROS_2[3] = {0x00, 0x21, 0x91};
+
+static u8 CISCO_BROADCOM[3] = {0x00, 0x17, 0x94};
+
+static u8 LINKSYS_MARVELL_4400N[3] = {0x00, 0x14, 0xa4};
+
+void HTUpdateDefaultSetting(struct rtllib_device *ieee)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+
+ pHTInfo->bAcceptAddbaReq = 1;
+
+ pHTInfo->bRegShortGI20MHz = 1;
+ pHTInfo->bRegShortGI40MHz = 1;
+
+ pHTInfo->bRegBW40MHz = 1;
+
+ if (pHTInfo->bRegBW40MHz)
+ pHTInfo->bRegSuppCCK = 1;
+ else
+ pHTInfo->bRegSuppCCK = true;
+
+ pHTInfo->nAMSDU_MaxSize = 7935UL;
+ pHTInfo->bAMSDU_Support = 0;
+
+ pHTInfo->bAMPDUEnable = 1;
+ pHTInfo->AMPDU_Factor = 2;
+ pHTInfo->MPDU_Density = 0;
+
+ pHTInfo->SelfMimoPs = 3;
+ if (pHTInfo->SelfMimoPs == 2)
+ pHTInfo->SelfMimoPs = 3;
+ ieee->bTxDisableRateFallBack = 0;
+ ieee->bTxUseDriverAssingedRate = 0;
+
+ ieee->bTxEnableFwCalcDur = 1;
+
+ pHTInfo->bRegRT2RTAggregation = 1;
+
+ pHTInfo->bRegRxReorderEnable = 1;
+ pHTInfo->RxReorderWinSize = 64;
+ pHTInfo->RxReorderPendingTime = 30;
+}
+
+void HTDebugHTCapability(u8 *CapIE, u8 *TitleString)
+{
+
+ static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
+ struct ht_capab_ele *pCapELE;
+
+ if (!memcmp(CapIE, EWC11NHTCap, sizeof(EWC11NHTCap))) {
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "EWC IE in %s()\n", __func__);
+ pCapELE = (struct ht_capab_ele *)(&CapIE[4]);
+ } else
+ pCapELE = (struct ht_capab_ele *)(&CapIE[0]);
+
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "<Log HT Capability>. Called by %s\n",
+ TitleString);
+
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tSupported Channel Width = %s\n",
+ (pCapELE->ChlWidth) ? "20MHz" : "20/40MHz");
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tSupport Short GI for 20M = %s\n",
+ (pCapELE->ShortGI20Mhz) ? "YES" : "NO");
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tSupport Short GI for 40M = %s\n",
+ (pCapELE->ShortGI40Mhz) ? "YES" : "NO");
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tSupport TX STBC = %s\n",
+ (pCapELE->TxSTBC) ? "YES" : "NO");
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tMax AMSDU Size = %s\n",
+ (pCapELE->MaxAMSDUSize) ? "3839" : "7935");
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tSupport CCK in 20/40 mode = %s\n",
+ (pCapELE->DssCCk) ? "YES" : "NO");
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tMax AMPDU Factor = %d\n",
+ pCapELE->MaxRxAMPDUFactor);
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tMPDU Density = %d\n",
+ pCapELE->MPDUDensity);
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tMCS Rate Set = [%x][%x][%x][%x][%x]\n",
+ pCapELE->MCS[0], pCapELE->MCS[1], pCapELE->MCS[2],
+ pCapELE->MCS[3], pCapELE->MCS[4]);
+ return;
+
+}
+
+void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString)
+{
+
+ static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34};
+ struct ht_info_ele *pHTInfoEle;
+
+ if (!memcmp(InfoIE, EWC11NHTInfo, sizeof(EWC11NHTInfo))) {
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "EWC IE in %s()\n", __func__);
+ pHTInfoEle = (struct ht_info_ele *)(&InfoIE[4]);
+ } else
+ pHTInfoEle = (struct ht_info_ele *)(&InfoIE[0]);
+
+
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "<Log HT Information Element>. "
+ "Called by %s\n", TitleString);
+
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tPrimary channel = %d\n",
+ pHTInfoEle->ControlChl);
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tSenondary channel =");
+ switch (pHTInfoEle->ExtChlOffset) {
+ case 0:
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "Not Present\n");
+ break;
+ case 1:
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "Upper channel\n");
+ break;
+ case 2:
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "Reserved. Eooro!!!\n");
+ break;
+ case 3:
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "Lower Channel\n");
+ break;
+ }
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tRecommended channel width = %s\n",
+ (pHTInfoEle->RecommemdedTxWidth) ? "20Mhz" : "40Mhz");
+
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tOperation mode for protection = ");
+ switch (pHTInfoEle->OptMode) {
+ case 0:
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "No Protection\n");
+ break;
+ case 1:
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "HT non-member protection mode\n");
+ break;
+ case 2:
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "Suggest to open protection\n");
+ break;
+ case 3:
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "HT mixed mode\n");
+ break;
+ }
+
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "\tBasic MCS Rate Set = [%x][%x][%x][%x]"
+ "[%x]\n", pHTInfoEle->BasicMSC[0], pHTInfoEle->BasicMSC[1],
+ pHTInfoEle->BasicMSC[2], pHTInfoEle->BasicMSC[3],
+ pHTInfoEle->BasicMSC[4]);
+ return;
+}
+
+static bool IsHTHalfNmode40Bandwidth(struct rtllib_device *ieee)
+{
+ bool retValue = false;
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+
+ if (pHTInfo->bCurrentHTSupport == false)
+ retValue = false;
+ else if (pHTInfo->bRegBW40MHz == false)
+ retValue = false;
+ else if (!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
+ retValue = false;
+ else if (((struct ht_capab_ele *)(pHTInfo->PeerHTCapBuf))->ChlWidth)
+ retValue = true;
+ else
+ retValue = false;
+
+ return retValue;
+}
+
+static bool IsHTHalfNmodeSGI(struct rtllib_device *ieee, bool is40MHz)
+{
+ bool retValue = false;
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+
+ if (pHTInfo->bCurrentHTSupport == false)
+ retValue = false;
+ else if (!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
+ retValue = false;
+ else if (is40MHz) {
+ if (((struct ht_capab_ele *)
+ (pHTInfo->PeerHTCapBuf))->ShortGI40Mhz)
+ retValue = true;
+ else
+ retValue = false;
+ } else {
+ if (((struct ht_capab_ele *)
+ (pHTInfo->PeerHTCapBuf))->ShortGI20Mhz)
+ retValue = true;
+ else
+ retValue = false;
+ }
+
+ return retValue;
+}
+
+u16 HTHalfMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate)
+{
+
+ u8 is40MHz;
+ u8 isShortGI;
+
+ is40MHz = (IsHTHalfNmode40Bandwidth(ieee)) ? 1 : 0;
+ isShortGI = (IsHTHalfNmodeSGI(ieee, is40MHz)) ? 1 : 0;
+
+ return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate & 0x7f)];
+}
+
+
+u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+
+ u8 is40MHz = (pHTInfo->bCurBW40MHz) ? 1 : 0;
+ u8 isShortGI = (pHTInfo->bCurBW40MHz) ?
+ ((pHTInfo->bCurShortGI40MHz) ? 1 : 0) :
+ ((pHTInfo->bCurShortGI20MHz) ? 1 : 0);
+ return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate & 0x7f)];
+}
+
+u16 TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate)
+{
+ u16 CCKOFDMRate[12] = {0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18,
+ 0x24, 0x30, 0x48, 0x60, 0x6c};
+ u8 is40MHz = 0;
+ u8 isShortGI = 0;
+
+ if (nDataRate < 12) {
+ return CCKOFDMRate[nDataRate];
+ } else {
+ if (nDataRate >= 0x10 && nDataRate <= 0x1f) {
+ is40MHz = 0;
+ isShortGI = 0;
+ } else if (nDataRate >= 0x20 && nDataRate <= 0x2f) {
+ is40MHz = 1;
+ isShortGI = 0;
+
+ } else if (nDataRate >= 0x30 && nDataRate <= 0x3f) {
+ is40MHz = 0;
+ isShortGI = 1;
+ } else if (nDataRate >= 0x40 && nDataRate <= 0x4f) {
+ is40MHz = 1;
+ isShortGI = 1;
+ }
+ return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate&0xf];
+ }
+}
+
+bool IsHTHalfNmodeAPs(struct rtllib_device *ieee)
+{
+ bool retValue = false;
+ struct rtllib_network *net = &ieee->current_network;
+
+ if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, PCI_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) ||
+ (net->ralink_cap_exist))
+ retValue = true;
+ else if (!memcmp(net->bssid, UNKNOWN_BORADCOM, 3) ||
+ !memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) ||
+ !memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) ||
+ (net->broadcom_cap_exist))
+ retValue = true;
+ else if (net->bssht.bdRT2RTAggregation)
+ retValue = true;
+ else
+ retValue = false;
+
+ return retValue;
+}
+
+static void HTIOTPeerDetermine(struct rtllib_device *ieee)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+ struct rtllib_network *net = &ieee->current_network;
+ if (net->bssht.bdRT2RTAggregation) {
+ pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK;
+ if (net->bssht.RT2RT_HT_Mode & RT_HT_CAP_USE_92SE)
+ pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK_92SE;
+ if (net->bssht.RT2RT_HT_Mode & RT_HT_CAP_USE_SOFTAP)
+ pHTInfo->IOTPeer = HT_IOT_PEER_92U_SOFTAP;
+ } else if (net->broadcom_cap_exist)
+ pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
+ else if (!memcmp(net->bssid, UNKNOWN_BORADCOM, 3) ||
+ !memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) ||
+ !memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3))
+ pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
+ else if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, PCI_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) ||
+ net->ralink_cap_exist)
+ pHTInfo->IOTPeer = HT_IOT_PEER_RALINK;
+ else if ((net->atheros_cap_exist) ||
+ (memcmp(net->bssid, DLINK_ATHEROS_1, 3) == 0) ||
+ (memcmp(net->bssid, DLINK_ATHEROS_2, 3) == 0))
+ pHTInfo->IOTPeer = HT_IOT_PEER_ATHEROS;
+ else if ((memcmp(net->bssid, CISCO_BROADCOM, 3) == 0) ||
+ net->cisco_cap_exist)
+ pHTInfo->IOTPeer = HT_IOT_PEER_CISCO;
+ else if ((memcmp(net->bssid, LINKSYS_MARVELL_4400N, 3) == 0) ||
+ net->marvell_cap_exist)
+ pHTInfo->IOTPeer = HT_IOT_PEER_MARVELL;
+ else if (net->airgo_cap_exist)
+ pHTInfo->IOTPeer = HT_IOT_PEER_AIRGO;
+ else
+ pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
+
+ RTLLIB_DEBUG(RTLLIB_DL_IOT, "Joseph debug!! IOTPEER: %x\n",
+ pHTInfo->IOTPeer);
+}
+
+static u8 HTIOTActIsDisableMCS14(struct rtllib_device *ieee, u8 *PeerMacAddr)
+{
+ return 0;
+}
+
+
+static bool HTIOTActIsDisableMCS15(struct rtllib_device *ieee)
+{
+ bool retValue = false;
+
+ return retValue;
+}
+
+static bool HTIOTActIsDisableMCSTwoSpatialStream(struct rtllib_device *ieee)
+{
+ return false;
+}
+
+static u8 HTIOTActIsDisableEDCATurbo(struct rtllib_device *ieee, u8 *PeerMacAddr)
+{
+ return false;
+}
+
+static u8 HTIOTActIsMgntUseCCK6M(struct rtllib_device *ieee,
+ struct rtllib_network *network)
+{
+ u8 retValue = 0;
+
+
+ if (ieee->pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM)
+ retValue = 1;
+
+ return retValue;
+}
+
+static u8 HTIOTActIsCCDFsync(struct rtllib_device *ieee)
+{
+ u8 retValue = 0;
+
+ if (ieee->pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM)
+ retValue = 1;
+ return retValue;
+}
+
+static void HTIOTActDetermineRaFunc(struct rtllib_device *ieee, bool bPeerRx2ss)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+ pHTInfo->IOTRaFunc &= HT_IOT_RAFUNC_DISABLE_ALL;
+
+ if (pHTInfo->IOTPeer == HT_IOT_PEER_RALINK && !bPeerRx2ss)
+ pHTInfo->IOTRaFunc |= HT_IOT_RAFUNC_PEER_1R;
+
+ if (pHTInfo->IOTAction & HT_IOT_ACT_AMSDU_ENABLE)
+ pHTInfo->IOTRaFunc |= HT_IOT_RAFUNC_TX_AMSDU;
+
+}
+
+void HTResetIOTSetting(struct rt_hi_throughput *pHTInfo)
+{
+ pHTInfo->IOTAction = 0;
+ pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
+ pHTInfo->IOTRaFunc = 0;
+}
+
+void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap,
+ u8 *len, u8 IsEncrypt, bool bAssoc)
+{
+ struct rt_hi_throughput *pHT = ieee->pHTInfo;
+ struct ht_capab_ele *pCapELE = NULL;
+
+ if ((posHTCap == NULL) || (pHT == NULL)) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "posHTCap or pHTInfo can't be "
+ "null in HTConstructCapabilityElement()\n");
+ return;
+ }
+ memset(posHTCap, 0, *len);
+
+ if ((bAssoc) && (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)) {
+ u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
+ memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
+ pCapELE = (struct ht_capab_ele *)&(posHTCap[4]);
+ *len = 30 + 2;
+ } else {
+ pCapELE = (struct ht_capab_ele *)posHTCap;
+ *len = 26 + 2;
+ }
+
+ pCapELE->AdvCoding = 0;
+ if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
+ pCapELE->ChlWidth = 0;
+ else
+ pCapELE->ChlWidth = (pHT->bRegBW40MHz ? 1 : 0);
+
+ pCapELE->MimoPwrSave = pHT->SelfMimoPs;
+ pCapELE->GreenField = 0;
+ pCapELE->ShortGI20Mhz = 1;
+ pCapELE->ShortGI40Mhz = 1;
+
+ pCapELE->TxSTBC = 1;
+ pCapELE->RxSTBC = 0;
+ pCapELE->DelayBA = 0;
+ pCapELE->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE >= 7935) ? 1 : 0;
+ pCapELE->DssCCk = ((pHT->bRegBW40MHz) ? (pHT->bRegSuppCCK ? 1 : 0) : 0);
+ pCapELE->PSMP = 0;
+ pCapELE->LSigTxopProtect = 0;
+
+
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "TX HT cap/info ele BW=%d MaxAMSDUSize:%d "
+ "DssCCk:%d\n", pCapELE->ChlWidth, pCapELE->MaxAMSDUSize,
+ pCapELE->DssCCk);
+
+ if (IsEncrypt) {
+ pCapELE->MPDUDensity = 7;
+ pCapELE->MaxRxAMPDUFactor = 2;
+ } else {
+ pCapELE->MaxRxAMPDUFactor = 3;
+ pCapELE->MPDUDensity = 0;
+ }
+
+ memcpy(pCapELE->MCS, ieee->Regdot11HTOperationalRateSet, 16);
+ memset(&pCapELE->ExtHTCapInfo, 0, 2);
+ memset(pCapELE->TxBFCap, 0, 4);
+
+ pCapELE->ASCap = 0;
+
+ if (bAssoc) {
+ if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS15)
+ pCapELE->MCS[1] &= 0x7f;
+
+ if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS14)
+ pCapELE->MCS[1] &= 0xbf;
+
+ if (pHT->IOTAction & HT_IOT_ACT_DISABLE_ALL_2SS)
+ pCapELE->MCS[1] &= 0x00;
+
+ if (pHT->IOTAction & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI)
+ pCapELE->ShortGI40Mhz = 0;
+
+ if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
+ pCapELE->ChlWidth = 0;
+ pCapELE->MCS[1] = 0;
+ }
+ }
+ return;
+}
+
+void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
+ u8 *len, u8 IsEncrypt)
+{
+ struct rt_hi_throughput *pHT = ieee->pHTInfo;
+ struct ht_info_ele *pHTInfoEle = (struct ht_info_ele *)posHTInfo;
+ if ((posHTInfo == NULL) || (pHTInfoEle == NULL)) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "posHTInfo or pHTInfoEle can't be "
+ "null in HTConstructInfoElement()\n");
+ return;
+ }
+
+ memset(posHTInfo, 0, *len);
+ if ((ieee->iw_mode == IW_MODE_ADHOC) ||
+ (ieee->iw_mode == IW_MODE_MASTER)) {
+ pHTInfoEle->ControlChl = ieee->current_network.channel;
+ pHTInfoEle->ExtChlOffset = ((pHT->bRegBW40MHz == false) ?
+ HT_EXTCHNL_OFFSET_NO_EXT :
+ (ieee->current_network.channel <= 6)
+ ? HT_EXTCHNL_OFFSET_UPPER :
+ HT_EXTCHNL_OFFSET_LOWER);
+ pHTInfoEle->RecommemdedTxWidth = pHT->bRegBW40MHz;
+ pHTInfoEle->RIFS = 0;
+ pHTInfoEle->PSMPAccessOnly = 0;
+ pHTInfoEle->SrvIntGranularity = 0;
+ pHTInfoEle->OptMode = pHT->CurrentOpMode;
+ pHTInfoEle->NonGFDevPresent = 0;
+ pHTInfoEle->DualBeacon = 0;
+ pHTInfoEle->SecondaryBeacon = 0;
+ pHTInfoEle->LSigTxopProtectFull = 0;
+ pHTInfoEle->PcoActive = 0;
+ pHTInfoEle->PcoPhase = 0;
+
+ memset(pHTInfoEle->BasicMSC, 0, 16);
+
+
+ *len = 22 + 2;
+
+ } else {
+ *len = 0;
+ }
+ return;
+}
+
+void HTConstructRT2RTAggElement(struct rtllib_device *ieee, u8 *posRT2RTAgg,
+ u8 *len)
+{
+ if (posRT2RTAgg == NULL) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "posRT2RTAgg can't be null in "
+ "HTConstructRT2RTAggElement()\n");
+ return;
+ }
+ memset(posRT2RTAgg, 0, *len);
+ *posRT2RTAgg++ = 0x00;
+ *posRT2RTAgg++ = 0xe0;
+ *posRT2RTAgg++ = 0x4c;
+ *posRT2RTAgg++ = 0x02;
+ *posRT2RTAgg++ = 0x01;
+
+ *posRT2RTAgg = 0x30;
+
+ if (ieee->bSupportRemoteWakeUp)
+ *posRT2RTAgg |= RT_HT_CAP_USE_WOW;
+
+ *len = 6 + 2;
+
+ return;
+}
+
+static u8 HT_PickMCSRate(struct rtllib_device *ieee, u8 *pOperateMCS)
+{
+ u8 i;
+ if (pOperateMCS == NULL) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "pOperateMCS can't be null"
+ " in HT_PickMCSRate()\n");
+ return false;
+ }
+
+ switch (ieee->mode) {
+ case IEEE_A:
+ case IEEE_B:
+ case IEEE_G:
+ for (i = 0; i <= 15; i++)
+ pOperateMCS[i] = 0;
+ break;
+ case IEEE_N_24G:
+ case IEEE_N_5G:
+ pOperateMCS[0] &= RATE_ADPT_1SS_MASK;
+ pOperateMCS[1] &= RATE_ADPT_2SS_MASK;
+ pOperateMCS[3] &= RATE_ADPT_MCS32_MASK;
+ break;
+ default:
+ break;
+
+ }
+
+ return true;
+}
+
+u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
+ u8 *pMCSFilter)
+{
+ u8 i, j;
+ u8 bitMap;
+ u8 mcsRate = 0;
+ u8 availableMcsRate[16];
+ if (pMCSRateSet == NULL || pMCSFilter == NULL) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "pMCSRateSet or pMCSFilter can't "
+ "be null in HTGetHighestMCSRate()\n");
+ return false;
+ }
+ for (i = 0; i < 16; i++)
+ availableMcsRate[i] = pMCSRateSet[i] & pMCSFilter[i];
+
+ for (i = 0; i < 16; i++) {
+ if (availableMcsRate[i] != 0)
+ break;
+ }
+ if (i == 16)
+ return false;
+
+ for (i = 0; i < 16; i++) {
+ if (availableMcsRate[i] != 0) {
+ bitMap = availableMcsRate[i];
+ for (j = 0; j < 8; j++) {
+ if ((bitMap%2) != 0) {
+ if (HTMcsToDataRate(ieee, (8*i+j)) >
+ HTMcsToDataRate(ieee, mcsRate))
+ mcsRate = (8*i+j);
+ }
+ bitMap = bitMap>>1;
+ }
+ }
+ }
+ return mcsRate | 0x80;
+}
+
+u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS, u8 *pOperateMCS)
+{
+
+ u8 i;
+
+ for (i = 0; i <= 15; i++)
+ pOperateMCS[i] = ieee->Regdot11TxHTOperationalRateSet[i] &
+ pSupportMCS[i];
+
+ HT_PickMCSRate(ieee, pOperateMCS);
+
+ if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
+ pOperateMCS[1] = 0;
+
+ for (i = 2; i <= 15; i++)
+ pOperateMCS[i] = 0;
+
+ return true;
+}
+
+void HTSetConnectBwMode(struct rtllib_device *ieee,
+ enum ht_channel_width Bandwidth,
+ enum ht_extchnl_offset Offset);
+
+void HTOnAssocRsp(struct rtllib_device *ieee)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+ struct ht_capab_ele *pPeerHTCap = NULL;
+ struct ht_info_ele *pPeerHTInfo = NULL;
+ u16 nMaxAMSDUSize = 0;
+ u8 *pMcsFilter = NULL;
+
+ static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
+ static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34};
+
+ if (pHTInfo->bCurrentHTSupport == false) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "<=== HTOnAssocRsp(): "
+ "HT_DISABLE\n");
+ return;
+ }
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "===> HTOnAssocRsp_wq(): HT_ENABLE\n");
+
+ if (!memcmp(pHTInfo->PeerHTCapBuf, EWC11NHTCap, sizeof(EWC11NHTCap)))
+ pPeerHTCap = (struct ht_capab_ele *)(&pHTInfo->PeerHTCapBuf[4]);
+ else
+ pPeerHTCap = (struct ht_capab_ele *)(pHTInfo->PeerHTCapBuf);
+
+ if (!memcmp(pHTInfo->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
+ pPeerHTInfo = (struct ht_info_ele *)
+ (&pHTInfo->PeerHTInfoBuf[4]);
+ else
+ pPeerHTInfo = (struct ht_info_ele *)(pHTInfo->PeerHTInfoBuf);
+
+ RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA | RTLLIB_DL_HT, pPeerHTCap,
+ sizeof(struct ht_capab_ele));
+ HTSetConnectBwMode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth),
+ (enum ht_extchnl_offset)(pPeerHTInfo->ExtChlOffset));
+ pHTInfo->bCurTxBW40MHz = ((pPeerHTInfo->RecommemdedTxWidth == 1) ?
+ true : false);
+
+ pHTInfo->bCurShortGI20MHz = ((pHTInfo->bRegShortGI20MHz) ?
+ ((pPeerHTCap->ShortGI20Mhz == 1) ?
+ true : false) : false);
+ pHTInfo->bCurShortGI40MHz = ((pHTInfo->bRegShortGI40MHz) ?
+ ((pPeerHTCap->ShortGI40Mhz == 1) ?
+ true : false) : false);
+
+ pHTInfo->bCurSuppCCK = ((pHTInfo->bRegSuppCCK) ?
+ ((pPeerHTCap->DssCCk == 1) ? true :
+ false) : false);
+
+
+ pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;
+
+ nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize == 0) ? 3839 : 7935;
+
+ if (pHTInfo->nAMSDU_MaxSize > nMaxAMSDUSize)
+ pHTInfo->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize;
+ else
+ pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
+
+ pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
+ if (ieee->rtllib_ap_sec_type &&
+ (ieee->rtllib_ap_sec_type(ieee)&(SEC_ALG_WEP|SEC_ALG_TKIP))) {
+ if ((pHTInfo->IOTPeer == HT_IOT_PEER_ATHEROS) ||
+ (pHTInfo->IOTPeer == HT_IOT_PEER_UNKNOWN))
+ pHTInfo->bCurrentAMPDUEnable = false;
+ }
+
+ if (!pHTInfo->bRegRT2RTAggregation) {
+ if (pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
+ pHTInfo->CurrentAMPDUFactor =
+ pPeerHTCap->MaxRxAMPDUFactor;
+ else
+ pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
+
+ } else {
+ if (ieee->current_network.bssht.bdRT2RTAggregation) {
+ if (ieee->pairwise_key_type != KEY_TYPE_NA)
+ pHTInfo->CurrentAMPDUFactor =
+ pPeerHTCap->MaxRxAMPDUFactor;
+ else
+ pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
+ } else {
+ if (pPeerHTCap->MaxRxAMPDUFactor < HT_AGG_SIZE_32K)
+ pHTInfo->CurrentAMPDUFactor =
+ pPeerHTCap->MaxRxAMPDUFactor;
+ else
+ pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_32K;
+ }
+ }
+ if (pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
+ pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
+ else
+ pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
+ if (pHTInfo->IOTAction & HT_IOT_ACT_TX_USE_AMSDU_8K) {
+ pHTInfo->bCurrentAMPDUEnable = false;
+ pHTInfo->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE;
+ pHTInfo->ForcedAMSDUMaxSize = 7935;
+ }
+ pHTInfo->bCurRxReorderEnable = pHTInfo->bRegRxReorderEnable;
+
+ if (pPeerHTCap->MCS[0] == 0)
+ pPeerHTCap->MCS[0] = 0xff;
+
+ HTIOTActDetermineRaFunc(ieee, ((pPeerHTCap->MCS[1]) != 0));
+
+ HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11HTOperationalRateSet);
+
+ pHTInfo->PeerMimoPs = pPeerHTCap->MimoPwrSave;
+ if (pHTInfo->PeerMimoPs == MIMO_PS_STATIC)
+ pMcsFilter = MCS_FILTER_1SS;
+ else
+ pMcsFilter = MCS_FILTER_ALL;
+ ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee,
+ ieee->dot11HTOperationalRateSet, pMcsFilter);
+ ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
+
+ pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
+}
+
+void HTInitializeHTInfo(struct rtllib_device *ieee)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "===========>%s()\n", __func__);
+ pHTInfo->bCurrentHTSupport = false;
+
+ pHTInfo->bCurBW40MHz = false;
+ pHTInfo->bCurTxBW40MHz = false;
+
+ pHTInfo->bCurShortGI20MHz = false;
+ pHTInfo->bCurShortGI40MHz = false;
+ pHTInfo->bForcedShortGI = false;
+
+ pHTInfo->bCurSuppCCK = true;
+
+ pHTInfo->bCurrent_AMSDU_Support = false;
+ pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
+ pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
+ pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
+
+ memset((void *)(&(pHTInfo->SelfHTCap)), 0,
+ sizeof(pHTInfo->SelfHTCap));
+ memset((void *)(&(pHTInfo->SelfHTInfo)), 0,
+ sizeof(pHTInfo->SelfHTInfo));
+ memset((void *)(&(pHTInfo->PeerHTCapBuf)), 0,
+ sizeof(pHTInfo->PeerHTCapBuf));
+ memset((void *)(&(pHTInfo->PeerHTInfoBuf)), 0,
+ sizeof(pHTInfo->PeerHTInfoBuf));
+
+ pHTInfo->bSwBwInProgress = false;
+ pHTInfo->ChnlOp = CHNLOP_NONE;
+
+ pHTInfo->ePeerHTSpecVer = HT_SPEC_VER_IEEE;
+
+ pHTInfo->bCurrentRT2RTAggregation = false;
+ pHTInfo->bCurrentRT2RTLongSlotTime = false;
+ pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0;
+
+ pHTInfo->IOTPeer = 0;
+ pHTInfo->IOTAction = 0;
+ pHTInfo->IOTRaFunc = 0;
+
+ {
+ u8 *RegHTSuppRateSets = &(ieee->RegHTSuppRateSet[0]);
+ RegHTSuppRateSets[0] = 0xFF;
+ RegHTSuppRateSets[1] = 0xFF;
+ RegHTSuppRateSets[4] = 0x01;
+ }
+}
+
+void HTInitializeBssDesc(struct bss_ht *pBssHT)
+{
+
+ pBssHT->bdSupportHT = false;
+ memset(pBssHT->bdHTCapBuf, 0, sizeof(pBssHT->bdHTCapBuf));
+ pBssHT->bdHTCapLen = 0;
+ memset(pBssHT->bdHTInfoBuf, 0, sizeof(pBssHT->bdHTInfoBuf));
+ pBssHT->bdHTInfoLen = 0;
+
+ pBssHT->bdHTSpecVer = HT_SPEC_VER_IEEE;
+
+ pBssHT->bdRT2RTAggregation = false;
+ pBssHT->bdRT2RTLongSlotTime = false;
+ pBssHT->RT2RT_HT_Mode = (enum rt_ht_capability)0;
+}
+
+void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
+ struct rtllib_network *pNetwork)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+ u8 bIOTAction = 0;
+
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "==============>%s()\n", __func__);
+ /* unmark bEnableHT flag here is the same reason why unmarked in
+ * function rtllib_softmac_new_net. WB 2008.09.10*/
+ if (pNetwork->bssht.bdSupportHT) {
+ pHTInfo->bCurrentHTSupport = true;
+ pHTInfo->ePeerHTSpecVer = pNetwork->bssht.bdHTSpecVer;
+
+ if (pNetwork->bssht.bdHTCapLen > 0 &&
+ pNetwork->bssht.bdHTCapLen <= sizeof(pHTInfo->PeerHTCapBuf))
+ memcpy(pHTInfo->PeerHTCapBuf,
+ pNetwork->bssht.bdHTCapBuf,
+ pNetwork->bssht.bdHTCapLen);
+
+ if (pNetwork->bssht.bdHTInfoLen > 0 &&
+ pNetwork->bssht.bdHTInfoLen <=
+ sizeof(pHTInfo->PeerHTInfoBuf))
+ memcpy(pHTInfo->PeerHTInfoBuf,
+ pNetwork->bssht.bdHTInfoBuf,
+ pNetwork->bssht.bdHTInfoLen);
+
+ if (pHTInfo->bRegRT2RTAggregation) {
+ pHTInfo->bCurrentRT2RTAggregation =
+ pNetwork->bssht.bdRT2RTAggregation;
+ pHTInfo->bCurrentRT2RTLongSlotTime =
+ pNetwork->bssht.bdRT2RTLongSlotTime;
+ pHTInfo->RT2RT_HT_Mode = pNetwork->bssht.RT2RT_HT_Mode;
+ } else {
+ pHTInfo->bCurrentRT2RTAggregation = false;
+ pHTInfo->bCurrentRT2RTLongSlotTime = false;
+ pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0;
+ }
+
+ HTIOTPeerDetermine(ieee);
+
+ pHTInfo->IOTAction = 0;
+ bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid);
+ if (bIOTAction)
+ pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS14;
+
+ bIOTAction = HTIOTActIsDisableMCS15(ieee);
+ if (bIOTAction)
+ pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS15;
+
+ bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee);
+ if (bIOTAction)
+ pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_ALL_2SS;
+
+
+ bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid);
+ if (bIOTAction)
+ pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_EDCA_TURBO;
+
+ bIOTAction = HTIOTActIsMgntUseCCK6M(ieee, pNetwork);
+ if (bIOTAction)
+ pHTInfo->IOTAction |= HT_IOT_ACT_MGNT_USE_CCK_6M;
+ bIOTAction = HTIOTActIsCCDFsync(ieee);
+ if (bIOTAction)
+ pHTInfo->IOTAction |= HT_IOT_ACT_CDD_FSYNC;
+ } else {
+ pHTInfo->bCurrentHTSupport = false;
+ pHTInfo->bCurrentRT2RTAggregation = false;
+ pHTInfo->bCurrentRT2RTLongSlotTime = false;
+ pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0;
+
+ pHTInfo->IOTAction = 0;
+ pHTInfo->IOTRaFunc = 0;
+ }
+}
+
+void HTUpdateSelfAndPeerSetting(struct rtllib_device *ieee,
+ struct rtllib_network *pNetwork)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+ struct ht_info_ele *pPeerHTInfo =
+ (struct ht_info_ele *)pNetwork->bssht.bdHTInfoBuf;
+
+ if (pHTInfo->bCurrentHTSupport) {
+ if (pNetwork->bssht.bdHTInfoLen != 0)
+ pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
+ }
+}
+
+void HTUseDefaultSetting(struct rtllib_device *ieee)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+
+ if (pHTInfo->bEnableHT) {
+ pHTInfo->bCurrentHTSupport = true;
+ pHTInfo->bCurSuppCCK = pHTInfo->bRegSuppCCK;
+
+ pHTInfo->bCurBW40MHz = pHTInfo->bRegBW40MHz;
+ pHTInfo->bCurShortGI20MHz = pHTInfo->bRegShortGI20MHz;
+
+ pHTInfo->bCurShortGI40MHz = pHTInfo->bRegShortGI40MHz;
+
+ if (ieee->iw_mode == IW_MODE_ADHOC)
+ ieee->current_network.qos_data.active =
+ ieee->current_network.qos_data.supported;
+ pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;
+ pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
+
+ pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
+ pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
+
+ pHTInfo->CurrentMPDUDensity = pHTInfo->CurrentMPDUDensity;
+
+ HTFilterMCSRate(ieee, ieee->Regdot11TxHTOperationalRateSet,
+ ieee->dot11HTOperationalRateSet);
+ ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee,
+ ieee->dot11HTOperationalRateSet,
+ MCS_FILTER_ALL);
+ ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
+
+ } else {
+ pHTInfo->bCurrentHTSupport = false;
+ }
+ return;
+}
+
+u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame)
+{
+ if (ieee->pHTInfo->bCurrentHTSupport) {
+ if ((IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1) {
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "HT CONTROL FILED "
+ "EXIST!!\n");
+ return true;
+ }
+ }
+ return false;
+}
+
+static void HTSetConnectBwModeCallback(struct rtllib_device *ieee)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+
+ RTLLIB_DEBUG(RTLLIB_DL_HT, "======>%s()\n", __func__);
+ if (pHTInfo->bCurBW40MHz) {
+ if (pHTInfo->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_UPPER)
+ ieee->set_chan(ieee->dev,
+ ieee->current_network.channel + 2);
+ else if (pHTInfo->CurSTAExtChnlOffset ==
+ HT_EXTCHNL_OFFSET_LOWER)
+ ieee->set_chan(ieee->dev,
+ ieee->current_network.channel - 2);
+ else
+ ieee->set_chan(ieee->dev,
+ ieee->current_network.channel);
+
+ ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20_40,
+ pHTInfo->CurSTAExtChnlOffset);
+ } else {
+ ieee->set_chan(ieee->dev, ieee->current_network.channel);
+ ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20,
+ HT_EXTCHNL_OFFSET_NO_EXT);
+ }
+
+ pHTInfo->bSwBwInProgress = false;
+}
+
+void HTSetConnectBwMode(struct rtllib_device *ieee,
+ enum ht_channel_width Bandwidth,
+ enum ht_extchnl_offset Offset)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+
+ if (pHTInfo->bRegBW40MHz == false)
+ return;
+
+ if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
+ Bandwidth = HT_CHANNEL_WIDTH_20;
+
+ if (pHTInfo->bSwBwInProgress) {
+ printk(KERN_INFO "%s: bSwBwInProgress!!\n", __func__);
+ return;
+ }
+ if (Bandwidth == HT_CHANNEL_WIDTH_20_40) {
+ if (ieee->current_network.channel < 2 &&
+ Offset == HT_EXTCHNL_OFFSET_LOWER)
+ Offset = HT_EXTCHNL_OFFSET_NO_EXT;
+ if (Offset == HT_EXTCHNL_OFFSET_UPPER ||
+ Offset == HT_EXTCHNL_OFFSET_LOWER) {
+ pHTInfo->bCurBW40MHz = true;
+ pHTInfo->CurSTAExtChnlOffset = Offset;
+ } else {
+ pHTInfo->bCurBW40MHz = false;
+ pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
+ }
+ } else {
+ pHTInfo->bCurBW40MHz = false;
+ pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
+ }
+
+ printk(KERN_INFO "%s():pHTInfo->bCurBW40MHz:%x\n", __func__,
+ pHTInfo->bCurBW40MHz);
+
+ pHTInfo->bSwBwInProgress = true;
+
+ HTSetConnectBwModeCallback(ieee);
+}
diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h
new file mode 100644
index 00000000000..5ecd556f079
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl819x_Qos.h
@@ -0,0 +1,444 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef __INC_QOS_TYPE_H
+#define __INC_QOS_TYPE_H
+
+#include "rtllib_endianfree.h"
+
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+
+union qos_tsinfo {
+ u8 charData[3];
+ struct {
+ u8 ucTrafficType:1;
+ u8 ucTSID:4;
+ u8 ucDirection:2;
+ u8 ucAccessPolicy:2;
+ u8 ucAggregation:1;
+ u8 ucPSB:1;
+ u8 ucUP:3;
+ u8 ucTSInfoAckPolicy:2;
+ u8 ucSchedule:1;
+ u8 ucReserved:7;
+ } field;
+};
+
+union tspec_body {
+ u8 charData[55];
+
+ struct {
+ union qos_tsinfo TSInfo;
+ u16 NominalMSDUsize;
+ u16 MaxMSDUsize;
+ u32 MinServiceItv;
+ u32 MaxServiceItv;
+ u32 InactivityItv;
+ u32 SuspenItv;
+ u32 ServiceStartTime;
+ u32 MinDataRate;
+ u32 MeanDataRate;
+ u32 PeakDataRate;
+ u32 MaxBurstSize;
+ u32 DelayBound;
+ u32 MinPhyRate;
+ u16 SurplusBandwidthAllowance;
+ u16 MediumTime;
+ } f;
+};
+
+struct wmm_tspec {
+ u8 ID;
+ u8 Length;
+ u8 OUI[3];
+ u8 OUI_Type;
+ u8 OUI_SubType;
+ u8 Version;
+ union tspec_body Body;
+};
+
+struct octet_string {
+ u8 *Octet;
+ u16 Length;
+};
+
+#define MAX_WMMELE_LENGTH 64
+
+#define QOS_MODE u32
+
+#define QOS_DISABLE 0
+#define QOS_WMM 1
+#define QOS_WMMSA 2
+#define QOS_EDCA 4
+#define QOS_HCCA 8
+#define QOS_WMM_UAPSD 16
+
+#define WMM_PARAM_ELE_BODY_LEN 18
+
+#define MAX_STA_TS_COUNT 16
+#define MAX_AP_TS_COUNT 32
+#define QOS_TSTREAM_KEY_SIZE 13
+
+#define WMM_ACTION_CATEGORY_CODE 17
+#define WMM_PARAM_ELE_BODY_LEN 18
+
+#define MAX_TSPEC_TSID 15
+#define SESSION_REJECT_TSID 0xfe
+#define DEFAULT_TSID 0xff
+
+#define ADDTS_TIME_SLOT 100
+
+#define ACM_TIMEOUT 1000
+#define SESSION_REJECT_TIMEOUT 60000
+
+enum ack_policy {
+ eAckPlc0_ACK = 0x00,
+ eAckPlc1_NoACK = 0x01,
+};
+
+
+#define SET_WMM_QOS_INFO_FIELD(_pStart, _val) \
+ WriteEF1Byte(_pStart, _val)
+
+#define GET_WMM_QOS_INFO_FIELD_PARAMETERSET_COUNT(_pStart) \
+ LE_BITS_TO_1BYTE(_pStart, 0, 4)
+#define SET_WMM_QOS_INFO_FIELD_PARAMETERSET_COUNT(_pStart, _val) \
+ SET_BITS_TO_LE_1BYTE(_pStart, 0, 4, _val)
+
+#define GET_WMM_QOS_INFO_FIELD_AP_UAPSD(_pStart) \
+ LE_BITS_TO_1BYTE(_pStart, 7, 1)
+#define SET_WMM_QOS_INFO_FIELD_AP_UAPSD(_pStart, _val) \
+ SET_BITS_TO_LE_1BYTE(_pStart, 7, 1, _val)
+
+#define GET_WMM_QOS_INFO_FIELD_STA_AC_VO_UAPSD(_pStart) \
+ LE_BITS_TO_1BYTE(_pStart, 0, 1)
+#define SET_WMM_QOS_INFO_FIELD_STA_AC_VO_UAPSD(_pStart, _val) \
+ SET_BITS_TO_LE_1BYTE(_pStart, 0, 1, _val)
+
+#define GET_WMM_QOS_INFO_FIELD_STA_AC_VI_UAPSD(_pStart) \
+ LE_BITS_TO_1BYTE(_pStart, 1, 1)
+#define SET_WMM_QOS_INFO_FIELD_STA_AC_VI_UAPSD(_pStart, _val) \
+ SET_BITS_TO_LE_1BYTE(_pStart, 1, 1, _val)
+
+#define GET_WMM_QOS_INFO_FIELD_STA_AC_BE_UAPSD(_pStart) \
+ LE_BITS_TO_1BYTE(_pStart, 2, 1)
+#define SET_WMM_QOS_INFO_FIELD_STA_AC_BE_UAPSD(_pStart, _val) \
+ SET_BITS_TO_LE_1BYTE(_pStart, 2, 1, _val)
+
+#define GET_WMM_QOS_INFO_FIELD_STA_AC_BK_UAPSD(_pStart) \
+ LE_BITS_TO_1BYTE(_pStart, 3, 1)
+#define SET_WMM_QOS_INFO_FIELD_STA_AC_BK_UAPSD(_pStart, _val) \
+ SET_BITS_TO_LE_1BYTE(_pStart, 3, 1, _val)
+
+#define GET_WMM_QOS_INFO_FIELD_STA_MAX_SP_LEN(_pStart) \
+ LE_BITS_TO_1BYTE(_pStart, 5, 2)
+#define SET_WMM_QOS_INFO_FIELD_STA_MAX_SP_LEN(_pStart, _val) \
+ SET_BITS_TO_LE_1BYTE(_pStart, 5, 2, _val)
+
+enum qos_ie_source {
+ QOSIE_SRC_ADDTSREQ,
+ QOSIE_SRC_ADDTSRSP,
+ QOSIE_SRC_REASOCREQ,
+ QOSIE_SRC_REASOCRSP,
+ QOSIE_SRC_DELTS,
+};
+
+
+#define AC_CODING u32
+
+#define AC0_BE 0
+#define AC1_BK 1
+#define AC2_VI 2
+#define AC3_VO 3
+#define AC_MAX 4
+
+
+#define AC_PARAM_SIZE 4
+
+#define GET_WMM_AC_PARAM_AIFSN(_pStart) \
+ ((u8)LE_BITS_TO_4BYTE(_pStart, 0, 4))
+#define SET_WMM_AC_PARAM_AIFSN(_pStart, _val) \
+ SET_BITS_TO_LE_4BYTE(_pStart, 0, 4, _val)
+
+#define GET_WMM_AC_PARAM_ACM(_pStart) \
+ ((u8)LE_BITS_TO_4BYTE(_pStart, 4, 1))
+#define SET_WMM_AC_PARAM_ACM(_pStart, _val) \
+ SET_BITS_TO_LE_4BYTE(_pStart, 4, 1, _val)
+
+#define GET_WMM_AC_PARAM_ACI(_pStart) \
+ ((u8)LE_BITS_TO_4BYTE(_pStart, 5, 2))
+#define SET_WMM_AC_PARAM_ACI(_pStart, _val) \
+ SET_BITS_TO_LE_4BYTE(_pStart, 5, 2, _val)
+
+#define GET_WMM_AC_PARAM_ACI_AIFSN(_pStart) \
+ ((u8)LE_BITS_TO_4BYTE(_pStart, 0, 8))
+#define SET_WMM_AC_PARAM_ACI_AIFSN(_pStart, _val) \
+ SET_BITS_TO_LE_4BYTE(_pStart, 0, 8, _val)
+
+#define GET_WMM_AC_PARAM_ECWMIN(_pStart) \
+ ((u8)LE_BITS_TO_4BYTE(_pStart, 8, 4))
+#define SET_WMM_AC_PARAM_ECWMIN(_pStart, _val) \
+ SET_BITS_TO_LE_4BYTE(_pStart, 8, 4, _val)
+
+#define GET_WMM_AC_PARAM_ECWMAX(_pStart) \
+ ((u8)LE_BITS_TO_4BYTE(_pStart, 12, 4))
+#define SET_WMM_AC_PARAM_ECWMAX(_pStart, _val) \
+ SET_BITS_TO_LE_4BYTE(_pStart, 12, 4, _val)
+
+#define GET_WMM_AC_PARAM_TXOP_LIMIT(_pStart) \
+ ((u8)LE_BITS_TO_4BYTE(_pStart, 16, 16))
+#define SET_WMM_AC_PARAM_TXOP_LIMIT(_pStart, _val) \
+ SET_BITS_TO_LE_4BYTE(_pStart, 16, 16, _val)
+
+
+
+#define WMM_PARAM_ELEMENT_SIZE (8+(4*AC_PARAM_SIZE))
+
+enum qos_ele_subtype {
+ QOSELE_TYPE_INFO = 0x00,
+ QOSELE_TYPE_PARAM = 0x01,
+};
+
+
+enum direction_value {
+ DIR_UP = 0,
+ DIR_DOWN = 1,
+ DIR_DIRECT = 2,
+ DIR_BI_DIR = 3,
+};
+
+enum acm_method {
+ eAcmWay0_SwAndHw = 0,
+ eAcmWay1_HW = 1,
+ eAcmWay2_SW = 2,
+};
+
+
+struct acm {
+ u64 UsedTime;
+ u64 MediumTime;
+ u8 HwAcmCtl;
+};
+
+
+
+#define AC_UAPSD u8
+
+#define GET_VO_UAPSD(_apsd) ((_apsd) & BIT0)
+#define SET_VO_UAPSD(_apsd) ((_apsd) |= BIT0)
+
+#define GET_VI_UAPSD(_apsd) ((_apsd) & BIT1)
+#define SET_VI_UAPSD(_apsd) ((_apsd) |= BIT1)
+
+#define GET_BK_UAPSD(_apsd) ((_apsd) & BIT2)
+#define SET_BK_UAPSD(_apsd) ((_apsd) |= BIT2)
+
+#define GET_BE_UAPSD(_apsd) ((_apsd) & BIT3)
+#define SET_BE_UAPSD(_apsd) ((_apsd) |= BIT3)
+
+union qos_tclas {
+
+ struct _TYPE_GENERAL {
+ u8 Priority;
+ u8 ClassifierType;
+ u8 Mask;
+ } TYPE_GENERAL;
+
+ struct _TYPE0_ETH {
+ u8 Priority;
+ u8 ClassifierType;
+ u8 Mask;
+ u8 SrcAddr[6];
+ u8 DstAddr[6];
+ u16 Type;
+ } TYPE0_ETH;
+
+ struct _TYPE1_IPV4 {
+ u8 Priority;
+ u8 ClassifierType;
+ u8 Mask;
+ u8 Version;
+ u8 SrcIP[4];
+ u8 DstIP[4];
+ u16 SrcPort;
+ u16 DstPort;
+ u8 DSCP;
+ u8 Protocol;
+ u8 Reserved;
+ } TYPE1_IPV4;
+
+ struct _TYPE1_IPV6 {
+ u8 Priority;
+ u8 ClassifierType;
+ u8 Mask;
+ u8 Version;
+ u8 SrcIP[16];
+ u8 DstIP[16];
+ u16 SrcPort;
+ u16 DstPort;
+ u8 FlowLabel[3];
+ } TYPE1_IPV6;
+
+ struct _TYPE2_8021Q {
+ u8 Priority;
+ u8 ClassifierType;
+ u8 Mask;
+ u16 TagType;
+ } TYPE2_8021Q;
+};
+
+struct qos_tstream {
+
+ bool bUsed;
+ u16 MsduLifetime;
+ bool bEstablishing;
+ u8 TimeSlotCount;
+ u8 DialogToken;
+ struct wmm_tspec TSpec;
+ struct wmm_tspec OutStandingTSpec;
+ u8 NominalPhyRate;
+};
+
+struct sta_qos {
+ u8 WMMIEBuf[MAX_WMMELE_LENGTH];
+ u8 *WMMIE;
+
+ QOS_MODE QosCapability;
+ QOS_MODE CurrentQosMode;
+
+ AC_UAPSD b4ac_Uapsd;
+ AC_UAPSD Curr4acUapsd;
+ u8 bInServicePeriod;
+ u8 MaxSPLength;
+ int NumBcnBeforeTrigger;
+
+ u8 *pWMMInfoEle;
+ u8 WMMParamEle[WMM_PARAM_ELEMENT_SIZE];
+
+ struct acm acm[4];
+ enum acm_method AcmMethod;
+
+ struct qos_tstream StaTsArray[MAX_STA_TS_COUNT];
+ u8 DialogToken;
+ struct wmm_tspec TSpec;
+
+ u8 QBssWirelessMode;
+
+ bool bNoAck;
+
+ bool bEnableRxImmBA;
+
+};
+
+#define QBSS_LOAD_SIZE 5
+#define GET_QBSS_LOAD_STA_COUNT(__pStart) \
+ ReadEF2Byte(__pStart)
+#define SET_QBSS_LOAD_STA_COUNT(__pStart, __Value) \
+ WriteEF2Byte(__pStart, __Value)
+#define GET_QBSS_LOAD_CHNL_UTILIZATION(__pStart) \
+ ReadEF1Byte((u8 *)(__pStart) + 2)
+#define SET_QBSS_LOAD_CHNL_UTILIZATION(__pStart, __Value) \
+ WriteEF1Byte((u8 *)(__pStart) + 2, __Value)
+#define GET_QBSS_LOAD_AVAILABLE_CAPACITY(__pStart) \
+ ReadEF2Byte((u8 *)(__pStart) + 3)
+#define SET_QBSS_LOAD_AVAILABLE_CAPACITY(__pStart, __Value) \
+ WriteEF2Byte((u8 *)(__pStart) + 3, __Value)
+
+struct bss_qos {
+ QOS_MODE bdQoSMode;
+ u8 bdWMMIEBuf[MAX_WMMELE_LENGTH];
+ struct octet_string bdWMMIE;
+
+ enum qos_ele_subtype EleSubType;
+
+ u8 *pWMMInfoEle;
+ u8 *pWMMParamEle;
+
+ u8 QBssLoad[QBSS_LOAD_SIZE];
+ bool bQBssLoadValid;
+};
+
+#define sQoSCtlLng 2
+#define QOS_CTRL_LEN(_QosMode) ((_QosMode > QOS_DISABLE) ? sQoSCtlLng : 0)
+
+
+#define IsACValid(ac) ((ac >= 0 && ac <= 7) ? true : false)
+
+
+union aci_aifsn {
+ u8 charData;
+
+ struct {
+ u8 AIFSN:4;
+ u8 acm:1;
+ u8 ACI:2;
+ u8 Reserved:1;
+ } f;
+};
+
+union ecw {
+ u8 charData;
+ struct {
+ u8 ECWmin:4;
+ u8 ECWmax:4;
+ } f;
+};
+
+union ac_param {
+ u32 longData;
+ u8 charData[4];
+
+ struct {
+ union aci_aifsn AciAifsn;
+ union ecw Ecw;
+ u16 TXOPLimit;
+ } f;
+};
+
+#endif
diff --git a/drivers/staging/rtl8192e/rtl819x_TS.h b/drivers/staging/rtl8192e/rtl819x_TS.h
new file mode 100644
index 00000000000..8601b1ad217
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl819x_TS.h
@@ -0,0 +1,73 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef _TSTYPE_H_
+#define _TSTYPE_H_
+#include "rtl819x_Qos.h"
+#define TS_SETUP_TIMEOUT 60
+#define TS_INACT_TIMEOUT 60
+#define TS_ADDBA_DELAY 60
+
+#define TOTAL_TS_NUM 16
+#define TCLAS_NUM 4
+
+enum tr_select {
+ TX_DIR = 0,
+ RX_DIR = 1,
+};
+
+struct ts_common_info {
+ struct list_head List;
+ struct timer_list SetupTimer;
+ struct timer_list InactTimer;
+ u8 Addr[6];
+ union tspec_body TSpec;
+ union qos_tclas TClass[TCLAS_NUM];
+ u8 TClasProc;
+ u8 TClasNum;
+};
+
+struct tx_ts_record {
+ struct ts_common_info TsCommonInfo;
+ u16 TxCurSeq;
+ struct ba_record TxPendingBARecord;
+ struct ba_record TxAdmittedBARecord;
+ u8 bAddBaReqInProgress;
+ u8 bAddBaReqDelayed;
+ u8 bUsingBa;
+ u8 bDisable_AddBa;
+ struct timer_list TsAddBaTimer;
+ u8 num;
+};
+
+struct rx_ts_record {
+ struct ts_common_info TsCommonInfo;
+ u16 RxIndicateSeq;
+ u16 RxTimeoutIndicateSeq;
+ struct list_head RxPendingPktList;
+ struct timer_list RxPktPendingTimer;
+ struct ba_record RxAdmittedBARecord;
+ u16 RxLastSeqNum;
+ u8 RxLastFragNum;
+ u8 num;
+};
+
+void _setup_timer(struct timer_list *, void *, unsigned long);
+
+
+#endif
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
new file mode 100644
index 00000000000..09a602f7432
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -0,0 +1,548 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#include "rtllib.h"
+#include <linux/etherdevice.h>
+#include "rtl819x_TS.h"
+
+static void TsSetupTimeOut(unsigned long data)
+{
+}
+
+static void TsInactTimeout(unsigned long data)
+{
+}
+
+static void RxPktPendingTimeout(unsigned long data)
+{
+ struct rx_ts_record *pRxTs = (struct rx_ts_record *)data;
+ struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device,
+ RxTsRecord[pRxTs->num]);
+
+ struct rx_reorder_entry *pReorderEntry = NULL;
+
+ unsigned long flags = 0;
+ u8 index = 0;
+ bool bPktInBuf = false;
+
+ spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
+ if (pRxTs->RxTimeoutIndicateSeq != 0xffff) {
+ while (!list_empty(&pRxTs->RxPendingPktList)) {
+ pReorderEntry = (struct rx_reorder_entry *)
+ list_entry(pRxTs->RxPendingPktList.prev,
+ struct rx_reorder_entry, List);
+ if (index == 0)
+ pRxTs->RxIndicateSeq = pReorderEntry->SeqNum;
+
+ if (SN_LESS(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq) ||
+ SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq)) {
+ list_del_init(&pReorderEntry->List);
+
+ if (SN_EQUAL(pReorderEntry->SeqNum,
+ pRxTs->RxIndicateSeq))
+ pRxTs->RxIndicateSeq =
+ (pRxTs->RxIndicateSeq + 1) % 4096;
+
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Indicate"
+ " SeqNum: %d\n", __func__,
+ pReorderEntry->SeqNum);
+ ieee->stats_IndicateArray[index] =
+ pReorderEntry->prxb;
+ index++;
+
+ list_add_tail(&pReorderEntry->List,
+ &ieee->RxReorder_Unused_List);
+ } else {
+ bPktInBuf = true;
+ break;
+ }
+ }
+ }
+
+ if (index > 0) {
+ pRxTs->RxTimeoutIndicateSeq = 0xffff;
+
+ if (index > REORDER_WIN_SIZE) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket():"
+ " Rx Reorer struct buffer full!!\n");
+ spin_unlock_irqrestore(&(ieee->reorder_spinlock),
+ flags);
+ return;
+ }
+ rtllib_indicate_packets(ieee, ieee->stats_IndicateArray, index);
+ bPktInBuf = false;
+ }
+
+ if (bPktInBuf && (pRxTs->RxTimeoutIndicateSeq == 0xffff)) {
+ pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq;
+ mod_timer(&pRxTs->RxPktPendingTimer, jiffies +
+ MSECS(ieee->pHTInfo->RxReorderPendingTime));
+ }
+ spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
+}
+
+static void TsAddBaProcess(unsigned long data)
+{
+ struct tx_ts_record *pTxTs = (struct tx_ts_record *)data;
+ u8 num = pTxTs->num;
+ struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
+ TxTsRecord[num]);
+
+ TsInitAddBA(ieee, pTxTs, BA_POLICY_IMMEDIATE, false);
+ RTLLIB_DEBUG(RTLLIB_DL_BA, "TsAddBaProcess(): ADDBA Req is "
+ "started!!\n");
+}
+
+static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo)
+{
+ memset(pTsCommonInfo->Addr, 0, 6);
+ memset(&pTsCommonInfo->TSpec, 0, sizeof(union tspec_body));
+ memset(&pTsCommonInfo->TClass, 0, sizeof(union qos_tclas)*TCLAS_NUM);
+ pTsCommonInfo->TClasProc = 0;
+ pTsCommonInfo->TClasNum = 0;
+}
+
+static void ResetTxTsEntry(struct tx_ts_record *pTS)
+{
+ ResetTsCommonInfo(&pTS->TsCommonInfo);
+ pTS->TxCurSeq = 0;
+ pTS->bAddBaReqInProgress = false;
+ pTS->bAddBaReqDelayed = false;
+ pTS->bUsingBa = false;
+ pTS->bDisable_AddBa = false;
+ ResetBaEntry(&pTS->TxAdmittedBARecord);
+ ResetBaEntry(&pTS->TxPendingBARecord);
+}
+
+static void ResetRxTsEntry(struct rx_ts_record *pTS)
+{
+ ResetTsCommonInfo(&pTS->TsCommonInfo);
+ pTS->RxIndicateSeq = 0xffff;
+ pTS->RxTimeoutIndicateSeq = 0xffff;
+ ResetBaEntry(&pTS->RxAdmittedBARecord);
+}
+
+void TSInitialize(struct rtllib_device *ieee)
+{
+ struct tx_ts_record *pTxTS = ieee->TxTsRecord;
+ struct rx_ts_record *pRxTS = ieee->RxTsRecord;
+ struct rx_reorder_entry *pRxReorderEntry = ieee->RxReorderEntry;
+ u8 count = 0;
+ RTLLIB_DEBUG(RTLLIB_DL_TS, "==========>%s()\n", __func__);
+ INIT_LIST_HEAD(&ieee->Tx_TS_Admit_List);
+ INIT_LIST_HEAD(&ieee->Tx_TS_Pending_List);
+ INIT_LIST_HEAD(&ieee->Tx_TS_Unused_List);
+
+ for (count = 0; count < TOTAL_TS_NUM; count++) {
+ pTxTS->num = count;
+ _setup_timer(&pTxTS->TsCommonInfo.SetupTimer,
+ TsSetupTimeOut,
+ (unsigned long) pTxTS);
+
+ _setup_timer(&pTxTS->TsCommonInfo.InactTimer,
+ TsInactTimeout,
+ (unsigned long) pTxTS);
+
+ _setup_timer(&pTxTS->TsAddBaTimer,
+ TsAddBaProcess,
+ (unsigned long) pTxTS);
+
+ _setup_timer(&pTxTS->TxPendingBARecord.Timer,
+ BaSetupTimeOut,
+ (unsigned long) pTxTS);
+ _setup_timer(&pTxTS->TxAdmittedBARecord.Timer,
+ TxBaInactTimeout,
+ (unsigned long) pTxTS);
+
+ ResetTxTsEntry(pTxTS);
+ list_add_tail(&pTxTS->TsCommonInfo.List,
+ &ieee->Tx_TS_Unused_List);
+ pTxTS++;
+ }
+
+ INIT_LIST_HEAD(&ieee->Rx_TS_Admit_List);
+ INIT_LIST_HEAD(&ieee->Rx_TS_Pending_List);
+ INIT_LIST_HEAD(&ieee->Rx_TS_Unused_List);
+ for (count = 0; count < TOTAL_TS_NUM; count++) {
+ pRxTS->num = count;
+ INIT_LIST_HEAD(&pRxTS->RxPendingPktList);
+
+ _setup_timer(&pRxTS->TsCommonInfo.SetupTimer,
+ TsSetupTimeOut,
+ (unsigned long) pRxTS);
+
+ _setup_timer(&pRxTS->TsCommonInfo.InactTimer,
+ TsInactTimeout,
+ (unsigned long) pRxTS);
+
+ _setup_timer(&pRxTS->RxAdmittedBARecord.Timer,
+ RxBaInactTimeout,
+ (unsigned long) pRxTS);
+
+ _setup_timer(&pRxTS->RxPktPendingTimer,
+ RxPktPendingTimeout,
+ (unsigned long) pRxTS);
+
+ ResetRxTsEntry(pRxTS);
+ list_add_tail(&pRxTS->TsCommonInfo.List,
+ &ieee->Rx_TS_Unused_List);
+ pRxTS++;
+ }
+ INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
+ for (count = 0; count < REORDER_ENTRY_NUM; count++) {
+ list_add_tail(&pRxReorderEntry->List,
+ &ieee->RxReorder_Unused_List);
+ if (count == (REORDER_ENTRY_NUM-1))
+ break;
+ pRxReorderEntry = &ieee->RxReorderEntry[count+1];
+ }
+
+}
+
+static void AdmitTS(struct rtllib_device *ieee,
+ struct ts_common_info *pTsCommonInfo, u32 InactTime)
+{
+ del_timer_sync(&pTsCommonInfo->SetupTimer);
+ del_timer_sync(&pTsCommonInfo->InactTimer);
+
+ if (InactTime != 0)
+ mod_timer(&pTsCommonInfo->InactTimer, jiffies +
+ MSECS(InactTime));
+}
+
+static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
+ u8 *Addr, u8 TID,
+ enum tr_select TxRxSelect)
+{
+ u8 dir;
+ bool search_dir[4] = {0};
+ struct list_head *psearch_list;
+ struct ts_common_info *pRet = NULL;
+ if (ieee->iw_mode == IW_MODE_MASTER) {
+ if (TxRxSelect == TX_DIR) {
+ search_dir[DIR_DOWN] = true;
+ search_dir[DIR_BI_DIR] = true;
+ } else {
+ search_dir[DIR_UP] = true;
+ search_dir[DIR_BI_DIR] = true;
+ }
+ } else if (ieee->iw_mode == IW_MODE_ADHOC) {
+ if (TxRxSelect == TX_DIR)
+ search_dir[DIR_UP] = true;
+ else
+ search_dir[DIR_DOWN] = true;
+ } else {
+ if (TxRxSelect == TX_DIR) {
+ search_dir[DIR_UP] = true;
+ search_dir[DIR_BI_DIR] = true;
+ search_dir[DIR_DIRECT] = true;
+ } else {
+ search_dir[DIR_DOWN] = true;
+ search_dir[DIR_BI_DIR] = true;
+ search_dir[DIR_DIRECT] = true;
+ }
+ }
+
+ if (TxRxSelect == TX_DIR)
+ psearch_list = &ieee->Tx_TS_Admit_List;
+ else
+ psearch_list = &ieee->Rx_TS_Admit_List;
+
+ for (dir = 0; dir <= DIR_BI_DIR; dir++) {
+ if (search_dir[dir] == false)
+ continue;
+ list_for_each_entry(pRet, psearch_list, List) {
+ if (memcmp(pRet->Addr, Addr, 6) == 0)
+ if (pRet->TSpec.f.TSInfo.field.ucTSID == TID)
+ if (pRet->TSpec.f.TSInfo.field.ucDirection == dir)
+ break;
+
+ }
+ if (&pRet->List != psearch_list)
+ break;
+ }
+
+ if (pRet && &pRet->List != psearch_list)
+ return pRet ;
+ else
+ return NULL;
+}
+
+static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr,
+ union tspec_body *pTSPEC, union qos_tclas *pTCLAS,
+ u8 TCLAS_Num, u8 TCLAS_Proc)
+{
+ u8 count;
+
+ if (pTsCommonInfo == NULL)
+ return;
+
+ memcpy(pTsCommonInfo->Addr, Addr, 6);
+
+ if (pTSPEC != NULL)
+ memcpy((u8 *)(&(pTsCommonInfo->TSpec)), (u8 *)pTSPEC,
+ sizeof(union tspec_body));
+
+ for (count = 0; count < TCLAS_Num; count++)
+ memcpy((u8 *)(&(pTsCommonInfo->TClass[count])),
+ (u8 *)pTCLAS, sizeof(union qos_tclas));
+
+ pTsCommonInfo->TClasProc = TCLAS_Proc;
+ pTsCommonInfo->TClasNum = TCLAS_Num;
+}
+
+bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
+ u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
+{
+ u8 UP = 0;
+ if (is_broadcast_ether_addr(Addr) || is_multicast_ether_addr(Addr)) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR! get TS for Broadcast or "
+ "Multicast\n");
+ return false;
+ }
+ if (ieee->current_network.qos_data.supported == 0) {
+ UP = 0;
+ } else {
+ if (!IsACValid(TID)) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR! in %s(), TID(%d) is "
+ "not valid\n", __func__, TID);
+ return false;
+ }
+
+ switch (TID) {
+ case 0:
+ case 3:
+ UP = 0;
+ break;
+ case 1:
+ case 2:
+ UP = 2;
+ break;
+ case 4:
+ case 5:
+ UP = 5;
+ break;
+ case 6:
+ case 7:
+ UP = 7;
+ break;
+ }
+ }
+
+ *ppTS = SearchAdmitTRStream(ieee, Addr, UP, TxRxSelect);
+ if (*ppTS != NULL) {
+ return true;
+ } else {
+ if (bAddNewTs == false) {
+ RTLLIB_DEBUG(RTLLIB_DL_TS, "add new TS failed"
+ "(tid:%d)\n", UP);
+ return false;
+ } else {
+ union tspec_body TSpec;
+ union qos_tsinfo *pTSInfo = &TSpec.f.TSInfo;
+ struct list_head *pUnusedList =
+ (TxRxSelect == TX_DIR) ?
+ (&ieee->Tx_TS_Unused_List) :
+ (&ieee->Rx_TS_Unused_List);
+
+ struct list_head *pAddmitList =
+ (TxRxSelect == TX_DIR) ?
+ (&ieee->Tx_TS_Admit_List) :
+ (&ieee->Rx_TS_Admit_List);
+
+ enum direction_value Dir =
+ (ieee->iw_mode == IW_MODE_MASTER) ?
+ ((TxRxSelect == TX_DIR) ? DIR_DOWN : DIR_UP) :
+ ((TxRxSelect == TX_DIR) ? DIR_UP : DIR_DOWN);
+ RTLLIB_DEBUG(RTLLIB_DL_TS, "to add Ts\n");
+ if (!list_empty(pUnusedList)) {
+ (*ppTS) = list_entry(pUnusedList->next,
+ struct ts_common_info, List);
+ list_del_init(&(*ppTS)->List);
+ if (TxRxSelect == TX_DIR) {
+ struct tx_ts_record *tmp =
+ container_of(*ppTS,
+ struct tx_ts_record,
+ TsCommonInfo);
+ ResetTxTsEntry(tmp);
+ } else {
+ struct rx_ts_record *tmp =
+ container_of(*ppTS,
+ struct rx_ts_record,
+ TsCommonInfo);
+ ResetRxTsEntry(tmp);
+ }
+
+ RTLLIB_DEBUG(RTLLIB_DL_TS, "to init current TS"
+ ", UP:%d, Dir:%d, addr: %pM"
+ " ppTs=%p\n", UP, Dir,
+ Addr, *ppTS);
+ pTSInfo->field.ucTrafficType = 0;
+ pTSInfo->field.ucTSID = UP;
+ pTSInfo->field.ucDirection = Dir;
+ pTSInfo->field.ucAccessPolicy = 1;
+ pTSInfo->field.ucAggregation = 0;
+ pTSInfo->field.ucPSB = 0;
+ pTSInfo->field.ucUP = UP;
+ pTSInfo->field.ucTSInfoAckPolicy = 0;
+ pTSInfo->field.ucSchedule = 0;
+
+ MakeTSEntry(*ppTS, Addr, &TSpec, NULL, 0, 0);
+ AdmitTS(ieee, *ppTS, 0);
+ list_add_tail(&((*ppTS)->List), pAddmitList);
+
+ return true;
+ } else {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR!!in function "
+ "%s() There is not enough dir=%d"
+ "(0=up down=1) TS record to be "
+ "used!!", __func__, Dir);
+ return false;
+ }
+ }
+ }
+}
+
+static void RemoveTsEntry(struct rtllib_device *ieee, struct ts_common_info *pTs,
+ enum tr_select TxRxSelect)
+{
+ del_timer_sync(&pTs->SetupTimer);
+ del_timer_sync(&pTs->InactTimer);
+ TsInitDelBA(ieee, pTs, TxRxSelect);
+
+ if (TxRxSelect == RX_DIR) {
+ struct rx_reorder_entry *pRxReorderEntry;
+ struct rx_ts_record *pRxTS = (struct rx_ts_record *)pTs;
+
+ if (timer_pending(&pRxTS->RxPktPendingTimer))
+ del_timer_sync(&pRxTS->RxPktPendingTimer);
+
+ while (!list_empty(&pRxTS->RxPendingPktList)) {
+ pRxReorderEntry = (struct rx_reorder_entry *)
+ list_entry(pRxTS->RxPendingPktList.prev,
+ struct rx_reorder_entry, List);
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Delete SeqNum "
+ "%d!\n", __func__,
+ pRxReorderEntry->SeqNum);
+ list_del_init(&pRxReorderEntry->List);
+ {
+ int i = 0;
+ struct rtllib_rxb *prxb = pRxReorderEntry->prxb;
+ if (unlikely(!prxb))
+ return;
+ for (i = 0; i < prxb->nr_subframes; i++)
+ dev_kfree_skb(prxb->subframes[i]);
+ kfree(prxb);
+ prxb = NULL;
+ }
+ list_add_tail(&pRxReorderEntry->List,
+ &ieee->RxReorder_Unused_List);
+ }
+ } else {
+ struct tx_ts_record *pTxTS = (struct tx_ts_record *)pTs;
+ del_timer_sync(&pTxTS->TsAddBaTimer);
+ }
+}
+
+void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr)
+{
+ struct ts_common_info *pTS, *pTmpTS;
+ printk(KERN_INFO "===========>RemovePeerTS, %pM\n", Addr);
+
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
+ if (memcmp(pTS->Addr, Addr, 6) == 0) {
+ RemoveTsEntry(ieee, pTS, TX_DIR);
+ list_del_init(&pTS->List);
+ list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
+ }
+ }
+
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
+ if (memcmp(pTS->Addr, Addr, 6) == 0) {
+ printk(KERN_INFO "====>remove Tx_TS_admin_list\n");
+ RemoveTsEntry(ieee, pTS, TX_DIR);
+ list_del_init(&pTS->List);
+ list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
+ }
+ }
+
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
+ if (memcmp(pTS->Addr, Addr, 6) == 0) {
+ RemoveTsEntry(ieee, pTS, RX_DIR);
+ list_del_init(&pTS->List);
+ list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
+ }
+ }
+
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
+ if (memcmp(pTS->Addr, Addr, 6) == 0) {
+ RemoveTsEntry(ieee, pTS, RX_DIR);
+ list_del_init(&pTS->List);
+ list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
+ }
+ }
+}
+
+void RemoveAllTS(struct rtllib_device *ieee)
+{
+ struct ts_common_info *pTS, *pTmpTS;
+
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
+ RemoveTsEntry(ieee, pTS, TX_DIR);
+ list_del_init(&pTS->List);
+ list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
+ }
+
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
+ RemoveTsEntry(ieee, pTS, TX_DIR);
+ list_del_init(&pTS->List);
+ list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
+ }
+
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
+ RemoveTsEntry(ieee, pTS, RX_DIR);
+ list_del_init(&pTS->List);
+ list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
+ }
+
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
+ RemoveTsEntry(ieee, pTS, RX_DIR);
+ list_del_init(&pTS->List);
+ list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
+ }
+}
+
+void TsStartAddBaProcess(struct rtllib_device *ieee, struct tx_ts_record *pTxTS)
+{
+ if (pTxTS->bAddBaReqInProgress == false) {
+ pTxTS->bAddBaReqInProgress = true;
+
+ if (pTxTS->bAddBaReqDelayed) {
+ RTLLIB_DEBUG(RTLLIB_DL_BA, "TsStartAddBaProcess(): "
+ "Delayed Start ADDBA after 60 sec!!\n");
+ mod_timer(&pTxTS->TsAddBaTimer, jiffies +
+ MSECS(TS_ADDBA_DELAY));
+ } else {
+ RTLLIB_DEBUG(RTLLIB_DL_BA, "TsStartAddBaProcess(): "
+ "Immediately Start ADDBA now!!\n");
+ mod_timer(&pTxTS->TsAddBaTimer, jiffies+10);
+ }
+ } else
+ RTLLIB_DEBUG(RTLLIB_DL_BA, "%s()==>BA timer is already added\n",
+ __func__);
+}
diff --git a/drivers/staging/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl_cam.c
new file mode 100644
index 00000000000..baf3b6342e4
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_cam.c
@@ -0,0 +1,304 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#include "rtl_core.h"
+#include "r8192E_phy.h"
+#include "r8192E_phyreg.h"
+#include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
+#include "r8192E_cmdpkt.h"
+
+extern int hwwep;
+void CamResetAllEntry(struct net_device *dev)
+{
+ u32 ulcommand = 0;
+
+ ulcommand |= BIT31|BIT30;
+ write_nic_dword(dev, RWCAM, ulcommand);
+}
+
+void write_cam(struct net_device *dev, u8 addr, u32 data)
+{
+ write_nic_dword(dev, WCAMI, data);
+ write_nic_dword(dev, RWCAM, BIT31|BIT16|(addr&0xff));
+}
+
+u32 read_cam(struct net_device *dev, u8 addr)
+{
+ write_nic_dword(dev, RWCAM, 0x80000000|(addr&0xff));
+ return read_nic_dword(dev, 0xa8);
+}
+
+void EnableHWSecurityConfig8192(struct net_device *dev)
+{
+ u8 SECR_value = 0x0;
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+ SECR_value = SCR_TxEncEnable | SCR_RxDecEnable;
+ if (((KEY_TYPE_WEP40 == ieee->pairwise_key_type) ||
+ (KEY_TYPE_WEP104 == ieee->pairwise_key_type)) &&
+ (priv->rtllib->auth_mode != 2)) {
+ SECR_value |= SCR_RxUseDK;
+ SECR_value |= SCR_TxUseDK;
+ } else if ((ieee->iw_mode == IW_MODE_ADHOC) &&
+ (ieee->pairwise_key_type & (KEY_TYPE_CCMP |
+ KEY_TYPE_TKIP))) {
+ SECR_value |= SCR_RxUseDK;
+ SECR_value |= SCR_TxUseDK;
+ }
+
+
+ ieee->hwsec_active = 1;
+ if ((ieee->pHTInfo->IOTAction&HT_IOT_ACT_PURE_N_MODE) || !hwwep) {
+ ieee->hwsec_active = 0;
+ SECR_value &= ~SCR_RxDecEnable;
+ }
+
+ RT_TRACE(COMP_SEC, "%s:, hwsec:%d, pairwise_key:%d, SECR_value:%x\n",
+ __func__, ieee->hwsec_active, ieee->pairwise_key_type,
+ SECR_value);
+ write_nic_byte(dev, SECR, SECR_value);
+}
+
+void set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
+ u8 *MacAddr, u8 DefaultKey, u32 *KeyContent, u8 is_mesh)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+ RT_TRACE(COMP_DBG, "===========>%s():EntryNo is %d,KeyIndex is "
+ "%d,KeyType is %d,is_mesh is %d\n", __func__, EntryNo,
+ KeyIndex, KeyType, is_mesh);
+ if (!is_mesh) {
+ ieee->swcamtable[EntryNo].bused = true;
+ ieee->swcamtable[EntryNo].key_index = KeyIndex;
+ ieee->swcamtable[EntryNo].key_type = KeyType;
+ memcpy(ieee->swcamtable[EntryNo].macaddr, MacAddr, 6);
+ ieee->swcamtable[EntryNo].useDK = DefaultKey;
+ memcpy(ieee->swcamtable[EntryNo].key_buf, (u8 *)KeyContent, 16);
+ }
+}
+
+void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
+ u8 *MacAddr, u8 DefaultKey, u32 *KeyContent)
+{
+ u32 TargetCommand = 0;
+ u32 TargetContent = 0;
+ u16 usConfig = 0;
+ u8 i;
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ enum rt_rf_power_state rtState;
+ rtState = priv->rtllib->eRFPowerState;
+ if (priv->rtllib->PowerSaveControl.bInactivePs) {
+ if (rtState == eRfOff) {
+ if (priv->rtllib->RfOffReason > RF_CHANGE_BY_IPS) {
+ RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n",
+ __func__);
+ return ;
+ } else {
+ down(&priv->rtllib->ips_sem);
+ IPSLeave(dev);
+ up(&priv->rtllib->ips_sem);
+ }
+ }
+ }
+ priv->rtllib->is_set_key = true;
+ if (EntryNo >= TOTAL_CAM_ENTRY)
+ RT_TRACE(COMP_ERR, "cam entry exceeds in setKey()\n");
+
+ RT_TRACE(COMP_SEC, "====>to setKey(), dev:%p, EntryNo:%d, KeyIndex:%d,"
+ "KeyType:%d, MacAddr %pM\n", dev, EntryNo, KeyIndex,
+ KeyType, MacAddr);
+
+ if (DefaultKey)
+ usConfig |= BIT15 | (KeyType<<2);
+ else
+ usConfig |= BIT15 | (KeyType<<2) | KeyIndex;
+
+
+ for (i = 0; i < CAM_CONTENT_COUNT; i++) {
+ TargetCommand = i + CAM_CONTENT_COUNT * EntryNo;
+ TargetCommand |= BIT31|BIT16;
+
+ if (i == 0) {
+ TargetContent = (u32)(*(MacAddr+0)) << 16 |
+ (u32)(*(MacAddr+1)) << 24 |
+ (u32)usConfig;
+
+ write_nic_dword(dev, WCAMI, TargetContent);
+ write_nic_dword(dev, RWCAM, TargetCommand);
+ } else if (i == 1) {
+ TargetContent = (u32)(*(MacAddr+2)) |
+ (u32)(*(MacAddr+3)) << 8 |
+ (u32)(*(MacAddr+4)) << 16 |
+ (u32)(*(MacAddr+5)) << 24;
+ write_nic_dword(dev, WCAMI, TargetContent);
+ write_nic_dword(dev, RWCAM, TargetCommand);
+ } else {
+ if (KeyContent != NULL) {
+ write_nic_dword(dev, WCAMI,
+ (u32)(*(KeyContent+i-2)));
+ write_nic_dword(dev, RWCAM, TargetCommand);
+ udelay(100);
+ }
+ }
+ }
+ RT_TRACE(COMP_SEC, "=========>after set key, usconfig:%x\n", usConfig);
+}
+
+void CAM_read_entry(struct net_device *dev, u32 iIndex)
+{
+ u32 target_command = 0;
+ u32 target_content = 0;
+ u8 entry_i = 0;
+ u32 ulStatus;
+ s32 i = 100;
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ target_command = entry_i+CAM_CONTENT_COUNT*iIndex;
+ target_command = target_command | BIT31;
+
+ while ((i--) >= 0) {
+ ulStatus = read_nic_dword(dev, RWCAM);
+ if (ulStatus & BIT31)
+ continue;
+ else
+ break;
+ }
+ write_nic_dword(dev, RWCAM, target_command);
+ RT_TRACE(COMP_SEC, "CAM_read_entry(): WRITE A0: %x\n",
+ target_command);
+ target_content = read_nic_dword(dev, RCAMO);
+ RT_TRACE(COMP_SEC, "CAM_read_entry(): WRITE A8: %x\n",
+ target_content);
+ }
+ printk(KERN_INFO "\n");
+}
+
+void CamRestoreAllEntry(struct net_device *dev)
+{
+ u8 EntryId = 0;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 *MacAddr = priv->rtllib->current_network.bssid;
+
+ static u8 CAM_CONST_ADDR[4][6] = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+ };
+ static u8 CAM_CONST_BROAD[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ RT_TRACE(COMP_SEC, "CamRestoreAllEntry:\n");
+
+
+ if ((priv->rtllib->pairwise_key_type == KEY_TYPE_WEP40) ||
+ (priv->rtllib->pairwise_key_type == KEY_TYPE_WEP104)) {
+
+ for (EntryId = 0; EntryId < 4; EntryId++) {
+ MacAddr = CAM_CONST_ADDR[EntryId];
+ if (priv->rtllib->swcamtable[EntryId].bused) {
+ setKey(dev, EntryId , EntryId,
+ priv->rtllib->pairwise_key_type, MacAddr,
+ 0, (u32 *)(&priv->rtllib->swcamtable
+ [EntryId].key_buf[0]));
+ }
+ }
+
+ } else if (priv->rtllib->pairwise_key_type == KEY_TYPE_TKIP) {
+ if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
+ setKey(dev, 4, 0, priv->rtllib->pairwise_key_type,
+ (u8 *)dev->dev_addr, 0,
+ (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
+ } else {
+ setKey(dev, 4, 0, priv->rtllib->pairwise_key_type,
+ MacAddr, 0,
+ (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
+ }
+
+ } else if (priv->rtllib->pairwise_key_type == KEY_TYPE_CCMP) {
+ if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
+ setKey(dev, 4, 0,
+ priv->rtllib->pairwise_key_type,
+ (u8 *)dev->dev_addr, 0,
+ (u32 *)(&priv->rtllib->swcamtable[4].
+ key_buf[0]));
+ } else {
+ setKey(dev, 4, 0,
+ priv->rtllib->pairwise_key_type, MacAddr,
+ 0, (u32 *)(&priv->rtllib->swcamtable[4].
+ key_buf[0]));
+ }
+ }
+
+ if (priv->rtllib->group_key_type == KEY_TYPE_TKIP) {
+ MacAddr = CAM_CONST_BROAD;
+ for (EntryId = 1; EntryId < 4; EntryId++) {
+ if (priv->rtllib->swcamtable[EntryId].bused) {
+ setKey(dev, EntryId, EntryId,
+ priv->rtllib->group_key_type,
+ MacAddr, 0,
+ (u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0])
+ );
+ }
+ }
+ if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
+ if (priv->rtllib->swcamtable[0].bused) {
+ setKey(dev, 0, 0,
+ priv->rtllib->group_key_type,
+ CAM_CONST_ADDR[0], 0,
+ (u32 *)(&priv->rtllib->swcamtable[0].key_buf[0])
+ );
+ } else {
+ RT_TRACE(COMP_ERR, "===>%s():ERR!! ADHOC TKIP "
+ ",but 0 entry is have no data\n",
+ __func__);
+ return;
+ }
+ }
+ } else if (priv->rtllib->group_key_type == KEY_TYPE_CCMP) {
+ MacAddr = CAM_CONST_BROAD;
+ for (EntryId = 1; EntryId < 4; EntryId++) {
+ if (priv->rtllib->swcamtable[EntryId].bused) {
+ setKey(dev, EntryId , EntryId,
+ priv->rtllib->group_key_type,
+ MacAddr, 0,
+ (u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0]));
+ }
+ }
+
+ if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
+ if (priv->rtllib->swcamtable[0].bused) {
+ setKey(dev, 0 , 0,
+ priv->rtllib->group_key_type,
+ CAM_CONST_ADDR[0], 0,
+ (u32 *)(&priv->rtllib->swcamtable[0].key_buf[0]));
+ } else {
+ RT_TRACE(COMP_ERR, "===>%s():ERR!! ADHOC CCMP ,"
+ "but 0 entry is have no data\n",
+ __func__);
+ return;
+ }
+ }
+ }
+}
diff --git a/drivers/staging/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl_cam.h
new file mode 100644
index 00000000000..fa607f98b17
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_cam.h
@@ -0,0 +1,46 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef _RTL_CAM_H
+#define _RTL_CAM_H
+
+#include <linux/types.h>
+struct net_device;
+
+void CamResetAllEntry(struct net_device *dev);
+void EnableHWSecurityConfig8192(struct net_device *dev);
+void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
+ u8 *MacAddr, u8 DefaultKey, u32 *KeyContent);
+void set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
+ u8 *MacAddr, u8 DefaultKey, u32 *KeyContent, u8 is_mesh);
+void CamPrintDbgReg(struct net_device *dev);
+
+u32 read_cam(struct net_device *dev, u8 addr);
+void write_cam(struct net_device *dev, u8 addr, u32 data);
+
+void CamRestoreAllEntry(struct net_device *dev);
+
+void CAM_read_entry(struct net_device *dev, u32 iIndex);
+
+#endif
diff --git a/drivers/staging/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl_core.c
new file mode 100644
index 00000000000..5ad96649f40
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_core.c
@@ -0,0 +1,3198 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#undef RX_DONT_PASS_UL
+#undef DEBUG_EPROM
+#undef DEBUG_RX_VERBOSE
+#undef DUMMY_RX
+#undef DEBUG_ZERO_RX
+#undef DEBUG_RX_SKB
+#undef DEBUG_TX_FRAG
+#undef DEBUG_RX_FRAG
+#undef DEBUG_TX_FILLDESC
+#undef DEBUG_TX
+#undef DEBUG_IRQ
+#undef DEBUG_RX
+#undef DEBUG_RXALLOC
+#undef DEBUG_REGISTERS
+#undef DEBUG_RING
+#undef DEBUG_IRQ_TASKLET
+#undef DEBUG_TX_ALLOC
+#undef DEBUG_TX_DESC
+
+#include <linux/uaccess.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include "rtl_core.h"
+#include "r8192E_phy.h"
+#include "r8192E_phyreg.h"
+#include "r8190P_rtl8256.h"
+#include "r8192E_cmdpkt.h"
+
+#include "rtl_wx.h"
+#include "rtl_dm.h"
+
+#ifdef CONFIG_PM_RTL
+#include "rtl_pm.h"
+#endif
+
+int hwwep = 1;
+static int channels = 0x3fff;
+static char *ifname = "wlan%d";
+
+
+static struct rtl819x_ops rtl819xp_ops = {
+ .nic_type = NIC_8192E,
+ .get_eeprom_size = rtl8192_get_eeprom_size,
+ .init_adapter_variable = rtl8192_InitializeVariables,
+ .initialize_adapter = rtl8192_adapter_start,
+ .link_change = rtl8192_link_change,
+ .tx_fill_descriptor = rtl8192_tx_fill_desc,
+ .tx_fill_cmd_descriptor = rtl8192_tx_fill_cmd_desc,
+ .rx_query_status_descriptor = rtl8192_rx_query_status_desc,
+ .rx_command_packet_handler = NULL,
+ .stop_adapter = rtl8192_halt_adapter,
+ .update_ratr_table = rtl8192_update_ratr_table,
+ .irq_enable = rtl8192_EnableInterrupt,
+ .irq_disable = rtl8192_DisableInterrupt,
+ .irq_clear = rtl8192_ClearInterrupt,
+ .rx_enable = rtl8192_enable_rx,
+ .tx_enable = rtl8192_enable_tx,
+ .interrupt_recognized = rtl8192_interrupt_recognized,
+ .TxCheckStuckHandler = rtl8192_HalTxCheckStuck,
+ .RxCheckStuckHandler = rtl8192_HalRxCheckStuck,
+};
+
+static struct pci_device_id rtl8192_pci_id_tbl[] __devinitdata = {
+ {RTL_PCI_DEVICE(0x10ec, 0x8192, rtl819xp_ops)},
+ {RTL_PCI_DEVICE(0x07aa, 0x0044, rtl819xp_ops)},
+ {RTL_PCI_DEVICE(0x07aa, 0x0047, rtl819xp_ops)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, rtl8192_pci_id_tbl);
+
+static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id);
+static void __devexit rtl8192_pci_disconnect(struct pci_dev *pdev);
+
+static struct pci_driver rtl8192_pci_driver = {
+ .name = DRV_NAME, /* Driver name */
+ .id_table = rtl8192_pci_id_tbl, /* PCI_ID table */
+ .probe = rtl8192_pci_probe, /* probe fn */
+ .remove = __devexit_p(rtl8192_pci_disconnect), /* remove fn */
+ .suspend = rtl8192E_suspend, /* PM suspend fn */
+ .resume = rtl8192E_resume, /* PM resume fn */
+};
+
+/****************************************************************************
+ -----------------------------IO STUFF-------------------------
+*****************************************************************************/
+static bool PlatformIOCheckPageLegalAndGetRegMask(u32 u4bPage, u8 *pu1bPageMask)
+{
+ bool bReturn = false;
+
+ *pu1bPageMask = 0xfe;
+
+ switch (u4bPage) {
+ case 1: case 2: case 3: case 4:
+ case 8: case 9: case 10: case 12: case 13:
+ bReturn = true;
+ *pu1bPageMask = 0xf0;
+ break;
+
+ default:
+ bReturn = false;
+ break;
+ }
+
+ return bReturn;
+}
+
+void write_nic_io_byte(struct net_device *dev, int x, u8 y)
+{
+ u32 u4bPage = (x >> 8);
+ u8 u1PageMask = 0;
+ bool bIsLegalPage = false;
+
+ if (u4bPage == 0) {
+ outb(y&0xff, dev->base_addr + x);
+
+ } else {
+ bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
+ &u1PageMask);
+ if (bIsLegalPage) {
+ u8 u1bPsr = read_nic_io_byte(dev, PSR);
+
+ write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
+ (u8)u4bPage));
+ write_nic_io_byte(dev, (x & 0xff), y);
+ write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
+ }
+ }
+}
+
+void write_nic_io_word(struct net_device *dev, int x, u16 y)
+{
+ u32 u4bPage = (x >> 8);
+ u8 u1PageMask = 0;
+ bool bIsLegalPage = false;
+
+ if (u4bPage == 0) {
+ outw(y, dev->base_addr + x);
+ } else {
+ bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
+ &u1PageMask);
+ if (bIsLegalPage) {
+ u8 u1bPsr = read_nic_io_byte(dev, PSR);
+
+ write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
+ (u8)u4bPage));
+ write_nic_io_word(dev, (x & 0xff), y);
+ write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
+
+ }
+ }
+}
+
+void write_nic_io_dword(struct net_device *dev, int x, u32 y)
+{
+ u32 u4bPage = (x >> 8);
+ u8 u1PageMask = 0;
+ bool bIsLegalPage = false;
+
+ if (u4bPage == 0) {
+ outl(y, dev->base_addr + x);
+ } else {
+ bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
+ &u1PageMask);
+ if (bIsLegalPage) {
+ u8 u1bPsr = read_nic_io_byte(dev, PSR);
+
+ write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
+ (u8)u4bPage));
+ write_nic_io_dword(dev, (x & 0xff), y);
+ write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
+ }
+ }
+}
+
+u8 read_nic_io_byte(struct net_device *dev, int x)
+{
+ u32 u4bPage = (x >> 8);
+ u8 u1PageMask = 0;
+ bool bIsLegalPage = false;
+ u8 Data = 0;
+
+ if (u4bPage == 0) {
+ return 0xff&inb(dev->base_addr + x);
+ } else {
+ bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
+ &u1PageMask);
+ if (bIsLegalPage) {
+ u8 u1bPsr = read_nic_io_byte(dev, PSR);
+
+ write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
+ (u8)u4bPage));
+ Data = read_nic_io_byte(dev, (x & 0xff));
+ write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
+ }
+ }
+
+ return Data;
+}
+
+u16 read_nic_io_word(struct net_device *dev, int x)
+{
+ u32 u4bPage = (x >> 8);
+ u8 u1PageMask = 0;
+ bool bIsLegalPage = false;
+ u16 Data = 0;
+
+ if (u4bPage == 0) {
+ return inw(dev->base_addr + x);
+ } else {
+ bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
+ &u1PageMask);
+ if (bIsLegalPage) {
+ u8 u1bPsr = read_nic_io_byte(dev, PSR);
+
+ write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
+ (u8)u4bPage));
+ Data = read_nic_io_word(dev, (x & 0xff));
+ write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
+
+ }
+ }
+
+ return Data;
+}
+
+u32 read_nic_io_dword(struct net_device *dev, int x)
+{
+ u32 u4bPage = (x >> 8);
+ u8 u1PageMask = 0;
+ bool bIsLegalPage = false;
+ u32 Data = 0;
+
+ if (u4bPage == 0) {
+ return inl(dev->base_addr + x);
+ } else {
+ bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
+ &u1PageMask);
+ if (bIsLegalPage) {
+ u8 u1bPsr = read_nic_io_byte(dev, PSR);
+
+ write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
+ (u8)u4bPage));
+ Data = read_nic_io_dword(dev, (x & 0xff));
+ write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
+
+ }
+ }
+
+ return Data;
+}
+
+u8 read_nic_byte(struct net_device *dev, int x)
+{
+ return 0xff & readb((u8 __iomem *)dev->mem_start + x);
+}
+
+u32 read_nic_dword(struct net_device *dev, int x)
+{
+ return readl((u8 __iomem *)dev->mem_start + x);
+}
+
+u16 read_nic_word(struct net_device *dev, int x)
+{
+ return readw((u8 __iomem *)dev->mem_start + x);
+}
+
+void write_nic_byte(struct net_device *dev, int x, u8 y)
+{
+ writeb(y, (u8 __iomem *)dev->mem_start + x);
+
+ udelay(20);
+}
+
+void write_nic_dword(struct net_device *dev, int x, u32 y)
+{
+ writel(y, (u8 __iomem *)dev->mem_start + x);
+
+ udelay(20);
+}
+
+void write_nic_word(struct net_device *dev, int x, u16 y)
+{
+ writew(y, (u8 __iomem *)dev->mem_start + x);
+
+ udelay(20);
+}
+
+/****************************************************************************
+ -----------------------------GENERAL FUNCTION-------------------------
+*****************************************************************************/
+bool MgntActSet_RF_State(struct net_device *dev,
+ enum rt_rf_power_state StateToSet,
+ RT_RF_CHANGE_SOURCE ChangeSource,
+ bool ProtectOrNot)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+ bool bActionAllowed = false;
+ bool bConnectBySSID = false;
+ enum rt_rf_power_state rtState;
+ u16 RFWaitCounter = 0;
+ unsigned long flag;
+ RT_TRACE((COMP_PS | COMP_RF), "===>MgntActSet_RF_State(): "
+ "StateToSet(%d)\n", StateToSet);
+
+ ProtectOrNot = false;
+
+
+ if (!ProtectOrNot) {
+ while (true) {
+ spin_lock_irqsave(&priv->rf_ps_lock, flag);
+ if (priv->RFChangeInProgress) {
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
+ RT_TRACE((COMP_PS | COMP_RF),
+ "MgntActSet_RF_State(): RF Change in "
+ "progress! Wait to set..StateToSet"
+ "(%d).\n", StateToSet);
+
+ while (priv->RFChangeInProgress) {
+ RFWaitCounter++;
+ RT_TRACE((COMP_PS | COMP_RF),
+ "MgntActSet_RF_State(): Wait 1"
+ " ms (%d times)...\n",
+ RFWaitCounter);
+ mdelay(1);
+
+ if (RFWaitCounter > 100) {
+ RT_TRACE(COMP_ERR, "MgntActSet_"
+ "RF_State(): Wait too "
+ "logn to set RF\n");
+ return false;
+ }
+ }
+ } else {
+ priv->RFChangeInProgress = true;
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
+ break;
+ }
+ }
+ }
+
+ rtState = priv->rtllib->eRFPowerState;
+
+ switch (StateToSet) {
+ case eRfOn:
+ priv->rtllib->RfOffReason &= (~ChangeSource);
+
+ if ((ChangeSource == RF_CHANGE_BY_HW) &&
+ (priv->bHwRadioOff == true))
+ priv->bHwRadioOff = false;
+
+ if (!priv->rtllib->RfOffReason) {
+ priv->rtllib->RfOffReason = 0;
+ bActionAllowed = true;
+
+
+ if (rtState == eRfOff &&
+ ChangeSource >= RF_CHANGE_BY_HW)
+ bConnectBySSID = true;
+ } else {
+ RT_TRACE((COMP_PS | COMP_RF), "MgntActSet_RF_State - "
+ "eRfon reject pMgntInfo->RfOffReason= 0x%x,"
+ " ChangeSource=0x%X\n",
+ priv->rtllib->RfOffReason, ChangeSource);
+ }
+
+ break;
+
+ case eRfOff:
+
+ if ((priv->rtllib->iw_mode == IW_MODE_INFRA) ||
+ (priv->rtllib->iw_mode == IW_MODE_ADHOC)) {
+ if ((priv->rtllib->RfOffReason > RF_CHANGE_BY_IPS) ||
+ (ChangeSource > RF_CHANGE_BY_IPS)) {
+ if (ieee->state == RTLLIB_LINKED)
+ priv->blinked_ingpio = true;
+ else
+ priv->blinked_ingpio = false;
+ rtllib_MgntDisconnect(priv->rtllib,
+ disas_lv_ss);
+ }
+ }
+ if ((ChangeSource == RF_CHANGE_BY_HW) &&
+ (priv->bHwRadioOff == false))
+ priv->bHwRadioOff = true;
+ priv->rtllib->RfOffReason |= ChangeSource;
+ bActionAllowed = true;
+ break;
+
+ case eRfSleep:
+ priv->rtllib->RfOffReason |= ChangeSource;
+ bActionAllowed = true;
+ break;
+
+ default:
+ break;
+ }
+
+ if (bActionAllowed) {
+ RT_TRACE((COMP_PS | COMP_RF), "MgntActSet_RF_State(): Action is"
+ " allowed.... StateToSet(%d), RfOffReason(%#X)\n",
+ StateToSet, priv->rtllib->RfOffReason);
+ PHY_SetRFPowerState(dev, StateToSet);
+ if (StateToSet == eRfOn) {
+
+ if (bConnectBySSID && (priv->blinked_ingpio == true)) {
+ queue_delayed_work_rsl(ieee->wq,
+ &ieee->associate_procedure_wq, 0);
+ priv->blinked_ingpio = false;
+ }
+ }
+ } else {
+ RT_TRACE((COMP_PS | COMP_RF), "MgntActSet_RF_State(): "
+ "Action is rejected.... StateToSet(%d), ChangeSource"
+ "(%#X), RfOffReason(%#X)\n", StateToSet, ChangeSource,
+ priv->rtllib->RfOffReason);
+ }
+
+ if (!ProtectOrNot) {
+ spin_lock_irqsave(&priv->rf_ps_lock, flag);
+ priv->RFChangeInProgress = false;
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
+ }
+
+ RT_TRACE((COMP_PS | COMP_RF), "<===MgntActSet_RF_State()\n");
+ return bActionAllowed;
+}
+
+
+static short rtl8192_get_nic_desc_num(struct net_device *dev, int prio)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
+
+ /* For now, we reserved two free descriptor as a safety boundary
+ * between the tail and the head
+ */
+ if ((prio == MGNT_QUEUE) && (skb_queue_len(&ring->queue) > 10))
+ RT_TRACE(COMP_DBG, "-----[%d]---------ring->idx=%d "
+ "queue_len=%d---------\n", prio, ring->idx,
+ skb_queue_len(&ring->queue));
+ return skb_queue_len(&ring->queue);
+}
+
+static short rtl8192_check_nic_enough_desc(struct net_device *dev, int prio)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
+
+ if (ring->entries - skb_queue_len(&ring->queue) >= 2)
+ return 1;
+ return 0;
+}
+
+void rtl8192_tx_timeout(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ schedule_work(&priv->reset_wq);
+ printk(KERN_INFO "TXTIMEOUT");
+}
+
+void rtl8192_irq_enable(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ priv->irq_enabled = 1;
+
+ priv->ops->irq_enable(dev);
+}
+
+void rtl8192_irq_disable(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+
+ priv->ops->irq_disable(dev);
+
+ priv->irq_enabled = 0;
+}
+
+void rtl8192_set_chan(struct net_device *dev, short ch)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+
+ RT_TRACE(COMP_CH, "=====>%s()====ch:%d\n", __func__, ch);
+ if (priv->chan_forced)
+ return;
+
+ priv->chan = ch;
+
+ if (priv->rf_set_chan)
+ priv->rf_set_chan(dev, priv->chan);
+}
+
+void rtl8192_update_cap(struct net_device *dev, u16 cap)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_network *net = &priv->rtllib->current_network;
+ bool ShortPreamble;
+
+ if (cap & WLAN_CAPABILITY_SHORT_PREAMBLE) {
+ if (priv->dot11CurrentPreambleMode != PREAMBLE_SHORT) {
+ ShortPreamble = true;
+ priv->dot11CurrentPreambleMode = PREAMBLE_SHORT;
+ RT_TRACE(COMP_DBG, "%s(): WLAN_CAPABILITY_SHORT_"
+ "PREAMBLE\n", __func__);
+ priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE,
+ (unsigned char *)&ShortPreamble);
+ }
+ } else {
+ if (priv->dot11CurrentPreambleMode != PREAMBLE_LONG) {
+ ShortPreamble = false;
+ priv->dot11CurrentPreambleMode = PREAMBLE_LONG;
+ RT_TRACE(COMP_DBG, "%s(): WLAN_CAPABILITY_LONG_"
+ "PREAMBLE\n", __func__);
+ priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE,
+ (unsigned char *)&ShortPreamble);
+ }
+ }
+
+ if (net->mode & (IEEE_G|IEEE_N_24G)) {
+ u8 slot_time_val;
+ u8 CurSlotTime = priv->slot_time;
+
+ if ((cap & WLAN_CAPABILITY_SHORT_SLOT_TIME) &&
+ (!priv->rtllib->pHTInfo->bCurrentRT2RTLongSlotTime)) {
+ if (CurSlotTime != SHORT_SLOT_TIME) {
+ slot_time_val = SHORT_SLOT_TIME;
+ priv->rtllib->SetHwRegHandler(dev,
+ HW_VAR_SLOT_TIME, &slot_time_val);
+ }
+ } else {
+ if (CurSlotTime != NON_SHORT_SLOT_TIME) {
+ slot_time_val = NON_SHORT_SLOT_TIME;
+ priv->rtllib->SetHwRegHandler(dev,
+ HW_VAR_SLOT_TIME, &slot_time_val);
+ }
+ }
+ }
+}
+
+static struct rtllib_qos_parameters def_qos_parameters = {
+ {3, 3, 3, 3},
+ {7, 7, 7, 7},
+ {2, 2, 2, 2},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+};
+
+static void rtl8192_update_beacon(void *data)
+{
+ struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv,
+ update_beacon_wq.work);
+ struct net_device *dev = priv->rtllib->dev;
+ struct rtllib_device *ieee = priv->rtllib;
+ struct rtllib_network *net = &ieee->current_network;
+
+ if (ieee->pHTInfo->bCurrentHTSupport)
+ HTUpdateSelfAndPeerSetting(ieee, net);
+ ieee->pHTInfo->bCurrentRT2RTLongSlotTime =
+ net->bssht.bdRT2RTLongSlotTime;
+ ieee->pHTInfo->RT2RT_HT_Mode = net->bssht.RT2RT_HT_Mode;
+ rtl8192_update_cap(dev, net->capability);
+}
+
+int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI, EDCAPARA_VO};
+
+static void rtl8192_qos_activate(void *data)
+{
+ struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv,
+ qos_activate);
+ struct net_device *dev = priv->rtllib->dev;
+ int i;
+
+ mutex_lock(&priv->mutex);
+ if (priv->rtllib->state != RTLLIB_LINKED)
+ goto success;
+ RT_TRACE(COMP_QOS, "qos active process with associate response "
+ "received\n");
+
+ for (i = 0; i < QOS_QUEUE_NUM; i++) {
+ priv->rtllib->SetHwRegHandler(dev, HW_VAR_AC_PARAM, (u8 *)(&i));
+ }
+
+success:
+ mutex_unlock(&priv->mutex);
+}
+
+static int rtl8192_qos_handle_probe_response(struct r8192_priv *priv,
+ int active_network,
+ struct rtllib_network *network)
+{
+ int ret = 0;
+ u32 size = sizeof(struct rtllib_qos_parameters);
+
+ if (priv->rtllib->state != RTLLIB_LINKED)
+ return ret;
+
+ if ((priv->rtllib->iw_mode != IW_MODE_INFRA))
+ return ret;
+
+ if (network->flags & NETWORK_HAS_QOS_MASK) {
+ if (active_network &&
+ (network->flags & NETWORK_HAS_QOS_PARAMETERS))
+ network->qos_data.active = network->qos_data.supported;
+
+ if ((network->qos_data.active == 1) && (active_network == 1) &&
+ (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
+ (network->qos_data.old_param_count !=
+ network->qos_data.param_count)) {
+ network->qos_data.old_param_count =
+ network->qos_data.param_count;
+ priv->rtllib->wmm_acm = network->qos_data.wmm_acm;
+ queue_work_rsl(priv->priv_wq, &priv->qos_activate);
+ RT_TRACE(COMP_QOS, "QoS parameters change call "
+ "qos_activate\n");
+ }
+ } else {
+ memcpy(&priv->rtllib->current_network.qos_data.parameters,
+ &def_qos_parameters, size);
+
+ if ((network->qos_data.active == 1) && (active_network == 1)) {
+ queue_work_rsl(priv->priv_wq, &priv->qos_activate);
+ RT_TRACE(COMP_QOS, "QoS was disabled call qos_"
+ "activate\n");
+ }
+ network->qos_data.active = 0;
+ network->qos_data.supported = 0;
+ }
+
+ return 0;
+}
+
+static int rtl8192_handle_beacon(struct net_device *dev,
+ struct rtllib_beacon *beacon,
+ struct rtllib_network *network)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ rtl8192_qos_handle_probe_response(priv, 1, network);
+
+ queue_delayed_work_rsl(priv->priv_wq, &priv->update_beacon_wq, 0);
+ return 0;
+
+}
+
+static int rtl8192_qos_association_resp(struct r8192_priv *priv,
+ struct rtllib_network *network)
+{
+ int ret = 0;
+ unsigned long flags;
+ u32 size = sizeof(struct rtllib_qos_parameters);
+ int set_qos_param = 0;
+
+ if ((priv == NULL) || (network == NULL))
+ return ret;
+
+ if (priv->rtllib->state != RTLLIB_LINKED)
+ return ret;
+
+ if ((priv->rtllib->iw_mode != IW_MODE_INFRA))
+ return ret;
+
+ spin_lock_irqsave(&priv->rtllib->lock, flags);
+ if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
+ memcpy(&priv->rtllib->current_network.qos_data.parameters,
+ &network->qos_data.parameters,
+ sizeof(struct rtllib_qos_parameters));
+ priv->rtllib->current_network.qos_data.active = 1;
+ priv->rtllib->wmm_acm = network->qos_data.wmm_acm;
+ set_qos_param = 1;
+ priv->rtllib->current_network.qos_data.old_param_count =
+ priv->rtllib->current_network.qos_data.param_count;
+ priv->rtllib->current_network.qos_data.param_count =
+ network->qos_data.param_count;
+ } else {
+ memcpy(&priv->rtllib->current_network.qos_data.parameters,
+ &def_qos_parameters, size);
+ priv->rtllib->current_network.qos_data.active = 0;
+ priv->rtllib->current_network.qos_data.supported = 0;
+ set_qos_param = 1;
+ }
+
+ spin_unlock_irqrestore(&priv->rtllib->lock, flags);
+
+ RT_TRACE(COMP_QOS, "%s: network->flags = %d,%d\n", __func__,
+ network->flags, priv->rtllib->current_network.qos_data.active);
+ if (set_qos_param == 1) {
+ dm_init_edca_turbo(priv->rtllib->dev);
+ queue_work_rsl(priv->priv_wq, &priv->qos_activate);
+ }
+ return ret;
+}
+
+static int rtl8192_handle_assoc_response(struct net_device *dev,
+ struct rtllib_assoc_response_frame *resp,
+ struct rtllib_network *network)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ rtl8192_qos_association_resp(priv, network);
+ return 0;
+}
+
+static void rtl8192_prepare_beacon(struct r8192_priv *priv)
+{
+ struct net_device *dev = priv->rtllib->dev;
+ struct sk_buff *pskb = NULL, *pnewskb = NULL;
+ struct cb_desc *tcb_desc = NULL;
+ struct rtl8192_tx_ring *ring = NULL;
+ struct tx_desc *pdesc = NULL;
+
+ ring = &priv->tx_ring[BEACON_QUEUE];
+ pskb = __skb_dequeue(&ring->queue);
+ if (pskb)
+ kfree_skb(pskb);
+
+ pnewskb = rtllib_get_beacon(priv->rtllib);
+ if (!pnewskb)
+ return;
+
+ tcb_desc = (struct cb_desc *)(pnewskb->cb + 8);
+ tcb_desc->queue_index = BEACON_QUEUE;
+ tcb_desc->data_rate = 2;
+ tcb_desc->RATRIndex = 7;
+ tcb_desc->bTxDisableRateFallBack = 1;
+ tcb_desc->bTxUseDriverAssingedRate = 1;
+ skb_push(pnewskb, priv->rtllib->tx_headroom);
+
+ pdesc = &ring->desc[0];
+ priv->ops->tx_fill_descriptor(dev, pdesc, tcb_desc, pnewskb);
+ __skb_queue_tail(&ring->queue, pnewskb);
+ pdesc->OWN = 1;
+
+ return;
+}
+
+static void rtl8192_stop_beacon(struct net_device *dev)
+{
+}
+
+void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_network *net;
+ u8 i = 0, basic_rate = 0;
+ net = &priv->rtllib->current_network;
+
+ for (i = 0; i < net->rates_len; i++) {
+ basic_rate = net->rates[i] & 0x7f;
+ switch (basic_rate) {
+ case MGN_1M:
+ *rate_config |= RRSR_1M;
+ break;
+ case MGN_2M:
+ *rate_config |= RRSR_2M;
+ break;
+ case MGN_5_5M:
+ *rate_config |= RRSR_5_5M;
+ break;
+ case MGN_11M:
+ *rate_config |= RRSR_11M;
+ break;
+ case MGN_6M:
+ *rate_config |= RRSR_6M;
+ break;
+ case MGN_9M:
+ *rate_config |= RRSR_9M;
+ break;
+ case MGN_12M:
+ *rate_config |= RRSR_12M;
+ break;
+ case MGN_18M:
+ *rate_config |= RRSR_18M;
+ break;
+ case MGN_24M:
+ *rate_config |= RRSR_24M;
+ break;
+ case MGN_36M:
+ *rate_config |= RRSR_36M;
+ break;
+ case MGN_48M:
+ *rate_config |= RRSR_48M;
+ break;
+ case MGN_54M:
+ *rate_config |= RRSR_54M;
+ break;
+ }
+ }
+
+ for (i = 0; i < net->rates_ex_len; i++) {
+ basic_rate = net->rates_ex[i] & 0x7f;
+ switch (basic_rate) {
+ case MGN_1M:
+ *rate_config |= RRSR_1M;
+ break;
+ case MGN_2M:
+ *rate_config |= RRSR_2M;
+ break;
+ case MGN_5_5M:
+ *rate_config |= RRSR_5_5M;
+ break;
+ case MGN_11M:
+ *rate_config |= RRSR_11M;
+ break;
+ case MGN_6M:
+ *rate_config |= RRSR_6M;
+ break;
+ case MGN_9M:
+ *rate_config |= RRSR_9M;
+ break;
+ case MGN_12M:
+ *rate_config |= RRSR_12M;
+ break;
+ case MGN_18M:
+ *rate_config |= RRSR_18M;
+ break;
+ case MGN_24M:
+ *rate_config |= RRSR_24M;
+ break;
+ case MGN_36M:
+ *rate_config |= RRSR_36M;
+ break;
+ case MGN_48M:
+ *rate_config |= RRSR_48M;
+ break;
+ case MGN_54M:
+ *rate_config |= RRSR_54M;
+ break;
+ }
+ }
+}
+
+static void rtl8192_refresh_supportrate(struct r8192_priv *priv)
+{
+ struct rtllib_device *ieee = priv->rtllib;
+ if (ieee->mode == WIRELESS_MODE_N_24G ||
+ ieee->mode == WIRELESS_MODE_N_5G) {
+ memcpy(ieee->Regdot11HTOperationalRateSet,
+ ieee->RegHTSuppRateSet, 16);
+ memcpy(ieee->Regdot11TxHTOperationalRateSet,
+ ieee->RegHTSuppRateSet, 16);
+
+ } else {
+ memset(ieee->Regdot11HTOperationalRateSet, 0, 16);
+ }
+ return;
+}
+
+static u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 ret = 0;
+
+ switch (priv->rf_chip) {
+ case RF_8225:
+ case RF_8256:
+ case RF_6052:
+ case RF_PSEUDO_11N:
+ ret = (WIRELESS_MODE_N_24G|WIRELESS_MODE_G | WIRELESS_MODE_B);
+ break;
+ case RF_8258:
+ ret = (WIRELESS_MODE_A | WIRELESS_MODE_N_5G);
+ break;
+ default:
+ ret = WIRELESS_MODE_B;
+ break;
+ }
+ return ret;
+}
+
+void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 bSupportMode = rtl8192_getSupportedWireleeMode(dev);
+
+ if ((wireless_mode == WIRELESS_MODE_AUTO) ||
+ ((wireless_mode & bSupportMode) == 0)) {
+ if (bSupportMode & WIRELESS_MODE_N_24G) {
+ wireless_mode = WIRELESS_MODE_N_24G;
+ } else if (bSupportMode & WIRELESS_MODE_N_5G) {
+ wireless_mode = WIRELESS_MODE_N_5G;
+ } else if ((bSupportMode & WIRELESS_MODE_A)) {
+ wireless_mode = WIRELESS_MODE_A;
+ } else if ((bSupportMode & WIRELESS_MODE_G)) {
+ wireless_mode = WIRELESS_MODE_G;
+ } else if ((bSupportMode & WIRELESS_MODE_B)) {
+ wireless_mode = WIRELESS_MODE_B;
+ } else {
+ RT_TRACE(COMP_ERR, "%s(), No valid wireless mode "
+ "supported (%x)!!!\n", __func__, bSupportMode);
+ wireless_mode = WIRELESS_MODE_B;
+ }
+ }
+
+ if ((wireless_mode & (WIRELESS_MODE_B | WIRELESS_MODE_G)) ==
+ (WIRELESS_MODE_G | WIRELESS_MODE_B))
+ wireless_mode = WIRELESS_MODE_G;
+
+ priv->rtllib->mode = wireless_mode;
+
+ ActUpdateChannelAccessSetting(dev, wireless_mode,
+ &priv->ChannelAccessSetting);
+
+ if ((wireless_mode == WIRELESS_MODE_N_24G) ||
+ (wireless_mode == WIRELESS_MODE_N_5G)) {
+ priv->rtllib->pHTInfo->bEnableHT = 1;
+ RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 1\n",
+ __func__, wireless_mode);
+ } else {
+ priv->rtllib->pHTInfo->bEnableHT = 0;
+ RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 0\n",
+ __func__, wireless_mode);
+ }
+
+ RT_TRACE(COMP_INIT, "Current Wireless Mode is %x\n", wireless_mode);
+ rtl8192_refresh_supportrate(priv);
+}
+
+static int _rtl8192_sta_up(struct net_device *dev, bool is_silent_reset)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ (&(priv->rtllib->PowerSaveControl));
+ bool init_status = true;
+ priv->bDriverIsGoingToUnload = false;
+ priv->bdisable_nic = false;
+
+ priv->up = 1;
+ priv->rtllib->ieee_up = 1;
+
+ priv->up_first_time = 0;
+ RT_TRACE(COMP_INIT, "Bringing up iface");
+ priv->bfirst_init = true;
+ init_status = priv->ops->initialize_adapter(dev);
+ if (init_status != true) {
+ RT_TRACE(COMP_ERR, "ERR!!! %s(): initialization is failed!\n",
+ __func__);
+ priv->bfirst_init = false;
+ return -1;
+ }
+
+ RT_TRACE(COMP_INIT, "start adapter finished\n");
+ RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
+ priv->bfirst_init = false;
+
+ if (priv->polling_timer_on == 0)
+ check_rfctrl_gpio_timer((unsigned long)dev);
+
+ if (priv->rtllib->state != RTLLIB_LINKED)
+ rtllib_softmac_start_protocol(priv->rtllib, 0);
+ rtllib_reset_queue(priv->rtllib);
+ watch_dog_timer_callback((unsigned long) dev);
+
+ if (!netif_queue_stopped(dev))
+ netif_start_queue(dev);
+ else
+ netif_wake_queue(dev);
+
+ return 0;
+}
+
+static int rtl8192_sta_down(struct net_device *dev, bool shutdownrf)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ unsigned long flags = 0;
+ u8 RFInProgressTimeOut = 0;
+
+ if (priv->up == 0)
+ return -1;
+
+ if (priv->rtllib->rtllib_ips_leave != NULL)
+ priv->rtllib->rtllib_ips_leave(dev);
+
+ if (priv->rtllib->state == RTLLIB_LINKED)
+ LeisurePSLeave(dev);
+
+ priv->bDriverIsGoingToUnload = true;
+ priv->up = 0;
+ priv->rtllib->ieee_up = 0;
+ priv->bfirst_after_down = 1;
+ RT_TRACE(COMP_DOWN, "==========>%s()\n", __func__);
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+
+ priv->rtllib->wpa_ie_len = 0;
+ kfree(priv->rtllib->wpa_ie);
+ priv->rtllib->wpa_ie = NULL;
+ CamResetAllEntry(dev);
+ memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
+ rtl8192_irq_disable(dev);
+
+ del_timer_sync(&priv->watch_dog_timer);
+ rtl8192_cancel_deferred_work(priv);
+ cancel_delayed_work(&priv->rtllib->hw_wakeup_wq);
+
+ rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
+ spin_lock_irqsave(&priv->rf_ps_lock, flags);
+ while (priv->RFChangeInProgress) {
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
+ if (RFInProgressTimeOut > 100) {
+ spin_lock_irqsave(&priv->rf_ps_lock, flags);
+ break;
+ }
+ RT_TRACE(COMP_DBG, "===>%s():RF is in progress, need to wait "
+ "until rf chang is done.\n", __func__);
+ mdelay(1);
+ RFInProgressTimeOut++;
+ spin_lock_irqsave(&priv->rf_ps_lock, flags);
+ }
+ priv->RFChangeInProgress = true;
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
+ priv->ops->stop_adapter(dev, false);
+ spin_lock_irqsave(&priv->rf_ps_lock, flags);
+ priv->RFChangeInProgress = false;
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
+ udelay(100);
+ memset(&priv->rtllib->current_network, 0,
+ offsetof(struct rtllib_network, list));
+ RT_TRACE(COMP_DOWN, "<==========%s()\n", __func__);
+
+ return 0;
+}
+
+static void rtl8192_init_priv_handler(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ priv->rtllib->softmac_hard_start_xmit = rtl8192_hard_start_xmit;
+ priv->rtllib->set_chan = rtl8192_set_chan;
+ priv->rtllib->link_change = priv->ops->link_change;
+ priv->rtllib->softmac_data_hard_start_xmit = rtl8192_hard_data_xmit;
+ priv->rtllib->data_hard_stop = rtl8192_data_hard_stop;
+ priv->rtllib->data_hard_resume = rtl8192_data_hard_resume;
+ priv->rtllib->check_nic_enough_desc = rtl8192_check_nic_enough_desc;
+ priv->rtllib->get_nic_desc_num = rtl8192_get_nic_desc_num;
+ priv->rtllib->handle_assoc_response = rtl8192_handle_assoc_response;
+ priv->rtllib->handle_beacon = rtl8192_handle_beacon;
+ priv->rtllib->SetWirelessMode = rtl8192_SetWirelessMode;
+ priv->rtllib->LeisurePSLeave = LeisurePSLeave;
+ priv->rtllib->SetBWModeHandler = rtl8192_SetBWMode;
+ priv->rf_set_chan = rtl8192_phy_SwChnl;
+
+ priv->rtllib->start_send_beacons = rtl8192e_start_beacon;
+ priv->rtllib->stop_send_beacons = rtl8192_stop_beacon;
+
+ priv->rtllib->sta_wake_up = rtl8192_hw_wakeup;
+ priv->rtllib->enter_sleep_state = rtl8192_hw_to_sleep;
+ priv->rtllib->ps_is_queue_empty = rtl8192_is_tx_queue_empty;
+
+ priv->rtllib->GetNmodeSupportBySecCfg = rtl8192_GetNmodeSupportBySecCfg;
+ priv->rtllib->GetHalfNmodeSupportByAPsHandler =
+ rtl8192_GetHalfNmodeSupportByAPs;
+
+ priv->rtllib->SetHwRegHandler = rtl8192e_SetHwReg;
+ priv->rtllib->AllowAllDestAddrHandler = rtl8192_AllowAllDestAddr;
+ priv->rtllib->SetFwCmdHandler = NULL;
+ priv->rtllib->InitialGainHandler = InitialGain819xPci;
+ priv->rtllib->rtllib_ips_leave_wq = rtllib_ips_leave_wq;
+ priv->rtllib->rtllib_ips_leave = rtllib_ips_leave;
+
+ priv->rtllib->LedControlHandler = NULL;
+ priv->rtllib->UpdateBeaconInterruptHandler = NULL;
+
+ priv->rtllib->ScanOperationBackupHandler = PHY_ScanOperationBackup8192;
+
+ priv->rtllib->rtllib_rfkill_poll = NULL;
+}
+
+static void rtl8192_init_priv_constant(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ &(priv->rtllib->PowerSaveControl);
+
+ pPSC->RegMaxLPSAwakeIntvl = 5;
+
+ priv->RegPciASPM = 2;
+
+ priv->RegDevicePciASPMSetting = 0x03;
+
+ priv->RegHostPciASPMSetting = 0x02;
+
+ priv->RegHwSwRfOffD3 = 2;
+
+ priv->RegSupportPciASPM = 2;
+}
+
+
+static void rtl8192_init_priv_variable(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 i;
+
+ priv->AcmMethod = eAcmWay2_SW;
+ priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
+ priv->rtllib->hwscan_sem_up = 1;
+ priv->rtllib->status = 0;
+ priv->H2CTxCmdSeq = 0;
+ priv->bDisableFrameBursting = 0;
+ priv->bDMInitialGainEnable = 1;
+ priv->polling_timer_on = 0;
+ priv->up_first_time = 1;
+ priv->blinked_ingpio = false;
+ priv->bDriverIsGoingToUnload = false;
+ priv->being_init_adapter = false;
+ priv->initialized_at_probe = false;
+ priv->sw_radio_on = true;
+ priv->bdisable_nic = false;
+ priv->bfirst_init = false;
+ priv->txringcount = 64;
+ priv->rxbuffersize = 9100;
+ priv->rxringcount = MAX_RX_COUNT;
+ priv->irq_enabled = 0;
+ priv->chan = 1;
+ priv->RegWirelessMode = WIRELESS_MODE_AUTO;
+ priv->RegChannelPlan = 0xf;
+ priv->nrxAMPDU_size = 0;
+ priv->nrxAMPDU_aggr_num = 0;
+ priv->last_rxdesc_tsf_high = 0;
+ priv->last_rxdesc_tsf_low = 0;
+ priv->rtllib->mode = WIRELESS_MODE_AUTO;
+ priv->rtllib->iw_mode = IW_MODE_INFRA;
+ priv->rtllib->bNetPromiscuousMode = false;
+ priv->rtllib->IntelPromiscuousModeInfo.bPromiscuousOn = false;
+ priv->rtllib->IntelPromiscuousModeInfo.bFilterSourceStationFrame =
+ false;
+ priv->rtllib->ieee_up = 0;
+ priv->retry_rts = DEFAULT_RETRY_RTS;
+ priv->retry_data = DEFAULT_RETRY_DATA;
+ priv->rtllib->rts = DEFAULT_RTS_THRESHOLD;
+ priv->rtllib->rate = 110;
+ priv->rtllib->short_slot = 1;
+ priv->promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
+ priv->bcck_in_ch14 = false;
+ priv->bfsync_processing = false;
+ priv->CCKPresentAttentuation = 0;
+ priv->rfa_txpowertrackingindex = 0;
+ priv->rfc_txpowertrackingindex = 0;
+ priv->CckPwEnl = 6;
+ priv->ScanDelay = 50;
+ priv->ResetProgress = RESET_TYPE_NORESET;
+ priv->bForcedSilentReset = 0;
+ priv->bDisableNormalResetCheck = false;
+ priv->force_reset = false;
+ memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
+
+ memset(&priv->InterruptLog, 0, sizeof(struct log_int_8190));
+ priv->RxCounter = 0;
+ priv->rtllib->wx_set_enc = 0;
+ priv->bHwRadioOff = false;
+ priv->RegRfOff = 0;
+ priv->isRFOff = false;
+ priv->bInPowerSaveMode = false;
+ priv->rtllib->RfOffReason = 0;
+ priv->RFChangeInProgress = false;
+ priv->bHwRfOffAction = 0;
+ priv->SetRFPowerStateInProgress = false;
+ priv->rtllib->PowerSaveControl.bInactivePs = true;
+ priv->rtllib->PowerSaveControl.bIPSModeBackup = false;
+ priv->rtllib->PowerSaveControl.bLeisurePs = true;
+ priv->rtllib->PowerSaveControl.bFwCtrlLPS = false;
+ priv->rtllib->LPSDelayCnt = 0;
+ priv->rtllib->sta_sleep = LPS_IS_WAKE;
+ priv->rtllib->eRFPowerState = eRfOn;
+
+ priv->txpower_checkcnt = 0;
+ priv->thermal_readback_index = 0;
+ priv->txpower_tracking_callback_cnt = 0;
+ priv->ccktxpower_adjustcnt_ch14 = 0;
+ priv->ccktxpower_adjustcnt_not_ch14 = 0;
+
+ priv->rtllib->current_network.beacon_interval = DEFAULT_BEACONINTERVAL;
+ priv->rtllib->iw_mode = IW_MODE_INFRA;
+ priv->rtllib->active_scan = 1;
+ priv->rtllib->be_scan_inprogress = false;
+ priv->rtllib->modulation = RTLLIB_CCK_MODULATION |
+ RTLLIB_OFDM_MODULATION;
+ priv->rtllib->host_encrypt = 1;
+ priv->rtllib->host_decrypt = 1;
+
+ priv->rtllib->dot11PowerSaveMode = eActive;
+ priv->rtllib->fts = DEFAULT_FRAG_THRESHOLD;
+ priv->rtllib->MaxMssDensity = 0;
+ priv->rtllib->MinSpaceCfg = 0;
+
+ priv->card_type = PCI;
+
+ priv->AcmControl = 0;
+ priv->pFirmware = vzalloc(sizeof(struct rt_firmware));
+ if (!priv->pFirmware)
+ printk(KERN_ERR "rtl8193e: Unable to allocate space "
+ "for firmware\n");
+
+ skb_queue_head_init(&priv->rx_queue);
+ skb_queue_head_init(&priv->skb_queue);
+
+ for (i = 0; i < MAX_QUEUE_SIZE; i++)
+ skb_queue_head_init(&priv->rtllib->skb_waitQ[i]);
+ for (i = 0; i < MAX_QUEUE_SIZE; i++)
+ skb_queue_head_init(&priv->rtllib->skb_aggQ[i]);
+}
+
+static void rtl8192_init_priv_lock(struct r8192_priv *priv)
+{
+ spin_lock_init(&priv->fw_scan_lock);
+ spin_lock_init(&priv->tx_lock);
+ spin_lock_init(&priv->irq_lock);
+ spin_lock_init(&priv->irq_th_lock);
+ spin_lock_init(&priv->rf_ps_lock);
+ spin_lock_init(&priv->ps_lock);
+ spin_lock_init(&priv->rf_lock);
+ spin_lock_init(&priv->rt_h2c_lock);
+ sema_init(&priv->wx_sem, 1);
+ sema_init(&priv->rf_sem, 1);
+ mutex_init(&priv->mutex);
+}
+
+static void rtl8192_init_priv_task(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ priv->priv_wq = create_workqueue(DRV_NAME);
+ INIT_WORK_RSL(&priv->reset_wq, (void *)rtl8192_restart, dev);
+ INIT_WORK_RSL(&priv->rtllib->ips_leave_wq, (void *)IPSLeave_wq, dev);
+ INIT_DELAYED_WORK_RSL(&priv->watch_dog_wq,
+ (void *)rtl819x_watchdog_wqcallback, dev);
+ INIT_DELAYED_WORK_RSL(&priv->txpower_tracking_wq,
+ (void *)dm_txpower_trackingcallback, dev);
+ INIT_DELAYED_WORK_RSL(&priv->rfpath_check_wq,
+ (void *)dm_rf_pathcheck_workitemcallback, dev);
+ INIT_DELAYED_WORK_RSL(&priv->update_beacon_wq,
+ (void *)rtl8192_update_beacon, dev);
+ INIT_WORK_RSL(&priv->qos_activate, (void *)rtl8192_qos_activate, dev);
+ INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_wakeup_wq,
+ (void *) rtl8192_hw_wakeup_wq, dev);
+ INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_sleep_wq,
+ (void *) rtl8192_hw_sleep_wq, dev);
+ tasklet_init(&priv->irq_rx_tasklet,
+ (void(*)(unsigned long))rtl8192_irq_rx_tasklet,
+ (unsigned long)priv);
+ tasklet_init(&priv->irq_tx_tasklet,
+ (void(*)(unsigned long))rtl8192_irq_tx_tasklet,
+ (unsigned long)priv);
+ tasklet_init(&priv->irq_prepare_beacon_tasklet,
+ (void(*)(unsigned long))rtl8192_prepare_beacon,
+ (unsigned long)priv);
+}
+
+static short rtl8192_get_channel_map(struct net_device *dev)
+{
+ int i;
+
+ struct r8192_priv *priv = rtllib_priv(dev);
+ if ((priv->rf_chip != RF_8225) && (priv->rf_chip != RF_8256)
+ && (priv->rf_chip != RF_6052)) {
+ RT_TRACE(COMP_ERR, "%s: unknown rf chip, can't set channel "
+ "map\n", __func__);
+ return -1;
+ }
+
+ if (priv->ChannelPlan >= COUNTRY_CODE_MAX) {
+ printk(KERN_INFO "rtl819x_init:Error channel plan! Set to "
+ "default.\n");
+ priv->ChannelPlan = COUNTRY_CODE_FCC;
+ }
+ RT_TRACE(COMP_INIT, "Channel plan is %d\n", priv->ChannelPlan);
+ Dot11d_Init(priv->rtllib);
+ Dot11d_Channelmap(priv->ChannelPlan, priv->rtllib);
+ for (i = 1; i <= 11; i++)
+ (priv->rtllib->active_channel_map)[i] = 1;
+ (priv->rtllib->active_channel_map)[12] = 2;
+ (priv->rtllib->active_channel_map)[13] = 2;
+
+ return 0;
+}
+
+static short rtl8192_init(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ memset(&(priv->stats), 0, sizeof(struct rt_stats));
+
+ rtl8192_dbgp_flag_init(dev);
+ rtl8192_init_priv_handler(dev);
+ rtl8192_init_priv_constant(dev);
+ rtl8192_init_priv_variable(dev);
+ rtl8192_init_priv_lock(priv);
+ rtl8192_init_priv_task(dev);
+ priv->ops->get_eeprom_size(dev);
+ priv->ops->init_adapter_variable(dev);
+ rtl8192_get_channel_map(dev);
+
+ init_hal_dm(dev);
+
+ init_timer(&priv->watch_dog_timer);
+ setup_timer(&priv->watch_dog_timer,
+ watch_dog_timer_callback,
+ (unsigned long) dev);
+
+ init_timer(&priv->gpio_polling_timer);
+ setup_timer(&priv->gpio_polling_timer,
+ check_rfctrl_gpio_timer,
+ (unsigned long)dev);
+
+ rtl8192_irq_disable(dev);
+ if (request_irq(dev->irq, (void *)rtl8192_interrupt_rsl, IRQF_SHARED,
+ dev->name, dev)) {
+ printk(KERN_ERR "Error allocating IRQ %d", dev->irq);
+ return -1;
+ } else {
+ priv->irq = dev->irq;
+ RT_TRACE(COMP_INIT, "IRQ %d\n", dev->irq);
+ }
+
+ if (rtl8192_pci_initdescring(dev) != 0) {
+ printk(KERN_ERR "Endopoints initialization failed");
+ free_irq(dev->irq, dev);
+ return -1;
+ }
+
+ return 0;
+}
+
+/***************************************************************************
+ -------------------------------WATCHDOG STUFF---------------------------
+***************************************************************************/
+short rtl8192_is_tx_queue_empty(struct net_device *dev)
+{
+ int i = 0;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ for (i = 0; i <= MGNT_QUEUE; i++) {
+ if ((i == TXCMD_QUEUE) || (i == HCCA_QUEUE))
+ continue;
+ if (skb_queue_len(&(&priv->tx_ring[i])->queue) > 0) {
+ printk(KERN_INFO "===>tx queue is not empty:%d, %d\n",
+ i, skb_queue_len(&(&priv->tx_ring[i])->queue));
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static enum reset_type rtl819x_TxCheckStuck(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 QueueID;
+ u8 ResetThreshold = NIC_SEND_HANG_THRESHOLD_POWERSAVE;
+ bool bCheckFwTxCnt = false;
+ struct rtl8192_tx_ring *ring = NULL;
+ struct sk_buff *skb = NULL;
+ struct cb_desc *tcb_desc = NULL;
+ unsigned long flags = 0;
+
+ switch (priv->rtllib->ps) {
+ case RTLLIB_PS_DISABLED:
+ ResetThreshold = NIC_SEND_HANG_THRESHOLD_NORMAL;
+ break;
+ case (RTLLIB_PS_MBCAST|RTLLIB_PS_UNICAST):
+ ResetThreshold = NIC_SEND_HANG_THRESHOLD_POWERSAVE;
+ break;
+ default:
+ ResetThreshold = NIC_SEND_HANG_THRESHOLD_POWERSAVE;
+ break;
+ }
+ spin_lock_irqsave(&priv->irq_th_lock, flags);
+ for (QueueID = 0; QueueID < MAX_TX_QUEUE; QueueID++) {
+ if (QueueID == TXCMD_QUEUE)
+ continue;
+
+ if (QueueID == BEACON_QUEUE)
+ continue;
+
+ ring = &priv->tx_ring[QueueID];
+
+ if (skb_queue_len(&ring->queue) == 0) {
+ continue;
+ } else {
+ skb = (&ring->queue)->next;
+ tcb_desc = (struct cb_desc *)(skb->cb +
+ MAX_DEV_ADDR_SIZE);
+ tcb_desc->nStuckCount++;
+ bCheckFwTxCnt = true;
+ if (tcb_desc->nStuckCount > 1)
+ printk(KERN_INFO "%s: QueueID=%d tcb_desc->n"
+ "StuckCount=%d\n", __func__, QueueID,
+ tcb_desc->nStuckCount);
+ }
+ }
+ spin_unlock_irqrestore(&priv->irq_th_lock, flags);
+
+ if (bCheckFwTxCnt) {
+ if (priv->ops->TxCheckStuckHandler(dev)) {
+ RT_TRACE(COMP_RESET, "TxCheckStuck(): Fw indicates no"
+ " Tx condition!\n");
+ return RESET_TYPE_SILENT;
+ }
+ }
+
+ return RESET_TYPE_NORESET;
+}
+
+static enum reset_type rtl819x_RxCheckStuck(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->ops->RxCheckStuckHandler(dev)) {
+ RT_TRACE(COMP_RESET, "RxStuck Condition\n");
+ return RESET_TYPE_SILENT;
+ }
+
+ return RESET_TYPE_NORESET;
+}
+
+static enum reset_type rtl819x_ifcheck_resetornot(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ enum reset_type TxResetType = RESET_TYPE_NORESET;
+ enum reset_type RxResetType = RESET_TYPE_NORESET;
+ enum rt_rf_power_state rfState;
+
+ rfState = priv->rtllib->eRFPowerState;
+
+ if (rfState == eRfOn)
+ TxResetType = rtl819x_TxCheckStuck(dev);
+
+ if (rfState == eRfOn &&
+ (priv->rtllib->iw_mode == IW_MODE_INFRA) &&
+ (priv->rtllib->state == RTLLIB_LINKED))
+ RxResetType = rtl819x_RxCheckStuck(dev);
+
+ if (TxResetType == RESET_TYPE_NORMAL ||
+ RxResetType == RESET_TYPE_NORMAL) {
+ printk(KERN_INFO "%s(): TxResetType is %d, RxResetType is %d\n",
+ __func__, TxResetType, RxResetType);
+ return RESET_TYPE_NORMAL;
+ } else if (TxResetType == RESET_TYPE_SILENT ||
+ RxResetType == RESET_TYPE_SILENT) {
+ printk(KERN_INFO "%s(): TxResetType is %d, RxResetType is %d\n",
+ __func__, TxResetType, RxResetType);
+ return RESET_TYPE_SILENT;
+ } else {
+ return RESET_TYPE_NORESET;
+ }
+
+}
+
+static void rtl819x_silentreset_mesh_bk(struct net_device *dev, u8 IsPortal)
+{
+}
+
+static void rtl819x_ifsilentreset(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 reset_times = 0;
+ int reset_status = 0;
+ struct rtllib_device *ieee = priv->rtllib;
+ unsigned long flag;
+
+ u8 IsPortal = 0;
+
+
+ if (priv->ResetProgress == RESET_TYPE_NORESET) {
+
+ RT_TRACE(COMP_RESET, "=========>Reset progress!!\n");
+
+ priv->ResetProgress = RESET_TYPE_SILENT;
+
+ spin_lock_irqsave(&priv->rf_ps_lock, flag);
+ if (priv->RFChangeInProgress) {
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
+ goto END;
+ }
+ priv->RFChangeInProgress = true;
+ priv->bResetInProgress = true;
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
+
+RESET_START:
+
+ down(&priv->wx_sem);
+
+ if (priv->rtllib->state == RTLLIB_LINKED)
+ LeisurePSLeave(dev);
+
+ if (IS_NIC_DOWN(priv)) {
+ RT_TRACE(COMP_ERR, "%s():the driver is not up! "
+ "return\n", __func__);
+ up(&priv->wx_sem);
+ return ;
+ }
+ priv->up = 0;
+
+ RT_TRACE(COMP_RESET, "%s():======>start to down the driver\n",
+ __func__);
+ mdelay(1000);
+ RT_TRACE(COMP_RESET, "%s():111111111111111111111111======>start"
+ " to down the driver\n", __func__);
+
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+
+ rtl8192_irq_disable(dev);
+ del_timer_sync(&priv->watch_dog_timer);
+ rtl8192_cancel_deferred_work(priv);
+ deinit_hal_dm(dev);
+ rtllib_stop_scan_syncro(ieee);
+
+ if (ieee->state == RTLLIB_LINKED) {
+ SEM_DOWN_IEEE_WX(&ieee->wx_sem);
+ printk(KERN_INFO "ieee->state is RTLLIB_LINKED\n");
+ rtllib_stop_send_beacons(priv->rtllib);
+ del_timer_sync(&ieee->associate_timer);
+ cancel_delayed_work(&ieee->associate_retry_wq);
+ rtllib_stop_scan(ieee);
+ netif_carrier_off(dev);
+ SEM_UP_IEEE_WX(&ieee->wx_sem);
+ } else {
+ printk(KERN_INFO "ieee->state is NOT LINKED\n");
+ rtllib_softmac_stop_protocol(priv->rtllib, 0 , true);
+ }
+
+ dm_backup_dynamic_mechanism_state(dev);
+
+ up(&priv->wx_sem);
+ RT_TRACE(COMP_RESET, "%s():<==========down process is "
+ "finished\n", __func__);
+
+ RT_TRACE(COMP_RESET, "%s():<===========up process start\n",
+ __func__);
+ reset_status = _rtl8192_up(dev, true);
+
+ RT_TRACE(COMP_RESET, "%s():<===========up process is "
+ "finished\n", __func__);
+ if (reset_status == -1) {
+ if (reset_times < 3) {
+ reset_times++;
+ goto RESET_START;
+ } else {
+ RT_TRACE(COMP_ERR, " ERR!!! %s(): Reset "
+ "Failed!!\n", __func__);
+ }
+ }
+
+ ieee->is_silent_reset = 1;
+
+ spin_lock_irqsave(&priv->rf_ps_lock, flag);
+ priv->RFChangeInProgress = false;
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
+
+ EnableHWSecurityConfig8192(dev);
+
+ if (ieee->state == RTLLIB_LINKED && ieee->iw_mode ==
+ IW_MODE_INFRA) {
+ ieee->set_chan(ieee->dev,
+ ieee->current_network.channel);
+
+ queue_work_rsl(ieee->wq, &ieee->associate_complete_wq);
+
+ } else if (ieee->state == RTLLIB_LINKED && ieee->iw_mode ==
+ IW_MODE_ADHOC) {
+ ieee->set_chan(ieee->dev,
+ ieee->current_network.channel);
+ ieee->link_change(ieee->dev);
+
+ notify_wx_assoc_event(ieee);
+
+ rtllib_start_send_beacons(ieee);
+
+ if (ieee->data_hard_resume)
+ ieee->data_hard_resume(ieee->dev);
+ netif_carrier_on(ieee->dev);
+ } else if (ieee->iw_mode == IW_MODE_MESH) {
+ rtl819x_silentreset_mesh_bk(dev, IsPortal);
+ }
+
+ CamRestoreAllEntry(dev);
+ dm_restore_dynamic_mechanism_state(dev);
+END:
+ priv->ResetProgress = RESET_TYPE_NORESET;
+ priv->reset_count++;
+
+ priv->bForcedSilentReset = false;
+ priv->bResetInProgress = false;
+
+ write_nic_byte(dev, UFWP, 1);
+ RT_TRACE(COMP_RESET, "Reset finished!! ====>[%d]\n",
+ priv->reset_count);
+ }
+}
+
+static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
+ u32 *TotalRxDataNum)
+{
+ u16 SlotIndex;
+ u8 i;
+
+ *TotalRxBcnNum = 0;
+ *TotalRxDataNum = 0;
+
+ SlotIndex = (priv->rtllib->LinkDetectInfo.SlotIndex++) %
+ (priv->rtllib->LinkDetectInfo.SlotNum);
+ priv->rtllib->LinkDetectInfo.RxBcnNum[SlotIndex] =
+ priv->rtllib->LinkDetectInfo.NumRecvBcnInPeriod;
+ priv->rtllib->LinkDetectInfo.RxDataNum[SlotIndex] =
+ priv->rtllib->LinkDetectInfo.NumRecvDataInPeriod;
+ for (i = 0; i < priv->rtllib->LinkDetectInfo.SlotNum; i++) {
+ *TotalRxBcnNum += priv->rtllib->LinkDetectInfo.RxBcnNum[i];
+ *TotalRxDataNum += priv->rtllib->LinkDetectInfo.RxDataNum[i];
+ }
+}
+
+
+void rtl819x_watchdog_wqcallback(void *data)
+{
+ struct r8192_priv *priv = container_of_dwork_rsl(data,
+ struct r8192_priv, watch_dog_wq);
+ struct net_device *dev = priv->rtllib->dev;
+ struct rtllib_device *ieee = priv->rtllib;
+ enum reset_type ResetType = RESET_TYPE_NORESET;
+ static u8 check_reset_cnt;
+ unsigned long flags;
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ (&(priv->rtllib->PowerSaveControl));
+ bool bBusyTraffic = false;
+ bool bHigherBusyTraffic = false;
+ bool bHigherBusyRxTraffic = false;
+ bool bEnterPS = false;
+
+ if (IS_NIC_DOWN(priv) || (priv->bHwRadioOff == true))
+ return;
+
+ if (priv->rtllib->state >= RTLLIB_LINKED) {
+ if (priv->rtllib->CntAfterLink < 2)
+ priv->rtllib->CntAfterLink++;
+ } else {
+ priv->rtllib->CntAfterLink = 0;
+ }
+
+ hal_dm_watchdog(dev);
+
+ if (rtllib_act_scanning(priv->rtllib, false) == false) {
+ if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->state ==
+ RTLLIB_NOLINK) &&
+ (ieee->eRFPowerState == eRfOn) && !ieee->is_set_key &&
+ (!ieee->proto_stoppping) && !ieee->wx_set_enc) {
+ if ((ieee->PowerSaveControl.ReturnPoint ==
+ IPS_CALLBACK_NONE) &&
+ (!ieee->bNetPromiscuousMode)) {
+ RT_TRACE(COMP_PS, "====================>haha: "
+ "IPSEnter()\n");
+ IPSEnter(dev);
+ }
+ }
+ }
+ if ((ieee->state == RTLLIB_LINKED) && (ieee->iw_mode ==
+ IW_MODE_INFRA) && (!ieee->bNetPromiscuousMode)) {
+ if (ieee->LinkDetectInfo.NumRxOkInPeriod > 100 ||
+ ieee->LinkDetectInfo.NumTxOkInPeriod > 100)
+ bBusyTraffic = true;
+
+
+ if (ieee->LinkDetectInfo.NumRxOkInPeriod > 4000 ||
+ ieee->LinkDetectInfo.NumTxOkInPeriod > 4000) {
+ bHigherBusyTraffic = true;
+ if (ieee->LinkDetectInfo.NumRxOkInPeriod > 5000)
+ bHigherBusyRxTraffic = true;
+ else
+ bHigherBusyRxTraffic = false;
+ }
+
+ if (((ieee->LinkDetectInfo.NumRxUnicastOkInPeriod +
+ ieee->LinkDetectInfo.NumTxOkInPeriod) > 8) ||
+ (ieee->LinkDetectInfo.NumRxUnicastOkInPeriod > 2))
+ bEnterPS = false;
+ else
+ bEnterPS = true;
+
+ if (ieee->current_network.beacon_interval < 95)
+ bEnterPS = false;
+
+ if (bEnterPS)
+ LeisurePSEnter(dev);
+ else
+ LeisurePSLeave(dev);
+
+ } else {
+ RT_TRACE(COMP_LPS, "====>no link LPS leave\n");
+ LeisurePSLeave(dev);
+ }
+
+ ieee->LinkDetectInfo.NumRxOkInPeriod = 0;
+ ieee->LinkDetectInfo.NumTxOkInPeriod = 0;
+ ieee->LinkDetectInfo.NumRxUnicastOkInPeriod = 0;
+ ieee->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
+
+ ieee->LinkDetectInfo.bHigherBusyTraffic = bHigherBusyTraffic;
+ ieee->LinkDetectInfo.bHigherBusyRxTraffic = bHigherBusyRxTraffic;
+
+ if (ieee->state == RTLLIB_LINKED && ieee->iw_mode == IW_MODE_INFRA) {
+ u32 TotalRxBcnNum = 0;
+ u32 TotalRxDataNum = 0;
+
+ rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
+
+ if ((TotalRxBcnNum+TotalRxDataNum) == 0)
+ priv->check_roaming_cnt++;
+ else
+ priv->check_roaming_cnt = 0;
+
+
+ if (priv->check_roaming_cnt > 0) {
+ if (ieee->eRFPowerState == eRfOff)
+ RT_TRACE(COMP_ERR, "========>%s()\n", __func__);
+
+ printk(KERN_INFO "===>%s(): AP is power off, chan:%d,"
+ " connect another one\n", __func__, priv->chan);
+
+ ieee->state = RTLLIB_ASSOCIATING;
+
+ RemovePeerTS(priv->rtllib,
+ priv->rtllib->current_network.bssid);
+ ieee->is_roaming = true;
+ ieee->is_set_key = false;
+ ieee->link_change(dev);
+ if (ieee->LedControlHandler)
+ ieee->LedControlHandler(ieee->dev,
+ LED_CTL_START_TO_LINK);
+
+ notify_wx_assoc_event(ieee);
+
+ if (!(ieee->rtllib_ap_sec_type(ieee) &
+ (SEC_ALG_CCMP|SEC_ALG_TKIP)))
+ queue_delayed_work_rsl(ieee->wq,
+ &ieee->associate_procedure_wq, 0);
+
+ priv->check_roaming_cnt = 0;
+ }
+ ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0;
+ ieee->LinkDetectInfo.NumRecvDataInPeriod = 0;
+
+ }
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ if ((check_reset_cnt++ >= 3) && (!ieee->is_roaming) &&
+ (!priv->RFChangeInProgress) && (!pPSC->bSwRfProcessing)) {
+ ResetType = rtl819x_ifcheck_resetornot(dev);
+ check_reset_cnt = 3;
+ }
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ if (!priv->bDisableNormalResetCheck && ResetType == RESET_TYPE_NORMAL) {
+ priv->ResetProgress = RESET_TYPE_NORMAL;
+ RT_TRACE(COMP_RESET, "%s(): NOMAL RESET\n", __func__);
+ return;
+ }
+
+ if (((priv->force_reset) || (!priv->bDisableNormalResetCheck &&
+ ResetType == RESET_TYPE_SILENT)))
+ rtl819x_ifsilentreset(dev);
+ priv->force_reset = false;
+ priv->bForcedSilentReset = false;
+ priv->bResetInProgress = false;
+ RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
+}
+
+void watch_dog_timer_callback(unsigned long data)
+{
+ struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
+ queue_delayed_work_rsl(priv->priv_wq, &priv->watch_dog_wq, 0);
+ mod_timer(&priv->watch_dog_timer, jiffies +
+ MSECS(RTLLIB_WATCH_DOG_TIME));
+}
+
+/****************************************************************************
+ ---------------------------- NIC TX/RX STUFF---------------------------
+*****************************************************************************/
+void rtl8192_rx_enable(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ priv->ops->rx_enable(dev);
+}
+
+void rtl8192_tx_enable(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+
+ priv->ops->tx_enable(dev);
+
+ rtllib_reset_queue(priv->rtllib);
+}
+
+
+static void rtl8192_free_rx_ring(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ int i, rx_queue_idx;
+
+ for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE;
+ rx_queue_idx++) {
+ for (i = 0; i < priv->rxringcount; i++) {
+ struct sk_buff *skb = priv->rx_buf[rx_queue_idx][i];
+ if (!skb)
+ continue;
+
+ pci_unmap_single(priv->pdev,
+ *((dma_addr_t *)skb->cb),
+ priv->rxbuffersize, PCI_DMA_FROMDEVICE);
+ kfree_skb(skb);
+ }
+
+ pci_free_consistent(priv->pdev,
+ sizeof(*priv->rx_ring[rx_queue_idx]) *
+ priv->rxringcount,
+ priv->rx_ring[rx_queue_idx],
+ priv->rx_ring_dma[rx_queue_idx]);
+ priv->rx_ring[rx_queue_idx] = NULL;
+ }
+}
+
+static void rtl8192_free_tx_ring(struct net_device *dev, unsigned int prio)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
+
+ while (skb_queue_len(&ring->queue)) {
+ struct tx_desc *entry = &ring->desc[ring->idx];
+ struct sk_buff *skb = __skb_dequeue(&ring->queue);
+
+ pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr),
+ skb->len, PCI_DMA_TODEVICE);
+ kfree_skb(skb);
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+
+ pci_free_consistent(priv->pdev, sizeof(*ring->desc)*ring->entries,
+ ring->desc, ring->dma);
+ ring->desc = NULL;
+}
+
+void rtl8192_data_hard_stop(struct net_device *dev)
+{
+}
+
+
+void rtl8192_data_hard_resume(struct net_device *dev)
+{
+}
+
+void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
+ int rate)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ int ret;
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
+ MAX_DEV_ADDR_SIZE);
+ u8 queue_index = tcb_desc->queue_index;
+
+ if ((priv->rtllib->eRFPowerState == eRfOff) || IS_NIC_DOWN(priv) ||
+ priv->bResetInProgress) {
+ kfree_skb(skb);
+ return;
+ }
+
+ assert(queue_index != TXCMD_QUEUE);
+
+
+ memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
+ skb_push(skb, priv->rtllib->tx_headroom);
+ ret = rtl8192_tx(dev, skb);
+ if (ret != 0) {
+ kfree_skb(skb);
+ };
+
+ if (queue_index != MGNT_QUEUE) {
+ priv->rtllib->stats.tx_bytes += (skb->len -
+ priv->rtllib->tx_headroom);
+ priv->rtllib->stats.tx_packets++;
+ }
+
+
+ return;
+}
+
+int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ int ret;
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
+ MAX_DEV_ADDR_SIZE);
+ u8 queue_index = tcb_desc->queue_index;
+
+ if (queue_index != TXCMD_QUEUE) {
+ if ((priv->rtllib->eRFPowerState == eRfOff) ||
+ IS_NIC_DOWN(priv) || priv->bResetInProgress) {
+ kfree_skb(skb);
+ return 0;
+ }
+ }
+
+ memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
+ if (queue_index == TXCMD_QUEUE) {
+ rtl8192_tx_cmd(dev, skb);
+ ret = 0;
+ return ret;
+ } else {
+ tcb_desc->RATRIndex = 7;
+ tcb_desc->bTxDisableRateFallBack = 1;
+ tcb_desc->bTxUseDriverAssingedRate = 1;
+ tcb_desc->bTxEnableFwCalcDur = 1;
+ skb_push(skb, priv->rtllib->tx_headroom);
+ ret = rtl8192_tx(dev, skb);
+ if (ret != 0) {
+ kfree_skb(skb);
+ };
+ }
+
+
+
+ return ret;
+
+}
+
+static void rtl8192_tx_isr(struct net_device *dev, int prio)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+
+ struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
+
+ while (skb_queue_len(&ring->queue)) {
+ struct tx_desc *entry = &ring->desc[ring->idx];
+ struct sk_buff *skb;
+
+ if (prio != BEACON_QUEUE) {
+ if (entry->OWN)
+ return;
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+
+ skb = __skb_dequeue(&ring->queue);
+ pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr),
+ skb->len, PCI_DMA_TODEVICE);
+
+ kfree_skb(skb);
+ }
+ if (prio != BEACON_QUEUE)
+ tasklet_schedule(&priv->irq_tx_tasklet);
+}
+
+void rtl8192_tx_cmd(struct net_device *dev, struct sk_buff *skb)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtl8192_tx_ring *ring;
+ struct tx_desc_cmd *entry;
+ unsigned int idx;
+ struct cb_desc *tcb_desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->irq_th_lock, flags);
+ ring = &priv->tx_ring[TXCMD_QUEUE];
+
+ idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
+ entry = (struct tx_desc_cmd *) &ring->desc[idx];
+
+ tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
+
+ priv->ops->tx_fill_cmd_descriptor(dev, entry, tcb_desc, skb);
+
+ __skb_queue_tail(&ring->queue, skb);
+ spin_unlock_irqrestore(&priv->irq_th_lock, flags);
+
+ return;
+}
+
+short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtl8192_tx_ring *ring;
+ unsigned long flags;
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
+ MAX_DEV_ADDR_SIZE);
+ struct tx_desc *pdesc = NULL;
+ struct rtllib_hdr_1addr *header = NULL;
+ u16 fc = 0, type = 0, stype = 0;
+ bool multi_addr = false, broad_addr = false, uni_addr = false;
+ u8 *pda_addr = NULL;
+ int idx;
+ u32 fwinfo_size = 0;
+
+ if (priv->bdisable_nic) {
+ RT_TRACE(COMP_ERR, "%s: ERR!! Nic is disabled! Can't tx packet"
+ " len=%d qidx=%d!!!\n", __func__, skb->len,
+ tcb_desc->queue_index);
+ return skb->len;
+ }
+
+ priv->rtllib->bAwakePktSent = true;
+
+ fwinfo_size = sizeof(struct tx_fwinfo_8190pci);
+
+ header = (struct rtllib_hdr_1addr *)(((u8 *)skb->data) + fwinfo_size);
+ fc = header->frame_ctl;
+ type = WLAN_FC_GET_TYPE(fc);
+ stype = WLAN_FC_GET_STYPE(fc);
+ pda_addr = header->addr1;
+
+ if (is_multicast_ether_addr(pda_addr))
+ multi_addr = true;
+ else if (is_broadcast_ether_addr(pda_addr))
+ broad_addr = true;
+ else
+ uni_addr = true;
+
+ if (uni_addr)
+ priv->stats.txbytesunicast += skb->len - fwinfo_size;
+ else if (multi_addr)
+ priv->stats.txbytesmulticast += skb->len - fwinfo_size;
+ else
+ priv->stats.txbytesbroadcast += skb->len - fwinfo_size;
+
+ spin_lock_irqsave(&priv->irq_th_lock, flags);
+ ring = &priv->tx_ring[tcb_desc->queue_index];
+ if (tcb_desc->queue_index != BEACON_QUEUE)
+ idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
+ else
+ idx = 0;
+
+ pdesc = &ring->desc[idx];
+ if ((pdesc->OWN == 1) && (tcb_desc->queue_index != BEACON_QUEUE)) {
+ RT_TRACE(COMP_ERR, "No more TX desc@%d, ring->idx = %d, idx = "
+ "%d, skblen = 0x%x queuelen=%d",
+ tcb_desc->queue_index, ring->idx, idx, skb->len,
+ skb_queue_len(&ring->queue));
+ spin_unlock_irqrestore(&priv->irq_th_lock, flags);
+ return skb->len;
+ }
+
+ if (type == RTLLIB_FTYPE_DATA) {
+ if (priv->rtllib->LedControlHandler)
+ priv->rtllib->LedControlHandler(dev, LED_CTL_TX);
+ }
+ priv->ops->tx_fill_descriptor(dev, pdesc, tcb_desc, skb);
+ __skb_queue_tail(&ring->queue, skb);
+ pdesc->OWN = 1;
+ spin_unlock_irqrestore(&priv->irq_th_lock, flags);
+ dev->trans_start = jiffies;
+
+ write_nic_word(dev, TPPoll, 0x01 << tcb_desc->queue_index);
+ return 0;
+}
+
+static short rtl8192_alloc_rx_desc_ring(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rx_desc *entry = NULL;
+ int i, rx_queue_idx;
+
+ for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE; rx_queue_idx++) {
+ priv->rx_ring[rx_queue_idx] = pci_alloc_consistent(priv->pdev,
+ sizeof(*priv->rx_ring[rx_queue_idx]) *
+ priv->rxringcount,
+ &priv->rx_ring_dma[rx_queue_idx]);
+
+ if (!priv->rx_ring[rx_queue_idx] ||
+ (unsigned long)priv->rx_ring[rx_queue_idx] & 0xFF) {
+ RT_TRACE(COMP_ERR, "Cannot allocate RX ring\n");
+ return -ENOMEM;
+ }
+
+ memset(priv->rx_ring[rx_queue_idx], 0,
+ sizeof(*priv->rx_ring[rx_queue_idx]) *
+ priv->rxringcount);
+ priv->rx_idx[rx_queue_idx] = 0;
+
+ for (i = 0; i < priv->rxringcount; i++) {
+ struct sk_buff *skb = dev_alloc_skb(priv->rxbuffersize);
+ dma_addr_t *mapping;
+ entry = &priv->rx_ring[rx_queue_idx][i];
+ if (!skb)
+ return 0;
+ skb->dev = dev;
+ priv->rx_buf[rx_queue_idx][i] = skb;
+ mapping = (dma_addr_t *)skb->cb;
+ *mapping = pci_map_single(priv->pdev,
+ skb_tail_pointer_rsl(skb),
+ priv->rxbuffersize,
+ PCI_DMA_FROMDEVICE);
+
+ entry->BufferAddress = cpu_to_le32(*mapping);
+
+ entry->Length = priv->rxbuffersize;
+ entry->OWN = 1;
+ }
+
+ if(entry)
+ entry->EOR = 1;
+ }
+ return 0;
+}
+
+static int rtl8192_alloc_tx_desc_ring(struct net_device *dev,
+ unsigned int prio, unsigned int entries)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct tx_desc *ring;
+ dma_addr_t dma;
+ int i;
+
+ ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
+ if (!ring || (unsigned long)ring & 0xFF) {
+ RT_TRACE(COMP_ERR, "Cannot allocate TX ring (prio = %d)\n",
+ prio);
+ return -ENOMEM;
+ }
+
+ memset(ring, 0, sizeof(*ring)*entries);
+ priv->tx_ring[prio].desc = ring;
+ priv->tx_ring[prio].dma = dma;
+ priv->tx_ring[prio].idx = 0;
+ priv->tx_ring[prio].entries = entries;
+ skb_queue_head_init(&priv->tx_ring[prio].queue);
+
+ for (i = 0; i < entries; i++)
+ ring[i].NextDescAddress =
+ cpu_to_le32((u32)dma + ((i + 1) % entries) *
+ sizeof(*ring));
+
+ return 0;
+}
+
+
+short rtl8192_pci_initdescring(struct net_device *dev)
+{
+ u32 ret;
+ int i;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ ret = rtl8192_alloc_rx_desc_ring(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) {
+ ret = rtl8192_alloc_tx_desc_ring(dev, i, priv->txringcount);
+ if (ret)
+ goto err_free_rings;
+ }
+
+ return 0;
+
+err_free_rings:
+ rtl8192_free_rx_ring(dev);
+ for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
+ if (priv->tx_ring[i].desc)
+ rtl8192_free_tx_ring(dev, i);
+ return 1;
+}
+
+void rtl8192_pci_resetdescring(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ int i, rx_queue_idx;
+ unsigned long flags = 0;
+
+ for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE; rx_queue_idx++) {
+ if (priv->rx_ring[rx_queue_idx]) {
+ struct rx_desc *entry = NULL;
+ for (i = 0; i < priv->rxringcount; i++) {
+ entry = &priv->rx_ring[rx_queue_idx][i];
+ entry->OWN = 1;
+ }
+ priv->rx_idx[rx_queue_idx] = 0;
+ }
+ }
+
+ spin_lock_irqsave(&priv->irq_th_lock, flags);
+ for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) {
+ if (priv->tx_ring[i].desc) {
+ struct rtl8192_tx_ring *ring = &priv->tx_ring[i];
+
+ while (skb_queue_len(&ring->queue)) {
+ struct tx_desc *entry = &ring->desc[ring->idx];
+ struct sk_buff *skb =
+ __skb_dequeue(&ring->queue);
+
+ pci_unmap_single(priv->pdev,
+ le32_to_cpu(entry->TxBuffAddr),
+ skb->len, PCI_DMA_TODEVICE);
+ kfree_skb(skb);
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+ ring->idx = 0;
+ }
+ }
+ spin_unlock_irqrestore(&priv->irq_th_lock, flags);
+}
+
+void rtl819x_UpdateRxPktTimeStamp(struct net_device *dev,
+ struct rtllib_rx_stats *stats)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+
+ if (stats->bIsAMPDU && !stats->bFirstMPDU)
+ stats->mac_time = priv->LastRxDescTSF;
+ else
+ priv->LastRxDescTSF = stats->mac_time;
+}
+
+long rtl819x_translate_todbm(struct r8192_priv *priv, u8 signal_strength_index)
+{
+ long signal_power;
+
+ signal_power = (long)((signal_strength_index + 1) >> 1);
+ signal_power -= 95;
+
+ return signal_power;
+}
+
+
+void
+rtl819x_update_rxsignalstatistics8190pci(
+ struct r8192_priv *priv,
+ struct rtllib_rx_stats *pprevious_stats
+ )
+{
+ int weighting = 0;
+
+
+ if (priv->stats.recv_signal_power == 0)
+ priv->stats.recv_signal_power =
+ pprevious_stats->RecvSignalPower;
+
+ if (pprevious_stats->RecvSignalPower > priv->stats.recv_signal_power)
+ weighting = 5;
+ else if (pprevious_stats->RecvSignalPower <
+ priv->stats.recv_signal_power)
+ weighting = (-5);
+ priv->stats.recv_signal_power = (priv->stats.recv_signal_power * 5 +
+ pprevious_stats->RecvSignalPower +
+ weighting) / 6;
+}
+
+void rtl819x_process_cck_rxpathsel(struct r8192_priv *priv,
+ struct rtllib_rx_stats *pprevious_stats)
+{
+}
+
+
+u8 rtl819x_query_rxpwrpercentage(char antpower)
+{
+ if ((antpower <= -100) || (antpower >= 20))
+ return 0;
+ else if (antpower >= 0)
+ return 100;
+ else
+ return 100 + antpower;
+
+} /* QueryRxPwrPercentage */
+
+u8
+rtl819x_evm_dbtopercentage(
+ char value
+ )
+{
+ char ret_val;
+
+ ret_val = value;
+
+ if (ret_val >= 0)
+ ret_val = 0;
+ if (ret_val <= -33)
+ ret_val = -33;
+ ret_val = 0 - ret_val;
+ ret_val *= 3;
+ if (ret_val == 99)
+ ret_val = 100;
+ return ret_val;
+}
+
+void
+rtl8192_record_rxdesc_forlateruse(
+ struct rtllib_rx_stats *psrc_stats,
+ struct rtllib_rx_stats *ptarget_stats
+)
+{
+ ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
+ ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
+}
+
+
+
+static void rtl8192_rx_normal(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct rtllib_hdr_1addr *rtllib_hdr = NULL;
+ bool unicast_packet = false;
+ bool bLedBlinking = true;
+ u16 fc = 0, type = 0;
+ u32 skb_len = 0;
+ int rx_queue_idx = RX_MPDU_QUEUE;
+
+ struct rtllib_rx_stats stats = {
+ .signal = 0,
+ .noise = -98,
+ .rate = 0,
+ .freq = RTLLIB_24GHZ_BAND,
+ };
+ unsigned int count = priv->rxringcount;
+
+ stats.nic_type = NIC_8192E;
+
+ while (count--) {
+ struct rx_desc *pdesc = &priv->rx_ring[rx_queue_idx]
+ [priv->rx_idx[rx_queue_idx]];
+ struct sk_buff *skb = priv->rx_buf[rx_queue_idx]
+ [priv->rx_idx[rx_queue_idx]];
+
+ if (pdesc->OWN) {
+ return;
+ } else {
+ struct sk_buff *new_skb;
+
+ if (!priv->ops->rx_query_status_descriptor(dev, &stats,
+ pdesc, skb))
+ goto done;
+ new_skb = dev_alloc_skb(priv->rxbuffersize);
+ /* if allocation of new skb failed - drop current packet
+ * and reuse skb */
+ if (unlikely(!new_skb))
+ goto done;
+
+ pci_unmap_single(priv->pdev,
+ *((dma_addr_t *)skb->cb),
+ priv->rxbuffersize,
+ PCI_DMA_FROMDEVICE);
+
+ skb_put(skb, pdesc->Length);
+ skb_reserve(skb, stats.RxDrvInfoSize +
+ stats.RxBufShift);
+ skb_trim(skb, skb->len - 4/*sCrcLng*/);
+ rtllib_hdr = (struct rtllib_hdr_1addr *)skb->data;
+ if (!is_broadcast_ether_addr(rtllib_hdr->addr1) &&
+ !is_multicast_ether_addr(rtllib_hdr->addr1)) {
+ /* unicast packet */
+ unicast_packet = true;
+ }
+ fc = le16_to_cpu(rtllib_hdr->frame_ctl);
+ type = WLAN_FC_GET_TYPE(fc);
+ if (type == RTLLIB_FTYPE_MGMT)
+ bLedBlinking = false;
+
+ if (bLedBlinking)
+ if (priv->rtllib->LedControlHandler)
+ priv->rtllib->LedControlHandler(dev,
+ LED_CTL_RX);
+
+ if (stats.bCRC) {
+ if (type != RTLLIB_FTYPE_MGMT)
+ priv->stats.rxdatacrcerr++;
+ else
+ priv->stats.rxmgmtcrcerr++;
+ }
+
+ skb_len = skb->len;
+
+ if (!rtllib_rx(priv->rtllib, skb, &stats)) {
+ dev_kfree_skb_any(skb);
+ } else {
+ priv->stats.rxok++;
+ if (unicast_packet)
+ priv->stats.rxbytesunicast += skb_len;
+ }
+
+ skb = new_skb;
+ skb->dev = dev;
+
+ priv->rx_buf[rx_queue_idx][priv->rx_idx[rx_queue_idx]] =
+ skb;
+ *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev,
+ skb_tail_pointer_rsl(skb),
+ priv->rxbuffersize,
+ PCI_DMA_FROMDEVICE);
+
+ }
+done:
+ pdesc->BufferAddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
+ pdesc->OWN = 1;
+ pdesc->Length = priv->rxbuffersize;
+ if (priv->rx_idx[rx_queue_idx] == priv->rxringcount-1)
+ pdesc->EOR = 1;
+ priv->rx_idx[rx_queue_idx] = (priv->rx_idx[rx_queue_idx] + 1) %
+ priv->rxringcount;
+ }
+
+}
+
+static void rtl8192_rx_cmd(struct net_device *dev)
+{
+}
+
+
+static void rtl8192_tx_resume(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+ struct sk_buff *skb;
+ int queue_index;
+
+ for (queue_index = BK_QUEUE;
+ queue_index < MAX_QUEUE_SIZE; queue_index++) {
+ while ((!skb_queue_empty(&ieee->skb_waitQ[queue_index])) &&
+ (priv->rtllib->check_nic_enough_desc(dev, queue_index) > 0)) {
+ skb = skb_dequeue(&ieee->skb_waitQ[queue_index]);
+ ieee->softmac_data_hard_start_xmit(skb, dev, 0);
+ }
+ }
+}
+
+void rtl8192_irq_tx_tasklet(struct r8192_priv *priv)
+{
+ rtl8192_tx_resume(priv->rtllib->dev);
+}
+
+void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
+{
+ rtl8192_rx_normal(priv->rtllib->dev);
+
+ if (MAX_RX_QUEUE > 1)
+ rtl8192_rx_cmd(priv->rtllib->dev);
+
+ write_nic_dword(priv->rtllib->dev, INTA_MASK,
+ read_nic_dword(priv->rtllib->dev, INTA_MASK) | IMR_RDU);
+}
+
+/****************************************************************************
+ ---------------------------- NIC START/CLOSE STUFF---------------------------
+*****************************************************************************/
+void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
+{
+ cancel_delayed_work(&priv->watch_dog_wq);
+ cancel_delayed_work(&priv->update_beacon_wq);
+ cancel_delayed_work(&priv->rtllib->hw_sleep_wq);
+ cancel_work_sync(&priv->reset_wq);
+ cancel_work_sync(&priv->qos_activate);
+}
+
+int _rtl8192_up(struct net_device *dev, bool is_silent_reset)
+{
+ if (_rtl8192_sta_up(dev, is_silent_reset) == -1)
+ return -1;
+ return 0;
+}
+
+
+static int rtl8192_open(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ int ret;
+
+ down(&priv->wx_sem);
+ ret = rtl8192_up(dev);
+ up(&priv->wx_sem);
+ return ret;
+
+}
+
+
+int rtl8192_up(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->up == 1)
+ return -1;
+ return _rtl8192_up(dev, false);
+}
+
+
+static int rtl8192_close(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ int ret;
+
+ if ((rtllib_act_scanning(priv->rtllib, false)) &&
+ !(priv->rtllib->softmac_features & IEEE_SOFTMAC_SCAN)) {
+ rtllib_stop_scan(priv->rtllib);
+ }
+
+ down(&priv->wx_sem);
+
+ ret = rtl8192_down(dev, true);
+
+ up(&priv->wx_sem);
+
+ return ret;
+
+}
+
+int rtl8192_down(struct net_device *dev, bool shutdownrf)
+{
+ if (rtl8192_sta_down(dev, shutdownrf) == -1)
+ return -1;
+
+ return 0;
+}
+
+void rtl8192_commit(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->up == 0)
+ return;
+ rtllib_softmac_stop_protocol(priv->rtllib, 0 , true);
+ rtl8192_irq_disable(dev);
+ priv->ops->stop_adapter(dev, true);
+ _rtl8192_up(dev, false);
+}
+
+void rtl8192_restart(void *data)
+{
+ struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv,
+ reset_wq);
+ struct net_device *dev = priv->rtllib->dev;
+
+ down(&priv->wx_sem);
+
+ rtl8192_commit(dev);
+
+ up(&priv->wx_sem);
+}
+
+static void r8192_set_multicast(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ short promisc;
+
+ promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
+ priv->promisc = promisc;
+
+}
+
+
+static int r8192_set_mac_adr(struct net_device *dev, void *mac)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct sockaddr *addr = mac;
+
+ down(&priv->wx_sem);
+
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+
+ schedule_work(&priv->reset_wq);
+ up(&priv->wx_sem);
+
+ return 0;
+}
+
+/* based on ipw2200 driver */
+static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct iwreq *wrq = (struct iwreq *)rq;
+ int ret = -1;
+ struct rtllib_device *ieee = priv->rtllib;
+ u32 key[4];
+ u8 broadcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 zero_addr[6] = {0};
+ struct iw_point *p = &wrq->u.data;
+ struct ieee_param *ipw = NULL;
+
+ down(&priv->wx_sem);
+
+ switch (cmd) {
+ case RTL_IOCTL_WPA_SUPPLICANT:
+ if (p->length < sizeof(struct ieee_param) || !p->pointer) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ipw = kmalloc(p->length, GFP_KERNEL);
+ if (ipw == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (copy_from_user(ipw, p->pointer, p->length)) {
+ kfree(ipw);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (ipw->cmd == IEEE_CMD_SET_ENCRYPTION) {
+ if (ipw->u.crypt.set_tx) {
+ if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
+ ieee->pairwise_key_type = KEY_TYPE_CCMP;
+ else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
+ ieee->pairwise_key_type = KEY_TYPE_TKIP;
+ else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
+ if (ipw->u.crypt.key_len == 13)
+ ieee->pairwise_key_type =
+ KEY_TYPE_WEP104;
+ else if (ipw->u.crypt.key_len == 5)
+ ieee->pairwise_key_type =
+ KEY_TYPE_WEP40;
+ } else {
+ ieee->pairwise_key_type = KEY_TYPE_NA;
+ }
+
+ if (ieee->pairwise_key_type) {
+ if (memcmp(ieee->ap_mac_addr, zero_addr,
+ 6) == 0)
+ ieee->iw_mode = IW_MODE_ADHOC;
+ memcpy((u8 *)key, ipw->u.crypt.key, 16);
+ EnableHWSecurityConfig8192(dev);
+ set_swcam(dev, 4, ipw->u.crypt.idx,
+ ieee->pairwise_key_type,
+ (u8 *)ieee->ap_mac_addr,
+ 0, key, 0);
+ setKey(dev, 4, ipw->u.crypt.idx,
+ ieee->pairwise_key_type,
+ (u8 *)ieee->ap_mac_addr, 0, key);
+ if (ieee->iw_mode == IW_MODE_ADHOC) {
+ set_swcam(dev, ipw->u.crypt.idx,
+ ipw->u.crypt.idx,
+ ieee->pairwise_key_type,
+ (u8 *)ieee->ap_mac_addr,
+ 0, key, 0);
+ setKey(dev, ipw->u.crypt.idx,
+ ipw->u.crypt.idx,
+ ieee->pairwise_key_type,
+ (u8 *)ieee->ap_mac_addr,
+ 0, key);
+ }
+ }
+ if ((ieee->pairwise_key_type == KEY_TYPE_CCMP)
+ && ieee->pHTInfo->bCurrentHTSupport) {
+ write_nic_byte(dev, 0x173, 1);
+ }
+
+ } else {
+ memcpy((u8 *)key, ipw->u.crypt.key, 16);
+ if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
+ ieee->group_key_type = KEY_TYPE_CCMP;
+ else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
+ ieee->group_key_type = KEY_TYPE_TKIP;
+ else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
+ if (ipw->u.crypt.key_len == 13)
+ ieee->group_key_type =
+ KEY_TYPE_WEP104;
+ else if (ipw->u.crypt.key_len == 5)
+ ieee->group_key_type =
+ KEY_TYPE_WEP40;
+ } else
+ ieee->group_key_type = KEY_TYPE_NA;
+
+ if (ieee->group_key_type) {
+ set_swcam(dev, ipw->u.crypt.idx,
+ ipw->u.crypt.idx,
+ ieee->group_key_type,
+ broadcast_addr, 0, key, 0);
+ setKey(dev, ipw->u.crypt.idx,
+ ipw->u.crypt.idx,
+ ieee->group_key_type,
+ broadcast_addr, 0, key);
+ }
+ }
+ }
+
+ ret = rtllib_wpa_supplicant_ioctl(priv->rtllib, &wrq->u.data,
+ 0);
+ kfree(ipw);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+out:
+ up(&priv->wx_sem);
+
+ return ret;
+}
+
+
+irqreturn_type rtl8192_interrupt(int irq, void *netdev, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) netdev;
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ unsigned long flags;
+ u32 inta;
+ u32 intb;
+ intb = 0;
+
+ if (priv->irq_enabled == 0)
+ goto done;
+
+ spin_lock_irqsave(&priv->irq_th_lock, flags);
+
+ priv->ops->interrupt_recognized(dev, &inta, &intb);
+ priv->stats.shints++;
+
+ if (!inta) {
+ spin_unlock_irqrestore(&priv->irq_th_lock, flags);
+ goto done;
+ }
+
+ if (inta == 0xffff) {
+ spin_unlock_irqrestore(&priv->irq_th_lock, flags);
+ goto done;
+ }
+
+ priv->stats.ints++;
+
+ if (!netif_running(dev)) {
+ spin_unlock_irqrestore(&priv->irq_th_lock, flags);
+ goto done;
+ }
+
+ if (inta & IMR_TBDOK) {
+ RT_TRACE(COMP_INTR, "beacon ok interrupt!\n");
+ priv->stats.txbeaconokint++;
+ }
+
+ if (inta & IMR_TBDER) {
+ RT_TRACE(COMP_INTR, "beacon ok interrupt!\n");
+ priv->stats.txbeaconerr++;
+ }
+
+ if (inta & IMR_BDOK)
+ RT_TRACE(COMP_INTR, "beacon interrupt!\n");
+
+ if (inta & IMR_MGNTDOK) {
+ RT_TRACE(COMP_INTR, "Manage ok interrupt!\n");
+ priv->stats.txmanageokint++;
+ rtl8192_tx_isr(dev, MGNT_QUEUE);
+ spin_unlock_irqrestore(&priv->irq_th_lock, flags);
+ if (priv->rtllib->ack_tx_to_ieee) {
+ if (rtl8192_is_tx_queue_empty(dev)) {
+ priv->rtllib->ack_tx_to_ieee = 0;
+ rtllib_ps_tx_ack(priv->rtllib, 1);
+ }
+ }
+ spin_lock_irqsave(&priv->irq_th_lock, flags);
+ }
+
+ if (inta & IMR_COMDOK) {
+ priv->stats.txcmdpktokint++;
+ rtl8192_tx_isr(dev, TXCMD_QUEUE);
+ }
+
+ if (inta & IMR_HIGHDOK)
+ rtl8192_tx_isr(dev, HIGH_QUEUE);
+
+ if (inta & IMR_ROK) {
+ priv->stats.rxint++;
+ priv->InterruptLog.nIMR_ROK++;
+ tasklet_schedule(&priv->irq_rx_tasklet);
+ }
+
+ if (inta & IMR_BcnInt) {
+ RT_TRACE(COMP_INTR, "prepare beacon for interrupt!\n");
+ tasklet_schedule(&priv->irq_prepare_beacon_tasklet);
+ }
+
+ if (inta & IMR_RDU) {
+ RT_TRACE(COMP_INTR, "rx descriptor unavailable!\n");
+ priv->stats.rxrdu++;
+ write_nic_dword(dev, INTA_MASK,
+ read_nic_dword(dev, INTA_MASK) & ~IMR_RDU);
+ tasklet_schedule(&priv->irq_rx_tasklet);
+ }
+
+ if (inta & IMR_RXFOVW) {
+ RT_TRACE(COMP_INTR, "rx overflow !\n");
+ priv->stats.rxoverflow++;
+ tasklet_schedule(&priv->irq_rx_tasklet);
+ }
+
+ if (inta & IMR_TXFOVW)
+ priv->stats.txoverflow++;
+
+ if (inta & IMR_BKDOK) {
+ RT_TRACE(COMP_INTR, "BK Tx OK interrupt!\n");
+ priv->stats.txbkokint++;
+ priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
+ rtl8192_tx_isr(dev, BK_QUEUE);
+ }
+
+ if (inta & IMR_BEDOK) {
+ RT_TRACE(COMP_INTR, "BE TX OK interrupt!\n");
+ priv->stats.txbeokint++;
+ priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
+ rtl8192_tx_isr(dev, BE_QUEUE);
+ }
+
+ if (inta & IMR_VIDOK) {
+ RT_TRACE(COMP_INTR, "VI TX OK interrupt!\n");
+ priv->stats.txviokint++;
+ priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
+ rtl8192_tx_isr(dev, VI_QUEUE);
+ }
+
+ if (inta & IMR_VODOK) {
+ priv->stats.txvookint++;
+ RT_TRACE(COMP_INTR, "Vo TX OK interrupt!\n");
+ priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
+ rtl8192_tx_isr(dev, VO_QUEUE);
+ }
+
+ spin_unlock_irqrestore(&priv->irq_th_lock, flags);
+
+done:
+
+ return IRQ_HANDLED;
+}
+
+
+
+/****************************************************************************
+ ---------------------------- PCI_STUFF---------------------------
+*****************************************************************************/
+#ifdef HAVE_NET_DEVICE_OPS
+static const struct net_device_ops rtl8192_netdev_ops = {
+ .ndo_open = rtl8192_open,
+ .ndo_stop = rtl8192_close,
+ .ndo_tx_timeout = rtl8192_tx_timeout,
+ .ndo_do_ioctl = rtl8192_ioctl,
+ .ndo_set_rx_mode = r8192_set_multicast,
+ .ndo_set_mac_address = r8192_set_mac_adr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_start_xmit = rtllib_xmit,
+};
+#endif
+
+static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ unsigned long ioaddr = 0;
+ struct net_device *dev = NULL;
+ struct r8192_priv *priv = NULL;
+ struct rtl819x_ops *ops = (struct rtl819x_ops *)(id->driver_data);
+ unsigned long pmem_start, pmem_len, pmem_flags;
+ int err = -ENOMEM;
+ bool bdma64 = false;
+ u8 revision_id;
+
+ RT_TRACE(COMP_INIT, "Configuring chip resources");
+
+ if (pci_enable_device(pdev)) {
+ RT_TRACE(COMP_ERR, "Failed to enable PCI device");
+ return -EIO;
+ }
+
+ pci_set_master(pdev);
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ printk(KERN_INFO "Unable to obtain 32bit DMA for consistent allocations\n");
+ goto err_pci_disable;
+ }
+ }
+ dev = alloc_rtllib(sizeof(struct r8192_priv));
+ if (!dev)
+ goto err_pci_disable;
+
+ err = -ENODEV;
+ if (bdma64)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ pci_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ priv = rtllib_priv(dev);
+ priv->rtllib = (struct rtllib_device *)netdev_priv_rsl(dev);
+ priv->pdev = pdev;
+ priv->rtllib->pdev = pdev;
+ if ((pdev->subsystem_vendor == PCI_VENDOR_ID_DLINK) &&
+ (pdev->subsystem_device == 0x3304))
+ priv->rtllib->bSupportRemoteWakeUp = 1;
+ else
+ priv->rtllib->bSupportRemoteWakeUp = 0;
+
+ pmem_start = pci_resource_start(pdev, 1);
+ pmem_len = pci_resource_len(pdev, 1);
+ pmem_flags = pci_resource_flags(pdev, 1);
+
+ if (!(pmem_flags & IORESOURCE_MEM)) {
+ RT_TRACE(COMP_ERR, "region #1 not a MMIO resource, aborting");
+ goto err_rel_rtllib;
+ }
+
+ printk(KERN_INFO "Memory mapped space start: 0x%08lx\n", pmem_start);
+ if (!request_mem_region(pmem_start, pmem_len, DRV_NAME)) {
+ RT_TRACE(COMP_ERR, "request_mem_region failed!");
+ goto err_rel_rtllib;
+ }
+
+
+ ioaddr = (unsigned long)ioremap_nocache(pmem_start, pmem_len);
+ if (ioaddr == (unsigned long)NULL) {
+ RT_TRACE(COMP_ERR, "ioremap failed!");
+ goto err_rel_mem;
+ }
+
+ dev->mem_start = ioaddr;
+ dev->mem_end = ioaddr + pci_resource_len(pdev, 0);
+
+ pci_read_config_byte(pdev, 0x08, &revision_id);
+ /* If the revisionid is 0x10, the device uses rtl8192se. */
+ if (pdev->device == 0x8192 && revision_id == 0x10)
+ goto err_rel_mem;
+
+ priv->ops = ops;
+
+ if (rtl8192_pci_findadapter(pdev, dev) == false)
+ goto err_rel_mem;
+
+ dev->irq = pdev->irq;
+ priv->irq = 0;
+
+#ifdef HAVE_NET_DEVICE_OPS
+ dev->netdev_ops = &rtl8192_netdev_ops;
+#else
+ dev->open = rtl8192_open;
+ dev->stop = rtl8192_close;
+ dev->tx_timeout = rtl8192_tx_timeout;
+ dev->do_ioctl = rtl8192_ioctl;
+ dev->set_multicast_list = r8192_set_multicast;
+ dev->set_mac_address = r8192_set_mac_adr;
+ dev->hard_start_xmit = rtllib_xmit;
+#endif
+
+ dev->wireless_handlers = (struct iw_handler_def *)
+ &r8192_wx_handlers_def;
+ dev->ethtool_ops = &rtl819x_ethtool_ops;
+
+ dev->type = ARPHRD_ETHER;
+ dev->watchdog_timeo = HZ * 3;
+
+ if (dev_alloc_name(dev, ifname) < 0) {
+ RT_TRACE(COMP_INIT, "Oops: devname already taken! Trying "
+ "wlan%%d...\n");
+ dev_alloc_name(dev, ifname);
+ }
+
+ RT_TRACE(COMP_INIT, "Driver probe completed1\n");
+ if (rtl8192_init(dev) != 0) {
+ RT_TRACE(COMP_ERR, "Initialization failed");
+ goto err_free_irq;
+ }
+
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+
+ register_netdev(dev);
+ RT_TRACE(COMP_INIT, "dev name: %s\n", dev->name);
+ err = rtl_debug_module_init(priv, dev->name);
+ if (err)
+ RT_TRACE(COMP_DBG, "failed to create debugfs files. Ignoring "
+ "error: %d\n", err);
+ rtl8192_proc_init_one(dev);
+
+ if (priv->polling_timer_on == 0)
+ check_rfctrl_gpio_timer((unsigned long)dev);
+
+ RT_TRACE(COMP_INIT, "Driver probe completed\n");
+ return 0;
+
+err_free_irq:
+ free_irq(dev->irq, dev);
+ priv->irq = 0;
+err_rel_mem:
+ release_mem_region(pmem_start, pmem_len);
+err_rel_rtllib:
+ free_rtllib(dev);
+
+ DMESG("wlan driver load failed\n");
+ pci_set_drvdata(pdev, NULL);
+err_pci_disable:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void __devexit rtl8192_pci_disconnect(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct r8192_priv *priv ;
+ u32 i;
+
+ if (dev) {
+ unregister_netdev(dev);
+
+ priv = rtllib_priv(dev);
+
+ del_timer_sync(&priv->gpio_polling_timer);
+ cancel_delayed_work(&priv->gpio_change_rf_wq);
+ priv->polling_timer_on = 0;
+ rtl_debug_module_remove(priv);
+ rtl8192_proc_remove_one(dev);
+ rtl8192_down(dev, true);
+ deinit_hal_dm(dev);
+ if (priv->pFirmware) {
+ vfree(priv->pFirmware);
+ priv->pFirmware = NULL;
+ }
+ destroy_workqueue(priv->priv_wq);
+ rtl8192_free_rx_ring(dev);
+ for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
+ rtl8192_free_tx_ring(dev, i);
+
+ if (priv->irq) {
+ printk(KERN_INFO "Freeing irq %d\n", dev->irq);
+ free_irq(dev->irq, dev);
+ priv->irq = 0;
+ }
+ free_rtllib(dev);
+
+ kfree(priv->scan_cmd);
+
+ if (dev->mem_start != 0) {
+ iounmap((void __iomem *)dev->mem_start);
+ release_mem_region(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ }
+ } else {
+ priv = rtllib_priv(dev);
+ }
+
+ pci_disable_device(pdev);
+ RT_TRACE(COMP_DOWN, "wlan driver removed\n");
+}
+
+bool NicIFEnableNIC(struct net_device *dev)
+{
+ bool init_status = true;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ (&(priv->rtllib->PowerSaveControl));
+
+ if (IS_NIC_DOWN(priv)) {
+ RT_TRACE(COMP_ERR, "ERR!!! %s(): Driver is already down!\n",
+ __func__);
+ priv->bdisable_nic = false;
+ return RT_STATUS_FAILURE;
+ }
+
+ RT_TRACE(COMP_PS, "===========>%s()\n", __func__);
+ priv->bfirst_init = true;
+ init_status = priv->ops->initialize_adapter(dev);
+ if (init_status != true) {
+ RT_TRACE(COMP_ERR, "ERR!!! %s(): initialization is failed!\n",
+ __func__);
+ priv->bdisable_nic = false;
+ return -1;
+ }
+ RT_TRACE(COMP_INIT, "start adapter finished\n");
+ RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
+ priv->bfirst_init = false;
+
+ rtl8192_irq_enable(dev);
+ priv->bdisable_nic = false;
+ RT_TRACE(COMP_PS, "<===========%s()\n", __func__);
+ return init_status;
+}
+bool NicIFDisableNIC(struct net_device *dev)
+{
+ bool status = true;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 tmp_state = 0;
+ RT_TRACE(COMP_PS, "=========>%s()\n", __func__);
+ priv->bdisable_nic = true;
+ tmp_state = priv->rtllib->state;
+ rtllib_softmac_stop_protocol(priv->rtllib, 0, false);
+ priv->rtllib->state = tmp_state;
+ rtl8192_cancel_deferred_work(priv);
+ rtl8192_irq_disable(dev);
+
+ priv->ops->stop_adapter(dev, false);
+ RT_TRACE(COMP_PS, "<=========%s()\n", __func__);
+
+ return status;
+}
+
+static int __init rtl8192_pci_module_init(void)
+{
+ int ret;
+ int error;
+
+ ret = rtllib_init();
+ if (ret) {
+ printk(KERN_ERR "rtllib_init() failed %d\n", ret);
+ return ret;
+ }
+ ret = rtllib_crypto_init();
+ if (ret) {
+ printk(KERN_ERR "rtllib_crypto_init() failed %d\n", ret);
+ return ret;
+ }
+ ret = rtllib_crypto_tkip_init();
+ if (ret) {
+ printk(KERN_ERR "rtllib_crypto_tkip_init() failed %d\n", ret);
+ return ret;
+ }
+ ret = rtllib_crypto_ccmp_init();
+ if (ret) {
+ printk(KERN_ERR "rtllib_crypto_ccmp_init() failed %d\n", ret);
+ return ret;
+ }
+ ret = rtllib_crypto_wep_init();
+ if (ret) {
+ printk(KERN_ERR "rtllib_crypto_wep_init() failed %d\n", ret);
+ return ret;
+ }
+ printk(KERN_INFO "\nLinux kernel driver for RTL8192E WLAN cards\n");
+ printk(KERN_INFO "Copyright (c) 2007-2008, Realsil Wlan Driver\n");
+
+ error = rtl_create_debugfs_root();
+ if (error) {
+ RT_TRACE(COMP_DBG, "Create debugfs root fail: %d\n", error);
+ goto err_out;
+ }
+
+ rtl8192_proc_module_init();
+ if (0 != pci_register_driver(&rtl8192_pci_driver)) {
+ DMESG("No device found");
+ /*pci_unregister_driver (&rtl8192_pci_driver);*/
+ return -ENODEV;
+ }
+ return 0;
+err_out:
+ return error;
+
+}
+
+static void __exit rtl8192_pci_module_exit(void)
+{
+ pci_unregister_driver(&rtl8192_pci_driver);
+
+ RT_TRACE(COMP_DOWN, "Exiting");
+ rtl8192_proc_module_remove();
+ rtl_remove_debugfs_root();
+ rtllib_crypto_tkip_exit();
+ rtllib_crypto_ccmp_exit();
+ rtllib_crypto_wep_exit();
+ rtllib_crypto_deinit();
+ rtllib_exit();
+}
+
+void check_rfctrl_gpio_timer(unsigned long data)
+{
+ struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
+
+ priv->polling_timer_on = 1;
+
+ queue_delayed_work_rsl(priv->priv_wq, &priv->gpio_change_rf_wq, 0);
+
+ mod_timer(&priv->gpio_polling_timer, jiffies +
+ MSECS(RTLLIB_WATCH_DOG_TIME));
+}
+
+/***************************************************************************
+ ------------------- module init / exit stubs ----------------
+****************************************************************************/
+module_init(rtl8192_pci_module_init);
+module_exit(rtl8192_pci_module_exit);
+
+MODULE_DESCRIPTION("Linux driver for Realtek RTL819x WiFi cards");
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+
+module_param(ifname, charp, S_IRUGO|S_IWUSR);
+module_param(hwwep, int, S_IRUGO|S_IWUSR);
+module_param(channels, int, S_IRUGO|S_IWUSR);
+
+MODULE_PARM_DESC(ifname, " Net interface name, wlan%d=default");
+MODULE_PARM_DESC(hwwep, " Try to use hardware WEP support(default use hw. set 0 to use software security)");
+MODULE_PARM_DESC(channels, " Channel bitmask for specific locales. NYI");
diff --git a/drivers/staging/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl_core.h
new file mode 100644
index 00000000000..f9af5153d9c
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_core.h
@@ -0,0 +1,1124 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+
+#ifndef _RTL_CORE_H
+#define _RTL_CORE_H
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/wireless.h>
+#include <linux/timer.h>
+#include <linux/proc_fs.h>
+#include <linux/if_arp.h>
+#include <linux/random.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#include "rtllib.h"
+
+#include "dot11d.h"
+
+#include "r8192E_firmware.h"
+#include "r8192E_hw.h"
+
+#include "r8190P_def.h"
+#include "r8192E_dev.h"
+
+#include "rtl_debug.h"
+#include "rtl_eeprom.h"
+#include "rtl_ps.h"
+#include "rtl_pci.h"
+#include "rtl_cam.h"
+
+#define DRV_COPYRIGHT \
+ "Copyright(c) 2008 - 2010 Realsil Semiconductor Corporation"
+#define DRV_AUTHOR "<wlanfae@realtek.com>"
+#define DRV_VERSION "0014.0401.2010"
+
+#define DRV_NAME "rtl819xE"
+
+#define IS_HARDWARE_TYPE_819xP(_priv) \
+ ((((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8190P) || \
+ (((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192E))
+#define IS_HARDWARE_TYPE_8192SE(_priv) \
+ (((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192SE)
+#define IS_HARDWARE_TYPE_8192CE(_priv) \
+ (((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192CE)
+#define IS_HARDWARE_TYPE_8192CU(_priv) \
+ (((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192CU)
+#define IS_HARDWARE_TYPE_8192DE(_priv) \
+ (((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192DE)
+#define IS_HARDWARE_TYPE_8192DU(_priv) \
+ (((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192DU)
+
+#define RTL_PCI_DEVICE(vend, dev, cfg) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID , \
+ .driver_data = (kernel_ulong_t)&(cfg)
+
+#define irqreturn_type irqreturn_t
+
+#define rtl8192_interrupt(x, y, z) rtl8192_interrupt_rsl(x, y)
+
+#define RTL_MAX_SCAN_SIZE 128
+
+#define RTL_RATE_MAX 30
+
+#define TOTAL_CAM_ENTRY 32
+#define CAM_CONTENT_COUNT 8
+
+#ifndef BIT
+#define BIT(_i) (1<<(_i))
+#endif
+
+#define IS_NIC_DOWN(priv) (!(priv)->up)
+
+#define IS_ADAPTER_SENDS_BEACON(dev) 0
+
+#define IS_UNDER_11N_AES_MODE(_rtllib) \
+ ((_rtllib->pHTInfo->bCurrentHTSupport == true) && \
+ (_rtllib->pairwise_key_type == KEY_TYPE_CCMP))
+
+#define HAL_MEMORY_MAPPED_IO_RANGE_8190PCI 0x1000
+#define HAL_HW_PCI_REVISION_ID_8190PCI 0x00
+#define HAL_MEMORY_MAPPED_IO_RANGE_8192PCIE 0x4000
+#define HAL_HW_PCI_REVISION_ID_8192PCIE 0x01
+#define HAL_MEMORY_MAPPED_IO_RANGE_8192SE 0x4000
+#define HAL_HW_PCI_REVISION_ID_8192SE 0x10
+#define HAL_HW_PCI_REVISION_ID_8192CE 0x1
+#define HAL_MEMORY_MAPPED_IO_RANGE_8192CE 0x4000
+#define HAL_HW_PCI_REVISION_ID_8192DE 0x0
+#define HAL_MEMORY_MAPPED_IO_RANGE_8192DE 0x4000
+
+#define HAL_HW_PCI_8180_DEVICE_ID 0x8180
+#define HAL_HW_PCI_8185_DEVICE_ID 0x8185
+#define HAL_HW_PCI_8188_DEVICE_ID 0x8188
+#define HAL_HW_PCI_8198_DEVICE_ID 0x8198
+#define HAL_HW_PCI_8190_DEVICE_ID 0x8190
+#define HAL_HW_PCI_8192_DEVICE_ID 0x8192
+#define HAL_HW_PCI_8192SE_DEVICE_ID 0x8192
+#define HAL_HW_PCI_8174_DEVICE_ID 0x8174
+#define HAL_HW_PCI_8173_DEVICE_ID 0x8173
+#define HAL_HW_PCI_8172_DEVICE_ID 0x8172
+#define HAL_HW_PCI_8171_DEVICE_ID 0x8171
+#define HAL_HW_PCI_0045_DEVICE_ID 0x0045
+#define HAL_HW_PCI_0046_DEVICE_ID 0x0046
+#define HAL_HW_PCI_0044_DEVICE_ID 0x0044
+#define HAL_HW_PCI_0047_DEVICE_ID 0x0047
+#define HAL_HW_PCI_700F_DEVICE_ID 0x700F
+#define HAL_HW_PCI_701F_DEVICE_ID 0x701F
+#define HAL_HW_PCI_DLINK_DEVICE_ID 0x3304
+#define HAL_HW_PCI_8192CET_DEVICE_ID 0x8191
+#define HAL_HW_PCI_8192CE_DEVICE_ID 0x8178
+#define HAL_HW_PCI_8191CE_DEVICE_ID 0x8177
+#define HAL_HW_PCI_8188CE_DEVICE_ID 0x8176
+#define HAL_HW_PCI_8192CU_DEVICE_ID 0x8191
+#define HAL_HW_PCI_8192DE_DEVICE_ID 0x092D
+#define HAL_HW_PCI_8192DU_DEVICE_ID 0x092D
+
+#define RTL819X_DEFAULT_RF_TYPE RF_1T2R
+
+#define RTLLIB_WATCH_DOG_TIME 2000
+
+#define MAX_DEV_ADDR_SIZE 8 /*support till 64 bit bus width OS*/
+#define MAX_FIRMWARE_INFORMATION_SIZE 32
+#define MAX_802_11_HEADER_LENGTH (40 + MAX_FIRMWARE_INFORMATION_SIZE)
+#define ENCRYPTION_MAX_OVERHEAD 128
+#define MAX_FRAGMENT_COUNT 8
+#define MAX_TRANSMIT_BUFFER_SIZE \
+ (1600 + (MAX_802_11_HEADER_LENGTH + ENCRYPTION_MAX_OVERHEAD) * \
+ MAX_FRAGMENT_COUNT)
+
+#define scrclng 4
+
+#define DEFAULT_FRAG_THRESHOLD 2342U
+#define MIN_FRAG_THRESHOLD 256U
+#define DEFAULT_BEACONINTERVAL 0x64U
+
+#define DEFAULT_SSID ""
+#define DEFAULT_RETRY_RTS 7
+#define DEFAULT_RETRY_DATA 7
+#define PRISM_HDR_SIZE 64
+
+#define PHY_RSSI_SLID_WIN_MAX 100
+
+#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
+
+#define TxBBGainTableLength 37
+#define CCKTxBBGainTableLength 23
+
+#define CHANNEL_PLAN_LEN 10
+#define sCrcLng 4
+
+#define NIC_SEND_HANG_THRESHOLD_NORMAL 4
+#define NIC_SEND_HANG_THRESHOLD_POWERSAVE 8
+
+#define MAX_TX_QUEUE 9
+
+#define MAX_RX_QUEUE 1
+
+#define MAX_RX_COUNT 64
+#define MAX_TX_QUEUE_COUNT 9
+
+enum RTL819x_PHY_PARAM {
+ RTL819X_PHY_MACPHY_REG = 0,
+ RTL819X_PHY_MACPHY_REG_PG = 1,
+ RTL8188C_PHY_MACREG = 2,
+ RTL8192C_PHY_MACREG = 3,
+ RTL819X_PHY_REG = 4,
+ RTL819X_PHY_REG_1T2R = 5,
+ RTL819X_PHY_REG_to1T1R = 6,
+ RTL819X_PHY_REG_to1T2R = 7,
+ RTL819X_PHY_REG_to2T2R = 8,
+ RTL819X_PHY_REG_PG = 9,
+ RTL819X_AGC_TAB = 10,
+ RTL819X_PHY_RADIO_A = 11,
+ RTL819X_PHY_RADIO_A_1T = 12,
+ RTL819X_PHY_RADIO_A_2T = 13,
+ RTL819X_PHY_RADIO_B = 14,
+ RTL819X_PHY_RADIO_B_GM = 15,
+ RTL819X_PHY_RADIO_C = 16,
+ RTL819X_PHY_RADIO_D = 17,
+ RTL819X_EEPROM_MAP = 18,
+ RTL819X_EFUSE_MAP = 19,
+};
+
+enum RTL_DEBUG {
+ COMP_TRACE = BIT0,
+ COMP_DBG = BIT1,
+ COMP_INIT = BIT2,
+ COMP_RECV = BIT3,
+ COMP_SEND = BIT4,
+ COMP_CMD = BIT5,
+ COMP_POWER = BIT6,
+ COMP_EPROM = BIT7,
+ COMP_SWBW = BIT8,
+ COMP_SEC = BIT9,
+ COMP_LPS = BIT10,
+ COMP_QOS = BIT11,
+ COMP_RATE = BIT12,
+ COMP_RXDESC = BIT13,
+ COMP_PHY = BIT14,
+ COMP_DIG = BIT15,
+ COMP_TXAGC = BIT16,
+ COMP_HALDM = BIT17,
+ COMP_POWER_TRACKING = BIT18,
+ COMP_CH = BIT19,
+ COMP_RF = BIT20,
+ COMP_FIRMWARE = BIT21,
+ COMP_HT = BIT22,
+ COMP_RESET = BIT23,
+ COMP_CMDPKT = BIT24,
+ COMP_SCAN = BIT25,
+ COMP_PS = BIT26,
+ COMP_DOWN = BIT27,
+ COMP_INTR = BIT28,
+ COMP_LED = BIT29,
+ COMP_MLME = BIT30,
+ COMP_ERR = BIT31
+};
+
+enum nic_t {
+ NIC_UNKNOWN = 0,
+ NIC_8192E = 1,
+ NIC_8190P = 2,
+ NIC_8192SE = 4,
+ NIC_8192CE = 5,
+ NIC_8192CU = 6,
+ NIC_8192DE = 7,
+ NIC_8192DU = 8,
+};
+
+enum rt_eeprom_type {
+ EEPROM_93C46,
+ EEPROM_93C56,
+ EEPROM_BOOT_EFUSE,
+};
+
+enum dcmg_txcmd_op {
+ TXCMD_TXRA_HISTORY_CTRL = 0xFF900000,
+ TXCMD_RESET_TX_PKT_BUFF = 0xFF900001,
+ TXCMD_RESET_RX_PKT_BUFF = 0xFF900002,
+ TXCMD_SET_TX_DURATION = 0xFF900003,
+ TXCMD_SET_RX_RSSI = 0xFF900004,
+ TXCMD_SET_TX_PWR_TRACKING = 0xFF900005,
+ TXCMD_XXXX_CTRL,
+};
+
+enum rt_rf_type_819xu {
+ RF_TYPE_MIN = 0,
+ RF_8225,
+ RF_8256,
+ RF_8258,
+ RF_6052 = 4,
+ RF_PSEUDO_11N = 5,
+};
+
+enum rf_step {
+ RF_STEP_INIT = 0,
+ RF_STEP_NORMAL,
+ RF_STEP_MAX
+};
+
+enum rt_status {
+ RT_STATUS_SUCCESS,
+ RT_STATUS_FAILURE,
+ RT_STATUS_PENDING,
+ RT_STATUS_RESOURCE
+};
+
+enum rt_customer_id {
+ RT_CID_DEFAULT = 0,
+ RT_CID_8187_ALPHA0 = 1,
+ RT_CID_8187_SERCOMM_PS = 2,
+ RT_CID_8187_HW_LED = 3,
+ RT_CID_8187_NETGEAR = 4,
+ RT_CID_WHQL = 5,
+ RT_CID_819x_CAMEO = 6,
+ RT_CID_819x_RUNTOP = 7,
+ RT_CID_819x_Senao = 8,
+ RT_CID_TOSHIBA = 9,
+ RT_CID_819x_Netcore = 10,
+ RT_CID_Nettronix = 11,
+ RT_CID_DLINK = 12,
+ RT_CID_PRONET = 13,
+ RT_CID_COREGA = 14,
+ RT_CID_819x_ALPHA = 15,
+ RT_CID_819x_Sitecom = 16,
+ RT_CID_CCX = 17,
+ RT_CID_819x_Lenovo = 18,
+ RT_CID_819x_QMI = 19,
+ RT_CID_819x_Edimax_Belkin = 20,
+ RT_CID_819x_Sercomm_Belkin = 21,
+ RT_CID_819x_CAMEO1 = 22,
+ RT_CID_819x_MSI = 23,
+ RT_CID_819x_Acer = 24,
+ RT_CID_819x_HP = 27,
+ RT_CID_819x_CLEVO = 28,
+ RT_CID_819x_Arcadyan_Belkin = 29,
+ RT_CID_819x_SAMSUNG = 30,
+ RT_CID_819x_WNC_COREGA = 31,
+};
+
+enum reset_type {
+ RESET_TYPE_NORESET = 0x00,
+ RESET_TYPE_NORMAL = 0x01,
+ RESET_TYPE_SILENT = 0x02
+};
+
+enum ic_inferiority_8192s {
+ IC_INFERIORITY_A = 0,
+ IC_INFERIORITY_B = 1,
+};
+
+enum pci_bridge_vendor {
+ PCI_BRIDGE_VENDOR_INTEL = 0x0,
+ PCI_BRIDGE_VENDOR_ATI,
+ PCI_BRIDGE_VENDOR_AMD,
+ PCI_BRIDGE_VENDOR_SIS ,
+ PCI_BRIDGE_VENDOR_UNKNOWN,
+ PCI_BRIDGE_VENDOR_MAX ,
+};
+
+struct buffer {
+ struct buffer *next;
+ u32 *buf;
+ dma_addr_t dma;
+
+};
+
+struct rtl_reg_debug {
+ unsigned int cmd;
+ struct {
+ unsigned char type;
+ unsigned char addr;
+ unsigned char page;
+ unsigned char length;
+ } head;
+ unsigned char buf[0xff];
+};
+
+struct rt_tx_rahis {
+ u32 cck[4];
+ u32 ofdm[8];
+ u32 ht_mcs[4][16];
+};
+
+struct rt_smooth_data_4rf {
+ char elements[4][100];
+ u32 index;
+ u32 TotalNum;
+ u32 TotalVal[4];
+};
+
+struct rt_stats {
+ unsigned long txrdu;
+ unsigned long rxrdu;
+ unsigned long rxok;
+ unsigned long rxframgment;
+ unsigned long rxcmdpkt[8];
+ unsigned long rxurberr;
+ unsigned long rxstaterr;
+ unsigned long rxdatacrcerr;
+ unsigned long rxmgmtcrcerr;
+ unsigned long rxcrcerrmin;
+ unsigned long rxcrcerrmid;
+ unsigned long rxcrcerrmax;
+ unsigned long received_rate_histogram[4][32];
+ unsigned long received_preamble_GI[2][32];
+ unsigned long rx_AMPDUsize_histogram[5];
+ unsigned long rx_AMPDUnum_histogram[5];
+ unsigned long numpacket_matchbssid;
+ unsigned long numpacket_toself;
+ unsigned long num_process_phyinfo;
+ unsigned long numqry_phystatus;
+ unsigned long numqry_phystatusCCK;
+ unsigned long numqry_phystatusHT;
+ unsigned long received_bwtype[5];
+ unsigned long txnperr;
+ unsigned long txnpdrop;
+ unsigned long txresumed;
+ unsigned long rxoverflow;
+ unsigned long rxint;
+ unsigned long txnpokint;
+ unsigned long ints;
+ unsigned long shints;
+ unsigned long txoverflow;
+ unsigned long txlpokint;
+ unsigned long txlpdrop;
+ unsigned long txlperr;
+ unsigned long txbeokint;
+ unsigned long txbedrop;
+ unsigned long txbeerr;
+ unsigned long txbkokint;
+ unsigned long txbkdrop;
+ unsigned long txbkerr;
+ unsigned long txviokint;
+ unsigned long txvidrop;
+ unsigned long txvierr;
+ unsigned long txvookint;
+ unsigned long txvodrop;
+ unsigned long txvoerr;
+ unsigned long txbeaconokint;
+ unsigned long txbeacondrop;
+ unsigned long txbeaconerr;
+ unsigned long txmanageokint;
+ unsigned long txmanagedrop;
+ unsigned long txmanageerr;
+ unsigned long txcmdpktokint;
+ unsigned long txdatapkt;
+ unsigned long txfeedback;
+ unsigned long txfeedbackok;
+ unsigned long txoktotal;
+ unsigned long txokbytestotal;
+ unsigned long txokinperiod;
+ unsigned long txmulticast;
+ unsigned long txbytesmulticast;
+ unsigned long txbroadcast;
+ unsigned long txbytesbroadcast;
+ unsigned long txunicast;
+ unsigned long txbytesunicast;
+ unsigned long rxbytesunicast;
+ unsigned long txfeedbackfail;
+ unsigned long txerrtotal;
+ unsigned long txerrbytestotal;
+ unsigned long txerrmulticast;
+ unsigned long txerrbroadcast;
+ unsigned long txerrunicast;
+ unsigned long txretrycount;
+ unsigned long txfeedbackretry;
+ u8 last_packet_rate;
+ unsigned long slide_signal_strength[100];
+ unsigned long slide_evm[100];
+ unsigned long slide_rssi_total;
+ unsigned long slide_evm_total;
+ long signal_strength;
+ long signal_quality;
+ long last_signal_strength_inpercent;
+ long recv_signal_power;
+ u8 rx_rssi_percentage[4];
+ u8 rx_evm_percentage[2];
+ long rxSNRdB[4];
+ struct rt_tx_rahis txrate;
+ u32 Slide_Beacon_pwdb[100];
+ u32 Slide_Beacon_Total;
+ struct rt_smooth_data_4rf cck_adc_pwdb;
+ u32 CurrentShowTxate;
+};
+
+struct channel_access_setting {
+ u16 SIFS_Timer;
+ u16 DIFS_Timer;
+ u16 SlotTimeTimer;
+ u16 EIFS_Timer;
+ u16 CWminIndex;
+ u16 CWmaxIndex;
+};
+
+enum two_port_status {
+ TWO_PORT_STATUS__DEFAULT_ONLY,
+ TWO_PORT_STATUS__EXTENSION_ONLY,
+ TWO_PORT_STATUS__EXTENSION_FOLLOW_DEFAULT,
+ TWO_PORT_STATUS__DEFAULT_G_EXTENSION_N20,
+ TWO_PORT_STATUS__ADHOC,
+ TWO_PORT_STATUS__WITHOUT_ANY_ASSOCIATE
+};
+
+struct txbbgain_struct {
+ long txbb_iq_amplifygain;
+ u32 txbbgain_value;
+};
+
+struct ccktxbbgain {
+ u8 ccktxbb_valuearray[8];
+};
+
+struct init_gain {
+ u8 xaagccore1;
+ u8 xbagccore1;
+ u8 xcagccore1;
+ u8 xdagccore1;
+ u8 cca;
+
+};
+
+struct tx_ring {
+ u32 *desc;
+ u8 nStuckCount;
+ struct tx_ring *next;
+} __packed;
+
+struct rtl8192_tx_ring {
+ struct tx_desc *desc;
+ dma_addr_t dma;
+ unsigned int idx;
+ unsigned int entries;
+ struct sk_buff_head queue;
+};
+
+
+
+struct rtl819x_ops {
+ enum nic_t nic_type;
+ void (*get_eeprom_size)(struct net_device *dev);
+ void (*init_adapter_variable)(struct net_device *dev);
+ void (*init_before_adapter_start)(struct net_device *dev);
+ bool (*initialize_adapter)(struct net_device *dev);
+ void (*link_change)(struct net_device *dev);
+ void (*tx_fill_descriptor)(struct net_device *dev,
+ struct tx_desc *tx_desc,
+ struct cb_desc *cb_desc,
+ struct sk_buff *skb);
+ void (*tx_fill_cmd_descriptor)(struct net_device *dev,
+ struct tx_desc_cmd *entry,
+ struct cb_desc *cb_desc,
+ struct sk_buff *skb);
+ bool (*rx_query_status_descriptor)(struct net_device *dev,
+ struct rtllib_rx_stats *stats,
+ struct rx_desc *pdesc,
+ struct sk_buff *skb);
+ bool (*rx_command_packet_handler)(struct net_device *dev,
+ struct sk_buff *skb,
+ struct rx_desc *pdesc);
+ void (*stop_adapter)(struct net_device *dev, bool reset);
+ void (*update_ratr_table)(struct net_device *dev);
+ void (*irq_enable)(struct net_device *dev);
+ void (*irq_disable)(struct net_device *dev);
+ void (*irq_clear)(struct net_device *dev);
+ void (*rx_enable)(struct net_device *dev);
+ void (*tx_enable)(struct net_device *dev);
+ void (*interrupt_recognized)(struct net_device *dev,
+ u32 *p_inta, u32 *p_intb);
+ bool (*TxCheckStuckHandler)(struct net_device *dev);
+ bool (*RxCheckStuckHandler)(struct net_device *dev);
+};
+
+struct r8192_priv {
+ struct pci_dev *pdev;
+ struct pci_dev *bridge_pdev;
+
+ bool bfirst_init;
+ bool bfirst_after_down;
+ bool initialized_at_probe;
+ bool being_init_adapter;
+ bool bDriverIsGoingToUnload;
+
+ int irq;
+ short irq_enabled;
+
+ short up;
+ short up_first_time;
+ struct delayed_work update_beacon_wq;
+ struct delayed_work watch_dog_wq;
+ struct delayed_work txpower_tracking_wq;
+ struct delayed_work rfpath_check_wq;
+ struct delayed_work gpio_change_rf_wq;
+ struct delayed_work initialgain_operate_wq;
+ struct delayed_work check_hw_scan_wq;
+ struct delayed_work hw_scan_simu_wq;
+ struct delayed_work start_hw_scan_wq;
+
+ struct workqueue_struct *priv_wq;
+
+ struct channel_access_setting ChannelAccessSetting;
+
+ struct mp_adapter NdisAdapter;
+
+ struct rtl819x_ops *ops;
+ struct rtllib_device *rtllib;
+
+ struct work_struct reset_wq;
+
+ struct log_int_8190 InterruptLog;
+
+ enum rt_customer_id CustomerID;
+
+
+ enum rt_rf_type_819xu rf_chip;
+ enum ic_inferiority_8192s IC_Class;
+ enum ht_channel_width CurrentChannelBW;
+ struct bb_reg_definition PHYRegDef[4];
+ struct rate_adaptive rate_adaptive;
+
+ struct ccktxbbgain cck_txbbgain_table[CCKTxBBGainTableLength];
+ struct ccktxbbgain cck_txbbgain_ch14_table[CCKTxBBGainTableLength];
+
+ struct txbbgain_struct txbbgain_table[TxBBGainTableLength];
+
+ enum acm_method AcmMethod;
+
+ struct rt_firmware *pFirmware;
+ enum rtl819x_loopback LoopbackMode;
+ enum firmware_source firmware_source;
+
+ struct timer_list watch_dog_timer;
+ struct timer_list fsync_timer;
+ struct timer_list gpio_polling_timer;
+
+ spinlock_t fw_scan_lock;
+ spinlock_t irq_lock;
+ spinlock_t irq_th_lock;
+ spinlock_t tx_lock;
+ spinlock_t rf_ps_lock;
+ spinlock_t rw_lock;
+ spinlock_t rt_h2c_lock;
+ spinlock_t rf_lock;
+ spinlock_t ps_lock;
+
+ struct sk_buff_head rx_queue;
+ struct sk_buff_head skb_queue;
+
+ struct tasklet_struct irq_rx_tasklet;
+ struct tasklet_struct irq_tx_tasklet;
+ struct tasklet_struct irq_prepare_beacon_tasklet;
+
+ struct semaphore wx_sem;
+ struct semaphore rf_sem;
+ struct mutex mutex;
+
+ struct rt_stats stats;
+ struct iw_statistics wstats;
+ struct proc_dir_entry *dir_dev;
+
+ short (*rf_set_sens)(struct net_device *dev, short sens);
+ u8 (*rf_set_chan)(struct net_device *dev, u8 ch);
+ void (*rf_close)(struct net_device *dev);
+ void (*rf_init)(struct net_device *dev);
+
+ struct rx_desc *rx_ring[MAX_RX_QUEUE];
+ struct sk_buff *rx_buf[MAX_RX_QUEUE][MAX_RX_COUNT];
+ dma_addr_t rx_ring_dma[MAX_RX_QUEUE];
+ unsigned int rx_idx[MAX_RX_QUEUE];
+ int rxringcount;
+ u16 rxbuffersize;
+
+ u64 LastRxDescTSF;
+
+ u16 EarlyRxThreshold;
+ u32 ReceiveConfig;
+ u8 AcmControl;
+ u8 RFProgType;
+ u8 retry_data;
+ u8 retry_rts;
+ u16 rts;
+
+ struct rtl8192_tx_ring tx_ring[MAX_TX_QUEUE_COUNT];
+ int txringcount;
+ int txbuffsize;
+ int txfwbuffersize;
+ atomic_t tx_pending[0x10];
+
+ u16 ShortRetryLimit;
+ u16 LongRetryLimit;
+ u32 TransmitConfig;
+ u8 RegCWinMin;
+ u8 keepAliveLevel;
+
+ bool sw_radio_on;
+ bool bHwRadioOff;
+ bool pwrdown;
+ bool blinked_ingpio;
+ u8 polling_timer_on;
+
+ /**********************************************************/
+
+ enum card_type {
+ PCI, MINIPCI,
+ CARDBUS, USB
+ } card_type;
+
+ struct work_struct qos_activate;
+
+ u8 bIbssCoordinator;
+
+ short promisc;
+ short crcmon;
+
+ int txbeaconcount;
+
+ short chan;
+ short sens;
+ short max_sens;
+ u32 rx_prevlen;
+
+ u8 ScanDelay;
+ bool ps_force;
+
+ u32 irq_mask[2];
+
+ u8 Rf_Mode;
+ enum nic_t card_8192;
+ u8 card_8192_version;
+
+ short enable_gpio0;
+
+ u8 rf_type;
+ u8 IC_Cut;
+ char nick[IW_ESSID_MAX_SIZE + 1];
+
+ u8 RegBcnCtrlVal;
+ bool bHwAntDiv;
+
+ bool bTKIPinNmodeFromReg;
+ bool bWEPinNmodeFromReg;
+
+ bool bLedOpenDrain;
+
+ u8 check_roaming_cnt;
+
+ bool bIgnoreSilentReset;
+ u32 SilentResetRxSoltNum;
+ u32 SilentResetRxSlotIndex;
+ u32 SilentResetRxStuckEvent[MAX_SILENT_RESET_RX_SLOT_NUM];
+
+ void *scan_cmd;
+ u8 hwscan_bw_40;
+
+ u16 nrxAMPDU_size;
+ u8 nrxAMPDU_aggr_num;
+
+ u32 last_rxdesc_tsf_high;
+ u32 last_rxdesc_tsf_low;
+
+ u16 basic_rate;
+ u8 short_preamble;
+ u8 dot11CurrentPreambleMode;
+ u8 slot_time;
+ u16 SifsTime;
+
+ u8 RegWirelessMode;
+
+ u8 firmware_version;
+ u16 FirmwareSubVersion;
+ u16 rf_pathmap;
+ bool AutoloadFailFlag;
+
+ u8 RegPciASPM;
+ u8 RegAMDPciASPM;
+ u8 RegHwSwRfOffD3;
+ u8 RegSupportPciASPM;
+ bool bSupportASPM;
+
+ u32 RfRegChnlVal[2];
+
+ u8 ShowRateMode;
+ u8 RATRTableBitmap;
+
+ u8 EfuseMap[2][HWSET_MAX_SIZE_92S];
+ u16 EfuseUsedBytes;
+ u8 EfuseUsedPercentage;
+
+ short epromtype;
+ u16 eeprom_vid;
+ u16 eeprom_did;
+ u16 eeprom_svid;
+ u16 eeprom_smid;
+ u8 eeprom_CustomerID;
+ u16 eeprom_ChannelPlan;
+ u8 eeprom_version;
+
+ u8 EEPROMRegulatory;
+ u8 EEPROMPwrGroup[2][3];
+ u8 EEPROMOptional;
+
+ u8 EEPROMTxPowerLevelCCK[14];
+ u8 EEPROMTxPowerLevelOFDM24G[14];
+ u8 EEPROMTxPowerLevelOFDM5G[24];
+ u8 EEPROMRfACCKChnl1TxPwLevel[3];
+ u8 EEPROMRfAOfdmChnlTxPwLevel[3];
+ u8 EEPROMRfCCCKChnl1TxPwLevel[3];
+ u8 EEPROMRfCOfdmChnlTxPwLevel[3];
+ u16 EEPROMTxPowerDiff;
+ u16 EEPROMAntPwDiff;
+ u8 EEPROMThermalMeter;
+ u8 EEPROMPwDiff;
+ u8 EEPROMCrystalCap;
+
+ u8 EEPROMBluetoothCoexist;
+ u8 EEPROMBluetoothType;
+ u8 EEPROMBluetoothAntNum;
+ u8 EEPROMBluetoothAntIsolation;
+ u8 EEPROMBluetoothRadioShared;
+
+
+ u8 EEPROMSupportWoWLAN;
+ u8 EEPROMBoardType;
+ u8 EEPROM_Def_Ver;
+ u8 EEPROMHT2T_TxPwr[6];
+ u8 EEPROMTSSI_A;
+ u8 EEPROMTSSI_B;
+ u8 EEPROMTxPowerLevelCCK_V1[3];
+ u8 EEPROMLegacyHTTxPowerDiff;
+
+ u8 BluetoothCoexist;
+
+ u8 CrystalCap;
+ u8 ThermalMeter[2];
+
+ u16 FwCmdIOMap;
+ u32 FwCmdIOParam;
+
+ u8 SwChnlInProgress;
+ u8 SwChnlStage;
+ u8 SwChnlStep;
+ u8 SetBWModeInProgress;
+
+ u8 nCur40MhzPrimeSC;
+
+ u32 RfReg0Value[4];
+ u8 NumTotalRFPath;
+ bool brfpath_rxenable[4];
+
+ bool bTXPowerDataReadFromEEPORM;
+
+ u16 RegChannelPlan;
+ u16 ChannelPlan;
+ bool bChnlPlanFromHW;
+
+ bool RegRfOff;
+ bool isRFOff;
+ bool bInPowerSaveMode;
+ u8 bHwRfOffAction;
+
+ bool aspm_clkreq_enable;
+ u32 pci_bridge_vendor;
+ u8 RegHostPciASPMSetting;
+ u8 RegDevicePciASPMSetting;
+
+ bool RFChangeInProgress;
+ bool SetRFPowerStateInProgress;
+ bool bdisable_nic;
+
+ u8 pwrGroupCnt;
+
+ u8 ThermalValue_LCK;
+ u8 ThermalValue_IQK;
+ bool bRfPiEnable;
+
+ u32 APKoutput[2][2];
+ bool bAPKdone;
+
+ long RegE94;
+ long RegE9C;
+ long RegEB4;
+ long RegEBC;
+
+ u32 RegC04;
+ u32 Reg874;
+ u32 RegC08;
+ u32 ADDA_backup[16];
+ u32 IQK_MAC_backup[3];
+
+ bool SetFwCmdInProgress;
+ u8 CurrentFwCmdIO;
+
+ u8 rssi_level;
+
+ bool bInformFWDriverControlDM;
+ u8 PwrGroupHT20[2][14];
+ u8 PwrGroupHT40[2][14];
+
+ u8 ThermalValue;
+ long EntryMinUndecoratedSmoothedPWDB;
+ long EntryMaxUndecoratedSmoothedPWDB;
+ u8 DynamicTxHighPowerLvl;
+ u8 LastDTPLvl;
+ u32 CurrentRATR0;
+ struct false_alarm_stats FalseAlmCnt;
+
+ u8 DMFlag;
+ u8 DM_Type;
+
+ u8 CckPwEnl;
+ u16 TSSI_13dBm;
+ u32 Pwr_Track;
+ u8 CCKPresentAttentuation_20Mdefault;
+ u8 CCKPresentAttentuation_40Mdefault;
+ char CCKPresentAttentuation_difference;
+ char CCKPresentAttentuation;
+ u8 bCckHighPower;
+ long undecorated_smoothed_pwdb;
+ long undecorated_smoothed_cck_adc_pwdb[4];
+
+ u32 MCSTxPowerLevelOriginalOffset[6];
+ u32 CCKTxPowerLevelOriginalOffset;
+ u8 TxPowerLevelCCK[14];
+ u8 TxPowerLevelCCK_A[14];
+ u8 TxPowerLevelCCK_C[14];
+ u8 TxPowerLevelOFDM24G[14];
+ u8 TxPowerLevelOFDM5G[14];
+ u8 TxPowerLevelOFDM24G_A[14];
+ u8 TxPowerLevelOFDM24G_C[14];
+ u8 LegacyHTTxPowerDiff;
+ u8 TxPowerDiff;
+ s8 RF_C_TxPwDiff;
+ s8 RF_B_TxPwDiff;
+ u8 RfTxPwrLevelCck[2][14];
+ u8 RfTxPwrLevelOfdm1T[2][14];
+ u8 RfTxPwrLevelOfdm2T[2][14];
+ u8 AntennaTxPwDiff[3];
+ u8 TxPwrHt20Diff[2][14];
+ u8 TxPwrLegacyHtDiff[2][14];
+ u8 TxPwrSafetyFlag;
+ u8 HT2T_TxPwr_A[14];
+ u8 HT2T_TxPwr_B[14];
+ u8 CurrentCckTxPwrIdx;
+ u8 CurrentOfdm24GTxPwrIdx;
+
+ bool bdynamic_txpower;
+ bool bDynamicTxHighPower;
+ bool bDynamicTxLowPower;
+ bool bLastDTPFlag_High;
+ bool bLastDTPFlag_Low;
+
+ bool bstore_last_dtpflag;
+ bool bstart_txctrl_bydtp;
+
+ u8 rfa_txpowertrackingindex;
+ u8 rfa_txpowertrackingindex_real;
+ u8 rfa_txpowertracking_default;
+ u8 rfc_txpowertrackingindex;
+ u8 rfc_txpowertrackingindex_real;
+ u8 rfc_txpowertracking_default;
+ bool btxpower_tracking;
+ bool bcck_in_ch14;
+
+ u8 TxPowerTrackControl;
+ u8 txpower_count;
+ bool btxpower_trackingInit;
+
+ u8 OFDM_index[2];
+ u8 CCK_index;
+
+ u8 Record_CCK_20Mindex;
+ u8 Record_CCK_40Mindex;
+
+ struct init_gain initgain_backup;
+ u8 DefaultInitialGain[4];
+ bool bis_any_nonbepkts;
+ bool bcurrent_turbo_EDCA;
+ bool bis_cur_rdlstate;
+
+ bool bCCKinCH14;
+
+ u8 MidHighPwrTHR_L1;
+ u8 MidHighPwrTHR_L2;
+
+ bool bfsync_processing;
+ u32 rate_record;
+ u32 rateCountDiffRecord;
+ u32 ContiuneDiffCount;
+ bool bswitch_fsync;
+ u8 framesync;
+ u32 framesyncC34;
+ u8 framesyncMonitor;
+
+ bool bDMInitialGainEnable;
+ bool MutualAuthenticationFail;
+
+ bool bDisableFrameBursting;
+
+ u32 reset_count;
+ bool bpbc_pressed;
+
+ u32 txpower_checkcnt;
+ u32 txpower_tracking_callback_cnt;
+ u8 thermal_read_val[40];
+ u8 thermal_readback_index;
+ u32 ccktxpower_adjustcnt_not_ch14;
+ u32 ccktxpower_adjustcnt_ch14;
+
+ enum reset_type ResetProgress;
+ bool bForcedSilentReset;
+ bool bDisableNormalResetCheck;
+ u16 TxCounter;
+ u16 RxCounter;
+ int IrpPendingCount;
+ bool bResetInProgress;
+ bool force_reset;
+ bool force_lps;
+ u8 InitialGainOperateType;
+
+ bool chan_forced;
+ bool bSingleCarrier;
+ bool RegBoard;
+ bool bCckContTx;
+ bool bOfdmContTx;
+ bool bStartContTx;
+ u8 RegPaModel;
+ u8 btMpCckTxPower;
+ u8 btMpOfdmTxPower;
+
+ u32 MptActType;
+ u32 MptIoOffset;
+ u32 MptIoValue;
+ u32 MptRfPath;
+
+ u32 MptBandWidth;
+ u32 MptRateIndex;
+ u8 MptChannelToSw;
+ u32 MptRCR;
+
+ u8 PwrDomainProtect;
+ u8 H2CTxCmdSeq;
+
+
+};
+
+extern const struct ethtool_ops rtl819x_ethtool_ops;
+
+void rtl8192_tx_cmd(struct net_device *dev, struct sk_buff *skb);
+short rtl8192_tx(struct net_device *dev, struct sk_buff *skb);
+
+u8 read_nic_io_byte(struct net_device *dev, int x);
+u32 read_nic_io_dword(struct net_device *dev, int x);
+u16 read_nic_io_word(struct net_device *dev, int x) ;
+void write_nic_io_byte(struct net_device *dev, int x, u8 y);
+void write_nic_io_word(struct net_device *dev, int x, u16 y);
+void write_nic_io_dword(struct net_device *dev, int x, u32 y);
+
+u8 read_nic_byte(struct net_device *dev, int x);
+u32 read_nic_dword(struct net_device *dev, int x);
+u16 read_nic_word(struct net_device *dev, int x) ;
+void write_nic_byte(struct net_device *dev, int x, u8 y);
+void write_nic_word(struct net_device *dev, int x, u16 y);
+void write_nic_dword(struct net_device *dev, int x, u32 y);
+
+void force_pci_posting(struct net_device *dev);
+
+void rtl8192_rx_enable(struct net_device *);
+void rtl8192_tx_enable(struct net_device *);
+
+int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
+ int rate);
+void rtl8192_data_hard_stop(struct net_device *dev);
+void rtl8192_data_hard_resume(struct net_device *dev);
+void rtl8192_restart(void *data);
+void rtl819x_watchdog_wqcallback(void *data);
+void rtl8192_hw_sleep_wq(void *data);
+void watch_dog_timer_callback(unsigned long data);
+void rtl8192_irq_rx_tasklet(struct r8192_priv *priv);
+void rtl8192_irq_tx_tasklet(struct r8192_priv *priv);
+int rtl8192_down(struct net_device *dev, bool shutdownrf);
+int rtl8192_up(struct net_device *dev);
+void rtl8192_commit(struct net_device *dev);
+void rtl8192_set_chan(struct net_device *dev, short ch);
+
+void check_rfctrl_gpio_timer(unsigned long data);
+
+void rtl8192_hw_wakeup_wq(void *data);
+irqreturn_type rtl8192_interrupt(int irq, void *netdev, struct pt_regs *regs);
+
+short rtl8192_pci_initdescring(struct net_device *dev);
+
+void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
+
+int _rtl8192_up(struct net_device *dev, bool is_silent_reset);
+
+short rtl8192_is_tx_queue_empty(struct net_device *dev);
+void rtl8192_irq_disable(struct net_device *dev);
+
+void rtl8192_tx_timeout(struct net_device *dev);
+void rtl8192_pci_resetdescring(struct net_device *dev);
+void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode);
+void rtl8192_irq_enable(struct net_device *dev);
+void rtl8192_config_rate(struct net_device *dev, u16 *rate_config);
+void rtl8192_update_cap(struct net_device *dev, u16 cap);
+void rtl8192_irq_disable(struct net_device *dev);
+
+void rtl819x_UpdateRxPktTimeStamp(struct net_device *dev,
+ struct rtllib_rx_stats *stats);
+long rtl819x_translate_todbm(struct r8192_priv *priv, u8 signal_strength_index);
+void rtl819x_update_rxsignalstatistics8190pci(struct r8192_priv *priv,
+ struct rtllib_rx_stats *pprevious_stats);
+u8 rtl819x_evm_dbtopercentage(char value);
+void rtl819x_process_cck_rxpathsel(struct r8192_priv *priv,
+ struct rtllib_rx_stats *pprevious_stats);
+u8 rtl819x_query_rxpwrpercentage(char antpower);
+void rtl8192_record_rxdesc_forlateruse(struct rtllib_rx_stats *psrc_stats,
+ struct rtllib_rx_stats *ptarget_stats);
+bool NicIFEnableNIC(struct net_device *dev);
+bool NicIFDisableNIC(struct net_device *dev);
+
+bool MgntActSet_RF_State(struct net_device *dev,
+ enum rt_rf_power_state StateToSet,
+ RT_RF_CHANGE_SOURCE ChangeSource,
+ bool ProtectOrNot);
+void ActUpdateChannelAccessSetting(struct net_device *dev,
+ enum wireless_mode WirelessMode,
+ struct channel_access_setting *ChnlAccessSetting);
+
+#endif
diff --git a/drivers/staging/rtl8192e/rtl_crypto.h b/drivers/staging/rtl8192e/rtl_crypto.h
new file mode 100644
index 00000000000..ee57c0f4fa6
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_crypto.h
@@ -0,0 +1,382 @@
+/*
+ * Scatterlist Cryptographic API.
+ *
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2002 David S. Miller (davem@redhat.com)
+ *
+ * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
+ * and Nettle, by Niels Mé°ˆler.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _LINUX_CRYPTO_H
+#define _LINUX_CRYPTO_H
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include <linux/errno.h>
+
+#define crypto_register_alg crypto_register_alg_rsl
+#define crypto_unregister_alg crypto_unregister_alg_rsl
+#define crypto_alloc_tfm crypto_alloc_tfm_rsl
+#define crypto_free_tfm crypto_free_tfm_rsl
+#define crypto_alg_available crypto_alg_available_rsl
+
+/*
+ * Algorithm masks and types.
+ */
+#define CRYPTO_ALG_TYPE_MASK 0x000000ff
+#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
+#define CRYPTO_ALG_TYPE_DIGEST 0x00000002
+#define CRYPTO_ALG_TYPE_COMPRESS 0x00000004
+
+/*
+ * Transform masks and values (for crt_flags).
+ */
+#define CRYPTO_TFM_MODE_MASK 0x000000ff
+#define CRYPTO_TFM_REQ_MASK 0x000fff00
+#define CRYPTO_TFM_RES_MASK 0xfff00000
+
+#define CRYPTO_TFM_MODE_ECB 0x00000001
+#define CRYPTO_TFM_MODE_CBC 0x00000002
+#define CRYPTO_TFM_MODE_CFB 0x00000004
+#define CRYPTO_TFM_MODE_CTR 0x00000008
+
+#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
+#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
+#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
+#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
+#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
+#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
+
+/*
+ * Miscellaneous stuff.
+ */
+#define CRYPTO_UNSPEC 0
+#define CRYPTO_MAX_ALG_NAME 64
+
+struct scatterlist;
+
+/*
+ * Algorithms: modular crypto algorithm implementations, managed
+ * via crypto_register_alg() and crypto_unregister_alg().
+ */
+struct cipher_alg {
+ unsigned int cia_min_keysize;
+ unsigned int cia_max_keysize;
+ int (*cia_setkey)(void *ctx, const u8 *key,
+ unsigned int keylen, u32 *flags);
+ void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src);
+ void (*cia_decrypt)(void *ctx, u8 *dst, const u8 *src);
+};
+
+struct digest_alg {
+ unsigned int dia_digestsize;
+ void (*dia_init)(void *ctx);
+ void (*dia_update)(void *ctx, const u8 *data, unsigned int len);
+ void (*dia_final)(void *ctx, u8 *out);
+ int (*dia_setkey)(void *ctx, const u8 *key,
+ unsigned int keylen, u32 *flags);
+};
+
+struct compress_alg {
+ int (*coa_init)(void *ctx);
+ void (*coa_exit)(void *ctx);
+ int (*coa_compress)(void *ctx, const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+ int (*coa_decompress)(void *ctx, const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+};
+
+#define cra_cipher cra_u.cipher
+#define cra_digest cra_u.digest
+#define cra_compress cra_u.compress
+
+struct crypto_alg {
+ struct list_head cra_list;
+ u32 cra_flags;
+ unsigned int cra_blocksize;
+ unsigned int cra_ctxsize;
+ const char cra_name[CRYPTO_MAX_ALG_NAME];
+
+ union {
+ struct cipher_alg cipher;
+ struct digest_alg digest;
+ struct compress_alg compress;
+ } cra_u;
+
+ struct module *cra_module;
+};
+
+/*
+ * Algorithm registration interface.
+ */
+int crypto_register_alg(struct crypto_alg *alg);
+int crypto_unregister_alg(struct crypto_alg *alg);
+
+/*
+ * Algorithm query interface.
+ */
+int crypto_alg_available(const char *name, u32 flags);
+
+/*
+ * Transforms: user-instantiated objects which encapsulate algorithms
+ * and core processing logic. Managed via crypto_alloc_tfm() and
+ * crypto_free_tfm(), as well as the various helpers below.
+ */
+struct crypto_tfm;
+
+struct cipher_tfm {
+ void *cit_iv;
+ unsigned int cit_ivsize;
+ u32 cit_mode;
+ int (*cit_setkey)(struct crypto_tfm *tfm,
+ const u8 *key, unsigned int keylen);
+ int (*cit_encrypt)(struct crypto_tfm *tfm,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes);
+ int (*cit_encrypt_iv)(struct crypto_tfm *tfm,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes, u8 *iv);
+ int (*cit_decrypt)(struct crypto_tfm *tfm,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes);
+ int (*cit_decrypt_iv)(struct crypto_tfm *tfm,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes, u8 *iv);
+ void (*cit_xor_block)(u8 *dst, const u8 *src);
+};
+
+struct digest_tfm {
+ void (*dit_init)(struct crypto_tfm *tfm);
+ void (*dit_update)(struct crypto_tfm *tfm,
+ struct scatterlist *sg, unsigned int nsg);
+ void (*dit_final)(struct crypto_tfm *tfm, u8 *out);
+ void (*dit_digest)(struct crypto_tfm *tfm, struct scatterlist *sg,
+ unsigned int nsg, u8 *out);
+ int (*dit_setkey)(struct crypto_tfm *tfm,
+ const u8 *key, unsigned int keylen);
+};
+
+struct compress_tfm {
+ int (*cot_compress)(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+ int (*cot_decompress)(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+};
+
+#define crt_cipher crt_u.cipher
+#define crt_digest crt_u.digest
+#define crt_compress crt_u.compress
+
+struct crypto_tfm {
+
+ u32 crt_flags;
+
+ union {
+ struct cipher_tfm cipher;
+ struct digest_tfm digest;
+ struct compress_tfm compress;
+ } crt_u;
+
+ struct crypto_alg *__crt_alg;
+};
+
+/*
+ * Transform user interface.
+ */
+
+/*
+ * crypto_alloc_tfm() will first attempt to locate an already loaded algorithm.
+ * If that fails and the kernel supports dynamically loadable modules, it
+ * will then attempt to load a module of the same name or alias. A refcount
+ * is grabbed on the algorithm which is then associated with the new transform.
+ *
+ * crypto_free_tfm() frees up the transform and any associated resources,
+ * then drops the refcount on the associated algorithm.
+ */
+struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
+void crypto_free_tfm(struct crypto_tfm *tfm);
+
+/*
+ * Transform helpers which query the underlying algorithm.
+ */
+static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_name;
+}
+
+static inline const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ if (alg->cra_module)
+ return alg->cra_module->name;
+ else
+ return NULL;
+}
+
+static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
+}
+
+static inline unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
+ return tfm->__crt_alg->cra_cipher.cia_min_keysize;
+}
+
+static inline unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
+ return tfm->__crt_alg->cra_cipher.cia_max_keysize;
+}
+
+static inline unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
+ return tfm->crt_cipher.cit_ivsize;
+}
+
+static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_blocksize;
+}
+
+static inline unsigned int crypto_tfm_alg_digestsize(struct crypto_tfm *tfm)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
+ return tfm->__crt_alg->cra_digest.dia_digestsize;
+}
+
+/*
+ * API wrappers.
+ */
+static inline void crypto_digest_init(struct crypto_tfm *tfm)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
+ tfm->crt_digest.dit_init(tfm);
+}
+
+static inline void crypto_digest_update(struct crypto_tfm *tfm,
+ struct scatterlist *sg,
+ unsigned int nsg)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
+ tfm->crt_digest.dit_update(tfm, sg, nsg);
+}
+
+static inline void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
+ tfm->crt_digest.dit_final(tfm, out);
+}
+
+static inline void crypto_digest_digest(struct crypto_tfm *tfm,
+ struct scatterlist *sg,
+ unsigned int nsg, u8 *out)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
+ tfm->crt_digest.dit_digest(tfm, sg, nsg, out);
+}
+
+static inline int crypto_digest_setkey(struct crypto_tfm *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
+ if (tfm->crt_digest.dit_setkey == NULL)
+ return -ENOSYS;
+ return tfm->crt_digest.dit_setkey(tfm, key, keylen);
+}
+
+static inline int crypto_cipher_setkey(struct crypto_tfm *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
+ return tfm->crt_cipher.cit_setkey(tfm, key, keylen);
+}
+
+static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
+ return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
+}
+
+static inline int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes, u8 *iv)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
+ BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
+ return tfm->crt_cipher.cit_encrypt_iv(tfm, dst, src, nbytes, iv);
+}
+
+static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
+ return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
+}
+
+static inline int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes, u8 *iv)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
+ BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
+ return tfm->crt_cipher.cit_decrypt_iv(tfm, dst, src, nbytes, iv);
+}
+
+static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int len)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
+ memcpy(tfm->crt_cipher.cit_iv, src, len);
+}
+
+static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm,
+ u8 *dst, unsigned int len)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
+ memcpy(dst, tfm->crt_cipher.cit_iv, len);
+}
+
+static inline int crypto_comp_compress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS);
+ return tfm->crt_compress.cot_compress(tfm, src, slen, dst, dlen);
+}
+
+static inline int crypto_comp_decompress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS);
+ return tfm->crt_compress.cot_decompress(tfm, src, slen, dst, dlen);
+}
+
+#endif /* _LINUX_CRYPTO_H */
diff --git a/drivers/staging/rtl8192e/rtl_debug.c b/drivers/staging/rtl8192e/rtl_debug.c
new file mode 100644
index 00000000000..22bc2dd6e43
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_debug.c
@@ -0,0 +1,1108 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#include "rtl_debug.h"
+#include "rtl_core.h"
+#include "r8192E_phy.h"
+#include "r8192E_phyreg.h"
+#include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
+#include "r8192E_cmdpkt.h"
+
+u32 rt_global_debug_component = \
+ COMP_ERR ;
+
+/*------------------Declare variable-----------------------*/
+u32 DBGP_Type[DBGP_TYPE_MAX];
+
+/*-----------------------------------------------------------------------------
+ * Function: DBGP_Flag_Init
+ *
+ * Overview: Refresh all debug print control flag content to zero.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 10/20/2006 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void rtl8192_dbgp_flag_init(struct net_device *dev)
+{
+ u8 i;
+
+ for (i = 0; i < DBGP_TYPE_MAX; i++)
+ DBGP_Type[i] = 0;
+
+
+} /* DBGP_Flag_Init */
+
+/* this is only for debugging */
+void print_buffer(u32 *buffer, int len)
+{
+ int i;
+ u8 *buf = (u8 *)buffer;
+
+ printk(KERN_INFO "ASCII BUFFER DUMP (len: %x):\n", len);
+
+ for (i = 0; i < len; i++)
+ printk(KERN_INFO "%c", buf[i]);
+
+ printk(KERN_INFO "\nBINARY BUFFER DUMP (len: %x):\n", len);
+
+ for (i = 0; i < len; i++)
+ printk(KERN_INFO "%x", buf[i]);
+
+ printk(KERN_INFO "\n");
+}
+
+/* this is only for debug */
+void dump_eprom(struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < 0xff; i++)
+ RT_TRACE(COMP_INIT, "EEPROM addr %x : %x", i,
+ eprom_read(dev, i));
+}
+
+/* this is only for debug */
+void rtl8192_dump_reg(struct net_device *dev)
+{
+ int i;
+ int n;
+ int max = 0x5ff;
+
+ RT_TRACE(COMP_INIT, "Dumping NIC register map");
+
+ for (n = 0; n <= max; ) {
+ printk(KERN_INFO "\nD: %2x> ", n);
+ for (i = 0; i < 16 && n <= max; i++, n++)
+ printk(KERN_INFO "%2x ", read_nic_byte(dev, n));
+ }
+ printk(KERN_INFO "\n");
+}
+
+/****************************************************************************
+ -----------------------------PROCFS STUFF-------------------------
+*****************************************************************************/
+/*This part is related to PROC, which will record some statistics. */
+static struct proc_dir_entry *rtl8192_proc;
+
+static int proc_get_stats_ap(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+ struct rtllib_network *target;
+ int len = 0;
+
+ list_for_each_entry(target, &ieee->network_list, list) {
+
+ len += snprintf(page + len, count - len,
+ "%s ", target->ssid);
+
+ if (target->wpa_ie_len > 0 || target->rsn_ie_len > 0)
+ len += snprintf(page + len, count - len,
+ "WPA\n");
+ else
+ len += snprintf(page + len, count - len,
+ "non_WPA\n");
+
+ }
+
+ *eof = 1;
+ return len;
+}
+
+static int proc_get_registers_0(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0x000;
+
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n ",
+ (page0>>8));
+ len += snprintf(page + len, count - len,
+ "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B "
+ "0C 0D 0E 0F");
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", n);
+ for (i = 0; i < 16 && n <= max; n++, i++)
+ len += snprintf(page + len, count - len,
+ "%2.2x ", read_nic_byte(dev,
+ (page0 | n)));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+
+}
+static int proc_get_registers_1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0x100;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n ",
+ (page0>>8));
+ len += snprintf(page + len, count - len,
+ "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B "
+ "0C 0D 0E 0F");
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len,
+ "\nD: %2x > ", n);
+ for (i = 0; i < 16 && n <= max; i++, n++)
+ len += snprintf(page + len, count - len,
+ "%2.2x ", read_nic_byte(dev,
+ (page0 | n)));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+
+}
+static int proc_get_registers_2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0x200;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n ",
+ (page0 >> 8));
+ len += snprintf(page + len, count - len,
+ "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B 0C "
+ "0D 0E 0F");
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len,
+ "\nD: %2x > ", n);
+ for (i = 0; i < 16 && n <= max; i++, n++)
+ len += snprintf(page + len, count - len,
+ "%2.2x ", read_nic_byte(dev,
+ (page0 | n)));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+
+}
+static int proc_get_registers_3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0x300;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n ",
+ (page0>>8));
+ len += snprintf(page + len, count - len,
+ "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B "
+ "0C 0D 0E 0F");
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len,
+ "\nD: %2x > ", n);
+ for (i = 0; i < 16 && n <= max; i++, n++)
+ len += snprintf(page + len, count - len,
+ "%2.2x ", read_nic_byte(dev,
+ (page0 | n)));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+
+}
+static int proc_get_registers_4(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0x400;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n ",
+ (page0>>8));
+ len += snprintf(page + len, count - len,
+ "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B "
+ "0C 0D 0E 0F");
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len,
+ "\nD: %2x > ", n);
+ for (i = 0; i < 16 && n <= max; i++, n++)
+ len += snprintf(page + len, count - len,
+ "%2.2x ", read_nic_byte(dev,
+ (page0 | n)));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+
+}
+static int proc_get_registers_5(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0x500;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n ",
+ (page0 >> 8));
+ len += snprintf(page + len, count - len,
+ "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B "
+ "0C 0D 0E 0F");
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len,
+ "\nD: %2x > ", n);
+ for (i = 0; i < 16 && n <= max; i++, n++)
+ len += snprintf(page + len, count - len,
+ "%2.2x ", read_nic_byte(dev,
+ (page0 | n)));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+
+}
+static int proc_get_registers_6(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0x600;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n ",
+ (page0>>8));
+ len += snprintf(page + len, count - len,
+ "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B "
+ "0C 0D 0E 0F");
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len,
+ "\nD: %2x > ", n);
+ for (i = 0; i < 16 && n <= max; i++, n++)
+ len += snprintf(page + len, count - len,
+ "%2.2x ", read_nic_byte(dev,
+ (page0 | n)));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+
+}
+static int proc_get_registers_7(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0x700;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n ",
+ (page0 >> 8));
+ len += snprintf(page + len, count - len,
+ "\nD: OF > 00 01 02 03 04 05 06 07 08 09 0A 0B 0C "
+ "0D 0E 0F");
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len,
+ "\nD: %2x > ", n);
+ for (i = 0; i < 16 && n <= max; i++, n++)
+ len += snprintf(page + len, count - len,
+ "%2.2x ", read_nic_byte(dev,
+ (page0 | n)));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+
+}
+static int proc_get_registers_8(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0x800;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n",
+ (page0 >> 8));
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", n);
+ for (i = 0; i < 4 && n <= max; n += 4, i++)
+ len += snprintf(page + len, count - len,
+ "%8.8x ", rtl8192_QueryBBReg(dev,
+ (page0 | n), bMaskDWord));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+
+}
+static int proc_get_registers_9(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0x900;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n",
+ (page0>>8));
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", n);
+ for (i = 0; i < 4 && n <= max; n += 4, i++)
+ len += snprintf(page + len, count - len,
+ "%8.8x ", rtl8192_QueryBBReg(dev,
+ (page0 | n), bMaskDWord));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+}
+static int proc_get_registers_a(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0xa00;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n",
+ (page0>>8));
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", n);
+ for (i = 0; i < 4 && n <= max; n += 4, i++)
+ len += snprintf(page + len, count - len,
+ "%8.8x ", rtl8192_QueryBBReg(dev,
+ (page0 | n), bMaskDWord));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+}
+static int proc_get_registers_b(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0xb00;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n",
+ (page0 >> 8));
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", n);
+ for (i = 0; i < 4 && n <= max; n += 4, i++)
+ len += snprintf(page + len, count - len,
+ "%8.8x ", rtl8192_QueryBBReg(dev,
+ (page0 | n), bMaskDWord));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+}
+static int proc_get_registers_c(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0xc00;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n",
+ (page0>>8));
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", n);
+ for (i = 0; i < 4 && n <= max; n += 4, i++)
+ len += snprintf(page + len, count - len,
+ "%8.8x ", rtl8192_QueryBBReg(dev,
+ (page0 | n), bMaskDWord));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+}
+static int proc_get_registers_d(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0xd00;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n",
+ (page0>>8));
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", n);
+ for (i = 0; i < 4 && n <= max; n += 4, i++)
+ len += snprintf(page + len, count - len,
+ "%8.8x ", rtl8192_QueryBBReg(dev,
+ (page0 | n), bMaskDWord));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+}
+static int proc_get_registers_e(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n, page0;
+
+ int max = 0xff;
+ page0 = 0xe00;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n####################page %x##################\n",
+ (page0>>8));
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", n);
+ for (i = 0; i < 4 && n <= max; n += 4, i++)
+ len += snprintf(page + len, count - len,
+ "%8.8x ", rtl8192_QueryBBReg(dev,
+ (page0 | n), bMaskDWord));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+}
+
+static int proc_get_reg_rf_a(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n;
+
+ int max = 0xff;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n#################### RF-A ##################\n ");
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", n);
+ for (i = 0; i < 4 && n <= max; n += 4, i++)
+ len += snprintf(page + len, count - len,
+ "%8.8x ", rtl8192_phy_QueryRFReg(dev,
+ (enum rf90_radio_path)RF90_PATH_A, n,
+ bMaskDWord));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+}
+
+static int proc_get_reg_rf_b(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n;
+
+ int max = 0xff;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n#################### RF-B ##################\n ");
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", n);
+ for (i = 0; i < 4 && n <= max; n += 4, i++)
+ len += snprintf(page + len, count - len,
+ "%8.8x ", rtl8192_phy_QueryRFReg(dev,
+ (enum rf90_radio_path)RF90_PATH_B, n,
+ bMaskDWord));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+}
+
+static int proc_get_reg_rf_c(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n;
+
+ int max = 0xff;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n#################### RF-C ##################\n");
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", n);
+ for (i = 0; i < 4 && n <= max; n += 4, i++)
+ len += snprintf(page + len, count - len,
+ "%8.8x ", rtl8192_phy_QueryRFReg(dev,
+ (enum rf90_radio_path)RF90_PATH_C, n,
+ bMaskDWord));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+}
+
+static int proc_get_reg_rf_d(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+
+ int len = 0;
+ int i, n;
+
+ int max = 0xff;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n#################### RF-D ##################\n ");
+ for (n = 0; n <= max;) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", n);
+ for (i = 0; i < 4 && n <= max; n += 4, i++)
+ len += snprintf(page + len, count - len,
+ "%8.8x ", rtl8192_phy_QueryRFReg(dev,
+ (enum rf90_radio_path)RF90_PATH_D, n,
+ bMaskDWord));
+ }
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+}
+
+static int proc_get_cam_register_1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ u32 target_command = 0;
+ u32 target_content = 0;
+ u8 entry_i = 0;
+ u32 ulStatus;
+ int len = 0;
+ int i = 100, j = 0;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n#################### SECURITY CAM (0-10) ######"
+ "############\n ");
+ for (j = 0; j < 11; j++) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", j);
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ target_command = entry_i+CAM_CONTENT_COUNT*j;
+ target_command = target_command | BIT31;
+
+ while ((i--) >= 0) {
+ ulStatus = read_nic_dword(dev, RWCAM);
+ if (ulStatus & BIT31)
+ continue;
+ else
+ break;
+ }
+ write_nic_dword(dev, RWCAM, target_command);
+ target_content = read_nic_dword(dev, RCAMO);
+ len += snprintf(page + len, count - len, "%8.8x ",
+ target_content);
+ }
+ }
+
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+}
+
+static int proc_get_cam_register_2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ u32 target_command = 0;
+ u32 target_content = 0;
+ u8 entry_i = 0;
+ u32 ulStatus;
+ int len = 0;
+ int i = 100, j = 0;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n#################### SECURITY CAM (11-21) "
+ "##################\n ");
+ for (j = 11; j < 22; j++) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", j);
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ target_command = entry_i + CAM_CONTENT_COUNT * j;
+ target_command = target_command | BIT31;
+
+ while ((i--) >= 0) {
+ ulStatus = read_nic_dword(dev, RWCAM);
+ if (ulStatus & BIT31)
+ continue;
+ else
+ break;
+ }
+ write_nic_dword(dev, RWCAM, target_command);
+ target_content = read_nic_dword(dev, RCAMO);
+ len += snprintf(page + len, count - len, "%8.8x ",
+ target_content);
+ }
+ }
+
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+}
+
+static int proc_get_cam_register_3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ u32 target_command = 0;
+ u32 target_content = 0;
+ u8 entry_i = 0;
+ u32 ulStatus;
+ int len = 0;
+ int i = 100, j = 0;
+
+ /* This dump the current register page */
+ len += snprintf(page + len, count - len,
+ "\n#################### SECURITY CAM (22-31) ######"
+ "############\n ");
+ for (j = 22; j < TOTAL_CAM_ENTRY; j++) {
+ len += snprintf(page + len, count - len, "\nD: %2x > ", j);
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ target_command = entry_i + CAM_CONTENT_COUNT * j;
+ target_command = target_command | BIT31;
+
+ while ((i--) >= 0) {
+ ulStatus = read_nic_dword(dev, RWCAM);
+ if (ulStatus & BIT31)
+ continue;
+ else
+ break;
+ }
+ write_nic_dword(dev, RWCAM, target_command);
+ target_content = read_nic_dword(dev, RCAMO);
+ len += snprintf(page + len, count - len, "%8.8x ",
+ target_content);
+ }
+ }
+
+ len += snprintf(page + len, count - len, "\n");
+ *eof = 1;
+ return len;
+}
+static int proc_get_stats_tx(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+
+ int len = 0;
+
+ len += snprintf(page + len, count - len,
+ "TX VI priority ok int: %lu\n"
+ "TX VO priority ok int: %lu\n"
+ "TX BE priority ok int: %lu\n"
+ "TX BK priority ok int: %lu\n"
+ "TX MANAGE priority ok int: %lu\n"
+ "TX BEACON priority ok int: %lu\n"
+ "TX BEACON priority error int: %lu\n"
+ "TX CMDPKT priority ok int: %lu\n"
+ "TX queue stopped?: %d\n"
+ "TX fifo overflow: %lu\n"
+ "TX total data packets %lu\n"
+ "TX total data bytes :%lu\n",
+ priv->stats.txviokint,
+ priv->stats.txvookint,
+ priv->stats.txbeokint,
+ priv->stats.txbkokint,
+ priv->stats.txmanageokint,
+ priv->stats.txbeaconokint,
+ priv->stats.txbeaconerr,
+ priv->stats.txcmdpktokint,
+ netif_queue_stopped(dev),
+ priv->stats.txoverflow,
+ priv->rtllib->stats.tx_packets,
+ priv->rtllib->stats.tx_bytes
+
+
+ );
+
+ *eof = 1;
+ return len;
+}
+
+
+
+static int proc_get_stats_rx(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+
+ int len = 0;
+
+ len += snprintf(page + len, count - len,
+ "RX packets: %lu\n"
+ "RX data crc err: %lu\n"
+ "RX mgmt crc err: %lu\n"
+ "RX desc err: %lu\n"
+ "RX rx overflow error: %lu\n",
+ priv->stats.rxint,
+ priv->stats.rxdatacrcerr,
+ priv->stats.rxmgmtcrcerr,
+ priv->stats.rxrdu,
+ priv->stats.rxoverflow);
+
+ *eof = 1;
+ return len;
+}
+
+void rtl8192_proc_module_init(void)
+{
+ RT_TRACE(COMP_INIT, "Initializing proc filesystem");
+ rtl8192_proc = create_proc_entry(DRV_NAME, S_IFDIR, init_net.proc_net);
+}
+
+
+void rtl8192_proc_module_remove(void)
+{
+ remove_proc_entry(DRV_NAME, init_net.proc_net);
+}
+
+
+void rtl8192_proc_remove_one(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+
+ printk(KERN_INFO "dev name %s\n", dev->name);
+
+ if (priv->dir_dev) {
+ remove_proc_entry("stats-tx", priv->dir_dev);
+ remove_proc_entry("stats-rx", priv->dir_dev);
+ remove_proc_entry("stats-ap", priv->dir_dev);
+ remove_proc_entry("registers-0", priv->dir_dev);
+ remove_proc_entry("registers-1", priv->dir_dev);
+ remove_proc_entry("registers-2", priv->dir_dev);
+ remove_proc_entry("registers-3", priv->dir_dev);
+ remove_proc_entry("registers-4", priv->dir_dev);
+ remove_proc_entry("registers-5", priv->dir_dev);
+ remove_proc_entry("registers-6", priv->dir_dev);
+ remove_proc_entry("registers-7", priv->dir_dev);
+ remove_proc_entry("registers-8", priv->dir_dev);
+ remove_proc_entry("registers-9", priv->dir_dev);
+ remove_proc_entry("registers-a", priv->dir_dev);
+ remove_proc_entry("registers-b", priv->dir_dev);
+ remove_proc_entry("registers-c", priv->dir_dev);
+ remove_proc_entry("registers-d", priv->dir_dev);
+ remove_proc_entry("registers-e", priv->dir_dev);
+ remove_proc_entry("RF-A", priv->dir_dev);
+ remove_proc_entry("RF-B", priv->dir_dev);
+ remove_proc_entry("RF-C", priv->dir_dev);
+ remove_proc_entry("RF-D", priv->dir_dev);
+ remove_proc_entry("SEC-CAM-1", priv->dir_dev);
+ remove_proc_entry("SEC-CAM-2", priv->dir_dev);
+ remove_proc_entry("SEC-CAM-3", priv->dir_dev);
+ remove_proc_entry("wlan0", rtl8192_proc);
+ priv->dir_dev = NULL;
+ }
+}
+
+
+void rtl8192_proc_init_one(struct net_device *dev)
+{
+ struct proc_dir_entry *e;
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+
+ priv->dir_dev = create_proc_entry(dev->name,
+ S_IFDIR | S_IRUGO | S_IXUGO,
+ rtl8192_proc);
+ if (!priv->dir_dev) {
+ RT_TRACE(COMP_ERR, "Unable to initialize /proc/net/rtl8192"
+ "/%s\n", dev->name);
+ return;
+ }
+ e = create_proc_read_entry("stats-rx", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_stats_rx, dev);
+
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/stats-rx\n",
+ dev->name);
+
+ e = create_proc_read_entry("stats-tx", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_stats_tx, dev);
+
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/stats-tx\n",
+ dev->name);
+
+ e = create_proc_read_entry("stats-ap", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_stats_ap, dev);
+
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/stats-ap\n",
+ dev->name);
+
+ e = create_proc_read_entry("registers-0", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_0, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-0\n",
+ dev->name);
+ e = create_proc_read_entry("registers-1", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_1, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-1\n",
+ dev->name);
+ e = create_proc_read_entry("registers-2", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_2, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-2\n",
+ dev->name);
+ e = create_proc_read_entry("registers-3", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_3, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-3\n",
+ dev->name);
+ e = create_proc_read_entry("registers-4", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_4, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-4\n",
+ dev->name);
+ e = create_proc_read_entry("registers-5", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_5, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-5\n",
+ dev->name);
+ e = create_proc_read_entry("registers-6", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_6, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-6\n",
+ dev->name);
+ e = create_proc_read_entry("registers-7", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_7, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-7\n",
+ dev->name);
+ e = create_proc_read_entry("registers-8", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_8, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-8\n",
+ dev->name);
+ e = create_proc_read_entry("registers-9", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_9, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-9\n",
+ dev->name);
+ e = create_proc_read_entry("registers-a", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_a, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-a\n",
+ dev->name);
+ e = create_proc_read_entry("registers-b", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_b, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-b\n",
+ dev->name);
+ e = create_proc_read_entry("registers-c", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_c, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-c\n",
+ dev->name);
+ e = create_proc_read_entry("registers-d", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_d, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-d\n",
+ dev->name);
+ e = create_proc_read_entry("registers-e", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_registers_e, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/registers-e\n",
+ dev->name);
+ e = create_proc_read_entry("RF-A", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_reg_rf_a, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/RF-A\n",
+ dev->name);
+ e = create_proc_read_entry("RF-B", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_reg_rf_b, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/RF-B\n",
+ dev->name);
+ e = create_proc_read_entry("RF-C", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_reg_rf_c, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/RF-C\n",
+ dev->name);
+ e = create_proc_read_entry("RF-D", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_reg_rf_d, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/RF-D\n",
+ dev->name);
+ e = create_proc_read_entry("SEC-CAM-1", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_cam_register_1, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/SEC-CAM-1\n",
+ dev->name);
+ e = create_proc_read_entry("SEC-CAM-2", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_cam_register_2, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/SEC-CAM-2\n",
+ dev->name);
+ e = create_proc_read_entry("SEC-CAM-3", S_IFREG | S_IRUGO,
+ priv->dir_dev, proc_get_cam_register_3, dev);
+ if (!e)
+ RT_TRACE(COMP_ERR, "Unable to initialize "
+ "/proc/net/rtl8192/%s/SEC-CAM-3\n",
+ dev->name);
+}
diff --git a/drivers/staging/rtl8192e/rtl_debug.h b/drivers/staging/rtl8192e/rtl_debug.h
new file mode 100644
index 00000000000..50fb9a9b828
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_debug.h
@@ -0,0 +1,299 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef _RTL_DEBUG_H
+#define _RTL_DEBUG_H
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/debugfs.h>
+
+struct r8192_priv;
+struct _tx_desc_8192se;
+struct _TX_DESC_8192CE;
+struct net_device;
+
+#define DBG_LOUD 4
+
+#define RT_ASSERT(_Exp, Fmt) \
+ if (!(_Exp)) { \
+ printk("Rtl819x: "); \
+ printk Fmt; \
+ }
+
+enum dbgp_flag {
+ FQoS = 0,
+ FTX = 1,
+ FRX = 2,
+ FSEC = 3,
+ FMGNT = 4,
+ FMLME = 5,
+ FRESOURCE = 6,
+ FBEACON = 7,
+ FISR = 8,
+ FPHY = 9,
+ FMP = 10,
+ FEEPROM = 11,
+ FPWR = 12,
+ FDM = 13,
+ FDBGCtrl = 14,
+ FC2H = 15,
+ FBT = 16,
+ FINIT = 17,
+ FIOCTL = 18,
+ DBGP_TYPE_MAX
+};
+
+#define QoS_INIT BIT0
+#define QoS_VISTA BIT1
+
+#define TX_DESC BIT0
+#define TX_DESC_TID BIT1
+
+#define RX_DATA BIT0
+#define RX_PHY_STS BIT1
+#define RX_PHY_SS BIT2
+#define RX_PHY_SQ BIT3
+#define RX_PHY_ASTS BIT4
+#define RX_ERR_LEN BIT5
+#define RX_DEFRAG BIT6
+#define RX_ERR_RATE BIT7
+
+
+
+#define MEDIA_STS BIT0
+#define LINK_STS BIT1
+
+#define OS_CHK BIT0
+
+#define BCN_SHOW BIT0
+#define BCN_PEER BIT1
+
+#define ISR_CHK BIT0
+
+#define PHY_BBR BIT0
+#define PHY_BBW BIT1
+#define PHY_RFR BIT2
+#define PHY_RFW BIT3
+#define PHY_MACR BIT4
+#define PHY_MACW BIT5
+#define PHY_ALLR BIT6
+#define PHY_ALLW BIT7
+#define PHY_TXPWR BIT8
+#define PHY_PWRDIFF BIT9
+
+#define MP_RX BIT0
+#define MP_SWICH_CH BIT1
+
+#define EEPROM_W BIT0
+#define EFUSE_PG BIT1
+#define EFUSE_READ_ALL BIT2
+
+#define LPS BIT0
+#define IPS BIT1
+#define PWRSW BIT2
+#define PWRHW BIT3
+#define PWRHAL BIT4
+
+#define WA_IOT BIT0
+#define DM_PWDB BIT1
+#define DM_Monitor BIT2
+#define DM_DIG BIT3
+#define DM_EDCA_Turbo BIT4
+
+#define DbgCtrl_Trace BIT0
+#define DbgCtrl_InbandNoise BIT1
+
+#define BT_TRACE BIT0
+#define BT_RFPoll BIT1
+
+#define C2H_Summary BIT0
+#define C2H_PacketData BIT1
+#define C2H_ContentData BIT2
+#define BT_TRACE BIT0
+#define BT_RFPoll BIT1
+
+#define INIT_EEPROM BIT0
+#define INIT_TxPower BIT1
+#define INIT_IQK BIT2
+#define INIT_RF BIT3
+
+#define IOCTL_TRACE BIT0
+#define IOCTL_BT_EVENT BIT1
+#define IOCTL_BT_EVENT_DETAIL BIT2
+#define IOCTL_BT_TX_ACLDATA BIT3
+#define IOCTL_BT_TX_ACLDATA_DETAIL BIT4
+#define IOCTL_BT_RX_ACLDATA BIT5
+#define IOCTL_BT_RX_ACLDATA_DETAIL BIT6
+#define IOCTL_BT_HCICMD BIT7
+#define IOCTL_BT_HCICMD_DETAIL BIT8
+#define IOCTL_IRP BIT9
+#define IOCTL_IRP_DETAIL BIT10
+#define IOCTL_CALLBACK_FUN BIT11
+#define IOCTL_STATE BIT12
+#define IOCTL_BT_TP BIT13
+#define IOCTL_BT_LOGO BIT14
+
+/* 2007/07/13 MH ------For DeBuG Print modeue------*/
+/*------------------------------Define structure----------------------------*/
+
+
+/*------------------------Export Marco Definition---------------------------*/
+#define DEBUG_PRINT 1
+
+#if (DEBUG_PRINT == 1)
+#define RTPRINT(dbgtype, dbgflag, printstr) \
+{ \
+ if (DBGP_Type[dbgtype] & dbgflag) { \
+ printk printstr; \
+ } \
+}
+
+#define RTPRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr) \
+{ \
+ if (DBGP_Type[dbgtype] & dbgflag) { \
+ int __i; \
+ u8 *ptr = (u8 *)_Ptr; \
+ printk printstr; \
+ printk(" "); \
+ for (__i = 0; __i < 6; __i++) \
+ printk("%02X%s", ptr[__i], \
+ (__i == 5) ? "" : "-"); \
+ printk("\n"); \
+ } \
+}
+
+#define RTPRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen)\
+{ \
+ if (DBGP_Type[dbgtype] & dbgflag) { \
+ int __i; \
+ u8 *ptr = (u8 *)_HexData; \
+ printk(_TitleString); \
+ for (__i = 0; __i < (int)_HexDataLen; __i++) { \
+ printk("%02X%s", ptr[__i], (((__i + 1) \
+ % 4) == 0) ? " " : " "); \
+ if (((__i + 1) % 16) == 0) \
+ printk("\n"); \
+ } \
+ printk("\n"); \
+ } \
+}
+#else
+#define RTPRINT(dbgtype, dbgflag, printstr)
+#define RTPRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr)
+#define RTPRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen)
+#endif
+
+extern u32 DBGP_Type[DBGP_TYPE_MAX];
+
+#define RT_PRINT_DATA(_Comp, _Level, _TitleString, _HexData, _HexDataLen) \
+do {\
+ if (((_Comp) & rt_global_debug_component) && \
+ (_Level <= rt_global_debug_component)) { \
+ int __i; \
+ u8* ptr = (u8 *)_HexData; \
+ printk(KERN_INFO "Rtl819x: "); \
+ printk(_TitleString); \
+ for (__i = 0; __i < (int)_HexDataLen; __i++) { \
+ printk("%02X%s", ptr[__i], (((__i + 1) % \
+ 4) == 0) ? " " : " "); \
+ if (((__i + 1) % 16) == 0) \
+ printk("\n"); \
+ } \
+ printk("\n"); \
+ } \
+} while (0);
+
+#define DMESG(x, a...)
+#define DMESGW(x, a...)
+#define DMESGE(x, a...)
+extern u32 rt_global_debug_component;
+#define RT_TRACE(component, x, args...) \
+do { \
+ if (rt_global_debug_component & component) \
+ printk(KERN_DEBUG DRV_NAME ":" x "\n" , \
+ ##args);\
+} while (0);
+
+#define assert(expr) \
+ if (!(expr)) { \
+ printk(KERN_INFO "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ }
+#define RT_DEBUG_DATA(level, data, datalen) \
+ do { \
+ if ((rt_global_debug_component & (level)) == (level)) {\
+ int _i; \
+ u8 *_pdata = (u8 *)data; \
+ printk(KERN_DEBUG DRV_NAME ": %s()\n", __func__); \
+ for (_i = 0; _i < (int)(datalen); _i++) { \
+ printk(KERN_INFO "%2x ", _pdata[_i]); \
+ if ((_i+1) % 16 == 0) \
+ printk("\n"); \
+ } \
+ printk(KERN_INFO "\n"); \
+ } \
+ } while (0)
+
+struct rtl_fs_debug {
+ const char *name;
+ struct dentry *dir_drv;
+ struct dentry *debug_register;
+ u32 hw_type;
+ u32 hw_offset;
+ bool hw_holding;
+};
+
+void print_buffer(u32 *buffer, int len);
+void dump_eprom(struct net_device *dev);
+void rtl8192_dump_reg(struct net_device *dev);
+
+/* debugfs stuff */
+static inline int rtl_debug_module_init(struct r8192_priv *priv,
+ const char *name)
+{
+ return 0;
+}
+
+static inline void rtl_debug_module_remove(struct r8192_priv *priv)
+{
+}
+
+static inline int rtl_create_debugfs_root(void)
+{
+ return 0;
+}
+
+static inline void rtl_remove_debugfs_root(void)
+{
+}
+
+/* proc stuff */
+void rtl8192_proc_init_one(struct net_device *dev);
+void rtl8192_proc_remove_one(struct net_device *dev);
+void rtl8192_proc_module_init(void);
+void rtl8192_proc_module_remove(void);
+void rtl8192_dbgp_flag_init(struct net_device *dev);
+
+#endif
diff --git a/drivers/staging/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl_dm.c
new file mode 100644
index 00000000000..a7fa9aad6f2
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_dm.c
@@ -0,0 +1,2995 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#include "rtl_core.h"
+#include "rtl_dm.h"
+#include "r8192E_hw.h"
+#include "r8192E_phy.h"
+#include "r8192E_phyreg.h"
+#include "r8190P_rtl8256.h"
+#include "r8192E_cmdpkt.h"
+
+/*---------------------------Define Local Constant---------------------------*/
+static u32 edca_setting_DL[HT_IOT_PEER_MAX] = {
+ 0x5e4322,
+ 0x5e4322,
+ 0x5ea44f,
+ 0x5e4322,
+ 0x604322,
+ 0xa44f,
+ 0x5e4322,
+ 0x5e4332
+};
+
+static u32 edca_setting_DL_GMode[HT_IOT_PEER_MAX] = {
+ 0x5e4322,
+ 0x5e4322,
+ 0x5e4322,
+ 0x5e4322,
+ 0x604322,
+ 0xa44f,
+ 0x5e4322,
+ 0x5e4322
+};
+
+static u32 edca_setting_UL[HT_IOT_PEER_MAX] = {
+ 0x5e4322,
+ 0xa44f,
+ 0x5ea44f,
+ 0x5e4322,
+ 0x604322,
+ 0x5e4322,
+ 0x5e4322,
+ 0x5e4332
+};
+
+#define RTK_UL_EDCA 0xa44f
+#define RTK_DL_EDCA 0x5e4322
+/*---------------------------Define Local Constant---------------------------*/
+
+
+/*------------------------Define global variable-----------------------------*/
+struct dig_t dm_digtable;
+u8 dm_shadow[16][256] = {
+ {0}
+};
+
+struct drx_path_sel DM_RxPathSelTable;
+/*------------------------Define global variable-----------------------------*/
+
+
+/*------------------------Define local variable------------------------------*/
+/*------------------------Define local variable------------------------------*/
+
+
+
+/*---------------------Define local function prototype-----------------------*/
+static void dm_check_rate_adaptive(struct net_device *dev);
+
+static void dm_init_bandwidth_autoswitch(struct net_device *dev);
+static void dm_bandwidth_autoswitch(struct net_device *dev);
+
+
+static void dm_check_txpower_tracking(struct net_device *dev);
+
+
+
+
+
+static void dm_bb_initialgain_restore(struct net_device *dev);
+
+
+static void dm_bb_initialgain_backup(struct net_device *dev);
+
+static void dm_dig_init(struct net_device *dev);
+static void dm_ctrl_initgain_byrssi(struct net_device *dev);
+static void dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev);
+static void dm_ctrl_initgain_byrssi_by_driverrssi(struct net_device *dev);
+static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(struct net_device *dev);
+static void dm_initial_gain(struct net_device *dev);
+static void dm_pd_th(struct net_device *dev);
+static void dm_cs_ratio(struct net_device *dev);
+
+static void dm_init_ctstoself(struct net_device *dev);
+static void dm_Init_WA_Broadcom_IOT(struct net_device *dev);
+
+static void dm_check_edca_turbo(struct net_device *dev);
+
+static void dm_check_pbc_gpio(struct net_device *dev);
+
+
+static void dm_check_rx_path_selection(struct net_device *dev);
+static void dm_init_rxpath_selection(struct net_device *dev);
+static void dm_rxpath_sel_byrssi(struct net_device *dev);
+
+
+static void dm_init_fsync(struct net_device *dev);
+static void dm_deInit_fsync(struct net_device *dev);
+
+static void dm_check_txrateandretrycount(struct net_device *dev);
+static void dm_check_ac_dc_power(struct net_device *dev);
+
+/*---------------------Define local function prototype-----------------------*/
+
+static void dm_init_dynamic_txpower(struct net_device *dev);
+static void dm_dynamic_txpower(struct net_device *dev);
+
+
+static void dm_send_rssi_tofw(struct net_device *dev);
+static void dm_ctstoself(struct net_device *dev);
+/*---------------------------Define function prototype------------------------*/
+
+void init_hal_dm(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ priv->DM_Type = DM_Type_ByDriver;
+
+ priv->undecorated_smoothed_pwdb = -1;
+
+ dm_init_dynamic_txpower(dev);
+
+ init_rate_adaptive(dev);
+
+ dm_dig_init(dev);
+ dm_init_edca_turbo(dev);
+ dm_init_bandwidth_autoswitch(dev);
+ dm_init_fsync(dev);
+ dm_init_rxpath_selection(dev);
+ dm_init_ctstoself(dev);
+ if (IS_HARDWARE_TYPE_8192SE(dev))
+ dm_Init_WA_Broadcom_IOT(dev);
+
+ INIT_DELAYED_WORK_RSL(&priv->gpio_change_rf_wq, (void *)dm_CheckRfCtrlGPIO, dev);
+}
+
+void deinit_hal_dm(struct net_device *dev)
+{
+
+ dm_deInit_fsync(dev);
+
+}
+
+void hal_dm_watchdog(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ if (priv->being_init_adapter)
+ return;
+
+ dm_check_ac_dc_power(dev);
+
+ dm_check_pbc_gpio(dev);
+ dm_check_txrateandretrycount(dev);
+ dm_check_edca_turbo(dev);
+
+ dm_check_rate_adaptive(dev);
+ dm_dynamic_txpower(dev);
+ dm_check_txpower_tracking(dev);
+
+ dm_ctrl_initgain_byrssi(dev);
+ dm_bandwidth_autoswitch(dev);
+
+ dm_check_rx_path_selection(dev);
+ dm_check_fsync(dev);
+
+ dm_send_rssi_tofw(dev);
+ dm_ctstoself(dev);
+}
+
+static void dm_check_ac_dc_power(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ static char *ac_dc_check_script_path = "/etc/acpi/wireless-rtl-ac-dc-power.sh";
+ char *argv[] = {ac_dc_check_script_path, DRV_NAME, NULL};
+ static char *envp[] = {"HOME=/",
+ "TERM=linux",
+ "PATH=/usr/bin:/bin",
+ NULL};
+
+ if (priv->ResetProgress == RESET_TYPE_SILENT) {
+ RT_TRACE((COMP_INIT | COMP_POWER | COMP_RF),
+ "GPIOChangeRFWorkItemCallBack(): Silent Reseting!!!!!!!\n");
+ return;
+ }
+
+ if (priv->rtllib->state != RTLLIB_LINKED)
+ return;
+ call_usermodehelper(ac_dc_check_script_path, argv, envp, 1);
+
+ return;
+};
+
+
+void init_rate_adaptive(struct net_device *dev)
+{
+
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rate_adaptive *pra = (struct rate_adaptive *)&priv->rate_adaptive;
+
+ pra->ratr_state = DM_RATR_STA_MAX;
+ pra->high2low_rssi_thresh_for_ra = RateAdaptiveTH_High;
+ pra->low2high_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M+5;
+ pra->low2high_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M+5;
+
+ pra->high_rssi_thresh_for_ra = RateAdaptiveTH_High+5;
+ pra->low_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M;
+ pra->low_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M;
+
+ if (priv->CustomerID == RT_CID_819x_Netcore)
+ pra->ping_rssi_enable = 1;
+ else
+ pra->ping_rssi_enable = 0;
+ pra->ping_rssi_thresh_for_ra = 15;
+
+
+ if (priv->rf_type == RF_2T4R) {
+ pra->upper_rssi_threshold_ratr = 0x8f0f0000;
+ pra->middle_rssi_threshold_ratr = 0x8f0ff000;
+ pra->low_rssi_threshold_ratr = 0x8f0ff001;
+ pra->low_rssi_threshold_ratr_40M = 0x8f0ff005;
+ pra->low_rssi_threshold_ratr_20M = 0x8f0ff001;
+ pra->ping_rssi_ratr = 0x0000000d;
+ } else if (priv->rf_type == RF_1T2R) {
+ pra->upper_rssi_threshold_ratr = 0x000fc000;
+ pra->middle_rssi_threshold_ratr = 0x000ff000;
+ pra->low_rssi_threshold_ratr = 0x000ff001;
+ pra->low_rssi_threshold_ratr_40M = 0x000ff005;
+ pra->low_rssi_threshold_ratr_20M = 0x000ff001;
+ pra->ping_rssi_ratr = 0x0000000d;
+ }
+
+}
+
+
+static void dm_check_rate_adaptive(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_hi_throughput *pHTInfo = priv->rtllib->pHTInfo;
+ struct rate_adaptive *pra = (struct rate_adaptive *)&priv->rate_adaptive;
+ u32 currentRATR, targetRATR = 0;
+ u32 LowRSSIThreshForRA = 0, HighRSSIThreshForRA = 0;
+ bool bshort_gi_enabled = false;
+ static u8 ping_rssi_state;
+
+ if (IS_NIC_DOWN(priv)) {
+ RT_TRACE(COMP_RATE, "<---- dm_check_rate_adaptive(): driver is going to unload\n");
+ return;
+ }
+
+ if (pra->rate_adaptive_disabled)
+ return;
+
+ if (!(priv->rtllib->mode == WIRELESS_MODE_N_24G ||
+ priv->rtllib->mode == WIRELESS_MODE_N_5G))
+ return;
+
+ if (priv->rtllib->state == RTLLIB_LINKED) {
+
+ bshort_gi_enabled = (pHTInfo->bCurTxBW40MHz && pHTInfo->bCurShortGI40MHz) ||
+ (!pHTInfo->bCurTxBW40MHz && pHTInfo->bCurShortGI20MHz);
+
+
+ pra->upper_rssi_threshold_ratr =
+ (pra->upper_rssi_threshold_ratr & (~BIT31)) | ((bshort_gi_enabled) ? BIT31 : 0);
+
+ pra->middle_rssi_threshold_ratr =
+ (pra->middle_rssi_threshold_ratr & (~BIT31)) | ((bshort_gi_enabled) ? BIT31 : 0);
+
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
+ pra->low_rssi_threshold_ratr =
+ (pra->low_rssi_threshold_ratr_40M & (~BIT31)) | ((bshort_gi_enabled) ? BIT31 : 0);
+ } else {
+ pra->low_rssi_threshold_ratr =
+ (pra->low_rssi_threshold_ratr_20M & (~BIT31)) | ((bshort_gi_enabled) ? BIT31 : 0);
+ }
+ pra->ping_rssi_ratr =
+ (pra->ping_rssi_ratr & (~BIT31)) | ((bshort_gi_enabled) ? BIT31 : 0);
+
+ if (pra->ratr_state == DM_RATR_STA_HIGH) {
+ HighRSSIThreshForRA = pra->high2low_rssi_thresh_for_ra;
+ LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
+ (pra->low_rssi_thresh_for_ra40M) : (pra->low_rssi_thresh_for_ra20M);
+ } else if (pra->ratr_state == DM_RATR_STA_LOW) {
+ HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
+ LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
+ (pra->low2high_rssi_thresh_for_ra40M) : (pra->low2high_rssi_thresh_for_ra20M);
+ } else {
+ HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
+ LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
+ (pra->low_rssi_thresh_for_ra40M) : (pra->low_rssi_thresh_for_ra20M);
+ }
+
+ if (priv->undecorated_smoothed_pwdb >= (long)HighRSSIThreshForRA) {
+ pra->ratr_state = DM_RATR_STA_HIGH;
+ targetRATR = pra->upper_rssi_threshold_ratr;
+ } else if (priv->undecorated_smoothed_pwdb >= (long)LowRSSIThreshForRA) {
+ pra->ratr_state = DM_RATR_STA_MIDDLE;
+ targetRATR = pra->middle_rssi_threshold_ratr;
+ } else {
+ pra->ratr_state = DM_RATR_STA_LOW;
+ targetRATR = pra->low_rssi_threshold_ratr;
+ }
+
+ if (pra->ping_rssi_enable) {
+ if (priv->undecorated_smoothed_pwdb < (long)(pra->ping_rssi_thresh_for_ra+5)) {
+ if ((priv->undecorated_smoothed_pwdb < (long)pra->ping_rssi_thresh_for_ra) ||
+ ping_rssi_state) {
+ pra->ratr_state = DM_RATR_STA_LOW;
+ targetRATR = pra->ping_rssi_ratr;
+ ping_rssi_state = 1;
+ }
+ } else {
+ ping_rssi_state = 0;
+ }
+ }
+
+ if (priv->rtllib->GetHalfNmodeSupportByAPsHandler(dev))
+ targetRATR &= 0xf00fffff;
+
+ currentRATR = read_nic_dword(dev, RATR0);
+ if (targetRATR != currentRATR) {
+ u32 ratr_value;
+ ratr_value = targetRATR;
+ RT_TRACE(COMP_RATE,
+ "currentRATR = %x, targetRATR = %x\n",
+ currentRATR, targetRATR);
+ if (priv->rf_type == RF_1T2R)
+ ratr_value &= ~(RATE_ALL_OFDM_2SS);
+ write_nic_dword(dev, RATR0, ratr_value);
+ write_nic_byte(dev, UFWP, 1);
+
+ pra->last_ratr = targetRATR;
+ }
+
+ } else {
+ pra->ratr_state = DM_RATR_STA_MAX;
+ }
+}
+
+static void dm_init_bandwidth_autoswitch(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ priv->rtllib->bandwidth_auto_switch.threshold_20Mhzto40Mhz = BW_AUTO_SWITCH_LOW_HIGH;
+ priv->rtllib->bandwidth_auto_switch.threshold_40Mhzto20Mhz = BW_AUTO_SWITCH_HIGH_LOW;
+ priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz = false;
+ priv->rtllib->bandwidth_auto_switch.bautoswitch_enable = false;
+}
+
+static void dm_bandwidth_autoswitch(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ||
+ !priv->rtllib->bandwidth_auto_switch.bautoswitch_enable) {
+ return;
+ } else {
+ if (priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz == false) {
+ if (priv->undecorated_smoothed_pwdb <=
+ priv->rtllib->bandwidth_auto_switch.threshold_40Mhzto20Mhz)
+ priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz = true;
+ } else {
+ if (priv->undecorated_smoothed_pwdb >=
+ priv->rtllib->bandwidth_auto_switch.threshold_20Mhzto40Mhz)
+ priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz = false;
+
+ }
+ }
+}
+
+static u32 OFDMSwingTable[OFDM_Table_Length] = {
+ 0x7f8001fe,
+ 0x71c001c7,
+ 0x65400195,
+ 0x5a400169,
+ 0x50800142,
+ 0x47c0011f,
+ 0x40000100,
+ 0x390000e4,
+ 0x32c000cb,
+ 0x2d4000b5,
+ 0x288000a2,
+ 0x24000090,
+ 0x20000080,
+ 0x1c800072,
+ 0x19800066,
+ 0x26c0005b,
+ 0x24400051,
+ 0x12000048,
+ 0x10000040
+};
+
+static u8 CCKSwingTable_Ch1_Ch13[CCK_Table_length][8] = {
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}
+};
+
+static u8 CCKSwingTable_Ch14[CCK_Table_length][8] = {
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}
+};
+
+#define Pw_Track_Flag 0x11d
+#define Tssi_Mea_Value 0x13c
+#define Tssi_Report_Value1 0x134
+#define Tssi_Report_Value2 0x13e
+#define FW_Busy_Flag 0x13f
+
+static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ bool bHighpowerstate, viviflag = false;
+ struct dcmd_txcmd tx_cmd;
+ u8 powerlevelOFDM24G;
+ int i = 0, j = 0, k = 0;
+ u8 RF_Type, tmp_report[5] = {0, 0, 0, 0, 0};
+ u32 Value;
+ u8 Pwr_Flag;
+ u16 Avg_TSSI_Meas, TSSI_13dBm, Avg_TSSI_Meas_from_driver = 0;
+ u32 delta = 0;
+
+ RT_TRACE(COMP_POWER_TRACKING, "%s()\n", __func__);
+ write_nic_byte(dev, Pw_Track_Flag, 0);
+ write_nic_byte(dev, FW_Busy_Flag, 0);
+ priv->rtllib->bdynamic_txpower_enable = false;
+ bHighpowerstate = priv->bDynamicTxHighPower;
+
+ powerlevelOFDM24G = (u8)(priv->Pwr_Track>>24);
+ RF_Type = priv->rf_type;
+ Value = (RF_Type<<8) | powerlevelOFDM24G;
+
+ RT_TRACE(COMP_POWER_TRACKING, "powerlevelOFDM24G = %x\n",
+ powerlevelOFDM24G);
+
+
+ for (j = 0; j <= 30; j++) {
+
+ tx_cmd.Op = TXCMD_SET_TX_PWR_TRACKING;
+ tx_cmd.Length = 4;
+ tx_cmd.Value = Value;
+ cmpk_message_handle_tx(dev, (u8 *)&tx_cmd,
+ DESC_PACKET_TYPE_INIT,
+ sizeof(struct dcmd_txcmd));
+ mdelay(1);
+ for (i = 0; i <= 30; i++) {
+ Pwr_Flag = read_nic_byte(dev, Pw_Track_Flag);
+
+ if (Pwr_Flag == 0) {
+ mdelay(1);
+
+ if (priv->bResetInProgress) {
+ RT_TRACE(COMP_POWER_TRACKING,
+ "we are in slient reset progress, so return\n");
+ write_nic_byte(dev, Pw_Track_Flag, 0);
+ write_nic_byte(dev, FW_Busy_Flag, 0);
+ return;
+ }
+ if ((priv->rtllib->eRFPowerState != eRfOn)) {
+ RT_TRACE(COMP_POWER_TRACKING,
+ "we are in power save, so return\n");
+ write_nic_byte(dev, Pw_Track_Flag, 0);
+ write_nic_byte(dev, FW_Busy_Flag, 0);
+ return;
+ }
+
+ continue;
+ }
+
+ Avg_TSSI_Meas = read_nic_word(dev, Tssi_Mea_Value);
+
+ if (Avg_TSSI_Meas == 0) {
+ write_nic_byte(dev, Pw_Track_Flag, 0);
+ write_nic_byte(dev, FW_Busy_Flag, 0);
+ return;
+ }
+
+ for (k = 0; k < 5; k++) {
+ if (k != 4)
+ tmp_report[k] = read_nic_byte(dev,
+ Tssi_Report_Value1+k);
+ else
+ tmp_report[k] = read_nic_byte(dev,
+ Tssi_Report_Value2);
+
+ RT_TRACE(COMP_POWER_TRACKING,
+ "TSSI_report_value = %d\n",
+ tmp_report[k]);
+
+ if (tmp_report[k] <= 20) {
+ viviflag = true;
+ break;
+ }
+ }
+
+ if (viviflag == true) {
+ write_nic_byte(dev, Pw_Track_Flag, 0);
+ viviflag = false;
+ RT_TRACE(COMP_POWER_TRACKING, "we filted this data\n");
+ for (k = 0; k < 5; k++)
+ tmp_report[k] = 0;
+ break;
+ }
+
+ for (k = 0; k < 5; k++)
+ Avg_TSSI_Meas_from_driver += tmp_report[k];
+
+ Avg_TSSI_Meas_from_driver = Avg_TSSI_Meas_from_driver*100/5;
+ RT_TRACE(COMP_POWER_TRACKING,
+ "Avg_TSSI_Meas_from_driver = %d\n",
+ Avg_TSSI_Meas_from_driver);
+ TSSI_13dBm = priv->TSSI_13dBm;
+ RT_TRACE(COMP_POWER_TRACKING, "TSSI_13dBm = %d\n", TSSI_13dBm);
+
+ if (Avg_TSSI_Meas_from_driver > TSSI_13dBm)
+ delta = Avg_TSSI_Meas_from_driver - TSSI_13dBm;
+ else
+ delta = TSSI_13dBm - Avg_TSSI_Meas_from_driver;
+
+ if (delta <= E_FOR_TX_POWER_TRACK) {
+ priv->rtllib->bdynamic_txpower_enable = true;
+ write_nic_byte(dev, Pw_Track_Flag, 0);
+ write_nic_byte(dev, FW_Busy_Flag, 0);
+ RT_TRACE(COMP_POWER_TRACKING,
+ "tx power track is done\n");
+ RT_TRACE(COMP_POWER_TRACKING,
+ "priv->rfa_txpowertrackingindex = %d\n",
+ priv->rfa_txpowertrackingindex);
+ RT_TRACE(COMP_POWER_TRACKING,
+ "priv->rfa_txpowertrackingindex_real = %d\n",
+ priv->rfa_txpowertrackingindex_real);
+ RT_TRACE(COMP_POWER_TRACKING,
+ "priv->CCKPresentAttentuation_difference = %d\n",
+ priv->CCKPresentAttentuation_difference);
+ RT_TRACE(COMP_POWER_TRACKING,
+ "priv->CCKPresentAttentuation = %d\n",
+ priv->CCKPresentAttentuation);
+ return;
+ } else {
+ if (Avg_TSSI_Meas_from_driver < TSSI_13dBm - E_FOR_TX_POWER_TRACK) {
+ if (RF_Type == RF_2T4R) {
+
+ if ((priv->rfa_txpowertrackingindex > 0) &&
+ (priv->rfc_txpowertrackingindex > 0)) {
+ priv->rfa_txpowertrackingindex--;
+ if (priv->rfa_txpowertrackingindex_real > 4) {
+ priv->rfa_txpowertrackingindex_real--;
+ rtl8192_setBBreg(dev,
+ rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
+ }
+
+ priv->rfc_txpowertrackingindex--;
+ if (priv->rfc_txpowertrackingindex_real > 4) {
+ priv->rfc_txpowertrackingindex_real--;
+ rtl8192_setBBreg(dev,
+ rOFDM0_XCTxIQImbalance,
+ bMaskDWord,
+ priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value);
+ }
+ } else {
+ rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ priv->txbbgain_table[4].txbbgain_value);
+ rtl8192_setBBreg(dev,
+ rOFDM0_XCTxIQImbalance,
+ bMaskDWord, priv->txbbgain_table[4].txbbgain_value);
+ }
+ } else {
+ if (priv->rfa_txpowertrackingindex > 0) {
+ priv->rfa_txpowertrackingindex--;
+ if (priv->rfa_txpowertrackingindex_real > 4) {
+ priv->rfa_txpowertrackingindex_real--;
+ rtl8192_setBBreg(dev,
+ rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
+ }
+ } else
+ rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord, priv->txbbgain_table[4].txbbgain_value);
+
+ }
+ } else {
+ if (RF_Type == RF_2T4R) {
+ if ((priv->rfa_txpowertrackingindex <
+ TxBBGainTableLength - 1) &&
+ (priv->rfc_txpowertrackingindex <
+ TxBBGainTableLength - 1)) {
+ priv->rfa_txpowertrackingindex++;
+ priv->rfa_txpowertrackingindex_real++;
+ rtl8192_setBBreg(dev,
+ rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ priv->txbbgain_table
+ [priv->rfa_txpowertrackingindex_real].txbbgain_value);
+ priv->rfc_txpowertrackingindex++;
+ priv->rfc_txpowertrackingindex_real++;
+ rtl8192_setBBreg(dev,
+ rOFDM0_XCTxIQImbalance,
+ bMaskDWord,
+ priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value);
+ } else {
+ rtl8192_setBBreg(dev,
+ rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ priv->txbbgain_table[TxBBGainTableLength - 1].txbbgain_value);
+ rtl8192_setBBreg(dev,
+ rOFDM0_XCTxIQImbalance,
+ bMaskDWord, priv->txbbgain_table[TxBBGainTableLength - 1].txbbgain_value);
+ }
+ } else {
+ if (priv->rfa_txpowertrackingindex < (TxBBGainTableLength - 1)) {
+ priv->rfa_txpowertrackingindex++;
+ priv->rfa_txpowertrackingindex_real++;
+ rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
+ } else
+ rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ priv->txbbgain_table[TxBBGainTableLength - 1].txbbgain_value);
+ }
+ }
+ if (RF_Type == RF_2T4R) {
+ priv->CCKPresentAttentuation_difference
+ = priv->rfa_txpowertrackingindex - priv->rfa_txpowertracking_default;
+ } else {
+ priv->CCKPresentAttentuation_difference
+ = priv->rfa_txpowertrackingindex_real - priv->rfa_txpowertracking_default;
+ }
+
+ if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ priv->CCKPresentAttentuation =
+ priv->CCKPresentAttentuation_20Mdefault +
+ priv->CCKPresentAttentuation_difference;
+ else
+ priv->CCKPresentAttentuation =
+ priv->CCKPresentAttentuation_40Mdefault +
+ priv->CCKPresentAttentuation_difference;
+
+ if (priv->CCKPresentAttentuation > (CCKTxBBGainTableLength-1))
+ priv->CCKPresentAttentuation = CCKTxBBGainTableLength-1;
+ if (priv->CCKPresentAttentuation < 0)
+ priv->CCKPresentAttentuation = 0;
+
+ if (priv->CCKPresentAttentuation > -1 &&
+ priv->CCKPresentAttentuation < CCKTxBBGainTableLength) {
+ if (priv->rtllib->current_network.channel == 14 &&
+ !priv->bcck_in_ch14) {
+ priv->bcck_in_ch14 = true;
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ } else if (priv->rtllib->current_network.channel != 14 && priv->bcck_in_ch14) {
+ priv->bcck_in_ch14 = false;
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ } else
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ }
+ RT_TRACE(COMP_POWER_TRACKING,
+ "priv->rfa_txpowertrackingindex = %d\n",
+ priv->rfa_txpowertrackingindex);
+ RT_TRACE(COMP_POWER_TRACKING,
+ "priv->rfa_txpowertrackingindex_real = %d\n",
+ priv->rfa_txpowertrackingindex_real);
+ RT_TRACE(COMP_POWER_TRACKING,
+ "priv->CCKPresentAttentuation_difference = %d\n",
+ priv->CCKPresentAttentuation_difference);
+ RT_TRACE(COMP_POWER_TRACKING,
+ "priv->CCKPresentAttentuation = %d\n",
+ priv->CCKPresentAttentuation);
+
+ if (priv->CCKPresentAttentuation_difference <= -12 || priv->CCKPresentAttentuation_difference >= 24) {
+ priv->rtllib->bdynamic_txpower_enable = true;
+ write_nic_byte(dev, Pw_Track_Flag, 0);
+ write_nic_byte(dev, FW_Busy_Flag, 0);
+ RT_TRACE(COMP_POWER_TRACKING, "tx power track--->limited\n");
+ return;
+ }
+ }
+ write_nic_byte(dev, Pw_Track_Flag, 0);
+ Avg_TSSI_Meas_from_driver = 0;
+ for (k = 0; k < 5; k++)
+ tmp_report[k] = 0;
+ break;
+ }
+ write_nic_byte(dev, FW_Busy_Flag, 0);
+ }
+ priv->rtllib->bdynamic_txpower_enable = true;
+ write_nic_byte(dev, Pw_Track_Flag, 0);
+}
+
+static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
+{
+#define ThermalMeterVal 9
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 tmpRegA, TempCCk;
+ u8 tmpOFDMindex, tmpCCKindex, tmpCCK20Mindex, tmpCCK40Mindex, tmpval;
+ int i = 0, CCKSwingNeedUpdate = 0;
+
+ if (!priv->btxpower_trackingInit) {
+ tmpRegA = rtl8192_QueryBBReg(dev, rOFDM0_XATxIQImbalance, bMaskDWord);
+ for (i = 0; i < OFDM_Table_Length; i++) {
+ if (tmpRegA == OFDMSwingTable[i]) {
+ priv->OFDM_index[0] = (u8)i;
+ RT_TRACE(COMP_POWER_TRACKING, "Initial reg0x%x = 0x%x, OFDM_index = 0x%x\n",
+ rOFDM0_XATxIQImbalance, tmpRegA, priv->OFDM_index[0]);
+ }
+ }
+
+ TempCCk = rtl8192_QueryBBReg(dev, rCCK0_TxFilter1, bMaskByte2);
+ for (i = 0; i < CCK_Table_length; i++) {
+ if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
+ priv->CCK_index = (u8) i;
+ RT_TRACE(COMP_POWER_TRACKING, "Initial reg0x%x"
+ " = 0x%x, CCK_index = 0x%x\n",
+ rCCK0_TxFilter1, TempCCk,
+ priv->CCK_index);
+ break;
+ }
+ }
+ priv->btxpower_trackingInit = true;
+ return;
+ }
+
+ tmpRegA = rtl8192_phy_QueryRFReg(dev, RF90_PATH_A, 0x12, 0x078);
+ RT_TRACE(COMP_POWER_TRACKING, "Readback ThermalMeterA = %d\n", tmpRegA);
+ if (tmpRegA < 3 || tmpRegA > 13)
+ return;
+ if (tmpRegA >= 12)
+ tmpRegA = 12;
+ RT_TRACE(COMP_POWER_TRACKING, "Valid ThermalMeterA = %d\n", tmpRegA);
+ priv->ThermalMeter[0] = ThermalMeterVal;
+ priv->ThermalMeter[1] = ThermalMeterVal;
+
+ if (priv->ThermalMeter[0] >= (u8)tmpRegA) {
+ tmpOFDMindex = tmpCCK20Mindex = 6+(priv->ThermalMeter[0] -
+ (u8)tmpRegA);
+ tmpCCK40Mindex = tmpCCK20Mindex - 6;
+ if (tmpOFDMindex >= OFDM_Table_Length)
+ tmpOFDMindex = OFDM_Table_Length-1;
+ if (tmpCCK20Mindex >= CCK_Table_length)
+ tmpCCK20Mindex = CCK_Table_length-1;
+ if (tmpCCK40Mindex >= CCK_Table_length)
+ tmpCCK40Mindex = CCK_Table_length-1;
+ } else {
+ tmpval = ((u8)tmpRegA - priv->ThermalMeter[0]);
+ if (tmpval >= 6)
+ tmpOFDMindex = tmpCCK20Mindex = 0;
+ else
+ tmpOFDMindex = tmpCCK20Mindex = 6 - tmpval;
+ tmpCCK40Mindex = 0;
+ }
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ tmpCCKindex = tmpCCK40Mindex;
+ else
+ tmpCCKindex = tmpCCK20Mindex;
+
+ priv->Record_CCK_20Mindex = tmpCCK20Mindex;
+ priv->Record_CCK_40Mindex = tmpCCK40Mindex;
+ RT_TRACE(COMP_POWER_TRACKING, "Record_CCK_20Mindex / Record_CCK_40"
+ "Mindex = %d / %d.\n",
+ priv->Record_CCK_20Mindex, priv->Record_CCK_40Mindex);
+
+ if (priv->rtllib->current_network.channel == 14 &&
+ !priv->bcck_in_ch14) {
+ priv->bcck_in_ch14 = true;
+ CCKSwingNeedUpdate = 1;
+ } else if (priv->rtllib->current_network.channel != 14 &&
+ priv->bcck_in_ch14) {
+ priv->bcck_in_ch14 = false;
+ CCKSwingNeedUpdate = 1;
+ }
+
+ if (priv->CCK_index != tmpCCKindex) {
+ priv->CCK_index = tmpCCKindex;
+ CCKSwingNeedUpdate = 1;
+ }
+
+ if (CCKSwingNeedUpdate)
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ if (priv->OFDM_index[0] != tmpOFDMindex) {
+ priv->OFDM_index[0] = tmpOFDMindex;
+ rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
+ OFDMSwingTable[priv->OFDM_index[0]]);
+ RT_TRACE(COMP_POWER_TRACKING, "Update OFDMSwing[%d] = 0x%x\n",
+ priv->OFDM_index[0],
+ OFDMSwingTable[priv->OFDM_index[0]]);
+ }
+ priv->txpower_count = 0;
+}
+
+void dm_txpower_trackingcallback(void *data)
+{
+ struct r8192_priv *priv = container_of_dwork_rsl(data,
+ struct r8192_priv, txpower_tracking_wq);
+ struct net_device *dev = priv->rtllib->dev;
+
+ if (priv->IC_Cut >= IC_VersionCut_D)
+ dm_TXPowerTrackingCallback_TSSI(dev);
+ else
+ dm_TXPowerTrackingCallback_ThermalMeter(dev);
+}
+
+static void dm_InitializeTXPowerTracking_TSSI(struct net_device *dev)
+{
+
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ priv->txbbgain_table[0].txbb_iq_amplifygain = 12;
+ priv->txbbgain_table[0].txbbgain_value = 0x7f8001fe;
+ priv->txbbgain_table[1].txbb_iq_amplifygain = 11;
+ priv->txbbgain_table[1].txbbgain_value = 0x788001e2;
+ priv->txbbgain_table[2].txbb_iq_amplifygain = 10;
+ priv->txbbgain_table[2].txbbgain_value = 0x71c001c7;
+ priv->txbbgain_table[3].txbb_iq_amplifygain = 9;
+ priv->txbbgain_table[3].txbbgain_value = 0x6b8001ae;
+ priv->txbbgain_table[4].txbb_iq_amplifygain = 8;
+ priv->txbbgain_table[4].txbbgain_value = 0x65400195;
+ priv->txbbgain_table[5].txbb_iq_amplifygain = 7;
+ priv->txbbgain_table[5].txbbgain_value = 0x5fc0017f;
+ priv->txbbgain_table[6].txbb_iq_amplifygain = 6;
+ priv->txbbgain_table[6].txbbgain_value = 0x5a400169;
+ priv->txbbgain_table[7].txbb_iq_amplifygain = 5;
+ priv->txbbgain_table[7].txbbgain_value = 0x55400155;
+ priv->txbbgain_table[8].txbb_iq_amplifygain = 4;
+ priv->txbbgain_table[8].txbbgain_value = 0x50800142;
+ priv->txbbgain_table[9].txbb_iq_amplifygain = 3;
+ priv->txbbgain_table[9].txbbgain_value = 0x4c000130;
+ priv->txbbgain_table[10].txbb_iq_amplifygain = 2;
+ priv->txbbgain_table[10].txbbgain_value = 0x47c0011f;
+ priv->txbbgain_table[11].txbb_iq_amplifygain = 1;
+ priv->txbbgain_table[11].txbbgain_value = 0x43c0010f;
+ priv->txbbgain_table[12].txbb_iq_amplifygain = 0;
+ priv->txbbgain_table[12].txbbgain_value = 0x40000100;
+ priv->txbbgain_table[13].txbb_iq_amplifygain = -1;
+ priv->txbbgain_table[13].txbbgain_value = 0x3c8000f2;
+ priv->txbbgain_table[14].txbb_iq_amplifygain = -2;
+ priv->txbbgain_table[14].txbbgain_value = 0x390000e4;
+ priv->txbbgain_table[15].txbb_iq_amplifygain = -3;
+ priv->txbbgain_table[15].txbbgain_value = 0x35c000d7;
+ priv->txbbgain_table[16].txbb_iq_amplifygain = -4;
+ priv->txbbgain_table[16].txbbgain_value = 0x32c000cb;
+ priv->txbbgain_table[17].txbb_iq_amplifygain = -5;
+ priv->txbbgain_table[17].txbbgain_value = 0x300000c0;
+ priv->txbbgain_table[18].txbb_iq_amplifygain = -6;
+ priv->txbbgain_table[18].txbbgain_value = 0x2d4000b5;
+ priv->txbbgain_table[19].txbb_iq_amplifygain = -7;
+ priv->txbbgain_table[19].txbbgain_value = 0x2ac000ab;
+ priv->txbbgain_table[20].txbb_iq_amplifygain = -8;
+ priv->txbbgain_table[20].txbbgain_value = 0x288000a2;
+ priv->txbbgain_table[21].txbb_iq_amplifygain = -9;
+ priv->txbbgain_table[21].txbbgain_value = 0x26000098;
+ priv->txbbgain_table[22].txbb_iq_amplifygain = -10;
+ priv->txbbgain_table[22].txbbgain_value = 0x24000090;
+ priv->txbbgain_table[23].txbb_iq_amplifygain = -11;
+ priv->txbbgain_table[23].txbbgain_value = 0x22000088;
+ priv->txbbgain_table[24].txbb_iq_amplifygain = -12;
+ priv->txbbgain_table[24].txbbgain_value = 0x20000080;
+ priv->txbbgain_table[25].txbb_iq_amplifygain = -13;
+ priv->txbbgain_table[25].txbbgain_value = 0x1a00006c;
+ priv->txbbgain_table[26].txbb_iq_amplifygain = -14;
+ priv->txbbgain_table[26].txbbgain_value = 0x1c800072;
+ priv->txbbgain_table[27].txbb_iq_amplifygain = -15;
+ priv->txbbgain_table[27].txbbgain_value = 0x18000060;
+ priv->txbbgain_table[28].txbb_iq_amplifygain = -16;
+ priv->txbbgain_table[28].txbbgain_value = 0x19800066;
+ priv->txbbgain_table[29].txbb_iq_amplifygain = -17;
+ priv->txbbgain_table[29].txbbgain_value = 0x15800056;
+ priv->txbbgain_table[30].txbb_iq_amplifygain = -18;
+ priv->txbbgain_table[30].txbbgain_value = 0x26c0005b;
+ priv->txbbgain_table[31].txbb_iq_amplifygain = -19;
+ priv->txbbgain_table[31].txbbgain_value = 0x14400051;
+ priv->txbbgain_table[32].txbb_iq_amplifygain = -20;
+ priv->txbbgain_table[32].txbbgain_value = 0x24400051;
+ priv->txbbgain_table[33].txbb_iq_amplifygain = -21;
+ priv->txbbgain_table[33].txbbgain_value = 0x1300004c;
+ priv->txbbgain_table[34].txbb_iq_amplifygain = -22;
+ priv->txbbgain_table[34].txbbgain_value = 0x12000048;
+ priv->txbbgain_table[35].txbb_iq_amplifygain = -23;
+ priv->txbbgain_table[35].txbbgain_value = 0x11000044;
+ priv->txbbgain_table[36].txbb_iq_amplifygain = -24;
+ priv->txbbgain_table[36].txbbgain_value = 0x10000040;
+
+ priv->cck_txbbgain_table[0].ccktxbb_valuearray[0] = 0x36;
+ priv->cck_txbbgain_table[0].ccktxbb_valuearray[1] = 0x35;
+ priv->cck_txbbgain_table[0].ccktxbb_valuearray[2] = 0x2e;
+ priv->cck_txbbgain_table[0].ccktxbb_valuearray[3] = 0x25;
+ priv->cck_txbbgain_table[0].ccktxbb_valuearray[4] = 0x1c;
+ priv->cck_txbbgain_table[0].ccktxbb_valuearray[5] = 0x12;
+ priv->cck_txbbgain_table[0].ccktxbb_valuearray[6] = 0x09;
+ priv->cck_txbbgain_table[0].ccktxbb_valuearray[7] = 0x04;
+
+ priv->cck_txbbgain_table[1].ccktxbb_valuearray[0] = 0x33;
+ priv->cck_txbbgain_table[1].ccktxbb_valuearray[1] = 0x32;
+ priv->cck_txbbgain_table[1].ccktxbb_valuearray[2] = 0x2b;
+ priv->cck_txbbgain_table[1].ccktxbb_valuearray[3] = 0x23;
+ priv->cck_txbbgain_table[1].ccktxbb_valuearray[4] = 0x1a;
+ priv->cck_txbbgain_table[1].ccktxbb_valuearray[5] = 0x11;
+ priv->cck_txbbgain_table[1].ccktxbb_valuearray[6] = 0x08;
+ priv->cck_txbbgain_table[1].ccktxbb_valuearray[7] = 0x04;
+
+ priv->cck_txbbgain_table[2].ccktxbb_valuearray[0] = 0x30;
+ priv->cck_txbbgain_table[2].ccktxbb_valuearray[1] = 0x2f;
+ priv->cck_txbbgain_table[2].ccktxbb_valuearray[2] = 0x29;
+ priv->cck_txbbgain_table[2].ccktxbb_valuearray[3] = 0x21;
+ priv->cck_txbbgain_table[2].ccktxbb_valuearray[4] = 0x19;
+ priv->cck_txbbgain_table[2].ccktxbb_valuearray[5] = 0x10;
+ priv->cck_txbbgain_table[2].ccktxbb_valuearray[6] = 0x08;
+ priv->cck_txbbgain_table[2].ccktxbb_valuearray[7] = 0x03;
+
+ priv->cck_txbbgain_table[3].ccktxbb_valuearray[0] = 0x2d;
+ priv->cck_txbbgain_table[3].ccktxbb_valuearray[1] = 0x2d;
+ priv->cck_txbbgain_table[3].ccktxbb_valuearray[2] = 0x27;
+ priv->cck_txbbgain_table[3].ccktxbb_valuearray[3] = 0x1f;
+ priv->cck_txbbgain_table[3].ccktxbb_valuearray[4] = 0x18;
+ priv->cck_txbbgain_table[3].ccktxbb_valuearray[5] = 0x0f;
+ priv->cck_txbbgain_table[3].ccktxbb_valuearray[6] = 0x08;
+ priv->cck_txbbgain_table[3].ccktxbb_valuearray[7] = 0x03;
+
+ priv->cck_txbbgain_table[4].ccktxbb_valuearray[0] = 0x2b;
+ priv->cck_txbbgain_table[4].ccktxbb_valuearray[1] = 0x2a;
+ priv->cck_txbbgain_table[4].ccktxbb_valuearray[2] = 0x25;
+ priv->cck_txbbgain_table[4].ccktxbb_valuearray[3] = 0x1e;
+ priv->cck_txbbgain_table[4].ccktxbb_valuearray[4] = 0x16;
+ priv->cck_txbbgain_table[4].ccktxbb_valuearray[5] = 0x0e;
+ priv->cck_txbbgain_table[4].ccktxbb_valuearray[6] = 0x07;
+ priv->cck_txbbgain_table[4].ccktxbb_valuearray[7] = 0x03;
+
+ priv->cck_txbbgain_table[5].ccktxbb_valuearray[0] = 0x28;
+ priv->cck_txbbgain_table[5].ccktxbb_valuearray[1] = 0x28;
+ priv->cck_txbbgain_table[5].ccktxbb_valuearray[2] = 0x22;
+ priv->cck_txbbgain_table[5].ccktxbb_valuearray[3] = 0x1c;
+ priv->cck_txbbgain_table[5].ccktxbb_valuearray[4] = 0x15;
+ priv->cck_txbbgain_table[5].ccktxbb_valuearray[5] = 0x0d;
+ priv->cck_txbbgain_table[5].ccktxbb_valuearray[6] = 0x07;
+ priv->cck_txbbgain_table[5].ccktxbb_valuearray[7] = 0x03;
+
+ priv->cck_txbbgain_table[6].ccktxbb_valuearray[0] = 0x26;
+ priv->cck_txbbgain_table[6].ccktxbb_valuearray[1] = 0x25;
+ priv->cck_txbbgain_table[6].ccktxbb_valuearray[2] = 0x21;
+ priv->cck_txbbgain_table[6].ccktxbb_valuearray[3] = 0x1b;
+ priv->cck_txbbgain_table[6].ccktxbb_valuearray[4] = 0x14;
+ priv->cck_txbbgain_table[6].ccktxbb_valuearray[5] = 0x0d;
+ priv->cck_txbbgain_table[6].ccktxbb_valuearray[6] = 0x06;
+ priv->cck_txbbgain_table[6].ccktxbb_valuearray[7] = 0x03;
+
+ priv->cck_txbbgain_table[7].ccktxbb_valuearray[0] = 0x24;
+ priv->cck_txbbgain_table[7].ccktxbb_valuearray[1] = 0x23;
+ priv->cck_txbbgain_table[7].ccktxbb_valuearray[2] = 0x1f;
+ priv->cck_txbbgain_table[7].ccktxbb_valuearray[3] = 0x19;
+ priv->cck_txbbgain_table[7].ccktxbb_valuearray[4] = 0x13;
+ priv->cck_txbbgain_table[7].ccktxbb_valuearray[5] = 0x0c;
+ priv->cck_txbbgain_table[7].ccktxbb_valuearray[6] = 0x06;
+ priv->cck_txbbgain_table[7].ccktxbb_valuearray[7] = 0x03;
+
+ priv->cck_txbbgain_table[8].ccktxbb_valuearray[0] = 0x22;
+ priv->cck_txbbgain_table[8].ccktxbb_valuearray[1] = 0x21;
+ priv->cck_txbbgain_table[8].ccktxbb_valuearray[2] = 0x1d;
+ priv->cck_txbbgain_table[8].ccktxbb_valuearray[3] = 0x18;
+ priv->cck_txbbgain_table[8].ccktxbb_valuearray[4] = 0x11;
+ priv->cck_txbbgain_table[8].ccktxbb_valuearray[5] = 0x0b;
+ priv->cck_txbbgain_table[8].ccktxbb_valuearray[6] = 0x06;
+ priv->cck_txbbgain_table[8].ccktxbb_valuearray[7] = 0x02;
+
+ priv->cck_txbbgain_table[9].ccktxbb_valuearray[0] = 0x20;
+ priv->cck_txbbgain_table[9].ccktxbb_valuearray[1] = 0x20;
+ priv->cck_txbbgain_table[9].ccktxbb_valuearray[2] = 0x1b;
+ priv->cck_txbbgain_table[9].ccktxbb_valuearray[3] = 0x16;
+ priv->cck_txbbgain_table[9].ccktxbb_valuearray[4] = 0x11;
+ priv->cck_txbbgain_table[9].ccktxbb_valuearray[5] = 0x08;
+ priv->cck_txbbgain_table[9].ccktxbb_valuearray[6] = 0x05;
+ priv->cck_txbbgain_table[9].ccktxbb_valuearray[7] = 0x02;
+
+ priv->cck_txbbgain_table[10].ccktxbb_valuearray[0] = 0x1f;
+ priv->cck_txbbgain_table[10].ccktxbb_valuearray[1] = 0x1e;
+ priv->cck_txbbgain_table[10].ccktxbb_valuearray[2] = 0x1a;
+ priv->cck_txbbgain_table[10].ccktxbb_valuearray[3] = 0x15;
+ priv->cck_txbbgain_table[10].ccktxbb_valuearray[4] = 0x10;
+ priv->cck_txbbgain_table[10].ccktxbb_valuearray[5] = 0x0a;
+ priv->cck_txbbgain_table[10].ccktxbb_valuearray[6] = 0x05;
+ priv->cck_txbbgain_table[10].ccktxbb_valuearray[7] = 0x02;
+
+ priv->cck_txbbgain_table[11].ccktxbb_valuearray[0] = 0x1d;
+ priv->cck_txbbgain_table[11].ccktxbb_valuearray[1] = 0x1c;
+ priv->cck_txbbgain_table[11].ccktxbb_valuearray[2] = 0x18;
+ priv->cck_txbbgain_table[11].ccktxbb_valuearray[3] = 0x14;
+ priv->cck_txbbgain_table[11].ccktxbb_valuearray[4] = 0x0f;
+ priv->cck_txbbgain_table[11].ccktxbb_valuearray[5] = 0x0a;
+ priv->cck_txbbgain_table[11].ccktxbb_valuearray[6] = 0x05;
+ priv->cck_txbbgain_table[11].ccktxbb_valuearray[7] = 0x02;
+
+ priv->cck_txbbgain_table[12].ccktxbb_valuearray[0] = 0x1b;
+ priv->cck_txbbgain_table[12].ccktxbb_valuearray[1] = 0x1a;
+ priv->cck_txbbgain_table[12].ccktxbb_valuearray[2] = 0x17;
+ priv->cck_txbbgain_table[12].ccktxbb_valuearray[3] = 0x13;
+ priv->cck_txbbgain_table[12].ccktxbb_valuearray[4] = 0x0e;
+ priv->cck_txbbgain_table[12].ccktxbb_valuearray[5] = 0x09;
+ priv->cck_txbbgain_table[12].ccktxbb_valuearray[6] = 0x04;
+ priv->cck_txbbgain_table[12].ccktxbb_valuearray[7] = 0x02;
+
+ priv->cck_txbbgain_table[13].ccktxbb_valuearray[0] = 0x1a;
+ priv->cck_txbbgain_table[13].ccktxbb_valuearray[1] = 0x19;
+ priv->cck_txbbgain_table[13].ccktxbb_valuearray[2] = 0x16;
+ priv->cck_txbbgain_table[13].ccktxbb_valuearray[3] = 0x12;
+ priv->cck_txbbgain_table[13].ccktxbb_valuearray[4] = 0x0d;
+ priv->cck_txbbgain_table[13].ccktxbb_valuearray[5] = 0x09;
+ priv->cck_txbbgain_table[13].ccktxbb_valuearray[6] = 0x04;
+ priv->cck_txbbgain_table[13].ccktxbb_valuearray[7] = 0x02;
+
+ priv->cck_txbbgain_table[14].ccktxbb_valuearray[0] = 0x18;
+ priv->cck_txbbgain_table[14].ccktxbb_valuearray[1] = 0x17;
+ priv->cck_txbbgain_table[14].ccktxbb_valuearray[2] = 0x15;
+ priv->cck_txbbgain_table[14].ccktxbb_valuearray[3] = 0x11;
+ priv->cck_txbbgain_table[14].ccktxbb_valuearray[4] = 0x0c;
+ priv->cck_txbbgain_table[14].ccktxbb_valuearray[5] = 0x08;
+ priv->cck_txbbgain_table[14].ccktxbb_valuearray[6] = 0x04;
+ priv->cck_txbbgain_table[14].ccktxbb_valuearray[7] = 0x02;
+
+ priv->cck_txbbgain_table[15].ccktxbb_valuearray[0] = 0x17;
+ priv->cck_txbbgain_table[15].ccktxbb_valuearray[1] = 0x16;
+ priv->cck_txbbgain_table[15].ccktxbb_valuearray[2] = 0x13;
+ priv->cck_txbbgain_table[15].ccktxbb_valuearray[3] = 0x10;
+ priv->cck_txbbgain_table[15].ccktxbb_valuearray[4] = 0x0c;
+ priv->cck_txbbgain_table[15].ccktxbb_valuearray[5] = 0x08;
+ priv->cck_txbbgain_table[15].ccktxbb_valuearray[6] = 0x04;
+ priv->cck_txbbgain_table[15].ccktxbb_valuearray[7] = 0x02;
+
+ priv->cck_txbbgain_table[16].ccktxbb_valuearray[0] = 0x16;
+ priv->cck_txbbgain_table[16].ccktxbb_valuearray[1] = 0x15;
+ priv->cck_txbbgain_table[16].ccktxbb_valuearray[2] = 0x12;
+ priv->cck_txbbgain_table[16].ccktxbb_valuearray[3] = 0x0f;
+ priv->cck_txbbgain_table[16].ccktxbb_valuearray[4] = 0x0b;
+ priv->cck_txbbgain_table[16].ccktxbb_valuearray[5] = 0x07;
+ priv->cck_txbbgain_table[16].ccktxbb_valuearray[6] = 0x04;
+ priv->cck_txbbgain_table[16].ccktxbb_valuearray[7] = 0x01;
+
+ priv->cck_txbbgain_table[17].ccktxbb_valuearray[0] = 0x14;
+ priv->cck_txbbgain_table[17].ccktxbb_valuearray[1] = 0x14;
+ priv->cck_txbbgain_table[17].ccktxbb_valuearray[2] = 0x11;
+ priv->cck_txbbgain_table[17].ccktxbb_valuearray[3] = 0x0e;
+ priv->cck_txbbgain_table[17].ccktxbb_valuearray[4] = 0x0b;
+ priv->cck_txbbgain_table[17].ccktxbb_valuearray[5] = 0x07;
+ priv->cck_txbbgain_table[17].ccktxbb_valuearray[6] = 0x03;
+ priv->cck_txbbgain_table[17].ccktxbb_valuearray[7] = 0x02;
+
+ priv->cck_txbbgain_table[18].ccktxbb_valuearray[0] = 0x13;
+ priv->cck_txbbgain_table[18].ccktxbb_valuearray[1] = 0x13;
+ priv->cck_txbbgain_table[18].ccktxbb_valuearray[2] = 0x10;
+ priv->cck_txbbgain_table[18].ccktxbb_valuearray[3] = 0x0d;
+ priv->cck_txbbgain_table[18].ccktxbb_valuearray[4] = 0x0a;
+ priv->cck_txbbgain_table[18].ccktxbb_valuearray[5] = 0x06;
+ priv->cck_txbbgain_table[18].ccktxbb_valuearray[6] = 0x03;
+ priv->cck_txbbgain_table[18].ccktxbb_valuearray[7] = 0x01;
+
+ priv->cck_txbbgain_table[19].ccktxbb_valuearray[0] = 0x12;
+ priv->cck_txbbgain_table[19].ccktxbb_valuearray[1] = 0x12;
+ priv->cck_txbbgain_table[19].ccktxbb_valuearray[2] = 0x0f;
+ priv->cck_txbbgain_table[19].ccktxbb_valuearray[3] = 0x0c;
+ priv->cck_txbbgain_table[19].ccktxbb_valuearray[4] = 0x09;
+ priv->cck_txbbgain_table[19].ccktxbb_valuearray[5] = 0x06;
+ priv->cck_txbbgain_table[19].ccktxbb_valuearray[6] = 0x03;
+ priv->cck_txbbgain_table[19].ccktxbb_valuearray[7] = 0x01;
+
+ priv->cck_txbbgain_table[20].ccktxbb_valuearray[0] = 0x11;
+ priv->cck_txbbgain_table[20].ccktxbb_valuearray[1] = 0x11;
+ priv->cck_txbbgain_table[20].ccktxbb_valuearray[2] = 0x0f;
+ priv->cck_txbbgain_table[20].ccktxbb_valuearray[3] = 0x0c;
+ priv->cck_txbbgain_table[20].ccktxbb_valuearray[4] = 0x09;
+ priv->cck_txbbgain_table[20].ccktxbb_valuearray[5] = 0x06;
+ priv->cck_txbbgain_table[20].ccktxbb_valuearray[6] = 0x03;
+ priv->cck_txbbgain_table[20].ccktxbb_valuearray[7] = 0x01;
+
+ priv->cck_txbbgain_table[21].ccktxbb_valuearray[0] = 0x10;
+ priv->cck_txbbgain_table[21].ccktxbb_valuearray[1] = 0x10;
+ priv->cck_txbbgain_table[21].ccktxbb_valuearray[2] = 0x0e;
+ priv->cck_txbbgain_table[21].ccktxbb_valuearray[3] = 0x0b;
+ priv->cck_txbbgain_table[21].ccktxbb_valuearray[4] = 0x08;
+ priv->cck_txbbgain_table[21].ccktxbb_valuearray[5] = 0x05;
+ priv->cck_txbbgain_table[21].ccktxbb_valuearray[6] = 0x03;
+ priv->cck_txbbgain_table[21].ccktxbb_valuearray[7] = 0x01;
+
+ priv->cck_txbbgain_table[22].ccktxbb_valuearray[0] = 0x0f;
+ priv->cck_txbbgain_table[22].ccktxbb_valuearray[1] = 0x0f;
+ priv->cck_txbbgain_table[22].ccktxbb_valuearray[2] = 0x0d;
+ priv->cck_txbbgain_table[22].ccktxbb_valuearray[3] = 0x0b;
+ priv->cck_txbbgain_table[22].ccktxbb_valuearray[4] = 0x08;
+ priv->cck_txbbgain_table[22].ccktxbb_valuearray[5] = 0x05;
+ priv->cck_txbbgain_table[22].ccktxbb_valuearray[6] = 0x03;
+ priv->cck_txbbgain_table[22].ccktxbb_valuearray[7] = 0x01;
+
+ priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[0] = 0x36;
+ priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[1] = 0x35;
+ priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[2] = 0x2e;
+ priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[3] = 0x1b;
+ priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[0] = 0x33;
+ priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[1] = 0x32;
+ priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[2] = 0x2b;
+ priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[3] = 0x19;
+ priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[0] = 0x30;
+ priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[1] = 0x2f;
+ priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[2] = 0x29;
+ priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[3] = 0x18;
+ priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[0] = 0x2d;
+ priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[1] = 0x2d;
+ priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[2] = 0x27;
+ priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[3] = 0x17;
+ priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[0] = 0x2b;
+ priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[1] = 0x2a;
+ priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[2] = 0x25;
+ priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[3] = 0x15;
+ priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[0] = 0x28;
+ priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[1] = 0x28;
+ priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[2] = 0x22;
+ priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[3] = 0x14;
+ priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[0] = 0x26;
+ priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[1] = 0x25;
+ priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[2] = 0x21;
+ priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[3] = 0x13;
+ priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[0] = 0x24;
+ priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[1] = 0x23;
+ priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[2] = 0x1f;
+ priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[3] = 0x12;
+ priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[0] = 0x22;
+ priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[1] = 0x21;
+ priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[2] = 0x1d;
+ priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[3] = 0x11;
+ priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[0] = 0x20;
+ priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[1] = 0x20;
+ priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[2] = 0x1b;
+ priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[3] = 0x10;
+ priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[0] = 0x1f;
+ priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[1] = 0x1e;
+ priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[2] = 0x1a;
+ priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[3] = 0x0f;
+ priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[0] = 0x1d;
+ priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[1] = 0x1c;
+ priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[2] = 0x18;
+ priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[3] = 0x0e;
+ priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[0] = 0x1b;
+ priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[1] = 0x1a;
+ priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[2] = 0x17;
+ priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[3] = 0x0e;
+ priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[0] = 0x1a;
+ priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[1] = 0x19;
+ priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[2] = 0x16;
+ priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[3] = 0x0d;
+ priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[0] = 0x18;
+ priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[1] = 0x17;
+ priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[2] = 0x15;
+ priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[3] = 0x0c;
+ priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[0] = 0x17;
+ priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[1] = 0x16;
+ priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[2] = 0x13;
+ priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[3] = 0x0b;
+ priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[0] = 0x16;
+ priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[1] = 0x15;
+ priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[2] = 0x12;
+ priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[3] = 0x0b;
+ priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[0] = 0x14;
+ priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[1] = 0x14;
+ priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[2] = 0x11;
+ priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[3] = 0x0a;
+ priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[0] = 0x13;
+ priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[1] = 0x13;
+ priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[2] = 0x10;
+ priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[3] = 0x0a;
+ priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[0] = 0x12;
+ priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[1] = 0x12;
+ priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[2] = 0x0f;
+ priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[3] = 0x09;
+ priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[0] = 0x11;
+ priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[1] = 0x11;
+ priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[2] = 0x0f;
+ priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[3] = 0x09;
+ priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[0] = 0x10;
+ priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[1] = 0x10;
+ priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[2] = 0x0e;
+ priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[3] = 0x08;
+ priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[7] = 0x00;
+
+ priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[0] = 0x0f;
+ priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[1] = 0x0f;
+ priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[2] = 0x0d;
+ priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[3] = 0x08;
+ priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[4] = 0x00;
+ priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[5] = 0x00;
+ priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[6] = 0x00;
+ priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[7] = 0x00;
+
+ priv->btxpower_tracking = true;
+ priv->txpower_count = 0;
+ priv->btxpower_trackingInit = false;
+
+}
+
+static void dm_InitializeTXPowerTracking_ThermalMeter(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+
+ if (priv->rtllib->FwRWRF)
+ priv->btxpower_tracking = true;
+ else
+ priv->btxpower_tracking = false;
+ priv->txpower_count = 0;
+ priv->btxpower_trackingInit = false;
+ RT_TRACE(COMP_POWER_TRACKING, "pMgntInfo->bTXPowerTracking = %d\n",
+ priv->btxpower_tracking);
+}
+
+void dm_initialize_txpower_tracking(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ if (priv->IC_Cut >= IC_VersionCut_D)
+ dm_InitializeTXPowerTracking_TSSI(dev);
+ else
+ dm_InitializeTXPowerTracking_ThermalMeter(dev);
+}
+
+static void dm_CheckTXPowerTracking_TSSI(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ static u32 tx_power_track_counter;
+ RT_TRACE(COMP_POWER_TRACKING, "%s()\n", __func__);
+ if (read_nic_byte(dev, 0x11e) == 1)
+ return;
+ if (!priv->btxpower_tracking)
+ return;
+ tx_power_track_counter++;
+
+
+ if (tx_power_track_counter >= 180) {
+ queue_delayed_work_rsl(priv->priv_wq, &priv->txpower_tracking_wq, 0);
+ tx_power_track_counter = 0;
+ }
+
+}
+static void dm_CheckTXPowerTracking_ThermalMeter(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ static u8 TM_Trigger;
+ u8 TxPowerCheckCnt = 0;
+
+ if (IS_HARDWARE_TYPE_8192SE(dev))
+ TxPowerCheckCnt = 5;
+ else
+ TxPowerCheckCnt = 2;
+ if (!priv->btxpower_tracking) {
+ return;
+ } else {
+ if (priv->txpower_count <= TxPowerCheckCnt) {
+ priv->txpower_count++;
+ return;
+ }
+ }
+
+ if (!TM_Trigger) {
+ {
+ rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
+ rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
+ rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
+ rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
+ }
+ TM_Trigger = 1;
+ return;
+ } else {
+ printk(KERN_INFO "===============>Schedule TxPowerTrackingWorkItem\n");
+
+ queue_delayed_work_rsl(priv->priv_wq, &priv->txpower_tracking_wq, 0);
+ TM_Trigger = 0;
+ }
+
+ }
+
+static void dm_check_txpower_tracking(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->IC_Cut >= IC_VersionCut_D)
+ dm_CheckTXPowerTracking_TSSI(dev);
+ else
+ dm_CheckTXPowerTracking_ThermalMeter(dev);
+}
+
+static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
+{
+ u32 TempVal;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ TempVal = 0;
+ if (!bInCH14) {
+ TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[0] +
+ (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[1]<<8)) ;
+
+ rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
+ TempVal = 0;
+ TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[2] +
+ (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[3]<<8) +
+ (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[4]<<16)+
+ (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[5]<<24));
+ rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
+ TempVal = 0;
+ TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[6] +
+ (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[7]<<8)) ;
+
+ rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
+ } else {
+ TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[0] +
+ (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[1]<<8)) ;
+
+ rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
+ TempVal = 0;
+ TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[2] +
+ (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[3]<<8) +
+ (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[4]<<16)+
+ (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[5]<<24));
+ rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
+ TempVal = 0;
+ TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[6] +
+ (priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[7]<<8)) ;
+
+ rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
+ }
+
+
+}
+
+static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH14)
+{
+ u32 TempVal;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ TempVal = 0;
+ if (!bInCH14) {
+ TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
+ (CCKSwingTable_Ch1_Ch13[priv->CCK_index][1]<<8) ;
+ rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
+ RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
+ rCCK0_TxFilter1, TempVal);
+ TempVal = 0;
+ TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
+ (CCKSwingTable_Ch1_Ch13[priv->CCK_index][3]<<8) +
+ (CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16)+
+ (CCKSwingTable_Ch1_Ch13[priv->CCK_index][5]<<24);
+ rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
+ RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
+ rCCK0_TxFilter2, TempVal);
+ TempVal = 0;
+ TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
+ (CCKSwingTable_Ch1_Ch13[priv->CCK_index][7]<<8) ;
+
+ rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
+ RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
+ rCCK0_DebugPort, TempVal);
+ } else {
+ TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
+ (CCKSwingTable_Ch14[priv->CCK_index][1]<<8) ;
+
+ rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
+ RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
+ rCCK0_TxFilter1, TempVal);
+ TempVal = 0;
+ TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
+ (CCKSwingTable_Ch14[priv->CCK_index][3]<<8) +
+ (CCKSwingTable_Ch14[priv->CCK_index][4]<<16)+
+ (CCKSwingTable_Ch14[priv->CCK_index][5]<<24);
+ rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
+ RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
+ rCCK0_TxFilter2, TempVal);
+ TempVal = 0;
+ TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
+ (CCKSwingTable_Ch14[priv->CCK_index][7]<<8) ;
+
+ rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
+ RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
+ rCCK0_DebugPort, TempVal);
+ }
+ }
+
+void dm_cck_txpower_adjust(struct net_device *dev, bool binch14)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ if (priv->IC_Cut >= IC_VersionCut_D)
+ dm_CCKTxPowerAdjust_TSSI(dev, binch14);
+ else
+ dm_CCKTxPowerAdjust_ThermalMeter(dev, binch14);
+}
+
+static void dm_txpower_reset_recovery(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ RT_TRACE(COMP_POWER_TRACKING, "Start Reset Recovery ==>\n");
+ rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
+ priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc80 is %08x\n",
+ priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFA_txPowerTrackingIndex is %x\n",
+ priv->rfa_txpowertrackingindex);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF A I/Q Amplify Gain is %ld\n",
+ priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbb_iq_amplifygain);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: CCK Attenuation is %d dB\n",
+ priv->CCKPresentAttentuation);
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+
+ rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord,
+ priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc90 is %08x\n",
+ priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFC_txPowerTrackingIndex is %x\n",
+ priv->rfc_txpowertrackingindex);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF C I/Q Amplify Gain is %ld\n",
+ priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbb_iq_amplifygain);
+
+}
+
+void dm_restore_dynamic_mechanism_state(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 reg_ratr = priv->rate_adaptive.last_ratr;
+ u32 ratr_value;
+
+ if (IS_NIC_DOWN(priv)) {
+ RT_TRACE(COMP_RATE, "<---- dm_restore_dynamic_mechanism_state(): driver is going to unload\n");
+ return;
+ }
+
+ if (priv->rate_adaptive.rate_adaptive_disabled)
+ return;
+ if (!(priv->rtllib->mode == WIRELESS_MODE_N_24G ||
+ priv->rtllib->mode == WIRELESS_MODE_N_5G))
+ return;
+ ratr_value = reg_ratr;
+ if (priv->rf_type == RF_1T2R)
+ ratr_value &= ~(RATE_ALL_OFDM_2SS);
+ write_nic_dword(dev, RATR0, ratr_value);
+ write_nic_byte(dev, UFWP, 1);
+ if (priv->btxpower_trackingInit && priv->btxpower_tracking)
+ dm_txpower_reset_recovery(dev);
+
+ dm_bb_initialgain_restore(dev);
+
+}
+
+static void dm_bb_initialgain_restore(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 bit_mask = 0x7f;
+
+ if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
+ return;
+
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+ rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bit_mask, (u32)priv->initgain_backup.xaagccore1);
+ rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bit_mask, (u32)priv->initgain_backup.xbagccore1);
+ rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, bit_mask, (u32)priv->initgain_backup.xcagccore1);
+ rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, bit_mask, (u32)priv->initgain_backup.xdagccore1);
+ bit_mask = bMaskByte2;
+ rtl8192_setBBreg(dev, rCCK0_CCA, bit_mask, (u32)priv->initgain_backup.cca);
+
+ RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc50 is %x\n", priv->initgain_backup.xaagccore1);
+ RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc58 is %x\n", priv->initgain_backup.xbagccore1);
+ RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc60 is %x\n", priv->initgain_backup.xcagccore1);
+ RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc68 is %x\n", priv->initgain_backup.xdagccore1);
+ RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xa0a is %x\n", priv->initgain_backup.cca);
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
+
+}
+
+
+void dm_backup_dynamic_mechanism_state(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ priv->bswitch_fsync = false;
+ priv->bfsync_processing = false;
+ dm_bb_initialgain_backup(dev);
+
+}
+
+
+static void dm_bb_initialgain_backup(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 bit_mask = bMaskByte0;
+
+ if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
+ return;
+
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+ priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, bit_mask);
+ priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, bit_mask);
+ priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, bit_mask);
+ priv->initgain_backup.xdagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, bit_mask);
+ bit_mask = bMaskByte2;
+ priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, bit_mask);
+
+ RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc50 is %x\n", priv->initgain_backup.xaagccore1);
+ RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc58 is %x\n", priv->initgain_backup.xbagccore1);
+ RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc60 is %x\n", priv->initgain_backup.xcagccore1);
+ RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc68 is %x\n", priv->initgain_backup.xdagccore1);
+ RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xa0a is %x\n", priv->initgain_backup.cca);
+
+}
+
+void dm_change_dynamic_initgain_thresh(struct net_device *dev,
+ u32 dm_type, u32 dm_value)
+{
+ if (dm_type == DIG_TYPE_THRESH_HIGH) {
+ dm_digtable.rssi_high_thresh = dm_value;
+ } else if (dm_type == DIG_TYPE_THRESH_LOW) {
+ dm_digtable.rssi_low_thresh = dm_value;
+ } else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH) {
+ dm_digtable.rssi_high_power_highthresh = dm_value;
+ } else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH) {
+ dm_digtable.rssi_high_power_highthresh = dm_value;
+ } else if (dm_type == DIG_TYPE_ENABLE) {
+ dm_digtable.dig_state = DM_STA_DIG_MAX;
+ dm_digtable.dig_enable_flag = true;
+ } else if (dm_type == DIG_TYPE_DISABLE) {
+ dm_digtable.dig_state = DM_STA_DIG_MAX;
+ dm_digtable.dig_enable_flag = false;
+ } else if (dm_type == DIG_TYPE_DBG_MODE) {
+ if (dm_value >= DM_DBG_MAX)
+ dm_value = DM_DBG_OFF;
+ dm_digtable.dbg_mode = (u8)dm_value;
+ } else if (dm_type == DIG_TYPE_RSSI) {
+ if (dm_value > 100)
+ dm_value = 30;
+ dm_digtable.rssi_val = (long)dm_value;
+ } else if (dm_type == DIG_TYPE_ALGORITHM) {
+ if (dm_value >= DIG_ALGO_MAX)
+ dm_value = DIG_ALGO_BY_FALSE_ALARM;
+ if (dm_digtable.dig_algorithm != (u8)dm_value)
+ dm_digtable.dig_algorithm_switch = 1;
+ dm_digtable.dig_algorithm = (u8)dm_value;
+ } else if (dm_type == DIG_TYPE_BACKOFF) {
+ if (dm_value > 30)
+ dm_value = 30;
+ dm_digtable.backoff_val = (u8)dm_value;
+ } else if (dm_type == DIG_TYPE_RX_GAIN_MIN) {
+ if (dm_value == 0)
+ dm_value = 0x1;
+ dm_digtable.rx_gain_range_min = (u8)dm_value;
+ } else if (dm_type == DIG_TYPE_RX_GAIN_MAX) {
+ if (dm_value > 0x50)
+ dm_value = 0x50;
+ dm_digtable.rx_gain_range_max = (u8)dm_value;
+ }
+}
+
+static void dm_dig_init(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ dm_digtable.dig_enable_flag = true;
+ dm_digtable.Backoff_Enable_Flag = true;
+
+ dm_digtable.dig_algorithm = DIG_ALGO_BY_RSSI;
+
+ dm_digtable.Dig_TwoPort_Algorithm = DIG_TWO_PORT_ALGO_RSSI;
+ dm_digtable.Dig_Ext_Port_Stage = DIG_EXT_PORT_STAGE_MAX;
+ dm_digtable.dbg_mode = DM_DBG_OFF;
+ dm_digtable.dig_algorithm_switch = 0;
+
+ dm_digtable.dig_state = DM_STA_DIG_MAX;
+ dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
+ dm_digtable.CurSTAConnectState = dm_digtable.PreSTAConnectState = DIG_STA_DISCONNECT;
+ dm_digtable.CurAPConnectState = dm_digtable.PreAPConnectState = DIG_AP_DISCONNECT;
+ dm_digtable.initialgain_lowerbound_state = false;
+
+ dm_digtable.rssi_low_thresh = DM_DIG_THRESH_LOW;
+ dm_digtable.rssi_high_thresh = DM_DIG_THRESH_HIGH;
+
+ dm_digtable.FALowThresh = DM_FALSEALARM_THRESH_LOW;
+ dm_digtable.FAHighThresh = DM_FALSEALARM_THRESH_HIGH;
+
+ dm_digtable.rssi_high_power_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
+ dm_digtable.rssi_high_power_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;
+
+ dm_digtable.rssi_val = 50;
+ dm_digtable.backoff_val = DM_DIG_BACKOFF;
+ dm_digtable.rx_gain_range_max = DM_DIG_MAX;
+ if (priv->CustomerID == RT_CID_819x_Netcore)
+ dm_digtable.rx_gain_range_min = DM_DIG_MIN_Netcore;
+ else
+ dm_digtable.rx_gain_range_min = DM_DIG_MIN;
+
+ dm_digtable.BackoffVal_range_max = DM_DIG_BACKOFF_MAX;
+ dm_digtable.BackoffVal_range_min = DM_DIG_BACKOFF_MIN;
+}
+
+static void dm_ctrl_initgain_byrssi(struct net_device *dev)
+{
+
+ if (dm_digtable.dig_enable_flag == false)
+ return;
+
+ if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
+ dm_ctrl_initgain_byrssi_by_fwfalse_alarm(dev);
+ else if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
+ dm_ctrl_initgain_byrssi_by_driverrssi(dev);
+ else
+ return;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: dm_CtrlInitGainBeforeConnectByRssiAndFalseAlarm()
+ *
+ * Overview: Driver monitor RSSI and False Alarm to change initial gain.
+ Only change initial gain during link in progress.
+ *
+ * Input: IN PADAPTER pAdapter
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 03/04/2009 hpfan Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+
+static void dm_ctrl_initgain_byrssi_by_driverrssi(
+ struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 i;
+ static u8 fw_dig;
+
+ if (dm_digtable.dig_enable_flag == false)
+ return;
+
+ if (dm_digtable.dig_algorithm_switch)
+ fw_dig = 0;
+ if (fw_dig <= 3) {
+ for (i = 0; i < 3; i++)
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+ fw_dig++;
+ dm_digtable.dig_state = DM_STA_DIG_OFF;
+ }
+
+ if (priv->rtllib->state == RTLLIB_LINKED)
+ dm_digtable.CurSTAConnectState = DIG_STA_CONNECT;
+ else
+ dm_digtable.CurSTAConnectState = DIG_STA_DISCONNECT;
+
+
+ if (dm_digtable.dbg_mode == DM_DBG_OFF)
+ dm_digtable.rssi_val = priv->undecorated_smoothed_pwdb;
+ dm_initial_gain(dev);
+ dm_pd_th(dev);
+ dm_cs_ratio(dev);
+ if (dm_digtable.dig_algorithm_switch)
+ dm_digtable.dig_algorithm_switch = 0;
+ dm_digtable.PreSTAConnectState = dm_digtable.CurSTAConnectState;
+
+}
+
+static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
+ struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ static u32 reset_cnt;
+ u8 i;
+
+ if (dm_digtable.dig_enable_flag == false)
+ return;
+
+ if (dm_digtable.dig_algorithm_switch) {
+ dm_digtable.dig_state = DM_STA_DIG_MAX;
+ for (i = 0; i < 3; i++)
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
+ dm_digtable.dig_algorithm_switch = 0;
+ }
+
+ if (priv->rtllib->state != RTLLIB_LINKED)
+ return;
+
+ if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_low_thresh) &&
+ (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_thresh))
+ return;
+ if ((priv->undecorated_smoothed_pwdb <= dm_digtable.rssi_low_thresh)) {
+ if (dm_digtable.dig_state == DM_STA_DIG_OFF &&
+ (priv->reset_count == reset_cnt))
+ return;
+ else
+ reset_cnt = priv->reset_count;
+
+ dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
+ dm_digtable.dig_state = DM_STA_DIG_OFF;
+
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+
+ write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x17);
+ write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x17);
+ write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x17);
+ write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x17);
+
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
+ else
+ write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
+
+ write_nic_byte(dev, 0xa0a, 0x08);
+
+ return;
+ }
+
+ if ((priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh)) {
+ u8 reset_flag = 0;
+
+ if (dm_digtable.dig_state == DM_STA_DIG_ON &&
+ (priv->reset_count == reset_cnt)) {
+ dm_ctrl_initgain_byrssi_highpwr(dev);
+ return;
+ } else {
+ if (priv->reset_count != reset_cnt)
+ reset_flag = 1;
+
+ reset_cnt = priv->reset_count;
+ }
+
+ dm_digtable.dig_state = DM_STA_DIG_ON;
+
+ if (reset_flag == 1) {
+ write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x2c);
+ write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x2c);
+ write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x2c);
+ write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x2c);
+ } else {
+ write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x20);
+ write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x20);
+ write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x20);
+ write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x20);
+ }
+
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
+ else
+ write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
+
+ write_nic_byte(dev, 0xa0a, 0xcd);
+
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
+ }
+ dm_ctrl_initgain_byrssi_highpwr(dev);
+}
+
+
+static void dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ static u32 reset_cnt_highpwr;
+
+ if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_high_power_lowthresh) &&
+ (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_highthresh))
+ return;
+
+ if (priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_power_highthresh) {
+ if (dm_digtable.dig_highpwr_state == DM_STA_DIG_ON &&
+ (priv->reset_count == reset_cnt_highpwr))
+ return;
+ else
+ dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;
+
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
+ else
+ write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
+ } else {
+ if (dm_digtable.dig_highpwr_state == DM_STA_DIG_OFF &&
+ (priv->reset_count == reset_cnt_highpwr))
+ return;
+ else
+ dm_digtable.dig_highpwr_state = DM_STA_DIG_OFF;
+
+ if (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_lowthresh &&
+ priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh) {
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
+ else
+ write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
+ }
+ }
+ reset_cnt_highpwr = priv->reset_count;
+}
+
+static void dm_initial_gain(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 initial_gain = 0;
+ static u8 initialized, force_write;
+ static u32 reset_cnt;
+
+ if (dm_digtable.dig_algorithm_switch) {
+ initialized = 0;
+ reset_cnt = 0;
+ }
+
+ if (rtllib_act_scanning(priv->rtllib, true) == true) {
+ force_write = 1;
+ return;
+ }
+
+ if (dm_digtable.PreSTAConnectState == dm_digtable.CurSTAConnectState) {
+ if (dm_digtable.CurSTAConnectState == DIG_STA_CONNECT) {
+ if ((dm_digtable.rssi_val+10-dm_digtable.backoff_val) > dm_digtable.rx_gain_range_max)
+ dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_max;
+ else if ((dm_digtable.rssi_val+10-dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
+ dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_min;
+ else
+ dm_digtable.cur_ig_value = dm_digtable.rssi_val+10-dm_digtable.backoff_val;
+ } else {
+ if (dm_digtable.cur_ig_value == 0)
+ dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
+ else
+ dm_digtable.cur_ig_value = dm_digtable.pre_ig_value;
+ }
+ } else {
+ dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
+ dm_digtable.pre_ig_value = 0;
+ }
+
+ if (priv->reset_count != reset_cnt) {
+ force_write = 1;
+ reset_cnt = priv->reset_count;
+ }
+
+ if (dm_digtable.pre_ig_value != read_nic_byte(dev, rOFDM0_XAAGCCore1))
+ force_write = 1;
+
+ if ((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value)
+ || !initialized || force_write) {
+ initial_gain = (u8)dm_digtable.cur_ig_value;
+ write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain);
+ write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain);
+ write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain);
+ write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain);
+ dm_digtable.pre_ig_value = dm_digtable.cur_ig_value;
+ initialized = 1;
+ force_write = 0;
+ }
+}
+
+static void dm_pd_th(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ static u8 initialized, force_write;
+ static u32 reset_cnt;
+
+ if (dm_digtable.dig_algorithm_switch) {
+ initialized = 0;
+ reset_cnt = 0;
+ }
+
+ if (dm_digtable.PreSTAConnectState == dm_digtable.CurSTAConnectState) {
+ if (dm_digtable.CurSTAConnectState == DIG_STA_CONNECT) {
+ if (dm_digtable.rssi_val >= dm_digtable.rssi_high_power_highthresh)
+ dm_digtable.curpd_thstate = DIG_PD_AT_HIGH_POWER;
+ else if ((dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh))
+ dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
+ else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh) &&
+ (dm_digtable.rssi_val < dm_digtable.rssi_high_power_lowthresh))
+ dm_digtable.curpd_thstate = DIG_PD_AT_NORMAL_POWER;
+ else
+ dm_digtable.curpd_thstate = dm_digtable.prepd_thstate;
+ } else {
+ dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
+ }
+ } else {
+ dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
+ }
+
+ if (priv->reset_count != reset_cnt) {
+ force_write = 1;
+ reset_cnt = priv->reset_count;
+ }
+
+ if ((dm_digtable.prepd_thstate != dm_digtable.curpd_thstate) ||
+ (initialized <= 3) || force_write) {
+ if (dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER) {
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
+ else
+ write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
+ } else if (dm_digtable.curpd_thstate == DIG_PD_AT_NORMAL_POWER) {
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
+ else
+ write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
+ } else if (dm_digtable.curpd_thstate == DIG_PD_AT_HIGH_POWER) {
+ if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
+ else
+ write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
+ }
+ dm_digtable.prepd_thstate = dm_digtable.curpd_thstate;
+ if (initialized <= 3)
+ initialized++;
+ force_write = 0;
+ }
+}
+
+static void dm_cs_ratio(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ static u8 initialized, force_write;
+ static u32 reset_cnt;
+
+ if (dm_digtable.dig_algorithm_switch) {
+ initialized = 0;
+ reset_cnt = 0;
+ }
+
+ if (dm_digtable.PreSTAConnectState == dm_digtable.CurSTAConnectState) {
+ if (dm_digtable.CurSTAConnectState == DIG_STA_CONNECT) {
+ if ((dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh))
+ dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
+ else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh))
+ dm_digtable.curcs_ratio_state = DIG_CS_RATIO_HIGHER;
+ else
+ dm_digtable.curcs_ratio_state = dm_digtable.precs_ratio_state;
+ } else {
+ dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
+ }
+ } else {
+ dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
+ }
+
+ if (priv->reset_count != reset_cnt) {
+ force_write = 1;
+ reset_cnt = priv->reset_count;
+ }
+
+
+ if ((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
+ !initialized || force_write) {
+ if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER)
+ write_nic_byte(dev, 0xa0a, 0x08);
+ else if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_HIGHER)
+ write_nic_byte(dev, 0xa0a, 0xcd);
+ dm_digtable.precs_ratio_state = dm_digtable.curcs_ratio_state;
+ initialized = 1;
+ force_write = 0;
+ }
+}
+
+void dm_init_edca_turbo(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ priv->bcurrent_turbo_EDCA = false;
+ priv->rtllib->bis_any_nonbepkts = false;
+ priv->bis_cur_rdlstate = false;
+}
+
+static void dm_check_edca_turbo(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_hi_throughput *pHTInfo = priv->rtllib->pHTInfo;
+
+ static unsigned long lastTxOkCnt;
+ static unsigned long lastRxOkCnt;
+ unsigned long curTxOkCnt = 0;
+ unsigned long curRxOkCnt = 0;
+
+ if (priv->rtllib->iw_mode == IW_MODE_ADHOC)
+ goto dm_CheckEdcaTurbo_EXIT;
+ if (priv->rtllib->state != RTLLIB_LINKED)
+ goto dm_CheckEdcaTurbo_EXIT;
+ if (priv->rtllib->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_EDCA_TURBO)
+ goto dm_CheckEdcaTurbo_EXIT;
+
+ {
+ u8 *peername[11] = {
+ "unknown", "realtek_90", "realtek_92se", "broadcom",
+ "ralink", "atheros", "cisco", "marvell", "92u_softap",
+ "self_softap"
+ };
+ static int wb_tmp;
+ if (wb_tmp == 0) {
+ printk(KERN_INFO "%s():iot peer is %s, bssid:"
+ " %pM\n", __func__,
+ peername[pHTInfo->IOTPeer],
+ priv->rtllib->current_network.bssid);
+ wb_tmp = 1;
+ }
+ }
+ if (!priv->rtllib->bis_any_nonbepkts) {
+ curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
+ curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
+ if (pHTInfo->IOTAction & HT_IOT_ACT_EDCA_BIAS_ON_RX) {
+ if (curTxOkCnt > 4*curRxOkCnt) {
+ if (priv->bis_cur_rdlstate ||
+ !priv->bcurrent_turbo_EDCA) {
+ write_nic_dword(dev, EDCAPARA_BE,
+ edca_setting_UL[pHTInfo->IOTPeer]);
+ priv->bis_cur_rdlstate = false;
+ }
+ } else {
+ if (!priv->bis_cur_rdlstate ||
+ !priv->bcurrent_turbo_EDCA) {
+ if (priv->rtllib->mode == WIRELESS_MODE_G)
+ write_nic_dword(dev, EDCAPARA_BE,
+ edca_setting_DL_GMode[pHTInfo->IOTPeer]);
+ else
+ write_nic_dword(dev, EDCAPARA_BE,
+ edca_setting_DL[pHTInfo->IOTPeer]);
+ priv->bis_cur_rdlstate = true;
+ }
+ }
+ priv->bcurrent_turbo_EDCA = true;
+ } else {
+ if (curRxOkCnt > 4*curTxOkCnt) {
+ if (!priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA) {
+ if (priv->rtllib->mode == WIRELESS_MODE_G)
+ write_nic_dword(dev, EDCAPARA_BE,
+ edca_setting_DL_GMode[pHTInfo->IOTPeer]);
+ else
+ write_nic_dword(dev, EDCAPARA_BE,
+ edca_setting_DL[pHTInfo->IOTPeer]);
+ priv->bis_cur_rdlstate = true;
+ }
+ } else {
+ if (priv->bis_cur_rdlstate ||
+ !priv->bcurrent_turbo_EDCA) {
+ write_nic_dword(dev, EDCAPARA_BE,
+ edca_setting_UL[pHTInfo->IOTPeer]);
+ priv->bis_cur_rdlstate = false;
+ }
+
+ }
+
+ priv->bcurrent_turbo_EDCA = true;
+ }
+ } else {
+ if (priv->bcurrent_turbo_EDCA) {
+ u8 tmp = AC0_BE;
+ priv->rtllib->SetHwRegHandler(dev, HW_VAR_AC_PARAM, (u8 *)(&tmp));
+ priv->bcurrent_turbo_EDCA = false;
+ }
+ }
+
+
+dm_CheckEdcaTurbo_EXIT:
+ priv->rtllib->bis_any_nonbepkts = false;
+ lastTxOkCnt = priv->stats.txbytesunicast;
+ lastRxOkCnt = priv->stats.rxbytesunicast;
+}
+
+static void dm_init_ctstoself(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv((struct net_device *)dev);
+
+ priv->rtllib->bCTSToSelfEnable = true;
+ priv->rtllib->CTSToSelfTH = CTSToSelfTHVal;
+}
+
+static void dm_ctstoself(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv((struct net_device *)dev);
+ struct rt_hi_throughput *pHTInfo = priv->rtllib->pHTInfo;
+ static unsigned long lastTxOkCnt;
+ static unsigned long lastRxOkCnt;
+ unsigned long curTxOkCnt = 0;
+ unsigned long curRxOkCnt = 0;
+
+ if (priv->rtllib->bCTSToSelfEnable != true) {
+ pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
+ return;
+ }
+ if (pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) {
+ curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
+ curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
+ if (curRxOkCnt > 4*curTxOkCnt)
+ pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
+ else
+ pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_CTS2SELF;
+
+ lastTxOkCnt = priv->stats.txbytesunicast;
+ lastRxOkCnt = priv->stats.rxbytesunicast;
+ }
+}
+
+
+static void dm_Init_WA_Broadcom_IOT(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv((struct net_device *)dev);
+ struct rt_hi_throughput *pHTInfo = priv->rtllib->pHTInfo;
+
+ pHTInfo->bWAIotBroadcom = false;
+ pHTInfo->WAIotTH = WAIotTHVal;
+}
+
+static void dm_check_pbc_gpio(struct net_device *dev)
+{
+}
+
+void dm_CheckRfCtrlGPIO(void *data)
+{
+ struct r8192_priv *priv = container_of_dwork_rsl(data,
+ struct r8192_priv, gpio_change_rf_wq);
+ struct net_device *dev = priv->rtllib->dev;
+ u8 tmp1byte;
+ enum rt_rf_power_state eRfPowerStateToSet;
+ bool bActuallySet = false;
+ char *argv[3];
+ static char *RadioPowerPath = "/etc/acpi/events/RadioPower.sh";
+ static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin", NULL};
+
+ bActuallySet = false;
+
+ if ((priv->up_first_time == 1) || (priv->being_init_adapter))
+ return;
+
+ if (priv->bfirst_after_down) {
+ priv->bfirst_after_down = 1;
+ return;
+ }
+
+ tmp1byte = read_nic_byte(dev, GPI);
+
+ eRfPowerStateToSet = (tmp1byte&BIT1) ? eRfOn : eRfOff;
+
+ if ((priv->bHwRadioOff == true) && (eRfPowerStateToSet == eRfOn)) {
+ RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio ON\n");
+ printk(KERN_INFO "gpiochangeRF - HW Radio ON\n");
+ priv->bHwRadioOff = false;
+ bActuallySet = true;
+ } else if ((priv->bHwRadioOff == false) && (eRfPowerStateToSet == eRfOff)) {
+ RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio OFF\n");
+ printk(KERN_INFO "gpiochangeRF - HW Radio OFF\n");
+ priv->bHwRadioOff = true;
+ bActuallySet = true;
+ }
+
+ if (bActuallySet) {
+ mdelay(1000);
+ priv->bHwRfOffAction = 1;
+ MgntActSet_RF_State(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW, true);
+ if (priv->bHwRadioOff == true)
+ argv[1] = "RFOFF";
+ else
+ argv[1] = "RFON";
+
+ argv[0] = RadioPowerPath;
+ argv[2] = NULL;
+ call_usermodehelper(RadioPowerPath, argv, envp, 1);
+ }
+}
+
+void dm_rf_pathcheck_workitemcallback(void *data)
+{
+ struct r8192_priv *priv = container_of_dwork_rsl(data,
+ struct r8192_priv,
+ rfpath_check_wq);
+ struct net_device *dev = priv->rtllib->dev;
+ u8 rfpath = 0, i;
+
+ rfpath = read_nic_byte(dev, 0xc04);
+
+ for (i = 0; i < RF90_PATH_MAX; i++) {
+ if (rfpath & (0x01<<i))
+ priv->brfpath_rxenable[i] = 1;
+ else
+ priv->brfpath_rxenable[i] = 0;
+ }
+ if (!DM_RxPathSelTable.Enable)
+ return;
+
+ dm_rxpath_sel_byrssi(dev);
+}
+
+static void dm_init_rxpath_selection(struct net_device *dev)
+{
+ u8 i;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ DM_RxPathSelTable.Enable = 1;
+ DM_RxPathSelTable.SS_TH_low = RxPathSelection_SS_TH_low;
+ DM_RxPathSelTable.diff_TH = RxPathSelection_diff_TH;
+ if (priv->CustomerID == RT_CID_819x_Netcore)
+ DM_RxPathSelTable.cck_method = CCK_Rx_Version_2;
+ else
+ DM_RxPathSelTable.cck_method = CCK_Rx_Version_1;
+ DM_RxPathSelTable.DbgMode = DM_DBG_OFF;
+ DM_RxPathSelTable.disabledRF = 0;
+ for (i = 0; i < 4; i++) {
+ DM_RxPathSelTable.rf_rssi[i] = 50;
+ DM_RxPathSelTable.cck_pwdb_sta[i] = -64;
+ DM_RxPathSelTable.rf_enable_rssi_th[i] = 100;
+ }
+}
+
+#define PWDB_IN_RANGE ((cur_cck_pwdb < tmp_cck_max_pwdb) && \
+ (cur_cck_pwdb > tmp_cck_sec_pwdb))
+
+static void dm_rxpath_sel_byrssi(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 i, max_rssi_index = 0, min_rssi_index = 0;
+ u8 sec_rssi_index = 0, rf_num = 0;
+ u8 tmp_max_rssi = 0, tmp_min_rssi = 0, tmp_sec_rssi = 0;
+ u8 cck_default_Rx = 0x2;
+ u8 cck_optional_Rx = 0x3;
+ long tmp_cck_max_pwdb = 0, tmp_cck_min_pwdb = 0, tmp_cck_sec_pwdb = 0;
+ u8 cck_rx_ver2_max_index = 0, cck_rx_ver2_min_index = 0;
+ u8 cck_rx_ver2_sec_index = 0;
+ u8 cur_rf_rssi;
+ long cur_cck_pwdb;
+ static u8 disabled_rf_cnt, cck_Rx_Path_initialized;
+ u8 update_cck_rx_path;
+
+ if (priv->rf_type != RF_2T4R)
+ return;
+
+ if (!cck_Rx_Path_initialized) {
+ DM_RxPathSelTable.cck_Rx_path = (read_nic_byte(dev, 0xa07)&0xf);
+ cck_Rx_Path_initialized = 1;
+ }
+
+ DM_RxPathSelTable.disabledRF = 0xf;
+ DM_RxPathSelTable.disabledRF &= ~(read_nic_byte(dev, 0xc04));
+
+ if (priv->rtllib->mode == WIRELESS_MODE_B)
+ DM_RxPathSelTable.cck_method = CCK_Rx_Version_2;
+
+ for (i = 0; i < RF90_PATH_MAX; i++) {
+ if (!DM_RxPathSelTable.DbgMode)
+ DM_RxPathSelTable.rf_rssi[i] = priv->stats.rx_rssi_percentage[i];
+
+ if (priv->brfpath_rxenable[i]) {
+ rf_num++;
+ cur_rf_rssi = DM_RxPathSelTable.rf_rssi[i];
+
+ if (rf_num == 1) {
+ max_rssi_index = min_rssi_index = sec_rssi_index = i;
+ tmp_max_rssi = tmp_min_rssi = tmp_sec_rssi = cur_rf_rssi;
+ } else if (rf_num == 2) {
+ if (cur_rf_rssi >= tmp_max_rssi) {
+ tmp_max_rssi = cur_rf_rssi;
+ max_rssi_index = i;
+ } else {
+ tmp_sec_rssi = tmp_min_rssi = cur_rf_rssi;
+ sec_rssi_index = min_rssi_index = i;
+ }
+ } else {
+ if (cur_rf_rssi > tmp_max_rssi) {
+ tmp_sec_rssi = tmp_max_rssi;
+ sec_rssi_index = max_rssi_index;
+ tmp_max_rssi = cur_rf_rssi;
+ max_rssi_index = i;
+ } else if (cur_rf_rssi == tmp_max_rssi) {
+ tmp_sec_rssi = cur_rf_rssi;
+ sec_rssi_index = i;
+ } else if ((cur_rf_rssi < tmp_max_rssi) &&
+ (cur_rf_rssi > tmp_sec_rssi)) {
+ tmp_sec_rssi = cur_rf_rssi;
+ sec_rssi_index = i;
+ } else if (cur_rf_rssi == tmp_sec_rssi) {
+ if (tmp_sec_rssi == tmp_min_rssi) {
+ tmp_sec_rssi = cur_rf_rssi;
+ sec_rssi_index = i;
+ }
+ } else if ((cur_rf_rssi < tmp_sec_rssi) &&
+ (cur_rf_rssi > tmp_min_rssi)) {
+ ;
+ } else if (cur_rf_rssi == tmp_min_rssi) {
+ if (tmp_sec_rssi == tmp_min_rssi) {
+ tmp_min_rssi = cur_rf_rssi;
+ min_rssi_index = i;
+ }
+ } else if (cur_rf_rssi < tmp_min_rssi) {
+ tmp_min_rssi = cur_rf_rssi;
+ min_rssi_index = i;
+ }
+ }
+ }
+ }
+
+ rf_num = 0;
+ if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_2) {
+ for (i = 0; i < RF90_PATH_MAX; i++) {
+ if (priv->brfpath_rxenable[i]) {
+ rf_num++;
+ cur_cck_pwdb =
+ DM_RxPathSelTable.cck_pwdb_sta[i];
+
+ if (rf_num == 1) {
+ cck_rx_ver2_max_index = i;
+ cck_rx_ver2_min_index = i;
+ cck_rx_ver2_sec_index = i;
+ tmp_cck_max_pwdb = cur_cck_pwdb;
+ tmp_cck_min_pwdb = cur_cck_pwdb;
+ tmp_cck_sec_pwdb = cur_cck_pwdb;
+ } else if (rf_num == 2) {
+ if (cur_cck_pwdb >= tmp_cck_max_pwdb) {
+ tmp_cck_max_pwdb = cur_cck_pwdb;
+ cck_rx_ver2_max_index = i;
+ } else {
+ tmp_cck_sec_pwdb = cur_cck_pwdb;
+ tmp_cck_min_pwdb = cur_cck_pwdb;
+ cck_rx_ver2_sec_index = i;
+ cck_rx_ver2_min_index = i;
+ }
+ } else {
+ if (cur_cck_pwdb > tmp_cck_max_pwdb) {
+ tmp_cck_sec_pwdb =
+ tmp_cck_max_pwdb;
+ cck_rx_ver2_sec_index =
+ cck_rx_ver2_max_index;
+ tmp_cck_max_pwdb = cur_cck_pwdb;
+ cck_rx_ver2_max_index = i;
+ } else if (cur_cck_pwdb ==
+ tmp_cck_max_pwdb) {
+ tmp_cck_sec_pwdb = cur_cck_pwdb;
+ cck_rx_ver2_sec_index = i;
+ } else if (PWDB_IN_RANGE) {
+ tmp_cck_sec_pwdb = cur_cck_pwdb;
+ cck_rx_ver2_sec_index = i;
+ } else if (cur_cck_pwdb ==
+ tmp_cck_sec_pwdb) {
+ if (tmp_cck_sec_pwdb ==
+ tmp_cck_min_pwdb) {
+ tmp_cck_sec_pwdb =
+ cur_cck_pwdb;
+ cck_rx_ver2_sec_index =
+ i;
+ }
+ } else if ((cur_cck_pwdb < tmp_cck_sec_pwdb) &&
+ (cur_cck_pwdb > tmp_cck_min_pwdb)) {
+ ;
+ } else if (cur_cck_pwdb == tmp_cck_min_pwdb) {
+ if (tmp_cck_sec_pwdb == tmp_cck_min_pwdb) {
+ tmp_cck_min_pwdb = cur_cck_pwdb;
+ cck_rx_ver2_min_index = i;
+ }
+ } else if (cur_cck_pwdb < tmp_cck_min_pwdb) {
+ tmp_cck_min_pwdb = cur_cck_pwdb;
+ cck_rx_ver2_min_index = i;
+ }
+ }
+
+ }
+ }
+ }
+
+ update_cck_rx_path = 0;
+ if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_2) {
+ cck_default_Rx = cck_rx_ver2_max_index;
+ cck_optional_Rx = cck_rx_ver2_sec_index;
+ if (tmp_cck_max_pwdb != -64)
+ update_cck_rx_path = 1;
+ }
+
+ if (tmp_min_rssi < DM_RxPathSelTable.SS_TH_low && disabled_rf_cnt < 2) {
+ if ((tmp_max_rssi - tmp_min_rssi) >=
+ DM_RxPathSelTable.diff_TH) {
+ DM_RxPathSelTable.rf_enable_rssi_th[min_rssi_index] =
+ tmp_max_rssi+5;
+ rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable,
+ 0x1<<min_rssi_index, 0x0);
+ rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable,
+ 0x1<<min_rssi_index, 0x0);
+ disabled_rf_cnt++;
+ }
+ if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_1) {
+ cck_default_Rx = max_rssi_index;
+ cck_optional_Rx = sec_rssi_index;
+ if (tmp_max_rssi)
+ update_cck_rx_path = 1;
+ }
+ }
+
+ if (update_cck_rx_path) {
+ DM_RxPathSelTable.cck_Rx_path = (cck_default_Rx<<2) |
+ (cck_optional_Rx);
+ rtl8192_setBBreg(dev, rCCK0_AFESetting, 0x0f000000,
+ DM_RxPathSelTable.cck_Rx_path);
+ }
+
+ if (DM_RxPathSelTable.disabledRF) {
+ for (i = 0; i < 4; i++) {
+ if ((DM_RxPathSelTable.disabledRF>>i) & 0x1) {
+ if (tmp_max_rssi >=
+ DM_RxPathSelTable.rf_enable_rssi_th[i]) {
+ rtl8192_setBBreg(dev,
+ rOFDM0_TRxPathEnable, 0x1 << i,
+ 0x1);
+ rtl8192_setBBreg(dev,
+ rOFDM1_TRxPathEnable,
+ 0x1 << i, 0x1);
+ DM_RxPathSelTable.rf_enable_rssi_th[i]
+ = 100;
+ disabled_rf_cnt--;
+ }
+ }
+ }
+ }
+}
+
+static void dm_check_rx_path_selection(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ queue_delayed_work_rsl(priv->priv_wq, &priv->rfpath_check_wq, 0);
+}
+
+
+static void dm_init_fsync(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ priv->rtllib->fsync_time_interval = 500;
+ priv->rtllib->fsync_rate_bitmap = 0x0f000800;
+ priv->rtllib->fsync_rssi_threshold = 30;
+ priv->rtllib->bfsync_enable = false;
+ priv->rtllib->fsync_multiple_timeinterval = 3;
+ priv->rtllib->fsync_firstdiff_ratethreshold = 100;
+ priv->rtllib->fsync_seconddiff_ratethreshold = 200;
+ priv->rtllib->fsync_state = Default_Fsync;
+ priv->framesyncMonitor = 1;
+
+ init_timer(&priv->fsync_timer);
+ setup_timer(&priv->fsync_timer, dm_fsync_timer_callback,
+ (unsigned long) dev);
+}
+
+
+static void dm_deInit_fsync(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ del_timer_sync(&priv->fsync_timer);
+}
+
+void dm_fsync_timer_callback(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
+ u32 rate_index, rate_count = 0, rate_count_diff = 0;
+ bool bSwitchFromCountDiff = false;
+ bool bDoubleTimeInterval = false;
+
+ if (priv->rtllib->state == RTLLIB_LINKED &&
+ priv->rtllib->bfsync_enable &&
+ (priv->rtllib->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC)) {
+ u32 rate_bitmap;
+ for (rate_index = 0; rate_index <= 27; rate_index++) {
+ rate_bitmap = 1 << rate_index;
+ if (priv->rtllib->fsync_rate_bitmap & rate_bitmap)
+ rate_count +=
+ priv->stats.received_rate_histogram[1]
+ [rate_index];
+ }
+
+ if (rate_count < priv->rate_record)
+ rate_count_diff = 0xffffffff - rate_count +
+ priv->rate_record;
+ else
+ rate_count_diff = rate_count - priv->rate_record;
+ if (rate_count_diff < priv->rateCountDiffRecord) {
+
+ u32 DiffNum = priv->rateCountDiffRecord -
+ rate_count_diff;
+ if (DiffNum >=
+ priv->rtllib->fsync_seconddiff_ratethreshold)
+ priv->ContiuneDiffCount++;
+ else
+ priv->ContiuneDiffCount = 0;
+
+ if (priv->ContiuneDiffCount >= 2) {
+ bSwitchFromCountDiff = true;
+ priv->ContiuneDiffCount = 0;
+ }
+ } else {
+ priv->ContiuneDiffCount = 0;
+ }
+
+ if (rate_count_diff <=
+ priv->rtllib->fsync_firstdiff_ratethreshold) {
+ bSwitchFromCountDiff = true;
+ priv->ContiuneDiffCount = 0;
+ }
+ priv->rate_record = rate_count;
+ priv->rateCountDiffRecord = rate_count_diff;
+ RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rate"
+ "Countdiff %d bSwitchFsync %d\n", priv->rate_record,
+ rate_count, rate_count_diff, priv->bswitch_fsync);
+ if (priv->undecorated_smoothed_pwdb >
+ priv->rtllib->fsync_rssi_threshold &&
+ bSwitchFromCountDiff) {
+ bDoubleTimeInterval = true;
+ priv->bswitch_fsync = !priv->bswitch_fsync;
+ if (priv->bswitch_fsync) {
+ write_nic_byte(dev, 0xC36, 0x1c);
+ write_nic_byte(dev, 0xC3e, 0x90);
+ } else {
+ write_nic_byte(dev, 0xC36, 0x5c);
+ write_nic_byte(dev, 0xC3e, 0x96);
+ }
+ } else if (priv->undecorated_smoothed_pwdb <=
+ priv->rtllib->fsync_rssi_threshold) {
+ if (priv->bswitch_fsync) {
+ priv->bswitch_fsync = false;
+ write_nic_byte(dev, 0xC36, 0x5c);
+ write_nic_byte(dev, 0xC3e, 0x96);
+ }
+ }
+ if (bDoubleTimeInterval) {
+ if (timer_pending(&priv->fsync_timer))
+ del_timer_sync(&priv->fsync_timer);
+ priv->fsync_timer.expires = jiffies +
+ MSECS(priv->rtllib->fsync_time_interval *
+ priv->rtllib->fsync_multiple_timeinterval);
+ add_timer(&priv->fsync_timer);
+ } else {
+ if (timer_pending(&priv->fsync_timer))
+ del_timer_sync(&priv->fsync_timer);
+ priv->fsync_timer.expires = jiffies +
+ MSECS(priv->rtllib->fsync_time_interval);
+ add_timer(&priv->fsync_timer);
+ }
+ } else {
+ if (priv->bswitch_fsync) {
+ priv->bswitch_fsync = false;
+ write_nic_byte(dev, 0xC36, 0x5c);
+ write_nic_byte(dev, 0xC3e, 0x96);
+ }
+ priv->ContiuneDiffCount = 0;
+ write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
+ }
+ RT_TRACE(COMP_HALDM, "ContiuneDiffCount %d\n", priv->ContiuneDiffCount);
+ RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d "
+ "bSwitchFsync %d\n", priv->rate_record, rate_count,
+ rate_count_diff, priv->bswitch_fsync);
+}
+
+static void dm_StartHWFsync(struct net_device *dev)
+{
+ u8 rf_timing = 0x77;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ RT_TRACE(COMP_HALDM, "%s\n", __func__);
+ write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cf);
+ priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING,
+ (u8 *)(&rf_timing));
+ write_nic_byte(dev, 0xc3b, 0x41);
+}
+
+static void dm_EndHWFsync(struct net_device *dev)
+{
+ u8 rf_timing = 0xaa;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ RT_TRACE(COMP_HALDM, "%s\n", __func__);
+ write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
+ priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING, (u8 *)
+ (&rf_timing));
+ write_nic_byte(dev, 0xc3b, 0x49);
+}
+
+static void dm_EndSWFsync(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ RT_TRACE(COMP_HALDM, "%s\n", __func__);
+ del_timer_sync(&(priv->fsync_timer));
+
+ if (priv->bswitch_fsync) {
+ priv->bswitch_fsync = false;
+
+ write_nic_byte(dev, 0xC36, 0x5c);
+
+ write_nic_byte(dev, 0xC3e, 0x96);
+ }
+
+ priv->ContiuneDiffCount = 0;
+ write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
+}
+
+static void dm_StartSWFsync(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 rateIndex;
+ u32 rateBitmap;
+
+ RT_TRACE(COMP_HALDM, "%s\n", __func__);
+ priv->rate_record = 0;
+ priv->ContiuneDiffCount = 0;
+ priv->rateCountDiffRecord = 0;
+ priv->bswitch_fsync = false;
+
+ if (priv->rtllib->mode == WIRELESS_MODE_N_24G) {
+ priv->rtllib->fsync_firstdiff_ratethreshold = 600;
+ priv->rtllib->fsync_seconddiff_ratethreshold = 0xffff;
+ } else {
+ priv->rtllib->fsync_firstdiff_ratethreshold = 200;
+ priv->rtllib->fsync_seconddiff_ratethreshold = 200;
+ }
+ for (rateIndex = 0; rateIndex <= 27; rateIndex++) {
+ rateBitmap = 1 << rateIndex;
+ if (priv->rtllib->fsync_rate_bitmap & rateBitmap)
+ priv->rate_record +=
+ priv->stats.received_rate_histogram[1]
+ [rateIndex];
+ }
+ if (timer_pending(&priv->fsync_timer))
+ del_timer_sync(&priv->fsync_timer);
+ priv->fsync_timer.expires = jiffies +
+ MSECS(priv->rtllib->fsync_time_interval);
+ add_timer(&priv->fsync_timer);
+
+ write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd);
+
+}
+
+void dm_check_fsync(struct net_device *dev)
+{
+#define RegC38_Default 0
+#define RegC38_NonFsync_Other_AP 1
+#define RegC38_Fsync_AP_BCM 2
+ struct r8192_priv *priv = rtllib_priv(dev);
+ static u8 reg_c38_State = RegC38_Default;
+ static u32 reset_cnt;
+
+ RT_TRACE(COMP_HALDM, "RSSI %d TimeInterval %d MultipleTimeInterval "
+ "%d\n", priv->rtllib->fsync_rssi_threshold,
+ priv->rtllib->fsync_time_interval,
+ priv->rtllib->fsync_multiple_timeinterval);
+ RT_TRACE(COMP_HALDM, "RateBitmap 0x%x FirstDiffRateThreshold %d Second"
+ "DiffRateThreshold %d\n", priv->rtllib->fsync_rate_bitmap,
+ priv->rtllib->fsync_firstdiff_ratethreshold,
+ priv->rtllib->fsync_seconddiff_ratethreshold);
+
+ if (priv->rtllib->state == RTLLIB_LINKED &&
+ priv->rtllib->pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) {
+ if (priv->rtllib->bfsync_enable == 0) {
+ switch (priv->rtllib->fsync_state) {
+ case Default_Fsync:
+ dm_StartHWFsync(dev);
+ priv->rtllib->fsync_state = HW_Fsync;
+ break;
+ case SW_Fsync:
+ dm_EndSWFsync(dev);
+ dm_StartHWFsync(dev);
+ priv->rtllib->fsync_state = HW_Fsync;
+ break;
+ case HW_Fsync:
+ default:
+ break;
+ }
+ } else {
+ switch (priv->rtllib->fsync_state) {
+ case Default_Fsync:
+ dm_StartSWFsync(dev);
+ priv->rtllib->fsync_state = SW_Fsync;
+ break;
+ case HW_Fsync:
+ dm_EndHWFsync(dev);
+ dm_StartSWFsync(dev);
+ priv->rtllib->fsync_state = SW_Fsync;
+ break;
+ case SW_Fsync:
+ default:
+ break;
+
+ }
+ }
+ if (priv->framesyncMonitor) {
+ if (reg_c38_State != RegC38_Fsync_AP_BCM) {
+ write_nic_byte(dev, rOFDM0_RxDetector3, 0x95);
+
+ reg_c38_State = RegC38_Fsync_AP_BCM;
+ }
+ }
+ } else {
+ switch (priv->rtllib->fsync_state) {
+ case HW_Fsync:
+ dm_EndHWFsync(dev);
+ priv->rtllib->fsync_state = Default_Fsync;
+ break;
+ case SW_Fsync:
+ dm_EndSWFsync(dev);
+ priv->rtllib->fsync_state = Default_Fsync;
+ break;
+ case Default_Fsync:
+ default:
+ break;
+ }
+
+ if (priv->framesyncMonitor) {
+ if (priv->rtllib->state == RTLLIB_LINKED) {
+ if (priv->undecorated_smoothed_pwdb <=
+ RegC38_TH) {
+ if (reg_c38_State !=
+ RegC38_NonFsync_Other_AP) {
+ write_nic_byte(dev,
+ rOFDM0_RxDetector3,
+ 0x90);
+
+ reg_c38_State =
+ RegC38_NonFsync_Other_AP;
+ }
+ } else if (priv->undecorated_smoothed_pwdb >=
+ (RegC38_TH+5)) {
+ if (reg_c38_State) {
+ write_nic_byte(dev,
+ rOFDM0_RxDetector3,
+ priv->framesync);
+ reg_c38_State = RegC38_Default;
+ }
+ }
+ } else {
+ if (reg_c38_State) {
+ write_nic_byte(dev, rOFDM0_RxDetector3,
+ priv->framesync);
+ reg_c38_State = RegC38_Default;
+ }
+ }
+ }
+ }
+ if (priv->framesyncMonitor) {
+ if (priv->reset_count != reset_cnt) {
+ write_nic_byte(dev, rOFDM0_RxDetector3,
+ priv->framesync);
+ reg_c38_State = RegC38_Default;
+ reset_cnt = priv->reset_count;
+ }
+ } else {
+ if (reg_c38_State) {
+ write_nic_byte(dev, rOFDM0_RxDetector3,
+ priv->framesync);
+ reg_c38_State = RegC38_Default;
+ }
+ }
+}
+
+void dm_shadow_init(struct net_device *dev)
+{
+ u8 page;
+ u16 offset;
+
+ for (page = 0; page < 5; page++)
+ for (offset = 0; offset < 256; offset++)
+ dm_shadow[page][offset] = read_nic_byte(dev,
+ offset+page * 256);
+
+ for (page = 8; page < 11; page++)
+ for (offset = 0; offset < 256; offset++)
+ dm_shadow[page][offset] = read_nic_byte(dev,
+ offset+page * 256);
+
+ for (page = 12; page < 15; page++)
+ for (offset = 0; offset < 256; offset++)
+ dm_shadow[page][offset] = read_nic_byte(dev,
+ offset+page*256);
+
+}
+
+/*---------------------------Define function prototype------------------------*/
+static void dm_init_dynamic_txpower(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ priv->rtllib->bdynamic_txpower_enable = true;
+ priv->bLastDTPFlag_High = false;
+ priv->bLastDTPFlag_Low = false;
+ priv->bDynamicTxHighPower = false;
+ priv->bDynamicTxLowPower = false;
+}
+
+static void dm_dynamic_txpower(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ unsigned int txhipower_threshhold = 0;
+ unsigned int txlowpower_threshold = 0;
+ if (priv->rtllib->bdynamic_txpower_enable != true) {
+ priv->bDynamicTxHighPower = false;
+ priv->bDynamicTxLowPower = false;
+ return;
+ }
+ if ((priv->rtllib->pHTInfo->IOTPeer == HT_IOT_PEER_ATHEROS) &&
+ (priv->rtllib->mode == IEEE_G)) {
+ txhipower_threshhold = TX_POWER_ATHEROAP_THRESH_HIGH;
+ txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW;
+ } else {
+ txhipower_threshhold = TX_POWER_NEAR_FIELD_THRESH_HIGH;
+ txlowpower_threshold = TX_POWER_NEAR_FIELD_THRESH_LOW;
+ }
+
+ RT_TRACE(COMP_TXAGC, "priv->undecorated_smoothed_pwdb = %ld\n",
+ priv->undecorated_smoothed_pwdb);
+
+ if (priv->rtllib->state == RTLLIB_LINKED) {
+ if (priv->undecorated_smoothed_pwdb >= txhipower_threshhold) {
+ priv->bDynamicTxHighPower = true;
+ priv->bDynamicTxLowPower = false;
+ } else {
+ if (priv->undecorated_smoothed_pwdb <
+ txlowpower_threshold &&
+ priv->bDynamicTxHighPower == true)
+ priv->bDynamicTxHighPower = false;
+ if (priv->undecorated_smoothed_pwdb < 35)
+ priv->bDynamicTxLowPower = true;
+ else if (priv->undecorated_smoothed_pwdb >= 40)
+ priv->bDynamicTxLowPower = false;
+ }
+ } else {
+ priv->bDynamicTxHighPower = false;
+ priv->bDynamicTxLowPower = false;
+ }
+
+ if ((priv->bDynamicTxHighPower != priv->bLastDTPFlag_High) ||
+ (priv->bDynamicTxLowPower != priv->bLastDTPFlag_Low)) {
+ RT_TRACE(COMP_TXAGC, "SetTxPowerLevel8190() channel = %d\n",
+ priv->rtllib->current_network.channel);
+
+ rtl8192_phy_setTxPower(dev,
+ priv->rtllib->current_network.channel);
+ }
+ priv->bLastDTPFlag_High = priv->bDynamicTxHighPower;
+ priv->bLastDTPFlag_Low = priv->bDynamicTxLowPower;
+
+}
+
+static void dm_check_txrateandretrycount(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+
+ ieee->softmac_stats.CurrentShowTxate = read_nic_byte(dev,
+ Current_Tx_Rate_Reg);
+
+ ieee->softmac_stats.last_packet_rate = read_nic_byte(dev,
+ Initial_Tx_Rate_Reg);
+
+ ieee->softmac_stats.txretrycount = read_nic_dword(dev,
+ Tx_Retry_Count_Reg);
+}
+
+static void dm_send_rssi_tofw(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ write_nic_byte(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
+}
diff --git a/drivers/staging/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl_dm.h
new file mode 100644
index 00000000000..ab44a9a6927
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_dm.h
@@ -0,0 +1,324 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef __R8192UDM_H__
+#define __R8192UDM_H__
+
+
+/*--------------------------Define Parameters-------------------------------*/
+#define OFDM_Table_Length 19
+#define CCK_Table_length 12
+
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+
+#define DM_FALSEALARM_THRESH_LOW 40
+#define DM_FALSEALARM_THRESH_HIGH 1000
+
+#define DM_DIG_HIGH_PWR_THRESH_HIGH 75
+#define DM_DIG_HIGH_PWR_THRESH_LOW 70
+
+#define BW_AUTO_SWITCH_HIGH_LOW 25
+#define BW_AUTO_SWITCH_LOW_HIGH 30
+
+#define DM_check_fsync_time_interval 500
+
+
+#define DM_DIG_BACKOFF 12
+#define DM_DIG_MAX 0x36
+#define DM_DIG_MIN 0x1c
+#define DM_DIG_MIN_Netcore 0x12
+
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+
+#define RxPathSelection_SS_TH_low 30
+#define RxPathSelection_diff_TH 18
+
+#define RateAdaptiveTH_High 50
+#define RateAdaptiveTH_Low_20M 30
+#define RateAdaptiveTH_Low_40M 10
+#define VeryLowRSSI 15
+
+#define CTSToSelfTHVal 35
+
+#define WAIotTHVal 25
+
+#define E_FOR_TX_POWER_TRACK 300
+#define TX_POWER_NEAR_FIELD_THRESH_HIGH 68
+#define TX_POWER_NEAR_FIELD_THRESH_LOW 62
+#define TX_POWER_ATHEROAP_THRESH_HIGH 78
+#define TX_POWER_ATHEROAP_THRESH_LOW 72
+
+#define Current_Tx_Rate_Reg 0x1e0
+#define Initial_Tx_Rate_Reg 0x1e1
+#define Tx_Retry_Count_Reg 0x1ac
+#define RegC38_TH 20
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+
+#define TxHighPwrLevel_Normal 0
+#define TxHighPwrLevel_Level1 1
+#define TxHighPwrLevel_Level2 2
+
+#define DM_Type_ByFW 0
+#define DM_Type_ByDriver 1
+
+/*--------------------------Define Parameters-------------------------------*/
+
+
+/*------------------------------Define structure----------------------------*/
+struct dig_t {
+ u8 dig_enable_flag;
+ u8 dig_algorithm;
+ u8 Dig_TwoPort_Algorithm;
+ u8 Dig_Ext_Port_Stage;
+ u8 dbg_mode;
+ u8 dig_algorithm_switch;
+
+ long rssi_low_thresh;
+ long rssi_high_thresh;
+
+ u32 FALowThresh;
+ u32 FAHighThresh;
+
+ long rssi_high_power_lowthresh;
+ long rssi_high_power_highthresh;
+
+ u8 dig_state;
+ u8 dig_highpwr_state;
+ u8 CurSTAConnectState;
+ u8 PreSTAConnectState;
+ u8 CurAPConnectState;
+ u8 PreAPConnectState;
+
+ u8 curpd_thstate;
+ u8 prepd_thstate;
+ u8 curcs_ratio_state;
+ u8 precs_ratio_state;
+
+ u32 pre_ig_value;
+ u32 cur_ig_value;
+
+ u8 Backoff_Enable_Flag;
+ u8 backoff_val;
+ char BackoffVal_range_max;
+ char BackoffVal_range_min;
+ u8 rx_gain_range_max;
+ u8 rx_gain_range_min;
+ bool initialgain_lowerbound_state;
+
+ long rssi_val;
+};
+
+enum dm_dig_sta {
+ DM_STA_DIG_OFF = 0,
+ DM_STA_DIG_ON,
+ DM_STA_DIG_MAX
+};
+
+
+enum dm_ratr_sta {
+ DM_RATR_STA_HIGH = 0,
+ DM_RATR_STA_MIDDLE = 1,
+ DM_RATR_STA_LOW = 2,
+ DM_RATR_STA_MAX
+};
+
+enum dm_dig_op_sta {
+ DIG_TYPE_THRESH_HIGH = 0,
+ DIG_TYPE_THRESH_LOW = 1,
+ DIG_TYPE_THRESH_HIGHPWR_HIGH = 2,
+ DIG_TYPE_THRESH_HIGHPWR_LOW = 3,
+ DIG_TYPE_DBG_MODE = 4,
+ DIG_TYPE_RSSI = 5,
+ DIG_TYPE_ALGORITHM = 6,
+ DIG_TYPE_BACKOFF = 7,
+ DIG_TYPE_PWDB_FACTOR = 8,
+ DIG_TYPE_RX_GAIN_MIN = 9,
+ DIG_TYPE_RX_GAIN_MAX = 10,
+ DIG_TYPE_ENABLE = 20,
+ DIG_TYPE_DISABLE = 30,
+ DIG_OP_TYPE_MAX
+};
+
+enum dm_dig_alg {
+ DIG_ALGO_BY_FALSE_ALARM = 0,
+ DIG_ALGO_BY_RSSI = 1,
+ DIG_ALGO_BEFORE_CONNECT_BY_RSSI_AND_ALARM = 2,
+ DIG_ALGO_BY_TOW_PORT = 3,
+ DIG_ALGO_MAX
+};
+
+enum dm_dig_two_port_alg {
+ DIG_TWO_PORT_ALGO_RSSI = 0,
+ DIG_TWO_PORT_ALGO_FALSE_ALARM = 1,
+};
+
+
+enum dm_dig_ext_port_alg {
+ DIG_EXT_PORT_STAGE_0 = 0,
+ DIG_EXT_PORT_STAGE_1 = 1,
+ DIG_EXT_PORT_STAGE_2 = 2,
+ DIG_EXT_PORT_STAGE_3 = 3,
+ DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_dbg {
+ DIG_DBG_OFF = 0,
+ DIG_DBG_ON = 1,
+ DIG_DBG_MAX
+};
+
+enum dm_dig_connect {
+ DIG_STA_DISCONNECT = 0,
+ DIG_STA_CONNECT = 1,
+ DIG_STA_BEFORE_CONNECT = 2,
+ DIG_AP_DISCONNECT = 3,
+ DIG_AP_CONNECT = 4,
+ DIG_AP_ADD_STATION = 5,
+ DIG_CONNECT_MAX
+};
+
+enum dm_dig_pd_th {
+ DIG_PD_AT_LOW_POWER = 0,
+ DIG_PD_AT_NORMAL_POWER = 1,
+ DIG_PD_AT_HIGH_POWER = 2,
+ DIG_PD_MAX
+};
+
+enum dm_dig_cs_ratio {
+ DIG_CS_RATIO_LOWER = 0,
+ DIG_CS_RATIO_HIGHER = 1,
+ DIG_CS_MAX
+};
+
+struct drx_path_sel {
+ u8 Enable;
+ u8 DbgMode;
+ u8 cck_method;
+ u8 cck_Rx_path;
+
+ u8 SS_TH_low;
+ u8 diff_TH;
+ u8 disabledRF;
+ u8 reserved;
+
+ u8 rf_rssi[4];
+ u8 rf_enable_rssi_th[4];
+ long cck_pwdb_sta[4];
+};
+
+enum dm_cck_rx_path_method {
+ CCK_Rx_Version_1 = 0,
+ CCK_Rx_Version_2 = 1,
+ CCK_Rx_Version_MAX
+};
+
+
+enum dm_dbg {
+ DM_DBG_OFF = 0,
+ DM_DBG_ON = 1,
+ DM_DBG_MAX
+};
+
+struct dcmd_txcmd {
+ u32 Op;
+ u32 Length;
+ u32 Value;
+};
+/*------------------------------Define structure----------------------------*/
+
+
+/*------------------------Export global variable----------------------------*/
+extern struct dig_t dm_digtable;
+extern u8 dm_shadow[16][256];
+extern struct drx_path_sel DM_RxPathSelTable;
+
+extern u8 test_flag;
+/*------------------------Export global variable----------------------------*/
+
+
+/*------------------------Export Marco Definition---------------------------*/
+#define DM_APInitGainChangeNotify(Event) \
+ { \
+ dm_digtable.CurAPConnectState = Event; \
+ }
+/*------------------------Export Marco Definition---------------------------*/
+
+
+/*--------------------------Exported Function prototype---------------------*/
+/*--------------------------Exported Function prototype---------------------*/
+extern void init_hal_dm(struct net_device *dev);
+extern void deinit_hal_dm(struct net_device *dev);
+
+extern void hal_dm_watchdog(struct net_device *dev);
+
+
+extern void init_rate_adaptive(struct net_device *dev);
+extern void dm_txpower_trackingcallback(void *data);
+
+extern void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
+
+extern void dm_restore_dynamic_mechanism_state(struct net_device *dev);
+extern void dm_backup_dynamic_mechanism_state(struct net_device *dev);
+extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
+ u32 dm_type,
+ u32 dm_value);
+extern void DM_ChangeFsyncSetting(struct net_device *dev,
+ s32 DM_Type,
+ s32 DM_Value);
+extern void dm_force_tx_fw_info(struct net_device *dev,
+ u32 force_type,
+ u32 force_value);
+extern void dm_init_edca_turbo(struct net_device *dev);
+extern void dm_rf_operation_test_callback(unsigned long data);
+extern void dm_rf_pathcheck_workitemcallback(void *data);
+extern void dm_fsync_timer_callback(unsigned long data);
+extern void dm_check_fsync(struct net_device *dev);
+extern void dm_shadow_init(struct net_device *dev);
+extern void dm_initialize_txpower_tracking(struct net_device *dev);
+extern void dm_CheckRfCtrlGPIO(void *data);
+extern void dm_InitRateAdaptiveMask(struct net_device *dev);
+extern void init_hal_dm(struct net_device *dev);
+extern void deinit_hal_dm(struct net_device *dev);
+extern void hal_dm_watchdog(struct net_device *dev);
+extern void init_rate_adaptive(struct net_device *dev);
+extern void dm_txpower_trackingcallback(void *data);
+extern void dm_restore_dynamic_mechanism_state(struct net_device *dev);
+extern void dm_backup_dynamic_mechanism_state(struct net_device *dev);
+extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
+ u32 dm_type,
+ u32 dm_value);
+extern void DM_ChangeFsyncSetting(struct net_device *dev,
+ s32 DM_Type,
+ s32 DM_Value);
+extern void dm_force_tx_fw_info(struct net_device *dev,
+ u32 force_type,
+ u32 force_value);
+extern void dm_init_edca_turbo(struct net_device *dev);
+extern void dm_rf_operation_test_callback(unsigned long data);
+extern void dm_rf_pathcheck_workitemcallback(void *data);
+extern void dm_fsync_timer_callback(unsigned long data);
+extern void dm_check_fsync(struct net_device *dev);
+extern void dm_shadow_init(struct net_device *dev);
+extern void dm_initialize_txpower_tracking(struct net_device *dev);
+extern void dm_CheckRfCtrlGPIO(void *data);
+
+#endif /*__R8192UDM_H__ */
diff --git a/drivers/staging/rtl8192e/rtl_eeprom.c b/drivers/staging/rtl8192e/rtl_eeprom.c
new file mode 100644
index 00000000000..c1ccff4a832
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_eeprom.c
@@ -0,0 +1,139 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#include "rtl_core.h"
+#include "rtl_eeprom.h"
+
+static void eprom_cs(struct net_device *dev, short bit)
+{
+ if (bit)
+ write_nic_byte(dev, EPROM_CMD,
+ (1 << EPROM_CS_SHIFT) |
+ read_nic_byte(dev, EPROM_CMD));
+ else
+ write_nic_byte(dev, EPROM_CMD, read_nic_byte(dev, EPROM_CMD)
+ & ~(1<<EPROM_CS_SHIFT));
+
+ udelay(EPROM_DELAY);
+}
+
+
+static void eprom_ck_cycle(struct net_device *dev)
+{
+ write_nic_byte(dev, EPROM_CMD,
+ (1<<EPROM_CK_SHIFT) | read_nic_byte(dev, EPROM_CMD));
+ udelay(EPROM_DELAY);
+ write_nic_byte(dev, EPROM_CMD,
+ read_nic_byte(dev, EPROM_CMD) & ~(1<<EPROM_CK_SHIFT));
+ udelay(EPROM_DELAY);
+}
+
+
+static void eprom_w(struct net_device *dev, short bit)
+{
+ if (bit)
+ write_nic_byte(dev, EPROM_CMD, (1<<EPROM_W_SHIFT) |
+ read_nic_byte(dev, EPROM_CMD));
+ else
+ write_nic_byte(dev, EPROM_CMD, read_nic_byte(dev, EPROM_CMD)
+ & ~(1<<EPROM_W_SHIFT));
+
+ udelay(EPROM_DELAY);
+}
+
+
+static short eprom_r(struct net_device *dev)
+{
+ short bit;
+
+ bit = (read_nic_byte(dev, EPROM_CMD) & (1<<EPROM_R_SHIFT));
+ udelay(EPROM_DELAY);
+
+ if (bit)
+ return 1;
+ return 0;
+}
+
+static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ eprom_w(dev, b[i]);
+ eprom_ck_cycle(dev);
+ }
+}
+
+u32 eprom_read(struct net_device *dev, u32 addr)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ short read_cmd[] = {1, 1, 0};
+ short addr_str[8];
+ int i;
+ int addr_len;
+ u32 ret;
+
+ ret = 0;
+ write_nic_byte(dev, EPROM_CMD,
+ (EPROM_CMD_PROGRAM << EPROM_CMD_OPERATING_MODE_SHIFT));
+ udelay(EPROM_DELAY);
+
+ if (priv->epromtype == EEPROM_93C56) {
+ addr_str[7] = addr & 1;
+ addr_str[6] = addr & (1<<1);
+ addr_str[5] = addr & (1<<2);
+ addr_str[4] = addr & (1<<3);
+ addr_str[3] = addr & (1<<4);
+ addr_str[2] = addr & (1<<5);
+ addr_str[1] = addr & (1<<6);
+ addr_str[0] = addr & (1<<7);
+ addr_len = 8;
+ } else {
+ addr_str[5] = addr & 1;
+ addr_str[4] = addr & (1<<1);
+ addr_str[3] = addr & (1<<2);
+ addr_str[2] = addr & (1<<3);
+ addr_str[1] = addr & (1<<4);
+ addr_str[0] = addr & (1<<5);
+ addr_len = 6;
+ }
+ eprom_cs(dev, 1);
+ eprom_ck_cycle(dev);
+ eprom_send_bits_string(dev, read_cmd, 3);
+ eprom_send_bits_string(dev, addr_str, addr_len);
+
+ eprom_w(dev, 0);
+
+ for (i = 0; i < 16; i++) {
+ eprom_ck_cycle(dev);
+ ret |= (eprom_r(dev)<<(15-i));
+ }
+
+ eprom_cs(dev, 0);
+ eprom_ck_cycle(dev);
+
+ write_nic_byte(dev, EPROM_CMD,
+ (EPROM_CMD_NORMAL<<EPROM_CMD_OPERATING_MODE_SHIFT));
+ return ret;
+}
diff --git a/drivers/staging/rtl8192e/rtl_eeprom.h b/drivers/staging/rtl8192e/rtl_eeprom.h
new file mode 100644
index 00000000000..9452e1683a7
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_eeprom.h
@@ -0,0 +1,29 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+
+
+#define EPROM_DELAY 10
+
+u32 eprom_read(struct net_device *dev, u32 addr);
diff --git a/drivers/staging/rtl8192e/rtl_ethtool.c b/drivers/staging/rtl8192e/rtl_ethtool.c
new file mode 100644
index 00000000000..36452fb7cef
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_ethtool.c
@@ -0,0 +1,53 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ *****************************************************************************
+ */
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+
+#include "rtl_core.h"
+
+static void rtl819x_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(priv->pdev));
+}
+
+static u32 rtl819x_ethtool_get_link(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ return ((priv->rtllib->state == RTLLIB_LINKED) ||
+ (priv->rtllib->state == RTLLIB_LINKED_SCANNING));
+}
+
+const struct ethtool_ops rtl819x_ethtool_ops = {
+ .get_drvinfo = rtl819x_ethtool_get_drvinfo,
+ .get_link = rtl819x_ethtool_get_link,
+};
diff --git a/drivers/staging/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl_pci.c
new file mode 100644
index 00000000000..ddadcc3e4e7
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_pci.c
@@ -0,0 +1,97 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ *****************************************************************************/
+#include "rtl_pci.h"
+#include "rtl_core.h"
+
+static void rtl8192_parse_pci_configuration(struct pci_dev *pdev,
+ struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+
+ u8 tmp;
+ int pos;
+ u8 LinkCtrlReg;
+
+ pos = pci_find_capability(priv->pdev, PCI_CAP_ID_EXP);
+ pci_read_config_byte(priv->pdev, pos + PCI_EXP_LNKCTL, &LinkCtrlReg);
+ priv->NdisAdapter.LinkCtrlReg = LinkCtrlReg;
+
+ RT_TRACE(COMP_INIT, "Link Control Register =%x\n",
+ priv->NdisAdapter.LinkCtrlReg);
+
+ pci_read_config_byte(pdev, 0x98, &tmp);
+ tmp |= BIT4;
+ pci_write_config_byte(pdev, 0x98, tmp);
+
+ tmp = 0x17;
+ pci_write_config_byte(pdev, 0x70f, tmp);
+}
+
+bool rtl8192_pci_findadapter(struct pci_dev *pdev, struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ u16 VenderID;
+ u16 DeviceID;
+ u8 RevisionID;
+ u16 IrqLine;
+
+ VenderID = pdev->vendor;
+ DeviceID = pdev->device;
+ RevisionID = pdev->revision;
+ pci_read_config_word(pdev, 0x3C, &IrqLine);
+
+ priv->card_8192 = priv->ops->nic_type;
+
+ if (DeviceID == 0x8172) {
+ switch (RevisionID) {
+ case HAL_HW_PCI_REVISION_ID_8192PCIE:
+ printk(KERN_INFO "Adapter(8192 PCI-E) is found - "
+ "DeviceID=%x\n", DeviceID);
+ priv->card_8192 = NIC_8192E;
+ break;
+ case HAL_HW_PCI_REVISION_ID_8192SE:
+ printk(KERN_INFO "Adapter(8192SE) is found - "
+ "DeviceID=%x\n", DeviceID);
+ priv->card_8192 = NIC_8192SE;
+ break;
+ default:
+ printk(KERN_INFO "UNKNOWN nic type(%4x:%4x)\n",
+ pdev->vendor, pdev->device);
+ priv->card_8192 = NIC_UNKNOWN;
+ return false;
+ }
+ }
+
+ if (priv->ops->nic_type != priv->card_8192) {
+ printk(KERN_INFO "Detect info(%x) and hardware info(%x) not match!\n",
+ priv->ops->nic_type, priv->card_8192);
+ printk(KERN_INFO "Please select proper driver before install!!!!\n");
+ return false;
+ }
+
+ rtl8192_parse_pci_configuration(pdev, dev);
+
+ return true;
+}
diff --git a/drivers/staging/rtl8192e/rtl_pci.h b/drivers/staging/rtl8192e/rtl_pci.h
new file mode 100644
index 00000000000..7ea5a47dfd2
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_pci.h
@@ -0,0 +1,104 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ ******************************************************************************/
+#ifndef _RTL_PCI_H
+#define _RTL_PCI_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "rtllib.h"
+
+static inline void NdisRawWritePortUlong(u32 port, u32 val)
+{
+ outl(val, port);
+}
+
+static inline void NdisRawWritePortUchar(u32 port, u8 val)
+{
+ outb(val, port);
+}
+
+static inline void NdisRawReadPortUchar(u32 port, u8 *pval)
+{
+ *pval = inb(port);
+}
+
+static inline void NdisRawReadPortUshort(u32 port, u16 *pval)
+{
+ *pval = inw(port);
+}
+
+static inline void NdisRawReadPortUlong(u32 port, u32 *pval)
+{
+ *pval = inl(port);
+}
+
+struct mp_adapter {
+ u8 LinkCtrlReg;
+
+ u8 BusNumber;
+ u8 DevNumber;
+ u8 FuncNumber;
+
+ u8 PciBridgeBusNum;
+ u8 PciBridgeDevNum;
+ u8 PciBridgeFuncNum;
+ u8 PciBridgeVendor;
+ u16 PciBridgeVendorId;
+ u16 PciBridgeDeviceId;
+ u8 PciBridgePCIeHdrOffset;
+ u8 PciBridgeLinkCtrlReg;
+};
+
+struct rt_pci_capab_header {
+ unsigned char CapabilityID;
+ unsigned char Next;
+};
+
+#define PCI_MAX_BRIDGE_NUMBER 255
+#define PCI_MAX_DEVICES 32
+#define PCI_MAX_FUNCTION 8
+
+#define PCI_CONF_ADDRESS 0x0CF8
+#define PCI_CONF_DATA 0x0CFC
+
+#define PCI_CLASS_BRIDGE_DEV 0x06
+#define PCI_SUBCLASS_BR_PCI_TO_PCI 0x04
+
+#define U1DONTCARE 0xFF
+#define U2DONTCARE 0xFFFF
+#define U4DONTCARE 0xFFFFFFFF
+
+#define INTEL_VENDOR_ID 0x8086
+#define SIS_VENDOR_ID 0x1039
+#define ATI_VENDOR_ID 0x1002
+#define ATI_DEVICE_ID 0x7914
+#define AMD_VENDOR_ID 0x1022
+
+#define PCI_CAPABILITY_ID_PCI_EXPRESS 0x10
+
+struct net_device;
+bool rtl8192_pci_findadapter(struct pci_dev *pdev, struct net_device *dev);
+
+#endif
diff --git a/drivers/staging/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl_pm.c
new file mode 100644
index 00000000000..92e2fde7f5f
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_pm.c
@@ -0,0 +1,136 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+
+#ifdef CONFIG_PM_RTL
+#include "rtl_core.h"
+#include "r8192E_hw.h"
+#include "r8190P_rtl8256.h"
+#include "rtl_pm.h"
+
+int rtl8192E_save_state(struct pci_dev *dev, pm_message_t state)
+{
+ printk(KERN_NOTICE "r8192E save state call (state %u).\n", state.event);
+ return -EAGAIN;
+}
+
+
+int rtl8192E_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u32 ulRegRead;
+
+ printk(KERN_INFO "============> r8192E suspend call.\n");
+ del_timer_sync(&priv->gpio_polling_timer);
+ cancel_delayed_work(&priv->gpio_change_rf_wq);
+ priv->polling_timer_on = 0;
+
+ if (!netif_running(dev)) {
+ printk(KERN_INFO "RTL819XE:UI is open out of suspend "
+ "function\n");
+ goto out_pci_suspend;
+ }
+
+ if (dev->netdev_ops->ndo_stop)
+ dev->netdev_ops->ndo_stop(dev);
+ netif_device_detach(dev);
+
+ if (!priv->rtllib->bSupportRemoteWakeUp) {
+ MgntActSet_RF_State(dev, eRfOff, RF_CHANGE_BY_INIT, true);
+ ulRegRead = read_nic_dword(dev, CPU_GEN);
+ ulRegRead |= CPU_GEN_SYSTEM_RESET;
+ write_nic_dword(dev, CPU_GEN, ulRegRead);
+ } else {
+ write_nic_dword(dev, WFCRC0, 0xffffffff);
+ write_nic_dword(dev, WFCRC1, 0xffffffff);
+ write_nic_dword(dev, WFCRC2, 0xffffffff);
+ write_nic_byte(dev, PMR, 0x5);
+ write_nic_byte(dev, MacBlkCtrl, 0xa);
+ }
+out_pci_suspend:
+ printk("r8192E support WOL call??????????????????????\n");
+ if (priv->rtllib->bSupportRemoteWakeUp)
+ RT_TRACE(COMP_POWER, "r8192E support WOL call!!!!!!!"
+ "!!!!!!!!!!!.\n");
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state),
+ priv->rtllib->bSupportRemoteWakeUp ? 1 : 0);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ mdelay(20);
+
+ return 0;
+}
+
+int rtl8192E_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct r8192_priv *priv = rtllib_priv(dev);
+ int err;
+ u32 val;
+
+ printk(KERN_INFO "================>r8192E resume call.\n");
+
+ pci_set_power_state(pdev, PCI_D0);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+ dev->name);
+ return err;
+ }
+ pci_restore_state(pdev);
+
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ if (priv->polling_timer_on == 0)
+ check_rfctrl_gpio_timer((unsigned long)dev);
+
+ if (!netif_running(dev)) {
+ printk(KERN_INFO "RTL819XE:UI is open out of resume "
+ "function\n");
+ goto out;
+ }
+
+ netif_device_attach(dev);
+ if (dev->netdev_ops->ndo_open)
+ dev->netdev_ops->ndo_open(dev);
+
+ if (!priv->rtllib->bSupportRemoteWakeUp)
+ MgntActSet_RF_State(dev, eRfOn, RF_CHANGE_BY_INIT, true);
+
+out:
+ RT_TRACE(COMP_POWER, "<================r8192E resume call.\n");
+ return 0;
+}
+
+
+int rtl8192E_enable_wake(struct pci_dev *dev, pm_message_t state, int enable)
+{
+ printk(KERN_NOTICE "r8192E enable wake call (state %u, enable %d).\n",
+ state.event, enable);
+ return -EAGAIN;
+}
+
+#endif
diff --git a/drivers/staging/rtl8192e/rtl_pm.h b/drivers/staging/rtl8192e/rtl_pm.h
new file mode 100644
index 00000000000..4d7f4067cc7
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_pm.h
@@ -0,0 +1,35 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+
+#ifdef CONFIG_PM_RTL
+
+#ifndef R8192E_PM_H
+#define R8192E_PM_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+int rtl8192E_save_state(struct pci_dev *dev, pm_message_t state);
+int rtl8192E_suspend(struct pci_dev *dev, pm_message_t state);
+int rtl8192E_resume(struct pci_dev *dev);
+int rtl8192E_enable_wake(struct pci_dev *dev, pm_message_t state, int enable);
+
+#endif
+
+#endif
diff --git a/drivers/staging/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl_ps.c
new file mode 100644
index 00000000000..c9a7c563b68
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_ps.c
@@ -0,0 +1,310 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ *****************************************************************************/
+#include "rtl_ps.h"
+#include "rtl_core.h"
+#include "r8192E_phy.h"
+#include "r8192E_phyreg.h"
+#include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
+#include "r8192E_cmdpkt.h"
+
+static void rtl8192_hw_sleep_down(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ unsigned long flags = 0;
+ spin_lock_irqsave(&priv->rf_ps_lock, flags);
+ if (priv->RFChangeInProgress) {
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
+ RT_TRACE(COMP_DBG, "rtl8192_hw_sleep_down(): RF Change in "
+ "progress!\n");
+ return;
+ }
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
+ RT_TRACE(COMP_DBG, "%s()============>come to sleep down\n", __func__);
+
+ MgntActSet_RF_State(dev, eRfSleep, RF_CHANGE_BY_PS, false);
+}
+
+void rtl8192_hw_sleep_wq(void *data)
+{
+ struct rtllib_device *ieee = container_of_dwork_rsl(data,
+ struct rtllib_device, hw_sleep_wq);
+ struct net_device *dev = ieee->dev;
+ rtl8192_hw_sleep_down(dev);
+}
+
+void rtl8192_hw_wakeup(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ unsigned long flags = 0;
+ spin_lock_irqsave(&priv->rf_ps_lock, flags);
+ if (priv->RFChangeInProgress) {
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
+ RT_TRACE(COMP_DBG, "rtl8192_hw_wakeup(): RF Change in "
+ "progress!\n");
+ queue_delayed_work_rsl(priv->rtllib->wq,
+ &priv->rtllib->hw_wakeup_wq, MSECS(10));
+ return;
+ }
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
+ RT_TRACE(COMP_PS, "%s()============>come to wake up\n", __func__);
+ MgntActSet_RF_State(dev, eRfOn, RF_CHANGE_BY_PS, false);
+}
+
+void rtl8192_hw_wakeup_wq(void *data)
+{
+ struct rtllib_device *ieee = container_of_dwork_rsl(data,
+ struct rtllib_device, hw_wakeup_wq);
+ struct net_device *dev = ieee->dev;
+ rtl8192_hw_wakeup(dev);
+
+}
+
+#define MIN_SLEEP_TIME 50
+#define MAX_SLEEP_TIME 10000
+void rtl8192_hw_to_sleep(struct net_device *dev, u64 time)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ u32 tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ps_lock, flags);
+
+ time -= MSECS(8+16+7);
+
+ if ((time - jiffies) <= MSECS(MIN_SLEEP_TIME)) {
+ spin_unlock_irqrestore(&priv->ps_lock, flags);
+ printk(KERN_INFO "too short to sleep::%lld < %ld\n",
+ time - jiffies, MSECS(MIN_SLEEP_TIME));
+ return;
+ }
+
+ if ((time - jiffies) > MSECS(MAX_SLEEP_TIME)) {
+ printk(KERN_INFO "========>too long to sleep:%lld > %ld\n",
+ time - jiffies, MSECS(MAX_SLEEP_TIME));
+ spin_unlock_irqrestore(&priv->ps_lock, flags);
+ return;
+ }
+ tmp = time - jiffies;
+ queue_delayed_work_rsl(priv->rtllib->wq,
+ &priv->rtllib->hw_wakeup_wq, tmp);
+ queue_delayed_work_rsl(priv->rtllib->wq,
+ (void *)&priv->rtllib->hw_sleep_wq, 0);
+ spin_unlock_irqrestore(&priv->ps_lock, flags);
+}
+
+static void InactivePsWorkItemCallback(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ &(priv->rtllib->PowerSaveControl);
+
+ RT_TRACE(COMP_PS, "InactivePsWorkItemCallback() --------->\n");
+ pPSC->bSwRfProcessing = true;
+
+ RT_TRACE(COMP_PS, "InactivePsWorkItemCallback(): Set RF to %s.\n",
+ pPSC->eInactivePowerState == eRfOff ? "OFF" : "ON");
+ MgntActSet_RF_State(dev, pPSC->eInactivePowerState, RF_CHANGE_BY_IPS,
+ false);
+
+ pPSC->bSwRfProcessing = false;
+ RT_TRACE(COMP_PS, "InactivePsWorkItemCallback() <---------\n");
+}
+
+void IPSEnter(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ &(priv->rtllib->PowerSaveControl);
+ enum rt_rf_power_state rtState;
+
+ if (pPSC->bInactivePs) {
+ rtState = priv->rtllib->eRFPowerState;
+ if (rtState == eRfOn && !pPSC->bSwRfProcessing &&
+ (priv->rtllib->state != RTLLIB_LINKED) &&
+ (priv->rtllib->iw_mode != IW_MODE_MASTER)) {
+ RT_TRACE(COMP_PS, "IPSEnter(): Turn off RF.\n");
+ pPSC->eInactivePowerState = eRfOff;
+ priv->isRFOff = true;
+ priv->bInPowerSaveMode = true;
+ InactivePsWorkItemCallback(dev);
+ }
+ }
+}
+
+void IPSLeave(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ &(priv->rtllib->PowerSaveControl);
+ enum rt_rf_power_state rtState;
+
+ if (pPSC->bInactivePs) {
+ rtState = priv->rtllib->eRFPowerState;
+ if (rtState != eRfOn && !pPSC->bSwRfProcessing &&
+ priv->rtllib->RfOffReason <= RF_CHANGE_BY_IPS) {
+ RT_TRACE(COMP_PS, "IPSLeave(): Turn on RF.\n");
+ pPSC->eInactivePowerState = eRfOn;
+ priv->bInPowerSaveMode = false;
+ InactivePsWorkItemCallback(dev);
+ }
+ }
+}
+
+void IPSLeave_wq(void *data)
+{
+ struct rtllib_device *ieee = container_of_work_rsl(data,
+ struct rtllib_device, ips_leave_wq);
+ struct net_device *dev = ieee->dev;
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ down(&priv->rtllib->ips_sem);
+ IPSLeave(dev);
+ up(&priv->rtllib->ips_sem);
+}
+
+void rtllib_ips_leave_wq(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ enum rt_rf_power_state rtState;
+ rtState = priv->rtllib->eRFPowerState;
+
+ if (priv->rtllib->PowerSaveControl.bInactivePs) {
+ if (rtState == eRfOff) {
+ if (priv->rtllib->RfOffReason > RF_CHANGE_BY_IPS) {
+ RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n",
+ __func__);
+ return;
+ } else {
+ printk(KERN_INFO "=========>%s(): IPSLeave\n",
+ __func__);
+ queue_work_rsl(priv->rtllib->wq,
+ &priv->rtllib->ips_leave_wq);
+ }
+ }
+ }
+}
+
+void rtllib_ips_leave(struct net_device *dev)
+{
+ struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ down(&priv->rtllib->ips_sem);
+ IPSLeave(dev);
+ up(&priv->rtllib->ips_sem);
+}
+
+static bool MgntActSet_802_11_PowerSaveMode(struct net_device *dev,
+ u8 rtPsMode)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->rtllib->iw_mode == IW_MODE_ADHOC)
+ return false;
+
+ RT_TRACE(COMP_LPS, "%s(): set ieee->ps = %x\n", __func__, rtPsMode);
+ if (!priv->ps_force)
+ priv->rtllib->ps = rtPsMode;
+ if (priv->rtllib->sta_sleep != LPS_IS_WAKE &&
+ rtPsMode == RTLLIB_PS_DISABLED) {
+ unsigned long flags;
+
+ rtl8192_hw_wakeup(dev);
+ priv->rtllib->sta_sleep = LPS_IS_WAKE;
+
+ spin_lock_irqsave(&(priv->rtllib->mgmt_tx_lock), flags);
+ RT_TRACE(COMP_DBG, "LPS leave: notify AP we are awaked"
+ " ++++++++++ SendNullFunctionData\n");
+ rtllib_sta_ps_send_null_frame(priv->rtllib, 0);
+ spin_unlock_irqrestore(&(priv->rtllib->mgmt_tx_lock), flags);
+ }
+
+ return true;
+}
+
+void LeisurePSEnter(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ &(priv->rtllib->PowerSaveControl);
+
+ RT_TRACE(COMP_PS, "LeisurePSEnter()...\n");
+ RT_TRACE(COMP_PS, "pPSC->bLeisurePs = %d, ieee->ps = %d,pPSC->LpsIdle"
+ "Count is %d,RT_CHECK_FOR_HANG_PERIOD is %d\n",
+ pPSC->bLeisurePs, priv->rtllib->ps, pPSC->LpsIdleCount,
+ RT_CHECK_FOR_HANG_PERIOD);
+
+ if (!((priv->rtllib->iw_mode == IW_MODE_INFRA) &&
+ (priv->rtllib->state == RTLLIB_LINKED))
+ || (priv->rtllib->iw_mode == IW_MODE_ADHOC) ||
+ (priv->rtllib->iw_mode == IW_MODE_MASTER))
+ return;
+
+ if (pPSC->bLeisurePs) {
+ if (pPSC->LpsIdleCount >= RT_CHECK_FOR_HANG_PERIOD) {
+
+ if (priv->rtllib->ps == RTLLIB_PS_DISABLED) {
+
+ RT_TRACE(COMP_LPS, "LeisurePSEnter(): Enter "
+ "802.11 power save mode...\n");
+
+ if (!pPSC->bFwCtrlLPS) {
+ if (priv->rtllib->SetFwCmdHandler)
+ priv->rtllib->SetFwCmdHandler(
+ dev, FW_CMD_LPS_ENTER);
+ }
+ MgntActSet_802_11_PowerSaveMode(dev,
+ RTLLIB_PS_MBCAST |
+ RTLLIB_PS_UNICAST);
+ }
+ } else
+ pPSC->LpsIdleCount++;
+ }
+}
+
+void LeisurePSLeave(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ &(priv->rtllib->PowerSaveControl);
+
+
+ RT_TRACE(COMP_PS, "LeisurePSLeave()...\n");
+ RT_TRACE(COMP_PS, "pPSC->bLeisurePs = %d, ieee->ps = %d\n",
+ pPSC->bLeisurePs, priv->rtllib->ps);
+
+ if (pPSC->bLeisurePs) {
+ if (priv->rtllib->ps != RTLLIB_PS_DISABLED) {
+ RT_TRACE(COMP_LPS, "LeisurePSLeave(): Busy Traffic , "
+ "Leave 802.11 power save..\n");
+ MgntActSet_802_11_PowerSaveMode(dev,
+ RTLLIB_PS_DISABLED);
+
+ if (!pPSC->bFwCtrlLPS) {
+ if (priv->rtllib->SetFwCmdHandler)
+ priv->rtllib->SetFwCmdHandler(dev,
+ FW_CMD_LPS_LEAVE);
+ }
+ }
+ }
+}
diff --git a/drivers/staging/rtl8192e/rtl_ps.h b/drivers/staging/rtl8192e/rtl_ps.h
new file mode 100644
index 00000000000..a9c2d799c08
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_ps.h
@@ -0,0 +1,47 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ ******************************************************************************/
+#ifndef _RTL_PS_H
+#define _RTL_PS_H
+
+#include <linux/types.h>
+#include "rtllib.h"
+struct net_device;
+
+#define RT_CHECK_FOR_HANG_PERIOD 2
+#define INIT_DEFAULT_CHAN 1
+
+void rtl8192_hw_wakeup(struct net_device *dev);
+void rtl8192_hw_to_sleep(struct net_device *dev, u64 time);
+void rtllib_ips_leave_wq(struct net_device *dev);
+void rtllib_ips_leave(struct net_device *dev);
+void IPSLeave_wq(void *data);
+
+void IPSEnter(struct net_device *dev);
+void IPSLeave(struct net_device *dev);
+
+void LeisurePSEnter(struct net_device *dev);
+void LeisurePSLeave(struct net_device *dev);
+
+#endif
diff --git a/drivers/staging/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl_wx.c
new file mode 100644
index 00000000000..93b1edbe6ba
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_wx.c
@@ -0,0 +1,1333 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+
+#include <linux/string.h>
+#include "rtl_core.h"
+#include "dot11d.h"
+
+#define RATE_COUNT 12
+static u32 rtl8192_rates[] = {
+ 1000000, 2000000, 5500000, 11000000, 6000000, 9000000, 12000000,
+ 18000000, 24000000, 36000000, 48000000, 54000000
+};
+
+#ifndef ENETDOWN
+#define ENETDOWN 1
+#endif
+
+static int r8192_wx_get_freq(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ return rtllib_wx_get_freq(priv->rtllib, a, wrqu, b);
+}
+
+
+static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ return rtllib_wx_get_mode(priv->rtllib, a, wrqu, b);
+}
+
+static int r8192_wx_get_rate(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ return rtllib_wx_get_rate(priv->rtllib, info, wrqu, extra);
+}
+
+
+
+static int r8192_wx_set_rate(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ down(&priv->wx_sem);
+
+ ret = rtllib_wx_set_rate(priv->rtllib, info, wrqu, extra);
+
+ up(&priv->wx_sem);
+
+ return ret;
+}
+
+
+static int r8192_wx_set_rts(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ down(&priv->wx_sem);
+
+ ret = rtllib_wx_set_rts(priv->rtllib, info, wrqu, extra);
+
+ up(&priv->wx_sem);
+
+ return ret;
+}
+
+static int r8192_wx_get_rts(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ return rtllib_wx_get_rts(priv->rtllib, info, wrqu, extra);
+}
+
+static int r8192_wx_set_power(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->bHwRadioOff == true) {
+ RT_TRACE(COMP_ERR, "%s():Hw is Radio Off, we can't set "
+ "Power,return\n", __func__);
+ return 0;
+ }
+ down(&priv->wx_sem);
+
+ ret = rtllib_wx_set_power(priv->rtllib, info, wrqu, extra);
+
+ up(&priv->wx_sem);
+
+ return ret;
+}
+
+static int r8192_wx_get_power(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ return rtllib_wx_get_power(priv->rtllib, info, wrqu, extra);
+}
+
+static int r8192_wx_set_rawtx(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ int ret;
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ down(&priv->wx_sem);
+
+ ret = rtllib_wx_set_rawtx(priv->rtllib, info, wrqu, extra);
+
+ up(&priv->wx_sem);
+
+ return ret;
+
+}
+
+static int r8192_wx_force_reset(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ down(&priv->wx_sem);
+
+ RT_TRACE(COMP_DBG, "%s(): force reset ! extra is %d\n",
+ __func__, *extra);
+ priv->force_reset = *extra;
+ up(&priv->wx_sem);
+ return 0;
+
+}
+
+static int r8192_wx_force_mic_error(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+
+ down(&priv->wx_sem);
+
+ RT_TRACE(COMP_DBG, "%s(): force mic error !\n", __func__);
+ ieee->force_mic_error = true;
+ up(&priv->wx_sem);
+ return 0;
+
+}
+
+#define MAX_ADHOC_PEER_NUM 64
+struct adhoc_peer_entry {
+ unsigned char MacAddr[ETH_ALEN];
+ unsigned char WirelessMode;
+ unsigned char bCurTxBW40MHz;
+};
+struct adhoc_peers_info {
+ struct adhoc_peer_entry Entry[MAX_ADHOC_PEER_NUM];
+ unsigned char num;
+};
+
+static int r8192_wx_get_adhoc_peers(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ return 0;
+}
+
+
+static int r8191se_wx_get_firm_version(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *wrqu, char *extra)
+{
+ return 0;
+}
+
+static int r8192_wx_adapter_power_status(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ (&(priv->rtllib->PowerSaveControl));
+ struct rtllib_device *ieee = priv->rtllib;
+
+ down(&priv->wx_sem);
+
+ RT_TRACE(COMP_POWER, "%s(): %s\n", __func__, (*extra == 6) ?
+ "DC power" : "AC power");
+ if (*extra || priv->force_lps) {
+ priv->ps_force = false;
+ pPSC->bLeisurePs = true;
+ } else {
+ if (priv->rtllib->state == RTLLIB_LINKED)
+ LeisurePSLeave(dev);
+
+ priv->ps_force = true;
+ pPSC->bLeisurePs = false;
+ ieee->ps = *extra;
+ }
+
+ up(&priv->wx_sem);
+
+ return 0;
+}
+
+static int r8192se_wx_set_radio(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ down(&priv->wx_sem);
+
+ printk(KERN_INFO "%s(): set radio ! extra is %d\n", __func__, *extra);
+ if ((*extra != 0) && (*extra != 1)) {
+ RT_TRACE(COMP_ERR, "%s(): set radio an err value,must 0(radio "
+ "off) or 1(radio on)\n", __func__);
+ up(&priv->wx_sem);
+ return -1;
+ }
+ priv->sw_radio_on = *extra;
+ up(&priv->wx_sem);
+ return 0;
+
+}
+
+static int r8192se_wx_set_lps_awake_interval(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ (&(priv->rtllib->PowerSaveControl));
+
+ down(&priv->wx_sem);
+
+ printk(KERN_INFO "%s(): set lps awake interval ! extra is %d\n",
+ __func__, *extra);
+
+ pPSC->RegMaxLPSAwakeIntvl = *extra;
+ up(&priv->wx_sem);
+ return 0;
+}
+
+static int r8192se_wx_set_force_lps(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ down(&priv->wx_sem);
+
+ printk(KERN_INFO "%s(): force LPS ! extra is %d (1 is open 0 is "
+ "close)\n", __func__, *extra);
+ priv->force_lps = *extra;
+ up(&priv->wx_sem);
+ return 0;
+
+}
+
+static int r8192_wx_set_debugflag(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u8 c = *extra;
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ printk(KERN_INFO "=====>%s(), *extra:%x, debugflag:%x\n", __func__,
+ *extra, rt_global_debug_component);
+ if (c > 0)
+ rt_global_debug_component |= (1<<c);
+ else
+ rt_global_debug_component &= BIT31;
+ return 0;
+}
+
+static int r8192_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = netdev_priv_rsl(dev);
+
+ enum rt_rf_power_state rtState;
+ int ret;
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+ rtState = priv->rtllib->eRFPowerState;
+ down(&priv->wx_sem);
+ if (wrqu->mode == IW_MODE_ADHOC || wrqu->mode == IW_MODE_MONITOR ||
+ ieee->bNetPromiscuousMode) {
+ if (priv->rtllib->PowerSaveControl.bInactivePs) {
+ if (rtState == eRfOff) {
+ if (priv->rtllib->RfOffReason >
+ RF_CHANGE_BY_IPS) {
+ RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n",
+ __func__);
+ up(&priv->wx_sem);
+ return -1;
+ } else {
+ printk(KERN_INFO "=========>%s(): "
+ "IPSLeave\n", __func__);
+ down(&priv->rtllib->ips_sem);
+ IPSLeave(dev);
+ up(&priv->rtllib->ips_sem);
+ }
+ }
+ }
+ }
+ ret = rtllib_wx_set_mode(priv->rtllib, a, wrqu, b);
+
+ up(&priv->wx_sem);
+ return ret;
+}
+
+struct iw_range_with_scan_capa {
+ /* Informative stuff (to choose between different interface) */
+ __u32 throughput; /* To give an idea... */
+ /* In theory this value should be the maximum benchmarked
+ * TCP/IP throughput, because with most of these devices the
+ * bit rate is meaningless (overhead an co) to estimate how
+ * fast the connection will go and pick the fastest one.
+ * I suggest people to play with Netperf or any benchmark...
+ */
+
+ /* NWID (or domain id) */
+ __u32 min_nwid; /* Minimal NWID we are able to set */
+ __u32 max_nwid; /* Maximal NWID we are able to set */
+
+ /* Old Frequency (backward compat - moved lower ) */
+ __u16 old_num_channels;
+ __u8 old_num_frequency;
+
+ /* Scan capabilities */
+ __u8 scan_capa;
+};
+
+static int rtl8192_wx_get_range(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct iw_range *range = (struct iw_range *)extra;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ u16 val;
+ int i;
+
+ wrqu->data.length = sizeof(*range);
+ memset(range, 0, sizeof(*range));
+
+ /* ~130 Mb/s real (802.11n) */
+ range->throughput = 130 * 1000 * 1000;
+
+ if (priv->rf_set_sens != NULL) {
+ /* signal level threshold range */
+ range->sensitivity = priv->max_sens;
+ }
+
+ range->max_qual.qual = 100;
+ range->max_qual.level = 0;
+ range->max_qual.noise = 0;
+ range->max_qual.updated = 7; /* Updated all three */
+
+ range->avg_qual.qual = 70; /* > 8% missed beacons is 'bad' */
+ range->avg_qual.level = 0;
+ range->avg_qual.noise = 0;
+ range->avg_qual.updated = 7; /* Updated all three */
+
+ range->num_bitrates = min(RATE_COUNT, IW_MAX_BITRATES);
+
+ for (i = 0; i < range->num_bitrates; i++)
+ range->bitrate[i] = rtl8192_rates[i];
+
+ range->max_rts = DEFAULT_RTS_THRESHOLD;
+ range->min_frag = MIN_FRAG_THRESHOLD;
+ range->max_frag = MAX_FRAG_THRESHOLD;
+
+ range->min_pmp = 0;
+ range->max_pmp = 5000000;
+ range->min_pmt = 0;
+ range->max_pmt = 65535*1000;
+ range->pmp_flags = IW_POWER_PERIOD;
+ range->pmt_flags = IW_POWER_TIMEOUT;
+ range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 18;
+
+ for (i = 0, val = 0; i < 14; i++) {
+ if ((priv->rtllib->active_channel_map)[i+1]) {
+ range->freq[val].i = i + 1;
+ range->freq[val].m = rtllib_wlan_frequencies[i] *
+ 100000;
+ range->freq[val].e = 1;
+ val++;
+ }
+
+ if (val == IW_MAX_FREQUENCIES)
+ break;
+ }
+ range->num_frequency = val;
+ range->num_channels = val;
+ range->enc_capa = IW_ENC_CAPA_WPA|IW_ENC_CAPA_WPA2|
+ IW_ENC_CAPA_CIPHER_TKIP|IW_ENC_CAPA_CIPHER_CCMP;
+ range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
+
+ /* Event capability (kernel + driver) */
+
+ return 0;
+}
+
+static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+ enum rt_rf_power_state rtState;
+ int ret;
+
+ if (!(ieee->softmac_features & IEEE_SOFTMAC_SCAN)) {
+ if ((ieee->state >= RTLLIB_ASSOCIATING) &&
+ (ieee->state <= RTLLIB_ASSOCIATING_AUTHENTICATED))
+ return 0;
+ if ((priv->rtllib->state == RTLLIB_LINKED) &&
+ (priv->rtllib->CntAfterLink < 2))
+ return 0;
+ }
+
+ if (priv->bHwRadioOff == true) {
+ printk(KERN_INFO "================>%s(): hwradio off\n",
+ __func__);
+ return 0;
+ }
+ rtState = priv->rtllib->eRFPowerState;
+ if (!priv->up)
+ return -ENETDOWN;
+ if (priv->rtllib->LinkDetectInfo.bBusyTraffic == true)
+ return -EAGAIN;
+
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ struct iw_scan_req *req = (struct iw_scan_req *)b;
+ if (req->essid_len) {
+ ieee->current_network.ssid_len = req->essid_len;
+ memcpy(ieee->current_network.ssid, req->essid,
+ req->essid_len);
+ }
+ }
+
+ down(&priv->wx_sem);
+
+ priv->rtllib->FirstIe_InScan = true;
+
+ if (priv->rtllib->state != RTLLIB_LINKED) {
+ if (priv->rtllib->PowerSaveControl.bInactivePs) {
+ if (rtState == eRfOff) {
+ if (priv->rtllib->RfOffReason >
+ RF_CHANGE_BY_IPS) {
+ RT_TRACE(COMP_ERR, "%s(): RF is "
+ "OFF.\n", __func__);
+ up(&priv->wx_sem);
+ return -1;
+ } else {
+ RT_TRACE(COMP_PS, "=========>%s(): "
+ "IPSLeave\n", __func__);
+ down(&priv->rtllib->ips_sem);
+ IPSLeave(dev);
+ up(&priv->rtllib->ips_sem);
+ }
+ }
+ }
+ rtllib_stop_scan(priv->rtllib);
+ if (priv->rtllib->LedControlHandler)
+ priv->rtllib->LedControlHandler(dev,
+ LED_CTL_SITE_SURVEY);
+
+ if (priv->rtllib->eRFPowerState != eRfOff) {
+ priv->rtllib->actscanning = true;
+
+ if (ieee->ScanOperationBackupHandler)
+ ieee->ScanOperationBackupHandler(ieee->dev,
+ SCAN_OPT_BACKUP);
+
+ rtllib_start_scan_syncro(priv->rtllib, 0);
+
+ if (ieee->ScanOperationBackupHandler)
+ ieee->ScanOperationBackupHandler(ieee->dev,
+ SCAN_OPT_RESTORE);
+ }
+ ret = 0;
+ } else {
+ priv->rtllib->actscanning = true;
+ ret = rtllib_wx_set_scan(priv->rtllib, a, wrqu, b);
+ }
+
+ up(&priv->wx_sem);
+ return ret;
+}
+
+
+static int r8192_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+
+ int ret;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (!priv->up)
+ return -ENETDOWN;
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+
+ down(&priv->wx_sem);
+
+ ret = rtllib_wx_get_scan(priv->rtllib, a, wrqu, b);
+
+ up(&priv->wx_sem);
+
+ return ret;
+}
+
+static int r8192_wx_set_essid(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ int ret;
+
+ if ((rtllib_act_scanning(priv->rtllib, false)) &&
+ !(priv->rtllib->softmac_features & IEEE_SOFTMAC_SCAN)) {
+ ; /* TODO - get rid of if */
+ }
+ if (priv->bHwRadioOff == true) {
+ printk(KERN_INFO "=========>%s():hw radio off,or Rf state is "
+ "eRfOff, return\n", __func__);
+ return 0;
+ }
+ down(&priv->wx_sem);
+ ret = rtllib_wx_set_essid(priv->rtllib, a, wrqu, b);
+
+ up(&priv->wx_sem);
+
+ return ret;
+}
+
+static int r8192_wx_get_essid(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ int ret;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ down(&priv->wx_sem);
+
+ ret = rtllib_wx_get_essid(priv->rtllib, a, wrqu, b);
+
+ up(&priv->wx_sem);
+
+ return ret;
+}
+
+static int r8192_wx_set_nick(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (wrqu->data.length > IW_ESSID_MAX_SIZE)
+ return -E2BIG;
+ down(&priv->wx_sem);
+ wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
+ memset(priv->nick, 0, sizeof(priv->nick));
+ memcpy(priv->nick, extra, wrqu->data.length);
+ up(&priv->wx_sem);
+ return 0;
+
+}
+
+static int r8192_wx_get_nick(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ down(&priv->wx_sem);
+ wrqu->data.length = strlen(priv->nick);
+ memcpy(extra, priv->nick, wrqu->data.length);
+ wrqu->data.flags = 1; /* active */
+ up(&priv->wx_sem);
+ return 0;
+}
+
+static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ int ret;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ down(&priv->wx_sem);
+
+ ret = rtllib_wx_set_freq(priv->rtllib, a, wrqu, b);
+
+ up(&priv->wx_sem);
+ return ret;
+}
+
+static int r8192_wx_get_name(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ return rtllib_wx_get_name(priv->rtllib, info, wrqu, extra);
+}
+
+
+static int r8192_wx_set_frag(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ if (wrqu->frag.disabled)
+ priv->rtllib->fts = DEFAULT_FRAG_THRESHOLD;
+ else {
+ if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
+ wrqu->frag.value > MAX_FRAG_THRESHOLD)
+ return -EINVAL;
+
+ priv->rtllib->fts = wrqu->frag.value & ~0x1;
+ }
+
+ return 0;
+}
+
+
+static int r8192_wx_get_frag(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ wrqu->frag.value = priv->rtllib->fts;
+ wrqu->frag.fixed = 0; /* no auto select */
+ wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FRAG_THRESHOLD);
+
+ return 0;
+}
+
+
+static int r8192_wx_set_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *awrq,
+ char *extra)
+{
+ int ret;
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if ((rtllib_act_scanning(priv->rtllib, false)) &&
+ !(priv->rtllib->softmac_features & IEEE_SOFTMAC_SCAN)) {
+ ; /* TODO - get rid of if */
+ }
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ down(&priv->wx_sem);
+
+ ret = rtllib_wx_set_wap(priv->rtllib, info, awrq, extra);
+
+ up(&priv->wx_sem);
+
+ return ret;
+
+}
+
+
+static int r8192_wx_get_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ return rtllib_wx_get_wap(priv->rtllib, info, wrqu, extra);
+}
+
+
+static int r8192_wx_get_enc(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *key)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ return rtllib_wx_get_encode(priv->rtllib, info, wrqu, key);
+}
+
+static int r8192_wx_set_enc(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *key)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ int ret;
+
+ struct rtllib_device *ieee = priv->rtllib;
+ u32 hwkey[4] = {0, 0, 0, 0};
+ u8 mask = 0xff;
+ u32 key_idx = 0;
+ u8 zero_addr[4][6] = {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} };
+ int i;
+
+ if ((rtllib_act_scanning(priv->rtllib, false)) &&
+ !(priv->rtllib->softmac_features & IEEE_SOFTMAC_SCAN))
+ ; /* TODO - get rid of if */
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ if (!priv->up)
+ return -ENETDOWN;
+
+ priv->rtllib->wx_set_enc = 1;
+ down(&priv->rtllib->ips_sem);
+ IPSLeave(dev);
+ up(&priv->rtllib->ips_sem);
+ down(&priv->wx_sem);
+
+ RT_TRACE(COMP_SEC, "Setting SW wep key");
+ ret = rtllib_wx_set_encode(priv->rtllib, info, wrqu, key);
+ up(&priv->wx_sem);
+
+
+ if (wrqu->encoding.flags & IW_ENCODE_DISABLED) {
+ ieee->pairwise_key_type = ieee->group_key_type = KEY_TYPE_NA;
+ CamResetAllEntry(dev);
+ memset(priv->rtllib->swcamtable, 0,
+ sizeof(struct sw_cam_table) * 32);
+ goto end_hw_sec;
+ }
+ if (wrqu->encoding.length != 0) {
+
+ for (i = 0; i < 4; i++) {
+ hwkey[i] |= key[4*i+0]&mask;
+ if (i == 1 && (4 * i + 1) == wrqu->encoding.length)
+ mask = 0x00;
+ if (i == 3 && (4 * i + 1) == wrqu->encoding.length)
+ mask = 0x00;
+ hwkey[i] |= (key[4 * i + 1] & mask) << 8;
+ hwkey[i] |= (key[4 * i + 2] & mask) << 16;
+ hwkey[i] |= (key[4 * i + 3] & mask) << 24;
+ }
+
+ #define CONF_WEP40 0x4
+ #define CONF_WEP104 0x14
+
+ switch (wrqu->encoding.flags & IW_ENCODE_INDEX) {
+ case 0:
+ key_idx = ieee->tx_keyidx;
+ break;
+ case 1:
+ key_idx = 0;
+ break;
+ case 2:
+ key_idx = 1;
+ break;
+ case 3:
+ key_idx = 2;
+ break;
+ case 4:
+ key_idx = 3;
+ break;
+ default:
+ break;
+ }
+ if (wrqu->encoding.length == 0x5) {
+ ieee->pairwise_key_type = KEY_TYPE_WEP40;
+ EnableHWSecurityConfig8192(dev);
+ }
+
+ else if (wrqu->encoding.length == 0xd) {
+ ieee->pairwise_key_type = KEY_TYPE_WEP104;
+ EnableHWSecurityConfig8192(dev);
+ setKey(dev, key_idx, key_idx, KEY_TYPE_WEP104,
+ zero_addr[key_idx], 0, hwkey);
+ set_swcam(dev, key_idx, key_idx, KEY_TYPE_WEP104,
+ zero_addr[key_idx], 0, hwkey, 0);
+ } else {
+ printk(KERN_INFO "wrong type in WEP, not WEP40 and WEP104\n");
+ }
+ }
+
+end_hw_sec:
+ priv->rtllib->wx_set_enc = 0;
+ return ret;
+}
+
+static int r8192_wx_set_scan_type(struct net_device *dev,
+ struct iw_request_info *aa,
+ union iwreq_data *wrqu, char *p)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ int *parms = (int *)p;
+ int mode = parms[0];
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ priv->rtllib->active_scan = mode;
+
+ return 1;
+}
+
+
+
+#define R8192_MAX_RETRY 255
+static int r8192_wx_set_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ int err = 0;
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ down(&priv->wx_sem);
+
+ if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
+ wrqu->retry.disabled) {
+ err = -EINVAL;
+ goto exit;
+ }
+ if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (wrqu->retry.value > R8192_MAX_RETRY) {
+ err = -EINVAL;
+ goto exit;
+ }
+ if (wrqu->retry.flags & IW_RETRY_MAX) {
+ priv->retry_rts = wrqu->retry.value;
+ DMESG("Setting retry for RTS/CTS data to %d",
+ wrqu->retry.value);
+
+ } else {
+ priv->retry_data = wrqu->retry.value;
+ DMESG("Setting retry for non RTS/CTS data to %d",
+ wrqu->retry.value);
+ }
+
+
+ rtl8192_commit(dev);
+exit:
+ up(&priv->wx_sem);
+
+ return err;
+}
+
+static int r8192_wx_get_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+
+ wrqu->retry.disabled = 0; /* can't be disabled */
+
+ if ((wrqu->retry.flags & IW_RETRY_TYPE) ==
+ IW_RETRY_LIFETIME)
+ return -EINVAL;
+
+ if (wrqu->retry.flags & IW_RETRY_MAX) {
+ wrqu->retry.flags = IW_RETRY_LIMIT & IW_RETRY_MAX;
+ wrqu->retry.value = priv->retry_rts;
+ } else {
+ wrqu->retry.flags = IW_RETRY_LIMIT & IW_RETRY_MIN;
+ wrqu->retry.value = priv->retry_data;
+ }
+ return 0;
+}
+
+static int r8192_wx_get_sens(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ if (priv->rf_set_sens == NULL)
+ return -1; /* we have not this support for this radio */
+ wrqu->sens.value = priv->sens;
+ return 0;
+}
+
+
+static int r8192_wx_set_sens(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ short err = 0;
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ down(&priv->wx_sem);
+ if (priv->rf_set_sens == NULL) {
+ err = -1; /* we have not this support for this radio */
+ goto exit;
+ }
+ if (priv->rf_set_sens(dev, wrqu->sens.value) == 0)
+ priv->sens = wrqu->sens.value;
+ else
+ err = -EINVAL;
+
+exit:
+ up(&priv->wx_sem);
+
+ return err;
+}
+
+static int r8192_wx_set_enc_ext(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ down(&priv->wx_sem);
+
+ priv->rtllib->wx_set_enc = 1;
+ down(&priv->rtllib->ips_sem);
+ IPSLeave(dev);
+ up(&priv->rtllib->ips_sem);
+
+ ret = rtllib_wx_set_encode_ext(ieee, info, wrqu, extra);
+ {
+ u8 broadcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 zero[6] = {0};
+ u32 key[4] = {0};
+ struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+ struct iw_point *encoding = &wrqu->encoding;
+ u8 idx = 0, alg = 0, group = 0;
+ if ((encoding->flags & IW_ENCODE_DISABLED) ||
+ ext->alg == IW_ENCODE_ALG_NONE) {
+ ieee->pairwise_key_type = ieee->group_key_type
+ = KEY_TYPE_NA;
+ CamResetAllEntry(dev);
+ memset(priv->rtllib->swcamtable, 0,
+ sizeof(struct sw_cam_table) * 32);
+ goto end_hw_sec;
+ }
+ alg = (ext->alg == IW_ENCODE_ALG_CCMP) ? KEY_TYPE_CCMP :
+ ext->alg;
+ idx = encoding->flags & IW_ENCODE_INDEX;
+ if (idx)
+ idx--;
+ group = ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY;
+
+ if ((!group) || (IW_MODE_ADHOC == ieee->iw_mode) ||
+ (alg == KEY_TYPE_WEP40)) {
+ if ((ext->key_len == 13) && (alg == KEY_TYPE_WEP40))
+ alg = KEY_TYPE_WEP104;
+ ieee->pairwise_key_type = alg;
+ EnableHWSecurityConfig8192(dev);
+ }
+ memcpy((u8 *)key, ext->key, 16);
+
+ if ((alg & KEY_TYPE_WEP40) && (ieee->auth_mode != 2)) {
+ if (ext->key_len == 13)
+ ieee->pairwise_key_type = alg = KEY_TYPE_WEP104;
+ setKey(dev, idx, idx, alg, zero, 0, key);
+ set_swcam(dev, idx, idx, alg, zero, 0, key, 0);
+ } else if (group) {
+ ieee->group_key_type = alg;
+ setKey(dev, idx, idx, alg, broadcast_addr, 0, key);
+ set_swcam(dev, idx, idx, alg, broadcast_addr, 0,
+ key, 0);
+ } else {
+ if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) &&
+ ieee->pHTInfo->bCurrentHTSupport)
+ write_nic_byte(dev, 0x173, 1);
+ setKey(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr,
+ 0, key);
+ set_swcam(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr,
+ 0, key, 0);
+ }
+
+
+ }
+
+end_hw_sec:
+ priv->rtllib->wx_set_enc = 0;
+ up(&priv->wx_sem);
+ return ret;
+
+}
+static int r8192_wx_set_auth(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ int ret = 0;
+
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ down(&priv->wx_sem);
+ ret = rtllib_wx_set_auth(priv->rtllib, info, &(data->param), extra);
+ up(&priv->wx_sem);
+ return ret;
+}
+
+static int r8192_wx_set_mlme(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+
+ int ret = 0;
+
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ down(&priv->wx_sem);
+ ret = rtllib_wx_set_mlme(priv->rtllib, info, wrqu, extra);
+ up(&priv->wx_sem);
+ return ret;
+}
+
+static int r8192_wx_set_gen_ie(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ int ret = 0;
+
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ if (priv->bHwRadioOff == true)
+ return 0;
+
+ down(&priv->wx_sem);
+ ret = rtllib_wx_set_gen_ie(priv->rtllib, extra, data->data.length);
+ up(&priv->wx_sem);
+ return ret;
+}
+
+static int r8192_wx_get_gen_ie(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ int ret = 0;
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+
+ if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
+ data->data.length = 0;
+ return 0;
+ }
+
+ if (data->data.length < ieee->wpa_ie_len)
+ return -E2BIG;
+
+ data->data.length = ieee->wpa_ie_len;
+ memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
+ return ret;
+}
+
+#define OID_RT_INTEL_PROMISCUOUS_MODE 0xFF0101F6
+
+static int r8192_wx_set_PromiscuousMode(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+
+ u32 *info_buf = (u32 *)(wrqu->data.pointer);
+
+ u32 oid = info_buf[0];
+ u32 bPromiscuousOn = info_buf[1];
+ u32 bFilterSourceStationFrame = info_buf[2];
+
+ if (OID_RT_INTEL_PROMISCUOUS_MODE == oid) {
+ ieee->IntelPromiscuousModeInfo.bPromiscuousOn =
+ (bPromiscuousOn) ? (true) : (false);
+ ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame =
+ (bFilterSourceStationFrame) ? (true) : (false);
+ (bPromiscuousOn) ?
+ (rtllib_EnableIntelPromiscuousMode(dev, false)) :
+ (rtllib_DisableIntelPromiscuousMode(dev, false));
+
+ printk(KERN_INFO "=======>%s(), on = %d, filter src sta = %d\n",
+ __func__, bPromiscuousOn, bFilterSourceStationFrame);
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+
+
+static int r8192_wx_get_PromiscuousMode(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+
+ down(&priv->wx_sem);
+
+ snprintf(extra, 45, "PromiscuousMode:%d, FilterSrcSTAFrame:%d",
+ ieee->IntelPromiscuousModeInfo.bPromiscuousOn,
+ ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame);
+ wrqu->data.length = strlen(extra) + 1;
+
+ up(&priv->wx_sem);
+
+ return 0;
+}
+
+
+#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
+static iw_handler r8192_wx_handlers[] = {
+ IW_IOCTL(SIOCGIWNAME) = r8192_wx_get_name,
+ IW_IOCTL(SIOCSIWFREQ) = r8192_wx_set_freq,
+ IW_IOCTL(SIOCGIWFREQ) = r8192_wx_get_freq,
+ IW_IOCTL(SIOCSIWMODE) = r8192_wx_set_mode,
+ IW_IOCTL(SIOCGIWMODE) = r8192_wx_get_mode,
+ IW_IOCTL(SIOCSIWSENS) = r8192_wx_set_sens,
+ IW_IOCTL(SIOCGIWSENS) = r8192_wx_get_sens,
+ IW_IOCTL(SIOCGIWRANGE) = rtl8192_wx_get_range,
+ IW_IOCTL(SIOCSIWAP) = r8192_wx_set_wap,
+ IW_IOCTL(SIOCGIWAP) = r8192_wx_get_wap,
+ IW_IOCTL(SIOCSIWSCAN) = r8192_wx_set_scan,
+ IW_IOCTL(SIOCGIWSCAN) = r8192_wx_get_scan,
+ IW_IOCTL(SIOCSIWESSID) = r8192_wx_set_essid,
+ IW_IOCTL(SIOCGIWESSID) = r8192_wx_get_essid,
+ IW_IOCTL(SIOCSIWNICKN) = r8192_wx_set_nick,
+ IW_IOCTL(SIOCGIWNICKN) = r8192_wx_get_nick,
+ IW_IOCTL(SIOCSIWRATE) = r8192_wx_set_rate,
+ IW_IOCTL(SIOCGIWRATE) = r8192_wx_get_rate,
+ IW_IOCTL(SIOCSIWRTS) = r8192_wx_set_rts,
+ IW_IOCTL(SIOCGIWRTS) = r8192_wx_get_rts,
+ IW_IOCTL(SIOCSIWFRAG) = r8192_wx_set_frag,
+ IW_IOCTL(SIOCGIWFRAG) = r8192_wx_get_frag,
+ IW_IOCTL(SIOCSIWRETRY) = r8192_wx_set_retry,
+ IW_IOCTL(SIOCGIWRETRY) = r8192_wx_get_retry,
+ IW_IOCTL(SIOCSIWENCODE) = r8192_wx_set_enc,
+ IW_IOCTL(SIOCGIWENCODE) = r8192_wx_get_enc,
+ IW_IOCTL(SIOCSIWPOWER) = r8192_wx_set_power,
+ IW_IOCTL(SIOCGIWPOWER) = r8192_wx_get_power,
+ IW_IOCTL(SIOCSIWGENIE) = r8192_wx_set_gen_ie,
+ IW_IOCTL(SIOCGIWGENIE) = r8192_wx_get_gen_ie,
+ IW_IOCTL(SIOCSIWMLME) = r8192_wx_set_mlme,
+ IW_IOCTL(SIOCSIWAUTH) = r8192_wx_set_auth,
+ IW_IOCTL(SIOCSIWENCODEEXT) = r8192_wx_set_enc_ext,
+};
+
+/*
+ * the following rule need to be follwing,
+ * Odd : get (world access),
+ * even : set (root access)
+ * */
+static const struct iw_priv_args r8192_private_args[] = {
+ {
+ SIOCIWFIRSTPRIV + 0x0,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_debugflag"
+ }, {
+ SIOCIWFIRSTPRIV + 0x1,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan"
+ }, {
+ SIOCIWFIRSTPRIV + 0x2,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx"
+ }, {
+ SIOCIWFIRSTPRIV + 0x3,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "forcereset"
+ }, {
+ SIOCIWFIRSTPRIV + 0x4,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "force_mic_error"
+ }, {
+ SIOCIWFIRSTPRIV + 0x5,
+ IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_INT|IW_PRIV_SIZE_FIXED|1,
+ "firm_ver"
+ }, {
+ SIOCIWFIRSTPRIV + 0x6,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE,
+ "set_power"
+ }, {
+ SIOCIWFIRSTPRIV + 0x9,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE,
+ "radio"
+ }, {
+ SIOCIWFIRSTPRIV + 0xa,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE,
+ "lps_interv"
+ }, {
+ SIOCIWFIRSTPRIV + 0xb,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE,
+ "lps_force"
+ }, {
+ SIOCIWFIRSTPRIV + 0xc,
+ 0, IW_PRIV_TYPE_CHAR|2047, "adhoc_peer_list"
+ }, {
+ SIOCIWFIRSTPRIV + 0x16,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0, "setpromisc"
+ }, {
+ SIOCIWFIRSTPRIV + 0x17,
+ 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 45, "getpromisc"
+ }
+
+};
+
+static iw_handler r8192_private_handler[] = {
+ (iw_handler)r8192_wx_set_debugflag, /*SIOCIWSECONDPRIV*/
+ (iw_handler)r8192_wx_set_scan_type,
+ (iw_handler)r8192_wx_set_rawtx,
+ (iw_handler)r8192_wx_force_reset,
+ (iw_handler)r8192_wx_force_mic_error,
+ (iw_handler)r8191se_wx_get_firm_version,
+ (iw_handler)r8192_wx_adapter_power_status,
+ (iw_handler)NULL,
+ (iw_handler)NULL,
+ (iw_handler)r8192se_wx_set_radio,
+ (iw_handler)r8192se_wx_set_lps_awake_interval,
+ (iw_handler)r8192se_wx_set_force_lps,
+ (iw_handler)r8192_wx_get_adhoc_peers,
+ (iw_handler)NULL,
+ (iw_handler)NULL,
+ (iw_handler)NULL,
+ (iw_handler)NULL,
+ (iw_handler)NULL,
+ (iw_handler)NULL,
+ (iw_handler)NULL,
+ (iw_handler)NULL,
+ (iw_handler)NULL,
+ (iw_handler)r8192_wx_set_PromiscuousMode,
+ (iw_handler)r8192_wx_get_PromiscuousMode,
+};
+
+static struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev)
+{
+ struct r8192_priv *priv = rtllib_priv(dev);
+ struct rtllib_device *ieee = priv->rtllib;
+ struct iw_statistics *wstats = &priv->wstats;
+ int tmp_level = 0;
+ int tmp_qual = 0;
+ int tmp_noise = 0;
+ if (ieee->state < RTLLIB_LINKED) {
+ wstats->qual.qual = 10;
+ wstats->qual.level = 0;
+ wstats->qual.noise = -100;
+ wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+ return wstats;
+ }
+
+ tmp_level = (&ieee->current_network)->stats.rssi;
+ tmp_qual = (&ieee->current_network)->stats.signal;
+ tmp_noise = (&ieee->current_network)->stats.noise;
+
+ wstats->qual.level = tmp_level;
+ wstats->qual.qual = tmp_qual;
+ wstats->qual.noise = tmp_noise;
+ wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+ return wstats;
+}
+
+struct iw_handler_def r8192_wx_handlers_def = {
+ .standard = r8192_wx_handlers,
+ .num_standard = sizeof(r8192_wx_handlers) / sizeof(iw_handler),
+ .private = r8192_private_handler,
+ .num_private = sizeof(r8192_private_handler) / sizeof(iw_handler),
+ .num_private_args = sizeof(r8192_private_args) /
+ sizeof(struct iw_priv_args),
+ .get_wireless_stats = r8192_get_wireless_stats,
+ .private_args = (struct iw_priv_args *)r8192_private_args,
+};
diff --git a/drivers/staging/rtl8192e/rtl_wx.h b/drivers/staging/rtl8192e/rtl_wx.h
new file mode 100644
index 00000000000..6a51a25ec87
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl_wx.h
@@ -0,0 +1,31 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+
+#ifndef R819x_WX_H
+#define R819x_WX_H
+
+struct net_device;
+struct iw_handler_def;
+struct iw_statistics;
+
+extern struct iw_handler_def r8192_wx_handlers_def;
+struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev);
+u16 rtl8192_11n_user_show_rates(struct net_device *dev);
+
+#endif
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
new file mode 100644
index 00000000000..de25975ccee
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -0,0 +1,3144 @@
+/*
+ * Merged with mainline rtllib.h in Aug 2004. Original ieee802_11
+ * remains copyright by the original authors
+ *
+ * Portions of the merged code are based on Host AP (software wireless
+ * LAN access point) driver for Intersil Prism2/2.5/3.
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * Adaption to a generic IEEE 802.11 stack by James Ketrenos
+ * <jketreno@linux.intel.com>
+ * Copyright (c) 2004, Intel Corporation
+ *
+ * Modified for Realtek's wi-fi cards by Andrea Merello
+ * <andreamrl@tiscali.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ */
+#ifndef RTLLIB_H
+#define RTLLIB_H
+#include <linux/if_ether.h> /* ETH_ALEN */
+#include <linux/kernel.h> /* ARRAY_SIZE */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+
+#include <linux/delay.h>
+#include <linux/wireless.h>
+
+#include "rtl819x_HT.h"
+#include "rtl819x_BA.h"
+#include "rtl819x_TS.h"
+
+#include <linux/netdevice.h>
+#include <linux/if_arp.h> /* ARPHRD_ETHER */
+
+#define MAX_PRECMD_CNT 16
+#define MAX_RFDEPENDCMD_CNT 16
+#define MAX_POSTCMD_CNT 16
+
+#ifndef WIRELESS_SPY
+#define WIRELESS_SPY
+#endif
+#include <net/iw_handler.h>
+
+#ifndef IW_MODE_MONITOR
+#define IW_MODE_MONITOR 6
+#endif
+
+#ifndef IWEVCUSTOM
+#define IWEVCUSTOM 0x8c02
+#endif
+
+#ifndef IW_CUSTOM_MAX
+/* Max number of char in custom event - use multiple of them if needed */
+#define IW_CUSTOM_MAX 256 /* In bytes */
+#endif
+
+#ifndef container_of
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ *
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof(((type *)0)->member)*__mptr = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+#endif
+
+#define skb_tail_pointer_rsl(skb) skb_tail_pointer(skb)
+
+#define EXPORT_SYMBOL_RSL(x) EXPORT_SYMBOL(x)
+
+
+#define queue_delayed_work_rsl(x, y, z) queue_delayed_work(x, y, z)
+#define INIT_DELAYED_WORK_RSL(x, y, z) INIT_DELAYED_WORK(x, y)
+
+#define queue_work_rsl(x, y) queue_work(x, y)
+#define INIT_WORK_RSL(x, y, z) INIT_WORK(x, y)
+
+#define container_of_work_rsl(x, y, z) container_of(x, y, z)
+#define container_of_dwork_rsl(x, y, z) \
+ container_of(container_of(x, struct delayed_work, work), y, z)
+
+#define iwe_stream_add_event_rsl(info, start, stop, iwe, len) \
+ iwe_stream_add_event(info, start, stop, iwe, len)
+
+#define iwe_stream_add_point_rsl(info, start, stop, iwe, p) \
+ iwe_stream_add_point(info, start, stop, iwe, p)
+
+#define usb_alloc_urb_rsl(x, y) usb_alloc_urb(x, y)
+#define usb_submit_urb_rsl(x, y) usb_submit_urb(x, y)
+
+static inline void *netdev_priv_rsl(struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+#define KEY_TYPE_NA 0x0
+#define KEY_TYPE_WEP40 0x1
+#define KEY_TYPE_TKIP 0x2
+#define KEY_TYPE_CCMP 0x4
+#define KEY_TYPE_WEP104 0x5
+/* added for rtl819x tx procedure */
+#define MAX_QUEUE_SIZE 0x10
+
+#define BK_QUEUE 0
+#define BE_QUEUE 1
+#define VI_QUEUE 2
+#define VO_QUEUE 3
+#define HCCA_QUEUE 4
+#define TXCMD_QUEUE 5
+#define MGNT_QUEUE 6
+#define HIGH_QUEUE 7
+#define BEACON_QUEUE 8
+
+#define LOW_QUEUE BE_QUEUE
+#define NORMAL_QUEUE MGNT_QUEUE
+
+#ifndef IW_MODE_MESH
+#define IW_MODE_MESH 7
+#endif
+#define AMSDU_SUBHEADER_LEN 14
+#define SWRF_TIMEOUT 50
+
+#define IE_CISCO_FLAG_POSITION 0x08
+#define SUPPORT_CKIP_MIC 0x08
+#define SUPPORT_CKIP_PK 0x10
+#define RT_RF_OFF_LEVL_ASPM BIT0
+#define RT_RF_OFF_LEVL_CLK_REQ BIT1
+#define RT_RF_OFF_LEVL_PCI_D3 BIT2
+#define RT_RF_OFF_LEVL_HALT_NIC BIT3
+#define RT_RF_OFF_LEVL_FREE_FW BIT4
+#define RT_RF_OFF_LEVL_FW_32K BIT5
+#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT6
+#define RT_RF_LPS_DISALBE_2R BIT30
+#define RT_RF_LPS_LEVEL_ASPM BIT31
+#define RT_IN_PS_LEVEL(pPSC, _PS_FLAG) \
+ ((pPSC->CurPsLevel & _PS_FLAG) ? true : false)
+#define RT_CLEAR_PS_LEVEL(pPSC, _PS_FLAG) \
+ (pPSC->CurPsLevel &= (~(_PS_FLAG)))
+#define RT_SET_PS_LEVEL(pPSC, _PS_FLAG) (pPSC->CurPsLevel |= _PS_FLAG)
+
+/* defined for skb cb field */
+/* At most 28 byte */
+struct cb_desc {
+ /* Tx Desc Related flags (8-9) */
+ u8 bLastIniPkt:1;
+ u8 bCmdOrInit:1;
+ u8 bFirstSeg:1;
+ u8 bLastSeg:1;
+ u8 bEncrypt:1;
+ u8 bTxDisableRateFallBack:1;
+ u8 bTxUseDriverAssingedRate:1;
+ u8 bHwSec:1;
+
+ u8 nStuckCount;
+
+ /* Tx Firmware Relaged flags (10-11)*/
+ u8 bCTSEnable:1;
+ u8 bRTSEnable:1;
+ u8 bUseShortGI:1;
+ u8 bUseShortPreamble:1;
+ u8 bTxEnableFwCalcDur:1;
+ u8 bAMPDUEnable:1;
+ u8 bRTSSTBC:1;
+ u8 RTSSC:1;
+
+ u8 bRTSBW:1;
+ u8 bPacketBW:1;
+ u8 bRTSUseShortPreamble:1;
+ u8 bRTSUseShortGI:1;
+ u8 bMulticast:1;
+ u8 bBroadcast:1;
+ u8 drv_agg_enable:1;
+ u8 reserved2:1;
+
+ /* Tx Desc related element(12-19) */
+ u8 rata_index;
+ u8 queue_index;
+ u16 txbuf_size;
+ u8 RATRIndex;
+ u8 bAMSDU:1;
+ u8 bFromAggrQ:1;
+ u8 reserved6:6;
+ u8 macId;
+ u8 priority;
+
+ /* Tx firmware related element(20-27) */
+ u8 data_rate;
+ u8 rts_rate;
+ u8 ampdu_factor;
+ u8 ampdu_density;
+ u8 DrvAggrNum;
+ u8 bdhcp;
+ u16 pkt_size;
+ u8 bIsSpecialDataFrame;
+
+ u8 bBTTxPacket;
+ u8 bIsBTProbRsp;
+};
+
+enum sw_chnl_cmd_id {
+ CmdID_End,
+ CmdID_SetTxPowerLevel,
+ CmdID_BBRegWrite10,
+ CmdID_WritePortUlong,
+ CmdID_WritePortUshort,
+ CmdID_WritePortUchar,
+ CmdID_RF_WriteReg,
+};
+
+struct sw_chnl_cmd {
+ enum sw_chnl_cmd_id CmdID;
+ u32 Para1;
+ u32 Para2;
+ u32 msDelay;
+} __packed;
+
+/*--------------------------Define -------------------------------------------*/
+#define MGN_1M 0x02
+#define MGN_2M 0x04
+#define MGN_5_5M 0x0b
+#define MGN_11M 0x16
+
+#define MGN_6M 0x0c
+#define MGN_9M 0x12
+#define MGN_12M 0x18
+#define MGN_18M 0x24
+#define MGN_24M 0x30
+#define MGN_36M 0x48
+#define MGN_48M 0x60
+#define MGN_54M 0x6c
+
+#define MGN_MCS0 0x80
+#define MGN_MCS1 0x81
+#define MGN_MCS2 0x82
+#define MGN_MCS3 0x83
+#define MGN_MCS4 0x84
+#define MGN_MCS5 0x85
+#define MGN_MCS6 0x86
+#define MGN_MCS7 0x87
+#define MGN_MCS8 0x88
+#define MGN_MCS9 0x89
+#define MGN_MCS10 0x8a
+#define MGN_MCS11 0x8b
+#define MGN_MCS12 0x8c
+#define MGN_MCS13 0x8d
+#define MGN_MCS14 0x8e
+#define MGN_MCS15 0x8f
+#define MGN_MCS0_SG 0x90
+#define MGN_MCS1_SG 0x91
+#define MGN_MCS2_SG 0x92
+#define MGN_MCS3_SG 0x93
+#define MGN_MCS4_SG 0x94
+#define MGN_MCS5_SG 0x95
+#define MGN_MCS6_SG 0x96
+#define MGN_MCS7_SG 0x97
+#define MGN_MCS8_SG 0x98
+#define MGN_MCS9_SG 0x99
+#define MGN_MCS10_SG 0x9a
+#define MGN_MCS11_SG 0x9b
+#define MGN_MCS12_SG 0x9c
+#define MGN_MCS13_SG 0x9d
+#define MGN_MCS14_SG 0x9e
+#define MGN_MCS15_SG 0x9f
+
+
+enum _ReasonCode {
+ unspec_reason = 0x1,
+ auth_not_valid = 0x2,
+ deauth_lv_ss = 0x3,
+ inactivity = 0x4,
+ ap_overload = 0x5,
+ class2_err = 0x6,
+ class3_err = 0x7,
+ disas_lv_ss = 0x8,
+ asoc_not_auth = 0x9,
+
+ mic_failure = 0xe,
+
+ invalid_IE = 0x0d,
+ four_way_tmout = 0x0f,
+ two_way_tmout = 0x10,
+ IE_dismatch = 0x11,
+ invalid_Gcipher = 0x12,
+ invalid_Pcipher = 0x13,
+ invalid_AKMP = 0x14,
+ unsup_RSNIEver = 0x15,
+ invalid_RSNIE = 0x16,
+ auth_802_1x_fail = 0x17,
+ ciper_reject = 0x18,
+
+ QoS_unspec = 0x20,
+ QAP_bandwidth = 0x21,
+ poor_condition = 0x22,
+ no_facility = 0x23,
+ req_declined = 0x25,
+ invalid_param = 0x26,
+ req_not_honored = 0x27,
+ TS_not_created = 0x2F,
+ DL_not_allowed = 0x30,
+ dest_not_exist = 0x31,
+ dest_not_QSTA = 0x32,
+};
+
+enum hal_def_variable {
+ HAL_DEF_TPC_ENABLE,
+ HAL_DEF_INIT_GAIN,
+ HAL_DEF_PROT_IMP_MODE,
+ HAL_DEF_HIGH_POWER_MECHANISM,
+ HAL_DEF_RATE_ADAPTIVE_MECHANISM,
+ HAL_DEF_ANTENNA_DIVERSITY_MECHANISM,
+ HAL_DEF_LED,
+ HAL_DEF_CW_MAX_MIN,
+
+ HAL_DEF_WOWLAN,
+ HAL_DEF_ENDPOINTS,
+ HAL_DEF_MIN_TX_POWER_DBM,
+ HAL_DEF_MAX_TX_POWER_DBM,
+ HW_DEF_EFUSE_REPG_SECTION1_FLAG,
+ HW_DEF_EFUSE_REPG_DATA,
+ HW_DEF_GPIO,
+ HAL_DEF_PCI_SUPPORT_ASPM,
+ HAL_DEF_THERMAL_VALUE,
+ HAL_DEF_USB_IN_TOKEN_REV,
+};
+
+enum hw_variables {
+ HW_VAR_ETHER_ADDR,
+ HW_VAR_MULTICAST_REG,
+ HW_VAR_BASIC_RATE,
+ HW_VAR_BSSID,
+ HW_VAR_MEDIA_STATUS,
+ HW_VAR_SECURITY_CONF,
+ HW_VAR_BEACON_INTERVAL,
+ HW_VAR_ATIM_WINDOW,
+ HW_VAR_LISTEN_INTERVAL,
+ HW_VAR_CS_COUNTER,
+ HW_VAR_DEFAULTKEY0,
+ HW_VAR_DEFAULTKEY1,
+ HW_VAR_DEFAULTKEY2,
+ HW_VAR_DEFAULTKEY3,
+ HW_VAR_SIFS,
+ HW_VAR_DIFS,
+ HW_VAR_EIFS,
+ HW_VAR_SLOT_TIME,
+ HW_VAR_ACK_PREAMBLE,
+ HW_VAR_CW_CONFIG,
+ HW_VAR_CW_VALUES,
+ HW_VAR_RATE_FALLBACK_CONTROL,
+ HW_VAR_CONTENTION_WINDOW,
+ HW_VAR_RETRY_COUNT,
+ HW_VAR_TR_SWITCH,
+ HW_VAR_COMMAND,
+ HW_VAR_WPA_CONFIG,
+ HW_VAR_AMPDU_MIN_SPACE,
+ HW_VAR_SHORTGI_DENSITY,
+ HW_VAR_AMPDU_FACTOR,
+ HW_VAR_MCS_RATE_AVAILABLE,
+ HW_VAR_AC_PARAM,
+ HW_VAR_ACM_CTRL,
+ HW_VAR_DIS_Req_Qsize,
+ HW_VAR_CCX_CHNL_LOAD,
+ HW_VAR_CCX_NOISE_HISTOGRAM,
+ HW_VAR_CCX_CLM_NHM,
+ HW_VAR_TxOPLimit,
+ HW_VAR_TURBO_MODE,
+ HW_VAR_RF_STATE,
+ HW_VAR_RF_OFF_BY_HW,
+ HW_VAR_BUS_SPEED,
+ HW_VAR_SET_DEV_POWER,
+
+ HW_VAR_RCR,
+ HW_VAR_RATR_0,
+ HW_VAR_RRSR,
+ HW_VAR_CPU_RST,
+ HW_VAR_CECHK_BSSID,
+ HW_VAR_LBK_MODE,
+ HW_VAR_AES_11N_FIX,
+ HW_VAR_USB_RX_AGGR,
+ HW_VAR_USER_CONTROL_TURBO_MODE,
+ HW_VAR_RETRY_LIMIT,
+ HW_VAR_INIT_TX_RATE,
+ HW_VAR_TX_RATE_REG,
+ HW_VAR_EFUSE_USAGE,
+ HW_VAR_EFUSE_BYTES,
+ HW_VAR_AUTOLOAD_STATUS,
+ HW_VAR_RF_2R_DISABLE,
+ HW_VAR_SET_RPWM,
+ HW_VAR_H2C_FW_PWRMODE,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ HW_VAR_1X1_RECV_COMBINE,
+ HW_VAR_STOP_SEND_BEACON,
+ HW_VAR_TSF_TIMER,
+ HW_VAR_IO_CMD,
+
+ HW_VAR_RF_RECOVERY,
+ HW_VAR_H2C_FW_UPDATE_GTK,
+ HW_VAR_WF_MASK,
+ HW_VAR_WF_CRC,
+ HW_VAR_WF_IS_MAC_ADDR,
+ HW_VAR_H2C_FW_OFFLOAD,
+ HW_VAR_RESET_WFCRC,
+
+ HW_VAR_HANDLE_FW_C2H,
+ HW_VAR_DL_FW_RSVD_PAGE,
+ HW_VAR_AID,
+ HW_VAR_HW_SEQ_ENABLE,
+ HW_VAR_CORRECT_TSF,
+ HW_VAR_BCN_VALID,
+ HW_VAR_FWLPS_RF_ON,
+ HW_VAR_DUAL_TSF_RST,
+ HW_VAR_SWITCH_EPHY_WoWLAN,
+ HW_VAR_INT_MIGRATION,
+ HW_VAR_INT_AC,
+ HW_VAR_RF_TIMING,
+};
+
+enum rt_op_mode {
+ RT_OP_MODE_AP,
+ RT_OP_MODE_INFRASTRUCTURE,
+ RT_OP_MODE_IBSS,
+ RT_OP_MODE_NO_LINK,
+};
+
+
+#define aSifsTime \
+ (((priv->rtllib->current_network.mode == IEEE_A) \
+ || (priv->rtllib->current_network.mode == IEEE_N_24G) \
+ || (priv->rtllib->current_network.mode == IEEE_N_5G)) ? 16 : 10)
+
+#define MGMT_QUEUE_NUM 5
+
+#define IEEE_CMD_SET_WPA_PARAM 1
+#define IEEE_CMD_SET_WPA_IE 2
+#define IEEE_CMD_SET_ENCRYPTION 3
+#define IEEE_CMD_MLME 4
+
+#define IEEE_PARAM_WPA_ENABLED 1
+#define IEEE_PARAM_TKIP_COUNTERMEASURES 2
+#define IEEE_PARAM_DROP_UNENCRYPTED 3
+#define IEEE_PARAM_PRIVACY_INVOKED 4
+#define IEEE_PARAM_AUTH_ALGS 5
+#define IEEE_PARAM_IEEE_802_1X 6
+#define IEEE_PARAM_WPAX_SELECT 7
+#define IEEE_PROTO_WPA 1
+#define IEEE_PROTO_RSN 2
+#define IEEE_WPAX_USEGROUP 0
+#define IEEE_WPAX_WEP40 1
+#define IEEE_WPAX_TKIP 2
+#define IEEE_WPAX_WRAP 3
+#define IEEE_WPAX_CCMP 4
+#define IEEE_WPAX_WEP104 5
+
+#define IEEE_KEY_MGMT_IEEE8021X 1
+#define IEEE_KEY_MGMT_PSK 2
+
+#define IEEE_MLME_STA_DEAUTH 1
+#define IEEE_MLME_STA_DISASSOC 2
+
+
+#define IEEE_CRYPT_ERR_UNKNOWN_ALG 2
+#define IEEE_CRYPT_ERR_UNKNOWN_ADDR 3
+#define IEEE_CRYPT_ERR_CRYPT_INIT_FAILED 4
+#define IEEE_CRYPT_ERR_KEY_SET_FAILED 5
+#define IEEE_CRYPT_ERR_TX_KEY_SET_FAILED 6
+#define IEEE_CRYPT_ERR_CARD_CONF_FAILED 7
+#define IEEE_CRYPT_ALG_NAME_LEN 16
+
+#define MAX_IE_LEN 0xff
+#define RT_ASSERT_RET(_Exp) do {} while (0)
+#define RT_ASSERT_RET_VALUE(_Exp, Ret) \
+ do {} while (0)
+
+struct ieee_param {
+ u32 cmd;
+ u8 sta_addr[ETH_ALEN];
+ union {
+ struct {
+ u8 name;
+ u32 value;
+ } wpa_param;
+ struct {
+ u32 len;
+ u8 reserved[32];
+ u8 data[0];
+ } wpa_ie;
+ struct {
+ int command;
+ int reason_code;
+ } mlme;
+ struct {
+ u8 alg[IEEE_CRYPT_ALG_NAME_LEN];
+ u8 set_tx;
+ u32 err;
+ u8 idx;
+ u8 seq[8]; /* sequence counter (set: RX, get: TX) */
+ u16 key_len;
+ u8 key[0];
+ } crypt;
+ } u;
+};
+
+
+#if WIRELESS_EXT < 17
+#define IW_QUAL_QUAL_INVALID 0x10
+#define IW_QUAL_LEVEL_INVALID 0x20
+#define IW_QUAL_NOISE_INVALID 0x40
+#define IW_QUAL_QUAL_UPDATED 0x1
+#define IW_QUAL_LEVEL_UPDATED 0x2
+#define IW_QUAL_NOISE_UPDATED 0x4
+#endif
+
+#define MSECS(t) msecs_to_jiffies(t)
+#define msleep_interruptible_rsl msleep_interruptible
+
+#define RTLLIB_DATA_LEN 2304
+/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
+ 6.2.1.1.2.
+
+ The figure in section 7.1.2 suggests a body size of up to 2312
+ bytes is allowed, which is a bit confusing, I suspect this
+ represents the 2304 bytes of real data, plus a possible 8 bytes of
+ WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
+#define RTLLIB_1ADDR_LEN 10
+#define RTLLIB_2ADDR_LEN 16
+#define RTLLIB_3ADDR_LEN 24
+#define RTLLIB_4ADDR_LEN 30
+#define RTLLIB_FCS_LEN 4
+#define RTLLIB_HLEN (RTLLIB_4ADDR_LEN)
+#define RTLLIB_FRAME_LEN (RTLLIB_DATA_LEN + RTLLIB_HLEN)
+#define RTLLIB_MGMT_HDR_LEN 24
+#define RTLLIB_DATA_HDR3_LEN 24
+#define RTLLIB_DATA_HDR4_LEN 30
+
+#define RTLLIB_SKBBUFFER_SIZE 2500
+
+#define MIN_FRAG_THRESHOLD 256U
+#define MAX_FRAG_THRESHOLD 2346U
+#define MAX_HT_DATA_FRAG_THRESHOLD 0x2000
+
+#define HT_AMSDU_SIZE_4K 3839
+#define HT_AMSDU_SIZE_8K 7935
+
+/* Frame control field constants */
+#define RTLLIB_FCTL_VERS 0x0003
+#define RTLLIB_FCTL_FTYPE 0x000c
+#define RTLLIB_FCTL_STYPE 0x00f0
+#define RTLLIB_FCTL_FRAMETYPE 0x00fc
+#define RTLLIB_FCTL_TODS 0x0100
+#define RTLLIB_FCTL_FROMDS 0x0200
+#define RTLLIB_FCTL_DSTODS 0x0300
+#define RTLLIB_FCTL_MOREFRAGS 0x0400
+#define RTLLIB_FCTL_RETRY 0x0800
+#define RTLLIB_FCTL_PM 0x1000
+#define RTLLIB_FCTL_MOREDATA 0x2000
+#define RTLLIB_FCTL_WEP 0x4000
+#define RTLLIB_FCTL_ORDER 0x8000
+
+#define RTLLIB_FTYPE_MGMT 0x0000
+#define RTLLIB_FTYPE_CTL 0x0004
+#define RTLLIB_FTYPE_DATA 0x0008
+
+/* management */
+#define RTLLIB_STYPE_ASSOC_REQ 0x0000
+#define RTLLIB_STYPE_ASSOC_RESP 0x0010
+#define RTLLIB_STYPE_REASSOC_REQ 0x0020
+#define RTLLIB_STYPE_REASSOC_RESP 0x0030
+#define RTLLIB_STYPE_PROBE_REQ 0x0040
+#define RTLLIB_STYPE_PROBE_RESP 0x0050
+#define RTLLIB_STYPE_BEACON 0x0080
+#define RTLLIB_STYPE_ATIM 0x0090
+#define RTLLIB_STYPE_DISASSOC 0x00A0
+#define RTLLIB_STYPE_AUTH 0x00B0
+#define RTLLIB_STYPE_DEAUTH 0x00C0
+#define RTLLIB_STYPE_MANAGE_ACT 0x00D0
+
+/* control */
+#define RTLLIB_STYPE_PSPOLL 0x00A0
+#define RTLLIB_STYPE_RTS 0x00B0
+#define RTLLIB_STYPE_CTS 0x00C0
+#define RTLLIB_STYPE_ACK 0x00D0
+#define RTLLIB_STYPE_CFEND 0x00E0
+#define RTLLIB_STYPE_CFENDACK 0x00F0
+#define RTLLIB_STYPE_BLOCKACK 0x0094
+
+/* data */
+#define RTLLIB_STYPE_DATA 0x0000
+#define RTLLIB_STYPE_DATA_CFACK 0x0010
+#define RTLLIB_STYPE_DATA_CFPOLL 0x0020
+#define RTLLIB_STYPE_DATA_CFACKPOLL 0x0030
+#define RTLLIB_STYPE_NULLFUNC 0x0040
+#define RTLLIB_STYPE_CFACK 0x0050
+#define RTLLIB_STYPE_CFPOLL 0x0060
+#define RTLLIB_STYPE_CFACKPOLL 0x0070
+#define RTLLIB_STYPE_QOS_DATA 0x0080
+#define RTLLIB_STYPE_QOS_NULL 0x00C0
+
+#define RTLLIB_SCTL_FRAG 0x000F
+#define RTLLIB_SCTL_SEQ 0xFFF0
+
+/* QOS control */
+#define RTLLIB_QCTL_TID 0x000F
+
+#define FC_QOS_BIT BIT7
+#define IsDataFrame(pdu) (((pdu[0] & 0x0C) == 0x08) ? true : false)
+#define IsLegacyDataFrame(pdu) (IsDataFrame(pdu) && (!(pdu[0]&FC_QOS_BIT)))
+#define IsQoSDataFrame(pframe) \
+ ((*(u16 *)pframe&(RTLLIB_STYPE_QOS_DATA|RTLLIB_FTYPE_DATA)) == \
+ (RTLLIB_STYPE_QOS_DATA|RTLLIB_FTYPE_DATA))
+#define Frame_Order(pframe) (*(u16 *)pframe&RTLLIB_FCTL_ORDER)
+#define SN_LESS(a, b) (((a-b)&0x800) != 0)
+#define SN_EQUAL(a, b) (a == b)
+#define MAX_DEV_ADDR_SIZE 8
+
+enum act_category {
+ ACT_CAT_QOS = 1,
+ ACT_CAT_DLS = 2,
+ ACT_CAT_BA = 3,
+ ACT_CAT_HT = 7,
+ ACT_CAT_WMM = 17,
+};
+
+enum ts_action {
+ ACT_ADDTSREQ = 0,
+ ACT_ADDTSRSP = 1,
+ ACT_DELTS = 2,
+ ACT_SCHEDULE = 3,
+};
+
+enum ba_action {
+ ACT_ADDBAREQ = 0,
+ ACT_ADDBARSP = 1,
+ ACT_DELBA = 2,
+};
+
+enum init_gain_op_type {
+ IG_Backup = 0,
+ IG_Restore,
+ IG_Max
+};
+
+enum led_ctl_mode {
+ LED_CTL_POWER_ON = 1,
+ LED_CTL_LINK = 2,
+ LED_CTL_NO_LINK = 3,
+ LED_CTL_TX = 4,
+ LED_CTL_RX = 5,
+ LED_CTL_SITE_SURVEY = 6,
+ LED_CTL_POWER_OFF = 7,
+ LED_CTL_START_TO_LINK = 8,
+ LED_CTL_START_WPS = 9,
+ LED_CTL_STOP_WPS = 10,
+ LED_CTL_START_WPS_BOTTON = 11,
+ LED_CTL_STOP_WPS_FAIL = 12,
+ LED_CTL_STOP_WPS_FAIL_OVERLAP = 13,
+};
+
+enum rt_rf_type_def {
+ RF_1T2R = 0,
+ RF_2T4R,
+ RF_2T2R,
+ RF_1T1R,
+ RF_2T2R_GREEN,
+ RF_819X_MAX_TYPE
+};
+
+enum wireless_mode {
+ WIRELESS_MODE_UNKNOWN = 0x00,
+ WIRELESS_MODE_A = 0x01,
+ WIRELESS_MODE_B = 0x02,
+ WIRELESS_MODE_G = 0x04,
+ WIRELESS_MODE_AUTO = 0x08,
+ WIRELESS_MODE_N_24G = 0x10,
+ WIRELESS_MODE_N_5G = 0x20
+};
+
+enum wireless_network_type {
+ WIRELESS_11B = 1,
+ WIRELESS_11G = 2,
+ WIRELESS_11A = 4,
+ WIRELESS_11N = 8
+};
+
+#define OUI_SUBTYPE_WMM_INFO 0
+#define OUI_SUBTYPE_WMM_PARAM 1
+#define OUI_SUBTYPE_QOS_CAPABI 5
+
+/* debug macros */
+extern u32 rtllib_debug_level;
+#define RTLLIB_DEBUG(level, fmt, args...) \
+do { \
+ if (rtllib_debug_level & (level)) \
+ printk(KERN_DEBUG "rtllib: " fmt, ## args); \
+} while (0)
+
+#define RTLLIB_DEBUG_DATA(level, data, datalen) \
+ do { \
+ if ((rtllib_debug_level & (level)) == (level)) { \
+ int i; \
+ u8 *pdata = (u8 *)data; \
+ printk(KERN_DEBUG "rtllib: %s()\n", __func__); \
+ for (i = 0; i < (int)(datalen); i++) { \
+ printk("%2.2x ", pdata[i]); \
+ if ((i+1)%16 == 0) \
+ printk("\n"); \
+ } \
+ printk("\n"); \
+ } \
+ } while (0)
+
+/*
+ * To use the debug system;
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of:
+ *
+ * #define RTLLIB_DL_xxxx VALUE
+ *
+ * shifting value to the left one bit from the previous entry. xxxx should be
+ * the name of the classification (for example, WEP)
+ *
+ * You then need to either add a RTLLIB_xxxx_DEBUG() macro definition for your
+ * classification, or use RTLLIB_DEBUG(RTLLIB_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * To add your debug level to the list of levels seen when you perform
+ *
+ * % cat /proc/net/ipw/debug_level
+ *
+ * you simply need to add your entry to the ipw_debug_levels array.
+ *
+ *
+ */
+
+#define RTLLIB_DL_INFO (1<<0)
+#define RTLLIB_DL_WX (1<<1)
+#define RTLLIB_DL_SCAN (1<<2)
+#define RTLLIB_DL_STATE (1<<3)
+#define RTLLIB_DL_MGMT (1<<4)
+#define RTLLIB_DL_FRAG (1<<5)
+#define RTLLIB_DL_EAP (1<<6)
+#define RTLLIB_DL_DROP (1<<7)
+
+#define RTLLIB_DL_TX (1<<8)
+#define RTLLIB_DL_RX (1<<9)
+
+#define RTLLIB_DL_HT (1<<10)
+#define RTLLIB_DL_BA (1<<11)
+#define RTLLIB_DL_TS (1<<12)
+#define RTLLIB_DL_QOS (1<<13)
+#define RTLLIB_DL_REORDER (1<<14)
+#define RTLLIB_DL_IOT (1<<15)
+#define RTLLIB_DL_IPS (1<<16)
+#define RTLLIB_DL_TRACE (1<<29)
+#define RTLLIB_DL_DATA (1<<30)
+#define RTLLIB_DL_ERR (1<<31)
+#define RTLLIB_ERROR(f, a...) printk(KERN_ERR "rtllib: " f, ## a)
+#define RTLLIB_WARNING(f, a...) printk(KERN_WARNING "rtllib: " f, ## a)
+#define RTLLIB_DEBUG_INFO(f, a...) RTLLIB_DEBUG(RTLLIB_DL_INFO, f, ## a)
+
+#define RTLLIB_DEBUG_WX(f, a...) RTLLIB_DEBUG(RTLLIB_DL_WX, f, ## a)
+#define RTLLIB_DEBUG_SCAN(f, a...) RTLLIB_DEBUG(RTLLIB_DL_SCAN, f, ## a)
+#define RTLLIB_DEBUG_STATE(f, a...) RTLLIB_DEBUG(RTLLIB_DL_STATE, f, ## a)
+#define RTLLIB_DEBUG_MGMT(f, a...) RTLLIB_DEBUG(RTLLIB_DL_MGMT, f, ## a)
+#define RTLLIB_DEBUG_FRAG(f, a...) RTLLIB_DEBUG(RTLLIB_DL_FRAG, f, ## a)
+#define RTLLIB_DEBUG_EAP(f, a...) RTLLIB_DEBUG(RTLLIB_DL_EAP, f, ## a)
+#define RTLLIB_DEBUG_DROP(f, a...) RTLLIB_DEBUG(RTLLIB_DL_DROP, f, ## a)
+#define RTLLIB_DEBUG_TX(f, a...) RTLLIB_DEBUG(RTLLIB_DL_TX, f, ## a)
+#define RTLLIB_DEBUG_RX(f, a...) RTLLIB_DEBUG(RTLLIB_DL_RX, f, ## a)
+#define RTLLIB_DEBUG_QOS(f, a...) RTLLIB_DEBUG(RTLLIB_DL_QOS, f, ## a)
+
+/* Added by Annie, 2005-11-22. */
+#define MAX_STR_LEN 64
+/* I want to see ASCII 33 to 126 only. Otherwise, I print '?'. */
+#define PRINTABLE(_ch) (_ch > '!' && _ch < '~')
+#define RTLLIB_PRINT_STR(_Comp, _TitleString, _Ptr, _Len) \
+ if ((_Comp) & level) { \
+ int __i; \
+ u8 struct buffer[MAX_STR_LEN]; \
+ int length = (_Len < MAX_STR_LEN) ? _Len : (MAX_STR_LEN-1) ;\
+ memset(struct buffer, 0, MAX_STR_LEN); \
+ memcpy(struct buffer, (u8 *)_Ptr, length); \
+ for (__i = 0; __i < MAX_STR_LEN; __i++) { \
+ if (!PRINTABLE(struct buffer[__i])) \
+ struct buffer[__i] = '?'; \
+ } \
+ struct buffer[length] = '\0'; \
+ printk(KERN_INFO "Rtl819x: "); \
+ printk(_TitleString); \
+ printk(": %d, <%s>\n", _Len, struct buffer); \
+ }
+#ifndef ETH_P_PAE
+#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
+#define ETH_P_IP 0x0800 /* Internet Protocol packet */
+#define ETH_P_ARP 0x0806 /* Address Resolution packet */
+#endif /* ETH_P_PAE */
+
+#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */
+
+#ifndef ETH_P_80211_RAW
+#define ETH_P_80211_RAW (ETH_P_ECONET + 1)
+#endif
+
+/* IEEE 802.11 defines */
+
+#define P80211_OUI_LEN 3
+
+struct rtllib_snap_hdr {
+
+ u8 dsap; /* always 0xAA */
+ u8 ssap; /* always 0xAA */
+ u8 ctrl; /* always 0x03 */
+ u8 oui[P80211_OUI_LEN]; /* organizational universal id */
+
+} __packed;
+
+enum _REG_PREAMBLE_MODE {
+ PREAMBLE_LONG = 1,
+ PREAMBLE_AUTO = 2,
+ PREAMBLE_SHORT = 3,
+};
+
+#define SNAP_SIZE sizeof(struct rtllib_snap_hdr)
+
+#define WLAN_FC_GET_VERS(fc) ((fc) & RTLLIB_FCTL_VERS)
+#define WLAN_FC_GET_TYPE(fc) ((fc) & RTLLIB_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) ((fc) & RTLLIB_FCTL_STYPE)
+#define WLAN_FC_MORE_DATA(fc) ((fc) & RTLLIB_FCTL_MOREDATA)
+
+#define WLAN_FC_GET_FRAMETYPE(fc) ((fc) & RTLLIB_FCTL_FRAMETYPE)
+#define WLAN_GET_SEQ_FRAG(seq) ((seq) & RTLLIB_SCTL_FRAG)
+#define WLAN_GET_SEQ_SEQ(seq) (((seq) & RTLLIB_SCTL_SEQ) >> 4)
+
+/* Authentication algorithms */
+#define WLAN_AUTH_OPEN 0
+#define WLAN_AUTH_SHARED_KEY 1
+#define WLAN_AUTH_LEAP 128
+
+#define WLAN_AUTH_CHALLENGE_LEN 128
+
+#define WLAN_CAPABILITY_ESS (1<<0)
+#define WLAN_CAPABILITY_IBSS (1<<1)
+#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
+#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
+#define WLAN_CAPABILITY_PRIVACY (1<<4)
+#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
+#define WLAN_CAPABILITY_PBCC (1<<6)
+#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
+#define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8)
+#define WLAN_CAPABILITY_QOS (1<<9)
+#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10)
+#define WLAN_CAPABILITY_DSSS_OFDM (1<<13)
+
+/* 802.11g ERP information element */
+#define WLAN_ERP_NON_ERP_PRESENT (1<<0)
+#define WLAN_ERP_USE_PROTECTION (1<<1)
+#define WLAN_ERP_BARKER_PREAMBLE (1<<2)
+
+/* Status codes */
+enum rtllib_statuscode {
+ WLAN_STATUS_SUCCESS = 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE = 1,
+ WLAN_STATUS_CAPS_UNSUPPORTED = 10,
+ WLAN_STATUS_REASSOC_NO_ASSOC = 11,
+ WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12,
+ WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13,
+ WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14,
+ WLAN_STATUS_CHALLENGE_FAIL = 15,
+ WLAN_STATUS_AUTH_TIMEOUT = 16,
+ WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17,
+ WLAN_STATUS_ASSOC_DENIED_RATES = 18,
+ /* 802.11b */
+ WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19,
+ WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20,
+ WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21,
+ /* 802.11h */
+ WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22,
+ WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23,
+ WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24,
+ /* 802.11g */
+ WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25,
+ WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26,
+ /* 802.11i */
+ WLAN_STATUS_INVALID_IE = 40,
+ WLAN_STATUS_INVALID_GROUP_CIPHER = 41,
+ WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42,
+ WLAN_STATUS_INVALID_AKMP = 43,
+ WLAN_STATUS_UNSUPP_RSN_VERSION = 44,
+ WLAN_STATUS_INVALID_RSN_IE_CAP = 45,
+ WLAN_STATUS_CIPHER_SUITE_REJECTED = 46,
+};
+
+/* Reason codes */
+enum rtllib_reasoncode {
+ WLAN_REASON_UNSPECIFIED = 1,
+ WLAN_REASON_PREV_AUTH_NOT_VALID = 2,
+ WLAN_REASON_DEAUTH_LEAVING = 3,
+ WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4,
+ WLAN_REASON_DISASSOC_AP_BUSY = 5,
+ WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6,
+ WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7,
+ WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8,
+ WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9,
+ /* 802.11h */
+ WLAN_REASON_DISASSOC_BAD_POWER = 10,
+ WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11,
+ /* 802.11i */
+ WLAN_REASON_INVALID_IE = 13,
+ WLAN_REASON_MIC_FAILURE = 14,
+ WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15,
+ WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16,
+ WLAN_REASON_IE_DIFFERENT = 17,
+ WLAN_REASON_INVALID_GROUP_CIPHER = 18,
+ WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19,
+ WLAN_REASON_INVALID_AKMP = 20,
+ WLAN_REASON_UNSUPP_RSN_VERSION = 21,
+ WLAN_REASON_INVALID_RSN_IE_CAP = 22,
+ WLAN_REASON_IEEE8021X_FAILED = 23,
+ WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
+};
+
+#define RTLLIB_STATMASK_SIGNAL (1<<0)
+#define RTLLIB_STATMASK_RSSI (1<<1)
+#define RTLLIB_STATMASK_NOISE (1<<2)
+#define RTLLIB_STATMASK_RATE (1<<3)
+#define RTLLIB_STATMASK_WEMASK 0x7
+
+#define RTLLIB_CCK_MODULATION (1<<0)
+#define RTLLIB_OFDM_MODULATION (1<<1)
+
+#define RTLLIB_24GHZ_BAND (1<<0)
+#define RTLLIB_52GHZ_BAND (1<<1)
+
+#define RTLLIB_CCK_RATE_LEN 4
+#define RTLLIB_CCK_RATE_1MB 0x02
+#define RTLLIB_CCK_RATE_2MB 0x04
+#define RTLLIB_CCK_RATE_5MB 0x0B
+#define RTLLIB_CCK_RATE_11MB 0x16
+#define RTLLIB_OFDM_RATE_LEN 8
+#define RTLLIB_OFDM_RATE_6MB 0x0C
+#define RTLLIB_OFDM_RATE_9MB 0x12
+#define RTLLIB_OFDM_RATE_12MB 0x18
+#define RTLLIB_OFDM_RATE_18MB 0x24
+#define RTLLIB_OFDM_RATE_24MB 0x30
+#define RTLLIB_OFDM_RATE_36MB 0x48
+#define RTLLIB_OFDM_RATE_48MB 0x60
+#define RTLLIB_OFDM_RATE_54MB 0x6C
+#define RTLLIB_BASIC_RATE_MASK 0x80
+
+#define RTLLIB_CCK_RATE_1MB_MASK (1<<0)
+#define RTLLIB_CCK_RATE_2MB_MASK (1<<1)
+#define RTLLIB_CCK_RATE_5MB_MASK (1<<2)
+#define RTLLIB_CCK_RATE_11MB_MASK (1<<3)
+#define RTLLIB_OFDM_RATE_6MB_MASK (1<<4)
+#define RTLLIB_OFDM_RATE_9MB_MASK (1<<5)
+#define RTLLIB_OFDM_RATE_12MB_MASK (1<<6)
+#define RTLLIB_OFDM_RATE_18MB_MASK (1<<7)
+#define RTLLIB_OFDM_RATE_24MB_MASK (1<<8)
+#define RTLLIB_OFDM_RATE_36MB_MASK (1<<9)
+#define RTLLIB_OFDM_RATE_48MB_MASK (1<<10)
+#define RTLLIB_OFDM_RATE_54MB_MASK (1<<11)
+
+#define RTLLIB_CCK_RATES_MASK 0x0000000F
+#define RTLLIB_CCK_BASIC_RATES_MASK (RTLLIB_CCK_RATE_1MB_MASK | \
+ RTLLIB_CCK_RATE_2MB_MASK)
+#define RTLLIB_CCK_DEFAULT_RATES_MASK (RTLLIB_CCK_BASIC_RATES_MASK | \
+ RTLLIB_CCK_RATE_5MB_MASK | \
+ RTLLIB_CCK_RATE_11MB_MASK)
+
+#define RTLLIB_OFDM_RATES_MASK 0x00000FF0
+#define RTLLIB_OFDM_BASIC_RATES_MASK (RTLLIB_OFDM_RATE_6MB_MASK | \
+ RTLLIB_OFDM_RATE_12MB_MASK | \
+ RTLLIB_OFDM_RATE_24MB_MASK)
+#define RTLLIB_OFDM_DEFAULT_RATES_MASK (RTLLIB_OFDM_BASIC_RATES_MASK | \
+ RTLLIB_OFDM_RATE_9MB_MASK | \
+ RTLLIB_OFDM_RATE_18MB_MASK | \
+ RTLLIB_OFDM_RATE_36MB_MASK | \
+ RTLLIB_OFDM_RATE_48MB_MASK | \
+ RTLLIB_OFDM_RATE_54MB_MASK)
+#define RTLLIB_DEFAULT_RATES_MASK (RTLLIB_OFDM_DEFAULT_RATES_MASK | \
+ RTLLIB_CCK_DEFAULT_RATES_MASK)
+
+#define RTLLIB_NUM_OFDM_RATES 8
+#define RTLLIB_NUM_CCK_RATES 4
+#define RTLLIB_OFDM_SHIFT_MASK_A 4
+
+
+/* this is stolen and modified from the madwifi driver*/
+#define RTLLIB_FC0_TYPE_MASK 0x0c
+#define RTLLIB_FC0_TYPE_DATA 0x08
+#define RTLLIB_FC0_SUBTYPE_MASK 0xB0
+#define RTLLIB_FC0_SUBTYPE_QOS 0x80
+
+#define RTLLIB_QOS_HAS_SEQ(fc) \
+ (((fc) & (RTLLIB_FC0_TYPE_MASK | RTLLIB_FC0_SUBTYPE_MASK)) == \
+ (RTLLIB_FC0_TYPE_DATA | RTLLIB_FC0_SUBTYPE_QOS))
+
+/* this is stolen from ipw2200 driver */
+#define IEEE_IBSS_MAC_HASH_SIZE 31
+struct ieee_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num[17];
+ u16 frag_num[17];
+ unsigned long packet_time[17];
+ struct list_head list;
+};
+
+/* NOTE: This data is for statistical purposes; not all hardware provides this
+ * information for frames received. Not setting these will not cause
+ * any adverse affects. */
+struct rtllib_rx_stats {
+ u64 mac_time;
+ s8 rssi;
+ u8 signal;
+ u8 noise;
+ u16 rate; /* in 100 kbps */
+ u8 received_channel;
+ u8 control;
+ u8 mask;
+ u8 freq;
+ u16 len;
+ u64 tsf;
+ u32 beacon_time;
+ u8 nic_type;
+ u16 Length;
+ u8 SignalQuality;
+ s32 RecvSignalPower;
+ s8 RxPower;
+ u8 SignalStrength;
+ u16 bHwError:1;
+ u16 bCRC:1;
+ u16 bICV:1;
+ u16 bShortPreamble:1;
+ u16 Antenna:1;
+ u16 Decrypted:1;
+ u16 Wakeup:1;
+ u16 Reserved0:1;
+ u8 AGC;
+ u32 TimeStampLow;
+ u32 TimeStampHigh;
+ bool bShift;
+ bool bIsQosData;
+ u8 UserPriority;
+
+ u8 RxDrvInfoSize;
+ u8 RxBufShift;
+ bool bIsAMPDU;
+ bool bFirstMPDU;
+ bool bContainHTC;
+ bool RxIs40MHzPacket;
+ u32 RxPWDBAll;
+ u8 RxMIMOSignalStrength[4];
+ s8 RxMIMOSignalQuality[2];
+ bool bPacketMatchBSSID;
+ bool bIsCCK;
+ bool bPacketToSelf;
+ u8 *virtual_address;
+ u16 packetlength;
+ u16 fraglength;
+ u16 fragoffset;
+ u16 ntotalfrag;
+ bool bisrxaggrsubframe;
+ bool bPacketBeacon;
+ bool bToSelfBA;
+ char cck_adc_pwdb[4];
+ u16 Seq_Num;
+ u8 nTotalAggPkt;
+};
+
+/* IEEE 802.11 requires that STA supports concurrent reception of at least
+ * three fragmented frames. This define can be increased to support more
+ * concurrent frames, but it should be noted that each entry can consume about
+ * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
+#define RTLLIB_FRAG_CACHE_LEN 4
+
+struct rtllib_frag_entry {
+ unsigned long first_frag_time;
+ unsigned int seq;
+ unsigned int last_frag;
+ struct sk_buff *skb;
+ u8 src_addr[ETH_ALEN];
+ u8 dst_addr[ETH_ALEN];
+};
+
+struct rtllib_stats {
+ unsigned int tx_unicast_frames;
+ unsigned int tx_multicast_frames;
+ unsigned int tx_fragments;
+ unsigned int tx_unicast_octets;
+ unsigned int tx_multicast_octets;
+ unsigned int tx_deferred_transmissions;
+ unsigned int tx_single_retry_frames;
+ unsigned int tx_multiple_retry_frames;
+ unsigned int tx_retry_limit_exceeded;
+ unsigned int tx_discards;
+ unsigned int rx_unicast_frames;
+ unsigned int rx_multicast_frames;
+ unsigned int rx_fragments;
+ unsigned int rx_unicast_octets;
+ unsigned int rx_multicast_octets;
+ unsigned int rx_fcs_errors;
+ unsigned int rx_discards_no_buffer;
+ unsigned int tx_discards_wrong_sa;
+ unsigned int rx_discards_undecryptable;
+ unsigned int rx_message_in_msg_fragments;
+ unsigned int rx_message_in_bad_msg_fragments;
+};
+
+struct rtllib_device;
+
+#include "rtllib_crypt.h"
+
+#define SEC_KEY_1 (1<<0)
+#define SEC_KEY_2 (1<<1)
+#define SEC_KEY_3 (1<<2)
+#define SEC_KEY_4 (1<<3)
+#define SEC_ACTIVE_KEY (1<<4)
+#define SEC_AUTH_MODE (1<<5)
+#define SEC_UNICAST_GROUP (1<<6)
+#define SEC_LEVEL (1<<7)
+#define SEC_ENABLED (1<<8)
+#define SEC_ENCRYPT (1<<9)
+
+#define SEC_LEVEL_0 0 /* None */
+#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
+#define SEC_LEVEL_2 2 /* Level 1 + TKIP */
+#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */
+#define SEC_LEVEL_3 4 /* Level 2 + CCMP */
+
+#define SEC_ALG_NONE 0
+#define SEC_ALG_WEP 1
+#define SEC_ALG_TKIP 2
+#define SEC_ALG_CCMP 4
+
+#define WEP_KEYS 4
+#define WEP_KEY_LEN 13
+#define SCM_KEY_LEN 32
+#define SCM_TEMPORAL_KEY_LENGTH 16
+
+struct rtllib_security {
+ u16 active_key:2,
+ enabled:1,
+ auth_mode:2,
+ auth_algo:4,
+ unicast_uses_group:1,
+ encrypt:1;
+ u8 key_sizes[WEP_KEYS];
+ u8 keys[WEP_KEYS][SCM_KEY_LEN];
+ u8 level;
+ u16 flags;
+} __packed;
+
+
+/*
+ 802.11 data frame from AP
+ ,-------------------------------------------------------------------.
+Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
+ |------|------|---------|---------|---------|------|---------|------|
+Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | frame | fcs |
+ | | tion | (BSSID) | | | ence | data | |
+ `-------------------------------------------------------------------'
+Total: 28-2340 bytes
+*/
+
+/* Management Frame Information Element Types */
+enum rtllib_mfie {
+ MFIE_TYPE_SSID = 0,
+ MFIE_TYPE_RATES = 1,
+ MFIE_TYPE_FH_SET = 2,
+ MFIE_TYPE_DS_SET = 3,
+ MFIE_TYPE_CF_SET = 4,
+ MFIE_TYPE_TIM = 5,
+ MFIE_TYPE_IBSS_SET = 6,
+ MFIE_TYPE_COUNTRY = 7,
+ MFIE_TYPE_HOP_PARAMS = 8,
+ MFIE_TYPE_HOP_TABLE = 9,
+ MFIE_TYPE_REQUEST = 10,
+ MFIE_TYPE_CHALLENGE = 16,
+ MFIE_TYPE_POWER_CONSTRAINT = 32,
+ MFIE_TYPE_POWER_CAPABILITY = 33,
+ MFIE_TYPE_TPC_REQUEST = 34,
+ MFIE_TYPE_TPC_REPORT = 35,
+ MFIE_TYPE_SUPP_CHANNELS = 36,
+ MFIE_TYPE_CSA = 37,
+ MFIE_TYPE_MEASURE_REQUEST = 38,
+ MFIE_TYPE_MEASURE_REPORT = 39,
+ MFIE_TYPE_QUIET = 40,
+ MFIE_TYPE_IBSS_DFS = 41,
+ MFIE_TYPE_ERP = 42,
+ MFIE_TYPE_HT_CAP = 45,
+ MFIE_TYPE_RSN = 48,
+ MFIE_TYPE_RATES_EX = 50,
+ MFIE_TYPE_HT_INFO = 61,
+ MFIE_TYPE_AIRONET = 133,
+ MFIE_TYPE_GENERIC = 221,
+ MFIE_TYPE_QOS_PARAMETER = 222,
+};
+
+/* Minimal header; can be used for passing 802.11 frames with sufficient
+ * information to determine what type of underlying data type is actually
+ * stored in the data. */
+struct rtllib_pspoll_hdr {
+ __le16 frame_ctl;
+ __le16 aid;
+ u8 bssid[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+} __packed;
+
+struct rtllib_hdr {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 payload[0];
+} __packed;
+
+struct rtllib_hdr_1addr {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 payload[0];
+} __packed;
+
+struct rtllib_hdr_2addr {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 payload[0];
+} __packed;
+
+struct rtllib_hdr_3addr {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctl;
+ u8 payload[0];
+} __packed;
+
+struct rtllib_hdr_4addr {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctl;
+ u8 addr4[ETH_ALEN];
+ u8 payload[0];
+} __packed;
+
+struct rtllib_hdr_3addrqos {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctl;
+ __le16 qos_ctl;
+ u8 payload[0];
+} __packed;
+
+struct rtllib_hdr_4addrqos {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctl;
+ u8 payload[0];
+} __packed;
+
+struct rtllib_info_element {
+ u8 id;
+ u8 len;
+ u8 data[0];
+} __packed;
+
+struct rtllib_authentication {
+ struct rtllib_hdr_3addr header;
+ __le16 algorithm;
+ __le16 transaction;
+ __le16 status;
+ /*challenge*/
+ struct rtllib_info_element info_element[0];
+} __packed;
+
+struct rtllib_disauth {
+ struct rtllib_hdr_3addr header;
+ __le16 reason;
+} __packed;
+
+struct rtllib_disassoc {
+ struct rtllib_hdr_3addr header;
+ __le16 reason;
+} __packed;
+
+struct rtllib_probe_request {
+ struct rtllib_hdr_3addr header;
+ /* SSID, supported rates */
+ struct rtllib_info_element info_element[0];
+} __packed;
+
+struct rtllib_probe_response {
+ struct rtllib_hdr_3addr header;
+ u32 time_stamp[2];
+ __le16 beacon_interval;
+ __le16 capability;
+ /* SSID, supported rates, FH params, DS params,
+ * CF params, IBSS params, TIM (if beacon), RSN */
+ struct rtllib_info_element info_element[0];
+} __packed;
+
+/* Alias beacon for probe_response */
+#define rtllib_beacon rtllib_probe_response
+
+struct rtllib_assoc_request_frame {
+ struct rtllib_hdr_3addr header;
+ __le16 capability;
+ __le16 listen_interval;
+ /* SSID, supported rates, RSN */
+ struct rtllib_info_element info_element[0];
+} __packed;
+
+struct rtllib_reassoc_request_frame {
+ struct rtllib_hdr_3addr header;
+ __le16 capability;
+ __le16 listen_interval;
+ u8 current_ap[ETH_ALEN];
+ /* SSID, supported rates, RSN */
+ struct rtllib_info_element info_element[0];
+} __packed;
+
+struct rtllib_assoc_response_frame {
+ struct rtllib_hdr_3addr header;
+ __le16 capability;
+ __le16 status;
+ __le16 aid;
+ struct rtllib_info_element info_element[0]; /* supported rates */
+} __packed;
+
+struct rtllib_txb {
+ u8 nr_frags;
+ u8 encrypted;
+ u8 queue_index;
+ u8 rts_included;
+ u16 reserved;
+ __le16 frag_size;
+ __le16 payload_size;
+ struct sk_buff *fragments[0];
+};
+
+#define MAX_TX_AGG_COUNT 16
+struct rtllib_drv_agg_txb {
+ u8 nr_drv_agg_frames;
+ struct sk_buff *tx_agg_frames[MAX_TX_AGG_COUNT];
+} __packed;
+
+#define MAX_SUBFRAME_COUNT 64
+struct rtllib_rxb {
+ u8 nr_subframes;
+ struct sk_buff *subframes[MAX_SUBFRAME_COUNT];
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+} __packed;
+
+union frameqos {
+ u16 shortdata;
+ u8 chardata[2];
+ struct {
+ u16 tid:4;
+ u16 eosp:1;
+ u16 ack_policy:2;
+ u16 reserved:1;
+ u16 txop:8;
+ } field;
+};
+
+/* SWEEP TABLE ENTRIES NUMBER*/
+#define MAX_SWEEP_TAB_ENTRIES 42
+#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7
+/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs
+ * only use 8, and then use extended rates for the remaining supported
+ * rates. Other APs, however, stick all of their supported rates on the
+ * main rates information element... */
+#define MAX_RATES_LENGTH ((u8)12)
+#define MAX_RATES_EX_LENGTH ((u8)16)
+#define MAX_NETWORK_COUNT 96
+
+#define MAX_CHANNEL_NUMBER 161
+#define RTLLIB_SOFTMAC_SCAN_TIME 100
+#define RTLLIB_SOFTMAC_ASSOC_RETRY_TIME (HZ * 2)
+
+#define CRC_LENGTH 4U
+
+#define MAX_WPA_IE_LEN 64
+#define MAX_WZC_IE_LEN 256
+
+#define NETWORK_EMPTY_ESSID (1<<0)
+#define NETWORK_HAS_OFDM (1<<1)
+#define NETWORK_HAS_CCK (1<<2)
+
+/* QoS structure */
+#define NETWORK_HAS_QOS_PARAMETERS (1<<3)
+#define NETWORK_HAS_QOS_INFORMATION (1<<4)
+#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \
+ NETWORK_HAS_QOS_INFORMATION)
+/* 802.11h */
+#define NETWORK_HAS_POWER_CONSTRAINT (1<<5)
+#define NETWORK_HAS_CSA (1<<6)
+#define NETWORK_HAS_QUIET (1<<7)
+#define NETWORK_HAS_IBSS_DFS (1<<8)
+#define NETWORK_HAS_TPC_REPORT (1<<9)
+
+#define NETWORK_HAS_ERP_VALUE (1<<10)
+
+#define QOS_QUEUE_NUM 4
+#define QOS_OUI_LEN 3
+#define QOS_OUI_TYPE 2
+#define QOS_ELEMENT_ID 221
+#define QOS_OUI_INFO_SUB_TYPE 0
+#define QOS_OUI_PARAM_SUB_TYPE 1
+#define QOS_VERSION_1 1
+#define QOS_AIFSN_MIN_VALUE 2
+
+struct rtllib_qos_information_element {
+ u8 elementID;
+ u8 length;
+ u8 qui[QOS_OUI_LEN];
+ u8 qui_type;
+ u8 qui_subtype;
+ u8 version;
+ u8 ac_info;
+} __packed;
+
+struct rtllib_qos_ac_parameter {
+ u8 aci_aifsn;
+ u8 ecw_min_max;
+ __le16 tx_op_limit;
+} __packed;
+
+struct rtllib_qos_parameter_info {
+ struct rtllib_qos_information_element info_element;
+ u8 reserved;
+ struct rtllib_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM];
+} __packed;
+
+struct rtllib_qos_parameters {
+ __le16 cw_min[QOS_QUEUE_NUM];
+ __le16 cw_max[QOS_QUEUE_NUM];
+ u8 aifs[QOS_QUEUE_NUM];
+ u8 flag[QOS_QUEUE_NUM];
+ __le16 tx_op_limit[QOS_QUEUE_NUM];
+} __packed;
+
+struct rtllib_qos_data {
+ struct rtllib_qos_parameters parameters;
+ unsigned int wmm_acm;
+ int active;
+ int supported;
+ u8 param_count;
+ u8 old_param_count;
+};
+
+struct rtllib_tim_parameters {
+ u8 tim_count;
+ u8 tim_period;
+} __packed;
+
+struct rtllib_wmm_ac_param {
+ u8 ac_aci_acm_aifsn;
+ u8 ac_ecwmin_ecwmax;
+ u16 ac_txop_limit;
+};
+
+struct rtllib_wmm_ts_info {
+ u8 ac_dir_tid;
+ u8 ac_up_psb;
+ u8 reserved;
+} __packed;
+
+struct rtllib_wmm_tspec_elem {
+ struct rtllib_wmm_ts_info ts_info;
+ u16 norm_msdu_size;
+ u16 max_msdu_size;
+ u32 min_serv_inter;
+ u32 max_serv_inter;
+ u32 inact_inter;
+ u32 suspen_inter;
+ u32 serv_start_time;
+ u32 min_data_rate;
+ u32 mean_data_rate;
+ u32 peak_data_rate;
+ u32 max_burst_size;
+ u32 delay_bound;
+ u32 min_phy_rate;
+ u16 surp_band_allow;
+ u16 medium_time;
+} __packed;
+
+enum eap_type {
+ EAP_PACKET = 0,
+ EAPOL_START,
+ EAPOL_LOGOFF,
+ EAPOL_KEY,
+ EAPOL_ENCAP_ASF_ALERT
+};
+
+static const char *eap_types[] = {
+ [EAP_PACKET] = "EAP-Packet",
+ [EAPOL_START] = "EAPOL-Start",
+ [EAPOL_LOGOFF] = "EAPOL-Logoff",
+ [EAPOL_KEY] = "EAPOL-Key",
+ [EAPOL_ENCAP_ASF_ALERT] = "EAPOL-Encap-ASF-Alert"
+};
+
+static inline const char *eap_get_type(int type)
+{
+ return ((u32)type >= ARRAY_SIZE(eap_types)) ? "Unknown" :
+ eap_types[type];
+}
+static inline u8 Frame_QoSTID(u8 *buf)
+{
+ struct rtllib_hdr_3addr *hdr;
+ u16 fc;
+ hdr = (struct rtllib_hdr_3addr *)buf;
+ fc = le16_to_cpu(hdr->frame_ctl);
+ return (u8)((union frameqos *)(buf + (((fc & RTLLIB_FCTL_TODS) &&
+ (fc & RTLLIB_FCTL_FROMDS)) ? 30 : 24)))->field.tid;
+}
+
+
+struct eapol {
+ u8 snap[6];
+ u16 ethertype;
+ u8 version;
+ u8 type;
+ u16 length;
+} __packed;
+
+struct rtllib_softmac_stats {
+ unsigned int rx_ass_ok;
+ unsigned int rx_ass_err;
+ unsigned int rx_probe_rq;
+ unsigned int tx_probe_rs;
+ unsigned int tx_beacons;
+ unsigned int rx_auth_rq;
+ unsigned int rx_auth_rs_ok;
+ unsigned int rx_auth_rs_err;
+ unsigned int tx_auth_rq;
+ unsigned int no_auth_rs;
+ unsigned int no_ass_rs;
+ unsigned int tx_ass_rq;
+ unsigned int rx_ass_rq;
+ unsigned int tx_probe_rq;
+ unsigned int reassoc;
+ unsigned int swtxstop;
+ unsigned int swtxawake;
+ unsigned char CurrentShowTxate;
+ unsigned char last_packet_rate;
+ unsigned int txretrycount;
+};
+
+#define BEACON_PROBE_SSID_ID_POSITION 12
+
+struct rtllib_info_element_hdr {
+ u8 id;
+ u8 len;
+} __packed;
+
+/*
+ * These are the data types that can make up management packets
+ *
+ u16 auth_algorithm;
+ u16 auth_sequence;
+ u16 beacon_interval;
+ u16 capability;
+ u8 current_ap[ETH_ALEN];
+ u16 listen_interval;
+ struct {
+ u16 association_id:14, reserved:2;
+ } __packed;
+ u32 time_stamp[2];
+ u16 reason;
+ u16 status;
+*/
+
+#define RTLLIB_DEFAULT_TX_ESSID "Penguin"
+#define RTLLIB_DEFAULT_BASIC_RATE 2
+
+enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
+#define MAX_SP_Len (WMM_all_frame << 4)
+#define RTLLIB_QOS_TID 0x0f
+#define QOS_CTL_NOTCONTAIN_ACK (0x01 << 5)
+
+#define RTLLIB_DTIM_MBCAST 4
+#define RTLLIB_DTIM_UCAST 2
+#define RTLLIB_DTIM_VALID 1
+#define RTLLIB_DTIM_INVALID 0
+
+#define RTLLIB_PS_DISABLED 0
+#define RTLLIB_PS_UNICAST RTLLIB_DTIM_UCAST
+#define RTLLIB_PS_MBCAST RTLLIB_DTIM_MBCAST
+
+#define WME_AC_BK 0x00
+#define WME_AC_BE 0x01
+#define WME_AC_VI 0x02
+#define WME_AC_VO 0x03
+#define WME_ACI_MASK 0x03
+#define WME_AIFSN_MASK 0x03
+#define WME_AC_PRAM_LEN 16
+
+#define MAX_RECEIVE_BUFFER_SIZE 9100
+
+#define UP2AC(up) ( \
+ ((up) < 1) ? WME_AC_BE : \
+ ((up) < 3) ? WME_AC_BK : \
+ ((up) < 4) ? WME_AC_BE : \
+ ((up) < 6) ? WME_AC_VI : \
+ WME_AC_VO)
+
+#define AC2UP(_ac) ( \
+ ((_ac) == WME_AC_VO) ? 6 : \
+ ((_ac) == WME_AC_VI) ? 5 : \
+ ((_ac) == WME_AC_BK) ? 1 : \
+ 0)
+
+#define ETHER_ADDR_LEN 6 /* length of an Ethernet address */
+#define ETHERNET_HEADER_SIZE 14 /* length of two Ethernet address
+ * plus ether type*/
+
+struct ether_header {
+ u8 ether_dhost[ETHER_ADDR_LEN];
+ u8 ether_shost[ETHER_ADDR_LEN];
+ u16 ether_type;
+} __packed;
+
+#ifndef ETHERTYPE_PAE
+#define ETHERTYPE_PAE 0x888e /* EAPOL PAE/802.1x */
+#endif
+#ifndef ETHERTYPE_IP
+#define ETHERTYPE_IP 0x0800 /* IP protocol */
+#endif
+
+
+enum erp_t {
+ ERP_NonERPpresent = 0x01,
+ ERP_UseProtection = 0x02,
+ ERP_BarkerPreambleMode = 0x04,
+};
+
+struct rtllib_network {
+ /* These entries are used to identify a unique network */
+ u8 bssid[ETH_ALEN];
+ u8 channel;
+ /* Ensure null-terminated for any debug msgs */
+ u8 ssid[IW_ESSID_MAX_SIZE + 1];
+ u8 ssid_len;
+ u8 hidden_ssid[IW_ESSID_MAX_SIZE + 1];
+ u8 hidden_ssid_len;
+ struct rtllib_qos_data qos_data;
+
+ bool bWithAironetIE;
+ bool bCkipSupported;
+ bool bCcxRmEnable;
+ u16 CcxRmState[2];
+ bool bMBssidValid;
+ u8 MBssidMask;
+ u8 MBssid[6];
+ bool bWithCcxVerNum;
+ u8 BssCcxVerNumber;
+ /* These are network statistics */
+ struct rtllib_rx_stats stats;
+ u16 capability;
+ u8 rates[MAX_RATES_LENGTH];
+ u8 rates_len;
+ u8 rates_ex[MAX_RATES_EX_LENGTH];
+ u8 rates_ex_len;
+ unsigned long last_scanned;
+ u8 mode;
+ u32 flags;
+ u32 last_associate;
+ u32 time_stamp[2];
+ u16 beacon_interval;
+ u16 listen_interval;
+ u16 atim_window;
+ u8 erp_value;
+ u8 wpa_ie[MAX_WPA_IE_LEN];
+ size_t wpa_ie_len;
+ u8 rsn_ie[MAX_WPA_IE_LEN];
+ size_t rsn_ie_len;
+ u8 wzc_ie[MAX_WZC_IE_LEN];
+ size_t wzc_ie_len;
+
+ struct rtllib_tim_parameters tim;
+ u8 dtim_period;
+ u8 dtim_data;
+ u64 last_dtim_sta_time;
+
+ u8 wmm_info;
+ struct rtllib_wmm_ac_param wmm_param[4];
+ u8 Turbo_Enable;
+ u16 CountryIeLen;
+ u8 CountryIeBuf[MAX_IE_LEN];
+ struct bss_ht bssht;
+ bool broadcom_cap_exist;
+ bool realtek_cap_exit;
+ bool marvell_cap_exist;
+ bool ralink_cap_exist;
+ bool atheros_cap_exist;
+ bool cisco_cap_exist;
+ bool airgo_cap_exist;
+ bool unknown_cap_exist;
+ bool berp_info_valid;
+ bool buseprotection;
+ bool bIsNetgear854T;
+ u8 SignalStrength;
+ u8 RSSI;
+ struct list_head list;
+};
+
+#if 1
+enum rtllib_state {
+
+ /* the card is not linked at all */
+ RTLLIB_NOLINK = 0,
+
+ /* RTLLIB_ASSOCIATING* are for BSS client mode
+ * the driver shall not perform RX filtering unless
+ * the state is LINKED.
+ * The driver shall just check for the state LINKED and
+ * defaults to NOLINK for ALL the other states (including
+ * LINKED_SCANNING)
+ */
+
+ /* the association procedure will start (wq scheduling)*/
+ RTLLIB_ASSOCIATING,
+ RTLLIB_ASSOCIATING_RETRY,
+
+ /* the association procedure is sending AUTH request*/
+ RTLLIB_ASSOCIATING_AUTHENTICATING,
+
+ /* the association procedure has successfully authentcated
+ * and is sending association request
+ */
+ RTLLIB_ASSOCIATING_AUTHENTICATED,
+
+ /* the link is ok. the card associated to a BSS or linked
+ * to a ibss cell or acting as an AP and creating the bss
+ */
+ RTLLIB_LINKED,
+
+ /* same as LINKED, but the driver shall apply RX filter
+ * rules as we are in NO_LINK mode. As the card is still
+ * logically linked, but it is doing a syncro site survey
+ * then it will be back to LINKED state.
+ */
+ RTLLIB_LINKED_SCANNING,
+};
+#else
+enum rtllib_state {
+ RTLLIB_UNINITIALIZED = 0,
+ RTLLIB_INITIALIZED,
+ RTLLIB_ASSOCIATING,
+ RTLLIB_ASSOCIATED,
+ RTLLIB_AUTHENTICATING,
+ RTLLIB_AUTHENTICATED,
+ RTLLIB_SHUTDOWN
+};
+#endif
+
+#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
+#define DEFAULT_FTS 2346
+
+#define CFG_RTLLIB_RESERVE_FCS (1<<0)
+#define CFG_RTLLIB_COMPUTE_FCS (1<<1)
+#define CFG_RTLLIB_RTS (1<<2)
+
+#define RTLLIB_24GHZ_MIN_CHANNEL 1
+#define RTLLIB_24GHZ_MAX_CHANNEL 14
+#define RTLLIB_24GHZ_CHANNELS (RTLLIB_24GHZ_MAX_CHANNEL - \
+ RTLLIB_24GHZ_MIN_CHANNEL + 1)
+
+#define RTLLIB_52GHZ_MIN_CHANNEL 34
+#define RTLLIB_52GHZ_MAX_CHANNEL 165
+#define RTLLIB_52GHZ_CHANNELS (RTLLIB_52GHZ_MAX_CHANNEL - \
+ RTLLIB_52GHZ_MIN_CHANNEL + 1)
+#ifndef eqMacAddr
+#define eqMacAddr(a, b) \
+ (((a)[0] == (b)[0] && (a)[1] == (b)[1] && (a)[2] == (b)[2] && \
+ (a)[3] == (b)[3] && (a)[4] == (b)[4] && (a)[5] == (b)[5]) ? 1 : 0)
+#endif
+struct tx_pending {
+ int frag;
+ struct rtllib_txb *txb;
+};
+
+struct bandwidth_autoswitch {
+ long threshold_20Mhzto40Mhz;
+ long threshold_40Mhzto20Mhz;
+ bool bforced_tx20Mhz;
+ bool bautoswitch_enable;
+};
+
+
+
+#define REORDER_WIN_SIZE 128
+#define REORDER_ENTRY_NUM 128
+struct rx_reorder_entry {
+ struct list_head List;
+ u16 SeqNum;
+ struct rtllib_rxb *prxb;
+};
+enum fsync_state {
+ Default_Fsync,
+ HW_Fsync,
+ SW_Fsync
+};
+
+enum rt_ps_mode {
+ eActive,
+ eMaxPs,
+ eFastPs,
+ eAutoPs,
+};
+
+enum ips_callback_function {
+ IPS_CALLBACK_NONE = 0,
+ IPS_CALLBACK_MGNT_LINK_REQUEST = 1,
+ IPS_CALLBACK_JOIN_REQUEST = 2,
+};
+
+enum rt_join_action {
+ RT_JOIN_INFRA = 1,
+ RT_JOIN_IBSS = 2,
+ RT_START_IBSS = 3,
+ RT_NO_ACTION = 4,
+};
+
+struct ibss_parms {
+ u16 atimWin;
+};
+#define MAX_NUM_RATES 264
+
+enum rt_rf_power_state {
+ eRfOn,
+ eRfSleep,
+ eRfOff
+};
+
+#define MAX_SUPPORT_WOL_PATTERN_NUM 8
+
+#define MAX_WOL_BIT_MASK_SIZE 16
+#define MAX_WOL_PATTERN_SIZE 128
+
+enum wol_pattern_type {
+ eNetBIOS = 0,
+ eIPv4IPv6ARP,
+ eIPv4IPv6TCPSYN,
+ eMACIDOnly,
+ eNoDefined,
+};
+
+struct rt_pm_wol_info {
+ u32 PatternId;
+ u32 Mask[4];
+ u16 CrcRemainder;
+ u8 WFMIndex;
+ enum wol_pattern_type PatternType;
+};
+
+struct rt_pwr_save_ctrl {
+
+ bool bInactivePs;
+ bool bIPSModeBackup;
+ bool bHaltAdapterClkRQ;
+ bool bSwRfProcessing;
+ enum rt_rf_power_state eInactivePowerState;
+ struct work_struct InactivePsWorkItem;
+ struct timer_list InactivePsTimer;
+
+ enum ips_callback_function ReturnPoint;
+
+ bool bTmpBssDesc;
+ enum rt_join_action tmpJoinAction;
+ struct rtllib_network tmpBssDesc;
+
+ bool bTmpScanOnly;
+ bool bTmpActiveScan;
+ bool bTmpFilterHiddenAP;
+ bool bTmpUpdateParms;
+ u8 tmpSsidBuf[33];
+ struct octet_string tmpSsid2Scan;
+ bool bTmpSsid2Scan;
+ u8 tmpNetworkType;
+ u8 tmpChannelNumber;
+ u16 tmpBcnPeriod;
+ u8 tmpDtimPeriod;
+ u16 tmpmCap;
+ struct octet_string tmpSuppRateSet;
+ u8 tmpSuppRateBuf[MAX_NUM_RATES];
+ bool bTmpSuppRate;
+ struct ibss_parms tmpIbpm;
+ bool bTmpIbpm;
+
+ bool bLeisurePs;
+ u32 PowerProfile;
+ u8 LpsIdleCount;
+ u8 RegMaxLPSAwakeIntvl;
+ u8 LPSAwakeIntvl;
+
+ u32 CurPsLevel;
+ u32 RegRfPsLevel;
+
+ bool bFwCtrlLPS;
+ u8 FWCtrlPSMode;
+
+ bool LinkReqInIPSRFOffPgs;
+ bool BufConnectinfoBefore;
+
+
+ bool bGpioRfSw;
+
+ u8 RegAMDPciASPM;
+
+ u8 oWLANMode;
+ struct rt_pm_wol_info PmWoLPatternInfo[MAX_SUPPORT_WOL_PATTERN_NUM];
+
+};
+
+#define RT_RF_CHANGE_SOURCE u32
+
+#define RF_CHANGE_BY_SW BIT31
+#define RF_CHANGE_BY_HW BIT30
+#define RF_CHANGE_BY_PS BIT29
+#define RF_CHANGE_BY_IPS BIT28
+#define RF_CHANGE_BY_INIT 0
+
+enum country_code_type {
+ COUNTRY_CODE_FCC = 0,
+ COUNTRY_CODE_IC = 1,
+ COUNTRY_CODE_ETSI = 2,
+ COUNTRY_CODE_SPAIN = 3,
+ COUNTRY_CODE_FRANCE = 4,
+ COUNTRY_CODE_MKK = 5,
+ COUNTRY_CODE_MKK1 = 6,
+ COUNTRY_CODE_ISRAEL = 7,
+ COUNTRY_CODE_TELEC = 8,
+ COUNTRY_CODE_MIC = 9,
+ COUNTRY_CODE_GLOBAL_DOMAIN = 10,
+ COUNTRY_CODE_WORLD_WIDE_13 = 11,
+ COUNTRY_CODE_TELEC_NETGEAR = 12,
+ COUNTRY_CODE_MAX
+};
+
+enum scan_op_backup_opt {
+ SCAN_OPT_BACKUP = 0,
+ SCAN_OPT_RESTORE,
+ SCAN_OPT_MAX
+};
+
+enum fw_cmd_io_type {
+ FW_CMD_DIG_ENABLE = 0,
+ FW_CMD_DIG_DISABLE = 1,
+ FW_CMD_DIG_HALT = 2,
+ FW_CMD_DIG_RESUME = 3,
+ FW_CMD_HIGH_PWR_ENABLE = 4,
+ FW_CMD_HIGH_PWR_DISABLE = 5,
+ FW_CMD_RA_RESET = 6,
+ FW_CMD_RA_ACTIVE = 7,
+ FW_CMD_RA_REFRESH_N = 8,
+ FW_CMD_RA_REFRESH_BG = 9,
+ FW_CMD_RA_INIT = 10,
+ FW_CMD_IQK_ENABLE = 11,
+ FW_CMD_TXPWR_TRACK_ENABLE = 12,
+ FW_CMD_TXPWR_TRACK_DISABLE = 13,
+ FW_CMD_TXPWR_TRACK_THERMAL = 14,
+ FW_CMD_PAUSE_DM_BY_SCAN = 15,
+ FW_CMD_RESUME_DM_BY_SCAN = 16,
+ FW_CMD_RA_REFRESH_N_COMB = 17,
+ FW_CMD_RA_REFRESH_BG_COMB = 18,
+ FW_CMD_ANTENNA_SW_ENABLE = 19,
+ FW_CMD_ANTENNA_SW_DISABLE = 20,
+ FW_CMD_TX_FEEDBACK_CCX_ENABLE = 21,
+ FW_CMD_LPS_ENTER = 22,
+ FW_CMD_LPS_LEAVE = 23,
+ FW_CMD_DIG_MODE_SS = 24,
+ FW_CMD_DIG_MODE_FA = 25,
+ FW_CMD_ADD_A2_ENTRY = 26,
+ FW_CMD_CTRL_DM_BY_DRIVER = 27,
+ FW_CMD_CTRL_DM_BY_DRIVER_NEW = 28,
+ FW_CMD_PAPE_CONTROL = 29,
+ FW_CMD_CHAN_SET = 30,
+};
+
+#define RT_MAX_LD_SLOT_NUM 10
+struct rt_link_detect {
+
+ u32 NumRecvBcnInPeriod;
+ u32 NumRecvDataInPeriod;
+
+ u32 RxBcnNum[RT_MAX_LD_SLOT_NUM];
+ u32 RxDataNum[RT_MAX_LD_SLOT_NUM];
+ u16 SlotNum;
+ u16 SlotIndex;
+
+ u32 NumTxOkInPeriod;
+ u32 NumRxOkInPeriod;
+ u32 NumRxUnicastOkInPeriod;
+ bool bBusyTraffic;
+ bool bHigherBusyTraffic;
+ bool bHigherBusyRxTraffic;
+ u8 IdleCount;
+ u32 NumTxUnicastOkInPeriod;
+ u32 LastNumTxUnicast;
+ u32 LastNumRxUnicast;
+};
+
+struct sw_cam_table {
+
+ u8 macaddr[6];
+ bool bused;
+ u8 key_buf[16];
+ u16 key_type;
+ u8 useDK;
+ u8 key_index;
+
+};
+#define TOTAL_CAM_ENTRY 32
+struct rate_adaptive {
+ u8 rate_adaptive_disabled;
+ u8 ratr_state;
+ u16 reserve;
+
+ u32 high_rssi_thresh_for_ra;
+ u32 high2low_rssi_thresh_for_ra;
+ u8 low2high_rssi_thresh_for_ra40M;
+ u32 low_rssi_thresh_for_ra40M;
+ u8 low2high_rssi_thresh_for_ra20M;
+ u32 low_rssi_thresh_for_ra20M;
+ u32 upper_rssi_threshold_ratr;
+ u32 middle_rssi_threshold_ratr;
+ u32 low_rssi_threshold_ratr;
+ u32 low_rssi_threshold_ratr_40M;
+ u32 low_rssi_threshold_ratr_20M;
+ u8 ping_rssi_enable;
+ u32 ping_rssi_ratr;
+ u32 ping_rssi_thresh_for_ra;
+ u32 last_ratr;
+ u8 PreRATRState;
+
+};
+enum ratr_table_mode_8192s {
+ RATR_INX_WIRELESS_NGB = 0,
+ RATR_INX_WIRELESS_NG = 1,
+ RATR_INX_WIRELESS_NB = 2,
+ RATR_INX_WIRELESS_N = 3,
+ RATR_INX_WIRELESS_GB = 4,
+ RATR_INX_WIRELESS_G = 5,
+ RATR_INX_WIRELESS_B = 6,
+ RATR_INX_WIRELESS_MC = 7,
+ RATR_INX_WIRELESS_A = 8,
+};
+
+#define NUM_PMKID_CACHE 16
+struct rt_pmkid_list {
+ u8 bUsed;
+ u8 Bssid[6];
+ u8 PMKID[16];
+ u8 SsidBuf[33];
+ u8 *ssid_octet;
+ u16 ssid_length;
+};
+
+struct rt_intel_promisc_mode {
+ bool bPromiscuousOn;
+ bool bFilterSourceStationFrame;
+};
+
+
+/*************** DRIVER STATUS *****/
+#define STATUS_SCANNING 0
+#define STATUS_SCAN_HW 1
+#define STATUS_SCAN_ABORTING 2
+#define STATUS_SETTING_CHAN 3
+/*************** DRIVER STATUS *****/
+
+enum {
+ NO_USE = 0,
+ USED = 1,
+ HW_SEC = 2,
+ SW_SEC = 3,
+};
+
+enum {
+ LPS_IS_WAKE = 0,
+ LPS_IS_SLEEP = 1,
+ LPS_WAIT_NULL_DATA_SEND = 2,
+};
+
+struct rtllib_device {
+ struct pci_dev *pdev;
+ struct net_device *dev;
+ struct rtllib_security sec;
+
+ bool disable_mgnt_queue;
+
+ unsigned long status;
+ short hwscan_ch_bk;
+ enum ht_extchnl_offset chan_offset_bk;
+ enum ht_channel_width bandwidth_bk;
+ u8 hwscan_sem_up;
+ u8 CntAfterLink;
+
+ enum rt_op_mode OpMode;
+
+ u8 VersionID;
+ /* The last AssocReq/Resp IEs */
+ u8 *assocreq_ies, *assocresp_ies;
+ size_t assocreq_ies_len, assocresp_ies_len;
+
+ bool b_customer_lenovo_id;
+ bool bForcedShowRxRate;
+ bool bForcedShowRateStill;
+ u8 SystemQueryDataRateCount;
+ bool bForcedBgMode;
+ bool bUseRAMask;
+ bool b1x1RecvCombine;
+ u8 RF_Type;
+ bool b1SSSupport;
+
+ u8 hwsec_active;
+ bool is_silent_reset;
+ bool force_mic_error;
+ bool is_roaming;
+ bool ieee_up;
+ bool cannot_notify;
+ bool bSupportRemoteWakeUp;
+ enum rt_ps_mode dot11PowerSaveMode;
+ bool actscanning;
+ bool FirstIe_InScan;
+ bool be_scan_inprogress;
+ bool beinretry;
+ enum rt_rf_power_state eRFPowerState;
+ RT_RF_CHANGE_SOURCE RfOffReason;
+ bool is_set_key;
+ bool wx_set_enc;
+ struct rt_hi_throughput *pHTInfo;
+ spinlock_t bw_spinlock;
+
+ spinlock_t reorder_spinlock;
+ u8 Regdot11HTOperationalRateSet[16];
+ u8 Regdot11TxHTOperationalRateSet[16];
+ u8 dot11HTOperationalRateSet[16];
+ u8 RegHTSuppRateSet[16];
+ u8 HTCurrentOperaRate;
+ u8 HTHighestOperaRate;
+ u8 MinSpaceCfg;
+ u8 MaxMssDensity;
+ u8 bTxDisableRateFallBack;
+ u8 bTxUseDriverAssingedRate;
+ u8 bTxEnableFwCalcDur;
+ atomic_t atm_chnlop;
+ atomic_t atm_swbw;
+
+ struct list_head Tx_TS_Admit_List;
+ struct list_head Tx_TS_Pending_List;
+ struct list_head Tx_TS_Unused_List;
+ struct tx_ts_record TxTsRecord[TOTAL_TS_NUM];
+ struct list_head Rx_TS_Admit_List;
+ struct list_head Rx_TS_Pending_List;
+ struct list_head Rx_TS_Unused_List;
+ struct rx_ts_record RxTsRecord[TOTAL_TS_NUM];
+ struct rx_reorder_entry RxReorderEntry[128];
+ struct list_head RxReorder_Unused_List;
+ u8 ForcedPriority;
+
+
+ /* Bookkeeping structures */
+ struct net_device_stats stats;
+ struct rtllib_stats ieee_stats;
+ struct rtllib_softmac_stats softmac_stats;
+
+ /* Probe / Beacon management */
+ struct list_head network_free_list;
+ struct list_head network_list;
+ struct rtllib_network *networks;
+ int scans;
+ int scan_age;
+
+ int iw_mode; /* operating mode (IW_MODE_*) */
+ bool bNetPromiscuousMode;
+ struct rt_intel_promisc_mode IntelPromiscuousModeInfo;
+
+ struct iw_spy_data spy_data;
+
+ spinlock_t lock;
+ spinlock_t wpax_suitlist_lock;
+
+ int tx_headroom; /* Set to size of any additional room needed at front
+ * of allocated Tx SKBs */
+ u32 config;
+
+ /* WEP and other encryption related settings at the device level */
+ int open_wep; /* Set to 1 to allow unencrypted frames */
+ int auth_mode;
+ int reset_on_keychange; /* Set to 1 if the HW needs to be reset on
+ * WEP key changes */
+
+ /* If the host performs {en,de}cryption, then set to 1 */
+ int host_encrypt;
+ int host_encrypt_msdu;
+ int host_decrypt;
+ /* host performs multicast decryption */
+ int host_mc_decrypt;
+
+ /* host should strip IV and ICV from protected frames */
+ /* meaningful only when hardware decryption is being used */
+ int host_strip_iv_icv;
+
+ int host_open_frag;
+ int host_build_iv;
+ int ieee802_1x; /* is IEEE 802.1X used */
+
+ /* WPA data */
+ bool bHalfNMode;
+ bool bHalfWirelessN24GMode;
+ int wpa_enabled;
+ int drop_unencrypted;
+ int tkip_countermeasures;
+ int privacy_invoked;
+ size_t wpa_ie_len;
+ u8 *wpa_ie;
+ size_t wps_ie_len;
+ u8 *wps_ie;
+ u8 ap_mac_addr[6];
+ u16 pairwise_key_type;
+ u16 group_key_type;
+ struct list_head crypt_deinit_list;
+ struct rtllib_crypt_data *crypt[WEP_KEYS];
+
+ int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
+ struct sw_cam_table swcamtable[TOTAL_CAM_ENTRY];
+ struct timer_list crypt_deinit_timer;
+ int crypt_quiesced;
+
+ int bcrx_sta_key; /* use individual keys to override default keys even
+ * with RX of broad/multicast frames */
+
+ struct rt_pmkid_list PMKIDList[NUM_PMKID_CACHE];
+
+ /* Fragmentation structures */
+ struct rtllib_frag_entry frag_cache[17][RTLLIB_FRAG_CACHE_LEN];
+ unsigned int frag_next_idx[17];
+ u16 fts; /* Fragmentation Threshold */
+#define DEFAULT_RTS_THRESHOLD 2346U
+#define MIN_RTS_THRESHOLD 1
+#define MAX_RTS_THRESHOLD 2346U
+ u16 rts; /* RTS threshold */
+
+ /* Association info */
+ u8 bssid[ETH_ALEN];
+
+ /* This stores infos for the current network.
+ * Either the network we are associated in INFRASTRUCTURE
+ * or the network that we are creating in MASTER mode.
+ * ad-hoc is a mixture ;-).
+ * Note that in infrastructure mode, even when not associated,
+ * fields bssid and essid may be valid (if wpa_set and essid_set
+ * are true) as thy carry the value set by the user via iwconfig
+ */
+ struct rtllib_network current_network;
+
+ enum rtllib_state state;
+
+ int short_slot;
+ int reg_mode;
+ int mode; /* A, B, G */
+ int modulation; /* CCK, OFDM */
+ int freq_band; /* 2.4Ghz, 5.2Ghz, Mixed */
+ int abg_true; /* ABG flag */
+
+ /* used for forcing the ibss workqueue to terminate
+ * without wait for the syncro scan to terminate
+ */
+ short sync_scan_hurryup;
+ u16 scan_watch_dog;
+ int perfect_rssi;
+ int worst_rssi;
+
+ u16 prev_seq_ctl; /* used to drop duplicate frames */
+
+ /* map of allowed channels. 0 is dummy */
+ void *pDot11dInfo;
+ bool bGlobalDomain;
+ u8 active_channel_map[MAX_CHANNEL_NUMBER+1];
+
+ u8 IbssStartChnl;
+ u8 ibss_maxjoin_chal;
+
+ int rate; /* current rate */
+ int basic_rate;
+ u32 currentRate;
+
+ short active_scan;
+
+ /* this contains flags for selectively enable softmac support */
+ u16 softmac_features;
+
+ /* if the sequence control field is not filled by HW */
+ u16 seq_ctrl[5];
+
+ /* association procedure transaction sequence number */
+ u16 associate_seq;
+
+ /* AID for RTXed association responses */
+ u16 assoc_id;
+
+ /* power save mode related*/
+ u8 ack_tx_to_ieee;
+ short ps;
+ short sta_sleep;
+ int ps_timeout;
+ int ps_period;
+ struct tasklet_struct ps_task;
+ u64 ps_time;
+ bool polling;
+
+ short raw_tx;
+ /* used if IEEE_SOFTMAC_TX_QUEUE is set */
+ short queue_stop;
+ short scanning_continue ;
+ short proto_started;
+ short proto_stoppping;
+
+ struct semaphore wx_sem;
+ struct semaphore scan_sem;
+ struct semaphore ips_sem;
+
+ spinlock_t mgmt_tx_lock;
+ spinlock_t beacon_lock;
+
+ short beacon_txing;
+
+ short wap_set;
+ short ssid_set;
+
+ /* set on initialization */
+ u8 qos_support;
+ unsigned int wmm_acm;
+
+ /* for discarding duplicated packets in IBSS */
+ struct list_head ibss_mac_hash[IEEE_IBSS_MAC_HASH_SIZE];
+
+ /* for discarding duplicated packets in BSS */
+ u16 last_rxseq_num[17]; /* rx seq previous per-tid */
+ u16 last_rxfrag_num[17];/* tx frag previous per-tid */
+ unsigned long last_packet_time[17];
+
+ /* for PS mode */
+ unsigned long last_rx_ps_time;
+ bool bAwakePktSent;
+ u8 LPSDelayCnt;
+
+ /* used if IEEE_SOFTMAC_SINGLE_QUEUE is set */
+ struct sk_buff *mgmt_queue_ring[MGMT_QUEUE_NUM];
+ int mgmt_queue_head;
+ int mgmt_queue_tail;
+#define RTLLIB_QUEUE_LIMIT 128
+ u8 AsocRetryCount;
+ unsigned int hw_header;
+ struct sk_buff_head skb_waitQ[MAX_QUEUE_SIZE];
+ struct sk_buff_head skb_aggQ[MAX_QUEUE_SIZE];
+ struct sk_buff_head skb_drv_aggQ[MAX_QUEUE_SIZE];
+ u32 sta_edca_param[4];
+ bool aggregation;
+ bool enable_rx_imm_BA;
+ bool bibsscoordinator;
+
+ bool bdynamic_txpower_enable;
+
+ bool bCTSToSelfEnable;
+ u8 CTSToSelfTH;
+
+ u32 fsync_time_interval;
+ u32 fsync_rate_bitmap;
+ u8 fsync_rssi_threshold;
+ bool bfsync_enable;
+
+ u8 fsync_multiple_timeinterval;
+ u32 fsync_firstdiff_ratethreshold;
+ u32 fsync_seconddiff_ratethreshold;
+ enum fsync_state fsync_state;
+ bool bis_any_nonbepkts;
+ struct bandwidth_autoswitch bandwidth_auto_switch;
+ bool FwRWRF;
+
+ struct rt_link_detect LinkDetectInfo;
+ bool bIsAggregateFrame;
+ struct rt_pwr_save_ctrl PowerSaveControl;
+ u8 amsdu_in_process;
+
+ /* used if IEEE_SOFTMAC_TX_QUEUE is set */
+ struct tx_pending tx_pending;
+
+ /* used if IEEE_SOFTMAC_ASSOCIATE is set */
+ struct timer_list associate_timer;
+
+ /* used if IEEE_SOFTMAC_BEACONS is set */
+ struct timer_list beacon_timer;
+ u8 need_sw_enc;
+ struct work_struct associate_complete_wq;
+ struct work_struct ips_leave_wq;
+ struct delayed_work associate_procedure_wq;
+ struct delayed_work softmac_scan_wq;
+ struct delayed_work softmac_hint11d_wq;
+ struct delayed_work associate_retry_wq;
+ struct delayed_work start_ibss_wq;
+ struct delayed_work hw_wakeup_wq;
+ struct delayed_work hw_sleep_wq;
+ struct delayed_work link_change_wq;
+ struct work_struct wx_sync_scan_wq;
+
+ struct workqueue_struct *wq;
+ union {
+ struct rtllib_rxb *RfdArray[REORDER_WIN_SIZE];
+ struct rtllib_rxb *stats_IndicateArray[REORDER_WIN_SIZE];
+ struct rtllib_rxb *prxbIndicateArray[REORDER_WIN_SIZE];
+ struct {
+ struct sw_chnl_cmd PreCommonCmd[MAX_PRECMD_CNT];
+ struct sw_chnl_cmd PostCommonCmd[MAX_POSTCMD_CNT];
+ struct sw_chnl_cmd RfDependCmd[MAX_RFDEPENDCMD_CNT];
+ };
+ };
+
+ /* Callback functions */
+ void (*set_security)(struct net_device *dev,
+ struct rtllib_security *sec);
+
+ /* Used to TX data frame by using txb structs.
+ * this is not used if in the softmac_features
+ * is set the flag IEEE_SOFTMAC_TX_QUEUE
+ */
+ int (*hard_start_xmit)(struct rtllib_txb *txb,
+ struct net_device *dev);
+
+ int (*reset_port)(struct net_device *dev);
+ int (*is_queue_full)(struct net_device *dev, int pri);
+
+ int (*handle_management)(struct net_device *dev,
+ struct rtllib_network *network, u16 type);
+ int (*is_qos_active)(struct net_device *dev, struct sk_buff *skb);
+
+ /* Softmac-generated frames (mamagement) are TXed via this
+ * callback if the flag IEEE_SOFTMAC_SINGLE_QUEUE is
+ * not set. As some cards may have different HW queues that
+ * one might want to use for data and management frames
+ * the option to have two callbacks might be useful.
+ * This fucntion can't sleep.
+ */
+ int (*softmac_hard_start_xmit)(struct sk_buff *skb,
+ struct net_device *dev);
+
+ /* used instead of hard_start_xmit (not softmac_hard_start_xmit)
+ * if the IEEE_SOFTMAC_TX_QUEUE feature is used to TX data
+ * frames. I the option IEEE_SOFTMAC_SINGLE_QUEUE is also set
+ * then also management frames are sent via this callback.
+ * This function can't sleep.
+ */
+ void (*softmac_data_hard_start_xmit)(struct sk_buff *skb,
+ struct net_device *dev, int rate);
+
+ /* stops the HW queue for DATA frames. Useful to avoid
+ * waste time to TX data frame when we are reassociating
+ * This function can sleep.
+ */
+ void (*data_hard_stop)(struct net_device *dev);
+
+ /* OK this is complementar to data_poll_hard_stop */
+ void (*data_hard_resume)(struct net_device *dev);
+
+ /* ask to the driver to retune the radio .
+ * This function can sleep. the driver should ensure
+ * the radio has been swithced before return.
+ */
+ void (*set_chan)(struct net_device *dev, short ch);
+
+ /* These are not used if the ieee stack takes care of
+ * scanning (IEEE_SOFTMAC_SCAN feature set).
+ * In this case only the set_chan is used.
+ *
+ * The syncro version is similar to the start_scan but
+ * does not return until all channels has been scanned.
+ * this is called in user context and should sleep,
+ * it is called in a work_queue when swithcing to ad-hoc mode
+ * or in behalf of iwlist scan when the card is associated
+ * and root user ask for a scan.
+ * the fucntion stop_scan should stop both the syncro and
+ * background scanning and can sleep.
+ * The fucntion start_scan should initiate the background
+ * scanning and can't sleep.
+ */
+ void (*scan_syncro)(struct net_device *dev);
+ void (*start_scan)(struct net_device *dev);
+ void (*stop_scan)(struct net_device *dev);
+
+ void (*rtllib_start_hw_scan)(struct net_device *dev);
+ void (*rtllib_stop_hw_scan)(struct net_device *dev);
+
+ /* indicate the driver that the link state is changed
+ * for example it may indicate the card is associated now.
+ * Driver might be interested in this to apply RX filter
+ * rules or simply light the LINK led
+ */
+ void (*link_change)(struct net_device *dev);
+
+ /* these two function indicates to the HW when to start
+ * and stop to send beacons. This is used when the
+ * IEEE_SOFTMAC_BEACONS is not set. For now the
+ * stop_send_bacons is NOT guaranteed to be called only
+ * after start_send_beacons.
+ */
+ void (*start_send_beacons)(struct net_device *dev);
+ void (*stop_send_beacons)(struct net_device *dev);
+
+ /* power save mode related */
+ void (*sta_wake_up)(struct net_device *dev);
+ void (*enter_sleep_state)(struct net_device *dev, u64 time);
+ short (*ps_is_queue_empty)(struct net_device *dev);
+ int (*handle_beacon)(struct net_device *dev,
+ struct rtllib_beacon *beacon,
+ struct rtllib_network *network);
+ int (*handle_assoc_response)(struct net_device *dev,
+ struct rtllib_assoc_response_frame *resp,
+ struct rtllib_network *network);
+
+
+ /* check whether Tx hw resouce available */
+ short (*check_nic_enough_desc)(struct net_device *dev, int queue_index);
+ short (*get_nic_desc_num)(struct net_device *dev, int queue_index);
+ void (*SetBWModeHandler)(struct net_device *dev,
+ enum ht_channel_width Bandwidth,
+ enum ht_extchnl_offset Offset);
+ bool (*GetNmodeSupportBySecCfg)(struct net_device *dev);
+ void (*SetWirelessMode)(struct net_device *dev, u8 wireless_mode);
+ bool (*GetHalfNmodeSupportByAPsHandler)(struct net_device *dev);
+ u8 (*rtllib_ap_sec_type)(struct rtllib_device *ieee);
+ void (*HalUsbRxAggrHandler)(struct net_device *dev, bool Value);
+ void (*InitialGainHandler)(struct net_device *dev, u8 Operation);
+ bool (*SetFwCmdHandler)(struct net_device *dev,
+ enum fw_cmd_io_type FwCmdIO);
+ void (*UpdateHalRAMaskHandler)(struct net_device *dev, bool bMulticast,
+ u8 macId, u8 MimoPs, u8 WirelessMode,
+ u8 bCurTxBW40MHz, u8 rssi_level);
+ void (*UpdateBeaconInterruptHandler)(struct net_device *dev,
+ bool start);
+ void (*UpdateInterruptMaskHandler)(struct net_device *dev, u32 AddMSR,
+ u32 RemoveMSR);
+ u16 (*rtl_11n_user_show_rates)(struct net_device *dev);
+ void (*ScanOperationBackupHandler)(struct net_device *dev,
+ u8 Operation);
+ void (*LedControlHandler)(struct net_device *dev,
+ enum led_ctl_mode LedAction);
+ void (*SetHwRegHandler)(struct net_device *dev, u8 variable, u8 *val);
+ void (*GetHwRegHandler)(struct net_device *dev, u8 variable, u8 *val);
+
+ void (*AllowAllDestAddrHandler)(struct net_device *dev,
+ bool bAllowAllDA, bool WriteIntoReg);
+
+ void (*rtllib_ips_leave_wq) (struct net_device *dev);
+ void (*rtllib_ips_leave)(struct net_device *dev);
+ void (*LeisurePSLeave)(struct net_device *dev);
+ void (*rtllib_rfkill_poll)(struct net_device *dev);
+
+ /* This must be the last item so that it points to the data
+ * allocated beyond this structure by alloc_rtllib */
+ u8 priv[0];
+};
+
+#define IEEE_A (1<<0)
+#define IEEE_B (1<<1)
+#define IEEE_G (1<<2)
+#define IEEE_N_24G (1<<4)
+#define IEEE_N_5G (1<<5)
+#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
+
+/* Generate a 802.11 header */
+
+/* Uses the channel change callback directly
+ * instead of [start/stop] scan callbacks
+ */
+#define IEEE_SOFTMAC_SCAN (1<<2)
+
+/* Perform authentication and association handshake */
+#define IEEE_SOFTMAC_ASSOCIATE (1<<3)
+
+/* Generate probe requests */
+#define IEEE_SOFTMAC_PROBERQ (1<<4)
+
+/* Generate respones to probe requests */
+#define IEEE_SOFTMAC_PROBERS (1<<5)
+
+/* The ieee802.11 stack will manages the netif queue
+ * wake/stop for the driver, taking care of 802.11
+ * fragmentation. See softmac.c for details. */
+#define IEEE_SOFTMAC_TX_QUEUE (1<<7)
+
+/* Uses only the softmac_data_hard_start_xmit
+ * even for TX management frames.
+ */
+#define IEEE_SOFTMAC_SINGLE_QUEUE (1<<8)
+
+/* Generate beacons. The stack will enqueue beacons
+ * to the card
+ */
+#define IEEE_SOFTMAC_BEACONS (1<<6)
+
+
+static inline void *rtllib_priv(struct net_device *dev)
+{
+ return ((struct rtllib_device *)netdev_priv(dev))->priv;
+}
+
+extern inline int rtllib_is_empty_essid(const char *essid, int essid_len)
+{
+ /* Single white space is for Linksys APs */
+ if (essid_len == 1 && essid[0] == ' ')
+ return 1;
+
+ /* Otherwise, if the entire essid is 0, we assume it is hidden */
+ while (essid_len) {
+ essid_len--;
+ if (essid[essid_len] != '\0')
+ return 0;
+ }
+
+ return 1;
+}
+
+extern inline int rtllib_is_valid_mode(struct rtllib_device *ieee, int mode)
+{
+ /*
+ * It is possible for both access points and our device to support
+ * combinations of modes, so as long as there is one valid combination
+ * of ap/device supported modes, then return success
+ *
+ */
+ if ((mode & IEEE_A) &&
+ (ieee->modulation & RTLLIB_OFDM_MODULATION) &&
+ (ieee->freq_band & RTLLIB_52GHZ_BAND))
+ return 1;
+
+ if ((mode & IEEE_G) &&
+ (ieee->modulation & RTLLIB_OFDM_MODULATION) &&
+ (ieee->freq_band & RTLLIB_24GHZ_BAND))
+ return 1;
+
+ if ((mode & IEEE_B) &&
+ (ieee->modulation & RTLLIB_CCK_MODULATION) &&
+ (ieee->freq_band & RTLLIB_24GHZ_BAND))
+ return 1;
+
+ return 0;
+}
+
+extern inline int rtllib_get_hdrlen(u16 fc)
+{
+ int hdrlen = RTLLIB_3ADDR_LEN;
+
+ switch (WLAN_FC_GET_TYPE(fc)) {
+ case RTLLIB_FTYPE_DATA:
+ if ((fc & RTLLIB_FCTL_FROMDS) && (fc & RTLLIB_FCTL_TODS))
+ hdrlen = RTLLIB_4ADDR_LEN; /* Addr4 */
+ if (RTLLIB_QOS_HAS_SEQ(fc))
+ hdrlen += 2; /* QOS ctrl*/
+ break;
+ case RTLLIB_FTYPE_CTL:
+ switch (WLAN_FC_GET_STYPE(fc)) {
+ case RTLLIB_STYPE_CTS:
+ case RTLLIB_STYPE_ACK:
+ hdrlen = RTLLIB_1ADDR_LEN;
+ break;
+ default:
+ hdrlen = RTLLIB_2ADDR_LEN;
+ break;
+ }
+ break;
+ }
+
+ return hdrlen;
+}
+
+static inline u8 *rtllib_get_payload(struct rtllib_hdr *hdr)
+{
+ switch (rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl))) {
+ case RTLLIB_1ADDR_LEN:
+ return ((struct rtllib_hdr_1addr *)hdr)->payload;
+ case RTLLIB_2ADDR_LEN:
+ return ((struct rtllib_hdr_2addr *)hdr)->payload;
+ case RTLLIB_3ADDR_LEN:
+ return ((struct rtllib_hdr_3addr *)hdr)->payload;
+ case RTLLIB_4ADDR_LEN:
+ return ((struct rtllib_hdr_4addr *)hdr)->payload;
+ }
+ return NULL;
+}
+
+static inline int rtllib_is_ofdm_rate(u8 rate)
+{
+ switch (rate & ~RTLLIB_BASIC_RATE_MASK) {
+ case RTLLIB_OFDM_RATE_6MB:
+ case RTLLIB_OFDM_RATE_9MB:
+ case RTLLIB_OFDM_RATE_12MB:
+ case RTLLIB_OFDM_RATE_18MB:
+ case RTLLIB_OFDM_RATE_24MB:
+ case RTLLIB_OFDM_RATE_36MB:
+ case RTLLIB_OFDM_RATE_48MB:
+ case RTLLIB_OFDM_RATE_54MB:
+ return 1;
+ }
+ return 0;
+}
+
+static inline int rtllib_is_cck_rate(u8 rate)
+{
+ switch (rate & ~RTLLIB_BASIC_RATE_MASK) {
+ case RTLLIB_CCK_RATE_1MB:
+ case RTLLIB_CCK_RATE_2MB:
+ case RTLLIB_CCK_RATE_5MB:
+ case RTLLIB_CCK_RATE_11MB:
+ return 1;
+ }
+ return 0;
+}
+
+
+/* rtllib.c */
+extern void free_rtllib(struct net_device *dev);
+extern struct net_device *alloc_rtllib(int sizeof_priv);
+
+extern int rtllib_set_encryption(struct rtllib_device *ieee);
+
+/* rtllib_tx.c */
+
+extern int rtllib_encrypt_fragment(
+ struct rtllib_device *ieee,
+ struct sk_buff *frag,
+ int hdr_len);
+
+extern int rtllib_xmit(struct sk_buff *skb, struct net_device *dev);
+extern int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev);
+extern void rtllib_txb_free(struct rtllib_txb *);
+
+/* rtllib_rx.c */
+extern int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats);
+extern void rtllib_rx_mgt(struct rtllib_device *ieee,
+ struct sk_buff *skb,
+ struct rtllib_rx_stats *stats);
+extern void rtllib_rx_probe_rq(struct rtllib_device *ieee,
+ struct sk_buff *skb);
+extern int IsLegalChannel(struct rtllib_device *rtllib, u8 channel);
+
+/* rtllib_wx.c */
+extern int rtllib_wx_get_scan(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *key);
+extern int rtllib_wx_set_encode(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *key);
+extern int rtllib_wx_get_encode(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *key);
+#if WIRELESS_EXT >= 18
+extern int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+extern int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+#endif
+extern int rtllib_wx_set_auth(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra);
+extern int rtllib_wx_set_mlme(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+extern int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len);
+
+/* rtllib_softmac.c */
+extern short rtllib_is_54g(struct rtllib_network *net);
+extern short rtllib_is_shortslot(struct rtllib_network net);
+extern int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
+ struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats, u16 type,
+ u16 stype);
+extern void rtllib_softmac_new_net(struct rtllib_device *ieee,
+ struct rtllib_network *net);
+
+void SendDisassociation(struct rtllib_device *ieee, bool deauth, u16 asRsn);
+extern void rtllib_softmac_xmit(struct rtllib_txb *txb,
+ struct rtllib_device *ieee);
+
+extern void rtllib_stop_send_beacons(struct rtllib_device *ieee);
+extern void notify_wx_assoc_event(struct rtllib_device *ieee);
+extern void rtllib_softmac_check_all_nets(struct rtllib_device *ieee);
+extern void rtllib_start_bss(struct rtllib_device *ieee);
+extern void rtllib_start_master_bss(struct rtllib_device *ieee);
+extern void rtllib_start_ibss(struct rtllib_device *ieee);
+extern void rtllib_softmac_init(struct rtllib_device *ieee);
+extern void rtllib_softmac_free(struct rtllib_device *ieee);
+extern void rtllib_associate_abort(struct rtllib_device *ieee);
+extern void rtllib_disassociate(struct rtllib_device *ieee);
+extern void rtllib_stop_scan(struct rtllib_device *ieee);
+extern bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan);
+extern void rtllib_stop_scan_syncro(struct rtllib_device *ieee);
+extern void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
+extern inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee);
+extern u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee);
+extern void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee,
+ short pwr);
+extern void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl);
+extern void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee);
+extern void rtllib_check_all_nets(struct rtllib_device *ieee);
+extern void rtllib_start_protocol(struct rtllib_device *ieee);
+extern void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown);
+
+extern void rtllib_EnableNetMonitorMode(struct net_device *dev,
+ bool bInitState);
+extern void rtllib_DisableNetMonitorMode(struct net_device *dev,
+ bool bInitState);
+extern void rtllib_EnableIntelPromiscuousMode(struct net_device *dev,
+ bool bInitState);
+extern void rtllib_DisableIntelPromiscuousMode(struct net_device *dev,
+ bool bInitState);
+extern void rtllib_send_probe_requests(struct rtllib_device *ieee, u8 is_mesh);
+
+extern void rtllib_softmac_stop_protocol(struct rtllib_device *ieee,
+ u8 mesh_flag, u8 shutdown);
+extern void rtllib_softmac_start_protocol(struct rtllib_device *ieee,
+ u8 mesh_flag);
+
+extern void rtllib_reset_queue(struct rtllib_device *ieee);
+extern void rtllib_wake_queue(struct rtllib_device *ieee);
+extern void rtllib_stop_queue(struct rtllib_device *ieee);
+extern void rtllib_wake_all_queues(struct rtllib_device *ieee);
+extern void rtllib_stop_all_queues(struct rtllib_device *ieee);
+extern struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee);
+extern void rtllib_start_send_beacons(struct rtllib_device *ieee);
+extern void rtllib_stop_send_beacons(struct rtllib_device *ieee);
+extern int rtllib_wpa_supplicant_ioctl(struct rtllib_device *ieee,
+ struct iw_point *p, u8 is_mesh);
+
+extern void notify_wx_assoc_event(struct rtllib_device *ieee);
+extern void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success);
+
+extern void softmac_mgmt_xmit(struct sk_buff *skb,
+ struct rtllib_device *ieee);
+extern u16 rtllib_query_seqnum(struct rtllib_device *ieee,
+ struct sk_buff *skb, u8 *dst);
+extern u8 rtllib_ap_sec_type(struct rtllib_device *ieee);
+
+/* rtllib_crypt_ccmp&tkip&wep.c */
+extern void rtllib_tkip_null(void);
+extern void rtllib_wep_null(void);
+extern void rtllib_ccmp_null(void);
+
+/* rtllib_softmac_wx.c */
+
+extern int rtllib_wx_get_wap(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *ext);
+
+extern int rtllib_wx_set_wap(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *awrq,
+ char *extra);
+
+extern int rtllib_wx_get_essid(struct rtllib_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
+
+extern int rtllib_wx_set_rate(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+
+extern int rtllib_wx_get_rate(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+
+extern int rtllib_wx_set_mode(struct rtllib_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
+
+extern int rtllib_wx_set_scan(struct rtllib_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
+
+extern int rtllib_wx_set_essid(struct rtllib_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra);
+
+extern int rtllib_wx_get_mode(struct rtllib_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
+
+extern int rtllib_wx_set_freq(struct rtllib_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
+
+extern int rtllib_wx_get_freq(struct rtllib_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
+extern void rtllib_wx_sync_scan_wq(void *data);
+
+extern int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+
+extern int rtllib_wx_get_name(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+
+extern int rtllib_wx_set_power(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+
+extern int rtllib_wx_get_power(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+
+extern int rtllib_wx_set_rts(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+
+extern int rtllib_wx_get_rts(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+#define MAX_RECEIVE_BUFFER_SIZE 9100
+extern void HTDebugHTCapability(u8 *CapIE, u8 *TitleString);
+extern void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString);
+
+void HTSetConnectBwMode(struct rtllib_device *ieee,
+ enum ht_channel_width Bandwidth,
+ enum ht_extchnl_offset Offset);
+extern void HTUpdateDefaultSetting(struct rtllib_device *ieee);
+extern void HTConstructCapabilityElement(struct rtllib_device *ieee,
+ u8 *posHTCap, u8 *len,
+ u8 isEncrypt, bool bAssoc);
+extern void HTConstructInfoElement(struct rtllib_device *ieee,
+ u8 *posHTInfo, u8 *len, u8 isEncrypt);
+extern void HTConstructRT2RTAggElement(struct rtllib_device *ieee,
+ u8 *posRT2RTAgg, u8* len);
+extern void HTOnAssocRsp(struct rtllib_device *ieee);
+extern void HTInitializeHTInfo(struct rtllib_device *ieee);
+extern void HTInitializeBssDesc(struct bss_ht *pBssHT);
+extern void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
+ struct rtllib_network *pNetwork);
+extern void HTUpdateSelfAndPeerSetting(struct rtllib_device *ieee,
+ struct rtllib_network *pNetwork);
+extern u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
+ u8 *pMCSFilter);
+extern u8 MCS_FILTER_ALL[];
+extern u16 MCS_DATA_RATE[2][2][77] ;
+extern u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame);
+extern void HTResetIOTSetting(struct rt_hi_throughput *pHTInfo);
+extern bool IsHTHalfNmodeAPs(struct rtllib_device *ieee);
+extern u16 HTHalfMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate);
+extern u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate);
+extern u16 TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate);
+extern int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb);
+extern int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb);
+extern int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb);
+extern void TsInitAddBA(struct rtllib_device *ieee, struct tx_ts_record *pTS,
+ u8 Policy, u8 bOverwritePending);
+extern void TsInitDelBA(struct rtllib_device *ieee,
+ struct ts_common_info *pTsCommonInfo,
+ enum tr_select TxRxSelect);
+extern void BaSetupTimeOut(unsigned long data);
+extern void TxBaInactTimeout(unsigned long data);
+extern void RxBaInactTimeout(unsigned long data);
+extern void ResetBaEntry(struct ba_record *pBA);
+extern bool GetTs(
+ struct rtllib_device *ieee,
+ struct ts_common_info **ppTS,
+ u8 *Addr,
+ u8 TID,
+ enum tr_select TxRxSelect,
+ bool bAddNewTs
+);
+extern void TSInitialize(struct rtllib_device *ieee);
+extern void TsStartAddBaProcess(struct rtllib_device *ieee,
+ struct tx_ts_record *pTxTS);
+extern void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr);
+extern void RemoveAllTS(struct rtllib_device *ieee);
+void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
+
+extern const long rtllib_wlan_frequencies[];
+
+extern inline void rtllib_increment_scans(struct rtllib_device *ieee)
+{
+ ieee->scans++;
+}
+
+extern inline int rtllib_get_scans(struct rtllib_device *ieee)
+{
+ return ieee->scans;
+}
+
+static inline const char *escape_essid(const char *essid, u8 essid_len)
+{
+ static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
+ const char *s = essid;
+ char *d = escaped;
+
+ if (rtllib_is_empty_essid(essid, essid_len)) {
+ memcpy(escaped, "<hidden>", sizeof("<hidden>"));
+ return escaped;
+ }
+
+ essid_len = min(essid_len, (u8)IW_ESSID_MAX_SIZE);
+ while (essid_len--) {
+ if (*s == '\0') {
+ *d++ = '\\';
+ *d++ = '0';
+ s++;
+ } else {
+ *d++ = *s++;
+ }
+ }
+ *d = '\0';
+ return escaped;
+}
+
+#define CONVERT_RATE(_ieee, _MGN_RATE) \
+ ((_MGN_RATE < MGN_MCS0) ? (_MGN_RATE) : \
+ (HTMcsToDataRate(_ieee, (u8)_MGN_RATE)))
+
+/* fun with the built-in rtllib stack... */
+int rtllib_init(void);
+void rtllib_exit(void);
+int rtllib_crypto_init(void);
+void rtllib_crypto_deinit(void);
+int rtllib_crypto_tkip_init(void);
+void rtllib_crypto_tkip_exit(void);
+int rtllib_crypto_ccmp_init(void);
+void rtllib_crypto_ccmp_exit(void);
+int rtllib_crypto_wep_init(void);
+void rtllib_crypto_wep_exit(void);
+
+void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib);
+void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib, u8 *asSta,
+ u8 asRsn);
+void rtllib_MgntDisconnectAP(struct rtllib_device *rtllib, u8 asRsn);
+bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn);
+
+
+/* For the function is more related to hardware setting, it's better to use the
+ * ieee handler to refer to it.
+ */
+extern void rtllib_update_active_chan_map(struct rtllib_device *ieee);
+extern void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
+ struct rx_ts_record *pTS);
+extern int rtllib_data_xmit(struct sk_buff *skb, struct net_device *dev);
+extern int rtllib_parse_info_param(struct rtllib_device *ieee,
+ struct rtllib_info_element *info_element,
+ u16 length,
+ struct rtllib_network *network,
+ struct rtllib_rx_stats *stats);
+
+void rtllib_indicate_packets(struct rtllib_device *ieee,
+ struct rtllib_rxb **prxbIndicateArray, u8 index);
+extern u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
+ u8 *pOperateMCS);
+extern void HTUseDefaultSetting(struct rtllib_device *ieee);
+#define RT_ASOC_RETRY_LIMIT 5
+u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee);
+extern void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p);
+#ifndef ENABLE_LOCK_DEBUG
+#define SPIN_LOCK_IEEE(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_IEEE(plock) spin_unlock_irqrestore((plock), flags)
+#define SPIN_LOCK_IEEE_REORDER(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_IEEE_REORDER(plock) spin_unlock_irqrestore((plock), flags)
+#define SPIN_LOCK_IEEE_WPAX(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_IEEE_WPAX(plock) spin_unlock_irqrestore((plock), flags)
+#define SPIN_LOCK_IEEE_MGNTTX(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_IEEE_MGNTTX(plock) spin_unlock_irqrestore((plock), flags)
+#define SPIN_LOCK_IEEE_BCN(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_IEEE_BCN(plock) spin_unlock_irqrestore((plock), flags)
+#define SPIN_LOCK_MSH_STAINFO(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_MSH_STAINFO(plock) spin_unlock_irqrestore((plock), flags)
+#define SPIN_LOCK_MSH_PREQ(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_MSH_PREQ(plock) spin_unlock_irqrestore((plock), flags)
+#define SPIN_LOCK_MSH_QUEUE(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_MSH_QUEUE(plock) spin_unlock_irqrestore((plock), flags)
+#define SPIN_LOCK_PRIV_RFPS(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_PRIV_RFPS(plock) spin_unlock_irqrestore((plock), flags)
+#define SPIN_LOCK_PRIV_IRQTH(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_PRIV_IRQTH(plock) spin_unlock_irqrestore((plock), flags)
+#define SPIN_LOCK_PRIV_TX(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_PRIV_TX(plock) spin_unlock_irqrestore((plock), flags)
+#define SPIN_LOCK_PRIV_D3(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_PRIV_D3(plock) spin_unlock_irqrestore((plock), flags)
+#define SPIN_LOCK_PRIV_RF(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_PRIV_RF(plock) spin_unlock_irqrestore((plock), flags)
+#define SPIN_LOCK_PRIV_PS(plock) spin_lock_irqsave((plock), flags)
+#define SPIN_UNLOCK_PRIV_PS(plock) spin_unlock_irqrestore((plock), flags)
+#define SEM_DOWN_IEEE_WX(psem) down(psem)
+#define SEM_UP_IEEE_WX(psem) up(psem)
+#define SEM_DOWN_IEEE_SCAN(psem) down(psem)
+#define SEM_UP_IEEE_SCAN(psem) up(psem)
+#define SEM_DOWN_IEEE_IPS(psem) down(psem)
+#define SEM_UP_IEEE_IPS(psem) up(psem)
+#define SEM_DOWN_PRIV_WX(psem) down(psem)
+#define SEM_UP_PRIV_WX(psem) up(psem)
+#define SEM_DOWN_PRIV_RF(psem) down(psem)
+#define SEM_UP_PRIV_RF(psem) up(psem)
+#define MUTEX_LOCK_PRIV(pmutex) mutex_lock(pmutex)
+#define MUTEX_UNLOCK_PRIV(pmutex) mutex_unlock(pmutex)
+#endif
+static inline void dump_buf(u8 *buf, u32 len)
+{
+ u32 i;
+ printk(KERN_INFO "-----------------Len %d----------------\n", len);
+ for (i = 0; i < len; i++)
+ printk("%2.2x-", *(buf+i));
+ printk("\n");
+}
+#endif /* RTLLIB_H */
diff --git a/drivers/staging/rtl8192e/rtllib_crypt.c b/drivers/staging/rtl8192e/rtllib_crypt.c
new file mode 100644
index 00000000000..acda37b8184
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_crypt.c
@@ -0,0 +1,241 @@
+/*
+ * Host AP crypto routines
+ *
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Portions Copyright (C) 2004, Intel Corporation <jketreno@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include "rtllib.h"
+
+struct rtllib_crypto_alg {
+ struct list_head list;
+ struct rtllib_crypto_ops *ops;
+};
+
+
+struct rtllib_crypto {
+ struct list_head algs;
+ spinlock_t lock;
+};
+
+static struct rtllib_crypto *hcrypt;
+
+void rtllib_crypt_deinit_entries(struct rtllib_device *ieee,
+ int force)
+{
+ struct list_head *ptr, *n;
+ struct rtllib_crypt_data *entry;
+
+ for (ptr = ieee->crypt_deinit_list.next, n = ptr->next;
+ ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) {
+ entry = list_entry(ptr, struct rtllib_crypt_data, list);
+
+ if (atomic_read(&entry->refcnt) != 0 && !force)
+ continue;
+
+ list_del(ptr);
+
+ if (entry->ops)
+ entry->ops->deinit(entry->priv);
+ kfree(entry);
+ }
+}
+
+void rtllib_crypt_deinit_handler(unsigned long data)
+{
+ struct rtllib_device *ieee = (struct rtllib_device *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ieee->lock, flags);
+ rtllib_crypt_deinit_entries(ieee, 0);
+ if (!list_empty(&ieee->crypt_deinit_list)) {
+ printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
+ "deletion list\n", ieee->dev->name);
+ ieee->crypt_deinit_timer.expires = jiffies + HZ;
+ add_timer(&ieee->crypt_deinit_timer);
+ }
+ spin_unlock_irqrestore(&ieee->lock, flags);
+
+}
+
+void rtllib_crypt_delayed_deinit(struct rtllib_device *ieee,
+ struct rtllib_crypt_data **crypt)
+{
+ struct rtllib_crypt_data *tmp;
+ unsigned long flags;
+
+ if (*crypt == NULL)
+ return;
+
+ tmp = *crypt;
+ *crypt = NULL;
+
+ /* must not run ops->deinit() while there may be pending encrypt or
+ * decrypt operations. Use a list of delayed deinits to avoid needing
+ * locking. */
+
+ spin_lock_irqsave(&ieee->lock, flags);
+ list_add(&tmp->list, &ieee->crypt_deinit_list);
+ if (!timer_pending(&ieee->crypt_deinit_timer)) {
+ ieee->crypt_deinit_timer.expires = jiffies + HZ;
+ add_timer(&ieee->crypt_deinit_timer);
+ }
+ spin_unlock_irqrestore(&ieee->lock, flags);
+}
+
+int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops)
+{
+ unsigned long flags;
+ struct rtllib_crypto_alg *alg;
+
+ if (hcrypt == NULL)
+ return -1;
+
+ alg = kmalloc(sizeof(*alg), GFP_KERNEL);
+ if (alg == NULL)
+ return -ENOMEM;
+
+ memset(alg, 0, sizeof(*alg));
+ alg->ops = ops;
+
+ spin_lock_irqsave(&hcrypt->lock, flags);
+ list_add(&alg->list, &hcrypt->algs);
+ spin_unlock_irqrestore(&hcrypt->lock, flags);
+
+ printk(KERN_DEBUG "rtllib_crypt: registered algorithm '%s'\n",
+ ops->name);
+
+ return 0;
+}
+
+int rtllib_unregister_crypto_ops(struct rtllib_crypto_ops *ops)
+{
+ unsigned long flags;
+ struct list_head *ptr;
+ struct rtllib_crypto_alg *del_alg = NULL;
+
+ if (hcrypt == NULL)
+ return -1;
+
+ spin_lock_irqsave(&hcrypt->lock, flags);
+ for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
+ struct rtllib_crypto_alg *alg =
+ (struct rtllib_crypto_alg *) ptr;
+ if (alg->ops == ops) {
+ list_del(&alg->list);
+ del_alg = alg;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&hcrypt->lock, flags);
+
+ if (del_alg) {
+ printk(KERN_DEBUG "rtllib_crypt: unregistered algorithm "
+ "'%s'\n", ops->name);
+ kfree(del_alg);
+ }
+
+ return del_alg ? 0 : -1;
+}
+
+
+struct rtllib_crypto_ops *rtllib_get_crypto_ops(const char *name)
+{
+ unsigned long flags;
+ struct list_head *ptr;
+ struct rtllib_crypto_alg *found_alg = NULL;
+
+ if (hcrypt == NULL)
+ return NULL;
+
+ spin_lock_irqsave(&hcrypt->lock, flags);
+ for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
+ struct rtllib_crypto_alg *alg =
+ (struct rtllib_crypto_alg *) ptr;
+ if (strcmp(alg->ops->name, name) == 0) {
+ found_alg = alg;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&hcrypt->lock, flags);
+
+ if (found_alg)
+ return found_alg->ops;
+ else
+ return NULL;
+}
+
+
+static void * rtllib_crypt_null_init(int keyidx) { return (void *) 1; }
+static void rtllib_crypt_null_deinit(void *priv) {}
+
+static struct rtllib_crypto_ops rtllib_crypt_null = {
+ .name = "NULL",
+ .init = rtllib_crypt_null_init,
+ .deinit = rtllib_crypt_null_deinit,
+ .encrypt_mpdu = NULL,
+ .decrypt_mpdu = NULL,
+ .encrypt_msdu = NULL,
+ .decrypt_msdu = NULL,
+ .set_key = NULL,
+ .get_key = NULL,
+ .extra_prefix_len = 0,
+ .extra_postfix_len = 0,
+ .owner = THIS_MODULE,
+};
+
+
+int __init rtllib_crypto_init(void)
+{
+ int ret = -ENOMEM;
+
+ hcrypt = kmalloc(sizeof(*hcrypt), GFP_KERNEL);
+ if (!hcrypt)
+ goto out;
+
+ memset(hcrypt, 0, sizeof(*hcrypt));
+ INIT_LIST_HEAD(&hcrypt->algs);
+ spin_lock_init(&hcrypt->lock);
+
+ ret = rtllib_register_crypto_ops(&rtllib_crypt_null);
+ if (ret < 0) {
+ kfree(hcrypt);
+ hcrypt = NULL;
+ }
+out:
+ return ret;
+}
+
+
+void __exit rtllib_crypto_deinit(void)
+{
+ struct list_head *ptr, *n;
+
+ if (hcrypt == NULL)
+ return;
+
+ for (ptr = hcrypt->algs.next, n = ptr->next; ptr != &hcrypt->algs;
+ ptr = n, n = ptr->next) {
+ struct rtllib_crypto_alg *alg =
+ (struct rtllib_crypto_alg *) ptr;
+ list_del(ptr);
+ printk(KERN_DEBUG "rtllib_crypt: unregistered algorithm "
+ "'%s' (deinit)\n", alg->ops->name);
+ kfree(alg);
+ }
+
+ kfree(hcrypt);
+}
diff --git a/drivers/staging/rtl8192e/rtllib_crypt.h b/drivers/staging/rtl8192e/rtllib_crypt.h
new file mode 100644
index 00000000000..49b90b73ed9
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_crypt.h
@@ -0,0 +1,85 @@
+/*
+ * Original code based on Host AP (software wireless LAN access point) driver
+ * for Intersil Prism2/2.5/3.
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * Adaption to a generic IEEE 802.11 stack by James Ketrenos
+ * <jketreno@linux.intel.com>
+ *
+ * Copyright (c) 2004, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ */
+
+/*
+ * This file defines the interface to the rtllib crypto module.
+ */
+#ifndef RTLLIB_CRYPT_H
+#define RTLLIB_CRYPT_H
+
+#include <linux/skbuff.h>
+
+struct rtllib_crypto_ops {
+ const char *name;
+
+ /* init new crypto context (e.g., allocate private data space,
+ * select IV, etc.); returns NULL on failure or pointer to allocated
+ * private data on success */
+ void * (*init)(int keyidx);
+
+ /* deinitialize crypto context and free allocated private data */
+ void (*deinit)(void *priv);
+
+ /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
+ * value from decrypt_mpdu is passed as the keyidx value for
+ * decrypt_msdu. skb must have enough head and tail room for the
+ * encryption; if not, error will be returned; these functions are
+ * called for all MPDUs (i.e., fragments).
+ */
+ int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
+ int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
+
+ /* These functions are called for full MSDUs, i.e. full frames.
+ * These can be NULL if full MSDU operations are not needed. */
+ int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
+ int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
+ void *priv, struct rtllib_device* ieee);
+
+ int (*set_key)(void *key, int len, u8 *seq, void *priv);
+ int (*get_key)(void *key, int len, u8 *seq, void *priv);
+
+ /* procfs handler for printing out key information and possible
+ * statistics */
+ char * (*print_stats)(char *p, void *priv);
+
+ /* maximum number of bytes added by encryption; encrypt buf is
+ * allocated with extra_prefix_len bytes, copy of in_buf, and
+ * extra_postfix_len; encrypt need not use all this space, but
+ * the result must start at the beginning of the struct buffer and
+ * correct length must be returned */
+ int extra_prefix_len, extra_postfix_len;
+
+ struct module *owner;
+};
+
+struct rtllib_crypt_data {
+ struct list_head list; /* delayed deletion list */
+ struct rtllib_crypto_ops *ops;
+ void *priv;
+ atomic_t refcnt;
+};
+
+int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops);
+int rtllib_unregister_crypto_ops(struct rtllib_crypto_ops *ops);
+struct rtllib_crypto_ops *rtllib_get_crypto_ops(const char *name);
+void rtllib_crypt_deinit_entries(struct rtllib_device *, int);
+void rtllib_crypt_deinit_handler(unsigned long);
+void rtllib_crypt_delayed_deinit(struct rtllib_device *ieee,
+ struct rtllib_crypt_data **crypt);
+#endif
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
new file mode 100644
index 00000000000..6196b9aa3a0
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -0,0 +1,463 @@
+/*
+ * Host AP crypt: host-based CCMP encryption implementation for Host AP driver
+ *
+ * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <linux/string.h>
+#include <linux/wireless.h>
+#include "rtllib.h"
+
+#include <linux/crypto.h>
+
+#include <linux/scatterlist.h>
+
+#define AES_BLOCK_LEN 16
+#define CCMP_HDR_LEN 8
+#define CCMP_MIC_LEN 8
+#define CCMP_TK_LEN 16
+#define CCMP_PN_LEN 6
+
+struct rtllib_ccmp_data {
+ u8 key[CCMP_TK_LEN];
+ int key_set;
+
+ u8 tx_pn[CCMP_PN_LEN];
+ u8 rx_pn[CCMP_PN_LEN];
+
+ u32 dot11RSNAStatsCCMPFormatErrors;
+ u32 dot11RSNAStatsCCMPReplays;
+ u32 dot11RSNAStatsCCMPDecryptErrors;
+
+ int key_idx;
+
+ struct crypto_tfm *tfm;
+
+ /* scratch buffers for virt_to_page() (crypto API) */
+ u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
+ tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN];
+ u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
+};
+
+static void rtllib_ccmp_aes_encrypt(struct crypto_tfm *tfm,
+ const u8 pt[16], u8 ct[16])
+{
+ crypto_cipher_encrypt_one((void *)tfm, ct, pt);
+}
+
+static void *rtllib_ccmp_init(int key_idx)
+{
+ struct rtllib_ccmp_data *priv;
+
+ priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ if (priv == NULL)
+ goto fail;
+ memset(priv, 0, sizeof(*priv));
+ priv->key_idx = key_idx;
+
+ priv->tfm = (void *)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(priv->tfm)) {
+ printk(KERN_DEBUG "rtllib_crypt_ccmp: could not allocate "
+ "crypto API aes\n");
+ priv->tfm = NULL;
+ goto fail;
+ }
+ return priv;
+
+fail:
+ if (priv) {
+ if (priv->tfm)
+ crypto_free_cipher((void *)priv->tfm);
+ kfree(priv);
+ }
+
+ return NULL;
+}
+
+
+static void rtllib_ccmp_deinit(void *priv)
+{
+ struct rtllib_ccmp_data *_priv = priv;
+ if (_priv && _priv->tfm)
+ crypto_free_cipher((void *)_priv->tfm);
+ kfree(priv);
+}
+
+
+static inline void xor_block(u8 *b, u8 *a, size_t len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ b[i] ^= a[i];
+}
+
+
+
+static void ccmp_init_blocks(struct crypto_tfm *tfm,
+ struct rtllib_hdr_4addr *hdr,
+ u8 *pn, size_t dlen, u8 *b0, u8 *auth,
+ u8 *s0)
+{
+ u8 *pos, qc = 0;
+ size_t aad_len;
+ u16 fc;
+ int a4_included, qc_included;
+ u8 aad[2 * AES_BLOCK_LEN];
+
+ fc = le16_to_cpu(hdr->frame_ctl);
+ a4_included = ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) ==
+ (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS));
+ /*
+ qc_included = ((WLAN_FC_GET_TYPE(fc) == RTLLIB_FTYPE_DATA) &&
+ (WLAN_FC_GET_STYPE(fc) & 0x08));
+ */
+ qc_included = ((WLAN_FC_GET_TYPE(fc) == RTLLIB_FTYPE_DATA) &&
+ (WLAN_FC_GET_STYPE(fc) & 0x80));
+ aad_len = 22;
+ if (a4_included)
+ aad_len += 6;
+ if (qc_included) {
+ pos = (u8 *) &hdr->addr4;
+ if (a4_included)
+ pos += 6;
+ qc = *pos & 0x0f;
+ aad_len += 2;
+ }
+ /* CCM Initial Block:
+ * Flag (Include authentication header, M=3 (8-octet MIC),
+ * L=1 (2-octet Dlen))
+ * Nonce: 0x00 | A2 | PN
+ * Dlen */
+ b0[0] = 0x59;
+ b0[1] = qc;
+ memcpy(b0 + 2, hdr->addr2, ETH_ALEN);
+ memcpy(b0 + 8, pn, CCMP_PN_LEN);
+ b0[14] = (dlen >> 8) & 0xff;
+ b0[15] = dlen & 0xff;
+
+ /* AAD:
+ * FC with bits 4..6 and 11..13 masked to zero; 14 is always one
+ * A1 | A2 | A3
+ * SC with bits 4..15 (seq#) masked to zero
+ * A4 (if present)
+ * QC (if present)
+ */
+ pos = (u8 *) hdr;
+ aad[0] = 0; /* aad_len >> 8 */
+ aad[1] = aad_len & 0xff;
+ aad[2] = pos[0] & 0x8f;
+ aad[3] = pos[1] & 0xc7;
+ memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN);
+ pos = (u8 *) &hdr->seq_ctl;
+ aad[22] = pos[0] & 0x0f;
+ aad[23] = 0; /* all bits masked */
+ memset(aad + 24, 0, 8);
+ if (a4_included)
+ memcpy(aad + 24, hdr->addr4, ETH_ALEN);
+ if (qc_included) {
+ aad[a4_included ? 30 : 24] = qc;
+ /* rest of QC masked */
+ }
+
+ /* Start with the first block and AAD */
+ rtllib_ccmp_aes_encrypt(tfm, b0, auth);
+ xor_block(auth, aad, AES_BLOCK_LEN);
+ rtllib_ccmp_aes_encrypt(tfm, auth, auth);
+ xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN);
+ rtllib_ccmp_aes_encrypt(tfm, auth, auth);
+ b0[0] &= 0x07;
+ b0[14] = b0[15] = 0;
+ rtllib_ccmp_aes_encrypt(tfm, b0, s0);
+}
+
+
+
+static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+ struct rtllib_ccmp_data *key = priv;
+ int data_len, i;
+ u8 *pos;
+ struct rtllib_hdr_4addr *hdr;
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
+ MAX_DEV_ADDR_SIZE);
+ if (skb_headroom(skb) < CCMP_HDR_LEN ||
+ skb_tailroom(skb) < CCMP_MIC_LEN ||
+ skb->len < hdr_len)
+ return -1;
+
+ data_len = skb->len - hdr_len;
+ pos = skb_push(skb, CCMP_HDR_LEN);
+ memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
+ pos += hdr_len;
+
+ i = CCMP_PN_LEN - 1;
+ while (i >= 0) {
+ key->tx_pn[i]++;
+ if (key->tx_pn[i] != 0)
+ break;
+ i--;
+ }
+
+ *pos++ = key->tx_pn[5];
+ *pos++ = key->tx_pn[4];
+ *pos++ = 0;
+ *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */;
+ *pos++ = key->tx_pn[3];
+ *pos++ = key->tx_pn[2];
+ *pos++ = key->tx_pn[1];
+ *pos++ = key->tx_pn[0];
+
+
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+ if (!tcb_desc->bHwSec) {
+ int blocks, last, len;
+ u8 *mic;
+ u8 *b0 = key->tx_b0;
+ u8 *b = key->tx_b;
+ u8 *e = key->tx_e;
+ u8 *s0 = key->tx_s0;
+
+ mic = skb_put(skb, CCMP_MIC_LEN);
+
+ ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len,
+ b0, b, s0);
+
+ blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
+ last = data_len % AES_BLOCK_LEN;
+
+ for (i = 1; i <= blocks; i++) {
+ len = (i == blocks && last) ? last : AES_BLOCK_LEN;
+ /* Authentication */
+ xor_block(b, pos, len);
+ rtllib_ccmp_aes_encrypt(key->tfm, b, b);
+ /* Encryption, with counter */
+ b0[14] = (i >> 8) & 0xff;
+ b0[15] = i & 0xff;
+ rtllib_ccmp_aes_encrypt(key->tfm, b0, e);
+ xor_block(pos, e, len);
+ pos += len;
+ }
+
+ for (i = 0; i < CCMP_MIC_LEN; i++)
+ mic[i] = b[i] ^ s0[i];
+ }
+ return 0;
+}
+
+
+static int rtllib_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+ struct rtllib_ccmp_data *key = priv;
+ u8 keyidx, *pos;
+ struct rtllib_hdr_4addr *hdr;
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
+ MAX_DEV_ADDR_SIZE);
+ u8 pn[6];
+
+ if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) {
+ key->dot11RSNAStatsCCMPFormatErrors++;
+ return -1;
+ }
+
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+ pos = skb->data + hdr_len;
+ keyidx = pos[3];
+ if (!(keyidx & (1 << 5))) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "CCMP: received packet without ExtIV"
+ " flag from %pM\n", hdr->addr2);
+ }
+ key->dot11RSNAStatsCCMPFormatErrors++;
+ return -2;
+ }
+ keyidx >>= 6;
+ if (key->key_idx != keyidx) {
+ printk(KERN_DEBUG "CCMP: RX tkey->key_idx=%d frame "
+ "keyidx=%d priv=%p\n", key->key_idx, keyidx, priv);
+ return -6;
+ }
+ if (!key->key_set) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "CCMP: received packet from %pM"
+ " with keyid=%d that does not have a configured"
+ " key\n", hdr->addr2, keyidx);
+ }
+ return -3;
+ }
+
+ pn[0] = pos[7];
+ pn[1] = pos[6];
+ pn[2] = pos[5];
+ pn[3] = pos[4];
+ pn[4] = pos[1];
+ pn[5] = pos[0];
+ pos += 8;
+ if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) {
+ key->dot11RSNAStatsCCMPReplays++;
+ return -4;
+ }
+ if (!tcb_desc->bHwSec) {
+ size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN -
+ CCMP_MIC_LEN;
+ u8 *mic = skb->data + skb->len - CCMP_MIC_LEN;
+ u8 *b0 = key->rx_b0;
+ u8 *b = key->rx_b;
+ u8 *a = key->rx_a;
+ int i, blocks, last, len;
+
+
+ ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
+ xor_block(mic, b, CCMP_MIC_LEN);
+
+ blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
+ last = data_len % AES_BLOCK_LEN;
+
+ for (i = 1; i <= blocks; i++) {
+ len = (i == blocks && last) ? last : AES_BLOCK_LEN;
+ /* Decrypt, with counter */
+ b0[14] = (i >> 8) & 0xff;
+ b0[15] = i & 0xff;
+ rtllib_ccmp_aes_encrypt(key->tfm, b0, b);
+ xor_block(pos, b, len);
+ /* Authentication */
+ xor_block(a, pos, len);
+ rtllib_ccmp_aes_encrypt(key->tfm, a, a);
+ pos += len;
+ }
+
+ if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "CCMP: decrypt failed: STA="
+ " %pM\n", hdr->addr2);
+ }
+ key->dot11RSNAStatsCCMPDecryptErrors++;
+ return -5;
+ }
+
+ memcpy(key->rx_pn, pn, CCMP_PN_LEN);
+ }
+ /* Remove hdr and MIC */
+ memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
+ skb_pull(skb, CCMP_HDR_LEN);
+ skb_trim(skb, skb->len - CCMP_MIC_LEN);
+
+ return keyidx;
+}
+
+
+static int rtllib_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
+{
+ struct rtllib_ccmp_data *data = priv;
+ int keyidx;
+ struct crypto_tfm *tfm = data->tfm;
+
+ keyidx = data->key_idx;
+ memset(data, 0, sizeof(*data));
+ data->key_idx = keyidx;
+ data->tfm = tfm;
+ if (len == CCMP_TK_LEN) {
+ memcpy(data->key, key, CCMP_TK_LEN);
+ data->key_set = 1;
+ if (seq) {
+ data->rx_pn[0] = seq[5];
+ data->rx_pn[1] = seq[4];
+ data->rx_pn[2] = seq[3];
+ data->rx_pn[3] = seq[2];
+ data->rx_pn[4] = seq[1];
+ data->rx_pn[5] = seq[0];
+ }
+ crypto_cipher_setkey((void *)data->tfm, data->key, CCMP_TK_LEN);
+ } else if (len == 0)
+ data->key_set = 0;
+ else
+ return -1;
+
+ return 0;
+}
+
+
+static int rtllib_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
+{
+ struct rtllib_ccmp_data *data = priv;
+
+ if (len < CCMP_TK_LEN)
+ return -1;
+
+ if (!data->key_set)
+ return 0;
+ memcpy(key, data->key, CCMP_TK_LEN);
+
+ if (seq) {
+ seq[0] = data->tx_pn[5];
+ seq[1] = data->tx_pn[4];
+ seq[2] = data->tx_pn[3];
+ seq[3] = data->tx_pn[2];
+ seq[4] = data->tx_pn[1];
+ seq[5] = data->tx_pn[0];
+ }
+
+ return CCMP_TK_LEN;
+}
+
+
+static char *rtllib_ccmp_print_stats(char *p, void *priv)
+{
+ struct rtllib_ccmp_data *ccmp = priv;
+ p += sprintf(p, "key[%d] alg=CCMP key_set=%d "
+ "tx_pn=%pM rx_pn=%pM "
+ "format_errors=%d replays=%d decrypt_errors=%d\n",
+ ccmp->key_idx, ccmp->key_set,
+ ccmp->tx_pn, ccmp->rx_pn,
+ ccmp->dot11RSNAStatsCCMPFormatErrors,
+ ccmp->dot11RSNAStatsCCMPReplays,
+ ccmp->dot11RSNAStatsCCMPDecryptErrors);
+
+ return p;
+}
+
+void rtllib_ccmp_null(void)
+{
+ return;
+}
+
+static struct rtllib_crypto_ops rtllib_crypt_ccmp = {
+ .name = "CCMP",
+ .init = rtllib_ccmp_init,
+ .deinit = rtllib_ccmp_deinit,
+ .encrypt_mpdu = rtllib_ccmp_encrypt,
+ .decrypt_mpdu = rtllib_ccmp_decrypt,
+ .encrypt_msdu = NULL,
+ .decrypt_msdu = NULL,
+ .set_key = rtllib_ccmp_set_key,
+ .get_key = rtllib_ccmp_get_key,
+ .print_stats = rtllib_ccmp_print_stats,
+ .extra_prefix_len = CCMP_HDR_LEN,
+ .extra_postfix_len = CCMP_MIC_LEN,
+ .owner = THIS_MODULE,
+};
+
+
+int __init rtllib_crypto_ccmp_init(void)
+{
+ return rtllib_register_crypto_ops(&rtllib_crypt_ccmp);
+}
+
+
+void __exit rtllib_crypto_ccmp_exit(void)
+{
+ rtllib_unregister_crypto_ops(&rtllib_crypt_ccmp);
+}
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
new file mode 100644
index 00000000000..6a0c8788642
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -0,0 +1,775 @@
+/*
+ * Host AP crypt: host-based TKIP encryption implementation for Host AP driver
+ *
+ * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/crc32.h>
+
+#include "rtllib.h"
+
+struct rtllib_tkip_data {
+#define TKIP_KEY_LEN 32
+ u8 key[TKIP_KEY_LEN];
+ int key_set;
+
+ u32 tx_iv32;
+ u16 tx_iv16;
+ u16 tx_ttak[5];
+ int tx_phase1_done;
+
+ u32 rx_iv32;
+ u16 rx_iv16;
+ bool initialized;
+ u16 rx_ttak[5];
+ int rx_phase1_done;
+ u32 rx_iv32_new;
+ u16 rx_iv16_new;
+
+ u32 dot11RSNAStatsTKIPReplays;
+ u32 dot11RSNAStatsTKIPICVErrors;
+ u32 dot11RSNAStatsTKIPLocalMICFailures;
+
+ int key_idx;
+ struct crypto_blkcipher *rx_tfm_arc4;
+ struct crypto_hash *rx_tfm_michael;
+ struct crypto_blkcipher *tx_tfm_arc4;
+ struct crypto_hash *tx_tfm_michael;
+ /* scratch buffers for virt_to_page() (crypto API) */
+ u8 rx_hdr[16], tx_hdr[16];
+};
+
+static void *rtllib_tkip_init(int key_idx)
+{
+ struct rtllib_tkip_data *priv;
+
+ priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ if (priv == NULL)
+ goto fail;
+ memset(priv, 0, sizeof(*priv));
+ priv->key_idx = key_idx;
+ priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(priv->tx_tfm_arc4)) {
+ printk(KERN_DEBUG "rtllib_crypt_tkip: could not allocate "
+ "crypto API arc4\n");
+ priv->tx_tfm_arc4 = NULL;
+ goto fail;
+ }
+
+ priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(priv->tx_tfm_michael)) {
+ printk(KERN_DEBUG "rtllib_crypt_tkip: could not allocate "
+ "crypto API michael_mic\n");
+ priv->tx_tfm_michael = NULL;
+ goto fail;
+ }
+
+ priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(priv->rx_tfm_arc4)) {
+ printk(KERN_DEBUG "rtllib_crypt_tkip: could not allocate "
+ "crypto API arc4\n");
+ priv->rx_tfm_arc4 = NULL;
+ goto fail;
+ }
+
+ priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(priv->rx_tfm_michael)) {
+ printk(KERN_DEBUG "rtllib_crypt_tkip: could not allocate "
+ "crypto API michael_mic\n");
+ priv->rx_tfm_michael = NULL;
+ goto fail;
+ }
+ return priv;
+
+fail:
+ if (priv) {
+ if (priv->tx_tfm_michael)
+ crypto_free_hash(priv->tx_tfm_michael);
+ if (priv->tx_tfm_arc4)
+ crypto_free_blkcipher(priv->tx_tfm_arc4);
+ if (priv->rx_tfm_michael)
+ crypto_free_hash(priv->rx_tfm_michael);
+ if (priv->rx_tfm_arc4)
+ crypto_free_blkcipher(priv->rx_tfm_arc4);
+ kfree(priv);
+ }
+
+ return NULL;
+}
+
+
+static void rtllib_tkip_deinit(void *priv)
+{
+ struct rtllib_tkip_data *_priv = priv;
+
+ if (_priv) {
+ if (_priv->tx_tfm_michael)
+ crypto_free_hash(_priv->tx_tfm_michael);
+ if (_priv->tx_tfm_arc4)
+ crypto_free_blkcipher(_priv->tx_tfm_arc4);
+ if (_priv->rx_tfm_michael)
+ crypto_free_hash(_priv->rx_tfm_michael);
+ if (_priv->rx_tfm_arc4)
+ crypto_free_blkcipher(_priv->rx_tfm_arc4);
+ }
+ kfree(priv);
+}
+
+
+static inline u16 RotR1(u16 val)
+{
+ return (val >> 1) | (val << 15);
+}
+
+
+static inline u8 Lo8(u16 val)
+{
+ return val & 0xff;
+}
+
+
+static inline u8 Hi8(u16 val)
+{
+ return val >> 8;
+}
+
+
+static inline u16 Lo16(u32 val)
+{
+ return val & 0xffff;
+}
+
+
+static inline u16 Hi16(u32 val)
+{
+ return val >> 16;
+}
+
+
+static inline u16 Mk16(u8 hi, u8 lo)
+{
+ return lo | (((u16) hi) << 8);
+}
+
+
+static inline u16 Mk16_le(u16 *v)
+{
+ return le16_to_cpu(*v);
+}
+
+
+static const u16 Sbox[256] = {
+ 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
+ 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
+ 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
+ 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
+ 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
+ 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
+ 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
+ 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
+ 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
+ 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
+ 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
+ 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
+ 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
+ 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
+ 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
+ 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
+ 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
+ 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
+ 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
+ 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
+ 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
+ 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
+ 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
+ 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
+ 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
+ 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
+ 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
+ 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
+ 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
+ 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
+ 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
+ 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
+};
+
+
+static inline u16 _S_(u16 v)
+{
+ u16 t = Sbox[Hi8(v)];
+ return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
+}
+
+
+#define PHASE1_LOOP_COUNT 8
+
+
+static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32)
+{
+ int i, j;
+
+ /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */
+ TTAK[0] = Lo16(IV32);
+ TTAK[1] = Hi16(IV32);
+ TTAK[2] = Mk16(TA[1], TA[0]);
+ TTAK[3] = Mk16(TA[3], TA[2]);
+ TTAK[4] = Mk16(TA[5], TA[4]);
+
+ for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
+ j = 2 * (i & 1);
+ TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j]));
+ TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j]));
+ TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j]));
+ TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j]));
+ TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i;
+ }
+}
+
+
+static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
+ u16 IV16)
+{
+ /* Make temporary area overlap WEP seed so that the final copy can be
+ * avoided on little endian hosts. */
+ u16 *PPK = (u16 *) &WEPSeed[4];
+
+ /* Step 1 - make copy of TTAK and bring in TSC */
+ PPK[0] = TTAK[0];
+ PPK[1] = TTAK[1];
+ PPK[2] = TTAK[2];
+ PPK[3] = TTAK[3];
+ PPK[4] = TTAK[4];
+ PPK[5] = TTAK[4] + IV16;
+
+ /* Step 2 - 96-bit bijective mixing using S-box */
+ PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0]));
+ PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2]));
+ PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4]));
+ PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6]));
+ PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8]));
+ PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10]));
+
+ PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12]));
+ PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14]));
+ PPK[2] += RotR1(PPK[1]);
+ PPK[3] += RotR1(PPK[2]);
+ PPK[4] += RotR1(PPK[3]);
+ PPK[5] += RotR1(PPK[4]);
+
+ /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value
+ * WEPSeed[0..2] is transmitted as WEP IV */
+ WEPSeed[0] = Hi8(IV16);
+ WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
+ WEPSeed[2] = Lo8(IV16);
+ WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1);
+
+#ifdef __BIG_ENDIAN
+ {
+ int i;
+ for (i = 0; i < 6; i++)
+ PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8);
+ }
+#endif
+}
+
+
+static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+ struct rtllib_tkip_data *tkey = priv;
+ int len;
+ u8 *pos;
+ struct rtllib_hdr_4addr *hdr;
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
+ MAX_DEV_ADDR_SIZE);
+ struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
+ int ret = 0;
+ u8 rc4key[16], *icv;
+ u32 crc;
+ struct scatterlist sg;
+
+ if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
+ skb->len < hdr_len)
+ return -1;
+
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+
+ if (!tcb_desc->bHwSec) {
+ if (!tkey->tx_phase1_done) {
+ tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
+ tkey->tx_iv32);
+ tkey->tx_phase1_done = 1;
+ }
+ tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak,
+ tkey->tx_iv16);
+ } else
+ tkey->tx_phase1_done = 1;
+
+
+ len = skb->len - hdr_len;
+ pos = skb_push(skb, 8);
+ memmove(pos, pos + 8, hdr_len);
+ pos += hdr_len;
+
+ if (tcb_desc->bHwSec) {
+ *pos++ = Hi8(tkey->tx_iv16);
+ *pos++ = (Hi8(tkey->tx_iv16) | 0x20) & 0x7F;
+ *pos++ = Lo8(tkey->tx_iv16);
+ } else {
+ *pos++ = rc4key[0];
+ *pos++ = rc4key[1];
+ *pos++ = rc4key[2];
+ }
+
+ *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */;
+ *pos++ = tkey->tx_iv32 & 0xff;
+ *pos++ = (tkey->tx_iv32 >> 8) & 0xff;
+ *pos++ = (tkey->tx_iv32 >> 16) & 0xff;
+ *pos++ = (tkey->tx_iv32 >> 24) & 0xff;
+
+ if (!tcb_desc->bHwSec) {
+ icv = skb_put(skb, 4);
+ crc = ~crc32_le(~0, pos, len);
+ icv[0] = crc;
+ icv[1] = crc >> 8;
+ icv[2] = crc >> 16;
+ icv[3] = crc >> 24;
+
+ sg_init_one(&sg, pos, len+4);
+
+
+ crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+ ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+ }
+
+ tkey->tx_iv16++;
+ if (tkey->tx_iv16 == 0) {
+ tkey->tx_phase1_done = 0;
+ tkey->tx_iv32++;
+ }
+
+ if (!tcb_desc->bHwSec)
+ return ret;
+ else
+ return 0;
+
+
+}
+
+static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+ struct rtllib_tkip_data *tkey = priv;
+ u8 keyidx, *pos;
+ u32 iv32;
+ u16 iv16;
+ struct rtllib_hdr_4addr *hdr;
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
+ MAX_DEV_ADDR_SIZE);
+ struct blkcipher_desc desc = {.tfm = tkey->rx_tfm_arc4};
+ u8 rc4key[16];
+ u8 icv[4];
+ u32 crc;
+ struct scatterlist sg;
+ int plen;
+ if (skb->len < hdr_len + 8 + 4)
+ return -1;
+
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+ pos = skb->data + hdr_len;
+ keyidx = pos[3];
+ if (!(keyidx & (1 << 5))) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "TKIP: received packet without ExtIV"
+ " flag from %pM\n", hdr->addr2);
+ }
+ return -2;
+ }
+ keyidx >>= 6;
+ if (tkey->key_idx != keyidx) {
+ printk(KERN_DEBUG "TKIP: RX tkey->key_idx=%d frame "
+ "keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv);
+ return -6;
+ }
+ if (!tkey->key_set) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "TKIP: received packet from %pM"
+ " with keyid=%d that does not have a configured"
+ " key\n", hdr->addr2, keyidx);
+ }
+ return -3;
+ }
+ iv16 = (pos[0] << 8) | pos[2];
+ iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
+ pos += 8;
+
+ if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) {
+ if ((iv32 < tkey->rx_iv32 ||
+ (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) &&
+ tkey->initialized) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "TKIP: replay detected: STA="
+ " %pM previous TSC %08x%04x received "
+ "TSC %08x%04x\n",hdr->addr2,
+ tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
+ }
+ tkey->dot11RSNAStatsTKIPReplays++;
+ return -4;
+ }
+ tkey->initialized = true;
+
+ if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) {
+ tkip_mixing_phase1(tkey->rx_ttak, tkey->key,
+ hdr->addr2, iv32);
+ tkey->rx_phase1_done = 1;
+ }
+ tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16);
+
+ plen = skb->len - hdr_len - 12;
+
+ sg_init_one(&sg, pos, plen+4);
+
+ crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+ if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG ": TKIP: failed to decrypt "
+ "received packet from %pM\n",
+ hdr->addr2);
+ }
+ return -7;
+ }
+
+ crc = ~crc32_le(~0, pos, plen);
+ icv[0] = crc;
+ icv[1] = crc >> 8;
+ icv[2] = crc >> 16;
+ icv[3] = crc >> 24;
+
+ if (memcmp(icv, pos + plen, 4) != 0) {
+ if (iv32 != tkey->rx_iv32) {
+ /* Previously cached Phase1 result was already
+ * lost, so it needs to be recalculated for the
+ * next packet. */
+ tkey->rx_phase1_done = 0;
+ }
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "TKIP: ICV error detected: STA="
+ " %pM\n", hdr->addr2);
+ }
+ tkey->dot11RSNAStatsTKIPICVErrors++;
+ return -5;
+ }
+
+ }
+
+ /* Update real counters only after Michael MIC verification has
+ * completed */
+ tkey->rx_iv32_new = iv32;
+ tkey->rx_iv16_new = iv16;
+
+ /* Remove IV and ICV */
+ memmove(skb->data + 8, skb->data, hdr_len);
+ skb_pull(skb, 8);
+ skb_trim(skb, skb->len - 4);
+
+ return keyidx;
+}
+
+
+static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
+ u8 *data, size_t data_len, u8 *mic)
+{
+ struct hash_desc desc;
+ struct scatterlist sg[2];
+
+ if (tfm_michael == NULL) {
+ printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
+ return -1;
+ }
+ sg_init_table(sg, 2);
+ sg_set_buf(&sg[0], hdr, 16);
+ sg_set_buf(&sg[1], data, data_len);
+
+ if (crypto_hash_setkey(tfm_michael, key, 8))
+ return -1;
+
+ desc.tfm = tfm_michael;
+ desc.flags = 0;
+ return crypto_hash_digest(&desc, sg, data_len + 16, mic);
+}
+
+static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
+{
+ struct rtllib_hdr_4addr *hdr11;
+
+ hdr11 = (struct rtllib_hdr_4addr *) skb->data;
+ switch (le16_to_cpu(hdr11->frame_ctl) &
+ (RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS)) {
+ case RTLLIB_FCTL_TODS:
+ memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
+ memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
+ break;
+ case RTLLIB_FCTL_FROMDS:
+ memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
+ memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */
+ break;
+ case RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS:
+ memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
+ memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
+ break;
+ case 0:
+ memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
+ memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
+ break;
+ }
+
+ hdr[12] = 0; /* priority */
+
+ hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
+}
+
+
+static int rtllib_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
+{
+ struct rtllib_tkip_data *tkey = priv;
+ u8 *pos;
+ struct rtllib_hdr_4addr *hdr;
+
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+
+ if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
+ printk(KERN_DEBUG "Invalid packet for Michael MIC add "
+ "(tailroom=%d hdr_len=%d skb->len=%d)\n",
+ skb_tailroom(skb), hdr_len, skb->len);
+ return -1;
+ }
+
+ michael_mic_hdr(skb, tkey->tx_hdr);
+
+ if (RTLLIB_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
+ tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
+ pos = skb_put(skb, 8);
+ if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
+ skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
+ return -1;
+
+ return 0;
+}
+
+
+static void rtllib_michael_mic_failure(struct net_device *dev,
+ struct rtllib_hdr_4addr *hdr,
+ int keyidx)
+{
+ union iwreq_data wrqu;
+ struct iw_michaelmicfailure ev;
+
+ /* TODO: needed parameters: count, keyid, key type, TSC */
+ memset(&ev, 0, sizeof(ev));
+ ev.flags = keyidx & IW_MICFAILURE_KEY_ID;
+ if (hdr->addr1[0] & 0x01)
+ ev.flags |= IW_MICFAILURE_GROUP;
+ else
+ ev.flags |= IW_MICFAILURE_PAIRWISE;
+ ev.src_addr.sa_family = ARPHRD_ETHER;
+ memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN);
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = sizeof(ev);
+ wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev);
+}
+
+static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
+ int hdr_len, void *priv,
+ struct rtllib_device *ieee)
+{
+ struct rtllib_tkip_data *tkey = priv;
+ u8 mic[8];
+ struct rtllib_hdr_4addr *hdr;
+
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+
+ if (!tkey->key_set)
+ return -1;
+
+ michael_mic_hdr(skb, tkey->rx_hdr);
+ if (RTLLIB_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
+ tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
+
+ if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
+ skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
+ return -1;
+
+ if ((memcmp(mic, skb->data + skb->len - 8, 8) != 0) ||
+ (ieee->force_mic_error)) {
+ struct rtllib_hdr_4addr *hdr;
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+ printk(KERN_DEBUG "%s: Michael MIC verification failed for "
+ "MSDU from %pM keyidx=%d\n",
+ skb->dev ? skb->dev->name : "N/A", hdr->addr2,
+ keyidx);
+ printk(KERN_DEBUG "%d, force_mic_error = %d\n",
+ (memcmp(mic, skb->data + skb->len - 8, 8) != 0),\
+ ieee->force_mic_error);
+ if (skb->dev) {
+ printk(KERN_INFO "skb->dev != NULL\n");
+ rtllib_michael_mic_failure(skb->dev, hdr, keyidx);
+ }
+ tkey->dot11RSNAStatsTKIPLocalMICFailures++;
+ ieee->force_mic_error = false;
+ return -1;
+ }
+
+ /* Update TSC counters for RX now that the packet verification has
+ * completed. */
+ tkey->rx_iv32 = tkey->rx_iv32_new;
+ tkey->rx_iv16 = tkey->rx_iv16_new;
+
+ skb_trim(skb, skb->len - 8);
+
+ return 0;
+}
+
+
+static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv)
+{
+ struct rtllib_tkip_data *tkey = priv;
+ int keyidx;
+ struct crypto_hash *tfm = tkey->tx_tfm_michael;
+ struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
+ struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
+ struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
+
+ keyidx = tkey->key_idx;
+ memset(tkey, 0, sizeof(*tkey));
+ tkey->key_idx = keyidx;
+ tkey->tx_tfm_michael = tfm;
+ tkey->tx_tfm_arc4 = tfm2;
+ tkey->rx_tfm_michael = tfm3;
+ tkey->rx_tfm_arc4 = tfm4;
+
+ if (len == TKIP_KEY_LEN) {
+ memcpy(tkey->key, key, TKIP_KEY_LEN);
+ tkey->key_set = 1;
+ tkey->tx_iv16 = 1; /* TSC is initialized to 1 */
+ if (seq) {
+ tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) |
+ (seq[3] << 8) | seq[2];
+ tkey->rx_iv16 = (seq[1] << 8) | seq[0];
+ }
+ } else if (len == 0)
+ tkey->key_set = 0;
+ else
+ return -1;
+
+ return 0;
+}
+
+
+static int rtllib_tkip_get_key(void *key, int len, u8 *seq, void *priv)
+{
+ struct rtllib_tkip_data *tkey = priv;
+
+ if (len < TKIP_KEY_LEN)
+ return -1;
+
+ if (!tkey->key_set)
+ return 0;
+ memcpy(key, tkey->key, TKIP_KEY_LEN);
+
+ if (seq) {
+ /* Return the sequence number of the last transmitted frame. */
+ u16 iv16 = tkey->tx_iv16;
+ u32 iv32 = tkey->tx_iv32;
+ if (iv16 == 0)
+ iv32--;
+ iv16--;
+ seq[0] = tkey->tx_iv16;
+ seq[1] = tkey->tx_iv16 >> 8;
+ seq[2] = tkey->tx_iv32;
+ seq[3] = tkey->tx_iv32 >> 8;
+ seq[4] = tkey->tx_iv32 >> 16;
+ seq[5] = tkey->tx_iv32 >> 24;
+ }
+
+ return TKIP_KEY_LEN;
+}
+
+
+static char *rtllib_tkip_print_stats(char *p, void *priv)
+{
+ struct rtllib_tkip_data *tkip = priv;
+ p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
+ "tx_pn=%02x%02x%02x%02x%02x%02x "
+ "rx_pn=%02x%02x%02x%02x%02x%02x "
+ "replays=%d icv_errors=%d local_mic_failures=%d\n",
+ tkip->key_idx, tkip->key_set,
+ (tkip->tx_iv32 >> 24) & 0xff,
+ (tkip->tx_iv32 >> 16) & 0xff,
+ (tkip->tx_iv32 >> 8) & 0xff,
+ tkip->tx_iv32 & 0xff,
+ (tkip->tx_iv16 >> 8) & 0xff,
+ tkip->tx_iv16 & 0xff,
+ (tkip->rx_iv32 >> 24) & 0xff,
+ (tkip->rx_iv32 >> 16) & 0xff,
+ (tkip->rx_iv32 >> 8) & 0xff,
+ tkip->rx_iv32 & 0xff,
+ (tkip->rx_iv16 >> 8) & 0xff,
+ tkip->rx_iv16 & 0xff,
+ tkip->dot11RSNAStatsTKIPReplays,
+ tkip->dot11RSNAStatsTKIPICVErrors,
+ tkip->dot11RSNAStatsTKIPLocalMICFailures);
+ return p;
+}
+
+
+static struct rtllib_crypto_ops rtllib_crypt_tkip = {
+ .name = "TKIP",
+ .init = rtllib_tkip_init,
+ .deinit = rtllib_tkip_deinit,
+ .encrypt_mpdu = rtllib_tkip_encrypt,
+ .decrypt_mpdu = rtllib_tkip_decrypt,
+ .encrypt_msdu = rtllib_michael_mic_add,
+ .decrypt_msdu = rtllib_michael_mic_verify,
+ .set_key = rtllib_tkip_set_key,
+ .get_key = rtllib_tkip_get_key,
+ .print_stats = rtllib_tkip_print_stats,
+ .extra_prefix_len = 4 + 4, /* IV + ExtIV */
+ .extra_postfix_len = 8 + 4, /* MIC + ICV */
+ .owner = THIS_MODULE,
+};
+
+
+int __init rtllib_crypto_tkip_init(void)
+{
+ return rtllib_register_crypto_ops(&rtllib_crypt_tkip);
+}
+
+
+void __exit rtllib_crypto_tkip_exit(void)
+{
+ rtllib_unregister_crypto_ops(&rtllib_crypt_tkip);
+}
+
+void rtllib_tkip_null(void)
+{
+ return;
+}
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
new file mode 100644
index 00000000000..c59bf10fe78
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -0,0 +1,292 @@
+/*
+ * Host AP crypt: host-based WEP encryption implementation for Host AP driver
+ *
+ * Copyright (c) 2002-2004, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include "rtllib.h"
+
+#include <linux/crypto.h>
+
+#include <linux/scatterlist.h>
+#include <linux/crc32.h>
+
+struct prism2_wep_data {
+ u32 iv;
+#define WEP_KEY_LEN 13
+ u8 key[WEP_KEY_LEN + 1];
+ u8 key_len;
+ u8 key_idx;
+ struct crypto_blkcipher *tx_tfm;
+ struct crypto_blkcipher *rx_tfm;
+};
+
+
+static void *prism2_wep_init(int keyidx)
+{
+ struct prism2_wep_data *priv;
+
+ priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ if (priv == NULL)
+ goto fail;
+ memset(priv, 0, sizeof(*priv));
+ priv->key_idx = keyidx;
+
+ priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(priv->tx_tfm)) {
+ printk(KERN_DEBUG "rtllib_crypt_wep: could not allocate "
+ "crypto API arc4\n");
+ priv->tx_tfm = NULL;
+ goto fail;
+ }
+ priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(priv->rx_tfm)) {
+ printk(KERN_DEBUG "rtllib_crypt_wep: could not allocate "
+ "crypto API arc4\n");
+ priv->rx_tfm = NULL;
+ goto fail;
+ }
+
+ /* start WEP IV from a random value */
+ get_random_bytes(&priv->iv, 4);
+
+ return priv;
+
+fail:
+ if (priv) {
+ if (priv->tx_tfm)
+ crypto_free_blkcipher(priv->tx_tfm);
+ if (priv->rx_tfm)
+ crypto_free_blkcipher(priv->rx_tfm);
+ kfree(priv);
+ }
+ return NULL;
+}
+
+
+static void prism2_wep_deinit(void *priv)
+{
+ struct prism2_wep_data *_priv = priv;
+
+ if (_priv) {
+ if (_priv->tx_tfm)
+ crypto_free_blkcipher(_priv->tx_tfm);
+ if (_priv->rx_tfm)
+ crypto_free_blkcipher(_priv->rx_tfm);
+ }
+ kfree(priv);
+}
+
+/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
+ * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
+ * so the payload length increases with 8 bytes.
+ *
+ * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
+ */
+static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+ struct prism2_wep_data *wep = priv;
+ u32 klen, len;
+ u8 key[WEP_KEY_LEN + 3];
+ u8 *pos;
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
+ MAX_DEV_ADDR_SIZE);
+ struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
+ u32 crc;
+ u8 *icv;
+ struct scatterlist sg;
+ if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
+ skb->len < hdr_len){
+ printk(KERN_ERR "Error!!! headroom=%d tailroom=%d skblen=%d"
+ " hdr_len=%d\n", skb_headroom(skb), skb_tailroom(skb),
+ skb->len, hdr_len);
+ return -1;
+ }
+ len = skb->len - hdr_len;
+ pos = skb_push(skb, 4);
+ memmove(pos, pos + 4, hdr_len);
+ pos += hdr_len;
+
+ klen = 3 + wep->key_len;
+
+ wep->iv++;
+
+ /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
+ * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
+ * can be used to speedup attacks, so avoid using them. */
+ if ((wep->iv & 0xff00) == 0xff00) {
+ u8 B = (wep->iv >> 16) & 0xff;
+ if (B >= 3 && B < klen)
+ wep->iv += 0x0100;
+ }
+
+ /* Prepend 24-bit IV to RC4 key and TX frame */
+ *pos++ = key[0] = (wep->iv >> 16) & 0xff;
+ *pos++ = key[1] = (wep->iv >> 8) & 0xff;
+ *pos++ = key[2] = wep->iv & 0xff;
+ *pos++ = wep->key_idx << 6;
+
+ /* Copy rest of the WEP key (the secret part) */
+ memcpy(key + 3, wep->key, wep->key_len);
+
+ if (!tcb_desc->bHwSec) {
+
+ /* Append little-endian CRC32 and encrypt it to produce ICV */
+ crc = ~crc32_le(~0, pos, len);
+ icv = skb_put(skb, 4);
+ icv[0] = crc;
+ icv[1] = crc >> 8;
+ icv[2] = crc >> 16;
+ icv[3] = crc >> 24;
+
+ sg_init_one(&sg, pos, len+4);
+ crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
+ return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+ }
+
+ return 0;
+}
+
+
+/* Perform WEP decryption on given struct buffer. Buffer includes whole WEP
+ * part of the frame: IV (4 bytes), encrypted payload (including SNAP header),
+ * ICV (4 bytes). len includes both IV and ICV.
+ *
+ * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
+ * failure. If frame is OK, IV and ICV will be removed.
+ */
+static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+ struct prism2_wep_data *wep = priv;
+ u32 klen, plen;
+ u8 key[WEP_KEY_LEN + 3];
+ u8 keyidx, *pos;
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
+ MAX_DEV_ADDR_SIZE);
+ struct blkcipher_desc desc = {.tfm = wep->rx_tfm};
+ u32 crc;
+ u8 icv[4];
+ struct scatterlist sg;
+ if (skb->len < hdr_len + 8)
+ return -1;
+
+ pos = skb->data + hdr_len;
+ key[0] = *pos++;
+ key[1] = *pos++;
+ key[2] = *pos++;
+ keyidx = *pos++ >> 6;
+ if (keyidx != wep->key_idx)
+ return -1;
+
+ klen = 3 + wep->key_len;
+
+ /* Copy rest of the WEP key (the secret part) */
+ memcpy(key + 3, wep->key, wep->key_len);
+
+ /* Apply RC4 to data and compute CRC32 over decrypted data */
+ plen = skb->len - hdr_len - 8;
+
+ if (!tcb_desc->bHwSec) {
+ sg_init_one(&sg, pos, plen+4);
+ crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
+ if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
+ return -7;
+ crc = ~crc32_le(~0, pos, plen);
+ icv[0] = crc;
+ icv[1] = crc >> 8;
+ icv[2] = crc >> 16;
+ icv[3] = crc >> 24;
+ if (memcmp(icv, pos + plen, 4) != 0) {
+ /* ICV mismatch - drop frame */
+ return -2;
+ }
+ }
+ /* Remove IV and ICV */
+ memmove(skb->data + 4, skb->data, hdr_len);
+ skb_pull(skb, 4);
+ skb_trim(skb, skb->len - 4);
+
+ return 0;
+}
+
+
+static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
+{
+ struct prism2_wep_data *wep = priv;
+
+ if (len < 0 || len > WEP_KEY_LEN)
+ return -1;
+
+ memcpy(wep->key, key, len);
+ wep->key_len = len;
+
+ return 0;
+}
+
+
+static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
+{
+ struct prism2_wep_data *wep = priv;
+
+ if (len < wep->key_len)
+ return -1;
+
+ memcpy(key, wep->key, wep->key_len);
+
+ return wep->key_len;
+}
+
+
+static char *prism2_wep_print_stats(char *p, void *priv)
+{
+ struct prism2_wep_data *wep = priv;
+ p += sprintf(p, "key[%d] alg=WEP len=%d\n",
+ wep->key_idx, wep->key_len);
+ return p;
+}
+
+
+static struct rtllib_crypto_ops rtllib_crypt_wep = {
+ .name = "WEP",
+ .init = prism2_wep_init,
+ .deinit = prism2_wep_deinit,
+ .encrypt_mpdu = prism2_wep_encrypt,
+ .decrypt_mpdu = prism2_wep_decrypt,
+ .encrypt_msdu = NULL,
+ .decrypt_msdu = NULL,
+ .set_key = prism2_wep_set_key,
+ .get_key = prism2_wep_get_key,
+ .print_stats = prism2_wep_print_stats,
+ .extra_prefix_len = 4, /* IV */
+ .extra_postfix_len = 4, /* ICV */
+ .owner = THIS_MODULE,
+};
+
+
+int __init rtllib_crypto_wep_init(void)
+{
+ return rtllib_register_crypto_ops(&rtllib_crypt_wep);
+}
+
+
+void __exit rtllib_crypto_wep_exit(void)
+{
+ rtllib_unregister_crypto_ops(&rtllib_crypt_wep);
+}
+
+void rtllib_wep_null(void)
+{
+ return;
+}
diff --git a/drivers/staging/rtl8192e/rtllib_endianfree.h b/drivers/staging/rtl8192e/rtllib_endianfree.h
new file mode 100644
index 00000000000..b268605a52a
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_endianfree.h
@@ -0,0 +1,160 @@
+#ifndef __INC_ENDIANFREE_H
+#define __INC_ENDIANFREE_H
+
+/*
+ * Call endian free function when
+ * 1. Read/write packet content.
+ * 2. Before write integer to IO.
+ * 3. After read integer from IO.
+ */
+
+#define __MACHINE_LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
+#define __MACHINE_BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net, ppc */
+
+#define BYTE_ORDER __MACHINE_LITTLE_ENDIAN
+
+#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
+#define EF1Byte(_val) ((u8)(_val))
+#define EF2Byte(_val) ((u16)(_val))
+#define EF4Byte(_val) ((u32)(_val))
+
+#else
+#define EF1Byte(_val) ((u8)(_val))
+#define EF2Byte(_val) \
+ (((((u16)(_val))&0x00ff)<<8)|((((u16)(_val))&0xff00)>>8))
+#define EF4Byte(_val) \
+ (((((u32)(_val))&0x000000ff)<<24)|\
+ ((((u32)(_val))&0x0000ff00)<<8)|\
+ ((((u32)(_val))&0x00ff0000)>>8)|\
+ ((((u32)(_val))&0xff000000)>>24))
+#endif
+
+#define ReadEF1Byte(_ptr) EF1Byte(*((u8 *)(_ptr)))
+#define ReadEF2Byte(_ptr) EF2Byte(*((u16 *)(_ptr)))
+#define ReadEF4Byte(_ptr) EF4Byte(*((u32 *)(_ptr)))
+
+#define WriteEF1Byte(_ptr, _val) (*((u8 *)(_ptr))) = EF1Byte(_val)
+#define WriteEF2Byte(_ptr, _val) (*((u16 *)(_ptr))) = EF2Byte(_val)
+#define WriteEF4Byte(_ptr, _val) (*((u32 *)(_ptr))) = EF4Byte(_val)
+#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
+#define H2N1BYTE(_val) ((u8)(_val))
+#define H2N2BYTE(_val) (((((u16)(_val))&0x00ff)<<8)|\
+ ((((u16)(_val))&0xff00)>>8))
+#define H2N4BYTE(_val) (((((u32)(_val))&0x000000ff)<<24)|\
+ ((((u32)(_val))&0x0000ff00)<<8) |\
+ ((((u32)(_val))&0x00ff0000)>>8) |\
+ ((((u32)(_val))&0xff000000)>>24))
+#else
+#define H2N1BYTE(_val) ((u8)(_val))
+#define H2N2BYTE(_val) ((u16)(_val))
+#define H2N4BYTE(_val) ((u32)(_val))
+#endif
+
+#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
+#define N2H1BYTE(_val) ((u8)(_val))
+#define N2H2BYTE(_val) (((((u16)(_val))&0x00ff)<<8)|\
+ ((((u16)(_val))&0xff00)>>8))
+#define N2H4BYTE(_val) (((((u32)(_val))&0x000000ff)<<24)|\
+ ((((u32)(_val))&0x0000ff00)<<8) |\
+ ((((u32)(_val))&0x00ff0000)>>8) |\
+ ((((u32)(_val))&0xff000000)>>24))
+#else
+#define N2H1BYTE(_val) ((u8)(_val))
+#define N2H2BYTE(_val) ((u16)(_val))
+#define N2H4BYTE(_val) ((u32)(_val))
+#endif
+
+#define BIT_LEN_MASK_32(__BitLen) (0xFFFFFFFF >> (32 - (__BitLen)))
+#define BIT_OFFSET_LEN_MASK_32(__BitOffset, __BitLen) \
+ (BIT_LEN_MASK_32(__BitLen) << (__BitOffset))
+
+#define LE_P4BYTE_TO_HOST_4BYTE(__pStart) (EF4Byte(*((u32 *)(__pStart))))
+
+#define LE_BITS_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
+ ( \
+ (LE_P4BYTE_TO_HOST_4BYTE(__pStart) >> (__BitOffset)) \
+ & \
+ BIT_LEN_MASK_32(__BitLen) \
+ )
+
+#define LE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
+ ( \
+ LE_P4BYTE_TO_HOST_4BYTE(__pStart) \
+ & \
+ (~BIT_OFFSET_LEN_MASK_32(__BitOffset, __BitLen)) \
+ )
+
+#define SET_BITS_TO_LE_4BYTE(__pStart, __BitOffset, __BitLen, __Value) \
+ *((u32 *)(__pStart)) = \
+ EF4Byte( \
+ LE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
+ | \
+ ((((u32)__Value) & BIT_LEN_MASK_32(__BitLen)) << (__BitOffset)) \
+ );
+
+
+#define BIT_LEN_MASK_16(__BitLen) \
+ (0xFFFF >> (16 - (__BitLen)))
+
+#define BIT_OFFSET_LEN_MASK_16(__BitOffset, __BitLen) \
+ (BIT_LEN_MASK_16(__BitLen) << (__BitOffset))
+
+#define LE_P2BYTE_TO_HOST_2BYTE(__pStart) \
+ (EF2Byte(*((u16 *)(__pStart))))
+
+#define LE_BITS_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
+ ( \
+ (LE_P2BYTE_TO_HOST_2BYTE(__pStart) >> (__BitOffset)) \
+ & \
+ BIT_LEN_MASK_16(__BitLen) \
+ )
+
+#define LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
+ ( \
+ LE_P2BYTE_TO_HOST_2BYTE(__pStart) \
+ & \
+ (~BIT_OFFSET_LEN_MASK_16(__BitOffset, __BitLen)) \
+ )
+
+#define SET_BITS_TO_LE_2BYTE(__pStart, __BitOffset, __BitLen, __Value) \
+ *((u16 *)(__pStart)) = \
+ EF2Byte( \
+ LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
+ | ((((u16)__Value) & BIT_LEN_MASK_16(__BitLen)) << \
+ (__BitOffset)) \
+ );
+
+#define BIT_LEN_MASK_8(__BitLen) \
+ (0xFF >> (8 - (__BitLen)))
+
+#define BIT_OFFSET_LEN_MASK_8(__BitOffset, __BitLen) \
+ (BIT_LEN_MASK_8(__BitLen) << (__BitOffset))
+
+#define LE_P1BYTE_TO_HOST_1BYTE(__pStart) \
+ (EF1Byte(*((u8 *)(__pStart))))
+
+#define LE_BITS_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
+ ( \
+ (LE_P1BYTE_TO_HOST_1BYTE(__pStart) >> (__BitOffset)) \
+ & \
+ BIT_LEN_MASK_8(__BitLen) \
+ )
+
+#define LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
+ ( \
+ LE_P1BYTE_TO_HOST_1BYTE(__pStart) \
+ & \
+ (~BIT_OFFSET_LEN_MASK_8(__BitOffset, __BitLen)) \
+ )
+
+#define SET_BITS_TO_LE_1BYTE(__pStart, __BitOffset, __BitLen, __Value) \
+ *((u8 *)(__pStart)) = EF1Byte( \
+ LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
+ | ((((u8)__Value) & BIT_LEN_MASK_8(__BitLen)) << \
+ (__BitOffset)) \
+ );
+
+#define N_BYTE_ALIGMENT(__Value, __Aligment) \
+ ((__Aligment == 1) ? (__Value) : (((__Value + __Aligment - 1) / \
+ __Aligment) * __Aligment))
+#endif
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
new file mode 100644
index 00000000000..c36a140a456
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -0,0 +1,289 @@
+/*******************************************************************************
+
+ Copyright(c) 2004 Intel Corporation. All rights reserved.
+
+ Portions of this file are based on the WEP enablement code provided by the
+ Host AP project hostap-drivers v0.1.3
+ Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ <jkmaline@cc.hut.fi>
+ Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ James P. Ketrenos <ipw2100-admin@linux.intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/wireless.h>
+#include <linux/etherdevice.h>
+#include <linux/uaccess.h>
+#include <net/arp.h>
+
+#include "rtllib.h"
+
+
+#define DRV_NAME "rtllib_92e"
+
+void _setup_timer(struct timer_list *ptimer, void *fun, unsigned long data)
+{
+ ptimer->function = fun;
+ ptimer->data = data;
+ init_timer(ptimer);
+}
+
+static inline int rtllib_networks_allocate(struct rtllib_device *ieee)
+{
+ if (ieee->networks)
+ return 0;
+
+ ieee->networks = kzalloc(
+ MAX_NETWORK_COUNT * sizeof(struct rtllib_network),
+ GFP_KERNEL);
+ if (!ieee->networks) {
+ printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
+ ieee->dev->name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static inline void rtllib_networks_free(struct rtllib_device *ieee)
+{
+ if (!ieee->networks)
+ return;
+ kfree(ieee->networks);
+ ieee->networks = NULL;
+}
+
+static inline void rtllib_networks_initialize(struct rtllib_device *ieee)
+{
+ int i;
+
+ INIT_LIST_HEAD(&ieee->network_free_list);
+ INIT_LIST_HEAD(&ieee->network_list);
+ for (i = 0; i < MAX_NETWORK_COUNT; i++)
+ list_add_tail(&ieee->networks[i].list,
+ &ieee->network_free_list);
+}
+
+struct net_device *alloc_rtllib(int sizeof_priv)
+{
+ struct rtllib_device *ieee = NULL;
+ struct net_device *dev;
+ int i, err;
+
+ RTLLIB_DEBUG_INFO("Initializing...\n");
+
+ dev = alloc_etherdev(sizeof(struct rtllib_device) + sizeof_priv);
+ if (!dev) {
+ RTLLIB_ERROR("Unable to network device.\n");
+ goto failed;
+ }
+ ieee = (struct rtllib_device *)netdev_priv_rsl(dev);
+ memset(ieee, 0, sizeof(struct rtllib_device)+sizeof_priv);
+ ieee->dev = dev;
+
+ err = rtllib_networks_allocate(ieee);
+ if (err) {
+ RTLLIB_ERROR("Unable to allocate beacon storage: %d\n",
+ err);
+ goto failed;
+ }
+ rtllib_networks_initialize(ieee);
+
+
+ /* Default fragmentation threshold is maximum payload size */
+ ieee->fts = DEFAULT_FTS;
+ ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
+ ieee->open_wep = 1;
+
+ /* Default to enabling full open WEP with host based encrypt/decrypt */
+ ieee->host_encrypt = 1;
+ ieee->host_decrypt = 1;
+ ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
+
+ INIT_LIST_HEAD(&ieee->crypt_deinit_list);
+ _setup_timer(&ieee->crypt_deinit_timer,
+ rtllib_crypt_deinit_handler,
+ (unsigned long) ieee);
+ ieee->rtllib_ap_sec_type = rtllib_ap_sec_type;
+
+ spin_lock_init(&ieee->lock);
+ spin_lock_init(&ieee->wpax_suitlist_lock);
+ spin_lock_init(&ieee->bw_spinlock);
+ spin_lock_init(&ieee->reorder_spinlock);
+ atomic_set(&(ieee->atm_chnlop), 0);
+ atomic_set(&(ieee->atm_swbw), 0);
+
+ ieee->bHalfNMode = false;
+ ieee->wpa_enabled = 0;
+ ieee->tkip_countermeasures = 0;
+ ieee->drop_unencrypted = 0;
+ ieee->privacy_invoked = 0;
+ ieee->ieee802_1x = 1;
+ ieee->raw_tx = 0;
+ ieee->hwsec_active = 0;
+
+ memset(ieee->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
+ rtllib_softmac_init(ieee);
+
+ ieee->pHTInfo = kzalloc(sizeof(struct rt_hi_throughput), GFP_KERNEL);
+ if (ieee->pHTInfo == NULL) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "can't alloc memory for HTInfo\n");
+ return NULL;
+ }
+ HTUpdateDefaultSetting(ieee);
+ HTInitializeHTInfo(ieee);
+ TSInitialize(ieee);
+ for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&ieee->ibss_mac_hash[i]);
+
+ for (i = 0; i < 17; i++) {
+ ieee->last_rxseq_num[i] = -1;
+ ieee->last_rxfrag_num[i] = -1;
+ ieee->last_packet_time[i] = 0;
+ }
+
+ rtllib_tkip_null();
+ rtllib_wep_null();
+ rtllib_ccmp_null();
+
+ return dev;
+
+ failed:
+ if (dev)
+ free_netdev(dev);
+ return NULL;
+}
+
+void free_rtllib(struct net_device *dev)
+{
+ struct rtllib_device *ieee = (struct rtllib_device *)
+ netdev_priv_rsl(dev);
+ int i;
+
+ kfree(ieee->pHTInfo);
+ ieee->pHTInfo = NULL;
+ rtllib_softmac_free(ieee);
+ del_timer_sync(&ieee->crypt_deinit_timer);
+ rtllib_crypt_deinit_entries(ieee, 1);
+
+ for (i = 0; i < WEP_KEYS; i++) {
+ struct rtllib_crypt_data *crypt = ieee->crypt[i];
+ if (crypt) {
+ if (crypt->ops)
+ crypt->ops->deinit(crypt->priv);
+ kfree(crypt);
+ ieee->crypt[i] = NULL;
+ }
+ }
+
+ rtllib_networks_free(ieee);
+ free_netdev(dev);
+}
+
+u32 rtllib_debug_level;
+static int debug = \
+ RTLLIB_DL_ERR
+ ;
+static struct proc_dir_entry *rtllib_proc;
+
+static int show_debug_level(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ return snprintf(page, count, "0x%08X\n", rtllib_debug_level);
+}
+
+static int store_debug_level(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char buf[] = "0x00000000";
+ unsigned long len = min((unsigned long)sizeof(buf) - 1, count);
+ char *p = (char *)buf;
+ unsigned long val;
+
+ if (copy_from_user(buf, buffer, len))
+ return count;
+ buf[len] = 0;
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+ p++;
+ if (p[0] == 'x' || p[0] == 'X')
+ p++;
+ val = simple_strtoul(p, &p, 16);
+ } else
+ val = simple_strtoul(p, &p, 10);
+ if (p == buf)
+ printk(KERN_INFO DRV_NAME
+ ": %s is not in hex or decimal form.\n", buf);
+ else
+ rtllib_debug_level = val;
+
+ return strnlen(buf, count);
+}
+
+int __init rtllib_init(void)
+{
+ struct proc_dir_entry *e;
+
+ rtllib_debug_level = debug;
+ rtllib_proc = create_proc_entry(DRV_NAME, S_IFDIR, init_net.proc_net);
+ if (rtllib_proc == NULL) {
+ RTLLIB_ERROR("Unable to create " DRV_NAME
+ " proc directory\n");
+ return -EIO;
+ }
+ e = create_proc_entry("debug_level", S_IFREG | S_IRUGO | S_IWUSR,
+ rtllib_proc);
+ if (!e) {
+ remove_proc_entry(DRV_NAME, init_net.proc_net);
+ rtllib_proc = NULL;
+ return -EIO;
+ }
+ e->read_proc = show_debug_level;
+ e->write_proc = store_debug_level;
+ e->data = NULL;
+
+ return 0;
+}
+
+void __exit rtllib_exit(void)
+{
+ if (rtllib_proc) {
+ remove_proc_entry("debug_level", rtllib_proc);
+ remove_proc_entry(DRV_NAME, init_net.proc_net);
+ rtllib_proc = NULL;
+ }
+}
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
new file mode 100644
index 00000000000..8d0af5ed8ec
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -0,0 +1,2720 @@
+/*
+ * Original code based Host AP (software wireless LAN access point) driver
+ * for Intersil Prism2/2.5/3 - hostap.o module, common routines
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2004, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ ******************************************************************************
+
+ Few modifications for Realtek's Wi-Fi drivers by
+ Andrea Merello <andreamrl@tiscali.it>
+
+ A special thanks goes to Realtek for their support !
+
+******************************************************************************/
+
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/wireless.h>
+#include <linux/etherdevice.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+
+#include "rtllib.h"
+#include "dot11d.h"
+
+static inline void rtllib_monitor_rx(struct rtllib_device *ieee,
+ struct sk_buff *skb, struct rtllib_rx_stats *rx_status,
+ size_t hdr_length)
+{
+ skb->dev = ieee->dev;
+ skb_reset_mac_header(skb);
+ skb_pull(skb, hdr_length);
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = __constant_htons(ETH_P_80211_RAW);
+ memset(skb->cb, 0, sizeof(skb->cb));
+ netif_rx(skb);
+}
+
+/* Called only as a tasklet (software IRQ) */
+static struct rtllib_frag_entry *
+rtllib_frag_cache_find(struct rtllib_device *ieee, unsigned int seq,
+ unsigned int frag, u8 tid, u8 *src, u8 *dst)
+{
+ struct rtllib_frag_entry *entry;
+ int i;
+
+ for (i = 0; i < RTLLIB_FRAG_CACHE_LEN; i++) {
+ entry = &ieee->frag_cache[tid][i];
+ if (entry->skb != NULL &&
+ time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
+ RTLLIB_DEBUG_FRAG(
+ "expiring fragment cache entry "
+ "seq=%u last_frag=%u\n",
+ entry->seq, entry->last_frag);
+ dev_kfree_skb_any(entry->skb);
+ entry->skb = NULL;
+ }
+
+ if (entry->skb != NULL && entry->seq == seq &&
+ (entry->last_frag + 1 == frag || frag == -1) &&
+ memcmp(entry->src_addr, src, ETH_ALEN) == 0 &&
+ memcmp(entry->dst_addr, dst, ETH_ALEN) == 0)
+ return entry;
+ }
+
+ return NULL;
+}
+
+/* Called only as a tasklet (software IRQ) */
+static struct sk_buff *
+rtllib_frag_cache_get(struct rtllib_device *ieee,
+ struct rtllib_hdr_4addr *hdr)
+{
+ struct sk_buff *skb = NULL;
+ u16 fc = le16_to_cpu(hdr->frame_ctl);
+ u16 sc = le16_to_cpu(hdr->seq_ctl);
+ unsigned int frag = WLAN_GET_SEQ_FRAG(sc);
+ unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
+ struct rtllib_frag_entry *entry;
+ struct rtllib_hdr_3addrqos *hdr_3addrqos;
+ struct rtllib_hdr_4addrqos *hdr_4addrqos;
+ u8 tid;
+
+ if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) && RTLLIB_QOS_HAS_SEQ(fc)) {
+ hdr_4addrqos = (struct rtllib_hdr_4addrqos *)hdr;
+ tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
+ } else if (RTLLIB_QOS_HAS_SEQ(fc)) {
+ hdr_3addrqos = (struct rtllib_hdr_3addrqos *)hdr;
+ tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
+ } else {
+ tid = 0;
+ }
+
+ if (frag == 0) {
+ /* Reserve enough space to fit maximum frame length */
+ skb = dev_alloc_skb(ieee->dev->mtu +
+ sizeof(struct rtllib_hdr_4addr) +
+ 8 /* LLC */ +
+ 2 /* alignment */ +
+ 8 /* WEP */ +
+ ETH_ALEN /* WDS */ +
+ (RTLLIB_QOS_HAS_SEQ(fc) ? 2 : 0) /* QOS Control */);
+ if (skb == NULL)
+ return NULL;
+
+ entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
+ ieee->frag_next_idx[tid]++;
+ if (ieee->frag_next_idx[tid] >= RTLLIB_FRAG_CACHE_LEN)
+ ieee->frag_next_idx[tid] = 0;
+
+ if (entry->skb != NULL)
+ dev_kfree_skb_any(entry->skb);
+
+ entry->first_frag_time = jiffies;
+ entry->seq = seq;
+ entry->last_frag = frag;
+ entry->skb = skb;
+ memcpy(entry->src_addr, hdr->addr2, ETH_ALEN);
+ memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN);
+ } else {
+ /* received a fragment of a frame for which the head fragment
+ * should have already been received */
+ entry = rtllib_frag_cache_find(ieee, seq, frag, tid, hdr->addr2,
+ hdr->addr1);
+ if (entry != NULL) {
+ entry->last_frag = frag;
+ skb = entry->skb;
+ }
+ }
+
+ return skb;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static int rtllib_frag_cache_invalidate(struct rtllib_device *ieee,
+ struct rtllib_hdr_4addr *hdr)
+{
+ u16 fc = le16_to_cpu(hdr->frame_ctl);
+ u16 sc = le16_to_cpu(hdr->seq_ctl);
+ unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
+ struct rtllib_frag_entry *entry;
+ struct rtllib_hdr_3addrqos *hdr_3addrqos;
+ struct rtllib_hdr_4addrqos *hdr_4addrqos;
+ u8 tid;
+
+ if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) && RTLLIB_QOS_HAS_SEQ(fc)) {
+ hdr_4addrqos = (struct rtllib_hdr_4addrqos *)hdr;
+ tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
+ } else if (RTLLIB_QOS_HAS_SEQ(fc)) {
+ hdr_3addrqos = (struct rtllib_hdr_3addrqos *)hdr;
+ tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
+ } else {
+ tid = 0;
+ }
+
+ entry = rtllib_frag_cache_find(ieee, seq, -1, tid, hdr->addr2,
+ hdr->addr1);
+
+ if (entry == NULL) {
+ RTLLIB_DEBUG_FRAG(
+ "could not invalidate fragment cache "
+ "entry (seq=%u)\n", seq);
+ return -1;
+ }
+
+ entry->skb = NULL;
+ return 0;
+}
+
+/* rtllib_rx_frame_mgtmt
+ *
+ * Responsible for handling management control frames
+ *
+ * Called by rtllib_rx */
+static inline int
+rtllib_rx_frame_mgmt(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats, u16 type,
+ u16 stype)
+{
+ /* On the struct stats definition there is written that
+ * this is not mandatory.... but seems that the probe
+ * response parser uses it
+ */
+ struct rtllib_hdr_3addr * hdr = (struct rtllib_hdr_3addr *)skb->data;
+
+ rx_stats->len = skb->len;
+ rtllib_rx_mgt(ieee, skb, rx_stats);
+ if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN))) {
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+ rtllib_rx_frame_softmac(ieee, skb, rx_stats, type, stype);
+
+ dev_kfree_skb_any(skb);
+
+ return 0;
+}
+
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+static unsigned char rfc1042_header[] = {
+ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
+};
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+static unsigned char bridge_tunnel_header[] = {
+ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8
+};
+/* No encapsulation header if EtherType < 0x600 (=length) */
+
+/* Called by rtllib_rx_frame_decrypt */
+static int rtllib_is_eapol_frame(struct rtllib_device *ieee,
+ struct sk_buff *skb, size_t hdrlen)
+{
+ struct net_device *dev = ieee->dev;
+ u16 fc, ethertype;
+ struct rtllib_hdr_4addr *hdr;
+ u8 *pos;
+
+ if (skb->len < 24)
+ return 0;
+
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+ fc = le16_to_cpu(hdr->frame_ctl);
+
+ /* check that the frame is unicast frame to us */
+ if ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) ==
+ RTLLIB_FCTL_TODS &&
+ memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
+ memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
+ /* ToDS frame with own addr BSSID and DA */
+ } else if ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) ==
+ RTLLIB_FCTL_FROMDS &&
+ memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+ /* FromDS frame with own addr as DA */
+ } else
+ return 0;
+
+ if (skb->len < 24 + 8)
+ return 0;
+
+ /* check for port access entity Ethernet type */
+ pos = skb->data + hdrlen;
+ ethertype = (pos[6] << 8) | pos[7];
+ if (ethertype == ETH_P_PAE)
+ return 1;
+
+ return 0;
+}
+
+/* Called only as a tasklet (software IRQ), by rtllib_rx */
+static inline int
+rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_crypt_data *crypt)
+{
+ struct rtllib_hdr_4addr *hdr;
+ int res, hdrlen;
+
+ if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
+ return 0;
+
+ if (ieee->hwsec_active) {
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
+ tcb_desc->bHwSec = 1;
+
+ if (ieee->need_sw_enc)
+ tcb_desc->bHwSec = 0;
+ }
+
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
+
+ atomic_inc(&crypt->refcnt);
+ res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
+ atomic_dec(&crypt->refcnt);
+ if (res < 0) {
+ RTLLIB_DEBUG_DROP(
+ "decryption failed (SA= %pM"
+ ") res=%d\n", hdr->addr2, res);
+ if (res == -2)
+ RTLLIB_DEBUG_DROP("Decryption failed ICV "
+ "mismatch (key %d)\n",
+ skb->data[hdrlen + 3] >> 6);
+ ieee->ieee_stats.rx_discards_undecryptable++;
+ return -1;
+ }
+
+ return res;
+}
+
+
+/* Called only as a tasklet (software IRQ), by rtllib_rx */
+static inline int
+rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
+ int keyidx, struct rtllib_crypt_data *crypt)
+{
+ struct rtllib_hdr_4addr *hdr;
+ int res, hdrlen;
+
+ if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
+ return 0;
+ if (ieee->hwsec_active) {
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
+ tcb_desc->bHwSec = 1;
+
+ if (ieee->need_sw_enc)
+ tcb_desc->bHwSec = 0;
+ }
+
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
+
+ atomic_inc(&crypt->refcnt);
+ res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv, ieee);
+ atomic_dec(&crypt->refcnt);
+ if (res < 0) {
+ printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
+ " (SA= %pM keyidx=%d)\n",
+ ieee->dev->name, hdr->addr2, keyidx);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/* this function is stolen from ipw2200 driver*/
+#define IEEE_PACKET_RETRY_TIME (5*HZ)
+static int is_duplicate_packet(struct rtllib_device *ieee,
+ struct rtllib_hdr_4addr *header)
+{
+ u16 fc = le16_to_cpu(header->frame_ctl);
+ u16 sc = le16_to_cpu(header->seq_ctl);
+ u16 seq = WLAN_GET_SEQ_SEQ(sc);
+ u16 frag = WLAN_GET_SEQ_FRAG(sc);
+ u16 *last_seq, *last_frag;
+ unsigned long *last_time;
+ struct rtllib_hdr_3addrqos *hdr_3addrqos;
+ struct rtllib_hdr_4addrqos *hdr_4addrqos;
+ u8 tid;
+
+ if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) && RTLLIB_QOS_HAS_SEQ(fc)) {
+ hdr_4addrqos = (struct rtllib_hdr_4addrqos *)header;
+ tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
+ } else if (RTLLIB_QOS_HAS_SEQ(fc)) {
+ hdr_3addrqos = (struct rtllib_hdr_3addrqos *)header;
+ tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
+ } else {
+ tid = 0;
+ }
+
+ switch (ieee->iw_mode) {
+ case IW_MODE_ADHOC:
+ {
+ struct list_head *p;
+ struct ieee_ibss_seq *entry = NULL;
+ u8 *mac = header->addr2;
+ int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE;
+ list_for_each(p, &ieee->ibss_mac_hash[index]) {
+ entry = list_entry(p, struct ieee_ibss_seq, list);
+ if (!memcmp(entry->mac, mac, ETH_ALEN))
+ break;
+ }
+ if (p == &ieee->ibss_mac_hash[index]) {
+ entry = kmalloc(sizeof(struct ieee_ibss_seq), GFP_ATOMIC);
+ if (!entry) {
+ printk(KERN_WARNING "Cannot malloc new mac entry\n");
+ return 0;
+ }
+ memcpy(entry->mac, mac, ETH_ALEN);
+ entry->seq_num[tid] = seq;
+ entry->frag_num[tid] = frag;
+ entry->packet_time[tid] = jiffies;
+ list_add(&entry->list, &ieee->ibss_mac_hash[index]);
+ return 0;
+ }
+ last_seq = &entry->seq_num[tid];
+ last_frag = &entry->frag_num[tid];
+ last_time = &entry->packet_time[tid];
+ break;
+ }
+
+ case IW_MODE_INFRA:
+ last_seq = &ieee->last_rxseq_num[tid];
+ last_frag = &ieee->last_rxfrag_num[tid];
+ last_time = &ieee->last_packet_time[tid];
+ break;
+ default:
+ return 0;
+ }
+
+ if ((*last_seq == seq) &&
+ time_after(*last_time + IEEE_PACKET_RETRY_TIME, jiffies)) {
+ if (*last_frag == frag)
+ goto drop;
+ if (*last_frag + 1 != frag)
+ /* out-of-order fragment */
+ goto drop;
+ } else
+ *last_seq = seq;
+
+ *last_frag = frag;
+ *last_time = jiffies;
+ return 0;
+
+drop:
+
+ return 1;
+}
+
+static bool AddReorderEntry(struct rx_ts_record *pTS,
+ struct rx_reorder_entry *pReorderEntry)
+{
+ struct list_head *pList = &pTS->RxPendingPktList;
+
+ while (pList->next != &pTS->RxPendingPktList) {
+ if (SN_LESS(pReorderEntry->SeqNum, ((struct rx_reorder_entry *)
+ list_entry(pList->next, struct rx_reorder_entry,
+ List))->SeqNum))
+ pList = pList->next;
+ else if (SN_EQUAL(pReorderEntry->SeqNum,
+ ((struct rx_reorder_entry *)list_entry(pList->next,
+ struct rx_reorder_entry, List))->SeqNum))
+ return false;
+ else
+ break;
+ }
+ pReorderEntry->List.next = pList->next;
+ pReorderEntry->List.next->prev = &pReorderEntry->List;
+ pReorderEntry->List.prev = pList;
+ pList->next = &pReorderEntry->List;
+
+ return true;
+}
+
+void rtllib_indicate_packets(struct rtllib_device *ieee, struct rtllib_rxb **prxbIndicateArray, u8 index)
+{
+ struct net_device_stats *stats = &ieee->stats;
+ u8 i = 0 , j = 0;
+ u16 ethertype;
+ for (j = 0; j < index; j++) {
+ struct rtllib_rxb *prxb = prxbIndicateArray[j];
+ for (i = 0; i < prxb->nr_subframes; i++) {
+ struct sk_buff *sub_skb = prxb->subframes[i];
+
+ /* convert hdr + possible LLC headers into Ethernet header */
+ ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
+ if (sub_skb->len >= 8 &&
+ ((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 &&
+ ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
+ memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) {
+ /* remove RFC1042 or Bridge-Tunnel encapsulation
+ * and replace EtherType */
+ skb_pull(sub_skb, SNAP_SIZE);
+ memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
+ memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
+ } else {
+ u16 len;
+ /* Leave Ethernet header part of hdr and full payload */
+ len = htons(sub_skb->len);
+ memcpy(skb_push(sub_skb, 2), &len, 2);
+ memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
+ memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
+ }
+
+ /* Indicat the packets to upper layer */
+ if (sub_skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += sub_skb->len;
+
+ memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
+ sub_skb->protocol = eth_type_trans(sub_skb, ieee->dev);
+ sub_skb->dev = ieee->dev;
+ sub_skb->dev->stats.rx_packets++;
+ sub_skb->dev->stats.rx_bytes += sub_skb->len;
+ sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
+ ieee->last_rx_ps_time = jiffies;
+ netif_rx(sub_skb);
+ }
+ }
+ kfree(prxb);
+ prxb = NULL;
+ }
+}
+
+void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee, struct rx_ts_record *pTS)
+{
+ struct rx_reorder_entry *pRxReorderEntry;
+ u8 RfdCnt = 0;
+
+ del_timer_sync(&pTS->RxPktPendingTimer);
+ while (!list_empty(&pTS->RxPendingPktList)) {
+ if (RfdCnt >= REORDER_WIN_SIZE) {
+ printk(KERN_INFO "-------------->%s() error! RfdCnt >= REORDER_WIN_SIZE\n", __func__);
+ break;
+ }
+
+ pRxReorderEntry = (struct rx_reorder_entry *)list_entry(pTS->RxPendingPktList.prev, struct rx_reorder_entry, List);
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Indicate SeqNum %d!\n", __func__, pRxReorderEntry->SeqNum);
+ list_del_init(&pRxReorderEntry->List);
+
+ ieee->RfdArray[RfdCnt] = pRxReorderEntry->prxb;
+
+ RfdCnt = RfdCnt + 1;
+ list_add_tail(&pRxReorderEntry->List, &ieee->RxReorder_Unused_List);
+ }
+ rtllib_indicate_packets(ieee, ieee->RfdArray, RfdCnt);
+
+ pTS->RxIndicateSeq = 0xffff;
+}
+
+static void RxReorderIndicatePacket(struct rtllib_device *ieee,
+ struct rtllib_rxb *prxb,
+ struct rx_ts_record *pTS, u16 SeqNum)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+ struct rx_reorder_entry *pReorderEntry = NULL;
+ u8 WinSize = pHTInfo->RxReorderWinSize;
+ u16 WinEnd = 0;
+ u8 index = 0;
+ bool bMatchWinStart = false, bPktInBuf = false;
+ unsigned long flags;
+
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Seq is %d, pTS->RxIndicateSeq"
+ " is %d, WinSize is %d\n", __func__, SeqNum,
+ pTS->RxIndicateSeq, WinSize);
+
+ spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
+
+ WinEnd = (pTS->RxIndicateSeq + WinSize - 1) % 4096;
+ /* Rx Reorder initialize condition.*/
+ if (pTS->RxIndicateSeq == 0xffff)
+ pTS->RxIndicateSeq = SeqNum;
+
+ /* Drop out the packet which SeqNum is smaller than WinStart */
+ if (SN_LESS(SeqNum, pTS->RxIndicateSeq)) {
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
+ pTS->RxIndicateSeq, SeqNum);
+ pHTInfo->RxReorderDropCounter++;
+ {
+ int i;
+ for (i = 0; i < prxb->nr_subframes; i++)
+ dev_kfree_skb(prxb->subframes[i]);
+ kfree(prxb);
+ prxb = NULL;
+ }
+ spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
+ return;
+ }
+
+ /*
+ * Sliding window manipulation. Conditions includes:
+ * 1. Incoming SeqNum is equal to WinStart =>Window shift 1
+ * 2. Incoming SeqNum is larger than the WinEnd => Window shift N
+ */
+ if (SN_EQUAL(SeqNum, pTS->RxIndicateSeq)) {
+ pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096;
+ bMatchWinStart = true;
+ } else if (SN_LESS(WinEnd, SeqNum)) {
+ if (SeqNum >= (WinSize - 1))
+ pTS->RxIndicateSeq = SeqNum + 1 - WinSize;
+ else
+ pTS->RxIndicateSeq = 4095 - (WinSize - (SeqNum + 1)) + 1;
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Window Shift! IndicateSeq: %d,"
+ " NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum);
+ }
+
+ /*
+ * Indication process.
+ * After Packet dropping and Sliding Window shifting as above, we can
+ * now just indicate the packets with the SeqNum smaller than latest
+ * WinStart and struct buffer other packets.
+ */
+ /* For Rx Reorder condition:
+ * 1. All packets with SeqNum smaller than WinStart => Indicate
+ * 2. All packets with SeqNum larger than or equal to
+ * WinStart => Buffer it.
+ */
+ if (bMatchWinStart) {
+ /* Current packet is going to be indicated.*/
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Packets indication!! "
+ "IndicateSeq: %d, NewSeq: %d\n",
+ pTS->RxIndicateSeq, SeqNum);
+ ieee->prxbIndicateArray[0] = prxb;
+ index = 1;
+ } else {
+ /* Current packet is going to be inserted into pending list.*/
+ if (!list_empty(&ieee->RxReorder_Unused_List)) {
+ pReorderEntry = (struct rx_reorder_entry *)
+ list_entry(ieee->RxReorder_Unused_List.next,
+ struct rx_reorder_entry, List);
+ list_del_init(&pReorderEntry->List);
+
+ /* Make a reorder entry and insert into a the packet list.*/
+ pReorderEntry->SeqNum = SeqNum;
+ pReorderEntry->prxb = prxb;
+
+ if (!AddReorderEntry(pTS, pReorderEntry)) {
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER,
+ "%s(): Duplicate packet is "
+ "dropped!! IndicateSeq: %d, "
+ "NewSeq: %d\n",
+ __func__, pTS->RxIndicateSeq,
+ SeqNum);
+ list_add_tail(&pReorderEntry->List,
+ &ieee->RxReorder_Unused_List); {
+ int i;
+ for (i = 0; i < prxb->nr_subframes; i++)
+ dev_kfree_skb(prxb->subframes[i]);
+ kfree(prxb);
+ prxb = NULL;
+ }
+ } else {
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER,
+ "Pkt insert into struct buffer!! "
+ "IndicateSeq: %d, NewSeq: %d\n",
+ pTS->RxIndicateSeq, SeqNum);
+ }
+ } else {
+ /*
+ * Packets are dropped if there are not enough reorder
+ * entries. This part should be modified!! We can just
+ * indicate all the packets in struct buffer and get
+ * reorder entries.
+ */
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket():"
+ " There is no reorder entry!! Packet is "
+ "dropped!!\n");
+ {
+ int i;
+ for (i = 0; i < prxb->nr_subframes; i++)
+ dev_kfree_skb(prxb->subframes[i]);
+ kfree(prxb);
+ prxb = NULL;
+ }
+ }
+ }
+
+ /* Check if there is any packet need indicate.*/
+ while (!list_empty(&pTS->RxPendingPktList)) {
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): start RREORDER indicate\n", __func__);
+
+ pReorderEntry = (struct rx_reorder_entry *)list_entry(pTS->RxPendingPktList.prev,
+ struct rx_reorder_entry, List);
+ if (SN_LESS(pReorderEntry->SeqNum, pTS->RxIndicateSeq) ||
+ SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq)) {
+ /* This protect struct buffer from overflow. */
+ if (index >= REORDER_WIN_SIZE) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicate"
+ "Packet(): Buffer overflow!!\n");
+ bPktInBuf = true;
+ break;
+ }
+
+ list_del_init(&pReorderEntry->List);
+
+ if (SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq))
+ pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096;
+
+ ieee->prxbIndicateArray[index] = pReorderEntry->prxb;
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Indicate SeqNum"
+ " %d!\n", __func__, pReorderEntry->SeqNum);
+ index++;
+
+ list_add_tail(&pReorderEntry->List,
+ &ieee->RxReorder_Unused_List);
+ } else {
+ bPktInBuf = true;
+ break;
+ }
+ }
+
+ /* Handling pending timer. Set this timer to prevent from long time
+ * Rx buffering.*/
+ if (index > 0) {
+ if (timer_pending(&pTS->RxPktPendingTimer))
+ del_timer_sync(&pTS->RxPktPendingTimer);
+ pTS->RxTimeoutIndicateSeq = 0xffff;
+
+ if (index > REORDER_WIN_SIZE) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket():"
+ " Rx Reorer struct buffer full!!\n");
+ spin_unlock_irqrestore(&(ieee->reorder_spinlock),
+ flags);
+ return;
+ }
+ rtllib_indicate_packets(ieee, ieee->prxbIndicateArray, index);
+ bPktInBuf = false;
+ }
+
+ if (bPktInBuf && pTS->RxTimeoutIndicateSeq == 0xffff) {
+ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): SET rx timeout timer\n",
+ __func__);
+ pTS->RxTimeoutIndicateSeq = pTS->RxIndicateSeq;
+ mod_timer(&pTS->RxPktPendingTimer, jiffies +
+ MSECS(pHTInfo->RxReorderPendingTime));
+ }
+ spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
+}
+
+static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats,
+ struct rtllib_rxb *rxb, u8 *src, u8 *dst)
+{
+ struct rtllib_hdr_3addr *hdr = (struct rtllib_hdr_3addr *)skb->data;
+ u16 fc = le16_to_cpu(hdr->frame_ctl);
+
+ u16 LLCOffset = sizeof(struct rtllib_hdr_3addr);
+ u16 ChkLength;
+ bool bIsAggregateFrame = false;
+ u16 nSubframe_Length;
+ u8 nPadding_Length = 0;
+ u16 SeqNum = 0;
+ struct sk_buff *sub_skb;
+ u8 *data_ptr;
+ /* just for debug purpose */
+ SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl));
+ if ((RTLLIB_QOS_HAS_SEQ(fc)) &&
+ (((union frameqos *)(skb->data + RTLLIB_3ADDR_LEN))->field.reserved))
+ bIsAggregateFrame = true;
+
+ if (RTLLIB_QOS_HAS_SEQ(fc))
+ LLCOffset += 2;
+ if (rx_stats->bContainHTC)
+ LLCOffset += sHTCLng;
+
+ ChkLength = LLCOffset;
+
+ if (skb->len <= ChkLength)
+ return 0;
+
+ skb_pull(skb, LLCOffset);
+ ieee->bIsAggregateFrame = bIsAggregateFrame;
+ if (!bIsAggregateFrame) {
+ rxb->nr_subframes = 1;
+
+ /* altered by clark 3/30/2010
+ * The struct buffer size of the skb indicated to upper layer
+ * must be less than 5000, or the defraged IP datagram
+ * in the IP layer will exceed "ipfrag_high_tresh" and be
+ * discarded. so there must not use the function
+ * "skb_copy" and "skb_clone" for "skb".
+ */
+
+ /* Allocate new skb for releasing to upper layer */
+ sub_skb = dev_alloc_skb(RTLLIB_SKBBUFFER_SIZE);
+ skb_reserve(sub_skb, 12);
+ data_ptr = (u8 *)skb_put(sub_skb, skb->len);
+ memcpy(data_ptr, skb->data, skb->len);
+ sub_skb->dev = ieee->dev;
+
+ rxb->subframes[0] = sub_skb;
+
+ memcpy(rxb->src, src, ETH_ALEN);
+ memcpy(rxb->dst, dst, ETH_ALEN);
+ rxb->subframes[0]->dev = ieee->dev;
+ return 1;
+ } else {
+ rxb->nr_subframes = 0;
+ memcpy(rxb->src, src, ETH_ALEN);
+ memcpy(rxb->dst, dst, ETH_ALEN);
+ while (skb->len > ETHERNET_HEADER_SIZE) {
+ /* Offset 12 denote 2 mac address */
+ nSubframe_Length = *((u16 *)(skb->data + 12));
+ nSubframe_Length = (nSubframe_Length >> 8) +
+ (nSubframe_Length << 8);
+
+ if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
+ printk(KERN_INFO "%s: A-MSDU parse error!! "
+ "pRfd->nTotalSubframe : %d\n",\
+ __func__, rxb->nr_subframes);
+ printk(KERN_INFO "%s: A-MSDU parse error!! "
+ "Subframe Length: %d\n", __func__,
+ nSubframe_Length);
+ printk(KERN_INFO "nRemain_Length is %d and "
+ "nSubframe_Length is : %d\n", skb->len,
+ nSubframe_Length);
+ printk(KERN_INFO "The Packet SeqNum is %d\n", SeqNum);
+ return 0;
+ }
+
+ /* move the data point to data content */
+ skb_pull(skb, ETHERNET_HEADER_SIZE);
+
+ /* altered by clark 3/30/2010
+ * The struct buffer size of the skb indicated to upper layer
+ * must be less than 5000, or the defraged IP datagram
+ * in the IP layer will exceed "ipfrag_high_tresh" and be
+ * discarded. so there must not use the function
+ * "skb_copy" and "skb_clone" for "skb".
+ */
+
+ /* Allocate new skb for releasing to upper layer */
+ sub_skb = dev_alloc_skb(nSubframe_Length + 12);
+ skb_reserve(sub_skb, 12);
+ data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
+ memcpy(data_ptr, skb->data, nSubframe_Length);
+
+ sub_skb->dev = ieee->dev;
+ rxb->subframes[rxb->nr_subframes++] = sub_skb;
+ if (rxb->nr_subframes >= MAX_SUBFRAME_COUNT) {
+ RTLLIB_DEBUG_RX("ParseSubframe(): Too many "
+ "Subframes! Packets dropped!\n");
+ break;
+ }
+ skb_pull(skb, nSubframe_Length);
+
+ if (skb->len != 0) {
+ nPadding_Length = 4 - ((nSubframe_Length +
+ ETHERNET_HEADER_SIZE) % 4);
+ if (nPadding_Length == 4)
+ nPadding_Length = 0;
+
+ if (skb->len < nPadding_Length)
+ return 0;
+
+ skb_pull(skb, nPadding_Length);
+ }
+ }
+
+ return rxb->nr_subframes;
+ }
+}
+
+
+static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee,
+ struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats)
+{
+ struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
+ u16 fc = le16_to_cpu(hdr->frame_ctl);
+ size_t hdrlen = 0;
+
+ hdrlen = rtllib_get_hdrlen(fc);
+ if (HTCCheck(ieee, skb->data)) {
+ if (net_ratelimit())
+ printk(KERN_INFO "%s: find HTCControl!\n", __func__);
+ hdrlen += 4;
+ rx_stats->bContainHTC = 1;
+ }
+
+ if (RTLLIB_QOS_HAS_SEQ(fc))
+ rx_stats->bIsQosData = 1;
+
+ return hdrlen;
+}
+
+static int rtllib_rx_check_duplicate(struct rtllib_device *ieee,
+ struct sk_buff *skb, u8 multicast)
+{
+ struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
+ u16 fc, sc;
+ u8 frag, type, stype;
+
+ fc = le16_to_cpu(hdr->frame_ctl);
+ type = WLAN_FC_GET_TYPE(fc);
+ stype = WLAN_FC_GET_STYPE(fc);
+ sc = le16_to_cpu(hdr->seq_ctl);
+ frag = WLAN_GET_SEQ_FRAG(sc);
+
+ if ((ieee->pHTInfo->bCurRxReorderEnable == false) ||
+ !ieee->current_network.qos_data.active ||
+ !IsDataFrame(skb->data) ||
+ IsLegacyDataFrame(skb->data)) {
+ if (!((type == RTLLIB_FTYPE_MGMT) && (stype == RTLLIB_STYPE_BEACON))) {
+ if (is_duplicate_packet(ieee, hdr))
+ return -1;
+ }
+ } else {
+ struct rx_ts_record *pRxTS = NULL;
+ if (GetTs(ieee, (struct ts_common_info **) &pRxTS, hdr->addr2,
+ (u8)Frame_QoSTID((u8 *)(skb->data)), RX_DIR, true)) {
+ if ((fc & (1<<11)) && (frag == pRxTS->RxLastFragNum) &&
+ (WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum)) {
+ return -1;
+ } else {
+ pRxTS->RxLastFragNum = frag;
+ pRxTS->RxLastSeqNum = WLAN_GET_SEQ_SEQ(sc);
+ }
+ } else {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR!!%s(): No TS!! Skip"
+ " the check!!\n", __func__);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void rtllib_rx_extract_addr(struct rtllib_device *ieee,
+ struct rtllib_hdr_4addr *hdr, u8 *dst,
+ u8 *src, u8 *bssid)
+{
+ u16 fc = le16_to_cpu(hdr->frame_ctl);
+
+ switch (fc & (RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS)) {
+ case RTLLIB_FCTL_FROMDS:
+ memcpy(dst, hdr->addr1, ETH_ALEN);
+ memcpy(src, hdr->addr3, ETH_ALEN);
+ memcpy(bssid, hdr->addr2, ETH_ALEN);
+ break;
+ case RTLLIB_FCTL_TODS:
+ memcpy(dst, hdr->addr3, ETH_ALEN);
+ memcpy(src, hdr->addr2, ETH_ALEN);
+ memcpy(bssid, hdr->addr1, ETH_ALEN);
+ break;
+ case RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS:
+ memcpy(dst, hdr->addr3, ETH_ALEN);
+ memcpy(src, hdr->addr4, ETH_ALEN);
+ memcpy(bssid, ieee->current_network.bssid, ETH_ALEN);
+ break;
+ case 0:
+ memcpy(dst, hdr->addr1, ETH_ALEN);
+ memcpy(src, hdr->addr2, ETH_ALEN);
+ memcpy(bssid, hdr->addr3, ETH_ALEN);
+ break;
+ }
+}
+
+static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc,
+ u8 *dst, u8 *src, u8 *bssid, u8 *addr2)
+{
+ u8 zero_addr[ETH_ALEN] = {0};
+ u8 type, stype;
+
+ type = WLAN_FC_GET_TYPE(fc);
+ stype = WLAN_FC_GET_STYPE(fc);
+
+ /* Filter frames from different BSS */
+ if (((fc & RTLLIB_FCTL_DSTODS) != RTLLIB_FCTL_DSTODS)
+ && (compare_ether_addr(ieee->current_network.bssid, bssid) != 0)
+ && memcmp(ieee->current_network.bssid, zero_addr, ETH_ALEN)) {
+ return -1;
+ }
+
+ /* Filter packets sent by an STA that will be forwarded by AP */
+ if (ieee->IntelPromiscuousModeInfo.bPromiscuousOn &&
+ ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame) {
+ if ((fc & RTLLIB_FCTL_TODS) && !(fc & RTLLIB_FCTL_FROMDS) &&
+ (compare_ether_addr(dst, ieee->current_network.bssid) != 0) &&
+ (compare_ether_addr(bssid, ieee->current_network.bssid) == 0)) {
+ return -1;
+ }
+ }
+
+ /* Nullfunc frames may have PS-bit set, so they must be passed to
+ * hostap_handle_sta_rx() before being dropped here. */
+ if (!ieee->IntelPromiscuousModeInfo.bPromiscuousOn) {
+ if (stype != RTLLIB_STYPE_DATA &&
+ stype != RTLLIB_STYPE_DATA_CFACK &&
+ stype != RTLLIB_STYPE_DATA_CFPOLL &&
+ stype != RTLLIB_STYPE_DATA_CFACKPOLL &&
+ stype != RTLLIB_STYPE_QOS_DATA) {
+ if (stype != RTLLIB_STYPE_NULLFUNC)
+ RTLLIB_DEBUG_DROP(
+ "RX: dropped data frame "
+ "with no data (type=0x%02x, "
+ "subtype=0x%02x)\n",
+ type, stype);
+ return -1;
+ }
+ }
+
+ if (ieee->iw_mode != IW_MODE_MESH) {
+ /* packets from our adapter are dropped (echo) */
+ if (!memcmp(src, ieee->dev->dev_addr, ETH_ALEN))
+ return -1;
+
+ /* {broad,multi}cast packets to our BSS go through */
+ if (is_multicast_ether_addr(dst) || is_broadcast_ether_addr(dst)) {
+ if (memcmp(bssid, ieee->current_network.bssid, ETH_ALEN))
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_crypt_data **crypt, size_t hdrlen)
+{
+ struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
+ u16 fc = le16_to_cpu(hdr->frame_ctl);
+ int idx = 0;
+
+ if (ieee->host_decrypt) {
+ if (skb->len >= hdrlen + 3)
+ idx = skb->data[hdrlen + 3] >> 6;
+
+ *crypt = ieee->crypt[idx];
+ /* allow NULL decrypt to indicate an station specific override
+ * for default encryption */
+ if (*crypt && ((*crypt)->ops == NULL ||
+ (*crypt)->ops->decrypt_mpdu == NULL))
+ *crypt = NULL;
+
+ if (!*crypt && (fc & RTLLIB_FCTL_WEP)) {
+ /* This seems to be triggered by some (multicast?)
+ * frames from other than current BSS, so just drop the
+ * frames silently instead of filling system log with
+ * these reports. */
+ RTLLIB_DEBUG_DROP("Decryption failed (not set)"
+ " (SA= %pM)\n",
+ hdr->addr2);
+ ieee->ieee_stats.rx_discards_undecryptable++;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats,
+ struct rtllib_crypt_data *crypt, size_t hdrlen)
+{
+ struct rtllib_hdr_4addr *hdr;
+ int keyidx = 0;
+ u16 fc, sc;
+ u8 frag;
+
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_ctl);
+ sc = le16_to_cpu(hdr->seq_ctl);
+ frag = WLAN_GET_SEQ_FRAG(sc);
+
+ if ((!rx_stats->Decrypted))
+ ieee->need_sw_enc = 1;
+ else
+ ieee->need_sw_enc = 0;
+
+ keyidx = rtllib_rx_frame_decrypt(ieee, skb, crypt);
+ if (ieee->host_decrypt && (fc & RTLLIB_FCTL_WEP) && (keyidx < 0)) {
+ printk(KERN_INFO "%s: decrypt frame error\n", __func__);
+ return -1;
+ }
+
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+ if ((frag != 0 || (fc & RTLLIB_FCTL_MOREFRAGS))) {
+ int flen;
+ struct sk_buff *frag_skb = rtllib_frag_cache_get(ieee, hdr);
+ RTLLIB_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);
+
+ if (!frag_skb) {
+ RTLLIB_DEBUG(RTLLIB_DL_RX | RTLLIB_DL_FRAG,
+ "Rx cannot get skb from fragment "
+ "cache (morefrag=%d seq=%u frag=%u)\n",
+ (fc & RTLLIB_FCTL_MOREFRAGS) != 0,
+ WLAN_GET_SEQ_SEQ(sc), frag);
+ return -1;
+ }
+ flen = skb->len;
+ if (frag != 0)
+ flen -= hdrlen;
+
+ if (frag_skb->tail + flen > frag_skb->end) {
+ printk(KERN_WARNING "%s: host decrypted and "
+ "reassembled frame did not fit skb\n",
+ __func__);
+ rtllib_frag_cache_invalidate(ieee, hdr);
+ return -1;
+ }
+
+ if (frag == 0) {
+ /* copy first fragment (including full headers) into
+ * beginning of the fragment cache skb */
+ memcpy(skb_put(frag_skb, flen), skb->data, flen);
+ } else {
+ /* append frame payload to the end of the fragment
+ * cache skb */
+ memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
+ flen);
+ }
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+
+ if (fc & RTLLIB_FCTL_MOREFRAGS) {
+ /* more fragments expected - leave the skb in fragment
+ * cache for now; it will be delivered to upper layers
+ * after all fragments have been received */
+ return -2;
+ }
+
+ /* this was the last fragment and the frame will be
+ * delivered, so remove skb from fragment cache */
+ skb = frag_skb;
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+ rtllib_frag_cache_invalidate(ieee, hdr);
+ }
+
+ /* skb: hdr + (possible reassembled) full MSDU payload; possibly still
+ * encrypted/authenticated */
+ if (ieee->host_decrypt && (fc & RTLLIB_FCTL_WEP) &&
+ rtllib_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
+ printk(KERN_INFO "%s: ==>decrypt msdu error\n", __func__);
+ return -1;
+ }
+
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+ if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep) {
+ if (/*ieee->ieee802_1x &&*/
+ rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
+
+ /* pass unencrypted EAPOL frames even if encryption is
+ * configured */
+ struct eapol *eap = (struct eapol *)(skb->data +
+ 24);
+ RTLLIB_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
+ eap_get_type(eap->type));
+ } else {
+ RTLLIB_DEBUG_DROP(
+ "encryption configured, but RX "
+ "frame not encrypted (SA= %pM)\n",
+ hdr->addr2);
+ return -1;
+ }
+ }
+
+ if (crypt && !(fc & RTLLIB_FCTL_WEP) &&
+ rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
+ struct eapol *eap = (struct eapol *)(skb->data +
+ 24);
+ RTLLIB_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
+ eap_get_type(eap->type));
+ }
+
+ if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep &&
+ !rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
+ RTLLIB_DEBUG_DROP(
+ "dropped unencrypted RX data "
+ "frame from %pM"
+ " (drop_unencrypted=1)\n",
+ hdr->addr2);
+ return -1;
+ }
+
+ if (rtllib_is_eapol_frame(ieee, skb, hdrlen))
+ printk(KERN_WARNING "RX: IEEE802.1X EAPOL frame!\n");
+
+ return 0;
+}
+
+static void rtllib_rx_check_leave_lps(struct rtllib_device *ieee, u8 unicast, u8 nr_subframes)
+{
+ if (unicast) {
+
+ if ((ieee->state == RTLLIB_LINKED)) {
+ if (((ieee->LinkDetectInfo.NumRxUnicastOkInPeriod +
+ ieee->LinkDetectInfo.NumTxOkInPeriod) > 8) ||
+ (ieee->LinkDetectInfo.NumRxUnicastOkInPeriod > 2)) {
+ if (ieee->LeisurePSLeave)
+ ieee->LeisurePSLeave(ieee->dev);
+ }
+ }
+ }
+ ieee->last_rx_ps_time = jiffies;
+}
+
+static void rtllib_rx_indicate_pkt_legacy(struct rtllib_device *ieee,
+ struct rtllib_rx_stats *rx_stats,
+ struct rtllib_rxb *rxb,
+ u8 *dst,
+ u8 *src)
+{
+ struct net_device *dev = ieee->dev;
+ u16 ethertype;
+ int i = 0;
+
+ if (rxb == NULL) {
+ printk(KERN_INFO "%s: rxb is NULL!!\n", __func__);
+ return ;
+ }
+
+ for (i = 0; i < rxb->nr_subframes; i++) {
+ struct sk_buff *sub_skb = rxb->subframes[i];
+
+ if (sub_skb) {
+ /* convert hdr + possible LLC headers into Ethernet header */
+ ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
+ if (sub_skb->len >= 8 &&
+ ((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 &&
+ ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
+ memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) {
+ /* remove RFC1042 or Bridge-Tunnel encapsulation and
+ * replace EtherType */
+ skb_pull(sub_skb, SNAP_SIZE);
+ memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
+ memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
+ } else {
+ u16 len;
+ /* Leave Ethernet header part of hdr and full payload */
+ len = htons(sub_skb->len);
+ memcpy(skb_push(sub_skb, 2), &len, 2);
+ memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
+ memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
+ }
+
+ ieee->stats.rx_packets++;
+ ieee->stats.rx_bytes += sub_skb->len;
+
+ if (is_multicast_ether_addr(dst))
+ ieee->stats.multicast++;
+
+ /* Indicat the packets to upper layer */
+ memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
+ sub_skb->protocol = eth_type_trans(sub_skb, dev);
+ sub_skb->dev = dev;
+ sub_skb->dev->stats.rx_packets++;
+ sub_skb->dev->stats.rx_bytes += sub_skb->len;
+ sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
+ netif_rx(sub_skb);
+ }
+ }
+ kfree(rxb);
+ rxb = NULL;
+}
+
+static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats)
+{
+ struct net_device *dev = ieee->dev;
+ struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
+ struct rtllib_crypt_data *crypt = NULL;
+ struct rtllib_rxb *rxb = NULL;
+ struct rx_ts_record *pTS = NULL;
+ u16 fc, sc, SeqNum = 0;
+ u8 type, stype, multicast = 0, unicast = 0, nr_subframes = 0, TID = 0;
+ u8 dst[ETH_ALEN], src[ETH_ALEN], bssid[ETH_ALEN] = {0}, *payload;
+ size_t hdrlen = 0;
+ bool bToOtherSTA = false;
+ int ret = 0, i = 0;
+
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_ctl);
+ type = WLAN_FC_GET_TYPE(fc);
+ stype = WLAN_FC_GET_STYPE(fc);
+ sc = le16_to_cpu(hdr->seq_ctl);
+
+ /*Filter pkt not to me*/
+ multicast = is_multicast_ether_addr(hdr->addr1)|is_broadcast_ether_addr(hdr->addr1);
+ unicast = !multicast;
+ if (unicast && (compare_ether_addr(dev->dev_addr, hdr->addr1) != 0)) {
+ if (ieee->bNetPromiscuousMode)
+ bToOtherSTA = true;
+ else
+ goto rx_dropped;
+ }
+
+ /*Filter pkt has too small length */
+ hdrlen = rtllib_rx_get_hdrlen(ieee, skb, rx_stats);
+ if (skb->len < hdrlen) {
+ printk(KERN_INFO "%s():ERR!!! skb->len is smaller than hdrlen\n", __func__);
+ goto rx_dropped;
+ }
+
+ /* Filter Duplicate pkt */
+ ret = rtllib_rx_check_duplicate(ieee, skb, multicast);
+ if (ret < 0)
+ goto rx_dropped;
+
+ /* Filter CTRL Frame */
+ if (type == RTLLIB_FTYPE_CTL)
+ goto rx_dropped;
+
+ /* Filter MGNT Frame */
+ if (type == RTLLIB_FTYPE_MGMT) {
+ if (bToOtherSTA)
+ goto rx_dropped;
+ if (rtllib_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
+ goto rx_dropped;
+ else
+ goto rx_exit;
+ }
+
+ /* Filter WAPI DATA Frame */
+
+ /* Update statstics for AP roaming */
+ if (!bToOtherSTA) {
+ ieee->LinkDetectInfo.NumRecvDataInPeriod++;
+ ieee->LinkDetectInfo.NumRxOkInPeriod++;
+ }
+ dev->last_rx = jiffies;
+
+ /* Data frame - extract src/dst addresses */
+ rtllib_rx_extract_addr(ieee, hdr, dst, src, bssid);
+
+ /* Filter Data frames */
+ ret = rtllib_rx_data_filter(ieee, fc, dst, src, bssid, hdr->addr2);
+ if (ret < 0)
+ goto rx_dropped;
+
+ if (skb->len == hdrlen)
+ goto rx_dropped;
+
+ /* Send pspoll based on moredata */
+ if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->sta_sleep == LPS_IS_SLEEP)
+ && (ieee->polling) && (!bToOtherSTA)) {
+ if (WLAN_FC_MORE_DATA(fc)) {
+ /* more data bit is set, let's request a new frame from the AP */
+ rtllib_sta_ps_send_pspoll_frame(ieee);
+ } else {
+ ieee->polling = false;
+ }
+ }
+
+ /* Get crypt if encrypted */
+ ret = rtllib_rx_get_crypt(ieee, skb, &crypt, hdrlen);
+ if (ret == -1)
+ goto rx_dropped;
+
+ /* Decrypt data frame (including reassemble) */
+ ret = rtllib_rx_decrypt(ieee, skb, rx_stats, crypt, hdrlen);
+ if (ret == -1)
+ goto rx_dropped;
+ else if (ret == -2)
+ goto rx_exit;
+
+ /* Get TS for Rx Reorder */
+ hdr = (struct rtllib_hdr_4addr *) skb->data;
+ if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
+ && !is_multicast_ether_addr(hdr->addr1) && !is_broadcast_ether_addr(hdr->addr1)
+ && (!bToOtherSTA)) {
+ TID = Frame_QoSTID(skb->data);
+ SeqNum = WLAN_GET_SEQ_SEQ(sc);
+ GetTs(ieee, (struct ts_common_info **) &pTS, hdr->addr2, TID, RX_DIR, true);
+ if (TID != 0 && TID != 3)
+ ieee->bis_any_nonbepkts = true;
+ }
+
+ /* Parse rx data frame (For AMSDU) */
+ /* skb: hdr + (possible reassembled) full plaintext payload */
+ payload = skb->data + hdrlen;
+ rxb = kmalloc(sizeof(struct rtllib_rxb), GFP_ATOMIC);
+ if (rxb == NULL) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR,
+ "%s(): kmalloc rxb error\n", __func__);
+ goto rx_dropped;
+ }
+ /* to parse amsdu packets */
+ /* qos data packets & reserved bit is 1 */
+ if (parse_subframe(ieee, skb, rx_stats, rxb, src, dst) == 0) {
+ /* only to free rxb, and not submit the packets to upper layer */
+ for (i = 0; i < rxb->nr_subframes; i++)
+ dev_kfree_skb(rxb->subframes[i]);
+ kfree(rxb);
+ rxb = NULL;
+ goto rx_dropped;
+ }
+
+ /* Update WAPI PN */
+
+ /* Check if leave LPS */
+ if (!bToOtherSTA) {
+ if (ieee->bIsAggregateFrame)
+ nr_subframes = rxb->nr_subframes;
+ else
+ nr_subframes = 1;
+ if (unicast)
+ ieee->LinkDetectInfo.NumRxUnicastOkInPeriod += nr_subframes;
+ rtllib_rx_check_leave_lps(ieee, unicast, nr_subframes);
+ }
+
+ /* Indicate packets to upper layer or Rx Reorder */
+ if (ieee->pHTInfo->bCurRxReorderEnable == false || pTS == NULL || bToOtherSTA)
+ rtllib_rx_indicate_pkt_legacy(ieee, rx_stats, rxb, dst, src);
+ else
+ RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);
+
+ dev_kfree_skb(skb);
+
+ rx_exit:
+ return 1;
+
+ rx_dropped:
+ if (rxb != NULL) {
+ kfree(rxb);
+ rxb = NULL;
+ }
+ ieee->stats.rx_dropped++;
+
+ /* Returning 0 indicates to caller that we have not handled the SKB--
+ * so it is still allocated and can be used again by underlying
+ * hardware as a DMA target */
+ return 0;
+}
+
+static int rtllib_rx_Master(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats)
+{
+ return 0;
+}
+
+static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats)
+{
+ struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
+ u16 fc = le16_to_cpu(hdr->frame_ctl);
+ size_t hdrlen = rtllib_get_hdrlen(fc);
+
+ if (skb->len < hdrlen) {
+ printk(KERN_INFO "%s():ERR!!! skb->len is smaller than hdrlen\n", __func__);
+ return 0;
+ }
+
+ if (HTCCheck(ieee, skb->data)) {
+ if (net_ratelimit())
+ printk(KERN_INFO "%s: Find HTCControl!\n", __func__);
+ hdrlen += 4;
+ }
+
+ rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen);
+ ieee->stats.rx_packets++;
+ ieee->stats.rx_bytes += skb->len;
+
+ return 1;
+}
+
+static int rtllib_rx_Mesh(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats)
+{
+ return 0;
+}
+
+/* All received frames are sent to this function. @skb contains the frame in
+ * IEEE 802.11 format, i.e., in the format it was sent over air.
+ * This function is called only as a tasklet (software IRQ). */
+int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats)
+{
+ int ret = 0;
+
+ if ((NULL == ieee) || (NULL == skb) || (NULL == rx_stats)) {
+ printk(KERN_INFO "%s: Input parameters NULL!\n", __func__);
+ goto rx_dropped;
+ }
+ if (skb->len < 10) {
+ printk(KERN_INFO "%s: SKB length < 10\n", __func__);
+ goto rx_dropped;
+ }
+
+ switch (ieee->iw_mode) {
+ case IW_MODE_ADHOC:
+ case IW_MODE_INFRA:
+ ret = rtllib_rx_InfraAdhoc(ieee, skb, rx_stats);
+ break;
+ case IW_MODE_MASTER:
+ case IW_MODE_REPEAT:
+ ret = rtllib_rx_Master(ieee, skb, rx_stats);
+ break;
+ case IW_MODE_MONITOR:
+ ret = rtllib_rx_Monitor(ieee, skb, rx_stats);
+ break;
+ case IW_MODE_MESH:
+ ret = rtllib_rx_Mesh(ieee, skb, rx_stats);
+ break;
+ default:
+ printk(KERN_INFO"%s: ERR iw mode!!!\n", __func__);
+ break;
+ }
+
+ return ret;
+
+ rx_dropped:
+ ieee->stats.rx_dropped++;
+ return 0;
+}
+
+static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
+
+/*
+* Make ther structure we read from the beacon packet has
+* the right values
+*/
+static int rtllib_verify_qos_info(struct rtllib_qos_information_element
+ *info_element, int sub_type)
+{
+
+ if (info_element->qui_subtype != sub_type)
+ return -1;
+ if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN))
+ return -1;
+ if (info_element->qui_type != QOS_OUI_TYPE)
+ return -1;
+ if (info_element->version != QOS_VERSION_1)
+ return -1;
+
+ return 0;
+}
+
+
+/*
+ * Parse a QoS parameter element
+ */
+static int rtllib_read_qos_param_element(struct rtllib_qos_parameter_info
+ *element_param, struct rtllib_info_element
+ *info_element)
+{
+ int ret = 0;
+ u16 size = sizeof(struct rtllib_qos_parameter_info) - 2;
+
+ if ((info_element == NULL) || (element_param == NULL))
+ return -1;
+
+ if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) {
+ memcpy(element_param->info_element.qui, info_element->data,
+ info_element->len);
+ element_param->info_element.elementID = info_element->id;
+ element_param->info_element.length = info_element->len;
+ } else
+ ret = -1;
+ if (ret == 0)
+ ret = rtllib_verify_qos_info(&element_param->info_element,
+ QOS_OUI_PARAM_SUB_TYPE);
+ return ret;
+}
+
+/*
+ * Parse a QoS information element
+ */
+static int rtllib_read_qos_info_element(struct
+ rtllib_qos_information_element
+ *element_info, struct rtllib_info_element
+ *info_element)
+{
+ int ret = 0;
+ u16 size = sizeof(struct rtllib_qos_information_element) - 2;
+
+ if (element_info == NULL)
+ return -1;
+ if (info_element == NULL)
+ return -1;
+
+ if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) {
+ memcpy(element_info->qui, info_element->data,
+ info_element->len);
+ element_info->elementID = info_element->id;
+ element_info->length = info_element->len;
+ } else
+ ret = -1;
+
+ if (ret == 0)
+ ret = rtllib_verify_qos_info(element_info,
+ QOS_OUI_INFO_SUB_TYPE);
+ return ret;
+}
+
+
+/*
+ * Write QoS parameters from the ac parameters.
+ */
+static int rtllib_qos_convert_ac_to_parameters(struct rtllib_qos_parameter_info *param_elm,
+ struct rtllib_qos_data *qos_data)
+{
+ struct rtllib_qos_ac_parameter *ac_params;
+ struct rtllib_qos_parameters *qos_param = &(qos_data->parameters);
+ int rc = 0;
+ int i;
+ u8 aci;
+ u8 acm;
+
+ qos_data->wmm_acm = 0;
+ for (i = 0; i < QOS_QUEUE_NUM; i++) {
+ ac_params = &(param_elm->ac_params_record[i]);
+
+ aci = (ac_params->aci_aifsn & 0x60) >> 5;
+ acm = (ac_params->aci_aifsn & 0x10) >> 4;
+
+ if (aci >= QOS_QUEUE_NUM)
+ continue;
+ switch (aci) {
+ case 1:
+ /* BIT(0) | BIT(3) */
+ if (acm)
+ qos_data->wmm_acm |= (0x01<<0)|(0x01<<3);
+ break;
+ case 2:
+ /* BIT(4) | BIT(5) */
+ if (acm)
+ qos_data->wmm_acm |= (0x01<<4)|(0x01<<5);
+ break;
+ case 3:
+ /* BIT(6) | BIT(7) */
+ if (acm)
+ qos_data->wmm_acm |= (0x01<<6)|(0x01<<7);
+ break;
+ case 0:
+ default:
+ /* BIT(1) | BIT(2) */
+ if (acm)
+ qos_data->wmm_acm |= (0x01<<1)|(0x01<<2);
+ break;
+ }
+
+ qos_param->aifs[aci] = (ac_params->aci_aifsn) & 0x0f;
+
+ /* WMM spec P.11: The minimum value for AIFSN shall be 2 */
+ qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2 : qos_param->aifs[aci];
+
+ qos_param->cw_min[aci] = ac_params->ecw_min_max & 0x0F;
+
+ qos_param->cw_max[aci] = (ac_params->ecw_min_max & 0xF0) >> 4;
+
+ qos_param->flag[aci] =
+ (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
+ qos_param->tx_op_limit[aci] = le16_to_cpu(ac_params->tx_op_limit);
+ }
+ return rc;
+}
+
+/*
+ * we have a generic data element which it may contain QoS information or
+ * parameters element. check the information element length to decide
+ * which type to read
+ */
+static int rtllib_parse_qos_info_param_IE(struct rtllib_info_element
+ *info_element,
+ struct rtllib_network *network)
+{
+ int rc = 0;
+ struct rtllib_qos_information_element qos_info_element;
+
+ rc = rtllib_read_qos_info_element(&qos_info_element, info_element);
+
+ if (rc == 0) {
+ network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
+ network->flags |= NETWORK_HAS_QOS_INFORMATION;
+ } else {
+ struct rtllib_qos_parameter_info param_element;
+
+ rc = rtllib_read_qos_param_element(&param_element,
+ info_element);
+ if (rc == 0) {
+ rtllib_qos_convert_ac_to_parameters(&param_element,
+ &(network->qos_data));
+ network->flags |= NETWORK_HAS_QOS_PARAMETERS;
+ network->qos_data.param_count =
+ param_element.info_element.ac_info & 0x0F;
+ }
+ }
+
+ if (rc == 0) {
+ RTLLIB_DEBUG_QOS("QoS is supported\n");
+ network->qos_data.supported = 1;
+ }
+ return rc;
+}
+
+#define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x
+
+static const char *get_info_element_string(u16 id)
+{
+ switch (id) {
+ MFIE_STRING(SSID);
+ MFIE_STRING(RATES);
+ MFIE_STRING(FH_SET);
+ MFIE_STRING(DS_SET);
+ MFIE_STRING(CF_SET);
+ MFIE_STRING(TIM);
+ MFIE_STRING(IBSS_SET);
+ MFIE_STRING(COUNTRY);
+ MFIE_STRING(HOP_PARAMS);
+ MFIE_STRING(HOP_TABLE);
+ MFIE_STRING(REQUEST);
+ MFIE_STRING(CHALLENGE);
+ MFIE_STRING(POWER_CONSTRAINT);
+ MFIE_STRING(POWER_CAPABILITY);
+ MFIE_STRING(TPC_REQUEST);
+ MFIE_STRING(TPC_REPORT);
+ MFIE_STRING(SUPP_CHANNELS);
+ MFIE_STRING(CSA);
+ MFIE_STRING(MEASURE_REQUEST);
+ MFIE_STRING(MEASURE_REPORT);
+ MFIE_STRING(QUIET);
+ MFIE_STRING(IBSS_DFS);
+ MFIE_STRING(RSN);
+ MFIE_STRING(RATES_EX);
+ MFIE_STRING(GENERIC);
+ MFIE_STRING(QOS_PARAMETER);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline void rtllib_extract_country_ie(
+ struct rtllib_device *ieee,
+ struct rtllib_info_element *info_element,
+ struct rtllib_network *network,
+ u8 *addr2)
+{
+ if (IS_DOT11D_ENABLE(ieee)) {
+ if (info_element->len != 0) {
+ memcpy(network->CountryIeBuf, info_element->data, info_element->len);
+ network->CountryIeLen = info_element->len;
+
+ if (!IS_COUNTRY_IE_VALID(ieee)) {
+ if ((rtllib_act_scanning(ieee, false) == true) && (ieee->FirstIe_InScan == 1))
+ printk(KERN_INFO "Received beacon ContryIE, SSID: <%s>\n", network->ssid);
+ Dot11d_UpdateCountryIe(ieee, addr2, info_element->len, info_element->data);
+ }
+ }
+
+ if (IS_EQUAL_CIE_SRC(ieee, addr2))
+ UPDATE_CIE_WATCHDOG(ieee);
+ }
+
+}
+
+int rtllib_parse_info_param(struct rtllib_device *ieee,
+ struct rtllib_info_element *info_element,
+ u16 length,
+ struct rtllib_network *network,
+ struct rtllib_rx_stats *stats)
+{
+ u8 i;
+ short offset;
+ u16 tmp_htcap_len = 0;
+ u16 tmp_htinfo_len = 0;
+ u16 ht_realtek_agg_len = 0;
+ u8 ht_realtek_agg_buf[MAX_IE_LEN];
+ char rates_str[64];
+ char *p;
+
+ while (length >= sizeof(*info_element)) {
+ if (sizeof(*info_element) + info_element->len > length) {
+ RTLLIB_DEBUG_MGMT("Info elem: parse failed: "
+ "info_element->len + 2 > left : "
+ "info_element->len+2=%zd left=%d, id=%d.\n",
+ info_element->len +
+ sizeof(*info_element),
+ length, info_element->id);
+ /* We stop processing but don't return an error here
+ * because some misbehaviour APs break this rule. ie.
+ * Orinoco AP1000. */
+ break;
+ }
+
+ switch (info_element->id) {
+ case MFIE_TYPE_SSID:
+ if (rtllib_is_empty_essid(info_element->data,
+ info_element->len)) {
+ network->flags |= NETWORK_EMPTY_ESSID;
+ break;
+ }
+
+ network->ssid_len = min(info_element->len,
+ (u8) IW_ESSID_MAX_SIZE);
+ memcpy(network->ssid, info_element->data, network->ssid_len);
+ if (network->ssid_len < IW_ESSID_MAX_SIZE)
+ memset(network->ssid + network->ssid_len, 0,
+ IW_ESSID_MAX_SIZE - network->ssid_len);
+
+ RTLLIB_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n",
+ network->ssid, network->ssid_len);
+ break;
+
+ case MFIE_TYPE_RATES:
+ p = rates_str;
+ network->rates_len = min(info_element->len,
+ MAX_RATES_LENGTH);
+ for (i = 0; i < network->rates_len; i++) {
+ network->rates[i] = info_element->data[i];
+ p += snprintf(p, sizeof(rates_str) -
+ (p - rates_str), "%02X ",
+ network->rates[i]);
+ if (rtllib_is_ofdm_rate
+ (info_element->data[i])) {
+ network->flags |= NETWORK_HAS_OFDM;
+ if (info_element->data[i] &
+ RTLLIB_BASIC_RATE_MASK)
+ network->flags &=
+ ~NETWORK_HAS_CCK;
+ }
+
+ if (rtllib_is_cck_rate
+ (info_element->data[i])) {
+ network->flags |= NETWORK_HAS_CCK;
+ }
+ }
+
+ RTLLIB_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n",
+ rates_str, network->rates_len);
+ break;
+
+ case MFIE_TYPE_RATES_EX:
+ p = rates_str;
+ network->rates_ex_len = min(info_element->len,
+ MAX_RATES_EX_LENGTH);
+ for (i = 0; i < network->rates_ex_len; i++) {
+ network->rates_ex[i] = info_element->data[i];
+ p += snprintf(p, sizeof(rates_str) -
+ (p - rates_str), "%02X ",
+ network->rates[i]);
+ if (rtllib_is_ofdm_rate
+ (info_element->data[i])) {
+ network->flags |= NETWORK_HAS_OFDM;
+ if (info_element->data[i] &
+ RTLLIB_BASIC_RATE_MASK)
+ network->flags &=
+ ~NETWORK_HAS_CCK;
+ }
+ }
+
+ RTLLIB_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n",
+ rates_str, network->rates_ex_len);
+ break;
+
+ case MFIE_TYPE_DS_SET:
+ RTLLIB_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n",
+ info_element->data[0]);
+ network->channel = info_element->data[0];
+ break;
+
+ case MFIE_TYPE_FH_SET:
+ RTLLIB_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n");
+ break;
+
+ case MFIE_TYPE_CF_SET:
+ RTLLIB_DEBUG_MGMT("MFIE_TYPE_CF_SET: ignored\n");
+ break;
+
+ case MFIE_TYPE_TIM:
+ if (info_element->len < 4)
+ break;
+
+ network->tim.tim_count = info_element->data[0];
+ network->tim.tim_period = info_element->data[1];
+
+ network->dtim_period = info_element->data[1];
+ if (ieee->state != RTLLIB_LINKED)
+ break;
+ network->last_dtim_sta_time = jiffies;
+
+ network->dtim_data = RTLLIB_DTIM_VALID;
+
+
+ if (info_element->data[2] & 1)
+ network->dtim_data |= RTLLIB_DTIM_MBCAST;
+
+ offset = (info_element->data[2] >> 1)*2;
+
+
+ if (ieee->assoc_id < 8*offset ||
+ ieee->assoc_id > 8*(offset + info_element->len - 3))
+ break;
+
+ offset = (ieee->assoc_id / 8) - offset;
+ if (info_element->data[3 + offset] &
+ (1 << (ieee->assoc_id % 8)))
+ network->dtim_data |= RTLLIB_DTIM_UCAST;
+
+ network->listen_interval = network->dtim_period;
+ break;
+
+ case MFIE_TYPE_ERP:
+ network->erp_value = info_element->data[0];
+ network->flags |= NETWORK_HAS_ERP_VALUE;
+ RTLLIB_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n",
+ network->erp_value);
+ break;
+ case MFIE_TYPE_IBSS_SET:
+ network->atim_window = info_element->data[0];
+ RTLLIB_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n",
+ network->atim_window);
+ break;
+
+ case MFIE_TYPE_CHALLENGE:
+ RTLLIB_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n");
+ break;
+
+ case MFIE_TYPE_GENERIC:
+ RTLLIB_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n",
+ info_element->len);
+ if (!rtllib_parse_qos_info_param_IE(info_element,
+ network))
+ break;
+ if (info_element->len >= 4 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x50 &&
+ info_element->data[2] == 0xf2 &&
+ info_element->data[3] == 0x01) {
+ network->wpa_ie_len = min(info_element->len + 2,
+ MAX_WPA_IE_LEN);
+ memcpy(network->wpa_ie, info_element,
+ network->wpa_ie_len);
+ break;
+ }
+ if (info_element->len == 7 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0xe0 &&
+ info_element->data[2] == 0x4c &&
+ info_element->data[3] == 0x01 &&
+ info_element->data[4] == 0x02)
+ network->Turbo_Enable = 1;
+
+ if (tmp_htcap_len == 0) {
+ if (info_element->len >= 4 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x90 &&
+ info_element->data[2] == 0x4c &&
+ info_element->data[3] == 0x033) {
+
+ tmp_htcap_len = min(info_element->len, (u8)MAX_IE_LEN);
+ if (tmp_htcap_len != 0) {
+ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
+ network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf) ?
+ sizeof(network->bssht.bdHTCapBuf) : tmp_htcap_len;
+ memcpy(network->bssht.bdHTCapBuf, info_element->data, network->bssht.bdHTCapLen);
+ }
+ }
+ if (tmp_htcap_len != 0) {
+ network->bssht.bdSupportHT = true;
+ network->bssht.bdHT1R = ((((struct ht_capab_ele *)(network->bssht.bdHTCapBuf))->MCS[1]) == 0);
+ } else {
+ network->bssht.bdSupportHT = false;
+ network->bssht.bdHT1R = false;
+ }
+ }
+
+
+ if (tmp_htinfo_len == 0) {
+ if (info_element->len >= 4 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x90 &&
+ info_element->data[2] == 0x4c &&
+ info_element->data[3] == 0x034) {
+ tmp_htinfo_len = min(info_element->len, (u8)MAX_IE_LEN);
+ if (tmp_htinfo_len != 0) {
+ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
+ if (tmp_htinfo_len) {
+ network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf) ?
+ sizeof(network->bssht.bdHTInfoBuf) : tmp_htinfo_len;
+ memcpy(network->bssht.bdHTInfoBuf, info_element->data, network->bssht.bdHTInfoLen);
+ }
+
+ }
+
+ }
+ }
+
+ if (ieee->aggregation) {
+ if (network->bssht.bdSupportHT) {
+ if (info_element->len >= 4 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0xe0 &&
+ info_element->data[2] == 0x4c &&
+ info_element->data[3] == 0x02) {
+ ht_realtek_agg_len = min(info_element->len, (u8)MAX_IE_LEN);
+ memcpy(ht_realtek_agg_buf, info_element->data, info_element->len);
+ }
+ if (ht_realtek_agg_len >= 5) {
+ network->realtek_cap_exit = true;
+ network->bssht.bdRT2RTAggregation = true;
+
+ if ((ht_realtek_agg_buf[4] == 1) && (ht_realtek_agg_buf[5] & 0x02))
+ network->bssht.bdRT2RTLongSlotTime = true;
+
+ if ((ht_realtek_agg_buf[4] == 1) && (ht_realtek_agg_buf[5] & RT_HT_CAP_USE_92SE))
+ network->bssht.RT2RT_HT_Mode |= RT_HT_CAP_USE_92SE;
+ }
+ }
+ if (ht_realtek_agg_len >= 5) {
+ if ((ht_realtek_agg_buf[5] & RT_HT_CAP_USE_SOFTAP))
+ network->bssht.RT2RT_HT_Mode |= RT_HT_CAP_USE_SOFTAP;
+ }
+ }
+
+ if ((info_element->len >= 3 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x05 &&
+ info_element->data[2] == 0xb5) ||
+ (info_element->len >= 3 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x0a &&
+ info_element->data[2] == 0xf7) ||
+ (info_element->len >= 3 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x10 &&
+ info_element->data[2] == 0x18)) {
+ network->broadcom_cap_exist = true;
+ }
+ if (info_element->len >= 3 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x0c &&
+ info_element->data[2] == 0x43)
+ network->ralink_cap_exist = true;
+ if ((info_element->len >= 3 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x03 &&
+ info_element->data[2] == 0x7f) ||
+ (info_element->len >= 3 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x13 &&
+ info_element->data[2] == 0x74))
+ network->atheros_cap_exist = true;
+
+ if ((info_element->len >= 3 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x50 &&
+ info_element->data[2] == 0x43))
+ network->marvell_cap_exist = true;
+ if (info_element->len >= 3 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x40 &&
+ info_element->data[2] == 0x96)
+ network->cisco_cap_exist = true;
+
+
+ if (info_element->len >= 3 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x0a &&
+ info_element->data[2] == 0xf5)
+ network->airgo_cap_exist = true;
+
+ if (info_element->len > 4 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x40 &&
+ info_element->data[2] == 0x96 &&
+ info_element->data[3] == 0x01) {
+ if (info_element->len == 6) {
+ memcpy(network->CcxRmState, &info_element[4], 2);
+ if (network->CcxRmState[0] != 0)
+ network->bCcxRmEnable = true;
+ else
+ network->bCcxRmEnable = false;
+ network->MBssidMask = network->CcxRmState[1] & 0x07;
+ if (network->MBssidMask != 0) {
+ network->bMBssidValid = true;
+ network->MBssidMask = 0xff << (network->MBssidMask);
+ memcpy(network->MBssid, network->bssid, ETH_ALEN);
+ network->MBssid[5] &= network->MBssidMask;
+ } else {
+ network->bMBssidValid = false;
+ }
+ } else {
+ network->bCcxRmEnable = false;
+ }
+ }
+ if (info_element->len > 4 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x40 &&
+ info_element->data[2] == 0x96 &&
+ info_element->data[3] == 0x03) {
+ if (info_element->len == 5) {
+ network->bWithCcxVerNum = true;
+ network->BssCcxVerNumber = info_element->data[4];
+ } else {
+ network->bWithCcxVerNum = false;
+ network->BssCcxVerNumber = 0;
+ }
+ }
+ if (info_element->len > 4 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x50 &&
+ info_element->data[2] == 0xf2 &&
+ info_element->data[3] == 0x04) {
+ RTLLIB_DEBUG_MGMT("MFIE_TYPE_WZC: %d bytes\n",
+ info_element->len);
+ network->wzc_ie_len = min(info_element->len+2,
+ MAX_WZC_IE_LEN);
+ memcpy(network->wzc_ie, info_element,
+ network->wzc_ie_len);
+ }
+ break;
+
+ case MFIE_TYPE_RSN:
+ RTLLIB_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n",
+ info_element->len);
+ network->rsn_ie_len = min(info_element->len + 2,
+ MAX_WPA_IE_LEN);
+ memcpy(network->rsn_ie, info_element,
+ network->rsn_ie_len);
+ break;
+
+ case MFIE_TYPE_HT_CAP:
+ RTLLIB_DEBUG_SCAN("MFIE_TYPE_HT_CAP: %d bytes\n",
+ info_element->len);
+ tmp_htcap_len = min(info_element->len, (u8)MAX_IE_LEN);
+ if (tmp_htcap_len != 0) {
+ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
+ network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf) ?
+ sizeof(network->bssht.bdHTCapBuf) : tmp_htcap_len;
+ memcpy(network->bssht.bdHTCapBuf,
+ info_element->data,
+ network->bssht.bdHTCapLen);
+
+ network->bssht.bdSupportHT = true;
+ network->bssht.bdHT1R = ((((struct ht_capab_ele *)
+ network->bssht.bdHTCapBuf))->MCS[1]) == 0;
+
+ network->bssht.bdBandWidth = (enum ht_channel_width)
+ (((struct ht_capab_ele *)
+ (network->bssht.bdHTCapBuf))->ChlWidth);
+ } else {
+ network->bssht.bdSupportHT = false;
+ network->bssht.bdHT1R = false;
+ network->bssht.bdBandWidth = HT_CHANNEL_WIDTH_20;
+ }
+ break;
+
+
+ case MFIE_TYPE_HT_INFO:
+ RTLLIB_DEBUG_SCAN("MFIE_TYPE_HT_INFO: %d bytes\n",
+ info_element->len);
+ tmp_htinfo_len = min(info_element->len, (u8)MAX_IE_LEN);
+ if (tmp_htinfo_len) {
+ network->bssht.bdHTSpecVer = HT_SPEC_VER_IEEE;
+ network->bssht.bdHTInfoLen = tmp_htinfo_len >
+ sizeof(network->bssht.bdHTInfoBuf) ?
+ sizeof(network->bssht.bdHTInfoBuf) :
+ tmp_htinfo_len;
+ memcpy(network->bssht.bdHTInfoBuf,
+ info_element->data,
+ network->bssht.bdHTInfoLen);
+ }
+ break;
+
+ case MFIE_TYPE_AIRONET:
+ RTLLIB_DEBUG_SCAN("MFIE_TYPE_AIRONET: %d bytes\n",
+ info_element->len);
+ if (info_element->len > IE_CISCO_FLAG_POSITION) {
+ network->bWithAironetIE = true;
+
+ if ((info_element->data[IE_CISCO_FLAG_POSITION]
+ & SUPPORT_CKIP_MIC) ||
+ (info_element->data[IE_CISCO_FLAG_POSITION]
+ & SUPPORT_CKIP_PK))
+ network->bCkipSupported = true;
+ else
+ network->bCkipSupported = false;
+ } else {
+ network->bWithAironetIE = false;
+ network->bCkipSupported = false;
+ }
+ break;
+ case MFIE_TYPE_QOS_PARAMETER:
+ printk(KERN_ERR
+ "QoS Error need to parse QOS_PARAMETER IE\n");
+ break;
+
+ case MFIE_TYPE_COUNTRY:
+ RTLLIB_DEBUG_SCAN("MFIE_TYPE_COUNTRY: %d bytes\n",
+ info_element->len);
+ rtllib_extract_country_ie(ieee, info_element, network,
+ network->bssid);
+ break;
+/* TODO */
+ default:
+ RTLLIB_DEBUG_MGMT
+ ("Unsupported info element: %s (%d)\n",
+ get_info_element_string(info_element->id),
+ info_element->id);
+ break;
+ }
+
+ length -= sizeof(*info_element) + info_element->len;
+ info_element =
+ (struct rtllib_info_element *)&info_element->
+ data[info_element->len];
+ }
+
+ if (!network->atheros_cap_exist && !network->broadcom_cap_exist &&
+ !network->cisco_cap_exist && !network->ralink_cap_exist &&
+ !network->bssht.bdRT2RTAggregation)
+ network->unknown_cap_exist = true;
+ else
+ network->unknown_cap_exist = false;
+ return 0;
+}
+
+static inline u8 rtllib_SignalStrengthTranslate(u8 CurrSS)
+{
+ u8 RetSS;
+
+ if (CurrSS >= 71 && CurrSS <= 100)
+ RetSS = 90 + ((CurrSS - 70) / 3);
+ else if (CurrSS >= 41 && CurrSS <= 70)
+ RetSS = 78 + ((CurrSS - 40) / 3);
+ else if (CurrSS >= 31 && CurrSS <= 40)
+ RetSS = 66 + (CurrSS - 30);
+ else if (CurrSS >= 21 && CurrSS <= 30)
+ RetSS = 54 + (CurrSS - 20);
+ else if (CurrSS >= 5 && CurrSS <= 20)
+ RetSS = 42 + (((CurrSS - 5) * 2) / 3);
+ else if (CurrSS == 4)
+ RetSS = 36;
+ else if (CurrSS == 3)
+ RetSS = 27;
+ else if (CurrSS == 2)
+ RetSS = 18;
+ else if (CurrSS == 1)
+ RetSS = 9;
+ else
+ RetSS = CurrSS;
+
+ return RetSS;
+}
+
+static long rtllib_translate_todbm(u8 signal_strength_index)
+{
+ long signal_power;
+
+ signal_power = (long)((signal_strength_index + 1) >> 1);
+ signal_power -= 95;
+
+ return signal_power;
+}
+
+static inline int rtllib_network_init(
+ struct rtllib_device *ieee,
+ struct rtllib_probe_response *beacon,
+ struct rtllib_network *network,
+ struct rtllib_rx_stats *stats)
+{
+
+ /*
+ network->qos_data.active = 0;
+ network->qos_data.supported = 0;
+ network->qos_data.param_count = 0;
+ network->qos_data.old_param_count = 0;
+ */
+ memset(&network->qos_data, 0, sizeof(struct rtllib_qos_data));
+
+ /* Pull out fixed field data */
+ memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
+ network->capability = le16_to_cpu(beacon->capability);
+ network->last_scanned = jiffies;
+ network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]);
+ network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]);
+ network->beacon_interval = le32_to_cpu(beacon->beacon_interval);
+ /* Where to pull this? beacon->listen_interval;*/
+ network->listen_interval = 0x0A;
+ network->rates_len = network->rates_ex_len = 0;
+ network->last_associate = 0;
+ network->ssid_len = 0;
+ network->hidden_ssid_len = 0;
+ memset(network->hidden_ssid, 0, sizeof(network->hidden_ssid));
+ network->flags = 0;
+ network->atim_window = 0;
+ network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ?
+ 0x3 : 0x0;
+ network->berp_info_valid = false;
+ network->broadcom_cap_exist = false;
+ network->ralink_cap_exist = false;
+ network->atheros_cap_exist = false;
+ network->cisco_cap_exist = false;
+ network->unknown_cap_exist = false;
+ network->realtek_cap_exit = false;
+ network->marvell_cap_exist = false;
+ network->airgo_cap_exist = false;
+ network->Turbo_Enable = 0;
+ network->SignalStrength = stats->SignalStrength;
+ network->RSSI = stats->SignalStrength;
+ network->CountryIeLen = 0;
+ memset(network->CountryIeBuf, 0, MAX_IE_LEN);
+ HTInitializeBssDesc(&network->bssht);
+ if (stats->freq == RTLLIB_52GHZ_BAND) {
+ /* for A band (No DS info) */
+ network->channel = stats->received_channel;
+ } else
+ network->flags |= NETWORK_HAS_CCK;
+
+ network->wpa_ie_len = 0;
+ network->rsn_ie_len = 0;
+ network->wzc_ie_len = 0;
+
+ if (rtllib_parse_info_param(ieee,
+ beacon->info_element,
+ (stats->len - sizeof(*beacon)),
+ network,
+ stats))
+ return 1;
+
+ network->mode = 0;
+ if (stats->freq == RTLLIB_52GHZ_BAND)
+ network->mode = IEEE_A;
+ else {
+ if (network->flags & NETWORK_HAS_OFDM)
+ network->mode |= IEEE_G;
+ if (network->flags & NETWORK_HAS_CCK)
+ network->mode |= IEEE_B;
+ }
+
+ if (network->mode == 0) {
+ RTLLIB_DEBUG_SCAN("Filtered out '%s (%pM)' "
+ "network.\n",
+ escape_essid(network->ssid,
+ network->ssid_len),
+ network->bssid);
+ return 1;
+ }
+
+ if (network->bssht.bdSupportHT) {
+ if (network->mode == IEEE_A)
+ network->mode = IEEE_N_5G;
+ else if (network->mode & (IEEE_G | IEEE_B))
+ network->mode = IEEE_N_24G;
+ }
+ if (rtllib_is_empty_essid(network->ssid, network->ssid_len))
+ network->flags |= NETWORK_EMPTY_ESSID;
+ stats->signal = 30 + (stats->SignalStrength * 70) / 100;
+ stats->noise = rtllib_translate_todbm((u8)(100-stats->signal)) - 25;
+
+ memcpy(&network->stats, stats, sizeof(network->stats));
+
+ return 0;
+}
+
+static inline int is_same_network(struct rtllib_network *src,
+ struct rtllib_network *dst, u8 ssidbroad)
+{
+ /* A network is only a duplicate if the channel, BSSID, ESSID
+ * and the capability field (in particular IBSS and BSS) all match.
+ * We treat all <hidden> with the same BSSID and channel
+ * as one network */
+ return (((src->ssid_len == dst->ssid_len) || (!ssidbroad)) &&
+ (src->channel == dst->channel) &&
+ !memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
+ (!memcmp(src->ssid, dst->ssid, src->ssid_len) ||
+ (!ssidbroad)) &&
+ ((src->capability & WLAN_CAPABILITY_IBSS) ==
+ (dst->capability & WLAN_CAPABILITY_IBSS)) &&
+ ((src->capability & WLAN_CAPABILITY_ESS) ==
+ (dst->capability & WLAN_CAPABILITY_ESS)));
+}
+
+static inline void update_ibss_network(struct rtllib_network *dst,
+ struct rtllib_network *src)
+{
+ memcpy(&dst->stats, &src->stats, sizeof(struct rtllib_rx_stats));
+ dst->last_scanned = jiffies;
+}
+
+
+static inline void update_network(struct rtllib_network *dst,
+ struct rtllib_network *src)
+{
+ int qos_active;
+ u8 old_param;
+
+ memcpy(&dst->stats, &src->stats, sizeof(struct rtllib_rx_stats));
+ dst->capability = src->capability;
+ memcpy(dst->rates, src->rates, src->rates_len);
+ dst->rates_len = src->rates_len;
+ memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
+ dst->rates_ex_len = src->rates_ex_len;
+ if (src->ssid_len > 0) {
+ if (dst->ssid_len == 0) {
+ memset(dst->hidden_ssid, 0, sizeof(dst->hidden_ssid));
+ dst->hidden_ssid_len = src->ssid_len;
+ memcpy(dst->hidden_ssid, src->ssid, src->ssid_len);
+ } else {
+ memset(dst->ssid, 0, dst->ssid_len);
+ dst->ssid_len = src->ssid_len;
+ memcpy(dst->ssid, src->ssid, src->ssid_len);
+ }
+ }
+ dst->mode = src->mode;
+ dst->flags = src->flags;
+ dst->time_stamp[0] = src->time_stamp[0];
+ dst->time_stamp[1] = src->time_stamp[1];
+ if (src->flags & NETWORK_HAS_ERP_VALUE) {
+ dst->erp_value = src->erp_value;
+ dst->berp_info_valid = src->berp_info_valid = true;
+ }
+ dst->beacon_interval = src->beacon_interval;
+ dst->listen_interval = src->listen_interval;
+ dst->atim_window = src->atim_window;
+ dst->dtim_period = src->dtim_period;
+ dst->dtim_data = src->dtim_data;
+ dst->last_dtim_sta_time = src->last_dtim_sta_time;
+ memcpy(&dst->tim, &src->tim, sizeof(struct rtllib_tim_parameters));
+
+ dst->bssht.bdSupportHT = src->bssht.bdSupportHT;
+ dst->bssht.bdRT2RTAggregation = src->bssht.bdRT2RTAggregation;
+ dst->bssht.bdHTCapLen = src->bssht.bdHTCapLen;
+ memcpy(dst->bssht.bdHTCapBuf, src->bssht.bdHTCapBuf,
+ src->bssht.bdHTCapLen);
+ dst->bssht.bdHTInfoLen = src->bssht.bdHTInfoLen;
+ memcpy(dst->bssht.bdHTInfoBuf, src->bssht.bdHTInfoBuf,
+ src->bssht.bdHTInfoLen);
+ dst->bssht.bdHTSpecVer = src->bssht.bdHTSpecVer;
+ dst->bssht.bdRT2RTLongSlotTime = src->bssht.bdRT2RTLongSlotTime;
+ dst->broadcom_cap_exist = src->broadcom_cap_exist;
+ dst->ralink_cap_exist = src->ralink_cap_exist;
+ dst->atheros_cap_exist = src->atheros_cap_exist;
+ dst->realtek_cap_exit = src->realtek_cap_exit;
+ dst->marvell_cap_exist = src->marvell_cap_exist;
+ dst->cisco_cap_exist = src->cisco_cap_exist;
+ dst->airgo_cap_exist = src->airgo_cap_exist;
+ dst->unknown_cap_exist = src->unknown_cap_exist;
+ memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
+ dst->wpa_ie_len = src->wpa_ie_len;
+ memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len);
+ dst->rsn_ie_len = src->rsn_ie_len;
+ memcpy(dst->wzc_ie, src->wzc_ie, src->wzc_ie_len);
+ dst->wzc_ie_len = src->wzc_ie_len;
+
+ dst->last_scanned = jiffies;
+ /* qos related parameters */
+ qos_active = dst->qos_data.active;
+ old_param = dst->qos_data.param_count;
+ dst->qos_data.supported = src->qos_data.supported;
+ if (dst->flags & NETWORK_HAS_QOS_PARAMETERS)
+ memcpy(&dst->qos_data, &src->qos_data,
+ sizeof(struct rtllib_qos_data));
+ if (dst->qos_data.supported == 1) {
+ if (dst->ssid_len)
+ RTLLIB_DEBUG_QOS
+ ("QoS the network %s is QoS supported\n",
+ dst->ssid);
+ else
+ RTLLIB_DEBUG_QOS
+ ("QoS the network is QoS supported\n");
+ }
+ dst->qos_data.active = qos_active;
+ dst->qos_data.old_param_count = old_param;
+
+ /* dst->last_associate is not overwritten */
+ dst->wmm_info = src->wmm_info;
+ if (src->wmm_param[0].ac_aci_acm_aifsn ||
+ src->wmm_param[1].ac_aci_acm_aifsn ||
+ src->wmm_param[2].ac_aci_acm_aifsn ||
+ src->wmm_param[1].ac_aci_acm_aifsn)
+ memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
+
+ dst->SignalStrength = src->SignalStrength;
+ dst->RSSI = src->RSSI;
+ dst->Turbo_Enable = src->Turbo_Enable;
+
+ dst->CountryIeLen = src->CountryIeLen;
+ memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen);
+
+ dst->bWithAironetIE = src->bWithAironetIE;
+ dst->bCkipSupported = src->bCkipSupported;
+ memcpy(dst->CcxRmState, src->CcxRmState, 2);
+ dst->bCcxRmEnable = src->bCcxRmEnable;
+ dst->MBssidMask = src->MBssidMask;
+ dst->bMBssidValid = src->bMBssidValid;
+ memcpy(dst->MBssid, src->MBssid, 6);
+ dst->bWithCcxVerNum = src->bWithCcxVerNum;
+ dst->BssCcxVerNumber = src->BssCcxVerNumber;
+}
+
+static inline int is_beacon(__le16 fc)
+{
+ return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == RTLLIB_STYPE_BEACON);
+}
+
+static int IsPassiveChannel(struct rtllib_device *rtllib, u8 channel)
+{
+ if (MAX_CHANNEL_NUMBER < channel) {
+ printk(KERN_INFO "%s(): Invalid Channel\n", __func__);
+ return 0;
+ }
+
+ if (rtllib->active_channel_map[channel] == 2)
+ return 1;
+
+ return 0;
+}
+
+int IsLegalChannel(struct rtllib_device *rtllib, u8 channel)
+{
+ if (MAX_CHANNEL_NUMBER < channel) {
+ printk(KERN_INFO "%s(): Invalid Channel\n", __func__);
+ return 0;
+ }
+ if (rtllib->active_channel_map[channel] > 0)
+ return 1;
+
+ return 0;
+}
+
+static inline void rtllib_process_probe_response(
+ struct rtllib_device *ieee,
+ struct rtllib_probe_response *beacon,
+ struct rtllib_rx_stats *stats)
+{
+ struct rtllib_network *target;
+ struct rtllib_network *oldest = NULL;
+ struct rtllib_info_element *info_element = &beacon->info_element[0];
+ unsigned long flags;
+ short renew;
+ struct rtllib_network *network = kzalloc(sizeof(struct rtllib_network),
+ GFP_ATOMIC);
+
+ if (!network)
+ return;
+
+ RTLLIB_DEBUG_SCAN(
+ "'%s' ( %pM ): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
+ escape_essid(info_element->data, info_element->len),
+ beacon->header.addr3,
+ (beacon->capability & (1<<0xf)) ? '1' : '0',
+ (beacon->capability & (1<<0xe)) ? '1' : '0',
+ (beacon->capability & (1<<0xd)) ? '1' : '0',
+ (beacon->capability & (1<<0xc)) ? '1' : '0',
+ (beacon->capability & (1<<0xb)) ? '1' : '0',
+ (beacon->capability & (1<<0xa)) ? '1' : '0',
+ (beacon->capability & (1<<0x9)) ? '1' : '0',
+ (beacon->capability & (1<<0x8)) ? '1' : '0',
+ (beacon->capability & (1<<0x7)) ? '1' : '0',
+ (beacon->capability & (1<<0x6)) ? '1' : '0',
+ (beacon->capability & (1<<0x5)) ? '1' : '0',
+ (beacon->capability & (1<<0x4)) ? '1' : '0',
+ (beacon->capability & (1<<0x3)) ? '1' : '0',
+ (beacon->capability & (1<<0x2)) ? '1' : '0',
+ (beacon->capability & (1<<0x1)) ? '1' : '0',
+ (beacon->capability & (1<<0x0)) ? '1' : '0');
+
+ if (rtllib_network_init(ieee, beacon, network, stats)) {
+ RTLLIB_DEBUG_SCAN("Dropped '%s' ( %pM) via %s.\n",
+ escape_essid(info_element->data,
+ info_element->len),
+ beacon->header.addr3,
+ WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ RTLLIB_STYPE_PROBE_RESP ?
+ "PROBE RESPONSE" : "BEACON");
+ goto free_network;
+ }
+
+
+ if (!IsLegalChannel(ieee, network->channel))
+ goto free_network;
+
+ if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ RTLLIB_STYPE_PROBE_RESP) {
+ if (IsPassiveChannel(ieee, network->channel)) {
+ printk(KERN_INFO "GetScanInfo(): For Global Domain, "
+ "filter probe response at channel(%d).\n",
+ network->channel);
+ goto free_network;
+ }
+ }
+
+ /* The network parsed correctly -- so now we scan our known networks
+ * to see if we can find it in our list.
+ *
+ * NOTE: This search is definitely not optimized. Once its doing
+ * the "right thing" we'll optimize it for efficiency if
+ * necessary */
+
+ /* Search for this entry in the list and update it if it is
+ * already there. */
+
+ spin_lock_irqsave(&ieee->lock, flags);
+ if (is_same_network(&ieee->current_network, network,
+ (network->ssid_len ? 1 : 0))) {
+ update_network(&ieee->current_network, network);
+ if ((ieee->current_network.mode == IEEE_N_24G ||
+ ieee->current_network.mode == IEEE_G)
+ && ieee->current_network.berp_info_valid) {
+ if (ieee->current_network.erp_value & ERP_UseProtection)
+ ieee->current_network.buseprotection = true;
+ else
+ ieee->current_network.buseprotection = false;
+ }
+ if (is_beacon(beacon->header.frame_ctl)) {
+ if (ieee->state >= RTLLIB_LINKED)
+ ieee->LinkDetectInfo.NumRecvBcnInPeriod++;
+ }
+ }
+ list_for_each_entry(target, &ieee->network_list, list) {
+ if (is_same_network(target, network,
+ (target->ssid_len ? 1 : 0)))
+ break;
+ if ((oldest == NULL) ||
+ (target->last_scanned < oldest->last_scanned))
+ oldest = target;
+ }
+
+ /* If we didn't find a match, then get a new network slot to initialize
+ * with this beacon's information */
+ if (&target->list == &ieee->network_list) {
+ if (list_empty(&ieee->network_free_list)) {
+ /* If there are no more slots, expire the oldest */
+ list_del(&oldest->list);
+ target = oldest;
+ RTLLIB_DEBUG_SCAN("Expired '%s' ( %pM) from "
+ "network list.\n",
+ escape_essid(target->ssid,
+ target->ssid_len),
+ target->bssid);
+ } else {
+ /* Otherwise just pull from the free list */
+ target = list_entry(ieee->network_free_list.next,
+ struct rtllib_network, list);
+ list_del(ieee->network_free_list.next);
+ }
+
+
+ RTLLIB_DEBUG_SCAN("Adding '%s' ( %pM) via %s.\n",
+ escape_essid(network->ssid,
+ network->ssid_len), network->bssid,
+ WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ RTLLIB_STYPE_PROBE_RESP ?
+ "PROBE RESPONSE" : "BEACON");
+ memcpy(target, network, sizeof(*target));
+ list_add_tail(&target->list, &ieee->network_list);
+ if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)
+ rtllib_softmac_new_net(ieee, network);
+ } else {
+ RTLLIB_DEBUG_SCAN("Updating '%s' ( %pM) via %s.\n",
+ escape_essid(target->ssid,
+ target->ssid_len), target->bssid,
+ WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ RTLLIB_STYPE_PROBE_RESP ?
+ "PROBE RESPONSE" : "BEACON");
+
+ /* we have an entry and we are going to update it. But this
+ * entry may be already expired. In this case we do the same
+ * as we found a new net and call the new_net handler
+ */
+ renew = !time_after(target->last_scanned + ieee->scan_age,
+ jiffies);
+ if ((!target->ssid_len) &&
+ (((network->ssid_len > 0) && (target->hidden_ssid_len == 0))
+ || ((ieee->current_network.ssid_len == network->ssid_len) &&
+ (strncmp(ieee->current_network.ssid, network->ssid,
+ network->ssid_len) == 0) &&
+ (ieee->state == RTLLIB_NOLINK))))
+ renew = 1;
+ update_network(target, network);
+ if (renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE))
+ rtllib_softmac_new_net(ieee, network);
+ }
+
+ spin_unlock_irqrestore(&ieee->lock, flags);
+ if (is_beacon(beacon->header.frame_ctl) &&
+ is_same_network(&ieee->current_network, network,
+ (network->ssid_len ? 1 : 0)) &&
+ (ieee->state == RTLLIB_LINKED)) {
+ if (ieee->handle_beacon != NULL)
+ ieee->handle_beacon(ieee->dev, beacon,
+ &ieee->current_network);
+ }
+free_network:
+ kfree(network);
+ return;
+}
+
+void rtllib_rx_mgt(struct rtllib_device *ieee,
+ struct sk_buff *skb,
+ struct rtllib_rx_stats *stats)
+{
+ struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data ;
+
+ if (WLAN_FC_GET_STYPE(header->frame_ctl) != RTLLIB_STYPE_PROBE_RESP &&
+ WLAN_FC_GET_STYPE(header->frame_ctl) != RTLLIB_STYPE_BEACON)
+ ieee->last_rx_ps_time = jiffies;
+
+ switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
+
+ case RTLLIB_STYPE_BEACON:
+ RTLLIB_DEBUG_MGMT("received BEACON (%d)\n",
+ WLAN_FC_GET_STYPE(header->frame_ctl));
+ RTLLIB_DEBUG_SCAN("Beacon\n");
+ rtllib_process_probe_response(
+ ieee, (struct rtllib_probe_response *)header,
+ stats);
+
+ if (ieee->sta_sleep || (ieee->ps != RTLLIB_PS_DISABLED &&
+ ieee->iw_mode == IW_MODE_INFRA &&
+ ieee->state == RTLLIB_LINKED))
+ tasklet_schedule(&ieee->ps_task);
+
+ break;
+
+ case RTLLIB_STYPE_PROBE_RESP:
+ RTLLIB_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
+ WLAN_FC_GET_STYPE(header->frame_ctl));
+ RTLLIB_DEBUG_SCAN("Probe response\n");
+ rtllib_process_probe_response(ieee,
+ (struct rtllib_probe_response *)header, stats);
+ break;
+ case RTLLIB_STYPE_PROBE_REQ:
+ RTLLIB_DEBUG_MGMT("received PROBE RESQUEST (%d)\n",
+ WLAN_FC_GET_STYPE(header->frame_ctl));
+ RTLLIB_DEBUG_SCAN("Probe request\n");
+ if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
+ ((ieee->iw_mode == IW_MODE_ADHOC ||
+ ieee->iw_mode == IW_MODE_MASTER) &&
+ ieee->state == RTLLIB_LINKED))
+ rtllib_rx_probe_rq(ieee, skb);
+ break;
+ }
+}
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
new file mode 100644
index 00000000000..b5086850f0d
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -0,0 +1,3741 @@
+/* IEEE 802.11 SoftMAC layer
+ * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ *
+ * Mostly extracted from the rtl8180-sa2400 driver for the
+ * in-kernel generic ieee802.11 stack.
+ *
+ * Few lines might be stolen from other part of the rtllib
+ * stack. Copyright who own it's copyright
+ *
+ * WPA code stolen from the ipw2200 driver.
+ * Copyright who own it's copyright.
+ *
+ * released under the GPL
+ */
+
+
+#include "rtllib.h"
+#include "rtl_core.h"
+
+#include <linux/random.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include "dot11d.h"
+
+short rtllib_is_54g(struct rtllib_network *net)
+{
+ return (net->rates_ex_len > 0) || (net->rates_len > 4);
+}
+
+short rtllib_is_shortslot(struct rtllib_network net)
+{
+ return net.capability & WLAN_CAPABILITY_SHORT_SLOT_TIME;
+}
+
+/* returns the total length needed for pleacing the RATE MFIE
+ * tag and the EXTENDED RATE MFIE tag if needed.
+ * It encludes two bytes per tag for the tag itself and its len
+ */
+static unsigned int rtllib_MFIE_rate_len(struct rtllib_device *ieee)
+{
+ unsigned int rate_len = 0;
+
+ if (ieee->modulation & RTLLIB_CCK_MODULATION)
+ rate_len = RTLLIB_CCK_RATE_LEN + 2;
+
+ if (ieee->modulation & RTLLIB_OFDM_MODULATION)
+
+ rate_len += RTLLIB_OFDM_RATE_LEN + 2;
+
+ return rate_len;
+}
+
+/* pleace the MFIE rate, tag to the memory (double) poined.
+ * Then it updates the pointer so that
+ * it points after the new MFIE tag added.
+ */
+static void rtllib_MFIE_Brate(struct rtllib_device *ieee, u8 **tag_p)
+{
+ u8 *tag = *tag_p;
+
+ if (ieee->modulation & RTLLIB_CCK_MODULATION) {
+ *tag++ = MFIE_TYPE_RATES;
+ *tag++ = 4;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_1MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_2MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_5MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_11MB;
+ }
+
+ /* We may add an option for custom rates that specific HW
+ * might support */
+ *tag_p = tag;
+}
+
+static void rtllib_MFIE_Grate(struct rtllib_device *ieee, u8 **tag_p)
+{
+ u8 *tag = *tag_p;
+
+ if (ieee->modulation & RTLLIB_OFDM_MODULATION) {
+ *tag++ = MFIE_TYPE_RATES_EX;
+ *tag++ = 8;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_6MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_9MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_12MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_18MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_24MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_36MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_48MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_54MB;
+ }
+ /* We may add an option for custom rates that specific HW might
+ * support */
+ *tag_p = tag;
+}
+
+static void rtllib_WMM_Info(struct rtllib_device *ieee, u8 **tag_p)
+{
+ u8 *tag = *tag_p;
+
+ *tag++ = MFIE_TYPE_GENERIC;
+ *tag++ = 7;
+ *tag++ = 0x00;
+ *tag++ = 0x50;
+ *tag++ = 0xf2;
+ *tag++ = 0x02;
+ *tag++ = 0x00;
+ *tag++ = 0x01;
+ *tag++ = MAX_SP_Len;
+ *tag_p = tag;
+}
+
+void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p)
+{
+ u8 *tag = *tag_p;
+
+ *tag++ = MFIE_TYPE_GENERIC;
+ *tag++ = 7;
+ *tag++ = 0x00;
+ *tag++ = 0xe0;
+ *tag++ = 0x4c;
+ *tag++ = 0x01;
+ *tag++ = 0x02;
+ *tag++ = 0x11;
+ *tag++ = 0x00;
+
+ *tag_p = tag;
+ printk(KERN_ALERT "This is enable turbo mode IE process\n");
+}
+
+static void enqueue_mgmt(struct rtllib_device *ieee, struct sk_buff *skb)
+{
+ int nh;
+ nh = (ieee->mgmt_queue_head + 1) % MGMT_QUEUE_NUM;
+
+/*
+ * if the queue is full but we have newer frames then
+ * just overwrites the oldest.
+ *
+ * if (nh == ieee->mgmt_queue_tail)
+ * return -1;
+ */
+ ieee->mgmt_queue_head = nh;
+ ieee->mgmt_queue_ring[nh] = skb;
+
+}
+
+static struct sk_buff *dequeue_mgmt(struct rtllib_device *ieee)
+{
+ struct sk_buff *ret;
+
+ if (ieee->mgmt_queue_tail == ieee->mgmt_queue_head)
+ return NULL;
+
+ ret = ieee->mgmt_queue_ring[ieee->mgmt_queue_tail];
+
+ ieee->mgmt_queue_tail =
+ (ieee->mgmt_queue_tail+1) % MGMT_QUEUE_NUM;
+
+ return ret;
+}
+
+static void init_mgmt_queue(struct rtllib_device *ieee)
+{
+ ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
+}
+
+
+u8
+MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee)
+{
+ u16 i;
+ u8 QueryRate = 0;
+ u8 BasicRate;
+
+
+ for (i = 0; i < ieee->current_network.rates_len; i++) {
+ BasicRate = ieee->current_network.rates[i]&0x7F;
+ if (!rtllib_is_cck_rate(BasicRate)) {
+ if (QueryRate == 0) {
+ QueryRate = BasicRate;
+ } else {
+ if (BasicRate < QueryRate)
+ QueryRate = BasicRate;
+ }
+ }
+ }
+
+ if (QueryRate == 0) {
+ QueryRate = 12;
+ printk(KERN_INFO "No BasicRate found!!\n");
+ }
+ return QueryRate;
+}
+
+u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+ u8 rate;
+
+ if (pHTInfo->IOTAction & HT_IOT_ACT_MGNT_USE_CCK_6M)
+ rate = 0x0c;
+ else
+ rate = ieee->basic_rate & 0x7f;
+
+ if (rate == 0) {
+ if (ieee->mode == IEEE_A ||
+ ieee->mode == IEEE_N_5G ||
+ (ieee->mode == IEEE_N_24G && !pHTInfo->bCurSuppCCK))
+ rate = 0x0c;
+ else
+ rate = 0x02;
+ }
+
+ return rate;
+}
+
+inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
+{
+ unsigned long flags;
+ short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
+ struct rtllib_hdr_3addr *header =
+ (struct rtllib_hdr_3addr *) skb->data;
+
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ /* called with 2nd param 0, no mgmt lock required */
+ rtllib_sta_wakeup(ieee, 0);
+
+ if (header->frame_ctl == RTLLIB_STYPE_BEACON)
+ tcb_desc->queue_index = BEACON_QUEUE;
+ else
+ tcb_desc->queue_index = MGNT_QUEUE;
+
+ if (ieee->disable_mgnt_queue)
+ tcb_desc->queue_index = HIGH_QUEUE;
+
+ tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
+ tcb_desc->RATRIndex = 7;
+ tcb_desc->bTxDisableRateFallBack = 1;
+ tcb_desc->bTxUseDriverAssingedRate = 1;
+ if (single) {
+ if (ieee->queue_stop) {
+ enqueue_mgmt(ieee, skb);
+ } else {
+ header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
+
+ if (ieee->seq_ctrl[0] == 0xFFF)
+ ieee->seq_ctrl[0] = 0;
+ else
+ ieee->seq_ctrl[0]++;
+
+ /* avoid watchdog triggers */
+ ieee->softmac_data_hard_start_xmit(skb, ieee->dev,
+ ieee->basic_rate);
+ }
+
+ spin_unlock_irqrestore(&ieee->lock, flags);
+ } else {
+ spin_unlock_irqrestore(&ieee->lock, flags);
+ spin_lock_irqsave(&ieee->mgmt_tx_lock, flags);
+
+ header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+
+ if (ieee->seq_ctrl[0] == 0xFFF)
+ ieee->seq_ctrl[0] = 0;
+ else
+ ieee->seq_ctrl[0]++;
+
+ /* check wether the managed packet queued greater than 5 */
+ if (!ieee->check_nic_enough_desc(ieee->dev, tcb_desc->queue_index) ||
+ (skb_queue_len(&ieee->skb_waitQ[tcb_desc->queue_index]) != 0) ||
+ (ieee->queue_stop)) {
+ /* insert the skb packet to the management queue */
+ /* as for the completion function, it does not need
+ * to check it any more.
+ * */
+ printk(KERN_INFO "%s():insert to waitqueue, queue_index"
+ ":%d!\n", __func__, tcb_desc->queue_index);
+ skb_queue_tail(&ieee->skb_waitQ[tcb_desc->queue_index],
+ skb);
+ } else {
+ ieee->softmac_hard_start_xmit(skb, ieee->dev);
+ }
+ spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags);
+ }
+}
+
+inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
+ struct rtllib_device *ieee)
+{
+ short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
+ struct rtllib_hdr_3addr *header =
+ (struct rtllib_hdr_3addr *) skb->data;
+ u16 fc, type, stype;
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
+
+ fc = header->frame_ctl;
+ type = WLAN_FC_GET_TYPE(fc);
+ stype = WLAN_FC_GET_STYPE(fc);
+
+
+ if (stype != RTLLIB_STYPE_PSPOLL)
+ tcb_desc->queue_index = MGNT_QUEUE;
+ else
+ tcb_desc->queue_index = HIGH_QUEUE;
+
+ if (ieee->disable_mgnt_queue)
+ tcb_desc->queue_index = HIGH_QUEUE;
+
+
+ tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
+ tcb_desc->RATRIndex = 7;
+ tcb_desc->bTxDisableRateFallBack = 1;
+ tcb_desc->bTxUseDriverAssingedRate = 1;
+ if (single) {
+ if (type != RTLLIB_FTYPE_CTL) {
+ header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+
+ if (ieee->seq_ctrl[0] == 0xFFF)
+ ieee->seq_ctrl[0] = 0;
+ else
+ ieee->seq_ctrl[0]++;
+
+ }
+ /* avoid watchdog triggers */
+ ieee->softmac_data_hard_start_xmit(skb, ieee->dev,
+ ieee->basic_rate);
+
+ } else {
+ if (type != RTLLIB_FTYPE_CTL) {
+ header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+
+ if (ieee->seq_ctrl[0] == 0xFFF)
+ ieee->seq_ctrl[0] = 0;
+ else
+ ieee->seq_ctrl[0]++;
+ }
+ ieee->softmac_hard_start_xmit(skb, ieee->dev);
+
+ }
+}
+
+inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
+{
+ unsigned int len, rate_len;
+ u8 *tag;
+ struct sk_buff *skb;
+ struct rtllib_probe_request *req;
+
+ len = ieee->current_network.ssid_len;
+
+ rate_len = rtllib_MFIE_rate_len(ieee);
+
+ skb = dev_alloc_skb(sizeof(struct rtllib_probe_request) +
+ 2 + len + rate_len + ieee->tx_headroom);
+
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, ieee->tx_headroom);
+
+ req = (struct rtllib_probe_request *) skb_put(skb,
+ sizeof(struct rtllib_probe_request));
+ req->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_PROBE_REQ);
+ req->header.duration_id = 0;
+
+ memset(req->header.addr1, 0xff, ETH_ALEN);
+ memcpy(req->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
+ memset(req->header.addr3, 0xff, ETH_ALEN);
+
+ tag = (u8 *) skb_put(skb, len + 2 + rate_len);
+
+ *tag++ = MFIE_TYPE_SSID;
+ *tag++ = len;
+ memcpy(tag, ieee->current_network.ssid, len);
+ tag += len;
+
+ rtllib_MFIE_Brate(ieee, &tag);
+ rtllib_MFIE_Grate(ieee, &tag);
+
+ return skb;
+}
+
+struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee);
+
+static void rtllib_send_beacon(struct rtllib_device *ieee)
+{
+ struct sk_buff *skb;
+ if (!ieee->ieee_up)
+ return;
+ skb = rtllib_get_beacon_(ieee);
+
+ if (skb) {
+ softmac_mgmt_xmit(skb, ieee);
+ ieee->softmac_stats.tx_beacons++;
+ }
+
+ if (ieee->beacon_txing && ieee->ieee_up)
+ mod_timer(&ieee->beacon_timer, jiffies +
+ (MSECS(ieee->current_network.beacon_interval - 5)));
+}
+
+
+static void rtllib_send_beacon_cb(unsigned long _ieee)
+{
+ struct rtllib_device *ieee =
+ (struct rtllib_device *) _ieee;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ieee->beacon_lock, flags);
+ rtllib_send_beacon(ieee);
+ spin_unlock_irqrestore(&ieee->beacon_lock, flags);
+}
+
+/*
+ * Description:
+ * Enable network monitor mode, all rx packets will be received.
+ */
+void rtllib_EnableNetMonitorMode(struct net_device *dev,
+ bool bInitState)
+{
+ struct rtllib_device *ieee = netdev_priv_rsl(dev);
+
+ printk(KERN_INFO "========>Enter Monitor Mode\n");
+
+ ieee->AllowAllDestAddrHandler(dev, true, !bInitState);
+}
+
+
+/*
+ * Description:
+ * Disable network network monitor mode, only packets destinated to
+ * us will be received.
+ */
+void rtllib_DisableNetMonitorMode(struct net_device *dev,
+ bool bInitState)
+{
+ struct rtllib_device *ieee = netdev_priv_rsl(dev);
+
+ printk(KERN_INFO "========>Exit Monitor Mode\n");
+
+ ieee->AllowAllDestAddrHandler(dev, false, !bInitState);
+}
+
+
+/*
+ * Description:
+ * This enables the specialized promiscuous mode required by Intel.
+ * In this mode, Intel intends to hear traffics from/to other STAs in the
+ * same BSS. Therefore we don't have to disable checking BSSID and we only need
+ * to allow all dest. BUT: if we enable checking BSSID then we can't recv
+ * packets from other STA.
+ */
+void rtllib_EnableIntelPromiscuousMode(struct net_device *dev,
+ bool bInitState)
+{
+ bool bFilterOutNonAssociatedBSSID = false;
+
+ struct rtllib_device *ieee = netdev_priv_rsl(dev);
+
+ printk(KERN_INFO "========>Enter Intel Promiscuous Mode\n");
+
+ ieee->AllowAllDestAddrHandler(dev, true, !bInitState);
+ ieee->SetHwRegHandler(dev, HW_VAR_CECHK_BSSID,
+ (u8 *)&bFilterOutNonAssociatedBSSID);
+
+ ieee->bNetPromiscuousMode = true;
+}
+
+
+/*
+ * Description:
+ * This disables the specialized promiscuous mode required by Intel.
+ * See MgntEnableIntelPromiscuousMode for detail.
+ */
+void rtllib_DisableIntelPromiscuousMode(struct net_device *dev,
+ bool bInitState)
+{
+ bool bFilterOutNonAssociatedBSSID = true;
+
+ struct rtllib_device *ieee = netdev_priv_rsl(dev);
+
+ printk(KERN_INFO "========>Exit Intel Promiscuous Mode\n");
+
+ ieee->AllowAllDestAddrHandler(dev, false, !bInitState);
+ ieee->SetHwRegHandler(dev, HW_VAR_CECHK_BSSID,
+ (u8 *)&bFilterOutNonAssociatedBSSID);
+
+ ieee->bNetPromiscuousMode = false;
+}
+
+static void rtllib_send_probe(struct rtllib_device *ieee, u8 is_mesh)
+{
+ struct sk_buff *skb;
+ skb = rtllib_probe_req(ieee);
+ if (skb) {
+ softmac_mgmt_xmit(skb, ieee);
+ ieee->softmac_stats.tx_probe_rq++;
+ }
+}
+
+
+void rtllib_send_probe_requests(struct rtllib_device *ieee, u8 is_mesh)
+{
+ if (ieee->active_scan && (ieee->softmac_features &
+ IEEE_SOFTMAC_PROBERQ)) {
+ rtllib_send_probe(ieee, 0);
+ rtllib_send_probe(ieee, 0);
+ }
+}
+
+static void rtllib_softmac_hint11d_wq(void *data)
+{
+}
+
+void rtllib_update_active_chan_map(struct rtllib_device *ieee)
+{
+ memcpy(ieee->active_channel_map, GET_DOT11D_INFO(ieee)->channel_map,
+ MAX_CHANNEL_NUMBER+1);
+}
+
+/* this performs syncro scan blocking the caller until all channels
+ * in the allowed channel map has been checked.
+ */
+void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
+{
+ union iwreq_data wrqu;
+ short ch = 0;
+
+ rtllib_update_active_chan_map(ieee);
+
+ ieee->be_scan_inprogress = true;
+
+ down(&ieee->scan_sem);
+
+ while (1) {
+ do {
+ ch++;
+ if (ch > MAX_CHANNEL_NUMBER)
+ goto out; /* scan completed */
+ } while (!ieee->active_channel_map[ch]);
+
+ /* this fuction can be called in two situations
+ * 1- We have switched to ad-hoc mode and we are
+ * performing a complete syncro scan before conclude
+ * there are no interesting cell and to create a
+ * new one. In this case the link state is
+ * RTLLIB_NOLINK until we found an interesting cell.
+ * If so the ieee8021_new_net, called by the RX path
+ * will set the state to RTLLIB_LINKED, so we stop
+ * scanning
+ * 2- We are linked and the root uses run iwlist scan.
+ * So we switch to RTLLIB_LINKED_SCANNING to remember
+ * that we are still logically linked (not interested in
+ * new network events, despite for updating the net list,
+ * but we are temporarly 'unlinked' as the driver shall
+ * not filter RX frames and the channel is changing.
+ * So the only situation in witch are interested is to check
+ * if the state become LINKED because of the #1 situation
+ */
+
+ if (ieee->state == RTLLIB_LINKED)
+ goto out;
+ if (ieee->sync_scan_hurryup) {
+ printk(KERN_INFO "============>sync_scan_hurryup out\n");
+ goto out;
+ }
+
+ ieee->set_chan(ieee->dev, ch);
+ if (ieee->active_channel_map[ch] == 1)
+ rtllib_send_probe_requests(ieee, 0);
+
+ /* this prevent excessive time wait when we
+ * need to wait for a syncro scan to end..
+ */
+ msleep_interruptible_rsl(RTLLIB_SOFTMAC_SCAN_TIME);
+ }
+out:
+ ieee->actscanning = false;
+ ieee->sync_scan_hurryup = 0;
+
+ if (ieee->state >= RTLLIB_LINKED) {
+ if (IS_DOT11D_ENABLE(ieee))
+ DOT11D_ScanComplete(ieee);
+ }
+ up(&ieee->scan_sem);
+
+ ieee->be_scan_inprogress = false;
+
+ memset(&wrqu, 0, sizeof(wrqu));
+ wireless_send_event(ieee->dev, SIOCGIWSCAN, &wrqu, NULL);
+}
+
+static void rtllib_softmac_scan_wq(void *data)
+{
+ struct rtllib_device *ieee = container_of_dwork_rsl(data,
+ struct rtllib_device, softmac_scan_wq);
+ u8 last_channel = ieee->current_network.channel;
+
+ rtllib_update_active_chan_map(ieee);
+
+ if (!ieee->ieee_up)
+ return;
+ if (rtllib_act_scanning(ieee, true) == true)
+ return;
+
+ down(&ieee->scan_sem);
+
+ if (ieee->eRFPowerState == eRfOff) {
+ printk(KERN_INFO "======>%s():rf state is eRfOff, return\n",
+ __func__);
+ goto out1;
+ }
+
+ do {
+ ieee->current_network.channel =
+ (ieee->current_network.channel + 1) %
+ MAX_CHANNEL_NUMBER;
+ if (ieee->scan_watch_dog++ > MAX_CHANNEL_NUMBER) {
+ if (!ieee->active_channel_map[ieee->current_network.channel])
+ ieee->current_network.channel = 6;
+ goto out; /* no good chans */
+ }
+ } while (!ieee->active_channel_map[ieee->current_network.channel]);
+
+ if (ieee->scanning_continue == 0)
+ goto out;
+
+ ieee->set_chan(ieee->dev, ieee->current_network.channel);
+
+ if (ieee->active_channel_map[ieee->current_network.channel] == 1)
+ rtllib_send_probe_requests(ieee, 0);
+
+ queue_delayed_work_rsl(ieee->wq, &ieee->softmac_scan_wq,
+ MSECS(RTLLIB_SOFTMAC_SCAN_TIME));
+
+ up(&ieee->scan_sem);
+ return;
+
+out:
+ if (IS_DOT11D_ENABLE(ieee))
+ DOT11D_ScanComplete(ieee);
+ ieee->current_network.channel = last_channel;
+
+out1:
+ ieee->actscanning = false;
+ ieee->scan_watch_dog = 0;
+ ieee->scanning_continue = 0;
+ up(&ieee->scan_sem);
+}
+
+
+
+static void rtllib_beacons_start(struct rtllib_device *ieee)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ieee->beacon_lock, flags);
+
+ ieee->beacon_txing = 1;
+ rtllib_send_beacon(ieee);
+
+ spin_unlock_irqrestore(&ieee->beacon_lock, flags);
+}
+
+static void rtllib_beacons_stop(struct rtllib_device *ieee)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ieee->beacon_lock, flags);
+
+ ieee->beacon_txing = 0;
+ del_timer_sync(&ieee->beacon_timer);
+
+ spin_unlock_irqrestore(&ieee->beacon_lock, flags);
+
+}
+
+
+void rtllib_stop_send_beacons(struct rtllib_device *ieee)
+{
+ if (ieee->stop_send_beacons)
+ ieee->stop_send_beacons(ieee->dev);
+ if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
+ rtllib_beacons_stop(ieee);
+}
+
+
+void rtllib_start_send_beacons(struct rtllib_device *ieee)
+{
+ if (ieee->start_send_beacons)
+ ieee->start_send_beacons(ieee->dev);
+ if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
+ rtllib_beacons_start(ieee);
+}
+
+
+static void rtllib_softmac_stop_scan(struct rtllib_device *ieee)
+{
+ down(&ieee->scan_sem);
+ ieee->scan_watch_dog = 0;
+ if (ieee->scanning_continue == 1) {
+ ieee->scanning_continue = 0;
+ ieee->actscanning = 0;
+
+ cancel_delayed_work(&ieee->softmac_scan_wq);
+ }
+
+ up(&ieee->scan_sem);
+}
+
+void rtllib_stop_scan(struct rtllib_device *ieee)
+{
+ if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
+ rtllib_softmac_stop_scan(ieee);
+ } else {
+ if (ieee->rtllib_stop_hw_scan)
+ ieee->rtllib_stop_hw_scan(ieee->dev);
+ }
+}
+
+void rtllib_stop_scan_syncro(struct rtllib_device *ieee)
+{
+ if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
+ ieee->sync_scan_hurryup = 1;
+ } else {
+ if (ieee->rtllib_stop_hw_scan)
+ ieee->rtllib_stop_hw_scan(ieee->dev);
+ }
+}
+
+bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan)
+{
+ if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
+ if (sync_scan)
+ return ieee->be_scan_inprogress;
+ else
+ return ieee->actscanning || ieee->be_scan_inprogress;
+ } else {
+ return test_bit(STATUS_SCANNING, &ieee->status);
+ }
+}
+
+/* called with ieee->lock held */
+static void rtllib_start_scan(struct rtllib_device *ieee)
+{
+ RT_TRACE(COMP_DBG, "===>%s()\n", __func__);
+ if (ieee->rtllib_ips_leave_wq != NULL)
+ ieee->rtllib_ips_leave_wq(ieee->dev);
+
+ if (IS_DOT11D_ENABLE(ieee)) {
+ if (IS_COUNTRY_IE_VALID(ieee))
+ RESET_CIE_WATCHDOG(ieee);
+ }
+ if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
+ if (ieee->scanning_continue == 0) {
+ ieee->actscanning = true;
+ ieee->scanning_continue = 1;
+ queue_delayed_work_rsl(ieee->wq,
+ &ieee->softmac_scan_wq, 0);
+ }
+ } else {
+ if (ieee->rtllib_start_hw_scan)
+ ieee->rtllib_start_hw_scan(ieee->dev);
+ }
+}
+
+/* called with wx_sem held */
+void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
+{
+ if (IS_DOT11D_ENABLE(ieee)) {
+ if (IS_COUNTRY_IE_VALID(ieee))
+ RESET_CIE_WATCHDOG(ieee);
+ }
+ ieee->sync_scan_hurryup = 0;
+ if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
+ rtllib_softmac_scan_syncro(ieee, is_mesh);
+ } else {
+ if (ieee->rtllib_start_hw_scan)
+ ieee->rtllib_start_hw_scan(ieee->dev);
+ }
+}
+
+inline struct sk_buff *rtllib_authentication_req(struct rtllib_network *beacon,
+ struct rtllib_device *ieee, int challengelen, u8 *daddr)
+{
+ struct sk_buff *skb;
+ struct rtllib_authentication *auth;
+ int len = 0;
+ len = sizeof(struct rtllib_authentication) + challengelen +
+ ieee->tx_headroom + 4;
+ skb = dev_alloc_skb(len);
+
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, ieee->tx_headroom);
+
+ auth = (struct rtllib_authentication *)
+ skb_put(skb, sizeof(struct rtllib_authentication));
+
+ auth->header.frame_ctl = RTLLIB_STYPE_AUTH;
+ if (challengelen)
+ auth->header.frame_ctl |= RTLLIB_FCTL_WEP;
+
+ auth->header.duration_id = 0x013a;
+ memcpy(auth->header.addr1, beacon->bssid, ETH_ALEN);
+ memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
+ memcpy(auth->header.addr3, beacon->bssid, ETH_ALEN);
+ if (ieee->auth_mode == 0)
+ auth->algorithm = WLAN_AUTH_OPEN;
+ else if (ieee->auth_mode == 1)
+ auth->algorithm = WLAN_AUTH_SHARED_KEY;
+ else if (ieee->auth_mode == 2)
+ auth->algorithm = WLAN_AUTH_OPEN;
+ auth->transaction = cpu_to_le16(ieee->associate_seq);
+ ieee->associate_seq++;
+
+ auth->status = cpu_to_le16(WLAN_STATUS_SUCCESS);
+
+ return skb;
+}
+
+static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
+{
+ u8 *tag;
+ int beacon_size;
+ struct rtllib_probe_response *beacon_buf;
+ struct sk_buff *skb = NULL;
+ int encrypt;
+ int atim_len, erp_len;
+ struct rtllib_crypt_data *crypt;
+
+ char *ssid = ieee->current_network.ssid;
+ int ssid_len = ieee->current_network.ssid_len;
+ int rate_len = ieee->current_network.rates_len+2;
+ int rate_ex_len = ieee->current_network.rates_ex_len;
+ int wpa_ie_len = ieee->wpa_ie_len;
+ u8 erpinfo_content = 0;
+
+ u8 *tmp_ht_cap_buf = NULL;
+ u8 tmp_ht_cap_len = 0;
+ u8 *tmp_ht_info_buf = NULL;
+ u8 tmp_ht_info_len = 0;
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+ u8 *tmp_generic_ie_buf = NULL;
+ u8 tmp_generic_ie_len = 0;
+
+ if (rate_ex_len > 0)
+ rate_ex_len += 2;
+
+ if (ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
+ atim_len = 4;
+ else
+ atim_len = 0;
+
+ if ((ieee->current_network.mode == IEEE_G) ||
+ (ieee->current_network.mode == IEEE_N_24G &&
+ ieee->pHTInfo->bCurSuppCCK)) {
+ erp_len = 3;
+ erpinfo_content = 0;
+ if (ieee->current_network.buseprotection)
+ erpinfo_content |= ERP_UseProtection;
+ } else
+ erp_len = 0;
+
+ crypt = ieee->crypt[ieee->tx_keyidx];
+ encrypt = ieee->host_encrypt && crypt && crypt->ops &&
+ ((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len));
+ if (ieee->pHTInfo->bCurrentHTSupport) {
+ tmp_ht_cap_buf = (u8 *) &(ieee->pHTInfo->SelfHTCap);
+ tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
+ tmp_ht_info_buf = (u8 *) &(ieee->pHTInfo->SelfHTInfo);
+ tmp_ht_info_len = sizeof(ieee->pHTInfo->SelfHTInfo);
+ HTConstructCapabilityElement(ieee, tmp_ht_cap_buf,
+ &tmp_ht_cap_len, encrypt, false);
+ HTConstructInfoElement(ieee, tmp_ht_info_buf, &tmp_ht_info_len,
+ encrypt);
+
+ if (pHTInfo->bRegRT2RTAggregation) {
+ tmp_generic_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
+ tmp_generic_ie_len =
+ sizeof(ieee->pHTInfo->szRT2RTAggBuffer);
+ HTConstructRT2RTAggElement(ieee, tmp_generic_ie_buf,
+ &tmp_generic_ie_len);
+ }
+ }
+
+ beacon_size = sizeof(struct rtllib_probe_response)+2+
+ ssid_len + 3 + rate_len + rate_ex_len + atim_len + erp_len
+ + wpa_ie_len + ieee->tx_headroom;
+ skb = dev_alloc_skb(beacon_size);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, ieee->tx_headroom);
+
+ beacon_buf = (struct rtllib_probe_response *) skb_put(skb,
+ (beacon_size - ieee->tx_headroom));
+ memcpy(beacon_buf->header.addr1, dest, ETH_ALEN);
+ memcpy(beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
+ memcpy(beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
+
+ beacon_buf->header.duration_id = 0;
+ beacon_buf->beacon_interval =
+ cpu_to_le16(ieee->current_network.beacon_interval);
+ beacon_buf->capability =
+ cpu_to_le16(ieee->current_network.capability &
+ WLAN_CAPABILITY_IBSS);
+ beacon_buf->capability |=
+ cpu_to_le16(ieee->current_network.capability &
+ WLAN_CAPABILITY_SHORT_PREAMBLE);
+
+ if (ieee->short_slot && (ieee->current_network.capability &
+ WLAN_CAPABILITY_SHORT_SLOT_TIME))
+ cpu_to_le16((beacon_buf->capability |=
+ WLAN_CAPABILITY_SHORT_SLOT_TIME));
+
+ crypt = ieee->crypt[ieee->tx_keyidx];
+ if (encrypt)
+ beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
+
+
+ beacon_buf->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_PROBE_RESP);
+ beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
+ beacon_buf->info_element[0].len = ssid_len;
+
+ tag = (u8 *) beacon_buf->info_element[0].data;
+
+ memcpy(tag, ssid, ssid_len);
+
+ tag += ssid_len;
+
+ *(tag++) = MFIE_TYPE_RATES;
+ *(tag++) = rate_len-2;
+ memcpy(tag, ieee->current_network.rates, rate_len-2);
+ tag += rate_len-2;
+
+ *(tag++) = MFIE_TYPE_DS_SET;
+ *(tag++) = 1;
+ *(tag++) = ieee->current_network.channel;
+
+ if (atim_len) {
+ u16 val16;
+ *(tag++) = MFIE_TYPE_IBSS_SET;
+ *(tag++) = 2;
+ val16 = cpu_to_le16(ieee->current_network.atim_window);
+ memcpy((u8 *)tag, (u8 *)&val16, 2);
+ tag += 2;
+ }
+
+ if (erp_len) {
+ *(tag++) = MFIE_TYPE_ERP;
+ *(tag++) = 1;
+ *(tag++) = erpinfo_content;
+ }
+ if (rate_ex_len) {
+ *(tag++) = MFIE_TYPE_RATES_EX;
+ *(tag++) = rate_ex_len-2;
+ memcpy(tag, ieee->current_network.rates_ex, rate_ex_len-2);
+ tag += rate_ex_len-2;
+ }
+
+ if (wpa_ie_len) {
+ if (ieee->iw_mode == IW_MODE_ADHOC)
+ memcpy(&ieee->wpa_ie[14], &ieee->wpa_ie[8], 4);
+ memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
+ tag += ieee->wpa_ie_len;
+ }
+ return skb;
+}
+
+static struct sk_buff *rtllib_assoc_resp(struct rtllib_device *ieee, u8 *dest)
+{
+ struct sk_buff *skb;
+ u8 *tag;
+
+ struct rtllib_crypt_data *crypt;
+ struct rtllib_assoc_response_frame *assoc;
+ short encrypt;
+
+ unsigned int rate_len = rtllib_MFIE_rate_len(ieee);
+ int len = sizeof(struct rtllib_assoc_response_frame) + rate_len +
+ ieee->tx_headroom;
+
+ skb = dev_alloc_skb(len);
+
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, ieee->tx_headroom);
+
+ assoc = (struct rtllib_assoc_response_frame *)
+ skb_put(skb, sizeof(struct rtllib_assoc_response_frame));
+
+ assoc->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_ASSOC_RESP);
+ memcpy(assoc->header.addr1, dest, ETH_ALEN);
+ memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
+ memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
+ assoc->capability = cpu_to_le16(ieee->iw_mode == IW_MODE_MASTER ?
+ WLAN_CAPABILITY_ESS : WLAN_CAPABILITY_IBSS);
+
+
+ if (ieee->short_slot)
+ assoc->capability |=
+ cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
+
+ if (ieee->host_encrypt)
+ crypt = ieee->crypt[ieee->tx_keyidx];
+ else
+ crypt = NULL;
+
+ encrypt = (crypt && crypt->ops);
+
+ if (encrypt)
+ assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
+
+ assoc->status = 0;
+ assoc->aid = cpu_to_le16(ieee->assoc_id);
+ if (ieee->assoc_id == 0x2007)
+ ieee->assoc_id = 0;
+ else
+ ieee->assoc_id++;
+
+ tag = (u8 *) skb_put(skb, rate_len);
+ rtllib_MFIE_Brate(ieee, &tag);
+ rtllib_MFIE_Grate(ieee, &tag);
+
+ return skb;
+}
+
+static struct sk_buff *rtllib_auth_resp(struct rtllib_device *ieee, int status,
+ u8 *dest)
+{
+ struct sk_buff *skb = NULL;
+ struct rtllib_authentication *auth;
+ int len = ieee->tx_headroom + sizeof(struct rtllib_authentication) + 1;
+ skb = dev_alloc_skb(len);
+ if (!skb)
+ return NULL;
+
+ skb->len = sizeof(struct rtllib_authentication);
+
+ skb_reserve(skb, ieee->tx_headroom);
+
+ auth = (struct rtllib_authentication *)
+ skb_put(skb, sizeof(struct rtllib_authentication));
+
+ auth->status = cpu_to_le16(status);
+ auth->transaction = cpu_to_le16(2);
+ auth->algorithm = cpu_to_le16(WLAN_AUTH_OPEN);
+
+ memcpy(auth->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
+ memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
+ memcpy(auth->header.addr1, dest, ETH_ALEN);
+ auth->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_AUTH);
+ return skb;
+
+
+}
+
+static struct sk_buff *rtllib_null_func(struct rtllib_device *ieee, short pwr)
+{
+ struct sk_buff *skb;
+ struct rtllib_hdr_3addr *hdr;
+
+ skb = dev_alloc_skb(sizeof(struct rtllib_hdr_3addr)+ieee->tx_headroom);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, ieee->tx_headroom);
+
+ hdr = (struct rtllib_hdr_3addr *)skb_put(skb,
+ sizeof(struct rtllib_hdr_3addr));
+
+ memcpy(hdr->addr1, ieee->current_network.bssid, ETH_ALEN);
+ memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
+ memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN);
+
+ hdr->frame_ctl = cpu_to_le16(RTLLIB_FTYPE_DATA |
+ RTLLIB_STYPE_NULLFUNC | RTLLIB_FCTL_TODS |
+ (pwr ? RTLLIB_FCTL_PM : 0));
+
+ return skb;
+
+
+}
+
+static struct sk_buff *rtllib_pspoll_func(struct rtllib_device *ieee)
+{
+ struct sk_buff *skb;
+ struct rtllib_pspoll_hdr *hdr;
+
+ skb = dev_alloc_skb(sizeof(struct rtllib_pspoll_hdr)+ieee->tx_headroom);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, ieee->tx_headroom);
+
+ hdr = (struct rtllib_pspoll_hdr *)skb_put(skb,
+ sizeof(struct rtllib_pspoll_hdr));
+
+ memcpy(hdr->bssid, ieee->current_network.bssid, ETH_ALEN);
+ memcpy(hdr->ta, ieee->dev->dev_addr, ETH_ALEN);
+
+ hdr->aid = cpu_to_le16(ieee->assoc_id | 0xc000);
+ hdr->frame_ctl = cpu_to_le16(RTLLIB_FTYPE_CTL | RTLLIB_STYPE_PSPOLL |
+ RTLLIB_FCTL_PM);
+
+ return skb;
+
+}
+
+static void rtllib_resp_to_assoc_rq(struct rtllib_device *ieee, u8 *dest)
+{
+ struct sk_buff *buf = rtllib_assoc_resp(ieee, dest);
+
+ if (buf)
+ softmac_mgmt_xmit(buf, ieee);
+}
+
+
+static void rtllib_resp_to_auth(struct rtllib_device *ieee, int s, u8 *dest)
+{
+ struct sk_buff *buf = rtllib_auth_resp(ieee, s, dest);
+
+ if (buf)
+ softmac_mgmt_xmit(buf, ieee);
+}
+
+
+static void rtllib_resp_to_probe(struct rtllib_device *ieee, u8 *dest)
+{
+
+ struct sk_buff *buf = rtllib_probe_resp(ieee, dest);
+ if (buf)
+ softmac_mgmt_xmit(buf, ieee);
+}
+
+
+inline int SecIsInPMKIDList(struct rtllib_device *ieee, u8 *bssid)
+{
+ int i = 0;
+
+ do {
+ if ((ieee->PMKIDList[i].bUsed) &&
+ (memcmp(ieee->PMKIDList[i].Bssid, bssid, ETH_ALEN) == 0))
+ break;
+ else
+ i++;
+ } while (i < NUM_PMKID_CACHE);
+
+ if (i == NUM_PMKID_CACHE)
+ i = -1;
+ return i;
+}
+
+inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
+ struct rtllib_device *ieee)
+{
+ struct sk_buff *skb;
+ struct rtllib_assoc_request_frame *hdr;
+ u8 *tag, *ies;
+ int i;
+ u8 *ht_cap_buf = NULL;
+ u8 ht_cap_len = 0;
+ u8 *realtek_ie_buf = NULL;
+ u8 realtek_ie_len = 0;
+ int wpa_ie_len = ieee->wpa_ie_len;
+ int wps_ie_len = ieee->wps_ie_len;
+ unsigned int ckip_ie_len = 0;
+ unsigned int ccxrm_ie_len = 0;
+ unsigned int cxvernum_ie_len = 0;
+ struct rtllib_crypt_data *crypt;
+ int encrypt;
+ int PMKCacheIdx;
+
+ unsigned int rate_len = (beacon->rates_len ?
+ (beacon->rates_len + 2) : 0) +
+ (beacon->rates_ex_len ? (beacon->rates_ex_len) +
+ 2 : 0);
+
+ unsigned int wmm_info_len = beacon->qos_data.supported ? 9 : 0;
+ unsigned int turbo_info_len = beacon->Turbo_Enable ? 9 : 0;
+
+ int len = 0;
+ crypt = ieee->crypt[ieee->tx_keyidx];
+ if (crypt != NULL)
+ encrypt = ieee->host_encrypt && crypt && crypt->ops &&
+ ((0 == strcmp(crypt->ops->name, "WEP") ||
+ wpa_ie_len));
+ else
+ encrypt = 0;
+
+ if ((ieee->rtllib_ap_sec_type &&
+ (ieee->rtllib_ap_sec_type(ieee) & SEC_ALG_TKIP)) ||
+ (ieee->bForcedBgMode == true)) {
+ ieee->pHTInfo->bEnableHT = 0;
+ ieee->mode = WIRELESS_MODE_G;
+ }
+
+ if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
+ ht_cap_buf = (u8 *)&(ieee->pHTInfo->SelfHTCap);
+ ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
+ HTConstructCapabilityElement(ieee, ht_cap_buf, &ht_cap_len,
+ encrypt, true);
+ if (ieee->pHTInfo->bCurrentRT2RTAggregation) {
+ realtek_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
+ realtek_ie_len =
+ sizeof(ieee->pHTInfo->szRT2RTAggBuffer);
+ HTConstructRT2RTAggElement(ieee, realtek_ie_buf,
+ &realtek_ie_len);
+ }
+ }
+
+ if (beacon->bCkipSupported)
+ ckip_ie_len = 30+2;
+ if (beacon->bCcxRmEnable)
+ ccxrm_ie_len = 6+2;
+ if (beacon->BssCcxVerNumber >= 2)
+ cxvernum_ie_len = 5+2;
+
+ PMKCacheIdx = SecIsInPMKIDList(ieee, ieee->current_network.bssid);
+ if (PMKCacheIdx >= 0) {
+ wpa_ie_len += 18;
+ printk(KERN_INFO "[PMK cache]: WPA2 IE length: %x\n",
+ wpa_ie_len);
+ }
+ len = sizeof(struct rtllib_assoc_request_frame) + 2
+ + beacon->ssid_len
+ + rate_len
+ + wpa_ie_len
+ + wps_ie_len
+ + wmm_info_len
+ + turbo_info_len
+ + ht_cap_len
+ + realtek_ie_len
+ + ckip_ie_len
+ + ccxrm_ie_len
+ + cxvernum_ie_len
+ + ieee->tx_headroom;
+
+ skb = dev_alloc_skb(len);
+
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, ieee->tx_headroom);
+
+ hdr = (struct rtllib_assoc_request_frame *)
+ skb_put(skb, sizeof(struct rtllib_assoc_request_frame) + 2);
+
+
+ hdr->header.frame_ctl = RTLLIB_STYPE_ASSOC_REQ;
+ hdr->header.duration_id = 37;
+ memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
+ memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
+ memcpy(hdr->header.addr3, beacon->bssid, ETH_ALEN);
+
+ memcpy(ieee->ap_mac_addr, beacon->bssid, ETH_ALEN);
+
+ hdr->capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
+ if (beacon->capability & WLAN_CAPABILITY_PRIVACY)
+ hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
+
+ if (beacon->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
+
+ if (ieee->short_slot &&
+ (beacon->capability&WLAN_CAPABILITY_SHORT_SLOT_TIME))
+ hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
+
+
+ hdr->listen_interval = beacon->listen_interval;
+
+ hdr->info_element[0].id = MFIE_TYPE_SSID;
+
+ hdr->info_element[0].len = beacon->ssid_len;
+ tag = skb_put(skb, beacon->ssid_len);
+ memcpy(tag, beacon->ssid, beacon->ssid_len);
+
+ tag = skb_put(skb, rate_len);
+
+ if (beacon->rates_len) {
+ *tag++ = MFIE_TYPE_RATES;
+ *tag++ = beacon->rates_len;
+ for (i = 0; i < beacon->rates_len; i++)
+ *tag++ = beacon->rates[i];
+ }
+
+ if (beacon->rates_ex_len) {
+ *tag++ = MFIE_TYPE_RATES_EX;
+ *tag++ = beacon->rates_ex_len;
+ for (i = 0; i < beacon->rates_ex_len; i++)
+ *tag++ = beacon->rates_ex[i];
+ }
+
+ if (beacon->bCkipSupported) {
+ static u8 AironetIeOui[] = {0x00, 0x01, 0x66};
+ u8 CcxAironetBuf[30];
+ struct octet_string osCcxAironetIE;
+
+ memset(CcxAironetBuf, 0, 30);
+ osCcxAironetIE.Octet = CcxAironetBuf;
+ osCcxAironetIE.Length = sizeof(CcxAironetBuf);
+ memcpy(osCcxAironetIE.Octet, AironetIeOui,
+ sizeof(AironetIeOui));
+
+ osCcxAironetIE.Octet[IE_CISCO_FLAG_POSITION] |=
+ (SUPPORT_CKIP_PK|SUPPORT_CKIP_MIC);
+ tag = skb_put(skb, ckip_ie_len);
+ *tag++ = MFIE_TYPE_AIRONET;
+ *tag++ = osCcxAironetIE.Length;
+ memcpy(tag, osCcxAironetIE.Octet, osCcxAironetIE.Length);
+ tag += osCcxAironetIE.Length;
+ }
+
+ if (beacon->bCcxRmEnable) {
+ static u8 CcxRmCapBuf[] = {0x00, 0x40, 0x96, 0x01, 0x01, 0x00};
+ struct octet_string osCcxRmCap;
+
+ osCcxRmCap.Octet = CcxRmCapBuf;
+ osCcxRmCap.Length = sizeof(CcxRmCapBuf);
+ tag = skb_put(skb, ccxrm_ie_len);
+ *tag++ = MFIE_TYPE_GENERIC;
+ *tag++ = osCcxRmCap.Length;
+ memcpy(tag, osCcxRmCap.Octet, osCcxRmCap.Length);
+ tag += osCcxRmCap.Length;
+ }
+
+ if (beacon->BssCcxVerNumber >= 2) {
+ u8 CcxVerNumBuf[] = {0x00, 0x40, 0x96, 0x03, 0x00};
+ struct octet_string osCcxVerNum;
+ CcxVerNumBuf[4] = beacon->BssCcxVerNumber;
+ osCcxVerNum.Octet = CcxVerNumBuf;
+ osCcxVerNum.Length = sizeof(CcxVerNumBuf);
+ tag = skb_put(skb, cxvernum_ie_len);
+ *tag++ = MFIE_TYPE_GENERIC;
+ *tag++ = osCcxVerNum.Length;
+ memcpy(tag, osCcxVerNum.Octet, osCcxVerNum.Length);
+ tag += osCcxVerNum.Length;
+ }
+ if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
+ if (ieee->pHTInfo->ePeerHTSpecVer != HT_SPEC_VER_EWC) {
+ tag = skb_put(skb, ht_cap_len);
+ *tag++ = MFIE_TYPE_HT_CAP;
+ *tag++ = ht_cap_len - 2;
+ memcpy(tag, ht_cap_buf, ht_cap_len - 2);
+ tag += ht_cap_len - 2;
+ }
+ }
+
+ if (wpa_ie_len) {
+ tag = skb_put(skb, ieee->wpa_ie_len);
+ memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
+
+ if (PMKCacheIdx >= 0) {
+ tag = skb_put(skb, 18);
+ *tag = 1;
+ *(tag + 1) = 0;
+ memcpy((tag + 2), &ieee->PMKIDList[PMKCacheIdx].PMKID,
+ 16);
+ }
+ }
+ if (wmm_info_len) {
+ tag = skb_put(skb, wmm_info_len);
+ rtllib_WMM_Info(ieee, &tag);
+ }
+
+ if (wps_ie_len && ieee->wps_ie) {
+ tag = skb_put(skb, wps_ie_len);
+ memcpy(tag, ieee->wps_ie, wps_ie_len);
+ }
+
+ tag = skb_put(skb, turbo_info_len);
+ if (turbo_info_len)
+ rtllib_TURBO_Info(ieee, &tag);
+
+ if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
+ if (ieee->pHTInfo->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
+ tag = skb_put(skb, ht_cap_len);
+ *tag++ = MFIE_TYPE_GENERIC;
+ *tag++ = ht_cap_len - 2;
+ memcpy(tag, ht_cap_buf, ht_cap_len - 2);
+ tag += ht_cap_len - 2;
+ }
+
+ if (ieee->pHTInfo->bCurrentRT2RTAggregation) {
+ tag = skb_put(skb, realtek_ie_len);
+ *tag++ = MFIE_TYPE_GENERIC;
+ *tag++ = realtek_ie_len - 2;
+ memcpy(tag, realtek_ie_buf, realtek_ie_len - 2);
+ }
+ }
+
+ kfree(ieee->assocreq_ies);
+ ieee->assocreq_ies = NULL;
+ ies = &(hdr->info_element[0].id);
+ ieee->assocreq_ies_len = (skb->data + skb->len) - ies;
+ ieee->assocreq_ies = kmalloc(ieee->assocreq_ies_len, GFP_ATOMIC);
+ if (ieee->assocreq_ies)
+ memcpy(ieee->assocreq_ies, ies, ieee->assocreq_ies_len);
+ else {
+ printk(KERN_INFO "%s()Warning: can't alloc memory for assocreq"
+ "_ies\n", __func__);
+ ieee->assocreq_ies_len = 0;
+ }
+ return skb;
+}
+
+void rtllib_associate_abort(struct rtllib_device *ieee)
+{
+
+ unsigned long flags;
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ ieee->associate_seq++;
+
+ /* don't scan, and avoid to have the RX path possibily
+ * try again to associate. Even do not react to AUTH or
+ * ASSOC response. Just wait for the retry wq to be scheduled.
+ * Here we will check if there are good nets to associate
+ * with, so we retry or just get back to NO_LINK and scanning
+ */
+ if (ieee->state == RTLLIB_ASSOCIATING_AUTHENTICATING) {
+ RTLLIB_DEBUG_MGMT("Authentication failed\n");
+ ieee->softmac_stats.no_auth_rs++;
+ } else {
+ RTLLIB_DEBUG_MGMT("Association failed\n");
+ ieee->softmac_stats.no_ass_rs++;
+ }
+
+ ieee->state = RTLLIB_ASSOCIATING_RETRY;
+
+ queue_delayed_work_rsl(ieee->wq, &ieee->associate_retry_wq,
+ RTLLIB_SOFTMAC_ASSOC_RETRY_TIME);
+
+ spin_unlock_irqrestore(&ieee->lock, flags);
+}
+
+static void rtllib_associate_abort_cb(unsigned long dev)
+{
+ rtllib_associate_abort((struct rtllib_device *) dev);
+}
+
+static void rtllib_associate_step1(struct rtllib_device *ieee, u8 * daddr)
+{
+ struct rtllib_network *beacon = &ieee->current_network;
+ struct sk_buff *skb;
+
+ RTLLIB_DEBUG_MGMT("Stopping scan\n");
+
+ ieee->softmac_stats.tx_auth_rq++;
+
+ skb = rtllib_authentication_req(beacon, ieee, 0, daddr);
+
+ if (!skb)
+ rtllib_associate_abort(ieee);
+ else {
+ ieee->state = RTLLIB_ASSOCIATING_AUTHENTICATING ;
+ RTLLIB_DEBUG_MGMT("Sending authentication request\n");
+ softmac_mgmt_xmit(skb, ieee);
+ if (!timer_pending(&ieee->associate_timer)) {
+ ieee->associate_timer.expires = jiffies + (HZ / 2);
+ add_timer(&ieee->associate_timer);
+ }
+ }
+}
+
+static void rtllib_auth_challenge(struct rtllib_device *ieee, u8 *challenge, int chlen)
+{
+ u8 *c;
+ struct sk_buff *skb;
+ struct rtllib_network *beacon = &ieee->current_network;
+
+ ieee->associate_seq++;
+ ieee->softmac_stats.tx_auth_rq++;
+
+ skb = rtllib_authentication_req(beacon, ieee, chlen + 2, beacon->bssid);
+
+ if (!skb)
+ rtllib_associate_abort(ieee);
+ else {
+ c = skb_put(skb, chlen+2);
+ *(c++) = MFIE_TYPE_CHALLENGE;
+ *(c++) = chlen;
+ memcpy(c, challenge, chlen);
+
+ RTLLIB_DEBUG_MGMT("Sending authentication challenge "
+ "response\n");
+
+ rtllib_encrypt_fragment(ieee, skb,
+ sizeof(struct rtllib_hdr_3addr));
+
+ softmac_mgmt_xmit(skb, ieee);
+ mod_timer(&ieee->associate_timer, jiffies + (HZ/2));
+ }
+ kfree(challenge);
+}
+
+static void rtllib_associate_step2(struct rtllib_device *ieee)
+{
+ struct sk_buff *skb;
+ struct rtllib_network *beacon = &ieee->current_network;
+
+ del_timer_sync(&ieee->associate_timer);
+
+ RTLLIB_DEBUG_MGMT("Sending association request\n");
+
+ ieee->softmac_stats.tx_ass_rq++;
+ skb = rtllib_association_req(beacon, ieee);
+ if (!skb)
+ rtllib_associate_abort(ieee);
+ else {
+ softmac_mgmt_xmit(skb, ieee);
+ mod_timer(&ieee->associate_timer, jiffies + (HZ/2));
+ }
+}
+
+#define CANCELLED 2
+static void rtllib_associate_complete_wq(void *data)
+{
+ struct rtllib_device *ieee = (struct rtllib_device *)
+ container_of_work_rsl(data,
+ struct rtllib_device,
+ associate_complete_wq);
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ (&(ieee->PowerSaveControl));
+ printk(KERN_INFO "Associated successfully\n");
+ if (ieee->is_silent_reset == 0) {
+ printk(KERN_INFO "normal associate\n");
+ notify_wx_assoc_event(ieee);
+ }
+
+ netif_carrier_on(ieee->dev);
+ ieee->is_roaming = false;
+ if (rtllib_is_54g(&ieee->current_network) &&
+ (ieee->modulation & RTLLIB_OFDM_MODULATION)) {
+ ieee->rate = 108;
+ printk(KERN_INFO"Using G rates:%d\n", ieee->rate);
+ } else {
+ ieee->rate = 22;
+ ieee->SetWirelessMode(ieee->dev, IEEE_B);
+ printk(KERN_INFO"Using B rates:%d\n", ieee->rate);
+ }
+ if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
+ printk(KERN_INFO "Successfully associated, ht enabled\n");
+ HTOnAssocRsp(ieee);
+ } else {
+ printk(KERN_INFO "Successfully associated, ht not "
+ "enabled(%d, %d)\n",
+ ieee->pHTInfo->bCurrentHTSupport,
+ ieee->pHTInfo->bEnableHT);
+ memset(ieee->dot11HTOperationalRateSet, 0, 16);
+ }
+ ieee->LinkDetectInfo.SlotNum = 2 * (1 +
+ ieee->current_network.beacon_interval /
+ 500);
+ if (ieee->LinkDetectInfo.NumRecvBcnInPeriod == 0 ||
+ ieee->LinkDetectInfo.NumRecvDataInPeriod == 0) {
+ ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1;
+ ieee->LinkDetectInfo.NumRecvDataInPeriod = 1;
+ }
+ pPSC->LpsIdleCount = 0;
+ ieee->link_change(ieee->dev);
+
+ if (ieee->is_silent_reset == 1) {
+ printk(KERN_INFO "silent reset associate\n");
+ ieee->is_silent_reset = 0;
+ }
+
+ if (ieee->data_hard_resume)
+ ieee->data_hard_resume(ieee->dev);
+
+}
+
+static void rtllib_sta_send_associnfo(struct rtllib_device *ieee)
+{
+}
+
+static void rtllib_associate_complete(struct rtllib_device *ieee)
+{
+ del_timer_sync(&ieee->associate_timer);
+
+ ieee->state = RTLLIB_LINKED;
+ rtllib_sta_send_associnfo(ieee);
+
+ queue_work_rsl(ieee->wq, &ieee->associate_complete_wq);
+}
+
+static void rtllib_associate_procedure_wq(void *data)
+{
+ struct rtllib_device *ieee = container_of_dwork_rsl(data,
+ struct rtllib_device,
+ associate_procedure_wq);
+ rtllib_stop_scan_syncro(ieee);
+ if (ieee->rtllib_ips_leave != NULL)
+ ieee->rtllib_ips_leave(ieee->dev);
+ down(&ieee->wx_sem);
+
+ if (ieee->data_hard_stop)
+ ieee->data_hard_stop(ieee->dev);
+
+ rtllib_stop_scan(ieee);
+ RT_TRACE(COMP_DBG, "===>%s(), chan:%d\n", __func__,
+ ieee->current_network.channel);
+ HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
+ if (ieee->eRFPowerState == eRfOff) {
+ RT_TRACE(COMP_DBG, "=============>%s():Rf state is eRfOff,"
+ " schedule ipsleave wq again,return\n", __func__);
+ if (ieee->rtllib_ips_leave_wq != NULL)
+ ieee->rtllib_ips_leave_wq(ieee->dev);
+ up(&ieee->wx_sem);
+ return;
+ }
+ ieee->associate_seq = 1;
+
+ rtllib_associate_step1(ieee, ieee->current_network.bssid);
+
+ up(&ieee->wx_sem);
+}
+
+inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
+ struct rtllib_network *net)
+{
+ u8 tmp_ssid[IW_ESSID_MAX_SIZE + 1];
+ int tmp_ssid_len = 0;
+
+ short apset, ssidset, ssidbroad, apmatch, ssidmatch;
+
+ /* we are interested in new new only if we are not associated
+ * and we are not associating / authenticating
+ */
+ if (ieee->state != RTLLIB_NOLINK)
+ return;
+
+ if ((ieee->iw_mode == IW_MODE_INFRA) && !(net->capability &
+ WLAN_CAPABILITY_ESS))
+ return;
+
+ if ((ieee->iw_mode == IW_MODE_ADHOC) && !(net->capability &
+ WLAN_CAPABILITY_IBSS))
+ return;
+
+ if ((ieee->iw_mode == IW_MODE_ADHOC) &&
+ (net->channel > ieee->ibss_maxjoin_chal))
+ return;
+ if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
+ /* if the user specified the AP MAC, we need also the essid
+ * This could be obtained by beacons or, if the network does not
+ * broadcast it, it can be put manually.
+ */
+ apset = ieee->wap_set;
+ ssidset = ieee->ssid_set;
+ ssidbroad = !(net->ssid_len == 0 || net->ssid[0] == '\0');
+ apmatch = (memcmp(ieee->current_network.bssid, net->bssid,
+ ETH_ALEN) == 0);
+ if (!ssidbroad) {
+ ssidmatch = (ieee->current_network.ssid_len ==
+ net->hidden_ssid_len) &&
+ (!strncmp(ieee->current_network.ssid,
+ net->hidden_ssid, net->hidden_ssid_len));
+ if (net->hidden_ssid_len > 0) {
+ strncpy(net->ssid, net->hidden_ssid,
+ net->hidden_ssid_len);
+ net->ssid_len = net->hidden_ssid_len;
+ ssidbroad = 1;
+ }
+ } else
+ ssidmatch =
+ (ieee->current_network.ssid_len == net->ssid_len) &&
+ (!strncmp(ieee->current_network.ssid, net->ssid,
+ net->ssid_len));
+
+ /* if the user set the AP check if match.
+ * if the network does not broadcast essid we check the
+ * user supplyed ANY essid
+ * if the network does broadcast and the user does not set
+ * essid it is OK
+ * if the network does broadcast and the user did set essid
+ * check if essid match
+ * if the ap is not set, check that the user set the bssid
+ * and the network does bradcast and that those two bssid match
+ */
+ if ((apset && apmatch &&
+ ((ssidset && ssidbroad && ssidmatch) ||
+ (ssidbroad && !ssidset) || (!ssidbroad && ssidset))) ||
+ (!apset && ssidset && ssidbroad && ssidmatch) ||
+ (ieee->is_roaming && ssidset && ssidbroad && ssidmatch)) {
+ /* if the essid is hidden replace it with the
+ * essid provided by the user.
+ */
+ if (!ssidbroad) {
+ strncpy(tmp_ssid, ieee->current_network.ssid,
+ IW_ESSID_MAX_SIZE);
+ tmp_ssid_len = ieee->current_network.ssid_len;
+ }
+ memcpy(&ieee->current_network, net,
+ sizeof(struct rtllib_network));
+ if (!ssidbroad) {
+ strncpy(ieee->current_network.ssid, tmp_ssid,
+ IW_ESSID_MAX_SIZE);
+ ieee->current_network.ssid_len = tmp_ssid_len;
+ }
+ printk(KERN_INFO"Linking with %s,channel:%d, qos:%d, "
+ "myHT:%d, networkHT:%d, mode:%x cur_net.flags"
+ ":0x%x\n", ieee->current_network.ssid,
+ ieee->current_network.channel,
+ ieee->current_network.qos_data.supported,
+ ieee->pHTInfo->bEnableHT,
+ ieee->current_network.bssht.bdSupportHT,
+ ieee->current_network.mode,
+ ieee->current_network.flags);
+
+ if ((rtllib_act_scanning(ieee, false)) &&
+ !(ieee->softmac_features & IEEE_SOFTMAC_SCAN))
+ rtllib_stop_scan_syncro(ieee);
+
+ ieee->hwscan_ch_bk = ieee->current_network.channel;
+ HTResetIOTSetting(ieee->pHTInfo);
+ ieee->wmm_acm = 0;
+ if (ieee->iw_mode == IW_MODE_INFRA) {
+ /* Join the network for the first time */
+ ieee->AsocRetryCount = 0;
+ if ((ieee->current_network.qos_data.supported == 1) &&
+ ieee->current_network.bssht.bdSupportHT)
+ HTResetSelfAndSavePeerSetting(ieee,
+ &(ieee->current_network));
+ else
+ ieee->pHTInfo->bCurrentHTSupport =
+ false;
+
+ ieee->state = RTLLIB_ASSOCIATING;
+ if (ieee->LedControlHandler != NULL)
+ ieee->LedControlHandler(ieee->dev,
+ LED_CTL_START_TO_LINK);
+ queue_delayed_work_rsl(ieee->wq,
+ &ieee->associate_procedure_wq, 0);
+ } else {
+ if (rtllib_is_54g(&ieee->current_network) &&
+ (ieee->modulation & RTLLIB_OFDM_MODULATION)) {
+ ieee->rate = 108;
+ ieee->SetWirelessMode(ieee->dev, IEEE_G);
+ printk(KERN_INFO"Using G rates\n");
+ } else {
+ ieee->rate = 22;
+ ieee->SetWirelessMode(ieee->dev, IEEE_B);
+ printk(KERN_INFO"Using B rates\n");
+ }
+ memset(ieee->dot11HTOperationalRateSet, 0, 16);
+ ieee->state = RTLLIB_LINKED;
+ }
+ }
+ }
+}
+
+void rtllib_softmac_check_all_nets(struct rtllib_device *ieee)
+{
+ unsigned long flags;
+ struct rtllib_network *target;
+
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ list_for_each_entry(target, &ieee->network_list, list) {
+
+ /* if the state become different that NOLINK means
+ * we had found what we are searching for
+ */
+
+ if (ieee->state != RTLLIB_NOLINK)
+ break;
+
+ if (ieee->scan_age == 0 || time_after(target->last_scanned +
+ ieee->scan_age, jiffies))
+ rtllib_softmac_new_net(ieee, target);
+ }
+ spin_unlock_irqrestore(&ieee->lock, flags);
+}
+
+static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
+{
+ struct rtllib_authentication *a;
+ u8 *t;
+ if (skb->len < (sizeof(struct rtllib_authentication) -
+ sizeof(struct rtllib_info_element))) {
+ RTLLIB_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
+ return 0xcafe;
+ }
+ *challenge = NULL;
+ a = (struct rtllib_authentication *) skb->data;
+ if (skb->len > (sizeof(struct rtllib_authentication) + 3)) {
+ t = skb->data + sizeof(struct rtllib_authentication);
+
+ if (*(t++) == MFIE_TYPE_CHALLENGE) {
+ *chlen = *(t++);
+ *challenge = kmalloc(*chlen, GFP_ATOMIC);
+ memcpy(*challenge, t, *chlen); /*TODO - check here*/
+ }
+ }
+ return cpu_to_le16(a->status);
+}
+
+static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
+{
+ struct rtllib_authentication *a;
+
+ if (skb->len < (sizeof(struct rtllib_authentication) -
+ sizeof(struct rtllib_info_element))) {
+ RTLLIB_DEBUG_MGMT("invalid len in auth request: %d\n",
+ skb->len);
+ return -1;
+ }
+ a = (struct rtllib_authentication *) skb->data;
+
+ memcpy(dest, a->header.addr2, ETH_ALEN);
+
+ if (le16_to_cpu(a->algorithm) != WLAN_AUTH_OPEN)
+ return WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
+
+ return WLAN_STATUS_SUCCESS;
+}
+
+static short probe_rq_parse(struct rtllib_device *ieee, struct sk_buff *skb,
+ u8 *src)
+{
+ u8 *tag;
+ u8 *skbend;
+ u8 *ssid = NULL;
+ u8 ssidlen = 0;
+ struct rtllib_hdr_3addr *header =
+ (struct rtllib_hdr_3addr *) skb->data;
+ bool bssid_match;
+
+ if (skb->len < sizeof(struct rtllib_hdr_3addr))
+ return -1; /* corrupted */
+
+ bssid_match =
+ (memcmp(header->addr3, ieee->current_network.bssid, ETH_ALEN) != 0) &&
+ (memcmp(header->addr3, "\xff\xff\xff\xff\xff\xff", ETH_ALEN) != 0);
+ if (bssid_match)
+ return -1;
+
+ memcpy(src, header->addr2, ETH_ALEN);
+
+ skbend = (u8 *)skb->data + skb->len;
+
+ tag = skb->data + sizeof(struct rtllib_hdr_3addr);
+
+ while (tag + 1 < skbend) {
+ if (*tag == 0) {
+ ssid = tag + 2;
+ ssidlen = *(tag + 1);
+ break;
+ }
+ tag++; /* point to the len field */
+ tag = tag + *(tag); /* point to the last data byte of the tag */
+ tag++; /* point to the next tag */
+ }
+
+ if (ssidlen == 0)
+ return 1;
+
+ if (!ssid)
+ return 1; /* ssid not found in tagged param */
+
+ return !strncmp(ssid, ieee->current_network.ssid, ssidlen);
+}
+
+static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
+{
+ struct rtllib_assoc_request_frame *a;
+
+ if (skb->len < (sizeof(struct rtllib_assoc_request_frame) -
+ sizeof(struct rtllib_info_element))) {
+
+ RTLLIB_DEBUG_MGMT("invalid len in auth request:%d\n", skb->len);
+ return -1;
+ }
+
+ a = (struct rtllib_assoc_request_frame *) skb->data;
+
+ memcpy(dest, a->header.addr2, ETH_ALEN);
+
+ return 0;
+}
+
+static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
+ int *aid)
+{
+ struct rtllib_assoc_response_frame *response_head;
+ u16 status_code;
+
+ if (skb->len < sizeof(struct rtllib_assoc_response_frame)) {
+ RTLLIB_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
+ return 0xcafe;
+ }
+
+ response_head = (struct rtllib_assoc_response_frame *) skb->data;
+ *aid = le16_to_cpu(response_head->aid) & 0x3fff;
+
+ status_code = le16_to_cpu(response_head->status);
+ if ((status_code == WLAN_STATUS_ASSOC_DENIED_RATES ||
+ status_code == WLAN_STATUS_CAPS_UNSUPPORTED) &&
+ ((ieee->mode == IEEE_G) &&
+ (ieee->current_network.mode == IEEE_N_24G) &&
+ (ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT-1)))) {
+ ieee->pHTInfo->IOTAction |= HT_IOT_ACT_PURE_N_MODE;
+ } else {
+ ieee->AsocRetryCount = 0;
+ }
+
+ return le16_to_cpu(response_head->status);
+}
+
+void rtllib_rx_probe_rq(struct rtllib_device *ieee, struct sk_buff *skb)
+{
+ u8 dest[ETH_ALEN];
+ ieee->softmac_stats.rx_probe_rq++;
+ if (probe_rq_parse(ieee, skb, dest) > 0) {
+ ieee->softmac_stats.tx_probe_rs++;
+ rtllib_resp_to_probe(ieee, dest);
+ }
+}
+
+static inline void rtllib_rx_auth_rq(struct rtllib_device *ieee,
+ struct sk_buff *skb)
+{
+ u8 dest[ETH_ALEN];
+ int status;
+ ieee->softmac_stats.rx_auth_rq++;
+
+ status = auth_rq_parse(skb, dest);
+ if (status != -1)
+ rtllib_resp_to_auth(ieee, status, dest);
+}
+
+static inline void rtllib_rx_assoc_rq(struct rtllib_device *ieee,
+ struct sk_buff *skb)
+{
+
+ u8 dest[ETH_ALEN];
+
+ ieee->softmac_stats.rx_ass_rq++;
+ if (assoc_rq_parse(skb, dest) != -1)
+ rtllib_resp_to_assoc_rq(ieee, dest);
+
+ printk(KERN_INFO"New client associated: %pM\n", dest);
+}
+
+void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee, short pwr)
+{
+
+ struct sk_buff *buf = rtllib_null_func(ieee, pwr);
+
+ if (buf)
+ softmac_ps_mgmt_xmit(buf, ieee);
+}
+
+void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee)
+{
+ struct sk_buff *buf = rtllib_pspoll_func(ieee);
+
+ if (buf)
+ softmac_ps_mgmt_xmit(buf, ieee);
+}
+
+static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
+{
+ int timeout = ieee->ps_timeout;
+ u8 dtim;
+ struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
+ (&(ieee->PowerSaveControl));
+
+ if (ieee->LPSDelayCnt) {
+ ieee->LPSDelayCnt--;
+ return 0;
+ }
+
+ dtim = ieee->current_network.dtim_data;
+ if (!(dtim & RTLLIB_DTIM_VALID))
+ return 0;
+ timeout = ieee->current_network.beacon_interval;
+ ieee->current_network.dtim_data = RTLLIB_DTIM_INVALID;
+ /* there's no need to nofity AP that I find you buffered
+ * with broadcast packet */
+ if (dtim & (RTLLIB_DTIM_UCAST & ieee->ps))
+ return 2;
+
+ if (!time_after(jiffies, ieee->dev->trans_start + MSECS(timeout)))
+ return 0;
+ if (!time_after(jiffies, ieee->last_rx_ps_time + MSECS(timeout)))
+ return 0;
+ if ((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) &&
+ (ieee->mgmt_queue_tail != ieee->mgmt_queue_head))
+ return 0;
+
+ if (time) {
+ if (ieee->bAwakePktSent == true) {
+ pPSC->LPSAwakeIntvl = 1;
+ } else {
+ u8 MaxPeriod = 1;
+
+ if (pPSC->LPSAwakeIntvl == 0)
+ pPSC->LPSAwakeIntvl = 1;
+ if (pPSC->RegMaxLPSAwakeIntvl == 0)
+ MaxPeriod = 1;
+ else if (pPSC->RegMaxLPSAwakeIntvl == 0xFF)
+ MaxPeriod = ieee->current_network.dtim_period;
+ else
+ MaxPeriod = pPSC->RegMaxLPSAwakeIntvl;
+ pPSC->LPSAwakeIntvl = (pPSC->LPSAwakeIntvl >=
+ MaxPeriod) ? MaxPeriod :
+ (pPSC->LPSAwakeIntvl + 1);
+ }
+ {
+ u8 LPSAwakeIntvl_tmp = 0;
+ u8 period = ieee->current_network.dtim_period;
+ u8 count = ieee->current_network.tim.tim_count;
+ if (count == 0) {
+ if (pPSC->LPSAwakeIntvl > period)
+ LPSAwakeIntvl_tmp = period +
+ (pPSC->LPSAwakeIntvl -
+ period) -
+ ((pPSC->LPSAwakeIntvl-period) %
+ period);
+ else
+ LPSAwakeIntvl_tmp = pPSC->LPSAwakeIntvl;
+
+ } else {
+ if (pPSC->LPSAwakeIntvl >
+ ieee->current_network.tim.tim_count)
+ LPSAwakeIntvl_tmp = count +
+ (pPSC->LPSAwakeIntvl - count) -
+ ((pPSC->LPSAwakeIntvl-count)%period);
+ else
+ LPSAwakeIntvl_tmp = pPSC->LPSAwakeIntvl;
+ }
+
+ *time = ieee->current_network.last_dtim_sta_time
+ + MSECS(ieee->current_network.beacon_interval *
+ LPSAwakeIntvl_tmp);
+ }
+ }
+
+ return 1;
+
+
+}
+
+static inline void rtllib_sta_ps(struct rtllib_device *ieee)
+{
+ u64 time;
+ short sleep;
+ unsigned long flags, flags2;
+
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ if ((ieee->ps == RTLLIB_PS_DISABLED ||
+ ieee->iw_mode != IW_MODE_INFRA ||
+ ieee->state != RTLLIB_LINKED)) {
+ RT_TRACE(COMP_DBG, "=====>%s(): no need to ps,wake up!! "
+ "ieee->ps is %d, ieee->iw_mode is %d, ieee->state"
+ " is %d\n", __func__, ieee->ps, ieee->iw_mode,
+ ieee->state);
+ spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
+ rtllib_sta_wakeup(ieee, 1);
+
+ spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
+ }
+ sleep = rtllib_sta_ps_sleep(ieee, &time);
+ /* 2 wake, 1 sleep, 0 do nothing */
+ if (sleep == 0)
+ goto out;
+ if (sleep == 1) {
+ if (ieee->sta_sleep == LPS_IS_SLEEP) {
+ ieee->enter_sleep_state(ieee->dev, time);
+ } else if (ieee->sta_sleep == LPS_IS_WAKE) {
+ spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
+
+ if (ieee->ps_is_queue_empty(ieee->dev)) {
+ ieee->sta_sleep = LPS_WAIT_NULL_DATA_SEND;
+ ieee->ack_tx_to_ieee = 1;
+ rtllib_sta_ps_send_null_frame(ieee, 1);
+ ieee->ps_time = time;
+ }
+ spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
+
+ }
+
+ ieee->bAwakePktSent = false;
+
+ } else if (sleep == 2) {
+ spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
+
+ rtllib_sta_wakeup(ieee, 1);
+
+ spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
+ }
+
+out:
+ spin_unlock_irqrestore(&ieee->lock, flags);
+
+}
+
+void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl)
+{
+ if (ieee->sta_sleep == LPS_IS_WAKE) {
+ if (nl) {
+ if (ieee->pHTInfo->IOTAction &
+ HT_IOT_ACT_NULL_DATA_POWER_SAVING) {
+ ieee->ack_tx_to_ieee = 1;
+ rtllib_sta_ps_send_null_frame(ieee, 0);
+ } else {
+ ieee->ack_tx_to_ieee = 1;
+ rtllib_sta_ps_send_pspoll_frame(ieee);
+ }
+ }
+ return;
+
+ }
+
+ if (ieee->sta_sleep == LPS_IS_SLEEP)
+ ieee->sta_wake_up(ieee->dev);
+ if (nl) {
+ if (ieee->pHTInfo->IOTAction &
+ HT_IOT_ACT_NULL_DATA_POWER_SAVING) {
+ ieee->ack_tx_to_ieee = 1;
+ rtllib_sta_ps_send_null_frame(ieee, 0);
+ } else {
+ ieee->ack_tx_to_ieee = 1;
+ ieee->polling = true;
+ rtllib_sta_ps_send_pspoll_frame(ieee);
+ }
+
+ } else {
+ ieee->sta_sleep = LPS_IS_WAKE;
+ ieee->polling = false;
+ }
+}
+
+void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success)
+{
+ unsigned long flags, flags2;
+
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ if (ieee->sta_sleep == LPS_WAIT_NULL_DATA_SEND) {
+ /* Null frame with PS bit set */
+ if (success) {
+ ieee->sta_sleep = LPS_IS_SLEEP;
+ ieee->enter_sleep_state(ieee->dev, ieee->ps_time);
+ }
+ /* if the card report not success we can't be sure the AP
+ * has not RXed so we can't assume the AP believe us awake
+ */
+ } else {/* 21112005 - tx again null without PS bit if lost */
+
+ if ((ieee->sta_sleep == LPS_IS_WAKE) && !success) {
+ spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
+ if (ieee->pHTInfo->IOTAction &
+ HT_IOT_ACT_NULL_DATA_POWER_SAVING)
+ rtllib_sta_ps_send_null_frame(ieee, 0);
+ else
+ rtllib_sta_ps_send_pspoll_frame(ieee);
+ spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
+ }
+ }
+ spin_unlock_irqrestore(&ieee->lock, flags);
+}
+
+static void rtllib_process_action(struct rtllib_device *ieee, struct sk_buff *skb)
+{
+ struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+ u8 *act = rtllib_get_payload((struct rtllib_hdr *)header);
+ u8 category = 0;
+
+ if (act == NULL) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "error to get payload of "
+ "action frame\n");
+ return;
+ }
+
+ category = *act;
+ act++;
+ switch (category) {
+ case ACT_CAT_BA:
+ switch (*act) {
+ case ACT_ADDBAREQ:
+ rtllib_rx_ADDBAReq(ieee, skb);
+ break;
+ case ACT_ADDBARSP:
+ rtllib_rx_ADDBARsp(ieee, skb);
+ break;
+ case ACT_DELBA:
+ rtllib_rx_DELBA(ieee, skb);
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+inline int rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats)
+{
+ u16 errcode;
+ int aid;
+ u8 *ies;
+ struct rtllib_assoc_response_frame *assoc_resp;
+ struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+
+ RTLLIB_DEBUG_MGMT("received [RE]ASSOCIATION RESPONSE (%d)\n",
+ WLAN_FC_GET_STYPE(header->frame_ctl));
+
+ if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
+ ieee->state == RTLLIB_ASSOCIATING_AUTHENTICATED &&
+ (ieee->iw_mode == IW_MODE_INFRA)) {
+ errcode = assoc_parse(ieee, skb, &aid);
+ if (0 == errcode) {
+ struct rtllib_network *network =
+ kzalloc(sizeof(struct rtllib_network),
+ GFP_ATOMIC);
+
+ if (!network)
+ return 1;
+ memset(network, 0, sizeof(*network));
+ ieee->state = RTLLIB_LINKED;
+ ieee->assoc_id = aid;
+ ieee->softmac_stats.rx_ass_ok++;
+ /* station support qos */
+ /* Let the register setting default with Legacy station */
+ assoc_resp = (struct rtllib_assoc_response_frame *)skb->data;
+ if (ieee->current_network.qos_data.supported == 1) {
+ if (rtllib_parse_info_param(ieee, assoc_resp->info_element,
+ rx_stats->len - sizeof(*assoc_resp),
+ network, rx_stats)) {
+ kfree(network);
+ return 1;
+ } else {
+ memcpy(ieee->pHTInfo->PeerHTCapBuf,
+ network->bssht.bdHTCapBuf,
+ network->bssht.bdHTCapLen);
+ memcpy(ieee->pHTInfo->PeerHTInfoBuf,
+ network->bssht.bdHTInfoBuf,
+ network->bssht.bdHTInfoLen);
+ }
+ if (ieee->handle_assoc_response != NULL)
+ ieee->handle_assoc_response(ieee->dev,
+ (struct rtllib_assoc_response_frame *)header,
+ network);
+ kfree(network);
+ }
+
+ kfree(ieee->assocresp_ies);
+ ieee->assocresp_ies = NULL;
+ ies = &(assoc_resp->info_element[0].id);
+ ieee->assocresp_ies_len = (skb->data + skb->len) - ies;
+ ieee->assocresp_ies = kmalloc(ieee->assocresp_ies_len,
+ GFP_ATOMIC);
+ if (ieee->assocresp_ies)
+ memcpy(ieee->assocresp_ies, ies,
+ ieee->assocresp_ies_len);
+ else {
+ printk(KERN_INFO "%s()Warning: can't alloc "
+ "memory for assocresp_ies\n", __func__);
+ ieee->assocresp_ies_len = 0;
+ }
+ rtllib_associate_complete(ieee);
+ } else {
+ /* aid could not been allocated */
+ ieee->softmac_stats.rx_ass_err++;
+ printk(KERN_INFO "Association response status code 0x%x\n",
+ errcode);
+ RTLLIB_DEBUG_MGMT(
+ "Association response status code 0x%x\n",
+ errcode);
+ if (ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT)
+ queue_delayed_work_rsl(ieee->wq,
+ &ieee->associate_procedure_wq, 0);
+ else
+ rtllib_associate_abort(ieee);
+ }
+ }
+ return 0;
+}
+
+inline int rtllib_rx_auth(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats)
+{
+ u16 errcode;
+ u8 *challenge;
+ int chlen = 0;
+ bool bSupportNmode = true, bHalfSupportNmode = false;
+
+ if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) {
+ if (ieee->state == RTLLIB_ASSOCIATING_AUTHENTICATING &&
+ (ieee->iw_mode == IW_MODE_INFRA)) {
+ RTLLIB_DEBUG_MGMT("Received authentication response");
+
+ errcode = auth_parse(skb, &challenge, &chlen);
+ if (0 == errcode) {
+ if (ieee->open_wep || !challenge) {
+ ieee->state = RTLLIB_ASSOCIATING_AUTHENTICATED;
+ ieee->softmac_stats.rx_auth_rs_ok++;
+ if (!(ieee->pHTInfo->IOTAction &
+ HT_IOT_ACT_PURE_N_MODE)) {
+ if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
+ if (IsHTHalfNmodeAPs(ieee)) {
+ bSupportNmode = true;
+ bHalfSupportNmode = true;
+ } else {
+ bSupportNmode = false;
+ bHalfSupportNmode = false;
+ }
+ }
+ }
+ /* Dummy wirless mode setting to avoid
+ * encryption issue */
+ if (bSupportNmode) {
+ ieee->SetWirelessMode(ieee->dev,
+ ieee->current_network.mode);
+ } else {
+ /*TODO*/
+ ieee->SetWirelessMode(ieee->dev,
+ IEEE_G);
+ }
+
+ if (ieee->current_network.mode ==
+ IEEE_N_24G &&
+ bHalfSupportNmode == true) {
+ printk(KERN_INFO "======>enter "
+ "half N mode\n");
+ ieee->bHalfWirelessN24GMode =
+ true;
+ } else
+ ieee->bHalfWirelessN24GMode =
+ false;
+
+ rtllib_associate_step2(ieee);
+ } else {
+ rtllib_auth_challenge(ieee, challenge,
+ chlen);
+ }
+ } else {
+ ieee->softmac_stats.rx_auth_rs_err++;
+ RTLLIB_DEBUG_MGMT("Authentication respose"
+ " status code 0x%x", errcode);
+
+ printk(KERN_INFO "Authentication respose "
+ "status code 0x%x", errcode);
+ rtllib_associate_abort(ieee);
+ }
+
+ } else if (ieee->iw_mode == IW_MODE_MASTER) {
+ rtllib_rx_auth_rq(ieee, skb);
+ }
+ }
+ return 0;
+}
+
+inline int rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
+{
+ struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+
+ if (memcmp(header->addr3, ieee->current_network.bssid, ETH_ALEN) != 0)
+ return 0;
+
+ /* FIXME for now repeat all the association procedure
+ * both for disassociation and deauthentication
+ */
+ if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
+ ieee->state == RTLLIB_LINKED &&
+ (ieee->iw_mode == IW_MODE_INFRA)) {
+ printk(KERN_INFO "==========>received disassoc/deauth(%x) "
+ "frame, reason code:%x\n",
+ WLAN_FC_GET_STYPE(header->frame_ctl),
+ ((struct rtllib_disassoc *)skb->data)->reason);
+ ieee->state = RTLLIB_ASSOCIATING;
+ ieee->softmac_stats.reassoc++;
+ ieee->is_roaming = true;
+ ieee->LinkDetectInfo.bBusyTraffic = false;
+ rtllib_disassociate(ieee);
+ RemovePeerTS(ieee, header->addr2);
+ if (ieee->LedControlHandler != NULL)
+ ieee->LedControlHandler(ieee->dev,
+ LED_CTL_START_TO_LINK);
+
+ if (!(ieee->rtllib_ap_sec_type(ieee) &
+ (SEC_ALG_CCMP|SEC_ALG_TKIP)))
+ queue_delayed_work_rsl(ieee->wq,
+ &ieee->associate_procedure_wq, 5);
+ }
+ return 0;
+}
+
+inline int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
+ struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats, u16 type,
+ u16 stype)
+{
+ struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+
+ if (!ieee->proto_started)
+ return 0;
+
+ switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
+ case RTLLIB_STYPE_ASSOC_RESP:
+ case RTLLIB_STYPE_REASSOC_RESP:
+ if (rtllib_rx_assoc_resp(ieee, skb, rx_stats) == 1)
+ return 1;
+ break;
+ case RTLLIB_STYPE_ASSOC_REQ:
+ case RTLLIB_STYPE_REASSOC_REQ:
+ if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
+ ieee->iw_mode == IW_MODE_MASTER)
+ rtllib_rx_assoc_rq(ieee, skb);
+ break;
+ case RTLLIB_STYPE_AUTH:
+ rtllib_rx_auth(ieee, skb, rx_stats);
+ break;
+ case RTLLIB_STYPE_DISASSOC:
+ case RTLLIB_STYPE_DEAUTH:
+ rtllib_rx_deauth(ieee, skb);
+ break;
+ case RTLLIB_STYPE_MANAGE_ACT:
+ rtllib_process_action(ieee, skb);
+ break;
+ default:
+ return -1;
+ break;
+ }
+ return 0;
+}
+
+/* following are for a simplier TX queue management.
+ * Instead of using netif_[stop/wake]_queue the driver
+ * will uses these two function (plus a reset one), that
+ * will internally uses the kernel netif_* and takes
+ * care of the ieee802.11 fragmentation.
+ * So the driver receives a fragment per time and might
+ * call the stop function when it want without take care
+ * to have enought room to TX an entire packet.
+ * This might be useful if each fragment need it's own
+ * descriptor, thus just keep a total free memory > than
+ * the max fragmentation treshold is not enought.. If the
+ * ieee802.11 stack passed a TXB struct then you needed
+ * to keep N free descriptors where
+ * N = MAX_PACKET_SIZE / MIN_FRAG_TRESHOLD
+ * In this way you need just one and the 802.11 stack
+ * will take care of buffering fragments and pass them to
+ * to the driver later, when it wakes the queue.
+ */
+void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee)
+{
+
+ unsigned int queue_index = txb->queue_index;
+ unsigned long flags;
+ int i;
+ struct cb_desc *tcb_desc = NULL;
+ unsigned long queue_len = 0;
+
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ /* called with 2nd parm 0, no tx mgmt lock required */
+ rtllib_sta_wakeup(ieee, 0);
+
+ /* update the tx status */
+ tcb_desc = (struct cb_desc *)(txb->fragments[0]->cb +
+ MAX_DEV_ADDR_SIZE);
+ if (tcb_desc->bMulticast)
+ ieee->stats.multicast++;
+
+ /* if xmit available, just xmit it immediately, else just insert it to
+ * the wait queue */
+ for (i = 0; i < txb->nr_frags; i++) {
+ queue_len = skb_queue_len(&ieee->skb_waitQ[queue_index]);
+ if ((queue_len != 0) ||\
+ (!ieee->check_nic_enough_desc(ieee->dev, queue_index)) ||
+ (ieee->queue_stop)) {
+ /* insert the skb packet to the wait queue */
+ /* as for the completion function, it does not need
+ * to check it any more.
+ * */
+ if (queue_len < 200)
+ skb_queue_tail(&ieee->skb_waitQ[queue_index],
+ txb->fragments[i]);
+ else
+ kfree_skb(txb->fragments[i]);
+ } else {
+ ieee->softmac_data_hard_start_xmit(
+ txb->fragments[i],
+ ieee->dev, ieee->rate);
+ }
+ }
+
+ rtllib_txb_free(txb);
+
+ spin_unlock_irqrestore(&ieee->lock, flags);
+
+}
+
+/* called with ieee->lock acquired */
+static void rtllib_resume_tx(struct rtllib_device *ieee)
+{
+ int i;
+ for (i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags;
+ i++) {
+
+ if (ieee->queue_stop) {
+ ieee->tx_pending.frag = i;
+ return;
+ } else {
+
+ ieee->softmac_data_hard_start_xmit(
+ ieee->tx_pending.txb->fragments[i],
+ ieee->dev, ieee->rate);
+ ieee->stats.tx_packets++;
+ }
+ }
+
+ rtllib_txb_free(ieee->tx_pending.txb);
+ ieee->tx_pending.txb = NULL;
+}
+
+
+void rtllib_reset_queue(struct rtllib_device *ieee)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ieee->lock, flags);
+ init_mgmt_queue(ieee);
+ if (ieee->tx_pending.txb) {
+ rtllib_txb_free(ieee->tx_pending.txb);
+ ieee->tx_pending.txb = NULL;
+ }
+ ieee->queue_stop = 0;
+ spin_unlock_irqrestore(&ieee->lock, flags);
+
+}
+
+void rtllib_wake_queue(struct rtllib_device *ieee)
+{
+
+ unsigned long flags;
+ struct sk_buff *skb;
+ struct rtllib_hdr_3addr *header;
+
+ spin_lock_irqsave(&ieee->lock, flags);
+ if (!ieee->queue_stop)
+ goto exit;
+
+ ieee->queue_stop = 0;
+
+ if (ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) {
+ while (!ieee->queue_stop && (skb = dequeue_mgmt(ieee))) {
+
+ header = (struct rtllib_hdr_3addr *) skb->data;
+
+ header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+
+ if (ieee->seq_ctrl[0] == 0xFFF)
+ ieee->seq_ctrl[0] = 0;
+ else
+ ieee->seq_ctrl[0]++;
+
+ ieee->softmac_data_hard_start_xmit(skb, ieee->dev,
+ ieee->basic_rate);
+ }
+ }
+ if (!ieee->queue_stop && ieee->tx_pending.txb)
+ rtllib_resume_tx(ieee);
+
+ if (!ieee->queue_stop && netif_queue_stopped(ieee->dev)) {
+ ieee->softmac_stats.swtxawake++;
+ netif_wake_queue(ieee->dev);
+ }
+
+exit:
+ spin_unlock_irqrestore(&ieee->lock, flags);
+}
+
+
+void rtllib_stop_queue(struct rtllib_device *ieee)
+{
+
+ if (!netif_queue_stopped(ieee->dev)) {
+ netif_stop_queue(ieee->dev);
+ ieee->softmac_stats.swtxstop++;
+ }
+ ieee->queue_stop = 1;
+
+}
+
+void rtllib_stop_all_queues(struct rtllib_device *ieee)
+{
+ unsigned int i;
+ for (i = 0; i < ieee->dev->num_tx_queues; i++)
+ netdev_get_tx_queue(ieee->dev, i)->trans_start = jiffies;
+
+ netif_tx_stop_all_queues(ieee->dev);
+}
+
+void rtllib_wake_all_queues(struct rtllib_device *ieee)
+{
+ netif_tx_wake_all_queues(ieee->dev);
+}
+
+inline void rtllib_randomize_cell(struct rtllib_device *ieee)
+{
+
+ get_random_bytes(ieee->current_network.bssid, ETH_ALEN);
+
+ /* an IBSS cell address must have the two less significant
+ * bits of the first byte = 2
+ */
+ ieee->current_network.bssid[0] &= ~0x01;
+ ieee->current_network.bssid[0] |= 0x02;
+}
+
+/* called in user context only */
+void rtllib_start_master_bss(struct rtllib_device *ieee)
+{
+ ieee->assoc_id = 1;
+
+ if (ieee->current_network.ssid_len == 0) {
+ strncpy(ieee->current_network.ssid,
+ RTLLIB_DEFAULT_TX_ESSID,
+ IW_ESSID_MAX_SIZE);
+
+ ieee->current_network.ssid_len =
+ strlen(RTLLIB_DEFAULT_TX_ESSID);
+ ieee->ssid_set = 1;
+ }
+
+ memcpy(ieee->current_network.bssid, ieee->dev->dev_addr, ETH_ALEN);
+
+ ieee->set_chan(ieee->dev, ieee->current_network.channel);
+ ieee->state = RTLLIB_LINKED;
+ ieee->link_change(ieee->dev);
+ notify_wx_assoc_event(ieee);
+
+ if (ieee->data_hard_resume)
+ ieee->data_hard_resume(ieee->dev);
+
+ netif_carrier_on(ieee->dev);
+}
+
+static void rtllib_start_monitor_mode(struct rtllib_device *ieee)
+{
+ /* reset hardware status */
+ if (ieee->raw_tx) {
+ if (ieee->data_hard_resume)
+ ieee->data_hard_resume(ieee->dev);
+
+ netif_carrier_on(ieee->dev);
+ }
+}
+
+static void rtllib_start_ibss_wq(void *data)
+{
+ struct rtllib_device *ieee = container_of_dwork_rsl(data,
+ struct rtllib_device, start_ibss_wq);
+ /* iwconfig mode ad-hoc will schedule this and return
+ * on the other hand this will block further iwconfig SET
+ * operations because of the wx_sem hold.
+ * Anyway some most set operations set a flag to speed-up
+ * (abort) this wq (when syncro scanning) before sleeping
+ * on the semaphore
+ */
+ if (!ieee->proto_started) {
+ printk(KERN_INFO "==========oh driver down return\n");
+ return;
+ }
+ down(&ieee->wx_sem);
+
+ if (ieee->current_network.ssid_len == 0) {
+ strcpy(ieee->current_network.ssid, RTLLIB_DEFAULT_TX_ESSID);
+ ieee->current_network.ssid_len = strlen(RTLLIB_DEFAULT_TX_ESSID);
+ ieee->ssid_set = 1;
+ }
+
+ ieee->state = RTLLIB_NOLINK;
+ ieee->mode = IEEE_G;
+ /* check if we have this cell in our network list */
+ rtllib_softmac_check_all_nets(ieee);
+
+
+ /* if not then the state is not linked. Maybe the user swithced to
+ * ad-hoc mode just after being in monitor mode, or just after
+ * being very few time in managed mode (so the card have had no
+ * time to scan all the chans..) or we have just run up the iface
+ * after setting ad-hoc mode. So we have to give another try..
+ * Here, in ibss mode, should be safe to do this without extra care
+ * (in bss mode we had to make sure no-one tryed to associate when
+ * we had just checked the ieee->state and we was going to start the
+ * scan) beacause in ibss mode the rtllib_new_net function, when
+ * finds a good net, just set the ieee->state to RTLLIB_LINKED,
+ * so, at worst, we waste a bit of time to initiate an unneeded syncro
+ * scan, that will stop at the first round because it sees the state
+ * associated.
+ */
+ if (ieee->state == RTLLIB_NOLINK)
+ rtllib_start_scan_syncro(ieee, 0);
+
+ /* the network definitively is not here.. create a new cell */
+ if (ieee->state == RTLLIB_NOLINK) {
+ printk(KERN_INFO "creating new IBSS cell\n");
+ ieee->current_network.channel = ieee->IbssStartChnl;
+ if (!ieee->wap_set)
+ rtllib_randomize_cell(ieee);
+
+ if (ieee->modulation & RTLLIB_CCK_MODULATION) {
+
+ ieee->current_network.rates_len = 4;
+
+ ieee->current_network.rates[0] =
+ RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_1MB;
+ ieee->current_network.rates[1] =
+ RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_2MB;
+ ieee->current_network.rates[2] =
+ RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_5MB;
+ ieee->current_network.rates[3] =
+ RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_11MB;
+
+ } else
+ ieee->current_network.rates_len = 0;
+
+ if (ieee->modulation & RTLLIB_OFDM_MODULATION) {
+ ieee->current_network.rates_ex_len = 8;
+
+ ieee->current_network.rates_ex[0] =
+ RTLLIB_OFDM_RATE_6MB;
+ ieee->current_network.rates_ex[1] =
+ RTLLIB_OFDM_RATE_9MB;
+ ieee->current_network.rates_ex[2] =
+ RTLLIB_OFDM_RATE_12MB;
+ ieee->current_network.rates_ex[3] =
+ RTLLIB_OFDM_RATE_18MB;
+ ieee->current_network.rates_ex[4] =
+ RTLLIB_OFDM_RATE_24MB;
+ ieee->current_network.rates_ex[5] =
+ RTLLIB_OFDM_RATE_36MB;
+ ieee->current_network.rates_ex[6] =
+ RTLLIB_OFDM_RATE_48MB;
+ ieee->current_network.rates_ex[7] =
+ RTLLIB_OFDM_RATE_54MB;
+
+ ieee->rate = 108;
+ } else {
+ ieee->current_network.rates_ex_len = 0;
+ ieee->rate = 22;
+ }
+
+ ieee->current_network.qos_data.supported = 0;
+ ieee->SetWirelessMode(ieee->dev, IEEE_G);
+ ieee->current_network.mode = ieee->mode;
+ ieee->current_network.atim_window = 0;
+ ieee->current_network.capability = WLAN_CAPABILITY_IBSS;
+ }
+
+ printk(KERN_INFO "%s(): ieee->mode = %d\n", __func__, ieee->mode);
+ if ((ieee->mode == IEEE_N_24G) || (ieee->mode == IEEE_N_5G))
+ HTUseDefaultSetting(ieee);
+ else
+ ieee->pHTInfo->bCurrentHTSupport = false;
+
+ ieee->SetHwRegHandler(ieee->dev, HW_VAR_MEDIA_STATUS,
+ (u8 *)(&ieee->state));
+
+ ieee->state = RTLLIB_LINKED;
+ ieee->link_change(ieee->dev);
+
+ HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
+ if (ieee->LedControlHandler != NULL)
+ ieee->LedControlHandler(ieee->dev, LED_CTL_LINK);
+
+ rtllib_start_send_beacons(ieee);
+
+ notify_wx_assoc_event(ieee);
+
+ if (ieee->data_hard_resume)
+ ieee->data_hard_resume(ieee->dev);
+
+ netif_carrier_on(ieee->dev);
+
+ up(&ieee->wx_sem);
+}
+
+inline void rtllib_start_ibss(struct rtllib_device *ieee)
+{
+ queue_delayed_work_rsl(ieee->wq, &ieee->start_ibss_wq, MSECS(150));
+}
+
+/* this is called only in user context, with wx_sem held */
+void rtllib_start_bss(struct rtllib_device *ieee)
+{
+ unsigned long flags;
+ if (IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee)) {
+ if (!ieee->bGlobalDomain)
+ return;
+ }
+ /* check if we have already found the net we
+ * are interested in (if any).
+ * if not (we are disassociated and we are not
+ * in associating / authenticating phase) start the background scanning.
+ */
+ rtllib_softmac_check_all_nets(ieee);
+
+ /* ensure no-one start an associating process (thus setting
+ * the ieee->state to rtllib_ASSOCIATING) while we
+ * have just cheked it and we are going to enable scan.
+ * The rtllib_new_net function is always called with
+ * lock held (from both rtllib_softmac_check_all_nets and
+ * the rx path), so we cannot be in the middle of such function
+ */
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ if (ieee->state == RTLLIB_NOLINK)
+ rtllib_start_scan(ieee);
+ spin_unlock_irqrestore(&ieee->lock, flags);
+}
+
+static void rtllib_link_change_wq(void *data)
+{
+ struct rtllib_device *ieee = container_of_dwork_rsl(data,
+ struct rtllib_device, link_change_wq);
+ ieee->link_change(ieee->dev);
+}
+/* called only in userspace context */
+void rtllib_disassociate(struct rtllib_device *ieee)
+{
+ netif_carrier_off(ieee->dev);
+ if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)
+ rtllib_reset_queue(ieee);
+
+ if (ieee->data_hard_stop)
+ ieee->data_hard_stop(ieee->dev);
+ if (IS_DOT11D_ENABLE(ieee))
+ Dot11d_Reset(ieee);
+ ieee->state = RTLLIB_NOLINK;
+ ieee->is_set_key = false;
+ ieee->wap_set = 0;
+
+ queue_delayed_work_rsl(ieee->wq, &ieee->link_change_wq, 0);
+
+ notify_wx_assoc_event(ieee);
+}
+
+static void rtllib_associate_retry_wq(void *data)
+{
+ struct rtllib_device *ieee = container_of_dwork_rsl(data,
+ struct rtllib_device, associate_retry_wq);
+ unsigned long flags;
+
+ down(&ieee->wx_sem);
+ if (!ieee->proto_started)
+ goto exit;
+
+ if (ieee->state != RTLLIB_ASSOCIATING_RETRY)
+ goto exit;
+
+ /* until we do not set the state to RTLLIB_NOLINK
+ * there are no possibility to have someone else trying
+ * to start an association procdure (we get here with
+ * ieee->state = RTLLIB_ASSOCIATING).
+ * When we set the state to RTLLIB_NOLINK it is possible
+ * that the RX path run an attempt to associate, but
+ * both rtllib_softmac_check_all_nets and the
+ * RX path works with ieee->lock held so there are no
+ * problems. If we are still disassociated then start a scan.
+ * the lock here is necessary to ensure no one try to start
+ * an association procedure when we have just checked the
+ * state and we are going to start the scan.
+ */
+ ieee->beinretry = true;
+ ieee->state = RTLLIB_NOLINK;
+
+ rtllib_softmac_check_all_nets(ieee);
+
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ if (ieee->state == RTLLIB_NOLINK)
+ rtllib_start_scan(ieee);
+ spin_unlock_irqrestore(&ieee->lock, flags);
+
+ ieee->beinretry = false;
+exit:
+ up(&ieee->wx_sem);
+}
+
+struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
+{
+ u8 broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ struct sk_buff *skb;
+ struct rtllib_probe_response *b;
+ skb = rtllib_probe_resp(ieee, broadcast_addr);
+
+ if (!skb)
+ return NULL;
+
+ b = (struct rtllib_probe_response *) skb->data;
+ b->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_BEACON);
+
+ return skb;
+
+}
+
+struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee)
+{
+ struct sk_buff *skb;
+ struct rtllib_probe_response *b;
+
+ skb = rtllib_get_beacon_(ieee);
+ if (!skb)
+ return NULL;
+
+ b = (struct rtllib_probe_response *) skb->data;
+ b->header.seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+
+ if (ieee->seq_ctrl[0] == 0xFFF)
+ ieee->seq_ctrl[0] = 0;
+ else
+ ieee->seq_ctrl[0]++;
+
+ return skb;
+}
+
+void rtllib_softmac_stop_protocol(struct rtllib_device *ieee, u8 mesh_flag,
+ u8 shutdown)
+{
+ rtllib_stop_scan_syncro(ieee);
+ down(&ieee->wx_sem);
+ rtllib_stop_protocol(ieee, shutdown);
+ up(&ieee->wx_sem);
+}
+
+
+void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown)
+{
+ if (!ieee->proto_started)
+ return;
+
+ if (shutdown) {
+ ieee->proto_started = 0;
+ ieee->proto_stoppping = 1;
+ if (ieee->rtllib_ips_leave != NULL)
+ ieee->rtllib_ips_leave(ieee->dev);
+ }
+
+ rtllib_stop_send_beacons(ieee);
+ del_timer_sync(&ieee->associate_timer);
+ cancel_delayed_work(&ieee->associate_retry_wq);
+ cancel_delayed_work(&ieee->start_ibss_wq);
+ cancel_delayed_work(&ieee->link_change_wq);
+ rtllib_stop_scan(ieee);
+
+ if (ieee->state <= RTLLIB_ASSOCIATING_AUTHENTICATED)
+ ieee->state = RTLLIB_NOLINK;
+
+ if (ieee->state == RTLLIB_LINKED) {
+ if (ieee->iw_mode == IW_MODE_INFRA)
+ SendDisassociation(ieee, 1, deauth_lv_ss);
+ rtllib_disassociate(ieee);
+ }
+
+ if (shutdown) {
+ RemoveAllTS(ieee);
+ ieee->proto_stoppping = 0;
+ }
+ kfree(ieee->assocreq_ies);
+ ieee->assocreq_ies = NULL;
+ ieee->assocreq_ies_len = 0;
+ kfree(ieee->assocresp_ies);
+ ieee->assocresp_ies = NULL;
+ ieee->assocresp_ies_len = 0;
+}
+
+void rtllib_softmac_start_protocol(struct rtllib_device *ieee, u8 mesh_flag)
+{
+ down(&ieee->wx_sem);
+ rtllib_start_protocol(ieee);
+ up(&ieee->wx_sem);
+}
+
+void rtllib_start_protocol(struct rtllib_device *ieee)
+{
+ short ch = 0;
+ int i = 0;
+
+ rtllib_update_active_chan_map(ieee);
+
+ if (ieee->proto_started)
+ return;
+
+ ieee->proto_started = 1;
+
+ if (ieee->current_network.channel == 0) {
+ do {
+ ch++;
+ if (ch > MAX_CHANNEL_NUMBER)
+ return; /* no channel found */
+ } while (!ieee->active_channel_map[ch]);
+ ieee->current_network.channel = ch;
+ }
+
+ if (ieee->current_network.beacon_interval == 0)
+ ieee->current_network.beacon_interval = 100;
+
+ for (i = 0; i < 17; i++) {
+ ieee->last_rxseq_num[i] = -1;
+ ieee->last_rxfrag_num[i] = -1;
+ ieee->last_packet_time[i] = 0;
+ }
+
+ if (ieee->UpdateBeaconInterruptHandler)
+ ieee->UpdateBeaconInterruptHandler(ieee->dev, false);
+
+ ieee->wmm_acm = 0;
+ /* if the user set the MAC of the ad-hoc cell and then
+ * switch to managed mode, shall we make sure that association
+ * attempts does not fail just because the user provide the essid
+ * and the nic is still checking for the AP MAC ??
+ */
+ if (ieee->iw_mode == IW_MODE_INFRA) {
+ rtllib_start_bss(ieee);
+ } else if (ieee->iw_mode == IW_MODE_ADHOC) {
+ if (ieee->UpdateBeaconInterruptHandler)
+ ieee->UpdateBeaconInterruptHandler(ieee->dev, true);
+
+ rtllib_start_ibss(ieee);
+
+ } else if (ieee->iw_mode == IW_MODE_MASTER) {
+ rtllib_start_master_bss(ieee);
+ } else if (ieee->iw_mode == IW_MODE_MONITOR) {
+ rtllib_start_monitor_mode(ieee);
+ }
+}
+
+void rtllib_softmac_init(struct rtllib_device *ieee)
+{
+ int i;
+ memset(&ieee->current_network, 0, sizeof(struct rtllib_network));
+
+ ieee->state = RTLLIB_NOLINK;
+ for (i = 0; i < 5; i++)
+ ieee->seq_ctrl[i] = 0;
+ ieee->pDot11dInfo = kmalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
+ if (!ieee->pDot11dInfo)
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "can't alloc memory for DOT11D\n");
+ memset(ieee->pDot11dInfo, 0, sizeof(struct rt_dot11d_info));
+ ieee->LinkDetectInfo.SlotIndex = 0;
+ ieee->LinkDetectInfo.SlotNum = 2;
+ ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0;
+ ieee->LinkDetectInfo.NumRecvDataInPeriod = 0;
+ ieee->LinkDetectInfo.NumTxOkInPeriod = 0;
+ ieee->LinkDetectInfo.NumRxOkInPeriod = 0;
+ ieee->LinkDetectInfo.NumRxUnicastOkInPeriod = 0;
+ ieee->bIsAggregateFrame = false;
+ ieee->assoc_id = 0;
+ ieee->queue_stop = 0;
+ ieee->scanning_continue = 0;
+ ieee->softmac_features = 0;
+ ieee->wap_set = 0;
+ ieee->ssid_set = 0;
+ ieee->proto_started = 0;
+ ieee->proto_stoppping = 0;
+ ieee->basic_rate = RTLLIB_DEFAULT_BASIC_RATE;
+ ieee->rate = 22;
+ ieee->ps = RTLLIB_PS_DISABLED;
+ ieee->sta_sleep = LPS_IS_WAKE;
+
+ ieee->Regdot11HTOperationalRateSet[0] = 0xff;
+ ieee->Regdot11HTOperationalRateSet[1] = 0xff;
+ ieee->Regdot11HTOperationalRateSet[4] = 0x01;
+
+ ieee->Regdot11TxHTOperationalRateSet[0] = 0xff;
+ ieee->Regdot11TxHTOperationalRateSet[1] = 0xff;
+ ieee->Regdot11TxHTOperationalRateSet[4] = 0x01;
+
+ ieee->FirstIe_InScan = false;
+ ieee->actscanning = false;
+ ieee->beinretry = false;
+ ieee->is_set_key = false;
+ init_mgmt_queue(ieee);
+
+ ieee->sta_edca_param[0] = 0x0000A403;
+ ieee->sta_edca_param[1] = 0x0000A427;
+ ieee->sta_edca_param[2] = 0x005E4342;
+ ieee->sta_edca_param[3] = 0x002F3262;
+ ieee->aggregation = true;
+ ieee->enable_rx_imm_BA = 1;
+ ieee->tx_pending.txb = NULL;
+
+ _setup_timer(&ieee->associate_timer,
+ rtllib_associate_abort_cb,
+ (unsigned long) ieee);
+
+ _setup_timer(&ieee->beacon_timer,
+ rtllib_send_beacon_cb,
+ (unsigned long) ieee);
+
+
+ ieee->wq = create_workqueue(DRV_NAME);
+
+ INIT_DELAYED_WORK_RSL(&ieee->link_change_wq,
+ (void *)rtllib_link_change_wq, ieee);
+ INIT_DELAYED_WORK_RSL(&ieee->start_ibss_wq,
+ (void *)rtllib_start_ibss_wq, ieee);
+ INIT_WORK_RSL(&ieee->associate_complete_wq,
+ (void *)rtllib_associate_complete_wq, ieee);
+ INIT_DELAYED_WORK_RSL(&ieee->associate_procedure_wq,
+ (void *)rtllib_associate_procedure_wq, ieee);
+ INIT_DELAYED_WORK_RSL(&ieee->softmac_scan_wq,
+ (void *)rtllib_softmac_scan_wq, ieee);
+ INIT_DELAYED_WORK_RSL(&ieee->softmac_hint11d_wq,
+ (void *)rtllib_softmac_hint11d_wq, ieee);
+ INIT_DELAYED_WORK_RSL(&ieee->associate_retry_wq,
+ (void *)rtllib_associate_retry_wq, ieee);
+ INIT_WORK_RSL(&ieee->wx_sync_scan_wq, (void *)rtllib_wx_sync_scan_wq,
+ ieee);
+
+ sema_init(&ieee->wx_sem, 1);
+ sema_init(&ieee->scan_sem, 1);
+ sema_init(&ieee->ips_sem, 1);
+
+ spin_lock_init(&ieee->mgmt_tx_lock);
+ spin_lock_init(&ieee->beacon_lock);
+
+ tasklet_init(&ieee->ps_task,
+ (void(*)(unsigned long)) rtllib_sta_ps,
+ (unsigned long)ieee);
+
+}
+
+void rtllib_softmac_free(struct rtllib_device *ieee)
+{
+ down(&ieee->wx_sem);
+ kfree(ieee->pDot11dInfo);
+ ieee->pDot11dInfo = NULL;
+ del_timer_sync(&ieee->associate_timer);
+
+ cancel_delayed_work(&ieee->associate_retry_wq);
+ destroy_workqueue(ieee->wq);
+ up(&ieee->wx_sem);
+}
+
+/********************************************************
+ * Start of WPA code. *
+ * this is stolen from the ipw2200 driver *
+ ********************************************************/
+
+
+static int rtllib_wpa_enable(struct rtllib_device *ieee, int value)
+{
+ /* This is called when wpa_supplicant loads and closes the driver
+ * interface. */
+ printk(KERN_INFO "%s WPA\n", value ? "enabling" : "disabling");
+ ieee->wpa_enabled = value;
+ memset(ieee->ap_mac_addr, 0, 6);
+ return 0;
+}
+
+
+static void rtllib_wpa_assoc_frame(struct rtllib_device *ieee, char *wpa_ie,
+ int wpa_ie_len)
+{
+ /* make sure WPA is enabled */
+ rtllib_wpa_enable(ieee, 1);
+
+ rtllib_disassociate(ieee);
+}
+
+
+static int rtllib_wpa_mlme(struct rtllib_device *ieee, int command, int reason)
+{
+
+ int ret = 0;
+
+ switch (command) {
+ case IEEE_MLME_STA_DEAUTH:
+ break;
+
+ case IEEE_MLME_STA_DISASSOC:
+ rtllib_disassociate(ieee);
+ break;
+
+ default:
+ printk(KERN_INFO "Unknown MLME request: %d\n", command);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+
+static int rtllib_wpa_set_wpa_ie(struct rtllib_device *ieee,
+ struct ieee_param *param, int plen)
+{
+ u8 *buf;
+
+ if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
+ (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
+ return -EINVAL;
+
+ if (param->u.wpa_ie.len) {
+ buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
+ kfree(ieee->wpa_ie);
+ ieee->wpa_ie = buf;
+ ieee->wpa_ie_len = param->u.wpa_ie.len;
+ } else {
+ kfree(ieee->wpa_ie);
+ ieee->wpa_ie = NULL;
+ ieee->wpa_ie_len = 0;
+ }
+
+ rtllib_wpa_assoc_frame(ieee, ieee->wpa_ie, ieee->wpa_ie_len);
+ return 0;
+}
+
+#define AUTH_ALG_OPEN_SYSTEM 0x1
+#define AUTH_ALG_SHARED_KEY 0x2
+#define AUTH_ALG_LEAP 0x4
+static int rtllib_wpa_set_auth_algs(struct rtllib_device *ieee, int value)
+{
+
+ struct rtllib_security sec = {
+ .flags = SEC_AUTH_MODE,
+ };
+ int ret = 0;
+
+ if (value & AUTH_ALG_SHARED_KEY) {
+ sec.auth_mode = WLAN_AUTH_SHARED_KEY;
+ ieee->open_wep = 0;
+ ieee->auth_mode = 1;
+ } else if (value & AUTH_ALG_OPEN_SYSTEM) {
+ sec.auth_mode = WLAN_AUTH_OPEN;
+ ieee->open_wep = 1;
+ ieee->auth_mode = 0;
+ } else if (value & AUTH_ALG_LEAP) {
+ sec.auth_mode = WLAN_AUTH_LEAP >> 6;
+ ieee->open_wep = 1;
+ ieee->auth_mode = 2;
+ }
+
+
+ if (ieee->set_security)
+ ieee->set_security(ieee->dev, &sec);
+
+ return ret;
+}
+
+static int rtllib_wpa_set_param(struct rtllib_device *ieee, u8 name, u32 value)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ switch (name) {
+ case IEEE_PARAM_WPA_ENABLED:
+ ret = rtllib_wpa_enable(ieee, value);
+ break;
+
+ case IEEE_PARAM_TKIP_COUNTERMEASURES:
+ ieee->tkip_countermeasures = value;
+ break;
+
+ case IEEE_PARAM_DROP_UNENCRYPTED:
+ {
+ /* HACK:
+ *
+ * wpa_supplicant calls set_wpa_enabled when the driver
+ * is loaded and unloaded, regardless of if WPA is being
+ * used. No other calls are made which can be used to
+ * determine if encryption will be used or not prior to
+ * association being expected. If encryption is not being
+ * used, drop_unencrypted is set to false, else true -- we
+ * can use this to determine if the CAP_PRIVACY_ON bit should
+ * be set.
+ */
+ struct rtllib_security sec = {
+ .flags = SEC_ENABLED,
+ .enabled = value,
+ };
+ ieee->drop_unencrypted = value;
+ /* We only change SEC_LEVEL for open mode. Others
+ * are set by ipw_wpa_set_encryption.
+ */
+ if (!value) {
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_0;
+ } else {
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_1;
+ }
+ if (ieee->set_security)
+ ieee->set_security(ieee->dev, &sec);
+ break;
+ }
+
+ case IEEE_PARAM_PRIVACY_INVOKED:
+ ieee->privacy_invoked = value;
+ break;
+
+ case IEEE_PARAM_AUTH_ALGS:
+ ret = rtllib_wpa_set_auth_algs(ieee, value);
+ break;
+
+ case IEEE_PARAM_IEEE_802_1X:
+ ieee->ieee802_1x = value;
+ break;
+ case IEEE_PARAM_WPAX_SELECT:
+ spin_lock_irqsave(&ieee->wpax_suitlist_lock, flags);
+ spin_unlock_irqrestore(&ieee->wpax_suitlist_lock, flags);
+ break;
+
+ default:
+ printk(KERN_INFO "Unknown WPA param: %d\n", name);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/* implementation borrowed from hostap driver */
+static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
+ struct ieee_param *param, int param_len,
+ u8 is_mesh)
+{
+ int ret = 0;
+ struct rtllib_crypto_ops *ops;
+ struct rtllib_crypt_data **crypt;
+
+ struct rtllib_security sec = {
+ .flags = 0,
+ };
+
+ param->u.crypt.err = 0;
+ param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
+
+ if (param_len !=
+ (int) ((char *) param->u.crypt.key - (char *) param) +
+ param->u.crypt.key_len) {
+ printk(KERN_INFO "Len mismatch %d, %d\n", param_len,
+ param->u.crypt.key_len);
+ return -EINVAL;
+ }
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+ if (param->u.crypt.idx >= WEP_KEYS)
+ return -EINVAL;
+ crypt = &ieee->crypt[param->u.crypt.idx];
+ } else {
+ return -EINVAL;
+ }
+
+ if (strcmp(param->u.crypt.alg, "none") == 0) {
+ if (crypt) {
+ sec.enabled = 0;
+ sec.level = SEC_LEVEL_0;
+ sec.flags |= SEC_ENABLED | SEC_LEVEL;
+ rtllib_crypt_delayed_deinit(ieee, crypt);
+ }
+ goto done;
+ }
+ sec.enabled = 1;
+ sec.flags |= SEC_ENABLED;
+
+ /* IPW HW cannot build TKIP MIC, host decryption still needed. */
+ if (!(ieee->host_encrypt || ieee->host_decrypt) &&
+ strcmp(param->u.crypt.alg, "TKIP"))
+ goto skip_host_crypt;
+
+ ops = rtllib_get_crypto_ops(param->u.crypt.alg);
+ if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
+ request_module("rtllib_crypt_wep");
+ ops = rtllib_get_crypto_ops(param->u.crypt.alg);
+ } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ request_module("rtllib_crypt_tkip");
+ ops = rtllib_get_crypto_ops(param->u.crypt.alg);
+ } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ request_module("rtllib_crypt_ccmp");
+ ops = rtllib_get_crypto_ops(param->u.crypt.alg);
+ }
+ if (ops == NULL) {
+ printk(KERN_INFO "unknown crypto alg '%s'\n",
+ param->u.crypt.alg);
+ param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG;
+ ret = -EINVAL;
+ goto done;
+ }
+ if (*crypt == NULL || (*crypt)->ops != ops) {
+ struct rtllib_crypt_data *new_crypt;
+
+ rtllib_crypt_delayed_deinit(ieee, crypt);
+
+ new_crypt = (struct rtllib_crypt_data *)
+ kmalloc(sizeof(*new_crypt), GFP_KERNEL);
+ if (new_crypt == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ memset(new_crypt, 0, sizeof(struct rtllib_crypt_data));
+ new_crypt->ops = ops;
+ if (new_crypt->ops)
+ new_crypt->priv =
+ new_crypt->ops->init(param->u.crypt.idx);
+
+ if (new_crypt->priv == NULL) {
+ kfree(new_crypt);
+ param->u.crypt.err = IEEE_CRYPT_ERR_CRYPT_INIT_FAILED;
+ ret = -EINVAL;
+ goto done;
+ }
+
+ *crypt = new_crypt;
+ }
+
+ if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
+ (*crypt)->ops->set_key(param->u.crypt.key,
+ param->u.crypt.key_len, param->u.crypt.seq,
+ (*crypt)->priv) < 0) {
+ printk(KERN_INFO "key setting failed\n");
+ param->u.crypt.err = IEEE_CRYPT_ERR_KEY_SET_FAILED;
+ ret = -EINVAL;
+ goto done;
+ }
+
+ skip_host_crypt:
+ if (param->u.crypt.set_tx) {
+ ieee->tx_keyidx = param->u.crypt.idx;
+ sec.active_key = param->u.crypt.idx;
+ sec.flags |= SEC_ACTIVE_KEY;
+ } else
+ sec.flags &= ~SEC_ACTIVE_KEY;
+
+ if (param->u.crypt.alg != NULL) {
+ memcpy(sec.keys[param->u.crypt.idx],
+ param->u.crypt.key,
+ param->u.crypt.key_len);
+ sec.key_sizes[param->u.crypt.idx] = param->u.crypt.key_len;
+ sec.flags |= (1 << param->u.crypt.idx);
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_1;
+ } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_2;
+ } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_3;
+ }
+ }
+ done:
+ if (ieee->set_security)
+ ieee->set_security(ieee->dev, &sec);
+
+ /* Do not reset port if card is in Managed mode since resetting will
+ * generate new IEEE 802.11 authentication which may end up in looping
+ * with IEEE 802.1X. If your hardware requires a reset after WEP
+ * configuration (for example... Prism2), implement the reset_port in
+ * the callbacks structures used to initialize the 802.11 stack. */
+ if (ieee->reset_on_keychange &&
+ ieee->iw_mode != IW_MODE_INFRA &&
+ ieee->reset_port &&
+ ieee->reset_port(ieee->dev)) {
+ printk(KERN_INFO "reset_port failed\n");
+ param->u.crypt.err = IEEE_CRYPT_ERR_CARD_CONF_FAILED;
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+inline struct sk_buff *rtllib_disauth_skb(struct rtllib_network *beacon,
+ struct rtllib_device *ieee, u16 asRsn)
+{
+ struct sk_buff *skb;
+ struct rtllib_disauth *disauth;
+ int len = sizeof(struct rtllib_disauth) + ieee->tx_headroom;
+
+ skb = dev_alloc_skb(len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, ieee->tx_headroom);
+
+ disauth = (struct rtllib_disauth *) skb_put(skb,
+ sizeof(struct rtllib_disauth));
+ disauth->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_DEAUTH);
+ disauth->header.duration_id = 0;
+
+ memcpy(disauth->header.addr1, beacon->bssid, ETH_ALEN);
+ memcpy(disauth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
+ memcpy(disauth->header.addr3, beacon->bssid, ETH_ALEN);
+
+ disauth->reason = cpu_to_le16(asRsn);
+ return skb;
+}
+
+inline struct sk_buff *rtllib_disassociate_skb(struct rtllib_network *beacon,
+ struct rtllib_device *ieee, u16 asRsn)
+{
+ struct sk_buff *skb;
+ struct rtllib_disassoc *disass;
+ int len = sizeof(struct rtllib_disassoc) + ieee->tx_headroom;
+ skb = dev_alloc_skb(len);
+
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, ieee->tx_headroom);
+
+ disass = (struct rtllib_disassoc *) skb_put(skb,
+ sizeof(struct rtllib_disassoc));
+ disass->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_DISASSOC);
+ disass->header.duration_id = 0;
+
+ memcpy(disass->header.addr1, beacon->bssid, ETH_ALEN);
+ memcpy(disass->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
+ memcpy(disass->header.addr3, beacon->bssid, ETH_ALEN);
+
+ disass->reason = cpu_to_le16(asRsn);
+ return skb;
+}
+
+void SendDisassociation(struct rtllib_device *ieee, bool deauth, u16 asRsn)
+{
+ struct rtllib_network *beacon = &ieee->current_network;
+ struct sk_buff *skb;
+
+ if (deauth)
+ skb = rtllib_disauth_skb(beacon, ieee, asRsn);
+ else
+ skb = rtllib_disassociate_skb(beacon, ieee, asRsn);
+
+ if (skb)
+ softmac_mgmt_xmit(skb, ieee);
+}
+
+u8 rtllib_ap_sec_type(struct rtllib_device *ieee)
+{
+ static u8 ccmp_ie[4] = {0x00, 0x50, 0xf2, 0x04};
+ static u8 ccmp_rsn_ie[4] = {0x00, 0x0f, 0xac, 0x04};
+ int wpa_ie_len = ieee->wpa_ie_len;
+ struct rtllib_crypt_data *crypt;
+ int encrypt;
+
+ crypt = ieee->crypt[ieee->tx_keyidx];
+ encrypt = (ieee->current_network.capability & WLAN_CAPABILITY_PRIVACY)
+ || (ieee->host_encrypt && crypt && crypt->ops &&
+ (0 == strcmp(crypt->ops->name, "WEP")));
+
+ /* simply judge */
+ if (encrypt && (wpa_ie_len == 0)) {
+ return SEC_ALG_WEP;
+ } else if ((wpa_ie_len != 0)) {
+ if (((ieee->wpa_ie[0] == 0xdd) &&
+ (!memcmp(&(ieee->wpa_ie[14]), ccmp_ie, 4))) ||
+ ((ieee->wpa_ie[0] == 0x30) &&
+ (!memcmp(&ieee->wpa_ie[10], ccmp_rsn_ie, 4))))
+ return SEC_ALG_CCMP;
+ else
+ return SEC_ALG_TKIP;
+ } else {
+ return SEC_ALG_NONE;
+ }
+}
+
+int rtllib_wpa_supplicant_ioctl(struct rtllib_device *ieee, struct iw_point *p,
+ u8 is_mesh)
+{
+ struct ieee_param *param;
+ int ret = 0;
+
+ down(&ieee->wx_sem);
+
+ if (p->length < sizeof(struct ieee_param) || !p->pointer) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ param = kmalloc(p->length, GFP_KERNEL);
+ if (param == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (copy_from_user(param, p->pointer, p->length)) {
+ kfree(param);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ switch (param->cmd) {
+ case IEEE_CMD_SET_WPA_PARAM:
+ ret = rtllib_wpa_set_param(ieee, param->u.wpa_param.name,
+ param->u.wpa_param.value);
+ break;
+
+ case IEEE_CMD_SET_WPA_IE:
+ ret = rtllib_wpa_set_wpa_ie(ieee, param, p->length);
+ break;
+
+ case IEEE_CMD_SET_ENCRYPTION:
+ ret = rtllib_wpa_set_encryption(ieee, param, p->length, 0);
+ break;
+
+ case IEEE_CMD_MLME:
+ ret = rtllib_wpa_mlme(ieee, param->u.mlme.command,
+ param->u.mlme.reason_code);
+ break;
+
+ default:
+ printk(KERN_INFO "Unknown WPA supplicant request: %d\n",
+ param->cmd);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (ret == 0 && copy_to_user(p->pointer, param, p->length))
+ ret = -EFAULT;
+
+ kfree(param);
+out:
+ up(&ieee->wx_sem);
+
+ return ret;
+}
+
+void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
+{
+ u8 OpMode;
+ u8 i;
+ bool bFilterOutNonAssociatedBSSID = false;
+
+ rtllib->state = RTLLIB_NOLINK;
+
+ for (i = 0; i < 6; i++)
+ rtllib->current_network.bssid[i] = 0x55;
+
+ rtllib->OpMode = RT_OP_MODE_NO_LINK;
+ rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_BSSID,
+ rtllib->current_network.bssid);
+ OpMode = RT_OP_MODE_NO_LINK;
+ rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_MEDIA_STATUS, &OpMode);
+ rtllib_stop_send_beacons(rtllib);
+
+ bFilterOutNonAssociatedBSSID = false;
+ rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_CECHK_BSSID,
+ (u8 *)(&bFilterOutNonAssociatedBSSID));
+ notify_wx_assoc_event(rtllib);
+
+}
+
+void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib, u8 *asSta,
+ u8 asRsn)
+{
+ u8 i;
+ u8 OpMode;
+
+ RemovePeerTS(rtllib, asSta);
+
+
+ if (memcpy(rtllib->current_network.bssid, asSta, 6) == NULL) {
+ rtllib->state = RTLLIB_NOLINK;
+
+ for (i = 0; i < 6; i++)
+ rtllib->current_network.bssid[i] = 0x22;
+ OpMode = RT_OP_MODE_NO_LINK;
+ rtllib->OpMode = RT_OP_MODE_NO_LINK;
+ rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_MEDIA_STATUS,
+ (u8 *)(&OpMode));
+ rtllib_disassociate(rtllib);
+
+ rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_BSSID,
+ rtllib->current_network.bssid);
+
+ }
+
+}
+
+void
+rtllib_MgntDisconnectAP(
+ struct rtllib_device *rtllib,
+ u8 asRsn
+)
+{
+ bool bFilterOutNonAssociatedBSSID = false;
+
+ bFilterOutNonAssociatedBSSID = false;
+ rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_CECHK_BSSID,
+ (u8 *)(&bFilterOutNonAssociatedBSSID));
+ rtllib_MlmeDisassociateRequest(rtllib, rtllib->current_network.bssid,
+ asRsn);
+
+ rtllib->state = RTLLIB_NOLINK;
+}
+
+bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn)
+{
+ if (rtllib->ps != RTLLIB_PS_DISABLED)
+ rtllib->sta_wake_up(rtllib->dev);
+
+ if (rtllib->state == RTLLIB_LINKED) {
+ if (rtllib->iw_mode == IW_MODE_ADHOC)
+ rtllib_MgntDisconnectIBSS(rtllib);
+ if (rtllib->iw_mode == IW_MODE_INFRA)
+ rtllib_MgntDisconnectAP(rtllib, asRsn);
+
+ }
+
+ return true;
+}
+
+void notify_wx_assoc_event(struct rtllib_device *ieee)
+{
+ union iwreq_data wrqu;
+
+ if (ieee->cannot_notify)
+ return;
+
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ if (ieee->state == RTLLIB_LINKED)
+ memcpy(wrqu.ap_addr.sa_data, ieee->current_network.bssid,
+ ETH_ALEN);
+ else {
+
+ printk(KERN_INFO "%s(): Tell user space disconnected\n",
+ __func__);
+ memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+ }
+ wireless_send_event(ieee->dev, SIOCGIWAP, &wrqu, NULL);
+}
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
new file mode 100644
index 00000000000..22988fbd444
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -0,0 +1,645 @@
+/* IEEE 802.11 SoftMAC layer
+ * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ *
+ * Mostly extracted from the rtl8180-sa2400 driver for the
+ * in-kernel generic ieee802.11 stack.
+ *
+ * Some pieces of code might be stolen from ipw2100 driver
+ * copyright of who own it's copyright ;-)
+ *
+ * PS wx handler mostly stolen from hostap, copyright who
+ * own it's copyright ;-)
+ *
+ * released under the GPL
+ */
+
+
+#include "rtllib.h"
+#include "rtl_core.h"
+#include "dot11d.h"
+/* FIXME: add A freqs */
+
+const long rtllib_wlan_frequencies[] = {
+ 2412, 2417, 2422, 2427,
+ 2432, 2437, 2442, 2447,
+ 2452, 2457, 2462, 2467,
+ 2472, 2484
+};
+
+
+int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ int ret;
+ struct iw_freq *fwrq = &wrqu->freq;
+
+ down(&ieee->wx_sem);
+
+ if (ieee->iw_mode == IW_MODE_INFRA) {
+ ret = 0;
+ goto out;
+ }
+
+ /* if setting by freq convert to channel */
+ if (fwrq->e == 1) {
+ if ((fwrq->m >= (int) 2.412e8 &&
+ fwrq->m <= (int) 2.487e8)) {
+ int f = fwrq->m / 100000;
+ int c = 0;
+
+ while ((c < 14) && (f != rtllib_wlan_frequencies[c]))
+ c++;
+
+ /* hack to fall through */
+ fwrq->e = 0;
+ fwrq->m = c + 1;
+ }
+ }
+
+ if (fwrq->e > 0 || fwrq->m > 14 || fwrq->m < 1) {
+ ret = -EOPNOTSUPP;
+ goto out;
+
+ } else { /* Set the channel */
+
+ if (ieee->active_channel_map[fwrq->m] != 1) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ieee->current_network.channel = fwrq->m;
+ ieee->set_chan(ieee->dev, ieee->current_network.channel);
+
+ if (ieee->iw_mode == IW_MODE_ADHOC ||
+ ieee->iw_mode == IW_MODE_MASTER)
+ if (ieee->state == RTLLIB_LINKED) {
+ rtllib_stop_send_beacons(ieee);
+ rtllib_start_send_beacons(ieee);
+ }
+ }
+
+ ret = 0;
+out:
+ up(&ieee->wx_sem);
+ return ret;
+}
+
+
+int rtllib_wx_get_freq(struct rtllib_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ struct iw_freq *fwrq = &wrqu->freq;
+
+ if (ieee->current_network.channel == 0)
+ return -1;
+ fwrq->m = rtllib_wlan_frequencies[ieee->current_network.channel-1] *
+ 100000;
+ fwrq->e = 1;
+ return 0;
+}
+
+int rtllib_wx_get_wap(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ unsigned long flags;
+
+ wrqu->ap_addr.sa_family = ARPHRD_ETHER;
+
+ if (ieee->iw_mode == IW_MODE_MONITOR)
+ return -1;
+
+ /* We want avoid to give to the user inconsistent infos*/
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ if (ieee->state != RTLLIB_LINKED &&
+ ieee->state != RTLLIB_LINKED_SCANNING &&
+ ieee->wap_set == 0)
+
+ memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+ else
+ memcpy(wrqu->ap_addr.sa_data,
+ ieee->current_network.bssid, ETH_ALEN);
+
+ spin_unlock_irqrestore(&ieee->lock, flags);
+
+ return 0;
+}
+
+
+int rtllib_wx_set_wap(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *awrq,
+ char *extra)
+{
+
+ int ret = 0;
+ u8 zero[] = {0, 0, 0, 0, 0, 0};
+ unsigned long flags;
+
+ short ifup = ieee->proto_started;
+ struct sockaddr *temp = (struct sockaddr *)awrq;
+
+ rtllib_stop_scan_syncro(ieee);
+
+ down(&ieee->wx_sem);
+ /* use ifconfig hw ether */
+ if (ieee->iw_mode == IW_MODE_MASTER) {
+ ret = -1;
+ goto out;
+ }
+
+ if (temp->sa_family != ARPHRD_ETHER) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (memcmp(temp->sa_data, zero, ETH_ALEN) == 0) {
+ spin_lock_irqsave(&ieee->lock, flags);
+ memcpy(ieee->current_network.bssid, temp->sa_data, ETH_ALEN);
+ ieee->wap_set = 0;
+ spin_unlock_irqrestore(&ieee->lock, flags);
+ ret = -1;
+ goto out;
+ }
+
+
+ if (ifup)
+ rtllib_stop_protocol(ieee, true);
+
+ /* just to avoid to give inconsistent infos in the
+ * get wx method. not really needed otherwise
+ */
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ ieee->cannot_notify = false;
+ memcpy(ieee->current_network.bssid, temp->sa_data, ETH_ALEN);
+ ieee->wap_set = (memcmp(temp->sa_data, zero, ETH_ALEN) != 0);
+
+ spin_unlock_irqrestore(&ieee->lock, flags);
+
+ if (ifup)
+ rtllib_start_protocol(ieee);
+out:
+ up(&ieee->wx_sem);
+ return ret;
+}
+
+int rtllib_wx_get_essid(struct rtllib_device *ieee, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ int len, ret = 0;
+ unsigned long flags;
+
+ if (ieee->iw_mode == IW_MODE_MONITOR)
+ return -1;
+
+ /* We want avoid to give to the user inconsistent infos*/
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ if (ieee->current_network.ssid[0] == '\0' ||
+ ieee->current_network.ssid_len == 0) {
+ ret = -1;
+ goto out;
+ }
+
+ if (ieee->state != RTLLIB_LINKED &&
+ ieee->state != RTLLIB_LINKED_SCANNING &&
+ ieee->ssid_set == 0) {
+ ret = -1;
+ goto out;
+ }
+ len = ieee->current_network.ssid_len;
+ wrqu->essid.length = len;
+ strncpy(b, ieee->current_network.ssid, len);
+ wrqu->essid.flags = 1;
+
+out:
+ spin_unlock_irqrestore(&ieee->lock, flags);
+
+ return ret;
+
+}
+
+int rtllib_wx_set_rate(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+
+ u32 target_rate = wrqu->bitrate.value;
+
+ ieee->rate = target_rate/100000;
+ return 0;
+}
+
+int rtllib_wx_get_rate(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u32 tmp_rate = 0;
+ tmp_rate = TxCountToDataRate(ieee,
+ ieee->softmac_stats.CurrentShowTxate);
+ wrqu->bitrate.value = tmp_rate * 500000;
+
+ return 0;
+}
+
+
+int rtllib_wx_set_rts(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ if (wrqu->rts.disabled || !wrqu->rts.fixed)
+ ieee->rts = DEFAULT_RTS_THRESHOLD;
+ else {
+ if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
+ wrqu->rts.value > MAX_RTS_THRESHOLD)
+ return -EINVAL;
+ ieee->rts = wrqu->rts.value;
+ }
+ return 0;
+}
+
+int rtllib_wx_get_rts(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ wrqu->rts.value = ieee->rts;
+ wrqu->rts.fixed = 0; /* no auto select */
+ wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
+ return 0;
+}
+
+int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ int set_mode_status = 0;
+
+ rtllib_stop_scan_syncro(ieee);
+ down(&ieee->wx_sem);
+ switch (wrqu->mode) {
+ case IW_MODE_MONITOR:
+ case IW_MODE_ADHOC:
+ case IW_MODE_INFRA:
+ break;
+ case IW_MODE_AUTO:
+ wrqu->mode = IW_MODE_INFRA;
+ break;
+ default:
+ set_mode_status = -EINVAL;
+ goto out;
+ }
+
+ if (wrqu->mode == ieee->iw_mode)
+ goto out;
+
+ if (wrqu->mode == IW_MODE_MONITOR) {
+ ieee->dev->type = ARPHRD_IEEE80211;
+ rtllib_EnableNetMonitorMode(ieee->dev, false);
+ } else {
+ ieee->dev->type = ARPHRD_ETHER;
+ if (ieee->iw_mode == IW_MODE_MONITOR)
+ rtllib_DisableNetMonitorMode(ieee->dev, false);
+ }
+
+ if (!ieee->proto_started) {
+ ieee->iw_mode = wrqu->mode;
+ } else {
+ rtllib_stop_protocol(ieee, true);
+ ieee->iw_mode = wrqu->mode;
+ rtllib_start_protocol(ieee);
+ }
+
+out:
+ up(&ieee->wx_sem);
+ return set_mode_status;
+}
+
+void rtllib_wx_sync_scan_wq(void *data)
+{
+ struct rtllib_device *ieee = container_of_work_rsl(data,
+ struct rtllib_device, wx_sync_scan_wq);
+ short chan;
+ enum ht_extchnl_offset chan_offset = 0;
+ enum ht_channel_width bandwidth = 0;
+ int b40M = 0;
+ static int count;
+
+ if (!(ieee->softmac_features & IEEE_SOFTMAC_SCAN)) {
+ rtllib_start_scan_syncro(ieee, 0);
+ goto out;
+ }
+
+ chan = ieee->current_network.channel;
+
+ if (ieee->LeisurePSLeave)
+ ieee->LeisurePSLeave(ieee->dev);
+ /* notify AP to be in PS mode */
+ rtllib_sta_ps_send_null_frame(ieee, 1);
+ rtllib_sta_ps_send_null_frame(ieee, 1);
+
+ rtllib_stop_all_queues(ieee);
+
+ if (ieee->data_hard_stop)
+ ieee->data_hard_stop(ieee->dev);
+ rtllib_stop_send_beacons(ieee);
+ ieee->state = RTLLIB_LINKED_SCANNING;
+ ieee->link_change(ieee->dev);
+ /* wait for ps packet to be kicked out successfully */
+ msleep(50);
+
+ if (ieee->ScanOperationBackupHandler)
+ ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP);
+
+ if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT &&
+ ieee->pHTInfo->bCurBW40MHz) {
+ b40M = 1;
+ chan_offset = ieee->pHTInfo->CurSTAExtChnlOffset;
+ bandwidth = (enum ht_channel_width)ieee->pHTInfo->bCurBW40MHz;
+ RT_TRACE(COMP_DBG, "Scan in 40M, force to 20M first:%d, %d\n",
+ chan_offset, bandwidth);
+ ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20,
+ HT_EXTCHNL_OFFSET_NO_EXT);
+ }
+
+ rtllib_start_scan_syncro(ieee, 0);
+
+ if (b40M) {
+ RT_TRACE(COMP_DBG, "Scan in 20M, back to 40M\n");
+ if (chan_offset == HT_EXTCHNL_OFFSET_UPPER)
+ ieee->set_chan(ieee->dev, chan + 2);
+ else if (chan_offset == HT_EXTCHNL_OFFSET_LOWER)
+ ieee->set_chan(ieee->dev, chan - 2);
+ else
+ ieee->set_chan(ieee->dev, chan);
+ ieee->SetBWModeHandler(ieee->dev, bandwidth, chan_offset);
+ } else {
+ ieee->set_chan(ieee->dev, chan);
+ }
+
+ if (ieee->ScanOperationBackupHandler)
+ ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_RESTORE);
+
+ ieee->state = RTLLIB_LINKED;
+ ieee->link_change(ieee->dev);
+
+ /* Notify AP that I wake up again */
+ rtllib_sta_ps_send_null_frame(ieee, 0);
+
+ if (ieee->LinkDetectInfo.NumRecvBcnInPeriod == 0 ||
+ ieee->LinkDetectInfo.NumRecvDataInPeriod == 0) {
+ ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1;
+ ieee->LinkDetectInfo.NumRecvDataInPeriod = 1;
+ }
+
+ if (ieee->data_hard_resume)
+ ieee->data_hard_resume(ieee->dev);
+
+ if (ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER)
+ rtllib_start_send_beacons(ieee);
+
+ rtllib_wake_all_queues(ieee);
+
+ count = 0;
+out:
+ up(&ieee->wx_sem);
+
+}
+
+int rtllib_wx_set_scan(struct rtllib_device *ieee, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ int ret = 0;
+
+ down(&ieee->wx_sem);
+
+ if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)) {
+ ret = -1;
+ goto out;
+ }
+
+ if (ieee->state == RTLLIB_LINKED) {
+ queue_work_rsl(ieee->wq, &ieee->wx_sync_scan_wq);
+ /* intentionally forget to up sem */
+ return 0;
+ }
+
+out:
+ up(&ieee->wx_sem);
+ return ret;
+}
+
+int rtllib_wx_set_essid(struct rtllib_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+
+ int ret = 0, len, i;
+ short proto_started;
+ unsigned long flags;
+
+ rtllib_stop_scan_syncro(ieee);
+ down(&ieee->wx_sem);
+
+ proto_started = ieee->proto_started;
+
+ len = (wrqu->essid.length < IW_ESSID_MAX_SIZE) ? wrqu->essid.length :
+ IW_ESSID_MAX_SIZE;
+
+ if (len > IW_ESSID_MAX_SIZE) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ if (ieee->iw_mode == IW_MODE_MONITOR) {
+ ret = -1;
+ goto out;
+ }
+
+ for (i = 0; i < len; i++) {
+ if (extra[i] < 0) {
+ ret = -1;
+ goto out;
+ }
+ }
+
+ if (proto_started)
+ rtllib_stop_protocol(ieee, true);
+
+
+ /* this is just to be sure that the GET wx callback
+ * has consisten infos. not needed otherwise
+ */
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ if (wrqu->essid.flags && wrqu->essid.length) {
+ strncpy(ieee->current_network.ssid, extra, len);
+ ieee->current_network.ssid_len = len;
+ ieee->cannot_notify = false;
+ ieee->ssid_set = 1;
+ } else {
+ ieee->ssid_set = 0;
+ ieee->current_network.ssid[0] = '\0';
+ ieee->current_network.ssid_len = 0;
+ }
+ spin_unlock_irqrestore(&ieee->lock, flags);
+
+ if (proto_started)
+ rtllib_start_protocol(ieee);
+out:
+ up(&ieee->wx_sem);
+ return ret;
+}
+
+int rtllib_wx_get_mode(struct rtllib_device *ieee, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ wrqu->mode = ieee->iw_mode;
+ return 0;
+}
+
+int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+
+ int *parms = (int *)extra;
+ int enable = (parms[0] > 0);
+ short prev = ieee->raw_tx;
+
+ down(&ieee->wx_sem);
+
+ if (enable)
+ ieee->raw_tx = 1;
+ else
+ ieee->raw_tx = 0;
+
+ printk(KERN_INFO"raw TX is %s\n",
+ ieee->raw_tx ? "enabled" : "disabled");
+
+ if (ieee->iw_mode == IW_MODE_MONITOR) {
+ if (prev == 0 && ieee->raw_tx) {
+ if (ieee->data_hard_resume)
+ ieee->data_hard_resume(ieee->dev);
+
+ netif_carrier_on(ieee->dev);
+ }
+
+ if (prev && ieee->raw_tx == 1)
+ netif_carrier_off(ieee->dev);
+ }
+
+ up(&ieee->wx_sem);
+
+ return 0;
+}
+
+int rtllib_wx_get_name(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ strcpy(wrqu->name, "802.11");
+
+ if (ieee->modulation & RTLLIB_CCK_MODULATION)
+ strcat(wrqu->name, "b");
+ if (ieee->modulation & RTLLIB_OFDM_MODULATION)
+ strcat(wrqu->name, "g");
+ if (ieee->mode & (IEEE_N_24G | IEEE_N_5G))
+ strcat(wrqu->name, "n");
+ return 0;
+}
+
+
+/* this is mostly stolen from hostap */
+int rtllib_wx_set_power(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+
+ if ((!ieee->sta_wake_up) ||
+ (!ieee->enter_sleep_state) ||
+ (!ieee->ps_is_queue_empty)) {
+ RTLLIB_DEBUG(RTLLIB_DL_ERR, "%s(): PS mode is tryied to be use "
+ "but driver missed a callback\n\n", __func__);
+ return -1;
+ }
+
+ down(&ieee->wx_sem);
+
+ if (wrqu->power.disabled) {
+ RT_TRACE(COMP_DBG, "===>%s(): power disable\n", __func__);
+ ieee->ps = RTLLIB_PS_DISABLED;
+ goto exit;
+ }
+ if (wrqu->power.flags & IW_POWER_TIMEOUT) {
+ ieee->ps_timeout = wrqu->power.value / 1000;
+ RT_TRACE(COMP_DBG, "===>%s():ps_timeout is %d\n", __func__,
+ ieee->ps_timeout);
+ }
+
+ if (wrqu->power.flags & IW_POWER_PERIOD)
+ ieee->ps_period = wrqu->power.value / 1000;
+
+ switch (wrqu->power.flags & IW_POWER_MODE) {
+ case IW_POWER_UNICAST_R:
+ ieee->ps = RTLLIB_PS_UNICAST;
+ break;
+ case IW_POWER_MULTICAST_R:
+ ieee->ps = RTLLIB_PS_MBCAST;
+ break;
+ case IW_POWER_ALL_R:
+ ieee->ps = RTLLIB_PS_UNICAST | RTLLIB_PS_MBCAST;
+ break;
+
+ case IW_POWER_ON:
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto exit;
+
+ }
+exit:
+ up(&ieee->wx_sem);
+ return ret;
+
+}
+
+/* this is stolen from hostap */
+int rtllib_wx_get_power(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+
+ down(&ieee->wx_sem);
+
+ if (ieee->ps == RTLLIB_PS_DISABLED) {
+ wrqu->power.disabled = 1;
+ goto exit;
+ }
+
+ wrqu->power.disabled = 0;
+
+ if ((wrqu->power.flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
+ wrqu->power.flags = IW_POWER_TIMEOUT;
+ wrqu->power.value = ieee->ps_timeout * 1000;
+ } else {
+ wrqu->power.flags = IW_POWER_PERIOD;
+ wrqu->power.value = ieee->ps_period * 1000;
+ }
+
+ if ((ieee->ps & (RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST)) ==
+ (RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST))
+ wrqu->power.flags |= IW_POWER_ALL_R;
+ else if (ieee->ps & RTLLIB_PS_MBCAST)
+ wrqu->power.flags |= IW_POWER_MULTICAST_R;
+ else
+ wrqu->power.flags |= IW_POWER_UNICAST_R;
+
+exit:
+ up(&ieee->wx_sem);
+ return ret;
+
+}
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
new file mode 100644
index 00000000000..44e8006bc1a
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -0,0 +1,967 @@
+/******************************************************************************
+
+ Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ James P. Ketrenos <ipw2100-admin@linux.intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************
+
+ Few modifications for Realtek's Wi-Fi drivers by
+ Andrea Merello <andreamrl@tiscali.it>
+
+ A special thanks goes to Realtek for their support !
+
+******************************************************************************/
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/wireless.h>
+#include <linux/etherdevice.h>
+#include <linux/uaccess.h>
+#include <linux/if_vlan.h>
+
+#include "rtllib.h"
+
+/*
+
+
+802.11 Data Frame
+
+
+802.11 frame_contorl for data frames - 2 bytes
+ ,-----------------------------------------------------------------------------------------.
+bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e |
+ |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
+val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x |
+ |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
+desc | ^-ver-^ | ^type-^ | ^-----subtype-----^ | to |from |more |retry| pwr |more |wep |
+ | | | x=0 data,x=1 data+ack | DS | DS |frag | | mgm |data | |
+ '-----------------------------------------------------------------------------------------'
+ /\
+ |
+802.11 Data Frame |
+ ,--------- 'ctrl' expands to >-----------'
+ |
+ ,--'---,-------------------------------------------------------------.
+Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
+ |------|------|---------|---------|---------|------|---------|------|
+Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
+ | | tion | (BSSID) | | | ence | data | |
+ `--------------------------------------------------| |------'
+Total: 28 non-data bytes `----.----'
+ |
+ .- 'Frame data' expands to <---------------------------'
+ |
+ V
+ ,---------------------------------------------------.
+Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
+ |------|------|---------|----------|------|---------|
+Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP |
+ | DSAP | SSAP | | | | Packet |
+ | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | |
+ `-----------------------------------------| |
+Total: 8 non-data bytes `----.----'
+ |
+ .- 'IP Packet' expands, if WEP enabled, to <--'
+ |
+ V
+ ,-----------------------.
+Bytes | 4 | 0-2296 | 4 |
+ |-----|-----------|-----|
+Desc. | IV | Encrypted | ICV |
+ | | IP Packet | |
+ `-----------------------'
+Total: 8 non-data bytes
+
+
+802.3 Ethernet Data Frame
+
+ ,-----------------------------------------.
+Bytes | 6 | 6 | 2 | Variable | 4 |
+ |-------|-------|------|-----------|------|
+Desc. | Dest. | Source| Type | IP Packet | fcs |
+ | MAC | MAC | | | |
+ `-----------------------------------------'
+Total: 18 non-data bytes
+
+In the event that fragmentation is required, the incoming payload is split into
+N parts of size ieee->fts. The first fragment contains the SNAP header and the
+remaining packets are just data.
+
+If encryption is enabled, each fragment payload size is reduced by enough space
+to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP)
+So if you have 1500 bytes of payload with ieee->fts set to 500 without
+encryption it will take 3 frames. With WEP it will take 4 frames as the
+payload of each frame is reduced to 492 bytes.
+
+* SKB visualization
+*
+* ,- skb->data
+* |
+* | ETHERNET HEADER ,-<-- PAYLOAD
+* | | 14 bytes from skb->data
+* | 2 bytes for Type --> ,T. | (sizeof ethhdr)
+* | | | |
+* |,-Dest.--. ,--Src.---. | | |
+* | 6 bytes| | 6 bytes | | | |
+* v | | | | | |
+* 0 | v 1 | v | v 2
+* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+* ^ | ^ | ^ |
+* | | | | | |
+* | | | | `T' <---- 2 bytes for Type
+* | | | |
+* | | '---SNAP--' <-------- 6 bytes for SNAP
+* | |
+* `-IV--' <-------------------- 4 bytes for IV (WEP)
+*
+* SNAP HEADER
+*
+*/
+
+static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
+static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
+
+inline int rtllib_put_snap(u8 *data, u16 h_proto)
+{
+ struct rtllib_snap_hdr *snap;
+ u8 *oui;
+
+ snap = (struct rtllib_snap_hdr *)data;
+ snap->dsap = 0xaa;
+ snap->ssap = 0xaa;
+ snap->ctrl = 0x03;
+
+ if (h_proto == 0x8137 || h_proto == 0x80f3)
+ oui = P802_1H_OUI;
+ else
+ oui = RFC1042_OUI;
+ snap->oui[0] = oui[0];
+ snap->oui[1] = oui[1];
+ snap->oui[2] = oui[2];
+
+ *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
+
+ return SNAP_SIZE + sizeof(u16);
+}
+
+int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
+ int hdr_len)
+{
+ struct rtllib_crypt_data *crypt = NULL;
+ int res;
+
+ crypt = ieee->crypt[ieee->tx_keyidx];
+
+ if (!(crypt && crypt->ops)) {
+ printk(KERN_INFO "=========>%s(), crypt is null\n", __func__);
+ return -1;
+ }
+ /* To encrypt, frame format is:
+ * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
+
+ /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
+ * call both MSDU and MPDU encryption functions from here. */
+ atomic_inc(&crypt->refcnt);
+ res = 0;
+ if (crypt->ops->encrypt_msdu)
+ res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
+ if (res == 0 && crypt->ops->encrypt_mpdu)
+ res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
+
+ atomic_dec(&crypt->refcnt);
+ if (res < 0) {
+ printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
+ ieee->dev->name, frag->len);
+ ieee->ieee_stats.tx_discards++;
+ return -1;
+ }
+
+ return 0;
+}
+
+
+void rtllib_txb_free(struct rtllib_txb *txb)
+{
+ if (unlikely(!txb))
+ return;
+ kfree(txb);
+}
+
+static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
+ gfp_t gfp_mask)
+{
+ struct rtllib_txb *txb;
+ int i;
+ txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags),
+ gfp_mask);
+ if (!txb)
+ return NULL;
+
+ memset(txb, 0, sizeof(struct rtllib_txb));
+ txb->nr_frags = nr_frags;
+ txb->frag_size = txb_size;
+
+ for (i = 0; i < nr_frags; i++) {
+ txb->fragments[i] = dev_alloc_skb(txb_size);
+ if (unlikely(!txb->fragments[i])) {
+ i--;
+ break;
+ }
+ memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
+ }
+ if (unlikely(i != nr_frags)) {
+ while (i >= 0)
+ dev_kfree_skb_any(txb->fragments[i--]);
+ kfree(txb);
+ return NULL;
+ }
+ return txb;
+}
+
+static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
+{
+ struct ethhdr *eth;
+ struct iphdr *ip;
+
+ eth = (struct ethhdr *)skb->data;
+ if (eth->h_proto != htons(ETH_P_IP))
+ return 0;
+
+ RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA, skb->data, skb->len);
+ ip = ip_hdr(skb);
+ switch (ip->tos & 0xfc) {
+ case 0x20:
+ return 2;
+ case 0x40:
+ return 1;
+ case 0x60:
+ return 3;
+ case 0x80:
+ return 4;
+ case 0xa0:
+ return 5;
+ case 0xc0:
+ return 6;
+ case 0xe0:
+ return 7;
+ default:
+ return 0;
+ }
+}
+
+static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
+ struct sk_buff *skb,
+ struct cb_desc *tcb_desc)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+ struct tx_ts_record *pTxTs = NULL;
+ struct rtllib_hdr_1addr* hdr = (struct rtllib_hdr_1addr *)skb->data;
+
+ if (rtllib_act_scanning(ieee, false))
+ return;
+
+ if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
+ return;
+ if (!IsQoSDataFrame(skb->data))
+ return;
+ if (is_multicast_ether_addr(hdr->addr1) ||
+ is_broadcast_ether_addr(hdr->addr1))
+ return;
+
+ if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
+ return;
+
+ if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
+ return;
+
+ if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
+ return;
+ if (pHTInfo->bCurrentAMPDUEnable) {
+ if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
+ skb->priority, TX_DIR, true)) {
+ printk(KERN_INFO "%s: can't get TS\n", __func__);
+ return;
+ }
+ if (pTxTs->TxAdmittedBARecord.bValid == false) {
+ if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
+ KEY_TYPE_NA)) {
+ ;
+ } else if (tcb_desc->bdhcp == 1) {
+ ;
+ } else if (!pTxTs->bDisable_AddBa) {
+ TsStartAddBaProcess(ieee, pTxTs);
+ }
+ goto FORCED_AGG_SETTING;
+ } else if (pTxTs->bUsingBa == false) {
+ if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
+ (pTxTs->TxCurSeq+1)%4096))
+ pTxTs->bUsingBa = true;
+ else
+ goto FORCED_AGG_SETTING;
+ }
+ if (ieee->iw_mode == IW_MODE_INFRA) {
+ tcb_desc->bAMPDUEnable = true;
+ tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
+ tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
+ }
+ }
+FORCED_AGG_SETTING:
+ switch (pHTInfo->ForcedAMPDUMode) {
+ case HT_AGG_AUTO:
+ break;
+
+ case HT_AGG_FORCE_ENABLE:
+ tcb_desc->bAMPDUEnable = true;
+ tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
+ tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
+ break;
+
+ case HT_AGG_FORCE_DISABLE:
+ tcb_desc->bAMPDUEnable = false;
+ tcb_desc->ampdu_density = 0;
+ tcb_desc->ampdu_factor = 0;
+ break;
+ }
+ return;
+}
+
+static void rtllib_qurey_ShortPreambleMode(struct rtllib_device *ieee,
+ struct cb_desc *tcb_desc)
+{
+ tcb_desc->bUseShortPreamble = false;
+ if (tcb_desc->data_rate == 2)
+ return;
+ else if (ieee->current_network.capability &
+ WLAN_CAPABILITY_SHORT_PREAMBLE)
+ tcb_desc->bUseShortPreamble = true;
+ return;
+}
+
+static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
+ struct cb_desc *tcb_desc)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+
+ tcb_desc->bUseShortGI = false;
+
+ if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
+ return;
+
+ if (pHTInfo->bForcedShortGI) {
+ tcb_desc->bUseShortGI = true;
+ return;
+ }
+
+ if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
+ tcb_desc->bUseShortGI = true;
+ else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
+ tcb_desc->bUseShortGI = true;
+}
+
+static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
+ struct cb_desc *tcb_desc)
+{
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+
+ tcb_desc->bPacketBW = false;
+
+ if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
+ return;
+
+ if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
+ return;
+
+ if ((tcb_desc->data_rate & 0x80) == 0)
+ return;
+ if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
+ !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
+ tcb_desc->bPacketBW = true;
+ return;
+}
+
+static void rtllib_query_protectionmode(struct rtllib_device *ieee,
+ struct cb_desc *tcb_desc,
+ struct sk_buff *skb)
+{
+ tcb_desc->bRTSSTBC = false;
+ tcb_desc->bRTSUseShortGI = false;
+ tcb_desc->bCTSEnable = false;
+ tcb_desc->RTSSC = 0;
+ tcb_desc->bRTSBW = false;
+
+ if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
+ return;
+
+ if (is_broadcast_ether_addr(skb->data+16))
+ return;
+
+ if (ieee->mode < IEEE_N_24G) {
+ if (skb->len > ieee->rts) {
+ tcb_desc->bRTSEnable = true;
+ tcb_desc->rts_rate = MGN_24M;
+ } else if (ieee->current_network.buseprotection) {
+ tcb_desc->bRTSEnable = true;
+ tcb_desc->bCTSEnable = true;
+ tcb_desc->rts_rate = MGN_24M;
+ }
+ return;
+ } else {
+ struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
+ while (true) {
+ if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
+ tcb_desc->bCTSEnable = true;
+ tcb_desc->rts_rate = MGN_24M;
+ tcb_desc->bRTSEnable = true;
+ break;
+ } else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
+ HT_IOT_ACT_PURE_N_MODE)) {
+ tcb_desc->bRTSEnable = true;
+ tcb_desc->rts_rate = MGN_24M;
+ break;
+ }
+ if (ieee->current_network.buseprotection) {
+ tcb_desc->bRTSEnable = true;
+ tcb_desc->bCTSEnable = true;
+ tcb_desc->rts_rate = MGN_24M;
+ break;
+ }
+ if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
+ u8 HTOpMode = pHTInfo->CurrentOpMode;
+ if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
+ HTOpMode == 3)) ||
+ (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
+ tcb_desc->rts_rate = MGN_24M;
+ tcb_desc->bRTSEnable = true;
+ break;
+ }
+ }
+ if (skb->len > ieee->rts) {
+ tcb_desc->rts_rate = MGN_24M;
+ tcb_desc->bRTSEnable = true;
+ break;
+ }
+ if (tcb_desc->bAMPDUEnable) {
+ tcb_desc->rts_rate = MGN_24M;
+ tcb_desc->bRTSEnable = false;
+ break;
+ }
+ goto NO_PROTECTION;
+ }
+ }
+ if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ tcb_desc->bUseShortPreamble = true;
+ if (ieee->iw_mode == IW_MODE_MASTER)
+ goto NO_PROTECTION;
+ return;
+NO_PROTECTION:
+ tcb_desc->bRTSEnable = false;
+ tcb_desc->bCTSEnable = false;
+ tcb_desc->rts_rate = 0;
+ tcb_desc->RTSSC = 0;
+ tcb_desc->bRTSBW = false;
+}
+
+
+static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
+ struct cb_desc *tcb_desc)
+{
+ if (ieee->bTxDisableRateFallBack)
+ tcb_desc->bTxDisableRateFallBack = true;
+
+ if (ieee->bTxUseDriverAssingedRate)
+ tcb_desc->bTxUseDriverAssingedRate = true;
+ if (!tcb_desc->bTxDisableRateFallBack ||
+ !tcb_desc->bTxUseDriverAssingedRate) {
+ if (ieee->iw_mode == IW_MODE_INFRA ||
+ ieee->iw_mode == IW_MODE_ADHOC)
+ tcb_desc->RATRIndex = 0;
+ }
+}
+
+u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
+ u8 *dst)
+{
+ u16 seqnum = 0;
+
+ if (is_multicast_ether_addr(dst) || is_broadcast_ether_addr(dst))
+ return 0;
+ if (IsQoSDataFrame(skb->data)) {
+ struct tx_ts_record *pTS = NULL;
+ if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
+ skb->priority, TX_DIR, true))
+ return 0;
+ seqnum = pTS->TxCurSeq;
+ pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
+ return seqnum;
+ }
+ return 0;
+}
+
+static int wme_downgrade_ac(struct sk_buff *skb)
+{
+ switch (skb->priority) {
+ case 6:
+ case 7:
+ skb->priority = 5; /* VO -> VI */
+ return 0;
+ case 4:
+ case 5:
+ skb->priority = 3; /* VI -> BE */
+ return 0;
+ case 0:
+ case 3:
+ skb->priority = 1; /* BE -> BK */
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rtllib_device *ieee = (struct rtllib_device *)
+ netdev_priv_rsl(dev);
+ struct rtllib_txb *txb = NULL;
+ struct rtllib_hdr_3addrqos *frag_hdr;
+ int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
+ unsigned long flags;
+ struct net_device_stats *stats = &ieee->stats;
+ int ether_type = 0, encrypt;
+ int bytes, fc, qos_ctl = 0, hdr_len;
+ struct sk_buff *skb_frag;
+ struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
+ .duration_id = 0,
+ .seq_ctl = 0,
+ .qos_ctl = 0
+ };
+ u8 dest[ETH_ALEN], src[ETH_ALEN];
+ int qos_actived = ieee->current_network.qos_data.active;
+ struct rtllib_crypt_data *crypt = NULL;
+ struct cb_desc *tcb_desc;
+ u8 bIsMulticast = false;
+ u8 IsAmsdu = false;
+ bool bdhcp = false;
+
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ /* If there is no driver handler to take the TXB, dont' bother
+ * creating it... */
+ if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
+ IEEE_SOFTMAC_TX_QUEUE)) ||
+ ((!ieee->softmac_data_hard_start_xmit &&
+ (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
+ printk(KERN_WARNING "%s: No xmit handler.\n",
+ ieee->dev->name);
+ goto success;
+ }
+
+
+ if (likely(ieee->raw_tx == 0)) {
+ if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
+ printk(KERN_WARNING "%s: skb too small (%d).\n",
+ ieee->dev->name, skb->len);
+ goto success;
+ }
+ /* Save source and destination addresses */
+ memcpy(dest, skb->data, ETH_ALEN);
+ memcpy(src, skb->data+ETH_ALEN, ETH_ALEN);
+
+ memset(skb->cb, 0, sizeof(skb->cb));
+ ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
+
+ if (ieee->iw_mode == IW_MODE_MONITOR) {
+ txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
+ if (unlikely(!txb)) {
+ printk(KERN_WARNING "%s: Could not allocate "
+ "TXB\n",
+ ieee->dev->name);
+ goto failed;
+ }
+
+ txb->encrypted = 0;
+ txb->payload_size = skb->len;
+ memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
+ skb->len);
+
+ goto success;
+ }
+
+ if (skb->len > 282) {
+ if (ETH_P_IP == ether_type) {
+ const struct iphdr *ip = (struct iphdr *)
+ ((u8 *)skb->data+14);
+ if (IPPROTO_UDP == ip->protocol) {
+ struct udphdr *udp;
+
+ udp = (struct udphdr *)((u8 *)ip +
+ (ip->ihl << 2));
+ if (((((u8 *)udp)[1] == 68) &&
+ (((u8 *)udp)[3] == 67)) ||
+ ((((u8 *)udp)[1] == 67) &&
+ (((u8 *)udp)[3] == 68))) {
+ bdhcp = true;
+ ieee->LPSDelayCnt = 200;
+ }
+ }
+ } else if (ETH_P_ARP == ether_type) {
+ printk(KERN_INFO "=================>DHCP "
+ "Protocol start tx ARP pkt!!\n");
+ bdhcp = true;
+ ieee->LPSDelayCnt =
+ ieee->current_network.tim.tim_count;
+ }
+ }
+
+ skb->priority = rtllib_classify(skb, IsAmsdu);
+ crypt = ieee->crypt[ieee->tx_keyidx];
+ encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
+ ieee->host_encrypt && crypt && crypt->ops;
+ if (!encrypt && ieee->ieee802_1x &&
+ ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
+ stats->tx_dropped++;
+ goto success;
+ }
+ if (crypt && !encrypt && ether_type == ETH_P_PAE) {
+ struct eapol *eap = (struct eapol *)(skb->data +
+ sizeof(struct ethhdr) - SNAP_SIZE -
+ sizeof(u16));
+ RTLLIB_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
+ eap_get_type(eap->type));
+ }
+
+ /* Advance the SKB to the start of the payload */
+ skb_pull(skb, sizeof(struct ethhdr));
+
+ /* Determine total amount of storage required for TXB packets */
+ bytes = skb->len + SNAP_SIZE + sizeof(u16);
+
+ if (encrypt)
+ fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
+ else
+ fc = RTLLIB_FTYPE_DATA;
+
+ if (qos_actived)
+ fc |= RTLLIB_STYPE_QOS_DATA;
+ else
+ fc |= RTLLIB_STYPE_DATA;
+
+ if (ieee->iw_mode == IW_MODE_INFRA) {
+ fc |= RTLLIB_FCTL_TODS;
+ /* To DS: Addr1 = BSSID, Addr2 = SA,
+ Addr3 = DA */
+ memcpy(&header.addr1, ieee->current_network.bssid,
+ ETH_ALEN);
+ memcpy(&header.addr2, &src, ETH_ALEN);
+ if (IsAmsdu)
+ memcpy(&header.addr3,
+ ieee->current_network.bssid, ETH_ALEN);
+ else
+ memcpy(&header.addr3, &dest, ETH_ALEN);
+ } else if (ieee->iw_mode == IW_MODE_ADHOC) {
+ /* not From/To DS: Addr1 = DA, Addr2 = SA,
+ Addr3 = BSSID */
+ memcpy(&header.addr1, dest, ETH_ALEN);
+ memcpy(&header.addr2, src, ETH_ALEN);
+ memcpy(&header.addr3, ieee->current_network.bssid,
+ ETH_ALEN);
+ }
+
+ bIsMulticast = is_broadcast_ether_addr(header.addr1) ||
+ is_multicast_ether_addr(header.addr1);
+
+ header.frame_ctl = cpu_to_le16(fc);
+
+ /* Determine fragmentation size based on destination (multicast
+ * and broadcast are not fragmented) */
+ if (bIsMulticast) {
+ frag_size = MAX_FRAG_THRESHOLD;
+ qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
+ } else {
+ frag_size = ieee->fts;
+ qos_ctl = 0;
+ }
+
+ if (qos_actived) {
+ hdr_len = RTLLIB_3ADDR_LEN + 2;
+
+ /* in case we are a client verify acm is not set for this ac */
+ while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
+ printk(KERN_INFO "skb->priority = %x\n", skb->priority);
+ if (wme_downgrade_ac(skb))
+ break;
+ printk(KERN_INFO "converted skb->priority = %x\n",
+ skb->priority);
+ }
+ qos_ctl |= skb->priority;
+ header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
+ } else {
+ hdr_len = RTLLIB_3ADDR_LEN;
+ }
+ /* Determine amount of payload per fragment. Regardless of if
+ * this stack is providing the full 802.11 header, one will
+ * eventually be affixed to this fragment -- so we must account
+ * for it when determining the amount of payload space. */
+ bytes_per_frag = frag_size - hdr_len;
+ if (ieee->config &
+ (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
+ bytes_per_frag -= RTLLIB_FCS_LEN;
+
+ /* Each fragment may need to have room for encryptiong
+ * pre/postfix */
+ if (encrypt) {
+ bytes_per_frag -= crypt->ops->extra_prefix_len +
+ crypt->ops->extra_postfix_len;
+ }
+ /* Number of fragments is the total bytes_per_frag /
+ * payload_per_fragment */
+ nr_frags = bytes / bytes_per_frag;
+ bytes_last_frag = bytes % bytes_per_frag;
+ if (bytes_last_frag)
+ nr_frags++;
+ else
+ bytes_last_frag = bytes_per_frag;
+
+ /* When we allocate the TXB we allocate enough space for the
+ * reserve and full fragment bytes (bytes_per_frag doesn't
+ * include prefix, postfix, header, FCS, etc.) */
+ txb = rtllib_alloc_txb(nr_frags, frag_size +
+ ieee->tx_headroom, GFP_ATOMIC);
+ if (unlikely(!txb)) {
+ printk(KERN_WARNING "%s: Could not allocate TXB\n",
+ ieee->dev->name);
+ goto failed;
+ }
+ txb->encrypted = encrypt;
+ txb->payload_size = bytes;
+
+ if (qos_actived)
+ txb->queue_index = UP2AC(skb->priority);
+ else
+ txb->queue_index = WME_AC_BE;
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag = txb->fragments[i];
+ tcb_desc = (struct cb_desc *)(skb_frag->cb +
+ MAX_DEV_ADDR_SIZE);
+ if (qos_actived) {
+ skb_frag->priority = skb->priority;
+ tcb_desc->queue_index = UP2AC(skb->priority);
+ } else {
+ skb_frag->priority = WME_AC_BE;
+ tcb_desc->queue_index = WME_AC_BE;
+ }
+ skb_reserve(skb_frag, ieee->tx_headroom);
+
+ if (encrypt) {
+ if (ieee->hwsec_active)
+ tcb_desc->bHwSec = 1;
+ else
+ tcb_desc->bHwSec = 0;
+ skb_reserve(skb_frag,
+ crypt->ops->extra_prefix_len);
+ } else {
+ tcb_desc->bHwSec = 0;
+ }
+ frag_hdr = (struct rtllib_hdr_3addrqos *)
+ skb_put(skb_frag, hdr_len);
+ memcpy(frag_hdr, &header, hdr_len);
+
+ /* If this is not the last fragment, then add the
+ * MOREFRAGS bit to the frame control */
+ if (i != nr_frags - 1) {
+ frag_hdr->frame_ctl = cpu_to_le16(
+ fc | RTLLIB_FCTL_MOREFRAGS);
+ bytes = bytes_per_frag;
+
+ } else {
+ /* The last fragment has the remaining length */
+ bytes = bytes_last_frag;
+ }
+ if ((qos_actived) && (!bIsMulticast)) {
+ frag_hdr->seq_ctl =
+ rtllib_query_seqnum(ieee, skb_frag,
+ header.addr1);
+ frag_hdr->seq_ctl =
+ cpu_to_le16(frag_hdr->seq_ctl<<4 | i);
+ } else {
+ frag_hdr->seq_ctl =
+ cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
+ }
+ /* Put a SNAP header on the first fragment */
+ if (i == 0) {
+ rtllib_put_snap(
+ skb_put(skb_frag, SNAP_SIZE +
+ sizeof(u16)), ether_type);
+ bytes -= SNAP_SIZE + sizeof(u16);
+ }
+
+ memcpy(skb_put(skb_frag, bytes), skb->data, bytes);
+
+ /* Advance the SKB... */
+ skb_pull(skb, bytes);
+
+ /* Encryption routine will move the header forward in
+ * order to insert the IV between the header and the
+ * payload */
+ if (encrypt)
+ rtllib_encrypt_fragment(ieee, skb_frag,
+ hdr_len);
+ if (ieee->config &
+ (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
+ skb_put(skb_frag, 4);
+ }
+
+ if ((qos_actived) && (!bIsMulticast)) {
+ if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
+ ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
+ else
+ ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
+ } else {
+ if (ieee->seq_ctrl[0] == 0xFFF)
+ ieee->seq_ctrl[0] = 0;
+ else
+ ieee->seq_ctrl[0]++;
+ }
+ } else {
+ if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
+ printk(KERN_WARNING "%s: skb too small (%d).\n",
+ ieee->dev->name, skb->len);
+ goto success;
+ }
+
+ txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
+ if (!txb) {
+ printk(KERN_WARNING "%s: Could not allocate TXB\n",
+ ieee->dev->name);
+ goto failed;
+ }
+
+ txb->encrypted = 0;
+ txb->payload_size = skb->len;
+ memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
+ skb->len);
+ }
+
+ success:
+ if (txb) {
+ struct cb_desc *tcb_desc = (struct cb_desc *)
+ (txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
+ tcb_desc->bTxEnableFwCalcDur = 1;
+ tcb_desc->priority = skb->priority;
+
+ if (ether_type == ETH_P_PAE) {
+ if (ieee->pHTInfo->IOTAction &
+ HT_IOT_ACT_WA_IOT_Broadcom) {
+ tcb_desc->data_rate =
+ MgntQuery_TxRateExcludeCCKRates(ieee);
+ tcb_desc->bTxDisableRateFallBack = false;
+ } else {
+ tcb_desc->data_rate = ieee->basic_rate;
+ tcb_desc->bTxDisableRateFallBack = 1;
+ }
+
+
+ tcb_desc->RATRIndex = 7;
+ tcb_desc->bTxUseDriverAssingedRate = 1;
+ } else {
+ if (is_multicast_ether_addr(header.addr1))
+ tcb_desc->bMulticast = 1;
+ if (is_broadcast_ether_addr(header.addr1))
+ tcb_desc->bBroadcast = 1;
+ rtllib_txrate_selectmode(ieee, tcb_desc);
+ if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
+ tcb_desc->data_rate = ieee->basic_rate;
+ else
+ tcb_desc->data_rate = CURRENT_RATE(ieee->mode,
+ ieee->rate, ieee->HTCurrentOperaRate);
+
+ if (bdhcp == true) {
+ if (ieee->pHTInfo->IOTAction &
+ HT_IOT_ACT_WA_IOT_Broadcom) {
+ tcb_desc->data_rate =
+ MgntQuery_TxRateExcludeCCKRates(ieee);
+ tcb_desc->bTxDisableRateFallBack = false;
+ } else {
+ tcb_desc->data_rate = MGN_1M;
+ tcb_desc->bTxDisableRateFallBack = 1;
+ }
+
+
+ tcb_desc->RATRIndex = 7;
+ tcb_desc->bTxUseDriverAssingedRate = 1;
+ tcb_desc->bdhcp = 1;
+ }
+
+ rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
+ rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
+ tcb_desc);
+ rtllib_query_HTCapShortGI(ieee, tcb_desc);
+ rtllib_query_BandwidthMode(ieee, tcb_desc);
+ rtllib_query_protectionmode(ieee, tcb_desc,
+ txb->fragments[0]);
+ }
+ }
+ spin_unlock_irqrestore(&ieee->lock, flags);
+ dev_kfree_skb_any(skb);
+ if (txb) {
+ if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += txb->payload_size;
+ rtllib_softmac_xmit(txb, ieee);
+ } else {
+ if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
+ stats->tx_packets++;
+ stats->tx_bytes += txb->payload_size;
+ return 0;
+ }
+ rtllib_txb_free(txb);
+ }
+ }
+
+ return 0;
+
+ failed:
+ spin_unlock_irqrestore(&ieee->lock, flags);
+ netif_stop_queue(dev);
+ stats->tx_errors++;
+ return 1;
+
+}
+int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ memset(skb->cb, 0, sizeof(skb->cb));
+ return rtllib_xmit_inter(skb, dev);
+}
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
new file mode 100644
index 00000000000..8cea4a60e1b
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -0,0 +1,876 @@
+/******************************************************************************
+
+ Copyright(c) 2004 Intel Corporation. All rights reserved.
+
+ Portions of this file are based on the WEP enablement code provided by the
+ Host AP project hostap-drivers v0.1.3
+ Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ <jkmaline@cc.hut.fi>
+ Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ James P. Ketrenos <ipw2100-admin@linux.intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+#include <linux/wireless.h>
+#include <linux/version.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+
+#include "rtllib.h"
+struct modes_unit {
+ char *mode_string;
+ int mode_size;
+};
+static struct modes_unit rtllib_modes[] = {
+ {"a", 1},
+ {"b", 1},
+ {"g", 1},
+ {"?", 1},
+ {"N-24G", 5},
+ {"N-5G", 4},
+};
+
+#define MAX_CUSTOM_LEN 64
+static inline char *rtl819x_translate_scan(struct rtllib_device *ieee,
+ char *start, char *stop,
+ struct rtllib_network *network,
+ struct iw_request_info *info)
+{
+ char custom[MAX_CUSTOM_LEN];
+ char proto_name[IFNAMSIZ];
+ char *pname = proto_name;
+ char *p;
+ struct iw_event iwe;
+ int i, j;
+ u16 max_rate, rate;
+ static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
+
+ /* First entry *MUST* be the AP MAC address */
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
+ start = iwe_stream_add_event_rsl(info, start, stop,
+ &iwe, IW_EV_ADDR_LEN);
+ /* Remaining entries will be displayed in the order we provide them */
+
+ /* Add the ESSID */
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ if (network->ssid_len > 0) {
+ iwe.u.data.length = min(network->ssid_len, (u8)32);
+ start = iwe_stream_add_point_rsl(info, start, stop, &iwe,
+ network->ssid);
+ } else if (network->hidden_ssid_len == 0) {
+ iwe.u.data.length = sizeof("<hidden>");
+ start = iwe_stream_add_point_rsl(info, start, stop,
+ &iwe, "<hidden>");
+ } else {
+ iwe.u.data.length = min(network->hidden_ssid_len, (u8)32);
+ start = iwe_stream_add_point_rsl(info, start, stop, &iwe,
+ network->hidden_ssid);
+ }
+ /* Add the protocol name */
+ iwe.cmd = SIOCGIWNAME;
+ for (i = 0; i < (sizeof(rtllib_modes)/sizeof(rtllib_modes[0])); i++) {
+ if (network->mode&(1<<i)) {
+ sprintf(pname, rtllib_modes[i].mode_string,
+ rtllib_modes[i].mode_size);
+ pname += rtllib_modes[i].mode_size;
+ }
+ }
+ *pname = '\0';
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE802.11%s", proto_name);
+ start = iwe_stream_add_event_rsl(info, start, stop,
+ &iwe, IW_EV_CHAR_LEN);
+ /* Add mode */
+ iwe.cmd = SIOCGIWMODE;
+ if (network->capability &
+ (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
+ if (network->capability & WLAN_CAPABILITY_ESS)
+ iwe.u.mode = IW_MODE_MASTER;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ start = iwe_stream_add_event_rsl(info, start, stop,
+ &iwe, IW_EV_UINT_LEN);
+ }
+
+ /* Add frequency/channel */
+ iwe.cmd = SIOCGIWFREQ;
+/* iwe.u.freq.m = rtllib_frequency(network->channel, network->mode);
+ iwe.u.freq.e = 3; */
+ iwe.u.freq.m = network->channel;
+ iwe.u.freq.e = 0;
+ iwe.u.freq.i = 0;
+ start = iwe_stream_add_event_rsl(info, start, stop, &iwe,
+ IW_EV_FREQ_LEN);
+
+ /* Add encryption capability */
+ iwe.cmd = SIOCGIWENCODE;
+ if (network->capability & WLAN_CAPABILITY_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ start = iwe_stream_add_point_rsl(info, start, stop,
+ &iwe, network->ssid);
+ /* Add basic and extended rates */
+ max_rate = 0;
+ p = custom;
+ p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
+ for (i = 0, j = 0; i < network->rates_len;) {
+ if (j < network->rates_ex_len &&
+ ((network->rates_ex[j] & 0x7F) <
+ (network->rates[i] & 0x7F)))
+ rate = network->rates_ex[j++] & 0x7F;
+ else
+ rate = network->rates[i++] & 0x7F;
+ if (rate > max_rate)
+ max_rate = rate;
+ p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
+ }
+ for (; j < network->rates_ex_len; j++) {
+ rate = network->rates_ex[j] & 0x7F;
+ p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
+ if (rate > max_rate)
+ max_rate = rate;
+ }
+
+ if (network->mode >= IEEE_N_24G) {
+ struct ht_capab_ele *ht_cap = NULL;
+ bool is40M = false, isShortGI = false;
+ u8 max_mcs = 0;
+ if (!memcmp(network->bssht.bdHTCapBuf, EWC11NHTCap, 4))
+ ht_cap = (struct ht_capab_ele *)
+ &network->bssht.bdHTCapBuf[4];
+ else
+ ht_cap = (struct ht_capab_ele *)
+ &network->bssht.bdHTCapBuf[0];
+ is40M = (ht_cap->ChlWidth) ? 1 : 0;
+ isShortGI = (ht_cap->ChlWidth) ?
+ ((ht_cap->ShortGI40Mhz) ? 1 : 0) :
+ ((ht_cap->ShortGI20Mhz) ? 1 : 0);
+
+ max_mcs = HTGetHighestMCSRate(ieee, ht_cap->MCS,
+ MCS_FILTER_ALL);
+ rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs & 0x7f];
+ if (rate > max_rate)
+ max_rate = rate;
+ }
+ iwe.cmd = SIOCGIWRATE;
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ iwe.u.bitrate.value = max_rate * 500000;
+ start = iwe_stream_add_event_rsl(info, start, stop, &iwe,
+ IW_EV_PARAM_LEN);
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = p - custom;
+ if (iwe.u.data.length)
+ start = iwe_stream_add_point_rsl(info, start, stop,
+ &iwe, custom);
+ /* Add quality statistics */
+ /* TODO: Fix these values... */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = network->stats.signal;
+ iwe.u.qual.level = network->stats.rssi;
+ iwe.u.qual.noise = network->stats.noise;
+ iwe.u.qual.updated = network->stats.mask & RTLLIB_STATMASK_WEMASK;
+ if (!(network->stats.mask & RTLLIB_STATMASK_RSSI))
+ iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
+ if (!(network->stats.mask & RTLLIB_STATMASK_NOISE))
+ iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
+ if (!(network->stats.mask & RTLLIB_STATMASK_SIGNAL))
+ iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID;
+ iwe.u.qual.updated = 7;
+ start = iwe_stream_add_event_rsl(info, start, stop, &iwe,
+ IW_EV_QUAL_LEN);
+
+ iwe.cmd = IWEVCUSTOM;
+ p = custom;
+ iwe.u.data.length = p - custom;
+ if (iwe.u.data.length)
+ start = iwe_stream_add_point_rsl(info, start, stop,
+ &iwe, custom);
+
+ memset(&iwe, 0, sizeof(iwe));
+ if (network->wpa_ie_len) {
+ char buf[MAX_WPA_IE_LEN];
+ memcpy(buf, network->wpa_ie, network->wpa_ie_len);
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = network->wpa_ie_len;
+ start = iwe_stream_add_point_rsl(info, start, stop, &iwe, buf);
+ }
+ memset(&iwe, 0, sizeof(iwe));
+ if (network->rsn_ie_len) {
+ char buf[MAX_WPA_IE_LEN];
+ memcpy(buf, network->rsn_ie, network->rsn_ie_len);
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = network->rsn_ie_len;
+ start = iwe_stream_add_point_rsl(info, start, stop, &iwe, buf);
+ }
+
+ /* add info for WZC */
+ memset(&iwe, 0, sizeof(iwe));
+ if (network->wzc_ie_len) {
+ char buf[MAX_WZC_IE_LEN];
+ memcpy(buf, network->wzc_ie, network->wzc_ie_len);
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = network->wzc_ie_len;
+ start = iwe_stream_add_point_rsl(info, start, stop, &iwe, buf);
+ }
+
+ /* Add EXTRA: Age to display seconds since last beacon/probe response
+ * for given network. */
+ iwe.cmd = IWEVCUSTOM;
+ p = custom;
+ p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ " Last beacon: %lums ago",
+ (jiffies - network->last_scanned) / (HZ / 100));
+ iwe.u.data.length = p - custom;
+ if (iwe.u.data.length)
+ start = iwe_stream_add_point_rsl(info, start, stop,
+ &iwe, custom);
+
+ return start;
+}
+
+int rtllib_wx_get_scan(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct rtllib_network *network;
+ unsigned long flags;
+
+ char *ev = extra;
+ char *stop = ev + wrqu->data.length;
+ int i = 0;
+ int err = 0;
+ RTLLIB_DEBUG_WX("Getting scan\n");
+ down(&ieee->wx_sem);
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ list_for_each_entry(network, &ieee->network_list, list) {
+ i++;
+ if ((stop - ev) < 200) {
+ err = -E2BIG;
+ break;
+ }
+ if (ieee->scan_age == 0 ||
+ time_after(network->last_scanned + ieee->scan_age, jiffies))
+ ev = rtl819x_translate_scan(ieee, ev, stop, network,
+ info);
+ else
+ RTLLIB_DEBUG_SCAN("Not showing network '%s ("
+ " %pM)' due to age (%lums).\n",
+ escape_essid(network->ssid,
+ network->ssid_len),
+ network->bssid,
+ (jiffies - network->last_scanned) / (HZ / 100));
+ }
+
+ spin_unlock_irqrestore(&ieee->lock, flags);
+ up(&ieee->wx_sem);
+ wrqu->data.length = ev - extra;
+ wrqu->data.flags = 0;
+
+ RTLLIB_DEBUG_WX("exit: %d networks returned.\n", i);
+
+ return err;
+}
+
+int rtllib_wx_set_encode(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *keybuf)
+{
+ struct iw_point *erq = &(wrqu->encoding);
+ struct net_device *dev = ieee->dev;
+ struct rtllib_security sec = {
+ .flags = 0
+ };
+ int i, key, key_provided, len;
+ struct rtllib_crypt_data **crypt;
+
+ RTLLIB_DEBUG_WX("SET_ENCODE\n");
+
+ key = erq->flags & IW_ENCODE_INDEX;
+ if (key) {
+ if (key > WEP_KEYS)
+ return -EINVAL;
+ key--;
+ key_provided = 1;
+ } else {
+ key_provided = 0;
+ key = ieee->tx_keyidx;
+ }
+
+ RTLLIB_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
+ "provided" : "default");
+ crypt = &ieee->crypt[key];
+ if (erq->flags & IW_ENCODE_DISABLED) {
+ if (key_provided && *crypt) {
+ RTLLIB_DEBUG_WX("Disabling encryption on key %d.\n",
+ key);
+ rtllib_crypt_delayed_deinit(ieee, crypt);
+ } else
+ RTLLIB_DEBUG_WX("Disabling encryption.\n");
+
+ /* Check all the keys to see if any are still configured,
+ * and if no key index was provided, de-init them all */
+ for (i = 0; i < WEP_KEYS; i++) {
+ if (ieee->crypt[i] != NULL) {
+ if (key_provided)
+ break;
+ rtllib_crypt_delayed_deinit(ieee,
+ &ieee->crypt[i]);
+ }
+ }
+
+ if (i == WEP_KEYS) {
+ sec.enabled = 0;
+ sec.level = SEC_LEVEL_0;
+ sec.flags |= SEC_ENABLED | SEC_LEVEL;
+ }
+
+ goto done;
+ }
+
+
+
+ sec.enabled = 1;
+ sec.flags |= SEC_ENABLED;
+
+ if (*crypt != NULL && (*crypt)->ops != NULL &&
+ strcmp((*crypt)->ops->name, "WEP") != 0) {
+ /* changing to use WEP; deinit previously used algorithm
+ * on this key */
+ rtllib_crypt_delayed_deinit(ieee, crypt);
+ }
+
+ if (*crypt == NULL) {
+ struct rtllib_crypt_data *new_crypt;
+
+ /* take WEP into use */
+ new_crypt = kmalloc(sizeof(struct rtllib_crypt_data),
+ GFP_KERNEL);
+ if (new_crypt == NULL)
+ return -ENOMEM;
+ memset(new_crypt, 0, sizeof(struct rtllib_crypt_data));
+ new_crypt->ops = rtllib_get_crypto_ops("WEP");
+ if (!new_crypt->ops) {
+ request_module("rtllib_crypt_wep");
+ new_crypt->ops = rtllib_get_crypto_ops("WEP");
+ }
+
+ if (new_crypt->ops)
+ new_crypt->priv = new_crypt->ops->init(key);
+
+ if (!new_crypt->ops || !new_crypt->priv) {
+ kfree(new_crypt);
+ new_crypt = NULL;
+
+ printk(KERN_WARNING "%s: could not initialize WEP: "
+ "load module rtllib_crypt_wep\n",
+ dev->name);
+ return -EOPNOTSUPP;
+ }
+ *crypt = new_crypt;
+ }
+
+ /* If a new key was provided, set it up */
+ if (erq->length > 0) {
+ len = erq->length <= 5 ? 5 : 13;
+ memcpy(sec.keys[key], keybuf, erq->length);
+ if (len > erq->length)
+ memset(sec.keys[key] + erq->length, 0,
+ len - erq->length);
+ RTLLIB_DEBUG_WX("Setting key %d to '%s' (%d:%d bytes)\n",
+ key, escape_essid(sec.keys[key], len),
+ erq->length, len);
+ sec.key_sizes[key] = len;
+ (*crypt)->ops->set_key(sec.keys[key], len, NULL,
+ (*crypt)->priv);
+ sec.flags |= (1 << key);
+ /* This ensures a key will be activated if no key is
+ * explicitely set */
+ if (key == sec.active_key)
+ sec.flags |= SEC_ACTIVE_KEY;
+ ieee->tx_keyidx = key;
+
+ } else {
+ len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
+ NULL, (*crypt)->priv);
+ if (len == 0) {
+ /* Set a default key of all 0 */
+ printk(KERN_INFO "Setting key %d to all zero.\n",
+ key);
+
+ RTLLIB_DEBUG_WX("Setting key %d to all zero.\n",
+ key);
+ memset(sec.keys[key], 0, 13);
+ (*crypt)->ops->set_key(sec.keys[key], 13, NULL,
+ (*crypt)->priv);
+ sec.key_sizes[key] = 13;
+ sec.flags |= (1 << key);
+ }
+
+ /* No key data - just set the default TX key index */
+ if (key_provided) {
+ RTLLIB_DEBUG_WX(
+ "Setting key %d to default Tx key.\n", key);
+ ieee->tx_keyidx = key;
+ sec.active_key = key;
+ sec.flags |= SEC_ACTIVE_KEY;
+ }
+ }
+ done:
+ ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
+ ieee->auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN :
+ WLAN_AUTH_SHARED_KEY;
+ sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
+ sec.flags |= SEC_AUTH_MODE;
+ RTLLIB_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ?
+ "OPEN" : "SHARED KEY");
+
+ /* For now we just support WEP, so only set that security level...
+ * TODO: When WPA is added this is one place that needs to change */
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
+
+ if (ieee->set_security)
+ ieee->set_security(dev, &sec);
+
+ /* Do not reset port if card is in Managed mode since resetting will
+ * generate new IEEE 802.11 authentication which may end up in looping
+ * with IEEE 802.1X. If your hardware requires a reset after WEP
+ * configuration (for example... Prism2), implement the reset_port in
+ * the callbacks structures used to initialize the 802.11 stack. */
+ if (ieee->reset_on_keychange &&
+ ieee->iw_mode != IW_MODE_INFRA &&
+ ieee->reset_port && ieee->reset_port(dev)) {
+ printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int rtllib_wx_get_encode(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *keybuf)
+{
+ struct iw_point *erq = &(wrqu->encoding);
+ int len, key;
+ struct rtllib_crypt_data *crypt;
+
+ RTLLIB_DEBUG_WX("GET_ENCODE\n");
+
+ if (ieee->iw_mode == IW_MODE_MONITOR)
+ return -1;
+
+ key = erq->flags & IW_ENCODE_INDEX;
+ if (key) {
+ if (key > WEP_KEYS)
+ return -EINVAL;
+ key--;
+ } else {
+ key = ieee->tx_keyidx;
+ }
+ crypt = ieee->crypt[key];
+
+ erq->flags = key + 1;
+
+ if (crypt == NULL || crypt->ops == NULL) {
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ return 0;
+ }
+ len = crypt->ops->get_key(keybuf, SCM_KEY_LEN, NULL, crypt->priv);
+ erq->length = (len >= 0 ? len : 0);
+
+ erq->flags |= IW_ENCODE_ENABLED;
+
+ if (ieee->open_wep)
+ erq->flags |= IW_ENCODE_OPEN;
+ else
+ erq->flags |= IW_ENCODE_RESTRICTED;
+
+ return 0;
+}
+
+int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct net_device *dev = ieee->dev;
+ struct iw_point *encoding = &wrqu->encoding;
+ struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+ int i, idx;
+ int group_key = 0;
+ const char *alg, *module;
+ struct rtllib_crypto_ops *ops;
+ struct rtllib_crypt_data **crypt;
+
+ struct rtllib_security sec = {
+ .flags = 0,
+ };
+ idx = encoding->flags & IW_ENCODE_INDEX;
+ if (idx) {
+ if (idx < 1 || idx > WEP_KEYS)
+ return -EINVAL;
+ idx--;
+ } else{
+ idx = ieee->tx_keyidx;
+ }
+ if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+ crypt = &ieee->crypt[idx];
+ group_key = 1;
+ } else {
+ /* some Cisco APs use idx>0 for unicast in dynamic WEP */
+ if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
+ return -EINVAL;
+ if (ieee->iw_mode == IW_MODE_INFRA)
+ crypt = &ieee->crypt[idx];
+ else
+ return -EINVAL;
+ }
+
+ sec.flags |= SEC_ENABLED;
+ if ((encoding->flags & IW_ENCODE_DISABLED) ||
+ ext->alg == IW_ENCODE_ALG_NONE) {
+ if (*crypt)
+ rtllib_crypt_delayed_deinit(ieee, crypt);
+
+ for (i = 0; i < WEP_KEYS; i++) {
+ if (ieee->crypt[i] != NULL)
+ break;
+ }
+ if (i == WEP_KEYS) {
+ sec.enabled = 0;
+ sec.level = SEC_LEVEL_0;
+ sec.flags |= SEC_LEVEL;
+ }
+ goto done;
+ }
+
+ sec.enabled = 1;
+ switch (ext->alg) {
+ case IW_ENCODE_ALG_WEP:
+ alg = "WEP";
+ module = "rtllib_crypt_wep";
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ alg = "TKIP";
+ module = "rtllib_crypt_tkip";
+ break;
+ case IW_ENCODE_ALG_CCMP:
+ alg = "CCMP";
+ module = "rtllib_crypt_ccmp";
+ break;
+ default:
+ RTLLIB_DEBUG_WX("%s: unknown crypto alg %d\n",
+ dev->name, ext->alg);
+ ret = -EINVAL;
+ goto done;
+ }
+ printk(KERN_INFO "alg name:%s\n", alg);
+
+ ops = rtllib_get_crypto_ops(alg);
+ if (ops == NULL) {
+ char tempbuf[100];
+
+ memset(tempbuf, 0x00, 100);
+ sprintf(tempbuf, "%s", module);
+ request_module("%s", tempbuf);
+ ops = rtllib_get_crypto_ops(alg);
+ }
+ if (ops == NULL) {
+ RTLLIB_DEBUG_WX("%s: unknown crypto alg %d\n",
+ dev->name, ext->alg);
+ printk(KERN_INFO "========>unknown crypto alg %d\n", ext->alg);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (*crypt == NULL || (*crypt)->ops != ops) {
+ struct rtllib_crypt_data *new_crypt;
+
+ rtllib_crypt_delayed_deinit(ieee, crypt);
+
+ new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
+ if (new_crypt == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ new_crypt->ops = ops;
+ if (new_crypt->ops)
+ new_crypt->priv = new_crypt->ops->init(idx);
+
+ if (new_crypt->priv == NULL) {
+ kfree(new_crypt);
+ ret = -EINVAL;
+ goto done;
+ }
+ *crypt = new_crypt;
+
+ }
+
+ if (ext->key_len > 0 && (*crypt)->ops->set_key &&
+ (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
+ (*crypt)->priv) < 0) {
+ RTLLIB_DEBUG_WX("%s: key setting failed\n", dev->name);
+ printk(KERN_INFO "key setting failed\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ ieee->tx_keyidx = idx;
+ sec.active_key = idx;
+ sec.flags |= SEC_ACTIVE_KEY;
+ }
+ if (ext->alg != IW_ENCODE_ALG_NONE) {
+ sec.key_sizes[idx] = ext->key_len;
+ sec.flags |= (1 << idx);
+ if (ext->alg == IW_ENCODE_ALG_WEP) {
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_1;
+ } else if (ext->alg == IW_ENCODE_ALG_TKIP) {
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_2;
+ } else if (ext->alg == IW_ENCODE_ALG_CCMP) {
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_3;
+ }
+ /* Don't set sec level for group keys. */
+ if (group_key)
+ sec.flags &= ~SEC_LEVEL;
+ }
+done:
+ if (ieee->set_security)
+ ieee->set_security(ieee->dev, &sec);
+
+ if (ieee->reset_on_keychange &&
+ ieee->iw_mode != IW_MODE_INFRA &&
+ ieee->reset_port && ieee->reset_port(dev)) {
+ RTLLIB_DEBUG_WX("%s: reset_port failed\n", dev->name);
+ return -EINVAL;
+ }
+ return ret;
+}
+
+int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct iw_point *encoding = &wrqu->encoding;
+ struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+ struct rtllib_crypt_data *crypt;
+ int idx, max_key_len;
+
+ max_key_len = encoding->length - sizeof(*ext);
+ if (max_key_len < 0)
+ return -EINVAL;
+
+ idx = encoding->flags & IW_ENCODE_INDEX;
+ if (idx) {
+ if (idx < 1 || idx > WEP_KEYS)
+ return -EINVAL;
+ idx--;
+ } else {
+ idx = ieee->tx_keyidx;
+ }
+ if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) &&
+ (ext->alg != IW_ENCODE_ALG_WEP))
+ if (idx != 0 || (ieee->iw_mode != IW_MODE_INFRA))
+ return -EINVAL;
+
+ crypt = ieee->crypt[idx];
+
+ encoding->flags = idx + 1;
+ memset(ext, 0, sizeof(*ext));
+
+ if (crypt == NULL || crypt->ops == NULL) {
+ ext->alg = IW_ENCODE_ALG_NONE;
+ ext->key_len = 0;
+ encoding->flags |= IW_ENCODE_DISABLED;
+ } else {
+ if (strcmp(crypt->ops->name, "WEP") == 0)
+ ext->alg = IW_ENCODE_ALG_WEP;
+ else if (strcmp(crypt->ops->name, "TKIP"))
+ ext->alg = IW_ENCODE_ALG_TKIP;
+ else if (strcmp(crypt->ops->name, "CCMP"))
+ ext->alg = IW_ENCODE_ALG_CCMP;
+ else
+ return -EINVAL;
+ ext->key_len = crypt->ops->get_key(ext->key, SCM_KEY_LEN,
+ NULL, crypt->priv);
+ encoding->flags |= IW_ENCODE_ENABLED;
+ if (ext->key_len &&
+ (ext->alg == IW_ENCODE_ALG_TKIP ||
+ ext->alg == IW_ENCODE_ALG_CCMP))
+ ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID;
+
+ }
+
+ return 0;
+}
+
+int rtllib_wx_set_mlme(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u8 i = 0;
+ bool deauth = false;
+ struct iw_mlme *mlme = (struct iw_mlme *) extra;
+
+ if (ieee->state != RTLLIB_LINKED)
+ return -ENOLINK;
+
+ down(&ieee->wx_sem);
+
+ switch (mlme->cmd) {
+ case IW_MLME_DEAUTH:
+ deauth = true;
+ /* leave break out intentionly */
+
+ case IW_MLME_DISASSOC:
+ if (deauth == true)
+ printk(KERN_INFO "disauth packet !\n");
+ else
+ printk(KERN_INFO "dis associate packet!\n");
+
+ ieee->cannot_notify = true;
+
+ SendDisassociation(ieee, deauth, mlme->reason_code);
+ rtllib_disassociate(ieee);
+
+ ieee->wap_set = 0;
+ for (i = 0; i < 6; i++)
+ ieee->current_network.bssid[i] = 0x55;
+
+ ieee->ssid_set = 0;
+ ieee->current_network.ssid[0] = '\0';
+ ieee->current_network.ssid_len = 0;
+ break;
+ default:
+ up(&ieee->wx_sem);
+ return -EOPNOTSUPP;
+ }
+
+ up(&ieee->wx_sem);
+
+ return 0;
+}
+
+int rtllib_wx_set_auth(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
+{
+ switch (data->flags & IW_AUTH_INDEX) {
+ case IW_AUTH_WPA_VERSION:
+ break;
+ case IW_AUTH_CIPHER_PAIRWISE:
+ case IW_AUTH_CIPHER_GROUP:
+ case IW_AUTH_KEY_MGMT:
+ /*
+ * Host AP driver does not use these parameters and allows
+ * wpa_supplicant to control them internally.
+ */
+ break;
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ ieee->tkip_countermeasures = data->value;
+ break;
+ case IW_AUTH_DROP_UNENCRYPTED:
+ ieee->drop_unencrypted = data->value;
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+ if (data->value & IW_AUTH_ALG_SHARED_KEY) {
+ ieee->open_wep = 0;
+ ieee->auth_mode = 1;
+ } else if (data->value & IW_AUTH_ALG_OPEN_SYSTEM) {
+ ieee->open_wep = 1;
+ ieee->auth_mode = 0;
+ } else if (data->value & IW_AUTH_ALG_LEAP) {
+ ieee->open_wep = 1;
+ ieee->auth_mode = 2;
+ } else
+ return -EINVAL;
+ break;
+
+ case IW_AUTH_WPA_ENABLED:
+ ieee->wpa_enabled = (data->value) ? 1 : 0;
+ break;
+
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ ieee->ieee802_1x = data->value;
+ break;
+ case IW_AUTH_PRIVACY_INVOKED:
+ ieee->privacy_invoked = data->value;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
+{
+ u8 *buf;
+ u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+
+ if (len > MAX_WPA_IE_LEN || (len && ie == NULL))
+ return -EINVAL;
+
+ if (len) {
+ eid = ie[0];
+ if ((eid == MFIE_TYPE_GENERIC) && (!memcmp(&ie[2],
+ wps_oui, 4))) {
+
+ ieee->wps_ie_len = (len < MAX_WZC_IE_LEN) ? (len) :
+ (MAX_WZC_IE_LEN);
+ buf = kmalloc(ieee->wps_ie_len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ memcpy(buf, ie, ieee->wps_ie_len);
+ ieee->wps_ie = buf;
+ return 0;
+ }
+ }
+ ieee->wps_ie_len = 0;
+ kfree(ieee->wps_ie);
+ ieee->wps_ie = NULL;
+ if (len) {
+ if (len != ie[1]+2)
+ return -EINVAL;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ memcpy(buf, ie, len);
+ kfree(ieee->wpa_ie);
+ ieee->wpa_ie = buf;
+ ieee->wpa_ie_len = len;
+ } else {
+ kfree(ieee->wpa_ie);
+ ieee->wpa_ie = NULL;
+ ieee->wpa_ie_len = 0;
+ }
+ return 0;
+}
diff --git a/drivers/staging/rtl8192u/ieee80211/Makefile b/drivers/staging/rtl8192u/ieee80211/Makefile
index 0775c5599d6..51effd6412a 100644
--- a/drivers/staging/rtl8192u/ieee80211/Makefile
+++ b/drivers/staging/rtl8192u/ieee80211/Makefile
@@ -1,25 +1,9 @@
NIC_SELECT = RTL8192U
-KVER := $(shell uname -r)
-MODDESTDIR := /lib/modules/$(KVER)/kernel/drivers/net/wireless/$(NIC_SELECT)
-
-CC = gcc
-ifneq ($(shell uname -r|cut -d. -f1,2), 2.4)
ccflags-y := -I$(TOPDIR)/drivers/net/wireless
ccflags-y += -O2
ccflags-y += -DJACKSON_NEW_8187 -DJACKSON_NEW_RX
-#it will fail to compile in suse linux enterprise 10 sp2. This flag is to solve this problem.
-ifeq ($(shell uname -r | cut -d. -f1,2,3,4), 2.6.16.60-0)
-ccflags-y := -DOPENSUSE_SLED
-endif
-
-ifeq ($(NIC_SELECT),RTL8192U)
-#ccflags-y := -DUSB_TX_DRIVER_AGGREGATION_ENABLE
-#ccflags-y := -DUSB_RX_AGGREGATION_SUPPORT
-endif
-#ccflags-y := -DJOHN_NOCPY
-#flags to enable or disble 80211D feature
ieee80211-rsl-objs := ieee80211_rx.o \
ieee80211_softmac.o \
ieee80211_tx.o \
@@ -42,96 +26,3 @@ obj-m +=ieee80211_crypt_wep-rsl.o
obj-m +=ieee80211_crypt_tkip-rsl.o
obj-m +=ieee80211_crypt_ccmp-rsl.o
-KSRC := /lib/modules/$(KVER)/build
-INSTALL_PREFIX :=
-
-all: modules
-
-modules:
- $(MAKE) -C $(KSRC) M=$(PWD) CC=$(CC) modules
-
-install: modules
- rm -fr $(MODDESTDIR)
- mkdir -p $(MODDESTDIR)
- @install -p -m 644 ieee80211_crypt-rsl.ko $(MODDESTDIR)
- @install -p -m 644 ieee80211_crypt_wep-rsl.ko $(MODDESTDIR)
- @install -p -m 644 ieee80211_crypt_tkip-rsl.ko $(MODDESTDIR)
- @install -p -m 644 ieee80211_crypt_ccmp-rsl.ko $(MODDESTDIR)
- @install -p -m 644 ieee80211-rsl.ko $(MODDESTDIR)
- depmod -a
-uninstall:
- rm -fr $(MODDESTDIR)
- depmod -a
-
-else
-LD := ld
-KSRC := /lib/modules/$(KVER)/build
-CONFIG_FILE := $(KSRC)/include/linux/autoconf.h
-
-CFLAGS += -DLINUX -D__KERNEL__ -DMODULE -O2 -pipe -Wall
-CFLAGS += -I$(KSRC)/include -I.
-#Kernel 2.4.31
-CFLAGS += -DMODVERSIONS -DEXPORT_SYMTAB -include $(KSRC)/include/linux/modversions.h
-#Kernel 2.4.20
-#CFLAGS += -D__NO_VERSION__ -DEXPORT_SYMTAB
-SMP := $(shell $(CC) $(MODCFLAGS) -E -dM $(CONFIG_FILE) | \
- grep CONFIG_SMP | awk '{print $$3}')
-ifneq ($(SMP),1)
- SMP := 0
-endif
-ifeq ($(SMP),1)
- CFLAGS += -D__SMP__
-endif
-
-#CFLAGS += -DJOHN_NOCPY
-
-OBJS := ${patsubst %.c, %.o, ${wildcard *.c}}
-all:${OBJS} ieee80211_crypt-rsl.o michael_mic-rsl.o aes-rsl.o ieee80211_crypt_wep-rsl.o ieee80211_crypt_tkip-rsl.o ieee80211_crypt_ccmp-rsl.o crypto-rsl.o ieee80211-rsl.o
-
-ieee80211_crypt-rsl.o: ieee80211_crypt.o
- mv $^ $@
-
-michael_mic-rsl.o: michael_mic.o
- mv $^ $@
-
-aes-rsl.o: aes.o
- mv $^ $@
-
-ieee80211_crypt_wep-rsl.o: ieee80211_crypt_wep.o
- mv $^ $@
-
-ieee80211_crypt_tkip-rsl.o: ieee80211_crypt_tkip.o
- mv $^ $@
-
-ieee80211_crypt_ccmp-rsl.o: ieee80211_crypt_ccmp.o
- mv $^ $@
-
-crypto-rsl.o: arc4.o api.o autoload.o cipher.o compress.o digest.o scatterwalk.o proc.o
- $(LD) -r $^ -o $@
-
-ieee80211-rsl.o: ieee80211_rx.o ieee80211_tx.o ieee80211_wx.o ieee80211_module.o ieee80211_softmac_wx.o ieee80211_softmac.o rtl819x_HTProc.o rtl819x_TSProc.o rtl819x_BAProc.o dot11d.o
- $(LD) -r $^ -o $@
-install:
- rm -fr $(MODDESTDIR)
- mkdir -p $(MODDESTDIR)
- @install -p -m 644 ieee80211_crypt-rsl.o $(MODDESTDIR)
- @install -p -m 644 crypto-rsl.o $(MODDESTDIR)
- @install -p -m 644 michael_mic-rsl.o $(MODDESTDIR)
- @install -p -m 644 aes-rsl.o $(MODDESTDIR)
- @install -p -m 644 ieee80211_crypt_wep-rsl.o $(MODDESTDIR)
- @install -p -m 644 ieee80211_crypt_tkip-rsl.o $(MODDESTDIR)
- @install -p -m 644 ieee80211_crypt_ccmp-rsl.o $(MODDESTDIR)
- @install -p -m 644 ieee80211-rsl.o $(MODDESTDIR)
- /sbin/depmod -a ${shell uname -r}
-
-uninstall:
- rm -fr $(MODDESTDIR)
- /sbin/depmod -a ${shell uname -r}
-
-endif
-
-.PHONY: clean
-clean:
- rm -fr *.mod.c *.mod *.o .*.cmd *.mod.* *.ko *.o *~
- rm -rf .tmp_versions
- rm -rf Module.symvers
diff --git a/drivers/staging/rtl8192u/ieee80211/compress.c b/drivers/staging/rtl8192u/ieee80211/compress.c
index 86c23c9223f..5416ab63a73 100644
--- a/drivers/staging/rtl8192u/ieee80211/compress.c
+++ b/drivers/staging/rtl8192u/ieee80211/compress.c
@@ -12,10 +12,10 @@
*
*/
#include <linux/types.h>
-//#include <linux/crypto.h>
+/*#include <linux/crypto.h>*/
#include "rtl_crypto.h"
#include <linux/errno.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
#include <linux/string.h>
#include "internal.h"
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 463cc261890..1c0a1db6420 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -2416,8 +2416,8 @@ extern int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
extern int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len);
/* ieee80211_softmac.c */
-extern short ieee80211_is_54g(struct ieee80211_network net);
-extern short ieee80211_is_shortslot(struct ieee80211_network net);
+extern short ieee80211_is_54g(const struct ieee80211_network *net);
+extern short ieee80211_is_shortslot(const struct ieee80211_network *net);
extern int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats, u16 type,
u16 stype);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index b00eb0e65f3..c2ab5fa1546 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -31,14 +31,14 @@ u8 rsn_authen_cipher_suite[16][4] = {
{0x00,0x0F,0xAC,0x05}, //WEP-104
};
-short ieee80211_is_54g(struct ieee80211_network net)
+short ieee80211_is_54g(const struct ieee80211_network *net)
{
- return ((net.rates_ex_len > 0) || (net.rates_len > 4));
+ return (net->rates_ex_len > 0) || (net->rates_len > 4);
}
-short ieee80211_is_shortslot(struct ieee80211_network net)
+short ieee80211_is_shortslot(const struct ieee80211_network *net)
{
- return (net.capability & WLAN_CAPABILITY_SHORT_SLOT);
+ return net->capability & WLAN_CAPABILITY_SHORT_SLOT;
}
/* returns the total length needed for pleacing the RATE MFIE
@@ -718,7 +718,7 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
else
atim_len = 0;
- if(ieee80211_is_54g(ieee->current_network))
+ if(ieee80211_is_54g(&ieee->current_network))
erp_len = 3;
else
erp_len = 0;
@@ -1333,7 +1333,7 @@ void ieee80211_associate_complete_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
printk(KERN_INFO "Associated successfully\n");
- if(ieee80211_is_54g(ieee->current_network) &&
+ if(ieee80211_is_54g(&ieee->current_network) &&
(ieee->modulation & IEEE80211_OFDM_MODULATION)){
ieee->rate = 108;
@@ -1489,7 +1489,7 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
ieee->state = IEEE80211_ASSOCIATING;
queue_work(ieee->wq, &ieee->associate_procedure_wq);
}else{
- if(ieee80211_is_54g(ieee->current_network) &&
+ if(ieee80211_is_54g(&ieee->current_network) &&
(ieee->modulation & IEEE80211_OFDM_MODULATION)){
ieee->rate = 108;
ieee->SetWirelessMode(ieee->dev, IEEE_G);
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index ee86fe8509e..c09be0a6646 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -5739,7 +5739,7 @@ static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_get_stats = rtl8192_stats,
.ndo_tx_timeout = tx_timeout,
.ndo_do_ioctl = rtl8192_ioctl,
- .ndo_set_multicast_list = r8192_set_multicast,
+ .ndo_set_rx_mode = r8192_set_multicast,
.ndo_set_mac_address = r8192_set_mac_adr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/staging/rtl8712/Kconfig b/drivers/staging/rtl8712/Kconfig
index 041e1e81f31..ea37473f71e 100644
--- a/drivers/staging/rtl8712/Kconfig
+++ b/drivers/staging/rtl8712/Kconfig
@@ -16,4 +16,11 @@ config R8712_AP
---help---
This option allows the Realtek RTL8712 USB device to be an Access Point.
+config R8712_TX_AGGR
+ bool "Realtek RTL8712U Transmit Aggregation code"
+ depends on R8712U && BROKEN
+ default N
+ ---help---
+ This option provides transmit aggregation for the Realtek RTL8712 USB device.
+
diff --git a/drivers/staging/rtl8712/basic_types.h b/drivers/staging/rtl8712/basic_types.h
index a0538a8a670..7561bed5dd4 100644
--- a/drivers/staging/rtl8712/basic_types.h
+++ b/drivers/staging/rtl8712/basic_types.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __BASIC_TYPES_H__
#define __BASIC_TYPES_H__
diff --git a/drivers/staging/rtl8712/big_endian.h b/drivers/staging/rtl8712/big_endian.h
index 8512d1b5919..b16f8ecf99c 100644
--- a/drivers/staging/rtl8712/big_endian.h
+++ b/drivers/staging/rtl8712/big_endian.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _LINUX_BYTEORDER_BIG_ENDIAN_H
#define _LINUX_BYTEORDER_BIG_ENDIAN_H
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index 4f380a64aa8..9b5d771e650 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
/*---------------------------------------------------------------------
For type defines and data structure defines
@@ -117,6 +142,11 @@ struct dvobj_priv {
struct usb_device *pusbdev;
};
+/**
+ * struct _adapter - the main adapter structure for this device.
+ *
+ * bup: True indicates that the interface is Up.
+ */
struct _adapter {
struct dvobj_priv dvobjpriv;
struct mlme_priv mlmepriv;
@@ -151,6 +181,9 @@ struct _adapter {
struct net_device_stats stats;
struct iw_statistics iwstats;
int pid; /*process id from UI*/
+ _workitem wkFilterRxFF0;
+ u8 blnEnableRxFF0Filter;
+ spinlock_t lockRxFF0Filter;
};
static inline u8 *myid(struct eeprom_priv *peepriv)
diff --git a/drivers/staging/rtl8712/ethernet.h b/drivers/staging/rtl8712/ethernet.h
index ba8d777d8e1..882d61b2e95 100644
--- a/drivers/staging/rtl8712/ethernet.h
+++ b/drivers/staging/rtl8712/ethernet.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __INC_ETHERNET_H
#define __INC_ETHERNET_H
diff --git a/drivers/staging/rtl8712/generic.h b/drivers/staging/rtl8712/generic.h
index 742424bdf16..8868c9f4adf 100644
--- a/drivers/staging/rtl8712/generic.h
+++ b/drivers/staging/rtl8712/generic.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _LINUX_BYTEORDER_GENERIC_H
#define _LINUX_BYTEORDER_GENERIC_H
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index 1411c7bf0d4..d0029aa4cd3 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -28,6 +28,12 @@
#define _HAL_INIT_C_
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+
#include "osdep_service.h"
#include "drv_types.h"
#include "rtl871x_byteorder.h"
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index f06addcf063..cc68d9748ed 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -170,17 +170,11 @@ static uint r8712_get_rateset_len(u8 *rateset)
return i;
}
-int r8712_generate_ie(struct registry_priv *pregistrypriv,
- struct _adapter *padapter)
+int r8712_generate_ie(struct registry_priv *pregistrypriv)
{
int sz = 0, rateLen;
struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
u8 *ie = pdev_network->IEs;
- struct ieee80211_ht_cap ht_capie;
- struct ieee80211_ht_addt_info ht_addt_info;
- unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00};
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct qos_priv *pqospriv = &pmlmepriv->qospriv;
/*timestamp will be inserted by hardware*/
sz += 8;
@@ -219,33 +213,6 @@ int r8712_generate_ie(struct registry_priv *pregistrypriv,
/*IBSS Parameter Set*/
ie = r8712_set_ie(ie, _IBSS_PARA_IE_, 2,
(u8 *)&(pdev_network->Configuration.ATIMWindow), &sz);
- if (pregistrypriv->ht_enable == 1) {
- if (pqospriv->qos_option == 0) {
- ie = r8712_set_ie(ie, _VENDOR_SPECIFIC_IE_,
- _WMM_IE_Length_, WMM_IE, &sz);
- pqospriv->qos_option = 1;
- }
- memset(&ht_capie, 0, sizeof(struct ieee80211_ht_cap));
- ht_capie.cap_info = IEEE80211_HT_CAP_SUP_WIDTH |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_TX_STBC |
- IEEE80211_HT_CAP_MAX_AMSDU |
- IEEE80211_HT_CAP_DSSSCCK40;
- ht_capie.ampdu_params_info = (IEEE80211_HT_CAP_AMPDU_FACTOR &
- 0x03) | (IEEE80211_HT_CAP_AMPDU_DENSITY & 0x00);
- ie = r8712_set_ie(ie, _HT_CAPABILITY_IE_,
- sizeof(struct ieee80211_ht_cap),
- (unsigned char *)&ht_capie, &sz);
- /*add HT info ie*/
- memset(&ht_addt_info, 0,
- sizeof(struct ieee80211_ht_addt_info));
- /*need to add the HT additional IEs*/
- ht_addt_info.control_chan = pregistrypriv->channel;
- ie = r8712_set_ie(ie, _HT_ADD_INFO_IE_,
- sizeof(struct ieee80211_ht_addt_info),
- (unsigned char *)&ht_addt_info, &sz);
- }
return sz;
}
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
index d62c6ac0955..3c0092b7de0 100644
--- a/drivers/staging/rtl8712/ieee80211.h
+++ b/drivers/staging/rtl8712/ieee80211.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __IEEE80211_H
#define __IEEE80211_H
@@ -112,7 +137,7 @@ struct ieee_ibss_seq {
u8 mac[ETH_ALEN];
u16 seq_num;
u16 frag_num;
- unsigned int packet_time;
+ unsigned long packet_time;
struct list_head list;
};
@@ -644,7 +669,6 @@ struct ieee80211_txb {
#define CRC_LENGTH 4U
#define MAX_WPA_IE_LEN 128
-#define MAX_WPS_IE_LEN 512
#define NETWORK_EMPTY_ESSID (1<<0)
#define NETWORK_HAS_OFDM (1<<1)
@@ -764,8 +788,7 @@ int r8712_parse_wpa2_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
int r8712_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len,
u8 *wpa_ie, u16 *wpa_len);
int r8712_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen);
-int r8712_generate_ie(struct registry_priv *pregistrypriv,
- struct _adapter *padapter);
+int r8712_generate_ie(struct registry_priv *pregistrypriv);
uint r8712_is_cckrates_included(u8 *rate);
uint r8712_is_cckratesonly_included(u8 *rate);
diff --git a/drivers/staging/rtl8712/if_ether.h b/drivers/staging/rtl8712/if_ether.h
index 0e9753b9ed3..2bbe527bcd5 100644
--- a/drivers/staging/rtl8712/if_ether.h
+++ b/drivers/staging/rtl8712/if_ether.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
diff --git a/drivers/staging/rtl8712/little_endian.h b/drivers/staging/rtl8712/little_endian.h
index 0248c143c6d..cd57d6c2850 100644
--- a/drivers/staging/rtl8712/little_endian.h
+++ b/drivers/staging/rtl8712/little_endian.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H
#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H
diff --git a/drivers/staging/rtl8712/mlme_osdep.h b/drivers/staging/rtl8712/mlme_osdep.h
index 968e78765a3..a20fe81f921 100644
--- a/drivers/staging/rtl8712/mlme_osdep.h
+++ b/drivers/staging/rtl8712/mlme_osdep.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __MLME_OSDEP_H_
#define __MLME_OSDEP_H_
diff --git a/drivers/staging/rtl8712/mp_custom_oid.h b/drivers/staging/rtl8712/mp_custom_oid.h
index a9e0b3483e3..40510089b78 100644
--- a/drivers/staging/rtl8712/mp_custom_oid.h
+++ b/drivers/staging/rtl8712/mp_custom_oid.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __CUSTOM_OID_H
#define __CUSTOM_OID_H
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 3f38e8eca3f..9a75c6dbe50 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -177,7 +177,7 @@ static uint loadparam(struct _adapter *padapter, struct net_device *pnetdev)
static int r871x_net_set_mac_address(struct net_device *pnetdev, void *p)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(pnetdev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(pnetdev);
struct sockaddr *addr = p;
if (padapter->bup == false)
@@ -187,7 +187,7 @@ static int r871x_net_set_mac_address(struct net_device *pnetdev, void *p)
static struct net_device_stats *r871x_net_get_stats(struct net_device *pnetdev)
{
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(pnetdev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(pnetdev);
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct recv_priv *precvpriv = &(padapter->recvpriv);
@@ -221,7 +221,7 @@ struct net_device *r8712_init_netdev(void)
strcpy(ifname, "wlan%d");
dev_alloc_name(pnetdev, ifname);
}
- padapter = (struct _adapter *) _netdev_priv(pnetdev);
+ padapter = (struct _adapter *) netdev_priv(pnetdev);
padapter->pnetdev = pnetdev;
printk(KERN_INFO "r8712u: register rtl8712_netdev_ops to"
" netdev_ops\n");
@@ -261,7 +261,7 @@ static void start_drv_timers(struct _adapter *padapter)
_set_timer(&padapter->mlmepriv.wdg_timer, 2000);
}
-static void stop_drv_timers(struct _adapter *padapter)
+void r8712_stop_drv_timers(struct _adapter *padapter)
{
_cancel_timer_ex(&padapter->mlmepriv.assoc_timer);
_cancel_timer_ex(&padapter->mlmepriv.sitesurveyctrl.
@@ -286,6 +286,9 @@ static u8 init_default_value(struct _adapter *padapter)
pxmitpriv->vcs_type = pregistrypriv->vcs_type;
pxmitpriv->rts_thresh = pregistrypriv->rts_thresh;
pxmitpriv->frag_len = pregistrypriv->frag_thresh;
+ /* mlme_priv */
+ /* Maybe someday we should rename this variable to "active_mode"(Jeff)*/
+ pmlmepriv->passive_mode = 1; /* 1: active, 0: passive. */
/*ht_priv*/
{
int i;
@@ -348,7 +351,7 @@ u8 r8712_free_drv_sw(struct _adapter *padapter)
_r8712_free_recv_priv(&padapter->recvpriv);
mp871xdeinit(padapter);
if (pnetdev)
- os_free_netdev(pnetdev);
+ free_netdev(pnetdev);
return _SUCCESS;
}
@@ -375,9 +378,15 @@ static void enable_video_mode(struct _adapter *padapter, int cbw40_value)
r8712_fw_cmd(padapter, intcmd);
}
+/**
+ *
+ * This function intends to handle the activation of an interface
+ * i.e. when it is brought Up/Active from a Down state.
+ *
+ */
static int netdev_open(struct net_device *pnetdev)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(pnetdev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(pnetdev);
if (padapter->bup == false) {
padapter->bDriverStopped = false;
@@ -434,9 +443,15 @@ netdev_open_error:
return -1;
}
+/**
+ *
+ * This function intends to handle the shutdown of an interface
+ * i.e. when it is brought Down from an Up/Active state.
+ *
+ */
static int netdev_close(struct net_device *pnetdev)
{
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(pnetdev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(pnetdev);
/* Close LED*/
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_POWER_OFF);
@@ -456,8 +471,8 @@ static int netdev_close(struct net_device *pnetdev)
r8712_free_assoc_resources(padapter);
/*s2-4.*/
r8712_free_network_queue(padapter);
- /*Stop driver mlme relation timer*/
- stop_drv_timers(padapter);
+ /* The interface is no longer Up: */
+ padapter->bup = false;
return 0;
}
diff --git a/drivers/staging/rtl8712/osdep_intf.h b/drivers/staging/rtl8712/osdep_intf.h
index 3bc20257b03..aa0ec74af51 100644
--- a/drivers/staging/rtl8712/osdep_intf.h
+++ b/drivers/staging/rtl8712/osdep_intf.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __OSDEP_INTF_H_
#define __OSDEP_INTF_H_
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index c683d7609e6..1ee943a58c4 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -1,40 +1,47 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __OSDEP_SERVICE_H_
#define __OSDEP_SERVICE_H_
#define _SUCCESS 1
#define _FAIL 0
-#include "basic_types.h"
+#include <linux/version.h>
#include <linux/spinlock.h>
+
+#include <linux/interrupt.h>
#include <linux/semaphore.h>
+#include <linux/sched.h>
#include <linux/sem.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/iw_handler.h>
-#include <linux/proc_fs.h> /* Necessary because we use the proc fs */
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/kref.h>
-#include <linux/skbuff.h>
-#include <linux/usb.h>
-#include <linux/usb/ch9.h>
-#include <linux/io.h>
-#include <linux/circ_buf.h>
-#include <linux/uaccess.h>
-#include <asm/byteorder.h>
-#include <linux/atomic.h>
-#include <linux/wireless.h>
-#include <linux/rtnetlink.h>
-#include "ethernet.h"
-#include <linux/if_arp.h>
-#include <linux/firmware.h>
-#define _usb_alloc_urb(x, y) usb_alloc_urb(x, y)
-#define _usb_submit_urb(x, y) usb_submit_urb(x, y)
+#include <linux/proc_fs.h> /* Necessary because we use the proc fs */
+
+#include "basic_types.h"
struct __queue {
struct list_head queue;
@@ -45,7 +52,6 @@ struct __queue {
#define _buffer unsigned char
#define thread_exit() complete_and_exit(NULL, 0)
#define _workitem struct work_struct
-#define MSECS(t) (HZ * ((t) / 1000) + (HZ * ((t) % 1000)) / 1000)
#define _init_queue(pqueue) \
do { \
@@ -53,16 +59,6 @@ struct __queue {
spin_lock_init(&((pqueue)->lock)); \
} while (0)
-static inline void *_netdev_priv(struct net_device *dev)
-{
- return netdev_priv(dev);
-}
-
-static inline void os_free_netdev(struct net_device *dev)
-{
- free_netdev(dev);
-}
-
static inline struct list_head *get_next(struct list_head *list)
{
return list->next;
@@ -203,7 +199,6 @@ static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
static inline void thread_enter(void *context)
{
- daemonize("%s", "RTKTHREAD");
allow_signal(SIGTERM);
}
diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
index 1f0949ed7ee..0e26d5f6cf2 100644
--- a/drivers/staging/rtl8712/recv_linux.c
+++ b/drivers/staging/rtl8712/recv_linux.c
@@ -28,11 +28,15 @@
#define _RECV_OSDEP_C_
+#include <linux/usb.h>
+
#include "osdep_service.h"
#include "drv_types.h"
#include "wifi.h"
#include "recv_osdep.h"
#include "osdep_intf.h"
+#include "ethernet.h"
+#include <linux/if_arp.h>
#include "usb_ops.h"
/*init os related resource in struct recv_priv*/
@@ -51,7 +55,7 @@ int r8712_os_recvbuf_resource_alloc(struct _adapter *padapter,
int res = _SUCCESS;
precvbuf->irp_pending = false;
- precvbuf->purb = _usb_alloc_urb(0, GFP_KERNEL);
+ precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
if (precvbuf->purb == NULL)
res = _FAIL;
precvbuf->pskb = NULL;
diff --git a/drivers/staging/rtl8712/recv_osdep.h b/drivers/staging/rtl8712/recv_osdep.h
index 60a54dd90ff..f4384ef0086 100644
--- a/drivers/staging/rtl8712/recv_osdep.h
+++ b/drivers/staging/rtl8712/recv_osdep.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RECV_OSDEP_H_
#define __RECV_OSDEP_H_
diff --git a/drivers/staging/rtl8712/rtl8712_bitdef.h b/drivers/staging/rtl8712/rtl8712_bitdef.h
index 356184fa060..bff57a8eef3 100644
--- a/drivers/staging/rtl8712/rtl8712_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_bitdef.h
@@ -1,3 +1,24 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+
#ifndef __RTL8712_BITDEF_H__
#define __RTL8712_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 26c605e8cd6..9f6ebc419b0 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -28,6 +28,24 @@
#define _RTL8712_CMD_C_
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/circ_buf.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
+#include <linux/semaphore.h>
+#include <linux/rtnetlink.h>
+
#include "osdep_service.h"
#include "drv_types.h"
#include "recv_osdep.h"
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
index 13ef0626b28..766a6463266 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.h
+++ b/drivers/staging/rtl8712/rtl8712_cmd.h
@@ -1,9 +1,40 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL8712_CMD_H_
#define __RTL8712_CMD_H_
+#define CMD_HDR_SZ 8
+
u8 r8712_fw_cmd(struct _adapter *pAdapter, u32 cmd);
void r8712_fw_cmd_data(struct _adapter *pAdapter, u32 *value, u8 flag);
+struct cmd_hdr {
+ u32 cmd_dw0;
+ u32 cmd_dw1;
+};
enum rtl8712_h2c_cmd {
GEN_CMD_CODE(_Read_MACREG), /*0*/
@@ -72,6 +103,40 @@ enum rtl8712_h2c_cmd {
GEN_CMD_CODE(_SetPowerTracking),
GEN_CMD_CODE(_AMSDU_TO_AMPDU), /*57*/
GEN_CMD_CODE(_SetMacAddress), /*58*/
+
+ GEN_CMD_CODE(_DisconnectCtrl), /*59*/
+ GEN_CMD_CODE(_SetChannelPlan), /*60*/
+ GEN_CMD_CODE(_DisconnectCtrlEx), /*61*/
+
+ /* To do, modify these h2c cmd, add or delete */
+ GEN_CMD_CODE(_GetH2cLbk) ,
+
+ /* WPS extra IE */
+ GEN_CMD_CODE(_SetProbeReqExtraIE) ,
+ GEN_CMD_CODE(_SetAssocReqExtraIE) ,
+ GEN_CMD_CODE(_SetProbeRspExtraIE) ,
+ GEN_CMD_CODE(_SetAssocRspExtraIE) ,
+
+ /* the following is driver will do */
+ GEN_CMD_CODE(_GetCurDataRate) ,
+
+ GEN_CMD_CODE(_GetTxRetrycnt), /* to record times that Tx retry to
+ * transmmit packet after association
+ */
+ GEN_CMD_CODE(_GetRxRetrycnt), /* to record total number of the
+ * received frame with ReTry bit set in
+ * the WLAN header
+ */
+
+ GEN_CMD_CODE(_GetBCNOKcnt),
+ GEN_CMD_CODE(_GetBCNERRcnt),
+ GEN_CMD_CODE(_GetCurTxPwrLevel),
+
+ GEN_CMD_CODE(_SetDIG),
+ GEN_CMD_CODE(_SetRA),
+ GEN_CMD_CODE(_SetPT),
+ GEN_CMD_CODE(_ReadTSSI),
+
MAX_H2CCMD
};
@@ -151,6 +216,28 @@ static struct _cmd_callback cmd_callback[] = {
{GEN_CMD_CODE(_SetPowerTracking), NULL},
{GEN_CMD_CODE(_AMSDU_TO_AMPDU), NULL}, /*57*/
{GEN_CMD_CODE(_SetMacAddress), NULL}, /*58*/
+
+ {GEN_CMD_CODE(_DisconnectCtrl), NULL}, /*59*/
+ {GEN_CMD_CODE(_SetChannelPlan), NULL}, /*60*/
+ {GEN_CMD_CODE(_DisconnectCtrlEx), NULL}, /*61*/
+
+ /* To do, modify these h2c cmd, add or delete */
+ {GEN_CMD_CODE(_GetH2cLbk), NULL},
+
+ {_SetProbeReqExtraIE_CMD_, NULL},
+ {_SetAssocReqExtraIE_CMD_, NULL},
+ {_SetProbeRspExtraIE_CMD_, NULL},
+ {_SetAssocRspExtraIE_CMD_, NULL},
+ {_GetCurDataRate_CMD_, NULL},
+ {_GetTxRetrycnt_CMD_, NULL},
+ {_GetRxRetrycnt_CMD_, NULL},
+ {_GetBCNOKcnt_CMD_, NULL},
+ {_GetBCNERRcnt_CMD_, NULL},
+ {_GetCurTxPwrLevel_CMD_, NULL},
+ {_SetDIG_CMD_, NULL},
+ {_SetRA_CMD_, NULL},
+ {_SetPT_CMD_, NULL},
+ {GEN_CMD_CODE(_ReadTSSI), &r8712_readtssi_cmdrsp_callback}
};
#endif
diff --git a/drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h b/drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h
index 6c49903a42f..8dffe101bec 100644
--- a/drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_CMDCTRL_BITDEF_H__
#define __RTL8712_CMDCTRL_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_cmdctrl_regdef.h b/drivers/staging/rtl8712/rtl8712_cmdctrl_regdef.h
index 5d9a3f20ea6..9374f1c4885 100644
--- a/drivers/staging/rtl8712/rtl8712_cmdctrl_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_cmdctrl_regdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_CMDCTRL_REGDEF_H__
#define __RTL8712_CMDCTRL_REGDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_debugctrl_bitdef.h b/drivers/staging/rtl8712/rtl8712_debugctrl_bitdef.h
index 7b34d4025e3..8bd483795ca 100644
--- a/drivers/staging/rtl8712/rtl8712_debugctrl_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_debugctrl_bitdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_DEBUGCTRL_BITDEF_H__
#define __RTL8712_DEBUGCTRL_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_debugctrl_regdef.h b/drivers/staging/rtl8712/rtl8712_debugctrl_regdef.h
index 27cad716868..43630bb068f 100644
--- a/drivers/staging/rtl8712/rtl8712_debugctrl_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_debugctrl_regdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_DEBUGCTRL_REGDEF_H__
#define __RTL8712_DEBUGCTRL_REGDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_edcasetting_bitdef.h b/drivers/staging/rtl8712/rtl8712_edcasetting_bitdef.h
index c69b1b72c28..32dab81f176 100644
--- a/drivers/staging/rtl8712/rtl8712_edcasetting_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_edcasetting_bitdef.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL8712_EDCASETTING_BITDEF_H__
#define __RTL8712_EDCASETTING_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_edcasetting_regdef.h b/drivers/staging/rtl8712/rtl8712_edcasetting_regdef.h
index de3d3e23ba9..d992cb8b1c7 100644
--- a/drivers/staging/rtl8712/rtl8712_edcasetting_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_edcasetting_regdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_EDCASETTING_REGDEF_H__
#define __RTL8712_EDCASETTING_REGDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index 1dc12b7a2f5..b08e9a25c9c 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -307,21 +307,25 @@ static u8 fix_header(struct _adapter *padapter, u8 header, u16 header_addr)
continue;
}
for (i = 0; i < PGPKG_MAX_WORDS; i++) {
- if (BIT(i) & word_en)
- continue;
- if (!(BIT(i) & pkt.word_en)) {
- if (efuse_one_byte_read(padapter, addr,
- &value) == true)
- pkt.data[i*2] = value;
- else
- return false;
- if (efuse_one_byte_read(padapter, addr + 1,
- &value) == true)
- pkt.data[i*2 + 1] = value;
- else
- return false;
+ if (BIT(i) & word_en) {
+ if (BIT(i) & pkt.word_en) {
+ if (efuse_one_byte_read(
+ padapter, addr,
+ &value) == true)
+ pkt.data[i*2] = value;
+ else
+ return false;
+ if (efuse_one_byte_read(
+ padapter,
+ addr + 1,
+ &value) == true)
+ pkt.data[i*2 + 1] =
+ value;
+ else
+ return false;
+ }
+ addr += 2;
}
- addr += 2;
}
}
if (addr != header_addr)
@@ -329,26 +333,29 @@ static u8 fix_header(struct _adapter *padapter, u8 header, u16 header_addr)
addr++;
/* fill original data */
for (i = 0; i < PGPKG_MAX_WORDS; i++) {
- if (BIT(i) & pkt.word_en)
- continue;
- efuse_one_byte_write(padapter, addr, pkt.data[i*2]);
- efuse_one_byte_write(padapter, addr+1, pkt.data[i*2 + 1]);
- /* additional check */
- if (efuse_one_byte_read(padapter, addr, &value) == false)
- ret = false;
- else if (pkt.data[i*2] != value) {
- ret = false;
- if (0xFF == value) /* write again */
- efuse_one_byte_write(padapter, addr,
- pkt.data[i * 2]);
- }
- if (efuse_one_byte_read(padapter, addr+1, &value) == false)
- ret = false;
- else if (pkt.data[i*2 + 1] != value) {
- ret = false;
- if (0xFF == value) /* write again */
- efuse_one_byte_write(padapter, addr+1,
- pkt.data[i*2 + 1]);
+ if (BIT(i) & pkt.word_en) {
+ efuse_one_byte_write(padapter, addr, pkt.data[i*2]);
+ efuse_one_byte_write(padapter, addr+1,
+ pkt.data[i*2 + 1]);
+ /* additional check */
+ if (efuse_one_byte_read(padapter, addr, &value)
+ == false)
+ ret = false;
+ else if (pkt.data[i*2] != value) {
+ ret = false;
+ if (0xFF == value) /* write again */
+ efuse_one_byte_write(padapter, addr,
+ pkt.data[i * 2]);
+ }
+ if (efuse_one_byte_read(padapter, addr+1, &value) ==
+ false)
+ ret = false;
+ else if (pkt.data[i*2 + 1] != value) {
+ ret = false;
+ if (0xFF == value) /* write again */
+ efuse_one_byte_write(padapter, addr+1,
+ pkt.data[i*2 + 1]);
+ }
}
addr += 2;
}
diff --git a/drivers/staging/rtl8712/rtl8712_event.h b/drivers/staging/rtl8712/rtl8712_event.h
index 27316934b1a..3d7f79efa2c 100644
--- a/drivers/staging/rtl8712/rtl8712_event.h
+++ b/drivers/staging/rtl8712/rtl8712_event.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _RTL8712_EVENT_H_
#define _RTL8712_EVENT_H_
diff --git a/drivers/staging/rtl8712/rtl8712_fifoctrl_bitdef.h b/drivers/staging/rtl8712/rtl8712_fifoctrl_bitdef.h
index e5df19f4b02..c564dc862d9 100644
--- a/drivers/staging/rtl8712/rtl8712_fifoctrl_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_fifoctrl_bitdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_FIFOCTRL_BITDEF_H__
#define __RTL8712_FIFOCTRL_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_fifoctrl_regdef.h b/drivers/staging/rtl8712/rtl8712_fifoctrl_regdef.h
index c2e3af2c79f..29b89c45c70 100644
--- a/drivers/staging/rtl8712/rtl8712_fifoctrl_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_fifoctrl_regdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_FIFOCTRL_REGDEF_H__
#define __RTL8712_FIFOCTRL_REGDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_gp_bitdef.h b/drivers/staging/rtl8712/rtl8712_gp_bitdef.h
index 35ca809e179..884a8212176 100644
--- a/drivers/staging/rtl8712/rtl8712_gp_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_gp_bitdef.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL8712_GP_BITDEF_H__
#define __RTL8712_GP_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_gp_regdef.h b/drivers/staging/rtl8712/rtl8712_gp_regdef.h
index 17e72bda617..8fc68f6a2c7 100644
--- a/drivers/staging/rtl8712/rtl8712_gp_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_gp_regdef.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL8712_GP_REGDEF_H__
#define __RTL8712_GP_REGDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_hal.h b/drivers/staging/rtl8712/rtl8712_hal.h
index c696dd8a2ea..665e7183817 100644
--- a/drivers/staging/rtl8712/rtl8712_hal.h
+++ b/drivers/staging/rtl8712/rtl8712_hal.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL8712_HAL_H__
#define __RTL8712_HAL_H__
diff --git a/drivers/staging/rtl8712/rtl8712_interrupt_bitdef.h b/drivers/staging/rtl8712/rtl8712_interrupt_bitdef.h
index 02f24809814..49598c314f0 100644
--- a/drivers/staging/rtl8712/rtl8712_interrupt_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_interrupt_bitdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_INTERRUPT_BITDEF_H__
#define __RTL8712_INTERRUPT_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index cb1751e95f0..bac56e5caf1 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -1786,7 +1786,7 @@ void LedControl871x(struct _adapter *padapter, enum LED_CTL_MODE LedAction)
{
struct led_priv *ledpriv = &(padapter->ledpriv);
- if (ledpriv == NULL || ledpriv->bRegUseLed == false)
+ if (ledpriv->bRegUseLed == false)
return;
switch (ledpriv->LedStrategy) {
case SW_LED_MODE0:
diff --git a/drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h b/drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h
index 74800cd2340..28e0a7ebcad 100644
--- a/drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_MACSETTING_BITDEF_H__
#define __RTL8712_MACSETTING_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_macsetting_regdef.h b/drivers/staging/rtl8712/rtl8712_macsetting_regdef.h
index 00b003bd690..ced0da9332d 100644
--- a/drivers/staging/rtl8712/rtl8712_macsetting_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_macsetting_regdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_MACSETTING_REGDEF_H__
#define __RTL8712_MACSETTING_REGDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_powersave_bitdef.h b/drivers/staging/rtl8712/rtl8712_powersave_bitdef.h
index 0922a8dc132..8fc68941651 100644
--- a/drivers/staging/rtl8712/rtl8712_powersave_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_powersave_bitdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_POWERSAVE_BITDEF_H__
#define __RTL8712_POWERSAVE_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_powersave_regdef.h b/drivers/staging/rtl8712/rtl8712_powersave_regdef.h
index 72927df3db2..4632ddd5d1f 100644
--- a/drivers/staging/rtl8712/rtl8712_powersave_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_powersave_regdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_POWERSAVE_REGDEF_H__
#define __RTL8712_POWERSAVE_REGDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_ratectrl_bitdef.h b/drivers/staging/rtl8712/rtl8712_ratectrl_bitdef.h
index 87048b3fe04..6d3d6e8522f 100644
--- a/drivers/staging/rtl8712/rtl8712_ratectrl_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_ratectrl_bitdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_RATECTRL_BITDEF_H__
#define __RTL8712_RATECTRL_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h b/drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h
index 31c8363e5bc..73dfc361015 100644
--- a/drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL8712_RATECTRL_REGDEF_H__
#define __RTL8712_RATECTRL_REGDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 625a8a03966..6d692657e78 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -192,7 +192,7 @@ static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
} else
pattrib->tcpchk_valid = 0; /* invalid */
pattrib->mcs_rate = (u8)((le32_to_cpu(prxstat->rxdw3)) & 0x3f);
- pattrib->htc = (u8)((le32_to_cpu(prxstat->rxdw3) >> 6) & 0x1);
+ pattrib->htc = (u8)((le32_to_cpu(prxstat->rxdw3) >> 14) & 0x1);
/*Offset 16*/
/*Offset 20*/
/*phy_info*/
@@ -207,7 +207,7 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
struct __queue *defrag_q)
{
struct list_head *plist, *phead;
- u8 wlanhdr_offset;
+ u8 *data, wlanhdr_offset;
u8 curfragnum;
struct recv_frame_hdr *pfhdr, *pnfhdr;
union recv_frame *prframe, *pnextrframe;
@@ -224,22 +224,25 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
/*the first fragment number must be 0
*free the whole queue*/
r8712_free_recvframe(prframe, pfree_recv_queue);
- prframe = NULL;
- goto exit;
+ r8712_free_recvframe_queue(defrag_q, pfree_recv_queue);
+ return NULL;
}
- plist = get_next(phead);
+ curfragnum++;
+ plist = get_list_head(defrag_q);
+ plist = get_next(plist);
+ data = get_recvframe_data(prframe);
while (end_of_queue_search(phead, plist) == false) {
pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
- /*check the fragment sequence (2nd ~n fragment frame) */
pnfhdr = &pnextrframe->u.hdr;
- curfragnum++;
+ /*check the fragment sequence (2nd ~n fragment frame) */
if (curfragnum != pnfhdr->attrib.frag_num) {
/* the fragment number must increase (after decache)
* release the defrag_q & prframe */
r8712_free_recvframe(prframe, pfree_recv_queue);
- prframe = NULL;
- goto exit;
+ r8712_free_recvframe_queue(defrag_q, pfree_recv_queue);
+ return NULL;
}
+ curfragnum++;
/* copy the 2nd~n fragment frame's payload to the first fragment
* get the 2nd~last fragment frame's payload */
wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
@@ -252,7 +255,6 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
plist = get_next(plist);
}
-exit:
/* free the defrag_q queue and return the prframe */
r8712_free_recvframe_queue(defrag_q, pfree_recv_queue);
return prframe;
@@ -1074,7 +1076,7 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
/* for first fragment packet, driver need allocate 1536 +
* drvinfo_sz + RXDESC_SIZE to defrag packet. */
if ((mf == 1) && (frag == 0))
- alloc_sz = 1658;
+ alloc_sz = 1658;/*1658+6=1664, 1664 is 128 alignment.*/
else
alloc_sz = tmp_len;
/* 2 is for IP header 4 bytes alignment in QoS packet case.
diff --git a/drivers/staging/rtl8712/rtl8712_recv.h b/drivers/staging/rtl8712/rtl8712_recv.h
index c48757f97da..8efbd1fa035 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.h
+++ b/drivers/staging/rtl8712/rtl8712_recv.h
@@ -1,10 +1,40 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _RTL8712_RECV_H_
#define _RTL8712_RECV_H_
#include "osdep_service.h"
#include "drv_types.h"
+/* Realtek's v2.6.6 reduced this to 4. However, under heavy network and CPU
+ * loads, even 8 receive buffers might not be enough; cutting it to 4 seemed
+ * unwise.
+ */
#define NR_RECVBUFF (8)
+
#define NR_PREALLOC_RECV_SKB (8)
#define RXDESC_SIZE 24
#define RXDESC_OFFSET RXDESC_SIZE
@@ -115,7 +145,6 @@ union recv_frame {
union {
struct list_head list;
struct recv_frame_hdr hdr;
- addr_t mem[RECVFRAME_HDR_ALIGN>>2];
} u;
};
diff --git a/drivers/staging/rtl8712/rtl8712_regdef.h b/drivers/staging/rtl8712/rtl8712_regdef.h
index 5b0de2ab6b6..e7bca55b59d 100644
--- a/drivers/staging/rtl8712/rtl8712_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_regdef.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL8712_REGDEF_H__
#define __RTL8712_REGDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_security_bitdef.h b/drivers/staging/rtl8712/rtl8712_security_bitdef.h
index 8df4bf4a069..05dafa0c333 100644
--- a/drivers/staging/rtl8712/rtl8712_security_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_security_bitdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_SECURITY_BITDEF_H__
#define __RTL8712_SECURITY_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_spec.h b/drivers/staging/rtl8712/rtl8712_spec.h
index 3f181eed4ee..af11b44b114 100644
--- a/drivers/staging/rtl8712/rtl8712_spec.h
+++ b/drivers/staging/rtl8712/rtl8712_spec.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL8712_SPEC_H__
#define __RTL8712_SPEC_H__
diff --git a/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h b/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h
index dce15c2ff50..eed09c872fc 100644
--- a/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL8712_SYSCFG_BITDEF_H__
#define __RTL8712_SYSCFG_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_syscfg_regdef.h b/drivers/staging/rtl8712/rtl8712_syscfg_regdef.h
index 687e1b7a03f..767dfdf8d83 100644
--- a/drivers/staging/rtl8712/rtl8712_syscfg_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_syscfg_regdef.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL8712_SYSCFG_REGDEF_H__
#define __RTL8712_SYSCFG_REGDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h b/drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h
index 32ce9fab5db..72442158242 100644
--- a/drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_TIMECTRL_BITDEF_H__
#define __RTL8712_TIMECTRL_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_timectrl_regdef.h b/drivers/staging/rtl8712/rtl8712_timectrl_regdef.h
index 8a3dd562ba8..106916c7e31 100644
--- a/drivers/staging/rtl8712/rtl8712_timectrl_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_timectrl_regdef.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL8712_TIMECTRL_REGDEF_H__
#define __RTL8712_TIMECTRL_REGDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_wmac_bitdef.h b/drivers/staging/rtl8712/rtl8712_wmac_bitdef.h
index 6d3be2a2422..61a3603aa58 100644
--- a/drivers/staging/rtl8712/rtl8712_wmac_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_wmac_bitdef.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL8712_WMAC_BITDEF_H__
#define __RTL8712_WMAC_BITDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_wmac_regdef.h b/drivers/staging/rtl8712/rtl8712_wmac_regdef.h
index ac80dfb317c..d9f8347ab46 100644
--- a/drivers/staging/rtl8712/rtl8712_wmac_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_wmac_regdef.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL8712_WMAC_REGDEF_H__
#define __RTL8712_WMAC_REGDEF_H__
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index 88a15049bc2..693331955d6 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -37,6 +37,7 @@
static void dump_xframe(struct _adapter *padapter,
struct xmit_frame *pxmitframe);
+static void update_txdesc(struct xmit_frame *pxmitframe, uint *pmem, int sz);
sint _r8712_init_hw_txqueue(struct hw_txqueue *phw_txqueue, u8 ac_tag)
{
@@ -238,16 +239,180 @@ exit_dequeue_xframe_ex:
void r8712_do_queue_select(struct _adapter *padapter,
struct pkt_attrib *pattrib)
{
- u8 qsel = 0;
+ unsigned int qsel = 0;
struct dvobj_priv *pdvobj = (struct dvobj_priv *)&padapter->dvobjpriv;
if (pdvobj->nr_endpoint == 6)
- qsel = pattrib->priority;
- else if (pdvobj->nr_endpoint == 4)
- qsel = pattrib->priority;
+ qsel = (unsigned int) pattrib->priority;
+ else if (pdvobj->nr_endpoint == 4) {
+ qsel = (unsigned int) pattrib->priority;
+ if (qsel == 0 || qsel == 3)
+ qsel = 3;
+ else if (qsel == 1 || qsel == 2)
+ qsel = 1;
+ else if (qsel == 4 || qsel == 5)
+ qsel = 5;
+ else if (qsel == 6 || qsel == 7)
+ qsel = 7;
+ else
+ qsel = 3;
+ }
pattrib->qsel = qsel;
}
+#ifdef CONFIG_R8712_TX_AGGR
+u8 r8712_construct_txaggr_cmd_desc(struct xmit_buf *pxmitbuf)
+{
+ struct tx_desc *ptx_desc = (struct tx_desc *)pxmitbuf->pbuf;
+
+ /* Fill up TxCmd Descriptor according as USB FW Tx Aaggregation info.*/
+ /* dw0 */
+ ptx_desc->txdw0 = cpu_to_le32(CMD_HDR_SZ&0xffff);
+ ptx_desc->txdw0 |=
+ cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000);
+ ptx_desc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
+
+ /* dw1 */
+ ptx_desc->txdw1 |= cpu_to_le32((0x13<<QSEL_SHT)&0x00001f00);
+
+ return _SUCCESS;
+}
+
+u8 r8712_construct_txaggr_cmd_hdr(struct xmit_buf *pxmitbuf)
+{
+ struct xmit_frame *pxmitframe = (struct xmit_frame *)
+ pxmitbuf->priv_data;
+ struct _adapter *padapter = pxmitframe->padapter;
+ struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ struct cmd_hdr *pcmd_hdr = (struct cmd_hdr *)
+ (pxmitbuf->pbuf + TXDESC_SIZE);
+
+ /* Fill up Cmd Header for USB FW Tx Aggregation.*/
+ /* dw0 */
+ pcmd_hdr->cmd_dw0 = cpu_to_le32((GEN_CMD_CODE(_AMSDU_TO_AMPDU) << 16) |
+ (pcmdpriv->cmd_seq << 24));
+ pcmdpriv->cmd_seq++;
+
+ return _SUCCESS;
+}
+
+u8 r8712_append_mpdu_unit(struct xmit_buf *pxmitbuf,
+ struct xmit_frame *pxmitframe)
+{
+ struct _adapter *padapter = pxmitframe->padapter;
+ struct tx_desc *ptx_desc = (struct tx_desc *)pxmitbuf->pbuf;
+ int last_txcmdsz = 0;
+ int padding_sz = 0;
+
+ /* 802.3->802.11 convertor */
+ r8712_xmitframe_coalesce(padapter, pxmitframe->pkt, pxmitframe);
+ /* free skb struct */
+ r8712_xmit_complete(padapter, pxmitframe);
+ if (pxmitframe->attrib.ether_type != 0x0806) {
+ if ((pxmitframe->attrib.ether_type != 0x888e) &&
+ (pxmitframe->attrib.dhcp_pkt != 1)) {
+ r8712_issue_addbareq_cmd(padapter,
+ pxmitframe->attrib.priority);
+ }
+ }
+ pxmitframe->last[0] = 1;
+ update_txdesc(pxmitframe, (uint *)(pxmitframe->buf_addr),
+ pxmitframe->attrib.last_txcmdsz);
+ /*padding zero */
+ last_txcmdsz = pxmitframe->attrib.last_txcmdsz;
+ padding_sz = (8 - (last_txcmdsz % 8));
+ if ((last_txcmdsz % 8) != 0) {
+ int i;
+ for (i = 0; i < padding_sz; i++)
+ *(pxmitframe->buf_addr+TXDESC_SIZE+last_txcmdsz+i) = 0;
+ }
+ /* Add the new mpdu's length */
+ ptx_desc->txdw0 = cpu_to_le32((ptx_desc->txdw0&0xffff0000) |
+ ((ptx_desc->txdw0&0x0000ffff)+
+ ((TXDESC_SIZE+last_txcmdsz+padding_sz)&0x0000ffff)));
+
+ return _SUCCESS;
+}
+
+
+u8 r8712_xmitframe_aggr_1st(struct xmit_buf *pxmitbuf,
+ struct xmit_frame *pxmitframe)
+{
+ /* linux complete context doesnt need to protect */
+ pxmitframe->pxmitbuf = pxmitbuf;
+ pxmitbuf->priv_data = pxmitframe;
+ pxmitframe->pxmit_urb[0] = pxmitbuf->pxmit_urb[0];
+ /* buffer addr assoc */
+ pxmitframe->buf_addr = pxmitbuf->pbuf+TXDESC_SIZE+CMD_HDR_SZ;
+ /*RTL8712_DMA_H2CCMD */
+ r8712_construct_txaggr_cmd_desc(pxmitbuf);
+ r8712_construct_txaggr_cmd_hdr(pxmitbuf);
+ if (r8712_append_mpdu_unit(pxmitbuf, pxmitframe) == _SUCCESS)
+ pxmitbuf->aggr_nr = 1;
+
+ return _SUCCESS;
+}
+
+u16 r8712_xmitframe_aggr_next(struct xmit_buf *pxmitbuf,
+ struct xmit_frame *pxmitframe)
+{
+ pxmitframe->pxmitbuf = pxmitbuf;
+ pxmitbuf->priv_data = pxmitframe;
+ pxmitframe->pxmit_urb[0] = pxmitbuf->pxmit_urb[0];
+ /* buffer addr assoc */
+ pxmitframe->buf_addr = pxmitbuf->pbuf + TXDESC_SIZE +
+ (((struct tx_desc *)pxmitbuf->pbuf)->txdw0 & 0x0000ffff);
+ if (r8712_append_mpdu_unit(pxmitbuf, pxmitframe) == _SUCCESS) {
+ r8712_free_xmitframe_ex(&pxmitframe->padapter->xmitpriv,
+ pxmitframe);
+ pxmitbuf->aggr_nr++;
+ }
+
+ return TXDESC_SIZE +
+ (((struct tx_desc *)pxmitbuf->pbuf)->txdw0 & 0x0000ffff);
+}
+
+u8 r8712_dump_aggr_xframe(struct xmit_buf *pxmitbuf,
+ struct xmit_frame *pxmitframe)
+{
+ struct _adapter *padapter = pxmitframe->padapter;
+ struct dvobj_priv *pdvobj = (struct dvobj_priv *) &padapter->dvobjpriv;
+ struct tx_desc * ptxdesc = (struct tx_desc *)pxmitbuf->pbuf;
+ struct cmd_hdr *pcmd_hdr = (struct cmd_hdr *)
+ (pxmitbuf->pbuf + TXDESC_SIZE);
+ u16 total_length = (u16) (ptxdesc->txdw0 & 0xffff);
+
+ /* use 1st xmitframe as media */
+ xmitframe_xmitbuf_attach(pxmitframe, pxmitbuf);
+ pcmd_hdr->cmd_dw0 = cpu_to_le32(((total_length-CMD_HDR_SZ)&0x0000ffff)|
+ (pcmd_hdr->cmd_dw0&0xffff0000));
+
+ /* urb length in cmd_dw1 */
+ pcmd_hdr->cmd_dw1 = cpu_to_le32((pxmitbuf->aggr_nr & 0xff)|
+ ((total_length+TXDESC_SIZE) << 16));
+ pxmitframe->last[0] = 1;
+ pxmitframe->bpending[0] = false;
+ pxmitframe->mem_addr = pxmitbuf->pbuf;
+
+ if ((pdvobj->ishighspeed && ((total_length+TXDESC_SIZE)%0x200) == 0) ||
+ ((!pdvobj->ishighspeed &&
+ ((total_length+TXDESC_SIZE)%0x40) == 0))) {
+ ptxdesc->txdw0 |= cpu_to_le32
+ (((TXDESC_SIZE+OFFSET_SZ+8)<<OFFSET_SHT)&0x00ff0000);
+ /*32 bytes for TX Desc + 8 bytes pending*/
+ } else {
+ ptxdesc->txdw0 |= cpu_to_le32
+ (((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000);
+ /*default = 32 bytes for TX Desc*/
+ }
+ r8712_write_port(pxmitframe->padapter, RTL8712_DMA_H2CCMD,
+ total_length+TXDESC_SIZE, (u8 *)pxmitframe);
+
+ return _SUCCESS;
+}
+
+#endif
+
static void update_txdesc(struct xmit_frame *pxmitframe, uint *pmem, int sz)
{
uint qsel;
@@ -258,6 +423,9 @@ static void update_txdesc(struct xmit_frame *pxmitframe, uint *pmem, int sz)
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
struct dvobj_priv *pdvobj = (struct dvobj_priv *)&padapter->dvobjpriv;
+#ifdef CONFIG_R8712_TX_AGGR
+ struct cmd_priv *pcmdpriv = (struct cmd_priv *)&padapter->cmdpriv;
+#endif
u8 blnSetTxDescOffset;
sint bmcst = IS_MCAST(pattrib->ra);
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
@@ -291,8 +459,40 @@ static void update_txdesc(struct xmit_frame *pxmitframe, uint *pmem, int sz)
if (pxmitframe->frame_tag == DATA_FRAMETAG) {
/* offset 4 */
ptxdesc->txdw1 |= cpu_to_le32((pattrib->mac_id)&0x1f);
+
+#ifdef CONFIG_R8712_TX_AGGR
+ /* dirty workaround, need to check if it is aggr cmd. */
+ if ((u8 *)pmem != (u8 *)pxmitframe->pxmitbuf->pbuf) {
+ ptxdesc->txdw0 |= cpu_to_le32
+ ((0x3 << TYPE_SHT)&TYPE_MSK);
+ qsel = (uint)(pattrib->qsel & 0x0000001f);
+ if (qsel == 2)
+ qsel = 0;
+ ptxdesc->txdw1 |= cpu_to_le32
+ ((qsel << QSEL_SHT) & 0x00001f00);
+ ptxdesc->txdw2 = cpu_to_le32
+ ((qsel << RTS_RC_SHT)&0x001f0000);
+ ptxdesc->txdw6 |= cpu_to_le32
+ ((0x5 << RSVD6_SHT)&RSVD6_MSK);
+ } else {
+ ptxdesc->txdw0 |= cpu_to_le32
+ ((0x3 << TYPE_SHT)&TYPE_MSK);
+ ptxdesc->txdw1 |= cpu_to_le32
+ ((0x13 << QSEL_SHT) & 0x00001f00);
+ qsel = (uint)(pattrib->qsel & 0x0000001f);
+ if (qsel == 2)
+ qsel = 0;
+ ptxdesc->txdw2 = cpu_to_le32
+ ((qsel << RTS_RC_SHT)&0x0001f000);
+ ptxdesc->txdw7 |= cpu_to_le32
+ (pcmdpriv->cmd_seq << 24);
+ pcmdpriv->cmd_seq++;
+ }
+ pattrib->qsel = 0x13;
+#else
qsel = (uint)(pattrib->qsel & 0x0000001f);
ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);
+#endif
if (!pqospriv->qos_option)
ptxdesc->txdw1 |= cpu_to_le32(BIT(16));/*Non-QoS*/
if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
@@ -414,7 +614,11 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
struct hw_xmit *phwxmits;
sint hwentry;
struct xmit_frame *pxmitframe = NULL;
+#ifdef CONFIG_R8712_TX_AGGR
+ struct xmit_frame *p2ndxmitframe = NULL;
+#else
int res = _SUCCESS, xcnt = 0;
+#endif
phwxmits = pxmitpriv->hwxmits;
hwentry = pxmitpriv->hwxmit_entry;
@@ -422,32 +626,74 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
pxmitbuf = r8712_alloc_xmitbuf(pxmitpriv);
if (!pxmitbuf)
return false;
+#ifdef CONFIG_R8712_TX_AGGR
+ pxmitbuf->aggr_nr = 0;
+#endif
}
- do {
- pxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits, hwentry);
- if (pxmitframe) {
- pxmitframe->pxmitbuf = pxmitbuf;
- pxmitframe->pxmit_urb[0] = pxmitbuf->pxmit_urb[0];
- pxmitframe->buf_addr = pxmitbuf->pbuf;
- if (pxmitframe->frame_tag == DATA_FRAMETAG) {
- if (pxmitframe->attrib.priority <= 15)
- res = r8712_xmitframe_coalesce(padapter,
- pxmitframe->pkt, pxmitframe);
- /* always return ndis_packet after
- * r8712_xmitframe_coalesce */
- r8712_xmit_complete(padapter, pxmitframe);
- }
- if (res == _SUCCESS)
- dump_xframe(padapter, pxmitframe);
- else
- r8712_free_xmitframe_ex(pxmitpriv, pxmitframe);
- xcnt++;
- } else {
+ /* 1st frame dequeued */
+ pxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits, hwentry);
+ /* need to remember the 1st frame */
+ if (pxmitframe != NULL) {
+
+#ifdef CONFIG_R8712_TX_AGGR
+ /* 1. dequeue 2nd frame
+ * 2. aggr if 2nd xframe is dequeued, else dump directly
+ */
+ if (AGGR_NR_HIGH_BOUND > 1)
+ p2ndxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits,
+ hwentry);
+ if (pxmitframe->frame_tag != DATA_FRAMETAG) {
r8712_free_xmitbuf(pxmitpriv, pxmitbuf);
return false;
}
- break;
- } while (0);
+ if (p2ndxmitframe != NULL)
+ if (p2ndxmitframe->frame_tag != DATA_FRAMETAG) {
+ r8712_free_xmitbuf(pxmitpriv, pxmitbuf);
+ return false;
+ }
+ r8712_xmitframe_aggr_1st(pxmitbuf, pxmitframe);
+ if (p2ndxmitframe != NULL) {
+ u16 total_length;
+ total_length = r8712_xmitframe_aggr_next(
+ pxmitbuf, p2ndxmitframe);
+ do {
+ p2ndxmitframe = dequeue_xframe_ex(
+ pxmitpriv, phwxmits, hwentry);
+ if (p2ndxmitframe != NULL)
+ total_length =
+ r8712_xmitframe_aggr_next(
+ pxmitbuf,
+ p2ndxmitframe);
+ else
+ break;
+ } while (total_length <= 0x1800 &&
+ pxmitbuf->aggr_nr <= AGGR_NR_HIGH_BOUND);
+ }
+ if (pxmitbuf->aggr_nr > 0)
+ r8712_dump_aggr_xframe(pxmitbuf, pxmitframe);
+
+#else
+
+ xmitframe_xmitbuf_attach(pxmitframe, pxmitbuf);
+ if (pxmitframe->frame_tag == DATA_FRAMETAG) {
+ if (pxmitframe->attrib.priority <= 15)
+ res = r8712_xmitframe_coalesce(padapter,
+ pxmitframe->pkt, pxmitframe);
+ /* always return ndis_packet after
+ * r8712_xmitframe_coalesce */
+ r8712_xmit_complete(padapter, pxmitframe);
+ }
+ if (res == _SUCCESS)
+ dump_xframe(padapter, pxmitframe);
+ else
+ r8712_free_xmitframe_ex(pxmitpriv, pxmitframe);
+ xcnt++;
+#endif
+
+ } else { /* pxmitframe == NULL && p2ndxmitframe == NULL */
+ r8712_free_xmitbuf(pxmitpriv, pxmitbuf);
+ return false;
+ }
return true;
}
@@ -481,8 +727,13 @@ static void dump_xframe(struct _adapter *padapter,
pxmitframe->mem_addr = mem_addr;
pxmitframe->bpending[t] = false;
ff_hwaddr = get_ff_hwaddr(pxmitframe);
+#ifdef CONFIG_R8712_TX_AGGR
+ r8712_write_port(padapter, RTL8712_DMA_H2CCMD, w_sz,
+ (unsigned char *)pxmitframe);
+#else
r8712_write_port(padapter, ff_hwaddr, w_sz,
(unsigned char *)pxmitframe);
+#endif
mem_addr += w_sz;
mem_addr = (u8 *)RND4(((addr_t)(mem_addr)));
}
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.h b/drivers/staging/rtl8712/rtl8712_xmit.h
index 12a080f545a..b50e7a1f3a4 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.h
+++ b/drivers/staging/rtl8712/rtl8712_xmit.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _RTL8712_XMIT_H_
#define _RTL8712_XMIT_H_
@@ -39,6 +64,8 @@
#define OWN BIT(31)
#define FSG BIT(27)
#define LSG BIT(26)
+#define TYPE_SHT (24)
+#define TYPE_MSK (0x03000000)
/*OFFSET 4*/
#define PKT_OFFSET_SZ (0)
@@ -49,6 +76,7 @@
#define BMC BIT(7)
#define BK BIT(30)
#define AGG_EN BIT(29)
+#define RTS_RC_SHT (16)
/*OFFSET 12*/
#define SEQ_SHT (16)
@@ -58,6 +86,8 @@
/*OFFSET 20*/
#define DISFB BIT(15)
+#define RSVD6_MSK (0x00E00000)
+#define RSVD6_SHT (21)
struct tx_desc {
/*DWORD 0*/
@@ -83,4 +113,11 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
void r8712_do_queue_select(struct _adapter *padapter,
struct pkt_attrib *pattrib);
+#ifdef CONFIG_R8712_TX_AGGR
+u8 r8712_xmitframe_aggr_1st(struct xmit_buf *pxmitbuf,
+ struct xmit_frame *pxmitframe);
+u8 r8712_dump_aggr_xframe(struct xmit_buf *pxmitbuf,
+ struct xmit_frame *pxmitframe);
+#endif
+
#endif
diff --git a/drivers/staging/rtl8712/rtl871x_byteorder.h b/drivers/staging/rtl8712/rtl871x_byteorder.h
index 07707e23dbe..bd3703b98bc 100644
--- a/drivers/staging/rtl8712/rtl871x_byteorder.h
+++ b/drivers/staging/rtl8712/rtl871x_byteorder.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef _RTL871X_BYTEORDER_H_
#define _RTL871X_BYTEORDER_H_
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index ba92762dbb1..d77388bdba7 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -28,6 +28,24 @@
#define _RTL871X_CMD_C_
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/circ_buf.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
+#include <linux/semaphore.h>
+#include <linux/rtnetlink.h>
+
#include "osdep_service.h"
#include "drv_types.h"
#include "recv_osdep.h"
@@ -222,7 +240,7 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara,
GEN_CMD_CODE(_SiteSurvey));
psurveyPara->bsslimit = cpu_to_le32(48);
- psurveyPara->passive_mode = cpu_to_le32(1);
+ psurveyPara->passive_mode = cpu_to_le32(pmlmepriv->passive_mode);
psurveyPara->ss_ssidlen = 0;
memset(psurveyPara->ss_ssid, 0, IW_ESSID_MAX_SIZE + 1);
if ((pssid != NULL) && (pssid->SsidLength)) {
@@ -233,6 +251,7 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
r8712_enqueue_cmd(pcmdpriv, ph2c);
_set_timer(&pmlmepriv->scan_to_timer, SCANNING_TIMEOUT);
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_SITE_SURVEY);
+ padapter->blnEnableRxFF0Filter = 0;
return _SUCCESS;
}
@@ -259,6 +278,28 @@ u8 r8712_setdatarate_cmd(struct _adapter *padapter, u8 *rateset)
return _SUCCESS;
}
+u8 r8712_set_chplan_cmd(struct _adapter *padapter, int chplan)
+{
+ struct cmd_obj *ph2c;
+ struct SetChannelPlan_param *psetchplanpara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL)
+ return _FAIL;
+ psetchplanpara = (struct SetChannelPlan_param *)
+ _malloc(sizeof(struct SetChannelPlan_param));
+ if (psetchplanpara == NULL) {
+ kfree((u8 *) ph2c);
+ return _FAIL;
+ }
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetchplanpara,
+ GEN_CMD_CODE(_SetChannelPlan));
+ psetchplanpara->ChannelPlan = chplan;
+ r8712_enqueue_cmd(pcmdpriv, ph2c);
+ return _SUCCESS;
+}
+
u8 r8712_setbasicrate_cmd(struct _adapter *padapter, u8 *rateset)
{
struct cmd_obj *ph2c;
@@ -285,20 +326,62 @@ u8 r8712_setbasicrate_cmd(struct _adapter *padapter, u8 *rateset)
u8 r8712_setptm_cmd(struct _adapter *padapter, u8 type)
{
struct cmd_obj *ph2c;
- struct PT_param *pptparm;
+ struct writePTM_parm *pwriteptmparm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
if (ph2c == NULL)
return _FAIL;
- pptparm = (struct PT_param *)_malloc(sizeof(struct PT_param));
- if (pptparm == NULL) {
+ pwriteptmparm = (struct writePTM_parm *)
+ _malloc(sizeof(struct writePTM_parm));
+ if (pwriteptmparm == NULL) {
+ kfree((u8 *) ph2c);
+ return _FAIL;
+ }
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetPT));
+ pwriteptmparm->type = type;
+ r8712_enqueue_cmd(pcmdpriv, ph2c);
+ return _SUCCESS;
+}
+
+u8 r8712_setfwdig_cmd(struct _adapter *padapter, u8 type)
+{
+ struct cmd_obj *ph2c;
+ struct writePTM_parm *pwriteptmparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL)
+ return _FAIL;
+ pwriteptmparm = (struct writePTM_parm *)
+ _malloc(sizeof(struct setdig_parm));
+ if (pwriteptmparm == NULL) {
+ kfree((u8 *) ph2c);
+ return _FAIL;
+ }
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetDIG));
+ pwriteptmparm->type = type;
+ r8712_enqueue_cmd(pcmdpriv, ph2c);
+ return _SUCCESS;
+}
+
+u8 r8712_setfwra_cmd(struct _adapter *padapter, u8 type)
+{
+ struct cmd_obj *ph2c;
+ struct writePTM_parm *pwriteptmparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL)
+ return _FAIL;
+ pwriteptmparm = (struct writePTM_parm *)
+ _malloc(sizeof(struct setra_parm));
+ if (pwriteptmparm == NULL) {
kfree((u8 *) ph2c);
return _FAIL;
}
- init_h2fwcmd_w_parm_no_rsp(ph2c, pptparm,
- GEN_CMD_CODE(_SetPowerTracking));
- pptparm->PT_En = type;
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetRA));
+ pwriteptmparm->type = type;
r8712_enqueue_cmd(pcmdpriv, ph2c);
return _SUCCESS;
}
@@ -353,8 +436,17 @@ u8 r8712_getrfreg_cmd(struct _adapter *padapter, u8 offset, u8 *pval)
void r8712_getbbrfreg_cmdrsp_callback(struct _adapter *padapter,
struct cmd_obj *pcmd)
{
- kfree((unsigned char *) pcmd->parmbuf);
- kfree((unsigned char *) pcmd);
+ kfree(pcmd->parmbuf);
+ kfree(pcmd);
+ padapter->mppriv.workparam.bcompleted = true;
+}
+
+void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter,
+ struct cmd_obj *pcmd)
+{
+ kfree(pcmd->parmbuf);
+ kfree(pcmd);
+
padapter->mppriv.workparam.bcompleted = true;
}
@@ -485,12 +577,6 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
&psecnetwork->IEs[0],
pnetwork->network.IELength,
&psecnetwork->IELength);
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))
- r8712_add_ht_addt_info(padapter,
- &pnetwork->network.IEs[0],
- &psecnetwork->IEs[0],
- pnetwork->network.IELength,
- &psecnetwork->IELength);
}
}
psecuritypriv->supplicant_ie[0] = (u8)psecnetwork->IELength;
@@ -675,6 +761,33 @@ u8 r8712_setrttbl_cmd(struct _adapter *padapter,
return _SUCCESS;
}
+u8 r8712_gettssi_cmd(struct _adapter *padapter, u8 offset, u8 *pval)
+{
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct cmd_obj *ph2c;
+ struct readTSSI_parm *prdtssiparm;
+
+ ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL)
+ return _FAIL;
+ prdtssiparm = (struct readTSSI_parm *)
+ _malloc(sizeof(struct readTSSI_parm));
+ if (prdtssiparm == NULL) {
+ kfree((unsigned char *) ph2c);
+ return _FAIL;
+ }
+ _init_listhead(&ph2c->list);
+ ph2c->cmdcode = GEN_CMD_CODE(_ReadTSSI);
+ ph2c->parmbuf = (unsigned char *)prdtssiparm;
+ ph2c->cmdsz = sizeof(struct readTSSI_parm);
+ ph2c->rsp = pval;
+ ph2c->rspsz = sizeof(struct readTSSI_rsp);
+
+ prdtssiparm->offset = offset;
+ r8712_enqueue_cmd(pcmdpriv, ph2c);
+ return _SUCCESS;
+}
+
u8 r8712_setMacAddr_cmd(struct _adapter *padapter, u8 *mac_addr)
{
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
@@ -923,3 +1036,32 @@ void r8712_setassocsta_cmdrsp_callback(struct _adapter *padapter,
spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
r8712_free_cmd_obj(pcmd);
}
+
+u8 r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
+ u32 tryPktCnt, u32 tryPktInterval, u32 firstStageTO)
+{
+ struct cmd_obj *ph2c;
+ struct DisconnectCtrlEx_param *param;
+ struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
+
+ ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL)
+ return _FAIL;
+ param = (struct DisconnectCtrlEx_param *)
+ _malloc(sizeof(struct DisconnectCtrlEx_param));
+ if (param == NULL) {
+ kfree((unsigned char *) ph2c);
+ return _FAIL;
+ }
+ memset(param, 0, sizeof(struct DisconnectCtrlEx_param));
+
+ param->EnableDrvCtrl = (unsigned char)enableDrvCtrl;
+ param->TryPktCnt = (unsigned char)tryPktCnt;
+ param->TryPktInterval = (unsigned char)tryPktInterval;
+ param->FirstStageTO = (unsigned int)firstStageTO;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, param,
+ GEN_CMD_CODE(_DisconnectCtrlEx));
+ r8712_enqueue_cmd(pcmdpriv, ph2c);
+ return _SUCCESS;
+}
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index dcf256d44d1..757ebf77e9d 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL871X_CMD_H_
#define __RTL871X_CMD_H_
@@ -295,6 +320,35 @@ struct setdatarate_parm {
u8 datarates[NumRates];
};
+enum _RT_CHANNEL_DOMAIN {
+ RT_CHANNEL_DOMAIN_FCC = 0,
+ RT_CHANNEL_DOMAIN_IC = 1,
+ RT_CHANNEL_DOMAIN_ETSI = 2,
+ RT_CHANNEL_DOMAIN_SPAIN = 3,
+ RT_CHANNEL_DOMAIN_FRANCE = 4,
+ RT_CHANNEL_DOMAIN_MKK = 5,
+ RT_CHANNEL_DOMAIN_MKK1 = 6,
+ RT_CHANNEL_DOMAIN_ISRAEL = 7,
+ RT_CHANNEL_DOMAIN_TELEC = 8,
+
+ /* Be compatible with old channel plan. No good! */
+ RT_CHANNEL_DOMAIN_MIC = 9,
+ RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN = 10,
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_13 = 11,
+ RT_CHANNEL_DOMAIN_TELEC_NETGEAR = 12,
+
+ RT_CHANNEL_DOMAIN_NCC = 13,
+ RT_CHANNEL_DOMAIN_5G = 14,
+ RT_CHANNEL_DOMAIN_5G_40M = 15,
+ /*===== Add new channel plan above this line===============*/
+ RT_CHANNEL_DOMAIN_MAX,
+};
+
+
+struct SetChannelPlan_param {
+ enum _RT_CHANNEL_DOMAIN ChannelPlan;
+};
+
/*
Caller Mode: Any
@@ -367,6 +421,10 @@ struct writeBB_parm {
u8 value;
};
+struct writePTM_parm {
+ u8 type;
+};
+
struct readRF_parm {
u8 offset;
};
@@ -646,9 +704,14 @@ struct SetChannel_parm {
u32 curr_ch;
};
-/*H2C Handler index: 56 */
-struct PT_param {
- u8 PT_En;
+/*H2C Handler index: 61 */
+struct DisconnectCtrlEx_param {
+ /* MAXTIME = (2 * FirstStageTO) + (TryPktCnt * TryPktInterval) */
+ unsigned char EnableDrvCtrl;
+ unsigned char TryPktCnt;
+ unsigned char TryPktInterval; /* Unit: ms */
+ unsigned char rsvd;
+ unsigned int FirstStageTO; /* Unit: ms */
};
#define GEN_CMD_CODE(cmd) cmd ## _CMD_
@@ -684,13 +747,17 @@ u8 r8712_disassoc_cmd(struct _adapter *padapter);
u8 r8712_setopmode_cmd(struct _adapter *padapter,
enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype);
u8 r8712_setdatarate_cmd(struct _adapter *padapter, u8 *rateset);
+u8 r8712_set_chplan_cmd(struct _adapter *padapter, int chplan);
u8 r8712_setbasicrate_cmd(struct _adapter *padapter, u8 *rateset);
u8 r8712_getrfreg_cmd(struct _adapter *padapter, u8 offset, u8 * pval);
u8 r8712_setrfintfs_cmd(struct _adapter *padapter, u8 mode);
u8 r8712_setrfreg_cmd(struct _adapter *padapter, u8 offset, u32 val);
u8 r8712_setrttbl_cmd(struct _adapter *padapter,
struct setratable_parm *prate_table);
+u8 r8712_gettssi_cmd(struct _adapter *padapter, u8 offset, u8 *pval);
u8 r8712_setptm_cmd(struct _adapter *padapter, u8 type);
+u8 r8712_setfwdig_cmd(struct _adapter *padapter, u8 type);
+u8 r8712_setfwra_cmd(struct _adapter *padapter, u8 type);
u8 r8712_addbareq_cmd(struct _adapter *padapter, u8 tid);
u8 r8712_wdg_wk_cmd(struct _adapter *padapter);
void r8712_survey_cmd_callback(struct _adapter *padapter,
@@ -703,10 +770,14 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
struct cmd_obj *pcmd);
void r8712_getbbrfreg_cmdrsp_callback(struct _adapter *padapter,
struct cmd_obj *pcmd);
+void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter,
+ struct cmd_obj *pcmd);
void r8712_setstaKey_cmdrsp_callback(struct _adapter *padapter,
struct cmd_obj *pcmd);
void r8712_setassocsta_cmdrsp_callback(struct _adapter *padapter,
struct cmd_obj *pcmd);
+u8 r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
+ u32 tryPktCnt, u32 tryPktInterval, u32 firstStageTO);
struct _cmd_callback {
u32 cmd_code;
diff --git a/drivers/staging/rtl8712/rtl871x_debug.h b/drivers/staging/rtl8712/rtl871x_debug.h
index c392fd958e4..74468b05825 100644
--- a/drivers/staging/rtl8712/rtl871x_debug.h
+++ b/drivers/staging/rtl8712/rtl871x_debug.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL871X_DEBUG_H__
#define __RTL871X_DEBUG_H__
diff --git a/drivers/staging/rtl8712/rtl871x_eeprom.h b/drivers/staging/rtl8712/rtl871x_eeprom.h
index fb15f5bde42..497276e53bb 100644
--- a/drivers/staging/rtl8712/rtl871x_eeprom.h
+++ b/drivers/staging/rtl8712/rtl871x_eeprom.h
@@ -1,3 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
#ifndef __RTL871X_EEPROM_H__
#define __RTL871X_EEPROM_H__
diff --git a/drivers/staging/rtl8712/rtl871x_event.h b/drivers/staging/rtl8712/rtl871x_event.h
index 6ce06767130..e03ee90d287 100644
--- a/drivers/staging/rtl8712/rtl871x_event.h
+++ b/drivers/staging/rtl8712/rtl871x_event.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _RTL871x_EVENT_H_
#define _RTL871x_EVENT_H_
diff --git a/drivers/staging/rtl8712/rtl871x_ht.h b/drivers/staging/rtl8712/rtl871x_ht.h
index 612203deea7..41872d93740 100644
--- a/drivers/staging/rtl8712/rtl871x_ht.h
+++ b/drivers/staging/rtl8712/rtl871x_ht.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _RTL871X_HT_H_
#define _RTL871X_HT_H_
diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
index b70cb2b6296..86308a0093e 100644
--- a/drivers/staging/rtl8712/rtl871x_io.h
+++ b/drivers/staging/rtl8712/rtl871x_io.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _IO_H_
#define _IO_H_
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 40e6b5cc412..507584b837c 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -39,8 +39,17 @@
#include "rtl871x_ioctl_set.h"
#include "rtl871x_mp_ioctl.h"
#include "mlme_osdep.h"
-
-#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
+#include <linux/wireless.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <net/iw_handler.h>
+#include <linux/if_arp.h>
+
+#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 0x1E)
#define SCAN_ITEM_SIZE 768
#define MAX_CUSTOM_LEN 64
@@ -160,10 +169,6 @@ static inline char *translate_scan(struct _adapter *padapter,
struct iw_event iwe;
struct ieee80211_ht_cap *pht_capie;
char *current_val;
- u8 *buf = (u8 *)_malloc(pnetwork->network.IELength * 2);
- u8 *wpa_ie = (u8 *)_malloc(255);
- u8 *rsn_ie = (u8 *)_malloc(255);
- u8 *wps_ie = (u8 *)_malloc(MAX_WPS_IE_LEN);
s8 *p;
u32 i = 0, ht_ielen = 0;
u16 cap, ht_cap = false, mcs_rate;
@@ -280,6 +285,8 @@ static inline char *translate_scan(struct _adapter *padapter,
start = current_val;
/* parsing WPA/WPA2 IE */
{
+ u8 buf[MAX_WPA_IE_LEN];
+ u8 wpa_ie[255], rsn_ie[255];
u16 wpa_len = 0, rsn_len = 0;
int n;
sint out_len = 0;
@@ -330,6 +337,7 @@ static inline char *translate_scan(struct _adapter *padapter,
}
{ /* parsing WPS IE */
+ u8 wps_ie[512];
uint wps_ielen;
if (r8712_get_wps_ie(pnetwork->network.IEs,
@@ -354,16 +362,12 @@ static inline char *translate_scan(struct _adapter *padapter,
iwe.u.qual.noise = 0; /* noise level */
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
/* how to translate rssi to ?% */
- kfree(buf);
- kfree(wpa_ie);
- kfree(rsn_ie);
- kfree(wps_ie);
return start;
}
static int wpa_set_auth_algs(struct net_device *dev, u32 value)
{
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
int ret = 0;
if ((value & AUTH_ALG_SHARED_KEY) && (value & AUTH_ALG_OPEN_SYSTEM)) {
@@ -395,7 +399,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
int ret = 0;
u32 wep_key_idx, wep_key_len = 0;
struct NDIS_802_11_WEP *pwep = NULL;
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
@@ -526,7 +530,7 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
memcpy(buf, pie , ielen);
pos = buf;
if (ielen < RSN_HEADER_LEN) {
- ret = -1;
+ ret = -EINVAL;
goto exit;
}
if (r8712_parse_wpa_ie(buf, ielen, &group_cipher,
@@ -637,7 +641,7 @@ static int r8711_wx_get_name(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
u32 ht_ielen = 0;
char *p;
u8 ht_cap = false;
@@ -693,7 +697,7 @@ static int r8711_wx_set_freq(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct iw_freq *fwrq = &wrqu->freq;
int rc = 0;
@@ -727,7 +731,7 @@ static int r8711_wx_get_freq(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
@@ -736,8 +740,9 @@ static int r8711_wx_get_freq(struct net_device *dev,
pcur_bss->Configuration.DSConfig-1] * 100000;
wrqu->freq.e = 1;
wrqu->freq.i = pcur_bss->Configuration.DSConfig;
- } else
- return -1;
+ } else {
+ return -ENOLINK;
+ }
return 0;
}
@@ -745,7 +750,7 @@ static int r8711_wx_set_mode(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
enum NDIS_802_11_NETWORK_INFRASTRUCTURE networkType;
switch (wrqu->mode) {
@@ -768,15 +773,15 @@ static int r8711_wx_set_mode(struct net_device *dev,
r8712_setopmode_cmd(padapter, networkType);
else
r8712_setopmode_cmd(padapter, Ndis802_11AutoUnknown);
- if (!r8712_set_802_11_infrastructure_mode(padapter, networkType))
- return -1;
+
+ r8712_set_802_11_infrastructure_mode(padapter, networkType);
return 0;
}
static int r8711_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)
@@ -795,7 +800,7 @@ static int r871x_wx_set_pmkid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct iw_pmksa *pPMK = (struct iw_pmksa *) extra;
u8 strZeroMacAddress[ETH_ALEN] = {0x00};
@@ -949,6 +954,10 @@ static int r8711_wx_get_range(struct net_device *dev,
return 0;
}
+static int r8711_wx_get_rate(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+
static int r871x_wx_set_priv(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *awrq,
@@ -956,6 +965,7 @@ static int r871x_wx_set_priv(struct net_device *dev,
{
int ret = 0, len = 0;
char *ext;
+ struct _adapter *padapter = netdev_priv(dev);
struct iw_point *dwrq = (struct iw_point *)awrq;
len = dwrq->length;
@@ -966,6 +976,87 @@ static int r871x_wx_set_priv(struct net_device *dev,
kfree(ext);
return -EFAULT;
}
+
+ if (0 == strcasecmp(ext, "RSSI")) {
+ /*Return received signal strength indicator in -db for */
+ /* current AP */
+ /*<ssid> Rssi xx */
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_network *pcur_network = &pmlmepriv->cur_network;
+ /*static u8 xxxx; */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ sprintf(ext, "%s rssi %d",
+ pcur_network->network.Ssid.Ssid,
+ /*(xxxx=xxxx+10) */
+ ((padapter->recvpriv.fw_rssi)>>1)-95
+ /*pcur_network->network.Rssi */
+ );
+ } else {
+ sprintf(ext, "OK");
+ }
+ } else if (0 == strcasecmp(ext, "LINKSPEED")) {
+ /*Return link speed in MBPS */
+ /*LinkSpeed xx */
+ union iwreq_data wrqd;
+ int ret_inner;
+ int mbps;
+
+ ret_inner = r8711_wx_get_rate(dev, info, &wrqd, extra);
+ if (0 != ret_inner)
+ mbps = 0;
+ else
+ mbps = wrqd.bitrate.value / 1000000;
+ sprintf(ext, "LINKSPEED %d", mbps);
+ } else if (0 == strcasecmp(ext, "MACADDR")) {
+ /*Return mac address of the station */
+ /*Macaddr = xx.xx.xx.xx.xx.xx */
+ sprintf(ext,
+ "MACADDR = %02x.%02x.%02x.%02x.%02x.%02x",
+ *(dev->dev_addr), *(dev->dev_addr+1),
+ *(dev->dev_addr+2), *(dev->dev_addr+3),
+ *(dev->dev_addr+4), *(dev->dev_addr+5));
+ } else if (0 == strcasecmp(ext, "SCAN-ACTIVE")) {
+ /*Set scan type to active */
+ /*OK if successful */
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ pmlmepriv->passive_mode = 1;
+ sprintf(ext, "OK");
+ } else if (0 == strcasecmp(ext, "SCAN-PASSIVE")) {
+ /*Set scan type to passive */
+ /*OK if successful */
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ pmlmepriv->passive_mode = 0;
+ sprintf(ext, "OK");
+ } else if (0 == strncmp(ext, "DCE-E", 5)) {
+ /*Set scan type to passive */
+ /*OK if successful */
+ r8712_disconnectCtrlEx_cmd(padapter
+ , 1 /*u32 enableDrvCtrl */
+ , 5 /*u32 tryPktCnt */
+ , 100 /*u32 tryPktInterval */
+ , 5000 /*u32 firstStageTO */
+ );
+ sprintf(ext, "OK");
+ } else if (0 == strncmp(ext, "DCE-D", 5)) {
+ /*Set scan type to passive */
+ /*OK if successfu */
+ r8712_disconnectCtrlEx_cmd(padapter
+ , 0 /*u32 enableDrvCtrl */
+ , 5 /*u32 tryPktCnt */
+ , 100 /*u32 tryPktInterval */
+ , 5000 /*u32 firstStageTO */
+ );
+ sprintf(ext, "OK");
+ } else {
+ printk(KERN_INFO "r8712u: r871x_wx_set_priv: unknown Command"
+ " %s.\n", ext);
+ goto FREE_EXT;
+ }
+ if (copy_to_user(dwrq->pointer, ext,
+ min(dwrq->length, (__u16)(strlen(ext)+1))))
+ ret = -EFAULT;
+
+FREE_EXT:
kfree(ext);
return ret;
}
@@ -975,6 +1066,13 @@ static int r871x_wx_set_priv(struct net_device *dev,
* s2. set_802_11_authentication_mode()
* s3. set_802_11_encryption_mode()
* s4. set_802_11_bssid()
+ *
+ * This function intends to handle the Set AP command, which specifies the
+ * MAC# of a preferred Access Point.
+ * Currently, the request comes via Wireless Extensions' SIOCSIWAP ioctl.
+ *
+ * For this operation to succeed, there is no need for the interface to be Up.
+ *
*/
static int r8711_wx_set_wap(struct net_device *dev,
struct iw_request_info *info,
@@ -982,7 +1080,7 @@ static int r8711_wx_set_wap(struct net_device *dev,
char *extra)
{
int ret = -EINPROGRESS;
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct sockaddr *temp = (struct sockaddr *)awrq;
@@ -992,10 +1090,8 @@ static int r8711_wx_set_wap(struct net_device *dev,
struct wlan_network *pnetwork = NULL;
enum NDIS_802_11_AUTHENTICATION_MODE authmode;
- if (padapter->bup == false)
- return -1;
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true)
- return -1;
+ return -EBUSY;
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true)
return ret;
if (temp->sa_family != ARPHRD_ETHER)
@@ -1012,16 +1108,15 @@ static int r8711_wx_set_wap(struct net_device *dev,
pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
dst_bssid = pnetwork->network.MacAddress;
if (!memcmp(dst_bssid, temp->sa_data, ETH_ALEN)) {
- if (r8712_set_802_11_infrastructure_mode(padapter,
- pnetwork->network.InfrastructureMode) == false)
- ret = -1;
+ r8712_set_802_11_infrastructure_mode(padapter,
+ pnetwork->network.InfrastructureMode);
break;
}
}
spin_unlock_irqrestore(&queue->lock, irqL);
if (!ret) {
if (!r8712_set_802_11_authentication_mode(padapter, authmode))
- ret = -1;
+ ret = -ENOMEM;
else {
if (!r8712_set_802_11_bssid(padapter, temp->sa_data))
ret = -1;
@@ -1034,7 +1129,7 @@ static int r8711_wx_get_wap(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
@@ -1053,7 +1148,7 @@ static int r871x_wx_set_mlme(struct net_device *dev,
{
int ret = 0;
u16 reason;
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *) extra;
if (mlme == NULL)
@@ -1074,11 +1169,19 @@ static int r871x_wx_set_mlme(struct net_device *dev,
return ret;
}
+/**
+ *
+ * This function intends to handle the Set Scan command.
+ * Currently, the request comes via Wireless Extensions' SIOCSIWSCAN ioctl.
+ *
+ * For this operation to succeed, the interface is brought Up beforehand.
+ *
+ */
static int r8711_wx_set_scan(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 status = true;
@@ -1088,7 +1191,7 @@ static int r8711_wx_set_scan(struct net_device *dev,
return -1;
}
if (padapter->bup == false)
- return -1;
+ return -ENETDOWN;
if (padapter->hw_init_completed == false)
return -1;
if ((check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) ||
@@ -1126,7 +1229,7 @@ static int r8711_wx_get_scan(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
@@ -1141,7 +1244,7 @@ static int r8711_wx_get_scan(struct net_device *dev,
while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) {
msleep(30);
cnt++;
- if (cnt > 1000)
+ if (cnt > 100)
break;
}
spin_lock_irqsave(&queue->lock, irqL);
@@ -1169,12 +1272,18 @@ static int r8711_wx_get_scan(struct net_device *dev,
* s2. set_802_11_authenticaion_mode()
* s3. set_802_11_encryption_mode()
* s4. set_802_11_ssid()
+ *
+ * This function intends to handle the Set ESSID command.
+ * Currently, the request comes via the Wireless Extensions' SIOCSIWESSID ioctl.
+ *
+ * For this operation to succeed, there is no need for the interface to be Up.
+ *
*/
static int r8711_wx_set_essid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
@@ -1184,10 +1293,8 @@ static int r8711_wx_set_essid(struct net_device *dev,
struct list_head *phead;
u32 len;
- if (padapter->bup == false)
- return -1;
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
- return -1;
+ return -EBUSY;
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
return 0;
if (wrqu->essid.length > IW_ESSID_MAX_SIZE)
@@ -1212,10 +1319,20 @@ static int r8711_wx_set_essid(struct net_device *dev,
if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength))
&& (pnetwork->network.Ssid.SsidLength ==
ndis_ssid.SsidLength)) {
- if (!r8712_set_802_11_infrastructure_mode(
+ if (check_fwstate(pmlmepriv,
+ WIFI_ADHOC_STATE)) {
+ if (pnetwork->network.
+ InfrastructureMode
+ !=
+ padapter->mlmepriv.
+ cur_network.network.
+ InfrastructureMode)
+ continue;
+ }
+
+ r8712_set_802_11_infrastructure_mode(
padapter,
- pnetwork->network.InfrastructureMode))
- return -1;
+ pnetwork->network.InfrastructureMode);
break;
}
}
@@ -1229,7 +1346,7 @@ static int r8711_wx_get_essid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
u32 len, ret = 0;
@@ -1239,8 +1356,9 @@ static int r8711_wx_get_essid(struct net_device *dev,
wrqu->essid.length = len;
memcpy(extra, pcur_bss->Ssid.Ssid, len);
wrqu->essid.flags = 1;
- } else
- ret = -1;
+ } else {
+ ret = -ENOLINK;
+ }
return ret;
}
@@ -1248,7 +1366,7 @@ static int r8711_wx_set_rate(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
u32 target_rate = wrqu->bitrate.value;
u32 fixed = wrqu->bitrate.fixed;
u32 ratevalue = 0;
@@ -1312,7 +1430,7 @@ set_rate:
datarates[i] = 0xff;
}
if (r8712_setdatarate_cmd(padapter, datarates) != _SUCCESS)
- ret = -1;
+ ret = -ENOMEM;
return ret;
}
@@ -1320,10 +1438,11 @@ static int r8711_wx_get_rate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
struct ieee80211_ht_cap *pht_capie;
+ unsigned char rf_type = padapter->registrypriv.rf_config;
int i;
u8 *p;
u16 rate, max_rate = 0, ht_cap = false;
@@ -1356,7 +1475,9 @@ static int r8711_wx_get_rate(struct net_device *dev,
i++;
}
if (ht_cap == true) {
- if (mcs_rate & 0x8000) /* MCS15 */
+ if (mcs_rate & 0x8000 /* MCS15 */
+ &&
+ RTL8712_RF_2T2R == rf_type)
max_rate = (bw_40MHz) ? ((short_GI) ? 300 :
270) : ((short_GI) ? 144 : 130);
else if (mcs_rate & 0x0080) /* MCS7 */
@@ -1371,7 +1492,7 @@ static int r8711_wx_get_rate(struct net_device *dev,
wrqu->bitrate.value = max_rate * 500000;
}
} else
- return -1;
+ return -ENOLINK;
return 0;
}
@@ -1379,7 +1500,7 @@ static int r8711_wx_get_rts(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
wrqu->rts.value = padapter->registrypriv.rts_thresh;
wrqu->rts.fixed = 0; /* no auto select */
@@ -1390,7 +1511,7 @@ static int r8711_wx_set_frag(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
if (wrqu->frag.disabled)
padapter->xmitpriv.frag_len = MAX_FRAG_THRESHOLD;
@@ -1407,7 +1528,7 @@ static int r8711_wx_get_frag(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
wrqu->frag.value = padapter->xmitpriv.frag_len;
wrqu->frag.fixed = 0; /* no auto select */
@@ -1433,7 +1554,7 @@ static int r8711_wx_set_enc(struct net_device *dev,
struct NDIS_802_11_WEP wep;
enum NDIS_802_11_AUTHENTICATION_MODE authmode;
struct iw_point *erq = &(wrqu->encoding);
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
key = erq->flags & IW_ENCODE_INDEX;
memset(&wep, 0, sizeof(struct NDIS_802_11_WEP));
@@ -1527,7 +1648,7 @@ static int r8711_wx_get_enc(struct net_device *dev,
union iwreq_data *wrqu, char *keybuf)
{
uint key, ret = 0;
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
struct iw_point *erq = &(wrqu->encoding);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -1599,7 +1720,7 @@ static int r871x_wx_set_gen_ie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
return r871x_set_wpa_ie(padapter, extra, wrqu->data.length);
}
@@ -1608,7 +1729,7 @@ static int r871x_wx_set_auth(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct iw_param *param = (struct iw_param *)&(wrqu->param);
int paramid;
int paramval;
@@ -1701,7 +1822,7 @@ static int r871x_wx_set_enc_ext(struct net_device *dev,
param_len = sizeof(struct ieee_param) + pext->key_len;
param = (struct ieee_param *)_malloc(param_len);
if (param == NULL)
- return -1;
+ return -ENOMEM;
memset(param, 0, param_len);
param->cmd = IEEE_CMD_SET_ENCRYPTION;
memset(param->sta_addr, 0xff, ETH_ALEN);
@@ -1719,7 +1840,7 @@ static int r871x_wx_set_enc_ext(struct net_device *dev,
alg_name = "CCMP";
break;
default:
- return -1;
+ return -EINVAL;
}
strncpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
if (pext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
@@ -1754,7 +1875,7 @@ static int r8711_wx_read32(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *keybuf)
{
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
u32 addr;
u32 data32;
@@ -1771,7 +1892,7 @@ static int r8711_wx_write32(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *keybuf)
{
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
u32 addr;
u32 data32;
@@ -1785,7 +1906,7 @@ static int dummy(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
- return -1;
+ return -ENOSYS;
}
static int r8711_drvext_hdl(struct net_device *dev,
@@ -1799,7 +1920,7 @@ static int r871x_mp_ioctl_hdl(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct iw_point *p = &wrqu->data;
struct oid_par_priv oid_par;
struct mp_ioctl_handler *phandler;
@@ -1882,7 +2003,7 @@ static int r871x_get_ap_info(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct iw_point *pdata = &wrqu->data;
@@ -1953,7 +2074,7 @@ static int r871x_set_pid(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
struct iw_point *pdata = &wrqu->data;
if ((padapter->bDriverStopped) || (pdata == NULL))
@@ -1963,11 +2084,32 @@ static int r871x_set_pid(struct net_device *dev,
return 0;
}
+static int r871x_set_chplan(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
+ struct iw_point *pdata = &wrqu->data;
+ int ch_plan = -1;
+
+ if ((padapter->bDriverStopped) || (pdata == NULL)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ ch_plan = (int)*extra;
+ r8712_set_chplan_cmd(padapter, ch_plan);
+
+exit:
+
+ return ret;
+}
+
static int r871x_wps_start(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct iw_point *pdata = &wrqu->data;
u32 u32wps_start = 0;
@@ -1991,7 +2133,7 @@ static int r871x_wps_start(struct net_device *dev,
static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
{
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
switch (name) {
case IEEE_PARAM_WPA_ENABLED:
@@ -2044,7 +2186,7 @@ static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
static int wpa_mlme(struct net_device *dev, u32 command, u32 reason)
{
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
switch (command) {
case IEEE_MLME_STA_DEAUTH:
@@ -2065,16 +2207,17 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
{
struct ieee_param *param;
int ret = 0;
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
if (p->length < sizeof(struct ieee_param) || !p->pointer)
return -EINVAL;
param = (struct ieee_param *)_malloc(p->length);
if (param == NULL)
return -ENOMEM;
- if (copy_from_user(param, p->pointer, p->length))
+ if (copy_from_user(param, p->pointer, p->length)) {
kfree((u8 *)param);
return -EFAULT;
+ }
switch (param->cmd) {
case IEEE_CMD_SET_WPA_PARAM:
ret = wpa_set_param(dev, param->u.wpa_param.name,
@@ -2201,6 +2344,10 @@ static const struct iw_priv_args r8711_private_args[] = {
{
SIOCIWFIRSTPRIV + 0x6,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_start"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x7,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "chplan"
}
};
@@ -2211,12 +2358,13 @@ static iw_handler r8711_private_handler[] = {
r871x_mp_ioctl_hdl,
r871x_get_ap_info, /*for MM DTV platform*/
r871x_set_pid,
- r871x_wps_start,
+ r871x_wps_start,
+ r871x_set_chplan
};
static struct iw_statistics *r871x_get_wireless_stats(struct net_device *dev)
{
- struct _adapter *padapter = (struct _adapter *) _netdev_priv(dev);
+ struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
struct iw_statistics *piwstats = &padapter->iwstats;
int tmp_level = 0;
int tmp_qual = 0;
@@ -2232,7 +2380,13 @@ static struct iw_statistics *r871x_get_wireless_stats(struct net_device *dev)
tmp_qual = padapter->recvpriv.signal;
tmp_noise = padapter->recvpriv.noise;
piwstats->qual.level = tmp_level;
- piwstats->qual.qual = tmp_qual;
+ /*piwstats->qual.qual = tmp_qual;
+ * The NetworkManager of Fedora 10, 13 will use the link
+ * quality for its display.
+ * So, use the fw_rssi on link quality variable because
+ * fw_rssi will be updated per 2 seconds.
+ */
+ piwstats->qual.qual = tmp_level;
piwstats->qual.noise = tmp_noise;
}
piwstats->qual.updated = IW_QUAL_ALL_UPDATED;
@@ -2247,5 +2401,5 @@ struct iw_handler_def r871x_handlers_def = {
.num_private = sizeof(r8711_private_handler) / sizeof(iw_handler),
.num_private_args = sizeof(r8711_private_args) /
sizeof(struct iw_priv_args),
- .get_wireless_stats = r871x_get_wireless_stats,
+ .get_wireless_stats = r871x_get_wireless_stats
};
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.h b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.h
index 4f1aa876d75..3bcceae3cbe 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.h
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _RTL871X_IOCTL_RTL_H
#define _RTL871X_IOCTL_RTL_H
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index 8486eb1503c..fb29b423752 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -243,7 +243,7 @@ done:
spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
}
-u8 r8712_set_802_11_infrastructure_mode(struct _adapter *padapter,
+void r8712_set_802_11_infrastructure_mode(struct _adapter *padapter,
enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype)
{
unsigned long irqL;
@@ -290,7 +290,6 @@ u8 r8712_set_802_11_infrastructure_mode(struct _adapter *padapter,
}
spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
}
- return true;
}
u8 r8712_set_802_11_disassociate(struct _adapter *padapter)
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.h b/drivers/staging/rtl8712/rtl871x_ioctl_set.h
index 8dff2b196ff..2c94cd151c9 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.h
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __IOCTL_SET_H
#define __IOCTL_SET_H
@@ -22,7 +47,7 @@ u8 r8712_set_802_11_disassociate(struct _adapter *padapter);
u8 r8712_set_802_11_bssid_list_scan(struct _adapter *padapter);
-u8 r8712_set_802_11_infrastructure_mode(struct _adapter *padapter,
+void r8712_set_802_11_infrastructure_mode(struct _adapter *padapter,
enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype);
void r8712_set_802_11_ssid(struct _adapter *padapter,
diff --git a/drivers/staging/rtl8712/rtl871x_led.h b/drivers/staging/rtl8712/rtl871x_led.h
index 8085e5eb82b..1a90c7f4d8f 100644
--- a/drivers/staging/rtl8712/rtl871x_led.h
+++ b/drivers/staging/rtl8712/rtl871x_led.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL8712_LED_H
#define __RTL8712_LED_H
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 866554d48d8..ef8eb6c7ee4 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -52,6 +52,8 @@ static sint _init_mlme_priv(struct _adapter *padapter)
pmlmepriv->fw_state = 0;
pmlmepriv->cur_network.network.InfrastructureMode =
Ndis802_11AutoUnknown;
+ /* Maybe someday we should rename this variable to "active_mode"(Jeff)*/
+ pmlmepriv->passive_mode = 1; /* 1: active, 0: passive. */
spin_lock_init(&(pmlmepriv->lock));
spin_lock_init(&(pmlmepriv->lock2));
_init_queue(&(pmlmepriv->free_bss_pool));
@@ -485,6 +487,12 @@ static int is_desired_network(struct _adapter *adapter,
if ((psecuritypriv->PrivacyAlgrthm != _NO_PRIVACY_) &&
(pnetwork->network.Privacy == 0))
bselected = false;
+ if (check_fwstate(&adapter->mlmepriv, WIFI_ADHOC_STATE) == true) {
+ if (pnetwork->network.InfrastructureMode !=
+ adapter->mlmepriv.cur_network.network.
+ InfrastructureMode)
+ bselected = false;
+ }
return bselected;
}
@@ -683,9 +691,11 @@ void r8712_ind_disconnect(struct _adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- _clr_fwstate_(pmlmepriv, _FW_LINKED);
- padapter->ledpriv.LedControlHandler(padapter, LED_CTL_NO_LINK);
- r8712_os_indicate_disconnect(padapter);
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ _clr_fwstate_(pmlmepriv, _FW_LINKED);
+ padapter->ledpriv.LedControlHandler(padapter, LED_CTL_NO_LINK);
+ r8712_os_indicate_disconnect(padapter);
+ }
if (padapter->pwrctrlpriv.pwr_mode !=
padapter->registrypriv.power_mgnt) {
_cancel_timer_ex(&pmlmepriv->dhcp_timer);
@@ -718,9 +728,9 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
if (sizeof(struct list_head) == 4 * sizeof(u32)) {
pnetwork = (struct wlan_network *)
- _malloc(sizeof(struct wlan_network));
+ _malloc(sizeof(struct wlan_network));
memcpy((u8 *)pnetwork+16, (u8 *)pbuf + 8,
- sizeof(struct wlan_network) - 16);
+ sizeof(struct wlan_network) - 16);
} else
pnetwork = (struct wlan_network *)pbuf;
@@ -1271,12 +1281,16 @@ sint r8712_set_key(struct _adapter *adapter,
psecuritypriv->DefKey[keyid].skey, keylen);
break;
case _TKIP_:
+ if (keyid < 1 || keyid > 2)
+ return _FAIL;
keylen = 16;
memcpy(psetkeyparm->key,
&psecuritypriv->XGrpKey[keyid - 1], keylen);
psetkeyparm->grpkey = 1;
break;
case _AES_:
+ if (keyid < 1 || keyid > 2)
+ return _FAIL;
keylen = 16;
memcpy(psetkeyparm->key,
&psecuritypriv->XGrpKey[keyid - 1], keylen);
@@ -1657,7 +1671,7 @@ void r8712_update_registrypriv_dev_network(struct _adapter *adapter)
/* 1. Supported rates
* 2. IE
*/
- sz = r8712_generate_ie(pregistrypriv, adapter);
+ sz = r8712_generate_ie(pregistrypriv);
pdev_network->IELength = sz;
pdev_network->Length = r8712_get_ndis_wlan_bssid_ex_sz(
(struct ndis_wlan_bssid_ex *)pdev_network);
@@ -1802,39 +1816,3 @@ void r8712_issue_addbareq_cmd(struct _adapter *padapter, int priority)
}
}
}
-
-/*the function is >= passive_level*/
-unsigned int r8712_add_ht_addt_info(struct _adapter *padapter,
- u8 *in_ie, u8 *out_ie,
- uint in_len, uint *pout_len)
-{
- u32 ielen, out_len = 0;
- unsigned char *p, *pframe;
- struct ieee80211_ht_addt_info ht_addt_info;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- out_len = *pout_len;
-
- if (pregistrypriv->ht_enable == 1) {
- p = r8712_get_ie(in_ie+12, _HT_ADD_INFO_IE_,
- &ielen, in_len - 12);
- if (p && (ielen > 0)) {
- ; /* dummy branch */
- } else {
- if (p == NULL) {
- int sz = sizeof(struct ieee80211_ht_addt_info);
- memset(&ht_addt_info, 0, sz);
- /*need to add the HT additional IEs*/
- ht_addt_info.control_chan =
- pregistrypriv->channel;
- pframe = r8712_set_ie(out_ie + out_len,
- _HT_ADD_INFO_IE_,
- sz,
- (unsigned char *)&ht_addt_info,
- pout_len);
- }
- }
- }
- return phtpriv->ht_option;
-}
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.h b/drivers/staging/rtl8712/rtl871x_mlme.h
index 2794804d082..71ca01350b5 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.h
+++ b/drivers/staging/rtl8712/rtl871x_mlme.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL871X_MLME_H_
#define __RTL871X_MLME_H_
@@ -70,6 +95,7 @@ struct mlme_priv {
struct __queue scanned_queue;
u8 *free_bss_buf;
unsigned long num_of_scanned;
+ u8 passive_mode; /*add for Android's SCAN-ACTIVE/SCAN-PASSIVE */
struct ndis_802_11_ssid assoc_ssid;
u8 assoc_bssid[6];
struct wlan_network cur_network;
@@ -201,8 +227,6 @@ void r8712_joinbss_reset(struct _adapter *padapter);
unsigned int r8712_restructure_ht_ie(struct _adapter *padapter, u8 *in_ie,
u8 *out_ie, uint in_len, uint *pout_len);
void r8712_issue_addbareq_cmd(struct _adapter *padapter, int priority);
-unsigned int r8712_add_ht_addt_info(struct _adapter *padapter, u8 *in_ie,
- u8 *out_ie, uint in_len, uint *pout_len);
int r8712_is_same_ibss(struct _adapter *adapter, struct wlan_network *pnetwork);
#endif /*__RTL871X_MLME_H_*/
diff --git a/drivers/staging/rtl8712/rtl871x_mp.c b/drivers/staging/rtl8712/rtl871x_mp.c
index 41e00a2c862..5638d5e065f 100644
--- a/drivers/staging/rtl8712/rtl871x_mp.c
+++ b/drivers/staging/rtl8712/rtl871x_mp.c
@@ -1,18 +1,28 @@
/******************************************************************************
- * rtl871x_mp.c
*
- * Description :
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
*
- * Author :
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
*
- * History :
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
*
- * Copyright 2007, Realtek Corp.
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
- * The contents of this file is the sole property of Realtek Corp. It can not be
- * be used, copied or modified without written permission from Realtek Corp.
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
*
-*******************************************************************************/
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#define _RTL871X_MP_C_
#include "osdep_service.h"
diff --git a/drivers/staging/rtl8712/rtl871x_mp.h b/drivers/staging/rtl8712/rtl871x_mp.h
index 805aba07159..255dc94f090 100644
--- a/drivers/staging/rtl8712/rtl871x_mp.h
+++ b/drivers/staging/rtl8712/rtl871x_mp.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL871X_MP_H_
#define __RTL871X_MP_H_
@@ -101,7 +126,6 @@ struct mp_xmit_frame {
struct urb *pxmit_urb[8];
u8 bpending[8];
u8 last[8];
- uint mem[(MAX_MP_XMITBUF_SZ >> 2)];
};
struct mp_wiparam {
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
index 67759c31b1d..850143d5dee 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _RTL871X_MP_IOCTL_H
#define _RTL871X_MP_IOCTL_H
@@ -307,7 +332,6 @@ struct rfintfs_parm {
struct mp_xmit_packet {
unsigned int len;
- unsigned int mem[MAX_MP_XMITBUF_SZ >> 2];
};
struct psmode_param {
@@ -410,8 +434,7 @@ static struct mp_ioctl_handler mp_ioctl_hdl[] = {
{sizeof(struct psmode_param), NULL, 0},/*13*/
{sizeof(struct eeprom_rw_param), NULL, 0},/*14*/
{sizeof(struct eeprom_rw_param), NULL, 0},/*15*/
- {sizeof(u8), oid_rt_pro_set_power_tracking_hdl,
- OID_RT_PRO_SET_POWER_TRACKING},/*16*/
+ {sizeof(unsigned char), NULL, 0},/*16*/
{sizeof(u32), NULL, 0},/*17*/
{sizeof(u32), oid_rt_pro_set_continuous_tx_hdl,
OID_RT_PRO_SET_CONTINUOUS_TX},/*18*/
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.h b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
index 34f074aebd6..b41ca2892be 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.h
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL871X_PWRCTRL_H_
#define __RTL871X_PWRCTRL_H_
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index aec83dd8a01..7069f06d9b5 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -28,6 +28,9 @@
#define _RTL871X_RECV_C_
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
+
#include "osdep_service.h"
#include "drv_types.h"
#include "recv_osdep.h"
@@ -73,6 +76,7 @@ sint _r8712_init_recv_priv(struct recv_priv *precvpriv,
RXFRAME_ALIGN_SZ);
if (precvpriv->pallocated_frame_buf == NULL)
return _FAIL;
+ kmemleak_not_leak(precvpriv->pallocated_frame_buf);
memset(precvpriv->pallocated_frame_buf, 0, NR_RECVFRAME *
sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf +
diff --git a/drivers/staging/rtl8712/rtl871x_rf.h b/drivers/staging/rtl8712/rtl871x_rf.h
index 6c54966f13f..133ed646292 100644
--- a/drivers/staging/rtl8712/rtl871x_rf.h
+++ b/drivers/staging/rtl8712/rtl871x_rf.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL871X_RF_H_
#define __RTL871X_RF_H_
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 65321bed4d5..7b92927a04d 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -28,6 +28,21 @@
#define _RTL871X_SECURITY_C_
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/circ_buf.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
+#include <linux/semaphore.h>
+
#include "osdep_service.h"
#include "drv_types.h"
#include "wifi.h"
diff --git a/drivers/staging/rtl8712/rtl871x_security.h b/drivers/staging/rtl8712/rtl871x_security.h
index bff71d2aae9..a13395fe21d 100644
--- a/drivers/staging/rtl8712/rtl871x_security.h
+++ b/drivers/staging/rtl8712/rtl871x_security.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __RTL871X_SECURITY_H_
#define __RTL871X_SECURITY_H_
diff --git a/drivers/staging/rtl8712/rtl871x_wlan_sme.h b/drivers/staging/rtl8712/rtl871x_wlan_sme.h
index d9733ac6a43..44924d5de21 100644
--- a/drivers/staging/rtl8712/rtl871x_wlan_sme.h
+++ b/drivers/staging/rtl8712/rtl871x_wlan_sme.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _RTL871X_WLAN_SME_H_
#define _RTL871X_WLAN_SME_H_
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index ccf08911f58..8bbdee70f86 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -152,11 +152,12 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
pxmitbuf++;
}
pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
+ _init_workitem(&padapter->wkFilterRxFF0, r8712_SetFilter, padapter);
alloc_hwxmits(padapter);
init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
tasklet_init(&pxmitpriv->xmit_tasklet,
- (void(*)(addr_t))r8712_xmit_bh,
- (addr_t)padapter);
+ (void(*)(unsigned long))r8712_xmit_bh,
+ (unsigned long)padapter);
return _SUCCESS;
}
@@ -612,7 +613,7 @@ sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
if (make_wlanhdr(padapter, mem_start, pattrib) == _FAIL)
return _FAIL;
_r8712_open_pktfile(pkt, &pktfile);
- _r8712_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
+ _r8712_pktfile_read(&pktfile, NULL, (uint) pattrib->pkt_hdrlen);
if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) {
/* truncate TXDESC_SIZE bytes txcmd if at mp mode for 871x */
if (pattrib->ether_type == 0x8712) {
@@ -826,13 +827,16 @@ void r8712_free_xmitframe(struct xmit_priv *pxmitpriv,
unsigned long irqL;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
struct _adapter *padapter = pxmitpriv->adapter;
+ struct sk_buff *pndis_pkt = NULL;
if (pxmitframe == NULL)
return;
- if (pxmitframe->pkt)
- r8712_xmit_complete(padapter, pxmitframe);
spin_lock_irqsave(&pfree_xmit_queue->lock, irqL);
list_delete(&pxmitframe->list);
+ if (pxmitframe->pkt) {
+ pndis_pkt = pxmitframe->pkt;
+ pxmitframe->pkt = NULL;
+ }
list_insert_tail(&pxmitframe->list, get_list_head(pfree_xmit_queue));
pxmitpriv->free_xmitframe_cnt++;
spin_unlock_irqrestore(&pfree_xmit_queue->lock, irqL);
@@ -1011,6 +1015,19 @@ static void init_hwxmits(struct hw_xmit *phwxmit, sint entry)
}
}
+void xmitframe_xmitbuf_attach(struct xmit_frame *pxmitframe,
+ struct xmit_buf *pxmitbuf)
+{
+ /* pxmitbuf attach to pxmitframe */
+ pxmitframe->pxmitbuf = pxmitbuf;
+ /* urb and irp connection */
+ pxmitframe->pxmit_urb[0] = pxmitbuf->pxmit_urb[0];
+ /* buffer addr assoc */
+ pxmitframe->buf_addr = pxmitbuf->pbuf;
+ /* pxmitframe attach to pxmitbuf */
+ pxmitbuf->priv_data = pxmitframe;
+}
+
/*
* tx_action == 0 == no frames to transmit
* tx_action > 0 ==> we have frames to transmit
@@ -1042,9 +1059,7 @@ int r8712_pre_xmit(struct _adapter *padapter, struct xmit_frame *pxmitframe)
} else { /*dump packet directly*/
spin_unlock_irqrestore(&pxmitpriv->lock, irqL);
ret = true;
- pxmitframe->pxmitbuf = pxmitbuf;
- pxmitframe->pxmit_urb[0] = pxmitbuf->pxmit_urb[0];
- pxmitframe->buf_addr = pxmitbuf->pbuf;
+ xmitframe_xmitbuf_attach(pxmitframe, pxmitbuf);
r8712_xmit_direct(padapter, pxmitframe);
}
return ret;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index df13e67b3eb..a034c0fec71 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _RTL871X_XMIT_H_
#define _RTL871X_XMIT_H_
@@ -5,8 +30,19 @@
#include "drv_types.h"
#include "xmit_osdep.h"
-#define MAX_XMITBUF_SZ (2048)
-#define NR_XMITBUFF (4)
+#ifdef CONFIG_R8712_TX_AGGR
+#define MAX_XMITBUF_SZ (16384)
+#else
+#define MAX_XMITBUF_SZ (2048)
+#endif
+
+#define NR_XMITBUFF (4)
+
+#ifdef CONFIG_R8712_TX_AGGR
+#define AGGR_NR_HIGH_BOUND (4) /*(8) */
+#define AGGR_NR_LOW_BOUND (2)
+#endif
+
#define XMITBUF_ALIGN_SZ 512
#define TX_GUARD_BAND 5
#define MAX_NUMBLKS (1)
@@ -68,9 +104,9 @@ struct pkt_attrib {
u16 seqnum;
u16 ether_type;
- u32 pktlen; /* the original 802.3 pkt raw_data len
+ u16 pktlen; /* the original 802.3 pkt raw_data len
* (not include ether_hdr data) */
- u32 last_txcmdsz;
+ u16 last_txcmdsz;
u8 pkt_hdrlen; /*the original 802.3 pkt header len*/
u8 hdrlen; /*the WLAN Header Len*/
@@ -110,7 +146,9 @@ struct xmit_buf {
u8 *pallocated_buf;
u8 *pbuf;
+ void *priv_data;
struct urb *pxmit_urb[8];
+ u32 aggr_nr;
};
struct xmit_frame {
@@ -198,6 +236,9 @@ struct xmit_priv {
struct semaphore tx_retevt;/*all tx return event;*/
u8 txirp_cnt;
struct tasklet_struct xmit_tasklet;
+ _workitem xmit_pipe4_reset_wi;
+ _workitem xmit_pipe6_reset_wi;
+ _workitem xmit_piped_reset_wi;
/*per AC pending irp*/
int beq_cnt;
int bkq_cnt;
@@ -255,6 +296,9 @@ int r8712_xmit_enqueue(struct _adapter *padapter,
int r8712_xmit_direct(struct _adapter *padapter, struct xmit_frame *pxmitframe);
void r8712_xmit_bh(void *priv);
+void xmitframe_xmitbuf_attach(struct xmit_frame *pxmitframe,
+ struct xmit_buf *pxmitbuf);
+
#include "rtl8712_xmit.h"
#endif /*_RTL871X_XMIT_H_*/
diff --git a/drivers/staging/rtl8712/sta_info.h b/drivers/staging/rtl8712/sta_info.h
index 79ad1593214..48d6a14c8f5 100644
--- a/drivers/staging/rtl8712/sta_info.h
+++ b/drivers/staging/rtl8712/sta_info.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __STA_INFO_H_
#define __STA_INFO_H_
@@ -38,8 +63,8 @@ struct sta_info {
struct sta_recv_priv sta_recvpriv;
uint state;
uint aid;
- u8 mac_id;
- u8 qos_option;
+ uint mac_id;
+ uint qos_option;
u8 hwaddr[ETH_ALEN];
uint ieee8021x_blocked; /*0: allowed, 1:blocked */
uint XPrivacy; /*aes, tkip...*/
diff --git a/drivers/staging/rtl8712/swab.h b/drivers/staging/rtl8712/swab.h
index 44709a91baf..f12781829c1 100644
--- a/drivers/staging/rtl8712/swab.h
+++ b/drivers/staging/rtl8712/swab.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _LINUX_BYTEORDER_SWAB_H
#define _LINUX_BYTEORDER_SWAB_H
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 21ce2af447b..fb2e89c3056 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -28,6 +28,9 @@
#define _HCI_INTF_C_
+#include <linux/usb.h>
+#include <linux/module.h>
+
#include "osdep_service.h"
#include "drv_types.h"
#include "recv_osdep.h"
@@ -366,23 +369,25 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
struct _adapter *padapter = NULL;
struct dvobj_priv *pdvobjpriv;
struct net_device *pnetdev;
+ struct usb_device *udev;
printk(KERN_INFO "r8712u: DriverVersion: %s\n", DRVER);
/* In this probe function, O.S. will provide the usb interface pointer
* to driver. We have to increase the reference count of the usb device
* structure by using the usb_get_dev function.
*/
- usb_get_dev(interface_to_usbdev(pusb_intf));
+ udev = interface_to_usbdev(pusb_intf);
+ usb_get_dev(udev);
pintf = pusb_intf;
/* step 1. */
pnetdev = r8712_init_netdev();
if (!pnetdev)
goto error;
- padapter = (struct _adapter *)_netdev_priv(pnetdev);
+ padapter = netdev_priv(pnetdev);
disable_ht_for_spec_devid(pdid, padapter);
pdvobjpriv = &padapter->dvobjpriv;
pdvobjpriv->padapter = padapter;
- padapter->dvobjpriv.pusbdev = interface_to_usbdev(pusb_intf);
+ padapter->dvobjpriv.pusbdev = udev;
usb_set_intfdata(pusb_intf, pnetdev);
SET_NETDEV_DEV(pnetdev, &pusb_intf->dev);
/* step 2. */
@@ -592,14 +597,15 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
/* step 6. Tell the network stack we exist */
if (register_netdev(pnetdev) != 0)
goto error;
+ spin_lock_init(&padapter->lockRxFF0Filter);
return 0;
error:
- usb_put_dev(interface_to_usbdev(pusb_intf));
+ usb_put_dev(udev);
usb_set_intfdata(pusb_intf, NULL);
if (padapter->dvobj_deinit != NULL)
padapter->dvobj_deinit(padapter);
if (pnetdev)
- os_free_netdev(pnetdev);
+ free_netdev(pnetdev);
return -ENODEV;
}
@@ -611,6 +617,7 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
struct _adapter *padapter = netdev_priv(pnetdev);
struct usb_device *udev = interface_to_usbdev(pusb_intf);
+ usb_set_intfdata(pusb_intf, NULL);
if (padapter) {
if (drvpriv.drv_registered == true)
padapter->bSurpriseRemoved = true;
@@ -620,6 +627,8 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
}
flush_scheduled_work();
udelay(1);
+ /*Stop driver mlme relation timer */
+ r8712_stop_drv_timers(padapter);
r871x_dev_unload(padapter);
r8712_free_drv_sw(padapter);
}
diff --git a/drivers/staging/rtl8712/usb_ops.h b/drivers/staging/rtl8712/usb_ops.h
index dc0611a2a0d..78e775a4636 100644
--- a/drivers/staging/rtl8712/usb_ops.h
+++ b/drivers/staging/rtl8712/usb_ops.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __USB_OPS_H_
#define __USB_OPS_H_
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 7933ea4f632..24e1ec5f006 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -28,6 +28,8 @@
#define _HCI_OPS_OS_C_
+#include <linux/usb.h>
+
#include "osdep_service.h"
#include "drv_types.h"
#include "osdep_intf.h"
@@ -48,7 +50,7 @@ struct zero_bulkout_context {
uint r8712_usb_init_intf_priv(struct intf_priv *pintfpriv)
{
- pintfpriv->piorw_urb = _usb_alloc_urb(0, GFP_ATOMIC);
+ pintfpriv->piorw_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!pintfpriv->piorw_urb)
return _FAIL;
sema_init(&(pintfpriv->io_retevt), 0);
@@ -187,7 +189,7 @@ void r8712_usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
usb_fill_bulk_urb(piorw_urb, pusbd, pipe,
wmem, cnt, usb_write_mem_complete,
pio_queue);
- status = _usb_submit_urb(piorw_urb, GFP_ATOMIC);
+ status = usb_submit_urb(piorw_urb, GFP_ATOMIC);
_down_sema(&pintfpriv->io_retevt);
}
@@ -305,7 +307,7 @@ u32 r8712_usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
precvbuf->pbuf, MAX_RECVBUF_SZ,
r8712_usb_read_port_complete,
precvbuf);
- err = _usb_submit_urb(purb, GFP_ATOMIC);
+ err = usb_submit_urb(purb, GFP_ATOMIC);
if ((err) && (err != (-EPERM)))
ret = _FAIL;
} else
@@ -332,17 +334,16 @@ void r8712_xmit_bh(void *priv)
struct _adapter *padapter = (struct _adapter *)priv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- while (1) {
- if ((padapter->bDriverStopped == true) ||
- (padapter->bSurpriseRemoved == true)) {
- printk(KERN_ERR "r8712u: xmit_bh => bDriverStopped"
- " or bSurpriseRemoved\n");
- break;
- }
- ret = r8712_xmitframe_complete(padapter, pxmitpriv, NULL);
- if (ret == false)
- break;
+ if ((padapter->bDriverStopped == true) ||
+ (padapter->bSurpriseRemoved == true)) {
+ printk(KERN_ERR "r8712u: xmit_bh => bDriverStopped"
+ " or bSurpriseRemoved\n");
+ return;
}
+ ret = r8712_xmitframe_complete(padapter, pxmitpriv, NULL);
+ if (ret == false)
+ return;
+ tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
}
static void usb_write_port_complete(struct urb *purb)
@@ -462,7 +463,7 @@ u32 r8712_usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
pxmitframe->mem_addr,
cnt, usb_write_port_complete,
pxmitframe); /* context is xmit_frame */
- status = _usb_submit_urb(purb, GFP_ATOMIC);
+ status = usb_submit_urb(purb, GFP_ATOMIC);
if (!status)
ret = _SUCCESS;
else
diff --git a/drivers/staging/rtl8712/usb_osintf.h b/drivers/staging/rtl8712/usb_osintf.h
index 0da6c1db048..d95797aac37 100644
--- a/drivers/staging/rtl8712/usb_osintf.h
+++ b/drivers/staging/rtl8712/usb_osintf.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __USB_OSINTF_H
#define __USB_OSINTF_H
@@ -17,6 +42,7 @@ uint rtl8712_hal_deinit(struct _adapter *padapter);
void rtl871x_intf_stop(struct _adapter *padapter);
void r871x_dev_unload(struct _adapter *padapter);
void r8712_stop_drv_threads(struct _adapter *padapter);
+void r8712_stop_drv_timers(struct _adapter *padapter);
u8 r8712_init_drv_sw(struct _adapter *padapter);
u8 r8712_free_drv_sw(struct _adapter *padapter);
struct net_device *r8712_init_netdev(void);
diff --git a/drivers/staging/rtl8712/usb_vendor_req.h b/drivers/staging/rtl8712/usb_vendor_req.h
index d35c538c47a..82335a83d0d 100644
--- a/drivers/staging/rtl8712/usb_vendor_req.h
+++ b/drivers/staging/rtl8712/usb_vendor_req.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _USB_VENDOR_REQUEST_H_
#define _USB_VENDOR_REQUEST_H_
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index 427ab7e2705..277398cff0a 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef _WIFI_H_
#define _WIFI_H_
diff --git a/drivers/staging/rtl8712/wlan_bssdef.h b/drivers/staging/rtl8712/wlan_bssdef.h
index 0d90e1f5b29..2ea8a3d6b70 100644
--- a/drivers/staging/rtl8712/wlan_bssdef.h
+++ b/drivers/staging/rtl8712/wlan_bssdef.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __WLAN_BSSDEF_H__
#define __WLAN_BSSDEF_H__
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index 7dea8b5b7e7..c9703627c8f 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -28,6 +28,8 @@
#define _XMIT_OSDEP_C_
+#include <linux/usb.h>
+
#include "osdep_service.h"
#include "drv_types.h"
@@ -42,7 +44,6 @@
static uint remainder_len(struct pkt_file *pfile)
{
- /* Kovich: Need to extend the buf_len to 64 bit ?(unsigned long long) */
return (uint)(pfile->buf_len - ((addr_t)(pfile->cur_addr) -
(addr_t)(pfile->buf_start)));
}
@@ -107,13 +108,33 @@ void r8712_set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
pattrib->subtype = WIFI_QOS_DATA_TYPE;
}
+void r8712_SetFilter(struct work_struct *work)
+{
+ struct _adapter *padapter = container_of(work, struct _adapter,
+ wkFilterRxFF0);
+ u8 oldvalue = 0x00, newvalue = 0x00;
+ unsigned long irqL;
+
+ oldvalue = r8712_read8(padapter, 0x117);
+ newvalue = oldvalue & 0xfe;
+ r8712_write8(padapter, 0x117, newvalue);
+
+ spin_lock_irqsave(&padapter->lockRxFF0Filter, irqL);
+ padapter->blnEnableRxFF0Filter = 1;
+ spin_unlock_irqrestore(&padapter->lockRxFF0Filter, irqL);
+ do {
+ msleep(100);
+ } while (padapter->blnEnableRxFF0Filter == 1);
+ r8712_write8(padapter, 0x117, oldvalue);
+}
+
int r8712_xmit_resource_alloc(struct _adapter *padapter,
struct xmit_buf *pxmitbuf)
{
int i;
for (i = 0; i < 8; i++) {
- pxmitbuf->pxmit_urb[i] = _usb_alloc_urb(0, GFP_KERNEL);
+ pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
if (pxmitbuf->pxmit_urb[i] == NULL) {
printk(KERN_ERR "r8712u: pxmitbuf->pxmit_urb[i]"
" == NULL");
@@ -146,7 +167,7 @@ void r8712_xmit_complete(struct _adapter *padapter, struct xmit_frame *pxframe)
int r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev)
{
struct xmit_frame *pxmitframe = NULL;
- struct _adapter *padapter = (struct _adapter *)_netdev_priv(pnetdev);
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(pnetdev);
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
int ret = 0;
diff --git a/drivers/staging/rtl8712/xmit_osdep.h b/drivers/staging/rtl8712/xmit_osdep.h
index ca439378953..8eba7ca0dde 100644
--- a/drivers/staging/rtl8712/xmit_osdep.h
+++ b/drivers/staging/rtl8712/xmit_osdep.h
@@ -1,3 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * Modifications for inclusion into the Linux staging tree are
+ * Copyright(c) 2010 Larry Finger. All rights reserved.
+ *
+ * Contact information:
+ * WLAN FAE <wlanfae@realtek.com>
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
#ifndef __XMIT_OSDEP_H_
#define __XMIT_OSDEP_H_
@@ -22,6 +47,7 @@ struct xmit_frame;
struct xmit_buf;
int r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev);
+void r8712_SetFilter(struct work_struct *work);
int r8712_xmit_resource_alloc(struct _adapter *padapter,
struct xmit_buf *pxmitbuf);
void r8712_xmit_resource_free(struct _adapter *padapter,
diff --git a/drivers/staging/rts5139/Kconfig b/drivers/staging/rts5139/Kconfig
new file mode 100644
index 00000000000..afd526bf422
--- /dev/null
+++ b/drivers/staging/rts5139/Kconfig
@@ -0,0 +1,16 @@
+config RTS5139
+ tristate "Realtek RTS5139 USB card reader support"
+ depends on USB && SCSI
+ help
+ Say Y here to include driver code to support the Realtek
+ RTS5139 USB card readers.
+
+ If this driver is compiled as a module, it will be named rts5139.
+
+config RTS5139_DEBUG
+ bool "Realtek RTS5139 Card Reader verbose debug"
+ depends on RTS5139
+ help
+ Say Y here in order to have the rts5139 code generate
+ verbose debugging messages.
+
diff --git a/drivers/staging/rts5139/Makefile b/drivers/staging/rts5139/Makefile
new file mode 100644
index 00000000000..82b8958e8d3
--- /dev/null
+++ b/drivers/staging/rts5139/Makefile
@@ -0,0 +1,37 @@
+# Driver for Realtek RTS51xx USB card reader
+#
+# Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any
+# later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# Author:
+# wwang (wei_wang@realsil.com.cn)
+# No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+# Maintainer:
+# Edwin Rong (edwin_rong@realsil.com.cn)
+# No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+#
+# Makefile for the RTS51xx USB Card Reader drivers.
+#
+
+TARGET_MODULE := rts5139
+
+EXTRA_CFLAGS := -Idrivers/scsi -I$(PWD)
+
+obj-m += $(TARGET_MODULE).o
+
+common-obj := rts51x_transport.o rts51x_scsi.o rts51x_fop.o
+
+$(TARGET_MODULE)-objs := $(common-obj) rts51x.o rts51x_chip.o rts51x_card.o \
+ xd.o sd.o ms.o sd_cprm.o ms_mg.o
diff --git a/drivers/staging/rts5139/TODO b/drivers/staging/rts5139/TODO
new file mode 100644
index 00000000000..4bde726ea5f
--- /dev/null
+++ b/drivers/staging/rts5139/TODO
@@ -0,0 +1,5 @@
+TODO:
+- support more USB card reader of Realtek family
+- use kernel coding style
+- checkpatch.pl fixes
+
diff --git a/drivers/staging/rts5139/debug.h b/drivers/staging/rts5139/debug.h
new file mode 100644
index 00000000000..73dec133a1b
--- /dev/null
+++ b/drivers/staging/rts5139/debug.h
@@ -0,0 +1,46 @@
+/* Driver for Realtek RTS51xx USB card reader
+ * Header file
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_DEBUG_H
+#define __RTS51X_DEBUG_H
+
+#include <linux/kernel.h>
+
+#define RTS51X_TIP "rts51x: "
+
+#ifdef CONFIG_RTS5139_DEBUG
+#define RTS51X_DEBUGP(x...) printk(KERN_DEBUG RTS51X_TIP x)
+#define RTS51X_DEBUGPN(x...) printk(KERN_DEBUG x)
+#define RTS51X_DEBUGPX(x...) printk(x)
+#define RTS51X_DEBUG(x) x
+#else
+#define RTS51X_DEBUGP(x...)
+#define RTS51X_DEBUGPN(x...)
+#define RTS51X_DEBUGPX(x...)
+#define RTS51X_DEBUG(x)
+#endif
+
+#endif /* __RTS51X_DEBUG_H */
diff --git a/drivers/staging/rts5139/ms.c b/drivers/staging/rts5139/ms.c
new file mode 100644
index 00000000000..b0e9071c8e5
--- /dev/null
+++ b/drivers/staging/rts5139/ms.c
@@ -0,0 +1,4191 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "debug.h"
+#include "trace.h"
+#include "rts51x.h"
+#include "rts51x_transport.h"
+#include "rts51x_scsi.h"
+#include "rts51x_card.h"
+#include "ms.h"
+
+static inline void ms_set_err_code(struct rts51x_chip *chip, u8 err_code)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ ms_card->err_code = err_code;
+}
+
+static inline int ms_check_err_code(struct rts51x_chip *chip, u8 err_code)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ return (ms_card->err_code == err_code);
+}
+
+static int ms_parse_err_code(struct rts51x_chip *chip)
+{
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int ms_transfer_tpc(struct rts51x_chip *chip, u8 trans_mode, u8 tpc,
+ u8 cnt, u8 cfg)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ RTS51X_DEBUGP("ms_transfer_tpc: tpc = 0x%x\n", tpc);
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | trans_mode);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END,
+ MS_TRANSFER_END);
+
+ rts51x_add_cmd(chip, READ_REG_CMD, MS_TRANS_CFG, 0, 0);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 2, 5000);
+ if (CHECK_MS_TRANS_FAIL(chip, retval)) {
+ rts51x_clear_ms_error(chip);
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ if (!(tpc & 0x08)) { /* Read Packet */
+ /* Check CRC16 & Ready Timeout */
+ if (chip->rsp_buf[1] & MS_CRC16_ERR) {
+ ms_set_err_code(chip, MS_CRC16_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+ } else { /* Write Packet */
+ if (CHK_MSPRO(ms_card) && !(chip->rsp_buf[1] & 0x80)) {
+ if (chip->rsp_buf[1] & (MS_INT_ERR | MS_INT_CMDNK)) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+ }
+ }
+
+ /* Check Timeout of Ready Signal */
+ if (chip->rsp_buf[1] & MS_RDY_TIMEOUT) {
+ rts51x_clear_ms_error(chip);
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int ms_transfer_data(struct rts51x_chip *chip, u8 trans_mode, u8 tpc,
+ u16 sec_cnt, u8 cfg, int mode_2k, int use_sg, void *buf,
+ int buf_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 val, err_code = 0, flag = 0;
+ enum dma_data_direction dir;
+ unsigned int pipe;
+
+ if (!buf || !buf_len)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (trans_mode == MS_TM_AUTO_READ) {
+ pipe = RCV_BULK_PIPE(chip);
+ dir = DMA_FROM_DEVICE;
+ flag = MODE_CDIR;
+ err_code = MS_FLASH_READ_ERROR;
+ } else if (trans_mode == MS_TM_AUTO_WRITE) {
+ pipe = SND_BULK_PIPE(chip);
+ dir = DMA_TO_DEVICE;
+ flag = MODE_CDOR;
+ err_code = MS_FLASH_WRITE_ERROR;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_H, 0xFF,
+ (u8) (sec_cnt >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_L, 0xFF,
+ (u8) sec_cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ RING_BUFFER);
+
+ if (mode_2k)
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_CFG, MS_2K_SECTOR_MODE,
+ MS_2K_SECTOR_MODE);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_CFG, MS_2K_SECTOR_MODE,
+ 0);
+
+ trans_dma_enable(dir, chip, sec_cnt * 512, DMA_512);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | trans_mode);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END,
+ MS_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, flag | STAGE_MS_STATUS, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval =
+ rts51x_transfer_data_rcc(chip, pipe, buf, buf_len, use_sg, NULL,
+ 15000, flag);
+ if (retval != STATUS_SUCCESS) {
+ ms_set_err_code(chip, err_code);
+ rts51x_clear_ms_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ retval = rts51x_get_rsp(chip, 3, 15000);
+ if (CHECK_MS_TRANS_FAIL(chip, retval)) {
+ ms_set_err_code(chip, err_code);
+ rts51x_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_card->last_rw_int = val = chip->rsp_buf[1];
+ if (val & (MS_INT_CMDNK | MS_INT_ERR | MS_CRC16_ERR | MS_RDY_TIMEOUT))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int ms_write_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data,
+ int data_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+
+ if (!data || (data_len < cnt))
+ TRACE_RET(chip, STATUS_ERROR);
+
+ rts51x_init_cmd(chip);
+
+ for (i = 0; i < cnt; i++) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF,
+ data[i]);
+ }
+ if (cnt % 2)
+ rts51x_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2 + i,
+ 0xFF, 0xFF);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_WRITE_BYTES);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END,
+ MS_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 1, 5000);
+ if (CHECK_MS_TRANS_FAIL(chip, retval)) {
+ u8 val = 0;
+
+ rts51x_ep0_read_register(chip, MS_TRANS_CFG, &val);
+ RTS51X_DEBUGP("MS_TRANS_CFG: 0x%02x\n", val);
+
+ rts51x_clear_ms_error(chip);
+
+ if (!(tpc & 0x08)) { /* Read Packet */
+ /* Check CRC16 & Ready Timeout */
+ if (val & MS_CRC16_ERR) {
+ ms_set_err_code(chip, MS_CRC16_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+ } else { /* Write Packet */
+ if (CHK_MSPRO(ms_card) && !(val & 0x80)) {
+ if (val & (MS_INT_ERR | MS_INT_CMDNK)) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip,
+ ms_parse_err_code(chip));
+ }
+ }
+ }
+
+ /* Check Timeout of Ready Signal */
+ if (val & MS_RDY_TIMEOUT) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int ms_read_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data,
+ int data_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+
+ if (!data)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_READ_BYTES);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END,
+ MS_TRANSFER_END);
+
+ for (i = 0; i < data_len - 1; i++)
+ rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
+
+ if (data_len % 2)
+ rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len, 0,
+ 0);
+ else
+ rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len - 1,
+ 0, 0);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, data_len + 1, 5000);
+ if (CHECK_MS_TRANS_FAIL(chip, retval)) {
+ u8 val = 0;
+
+ rts51x_ep0_read_register(chip, MS_TRANS_CFG, &val);
+ RTS51X_DEBUGP("MS_TRANS_CFG: 0x%02x\n", val);
+
+ rts51x_clear_ms_error(chip);
+
+ if (!(tpc & 0x08)) { /* Read Packet */
+ /* Check CRC16 & Ready Timeout */
+ if (val & MS_CRC16_ERR) {
+ ms_set_err_code(chip, MS_CRC16_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+ } else { /* Write Packet */
+ if (CHK_MSPRO(ms_card) && !(val & 0x80)) {
+ if (val & (MS_INT_ERR | MS_INT_CMDNK)) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip,
+ ms_parse_err_code(chip));
+ }
+ }
+ }
+
+ /* Check Timeout of Ready Signal */
+ if (val & MS_RDY_TIMEOUT) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ rts51x_read_rsp_buf(chip, 1, data, data_len);
+
+ return STATUS_SUCCESS;
+}
+
+int ms_set_rw_reg_addr(struct rts51x_chip *chip,
+ u8 read_start, u8 read_cnt, u8 write_start, u8 write_cnt)
+{
+ int retval, i;
+ u8 data[4];
+
+ data[0] = read_start;
+ data[1] = read_cnt;
+ data[2] = write_start;
+ data[3] = write_cnt;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval =
+ ms_write_bytes(chip, SET_RW_REG_ADRS, 4, NO_WAIT_INT, data,
+ 4);
+ if (retval == STATUS_SUCCESS)
+ return STATUS_SUCCESS;
+ rts51x_clear_ms_error(chip);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int ms_send_cmd(struct rts51x_chip *chip, u8 cmd, u8 cfg)
+{
+ u8 data[2];
+
+ data[0] = cmd;
+ data[1] = 0;
+
+ return ms_write_bytes(chip, PRO_SET_CMD, 1, cfg, data, 1);
+}
+
+static int ms_set_cmd(struct rts51x_chip *chip,
+ u8 read_start, u8 read_count,
+ u8 write_start, u8 write_count,
+ u8 cmd, u8 cfg, u8 *data, int data_len, u8 *int_stat)
+{
+ int retval, i;
+ u8 val;
+
+ if (!data || (data_len <= 0) || (data_len > 128)) {
+ RTS51X_DEBUGP("ms_set_cmd (data_len = %d)\n", data_len);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval =
+ ms_set_rw_reg_addr(chip, read_start, read_count, write_start,
+ write_count);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval =
+ ms_write_bytes(chip, WRITE_REG, write_count, NO_WAIT_INT,
+ data, data_len);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_send_cmd(chip, cmd, WAIT_INT);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+ /* GET_INT Register */
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (int_stat)
+ *int_stat = val;
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef MS_SPEEDUP
+static int ms_auto_set_cmd(struct rts51x_chip *chip,
+ u8 read_start, u8 read_count,
+ u8 write_start, u8 write_count,
+ u8 cmd, u8 cfg, u8 *data, int data_len,
+ u8 *int_stat)
+{
+ int retval;
+ int i;
+
+ if (!data || (data_len <= 0) || (data_len > 128)) {
+ RTS51X_DEBUGP("ms_auto_set_cmd (data_len = %d)\n", data_len);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_READ_START, 0xFF, read_start);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_READ_COUNT, 0xFF, read_count);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_WRITE_START, 0xFF, write_start);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_WRITE_COUNT, 0xFF, write_count);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_COMMAND, 0xFF, cmd);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+
+ for (i = 0; i < data_len; i++) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF,
+ data[i]);
+ }
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_SET_CMD);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END,
+ MS_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CR | STAGE_MS_STATUS, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 3, 5000);
+
+ if (CHECK_MS_TRANS_FAIL(chip, retval)) {
+ rts51x_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (int_stat)
+ *int_stat = chip->rsp_buf[2];
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+static int ms_set_init_para(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ if (CHK_HG8BIT(ms_card)) {
+ if (chip->asic_code)
+ ms_card->ms_clock = chip->option.asic_ms_hg_clk;
+ else
+ ms_card->ms_clock = chip->option.fpga_ms_hg_clk;
+ } else if (CHK_MSPRO(ms_card) || CHK_MS4BIT(ms_card)) {
+ if (chip->asic_code)
+ ms_card->ms_clock = chip->option.asic_ms_4bit_clk;
+ else
+ ms_card->ms_clock = chip->option.fpga_ms_4bit_clk;
+ } else {
+ if (chip->asic_code)
+ ms_card->ms_clock = 38;
+ else
+ ms_card->ms_clock = CLK_40;
+ }
+
+ retval = switch_clock(chip, ms_card->ms_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_select_card(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+int ms_switch_clock(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ retval = rts51x_select_card(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = switch_clock(chip, ms_card->ms_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+static void ms_pull_ctl_disable(struct rts51x_chip *chip)
+{
+ if (CHECK_PKG(chip, LQFP48)) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5);
+ } else {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x56);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59);
+ }
+}
+
+static void ms_pull_ctl_enable(struct rts51x_chip *chip)
+{
+ if (CHECK_PKG(chip, LQFP48)) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5);
+ } else {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59);
+ }
+}
+
+static int ms_prepare_reset(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ ms_card->ms_type = 0;
+ ms_card->check_ms_flow = 0;
+ ms_card->switch_8bit_fail = 0;
+ ms_card->delay_write.delay_write_flag = 0;
+
+ ms_card->pro_under_formatting = 0;
+
+ rts51x_init_cmd(chip);
+
+ if (chip->asic_code) {
+ ms_pull_ctl_enable(chip);
+ } else {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL,
+ FPGA_MS_PULL_CTL_BIT | 0x20, 0);
+ }
+ /* Tri-state MS output */
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, 0);
+
+ if (!chip->option.FT2_fast_mode) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK,
+ POWER_OFF);
+ }
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (!chip->option.FT2_fast_mode) {
+ wait_timeout(250);
+
+ card_power_on(chip, MS_CARD);
+ wait_timeout(150);
+
+#ifdef SUPPORT_OCP
+ rts51x_get_card_status(chip, &(chip->card_status));
+ /* get OCP status */
+ chip->ocp_stat = (chip->card_status >> 4) & 0x03;
+
+ if (chip->ocp_stat & (MS_OCP_NOW | MS_OCP_EVER)) {
+ RTS51X_DEBUGP("Over current, OCPSTAT is 0x%x\n",
+ chip->ocp_stat);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ }
+
+ rts51x_init_cmd(chip);
+
+ /* Enable MS Output */
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN,
+ MS_OUTPUT_EN);
+
+ /* Reset Registers */
+ if (chip->asic_code)
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_CFG, 0xFF,
+ SAMPLE_TIME_RISING | PUSH_TIME_DEFAULT |
+ NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_CFG, 0xFF,
+ SAMPLE_TIME_FALLING | PUSH_TIME_DEFAULT |
+ NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF,
+ NO_WAIT_INT | NO_AUTO_READ_INT_REG);
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return ms_set_init_para(chip);
+}
+
+static int ms_identify_media_type(struct rts51x_chip *chip, int switch_8bit_bus)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 val;
+
+ retval = ms_set_rw_reg_addr(chip, Pro_StatusReg, 6, SystemParm, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ /* Get Register form MS-PRO card */
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval =
+ ms_transfer_tpc(chip, MS_TM_READ_BYTES, READ_REG, 6,
+ NO_WAIT_INT);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTS51X_READ_REG(chip, PPBUF_BASE2 + 2, &val);
+ RTS51X_DEBUGP("Type register: 0x%x\n", val);
+ if (val != 0x01) {
+ if (val != 0x02)
+ ms_card->check_ms_flow = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ /* Category Register */
+ RTS51X_READ_REG(chip, PPBUF_BASE2 + 4, &val);
+ RTS51X_DEBUGP("Category register: 0x%x\n", val);
+ if (val != 0) {
+ ms_card->check_ms_flow = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ /* Class Register */
+ RTS51X_READ_REG(chip, PPBUF_BASE2 + 5, &val);
+ RTS51X_DEBUGP("Class register: 0x%x\n", val);
+ if (val == 0) {
+ RTS51X_READ_REG(chip, PPBUF_BASE2, &val);
+ if (val & WRT_PRTCT)
+ chip->card_wp |= MS_CARD;
+ else
+ chip->card_wp &= ~MS_CARD;
+ } else if ((val == 0x01) || (val == 0x02) || (val == 0x03)) {
+ chip->card_wp |= MS_CARD;
+ } else {
+ ms_card->check_ms_flow = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_card->ms_type |= TYPE_MSPRO;
+
+ /* Check MSPro-HG Card, use IF Mode Register to distinguish */
+ RTS51X_READ_REG(chip, PPBUF_BASE2 + 3, &val);
+ RTS51X_DEBUGP("IF Mode register: 0x%x\n", val);
+ if (val == 0) {
+ ms_card->ms_type &= 0x0F;
+ } else if (val == 7) {
+ if (switch_8bit_bus)
+ ms_card->ms_type |= MS_HG;
+ else
+ ms_card->ms_type &= 0x0F;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ /* end Procedure to identify Media Type */
+ return STATUS_SUCCESS;
+}
+
+static int ms_confirm_cpu_startup(struct rts51x_chip *chip)
+{
+ int retval, i, k;
+ u8 val;
+
+ /* Confirm CPU StartUp */
+ k = 0;
+ do {
+ if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval =
+ ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val,
+ 1);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (k > 100)
+ TRACE_RET(chip, STATUS_FAIL);
+ k++;
+ wait_timeout(100);
+ } while (!(val & INT_REG_CED));
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_ERR) {
+ if (val & INT_REG_CMDNK) { /* CMDNK = 1 */
+ chip->card_wp |= (MS_CARD);
+ } else { /* CMDNK = 0 */
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ /*-- end confirm CPU startup */
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_switch_parallel_bus(struct rts51x_chip *chip)
+{
+ int retval, i;
+ u8 data[2];
+
+ data[0] = PARALLEL_4BIT_IF;
+ data[1] = 0;
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval =
+ ms_write_bytes(chip, WRITE_REG, 1, NO_WAIT_INT, data, 2);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_switch_8bit_bus(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 data[2];
+
+ data[0] = PARALLEL_8BIT_IF;
+ data[1] = 0;
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval =
+ ms_write_bytes(chip, WRITE_REG, 1, NO_WAIT_INT, data, 2);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTS51X_WRITE_REG(chip, MS_CFG, 0x98,
+ MS_BUS_WIDTH_8 | SAMPLE_TIME_FALLING);
+ ms_card->ms_type |= MS_8BIT;
+
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval =
+ ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1,
+ NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_pro_reset_flow(struct rts51x_chip *chip, int switch_8bit_bus)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+
+ for (i = 0; i < 3; i++) {
+ retval = ms_prepare_reset(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_identify_media_type(chip, switch_8bit_bus);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_confirm_cpu_startup(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_switch_parallel_bus(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST)
+ TRACE_RET(chip, STATUS_FAIL);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTS51X_WRITE_REG(chip, MS_CFG, 0x18, MS_BUS_WIDTH_4);
+
+ RTS51X_WRITE_REG(chip, MS_CFG, PUSH_TIME_ODD, PUSH_TIME_ODD);
+
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (CHK_MSHG(ms_card) && switch_8bit_bus) {
+ retval = ms_switch_8bit_bus(chip);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->switch_8bit_fail = 1;
+ TRACE_RET(chip, retval);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef XC_POWERCLASS
+static int msxc_change_power(struct rts51x_chip *chip, u8 mode)
+{
+ int retval;
+ u8 buf[6];
+
+ ms_cleanup_work(chip);
+
+ /* Set Parameter Register */
+ retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ buf[0] = 0;
+ buf[1] = mode;
+ buf[2] = 0;
+ buf[3] = 0;
+ buf[4] = 0;
+ buf[5] = 0;
+
+ retval = ms_write_bytes(chip, PRO_WRITE_REG, 6, NO_WAIT_INT, buf, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_send_cmd(chip, XC_CHG_POWER, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTS51X_READ_REG(chip, MS_TRANS_CFG, buf);
+ if (buf[0] & (MS_INT_CMDNK | MS_INT_ERR))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+static int ms_read_attribute_info(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 val, *buf, class_code, device_type, sub_class, data[16];
+ u16 total_blk = 0, blk_size = 0;
+#ifdef SUPPORT_MSXC
+ u32 xc_total_blk = 0, xc_blk_size = 0;
+#endif
+ u32 sys_info_addr = 0, sys_info_size;
+#ifdef SUPPORT_PCGL_1P18
+ u32 model_name_addr = 0, model_name_size;
+ int found_sys_info = 0, found_model_name = 0;
+#endif
+
+ retval = ms_set_rw_reg_addr(chip, Pro_IntReg, 2, Pro_SystemParm, 7);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (CHK_MS8BIT(ms_card))
+ data[0] = PARALLEL_8BIT_IF;
+ else
+ data[0] = PARALLEL_4BIT_IF;
+ data[1] = 0;
+
+ data[2] = 0x40;
+ data[3] = 0;
+ data[4] = 0;
+ data[5] = 0;
+ /* Start address 0 */
+ data[6] = 0;
+ data[7] = 0;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval =
+ ms_write_bytes(chip, PRO_WRITE_REG, 7, NO_WAIT_INT, data,
+ 8);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ buf = kmalloc(64 * 512, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_send_cmd(chip, PRO_READ_ATRB, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ continue;
+
+ retval = rts51x_read_register(chip, MS_TRANS_CFG, &val);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (!(val & MS_INT_BREQ)) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval =
+ ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
+ 0x40, WAIT_INT, 0, 0, buf, 64 * 512);
+ if (retval == STATUS_SUCCESS)
+ break;
+ else
+ rts51x_clear_ms_error(chip);
+ }
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, retval);
+ }
+
+ i = 0;
+ do {
+ retval = rts51x_read_register(chip, MS_TRANS_CFG, &val);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, retval);
+ }
+
+ if ((val & MS_INT_CED) || !(val & MS_INT_BREQ))
+ break;
+
+ retval =
+ ms_transfer_tpc(chip, MS_TM_NORMAL_READ, PRO_READ_LONG_DATA,
+ 0, WAIT_INT);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, retval);
+ }
+
+ i++;
+ } while (i < 1024);
+
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, retval);
+ }
+
+ if ((buf[0] != 0xa5) && (buf[1] != 0xc3)) {
+ /* Signature code is wrong */
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if ((buf[4] < 1) || (buf[4] > 12)) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ for (i = 0; i < buf[4]; i++) {
+ int cur_addr_off = 16 + i * 12;
+
+#ifdef SUPPORT_MSXC
+ if ((buf[cur_addr_off + 8] == 0x10)
+ || (buf[cur_addr_off + 8] == 0x13)) {
+#else
+ if (buf[cur_addr_off + 8] == 0x10) {
+#endif
+ sys_info_addr = ((u32) buf[cur_addr_off + 0] << 24) |
+ ((u32) buf[cur_addr_off + 1] << 16) |
+ ((u32) buf[cur_addr_off + 2] << 8) |
+ buf[cur_addr_off + 3];
+ sys_info_size =
+ ((u32) buf[cur_addr_off + 4] << 24) |
+ ((u32) buf[cur_addr_off + 5] << 16) |
+ ((u32) buf[cur_addr_off + 6] << 8) |
+ buf[cur_addr_off + 7];
+ RTS51X_DEBUGP("sys_info_addr = 0x%x,"
+ "sys_info_size = 0x%x\n",
+ sys_info_addr, sys_info_size);
+ if (sys_info_size != 96) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (sys_info_addr < 0x1A0) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if ((sys_info_size + sys_info_addr) > 0x8000) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#ifdef SUPPORT_MSXC
+ if (buf[cur_addr_off + 8] == 0x13)
+ ms_card->ms_type |= MS_XC;
+#endif
+#ifdef SUPPORT_PCGL_1P18
+ found_sys_info = 1;
+#else
+ break;
+#endif
+ }
+#ifdef SUPPORT_PCGL_1P18
+ if (buf[cur_addr_off + 8] == 0x15) {
+ model_name_addr = ((u32) buf[cur_addr_off + 0] << 24) |
+ ((u32) buf[cur_addr_off + 1] << 16) |
+ ((u32) buf[cur_addr_off + 2] << 8) |
+ buf[cur_addr_off + 3];
+ model_name_size =
+ ((u32) buf[cur_addr_off + 4] << 24) |
+ ((u32) buf[cur_addr_off + 5] << 16) |
+ ((u32) buf[cur_addr_off + 6] << 8) |
+ buf[cur_addr_off + 7];
+ RTS51X_DEBUGP("model_name_addr = 0x%x,"
+ "model_name_size = 0x%x\n",
+ model_name_addr, model_name_size);
+ if (model_name_size != 48) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (model_name_addr < 0x1A0) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if ((model_name_size + model_name_addr) > 0x8000) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ found_model_name = 1;
+ }
+
+ if (found_sys_info && found_model_name)
+ break;
+#endif
+ }
+
+ if (i == buf[4]) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ class_code = buf[sys_info_addr + 0];
+ device_type = buf[sys_info_addr + 56];
+ sub_class = buf[sys_info_addr + 46];
+#ifdef SUPPORT_MSXC
+ if (CHK_MSXC(ms_card)) {
+ xc_total_blk = ((u32) buf[sys_info_addr + 6] << 24) |
+ ((u32) buf[sys_info_addr + 7] << 16) |
+ ((u32) buf[sys_info_addr + 8] << 8) |
+ buf[sys_info_addr + 9];
+ xc_blk_size = ((u32) buf[sys_info_addr + 32] << 24) |
+ ((u32) buf[sys_info_addr + 33] << 16) |
+ ((u32) buf[sys_info_addr + 34] << 8) |
+ buf[sys_info_addr + 35];
+ RTS51X_DEBUGP("xc_total_blk = 0x%x, xc_blk_size = 0x%x\n",
+ xc_total_blk, xc_blk_size);
+ } else {
+ total_blk =
+ ((u16) buf[sys_info_addr + 6] << 8) | buf[sys_info_addr +
+ 7];
+ blk_size =
+ ((u16) buf[sys_info_addr + 2] << 8) | buf[sys_info_addr +
+ 3];
+ RTS51X_DEBUGP("total_blk = 0x%x, blk_size = 0x%x\n", total_blk,
+ blk_size);
+ }
+#else
+ total_blk =
+ ((u16) buf[sys_info_addr + 6] << 8) | buf[sys_info_addr + 7];
+ blk_size = ((u16) buf[sys_info_addr + 2] << 8) | buf[sys_info_addr + 3];
+ RTS51X_DEBUGP("total_blk = 0x%x, blk_size = 0x%x\n", total_blk,
+ blk_size);
+#endif
+
+ RTS51X_DEBUGP("class_code = 0x%x, device_type = 0x%x,"
+ "sub_class = 0x%x\n",
+ class_code, device_type, sub_class);
+
+ memcpy(ms_card->raw_sys_info, buf + sys_info_addr, 96);
+#ifdef SUPPORT_PCGL_1P18
+ memcpy(ms_card->raw_model_name, buf + model_name_addr, 48);
+#endif
+
+ kfree(buf);
+
+ /* Confirm System Information */
+#ifdef SUPPORT_MSXC
+ if (CHK_MSXC(ms_card)) {
+ if (class_code != 0x03)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ if (class_code != 0x02)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#else
+ if (class_code != 0x02)
+ TRACE_RET(chip, STATUS_FAIL);
+#endif
+
+ if (device_type != 0x00) {
+ if ((device_type == 0x01) || (device_type == 0x02)
+ || (device_type == 0x03))
+ chip->card_wp |= MS_CARD;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (sub_class & 0xC0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTS51X_DEBUGP("class_code: 0x%x, device_type: 0x%x, sub_class: 0x%x\n",
+ class_code, device_type, sub_class);
+
+#ifdef SUPPORT_MSXC
+ if (CHK_MSXC(ms_card)) {
+ chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity =
+ xc_total_blk * xc_blk_size;
+ } else {
+ chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity =
+ total_blk * blk_size;
+ }
+#else
+ chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity =
+ total_blk * blk_size;
+#endif
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef SUPPORT_MAGIC_GATE
+int mg_set_tpc_para_sub(struct rts51x_chip *chip, int type, u8 mg_entry_num);
+#endif
+
+static int reset_ms_pro(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+#ifdef XC_POWERCLASS
+ u8 change_power_class = 2;
+#endif
+
+#ifdef XC_POWERCLASS
+Retry:
+#endif
+ retval = ms_pro_reset_flow(chip, 1);
+ if (retval != STATUS_SUCCESS) {
+ if (ms_card->switch_8bit_fail) {
+ retval = ms_pro_reset_flow(chip, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ } else {
+ TRACE_RET(chip, retval);
+ }
+ }
+
+ retval = ms_read_attribute_info(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+#ifdef XC_POWERCLASS
+ if (CHK_HG8BIT(ms_card))
+ change_power_class = 0;
+
+ if (change_power_class && CHK_MSXC(ms_card)) {
+ u8 power_class_mode = (ms_card->raw_sys_info[46] & 0x18) >> 3;
+ RTS51X_DEBUGP("power_class_mode = 0x%x", power_class_mode);
+ if (change_power_class > power_class_mode)
+ change_power_class = power_class_mode;
+ if (change_power_class) {
+ retval = msxc_change_power(chip, change_power_class);
+ if (retval != STATUS_SUCCESS) {
+ change_power_class--;
+ goto Retry;
+ }
+ }
+ }
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+ retval = mg_set_tpc_para_sub(chip, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+#endif
+
+ if (CHK_HG8BIT(ms_card))
+ chip->card_bus_width[chip->card2lun[MS_CARD]] = 8;
+ else
+ chip->card_bus_width[chip->card2lun[MS_CARD]] = 4;
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_read_status_reg(struct rts51x_chip *chip)
+{
+ int retval;
+ u8 val[2];
+
+ retval = ms_set_rw_reg_addr(chip, StatusReg0, 2, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_read_bytes(chip, READ_REG, 2, NO_WAIT_INT, val, 2);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ if (val[1] & (STS_UCDT | STS_UCEX | STS_UCFG)) {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_check_boot_block(struct rts51x_chip *chip, u16 block_addr)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 extra[MS_EXTRA_SIZE], data[10], val = 0;
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (block_addr >> 8);
+ data[3] = (u8) block_addr;
+ /* Page Number
+ * Extra data access mode */
+ data[4] = 0x40;
+ data[5] = 0;
+
+ retval = ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 6,
+ BLOCK_READ, WAIT_INT, data, 6, &val);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ retval =
+ ms_set_rw_reg_addr(chip, OverwriteFlag,
+ MS_EXTRA_SIZE, SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+ }
+
+ retval =
+ ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT, extra,
+ MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (!(extra[0] & BLOCK_OK) || (extra[1] & NOT_BOOT_BLOCK))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_read_extra_data(struct rts51x_chip *chip,
+ u16 block_addr, u8 page_num, u8 *buf,
+ int buf_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 val = 0, data[10];
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (block_addr >> 8);
+ data[3] = (u8) block_addr;
+ /* Page Number
+ * Extra data access mode */
+ data[4] = 0x40;
+ data[5] = page_num;
+
+#ifdef MS_SPEEDUP
+ retval =
+ ms_auto_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 6,
+ BLOCK_READ, WAIT_INT, data, 6, &val);
+#else
+ retval = ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 6,
+ BLOCK_READ, WAIT_INT, data, 6, &val);
+#endif
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ retval =
+ ms_set_rw_reg_addr(chip, OverwriteFlag,
+ MS_EXTRA_SIZE, SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+ }
+
+ retval =
+ ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT, data,
+ MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (buf && buf_len) {
+ if (buf_len > MS_EXTRA_SIZE)
+ buf_len = MS_EXTRA_SIZE;
+ memcpy(buf, data, buf_len);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_write_extra_data(struct rts51x_chip *chip,
+ u16 block_addr, u8 page_num, u8 *buf,
+ int buf_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 val = 0, data[16];
+
+ if (!buf || (buf_len < MS_EXTRA_SIZE))
+ TRACE_RET(chip, STATUS_FAIL);
+ /* Write REG */
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (block_addr >> 8);
+ data[3] = (u8) block_addr;
+ /* Page Number
+ * Extra data access mode */
+ data[4] = 0x40;
+ data[5] = page_num;
+
+ for (i = 6; i < MS_EXTRA_SIZE + 6; i++)
+ data[i] = buf[i - 6];
+
+#ifdef MS_SPEEDUP
+ retval =
+ ms_auto_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm,
+ 6 + MS_EXTRA_SIZE, BLOCK_WRITE, WAIT_INT, data, 16,
+ &val);
+#else
+ retval =
+ ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm,
+ 6 + MS_EXTRA_SIZE, BLOCK_WRITE, WAIT_INT, data, 16,
+ &val);
+#endif
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_read_page(struct rts51x_chip *chip, u16 block_addr, u8 page_num)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 val = 0, data[6];
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (block_addr >> 8);
+ data[3] = (u8) block_addr;
+ /* Page Number
+ * Single page access mode */
+ data[4] = 0x20;
+ data[5] = page_num;
+
+ retval = ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 6,
+ BLOCK_READ, WAIT_INT, data, 6, &val);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ } else {
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ retval =
+ ms_transfer_tpc(chip, MS_TM_NORMAL_READ, READ_PAGE_DATA, 0,
+ NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ if (ms_check_err_code(chip, MS_FLASH_WRITE_ERROR))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_set_bad_block(struct rts51x_chip *chip, u16 phy_blk)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 val = 0, data[8], extra[MS_EXTRA_SIZE];
+
+ retval = ms_read_extra_data(chip, phy_blk, 0, extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (phy_blk >> 8);
+ data[3] = (u8) phy_blk;
+ data[4] = 0x80;
+ data[5] = 0;
+ data[6] = extra[0] & 0x7F;
+ data[7] = 0xFF;
+
+#ifdef MS_SPEEDUP
+ retval = ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 7,
+ BLOCK_WRITE, WAIT_INT, data, 7, &val);
+#else
+ retval = ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 7,
+ BLOCK_WRITE, WAIT_INT, data, 7, &val);
+#endif
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_erase_block(struct rts51x_chip *chip, u16 phy_blk)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i = 0;
+ u8 val = 0, data[6];
+
+ retval =
+ ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm,
+ 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (phy_blk >> 8);
+ data[3] = (u8) phy_blk;
+ data[4] = 0;
+ data[5] = 0;
+
+ERASE_RTY:
+#ifdef MS_SPEEDUP
+ retval =
+ ms_auto_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 6,
+ BLOCK_ERASE, WAIT_INT, data, 6, &val);
+#else
+ retval = ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 6,
+ BLOCK_ERASE, WAIT_INT, data, 6, &val);
+#endif
+
+ if (val & INT_REG_CMDNK) {
+ if (i < 3) {
+ i++;
+ goto ERASE_RTY;
+ }
+ ms_set_err_code(chip, MS_CMD_NK);
+ ms_set_bad_block(chip, phy_blk);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static void ms_set_page_status(u16 log_blk, u8 type, u8 *extra, int extra_len)
+{
+ if (!extra || (extra_len < MS_EXTRA_SIZE))
+ return;
+
+ memset(extra, 0xFF, MS_EXTRA_SIZE);
+
+ if (type == setPS_NG)
+ extra[0] = 0xB8;
+ else
+ extra[0] = 0x98;
+
+ extra[2] = (u8) (log_blk >> 8);
+ extra[3] = (u8) log_blk;
+}
+
+static int ms_init_page(struct rts51x_chip *chip, u16 phy_blk, u16 log_blk,
+ u8 start_page, u8 end_page)
+{
+ int retval;
+ u8 extra[MS_EXTRA_SIZE], i;
+
+ memset(extra, 0xff, MS_EXTRA_SIZE);
+
+ extra[0] = 0xf8; /* Block, page OK, data erased */
+ extra[1] = 0xff;
+ extra[2] = (u8) (log_blk >> 8);
+ extra[3] = (u8) log_blk;
+
+ for (i = start_page; i < end_page; i++) {
+ if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval =
+ ms_write_extra_data(chip, phy_blk, i, extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_copy_page(struct rts51x_chip *chip, u16 old_blk, u16 new_blk,
+ u16 log_blk, u8 start_page, u8 end_page)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, rty_cnt, uncorrect_flag = 0;
+ u8 extra[MS_EXTRA_SIZE], val, i, j, data[16];
+
+ RTS51X_DEBUGP("Copy page from 0x%x to 0x%x, logical block is 0x%x\n",
+ old_blk, new_blk, log_blk);
+ RTS51X_DEBUGP("start_page = %d, end_page = %d\n", start_page,
+ end_page);
+
+ retval = ms_read_extra_data(chip, new_blk, 0, extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTS51X_READ_REG(chip, PPBUF_BASE2, &val);
+
+ if (val & BUF_FULL) {
+ /* Clear Buffer */
+ retval = ms_send_cmd(chip, CLEAR_BUF, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ /* GET_INT Register */
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (!(val & INT_REG_CED)) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ for (i = start_page; i < end_page; i++) {
+ if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_read_extra_data(chip, old_blk, i, extra, MS_EXTRA_SIZE);
+
+ retval =
+ ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ /* Write REG */
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (old_blk >> 8);
+ data[3] = (u8) old_blk;
+ data[4] = 0x20;
+ data[5] = i;
+
+ retval =
+ ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT, data, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS) {
+ uncorrect_flag = 1;
+ RTS51X_DEBUGP("Uncorrectable"
+ "error\n");
+ } else {
+ uncorrect_flag = 0;
+ }
+
+ retval =
+ ms_transfer_tpc(chip, MS_TM_NORMAL_READ,
+ READ_PAGE_DATA, 0, NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ if (uncorrect_flag) {
+ ms_set_page_status(log_blk, setPS_NG,
+ extra, MS_EXTRA_SIZE);
+ if (i == 0)
+ extra[0] &= 0xEF;
+ ms_write_extra_data(chip, old_blk, i,
+ extra,
+ MS_EXTRA_SIZE);
+ RTS51X_DEBUGP("page %d :"
+ "extra[0] = 0x%x\n",
+ i, extra[0]);
+ MS_SET_BAD_BLOCK_FLG(ms_card);
+
+ ms_set_page_status(log_blk, setPS_Error,
+ extra, MS_EXTRA_SIZE);
+ ms_write_extra_data(chip, new_blk, i,
+ extra, MS_EXTRA_SIZE);
+ continue;
+ }
+
+ for (rty_cnt = 0; rty_cnt < MS_MAX_RETRY_COUNT;
+ rty_cnt++) {
+ retval =
+ ms_transfer_tpc(chip,
+ MS_TM_NORMAL_WRITE,
+ WRITE_PAGE_DATA, 0,
+ NO_WAIT_INT);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (rty_cnt == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
+ MS_EXTRA_SIZE, SystemParm,
+ (6 + MS_EXTRA_SIZE));
+
+ /* Write REG */
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (new_blk >> 8);
+ data[3] = (u8) new_blk;
+ data[4] = 0x20;
+ data[5] = i;
+
+ /* for MS check procedure */
+ if ((extra[0] & 0x60) != 0x60)
+ data[6] = extra[0];
+ else
+ data[6] = 0xF8;
+
+ data[6 + 1] = 0xFF;
+ data[6 + 2] = (u8) (log_blk >> 8);
+ data[6 + 3] = (u8) log_blk;
+
+ for (j = 4; j <= MS_EXTRA_SIZE; j++)
+ data[6 + j] = 0xFF;
+
+ retval =
+ ms_write_bytes(chip, WRITE_REG, (6 + MS_EXTRA_SIZE),
+ NO_WAIT_INT, data, 16);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ /* GET_INT Register */
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (i == 0) {
+ retval =
+ ms_set_rw_reg_addr(chip, OverwriteFlag,
+ MS_EXTRA_SIZE, SystemParm, 7);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (old_blk >> 8);
+ data[3] = (u8) old_blk;
+ data[4] = 0x80;
+ data[5] = 0;
+ data[6] = 0xEF;
+ data[7] = 0xFF;
+
+ retval =
+ ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT,
+ data, 8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval =
+ ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val,
+ 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip,
+ MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef MS_SPEEDUP
+static int ms_auto_copy_page(struct rts51x_chip *chip, u16 old_blk, u16 new_blk,
+ u16 log_blk, u8 start_page, u8 end_page)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 page_len, bus_width, val = 0;
+ u8 extra[MS_EXTRA_SIZE];
+
+ RTS51X_DEBUGP("Auto copy page from 0x%x to 0x%x,"
+ "logical block is 0x%x\n",
+ old_blk, new_blk, log_blk);
+ RTS51X_DEBUGP("start_page = %d, end_page = %d\n", start_page,
+ end_page);
+
+ page_len = end_page - start_page;
+
+ retval = ms_read_extra_data(chip, new_blk, 0, extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTS51X_READ_REG(chip, PPBUF_BASE2, &val);
+
+ if (val & BUF_FULL) {
+ retval = ms_send_cmd(chip, CLEAR_BUF, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (!(val & INT_REG_CED)) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ bus_width = 0x88;
+ } else {
+ /* Serial interface */
+ bus_width = 0x80;
+ }
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_OLD_BLOCK_0, 0xFF, (u8) old_blk);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_OLD_BLOCK_1, 0xFF,
+ (u8) (old_blk >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_NEW_BLOCK_0, 0xFF, (u8) new_blk);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_NEW_BLOCK_1, 0xFF,
+ (u8) (new_blk >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_LOG_BLOCK_0, 0xFF, (u8) log_blk);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_LOG_BLOCK_1, 0xFF,
+ (u8) (log_blk >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_PAGE_START, 0xFF, start_page);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_PAGE_LENGTH, 0xFF, page_len);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BUS_WIDTH, 0xFF, bus_width);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_COPY_PAGE);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END,
+ MS_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_ms_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ retval = rts51x_get_rsp(chip, 1, 5000);
+
+ if (CHECK_MS_TRANS_FAIL(chip, retval)) {
+ rts51x_clear_ms_error(chip);
+ if (retval == STATUS_TIMEDOUT)
+ TRACE_RET(chip, retval);
+ TRACE_GOTO(chip, Fail);
+ }
+
+ return STATUS_SUCCESS;
+
+Fail:
+ retval = ms_erase_block(chip, new_blk);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval =
+ ms_copy_page(chip, old_blk, new_blk, log_blk, start_page, end_page);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+static int reset_ms(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u16 i, reg_addr, block_size;
+ u8 val, j, *ptr;
+#ifndef SUPPORT_MAGIC_GATE
+ u16 eblock_cnt;
+#endif
+
+ retval = ms_prepare_reset(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ ms_card->ms_type |= TYPE_MS;
+
+ retval = ms_send_cmd(chip, MS_RESET, NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTS51X_READ_REG(chip, PPBUF_BASE2, &val);
+ if (val & WRT_PRTCT)
+ chip->card_wp |= MS_CARD;
+ else
+ chip->card_wp &= ~MS_CARD;
+
+ i = 0;
+
+RE_SEARCH:
+ /* Search For Boot Block */
+ while (i < (MAX_DEFECTIVE_BLOCK + 2)) {
+ if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_check_boot_block(chip, i);
+ if (retval != STATUS_SUCCESS) {
+ i++;
+ continue;
+ }
+
+ ms_card->boot_block = i;
+ break;
+ }
+
+ if (i == (MAX_DEFECTIVE_BLOCK + 2)) {
+ RTS51X_DEBUGP("No boot block found!");
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ for (j = 0; j < 3; j++) {
+ retval = ms_read_page(chip, ms_card->boot_block, j);
+ if (retval != STATUS_SUCCESS) {
+ if (ms_check_err_code(chip, MS_FLASH_WRITE_ERROR)) {
+ i = ms_card->boot_block + 1;
+ ms_set_err_code(chip, MS_NO_ERROR);
+ goto RE_SEARCH;
+ }
+ }
+ }
+
+ /* Read boot block contents */
+ retval = ms_read_page(chip, ms_card->boot_block, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ /* Read MS system information as sys_info */
+ retval =
+ rts51x_seq_read_register(chip, PPBUF_BASE2 + 0x1A0, 96,
+ ms_card->raw_sys_info);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ /* Read useful block contents */
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, READ_REG_CMD, HEADER_ID0, 0, 0);
+ rts51x_add_cmd(chip, READ_REG_CMD, HEADER_ID1, 0, 0);
+
+ for (reg_addr = DISABLED_BLOCK0; reg_addr <= DISABLED_BLOCK3;
+ reg_addr++) {
+ rts51x_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+ }
+
+ for (reg_addr = BLOCK_SIZE_0; reg_addr <= PAGE_SIZE_1; reg_addr++)
+ rts51x_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+
+ rts51x_add_cmd(chip, READ_REG_CMD, MS_Device_Type, 0, 0);
+ rts51x_add_cmd(chip, READ_REG_CMD, MS_4bit_Support, 0, 0);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 16, 100);
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ ptr = rts51x_get_rsp_data(chip);
+
+ RTS51X_DEBUGP("Boot block data:\n");
+ RTS51X_DUMP(ptr, 16);
+
+ if (ptr[0] != 0x00 || ptr[1] != 0x01) {
+ i = ms_card->boot_block + 1;
+ goto RE_SEARCH;
+ }
+ if (ptr[12] != 0x02 || ptr[13] != 0x00) {
+ i = ms_card->boot_block + 1;
+ goto RE_SEARCH;
+ }
+ if ((ptr[14] == 1) || (ptr[14] == 3))
+ chip->card_wp |= MS_CARD;
+ block_size = ((u16) ptr[6] << 8) | ptr[7];
+ if (block_size == 0x0010) {
+ ms_card->block_shift = 5;
+ ms_card->page_off = 0x1F;
+ } else if (block_size == 0x0008) {
+ ms_card->block_shift = 4;
+ ms_card->page_off = 0x0F;
+ }
+ ms_card->total_block = ((u16) ptr[8] << 8) | ptr[9];
+
+#ifdef SUPPORT_MAGIC_GATE
+ j = ptr[10];
+
+ if (ms_card->block_shift == 4) {
+ if (j < 2)
+ ms_card->capacity = 0x1EE0;
+ else
+ ms_card->capacity = 0x3DE0;
+ } else {
+ if (j < 5)
+ ms_card->capacity = 0x7BC0;
+ else if (j < 0xA)
+ ms_card->capacity = 0xF7C0;
+ else if (j < 0x11)
+ ms_card->capacity = 0x1EF80;
+ else
+ ms_card->capacity = 0x3DF00;
+ }
+#else
+ eblock_cnt = ((u16) ptr[10] << 8) | ptr[11];
+
+ ms_card->capacity = ((u32) eblock_cnt - 2) << ms_card->block_shift;
+#endif
+
+ chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity;
+
+ if (ptr[15]) {
+ retval = ms_set_rw_reg_addr(chip, 0, 0, SystemParm, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ RTS51X_WRITE_REG(chip, PPBUF_BASE2, 0xFF, 0x88);
+ RTS51X_WRITE_REG(chip, PPBUF_BASE2 + 1, 0xFF, 0);
+
+ retval =
+ ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG, 1,
+ NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ RTS51X_WRITE_REG(chip, MS_CFG, 0x58 | MS_NO_CHECK_INT,
+ MS_BUS_WIDTH_4 | PUSH_TIME_ODD |
+ MS_NO_CHECK_INT);
+
+ ms_card->ms_type |= MS_4BIT;
+ }
+
+ if (CHK_MS4BIT(ms_card))
+ chip->card_bus_width[chip->card2lun[MS_CARD]] = 4;
+ else
+ chip->card_bus_width[chip->card2lun[MS_CARD]] = 1;
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_init_l2p_tbl(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int size, i, seg_no, retval;
+ u16 defect_block, reg_addr;
+ u8 val1, val2;
+
+ ms_card->segment_cnt = ms_card->total_block >> 9;
+ RTS51X_DEBUGP("ms_card->segment_cnt = %d\n", ms_card->segment_cnt);
+
+ size = ms_card->segment_cnt * sizeof(struct zone_entry);
+ ms_card->segment = vmalloc(size);
+ if (ms_card->segment == NULL)
+ TRACE_RET(chip, STATUS_FAIL);
+ memset(ms_card->segment, 0, size);
+
+ retval = ms_read_page(chip, ms_card->boot_block, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, INIT_FAIL);
+
+ reg_addr = PPBUF_BASE2;
+ for (i = 0; i < (((ms_card->total_block >> 9) * 10) + 1); i++) {
+ retval = rts51x_read_register(chip, reg_addr++, &val1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, INIT_FAIL);
+ retval = rts51x_read_register(chip, reg_addr++, &val2);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, INIT_FAIL);
+
+ defect_block = ((u16) val1 << 8) | val2;
+ if (defect_block == 0xFFFF)
+ break;
+ seg_no = defect_block / 512;
+ ms_card->segment[seg_no].defect_list[ms_card->segment[seg_no].
+ disable_count++] =
+ defect_block;
+ }
+
+ for (i = 0; i < ms_card->segment_cnt; i++) {
+ ms_card->segment[i].build_flag = 0;
+ ms_card->segment[i].l2p_table = NULL;
+ ms_card->segment[i].free_table = NULL;
+ ms_card->segment[i].get_index = 0;
+ ms_card->segment[i].set_index = 0;
+ ms_card->segment[i].unused_blk_cnt = 0;
+
+ RTS51X_DEBUGP("defective block count of segment %d is %d\n",
+ i, ms_card->segment[i].disable_count);
+ }
+
+ return STATUS_SUCCESS;
+
+INIT_FAIL:
+ if (ms_card->segment) {
+ vfree(ms_card->segment);
+ ms_card->segment = NULL;
+ }
+
+ return STATUS_FAIL;
+}
+
+static u16 ms_get_l2p_tbl(struct rts51x_chip *chip, int seg_no, u16 log_off)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+
+ if (ms_card->segment == NULL)
+ return 0xFFFF;
+
+ segment = &(ms_card->segment[seg_no]);
+
+ if (segment->l2p_table)
+ return segment->l2p_table[log_off];
+
+ return 0xFFFF;
+}
+
+static void ms_set_l2p_tbl(struct rts51x_chip *chip, int seg_no, u16 log_off,
+ u16 phy_blk)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+
+ if (ms_card->segment == NULL)
+ return;
+
+ segment = &(ms_card->segment[seg_no]);
+ if (segment->l2p_table)
+ segment->l2p_table[log_off] = phy_blk;
+}
+
+static void ms_set_unused_block(struct rts51x_chip *chip, u16 phy_blk)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+ int seg_no;
+
+ seg_no = (int)phy_blk >> 9;
+ segment = &(ms_card->segment[seg_no]);
+
+ segment->free_table[segment->set_index++] = phy_blk;
+ if (segment->set_index >= MS_FREE_TABLE_CNT)
+ segment->set_index = 0;
+ segment->unused_blk_cnt++;
+}
+
+static u16 ms_get_unused_block(struct rts51x_chip *chip, int seg_no)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+ u16 phy_blk;
+
+ segment = &(ms_card->segment[seg_no]);
+
+ if (segment->unused_blk_cnt <= 0)
+ return 0xFFFF;
+
+ phy_blk = segment->free_table[segment->get_index];
+ segment->free_table[segment->get_index++] = 0xFFFF;
+ if (segment->get_index >= MS_FREE_TABLE_CNT)
+ segment->get_index = 0;
+ segment->unused_blk_cnt--;
+
+ return phy_blk;
+}
+
+static const unsigned short ms_start_idx[] = {
+ 0, 494, 990, 1486, 1982, 2478, 2974, 3470,
+ 3966, 4462, 4958, 5454, 5950, 6446, 6942, 7438, 7934
+};
+
+static int ms_arbitrate_l2p(struct rts51x_chip *chip, u16 phy_blk, u16 log_off,
+ u8 us1, u8 us2)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+ int seg_no;
+ u16 tmp_blk;
+
+ seg_no = (int)phy_blk >> 9;
+ segment = &(ms_card->segment[seg_no]);
+ tmp_blk = segment->l2p_table[log_off];
+
+ if (us1 != us2) {
+ if (us1 == 0) {
+ if (!(chip->card_wp & MS_CARD))
+ ms_erase_block(chip, tmp_blk);
+ ms_set_unused_block(chip, tmp_blk);
+ segment->l2p_table[log_off] = phy_blk;
+ } else {
+ if (!(chip->card_wp & MS_CARD))
+ ms_erase_block(chip, phy_blk);
+ ms_set_unused_block(chip, phy_blk);
+ }
+ } else {
+ if (phy_blk < tmp_blk) {
+ if (!(chip->card_wp & MS_CARD))
+ ms_erase_block(chip, phy_blk);
+ ms_set_unused_block(chip, phy_blk);
+ } else {
+ if (!(chip->card_wp & MS_CARD))
+ ms_erase_block(chip, tmp_blk);
+ ms_set_unused_block(chip, tmp_blk);
+ segment->l2p_table[log_off] = phy_blk;
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_build_l2p_tbl(struct rts51x_chip *chip, int seg_no)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+ int retval, table_size, disable_cnt, defect_flag, i;
+ u16 start, end, phy_blk, log_blk, tmp_blk;
+ u8 extra[MS_EXTRA_SIZE], us1, us2;
+
+ RTS51X_DEBUGP("ms_build_l2p_tbl: %d\n", seg_no);
+
+ if (ms_card->segment == NULL) {
+ retval = ms_init_l2p_tbl(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ if (ms_card->segment[seg_no].build_flag) {
+ RTS51X_DEBUGP("l2p table of segment %d has been built\n",
+ seg_no);
+ return STATUS_SUCCESS;
+ }
+
+ if (seg_no == 0)
+ table_size = 494;
+ else
+ table_size = 496;
+
+ segment = &(ms_card->segment[seg_no]);
+
+ if (segment->l2p_table == NULL) {
+ segment->l2p_table = vmalloc(table_size * 2);
+ if (segment->l2p_table == NULL)
+ TRACE_GOTO(chip, BUILD_FAIL);
+ }
+ memset((u8 *) (segment->l2p_table), 0xff, table_size * 2);
+
+ if (segment->free_table == NULL) {
+ segment->free_table = vmalloc(MS_FREE_TABLE_CNT * 2);
+ if (segment->free_table == NULL)
+ TRACE_GOTO(chip, BUILD_FAIL);
+ }
+ memset((u8 *) (segment->free_table), 0xff, MS_FREE_TABLE_CNT * 2);
+
+ start = (u16) seg_no << 9;
+ end = (u16) (seg_no + 1) << 9;
+
+ disable_cnt = segment->disable_count;
+
+ segment->get_index = segment->set_index = 0;
+ segment->unused_blk_cnt = 0;
+
+ for (phy_blk = start; phy_blk < end; phy_blk++) {
+ if (disable_cnt) {
+ defect_flag = 0;
+ for (i = 0; i < segment->disable_count; i++) {
+ if (phy_blk == segment->defect_list[i]) {
+ defect_flag = 1;
+ break;
+ }
+ }
+ if (defect_flag) {
+ disable_cnt--;
+ continue;
+ }
+ }
+
+ retval =
+ ms_read_extra_data(chip, phy_blk, 0, extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS) {
+ RTS51X_DEBUGP("read extra data fail\n");
+ ms_set_bad_block(chip, phy_blk);
+ continue;
+ }
+
+ if (seg_no == ms_card->segment_cnt - 1) {
+ if (!(extra[1] & NOT_TRANSLATION_TABLE)) {
+ if (!(chip->card_wp & MS_CARD)) {
+ retval = ms_erase_block(chip, phy_blk);
+ if (retval != STATUS_SUCCESS)
+ continue;
+ extra[2] = 0xff;
+ extra[3] = 0xff;
+ }
+ }
+ }
+
+ if (!(extra[0] & BLOCK_OK))
+ continue;
+ if (!(extra[1] & NOT_BOOT_BLOCK))
+ continue;
+ if ((extra[0] & PAGE_OK) != PAGE_OK)
+ continue;
+
+ log_blk = ((u16) extra[2] << 8) | extra[3];
+
+ if (log_blk == 0xFFFF) {
+ if (!(chip->card_wp & MS_CARD)) {
+ retval = ms_erase_block(chip, phy_blk);
+ if (retval != STATUS_SUCCESS)
+ continue;
+ }
+ ms_set_unused_block(chip, phy_blk);
+ continue;
+ }
+
+ if ((log_blk < ms_start_idx[seg_no]) ||
+ (log_blk >= ms_start_idx[seg_no + 1])) {
+ if (!(chip->card_wp & MS_CARD)) {
+ retval = ms_erase_block(chip, phy_blk);
+ if (retval != STATUS_SUCCESS)
+ continue;
+ }
+ ms_set_unused_block(chip, phy_blk);
+ continue;
+ }
+
+ if (segment->l2p_table[log_blk - ms_start_idx[seg_no]] ==
+ 0xFFFF) {
+ segment->l2p_table[log_blk - ms_start_idx[seg_no]] =
+ phy_blk;
+ continue;
+ }
+
+ us1 = extra[0] & 0x10;
+ tmp_blk = segment->l2p_table[log_blk - ms_start_idx[seg_no]];
+ retval =
+ ms_read_extra_data(chip, tmp_blk, 0, extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ continue;
+ us2 = extra[0] & 0x10;
+
+ (void)ms_arbitrate_l2p(chip, phy_blk,
+ log_blk - ms_start_idx[seg_no], us1,
+ us2);
+ continue;
+ }
+
+ segment->build_flag = 1;
+
+ RTS51X_DEBUGP("unused block count: %d\n", segment->unused_blk_cnt);
+
+ if (seg_no == ms_card->segment_cnt - 1) {
+ if (segment->unused_blk_cnt < 2)
+ chip->card_wp |= MS_CARD;
+ } else {
+ if (segment->unused_blk_cnt < 1)
+ chip->card_wp |= MS_CARD;
+ }
+
+ if (chip->card_wp & MS_CARD)
+ return STATUS_SUCCESS;
+
+ for (log_blk = ms_start_idx[seg_no]; log_blk < ms_start_idx[seg_no + 1];
+ log_blk++) {
+ if (segment->l2p_table[log_blk - ms_start_idx[seg_no]] ==
+ 0xFFFF) {
+ phy_blk = ms_get_unused_block(chip, seg_no);
+ if (phy_blk == 0xFFFF) {
+ chip->card_wp |= MS_CARD;
+ return STATUS_SUCCESS;
+ }
+ retval = ms_init_page(chip, phy_blk, log_blk, 0, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, BUILD_FAIL);
+ segment->l2p_table[log_blk - ms_start_idx[seg_no]] =
+ phy_blk;
+ if (seg_no == ms_card->segment_cnt - 1) {
+ if (segment->unused_blk_cnt < 2) {
+ chip->card_wp |= MS_CARD;
+ return STATUS_SUCCESS;
+ }
+ } else {
+ if (segment->unused_blk_cnt < 1) {
+ chip->card_wp |= MS_CARD;
+ return STATUS_SUCCESS;
+ }
+ }
+ }
+ }
+
+ if (seg_no == 0) {
+ for (log_blk = 0; log_blk < 494; log_blk++) {
+ tmp_blk = segment->l2p_table[log_blk];
+ if (tmp_blk < ms_card->boot_block) {
+ RTS51X_DEBUGP("Boot block is not the first"
+ "normal block.\n");
+
+ if (chip->card_wp & MS_CARD)
+ break;
+
+ phy_blk = ms_get_unused_block(chip, 0);
+#ifdef MS_SPEEDUP
+ retval =
+ ms_auto_copy_page(chip, tmp_blk, phy_blk,
+ log_blk, 0,
+ ms_card->page_off + 1);
+#else
+ retval = ms_copy_page(chip, tmp_blk, phy_blk,
+ log_blk, 0,
+ ms_card->page_off + 1);
+#endif
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ segment->l2p_table[log_blk] = phy_blk;
+
+ retval = ms_set_bad_block(chip, tmp_blk);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+ }
+ }
+
+ return STATUS_SUCCESS;
+
+BUILD_FAIL:
+ segment->build_flag = 0;
+ if (segment->l2p_table) {
+ vfree(segment->l2p_table);
+ segment->l2p_table = NULL;
+ }
+ if (segment->free_table) {
+ vfree(segment->free_table);
+ segment->free_table = NULL;
+ }
+
+ return STATUS_FAIL;
+}
+
+int reset_ms_card(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ memset(ms_card, 0, sizeof(struct ms_info));
+
+ enable_card_clock(chip, MS_CARD);
+
+ retval = rts51x_select_card(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ ms_card->ms_type = 0;
+ ms_card->last_rw_int = 0;
+
+ retval = reset_ms_pro(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (ms_card->check_ms_flow) {
+ retval = reset_ms(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (chip->option.reset_or_rw_fail_set_pad_drive) {
+ rts51x_write_register(chip,
+ CARD_DRIVE_SEL, SD20_DRIVE_MASK,
+ DRIVE_8mA);
+ }
+ TRACE_RET(chip, retval);
+ }
+ } else {
+ if (chip->option.reset_or_rw_fail_set_pad_drive) {
+ rts51x_write_register(chip, CARD_DRIVE_SEL,
+ SD20_DRIVE_MASK,
+ DRIVE_8mA);
+ }
+ TRACE_RET(chip, retval);
+ }
+ }
+
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (!CHK_MSPRO(ms_card)) {
+ retval = ms_build_l2p_tbl(chip, ms_card->total_block / 512 - 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ RTS51X_DEBUGP("ms_card->ms_type = 0x%x\n", ms_card->ms_type);
+
+ return STATUS_SUCCESS;
+}
+
+static int mspro_set_rw_cmd(struct rts51x_chip *chip, u32 start_sec,
+ u16 sec_cnt, u8 cmd)
+{
+ int retval, i;
+ u8 data[8];
+
+ data[0] = cmd;
+ data[1] = (u8) (sec_cnt >> 8);
+ data[2] = (u8) sec_cnt;
+ data[3] = (u8) (start_sec >> 24);
+ data[4] = (u8) (start_sec >> 16);
+ data[5] = (u8) (start_sec >> 8);
+ data[6] = (u8) start_sec;
+ data[7] = 0;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval =
+ ms_write_bytes(chip, PRO_EX_SET_CMD, 7, WAIT_INT, data, 8);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+void mspro_stop_seq_mode(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ if (ms_card->seq_mode) {
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ return;
+
+ ms_card->seq_mode = 0;
+ ms_card->total_sec_cnt = 0;
+ ms_card->last_rw_int = 0;
+ ms_send_cmd(chip, PRO_STOP, WAIT_INT);
+
+ rts51x_ep0_write_register(chip, MC_FIFO_CTL, FIFO_FLUSH,
+ FIFO_FLUSH);
+ }
+}
+
+static inline int ms_auto_tune_clock(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ if (chip->asic_code) {
+ if (ms_card->ms_clock > 30)
+ ms_card->ms_clock -= 20;
+ } else {
+ if (ms_card->ms_clock == CLK_80)
+ ms_card->ms_clock = CLK_60;
+ else if (ms_card->ms_clock == CLK_60)
+ ms_card->ms_clock = CLK_40;
+ }
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
+ struct rts51x_chip *chip, u32 start_sector,
+ u16 sector_cnt)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, mode_2k = 0;
+ u16 count;
+ u8 val, trans_mode, rw_tpc, rw_cmd;
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ ms_card->counter = 0;
+
+ if (CHK_MSHG(ms_card)) {
+ if ((start_sector % 4) || (sector_cnt % 4)) {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ rw_tpc = PRO_READ_LONG_DATA;
+ rw_cmd = PRO_READ_DATA;
+ } else {
+ rw_tpc = PRO_WRITE_LONG_DATA;
+ rw_cmd = PRO_WRITE_DATA;
+ }
+ } else {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ rw_tpc = PRO_READ_QUAD_DATA;
+ rw_cmd = PRO_READ_2K_DATA;
+ } else {
+ rw_tpc = PRO_WRITE_QUAD_DATA;
+ rw_cmd = PRO_WRITE_2K_DATA;
+ }
+ mode_2k = 1;
+ }
+ } else {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ rw_tpc = PRO_READ_LONG_DATA;
+ rw_cmd = PRO_READ_DATA;
+ } else {
+ rw_tpc = PRO_WRITE_LONG_DATA;
+ rw_cmd = PRO_WRITE_DATA;
+ }
+ }
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ trans_mode = MS_TM_AUTO_READ;
+ else
+ trans_mode = MS_TM_AUTO_WRITE;
+
+ val = ms_card->last_rw_int;
+
+ if (ms_card->seq_mode) {
+ if ((ms_card->pre_dir != srb->sc_data_direction)
+ || ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) !=
+ start_sector)
+ || (mode_2k && (ms_card->seq_mode & MODE_512_SEQ))
+ || (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ))
+ || !(val & MS_INT_BREQ)
+ || ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) {
+ ms_card->seq_mode = 0;
+ ms_card->total_sec_cnt = 0;
+ ms_card->last_rw_int = 0;
+ if (val & MS_INT_BREQ) {
+ retval = ms_send_cmd(chip, PRO_STOP, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ rts51x_ep0_write_register(chip, MC_FIFO_CTL,
+ FIFO_FLUSH, FIFO_FLUSH);
+ }
+ }
+ }
+
+ if (!ms_card->seq_mode) {
+ ms_card->total_sec_cnt = 0;
+ if (sector_cnt >= 0x80) {
+ if ((ms_card->capacity - start_sector) > 0xFE00)
+ count = 0xFE00;
+ else
+ count =
+ (u16) (ms_card->capacity - start_sector);
+ if (count > sector_cnt) {
+ if (mode_2k)
+ ms_card->seq_mode |= MODE_2K_SEQ;
+ else
+ ms_card->seq_mode |= MODE_512_SEQ;
+ }
+ } else {
+ count = sector_cnt;
+ }
+ retval = mspro_set_rw_cmd(chip, start_sector, count, rw_cmd);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->seq_mode = 0;
+ TRACE_RET(chip, retval);
+ }
+ }
+
+ retval =
+ ms_transfer_data(chip, trans_mode, rw_tpc, sector_cnt, WAIT_INT,
+ mode_2k, scsi_sg_count(srb), scsi_sglist(srb),
+ scsi_bufflen(srb));
+ if (retval != STATUS_SUCCESS) {
+ ms_card->seq_mode = 0;
+ rts51x_ep0_read_register(chip, MS_TRANS_CFG, &val);
+ rts51x_clear_ms_error(chip);
+ if (val & MS_INT_BREQ)
+ ms_send_cmd(chip, PRO_STOP, WAIT_INT);
+ if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
+ RTS51X_DEBUGP("MSPro CRC error, tune clock!\n");
+ ms_auto_tune_clock(chip);
+ }
+
+ TRACE_RET(chip, retval);
+ }
+
+ ms_card->pre_sec_addr = start_sector;
+ ms_card->pre_sec_cnt = sector_cnt;
+ ms_card->pre_dir = srb->sc_data_direction;
+ ms_card->total_sec_cnt += sector_cnt;
+
+ return STATUS_SUCCESS;
+}
+
+static int mspro_read_format_progress(struct rts51x_chip *chip,
+ const int short_data_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u32 total_progress, cur_progress;
+ u8 cnt, tmp;
+ u8 data[8];
+
+ ms_card->format_status = FORMAT_FAIL;
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTS51X_READ_REG(chip, MS_TRANS_CFG, &tmp);
+
+ if ((tmp & (MS_INT_CED | MS_INT_CMDNK | MS_INT_ERR)) == MS_INT_CED) {
+ ms_card->format_status = FORMAT_SUCCESS;
+ ms_card->pro_under_formatting = 0;
+ return STATUS_SUCCESS;
+ }
+ if (!
+ ((tmp & (MS_INT_BREQ | MS_INT_CED | MS_INT_CMDNK | MS_INT_ERR)) ==
+ MS_INT_BREQ)) {
+ ms_card->pro_under_formatting = 0;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (short_data_len >= 256)
+ cnt = 0;
+ else
+ cnt = (u8) short_data_len;
+
+ retval =
+ ms_read_bytes(chip, PRO_READ_SHORT_DATA, cnt, WAIT_INT, data, 8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ total_progress =
+ (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3];
+ cur_progress =
+ (data[4] << 24) | (data[5] << 16) | (data[6] << 8) | data[7];
+
+ RTS51X_DEBUGP("total_progress = %d, cur_progress = %d\n",
+ total_progress, cur_progress);
+
+ if (total_progress == 0) {
+ ms_card->progress = 0;
+ } else {
+ u64 ulltmp = (u64) cur_progress * (u64) 65535;
+ do_div(ulltmp, total_progress);
+ ms_card->progress = (u16) ulltmp;
+ }
+ RTS51X_DEBUGP("progress = %d\n", ms_card->progress);
+
+ for (i = 0; i < 2500; i++) {
+ RTS51X_READ_REG(chip, MS_TRANS_CFG, &tmp);
+ if (tmp &
+ (MS_INT_CED | MS_INT_CMDNK | MS_INT_BREQ | MS_INT_ERR))
+ break;
+
+ wait_timeout(1);
+ }
+
+ if (i == 2500)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTS51X_DEBUGP("MSPro format tmp:%d\n", tmp);
+
+ if (tmp & (MS_INT_CMDNK | MS_INT_ERR))
+ TRACE_RET(chip, STATUS_FAIL);
+ if (tmp & MS_INT_CED) {
+ ms_card->format_status = FORMAT_SUCCESS;
+ ms_card->pro_under_formatting = 0;
+ } else if (tmp & MS_INT_BREQ) {
+ ms_card->format_status = FORMAT_IN_PROGRESS;
+ } else {
+ ms_card->format_status = FORMAT_FAIL;
+ ms_card->pro_under_formatting = 0;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTS51X_DEBUGP("MSPro format format_status:%d\n",
+ ms_card->format_status);
+
+ return STATUS_SUCCESS;
+}
+
+void mspro_polling_format_status(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int i;
+
+ if (ms_card->pro_under_formatting) {
+ for (i = 0; i < 65535; i++) {
+ mspro_read_format_progress(chip, MS_SHORT_DATA_LEN);
+ if (ms_card->format_status != FORMAT_IN_PROGRESS)
+ break;
+ }
+ }
+
+ return;
+}
+
+void mspro_format_sense(struct rts51x_chip *chip, unsigned int lun)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ if (CHK_FORMAT_STATUS(ms_card, FORMAT_SUCCESS)) {
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ ms_card->pro_under_formatting = 0;
+ ms_card->progress = 0;
+ } else if (CHK_FORMAT_STATUS(ms_card, FORMAT_IN_PROGRESS)) {
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
+ 0, (u16) (ms_card->progress));
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
+ ms_card->pro_under_formatting = 0;
+ ms_card->progress = 0;
+ }
+}
+
+int mspro_format(struct scsi_cmnd *srb, struct rts51x_chip *chip,
+ int short_data_len, int quick_format)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 buf[8], tmp;
+ u16 para;
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_set_rw_reg_addr(chip, 0x00, 0x00, Pro_TPCParm, 0x01);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ memset(buf, 0, 2);
+ switch (short_data_len) {
+ case 32:
+ buf[0] = 0;
+ break;
+ case 64:
+ buf[0] = 1;
+ break;
+ case 128:
+ buf[0] = 2;
+ break;
+ case 256:
+ default:
+ buf[0] = 3;
+ break;
+ }
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval =
+ ms_write_bytes(chip, PRO_WRITE_REG, 1, NO_WAIT_INT, buf, 2);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+ /* Format command */
+ if (quick_format)
+ para = 0x0000;
+ else
+ para = 0x0001;
+ retval = mspro_set_rw_cmd(chip, 0, para, PRO_FORMAT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ /* Check INT */
+ RTS51X_READ_REG(chip, MS_TRANS_CFG, &tmp);
+ if (tmp & (MS_INT_CMDNK | MS_INT_ERR))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((tmp & (MS_INT_BREQ | MS_INT_CED)) == MS_INT_BREQ) {
+ ms_card->pro_under_formatting = 1;
+ ms_card->progress = 0;
+ ms_card->format_status = FORMAT_IN_PROGRESS;
+ return STATUS_SUCCESS;
+ }
+
+ if (tmp & MS_INT_CED) {
+ ms_card->pro_under_formatting = 0;
+ ms_card->progress = 0;
+ ms_card->format_status = FORMAT_SUCCESS;
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_NO_SENSE);
+ return STATUS_SUCCESS;
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+#ifdef MS_SPEEDUP
+static int ms_read_multiple_pages(struct rts51x_chip *chip, u16 phy_blk,
+ u16 log_blk, u8 start_page, u8 end_page,
+ u8 *buf, void **ptr, unsigned int *offset)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int send_blkend;
+ u8 extra[MS_EXTRA_SIZE], val1, val2, data[6];
+ u8 page_cnt = end_page - start_page, page_addr, sec_cnt;
+
+ if (end_page != (ms_card->page_off + 1))
+ send_blkend = 1;
+ else
+ send_blkend = 0;
+
+ retval =
+ ms_read_extra_data(chip, phy_blk, start_page, extra, MS_EXTRA_SIZE);
+ if (retval == STATUS_SUCCESS) {
+ if ((extra[1] & 0x30) != 0x30) {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (phy_blk >> 8);
+ data[3] = (u8) phy_blk;
+ /* Page Number
+ * Extra data access mode */
+ data[4] = 0;
+ data[5] = start_page;
+
+ retval =
+ ms_auto_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 6,
+ BLOCK_READ, WAIT_INT, data, 6, &val1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ rts51x_init_cmd(chip);
+
+ if (send_blkend)
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BLKEND, SET_BLKEND,
+ SET_BLKEND);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BLKEND, SET_BLKEND, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, WAIT_INT,
+ NO_WAIT_INT);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_L, 0xFF,
+ (u8) page_cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_H, 0xFF, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, READ_PAGE_DATA);
+
+ trans_dma_enable(DMA_FROM_DEVICE, chip, 512 * page_cnt, DMA_512);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_MULTI_READ);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END,
+ MS_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CDIR | STAGE_MS_STATUS, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval =
+ rts51x_transfer_data_partial(chip, RCV_BULK_PIPE(chip), (void *)buf,
+ ptr, offset, 512 * page_cnt,
+ scsi_sg_count(chip->srb), NULL, 2000);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_ms_error(chip);
+ if (retval == STATUS_TIMEDOUT)
+ TRACE_RET(chip, retval);
+ TRACE_GOTO(chip, Fail);
+ }
+ retval = rts51x_get_rsp(chip, 3, 200);
+ if (CHECK_MS_TRANS_FAIL(chip, retval)) {
+ rts51x_clear_ms_error(chip);
+ if (retval == STATUS_TIMEDOUT)
+ TRACE_RET(chip, retval);
+ TRACE_GOTO(chip, Fail);
+ }
+
+ return STATUS_SUCCESS;
+
+Fail:
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, READ_REG_CMD, MS_SECTOR_CNT_L, 0, 0);
+
+ retval = rts51x_send_cmd(chip, MODE_CR | STAGE_MS_STATUS, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 3, 200);
+
+ if (CHECK_MS_TRANS_FAIL(chip, retval))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ sec_cnt = chip->rsp_buf[0];
+ RTS51X_DEBUGP("%d pages need be trasferred, %d pages remained\n",
+ (int)page_cnt, (int)sec_cnt);
+ page_addr = start_page + (page_cnt - sec_cnt);
+
+ if (CHK_MS4BIT(ms_card)) {
+ val1 = chip->rsp_buf[1];
+ RTS51X_DEBUGP("MS_TRANS_CFG: 0x%x\n", val1);
+ } else {
+ val1 = 0;
+ }
+
+ val2 = chip->rsp_buf[2];
+ RTS51X_DEBUGP("GET_INT: 0x%x\n", val2);
+
+ if ((val1 & INT_CMDNK) || (val2 & INT_REG_CMDNK)) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if ((val1 & INT_ERR) || (val2 & INT_REG_ERR)) {
+ if ((val1 & INT_BREQ) || (val2 & INT_REG_BREQ)) {
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (!(chip->card_wp & MS_CARD)) {
+ reset_ms(chip);
+ ms_set_page_status(log_blk, setPS_NG,
+ extra, MS_EXTRA_SIZE);
+ ms_write_extra_data(chip, phy_blk,
+ page_addr, extra,
+ MS_EXTRA_SIZE);
+ }
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ if (CHK_MS4BIT(ms_card)) {
+ if (!(val1 & INT_BREQ) && !(val2 & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ if (!(val2 & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int ms_write_multiple_pages(struct rts51x_chip *chip, u16 old_blk,
+ u16 new_blk, u16 log_blk, u8 start_page,
+ u8 end_page, u8 *buf, void **ptr,
+ unsigned int *offset)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ int send_blkend;
+ u8 val, data[16];
+ u8 page_cnt = end_page - start_page;
+
+ if ((end_page == (ms_card->page_off + 1)) || (page_cnt == 1))
+ send_blkend = 0;
+ else
+ send_blkend = 1;
+
+ if (!start_page) {
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (old_blk >> 8);
+ data[3] = (u8) old_blk;
+ data[4] = 0x80;
+ data[5] = 0;
+ data[6] = 0xEF;
+ data[7] = 0xFF;
+
+ retval =
+ ms_auto_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 7, BLOCK_WRITE, WAIT_INT, data,
+ 7, &val);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ retval =
+ ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm,
+ (6 + MS_EXTRA_SIZE));
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (new_blk >> 8);
+ data[3] = (u8) new_blk;
+ /* Page Number
+ * Extra data access mode */
+ if (page_cnt == 1) {
+ /* Single page access mode */
+ data[4] = 0x20;
+ } else {
+ /* Block access mode */
+ data[4] = 0;
+ }
+ data[5] = start_page;
+ data[6] = 0xF8;
+ data[7] = 0xFF;
+ data[8] = (u8) (log_blk >> 8);
+ data[9] = (u8) log_blk;
+
+ for (i = 0x0A; i < 0x10; i++) {
+ /* ECC */
+ data[i] = 0xFF;
+ }
+
+ retval =
+ ms_auto_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm,
+ (6 + MS_EXTRA_SIZE), BLOCK_WRITE, WAIT_INT, data,
+ 16, &val);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ rts51x_init_cmd(chip);
+
+ if (send_blkend)
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BLKEND, SET_BLKEND,
+ SET_BLKEND);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BLKEND, SET_BLKEND, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, WAIT_INT,
+ NO_WAIT_INT);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_L, 0xFF,
+ (u8) page_cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_H, 0xFF, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, WRITE_PAGE_DATA);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ RING_BUFFER);
+
+ trans_dma_enable(DMA_TO_DEVICE, chip, 512 * page_cnt, DMA_512);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_MULTI_WRITE);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END,
+ MS_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CDOR | STAGE_MS_STATUS, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval =
+ rts51x_transfer_data_partial(chip, SND_BULK_PIPE(chip), (void *)buf,
+ ptr, offset, 512 * page_cnt,
+ scsi_sg_count(chip->srb), NULL, 2000);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_ms_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ retval = rts51x_get_rsp(chip, 3, 2000);
+
+
+ if (CHECK_MS_TRANS_FAIL(chip, retval)) {
+ rts51x_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+#else
+
+static int ms_read_multiple_pages(struct rts51x_chip *chip, u16 phy_blk,
+ u16 log_blk, u8 start_page, u8 end_page,
+ u8 *buf, void **ptr, unsigned int *offset)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 extra[MS_EXTRA_SIZE], page_addr, val, trans_cfg, data[6];
+
+ retval =
+ ms_read_extra_data(chip, phy_blk, start_page, extra, MS_EXTRA_SIZE);
+ if (retval == STATUS_SUCCESS) {
+ if ((extra[1] & 0x30) != 0x30) {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval =
+ ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm,
+ 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ /* Write REG */
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (phy_blk >> 8);
+ data[3] = (u8) phy_blk;
+ /* Page Number
+ * Extra data access mode */
+ data[4] = 0;
+ data[5] = start_page;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval =
+ ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT, data, 6);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ for (page_addr = start_page; page_addr < end_page; page_addr++) {
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ chip->card_exist &= ~MS_CARD;
+ chip->card_ready &= ~MS_CARD;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ /* GET_INT Register */
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & INT_REG_ERR) {
+ if (val & INT_REG_BREQ) {
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (!(chip->card_wp & MS_CARD)) {
+ reset_ms(chip);
+ ms_set_page_status(log_blk,
+ setPS_NG, extra,
+ MS_EXTRA_SIZE);
+ ms_write_extra_data(chip,
+ phy_blk, page_addr,
+ extra, MS_EXTRA_SIZE);
+ }
+ ms_set_err_code(chip,
+ MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (page_addr == (end_page - 1)) {
+ if (!(val & INT_REG_CED)) {
+ retval = ms_send_cmd(chip, BLOCK_END, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+ retval =
+ ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val,
+ 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ if (!(val & INT_REG_CED)) {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ trans_cfg = NO_WAIT_INT;
+ } else {
+ trans_cfg = WAIT_INT;
+ }
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF,
+ READ_PAGE_DATA);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF,
+ trans_cfg);
+
+ trans_dma_enable(DMA_FROM_DEVICE, chip, 512, DMA_512);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_NORMAL_READ);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CDIR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval =
+ rts51x_transfer_data_partial(chip, RCV_BULK_PIPE(chip),
+ (void *)buf, ptr, offset, 512,
+ scsi_sg_count(chip->srb), NULL,
+ 2000);
+ if (retval != STATUS_SUCCESS) {
+ if (retval == STATUS_TIMEDOUT) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ rts51x_clear_ms_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ retval =
+ rts51x_ep0_read_register(chip, MS_TRANS_CFG, &val);
+ if (retval != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ rts51x_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
+ ms_set_err_code(chip, MS_CRC16_ERROR);
+ rts51x_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = rts51x_get_rsp(chip, 1, 2000);
+ if (CHECK_MS_TRANS_FAIL(chip, retval)) {
+ if (retval == STATUS_TIMEDOUT) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ rts51x_clear_ms_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ retval =
+ rts51x_ep0_read_register(chip, MS_TRANS_CFG, &val);
+ if (retval != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ rts51x_clear_ms_error(chip);
+ TRACE_RET(chip, retval);
+ }
+ if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
+ ms_set_err_code(chip, MS_CRC16_ERROR);
+ rts51x_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_write_multiple_pages(struct rts51x_chip *chip, u16 old_blk,
+ u16 new_blk, u16 log_blk, u8 start_page,
+ u8 end_page, u8 *buf, void **ptr,
+ unsigned int *offset)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 page_addr, val, data[16];
+
+ if (!start_page) {
+ retval =
+ ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 7);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (old_blk >> 8);
+ data[3] = (u8) old_blk;
+ data[4] = 0x80;
+ data[5] = 0;
+ data[6] = 0xEF;
+ data[7] = 0xFF;
+
+ retval =
+ ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT, data, 8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ /* GET_INT Register */
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval =
+ ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1,
+ NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ retval =
+ ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm,
+ (6 + MS_EXTRA_SIZE));
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ /* Block Address */
+ data[1] = 0;
+ data[2] = (u8) (new_blk >> 8);
+ data[3] = (u8) new_blk;
+ /* Page Number
+ * Extra data access mode */
+ if ((end_page - start_page) == 1) {
+ /* Single page access mode */
+ data[4] = 0x20;
+ } else {
+ /* Block access mode */
+ data[4] = 0;
+ }
+ data[5] = start_page;
+ data[6] = 0xF8;
+ data[7] = 0xFF;
+ data[8] = (u8) (log_blk >> 8);
+ data[9] = (u8) log_blk;
+
+ for (i = 0x0A; i < 0x10; i++) {
+ /* ECC */
+ data[i] = 0xFF;
+ }
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval =
+ ms_write_bytes(chip, WRITE_REG, 6 + MS_EXTRA_SIZE,
+ NO_WAIT_INT, data, 16);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+ /* GET_INT Register */
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ for (page_addr = start_page; page_addr < end_page; page_addr++) {
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ udelay(30);
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF,
+ WRITE_PAGE_DATA);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF,
+ WAIT_INT);
+
+ trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CDOR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval =
+ rts51x_transfer_data_partial(chip, SND_BULK_PIPE(chip),
+ (void *)buf, ptr, offset, 512,
+ scsi_sg_count(chip->srb), NULL,
+ 2000);
+ if (retval != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ rts51x_clear_ms_error(chip);
+
+ if (retval == STATUS_TIMEDOUT)
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = rts51x_get_rsp(chip, 1, 2000);
+ if (CHECK_MS_TRANS_FAIL(chip, retval)) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ rts51x_clear_ms_error(chip);
+
+ if (retval == STATUS_TIMEDOUT)
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ /* GET_INT Register */
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if ((end_page - start_page) == 1) {
+ if (!(val & INT_REG_CED)) {
+ /* Command can not be executed */
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ if (page_addr == (end_page - 1)) {
+ if (!(val & INT_REG_CED)) {
+ retval =
+ ms_send_cmd(chip, BLOCK_END,
+ WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+ /* GET_INT Register */
+ retval =
+ ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
+ &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ if ((page_addr == (end_page - 1))
+ || (page_addr == ms_card->page_off)) {
+ if (!(val & INT_REG_CED)) {
+ ms_set_err_code(chip,
+ MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+static int ms_finish_write(struct rts51x_chip *chip, u16 old_blk, u16 new_blk,
+ u16 log_blk, u8 page_off)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, seg_no;
+
+#ifdef MS_SPEEDUP
+ retval = ms_auto_copy_page(chip, old_blk, new_blk, log_blk,
+ page_off, ms_card->page_off + 1);
+#else
+ retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
+ page_off, ms_card->page_off + 1);
+#endif
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ seg_no = old_blk >> 9;
+
+ if (MS_TST_BAD_BLOCK_FLG(ms_card)) {
+ MS_CLR_BAD_BLOCK_FLG(ms_card);
+ ms_set_bad_block(chip, old_blk);
+ } else {
+ retval = ms_erase_block(chip, old_blk);
+ if (retval == STATUS_SUCCESS)
+ ms_set_unused_block(chip, old_blk);
+ }
+
+ ms_set_l2p_tbl(chip, seg_no, log_blk - ms_start_idx[seg_no], new_blk);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_prepare_write(struct rts51x_chip *chip, u16 old_blk, u16 new_blk,
+ u16 log_blk, u8 start_page)
+{
+ int retval;
+
+ if (start_page) {
+#ifdef MS_SPEEDUP
+ retval =
+ ms_auto_copy_page(chip, old_blk, new_blk, log_blk, 0,
+ start_page);
+#else
+ retval =
+ ms_copy_page(chip, old_blk, new_blk, log_blk, 0,
+ start_page);
+#endif
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int ms_delay_write(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_delay_write_tag *delay_write = &(ms_card->delay_write);
+ int retval;
+
+ if (delay_write->delay_write_flag) {
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ delay_write->delay_write_flag = 0;
+ retval = ms_finish_write(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->logblock,
+ delay_write->pageoff);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static inline void ms_rw_fail(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ else
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+}
+
+static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rts51x_chip *chip,
+ u32 start_sector, u16 sector_cnt)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, seg_no;
+ unsigned int offset = 0;
+ u16 old_blk = 0, new_blk = 0, log_blk, total_sec_cnt = sector_cnt;
+ u8 start_page, end_page = 0, page_cnt;
+ u8 *buf;
+ void *ptr = NULL;
+ struct ms_delay_write_tag *delay_write = &(ms_card->delay_write);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ ms_card->counter = 0;
+
+ buf = (u8 *) scsi_sglist(srb);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS) {
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, retval);
+ }
+
+ log_blk = (u16) (start_sector >> ms_card->block_shift);
+ start_page = (u8) (start_sector & ms_card->page_off);
+
+ for (seg_no = 0; seg_no < sizeof(ms_start_idx) / 2; seg_no++) {
+ if (log_blk < ms_start_idx[seg_no + 1])
+ break;
+ }
+
+ if (ms_card->segment[seg_no].build_flag == 0) {
+ retval = ms_build_l2p_tbl(chip, seg_no);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_fail |= MS_CARD;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, retval);
+ }
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ if (delay_write->delay_write_flag &&
+ (delay_write->logblock == log_blk) &&
+ (start_page > delay_write->pageoff)) {
+ delay_write->delay_write_flag = 0;
+#ifdef MS_SPEEDUP
+ retval = ms_auto_copy_page(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ log_blk,
+ delay_write->pageoff,
+ start_page);
+#else
+ retval = ms_copy_page(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ log_blk, delay_write->pageoff,
+ start_page);
+#endif
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, retval);
+ }
+ old_blk = delay_write->old_phyblock;
+ new_blk = delay_write->new_phyblock;
+ } else if (delay_write->delay_write_flag &&
+ (delay_write->logblock == log_blk) &&
+ (start_page == delay_write->pageoff)) {
+ delay_write->delay_write_flag = 0;
+ old_blk = delay_write->old_phyblock;
+ new_blk = delay_write->new_phyblock;
+ } else {
+ retval = ms_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, retval);
+ }
+ old_blk =
+ ms_get_l2p_tbl(chip, seg_no,
+ log_blk - ms_start_idx[seg_no]);
+ new_blk = ms_get_unused_block(chip, seg_no);
+ if ((old_blk == 0xFFFF) || (new_blk == 0xFFFF)) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval =
+ ms_prepare_write(chip, old_blk, new_blk, log_blk,
+ start_page);
+ if (retval != STATUS_SUCCESS) {
+ if (monitor_card_cd(chip, MS_CARD) ==
+ CD_NOT_EXIST) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, retval);
+ }
+ }
+ } else {
+ retval = ms_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, retval);
+ }
+ old_blk =
+ ms_get_l2p_tbl(chip, seg_no,
+ log_blk - ms_start_idx[seg_no]);
+ if (old_blk == 0xFFFF) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ RTS51X_DEBUGP("seg_no = %d, old_blk = 0x%x, new_blk = 0x%x\n", seg_no,
+ old_blk, new_blk);
+
+ while (total_sec_cnt) {
+ if ((start_page + total_sec_cnt) > (ms_card->page_off + 1))
+ end_page = ms_card->page_off + 1;
+ else
+ end_page = start_page + (u8) total_sec_cnt;
+ page_cnt = end_page - start_page;
+
+ RTS51X_DEBUGP("start_page = %d, end_page = %d,"
+ "page_cnt = %d\n",
+ start_page, end_page, page_cnt);
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ retval = ms_read_multiple_pages(chip,
+ old_blk, log_blk,
+ start_page, end_page,
+ buf, &ptr, &offset);
+ else
+ retval = ms_write_multiple_pages(chip, old_blk,
+ new_blk, log_blk,
+ start_page, end_page,
+ buf, &ptr, &offset);
+
+ if (retval != STATUS_SUCCESS) {
+ if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, retval);
+ }
+ /* Update L2P table if need */
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ if (end_page == (ms_card->page_off + 1)) {
+ retval = ms_erase_block(chip, old_blk);
+ if (retval == STATUS_SUCCESS)
+ ms_set_unused_block(chip, old_blk);
+ ms_set_l2p_tbl(chip, seg_no,
+ log_blk - ms_start_idx[seg_no],
+ new_blk);
+ }
+ }
+
+ total_sec_cnt -= page_cnt;
+
+ if (total_sec_cnt == 0)
+ break;
+
+ log_blk++;
+
+ for (seg_no = 0; seg_no < sizeof(ms_start_idx) / 2; seg_no++) {
+ if (log_blk < ms_start_idx[seg_no + 1])
+ break;
+ }
+
+ if (ms_card->segment[seg_no].build_flag == 0) {
+ retval = ms_build_l2p_tbl(chip, seg_no);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_fail |= MS_CARD;
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, retval);
+ }
+ }
+
+ old_blk =
+ ms_get_l2p_tbl(chip, seg_no,
+ log_blk - ms_start_idx[seg_no]);
+ if (old_blk == 0xFFFF) {
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ new_blk = ms_get_unused_block(chip, seg_no);
+ if (new_blk == 0xFFFF) {
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ RTS51X_DEBUGP("seg_no = %d, old_blk = 0x%x, new_blk = 0x%x\n",
+ seg_no, old_blk, new_blk);
+
+ start_page = 0;
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ if (end_page < (ms_card->page_off + 1)) {
+ delay_write->delay_write_flag = 1;
+ delay_write->old_phyblock = old_blk;
+ delay_write->new_phyblock = new_blk;
+ delay_write->logblock = log_blk;
+ delay_write->pageoff = end_page;
+ }
+ }
+
+ scsi_set_resid(srb, 0);
+
+ return STATUS_SUCCESS;
+}
+
+int ms_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 start_sector,
+ u16 sector_cnt)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ if (CHK_MSPRO(ms_card))
+ retval =
+ mspro_rw_multi_sector(srb, chip, start_sector, sector_cnt);
+ else
+ retval =
+ ms_rw_multi_sector(srb, chip, start_sector, sector_cnt);
+
+ return retval;
+}
+
+void ms_free_l2p_tbl(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int i = 0;
+
+ if (ms_card->segment != NULL) {
+ for (i = 0; i < ms_card->segment_cnt; i++) {
+ if (ms_card->segment[i].l2p_table != NULL) {
+ vfree(ms_card->segment[i].l2p_table);
+ ms_card->segment[i].l2p_table = NULL;
+ }
+ if (ms_card->segment[i].free_table != NULL) {
+ vfree(ms_card->segment[i].free_table);
+ ms_card->segment[i].free_table = NULL;
+ }
+ }
+ vfree(ms_card->segment);
+ ms_card->segment = NULL;
+ }
+}
+
+void ms_cleanup_work(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ if (CHK_MSPRO(ms_card)) {
+ if (ms_card->seq_mode) {
+ RTS51X_DEBUGP("MS Pro: stop transmission\n");
+ mspro_stop_seq_mode(chip);
+ ms_card->counter = 0;
+ }
+ if (CHK_MSHG(ms_card)) {
+ u8 value;
+ rts51x_read_register(chip, MS_CFG, &value);
+ if (value & MS_2K_SECTOR_MODE)
+ rts51x_write_register(chip, MS_CFG,
+ MS_2K_SECTOR_MODE, 0x00);
+ }
+ } else if ((!CHK_MSPRO(ms_card))
+ && ms_card->delay_write.delay_write_flag) {
+ RTS51X_DEBUGP("MS: delay write\n");
+ ms_delay_write(chip);
+ ms_card->counter = 0;
+ }
+}
+
+int ms_power_off_card3v3(struct rts51x_chip *chip)
+{
+ int retval;
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_CLK_EN, MS_CLK_EN, 0);
+ if (chip->asic_code)
+ ms_pull_ctl_disable(chip);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL,
+ FPGA_MS_PULL_CTL_BIT | 0x20,
+ FPGA_MS_PULL_CTL_BIT);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, 0);
+ if (!chip->option.FT2_fast_mode) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK,
+ POWER_OFF);
+ }
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+int release_ms_card(struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ RTS51X_DEBUGP("release_ms_card\n");
+
+ ms_card->delay_write.delay_write_flag = 0;
+ ms_card->pro_under_formatting = 0;
+
+ chip->card_ready &= ~MS_CARD;
+ chip->card_fail &= ~MS_CARD;
+ chip->card_wp &= ~MS_CARD;
+
+ ms_free_l2p_tbl(chip);
+
+ rts51x_write_register(chip, SFSM_ED, HW_CMD_STOP, HW_CMD_STOP);
+
+ memset(ms_card->raw_sys_info, 0, 96);
+#ifdef SUPPORT_PCGL_1P18
+ memset(ms_card->raw_model_name, 0, 48);
+#endif
+
+ retval = ms_power_off_card3v3(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
diff --git a/drivers/staging/rts5139/ms.h b/drivers/staging/rts5139/ms.h
new file mode 100644
index 00000000000..f9d46d210f2
--- /dev/null
+++ b/drivers/staging/rts5139/ms.h
@@ -0,0 +1,263 @@
+/* Driver for Realtek RTS51xx USB card reader
+ * Header file
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_MS_H
+#define __RTS51X_MS_H
+
+#include "rts51x_chip.h"
+
+#define MS_MAX_RETRY_COUNT 3
+
+#define MS_EXTRA_SIZE 0x9
+
+#define WRT_PRTCT 0x01
+
+/* Error Code */
+#define MS_NO_ERROR 0x00
+#define MS_CRC16_ERROR 0x80
+#define MS_TO_ERROR 0x40
+#define MS_NO_CARD 0x20
+#define MS_NO_MEMORY 0x10
+#define MS_CMD_NK 0x08
+#define MS_FLASH_READ_ERROR 0x04
+#define MS_FLASH_WRITE_ERROR 0x02
+#define MS_BREQ_ERROR 0x01
+#define MS_NOT_FOUND 0x03
+
+/* Transfer Protocol Command */
+#define READ_PAGE_DATA 0x02
+#define READ_REG 0x04
+#define GET_INT 0x07
+#define WRITE_PAGE_DATA 0x0D
+#define WRITE_REG 0x0B
+#define SET_RW_REG_ADRS 0x08
+#define SET_CMD 0x0E
+
+#define PRO_READ_LONG_DATA 0x02
+#define PRO_READ_SHORT_DATA 0x03
+#define PRO_READ_REG 0x04
+#define PRO_READ_QUAD_DATA 0x05
+#define PRO_GET_INT 0x07
+#define PRO_WRITE_LONG_DATA 0x0D
+#define PRO_WRITE_SHORT_DATA 0x0C
+#define PRO_WRITE_QUAD_DATA 0x0A
+#define PRO_WRITE_REG 0x0B
+#define PRO_SET_RW_REG_ADRS 0x08
+#define PRO_SET_CMD 0x0E
+#define PRO_EX_SET_CMD 0x09
+
+#ifdef SUPPORT_MAGIC_GATE
+#define MG_GET_ID 0x40
+#define MG_SET_LID 0x41
+#define MG_GET_LEKB 0x42
+#define MG_SET_RD 0x43
+#define MG_MAKE_RMS 0x44
+#define MG_MAKE_KSE 0x45
+#define MG_SET_IBD 0x46
+#define MG_GET_IBD 0x47
+#endif
+
+#ifdef XC_POWERCLASS
+#define XC_CHG_POWER 0x16
+#endif
+
+/* ++ CMD over Memory Stick */
+/* Flash CMD */
+#define BLOCK_READ 0xAA
+#define BLOCK_WRITE 0x55
+#define BLOCK_END 0x33
+#define BLOCK_ERASE 0x99
+#define FLASH_STOP 0xCC
+
+/* Function CMD */
+#define SLEEP 0x5A
+#define CLEAR_BUF 0xC3
+#define MS_RESET 0x3C
+/* -- CMD over Memory Stick */
+
+/* ++ CMD over Memory Stick Pro */
+/* Flash CMD */
+#define PRO_READ_DATA 0x20
+#define PRO_WRITE_DATA 0x21
+#define PRO_READ_ATRB 0x24
+#define PRO_STOP 0x25
+#define PRO_ERASE 0x26
+#define PRO_READ_2K_DATA 0x27
+#define PRO_WRITE_2K_DATA 0x28
+
+/* Function CMD */
+#define PRO_FORMAT 0x10
+#define PRO_SLEEP 0x11
+/* -- CMD over Memory Stick Pro */
+
+/* register inside memory stick */
+#define IntReg 0x01
+#define StatusReg0 0x02
+#define StatusReg1 0x03
+
+#define SystemParm 0x10
+#define BlockAdrs 0x11
+#define CMDParm 0x14
+#define PageAdrs 0x15
+
+#define OverwriteFlag 0x16
+#define ManagemenFlag 0x17
+#define LogicalAdrs 0x18
+#define ReserveArea 0x1A
+
+/* register inside memory pro */
+#define Pro_IntReg 0x01
+#define Pro_StatusReg 0x02
+#define Pro_TypeReg 0x04
+#define Pro_IFModeReg 0x05
+#define Pro_CatagoryReg 0x06
+#define Pro_ClassReg 0x07
+
+#define Pro_SystemParm 0x10
+#define Pro_DataCount1 0x11
+#define Pro_DataCount0 0x12
+#define Pro_DataAddr3 0x13
+#define Pro_DataAddr2 0x14
+#define Pro_DataAddr1 0x15
+#define Pro_DataAddr0 0x16
+
+#define Pro_TPCParm 0x17
+#define Pro_CMDParm 0x18
+
+/* define for INT Register */
+#define INT_REG_CED 0x80
+#define INT_REG_ERR 0x40
+#define INT_REG_BREQ 0x20
+#define INT_REG_CMDNK 0x01
+
+/* INT signal */
+#define INT_CED 0x01
+#define INT_ERR 0x02
+#define INT_BREQ 0x04
+#define INT_CMDNK 0x08
+
+/* define for OverwriteFlag Register */
+#define BLOCK_BOOT 0xC0
+#define BLOCK_OK 0x80
+#define PAGE_OK 0x60
+#define DATA_COMPL 0x10
+
+/* define for ManagemenFlag Register */
+#define NOT_BOOT_BLOCK 0x4
+#define NOT_TRANSLATION_TABLE 0x8
+
+/* Header */
+#define HEADER_ID0 (PPBUF_BASE2) /* 0 */
+#define HEADER_ID1 (PPBUF_BASE2 + 1) /* 1 */
+/* System Entry */
+#define DISABLED_BLOCK0 (PPBUF_BASE2 + 0x170 + 4) /* 2 */
+#define DISABLED_BLOCK1 (PPBUF_BASE2 + 0x170 + 5) /* 3 */
+#define DISABLED_BLOCK2 (PPBUF_BASE2 + 0x170 + 6) /* 4 */
+#define DISABLED_BLOCK3 (PPBUF_BASE2 + 0x170 + 7) /* 5 */
+/* Boot & Attribute Information */
+#define BLOCK_SIZE_0 (PPBUF_BASE2 + 0x1a0 + 2) /* 6 */
+#define BLOCK_SIZE_1 (PPBUF_BASE2 + 0x1a0 + 3) /* 7 */
+#define BLOCK_COUNT_0 (PPBUF_BASE2 + 0x1a0 + 4) /* 8 */
+#define BLOCK_COUNT_1 (PPBUF_BASE2 + 0x1a0 + 5) /* 9 */
+#define EBLOCK_COUNT_0 (PPBUF_BASE2 + 0x1a0 + 6) /* 10 */
+#define EBLOCK_COUNT_1 (PPBUF_BASE2 + 0x1a0 + 7) /* 11 */
+#define PAGE_SIZE_0 (PPBUF_BASE2 + 0x1a0 + 8) /* 12 */
+#define PAGE_SIZE_1 (PPBUF_BASE2 + 0x1a0 + 9) /* 13 */
+
+/* joey 2004-08-07 for MS check Procedure */
+#define MS_Device_Type (PPBUF_BASE2 + 0x1D8) /* 14 */
+/* end */
+
+/* joey 2004-05-03 */
+#define MS_4bit_Support (PPBUF_BASE2 + 0x1D3) /* 15 */
+/* end */
+
+#define setPS_NG 1
+#define setPS_Error 0
+
+/* define for Pro_SystemParm Register */
+#define PARALLEL_8BIT_IF 0x40
+#define PARALLEL_4BIT_IF 0x00
+#define SERIAL_IF 0x80
+
+/* define for StatusReg0 Register */
+#define BUF_FULL 0x10
+#define BUF_EMPTY 0x20
+
+/* define for StatusReg1 Register */
+#define MEDIA_BUSY 0x80
+#define FLASH_BUSY 0x40
+#define DATA_ERROR 0x20
+#define STS_UCDT 0x10
+#define EXTRA_ERROR 0x08
+#define STS_UCEX 0x04
+#define FLAG_ERROR 0x02
+#define STS_UCFG 0x01
+
+#define MS_SHORT_DATA_LEN 32
+
+#define FORMAT_SUCCESS 0
+#define FORMAT_FAIL 1
+#define FORMAT_IN_PROGRESS 2
+
+#define MS_SET_BAD_BLOCK_FLG(ms_card) ((ms_card)->multi_flag |= 0x80)
+#define MS_CLR_BAD_BLOCK_FLG(ms_card) ((ms_card)->multi_flag &= 0x7F)
+#define MS_TST_BAD_BLOCK_FLG(ms_card) ((ms_card)->multi_flag & 0x80)
+
+#define CHECK_MS_TRANS_FAIL(chip, retval) \
+ (((retval) != STATUS_SUCCESS) || \
+ (chip->rsp_buf[0] & MS_TRANSFER_ERR))
+
+void mspro_polling_format_status(struct rts51x_chip *chip);
+void mspro_format_sense(struct rts51x_chip *chip, unsigned int lun);
+
+void mspro_stop_seq_mode(struct rts51x_chip *chip);
+int reset_ms_card(struct rts51x_chip *chip);
+int ms_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 start_sector,
+ u16 sector_cnt);
+int mspro_format(struct scsi_cmnd *srb, struct rts51x_chip *chip,
+ int short_data_len, int quick_format);
+void ms_free_l2p_tbl(struct rts51x_chip *chip);
+void ms_cleanup_work(struct rts51x_chip *chip);
+int ms_power_off_card3v3(struct rts51x_chip *chip);
+int release_ms_card(struct rts51x_chip *chip);
+int ms_delay_write(struct rts51x_chip *chip);
+
+#ifdef SUPPORT_MAGIC_GATE
+
+int ms_switch_clock(struct rts51x_chip *chip);
+int ms_write_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 * data,
+ int data_len);
+int ms_read_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 * data,
+ int data_len);
+int ms_set_rw_reg_addr(struct rts51x_chip *chip, u8 read_start, u8 read_cnt,
+ u8 write_start, u8 write_cnt);
+int ms_transfer_data(struct rts51x_chip *chip, u8 trans_mode, u8 tpc,
+ u16 sec_cnt, u8 cfg, int mode_2k, int use_sg, void *buf,
+ int buf_len);
+#endif
+
+#endif /* __RTS51X_MS_H */
diff --git a/drivers/staging/rts5139/ms_mg.c b/drivers/staging/rts5139/ms_mg.c
new file mode 100644
index 00000000000..154b5230aa5
--- /dev/null
+++ b/drivers/staging/rts5139/ms_mg.c
@@ -0,0 +1,642 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "debug.h"
+#include "trace.h"
+#include "rts51x.h"
+#include "rts51x_transport.h"
+#include "rts51x_scsi.h"
+#include "rts51x_card.h"
+#include "ms.h"
+
+#ifdef SUPPORT_MAGIC_GATE
+
+int mg_check_int_error(struct rts51x_chip *chip)
+{
+ u8 value;
+
+ rts51x_read_register(chip, MS_TRANS_CFG, &value);
+ if (value & (INT_ERR | INT_CMDNK))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int mg_send_ex_cmd(struct rts51x_chip *chip, u8 cmd, u8 entry_num)
+{
+ int retval, i;
+ u8 data[8];
+
+ data[0] = cmd;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+ data[4] = 0;
+ data[5] = 0;
+ data[6] = entry_num;
+ data[7] = 0;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval =
+ ms_write_bytes(chip, PRO_EX_SET_CMD, 7, WAIT_INT, data, 8);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+ retval = mg_check_int_error(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int mg_set_tpc_para_sub(struct rts51x_chip *chip, int type, u8 mg_entry_num)
+{
+ int retval;
+ u8 buf[6];
+
+ RTS51X_DEBUGP("--%s--\n", __func__);
+
+ if (type == 0)
+ retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_TPCParm, 1);
+ else
+ retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ buf[0] = 0;
+ buf[1] = 0;
+ if (type == 1) {
+ buf[2] = 0;
+ buf[3] = 0;
+ buf[4] = 0;
+ buf[5] = mg_entry_num;
+ }
+ retval =
+ ms_write_bytes(chip, PRO_WRITE_REG, (type == 0) ? 1 : 6,
+ NO_WAIT_INT, buf, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+/**
+ * Get MagciGate ID and set Leaf ID to medium.
+
+ * After receiving this SCSI command, adapter shall fulfill 2 tasks
+ * below in order:
+ * 1. send GET_ID TPC command to get MagicGate ID and hold it till
+ * Response&challenge CMD.
+ * 2. send SET_ID TPC command to medium with Leaf ID released by host
+ * in this SCSI CMD.
+ */
+int mg_set_leaf_id(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval;
+ int i;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 buf1[32], buf2[12];
+
+ RTS51X_DEBUGP("--%s--\n", __func__);
+
+ if (scsi_bufflen(srb) < 12) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = mg_send_ex_cmd(chip, MG_SET_LID, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ TRACE_RET(chip, retval);
+ }
+
+ memset(buf1, 0, 32);
+ rts51x_get_xfer_buf(buf2, min(12, (int)scsi_bufflen(srb)), srb);
+ for (i = 0; i < 8; i++)
+ buf1[8 + i] = buf2[4 + i];
+ retval =
+ ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT, buf1, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ TRACE_RET(chip, retval);
+ }
+ retval = mg_check_int_error(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+/**
+ * Send Local EKB to host.
+
+ * After receiving this SCSI command, adapter shall read the divided
+ * data(1536 bytes totally) from medium by using READ_LONG_DATA TPC
+ * for 3 times, and report data to host with data-length is 1052 bytes.
+ */
+int mg_get_local_EKB(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval = STATUS_FAIL;
+ int bufflen;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 *buf = NULL;
+
+ RTS51X_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ buf = kmalloc(1540, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ buf[0] = 0x04;
+ buf[1] = 0x1A;
+ buf[2] = 0x00;
+ buf[3] = 0x00;
+
+ retval = mg_send_ex_cmd(chip, MG_GET_LEKB, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_GOTO(chip, GetEKBFinish);
+ }
+
+ retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
+ 3, WAIT_INT, 0, 0, buf + 4, 1536);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ rts51x_write_register(chip, CARD_STOP, MS_STOP | MS_CLR_ERR,
+ MS_STOP | MS_CLR_ERR);
+ TRACE_GOTO(chip, GetEKBFinish);
+ }
+ retval = mg_check_int_error(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_GOTO(chip, GetEKBFinish);
+ }
+
+ bufflen = min(1052, (int)scsi_bufflen(srb));
+ rts51x_set_xfer_buf(buf, bufflen, srb);
+
+GetEKBFinish:
+ kfree(buf);
+ return retval;
+}
+
+/**
+ * Send challenge(host) to medium.
+
+ * After receiving this SCSI command, adapter shall sequentially issues
+ * TPC commands to the medium for writing 8-bytes data as challenge
+ * by host within a short data packet.
+ */
+int mg_chg(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int bufflen;
+ int i;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 buf[32], tmp;
+
+ RTS51X_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = mg_send_ex_cmd(chip, MG_GET_ID, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, retval);
+ }
+
+ retval =
+ ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT, buf, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, retval);
+ }
+ retval = mg_check_int_error(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, retval);
+ }
+
+ memcpy(ms_card->magic_gate_id, buf, 16);
+
+ for (i = 0; i < 2500; i++) {
+ RTS51X_READ_REG(chip, MS_TRANS_CFG, &tmp);
+ if (tmp &
+ (MS_INT_CED | MS_INT_CMDNK | MS_INT_BREQ | MS_INT_ERR))
+ break;
+
+ wait_timeout(1);
+ }
+
+ if (i == 2500) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = mg_send_ex_cmd(chip, MG_SET_RD, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, retval);
+ }
+
+ bufflen = min(12, (int)scsi_bufflen(srb));
+ rts51x_get_xfer_buf(buf, bufflen, srb);
+
+ for (i = 0; i < 8; i++)
+ buf[i] = buf[4 + i];
+ for (i = 0; i < 24; i++)
+ buf[8 + i] = 0;
+ retval =
+ ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT, buf, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, retval);
+ }
+ retval = mg_check_int_error(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, retval);
+ }
+
+ ms_card->mg_auth = 0;
+
+ return STATUS_SUCCESS;
+}
+
+/**
+ * Send Response and Challenge data to host.
+
+ * After receiving this SCSI command, adapter shall communicates with
+ * the medium, get parameters(HRd, Rms, MagicGateID) by using READ_SHORT_DATA
+ * TPC and send the data to host according to certain format required by
+ * MG-R specification.
+ * The paremeter MagicGateID is the one that adapter has obtained from
+ * the medium by TPC commands in Set Leaf ID command phase previously.
+ */
+int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ int bufflen;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 buf1[32], buf2[36], tmp;
+
+ RTS51X_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = mg_send_ex_cmd(chip, MG_MAKE_RMS, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, retval);
+ }
+
+ retval =
+ ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT, buf1, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, retval);
+ }
+ retval = mg_check_int_error(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, retval);
+ }
+
+ buf2[0] = 0x00;
+ buf2[1] = 0x22;
+ buf2[2] = 0x00;
+ buf2[3] = 0x00;
+
+ memcpy(buf2 + 4, ms_card->magic_gate_id, 16);
+ memcpy(buf2 + 20, buf1, 16);
+
+ bufflen = min(36, (int)scsi_bufflen(srb));
+ rts51x_set_xfer_buf(buf2, bufflen, srb);
+
+ for (i = 0; i < 2500; i++) {
+ RTS51X_READ_REG(chip, MS_TRANS_CFG, &tmp);
+ if (tmp & (MS_INT_CED | MS_INT_CMDNK |
+ MS_INT_BREQ | MS_INT_ERR))
+ break;
+
+ wait_timeout(1);
+ }
+
+ if (i == 2500) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+/**
+ * Send response(host) to medium.
+
+ * After receiving this SCSI command, adapter shall sequentially
+ * issues TPC commands to the medium for writing 8-bytes data as
+ * challenge by host within a short data packet.
+ */
+int mg_rsp(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int i;
+ int bufflen;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 buf[32];
+
+ RTS51X_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = mg_send_ex_cmd(chip, MG_MAKE_KSE, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, retval);
+ }
+
+ bufflen = min(12, (int)scsi_bufflen(srb));
+ rts51x_get_xfer_buf(buf, bufflen, srb);
+
+ for (i = 0; i < 8; i++)
+ buf[i] = buf[4 + i];
+ for (i = 0; i < 24; i++)
+ buf[8 + i] = 0;
+ retval =
+ ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT, buf, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, retval);
+ }
+ retval = mg_check_int_error(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, retval);
+ }
+
+ ms_card->mg_auth = 1;
+
+ return STATUS_SUCCESS;
+}
+
+/** * Send ICV data to host.
+
+ * After receiving this SCSI command, adapter shall read the divided
+ * data(1024 bytes totally) from medium by using READ_LONG_DATA TPC
+ * for 2 times, and report data to host with data-length is 1028 bytes.
+ *
+ * Since the extra 4 bytes data is just only a prefix to original data
+ * that read from medium, so that the 4-byte data pushed into Ring buffer
+ * precedes data tramsinssion from medium to Ring buffer by DMA mechanisim
+ * in order to get maximum performance and minimum code size simultaneously.
+ */
+int mg_get_ICV(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int bufflen;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 *buf = NULL;
+
+ RTS51X_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ buf = kmalloc(1028, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ buf[0] = 0x04;
+ buf[1] = 0x02;
+ buf[2] = 0x00;
+ buf[3] = 0x00;
+
+ retval = mg_send_ex_cmd(chip, MG_GET_IBD, ms_card->mg_entry_num);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_GOTO(chip, GetICVFinish);
+ }
+
+ retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
+ 2, WAIT_INT, 0, 0, buf + 4, 1024);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ rts51x_write_register(chip, CARD_STOP, MS_STOP | MS_CLR_ERR,
+ MS_STOP | MS_CLR_ERR);
+ TRACE_GOTO(chip, GetICVFinish);
+ }
+ retval = mg_check_int_error(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_GOTO(chip, GetICVFinish);
+ }
+
+ bufflen = min(1028, (int)scsi_bufflen(srb));
+ rts51x_set_xfer_buf(buf, bufflen, srb);
+
+GetICVFinish:
+ kfree(buf);
+ return retval;
+}
+
+/**
+ * Send ICV data to medium.
+
+ * After receiving this SCSI command, adapter shall receive 1028 bytes
+ * and write the later 1024 bytes to medium by WRITE_LONG_DATA TPC
+ * consecutively.
+ *
+ * Since the first 4-bytes data is just only a prefix to original data
+ * that sent by host, and it should be skipped by shifting DMA pointer
+ * before writing 1024 bytes to medium.
+ */
+int mg_set_ICV(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int bufflen;
+#ifdef MG_SET_ICV_SLOW
+ int i;
+#endif
+ unsigned int lun = SCSI_LUN(srb);
+ u8 *buf = NULL;
+
+ RTS51X_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ buf = kmalloc(1028, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ bufflen = min(1028, (int)scsi_bufflen(srb));
+ rts51x_get_xfer_buf(buf, bufflen, srb);
+
+ retval = mg_send_ex_cmd(chip, MG_SET_IBD, ms_card->mg_entry_num);
+ if (retval != STATUS_SUCCESS) {
+ if (ms_card->mg_auth == 0) {
+ if ((buf[5] & 0xC0) != 0)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
+ }
+ TRACE_GOTO(chip, SetICVFinish);
+ }
+
+#ifdef MG_SET_ICV_SLOW
+ for (i = 0; i < 2; i++) {
+ udelay(50);
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF,
+ PRO_WRITE_LONG_DATA);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF,
+ WAIT_INT);
+
+ trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CDOR, 100);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
+ TRACE_GOTO(chip, SetICVFinish);
+ }
+
+ retval = rts51x_transfer_data_rcc(chip, SND_BULK_PIPE(chip),
+ buf + 4 + i * 512, 512, 0,
+ NULL, 3000, STAGE_DO);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_ms_error(chip);
+ if (ms_card->mg_auth == 0) {
+ if ((buf[5] & 0xC0) != 0)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ }
+ retval = STATUS_FAIL;
+ TRACE_GOTO(chip, SetICVFinish);
+ }
+
+ retval = rts51x_get_rsp(chip, 1, 3000);
+ if (CHECK_MS_TRANS_FAIL(chip, retval)
+ || mg_check_int_error(chip)) {
+ rts51x_clear_ms_error(chip);
+ if (ms_card->mg_auth == 0) {
+ if ((buf[5] & 0xC0) != 0)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ }
+ retval = STATUS_FAIL;
+ TRACE_GOTO(chip, SetICVFinish);
+ }
+ }
+#else
+ retval = ms_transfer_data(chip, MS_TM_AUTO_WRITE, PRO_WRITE_LONG_DATA,
+ 2, WAIT_INT, 0, 0, buf + 4, 1024);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_ms_error(chip);
+ if (ms_card->mg_auth == 0) {
+ if ((buf[5] & 0xC0) != 0)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
+ }
+ TRACE_GOTO(chip, SetICVFinish);
+ }
+#endif
+
+SetICVFinish:
+ kfree(buf);
+ return retval;
+}
+
+#endif /* SUPPORT_MAGIC_GATE */
diff --git a/drivers/staging/rts5139/ms_mg.h b/drivers/staging/rts5139/ms_mg.h
new file mode 100644
index 00000000000..e2ca55085f9
--- /dev/null
+++ b/drivers/staging/rts5139/ms_mg.h
@@ -0,0 +1,41 @@
+/* Driver for Realtek RTS51xx USB card reader
+ * Header file
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_MS_MG_H
+#define __RTS51X_MS_MG_H
+
+#include "rts51x_chip.h"
+#include "ms.h"
+
+int mg_set_leaf_id(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+int mg_get_local_EKB(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+int mg_chg(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+int mg_rsp(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+int mg_get_ICV(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+int mg_set_ICV(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+
+#endif /* __RTS51X_MS_MG_H */
diff --git a/drivers/staging/rts5139/rts51x.c b/drivers/staging/rts5139/rts51x.c
new file mode 100644
index 00000000000..d9cee6d0b12
--- /dev/null
+++ b/drivers/staging/rts5139/rts51x.c
@@ -0,0 +1,967 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/freezer.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/utsname.h>
+#include <linux/usb.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_devinfo.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+
+#include "debug.h"
+#include "ms.h"
+#include "rts51x.h"
+#include "rts51x_chip.h"
+#include "rts51x_card.h"
+#include "rts51x_scsi.h"
+#include "rts51x_transport.h"
+#include "rts51x_fop.h"
+
+MODULE_DESCRIPTION(RTS51X_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+#ifdef SCSI_SCAN_DELAY
+static unsigned int delay_use = 5;
+module_param(delay_use, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");
+#endif
+
+static int auto_delink_en;
+module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
+
+static int ss_en;
+module_param(ss_en, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ss_en, "enable selective suspend");
+
+static int ss_delay = 50;
+module_param(ss_delay, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ss_delay,
+ "seconds to delay before entering selective suspend");
+
+static int needs_remote_wakeup;
+module_param(needs_remote_wakeup, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(needs_remote_wakeup, "ss state needs remote wakeup supported");
+
+#ifdef SUPPORT_FILE_OP
+static const struct file_operations rts51x_fops = {
+ .owner = THIS_MODULE,
+ .read = rts51x_read,
+ .write = rts51x_write,
+ .unlocked_ioctl = rts51x_ioctl,
+ .open = rts51x_open,
+ .release = rts51x_release,
+};
+
+/*
+ * usb class driver info in order to get a minor number from the usb core,
+ * and to have the device registered with the driver core
+ */
+static struct usb_class_driver rts51x_class = {
+ .name = "rts51x%d",
+ .fops = &rts51x_fops,
+ .minor_base = 192,
+};
+#endif
+
+#ifdef CONFIG_PM /* Minimal support for suspend and resume */
+
+static inline void usb_autopm_enable(struct usb_interface *intf)
+{
+ atomic_set(&intf->pm_usage_cnt, 1);
+ usb_autopm_put_interface(intf);
+}
+
+static inline void usb_autopm_disable(struct usb_interface *intf)
+{
+ atomic_set(&intf->pm_usage_cnt, 0);
+ usb_autopm_get_interface(intf);
+}
+
+void rts51x_try_to_enter_ss(struct rts51x_chip *chip)
+{
+ RTS51X_DEBUGP("Ready to enter SS state\n");
+ usb_autopm_enable(chip->usb->pusb_intf);
+}
+
+void rts51x_try_to_exit_ss(struct rts51x_chip *chip)
+{
+ RTS51X_DEBUGP("Exit from SS state\n");
+ usb_autopm_disable(chip->usb->pusb_intf);
+}
+
+int rts51x_suspend(struct usb_interface *iface, pm_message_t message)
+{
+ struct rts51x_chip *chip = usb_get_intfdata(iface);
+
+ RTS51X_DEBUGP("%s, message.event = 0x%x\n", __func__, message.event);
+
+ /* Wait until no command is running */
+ mutex_lock(&chip->usb->dev_mutex);
+
+ chip->fake_card_ready = chip->card_ready;
+ rts51x_do_before_power_down(chip);
+
+ if (message.event == PM_EVENT_AUTO_SUSPEND) {
+ RTS51X_DEBUGP("Enter SS state");
+ chip->resume_from_scsi = 0;
+ RTS51X_SET_STAT(chip, STAT_SS);
+ } else {
+ RTS51X_DEBUGP("Enter SUSPEND state");
+ RTS51X_SET_STAT(chip, STAT_SUSPEND);
+ }
+
+ /* When runtime PM is working, we'll set a flag to indicate
+ * whether we should autoresume when a SCSI request arrives. */
+
+ mutex_unlock(&chip->usb->dev_mutex);
+ return 0;
+}
+
+int rts51x_resume(struct usb_interface *iface)
+{
+ struct rts51x_chip *chip = usb_get_intfdata(iface);
+
+ RTS51X_DEBUGP("%s\n", __func__);
+
+ if (!RTS51X_CHK_STAT(chip, STAT_SS) || !chip->resume_from_scsi) {
+ mutex_lock(&chip->usb->dev_mutex);
+
+ if (chip->option.ss_en) {
+ if (GET_PM_USAGE_CNT(chip) <= 0) {
+ /* Remote wake up, increase pm_usage_cnt */
+ RTS51X_DEBUGP("Incr pm_usage_cnt\n");
+ SET_PM_USAGE_CNT(chip, 1);
+ }
+ }
+
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ rts51x_init_chip(chip);
+ rts51x_init_cards(chip);
+
+ mutex_unlock(&chip->usb->dev_mutex);
+ }
+
+ return 0;
+}
+
+int rts51x_reset_resume(struct usb_interface *iface)
+{
+ struct rts51x_chip *chip = usb_get_intfdata(iface);
+
+ RTS51X_DEBUGP("%s\n", __func__);
+
+ mutex_lock(&chip->usb->dev_mutex);
+
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ if (chip->option.ss_en)
+ SET_PM_USAGE_CNT(chip, 1);
+
+ rts51x_init_chip(chip);
+ rts51x_init_cards(chip);
+
+ mutex_unlock(&chip->usb->dev_mutex);
+
+ /* FIXME: Notify the subdrivers that they need to reinitialize
+ * the device */
+ return 0;
+}
+
+#else /* CONFIG_PM */
+
+void rts51x_try_to_enter_ss(struct rts51x_chip *chip)
+{
+}
+
+void rts51x_try_to_exit_ss(struct rts51x_chip *chip)
+{
+}
+
+#endif /* CONFIG_PM */
+
+/*
+ * The next two routines get called just before and just after
+ * a USB port reset, whether from this driver or a different one.
+ */
+
+int rts51x_pre_reset(struct usb_interface *iface)
+{
+ struct rts51x_chip *chip = usb_get_intfdata(iface);
+
+ RTS51X_DEBUGP("%s\n", __func__);
+
+ /* Make sure no command runs during the reset */
+ mutex_lock(&chip->usb->dev_mutex);
+ return 0;
+}
+
+int rts51x_post_reset(struct usb_interface *iface)
+{
+ struct rts51x_chip *chip = usb_get_intfdata(iface);
+
+ RTS51X_DEBUGP("%s\n", __func__);
+
+ /* Report the reset to the SCSI core */
+ /* usb_stor_report_bus_reset(us); */
+
+ /* FIXME: Notify the subdrivers that they need to reinitialize
+ * the device */
+
+ mutex_unlock(&chip->usb->dev_mutex);
+ return 0;
+}
+
+static int rts51x_control_thread(void *__chip)
+{
+ struct rts51x_chip *chip = (struct rts51x_chip *)__chip;
+ struct Scsi_Host *host = rts51x_to_host(chip);
+
+ for (;;) {
+ if (wait_for_completion_interruptible(&chip->usb->cmnd_ready))
+ break;
+
+ if (test_bit(FLIDX_DISCONNECTING, &chip->usb->dflags)) {
+ RTS51X_DEBUGP("-- exiting from rts51x-control\n");
+ break;
+ }
+
+ /* lock the device pointers */
+ mutex_lock(&(chip->usb->dev_mutex));
+
+ /* lock access to the state */
+ scsi_lock(host);
+
+ /* When we are called with no command pending, we're done */
+ if (chip->srb == NULL) {
+ scsi_unlock(host);
+ mutex_unlock(&chip->usb->dev_mutex);
+ RTS51X_DEBUGP("-- exiting from control thread\n");
+ break;
+ }
+
+ /* has the command timed out *already* ? */
+ if (test_bit(FLIDX_TIMED_OUT, &chip->usb->dflags)) {
+ chip->srb->result = DID_ABORT << 16;
+ goto SkipForAbort;
+ }
+
+ scsi_unlock(host);
+
+ /* reject the command if the direction indicator
+ * is UNKNOWN
+ */
+ if (chip->srb->sc_data_direction == DMA_BIDIRECTIONAL) {
+ RTS51X_DEBUGP("UNKNOWN data direction\n");
+ chip->srb->result = DID_ERROR << 16;
+ }
+
+ /* reject if target != 0 or if LUN is higher than
+ * the maximum known LUN
+ */
+ else if (chip->srb->device->id) {
+ RTS51X_DEBUGP("Bad target number (%d:%d)\n",
+ chip->srb->device->id,
+ chip->srb->device->lun);
+ chip->srb->result = DID_BAD_TARGET << 16;
+ }
+
+ else if (chip->srb->device->lun > chip->max_lun) {
+ RTS51X_DEBUGP("Bad LUN (%d:%d)\n",
+ chip->srb->device->id,
+ chip->srb->device->lun);
+ chip->srb->result = DID_BAD_TARGET << 16;
+ }
+
+ /* we've got a command, let's do it! */
+ else {
+ RTS51X_DEBUG(scsi_show_command(chip->srb));
+ rts51x_invoke_transport(chip->srb, chip);
+ }
+
+ /* lock access to the state */
+ scsi_lock(host);
+
+ /* indicate that the command is done */
+ if (chip->srb->result != DID_ABORT << 16)
+ chip->srb->scsi_done(chip->srb);
+ else
+SkipForAbort :
+ RTS51X_DEBUGP("scsi command aborted\n");
+
+ /* If an abort request was received we need to signal that
+ * the abort has finished. The proper test for this is
+ * the TIMED_OUT flag, not srb->result == DID_ABORT, because
+ * the timeout might have occurred after the command had
+ * already completed with a different result code. */
+ if (test_bit(FLIDX_TIMED_OUT, &chip->usb->dflags)) {
+ complete(&(chip->usb->notify));
+
+ /* Allow USB transfers to resume */
+ clear_bit(FLIDX_ABORTING, &chip->usb->dflags);
+ clear_bit(FLIDX_TIMED_OUT, &chip->usb->dflags);
+ }
+
+ /* finished working on this command */
+ chip->srb = NULL;
+ scsi_unlock(host);
+
+ /* unlock the device pointers */
+ mutex_unlock(&chip->usb->dev_mutex);
+ } /* for (;;) */
+
+ complete(&chip->usb->control_exit);
+
+ /* Wait until we are told to stop */
+/* for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);*/
+ return 0;
+}
+
+static int rts51x_polling_thread(void *__chip)
+{
+ struct rts51x_chip *chip = (struct rts51x_chip *)__chip;
+
+#ifdef SCSI_SCAN_DELAY
+ /* Wait until SCSI scan finished */
+ wait_timeout((delay_use + 5) * HZ);
+#endif
+
+ for (;;) {
+ wait_timeout(POLLING_INTERVAL);
+
+ /* if the device has disconnected, we are free to exit */
+ if (test_bit(FLIDX_DISCONNECTING, &chip->usb->dflags)) {
+ RTS51X_DEBUGP("-- exiting from rts51x-polling\n");
+ break;
+ }
+
+ /* if the device has disconnected, we are free to exit */
+ /* if (kthread_should_stop()) {
+ printk(KERN_INFO "Stop polling thread!\n");
+ break;
+ } */
+
+#ifdef CONFIG_PM
+ if (RTS51X_CHK_STAT(chip, STAT_SS) ||
+ RTS51X_CHK_STAT(chip, STAT_SS_PRE) ||
+ RTS51X_CHK_STAT(chip, STAT_SUSPEND)) {
+ continue;
+ }
+
+ if (ss_en) {
+ if (RTS51X_CHK_STAT(chip, STAT_IDLE)) {
+ if (chip->ss_counter <
+ (ss_delay * 1000 / POLLING_INTERVAL)) {
+ chip->ss_counter++;
+ } else {
+ /* Prepare SS state */
+ RTS51X_SET_STAT(chip, STAT_SS_PRE);
+ rts51x_try_to_enter_ss(chip);
+ continue;
+ }
+ } else {
+ chip->ss_counter = 0;
+ }
+ }
+#endif
+
+ mspro_polling_format_status(chip);
+
+ /* lock the device pointers */
+ mutex_lock(&(chip->usb->dev_mutex));
+
+ rts51x_polling_func(chip);
+
+ /* unlock the device pointers */
+ mutex_unlock(&chip->usb->dev_mutex);
+ } /* for (;;) */
+
+ complete(&chip->usb->polling_exit);
+
+ /* Wait until we are told to stop */
+ /* for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING); */
+ return 0;
+}
+
+#ifdef SCSI_SCAN_DELAY
+/* Thread to carry out delayed SCSI-device scanning */
+static int rts51x_scan_thread(void *__chip)
+{
+ struct rts51x_chip *chip = (struct rts51x_chip *)__chip;
+
+ printk(KERN_DEBUG
+ "rts51x: device found at %d\n", chip->usb->pusb_dev->devnum);
+
+ set_freezable();
+ /* Wait for the timeout to expire or for a disconnect */
+ if (delay_use > 0) {
+ printk(KERN_DEBUG "rts51x: waiting for device "
+ "to settle before scanning\n");
+ wait_event_freezable_timeout(chip->usb->delay_wait,
+ test_bit(FLIDX_DONT_SCAN,
+ &chip->usb->dflags),
+ delay_use * HZ);
+ }
+
+ /* If the device is still connected, perform the scanning */
+ if (!test_bit(FLIDX_DONT_SCAN, &chip->usb->dflags)) {
+ scsi_scan_host(rts51x_to_host(chip));
+ printk(KERN_DEBUG "rts51x: device scan complete\n");
+
+ /* Should we unbind if no devices were detected? */
+ }
+
+ complete_and_exit(&chip->usb->scanning_done, 0);
+}
+#endif
+
+/* Associate our private data with the USB device */
+static int associate_dev(struct rts51x_chip *chip, struct usb_interface *intf)
+{
+ struct rts51x_usb *rts51x = chip->usb;
+#ifdef SUPPORT_FILE_OP
+ int retval;
+#endif
+
+ /* Fill in the device-related fields */
+ rts51x->pusb_dev = interface_to_usbdev(intf);
+ rts51x->pusb_intf = intf;
+ rts51x->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+ RTS51X_DEBUGP("Vendor: 0x%04x, Product: 0x%04x, Revision: 0x%04x\n",
+ le16_to_cpu(rts51x->pusb_dev->descriptor.idVendor),
+ le16_to_cpu(rts51x->pusb_dev->descriptor.idProduct),
+ le16_to_cpu(rts51x->pusb_dev->descriptor.bcdDevice));
+ RTS51X_DEBUGP("Interface Subclass: 0x%02x, Protocol: 0x%02x\n",
+ intf->cur_altsetting->desc.bInterfaceSubClass,
+ intf->cur_altsetting->desc.bInterfaceProtocol);
+
+ /* Store our private data in the interface */
+ usb_set_intfdata(intf, chip);
+
+#ifdef SUPPORT_FILE_OP
+ /* we can register the device now, as it is ready */
+ retval = usb_register_dev(intf, &rts51x_class);
+ if (retval) {
+ /* something prevented us from registering this driver */
+ RTS51X_DEBUGP("Not able to get a minor for this device.");
+ usb_set_intfdata(intf, NULL);
+ return -ENOMEM;
+ }
+#endif
+
+ /* Allocate the device-related DMA-mapped buffers */
+ rts51x->cr = usb_buffer_alloc(rts51x->pusb_dev, sizeof(*rts51x->cr),
+ GFP_KERNEL, &rts51x->cr_dma);
+ if (!rts51x->cr) {
+ RTS51X_DEBUGP("usb_ctrlrequest allocation failed\n");
+ usb_set_intfdata(intf, NULL);
+ return -ENOMEM;
+ }
+
+ rts51x->iobuf = usb_buffer_alloc(rts51x->pusb_dev, RTS51X_IOBUF_SIZE,
+ GFP_KERNEL, &rts51x->iobuf_dma);
+ if (!rts51x->iobuf) {
+ RTS51X_DEBUGP("I/O buffer allocation failed\n");
+ usb_set_intfdata(intf, NULL);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void rts51x_init_options(struct rts51x_chip *chip)
+{
+ struct rts51x_option *option = &(chip->option);
+
+ option->led_blink_speed = 7;
+ option->mspro_formatter_enable = 1;
+
+ option->fpga_sd_sdr104_clk = CLK_100;
+ option->fpga_sd_sdr50_clk = CLK_100;
+ option->fpga_sd_ddr50_clk = CLK_100;
+ option->fpga_sd_hs_clk = CLK_100;
+ option->fpga_mmc_52m_clk = CLK_80;
+ option->fpga_ms_hg_clk = CLK_80;
+ option->fpga_ms_4bit_clk = CLK_80;
+
+ option->asic_sd_sdr104_clk = 98;
+ option->asic_sd_sdr50_clk = 98;
+ option->asic_sd_ddr50_clk = 98;
+ option->asic_sd_hs_clk = 97;
+ option->asic_mmc_52m_clk = 95;
+ option->asic_ms_hg_clk = 116;
+ option->asic_ms_4bit_clk = 77;
+
+ option->sd_ddr_tx_phase = 0;
+ option->mmc_ddr_tx_phase = 1;
+
+ option->sd_speed_prior = 0;
+ option->sd_ctl =
+ SD_PUSH_POINT_AUTO | SD_SAMPLE_POINT_AUTO | SUPPORT_UHS50_MMC44;
+
+ option->ss_en = ss_en;
+ option->ss_delay = ss_delay;
+ option->needs_remote_wakeup = needs_remote_wakeup;
+
+ option->auto_delink_en = auto_delink_en;
+
+ option->FT2_fast_mode = 0;
+ option->pwr_delay = 800;
+ option->xd_rw_step = 0;
+ option->D3318_off_delay = 50;
+ option->delink_delay = 100;
+ option->rts5129_D3318_off_enable = 0;
+ option->sd20_pad_drive = 0;
+ option->reset_or_rw_fail_set_pad_drive = 1;
+ option->rcc_fail_flag = 0;
+ option->rcc_bug_fix_en = 1;
+ option->debounce_num = 2;
+ option->polling_time = 100;
+ option->led_toggle_interval = 6;
+ option->xd_rwn_step = 0;
+ option->sd_send_status_en = 0;
+ option->sdr50_tx_phase = 0x01;
+ option->sdr50_rx_phase = 0x05;
+ option->ddr50_tx_phase = 0x09;
+ option->ddr50_rx_phase = 0x06;
+ option->sdr50_phase_sel = 0;
+ option->sd30_pad_drive = 1;
+ option->ms_errreg_fix = 0;
+ option->reset_mmc_first = 0;
+ option->speed_mmc = 1;
+ option->led_always_on = 0;
+}
+
+/* Get the pipe settings */
+static int get_pipes(struct rts51x_chip *chip)
+{
+ struct rts51x_usb *rts51x = chip->usb;
+ struct usb_host_interface *altsetting =
+ rts51x->pusb_intf->cur_altsetting;
+ int i;
+ struct usb_endpoint_descriptor *ep;
+ struct usb_endpoint_descriptor *ep_in = NULL;
+ struct usb_endpoint_descriptor *ep_out = NULL;
+ struct usb_endpoint_descriptor *ep_int = NULL;
+
+ /*
+ * Find the first endpoint of each type we need.
+ * We are expecting a minimum of 2 endpoints - in and out (bulk).
+ * An optional interrupt-in is OK (necessary for CBI protocol).
+ * We will ignore any others.
+ */
+ for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
+ ep = &altsetting->endpoint[i].desc;
+
+ if (usb_endpoint_xfer_bulk(ep)) {
+ if (usb_endpoint_dir_in(ep)) {
+ if (!ep_in)
+ ep_in = ep;
+ } else {
+ if (!ep_out)
+ ep_out = ep;
+ }
+ }
+
+ else if (usb_endpoint_is_int_in(ep)) {
+ if (!ep_int)
+ ep_int = ep;
+ }
+ }
+
+ if (!ep_in || !ep_out) {
+ RTS51X_DEBUGP("Endpoint sanity check failed!"
+ "Rejecting dev.\n");
+ return -EIO;
+ }
+
+ /* Calculate and store the pipe values */
+ rts51x->send_ctrl_pipe = usb_sndctrlpipe(rts51x->pusb_dev, 0);
+ rts51x->recv_ctrl_pipe = usb_rcvctrlpipe(rts51x->pusb_dev, 0);
+ rts51x->send_bulk_pipe = usb_sndbulkpipe(rts51x->pusb_dev,
+ usb_endpoint_num(ep_out));
+ rts51x->recv_bulk_pipe = usb_rcvbulkpipe(rts51x->pusb_dev,
+ usb_endpoint_num(ep_in));
+ if (ep_int) {
+ rts51x->recv_intr_pipe = usb_rcvintpipe(rts51x->pusb_dev,
+ usb_endpoint_num
+ (ep_int));
+ rts51x->ep_bInterval = ep_int->bInterval;
+ }
+ return 0;
+}
+
+/* Initialize all the dynamic resources we need */
+static int rts51x_acquire_resources(struct rts51x_chip *chip)
+{
+ struct rts51x_usb *rts51x = chip->usb;
+ int retval;
+
+ rts51x->current_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rts51x->current_urb) {
+ RTS51X_DEBUGP("URB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ rts51x->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rts51x->intr_urb) {
+ RTS51X_DEBUGP("URB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ chip->cmd_buf = chip->rsp_buf = rts51x->iobuf;
+
+ rts51x_init_options(chip);
+
+ /* Init rts51xx device */
+ retval = rts51x_init_chip(chip);
+ if (retval != STATUS_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+
+/* Release all our dynamic resources */
+static void rts51x_release_resources(struct rts51x_chip *chip)
+{
+ RTS51X_DEBUGP("-- %s\n", __func__);
+
+ /* Tell the control thread to exit. The SCSI host must
+ * already have been removed and the DISCONNECTING flag set
+ * so that we won't accept any more commands.
+ */
+ RTS51X_DEBUGP("-- sending exit command to thread\n");
+ complete(&chip->usb->cmnd_ready);
+ if (chip->usb->ctl_thread)
+ wait_for_completion(&chip->usb->control_exit);
+ /* kthread_stop(chip->usb->ctl_thread); */
+ if (chip->usb->polling_thread)
+ wait_for_completion(&chip->usb->polling_exit);
+
+ /* if (chip->usb->polling_thread)
+ kthread_stop(chip->usb->polling_thread); */
+
+ wait_timeout(200);
+
+ /* Release rts51xx device here */
+ rts51x_release_chip(chip);
+
+ usb_free_urb(chip->usb->current_urb);
+ usb_free_urb(chip->usb->intr_urb);
+}
+
+/* Dissociate from the USB device */
+static void dissociate_dev(struct rts51x_chip *chip)
+{
+ struct rts51x_usb *rts51x = chip->usb;
+
+ RTS51X_DEBUGP("-- %s\n", __func__);
+
+ /* Free the device-related DMA-mapped buffers */
+ if (rts51x->cr)
+ usb_buffer_free(rts51x->pusb_dev, sizeof(*rts51x->cr),
+ rts51x->cr, rts51x->cr_dma);
+ if (rts51x->iobuf)
+ usb_buffer_free(rts51x->pusb_dev, RTS51X_IOBUF_SIZE,
+ rts51x->iobuf, rts51x->iobuf_dma);
+
+ /* Remove our private data from the interface */
+ usb_set_intfdata(rts51x->pusb_intf, NULL);
+
+#ifdef SUPPORT_FILE_OP
+ /* give back our minor */
+ usb_deregister_dev(rts51x->pusb_intf, &rts51x_class);
+#endif
+
+ kfree(rts51x);
+ chip->usb = NULL;
+}
+
+/* First stage of disconnect processing: stop SCSI scanning,
+ * remove the host, and stop accepting new commands
+ */
+static void quiesce_and_remove_host(struct rts51x_chip *chip)
+{
+ struct rts51x_usb *rts51x = chip->usb;
+ struct Scsi_Host *host = rts51x_to_host(chip);
+
+ /* If the device is really gone, cut short reset delays */
+ if (rts51x->pusb_dev->state == USB_STATE_NOTATTACHED)
+ set_bit(FLIDX_DISCONNECTING, &rts51x->dflags);
+
+#ifdef SCSI_SCAN_DELAY
+ /* Prevent SCSI-scanning (if it hasn't started yet)
+ * and wait for the SCSI-scanning thread to stop.
+ */
+ set_bit(FLIDX_DONT_SCAN, &rts51x->dflags);
+ wake_up(&rts51x->delay_wait);
+ wait_for_completion(&rts51x->scanning_done);
+#endif
+
+ /* Removing the host will perform an orderly shutdown: caches
+ * synchronized, disks spun down, etc.
+ */
+ scsi_remove_host(host);
+
+ /* Prevent any new commands from being accepted and cut short
+ * reset delays.
+ */
+ scsi_lock(host);
+ set_bit(FLIDX_DISCONNECTING, &rts51x->dflags);
+ scsi_unlock(host);
+#ifdef SCSI_SCAN_DELAY
+ wake_up(&rts51x->delay_wait);
+#endif
+}
+
+/* Second stage of disconnect processing: deallocate all resources */
+static void release_everything(struct rts51x_chip *chip)
+{
+ rts51x_release_resources(chip);
+ dissociate_dev(chip);
+
+ /* Drop our reference to the host; the SCSI core will free it
+ * (and "chip" along with it) when the refcount becomes 0. */
+ scsi_host_put(rts51x_to_host(chip));
+}
+
+static int rts51x_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct Scsi_Host *host;
+ struct rts51x_chip *chip;
+ struct rts51x_usb *rts51x;
+ int result;
+ struct task_struct *th;
+
+ RTS51X_DEBUGP("%s detected\n", RTS51X_NAME);
+
+ rts51x = kzalloc(sizeof(struct rts51x_usb), GFP_KERNEL);
+ if (!rts51x) {
+ printk(KERN_WARNING RTS51X_TIP
+ "Unable to allocate rts51x_usb\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Ask the SCSI layer to allocate a host structure, with extra
+ * space at the end for our private us_data structure.
+ */
+ host = scsi_host_alloc(&rts51x_host_template, sizeof(*chip));
+ if (!host) {
+ printk(KERN_WARNING RTS51X_TIP
+ "Unable to allocate the scsi host\n");
+ kfree(rts51x);
+ return -ENOMEM;
+ }
+
+ /*
+ * Allow 16-byte CDBs and thus > 2TB
+ */
+ host->max_cmd_len = 16;
+ chip = host_to_rts51x(host);
+ memset(chip, 0, sizeof(struct rts51x_chip));
+
+ chip->vendor_id = id->idVendor;
+ chip->product_id = id->idProduct;
+
+ mutex_init(&(rts51x->dev_mutex));
+ init_completion(&rts51x->cmnd_ready);
+ init_completion(&rts51x->control_exit);
+ init_completion(&rts51x->polling_exit);
+ init_completion(&(rts51x->notify));
+#ifdef SCSI_SCAN_DELAY
+ init_waitqueue_head(&rts51x->delay_wait);
+ init_completion(&rts51x->scanning_done);
+#endif
+
+ chip->usb = rts51x;
+
+ /* Associate the us_data structure with the USB device */
+ result = associate_dev(chip, intf);
+ if (result)
+ goto BadDevice;
+
+ /* Find the endpoints and calculate pipe values */
+ result = get_pipes(chip);
+ if (result)
+ goto BadDevice;
+
+ /* Acquire all the other resources and add the host */
+ result = rts51x_acquire_resources(chip);
+ if (result)
+ goto BadDevice;
+
+ /* Start up our control thread */
+ th = kthread_run(rts51x_control_thread, chip, RTS51X_CTL_THREAD);
+ if (IS_ERR(th)) {
+ printk(KERN_WARNING RTS51X_TIP
+ "Unable to start control thread\n");
+ result = PTR_ERR(th);
+ goto BadDevice;
+ }
+ rts51x->ctl_thread = th;
+
+ result = scsi_add_host(rts51x_to_host(chip), &rts51x->pusb_intf->dev);
+ if (result) {
+ printk(KERN_WARNING RTS51X_TIP "Unable to add the scsi host\n");
+ goto BadDevice;
+ }
+#ifdef SCSI_SCAN_DELAY
+ /* Start up the thread for delayed SCSI-device scanning */
+ th = kthread_create(rts51x_scan_thread, chip, RTS51X_SCAN_THREAD);
+ if (IS_ERR(th)) {
+ printk(KERN_WARNING RTS51X_TIP
+ "Unable to start the device-scanning thread\n");
+ complete(&rts51x->scanning_done);
+ quiesce_and_remove_host(chip);
+ result = PTR_ERR(th);
+ goto BadDevice;
+ }
+
+ wake_up_process(th);
+#else
+ scsi_scan_host(rts51x_to_host(chip));
+#endif
+
+ /* Start up our polling thread */
+ th = kthread_run(rts51x_polling_thread, chip, RTS51X_POLLING_THREAD);
+ if (IS_ERR(th)) {
+ printk(KERN_WARNING RTS51X_TIP
+ "Unable to start polling thread\n");
+ result = PTR_ERR(th);
+ goto BadDevice;
+ }
+ rts51x->polling_thread = th;
+
+#ifdef CONFIG_PM
+ if (ss_en) {
+ rts51x->pusb_intf->needs_remote_wakeup = needs_remote_wakeup;
+ SET_PM_USAGE_CNT(chip, 1);
+ RTS51X_DEBUGP("pm_usage_cnt = %d\n", GET_PM_USAGE_CNT(chip));
+ }
+#endif
+
+ return 0;
+
+ /* We come here if there are any problems */
+BadDevice:
+ RTS51X_DEBUGP("rts51x_probe() failed\n");
+ release_everything(chip);
+ return result;
+}
+
+static void rts51x_disconnect(struct usb_interface *intf)
+{
+ struct rts51x_chip *chip = (struct rts51x_chip *)usb_get_intfdata(intf);
+
+ RTS51X_DEBUGP("rts51x_disconnect() called\n");
+ quiesce_and_remove_host(chip);
+ release_everything(chip);
+}
+
+/***********************************************************************
+ * Initialization and registration
+ ***********************************************************************/
+
+struct usb_device_id rts5139_usb_ids[] = {
+ {USB_DEVICE(0x0BDA, 0x0139)},
+ {USB_DEVICE(0x0BDA, 0x0129)},
+ {} /* Terminating entry */
+};
+EXPORT_SYMBOL_GPL(rts5139_usb_ids);
+
+MODULE_DEVICE_TABLE(usb, rts5139_usb_ids);
+
+struct usb_driver rts51x_driver = {
+ .name = RTS51X_NAME,
+ .probe = rts51x_probe,
+ .disconnect = rts51x_disconnect,
+ .suspend = rts51x_suspend,
+ .resume = rts51x_resume,
+ .reset_resume = rts51x_reset_resume,
+ .pre_reset = rts51x_pre_reset,
+ .post_reset = rts51x_post_reset,
+ .id_table = rts5139_usb_ids,
+ .soft_unbind = 1,
+};
+
+static int __init rts51x_init(void)
+{
+ int retval;
+
+ printk(KERN_INFO "Initializing %s USB card reader driver...\n",
+ RTS51X_NAME);
+
+ /* register the driver, return usb_register return code if error */
+ retval = usb_register(&rts51x_driver);
+ if (retval == 0) {
+ printk(KERN_INFO
+ "Realtek %s USB card reader support registered.\n",
+ RTS51X_NAME);
+ }
+ return retval;
+}
+
+static void __exit rts51x_exit(void)
+{
+ RTS51X_DEBUGP("rts51x_exit() called\n");
+
+ /* Deregister the driver
+ * This will cause disconnect() to be called for each
+ * attached unit
+ */
+ RTS51X_DEBUGP("-- calling usb_deregister()\n");
+ usb_deregister(&rts51x_driver);
+}
+
+module_init(rts51x_init);
+module_exit(rts51x_exit);
diff --git a/drivers/staging/rts5139/rts51x.h b/drivers/staging/rts5139/rts51x.h
new file mode 100644
index 00000000000..9415d5c0550
--- /dev/null
+++ b/drivers/staging/rts5139/rts51x.h
@@ -0,0 +1,205 @@
+/* Driver for Realtek RTS51xx USB card reader
+ * Header file
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_H
+#define __RTS51X_H
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/cdrom.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_devinfo.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+
+#define DRIVER_VERSION "v1.04"
+
+#define RTS51X_DESC "Realtek RTS5139/29 USB card reader driver"
+#define RTS51X_NAME "rts5139"
+#define RTS51X_CTL_THREAD "rts5139-control"
+#define RTS51X_SCAN_THREAD "rts5139-scan"
+#define RTS51X_POLLING_THREAD "rts5139-polling"
+
+#define POLLING_IN_THREAD
+/* #define SCSI_SCAN_DELAY */
+#define SUPPORT_FILE_OP
+
+#define wait_timeout_x(task_state, msecs) \
+do { \
+ set_current_state((task_state)); \
+ schedule_timeout((msecs) * HZ / 1000); \
+} while (0)
+
+#define wait_timeout(msecs) wait_timeout_x(TASK_INTERRUPTIBLE, (msecs))
+
+#define SCSI_LUN(srb) ((srb)->device->lun)
+
+/* Size of the DMA-mapped I/O buffer */
+#define RTS51X_IOBUF_SIZE 1024
+/* Size of the autosense data buffer */
+#define RTS51X_SENSE_SIZE 18
+
+/* Dynamic bitflag definitions (dflags): used in set_bit() etc. */
+#define FLIDX_URB_ACTIVE 0 /* current_urb is in use */
+#define FLIDX_SG_ACTIVE 1 /* current_sg is in use */
+#define FLIDX_ABORTING 2 /* abort is in progress */
+#define FLIDX_DISCONNECTING 3 /* disconnect in progress */
+#define FLIDX_RESETTING 4 /* device reset in progress */
+#define FLIDX_TIMED_OUT 5 /* SCSI midlayer timed out */
+#define FLIDX_DONT_SCAN 6 /* don't scan (disconnect) */
+
+struct rts51x_chip;
+
+struct rts51x_usb {
+ /* The device we're working with
+ * It's important to note:
+ * (o) you must hold dev_mutex to change pusb_dev
+ */
+ struct mutex dev_mutex; /* protect pusb_dev */
+ struct usb_device *pusb_dev; /* this usb_device */
+ struct usb_interface *pusb_intf; /* this interface */
+
+ unsigned long dflags; /* dynamic atomic bitflags */
+
+ unsigned int send_bulk_pipe; /* cached pipe values */
+ unsigned int recv_bulk_pipe;
+ unsigned int send_ctrl_pipe;
+ unsigned int recv_ctrl_pipe;
+ unsigned int recv_intr_pipe;
+
+ u8 ifnum; /* interface number */
+ u8 ep_bInterval; /* interrupt interval */
+
+ /* control and bulk communications data */
+ struct urb *current_urb; /* USB requests */
+ struct urb *intr_urb; /* Interrupt USB request */
+ struct usb_ctrlrequest *cr; /* control requests */
+ struct usb_sg_request current_sg; /* scatter-gather req. */
+ unsigned char *iobuf; /* I/O buffer */
+ dma_addr_t cr_dma; /* buffer DMA addresses */
+ dma_addr_t iobuf_dma;
+ struct task_struct *ctl_thread; /* the control thread */
+ struct task_struct *polling_thread; /* the polling thread */
+
+ /* mutual exclusion and synchronization structures */
+ struct completion cmnd_ready; /* to sleep thread on */
+ struct completion control_exit; /* control thread exit */
+ struct completion polling_exit; /* polling thread exit */
+ struct completion notify; /* thread begin/end */
+#ifdef SCSI_SCAN_DELAY
+ wait_queue_head_t delay_wait; /* wait during scan, reset */
+ struct completion scanning_done; /* wait for scan thread */
+#endif
+};
+
+extern struct usb_driver rts51x_driver;
+
+static inline void get_current_time(u8 *timeval_buf, int buf_len)
+{
+ struct timeval tv;
+
+ if (!timeval_buf || (buf_len < 8))
+ return;
+
+ do_gettimeofday(&tv);
+
+ timeval_buf[0] = (u8) (tv.tv_sec >> 24);
+ timeval_buf[1] = (u8) (tv.tv_sec >> 16);
+ timeval_buf[2] = (u8) (tv.tv_sec >> 8);
+ timeval_buf[3] = (u8) (tv.tv_sec);
+ timeval_buf[4] = (u8) (tv.tv_usec >> 24);
+ timeval_buf[5] = (u8) (tv.tv_usec >> 16);
+ timeval_buf[6] = (u8) (tv.tv_usec >> 8);
+ timeval_buf[7] = (u8) (tv.tv_usec);
+}
+
+#define SND_CTRL_PIPE(chip) ((chip)->usb->send_ctrl_pipe)
+#define RCV_CTRL_PIPE(chip) ((chip)->usb->recv_ctrl_pipe)
+#define SND_BULK_PIPE(chip) ((chip)->usb->send_bulk_pipe)
+#define RCV_BULK_PIPE(chip) ((chip)->usb->recv_bulk_pipe)
+#define RCV_INTR_PIPE(chip) ((chip)->usb->recv_intr_pipe)
+
+/* The scsi_lock() and scsi_unlock() macros protect the sm_state and the
+ * single queue element srb for write access */
+#define scsi_unlock(host) spin_unlock_irq(host->host_lock)
+#define scsi_lock(host) spin_lock_irq(host->host_lock)
+
+#define GET_PM_USAGE_CNT(chip) \
+ atomic_read(&((chip)->usb->pusb_intf->pm_usage_cnt))
+#define SET_PM_USAGE_CNT(chip, cnt) \
+ atomic_set(&((chip)->usb->pusb_intf->pm_usage_cnt), (cnt))
+
+/* Compatible macros while we switch over */
+static inline void *usb_buffer_alloc(struct usb_device *dev, size_t size,
+ gfp_t mem_flags, dma_addr_t *dma)
+{
+ return usb_alloc_coherent(dev, size, mem_flags, dma);
+}
+
+static inline void usb_buffer_free(struct usb_device *dev, size_t size,
+ void *addr, dma_addr_t dma)
+{
+ return usb_free_coherent(dev, size, addr, dma);
+}
+
+/* Convert between us_data and the corresponding Scsi_Host */
+static inline struct Scsi_Host *rts51x_to_host(struct rts51x_chip *chip)
+{
+ return container_of((void *)chip, struct Scsi_Host, hostdata);
+}
+
+static inline struct rts51x_chip *host_to_rts51x(struct Scsi_Host *host)
+{
+ return (struct rts51x_chip *)(host->hostdata);
+}
+
+/* struct scsi_cmnd transfer buffer access utilities */
+enum xfer_buf_dir { TO_XFER_BUF, FROM_XFER_BUF };
+
+/* General routines provided by the usb-storage standard core */
+#ifdef CONFIG_PM
+void rts51x_try_to_enter_ss(struct rts51x_chip *chip);
+void rts51x_try_to_exit_ss(struct rts51x_chip *chip);
+int rts51x_suspend(struct usb_interface *iface, pm_message_t message);
+int rts51x_resume(struct usb_interface *iface);
+int rts51x_reset_resume(struct usb_interface *iface);
+#else
+#define rts51x_suspend NULL
+#define rts51x_resume NULL
+#define rts51x_reset_resume NULL
+#endif
+
+extern struct scsi_host_template rts51x_host_template;
+
+#endif /* __RTS51X_H */
diff --git a/drivers/staging/rts5139/rts51x_card.c b/drivers/staging/rts5139/rts51x_card.c
new file mode 100644
index 00000000000..424a84581b8
--- /dev/null
+++ b/drivers/staging/rts5139/rts51x_card.c
@@ -0,0 +1,986 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+
+#include "debug.h"
+#include "rts51x.h"
+#include "rts51x_chip.h"
+#include "rts51x_card.h"
+#include "rts51x_transport.h"
+#include "rts51x_sys.h"
+#include "xd.h"
+#include "sd.h"
+#include "ms.h"
+
+void do_remaining_work(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ if (chip->card_ready & SD_CARD) {
+ if (sd_card->seq_mode) {
+ RTS51X_SET_STAT(chip, STAT_RUN);
+ sd_card->counter++;
+ } else {
+ sd_card->counter = 0;
+ }
+ }
+
+ if (chip->card_ready & XD_CARD) {
+ if (xd_card->delay_write.delay_write_flag) {
+ RTS51X_SET_STAT(chip, STAT_RUN);
+ xd_card->counter++;
+ } else {
+ xd_card->counter = 0;
+ }
+ }
+
+ if (chip->card_ready & MS_CARD) {
+ if (CHK_MSPRO(ms_card)) {
+ if (ms_card->seq_mode) {
+ RTS51X_SET_STAT(chip, STAT_RUN);
+ ms_card->counter++;
+ } else {
+ ms_card->counter = 0;
+ }
+ } else {
+ if (ms_card->delay_write.delay_write_flag) {
+ RTS51X_SET_STAT(chip, STAT_RUN);
+ ms_card->counter++;
+ } else {
+ ms_card->counter = 0;
+ }
+ }
+ }
+
+ if (sd_card->counter > POLLING_WAIT_CNT)
+ sd_cleanup_work(chip);
+
+ if (xd_card->counter > POLLING_WAIT_CNT)
+ xd_cleanup_work(chip);
+
+ if (ms_card->counter > POLLING_WAIT_CNT)
+ ms_cleanup_work(chip);
+}
+
+void do_reset_xd_card(struct rts51x_chip *chip)
+{
+ int retval;
+
+ if (chip->card2lun[XD_CARD] >= MAX_ALLOWED_LUN_CNT)
+ return;
+
+ retval = reset_xd_card(chip);
+ if (retval == STATUS_SUCCESS) {
+ chip->card_ready |= XD_CARD;
+ chip->card_fail &= ~XD_CARD;
+ chip->rw_card[chip->card2lun[XD_CARD]] = xd_rw;
+ } else {
+ chip->card_ready &= ~XD_CARD;
+ chip->card_fail |= XD_CARD;
+ chip->capacity[chip->card2lun[XD_CARD]] = 0;
+ chip->rw_card[chip->card2lun[XD_CARD]] = NULL;
+
+ rts51x_init_cmd(chip);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK,
+ POWER_OFF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_CLK_EN, XD_CLK_EN, 0);
+ rts51x_send_cmd(chip, MODE_C, 100);
+ }
+}
+
+void do_reset_sd_card(struct rts51x_chip *chip)
+{
+ int retval;
+
+ if (chip->card2lun[SD_CARD] >= MAX_ALLOWED_LUN_CNT)
+ return;
+
+ retval = reset_sd_card(chip);
+ if (retval == STATUS_SUCCESS) {
+ chip->card_ready |= SD_CARD;
+ chip->card_fail &= ~SD_CARD;
+ chip->rw_card[chip->card2lun[SD_CARD]] = sd_rw;
+ } else {
+ chip->card_ready &= ~SD_CARD;
+ chip->card_fail |= SD_CARD;
+ chip->capacity[chip->card2lun[SD_CARD]] = 0;
+ chip->rw_card[chip->card2lun[SD_CARD]] = NULL;
+
+ rts51x_init_cmd(chip);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK,
+ POWER_OFF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, 0);
+ rts51x_send_cmd(chip, MODE_C, 100);
+ }
+}
+
+void do_reset_ms_card(struct rts51x_chip *chip)
+{
+ int retval;
+
+ if (chip->card2lun[MS_CARD] >= MAX_ALLOWED_LUN_CNT)
+ return;
+
+ retval = reset_ms_card(chip);
+ if (retval == STATUS_SUCCESS) {
+ chip->card_ready |= MS_CARD;
+ chip->card_fail &= ~MS_CARD;
+ chip->rw_card[chip->card2lun[MS_CARD]] = ms_rw;
+ } else {
+ chip->card_ready &= ~MS_CARD;
+ chip->card_fail |= MS_CARD;
+ chip->capacity[chip->card2lun[MS_CARD]] = 0;
+ chip->rw_card[chip->card2lun[MS_CARD]] = NULL;
+
+ rts51x_init_cmd(chip);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK,
+ POWER_OFF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_CLK_EN, MS_CLK_EN, 0);
+ rts51x_send_cmd(chip, MODE_C, 100);
+ }
+}
+
+void card_cd_debounce(struct rts51x_chip *chip, u8 *need_reset,
+ u8 *need_release)
+{
+ int retval;
+ u8 release_map = 0, reset_map = 0;
+ u8 value;
+
+ retval = rts51x_get_card_status(chip, &(chip->card_status));
+#ifdef SUPPORT_OCP
+ chip->ocp_stat = (chip->card_status >> 4) & 0x03;
+#endif
+
+ if (retval != STATUS_SUCCESS)
+ goto Exit_Debounce;
+
+ if (chip->card_exist) {
+ rts51x_clear_start_time(chip);
+ retval = rts51x_read_register(chip, CARD_INT_PEND, &value);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_ep0_write_register(chip, MC_FIFO_CTL, FIFO_FLUSH,
+ FIFO_FLUSH);
+ rts51x_ep0_write_register(chip, SFSM_ED, 0xf8, 0xf8);
+ value = 0;
+ }
+
+ if (chip->card_exist & XD_CARD) {
+ if (!(chip->card_status & XD_CD))
+ release_map |= XD_CARD;
+ } else if (chip->card_exist & SD_CARD) {
+ /* if (!(chip->card_status & SD_CD)) { */
+ if (!(chip->card_status & SD_CD) || (value & SD_INT))
+ release_map |= SD_CARD;
+ } else if (chip->card_exist & MS_CARD) {
+ /* if (!(chip->card_status & MS_CD)) { */
+ if (!(chip->card_status & MS_CD) || (value & MS_INT))
+ release_map |= MS_CARD;
+ }
+ } else {
+ if (chip->card_status & XD_CD) {
+ rts51x_clear_start_time(chip);
+ reset_map |= XD_CARD;
+ } else if (chip->card_status & SD_CD) {
+ rts51x_clear_start_time(chip);
+ reset_map |= SD_CARD;
+ } else if (chip->card_status & MS_CD) {
+ rts51x_clear_start_time(chip);
+ reset_map |= MS_CARD;
+ } else {
+ if (rts51x_check_start_time(chip))
+ rts51x_set_start_time(chip);
+ }
+ }
+
+ if (CHECK_PKG(chip, QFN24) && reset_map) {
+ if (chip->card_exist & XD_CARD) {
+ reset_map = 0;
+ goto Exit_Debounce;
+ }
+ }
+
+ if (reset_map) {
+ int xd_cnt = 0, sd_cnt = 0, ms_cnt = 0;
+ int i;
+
+ for (i = 0; i < (chip->option.debounce_num); i++) {
+ retval =
+ rts51x_get_card_status(chip, &(chip->card_status));
+ if (retval != STATUS_SUCCESS) {
+ reset_map = release_map = 0;
+ goto Exit_Debounce;
+ }
+ if (chip->card_status & XD_CD)
+ xd_cnt++;
+ else
+ xd_cnt = 0;
+ if (chip->card_status & SD_CD)
+ sd_cnt++;
+ else
+ sd_cnt = 0;
+ if (chip->card_status & MS_CD)
+ ms_cnt++;
+ else
+ ms_cnt = 0;
+ wait_timeout(30);
+ }
+
+ reset_map = 0;
+ if (!(chip->card_exist & XD_CARD)
+ && (xd_cnt > (chip->option.debounce_num - 1))) {
+ reset_map |= XD_CARD;
+ }
+ if (!(chip->card_exist & SD_CARD)
+ && (sd_cnt > (chip->option.debounce_num - 1))) {
+ reset_map |= SD_CARD;
+ }
+ if (!(chip->card_exist & MS_CARD)
+ && (ms_cnt > (chip->option.debounce_num - 1))) {
+ reset_map |= MS_CARD;
+ }
+ }
+ rts51x_write_register(chip, CARD_INT_PEND, XD_INT | MS_INT | SD_INT,
+ XD_INT | MS_INT | SD_INT);
+
+Exit_Debounce:
+ if (need_reset)
+ *need_reset = reset_map;
+ if (need_release)
+ *need_release = release_map;
+}
+
+void rts51x_init_cards(struct rts51x_chip *chip)
+{
+ u8 need_reset = 0, need_release = 0;
+
+ card_cd_debounce(chip, &need_reset, &need_release);
+
+ if (need_release) {
+ RTS51X_DEBUGP("need_release = 0x%x\n", need_release);
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+#ifdef SUPPORT_OCP
+ if (chip->ocp_stat & (MS_OCP_NOW | MS_OCP_EVER)) {
+ rts51x_write_register(chip, OCPCTL, MS_OCP_CLEAR,
+ MS_OCP_CLEAR);
+ chip->ocp_stat = 0;
+ RTS51X_DEBUGP("Clear OCP status.\n");
+ }
+#endif
+
+ if (need_release & XD_CARD) {
+ chip->card_exist &= ~XD_CARD;
+ chip->card_ejected = 0;
+ if (chip->card_ready & XD_CARD) {
+ release_xd_card(chip);
+ chip->rw_card[chip->card2lun[XD_CARD]] = NULL;
+ clear_bit(chip->card2lun[XD_CARD],
+ &(chip->lun_mc));
+ }
+ }
+
+ if (need_release & SD_CARD) {
+ chip->card_exist &= ~SD_CARD;
+ chip->card_ejected = 0;
+ if (chip->card_ready & SD_CARD) {
+ release_sd_card(chip);
+ chip->rw_card[chip->card2lun[SD_CARD]] = NULL;
+ clear_bit(chip->card2lun[SD_CARD],
+ &(chip->lun_mc));
+ }
+ }
+
+ if (need_release & MS_CARD) {
+ chip->card_exist &= ~MS_CARD;
+ chip->card_ejected = 0;
+ if (chip->card_ready & MS_CARD) {
+ release_ms_card(chip);
+ chip->rw_card[chip->card2lun[MS_CARD]] = NULL;
+ clear_bit(chip->card2lun[MS_CARD],
+ &(chip->lun_mc));
+ }
+ }
+ }
+
+ if (need_reset && !chip->card_ready) {
+ RTS51X_DEBUGP("need_reset = 0x%x\n", need_reset);
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ if (need_reset & XD_CARD) {
+ chip->card_exist |= XD_CARD;
+ do_reset_xd_card(chip);
+ } else if (need_reset & SD_CARD) {
+ chip->card_exist |= SD_CARD;
+ do_reset_sd_card(chip);
+ } else if (need_reset & MS_CARD) {
+ chip->card_exist |= MS_CARD;
+ do_reset_ms_card(chip);
+ }
+ }
+}
+
+void rts51x_release_cards(struct rts51x_chip *chip)
+{
+ if (chip->card_ready & SD_CARD) {
+ sd_cleanup_work(chip);
+ release_sd_card(chip);
+ chip->card_ready &= ~SD_CARD;
+ }
+
+ if (chip->card_ready & XD_CARD) {
+ xd_cleanup_work(chip);
+ release_xd_card(chip);
+ chip->card_ready &= ~XD_CARD;
+ }
+
+ if (chip->card_ready & MS_CARD) {
+ ms_cleanup_work(chip);
+ release_ms_card(chip);
+ chip->card_ready &= ~MS_CARD;
+ }
+}
+
+static inline u8 double_depth(u8 depth)
+{
+ return ((depth > 1) ? (depth - 1) : depth);
+}
+
+int switch_ssc_clock(struct rts51x_chip *chip, int clk)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 N = (u8) (clk - 2), min_N, max_N;
+ u8 mcu_cnt, div, max_div, ssc_depth;
+ int sd_vpclk_phase_reset = 0;
+
+ if (chip->cur_clk == clk)
+ return STATUS_SUCCESS;
+
+ min_N = 60;
+ max_N = 120;
+ max_div = CLK_DIV_4;
+
+ RTS51X_DEBUGP("Switch SSC clock to %dMHz\n", clk);
+
+ if ((clk <= 2) || (N > max_N))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ mcu_cnt = (u8) (60 / clk + 3);
+ if (mcu_cnt > 15)
+ mcu_cnt = 15;
+ /* To make sure that the SSC clock div_n is
+ * equal or greater than min_N */
+ div = CLK_DIV_1;
+ while ((N < min_N) && (div < max_div)) {
+ N = (N + 2) * 2 - 2;
+ div++;
+ }
+ RTS51X_DEBUGP("N = %d, div = %d\n", N, div);
+
+ if (chip->option.ssc_en) {
+ if (chip->cur_card == SD_CARD) {
+ if (CHK_SD_SDR104(sd_card)) {
+ ssc_depth = chip->option.ssc_depth_sd_sdr104;
+ } else if (CHK_SD_SDR50(sd_card)) {
+ ssc_depth = chip->option.ssc_depth_sd_sdr50;
+ } else if (CHK_SD_DDR50(sd_card)) {
+ ssc_depth =
+ double_depth(chip->option.
+ ssc_depth_sd_ddr50);
+ } else if (CHK_SD_HS(sd_card)) {
+ ssc_depth =
+ double_depth(chip->option.ssc_depth_sd_hs);
+ } else if (CHK_MMC_52M(sd_card)
+ || CHK_MMC_DDR52(sd_card)) {
+ ssc_depth =
+ double_depth(chip->option.
+ ssc_depth_mmc_52m);
+ } else {
+ ssc_depth =
+ double_depth(chip->option.
+ ssc_depth_low_speed);
+ }
+ } else if (chip->cur_card == MS_CARD) {
+ if (CHK_MSPRO(ms_card)) {
+ if (CHK_HG8BIT(ms_card)) {
+ ssc_depth =
+ double_depth(chip->option.
+ ssc_depth_ms_hg);
+ } else {
+ ssc_depth =
+ double_depth(chip->option.
+ ssc_depth_ms_4bit);
+ }
+ } else {
+ if (CHK_MS4BIT(ms_card)) {
+ ssc_depth =
+ double_depth(chip->option.
+ ssc_depth_ms_4bit);
+ } else {
+ ssc_depth =
+ double_depth(chip->option.
+ ssc_depth_low_speed);
+ }
+ }
+ } else {
+ ssc_depth =
+ double_depth(chip->option.ssc_depth_low_speed);
+ }
+
+ if (ssc_depth) {
+ if (div == CLK_DIV_2) {
+ /* If clock divided by 2, ssc depth must
+ * be multiplied by 2 */
+ if (ssc_depth > 1)
+ ssc_depth -= 1;
+ else
+ ssc_depth = SSC_DEPTH_2M;
+ } else if (div == CLK_DIV_4) {
+ /* If clock divided by 4, ssc depth must
+ * be multiplied by 4 */
+ if (ssc_depth > 2)
+ ssc_depth -= 2;
+ else
+ ssc_depth = SSC_DEPTH_2M;
+ }
+ }
+ } else {
+ /* Disable SSC */
+ ssc_depth = 0;
+ }
+
+ RTS51X_DEBUGP("ssc_depth = %d\n", ssc_depth);
+
+ rts51x_init_cmd(chip);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, CLK_CHANGE);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CLK_DIV, 0x3F,
+ (div << 4) | mcu_cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SSC_CTL2, SSC_DEPTH_MASK,
+ ssc_depth);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, N);
+ if (sd_vpclk_phase_reset) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ retval = rts51x_send_cmd(chip, MODE_C, 2000);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ if (chip->option.ssc_en && ssc_depth)
+ rts51x_write_register(chip, SSC_CTL1, 0xff, 0xD0);
+ else
+ rts51x_write_register(chip, SSC_CTL1, 0xff, 0x50);
+ udelay(100);
+ RTS51X_WRITE_REG(chip, CLK_DIV, CLK_CHANGE, 0);
+
+ chip->cur_clk = clk;
+
+ return STATUS_SUCCESS;
+}
+
+int switch_normal_clock(struct rts51x_chip *chip, int clk)
+{
+ int retval;
+ u8 sel, div, mcu_cnt;
+ int sd_vpclk_phase_reset = 0;
+
+ if (chip->cur_clk == clk)
+ return STATUS_SUCCESS;
+
+ if (chip->cur_card == SD_CARD) {
+ struct sd_info *sd_card = &(chip->sd_card);
+ if (CHK_SD30_SPEED(sd_card) || CHK_MMC_DDR52(sd_card))
+ sd_vpclk_phase_reset = 1;
+ }
+
+ switch (clk) {
+ case CLK_20:
+ RTS51X_DEBUGP("Switch clock to 20MHz\n");
+ sel = SSC_80;
+ div = CLK_DIV_4;
+ mcu_cnt = 5;
+ break;
+
+ case CLK_30:
+ RTS51X_DEBUGP("Switch clock to 30MHz\n");
+ sel = SSC_60;
+ div = CLK_DIV_2;
+ mcu_cnt = 4;
+ break;
+
+ case CLK_40:
+ RTS51X_DEBUGP("Switch clock to 40MHz\n");
+ sel = SSC_80;
+ div = CLK_DIV_2;
+ mcu_cnt = 3;
+ break;
+
+ case CLK_50:
+ RTS51X_DEBUGP("Switch clock to 50MHz\n");
+ sel = SSC_100;
+ div = CLK_DIV_2;
+ mcu_cnt = 3;
+ break;
+
+ case CLK_60:
+ RTS51X_DEBUGP("Switch clock to 60MHz\n");
+ sel = SSC_60;
+ div = CLK_DIV_1;
+ mcu_cnt = 3;
+ break;
+
+ case CLK_80:
+ RTS51X_DEBUGP("Switch clock to 80MHz\n");
+ sel = SSC_80;
+ div = CLK_DIV_1;
+ mcu_cnt = 2;
+ break;
+
+ case CLK_100:
+ RTS51X_DEBUGP("Switch clock to 100MHz\n");
+ sel = SSC_100;
+ div = CLK_DIV_1;
+ mcu_cnt = 2;
+ break;
+
+ /* case CLK_120:
+ RTS51X_DEBUGP("Switch clock to 120MHz\n");
+ sel = SSC_120;
+ div = CLK_DIV_1;
+ mcu_cnt = 2;
+ break;
+
+ case CLK_150:
+ RTS51X_DEBUGP("Switch clock to 150MHz\n");
+ sel = SSC_150;
+ div = CLK_DIV_1;
+ mcu_cnt = 2;
+ break; */
+
+ default:
+ RTS51X_DEBUGP("Try to switch to an illegal clock (%d)\n",
+ clk);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (!sd_vpclk_phase_reset) {
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE,
+ CLK_CHANGE);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CLK_DIV, 0x3F,
+ (div << 4) | mcu_cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SSC_CLK_FPGA_SEL, 0xFF,
+ sel);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, 0);
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ } else {
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE,
+ CLK_CHANGE);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CLK_DIV, 0x3F,
+ (div << 4) | mcu_cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SSC_CLK_FPGA_SEL, 0xFF,
+ sel);
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ udelay(200);
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ udelay(200);
+
+ RTS51X_WRITE_REG(chip, CLK_DIV, CLK_CHANGE, 0);
+ }
+
+ chip->cur_clk = clk;
+
+ return STATUS_SUCCESS;
+}
+
+int card_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 sec_addr,
+ u16 sec_cnt)
+{
+ int retval;
+ unsigned int lun = SCSI_LUN(srb);
+ int i;
+
+ if (chip->rw_card[lun] == NULL)
+ return STATUS_FAIL;
+
+ RTS51X_DEBUGP("%s card, sector addr: 0x%x, sector cnt: %d\n",
+ (srb->sc_data_direction ==
+ DMA_TO_DEVICE) ? "Write" : "Read", sec_addr, sec_cnt);
+
+ chip->rw_need_retry = 0;
+ for (i = 0; i < 3; i++) {
+ retval = chip->rw_card[lun] (srb, chip, sec_addr, sec_cnt);
+ if (retval != STATUS_SUCCESS) {
+ CATCH_TRIGGER(chip);
+ if (chip->option.reset_or_rw_fail_set_pad_drive) {
+ rts51x_write_register(chip, CARD_DRIVE_SEL,
+ SD20_DRIVE_MASK,
+ DRIVE_8mA);
+ }
+ }
+
+ if (!chip->rw_need_retry)
+ break;
+
+ RTS51X_DEBUGP("Retry RW, (i = %d\n)", i);
+ }
+
+ return retval;
+}
+
+u8 get_lun_card(struct rts51x_chip *chip, unsigned int lun)
+{
+ if ((chip->card_ready & chip->lun2card[lun]) == XD_CARD)
+ return (u8) XD_CARD;
+ else if ((chip->card_ready & chip->lun2card[lun]) == SD_CARD)
+ return (u8) SD_CARD;
+ else if ((chip->card_ready & chip->lun2card[lun]) == MS_CARD)
+ return (u8) MS_CARD;
+
+ return 0;
+}
+
+int card_share_mode(struct rts51x_chip *chip, int card)
+{
+ u8 value;
+
+ if (card == SD_CARD)
+ value = CARD_SHARE_SD;
+ else if (card == MS_CARD)
+ value = CARD_SHARE_MS;
+ else if (card == XD_CARD)
+ value = CARD_SHARE_XD;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTS51X_WRITE_REG(chip, CARD_SHARE_MODE, CARD_SHARE_MASK, value);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_select_card(struct rts51x_chip *chip, int card)
+{
+ int retval;
+
+ if (chip->cur_card != card) {
+ u8 mod;
+
+ if (card == SD_CARD)
+ mod = SD_MOD_SEL;
+ else if (card == MS_CARD)
+ mod = MS_MOD_SEL;
+ else if (card == XD_CARD)
+ mod = XD_MOD_SEL;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ RTS51X_WRITE_REG(chip, CARD_SELECT, 0x07, mod);
+ chip->cur_card = card;
+
+ retval = card_share_mode(chip, card);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+void eject_card(struct rts51x_chip *chip, unsigned int lun)
+{
+ RTS51X_DEBUGP("eject card\n");
+ RTS51X_SET_STAT(chip, STAT_RUN);
+ do_remaining_work(chip);
+
+ if ((chip->card_ready & chip->lun2card[lun]) == SD_CARD) {
+ release_sd_card(chip);
+ chip->card_ejected |= SD_CARD;
+ chip->card_ready &= ~SD_CARD;
+ chip->capacity[lun] = 0;
+ } else if ((chip->card_ready & chip->lun2card[lun]) == XD_CARD) {
+ release_xd_card(chip);
+ chip->card_ejected |= XD_CARD;
+ chip->card_ready &= ~XD_CARD;
+ chip->capacity[lun] = 0;
+ } else if ((chip->card_ready & chip->lun2card[lun]) == MS_CARD) {
+ release_ms_card(chip);
+ chip->card_ejected |= MS_CARD;
+ chip->card_ready &= ~MS_CARD;
+ chip->capacity[lun] = 0;
+ }
+ rts51x_write_register(chip, CARD_INT_PEND, XD_INT | MS_INT | SD_INT,
+ XD_INT | MS_INT | SD_INT);
+}
+
+void trans_dma_enable(enum dma_data_direction dir, struct rts51x_chip *chip,
+ u32 byte_cnt, u8 pack_size)
+{
+ if (pack_size > DMA_1024)
+ pack_size = DMA_512;
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ RING_BUFFER);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MC_DMA_TC3, 0xFF,
+ (u8) (byte_cnt >> 24));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MC_DMA_TC2, 0xFF,
+ (u8) (byte_cnt >> 16));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MC_DMA_TC1, 0xFF,
+ (u8) (byte_cnt >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MC_DMA_TC0, 0xFF, (u8) byte_cnt);
+
+ if (dir == DMA_FROM_DEVICE) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MC_DMA_CTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_FROM_CARD | DMA_EN | pack_size);
+ } else {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, MC_DMA_CTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_TO_CARD | DMA_EN | pack_size);
+ }
+}
+
+int enable_card_clock(struct rts51x_chip *chip, u8 card)
+{
+ u8 clk_en = 0;
+
+ if (card & XD_CARD)
+ clk_en |= XD_CLK_EN;
+ if (card & SD_CARD)
+ clk_en |= SD_CLK_EN;
+ if (card & MS_CARD)
+ clk_en |= MS_CLK_EN;
+
+ RTS51X_WRITE_REG(chip, CARD_CLK_EN, clk_en, clk_en);
+
+ return STATUS_SUCCESS;
+}
+
+int disable_card_clock(struct rts51x_chip *chip, u8 card)
+{
+ u8 clk_en = 0;
+
+ if (card & XD_CARD)
+ clk_en |= XD_CLK_EN;
+ if (card & SD_CARD)
+ clk_en |= SD_CLK_EN;
+ if (card & MS_CARD)
+ clk_en |= MS_CLK_EN;
+
+ RTS51X_WRITE_REG(chip, CARD_CLK_EN, clk_en, 0);
+
+ return STATUS_SUCCESS;
+}
+
+int card_power_on(struct rts51x_chip *chip, u8 card)
+{
+ u8 mask, val1, val2;
+
+ mask = POWER_MASK;
+ val1 = PARTIAL_POWER_ON;
+ val2 = POWER_ON;
+
+#ifdef SD_XD_IO_FOLLOW_PWR
+ if ((card == SD_CARD) || (card == XD_CARD)) {
+ RTS51X_WRITE_REG(chip, CARD_PWR_CTL, mask | LDO3318_PWR_MASK,
+ val1 | LDO_SUSPEND);
+ /* RTS51X_WRITE_REG(chip, CARD_PWR_CTL,
+ LDO3318_PWR_MASK, LDO_SUSPEND); */
+ }
+ /* else if(card==XD_CARD)
+ {
+ RTS51X_WRITE_REG(chip, CARD_PWR_CTL,
+ mask|LDO3318_PWR_MASK, val1|LDO_SUSPEND);
+ //RTS51X_WRITE_REG(chip, CARD_PWR_CTL,
+ // LDO3318_PWR_MASK, LDO_SUSPEND);
+ } */
+ else {
+#endif
+ RTS51X_WRITE_REG(chip, CARD_PWR_CTL, mask, val1);
+#ifdef SD_XD_IO_FOLLOW_PWR
+ }
+#endif
+ udelay(chip->option.pwr_delay);
+ RTS51X_WRITE_REG(chip, CARD_PWR_CTL, mask, val2);
+#ifdef SD_XD_IO_FOLLOW_PWR
+ if (card == SD_CARD) {
+ rts51x_write_register(chip, CARD_PWR_CTL, LDO3318_PWR_MASK,
+ LDO_ON);
+ }
+#endif
+
+ return STATUS_SUCCESS;
+}
+
+int card_power_off(struct rts51x_chip *chip, u8 card)
+{
+ u8 mask, val;
+
+ mask = POWER_MASK;
+ val = POWER_OFF;
+ RTS51X_WRITE_REG(chip, CARD_PWR_CTL, mask, val);
+
+ return STATUS_SUCCESS;
+}
+
+int monitor_card_cd(struct rts51x_chip *chip, u8 card)
+{
+ int retval;
+ u8 card_cd[32] = { 0 };
+
+ card_cd[SD_CARD] = SD_CD;
+ card_cd[XD_CARD] = XD_CD;
+ card_cd[MS_CARD] = MS_CD;
+
+ retval = rts51x_get_card_status(chip, &(chip->card_status));
+ if (retval != STATUS_SUCCESS)
+ return CD_NOT_EXIST;
+
+ if (chip->card_status & card_cd[card])
+ return CD_EXIST;
+
+ return CD_NOT_EXIST;
+}
+
+int toggle_gpio(struct rts51x_chip *chip, u8 gpio)
+{
+ int retval;
+ u8 temp_reg;
+ u8 gpio_output[4] = {
+ 0x01,
+ };
+ u8 gpio_oe[4] = {
+ 0x02,
+ };
+ if (chip->rts5179) {
+ retval = rts51x_ep0_read_register(chip, CARD_GPIO, &temp_reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ temp_reg ^= gpio_oe[gpio];
+ temp_reg &= 0xfe; /* bit 0 always set 0 */
+ retval =
+ rts51x_ep0_write_register(chip, CARD_GPIO, 0x03, temp_reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ retval = rts51x_ep0_read_register(chip, CARD_GPIO, &temp_reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ temp_reg ^= gpio_output[gpio];
+ retval =
+ rts51x_ep0_write_register(chip, CARD_GPIO, 0xFF,
+ temp_reg | gpio_oe[gpio]);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int turn_on_led(struct rts51x_chip *chip, u8 gpio)
+{
+ int retval;
+ u8 gpio_oe[4] = {
+ 0x02,
+ };
+ u8 gpio_mask[4] = {
+ 0x03,
+ };
+
+ retval =
+ rts51x_ep0_write_register(chip, CARD_GPIO, gpio_mask[gpio],
+ gpio_oe[gpio]);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int turn_off_led(struct rts51x_chip *chip, u8 gpio)
+{
+ int retval;
+ u8 gpio_output[4] = {
+ 0x01,
+ };
+ u8 gpio_oe[4] = {
+ 0x02,
+ };
+ u8 gpio_mask[4] = {
+ 0x03,
+ };
+
+ retval =
+ rts51x_ep0_write_register(chip, CARD_GPIO, gpio_mask[gpio],
+ gpio_oe[gpio] | gpio_output[gpio]);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
diff --git a/drivers/staging/rts5139/rts51x_card.h b/drivers/staging/rts5139/rts51x_card.h
new file mode 100644
index 00000000000..ac3c1e741ab
--- /dev/null
+++ b/drivers/staging/rts5139/rts51x_card.h
@@ -0,0 +1,881 @@
+/* Driver for Realtek RTS51xx USB card reader
+ * Header file
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_CARD_H
+#define __RTS51X_CARD_H
+
+#include "rts51x_chip.h"
+
+/* Register bit definition */
+
+/* Card Power Control Register */
+#define POWER_OFF 0x03
+#define PARTIAL_POWER_ON 0x02
+#define POWER_ON 0x00
+#define POWER_MASK 0x03
+#define LDO3318_PWR_MASK 0x0C
+#define LDO_ON 0x00
+#define LDO_SUSPEND 0x08
+#define LDO_OFF 0x0C
+#define DV3318_AUTO_PWR_OFF 0x10
+#define FORCE_LDO_POWERB 0x60
+
+/* Card Output Enable Register */
+#define XD_OUTPUT_EN 0x02
+#define SD_OUTPUT_EN 0x04
+#define MS_OUTPUT_EN 0x08
+
+/* System Clock Control Register */
+
+/* System Clock Divider Register */
+#define CLK_CHANGE 0x80
+#define CLK_DIV_1 0x00
+#define CLK_DIV_2 0x01
+#define CLK_DIV_4 0x02
+#define CLK_DIV_8 0x03
+
+/* System Clock Select Register */
+#define SSC_60 0
+#define SSC_80 1
+#define SSC_100 2
+#define SSC_120 3
+#define SSC_150 4
+
+/* Card Clock Enable Register */
+#define XD_CLK_EN 0x02
+#define SD_CLK_EN 0x04
+#define MS_CLK_EN 0x08
+
+/* Card Select Register */
+#define XD_MOD_SEL 1
+#define SD_MOD_SEL 2
+#define MS_MOD_SEL 3
+
+/* Card Transfer Reset Register */
+#define XD_STOP 0x02
+#define SD_STOP 0x04
+#define MS_STOP 0x08
+#define XD_CLR_ERR 0x20
+#define SD_CLR_ERR 0x40
+#define MS_CLR_ERR 0x80
+
+/* SD30_drive_sel */
+#define SD30_DRIVE_MASK 0x07
+
+/* CARD_DRIVE_SEL */
+#define SD20_DRIVE_MASK 0x03
+#define DRIVE_4mA 0x00
+#define DRIVE_8mA 0x01
+#define DRIVE_12mA 0x02
+
+/* FPGA_PULL_CTL */
+#define FPGA_MS_PULL_CTL_EN 0xEF
+#define FPGA_SD_PULL_CTL_EN 0xF7
+#define FPGA_XD_PULL_CTL_EN1 0xFE
+#define FPGA_XD_PULL_CTL_EN2 0xFD
+#define FPGA_XD_PULL_CTL_EN3 0xFB
+
+#define FPGA_MS_PULL_CTL_BIT 0x10
+#define FPGA_SD_PULL_CTL_BIT 0x08
+
+/* Card Data Source Register */
+#define PINGPONG_BUFFER 0x01
+#define RING_BUFFER 0x00
+
+/* SFSM_ED */
+#define HW_CMD_STOP 0x80
+#define CLR_STAGE_STALL 0x08
+#define CARD_ERR 0x10
+
+/* CARD_SHARE_MODE */
+#define CARD_SHARE_LQFP48 0x04
+#define CARD_SHARE_QFN24 0x00
+#define CARD_SHARE_LQFP_SEL 0x04
+#define CARD_SHARE_XD 0x00
+#define CARD_SHARE_SD 0x01
+#define CARD_SHARE_MS 0x02
+#define CARD_SHARE_MASK 0x03
+
+/* CARD_AUTO_BLINK */
+#define BLINK_ENABLE 0x08
+#define BLINK_SPEED_MASK 0x07
+
+/* CARD_GPIO */
+#define GPIO_OE 0x02
+#define GPIO_OUTPUT 0x01
+
+/* CARD_CLK_SOURCE */
+#define CRC_FIX_CLK (0x00 << 0)
+#define CRC_VAR_CLK0 (0x01 << 0)
+#define CRC_VAR_CLK1 (0x02 << 0)
+#define SD30_FIX_CLK (0x00 << 2)
+#define SD30_VAR_CLK0 (0x01 << 2)
+#define SD30_VAR_CLK1 (0x02 << 2)
+#define SAMPLE_FIX_CLK (0x00 << 4)
+#define SAMPLE_VAR_CLK0 (0x01 << 4)
+#define SAMPLE_VAR_CLK1 (0x02 << 4)
+
+/* DCM_DRP_CTL */
+#define DCM_RESET 0x08
+#define DCM_LOCKED 0x04
+#define DCM_208M 0x00
+#define DCM_TX 0x01
+#define DCM_RX 0x02
+
+/* DCM_DRP_TRIG */
+#define DRP_START 0x80
+#define DRP_DONE 0x40
+
+/* DCM_DRP_CFG */
+#define DRP_WRITE 0x80
+#define DRP_READ 0x00
+#define DCM_WRITE_ADDRESS_50 0x50
+#define DCM_WRITE_ADDRESS_51 0x51
+#define DCM_READ_ADDRESS_00 0x00
+#define DCM_READ_ADDRESS_51 0x51
+
+/* HW_VERSION */
+#define FPGA_VER 0x80
+#define HW_VER_MASK 0x0F
+
+/* CD_DEGLITCH_EN */
+#define DISABLE_SD_CD 0x08
+#define DISABLE_MS_CD 0x10
+#define DISABLE_XD_CD 0x20
+#define SD_CD_DEGLITCH_EN 0x01
+#define MS_CD_DEGLITCH_EN 0x02
+#define XD_CD_DEGLITCH_EN 0x04
+
+/* OCPCTL */
+#define CARD_OC_DETECT_EN 0x08
+#define CARD_OC_CLR 0x01
+
+/* CARD_DMA1_CTL */
+#define EXTEND_DMA1_ASYNC_SIGNAL 0x02
+
+/* HS_USB_STAT */
+#define USB_HI_SPEED 0x01
+
+/* CFG_MODE_1 */
+#define RTS5179 0x02
+
+/* SYS_DUMMY0 */
+#define NYET_EN 0x01
+#define NYET_MSAK 0x01
+
+/* SSC_CTL1 */
+#define SSC_RSTB 0x80
+#define SSC_8X_EN 0x40
+#define SSC_FIX_FRAC 0x20
+#define SSC_SEL_1M 0x00
+#define SSC_SEL_2M 0x08
+#define SSC_SEL_4M 0x10
+#define SSC_SEL_8M 0x18
+
+/* SSC_CTL2 */
+#define SSC_DEPTH_MASK 0x03
+#define SSC_DEPTH_DISALBE 0x00
+#define SSC_DEPTH_2M 0x01
+#define SSC_DEPTH_1M 0x02
+#define SSC_DEPTH_512K 0x03
+
+/* LDO_POWER_CFG */
+#define TUNE_SD18_MASK 0x1C
+#define TUNE_SD18_1V7 0x00
+#define TUNE_SD18_1V8 (0x01 << 2)
+#define TUNE_SD18_1V9 (0x02 << 2)
+#define TUNE_SD18_2V0 (0x03 << 2)
+#define TUNE_SD18_2V7 (0x04 << 2)
+#define TUNE_SD18_2V8 (0x05 << 2)
+#define TUNE_SD18_2V9 (0x06 << 2)
+#define TUNE_SD18_3V3 (0x07 << 2)
+
+/* XD_CP_WAITTIME */
+#define WAIT_1F 0x00
+#define WAIT_3F 0x01
+#define WAIT_7F 0x02
+#define WAIT_FF 0x03
+
+/* XD_INIT */
+#define XD_PWR_OFF_DELAY0 0x00
+#define XD_PWR_OFF_DELAY1 0x02
+#define XD_PWR_OFF_DELAY2 0x04
+#define XD_PWR_OFF_DELAY3 0x06
+#define XD_AUTO_PWR_OFF_EN 0xF7
+#define XD_NO_AUTO_PWR_OFF 0x08
+
+/* XD_DTCTL */
+/* XD_CATCTL */
+#define XD_TIME_RWN_1 0x00
+#define XD_TIME_RWN_STEP 0x20
+#define XD_TIME_RW_1 0x00
+#define XD_TIME_RW_STEP 0x04
+#define XD_TIME_SETUP_1 0x00
+#define XD_TIME_SETUP_STEP 0x01
+
+/* XD_CTL */
+#define XD_ECC2_UNCORRECTABLE 0x80
+#define XD_ECC2_ERROR 0x40
+#define XD_ECC1_UNCORRECTABLE 0x20
+#define XD_ECC1_ERROR 0x10
+#define XD_RDY 0x04
+#define XD_CE_EN 0xFD
+#define XD_CE_DISEN 0x02
+#define XD_WP_EN 0xFE
+#define XD_WP_DISEN 0x01
+
+/* XD_TRANSFER */
+#define XD_TRANSFER_START 0x80
+#define XD_TRANSFER_END 0x40
+#define XD_PPB_EMPTY 0x20
+#define XD_ERR 0x10
+#define XD_RESET 0x00
+#define XD_ERASE 0x01
+#define XD_READ_STATUS 0x02
+#define XD_READ_ID 0x03
+#define XD_READ_REDUNDANT 0x04
+#define XD_READ_PAGES 0x05
+#define XD_SET_CMD 0x06
+#define XD_NORMAL_READ 0x07
+#define XD_WRITE_PAGES 0x08
+#define XD_NORMAL_WRITE 0x09
+#define XD_WRITE_REDUNDANT 0x0A
+#define XD_SET_ADDR 0x0B
+#define XD_COPY_PAGES 0x0C
+
+/* XD_CFG */
+#define XD_PPB_TO_SIE 0x80
+#define XD_TO_PPB_ONLY 0x00
+#define XD_BA_TRANSFORM 0x40
+#define XD_BA_NO_TRANSFORM 0x00
+#define XD_NO_CALC_ECC 0x20
+#define XD_CALC_ECC 0x00
+#define XD_IGNORE_ECC 0x10
+#define XD_CHECK_ECC 0x00
+#define XD_DIRECT_TO_RB 0x08
+#define XD_ADDR_MASK 0x07
+#define XD_ADDR_LENGTH_0 0x00
+#define XD_ADDR_LENGTH_1 0x01
+#define XD_ADDR_LENGTH_2 0x02
+#define XD_ADDR_LENGTH_3 0x03
+#define XD_ADDR_LENGTH_4 0x04
+
+/* XD_PAGE_STATUS */
+#define XD_GPG 0xFF
+#define XD_BPG 0x00
+
+/* XD_BLOCK_STATUS */
+#define XD_GBLK 0xFF
+#define XD_LATER_BBLK 0xF0
+
+/* XD_PARITY */
+#define XD_ECC2_ALL1 0x80
+#define XD_ECC1_ALL1 0x40
+#define XD_BA2_ALL0 0x20
+#define XD_BA1_ALL0 0x10
+#define XD_BA1_BA2_EQL 0x04
+#define XD_BA2_VALID 0x02
+#define XD_BA1_VALID 0x01
+
+/* XD_CHK_DATA_STATUS */
+#define XD_PGSTS_ZEROBIT_OVER4 0x00
+#define XD_PGSTS_NOT_FF 0x02
+#define XD_AUTO_CHK_DATA_STATUS 0x01
+
+/* SD_CFG1 */
+#define SD_CLK_DIVIDE_0 0x00
+#define SD_CLK_DIVIDE_256 0xC0
+#define SD_CLK_DIVIDE_128 0x80
+#define SD_CLK_DIVIDE_MASK 0xC0
+#define SD_BUS_WIDTH_1 0x00
+#define SD_BUS_WIDTH_4 0x01
+#define SD_BUS_WIDTH_8 0x02
+#define SD_ASYNC_FIFO_RST 0x10
+#define SD_20_MODE 0x00
+#define SD_DDR_MODE 0x04
+#define SD_30_MODE 0x08
+
+/* SD_CFG2 */
+#define SD_CALCULATE_CRC7 0x00
+#define SD_NO_CALCULATE_CRC7 0x80
+#define SD_CHECK_CRC16 0x00
+#define SD_NO_CHECK_CRC16 0x40
+#define SD_WAIT_CRC_TO_EN 0x20
+#define SD_WAIT_BUSY_END 0x08
+#define SD_NO_WAIT_BUSY_END 0x00
+#define SD_CHECK_CRC7 0x00
+#define SD_NO_CHECK_CRC7 0x04
+#define SD_RSP_LEN_0 0x00
+#define SD_RSP_LEN_6 0x01
+#define SD_RSP_LEN_17 0x02
+/* SD/MMC Response Type Definition */
+/* SD_CALCULATE_CRC7, SD_CHECK_CRC16,
+ * SD_NO_WAIT_BUSY_END, SD_NO_CHECK_CRC7,
+ * SD_RSP_LEN_0 */
+#define SD_RSP_TYPE_R0 0x04
+/* SD_CALCULATE_CRC7, SD_CHECK_CRC16,
+ * SD_NO_WAIT_BUSY_END, SD_CHECK_CRC7,
+ * SD_RSP_LEN_6 */
+#define SD_RSP_TYPE_R1 0x01
+/* SD_CALCULATE_CRC7, SD_CHECK_CRC16,
+ * SD_WAIT_BUSY_END, SD_CHECK_CRC7,
+ * SD_RSP_LEN_6 */
+#define SD_RSP_TYPE_R1b 0x09
+/* SD_CALCULATE_CRC7, SD_CHECK_CRC16,
+ * SD_NO_WAIT_BUSY_END, SD_CHECK_CRC7,
+ * SD_RSP_LEN_17 */
+#define SD_RSP_TYPE_R2 0x02
+/* SD_CALCULATE_CRC7, SD_CHECK_CRC16,
+ * SD_NO_WAIT_BUSY_END, SD_NO_CHECK_CRC7,
+ * SD_RSP_LEN_6 */
+#define SD_RSP_TYPE_R3 0x05
+/* SD_CALCULATE_CRC7, SD_CHECK_CRC16,
+ * SD_NO_WAIT_BUSY_END, SD_NO_CHECK_CRC7,
+ * SD_RSP_LEN_6 */
+#define SD_RSP_TYPE_R4 0x05
+/* SD_CALCULATE_CRC7, SD_CHECK_CRC16,
+ * SD_NO_WAIT_BUSY_END, SD_CHECK_CRC7,
+ * SD_RSP_LEN_6 */
+#define SD_RSP_TYPE_R5 0x01
+/* SD_CALCULATE_CRC7, SD_CHECK_CRC16,
+ * SD_NO_WAIT_BUSY_END, SD_CHECK_CRC7,
+ * SD_RSP_LEN_6 */
+#define SD_RSP_TYPE_R6 0x01
+/* SD_CALCULATE_CRC7, SD_CHECK_CRC16,
+ * SD_NO_WAIT_BUSY_END, SD_CHECK_CRC7,
+ * SD_RSP_LEN_6 */
+#define SD_RSP_TYPE_R7 0x01
+
+/* SD_CFG3 */
+#define SD_RSP_80CLK_TIMEOUT_EN 0x01
+
+/* SD_STAT1 */
+#define SD_CRC7_ERR 0x80
+#define SD_CRC16_ERR 0x40
+#define SD_CRC_WRITE_ERR 0x20
+#define SD_CRC_WRITE_ERR_MASK 0x1C
+#define GET_CRC_TIME_OUT 0x02
+#define SD_TUNING_COMPARE_ERR 0x01
+
+/* SD_STAT2 */
+#define SD_RSP_80CLK_TIMEOUT 0x01
+
+/* SD_BUS_STAT */
+#define SD_CLK_TOGGLE_EN 0x80
+#define SD_CLK_FORCE_STOP 0x40
+#define SD_DAT3_STATUS 0x10
+#define SD_DAT2_STATUS 0x08
+#define SD_DAT1_STATUS 0x04
+#define SD_DAT0_STATUS 0x02
+#define SD_CMD_STATUS 0x01
+
+/* SD_PAD_CTL */
+#define SD_IO_USING_1V8 0x80
+#define SD_IO_USING_3V3 0x7F
+#define TYPE_A_DRIVING 0x00
+#define TYPE_B_DRIVING 0x01
+#define TYPE_C_DRIVING 0x02
+#define TYPE_D_DRIVING 0x03
+
+/* SD_SAMPLE_POINT_CTL */
+#define DDR_FIX_RX_DAT 0x00
+#define DDR_VAR_RX_DAT 0x80
+#define DDR_FIX_RX_DAT_EDGE 0x00
+#define DDR_FIX_RX_DAT_14_DELAY 0x40
+#define DDR_FIX_RX_CMD 0x00
+#define DDR_VAR_RX_CMD 0x20
+#define DDR_FIX_RX_CMD_POS_EDGE 0x00
+#define DDR_FIX_RX_CMD_14_DELAY 0x10
+#define SD20_RX_POS_EDGE 0x00
+#define SD20_RX_14_DELAY 0x08
+#define SD20_RX_SEL_MASK 0x08
+
+/* SD_PUSH_POINT_CTL */
+#define DDR_FIX_TX_CMD_DAT 0x00
+#define DDR_VAR_TX_CMD_DAT 0x80
+#define DDR_FIX_TX_DAT_14_TSU 0x00
+#define DDR_FIX_TX_DAT_12_TSU 0x40
+#define DDR_FIX_TX_CMD_NEG_EDGE 0x00
+#define DDR_FIX_TX_CMD_14_AHEAD 0x20
+#define SD20_TX_NEG_EDGE 0x00
+#define SD20_TX_14_AHEAD 0x10
+#define SD20_TX_SEL_MASK 0x10
+#define DDR_VAR_SDCLK_POL_SWAP 0x01
+
+/* SD_TRANSFER */
+#define SD_TRANSFER_START 0x80
+#define SD_TRANSFER_END 0x40
+#define SD_STAT_IDLE 0x20
+#define SD_TRANSFER_ERR 0x10
+/* SD Transfer Mode definition */
+#define SD_TM_NORMAL_WRITE 0x00
+#define SD_TM_AUTO_WRITE_3 0x01
+#define SD_TM_AUTO_WRITE_4 0x02
+#define SD_TM_AUTO_READ_3 0x05
+#define SD_TM_AUTO_READ_4 0x06
+#define SD_TM_CMD_RSP 0x08
+#define SD_TM_AUTO_WRITE_1 0x09
+#define SD_TM_AUTO_WRITE_2 0x0A
+#define SD_TM_NORMAL_READ 0x0C
+#define SD_TM_AUTO_READ_1 0x0D
+#define SD_TM_AUTO_READ_2 0x0E
+#define SD_TM_AUTO_TUNING 0x0F
+
+/* SD_VPTX_CTL / SD_VPRX_CTL */
+#define PHASE_CHANGE 0x80
+#define PHASE_NOT_RESET 0x40
+
+/* SD_DCMPS_TX_CTL / SD_DCMPS_RX_CTL */
+#define DCMPS_CHANGE 0x80
+#define DCMPS_CHANGE_DONE 0x40
+#define DCMPS_ERROR 0x20
+#define DCMPS_CURRENT_PHASE 0x1F
+
+/* SD_CMD_STATE */
+#define SD_CMD_IDLE 0x80
+
+/* SD_DATA_STATE */
+#define SD_DATA_IDLE 0x80
+
+/* MS_BLKEND */
+#define SET_BLKEND 0x01
+
+/* MS_CFG */
+#define SAMPLE_TIME_RISING 0x00
+#define SAMPLE_TIME_FALLING 0x80
+#define PUSH_TIME_DEFAULT 0x00
+#define PUSH_TIME_ODD 0x40
+#define NO_EXTEND_TOGGLE 0x00
+#define EXTEND_TOGGLE_CHK 0x20
+#define MS_BUS_WIDTH_1 0x00
+#define MS_BUS_WIDTH_4 0x10
+#define MS_BUS_WIDTH_8 0x18
+#define MS_2K_SECTOR_MODE 0x04
+#define MS_512_SECTOR_MODE 0x00
+#define MS_TOGGLE_TIMEOUT_EN 0x00
+#define MS_TOGGLE_TIMEOUT_DISEN 0x01
+#define MS_NO_CHECK_INT 0x02
+
+/* MS_TRANS_CFG */
+#define WAIT_INT 0x80
+#define NO_WAIT_INT 0x00
+#define NO_AUTO_READ_INT_REG 0x00
+#define AUTO_READ_INT_REG 0x40
+#define MS_CRC16_ERR 0x20
+#define MS_RDY_TIMEOUT 0x10
+#define MS_INT_CMDNK 0x08
+#define MS_INT_BREQ 0x04
+#define MS_INT_ERR 0x02
+#define MS_INT_CED 0x01
+
+/* MS_TRANSFER */
+#define MS_TRANSFER_START 0x80
+#define MS_TRANSFER_END 0x40
+#define MS_TRANSFER_ERR 0x20
+#define MS_BS_STATE 0x10
+#define MS_TM_READ_BYTES 0x00
+#define MS_TM_NORMAL_READ 0x01
+#define MS_TM_WRITE_BYTES 0x04
+#define MS_TM_NORMAL_WRITE 0x05
+#define MS_TM_AUTO_READ 0x08
+#define MS_TM_AUTO_WRITE 0x0C
+#define MS_TM_SET_CMD 0x06
+#define MS_TM_COPY_PAGE 0x07
+#define MS_TM_MULTI_READ 0x02
+#define MS_TM_MULTI_WRITE 0x03
+
+/* MC_DMA_CTL */
+#define DMA_TC_EQ_0 0x80
+#define DMA_DIR_TO_CARD 0x00
+#define DMA_DIR_FROM_CARD 0x02
+#define DMA_EN 0x01
+#define DMA_128 (0 << 2)
+#define DMA_256 (1 << 2)
+#define DMA_512 (2 << 2)
+#define DMA_1024 (3 << 2)
+#define DMA_PACK_SIZE_MASK 0x0C
+
+/* CARD_INT_PEND */
+#define XD_INT 0x10
+#define MS_INT 0x08
+#define SD_INT 0x04
+
+/* MC_FIFO_CTL */
+#define FIFO_FLUSH 0x01
+
+/* AUTO_DELINK_EN */
+#define AUTO_DELINK 0x02
+#define FORCE_DELINK 0x01
+
+/* MC_DMA_RST */
+#define DMA_RESET 0x01
+
+#define SSC_POWER_MASK 0x01
+#define SSC_POWER_DOWN 0x01
+#define SSC_POWER_ON 0x00
+
+/* OCPCTL */
+#define MS_OCP_DETECT_EN 0x08
+#define MS_OCP_INT_EN 0x04
+#define MS_OCP_INT_CLR 0x02
+#define MS_OCP_CLEAR 0x01
+
+/* OCPSTAT */
+#define MS_OCP_DETECT 0x80
+#define MS_OCP_NOW 0x02
+#define MS_OCP_EVER 0x01
+
+/* MC_FIFO_STAT */
+#define FIFO_FULL 0x01
+#define FIFO_EMPTY 0x02
+
+/* RCCTL */
+#define U_HW_CMD_EN_MASK 0x02
+#define U_HW_CMD_EN 0x02
+#define U_HW_CMD_DIS 0x00
+
+/* Register address */
+#define FPDCTL 0xFC00
+#define SSC_DIV_N_0 0xFC07
+#define SSC_CTL1 0xFC09
+#define SSC_CTL2 0xFC0A
+#define CFG_MODE_1 0xFC0F
+#define RCCTL 0xFC14
+#define SYS_DUMMY0 0xFC30
+#define XD_CP_WAITTIME 0xFD00
+#define XD_CP_PAGELEN 0xFD01
+#define XD_CP_READADDR0 0xFD02
+#define XD_CP_READADDR1 0xFD03
+#define XD_CP_READADDR2 0xFD04
+#define XD_CP_READADDR3 0xFD05
+#define XD_CP_READADDR4 0xFD06
+#define XD_CP_WRITEADDR0 0xFD07
+#define XD_CP_WRITEADDR1 0xFD08
+#define XD_CP_WRITEADDR2 0xFD09
+#define XD_CP_WRITEADDR3 0xFD0A
+#define XD_CP_WRITEADDR4 0xFD0B
+#define XD_INIT 0xFD10
+#define XD_DTCTL 0xFD11
+#define XD_CTL 0xFD12
+#define XD_TRANSFER 0xFD13
+#define XD_CFG 0xFD14
+#define XD_ADDRESS0 0xFD15
+#define XD_ADDRESS1 0xFD16
+#define XD_ADDRESS2 0xFD17
+#define XD_ADDRESS3 0xFD18
+#define XD_ADDRESS4 0xFD19
+#define XD_DAT 0xFD1A
+#define XD_PAGE_CNT 0xFD1B
+#define XD_PAGE_STATUS 0xFD1C
+#define XD_BLOCK_STATUS 0xFD1D
+#define XD_BLOCK_ADDR1_L 0xFD1E
+#define XD_BLOCK_ADDR1_H 0xFD1F
+#define XD_BLOCK_ADDR2_L 0xFD20
+#define XD_BLOCK_ADDR2_H 0xFD21
+#define XD_BYTE_CNT_L 0xFD22
+#define XD_BYTE_CNT_H 0xFD23
+#define XD_PARITY 0xFD24
+#define XD_ECC_BIT1 0xFD25
+#define XD_ECC_BYTE1 0xFD26
+#define XD_ECC_BIT2 0xFD27
+#define XD_ECC_BYTE2 0xFD28
+#define XD_RESERVED0 0xFD29
+#define XD_RESERVED1 0xFD2A
+#define XD_RESERVED2 0xFD2B
+#define XD_RESERVED3 0xFD2C
+#define XD_CHK_DATA_STATUS 0xFD2D
+#define XD_CATCTL 0xFD2E
+
+#define MS_BLKEND 0xFD30
+#define MS_READ_START 0xFD31
+#define MS_READ_COUNT 0xFD32
+#define MS_WRITE_START 0xFD33
+#define MS_WRITE_COUNT 0xFD34
+#define MS_COMMAND 0xFD35
+#define MS_OLD_BLOCK_0 0xFD36
+#define MS_OLD_BLOCK_1 0xFD37
+#define MS_NEW_BLOCK_0 0xFD38
+#define MS_NEW_BLOCK_1 0xFD39
+#define MS_LOG_BLOCK_0 0xFD3A
+#define MS_LOG_BLOCK_1 0xFD3B
+#define MS_BUS_WIDTH 0xFD3C
+#define MS_PAGE_START 0xFD3D
+#define MS_PAGE_LENGTH 0xFD3E
+#define MS_CFG 0xFD40
+#define MS_TPC 0xFD41
+#define MS_TRANS_CFG 0xFD42
+#define MS_TRANSFER 0xFD43
+#define MS_INT_REG 0xFD44
+#define MS_BYTE_CNT 0xFD45
+#define MS_SECTOR_CNT_L 0xFD46
+#define MS_SECTOR_CNT_H 0xFD47
+#define MS_DBUS_H 0xFD48
+
+#define CARD_DMA1_CTL 0xFD5C
+#define CARD_PULL_CTL1 0xFD60
+#define CARD_PULL_CTL2 0xFD61
+#define CARD_PULL_CTL3 0xFD62
+#define CARD_PULL_CTL4 0xFD63
+#define CARD_PULL_CTL5 0xFD64
+#define CARD_PULL_CTL6 0xFD65
+#define CARD_EXIST 0xFD6F
+#define CARD_INT_PEND 0xFD71
+
+#define LDO_POWER_CFG 0xFD7B
+
+#define SD_CFG1 0xFDA0
+#define SD_CFG2 0xFDA1
+#define SD_CFG3 0xFDA2
+#define SD_STAT1 0xFDA3
+#define SD_STAT2 0xFDA4
+#define SD_BUS_STAT 0xFDA5
+#define SD_PAD_CTL 0xFDA6
+#define SD_SAMPLE_POINT_CTL 0xFDA7
+#define SD_PUSH_POINT_CTL 0xFDA8
+#define SD_CMD0 0xFDA9
+#define SD_CMD1 0xFDAA
+#define SD_CMD2 0xFDAB
+#define SD_CMD3 0xFDAC
+#define SD_CMD4 0xFDAD
+#define SD_CMD5 0xFDAE
+#define SD_BYTE_CNT_L 0xFDAF
+#define SD_BYTE_CNT_H 0xFDB0
+#define SD_BLOCK_CNT_L 0xFDB1
+#define SD_BLOCK_CNT_H 0xFDB2
+#define SD_TRANSFER 0xFDB3
+#define SD_CMD_STATE 0xFDB5
+#define SD_DATA_STATE 0xFDB6
+#define SD_VPCLK0_CTL 0xFC2A
+#define SD_VPCLK1_CTL 0xFC2B
+#define SD_DCMPS0_CTL 0xFC2C
+#define SD_DCMPS1_CTL 0xFC2D
+
+#define CARD_DMA1_CTL 0xFD5C
+
+#define HW_VERSION 0xFC01
+
+#define SSC_CLK_FPGA_SEL 0xFC02
+#define CLK_DIV 0xFC03
+#define SFSM_ED 0xFC04
+
+#define CD_DEGLITCH_WIDTH 0xFC20
+#define CD_DEGLITCH_EN 0xFC21
+#define AUTO_DELINK_EN 0xFC23
+
+#define FPGA_PULL_CTL 0xFC1D
+#define CARD_CLK_SOURCE 0xFC2E
+
+#define CARD_SHARE_MODE 0xFD51
+#define CARD_DRIVE_SEL 0xFD52
+#define CARD_STOP 0xFD53
+#define CARD_OE 0xFD54
+#define CARD_AUTO_BLINK 0xFD55
+#define CARD_GPIO 0xFD56
+#define SD30_DRIVE_SEL 0xFD57
+
+#define CARD_DATA_SOURCE 0xFD5D
+#define CARD_SELECT 0xFD5E
+
+#define CARD_CLK_EN 0xFD79
+#define CARD_PWR_CTL 0xFD7A
+
+#define OCPCTL 0xFD80
+#define OCPPARA1 0xFD81
+#define OCPPARA2 0xFD82
+#define OCPSTAT 0xFD83
+
+#define HS_USB_STAT 0xFE01
+#define HS_VCONTROL 0xFE26
+#define HS_VSTAIN 0xFE27
+#define HS_VLOADM 0xFE28
+#define HS_VSTAOUT 0xFE29
+
+#define MC_IRQ 0xFF00
+#define MC_IRQEN 0xFF01
+#define MC_FIFO_CTL 0xFF02
+#define MC_FIFO_BC0 0xFF03
+#define MC_FIFO_BC1 0xFF04
+#define MC_FIFO_STAT 0xFF05
+#define MC_FIFO_MODE 0xFF06
+#define MC_FIFO_RD_PTR0 0xFF07
+#define MC_FIFO_RD_PTR1 0xFF08
+#define MC_DMA_CTL 0xFF10
+#define MC_DMA_TC0 0xFF11
+#define MC_DMA_TC1 0xFF12
+#define MC_DMA_TC2 0xFF13
+#define MC_DMA_TC3 0xFF14
+#define MC_DMA_RST 0xFF15
+
+/* Memory mapping */
+#define RBUF_SIZE_MASK 0xFBFF
+#define RBUF_BASE 0xF000
+#define PPBUF_BASE1 0xF800
+#define PPBUF_BASE2 0xFA00
+
+/* int monitor_card_cd */
+#define CD_EXIST 0
+#define CD_NOT_EXIST 1
+
+#define DEBOUNCE_CNT 5
+
+int monitor_card_cd(struct rts51x_chip *chip, u8 card);
+
+void do_remaining_work(struct rts51x_chip *chip);
+void do_reset_xd_card(struct rts51x_chip *chip);
+void do_reset_sd_card(struct rts51x_chip *chip);
+void do_reset_ms_card(struct rts51x_chip *chip);
+void rts51x_init_cards(struct rts51x_chip *chip);
+void rts51x_release_cards(struct rts51x_chip *chip);
+int switch_ssc_clock(struct rts51x_chip *chip, int clk);
+int switch_normal_clock(struct rts51x_chip *chip, int clk);
+int card_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 sec_addr,
+ u16 sec_cnt);
+u8 get_lun_card(struct rts51x_chip *chip, unsigned int lun);
+int card_share_mode(struct rts51x_chip *chip, int card);
+int rts51x_select_card(struct rts51x_chip *chip, int card);
+void eject_card(struct rts51x_chip *chip, unsigned int lun);
+void trans_dma_enable(enum dma_data_direction dir, struct rts51x_chip *chip,
+ u32 byte_cnt, u8 pack_size);
+int enable_card_clock(struct rts51x_chip *chip, u8 card);
+int disable_card_clock(struct rts51x_chip *chip, u8 card);
+int card_power_on(struct rts51x_chip *chip, u8 card);
+int card_power_off(struct rts51x_chip *chip, u8 card);
+int toggle_gpio(struct rts51x_chip *chip, u8 gpio);
+int turn_on_led(struct rts51x_chip *chip, u8 gpio);
+int turn_off_led(struct rts51x_chip *chip, u8 gpio);
+
+static inline int check_card_ready(struct rts51x_chip *chip, unsigned int lun)
+{
+ if (chip->card_ready & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+static inline int check_card_exist(struct rts51x_chip *chip, unsigned int lun)
+{
+ if (chip->card_exist & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+static inline int check_card_wp(struct rts51x_chip *chip, unsigned int lun)
+{
+ if (chip->card_wp & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+static inline int check_card_fail(struct rts51x_chip *chip, unsigned int lun)
+{
+ if (chip->card_fail & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+static inline int check_card_ejected(struct rts51x_chip *chip, unsigned int lun)
+{
+ if (chip->card_ejected & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+static inline int check_fake_card_ready(struct rts51x_chip *chip,
+ unsigned int lun)
+{
+ if (chip->fake_card_ready & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+static inline u8 get_lun2card(struct rts51x_chip *chip, unsigned int lun)
+{
+ return chip->lun2card[lun];
+}
+
+static inline int check_lun_mc(struct rts51x_chip *chip, unsigned int lun)
+{
+ return CHK_BIT(chip->lun_mc, lun);
+}
+
+static inline void set_lun_mc(struct rts51x_chip *chip, unsigned int lun)
+{
+ SET_BIT(chip->lun_mc, lun);
+}
+
+static inline void clear_lun_mc(struct rts51x_chip *chip, unsigned int lun)
+{
+ CLR_BIT(chip->lun_mc, lun);
+}
+
+static inline int switch_clock(struct rts51x_chip *chip, int clk)
+{
+ int retval = 0;
+
+ if (chip->asic_code)
+ retval = switch_ssc_clock(chip, clk);
+ else
+ retval = switch_normal_clock(chip, clk);
+
+ return retval;
+}
+
+static inline void rts51x_clear_xd_error(struct rts51x_chip *chip)
+{
+ rts51x_ep0_write_register(chip, CARD_STOP,
+ XD_STOP | XD_CLR_ERR, XD_STOP | XD_CLR_ERR);
+
+ rts51x_ep0_write_register(chip, MC_FIFO_CTL, FIFO_FLUSH, FIFO_FLUSH);
+ rts51x_ep0_write_register(chip, MC_DMA_RST, DMA_RESET, DMA_RESET);
+ rts51x_ep0_write_register(chip, SFSM_ED, 0xf8, 0xf8);
+}
+
+static inline void rts51x_clear_sd_error(struct rts51x_chip *chip)
+{
+ rts51x_ep0_write_register(chip, CARD_STOP,
+ SD_STOP | SD_CLR_ERR, SD_STOP | SD_CLR_ERR);
+
+ rts51x_ep0_write_register(chip, MC_FIFO_CTL, FIFO_FLUSH, FIFO_FLUSH);
+ rts51x_ep0_write_register(chip, MC_DMA_RST, DMA_RESET, DMA_RESET);
+ rts51x_ep0_write_register(chip, SFSM_ED, 0xf8, 0xf8);
+}
+
+static inline void rts51x_clear_ms_error(struct rts51x_chip *chip)
+{
+ rts51x_ep0_write_register(chip, CARD_STOP,
+ MS_STOP | MS_CLR_ERR, MS_STOP | MS_CLR_ERR);
+
+ rts51x_ep0_write_register(chip, MC_FIFO_CTL, FIFO_FLUSH, FIFO_FLUSH);
+ rts51x_ep0_write_register(chip, MC_DMA_RST, DMA_RESET, DMA_RESET);
+ rts51x_ep0_write_register(chip, SFSM_ED, 0xf8, 0xf8);
+}
+
+#endif /* __RTS51X_CARD_H */
diff --git a/drivers/staging/rts5139/rts51x_chip.c b/drivers/staging/rts5139/rts51x_chip.c
new file mode 100644
index 00000000000..adc0d000573
--- /dev/null
+++ b/drivers/staging/rts5139/rts51x_chip.c
@@ -0,0 +1,1167 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+
+#include "debug.h"
+#include "trace.h"
+#include "rts51x.h"
+#include "rts51x_chip.h"
+#include "rts51x_card.h"
+#include "rts51x_transport.h"
+#include "rts51x_sys.h"
+#include "xd.h"
+#include "ms.h"
+#include "sd.h"
+
+static int check_sd_speed_prior(u32 sd_speed_prior)
+{
+ int i, fake_para = 0;
+
+ /* Check the legality of sd_speed_prior */
+ for (i = 0; i < 4; i++) {
+ u8 tmp = (u8) (sd_speed_prior >> (i * 8));
+ if ((tmp < 0x01) || (tmp > 0x04)) {
+ fake_para = 1;
+ break;
+ }
+ }
+
+ return !fake_para;
+}
+
+int rts51x_reset_chip(struct rts51x_chip *chip)
+{
+ int retval;
+
+ if (CHECK_PKG(chip, LQFP48)) {
+ RTS51X_WRITE_REG(chip, CARD_PWR_CTL, LDO3318_PWR_MASK,
+ LDO_SUSPEND);
+ RTS51X_WRITE_REG(chip, CARD_PWR_CTL, FORCE_LDO_POWERB,
+ FORCE_LDO_POWERB);
+ RTS51X_WRITE_REG(chip, CARD_PULL_CTL1, 0x30, 0x10);
+ RTS51X_WRITE_REG(chip, CARD_PULL_CTL5, 0x03, 0x01);
+ RTS51X_WRITE_REG(chip, CARD_PULL_CTL6, 0x0C, 0x04);
+ }
+ if (chip->asic_code) {
+ RTS51X_WRITE_REG(chip, SYS_DUMMY0, NYET_MSAK, NYET_EN);
+ RTS51X_WRITE_REG(chip, CD_DEGLITCH_WIDTH, 0xFF, 0x08);
+ rts51x_write_register(chip, CD_DEGLITCH_EN, XD_CD_DEGLITCH_EN,
+ 0x00);
+ rts51x_write_register(chip, SD30_DRIVE_SEL, SD30_DRIVE_MASK,
+ chip->option.sd30_pad_drive);
+ rts51x_write_register(chip, CARD_DRIVE_SEL, SD20_DRIVE_MASK,
+ chip->option.sd20_pad_drive);
+ if (chip->rts5179)
+ rts51x_write_register(chip, CARD_PULL_CTL5, 0x03, 0x01);
+ if (!chip->option.ww_enable) {
+ if (CHECK_PKG(chip, LQFP48)) {
+ rts51x_write_register(chip, CARD_PULL_CTL3,
+ 0x80, 0x80);
+ rts51x_write_register(chip, CARD_PULL_CTL6,
+ 0xf0, 0xA0);
+ } else {
+ rts51x_write_register(chip, CARD_PULL_CTL1,
+ 0x30, 0x20);
+ rts51x_write_register(chip, CARD_PULL_CTL3,
+ 0x80, 0x80);
+ rts51x_write_register(chip, CARD_PULL_CTL6,
+ 0x0c, 0x08);
+ }
+ }
+ }
+ if (chip->option.sd_ctl & SUPPORT_UHS50_MMC44) {
+ SET_UHS50(chip);
+ RTS51X_DEBUGP("option enable UHS50&MMC44,sd_ctl:0x%x\n",
+ chip->option.sd_ctl);
+ } else {
+ /* if(CHECK_PID(chip, 0x0139)&&CHECK_PKG(chip, LQFP48)) */
+ if ((CHECK_PID(chip, 0x0139) && CHECK_PKG(chip, LQFP48))
+ || chip->rts5179) {
+ SET_UHS50(chip);
+ RTS51X_DEBUGP("PID enable UHS50&MMC44\n");
+ } else {
+ CLEAR_UHS50(chip);
+ RTS51X_DEBUGP("PID disable UHS50&MMC44\n");
+ }
+ }
+
+ if (chip->option.ms_errreg_fix && (chip->ic_version > 1))
+ rts51x_write_register(chip, 0xFD4D, 0x01, 0x01);
+ retval = rts51x_write_phy_register(chip, 0xC2, 0x7C);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ rts51x_init_cmd(chip);
+
+ /* GPIO OE */
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_GPIO, GPIO_OE, GPIO_OE);
+#ifdef LED_AUTO_BLINK
+ /* LED autoblink */
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_AUTO_BLINK,
+ BLINK_ENABLE | BLINK_SPEED_MASK,
+ BLINK_ENABLE | chip->option.led_blink_speed);
+#endif
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DMA1_CTL,
+ EXTEND_DMA1_ASYNC_SIGNAL, EXTEND_DMA1_ASYNC_SIGNAL);
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+#ifdef SUPPORT_OCP
+ if (chip->asic_code) {
+ rts51x_write_register(chip, OCPCTL, MS_OCP_DETECT_EN,
+ MS_OCP_DETECT_EN);
+ RTS51X_DEBUGP("Enable OCP detect!\n");
+ }
+#endif
+ if (chip->option.FT2_fast_mode) {
+ card_power_on(chip, SD_CARD | MS_CARD | XD_CARD);
+ wait_timeout(10);
+ }
+ rts51x_clear_start_time(chip);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_init_chip(struct rts51x_chip *chip)
+{
+ int retval;
+ u8 val;
+
+ chip->max_lun = 0;
+ chip->cur_clk = 0;
+ chip->cur_card = 0;
+
+ chip->card2lun[XD_CARD] = 0;
+ chip->card2lun[SD_CARD] = 0;
+ chip->card2lun[MS_CARD] = 0;
+ chip->card_ejected = 0;
+
+ chip->lun2card[0] = XD_CARD | SD_CARD | MS_CARD;
+#if 0
+ chip->option.sdr50_tx_phase = 0x01;
+ chip->option.sdr50_rx_phase = 0x05;
+ chip->option.ddr50_tx_phase = 0x09;
+ chip->option.ddr50_rx_phase = 0x06; /* add for debug */
+#endif
+#ifdef CLOSE_SSC_POWER
+ rts51x_write_register(chip, FPDCTL, SSC_POWER_MASK, SSC_POWER_ON);
+ udelay(100);
+ rts51x_write_register(chip, CLK_DIV, CLK_CHANGE, 0x00);
+#endif
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ RTS51X_READ_REG(chip, HW_VERSION, &val);
+ if ((val & 0x0f) >= 2)
+ chip->option.rcc_bug_fix_en = 0;
+ RTS51X_DEBUGP("rcc bug fix enable:%d\n", chip->option.rcc_bug_fix_en);
+ RTS51X_DEBUGP("HW_VERSION: 0x%x\n", val);
+ if (val & FPGA_VER) {
+ chip->asic_code = 0;
+ RTS51X_DEBUGP("FPGA!\n");
+ } else {
+ chip->asic_code = 1;
+ RTS51X_DEBUGP("ASIC!\n");
+ }
+ chip->ic_version = val & HW_VER_MASK;
+
+ if (!check_sd_speed_prior(chip->option.sd_speed_prior))
+ chip->option.sd_speed_prior = 0x01020403;
+ RTS51X_DEBUGP("sd_speed_prior = 0x%08x\n",
+ chip->option.sd_speed_prior);
+
+ RTS51X_READ_REG(chip, CARD_SHARE_MODE, &val);
+ if (val & CARD_SHARE_LQFP_SEL) {
+ chip->package = LQFP48;
+ RTS51X_DEBUGP("Package: LQFP48\n");
+ } else {
+ chip->package = QFN24;
+ RTS51X_DEBUGP("Package: QFN24\n");
+ }
+
+ RTS51X_READ_REG(chip, HS_USB_STAT, &val);
+ if (val & USB_HI_SPEED) {
+ chip->usb_speed = USB_20;
+ RTS51X_DEBUGP("USB High Speed\n");
+ } else {
+ chip->usb_speed = USB_11;
+ RTS51X_DEBUGP("USB Full Speed\n");
+ }
+
+ RTS51X_READ_REG(chip, CFG_MODE_1, &val);
+ if (val & RTS5179) {
+ chip->rts5179 = 1;
+ RTS51X_DEBUGP("device is rts5179\n");
+ } else {
+ chip->rts5179 = 0;
+ }
+
+ retval = rts51x_reset_chip(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_release_chip(struct rts51x_chip *chip)
+{
+ xd_free_l2p_tbl(chip);
+ ms_free_l2p_tbl(chip);
+ chip->card_ready = 0;
+ return STATUS_SUCCESS;
+}
+
+#ifndef LED_AUTO_BLINK
+static inline void rts51x_blink_led(struct rts51x_chip *chip)
+{
+ /* Read/Write */
+ if (chip->card_ready) {
+ if (chip->led_toggle_counter <
+ chip->option.led_toggle_interval) {
+ chip->led_toggle_counter++;
+ } else {
+ chip->led_toggle_counter = 0;
+ toggle_gpio(chip, LED_GPIO);
+ }
+ }
+}
+#endif
+
+int rts51x_check_start_time(struct rts51x_chip *chip)
+{
+ return 0;
+}
+
+void rts51x_set_start_time(struct rts51x_chip *chip)
+{
+}
+
+void rts51x_clear_start_time(struct rts51x_chip *chip)
+{
+}
+
+static void rts51x_auto_delink_cmd(struct rts51x_chip *chip)
+{
+ rts51x_write_register(chip, AUTO_DELINK_EN,
+ AUTO_DELINK, AUTO_DELINK);
+}
+
+static void rts51x_auto_delink_force_cmd(struct rts51x_chip *chip)
+{
+ rts51x_write_register(chip, AUTO_DELINK_EN,
+ AUTO_DELINK | FORCE_DELINK,
+ AUTO_DELINK | FORCE_DELINK);
+}
+
+#ifdef USING_POLLING_CYCLE_DELINK
+/* using polling cycle as delink time */
+static void rts51x_auto_delink_polling_cycle(struct rts51x_chip *chip)
+{
+ if (chip->auto_delink_counter <=
+ chip->option.delink_delay * 2) {
+ if (chip->auto_delink_counter ==
+ chip->option.delink_delay) {
+ clear_first_install_mark(chip);
+ if (chip->card_exist) {
+ /* False card */
+ if (!chip->card_ejected) {
+ /* if card is not ejected or safely
+ * remove,then do force delink */
+ RTS51X_DEBUGP("False card inserted,"
+ "do force delink\n");
+ rts51x_auto_delink_force_cmd(chip);
+ chip->auto_delink_counter =
+ chip->option.delink_delay * 2 + 1;
+ }
+ } else {
+ RTS51X_DEBUGP("No card inserted, do delink\n");
+ /* rts51x_write_register(chip, CARD_PWR_CTL,
+ DV3318_AUTO_PWR_OFF, 0); */
+ rts51x_auto_delink_cmd(chip);
+ }
+ }
+ if (chip->auto_delink_counter ==
+ chip->option.delink_delay * 2) {
+ RTS51X_DEBUGP("Try to do force delink\n");
+ rts51x_auto_delink_force_cmd(chip);
+ }
+ chip->auto_delink_counter++;
+ }
+}
+
+static void rts51x_auto_delink(struct rts51x_chip *chip)
+{
+ rts51x_auto_delink_polling_cycle(chip);
+}
+#else
+/* some of called funcs are not implemented, so comment it out */
+#if 0
+/* using precise time as delink time */
+static void rts51x_auto_delink_precise_time(struct rts51x_chip *chip)
+{
+ int retvalue = 0;
+
+ retvalue = rts51x_get_card_status(chip, &chip->card_status);
+ /* get card CD status success and card CD not exist,
+ * then check whether delink */
+ if ((retvalue == STATUS_SUCCESS)
+ && (!(chip->card_status & (SD_CD | MS_CD | XD_CD)))) {
+ if (rts51x_count_delink_time(chip) >=
+ chip->option.delink_delay) {
+ clear_first_install_mark(chip);
+ RTS51X_DEBUGP("No card inserted, do delink\n");
+ /* sangdy2010-05-17:disable because there is error
+ * after SSC clock closed and card power
+ * has been closed before */
+ /* rts51x_write_register(chip, CARD_PWR_CTL,
+ DV3318_AUTO_PWR_OFF, 0); */
+ rts51x_auto_delink_cmd(chip);
+ }
+ /* card CD exist and not ready, then do force delink */
+ if ((retvalue == STATUS_SUCCESS)
+ && (chip->card_status & (SD_CD | MS_CD | XD_CD))) {
+ /* if card is not ejected or safely remove,
+ * then do force delink */
+ if (!chip->card_ejected) {
+ /* sangdy2010-11-16:polling at least 2 cycles
+ * then do force delink for card may force delink
+ * if card is extracted and insert quickly
+ * after ready. */
+ if (chip->auto_delink_counter > 1) {
+ if (rts51x_count_delink_time(chip) >
+ chip->option.delink_delay * 2) {
+ RTS51X_DEBUGP("Try to do force"
+ "delink\n");
+ rts51x_auto_delink_force_cmd(chip);
+ }
+ }
+ }
+ }
+ chip->auto_delink_counter++;
+}
+#else
+static void rts51x_auto_delink_precise_time(struct rts51x_chip *chip)
+{
+}
+#endif
+
+static void rts51x_auto_delink(struct rts51x_chip *chip)
+{
+ rts51x_auto_delink_precise_time(chip);
+}
+#endif
+
+void rts51x_polling_func(struct rts51x_chip *chip)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ if (sd_card->sd_erase_status) {
+ if (chip->card_exist & SD_CARD) {
+ u8 val;
+ rts51x_read_register(chip, SD_BUS_STAT, &val);
+ if (val & SD_DAT0_STATUS) {
+ /* Erase completed */
+ sd_card->sd_erase_status = SD_NOT_ERASE;
+ sd_card->sd_lock_notify = 1;
+
+ /* SD card should be reinited,
+ * so we release it here. */
+ sd_cleanup_work(chip);
+ release_sd_card(chip);
+ chip->card_ready &= ~SD_CARD;
+ chip->card_exist &= ~SD_CARD;
+ chip->rw_card[chip->card2lun[SD_CARD]] = NULL;
+ clear_bit(chip->card2lun[SD_CARD],
+ &(chip->lun_mc));
+ }
+ } else {
+ sd_card->sd_erase_status = SD_NOT_ERASE;
+ }
+ }
+#endif
+
+ rts51x_init_cards(chip);
+
+#ifdef SUPPORT_OCP
+ /* if OCP happen and card exist, then close card OE */
+ if ((chip->ocp_stat & (MS_OCP_NOW | MS_OCP_EVER)) &&
+ (chip->card_exist)) {
+
+ rts51x_prepare_run(chip);
+
+ if (chip->card_exist & SD_CARD)
+ rts51x_write_register(chip, CARD_OE, SD_OUTPUT_EN, 0);
+ else if (chip->card_exist & MS_CARD)
+ rts51x_write_register(chip, CARD_OE, MS_OUTPUT_EN, 0);
+ else if (chip->card_exist & XD_CARD)
+ rts51x_write_register(chip, CARD_OE, XD_OUTPUT_EN, 0);
+ }
+#endif
+
+ if (chip->idle_counter < IDLE_MAX_COUNT) {
+ chip->idle_counter++;
+ } else {
+ if (!RTS51X_CHK_STAT(chip, STAT_IDLE)) {
+ RTS51X_DEBUGP("Idle state!\n");
+ RTS51X_SET_STAT(chip, STAT_IDLE);
+#ifndef LED_AUTO_BLINK
+ chip->led_toggle_counter = 0;
+#endif
+ /* Idle state, turn off LED
+ * to reduce power consumption */
+ if (chip->option.led_always_on
+ && (chip->card_exist &
+ (SD_CARD | MS_CARD | XD_CARD))
+ && (!chip->card_ejected)) {
+ turn_on_led(chip, LED_GPIO);
+ } else {
+ if (chip->rts5179) {
+ rts51x_ep0_write_register(chip,
+ CARD_GPIO,
+ 0x03, 0x00);
+ } else {
+ turn_off_led(chip, LED_GPIO);
+ }
+
+ }
+
+#ifdef CLOSE_SSC_POWER
+ if (!chip->card_ready) {
+ rts51x_write_register(chip, CLK_DIV, CLK_CHANGE,
+ CLK_CHANGE);
+ rts51x_write_register(chip, FPDCTL,
+ SSC_POWER_MASK,
+ SSC_POWER_DOWN);
+ RTS51X_DEBUGP("Close SSC clock power!\n");
+ }
+#endif
+ }
+ }
+
+ switch (RTS51X_GET_STAT(chip)) {
+ case STAT_RUN:
+#ifndef LED_AUTO_BLINK
+ rts51x_blink_led(chip);
+#endif
+ do_remaining_work(chip);
+ break;
+
+ case STAT_IDLE:
+ break;
+
+ default:
+ break;
+ }
+
+ if (chip->option.auto_delink_en && !chip->card_ready) {
+ rts51x_auto_delink(chip);
+ } else {
+ chip->auto_delink_counter = 0;
+ rts51x_clear_start_time(chip);
+ }
+}
+
+void rts51x_add_cmd(struct rts51x_chip *chip,
+ u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
+{
+ int i;
+
+ if (chip->cmd_idx < ((CMD_BUF_LEN - CMD_OFFSET) / 4)) {
+ i = CMD_OFFSET + chip->cmd_idx * 4;
+ chip->cmd_buf[i++] =
+ ((cmd_type & 0x03) << 6) | (u8) ((reg_addr >> 8) & 0x3F);
+ chip->cmd_buf[i++] = (u8) reg_addr;
+ chip->cmd_buf[i++] = mask;
+ chip->cmd_buf[i++] = data;
+ chip->cmd_idx++;
+ }
+}
+
+int rts51x_send_cmd(struct rts51x_chip *chip, u8 flag, int timeout)
+{
+ int result;
+
+ chip->cmd_buf[CNT_H] = (u8) (chip->cmd_idx >> 8);
+ chip->cmd_buf[CNT_L] = (u8) (chip->cmd_idx);
+ chip->cmd_buf[STAGE_FLAG] = flag;
+
+ result = rts51x_transfer_data_rcc(chip, SND_BULK_PIPE(chip),
+ (void *)(chip->cmd_buf),
+ chip->cmd_idx * 4 + CMD_OFFSET,
+ 0, NULL, timeout, MODE_C);
+ if (result != STATUS_SUCCESS)
+ TRACE_RET(chip, result);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_get_rsp(struct rts51x_chip *chip, int rsp_len, int timeout)
+{
+ int result;
+
+ if (rsp_len <= 0)
+ TRACE_RET(chip, STATUS_ERROR);
+ /* rsp_len must aligned to dword */
+ if (rsp_len % 4)
+ rsp_len += (4 - rsp_len % 4);
+
+ result = rts51x_transfer_data_rcc(chip, RCV_BULK_PIPE(chip),
+ (void *)chip->rsp_buf, rsp_len,
+ 0, NULL, timeout, STAGE_R);
+ if (result != STATUS_SUCCESS)
+ TRACE_RET(chip, result);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_get_card_status(struct rts51x_chip *chip, u16 * status)
+{
+ int retval;
+ u16 val;
+
+#ifdef GET_CARD_STATUS_USING_EPC
+ retval = rts51x_get_epc_status(chip, &val);
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+#else
+ retval = rts51x_ctrl_transfer(chip, RCV_CTRL_PIPE(chip), 0x02, 0xC0,
+ 0, 0, &val, 2, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+#endif
+
+ if (status)
+ *status = val;
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_write_register(struct rts51x_chip *chip, u16 addr, u8 mask, u8 data)
+{
+ int retval;
+
+ rts51x_init_cmd(chip);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, addr, mask, data);
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 * data)
+{
+ int retval;
+
+ if (data)
+ *data = 0;
+ rts51x_init_cmd(chip);
+ rts51x_add_cmd(chip, READ_REG_CMD, addr, 0, 0);
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = rts51x_get_rsp(chip, 1, 100);
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (data)
+ *data = chip->rsp_buf[0];
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_ep0_write_register(struct rts51x_chip *chip, u16 addr, u8 mask,
+ u8 data)
+{
+ int retval;
+ u16 value = 0, index = 0;
+
+ value |= (u16) (3 & 0x03) << 14;
+ value |= (u16) (addr & 0x3FFF);
+ index |= (u16) mask << 8;
+ index |= (u16) data;
+
+ retval = rts51x_ctrl_transfer(chip, SND_CTRL_PIPE(chip), 0x00, 0x40,
+ cpu_to_be16(value), cpu_to_be16(index),
+ NULL, 0, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 * data)
+{
+ int retval;
+ u16 value = 0;
+ u8 val;
+
+ if (data)
+ *data = 0;
+
+ value |= (u16) (2 & 0x03) << 14;
+ value |= (u16) (addr & 0x3FFF);
+
+ retval = rts51x_ctrl_transfer(chip, RCV_CTRL_PIPE(chip), 0x00, 0xC0,
+ cpu_to_be16(value), 0, &val, 1, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (data)
+ *data = val;
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_seq_write_register(struct rts51x_chip *chip, u16 addr, u16 len,
+ u8 *data)
+{
+ int result;
+ u16 cmd_len = len + 12;
+
+ if (!data)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ cmd_len = (cmd_len <= CMD_BUF_LEN) ? cmd_len : CMD_BUF_LEN;
+
+ /* cmd_len must aligned to dword */
+ if (cmd_len % 4)
+ cmd_len += (4 - cmd_len % 4);
+
+ chip->cmd_buf[0] = 'R';
+ chip->cmd_buf[1] = 'T';
+ chip->cmd_buf[2] = 'C';
+ chip->cmd_buf[3] = 'R';
+ chip->cmd_buf[PACKET_TYPE] = SEQ_WRITE;
+ chip->cmd_buf[5] = (u8) (len >> 8);
+ chip->cmd_buf[6] = (u8) len;
+ chip->cmd_buf[STAGE_FLAG] = 0;
+ chip->cmd_buf[8] = (u8) (addr >> 8);
+ chip->cmd_buf[9] = (u8) addr;
+
+ memcpy(chip->cmd_buf + 12, data, len);
+
+ result = rts51x_transfer_data_rcc(chip, SND_BULK_PIPE(chip),
+ (void *)(chip->cmd_buf), cmd_len, 0,
+ NULL, 100, MODE_C);
+ if (result != STATUS_SUCCESS)
+ TRACE_RET(chip, result);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_seq_read_register(struct rts51x_chip *chip, u16 addr, u16 len,
+ u8 *data)
+{
+ int result;
+ u16 rsp_len;
+
+ if (!data)
+ TRACE_RET(chip, STATUS_ERROR);
+ /* rsp_len must aligned to dword */
+ if (len % 4)
+ rsp_len = len + (4 - len % 4);
+ else
+ rsp_len = len;
+
+ chip->cmd_buf[0] = 'R';
+ chip->cmd_buf[1] = 'T';
+ chip->cmd_buf[2] = 'C';
+ chip->cmd_buf[3] = 'R';
+ chip->cmd_buf[PACKET_TYPE] = SEQ_READ;
+ chip->cmd_buf[5] = (u8) (rsp_len >> 8);
+ chip->cmd_buf[6] = (u8) rsp_len;
+ chip->cmd_buf[STAGE_FLAG] = STAGE_R;
+ chip->cmd_buf[8] = (u8) (addr >> 8);
+ chip->cmd_buf[9] = (u8) addr;
+
+ result = rts51x_transfer_data_rcc(chip, SND_BULK_PIPE(chip),
+ (void *)(chip->cmd_buf), 12, 0, NULL,
+ 100, MODE_C);
+ if (result != STATUS_SUCCESS)
+ TRACE_RET(chip, result);
+
+ result = rts51x_transfer_data_rcc(chip, RCV_BULK_PIPE(chip),
+ (void *)data, rsp_len, 0, NULL, 100,
+ STAGE_DI);
+ if (result != STATUS_SUCCESS)
+ TRACE_RET(chip, result);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len)
+{
+ int retval;
+
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ retval =
+ rts51x_seq_read_register(chip, PPBUF_BASE2, (u16) buf_len, buf);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_write_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len)
+{
+ int retval;
+
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ retval =
+ rts51x_seq_write_register(chip, PPBUF_BASE2, (u16) buf_len, buf);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_write_phy_register(struct rts51x_chip *chip, u8 addr, u8 val)
+{
+ int retval;
+
+ RTS51X_DEBUGP("Write 0x%x to phy register 0x%x\n", val, addr);
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VSTAIN, 0xFF, val);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VCONTROL, 0xFF, addr & 0x0F);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VCONTROL, 0xFF,
+ (addr >> 4) & 0x0F);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01);
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_read_phy_register(struct rts51x_chip *chip, u8 addr, u8 * val)
+{
+ int retval;
+
+ RTS51X_DEBUGP("Read from phy register 0x%x\n", addr);
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VCONTROL, 0xFF, 0x07);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VCONTROL, 0xFF,
+ (addr >> 4) & 0x0F);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VCONTROL, 0xFF, addr & 0x0F);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01);
+ rts51x_add_cmd(chip, READ_REG_CMD, HS_VSTAOUT, 0, 0);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 1, 100);
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (val)
+ *val = chip->rsp_buf[0];
+
+ RTS51X_DEBUGP("Return value: 0x%x\n", chip->rsp_buf[0]);
+
+ return STATUS_SUCCESS;
+}
+
+void rts51x_do_before_power_down(struct rts51x_chip *chip)
+{
+ RTS51X_DEBUGP("rts51x_do_before_power_down\n");
+
+ rts51x_prepare_run(chip);
+
+ rts51x_release_cards(chip);
+ if (chip->rts5179)
+ rts51x_ep0_write_register(chip, CARD_GPIO, 0x03, 0x00);
+ else
+ turn_off_led(chip, LED_GPIO);
+
+ chip->cur_clk = 0;
+ chip->card_exist = 0;
+ chip->cur_card = 0;
+ if (chip->asic_code && !chip->option.ww_enable) {
+ if (CHECK_PKG(chip, LQFP48)) {
+ rts51x_write_register(chip, CARD_PULL_CTL3, 0x80, 0x00);
+ rts51x_write_register(chip, CARD_PULL_CTL6, 0xf0, 0x50);
+ } else {
+ rts51x_write_register(chip, CARD_PULL_CTL1, 0x30, 0x10);
+ rts51x_write_register(chip, CARD_PULL_CTL3, 0x80, 0x00);
+ rts51x_write_register(chip, CARD_PULL_CTL6, 0x0c, 0x04);
+ }
+ }
+ if (CHECK_PKG(chip, LQFP48))
+ rts51x_write_register(chip, CARD_PWR_CTL, LDO3318_PWR_MASK,
+ LDO_OFF);
+}
+
+void rts51x_clear_hw_error(struct rts51x_chip *chip)
+{
+ rts51x_ep0_write_register(chip, SFSM_ED, 0xf8, 0xf8);
+}
+
+void rts51x_prepare_run(struct rts51x_chip *chip)
+{
+#ifdef CLOSE_SSC_POWER
+ if (RTS51X_CHK_STAT(chip, STAT_IDLE) && (!chip->card_ready)) {
+ rts51x_write_register(chip, FPDCTL, SSC_POWER_MASK,
+ SSC_POWER_ON);
+ udelay(100);
+ RTS51X_DEBUGP("Open SSC clock power.\n");
+
+ rts51x_write_register(chip, CLK_DIV, CLK_CHANGE, 0x00);
+ }
+#endif
+#if 0
+ if (chip->option.ss_en && RTS51X_CHK_STAT(chip, STAT_SS)) {
+ rts51x_try_to_exit_ss(chip);
+ wait_timeout(100);
+ rts51x_init_chip(chip);
+ rts51x_init_cards(chip);
+ }
+
+ RTS51X_SET_STAT(chip, STAT_RUN);
+#endif
+}
+
+#ifdef _MSG_TRACE
+void rts51x_trace_msg(struct rts51x_chip *chip, unsigned char *buf, int clear)
+{
+ unsigned char *ptr;
+ int i, msg_cnt;
+
+ if (!buf)
+ return;
+
+ ptr = buf;
+
+ if (chip->trace_msg[chip->msg_idx].valid)
+ msg_cnt = TRACE_ITEM_CNT;
+ else
+ msg_cnt = chip->msg_idx;
+ *(ptr++) = (u8) (msg_cnt >> 24);
+ *(ptr++) = (u8) (msg_cnt >> 16);
+ *(ptr++) = (u8) (msg_cnt >> 8);
+ *(ptr++) = (u8) msg_cnt;
+ RTS51X_DEBUGP("Trace message count is %d\n", msg_cnt);
+
+ for (i = 1; i <= msg_cnt; i++) {
+ int j, idx;
+
+ idx = chip->msg_idx - i;
+ if (idx < 0)
+ idx += TRACE_ITEM_CNT;
+
+ *(ptr++) = (u8) (chip->trace_msg[idx].line >> 8);
+ *(ptr++) = (u8) (chip->trace_msg[idx].line);
+ for (j = 0; j < MSG_FUNC_LEN; j++)
+ *(ptr++) = chip->trace_msg[idx].func[j];
+ for (j = 0; j < MSG_FILE_LEN; j++)
+ *(ptr++) = chip->trace_msg[idx].file[j];
+ for (j = 0; j < TIME_VAL_LEN; j++)
+ *(ptr++) = chip->trace_msg[idx].timeval_buf[j];
+ }
+
+ if (clear) {
+ chip->msg_idx = 0;
+ for (i = 0; i < TRACE_ITEM_CNT; i++)
+ chip->trace_msg[i].valid = 0;
+ }
+}
+#endif
+
+void rts51x_pp_status(struct rts51x_chip *chip, unsigned int lun, u8 * status,
+ u8 status_len)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct ms_info *ms_card = &(chip->ms_card);
+ u8 card = get_lun_card(chip, lun);
+#ifdef SUPPORT_OC
+ u8 oc_now_mask = 0, oc_ever_mask = 0;
+#endif
+
+ if (!status || (status_len < 32))
+ return;
+ /* IC Version */
+ status[0] = (u8) RTS51X_GET_PID(chip);
+ status[1] = (u8) (chip->ic_version);
+
+ /* Auto delink mode */
+ if (chip->option.auto_delink_en)
+ status[2] = 0x10;
+ else
+ status[2] = 0x00;
+
+ /* Spec version */
+ status[3] = 20;
+ status[4] = 10;
+ status[5] = 05;
+ status[6] = 21;
+
+ /* Card WP */
+ if (chip->card_wp)
+ status[7] = 0x20;
+ else
+ status[7] = 0x00;
+
+#ifdef SUPPORT_OC
+ /* Over current status */
+ status[8] = 0;
+ oc_now_mask = MS_OCP_NOW;
+ oc_ever_mask = MS_OCP_EVER;
+
+ if (chip->ocp_stat & oc_now_mask)
+ status[8] |= 0x02;
+ if (chip->ocp_stat & oc_ever_mask)
+ status[8] |= 0x01;
+#endif
+
+ if (card == SD_CARD) {
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_HCXC(sd_card)) {
+ if (sd_card->capacity > 0x4000000)
+ /* SDXC */
+ status[0x0E] = 0x02;
+ else /* SDHC */
+ status[0x0E] = 0x01;
+ } else { /* SDSC */
+ status[0x0E] = 0x00;
+ }
+
+ if (CHK_SD_SDR104(sd_card))
+ status[0x0F] = 0x03;
+ else if (CHK_SD_DDR50(sd_card))
+ status[0x0F] = 0x04;
+ else if (CHK_SD_SDR50(sd_card))
+ status[0x0F] = 0x02;
+ else if (CHK_SD_HS(sd_card))
+ status[0x0F] = 0x01;
+ else
+ status[0x0F] = 0x00; /* Normal speed */
+ } else {
+ if (CHK_MMC_SECTOR_MODE(sd_card))
+ status[0x0E] = 0x01; /* High capacity */
+ else
+ status[0x0E] = 0x00; /* Normal capacity */
+
+ if (CHK_MMC_DDR52(sd_card))
+ status[0x0F] = 0x03; /* DDR 52M */
+ else if (CHK_MMC_52M(sd_card))
+ status[0x0F] = 0x02; /* SDR 52M */
+ else if (CHK_MMC_26M(sd_card))
+ status[0x0F] = 0x01; /* SDR 26M */
+ else
+ status[0x0F] = 0x00; /* Normal speed */
+ }
+ } else if (card == MS_CARD) {
+ if (CHK_MSPRO(ms_card)) {
+ if (CHK_MSXC(ms_card))
+ status[0x0E] = 0x01; /* XC */
+ else
+ status[0x0E] = 0x00;
+
+ if (CHK_HG8BIT(ms_card))
+ status[0x0F] = 0x01;
+ else
+ status[0x0F] = 0x00;
+ }
+ }
+#ifdef SUPPORT_SD_LOCK
+ /* SD Lock/Unlock */
+ if (card == SD_CARD) {
+ status[0x17] = 0x80;
+ if (sd_card->sd_erase_status)
+ status[0x17] |= 0x01; /* Under erasing */
+ if (sd_card->sd_lock_status & SD_LOCKED) {
+ status[0x17] |= 0x02; /* Locked */
+ status[0x07] |= 0x40; /* Read protected */
+ }
+ if (sd_card->sd_lock_status & SD_PWD_EXIST)
+ status[0x17] |= 0x04; /* Contain PWD */
+ } else {
+ status[0x17] = 0x00;
+ }
+
+ RTS51X_DEBUGP("status[0x17] = 0x%x\n", status[0x17]);
+#endif
+
+ /* Function 0
+ * Support Magic Gate, CPRM and PhyRegister R/W */
+ status[0x18] = 0x8A;
+
+ /* Function 2
+ * Support OC LUN status & WP LUN status */
+ status[0x1A] = 0x28;
+
+ /* Function 7 */
+#ifdef SUPPORT_SD_LOCK
+ /* Support SD Lock/Unlock */
+ status[0x1F] = 0x01;
+#endif
+
+ /* Function 2
+ * Support OC LUN status & WP LUN status */
+ status[0x1A] = 0x28;
+}
+
+void rts51x_read_status(struct rts51x_chip *chip, unsigned int lun,
+ u8 *rts51x_status, u8 status_len)
+{
+ if (!rts51x_status || (status_len < 16))
+ return;
+ /* VID */
+ rts51x_status[0] = (u8) (RTS51X_GET_VID(chip) >> 8);
+ rts51x_status[1] = (u8) RTS51X_GET_VID(chip);
+
+ /* PID */
+ rts51x_status[2] = (u8) (RTS51X_GET_PID(chip) >> 8);
+ rts51x_status[3] = (u8) RTS51X_GET_PID(chip);
+
+ /* gbLUN */
+ rts51x_status[4] = (u8) lun;
+
+ /* Lun Card Number */
+ if (chip->card_exist) {
+ if (chip->card_exist & XD_CARD)
+ rts51x_status[5] = 4; /* xD Card */
+ else if (chip->card_exist & SD_CARD)
+ rts51x_status[5] = 2; /* SD Card */
+ else if (chip->card_exist & MS_CARD)
+ rts51x_status[5] = 3; /* MS Card */
+ else
+ rts51x_status[5] = 7; /* Multi */
+ } else {
+ rts51x_status[5] = 7; /* Multi */
+ }
+
+ /* Total LUNs */
+ rts51x_status[6] = 1;
+
+ /* IC Version */
+ rts51x_status[7] = (u8) RTS51X_GET_PID(chip);
+ rts51x_status[8] = chip->ic_version;
+
+ /* Physical Exist */
+ if (check_card_exist(chip, lun))
+ rts51x_status[9] = 1;
+ else
+ rts51x_status[9] = 0;
+
+ /* Multi Flag */
+ rts51x_status[10] = 1;
+
+ /* LUN Valid Map */
+ rts51x_status[11] = XD_CARD | SD_CARD | MS_CARD;
+
+ /* Logical Exist */
+ if (check_card_ready(chip, lun))
+ rts51x_status[12] = 1;
+ else
+ rts51x_status[12] = 0;
+
+ /* Detailed Type */
+ if (get_lun_card(chip, lun) == XD_CARD) {
+ rts51x_status[13] = 0x40;
+ } else if (get_lun_card(chip, lun) == SD_CARD) {
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ rts51x_status[13] = 0x20;
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_HCXC(sd_card))
+ rts51x_status[13] |= 0x04; /* Hi capacity SD */
+ if (CHK_SD_HS(sd_card))
+ rts51x_status[13] |= 0x02; /* Hi speed SD */
+ } else {
+ rts51x_status[13] |= 0x08; /* MMC card */
+ if (CHK_MMC_52M(sd_card))
+ rts51x_status[13] |= 0x02; /* Hi speed */
+ if (CHK_MMC_SECTOR_MODE(sd_card))
+ rts51x_status[13] |= 0x04; /* Hi capacity */
+ }
+ } else if (get_lun_card(chip, lun) == MS_CARD) {
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ if (CHK_MSPRO(ms_card)) {
+ rts51x_status[13] = 0x38; /* MS Pro */
+ if (CHK_HG8BIT(ms_card))
+ rts51x_status[13] |= 0x04; /* HG */
+#ifdef SUPPORT_MSXC
+ if (CHK_MSXC(ms_card))
+ rts51x_status[13] |= 0x01; /* MSXC */
+#endif
+ } else {
+ rts51x_status[13] = 0x30;
+ }
+ } else {
+ rts51x_status[13] = 0x70;
+ }
+/* Support OC, auto delink, vendor r/w, get bus width */
+ rts51x_status[14] = 0x78;
+
+ rts51x_status[15] = 0x82;
+}
+
+int rts51x_transfer_data_rcc(struct rts51x_chip *chip, unsigned int pipe,
+ void *buf, unsigned int len, int use_sg,
+ unsigned int *act_len, int timeout, u8 stage_flag)
+{
+ int retval;
+
+ retval =
+ rts51x_transfer_data(chip, pipe, buf, len, use_sg, act_len,
+ timeout);
+
+ return retval;
+
+}
diff --git a/drivers/staging/rts5139/rts51x_chip.h b/drivers/staging/rts5139/rts51x_chip.h
new file mode 100644
index 00000000000..321ece750ed
--- /dev/null
+++ b/drivers/staging/rts5139/rts51x_chip.h
@@ -0,0 +1,904 @@
+/* Driver for Realtek RTS51xx USB card reader
+ * Header file
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_CHIP_H
+#define __RTS51X_CHIP_H
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <scsi/scsi_host.h>
+
+#include "trace.h"
+
+#define SUPPORT_CPRM
+#define SUPPORT_MAGIC_GATE
+#define SUPPORT_MSXC
+/* #define LED_AUTO_BLINK */
+
+/* { wwang, 2010-07-26
+ * Add support for SD lock/unlock */
+/* #define SUPPORT_SD_LOCK */
+/* } wwang, 2010-07-26 */
+
+#ifdef SUPPORT_MAGIC_GA
+/* Using NORMAL_WRITE instead of AUTO_WRITE to set ICVTE */
+#define MG_SET_ICV_SLOW
+#endif
+
+#ifdef SUPPORT_MSXC
+#define XC_POWERCLASS
+#define SUPPORT_PCGL_1P18
+#endif
+
+#define GET_CARD_STATUS_USING_EPC
+
+#define CLOSE_SSC_POWER
+
+#define SUPPORT_OCP
+
+#define MS_SPEEDUP
+/* #define XD_SPEEDUP */
+
+#define SD_XD_IO_FOLLOW_PWR
+
+#define SD_NR 2
+#define MS_NR 3
+#define XD_NR 4
+#define SD_CARD (1 << SD_NR)
+#define MS_CARD (1 << MS_NR)
+#define XD_CARD (1 << XD_NR)
+
+#define SD_CD 0x01
+#define MS_CD 0x02
+#define XD_CD 0x04
+#define SD_WP 0x08
+
+#define MAX_ALLOWED_LUN_CNT 8
+#define CMD_BUF_LEN 1024
+#define RSP_BUF_LEN 1024
+#define POLLING_INTERVAL 50 /* 50ms */
+
+#define XD_FREE_TABLE_CNT 1200
+#define MS_FREE_TABLE_CNT 512
+
+/* Bit Operation */
+#define SET_BIT(data, idx) ((data) |= 1 << (idx))
+#define CLR_BIT(data, idx) ((data) &= ~(1 << (idx)))
+#define CHK_BIT(data, idx) ((data) & (1 << (idx)))
+
+/* Command type */
+#define READ_REG_CMD 0
+#define WRITE_REG_CMD 1
+#define CHECK_REG_CMD 2
+
+#define PACKET_TYPE 4
+#define CNT_H 5
+#define CNT_L 6
+#define STAGE_FLAG 7
+#define CMD_OFFSET 8
+
+/* Packet type */
+#define BATCH_CMD 0
+#define SEQ_READ 1
+#define SEQ_WRITE 2
+
+/* Stage flag */
+#define STAGE_R 0x01
+#define STAGE_DI 0x02
+#define STAGE_DO 0x04
+/* Return MS_TRANS_CFG, GET_INT */
+#define STAGE_MS_STATUS 0x08
+/* Return XD_CFG, XD_CTL, XD_PAGE_STATUS */
+#define STAGE_XD_STATUS 0x10
+/* Command stage mode */
+#define MODE_C 0x00
+#define MODE_CR (STAGE_R)
+#define MODE_CDIR (STAGE_R | STAGE_DI)
+#define MODE_CDOR (STAGE_R | STAGE_DO)
+
+/* Function return code */
+#ifndef STATUS_SUCCESS
+#define STATUS_SUCCESS 0
+#endif
+
+#define STATUS_FAIL 1
+#define STATUS_READ_FAIL 2
+#define STATUS_WRITE_FAIL 3
+#define STATUS_TIMEDOUT 4
+#define STATUS_NOMEM 5
+#define STATUS_TRANS_SHORT 6
+#define STATUS_TRANS_LONG 7
+#define STATUS_STALLED 8
+#define STATUS_ERROR 10
+
+#define IDLE_MAX_COUNT 10
+#define POLLING_WAIT_CNT 1
+#define DELINK_DELAY 100
+#define LED_TOGGLE_INTERVAL 6
+#define LED_GPIO 0
+
+/* package */
+#define QFN24 0
+#define LQFP48 1
+
+#define USB_11 0
+#define USB_20 1
+
+/*
+ * Transport return codes
+ */
+/* Transport good, command good */
+#define TRANSPORT_GOOD 0
+/* Transport good, command failed */
+#define TRANSPORT_FAILED 1
+/* Command failed, no auto-sense */
+#define TRANSPORT_NO_SENSE 2
+/* Transport bad (i.e. device dead) */
+#define TRANSPORT_ERROR 3
+
+/* Supported Clock */
+enum card_clock { CLK_20 = 1, CLK_30, CLK_40, CLK_50, CLK_60, CLK_80, CLK_100 };
+
+#ifdef _MSG_TRACE
+
+#define TRACE_ITEM_CNT 64
+
+struct trace_msg_t {
+ u16 line;
+#define MSG_FUNC_LEN 64
+ char func[MSG_FUNC_LEN];
+#define MSG_FILE_LEN 32
+ char file[MSG_FILE_LEN];
+#define TIME_VAL_LEN 16
+ u8 timeval_buf[TIME_VAL_LEN];
+ u8 valid;
+};
+
+#endif /* _MSG_TRACE */
+
+/* Size of the autosense data buffer */
+#define SENSE_SIZE 18
+
+/* Sense type */
+#define SENSE_TYPE_NO_SENSE 0
+#define SENSE_TYPE_MEDIA_CHANGE 1
+#define SENSE_TYPE_MEDIA_NOT_PRESENT 2
+#define SENSE_TYPE_MEDIA_LBA_OVER_RANGE 3
+#define SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT 4
+#define SENSE_TYPE_MEDIA_WRITE_PROTECT 5
+#define SENSE_TYPE_MEDIA_INVALID_CMD_FIELD 6
+#define SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR 7
+#define SENSE_TYPE_MEDIA_WRITE_ERR 8
+#define SENSE_TYPE_FORMAT_IN_PROGRESS 9
+#define SENSE_TYPE_FORMAT_CMD_FAILED 10
+#ifdef SUPPORT_MAGIC_GATE
+/* COPY PROTECTION KEY EXCHANGE FAILURE - KEY NOT ESTABLISHED */
+#define SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB 0x0b
+/* COPY PROTECTION KEY EXCHANGE FAILURE - AUTHENTICATION FAILURE */
+#define SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN 0x0c
+/* INCOMPATIBLE MEDIUM INSTALLED */
+#define SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM 0x0d
+/* WRITE ERROR */
+#define SENSE_TYPE_MG_WRITE_ERR 0x0e
+#endif
+#ifdef SUPPORT_SD_LOCK
+/* FOR Locked SD card */
+#define SENSE_TYPE_MEDIA_READ_FORBIDDEN 0x10
+#endif
+
+/*---- sense key ----*/
+#define ILI 0x20 /* ILI bit is on */
+
+#define NO_SENSE 0x00 /* not exist sense key */
+#define RECOVER_ERR 0x01 /* Target/Logical unit is recoverd */
+#define NOT_READY 0x02 /* Logical unit is not ready */
+#define MEDIA_ERR 0x03 /* medium/data error */
+#define HARDWARE_ERR 0x04 /* hardware error */
+#define ILGAL_REQ 0x05 /* CDB/parameter/identify msg error */
+#define UNIT_ATTENTION 0x06 /* unit attention condition occur */
+#define DAT_PRTCT 0x07 /* read/write is desable */
+#define BLNC_CHK 0x08 /* find blank/DOF in read */
+ /* write to unblank area */
+#define CPY_ABRT 0x0a /* Copy/Compare/Copy&Verify illgal */
+#define ABRT_CMD 0x0b /* Target make the command in error */
+#define EQUAL 0x0c /* Search Data end with Equal */
+#define VLM_OVRFLW 0x0d /* Some data are left in buffer */
+#define MISCMP 0x0e /* find inequality */
+
+/*-----------------------------------
+ SENSE_DATA
+-----------------------------------*/
+/*---- valid ----*/
+#define SENSE_VALID 0x80 /* Sense data is valid as SCSI2 */
+#define SENSE_INVALID 0x00 /* Sense data is invalid as SCSI2 */
+
+/*---- error code ----*/
+#define CUR_ERR 0x70 /* current error */
+#define DEF_ERR 0x71 /* specific command error */
+
+/*---- sense key Infomation ----*/
+#define SNSKEYINFO_LEN 3 /* length of sense key infomation */
+
+#define SKSV 0x80
+#define CDB_ILLEGAL 0x40
+#define DAT_ILLEGAL 0x00
+#define BPV 0x08
+#define BIT_ILLEGAL0 0 /* bit0 is illegal */
+#define BIT_ILLEGAL1 1 /* bit1 is illegal */
+#define BIT_ILLEGAL2 2 /* bit2 is illegal */
+#define BIT_ILLEGAL3 3 /* bit3 is illegal */
+#define BIT_ILLEGAL4 4 /* bit4 is illegal */
+#define BIT_ILLEGAL5 5 /* bit5 is illegal */
+#define BIT_ILLEGAL6 6 /* bit6 is illegal */
+#define BIT_ILLEGAL7 7 /* bit7 is illegal */
+
+/*---- ASC ----*/
+#define ASC_NO_INFO 0x00
+#define ASC_MISCMP 0x1d
+#define ASC_INVLD_CDB 0x24
+#define ASC_INVLD_PARA 0x26
+#define ASC_LU_NOT_READY 0x04
+#define ASC_WRITE_ERR 0x0c
+#define ASC_READ_ERR 0x11
+#define ASC_LOAD_EJCT_ERR 0x53
+#define ASC_MEDIA_NOT_PRESENT 0x3A
+#define ASC_MEDIA_CHANGED 0x28
+#define ASC_MEDIA_IN_PROCESS 0x04
+#define ASC_WRITE_PROTECT 0x27
+#define ASC_LUN_NOT_SUPPORTED 0x25
+
+/*---- ASQC ----*/
+#define ASCQ_NO_INFO 0x00
+#define ASCQ_MEDIA_IN_PROCESS 0x01
+#define ASCQ_MISCMP 0x00
+#define ASCQ_INVLD_CDB 0x00
+#define ASCQ_INVLD_PARA 0x02
+#define ASCQ_LU_NOT_READY 0x02
+#define ASCQ_WRITE_ERR 0x02
+#define ASCQ_READ_ERR 0x00
+#define ASCQ_LOAD_EJCT_ERR 0x00
+#define ASCQ_WRITE_PROTECT 0x00
+
+struct sense_data_t {
+ unsigned char err_code; /* error code */
+ /* bit7 : valid */
+ /* (1 : SCSI2) */
+ /* (0 : Vendor specific) */
+ /* bit6-0 : error code */
+ /* (0x70 : current error) */
+ /* (0x71 : specific command error) */
+ unsigned char seg_no; /* segment No. */
+ unsigned char sense_key; /* byte5 : ILI */
+ /* bit3-0 : sense key */
+ unsigned char info[4]; /* infomation */
+ unsigned char ad_sense_len; /* additional sense data length */
+ unsigned char cmd_info[4]; /* command specific infomation */
+ unsigned char asc; /* ASC */
+ unsigned char ascq; /* ASCQ */
+ unsigned char rfu; /* FRU */
+ unsigned char sns_key_info[3]; /* sense key specific infomation */
+};
+
+/* sd_ctl bit map */
+/* SD push point control, bit 0, 1 */
+#define SD_PUSH_POINT_CTL_MASK 0x03
+#define SD_PUSH_POINT_DELAY 0x01
+#define SD_PUSH_POINT_AUTO 0x02
+/* SD sample point control, bit 2, 3 */
+#define SD_SAMPLE_POINT_CTL_MASK 0x0C
+#define SD_SAMPLE_POINT_DELAY 0x04
+#define SD_SAMPLE_POINT_AUTO 0x08
+/* SD DDR Tx phase set by user, bit 4 */
+#define SD_DDR_TX_PHASE_SET_BY_USER 0x10
+/* MMC DDR Tx phase set by user, bit 5 */
+#define MMC_DDR_TX_PHASE_SET_BY_USER 0x20
+/* Support MMC DDR mode, bit 6 */
+/*#define SUPPORT_MMC_DDR_MODE 0x40 */
+#define SUPPORT_UHS50_MMC44 0x40
+
+struct rts51x_option {
+ u8 led_blink_speed;
+
+ int mspro_formatter_enable;
+
+ /* card clock expected by user for fpga platform */
+ int fpga_sd_sdr104_clk;
+ int fpga_sd_ddr50_clk;
+ int fpga_sd_sdr50_clk;
+ int fpga_sd_hs_clk;
+ int fpga_mmc_52m_clk;
+ int fpga_ms_hg_clk;
+ int fpga_ms_4bit_clk;
+
+ /* card clock expected by user for asic platform */
+ int asic_sd_sdr104_clk;
+ int asic_sd_ddr50_clk;
+ int asic_sd_sdr50_clk;
+ int asic_sd_hs_clk;
+ int asic_mmc_52m_clk;
+ int asic_ms_hg_clk;
+ int asic_ms_4bit_clk;
+
+ u8 ssc_depth_sd_sdr104; /* sw */
+ u8 ssc_depth_sd_ddr50; /* sw */
+ u8 ssc_depth_sd_sdr50; /* sw */
+ u8 ssc_depth_sd_hs; /* sw */
+ u8 ssc_depth_mmc_52m; /* sw */
+ u8 ssc_depth_ms_hg; /* sw */
+ u8 ssc_depth_ms_4bit; /* sw */
+ u8 ssc_depth_low_speed; /* sw */
+
+ /* SD/MMC Tx phase */
+ int sd_ddr_tx_phase; /* Enabled by bit 4 of sd_ctl */
+ int mmc_ddr_tx_phase; /* Enabled by bit 5 of sd_ctl */
+
+ /* priority of choosing sd speed funciton */
+ u32 sd_speed_prior;
+
+ /* sd card control */
+ u32 sd_ctl;
+
+ /* Enable Selective Suspend */
+ int ss_en;
+ /* Interval to enter SS from IDLE state (second) */
+ int ss_delay;
+ int needs_remote_wakeup;
+ u8 ww_enable; /* sangdy2010-08-03:add for remote wakeup */
+
+ /* Enable SSC clock */
+ int ssc_en;
+
+ int auto_delink_en;
+
+ /* sangdy2010-07-13:add FT2 fast mode */
+ int FT2_fast_mode;
+ /* sangdy2010-07-15:
+ * add for config delay between 1/4 PMOS and 3/4 PMOS */
+ int pwr_delay;
+
+ int xd_rw_step; /* add to tune xd tRP */
+ int D3318_off_delay; /* add to tune D3318 off delay time */
+ int delink_delay; /* add to tune delink delay time */
+ /* add for rts5129 to enable/disable D3318 off */
+ u8 rts5129_D3318_off_enable;
+ u8 sd20_pad_drive; /* add to config SD20 PAD drive */
+ u8 sd30_pad_drive; /* add to config SD30 pad drive */
+ /*if reset or rw fail,then set SD20 pad drive again */
+ u8 reset_or_rw_fail_set_pad_drive;
+
+ u8 rcc_fail_flag; /* add to indicate whether rcc bug happen */
+ u8 rcc_bug_fix_en; /* if set,then support fixing rcc bug */
+ u8 debounce_num; /* debounce number */
+ int polling_time; /* polling delay time */
+ u8 led_toggle_interval; /* used to control led toggle speed */
+ int xd_rwn_step;
+ u8 sd_send_status_en;
+ /* used to store default phase which is
+ * used when phase tune all pass. */
+ u8 ddr50_tx_phase;
+ u8 ddr50_rx_phase;
+ u8 sdr50_tx_phase;
+ u8 sdr50_rx_phase;
+ /* used to enable select sdr50 tx phase according to proportion. */
+ u8 sdr50_phase_sel;
+ u8 ms_errreg_fix;
+ u8 reset_mmc_first;
+ u8 speed_mmc; /* when set, then try CMD55 only twice */
+ u8 led_always_on; /* if set, then led always on when card exist */
+ u8 dv18_voltage; /* add to tune dv18 voltage */
+};
+
+#define MS_FORMATTER_ENABLED(chip) ((chip)->option.mspro_formatter_enable)
+
+struct rts51x_chip;
+
+typedef int (*card_rw_func) (struct scsi_cmnd *srb, struct rts51x_chip *chip,
+ u32 sec_addr, u16 sec_cnt);
+
+/* For MS Card */
+#define MAX_DEFECTIVE_BLOCK 10
+
+struct zone_entry {
+ u16 *l2p_table;
+ u16 *free_table;
+ u16 defect_list[MAX_DEFECTIVE_BLOCK]; /* For MS card only */
+ int set_index;
+ int get_index;
+ int unused_blk_cnt;
+ int disable_count;
+ /* To indicate whether the L2P table of this zone has been built. */
+ int build_flag;
+};
+
+struct xd_delay_write_tag {
+ u32 old_phyblock;
+ u32 new_phyblock;
+ u32 logblock;
+ u8 pageoff;
+ u8 delay_write_flag;
+};
+
+struct xd_info {
+ u8 maker_code;
+ u8 device_code;
+ u8 block_shift;
+ u8 page_off;
+ u8 addr_cycle;
+ u16 cis_block;
+ u8 multi_flag;
+ u8 err_code;
+ u32 capacity;
+
+ struct zone_entry *zone;
+ int zone_cnt;
+
+ struct xd_delay_write_tag delay_write;
+
+ int counter;
+
+ int xd_clock;
+};
+
+#define TYPE_SD 0x0000
+#define TYPE_MMC 0x0001
+
+/* TYPE_SD */
+#define SD_HS 0x0100
+#define SD_SDR50 0x0200
+#define SD_DDR50 0x0400
+#define SD_SDR104 0x0800
+#define SD_HCXC 0x1000
+
+/* TYPE_MMC */
+#define MMC_26M 0x0100
+#define MMC_52M 0x0200
+#define MMC_4BIT 0x0400
+#define MMC_8BIT 0x0800
+#define MMC_SECTOR_MODE 0x1000
+#define MMC_DDR52 0x2000
+
+/* SD card */
+#define CHK_SD(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_SD)
+#define CHK_SD_HS(sd_card) \
+ (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HS))
+#define CHK_SD_SDR50(sd_card) \
+ (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR50))
+#define CHK_SD_DDR50(sd_card) \
+ (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_DDR50))
+#define CHK_SD_SDR104(sd_card) \
+ (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR104))
+#define CHK_SD_HCXC(sd_card) \
+ (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HCXC))
+#define CHK_SD30_SPEED(sd_card) \
+ (CHK_SD_SDR50(sd_card) || CHK_SD_DDR50(sd_card) ||\
+ CHK_SD_SDR104(sd_card))
+
+#define SET_SD(sd_card) ((sd_card)->sd_type = TYPE_SD)
+#define SET_SD_HS(sd_card) ((sd_card)->sd_type |= SD_HS)
+#define SET_SD_SDR50(sd_card) ((sd_card)->sd_type |= SD_SDR50)
+#define SET_SD_DDR50(sd_card) ((sd_card)->sd_type |= SD_DDR50)
+#define SET_SD_SDR104(sd_card) ((sd_card)->sd_type |= SD_SDR104)
+#define SET_SD_HCXC(sd_card) ((sd_card)->sd_type |= SD_HCXC)
+
+#define CLR_SD_HS(sd_card) ((sd_card)->sd_type &= ~SD_HS)
+#define CLR_SD_SDR50(sd_card) ((sd_card)->sd_type &= ~SD_SDR50)
+#define CLR_SD_DDR50(sd_card) ((sd_card)->sd_type &= ~SD_DDR50)
+#define CLR_SD_SDR104(sd_card) ((sd_card)->sd_type &= ~SD_SDR104)
+#define CLR_SD_HCXC(sd_card) ((sd_card)->sd_type &= ~SD_HCXC)
+#define CLR_SD30_SPEED(sd_card) \
+ ((sd_card)->sd_type &= ~(SD_SDR50|SD_DDR50|SD_SDR104))
+
+/* MMC card */
+#define CHK_MMC(sd_card) \
+ (((sd_card)->sd_type & 0xFF) == TYPE_MMC)
+#define CHK_MMC_26M(sd_card) \
+ (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_26M))
+#define CHK_MMC_52M(sd_card) \
+ (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_52M))
+#define CHK_MMC_4BIT(sd_card) \
+ (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_4BIT))
+#define CHK_MMC_8BIT(sd_card) \
+ (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_8BIT))
+#define CHK_MMC_SECTOR_MODE(sd_card)\
+ (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_SECTOR_MODE))
+#define CHK_MMC_DDR52(sd_card) \
+ (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_DDR52))
+
+#define SET_MMC(sd_card) ((sd_card)->sd_type = TYPE_MMC)
+#define SET_MMC_26M(sd_card) ((sd_card)->sd_type |= MMC_26M)
+#define SET_MMC_52M(sd_card) ((sd_card)->sd_type |= MMC_52M)
+#define SET_MMC_4BIT(sd_card) ((sd_card)->sd_type |= MMC_4BIT)
+#define SET_MMC_8BIT(sd_card) ((sd_card)->sd_type |= MMC_8BIT)
+#define SET_MMC_SECTOR_MODE(sd_card) ((sd_card)->sd_type |= MMC_SECTOR_MODE)
+#define SET_MMC_DDR52(sd_card) ((sd_card)->sd_type |= MMC_DDR52)
+
+#define CLR_MMC_26M(sd_card) ((sd_card)->sd_type &= ~MMC_26M)
+#define CLR_MMC_52M(sd_card) ((sd_card)->sd_type &= ~MMC_52M)
+#define CLR_MMC_4BIT(sd_card) ((sd_card)->sd_type &= ~MMC_4BIT)
+#define CLR_MMC_8BIT(sd_card) ((sd_card)->sd_type &= ~MMC_8BIT)
+#define CLR_MMC_SECTOR_MODE(sd_card) ((sd_card)->sd_type &= ~MMC_SECTOR_MODE)
+#define CLR_MMC_DDR52(sd_card) ((sd_card)->sd_type &= ~MMC_DDR52)
+
+#define CHK_MMC_HS(sd_card) \
+ (CHK_MMC_52M(sd_card) && CHK_MMC_26M(sd_card))
+#define CLR_MMC_HS(sd_card) \
+do { \
+ CLR_MMC_DDR52(sd_card); \
+ CLR_MMC_52M(sd_card); \
+ CLR_MMC_26M(sd_card); \
+} while (0)
+
+#define SD_SUPPORT_CLASS_TEN 0x01
+#define SD_SUPPORT_1V8 0x02
+
+#define SD_SET_CLASS_TEN(sd_card) \
+ ((sd_card)->sd_setting |= SD_SUPPORT_CLASS_TEN)
+#define SD_CHK_CLASS_TEN(sd_card) \
+ ((sd_card)->sd_setting & SD_SUPPORT_CLASS_TEN)
+#define SD_CLR_CLASS_TEN(sd_card) \
+ ((sd_card)->sd_setting &= ~SD_SUPPORT_CLASS_TEN)
+#define SD_SET_1V8(sd_card) \
+ ((sd_card)->sd_setting |= SD_SUPPORT_1V8)
+#define SD_CHK_1V8(sd_card) \
+ ((sd_card)->sd_setting & SD_SUPPORT_1V8)
+#define SD_CLR_1V8(sd_card) \
+ ((sd_card)->sd_setting &= ~SD_SUPPORT_1V8)
+#define CLR_RETRY_SD20_MODE(sd_card) \
+ ((sd_card)->retry_SD20_mode = 0)
+#define SET_RETRY_SD20_MODE(sd_card) \
+ ((sd_card)->retry_SD20_mode = 1)
+#define CHK_RETRY_SD20_MODE(sd_card) \
+ ((sd_card)->retry_SD20_mode == 1)
+
+struct sd_info {
+ u16 sd_type;
+ u8 err_code;
+ u8 sd_data_buf_ready;
+ u32 sd_addr;
+ u32 capacity;
+
+ u8 raw_csd[16];
+ u8 raw_scr[8];
+
+ /* Sequential RW */
+ int seq_mode;
+ enum dma_data_direction pre_dir;
+ u32 pre_sec_addr;
+ u16 pre_sec_cnt;
+
+ int counter;
+
+ int sd_clock;
+
+#ifdef SUPPORT_CPRM
+ int sd_pass_thru_en;
+ int pre_cmd_err;
+ u8 last_rsp_type;
+ u8 rsp[17];
+#endif
+
+ u8 func_group1_mask;
+ u8 func_group2_mask;
+ u8 func_group3_mask;
+ u8 func_group4_mask;
+
+ u8 sd_switch_fail;
+ u8 sd_read_phase;
+ u8 retry_SD20_mode; /* sangdy2010-06-10 */
+ u8 sd_reset_fail; /* sangdy2010-07-01 */
+ u8 sd_send_status_en;
+
+#ifdef SUPPORT_SD_LOCK
+ u8 sd_lock_status;
+ u8 sd_erase_status;
+ u8 sd_lock_notify;
+#endif
+};
+
+#define MODE_512_SEQ 0x01
+#define MODE_2K_SEQ 0x02
+
+#define TYPE_MS 0x0000
+#define TYPE_MSPRO 0x0001
+
+#define MS_4BIT 0x0100
+#define MS_8BIT 0x0200
+#define MS_HG 0x0400
+#define MS_XC 0x0800
+
+#define HG8BIT (MS_HG | MS_8BIT)
+
+#define CHK_MSPRO(ms_card) \
+ (((ms_card)->ms_type & 0xFF) == TYPE_MSPRO)
+#define CHK_HG8BIT(ms_card) \
+ (CHK_MSPRO(ms_card) && (((ms_card)->ms_type & HG8BIT) == HG8BIT))
+#define CHK_MSXC(ms_card) \
+ (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_XC))
+#define CHK_MSHG(ms_card) \
+ (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_HG))
+
+#define CHK_MS8BIT(ms_card) (((ms_card)->ms_type & MS_8BIT))
+#define CHK_MS4BIT(ms_card) (((ms_card)->ms_type & MS_4BIT))
+
+struct ms_delay_write_tag {
+ u16 old_phyblock;
+ u16 new_phyblock;
+ u16 logblock;
+ u8 pageoff;
+ u8 delay_write_flag;
+};
+
+struct ms_info {
+ u16 ms_type;
+ u8 block_shift;
+ u8 page_off;
+ u16 total_block;
+ u16 boot_block;
+ u32 capacity;
+
+ u8 check_ms_flow;
+ u8 switch_8bit_fail;
+ u8 err_code;
+
+ struct zone_entry *segment;
+ int segment_cnt;
+
+ int pro_under_formatting;
+ int format_status;
+ u16 progress;
+ u8 raw_sys_info[96];
+#ifdef SUPPORT_PCGL_1P18
+ u8 raw_model_name[48];
+#endif
+
+ u8 multi_flag;
+
+ /* Sequential RW */
+ u8 seq_mode;
+ enum dma_data_direction pre_dir;
+ u32 pre_sec_addr;
+ u16 pre_sec_cnt;
+ u32 total_sec_cnt;
+ u8 last_rw_int;
+
+ struct ms_delay_write_tag delay_write;
+
+ int counter;
+
+ int ms_clock;
+
+#ifdef SUPPORT_MAGIC_GATE
+ u8 magic_gate_id[16];
+ u8 mg_entry_num;
+ int mg_auth; /* flag to indicate authentication process */
+#endif
+};
+
+#define PRO_UNDER_FORMATTING(ms_card) \
+ ((ms_card)->pro_under_formatting)
+#define SET_FORMAT_STATUS(ms_card, status) \
+ ((ms_card)->format_status = (status))
+#define CHK_FORMAT_STATUS(ms_card, status) \
+ ((ms_card)->format_status == (status))
+
+struct scsi_cmnd;
+
+enum CHIP_STAT { STAT_INIT, STAT_IDLE, STAT_RUN, STAT_SS_PRE, STAT_SS,
+ STAT_SUSPEND };
+
+struct rts51x_chip {
+ u16 vendor_id;
+ u16 product_id;
+ char max_lun;
+
+ struct scsi_cmnd *srb;
+ struct sense_data_t sense_buffer[MAX_ALLOWED_LUN_CNT];
+
+#ifndef LED_AUTO_BLINK
+ int led_toggle_counter;
+#endif
+ int ss_counter;
+ int idle_counter;
+ int auto_delink_counter;
+ enum CHIP_STAT chip_stat;
+
+ int resume_from_scsi;
+
+ /* Card information */
+ struct xd_info xd_card;
+ struct sd_info sd_card;
+ struct ms_info ms_card;
+
+ int cur_clk; /* current card clock */
+ int cur_card; /* Current card module */
+
+ u8 card_exist; /* card exist bit map (physical exist) */
+ u8 card_ready; /* card ready bit map (reset successfully) */
+ u8 card_fail; /* card reset fail bit map */
+ u8 card_ejected; /* card ejected bit map */
+ u8 card_wp; /* card write protected bit map */
+
+ u8 fake_card_ready;
+ /* flag to indicate whether to answer MediaChange */
+ unsigned long lun_mc;
+
+ /* card bus width */
+ u8 card_bus_width[MAX_ALLOWED_LUN_CNT];
+ /* card capacity */
+ u32 capacity[MAX_ALLOWED_LUN_CNT];
+
+ /* read/write card function pointer */
+ card_rw_func rw_card[MAX_ALLOWED_LUN_CNT];
+ /* read/write capacity, used for GPIO Toggle */
+ u32 rw_cap[MAX_ALLOWED_LUN_CNT];
+ /* card to lun mapping table */
+ u8 card2lun[32];
+ /* lun to card mapping table */
+ u8 lun2card[MAX_ALLOWED_LUN_CNT];
+
+#ifdef _MSG_TRACE
+ struct trace_msg_t trace_msg[TRACE_ITEM_CNT];
+ int msg_idx;
+#endif
+
+ int rw_need_retry;
+
+ /* ASIC or FPGA */
+ int asic_code;
+
+ /* QFN24 or LQFP48 */
+ int package;
+
+ /* Full Speed or High Speed */
+ int usb_speed;
+
+ /*sangdy:enable or disable UHS50 and MMC4.4 */
+ int uhs50_mmc44_en;
+
+ u8 ic_version;
+
+ /* Command buffer */
+ u8 *cmd_buf;
+ unsigned int cmd_idx;
+ /* Response buffer */
+ u8 *rsp_buf;
+
+ u16 card_status;
+
+#ifdef SUPPORT_OCP
+ u16 ocp_stat;
+#endif
+
+ struct rts51x_option option;
+ struct rts51x_usb *usb;
+
+ u8 rcc_read_response;
+ int reset_need_retry;
+ u8 rts5179;
+};
+
+#define UHS50_EN 0x0001
+#define UHS50_DIS 0x0000
+#define SET_UHS50(chip) ((chip)->uhs50_mmc44_en = UHS50_EN)
+#define CLEAR_UHS50(chip) ((chip)->uhs50_mmc44_en = UHS50_DIS)
+#define CHECK_UHS50(chip) (((chip)->uhs50_mmc44_en&0xff) == UHS50_EN)
+
+#define RTS51X_GET_VID(chip) ((chip)->vendor_id)
+#define RTS51X_GET_PID(chip) ((chip)->product_id)
+
+#define RTS51X_SET_STAT(chip, stat) \
+do { \
+ if ((stat) != STAT_IDLE) { \
+ (chip)->idle_counter = 0; \
+ } \
+ (chip)->chip_stat = (enum CHIP_STAT)(stat); \
+} while (0)
+#define RTS51X_CHK_STAT(chip, stat) ((chip)->chip_stat == (stat))
+#define RTS51X_GET_STAT(chip) ((chip)->chip_stat)
+
+#define CHECK_PID(chip, pid) (RTS51X_GET_PID(chip) == (pid))
+#define CHECK_PKG(chip, pkg) ((chip)->package == (pkg))
+#define CHECK_USB(chip, speed) ((chip)->usb_speed == (speed))
+
+int rts51x_reset_chip(struct rts51x_chip *chip);
+int rts51x_init_chip(struct rts51x_chip *chip);
+int rts51x_release_chip(struct rts51x_chip *chip);
+void rts51x_polling_func(struct rts51x_chip *chip);
+
+static inline void rts51x_init_cmd(struct rts51x_chip *chip)
+{
+ chip->cmd_idx = 0;
+ chip->cmd_buf[0] = 'R';
+ chip->cmd_buf[1] = 'T';
+ chip->cmd_buf[2] = 'C';
+ chip->cmd_buf[3] = 'R';
+ chip->cmd_buf[PACKET_TYPE] = BATCH_CMD;
+}
+
+void rts51x_add_cmd(struct rts51x_chip *chip,
+ u8 cmd_type, u16 reg_addr, u8 mask, u8 data);
+int rts51x_send_cmd(struct rts51x_chip *chip, u8 flag, int timeout);
+int rts51x_get_rsp(struct rts51x_chip *chip, int rsp_len, int timeout);
+
+static inline void rts51x_read_rsp_buf(struct rts51x_chip *chip, int offset,
+ u8 *buf, int buf_len)
+{
+ memcpy(buf, chip->rsp_buf + offset, buf_len);
+}
+
+static inline u8 *rts51x_get_rsp_data(struct rts51x_chip *chip)
+{
+ return chip->rsp_buf;
+}
+
+int rts51x_get_card_status(struct rts51x_chip *chip, u16 * status);
+int rts51x_write_register(struct rts51x_chip *chip, u16 addr, u8 mask, u8 data);
+int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 * data);
+int rts51x_ep0_write_register(struct rts51x_chip *chip, u16 addr, u8 mask,
+ u8 data);
+int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 * data);
+int rts51x_seq_write_register(struct rts51x_chip *chip, u16 addr, u16 len,
+ u8 *data);
+int rts51x_seq_read_register(struct rts51x_chip *chip, u16 addr, u16 len,
+ u8 *data);
+int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 *buf, int buf_len);
+int rts51x_write_ppbuf(struct rts51x_chip *chip, u8 *buf, int buf_len);
+int rts51x_write_phy_register(struct rts51x_chip *chip, u8 addr, u8 val);
+int rts51x_read_phy_register(struct rts51x_chip *chip, u8 addr, u8 *val);
+void rts51x_do_before_power_down(struct rts51x_chip *chip);
+void rts51x_clear_hw_error(struct rts51x_chip *chip);
+void rts51x_prepare_run(struct rts51x_chip *chip);
+void rts51x_trace_msg(struct rts51x_chip *chip, unsigned char *buf, int clear);
+void rts51x_pp_status(struct rts51x_chip *chip, unsigned int lun, u8 *status,
+ u8 status_len);
+void rts51x_read_status(struct rts51x_chip *chip, unsigned int lun,
+ u8 *rts51x_status, u8 status_len);
+int rts51x_transfer_data_rcc(struct rts51x_chip *chip, unsigned int pipe,
+ void *buf, unsigned int len, int use_sg,
+ unsigned int *act_len, int timeout, u8 stage_flag);
+
+#define RTS51X_WRITE_REG(chip, addr, mask, data) \
+do { \
+ int _retval = rts51x_write_register((chip), \
+ (addr), (mask), (data)); \
+ if (_retval != STATUS_SUCCESS) { \
+ TRACE_RET((chip), _retval); \
+ } \
+} while (0)
+
+#define RTS51X_READ_REG(chip, addr, data) \
+do { \
+ int _retval = rts51x_read_register((chip), \
+ (addr), (data)); \
+ if (_retval != STATUS_SUCCESS) { \
+ TRACE_RET((chip), _retval); \
+ } \
+} while (0)
+
+#endif /* __RTS51X_CHIP_H */
diff --git a/drivers/staging/rts5139/rts51x_fop.c b/drivers/staging/rts5139/rts51x_fop.c
new file mode 100644
index 00000000000..6eaebb6223c
--- /dev/null
+++ b/drivers/staging/rts5139/rts51x_fop.c
@@ -0,0 +1,298 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include "rts51x.h"
+
+#ifdef SUPPORT_FILE_OP
+
+#include <linux/types.h>
+#include <linux/stat.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+
+#include "rts51x_chip.h"
+#include "rts51x_card.h"
+#include "rts51x_fop.h"
+#include "sd_cprm.h"
+#include "rts51x.h"
+
+#define RTS5139_IOC_MAGIC 0x39
+
+#define RTS5139_IOC_SD_DIRECT _IOWR(RTS5139_IOC_MAGIC, 0xA0, int)
+#define RTS5139_IOC_SD_GET_RSP _IOWR(RTS5139_IOC_MAGIC, 0xA1, int)
+
+static int rts51x_sd_direct_cmnd(struct rts51x_chip *chip,
+ struct sd_direct_cmnd *cmnd)
+{
+ int retval;
+ u8 dir, cmd12, standby, acmd, cmd_idx, rsp_code;
+ u8 *buf;
+ u32 arg, len;
+
+ dir = (cmnd->cmnd[0] >> 3) & 0x03;
+ cmd12 = (cmnd->cmnd[0] >> 2) & 0x01;
+ standby = (cmnd->cmnd[0] >> 1) & 0x01;
+ acmd = cmnd->cmnd[0] & 0x01;
+ cmd_idx = cmnd->cmnd[1];
+ arg = ((u32) (cmnd->cmnd[2]) << 24) | ((u32) (cmnd->cmnd[3]) << 16) |
+ ((u32) (cmnd->cmnd[4]) << 8) | cmnd->cmnd[5];
+ len =
+ ((u32) (cmnd->cmnd[6]) << 16) | ((u32) (cmnd->cmnd[7]) << 8) |
+ cmnd->cmnd[8];
+ rsp_code = cmnd->cmnd[9];
+
+ if (dir) {
+ if (!cmnd->buf || (cmnd->buf_len < len))
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ switch (dir) {
+ case 0:
+ /* No data */
+ retval = ext_sd_execute_no_data(chip, chip->card2lun[SD_CARD],
+ cmd_idx, standby, acmd,
+ rsp_code, arg);
+ if (retval != TRANSPORT_GOOD)
+ TRACE_RET(chip, STATUS_FAIL);
+ break;
+
+ case 1:
+ /* Read from card */
+ buf = kmalloc(cmnd->buf_len, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ retval = ext_sd_execute_read_data(chip, chip->card2lun[SD_CARD],
+ cmd_idx, cmd12, standby, acmd,
+ rsp_code, arg, len, buf,
+ cmnd->buf_len, 0);
+ if (retval != TRANSPORT_GOOD) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval =
+ copy_to_user((void *)cmnd->buf, (void *)buf, cmnd->buf_len);
+ if (retval) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_NOMEM);
+ }
+
+ kfree(buf);
+ break;
+
+ case 2:
+ /* Write to card */
+ buf = kmalloc(cmnd->buf_len, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ retval =
+ copy_from_user((void *)buf, (void *)cmnd->buf,
+ cmnd->buf_len);
+ if (retval) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_NOMEM);
+ }
+
+ retval =
+ ext_sd_execute_write_data(chip, chip->card2lun[SD_CARD],
+ cmd_idx, cmd12, standby, acmd,
+ rsp_code, arg, len, buf,
+ cmnd->buf_len, 0);
+ if (retval != TRANSPORT_GOOD) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ kfree(buf);
+
+ break;
+
+ default:
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int rts51x_sd_get_rsp(struct rts51x_chip *chip, struct sd_rsp *rsp)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int count = 0, retval;
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (sd_card->last_rsp_type == SD_RSP_TYPE_R0)
+ TRACE_RET(chip, STATUS_FAIL);
+ else if (sd_card->last_rsp_type == SD_RSP_TYPE_R2)
+ count = (rsp->rsp_len < 17) ? rsp->rsp_len : 17;
+ else
+ count = (rsp->rsp_len < 6) ? rsp->rsp_len : 6;
+
+ retval = copy_to_user((void *)rsp->rsp, (void *)sd_card->rsp, count);
+ if (retval)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ RTS51X_DEBUGP("Response length: %d\n", count);
+ RTS51X_DEBUGP("Response: 0x%x 0x%x 0x%x 0x%x\n",
+ sd_card->rsp[0], sd_card->rsp[1], sd_card->rsp[2],
+ sd_card->rsp[3]);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_open(struct inode *inode, struct file *filp)
+{
+ struct rts51x_chip *chip;
+ struct usb_interface *interface;
+ int subminor;
+ int retval = 0;
+
+ subminor = iminor(inode);
+
+ interface = usb_find_interface(&rts51x_driver, subminor);
+ if (!interface) {
+ RTS51X_DEBUGP("%s - error, can't find device for minor %d\n",
+ __func__, subminor);
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ chip = (struct rts51x_chip *)usb_get_intfdata(interface);
+ if (!chip) {
+ RTS51X_DEBUGP("Can't find chip\n");
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ /* Increase our reference to the host */
+ scsi_host_get(rts51x_to_host(chip));
+
+ /* lock the device pointers */
+ mutex_lock(&(chip->usb->dev_mutex));
+
+ /* save our object in the file's private structure */
+ filp->private_data = chip;
+
+ /* unlock the device pointers */
+ mutex_unlock(&chip->usb->dev_mutex);
+
+exit:
+ return retval;
+}
+
+int rts51x_release(struct inode *inode, struct file *filp)
+{
+ struct rts51x_chip *chip;
+
+ chip = (struct rts51x_chip *)filp->private_data;
+ if (chip == NULL)
+ return -ENODEV;
+
+ /* Drop our reference to the host; the SCSI core will free it
+ * (and "chip" along with it) when the refcount becomes 0. */
+ scsi_host_put(rts51x_to_host(chip));
+
+ return 0;
+}
+
+ssize_t rts51x_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ return 0;
+}
+
+ssize_t rts51x_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ return 0;
+}
+
+#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
+int rts51x_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+#else
+long rts51x_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ struct rts51x_chip *chip;
+ struct sd_direct_cmnd cmnd;
+ struct sd_rsp rsp;
+ int retval = 0;
+
+ chip = (struct rts51x_chip *)filp->private_data;
+ if (chip == NULL)
+ return -ENODEV;
+
+ /* lock the device pointers */
+ mutex_lock(&(chip->usb->dev_mutex));
+
+ switch (cmd) {
+ case RTS5139_IOC_SD_DIRECT:
+ retval =
+ copy_from_user((void *)&cmnd, (void *)arg,
+ sizeof(struct sd_direct_cmnd));
+ if (retval) {
+ retval = -ENOMEM;
+ TRACE_GOTO(chip, exit);
+ }
+ retval = rts51x_sd_direct_cmnd(chip, &cmnd);
+ if (retval != STATUS_SUCCESS) {
+ retval = -EIO;
+ TRACE_GOTO(chip, exit);
+ }
+ break;
+
+ case RTS5139_IOC_SD_GET_RSP:
+ retval =
+ copy_from_user((void *)&rsp, (void *)arg,
+ sizeof(struct sd_rsp));
+ if (retval) {
+ retval = -ENOMEM;
+ TRACE_GOTO(chip, exit);
+ }
+ retval = rts51x_sd_get_rsp(chip, &rsp);
+ if (retval != STATUS_SUCCESS) {
+ retval = -EIO;
+ TRACE_GOTO(chip, exit);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+exit:
+ /* unlock the device pointers */
+ mutex_unlock(&chip->usb->dev_mutex);
+
+ return retval;
+}
+
+#endif
diff --git a/drivers/staging/rts5139/rts51x_fop.h b/drivers/staging/rts5139/rts51x_fop.h
new file mode 100644
index 00000000000..0453f57d1a8
--- /dev/null
+++ b/drivers/staging/rts5139/rts51x_fop.h
@@ -0,0 +1,62 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_FOP_H
+#define __RTS51X_FOP_H
+
+#include "rts51x.h"
+
+#ifdef SUPPORT_FILE_OP
+
+#include <linux/fs.h>
+#include <linux/types.h>
+
+struct sd_direct_cmnd {
+ u8 cmnd[12];
+ void *buf;
+ int buf_len;
+};
+
+struct sd_rsp {
+ void *rsp;
+ int rsp_len;
+};
+
+int rts51x_open(struct inode *inode, struct file *filp);
+int rts51x_release(struct inode *inode, struct file *filp);
+ssize_t rts51x_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos);
+ssize_t rts51x_write(struct file *filp, const char __user * buf, size_t count,
+ loff_t *f_pos);
+#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
+int rts51x_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg);
+#else
+long rts51x_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+#endif
+
+#endif /* __RTS51X_FOP_H */
diff --git a/drivers/staging/rts5139/rts51x_scsi.c b/drivers/staging/rts5139/rts51x_scsi.c
new file mode 100644
index 00000000000..3b32f9e6e4f
--- /dev/null
+++ b/drivers/staging/rts5139/rts51x_scsi.c
@@ -0,0 +1,2234 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+
+#include "debug.h"
+#include "rts51x.h"
+#include "rts51x_chip.h"
+#include "rts51x_scsi.h"
+#include "rts51x_card.h"
+#include "rts51x_transport.h"
+#include "rts51x_sys.h"
+#include "sd_cprm.h"
+#include "ms_mg.h"
+#include "trace.h"
+
+void scsi_show_command(struct scsi_cmnd *srb)
+{
+ char *what = NULL;
+ int i, unknown_cmd = 0;
+
+ switch (srb->cmnd[0]) {
+ case TEST_UNIT_READY:
+ what = (char *)"TEST_UNIT_READY";
+ break;
+ case REZERO_UNIT:
+ what = (char *)"REZERO_UNIT";
+ break;
+ case REQUEST_SENSE:
+ what = (char *)"REQUEST_SENSE";
+ break;
+ case FORMAT_UNIT:
+ what = (char *)"FORMAT_UNIT";
+ break;
+ case READ_BLOCK_LIMITS:
+ what = (char *)"READ_BLOCK_LIMITS";
+ break;
+ case 0x07:
+ what = (char *)"REASSIGN_BLOCKS";
+ break;
+ case READ_6:
+ what = (char *)"READ_6";
+ break;
+ case WRITE_6:
+ what = (char *)"WRITE_6";
+ break;
+ case SEEK_6:
+ what = (char *)"SEEK_6";
+ break;
+ case READ_REVERSE:
+ what = (char *)"READ_REVERSE";
+ break;
+ case WRITE_FILEMARKS:
+ what = (char *)"WRITE_FILEMARKS";
+ break;
+ case SPACE:
+ what = (char *)"SPACE";
+ break;
+ case INQUIRY:
+ what = (char *)"INQUIRY";
+ break;
+ case RECOVER_BUFFERED_DATA:
+ what = (char *)"RECOVER_BUFFERED_DATA";
+ break;
+ case MODE_SELECT:
+ what = (char *)"MODE_SELECT";
+ break;
+ case RESERVE:
+ what = (char *)"RESERVE";
+ break;
+ case RELEASE:
+ what = (char *)"RELEASE";
+ break;
+ case COPY:
+ what = (char *)"COPY";
+ break;
+ case ERASE:
+ what = (char *)"ERASE";
+ break;
+ case MODE_SENSE:
+ what = (char *)"MODE_SENSE";
+ break;
+ case START_STOP:
+ what = (char *)"START_STOP";
+ break;
+ case RECEIVE_DIAGNOSTIC:
+ what = (char *)"RECEIVE_DIAGNOSTIC";
+ break;
+ case SEND_DIAGNOSTIC:
+ what = (char *)"SEND_DIAGNOSTIC";
+ break;
+ case ALLOW_MEDIUM_REMOVAL:
+ what = (char *)"ALLOW_MEDIUM_REMOVAL";
+ break;
+ case SET_WINDOW:
+ what = (char *)"SET_WINDOW";
+ break;
+ case READ_CAPACITY:
+ what = (char *)"READ_CAPACITY";
+ break;
+ case READ_10:
+ what = (char *)"READ_10";
+ break;
+ case WRITE_10:
+ what = (char *)"WRITE_10";
+ break;
+ case SEEK_10:
+ what = (char *)"SEEK_10";
+ break;
+ case WRITE_VERIFY:
+ what = (char *)"WRITE_VERIFY";
+ break;
+ case VERIFY:
+ what = (char *)"VERIFY";
+ break;
+ case SEARCH_HIGH:
+ what = (char *)"SEARCH_HIGH";
+ break;
+ case SEARCH_EQUAL:
+ what = (char *)"SEARCH_EQUAL";
+ break;
+ case SEARCH_LOW:
+ what = (char *)"SEARCH_LOW";
+ break;
+ case SET_LIMITS:
+ what = (char *)"SET_LIMITS";
+ break;
+ case READ_POSITION:
+ what = (char *)"READ_POSITION";
+ break;
+ case SYNCHRONIZE_CACHE:
+ what = (char *)"SYNCHRONIZE_CACHE";
+ break;
+ case LOCK_UNLOCK_CACHE:
+ what = (char *)"LOCK_UNLOCK_CACHE";
+ break;
+ case READ_DEFECT_DATA:
+ what = (char *)"READ_DEFECT_DATA";
+ break;
+ case MEDIUM_SCAN:
+ what = (char *)"MEDIUM_SCAN";
+ break;
+ case COMPARE:
+ what = (char *)"COMPARE";
+ break;
+ case COPY_VERIFY:
+ what = (char *)"COPY_VERIFY";
+ break;
+ case WRITE_BUFFER:
+ what = (char *)"WRITE_BUFFER";
+ break;
+ case READ_BUFFER:
+ what = (char *)"READ_BUFFER";
+ break;
+ case UPDATE_BLOCK:
+ what = (char *)"UPDATE_BLOCK";
+ break;
+ case READ_LONG:
+ what = (char *)"READ_LONG";
+ break;
+ case WRITE_LONG:
+ what = (char *)"WRITE_LONG";
+ break;
+ case CHANGE_DEFINITION:
+ what = (char *)"CHANGE_DEFINITION";
+ break;
+ case WRITE_SAME:
+ what = (char *)"WRITE_SAME";
+ break;
+ case GPCMD_READ_SUBCHANNEL:
+ what = (char *)"READ SUBCHANNEL";
+ break;
+ case READ_TOC:
+ what = (char *)"READ_TOC";
+ break;
+ case GPCMD_READ_HEADER:
+ what = (char *)"READ HEADER";
+ break;
+ case GPCMD_PLAY_AUDIO_10:
+ what = (char *)"PLAY AUDIO (10)";
+ break;
+ case GPCMD_PLAY_AUDIO_MSF:
+ what = (char *)"PLAY AUDIO MSF";
+ break;
+ case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
+ what = (char *)"GET EVENT/STATUS NOTIFICATION";
+ break;
+ case GPCMD_PAUSE_RESUME:
+ what = (char *)"PAUSE/RESUME";
+ break;
+ case LOG_SELECT:
+ what = (char *)"LOG_SELECT";
+ break;
+ case LOG_SENSE:
+ what = (char *)"LOG_SENSE";
+ break;
+ case GPCMD_STOP_PLAY_SCAN:
+ what = (char *)"STOP PLAY/SCAN";
+ break;
+ case GPCMD_READ_DISC_INFO:
+ what = (char *)"READ DISC INFORMATION";
+ break;
+ case GPCMD_READ_TRACK_RZONE_INFO:
+ what = (char *)"READ TRACK INFORMATION";
+ break;
+ case GPCMD_RESERVE_RZONE_TRACK:
+ what = (char *)"RESERVE TRACK";
+ break;
+ case GPCMD_SEND_OPC:
+ what = (char *)"SEND OPC";
+ break;
+ case MODE_SELECT_10:
+ what = (char *)"MODE_SELECT_10";
+ break;
+ case GPCMD_REPAIR_RZONE_TRACK:
+ what = (char *)"REPAIR TRACK";
+ break;
+ case 0x59:
+ what = (char *)"READ MASTER CUE";
+ break;
+ case MODE_SENSE_10:
+ what = (char *)"MODE_SENSE_10";
+ break;
+ case GPCMD_CLOSE_TRACK:
+ what = (char *)"CLOSE TRACK/SESSION";
+ break;
+ case 0x5C:
+ what = (char *)"READ BUFFER CAPACITY";
+ break;
+ case 0x5D:
+ what = (char *)"SEND CUE SHEET";
+ break;
+ case GPCMD_BLANK:
+ what = (char *)"BLANK";
+ break;
+ case REPORT_LUNS:
+ what = (char *)"REPORT LUNS";
+ break;
+ case MOVE_MEDIUM:
+ what = (char *)"MOVE_MEDIUM or PLAY AUDIO (12)";
+ break;
+ case READ_12:
+ what = (char *)"READ_12";
+ break;
+ case WRITE_12:
+ what = (char *)"WRITE_12";
+ break;
+ case WRITE_VERIFY_12:
+ what = (char *)"WRITE_VERIFY_12";
+ break;
+ case SEARCH_HIGH_12:
+ what = (char *)"SEARCH_HIGH_12";
+ break;
+ case SEARCH_EQUAL_12:
+ what = (char *)"SEARCH_EQUAL_12";
+ break;
+ case SEARCH_LOW_12:
+ what = (char *)"SEARCH_LOW_12";
+ break;
+ case SEND_VOLUME_TAG:
+ what = (char *)"SEND_VOLUME_TAG";
+ break;
+ case READ_ELEMENT_STATUS:
+ what = (char *)"READ_ELEMENT_STATUS";
+ break;
+ case GPCMD_READ_CD_MSF:
+ what = (char *)"READ CD MSF";
+ break;
+ case GPCMD_SCAN:
+ what = (char *)"SCAN";
+ break;
+ case GPCMD_SET_SPEED:
+ what = (char *)"SET CD SPEED";
+ break;
+ case GPCMD_MECHANISM_STATUS:
+ what = (char *)"MECHANISM STATUS";
+ break;
+ case GPCMD_READ_CD:
+ what = (char *)"READ CD";
+ break;
+ case 0xE1:
+ what = (char *)"WRITE CONTINUE";
+ break;
+ case WRITE_LONG_2:
+ what = (char *)"WRITE_LONG_2";
+ break;
+ case VENDOR_CMND:
+ what = (char *)"Realtek's vendor command";
+ break;
+ default:
+ what = (char *)"(unknown command)";
+ unknown_cmd = 1;
+ break;
+ }
+
+ if (srb->cmnd[0] != TEST_UNIT_READY)
+ RTS51X_DEBUGP("Command %s (%d bytes)\n", what, srb->cmd_len);
+ if (unknown_cmd) {
+ RTS51X_DEBUGP("");
+ for (i = 0; i < srb->cmd_len && i < 16; i++)
+ RTS51X_DEBUGPN(" %02x", srb->cmnd[i]);
+ RTS51X_DEBUGPN("\n");
+ }
+}
+
+void set_sense_type(struct rts51x_chip *chip, unsigned int lun, int sense_type)
+{
+ switch (sense_type) {
+ case SENSE_TYPE_MEDIA_CHANGE:
+ set_sense_data(chip, lun, CUR_ERR, 0x06, 0, 0x28, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_NOT_PRESENT:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x3A, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_LBA_OVER_RANGE:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x21, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x25, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_WRITE_PROTECT:
+ set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x27, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x11, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_WRITE_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x02, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD:
+ set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0,
+ ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
+ break;
+
+ case SENSE_TYPE_FORMAT_IN_PROGRESS:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, 0, 0);
+ break;
+
+ case SENSE_TYPE_FORMAT_CMD_FAILED:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x31, 0x01, 0, 0);
+ break;
+
+#ifdef SUPPORT_MAGIC_GATE
+ case SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x02, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x00, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x30, 0x00, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_WRITE_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x00, 0, 0);
+ break;
+#endif
+
+#ifdef SUPPORT_SD_LOCK
+ case SENSE_TYPE_MEDIA_READ_FORBIDDEN:
+ set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x11, 0x13, 0, 0);
+ break;
+#endif
+
+ case SENSE_TYPE_NO_SENSE:
+ default:
+ set_sense_data(chip, lun, CUR_ERR, 0, 0, 0, 0, 0, 0);
+ break;
+ }
+}
+
+void set_sense_data(struct rts51x_chip *chip, unsigned int lun, u8 err_code,
+ u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
+ u16 sns_key_info1)
+{
+ struct sense_data_t *sense = &(chip->sense_buffer[lun]);
+
+ sense->err_code = err_code;
+ sense->sense_key = sense_key;
+ sense->info[0] = (u8) (info >> 24);
+ sense->info[1] = (u8) (info >> 16);
+ sense->info[2] = (u8) (info >> 8);
+ sense->info[3] = (u8) info;
+
+ sense->ad_sense_len = sizeof(struct sense_data_t) - 8;
+ sense->asc = asc;
+ sense->ascq = ascq;
+ if (sns_key_info0 != 0) {
+ sense->sns_key_info[0] = SKSV | sns_key_info0;
+ sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
+ sense->sns_key_info[2] = sns_key_info1 & 0x0f;
+ }
+}
+
+static int test_unit_ready(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+
+ rts51x_init_cards(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ return TRANSPORT_FAILED;
+ }
+
+ if (!check_lun_mc(chip, lun)) {
+ set_lun_mc(chip, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+#ifdef SUPPORT_SD_LOCK
+ if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) {
+ struct sd_info *sd_card = &(chip->sd_card);
+ if (sd_card->sd_lock_notify) {
+ sd_card->sd_lock_notify = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ } else if (sd_card->sd_lock_status & SD_LOCKED) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ return TRANSPORT_FAILED;
+ }
+ }
+#endif
+
+ return TRANSPORT_GOOD;
+}
+
+unsigned char formatter_inquiry_str[20] = {
+ 'M', 'E', 'M', 'O', 'R', 'Y', 'S', 'T', 'I', 'C', 'K',
+ '-', 'M', 'G', /* Byte[47:49] */
+ 0x0B, /* Byte[50]: MG, MS, MSPro, MSXC */
+ 0x00, /* Byte[51]: Category Specific Commands */
+ 0x00, /* Byte[52]: Access Control and feature */
+ 0x20, 0x20, 0x20, /* Byte[53:55] */
+};
+
+static int inquiry(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ char *inquiry_default = (char *)"Generic-xD/SD/M.S. 1.00 ";
+ char *inquiry_string;
+ unsigned char sendbytes;
+ unsigned char *buf;
+ u8 card = get_lun_card(chip, lun);
+ int pro_formatter_flag = 0;
+ unsigned char inquiry_buf[] = {
+ QULIFIRE | DRCT_ACCESS_DEV,
+ RMB_DISC | 0x0D,
+ 0x00,
+ 0x01,
+ 0x1f,
+ 0x02,
+ 0,
+ REL_ADR | WBUS_32 | WBUS_16 | SYNC | LINKED | CMD_QUE | SFT_RE,
+ };
+
+ inquiry_string = inquiry_default;
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ if (MS_FORMATTER_ENABLED(chip) && (get_lun2card(chip, lun) & MS_CARD)) {
+ if (!card || (card == MS_CARD))
+ pro_formatter_flag = 1;
+ }
+
+ if (pro_formatter_flag) {
+ if (scsi_bufflen(srb) < 56)
+ sendbytes = (unsigned char)(scsi_bufflen(srb));
+ else
+ sendbytes = 56;
+ } else {
+ if (scsi_bufflen(srb) < 36)
+ sendbytes = (unsigned char)(scsi_bufflen(srb));
+ else
+ sendbytes = 36;
+ }
+
+ if (sendbytes > 8) {
+ memcpy(buf, inquiry_buf, 8);
+ memcpy(buf + 8, inquiry_string, sendbytes - 8);
+ if (pro_formatter_flag)
+ buf[4] = 0x33; /* Additional Length */
+ } else {
+ memcpy(buf, inquiry_buf, sendbytes);
+ }
+
+ if (pro_formatter_flag) {
+ if (sendbytes > 36)
+ memcpy(buf + 36, formatter_inquiry_str, sendbytes - 36);
+ }
+
+ scsi_set_resid(srb, 0);
+
+ rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int start_stop_unit(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+
+ scsi_set_resid(srb, scsi_bufflen(srb));
+
+ if (srb->cmnd[1] == 1)
+ return TRANSPORT_GOOD;
+
+ switch (srb->cmnd[0x4]) {
+ case STOP_MEDIUM:
+ /* Media disabled */
+ return TRANSPORT_GOOD;
+
+ case UNLOAD_MEDIUM:
+ /* Media shall be unload */
+ if (check_card_ready(chip, lun))
+ eject_card(chip, lun);
+ return TRANSPORT_GOOD;
+
+ case MAKE_MEDIUM_READY:
+ case LOAD_MEDIUM:
+ if (check_card_ready(chip, lun)) {
+ return TRANSPORT_GOOD;
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ break;
+ }
+
+ TRACE_RET(chip, TRANSPORT_ERROR);
+}
+
+static int allow_medium_removal(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int prevent;
+
+ prevent = srb->cmnd[4] & 0x1;
+
+ scsi_set_resid(srb, 0);
+
+ if (prevent) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static void ms_mode_sense(struct rts51x_chip *chip, u8 cmd,
+ int lun, u8 *buf, int buf_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int sys_info_offset;
+ int data_size = buf_len;
+ int support_format = 0;
+ int i = 0;
+
+ if (cmd == MODE_SENSE) {
+ sys_info_offset = 8;
+ if (data_size > 0x68)
+ data_size = 0x68;
+ buf[i++] = 0x67; /* Mode Data Length */
+ } else {
+ sys_info_offset = 12;
+ if (data_size > 0x6C)
+ data_size = 0x6C;
+ buf[i++] = 0x00; /* Mode Data Length (MSB) */
+ buf[i++] = 0x6A; /* Mode Data Length (LSB) */
+ }
+
+ /* Medium Type Code */
+ if (check_card_ready(chip, lun)) {
+ if (CHK_MSXC(ms_card)) {
+ support_format = 1;
+ buf[i++] = 0x40;
+ } else if (CHK_MSPRO(ms_card)) {
+ support_format = 1;
+ buf[i++] = 0x20;
+ } else {
+ buf[i++] = 0x10;
+ }
+
+ /* WP */
+ if (check_card_wp(chip, lun))
+ buf[i++] = 0x80;
+ else
+ buf[i++] = 0x00;
+ } else {
+ buf[i++] = 0x00; /* MediaType */
+ buf[i++] = 0x00; /* WP */
+ }
+
+ buf[i++] = 0x00; /* Reserved */
+
+ if (cmd == MODE_SENSE_10) {
+ buf[i++] = 0x00; /* Reserved */
+ buf[i++] = 0x00; /* Block descriptor length(MSB) */
+ buf[i++] = 0x00; /* Block descriptor length(LSB) */
+
+ /* The Following Data is the content of "Page 0x20" */
+ if (data_size >= 9)
+ buf[i++] = 0x20; /* Page Code */
+ if (data_size >= 10)
+ buf[i++] = 0x62; /* Page Length */
+ if (data_size >= 11)
+ buf[i++] = 0x00; /* No Access Control */
+ if (data_size >= 12) {
+ if (support_format)
+ buf[i++] = 0xC0; /* SF, SGM */
+ else
+ buf[i++] = 0x00;
+ }
+ } else {
+ /* The Following Data is the content of "Page 0x20" */
+ if (data_size >= 5)
+ buf[i++] = 0x20; /* Page Code */
+ if (data_size >= 6)
+ buf[i++] = 0x62; /* Page Length */
+ if (data_size >= 7)
+ buf[i++] = 0x00; /* No Access Control */
+ if (data_size >= 8) {
+ if (support_format)
+ buf[i++] = 0xC0; /* SF, SGM */
+ else
+ buf[i++] = 0x00;
+ }
+ }
+
+ if (data_size > sys_info_offset) {
+ /* 96 Bytes Attribute Data */
+ int len = data_size - sys_info_offset;
+ len = (len < 96) ? len : 96;
+
+ memcpy(buf + sys_info_offset, ms_card->raw_sys_info, len);
+ }
+}
+
+static int mode_sense(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned int dataSize;
+ int status;
+ int pro_formatter_flag;
+ unsigned char pageCode, *buf;
+ u8 card = get_lun_card(chip, lun);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ scsi_set_resid(srb, scsi_bufflen(srb));
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ pro_formatter_flag = 0;
+ dataSize = 8;
+ /* In Combo mode, device responses ModeSense command as a MS LUN
+ * when no card is inserted */
+ if ((get_lun2card(chip, lun) & MS_CARD)) {
+ if (!card || (card == MS_CARD)) {
+ dataSize = 108;
+ if (chip->option.mspro_formatter_enable)
+ pro_formatter_flag = 1;
+ }
+ }
+
+ buf = kmalloc(dataSize, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ pageCode = srb->cmnd[2] & 0x3f;
+
+ if ((pageCode == 0x3F) || (pageCode == 0x1C) ||
+ (pageCode == 0x00) || (pro_formatter_flag && (pageCode == 0x20))) {
+ if (srb->cmnd[0] == MODE_SENSE) {
+ if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ ms_mode_sense(chip, srb->cmnd[0], lun, buf,
+ dataSize);
+ } else {
+ dataSize = 4;
+ buf[0] = 0x03;
+ buf[1] = 0x00;
+ if (check_card_wp(chip, lun))
+ buf[2] = 0x80;
+ else
+ buf[3] = 0x00;
+ }
+ } else {
+ if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ ms_mode_sense(chip, srb->cmnd[0], lun, buf,
+ dataSize);
+ } else {
+ dataSize = 8;
+ buf[0] = 0x00;
+ buf[1] = 0x06;
+ buf[2] = 0x00;
+ if (check_card_wp(chip, lun))
+ buf[3] = 0x80;
+ else
+ buf[3] = 0x00;
+ buf[4] = 0x00;
+ buf[5] = 0x00;
+ buf[6] = 0x00;
+ buf[7] = 0x00;
+ }
+ }
+ status = TRANSPORT_GOOD;
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ scsi_set_resid(srb, scsi_bufflen(srb));
+ status = TRANSPORT_FAILED;
+ }
+
+ if (status == TRANSPORT_GOOD) {
+ unsigned int len = min(scsi_bufflen(srb), dataSize);
+ rts51x_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+ }
+ kfree(buf);
+
+ return status;
+}
+
+static int request_sense(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct sense_data_t *sense;
+ unsigned int lun = SCSI_LUN(srb);
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned char *tmp, *buf;
+
+ sense = &(chip->sense_buffer[lun]);
+
+ if ((get_lun_card(chip, lun) == MS_CARD)
+ && PRO_UNDER_FORMATTING(ms_card)) {
+ mspro_format_sense(chip, lun);
+ }
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ tmp = (unsigned char *)sense;
+ memcpy(buf, tmp, scsi_bufflen(srb));
+
+ rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ scsi_set_resid(srb, 0);
+ /* Reset Sense Data */
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ return TRANSPORT_GOOD;
+}
+
+static int read_write(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+#endif
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u32 start_sec;
+ u16 sec_cnt;
+
+ if (!check_card_ready(chip, lun) || (chip->capacity[lun] == 0)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!check_lun_mc(chip, lun)) {
+ set_lun_mc(chip, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_erase_status) {
+ /* Accessing to any card is forbidden
+ * until the erase procedure of SD is completed */
+ RTS51X_DEBUGP("SD card being erased!\n");
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (get_lun_card(chip, lun) == SD_CARD) {
+ if (sd_card->sd_lock_status & SD_LOCKED) {
+ RTS51X_DEBUGP("SD card locked!\n");
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#endif
+
+ if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
+ start_sec =
+ ((u32) srb->cmnd[2] << 24) |
+ ((u32) srb->cmnd[3] << 16) |
+ ((u32) srb->cmnd[4] << 8) |
+ ((u32) srb->cmnd[5]);
+ sec_cnt = ((u16) (srb->cmnd[7]) << 8) | srb->cmnd[8];
+ } else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
+ start_sec = ((u32) (srb->cmnd[1] & 0x1F) << 16) |
+ ((u32) srb->cmnd[2] << 8) | ((u32) srb->cmnd[3]);
+ sec_cnt = srb->cmnd[4];
+ } else if ((srb->cmnd[0] == VENDOR_CMND) &&
+ (srb->cmnd[1] == SCSI_APP_CMD) &&
+ ((srb->cmnd[2] == PP_READ10) ||
+ (srb->cmnd[2] == PP_WRITE10))) {
+ start_sec = ((u32) srb->cmnd[4] << 24) |
+ ((u32) srb->cmnd[5] << 16) |
+ ((u32) srb->cmnd[6] << 8) |
+ ((u32) srb->cmnd[7]);
+ sec_cnt = ((u16) (srb->cmnd[9]) << 8) | srb->cmnd[10];
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((start_sec > chip->capacity[lun]) ||
+ ((start_sec + sec_cnt) > chip->capacity[lun])) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sec_cnt == 0) {
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+ }
+
+ if ((srb->sc_data_direction == DMA_TO_DEVICE)
+ && check_card_wp(chip, lun)) {
+ RTS51X_DEBUGP("Write protected card!\n");
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ retval = card_rw(srb, chip, start_sec, sec_cnt);
+ if (retval != STATUS_SUCCESS) {
+#if 0
+ if (chip->need_release & chip->lun2card[lun]) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ } else {
+#endif
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ }
+#if 0
+ }
+#endif
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_format_capacity(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned char *buf;
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned int buf_len;
+ u8 card = get_lun_card(chip, lun);
+ int desc_cnt;
+ int i = 0;
+
+ if (!check_card_ready(chip, lun)) {
+ if (!chip->option.mspro_formatter_enable) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ buf_len = (scsi_bufflen(srb) > 12) ? 0x14 : 12;
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ buf[i++] = 0;
+ buf[i++] = 0;
+ buf[i++] = 0;
+
+ /* Capacity List Length */
+ if ((buf_len > 12) && chip->option.mspro_formatter_enable &&
+ (chip->lun2card[lun] & MS_CARD) && (!card || (card == MS_CARD))) {
+ buf[i++] = 0x10;
+ desc_cnt = 2;
+ } else {
+ buf[i++] = 0x08;
+ desc_cnt = 1;
+ }
+
+ while (desc_cnt) {
+ if (check_card_ready(chip, lun)) {
+ buf[i++] = (unsigned char)((chip->capacity[lun]) >> 24);
+ buf[i++] = (unsigned char)((chip->capacity[lun]) >> 16);
+ buf[i++] = (unsigned char)((chip->capacity[lun]) >> 8);
+ buf[i++] = (unsigned char)(chip->capacity[lun]);
+
+ if (desc_cnt == 2)
+ /* Byte[8]: Descriptor Type: Formatted medium */
+ buf[i++] = 2;
+ else
+ buf[i++] = 0; /* Byte[16] */
+ } else {
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+
+ if (desc_cnt == 2)
+ /* Byte[8]: Descriptor Type: No medium */
+ buf[i++] = 3;
+ else
+ buf[i++] = 0; /*Byte[16] */
+ }
+
+ buf[i++] = 0x00;
+ buf[i++] = 0x02;
+ buf[i++] = 0x00;
+
+ desc_cnt--;
+ }
+
+ buf_len = min(scsi_bufflen(srb), buf_len);
+ rts51x_set_xfer_buf(buf, buf_len, srb);
+ kfree(buf);
+
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_capacity(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned char *buf;
+ unsigned int lun = SCSI_LUN(srb);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!check_lun_mc(chip, lun)) {
+ set_lun_mc(chip, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+
+ buf = kmalloc(8, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ buf[0] = (unsigned char)((chip->capacity[lun] - 1) >> 24);
+ buf[1] = (unsigned char)((chip->capacity[lun] - 1) >> 16);
+ buf[2] = (unsigned char)((chip->capacity[lun] - 1) >> 8);
+ buf[3] = (unsigned char)(chip->capacity[lun] - 1);
+
+ buf[4] = 0x00;
+ buf[5] = 0x00;
+ buf[6] = 0x02;
+ buf[7] = 0x00;
+
+ rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ kfree(buf);
+
+ scsi_set_resid(srb, 0);
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_dev_status(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned int buf_len;
+ u8 status[32] = { 0 };
+
+ rts51x_pp_status(chip, lun, status, 32);
+
+ buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(status));
+ rts51x_set_xfer_buf(status, buf_len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_status(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ u8 rts51x_status[16];
+ unsigned int buf_len;
+ unsigned int lun = SCSI_LUN(srb);
+
+ rts51x_read_status(chip, lun, rts51x_status, 16);
+
+ buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(rts51x_status));
+ rts51x_set_xfer_buf(rts51x_status, buf_len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_mem(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ addr = ((u16) srb->cmnd[2] << 8) | srb->cmnd[3];
+ len = ((u16) srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ if (addr < 0xe000) {
+ RTS51X_DEBUGP("filter!addr=0x%x\n", addr);
+ return TRANSPORT_GOOD;
+ }
+
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ for (i = 0; i < len; i++) {
+ retval = rts51x_ep0_read_register(chip, addr + i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
+ rts51x_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_mem(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ addr = ((u16) srb->cmnd[2] << 8) | srb->cmnd[3];
+ len = ((u16) srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ if (addr < 0xe000) {
+ RTS51X_DEBUGP("filter!addr=0x%x\n", addr);
+ return TRANSPORT_GOOD;
+ }
+
+ len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rts51x_get_xfer_buf(buf, len, srb);
+
+ for (i = 0; i < len; i++) {
+ retval =
+ rts51x_ep0_write_register(chip, addr + i, 0xFF, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_sd_csd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (get_lun_card(chip, lun) != SD_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ rts51x_set_xfer_buf(sd_card->raw_csd, scsi_bufflen(srb), srb);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_phy_register(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval;
+ u8 addr, len, i;
+ u8 *buf;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ addr = srb->cmnd[5];
+ len = srb->cmnd[7];
+
+ if (len) {
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ for (i = 0; i < len; i++) {
+ retval =
+ rts51x_read_phy_register(chip, addr + i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = min(scsi_bufflen(srb), (unsigned int)len);
+ rts51x_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_phy_register(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval;
+ u8 addr, len, i;
+ u8 *buf;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ addr = srb->cmnd[5];
+ len = srb->cmnd[7];
+
+ if (len) {
+ len = min(scsi_bufflen(srb), (unsigned int)len);
+
+ buf = vmalloc(len);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rts51x_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ for (i = 0; i < len; i++) {
+ retval =
+ rts51x_write_phy_register(chip, addr + i, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_card_bus_width(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ u8 card, bus_width;
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ card = get_lun_card(chip, lun);
+ if ((card == SD_CARD) || (card == MS_CARD)) {
+ bus_width = chip->card_bus_width[lun];
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ rts51x_set_xfer_buf(&bus_width, scsi_bufflen(srb), srb);
+
+ return TRANSPORT_GOOD;
+}
+
+#ifdef _MSG_TRACE
+static int trace_msg_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned char *buf = NULL;
+ u8 clear;
+ unsigned int buf_len;
+
+ buf_len =
+ 4 +
+ ((2 + MSG_FUNC_LEN + MSG_FILE_LEN + TIME_VAL_LEN) * TRACE_ITEM_CNT);
+
+ if ((scsi_bufflen(srb) < buf_len) || (scsi_sglist(srb) == NULL)) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ clear = srb->cmnd[2];
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rts51x_trace_msg(chip, buf, clear);
+
+ rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+#endif
+
+static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval = STATUS_SUCCESS;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 cmd_type, mask, value, idx, mode, len;
+ u16 addr;
+ u32 timeout;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ switch (srb->cmnd[3]) {
+ case INIT_BATCHCMD:
+ rts51x_init_cmd(chip);
+ break;
+
+ case ADD_BATCHCMD:
+ cmd_type = srb->cmnd[4];
+ if (cmd_type > 2) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ addr = (srb->cmnd[5] << 8) | srb->cmnd[6];
+ mask = srb->cmnd[7];
+ value = srb->cmnd[8];
+ rts51x_add_cmd(chip, cmd_type, addr, mask, value);
+ break;
+
+ case SEND_BATCHCMD:
+ mode = srb->cmnd[4];
+ len = srb->cmnd[5];
+ timeout =
+ ((u32) srb->cmnd[6] << 24) | ((u32) srb->
+ cmnd[7] << 16) | ((u32) srb->
+ cmnd[8] <<
+ 8) | ((u32)
+ srb->
+ cmnd
+ [9]);
+ retval = rts51x_send_cmd(chip, mode, 1000);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if (mode & STAGE_R) {
+ retval = rts51x_get_rsp(chip, len, timeout);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+ break;
+
+ case GET_BATCHRSP:
+ idx = srb->cmnd[4];
+ value = chip->rsp_buf[idx];
+ if (scsi_bufflen(srb) < 1) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ rts51x_set_xfer_buf(&value, 1, srb);
+ scsi_set_resid(srb, 0);
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int suit_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int result;
+
+ switch (srb->cmnd[3]) {
+ case INIT_BATCHCMD:
+ case ADD_BATCHCMD:
+ case SEND_BATCHCMD:
+ case GET_BATCHRSP:
+ result = rw_mem_cmd_buf(srb, chip);
+ break;
+ default:
+ result = TRANSPORT_ERROR;
+ }
+
+ return result;
+}
+
+static int app_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int result;
+
+ switch (srb->cmnd[2]) {
+ case PP_READ10:
+ case PP_WRITE10:
+ result = read_write(srb, chip);
+ break;
+
+ case SUIT_CMD:
+ result = suit_cmd(srb, chip);
+ break;
+
+ case READ_PHY:
+ result = read_phy_register(srb, chip);
+ break;
+
+ case WRITE_PHY:
+ result = write_phy_register(srb, chip);
+ break;
+
+ case GET_DEV_STATUS:
+ result = get_dev_status(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+
+static int vendor_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int result = TRANSPORT_GOOD;
+
+ switch (srb->cmnd[1]) {
+ case READ_STATUS:
+ result = read_status(srb, chip);
+ break;
+
+ case READ_MEM:
+ result = read_mem(srb, chip);
+ break;
+
+ case WRITE_MEM:
+ result = write_mem(srb, chip);
+ break;
+
+ case GET_BUS_WIDTH:
+ result = get_card_bus_width(srb, chip);
+ break;
+
+ case GET_SD_CSD:
+ result = get_sd_csd(srb, chip);
+ break;
+
+#ifdef _MSG_TRACE
+ case TRACE_MSG:
+ result = trace_msg_cmd(srb, chip);
+ break;
+#endif
+
+ case SCSI_APP_CMD:
+ result = app_cmd(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+
+static int ms_format_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, quick_format;
+
+ if (get_lun_card(chip, lun) != MS_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47)
+ || (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D)
+ || (srb->cmnd[7] != 0x74)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->cmnd[8] & 0x01)
+ quick_format = 0;
+ else
+ quick_format = 1;
+
+ if (!(chip->card_ready & MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (chip->card_wp & MS_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ retval = mspro_format(srb, chip, MS_SHORT_DATA_LEN, quick_format);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+
+#ifdef SUPPORT_PCGL_1P18
+int get_ms_information(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ u8 dev_info_id, data_len;
+ u8 *buf;
+ unsigned int buf_len;
+ int i;
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) ||
+ (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
+ (srb->cmnd[7] != 0x44)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ dev_info_id = srb->cmnd[3];
+ if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) ||
+ (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
+ !CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (dev_info_id == 0x15)
+ buf_len = data_len = 0x3A;
+ else
+ buf_len = data_len = 0x6A;
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ i = 0;
+ /* GET Memory Stick Media Information Response Header */
+ buf[i++] = 0x00; /* Data length MSB */
+ buf[i++] = data_len; /* Data length LSB */
+ /* Device Information Type Code */
+ if (CHK_MSXC(ms_card))
+ buf[i++] = 0x03;
+ else
+ buf[i++] = 0x02;
+ /* SGM bit */
+ buf[i++] = 0x01;
+ /* Reserved */
+ buf[i++] = 0x00;
+ buf[i++] = 0x00;
+ buf[i++] = 0x00;
+ /* Number of Device Information */
+ buf[i++] = 0x01;
+
+ /* Device Information Body
+ * Device Information ID Number */
+ buf[i++] = dev_info_id;
+ /* Device Information Length */
+ if (dev_info_id == 0x15)
+ data_len = 0x31;
+ else
+ data_len = 0x61;
+ buf[i++] = 0x00; /* Data length MSB */
+ buf[i++] = data_len; /* Data length LSB */
+ /* Valid Bit */
+ buf[i++] = 0x80;
+ if ((dev_info_id == 0x10) || (dev_info_id == 0x13)) {
+ /* System Information */
+ memcpy(buf + i, ms_card->raw_sys_info, 96);
+ } else {
+ /* Model Name */
+ memcpy(buf + i, ms_card->raw_model_name, 48);
+ }
+
+ rts51x_set_xfer_buf(buf, buf_len, srb);
+
+ if (dev_info_id == 0x15)
+ scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C);
+ else
+ scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C);
+
+ kfree(buf);
+ return STATUS_SUCCESS;
+}
+#endif
+
+static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval = TRANSPORT_ERROR;
+
+ if (srb->cmnd[2] == MS_FORMAT)
+ retval = ms_format_cmnd(srb, chip);
+#ifdef SUPPORT_PCGL_1P18
+ else if (srb->cmnd[2] == GET_MS_INFORMATION)
+ retval = get_ms_information(srb, chip);
+#endif
+
+ return retval;
+}
+
+#ifdef SUPPORT_CPRM
+static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ int result;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ sd_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != SD_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ switch (srb->cmnd[0]) {
+ case SD_PASS_THRU_MODE:
+ result = sd_pass_thru_mode(srb, chip);
+ break;
+
+ case SD_EXECUTE_NO_DATA:
+ result = sd_execute_no_data(srb, chip);
+ break;
+
+ case SD_EXECUTE_READ:
+ result = sd_execute_read_data(srb, chip);
+ break;
+
+ case SD_EXECUTE_WRITE:
+ result = sd_execute_write_data(srb, chip);
+ break;
+
+ case SD_GET_RSP:
+ result = sd_get_cmd_rsp(srb, chip);
+ break;
+
+ case SD_HW_RST:
+ result = sd_hw_rst(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+int mg_report_key(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u8 key_format;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ ms_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->cmnd[7] != KC_MG_R_PRO) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ key_format = srb->cmnd[10] & 0x3F;
+
+ switch (key_format) {
+ case KF_GET_LOC_EKB:
+ if ((scsi_bufflen(srb) == 0x41C) &&
+ (srb->cmnd[8] == 0x04) && (srb->cmnd[9] == 0x1C)) {
+ retval = mg_get_local_EKB(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_RSP_CHG:
+ if ((scsi_bufflen(srb) == 0x24) &&
+ (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x24)) {
+ retval = mg_get_rsp_chg(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_GET_ICV:
+ ms_card->mg_entry_num = srb->cmnd[5];
+ if ((scsi_bufflen(srb) == 0x404) &&
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) && (srb->cmnd[5] < 32)) {
+ retval = mg_get_ICV(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+
+int mg_send_key(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u8 key_format;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ ms_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if (check_card_wp(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->cmnd[7] != KC_MG_R_PRO) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ key_format = srb->cmnd[10] & 0x3F;
+
+ switch (key_format) {
+ case KF_SET_LEAF_ID:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
+ retval = mg_set_leaf_id(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_CHG_HOST:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
+ retval = mg_chg(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_RSP_HOST:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
+ retval = mg_rsp(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_SET_ICV:
+ ms_card->mg_entry_num = srb->cmnd[5];
+ if ((scsi_bufflen(srb) == 0x404) &&
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) && (srb->cmnd[5] < 32)) {
+ retval = mg_set_ICV(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+#endif
+
+int rts51x_scsi_handler(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+#endif
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int result = TRANSPORT_GOOD;
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_erase_status) {
+ /* Block all SCSI command except for REQUEST_SENSE
+ * and rs_ppstatus */
+ if (!
+ ((srb->cmnd[0] == VENDOR_CMND)
+ && (srb->cmnd[1] == SCSI_APP_CMD)
+ && (srb->cmnd[2] == GET_DEV_STATUS))
+ && (srb->cmnd[0] != REQUEST_SENSE)) {
+ /* Logical Unit Not Ready Format in Progress */
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
+ 0, 0);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#endif
+
+ if ((get_lun_card(chip, lun) == MS_CARD) &&
+ (ms_card->format_status == FORMAT_IN_PROGRESS)) {
+ if ((srb->cmnd[0] != REQUEST_SENSE)
+ && (srb->cmnd[0] != INQUIRY)) {
+ /* Logical Unit Not Ready Format in Progress */
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
+ 0, (u16) (ms_card->progress));
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ switch (srb->cmnd[0]) {
+ case READ_10:
+ case WRITE_10:
+ case READ_6:
+ case WRITE_6:
+ result = read_write(srb, chip);
+ break;
+
+ case TEST_UNIT_READY:
+ result = test_unit_ready(srb, chip);
+ break;
+
+ case INQUIRY:
+ result = inquiry(srb, chip);
+ break;
+
+ case READ_CAPACITY:
+ result = read_capacity(srb, chip);
+ break;
+
+ case START_STOP:
+ result = start_stop_unit(srb, chip);
+ break;
+
+ case ALLOW_MEDIUM_REMOVAL:
+ result = allow_medium_removal(srb, chip);
+ break;
+
+ case REQUEST_SENSE:
+ result = request_sense(srb, chip);
+ break;
+
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ result = mode_sense(srb, chip);
+ break;
+
+ case 0x23:
+ result = read_format_capacity(srb, chip);
+ break;
+
+ case VENDOR_CMND:
+ result = vendor_cmnd(srb, chip);
+ break;
+
+ case MS_SP_CMND:
+ result = ms_sp_cmnd(srb, chip);
+ break;
+
+#ifdef SUPPORT_CPRM
+ case SD_PASS_THRU_MODE:
+ case SD_EXECUTE_NO_DATA:
+ case SD_EXECUTE_READ:
+ case SD_EXECUTE_WRITE:
+ case SD_GET_RSP:
+ case SD_HW_RST:
+ result = sd_extention_cmnd(srb, chip);
+ break;
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+ case CMD_MSPRO_MG_RKEY:
+ result = mg_report_key(srb, chip);
+ break;
+
+ case CMD_MSPRO_MG_SKEY:
+ result = mg_send_key(srb, chip);
+ break;
+#endif
+
+ case FORMAT_UNIT:
+ case MODE_SELECT:
+ case VERIFY:
+ result = TRANSPORT_GOOD;
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ result = TRANSPORT_FAILED;
+ }
+
+ return result;
+}
+
+/***********************************************************************
+ * Host functions
+ ***********************************************************************/
+
+const char *host_info(struct Scsi_Host *host)
+{
+ return "SCSI emulation for RTS51xx USB driver-based card reader";
+}
+
+int slave_alloc(struct scsi_device *sdev)
+{
+ /*
+ * Set the INQUIRY transfer length to 36. We don't use any of
+ * the extra data and many devices choke if asked for more or
+ * less than 36 bytes.
+ */
+ sdev->inquiry_len = 36;
+ return 0;
+}
+
+int slave_configure(struct scsi_device *sdev)
+{
+ /* Scatter-gather buffers (all but the last) must have a length
+ * divisible by the bulk maxpacket size. Otherwise a data packet
+ * would end up being short, causing a premature end to the data
+ * transfer. Since high-speed bulk pipes have a maxpacket size
+ * of 512, we'll use that as the scsi device queue's DMA alignment
+ * mask. Guaranteeing proper alignment of the first buffer will
+ * have the desired effect because, except at the beginning and
+ * the end, scatter-gather buffers follow page boundaries. */
+ blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
+
+ /* Set the SCSI level to at least 2. We'll leave it at 3 if that's
+ * what is originally reported. We need this to avoid confusing
+ * the SCSI layer with devices that report 0 or 1, but need 10-byte
+ * commands (ala ATAPI devices behind certain bridges, or devices
+ * which simply have broken INQUIRY data).
+ *
+ * NOTE: This means /dev/sg programs (ala cdrecord) will get the
+ * actual information. This seems to be the preference for
+ * programs like that.
+ *
+ * NOTE: This also means that /proc/scsi/scsi and sysfs may report
+ * the actual value or the modified one, depending on where the
+ * data comes from.
+ */
+ if (sdev->scsi_level < SCSI_2)
+ sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2;
+
+ return 0;
+}
+
+/***********************************************************************
+ * /proc/scsi/ functions
+ ***********************************************************************/
+
+/* we use this macro to help us write into the buffer */
+#undef SPRINTF
+#define SPRINTF(args...) \
+ do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0)
+
+int proc_info(struct Scsi_Host *host, char *buffer,
+ char **start, off_t offset, int length, int inout)
+{
+ char *pos = buffer;
+
+ /* if someone is sending us data, just throw it away */
+ if (inout)
+ return length;
+
+ /* print the controller name */
+ SPRINTF(" Host scsi%d: %s\n", host->host_no, RTS51X_NAME);
+
+ /* print product, vendor, and driver version strings */
+ SPRINTF(" Vendor: Realtek Corp.\n");
+ SPRINTF(" Product: RTS51xx USB Card Reader\n");
+ SPRINTF(" Version: %s\n", DRIVER_VERSION);
+ SPRINTF(" Build: %s\n", __TIME__);
+
+ /*
+ * Calculate start of next buffer, and return value.
+ */
+ *start = buffer + offset;
+
+ if ((pos - buffer) < offset)
+ return 0;
+ else if ((pos - buffer - offset) < length)
+ return pos - buffer - offset;
+ else
+ return length;
+}
+
+/* queue a command */
+/* This is always called with scsi_lock(host) held */
+int queuecommand_lck(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *))
+{
+ struct rts51x_chip *chip = host_to_rts51x(srb->device->host);
+
+ /* check for state-transition errors */
+ if (chip->srb != NULL) {
+ RTS51X_DEBUGP("Error in %s: chip->srb = %p\n",
+ __func__, chip->srb);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ /* fail the command if we are disconnecting */
+ if (test_bit(FLIDX_DISCONNECTING, &chip->usb->dflags)) {
+ RTS51X_DEBUGP("Fail command during disconnect\n");
+ srb->result = DID_NO_CONNECT << 16;
+ done(srb);
+ return 0;
+ }
+
+ /* enqueue the command and wake up the control thread */
+ srb->scsi_done = done;
+ chip->srb = srb;
+ complete(&chip->usb->cmnd_ready);
+
+ return 0;
+}
+
+#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37) */
+int queuecommand(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *))
+{
+ return queuecommand_lck(srb, done);
+}
+#else
+DEF_SCSI_QCMD(queuecommand)
+#endif
+/***********************************************************************
+ * Error handling functions
+ ***********************************************************************/
+/* Command timeout and abort */
+int command_abort(struct scsi_cmnd *srb)
+{
+ struct rts51x_chip *chip = host_to_rts51x(srb->device->host);
+
+ RTS51X_DEBUGP("%s called\n", __func__);
+
+ /* us->srb together with the TIMED_OUT, RESETTING, and ABORTING
+ * bits are protected by the host lock. */
+ scsi_lock(rts51x_to_host(chip));
+
+ /* Is this command still active? */
+ if (chip->srb != srb) {
+ scsi_unlock(rts51x_to_host(chip));
+ RTS51X_DEBUGP("-- nothing to abort\n");
+ return FAILED;
+ }
+
+ /* Set the TIMED_OUT bit. Also set the ABORTING bit, but only if
+ * a device reset isn't already in progress (to avoid interfering
+ * with the reset). Note that we must retain the host lock while
+ * calling usb_stor_stop_transport(); otherwise it might interfere
+ * with an auto-reset that begins as soon as we release the lock. */
+ set_bit(FLIDX_TIMED_OUT, &chip->usb->dflags);
+ if (!test_bit(FLIDX_RESETTING, &chip->usb->dflags)) {
+ set_bit(FLIDX_ABORTING, &chip->usb->dflags);
+ /* rts51x_stop_transport(us); */
+ }
+ scsi_unlock(rts51x_to_host(chip));
+
+ /* Wait for the aborted command to finish */
+ wait_for_completion(&chip->usb->notify);
+ return SUCCESS;
+}
+
+/* This invokes the transport reset mechanism to reset the state of the
+ * device */
+int device_reset(struct scsi_cmnd *srb)
+{
+ int result = 0;
+
+ RTS51X_DEBUGP("%s called\n", __func__);
+
+ return result < 0 ? FAILED : SUCCESS;
+}
+
+/* Simulate a SCSI bus reset by resetting the device's USB port. */
+int bus_reset(struct scsi_cmnd *srb)
+{
+ int result = 0;
+
+ RTS51X_DEBUGP("%s called\n", __func__);
+
+ return result < 0 ? FAILED : SUCCESS;
+}
+
+static const char *rts5139_info(struct Scsi_Host *host)
+{
+ return "SCSI emulation for RTS5139 USB card reader";
+}
+
+struct scsi_host_template rts51x_host_template = {
+ /* basic userland interface stuff */
+ .name = RTS51X_NAME,
+ .proc_name = RTS51X_NAME,
+ .proc_info = proc_info,
+ .info = rts5139_info,
+
+ /* command interface -- queued only */
+ .queuecommand = queuecommand,
+
+ /* error and abort handlers */
+ .eh_abort_handler = command_abort,
+ .eh_device_reset_handler = device_reset,
+ .eh_bus_reset_handler = bus_reset,
+
+ /* queue commands only, only one command per LUN */
+ .can_queue = 1,
+ .cmd_per_lun = 1,
+
+ /* unknown initiator id */
+ .this_id = -1,
+
+ .slave_alloc = slave_alloc,
+ .slave_configure = slave_configure,
+
+ /* lots of sg segments can be handled */
+ .sg_tablesize = SG_ALL,
+
+ /* limit the total size of a transfer to 120 KB */
+ .max_sectors = 240,
+
+ /* merge commands... this seems to help performance, but
+ * periodically someone should test to see which setting is more
+ * optimal.
+ */
+ .use_clustering = 1,
+
+ /* emulated HBA */
+ .emulated = 1,
+
+ /* we do our own delay after a device or bus reset */
+ .skip_settle_delay = 1,
+
+ /* sysfs device attributes */
+ /* .sdev_attrs = sysfs_device_attr_list, */
+
+ /* module management */
+ .module = THIS_MODULE
+};
+
diff --git a/drivers/staging/rts5139/rts51x_scsi.h b/drivers/staging/rts5139/rts51x_scsi.h
new file mode 100644
index 00000000000..3a8ca069b27
--- /dev/null
+++ b/drivers/staging/rts5139/rts51x_scsi.h
@@ -0,0 +1,162 @@
+/* Driver for Realtek RTS51xx USB card reader
+ * Header file
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_SCSI_H
+#define __RTS51X_SCSI_H
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <scsi/scsi_host.h>
+
+#include "rts51x_chip.h"
+
+#define MS_SP_CMND 0xFA
+#define MS_FORMAT 0xA0
+#define GET_MS_INFORMATION 0xB0
+
+#define VENDOR_CMND 0xF0
+
+#define READ_STATUS 0x09
+
+#define READ_MEM 0x0D
+#define WRITE_MEM 0x0E
+#define GET_BUS_WIDTH 0x13
+#define GET_SD_CSD 0x14
+#define TOGGLE_GPIO 0x15
+#define TRACE_MSG 0x18
+
+#define SCSI_APP_CMD 0x10
+
+#define PP_READ10 0x1A
+#define PP_WRITE10 0x0A
+#define READ_HOST_REG 0x1D
+#define WRITE_HOST_REG 0x0D
+#define SET_VAR 0x05
+#define GET_VAR 0x15
+#define DMA_READ 0x16
+#define DMA_WRITE 0x06
+#define GET_DEV_STATUS 0x10
+#define SET_CHIP_MODE 0x27
+#define SUIT_CMD 0xE0
+#define WRITE_PHY 0x07
+#define READ_PHY 0x17
+
+#define INIT_BATCHCMD 0x41
+#define ADD_BATCHCMD 0x42
+#define SEND_BATCHCMD 0x43
+#define GET_BATCHRSP 0x44
+
+#ifdef SUPPORT_CPRM
+/* SD Pass Through Command Extention */
+#define SD_PASS_THRU_MODE 0xD0
+#define SD_EXECUTE_NO_DATA 0xD1
+#define SD_EXECUTE_READ 0xD2
+#define SD_EXECUTE_WRITE 0xD3
+#define SD_GET_RSP 0xD4
+#define SD_HW_RST 0xD6
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+#define CMD_MSPRO_MG_RKEY 0xA4 /* Report Key Command */
+#define CMD_MSPRO_MG_SKEY 0xA3 /* Send Key Command */
+
+/* CBWCB field: key class */
+#define KC_MG_R_PRO 0xBE /* MG-R PRO */
+
+/* CBWCB field: key format */
+#define KF_SET_LEAF_ID 0x31 /* Set Leaf ID */
+#define KF_GET_LOC_EKB 0x32 /* Get Local EKB */
+#define KF_CHG_HOST 0x33 /* Challenge (host) */
+#define KF_RSP_CHG 0x34 /* Response and Challenge (device) */
+#define KF_RSP_HOST 0x35 /* Response (host) */
+#define KF_GET_ICV 0x36 /* Get ICV */
+#define KF_SET_ICV 0x37 /* SSet ICV */
+#endif
+
+struct rts51x_chip;
+
+/*-----------------------------------
+ Start-Stop-Unit
+-----------------------------------*/
+#define STOP_MEDIUM 0x00 /* access disable */
+#define MAKE_MEDIUM_READY 0x01 /* access enable */
+#define UNLOAD_MEDIUM 0x02 /* unload */
+#define LOAD_MEDIUM 0x03 /* load */
+
+/*-----------------------------------
+ STANDARD_INQUIRY
+-----------------------------------*/
+#define QULIFIRE 0x00
+#define AENC_FNC 0x00
+#define TRML_IOP 0x00
+#define REL_ADR 0x00
+#define WBUS_32 0x00
+#define WBUS_16 0x00
+#define SYNC 0x00
+#define LINKED 0x00
+#define CMD_QUE 0x00
+#define SFT_RE 0x00
+
+#define VEN_ID_LEN 8 /* Vendor ID Length */
+#define PRDCT_ID_LEN 16 /* Product ID Length */
+#define PRDCT_REV_LEN 4 /* Product LOT Length */
+
+#define DRCT_ACCESS_DEV 0x00 /* Direct Access Device */
+#define RMB_DISC 0x80 /* The Device is Removable */
+#define ANSI_SCSI2 0x02 /* Based on ANSI-SCSI2 */
+
+#define SCSI 0x00 /* Interface ID */
+
+void scsi_show_command(struct scsi_cmnd *srb);
+void set_sense_type(struct rts51x_chip *chip, unsigned int lun, int sense_type);
+void set_sense_data(struct rts51x_chip *chip, unsigned int lun, u8 err_code,
+ u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
+ u16 sns_key_info1);
+
+int rts51x_scsi_handler(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+
+struct Scsi_Host;
+struct scsi_device;
+struct scsi_cmnd;
+
+const char *host_info(struct Scsi_Host *host);
+int slave_alloc(struct scsi_device *sdev);
+int slave_configure(struct scsi_device *sdev);
+int proc_info(struct Scsi_Host *host, char *buffer,
+ char **start, off_t offset, int length, int inout);
+#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37) */
+int queuecommand(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *));
+#else
+int queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+#endif
+int command_abort(struct scsi_cmnd *srb);
+int device_reset(struct scsi_cmnd *srb);
+int bus_reset(struct scsi_cmnd *srb);
+
+#endif /* __RTS51X_SCSI_H */
diff --git a/drivers/staging/rts5139/rts51x_sys.h b/drivers/staging/rts5139/rts51x_sys.h
new file mode 100644
index 00000000000..b09cd34a6c0
--- /dev/null
+++ b/drivers/staging/rts5139/rts51x_sys.h
@@ -0,0 +1,54 @@
+/* Driver for Realtek USB RTS51xx card reader
+ * Header file
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_SYS_H
+#define __RTS51X_SYS_H
+
+#include "rts51x.h"
+#include "rts51x_chip.h"
+#include "rts51x_card.h"
+
+#define USING_POLLING_CYCLE_DELINK
+
+extern int rts51x_check_start_time(struct rts51x_chip *chip);
+extern void rts51x_set_start_time(struct rts51x_chip *chip);
+extern void rts51x_clear_start_time(struct rts51x_chip *chip);
+
+/* typedef dma_addr_t ULONG_PTR; */
+
+static inline void rts51x_reset_detected_cards(struct rts51x_chip *chip)
+{
+/* rts51x_reset_cards(chip); */
+}
+
+static inline void clear_first_install_mark(struct rts51x_chip *chip)
+{
+}
+
+void rts51x_enter_ss(struct rts51x_chip *chip);
+void rts51x_exit_ss(struct rts51x_chip *chip);
+
+#endif /* __RTS51X_SYS_H */
diff --git a/drivers/staging/rts5139/rts51x_transport.c b/drivers/staging/rts5139/rts51x_transport.c
new file mode 100644
index 00000000000..e11467acc57
--- /dev/null
+++ b/drivers/staging/rts5139/rts51x_transport.c
@@ -0,0 +1,1000 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+
+#include "debug.h"
+#include "rts51x.h"
+#include "rts51x_chip.h"
+#include "rts51x_card.h"
+#include "rts51x_scsi.h"
+#include "rts51x_transport.h"
+#include "trace.h"
+
+/***********************************************************************
+ * Scatter-gather transfer buffer access routines
+ ***********************************************************************/
+
+/* Copy a buffer of length buflen to/from the srb's transfer buffer.
+ * Update the **sgptr and *offset variables so that the next copy will
+ * pick up from where this one left off.
+ */
+
+unsigned int rts51x_access_sglist(unsigned char *buffer,
+ unsigned int buflen, void *sglist,
+ void **sgptr, unsigned int *offset,
+ enum xfer_buf_dir dir)
+{
+ unsigned int cnt;
+ struct scatterlist *sg = (struct scatterlist *)*sgptr;
+
+ /* We have to go through the list one entry
+ * at a time. Each s-g entry contains some number of pages, and
+ * each page has to be kmap()'ed separately. If the page is already
+ * in kernel-addressable memory then kmap() will return its address.
+ * If the page is not directly accessible -- such as a user buffer
+ * located in high memory -- then kmap() will map it to a temporary
+ * position in the kernel's virtual address space.
+ */
+
+ if (!sg)
+ sg = (struct scatterlist *)sglist;
+
+ /* This loop handles a single s-g list entry, which may
+ * include multiple pages. Find the initial page structure
+ * and the starting offset within the page, and update
+ * the *offset and **sgptr values for the next loop.
+ */
+ cnt = 0;
+ while (cnt < buflen && sg) {
+ struct page *page = sg_page(sg) +
+ ((sg->offset + *offset) >> PAGE_SHIFT);
+ unsigned int poff = (sg->offset + *offset) & (PAGE_SIZE - 1);
+ unsigned int sglen = sg->length - *offset;
+
+ if (sglen > buflen - cnt) {
+
+ /* Transfer ends within this s-g entry */
+ sglen = buflen - cnt;
+ *offset += sglen;
+ } else {
+
+ /* Transfer continues to next s-g entry */
+ *offset = 0;
+ sg = sg_next(sg);
+ }
+
+ /* Transfer the data for all the pages in this
+ * s-g entry. For each page: call kmap(), do the
+ * transfer, and call kunmap() immediately after. */
+ while (sglen > 0) {
+ unsigned int plen = min(sglen, (unsigned int)
+ PAGE_SIZE - poff);
+ unsigned char *ptr = kmap(page);
+
+ if (dir == TO_XFER_BUF)
+ memcpy(ptr + poff, buffer + cnt, plen);
+ else
+ memcpy(buffer + cnt, ptr + poff, plen);
+ kunmap(page);
+
+ /* Start at the beginning of the next page */
+ poff = 0;
+ ++page;
+ cnt += plen;
+ sglen -= plen;
+ }
+ }
+ *sgptr = sg;
+
+ /* Return the amount actually transferred */
+ return cnt;
+}
+
+unsigned int rts51x_access_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb,
+ struct scatterlist **sgptr,
+ unsigned int *offset, enum xfer_buf_dir dir)
+{
+ return rts51x_access_sglist(buffer, buflen, (void *)scsi_sglist(srb),
+ (void **)sgptr, offset, dir);
+}
+
+/* Store the contents of buffer into srb's transfer buffer and set the
+ * SCSI residue.
+ */
+void rts51x_set_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb)
+{
+ unsigned int offset = 0;
+ struct scatterlist *sg = NULL;
+
+ buflen = min(buflen, scsi_bufflen(srb));
+ buflen = rts51x_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
+ TO_XFER_BUF);
+ if (buflen < scsi_bufflen(srb))
+ scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
+}
+
+void rts51x_get_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb)
+{
+ unsigned int offset = 0;
+ struct scatterlist *sg = NULL;
+
+ buflen = min(buflen, scsi_bufflen(srb));
+ buflen = rts51x_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
+ FROM_XFER_BUF);
+ if (buflen < scsi_bufflen(srb))
+ scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
+}
+
+/* This is the completion handler which will wake us up when an URB
+ * completes.
+ */
+static void urb_done_completion(struct urb *urb)
+{
+ struct completion *urb_done_ptr = urb->context;
+
+ if (urb_done_ptr)
+ complete(urb_done_ptr);
+}
+
+/* This is the common part of the URB message submission code
+ *
+ * All URBs from the driver involved in handling a queued scsi
+ * command _must_ pass through this function (or something like it) for the
+ * abort mechanisms to work properly.
+ */
+static int rts51x_msg_common(struct rts51x_chip *chip, struct urb *urb,
+ int timeout)
+{
+ struct rts51x_usb *rts51x = chip->usb;
+ struct completion urb_done;
+ long timeleft;
+ int status;
+
+ /* don't submit URBs during abort processing */
+ if (test_bit(FLIDX_ABORTING, &rts51x->dflags))
+ TRACE_RET(chip, -EIO);
+
+ /* set up data structures for the wakeup system */
+ init_completion(&urb_done);
+
+ /* fill the common fields in the URB */
+ urb->context = &urb_done;
+ urb->actual_length = 0;
+ urb->error_count = 0;
+ urb->status = 0;
+
+ /* we assume that if transfer_buffer isn't us->iobuf then it
+ * hasn't been mapped for DMA. Yes, this is clunky, but it's
+ * easier than always having the caller tell us whether the
+ * transfer buffer has already been mapped. */
+ urb->transfer_flags = URB_NO_SETUP_DMA_MAP;
+ if (urb->transfer_buffer == rts51x->iobuf) {
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_dma = rts51x->iobuf_dma;
+ }
+ urb->setup_dma = rts51x->cr_dma;
+
+ /* submit the URB */
+ status = usb_submit_urb(urb, GFP_NOIO);
+ if (status) {
+ /* something went wrong */
+ TRACE_RET(chip, status);
+ }
+
+ /* since the URB has been submitted successfully, it's now okay
+ * to cancel it */
+ set_bit(FLIDX_URB_ACTIVE, &rts51x->dflags);
+
+ /* did an abort occur during the submission? */
+ if (test_bit(FLIDX_ABORTING, &rts51x->dflags)) {
+
+ /* cancel the URB, if it hasn't been cancelled already */
+ if (test_and_clear_bit(FLIDX_URB_ACTIVE, &rts51x->dflags)) {
+ RTS51X_DEBUGP("-- cancelling URB\n");
+ usb_unlink_urb(urb);
+ }
+ }
+
+ /* wait for the completion of the URB */
+ timeleft =
+ wait_for_completion_interruptible_timeout(&urb_done,
+ (timeout * HZ /
+ 1000) ? :
+ MAX_SCHEDULE_TIMEOUT);
+
+ clear_bit(FLIDX_URB_ACTIVE, &rts51x->dflags);
+
+ if (timeleft <= 0) {
+ RTS51X_DEBUGP("%s -- cancelling URB\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+ usb_kill_urb(urb);
+ if (timeleft == 0)
+ status = -ETIMEDOUT;
+ else
+ status = -EINTR;
+ } else {
+ status = urb->status;
+ }
+
+ return status;
+}
+
+/*
+ * Interpret the results of a URB transfer
+ */
+static int interpret_urb_result(struct rts51x_chip *chip, unsigned int pipe,
+ unsigned int length, int result,
+ unsigned int partial)
+{
+ int retval = STATUS_SUCCESS;
+
+ /* RTS51X_DEBUGP("Status code %d; transferred %u/%u\n",
+ result, partial, length); */
+ switch (result) {
+ /* no error code; did we send all the data? */
+ case 0:
+ if (partial != length) {
+ RTS51X_DEBUGP("-- short transfer\n");
+ TRACE_RET(chip, STATUS_TRANS_SHORT);
+ }
+ /* RTS51X_DEBUGP("-- transfer complete\n"); */
+ return STATUS_SUCCESS;
+ /* stalled */
+ case -EPIPE:
+ /* for control endpoints, (used by CB[I]) a stall indicates
+ * a failed command */
+ if (usb_pipecontrol(pipe)) {
+ RTS51X_DEBUGP("-- stall on control pipe\n");
+ TRACE_RET(chip, STATUS_STALLED);
+ }
+ /* for other sorts of endpoint, clear the stall */
+ RTS51X_DEBUGP("clearing endpoint halt for pipe 0x%x\n", pipe);
+ if (rts51x_clear_halt(chip, pipe) < 0)
+ TRACE_RET(chip, STATUS_ERROR);
+ retval = STATUS_STALLED;
+ TRACE_GOTO(chip, Exit);
+
+ /* babble - the device tried to send more than
+ * we wanted to read */
+ case -EOVERFLOW:
+ RTS51X_DEBUGP("-- babble\n");
+ retval = STATUS_TRANS_LONG;
+ TRACE_GOTO(chip, Exit);
+
+ /* the transfer was cancelled by abort,
+ * disconnect, or timeout */
+ case -ECONNRESET:
+ RTS51X_DEBUGP("-- transfer cancelled\n");
+ retval = STATUS_ERROR;
+ TRACE_GOTO(chip, Exit);
+
+ /* short scatter-gather read transfer */
+ case -EREMOTEIO:
+ RTS51X_DEBUGP("-- short read transfer\n");
+ retval = STATUS_TRANS_SHORT;
+ TRACE_GOTO(chip, Exit);
+
+ /* abort or disconnect in progress */
+ case -EIO:
+ RTS51X_DEBUGP("-- abort or disconnect in progress\n");
+ retval = STATUS_ERROR;
+ TRACE_GOTO(chip, Exit);
+
+ case -ETIMEDOUT:
+ RTS51X_DEBUGP("-- time out\n");
+ retval = STATUS_TIMEDOUT;
+ TRACE_GOTO(chip, Exit);
+
+ /* the catch-all error case */
+ default:
+ RTS51X_DEBUGP("-- unknown error\n");
+ retval = STATUS_ERROR;
+ TRACE_GOTO(chip, Exit);
+ }
+
+Exit:
+ if ((retval != STATUS_SUCCESS) && !usb_pipecontrol(pipe))
+ rts51x_clear_hw_error(chip);
+
+ return retval;
+}
+
+int rts51x_ctrl_transfer(struct rts51x_chip *chip, unsigned int pipe,
+ u8 request, u8 requesttype, u16 value, u16 index,
+ void *data, u16 size, int timeout)
+{
+ struct rts51x_usb *rts51x = chip->usb;
+ int result;
+
+ RTS51X_DEBUGP("%s: rq=%02x rqtype=%02x value=%04x index=%02x len=%u\n",
+ __func__, request, requesttype, value, index, size);
+
+ /* fill in the devrequest structure */
+ rts51x->cr->bRequestType = requesttype;
+ rts51x->cr->bRequest = request;
+ rts51x->cr->wValue = cpu_to_le16(value);
+ rts51x->cr->wIndex = cpu_to_le16(index);
+ rts51x->cr->wLength = cpu_to_le16(size);
+
+ /* fill and submit the URB */
+ usb_fill_control_urb(rts51x->current_urb, rts51x->pusb_dev, pipe,
+ (unsigned char *)rts51x->cr, data, size,
+ urb_done_completion, NULL);
+ result = rts51x_msg_common(chip, rts51x->current_urb, timeout);
+
+ return interpret_urb_result(chip, pipe, size, result,
+ rts51x->current_urb->actual_length);
+}
+
+int rts51x_clear_halt(struct rts51x_chip *chip, unsigned int pipe)
+{
+ int result;
+ int endp = usb_pipeendpoint(pipe);
+
+ if (usb_pipein(pipe))
+ endp |= USB_DIR_IN;
+
+ result = rts51x_ctrl_transfer(chip, SND_CTRL_PIPE(chip),
+ USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT,
+ USB_ENDPOINT_HALT, endp, NULL, 0, 3000);
+ if (result != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ usb_reset_endpoint(chip->usb->pusb_dev, endp);
+
+ return STATUS_SUCCESS;
+}
+
+int rts51x_reset_pipe(struct rts51x_chip *chip, char pipe)
+{
+ return rts51x_clear_halt(chip, pipe);
+}
+
+static void rts51x_sg_clean(struct usb_sg_request *io)
+{
+ if (io->urbs) {
+ while (io->entries--)
+ usb_free_urb(io->urbs[io->entries]);
+ kfree(io->urbs);
+ io->urbs = NULL;
+ }
+#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) */
+ if (io->dev->dev.dma_mask != NULL)
+ usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe),
+ io->sg, io->nents);
+#endif
+ io->dev = NULL;
+}
+#if 0
+static void rts51x_sg_complete(struct urb *urb)
+{
+ struct usb_sg_request *io = urb->context;
+ int status = urb->status;
+
+ spin_lock(&io->lock);
+
+ /* In 2.5 we require hcds' endpoint queues not to progress after fault
+ * reports, until the completion callback (this!) returns. That lets
+ * device driver code (like this routine) unlink queued urbs first,
+ * if it needs to, since the HC won't work on them at all. So it's
+ * not possible for page N+1 to overwrite page N, and so on.
+ *
+ * That's only for "hard" faults; "soft" faults (unlinks) sometimes
+ * complete before the HCD can get requests away from hardware,
+ * though never during cleanup after a hard fault.
+ */
+ if (io->status
+ && (io->status != -ECONNRESET
+ || status != -ECONNRESET)
+ && urb->actual_length) {
+ dev_err(io->dev->bus->controller,
+ "dev %s ep%d%s scatterlist error %d/%d\n",
+ io->dev->devpath,
+ usb_endpoint_num(&urb->ep->desc),
+ usb_urb_dir_in(urb) ? "in" : "out",
+ status, io->status);
+ /* BUG (); */
+ }
+
+ if (io->status == 0 && status && status != -ECONNRESET) {
+ int i, found, retval;
+
+ io->status = status;
+
+ /* the previous urbs, and this one, completed already.
+ * unlink pending urbs so they won't rx/tx bad data.
+ * careful: unlink can sometimes be synchronous...
+ */
+ spin_unlock(&io->lock);
+ for (i = 0, found = 0; i < io->entries; i++) {
+ if (!io->urbs[i] || !io->urbs[i]->dev)
+ continue;
+ if (found) {
+ retval = usb_unlink_urb(io->urbs[i]);
+ if (retval != -EINPROGRESS &&
+ retval != -ENODEV &&
+ retval != -EBUSY)
+ dev_err(&io->dev->dev,
+ "%s, unlink --> %d\n",
+ __func__, retval);
+ } else if (urb == io->urbs[i])
+ found = 1;
+ }
+ spin_lock(&io->lock);
+ }
+ urb->dev = NULL;
+
+ /* on the last completion, signal usb_sg_wait() */
+ io->bytes += urb->actual_length;
+ io->count--;
+ if (!io->count)
+ complete(&io->complete);
+
+ spin_unlock(&io->lock);
+}
+
+/* This function is ported from usb_sg_init, which can transfer
+ * sg list partially */
+int rts51x_sg_init_partial(struct usb_sg_request *io, struct usb_device *dev,
+ unsigned pipe, unsigned period, void *buf, struct scatterlist **sgptr,
+ unsigned int *offset, int nents, size_t length, gfp_t mem_flags)
+{
+ int i;
+ int urb_flags;
+ int dma;
+ struct scatterlist *sg = *sgptr, *first_sg;
+
+ first_sg = (struct scatterlist *)buf;
+ if (!sg)
+ sg = first_sg;
+
+ if (!io || !dev || !sg
+ || usb_pipecontrol(pipe)
+ || usb_pipeisoc(pipe)
+ || (nents <= 0))
+ return -EINVAL;
+
+ spin_lock_init(&io->lock);
+ io->dev = dev;
+ io->pipe = pipe;
+ io->sg = first_sg; /* used by unmap */
+ io->nents = nents;
+
+ RTS51X_DEBUGP("Before map, sg address: 0x%x\n", (unsigned int)sg);
+ RTS51X_DEBUGP("Before map, dev address: 0x%x\n", (unsigned int)dev);
+
+ /* not all host controllers use DMA (like the mainstream pci ones);
+ * they can use PIO (sl811) or be software over another transport.
+ */
+ dma = (dev->dev.dma_mask != NULL);
+ if (dma) {
+ /* map the whole sg list, because here we only know the
+ * total nents */
+ io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
+ first_sg, nents);
+ } else {
+ io->entries = nents;
+ }
+
+ /* initialize all the urbs we'll use */
+ if (io->entries <= 0)
+ return io->entries;
+
+ io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
+ if (!io->urbs)
+ goto nomem;
+
+ urb_flags = URB_NO_INTERRUPT;
+ if (dma)
+ urb_flags |= URB_NO_TRANSFER_DMA_MAP;
+ if (usb_pipein(pipe))
+ urb_flags |= URB_SHORT_NOT_OK;
+
+ RTS51X_DEBUGP("io->entries = %d\n", io->entries);
+
+ for (i = 0; (sg != NULL) && (length > 0); i++) {
+ unsigned len;
+
+ RTS51X_DEBUGP("sg address: 0x%x\n", (unsigned int)sg);
+ RTS51X_DEBUGP("length = %d, *offset = %d\n", length, *offset);
+
+ io->urbs[i] = usb_alloc_urb(0, mem_flags);
+ if (!io->urbs[i]) {
+ io->entries = i;
+ goto nomem;
+ }
+
+ io->urbs[i]->dev = NULL;
+ io->urbs[i]->pipe = pipe;
+ io->urbs[i]->interval = period;
+ io->urbs[i]->transfer_flags = urb_flags;
+
+ io->urbs[i]->complete = rts51x_sg_complete;
+ io->urbs[i]->context = io;
+
+ if (dma) {
+ io->urbs[i]->transfer_dma =
+ sg_dma_address(sg) + *offset;
+ len = sg_dma_len(sg) - *offset;
+ io->urbs[i]->transfer_buffer = NULL;
+ RTS51X_DEBUGP(" -- sg entry dma length = %d\n",
+ sg_dma_len(sg));
+ } else {
+ /* hc may use _only_ transfer_buffer */
+ io->urbs[i]->transfer_buffer = sg_virt(sg) + *offset;
+ len = sg->length - *offset;
+ RTS51X_DEBUGP(" -- sg entry length = %d\n",
+ sg->length);
+ }
+
+ if (length >= len) {
+ *offset = 0;
+ io->urbs[i]->transfer_buffer_length = len;
+ length -= len;
+ sg = sg_next(sg);
+ } else {
+ *offset += length;
+ io->urbs[i]->transfer_buffer_length = length;
+ length = 0;
+ }
+ if (length == 0)
+ io->entries = i + 1;
+#if 0
+ if (length) {
+ len = min_t(unsigned, len, length);
+ length -= len;
+ if (length == 0) {
+ io->entries = i + 1;
+ *offset += len;
+ } else {
+ *offset = 0;
+ }
+ }
+#endif
+ }
+ RTS51X_DEBUGP("In %s, urb count: %d\n", __func__, i);
+ io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
+
+ RTS51X_DEBUGP("sg address stored in sgptr: 0x%x\n", (unsigned int)sg);
+ *sgptr = sg;
+
+ /* transaction state */
+ io->count = io->entries;
+ io->status = 0;
+ io->bytes = 0;
+ init_completion(&io->complete);
+ return 0;
+
+nomem:
+ rts51x_sg_clean(io);
+ return -ENOMEM;
+}
+#endif
+int rts51x_sg_init(struct usb_sg_request *io, struct usb_device *dev,
+ unsigned pipe, unsigned period, struct scatterlist *sg,
+ int nents, size_t length, gfp_t mem_flags)
+{
+ return usb_sg_init(io, dev, pipe, period, sg, nents, length, mem_flags);
+}
+
+int rts51x_sg_wait(struct usb_sg_request *io, int timeout)
+{
+ long timeleft;
+ int i;
+ int entries = io->entries;
+
+ /* queue the urbs. */
+ spin_lock_irq(&io->lock);
+ i = 0;
+ while (i < entries && !io->status) {
+ int retval;
+
+ io->urbs[i]->dev = io->dev;
+ retval = usb_submit_urb(io->urbs[i], GFP_ATOMIC);
+
+ /* after we submit, let completions or cancelations fire;
+ * we handshake using io->status.
+ */
+ spin_unlock_irq(&io->lock);
+ switch (retval) {
+ /* maybe we retrying will recover */
+ case -ENXIO: /* hc didn't queue this one */
+ case -EAGAIN:
+ case -ENOMEM:
+ io->urbs[i]->dev = NULL;
+ retval = 0;
+ yield();
+ break;
+
+ /* no error? continue immediately.
+ *
+ * NOTE: to work better with UHCI (4K I/O buffer may
+ * need 3K of TDs) it may be good to limit how many
+ * URBs are queued at once; N milliseconds?
+ */
+ case 0:
+ ++i;
+ cpu_relax();
+ break;
+
+ /* fail any uncompleted urbs */
+ default:
+ io->urbs[i]->dev = NULL;
+ io->urbs[i]->status = retval;
+ dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
+ __func__, retval);
+ usb_sg_cancel(io);
+ }
+ spin_lock_irq(&io->lock);
+ if (retval && (io->status == 0 || io->status == -ECONNRESET))
+ io->status = retval;
+ }
+ io->count -= entries - i;
+ if (io->count == 0)
+ complete(&io->complete);
+ spin_unlock_irq(&io->lock);
+
+ timeleft =
+ wait_for_completion_interruptible_timeout(&io->complete,
+ (timeout * HZ /
+ 1000) ? :
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeleft <= 0) {
+ RTS51X_DEBUGP("%s -- cancelling SG request\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+ usb_sg_cancel(io);
+ if (timeleft == 0)
+ io->status = -ETIMEDOUT;
+ else
+ io->status = -EINTR;
+ }
+
+ rts51x_sg_clean(io);
+ return io->status;
+}
+
+/*
+ * Transfer a scatter-gather list via bulk transfer
+ *
+ * This function does basically the same thing as usb_stor_bulk_transfer_buf()
+ * above, but it uses the usbcore scatter-gather library.
+ */
+static int rts51x_bulk_transfer_sglist(struct rts51x_chip *chip,
+ unsigned int pipe,
+ struct scatterlist *sg, int num_sg,
+ unsigned int length,
+ unsigned int *act_len, int timeout)
+{
+ int result;
+
+ /* don't submit s-g requests during abort processing */
+ if (test_bit(FLIDX_ABORTING, &chip->usb->dflags))
+ TRACE_RET(chip, STATUS_ERROR);
+
+ /* initialize the scatter-gather request block */
+ RTS51X_DEBUGP("%s: xfer %u bytes, %d entries\n", __func__,
+ length, num_sg);
+ result =
+ rts51x_sg_init(&chip->usb->current_sg, chip->usb->pusb_dev, pipe, 0,
+ sg, num_sg, length, GFP_NOIO);
+ if (result) {
+ RTS51X_DEBUGP("rts51x_sg_init returned %d\n", result);
+ TRACE_RET(chip, STATUS_ERROR);
+ }
+
+ /* since the block has been initialized successfully, it's now
+ * okay to cancel it */
+ set_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);
+
+ /* did an abort occur during the submission? */
+ if (test_bit(FLIDX_ABORTING, &chip->usb->dflags)) {
+
+ /* cancel the request, if it hasn't been cancelled already */
+ if (test_and_clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags)) {
+ RTS51X_DEBUGP("-- cancelling sg request\n");
+ usb_sg_cancel(&chip->usb->current_sg);
+ }
+ }
+
+ /* wait for the completion of the transfer */
+ result = rts51x_sg_wait(&chip->usb->current_sg, timeout);
+
+ clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);
+
+ /* result = us->current_sg.status; */
+ if (act_len)
+ *act_len = chip->usb->current_sg.bytes;
+ return interpret_urb_result(chip, pipe, length, result,
+ chip->usb->current_sg.bytes);
+}
+#if 0
+static int rts51x_bulk_transfer_sglist_partial(struct rts51x_chip *chip,
+ unsigned int pipe, void *buf, struct scatterlist **sgptr,
+ unsigned int *offset, int num_sg, unsigned int length,
+ unsigned int *act_len, int timeout)
+{
+ int result;
+
+ /* don't submit s-g requests during abort processing */
+ if (test_bit(FLIDX_ABORTING, &chip->usb->dflags))
+ TRACE_RET(chip, STATUS_ERROR);
+
+ /* initialize the scatter-gather request block */
+ RTS51X_DEBUGP("%s: xfer %u bytes, %d entries\n", __func__,
+ length, num_sg);
+ result = rts51x_sg_init_partial(&chip->usb->current_sg,
+ chip->usb->pusb_dev, pipe, 0, buf, sgptr, offset,
+ num_sg, length, GFP_NOIO);
+ if (result) {
+ RTS51X_DEBUGP("rts51x_sg_init_partial returned %d\n", result);
+ TRACE_RET(chip, STATUS_ERROR);
+ }
+
+ /* since the block has been initialized successfully, it's now
+ * okay to cancel it */
+ set_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);
+
+ /* did an abort occur during the submission? */
+ if (test_bit(FLIDX_ABORTING, &chip->usb->dflags)) {
+
+ /* cancel the request, if it hasn't been cancelled already */
+ if (test_and_clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags)) {
+ RTS51X_DEBUGP("-- cancelling sg request\n");
+ usb_sg_cancel(&chip->usb->current_sg);
+ }
+ }
+
+ /* wait for the completion of the transfer */
+ result = rts51x_sg_wait(&chip->usb->current_sg, timeout);
+
+ clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);
+
+ /* result = us->current_sg.status; */
+ if (act_len)
+ *act_len = chip->usb->current_sg.bytes;
+ return interpret_urb_result(chip, pipe, length, result,
+ chip->usb->current_sg.bytes);
+}
+#endif
+int rts51x_bulk_transfer_buf(struct rts51x_chip *chip, unsigned int pipe,
+ void *buf, unsigned int length,
+ unsigned int *act_len, int timeout)
+{
+ int result;
+
+ /* fill and submit the URB */
+ usb_fill_bulk_urb(chip->usb->current_urb, chip->usb->pusb_dev, pipe,
+ buf, length, urb_done_completion, NULL);
+ result = rts51x_msg_common(chip, chip->usb->current_urb, timeout);
+
+ /* store the actual length of the data transferred */
+ if (act_len)
+ *act_len = chip->usb->current_urb->actual_length;
+ return interpret_urb_result(chip, pipe, length, result,
+ chip->usb->current_urb->actual_length);
+}
+
+int rts51x_transfer_data(struct rts51x_chip *chip, unsigned int pipe,
+ void *buf, unsigned int len, int use_sg,
+ unsigned int *act_len, int timeout)
+{
+ int result;
+
+ if (timeout < 600)
+ timeout = 600;
+
+ if (use_sg) {
+ result =
+ rts51x_bulk_transfer_sglist(chip, pipe,
+ (struct scatterlist *)buf,
+ use_sg, len, act_len, timeout);
+ } else {
+ result =
+ rts51x_bulk_transfer_buf(chip, pipe, buf, len, act_len,
+ timeout);
+ }
+
+ return result;
+}
+
+int rts51x_transfer_data_partial(struct rts51x_chip *chip, unsigned int pipe,
+ void *buf, void **ptr, unsigned int *offset,
+ unsigned int len, int use_sg,
+ unsigned int *act_len, int timeout)
+{
+ int result;
+
+ if (timeout < 600)
+ timeout = 600;
+
+ if (use_sg) {
+ void *tmp_buf = kmalloc(len, GFP_KERNEL);
+ if (!tmp_buf)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ if (usb_pipeout(pipe)) {
+ rts51x_access_sglist(tmp_buf, len, buf, ptr, offset,
+ FROM_XFER_BUF);
+ }
+ result =
+ rts51x_bulk_transfer_buf(chip, pipe, tmp_buf, len, act_len,
+ timeout);
+ if (result == STATUS_SUCCESS) {
+ if (usb_pipein(pipe)) {
+ rts51x_access_sglist(tmp_buf, len, buf, ptr,
+ offset, TO_XFER_BUF);
+ }
+ }
+
+ kfree(tmp_buf);
+#if 0
+ result = rts51x_bulk_transfer_sglist_partial(chip, pipe, buf,
+ (struct scatterlist **)ptr, offset,
+ use_sg, len, act_len, timeout);
+#endif
+ } else {
+ unsigned int step = 0;
+ if (offset)
+ step = *offset;
+ result =
+ rts51x_bulk_transfer_buf(chip, pipe, buf + step, len,
+ act_len, timeout);
+ if (act_len)
+ step += *act_len;
+ else
+ step += len;
+ if (offset)
+ *offset = step;
+ }
+
+ return result;
+}
+
+int rts51x_get_epc_status(struct rts51x_chip *chip, u16 * status)
+{
+ unsigned int pipe = RCV_INTR_PIPE(chip);
+ struct usb_host_endpoint *ep;
+ struct completion urb_done;
+ int result;
+
+ if (!status)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ /* set up data structures for the wakeup system */
+ init_completion(&urb_done);
+
+ ep = chip->usb->pusb_dev->ep_in[usb_pipeendpoint(pipe)];
+
+ /* fill and submit the URB */
+ /* We set interval to 1 here, so the polling interval is controlled
+ * by our polling thread */
+ usb_fill_int_urb(chip->usb->intr_urb, chip->usb->pusb_dev, pipe,
+ status, 2, urb_done_completion, &urb_done, 1);
+
+ result = rts51x_msg_common(chip, chip->usb->intr_urb, 50);
+
+ return interpret_urb_result(chip, pipe, 2, result,
+ chip->usb->intr_urb->actual_length);
+}
+
+u8 media_not_present[] = {
+ 0x70, 0, 0x02, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0 };
+u8 invalid_cmd_field[] = {
+ 0x70, 0, 0x05, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0 };
+
+void rts51x_invoke_transport(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int result;
+
+#ifdef CONFIG_PM
+ if (chip->option.ss_en) {
+ if (srb->cmnd[0] == TEST_UNIT_READY) {
+ if (RTS51X_CHK_STAT(chip, STAT_SS)) {
+ if (check_fake_card_ready(chip,
+ SCSI_LUN(srb))) {
+ srb->result = SAM_STAT_GOOD;
+ } else {
+ srb->result = SAM_STAT_CHECK_CONDITION;
+ memcpy(srb->sense_buffer,
+ media_not_present, SENSE_SIZE);
+ }
+ return;
+ }
+ } else if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
+ if (RTS51X_CHK_STAT(chip, STAT_SS)) {
+ int prevent = srb->cmnd[4] & 0x1;
+
+ if (prevent) {
+ srb->result = SAM_STAT_CHECK_CONDITION;
+ memcpy(srb->sense_buffer,
+ invalid_cmd_field, SENSE_SIZE);
+ } else {
+ srb->result = SAM_STAT_GOOD;
+ }
+ return;
+ }
+ } else {
+ if (RTS51X_CHK_STAT(chip, STAT_SS)
+ || RTS51X_CHK_STAT(chip, STAT_SS_PRE)) {
+ /* Wake up device */
+ RTS51X_DEBUGP("Try to wake up device\n");
+ chip->resume_from_scsi = 1;
+
+ rts51x_try_to_exit_ss(chip);
+
+ if (RTS51X_CHK_STAT(chip, STAT_SS)) {
+ wait_timeout(3000);
+
+ rts51x_init_chip(chip);
+ rts51x_init_cards(chip);
+ }
+ }
+ }
+ }
+#endif
+
+ result = rts51x_scsi_handler(srb, chip);
+
+ /* if there is a transport error, reset and don't auto-sense */
+ if (result == TRANSPORT_ERROR) {
+ RTS51X_DEBUGP("-- transport indicates error, resetting\n");
+ srb->result = DID_ERROR << 16;
+ goto Handle_Errors;
+ }
+
+ srb->result = SAM_STAT_GOOD;
+
+ /*
+ * If we have a failure, we're going to do a REQUEST_SENSE
+ * automatically. Note that we differentiate between a command
+ * "failure" and an "error" in the transport mechanism.
+ */
+ if (result == TRANSPORT_FAILED) {
+ /* set the result so the higher layers expect this data */
+ srb->result = SAM_STAT_CHECK_CONDITION;
+ memcpy(srb->sense_buffer,
+ (unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
+ sizeof(struct sense_data_t));
+ }
+
+ return;
+
+ /* Error and abort processing: try to resynchronize with the device
+ * by issuing a port reset. If that fails, try a class-specific
+ * device reset. */
+Handle_Errors:
+ return;
+}
diff --git a/drivers/staging/rts5139/rts51x_transport.h b/drivers/staging/rts5139/rts51x_transport.h
new file mode 100644
index 00000000000..f7aa87f7f1a
--- /dev/null
+++ b/drivers/staging/rts5139/rts51x_transport.h
@@ -0,0 +1,80 @@
+/* Driver for Realtek RTS51xx USB card reader
+ * Header file
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_TRANSPORT_H
+#define __RTS51X_TRANSPORT_H
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#include "rts51x.h"
+#include "rts51x_chip.h"
+
+#if 1 /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34) */
+#define URB_NO_SETUP_DMA_MAP 0
+#endif
+
+unsigned int rts51x_access_sglist(unsigned char *buffer,
+ unsigned int buflen, void *sglist,
+ void **sgptr, unsigned int *offset,
+ enum xfer_buf_dir dir);
+unsigned int rts51x_access_xfer_buf(unsigned char *buffer, unsigned int buflen,
+ struct scsi_cmnd *srb,
+ struct scatterlist **sgptr,
+ unsigned int *offset,
+ enum xfer_buf_dir dir);
+void rts51x_set_xfer_buf(unsigned char *buffer, unsigned int buflen,
+ struct scsi_cmnd *srb);
+void rts51x_get_xfer_buf(unsigned char *buffer, unsigned int buflen,
+ struct scsi_cmnd *srb);
+
+int rts51x_ctrl_transfer(struct rts51x_chip *chip, unsigned int pipe,
+ u8 request, u8 requesttype, u16 value, u16 index,
+ void *data, u16 size, int timeout);
+int rts51x_clear_halt(struct rts51x_chip *chip, unsigned int pipe);
+int rts51x_transfer_data(struct rts51x_chip *chip, unsigned int pipe,
+ void *buf, unsigned int len, int use_sg,
+ unsigned int *act_len, int timeout);
+int rts51x_transfer_data_partial(struct rts51x_chip *chip, unsigned int pipe,
+ void *buf, void **ptr, unsigned int *offset,
+ unsigned int len, int use_sg,
+ unsigned int *act_len, int timeout);
+
+/* whichPipe:
+ * 0: bulk in pipe
+ * 1: bulk out pipe
+ * 2: intr in pipe */
+int rts51x_reset_pipe(struct rts51x_chip *chip, char pipe);
+
+#ifndef POLLING_IN_THREAD
+int rts51x_start_epc_transfer(struct rts51x_chip *chip);
+void rts51x_cancel_epc_transfer(struct rts51x_chip *chip);
+#endif
+
+int rts51x_get_epc_status(struct rts51x_chip *chip, u16 * status);
+void rts51x_invoke_transport(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+
+#endif /* __RTS51X_TRANSPORT_H */
diff --git a/drivers/staging/rts5139/sd.c b/drivers/staging/rts5139/sd.c
new file mode 100644
index 00000000000..d5dd2f926d1
--- /dev/null
+++ b/drivers/staging/rts5139/sd.c
@@ -0,0 +1,3400 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+
+#include "debug.h"
+#include "trace.h"
+#include "rts51x.h"
+#include "rts51x_transport.h"
+#include "rts51x_scsi.h"
+#include "rts51x_card.h"
+#include "sd.h"
+
+static inline void sd_set_reset_fail(struct rts51x_chip *chip, u8 err_code)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ sd_card->sd_reset_fail |= err_code;
+}
+
+static inline void sd_clear_reset_fail(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ sd_card->sd_reset_fail = 0;
+}
+
+static inline int sd_check_reset_fail(struct rts51x_chip *chip, u8 err_code)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ return sd_card->sd_reset_fail & err_code;
+}
+
+static inline void sd_set_err_code(struct rts51x_chip *chip, u8 err_code)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ sd_card->err_code |= err_code;
+}
+
+static inline void sd_clr_err_code(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ sd_card->err_code = 0;
+}
+
+static inline int sd_check_err_code(struct rts51x_chip *chip, u8 err_code)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ return sd_card->err_code & err_code;
+}
+
+static int sd_parse_err_code(struct rts51x_chip *chip)
+{
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+int sd_check_data0_status(struct rts51x_chip *chip)
+{
+ int retval;
+ u8 stat;
+
+ retval = rts51x_ep0_read_register(chip, SD_BUS_STAT, &stat);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ if (!(stat & SD_DAT0_STATUS)) {
+ sd_set_err_code(chip, SD_BUSY);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_send_cmd_get_rsp(struct rts51x_chip *chip, u8 cmd_idx,
+ u32 arg, u8 rsp_type, u8 *rsp, int rsp_len)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int timeout = 50;
+ u16 reg_addr;
+ u8 buf[17], stat;
+ int len = 2;
+ int rty_cnt = 0;
+
+ sd_clr_err_code(chip);
+
+ RTS51X_DEBUGP("SD/MMC CMD %d, arg = 0x%08x\n", cmd_idx, arg);
+
+ if (rsp_type == SD_RSP_TYPE_R1b)
+ timeout = 3000;
+
+RTY_SEND_CMD:
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD0, 0xFF, 0x40 | cmd_idx);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD1, 0xFF, (u8) (arg >> 24));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD2, 0xFF, (u8) (arg >> 16));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD3, 0xFF, (u8) (arg >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD4, 0xFF, (u8) arg);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG2, 0xFF, rsp_type);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TM_CMD_RSP | SD_TRANSFER_START);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END | SD_STAT_IDLE,
+ SD_TRANSFER_END | SD_STAT_IDLE);
+
+ rts51x_add_cmd(chip, READ_REG_CMD, SD_STAT1, 0, 0);
+
+ if (CHECK_USB(chip, USB_20)) {
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ /* Read data from ping-pong buffer */
+ for (reg_addr = PPBUF_BASE2;
+ reg_addr < PPBUF_BASE2 + 16; reg_addr++) {
+ rts51x_add_cmd(chip, READ_REG_CMD, reg_addr, 0,
+ 0);
+ }
+ len = 18;
+ } else if (rsp_type != SD_RSP_TYPE_R0) {
+ /* Read data from SD_CMDx registers */
+ for (reg_addr = SD_CMD0; reg_addr <= SD_CMD4;
+ reg_addr++) {
+ rts51x_add_cmd(chip, READ_REG_CMD, reg_addr, 0,
+ 0);
+ }
+ len = 7;
+ } else {
+ len = 2;
+ }
+ } else {
+ len = 2;
+ }
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, len, timeout);
+
+ if (CHECK_SD_TRANS_FAIL(chip, retval)) {
+ u8 val;
+
+ rts51x_ep0_read_register(chip, SD_STAT1, &val);
+ RTS51X_DEBUGP("SD_STAT1: 0x%x\n", val);
+
+ rts51x_ep0_read_register(chip, SD_STAT2, &val);
+ RTS51X_DEBUGP("SD_STAT2: 0x%x\n", val);
+
+ if (val & SD_RSP_80CLK_TIMEOUT)
+ sd_set_err_code(chip, SD_RSP_TIMEOUT);
+
+ rts51x_ep0_read_register(chip, SD_BUS_STAT, &val);
+ RTS51X_DEBUGP("SD_BUS_STAT: 0x%x\n", val);
+
+ if (retval == STATUS_TIMEDOUT) {
+ if (rsp_type & SD_WAIT_BUSY_END) {
+ retval = sd_check_data0_status(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ } else {
+ sd_set_err_code(chip, SD_TO_ERR);
+ }
+ }
+ rts51x_clear_sd_error(chip);
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R0)
+ return STATUS_SUCCESS;
+
+ if (CHECK_USB(chip, USB_20)) {
+ rts51x_read_rsp_buf(chip, 2, buf, len - 2);
+ } else {
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ reg_addr = PPBUF_BASE2;
+ len = 16;
+ } else {
+ reg_addr = SD_CMD0;
+ len = 5;
+ }
+ retval = rts51x_seq_read_register(chip, reg_addr,
+ (unsigned short)len, buf);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+ stat = chip->rsp_buf[1];
+
+ /* Check (Start,Transmission) bit of Response */
+ if ((buf[0] & 0xC0) != 0) {
+ sd_set_err_code(chip, SD_STS_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ /* Check CRC7 */
+ if (!(rsp_type & SD_NO_CHECK_CRC7)) {
+ if (stat & SD_CRC7_ERR) {
+ if (cmd_idx == WRITE_MULTIPLE_BLOCK) {
+ sd_set_err_code(chip, SD_CRC_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (rty_cnt < SD_MAX_RETRY_COUNT) {
+ wait_timeout(20);
+ rty_cnt++;
+ goto RTY_SEND_CMD;
+ } else {
+ sd_set_err_code(chip, SD_CRC_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+ /* Check Status */
+ if ((rsp_type == SD_RSP_TYPE_R1) || (rsp_type == SD_RSP_TYPE_R1b)) {
+ if ((cmd_idx != SEND_RELATIVE_ADDR)
+ && (cmd_idx != SEND_IF_COND)) {
+ if (cmd_idx != STOP_TRANSMISSION) {
+ if (buf[1] & 0x80)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#ifdef SUPPORT_SD_LOCK
+ /* exclude bit25 CARD_IS_LOCKED */
+ if (buf[1] & 0x7D) {
+#else
+ if (buf[1] & 0x7F) {
+#endif
+ RTS51X_DEBUGP("buf[1]: 0x%02x\n", buf[1]);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (buf[2] & 0xFF) {
+ RTS51X_DEBUGP("buf[2]: 0x%02x\n", buf[2]);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (buf[3] & 0x80) {
+ RTS51X_DEBUGP("buf[3]: 0x%02x\n", buf[3]);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (buf[3] & 0x01) {
+ /* Get "READY_FOR_DATA" bit */
+ sd_card->sd_data_buf_ready = 1;
+ } else {
+ sd_card->sd_data_buf_ready = 0;
+ }
+ }
+ }
+
+ if (rsp && rsp_len)
+ memcpy(rsp, buf, rsp_len);
+
+ return STATUS_SUCCESS;
+}
+
+static inline void sd_print_debug_reg(struct rts51x_chip *chip)
+{
+#ifdef CONFIG_RTS5139_DEBUG
+ u8 val = 0;
+
+ rts51x_ep0_read_register(chip, SD_STAT1, &val);
+ RTS51X_DEBUGP("SD_STAT1: 0x%x\n", val);
+ rts51x_ep0_read_register(chip, SD_STAT2, &val);
+ RTS51X_DEBUGP("SD_STAT2: 0x%x\n", val);
+ rts51x_ep0_read_register(chip, SD_BUS_STAT, &val);
+ RTS51X_DEBUGP("SD_BUS_STAT: 0x%x\n", val);
+#endif
+}
+
+int sd_read_data(struct rts51x_chip *chip, u8 trans_mode, u8 *cmd, int cmd_len,
+ u16 byte_cnt, u16 blk_cnt, u8 bus_width, u8 *buf, int buf_len,
+ int timeout)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+
+ sd_clr_err_code(chip);
+
+ if (!buf)
+ buf_len = 0;
+
+ if (buf_len > 512)
+ /* This function can't read data more than one page */
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rts51x_init_cmd(chip);
+
+ if (cmd_len) {
+ RTS51X_DEBUGP("SD/MMC CMD %d\n", cmd[0] - 0x40);
+ for (i = 0; i < (cmd_len < 6 ? cmd_len : 6); i++) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD0 + i, 0xFF,
+ cmd[i]);
+ }
+ }
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8) byte_cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF,
+ (u8) (byte_cnt >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, (u8) blk_cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF,
+ (u8) (blk_cnt >> 8));
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG1, 0x03, bus_width);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END
+ | SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ if (trans_mode != SD_TM_AUTO_TUNING) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+ }
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ trans_mode | SD_TRANSFER_START);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 1, timeout);
+
+ if (CHECK_SD_TRANS_FAIL(chip, retval)) {
+ sd_print_debug_reg(chip);
+ if (retval == STATUS_TIMEDOUT) {
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (buf && buf_len) {
+ retval = rts51x_read_ppbuf(chip, buf, buf_len);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_write_data(struct rts51x_chip *chip, u8 trans_mode,
+ u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt,
+ u8 bus_width, u8 *buf, int buf_len, int timeout)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+
+ sd_clr_err_code(chip);
+
+ if (!buf)
+ buf_len = 0;
+
+ /* This function can't write data more than one page */
+ if (buf_len > 512)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (buf && buf_len) {
+ retval = rts51x_write_ppbuf(chip, buf, buf_len);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ rts51x_init_cmd(chip);
+
+ if (cmd_len) {
+ RTS51X_DEBUGP("SD/MMC CMD %d\n", cmd[0] - 0x40);
+ for (i = 0; i < (cmd_len < 6 ? cmd_len : 6); i++) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD0 + i, 0xFF,
+ cmd[i]);
+ }
+ }
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8) byte_cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF,
+ (u8) (byte_cnt >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, (u8) blk_cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF,
+ (u8) (blk_cnt >> 8));
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG1, 0x03, bus_width);
+
+ if (cmd_len) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6);
+
+ } else {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 |
+ SD_RSP_LEN_6);
+ }
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ trans_mode | SD_TRANSFER_START);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 1, timeout);
+
+ if (CHECK_SD_TRANS_FAIL(chip, retval)) {
+ sd_print_debug_reg(chip);
+
+ if (retval == STATUS_TIMEDOUT)
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_check_csd(struct rts51x_chip *chip, char check_wp)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+ u8 csd_ver, trans_speed;
+ u8 rsp[16];
+
+ for (i = 0; i < 6; i++) {
+ if (monitor_card_cd(chip, SD_CARD) == CD_NOT_EXIST) {
+ sd_set_reset_fail(chip, SD_RESET_FAIL);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval =
+ sd_send_cmd_get_rsp(chip, SEND_CSD, sd_card->sd_addr,
+ SD_RSP_TYPE_R2, rsp, 16);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+
+ if (i == 6)
+ TRACE_RET(chip, STATUS_FAIL);
+ memcpy(sd_card->raw_csd, rsp + 1, 15);
+ /* Get CRC7 */
+ RTS51X_READ_REG(chip, SD_CMD5, sd_card->raw_csd + 15);
+
+ RTS51X_DEBUGP("CSD Response:\n");
+ RTS51X_DUMP(rsp, 16);
+
+ /* Get CSD Version */
+ csd_ver = (rsp[1] & 0xc0) >> 6;
+ RTS51X_DEBUGP("csd_ver = %d\n", csd_ver);
+
+ trans_speed = rsp[4];
+ if ((trans_speed & 0x07) == 0x02) { /* 10Mbits/s */
+ if ((trans_speed & 0xf8) >= 0x30) { /* >25Mbits/s */
+ if (chip->asic_code)
+ sd_card->sd_clock = 46;
+ else
+ sd_card->sd_clock = CLK_50;
+ } else if ((trans_speed & 0xf8) == 0x28) { /* 20Mbits/s */
+ if (chip->asic_code)
+ sd_card->sd_clock = 39;
+ else
+ sd_card->sd_clock = CLK_40;
+ } else if ((trans_speed & 0xf8) == 0x20) { /* 15Mbits/s */
+ if (chip->asic_code)
+ sd_card->sd_clock = 29;
+ else
+ sd_card->sd_clock = CLK_30;
+ } else if ((trans_speed & 0xf8) >= 0x10) { /* 12Mbits/s */
+ if (chip->asic_code)
+ sd_card->sd_clock = 23;
+ else
+ sd_card->sd_clock = CLK_20;
+ } else if ((trans_speed & 0x08) >= 0x08) { /* 10Mbits/s */
+ if (chip->asic_code)
+ sd_card->sd_clock = 19;
+ else
+ sd_card->sd_clock = CLK_20;
+ } /*else { */
+ /*If this ,then slow card will use 30M clock */
+ /* TRACE_RET(chip, STATUS_FAIL); */
+ /* } */
+ }
+ /*else {
+ TRACE_RET(chip, STATUS_FAIL);
+ } */
+ if (CHK_MMC_SECTOR_MODE(sd_card)) {
+ sd_card->capacity = 0;
+ } else {
+ /* For High-Capacity Card, CSD_STRUCTURE always be "0x1" */
+ if ((!CHK_SD_HCXC(sd_card)) || (csd_ver == 0)) {
+ /* Calculate total sector according to C_SIZE,
+ * C_SIZE_MULT & READ_BL_LEN */
+ u8 blk_size, c_size_mult;
+ u16 c_size;
+ /* Get READ_BL_LEN */
+ blk_size = rsp[6] & 0x0F;
+ /* Get C_SIZE */
+ c_size = ((u16) (rsp[7] & 0x03) << 10)
+ + ((u16) rsp[8] << 2)
+ + ((u16) (rsp[9] & 0xC0) >> 6);
+ /* Get C_SIZE_MUL */
+ c_size_mult = (u8) ((rsp[10] & 0x03) << 1);
+ c_size_mult += (rsp[11] & 0x80) >> 7;
+ /* Calculate total Capacity */
+ sd_card->capacity =
+ (((u32) (c_size + 1)) *
+ (1 << (c_size_mult + 2))) << (blk_size - 9);
+ } else {
+ /* High Capacity Card and Use CSD2.0 Version */
+ u32 total_sector = 0;
+ total_sector = (((u32) rsp[8] & 0x3f) << 16) |
+ ((u32) rsp[9] << 8) | (u32) rsp[10];
+ /* Total Capacity= (C_SIZE+1) *
+ * 512K Byte = (C_SIZE+1)K Sector,1K = 1024 Bytes */
+ sd_card->capacity = (total_sector + 1) << 10;
+ }
+ }
+
+ /* We need check Write-Protected Status by Field PERM WP or TEMP WP */
+ if (check_wp) {
+ if (rsp[15] & 0x30)
+ chip->card_wp |= SD_CARD;
+ RTS51X_DEBUGP("CSD WP Status: 0x%x\n", rsp[15]);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_set_sample_push_timing(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ rts51x_init_cmd(chip);
+
+ if (CHK_SD_SDR104(sd_card) || CHK_SD_SDR50(sd_card)) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG1,
+ 0x0C | SD_ASYNC_FIFO_RST,
+ SD_30_MODE | SD_ASYNC_FIFO_RST);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ } else if (CHK_SD_DDR50(sd_card) || CHK_MMC_DDR52(sd_card)) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG1,
+ 0x0C | SD_ASYNC_FIFO_RST,
+ SD_DDR_MODE | SD_ASYNC_FIFO_RST);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_PUSH_POINT_CTL,
+ DDR_VAR_TX_CMD_DAT, DDR_VAR_TX_CMD_DAT);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL,
+ DDR_VAR_RX_DAT | DDR_VAR_RX_CMD,
+ DDR_VAR_RX_DAT | DDR_VAR_RX_CMD);
+ } else {
+ u8 val = 0;
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG1, 0x0C, SD_20_MODE);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1);
+
+ if ((chip->option.sd_ctl & SD_PUSH_POINT_CTL_MASK) ==
+ SD_PUSH_POINT_AUTO) {
+ val = SD20_TX_NEG_EDGE;
+ } else if ((chip->option.sd_ctl & SD_PUSH_POINT_CTL_MASK) ==
+ SD_PUSH_POINT_DELAY) {
+ val = SD20_TX_14_AHEAD;
+ } else {
+ val = SD20_TX_NEG_EDGE;
+ }
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_PUSH_POINT_CTL,
+ SD20_TX_SEL_MASK, val);
+
+ if ((chip->option.sd_ctl & SD_SAMPLE_POINT_CTL_MASK) ==
+ SD_SAMPLE_POINT_AUTO) {
+ if (chip->asic_code) {
+ if (CHK_SD_HS(sd_card) || CHK_MMC_52M(sd_card))
+ val = SD20_RX_14_DELAY;
+ else
+ val = SD20_RX_POS_EDGE;
+ } else {
+ val = SD20_RX_14_DELAY;
+ }
+ } else if ((chip->option.sd_ctl & SD_SAMPLE_POINT_CTL_MASK) ==
+ SD_SAMPLE_POINT_DELAY) {
+ val = SD20_RX_14_DELAY;
+ } else {
+ val = SD20_RX_POS_EDGE;
+ }
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL,
+ SD20_RX_SEL_MASK, val);
+ }
+
+ if (CHK_MMC_DDR52(sd_card) && CHK_MMC_8BIT(sd_card)) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DMA1_CTL,
+ EXTEND_DMA1_ASYNC_SIGNAL, 0);
+ }
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+static void sd_choose_proper_clock(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ if (CHK_SD_SDR104(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->option.asic_sd_sdr104_clk;
+ else
+ sd_card->sd_clock = chip->option.fpga_sd_sdr104_clk;
+ } else if (CHK_SD_DDR50(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->option.asic_sd_ddr50_clk;
+ else
+ sd_card->sd_clock = chip->option.fpga_sd_ddr50_clk;
+ } else if (CHK_SD_SDR50(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->option.asic_sd_sdr50_clk;
+ else
+ sd_card->sd_clock = chip->option.fpga_sd_sdr50_clk;
+ } else if (CHK_SD_HS(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->option.asic_sd_hs_clk;
+ else
+ sd_card->sd_clock = chip->option.fpga_sd_hs_clk;
+ } else if (CHK_MMC_52M(sd_card) || CHK_MMC_DDR52(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->option.asic_mmc_52m_clk;
+ else
+ sd_card->sd_clock = chip->option.fpga_mmc_52m_clk;
+ } else if (CHK_MMC_26M(sd_card)) {
+ if (chip->asic_code) {
+ sd_card->sd_clock = 46;
+ RTS51X_DEBUGP("Set MMC clock to 22.5MHz\n");
+ } else {
+ sd_card->sd_clock = CLK_50;
+ }
+ }
+}
+
+static int sd_set_init_para(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ retval = sd_set_sample_push_timing(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ sd_choose_proper_clock(chip);
+
+ retval = switch_clock(chip, sd_card->sd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+int sd_select_card(struct rts51x_chip *chip, int select)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd_idx, cmd_type;
+ u32 addr;
+
+ if (select) {
+ cmd_idx = SELECT_CARD;
+ cmd_type = SD_RSP_TYPE_R1;
+ addr = sd_card->sd_addr;
+ } else {
+ cmd_idx = DESELECT_CARD;
+ cmd_type = SD_RSP_TYPE_R0;
+ addr = 0;
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, cmd_idx, addr, cmd_type, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef SUPPORT_SD_LOCK
+int sd_update_lock_status(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 rsp[5];
+
+ retval =
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, rsp, 5);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (rsp[1] & 0x02)
+ sd_card->sd_lock_status |= SD_LOCKED;
+ else
+ sd_card->sd_lock_status &= ~SD_LOCKED;
+
+ RTS51X_DEBUGP("sd_card->sd_lock_status = 0x%x\n",
+ sd_card->sd_lock_status);
+
+ if (rsp[1] & 0x01) {
+ /* LOCK_UNLOCK_FAILED */
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+int sd_wait_currentstate_dataready(struct rts51x_chip *chip, u8 statechk,
+ u8 rdychk, u16 pollingcnt)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 rsp[5];
+ u16 i;
+
+ for (i = 0; i < pollingcnt; i++) {
+
+ retval =
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, rsp, 5);
+ if (retval == STATUS_SUCCESS) {
+ if (((rsp[3] & 0x1E) == statechk)
+ && ((rsp[3] & 0x01) == rdychk)) {
+ return STATUS_SUCCESS;
+ }
+ } else {
+ rts51x_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ return STATUS_TIMEDOUT;
+}
+
+static int sd_voltage_switch(struct rts51x_chip *chip)
+{
+ int retval;
+ u8 stat;
+
+ RTS51X_WRITE_REG(chip, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP,
+ SD_CLK_TOGGLE_EN);
+
+ retval =
+ sd_send_cmd_get_rsp(chip, VOLTAGE_SWITCH, 0, SD_RSP_TYPE_R1, NULL,
+ 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTS51X_READ_REG(chip, SD_BUS_STAT, &stat);
+ if (stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rts51x_init_cmd(chip);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BUS_STAT, 0xFF,
+ SD_CLK_FORCE_STOP);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_PAD_CTL, SD_IO_USING_1V8,
+ SD_IO_USING_1V8);
+ if (chip->asic_code)
+ rts51x_add_cmd(chip, WRITE_REG_CMD, LDO_POWER_CFG,
+ TUNE_SD18_MASK, TUNE_SD18_1V8);
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ wait_timeout(chip->option.D3318_off_delay);
+
+ RTS51X_WRITE_REG(chip, SD_BUS_STAT, 0xFF, SD_CLK_TOGGLE_EN);
+ wait_timeout(10);
+
+ RTS51X_READ_REG(chip, SD_BUS_STAT, &stat);
+ if ((stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS)) !=
+ (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS)) {
+ rts51x_init_cmd(chip);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BUS_STAT, 0xFF,
+ SD_CLK_FORCE_STOP);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_CLK_EN, 0xFF, 0);
+ rts51x_send_cmd(chip, MODE_C, 100);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ RTS51X_WRITE_REG(chip, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_change_phase(struct rts51x_chip *chip, u8 sample_point,
+ u8 tune_dir)
+{
+ u16 SD_VP_CTL, SD_DCMPS_CTL;
+ u8 val;
+ int retval;
+
+ RTS51X_DEBUGP("sd_change_phase (sample_point = %d, tune_dir = %d)\n",
+ sample_point, tune_dir);
+
+ if (tune_dir == TUNE_RX) {
+ SD_VP_CTL = SD_VPCLK1_CTL;
+ SD_DCMPS_CTL = SD_DCMPS1_CTL;
+ } else {
+ SD_VP_CTL = SD_VPCLK0_CTL;
+ SD_DCMPS_CTL = SD_DCMPS0_CTL;
+ }
+
+ if (chip->asic_code) {
+ RTS51X_WRITE_REG(chip, CLK_DIV, CLK_CHANGE, CLK_CHANGE);
+ RTS51X_WRITE_REG(chip, SD_VP_CTL, 0x1F, sample_point);
+ RTS51X_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ RTS51X_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET,
+ PHASE_NOT_RESET);
+ RTS51X_WRITE_REG(chip, CLK_DIV, CLK_CHANGE, 0);
+ } else {
+#ifdef CONFIG_RTS5139_DEBUG
+ RTS51X_READ_REG(chip, SD_VP_CTL, &val);
+ RTS51X_DEBUGP("SD_VP_CTL: 0x%x\n", val);
+ RTS51X_READ_REG(chip, SD_DCMPS_CTL, &val);
+ RTS51X_DEBUGP("SD_DCMPS_CTL: 0x%x\n", val);
+#endif
+
+ RTS51X_WRITE_REG(chip, CLK_DIV, CLK_CHANGE, CLK_CHANGE);
+ udelay(100);
+ RTS51X_WRITE_REG(chip, SD_VP_CTL, 0xFF,
+ PHASE_NOT_RESET | sample_point);
+ udelay(200);
+
+ rts51x_init_cmd(chip);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_DCMPS_CTL, DCMPS_CHANGE,
+ DCMPS_CHANGE);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_DCMPS_CTL,
+ DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE);
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, Fail);
+
+ retval = rts51x_get_rsp(chip, 1, 500);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, Fail);
+
+ val = chip->rsp_buf[0];
+ if (val & DCMPS_ERROR)
+ TRACE_GOTO(chip, Fail);
+ if ((val & DCMPS_CURRENT_PHASE) != sample_point)
+ TRACE_GOTO(chip, Fail);
+ RTS51X_WRITE_REG(chip, SD_DCMPS_CTL, DCMPS_CHANGE, 0);
+ RTS51X_WRITE_REG(chip, CLK_DIV, CLK_CHANGE, 0);
+ udelay(100);
+ }
+
+ RTS51X_WRITE_REG(chip, SD_CFG1, SD_ASYNC_FIFO_RST, 0);
+
+ return STATUS_SUCCESS;
+
+Fail:
+#ifdef CONFIG_RTS5139_DEBUG
+ rts51x_ep0_read_register(chip, SD_VP_CTL, &val);
+ RTS51X_DEBUGP("SD_VP_CTL: 0x%x\n", val);
+ rts51x_ep0_read_register(chip, SD_DCMPS_CTL, &val);
+ RTS51X_DEBUGP("SD_DCMPS_CTL: 0x%x\n", val);
+#endif
+
+ RTS51X_WRITE_REG(chip, SD_DCMPS_CTL, DCMPS_CHANGE, 0);
+ RTS51X_WRITE_REG(chip, SD_VP_CTL, PHASE_CHANGE, 0);
+ wait_timeout(10);
+
+ return STATUS_FAIL;
+}
+
+static int sd_check_spec(struct rts51x_chip *chip, u8 bus_width)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], buf[8];
+
+ retval =
+ sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ cmd[0] = 0x40 | SEND_SCR;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval =
+ sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 8, 1, bus_width, buf,
+ 8, 250);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_sd_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ memcpy(sd_card->raw_scr, buf, 8);
+
+ if ((buf[0] & 0x0F) == 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_query_switch_result(struct rts51x_chip *chip, u8 func_group,
+ u8 func_to_switch, u8 *buf, int buf_len)
+{
+ u8 support_mask = 0, query_switch = 0, switch_busy = 0;
+ int support_offset = 0, query_switch_offset = 0, check_busy_offset = 0;
+
+ if (func_group == SD_FUNC_GROUP_1) {
+ support_offset = FUNCTION_GROUP1_SUPPORT_OFFSET;
+ query_switch_offset = FUNCTION_GROUP1_QUERY_SWITCH_OFFSET;
+ check_busy_offset = FUNCTION_GROUP1_CHECK_BUSY_OFFSET;
+
+ switch (func_to_switch) {
+ case HS_SUPPORT:
+ support_mask = HS_SUPPORT_MASK;
+ query_switch = HS_QUERY_SWITCH_OK;
+ switch_busy = HS_SWITCH_BUSY;
+ break;
+
+ case SDR50_SUPPORT:
+ support_mask = SDR50_SUPPORT_MASK;
+ query_switch = SDR50_QUERY_SWITCH_OK;
+ switch_busy = SDR50_SWITCH_BUSY;
+ break;
+
+ case SDR104_SUPPORT:
+ support_mask = SDR104_SUPPORT_MASK;
+ query_switch = SDR104_QUERY_SWITCH_OK;
+ switch_busy = SDR104_SWITCH_BUSY;
+ break;
+
+ case DDR50_SUPPORT:
+ support_mask = DDR50_SUPPORT_MASK;
+ query_switch = DDR50_QUERY_SWITCH_OK;
+ switch_busy = DDR50_SWITCH_BUSY;
+ break;
+
+ default:
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else if (func_group == SD_FUNC_GROUP_3) {
+ support_offset = FUNCTION_GROUP3_SUPPORT_OFFSET;
+ query_switch_offset = FUNCTION_GROUP3_QUERY_SWITCH_OFFSET;
+ check_busy_offset = FUNCTION_GROUP3_CHECK_BUSY_OFFSET;
+
+ switch (func_to_switch) {
+ case DRIVING_TYPE_A:
+ support_mask = DRIVING_TYPE_A_MASK;
+ query_switch = TYPE_A_QUERY_SWITCH_OK;
+ switch_busy = TYPE_A_SWITCH_BUSY;
+ break;
+
+ case DRIVING_TYPE_C:
+ support_mask = DRIVING_TYPE_C_MASK;
+ query_switch = TYPE_C_QUERY_SWITCH_OK;
+ switch_busy = TYPE_C_SWITCH_BUSY;
+ break;
+
+ case DRIVING_TYPE_D:
+ support_mask = DRIVING_TYPE_D_MASK;
+ query_switch = TYPE_D_QUERY_SWITCH_OK;
+ switch_busy = TYPE_D_SWITCH_BUSY;
+ break;
+
+ default:
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else if (func_group == SD_FUNC_GROUP_4) {
+ support_offset = FUNCTION_GROUP4_SUPPORT_OFFSET;
+ query_switch_offset = FUNCTION_GROUP4_QUERY_SWITCH_OFFSET;
+ check_busy_offset = FUNCTION_GROUP4_CHECK_BUSY_OFFSET;
+
+ switch (func_to_switch) {
+ case CURRENT_LIMIT_400:
+ support_mask = CURRENT_LIMIT_400_MASK;
+ query_switch = CURRENT_LIMIT_400_QUERY_SWITCH_OK;
+ switch_busy = CURRENT_LIMIT_400_SWITCH_BUSY;
+ break;
+
+ case CURRENT_LIMIT_600:
+ support_mask = CURRENT_LIMIT_600_MASK;
+ query_switch = CURRENT_LIMIT_600_QUERY_SWITCH_OK;
+ switch_busy = CURRENT_LIMIT_600_SWITCH_BUSY;
+ break;
+
+ case CURRENT_LIMIT_800:
+ support_mask = CURRENT_LIMIT_800_MASK;
+ query_switch = CURRENT_LIMIT_800_QUERY_SWITCH_OK;
+ switch_busy = CURRENT_LIMIT_800_SWITCH_BUSY;
+ break;
+
+ default:
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (func_group == SD_FUNC_GROUP_4)
+ buf[query_switch_offset] =
+ (buf[query_switch_offset] & 0xf0) >> 4;
+ if (!(buf[support_offset] & support_mask) ||
+ ((buf[query_switch_offset] & 0x0F) != query_switch))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((buf[DATA_STRUCTURE_VER_OFFSET] == 0x01) &&
+ ((buf[check_busy_offset] & switch_busy) == switch_busy))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_check_switch_mode(struct rts51x_chip *chip, u8 mode,
+ u8 func_group, u8 func_to_switch, u8 bus_width)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], buf[64];
+
+ RTS51X_DEBUGP("sd_check_switch_mode (mode = %d, func_group = %d,"
+ "func_to_switch = %d)\n", mode, func_group, func_to_switch);
+
+ cmd[0] = 0x40 | SWITCH;
+ cmd[1] = mode;
+
+ if (func_group == SD_FUNC_GROUP_1) {
+ cmd[2] = 0xFF;
+ cmd[3] = 0xFF;
+ cmd[4] = 0xF0 + func_to_switch;
+ } else if (func_group == SD_FUNC_GROUP_3) {
+ cmd[2] = 0xFF;
+ cmd[3] = 0xF0 + func_to_switch;
+ cmd[4] = 0xFF;
+ } else if (func_group == SD_FUNC_GROUP_4) {
+ cmd[2] = 0xFF;
+ cmd[3] = 0x0F + (func_to_switch << 4);
+ cmd[4] = 0xFF;
+ } else {
+ cmd[1] = SD_CHECK_MODE;
+ cmd[2] = 0xFF;
+ cmd[3] = 0xFF;
+ cmd[4] = 0xFF;
+ }
+
+ retval =
+ sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, bus_width, buf,
+ 64, 250);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_sd_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ if (func_group == NO_ARGUMENT) {
+ sd_card->func_group1_mask = buf[0x0D];
+ sd_card->func_group2_mask = buf[0x0B];
+ sd_card->func_group3_mask = buf[0x09];
+ sd_card->func_group4_mask = buf[0x07];
+
+ RTS51X_DEBUGP("func_group1_mask = 0x%02x\n", buf[0x0D]);
+ RTS51X_DEBUGP("func_group2_mask = 0x%02x\n", buf[0x0B]);
+ RTS51X_DEBUGP("func_group3_mask = 0x%02x\n", buf[0x09]);
+ RTS51X_DEBUGP("func_group4_mask = 0x%02x\n", buf[0x07]);
+ } else {
+ if ((buf[0] == 0) && (buf[1] == 0))
+ TRACE_RET(chip, STATUS_FAIL);
+ retval =
+ sd_query_switch_result(chip, func_group, func_to_switch,
+ buf, 64);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_check_switch(struct rts51x_chip *chip,
+ u8 func_group, u8 func_to_switch, u8 bus_width)
+{
+ int retval;
+ int i;
+ int switch_good = 0;
+
+ for (i = 0; i < 3; i++) {
+ if (monitor_card_cd(chip, SD_CARD) == CD_NOT_EXIST) {
+ sd_set_reset_fail(chip, SD_RESET_FAIL);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_check_switch_mode(chip, SD_CHECK_MODE, func_group,
+ func_to_switch, bus_width);
+ if (retval == STATUS_SUCCESS) {
+ u8 stat;
+
+ retval = sd_check_switch_mode(chip, SD_SWITCH_MODE,
+ func_group, func_to_switch, bus_width);
+ if (retval == STATUS_SUCCESS) {
+ switch_good = 1;
+ break;
+ }
+
+ RTS51X_READ_REG(chip, SD_STAT1, &stat);
+
+ if (stat & SD_CRC16_ERR) {
+ RTS51X_DEBUGP("SD CRC16 error when switching"
+ "mode\n");
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ wait_timeout(20);
+ }
+
+ if (!switch_good)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_switch_function(struct rts51x_chip *chip, u8 bus_width)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+ u8 func_to_switch = 0;
+
+ /* Get supported functions */
+ retval = sd_check_switch_mode(chip, SD_CHECK_MODE,
+ NO_ARGUMENT, NO_ARGUMENT, bus_width);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ sd_card->func_group1_mask &= ~(sd_card->sd_switch_fail);
+
+ for (i = 0; i < 4; i++) {
+ switch ((u8) (chip->option.sd_speed_prior >> (i * 8))) {
+ case DDR50_SUPPORT:
+ if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK)
+ && (CHECK_UHS50(chip)))
+ func_to_switch = DDR50_SUPPORT;
+ break;
+
+ case SDR50_SUPPORT:
+ if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK)
+ && (CHECK_UHS50(chip)))
+ func_to_switch = SDR50_SUPPORT;
+ break;
+
+ case HS_SUPPORT:
+ if (sd_card->func_group1_mask & HS_SUPPORT_MASK)
+ func_to_switch = HS_SUPPORT;
+ break;
+
+ default:
+ continue;
+ }
+
+ if (func_to_switch)
+ break;
+ }
+ RTS51X_DEBUGP("SD_FUNC_GROUP_1: func_to_switch = 0x%02x",
+ func_to_switch);
+
+#ifdef SUPPORT_SD_LOCK
+ if ((sd_card->sd_lock_status & SD_SDR_RST)
+ && (DDR50_SUPPORT == func_to_switch)
+ && (sd_card->func_group1_mask & SDR50_SUPPORT_MASK)) {
+ func_to_switch = SDR50_SUPPORT;
+ RTS51X_DEBUGP("Using SDR50 instead of DDR50 for SD Lock\n");
+ }
+#endif
+
+ if (func_to_switch) {
+ retval =
+ sd_check_switch(chip, SD_FUNC_GROUP_1, func_to_switch,
+ bus_width);
+ if (retval != STATUS_SUCCESS) {
+ if (func_to_switch == SDR104_SUPPORT)
+ sd_card->sd_switch_fail = SDR104_SUPPORT_MASK;
+ else if (func_to_switch == DDR50_SUPPORT)
+ sd_card->sd_switch_fail = DDR50_SUPPORT_MASK;
+ else if (func_to_switch == SDR50_SUPPORT)
+ sd_card->sd_switch_fail = SDR50_SUPPORT_MASK;
+ else if (func_to_switch == HS_SUPPORT)
+ sd_card->sd_switch_fail = HS_SUPPORT_MASK;
+
+ TRACE_RET(chip, retval);
+ }
+
+ if (func_to_switch == SDR104_SUPPORT)
+ SET_SD_SDR104(sd_card);
+ else if (func_to_switch == DDR50_SUPPORT)
+ SET_SD_DDR50(sd_card);
+ else if (func_to_switch == SDR50_SUPPORT)
+ SET_SD_SDR50(sd_card);
+ else
+ SET_SD_HS(sd_card);
+ }
+
+ if (CHK_SD_DDR50(sd_card))
+ RTS51X_WRITE_REG(chip, SD_CFG1, 0x0C, SD_DDR_MODE);
+
+ func_to_switch = 0;
+ if (sd_card->func_group4_mask & CURRENT_LIMIT_400_MASK)
+ func_to_switch = CURRENT_LIMIT_400;
+
+ if (func_to_switch) {
+ RTS51X_DEBUGP("Try to switch current_limit_400\n");
+ retval =
+ sd_check_switch(chip, SD_FUNC_GROUP_4, func_to_switch,
+ bus_width);
+ RTS51X_DEBUGP("Switch current_limit_400 status: (%d)\n",
+ retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_wait_data_idle(struct rts51x_chip *chip)
+{
+ int retval = STATUS_TIMEDOUT;
+ int i;
+ u8 val = 0;
+
+ for (i = 0; i < 100; i++) {
+ retval = rts51x_ep0_read_register(chip, SD_DATA_STATE, &val);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ if (val & SD_DATA_IDLE) {
+ retval = STATUS_SUCCESS;
+ break;
+ }
+ udelay(100);
+ }
+ RTS51X_DEBUGP("SD_DATA_STATE: 0x%02x\n", val);
+
+ return retval;
+}
+
+static int sd_sdr_tuning_rx_cmd(struct rts51x_chip *chip, u8 sample_point)
+{
+ int retval;
+ u8 cmd[5];
+
+ retval = sd_change_phase(chip, sample_point, TUNE_RX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ cmd[0] = 0x40 | SEND_TUNING_PATTERN;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_read_data(chip, SD_TM_AUTO_TUNING,
+ cmd, 5, 0x40, 1, SD_BUS_WIDTH_4, NULL, 0, 100);
+ if (retval != STATUS_SUCCESS) {
+ /* Wait till SD DATA IDLE */
+ (void)sd_wait_data_idle(chip);
+
+ rts51x_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_ddr_tuning_rx_cmd(struct rts51x_chip *chip, u8 sample_point)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5];
+
+ retval = sd_change_phase(chip, sample_point, TUNE_RX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTS51X_DEBUGP("sd ddr tuning rx\n");
+
+ retval =
+ sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ cmd[0] = 0x40 | SD_STATUS;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ,
+ cmd, 5, 64, 1, SD_BUS_WIDTH_4, NULL, 0, 100);
+ if (retval != STATUS_SUCCESS) {
+ /* Wait till SD DATA IDLE */
+ (void)sd_wait_data_idle(chip);
+
+ rts51x_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int mmc_ddr_tunning_rx_cmd(struct rts51x_chip *chip, u8 sample_point)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], bus_width;
+
+ if (CHK_MMC_8BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_8;
+ else if (CHK_MMC_4BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_4;
+ else
+ bus_width = SD_BUS_WIDTH_1;
+
+ retval = sd_change_phase(chip, sample_point, TUNE_RX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTS51X_DEBUGP("mmc ddr tuning rx\n");
+
+ cmd[0] = 0x40 | SEND_EXT_CSD;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ,
+ cmd, 5, 0x200, 1, bus_width, NULL, 0, 100);
+ if (retval != STATUS_SUCCESS) {
+ /* Wait till SD DATA IDLE */
+ (void)sd_wait_data_idle(chip);
+
+ rts51x_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_sdr_tuning_tx_cmd(struct rts51x_chip *chip, u8 sample_point)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ retval = sd_change_phase(chip, sample_point, TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTS51X_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ if (sd_check_err_code(chip, SD_RSP_TIMEOUT)) {
+ /* Tunning TX fail */
+ rts51x_ep0_write_register(chip, SD_CFG3,
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ RTS51X_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_ddr_tuning_tx_cmd(struct rts51x_chip *chip, u8 sample_point)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], bus_width;
+
+ retval = sd_change_phase(chip, sample_point, TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (CHK_SD(sd_card)) {
+ bus_width = SD_BUS_WIDTH_4;
+ } else {
+ if (CHK_MMC_8BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_8;
+ else if (CHK_MMC_4BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_4;
+ else
+ bus_width = SD_BUS_WIDTH_1;
+ }
+ retval = sd_wait_currentstate_dataready(chip, 0x08, 1, 20);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTS51X_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
+
+ cmd[0] = 0x40 | PROGRAM_CSD;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2,
+ cmd, 5, 16, 1, bus_width, sd_card->raw_csd, 16, 100);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_sd_error(chip);
+ /* Tunning TX fail */
+ rts51x_ep0_write_register(chip, SD_CFG3,
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTS51X_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
+
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
+
+ return STATUS_SUCCESS;
+}
+
+static u8 sd_search_final_phase(struct rts51x_chip *chip, u32 phase_map,
+ u8 tune_dir)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct timing_phase_path path[MAX_PHASE + 1];
+ int i, j, cont_path_cnt;
+ int new_block, max_len;
+ u8 final_phase = 0xFF;
+ int final_path_idx;
+
+ if (phase_map == 0xffff) {
+ if (CHK_SD_DDR50(sd_card)) {
+ if (tune_dir == TUNE_TX)
+ final_phase = chip->option.ddr50_tx_phase;
+ else
+ final_phase = chip->option.ddr50_rx_phase;
+ RTS51X_DEBUGP("DDR50 tuning dir:%d all pass,"
+ "so select default phase:0x%x.\n",
+ tune_dir, final_phase);
+ } else {
+ if (tune_dir == TUNE_TX)
+ final_phase = chip->option.sdr50_tx_phase;
+ else
+ final_phase = chip->option.sdr50_rx_phase;
+ RTS51X_DEBUGP("SDR50 tuning dir:%d all pass,"
+ "so select default phase:0x%x.\n",
+ tune_dir, final_phase);
+ }
+ goto Search_Finish;
+ }
+
+ cont_path_cnt = 0;
+ new_block = 1;
+ j = 0;
+ for (i = 0; i < MAX_PHASE + 1; i++) {
+ if (phase_map & (1 << i)) {
+ if (new_block) {
+ new_block = 0;
+ j = cont_path_cnt++;
+ path[j].start = i;
+ path[j].end = i;
+ } else {
+ path[j].end = i;
+ }
+ } else {
+ new_block = 1;
+ if (cont_path_cnt) {
+ int idx = cont_path_cnt - 1;
+ path[idx].len =
+ path[idx].end - path[idx].start + 1;
+ path[idx].mid =
+ path[idx].start + path[idx].len / 2;
+ }
+ }
+ }
+
+ if (cont_path_cnt == 0) {
+ RTS51X_DEBUGP("No continuous phase path\n");
+ goto Search_Finish;
+ } else {
+ int idx = cont_path_cnt - 1;
+ path[idx].len = path[idx].end - path[idx].start + 1;
+ path[idx].mid = path[idx].start + path[idx].len / 2;
+ }
+
+ if ((path[0].start == 0) &&
+ (path[cont_path_cnt - 1].end == MAX_PHASE)) {
+ path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1;
+ path[0].len += path[cont_path_cnt - 1].len;
+ path[0].mid = path[0].start + path[0].len / 2;
+ if (path[0].mid < 0)
+ path[0].mid += MAX_PHASE + 1;
+ cont_path_cnt--;
+ }
+ max_len = 0;
+ final_phase = 0;
+ final_path_idx = 0;
+ for (i = 0; i < cont_path_cnt; i++) {
+ if (path[i].len > max_len) {
+ max_len = path[i].len;
+ final_phase = (u8) path[i].mid;
+ final_path_idx = i;
+ }
+
+ RTS51X_DEBUGP("path[%d].start = %d\n", i, path[i].start);
+ RTS51X_DEBUGP("path[%d].end = %d\n", i, path[i].end);
+ RTS51X_DEBUGP("path[%d].len = %d\n", i, path[i].len);
+ RTS51X_DEBUGP("path[%d].mid = %d\n", i, path[i].mid);
+ RTS51X_DEBUGP("\n");
+ }
+
+ if ((tune_dir == TUNE_TX) && (CHK_SD_SDR50(sd_card))
+ && chip->option.sdr50_phase_sel) {
+ if (max_len > 6) {
+ int temp_mid = (max_len - 6) / 2;
+ int temp_final_phase =
+ path[final_path_idx].end - (max_len -
+ (3 + temp_mid));
+
+ if (temp_final_phase < 0)
+ final_phase = temp_final_phase + MAX_PHASE + 1;
+ else
+ final_phase = (u8) temp_final_phase;
+ }
+ }
+
+Search_Finish:
+ RTS51X_DEBUGP("Final choosen phase: %d\n", final_phase);
+ return final_phase;
+}
+
+static int sd_tuning_rx(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i, j;
+ u32 raw_phase_map[3], phase_map;
+ u8 final_phase;
+ int (*tuning_cmd) (struct rts51x_chip *chip, u8 sample_point);
+
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_DDR50(sd_card))
+ tuning_cmd = sd_ddr_tuning_rx_cmd;
+ else
+ tuning_cmd = sd_sdr_tuning_rx_cmd;
+ } else {
+ if (CHK_MMC_DDR52(sd_card))
+ tuning_cmd = mmc_ddr_tunning_rx_cmd;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ for (i = 0; i < 3; i++) {
+ raw_phase_map[i] = 0;
+ for (j = MAX_PHASE; j >= 0; j--) {
+ if (monitor_card_cd(chip, SD_CARD) == CD_NOT_EXIST) {
+ sd_set_reset_fail(chip, SD_RESET_FAIL);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = tuning_cmd(chip, (u8) j);
+ if (retval == STATUS_SUCCESS)
+ raw_phase_map[i] |= 1 << j;
+ else
+ RTS51X_DEBUGP("Tuning phase %d fail\n", j);
+ }
+ }
+
+ phase_map = raw_phase_map[0] & raw_phase_map[1] & raw_phase_map[2];
+ for (i = 0; i < 3; i++)
+ RTS51X_DEBUGP("RX raw_phase_map[%d] = 0x%04x\n", i,
+ raw_phase_map[i]);
+ RTS51X_DEBUGP("RX phase_map = 0x%04x\n", phase_map);
+
+ final_phase = sd_search_final_phase(chip, phase_map, TUNE_RX);
+ if (final_phase == 0xFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = tuning_cmd(chip, final_phase);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_ddr_pre_tuning_tx(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 i;
+ u8 pre_tune_tx_phase;
+ u32 pre_tune_phase_map;
+
+ RTS51X_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
+
+ pre_tune_tx_phase = 0xFF;
+ pre_tune_phase_map = 0x0000;
+ for (i = 0; i < MAX_PHASE + 1; i++) {
+ if (monitor_card_cd(chip, SD_CARD) == CD_NOT_EXIST) {
+ sd_set_reset_fail(chip, SD_RESET_FAIL);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_change_phase(chip, (u8) i, TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval =
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if ((retval == STATUS_SUCCESS)
+ || !sd_check_err_code(chip, SD_RSP_TIMEOUT))
+ pre_tune_phase_map |= (u32) 1 << i;
+ }
+
+ RTS51X_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
+
+ pre_tune_tx_phase =
+ sd_search_final_phase(chip, pre_tune_phase_map, TUNE_TX);
+ if (pre_tune_tx_phase == 0xFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ sd_change_phase(chip, pre_tune_tx_phase, TUNE_TX);
+ RTS51X_DEBUGP("DDR TX pre tune phase: %d\n", (int)pre_tune_tx_phase);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_tuning_tx(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i, j;
+ u32 raw_phase_map[3], phase_map;
+ u8 final_phase;
+ int (*tuning_cmd) (struct rts51x_chip *chip, u8 sample_point);
+
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_DDR50(sd_card))
+ tuning_cmd = sd_ddr_tuning_tx_cmd;
+ else
+ tuning_cmd = sd_sdr_tuning_tx_cmd;
+ } else {
+ if (CHK_MMC_DDR52(sd_card))
+ tuning_cmd = sd_ddr_tuning_tx_cmd;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ for (i = 0; i < 3; i++) {
+ raw_phase_map[i] = 0;
+ for (j = MAX_PHASE; j >= 0; j--) {
+ if (monitor_card_cd(chip, SD_CARD) == CD_NOT_EXIST) {
+ sd_set_reset_fail(chip, SD_RESET_FAIL);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = tuning_cmd(chip, (u8) j);
+ if (retval == STATUS_SUCCESS)
+ raw_phase_map[i] |= 1 << j;
+ else
+ RTS51X_DEBUGP("Tuning phase %d fail\n", j);
+ }
+ }
+
+ phase_map = raw_phase_map[0] & raw_phase_map[1] & raw_phase_map[2];
+ for (i = 0; i < 3; i++)
+ RTS51X_DEBUGP("TX raw_phase_map[%d] = 0x%04x\n", i,
+ raw_phase_map[i]);
+ RTS51X_DEBUGP("TX phase_map = 0x%04x\n", phase_map);
+
+ final_phase = sd_search_final_phase(chip, phase_map, TUNE_TX);
+ if (final_phase == 0xFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = tuning_cmd(chip, final_phase);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_sdr_tuning(struct rts51x_chip *chip)
+{
+ int retval;
+
+ retval = sd_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = sd_tuning_rx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_ddr_tuning(struct rts51x_chip *chip)
+{
+ int retval;
+
+ if (!(chip->option.sd_ctl & SD_DDR_TX_PHASE_SET_BY_USER)) {
+ retval = sd_ddr_pre_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ } else {
+ retval =
+ sd_change_phase(chip, (u8) chip->option.sd_ddr_tx_phase,
+ TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ retval = sd_tuning_rx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (!(chip->option.sd_ctl & SD_DDR_TX_PHASE_SET_BY_USER)) {
+ retval = sd_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int mmc_ddr_tuning(struct rts51x_chip *chip)
+{
+ int retval;
+
+ if (!(chip->option.sd_ctl & MMC_DDR_TX_PHASE_SET_BY_USER)) {
+ retval = sd_ddr_pre_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ } else {
+ retval =
+ sd_change_phase(chip, (u8) chip->option.mmc_ddr_tx_phase,
+ TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ retval = sd_tuning_rx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (!(chip->option.sd_ctl & MMC_DDR_TX_PHASE_SET_BY_USER)) {
+ retval = sd_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int sd_switch_clock(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int re_tuning = 0;
+
+ retval = rts51x_select_card(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (CHK_SD30_SPEED(sd_card) || CHK_MMC_DDR52(sd_card)) {
+ if (sd_card->sd_clock != chip->cur_clk)
+ re_tuning = 1;
+ }
+
+ retval = switch_clock(chip, sd_card->sd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (re_tuning) {
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_DDR50(sd_card))
+ retval = sd_ddr_tuning(chip);
+ else
+ retval = sd_sdr_tuning(chip);
+ } else {
+ if (CHK_MMC_DDR52(sd_card))
+ retval = mmc_ddr_tuning(chip);
+ }
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_prepare_reset(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ if (chip->asic_code)
+ sd_card->sd_clock = 29;
+ else
+ sd_card->sd_clock = CLK_30;
+
+ /* Set SD Clocks */
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, 0xFF,
+ SD20_RX_POS_EDGE);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_PUSH_POINT_CTL, 0xFF, 0);
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_select_card(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+static void sd_pull_ctl_disable(struct rts51x_chip *chip)
+{
+ if (CHECK_PKG(chip, LQFP48)) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5);
+ } else {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x56);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59);
+ }
+}
+
+static void sd_pull_ctl_enable(struct rts51x_chip *chip)
+{
+ if (CHECK_PKG(chip, LQFP48)) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xAA);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0xAA);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0xA9);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5);
+ } else {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xA5);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x9A);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0xA5);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x9A);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x65);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x5A);
+ }
+}
+
+static int sd_init_power(struct rts51x_chip *chip)
+{
+ int retval;
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, LDO3318_PWR_MASK,
+ LDO_ON);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_PAD_CTL, SD_IO_USING_1V8,
+ SD_IO_USING_3V3);
+ if (chip->asic_code)
+ rts51x_add_cmd(chip, WRITE_REG_CMD, LDO_POWER_CFG,
+ TUNE_SD18_MASK, TUNE_SD18_3V3);
+ if (chip->asic_code)
+ sd_pull_ctl_disable(chip);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL,
+ FPGA_SD_PULL_CTL_BIT | 0x20,
+ FPGA_SD_PULL_CTL_BIT);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, 0);
+ if (!chip->option.FT2_fast_mode)
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK,
+ POWER_OFF);
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ if (!chip->option.FT2_fast_mode) {
+#ifdef SD_XD_IO_FOLLOW_PWR
+ if (CHECK_PKG(chip, LQFP48)
+ || chip->option.rts5129_D3318_off_enable)
+ rts51x_write_register(chip, CARD_PWR_CTL,
+ LDO_OFF, LDO_OFF);
+#endif
+ wait_timeout(250);
+
+#ifdef SD_XD_IO_FOLLOW_PWR
+ if (CHECK_PKG(chip, LQFP48)
+ || chip->option.rts5129_D3318_off_enable) {
+ rts51x_init_cmd(chip);
+ if (chip->asic_code)
+ sd_pull_ctl_enable(chip);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD,
+ FPGA_PULL_CTL,
+ FPGA_SD_PULL_CTL_BIT | 0x20, 0);
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ } else {
+ if (chip->asic_code)
+ rts51x_write_register(chip, CARD_PULL_CTL6,
+ 0x03, 0x00);
+ }
+#endif
+
+ /* Power on card */
+ retval = card_power_on(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ wait_timeout(260);
+
+#ifdef SUPPORT_OCP
+ rts51x_get_card_status(chip, &(chip->card_status));
+ chip->ocp_stat = (chip->card_status >> 4) & 0x03;
+
+ if (chip->ocp_stat & (MS_OCP_NOW | MS_OCP_EVER)) {
+ RTS51X_DEBUGP("Over current, OCPSTAT is 0x%x\n",
+ chip->ocp_stat);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ }
+
+ rts51x_init_cmd(chip);
+ if (chip->asic_code) {
+ sd_pull_ctl_enable(chip);
+ } else {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL,
+ FPGA_SD_PULL_CTL_BIT | 0x20, 0);
+ }
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+#ifdef SD_XD_IO_FOLLOW_PWR
+ rts51x_write_register(chip, CARD_INT_PEND, XD_INT | MS_INT | SD_INT,
+ XD_INT | MS_INT | SD_INT);
+#endif
+
+ RTS51X_WRITE_REG(chip, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_dummy_clock(struct rts51x_chip *chip)
+{
+ RTS51X_WRITE_REG(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN);
+ wait_timeout(5);
+ RTS51X_WRITE_REG(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN, 0x00);
+
+ return STATUS_SUCCESS;
+}
+
+int reset_sd(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval, i = 0, j = 0, k = 0, hi_cap_flow = 0;
+ int sd_dont_switch = 0;
+ int support_1v8 = 0;
+ u8 rsp[16];
+ u8 switch_bus_width;
+ u32 voltage = 0;
+ u8 cmd[5], buf[64];
+ u16 sd_card_type;
+
+ SET_SD(sd_card);
+ CLR_RETRY_SD20_MODE(sd_card);
+Switch_Fail:
+ i = 0;
+ j = 0;
+ k = 0;
+ hi_cap_flow = 0;
+ support_1v8 = 0;
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON)
+ goto SD_UNLOCK_ENTRY;
+#endif
+
+ retval = sd_prepare_reset(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ sd_dummy_clock(chip);
+
+ /* Start Initialization Process of SD Card */
+RTY_SD_RST:
+ retval =
+ sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0, NULL,
+ 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ wait_timeout(20);
+
+ retval =
+ sd_send_cmd_get_rsp(chip, SEND_IF_COND, 0x000001AA, SD_RSP_TYPE_R7,
+ rsp, 5);
+ if (retval == STATUS_SUCCESS) {
+ if ((rsp[4] == 0xAA) && ((rsp[3] & 0x0f) == 0x01)) {
+ hi_cap_flow = 1;
+ if (CHK_RETRY_SD20_MODE(sd_card)) {
+ voltage =
+ SUPPORT_VOLTAGE |
+ SUPPORT_HIGH_AND_EXTENDED_CAPACITY;
+ } else {
+ voltage =
+ SUPPORT_VOLTAGE |
+ SUPPORT_HIGH_AND_EXTENDED_CAPACITY |
+ SUPPORT_MAX_POWER_PERMANCE | SUPPORT_1V8;
+ }
+ }
+ }
+
+ if (!hi_cap_flow) {
+ voltage = SUPPORT_VOLTAGE;
+
+ retval =
+ sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ wait_timeout(20);
+ }
+
+ /* ACMD41 */
+ do {
+ {
+ u8 temp = 0;
+ rts51x_read_register(chip, CARD_INT_PEND, &temp);
+ RTS51X_DEBUGP("CARD_INT_PEND:%x\n", temp);
+ if (temp & SD_INT) {
+ chip->reset_need_retry = 1;
+ rts51x_write_register(chip, CARD_INT_PEND,
+ XD_INT | SD_INT | MS_INT,
+ XD_INT | SD_INT | MS_INT);
+ sd_set_reset_fail(chip, SD_RESET_FAIL);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+RTY_CMD55:
+ retval =
+ sd_send_cmd_get_rsp(chip, APP_CMD, 0, SD_RSP_TYPE_R1, NULL,
+ 0);
+ if (retval != STATUS_SUCCESS) {
+ if (monitor_card_cd(chip, SD_CARD) == CD_NOT_EXIST) {
+ sd_set_reset_fail(chip, SD_RESET_FAIL);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ j++;
+ if (chip->option.speed_mmc) {
+ if (j < 2)
+ goto RTY_CMD55;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ if (j < 3)
+ goto RTY_SD_RST;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval =
+ sd_send_cmd_get_rsp(chip, SD_APP_OP_COND, voltage,
+ SD_RSP_TYPE_R3, rsp, 5);
+ if (retval != STATUS_SUCCESS) {
+ k++;
+ if (k < 3)
+ goto RTY_SD_RST;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ i++;
+ wait_timeout(20);
+ } while (!(rsp[1] & 0x80) && (i < 255)); /* Not complete power on */
+
+ if (i == 255) {
+ /* Time out */
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (hi_cap_flow) {
+ if (rsp[1] & 0x40)
+ SET_SD_HCXC(sd_card);
+ else
+ CLR_SD_HCXC(sd_card);
+ if (!CHK_RETRY_SD20_MODE(sd_card)) {
+ if ((CHK_SD_HCXC(sd_card)) && (CHECK_UHS50(chip))) {
+ support_1v8 = (rsp[1] & 0x01) ? 1 : 0;
+ RTS51X_DEBUGP("support_1v8 = %d\n",
+ support_1v8);
+ }
+ }
+ } else {
+ CLR_SD_HCXC(sd_card);
+ support_1v8 = 0;
+ }
+
+ /* CMD11: Switch Voltage */
+ if (support_1v8 && CHECK_UHS50(chip)
+ && !(((u8) chip->option.sd_speed_prior & SDR104_SUPPORT) ==
+ HS_SUPPORT)) {
+ retval = sd_voltage_switch(chip);
+ if (retval != STATUS_SUCCESS) {
+ SET_RETRY_SD20_MODE(sd_card);
+ sd_init_power(chip);
+ RTS51X_DEBUGP("1.8v switch fail\n");
+ goto Switch_Fail;
+ }
+ }
+
+ /* CMD 2 */
+ retval =
+ sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ /* CMD 3 */
+ retval =
+ sd_send_cmd_get_rsp(chip, SEND_RELATIVE_ADDR, 0, SD_RSP_TYPE_R6,
+ rsp, 5);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ sd_card->sd_addr = (u32) rsp[1] << 24;
+ sd_card->sd_addr += (u32) rsp[2] << 16;
+
+ /* Get CSD register for Calculating Timing,Capacity,
+ * Check CSD to determaine as if this is the SD ROM card */
+ retval = sd_check_csd(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ /* Select SD card */
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+#ifdef SUPPORT_SD_LOCK
+SD_UNLOCK_ENTRY:
+ /* Get SD lock status */
+ retval = sd_update_lock_status(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (sd_card->sd_lock_status & SD_LOCKED) {
+ sd_card->sd_lock_status |= (SD_LOCK_1BIT_MODE | SD_PWD_EXIST);
+ return STATUS_SUCCESS;
+ } else if (!(sd_card->sd_lock_status & SD_UNLOCK_POW_ON)) {
+ sd_card->sd_lock_status &= ~SD_PWD_EXIST;
+ }
+#endif
+
+ /* ACMD42 */
+ retval =
+ sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval =
+ sd_send_cmd_get_rsp(chip, SET_CLR_CARD_DETECT, 0, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (support_1v8) {
+ /* ACMD6 */
+ retval =
+ sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ /* Enable 4 bit data bus */
+ retval =
+ sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ switch_bus_width = SD_BUS_WIDTH_4;
+ } else {
+ switch_bus_width = SD_BUS_WIDTH_1;
+ }
+
+ /* Set block length 512 bytes for all block commands */
+ retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN,
+ 0x200, SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTS51X_WRITE_REG(chip, SD_CFG1, SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
+
+ if (!(sd_card->raw_csd[4] & 0x40)) {
+ sd_dont_switch = 1;
+ RTS51X_DEBUGP("Not support class ten\n");
+ }
+
+ if (!sd_dont_switch) {
+ /* Check the card whether flow SD1.1 spec or higher */
+ retval = sd_check_spec(chip, switch_bus_width);
+ if (retval == STATUS_SUCCESS) {
+ retval = sd_switch_function(chip, switch_bus_width);
+ if (retval != STATUS_SUCCESS) {
+ if ((sd_card->sd_switch_fail ==
+ SDR104_SUPPORT_MASK)
+ || (sd_card->sd_switch_fail ==
+ DDR50_SUPPORT_MASK)
+ || (sd_card->sd_switch_fail ==
+ SDR50_SUPPORT_MASK)) {
+ sd_init_power(chip);
+ SET_RETRY_SD20_MODE(sd_card);
+ } else if (sd_card->sd_switch_fail ==
+ HS_SUPPORT_MASK) {
+ sd_dont_switch = 1;
+ }
+ goto Switch_Fail;
+ }
+ } else {
+ if (support_1v8) {
+ SET_RETRY_SD20_MODE(sd_card);
+ sd_init_power(chip);
+ sd_dont_switch = 1;
+
+ goto Switch_Fail;
+ }
+ }
+ }
+
+ if (!support_1v8) {
+ /* ACMD6 */
+ retval =
+ sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ /* Enable 4 bit data bus */
+ retval =
+ sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+#ifdef SUPPORT_SD_LOCK
+ /* clear 1 bit mode status */
+ sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
+#endif
+
+ if (CHK_SD30_SPEED(sd_card)) {
+ rts51x_write_register(chip, SD30_DRIVE_SEL, SD30_DRIVE_MASK,
+ 0x03);
+
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (CHK_SD_DDR50(sd_card))
+ retval = sd_ddr_tuning(chip);
+ else
+ retval = sd_sdr_tuning(chip);
+
+ if (retval != STATUS_SUCCESS) {
+ SET_RETRY_SD20_MODE(sd_card);
+ RTS51X_DEBUGP("tuning phase fail,goto SD20 mode\n");
+ sd_init_power(chip);
+ CLR_SD30_SPEED(sd_card);
+ goto Switch_Fail;
+ }
+ if (STATUS_SUCCESS ==
+ sd_wait_currentstate_dataready(chip, 0x08, 1, 20)) {
+ cmd[0] = 0x40 | READ_SINGLE_BLOCK;
+ cmd[1] = 0x00;
+ cmd[2] = 0x00;
+ cmd[3] = 0x00;
+ cmd[4] = 0x00;
+ retval =
+ sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 512,
+ 1, SD_BUS_WIDTH_4, NULL, 0, 600);
+ if (retval != STATUS_SUCCESS) {
+ SET_RETRY_SD20_MODE(sd_card);
+ RTS51X_DEBUGP("read lba0 fail,"
+ "goto SD20 mode\n");
+ sd_init_power(chip);
+ CLR_SD30_SPEED(sd_card);
+ goto Switch_Fail;
+ }
+ }
+ }
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
+
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval == STATUS_SUCCESS) {
+ int ret;
+ cmd[0] = 0x40 | SEND_STATUS;
+ cmd[1] = 0x00;
+ cmd[2] = 0x00;
+ cmd[3] = 0x00;
+ cmd[4] = 0x00;
+ ret =
+ sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
+ SD_BUS_WIDTH_4, buf, 64, 600);
+ if (ret == STATUS_SUCCESS) {
+ sd_card_type = ((u16) buf[2] << 8) | (u16) buf[3];
+ RTS51X_DEBUGP("sd_card_type:0x%4x\n", sd_card_type);
+ if ((sd_card_type == 0x0001)
+ || (sd_card_type == 0x0002))
+ chip->card_wp |= SD_CARD;
+ } else {
+ rts51x_clear_sd_error(chip);
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ }
+ } else {
+ rts51x_clear_sd_error(chip);
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ }
+
+ /* Check SD Machanical Write-Protect Switch */
+ retval = rts51x_get_card_status(chip, &(chip->card_status));
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ if (chip->card_status & SD_WP)
+ chip->card_wp |= SD_CARD;
+
+ chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON) {
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0x02);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 0x00);
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+#endif
+
+ return STATUS_SUCCESS;
+}
+
+static int mmc_test_switch_bus(struct rts51x_chip *chip, u8 width)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 buf[8] = { 0 }, bus_width;
+ u16 byte_cnt;
+ int len;
+
+ retval =
+ sd_send_cmd_get_rsp(chip, BUSTEST_W, 0, SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (width == MMC_8BIT_BUS) {
+ buf[0] = 0x55;
+ buf[1] = 0xAA;
+ len = 8;
+ byte_cnt = 8;
+ bus_width = SD_BUS_WIDTH_8;
+ } else {
+ buf[0] = 0x5A;
+ len = 4;
+ byte_cnt = 4;
+ bus_width = SD_BUS_WIDTH_4;
+ }
+
+ retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3,
+ NULL, 0, byte_cnt, 1, bus_width, buf, len, 100);
+ if (retval != STATUS_SUCCESS) {
+ u8 val1 = 0, val2 = 0;
+ rts51x_ep0_read_register(chip, SD_STAT1, &val1);
+ rts51x_ep0_read_register(chip, SD_STAT2, &val2);
+ rts51x_clear_sd_error(chip);
+ if ((val1 & 0xE0) || val2)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ RTS51X_DEBUGP("SD/MMC CMD %d\n", BUSTEST_R);
+
+ rts51x_init_cmd(chip);
+
+ /* CMD14 */
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD0, 0xFF, 0x40 | BUSTEST_R);
+
+ if (width == MMC_8BIT_BUS)
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x08);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x04);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_NO_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TM_NORMAL_READ | SD_TRANSFER_START);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2, 0, 0);
+ if (width == MMC_8BIT_BUS) {
+ len = 3;
+ rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 1, 0, 0);
+ } else {
+ len = 2;
+ }
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, len, 100);
+ if (CHECK_SD_TRANS_FAIL(chip, retval)) {
+ rts51x_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rts51x_read_rsp_buf(chip, 1, buf, 2);
+
+ if (width == MMC_8BIT_BUS) {
+ RTS51X_DEBUGP("BUSTEST_R [8bits]: 0x%02x 0x%02x\n",
+ buf[0], buf[1]);
+ if ((buf[0] == 0xAA) && (buf[1] == 0x55)) {
+ u8 rsp[5];
+ u32 arg;
+
+ if (CHK_MMC_DDR52(sd_card))
+ arg = 0x03B70600;
+ else
+ arg = 0x03B70200;
+ /* Switch MMC to 8-bit mode */
+ retval =
+ sd_send_cmd_get_rsp(chip, SWITCH, arg,
+ SD_RSP_TYPE_R1b, rsp, 5);
+ if ((retval == STATUS_SUCCESS)
+ && !(rsp[4] & MMC_SWITCH_ERR))
+ return STATUS_SUCCESS;
+ }
+ } else {
+ RTS51X_DEBUGP("BUSTEST_R [4bits]: 0x%02x\n", buf[0]);
+ if (buf[0] == 0xA5) {
+ u8 rsp[5];
+ u32 arg;
+
+ if (CHK_MMC_DDR52(sd_card))
+ arg = 0x03B70500;
+ else
+ arg = 0x03B70100;
+ /* Switch MMC to 4-bit mode */
+ retval =
+ sd_send_cmd_get_rsp(chip, SWITCH, arg,
+ SD_RSP_TYPE_R1b, rsp, 5);
+ if ((retval == STATUS_SUCCESS)
+ && !(rsp[4] & MMC_SWITCH_ERR))
+ return STATUS_SUCCESS;
+ }
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int mmc_switch_timing_bus(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 card_type, card_type_mask = 0;
+ u8 buf[6];
+
+ CLR_MMC_HS(sd_card);
+
+ RTS51X_DEBUGP("SD/MMC CMD %d\n", SEND_EXT_CSD);
+
+ rts51x_init_cmd(chip);
+
+ /* SEND_EXT_CSD command */
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD0, 0xFF,
+ 0x40 | SEND_EXT_CSD);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD1, 0xFF, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD2, 0xFF, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD3, 0xFF, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD4, 0xFF, 0);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 2);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END
+ | SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TM_NORMAL_READ | SD_TRANSFER_START);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 196, 0xFF, 0);
+ rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 212, 0xFF, 0);
+ rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 213, 0xFF, 0);
+ rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 214, 0xFF, 0);
+ rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 215, 0xFF, 0);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 6, 1000);
+
+ if (CHECK_SD_TRANS_FAIL(chip, retval)) {
+ if (retval == STATUS_TIMEDOUT) {
+ rts51x_clear_sd_error(chip);
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rts51x_read_rsp_buf(chip, 0, buf, 6);
+
+ if (buf[0] & SD_TRANSFER_ERR) {
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (CHK_MMC_SECTOR_MODE(sd_card))
+ sd_card->capacity =
+ ((u32) buf[5] << 24) | ((u32) buf[4] << 16) |
+ ((u32) buf[3] << 8) | ((u32) buf[2]);
+#ifdef SUPPORT_SD_LOCK
+ if (!(sd_card->sd_lock_status & SD_SDR_RST) && CHECK_UHS50(chip))
+ card_type_mask = 0x07;
+ else
+ card_type_mask = 0x03;
+#else
+ if (CHECK_UHS50(chip))
+ card_type_mask = 0x07;
+ else
+ card_type_mask = 0x03;
+#endif
+
+ card_type = buf[1] & card_type_mask;
+ if (card_type) {
+ /* CARD TYPE FIELD = DDR52MHz, 52MHz or 26MHz */
+ u8 rsp[5];
+
+ if (card_type & 0x04)
+ SET_MMC_DDR52(sd_card);
+ else if (card_type & 0x02)
+ SET_MMC_52M(sd_card);
+ else
+ SET_MMC_26M(sd_card);
+
+ retval =
+ sd_send_cmd_get_rsp(chip, SWITCH, 0x03B90100,
+ SD_RSP_TYPE_R1b, rsp, 5);
+ if ((retval != STATUS_SUCCESS) || (rsp[4] & MMC_SWITCH_ERR))
+ CLR_MMC_HS(sd_card);
+ }
+ sd_choose_proper_clock(chip);
+ retval = switch_clock(chip, sd_card->sd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ /* Test Bus Procedure */
+ if (mmc_test_switch_bus(chip, MMC_8BIT_BUS) == STATUS_SUCCESS) {
+ SET_MMC_8BIT(sd_card);
+ chip->card_bus_width[chip->card2lun[SD_CARD]] = 8;
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
+#endif
+ } else if (mmc_test_switch_bus(chip, MMC_4BIT_BUS) == STATUS_SUCCESS) {
+ SET_MMC_4BIT(sd_card);
+ chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
+#endif
+ } else {
+ CLR_MMC_8BIT(sd_card);
+ CLR_MMC_4BIT(sd_card);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int reset_mmc(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval, i = 0, j = 0, k = 0;
+ u8 rsp[16];
+ u8 spec_ver = 0;
+ u8 change_to_ddr52 = 1;
+ u8 cmd[5];
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON)
+ goto MMC_UNLOCK_ENTRY;
+#endif
+
+MMC_DDR_FAIL:
+
+ retval = sd_prepare_reset(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ SET_MMC(sd_card);
+
+RTY_MMC_RST:
+ retval =
+ sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0, NULL,
+ 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ do {
+ {
+ u8 temp = 0;
+ rts51x_read_register(chip, CARD_INT_PEND, &temp);
+ if (temp & SD_INT) {
+ chip->reset_need_retry = 1;
+ rts51x_write_register(chip, CARD_INT_PEND,
+ XD_INT | SD_INT | MS_INT,
+ XD_INT | SD_INT | MS_INT);
+ sd_set_reset_fail(chip, MMC_RESET_FAIL);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ /* CMD 1 */
+ retval = sd_send_cmd_get_rsp(chip, SEND_OP_COND,
+ (SUPPORT_VOLTAGE | 0x40000000),
+ SD_RSP_TYPE_R3, rsp, 5);
+ if (retval != STATUS_SUCCESS) {
+ if (monitor_card_cd(chip, SD_CARD) == CD_NOT_EXIST) {
+ sd_set_reset_fail(chip, MMC_RESET_FAIL);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (sd_check_err_code(chip, SD_BUSY)
+ || sd_check_err_code(chip, SD_TO_ERR)) {
+ k++;
+ if (k < 20) {
+ sd_clr_err_code(chip);
+ goto RTY_MMC_RST;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ j++;
+ if (j < 100) {
+ sd_clr_err_code(chip);
+ goto RTY_MMC_RST;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ wait_timeout(20);
+ i++;
+ } while (!(rsp[1] & 0x80) && (i < 100)); /* Not complete power on */
+
+ if (i == 100) {
+ /* Time out */
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if ((rsp[1] & 0x60) == 0x40)
+ SET_MMC_SECTOR_MODE(sd_card);
+ else
+ CLR_MMC_SECTOR_MODE(sd_card);
+
+ /* CMD 2 */
+ retval =
+ sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ /* CMD 3 */
+ sd_card->sd_addr = 0x00100000;
+ retval =
+ sd_send_cmd_get_rsp(chip, SET_RELATIVE_ADDR, sd_card->sd_addr,
+ SD_RSP_TYPE_R6, rsp, 5);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ /* Get CSD register for Calculating Timing,Capacity
+ * Check CSD to determaine as if this is the SD ROM card */
+ retval = sd_check_csd(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ /* Get MMC Spec_Ver in the CSD register */
+ spec_ver = (sd_card->raw_csd[0] & 0x3C) >> 2;
+
+ /* Select MMC card */
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ /* Set block length 512 bytes for all block commands */
+ retval =
+ sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1, NULL,
+ 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+#ifdef SUPPORT_SD_LOCK
+MMC_UNLOCK_ENTRY:
+ /* Get SD lock status */
+ retval = sd_update_lock_status(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+#endif
+
+ RTS51X_WRITE_REG(chip, SD_CFG1, SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
+
+ if (chip->ic_version < 2)
+ rts51x_write_register(chip, SD30_DRIVE_SEL, SD30_DRIVE_MASK,
+ 0x02);
+ rts51x_write_register(chip, CARD_DRIVE_SEL, SD20_DRIVE_MASK, DRIVE_8mA);
+
+ chip->card_bus_width[chip->card2lun[SD_CARD]] = 1;
+ if (spec_ver == 4) {
+ /* MMC 4.x Cards */
+ (void)mmc_switch_timing_bus(chip);
+ }
+
+ if (CHK_MMC_SECTOR_MODE(sd_card) && (sd_card->capacity == 0))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_MMC_DDR52(sd_card) && change_to_ddr52) {
+ /* Card is extracted while identifying */
+ if (monitor_card_cd(chip, SD_CARD) == CD_NOT_EXIST)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS) {
+ CLR_MMC_DDR52(sd_card);
+ sd_init_power(chip);
+ change_to_ddr52 = 0;
+ goto MMC_DDR_FAIL;
+ }
+
+ retval = mmc_ddr_tuning(chip);
+ if (retval != STATUS_SUCCESS) {
+ CLR_MMC_DDR52(sd_card);
+ sd_init_power(chip);
+ change_to_ddr52 = 0;
+ goto MMC_DDR_FAIL;
+ }
+
+ if (STATUS_SUCCESS ==
+ sd_wait_currentstate_dataready(chip, 0x08, 1, 20)) {
+ cmd[0] = 0x40 | READ_SINGLE_BLOCK;
+ cmd[1] = 0x00;
+ cmd[2] = 0x00;
+ cmd[3] = 0x00;
+ cmd[4] = 0x00;
+ if (CHK_MMC_8BIT(sd_card)) {
+ retval =
+ sd_read_data(chip, SD_TM_NORMAL_READ, cmd,
+ 5, 512, 1, SD_BUS_WIDTH_8,
+ NULL, 0, 600);
+ } else if (CHK_MMC_4BIT(sd_card)) {
+ retval =
+ sd_read_data(chip, SD_TM_NORMAL_READ, cmd,
+ 5, 512, 1, SD_BUS_WIDTH_4,
+ NULL, 0, 600);
+ } else {
+ retval =
+ sd_read_data(chip, SD_TM_NORMAL_READ, cmd,
+ 5, 512, 1, SD_BUS_WIDTH_1,
+ NULL, 0, 600);
+ }
+
+ if (retval != STATUS_SUCCESS) {
+ CLR_MMC_DDR52(sd_card);
+ change_to_ddr52 = 0;
+ RTS51X_DEBUGP("read lba0 fail,"
+ "goto SD20 mode\n");
+ sd_init_power(chip);
+ goto MMC_DDR_FAIL;
+ }
+ }
+ }
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON) {
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0x02);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 0x00);
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+#endif
+
+ retval = rts51x_get_card_status(chip, &(chip->card_status));
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ if (chip->card_status & SD_WP)
+ chip->card_wp |= SD_CARD;
+
+ return STATUS_SUCCESS;
+}
+
+int reset_sd_card(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+
+ memset(sd_card, 0, sizeof(struct sd_info));
+
+ /* Init variables */
+ sd_card->sd_type = 0;
+ sd_card->seq_mode = 0;
+ sd_card->sd_data_buf_ready = 0;
+ sd_card->capacity = 0;
+ sd_card->sd_switch_fail = 0;
+
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status = 0;
+ sd_card->sd_erase_status = 0;
+#endif
+
+ sd_clear_reset_fail(chip);
+ enable_card_clock(chip, SD_CARD);
+
+ sd_init_power(chip);
+
+ chip->reset_need_retry = 0;
+ for (i = 0; i < 3; i++) {
+ if (!chip->option.reset_mmc_first) { /* reset sd first */
+ retval = reset_sd(chip);
+ if (retval != STATUS_SUCCESS) {
+ /* Switch SD bus to 3V3 signal */
+ RTS51X_WRITE_REG(chip, SD_PAD_CTL,
+ SD_IO_USING_1V8, 0);
+ if (sd_check_reset_fail(chip, SD_RESET_FAIL))
+ sd_clear_reset_fail(chip);
+ else
+ retval = reset_mmc(chip);
+ }
+ } else { /* reset MMC first */
+ retval = reset_mmc(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (sd_check_reset_fail(chip, MMC_RESET_FAIL)) {
+ sd_clear_reset_fail(chip);
+ } else {
+ retval = reset_sd(chip);
+ if (retval != STATUS_SUCCESS) {
+ /* Switch SD bus to
+ * 3V3 signal */
+ RTS51X_WRITE_REG(chip,
+ SD_PAD_CTL,
+ SD_IO_USING_1V8, 0);
+ }
+ }
+ }
+ }
+
+ if ((retval == STATUS_SUCCESS) || (!chip->reset_need_retry)) {
+ /* if reset success or don't need retry,then break */
+ break;
+ }
+ if (monitor_card_cd(chip, SD_CARD) == CD_NOT_EXIST) {
+ /* card is extracted */
+ break;
+ }
+ RTS51X_DEBUGP("retry reset sd card,%d\n", i);
+ chip->reset_need_retry = 0;
+ }
+
+ sd_clear_reset_fail(chip);
+ chip->reset_need_retry = 0;
+
+ if (retval == STATUS_SUCCESS) {
+ rts51x_init_cmd(chip);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG1, SD_CLK_DIVIDE_MASK,
+ SD_CLK_DIVIDE_0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 2);
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ } else {
+ chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity = 0;
+ if (chip->option.reset_or_rw_fail_set_pad_drive) {
+ rts51x_write_register(chip, CARD_DRIVE_SEL,
+ SD20_DRIVE_MASK, DRIVE_8mA);
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity;
+
+ if (chip->option.sd_send_status_en) {
+ sd_card->sd_send_status_en = 1;
+ } else {
+ if (sd_card->capacity > 0x20000) { /* 64MB */
+ sd_card->sd_send_status_en = 0;
+ } else {
+ sd_card->sd_send_status_en = 1;
+ }
+ }
+ RTS51X_DEBUGP("sd_card->sd_send_status = %d\n",
+ sd_card->sd_send_status_en);
+
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTS51X_DEBUGP("sd_card->sd_type = 0x%x\n", sd_card->sd_type);
+
+ return STATUS_SUCCESS;
+}
+
+#define WAIT_DATA_READY_RTY_CNT 255
+
+static int wait_data_buf_ready(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int i, retval;
+
+ for (i = 0; i < WAIT_DATA_READY_RTY_CNT; i++) {
+ if (monitor_card_cd(chip, SD_CARD) == CD_NOT_EXIST)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ sd_card->sd_data_buf_ready = 0;
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (sd_card->sd_data_buf_ready)
+ return sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ }
+
+ sd_set_err_code(chip, SD_TO_ERR);
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+void sd_stop_seq_mode(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ if (sd_card->seq_mode) {
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ return;
+
+ retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+ SD_RSP_TYPE_R1b, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ sd_set_err_code(chip, SD_STS_ERR);
+ sd_card->seq_mode = 0;
+
+ rts51x_ep0_write_register(chip, MC_FIFO_CTL, FIFO_FLUSH,
+ FIFO_FLUSH);
+ }
+}
+
+static inline int sd_auto_tune_clock(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ if (chip->asic_code) {
+ if (sd_card->sd_clock > 30)
+ sd_card->sd_clock -= 20;
+ } else {
+ if (sd_card->sd_clock == CLK_100)
+ sd_card->sd_clock = CLK_80;
+ else if (sd_card->sd_clock == CLK_80)
+ sd_card->sd_clock = CLK_60;
+ else if (sd_card->sd_clock == CLK_60)
+ sd_card->sd_clock = CLK_50;
+ }
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+int sd_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 start_sector,
+ u16 sector_cnt)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ u32 data_addr;
+ int retval;
+ u8 flag;
+ unsigned int pipe;
+ u8 stageflag;
+
+ sd_card->counter = 0;
+
+ if (!CHK_SD_HCXC(sd_card) && !CHK_MMC_SECTOR_MODE(sd_card))
+ data_addr = start_sector << 9;
+ else
+ data_addr = start_sector;
+
+ RTS51X_DEBUGP("sd_rw, data_addr = 0x%x\n", data_addr);
+
+ sd_clr_err_code(chip);
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (sd_card->seq_mode && ((sd_card->pre_dir != srb->sc_data_direction)
+ ||
+ ((sd_card->pre_sec_addr +
+ sd_card->pre_sec_cnt) != start_sector))) {
+ if ((sd_card->pre_dir == DMA_FROM_DEVICE)
+ && !CHK_SD30_SPEED(sd_card)
+ && !CHK_SD_HS(sd_card)
+ && !CHK_MMC_HS(sd_card)
+ && sd_card->sd_send_status_en) {
+ sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
+ }
+
+ retval =
+ sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+ SD_RSP_TYPE_R1b, NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_STS_ERR);
+ TRACE_RET(chip, sd_parse_err_code(chip));
+ }
+
+ sd_card->seq_mode = 0;
+
+ RTS51X_WRITE_REG(chip, MC_FIFO_CTL, FIFO_FLUSH, FIFO_FLUSH);
+
+ if (!CHK_SD30_SPEED(sd_card)
+ && !CHK_SD_HS(sd_card)
+ && !CHK_MMC_HS(sd_card)
+ && sd_card->sd_send_status_en) {
+ /* random rw, so pre_sec_cnt < 0x80 */
+ sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
+ }
+ }
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 0x02);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF,
+ (u8) sector_cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF,
+ (u8) (sector_cnt >> 8));
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ RING_BUFFER);
+
+ if (CHK_MMC_8BIT(sd_card))
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG1, 0x03,
+ SD_BUS_WIDTH_8);
+ else if (CHK_MMC_4BIT(sd_card) || CHK_SD(sd_card))
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG1, 0x03,
+ SD_BUS_WIDTH_4);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG1, 0x03,
+ SD_BUS_WIDTH_1);
+
+ if (sd_card->seq_mode) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 |
+ SD_RSP_LEN_0);
+
+ trans_dma_enable(srb->sc_data_direction, chip, sector_cnt * 512,
+ DMA_512);
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ flag = MODE_CDIR;
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_READ_3 | SD_TRANSFER_START);
+ } else {
+ flag = MODE_CDOR;
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ }
+
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, flag, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ } else {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ RTS51X_DEBUGP("SD/MMC CMD %d\n", READ_MULTIPLE_BLOCK);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD0, 0xFF,
+ 0x40 | READ_MULTIPLE_BLOCK);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD1, 0xFF,
+ (u8) (data_addr >> 24));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD2, 0xFF,
+ (u8) (data_addr >> 16));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD3, 0xFF,
+ (u8) (data_addr >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD4, 0xFF,
+ (u8) data_addr);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 |
+ SD_RSP_LEN_6);
+
+ trans_dma_enable(srb->sc_data_direction, chip,
+ sector_cnt * 512, DMA_512);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CDIR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ } else {
+ retval = rts51x_send_cmd(chip, MODE_C, 50);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_sd_error(chip);
+
+ sd_set_err_code(chip, SD_TO_ERR);
+ TRACE_RET(chip, sd_parse_err_code(chip));
+ }
+
+ retval = wait_data_buf_ready(chip);
+ if (retval != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_TO_ERR);
+ TRACE_RET(chip, sd_parse_err_code(chip));
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, WRITE_MULTIPLE_BLOCK,
+ data_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_CRC_ERR);
+ TRACE_RET(chip, sd_parse_err_code(chip));
+ }
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 |
+ SD_RSP_LEN_0);
+
+ trans_dma_enable(srb->sc_data_direction, chip,
+ sector_cnt * 512, DMA_512);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CDOR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ sd_card->seq_mode = 1;
+ }
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ pipe = RCV_BULK_PIPE(chip);
+ stageflag = STAGE_DI;
+ } else {
+ pipe = SND_BULK_PIPE(chip);
+ stageflag = STAGE_DO;
+ }
+
+ retval =
+ rts51x_transfer_data_rcc(chip, pipe, scsi_sglist(srb),
+ scsi_bufflen(srb), scsi_sg_count(srb),
+ NULL, 10000, stageflag);
+ if (retval != STATUS_SUCCESS) {
+ u8 stat = 0;
+ int err = retval;
+
+ sd_print_debug_reg(chip);
+
+ rts51x_ep0_read_register(chip, SD_STAT1, &stat);
+ RTS51X_DEBUGP("SD_STAT1: 0x%x\n", stat);
+
+ rts51x_clear_sd_error(chip);
+
+ retval =
+ sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+ SD_RSP_TYPE_R1b, NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_STS_ERR);
+ TRACE_RET(chip, retval);
+ }
+
+ if (stat & (SD_CRC7_ERR | SD_CRC16_ERR | SD_CRC_WRITE_ERR)) {
+ RTS51X_DEBUGP("SD CRC error, tune clock!\n");
+ sd_auto_tune_clock(chip);
+ }
+
+ sd_card->seq_mode = 0;
+
+ TRACE_RET(chip, err);
+ }
+ retval = rts51x_get_rsp(chip, 1, 2000);
+ if (CHECK_SD_TRANS_FAIL(chip, retval)) {
+ rts51x_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ sd_card->pre_sec_addr = start_sector;
+ sd_card->pre_sec_cnt = sector_cnt;
+ sd_card->pre_dir = srb->sc_data_direction;
+
+ return STATUS_SUCCESS;
+}
+
+void sd_cleanup_work(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ if (sd_card->seq_mode) {
+ RTS51X_DEBUGP("SD: stop transmission\n");
+ sd_stop_seq_mode(chip);
+ sd_card->counter = 0;
+ }
+}
+
+inline void sd_fill_power_off_card3v3(struct rts51x_chip *chip)
+{
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, 0);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, 0);
+ if (!chip->option.FT2_fast_mode) {
+#ifdef SD_XD_IO_FOLLOW_PWR
+ if (CHECK_PKG(chip, LQFP48)
+ || chip->option.rts5129_D3318_off_enable)
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL,
+ POWER_MASK | LDO_OFF,
+ POWER_OFF | LDO_OFF);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL,
+ POWER_MASK, POWER_OFF);
+#else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK,
+ POWER_OFF);
+#endif
+ }
+}
+
+int sd_power_off_card3v3(struct rts51x_chip *chip)
+{
+ int retval;
+
+ rts51x_init_cmd(chip);
+
+ sd_fill_power_off_card3v3(chip);
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+#ifdef SD_XD_IO_FOLLOW_PWR
+ if (!chip->option.FT2_fast_mode)
+ wait_timeout(chip->option.D3318_off_delay);
+#endif
+
+ return STATUS_SUCCESS;
+}
+
+int release_sd_card(struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ RTS51X_DEBUGP("elease_sd_card\n");
+
+ chip->card_ready &= ~SD_CARD;
+ chip->card_fail &= ~SD_CARD;
+ chip->card_wp &= ~SD_CARD;
+
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status = 0;
+ sd_card->sd_erase_status = 0;
+#endif
+
+ memset(sd_card->raw_csd, 0, 16);
+ memset(sd_card->raw_scr, 0, 8);
+
+ rts51x_write_register(chip, SFSM_ED, HW_CMD_STOP, HW_CMD_STOP);
+ rts51x_write_register(chip, SD_PAD_CTL, SD_IO_USING_1V8, 0);
+ if (CHECK_PKG(chip, LQFP48) || chip->option.rts5129_D3318_off_enable)
+ sd_power_off_card3v3(chip);
+
+ rts51x_init_cmd(chip);
+ if (!(CHECK_PKG(chip, LQFP48) || chip->option.rts5129_D3318_off_enable))
+ sd_fill_power_off_card3v3(chip);
+
+ if (chip->asic_code)
+ sd_pull_ctl_disable(chip);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL,
+ FPGA_SD_PULL_CTL_BIT | 0x20,
+ FPGA_SD_PULL_CTL_BIT);
+
+ /* Switch LDO3318 to 3.3V */
+ rts51x_add_cmd(chip, WRITE_REG_CMD, LDO_POWER_CFG, TUNE_SD18_MASK,
+ TUNE_SD18_3V3);
+
+ if (CHK_MMC_DDR52(sd_card) && CHK_MMC_8BIT(sd_card))
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DMA1_CTL,
+ EXTEND_DMA1_ASYNC_SIGNAL,
+ EXTEND_DMA1_ASYNC_SIGNAL);
+ if (CHK_SD30_SPEED(sd_card) || CHK_MMC(sd_card))
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD30_DRIVE_SEL,
+ SD30_DRIVE_MASK, chip->option.sd30_pad_drive);
+ /* Suspend LDO3318 */
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, LDO3318_PWR_MASK,
+ LDO_SUSPEND);
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ wait_timeout(20);
+
+ return STATUS_SUCCESS;
+}
diff --git a/drivers/staging/rts5139/sd.h b/drivers/staging/rts5139/sd.h
new file mode 100644
index 00000000000..0805edcaea8
--- /dev/null
+++ b/drivers/staging/rts5139/sd.h
@@ -0,0 +1,304 @@
+/* Driver for Realtek RTS51xx USB card reader
+ * Header file
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_SD_H
+#define __RTS51X_SD_H
+
+#include "rts51x_chip.h"
+
+#define SD_MAX_RETRY_COUNT 3
+
+#define SUPPORT_VOLTAGE 0x003C0000
+
+#define SD_RESET_FAIL 0x01
+#define MMC_RESET_FAIL 0x02
+
+/* Error Code */
+#define SD_NO_ERROR 0x0
+#define SD_CRC_ERR 0x80
+#define SD_TO_ERR 0x40
+#define SD_NO_CARD 0x20
+#define SD_BUSY 0x10
+#define SD_STS_ERR 0x08
+#define SD_RSP_TIMEOUT 0x04
+
+/* MMC/SD Command Index */
+/* Basic command (class 0) */
+#define GO_IDLE_STATE 0
+#define SEND_OP_COND 1 /* reserved for SD */
+#define ALL_SEND_CID 2
+#define SET_RELATIVE_ADDR 3
+#define SEND_RELATIVE_ADDR 3
+#define SET_DSR 4
+#define IO_SEND_OP_COND 5
+#define SWITCH 6
+#define SELECT_CARD 7
+#define DESELECT_CARD 7
+/* CMD8 is "SEND_EXT_CSD" for MMC4.x Spec
+ * while is "SEND_IF_COND" for SD 2.0 */
+#define SEND_EXT_CSD 8
+#define SEND_IF_COND 8
+/* end */
+#define SEND_CSD 9
+#define SEND_CID 10
+#define VOLTAGE_SWITCH 11
+#define READ_DAT_UTIL_STOP 11 /* reserved for SD */
+#define STOP_TRANSMISSION 12
+#define SEND_STATUS 13
+#define GO_INACTIVE_STATE 15
+
+/* Block oriented read commands (class 2) */
+#define SET_BLOCKLEN 16
+#define READ_SINGLE_BLOCK 17
+#define READ_MULTIPLE_BLOCK 18
+#define SEND_TUNING_PATTERN 19
+
+/* Bus Width Test */
+#define BUSTEST_R 14
+#define BUSTEST_W 19
+/* end */
+
+/* Block oriented write commands (class 4) */
+#define WRITE_BLOCK 24
+#define WRITE_MULTIPLE_BLOCK 25
+#define PROGRAM_CSD 27
+
+/* Erase commands */
+#define ERASE_WR_BLK_START 32
+#define ERASE_WR_BLK_END 33
+#define ERASE_CMD 38
+
+/* Block Oriented Write Protection Commands */
+#define LOCK_UNLOCK 42
+
+#define IO_RW_DIRECT 52
+
+/* Application specific commands (class 8) */
+#define APP_CMD 55
+#define GEN_CMD 56
+
+/* SD Application command Index */
+#define SET_BUS_WIDTH 6
+#define SD_STATUS 13
+#define SEND_NUM_WR_BLOCKS 22
+#define SET_WR_BLK_ERASE_COUNT 23
+#define SD_APP_OP_COND 41
+#define SET_CLR_CARD_DETECT 42
+#define SEND_SCR 51
+
+/* SD TIMEOUT function return error */
+#define SD_READ_COMPLETE 0x00
+#define SD_READ_TO 0x01
+#define SD_READ_ADVENCE 0x02
+
+/* SD v1.1 CMD6 SWITCH function */
+#define SD_CHECK_MODE 0x00
+#define SD_SWITCH_MODE 0x80
+#define SD_FUNC_GROUP_1 0x01
+#define SD_FUNC_GROUP_2 0x02
+#define SD_FUNC_GROUP_3 0x03
+#define SD_FUNC_GROUP_4 0x04
+#define SD_CHECK_SPEC_V1_1 0xFF
+
+/* SD Command Argument */
+#define NO_ARGUMENT 0x00
+#define CHECK_PATTERN 0x000000AA
+#define VOLTAGE_SUPPLY_RANGE 0x00000100 /* 2.7~3.6V */
+#define SUPPORT_HIGH_AND_EXTENDED_CAPACITY 0x40000000
+#define SUPPORT_MAX_POWER_PERMANCE 0x10000000
+#define SUPPORT_1V8 0x01000000
+
+/* Switch Command Error Code */
+#define SWTICH_NO_ERR 0x00
+#define CARD_NOT_EXIST 0x01
+#define SPEC_NOT_SUPPORT 0x02
+#define CHECK_MODE_ERR 0x03
+#define CHECK_NOT_READY 0x04
+#define SWITCH_CRC_ERR 0x05
+#define SWITCH_MODE_ERR 0x06
+#define SWITCH_PASS 0x07
+
+#ifdef SUPPORT_SD_LOCK
+/* CMD42 Parameter */
+#define SD_ERASE 0x08
+#define SD_LOCK 0x04
+#define SD_UNLOCK 0x00
+#define SD_CLR_PWD 0x02
+#define SD_SET_PWD 0x01
+
+#define SD_PWD_LEN 0x10
+
+/* SD lock unlock Status */
+#define SD_LOCKED 0x80 /* Global lock status */
+#define SD_LOCK_1BIT_MODE 0x40 /**/
+#define SD_PWD_EXIST 0x20
+#define SD_UNLOCK_POW_ON 0x01 /**/
+#define SD_SDR_RST 0x02 /* Reset SD30 card with current DDR mode to SDR mode. */
+/* g_bySDEraseStatus */
+#define SD_NOT_ERASE 0x00
+#define SD_UNDER_ERASING 0x01
+#define SD_COMPLETE_ERASE 0x02
+/* SD_RW FAIL status */
+#define SD_RW_FORBIDDEN 0x0F /* read/write is forbidden (SD card) */
+#endif
+/* Function Group Definition */
+/* Function Group 1 */
+#define HS_SUPPORT 0x01
+#define SDR50_SUPPORT 0x02
+#define SDR104_SUPPORT 0x03
+#define DDR50_SUPPORT 0x04
+#define HS_SUPPORT_MASK 0x02
+#define SDR50_SUPPORT_MASK 0x04
+#define SDR104_SUPPORT_MASK 0x08
+#define DDR50_SUPPORT_MASK 0x10
+#define HS_QUERY_SWITCH_OK 0x01
+#define SDR50_QUERY_SWITCH_OK 0x02
+#define SDR104_QUERY_SWITCH_OK 0x03
+#define DDR50_QUERY_SWITCH_OK 0x04
+#define HS_SWITCH_BUSY 0x02
+#define SDR50_SWITCH_BUSY 0x04
+#define SDR104_SWITCH_BUSY 0x08
+#define DDR50_SWITCH_BUSY 0x10
+#define FUNCTION_GROUP1_SUPPORT_OFFSET 0x0D
+#define FUNCTION_GROUP1_QUERY_SWITCH_OFFSET 0x10
+#define FUNCTION_GROUP1_CHECK_BUSY_OFFSET 0x1D
+/* Function Group 3 */
+#define DRIVING_TYPE_A 0x01
+#define DRIVING_TYPE_B 0x00
+#define DRIVING_TYPE_C 0x02
+#define DRIVING_TYPE_D 0x03
+#define DRIVING_TYPE_A_MASK 0x02
+#define DRIVING_TYPE_B_MASK 0x01
+#define DRIVING_TYPE_C_MASK 0x04
+#define DRIVING_TYPE_D_MASK 0x08
+#define TYPE_A_QUERY_SWITCH_OK 0x01
+#define TYPE_B_QUERY_SWITCH_OK 0x00
+#define TYPE_C_QUERY_SWITCH_OK 0x02
+#define TYPE_D_QUERY_SWITCH_OK 0x03
+#define TYPE_A_SWITCH_BUSY 0x02
+#define TYPE_B_SWITCH_BUSY 0x01
+#define TYPE_C_SWITCH_BUSY 0x04
+#define TYPE_D_SWITCH_BUSY 0x08
+#define FUNCTION_GROUP3_SUPPORT_OFFSET 0x09
+#define FUNCTION_GROUP3_QUERY_SWITCH_OFFSET 0x0F
+#define FUNCTION_GROUP3_CHECK_BUSY_OFFSET 0x19
+/* Function Group 4 */
+#define CURRENT_LIMIT_200 0x00
+#define CURRENT_LIMIT_400 0x01
+#define CURRENT_LIMIT_600 0x02
+#define CURRENT_LIMIT_800 0x03
+#define CURRENT_LIMIT_200_MASK 0x01
+#define CURRENT_LIMIT_400_MASK 0x02
+#define CURRENT_LIMIT_600_MASK 0x04
+#define CURRENT_LIMIT_800_MASK 0x08
+#define CURRENT_LIMIT_200_QUERY_SWITCH_OK 0x00
+#define CURRENT_LIMIT_400_QUERY_SWITCH_OK 0x01
+#define CURRENT_LIMIT_600_QUERY_SWITCH_OK 0x02
+#define CURRENT_LIMIT_800_QUERY_SWITCH_OK 0x03
+#define CURRENT_LIMIT_200_SWITCH_BUSY 0x01
+#define CURRENT_LIMIT_400_SWITCH_BUSY 0x02
+#define CURRENT_LIMIT_600_SWITCH_BUSY 0x04
+#define CURRENT_LIMIT_800_SWITCH_BUSY 0x08
+#define FUNCTION_GROUP4_SUPPORT_OFFSET 0x07
+#define FUNCTION_GROUP4_QUERY_SWITCH_OFFSET 0x0F
+#define FUNCTION_GROUP4_CHECK_BUSY_OFFSET 0x17
+/* Switch Function Status Offset */
+#define DATA_STRUCTURE_VER_OFFSET 0x11 /* The high offset */
+#define MAX_PHASE 15
+/* #define TOTAL_READ_PHASE 0x20 */
+/* #define TOTAL_WRITE_PHASE 0x20 */
+/* MMC v4.0 */
+/* #define MMC_52MHZ_SPEED 0x0001 */
+/* #define MMC_26MHZ_SPEED 0x0002 */
+#define MMC_8BIT_BUS 0x0010
+#define MMC_4BIT_BUS 0x0020
+/* #define MMC_SECTOR_MODE 0x0100 */
+#define MMC_SWITCH_ERR 0x80
+/* Tuning direction RX or TX */
+#define TUNE_TX 0x00
+#define TUNE_RX 0x01
+/* For Change_DCM_FreqMode Function */
+#define CHANGE_TX 0x00
+#define CHANGE_RX 0x01
+#define DCM_HIGH_FREQUENCY_MODE 0x00
+#define DCM_LOW_FREQUENCY_MODE 0x01
+#define DCM_HIGH_FREQUENCY_MODE_SET 0x0C
+#define DCM_Low_FREQUENCY_MODE_SET 0x00
+/* For Change_FPGA_SSCClock Function */
+#define MULTIPLY_BY_1 0x00
+#define MULTIPLY_BY_2 0x01
+#define MULTIPLY_BY_3 0x02
+#define MULTIPLY_BY_4 0x03
+#define MULTIPLY_BY_5 0x04
+#define MULTIPLY_BY_6 0x05
+#define MULTIPLY_BY_7 0x06
+#define MULTIPLY_BY_8 0x07
+#define MULTIPLY_BY_9 0x08
+#define MULTIPLY_BY_10 0x09
+#define DIVIDE_BY_2 0x01
+#define DIVIDE_BY_3 0x02
+#define DIVIDE_BY_4 0x03
+#define DIVIDE_BY_5 0x04
+#define DIVIDE_BY_6 0x05
+#define DIVIDE_BY_7 0x06
+#define DIVIDE_BY_8 0x07
+#define DIVIDE_BY_9 0x08
+#define DIVIDE_BY_10 0x09
+#define CHECK_SD_TRANS_FAIL(chip, retval) \
+ (((retval) != STATUS_SUCCESS) || \
+ (chip->rsp_buf[0] & SD_TRANSFER_ERR))
+/* SD Tuning Data Structure */
+/* Record continuous timing phase path */
+struct timing_phase_path {
+ int start;
+ int end;
+ int mid;
+ int len;
+};
+
+int sd_select_card(struct rts51x_chip *chip, int select);
+int reset_sd_card(struct rts51x_chip *chip);
+int sd_switch_clock(struct rts51x_chip *chip);
+void sd_stop_seq_mode(struct rts51x_chip *chip);
+int sd_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 start_sector,
+ u16 sector_cnt);
+void sd_cleanup_work(struct rts51x_chip *chip);
+int sd_power_off_card3v3(struct rts51x_chip *chip);
+int release_sd_card(struct rts51x_chip *chip);
+
+#ifdef SUPPORT_SD_LOCK
+int sd_update_lock_status(struct rts51x_chip *chip);
+#endif
+
+#ifdef SUPPORT_CPRM
+extern int reset_sd(struct rts51x_chip *chip);
+extern int sd_check_data0_status(struct rts51x_chip *chip);
+extern int sd_read_data(struct rts51x_chip *chip, u8 trans_mode, u8 *cmd,
+ int cmd_len, u16 byte_cnt, u16 blk_cnt, u8 bus_width,
+ u8 *buf, int buf_len, int timeout);
+#endif
+
+#endif /* __RTS51X_SD_H */
diff --git a/drivers/staging/rts5139/sd_cprm.c b/drivers/staging/rts5139/sd_cprm.c
new file mode 100644
index 00000000000..407cd43ad3b
--- /dev/null
+++ b/drivers/staging/rts5139/sd_cprm.c
@@ -0,0 +1,1215 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "debug.h"
+#include "trace.h"
+#include "rts51x.h"
+#include "rts51x_transport.h"
+#include "rts51x_scsi.h"
+#include "rts51x_card.h"
+#include "rts51x_chip.h"
+#include "sd.h"
+
+#ifdef SUPPORT_CPRM
+
+static inline int get_rsp_type(u8 rsp_code, u8 *rsp_type, int *rsp_len)
+{
+ if (!rsp_type || !rsp_len)
+ return STATUS_FAIL;
+
+ switch (rsp_code) {
+ case 0x03:
+ *rsp_type = SD_RSP_TYPE_R0; /* no response */
+ *rsp_len = 0;
+ break;
+
+ case 0x04:
+ *rsp_type = SD_RSP_TYPE_R1; /* R1,R6(,R4,R5) */
+ *rsp_len = 6;
+ break;
+
+ case 0x05:
+ *rsp_type = SD_RSP_TYPE_R1b; /* R1b */
+ *rsp_len = 6;
+ break;
+
+ case 0x06:
+ *rsp_type = SD_RSP_TYPE_R2; /* R2 */
+ *rsp_len = 17;
+ break;
+
+ case 0x07:
+ *rsp_type = SD_RSP_TYPE_R3; /* R3 */
+ *rsp_len = 6;
+ break;
+
+ default:
+ return STATUS_FAIL;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int soft_reset_sd_card(struct rts51x_chip *chip)
+{
+ return reset_sd(chip);
+}
+
+int ext_sd_send_cmd_get_rsp(struct rts51x_chip *chip, u8 cmd_idx,
+ u32 arg, u8 rsp_type, u8 *rsp, int rsp_len,
+ int special_check)
+{
+ int retval;
+ int timeout = 50;
+ u16 reg_addr;
+ u8 buf[17], stat;
+ int len = 2;
+ int rty_cnt = 0;
+
+ RTS51X_DEBUGP("EXT SD/MMC CMD %d\n", cmd_idx);
+
+ if (rsp_type == SD_RSP_TYPE_R1b)
+ timeout = 3000;
+
+RTY_SEND_CMD:
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD0, 0xFF, 0x40 | cmd_idx);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD1, 0xFF, (u8) (arg >> 24));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD2, 0xFF, (u8) (arg >> 16));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD3, 0xFF, (u8) (arg >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD4, 0xFF, (u8) arg);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG2, 0xFF, rsp_type);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_TRANSFER,
+ 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ rts51x_add_cmd(chip, READ_REG_CMD, SD_STAT1, 0, 0);
+
+ if (CHECK_USB(chip, USB_20)) {
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ for (reg_addr = PPBUF_BASE2;
+ reg_addr < PPBUF_BASE2 + 16; reg_addr++) {
+ rts51x_add_cmd(chip, READ_REG_CMD, reg_addr, 0,
+ 0);
+ }
+ len = 19;
+ } else if (rsp_type != SD_RSP_TYPE_R0) {
+ /* Read data from SD_CMDx registers */
+ for (reg_addr = SD_CMD0; reg_addr <= SD_CMD4;
+ reg_addr++) {
+ rts51x_add_cmd(chip, READ_REG_CMD, reg_addr, 0,
+ 0);
+ }
+ len = 8;
+ } else {
+ len = 3;
+ }
+ rts51x_add_cmd(chip, READ_REG_CMD, SD_CMD5, 0, 0);
+ } else {
+ len = 2;
+ }
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, len, timeout);
+
+ if (CHECK_SD_TRANS_FAIL(chip, retval)) {
+ rts51x_clear_sd_error(chip);
+
+ if (retval == STATUS_TIMEDOUT) {
+ if (rsp_type & SD_WAIT_BUSY_END) {
+ retval = sd_check_data0_status(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R0)
+ return STATUS_SUCCESS;
+
+ if (CHECK_USB(chip, USB_20)) {
+ rts51x_read_rsp_buf(chip, 2, buf, len - 2);
+ } else {
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ reg_addr = PPBUF_BASE2;
+ len = 16;
+ } else {
+ reg_addr = SD_CMD0;
+ len = 5;
+ }
+ retval =
+ rts51x_seq_read_register(chip, reg_addr,
+ (unsigned short)len, buf);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ RTS51X_READ_REG(chip, SD_CMD5, buf + len);
+ }
+ stat = chip->rsp_buf[1];
+
+ if ((buf[0] & 0xC0) != 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!(rsp_type & SD_NO_CHECK_CRC7)) {
+ if (stat & SD_CRC7_ERR) {
+ if (cmd_idx == WRITE_MULTIPLE_BLOCK)
+ TRACE_RET(chip, STATUS_FAIL);
+ if (rty_cnt < SD_MAX_RETRY_COUNT) {
+ wait_timeout(20);
+ rty_cnt++;
+ goto RTY_SEND_CMD;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ if ((cmd_idx == SELECT_CARD) || (cmd_idx == APP_CMD) ||
+ (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) {
+ if ((cmd_idx != STOP_TRANSMISSION) && (special_check == 0)) {
+ if (buf[1] & 0x80)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#ifdef SUPPORT_SD_LOCK
+ if (buf[1] & 0x7D) {
+#else
+ if (buf[1] & 0x7F) {
+#endif
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (buf[2] & 0xF8)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (cmd_idx == SELECT_CARD) {
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ if ((buf[3] & 0x1E) != 0x04)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else if (rsp_type == SD_RSP_TYPE_R2) {
+ if ((buf[3] & 0x1E) != 0x03)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ if (rsp && rsp_len)
+ memcpy(rsp, buf, rsp_len);
+
+ return STATUS_SUCCESS;
+}
+
+int ext_sd_get_rsp(struct rts51x_chip *chip, int len, u8 * rsp, u8 rsp_type)
+{
+ int retval, rsp_len;
+ u16 reg_addr;
+
+ if (rsp_type == SD_RSP_TYPE_R0)
+ return STATUS_SUCCESS;
+
+ rts51x_init_cmd(chip);
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
+ reg_addr++) {
+ rts51x_add_cmd(chip, READ_REG_CMD, reg_addr, 0xFF, 0);
+ }
+ rsp_len = 17;
+ } else if (rsp_type != SD_RSP_TYPE_R0) {
+ for (reg_addr = SD_CMD0; reg_addr <= SD_CMD4; reg_addr++)
+ rts51x_add_cmd(chip, READ_REG_CMD, reg_addr, 0xFF, 0);
+ rsp_len = 6;
+ }
+ rts51x_add_cmd(chip, READ_REG_CMD, SD_CMD5, 0xFF, 0);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, rsp_len, 100);
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (rsp) {
+ int min_len = (rsp_len < len) ? rsp_len : len;
+
+ memcpy(rsp, rts51x_get_rsp_data(chip), min_len);
+
+ RTS51X_DEBUGP("min_len = %d\n", min_len);
+ RTS51X_DEBUGP("Response in cmd buf: 0x%x 0x%x 0x%x 0x%x\n",
+ rsp[0], rsp[1], rsp[2], rsp[3]);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int ext_sd_execute_no_data(struct rts51x_chip *chip, unsigned int lun,
+ u8 cmd_idx, u8 standby, u8 acmd, u8 rsp_code,
+ u32 arg)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval, rsp_len;
+ u8 rsp_type;
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ retval = get_rsp_type(rsp_code, &rsp_type, &rsp_len);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ sd_card->last_rsp_type = rsp_type;
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+#ifdef SUPPORT_SD_LOCK
+ if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
+ if (CHK_MMC_8BIT(sd_card)) {
+ retval =
+ rts51x_write_register(chip, SD_CFG1, 0x03,
+ SD_BUS_WIDTH_8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
+ retval =
+ rts51x_write_register(chip, SD_CFG1, 0x03,
+ SD_BUS_WIDTH_4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#else
+ /* Set H/W SD/MMC Bus Width */
+ rts51x_write_register(chip, SD_CFG1, 0x03, SD_BUS_WIDTH_4);
+#endif
+
+ if (standby) {
+ retval = sd_select_card(chip, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+ }
+
+ if (acmd) {
+ retval =
+ ext_sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+ }
+
+ retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
+ sd_card->rsp, rsp_len, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+
+ if (standby) {
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+ }
+#ifdef SUPPORT_SD_LOCK
+ /* Get SD lock status */
+ retval = sd_update_lock_status(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+#endif
+
+ return TRANSPORT_GOOD;
+
+SD_Execute_Cmd_Failed:
+ sd_card->pre_cmd_err = 1;
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ release_sd_card(chip);
+ do_reset_sd_card(chip);
+ if (!(chip->card_ready & SD_CARD))
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+
+ TRACE_RET(chip, TRANSPORT_FAILED);
+}
+
+int ext_sd_execute_read_data(struct rts51x_chip *chip, unsigned int lun,
+ u8 cmd_idx, u8 cmd12, u8 standby,
+ u8 acmd, u8 rsp_code, u32 arg, u32 data_len,
+ void *data_buf, unsigned int buf_len, int use_sg)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval, rsp_len, i;
+ int cmd13_checkbit = 0, read_err = 0;
+ u8 rsp_type, bus_width;
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ retval = get_rsp_type(rsp_code, &rsp_type, &rsp_len);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ sd_card->last_rsp_type = rsp_type;
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+#ifdef SUPPORT_SD_LOCK
+ if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
+ if (CHK_MMC_8BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_8;
+ else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_4;
+ else
+ bus_width = SD_BUS_WIDTH_1;
+ } else {
+ bus_width = SD_BUS_WIDTH_4;
+ }
+ RTS51X_DEBUGP("bus_width = %d\n", bus_width);
+#else
+ bus_width = SD_BUS_WIDTH_4;
+#endif
+
+ if (data_len < 512) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (standby) {
+ retval = sd_select_card(chip, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (acmd) {
+ retval =
+ ext_sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (data_len <= 512) {
+ int min_len;
+ u8 *buf;
+ u16 byte_cnt, blk_cnt;
+ u8 cmd[5];
+ unsigned int offset = 0;
+ void *sg = NULL;
+
+ byte_cnt = (u16) (data_len & 0x3FF);
+ blk_cnt = 1;
+
+ cmd[0] = 0x40 | cmd_idx;
+ cmd[1] = (u8) (arg >> 24);
+ cmd[2] = (u8) (arg >> 16);
+ cmd[3] = (u8) (arg >> 8);
+ cmd[4] = (u8) arg;
+
+ buf = kmalloc(data_len, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, byte_cnt,
+ blk_cnt, bus_width, buf, data_len, 2000);
+ if (retval != STATUS_SUCCESS) {
+ read_err = 1;
+ kfree(buf);
+ rts51x_write_register(chip, CARD_STOP,
+ SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ min_len = min(data_len, buf_len);
+ if (use_sg)
+ rts51x_access_sglist(buf, min_len, (void *)data_buf,
+ &sg, &offset, TO_XFER_BUF);
+ else
+ memcpy(data_buf, buf, min_len);
+
+ kfree(buf);
+ } else if (!(data_len & 0x1FF)) {
+ rts51x_init_cmd(chip);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 0x02);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_H,
+ 0xFF, (u8) (data_len >> 17));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_L,
+ 0xFF, (u8) ((data_len & 0x0001FE00) >> 9));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD0, 0xFF,
+ 0x40 | cmd_idx);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD1, 0xFF,
+ (u8) (arg >> 24));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD2, 0xFF,
+ (u8) (arg >> 16));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD3, 0xFF,
+ (u8) (arg >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CMD4, 0xFF, (u8) arg);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG1, 0x03, bus_width);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_CFG2, 0xFF, rsp_type);
+ trans_dma_enable(DMA_FROM_DEVICE, chip, data_len, DMA_512);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+ retval = rts51x_send_cmd(chip, MODE_CDIR, 100);
+ if (retval != STATUS_SUCCESS) {
+ read_err = 1;
+ rts51x_ep0_write_register(chip, CARD_STOP,
+ SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ retval =
+ rts51x_transfer_data_rcc(chip, RCV_BULK_PIPE(chip),
+ data_buf, buf_len, use_sg, NULL,
+ 10000, STAGE_DI);
+ if (retval != STATUS_SUCCESS) {
+ read_err = 1;
+ rts51x_ep0_write_register(chip, CARD_STOP,
+ SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+ retval = rts51x_get_rsp(chip, 1, 500);
+ if (CHECK_SD_TRANS_FAIL(chip, retval)) {
+ read_err = 1;
+ rts51x_ep0_write_register(chip, CARD_STOP,
+ SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+ } else {
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ retval = ext_sd_get_rsp(chip, rsp_len, sd_card->rsp, rsp_type);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+
+ if (standby) {
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (cmd12) {
+ retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
+ 0, SD_RSP_TYPE_R1b, NULL, 0,
+ 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (data_len < 512) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+
+ rts51x_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
+ rts51x_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+ }
+
+ if (standby || cmd12)
+ cmd13_checkbit = 1;
+
+ for (i = 0; i < 3; i++) {
+ retval =
+ ext_sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ cmd13_checkbit);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+
+ return TRANSPORT_GOOD;
+
+SD_Execute_Read_Cmd_Failed:
+ sd_card->pre_cmd_err = 1;
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ if (read_err)
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ release_sd_card(chip);
+ do_reset_sd_card(chip);
+ if (!(chip->card_ready & SD_CARD))
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+
+ TRACE_RET(chip, TRANSPORT_FAILED);
+}
+
+int ext_sd_execute_write_data(struct rts51x_chip *chip, unsigned int lun,
+ u8 cmd_idx, u8 cmd12, u8 standby, u8 acmd,
+ u8 rsp_code, u32 arg, u32 data_len,
+ void *data_buf, unsigned int buf_len, int use_sg)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval, rsp_len;
+ int cmd13_checkbit = 0, write_err = 0;
+ u8 rsp_type;
+ u32 i;
+#ifdef SUPPORT_SD_LOCK
+ int lock_cmd_fail = 0;
+ u8 sd_lock_state = 0;
+ u8 lock_cmd_type = 0;
+#endif
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+#ifdef SUPPORT_SD_LOCK
+ if (cmd_idx == LOCK_UNLOCK) {
+ sd_lock_state = sd_card->sd_lock_status;
+ sd_lock_state &= SD_LOCKED;
+ }
+#endif
+
+ retval = get_rsp_type(rsp_code, &rsp_type, &rsp_len);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ sd_card->last_rsp_type = rsp_type;
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+#ifdef SUPPORT_SD_LOCK
+ if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
+ if (CHK_MMC_8BIT(sd_card)) {
+ retval =
+ rts51x_write_register(chip, SD_CFG1, 0x03,
+ SD_BUS_WIDTH_8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
+ retval =
+ rts51x_write_register(chip, SD_CFG1, 0x03,
+ SD_BUS_WIDTH_4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#else
+ rts51x_write_register(chip, SD_CFG1, 0x03, SD_BUS_WIDTH_4);
+#endif
+
+ if (data_len < 512) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (standby) {
+ retval = sd_select_card(chip, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (acmd) {
+ retval =
+ ext_sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
+ sd_card->rsp, rsp_len, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+ if (data_len <= 512) {
+ u8 *buf;
+ unsigned int offset = 0;
+ void *sg = NULL;
+
+ buf = kmalloc(data_len, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ if (use_sg)
+ rts51x_access_sglist(buf, data_len, (void *)data_buf,
+ &sg, &offset, FROM_XFER_BUF);
+ else
+ memcpy(buf, data_buf, data_len);
+
+#ifdef SUPPORT_SD_LOCK
+ if (cmd_idx == LOCK_UNLOCK)
+ lock_cmd_type = buf[0] & 0x0F;
+#endif
+
+ if (data_len > 256) {
+ rts51x_init_cmd(chip);
+ for (i = 0; i < 256; i++) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD,
+ (u16) (PPBUF_BASE2 + i), 0xFF,
+ buf[i]);
+ }
+ retval = rts51x_send_cmd(chip, MODE_C, 250);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ rts51x_init_cmd(chip);
+ for (i = 256; i < data_len; i++) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD,
+ (u16) (PPBUF_BASE2 + i), 0xFF,
+ buf[i]);
+ }
+ retval = rts51x_send_cmd(chip, MODE_C, 250);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+ } else {
+ rts51x_init_cmd(chip);
+ for (i = 0; i < data_len; i++) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD,
+ (u16) (PPBUF_BASE2 + i), 0xFF,
+ buf[i]);
+ }
+ retval = rts51x_send_cmd(chip, MODE_C, 250);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+ }
+
+ kfree(buf);
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF,
+ (u8) ((data_len >> 8) & 0x03));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF,
+ (u8) data_len);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 0x01);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+ retval = rts51x_get_rsp(chip, 1, 250);
+ if (CHECK_SD_TRANS_FAIL(chip, retval))
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ } else if (!(data_len & 0x1FF)) {
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 0x02);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x00);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_H,
+ 0xFF, (u8) (data_len >> 17));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_BLOCK_CNT_L,
+ 0xFF, (u8) ((data_len & 0x0001FE00) >> 9));
+
+ trans_dma_enable(DMA_TO_DEVICE, chip, data_len, DMA_512);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CDOR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+ retval =
+ rts51x_transfer_data_rcc(chip, SND_BULK_PIPE(chip),
+ data_buf, buf_len, use_sg, NULL,
+ 10000, STAGE_DO);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+ retval = rts51x_get_rsp(chip, 1, 10000);
+ if (CHECK_SD_TRANS_FAIL(chip, retval))
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+ } else {
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (retval < 0) {
+ write_err = 1;
+ rts51x_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+#ifdef SUPPORT_SD_LOCK
+ if (cmd_idx == LOCK_UNLOCK) {
+ if (lock_cmd_type == SD_ERASE) {
+ sd_card->sd_erase_status = SD_UNDER_ERASING;
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+ }
+
+ rts51x_init_cmd(chip);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, SD_BUS_STAT, SD_DAT0_STATUS,
+ SD_DAT0_STATUS);
+ retval = rts51x_send_cmd(chip, MODE_CR, 250);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ rts51x_get_rsp(chip, 1, 200); /* Don't care return value */
+
+ retval = sd_update_lock_status(chip);
+ if (retval != STATUS_SUCCESS) {
+ RTS51X_DEBUGP("Lock command fail!\n");
+ lock_cmd_fail = 1;
+ }
+ }
+#endif /* SUPPORT_SD_LOCK */
+
+ if (standby) {
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (cmd12) {
+ retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
+ 0, SD_RSP_TYPE_R1b, NULL, 0,
+ 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (data_len < 512) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+ rts51x_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
+ rts51x_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+ }
+
+ if (cmd12 || standby) {
+ /* There is CMD7 or CMD12 sent before CMD13 */
+ cmd13_checkbit = 1;
+ }
+
+ for (i = 0; i < 3; i++) {
+ retval =
+ ext_sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ cmd13_checkbit);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+#ifdef SUPPORT_SD_LOCK
+ if (cmd_idx == LOCK_UNLOCK) {
+ if (!lock_cmd_fail) {
+ RTS51X_DEBUGP("lock_cmd_type = 0x%x\n",
+ lock_cmd_type);
+ if (lock_cmd_type & SD_CLR_PWD)
+ sd_card->sd_lock_status &= ~SD_PWD_EXIST;
+ if (lock_cmd_type & SD_SET_PWD)
+ sd_card->sd_lock_status |= SD_PWD_EXIST;
+ }
+
+ RTS51X_DEBUGP("sd_lock_state = 0x%x,"
+ "sd_card->sd_lock_status = 0x%x\n",
+ sd_lock_state, sd_card->sd_lock_status);
+ if (sd_lock_state ^ (sd_card->sd_lock_status & SD_LOCKED)) {
+ sd_card->sd_lock_notify = 1;
+ if (sd_lock_state) {
+ if (sd_card->sd_lock_status &
+ SD_LOCK_1BIT_MODE) {
+ sd_card->sd_lock_status |=
+ (SD_UNLOCK_POW_ON | SD_SDR_RST);
+ if (CHK_SD(sd_card)) {
+ retval = reset_sd(chip);
+ if (retval != STATUS_SUCCESS) {
+ sd_card->sd_lock_status
+ &= ~(SD_UNLOCK_POW_ON |
+ SD_SDR_RST);
+ TRACE_GOTO(chip,
+ SD_Execute_Write_Cmd_Failed);
+ }
+ }
+
+ sd_card->sd_lock_status &=
+ ~(SD_UNLOCK_POW_ON | SD_SDR_RST);
+ }
+ }
+ }
+ }
+
+ if (lock_cmd_fail) {
+ scsi_set_resid(srb, 0);
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+#endif /* SUPPORT_SD_LOCK */
+
+ return TRANSPORT_GOOD;
+
+SD_Execute_Write_Cmd_Failed:
+ sd_card->pre_cmd_err = 1;
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ if (write_err)
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ release_sd_card(chip);
+ do_reset_sd_card(chip);
+ if (!(chip->card_ready & SD_CARD))
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+
+ TRACE_RET(chip, TRANSPORT_FAILED);
+}
+
+int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int len;
+ u8 buf[18] = {
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x0E,
+ 0x00, /* Version Number */
+ 0x00, /* WP | Media Type */
+ 0x00, /* RCA (Low byte) */
+ 0x00, /* RCA (High byte) */
+ 0x53, /* 'S' */
+ 0x44, /* 'D' */
+ 0x20, /* ' ' */
+ 0x43, /* 'C' */
+ 0x61, /* 'a' */
+ 0x72, /* 'r' */
+ 0x64, /* 'd' */
+ 0x00, /* Max LUN Number */
+ 0x00,
+ 0x00,
+ };
+
+ sd_card->pre_cmd_err = 0;
+
+ if (!(CHK_BIT(chip->lun_mc, lun))) {
+ SET_BIT(chip->lun_mc, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3])
+ || (0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5])
+ || (0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7])
+ || (0x64 != srb->cmnd[8])) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ switch (srb->cmnd[1] & 0x0F) {
+ case 0:
+ sd_card->sd_pass_thru_en = 0;
+ break;
+
+ case 1:
+ sd_card->sd_pass_thru_en = 1;
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ /* 0x01:SD Memory Card; 0x02:Other Media; 0x03:Illegal Media; */
+ buf[5] = (1 == CHK_SD(sd_card)) ? 0x01 : 0x02;
+ if (chip->card_wp & SD_CARD)
+ buf[5] |= 0x80;
+
+ buf[6] = (u8) (sd_card->sd_addr >> 16);
+ buf[7] = (u8) (sd_card->sd_addr >> 24);
+
+ buf[15] = chip->max_lun;
+
+ len = min(18, (int)scsi_bufflen(srb));
+ rts51x_set_xfer_buf(buf, len, srb);
+
+ return TRANSPORT_GOOD;
+}
+
+int sd_execute_no_data(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u8 cmd_idx, rsp_code;
+ u8 standby = 0, acmd = 0;
+ u32 arg;
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ cmd_idx = srb->cmnd[2] & 0x3F;
+ if (srb->cmnd[1] & 0x02)
+ standby = 1;
+ if (srb->cmnd[1] & 0x01)
+ acmd = 1;
+
+ arg = ((u32) srb->cmnd[3] << 24) | ((u32) srb->cmnd[4] << 16) |
+ ((u32) srb->cmnd[5] << 8) | srb->cmnd[6];
+
+ rsp_code = srb->cmnd[10];
+
+ retval =
+ ext_sd_execute_no_data(chip, lun, cmd_idx, standby, acmd, rsp_code,
+ arg);
+ scsi_set_resid(srb, 0);
+ return retval;
+}
+
+int sd_execute_read_data(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 cmd_idx, rsp_code, send_cmd12 = 0, standby = 0, acmd = 0;
+ u32 arg, data_len;
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ cmd_idx = srb->cmnd[2] & 0x3F;
+ if (srb->cmnd[1] & 0x04)
+ send_cmd12 = 1;
+ if (srb->cmnd[1] & 0x02)
+ standby = 1;
+ if (srb->cmnd[1] & 0x01)
+ acmd = 1;
+
+ arg = ((u32) srb->cmnd[3] << 24) | ((u32) srb->cmnd[4] << 16) |
+ ((u32) srb->cmnd[5] << 8) | srb->cmnd[6];
+
+ data_len =
+ ((u32) srb->cmnd[7] << 16) | ((u32) srb->cmnd[8] << 8) |
+ srb->cmnd[9];
+ rsp_code = srb->cmnd[10];
+
+ retval =
+ ext_sd_execute_read_data(chip, lun, cmd_idx, send_cmd12, standby,
+ acmd, rsp_code, arg, data_len,
+ scsi_sglist(srb), scsi_bufflen(srb),
+ scsi_sg_count(srb));
+ scsi_set_resid(srb, 0);
+ return retval;
+}
+
+int sd_execute_write_data(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 cmd_idx, rsp_code, send_cmd12 = 0, standby = 0, acmd = 0;
+ u32 data_len, arg;
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ cmd_idx = srb->cmnd[2] & 0x3F;
+ if (srb->cmnd[1] & 0x04)
+ send_cmd12 = 1;
+ if (srb->cmnd[1] & 0x02)
+ standby = 1;
+ if (srb->cmnd[1] & 0x01)
+ acmd = 1;
+
+ data_len =
+ ((u32) srb->cmnd[7] << 16) | ((u32) srb->cmnd[8] << 8) |
+ srb->cmnd[9];
+ arg =
+ ((u32) srb->cmnd[3] << 24) | ((u32) srb->cmnd[4] << 16) |
+ ((u32) srb->cmnd[5] << 8) | srb->cmnd[6];
+ rsp_code = srb->cmnd[10];
+
+ retval =
+ ext_sd_execute_write_data(chip, lun, cmd_idx, send_cmd12, standby,
+ acmd, rsp_code, arg, data_len,
+ scsi_sglist(srb), scsi_bufflen(srb),
+ scsi_sg_count(srb));
+ scsi_set_resid(srb, 0);
+ return retval;
+}
+
+int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int count;
+ u16 data_len;
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ data_len = ((u16) srb->cmnd[7] << 8) | srb->cmnd[8];
+
+ if (sd_card->last_rsp_type == SD_RSP_TYPE_R0) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else if (sd_card->last_rsp_type == SD_RSP_TYPE_R2) {
+ count = (data_len < 17) ? data_len : 17;
+ } else {
+ count = (data_len < 6) ? data_len : 6;
+ }
+ rts51x_set_xfer_buf(sd_card->rsp, count, srb);
+
+ RTS51X_DEBUGP("Response length: %d\n", data_len);
+ RTS51X_DEBUGP("Response: 0x%x 0x%x 0x%x 0x%x\n",
+ sd_card->rsp[0], sd_card->rsp[1], sd_card->rsp[2],
+ sd_card->rsp[3]);
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+
+int sd_hw_rst(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3])
+ || (0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5])
+ || (0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7])
+ || (0x64 != srb->cmnd[8])) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ switch (srb->cmnd[1] & 0x0F) {
+ case 0:
+ /* SD Card Power Off -> ON and Initialization */
+#ifdef SUPPORT_SD_LOCK
+ if (0x64 == srb->cmnd[9]) {
+ /* Command Mode */
+ sd_card->sd_lock_status |= SD_SDR_RST;
+ }
+#endif /* SUPPORT_SD_LOCK */
+ retval = reset_sd_card(chip);
+ if (retval != STATUS_SUCCESS) {
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_SDR_RST;
+#endif
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ sd_card->pre_cmd_err = 1;
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_SDR_RST;
+#endif
+ break;
+
+ case 1:
+ /* reset CMD(CMD0) and Initialization
+ * (without SD Card Power Off -> ON) */
+ retval = soft_reset_sd_card(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ sd_card->pre_cmd_err = 1;
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+#endif
diff --git a/drivers/staging/rts5139/sd_cprm.h b/drivers/staging/rts5139/sd_cprm.h
new file mode 100644
index 00000000000..75e263b6594
--- /dev/null
+++ b/drivers/staging/rts5139/sd_cprm.h
@@ -0,0 +1,54 @@
+/* Driver for Realtek RTS51xx USB card reader
+ * Header file
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_SD_CPRM_H
+#define __RTS51X_SD_CPRM_H
+
+#include "rts51x_chip.h"
+#include "sd.h"
+
+#ifdef SUPPORT_CPRM
+int ext_sd_execute_no_data(struct rts51x_chip *chip, unsigned int lun,
+ u8 cmd_idx, u8 standby, u8 acmd, u8 rsp_code,
+ u32 arg);
+int ext_sd_execute_read_data(struct rts51x_chip *chip, unsigned int lun,
+ u8 cmd_idx, u8 cmd12, u8 standby, u8 acmd,
+ u8 rsp_code, u32 arg, u32 data_len, void *data_buf,
+ unsigned int buf_len, int use_sg);
+int ext_sd_execute_write_data(struct rts51x_chip *chip, unsigned int lun,
+ u8 cmd_idx, u8 cmd12, u8 standby, u8 acmd,
+ u8 rsp_code, u32 arg, u32 data_len,
+ void *data_buf, unsigned int buf_len, int use_sg);
+
+int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+int sd_execute_no_data(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+int sd_execute_read_data(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+int sd_execute_write_data(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+int sd_hw_rst(struct scsi_cmnd *srb, struct rts51x_chip *chip);
+#endif
+
+#endif /* __RTS51X_SD_CPRM_H */
diff --git a/drivers/staging/rts5139/trace.h b/drivers/staging/rts5139/trace.h
new file mode 100644
index 00000000000..0584b8ab43c
--- /dev/null
+++ b/drivers/staging/rts5139/trace.h
@@ -0,0 +1,137 @@
+/* Driver for Realtek RTS51xx USB card reader
+ * Header file
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_TRACE_H
+#define __RTS51X_TRACE_H
+
+#include "debug.h"
+
+#define _MSG_TRACE
+
+#ifdef _MSG_TRACE
+static inline char *filename(char *path)
+{
+ char *ptr;
+
+ if (path == NULL)
+ return NULL;
+
+ ptr = path;
+
+ while (*ptr != '\0') {
+ if ((*ptr == '\\') || (*ptr == '/'))
+ path = ptr + 1;
+ ptr++;
+ }
+
+ return path;
+}
+
+#define TRACE_RET(chip, ret) \
+do { \
+ char *_file = filename((char *)__FILE__); \
+ RTS51X_DEBUGP("[%s][%s]:[%d]\n", _file, __func__, __LINE__); \
+ (chip)->trace_msg[(chip)->msg_idx].line = (u16)(__LINE__); \
+ strncpy((chip)->trace_msg[(chip)->msg_idx].func, \
+ __func__, MSG_FUNC_LEN-1); \
+ strncpy((chip)->trace_msg[(chip)->msg_idx].file, \
+ _file, MSG_FILE_LEN-1); \
+ get_current_time((chip)->trace_msg[(chip)->msg_idx].timeval_buf,\
+ TIME_VAL_LEN); \
+ (chip)->trace_msg[(chip)->msg_idx].valid = 1; \
+ (chip)->msg_idx++; \
+ if ((chip)->msg_idx >= TRACE_ITEM_CNT) { \
+ (chip)->msg_idx = 0; \
+ } \
+ return ret; \
+} while (0)
+
+#define TRACE_GOTO(chip, label) \
+do { \
+ char *_file = filename((char *)__FILE__); \
+ RTS51X_DEBUGP("[%s][%s]:[%d]\n", _file, __func__, __LINE__); \
+ (chip)->trace_msg[(chip)->msg_idx].line = (u16)(__LINE__); \
+ strncpy((chip)->trace_msg[(chip)->msg_idx].func, \
+ __func__, MSG_FUNC_LEN-1); \
+ strncpy((chip)->trace_msg[(chip)->msg_idx].file, \
+ _file, MSG_FILE_LEN-1); \
+ get_current_time((chip)->trace_msg[(chip)->msg_idx].timeval_buf,\
+ TIME_VAL_LEN); \
+ (chip)->trace_msg[(chip)->msg_idx].valid = 1; \
+ (chip)->msg_idx++; \
+ if ((chip)->msg_idx >= TRACE_ITEM_CNT) { \
+ (chip)->msg_idx = 0; \
+ } \
+ goto label; \
+} while (0)
+#else
+#define TRACE_RET(chip, ret) return (ret)
+#define TRACE_GOTO(chip, label) goto label
+#endif
+
+#ifdef CONFIG_RTS5139_DEBUG
+static inline void rts51x_dump(u8 *buf, int buf_len)
+{
+ int i;
+ u8 tmp[16] = { 0 };
+ u8 *_ptr = buf;
+
+ for (i = 0; i < ((buf_len) / 16); i++) {
+ RTS51X_DEBUGP("%02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n",
+ _ptr[0], _ptr[1], _ptr[2], _ptr[3], _ptr[4],
+ _ptr[5], _ptr[6], _ptr[7], _ptr[8], _ptr[9],
+ _ptr[10], _ptr[11], _ptr[12], _ptr[13], _ptr[14],
+ _ptr[15]);
+ _ptr += 16;
+ }
+ if ((buf_len) % 16) {
+ memcpy(tmp, _ptr, (buf_len) % 16);
+ _ptr = tmp;
+ RTS51X_DEBUGP("%02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n",
+ _ptr[0], _ptr[1], _ptr[2], _ptr[3], _ptr[4],
+ _ptr[5], _ptr[6], _ptr[7], _ptr[8], _ptr[9],
+ _ptr[10], _ptr[11], _ptr[12], _ptr[13], _ptr[14],
+ _ptr[15]);
+ }
+}
+
+#define RTS51X_DUMP(buf, buf_len) \
+ rts51x_dump((u8 *)(buf), (buf_len))
+
+#define CATCH_TRIGGER(chip) \
+do { \
+ rts51x_ep0_write_register((chip), 0xFC31, 0x01, 0x01); \
+ RTS51X_DEBUGP("Catch trigger!\n"); \
+} while (0)
+
+#else
+#define RTS51X_DUMP(buf, buf_len)
+#define CATCH_TRIGGER(chip)
+#endif
+
+#endif /* __RTS51X_TRACE_H */
diff --git a/drivers/staging/rts5139/xd.c b/drivers/staging/rts5139/xd.c
new file mode 100644
index 00000000000..5820605d180
--- /dev/null
+++ b/drivers/staging/rts5139/xd.c
@@ -0,0 +1,2255 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include "debug.h"
+#include "trace.h"
+#include "rts51x.h"
+#include "rts51x_transport.h"
+#include "rts51x_scsi.h"
+#include "rts51x_card.h"
+#include "xd.h"
+
+static int xd_build_l2p_tbl(struct rts51x_chip *chip, int zone_no);
+static int xd_init_page(struct rts51x_chip *chip, u32 phy_blk, u16 logoff,
+ u8 start_page, u8 end_page);
+
+static inline void xd_set_err_code(struct rts51x_chip *chip, u8 err_code)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+
+ xd_card->err_code = err_code;
+}
+
+static inline int xd_check_err_code(struct rts51x_chip *chip, u8 err_code)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+
+ return (xd_card->err_code == err_code);
+}
+
+static int xd_set_init_para(struct rts51x_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+
+ if (chip->asic_code)
+ xd_card->xd_clock = 47;
+ else
+ xd_card->xd_clock = CLK_50;
+
+ retval = switch_clock(chip, xd_card->xd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_switch_clock(struct rts51x_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+
+ retval = rts51x_select_card(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = switch_clock(chip, xd_card->xd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_read_id(struct rts51x_chip *chip, u8 id_cmd, u8 *id_buf,
+ u8 buf_len)
+{
+ int retval, i;
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, id_cmd);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_READ_ID);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
+ XD_TRANSFER_END);
+
+ for (i = 0; i < 4; i++) {
+ rts51x_add_cmd(chip, READ_REG_CMD, (u16) (XD_ADDRESS1 + i), 0,
+ 0);
+ }
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 20);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 5, 20);
+
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ if (id_buf && buf_len) {
+ if (buf_len > 4)
+ buf_len = 4;
+ rts51x_read_rsp_buf(chip, 1, id_buf, buf_len);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static void xd_assign_phy_addr(struct rts51x_chip *chip, u32 addr, u8 mode)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+
+ switch (mode) {
+ case XD_RW_ADDR:
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF,
+ (u8) addr);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2, 0xFF,
+ (u8) (addr >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS3, 0xFF,
+ (u8) (addr >> 16));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
+ xd_card->addr_cycle | XD_CALC_ECC |
+ XD_BA_NO_TRANSFORM);
+ break;
+
+ case XD_ERASE_ADDR:
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF,
+ (u8) addr);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF,
+ (u8) (addr >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2, 0xFF,
+ (u8) (addr >> 16));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
+ (xd_card->addr_cycle - 1) |
+ XD_CALC_ECC | XD_BA_NO_TRANSFORM);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int xd_read_redundant(struct rts51x_chip *chip, u32 page_addr, u8 *buf,
+ int buf_len)
+{
+ int retval, i;
+
+ rts51x_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_READ_REDUNDANT);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
+ XD_TRANSFER_END);
+
+ for (i = 0; i < 6; i++) {
+ rts51x_add_cmd(chip, READ_REG_CMD, (u16) (XD_PAGE_STATUS + i),
+ 0, 0);
+ }
+ for (i = 0; i < 4; i++) {
+ rts51x_add_cmd(chip, READ_REG_CMD, (u16) (XD_RESERVED0 + i), 0,
+ 0);
+ }
+ rts51x_add_cmd(chip, READ_REG_CMD, XD_PARITY, 0, 0);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 11, 500);
+
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ if (buf && buf_len) {
+ if (buf_len > 11)
+ buf_len = 11;
+ rts51x_read_rsp_buf(chip, 1, buf, buf_len);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_read_data_from_ppb(struct rts51x_chip *chip, int offset, u8 *buf,
+ int buf_len)
+{
+ int retval, i;
+
+ if (!buf || (buf_len <= 0))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rts51x_init_cmd(chip);
+
+ for (i = 0; i < buf_len; i++) {
+ rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + offset + i, 0,
+ 0);
+ }
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, buf_len, 200);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ rts51x_read_rsp_buf(chip, 0, buf, buf_len);
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_read_cis(struct rts51x_chip *chip, u32 page_addr, u8 *buf,
+ int buf_len)
+{
+ int retval;
+ u8 reg;
+
+ if (!buf || (buf_len < 10))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rts51x_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
+ XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_READ_PAGES);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END | XD_PPB_EMPTY,
+ XD_TRANSFER_END | XD_PPB_EMPTY);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 1, 500);
+ if (retval == STATUS_TIMEDOUT) {
+ rts51x_clear_xd_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ RTS51X_READ_REG(chip, XD_PAGE_STATUS, &reg);
+ if (reg != XD_GPG) {
+ rts51x_clear_xd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTS51X_READ_REG(chip, XD_CTL, &reg);
+
+ if (!(reg & XD_ECC1_ERROR) || !(reg & XD_ECC1_UNCORRECTABLE)) {
+ retval = xd_read_data_from_ppb(chip, 0, buf, buf_len);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ if (reg & XD_ECC1_ERROR) { /* correctable error */
+ u8 ecc_bit, ecc_byte;
+
+ RTS51X_READ_REG(chip, XD_ECC_BIT1, &ecc_bit);
+ RTS51X_READ_REG(chip, XD_ECC_BYTE1, &ecc_byte);
+
+ RTS51X_DEBUGP("ECC_BIT1 = 0x%x, ECC_BYTE1 = 0x%x\n",
+ ecc_bit, ecc_byte);
+ if (ecc_byte < buf_len) {
+ RTS51X_DEBUGP("Before correct: 0x%x\n",
+ buf[ecc_byte]);
+ buf[ecc_byte] ^= (1 << ecc_bit);
+ RTS51X_DEBUGP("After correct: 0x%x\n",
+ buf[ecc_byte]);
+ }
+ }
+ } else if (!(reg & XD_ECC2_ERROR) || !(reg & XD_ECC2_UNCORRECTABLE)) {
+ RTS51X_WRITE_REG(chip, CARD_STOP, XD_STOP | XD_CLR_ERR,
+ XD_STOP | XD_CLR_ERR);
+
+ retval = xd_read_data_from_ppb(chip, 256, buf, buf_len);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ if (reg & XD_ECC2_ERROR) {
+ u8 ecc_bit, ecc_byte;
+
+ RTS51X_READ_REG(chip, XD_ECC_BIT2, &ecc_bit);
+ RTS51X_READ_REG(chip, XD_ECC_BYTE2, &ecc_byte);
+
+ RTS51X_DEBUGP("ECC_BIT2 = 0x%x, ECC_BYTE2 = 0x%x\n",
+ ecc_bit, ecc_byte);
+ if (ecc_byte < buf_len) {
+ RTS51X_DEBUGP("Before correct: 0x%x\n",
+ buf[ecc_byte]);
+ buf[ecc_byte] ^= (1 << ecc_bit);
+ RTS51X_DEBUGP("After correct: 0x%x\n",
+ buf[ecc_byte]);
+ }
+ }
+ } else {
+ rts51x_clear_xd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static void xd_pull_ctl_disable(struct rts51x_chip *chip)
+{
+ if (CHECK_PKG(chip, LQFP48)) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5);
+ } else {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x56);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59);
+ }
+}
+
+static void xd_pull_ctl_enable(struct rts51x_chip *chip)
+{
+ if (CHECK_PKG(chip, LQFP48)) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xAA);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5);
+ } else {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xA5);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x59);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59);
+ }
+}
+
+static int reset_xd(struct rts51x_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval, i, j;
+ u8 id_buf[4], redunt[11];
+
+ retval = rts51x_select_card(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 0xFF,
+ XD_PGSTS_NOT_FF);
+ if (chip->asic_code)
+ xd_pull_ctl_disable(chip);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
+ (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3));
+
+ if (!chip->option.FT2_fast_mode) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_INIT, XD_NO_AUTO_PWR_OFF,
+ 0);
+ if (CHECK_PKG(chip, LQFP48) ||
+ chip->option.rts5129_D3318_off_enable) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL,
+ DV3318_AUTO_PWR_OFF,
+ DV3318_AUTO_PWR_OFF);
+ }
+ }
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0);
+ if (!chip->option.FT2_fast_mode) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK,
+ POWER_OFF);
+ }
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ if (!chip->option.FT2_fast_mode) {
+#ifdef SD_XD_IO_FOLLOW_PWR
+ if (CHECK_PKG(chip, LQFP48)
+ || chip->option.rts5129_D3318_off_enable) {
+ rts51x_write_register(chip, CARD_PWR_CTL,
+ LDO_OFF, LDO_OFF);
+ }
+#endif
+
+ wait_timeout(250);
+
+#ifdef SD_XD_IO_FOLLOW_PWR
+ if (CHECK_PKG(chip, LQFP48)
+ || chip->option.rts5129_D3318_off_enable) {
+ rts51x_init_cmd(chip);
+ if (chip->asic_code) {
+ xd_pull_ctl_enable(chip);
+ } else {
+ rts51x_add_cmd(chip, WRITE_REG_CMD,
+ FPGA_PULL_CTL, 0xFF,
+ (FPGA_XD_PULL_CTL_EN1 &
+ FPGA_XD_PULL_CTL_EN2));
+ }
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+
+ retval = card_power_on(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+#ifdef SUPPORT_OCP
+ wait_timeout(50);
+ rts51x_get_card_status(chip, &(chip->card_status));
+ chip->ocp_stat = (chip->card_status >> 4) & 0x03;
+
+ if (chip->ocp_stat & (MS_OCP_NOW | MS_OCP_EVER)) {
+ RTS51X_DEBUGP("Over current, OCPSTAT is 0x%x\n",
+ chip->ocp_stat);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ }
+
+ rts51x_init_cmd(chip);
+
+ if (chip->asic_code)
+ xd_pull_ctl_enable(chip);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
+ (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN,
+ XD_OUTPUT_EN);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CTL, XD_CE_DISEN, XD_CE_DISEN);
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!chip->option.FT2_fast_mode)
+ wait_timeout(200);
+
+ retval = xd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ /* Read ID to check if the timing setting is right */
+ for (i = 0; i < 4; i++) {
+ u8 xd_dat, xd_ctl;
+
+ if (monitor_card_cd(chip, XD_CARD) == CD_NOT_EXIST)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rts51x_init_cmd(chip);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF,
+ XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP *
+ (2 + i + chip->option.xd_rw_step)
+ + XD_TIME_RWN_STEP * (i + chip->option.xd_rwn_step));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF,
+ XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP * (4 +
+ i) + XD_TIME_RWN_STEP * (3 + i));
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_RESET);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ rts51x_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
+ rts51x_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ retval = rts51x_get_rsp(chip, 3, 100);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ xd_dat = chip->rsp_buf[1];
+ xd_ctl = chip->rsp_buf[2];
+ RTS51X_DEBUGP("XD_DAT: 0x%x, XD_CTL: 0x%x\n", xd_dat, xd_ctl);
+
+ if (((xd_dat & READY_FLAG) != READY_STATE)
+ || !(xd_ctl & XD_RDY))
+ continue;
+
+ retval = xd_read_id(chip, READ_ID, id_buf, 4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTS51X_DEBUGP("READ_ID: 0x%x 0x%x 0x%x 0x%x\n",
+ id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
+
+ xd_card->device_code = id_buf[1];
+
+ switch (xd_card->device_code) {
+ case XD_4M_X8_512_1:
+ case XD_4M_X8_512_2:
+ xd_card->block_shift = 4; /* 16 pages per block */
+ xd_card->page_off = 0x0F;
+ xd_card->addr_cycle = 3;
+ xd_card->zone_cnt = 1;
+ xd_card->capacity = 8000; /* 500 * 2 ^ 4 */
+ XD_SET_4MB(xd_card);
+ break;
+ case XD_8M_X8_512:
+ xd_card->block_shift = 4;
+ xd_card->page_off = 0x0F;
+ xd_card->addr_cycle = 3;
+ xd_card->zone_cnt = 1;
+ xd_card->capacity = 16000; /* 1000 * 2 ^ 4 */
+ break;
+ case XD_16M_X8_512:
+ XD_PAGE_512(xd_card); /* 32 pages per block */
+ xd_card->addr_cycle = 3;
+ xd_card->zone_cnt = 1;
+ xd_card->capacity = 32000; /* 1000 * 2 ^ 5 */
+ break;
+ case XD_32M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 3;
+ xd_card->zone_cnt = 2;
+ xd_card->capacity = 64000; /* 2000 * 2 ^ 5 */
+ break;
+ case XD_64M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 4;
+ xd_card->capacity = 128000; /* 4000 * 2 ^ 5 */
+ break;
+ case XD_128M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 8;
+ xd_card->capacity = 256000; /* 8000 * 2 ^ 5 */
+ break;
+ case XD_256M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 16;
+ xd_card->capacity = 512000; /* 16000 * 2 ^ 5 */
+ break;
+ case XD_512M_X8:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 32;
+ xd_card->capacity = 1024000; /* 32000 * 2 ^ 5 */
+ break;
+ case xD_1G_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 64;
+ xd_card->capacity = 2048000; /* 64000 * 2 ^ 5 */
+ break;
+ case xD_2G_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 128;
+ xd_card->capacity = 4096000; /* 128000 * 2 ^ 5 */
+ break;
+ default:
+ continue;
+ }
+
+ /* Confirm timing setting */
+ for (j = 0; j < 10; j++) {
+ retval = xd_read_id(chip, READ_ID, id_buf, 4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (id_buf[1] != xd_card->device_code)
+ break;
+ }
+
+ /* Current timing pass */
+ if (j == 10)
+ break;
+ }
+
+ if (i == 4) {
+ xd_card->block_shift = 0;
+ xd_card->page_off = 0;
+ xd_card->addr_cycle = 0;
+ xd_card->capacity = 0;
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = xd_read_id(chip, READ_xD_ID, id_buf, 4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ RTS51X_DEBUGP("READ_xD_ID: 0x%x 0x%x 0x%x 0x%x\n",
+ id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
+ if (id_buf[2] != XD_ID_CODE)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* Search CIS block */
+ for (i = 0; i < 24; i++) {
+ u32 page_addr;
+
+ if (monitor_card_cd(chip, XD_CARD) == CD_NOT_EXIST)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ page_addr = (u32) i << xd_card->block_shift;
+
+ for (j = 0; j < 3; j++) {
+ retval = xd_read_redundant(chip, page_addr, redunt, 11);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (j == 3)
+ continue;
+
+ if (redunt[BLOCK_STATUS] != XD_GBLK)
+ continue;
+
+ j = 0;
+ /* Check page status */
+ if (redunt[PAGE_STATUS] != XD_GPG) {
+ for (j = 1; j <= 8; j++) {
+ retval =
+ xd_read_redundant(chip, page_addr + j,
+ redunt, 11);
+ if (retval == STATUS_SUCCESS) {
+ if (redunt[PAGE_STATUS] == XD_GPG)
+ break;
+ }
+ }
+
+ if (j == 9)
+ break;
+ }
+
+ if ((redunt[BLOCK_STATUS] == XD_GBLK)
+ && (redunt[PARITY] & XD_BA1_ALL0)) {
+ u8 buf[10];
+
+ page_addr += j;
+
+ retval = xd_read_cis(chip, page_addr, buf, 10);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if ((buf[0] == 0x01) && (buf[1] == 0x03)
+ && (buf[2] == 0xD9)
+ && (buf[3] == 0x01) && (buf[4] == 0xFF)
+ && (buf[5] == 0x18) && (buf[6] == 0x02)
+ && (buf[7] == 0xDF) && (buf[8] == 0x01)
+ && (buf[9] == 0x20)) {
+ xd_card->cis_block = (u16) i;
+ }
+ }
+
+ break;
+ }
+
+ RTS51X_DEBUGP("CIS block: 0x%x\n", xd_card->cis_block);
+ if (xd_card->cis_block == 0xFFFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ chip->capacity[chip->card2lun[XD_CARD]] = xd_card->capacity;
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_check_data_blank(u8 *redunt)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (redunt[PAGE_STATUS + i] != 0xFF)
+ return 0;
+ }
+
+ if ((redunt[PARITY] & (XD_ECC1_ALL1 | XD_ECC2_ALL1)) !=
+ (XD_ECC1_ALL1 | XD_ECC2_ALL1))
+ return 0;
+
+ for (i = 0; i < 4; i++) {
+ if (redunt[RESERVED0 + i] != 0xFF)
+ return 0;
+ }
+
+ return 1;
+}
+
+static u16 xd_load_log_block_addr(u8 *redunt)
+{
+ u16 addr = 0xFFFF;
+
+ if (redunt[PARITY] & XD_BA1_BA2_EQL)
+ addr =
+ ((u16) redunt[BLOCK_ADDR1_H] << 8) | redunt[BLOCK_ADDR1_L];
+ else if (redunt[PARITY] & XD_BA1_VALID)
+ addr =
+ ((u16) redunt[BLOCK_ADDR1_H] << 8) | redunt[BLOCK_ADDR1_L];
+ else if (redunt[PARITY] & XD_BA2_VALID)
+ addr =
+ ((u16) redunt[BLOCK_ADDR2_H] << 8) | redunt[BLOCK_ADDR2_L];
+
+ return addr;
+}
+
+static int xd_init_l2p_tbl(struct rts51x_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int size, i;
+
+ RTS51X_DEBUGP("xd_init_l2p_tbl: zone_cnt = %d\n", xd_card->zone_cnt);
+
+ if (xd_card->zone_cnt < 1)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ size = xd_card->zone_cnt * sizeof(struct zone_entry);
+ RTS51X_DEBUGP("Buffer size for l2p table is %d\n", size);
+
+ xd_card->zone = vmalloc(size);
+ if (!xd_card->zone)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ for (i = 0; i < xd_card->zone_cnt; i++) {
+ xd_card->zone[i].build_flag = 0;
+ xd_card->zone[i].l2p_table = NULL;
+ xd_card->zone[i].free_table = NULL;
+ xd_card->zone[i].get_index = 0;
+ xd_card->zone[i].set_index = 0;
+ xd_card->zone[i].unused_blk_cnt = 0;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static inline void free_zone(struct zone_entry *zone)
+{
+ RTS51X_DEBUGP("free_zone\n");
+ if (!zone)
+ return;
+ zone->build_flag = 0;
+ zone->set_index = 0;
+ zone->get_index = 0;
+ zone->unused_blk_cnt = 0;
+ if (zone->l2p_table) {
+ vfree(zone->l2p_table);
+ zone->l2p_table = NULL;
+ }
+ if (zone->free_table) {
+ vfree(zone->free_table);
+ zone->free_table = NULL;
+ }
+}
+
+static void xd_set_unused_block(struct rts51x_chip *chip, u32 phy_blk)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+ int zone_no;
+
+ zone_no = (int)phy_blk >> 10;
+ if (zone_no >= xd_card->zone_cnt) {
+ RTS51X_DEBUGP("Set unused block to invalid zone"
+ "(zone_no = %d, zone_cnt = %d)\n",
+ zone_no, xd_card->zone_cnt);
+ return;
+ }
+ zone = &(xd_card->zone[zone_no]);
+
+ if (zone->free_table == NULL) {
+ if (xd_build_l2p_tbl(chip, zone_no) != STATUS_SUCCESS)
+ return;
+ }
+
+ if ((zone->set_index >= XD_FREE_TABLE_CNT)
+ || (zone->set_index < 0)) {
+ free_zone(zone);
+ RTS51X_DEBUGP("Set unused block fail, invalid set_index\n");
+ return;
+ }
+
+ RTS51X_DEBUGP("Set unused block to index %d\n", zone->set_index);
+
+ zone->free_table[zone->set_index++] = (u16) (phy_blk & 0x3ff);
+ if (zone->set_index >= XD_FREE_TABLE_CNT)
+ zone->set_index = 0;
+ zone->unused_blk_cnt++;
+}
+
+static u32 xd_get_unused_block(struct rts51x_chip *chip, int zone_no)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+ u32 phy_blk;
+
+ if (zone_no >= xd_card->zone_cnt) {
+ RTS51X_DEBUGP("Get unused block from invalid zone"
+ "(zone_no = %d, zone_cnt = %d)\n",
+ zone_no, xd_card->zone_cnt);
+ TRACE_RET(chip, BLK_NOT_FOUND);
+ }
+ zone = &(xd_card->zone[zone_no]);
+
+ if ((zone->unused_blk_cnt == 0) ||
+ (zone->set_index == zone->get_index)) {
+ free_zone(zone);
+ RTS51X_DEBUGP("Get unused block fail,"
+ "no unused block available\n");
+ TRACE_RET(chip, BLK_NOT_FOUND);
+ }
+ if ((zone->get_index >= XD_FREE_TABLE_CNT) || (zone->get_index < 0)) {
+ free_zone(zone);
+ RTS51X_DEBUGP("Get unused block fail, invalid get_index\n");
+ TRACE_RET(chip, BLK_NOT_FOUND);
+ }
+
+ RTS51X_DEBUGP("Get unused block from index %d\n", zone->get_index);
+
+ phy_blk = zone->free_table[zone->get_index];
+ zone->free_table[zone->get_index++] = 0xFFFF;
+ if (zone->get_index >= XD_FREE_TABLE_CNT)
+ zone->get_index = 0;
+ zone->unused_blk_cnt--;
+
+ phy_blk += ((u32) (zone_no) << 10);
+ return phy_blk;
+}
+
+static void xd_set_l2p_tbl(struct rts51x_chip *chip, int zone_no, u16 log_off,
+ u16 phy_off)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+
+ zone = &(xd_card->zone[zone_no]);
+ zone->l2p_table[log_off] = phy_off;
+}
+
+static u32 xd_get_l2p_tbl(struct rts51x_chip *chip, int zone_no, u16 log_off)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+ int retval;
+
+ zone = &(xd_card->zone[zone_no]);
+ if (zone->l2p_table[log_off] == 0xFFFF) {
+ u32 phy_blk = 0;
+ int i;
+
+ retval = xd_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ RTS51X_DEBUGP("In xd_get_l2p_tbl,"
+ "delay write fail!\n");
+ TRACE_RET(chip, BLK_NOT_FOUND);
+ }
+
+ if (zone->unused_blk_cnt <= 0) {
+ RTS51X_DEBUGP("No unused block!\n");
+ TRACE_RET(chip, BLK_NOT_FOUND);
+ }
+
+ for (i = 0; i < zone->unused_blk_cnt; i++) {
+ phy_blk = xd_get_unused_block(chip, zone_no);
+ if (phy_blk == BLK_NOT_FOUND) {
+ RTS51X_DEBUGP("No unused block available!\n");
+ TRACE_RET(chip, BLK_NOT_FOUND);
+ }
+
+ retval =
+ xd_init_page(chip, phy_blk, log_off, 0,
+ xd_card->page_off + 1);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i >= zone->unused_blk_cnt) {
+ RTS51X_DEBUGP("No good unused block available!\n");
+ TRACE_RET(chip, BLK_NOT_FOUND);
+ }
+
+ xd_set_l2p_tbl(chip, zone_no, log_off, (u16) (phy_blk & 0x3FF));
+ return phy_blk;
+ }
+
+ return (u32) zone->l2p_table[log_off] + ((u32) (zone_no) << 10);
+}
+
+int reset_xd_card(struct rts51x_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+
+ memset(xd_card, 0, sizeof(struct xd_info));
+
+ xd_card->block_shift = 0;
+ xd_card->page_off = 0;
+ xd_card->addr_cycle = 0;
+ xd_card->capacity = 0;
+ xd_card->zone_cnt = 0;
+ xd_card->cis_block = 0xFFFF;
+ xd_card->delay_write.delay_write_flag = 0;
+
+ enable_card_clock(chip, XD_CARD);
+
+ retval = reset_xd(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (chip->option.reset_or_rw_fail_set_pad_drive) {
+ rts51x_write_register(chip, CARD_DRIVE_SEL,
+ SD20_DRIVE_MASK, DRIVE_8mA);
+ }
+ TRACE_RET(chip, retval);
+ }
+
+ retval = xd_init_l2p_tbl(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_mark_bad_block(struct rts51x_chip *chip, u32 phy_blk)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+ u32 page_addr;
+ u8 reg = 0;
+
+ RTS51X_DEBUGP("mark block 0x%x as bad block\n", phy_blk);
+
+ if (phy_blk == BLK_NOT_FOUND)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF,
+ XD_LATER_BBLK);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, 0xFF, 0xFF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, 0xFF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR2_H, 0xFF, 0xFF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR2_L, 0xFF, 0xFF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED0, 0xFF, 0xFF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED1, 0xFF, 0xFF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED2, 0xFF, 0xFF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED3, 0xFF, 0xFF);
+
+ page_addr = phy_blk << xd_card->block_shift;
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ /* Specify page count */
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF,
+ xd_card->page_off + 1);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_WRITE_REDUNDANT);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
+ XD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = rts51x_get_rsp(chip, 1, 100);
+
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+ rts51x_ep0_read_register(chip, XD_DAT, &reg);
+ if (reg & PROGRAM_ERROR)
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ else
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_init_page(struct rts51x_chip *chip, u32 phy_blk, u16 logoff,
+ u8 start_page, u8 end_page)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+ u32 page_addr;
+ u8 reg = 0;
+
+ RTS51X_DEBUGP("Init block 0x%x\n", phy_blk);
+
+ if (start_page > end_page)
+ TRACE_RET(chip, STATUS_FAIL);
+ if (phy_blk == BLK_NOT_FOUND)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, 0xFF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, 0xFF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, 0xFF,
+ (u8) (logoff >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF,
+ (u8) logoff);
+
+ page_addr = (phy_blk << xd_card->block_shift) + start_page;
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM,
+ XD_BA_TRANSFORM);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF,
+ (end_page - start_page));
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_WRITE_REDUNDANT);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
+ XD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = rts51x_get_rsp(chip, 1, 500);
+
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+ rts51x_ep0_read_register(chip, XD_DAT, &reg);
+ if (reg & PROGRAM_ERROR) {
+ xd_mark_bad_block(chip, phy_blk);
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ } else {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_copy_page(struct rts51x_chip *chip,
+ u32 old_blk, u32 new_blk, u8 start_page, u8 end_page)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ u32 old_page, new_page;
+ u8 i, reg = 0;
+ int retval;
+
+ RTS51X_DEBUGP("Copy page from block 0x%x to block 0x%x\n", old_blk,
+ new_blk);
+
+ if (start_page > end_page)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((old_blk == BLK_NOT_FOUND) || (new_blk == BLK_NOT_FOUND))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ old_page = (old_blk << xd_card->block_shift) + start_page;
+ new_page = (new_blk << xd_card->block_shift) + start_page;
+
+ XD_CLR_BAD_NEWBLK(xd_card);
+
+ RTS51X_WRITE_REG(chip, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
+
+ for (i = start_page; i < end_page; i++) {
+ if (monitor_card_cd(chip, XD_CARD) == CD_NOT_EXIST) {
+ RTS51X_WRITE_REG(chip, CARD_STOP, XD_STOP | XD_CLR_ERR,
+ XD_STOP | XD_CLR_ERR);
+ xd_set_err_code(chip, XD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rts51x_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, old_page, XD_RW_ADDR);
+
+ /* Single page read */
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
+ XD_AUTO_CHK_DATA_STATUS, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_READ_PAGES);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CR | STAGE_XD_STATUS, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 4, 500);
+ if ((retval != STATUS_SUCCESS) ||
+ (chip->rsp_buf[2] & (XD_ECC1_ERROR | XD_ECC2_ERROR))) {
+ rts51x_clear_xd_error(chip);
+ reg = 0;
+ rts51x_ep0_read_register(chip, XD_CTL, &reg);
+ if (reg & (XD_ECC1_ERROR | XD_ECC2_ERROR)) {
+ wait_timeout(100);
+
+ if (monitor_card_cd(chip, XD_CARD) ==
+ CD_NOT_EXIST) {
+ xd_set_err_code(chip, XD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (((reg &
+ (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
+ == (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
+ || ((reg & (XD_ECC2_ERROR |
+ XD_ECC2_UNCORRECTABLE)) ==
+ (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
+ RTS51X_WRITE_REG(chip, XD_PAGE_STATUS,
+ 0xFF, XD_BPG);
+ RTS51X_WRITE_REG(chip, XD_BLOCK_STATUS,
+ 0xFF, XD_GBLK);
+ XD_SET_BAD_OLDBLK(xd_card);
+ RTS51X_DEBUGP("old block 0x%x"
+ "ecc error\n", old_blk);
+ }
+ } else {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ if (XD_CHK_BAD_OLDBLK(xd_card))
+ rts51x_clear_xd_error(chip);
+
+ rts51x_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, new_page, XD_RW_ADDR);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_WRITE_PAGES);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 1, 300);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+ reg = 0;
+ rts51x_ep0_read_register(chip, XD_DAT, &reg);
+ if (reg & PROGRAM_ERROR) {
+ xd_mark_bad_block(chip, new_blk);
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ XD_SET_BAD_NEWBLK(xd_card);
+ } else {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ }
+ TRACE_RET(chip, retval);
+ }
+
+ old_page++;
+ new_page++;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef XD_SPEEDUP
+static int xd_auto_copy_page(struct rts51x_chip *chip,
+ u32 old_blk, u32 new_blk,
+ u8 start_page, u8 end_page)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ u32 old_page, new_page;
+ int retval;
+ u8 page_count;
+
+ RTS51X_DEBUGP("Auto copy page from block 0x%x to block 0x%x\n",
+ old_blk, new_blk);
+
+ if (start_page > end_page)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ page_count = end_page - start_page;
+
+ if ((old_blk == BLK_NOT_FOUND) || (new_blk == BLK_NOT_FOUND))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ old_page = (old_blk << xd_card->block_shift) + start_page;
+ new_page = (new_blk << xd_card->block_shift) + start_page;
+
+ XD_CLR_BAD_NEWBLK(xd_card);
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CP_WAITTIME, 0x03, WAIT_FF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CP_PAGELEN, 0xFF, page_count);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CP_READADDR0, 0xFF, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CP_READADDR1, 0xFF,
+ (u8) old_page);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CP_READADDR2, 0xFF,
+ (u8) (old_page >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CP_READADDR3, 0xFF,
+ (u8) (old_page >> 16));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CP_READADDR4, 0xFF, 0);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CP_WRITEADDR0, 0xFF, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CP_WRITEADDR1, 0xFF,
+ (u8) new_page);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CP_WRITEADDR2, 0xFF,
+ (u8) (new_page >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CP_WRITEADDR3, 0xFF,
+ (u8) (new_page >> 16));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CP_WRITEADDR4, 0xFF, 0);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CFG,
+ XD_BA_TRANSFORM | XD_ADDR_MASK, 0 | xd_card->addr_cycle);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
+ XD_AUTO_CHK_DATA_STATUS, 0);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_COPY_PAGES);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
+ XD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+ TRACE_GOTO(chip, Copy_Fail);
+ }
+
+ retval = rts51x_get_rsp(chip, 1, 800);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+ TRACE_GOTO(chip, Copy_Fail);
+ }
+
+ return STATUS_SUCCESS;
+
+Copy_Fail:
+ retval = xd_copy_page(chip, old_blk, new_blk, start_page, end_page);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+static int xd_reset_cmd(struct rts51x_chip *chip)
+{
+ int retval;
+ u8 xd_dat, xd_ctl;
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_RESET);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
+ XD_TRANSFER_END);
+ rts51x_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
+ rts51x_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 3, 100);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ xd_dat = chip->rsp_buf[1];
+ xd_ctl = chip->rsp_buf[2];
+ if (((xd_dat & READY_FLAG) == READY_STATE) && (xd_ctl & XD_RDY))
+ return STATUS_SUCCESS;
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int xd_erase_block(struct rts51x_chip *chip, u32 phy_blk)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ u32 page_addr;
+ u8 reg = 0, xd_dat;
+ int i, retval;
+
+ if (phy_blk == BLK_NOT_FOUND)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ page_addr = phy_blk << xd_card->block_shift;
+
+ for (i = 0; i < 3; i++) {
+ rts51x_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, page_addr, XD_ERASE_ADDR);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_ERASE);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+ rts51x_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 2, 300);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+ rts51x_ep0_read_register(chip, XD_DAT, &reg);
+ if (reg & PROGRAM_ERROR) {
+ xd_mark_bad_block(chip, phy_blk);
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ xd_set_err_code(chip, XD_ERASE_FAIL);
+ }
+ retval = xd_reset_cmd(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ continue;
+ }
+ xd_dat = chip->rsp_buf[1];
+ if (xd_dat & PROGRAM_ERROR) {
+ xd_mark_bad_block(chip, phy_blk);
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+ }
+
+ xd_mark_bad_block(chip, phy_blk);
+ xd_set_err_code(chip, XD_ERASE_FAIL);
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int xd_build_l2p_tbl(struct rts51x_chip *chip, int zone_no)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+ int retval;
+ u32 start, end, i;
+ u16 max_logoff, cur_fst_page_logoff, cur_lst_page_logoff,
+ ent_lst_page_logoff;
+ u8 redunt[11];
+
+ RTS51X_DEBUGP("xd_build_l2p_tbl: %d\n", zone_no);
+
+ if (xd_card->zone == NULL) {
+ retval = xd_init_l2p_tbl(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ if (xd_card->zone[zone_no].build_flag) {
+ RTS51X_DEBUGP("l2p table of zone %d has been built\n",
+ zone_no);
+ return STATUS_SUCCESS;
+ }
+
+ zone = &(xd_card->zone[zone_no]);
+
+ if (zone->l2p_table == NULL) {
+ zone->l2p_table = vmalloc(2000);
+ if (zone->l2p_table == NULL)
+ TRACE_GOTO(chip, Build_Fail);
+ }
+ memset((u8 *) (zone->l2p_table), 0xff, 2000);
+
+ if (zone->free_table == NULL) {
+ zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2);
+ if (zone->free_table == NULL)
+ TRACE_GOTO(chip, Build_Fail);
+ }
+ memset((u8 *) (zone->free_table), 0xff, XD_FREE_TABLE_CNT * 2);
+
+ if (zone_no == 0) {
+ if (xd_card->cis_block == 0xFFFF)
+ start = 0;
+ else
+ start = xd_card->cis_block + 1;
+ if (XD_CHK_4MB(xd_card)) {
+ end = 0x200;
+ max_logoff = 499;
+ } else {
+ end = 0x400;
+ max_logoff = 999;
+ }
+ } else {
+ start = (u32) (zone_no) << 10;
+ end = (u32) (zone_no + 1) << 10;
+ max_logoff = 999;
+ }
+
+ RTS51X_DEBUGP("start block 0x%x, end block 0x%x\n", start, end);
+
+ zone->set_index = zone->get_index = 0;
+ zone->unused_blk_cnt = 0;
+
+ for (i = start; i < end; i++) {
+ u32 page_addr = i << xd_card->block_shift;
+ u32 phy_block;
+
+ retval = xd_read_redundant(chip, page_addr, redunt, 11);
+ if (retval != STATUS_SUCCESS)
+ continue;
+
+ if (redunt[BLOCK_STATUS] != 0xFF) {
+ RTS51X_DEBUGP("bad block\n");
+ continue;
+ }
+
+ if (xd_check_data_blank(redunt)) {
+ RTS51X_DEBUGP("blank block\n");
+ xd_set_unused_block(chip, i);
+ continue;
+ }
+
+ cur_fst_page_logoff = xd_load_log_block_addr(redunt);
+ if ((cur_fst_page_logoff == 0xFFFF)
+ || (cur_fst_page_logoff > max_logoff)) {
+ retval = xd_erase_block(chip, i);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, i);
+ continue;
+ }
+ if ((zone_no == 0) && (cur_fst_page_logoff == 0)
+ && (redunt[PAGE_STATUS] != XD_GPG))
+ XD_SET_MBR_FAIL(xd_card);
+
+ if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) {
+ zone->l2p_table[cur_fst_page_logoff] =
+ (u16) (i & 0x3FF);
+ continue;
+ }
+
+ phy_block =
+ zone->l2p_table[cur_fst_page_logoff] +
+ ((u32) ((zone_no) << 10));
+
+ page_addr = ((i + 1) << xd_card->block_shift) - 1;
+
+ retval = xd_read_redundant(chip, page_addr, redunt, 11);
+ if (retval != STATUS_SUCCESS)
+ continue;
+
+ cur_lst_page_logoff = xd_load_log_block_addr(redunt);
+ if (cur_lst_page_logoff == cur_fst_page_logoff) {
+ int m;
+
+ page_addr =
+ ((phy_block + 1) << xd_card->block_shift) - 1;
+
+ for (m = 0; m < 3; m++) {
+ retval =
+ xd_read_redundant(chip, page_addr, redunt,
+ 11);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+
+ if (m == 3) {
+ zone->l2p_table[cur_fst_page_logoff] =
+ (u16) (i & 0x3FF);
+ retval = xd_erase_block(chip, phy_block);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, phy_block);
+ continue;
+ }
+
+ ent_lst_page_logoff = xd_load_log_block_addr(redunt);
+ if (ent_lst_page_logoff != cur_fst_page_logoff) {
+ zone->l2p_table[cur_fst_page_logoff] =
+ (u16) (i & 0x3FF);
+ retval = xd_erase_block(chip, phy_block);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, phy_block);
+ continue;
+ } else {
+ retval = xd_erase_block(chip, i);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, i);
+ }
+ } else {
+ retval = xd_erase_block(chip, i);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, i);
+ }
+ }
+
+ if (XD_CHK_4MB(xd_card))
+ end = 500;
+ else
+ end = 1000;
+
+ i = 0;
+ for (start = 0; start < end; start++) {
+ if (zone->l2p_table[start] == 0xFFFF)
+ i++;
+ }
+
+ RTS51X_DEBUGP("Block count %d, invalid L2P entry %d\n", end, i);
+ RTS51X_DEBUGP("Total unused block: %d\n", zone->unused_blk_cnt);
+
+ if ((zone->unused_blk_cnt - i) < 1)
+ chip->card_wp |= XD_CARD;
+
+ zone->build_flag = 1;
+
+ return STATUS_SUCCESS;
+
+Build_Fail:
+ if (zone->l2p_table) {
+ vfree(zone->l2p_table);
+ zone->l2p_table = NULL;
+ }
+ if (zone->free_table) {
+ vfree(zone->free_table);
+ zone->free_table = NULL;
+ }
+
+ return STATUS_FAIL;
+}
+
+static int xd_send_cmd(struct rts51x_chip *chip, u8 cmd)
+{
+ int retval;
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, cmd);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_SET_CMD);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
+ XD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval = rts51x_get_rsp(chip, 1, 200);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_read_multiple_pages(struct rts51x_chip *chip, u32 phy_blk,
+ u32 log_blk, u8 start_page, u8 end_page,
+ u8 *buf, void **ptr, unsigned int *offset)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ u32 page_addr, new_blk;
+ u16 log_off;
+ u8 reg_val, page_cnt;
+ int zone_no, retval, i;
+
+ if (start_page > end_page)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ page_cnt = end_page - start_page;
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16) (log_blk % 1000);
+
+ if ((phy_blk & 0x3FF) == 0x3FF) {
+ for (i = 0; i < 256; i++) {
+ page_addr = ((u32) i) << xd_card->block_shift;
+
+ retval = xd_read_redundant(chip, page_addr, NULL, 0);
+ if (retval == STATUS_SUCCESS)
+ break;
+
+ if (monitor_card_cd(chip, XD_CARD) == CD_NOT_EXIST) {
+ xd_set_err_code(chip, XD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ page_addr = (phy_blk << xd_card->block_shift) + start_page;
+
+ rts51x_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_PPB_TO_SIE,
+ XD_PPB_TO_SIE);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ RING_BUFFER);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
+ XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
+
+ trans_dma_enable(chip->srb->sc_data_direction, chip, page_cnt * 512,
+ DMA_512);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_READ_PAGES);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END | XD_PPB_EMPTY,
+ XD_TRANSFER_END | XD_PPB_EMPTY);
+
+ retval = rts51x_send_cmd(chip, MODE_CDIR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval =
+ rts51x_transfer_data_partial(chip, RCV_BULK_PIPE(chip), (void *)buf,
+ ptr, offset, page_cnt * 512,
+ scsi_sg_count(chip->srb), NULL, 2000);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+
+ if (retval == STATUS_TIMEDOUT) {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, retval);
+ } else {
+ TRACE_GOTO(chip, Fail);
+ }
+ }
+ retval = rts51x_get_rsp(chip, 1, 200);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+
+ if (retval == STATUS_TIMEDOUT) {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, retval);
+ } else {
+ TRACE_GOTO(chip, Fail);
+ }
+ }
+
+ return STATUS_SUCCESS;
+
+Fail:
+ rts51x_ep0_read_register(chip, XD_PAGE_STATUS, &reg_val);
+ RTS51X_DEBUGP("XD_PAGE_STATUS: 0x%x\n", reg_val);
+
+ if (reg_val != XD_GPG)
+ xd_set_err_code(chip, XD_PRG_ERROR);
+
+ rts51x_ep0_read_register(chip, XD_CTL, &reg_val);
+ RTS51X_DEBUGP("XD_CTL: 0x%x\n", reg_val);
+
+ /* Handle uncorrectable ECC error */
+ if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
+ == (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
+ || ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))
+ == (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
+ wait_timeout(100);
+
+ if (monitor_card_cd(chip, XD_CARD) == CD_NOT_EXIST) {
+ xd_set_err_code(chip, XD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ xd_set_err_code(chip, XD_ECC_ERROR);
+
+ new_blk = xd_get_unused_block(chip, zone_no);
+ if (new_blk == NO_NEW_BLK) {
+ XD_CLR_BAD_OLDBLK(xd_card);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#ifdef XD_SPEEDUP
+ retval =
+ xd_auto_copy_page(chip, phy_blk, new_blk, 0,
+ xd_card->page_off + 1);
+#else
+ retval =
+ xd_copy_page(chip, phy_blk, new_blk, 0,
+ xd_card->page_off + 1);
+#endif
+ if (retval != STATUS_SUCCESS) {
+ if (!XD_CHK_BAD_NEWBLK(xd_card)) {
+ retval = xd_erase_block(chip, new_blk);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, new_blk);
+ } else {
+ XD_CLR_BAD_NEWBLK(xd_card);
+ }
+ XD_CLR_BAD_OLDBLK(xd_card);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ xd_set_l2p_tbl(chip, zone_no, log_off, (u16) (new_blk & 0x3FF));
+ xd_erase_block(chip, phy_blk);
+ xd_mark_bad_block(chip, phy_blk);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int xd_finish_write(struct rts51x_chip *chip,
+ u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval, zone_no;
+ u16 log_off;
+
+ RTS51X_DEBUGP("xd_finish_write, old_blk = 0x%x, new_blk = 0x%x,"
+ "log_blk = 0x%x\n", old_blk, new_blk, log_blk);
+
+ if (page_off > xd_card->page_off)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16) (log_blk % 1000);
+
+ if (old_blk == BLK_NOT_FOUND) {
+ retval = xd_init_page(chip, new_blk, log_off,
+ page_off, xd_card->page_off + 1);
+ if (retval != STATUS_SUCCESS) {
+ retval = xd_erase_block(chip, new_blk);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, new_blk);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+#ifdef XD_SPEEDUP
+ retval = xd_auto_copy_page(chip, old_blk, new_blk,
+ page_off, xd_card->page_off + 1);
+#else
+ retval = xd_copy_page(chip, old_blk, new_blk,
+ page_off, xd_card->page_off + 1);
+#endif
+ if (retval != STATUS_SUCCESS) {
+ if (!XD_CHK_BAD_NEWBLK(xd_card)) {
+ retval = xd_erase_block(chip, new_blk);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, new_blk);
+ }
+ XD_CLR_BAD_NEWBLK(xd_card);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = xd_erase_block(chip, old_blk);
+ if (retval == STATUS_SUCCESS) {
+ if (XD_CHK_BAD_OLDBLK(xd_card)) {
+ xd_mark_bad_block(chip, old_blk);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ } else {
+ /* Add source block to unused block */
+ xd_set_unused_block(chip, old_blk);
+ }
+ } else {
+ xd_set_err_code(chip, XD_NO_ERROR);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ }
+ }
+
+ /* Add target block to L2P table */
+ xd_set_l2p_tbl(chip, zone_no, log_off, (u16) (new_blk & 0x3FF));
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_prepare_write(struct rts51x_chip *chip,
+ u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
+{
+ int retval;
+
+ RTS51X_DEBUGP("xd_prepare_write, old_blk = 0x%x, new_blk = 0x%x,"
+ "log_blk = 0x%x, page_off = %d\n",
+ old_blk, new_blk, log_blk, (int)page_off);
+
+ if (page_off) {
+#ifdef XD_SPEEDUP
+ retval = xd_auto_copy_page(chip, old_blk, new_blk, 0, page_off);
+#else
+ retval = xd_copy_page(chip, old_blk, new_blk, 0, page_off);
+#endif
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_write_multiple_pages(struct rts51x_chip *chip, u32 old_blk,
+ u32 new_blk, u32 log_blk, u8 start_page,
+ u8 end_page, u8 *buf, void **ptr,
+ unsigned int *offset)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ u32 page_addr;
+ int zone_no, retval;
+ u16 log_off;
+ u8 page_cnt, reg_val;
+
+ RTS51X_DEBUGP("xd_write_multiple_pages, old_blk = 0x%x,"
+ "new_blk = 0x%x, log_blk = 0x%x\n",
+ old_blk, new_blk, log_blk);
+
+ if (start_page > end_page)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ page_cnt = end_page - start_page;
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16) (log_blk % 1000);
+
+ page_addr = (new_blk << xd_card->block_shift) + start_page;
+
+ /* Send index command */
+ retval = xd_send_cmd(chip, READ1_1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ rts51x_init_cmd(chip);
+
+ /* Prepare redundant field */
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, 0xFF,
+ (u8) (log_off >> 8));
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF,
+ (u8) log_off);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_GBLK);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ /* Transform the block address by hardware */
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM,
+ XD_BA_TRANSFORM);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ RING_BUFFER);
+
+ trans_dma_enable(chip->srb->sc_data_direction, chip, page_cnt * 512,
+ DMA_512);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_WRITE_PAGES);
+ rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
+ XD_TRANSFER_END);
+
+ retval = rts51x_send_cmd(chip, MODE_CDOR, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ retval =
+ rts51x_transfer_data_partial(chip, SND_BULK_PIPE(chip), (void *)buf,
+ ptr, offset, page_cnt * 512,
+ scsi_sg_count(chip->srb), NULL, 2000);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+
+ if (retval == STATUS_TIMEDOUT) {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, retval);
+ } else {
+ TRACE_GOTO(chip, Fail);
+ }
+ }
+ retval = rts51x_get_rsp(chip, 1, 200);
+ if (retval != STATUS_SUCCESS) {
+ rts51x_clear_xd_error(chip);
+
+ if (retval == STATUS_TIMEDOUT) {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, retval);
+ } else {
+ TRACE_GOTO(chip, Fail);
+ }
+ }
+
+ if (end_page == (xd_card->page_off + 1)) {
+ xd_card->delay_write.delay_write_flag = 0;
+
+ if (old_blk != BLK_NOT_FOUND) {
+ retval = xd_erase_block(chip, old_blk);
+ if (retval == STATUS_SUCCESS) {
+ if (XD_CHK_BAD_OLDBLK(xd_card)) {
+ xd_mark_bad_block(chip, old_blk);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ } else {
+ xd_set_unused_block(chip, old_blk);
+ }
+ } else {
+ xd_set_err_code(chip, XD_NO_ERROR);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ }
+ }
+ xd_set_l2p_tbl(chip, zone_no, log_off, (u16) (new_blk & 0x3FF));
+ }
+
+ return STATUS_SUCCESS;
+
+Fail:
+ rts51x_ep0_read_register(chip, XD_DAT, &reg_val);
+ RTS51X_DEBUGP("XD_DAT: 0x%x\n", reg_val);
+
+ if (reg_val & PROGRAM_ERROR) {
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ xd_mark_bad_block(chip, new_blk);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+int xd_delay_write(struct rts51x_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
+ int retval;
+
+ if (delay_write->delay_write_flag) {
+ RTS51X_DEBUGP("xd_delay_write\n");
+ retval = xd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ delay_write->delay_write_flag = 0;
+ retval = xd_finish_write(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->logblock,
+ delay_write->pageoff);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int xd_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 start_sector,
+ u16 sector_cnt)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
+ int retval, zone_no;
+ u32 log_blk, old_blk = 0, new_blk = 0;
+ u16 log_off, total_sec_cnt = sector_cnt;
+ u8 start_page, end_page = 0, page_cnt;
+ u8 *buf;
+ void *ptr = NULL;
+ unsigned int offset = 0;
+
+ xd_set_err_code(chip, XD_NO_ERROR);
+
+ xd_card->counter = 0;
+
+ RTS51X_DEBUGP("xd_rw: scsi_bufflen = %d, scsi_sg_count = %d\n",
+ scsi_bufflen(srb), scsi_sg_count(srb));
+ RTS51X_DEBUGP("Data direction: %s\n",
+ (srb->sc_data_direction ==
+ DMA_TO_DEVICE) ? "write" : "read");
+
+ buf = (u8 *) scsi_sglist(srb);
+
+ retval = xd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ log_blk = start_sector >> xd_card->block_shift;
+ start_page = (u8) start_sector & xd_card->page_off;
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16) (log_blk % 1000);
+
+ RTS51X_DEBUGP("log_blk = 0x%x\n", log_blk);
+
+ if (xd_card->zone[zone_no].build_flag == 0) {
+ retval = xd_build_l2p_tbl(chip, zone_no);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_fail |= XD_CARD;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, retval);
+ }
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ if (delay_write->delay_write_flag &&
+ (delay_write->logblock == log_blk) &&
+ (start_page > delay_write->pageoff)) {
+ delay_write->delay_write_flag = 0;
+ if (delay_write->old_phyblock != BLK_NOT_FOUND) {
+#ifdef XD_SPEEDUP
+ retval = xd_auto_copy_page(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->pageoff, start_page);
+#else
+ retval = xd_copy_page(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->pageoff,
+ start_page);
+#endif
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, retval);
+ }
+ }
+ old_blk = delay_write->old_phyblock;
+ new_blk = delay_write->new_phyblock;
+ } else if (delay_write->delay_write_flag &&
+ (delay_write->logblock == log_blk) &&
+ (start_page == delay_write->pageoff)) {
+ delay_write->delay_write_flag = 0;
+ old_blk = delay_write->old_phyblock;
+ new_blk = delay_write->new_phyblock;
+ } else {
+ retval = xd_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, retval);
+ }
+ old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
+ new_blk = xd_get_unused_block(chip, zone_no);
+ if ((old_blk == BLK_NOT_FOUND)
+ || (new_blk == BLK_NOT_FOUND)) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, retval);
+ }
+
+ retval =
+ xd_prepare_write(chip, old_blk, new_blk, log_blk,
+ start_page);
+ if (retval != STATUS_SUCCESS) {
+ if (monitor_card_cd(chip, XD_CARD) ==
+ CD_NOT_EXIST) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, retval);
+ }
+ }
+ } else {
+ retval = xd_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (monitor_card_cd(chip, XD_CARD) == CD_NOT_EXIST) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, retval);
+ }
+
+ old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
+ if (old_blk == BLK_NOT_FOUND) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ RTS51X_DEBUGP("old_blk = 0x%x\n", old_blk);
+ if (srb->sc_data_direction == DMA_TO_DEVICE)
+ RTS51X_DEBUGP("new_blk = 0x%x\n", new_blk);
+
+ while (total_sec_cnt) {
+ if ((start_page + total_sec_cnt) > (xd_card->page_off + 1))
+ end_page = xd_card->page_off + 1;
+ else
+ end_page = start_page + (u8) total_sec_cnt;
+ page_cnt = end_page - start_page;
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ retval = xd_read_multiple_pages(chip, old_blk, log_blk,
+ start_page, end_page,
+ buf, &ptr, &offset);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ retval =
+ xd_write_multiple_pages(chip, old_blk, new_blk,
+ log_blk, start_page,
+ end_page, buf, &ptr,
+ &offset);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ total_sec_cnt -= page_cnt;
+
+ if (total_sec_cnt == 0)
+ break;
+
+ log_blk++;
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16) (log_blk % 1000);
+
+ if (xd_card->zone[zone_no].build_flag == 0) {
+ retval = xd_build_l2p_tbl(chip, zone_no);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_fail |= XD_CARD;
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, retval);
+ }
+ }
+
+ old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
+ if (old_blk == BLK_NOT_FOUND) {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ new_blk = xd_get_unused_block(chip, zone_no);
+ if (new_blk == BLK_NOT_FOUND) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ start_page = 0;
+ }
+
+ if ((srb->sc_data_direction == DMA_TO_DEVICE) &&
+ (end_page != (xd_card->page_off + 1))) {
+ delay_write->delay_write_flag = 1;
+ delay_write->old_phyblock = old_blk;
+ delay_write->new_phyblock = new_blk;
+ delay_write->logblock = log_blk;
+ delay_write->pageoff = end_page;
+ }
+
+ scsi_set_resid(srb, 0);
+
+ return STATUS_SUCCESS;
+}
+
+void xd_free_l2p_tbl(struct rts51x_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int i = 0;
+
+ if (xd_card->zone != NULL) {
+ for (i = 0; i < xd_card->zone_cnt; i++) {
+ if (xd_card->zone[i].l2p_table != NULL) {
+ vfree(xd_card->zone[i].l2p_table);
+ xd_card->zone[i].l2p_table = NULL;
+ }
+ if (xd_card->zone[i].free_table != NULL) {
+ vfree(xd_card->zone[i].free_table);
+ xd_card->zone[i].free_table = NULL;
+ }
+ }
+ vfree(xd_card->zone);
+ xd_card->zone = NULL;
+ }
+}
+
+void xd_cleanup_work(struct rts51x_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+
+ if (xd_card->delay_write.delay_write_flag) {
+ RTS51X_DEBUGP("xD: delay write\n");
+ xd_delay_write(chip);
+ xd_card->counter = 0;
+ }
+}
+
+int xd_power_off_card3v3(struct rts51x_chip *chip)
+{
+ int retval;
+
+ rts51x_init_cmd(chip);
+
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_CLK_EN, XD_CLK_EN, 0);
+
+ if (chip->asic_code)
+ xd_pull_ctl_disable(chip);
+ else
+ rts51x_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF, 0xDF);
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0);
+ if (!chip->option.FT2_fast_mode) {
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK,
+ POWER_OFF);
+ if (CHECK_PKG(chip, LQFP48)
+ || chip->option.rts5129_D3318_off_enable)
+ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL,
+ DV3318_AUTO_PWR_OFF, 0);
+ }
+
+ retval = rts51x_send_cmd(chip, MODE_C, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ return STATUS_SUCCESS;
+}
+
+int release_xd_card(struct rts51x_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+
+ RTS51X_DEBUGP("elease_xd_card\n");
+
+ chip->card_ready &= ~XD_CARD;
+ chip->card_fail &= ~XD_CARD;
+ chip->card_wp &= ~XD_CARD;
+
+ xd_card->delay_write.delay_write_flag = 0;
+
+ xd_free_l2p_tbl(chip);
+
+ rts51x_write_register(chip, SFSM_ED, HW_CMD_STOP, HW_CMD_STOP);
+
+ retval = xd_power_off_card3v3(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ if (chip->asic_code && CHECK_PKG(chip, QFN24))
+ wait_timeout(20);
+
+ return STATUS_SUCCESS;
+}
diff --git a/drivers/staging/rts5139/xd.h b/drivers/staging/rts5139/xd.h
new file mode 100644
index 00000000000..fa695903ba6
--- /dev/null
+++ b/drivers/staging/rts5139/xd.h
@@ -0,0 +1,193 @@
+/* Driver for Realtek RTS51xx USB card reader
+ * Header file
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#ifndef __RTS51X_XD_H
+#define __RTS51X_XD_H
+
+/* Error Codes */
+#define XD_NO_ERROR 0x00
+#define XD_NO_MEMORY 0x80
+#define XD_PRG_ERROR 0x40
+#define XD_NO_CARD 0x20
+#define XD_READ_FAIL 0x10
+#define XD_ERASE_FAIL 0x08
+#define XD_WRITE_FAIL 0x04
+#define XD_ECC_ERROR 0x02
+#define XD_TO_ERROR 0x01
+
+/* XD Commands */
+#define READ1_1 0x00
+#define READ1_2 0x01
+#define READ2 0x50
+#define READ_ID 0x90
+#define RESET 0xff
+#define PAGE_PRG_1 0x80
+#define PAGE_PRG_2 0x10
+#define BLK_ERASE_1 0x60
+#define BLK_ERASE_2 0xD0
+#define READ_STS 0x70
+#define READ_xD_ID 0x9A
+#define COPY_BACK_512 0x8A
+#define COPY_BACK_2K 0x85
+#define READ1_1_2 0x30
+#define READ1_1_3 0x35
+#define CHG_DAT_OUT_1 0x05
+#define RDM_DAT_OUT_1 0x05
+#define CHG_DAT_OUT_2 0xE0
+#define RDM_DAT_OUT_2 0xE0
+#define CHG_DAT_OUT_2 0xE0
+#define CHG_DAT_IN_1 0x85
+#define CACHE_PRG 0x15
+
+/* Redundant Area Related */
+#define XD_EXTRA_SIZE 0x10
+#define XD_2K_EXTRA_SIZE 0x40
+
+/* Define for XD Status */
+#define NOT_WRITE_PROTECTED 0x80
+#define READY_STATE 0x40
+#define PROGRAM_ERROR 0x01
+#define PROGRAM_ERROR_N_1 0x02
+#define INTERNAL_READY 0x20
+#define READY_FLAG 0x5F
+
+/* Define for device code */
+#define XD_8M_X8_512 0xE6
+#define XD_16M_X8_512 0x73
+#define XD_32M_X8_512 0x75
+#define XD_64M_X8_512 0x76
+#define XD_128M_X8_512 0x79
+#define XD_256M_X8_512 0x71
+#define XD_128M_X8_2048 0xF1
+#define XD_256M_X8_2048 0xDA
+#define XD_512M_X8 0xDC
+#define XD_128M_X16_2048 0xC1
+#define XD_4M_X8_512_1 0xE3
+#define XD_4M_X8_512_2 0xE5
+#define xD_1G_X8_512 0xD3
+#define xD_2G_X8_512 0xD5
+
+#define XD_ID_CODE 0xB5
+
+#define VENDOR_BLOCK 0xEFFF
+#define CIS_BLOCK 0xDFFF
+
+#define BLK_NOT_FOUND 0xFFFFFFFF
+
+#define NO_NEW_BLK 0xFFFFFFFF
+
+#define PAGE_CORRECTABLE 0x0
+#define PAGE_NOTCORRECTABLE 0x1
+
+#define NO_OFFSET 0x0
+#define WITH_OFFSET 0x1
+
+#define Sect_Per_Page 4
+#define XD_ADDR_MODE_2C XD_ADDR_MODE_2A
+
+#define ZONE0_BAD_BLOCK 23
+#define NOT_ZONE0_BAD_BLOCK 24
+
+/* Assign address mode */
+#define XD_RW_ADDR 0x01
+#define XD_ERASE_ADDR 0x02
+
+/* Macro Definition */
+#define XD_PAGE_512(xd_card) \
+ do { \
+ (xd_card)->block_shift = 5; \
+ (xd_card)->page_off = 0x1F; \
+ } while (0)
+
+#define XD_SET_BAD_NEWBLK(xd_card) ((xd_card)->multi_flag |= 0x01)
+#define XD_CLR_BAD_NEWBLK(xd_card) ((xd_card)->multi_flag &= ~0x01)
+#define XD_CHK_BAD_NEWBLK(xd_card) ((xd_card)->multi_flag & 0x01)
+
+#define XD_SET_BAD_OLDBLK(xd_card) ((xd_card)->multi_flag |= 0x02)
+#define XD_CLR_BAD_OLDBLK(xd_card) ((xd_card)->multi_flag &= ~0x02)
+#define XD_CHK_BAD_OLDBLK(xd_card) ((xd_card)->multi_flag & 0x02)
+
+#define XD_SET_MBR_FAIL(xd_card) ((xd_card)->multi_flag |= 0x04)
+#define XD_CLR_MBR_FAIL(xd_card) ((xd_card)->multi_flag &= ~0x04)
+#define XD_CHK_MBR_FAIL(xd_card) ((xd_card)->multi_flag & 0x04)
+
+#define XD_SET_ECC_FLD_ERR(xd_card) ((xd_card)->multi_flag |= 0x08)
+#define XD_CLR_ECC_FLD_ERR(xd_card) ((xd_card)->multi_flag &= ~0x08)
+#define XD_CHK_ECC_FLD_ERR(xd_card) ((xd_card)->multi_flag & 0x08)
+
+#define XD_SET_4MB(xd_card) ((xd_card)->multi_flag |= 0x10)
+#define XD_CLR_4MB(xd_card) ((xd_card)->multi_flag &= ~0x10)
+#define XD_CHK_4MB(xd_card) ((xd_card)->multi_flag & 0x10)
+
+#define XD_SET_ECC_ERR(xd_card) ((xd_card)->multi_flag |= 0x40)
+#define XD_CLR_ECC_ERR(xd_card) ((xd_card)->multi_flag &= ~0x40)
+#define XD_CHK_ECC_ERR(xd_card) ((xd_card)->multi_flag & 0x40)
+
+/* Offset in xD redundant buffer */
+#define PAGE_STATUS 0
+#define BLOCK_STATUS 1
+#define BLOCK_ADDR1_L 2
+#define BLOCK_ADDR1_H 3
+#define BLOCK_ADDR2_L 4
+#define BLOCK_ADDR2_H 5
+#define RESERVED0 6
+#define RESERVED1 7
+#define RESERVED2 8
+#define RESERVED3 9
+#define PARITY 10
+
+/* For CIS block */
+#define CIS0_0 0
+#define CIS0_1 1
+#define CIS0_2 2
+#define CIS0_3 3
+#define CIS0_4 4
+#define CIS0_5 5
+#define CIS0_6 6
+#define CIS0_7 7
+#define CIS0_8 8
+#define CIS0_9 9
+#define CIS1_0 256
+#define CIS1_1 (256 + 1)
+#define CIS1_2 (256 + 2)
+#define CIS1_3 (256 + 3)
+#define CIS1_4 (256 + 4)
+#define CIS1_5 (256 + 5)
+#define CIS1_6 (256 + 6)
+#define CIS1_7 (256 + 7)
+#define CIS1_8 (256 + 8)
+#define CIS1_9 (256 + 9)
+
+int reset_xd_card(struct rts51x_chip *chip);
+int xd_delay_write(struct rts51x_chip *chip);
+int xd_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 start_sector,
+ u16 sector_cnt);
+void xd_free_l2p_tbl(struct rts51x_chip *chip);
+void xd_cleanup_work(struct rts51x_chip *chip);
+int xd_power_off_card3v3(struct rts51x_chip *chip);
+int release_xd_card(struct rts51x_chip *chip);
+
+#endif /* __RTS51X_XD_H */
diff --git a/drivers/staging/rts_pstor/Makefile b/drivers/staging/rts_pstor/Makefile
index 61609aee0a0..42533d39c07 100644
--- a/drivers/staging/rts_pstor/Makefile
+++ b/drivers/staging/rts_pstor/Makefile
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS := -Idrivers/scsi
+ccflags := -Idrivers/scsi
obj-$(CONFIG_RTS_PSTOR) := rts_pstor.o
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 16c73fbff51..480b0ed2e4d 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -36,7 +36,7 @@
#include "sd.h"
#include "xd.h"
-#define DRIVER_VERSION "v1.10"
+#define DRIVER_VERSION "v1.10"
MODULE_DESCRIPTION("Realtek PCI-Express card reader driver");
MODULE_LICENSE("GPL");
@@ -77,7 +77,7 @@ static const char *host_info(struct Scsi_Host *host)
return "SCSI emulation for PCI-Express Mass Storage devices";
}
-static int slave_alloc (struct scsi_device *sdev)
+static int slave_alloc(struct scsi_device *sdev)
{
/*
* Set the INQUIRY transfer length to 36. We don't use any of
@@ -130,7 +130,7 @@ static int slave_configure(struct scsi_device *sdev)
#define SPRINTF(args...) \
do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0)
-static int proc_info (struct Scsi_Host *host, char *buffer,
+static int proc_info(struct Scsi_Host *host, char *buffer,
char **start, off_t offset, int length, int inout)
{
char *pos = buffer;
@@ -506,13 +506,15 @@ static int rtsx_control_thread(void *__dev)
*/
else if (chip->srb->device->id) {
printk(KERN_ERR "Bad target number (%d:%d)\n",
- chip->srb->device->id, chip->srb->device->lun);
+ chip->srb->device->id,
+ chip->srb->device->lun);
chip->srb->result = DID_BAD_TARGET << 16;
}
else if (chip->srb->device->lun > chip->max_lun) {
printk(KERN_ERR "Bad LUN (%d:%d)\n",
- chip->srb->device->id, chip->srb->device->lun);
+ chip->srb->device->id,
+ chip->srb->device->lun);
chip->srb->result = DID_BAD_TARGET << 16;
}
@@ -625,26 +627,23 @@ static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
int retval;
u32 status;
- if (dev) {
+ if (dev)
chip = dev->chip;
- } else {
+ else
return IRQ_NONE;
- }
- if (!chip) {
+ if (!chip)
return IRQ_NONE;
- }
spin_lock(&dev->reg_lock);
retval = rtsx_pre_handle_interrupt(chip);
if (retval == STATUS_FAIL) {
spin_unlock(&dev->reg_lock);
- if (chip->int_reg == 0xFFFFFFFF) {
+ if (chip->int_reg == 0xFFFFFFFF)
return IRQ_HANDLED;
- } else {
+ else
return IRQ_NONE;
- }
}
status = chip->int_reg;
@@ -661,9 +660,8 @@ static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
if (status & (NEED_COMPLETE_INT | DELINK_INT)) {
if (status & (TRANS_FAIL_INT | DELINK_INT)) {
- if (status & DELINK_INT) {
+ if (status & DELINK_INT)
RTSX_SET_DELINK(chip);
- }
dev->trans_result = TRANS_RESULT_FAIL;
if (dev->done)
complete(dev->done);
@@ -844,7 +842,9 @@ static void rtsx_init_options(struct rtsx_chip *chip)
chip->ssc_en = 1;
chip->sd_speed_prior = 0x01040203;
chip->sd_current_prior = 0x00010203;
- chip->sd_ctl = SD_PUSH_POINT_AUTO | SD_SAMPLE_POINT_AUTO | SUPPORT_MMC_DDR_MODE;
+ chip->sd_ctl = SD_PUSH_POINT_AUTO |
+ SD_SAMPLE_POINT_AUTO |
+ SUPPORT_MMC_DDR_MODE;
chip->sd_ddr_tx_phase = 0;
chip->mmc_ddr_tx_phase = 1;
chip->sd_default_tx_phase = 15;
@@ -896,7 +896,8 @@ static void rtsx_init_options(struct rtsx_chip *chip)
chip->s3_pwr_off_delay = 1000;
}
-static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+static int __devinit rtsx_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct Scsi_Host *host;
struct rtsx_dev *dev;
@@ -913,7 +914,8 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
err = pci_request_regions(pci, CR_DRIVER_NAME);
if (err < 0) {
- printk(KERN_ERR "PCI request regions for %s failed!\n", CR_DRIVER_NAME);
+ printk(KERN_ERR "PCI request regions for %s failed!\n",
+ CR_DRIVER_NAME);
pci_disable_device(pci);
return err;
}
@@ -934,9 +936,8 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
memset(dev, 0, sizeof(struct rtsx_dev));
dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
- if (dev->chip == NULL) {
+ if (dev->chip == NULL)
goto errout;
- }
spin_lock_init(&dev->reg_lock);
mutex_init(&(dev->dev_mutex));
@@ -950,7 +951,8 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
dev->pci = pci;
dev->irq = -1;
- printk(KERN_INFO "Resource length: 0x%x\n", (unsigned int)pci_resource_len(pci, 0));
+ printk(KERN_INFO "Resource length: 0x%x\n",
+ (unsigned int)pci_resource_len(pci, 0));
dev->addr = pci_resource_start(pci, 0);
dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
if (dev->remap_addr == NULL) {
@@ -959,9 +961,12 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
goto errout;
}
- /* Using "unsigned long" cast here to eliminate gcc warning in 64-bit system */
+ /*
+ * Using "unsigned long" cast here to eliminate gcc warning in
+ * 64-bit system
+ */
printk(KERN_INFO "Original address: 0x%lx, remapped address: 0x%lx\n",
- (unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));
+ (unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));
dev->rtsx_resv_buf = dma_alloc_coherent(&(pci->dev), RTSX_RESV_BUF_LEN,
&(dev->rtsx_resv_buf_addr), GFP_KERNEL);
@@ -973,7 +978,8 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
- dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
+ dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr +
+ HOST_CMDS_BUF_LEN;
dev->chip->rtsx = dev;
@@ -1058,10 +1064,10 @@ static void __devexit rtsx_remove(struct pci_dev *pci)
}
/* PCI IDs */
-static struct pci_device_id rtsx_ids[] = {
- { 0x10EC, 0x5208, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_OTHERS << 16, 0xFF0000 },
- { 0x10EC, 0x5209, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_OTHERS << 16, 0xFF0000 },
- { 0x10EC, 0x5288, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_OTHERS << 16, 0xFF0000 },
+static DEFINE_PCI_DEVICE_TABLE(rtsx_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5208), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5288), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, },
};
diff --git a/drivers/staging/rts_pstor/rtsx.h b/drivers/staging/rts_pstor/rtsx.h
index 86e47c2e3e3..1ab42fcc47d 100644
--- a/drivers/staging/rts_pstor/rtsx.h
+++ b/drivers/staging/rts_pstor/rtsx.h
@@ -24,8 +24,8 @@
#ifndef __REALTEK_RTSX_H
#define __REALTEK_RTSX_H
-#include <asm/io.h>
-#include <asm/bitops.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -80,7 +80,7 @@
#define wait_timeout_x(task_state, msecs) \
do { \
- set_current_state((task_state)); \
+ set_current_state((task_state)); \
schedule_timeout((msecs) * HZ / 1000); \
} while (0)
#define wait_timeout(msecs) wait_timeout_x(TASK_INTERRUPTIBLE, (msecs))
@@ -102,12 +102,12 @@ typedef unsigned long DELAY_PARA_T;
struct rtsx_chip;
struct rtsx_dev {
- struct pci_dev *pci;
+ struct pci_dev *pci;
/* pci resources */
unsigned long addr;
void __iomem *remap_addr;
- int irq;
+ int irq;
/* locks */
spinlock_t reg_lock;
diff --git a/drivers/staging/rts_pstor/rtsx_scsi.c b/drivers/staging/rts_pstor/rtsx_scsi.c
index 7de1fae443f..f2e5842d4c9 100644
--- a/drivers/staging/rts_pstor/rtsx_scsi.c
+++ b/drivers/staging/rts_pstor/rtsx_scsi.c
@@ -2730,7 +2730,7 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf_len = data_len = 0x6A;
}
- buf = (u8 *)kmalloc(buf_len, GFP_KERNEL);
+ buf = kmalloc(buf_len, GFP_KERNEL);
if (!buf) {
TRACE_RET(chip, TRANSPORT_ERROR);
}
diff --git a/drivers/staging/rts_pstor/sd.c b/drivers/staging/rts_pstor/sd.c
index 8db14ddbeb7..aab690932ea 100644
--- a/drivers/staging/rts_pstor/sd.c
+++ b/drivers/staging/rts_pstor/sd.c
@@ -3134,41 +3134,40 @@ int reset_sd_card(struct rtsx_chip *chip)
if (chip->sd_ctl & RESET_MMC_FIRST) {
retval = reset_mmc(chip);
- if ((retval != STATUS_SUCCESS) && !sd_check_err_code(chip, SD_NO_CARD)) {
+ if (retval != STATUS_SUCCESS) {
+ if (sd_check_err_code(chip, SD_NO_CARD))
+ TRACE_RET(chip, STATUS_FAIL);
+
retval = reset_sd(chip);
if (retval != STATUS_SUCCESS) {
- if (CHECK_PID(chip, 0x5209)) {
- retval = sd_change_bank_voltage(chip, SD_IO_3V3);
- if (retval != STATUS_SUCCESS) {
- TRACE_RET(chip, STATUS_FAIL);
- }
- }
+ if (CHECK_PID(chip, 0x5209))
+ sd_change_bank_voltage(chip, SD_IO_3V3);
+
+ TRACE_RET(chip, STATUS_FAIL);
}
}
} else {
retval = reset_sd(chip);
if (retval != STATUS_SUCCESS) {
- if (sd_check_err_code(chip, SD_NO_CARD)) {
+ if (sd_check_err_code(chip, SD_NO_CARD))
TRACE_RET(chip, STATUS_FAIL);
- }
if (CHECK_PID(chip, 0x5209)) {
retval = sd_change_bank_voltage(chip, SD_IO_3V3);
- if (retval != STATUS_SUCCESS) {
+ if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
- }
}
- if (!chip->sd_io) {
+ if (chip->sd_io) {
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
retval = reset_mmc(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
}
}
}
- if (retval != STATUS_SUCCESS) {
- TRACE_RET(chip, STATUS_FAIL);
- }
-
retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
if (retval != STATUS_SUCCESS) {
TRACE_RET(chip, STATUS_FAIL);
@@ -4139,7 +4138,7 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
cmd[3] = srb->cmnd[5];
cmd[4] = srb->cmnd[6];
- buf = (u8 *)kmalloc(data_len, GFP_KERNEL);
+ buf = kmalloc(data_len, GFP_KERNEL);
if (buf == NULL) {
TRACE_RET(chip, TRANSPORT_ERROR);
}
@@ -4385,7 +4384,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
u16 i;
u8 *buf;
- buf = (u8 *)kmalloc(data_len, GFP_KERNEL);
+ buf = kmalloc(data_len, GFP_KERNEL);
if (buf == NULL) {
TRACE_RET(chip, TRANSPORT_ERROR);
}
diff --git a/drivers/staging/rts_pstor/spi.c b/drivers/staging/rts_pstor/spi.c
index c803ba63550..6b36cc532a8 100644
--- a/drivers/staging/rts_pstor/spi.c
+++ b/drivers/staging/rts_pstor/spi.c
@@ -463,7 +463,7 @@ int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
if (len) {
- buf = (u8 *)kmalloc(len, GFP_KERNEL);
+ buf = kmalloc(len, GFP_KERNEL);
if (!buf)
TRACE_RET(chip, STATUS_ERROR);
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
index bf7286e01a3..8ac3faea2d2 100644
--- a/drivers/staging/sep/sep_driver.c
+++ b/drivers/staging/sep/sep_driver.c
@@ -2420,11 +2420,12 @@ static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
(sep->pid_doing_transaction != 0)) {
dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
error = -EACCES;
- goto end_function;
}
-
mutex_unlock(&sep->sep_mutex);
+ if (error)
+ return error;
+
if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
return -ENOTTY;
@@ -2461,7 +2462,6 @@ static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
-end_function:
mutex_unlock(&sep->ioctl_mutex);
return error;
}
diff --git a/drivers/tty/serial/68360serial.c b/drivers/staging/serial/68360serial.c
index 0a3e8787ed5..0a3e8787ed5 100644
--- a/drivers/tty/serial/68360serial.c
+++ b/drivers/staging/serial/68360serial.c
diff --git a/drivers/staging/serial/Kconfig b/drivers/staging/serial/Kconfig
new file mode 100644
index 00000000000..9489688397e
--- /dev/null
+++ b/drivers/staging/serial/Kconfig
@@ -0,0 +1,16 @@
+config SERIAL_68360_SMC
+ bool "68360 SMC uart support"
+ depends on M68360
+ help
+ This driver supports the SMC serial ports of the Motorola 68360 CPU.
+
+config SERIAL_68360_SCC
+ bool "68360 SCC uart support"
+ depends on M68360
+ help
+ This driver supports the SCC serial ports of the Motorola 68360 CPU.
+
+config SERIAL_68360
+ bool
+ depends on SERIAL_68360_SMC || SERIAL_68360_SCC
+ default y
diff --git a/drivers/staging/serial/Makefile b/drivers/staging/serial/Makefile
new file mode 100644
index 00000000000..37a6a0b35fb
--- /dev/null
+++ b/drivers/staging/serial/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SERIAL_68360) += 68360serial.o
diff --git a/drivers/staging/serial/TODO b/drivers/staging/serial/TODO
new file mode 100644
index 00000000000..a19cda81dab
--- /dev/null
+++ b/drivers/staging/serial/TODO
@@ -0,0 +1,6 @@
+These are a few serial drivers that either do not build, or do not work if they
+do build, or if they seem to work, are for obsolete hardware, or are full of
+unfixable races and no one uses them anymore.
+
+If no one steps up to adopt any of these drivers, they will be removed
+in the 3.4 release.
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 12f5eba0355..c44e41af288 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -24,7 +24,6 @@ static int debug;
#define DRIVER_DESC "Quatech USB to Serial Driver"
#define USB_VENDOR_ID_QUATECH 0x061d /* Quatech VID */
-#define QUATECH_SSU100 0xC020 /* SSU100 */
#define QUATECH_SSU200 0xC030 /* SSU200 */
#define QUATECH_DSU100 0xC040 /* DSU100 */
#define QUATECH_DSU200 0xC050 /* DSU200 */
@@ -127,7 +126,6 @@ static int debug;
#define RS232_MODE 0x00
static const struct usb_device_id serqt_id_table[] = {
- {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU100)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU200)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU100)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU200)},
@@ -190,7 +188,7 @@ struct quatech_port {
struct usb_serial_port *port; /* owner of this object */
struct qt_get_device_data DeviceData;
- spinlock_t lock;
+ struct mutex lock;
bool read_urb_busy;
int RxHolding;
int ReadBulkStopped;
@@ -745,7 +743,7 @@ static int qt_startup(struct usb_serial *serial)
}
return -ENOMEM;
}
- spin_lock_init(&qt_port->lock);
+ mutex_init(&qt_port->lock);
usb_set_serial_port_data(port, qt_port);
@@ -775,7 +773,6 @@ static int qt_startup(struct usb_serial *serial)
}
switch (serial->dev->descriptor.idProduct) {
- case QUATECH_SSU100:
case QUATECH_DSU100:
case QUATECH_QSU100:
case QUATECH_ESU100A:
@@ -1160,7 +1157,6 @@ static int qt_write_room(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial;
struct quatech_port *qt_port;
- unsigned long flags;
int retval = -EINVAL;
@@ -1176,7 +1172,7 @@ static int qt_write_room(struct tty_struct *tty)
qt_port = qt_get_port_private(port);
- spin_lock_irqsave(&qt_port->lock, flags);
+ mutex_lock(&qt_port->lock);
dbg("%s - port %d\n", __func__, port->number);
@@ -1185,7 +1181,7 @@ static int qt_write_room(struct tty_struct *tty)
retval = port->bulk_out_size;
}
- spin_unlock_irqrestore(&qt_port->lock, flags);
+ mutex_unlock(&qt_port->lock);
return retval;
}
@@ -1358,7 +1354,6 @@ static void qt_break(struct tty_struct *tty, int break_state)
struct quatech_port *qt_port;
u16 index, onoff;
unsigned int result;
- unsigned long flags;
index = tty->index - serial->minor;
@@ -1369,7 +1364,7 @@ static void qt_break(struct tty_struct *tty, int break_state)
else
onoff = 0;
- spin_lock_irqsave(&qt_port->lock, flags);
+ mutex_lock(&qt_port->lock);
dbg("%s - port %d\n", __func__, port->number);
@@ -1377,7 +1372,7 @@ static void qt_break(struct tty_struct *tty, int break_state)
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_BREAK_CONTROL, 0x40, onoff, index, NULL, 0, 300);
- spin_unlock_irqrestore(&qt_port->lock, flags);
+ mutex_unlock(&qt_port->lock);
}
static inline int qt_real_tiocmget(struct tty_struct *tty,
@@ -1466,21 +1461,20 @@ static int qt_tiocmget(struct tty_struct *tty)
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port = qt_get_port_private(port);
int retval = -ENODEV;
- unsigned long flags;
dbg("In %s\n", __func__);
if (!serial)
return -ENODEV;
- spin_lock_irqsave(&qt_port->lock, flags);
+ mutex_lock(&qt_port->lock);
dbg("%s - port %d\n", __func__, port->number);
dbg("%s - port->RxHolding = %d\n", __func__, qt_port->RxHolding);
retval = qt_real_tiocmget(tty, port, serial);
- spin_unlock_irqrestore(&qt_port->lock, flags);
+ mutex_unlock(&qt_port->lock);
return retval;
}
@@ -1491,7 +1485,6 @@ static int qt_tiocmset(struct tty_struct *tty,
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port = qt_get_port_private(port);
- unsigned long flags;
int retval = -ENODEV;
dbg("In %s\n", __func__);
@@ -1499,14 +1492,14 @@ static int qt_tiocmset(struct tty_struct *tty,
if (!serial)
return -ENODEV;
- spin_lock_irqsave(&qt_port->lock, flags);
+ mutex_lock(&qt_port->lock);
dbg("%s - port %d\n", __func__, port->number);
dbg("%s - qt_port->RxHolding = %d\n", __func__, qt_port->RxHolding);
retval = qt_real_tiocmset(tty, port, serial, set);
- spin_unlock_irqrestore(&qt_port->lock, flags);
+ mutex_unlock(&qt_port->lock);
return retval;
}
@@ -1515,7 +1508,6 @@ static void qt_throttle(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port;
- unsigned long flags;
dbg("%s - port %d\n", __func__, port->number);
@@ -1524,13 +1516,13 @@ static void qt_throttle(struct tty_struct *tty)
qt_port = qt_get_port_private(port);
- spin_lock_irqsave(&qt_port->lock, flags);
+ mutex_lock(&qt_port->lock);
/* pass on to the driver specific version of this function */
qt_port->RxHolding = 1;
dbg("%s - port->RxHolding = 1\n", __func__);
- spin_unlock_irqrestore(&qt_port->lock, flags);
+ mutex_unlock(&qt_port->lock);
return;
}
@@ -1539,7 +1531,6 @@ static void qt_unthrottle(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port;
- unsigned long flags;
unsigned int result;
if (!serial)
@@ -1547,7 +1538,7 @@ static void qt_unthrottle(struct tty_struct *tty)
qt_port = qt_get_port_private(port);
- spin_lock_irqsave(&qt_port->lock, flags);
+ mutex_lock(&qt_port->lock);
dbg("%s - port %d\n", __func__, port->number);
@@ -1573,7 +1564,7 @@ static void qt_unthrottle(struct tty_struct *tty)
__func__, result);
}
}
- spin_unlock_irqrestore(&qt_port->lock, flags);
+ mutex_unlock(&qt_port->lock);
return;
}
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 18f11039bb5..77a0751a31a 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -3724,7 +3724,7 @@ static const struct net_device_ops slic_netdev_ops = {
.ndo_do_ioctl = slic_ioctl,
.ndo_set_mac_address = slic_mac_set_address,
.ndo_get_stats = slic_get_stats,
- .ndo_set_multicast_list = slic_mcast_set_list,
+ .ndo_set_rx_mode = slic_mcast_set_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
};
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index a164fc43bd8..39dbf339a4f 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -35,11 +35,13 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/module.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#ifdef CONFIG_PM
#include <linux/pm.h>
+#include <linux/module.h>
#endif
#include "smtcfb.h"
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
index 506547b603e..86d556d6cf9 100644
--- a/drivers/staging/spectra/ffsport.c
+++ b/drivers/staging/spectra/ffsport.c
@@ -227,19 +227,12 @@ static int ioctl_write_page_data(unsigned long arg)
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
return -EFAULT;
- buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
- if (!buf) {
- printk(KERN_ERR "ioctl_write_page_data: "
- "failed to allocate memory\n");
- return -ENOMEM;
- }
-
- if (copy_from_user(buf, (void __user *)info.data,
- IdentifyDeviceData.PageDataSize)) {
+ buf = memdup_user((void __user *)info.data,
+ IdentifyDeviceData.PageDataSize);
+ if (IS_ERR(buf)) {
printk(KERN_ERR "ioctl_write_page_data: "
"failed to copy user data\n");
- kfree(buf);
- return -EFAULT;
+ return PTR_ERR(buf);
}
mutex_lock(&spectra_lock);
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index 36f4cb77567..11728a03f8a 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -30,6 +30,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>
+#include <linux/module.h>
#include "synaptics_i2c_rmi4.h"
/* TODO: for multiple device support will need a per-device mutex */
diff --git a/drivers/staging/tidspbridge/gen/gh.c b/drivers/staging/tidspbridge/gen/gh.c
index cd725033f27..60aa7b063c9 100644
--- a/drivers/staging/tidspbridge/gen/gh.c
+++ b/drivers/staging/tidspbridge/gen/gh.c
@@ -55,7 +55,7 @@ struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
hash_tab->match = match;
hash_tab->delete = delete == NULL ? noop : delete;
- hash_tab->buckets = (struct element **)
+ hash_tab->buckets =
kzalloc(sizeof(struct element *) * max_bucket, GFP_KERNEL);
if (hash_tab->buckets == NULL) {
gh_delete(hash_tab);
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c
index c214df9b205..8a93d55ca59 100644
--- a/drivers/staging/tidspbridge/hw/hw_mmu.c
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.c
@@ -558,5 +558,5 @@ static hw_status mmu_set_ram_entry(const void __iomem *base_address,
void hw_mmu_tlb_flush_all(const void __iomem *base)
{
- __raw_writeb(1, base + MMU_GFLUSH);
+ __raw_writel(1, base + MMU_GFLUSH);
}
diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h
index 132adc57ebc..d4073684eac 100644
--- a/drivers/staging/usbip/stub.h
+++ b/drivers/staging/usbip/stub.h
@@ -17,6 +17,9 @@
* USA.
*/
+#ifndef __USBIP_STUB_H
+#define __USBIP_STUB_H
+
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -106,3 +109,5 @@ void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
__u32 status);
void stub_complete(struct urb *urb);
int stub_tx_loop(void *data);
+
+#endif /* __USBIP_STUB_H */
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index fce22f2bd8b..55c0b510889 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -19,6 +19,7 @@
#include <linux/device.h>
#include <linux/kthread.h>
+#include <linux/module.h>
#include "usbip_common.h"
#include "stub.h"
@@ -524,11 +525,11 @@ static void stub_disconnect(struct usb_interface *interface)
}
}
-/*
+/*
* Presence of pre_reset and post_reset prevents the driver from being unbound
* when the device is being reset
*/
-
+
int stub_pre_reset(struct usb_interface *interface)
{
dev_dbg(&interface->dev, "pre_reset\n");
@@ -548,4 +549,4 @@ struct usb_driver stub_driver = {
.id_table = stub_table,
.pre_reset = stub_pre_reset,
.post_reset = stub_post_reset,
- };
+};
diff --git a/drivers/staging/usbip/stub_main.c b/drivers/staging/usbip/stub_main.c
index a34249a9cb6..2d631785006 100644
--- a/drivers/staging/usbip/stub_main.c
+++ b/drivers/staging/usbip/stub_main.c
@@ -18,6 +18,7 @@
*/
#include <linux/string.h>
+#include <linux/module.h>
#include "usbip_common.h"
#include "stub.h"
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 538fb9ee341..6b4e3e182de 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -175,12 +175,11 @@ static int tweak_reset_device_cmd(struct urb *urb)
dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
/*
- * With the implementation of pre_reset and post_reset the driver no
+ * With the implementation of pre_reset and post_reset the driver no
* longer unbinds. This allows the use of synchronous reset.
*/
- if (usb_lock_device_for_reset(sdev->udev, sdev->interface)<0)
- {
+ if (usb_lock_device_for_reset(sdev->udev, sdev->interface) < 0) {
dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
return 0;
}
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index 074ac4267d3..be216175ae8 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -126,12 +126,12 @@ extern struct device_attribute dev_attr_usbip_debug;
*
*/
#define USBIP_CMD_SUBMIT 0x0001
-#define USBIP_RET_SUBMIT 0x0002
-#define USBIP_CMD_UNLINK 0x0003
+#define USBIP_CMD_UNLINK 0x0002
+#define USBIP_RET_SUBMIT 0x0003
#define USBIP_RET_UNLINK 0x0004
-#define USBIP_DIR_IN 0x00
-#define USBIP_DIR_OUT 0x01
+#define USBIP_DIR_OUT 0x00
+#define USBIP_DIR_IN 0x01
/**
* struct usbip_header_basic - data pertinent to every request
diff --git a/drivers/staging/usbip/usbip_protocol.txt b/drivers/staging/usbip/usbip_protocol.txt
new file mode 100644
index 00000000000..0f102081e86
--- /dev/null
+++ b/drivers/staging/usbip/usbip_protocol.txt
@@ -0,0 +1,358 @@
+PRELIMINARY DRAFT, MAY CONTAIN MISTAKES!
+28 Jun 2011
+
+The USB/IP protocol follows a server/client architecture. The server exports the
+USB devices and the clients imports them. The device driver for the exported
+USB device runs on the client machine.
+
+The client may ask for the list of the exported USB devices. To get the list the
+client opens a TCP/IP connection towards the server, and sends an OP_REQ_DEVLIST
+packet on top of the TCP/IP connection (so the actual OP_REQ_DEVLIST may be sent
+in one or more pieces at the low level transport layer). The server sends back
+the OP_REP_DEVLIST packet which lists the exported USB devices. Finally the
+TCP/IP connection is closed.
+
+ virtual host controller usb host
+ "client" "server"
+ (imports USB devices) (exports USB devices)
+ | |
+ | OP_REQ_DEVLIST |
+ | ----------------------------------------------> |
+ | |
+ | OP_REP_DEVLIST |
+ | <---------------------------------------------- |
+ | |
+
+Once the client knows the list of exported USB devices it may decide to use one
+of them. First the client opens a TCP/IP connection towards the server and
+sends an OP_REQ_IMPORT packet. The server replies with OP_REP_IMPORT. If the
+import was successful the TCP/IP connection remains open and will be used
+to trasfer the URB traffic between the client and the server. The client may
+send two types of packets: the USBIP_CMD_SUBMIT to submit an URB, and
+USBIP_CMD_UNLINK to unlink a previously submitted URB. The answers of the
+server may be USBIP_RET_SUBMIT and USBIP_RET_UNLINK respectively.
+
+ virtual host controller usb host
+ "client" "server"
+ (imports USB devices) (exports USB devices)
+ | |
+ | OP_REQ_IMPORT |
+ | ----------------------------------------------> |
+ | |
+ | OP_REP_IMPORT |
+ | <---------------------------------------------- |
+ | |
+ | |
+ | USBIP_CMD_SUBMIT(seqnum = n) |
+ | ----------------------------------------------> |
+ | |
+ | USBIP_RET_SUBMIT(seqnum = n) |
+ | <---------------------------------------------- |
+ | . |
+ | : |
+ | |
+ | USBIP_CMD_SUBMIT(seqnum = m) |
+ | ----------------------------------------------> |
+ | |
+ | USBIP_CMD_SUBMIT(seqnum = m+1) |
+ | ----------------------------------------------> |
+ | |
+ | USBIP_CMD_SUBMIT(seqnum = m+2) |
+ | ----------------------------------------------> |
+ | |
+ | USBIP_RET_SUBMIT(seqnum = m) |
+ | <---------------------------------------------- |
+ | |
+ | USBIP_CMD_SUBMIT(seqnum = m+3) |
+ | ----------------------------------------------> |
+ | |
+ | USBIP_RET_SUBMIT(seqnum = m+1) |
+ | <---------------------------------------------- |
+ | |
+ | USBIP_CMD_SUBMIT(seqnum = m+4) |
+ | ----------------------------------------------> |
+ | |
+ | USBIP_RET_SUBMIT(seqnum = m+2) |
+ | <---------------------------------------------- |
+ | . |
+ | : |
+ | |
+ | USBIP_CMD_UNLINK |
+ | ----------------------------------------------> |
+ | |
+ | USBIP_RET_UNLINK |
+ | <---------------------------------------------- |
+ | |
+
+The fields are in network (big endian) byte order meaning that the most significant
+byte (MSB) is stored at the lowest address.
+
+
+OP_REQ_DEVLIST: Retrieve the list of exported USB devices.
+
+ Offset | Length | Value | Description
+-----------+--------+------------+---------------------------------------------------
+ 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0
+-----------+--------+------------+---------------------------------------------------
+ 2 | 2 | 0x8005 | Command code: Retrieve the list of exported USB
+ | | | devices.
+-----------+--------+------------+---------------------------------------------------
+ 4 | 4 | 0x00000000 | Status: unused, shall be set to 0
+
+OP_REP_DEVLIST: Reply with the list of exported USB devices.
+
+ Offset | Length | Value | Description
+-----------+--------+------------+---------------------------------------------------
+ 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0.
+-----------+--------+------------+---------------------------------------------------
+ 2 | 2 | 0x0005 | Reply code: The list of exported USB devices.
+-----------+--------+------------+---------------------------------------------------
+ 4 | 4 | 0x00000000 | Status: 0 for OK
+-----------+--------+------------+---------------------------------------------------
+ 8 | 4 | n | Number of exported devices: 0 means no exported
+ | | | devices.
+-----------+--------+------------+---------------------------------------------------
+ 0x0C | | | From now on the exported n devices are described,
+ | | | if any. If no devices are exported the message
+ | | | ends with the previous "number of exported
+ | | | devices" field.
+-----------+--------+------------+---------------------------------------------------
+ | 256 | | path: Path of the device on the host exporting the
+ | | | USB device, string closed with zero byte, e.g.
+ | | | "/sys/devices/pci0000:00/0000:00:1d.1/usb3/3-2"
+ | | | The unused bytes shall be filled with zero
+ | | | bytes.
+-----------+--------+------------+---------------------------------------------------
+ 0x10C | 32 | | busid: Bus ID of the exported device, string
+ | | | closed with zero byte, e.g. "3-2". The unused
+ | | | bytes shall be filled with zero bytes.
+-----------+--------+------------+---------------------------------------------------
+ 0x12C | 4 | | busnum
+-----------+--------+------------+---------------------------------------------------
+ 0x130 | 4 | | devnum
+-----------+--------+------------+---------------------------------------------------
+ 0x134 | 4 | | speed
+-----------+--------+------------+---------------------------------------------------
+ 0x138 | 2 | | idVendor
+-----------+--------+------------+---------------------------------------------------
+ 0x13A | 2 | | idProduct
+-----------+--------+------------+---------------------------------------------------
+ 0x13C | 2 | | bcdDevice
+-----------+--------+------------+---------------------------------------------------
+ 0x13E | 1 | | bDeviceClass
+-----------+--------+------------+---------------------------------------------------
+ 0x13F | 1 | | bDeviceSubClass
+-----------+--------+------------+---------------------------------------------------
+ 0x140 | 1 | | bDeviceProtocol
+-----------+--------+------------+---------------------------------------------------
+ 0x141 | 1 | | bConfigurationValue
+-----------+--------+------------+---------------------------------------------------
+ 0x142 | 1 | | bNumConfigurations
+-----------+--------+------------+---------------------------------------------------
+ 0x143 | 1 | | bNumInterfaces
+-----------+--------+------------+---------------------------------------------------
+ 0x144 | | m_0 | From now on each interface is described, all
+ | | | together bNumInterfaces times, with the
+ | | | the following 4 fields:
+-----------+--------+------------+---------------------------------------------------
+ | 1 | | bInterfaceClass
+-----------+--------+------------+---------------------------------------------------
+ 0x145 | 1 | | bInterfaceSubClass
+-----------+--------+------------+---------------------------------------------------
+ 0x146 | 1 | | bInterfaceProtocol
+-----------+--------+------------+---------------------------------------------------
+ 0x147 | 1 | | padding byte for alignment, shall be set to zero
+-----------+--------+------------+---------------------------------------------------
+ 0xC + | | | The second exported USB device starts at i=1
+ i*0x138 + | | | with the busid field.
+ m_(i-1)*4 | | |
+
+OP_REQ_IMPORT: Request to import (attach) a remote USB device.
+
+ Offset | Length | Value | Description
+-----------+--------+------------+---------------------------------------------------
+ 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0
+-----------+--------+------------+---------------------------------------------------
+ 2 | 2 | 0x8003 | Command code: import a remote USB device.
+-----------+--------+------------+---------------------------------------------------
+ 4 | 4 | 0x00000000 | Status: unused, shall be set to 0
+-----------+--------+------------+---------------------------------------------------
+ 8 | 32 | | busid: the busid of the exported device on the
+ | | | remote host. The possible values are taken
+ | | | from the message field OP_REP_DEVLIST.busid.
+ | | | A string closed with zero, the unused bytes
+ | | | shall be filled with zeros.
+-----------+--------+------------+---------------------------------------------------
+
+OP_REP_IMPORT: Reply to import (attach) a remote USB device.
+
+ Offset | Length | Value | Description
+-----------+--------+------------+---------------------------------------------------
+ 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0
+-----------+--------+------------+---------------------------------------------------
+ 2 | 2 | 0x0003 | Reply code: Reply to import.
+-----------+--------+------------+---------------------------------------------------
+ 4 | 4 | 0x00000000 | Status: 0 for OK
+ | | | 1 for error
+-----------+--------+------------+---------------------------------------------------
+ 8 | | | From now on comes the details of the imported
+ | | | device, if the previous status field was OK (0),
+ | | | otherwise the reply ends with the status field.
+-----------+--------+------------+---------------------------------------------------
+ | 256 | | path: Path of the device on the host exporting the
+ | | | USB device, string closed with zero byte, e.g.
+ | | | "/sys/devices/pci0000:00/0000:00:1d.1/usb3/3-2"
+ | | | The unused bytes shall be filled with zero
+ | | | bytes.
+-----------+--------+------------+---------------------------------------------------
+ 0x108 | 32 | | busid: Bus ID of the exported device, string
+ | | | closed with zero byte, e.g. "3-2". The unused
+ | | | bytes shall be filled with zero bytes.
+-----------+--------+------------+---------------------------------------------------
+ 0x128 | 4 | | busnum
+-----------+--------+------------+---------------------------------------------------
+ 0x12C | 4 | | devnum
+-----------+--------+------------+---------------------------------------------------
+ 0x130 | 4 | | speed
+-----------+--------+------------+---------------------------------------------------
+ 0x134 | 2 | | idVendor
+-----------+--------+------------+---------------------------------------------------
+ 0x136 | 2 | | idProduct
+-----------+--------+------------+---------------------------------------------------
+ 0x138 | 2 | | bcdDevice
+-----------+--------+------------+---------------------------------------------------
+ 0x139 | 1 | | bDeviceClass
+-----------+--------+------------+---------------------------------------------------
+ 0x13A | 1 | | bDeviceSubClass
+-----------+--------+------------+---------------------------------------------------
+ 0x13B | 1 | | bDeviceProtocol
+-----------+--------+------------+---------------------------------------------------
+ 0x13C | 1 | | bConfigurationValue
+-----------+--------+------------+---------------------------------------------------
+ 0x13D | 1 | | bNumConfigurations
+-----------+--------+------------+---------------------------------------------------
+ 0x13E | 1 | | bNumInterfaces
+
+USBIP_CMD_SUBMIT: Submit an URB
+
+ Offset | Length | Value | Description
+-----------+--------+------------+---------------------------------------------------
+ 0 | 4 | 0x00000001 | command: Submit an URB
+-----------+--------+------------+---------------------------------------------------
+ 4 | 4 | | seqnum: the sequence number of the URB to submit
+-----------+--------+------------+---------------------------------------------------
+ 8 | 4 | | devid
+-----------+--------+------------+---------------------------------------------------
+ 0xC | 4 | | direction: 0: USBIP_DIR_OUT
+ | | | 1: USBIP_DIR_IN
+-----------+--------+------------+---------------------------------------------------
+ 0x10 | 4 | | ep: endpoint number, possible values are: 0...15
+-----------+--------+------------+---------------------------------------------------
+ 0x14 | 4 | | transfer_flags: possible values depend on the
+ | | | URB transfer type, see below
+-----------+--------+------------+---------------------------------------------------
+ 0x18 | 4 | | transfer_buffer_length
+-----------+--------+------------+---------------------------------------------------
+ 0x1C | 4 | | start_frame: specify the selected frame to
+ | | | transmit an ISO frame, ignored if URB_ISO_ASAP
+ | | | is specified at transfer_flags
+-----------+--------+------------+---------------------------------------------------
+ 0x20 | 4 | | number_of_packets: number of ISO packets
+-----------+--------+------------+---------------------------------------------------
+ 0x24 | 4 | | interval: maximum time for the request on the
+ | | | server-side host controller
+-----------+--------+------------+---------------------------------------------------
+ 0x28 | 8 | | setup: data bytes for USB setup, filled with
+ | | | zeros if not used
+-----------+--------+------------+---------------------------------------------------
+ 0x30 | | | URB data. For ISO transfers the padding between
+ | | | each ISO packets is not transmitted.
+
+
+ Allowed transfer_flags | value | control | interrupt | bulk | isochronous
+ -------------------------+------------+---------+-----------+----------+-------------
+ URB_SHORT_NOT_OK | 0x00000001 | only in | only in | only in | no
+ URB_ISO_ASAP | 0x00000002 | no | no | no | yes
+ URB_NO_TRANSFER_DMA_MAP | 0x00000004 | yes | yes | yes | yes
+ URB_NO_FSBR | 0x00000020 | yes | no | no | no
+ URB_ZERO_PACKET | 0x00000040 | no | no | only out | no
+ URB_NO_INTERRUPT | 0x00000080 | yes | yes | yes | yes
+ URB_FREE_BUFFER | 0x00000100 | yes | yes | yes | yes
+ URB_DIR_MASK | 0x00000200 | yes | yes | yes | yes
+
+
+USBIP_RET_SUBMIT: Reply for submitting an URB
+
+ Offset | Length | Value | Description
+-----------+--------+------------+---------------------------------------------------
+ 0 | 4 | 0x00000003 | command
+-----------+--------+------------+---------------------------------------------------
+ 4 | 4 | | seqnum: URB sequence number
+-----------+--------+------------+---------------------------------------------------
+ 8 | 4 | | devid
+-----------+--------+------------+---------------------------------------------------
+ 0xC | 4 | | direction: 0: USBIP_DIR_OUT
+ | | | 1: USBIP_DIR_IN
+-----------+--------+------------+---------------------------------------------------
+ 0x10 | 4 | | ep: endpoint number
+-----------+--------+------------+---------------------------------------------------
+ 0x14 | 4 | | status: zero for successful URB transaction,
+ | | | otherwise some kind of error happened.
+-----------+--------+------------+---------------------------------------------------
+ 0x18 | 4 | n | actual_length: number of URB data bytes
+-----------+--------+------------+---------------------------------------------------
+ 0x1C | 4 | | start_frame: for an ISO frame the actually
+ | | | selected frame for transmit.
+-----------+--------+------------+---------------------------------------------------
+ 0x20 | 4 | | number_of_packets
+-----------+--------+------------+---------------------------------------------------
+ 0x24 | 4 | | error_count
+-----------+--------+------------+---------------------------------------------------
+ 0x28 | 8 | | setup: data bytes for USB setup, filled with
+ | | | zeros if not used
+-----------+--------+------------+---------------------------------------------------
+ 0x30 | n | | URB data bytes. For ISO transfers the padding
+ | | | between each ISO packets is not transmitted.
+
+USBIP_CMD_UNLINK: Unlink an URB
+
+ Offset | Length | Value | Description
+-----------+--------+------------+---------------------------------------------------
+ 0 | 4 | 0x00000002 | command: URB unlink command
+-----------+--------+------------+---------------------------------------------------
+ 4 | 4 | | seqnum: URB sequence number to unlink: FIXME: is this so?
+-----------+--------+------------+---------------------------------------------------
+ 8 | 4 | | devid
+-----------+--------+------------+---------------------------------------------------
+ 0xC | 4 | | direction: 0: USBIP_DIR_OUT
+ | | | 1: USBIP_DIR_IN
+-----------+--------+------------+---------------------------------------------------
+ 0x10 | 4 | | ep: endpoint number: zero
+-----------+--------+------------+---------------------------------------------------
+ 0x14 | 4 | | seqnum: the URB sequence number given previously
+ | | | at USBIP_CMD_SUBMIT.seqnum field
+-----------+--------+------------+---------------------------------------------------
+ 0x30 | n | | URB data bytes. For ISO transfers the padding
+ | | | between each ISO packets is not transmitted.
+
+USBIP_RET_UNLINK: Reply for URB unlink
+
+ Offset | Length | Value | Description
+-----------+--------+------------+---------------------------------------------------
+ 0 | 4 | 0x00000004 | command: reply for the URB unlink command
+-----------+--------+------------+---------------------------------------------------
+ 4 | 4 | | seqnum: the unlinked URB sequence number
+-----------+--------+------------+---------------------------------------------------
+ 8 | 4 | | devid
+-----------+--------+------------+---------------------------------------------------
+ 0xC | 4 | | direction: 0: USBIP_DIR_OUT
+ | | | 1: USBIP_DIR_IN
+-----------+--------+------------+---------------------------------------------------
+ 0x10 | 4 | | ep: endpoint number
+-----------+--------+------------+---------------------------------------------------
+ 0x14 | 4 | | status: This is the value contained in the
+ | | | urb->status in the URB completition handler.
+ | | | FIXME: a better explanation needed.
+-----------+--------+------------+---------------------------------------------------
+ 0x30 | n | | URB data bytes. For ISO transfers the padding
+ | | | between each ISO packets is not transmitted.
diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
index abbc285f433..269787751b2 100644
--- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
+++ b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
@@ -133,7 +133,7 @@ static int check_usbip_device(struct sysfs_class_device *cdev)
strlen(vhci_driver->hc_device->path))) {
/* found usbip device */
usbip_cdev = calloc(1, sizeof(*usbip_cdev));
- if (!cdev) {
+ if (!usbip_cdev) {
dbg("calloc failed");
return -1;
}
diff --git a/drivers/staging/usbip/userspace/src/usbip_attach.c b/drivers/staging/usbip/userspace/src/usbip_attach.c
index b7885a20275..bdf61c0fe69 100644
--- a/drivers/staging/usbip/userspace/src/usbip_attach.c
+++ b/drivers/staging/usbip/userspace/src/usbip_attach.c
@@ -51,7 +51,9 @@ static int record_connection(char *host, char *port, char *busid, int rhport)
char buff[MAX_BUFF+1];
int ret;
- mkdir(VHCI_STATE_PATH, 0700);
+ ret = mkdir(VHCI_STATE_PATH, 0700);
+ if (ret < 0)
+ return -1;
snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", rhport);
diff --git a/drivers/staging/usbip/userspace/src/utils.h b/drivers/staging/usbip/userspace/src/utils.h
index fdcb14dc0fb..5916fd3e02a 100644
--- a/drivers/staging/usbip/userspace/src/utils.h
+++ b/drivers/staging/usbip/userspace/src/utils.h
@@ -22,3 +22,4 @@
int modify_match_busid(char *busid, int add);
#endif /* __UTILS_H */
+
diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
index 71a586e00fd..88b32981cf1 100644
--- a/drivers/staging/usbip/vhci.h
+++ b/drivers/staging/usbip/vhci.h
@@ -8,6 +8,9 @@
*
*/
+#ifndef __USBIP_VHCI_H
+#define __USBIP_VHCI_H
+
#include <linux/device.h>
#include <linux/list.h>
#include <linux/spinlock.h>
@@ -129,3 +132,5 @@ static inline struct device *vhci_dev(struct vhci_hcd *vhci)
{
return vhci_to_hcd(vhci)->self.controller;
}
+
+#endif /* __USBIP_VHCI_H */
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
index 5122c13a956..0e4feac138e 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.c
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -1500,6 +1500,28 @@ static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
}
+void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
+ dma_addr_t *dma)
+{
+ struct pci_dev *pdev;
+
+ /* Find pci_dev container of dev */
+ pdev = container_of(parent, struct pci_dev, dev);
+
+ return pci_alloc_consistent(pdev, size, dma);
+}
+
+void ca91cx42_free_consistent(struct device *parent, size_t size, void *vaddr,
+ dma_addr_t dma)
+{
+ struct pci_dev *pdev;
+
+ /* Find pci_dev container of dev */
+ pdev = container_of(parent, struct pci_dev, dev);
+
+ pci_free_consistent(pdev, size, vaddr, dma);
+}
+
static int __init ca91cx42_init(void)
{
return pci_register_driver(&ca91cx42_driver);
@@ -1769,6 +1791,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
ca91cx42_bridge->slot_get = ca91cx42_slot_get;
+ ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
+ ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;
data = ioread32(ca91cx42_device->base + MISC_CTL);
dev_info(&pdev->dev, "Board is%s the VME system controller\n",
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c
index 9c539513c74..6c1167c2bea 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.c
+++ b/drivers/staging/vme/bridges/vme_tsi148.c
@@ -2114,6 +2114,28 @@ static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
return (int)slot;
}
+void *tsi148_alloc_consistent(struct device *parent, size_t size,
+ dma_addr_t *dma)
+{
+ struct pci_dev *pdev;
+
+ /* Find pci_dev container of dev */
+ pdev = container_of(parent, struct pci_dev, dev);
+
+ return pci_alloc_consistent(pdev, size, dma);
+}
+
+void tsi148_free_consistent(struct device *parent, size_t size, void *vaddr,
+ dma_addr_t dma)
+{
+ struct pci_dev *pdev;
+
+ /* Find pci_dev container of dev */
+ pdev = container_of(parent, struct pci_dev, dev);
+
+ pci_free_consistent(pdev, size, vaddr, dma);
+}
+
static int __init tsi148_init(void)
{
return pci_register_driver(&tsi148_driver);
@@ -2443,6 +2465,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
tsi148_bridge->lm_attach = tsi148_lm_attach;
tsi148_bridge->lm_detach = tsi148_lm_detach;
tsi148_bridge->slot_get = tsi148_slot_get;
+ tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
+ tsi148_bridge->free_consistent = tsi148_free_consistent;
data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
dev_info(&pdev->dev, "Board is%s the VME system controller\n",
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 91d2cc7bb4c..7dcd1622b5f 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -43,7 +43,7 @@
static DEFINE_MUTEX(vme_user_mutex);
static const char driver_name[] = "vme_user";
-static int bus[USER_BUS_MAX];
+static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;
/* Currently Documentation/devices.txt defines the following for VME:
@@ -116,7 +116,7 @@ static struct driver_stats statistics;
static struct cdev *vme_user_cdev; /* Character device */
static struct class *vme_user_sysfs_class; /* Sysfs class */
-static struct device *vme_user_bridge; /* Pointer to bridge device */
+static struct vme_dev *vme_user_bridge; /* Pointer to user device */
static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR,
@@ -135,8 +135,9 @@ static ssize_t vme_user_write(struct file *, const char __user *, size_t,
static loff_t vme_user_llseek(struct file *, loff_t, int);
static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long);
-static int __devinit vme_user_probe(struct device *, int, int);
-static int __devexit vme_user_remove(struct device *, int, int);
+static int vme_user_match(struct vme_dev *);
+static int __devinit vme_user_probe(struct vme_dev *);
+static int __devexit vme_user_remove(struct vme_dev *);
static const struct file_operations vme_user_fops = {
.open = vme_user_open,
@@ -620,6 +621,7 @@ static void buf_unalloc(int num)
static struct vme_driver vme_user_driver = {
.name = driver_name,
+ .match = vme_user_match,
.probe = vme_user_probe,
.remove = __devexit_p(vme_user_remove),
};
@@ -628,8 +630,6 @@ static struct vme_driver vme_user_driver = {
static int __init vme_user_init(void)
{
int retval = 0;
- int i;
- struct vme_device_id *ids;
printk(KERN_INFO "VME User Space Access Driver\n");
@@ -643,56 +643,42 @@ static int __init vme_user_init(void)
/* Let's start by supporting one bus, we can support more than one
* in future revisions if that ever becomes necessary.
*/
- if (bus_num > USER_BUS_MAX) {
+ if (bus_num > VME_USER_BUS_MAX) {
printk(KERN_ERR "%s: Driver only able to handle %d buses\n",
- driver_name, USER_BUS_MAX);
- bus_num = USER_BUS_MAX;
- }
-
-
- /* Dynamically create the bind table based on module parameters */
- ids = kmalloc(sizeof(struct vme_device_id) * (bus_num + 1), GFP_KERNEL);
- if (ids == NULL) {
- printk(KERN_ERR "%s: Unable to allocate ID table\n",
- driver_name);
- retval = -ENOMEM;
- goto err_id;
- }
-
- memset(ids, 0, (sizeof(struct vme_device_id) * (bus_num + 1)));
-
- for (i = 0; i < bus_num; i++) {
- ids[i].bus = bus[i];
- /*
- * We register the driver against the slot occupied by *this*
- * card, since it's really a low level way of controlling
- * the VME bridge
- */
- ids[i].slot = VME_SLOT_CURRENT;
+ driver_name, VME_USER_BUS_MAX);
+ bus_num = VME_USER_BUS_MAX;
}
- vme_user_driver.bind_table = ids;
-
- retval = vme_register_driver(&vme_user_driver);
+ /*
+ * Here we just register the maximum number of devices we can and
+ * leave vme_user_match() to allow only 1 to go through to probe().
+ * This way, if we later want to allow multiple user access devices,
+ * we just change the code in vme_user_match().
+ */
+ retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
if (retval != 0)
goto err_reg;
return retval;
err_reg:
- kfree(ids);
-err_id:
err_nocard:
return retval;
}
+static int vme_user_match(struct vme_dev *vdev)
+{
+ if (vdev->num >= VME_USER_BUS_MAX)
+ return 0;
+ return 1;
+}
+
/*
* In this simple access driver, the old behaviour is being preserved as much
* as practical. We will therefore reserve the buffers and request the images
* here so that we don't have to do it later.
*/
-static int __devinit vme_user_probe(struct device *dev, int cur_bus,
- int cur_slot)
+static int __devinit vme_user_probe(struct vme_dev *vdev)
{
int i, err;
char name[12];
@@ -704,7 +690,7 @@ static int __devinit vme_user_probe(struct device *dev, int cur_bus,
err = -EINVAL;
goto err_dev;
}
- vme_user_bridge = dev;
+ vme_user_bridge = vdev;
/* Initialise descriptors */
for (i = 0; i < VME_DEVS; i++) {
@@ -867,8 +853,7 @@ err_dev:
return err;
}
-static int __devexit vme_user_remove(struct device *dev, int cur_bus,
- int cur_slot)
+static int __devexit vme_user_remove(struct vme_dev *dev)
{
int i;
@@ -900,8 +885,6 @@ static int __devexit vme_user_remove(struct device *dev, int cur_bus,
static void __exit vme_user_exit(void)
{
vme_unregister_driver(&vme_user_driver);
-
- kfree(vme_user_driver.bind_table);
}
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme/devices/vme_user.h
index 24bf4e54013..d85a1e9dbe3 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme/devices/vme_user.h
@@ -1,7 +1,7 @@
#ifndef _VME_USER_H_
#define _VME_USER_H_
-#define USER_BUS_MAX 1
+#define VME_USER_BUS_MAX 1
/*
* VMEbus Master Window Configuration Structure
diff --git a/drivers/staging/vme/vme.c b/drivers/staging/vme/vme.c
index c078ce369df..b04b4688f70 100644
--- a/drivers/staging/vme/vme.c
+++ b/drivers/staging/vme/vme.c
@@ -34,20 +34,17 @@
#include "vme.h"
#include "vme_bridge.h"
-/* Bitmask and mutex to keep track of bridge numbers */
+/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;
-static DEFINE_MUTEX(vme_bus_num_mtx);
+static LIST_HEAD(vme_bus_list);
+static DEFINE_MUTEX(vme_buses_lock);
static void __exit vme_exit(void);
static int __init vme_init(void);
-
-/*
- * Find the bridge resource associated with a specific device resource
- */
-static struct vme_bridge *dev_to_bridge(struct device *dev)
+static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
- return dev->platform_data;
+ return container_of(dev, struct vme_dev, dev);
}
/*
@@ -83,15 +80,11 @@ static struct vme_bridge *find_bridge(struct vme_resource *resource)
/*
* Allocate a contiguous block of memory for use by the driver. This is used to
* create the buffers for the slave windows.
- *
- * XXX VME bridges could be available on buses other than PCI. At the momment
- * this framework only supports PCI devices.
*/
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
dma_addr_t *dma)
{
struct vme_bridge *bridge;
- struct pci_dev *pdev;
if (resource == NULL) {
printk(KERN_ERR "No resource\n");
@@ -104,28 +97,29 @@ void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
return NULL;
}
- /* Find pci_dev container of dev */
if (bridge->parent == NULL) {
- printk(KERN_ERR "Dev entry NULL\n");
+ printk(KERN_ERR "Dev entry NULL for"
+ " bridge %s\n", bridge->name);
return NULL;
}
- pdev = container_of(bridge->parent, struct pci_dev, dev);
- return pci_alloc_consistent(pdev, size, dma);
+ if (bridge->alloc_consistent == NULL) {
+ printk(KERN_ERR "alloc_consistent not supported by"
+ " bridge %s\n", bridge->name);
+ return NULL;
+ }
+
+ return bridge->alloc_consistent(bridge->parent, size, dma);
}
EXPORT_SYMBOL(vme_alloc_consistent);
/*
* Free previously allocated contiguous block of memory.
- *
- * XXX VME bridges could be available on buses other than PCI. At the momment
- * this framework only supports PCI devices.
*/
void vme_free_consistent(struct vme_resource *resource, size_t size,
void *vaddr, dma_addr_t dma)
{
struct vme_bridge *bridge;
- struct pci_dev *pdev;
if (resource == NULL) {
printk(KERN_ERR "No resource\n");
@@ -138,10 +132,19 @@ void vme_free_consistent(struct vme_resource *resource, size_t size,
return;
}
- /* Find pci_dev container of dev */
- pdev = container_of(bridge->parent, struct pci_dev, dev);
+ if (bridge->parent == NULL) {
+ printk(KERN_ERR "Dev entry NULL for"
+ " bridge %s\n", bridge->name);
+ return;
+ }
+
+ if (bridge->free_consistent == NULL) {
+ printk(KERN_ERR "free_consistent not supported by"
+ " bridge %s\n", bridge->name);
+ return;
+ }
- pci_free_consistent(pdev, size, vaddr, dma);
+ bridge->free_consistent(bridge->parent, size, vaddr, dma);
}
EXPORT_SYMBOL(vme_free_consistent);
@@ -229,7 +232,7 @@ static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
* Request a slave image with specific attributes, return some unique
* identifier.
*/
-struct vme_resource *vme_slave_request(struct device *dev,
+struct vme_resource *vme_slave_request(struct vme_dev *vdev,
vme_address_t address, vme_cycle_t cycle)
{
struct vme_bridge *bridge;
@@ -238,7 +241,7 @@ struct vme_resource *vme_slave_request(struct device *dev,
struct vme_slave_resource *slave_image = NULL;
struct vme_resource *resource = NULL;
- bridge = dev_to_bridge(dev);
+ bridge = vdev->bridge;
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
@@ -385,7 +388,7 @@ EXPORT_SYMBOL(vme_slave_free);
* Request a master image with specific attributes, return some unique
* identifier.
*/
-struct vme_resource *vme_master_request(struct device *dev,
+struct vme_resource *vme_master_request(struct vme_dev *vdev,
vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
{
struct vme_bridge *bridge;
@@ -394,7 +397,7 @@ struct vme_resource *vme_master_request(struct device *dev,
struct vme_master_resource *master_image = NULL;
struct vme_resource *resource = NULL;
- bridge = dev_to_bridge(dev);
+ bridge = vdev->bridge;
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
@@ -643,7 +646,8 @@ EXPORT_SYMBOL(vme_master_free);
* Request a DMA controller with specific attributes, return some unique
* identifier.
*/
-struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
+struct vme_resource *vme_dma_request(struct vme_dev *vdev,
+ vme_dma_route_t route)
{
struct vme_bridge *bridge;
struct list_head *dma_pos = NULL;
@@ -654,7 +658,7 @@ struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
/* XXX Not checking resource attributes */
printk(KERN_ERR "No VME resource Attribute tests done\n");
- bridge = dev_to_bridge(dev);
+ bridge = vdev->bridge;
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
@@ -987,13 +991,13 @@ void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
}
EXPORT_SYMBOL(vme_irq_handler);
-int vme_irq_request(struct device *dev, int level, int statid,
+int vme_irq_request(struct vme_dev *vdev, int level, int statid,
void (*callback)(int, int, void *),
void *priv_data)
{
struct vme_bridge *bridge;
- bridge = dev_to_bridge(dev);
+ bridge = vdev->bridge;
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
@@ -1030,11 +1034,11 @@ int vme_irq_request(struct device *dev, int level, int statid,
}
EXPORT_SYMBOL(vme_irq_request);
-void vme_irq_free(struct device *dev, int level, int statid)
+void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
struct vme_bridge *bridge;
- bridge = dev_to_bridge(dev);
+ bridge = vdev->bridge;
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
return;
@@ -1065,11 +1069,11 @@ void vme_irq_free(struct device *dev, int level, int statid)
}
EXPORT_SYMBOL(vme_irq_free);
-int vme_irq_generate(struct device *dev, int level, int statid)
+int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
{
struct vme_bridge *bridge;
- bridge = dev_to_bridge(dev);
+ bridge = vdev->bridge;
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
@@ -1092,7 +1096,7 @@ EXPORT_SYMBOL(vme_irq_generate);
/*
* Request the location monitor, return resource or NULL
*/
-struct vme_resource *vme_lm_request(struct device *dev)
+struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
struct vme_bridge *bridge;
struct list_head *lm_pos = NULL;
@@ -1100,7 +1104,7 @@ struct vme_resource *vme_lm_request(struct device *dev)
struct vme_lm_resource *lm = NULL;
struct vme_resource *resource = NULL;
- bridge = dev_to_bridge(dev);
+ bridge = vdev->bridge;
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
@@ -1281,11 +1285,11 @@ void vme_lm_free(struct vme_resource *resource)
}
EXPORT_SYMBOL(vme_lm_free);
-int vme_slot_get(struct device *bus)
+int vme_slot_get(struct vme_dev *vdev)
{
struct vme_bridge *bridge;
- bridge = dev_to_bridge(bus);
+ bridge = vdev->bridge;
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
@@ -1303,207 +1307,212 @@ EXPORT_SYMBOL(vme_slot_get);
/* - Bridge Registration --------------------------------------------------- */
-static int vme_alloc_bus_num(void)
+static int vme_add_bus(struct vme_bridge *bridge)
{
int i;
+ int ret = -1;
- mutex_lock(&vme_bus_num_mtx);
+ mutex_lock(&vme_buses_lock);
for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
- if (((vme_bus_numbers >> i) & 0x1) == 0) {
- vme_bus_numbers |= (0x1 << i);
+ if ((vme_bus_numbers & (1 << i)) == 0) {
+ vme_bus_numbers |= (1 << i);
+ bridge->num = i;
+ INIT_LIST_HEAD(&bridge->devices);
+ list_add_tail(&bridge->bus_list, &vme_bus_list);
+ ret = 0;
break;
}
}
- mutex_unlock(&vme_bus_num_mtx);
+ mutex_unlock(&vme_buses_lock);
- return i;
+ return ret;
}
-static void vme_free_bus_num(int bus)
+static void vme_remove_bus(struct vme_bridge *bridge)
{
- mutex_lock(&vme_bus_num_mtx);
- vme_bus_numbers &= ~(0x1 << bus);
- mutex_unlock(&vme_bus_num_mtx);
+ struct vme_dev *vdev;
+ struct vme_dev *tmp;
+
+ mutex_lock(&vme_buses_lock);
+ vme_bus_numbers &= ~(1 << bridge->num);
+ list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
+ list_del(&vdev->drv_list);
+ list_del(&vdev->bridge_list);
+ device_unregister(&vdev->dev);
+ }
+ list_del(&bridge->bus_list);
+ mutex_unlock(&vme_buses_lock);
}
-int vme_register_bridge(struct vme_bridge *bridge)
+static void vme_dev_release(struct device *dev)
{
- struct device *dev;
- int retval;
- int i;
+ kfree(dev_to_vme_dev(dev));
+}
- bridge->num = vme_alloc_bus_num();
+int vme_register_bridge(struct vme_bridge *bridge)
+{
+ return vme_add_bus(bridge);
+}
+EXPORT_SYMBOL(vme_register_bridge);
- /* This creates 32 vme "slot" devices. This equates to a slot for each
- * ID available in a system conforming to the ANSI/VITA 1-1994
- * specification.
- */
- for (i = 0; i < VME_SLOTS_MAX; i++) {
- dev = &bridge->dev[i];
- memset(dev, 0, sizeof(struct device));
+void vme_unregister_bridge(struct vme_bridge *bridge)
+{
+ vme_remove_bus(bridge);
+}
+EXPORT_SYMBOL(vme_unregister_bridge);
- dev->parent = bridge->parent;
- dev->bus = &vme_bus_type;
- /*
- * We save a pointer to the bridge in platform_data so that we
- * can get to it later. We keep driver_data for use by the
- * driver that binds against the slot
- */
- dev->platform_data = bridge;
- dev_set_name(dev, "vme-%x.%x", bridge->num, i + 1);
+/* - Driver Registration --------------------------------------------------- */
- retval = device_register(dev);
- if (retval)
+static int __vme_register_driver_bus(struct vme_driver *drv,
+ struct vme_bridge *bridge, unsigned int ndevs)
+{
+ int err;
+ unsigned int i;
+ struct vme_dev *vdev;
+ struct vme_dev *tmp;
+
+ for (i = 0; i < ndevs; i++) {
+ vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
+ if (!vdev) {
+ err = -ENOMEM;
+ goto err_devalloc;
+ }
+ vdev->num = i;
+ vdev->bridge = bridge;
+ vdev->dev.platform_data = drv;
+ vdev->dev.release = vme_dev_release;
+ vdev->dev.parent = bridge->parent;
+ vdev->dev.bus = &vme_bus_type;
+ dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
+ vdev->num);
+
+ err = device_register(&vdev->dev);
+ if (err)
goto err_reg;
- }
- return retval;
+ if (vdev->dev.platform_data) {
+ list_add_tail(&vdev->drv_list, &drv->devices);
+ list_add_tail(&vdev->bridge_list, &bridge->devices);
+ } else
+ device_unregister(&vdev->dev);
+ }
+ return 0;
err_reg:
- while (--i >= 0) {
- dev = &bridge->dev[i];
- device_unregister(dev);
- }
- vme_free_bus_num(bridge->num);
- return retval;
+ kfree(vdev);
+err_devalloc:
+ list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
+ list_del(&vdev->drv_list);
+ list_del(&vdev->bridge_list);
+ device_unregister(&vdev->dev);
+ }
+ return err;
}
-EXPORT_SYMBOL(vme_register_bridge);
-void vme_unregister_bridge(struct vme_bridge *bridge)
+static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
- int i;
- struct device *dev;
-
+ struct vme_bridge *bridge;
+ int err = 0;
- for (i = 0; i < VME_SLOTS_MAX; i++) {
- dev = &bridge->dev[i];
- device_unregister(dev);
+ mutex_lock(&vme_buses_lock);
+ list_for_each_entry(bridge, &vme_bus_list, bus_list) {
+ /*
+ * This cannot cause trouble as we already have vme_buses_lock
+ * and if the bridge is removed, it will have to go through
+ * vme_unregister_bridge() to do it (which calls remove() on
+ * the bridge which in turn tries to acquire vme_buses_lock and
+ * will have to wait). The probe() called after device
+ * registration in __vme_register_driver below will also fail
+ * as the bridge is being removed (since the probe() calls
+ * vme_bridge_get()).
+ */
+ err = __vme_register_driver_bus(drv, bridge, ndevs);
+ if (err)
+ break;
}
- vme_free_bus_num(bridge->num);
+ mutex_unlock(&vme_buses_lock);
+ return err;
}
-EXPORT_SYMBOL(vme_unregister_bridge);
-
-/* - Driver Registration --------------------------------------------------- */
-
-int vme_register_driver(struct vme_driver *drv)
+int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
+ int err;
+
drv->driver.name = drv->name;
drv->driver.bus = &vme_bus_type;
+ INIT_LIST_HEAD(&drv->devices);
+
+ err = driver_register(&drv->driver);
+ if (err)
+ return err;
- return driver_register(&drv->driver);
+ err = __vme_register_driver(drv, ndevs);
+ if (err)
+ driver_unregister(&drv->driver);
+
+ return err;
}
EXPORT_SYMBOL(vme_register_driver);
void vme_unregister_driver(struct vme_driver *drv)
{
- driver_unregister(&drv->driver);
-}
-EXPORT_SYMBOL(vme_unregister_driver);
-
-/* - Bus Registration ------------------------------------------------------ */
-
-static int vme_calc_slot(struct device *dev)
-{
- struct vme_bridge *bridge;
- int num;
-
- bridge = dev_to_bridge(dev);
-
- /* Determine slot number */
- num = 0;
- while (num < VME_SLOTS_MAX) {
- if (&bridge->dev[num] == dev)
- break;
+ struct vme_dev *dev, *dev_tmp;
- num++;
+ mutex_lock(&vme_buses_lock);
+ list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
+ list_del(&dev->drv_list);
+ list_del(&dev->bridge_list);
+ device_unregister(&dev->dev);
}
- if (num == VME_SLOTS_MAX) {
- dev_err(dev, "Failed to identify slot\n");
- num = 0;
- goto err_dev;
- }
- num++;
+ mutex_unlock(&vme_buses_lock);
-err_dev:
- return num;
+ driver_unregister(&drv->driver);
}
+EXPORT_SYMBOL(vme_unregister_driver);
-static struct vme_driver *dev_to_vme_driver(struct device *dev)
-{
- if (dev->driver == NULL)
- printk(KERN_ERR "Bugger dev->driver is NULL\n");
-
- return container_of(dev->driver, struct vme_driver, driver);
-}
+/* - Bus Registration ------------------------------------------------------ */
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
- struct vme_bridge *bridge;
- struct vme_driver *driver;
- int i, num;
+ struct vme_driver *vme_drv;
- bridge = dev_to_bridge(dev);
- driver = container_of(drv, struct vme_driver, driver);
+ vme_drv = container_of(drv, struct vme_driver, driver);
- num = vme_calc_slot(dev);
- if (!num)
- goto err_dev;
-
- if (driver->bind_table == NULL) {
- dev_err(dev, "Bind table NULL\n");
- goto err_table;
- }
+ if (dev->platform_data == vme_drv) {
+ struct vme_dev *vdev = dev_to_vme_dev(dev);
- i = 0;
- while ((driver->bind_table[i].bus != 0) ||
- (driver->bind_table[i].slot != 0)) {
+ if (vme_drv->match && vme_drv->match(vdev))
+ return 1;
- if (bridge->num == driver->bind_table[i].bus) {
- if (num == driver->bind_table[i].slot)
- return 1;
-
- if (driver->bind_table[i].slot == VME_SLOT_ALL)
- return 1;
-
- if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
- (num == vme_slot_get(dev)))
- return 1;
- }
- i++;
+ dev->platform_data = NULL;
}
-
-err_dev:
-err_table:
return 0;
}
static int vme_bus_probe(struct device *dev)
{
- struct vme_bridge *bridge;
- struct vme_driver *driver;
int retval = -ENODEV;
+ struct vme_driver *driver;
+ struct vme_dev *vdev = dev_to_vme_dev(dev);
- driver = dev_to_vme_driver(dev);
- bridge = dev_to_bridge(dev);
+ driver = dev->platform_data;
if (driver->probe != NULL)
- retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));
+ retval = driver->probe(vdev);
return retval;
}
static int vme_bus_remove(struct device *dev)
{
- struct vme_bridge *bridge;
- struct vme_driver *driver;
int retval = -ENODEV;
+ struct vme_driver *driver;
+ struct vme_dev *vdev = dev_to_vme_dev(dev);
- driver = dev_to_vme_driver(dev);
- bridge = dev_to_bridge(dev);
+ driver = dev->platform_data;
if (driver->remove != NULL)
- retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));
+ retval = driver->remove(vdev);
return retval;
}
diff --git a/drivers/staging/vme/vme.h b/drivers/staging/vme/vme.h
index 4155d8c2a53..e3828badca6 100644
--- a/drivers/staging/vme/vme.h
+++ b/drivers/staging/vme/vme.h
@@ -88,22 +88,38 @@ struct vme_resource {
extern struct bus_type vme_bus_type;
+/* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */
+#define VME_MAX_BRIDGES (sizeof(unsigned int)*8)
+#define VME_MAX_SLOTS 32
+
#define VME_SLOT_CURRENT -1
#define VME_SLOT_ALL -2
-struct vme_device_id {
- int bus;
- int slot;
+/**
+ * Structure representing a VME device
+ * @id: The ID of the device (currently the bus and slot number)
+ * @bridge: Pointer to the bridge device this device is on
+ * @dev: Internal device structure
+ * @drv_list: List of devices (per driver)
+ * @bridge_list: List of devices (per bridge)
+ */
+struct vme_dev {
+ int num;
+ struct vme_bridge *bridge;
+ struct device dev;
+ struct list_head drv_list;
+ struct list_head bridge_list;
};
struct vme_driver {
struct list_head node;
const char *name;
- const struct vme_device_id *bind_table;
- int (*probe) (struct device *, int, int);
- int (*remove) (struct device *, int, int);
- void (*shutdown) (void);
- struct device_driver driver;
+ int (*match)(struct vme_dev *);
+ int (*probe)(struct vme_dev *);
+ int (*remove)(struct vme_dev *);
+ void (*shutdown)(void);
+ struct device_driver driver;
+ struct list_head devices;
};
void *vme_alloc_consistent(struct vme_resource *, size_t, dma_addr_t *);
@@ -112,7 +128,7 @@ void vme_free_consistent(struct vme_resource *, size_t, void *,
size_t vme_get_size(struct vme_resource *);
-struct vme_resource *vme_slave_request(struct device *, vme_address_t,
+struct vme_resource *vme_slave_request(struct vme_dev *, vme_address_t,
vme_cycle_t);
int vme_slave_set(struct vme_resource *, int, unsigned long long,
unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
@@ -120,7 +136,7 @@ int vme_slave_get(struct vme_resource *, int *, unsigned long long *,
unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
void vme_slave_free(struct vme_resource *);
-struct vme_resource *vme_master_request(struct device *, vme_address_t,
+struct vme_resource *vme_master_request(struct vme_dev *, vme_address_t,
vme_cycle_t, vme_width_t);
int vme_master_set(struct vme_resource *, int, unsigned long long,
unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
@@ -132,7 +148,7 @@ unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
unsigned int, loff_t);
void vme_master_free(struct vme_resource *);
-struct vme_resource *vme_dma_request(struct device *, vme_dma_route_t);
+struct vme_resource *vme_dma_request(struct vme_dev *, vme_dma_route_t);
struct vme_dma_list *vme_new_dma_list(struct vme_resource *);
struct vme_dma_attr *vme_dma_pattern_attribute(u32, vme_pattern_t);
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t);
@@ -145,12 +161,12 @@ int vme_dma_list_exec(struct vme_dma_list *);
int vme_dma_list_free(struct vme_dma_list *);
int vme_dma_free(struct vme_resource *);
-int vme_irq_request(struct device *, int, int,
+int vme_irq_request(struct vme_dev *, int, int,
void (*callback)(int, int, void *), void *);
-void vme_irq_free(struct device *, int, int);
-int vme_irq_generate(struct device *, int, int);
+void vme_irq_free(struct vme_dev *, int, int);
+int vme_irq_generate(struct vme_dev *, int, int);
-struct vme_resource * vme_lm_request(struct device *);
+struct vme_resource * vme_lm_request(struct vme_dev *);
int vme_lm_count(struct vme_resource *);
int vme_lm_set(struct vme_resource *, unsigned long long, vme_address_t,
vme_cycle_t);
@@ -160,9 +176,9 @@ int vme_lm_attach(struct vme_resource *, int, void (*callback)(int));
int vme_lm_detach(struct vme_resource *, int);
void vme_lm_free(struct vme_resource *);
-int vme_slot_get(struct device *);
+int vme_slot_get(struct vme_dev *);
-int vme_register_driver(struct vme_driver *);
+int vme_register_driver(struct vme_driver *, unsigned int);
void vme_unregister_driver(struct vme_driver *);
diff --git a/drivers/staging/vme/vme_api.txt b/drivers/staging/vme/vme_api.txt
index 4910e92c52a..e8ff2151a48 100644
--- a/drivers/staging/vme/vme_api.txt
+++ b/drivers/staging/vme/vme_api.txt
@@ -18,37 +18,49 @@ registration function. The structure is as follows:
struct vme_driver {
struct list_head node;
- char *name;
- const struct vme_device_id *bind_table;
- int (*probe) (struct device *, int, int);
- int (*remove) (struct device *, int, int);
- void (*shutdown) (void);
- struct device_driver driver;
+ const char *name;
+ int (*match)(struct vme_dev *);
+ int (*probe)(struct vme_dev *);
+ int (*remove)(struct vme_dev *);
+ void (*shutdown)(void);
+ struct device_driver driver;
+ struct list_head devices;
+ unsigned int ndev;
};
-At the minimum, the '.name', '.probe' and '.bind_table' elements of this
-structure should be correctly set. The '.name' element is a pointer to a string
-holding the device driver's name. The '.probe' element should contain a pointer
-to the probe routine.
-
-The arguments of the probe routine are as follows:
-
- probe(struct device *dev, int bus, int slot);
-
-The '.bind_table' is a pointer to an array of type 'vme_device_id':
-
- struct vme_device_id {
- int bus;
- int slot;
+At the minimum, the '.name', '.match' and '.probe' elements of this structure
+should be correctly set. The '.name' element is a pointer to a string holding
+the device driver's name.
+
+The '.match' function allows controlling the number of devices that need to
+be registered. The match function should return 1 if a device should be
+probed and 0 otherwise. This example match function (from vme_user.c) limits
+the number of devices probed to one:
+
+ #define USER_BUS_MAX 1
+ ...
+ static int vme_user_match(struct vme_dev *vdev)
+ {
+ if (vdev->id.num >= USER_BUS_MAX)
+ return 0;
+ return 1;
+ }
+
+The '.probe' element should contain a pointer to the probe routine. The
+probe routine is passed a 'struct vme_dev' pointer as an argument. The
+'struct vme_dev' structure looks like the following:
+
+ struct vme_dev {
+ int num;
+ struct vme_bridge *bridge;
+ struct device dev;
+ struct list_head drv_list;
+ struct list_head bridge_list;
};
-Each structure in this array should provide a bus and slot number where the core
-should probe, using the driver's probe routine, for a device on the specified
-VME bus.
-
-The VME subsystem supports a single VME driver per 'slot'. There are considered
-to be 32 slots per bus, one for each slot-ID as defined in the ANSI/VITA 1-1994
-specification and are analogious to the physical slots on the VME backplane.
+Here, the 'num' field refers to the sequential device ID for this specific
+driver. The bridge number (or bus number) can be accessed using
+dev->bridge->num.
A function is also provided to unregister the driver from the VME core and is
usually called from the device driver's exit routine:
@@ -59,9 +71,11 @@ usually called from the device driver's exit routine:
Resource management
===================
-Once a driver has registered with the VME core the provided probe routine will
-be called for each of the bus/slot combination that becomes valid as VME buses
-are themselves registered. The probe routine is passed a pointer to the devices
+Once a driver has registered with the VME core the provided match routine will
+be called the number of times specified during the registration. If a match
+succeeds, a non-zero value should be returned. A zero return value indicates
+failure. For all successful matches, the probe routine of the corresponding
+driver is called. The probe routine is passed a pointer to the devices
device structure. This pointer should be saved, it will be required for
requesting VME resources.
@@ -71,13 +85,13 @@ specific window or DMA channel (which may be used by a different driver) this
driver allows a resource to be assigned based on the required attributes of the
driver in question:
- struct vme_resource * vme_master_request(struct device *dev,
+ struct vme_resource * vme_master_request(struct vme_dev *dev,
vme_address_t aspace, vme_cycle_t cycle, vme_width_t width);
- struct vme_resource * vme_slave_request(struct device *dev,
+ struct vme_resource * vme_slave_request(struct vme_dev *dev,
vme_address_t aspace, vme_cycle_t cycle);
- struct vme_resource *vme_dma_request(struct device *dev,
+ struct vme_resource *vme_dma_request(struct vme_dev *dev,
vme_dma_route_t route);
For slave windows these attributes are split into those of type 'vme_address_t'
@@ -301,10 +315,10 @@ status ID combination. Any given combination can only be assigned a single
callback function. A void pointer parameter is provided, the value of which is
passed to the callback function, the use of this pointer is user undefined:
- int vme_irq_request(struct device *dev, int level, int statid,
+ int vme_irq_request(struct vme_dev *dev, int level, int statid,
void (*callback)(int, int, void *), void *priv);
- void vme_irq_free(struct device *dev, int level, int statid);
+ void vme_irq_free(struct vme_dev *dev, int level, int statid);
The callback parameters are as follows. Care must be taken in writing a callback
function, callback functions run in interrupt context:
@@ -318,7 +332,7 @@ Interrupt Generation
The following function can be used to generate a VME interrupt at a given VME
level and VME status ID:
- int vme_irq_generate(struct device *dev, int level, int statid);
+ int vme_irq_generate(struct vme_dev *dev, int level, int statid);
Location monitors
@@ -334,7 +348,7 @@ Location Monitor Management
The following functions are provided to request the use of a block of location
monitors and to free them after they are no longer required:
- struct vme_resource * vme_lm_request(struct device *dev);
+ struct vme_resource * vme_lm_request(struct vme_dev *dev);
void vme_lm_free(struct vme_resource * res);
@@ -380,4 +394,4 @@ Slot Detection
This function returns the slot ID of the provided bridge.
- int vme_slot_get(struct device *dev);
+ int vme_slot_get(struct vme_dev *dev);
diff --git a/drivers/staging/vme/vme_bridge.h b/drivers/staging/vme/vme_bridge.h
index 4c6ec31b01d..c2deda2c38d 100644
--- a/drivers/staging/vme/vme_bridge.h
+++ b/drivers/staging/vme/vme_bridge.h
@@ -2,7 +2,6 @@
#define _VME_BRIDGE_H_
#define VME_CRCSR_BUF_SIZE (508*1024)
-#define VME_SLOTS_MAX 32
/*
* Resource structures
*/
@@ -98,8 +97,6 @@ struct vme_irq {
/* This structure stores all the information about one bridge
* The structure should be dynamically allocated by the driver and one instance
* of the structure should be present for each VME chip present in the system.
- *
- * Currently we assume that all chips are PCI-based
*/
struct vme_bridge {
char name[VMENAMSIZ];
@@ -110,14 +107,12 @@ struct vme_bridge {
struct list_head lm_resources;
struct list_head vme_errors; /* List for errors generated on VME */
+ struct list_head devices; /* List of devices on this bridge */
/* Bridge Info - XXX Move to private structure? */
- struct device *parent; /* Generic device struct (pdev->dev for PCI) */
+ struct device *parent; /* Parent device (eg. pdev->dev for PCI) */
void *driver_priv; /* Private pointer for the bridge driver */
-
- struct device dev[VME_SLOTS_MAX]; /* Device registered with
- * device model on VME bus
- */
+ struct list_head bus_list; /* list of VME buses */
/* Interrupt callbacks */
struct vme_irq irq[7];
@@ -165,6 +160,12 @@ struct vme_bridge {
/* CR/CSR space functions */
int (*slot_get) (struct vme_bridge *);
+
+ /* Bridge parent interface */
+ void *(*alloc_consistent)(struct device *dev, size_t size,
+ dma_addr_t *dma);
+ void (*free_consistent)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma);
};
void vme_irq_handler(struct vme_bridge *, int, int);
diff --git a/drivers/staging/vt6655/IEEE11h.c b/drivers/staging/vt6655/IEEE11h.c
index e07ebd578d4..cf7364d6526 100644
--- a/drivers/staging/vt6655/IEEE11h.c
+++ b/drivers/staging/vt6655/IEEE11h.c
@@ -41,52 +41,52 @@
#include "channel.h"
/*--------------------- Static Definitions -------------------------*/
-static int msglevel =MSG_LEVEL_INFO;
+static int msglevel = MSG_LEVEL_INFO;
#pragma pack(1)
typedef struct _WLAN_FRAME_ACTION {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char abyVars[1];
+ WLAN_80211HDR_A3 Header;
+ unsigned char byCategory;
+ unsigned char byAction;
+ unsigned char abyVars[1];
} WLAN_FRAME_ACTION, *PWLAN_FRAME_ACTION;
typedef struct _WLAN_FRAME_MSRREQ {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char byDialogToken;
- WLAN_IE_MEASURE_REQ sMSRReqEIDs[1];
+ WLAN_80211HDR_A3 Header;
+ unsigned char byCategory;
+ unsigned char byAction;
+ unsigned char byDialogToken;
+ WLAN_IE_MEASURE_REQ sMSRReqEIDs[1];
} WLAN_FRAME_MSRREQ, *PWLAN_FRAME_MSRREQ;
typedef struct _WLAN_FRAME_MSRREP {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char byDialogToken;
- WLAN_IE_MEASURE_REP sMSRRepEIDs[1];
+ WLAN_80211HDR_A3 Header;
+ unsigned char byCategory;
+ unsigned char byAction;
+ unsigned char byDialogToken;
+ WLAN_IE_MEASURE_REP sMSRRepEIDs[1];
} WLAN_FRAME_MSRREP, *PWLAN_FRAME_MSRREP;
typedef struct _WLAN_FRAME_TPCREQ {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char byDialogToken;
- WLAN_IE_TPC_REQ sTPCReqEIDs;
+ WLAN_80211HDR_A3 Header;
+ unsigned char byCategory;
+ unsigned char byAction;
+ unsigned char byDialogToken;
+ WLAN_IE_TPC_REQ sTPCReqEIDs;
} WLAN_FRAME_TPCREQ, *PWLAN_FRAME_TPCREQ;
typedef struct _WLAN_FRAME_TPCREP {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char byDialogToken;
- WLAN_IE_TPC_REP sTPCRepEIDs;
+ WLAN_80211HDR_A3 Header;
+ unsigned char byCategory;
+ unsigned char byAction;
+ unsigned char byDialogToken;
+ WLAN_IE_TPC_REP sTPCRepEIDs;
} WLAN_FRAME_TPCREP, *PWLAN_FRAME_TPCREP;
#pragma pack()
-// action field reference ieee 802.11h Table 20e
+/* action field reference ieee 802.11h Table 20e */
#define ACTION_MSRREQ 0
#define ACTION_MSRREP 1
#define ACTION_TPCREQ 2
@@ -101,84 +101,100 @@ typedef struct _WLAN_FRAME_TPCREP {
static bool s_bRxMSRReq(PSMgmtObject pMgmt, PWLAN_FRAME_MSRREQ pMSRReq,
unsigned int uLength)
{
- size_t uNumOfEIDs = 0;
- bool bResult = true;
-
- if (uLength <= WLAN_A3FR_MAXLEN) {
- memcpy(pMgmt->abyCurrentMSRReq, pMSRReq, uLength);
- }
- uNumOfEIDs = ((uLength - offsetof(WLAN_FRAME_MSRREQ, sMSRReqEIDs))/ (sizeof(WLAN_IE_MEASURE_REQ)));
- pMgmt->pCurrMeasureEIDRep = &(((PWLAN_FRAME_MSRREP) (pMgmt->abyCurrentMSRRep))->sMSRRepEIDs[0]);
- pMgmt->uLengthOfRepEIDs = 0;
- bResult = CARDbStartMeasure(pMgmt->pAdapter,
- ((PWLAN_FRAME_MSRREQ) (pMgmt->abyCurrentMSRReq))->sMSRReqEIDs,
- uNumOfEIDs
- );
- return (bResult);
+ size_t uNumOfEIDs = 0;
+ bool bResult = true;
+
+ if (uLength <= WLAN_A3FR_MAXLEN)
+ memcpy(pMgmt->abyCurrentMSRReq, pMSRReq, uLength);
+ uNumOfEIDs = ((uLength - offsetof(WLAN_FRAME_MSRREQ,
+ sMSRReqEIDs))/
+ (sizeof(WLAN_IE_MEASURE_REQ)));
+ pMgmt->pCurrMeasureEIDRep = &(((PWLAN_FRAME_MSRREP)
+ (pMgmt->abyCurrentMSRRep))->sMSRRepEIDs[0]);
+ pMgmt->uLengthOfRepEIDs = 0;
+ bResult = CARDbStartMeasure(pMgmt->pAdapter,
+ ((PWLAN_FRAME_MSRREQ)
+ (pMgmt->abyCurrentMSRReq))->sMSRReqEIDs,
+ uNumOfEIDs
+ );
+ return bResult;
}
-static bool s_bRxTPCReq(PSMgmtObject pMgmt, PWLAN_FRAME_TPCREQ pTPCReq, unsigned char byRate, unsigned char byRSSI)
+static bool s_bRxTPCReq(PSMgmtObject pMgmt,
+ PWLAN_FRAME_TPCREQ pTPCReq,
+ unsigned char byRate,
+ unsigned char byRSSI)
{
- PWLAN_FRAME_TPCREP pFrame;
- PSTxMgmtPacket pTxPacket = NULL;
-
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
-
- pFrame = (PWLAN_FRAME_TPCREP)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
-
- pFrame->Header.wFrameCtl = ( WLAN_SET_FC_FTYPE(WLAN_FTYPE_MGMT) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ACTION)
- );
-
- memcpy( pFrame->Header.abyAddr1, pTPCReq->Header.abyAddr2, WLAN_ADDR_LEN);
- memcpy( pFrame->Header.abyAddr2, CARDpGetCurrentAddress(pMgmt->pAdapter), WLAN_ADDR_LEN);
- memcpy( pFrame->Header.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- pFrame->byCategory = 0;
- pFrame->byAction = 3;
- pFrame->byDialogToken = ((PWLAN_FRAME_MSRREQ) (pMgmt->abyCurrentMSRReq))->byDialogToken;
-
- pFrame->sTPCRepEIDs.byElementID = WLAN_EID_TPC_REP;
- pFrame->sTPCRepEIDs.len = 2;
- pFrame->sTPCRepEIDs.byTxPower = CARDbyGetTransmitPower(pMgmt->pAdapter);
- switch (byRate) {
- case RATE_54M:
- pFrame->sTPCRepEIDs.byLinkMargin = 65 - byRSSI;
- break;
- case RATE_48M:
- pFrame->sTPCRepEIDs.byLinkMargin = 66 - byRSSI;
- break;
- case RATE_36M:
- pFrame->sTPCRepEIDs.byLinkMargin = 70 - byRSSI;
- break;
- case RATE_24M:
- pFrame->sTPCRepEIDs.byLinkMargin = 74 - byRSSI;
- break;
- case RATE_18M:
- pFrame->sTPCRepEIDs.byLinkMargin = 77 - byRSSI;
- break;
- case RATE_12M:
- pFrame->sTPCRepEIDs.byLinkMargin = 79 - byRSSI;
- break;
- case RATE_9M:
- pFrame->sTPCRepEIDs.byLinkMargin = 81 - byRSSI;
- break;
- case RATE_6M:
- default:
- pFrame->sTPCRepEIDs.byLinkMargin = 82 - byRSSI;
- break;
- }
-
- pTxPacket->cbMPDULen = sizeof(WLAN_FRAME_TPCREP);
- pTxPacket->cbPayloadLen = sizeof(WLAN_FRAME_TPCREP) - WLAN_HDR_ADDR3_LEN;
- if (csMgmt_xmit(pMgmt->pAdapter, pTxPacket) != CMD_STATUS_PENDING)
- return (false);
- return (true);
-// return (CARDbSendPacket(pMgmt->pAdapter, pFrame, PKT_TYPE_802_11_MNG, sizeof(WLAN_FRAME_TPCREP)));
+ PWLAN_FRAME_TPCREP pFrame;
+ PSTxMgmtPacket pTxPacket = NULL;
+
+ pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
+ memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket +
+sizeof(STxMgmtPacket));
+
+ pFrame = (PWLAN_FRAME_TPCREP)((unsigned char *)pTxPacket +
+sizeof(STxMgmtPacket));
+
+ pFrame->Header.wFrameCtl = (WLAN_SET_FC_FTYPE(WLAN_FTYPE_MGMT) |
+ WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ACTION)
+ );
+
+ memcpy(pFrame->Header.abyAddr1,
+ pTPCReq->Header.abyAddr2,
+ WLAN_ADDR_LEN);
+ memcpy(pFrame->Header.abyAddr2,
+ CARDpGetCurrentAddress(pMgmt->pAdapter),
+ WLAN_ADDR_LEN);
+ memcpy(pFrame->Header.abyAddr3,
+ pMgmt->abyCurrBSSID,
+ WLAN_BSSID_LEN);
+
+ pFrame->byCategory = 0;
+ pFrame->byAction = 3;
+ pFrame->byDialogToken = ((PWLAN_FRAME_MSRREQ)
+(pMgmt->abyCurrentMSRReq))->byDialogToken;
+
+ pFrame->sTPCRepEIDs.byElementID = WLAN_EID_TPC_REP;
+ pFrame->sTPCRepEIDs.len = 2;
+ pFrame->sTPCRepEIDs.byTxPower = CARDbyGetTransmitPower(pMgmt->pAdapter);
+ switch (byRate) {
+ case RATE_54M:
+ pFrame->sTPCRepEIDs.byLinkMargin = 65 - byRSSI;
+ break;
+ case RATE_48M:
+ pFrame->sTPCRepEIDs.byLinkMargin = 66 - byRSSI;
+ break;
+ case RATE_36M:
+ pFrame->sTPCRepEIDs.byLinkMargin = 70 - byRSSI;
+ break;
+ case RATE_24M:
+ pFrame->sTPCRepEIDs.byLinkMargin = 74 - byRSSI;
+ break;
+ case RATE_18M:
+ pFrame->sTPCRepEIDs.byLinkMargin = 77 - byRSSI;
+ break;
+ case RATE_12M:
+ pFrame->sTPCRepEIDs.byLinkMargin = 79 - byRSSI;
+ break;
+ case RATE_9M:
+ pFrame->sTPCRepEIDs.byLinkMargin = 81 - byRSSI;
+ break;
+ case RATE_6M:
+ default:
+ pFrame->sTPCRepEIDs.byLinkMargin = 82 - byRSSI;
+ break;
+}
+
+ pTxPacket->cbMPDULen = sizeof(WLAN_FRAME_TPCREP);
+ pTxPacket->cbPayloadLen = sizeof(WLAN_FRAME_TPCREP) -
+WLAN_HDR_ADDR3_LEN;
+ if (csMgmt_xmit(pMgmt->pAdapter, pTxPacket) != CMD_STATUS_PENDING)
+ return false;
+ return true;
+/* return (CARDbSendPacket(pMgmt->pAdapter, pFrame, PKT_TYPE_802_11_MNG,
+sizeof(WLAN_FRAME_TPCREP))); */
}
@@ -204,102 +220,110 @@ static bool s_bRxTPCReq(PSMgmtObject pMgmt, PWLAN_FRAME_TPCREQ pTPCReq, unsigned
*
-*/
bool
-IEEE11hbMgrRxAction (
- void *pMgmtHandle,
- void *pRxPacket
- )
+IEEE11hbMgrRxAction(void *pMgmtHandle, void *pRxPacket)
{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
- PWLAN_FRAME_ACTION pAction = NULL;
- unsigned int uLength = 0;
- PWLAN_IE_CH_SW pChannelSwitch = NULL;
-
-
- // decode the frame
- uLength = ((PSRxMgmtPacket)pRxPacket)->cbMPDULen;
- if (uLength > WLAN_A3FR_MAXLEN) {
- return (false);
- }
-
-
- pAction = (PWLAN_FRAME_ACTION) (((PSRxMgmtPacket)pRxPacket)->p80211Header);
-
- if (pAction->byCategory == 0) {
- switch (pAction->byAction) {
- case ACTION_MSRREQ:
- return (s_bRxMSRReq(pMgmt, (PWLAN_FRAME_MSRREQ) pAction, uLength));
- break;
- case ACTION_MSRREP:
- break;
- case ACTION_TPCREQ:
- return (s_bRxTPCReq(pMgmt,
- (PWLAN_FRAME_TPCREQ) pAction,
- ((PSRxMgmtPacket)pRxPacket)->byRxRate,
- (unsigned char) ((PSRxMgmtPacket)pRxPacket)->uRSSI));
- break;
- case ACTION_TPCREP:
- break;
- case ACTION_CHSW:
- pChannelSwitch = (PWLAN_IE_CH_SW) (pAction->abyVars);
- if ((pChannelSwitch->byElementID == WLAN_EID_CH_SWITCH) &&
- (pChannelSwitch->len == 3)) {
- // valid element id
- CARDbChannelSwitch( pMgmt->pAdapter,
- pChannelSwitch->byMode,
- get_channel_mapping(pMgmt->pAdapter, pChannelSwitch->byChannel, pMgmt->eCurrentPHYMode),
- pChannelSwitch->byCount
- );
- }
- break;
- default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Unknown Action = %d\n", pAction->byAction);
- break;
- }
- } else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Unknown Category = %d\n", pAction->byCategory);
- pAction->byCategory |= 0x80;
-
- //return (CARDbSendPacket(pMgmt->pAdapter, pAction, PKT_TYPE_802_11_MNG, uLength));
- return (true);
- }
- return (true);
+ PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
+ PWLAN_FRAME_ACTION pAction = NULL;
+ unsigned int uLength = 0;
+ PWLAN_IE_CH_SW pChannelSwitch = NULL;
+
+ /* decode the frame */
+ uLength = ((PSRxMgmtPacket)pRxPacket)->cbMPDULen;
+ if (uLength > WLAN_A3FR_MAXLEN)
+ return false;
+
+ pAction = (PWLAN_FRAME_ACTION)
+(((PSRxMgmtPacket)pRxPacket)->p80211Header);
+
+ if (pAction->byCategory == 0) {
+ switch (pAction->byAction) {
+ case ACTION_MSRREQ:
+ return s_bRxMSRReq(pMgmt,
+ (PWLAN_FRAME_MSRREQ)
+ pAction,
+ uLength);
+ break;
+ case ACTION_MSRREP:
+ break;
+ case ACTION_TPCREQ:
+ return s_bRxTPCReq(pMgmt,
+ (PWLAN_FRAME_TPCREQ) pAction,
+ ((PSRxMgmtPacket)pRxPacket)->byRxRate,
+ (unsigned char)
+ ((PSRxMgmtPacket)pRxPacket)->uRSSI);
+ break;
+ case ACTION_TPCREP:
+ break;
+ case ACTION_CHSW:
+ pChannelSwitch = (PWLAN_IE_CH_SW) (pAction->abyVars);
+ if ((pChannelSwitch->byElementID == WLAN_EID_CH_SWITCH)
+ && (pChannelSwitch->len == 3)) {
+ /* valid element id */
+ CARDbChannelSwitch(pMgmt->pAdapter,
+ pChannelSwitch->byMode,
+ get_channel_mapping(pMgmt->pAdapter,
+ pChannelSwitch->byChannel,
+ pMgmt->eCurrentPHYMode),
+ pChannelSwitch->byCount);
+ }
+ break;
+ default:
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO"Unknown Action = %d\n",
+ pAction->byAction);
+ break;
+ }
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Unknown Category = %d\n",
+pAction->byCategory);
+ pAction->byCategory |= 0x80;
+
+ /*return (CARDbSendPacket(pMgmt->pAdapter, pAction, PKT_TYPE_802_11_MNG,
+uLength));*/
+ return true;
+ }
+ return true;
}
-bool IEEE11hbMSRRepTx (
- void *pMgmtHandle
- )
+bool IEEE11hbMSRRepTx(void *pMgmtHandle)
{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
- PWLAN_FRAME_MSRREP pMSRRep = (PWLAN_FRAME_MSRREP) (pMgmt->abyCurrentMSRRep + sizeof(STxMgmtPacket));
- size_t uLength = 0;
- PSTxMgmtPacket pTxPacket = NULL;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->abyCurrentMSRRep;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
-
-
- pMSRRep->Header.wFrameCtl = ( WLAN_SET_FC_FTYPE(WLAN_FTYPE_MGMT) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ACTION)
- );
-
- memcpy( pMSRRep->Header.abyAddr1, ((PWLAN_FRAME_MSRREQ) (pMgmt->abyCurrentMSRReq))->Header.abyAddr2, WLAN_ADDR_LEN);
- memcpy( pMSRRep->Header.abyAddr2, CARDpGetCurrentAddress(pMgmt->pAdapter), WLAN_ADDR_LEN);
- memcpy( pMSRRep->Header.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- pMSRRep->byCategory = 0;
- pMSRRep->byAction = 1;
- pMSRRep->byDialogToken = ((PWLAN_FRAME_MSRREQ) (pMgmt->abyCurrentMSRReq))->byDialogToken;
-
- uLength = pMgmt->uLengthOfRepEIDs + offsetof(WLAN_FRAME_MSRREP, sMSRRepEIDs);
-
- pTxPacket->cbMPDULen = uLength;
- pTxPacket->cbPayloadLen = uLength - WLAN_HDR_ADDR3_LEN;
- if (csMgmt_xmit(pMgmt->pAdapter, pTxPacket) != CMD_STATUS_PENDING)
- return (false);
- return (true);
-// return (CARDbSendPacket(pMgmt->pAdapter, pMSRRep, PKT_TYPE_802_11_MNG, uLength));
+ PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
+ PWLAN_FRAME_MSRREP pMSRRep = (PWLAN_FRAME_MSRREP)
+(pMgmt->abyCurrentMSRRep + sizeof(STxMgmtPacket));
+ size_t uLength = 0;
+ PSTxMgmtPacket pTxPacket = NULL;
+
+ pTxPacket = (PSTxMgmtPacket)pMgmt->abyCurrentMSRRep;
+ memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket +
+sizeof(STxMgmtPacket));
+
+ pMSRRep->Header.wFrameCtl = (WLAN_SET_FC_FTYPE(WLAN_FTYPE_MGMT) |
+ WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ACTION)
+ );
+
+ memcpy(pMSRRep->Header.abyAddr1, ((PWLAN_FRAME_MSRREQ)
+ (pMgmt->abyCurrentMSRReq))->Header.abyAddr2, WLAN_ADDR_LEN);
+ memcpy(pMSRRep->Header.abyAddr2,
+ CARDpGetCurrentAddress(pMgmt->pAdapter), WLAN_ADDR_LEN);
+ memcpy(pMSRRep->Header.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
+
+ pMSRRep->byCategory = 0;
+ pMSRRep->byAction = 1;
+ pMSRRep->byDialogToken = ((PWLAN_FRAME_MSRREQ)
+ (pMgmt->abyCurrentMSRReq))->byDialogToken;
+
+ uLength = pMgmt->uLengthOfRepEIDs + offsetof(WLAN_FRAME_MSRREP,
+ sMSRRepEIDs);
+
+ pTxPacket->cbMPDULen = uLength;
+ pTxPacket->cbPayloadLen = uLength - WLAN_HDR_ADDR3_LEN;
+ if (csMgmt_xmit(pMgmt->pAdapter, pTxPacket) != CMD_STATUS_PENDING)
+ return false;
+ return true;
+/* return (CARDbSendPacket(pMgmt->pAdapter, pMSRRep, PKT_TYPE_802_11_MNG,
+uLength)); */
}
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 3d2a9ba16b1..d8dd7846447 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -815,14 +815,8 @@ else CARDbRadioPowerOn(pDevice);
pMgmt->eScanType = WMAC_SCAN_PASSIVE;
// get Permanent network address
SROMvReadEtherAddress(pDevice->PortOffset, pDevice->abyCurrentNetAddr);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Network address = %02x-%02x-%02x=%02x-%02x-%02x\n",
- pDevice->abyCurrentNetAddr[0],
- pDevice->abyCurrentNetAddr[1],
- pDevice->abyCurrentNetAddr[2],
- pDevice->abyCurrentNetAddr[3],
- pDevice->abyCurrentNetAddr[4],
- pDevice->abyCurrentNetAddr[5]);
-
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Network address = %pM\n",
+ pDevice->abyCurrentNetAddr);
// reset Tx pointer
CARDvSafeResetRx(pDevice);
@@ -911,7 +905,7 @@ static const struct net_device_ops device_netdev_ops = {
.ndo_do_ioctl = device_ioctl,
.ndo_get_stats = device_get_stats,
.ndo_start_xmit = device_xmit,
- .ndo_set_multicast_list = device_set_multi,
+ .ndo_set_rx_mode = device_set_multi,
};
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index cf0deacd9da..c0fab4bc870 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -1107,30 +1107,12 @@ static bool s_bAPModeRxCtl (
&Status
);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: send vMgrDeAuthenBeginSta 3\n");
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BSSID:%02x-%02x-%02x=%02x-%02x-%02x \n",
- p802_11Header->abyAddr3[0],
- p802_11Header->abyAddr3[1],
- p802_11Header->abyAddr3[2],
- p802_11Header->abyAddr3[3],
- p802_11Header->abyAddr3[4],
- p802_11Header->abyAddr3[5]
- );
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ADDR2:%02x-%02x-%02x=%02x-%02x-%02x \n",
- p802_11Header->abyAddr2[0],
- p802_11Header->abyAddr2[1],
- p802_11Header->abyAddr2[2],
- p802_11Header->abyAddr2[3],
- p802_11Header->abyAddr2[4],
- p802_11Header->abyAddr2[5]
- );
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ADDR1:%02x-%02x-%02x=%02x-%02x-%02x \n",
- p802_11Header->abyAddr1[0],
- p802_11Header->abyAddr1[1],
- p802_11Header->abyAddr1[2],
- p802_11Header->abyAddr1[3],
- p802_11Header->abyAddr1[4],
- p802_11Header->abyAddr1[5]
- );
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BSSID:%pM\n",
+ p802_11Header->abyAddr3);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ADDR2:%pM\n",
+ p802_11Header->abyAddr2);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ADDR1:%pM\n",
+ p802_11Header->abyAddr1);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: wFrameCtl= %x\n", p802_11Header->wFrameCtl );
VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc:pDevice->byRxMode = %x\n", pDevice->byRxMode );
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index 8cf88c3b68d..432a20993c6 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -38,715 +38,617 @@
#include "wpactl.h"
#include "rf.h"
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-//static int msglevel =MSG_LEVEL_DEBUG;
-static int msglevel =MSG_LEVEL_INFO;
+static int msglevel = MSG_LEVEL_INFO;
#ifdef WPA_SM_Transtatus
- SWPAResult wpa_Result;
+ SWPAResult wpa_Result;
#endif
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
-
- PSCmdRequest pReq = (PSCmdRequest)rq;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- int result = 0;
- PWLAN_IE_SSID pItemSSID;
- SCmdBSSJoin sJoinCmd;
- SCmdZoneTypeSet sZoneTypeCmd;
- SCmdScan sScanCmd;
- SCmdStartAP sStartAPCmd;
- SCmdSetWEP sWEPCmd;
- SCmdValue sValue;
- SBSSIDList sList;
- SNodeList sNodeList;
- PSBSSIDList pList;
- PSNodeList pNodeList;
- unsigned int cbListCount;
- PKnownBSS pBSS;
- PKnownNodeDB pNode;
- unsigned int ii, jj;
- SCmdLinkStatus sLinkStatus;
- unsigned char abySuppRates[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
- unsigned char abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- unsigned long dwKeyIndex= 0;
- unsigned char abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- long ldBm;
-
- pReq->wResult = 0;
-
- switch (pReq->wCmdCode) {
-
- case WLAN_CMD_BSS_SCAN:
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_BSS_SCAN..begin \n");
- if (copy_from_user(&sScanCmd, pReq->data, sizeof(SCmdScan))) {
+int private_ioctl(PSDevice pDevice, struct ifreq *rq)
+{
+ PSCmdRequest pReq = (PSCmdRequest)rq;
+ PSMgmtObject pMgmt = pDevice->pMgmt;
+ int result = 0;
+ PWLAN_IE_SSID pItemSSID;
+ SCmdBSSJoin sJoinCmd;
+ SCmdZoneTypeSet sZoneTypeCmd;
+ SCmdScan sScanCmd;
+ SCmdStartAP sStartAPCmd;
+ SCmdSetWEP sWEPCmd;
+ SCmdValue sValue;
+ SBSSIDList sList;
+ SNodeList sNodeList;
+ PSBSSIDList pList;
+ PSNodeList pNodeList;
+ unsigned int cbListCount;
+ PKnownBSS pBSS;
+ PKnownNodeDB pNode;
+ unsigned int ii, jj;
+ SCmdLinkStatus sLinkStatus;
+ unsigned char abySuppRates[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
+ unsigned char abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ unsigned long dwKeyIndex = 0;
+ unsigned char abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ long ldBm;
+
+ pReq->wResult = 0;
+
+ switch (pReq->wCmdCode) {
+ case WLAN_CMD_BSS_SCAN:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_BSS_SCAN..begin\n");
+ if (copy_from_user(&sScanCmd, pReq->data, sizeof(SCmdScan))) {
result = -EFAULT;
break;
}
- pItemSSID = (PWLAN_IE_SSID)sScanCmd.ssid;
- if (pItemSSID->len != 0) {
- memset(abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(abyScanSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
- }
-
- if (pDevice->bMACSuspend == true) {
- if (pDevice->bRadioOff == true)
- CARDbRadioPowerOn(pDevice);
- vMgrTimerInit(pDevice);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- add_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bMACSuspend = false;
- }
- spin_lock_irq(&pDevice->lock);
- if (memcmp(pMgmt->abyCurrBSSID, &abyNullAddr[0], 6) == 0)
- BSSvClearBSSList((void *)pDevice, false);
- else
- BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
-
- if (pItemSSID->len != 0)
- bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
- else
- bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case WLAN_CMD_ZONETYPE_SET:
- //mike add :cann't support.
- result=-EOPNOTSUPP;
- break;
-
- if (copy_from_user(&sZoneTypeCmd, pReq->data, sizeof(SCmdZoneTypeSet))) {
- result = -EFAULT;
- break;
+ pItemSSID = (PWLAN_IE_SSID)sScanCmd.ssid;
+ if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
+ return -EINVAL;
+ if (pItemSSID->len != 0) {
+ memset(abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ memcpy(abyScanSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
+ }
+
+ if (pDevice->bMACSuspend == true) {
+ if (pDevice->bRadioOff == true)
+ CARDbRadioPowerOn(pDevice);
+ vMgrTimerInit(pDevice);
+ MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
+ add_timer(&pMgmt->sTimerSecondCallback);
+ pDevice->bMACSuspend = false;
}
+ spin_lock_irq(&pDevice->lock);
+ if (memcmp(pMgmt->abyCurrBSSID, &abyNullAddr[0], 6) == 0)
+ BSSvClearBSSList((void *)pDevice, false);
+ else
+ BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
+
+ if (pItemSSID->len != 0)
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
+ else
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
+ spin_unlock_irq(&pDevice->lock);
+ break;
- if(sZoneTypeCmd.bWrite==true) {
- //////write zonetype
- if(sZoneTypeCmd.ZoneType == ZoneType_USA) {
- //set to USA
- printk("set_ZoneType:USA\n");
- }
- else if(sZoneTypeCmd.ZoneType == ZoneType_Japan) {
- //set to Japan
- printk("set_ZoneType:Japan\n");
- }
- else if(sZoneTypeCmd.ZoneType == ZoneType_Europe) {
- //set to Europe
- printk("set_ZoneType:Europe\n");
- }
- }
- else {
- ///////read zonetype
- unsigned char zonetype=0;
-
-
- if(zonetype == 0x00) { //USA
- sZoneTypeCmd.ZoneType = ZoneType_USA;
- }
- else if(zonetype == 0x01) { //Japan
- sZoneTypeCmd.ZoneType = ZoneType_Japan;
- }
- else if(zonetype == 0x02) { //Europe
- sZoneTypeCmd.ZoneType = ZoneType_Europe;
- }
- else { //Unknown ZoneType
- printk("Error:ZoneType[%x] Unknown ???\n",zonetype);
- result = -EFAULT;
+ case WLAN_CMD_ZONETYPE_SET:
+ /* mike add :cann't support. */
+ result = -EOPNOTSUPP;
break;
- }
- if (copy_to_user(pReq->data, &sZoneTypeCmd, sizeof(SCmdZoneTypeSet))) {
+
+ if (copy_from_user(&sZoneTypeCmd, pReq->data, sizeof(SCmdZoneTypeSet))) {
result = -EFAULT;
break;
}
- }
- break;
-
- case WLAN_CMD_BSS_JOIN:
+ if (sZoneTypeCmd.bWrite == true) {
+ /* write zonetype */
+ if (sZoneTypeCmd.ZoneType == ZoneType_USA) {
+ /* set to USA */
+ printk("set_ZoneType:USA\n");
+ } else if (sZoneTypeCmd.ZoneType == ZoneType_Japan) {
+ /* set to Japan */
+ printk("set_ZoneType:Japan\n");
+ } else if (sZoneTypeCmd.ZoneType == ZoneType_Europe) {
+ /* set to Europe */
+ printk("set_ZoneType:Europe\n");
+ }
+ } else {
+ /* read zonetype */
+ unsigned char zonetype = 0;
+
+ if (zonetype == 0x00) { /* USA */
+ sZoneTypeCmd.ZoneType = ZoneType_USA;
+ } else if (zonetype == 0x01) { /* Japan */
+ sZoneTypeCmd.ZoneType = ZoneType_Japan;
+ } else if (zonetype == 0x02) { /* Europe */
+ sZoneTypeCmd.ZoneType = ZoneType_Europe;
+ } else { /* Unknown ZoneType */
+ printk("Error:ZoneType[%x] Unknown ???\n", zonetype);
+ result = -EFAULT;
+ break;
+ }
+ if (copy_to_user(pReq->data, &sZoneTypeCmd, sizeof(SCmdZoneTypeSet))) {
+ result = -EFAULT;
+ break;
+ }
+ }
+ break;
- if (pDevice->bMACSuspend == true) {
- if (pDevice->bRadioOff == true)
- CARDbRadioPowerOn(pDevice);
- vMgrTimerInit(pDevice);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- add_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bMACSuspend = false;
- }
+ case WLAN_CMD_BSS_JOIN:
+ if (pDevice->bMACSuspend == true) {
+ if (pDevice->bRadioOff == true)
+ CARDbRadioPowerOn(pDevice);
+ vMgrTimerInit(pDevice);
+ MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
+ add_timer(&pMgmt->sTimerSecondCallback);
+ pDevice->bMACSuspend = false;
+ }
- if (copy_from_user(&sJoinCmd, pReq->data, sizeof(SCmdBSSJoin))) {
+ if (copy_from_user(&sJoinCmd, pReq->data, sizeof(SCmdBSSJoin))) {
result = -EFAULT;
break;
}
- pItemSSID = (PWLAN_IE_SSID)sJoinCmd.ssid;
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ pItemSSID = (PWLAN_IE_SSID)sJoinCmd.ssid;
+ if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
+ return -EINVAL;
+ memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
memcpy(pMgmt->abyDesireSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
- if (sJoinCmd.wBSSType == ADHOC) {
- pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to adhoc mode\n");
- }
- else {
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to STA mode\n");
- }
- if (sJoinCmd.bPSEnable == true) {
- pDevice->ePSMode = WMAC_POWER_FAST;
-// pDevice->ePSMode = WMAC_POWER_MAX;
- pMgmt->wListenInterval = 2;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Power Saving On\n");
- }
- else {
- pDevice->ePSMode = WMAC_POWER_CAM;
- pMgmt->wListenInterval = 1;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Power Saving Off \n");
- }
-
- if (sJoinCmd.bShareKeyAuth == true){
- pMgmt->bShareKeyAlgorithm = true;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Share Key \n");
- }
- else {
- pMgmt->bShareKeyAlgorithm = false;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Open System \n");
- }
- pDevice->uChannel = sJoinCmd.uChannel;
- netif_stop_queue(pDevice->dev);
- spin_lock_irq(&pDevice->lock);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case WLAN_CMD_SET_WEP:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_WEP Key. \n");
- memset(&sWEPCmd, 0 ,sizeof(SCmdSetWEP));
- if (copy_from_user(&sWEPCmd, pReq->data, sizeof(SCmdSetWEP))) {
+ if (sJoinCmd.wBSSType == ADHOC) {
+ pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to adhoc mode\n");
+ } else {
+ pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to STA mode\n");
+ }
+ if (sJoinCmd.bPSEnable == true) {
+ pDevice->ePSMode = WMAC_POWER_FAST;
+ pMgmt->wListenInterval = 2;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Power Saving On\n");
+ } else {
+ pDevice->ePSMode = WMAC_POWER_CAM;
+ pMgmt->wListenInterval = 1;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Power Saving Off\n");
+ }
+
+ if (sJoinCmd.bShareKeyAuth == true) {
+ pMgmt->bShareKeyAlgorithm = true;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Share Key\n");
+ } else {
+ pMgmt->bShareKeyAlgorithm = false;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Open System\n");
+ }
+ pDevice->uChannel = sJoinCmd.uChannel;
+ netif_stop_queue(pDevice->dev);
+ spin_lock_irq(&pDevice->lock);
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
+ spin_unlock_irq(&pDevice->lock);
+ break;
+
+ case WLAN_CMD_SET_WEP:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_WEP Key.\n");
+ memset(&sWEPCmd, 0, sizeof(SCmdSetWEP));
+ if (copy_from_user(&sWEPCmd, pReq->data, sizeof(SCmdSetWEP))) {
result = -EFAULT;
break;
}
- if (sWEPCmd.bEnableWep != true) {
- pDevice->bEncryptionEnable = false;
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- MACvDisableDefaultKey(pDevice->PortOffset);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WEP function disable. \n");
- break;
- }
-
- for (ii = 0; ii < WLAN_WEP_NKEYS; ii ++) {
- if (sWEPCmd.bWepKeyAvailable[ii]) {
- if (ii == sWEPCmd.byKeyIndex)
- //2006-1123-02,<Modify> by EinsnLiu
- //Evaluate the "dwKeyIndex" error
- // dwKeyIndex |= (1 << 31);
- dwKeyIndex =ii|(1 << 31);
- else
- dwKeyIndex = ii;
-
- KeybSetDefaultKey(&(pDevice->sKey),
- dwKeyIndex,
- sWEPCmd.auWepKeyLength[ii],
- NULL,
- (unsigned char *)&sWEPCmd.abyWepKey[ii][0],
- KEY_CTL_WEP,
- pDevice->PortOffset,
- pDevice->byLocalID);
- }
- }
- pDevice->byKeyIndex = sWEPCmd.byKeyIndex;
- pDevice->bTransmitKey = true;
- pDevice->bEncryptionEnable = true;
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
-
- break;
-
- case WLAN_CMD_GET_LINK:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_GET_LINK status. \n");
-
- memset(sLinkStatus.abySSID, 0 , WLAN_SSID_MAXLEN + 1);
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
- sLinkStatus.wBSSType = ADHOC;
- else
- sLinkStatus.wBSSType = INFRA;
-
- if (pMgmt->eCurrState == WMAC_STATE_JOINTED)
- sLinkStatus.byState = ADHOC_JOINTED;
- else
- sLinkStatus.byState = ADHOC_STARTED;
-
- sLinkStatus.uChannel = pMgmt->uCurrChannel;
- if (pDevice->bLinkPass == true) {
- sLinkStatus.bLink = true;
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- memcpy(sLinkStatus.abySSID, pItemSSID->abySSID, pItemSSID->len);
- memcpy(sLinkStatus.abyBSSID, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- sLinkStatus.uLinkRate = pMgmt->sNodeDBTable[0].wTxDataRate;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Link Success ! \n");
- }
- else {
- sLinkStatus.bLink = false;
- }
- if (copy_to_user(pReq->data, &sLinkStatus, sizeof(SCmdLinkStatus))) {
- result = -EFAULT;
+ if (sWEPCmd.bEnableWep != true) {
+ pDevice->bEncryptionEnable = false;
+ pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
+ MACvDisableDefaultKey(pDevice->PortOffset);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WEP function disable.\n");
break;
}
- break;
+ for (ii = 0; ii < WLAN_WEP_NKEYS; ii++) {
+ if (sWEPCmd.bWepKeyAvailable[ii]) {
+ if (ii == sWEPCmd.byKeyIndex)
+ dwKeyIndex = ii | (1 << 31);
+ else
+ dwKeyIndex = ii;
+
+ KeybSetDefaultKey(&(pDevice->sKey),
+ dwKeyIndex,
+ sWEPCmd.auWepKeyLength[ii],
+ NULL,
+ (unsigned char *)&sWEPCmd.abyWepKey[ii][0],
+ KEY_CTL_WEP,
+ pDevice->PortOffset,
+ pDevice->byLocalID);
+ }
+ }
+ pDevice->byKeyIndex = sWEPCmd.byKeyIndex;
+ pDevice->bTransmitKey = true;
+ pDevice->bEncryptionEnable = true;
+ pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
+ break;
- case WLAN_CMD_GET_LISTLEN:
+ case WLAN_CMD_GET_LINK:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_GET_LINK status.\n");
+
+ memset(sLinkStatus.abySSID, 0 , WLAN_SSID_MAXLEN + 1);
+
+ if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
+ sLinkStatus.wBSSType = ADHOC;
+ else
+ sLinkStatus.wBSSType = INFRA;
+
+ if (pMgmt->eCurrState == WMAC_STATE_JOINTED)
+ sLinkStatus.byState = ADHOC_JOINTED;
+ else
+ sLinkStatus.byState = ADHOC_STARTED;
+
+ sLinkStatus.uChannel = pMgmt->uCurrChannel;
+ if (pDevice->bLinkPass == true) {
+ sLinkStatus.bLink = true;
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
+ memcpy(sLinkStatus.abySSID, pItemSSID->abySSID, pItemSSID->len);
+ memcpy(sLinkStatus.abyBSSID, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
+ sLinkStatus.uLinkRate = pMgmt->sNodeDBTable[0].wTxDataRate;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Link Success!\n");
+ } else {
+ sLinkStatus.bLink = false;
+ sLinkStatus.uLinkRate = 0;
+ }
+ if (copy_to_user(pReq->data, &sLinkStatus, sizeof(SCmdLinkStatus))) {
+ result = -EFAULT;
+ break;
+ }
+ break;
+
+ case WLAN_CMD_GET_LISTLEN:
cbListCount = 0;
pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (!pBSS->bActive)
- continue;
- cbListCount++;
- }
- sList.uItem = cbListCount;
- if (copy_to_user(pReq->data, &sList, sizeof(SBSSIDList))) {
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ pBSS = &(pMgmt->sBSSList[ii]);
+ if (!pBSS->bActive)
+ continue;
+ cbListCount++;
+ }
+ sList.uItem = cbListCount;
+ if (copy_to_user(pReq->data, &sList, sizeof(SBSSIDList))) {
result = -EFAULT;
break;
}
- pReq->wResult = 0;
- break;
+ pReq->wResult = 0;
+ break;
- case WLAN_CMD_GET_LIST:
- if (copy_from_user(&sList, pReq->data, sizeof(SBSSIDList))) {
+ case WLAN_CMD_GET_LIST:
+ if (copy_from_user(&sList, pReq->data, sizeof(SBSSIDList))) {
result = -EFAULT;
break;
}
- pList = (PSBSSIDList)kmalloc(sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)), (int)GFP_ATOMIC);
- if (pList == NULL) {
- result = -ENOMEM;
- break;
- }
+ pList = (PSBSSIDList)kmalloc(sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)), (int)GFP_ATOMIC);
+ if (pList == NULL) {
+ result = -ENOMEM;
+ break;
+ }
pList->uItem = sList.uItem;
pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0, jj = 0; jj < MAX_BSS_NUM ; jj++) {
- pBSS = &(pMgmt->sBSSList[jj]);
- if (pBSS->bActive) {
- pList->sBSSIDList[ii].uChannel = pBSS->uChannel;
- pList->sBSSIDList[ii].wBeaconInterval = pBSS->wBeaconInterval;
- pList->sBSSIDList[ii].wCapInfo = pBSS->wCapInfo;
-// pList->sBSSIDList[ii].uRSSI = pBSS->uRSSI;
- RFvRSSITodBm(pDevice, (unsigned char)(pBSS->uRSSI), &ldBm);
- pList->sBSSIDList[ii].uRSSI = (unsigned int)ldBm;
- memcpy(pList->sBSSIDList[ii].abyBSSID, pBSS->abyBSSID, WLAN_BSSID_LEN);
- pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
- memset(pList->sBSSIDList[ii].abySSID, 0, WLAN_SSID_MAXLEN + 1);
- memcpy(pList->sBSSIDList[ii].abySSID, pItemSSID->abySSID, pItemSSID->len);
- if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo)) {
- pList->sBSSIDList[ii].byNetType = INFRA;
- }
- else {
- pList->sBSSIDList[ii].byNetType = ADHOC;
- }
- if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo)) {
- pList->sBSSIDList[ii].bWEPOn = true;
- }
- else {
- pList->sBSSIDList[ii].bWEPOn = false;
- }
- ii ++;
- if (ii >= pList->uItem)
- break;
- }
- }
-
- if (copy_to_user(pReq->data, pList, sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)))) {
+ for (ii = 0, jj = 0; jj < MAX_BSS_NUM ; jj++) {
+ pBSS = &(pMgmt->sBSSList[jj]);
+ if (pBSS->bActive) {
+ pList->sBSSIDList[ii].uChannel = pBSS->uChannel;
+ pList->sBSSIDList[ii].wBeaconInterval = pBSS->wBeaconInterval;
+ pList->sBSSIDList[ii].wCapInfo = pBSS->wCapInfo;
+ /* pList->sBSSIDList[ii].uRSSI = pBSS->uRSSI; */
+ RFvRSSITodBm(pDevice, (unsigned char)(pBSS->uRSSI), &ldBm);
+ pList->sBSSIDList[ii].uRSSI = (unsigned int)ldBm;
+ memcpy(pList->sBSSIDList[ii].abyBSSID, pBSS->abyBSSID, WLAN_BSSID_LEN);
+ pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
+ memset(pList->sBSSIDList[ii].abySSID, 0, WLAN_SSID_MAXLEN + 1);
+ memcpy(pList->sBSSIDList[ii].abySSID, pItemSSID->abySSID, pItemSSID->len);
+ if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo)) {
+ pList->sBSSIDList[ii].byNetType = INFRA;
+ } else {
+ pList->sBSSIDList[ii].byNetType = ADHOC;
+ }
+ if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo)) {
+ pList->sBSSIDList[ii].bWEPOn = true;
+ } else {
+ pList->sBSSIDList[ii].bWEPOn = false;
+ }
+ ii++;
+ if (ii >= pList->uItem)
+ break;
+ }
+ }
+
+ if (copy_to_user(pReq->data, pList, sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)))) {
result = -EFAULT;
break;
}
- kfree(pList);
- pReq->wResult = 0;
- break;
+ kfree(pList);
+ pReq->wResult = 0;
+ break;
- case WLAN_CMD_GET_MIB:
- if (copy_to_user(pReq->data, &(pDevice->s802_11Counter), sizeof(SDot11MIBCount))) {
+ case WLAN_CMD_GET_MIB:
+ if (copy_to_user(pReq->data, &(pDevice->s802_11Counter), sizeof(SDot11MIBCount))) {
result = -EFAULT;
break;
}
- break;
+ break;
- case WLAN_CMD_GET_STAT:
- if (copy_to_user(pReq->data, &(pDevice->scStatistic), sizeof(SStatCounter))) {
+ case WLAN_CMD_GET_STAT:
+ if (copy_to_user(pReq->data, &(pDevice->scStatistic), sizeof(SStatCounter))) {
result = -EFAULT;
break;
}
- break;
- case WLAN_CMD_STOP_MAC:
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_STOP_MAC\n");
- netif_stop_queue(pDevice->dev);
-
- spin_lock_irq(&pDevice->lock);
- if (pDevice->bRadioOff == false) {
- CARDbRadioPowerOff(pDevice);
- }
- pDevice->bLinkPass = false;
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- del_timer(&pDevice->sTimerCommand);
- del_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bCmdRunning = false;
- pDevice->bMACSuspend = true;
- MACvIntDisable(pDevice->PortOffset);
- spin_unlock_irq(&pDevice->lock);
-
- break;
+ break;
- case WLAN_CMD_START_MAC:
+ case WLAN_CMD_STOP_MAC:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_STOP_MAC\n");
+ netif_stop_queue(pDevice->dev);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_START_MAC\n");
+ spin_lock_irq(&pDevice->lock);
+ if (pDevice->bRadioOff == false) {
+ CARDbRadioPowerOff(pDevice);
+ }
+ pDevice->bLinkPass = false;
+ memset(pMgmt->abyCurrBSSID, 0, 6);
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ del_timer(&pDevice->sTimerCommand);
+ del_timer(&pMgmt->sTimerSecondCallback);
+ pDevice->bCmdRunning = false;
+ pDevice->bMACSuspend = true;
+ MACvIntDisable(pDevice->PortOffset);
+ spin_unlock_irq(&pDevice->lock);
+ break;
- if (pDevice->bMACSuspend == true) {
- if (pDevice->bRadioOff == true)
- CARDbRadioPowerOn(pDevice);
- vMgrTimerInit(pDevice);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- add_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bMACSuspend = false;
- }
- break;
+ case WLAN_CMD_START_MAC:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_START_MAC\n");
- case WLAN_CMD_SET_HOSTAPD:
+ if (pDevice->bMACSuspend == true) {
+ if (pDevice->bRadioOff == true)
+ CARDbRadioPowerOn(pDevice);
+ vMgrTimerInit(pDevice);
+ MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
+ add_timer(&pMgmt->sTimerSecondCallback);
+ pDevice->bMACSuspend = false;
+ }
+ break;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOSTAPD\n");
+ case WLAN_CMD_SET_HOSTAPD:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOSTAPD\n");
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
+ if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
result = -EFAULT;
break;
}
if (sValue.dwValue == 1) {
- if (vt6655_hostap_set_hostapd(pDevice, 1, 1) == 0){
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable HOSTAP\n");
- }
- else {
- result = -EFAULT;
- break;
+ if (vt6655_hostap_set_hostapd(pDevice, 1, 1) == 0) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable HOSTAP\n");
+ } else {
+ result = -EFAULT;
+ break;
}
- }
- else {
- vt6655_hostap_set_hostapd(pDevice, 0, 1);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable HOSTAP\n");
- }
-
- break;
-
- case WLAN_CMD_SET_HOSTAPD_STA:
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOSTAPD_STA\n");
+ } else {
+ vt6655_hostap_set_hostapd(pDevice, 0, 1);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable HOSTAP\n");
+ }
+ break;
- break;
- case WLAN_CMD_SET_802_1X:
+ case WLAN_CMD_SET_HOSTAPD_STA:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOSTAPD_STA\n");
+ break;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_802_1X\n");
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
+ case WLAN_CMD_SET_802_1X:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_802_1X\n");
+ if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
result = -EFAULT;
break;
}
if (sValue.dwValue == 1) {
- pDevice->bEnable8021x = true;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable 802.1x\n");
- }
- else {
- pDevice->bEnable8021x = false;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable 802.1x\n");
- }
-
- break;
-
-
- case WLAN_CMD_SET_HOST_WEP:
+ pDevice->bEnable8021x = true;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable 802.1x\n");
+ } else {
+ pDevice->bEnable8021x = false;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable 802.1x\n");
+ }
+ break;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOST_WEP\n");
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
+ case WLAN_CMD_SET_HOST_WEP:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOST_WEP\n");
+ if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
result = -EFAULT;
break;
}
if (sValue.dwValue == 1) {
- pDevice->bEnableHostWEP = true;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable HostWEP\n");
- }
- else {
- pDevice->bEnableHostWEP = false;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable HostWEP\n");
- }
-
- break;
+ pDevice->bEnableHostWEP = true;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable HostWEP\n");
+ } else {
+ pDevice->bEnableHostWEP = false;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable HostWEP\n");
+ }
+ break;
- case WLAN_CMD_SET_WPA:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_WPA\n");
+ case WLAN_CMD_SET_WPA:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_WPA\n");
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
+ if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
result = -EFAULT;
break;
}
if (sValue.dwValue == 1) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n");
- memcpy(pDevice->wpadev->dev_addr, pDevice->dev->dev_addr, ETH_ALEN);
- pDevice->bWPADEVUp = true;
- }
- else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "close wpadev\n");
- pDevice->bWPADEVUp = false;
- }
-
- break;
-
- case WLAN_CMD_AP_START:
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_AP_START\n");
- if (pDevice->bRadioOff == true) {
- CARDbRadioPowerOn(pDevice);
- vMgrTimerInit(pDevice);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- add_timer(&pMgmt->sTimerSecondCallback);
- }
- if (copy_from_user(&sStartAPCmd, pReq->data, sizeof(SCmdStartAP))) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n");
+ memcpy(pDevice->wpadev->dev_addr, pDevice->dev->dev_addr, ETH_ALEN);
+ pDevice->bWPADEVUp = true;
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "close wpadev\n");
+ pDevice->bWPADEVUp = false;
+ }
+ break;
+
+ case WLAN_CMD_AP_START:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_AP_START\n");
+ if (pDevice->bRadioOff == true) {
+ CARDbRadioPowerOn(pDevice);
+ vMgrTimerInit(pDevice);
+ MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
+ add_timer(&pMgmt->sTimerSecondCallback);
+ }
+ if (copy_from_user(&sStartAPCmd, pReq->data, sizeof(SCmdStartAP))) {
result = -EFAULT;
break;
}
- if (sStartAPCmd.wBSSType == AP) {
- pMgmt->eConfigMode = WMAC_CONFIG_AP;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to AP mode\n");
- }
- else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct BSS type not set to AP mode\n");
+ if (sStartAPCmd.wBSSType == AP) {
+ pMgmt->eConfigMode = WMAC_CONFIG_AP;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to AP mode\n");
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct BSS type not set to AP mode\n");
result = -EFAULT;
break;
- }
-
+ }
- if (sStartAPCmd.wBBPType == PHY80211g) {
- pMgmt->byAPBBType = PHY_TYPE_11G;
- }
- else if (sStartAPCmd.wBBPType == PHY80211a) {
- pMgmt->byAPBBType = PHY_TYPE_11A;
- }
- else {
- pMgmt->byAPBBType = PHY_TYPE_11B;
- }
+ if (sStartAPCmd.wBBPType == PHY80211g) {
+ pMgmt->byAPBBType = PHY_TYPE_11G;
+ } else if (sStartAPCmd.wBBPType == PHY80211a) {
+ pMgmt->byAPBBType = PHY_TYPE_11A;
+ } else {
+ pMgmt->byAPBBType = PHY_TYPE_11B;
+ }
- pItemSSID = (PWLAN_IE_SSID)sStartAPCmd.ssid;
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ pItemSSID = (PWLAN_IE_SSID)sStartAPCmd.ssid;
+ if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
+ return -EINVAL;
+ memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
memcpy(pMgmt->abyDesireSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
- if ((sStartAPCmd.uChannel > 0)&&(sStartAPCmd.uChannel <= 14))
- pDevice->uChannel = sStartAPCmd.uChannel;
-
- if ((sStartAPCmd.uBeaconInt >= 20) && (sStartAPCmd.uBeaconInt <= 1000))
- pMgmt->wIBSSBeaconPeriod = sStartAPCmd.uBeaconInt;
- else
- pMgmt->wIBSSBeaconPeriod = 100;
-
- if (sStartAPCmd.bShareKeyAuth == true){
- pMgmt->bShareKeyAlgorithm = true;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Share Key \n");
- }
- else {
- pMgmt->bShareKeyAlgorithm = false;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Open System \n");
- }
- memcpy(pMgmt->abyIBSSSuppRates, abySuppRates, 6);
-
- if (sStartAPCmd.byBasicRate & BIT3) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- pMgmt->abyIBSSSuppRates[4] |= BIT7;
- pMgmt->abyIBSSSuppRates[5] |= BIT7;
- }else if (sStartAPCmd.byBasicRate & BIT2) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- pMgmt->abyIBSSSuppRates[4] |= BIT7;
- }else if (sStartAPCmd.byBasicRate & BIT1) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- }else if (sStartAPCmd.byBasicRate & BIT1) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- }else {
- //default 1,2M
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- }
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Support Rate= %x %x %x %x\n",
- pMgmt->abyIBSSSuppRates[2],
- pMgmt->abyIBSSSuppRates[3],
- pMgmt->abyIBSSSuppRates[4],
- pMgmt->abyIBSSSuppRates[5]
- );
-
- netif_stop_queue(pDevice->dev);
- spin_lock_irq(&pDevice->lock);
- bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case WLAN_CMD_GET_NODE_CNT:
+ if ((sStartAPCmd.uChannel > 0) && (sStartAPCmd.uChannel <= 14))
+ pDevice->uChannel = sStartAPCmd.uChannel;
+
+ if ((sStartAPCmd.uBeaconInt >= 20) && (sStartAPCmd.uBeaconInt <= 1000))
+ pMgmt->wIBSSBeaconPeriod = sStartAPCmd.uBeaconInt;
+ else
+ pMgmt->wIBSSBeaconPeriod = 100;
+
+ if (sStartAPCmd.bShareKeyAuth == true) {
+ pMgmt->bShareKeyAlgorithm = true;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Share Key\n");
+ } else {
+ pMgmt->bShareKeyAlgorithm = false;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Open System\n");
+ }
+ memcpy(pMgmt->abyIBSSSuppRates, abySuppRates, 6);
+
+ if (sStartAPCmd.byBasicRate & BIT3) {
+ pMgmt->abyIBSSSuppRates[2] |= BIT7;
+ pMgmt->abyIBSSSuppRates[3] |= BIT7;
+ pMgmt->abyIBSSSuppRates[4] |= BIT7;
+ pMgmt->abyIBSSSuppRates[5] |= BIT7;
+ } else if (sStartAPCmd.byBasicRate & BIT2) {
+ pMgmt->abyIBSSSuppRates[2] |= BIT7;
+ pMgmt->abyIBSSSuppRates[3] |= BIT7;
+ pMgmt->abyIBSSSuppRates[4] |= BIT7;
+ } else if (sStartAPCmd.byBasicRate & BIT1) {
+ pMgmt->abyIBSSSuppRates[2] |= BIT7;
+ pMgmt->abyIBSSSuppRates[3] |= BIT7;
+ } else if (sStartAPCmd.byBasicRate & BIT1) {
+ pMgmt->abyIBSSSuppRates[2] |= BIT7;
+ } else {
+ /* default 1,2M */
+ pMgmt->abyIBSSSuppRates[2] |= BIT7;
+ pMgmt->abyIBSSSuppRates[3] |= BIT7;
+ }
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Support Rate= %x %x %x %x\n",
+ pMgmt->abyIBSSSuppRates[2],
+ pMgmt->abyIBSSSuppRates[3],
+ pMgmt->abyIBSSSuppRates[4],
+ pMgmt->abyIBSSSuppRates[5]);
+
+ netif_stop_queue(pDevice->dev);
+ spin_lock_irq(&pDevice->lock);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
+ spin_unlock_irq(&pDevice->lock);
+ break;
+ case WLAN_CMD_GET_NODE_CNT:
cbListCount = 0;
pNode = &(pMgmt->sNodeDBTable[0]);
- for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
- pNode = &(pMgmt->sNodeDBTable[ii]);
- if (!pNode->bActive)
- continue;
- cbListCount++;
- }
-
- sNodeList.uItem = cbListCount;
- if (copy_to_user(pReq->data, &sNodeList, sizeof(SNodeList))) {
+ for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
+ pNode = &(pMgmt->sNodeDBTable[ii]);
+ if (!pNode->bActive)
+ continue;
+ cbListCount++;
+ }
+
+ sNodeList.uItem = cbListCount;
+ if (copy_to_user(pReq->data, &sNodeList, sizeof(SNodeList))) {
result = -EFAULT;
break;
}
- pReq->wResult = 0;
- break;
-
- case WLAN_CMD_GET_NODE_LIST:
+ pReq->wResult = 0;
+ break;
- if (copy_from_user(&sNodeList, pReq->data, sizeof(SNodeList))) {
+ case WLAN_CMD_GET_NODE_LIST:
+ if (copy_from_user(&sNodeList, pReq->data, sizeof(SNodeList))) {
result = -EFAULT;
break;
}
- pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
- if (pNodeList == NULL) {
- result = -ENOMEM;
- break;
- }
+ pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
+ if (pNodeList == NULL) {
+ result = -ENOMEM;
+ break;
+ }
pNodeList->uItem = sNodeList.uItem;
pNode = &(pMgmt->sNodeDBTable[0]);
- for (ii = 0, jj = 0; ii < (MAX_NODE_NUM + 1); ii++) {
- pNode = &(pMgmt->sNodeDBTable[ii]);
- if (pNode->bActive) {
- pNodeList->sNodeList[jj].wAID = pNode->wAID;
- memcpy(pNodeList->sNodeList[jj].abyMACAddr, pNode->abyMACAddr, WLAN_ADDR_LEN);
- pNodeList->sNodeList[jj].wTxDataRate = pNode->wTxDataRate;
- pNodeList->sNodeList[jj].wInActiveCount = (unsigned short)pNode->uInActiveCount;
- pNodeList->sNodeList[jj].wEnQueueCnt = (unsigned short)pNode->wEnQueueCnt;
- pNodeList->sNodeList[jj].wFlags = (unsigned short)pNode->dwFlags;
- pNodeList->sNodeList[jj].bPWBitOn = pNode->bPSEnable;
- pNodeList->sNodeList[jj].byKeyIndex = pNode->byKeyIndex;
- pNodeList->sNodeList[jj].wWepKeyLength = pNode->uWepKeyLength;
- memcpy(&(pNodeList->sNodeList[jj].abyWepKey[0]), &(pNode->abyWepKey[0]), WEP_KEYMAXLEN);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key= %2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- pNodeList->sNodeList[jj].abyWepKey[0],
- pNodeList->sNodeList[jj].abyWepKey[1],
- pNodeList->sNodeList[jj].abyWepKey[2],
- pNodeList->sNodeList[jj].abyWepKey[3],
- pNodeList->sNodeList[jj].abyWepKey[4]
- );
- pNodeList->sNodeList[jj].bIsInFallback = pNode->bIsInFallback;
- pNodeList->sNodeList[jj].uTxFailures = pNode->uTxFailures;
- pNodeList->sNodeList[jj].uTxAttempts = pNode->uTxAttempts;
- pNodeList->sNodeList[jj].wFailureRatio = (unsigned short)pNode->uFailureRatio;
- jj ++;
- if (jj >= pNodeList->uItem)
- break;
- }
- }
- if (copy_to_user(pReq->data, pNodeList, sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)))) {
+ for (ii = 0, jj = 0; ii < (MAX_NODE_NUM + 1); ii++) {
+ pNode = &(pMgmt->sNodeDBTable[ii]);
+ if (pNode->bActive) {
+ pNodeList->sNodeList[jj].wAID = pNode->wAID;
+ memcpy(pNodeList->sNodeList[jj].abyMACAddr, pNode->abyMACAddr, WLAN_ADDR_LEN);
+ pNodeList->sNodeList[jj].wTxDataRate = pNode->wTxDataRate;
+ pNodeList->sNodeList[jj].wInActiveCount = (unsigned short)pNode->uInActiveCount;
+ pNodeList->sNodeList[jj].wEnQueueCnt = (unsigned short)pNode->wEnQueueCnt;
+ pNodeList->sNodeList[jj].wFlags = (unsigned short)pNode->dwFlags;
+ pNodeList->sNodeList[jj].bPWBitOn = pNode->bPSEnable;
+ pNodeList->sNodeList[jj].byKeyIndex = pNode->byKeyIndex;
+ pNodeList->sNodeList[jj].wWepKeyLength = pNode->uWepKeyLength;
+ memcpy(&(pNodeList->sNodeList[jj].abyWepKey[0]), &(pNode->abyWepKey[0]), WEP_KEYMAXLEN);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key= %2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
+ pNodeList->sNodeList[jj].abyWepKey[0],
+ pNodeList->sNodeList[jj].abyWepKey[1],
+ pNodeList->sNodeList[jj].abyWepKey[2],
+ pNodeList->sNodeList[jj].abyWepKey[3],
+ pNodeList->sNodeList[jj].abyWepKey[4]);
+ pNodeList->sNodeList[jj].bIsInFallback = pNode->bIsInFallback;
+ pNodeList->sNodeList[jj].uTxFailures = pNode->uTxFailures;
+ pNodeList->sNodeList[jj].uTxAttempts = pNode->uTxAttempts;
+ pNodeList->sNodeList[jj].wFailureRatio = (unsigned short)pNode->uFailureRatio;
+ jj++;
+ if (jj >= pNodeList->uItem)
+ break;
+ }
+ }
+ if (copy_to_user(pReq->data, pNodeList, sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)))) {
result = -EFAULT;
break;
}
- kfree(pNodeList);
- pReq->wResult = 0;
- break;
+ kfree(pNodeList);
+ pReq->wResult = 0;
+ break;
#ifdef WPA_SM_Transtatus
- case 0xFF:
- memset(wpa_Result.ifname,0,sizeof(wpa_Result.ifname));
- wpa_Result.proto = 0;
- wpa_Result.key_mgmt = 0;
- wpa_Result.eap_type = 0;
- wpa_Result.authenticated = false;
- pDevice->fWPA_Authened = false;
- if (copy_from_user(&wpa_Result, pReq->data, sizeof(wpa_Result))) {
- result = -EFAULT;
+ case 0xFF:
+ memset(wpa_Result.ifname, 0, sizeof(wpa_Result.ifname));
+ wpa_Result.proto = 0;
+ wpa_Result.key_mgmt = 0;
+ wpa_Result.eap_type = 0;
+ wpa_Result.authenticated = false;
+ pDevice->fWPA_Authened = false;
+ if (copy_from_user(&wpa_Result, pReq->data, sizeof(wpa_Result))) {
+ result = -EFAULT;
break;
}
-if(wpa_Result.authenticated==true) {
- #ifdef SndEvt_ToAPI
- {
- union iwreq_data wrqu;
+ if (wpa_Result.authenticated == true) {
+#ifdef SndEvt_ToAPI
+ {
+ union iwreq_data wrqu;
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.flags = RT_WPACONNECTED_EVENT_FLAG;
- wrqu.data.length =pItemSSID->len;
- wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, pItemSSID->abySSID);
- }
- #endif
- pDevice->fWPA_Authened = true; //is successful peer to wpa_Result.authenticated?
-}
-
- //printk("get private wpa_supplicant announce WPA SM\n");
- //printk("wpa-->ifname=%s\n",wpa_Result.ifname);
- //printk("wpa-->proto=%d\n",wpa_Result.proto);
- //printk("wpa-->key-mgmt=%d\n",wpa_Result.key_mgmt);
- //printk("wpa-->eap_type=%d\n",wpa_Result.eap_type);
- //printk("wpa-->authenticated is %s\n",(wpa_Result.authenticated==true)?"true":"false");
-
- pReq->wResult = 0;
- break;
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.flags = RT_WPACONNECTED_EVENT_FLAG;
+ wrqu.data.length = pItemSSID->len;
+ wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, pItemSSID->abySSID);
+ }
+#endif
+ pDevice->fWPA_Authened = true; /* is successful peer to wpa_Result.authenticated? */
+ }
+ pReq->wResult = 0;
+ break;
#endif
- default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Private command not support..\n");
- }
-
- return result;
-}
+ default:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Private command not support..\n");
+ }
-/*
-void
-vConfigWEPKey (
- PSDevice pDevice,
- unsigned long dwKeyIndex,
- unsigned char *pbyKey,
- unsigned long uKeyLength
- )
-{
- int ii;
-
-
- memset(&pDevice->abyWepKey[dwKeyIndex][0], 0, WLAN_WEPMAX_KEYLEN);
- memcpy(&pDevice->abyWepKey[dwKeyIndex][0], pbyKey, uKeyLength);
-
- pDevice->bWepKeyAvailable[dwKeyIndex] = true;
- pDevice->auWepKeyLength[dwKeyIndex] = uKeyLength;
-
- MACvSetDefaultKeyEntry(pDevice->PortOffset, uKeyLength, dwKeyIndex,
- (unsigned long *) &(pDevice->abyWepKey[dwKeyIndex][0]), pDevice->byLocalID);
-
- if (pDevice->eEncryptionStatus < Ndis802_11EncryptionNotSupported) {
- for(ii=0; ii<MAX_GROUP_KEY; ii++) {
- if ((pDevice->bWepKeyAvailable[ii] == true) &&
- (pDevice->auWepKeyLength[ii] == WLAN_WEP232_KEYLEN)) {
- pDevice->uCurrentWEPMode = TX_WEP_SW232;
- MACvDisableDefaultKey(pDevice->PortOffset);
- break;
- }
- }
- if ((ii == MAX_GROUP_KEY) &&
- (pDevice->eEncryptionStatus < Ndis802_11EncryptionNotSupported)) {
- MACvEnableDefaultKey(pDevice->PortOffset, pDevice->byLocalID);
- }
- }
+ return result;
}
-*/
diff --git a/drivers/staging/vt6655/wmgr.c b/drivers/staging/vt6655/wmgr.c
index ab289c30edb..c46d51908ac 100644
--- a/drivers/staging/vt6655/wmgr.c
+++ b/drivers/staging/vt6655/wmgr.c
@@ -2521,14 +2521,8 @@ vMgrCreateOwnIBSS(
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
// AP mode BSSID = MAC addr
memcpy(pMgmt->abyCurrBSSID, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- DBG_PRT(MSG_LEVEL_INFO, KERN_INFO"AP beacon created BSSID:%02x-%02x-%02x-%02x-%02x-%02x \n",
- pMgmt->abyCurrBSSID[0],
- pMgmt->abyCurrBSSID[1],
- pMgmt->abyCurrBSSID[2],
- pMgmt->abyCurrBSSID[3],
- pMgmt->abyCurrBSSID[4],
- pMgmt->abyCurrBSSID[5]
- );
+ DBG_PRT(MSG_LEVEL_INFO, KERN_INFO"AP beacon created BSSID:%pM\n",
+ pMgmt->abyCurrBSSID);
}
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
@@ -2550,14 +2544,8 @@ vMgrCreateOwnIBSS(
pMgmt->abyCurrBSSID[0] |= IEEE_ADDR_UNIVERSAL;
- DBG_PRT(MSG_LEVEL_INFO, KERN_INFO"Adhoc beacon created bssid:%02x-%02x-%02x-%02x-%02x-%02x \n",
- pMgmt->abyCurrBSSID[0],
- pMgmt->abyCurrBSSID[1],
- pMgmt->abyCurrBSSID[2],
- pMgmt->abyCurrBSSID[3],
- pMgmt->abyCurrBSSID[4],
- pMgmt->abyCurrBSSID[5]
- );
+ DBG_PRT(MSG_LEVEL_INFO, KERN_INFO"Adhoc beacon created bssid:%pM\n",
+ pMgmt->abyCurrBSSID);
}
// Set Capability Info
@@ -2887,14 +2875,8 @@ vMgrJoinBSSBegin(
// pDevice->bLinkPass = true;
// memcpy(pDevice->abyBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Join IBSS ok:%02x-%02x-%02x-%02x-%02x-%02x \n",
- pMgmt->abyCurrBSSID[0],
- pMgmt->abyCurrBSSID[1],
- pMgmt->abyCurrBSSID[2],
- pMgmt->abyCurrBSSID[3],
- pMgmt->abyCurrBSSID[4],
- pMgmt->abyCurrBSSID[5]
- );
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Join IBSS ok:%pM\n",
+ pMgmt->abyCurrBSSID);
// Preamble type auto-switch: if AP can receive short-preamble cap,
// and if registry setting is short preamble we can turn on too.
@@ -2984,13 +2966,8 @@ s_vMgrSynchBSS (
MACvReadBSSIDAddress(pDevice->PortOffset, pMgmt->abyCurrBSSID);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Sync:set CurrBSSID address = %02x-%02x-%02x=%02x-%02x-%02x\n",
- pMgmt->abyCurrBSSID[0],
- pMgmt->abyCurrBSSID[1],
- pMgmt->abyCurrBSSID[2],
- pMgmt->abyCurrBSSID[3],
- pMgmt->abyCurrBSSID[4],
- pMgmt->abyCurrBSSID[5]);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Sync:set CurrBSSID address = "
+ "%pM\n", pMgmt->abyCurrBSSID);
if (pCurr->eNetworkTypeInUse == PHY_TYPE_11A) {
if ((pMgmt->eConfigPHYMode == PHY_TYPE_11A) ||
@@ -4462,14 +4439,8 @@ s_vMgrRxProbeRequest(
sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
vMgrDecodeProbeRequest(&sFrame);
/*
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request rx:MAC addr:%02x-%02x-%02x=%02x-%02x-%02x \n",
- sFrame.pHdr->sA3.abyAddr2[0],
- sFrame.pHdr->sA3.abyAddr2[1],
- sFrame.pHdr->sA3.abyAddr2[2],
- sFrame.pHdr->sA3.abyAddr2[3],
- sFrame.pHdr->sA3.abyAddr2[4],
- sFrame.pHdr->sA3.abyAddr2[5]
- );
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request rx:MAC addr:%pM\n",
+ sFrame.pHdr->sA3.abyAddr2);
*/
if (sFrame.pSSID->len != 0) {
if (sFrame.pSSID->len != ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len)
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index a0f994ed58f..732ba88dc79 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -213,7 +213,9 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
int uu, ii;
- if (param->u.wpa_key.alg_name > WPA_ALG_CCMP)
+ if (param->u.wpa_key.alg_name > WPA_ALG_CCMP ||
+ param->u.wpa_key.key_len >= MAX_KEY_LEN ||
+ param->u.wpa_key.seq_len >= MAX_KEY_LEN)
return -EINVAL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n", param->u.wpa_key.alg_name);
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index cb817ced518..c0edf97535d 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -1109,30 +1109,12 @@ static BOOL s_bAPModeRxCtl (
&Status
);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: send vMgrDeAuthenBeginSta 3\n");
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BSSID:%02x-%02x-%02x=%02x-%02x-%02x \n",
- p802_11Header->abyAddr3[0],
- p802_11Header->abyAddr3[1],
- p802_11Header->abyAddr3[2],
- p802_11Header->abyAddr3[3],
- p802_11Header->abyAddr3[4],
- p802_11Header->abyAddr3[5]
- );
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ADDR2:%02x-%02x-%02x=%02x-%02x-%02x \n",
- p802_11Header->abyAddr2[0],
- p802_11Header->abyAddr2[1],
- p802_11Header->abyAddr2[2],
- p802_11Header->abyAddr2[3],
- p802_11Header->abyAddr2[4],
- p802_11Header->abyAddr2[5]
- );
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ADDR1:%02x-%02x-%02x=%02x-%02x-%02x \n",
- p802_11Header->abyAddr1[0],
- p802_11Header->abyAddr1[1],
- p802_11Header->abyAddr1[2],
- p802_11Header->abyAddr1[3],
- p802_11Header->abyAddr1[4],
- p802_11Header->abyAddr1[5]
- );
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BSSID:%pM\n",
+ p802_11Header->abyAddr3);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ADDR2:%pM\n",
+ p802_11Header->abyAddr2);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ADDR1:%pM\n",
+ p802_11Header->abyAddr1);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: wFrameCtl= %x\n", p802_11Header->wFrameCtl );
return TRUE;
}
diff --git a/drivers/staging/vt6656/ioctl.c b/drivers/staging/vt6656/ioctl.c
index cfe9c95d780..49390026dea 100644
--- a/drivers/staging/vt6656/ioctl.c
+++ b/drivers/staging/vt6656/ioctl.c
@@ -40,665 +40,603 @@
#include "rndis.h"
#include "rf.h"
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-//static int msglevel =MSG_LEVEL_DEBUG;
-static int msglevel =MSG_LEVEL_INFO;
-
- SWPAResult wpa_Result;
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
-
- PSCmdRequest pReq = (PSCmdRequest)rq;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int result = 0;
- PWLAN_IE_SSID pItemSSID;
- SCmdBSSJoin sJoinCmd;
- SCmdZoneTypeSet sZoneTypeCmd;
- SCmdScan sScanCmd;
- SCmdStartAP sStartAPCmd;
- SCmdSetWEP sWEPCmd;
- SCmdValue sValue;
- SBSSIDList sList;
- SNodeList sNodeList;
- PSBSSIDList pList;
- PSNodeList pNodeList;
- unsigned int cbListCount;
- PKnownBSS pBSS;
- PKnownNodeDB pNode;
- unsigned int ii, jj;
- SCmdLinkStatus sLinkStatus;
- BYTE abySuppRates[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
- BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- DWORD dwKeyIndex= 0;
- BYTE abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- signed long ldBm;
-
- pReq->wResult = 0;
-
- switch(pReq->wCmdCode) {
-
- case WLAN_CMD_BSS_SCAN:
-
- if (copy_from_user(&sScanCmd, pReq->data, sizeof(SCmdScan))) {
+SWPAResult wpa_Result;
+static int msglevel = MSG_LEVEL_INFO;
+
+int private_ioctl(PSDevice pDevice, struct ifreq *rq)
+{
+
+ PSCmdRequest pReq = (PSCmdRequest)rq;
+ PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ int result = 0;
+ PWLAN_IE_SSID pItemSSID;
+ SCmdBSSJoin sJoinCmd;
+ SCmdZoneTypeSet sZoneTypeCmd;
+ SCmdScan sScanCmd;
+ SCmdStartAP sStartAPCmd;
+ SCmdSetWEP sWEPCmd;
+ SCmdValue sValue;
+ SBSSIDList sList;
+ SNodeList sNodeList;
+ PSBSSIDList pList;
+ PSNodeList pNodeList;
+ unsigned int cbListCount;
+ PKnownBSS pBSS;
+ PKnownNodeDB pNode;
+ unsigned int ii, jj;
+ SCmdLinkStatus sLinkStatus;
+ BYTE abySuppRates[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
+ BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ DWORD dwKeyIndex = 0;
+ BYTE abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ signed long ldBm;
+
+ pReq->wResult = 0;
+
+ switch (pReq->wCmdCode) {
+ case WLAN_CMD_BSS_SCAN:
+ if (copy_from_user(&sScanCmd, pReq->data, sizeof(SCmdScan))) {
result = -EFAULT;
break;
}
- pItemSSID = (PWLAN_IE_SSID)sScanCmd.ssid;
- if (pItemSSID->len != 0) {
- memset(abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(abyScanSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
- }
- spin_lock_irq(&pDevice->lock);
+ pItemSSID = (PWLAN_IE_SSID)sScanCmd.ssid;
+ if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
+ return -EINVAL;
+ if (pItemSSID->len != 0) {
+ memset(abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ memcpy(abyScanSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
+ }
+ spin_lock_irq(&pDevice->lock);
- if (memcmp(pMgmt->abyCurrBSSID, &abyNullAddr[0], 6) == 0)
- BSSvClearBSSList((void *) pDevice, FALSE);
- else
- BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
+ if (memcmp(pMgmt->abyCurrBSSID, &abyNullAddr[0], 6) == 0)
+ BSSvClearBSSList((void *)pDevice, FALSE);
+ else
+ BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_BSS_SCAN..begin\n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_BSS_SCAN..begin\n");
- if (pItemSSID->len != 0)
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_BSSID_SCAN,
- abyScanSSID);
- else
- bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
+ if (pItemSSID->len != 0)
+ bScheduleCommand((void *)pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ abyScanSSID);
+ else
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- spin_unlock_irq(&pDevice->lock);
- break;
+ spin_unlock_irq(&pDevice->lock);
+ break;
- case WLAN_CMD_ZONETYPE_SET:
- //mike add :cann't support.
- result=-EOPNOTSUPP;
- break;
+ case WLAN_CMD_ZONETYPE_SET:
+ result = -EOPNOTSUPP;
+ break;
- if (copy_from_user(&sZoneTypeCmd, pReq->data, sizeof(SCmdZoneTypeSet))) {
+ if (copy_from_user(&sZoneTypeCmd, pReq->data, sizeof(SCmdZoneTypeSet))) {
result = -EFAULT;
break;
}
- if(sZoneTypeCmd.bWrite==TRUE) {
- //////write zonetype
- if(sZoneTypeCmd.ZoneType == ZoneType_USA) {
- //set to USA
- printk("set_ZoneType:USA\n");
- }
- else if(sZoneTypeCmd.ZoneType == ZoneType_Japan) {
- //set to Japan
- printk("set_ZoneType:Japan\n");
- }
- else if(sZoneTypeCmd.ZoneType == ZoneType_Europe) {
- //set to Europe
- printk("set_ZoneType:Europe\n");
- }
- }
- else {
- ///////read zonetype
- BYTE zonetype=0;
-
-
- if(zonetype == 0x00) { //USA
- sZoneTypeCmd.ZoneType = ZoneType_USA;
- }
- else if(zonetype == 0x01) { //Japan
- sZoneTypeCmd.ZoneType = ZoneType_Japan;
- }
- else if(zonetype == 0x02) { //Europe
- sZoneTypeCmd.ZoneType = ZoneType_Europe;
- }
- else { //Unknown ZoneType
- printk("Error:ZoneType[%x] Unknown ???\n",zonetype);
- result = -EFAULT;
+ if (sZoneTypeCmd.bWrite == TRUE) {
+ /* write zonetype */
+ if (sZoneTypeCmd.ZoneType == ZoneType_USA) {
+ /* set to USA */
+ printk("set_ZoneType:USA\n");
+ } else if (sZoneTypeCmd.ZoneType == ZoneType_Japan) {
+ /* set to Japan */
+ printk("set_ZoneType:Japan\n");
+ } else if (sZoneTypeCmd.ZoneType == ZoneType_Europe) {
+ /* set to Europe */
+ printk("set_ZoneType:Europe\n");
+ }
+ } else {
+ /* read zonetype */
+ BYTE zonetype = 0;
+
+ if (zonetype == 0x00) { /* USA */
+ sZoneTypeCmd.ZoneType = ZoneType_USA;
+ } else if (zonetype == 0x01) { /* Japan */
+ sZoneTypeCmd.ZoneType = ZoneType_Japan;
+ } else if (zonetype == 0x02) { /* Europe */
+ sZoneTypeCmd.ZoneType = ZoneType_Europe;
+ } else { /* Unknown ZoneType */
+ printk("Error:ZoneType[%x] Unknown ???\n", zonetype);
+ result = -EFAULT;
+ break;
+ }
+
+ if (copy_to_user(pReq->data, &sZoneTypeCmd,
+ sizeof(SCmdZoneTypeSet))) {
+ result = -EFAULT;
+ break;
+ }
+ }
break;
- }
- if (copy_to_user(pReq->data, &sZoneTypeCmd, sizeof(SCmdZoneTypeSet))) {
+ case WLAN_CMD_BSS_JOIN:
+ if (copy_from_user(&sJoinCmd, pReq->data, sizeof(SCmdBSSJoin))) {
result = -EFAULT;
break;
}
- }
- break;
+ pItemSSID = (PWLAN_IE_SSID)sJoinCmd.ssid;
+ if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
+ return -EINVAL;
+ memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ memcpy(pMgmt->abyDesireSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
+ if (sJoinCmd.wBSSType == ADHOC) {
+ pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to adhoc mode\n");
+ } else {
+ pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to STA mode\n");
+ }
+ if (sJoinCmd.bPSEnable == TRUE) {
+ pDevice->ePSMode = WMAC_POWER_FAST;
+ pMgmt->wListenInterval = 2;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Power Saving On\n");
+ } else {
+ pDevice->ePSMode = WMAC_POWER_CAM;
+ pMgmt->wListenInterval = 1;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Power Saving Off\n");
+ }
+
+ if (sJoinCmd.bShareKeyAuth == TRUE) {
+ pMgmt->bShareKeyAlgorithm = TRUE;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Share Key\n");
+ } else {
+ pMgmt->bShareKeyAlgorithm = FALSE;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Open System\n");
+ }
- case WLAN_CMD_BSS_JOIN:
+ pDevice->uChannel = sJoinCmd.uChannel;
+ netif_stop_queue(pDevice->dev);
+ spin_lock_irq(&pDevice->lock);
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
+ spin_unlock_irq(&pDevice->lock);
+ break;
- if (copy_from_user(&sJoinCmd, pReq->data, sizeof(SCmdBSSJoin))) {
+ case WLAN_CMD_SET_WEP:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_WEP Key.\n");
+ memset(&sWEPCmd, 0, sizeof(SCmdSetWEP));
+ if (copy_from_user(&sWEPCmd, pReq->data, sizeof(SCmdSetWEP))) {
result = -EFAULT;
break;
}
-
- pItemSSID = (PWLAN_IE_SSID)sJoinCmd.ssid;
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(pMgmt->abyDesireSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
- if (sJoinCmd.wBSSType == ADHOC) {
- pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to adhoc mode\n");
- }
- else {
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to STA mode\n");
- }
- if (sJoinCmd.bPSEnable == TRUE) {
- pDevice->ePSMode = WMAC_POWER_FAST;
-// pDevice->ePSMode = WMAC_POWER_MAX;
- pMgmt->wListenInterval = 2;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Power Saving On\n");
- }
- else {
- pDevice->ePSMode = WMAC_POWER_CAM;
- pMgmt->wListenInterval = 1;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Power Saving Off \n");
- }
-
- if (sJoinCmd.bShareKeyAuth == TRUE){
- pMgmt->bShareKeyAlgorithm = TRUE;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Share Key \n");
- }
- else {
- pMgmt->bShareKeyAlgorithm = FALSE;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Open System \n");
- }
- pDevice->uChannel = sJoinCmd.uChannel;
- netif_stop_queue(pDevice->dev);
- spin_lock_irq(&pDevice->lock);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_BSSID_SCAN,
- pMgmt->abyDesireSSID);
- bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case WLAN_CMD_SET_WEP:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_WEP Key. \n");
- memset(&sWEPCmd, 0 ,sizeof(SCmdSetWEP));
- if (copy_from_user(&sWEPCmd, pReq->data, sizeof(SCmdSetWEP))) {
- result = -EFAULT;
+ if (sWEPCmd.bEnableWep != TRUE) {
+ int uu;
+
+ pDevice->bEncryptionEnable = FALSE;
+ pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
+ spin_lock_irq(&pDevice->lock);
+ for (uu = 0; uu < MAX_KEY_TABLE; uu++)
+ MACvDisableKeyEntry(pDevice, uu);
+ spin_unlock_irq(&pDevice->lock);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WEP function disable.\n");
break;
}
- if (sWEPCmd.bEnableWep != TRUE) {
- int uu;
-
- pDevice->bEncryptionEnable = FALSE;
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- spin_lock_irq(&pDevice->lock);
- for (uu = 0; uu < MAX_KEY_TABLE; uu++)
- MACvDisableKeyEntry(pDevice, uu);
- spin_unlock_irq(&pDevice->lock);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WEP function disable.\n");
- break;
- }
-
- for (ii = 0; ii < WLAN_WEP_NKEYS; ii ++) {
- if (sWEPCmd.bWepKeyAvailable[ii]) {
- if (ii == sWEPCmd.byKeyIndex)
-//2006-1207-01<Modify>by Einsn Liu
-// dwKeyIndex|= (1 << 31);
- dwKeyIndex=ii|(1 << 31);
- else
- dwKeyIndex = ii;
- spin_lock_irq(&pDevice->lock);
- KeybSetDefaultKey( pDevice,
- &(pDevice->sKey),
- dwKeyIndex,
- sWEPCmd.auWepKeyLength[ii],
- NULL,
- (PBYTE)&sWEPCmd.abyWepKey[ii][0],
- KEY_CTL_WEP
- );
- spin_unlock_irq(&pDevice->lock);
-
- }
- }
- pDevice->byKeyIndex = sWEPCmd.byKeyIndex;
- pDevice->bTransmitKey = TRUE;
- pDevice->bEncryptionEnable = TRUE;
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
-
- break;
-
- case WLAN_CMD_GET_LINK:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_GET_LINK status. \n");
-
- memset(sLinkStatus.abySSID, 0 , WLAN_SSID_MAXLEN + 1);
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
- sLinkStatus.wBSSType = ADHOC;
- else
- sLinkStatus.wBSSType = INFRA;
-
- if (pMgmt->eCurrState == WMAC_STATE_JOINTED)
- sLinkStatus.byState = ADHOC_JOINTED;
- else
- sLinkStatus.byState = ADHOC_STARTED;
-
- sLinkStatus.uChannel = pMgmt->uCurrChannel;
- if (pDevice->bLinkPass == TRUE) {
- sLinkStatus.bLink = TRUE;
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- memcpy(sLinkStatus.abySSID, pItemSSID->abySSID, pItemSSID->len);
- memcpy(sLinkStatus.abyBSSID, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- sLinkStatus.uLinkRate = pMgmt->sNodeDBTable[0].wTxDataRate;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Link Success ! \n");
- }
- else {
- sLinkStatus.bLink = FALSE;
- }
- if (copy_to_user(pReq->data, &sLinkStatus, sizeof(SCmdLinkStatus))) {
+
+ for (ii = 0; ii < WLAN_WEP_NKEYS; ii++) {
+ if (sWEPCmd.bWepKeyAvailable[ii]) {
+ if (ii == sWEPCmd.byKeyIndex)
+ dwKeyIndex = ii | (1 << 31);
+ else
+ dwKeyIndex = ii;
+ spin_lock_irq(&pDevice->lock);
+ KeybSetDefaultKey(pDevice, &(pDevice->sKey),
+ dwKeyIndex,
+ sWEPCmd.auWepKeyLength[ii],
+ NULL,
+ (PBYTE)&sWEPCmd.abyWepKey[ii][0],
+ KEY_CTL_WEP);
+ spin_unlock_irq(&pDevice->lock);
+ }
+ }
+ pDevice->byKeyIndex = sWEPCmd.byKeyIndex;
+ pDevice->bTransmitKey = TRUE;
+ pDevice->bEncryptionEnable = TRUE;
+ pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
+ break;
+
+ case WLAN_CMD_GET_LINK:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_GET_LINK status.\n");
+
+ memset(sLinkStatus.abySSID, 0, WLAN_SSID_MAXLEN + 1);
+
+ if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
+ sLinkStatus.wBSSType = ADHOC;
+ else
+ sLinkStatus.wBSSType = INFRA;
+
+ if (pMgmt->eCurrState == WMAC_STATE_JOINTED)
+ sLinkStatus.byState = ADHOC_JOINTED;
+ else
+ sLinkStatus.byState = ADHOC_STARTED;
+
+ sLinkStatus.uChannel = pMgmt->uCurrChannel;
+ if (pDevice->bLinkPass == TRUE) {
+ sLinkStatus.bLink = TRUE;
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
+ memcpy(sLinkStatus.abySSID, pItemSSID->abySSID, pItemSSID->len);
+ memcpy(sLinkStatus.abyBSSID, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
+ sLinkStatus.uLinkRate = pMgmt->sNodeDBTable[0].wTxDataRate;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Link Success!\n");
+ } else {
+ sLinkStatus.bLink = FALSE;
+ sLinkStatus.uLinkRate = 0;
+ }
+ if (copy_to_user(pReq->data, &sLinkStatus,
+ sizeof(SCmdLinkStatus))) {
result = -EFAULT;
break;
}
+ break;
- break;
-
- case WLAN_CMD_GET_LISTLEN:
+ case WLAN_CMD_GET_LISTLEN:
cbListCount = 0;
pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (!pBSS->bActive)
- continue;
- cbListCount++;
- }
- sList.uItem = cbListCount;
- if (copy_to_user(pReq->data, &sList, sizeof(SBSSIDList))) {
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ pBSS = &(pMgmt->sBSSList[ii]);
+ if (!pBSS->bActive)
+ continue;
+ cbListCount++;
+ }
+ sList.uItem = cbListCount;
+ if (copy_to_user(pReq->data, &sList, sizeof(SBSSIDList))) {
result = -EFAULT;
break;
}
- pReq->wResult = 0;
- break;
+ pReq->wResult = 0;
+ break;
- case WLAN_CMD_GET_LIST:
- if (copy_from_user(&sList, pReq->data, sizeof(SBSSIDList))) {
+ case WLAN_CMD_GET_LIST:
+ if (copy_from_user(&sList, pReq->data, sizeof(SBSSIDList))) {
result = -EFAULT;
break;
}
- pList = (PSBSSIDList)kmalloc(sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)), (int)GFP_ATOMIC);
- if (pList == NULL) {
- result = -ENOMEM;
- break;
- }
+ pList = (PSBSSIDList)kmalloc(sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)), (int)GFP_ATOMIC);
+ if (pList == NULL) {
+ result = -ENOMEM;
+ break;
+ }
pList->uItem = sList.uItem;
pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0, jj = 0; jj < MAX_BSS_NUM ; jj++) {
- pBSS = &(pMgmt->sBSSList[jj]);
- if (pBSS->bActive) {
- pList->sBSSIDList[ii].uChannel = pBSS->uChannel;
- pList->sBSSIDList[ii].wBeaconInterval = pBSS->wBeaconInterval;
- pList->sBSSIDList[ii].wCapInfo = pBSS->wCapInfo;
- RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
- pList->sBSSIDList[ii].uRSSI = (unsigned int) ldBm;
-// pList->sBSSIDList[ii].uRSSI = pBSS->uRSSI;
- memcpy(pList->sBSSIDList[ii].abyBSSID, pBSS->abyBSSID, WLAN_BSSID_LEN);
- pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
- memset(pList->sBSSIDList[ii].abySSID, 0, WLAN_SSID_MAXLEN + 1);
- memcpy(pList->sBSSIDList[ii].abySSID, pItemSSID->abySSID, pItemSSID->len);
- if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo)) {
- pList->sBSSIDList[ii].byNetType = INFRA;
- }
- else {
- pList->sBSSIDList[ii].byNetType = ADHOC;
- }
- if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo)) {
- pList->sBSSIDList[ii].bWEPOn = TRUE;
- }
- else {
- pList->sBSSIDList[ii].bWEPOn = FALSE;
- }
- ii ++;
- if (ii >= pList->uItem)
- break;
- }
- }
-
- if (copy_to_user(pReq->data, pList, sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)))) {
+ for (ii = 0, jj = 0; jj < MAX_BSS_NUM ; jj++) {
+ pBSS = &(pMgmt->sBSSList[jj]);
+ if (pBSS->bActive) {
+ pList->sBSSIDList[ii].uChannel = pBSS->uChannel;
+ pList->sBSSIDList[ii].wBeaconInterval = pBSS->wBeaconInterval;
+ pList->sBSSIDList[ii].wCapInfo = pBSS->wCapInfo;
+ RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
+ pList->sBSSIDList[ii].uRSSI = (unsigned int) ldBm;
+ /* pList->sBSSIDList[ii].uRSSI = pBSS->uRSSI; */
+ memcpy(pList->sBSSIDList[ii].abyBSSID, pBSS->abyBSSID, WLAN_BSSID_LEN);
+ pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
+ memset(pList->sBSSIDList[ii].abySSID, 0, WLAN_SSID_MAXLEN + 1);
+ memcpy(pList->sBSSIDList[ii].abySSID, pItemSSID->abySSID, pItemSSID->len);
+ if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo)) {
+ pList->sBSSIDList[ii].byNetType = INFRA;
+ } else {
+ pList->sBSSIDList[ii].byNetType = ADHOC;
+ }
+ if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo)) {
+ pList->sBSSIDList[ii].bWEPOn = TRUE;
+ } else {
+ pList->sBSSIDList[ii].bWEPOn = FALSE;
+ }
+ ii++;
+ if (ii >= pList->uItem)
+ break;
+ }
+ }
+
+ if (copy_to_user(pReq->data, pList, sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)))) {
result = -EFAULT;
break;
}
- kfree(pList);
- pReq->wResult = 0;
- break;
+ kfree(pList);
+ pReq->wResult = 0;
+ break;
- case WLAN_CMD_GET_MIB:
- if (copy_to_user(pReq->data, &(pDevice->s802_11Counter), sizeof(SDot11MIBCount))) {
+ case WLAN_CMD_GET_MIB:
+ if (copy_to_user(pReq->data, &(pDevice->s802_11Counter), sizeof(SDot11MIBCount))) {
result = -EFAULT;
break;
}
- break;
+ break;
- case WLAN_CMD_GET_STAT:
- if (copy_to_user(pReq->data, &(pDevice->scStatistic), sizeof(SStatCounter))) {
+ case WLAN_CMD_GET_STAT:
+ if (copy_to_user(pReq->data, &(pDevice->scStatistic), sizeof(SStatCounter))) {
result = -EFAULT;
break;
}
- break;
- case WLAN_CMD_STOP_MAC:
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_STOP_MAC\n");
- // Todo xxxxxx
- netif_stop_queue(pDevice->dev);
- spin_lock_irq(&pDevice->lock);
- if (pDevice->bRadioOff == FALSE) {
- CARDbRadioPowerOff(pDevice);
- }
- pDevice->bLinkPass = FALSE;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
-// del_timer(&pDevice->sTimerCommand);
-// del_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bCmdRunning = FALSE;
- spin_unlock_irq(&pDevice->lock);
-
- break;
-
- case WLAN_CMD_START_MAC:
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_START_MAC\n");
- // Todo xxxxxxx
- if (pDevice->bRadioOff == TRUE)
- CARDbRadioPowerOn(pDevice);
- break;
+ break;
+ case WLAN_CMD_STOP_MAC:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_STOP_MAC\n");
+ /* Todo xxxxxx */
+ netif_stop_queue(pDevice->dev);
+ spin_lock_irq(&pDevice->lock);
+ if (pDevice->bRadioOff == FALSE) {
+ CARDbRadioPowerOff(pDevice);
+ }
+ pDevice->bLinkPass = FALSE;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
+ memset(pMgmt->abyCurrBSSID, 0, 6);
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ /* del_timer(&pDevice->sTimerCommand); */
+ /* del_timer(&pMgmt->sTimerSecondCallback); */
+ pDevice->bCmdRunning = FALSE;
+ spin_unlock_irq(&pDevice->lock);
+ break;
- case WLAN_CMD_SET_HOSTAPD:
+ case WLAN_CMD_START_MAC:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_START_MAC\n");
+ /* Todo xxxxxxx */
+ if (pDevice->bRadioOff == TRUE)
+ CARDbRadioPowerOn(pDevice);
+ break;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOSTAPD\n");
+ case WLAN_CMD_SET_HOSTAPD:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOSTAPD\n");
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
+ if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
result = -EFAULT;
break;
}
if (sValue.dwValue == 1) {
- if (vt6656_hostap_set_hostapd(pDevice, 1, 1) == 0){
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable HOSTAP\n");
- }
- else {
- result = -EFAULT;
- break;
+ if (vt6656_hostap_set_hostapd(pDevice, 1, 1) == 0) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable HOSTAP\n");
+ } else {
+ result = -EFAULT;
+ break;
}
- }
- else {
- vt6656_hostap_set_hostapd(pDevice, 0, 1);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable HOSTAP\n");
- }
-
- break;
-
- case WLAN_CMD_SET_HOSTAPD_STA:
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOSTAPD_STA\n");
+ } else {
+ vt6656_hostap_set_hostapd(pDevice, 0, 1);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable HOSTAP\n");
+ }
+ break;
- break;
- case WLAN_CMD_SET_802_1X:
+ case WLAN_CMD_SET_HOSTAPD_STA:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOSTAPD_STA\n");
+ break;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_802_1X\n");
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
+ case WLAN_CMD_SET_802_1X:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_802_1X\n");
+ if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
result = -EFAULT;
break;
}
if (sValue.dwValue == 1) {
- pDevice->bEnable8021x = TRUE;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable 802.1x\n");
- }
- else {
- pDevice->bEnable8021x = FALSE;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable 802.1x\n");
- }
-
- break;
-
-
- case WLAN_CMD_SET_HOST_WEP:
+ pDevice->bEnable8021x = TRUE;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable 802.1x\n");
+ } else {
+ pDevice->bEnable8021x = FALSE;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable 802.1x\n");
+ }
+ break;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOST_WEP\n");
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
+ case WLAN_CMD_SET_HOST_WEP:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_HOST_WEP\n");
+ if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
result = -EFAULT;
break;
}
if (sValue.dwValue == 1) {
- pDevice->bEnableHostWEP = TRUE;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable HostWEP\n");
- }
- else {
- pDevice->bEnableHostWEP = FALSE;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable HostWEP\n");
- }
-
- break;
+ pDevice->bEnableHostWEP = TRUE;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable HostWEP\n");
+ } else {
+ pDevice->bEnableHostWEP = FALSE;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable HostWEP\n");
+ }
+ break;
- case WLAN_CMD_SET_WPA:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_WPA\n");
+ case WLAN_CMD_SET_WPA:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_SET_WPA\n");
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
+ if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
result = -EFAULT;
break;
}
if (sValue.dwValue == 1) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n");
- memcpy(pDevice->wpadev->dev_addr,
- pDevice->dev->dev_addr,
- ETH_ALEN);
- pDevice->bWPADEVUp = TRUE;
- }
- else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "close wpadev\n");
- pDevice->bWPADEVUp = FALSE;
- }
-
- break;
-
- case WLAN_CMD_AP_START:
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_AP_START\n");
- if (pDevice->bRadioOff == TRUE) {
- CARDbRadioPowerOn(pDevice);
- add_timer(&pMgmt->sTimerSecondCallback);
- }
- if (copy_from_user(&sStartAPCmd, pReq->data, sizeof(SCmdStartAP))) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n");
+ memcpy(pDevice->wpadev->dev_addr, pDevice->dev->dev_addr,
+ ETH_ALEN);
+ pDevice->bWPADEVUp = TRUE;
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "close wpadev\n");
+ pDevice->bWPADEVUp = FALSE;
+ }
+ break;
+
+ case WLAN_CMD_AP_START:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_AP_START\n");
+ if (pDevice->bRadioOff == TRUE) {
+ CARDbRadioPowerOn(pDevice);
+ add_timer(&pMgmt->sTimerSecondCallback);
+ }
+ if (copy_from_user(&sStartAPCmd, pReq->data, sizeof(SCmdStartAP))) {
result = -EFAULT;
break;
}
- if (sStartAPCmd.wBSSType == AP) {
- pMgmt->eConfigMode = WMAC_CONFIG_AP;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to AP mode\n");
- }
- else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct BSS type not set to AP mode\n");
+ if (sStartAPCmd.wBSSType == AP) {
+ pMgmt->eConfigMode = WMAC_CONFIG_AP;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to AP mode\n");
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct BSS type not set to AP mode\n");
result = -EFAULT;
break;
- }
-
+ }
- if (sStartAPCmd.wBBPType == PHY80211g) {
- pMgmt->byAPBBType = PHY_TYPE_11G;
- }
- else if (sStartAPCmd.wBBPType == PHY80211a) {
- pMgmt->byAPBBType = PHY_TYPE_11A;
- }
- else {
- pMgmt->byAPBBType = PHY_TYPE_11B;
- }
+ if (sStartAPCmd.wBBPType == PHY80211g) {
+ pMgmt->byAPBBType = PHY_TYPE_11G;
+ } else if (sStartAPCmd.wBBPType == PHY80211a) {
+ pMgmt->byAPBBType = PHY_TYPE_11A;
+ } else {
+ pMgmt->byAPBBType = PHY_TYPE_11B;
+ }
- pItemSSID = (PWLAN_IE_SSID)sStartAPCmd.ssid;
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ pItemSSID = (PWLAN_IE_SSID)sStartAPCmd.ssid;
+ if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
+ return -EINVAL;
+ memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
memcpy(pMgmt->abyDesireSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
- if ((sStartAPCmd.uChannel > 0)&&(sStartAPCmd.uChannel <= 14))
- pDevice->uChannel = sStartAPCmd.uChannel;
-
- if ((sStartAPCmd.uBeaconInt >= 20) && (sStartAPCmd.uBeaconInt <= 1000))
- pMgmt->wIBSSBeaconPeriod = sStartAPCmd.uBeaconInt;
- else
- pMgmt->wIBSSBeaconPeriod = 100;
-
- if (sStartAPCmd.bShareKeyAuth == TRUE){
- pMgmt->bShareKeyAlgorithm = TRUE;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Share Key \n");
- }
- else {
- pMgmt->bShareKeyAlgorithm = FALSE;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Open System \n");
- }
- memcpy(pMgmt->abyIBSSSuppRates, abySuppRates, 6);
-
- if (sStartAPCmd.byBasicRate & BIT3) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- pMgmt->abyIBSSSuppRates[4] |= BIT7;
- pMgmt->abyIBSSSuppRates[5] |= BIT7;
- }else if (sStartAPCmd.byBasicRate & BIT2) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- pMgmt->abyIBSSSuppRates[4] |= BIT7;
- }else if (sStartAPCmd.byBasicRate & BIT1) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- }else if (sStartAPCmd.byBasicRate & BIT1) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- }else {
- //default 1,2M
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- }
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Support Rate= %x %x %x %x\n",
- pMgmt->abyIBSSSuppRates[2],
- pMgmt->abyIBSSSuppRates[3],
- pMgmt->abyIBSSSuppRates[4],
- pMgmt->abyIBSSSuppRates[5]
- );
-
- netif_stop_queue(pDevice->dev);
- spin_lock_irq(&pDevice->lock);
- bScheduleCommand((void *) pDevice, WLAN_CMD_RUN_AP, NULL);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case WLAN_CMD_GET_NODE_CNT:
+ if ((sStartAPCmd.uChannel > 0) && (sStartAPCmd.uChannel <= 14))
+ pDevice->uChannel = sStartAPCmd.uChannel;
+
+ if ((sStartAPCmd.uBeaconInt >= 20) && (sStartAPCmd.uBeaconInt <= 1000))
+ pMgmt->wIBSSBeaconPeriod = sStartAPCmd.uBeaconInt;
+ else
+ pMgmt->wIBSSBeaconPeriod = 100;
+
+ if (sStartAPCmd.bShareKeyAuth == TRUE) {
+ pMgmt->bShareKeyAlgorithm = TRUE;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Share Key\n");
+ } else {
+ pMgmt->bShareKeyAlgorithm = FALSE;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Open System\n");
+ }
+ memcpy(pMgmt->abyIBSSSuppRates, abySuppRates, 6);
+
+ if (sStartAPCmd.byBasicRate & BIT3) {
+ pMgmt->abyIBSSSuppRates[2] |= BIT7;
+ pMgmt->abyIBSSSuppRates[3] |= BIT7;
+ pMgmt->abyIBSSSuppRates[4] |= BIT7;
+ pMgmt->abyIBSSSuppRates[5] |= BIT7;
+ } else if (sStartAPCmd.byBasicRate & BIT2) {
+ pMgmt->abyIBSSSuppRates[2] |= BIT7;
+ pMgmt->abyIBSSSuppRates[3] |= BIT7;
+ pMgmt->abyIBSSSuppRates[4] |= BIT7;
+ } else if (sStartAPCmd.byBasicRate & BIT1) {
+ pMgmt->abyIBSSSuppRates[2] |= BIT7;
+ pMgmt->abyIBSSSuppRates[3] |= BIT7;
+ } else if (sStartAPCmd.byBasicRate & BIT1) {
+ pMgmt->abyIBSSSuppRates[2] |= BIT7;
+ } else {
+ /* default 1,2M */
+ pMgmt->abyIBSSSuppRates[2] |= BIT7;
+ pMgmt->abyIBSSSuppRates[3] |= BIT7;
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Support Rate= %x %x %x %x\n",
+ pMgmt->abyIBSSSuppRates[2],
+ pMgmt->abyIBSSSuppRates[3],
+ pMgmt->abyIBSSSuppRates[4],
+ pMgmt->abyIBSSSuppRates[5]);
+
+ netif_stop_queue(pDevice->dev);
+ spin_lock_irq(&pDevice->lock);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
+ spin_unlock_irq(&pDevice->lock);
+ break;
+
+ case WLAN_CMD_GET_NODE_CNT:
cbListCount = 0;
pNode = &(pMgmt->sNodeDBTable[0]);
- for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
- pNode = &(pMgmt->sNodeDBTable[ii]);
- if (!pNode->bActive)
- continue;
- cbListCount++;
- }
-
- sNodeList.uItem = cbListCount;
- if (copy_to_user(pReq->data, &sNodeList, sizeof(SNodeList))) {
+ for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
+ pNode = &(pMgmt->sNodeDBTable[ii]);
+ if (!pNode->bActive)
+ continue;
+ cbListCount++;
+ }
+
+ sNodeList.uItem = cbListCount;
+ if (copy_to_user(pReq->data, &sNodeList, sizeof(SNodeList))) {
result = -EFAULT;
break;
}
- pReq->wResult = 0;
- break;
-
- case WLAN_CMD_GET_NODE_LIST:
+ pReq->wResult = 0;
+ break;
- if (copy_from_user(&sNodeList, pReq->data, sizeof(SNodeList))) {
+ case WLAN_CMD_GET_NODE_LIST:
+ if (copy_from_user(&sNodeList, pReq->data, sizeof(SNodeList))) {
result = -EFAULT;
break;
}
- pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
- if (pNodeList == NULL) {
- result = -ENOMEM;
- break;
- }
+ pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
+ if (pNodeList == NULL) {
+ result = -ENOMEM;
+ break;
+ }
pNodeList->uItem = sNodeList.uItem;
pNode = &(pMgmt->sNodeDBTable[0]);
- for (ii = 0, jj = 0; ii < (MAX_NODE_NUM + 1); ii++) {
- pNode = &(pMgmt->sNodeDBTable[ii]);
- if (pNode->bActive) {
- pNodeList->sNodeList[jj].wAID = pNode->wAID;
- memcpy(pNodeList->sNodeList[jj].abyMACAddr, pNode->abyMACAddr, WLAN_ADDR_LEN);
- pNodeList->sNodeList[jj].wTxDataRate = pNode->wTxDataRate;
- pNodeList->sNodeList[jj].wInActiveCount = (WORD)pNode->uInActiveCount;
- pNodeList->sNodeList[jj].wEnQueueCnt = (WORD)pNode->wEnQueueCnt;
- pNodeList->sNodeList[jj].wFlags = (WORD)pNode->dwFlags;
- pNodeList->sNodeList[jj].bPWBitOn = pNode->bPSEnable;
- pNodeList->sNodeList[jj].byKeyIndex = pNode->byKeyIndex;
- pNodeList->sNodeList[jj].wWepKeyLength = pNode->uWepKeyLength;
- memcpy(&(pNodeList->sNodeList[jj].abyWepKey[0]), &(pNode->abyWepKey[0]), WEP_KEYMAXLEN);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key= %2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- pNodeList->sNodeList[jj].abyWepKey[0],
- pNodeList->sNodeList[jj].abyWepKey[1],
- pNodeList->sNodeList[jj].abyWepKey[2],
- pNodeList->sNodeList[jj].abyWepKey[3],
- pNodeList->sNodeList[jj].abyWepKey[4]
- );
- pNodeList->sNodeList[jj].bIsInFallback = pNode->bIsInFallback;
- pNodeList->sNodeList[jj].uTxFailures = pNode->uTxFailures;
- pNodeList->sNodeList[jj].uTxAttempts = pNode->uTxAttempts;
- pNodeList->sNodeList[jj].wFailureRatio = (WORD)pNode->uFailureRatio;
- jj ++;
- if (jj >= pNodeList->uItem)
- break;
- }
- }
- if (copy_to_user(pReq->data, pNodeList, sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)))) {
+ for (ii = 0, jj = 0; ii < (MAX_NODE_NUM + 1); ii++) {
+ pNode = &(pMgmt->sNodeDBTable[ii]);
+ if (pNode->bActive) {
+ pNodeList->sNodeList[jj].wAID = pNode->wAID;
+ memcpy(pNodeList->sNodeList[jj].abyMACAddr, pNode->abyMACAddr, WLAN_ADDR_LEN);
+ pNodeList->sNodeList[jj].wTxDataRate = pNode->wTxDataRate;
+ pNodeList->sNodeList[jj].wInActiveCount = (WORD)pNode->uInActiveCount;
+ pNodeList->sNodeList[jj].wEnQueueCnt = (WORD)pNode->wEnQueueCnt;
+ pNodeList->sNodeList[jj].wFlags = (WORD)pNode->dwFlags;
+ pNodeList->sNodeList[jj].bPWBitOn = pNode->bPSEnable;
+ pNodeList->sNodeList[jj].byKeyIndex = pNode->byKeyIndex;
+ pNodeList->sNodeList[jj].wWepKeyLength = pNode->uWepKeyLength;
+ memcpy(&(pNodeList->sNodeList[jj].abyWepKey[0]), &(pNode->abyWepKey[0]), WEP_KEYMAXLEN);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key= %2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
+ pNodeList->sNodeList[jj].abyWepKey[0],
+ pNodeList->sNodeList[jj].abyWepKey[1],
+ pNodeList->sNodeList[jj].abyWepKey[2],
+ pNodeList->sNodeList[jj].abyWepKey[3],
+ pNodeList->sNodeList[jj].abyWepKey[4]);
+ pNodeList->sNodeList[jj].bIsInFallback = pNode->bIsInFallback;
+ pNodeList->sNodeList[jj].uTxFailures = pNode->uTxFailures;
+ pNodeList->sNodeList[jj].uTxAttempts = pNode->uTxAttempts;
+ pNodeList->sNodeList[jj].wFailureRatio = (WORD)pNode->uFailureRatio;
+ jj++;
+ if (jj >= pNodeList->uItem)
+ break;
+ }
+ }
+ if (copy_to_user(pReq->data, pNodeList, sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)))) {
result = -EFAULT;
break;
}
- kfree(pNodeList);
- pReq->wResult = 0;
- break;
-
- case 0xFF:
- memset(wpa_Result.ifname,0,sizeof(wpa_Result.ifname));
- wpa_Result.proto = 0;
- wpa_Result.key_mgmt = 0;
- wpa_Result.eap_type = 0;
- wpa_Result.authenticated = FALSE;
- pDevice->fWPA_Authened = FALSE;
- if (copy_from_user(&wpa_Result, pReq->data, sizeof(wpa_Result))) {
- result = -EFAULT;
+ kfree(pNodeList);
+ pReq->wResult = 0;
+ break;
+
+ case 0xFF:
+ memset(wpa_Result.ifname, 0, sizeof(wpa_Result.ifname));
+ wpa_Result.proto = 0;
+ wpa_Result.key_mgmt = 0;
+ wpa_Result.eap_type = 0;
+ wpa_Result.authenticated = FALSE;
+ pDevice->fWPA_Authened = FALSE;
+ if (copy_from_user(&wpa_Result, pReq->data, sizeof(wpa_Result))) {
+ result = -EFAULT;
break;
}
-//DavidWang for some AP maybe good authenticate
- if(wpa_Result.key_mgmt==0x20)
- pMgmt->Cisco_cckm =1;
- else
- pMgmt->Cisco_cckm =0;
-
-
-if(wpa_Result.authenticated==TRUE) {
- {
- union iwreq_data wrqu;
-
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.flags = RT_WPACONNECTED_EVENT_FLAG;
- wrqu.data.length =pItemSSID->len;
- wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, pItemSSID->abySSID);
- }
- pDevice->fWPA_Authened = TRUE; //is successful peer to wpa_Result.authenticated?
-}
+ /* for some AP maybe good authenticate */
+ if (wpa_Result.key_mgmt == 0x20)
+ pMgmt->Cisco_cckm = 1;
+ else
+ pMgmt->Cisco_cckm = 0;
+
+ if (wpa_Result.authenticated == TRUE) {
+ {
+ union iwreq_data wrqu;
+
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.flags = RT_WPACONNECTED_EVENT_FLAG;
+ wrqu.data.length = pItemSSID->len;
+ wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, pItemSSID->abySSID);
+ }
- //printk("get private wpa_supplicant announce WPA SM\n");
- //printk("wpa-->ifname=%s\n",wpa_Result.ifname);
- //printk("wpa-->proto=%d\n",wpa_Result.proto);
- //printk("wpa-->key-mgmt=%d\n",wpa_Result.key_mgmt);
- //printk("wpa-->eap_type=%d\n",wpa_Result.eap_type);
- //printk("wpa-->authenticated is %s\n",(wpa_Result.authenticated==TRUE)?"TRUE":"FALSE");
+ pDevice->fWPA_Authened = TRUE; /* is successful peer to wpa_Result.authenticated? */
+ }
- pReq->wResult = 0;
- break;
+ pReq->wResult = 0;
+ break;
- default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Private command not support..\n");
- }
+ default:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Private command not support..\n");
+ }
- return result;
+ return result;
}
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index e18efd43e3e..27521b69ce0 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -611,17 +611,10 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
// if exist SW network address, use SW network address.
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Network address = %02x-%02x-%02x=%02x-%02x-%02x\n",
- pDevice->abyCurrentNetAddr[0],
- pDevice->abyCurrentNetAddr[1],
- pDevice->abyCurrentNetAddr[2],
- pDevice->abyCurrentNetAddr[3],
- pDevice->abyCurrentNetAddr[4],
- pDevice->abyCurrentNetAddr[5]);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Network address = %pM\n",
+ pDevice->abyCurrentNetAddr);
}
-
-
// Set BB and packet type at the same time.
// Set Short Slot Time, xIFS, and RSPINF.
if (pDevice->byBBType == BB_TYPE_11A) {
@@ -753,7 +746,7 @@ static const struct net_device_ops device_netdev_ops = {
.ndo_do_ioctl = device_ioctl,
.ndo_get_stats = device_get_stats,
.ndo_start_xmit = device_xmit,
- .ndo_set_multicast_list = device_set_multi,
+ .ndo_set_rx_mode = device_set_multi,
};
static int __devinit
diff --git a/drivers/staging/vt6656/wmgr.c b/drivers/staging/vt6656/wmgr.c
index d67748f90b1..f08e2d15c7b 100644
--- a/drivers/staging/vt6656/wmgr.c
+++ b/drivers/staging/vt6656/wmgr.c
@@ -2477,14 +2477,8 @@ void vMgrCreateOwnIBSS(void *hDeviceContext,
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
// AP mode BSSID = MAC addr
memcpy(pMgmt->abyCurrBSSID, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- DBG_PRT(MSG_LEVEL_INFO, KERN_INFO"AP beacon created BSSID:%02x-%02x-%02x-%02x-%02x-%02x \n",
- pMgmt->abyCurrBSSID[0],
- pMgmt->abyCurrBSSID[1],
- pMgmt->abyCurrBSSID[2],
- pMgmt->abyCurrBSSID[3],
- pMgmt->abyCurrBSSID[4],
- pMgmt->abyCurrBSSID[5]
- );
+ DBG_PRT(MSG_LEVEL_INFO, KERN_INFO"AP beacon created BSSID:"
+ "%pM\n", pMgmt->abyCurrBSSID);
}
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
@@ -2506,14 +2500,8 @@ void vMgrCreateOwnIBSS(void *hDeviceContext,
pMgmt->abyCurrBSSID[0] |= IEEE_ADDR_UNIVERSAL;
- DBG_PRT(MSG_LEVEL_INFO, KERN_INFO"Adhoc beacon created bssid:%02x-%02x-%02x-%02x-%02x-%02x \n",
- pMgmt->abyCurrBSSID[0],
- pMgmt->abyCurrBSSID[1],
- pMgmt->abyCurrBSSID[2],
- pMgmt->abyCurrBSSID[3],
- pMgmt->abyCurrBSSID[4],
- pMgmt->abyCurrBSSID[5]
- );
+ DBG_PRT(MSG_LEVEL_INFO, KERN_INFO"Adhoc beacon created bssid:"
+ "%pM\n", pMgmt->abyCurrBSSID);
}
// set BSSID filter
@@ -2878,14 +2866,8 @@ void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus)
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
memcpy(pDevice->abyBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Join IBSS ok:%02x-%02x-%02x-%02x-%02x-%02x \n",
- pMgmt->abyCurrBSSID[0],
- pMgmt->abyCurrBSSID[1],
- pMgmt->abyCurrBSSID[2],
- pMgmt->abyCurrBSSID[3],
- pMgmt->abyCurrBSSID[4],
- pMgmt->abyCurrBSSID[5]
- );
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Join IBSS ok:%pM\n",
+ pMgmt->abyCurrBSSID);
// Preamble type auto-switch: if AP can receive short-preamble cap,
// and if registry setting is short preamble we can turn on too.
@@ -2983,13 +2965,8 @@ s_vMgrSynchBSS (
memcpy(pMgmt->abyCurrBSSID, pCurr->abyBSSID, 6);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Sync:set CurrBSSID address = %02x-%02x-%02x=%02x-%02x-%02x\n",
- pMgmt->abyCurrBSSID[0],
- pMgmt->abyCurrBSSID[1],
- pMgmt->abyCurrBSSID[2],
- pMgmt->abyCurrBSSID[3],
- pMgmt->abyCurrBSSID[4],
- pMgmt->abyCurrBSSID[5]);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Sync:set CurrBSSID address = "
+ "%pM\n", pMgmt->abyCurrBSSID);
if (pCurr->eNetworkTypeInUse == PHY_TYPE_11A) {
if ((pDevice->eConfigPHYMode == PHY_TYPE_11A) ||
@@ -4334,14 +4311,8 @@ s_vMgrRxProbeRequest(
sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
vMgrDecodeProbeRequest(&sFrame);
/*
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request rx:MAC addr:%02x-%02x-%02x=%02x-%02x-%02x \n",
- sFrame.pHdr->sA3.abyAddr2[0],
- sFrame.pHdr->sA3.abyAddr2[1],
- sFrame.pHdr->sA3.abyAddr2[2],
- sFrame.pHdr->sA3.abyAddr2[3],
- sFrame.pHdr->sA3.abyAddr2[4],
- sFrame.pHdr->sA3.abyAddr2[5]
- );
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request rx:MAC addr:%pM\n",
+ sFrame.pHdr->sA3.abyAddr2);
*/
if (sFrame.pSSID->len != 0) {
if (sFrame.pSSID->len != ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len)
diff --git a/drivers/staging/vt6656/wpactl.c b/drivers/staging/vt6656/wpactl.c
index 9216df01829..2fa4f845a75 100644
--- a/drivers/staging/vt6656/wpactl.c
+++ b/drivers/staging/vt6656/wpactl.c
@@ -229,6 +229,9 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
return ret;
}
+ if (param->u.wpa_key.key && param->u.wpa_key.key_len > sizeof(abyKey))
+ return -EINVAL;
+
spin_unlock_irq(&pDevice->lock);
if(param->u.wpa_key.key && fcpfkernel) {
memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
@@ -269,6 +272,10 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
return ret;
}
+
+ if (param->u.wpa_key.seq && param->u.wpa_key.seq_len > sizeof(abySeq))
+ return -EINVAL;
+
spin_unlock_irq(&pDevice->lock);
if(param->u.wpa_key.seq && fcpfkernel) {
memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len);
@@ -772,9 +779,14 @@ static int wpa_set_associate(PSDevice pDevice,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming dBm = %d\n", param->u.wpa_associate.roam_dbm); //Davidwang
- if (param->u.wpa_associate.wpa_ie &&
- copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
- return -EINVAL;
+ if (param->u.wpa_associate.wpa_ie) {
+ if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
+ return -EINVAL;
+
+ if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie,
+ param->u.wpa_associate.wpa_ie_len))
+ return -EFAULT;
+ }
if (param->u.wpa_associate.mode == 1)
pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
diff --git a/drivers/staging/winbond/phy_calibration.c b/drivers/staging/winbond/phy_calibration.c
index c5a07fbe10a..77a3fff708c 100644
--- a/drivers/staging/winbond/phy_calibration.c
+++ b/drivers/staging/winbond/phy_calibration.c
@@ -24,7 +24,7 @@
#define AG_CONST 0.6072529350
#define FIXED(X) ((s32)((X) * 32768.0))
-#define DEG2RAD(X) 0.017453 * (X)
+#define DEG2RAD(X) (0.017453 * (X))
static const s32 Angles[] = {
FIXED(DEG2RAD(45.0)), FIXED(DEG2RAD(26.565)), FIXED(DEG2RAD(14.0362)),
@@ -51,7 +51,7 @@ s32 _s13_to_s32(u32 data)
if ((data & BIT(12)) != 0)
val |= 0xFFFFF000;
- return ((s32) val);
+ return (s32) val;
}
u32 _s32_to_s13(s32 data)
@@ -184,7 +184,7 @@ s32 _floor(s32 n)
else
n -= 5;
- return (n/10);
+ return n/10;
}
/****************************************************************************/
@@ -455,7 +455,7 @@ void _txidac_dc_offset_cancellation_winbond(struct hw_data *phw_data)
phy_set_rf_data(phw_data, 11, (11<<24)|0x1901D6);
/* 0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized */
phy_set_rf_data(phw_data, 5, (5<<24)|0x24C48A);
- /* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
+ /* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
phy_set_rf_data(phw_data, 6, (6<<24)|0x06890C);
/* 0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode */
phy_set_rf_data(phw_data, 0, (0<<24)|0xFDF1C0);
@@ -577,7 +577,7 @@ void _txqdac_dc_offset_cacellation_winbond(struct hw_data *phw_data)
phy_set_rf_data(phw_data, 11, (11<<24)|0x1901D6);
/* 0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized */
phy_set_rf_data(phw_data, 5, (5<<24)|0x24C48A);
- /* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
+ /* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
phy_set_rf_data(phw_data, 6, (6<<24)|0x06890C);
/* 0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode */
phy_set_rf_data(phw_data, 0, (0<<24)|0xFDF1C0);
@@ -974,7 +974,7 @@ void _tx_iq_calibration_winbond(struct hw_data *phw_data)
phy_set_rf_data(phw_data, 11, (11<<24)|0x19BDD6); /* 20060612.1.a 0x1905D6); */
/* 0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized */
phy_set_rf_data(phw_data, 5, (5<<24)|0x24C60A); /* 0x24C60A (high temperature) */
- /* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
+ /* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
phy_set_rf_data(phw_data, 6, (6<<24)|0x34880C); /* 20060612.1.a 0x06890C); */
/* 0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode */
phy_set_rf_data(phw_data, 0, (0<<24)|0xFDF1C0);
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 3724e1e67ec..a2e8bd452ed 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -277,7 +277,7 @@ static int wbsoft_config(struct ieee80211_hw *dev, u32 changed)
return 0;
}
-static u64 wbsoft_get_tsf(struct ieee80211_hw *dev)
+static u64 wbsoft_get_tsf(struct ieee80211_hw *dev, struct ieee80211_vif *vif)
{
printk("wbsoft_get_tsf called\n");
return 0;
diff --git a/drivers/staging/wlags49_h2/Makefile b/drivers/staging/wlags49_h2/Makefile
index e604ebd5aeb..31e1d89a384 100644
--- a/drivers/staging/wlags49_h2/Makefile
+++ b/drivers/staging/wlags49_h2/Makefile
@@ -11,11 +11,9 @@
#
# If you want to build AP support (untested), comment out -DSTA_ONLY
-INSTALLDIR := /lib/modules/$(shell uname -r)/kernel/drivers/net/wireless
ccflags-y := -I$(KERNELDIR)/include
ccflags-y += -I$(src) \
-DBUS_PCMCIA \
- -DUSE_WPA \
-DUSE_WEXT \
-DSTA_ONLY \
-DWVLAN_49 \
@@ -39,9 +37,6 @@ $(WLNAME)-y += ap_h25.o
endif
endif
-# If KERNELRELEASE is defined, we've been invoked from the
-# kernel build system and can use its language.
-ifneq ($(KERNELRELEASE),)
obj-m += $(WLNAME).o
@@ -58,24 +53,3 @@ $(WLNAME)-y += wl_profile.o \
dhf.o
$(WLNAME)-$(CONFIG_SYSFS) += wl_sysfs.o
-
-# Otherwise we were called directly from the command
-# line; invoke the kernel build system.
-else
- KERNELDIR ?= /lib/modules/$(shell uname -r)/build
- PWD := $(shell pwd)
-
-default:
- $(MAKE) -C $(KERNELDIR) M=$(PWD) modules
-endif
-
-clean:
- rm -fr *.o *.ko *.mod.c *.mod.o .*.*.cmd Module.symvers \
- Module.markers modules.order .tmp_versions
-
-install: default
- -rmmod $(WLNAME)
- install -d $(INSTALLDIR)
- install -m 0644 -o root -g root $(WLNAME).ko $(INSTALLDIR)
- /sbin/depmod -aq
-
diff --git a/drivers/staging/wlags49_h2/debug.h b/drivers/staging/wlags49_h2/debug.h
index 2c3dd140a35..8d5dddf0805 100644
--- a/drivers/staging/wlags49_h2/debug.h
+++ b/drivers/staging/wlags49_h2/debug.h
@@ -129,11 +129,15 @@
#define _LEAVE_STR "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
-#define _DBG_ENTER(A) DBG_PRINT("%s:%.*s:%s\n", DBG_NAME(A), ++DBG_LEVEL(A), _ENTER_STR, __FUNC__)
-#define _DBG_LEAVE(A) DBG_PRINT("%s:%.*s:%s\n", DBG_NAME(A), DBG_LEVEL(A)--, _LEAVE_STR, __FUNC__)
+#define _DBG_ENTER(A) \
+ DBG_PRINT("%s:%.*s:%s\n", DBG_NAME(A), ++DBG_LEVEL(A), \
+ _ENTER_STR, __func__)
+#define _DBG_LEAVE(A) \
+ DBG_PRINT("%s:%.*s:%s\n", DBG_NAME(A), DBG_LEVEL(A)--, \
+ _LEAVE_STR, __func__)
-#define DBG_FUNC(F) static const char *__FUNC__ = F;
+#define DBG_FUNC(F)
#define DBG_ENTER(A) {if (DBG_FLAGS(A) & DBG_TRACE_ON) \
_DBG_ENTER(A); }
@@ -145,29 +149,33 @@
DBG_PRINT(" %s -- "F"\n", N, S); }
-#define DBG_ERROR(A, S...) {if (DBG_FLAGS(A) & DBG_ERROR_ON) {\
- DBG_PRINT("%s:ERROR:%s ", DBG_NAME(A), __FUNC__);\
- DBG_PRINTC(S); \
- DBG_TRAP; \
- } \
- }
+#define DBG_ERROR(A, S...) do { \
+ if (DBG_FLAGS(A) & DBG_ERROR_ON) { \
+ DBG_PRINT("%s:ERROR:%s ", DBG_NAME(A), __func__); \
+ DBG_PRINTC(S); \
+ DBG_TRAP; \
+ } } while (0)
-#define DBG_WARNING(A, S...) {if (DBG_FLAGS(A) & DBG_WARNING_ON) {\
- DBG_PRINT("%s:WARNING:%s ", DBG_NAME(A), __FUNC__);\
- DBG_PRINTC(S); } }
+#define DBG_WARNING(A, S...) do { \
+ if (DBG_FLAGS(A) & DBG_WARNING_ON) { \
+ DBG_PRINT("%s:WARNING:%s ", DBG_NAME(A), __func__); \
+ DBG_PRINTC(S); \
+ } } while (0)
-#define DBG_NOTICE(A, S...) {if (DBG_FLAGS(A) & DBG_NOTICE_ON) {\
- DBG_PRINT("%s:NOTICE:%s ", DBG_NAME(A), __FUNC__);\
- DBG_PRINTC(S); \
- } \
- }
+#define DBG_NOTICE(A, S...) do { \
+ if (DBG_FLAGS(A) & DBG_NOTICE_ON) { \
+ DBG_PRINT("%s:NOTICE:%s ", DBG_NAME(A), __func__); \
+ DBG_PRINTC(S); \
+ } } while (0)
-#define DBG_TRACE(A, S...) do {if (DBG_FLAGS(A) & DBG_TRACE_ON) {\
- DBG_PRINT("%s:%s ", DBG_NAME(A), __FUNC__);\
- DBG_PRINTC(S); } } while (0)
+#define DBG_TRACE(A, S...) do { \
+ if (DBG_FLAGS(A) & DBG_TRACE_ON) { \
+ DBG_PRINT("%s:%s ", DBG_NAME(A), __func__); \
+ DBG_PRINTC(S); \
+ } } while (0)
#define DBG_RX(A, S...) {if (DBG_FLAGS(A) & DBG_RX_ON) {\
@@ -181,13 +189,12 @@
DBG_PRINT(S); } }
-#define DBG_ASSERT(C) { \
- if (!(C)) {\
- DBG_PRINT("ASSERT(%s) -- %s#%d (%s)\n", \
- #C, __FILE__, __LINE__, __FUNC__); \
- DBG_TRAP; \
- } \
- }
+#define DBG_ASSERT(C) do { \
+ if (!(C)) { \
+ DBG_PRINT("ASSERT(%s) -- %s#%d (%s)\n", \
+ #C, __FILE__, __LINE__, __func__); \
+ DBG_TRAP; \
+ } } while (0)
typedef struct {
char *dbgName;
diff --git a/drivers/staging/wlags49_h2/hcf.c b/drivers/staging/wlags49_h2/hcf.c
index a73317ef935..7dc176a95aa 100644
--- a/drivers/staging/wlags49_h2/hcf.c
+++ b/drivers/staging/wlags49_h2/hcf.c
@@ -1,97 +1,96 @@
-// vim:tw=110:ts=4:
/************************************************************************************************************
-*
-* FILE : HCF.C
-*
-* DATE : $Date: 2004/08/05 11:47:10 $ $Revision: 1.10 $
-* Original: 2004/06/02 10:22:22 Revision: 1.85 Tag: hcf7_t20040602_01
-* Original: 2004/04/15 09:24:41 Revision: 1.63 Tag: hcf7_t7_20040415_01
-* Original: 2004/04/13 14:22:44 Revision: 1.62 Tag: t7_20040413_01
-* Original: 2004/04/01 15:32:55 Revision: 1.59 Tag: t7_20040401_01
-* Original: 2004/03/10 15:39:27 Revision: 1.55 Tag: t20040310_01
-* Original: 2004/03/04 11:03:37 Revision: 1.53 Tag: t20040304_01
-* Original: 2004/03/02 14:51:21 Revision: 1.50 Tag: t20040302_03
-* Original: 2004/02/24 13:00:27 Revision: 1.43 Tag: t20040224_01
-* Original: 2004/02/19 10:57:25 Revision: 1.39 Tag: t20040219_01
-*
-* AUTHOR : Nico Valster
-*
-* SPECIFICATION: ........
-*
-* DESCRIPTION : HCF Routines for Hermes-II (callable via the Wireless Connection I/F or WCI)
-* Local Support Routines for above procedures
-*
-* Customizable via HCFCFG.H, which is included by HCF.H
-*
-*************************************************************************************************************
-*
-*
-* SOFTWARE LICENSE
-*
-* This software is provided subject to the following terms and conditions,
-* which you should read carefully before using the software. Using this
-* software indicates your acceptance of these terms and conditions. If you do
-* not agree with these terms and conditions, do not use the software.
-*
-* COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
-* COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
-* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
-* All rights reserved.
-*
-* Redistribution and use in source or binary forms, with or without
-* modifications, are permitted provided that the following conditions are met:
-*
-* . Redistributions of source code must retain the above copyright notice, this
-* list of conditions and the following Disclaimer as comments in the code as
-* well as in the documentation and/or other materials provided with the
-* distribution.
-*
-* . Redistributions in binary form must reproduce the above copyright notice,
-* this list of conditions and the following Disclaimer in the documentation
-* and/or other materials provided with the distribution.
-*
-* . Neither the name of Agere Systems Inc. nor the names of the contributors
-* may be used to endorse or promote products derived from this software
-* without specific prior written permission.
-*
-* Disclaimer
-*
-* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
-* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
-* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
-* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
-* RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
-* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-* ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
-* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-* DAMAGE.
-*
-*
-************************************************************************************************************/
+ *
+ * FILE : HCF.C
+ *
+ * DATE : $Date: 2004/08/05 11:47:10 $ $Revision: 1.10 $
+ * Original: 2004/06/02 10:22:22 Revision: 1.85 Tag: hcf7_t20040602_01
+ * Original: 2004/04/15 09:24:41 Revision: 1.63 Tag: hcf7_t7_20040415_01
+ * Original: 2004/04/13 14:22:44 Revision: 1.62 Tag: t7_20040413_01
+ * Original: 2004/04/01 15:32:55 Revision: 1.59 Tag: t7_20040401_01
+ * Original: 2004/03/10 15:39:27 Revision: 1.55 Tag: t20040310_01
+ * Original: 2004/03/04 11:03:37 Revision: 1.53 Tag: t20040304_01
+ * Original: 2004/03/02 14:51:21 Revision: 1.50 Tag: t20040302_03
+ * Original: 2004/02/24 13:00:27 Revision: 1.43 Tag: t20040224_01
+ * Original: 2004/02/19 10:57:25 Revision: 1.39 Tag: t20040219_01
+ *
+ * AUTHOR : Nico Valster
+ *
+ * SPECIFICATION: ........
+ *
+ * DESCRIPTION : HCF Routines for Hermes-II (callable via the Wireless Connection I/F or WCI)
+ * Local Support Routines for above procedures
+ *
+ * Customizable via HCFCFG.H, which is included by HCF.H
+ *
+ *************************************************************************************************************
+ *
+ *
+ * SOFTWARE LICENSE
+ *
+ * This software is provided subject to the following terms and conditions,
+ * which you should read carefully before using the software. Using this
+ * software indicates your acceptance of these terms and conditions. If you do
+ * not agree with these terms and conditions, do not use the software.
+ *
+ * COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+ * COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+ * COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+ * All rights reserved.
+ *
+ * Redistribution and use in source or binary forms, with or without
+ * modifications, are permitted provided that the following conditions are met:
+ *
+ * . Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following Disclaimer as comments in the code as
+ * well as in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * . Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following Disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * . Neither the name of Agere Systems Inc. nor the names of the contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Disclaimer
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
+ * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
+ * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ *
+ ************************************************************************************************************/
/************************************************************************************************************
-**
-** Implementation Notes
-**
-* - a leading marker of //! is used. The purpose of such a sequence is to help to understand the flow
-* An example is: //!rc = HCF_SUCCESS;
-* if this is superfluous because rc is already guaranteed to be 0 but it shows to the (maintenance)
-* programmer it is an intentional omission at the place where someone could consider it most appropriate at
-* first glance
-* - using near pointers in a model where ss!=ds is an invitation for disaster, so be aware of how you specify
-* your model and how you define variables which are used at interrupt time
-* - remember that sign extension on 32 bit platforms may cause problems unless code is carefully constructed,
-* e.g. use "(hcf_16)~foo" rather than "~foo"
-*
-************************************************************************************************************/
+ **
+ ** Implementation Notes
+ **
+ * - a leading marker of //! is used. The purpose of such a sequence is to help to understand the flow
+ * An example is: //!rc = HCF_SUCCESS;
+ * if this is superfluous because rc is already guaranteed to be 0 but it shows to the (maintenance)
+ * programmer it is an intentional omission at the place where someone could consider it most appropriate at
+ * first glance
+ * - using near pointers in a model where ss!=ds is an invitation for disaster, so be aware of how you specify
+ * your model and how you define variables which are used at interrupt time
+ * - remember that sign extension on 32 bit platforms may cause problems unless code is carefully constructed,
+ * e.g. use "(hcf_16)~foo" rather than "~foo"
+ *
+ ************************************************************************************************************/
-#include "hcf.h" // HCF and MSF common include file
-#include "hcfdef.h" // HCF specific include file
-#include "mmd.h" // MoreModularDriver common include file
+#include "hcf.h" // HCF and MSF common include file
+#include "hcfdef.h" // HCF specific include file
+#include "mmd.h" // MoreModularDriver common include file
#include <linux/kernel.h>
#if ! defined offsetof
@@ -102,56 +101,52 @@
/***********************************************************************************************************/
/*************************************** PROTOTYPES ******************************************************/
/***********************************************************************************************************/
-HCF_STATIC int cmd_exe( IFBP ifbp, hcf_16 cmd_code, hcf_16 par_0 );
-HCF_STATIC int init( IFBP ifbp );
-HCF_STATIC int put_info( IFBP ifbp, LTVP ltvp );
-#if (HCF_EXT) & HCF_EXT_MB
-HCF_STATIC int put_info_mb( IFBP ifbp, CFG_MB_INFO_STRCT FAR * ltvp );
-#endif // HCF_EXT_MB
+HCF_STATIC int cmd_exe( IFBP ifbp, hcf_16 cmd_code, hcf_16 par_0 );
+HCF_STATIC int init( IFBP ifbp );
+HCF_STATIC int put_info( IFBP ifbp, LTVP ltvp );
+HCF_STATIC int put_info_mb( IFBP ifbp, CFG_MB_INFO_STRCT FAR * ltvp );
#if (HCF_TYPE) & HCF_TYPE_WPA
-HCF_STATIC void calc_mic( hcf_32* p, hcf_32 M );
-void calc_mic_rx_frag( IFBP ifbp, wci_bufp p, int len );
-void calc_mic_tx_frag( IFBP ifbp, wci_bufp p, int len );
-HCF_STATIC int check_mic( IFBP ifbp );
+HCF_STATIC void calc_mic( hcf_32* p, hcf_32 M );
+void calc_mic_rx_frag( IFBP ifbp, wci_bufp p, int len );
+void calc_mic_tx_frag( IFBP ifbp, wci_bufp p, int len );
+HCF_STATIC int check_mic( IFBP ifbp );
#endif // HCF_TYPE_WPA
-HCF_STATIC void calibrate( IFBP ifbp );
-HCF_STATIC int cmd_cmpl( IFBP ifbp );
-HCF_STATIC hcf_16 get_fid( IFBP ifbp );
-HCF_STATIC void isr_info( IFBP ifbp );
+HCF_STATIC void calibrate( IFBP ifbp );
+HCF_STATIC int cmd_cmpl( IFBP ifbp );
+HCF_STATIC hcf_16 get_fid( IFBP ifbp );
+HCF_STATIC void isr_info( IFBP ifbp );
#if HCF_DMA
-HCF_STATIC DESC_STRCT* get_frame_lst(IFBP ifbp, int tx_rx_flag);
+HCF_STATIC DESC_STRCT* get_frame_lst(IFBP ifbp, int tx_rx_flag);
#endif // HCF_DMA
-HCF_STATIC void get_frag( IFBP ifbp, wci_bufp bufp, int len BE_PAR( int word_len ) ); //char*, byte count (usually even)
+HCF_STATIC void get_frag( IFBP ifbp, wci_bufp bufp, int len BE_PAR( int word_len ) ); //char*, byte count (usually even)
#if HCF_DMA
-HCF_STATIC void put_frame_lst( IFBP ifbp, DESC_STRCT *descp, int tx_rx_flag );
+HCF_STATIC void put_frame_lst( IFBP ifbp, DESC_STRCT *descp, int tx_rx_flag );
#endif // HCF_DMA
-HCF_STATIC void put_frag( IFBP ifbp, wci_bufp bufp, int len BE_PAR( int word_len ) );
-HCF_STATIC void put_frag_finalize( IFBP ifbp );
-HCF_STATIC int setup_bap( IFBP ifbp, hcf_16 fid, int offset, int type );
+HCF_STATIC void put_frag( IFBP ifbp, wci_bufp bufp, int len BE_PAR( int word_len ) );
+HCF_STATIC void put_frag_finalize( IFBP ifbp );
+HCF_STATIC int setup_bap( IFBP ifbp, hcf_16 fid, int offset, int type );
#if (HCF_ASSERT) & HCF_ASSERT_PRINTF
static int fw_printf(IFBP ifbp, CFG_FW_PRINTF_STRCT FAR *ltvp);
#endif // HCF_ASSERT_PRINTF
-HCF_STATIC int download( IFBP ifbp, CFG_PROG_STRCT FAR *ltvp );
-#if (HCF_ENCAP) & HCF_ENC
-HCF_STATIC hcf_8 hcf_encap( wci_bufp type );
-#endif // HCF_ENCAP
-HCF_STATIC hcf_8 null_addr[4] = { 0, 0, 0, 0 };
-#if ! defined IN_PORT_WORD //replace I/O Macros with logging facility
+HCF_STATIC int download( IFBP ifbp, CFG_PROG_STRCT FAR *ltvp );
+HCF_STATIC hcf_8 hcf_encap( wci_bufp type );
+HCF_STATIC hcf_8 null_addr[4] = { 0, 0, 0, 0 };
+#if ! defined IN_PORT_WORD //replace I/O Macros with logging facility
extern FILE *log_file;
-#define IN_PORT_WORD(port) in_port_word( (hcf_io)(port) )
+#define IN_PORT_WORD(port) in_port_word( (hcf_io)(port) )
static hcf_16 in_port_word( hcf_io port ) {
-hcf_16 i = (hcf_16)_inpw( port );
+ hcf_16 i = (hcf_16)_inpw( port );
if ( log_file ) {
fprintf( log_file, "\nR %2.2x %4.4x", (port)&0xFF, i);
}
return i;
} // in_port_word
-#define OUT_PORT_WORD(port, value) out_port_word( (hcf_io)(port), (hcf_16)(value) )
+#define OUT_PORT_WORD(port, value) out_port_word( (hcf_io)(port), (hcf_16)(value) )
static void out_port_word( hcf_io port, hcf_16 value ) {
_outpw( port, value );
@@ -160,12 +155,12 @@ static void out_port_word( hcf_io port, hcf_16 value ) {
}
}
-void IN_PORT_STRING_32( hcf_io prt, hcf_32 FAR * dst, int n) {
+void IN_PORT_STRING_32( hcf_io prt, hcf_32 FAR * dst, int n) {
int i = 0;
hcf_16 FAR * p;
if ( log_file ) {
fprintf( log_file, "\nread string_32 length %04x (%04d) at port %02.2x to addr %lp",
- (hcf_16)n, (hcf_16)n, (hcf_16)(prt)&0xFF, dst);
+ (hcf_16)n, (hcf_16)n, (hcf_16)(prt)&0xFF, dst);
}
while ( n-- ) {
p = (hcf_16 FAR *)dst;
@@ -178,12 +173,12 @@ void IN_PORT_STRING_32( hcf_io prt, hcf_32 FAR * dst, int n) {
}
} // IN_PORT_STRING_32
-void IN_PORT_STRING_8_16( hcf_io prt, hcf_8 FAR * dst, int n) { //also handles byte alignment problems
- hcf_16 FAR * p = (hcf_16 FAR *)dst; //this needs more elaborate code in non-x86 platforms
+void IN_PORT_STRING_8_16( hcf_io prt, hcf_8 FAR * dst, int n) { //also handles byte alignment problems
+ hcf_16 FAR * p = (hcf_16 FAR *)dst; //this needs more elaborate code in non-x86 platforms
int i = 0;
if ( log_file ) {
fprintf( log_file, "\nread string_16 length %04x (%04d) at port %02.2x to addr %lp",
- (hcf_16)n, (hcf_16)n, (hcf_16)(prt)&0xFF, dst );
+ (hcf_16)n, (hcf_16)n, (hcf_16)(prt)&0xFF, dst );
}
while ( n-- ) {
*p =(hcf_16)_inpw( prt);
@@ -198,12 +193,12 @@ void IN_PORT_STRING_8_16( hcf_io prt, hcf_8 FAR * dst, int n) { //also handles b
}
} // IN_PORT_STRING_8_16
-void OUT_PORT_STRING_32( hcf_io prt, hcf_32 FAR * src, int n) {
+void OUT_PORT_STRING_32( hcf_io prt, hcf_32 FAR * src, int n) {
int i = 0;
hcf_16 FAR * p;
if ( log_file ) {
fprintf( log_file, "\nwrite string_32 length %04x (%04d) at port %02.2x",
- (hcf_16)n, (hcf_16)n, (hcf_16)(prt)&0xFF);
+ (hcf_16)n, (hcf_16)n, (hcf_16)(prt)&0xFF);
}
while ( n-- ) {
p = (hcf_16 FAR *)src;
@@ -216,8 +211,8 @@ void OUT_PORT_STRING_32( hcf_io prt, hcf_32 FAR * src, int n) {
}
} // OUT_PORT_STRING_32
-void OUT_PORT_STRING_8_16( hcf_io prt, hcf_8 FAR * src, int n) { //also handles byte alignment problems
- hcf_16 FAR * p = (hcf_16 FAR *)src; //this needs more elaborate code in non-x86 platforms
+void OUT_PORT_STRING_8_16( hcf_io prt, hcf_8 FAR * src, int n) { //also handles byte alignment problems
+ hcf_16 FAR * p = (hcf_16 FAR *)src; //this needs more elaborate code in non-x86 platforms
int i = 0;
if ( log_file ) {
fprintf( log_file, "\nwrite string_16 length %04x (%04d) at port %04x", n, n, (hcf_16)prt);
@@ -238,27 +233,25 @@ void OUT_PORT_STRING_8_16( hcf_io prt, hcf_8 FAR * src, int n) { //also handles
#endif // IN_PORT_WORD
/************************************************************************************************************
-******************************* D A T A D E F I N I T I O N S ********************************************
-************************************************************************************************************/
+ ******************************* D A T A D E F I N I T I O N S ********************************************
+ ************************************************************************************************************/
#if HCF_ASSERT
-IFBP BASED assert_ifbp = NULL; //to make asserts easily work under MMD and DHF
+IFBP BASED assert_ifbp = NULL; //to make asserts easily work under MMD and DHF
#endif // HCF_ASSERT
-#if HCF_ENCAP
/* SNAP header to be inserted in Ethernet-II frames */
-HCF_STATIC hcf_8 BASED snap_header[] = { 0xAA, 0xAA, 0x03, 0x00, 0x00, //5 bytes signature +
- 0 }; //1 byte protocol identifier
-#endif // HCF_ENCAP
+HCF_STATIC hcf_8 BASED snap_header[] = { 0xAA, 0xAA, 0x03, 0x00, 0x00, //5 bytes signature +
+ 0 }; //1 byte protocol identifier
#if (HCF_TYPE) & HCF_TYPE_WPA
-HCF_STATIC hcf_8 BASED mic_pad[8] = { 0x5A, 0, 0, 0, 0, 0, 0, 0 }; //MIC padding of message
+HCF_STATIC hcf_8 BASED mic_pad[8] = { 0x5A, 0, 0, 0, 0, 0, 0, 0 }; //MIC padding of message
#endif // HCF_TYPE_WPA
#if defined MSF_COMPONENT_ID
CFG_IDENTITY_STRCT BASED cfg_drv_identity = {
- sizeof(cfg_drv_identity)/sizeof(hcf_16) - 1, //length of RID
- CFG_DRV_IDENTITY, // (0x0826)
+ sizeof(cfg_drv_identity)/sizeof(hcf_16) - 1, //length of RID
+ CFG_DRV_IDENTITY, // (0x0826)
MSF_COMPONENT_ID,
MSF_COMPONENT_VAR,
MSF_COMPONENT_MAJOR_VER,
@@ -266,186 +259,186 @@ CFG_IDENTITY_STRCT BASED cfg_drv_identity = {
} ;
CFG_RANGES_STRCT BASED cfg_drv_sup_range = {
- sizeof(cfg_drv_sup_range)/sizeof(hcf_16) - 1, //length of RID
- CFG_DRV_SUP_RANGE, // (0x0827)
+ sizeof(cfg_drv_sup_range)/sizeof(hcf_16) - 1, //length of RID
+ CFG_DRV_SUP_RANGE, // (0x0827)
COMP_ROLE_SUPL,
COMP_ID_DUI,
- {{ DUI_COMPAT_VAR,
- DUI_COMPAT_BOT,
- DUI_COMPAT_TOP
+ {{ DUI_COMPAT_VAR,
+ DUI_COMPAT_BOT,
+ DUI_COMPAT_TOP
}}
} ;
struct CFG_RANGE3_STRCT BASED cfg_drv_act_ranges_pri = {
- sizeof(cfg_drv_act_ranges_pri)/sizeof(hcf_16) - 1, //length of RID
- CFG_DRV_ACT_RANGES_PRI, // (0x0828)
+ sizeof(cfg_drv_act_ranges_pri)/sizeof(hcf_16) - 1, //length of RID
+ CFG_DRV_ACT_RANGES_PRI, // (0x0828)
COMP_ROLE_ACT,
COMP_ID_PRI,
{
- { 0, 0, 0 }, // HCF_PRI_VAR_1 not supported by HCF 7
- { 0, 0, 0 }, // HCF_PRI_VAR_2 not supported by HCF 7
- { 3, //var_rec[2] - Variant number
- CFG_DRV_ACT_RANGES_PRI_3_BOTTOM, // - Bottom Compatibility
- CFG_DRV_ACT_RANGES_PRI_3_TOP // - Top Compatibility
- }
+ { 0, 0, 0 }, // HCF_PRI_VAR_1 not supported by HCF 7
+ { 0, 0, 0 }, // HCF_PRI_VAR_2 not supported by HCF 7
+ { 3, //var_rec[2] - Variant number
+ CFG_DRV_ACT_RANGES_PRI_3_BOTTOM, // - Bottom Compatibility
+ CFG_DRV_ACT_RANGES_PRI_3_TOP // - Top Compatibility
+ }
}
} ;
struct CFG_RANGE4_STRCT BASED cfg_drv_act_ranges_sta = {
- sizeof(cfg_drv_act_ranges_sta)/sizeof(hcf_16) - 1, //length of RID
- CFG_DRV_ACT_RANGES_STA, // (0x0829)
+ sizeof(cfg_drv_act_ranges_sta)/sizeof(hcf_16) - 1, //length of RID
+ CFG_DRV_ACT_RANGES_STA, // (0x0829)
COMP_ROLE_ACT,
COMP_ID_STA,
{
#if defined HCF_STA_VAR_1
- { 1, //var_rec[1] - Variant number
- CFG_DRV_ACT_RANGES_STA_1_BOTTOM, // - Bottom Compatibility
- CFG_DRV_ACT_RANGES_STA_1_TOP // - Top Compatibility
- },
+ { 1, //var_rec[1] - Variant number
+ CFG_DRV_ACT_RANGES_STA_1_BOTTOM, // - Bottom Compatibility
+ CFG_DRV_ACT_RANGES_STA_1_TOP // - Top Compatibility
+ },
#else
- { 0, 0, 0 },
+ { 0, 0, 0 },
#endif // HCF_STA_VAR_1
#if defined HCF_STA_VAR_2
- { 2, //var_rec[1] - Variant number
- CFG_DRV_ACT_RANGES_STA_2_BOTTOM, // - Bottom Compatibility
- CFG_DRV_ACT_RANGES_STA_2_TOP // - Top Compatibility
- },
+ { 2, //var_rec[1] - Variant number
+ CFG_DRV_ACT_RANGES_STA_2_BOTTOM, // - Bottom Compatibility
+ CFG_DRV_ACT_RANGES_STA_2_TOP // - Top Compatibility
+ },
#else
- { 0, 0, 0 },
+ { 0, 0, 0 },
#endif // HCF_STA_VAR_2
// For Native_USB (Not used!)
#if defined HCF_STA_VAR_3
- { 3, //var_rec[1] - Variant number
- CFG_DRV_ACT_RANGES_STA_3_BOTTOM, // - Bottom Compatibility
- CFG_DRV_ACT_RANGES_STA_3_TOP // - Top Compatibility
- },
+ { 3, //var_rec[1] - Variant number
+ CFG_DRV_ACT_RANGES_STA_3_BOTTOM, // - Bottom Compatibility
+ CFG_DRV_ACT_RANGES_STA_3_TOP // - Top Compatibility
+ },
#else
- { 0, 0, 0 },
+ { 0, 0, 0 },
#endif // HCF_STA_VAR_3
// Warp
#if defined HCF_STA_VAR_4
- { 4, //var_rec[1] - Variant number
- CFG_DRV_ACT_RANGES_STA_4_BOTTOM, // - Bottom Compatibility
- CFG_DRV_ACT_RANGES_STA_4_TOP // - Top Compatibility
- }
+ { 4, //var_rec[1] - Variant number
+ CFG_DRV_ACT_RANGES_STA_4_BOTTOM, // - Bottom Compatibility
+ CFG_DRV_ACT_RANGES_STA_4_TOP // - Top Compatibility
+ }
#else
- { 0, 0, 0 }
+ { 0, 0, 0 }
#endif // HCF_STA_VAR_4
}
} ;
struct CFG_RANGE6_STRCT BASED cfg_drv_act_ranges_hsi = {
- sizeof(cfg_drv_act_ranges_hsi)/sizeof(hcf_16) - 1, //length of RID
- CFG_DRV_ACT_RANGES_HSI, // (0x082A)
+ sizeof(cfg_drv_act_ranges_hsi)/sizeof(hcf_16) - 1, //length of RID
+ CFG_DRV_ACT_RANGES_HSI, // (0x082A)
COMP_ROLE_ACT,
COMP_ID_HSI,
{
-#if defined HCF_HSI_VAR_0 // Controlled deployment
- { 0, // var_rec[1] - Variant number
- CFG_DRV_ACT_RANGES_HSI_0_BOTTOM, // - Bottom Compatibility
- CFG_DRV_ACT_RANGES_HSI_0_TOP // - Top Compatibility
- },
+#if defined HCF_HSI_VAR_0 // Controlled deployment
+ { 0, // var_rec[1] - Variant number
+ CFG_DRV_ACT_RANGES_HSI_0_BOTTOM, // - Bottom Compatibility
+ CFG_DRV_ACT_RANGES_HSI_0_TOP // - Top Compatibility
+ },
#else
- { 0, 0, 0 },
+ { 0, 0, 0 },
#endif // HCF_HSI_VAR_0
- { 0, 0, 0 }, // HCF_HSI_VAR_1 not supported by HCF 7
- { 0, 0, 0 }, // HCF_HSI_VAR_2 not supported by HCF 7
- { 0, 0, 0 }, // HCF_HSI_VAR_3 not supported by HCF 7
-#if defined HCF_HSI_VAR_4 // Hermes-II all types
- { 4, // var_rec[1] - Variant number
- CFG_DRV_ACT_RANGES_HSI_4_BOTTOM, // - Bottom Compatibility
- CFG_DRV_ACT_RANGES_HSI_4_TOP // - Top Compatibility
- },
+ { 0, 0, 0 }, // HCF_HSI_VAR_1 not supported by HCF 7
+ { 0, 0, 0 }, // HCF_HSI_VAR_2 not supported by HCF 7
+ { 0, 0, 0 }, // HCF_HSI_VAR_3 not supported by HCF 7
+#if defined HCF_HSI_VAR_4 // Hermes-II all types
+ { 4, // var_rec[1] - Variant number
+ CFG_DRV_ACT_RANGES_HSI_4_BOTTOM, // - Bottom Compatibility
+ CFG_DRV_ACT_RANGES_HSI_4_TOP // - Top Compatibility
+ },
#else
- { 0, 0, 0 },
+ { 0, 0, 0 },
#endif // HCF_HSI_VAR_4
-#if defined HCF_HSI_VAR_5 // WARP Hermes-2.5
- { 5, // var_rec[1] - Variant number
- CFG_DRV_ACT_RANGES_HSI_5_BOTTOM, // - Bottom Compatibility
- CFG_DRV_ACT_RANGES_HSI_5_TOP // - Top Compatibility
- }
+#if defined HCF_HSI_VAR_5 // WARP Hermes-2.5
+ { 5, // var_rec[1] - Variant number
+ CFG_DRV_ACT_RANGES_HSI_5_BOTTOM, // - Bottom Compatibility
+ CFG_DRV_ACT_RANGES_HSI_5_TOP // - Top Compatibility
+ }
#else
- { 0, 0, 0 }
+ { 0, 0, 0 }
#endif // HCF_HSI_VAR_5
}
} ;
CFG_RANGE4_STRCT BASED cfg_drv_act_ranges_apf = {
- sizeof(cfg_drv_act_ranges_apf)/sizeof(hcf_16) - 1, //length of RID
- CFG_DRV_ACT_RANGES_APF, // (0x082B)
+ sizeof(cfg_drv_act_ranges_apf)/sizeof(hcf_16) - 1, //length of RID
+ CFG_DRV_ACT_RANGES_APF, // (0x082B)
COMP_ROLE_ACT,
COMP_ID_APF,
{
-#if defined HCF_APF_VAR_1 //(Fake) Hermes-I
- { 1, //var_rec[1] - Variant number
- CFG_DRV_ACT_RANGES_APF_1_BOTTOM, // - Bottom Compatibility
- CFG_DRV_ACT_RANGES_APF_1_TOP // - Top Compatibility
- },
+#if defined HCF_APF_VAR_1 //(Fake) Hermes-I
+ { 1, //var_rec[1] - Variant number
+ CFG_DRV_ACT_RANGES_APF_1_BOTTOM, // - Bottom Compatibility
+ CFG_DRV_ACT_RANGES_APF_1_TOP // - Top Compatibility
+ },
#else
- { 0, 0, 0 },
+ { 0, 0, 0 },
#endif // HCF_APF_VAR_1
-#if defined HCF_APF_VAR_2 //Hermes-II
- { 2, // var_rec[1] - Variant number
- CFG_DRV_ACT_RANGES_APF_2_BOTTOM, // - Bottom Compatibility
- CFG_DRV_ACT_RANGES_APF_2_TOP // - Top Compatibility
- },
+#if defined HCF_APF_VAR_2 //Hermes-II
+ { 2, // var_rec[1] - Variant number
+ CFG_DRV_ACT_RANGES_APF_2_BOTTOM, // - Bottom Compatibility
+ CFG_DRV_ACT_RANGES_APF_2_TOP // - Top Compatibility
+ },
#else
- { 0, 0, 0 },
+ { 0, 0, 0 },
#endif // HCF_APF_VAR_2
-#if defined HCF_APF_VAR_3 // Native_USB
- { 3, // var_rec[1] - Variant number
- CFG_DRV_ACT_RANGES_APF_3_BOTTOM, // - Bottom Compatibility !!!!!see note below!!!!!!!
- CFG_DRV_ACT_RANGES_APF_3_TOP // - Top Compatibility
- },
+#if defined HCF_APF_VAR_3 // Native_USB
+ { 3, // var_rec[1] - Variant number
+ CFG_DRV_ACT_RANGES_APF_3_BOTTOM, // - Bottom Compatibility !!!!!see note below!!!!!!!
+ CFG_DRV_ACT_RANGES_APF_3_TOP // - Top Compatibility
+ },
#else
- { 0, 0, 0 },
+ { 0, 0, 0 },
#endif // HCF_APF_VAR_3
-#if defined HCF_APF_VAR_4 // WARP Hermes 2.5
- { 4, // var_rec[1] - Variant number
- CFG_DRV_ACT_RANGES_APF_4_BOTTOM, // - Bottom Compatibility !!!!!see note below!!!!!!!
- CFG_DRV_ACT_RANGES_APF_4_TOP // - Top Compatibility
- }
+#if defined HCF_APF_VAR_4 // WARP Hermes 2.5
+ { 4, // var_rec[1] - Variant number
+ CFG_DRV_ACT_RANGES_APF_4_BOTTOM, // - Bottom Compatibility !!!!!see note below!!!!!!!
+ CFG_DRV_ACT_RANGES_APF_4_TOP // - Top Compatibility
+ }
#else
- { 0, 0, 0 }
+ { 0, 0, 0 }
#endif // HCF_APF_VAR_4
}
} ;
#define HCF_VERSION TEXT( "HCF$Revision: 1.10 $" )
static struct /*CFG_HCF_OPT_STRCT*/ {
- hcf_16 len; //length of cfg_hcf_opt struct
- hcf_16 typ; //type 0x082C
- hcf_16 v0; //offset HCF_VERSION
- hcf_16 v1; // MSF_COMPONENT_ID
- hcf_16 v2; // HCF_ALIGN
- hcf_16 v3; // HCF_ASSERT
- hcf_16 v4; // HCF_BIG_ENDIAN
- hcf_16 v5; // /* HCF_DLV | HCF_DLNV */
- hcf_16 v6; // HCF_DMA
- hcf_16 v7; // HCF_ENCAP
- hcf_16 v8; // HCF_EXT
- hcf_16 v9; // HCF_INT_ON
- hcf_16 v10; // HCF_IO
- hcf_16 v11; // HCF_LEGACY
- hcf_16 v12; // HCF_MAX_LTV
- hcf_16 v13; // HCF_PROT_TIME
- hcf_16 v14; // HCF_SLEEP
- hcf_16 v15; // HCF_TALLIES
- hcf_16 v16; // HCF_TYPE
- hcf_16 v17; // HCF_NIC_TAL_CNT
- hcf_16 v18; // HCF_HCF_TAL_CNT
- hcf_16 v19; // offset tallies
- TCHAR val[sizeof(HCF_VERSION)];
+ hcf_16 len; //length of cfg_hcf_opt struct
+ hcf_16 typ; //type 0x082C
+ hcf_16 v0; //offset HCF_VERSION
+ hcf_16 v1; // MSF_COMPONENT_ID
+ hcf_16 v2; // HCF_ALIGN
+ hcf_16 v3; // HCF_ASSERT
+ hcf_16 v4; // HCF_BIG_ENDIAN
+ hcf_16 v5; // /* HCF_DLV | HCF_DLNV */
+ hcf_16 v6; // HCF_DMA
+ hcf_16 v7; // HCF_ENCAP
+ hcf_16 v8; // HCF_EXT
+ hcf_16 v9; // HCF_INT_ON
+ hcf_16 v10; // HCF_IO
+ hcf_16 v11; // HCF_LEGACY
+ hcf_16 v12; // HCF_MAX_LTV
+ hcf_16 v13; // HCF_PROT_TIME
+ hcf_16 v14; // HCF_SLEEP
+ hcf_16 v15; // HCF_TALLIES
+ hcf_16 v16; // HCF_TYPE
+ hcf_16 v17; // HCF_NIC_TAL_CNT
+ hcf_16 v18; // HCF_HCF_TAL_CNT
+ hcf_16 v19; // offset tallies
+ char val[sizeof(HCF_VERSION)];
} BASED cfg_hcf_opt = {
sizeof(cfg_hcf_opt)/sizeof(hcf_16) -1,
- CFG_HCF_OPT, // (0x082C)
+ CFG_HCF_OPT, // (0x082C)
( sizeof(cfg_hcf_opt) - sizeof(HCF_VERSION) - 4 )/sizeof(hcf_16),
#if defined MSF_COMPONENT_ID
MSF_COMPONENT_ID,
@@ -455,7 +448,7 @@ static struct /*CFG_HCF_OPT_STRCT*/ {
HCF_ALIGN,
HCF_ASSERT,
HCF_BIG_ENDIAN,
- 0, // /* HCF_DLV | HCF_DLNV*/,
+ 0, // /* HCF_DLV | HCF_DLNV*/,
HCF_DMA,
HCF_ENCAP,
HCF_EXT,
@@ -478,218 +471,199 @@ static struct /*CFG_HCF_OPT_STRCT*/ {
}; // cfg_hcf_opt
#endif // MSF_COMPONENT_ID
-#if defined HCF_TALLIES_EXTRA
- replaced by HCF_EXT_TALLIES_FW ;
-#endif // HCF_TALLIES_EXTRA
-
-#if defined MSF_COMPONENT_ID || (HCF_EXT) & HCF_EXT_MB
-#if (HCF_EXT) & HCF_EXT_MB
HCF_STATIC LTV_STRCT BASED cfg_null = { 1, CFG_NULL, {0} };
-#endif // HCF_EXT_MB
+
HCF_STATIC hcf_16* BASED xxxx[ ] = {
-#if (HCF_EXT) & HCF_EXT_MB
- &cfg_null.len, //CFG_NULL 0x0820
-#endif // HCF_EXT_MB
+ &cfg_null.len, //CFG_NULL 0x0820
#if defined MSF_COMPONENT_ID
- &cfg_drv_identity.len, //CFG_DRV_IDENTITY 0x0826
- &cfg_drv_sup_range.len, //CFG_DRV_SUP_RANGE 0x0827
- &cfg_drv_act_ranges_pri.len, //CFG_DRV_ACT_RANGES_PRI 0x0828
- &cfg_drv_act_ranges_sta.len, //CFG_DRV_ACT_RANGES_STA 0x0829
- &cfg_drv_act_ranges_hsi.len, //CFG_DRV_ACT_RANGES_HSI 0x082A
- &cfg_drv_act_ranges_apf.len, //CFG_DRV_ACT_RANGES_APF 0x082B
- &cfg_hcf_opt.len, //CFG_HCF_OPT 0x082C
- NULL, //IFB_PRIIdentity placeholder 0xFD02
- NULL, //IFB_PRISup placeholder 0xFD03
+ &cfg_drv_identity.len, //CFG_DRV_IDENTITY 0x0826
+ &cfg_drv_sup_range.len, //CFG_DRV_SUP_RANGE 0x0827
+ &cfg_drv_act_ranges_pri.len, //CFG_DRV_ACT_RANGES_PRI 0x0828
+ &cfg_drv_act_ranges_sta.len, //CFG_DRV_ACT_RANGES_STA 0x0829
+ &cfg_drv_act_ranges_hsi.len, //CFG_DRV_ACT_RANGES_HSI 0x082A
+ &cfg_drv_act_ranges_apf.len, //CFG_DRV_ACT_RANGES_APF 0x082B
+ &cfg_hcf_opt.len, //CFG_HCF_OPT 0x082C
+ NULL, //IFB_PRIIdentity placeholder 0xFD02
+ NULL, //IFB_PRISup placeholder 0xFD03
#endif // MSF_COMPONENT_ID
- NULL //endsentinel
- };
-#define xxxx_PRI_IDENTITY_OFFSET (ARRAY_SIZE(xxxx) - 3)
-
-#endif // MSF_COMPONENT_ID / HCF_EXT_MB
+ NULL //endsentinel
+};
+#define xxxx_PRI_IDENTITY_OFFSET (ARRAY_SIZE(xxxx) - 3)
/************************************************************************************************************
-************************** T O P L E V E L H C F R O U T I N E S **************************************
-************************************************************************************************************/
+ ************************** T O P L E V E L H C F R O U T I N E S **************************************
+ ************************************************************************************************************/
-#if (HCF_DL_ONLY) == 0
/************************************************************************************************************
-*
-*.MODULE int hcf_action( IFBP ifbp, hcf_16 action )
-*.PURPOSE Changes the run-time Card behavior.
-* Performs Miscellanuous actions.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* action number identifying the type of change
-* - HCF_ACT_CCX_OFF disable CKIP
-* - HCF_ACT_CCX_ON enable CKIP
-* - HCF_ACT_INT_FORCE_ON enable interrupt generation by WaveLAN NIC
-* - HCF_ACT_INT_OFF disable interrupt generation by WaveLAN NIC
-* - HCF_ACT_INT_ON compensate 1 HCF_ACT_INT_OFF, enable interrupt generation if balance reached
-* - HCF_ACT_PRS_SCAN Hermes Probe Respons Scan (F102) command
-* - HCF_ACT_RX_ACK acknowledge non-DMA receiver to Hermes
-* - HCF_ACT_SCAN Hermes Inquire Scan (F101) command (non-WARP only)
-* - HCF_ACT_SLEEP DDS Sleep request
-* - HCF_ACT_TALLIES Hermes Inquire Tallies (F100) command
-*
-*.RETURNS
-* HCF_SUCCESS all (including invalid)
-* HCF_INT_PENDING HCF_ACT_INT_OFF, interrupt pending
-* HCF_ERR_NO_NIC HCF_ACT_INT_OFF, NIC presence check fails
-*
-*.CONDITIONS
-* Except for hcf_action with HCF_ACT_INT_FORCE_ON or HCF_ACT_INT_OFF as parameter or hcf_connect with an I/O
-* address (i.e. not HCF_DISCONNECT), all hcf-function calls MUST be preceded by a call of hcf_action with
-* HCF_ACT_INT_OFF as parameter.
-* Note that hcf_connect defaults to NIC interrupt disabled mode, i.e. as if hcf_action( HCF_ACT_INT_OFF )
-* was called.
-*
-*.DESCRIPTION
-* hcf_action supports the following mode changing action-code pairs that are antonyms
-* - HCF_ACT_CCX_OFF / HCF_ACT_CCX_ON
-* - HCF_ACT_INT_[FORCE_]ON / HCF_ACT_INT_OFF
-*
-* Additionally hcf_action can start the following actions in the NIC:
-* - HCF_ACT_PRS_SCAN
-* - HCF_ACT_RX_ACK
-* - HCF_ACT_SCAN
-* - HCF_ACT_SLEEP
-* - HCF_ACT_TALLIES
-*
-* o HCF_ACT_INT_OFF: Sets NIC Interrupts mode Disabled.
-* This command, and the associated [Force] Enable NIC interrupts command, are only available if the HCF_INT_ON
-* compile time option is not set at 0x0000.
-*
-* o HCF_ACT_INT_ON: Sets NIC Interrupts mode Enabled.
-* Enable NIC Interrupts, depending on the number of preceding Disable NIC Interrupt calls.
-*
-* o HCF_ACT_INT_FORCE_ON: Force NIC Interrupts mode Enabled.
-* Sets NIC Interrupts mode Enabled, regardless off the number of preceding Disable NIC Interrupt calls.
-*
-* The disabling and enabling of interrupts are antonyms.
-* These actions must be balanced.
-* For each "disable interrupts" there must be a matching "enable interrupts".
-* The disable interrupts may be executed multiple times in a row without intervening enable interrupts, in
-* other words, the disable interrupts may be nested.
-* The interrupt generation mechanism is disabled at the first call with HCF_ACT_INT_OFF.
-* The interrupt generation mechanism is re-enabled when the number of calls with HCF_ACT_INT_ON matches the
-* number of calls with INT_OFF.
-*
-* It is not allowed to have more Enable NIC Interrupts calls than Disable NIC Interrupts calls.
-* The interrupt generation mechanism is initially (i.e. after hcf_connect) disabled.
-* An MSF based on a interrupt strategy must call hcf_action with INT_ON in its initialization logic.
-*
-*! The INT_OFF/INT_ON housekeeping is initialized at 0x0000 by hcf_connect, causing the interrupt generation
-* mechanism to be disabled at first. This suits MSF implementation based on a polling strategy.
-*
-* o HCF_ACT_CCX_OFF / HCF_ACT_CCX_ON
-*!! This can use some more explanation;?
-* Disables and Enables support in the HCF runtime code for the CCX feature. Each time one of these action
-* codes is used, the effects of the preceding use cease.
-*
-* o HCF_ACT_SLEEP: Initiates the Disconnected DeepSleep process
-* This command is only available if the HCF_DDS compile time option is set. It triggers the F/W to start the
-* sleep handshaking. Regardless whether the Host initiates a Disconnected DeepSleep (DDS) or the F/W initiates
-* a Connected DeepSleep (CDS), the Host-F/W sleep handshaking is completed when the NIC Interrupts mode is
-* enabled (by means of the balancing HCF_ACT_INT_ON), i.e. at that moment the F/W really goes into sleep mode.
-* The F/W is wokenup by the HCF when the NIC Interrupts mode are disabled, i.e. at the first HCF_ACT_INT_OFF
-* after going into sleep.
-*
-* The following Miscellanuous actions are defined:
-*
-* o HCF_ACT_RX_ACK: Receiver Acknowledgement (non-DMA, non-USB mode only)
-* Acking the receiver, frees the NIC memory used to hold the Rx frame and allows the F/W to
-* report the existence of the next Rx frame.
-* If the MSF does not need access (any longer) to the current frame, e.g. because it is rejected based on the
-* look ahead or copied to another buffer, the receiver may be acked. Acking earlier is assumed to have the
-* potential of improving the performance.
-* If the MSF does not explitly ack te receiver, the acking is done implicitly if:
-* - the received frame fits in the look ahead buffer, by the hcf_service_nic call that reported the Rx frame
-* - if not in the above step, by hcf_rcv_msg (assuming hcf_rcv_msg is called)
-* - if neither of the above implicit acks nor an explicit ack by the MSF, by the first hcf_service_nic after
-* the hcf_service_nic that reported the Rx frame.
-* Note: If an Rx frame is already acked, an explicit ACK by the MSF acts as a NoOperation.
-*
-* o HCF_ACT_TALLIES: Inquire Tallies command
-* This command is only operational if the F/W is enabled.
-* The Inquire Tallies command requests the F/W to provide its current set of tallies.
-* See also hcf_get_info with CFG_TALLIES as parameter.
-*
-* o HCF_ACT_PRS_SCAN: Inquire Probe Respons Scan command
-* This command is only operational if the F/W is enabled.
-* The Probe Respons Scan command starts a scan sequence.
-* The HCF puts the result of this action in an MSF defined buffer (see CFG_RID_LOG_STRCT).
-*
-* o HCF_ACT_SCAN: Inquire Scan command
-* This command is only supported for HII F/W (i.e. pre-WARP) and it is operational if the F/W is enabled.
-* The Inquire Scan command starts a scan sequence.
-* The HCF puts the result of this action in an MSF defined buffer (see CFG_RID_LOG_STRCT).
-*
-* Assert fails if
-* - ifbp has a recognizable out-of-range value.
-* - NIC interrupts are not disabled while required by parameter action.
-* - an invalid code is specified in parameter action.
-* - HCF_ACT_INT_ON commands outnumber the HCF_ACT_INT_OFF commands.
-* - reentrancy, may be caused by calling hcf_functions without adequate protection against NIC interrupts or
-* multi-threading
-*
-* - Since the HCF does not maintain status information relative to the F/W enabled state, it is not asserted
-* whether HCF_ACT_SCAN, HCF_ACT_PRS_SCAN or HCF_ACT_TALLIES are only used while F/W is enabled.
-*
-*.DIAGRAM
-* 0: The assert embedded in HCFLOGENTRY checks against re-entrancy. Re-entrancy could be caused by a MSF logic
-* at task-level calling hcf_functions without shielding with HCF_ACT_ON/_OFF. However the HCF_ACT_INT_OFF
-* action itself can per definition not be protected this way. Based on code inspection, it can be concluded,
-* that there is no re-entrancy PROBLEM in this particular flow. It does not seem worth the trouble to
-* explicitly check for this condition (although there was a report of an MSF which ran into this assert.
-* 2:IFB_IntOffCnt is used to balance the INT_OFF and INT_ON calls. Disabling of the interrupts is achieved by
-* writing a zero to the Hermes IntEn register. In a shared interrupt environment (e.g. the mini-PCI NDIS
-* driver) it is considered more correct to return the status HCF_INT_PENDING if and only if, the current
-* invocation of hcf_service_nic is (apparently) called in the ISR when the ISR was activated as result of a
-* change in HREG_EV_STAT matching a bit in HREG_INT_EN, i.e. not if invoked as result of another device
-* generating an interrupt on the shared interrupt line.
-* Note 1: it has been observed that under certain adverse conditions on certain platforms the writing of
-* HREG_INT_EN can apparently fail, therefor it is paramount that HREG_INT_EN is written again with 0 for
-* each and every call to HCF_ACT_INT_OFF.
-* Note 2: it has been observed that under certain H/W & S/W architectures this logic is called when there is
-* no NIC at all. To cater for this, the value of HREG_INT_EN is validated. If the unused bit 0x0100 is set,
-* it is assumed there is no NIC.
-* Note 3: During the download process, some versions of the F/W reset HREG_SW_0, hence checking this
-* register for HCF_MAGIC (the classical NIC presence test) when HCF_ACT_INT_OFF is called due to another
-* card interrupting via a shared IRQ during a download, fails.
-*4: The construction "if ( ifbp->IFB_IntOffCnt-- == 0 )" is optimal (in the sense of shortest/quickest
-* path in error free flows) but NOT fail safe in case of too many INT_ON invocations compared to INT_OFF).
-* Enabling of the interrupts is achieved by writing the Hermes IntEn register.
-* - If the HCF is in Defunct mode, the interrupts stay disabled.
-* - Under "normal" conditions, the HCF is only interested in Info Events, Rx Events and Notify Events.
-* - When the HCF is out of Tx/Notify resources, the HCF is also interested in Alloc Events.
-* - via HCF_EXT, the MSF programmer can also request HREG_EV_TICK and/or HREG_EV_TX_EXC interrupts.
-* For DMA operation, the DMA hardware handles the alloc events. The DMA engine will generate a 'TxDmaDone'
-* event as soon as it has pumped a frame from host ram into NIC-RAM (note that the frame does not have to be
-* transmitted then), and a 'RxDmaDone' event as soon as a received frame has been pumped from NIC-RAM into
-* host ram. Note that the 'alloc' event has been removed from the event-mask, because the DMA engine will
-* react to and acknowledge this event.
-*6: ack the "old" Rx-event. See "Rx Buffer free strategy" in hcf_service_nic above for more explanation.
-* IFB_RxFID and IFB_RxLen must be cleared to bring both the internal HCF house keeping and the information
-* supplied to the MSF in the state "no frame received".
-*8: The HCF_ACT_SCAN, HCF_ACT_PRS_SCAN and HCF_ACT_TALLIES activity are merged by "clever" algebraic
-* manipulations of the RID-values and action codes, so foregoing robustness against migration problems for
-* ease of implementation. The assumptions about numerical relationships between CFG_TALLIES etc and
-* HCF_ACT_TALLIES etc are checked by the "#if" statements just prior to the body of this routine, resulting
-* in: err "maintenance" during compilation if the assumptions are no longer met. The writing of HREG_PARAM_1
-* with 0x3FFF in case of an PRS scan, is a kludge to get around lack of specification, hence different
-* implementation in F/W and Host.
-* When there is no NIC RAM available, some versions of the Hermes F/W do report 0x7F00 as error in the
-* Result field of the Status register and some F/W versions don't. To mask this difference to the MSF all
-* return codes of the Hermes are ignored ("best" and "most simple" solution to these types of analomies with
-* an acceptable loss due to ignoring all error situations as well).
-* The "No inquire space" is reported via the Hermes tallies.
-*30: do not HCFASSERT( rc, rc ) since rc == HCF_INT_PENDING is no error
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.MODULE int hcf_action( IFBP ifbp, hcf_16 action )
+ *.PURPOSE Changes the run-time Card behavior.
+ * Performs Miscellanuous actions.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * action number identifying the type of change
+ * - HCF_ACT_INT_FORCE_ON enable interrupt generation by WaveLAN NIC
+ * - HCF_ACT_INT_OFF disable interrupt generation by WaveLAN NIC
+ * - HCF_ACT_INT_ON compensate 1 HCF_ACT_INT_OFF, enable interrupt generation if balance reached
+ * - HCF_ACT_PRS_SCAN Hermes Probe Respons Scan (F102) command
+ * - HCF_ACT_RX_ACK acknowledge non-DMA receiver to Hermes
+ * - HCF_ACT_SCAN Hermes Inquire Scan (F101) command (non-WARP only)
+ * - HCF_ACT_SLEEP DDS Sleep request
+ * - HCF_ACT_TALLIES Hermes Inquire Tallies (F100) command
+ *
+ *.RETURNS
+ * HCF_SUCCESS all (including invalid)
+ * HCF_INT_PENDING HCF_ACT_INT_OFF, interrupt pending
+ * HCF_ERR_NO_NIC HCF_ACT_INT_OFF, NIC presence check fails
+ *
+ *.CONDITIONS
+ * Except for hcf_action with HCF_ACT_INT_FORCE_ON or HCF_ACT_INT_OFF as parameter or hcf_connect with an I/O
+ * address (i.e. not HCF_DISCONNECT), all hcf-function calls MUST be preceded by a call of hcf_action with
+ * HCF_ACT_INT_OFF as parameter.
+ * Note that hcf_connect defaults to NIC interrupt disabled mode, i.e. as if hcf_action( HCF_ACT_INT_OFF )
+ * was called.
+ *
+ *.DESCRIPTION
+ * hcf_action supports the following mode changing action-code pairs that are antonyms
+ * - HCF_ACT_INT_[FORCE_]ON / HCF_ACT_INT_OFF
+ *
+ * Additionally hcf_action can start the following actions in the NIC:
+ * - HCF_ACT_PRS_SCAN
+ * - HCF_ACT_RX_ACK
+ * - HCF_ACT_SCAN
+ * - HCF_ACT_SLEEP
+ * - HCF_ACT_TALLIES
+ *
+ * o HCF_ACT_INT_OFF: Sets NIC Interrupts mode Disabled.
+ * This command, and the associated [Force] Enable NIC interrupts command, are only available if the HCF_INT_ON
+ * compile time option is not set at 0x0000.
+ *
+ * o HCF_ACT_INT_ON: Sets NIC Interrupts mode Enabled.
+ * Enable NIC Interrupts, depending on the number of preceding Disable NIC Interrupt calls.
+ *
+ * o HCF_ACT_INT_FORCE_ON: Force NIC Interrupts mode Enabled.
+ * Sets NIC Interrupts mode Enabled, regardless off the number of preceding Disable NIC Interrupt calls.
+ *
+ * The disabling and enabling of interrupts are antonyms.
+ * These actions must be balanced.
+ * For each "disable interrupts" there must be a matching "enable interrupts".
+ * The disable interrupts may be executed multiple times in a row without intervening enable interrupts, in
+ * other words, the disable interrupts may be nested.
+ * The interrupt generation mechanism is disabled at the first call with HCF_ACT_INT_OFF.
+ * The interrupt generation mechanism is re-enabled when the number of calls with HCF_ACT_INT_ON matches the
+ * number of calls with INT_OFF.
+ *
+ * It is not allowed to have more Enable NIC Interrupts calls than Disable NIC Interrupts calls.
+ * The interrupt generation mechanism is initially (i.e. after hcf_connect) disabled.
+ * An MSF based on a interrupt strategy must call hcf_action with INT_ON in its initialization logic.
+ *
+ *! The INT_OFF/INT_ON housekeeping is initialized at 0x0000 by hcf_connect, causing the interrupt generation
+ * mechanism to be disabled at first. This suits MSF implementation based on a polling strategy.
+ *
+ * o HCF_ACT_SLEEP: Initiates the Disconnected DeepSleep process
+ * This command is only available if the HCF_DDS compile time option is set. It triggers the F/W to start the
+ * sleep handshaking. Regardless whether the Host initiates a Disconnected DeepSleep (DDS) or the F/W initiates
+ * a Connected DeepSleep (CDS), the Host-F/W sleep handshaking is completed when the NIC Interrupts mode is
+ * enabled (by means of the balancing HCF_ACT_INT_ON), i.e. at that moment the F/W really goes into sleep mode.
+ * The F/W is wokenup by the HCF when the NIC Interrupts mode are disabled, i.e. at the first HCF_ACT_INT_OFF
+ * after going into sleep.
+ *
+ * The following Miscellanuous actions are defined:
+ *
+ * o HCF_ACT_RX_ACK: Receiver Acknowledgement (non-DMA, non-USB mode only)
+ * Acking the receiver, frees the NIC memory used to hold the Rx frame and allows the F/W to
+ * report the existence of the next Rx frame.
+ * If the MSF does not need access (any longer) to the current frame, e.g. because it is rejected based on the
+ * look ahead or copied to another buffer, the receiver may be acked. Acking earlier is assumed to have the
+ * potential of improving the performance.
+ * If the MSF does not explitly ack te receiver, the acking is done implicitly if:
+ * - the received frame fits in the look ahead buffer, by the hcf_service_nic call that reported the Rx frame
+ * - if not in the above step, by hcf_rcv_msg (assuming hcf_rcv_msg is called)
+ * - if neither of the above implicit acks nor an explicit ack by the MSF, by the first hcf_service_nic after
+ * the hcf_service_nic that reported the Rx frame.
+ * Note: If an Rx frame is already acked, an explicit ACK by the MSF acts as a NoOperation.
+ *
+ * o HCF_ACT_TALLIES: Inquire Tallies command
+ * This command is only operational if the F/W is enabled.
+ * The Inquire Tallies command requests the F/W to provide its current set of tallies.
+ * See also hcf_get_info with CFG_TALLIES as parameter.
+ *
+ * o HCF_ACT_PRS_SCAN: Inquire Probe Respons Scan command
+ * This command is only operational if the F/W is enabled.
+ * The Probe Respons Scan command starts a scan sequence.
+ * The HCF puts the result of this action in an MSF defined buffer (see CFG_RID_LOG_STRCT).
+ *
+ * o HCF_ACT_SCAN: Inquire Scan command
+ * This command is only supported for HII F/W (i.e. pre-WARP) and it is operational if the F/W is enabled.
+ * The Inquire Scan command starts a scan sequence.
+ * The HCF puts the result of this action in an MSF defined buffer (see CFG_RID_LOG_STRCT).
+ *
+ * Assert fails if
+ * - ifbp has a recognizable out-of-range value.
+ * - NIC interrupts are not disabled while required by parameter action.
+ * - an invalid code is specified in parameter action.
+ * - HCF_ACT_INT_ON commands outnumber the HCF_ACT_INT_OFF commands.
+ * - reentrancy, may be caused by calling hcf_functions without adequate protection against NIC interrupts or
+ * multi-threading
+ *
+ * - Since the HCF does not maintain status information relative to the F/W enabled state, it is not asserted
+ * whether HCF_ACT_SCAN, HCF_ACT_PRS_SCAN or HCF_ACT_TALLIES are only used while F/W is enabled.
+ *
+ *.DIAGRAM
+ * 0: The assert embedded in HCFLOGENTRY checks against re-entrancy. Re-entrancy could be caused by a MSF logic
+ * at task-level calling hcf_functions without shielding with HCF_ACT_ON/_OFF. However the HCF_ACT_INT_OFF
+ * action itself can per definition not be protected this way. Based on code inspection, it can be concluded,
+ * that there is no re-entrancy PROBLEM in this particular flow. It does not seem worth the trouble to
+ * explicitly check for this condition (although there was a report of an MSF which ran into this assert.
+ * 2:IFB_IntOffCnt is used to balance the INT_OFF and INT_ON calls. Disabling of the interrupts is achieved by
+ * writing a zero to the Hermes IntEn register. In a shared interrupt environment (e.g. the mini-PCI NDIS
+ * driver) it is considered more correct to return the status HCF_INT_PENDING if and only if, the current
+ * invocation of hcf_service_nic is (apparently) called in the ISR when the ISR was activated as result of a
+ * change in HREG_EV_STAT matching a bit in HREG_INT_EN, i.e. not if invoked as result of another device
+ * generating an interrupt on the shared interrupt line.
+ * Note 1: it has been observed that under certain adverse conditions on certain platforms the writing of
+ * HREG_INT_EN can apparently fail, therefor it is paramount that HREG_INT_EN is written again with 0 for
+ * each and every call to HCF_ACT_INT_OFF.
+ * Note 2: it has been observed that under certain H/W & S/W architectures this logic is called when there is
+ * no NIC at all. To cater for this, the value of HREG_INT_EN is validated. If the unused bit 0x0100 is set,
+ * it is assumed there is no NIC.
+ * Note 3: During the download process, some versions of the F/W reset HREG_SW_0, hence checking this
+ * register for HCF_MAGIC (the classical NIC presence test) when HCF_ACT_INT_OFF is called due to another
+ * card interrupting via a shared IRQ during a download, fails.
+ *4: The construction "if ( ifbp->IFB_IntOffCnt-- == 0 )" is optimal (in the sense of shortest/quickest
+ * path in error free flows) but NOT fail safe in case of too many INT_ON invocations compared to INT_OFF).
+ * Enabling of the interrupts is achieved by writing the Hermes IntEn register.
+ * - If the HCF is in Defunct mode, the interrupts stay disabled.
+ * - Under "normal" conditions, the HCF is only interested in Info Events, Rx Events and Notify Events.
+ * - When the HCF is out of Tx/Notify resources, the HCF is also interested in Alloc Events.
+ * - via HCF_EXT, the MSF programmer can also request HREG_EV_TICK and/or HREG_EV_TX_EXC interrupts.
+ * For DMA operation, the DMA hardware handles the alloc events. The DMA engine will generate a 'TxDmaDone'
+ * event as soon as it has pumped a frame from host ram into NIC-RAM (note that the frame does not have to be
+ * transmitted then), and a 'RxDmaDone' event as soon as a received frame has been pumped from NIC-RAM into
+ * host ram. Note that the 'alloc' event has been removed from the event-mask, because the DMA engine will
+ * react to and acknowledge this event.
+ *6: ack the "old" Rx-event. See "Rx Buffer free strategy" in hcf_service_nic above for more explanation.
+ * IFB_RxFID and IFB_RxLen must be cleared to bring both the internal HCF house keeping and the information
+ * supplied to the MSF in the state "no frame received".
+ *8: The HCF_ACT_SCAN, HCF_ACT_PRS_SCAN and HCF_ACT_TALLIES activity are merged by "clever" algebraic
+ * manipulations of the RID-values and action codes, so foregoing robustness against migration problems for
+ * ease of implementation. The assumptions about numerical relationships between CFG_TALLIES etc and
+ * HCF_ACT_TALLIES etc are checked by the "#if" statements just prior to the body of this routine, resulting
+ * in: err "maintenance" during compilation if the assumptions are no longer met. The writing of HREG_PARAM_1
+ * with 0x3FFF in case of an PRS scan, is a kludge to get around lack of specification, hence different
+ * implementation in F/W and Host.
+ * When there is no NIC RAM available, some versions of the Hermes F/W do report 0x7F00 as error in the
+ * Result field of the Status register and some F/W versions don't. To mask this difference to the MSF all
+ * return codes of the Hermes are ignored ("best" and "most simple" solution to these types of analomies with
+ * an acceptable loss due to ignoring all error situations as well).
+ * The "No inquire space" is reported via the Hermes tallies.
+ *30: do not HCFASSERT( rc, rc ) since rc == HCF_INT_PENDING is no error
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
#if ( (HCF_TYPE) & HCF_TYPE_HII5 ) == 0
#if CFG_SCAN != CFG_TALLIES - HCF_ACT_TALLIES + HCF_ACT_SCAN
err: "maintenance" apparently inviolated the underlying assumption about the numerical values of these macros
@@ -701,43 +675,43 @@ err: "maintenance" apparently inviolated the underlying assumption about the num
int
hcf_action( IFBP ifbp, hcf_16 action )
{
-int rc = HCF_SUCCESS;
+ int rc = HCF_SUCCESS;
- HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic )
+ HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic );
#if HCF_INT_ON
- HCFLOGENTRY( action == HCF_ACT_INT_FORCE_ON ? HCF_TRACE_ACTION_KLUDGE : HCF_TRACE_ACTION, action ) /* 0 */
+ HCFLOGENTRY( action == HCF_ACT_INT_FORCE_ON ? HCF_TRACE_ACTION_KLUDGE : HCF_TRACE_ACTION, action ); /* 0 */
#if (HCF_SLEEP)
HCFASSERT( ifbp->IFB_IntOffCnt != 0xFFFE || action == HCF_ACT_INT_OFF,
- MERGE_2( action, ifbp->IFB_IntOffCnt ) )
+ MERGE_2( action, ifbp->IFB_IntOffCnt ) );
#else
- HCFASSERT( ifbp->IFB_IntOffCnt != 0xFFFE, action )
+ HCFASSERT( ifbp->IFB_IntOffCnt != 0xFFFE, action );
#endif // HCF_SLEEP
HCFASSERT( ifbp->IFB_IntOffCnt != 0xFFFF ||
- action == HCF_ACT_INT_OFF || action == HCF_ACT_INT_FORCE_ON, action )
+ action == HCF_ACT_INT_OFF || action == HCF_ACT_INT_FORCE_ON, action );
HCFASSERT( ifbp->IFB_IntOffCnt <= 16 || ifbp->IFB_IntOffCnt >= 0xFFFE,
- MERGE_2( action, ifbp->IFB_IntOffCnt ) ) //nesting more than 16 deep seems unreasonable
+ MERGE_2( action, ifbp->IFB_IntOffCnt ) ); //nesting more than 16 deep seems unreasonable
#endif // HCF_INT_ON
switch (action) {
#if HCF_INT_ON
-hcf_16 i;
- case HCF_ACT_INT_OFF: // Disable Interrupt generation
+ hcf_16 i;
+ case HCF_ACT_INT_OFF: // Disable Interrupt generation
#if HCF_SLEEP
- if ( ifbp->IFB_IntOffCnt == 0xFFFE ) { // WakeUp test ;?tie this to the "new" super-LinkStat
- ifbp->IFB_IntOffCnt++; // restore conventional I/F
- OPW(HREG_IO, HREG_IO_WAKEUP_ASYNC ); // set wakeup bit
- OPW(HREG_IO, HREG_IO_WAKEUP_ASYNC ); // set wakeup bit to counteract the clearing by F/W
+ if ( ifbp->IFB_IntOffCnt == 0xFFFE ) { // WakeUp test ;?tie this to the "new" super-LinkStat
+ ifbp->IFB_IntOffCnt++; // restore conventional I/F
+ OPW(HREG_IO, HREG_IO_WAKEUP_ASYNC ); // set wakeup bit
+ OPW(HREG_IO, HREG_IO_WAKEUP_ASYNC ); // set wakeup bit to counteract the clearing by F/W
// 800 us latency before FW switches to high power
- MSF_WAIT(800); // MSF-defined function to wait n microseconds.
-//OOR if ( ifbp->IFB_DSLinkStat & CFG_LINK_STAT_DS_OOR ) { // OutOfRange
-// printk( "<5>ACT_INT_OFF: Deepsleep phase terminated, enable and go to AwaitConnection\n" ); //;?remove me 1 day
+ MSF_WAIT(800); // MSF-defined function to wait n microseconds.
+//OOR if ( ifbp->IFB_DSLinkStat & CFG_LINK_STAT_DS_OOR ) { // OutOfRange
+// printk( "<5>ACT_INT_OFF: Deepsleep phase terminated, enable and go to AwaitConnection\n" ); //;?remove me 1 day
// hcf_cntl( ifbp, HCF_CNTL_ENABLE );
// }
-// ifbp->IFB_DSLinkStat &= ~( CFG_LINK_STAT_DS_IR | CFG_LINK_STAT_DS_OOR); //clear IR/OOR state
+// ifbp->IFB_DSLinkStat &= ~( CFG_LINK_STAT_DS_IR | CFG_LINK_STAT_DS_OOR); //clear IR/OOR state
}
#endif // HCF_SLEEP
-/*2*/ ifbp->IFB_IntOffCnt++;
-//! rc = 0;
+ /*2*/ ifbp->IFB_IntOffCnt++;
+//! rc = 0;
i = IPW( HREG_INT_EN );
OPW( HREG_INT_EN, 0 );
if ( i & 0x1000 ) {
@@ -749,227 +723,219 @@ hcf_16 i;
}
break;
- case HCF_ACT_INT_FORCE_ON: // Enforce Enable Interrupt generation
+ case HCF_ACT_INT_FORCE_ON: // Enforce Enable Interrupt generation
ifbp->IFB_IntOffCnt = 0;
//Fall through in HCF_ACT_INT_ON
- case HCF_ACT_INT_ON: // Enable Interrupt generation
-/*4*/ if ( ifbp->IFB_IntOffCnt-- == 0 && ifbp->IFB_CardStat == 0 ) {
- //determine Interrupt Event mask
+ case HCF_ACT_INT_ON: // Enable Interrupt generation
+ /*4*/ if ( ifbp->IFB_IntOffCnt-- == 0 && ifbp->IFB_CardStat == 0 ) {
+ //determine Interrupt Event mask
#if HCF_DMA
if ( ifbp->IFB_CntlOpt & USE_DMA ) {
- i = HREG_EV_INFO | HREG_EV_RDMAD | HREG_EV_TDMAD | HREG_EV_TX_EXT; //mask when DMA active
+ i = HREG_EV_INFO | HREG_EV_RDMAD | HREG_EV_TDMAD | HREG_EV_TX_EXT; //mask when DMA active
} else
#endif // HCF_DMA
{
- i = HREG_EV_INFO | HREG_EV_RX | HREG_EV_TX_EXT; //mask when DMA not active
+ i = HREG_EV_INFO | HREG_EV_RX | HREG_EV_TX_EXT; //mask when DMA not active
if ( ifbp->IFB_RscInd == 0 ) {
- i |= HREG_EV_ALLOC; //mask when no TxFID available
+ i |= HREG_EV_ALLOC; //mask when no TxFID available
}
}
#if HCF_SLEEP
if ( ( IPW(HREG_EV_STAT) & ( i | HREG_EV_SLEEP_REQ ) ) == HREG_EV_SLEEP_REQ ) {
// firmware indicates it would like to go into sleep modus
// only acknowledge this request if no other events that can cause an interrupt are pending
- ifbp->IFB_IntOffCnt--; //becomes 0xFFFE
- OPW( HREG_INT_EN, i | HREG_EV_TICK );
+ ifbp->IFB_IntOffCnt--; //becomes 0xFFFE
+ OPW( HREG_INT_EN, i | HREG_EV_TICK );
OPW( HREG_EV_ACK, HREG_EV_SLEEP_REQ | HREG_EV_TICK | HREG_EV_ACK_REG_READY );
} else
#endif // HCF_SLEEP
{
- OPW( HREG_INT_EN, i | HREG_EV_SLEEP_REQ );
+ OPW( HREG_INT_EN, i | HREG_EV_SLEEP_REQ );
}
}
break;
#endif // HCF_INT_ON
#if (HCF_SLEEP) & HCF_DDS
- case HCF_ACT_SLEEP: // DDS Sleep request
+ case HCF_ACT_SLEEP: // DDS Sleep request
hcf_cntl( ifbp, HCF_CNTL_DISABLE );
cmd_exe( ifbp, HCMD_SLEEP, 0 );
break;
-// case HCF_ACT_WAKEUP: // DDS Wakeup request
-// HCFASSERT( ifbp->IFB_IntOffCnt == 0xFFFE, ifbp->IFB_IntOffCnt )
-// ifbp->IFB_IntOffCnt++; // restore conventional I/F
-// OPW( HREG_IO, HREG_IO_WAKEUP_ASYNC );
-// MSF_WAIT(800); // MSF-defined function to wait n microseconds.
-// rc = hcf_action( ifbp, HCF_ACT_INT_OFF ); /*bogus, IFB_IntOffCnt == 0xFFFF, so if you carefully look
-// *at the #if HCF_DDS statements, HCF_ACT_INT_OFF is empty
-// *for DDS. "Much" better would be to merge the flows for
-// *DDS and DEEP_SLEEP
-// */
-// break;
+// case HCF_ACT_WAKEUP: // DDS Wakeup request
+// HCFASSERT( ifbp->IFB_IntOffCnt == 0xFFFE, ifbp->IFB_IntOffCnt );
+// ifbp->IFB_IntOffCnt++; // restore conventional I/F
+// OPW( HREG_IO, HREG_IO_WAKEUP_ASYNC );
+// MSF_WAIT(800); // MSF-defined function to wait n microseconds.
+// rc = hcf_action( ifbp, HCF_ACT_INT_OFF ); /*bogus, IFB_IntOffCnt == 0xFFFF, so if you carefully look
+// *at the #if HCF_DDS statements, HCF_ACT_INT_OFF is empty
+// *for DDS. "Much" better would be to merge the flows for
+// *DDS and DEEP_SLEEP
+// */
+// break;
#endif // HCF_DDS
-#if (HCF_TYPE) & HCF_TYPE_CCX
- case HCF_ACT_CCX_ON: // enable CKIP
- case HCF_ACT_CCX_OFF: // disable CKIP
- ifbp->IFB_CKIPStat = action;
- break;
-#endif // HCF_TYPE_CCX
-
- case HCF_ACT_RX_ACK: //Receiver ACK
-/*6*/ if ( ifbp->IFB_RxFID ) {
+ case HCF_ACT_RX_ACK: //Receiver ACK
+ /*6*/ if ( ifbp->IFB_RxFID ) {
DAWA_ACK( HREG_EV_RX );
}
ifbp->IFB_RxFID = ifbp->IFB_RxLen = 0;
break;
-/*8*/ case HCF_ACT_PRS_SCAN: // Hermes PRS Scan (F102)
+ /*8*/ case HCF_ACT_PRS_SCAN: // Hermes PRS Scan (F102)
OPW( HREG_PARAM_1, 0x3FFF );
- //Fall through in HCF_ACT_TALLIES
- case HCF_ACT_TALLIES: // Hermes Inquire Tallies (F100)
+ //Fall through in HCF_ACT_TALLIES
+ case HCF_ACT_TALLIES: // Hermes Inquire Tallies (F100)
#if ( (HCF_TYPE) & HCF_TYPE_HII5 ) == 0
- case HCF_ACT_SCAN: // Hermes Inquire Scan (F101)
+ case HCF_ACT_SCAN: // Hermes Inquire Scan (F101)
#endif // HCF_TYPE_HII5
/*!! the assumptions about numerical relationships between CFG_TALLIES etc and HCF_ACT_TALLIES etc
- * are checked by #if statements just prior to this routine resulting in: err "maintenance" */
+ * are checked by #if statements just prior to this routine resulting in: err "maintenance" */
cmd_exe( ifbp, HCMD_INQUIRE, action - HCF_ACT_TALLIES + CFG_TALLIES );
break;
- default:
- HCFASSERT( DO_ASSERT, action )
+ default:
+ HCFASSERT( DO_ASSERT, action );
break;
}
- //! do not HCFASSERT( rc == HCF_SUCCESS, rc ) /* 30*/
- HCFLOGEXIT( HCF_TRACE_ACTION )
+ //! do not HCFASSERT( rc == HCF_SUCCESS, rc ) /* 30*/
+ HCFLOGEXIT( HCF_TRACE_ACTION );
return rc;
} // hcf_action
-#endif // HCF_DL_ONLY
/************************************************************************************************************
-*
-*.MODULE int hcf_cntl( IFBP ifbp, hcf_16 cmd )
-*.PURPOSE Connect or disconnect a specific port to a specific network.
-*!! ;???????????????? continue needs more explanation
-* recovers by means of "continue" when the connect process in CCX mode fails
-* Enables or disables data transmission and reception for the NIC.
-* Activates static NIC configuration for a specific port at connect.
-* Activates static configuration for all ports at enable.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* cmd 0x001F: Hermes command (disable, enable, connect, disconnect, continue)
-* HCF_CNTL_ENABLE Enable
-* HCF_CNTL_DISABLE Disable
-* HCF_CNTL_CONTINUE Continue
-* HCF_CNTL_CONNECT Connect
-* HCF_CNTL_DISCONNECT Disconnect
-* 0x0100: command qualifier (continue)
-* HCMD_RETRY retry flag
-* 0x0700: port number (connect/disconnect)
-* HCF_PORT_0 MAC Port 0
-* HCF_PORT_1 MAC Port 1
-* HCF_PORT_2 MAC Port 2
-* HCF_PORT_3 MAC Port 3
-* HCF_PORT_4 MAC Port 4
-* HCF_PORT_5 MAC Port 5
-* HCF_PORT_6 MAC Port 6
-*
-*.RETURNS
-* HCF_SUCCESS
-*!! via cmd_exe
-* HCF_ERR_NO_NIC
-* HCF_ERR_DEFUNCT_...
-* HCF_ERR_TIME_OUT
-*
-*.DESCRIPTION
-* The parameter cmd contains a number of subfields.
-* The actual value for cmd is created by logical or-ing the appropriate mnemonics for the subfields.
-* The field 0x001F contains the command code
-* - HCF_CNTL_ENABLE
-* - HCF_CNTL_DISABLE
-* - HCF_CNTL_CONNECT
-* - HCF_CNTL_DISCONNECT
-* - HCF_CNTL_CONTINUE
-*
-* For HCF_CNTL_CONTINUE, the field 0x0100 contains the retry flag HCMD_RETRY.
-* For HCF_CNTL_CONNECT and HCF_CNTL_DISCONNECT, the field 0x0700 contains the port number as HCF_PORT_#.
-* For Station as well as AccessPoint F/W, MAC Port 0 is the "normal" communication channel.
-* For AccessPoint F/W, MAC Port 1 through 6 control the WDS links.
-*
-* Note that despite the names HCF_CNTL_DISABLE and HCF_CNTL_ENABLE, hcf_cntl does not influence the NIC
-* Interrupts mode.
-*
-* The Connect is used by the MSF to bring a particular port in an inactive state as far as data transmission
-* and reception are concerned.
-* When a particular port is disconnected:
-* - the F/W disables the receiver for that port.
-* - the F/W ignores send commands for that port.
-* - all frames (Receive as well as pending Transmit) for that port on the NIC are discarded.
-*
-* When the NIC is disabled, above list applies to all ports, i.e. the result is like all ports are
-* disconnected.
-*
-* When a particular port is connected:
-* - the F/W effectuates the static configuration for that port.
-* - enables the receiver for that port.
-* - accepts send commands for that port.
-*
-* Enabling has the following effects:
-* - the F/W effectuates the static configuration for all ports.
-* The F/W only updates its static configuration at a transition from disabled to enabled or from
-* disconnected to connected.
-* In order to enforce the static configuration, the MSF must assure that such a transition takes place.
-* Due to such a disable/enable or disconnect/connect sequence, Rx/Tx frames may be lost, in other words,
-* configuration may impact communication.
-* - The DMA Engine (if applicable) is enabled.
-* Note that the Enable Function by itself only enables data transmission and reception, it
-* does not enable the Interrupt Generation mechanism. This is done by hcf_action.
-*
-* Disabling has the following effects:
-*!! ;?????is the following statement really true
-* - it acts as a disconnect on all ports.
-* - The DMA Engine (if applicable) is disabled.
-*
-* For impact of the disable command on the behavior of hcf_dma_tx/rx_get see the appropriate sections.
-*
-* Although the Enable/Disable and Connect/Disconnect are antonyms, there is no restriction on their sequencing,
-* in other words, they may be called multiple times in arbitrary sequence without being paired or balanced.
-* Each time one of these functions is called, the effects of the preceding calls cease.
-*
-* Assert fails if
-* - ifbp has a recognizable out-of-range value.
-* - NIC interrupts are not disabled.
-* - A command other than Continue, Enable, Disable, Connect or Disconnect is given.
-* - An invalid combination of the subfields is given or a bit outside the subfields is given.
-* - any return code besides HCF_SUCCESS.
-* - reentrancy, may be caused by calling a hcf_function without adequate protection against NIC interrupts or
-* multi-threading
-*
-*.DIAGRAM
-* hcf_cntl takes successively the following actions:
-*2: If the HCF is in Defunct mode or incompatible with the Primary or Station Supplier in the Hermes,
-* hcf_cntl() returns immediately with HCF_ERR_NO_NIC;? as status.
-*8: when the port is disabled, the DMA engine needs to be de-activated, so the host can safely reclaim tx
-* packets from the tx descriptor chain.
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.MODULE int hcf_cntl( IFBP ifbp, hcf_16 cmd )
+ *.PURPOSE Connect or disconnect a specific port to a specific network.
+ *!! ;???????????????? continue needs more explanation
+ * recovers by means of "continue" when the connect process in CCX mode fails
+ * Enables or disables data transmission and reception for the NIC.
+ * Activates static NIC configuration for a specific port at connect.
+ * Activates static configuration for all ports at enable.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * cmd 0x001F: Hermes command (disable, enable, connect, disconnect, continue)
+ * HCF_CNTL_ENABLE Enable
+ * HCF_CNTL_DISABLE Disable
+ * HCF_CNTL_CONTINUE Continue
+ * HCF_CNTL_CONNECT Connect
+ * HCF_CNTL_DISCONNECT Disconnect
+ * 0x0100: command qualifier (continue)
+ * HCMD_RETRY retry flag
+ * 0x0700: port number (connect/disconnect)
+ * HCF_PORT_0 MAC Port 0
+ * HCF_PORT_1 MAC Port 1
+ * HCF_PORT_2 MAC Port 2
+ * HCF_PORT_3 MAC Port 3
+ * HCF_PORT_4 MAC Port 4
+ * HCF_PORT_5 MAC Port 5
+ * HCF_PORT_6 MAC Port 6
+ *
+ *.RETURNS
+ * HCF_SUCCESS
+ *!! via cmd_exe
+ * HCF_ERR_NO_NIC
+ * HCF_ERR_DEFUNCT_...
+ * HCF_ERR_TIME_OUT
+ *
+ *.DESCRIPTION
+ * The parameter cmd contains a number of subfields.
+ * The actual value for cmd is created by logical or-ing the appropriate mnemonics for the subfields.
+ * The field 0x001F contains the command code
+ * - HCF_CNTL_ENABLE
+ * - HCF_CNTL_DISABLE
+ * - HCF_CNTL_CONNECT
+ * - HCF_CNTL_DISCONNECT
+ * - HCF_CNTL_CONTINUE
+ *
+ * For HCF_CNTL_CONTINUE, the field 0x0100 contains the retry flag HCMD_RETRY.
+ * For HCF_CNTL_CONNECT and HCF_CNTL_DISCONNECT, the field 0x0700 contains the port number as HCF_PORT_#.
+ * For Station as well as AccessPoint F/W, MAC Port 0 is the "normal" communication channel.
+ * For AccessPoint F/W, MAC Port 1 through 6 control the WDS links.
+ *
+ * Note that despite the names HCF_CNTL_DISABLE and HCF_CNTL_ENABLE, hcf_cntl does not influence the NIC
+ * Interrupts mode.
+ *
+ * The Connect is used by the MSF to bring a particular port in an inactive state as far as data transmission
+ * and reception are concerned.
+ * When a particular port is disconnected:
+ * - the F/W disables the receiver for that port.
+ * - the F/W ignores send commands for that port.
+ * - all frames (Receive as well as pending Transmit) for that port on the NIC are discarded.
+ *
+ * When the NIC is disabled, above list applies to all ports, i.e. the result is like all ports are
+ * disconnected.
+ *
+ * When a particular port is connected:
+ * - the F/W effectuates the static configuration for that port.
+ * - enables the receiver for that port.
+ * - accepts send commands for that port.
+ *
+ * Enabling has the following effects:
+ * - the F/W effectuates the static configuration for all ports.
+ * The F/W only updates its static configuration at a transition from disabled to enabled or from
+ * disconnected to connected.
+ * In order to enforce the static configuration, the MSF must assure that such a transition takes place.
+ * Due to such a disable/enable or disconnect/connect sequence, Rx/Tx frames may be lost, in other words,
+ * configuration may impact communication.
+ * - The DMA Engine (if applicable) is enabled.
+ * Note that the Enable Function by itself only enables data transmission and reception, it
+ * does not enable the Interrupt Generation mechanism. This is done by hcf_action.
+ *
+ * Disabling has the following effects:
+ *!! ;?????is the following statement really true
+ * - it acts as a disconnect on all ports.
+ * - The DMA Engine (if applicable) is disabled.
+ *
+ * For impact of the disable command on the behavior of hcf_dma_tx/rx_get see the appropriate sections.
+ *
+ * Although the Enable/Disable and Connect/Disconnect are antonyms, there is no restriction on their sequencing,
+ * in other words, they may be called multiple times in arbitrary sequence without being paired or balanced.
+ * Each time one of these functions is called, the effects of the preceding calls cease.
+ *
+ * Assert fails if
+ * - ifbp has a recognizable out-of-range value.
+ * - NIC interrupts are not disabled.
+ * - A command other than Continue, Enable, Disable, Connect or Disconnect is given.
+ * - An invalid combination of the subfields is given or a bit outside the subfields is given.
+ * - any return code besides HCF_SUCCESS.
+ * - reentrancy, may be caused by calling a hcf_function without adequate protection against NIC interrupts or
+ * multi-threading
+ *
+ *.DIAGRAM
+ * hcf_cntl takes successively the following actions:
+ *2: If the HCF is in Defunct mode or incompatible with the Primary or Station Supplier in the Hermes,
+ * hcf_cntl() returns immediately with HCF_ERR_NO_NIC;? as status.
+ *8: when the port is disabled, the DMA engine needs to be de-activated, so the host can safely reclaim tx
+ * packets from the tx descriptor chain.
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
int
hcf_cntl( IFBP ifbp, hcf_16 cmd )
{
-int rc = HCF_ERR_INCOMP_FW;
+ int rc = HCF_ERR_INCOMP_FW;
#if HCF_ASSERT
-{ int x = cmd & HCMD_CMD_CODE;
- if ( x == HCF_CNTL_CONTINUE ) x &= ~HCMD_RETRY;
- else if ( (x == HCMD_DISABLE || x == HCMD_ENABLE) && ifbp->IFB_FWIdentity.comp_id == COMP_ID_FW_AP ) {
- x &= ~HFS_TX_CNTL_PORT;
+ { int x = cmd & HCMD_CMD_CODE;
+ if ( x == HCF_CNTL_CONTINUE ) x &= ~HCMD_RETRY;
+ else if ( (x == HCMD_DISABLE || x == HCMD_ENABLE) && ifbp->IFB_FWIdentity.comp_id == COMP_ID_FW_AP ) {
+ x &= ~HFS_TX_CNTL_PORT;
+ }
+ HCFASSERT( x==HCF_CNTL_ENABLE || x==HCF_CNTL_DISABLE || HCF_CNTL_CONTINUE ||
+ x==HCF_CNTL_CONNECT || x==HCF_CNTL_DISCONNECT, cmd );
}
- HCFASSERT( x==HCF_CNTL_ENABLE || x==HCF_CNTL_DISABLE || HCF_CNTL_CONTINUE ||
- x==HCF_CNTL_CONNECT || x==HCF_CNTL_DISCONNECT, cmd )
-}
#endif // HCF_ASSERT
// #if (HCF_SLEEP) & HCF_DDS
-// HCFASSERT( ifbp->IFB_IntOffCnt != 0xFFFE, cmd )
+// HCFASSERT( ifbp->IFB_IntOffCnt != 0xFFFE, cmd );
// #endif // HCF_DDS
- HCFLOGENTRY( HCF_TRACE_CNTL, cmd )
- if ( ifbp->IFB_CardStat == 0 ) { /*2*/
-/*6*/ rc = cmd_exe( ifbp, cmd, 0 );
+ HCFLOGENTRY( HCF_TRACE_CNTL, cmd );
+ if ( ifbp->IFB_CardStat == 0 ) { /*2*/
+ /*6*/ rc = cmd_exe( ifbp, cmd, 0 );
#if (HCF_SLEEP) & HCF_DDS
- ifbp->IFB_TickCnt = 0; //start 2 second period (with 1 tick uncertanty)
+ ifbp->IFB_TickCnt = 0; //start 2 second period (with 1 tick uncertanty)
#endif // HCF_DDS
}
#if HCF_DMA
@@ -980,7 +946,7 @@ int rc = HCF_ERR_INCOMP_FW;
hcf_io io_port = ifbp->IFB_IOBase;
DESC_STRCT *p;
if ( cmd == HCF_CNTL_DISABLE || cmd == HCF_CNTL_ENABLE ) {
- OUT_PORT_DWORD( (io_port + HREG_DMA_CTRL), DMA_CTRLSTAT_RESET); /*8*/
+ OUT_PORT_DWORD( (io_port + HREG_DMA_CTRL), DMA_CTRLSTAT_RESET); /*8*/
ifbp->IFB_CntlOpt &= ~DMA_ENABLED;
}
if ( cmd == HCF_CNTL_ENABLE ) {
@@ -989,12 +955,12 @@ int rc = HCF_ERR_INCOMP_FW;
* as additional beneficiary side effect, the SOP and EOP bits will also be cleared
*/
ifbp->IFB_CntlOpt |= DMA_ENABLED;
- HCFASSERT( NT_ASSERT, NEVER_TESTED )
+ HCFASSERT( NT_ASSERT, NEVER_TESTED );
// make the entire rx descriptor chain DMA-owned, so the DMA engine can (re-)use it.
p = ifbp->IFB_FirstDesc[DMA_RX];
if (p != NULL) { //;? Think this over again in the light of the new chaining strategy
- if ( 1 ) { //begin alternative
- HCFASSERT( NT_ASSERT, NEVER_TESTED )
+ if ( 1 ) { //begin alternative
+ HCFASSERT( NT_ASSERT, NEVER_TESTED );
put_frame_lst( ifbp, ifbp->IFB_FirstDesc[DMA_RX], DMA_RX );
if ( ifbp->IFB_FirstDesc[DMA_RX] ) {
put_frame_lst( ifbp, ifbp->IFB_FirstDesc[DMA_RX]->next_desc_addr, DMA_RX );
@@ -1013,147 +979,147 @@ int rc = HCF_ERR_INCOMP_FW;
}
}
#endif // HCF_DMA
- HCFASSERT( rc == HCF_SUCCESS, rc )
- HCFLOGEXIT( HCF_TRACE_CNTL )
+ HCFASSERT( rc == HCF_SUCCESS, rc );
+ HCFLOGEXIT( HCF_TRACE_CNTL );
return rc;
} // hcf_cntl
/************************************************************************************************************
-*
-*.MODULE int hcf_connect( IFBP ifbp, hcf_io io_base )
-*.PURPOSE Grants access right for the HCF to the IFB.
-* Initializes Card and HCF housekeeping.
-*
-*.ARGUMENTS
-* ifbp (near) address of the Interface Block
-* io_base non-USB: I/O Base address of the NIC (connect)
-* non-USB: HCF_DISCONNECT
-* USB: HCF_CONNECT, HCF_DISCONNECT
-*
-*.RETURNS
-* HCF_SUCCESS
-* HCF_ERR_INCOMP_PRI
-* HCF_ERR_INCOMP_FW
-* HCF_ERR_DEFUNCT_CMD_SEQ
-*!! HCF_ERR_NO_NIC really returned ;?
-* HCF_ERR_NO_NIC
-* HCF_ERR_TIME_OUT
-*
-* MSF-accessible fields of Result Block:
-* IFB_IOBase entry parameter io_base
-* IFB_IORange HREG_IO_RANGE (0x40/0x80)
-* IFB_Version version of the IFB layout
-* IFB_FWIdentity CFG_FW_IDENTITY_STRCT, specifies the identity of the
-* "running" F/W, i.e. tertiary F/W under normal conditions
-* IFB_FWSup CFG_SUP_RANGE_STRCT, specifies the supplier range of
-* the "running" F/W, i.e. tertiary F/W under normal conditions
-* IFB_HSISup CFG_SUP_RANGE_STRCT, specifies the HW/SW I/F range of the NIC
-* IFB_PRIIdentity CFG_PRI_IDENTITY_STRCT, specifies the Identity of the Primary F/W
-* IFB_PRISup CFG_SUP_RANGE_STRCT, specifies the supplier range of the Primary F/W
-* all other all MSF accessible fields, which are not specified above, are zero-filled
-*
-*.CONDITIONS
-* It is the responsibility of the MSF to assure the correctness of the I/O Base address.
-*
-* Note: hcf_connect defaults to NIC interrupt disabled mode, i.e. as if hcf_action( HCF_ACT_INT_OFF )
-* was called.
-*
-*.DESCRIPTION
-* hcf_connect passes the MSF-defined location of the IFB to the HCF and grants or revokes access right for the
-* HCF to the IFB. Revoking is done by specifying HCF_DISCONNECT rather than an I/O address for the parameter
-* io_base. Every call of hcf_connect in "connect" mode, must eventually be followed by a call of hcf_connect
-* in "disconnect" mode. Clalling hcf_connect in "connect"/"disconnect" mode can not be nested.
-* The IFB address must be used as a handle with all subsequent HCF-function calls and the HCF uses the IFB
-* address as a handle when it performs a call(back) of an MSF-function (i.e. msf_assert).
-*
-* Note that not only the MSF accessible fields are cleared, but also all internal housekeeping
-* information is re-initialized.
-* This implies that all settings which are done via hcf_action and hcf_put_info (e.g. CFG_MB_ASSERT, CFG_REG_MB,
-* CFG_REG_INFO_LOG) must be done again. The only field which is not cleared, is IFB_MSFSup.
-*
-* If HCF_INT_ON is selected as compile option, NIC interrupts are disabled.
-*
-* Assert fails if
-* - ifbp is not properly aligned ( ref chapter HCF_ALIGN in 4.1.1)
-* - I/O Base Address is not a multiple of 0x40 (note: 0x0000 is explicitly allowed).
-*
-*.DIAGRAM
-*
-*0: Throughout hcf_connect you need to distinguish the connect from the disconnect case, which requires
-* some attention about what to use as "I/O" address when for which purpose.
-*2:
-*2a: Reset H-II by toggling reset bit in IO-register on and off.
-* The HCF_TYPE_PRELOADED caters for the DOS environment where H-II is loaded by a separate program to
-* overcome the 64k size limit posed on DOS drivers.
-* The macro OPW is not yet useable because the IFB_IOBase field is not set.
-* Note 1: hopefully the clearing and initializing of the IFB (see below) acts as a delay which meets the
-* specification for S/W reset
-* Note 2: it turns out that on some H/W constellations, the clock to access the EEProm is not lowered
-* to an appropriate frequency by HREG_IO_SRESET. By giving an HCMD_INI first, this problem is worked around.
-*2b: Experimentally it is determined over a wide range of F/W versions that waiting for the for Cmd bit in
-* Ev register gives a workable strategy. The available documentation does not give much clues.
-*4: clear and initialize the IFB
-* The HCF house keeping info is designed such that zero is the appropriate initial value for as much as
-* feasible IFB-items.
-* The readable fields mentioned in the description section and some HCF specific fields are given their
-* actual value.
-* IFB_TickIni is initialized at best guess before calibration
-* Hcf_connect defaults to "no interrupt generation" (implicitly achieved by the zero-filling).
-*6: Register compile-time linked MSF Routine and set default filter level
-* cast needed to get around the "near" problem in DOS COM model
-* er C2446: no conversion from void (__near __cdecl *)(unsigned char __far *,unsigned int,unsigned short,int)
-* to void (__far __cdecl *)(unsigned char __far *,unsigned int,unsigned short,int)
-*8: If a command is apparently still active (as indicated by the Busy bit in Cmd register) this may indicate a
-* blocked cmd pipe line. To unblock the following actions are done:
-* - Ack everything
-* - Wait for Busy bit drop in Cmd register
-* - Wait for Cmd bit raise in Ev register
-* The two waits are combined in a single HCF_WAIT_WHILE to optimize memory size. If either of these waits
-* fail (prot_cnt becomes 0), then something is serious wrong. Rather than PANICK, the assumption is that the
-* next cmd_exe will fail, causing the HCF to go into DEFUNCT mode
-*10: Ack everything to unblock a (possibly blocked) cmd pipe line
-* Note 1: it is very likely that an Alloc event is pending and very well possible that a (Send) Cmd event is
-* pending on non-initial calls
-* Note 2: it is assumed that this strategy takes away the need to ack every conceivable event after an
-* Hermes Initialize
-*12: Only H-II NEEDS the Hermes Initialize command. Due to the different semantics for H-I and H-II
-* Initialize command, init() does not (and can not, since it is called e.g. after a download) execute the
-* Hermes Initialize command. Executing the Hermes Initialize command for H-I would not harm but not do
-* anything useful either, so it is skipped.
-* The return status of cmd_exe is ignored. It is assumed that if cmd_exe fails, init fails too
-*14: use io_base as a flag to merge hcf_connect and hcf_disconnect into 1 routine
-* the call to init and its subsequent call of cmd_exe will return HCF_ERR_NO_NIC if appropriate. This status
-* is (badly) needed by some legacy combination of NT4 and card services which do not yield an I/O address in
-* time.
-*
-*.NOTICE
-* On platforms where the NULL-pointer is not a bit-pattern of all zeros, the zero-filling of the IFB results
-* in an incorrect initialization of pointers.
-* The implementation of the MailBox manipulation in put_mb_info protects against the absence of a MailBox
-* based on IFB_MBSize, IFB_MBWp and ifbp->IFB_MBRp. This has ramifications on the initialization of the
-* MailBox via hcf_put_info with the CFG_REG_MB type, but it prevents dependency on the "NULL-"ness of
-* IFB_MBp.
-*
-*.NOTICE
-* There are a number of problems when asserting and logging hcf_connect, e.g.
-* - Asserting on re-entrancy of hcf_connect by means of
-* "HCFASSERT( (ifbp->IFB_AssertTrace & HCF_ASSERT_CONNECT) == 0, 0 )" is not useful because IFB contents
-* are undefined
-* - Asserting before the IFB is cleared will cause mdd_assert() to interpret the garbage in IFB_AssertRtn
-* as a routine address
-* Therefore HCFTRACE nor HCFLOGENTRY is called by hcf_connect.
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.MODULE int hcf_connect( IFBP ifbp, hcf_io io_base )
+ *.PURPOSE Grants access right for the HCF to the IFB.
+ * Initializes Card and HCF housekeeping.
+ *
+ *.ARGUMENTS
+ * ifbp (near) address of the Interface Block
+ * io_base non-USB: I/O Base address of the NIC (connect)
+ * non-USB: HCF_DISCONNECT
+ * USB: HCF_CONNECT, HCF_DISCONNECT
+ *
+ *.RETURNS
+ * HCF_SUCCESS
+ * HCF_ERR_INCOMP_PRI
+ * HCF_ERR_INCOMP_FW
+ * HCF_ERR_DEFUNCT_CMD_SEQ
+ *!! HCF_ERR_NO_NIC really returned ;?
+ * HCF_ERR_NO_NIC
+ * HCF_ERR_TIME_OUT
+ *
+ * MSF-accessible fields of Result Block:
+ * IFB_IOBase entry parameter io_base
+ * IFB_IORange HREG_IO_RANGE (0x40/0x80)
+ * IFB_Version version of the IFB layout
+ * IFB_FWIdentity CFG_FW_IDENTITY_STRCT, specifies the identity of the
+ * "running" F/W, i.e. tertiary F/W under normal conditions
+ * IFB_FWSup CFG_SUP_RANGE_STRCT, specifies the supplier range of
+ * the "running" F/W, i.e. tertiary F/W under normal conditions
+ * IFB_HSISup CFG_SUP_RANGE_STRCT, specifies the HW/SW I/F range of the NIC
+ * IFB_PRIIdentity CFG_PRI_IDENTITY_STRCT, specifies the Identity of the Primary F/W
+ * IFB_PRISup CFG_SUP_RANGE_STRCT, specifies the supplier range of the Primary F/W
+ * all other all MSF accessible fields, which are not specified above, are zero-filled
+ *
+ *.CONDITIONS
+ * It is the responsibility of the MSF to assure the correctness of the I/O Base address.
+ *
+ * Note: hcf_connect defaults to NIC interrupt disabled mode, i.e. as if hcf_action( HCF_ACT_INT_OFF )
+ * was called.
+ *
+ *.DESCRIPTION
+ * hcf_connect passes the MSF-defined location of the IFB to the HCF and grants or revokes access right for the
+ * HCF to the IFB. Revoking is done by specifying HCF_DISCONNECT rather than an I/O address for the parameter
+ * io_base. Every call of hcf_connect in "connect" mode, must eventually be followed by a call of hcf_connect
+ * in "disconnect" mode. Clalling hcf_connect in "connect"/"disconnect" mode can not be nested.
+ * The IFB address must be used as a handle with all subsequent HCF-function calls and the HCF uses the IFB
+ * address as a handle when it performs a call(back) of an MSF-function (i.e. msf_assert).
+ *
+ * Note that not only the MSF accessible fields are cleared, but also all internal housekeeping
+ * information is re-initialized.
+ * This implies that all settings which are done via hcf_action and hcf_put_info (e.g. CFG_MB_ASSERT, CFG_REG_MB,
+ * CFG_REG_INFO_LOG) must be done again. The only field which is not cleared, is IFB_MSFSup.
+ *
+ * If HCF_INT_ON is selected as compile option, NIC interrupts are disabled.
+ *
+ * Assert fails if
+ * - ifbp is not properly aligned ( ref chapter HCF_ALIGN in 4.1.1)
+ * - I/O Base Address is not a multiple of 0x40 (note: 0x0000 is explicitly allowed).
+ *
+ *.DIAGRAM
+ *
+ *0: Throughout hcf_connect you need to distinguish the connect from the disconnect case, which requires
+ * some attention about what to use as "I/O" address when for which purpose.
+ *2:
+ *2a: Reset H-II by toggling reset bit in IO-register on and off.
+ * The HCF_TYPE_PRELOADED caters for the DOS environment where H-II is loaded by a separate program to
+ * overcome the 64k size limit posed on DOS drivers.
+ * The macro OPW is not yet useable because the IFB_IOBase field is not set.
+ * Note 1: hopefully the clearing and initializing of the IFB (see below) acts as a delay which meets the
+ * specification for S/W reset
+ * Note 2: it turns out that on some H/W constellations, the clock to access the EEProm is not lowered
+ * to an appropriate frequency by HREG_IO_SRESET. By giving an HCMD_INI first, this problem is worked around.
+ *2b: Experimentally it is determined over a wide range of F/W versions that waiting for the for Cmd bit in
+ * Ev register gives a workable strategy. The available documentation does not give much clues.
+ *4: clear and initialize the IFB
+ * The HCF house keeping info is designed such that zero is the appropriate initial value for as much as
+ * feasible IFB-items.
+ * The readable fields mentioned in the description section and some HCF specific fields are given their
+ * actual value.
+ * IFB_TickIni is initialized at best guess before calibration
+ * Hcf_connect defaults to "no interrupt generation" (implicitly achieved by the zero-filling).
+ *6: Register compile-time linked MSF Routine and set default filter level
+ * cast needed to get around the "near" problem in DOS COM model
+ * er C2446: no conversion from void (__near __cdecl *)(unsigned char __far *,unsigned int,unsigned short,int)
+ * to void (__far __cdecl *)(unsigned char __far *,unsigned int,unsigned short,int)
+ *8: If a command is apparently still active (as indicated by the Busy bit in Cmd register) this may indicate a
+ * blocked cmd pipe line. To unblock the following actions are done:
+ * - Ack everything
+ * - Wait for Busy bit drop in Cmd register
+ * - Wait for Cmd bit raise in Ev register
+ * The two waits are combined in a single HCF_WAIT_WHILE to optimize memory size. If either of these waits
+ * fail (prot_cnt becomes 0), then something is serious wrong. Rather than PANICK, the assumption is that the
+ * next cmd_exe will fail, causing the HCF to go into DEFUNCT mode
+ *10: Ack everything to unblock a (possibly blocked) cmd pipe line
+ * Note 1: it is very likely that an Alloc event is pending and very well possible that a (Send) Cmd event is
+ * pending on non-initial calls
+ * Note 2: it is assumed that this strategy takes away the need to ack every conceivable event after an
+ * Hermes Initialize
+ *12: Only H-II NEEDS the Hermes Initialize command. Due to the different semantics for H-I and H-II
+ * Initialize command, init() does not (and can not, since it is called e.g. after a download) execute the
+ * Hermes Initialize command. Executing the Hermes Initialize command for H-I would not harm but not do
+ * anything useful either, so it is skipped.
+ * The return status of cmd_exe is ignored. It is assumed that if cmd_exe fails, init fails too
+ *14: use io_base as a flag to merge hcf_connect and hcf_disconnect into 1 routine
+ * the call to init and its subsequent call of cmd_exe will return HCF_ERR_NO_NIC if appropriate. This status
+ * is (badly) needed by some legacy combination of NT4 and card services which do not yield an I/O address in
+ * time.
+ *
+ *.NOTICE
+ * On platforms where the NULL-pointer is not a bit-pattern of all zeros, the zero-filling of the IFB results
+ * in an incorrect initialization of pointers.
+ * The implementation of the MailBox manipulation in put_mb_info protects against the absence of a MailBox
+ * based on IFB_MBSize, IFB_MBWp and ifbp->IFB_MBRp. This has ramifications on the initialization of the
+ * MailBox via hcf_put_info with the CFG_REG_MB type, but it prevents dependency on the "NULL-"ness of
+ * IFB_MBp.
+ *
+ *.NOTICE
+ * There are a number of problems when asserting and logging hcf_connect, e.g.
+ * - Asserting on re-entrancy of hcf_connect by means of
+ * "HCFASSERT( (ifbp->IFB_AssertTrace & HCF_ASSERT_CONNECT) == 0, 0 )" is not useful because IFB contents
+ * are undefined
+ * - Asserting before the IFB is cleared will cause mdd_assert() to interpret the garbage in IFB_AssertRtn
+ * as a routine address
+ * Therefore HCFTRACE nor HCFLOGENTRY is called by hcf_connect.
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
int
hcf_connect( IFBP ifbp, hcf_io io_base )
{
-int rc = HCF_SUCCESS;
-hcf_io io_addr;
-hcf_32 prot_cnt;
-hcf_8 *q;
-LTV_STRCT x;
+ int rc = HCF_SUCCESS;
+ hcf_io io_addr;
+ hcf_32 prot_cnt;
+ hcf_8 *q;
+ LTV_STRCT x;
#if HCF_ASSERT
hcf_16 xa = ifbp->IFB_FWIdentity.typ;
/* is assumed to cause an assert later on if hcf_connect is called without intervening hcf_disconnect.
@@ -1163,51 +1129,51 @@ LTV_STRCT x;
*/
#endif // HCF_ASSERT
- if ( io_base == HCF_DISCONNECT ) { //disconnect
+ if ( io_base == HCF_DISCONNECT ) { //disconnect
io_addr = ifbp->IFB_IOBase;
- OPW( HREG_INT_EN, 0 ); //;?workaround against dying F/W on subsequent hcf_connect calls
- } else { //connect /* 0 */
+ OPW( HREG_INT_EN, 0 ); //;?workaround against dying F/W on subsequent hcf_connect calls
+ } else { //connect /* 0 */
io_addr = io_base;
}
#if 0 //;? if a subsequent hcf_connect is preceded by an hcf_disconnect the wakeup is not needed !!
#if HCF_SLEEP
- OUT_PORT_WORD( .....+HREG_IO, HREG_IO_WAKEUP_ASYNC ); //OPW not yet useable
- MSF_WAIT(800); // MSF-defined function to wait n microseconds.
+ OUT_PORT_WORD( .....+HREG_IO, HREG_IO_WAKEUP_ASYNC ); //OPW not yet useable
+ MSF_WAIT(800); // MSF-defined function to wait n microseconds.
note that MSF_WAIT uses not yet defined!!!! IFB_IOBase and IFB_TickIni (via PROT_CNT_INI)
so be careful if this code is restored
#endif // HCF_SLEEP
#endif // 0
-#if ( (HCF_TYPE) & HCF_TYPE_PRELOADED ) == 0 //switch clock back for SEEPROM access !!!
- OUT_PORT_WORD( io_addr + HREG_CMD, HCMD_INI ); //OPW not yet useable
+#if ( (HCF_TYPE) & HCF_TYPE_PRELOADED ) == 0 //switch clock back for SEEPROM access !!!
+ OUT_PORT_WORD( io_addr + HREG_CMD, HCMD_INI ); //OPW not yet useable
prot_cnt = INI_TICK_INI;
HCF_WAIT_WHILE( (IN_PORT_WORD( io_addr + HREG_EV_STAT) & HREG_EV_CMD) == 0 );
- OUT_PORT_WORD( (io_addr + HREG_IO), HREG_IO_SRESET ); //OPW not yet useable /* 2a*/
+ OUT_PORT_WORD( (io_addr + HREG_IO), HREG_IO_SRESET ); //OPW not yet useable /* 2a*/
#endif // HCF_TYPE_PRELOADED
- for ( q = (hcf_8*)(&ifbp->IFB_Magic); q > (hcf_8*)ifbp; *--q = 0 ) /*NOP*/; /* 4 */
- ifbp->IFB_Magic = HCF_MAGIC;
- ifbp->IFB_Version = IFB_VERSION;
+ for ( q = (hcf_8*)(&ifbp->IFB_Magic); q > (hcf_8*)ifbp; *--q = 0 ) /*NOP*/; /* 4 */
+ ifbp->IFB_Magic = HCF_MAGIC;
+ ifbp->IFB_Version = IFB_VERSION;
#if defined MSF_COMPONENT_ID //a new IFB demonstrates how dirty the solution is
- xxxx[xxxx_PRI_IDENTITY_OFFSET] = NULL; //IFB_PRIIdentity placeholder 0xFD02
- xxxx[xxxx_PRI_IDENTITY_OFFSET+1] = NULL; //IFB_PRISup placeholder 0xFD03
+ xxxx[xxxx_PRI_IDENTITY_OFFSET] = NULL; //IFB_PRIIdentity placeholder 0xFD02
+ xxxx[xxxx_PRI_IDENTITY_OFFSET+1] = NULL; //IFB_PRISup placeholder 0xFD03
#endif // MSF_COMPONENT_ID
#if (HCF_TALLIES) & ( HCF_TALLIES_NIC | HCF_TALLIES_HCF )
- ifbp->IFB_TallyLen = 1 + 2 * (HCF_NIC_TAL_CNT + HCF_HCF_TAL_CNT); //convert # of Tallies to L value for LTV
- ifbp->IFB_TallyTyp = CFG_TALLIES; //IFB_TallyTyp: set T value
+ ifbp->IFB_TallyLen = 1 + 2 * (HCF_NIC_TAL_CNT + HCF_HCF_TAL_CNT); //convert # of Tallies to L value for LTV
+ ifbp->IFB_TallyTyp = CFG_TALLIES; //IFB_TallyTyp: set T value
#endif // HCF_TALLIES_NIC / HCF_TALLIES_HCF
- ifbp->IFB_IOBase = io_addr; //set IO_Base asap, so asserts via HREG_SW_2 don't harm
- ifbp->IFB_IORange = HREG_IO_RANGE;
- ifbp->IFB_CntlOpt = USE_16BIT;
+ ifbp->IFB_IOBase = io_addr; //set IO_Base asap, so asserts via HREG_SW_2 don't harm
+ ifbp->IFB_IORange = HREG_IO_RANGE;
+ ifbp->IFB_CntlOpt = USE_16BIT;
#if HCF_ASSERT
assert_ifbp = ifbp;
ifbp->IFB_AssertLvl = 1;
#if (HCF_ASSERT) & HCF_ASSERT_LNK_MSF_RTN
if ( io_base != HCF_DISCONNECT ) {
- ifbp->IFB_AssertRtn = (MSF_ASSERT_RTNP)msf_assert; /* 6 */
+ ifbp->IFB_AssertRtn = (MSF_ASSERT_RTNP)msf_assert; /* 6 */
}
#endif // HCF_ASSERT_LNK_MSF_RTN
-#if (HCF_ASSERT) & HCF_ASSERT_MB //build the structure to pass the assert info to hcf_put_info
+#if (HCF_ASSERT) & HCF_ASSERT_MB //build the structure to pass the assert info to hcf_put_info
ifbp->IFB_AssertStrct.len = sizeof(ifbp->IFB_AssertStrct)/sizeof(hcf_16) - 1;
ifbp->IFB_AssertStrct.typ = CFG_MB_INFO;
ifbp->IFB_AssertStrct.base_typ = CFG_MB_ASSERT;
@@ -1217,34 +1183,34 @@ LTV_STRCT x;
ifbp->IFB_AssertStrct.frag_buf[0].frag_addr = &ifbp->IFB_AssertLine;
#endif // HCF_ASSERT_MB
#endif // HCF_ASSERT
- IF_PROT_TIME( prot_cnt = ifbp->IFB_TickIni = INI_TICK_INI; )
+ IF_PROT_TIME( prot_cnt = ifbp->IFB_TickIni = INI_TICK_INI );
#if ( (HCF_TYPE) & HCF_TYPE_PRELOADED ) == 0
//!! No asserts before Reset-bit in HREG_IO is cleared
- OPW( HREG_IO, 0x0000 ); //OPW useable /* 2b*/
+ OPW( HREG_IO, 0x0000 ); //OPW useable /* 2b*/
HCF_WAIT_WHILE( (IPW( HREG_EV_STAT) & HREG_EV_CMD) == 0 );
- IF_PROT_TIME( HCFASSERT( prot_cnt, IPW( HREG_EV_STAT) ) )
- IF_PROT_TIME( if ( prot_cnt ) prot_cnt = ifbp->IFB_TickIni; )
+ IF_PROT_TIME( HCFASSERT( prot_cnt, IPW( HREG_EV_STAT) ) );
+ IF_PROT_TIME( if ( prot_cnt ) prot_cnt = ifbp->IFB_TickIni );
#endif // HCF_TYPE_PRELOADED
//!! No asserts before Reset-bit in HREG_IO is cleared
- HCFASSERT( DO_ASSERT, MERGE_2( HCF_ASSERT, 0xCAF0 ) ) //just to proof that the complete assert machinery is working
- HCFASSERT( xa != CFG_FW_IDENTITY, 0 ) // assert if hcf_connect is called without intervening hcf_disconnect.
- HCFASSERT( ((hcf_32)(void*)ifbp & (HCF_ALIGN-1) ) == 0, (hcf_32)(void*)ifbp )
- HCFASSERT( (io_addr & 0x003F) == 0, io_addr )
- //if Busy bit in Cmd register
- if (IPW( HREG_CMD ) & HCMD_BUSY ) { /* 8 */
- //. Ack all to unblock a (possibly) blocked cmd pipe line
+ HCFASSERT( DO_ASSERT, MERGE_2( HCF_ASSERT, 0xCAF0 ) ); //just to proof that the complete assert machinery is working
+ HCFASSERT( xa != CFG_FW_IDENTITY, 0 ); // assert if hcf_connect is called without intervening hcf_disconnect.
+ HCFASSERT( ((hcf_32)(void*)ifbp & (HCF_ALIGN-1) ) == 0, (hcf_32)(void*)ifbp );
+ HCFASSERT( (io_addr & 0x003F) == 0, io_addr );
+ //if Busy bit in Cmd register
+ if (IPW( HREG_CMD ) & HCMD_BUSY ) { /* 8 */
+ //. Ack all to unblock a (possibly) blocked cmd pipe line
OPW( HREG_EV_ACK, ~HREG_EV_SLEEP_REQ );
- //. Wait for Busy bit drop in Cmd register
- //. Wait for Cmd bit raise in Ev register
+ //. Wait for Busy bit drop in Cmd register
+ //. Wait for Cmd bit raise in Ev register
HCF_WAIT_WHILE( ( IPW( HREG_CMD ) & HCMD_BUSY ) && (IPW( HREG_EV_STAT) & HREG_EV_CMD) == 0 );
- IF_PROT_TIME( HCFASSERT( prot_cnt, IPW( HREG_EV_STAT) ) ) /* if prot_cnt == 0, cmd_exe will fail, causing DEFUNCT */
+ IF_PROT_TIME( HCFASSERT( prot_cnt, IPW( HREG_EV_STAT) ) ); /* if prot_cnt == 0, cmd_exe will fail, causing DEFUNCT */
}
OPW( HREG_EV_ACK, ~HREG_EV_SLEEP_REQ );
-#if ( (HCF_TYPE) & HCF_TYPE_PRELOADED ) == 0 /*12*/
+#if ( (HCF_TYPE) & HCF_TYPE_PRELOADED ) == 0 /*12*/
(void)cmd_exe( ifbp, HCMD_INI, 0 );
#endif // HCF_TYPE_PRELOADED
-if ( io_base != HCF_DISCONNECT ) {
- rc = init( ifbp ); /*14*/
+ if ( io_base != HCF_DISCONNECT ) {
+ rc = init( ifbp ); /*14*/
if ( rc == HCF_SUCCESS ) {
x.len = 2;
x.typ = CFG_NIC_BUS_TYPE;
@@ -1253,10 +1219,10 @@ if ( io_base != HCF_DISCONNECT ) {
//CFG_NIC_BUS_TYPE not supported -> default 32 bits/DMA, MSF has to overrule via CFG_CNTL_OPT
if ( x.len == 0 || x.val[0] == 0x0002 || x.val[0] == 0x0003 ) {
#if (HCF_IO) & HCF_IO_32BITS
- ifbp->IFB_CntlOpt &= ~USE_16BIT; //reset USE_16BIT
+ ifbp->IFB_CntlOpt &= ~USE_16BIT; //reset USE_16BIT
#endif // HCF_IO_32BITS
#if HCF_DMA
- ifbp->IFB_CntlOpt |= USE_DMA; //SET DMA
+ ifbp->IFB_CntlOpt |= USE_DMA; //SET DMA
#else
ifbp->IFB_IORange = 0x40 /*i.s.o. HREG_IO_RANGE*/;
#endif // HCF_DMA
@@ -1264,188 +1230,188 @@ if ( io_base != HCF_DISCONNECT ) {
}
} else HCFASSERT( ( ifbp->IFB_Magic ^= HCF_MAGIC ) == 0, ifbp->IFB_Magic ) /*NOP*/;
/* of above HCFASSERT only the side effect is needed, NOP in case HCFASSERT is dummy */
- ifbp->IFB_IOBase = io_base; /* 0*/
+ ifbp->IFB_IOBase = io_base; /* 0*/
return rc;
} // hcf_connect
#if HCF_DMA
/************************************************************************************************************
-* Function get_frame_lst
-* - resolve the "last host-owned descriptor" problems when a descriptor list is reclaimed by the MSF.
-*
-* The FrameList to be reclaimed as well as the DescriptorList always start in IFB_FirstDesc[tx_rx_flag]
-* and this is always the "current" DELWA Descriptor.
-*
-* If a FrameList is available, the last descriptor of the FrameList to turned into a new DELWA Descriptor:
-* - a copy is made from the information in the last descriptor of the FrameList into the current
-* DELWA Descriptor
-* - the remainder of the DescriptorList is detached from the copy by setting the next_desc_addr at NULL
-* - the DMA control bits of the copy are cleared to do not confuse the MSF
-* - the copy of the last descriptor (i.e. the "old" DELWA Descriptor) is chained to the prev Descriptor
-* of the FrameList, thus replacing the original last Descriptor of the FrameList.
-* - IFB_FirstDesc is changed to the address of that replaced (original) last descriptor of the FrameList,
-* i.e. the "new" DELWA Descriptor.
-*
-* This function makes a copy of that last host-owned descriptor, so the MSF will get a copy of the descriptor.
-* On top of that, it adjusts DMA related fields in the IFB structure.
- // perform a copying-scheme to circumvent the 'last host owned descriptor cannot be reclaimed' limitation imposed by H2.5's DMA hardware design
- // a 'reclaim descriptor' should be available in the HCF:
-*
-* Returns: address of the first descriptor of the FrameList
-*
+ * Function get_frame_lst
+ * - resolve the "last host-owned descriptor" problems when a descriptor list is reclaimed by the MSF.
+ *
+ * The FrameList to be reclaimed as well as the DescriptorList always start in IFB_FirstDesc[tx_rx_flag]
+ * and this is always the "current" DELWA Descriptor.
+ *
+ * If a FrameList is available, the last descriptor of the FrameList to turned into a new DELWA Descriptor:
+ * - a copy is made from the information in the last descriptor of the FrameList into the current
+ * DELWA Descriptor
+ * - the remainder of the DescriptorList is detached from the copy by setting the next_desc_addr at NULL
+ * - the DMA control bits of the copy are cleared to do not confuse the MSF
+ * - the copy of the last descriptor (i.e. the "old" DELWA Descriptor) is chained to the prev Descriptor
+ * of the FrameList, thus replacing the original last Descriptor of the FrameList.
+ * - IFB_FirstDesc is changed to the address of that replaced (original) last descriptor of the FrameList,
+ * i.e. the "new" DELWA Descriptor.
+ *
+ * This function makes a copy of that last host-owned descriptor, so the MSF will get a copy of the descriptor.
+ * On top of that, it adjusts DMA related fields in the IFB structure.
+ // perform a copying-scheme to circumvent the 'last host owned descriptor cannot be reclaimed' limitation imposed by H2.5's DMA hardware design
+ // a 'reclaim descriptor' should be available in the HCF:
+ *
+ * Returns: address of the first descriptor of the FrameList
+ *
8: Be careful once you start re-ordering the steps in the copy process, that it still works for cases
-* of FrameLists of 1, 2 and more than 2 descriptors
-*
-* Input parameters:
-* tx_rx_flag : specifies 'transmit' or 'receive' descriptor.
-*
-************************************************************************************************************/
+ * of FrameLists of 1, 2 and more than 2 descriptors
+ *
+ * Input parameters:
+ * tx_rx_flag : specifies 'transmit' or 'receive' descriptor.
+ *
+ ************************************************************************************************************/
HCF_STATIC DESC_STRCT*
get_frame_lst( IFBP ifbp, int tx_rx_flag )
{
-DESC_STRCT *head = ifbp->IFB_FirstDesc[tx_rx_flag];
-DESC_STRCT *copy, *p, *prev;
+ DESC_STRCT *head = ifbp->IFB_FirstDesc[tx_rx_flag];
+ DESC_STRCT *copy, *p, *prev;
- HCFASSERT( tx_rx_flag == DMA_RX || tx_rx_flag == DMA_TX, tx_rx_flag )
- //if FrameList
+ HCFASSERT( tx_rx_flag == DMA_RX || tx_rx_flag == DMA_TX, tx_rx_flag );
+ //if FrameList
if ( head ) {
- //. search for last descriptor of first FrameList
+ //. search for last descriptor of first FrameList
p = prev = head;
while ( ( p->BUF_SIZE & DESC_EOP ) == 0 && p->next_desc_addr ) {
- if ( ( ifbp->IFB_CntlOpt & DMA_ENABLED ) == 0 ) { //clear control bits when disabled
+ if ( ( ifbp->IFB_CntlOpt & DMA_ENABLED ) == 0 ) { //clear control bits when disabled
p->BUF_CNT &= DESC_CNT_MASK;
}
prev = p;
p = p->next_desc_addr;
}
- //. if DMA enabled
+ //. if DMA enabled
if ( ifbp->IFB_CntlOpt & DMA_ENABLED ) {
- //. . if last descriptor of FrameList is DMA owned
- //. . or if FrameList is single (DELWA) Descriptor
+ //. . if last descriptor of FrameList is DMA owned
+ //. . or if FrameList is single (DELWA) Descriptor
if ( p->BUF_CNT & DESC_DMA_OWNED || head->next_desc_addr == NULL ) {
- //. . . refuse to return FrameList to caller
+ //. . . refuse to return FrameList to caller
head = NULL;
}
}
}
- //if returnable FrameList found
+ //if returnable FrameList found
if ( head ) {
- //. if FrameList is single (DELWA) Descriptor (implies DMA disabled)
- if ( head->next_desc_addr == NULL ) {
- //. . clear DescriptorList
+ //. if FrameList is single (DELWA) Descriptor (implies DMA disabled)
+ if ( head->next_desc_addr == NULL ) {
+ //. . clear DescriptorList
/*;?ifbp->IFB_LastDesc[tx_rx_flag] =*/ ifbp->IFB_FirstDesc[tx_rx_flag] = NULL;
- //. else
+ //. else
} else {
- //. . strip hardware-related bits from last descriptor
- //. . remove DELWA Descriptor from head of DescriptorList
+ //. . strip hardware-related bits from last descriptor
+ //. . remove DELWA Descriptor from head of DescriptorList
copy = head;
- head = head->next_desc_addr;
- //. . exchange first (Confined) and last (possibly imprisoned) Descriptor
+ head = head->next_desc_addr;
+ //. . exchange first (Confined) and last (possibly imprisoned) Descriptor
copy->buf_phys_addr = p->buf_phys_addr;
copy->buf_addr = p->buf_addr;
- copy->BUF_SIZE = p->BUF_SIZE &= DESC_CNT_MASK; //get rid of DESC_EOP and possibly DESC_SOP
- copy->BUF_CNT = p->BUF_CNT &= DESC_CNT_MASK; //get rid of DESC_DMA_OWNED
+ copy->BUF_SIZE = p->BUF_SIZE &= DESC_CNT_MASK; //get rid of DESC_EOP and possibly DESC_SOP
+ copy->BUF_CNT = p->BUF_CNT &= DESC_CNT_MASK; //get rid of DESC_DMA_OWNED
#if (HCF_EXT) & HCF_DESC_STRCT_EXT
copy->DESC_MSFSup = p->DESC_MSFSup;
#endif // HCF_DESC_STRCT_EXT
- //. . turn into a DELWA Descriptor
+ //. . turn into a DELWA Descriptor
p->buf_addr = NULL;
- //. . chain copy to prev /* 8*/
+ //. . chain copy to prev /* 8*/
prev->next_desc_addr = copy;
- //. . detach remainder of the DescriptorList from FrameList
+ //. . detach remainder of the DescriptorList from FrameList
copy->next_desc_addr = NULL;
copy->next_desc_phys_addr = 0xDEAD0000; //! just to be nice, not really needed
- //. . save the new start (i.e. DELWA Descriptor) in IFB_FirstDesc
+ //. . save the new start (i.e. DELWA Descriptor) in IFB_FirstDesc
ifbp->IFB_FirstDesc[tx_rx_flag] = p;
}
- //. strip DESC_SOP from first descriptor
+ //. strip DESC_SOP from first descriptor
head->BUF_SIZE &= DESC_CNT_MASK;
//head->BUF_CNT &= DESC_CNT_MASK; get rid of DESC_DMA_OWNED
head->next_desc_phys_addr = 0xDEAD0000; //! just to be nice, not really needed
}
- //return the just detached FrameList (if any)
+ //return the just detached FrameList (if any)
return head;
} // get_frame_lst
/************************************************************************************************************
-* Function put_frame_lst
-*
-* This function
-*
-* Returns: address of the first descriptor of the FrameList
-*
-* Input parameters:
-* tx_rx_flag : specifies 'transmit' or 'receive' descriptor.
-*
-* The following list should be kept in sync with hcf_dma_tx/rx_put, in order to get them in the WCI-spec !!!!
-* Assert fails if
-* - DMA is not enabled
-* - descriptor list is NULL
-* - a descriptor in the descriptor list is not double word aligned
-* - a count of size field of a descriptor contains control bits, i.e. bits in the high order nibble.
-* - the DELWA descriptor is not a "singleton" DescriptorList.
-* - the DELWA descriptor is not the first Descriptor supplied
-* - a non_DMA descriptor is supplied before the DELWA Descriptor is supplied
-* - Possibly more checks could be added !!!!!!!!!!!!!
-
-*.NOTICE
-* The asserts marked with *sc* are really sanity checks for the HCF, they can (supposedly) not be influenced
-* by incorrect MSF behavior
-
- // The MSF is required to supply the HCF with a single descriptor for MSF tx reclaim purposes.
- // This 'reclaim descriptor' can be recognized by the fact that its buf_addr field is zero.
- *********************************************************************************************
- * Although not required from a hardware perspective:
- * - make each descriptor in this rx-chain DMA-owned.
- * - Also set the count to zero. EOP and SOP bits are also cleared.
- *********************************************************************************************/
+ * Function put_frame_lst
+ *
+ * This function
+ *
+ * Returns: address of the first descriptor of the FrameList
+ *
+ * Input parameters:
+ * tx_rx_flag : specifies 'transmit' or 'receive' descriptor.
+ *
+ * The following list should be kept in sync with hcf_dma_tx/rx_put, in order to get them in the WCI-spec !!!!
+ * Assert fails if
+ * - DMA is not enabled
+ * - descriptor list is NULL
+ * - a descriptor in the descriptor list is not double word aligned
+ * - a count of size field of a descriptor contains control bits, i.e. bits in the high order nibble.
+ * - the DELWA descriptor is not a "singleton" DescriptorList.
+ * - the DELWA descriptor is not the first Descriptor supplied
+ * - a non_DMA descriptor is supplied before the DELWA Descriptor is supplied
+ * - Possibly more checks could be added !!!!!!!!!!!!!
+
+ *.NOTICE
+ * The asserts marked with *sc* are really sanity checks for the HCF, they can (supposedly) not be influenced
+ * by incorrect MSF behavior
+
+ // The MSF is required to supply the HCF with a single descriptor for MSF tx reclaim purposes.
+ // This 'reclaim descriptor' can be recognized by the fact that its buf_addr field is zero.
+ *********************************************************************************************
+ * Although not required from a hardware perspective:
+ * - make each descriptor in this rx-chain DMA-owned.
+ * - Also set the count to zero. EOP and SOP bits are also cleared.
+ *********************************************************************************************/
HCF_STATIC void
put_frame_lst( IFBP ifbp, DESC_STRCT *descp, int tx_rx_flag )
{
- DESC_STRCT *p = descp;
+ DESC_STRCT *p = descp;
hcf_16 port;
- HCFASSERT( ifbp->IFB_CntlOpt & USE_DMA, ifbp->IFB_CntlOpt) //only hcf_dma_tx_put must also be DMA_ENABLED
- HCFASSERT( tx_rx_flag == DMA_RX || tx_rx_flag == DMA_TX, tx_rx_flag )
- HCFASSERT( p , 0 )
+ HCFASSERT( ifbp->IFB_CntlOpt & USE_DMA, ifbp->IFB_CntlOpt); //only hcf_dma_tx_put must also be DMA_ENABLED
+ HCFASSERT( tx_rx_flag == DMA_RX || tx_rx_flag == DMA_TX, tx_rx_flag );
+ HCFASSERT( p , 0 );
while ( p ) {
- HCFASSERT( ((hcf_32)p & 3 ) == 0, (hcf_32)p )
- HCFASSERT( (p->BUF_CNT & ~DESC_CNT_MASK) == 0, p->BUF_CNT )
- HCFASSERT( (p->BUF_SIZE & ~DESC_CNT_MASK) == 0, p->BUF_SIZE )
- p->BUF_SIZE &= DESC_CNT_MASK; //!!this SHOULD be superfluous in case of correct MSF
- p->BUF_CNT &= tx_rx_flag == DMA_RX ? 0 : DESC_CNT_MASK; //!!this SHOULD be superfluous in case of correct MSF
+ HCFASSERT( ((hcf_32)p & 3 ) == 0, (hcf_32)p );
+ HCFASSERT( (p->BUF_CNT & ~DESC_CNT_MASK) == 0, p->BUF_CNT );
+ HCFASSERT( (p->BUF_SIZE & ~DESC_CNT_MASK) == 0, p->BUF_SIZE );
+ p->BUF_SIZE &= DESC_CNT_MASK; //!!this SHOULD be superfluous in case of correct MSF
+ p->BUF_CNT &= tx_rx_flag == DMA_RX ? 0 : DESC_CNT_MASK; //!!this SHOULD be superfluous in case of correct MSF
p->BUF_CNT |= DESC_DMA_OWNED;
if ( p->next_desc_addr ) {
-// HCFASSERT( p->buf_addr && p->buf_phys_addr && p->BUF_SIZE && +/- p->BUF_SIZE, ... )
- HCFASSERT( p->next_desc_addr->desc_phys_addr, (hcf_32)p->next_desc_addr )
+// HCFASSERT( p->buf_addr && p->buf_phys_addr && p->BUF_SIZE && +/- p->BUF_SIZE, ... );
+ HCFASSERT( p->next_desc_addr->desc_phys_addr, (hcf_32)p->next_desc_addr );
p->next_desc_phys_addr = p->next_desc_addr->desc_phys_addr;
- } else { //
+ } else { //
p->next_desc_phys_addr = 0;
- if ( p->buf_addr == NULL ) { // DELWA Descriptor
- HCFASSERT( descp == p, (hcf_32)descp ) //singleton DescriptorList
- HCFASSERT( ifbp->IFB_FirstDesc[tx_rx_flag] == NULL, (hcf_32)ifbp->IFB_FirstDesc[tx_rx_flag])
- HCFASSERT( ifbp->IFB_LastDesc[tx_rx_flag] == NULL, (hcf_32)ifbp->IFB_LastDesc[tx_rx_flag])
+ if ( p->buf_addr == NULL ) { // DELWA Descriptor
+ HCFASSERT( descp == p, (hcf_32)descp ); //singleton DescriptorList
+ HCFASSERT( ifbp->IFB_FirstDesc[tx_rx_flag] == NULL, (hcf_32)ifbp->IFB_FirstDesc[tx_rx_flag]);
+ HCFASSERT( ifbp->IFB_LastDesc[tx_rx_flag] == NULL, (hcf_32)ifbp->IFB_LastDesc[tx_rx_flag]);
descp->BUF_CNT = 0; //&= ~DESC_DMA_OWNED;
ifbp->IFB_FirstDesc[tx_rx_flag] = descp;
// part of alternative ifbp->IFB_LastDesc[tx_rx_flag] = ifbp->IFB_FirstDesc[tx_rx_flag] = descp;
- // if "recycling" a FrameList
- // (e.g. called from hcf_cntl( HCF_CNTL_ENABLE )
- // . prepare for activation DMA controller
+ // if "recycling" a FrameList
+ // (e.g. called from hcf_cntl( HCF_CNTL_ENABLE )
+ // . prepare for activation DMA controller
// part of alternative descp = descp->next_desc_addr;
- } else { //a "real" FrameList, hand it over to the DMA engine
- HCFASSERT( ifbp->IFB_FirstDesc[tx_rx_flag], (hcf_32)descp )
- HCFASSERT( ifbp->IFB_LastDesc[tx_rx_flag], (hcf_32)descp )
+ } else { //a "real" FrameList, hand it over to the DMA engine
+ HCFASSERT( ifbp->IFB_FirstDesc[tx_rx_flag], (hcf_32)descp );
+ HCFASSERT( ifbp->IFB_LastDesc[tx_rx_flag], (hcf_32)descp );
HCFASSERT( ifbp->IFB_LastDesc[tx_rx_flag]->next_desc_addr == NULL,
- (hcf_32)ifbp->IFB_LastDesc[tx_rx_flag]->next_desc_addr)
+ (hcf_32)ifbp->IFB_LastDesc[tx_rx_flag]->next_desc_addr);
// p->buf_cntl.cntl_stat |= DESC_DMA_OWNED;
ifbp->IFB_LastDesc[tx_rx_flag]->next_desc_addr = descp;
ifbp->IFB_LastDesc[tx_rx_flag]->next_desc_phys_addr = descp->desc_phys_addr;
port = HREG_RXDMA_PTR32;
if ( tx_rx_flag ) {
- p->BUF_SIZE |= DESC_EOP; // p points at the last descriptor in the caller-supplied descriptor chain
+ p->BUF_SIZE |= DESC_EOP; // p points at the last descriptor in the caller-supplied descriptor chain
descp->BUF_SIZE |= DESC_SOP;
port = HREG_TXDMA_PTR32;
}
@@ -1459,79 +1425,75 @@ put_frame_lst( IFBP ifbp, DESC_STRCT *descp, int tx_rx_flag )
/************************************************************************************************************
-*
-*.MODULE DESC_STRCT* hcf_dma_rx_get( IFBP ifbp )
-*.PURPOSE decapsulate a message and provides that message to the MSF.
-* reclaim all descriptors in the rx descriptor chain.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-*
-*.RETURNS
-* pointer to a FrameList
-*
-*.DESCRIPTION
-* hcf_dma_rx_get is intended to return a received frame when such a frame is deposited in Host memory by the
-* DMA engine. In addition hcf_dma_rx_get can be used to reclaim all descriptors in the rx descriptor chain
-* when the DMA Engine is disabled, e.g. as part of a driver unloading strategy.
-* hcf_dma_rx_get must be called repeatedly by the MSF when hcf_service_nic signals availability of a rx frame
-* through the HREG_EV_RDMAD flag of IFB_DmaPackets. The calling must stop when a NULL pointer is returned, at
-* which time the HREG_EV_RDMAD flag is also cleared by the HCF to arm the mechanism for the next frame
-* reception.
-* Regardless whether the DMA Engine is currently enabled (as controlled via hcf_cntl), if the DMA controller
-* deposited an Rx-frame in the Rx-DescriptorList, this frame is detached from the Rx-DescriptorList,
-* transformed into a FrameList (i.e. updating the housekeeping fields in the descriptors) and returned to the
-* caller.
-* If no such Rx-frame is available in the Rx-DescriptorList, the behavior of hcf_dma_rx_get depends on the
-* status of the DMA Engine.
-* If the DMA Engine is enabled, a NULL pointer is returned.
-* If the DMA Engine is disabled, the following strategy is used:
-* - the complete Rx-DescriptorList is returned. The DELWA Descriptor is not part of the Rx-DescriptorList.
-* - If there is no Rx-DescriptorList, the DELWA Descriptor is returned.
-* - If there is no DELWA Descriptor, a NULL pointer is returned.
-*
-* If the MSF performs an disable/enable sequence without exhausting the Rx-DescriptorList as described above,
-* the enable command will reset all house keeping information, i.e. already received but not yet by the MSF
-* retrieved frames are lost and the next frame will be received starting with the oldest descriptor.
-*
-* The HCF can be used in 2 fashions: with and without decapsulation for data transfer.
-* This is controlled at compile time by the HCF_ENC bit of the HCF_ENCAP system constant.
-* If appropriate, decapsulation is done by moving some data inside the buffers and updating the descriptors
-* accordingly.
-*!! ;?????where did I describe why a simple manipulation with the count values does not suffice?
-*
-*.DIAGRAM
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.MODULE DESC_STRCT* hcf_dma_rx_get( IFBP ifbp )
+ *.PURPOSE decapsulate a message and provides that message to the MSF.
+ * reclaim all descriptors in the rx descriptor chain.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ *
+ *.RETURNS
+ * pointer to a FrameList
+ *
+ *.DESCRIPTION
+ * hcf_dma_rx_get is intended to return a received frame when such a frame is deposited in Host memory by the
+ * DMA engine. In addition hcf_dma_rx_get can be used to reclaim all descriptors in the rx descriptor chain
+ * when the DMA Engine is disabled, e.g. as part of a driver unloading strategy.
+ * hcf_dma_rx_get must be called repeatedly by the MSF when hcf_service_nic signals availability of a rx frame
+ * through the HREG_EV_RDMAD flag of IFB_DmaPackets. The calling must stop when a NULL pointer is returned, at
+ * which time the HREG_EV_RDMAD flag is also cleared by the HCF to arm the mechanism for the next frame
+ * reception.
+ * Regardless whether the DMA Engine is currently enabled (as controlled via hcf_cntl), if the DMA controller
+ * deposited an Rx-frame in the Rx-DescriptorList, this frame is detached from the Rx-DescriptorList,
+ * transformed into a FrameList (i.e. updating the housekeeping fields in the descriptors) and returned to the
+ * caller.
+ * If no such Rx-frame is available in the Rx-DescriptorList, the behavior of hcf_dma_rx_get depends on the
+ * status of the DMA Engine.
+ * If the DMA Engine is enabled, a NULL pointer is returned.
+ * If the DMA Engine is disabled, the following strategy is used:
+ * - the complete Rx-DescriptorList is returned. The DELWA Descriptor is not part of the Rx-DescriptorList.
+ * - If there is no Rx-DescriptorList, the DELWA Descriptor is returned.
+ * - If there is no DELWA Descriptor, a NULL pointer is returned.
+ *
+ * If the MSF performs an disable/enable sequence without exhausting the Rx-DescriptorList as described above,
+ * the enable command will reset all house keeping information, i.e. already received but not yet by the MSF
+ * retrieved frames are lost and the next frame will be received starting with the oldest descriptor.
+ *
+ * The HCF can be used in 2 fashions: with and without decapsulation for data transfer.
+ * This is controlled at compile time by the HCF_ENC bit of the HCF_ENCAP system constant.
+ * If appropriate, decapsulation is done by moving some data inside the buffers and updating the descriptors
+ * accordingly.
+ *!! ;?????where did I describe why a simple manipulation with the count values does not suffice?
+ *
+ *.DIAGRAM
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
DESC_STRCT*
hcf_dma_rx_get (IFBP ifbp)
{
-DESC_STRCT *descp; // pointer to start of FrameList
+ DESC_STRCT *descp; // pointer to start of FrameList
descp = get_frame_lst( ifbp, DMA_RX );
- if ( descp && descp->buf_addr ) //!be aware of the missing curly bracket
+ if ( descp && descp->buf_addr ) {
- //skip decapsulation at confined descriptor
+ //skip decapsulation at confined descriptor
#if (HCF_ENCAP) == HCF_ENC
-#if (HCF_TYPE) & HCF_TYPE_CCX
- if ( ifbp->IFB_CKIPStat == HCF_ACT_CCX_OFF )
-#endif // HCF_TYPE_CCX
- {
-int i;
-DESC_STRCT *p = descp->next_desc_addr; //pointer to 2nd descriptor of frame
- HCFASSERT(p, 0)
+ int i;
+ DESC_STRCT *p = descp->next_desc_addr; //pointer to 2nd descriptor of frame
+ HCFASSERT(p, 0);
// The 2nd descriptor contains (maybe) a SNAP header plus part or whole of the payload.
//determine decapsulation sub-flag in RxFS
i = *(wci_recordp)&descp->buf_addr[HFS_STAT] & ( HFS_STAT_MSG_TYPE | HFS_STAT_ERR );
if ( i == HFS_STAT_TUNNEL ||
- ( i == HFS_STAT_1042 && hcf_encap( (wci_bufp)&p->buf_addr[HCF_DASA_SIZE] ) != ENC_TUNNEL )) {
+ ( i == HFS_STAT_1042 && hcf_encap( (wci_bufp)&p->buf_addr[HCF_DASA_SIZE] ) != ENC_TUNNEL )) {
// The 2nd descriptor contains a SNAP header plus part or whole of the payload.
- HCFASSERT( p->BUF_CNT == (p->buf_addr[5] + (p->buf_addr[4]<<8) + 2*6 + 2 - 8), p->BUF_CNT )
+ HCFASSERT( p->BUF_CNT == (p->buf_addr[5] + (p->buf_addr[4]<<8) + 2*6 + 2 - 8), p->BUF_CNT );
// perform decapsulation
- HCFASSERT(p->BUF_SIZE >=8, p->BUF_SIZE)
+ HCFASSERT(p->BUF_SIZE >=8, p->BUF_SIZE);
// move SA[2:5] in the second buffer to replace part of the SNAP header
for ( i=3; i >= 0; i--) p->buf_addr[i+8] = p->buf_addr[i];
// copy DA[0:5], SA[0:1] from first buffer to second buffer
@@ -1542,532 +1504,494 @@ DESC_STRCT *p = descp->next_desc_addr; //pointer to 2nd descriptor of frame
}
#endif // HCF_ENC
if ( descp == NULL ) ifbp->IFB_DmaPackets &= (hcf_16)~HREG_EV_RDMAD; //;?could be integrated into get_frame_lst
- HCFLOGEXIT( HCF_TRACE_DMA_RX_GET )
+ HCFLOGEXIT( HCF_TRACE_DMA_RX_GET );
return descp;
} // hcf_dma_rx_get
/************************************************************************************************************
-*
-*.MODULE void hcf_dma_rx_put( IFBP ifbp, DESC_STRCT *descp )
-*.PURPOSE supply buffers for receive purposes.
-* supply the Rx-DELWA descriptor.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* descp address of a DescriptorList
-*
-*.RETURNS N.A.
-*
-*.DESCRIPTION
-* This function is called by the MSF to supply the HCF with new/more buffers for receive purposes.
-* The HCF can be used in 2 fashions: with and without encapsulation for data transfer.
-* This is controlled at compile time by the HCF_ENC bit of the HCF_ENCAP system constant.
-* As a consequence, some additional constraints apply to the number of descriptor and the buffers associated
-* with the first 2 descriptors. Independent of the encapsulation feature, the COUNT fields are ignored.
-* A special case is the supplying of the DELWA descriptor, which must be supplied as the first descriptor.
-*
-* Assert fails if
-* - ifbp has a recognizable out-of-range value.
-* - NIC interrupts are not disabled while required by parameter action.
-* - in case decapsulation by the HCF is selected:
-* - The first databuffer does not have the exact size corresponding with the RxFS up to the 802.3 DestAddr
-* field (== 29 words).
-* - The FrameList does not consists of at least 2 Descriptors.
-* - The second databuffer does not have the minimum size of 8 bytes.
-*!! The 2nd part of the list of asserts should be kept in sync with put_frame_lst, in order to get
-*!! them in the WCI-spec !!!!
-* - DMA is not enabled
-* - descriptor list is NULL
-* - a descriptor in the descriptor list is not double word aligned
-* - a count of size field of a descriptor contains control bits, i.e. bits in the high order nibble.
-* - the DELWA descriptor is not a "singleton" DescriptorList.
-* - the DELWA descriptor is not the first Descriptor supplied
-* - a non_DMA descriptor is supplied before the DELWA Descriptor is supplied
-*!! - Possibly more checks could be added !!!!!!!!!!!!!
-*
-*.DIAGRAM
-*
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.MODULE void hcf_dma_rx_put( IFBP ifbp, DESC_STRCT *descp )
+ *.PURPOSE supply buffers for receive purposes.
+ * supply the Rx-DELWA descriptor.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * descp address of a DescriptorList
+ *
+ *.RETURNS N.A.
+ *
+ *.DESCRIPTION
+ * This function is called by the MSF to supply the HCF with new/more buffers for receive purposes.
+ * The HCF can be used in 2 fashions: with and without encapsulation for data transfer.
+ * This is controlled at compile time by the HCF_ENC bit of the HCF_ENCAP system constant.
+ * As a consequence, some additional constraints apply to the number of descriptor and the buffers associated
+ * with the first 2 descriptors. Independent of the encapsulation feature, the COUNT fields are ignored.
+ * A special case is the supplying of the DELWA descriptor, which must be supplied as the first descriptor.
+ *
+ * Assert fails if
+ * - ifbp has a recognizable out-of-range value.
+ * - NIC interrupts are not disabled while required by parameter action.
+ * - in case decapsulation by the HCF is selected:
+ * - The first databuffer does not have the exact size corresponding with the RxFS up to the 802.3 DestAddr
+ * field (== 29 words).
+ * - The FrameList does not consists of at least 2 Descriptors.
+ * - The second databuffer does not have the minimum size of 8 bytes.
+ *!! The 2nd part of the list of asserts should be kept in sync with put_frame_lst, in order to get
+ *!! them in the WCI-spec !!!!
+ * - DMA is not enabled
+ * - descriptor list is NULL
+ * - a descriptor in the descriptor list is not double word aligned
+ * - a count of size field of a descriptor contains control bits, i.e. bits in the high order nibble.
+ * - the DELWA descriptor is not a "singleton" DescriptorList.
+ * - the DELWA descriptor is not the first Descriptor supplied
+ * - a non_DMA descriptor is supplied before the DELWA Descriptor is supplied
+ *!! - Possibly more checks could be added !!!!!!!!!!!!!
+ *
+ *.DIAGRAM
+ *
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
void
hcf_dma_rx_put( IFBP ifbp, DESC_STRCT *descp )
{
- HCFLOGENTRY( HCF_TRACE_DMA_RX_PUT, 0xDA01 )
- HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic )
- HCFASSERT_INT
+ HCFLOGENTRY( HCF_TRACE_DMA_RX_PUT, 0xDA01 );
+ HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic );
+ HCFASSERT_INT;
put_frame_lst( ifbp, descp, DMA_RX );
#if HCF_ASSERT && (HCF_ENCAP) == HCF_ENC
if ( descp->buf_addr ) {
- HCFASSERT( descp->BUF_SIZE == HCF_DMA_RX_BUF1_SIZE, descp->BUF_SIZE )
- HCFASSERT( descp->next_desc_addr, 0 ) // first descriptor should be followed by another descriptor
+ HCFASSERT( descp->BUF_SIZE == HCF_DMA_RX_BUF1_SIZE, descp->BUF_SIZE );
+ HCFASSERT( descp->next_desc_addr, 0 ); // first descriptor should be followed by another descriptor
// The second DB is for SNAP and payload purposes. It should be a minimum of 12 bytes in size.
- HCFASSERT( descp->next_desc_addr->BUF_SIZE >= 12, descp->next_desc_addr->BUF_SIZE )
+ HCFASSERT( descp->next_desc_addr->BUF_SIZE >= 12, descp->next_desc_addr->BUF_SIZE );
}
#endif // HCFASSERT / HCF_ENC
- HCFLOGEXIT( HCF_TRACE_DMA_RX_PUT )
+ HCFLOGEXIT( HCF_TRACE_DMA_RX_PUT );
} // hcf_dma_rx_put
/************************************************************************************************************
-*
-*.MODULE DESC_STRCT* hcf_dma_tx_get( IFBP ifbp )
-*.PURPOSE DMA mode: reclaims and decapsulates packets in the tx descriptor chain if:
-* - A Tx packet has been copied from host-RAM into NIC-RAM by the DMA engine
-* - The Hermes/DMAengine have been disabled
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-*
-*.RETURNS
-* pointer to a reclaimed Tx packet.
-*
-*.DESCRIPTION
-* impact of the disable command:
-* When a non-empty pool of Tx descriptors exists (created by means of hcf_dma_put_tx), the MSF
-* is supposed to empty that pool by means of hcf_dma_tx_get calls after the disable in an
-* disable/enable sequence.
-*
-*.DIAGRAM
-*
-*.NOTICE
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.MODULE DESC_STRCT* hcf_dma_tx_get( IFBP ifbp )
+ *.PURPOSE DMA mode: reclaims and decapsulates packets in the tx descriptor chain if:
+ * - A Tx packet has been copied from host-RAM into NIC-RAM by the DMA engine
+ * - The Hermes/DMAengine have been disabled
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ *
+ *.RETURNS
+ * pointer to a reclaimed Tx packet.
+ *
+ *.DESCRIPTION
+ * impact of the disable command:
+ * When a non-empty pool of Tx descriptors exists (created by means of hcf_dma_put_tx), the MSF
+ * is supposed to empty that pool by means of hcf_dma_tx_get calls after the disable in an
+ * disable/enable sequence.
+ *
+ *.DIAGRAM
+ *
+ *.NOTICE
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
DESC_STRCT*
hcf_dma_tx_get( IFBP ifbp )
{
-DESC_STRCT *descp; // pointer to start of FrameList
+ DESC_STRCT *descp; // pointer to start of FrameList
descp = get_frame_lst( ifbp, DMA_TX );
- if ( descp && descp->buf_addr ) //!be aware of the missing curly bracket
- //skip decapsulation at confined descriptor
+ if ( descp && descp->buf_addr ) {
+ //skip decapsulation at confined descriptor
#if (HCF_ENCAP) == HCF_ENC
- if ( ( descp->BUF_CNT == HFS_TYPE )
-#if (HCF_TYPE) & HCF_TYPE_CCX
- || ( descp->BUF_CNT == HFS_DAT )
-#endif // HCF_TYPE_CCX
- ) { // perform decapsulation if needed
+ if ( ( descp->BUF_CNT == HFS_TYPE )) {
+ // perform decapsulation if needed
descp->next_desc_addr->buf_phys_addr -= HCF_DASA_SIZE;
- descp->next_desc_addr->BUF_CNT += HCF_DASA_SIZE;
+ descp->next_desc_addr->BUF_CNT += HCF_DASA_SIZE;
}
#endif // HCF_ENC
+ }
if ( descp == NULL ) { //;?could be integrated into get_frame_lst
ifbp->IFB_DmaPackets &= (hcf_16)~HREG_EV_TDMAD;
}
- HCFLOGEXIT( HCF_TRACE_DMA_TX_GET )
+ HCFLOGEXIT( HCF_TRACE_DMA_TX_GET );
return descp;
} // hcf_dma_tx_get
/************************************************************************************************************
-*
-*.MODULE void hcf_dma_tx_put( IFBP ifbp, DESC_STRCT *descp, hcf_16 tx_cntl )
-*.PURPOSE puts a packet in the Tx DMA queue in host ram and kicks off the TxDma engine.
-* supply the Tx-DELWA descriptor.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* descp address of Tx Descriptor Chain (i.e. a single Tx frame)
-* tx_cntl indicates MAC-port and (Hermes) options
-*
-*.RETURNS N.A.
-*
-*.DESCRIPTION
-* The HCF can be used in 2 fashions: with and without encapsulation for data transfer.
-* This is controlled at compile time by the HCF_ENC bit of the HCF_ENCAP system constant.
-*
-* Regardless of the HCF_ENCAP system constant, the descriptor list created to describe the frame to be
-* transmitted, must supply space to contain the 802.11 header, preceding the actual frame to be transmitted.
-* Basically, this only supplies working storage to the HCF which passes this on to the DMA engine.
-* As a consequence the contents of this space do not matter.
-* Nevertheless BUF_CNT must take in account this storage.
-* This working space to contain the 802.11 header may not be fragmented, the first buffer must be
-* sufficiently large to contain at least the 802.11 header, i.e. HFS_ADDR_DEST (29 words or 0x3A bytes).
-* This way, the HCF can simply, regardless whether or not the HCF encapsulates the frame, write the parameter
-* tx_cntl at offset 0x36 (HFS_TX_CNTL) in the first buffer.
-* Note that it is allowed to have part or all of the actual frame represented by the first descriptor as long
-* as the requirement for storage for the 802.11 header is met, i.e. the 802.3 frame starts at offset
-* HFS_ADDR_DEST.
-* Except for the Assert on the 1st buffer in case of Encapsualtion, the SIZE fields are ignored.
-*
-* In case the encapsulation feature is compiled in, there are the following additional requirements.
-* o The BUF_CNT of the first buffer changes from a minimum of 0x3A bytes to exactly 0x3A, i.e. the workspace
-* to store the 802.11 header
-* o The BUF_SIZE of the first buffer is at least the space needed to store the
-* - 802.11 header (29 words)
-* - 802.3 header, i.e. 12 bytes addressing information and 2 bytes length field
-* - 6 bytes SNAP-header
-* This results in 39 words or 0x4E bytes or HFS_TYPE.
-* Note that if the BUF_SIZE is larger than 0x4E, this surplus is not used.
-* o The actual frame begins in the 2nd descriptor (which is already implied by the BUF_CNT == 0x3A requirement) and the associated buffer contains at least the 802.3 header, i.e. the 14 bytes representing addressing information and length/type field
-*
-* When the HCF does not encapsulates (i.e. length/type field <= 1500), no changes are made to descriptors
-* or buffers.
-*
-* When the HCF actually encapsulates (i.e. length/type field > 1500), it successively writes, starting at
-* offset HFS_ADDR_DEST (0x3A) in the first buffer:
-* - the 802.3 addressing information, copied from the begin of the second buffer
-* - the frame length, derived from the total length of the individual fragments, corrected for the SNAP
-* header length and Type field and ignoring the Destination Address, Source Address and Length field
-* - the appropriate snap header (Tunnel or 1042, depending on the value of the type field).
-*
-* The information in the first two descriptors is adjusted accordingly:
-* - the first descriptor count is changed from 0x3A to 0x4E (HFS_TYPE), which matches 0x3A + 12 + 2 + 6
-* - the second descriptor count is decreased by 12, being the moved addressing information
-* - the second descriptor (physical) buffer address is increased by 12.
-*
-* When the descriptors are returned by hcf_dma_tx_get, the transformation of the first two descriptors is
-* undone.
-*
-* Under any of the above scenarios, the assert BUF_CNT <= BUF_SIZE must be true for all descriptors
-* In case of encapsulation, BUF_SIZE of the 1st descriptor is asserted to be at least HFS_TYPE (0x4E), so it is NOT tested.
-*
-* Assert fails if
-* - ifbp has a recognizable out-of-range value.
-* - tx_cntl has a recognizable out-of-range value.
-* - NIC interrupts are not disabled while required by parameter action.
-* - in case encapsulation by the HCF is selected:
-* - The FrameList does not consists of at least 2 Descriptors.
-* - The first databuffer does not contain exactly the (space for) the 802.11 header (== 28 words)
-* - The first databuffer does not have a size to additionally accommodate the 802.3 header and the
-* SNAP header of the frame after encapsulation (== 39 words).
-* - The second databuffer does not contain at least DA, SA and 'type/length' (==14 bytes or 7 words)
-*!! The 2nd part of the list of asserts should be kept in sync with put_frame_lst, in order to get
-*!! them in the WCI-spec !!!!
-* - DMA is not enabled
-* - descriptor list is NULL
-* - a descriptor in the descriptor list is not double word aligned
-* - a count of size field of a descriptor contains control bits, i.e. bits in the high order nibble.
-* - the DELWA descriptor is not a "singleton" DescriptorList.
-* - the DELWA descriptor is not the first Descriptor supplied
-* - a non_DMA descriptor is supplied before the DELWA Descriptor is supplied
-*!! - Possibly more checks could be added !!!!!!!!!!!!!
-*.DIAGRAM
-*
-*.NOTICE
-*
-*.ENDDOC END DOCUMENTATION
-*
-*
-*1: Write tx_cntl parameter to HFS_TX_CNTL field into the Hermes-specific header in buffer 1
-*4: determine whether encapsulation is needed and write the type (tunnel or 1042) already at the appropriate
-* offset in the 1st buffer
-*6: Build the encapsualtion enveloppe in the free space at the end of the 1st buffer
-* - Copy DA/SA fields from the 2nd buffer
-* - Calculate total length of the message (snap-header + type-field + the length of all buffer fragments
-* associated with the 802.3 frame (i.e all descriptors except the first), but not the DestinationAddress,
-* SourceAddress and length-field)
-* Assert the message length
-* Write length. Note that the message is in BE format, hence on LE platforms the length must be converted
-* ;? THIS IS NOT WHAT CURRENTLY IS IMPLEMENTED
-* - Write snap header. Note that the last byte of the snap header is NOT copied, that byte is already in
-* place as result of the call to hcf_encap.
-* Note that there are many ways to skin a cat. To express the offsets in the 1st buffer while writing
-* the snap header, HFS_TYPE is chosen as a reference point to make it easier to grasp that the snap header
-* and encapsualtion type are at least relative in the right.
-*8: modify 1st descriptor to reflect moved part of the 802.3 header + Snap-header
-* modify 2nd descriptor to skip the moved part of the 802.3 header (DA/SA
-*10: set each descriptor to 'DMA owned', clear all other control bits.
-* Set SOP bit on first descriptor. Set EOP bit on last descriptor.
-*12: Either append the current frame to an existing descriptor list or
-*14: create a list beginning with the current frame
-*16: remember the new end of the list
-*20: hand the frame over to the DMA engine
-************************************************************************************************************/
+ *
+ *.MODULE void hcf_dma_tx_put( IFBP ifbp, DESC_STRCT *descp, hcf_16 tx_cntl )
+ *.PURPOSE puts a packet in the Tx DMA queue in host ram and kicks off the TxDma engine.
+ * supply the Tx-DELWA descriptor.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * descp address of Tx Descriptor Chain (i.e. a single Tx frame)
+ * tx_cntl indicates MAC-port and (Hermes) options
+ *
+ *.RETURNS N.A.
+ *
+ *.DESCRIPTION
+ * The HCF can be used in 2 fashions: with and without encapsulation for data transfer.
+ * This is controlled at compile time by the HCF_ENC bit of the HCF_ENCAP system constant.
+ *
+ * Regardless of the HCF_ENCAP system constant, the descriptor list created to describe the frame to be
+ * transmitted, must supply space to contain the 802.11 header, preceding the actual frame to be transmitted.
+ * Basically, this only supplies working storage to the HCF which passes this on to the DMA engine.
+ * As a consequence the contents of this space do not matter.
+ * Nevertheless BUF_CNT must take in account this storage.
+ * This working space to contain the 802.11 header may not be fragmented, the first buffer must be
+ * sufficiently large to contain at least the 802.11 header, i.e. HFS_ADDR_DEST (29 words or 0x3A bytes).
+ * This way, the HCF can simply, regardless whether or not the HCF encapsulates the frame, write the parameter
+ * tx_cntl at offset 0x36 (HFS_TX_CNTL) in the first buffer.
+ * Note that it is allowed to have part or all of the actual frame represented by the first descriptor as long
+ * as the requirement for storage for the 802.11 header is met, i.e. the 802.3 frame starts at offset
+ * HFS_ADDR_DEST.
+ * Except for the Assert on the 1st buffer in case of Encapsualtion, the SIZE fields are ignored.
+ *
+ * In case the encapsulation feature is compiled in, there are the following additional requirements.
+ * o The BUF_CNT of the first buffer changes from a minimum of 0x3A bytes to exactly 0x3A, i.e. the workspace
+ * to store the 802.11 header
+ * o The BUF_SIZE of the first buffer is at least the space needed to store the
+ * - 802.11 header (29 words)
+ * - 802.3 header, i.e. 12 bytes addressing information and 2 bytes length field
+ * - 6 bytes SNAP-header
+ * This results in 39 words or 0x4E bytes or HFS_TYPE.
+ * Note that if the BUF_SIZE is larger than 0x4E, this surplus is not used.
+ * o The actual frame begins in the 2nd descriptor (which is already implied by the BUF_CNT == 0x3A requirement) and the associated buffer contains at least the 802.3 header, i.e. the 14 bytes representing addressing information and length/type field
+ *
+ * When the HCF does not encapsulates (i.e. length/type field <= 1500), no changes are made to descriptors
+ * or buffers.
+ *
+ * When the HCF actually encapsulates (i.e. length/type field > 1500), it successively writes, starting at
+ * offset HFS_ADDR_DEST (0x3A) in the first buffer:
+ * - the 802.3 addressing information, copied from the begin of the second buffer
+ * - the frame length, derived from the total length of the individual fragments, corrected for the SNAP
+ * header length and Type field and ignoring the Destination Address, Source Address and Length field
+ * - the appropriate snap header (Tunnel or 1042, depending on the value of the type field).
+ *
+ * The information in the first two descriptors is adjusted accordingly:
+ * - the first descriptor count is changed from 0x3A to 0x4E (HFS_TYPE), which matches 0x3A + 12 + 2 + 6
+ * - the second descriptor count is decreased by 12, being the moved addressing information
+ * - the second descriptor (physical) buffer address is increased by 12.
+ *
+ * When the descriptors are returned by hcf_dma_tx_get, the transformation of the first two descriptors is
+ * undone.
+ *
+ * Under any of the above scenarios, the assert BUF_CNT <= BUF_SIZE must be true for all descriptors
+ * In case of encapsulation, BUF_SIZE of the 1st descriptor is asserted to be at least HFS_TYPE (0x4E), so it is NOT tested.
+ *
+ * Assert fails if
+ * - ifbp has a recognizable out-of-range value.
+ * - tx_cntl has a recognizable out-of-range value.
+ * - NIC interrupts are not disabled while required by parameter action.
+ * - in case encapsulation by the HCF is selected:
+ * - The FrameList does not consists of at least 2 Descriptors.
+ * - The first databuffer does not contain exactly the (space for) the 802.11 header (== 28 words)
+ * - The first databuffer does not have a size to additionally accommodate the 802.3 header and the
+ * SNAP header of the frame after encapsulation (== 39 words).
+ * - The second databuffer does not contain at least DA, SA and 'type/length' (==14 bytes or 7 words)
+ *!! The 2nd part of the list of asserts should be kept in sync with put_frame_lst, in order to get
+ *!! them in the WCI-spec !!!!
+ * - DMA is not enabled
+ * - descriptor list is NULL
+ * - a descriptor in the descriptor list is not double word aligned
+ * - a count of size field of a descriptor contains control bits, i.e. bits in the high order nibble.
+ * - the DELWA descriptor is not a "singleton" DescriptorList.
+ * - the DELWA descriptor is not the first Descriptor supplied
+ * - a non_DMA descriptor is supplied before the DELWA Descriptor is supplied
+ *!! - Possibly more checks could be added !!!!!!!!!!!!!
+ *.DIAGRAM
+ *
+ *.NOTICE
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ *
+ *1: Write tx_cntl parameter to HFS_TX_CNTL field into the Hermes-specific header in buffer 1
+ *4: determine whether encapsulation is needed and write the type (tunnel or 1042) already at the appropriate
+ * offset in the 1st buffer
+ *6: Build the encapsualtion enveloppe in the free space at the end of the 1st buffer
+ * - Copy DA/SA fields from the 2nd buffer
+ * - Calculate total length of the message (snap-header + type-field + the length of all buffer fragments
+ * associated with the 802.3 frame (i.e all descriptors except the first), but not the DestinationAddress,
+ * SourceAddress and length-field)
+ * Assert the message length
+ * Write length. Note that the message is in BE format, hence on LE platforms the length must be converted
+ * ;? THIS IS NOT WHAT CURRENTLY IS IMPLEMENTED
+ * - Write snap header. Note that the last byte of the snap header is NOT copied, that byte is already in
+ * place as result of the call to hcf_encap.
+ * Note that there are many ways to skin a cat. To express the offsets in the 1st buffer while writing
+ * the snap header, HFS_TYPE is chosen as a reference point to make it easier to grasp that the snap header
+ * and encapsualtion type are at least relative in the right.
+ *8: modify 1st descriptor to reflect moved part of the 802.3 header + Snap-header
+ * modify 2nd descriptor to skip the moved part of the 802.3 header (DA/SA
+ *10: set each descriptor to 'DMA owned', clear all other control bits.
+ * Set SOP bit on first descriptor. Set EOP bit on last descriptor.
+ *12: Either append the current frame to an existing descriptor list or
+ *14: create a list beginning with the current frame
+ *16: remember the new end of the list
+ *20: hand the frame over to the DMA engine
+ ************************************************************************************************************/
void
hcf_dma_tx_put( IFBP ifbp, DESC_STRCT *descp, hcf_16 tx_cntl )
{
-DESC_STRCT *p = descp->next_desc_addr;
-int i;
+ DESC_STRCT *p = descp->next_desc_addr;
+ int i;
#if HCF_ASSERT
int x = ifbp->IFB_FWIdentity.comp_id == COMP_ID_FW_AP ? tx_cntl & ~HFS_TX_CNTL_PORT : tx_cntl;
- HCFASSERT( (x & ~HCF_TX_CNTL_MASK ) == 0, tx_cntl )
+ HCFASSERT( (x & ~HCF_TX_CNTL_MASK ) == 0, tx_cntl );
#endif // HCF_ASSERT
- HCFLOGENTRY( HCF_TRACE_DMA_TX_PUT, 0xDA03 )
- HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic )
- HCFASSERT_INT
- HCFASSERT( ( ifbp->IFB_CntlOpt & (USE_DMA|DMA_ENABLED) ) == (USE_DMA|DMA_ENABLED), ifbp->IFB_CntlOpt)
+ HCFLOGENTRY( HCF_TRACE_DMA_TX_PUT, 0xDA03 );
+ HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic );
+ HCFASSERT_INT;
+ HCFASSERT( ( ifbp->IFB_CntlOpt & (USE_DMA|DMA_ENABLED) ) == (USE_DMA|DMA_ENABLED), ifbp->IFB_CntlOpt);
if ( descp->buf_addr ) {
- *(hcf_16*)(descp->buf_addr + HFS_TX_CNTL) = tx_cntl; /*1*/
+ *(hcf_16*)(descp->buf_addr + HFS_TX_CNTL) = tx_cntl; /*1*/
#if (HCF_ENCAP) == HCF_ENC
- HCFASSERT( descp->next_desc_addr, 0 ) //at least 2 descripors
- HCFASSERT( descp->BUF_CNT == HFS_ADDR_DEST, descp->BUF_CNT ) //exact length required for 1st buffer
- HCFASSERT( descp->BUF_SIZE >= HCF_DMA_TX_BUF1_SIZE, descp->BUF_SIZE ) //minimal storage for encapsulation
- HCFASSERT( p->BUF_CNT >= 14, p->BUF_CNT ); //at least DA, SA and 'type' in 2nd buffer
-
-#if (HCF_TYPE) & HCF_TYPE_CCX
- /* if we are doing PPK +/- CMIC, or we are sending a DDP frame */
- if ( ( ifbp->IFB_CKIPStat == HCF_ACT_CCX_ON ) ||
- ( ( p->BUF_CNT >= 20 ) && ( ifbp->IFB_CKIPStat == HCF_ACT_CCX_OFF ) &&
- ( p->buf_addr[12] == 0xAA ) && ( p->buf_addr[13] == 0xAA ) &&
- ( p->buf_addr[14] == 0x03 ) && ( p->buf_addr[15] == 0x00 ) &&
- ( p->buf_addr[16] == 0x40 ) && ( p->buf_addr[17] == 0x96 ) &&
- ( p->buf_addr[18] == 0x00 ) && ( p->buf_addr[19] == 0x00 )))
- {
- /* copy the DA/SA to the first buffer */
- for ( i = 0; i < HCF_DASA_SIZE; i++ ) {
- descp->buf_addr[i + HFS_ADDR_DEST] = p->buf_addr[i];
+ HCFASSERT( descp->next_desc_addr, 0 ); //at least 2 descripors
+ HCFASSERT( descp->BUF_CNT == HFS_ADDR_DEST, descp->BUF_CNT ); //exact length required for 1st buffer
+ HCFASSERT( descp->BUF_SIZE >= HCF_DMA_TX_BUF1_SIZE, descp->BUF_SIZE ); //minimal storage for encapsulation
+ HCFASSERT( p->BUF_CNT >= 14, p->BUF_CNT ); //at least DA, SA and 'type' in 2nd buffer
+
+ descp->buf_addr[HFS_TYPE-1] = hcf_encap(&descp->next_desc_addr->buf_addr[HCF_DASA_SIZE]); /*4*/
+ if ( descp->buf_addr[HFS_TYPE-1] != ENC_NONE ) {
+ for ( i=0; i < HCF_DASA_SIZE; i++ ) { /*6*/
+ descp->buf_addr[i + HFS_ADDR_DEST] = descp->next_desc_addr->buf_addr[i];
}
- /* calculate the length of the second fragment only */
- i = 0;
- do { i += p->BUF_CNT; } while( p = p->next_desc_addr );
- i -= HCF_DASA_SIZE ;
- /* convert the length field to big endian, using the endian friendly macros */
- i = CNV_SHORT_TO_BIG(i); //!! this converts ONLY on LE platforms, how does that relate to the non-CCX code
- *(hcf_16*)(&descp->buf_addr[HFS_LEN]) = (hcf_16)i;
- descp->BUF_CNT = HFS_DAT;
- // modify 2nd descriptor to skip the 'Da/Sa' fields
- descp->next_desc_addr->buf_phys_addr += HCF_DASA_SIZE;
- descp->next_desc_addr->BUF_CNT -= HCF_DASA_SIZE;
- }
- else
-#endif // HCF_TYPE_CCX
- {
- descp->buf_addr[HFS_TYPE-1] = hcf_encap(&descp->next_desc_addr->buf_addr[HCF_DASA_SIZE]); /*4*/
- if ( descp->buf_addr[HFS_TYPE-1] != ENC_NONE ) {
- for ( i=0; i < HCF_DASA_SIZE; i++ ) { /*6*/
- descp->buf_addr[i + HFS_ADDR_DEST] = descp->next_desc_addr->buf_addr[i];
- }
- i = sizeof(snap_header) + 2 - ( 2*6 + 2 );
- do { i += p->BUF_CNT; } while ( ( p = p->next_desc_addr ) != NULL );
- *(hcf_16*)(&descp->buf_addr[HFS_LEN]) = CNV_END_SHORT(i); //!! this converts on ALL platforms, how does that relate to the CCX code
- for ( i=0; i < sizeof(snap_header) - 1; i++) {
- descp->buf_addr[HFS_TYPE - sizeof(snap_header) + i] = snap_header[i];
- }
- descp->BUF_CNT = HFS_TYPE; /*8*/
- descp->next_desc_addr->buf_phys_addr += HCF_DASA_SIZE;
- descp->next_desc_addr->BUF_CNT -= HCF_DASA_SIZE;
+ i = sizeof(snap_header) + 2 - ( 2*6 + 2 );
+ do { i += p->BUF_CNT; } while ( ( p = p->next_desc_addr ) != NULL );
+ *(hcf_16*)(&descp->buf_addr[HFS_LEN]) = CNV_END_SHORT(i); //!! this converts on ALL platforms, how does that relate to the CCX code
+ for ( i=0; i < sizeof(snap_header) - 1; i++) {
+ descp->buf_addr[HFS_TYPE - sizeof(snap_header) + i] = snap_header[i];
}
+ descp->BUF_CNT = HFS_TYPE; /*8*/
+ descp->next_desc_addr->buf_phys_addr += HCF_DASA_SIZE;
+ descp->next_desc_addr->BUF_CNT -= HCF_DASA_SIZE;
}
#endif // HCF_ENC
- }
+ }
put_frame_lst( ifbp, descp, DMA_TX );
- HCFLOGEXIT( HCF_TRACE_DMA_TX_PUT )
+ HCFLOGEXIT( HCF_TRACE_DMA_TX_PUT );
} // hcf_dma_tx_put
#endif // HCF_DMA
-#if (HCF_DL_ONLY) == 0
/************************************************************************************************************
-*
-*.MODULE hcf_8 hcf_encap( wci_bufp type )
-*.PURPOSE test whether RFC1042 or Bridge-Tunnel encapsulation is needed.
-*
-*.ARGUMENTS
-* type (Far) pointer to the (Big Endian) Type/Length field in the message
-*
-*.RETURNS
-* ENC_NONE len/type is "len" ( (BIG_ENDIAN)type <= 1500 )
-* ENC_TUNNEL len/type is "type" and 0x80F3 or 0x8137
-* ENC_1042 len/type is "type" but not 0x80F3 or 0x8137
-*
-*.CONDITIONS
-* NIC Interrupts d.c
-*
-*.DESCRIPTION
-* Type must point to the Len/Type field of the message, this is the 2-byte field immediately after the 6 byte
-* Destination Address and 6 byte Source Address. The 2 successive bytes addressed by type are interpreted as
-* a Big Endian value. If that value is less than or equal to 1500, the message is assumed to be in 802.3
-* format. Otherwise the message is assumed to be in Ethernet-II format. Depending on the value of Len/Typ,
-* Bridge Tunnel or RFC1042 encapsulation is needed.
-*
-*.DIAGRAM
-*
-* 1: presume 802.3, hence preset return value at ENC_NONE
-* 2: convert type from "network" Endian format to native Endian
-* 4: the litmus test to distinguish type and len.
-* The hard code "magic" value of 1500 is intentional and should NOT be replaced by a mnemonic because it is
-* not related at all to the maximum frame size supported by the Hermes.
-* 6: check type against:
-* 0x80F3 //AppleTalk Address Resolution Protocol (AARP)
-* 0x8137 //IPX
-* to determine the type of encapsulation
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
-#if HCF_ENCAP //i.e HCF_ENC or HCF_ENC_SUP
-#if ! ( (HCF_ENCAP) & HCF_ENC_SUP )
-HCF_STATIC
-#endif // HCF_ENCAP
-hcf_8
+ *
+ *.MODULE hcf_8 hcf_encap( wci_bufp type )
+ *.PURPOSE test whether RFC1042 or Bridge-Tunnel encapsulation is needed.
+ *
+ *.ARGUMENTS
+ * type (Far) pointer to the (Big Endian) Type/Length field in the message
+ *
+ *.RETURNS
+ * ENC_NONE len/type is "len" ( (BIG_ENDIAN)type <= 1500 )
+ * ENC_TUNNEL len/type is "type" and 0x80F3 or 0x8137
+ * ENC_1042 len/type is "type" but not 0x80F3 or 0x8137
+ *
+ *.CONDITIONS
+ * NIC Interrupts d.c
+ *
+ *.DESCRIPTION
+ * Type must point to the Len/Type field of the message, this is the 2-byte field immediately after the 6 byte
+ * Destination Address and 6 byte Source Address. The 2 successive bytes addressed by type are interpreted as
+ * a Big Endian value. If that value is less than or equal to 1500, the message is assumed to be in 802.3
+ * format. Otherwise the message is assumed to be in Ethernet-II format. Depending on the value of Len/Typ,
+ * Bridge Tunnel or RFC1042 encapsulation is needed.
+ *
+ *.DIAGRAM
+ *
+ * 1: presume 802.3, hence preset return value at ENC_NONE
+ * 2: convert type from "network" Endian format to native Endian
+ * 4: the litmus test to distinguish type and len.
+ * The hard code "magic" value of 1500 is intentional and should NOT be replaced by a mnemonic because it is
+ * not related at all to the maximum frame size supported by the Hermes.
+ * 6: check type against:
+ * 0x80F3 //AppleTalk Address Resolution Protocol (AARP)
+ * 0x8137 //IPX
+ * to determine the type of encapsulation
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
+HCF_STATIC hcf_8
hcf_encap( wci_bufp type )
{
-hcf_8 rc = ENC_NONE; /* 1 */
-hcf_16 t = (hcf_16)(*type<<8) + *(type+1); /* 2 */
+ hcf_8 rc = ENC_NONE; /* 1 */
+ hcf_16 t = (hcf_16)(*type<<8) + *(type+1); /* 2 */
- if ( t > 1500 ) { /* 4 */
+ if ( t > 1500 ) { /* 4 */
if ( t == 0x8137 || t == 0x80F3 ) {
- rc = ENC_TUNNEL; /* 6 */
+ rc = ENC_TUNNEL; /* 6 */
} else {
rc = ENC_1042;
}
}
return rc;
} // hcf_encap
-#endif // HCF_ENCAP
-#endif // HCF_DL_ONLY
/************************************************************************************************************
-*
-*.MODULE int hcf_get_info( IFBP ifbp, LTVP ltvp )
-*.PURPOSE Obtains transient and persistent configuration information from the Card and from the HCF.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* ltvp address of LengthTypeValue structure specifying the "what" and the "how much" of the
-* information to be collected from the HCF or from the Hermes
-*
-*.RETURNS
-* HCF_ERR_LEN The provided buffer was too small
-* HCF_SUCCESS Success
-*!! via cmd_exe ( type >= CFG_RID_FW_MIN )
-* HCF_ERR_NO_NIC NIC removed during retrieval
-* HCF_ERR_TIME_OUT Expected Hermes event did not occur in expected time
-*!! via cmd_exe and setup_bap (type >= CFG_RID_FW_MIN )
-* HCF_ERR_DEFUNCT_... HCF is in defunct mode (bits 0x7F reflect cause)
-*
-*.DESCRIPTION
-* The T-field of the LTV-record (provided by the MSF in parameter ltvp) specifies the RID wanted. The RID
-* information identified by the T-field is copied into the V-field.
-* On entry, the L-field specifies the size of the buffer, also called the "Initial DataLength". The L-value
-* includes the size of the T-field, but not the size of the L-field itself.
-* On return, the L-field indicates the number of words actually contained by the Type and Value fields.
-* As the size of the Type field in the LTV-record is included in the "Initial DataLength" of the record, the
-* V-field can contain at most "Initial DataLength" - 1 words of data.
-* Copying stops if either the complete Information is copied or if the number of words indicated by the
-* "Initial DataLength" were copied. The "Initial DataLength" acts as a safe guard against Configuration
-* Information blocks that have different sizes for different F/W versions, e.g. when later versions support
-* more tallies than earlier versions.
-* If the size of Value field of the RID exceeds the size of the "Initial DataLength" -1, as much data
-* as fits is copied, and an error status of HCF_ERR_LEN is returned.
-*
-* It is the responsibility of the MSF to detect card removal and re-insertion and not call the HCF when the
-* NIC is absent. The MSF cannot, however, timely detect a Card removal if the Card is removed while
-* hcf_get_info is in progress. Therefore, the HCF performs its own check on Card presence after the read
-* operation of the NIC data. If the Card is not present or removed during the execution of hcf_get_info,
-* HCF_ERR_NO_NIC is returned and the content of the Data Buffer is unpredictable. This check is not performed
-* in case of the "HCF embedded" pseudo RIDs like CFG_TALLIES.
-*
-* Assert fails if
-* - ifbp has a recognizable out-of-range value.
-* - reentrancy, may be caused by calling hcf_functions without adequate protection
-* against NIC interrupts or multi-threading.
-* - ltvp is a NULL pointer.
-* - length field of the LTV-record at entry is 0 or 1 or has an excessive value (i.e. exceeds HCF_MAX_LTV).
-* - type field of the LTV-record is invalid.
-*
-*.DIAGRAM
-* Hcf_get_mb_info copies the contents of the oldest MailBox Info block in the MailBox to PC RAM. If len is
-* less than the size of the MailBox Info block, only as much as fits in the PC RAM buffer is copied. After
-* the copying the MailBox Read pointer is updated to point to the next MailBox Info block, hence the
-* remainder of an "oversized" MailBox Info block is lost. The truncation of the MailBox Info block is NOT
-* reflected in the return status. Note that hcf_get_info guarantees the length of the PC RAM buffer meets
-* the minimum requirements of at least 2, so no PC RAM buffer overrun.
-*
-* Calling hcf_get_mb_info when their is no MailBox Info block available or when there is no MailBox at all,
-* results in a "NULL" MailBox Info block.
-*
-*12: see NOTICE
-*17: The return status of cmd_wait and the first hcfio_in_string can be ignored, because when one fails, the
-* other fails via the IFB_DefunctStat mechanism
-*20: "HCFASSERT( rc == HCF_SUCCESS, rc )" is not suitable because this will always trigger as side effect of
-* the HCFASSERT in hcf_put_info which calls hcf_get_info to figure out whether the RID exists at all.
+ *
+ *.MODULE int hcf_get_info( IFBP ifbp, LTVP ltvp )
+ *.PURPOSE Obtains transient and persistent configuration information from the Card and from the HCF.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * ltvp address of LengthTypeValue structure specifying the "what" and the "how much" of the
+ * information to be collected from the HCF or from the Hermes
+ *
+ *.RETURNS
+ * HCF_ERR_LEN The provided buffer was too small
+ * HCF_SUCCESS Success
+ *!! via cmd_exe ( type >= CFG_RID_FW_MIN )
+ * HCF_ERR_NO_NIC NIC removed during retrieval
+ * HCF_ERR_TIME_OUT Expected Hermes event did not occur in expected time
+ *!! via cmd_exe and setup_bap (type >= CFG_RID_FW_MIN )
+ * HCF_ERR_DEFUNCT_... HCF is in defunct mode (bits 0x7F reflect cause)
+ *
+ *.DESCRIPTION
+ * The T-field of the LTV-record (provided by the MSF in parameter ltvp) specifies the RID wanted. The RID
+ * information identified by the T-field is copied into the V-field.
+ * On entry, the L-field specifies the size of the buffer, also called the "Initial DataLength". The L-value
+ * includes the size of the T-field, but not the size of the L-field itself.
+ * On return, the L-field indicates the number of words actually contained by the Type and Value fields.
+ * As the size of the Type field in the LTV-record is included in the "Initial DataLength" of the record, the
+ * V-field can contain at most "Initial DataLength" - 1 words of data.
+ * Copying stops if either the complete Information is copied or if the number of words indicated by the
+ * "Initial DataLength" were copied. The "Initial DataLength" acts as a safe guard against Configuration
+ * Information blocks that have different sizes for different F/W versions, e.g. when later versions support
+ * more tallies than earlier versions.
+ * If the size of Value field of the RID exceeds the size of the "Initial DataLength" -1, as much data
+ * as fits is copied, and an error status of HCF_ERR_LEN is returned.
+ *
+ * It is the responsibility of the MSF to detect card removal and re-insertion and not call the HCF when the
+ * NIC is absent. The MSF cannot, however, timely detect a Card removal if the Card is removed while
+ * hcf_get_info is in progress. Therefore, the HCF performs its own check on Card presence after the read
+ * operation of the NIC data. If the Card is not present or removed during the execution of hcf_get_info,
+ * HCF_ERR_NO_NIC is returned and the content of the Data Buffer is unpredictable. This check is not performed
+ * in case of the "HCF embedded" pseudo RIDs like CFG_TALLIES.
+ *
+ * Assert fails if
+ * - ifbp has a recognizable out-of-range value.
+ * - reentrancy, may be caused by calling hcf_functions without adequate protection
+ * against NIC interrupts or multi-threading.
+ * - ltvp is a NULL pointer.
+ * - length field of the LTV-record at entry is 0 or 1 or has an excessive value (i.e. exceeds HCF_MAX_LTV).
+ * - type field of the LTV-record is invalid.
+ *
+ *.DIAGRAM
+ * Hcf_get_mb_info copies the contents of the oldest MailBox Info block in the MailBox to PC RAM. If len is
+ * less than the size of the MailBox Info block, only as much as fits in the PC RAM buffer is copied. After
+ * the copying the MailBox Read pointer is updated to point to the next MailBox Info block, hence the
+ * remainder of an "oversized" MailBox Info block is lost. The truncation of the MailBox Info block is NOT
+ * reflected in the return status. Note that hcf_get_info guarantees the length of the PC RAM buffer meets
+ * the minimum requirements of at least 2, so no PC RAM buffer overrun.
+ *
+ * Calling hcf_get_mb_info when their is no MailBox Info block available or when there is no MailBox at all,
+ * results in a "NULL" MailBox Info block.
+ *
+ *12: see NOTICE
+ *17: The return status of cmd_wait and the first hcfio_in_string can be ignored, because when one fails, the
+ * other fails via the IFB_DefunctStat mechanism
+ *20: "HCFASSERT( rc == HCF_SUCCESS, rc )" is not suitable because this will always trigger as side effect of
+ * the HCFASSERT in hcf_put_info which calls hcf_get_info to figure out whether the RID exists at all.
-*.NOTICE
-*
-* "HCF embedded" pseudo RIDs:
-* CFG_MB_INFO, CFG_TALLIES, CFG_DRV_IDENTITY, CFG_DRV_SUP_RANGE, CFG_DRV_ACT_RANGES_PRI,
-* CFG_DRV_ACT_RANGES_STA, CFG_DRV_ACT_RANGES_HSI
-* Note the HCF_ERR_LEN is NOT adequately set, when L >= 2 but less than needed
-*
-* Remarks: Transfers operation information and transient and persistent configuration information from the
-* Card and from the HCF to the MSF.
-* The exact layout of the provided data structure depends on the action code. Copying stops if either the
-* complete Configuration Information is copied or if the number of bytes indicated by len is copied. Len
-* acts as a safe guard against Configuration Information blocks which have different sizes for different
-* Hermes versions, e.g. when later versions support more tallies than earlier versions. It is a conscious
-* decision that unused parts of the PC RAM buffer are not cleared.
-*
-* Remarks: The only error against which is protected is the "Read error" as result of Card removal. Only the
-* last hcf_io_string need to be protected because if the first fails the second will fail as well. Checking
-* for cmd_exe errors is supposed superfluous because problems in cmd_exe are already caught or will be
-* caught by hcf_enable.
-*
-* CFG_MB_INFO: copy the oldest MailBox Info Block or the "null" block if none available.
-*
-* The mechanism to HCF_ASSERT on invalid typ-codes in the LTV record is based on the following strategy:
-* - during the pseudo-asynchronous Hermes commands (diagnose, download) only CFG_MB_INFO is acceptable
-* - some codes (e.g. CFG_TALLIES) are explicitly handled by the HCF which implies that these codes
-* are valid
-* - all other codes in the range 0xFC00 through 0xFFFF are passed to the Hermes. The Hermes returns an
-* LTV record with a zero value in the L-field for all Typ-codes it does not recognize. This is
-* defined and intended behavior, so HCF_ASSERT does not catch on this phenomena.
-* - all remaining codes are invalid and cause an ASSERT.
-*
-*.CONDITIONS
-* In case of USB, HCF_MAX_MSG ;?USED;? to limit the amount of data that can be retrieved via hcf_get_info.
-*
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *.NOTICE
+ *
+ * "HCF embedded" pseudo RIDs:
+ * CFG_MB_INFO, CFG_TALLIES, CFG_DRV_IDENTITY, CFG_DRV_SUP_RANGE, CFG_DRV_ACT_RANGES_PRI,
+ * CFG_DRV_ACT_RANGES_STA, CFG_DRV_ACT_RANGES_HSI
+ * Note the HCF_ERR_LEN is NOT adequately set, when L >= 2 but less than needed
+ *
+ * Remarks: Transfers operation information and transient and persistent configuration information from the
+ * Card and from the HCF to the MSF.
+ * The exact layout of the provided data structure depends on the action code. Copying stops if either the
+ * complete Configuration Information is copied or if the number of bytes indicated by len is copied. Len
+ * acts as a safe guard against Configuration Information blocks which have different sizes for different
+ * Hermes versions, e.g. when later versions support more tallies than earlier versions. It is a conscious
+ * decision that unused parts of the PC RAM buffer are not cleared.
+ *
+ * Remarks: The only error against which is protected is the "Read error" as result of Card removal. Only the
+ * last hcf_io_string need to be protected because if the first fails the second will fail as well. Checking
+ * for cmd_exe errors is supposed superfluous because problems in cmd_exe are already caught or will be
+ * caught by hcf_enable.
+ *
+ * CFG_MB_INFO: copy the oldest MailBox Info Block or the "null" block if none available.
+ *
+ * The mechanism to HCF_ASSERT on invalid typ-codes in the LTV record is based on the following strategy:
+ * - during the pseudo-asynchronous Hermes commands (diagnose, download) only CFG_MB_INFO is acceptable
+ * - some codes (e.g. CFG_TALLIES) are explicitly handled by the HCF which implies that these codes
+ * are valid
+ * - all other codes in the range 0xFC00 through 0xFFFF are passed to the Hermes. The Hermes returns an
+ * LTV record with a zero value in the L-field for all Typ-codes it does not recognize. This is
+ * defined and intended behavior, so HCF_ASSERT does not catch on this phenomena.
+ * - all remaining codes are invalid and cause an ASSERT.
+ *
+ *.CONDITIONS
+ * In case of USB, HCF_MAX_MSG ;?USED;? to limit the amount of data that can be retrieved via hcf_get_info.
+ *
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
int
hcf_get_info( IFBP ifbp, LTVP ltvp )
{
-int rc = HCF_SUCCESS;
-hcf_16 len = ltvp->len;
-hcf_16 type = ltvp->typ;
-wci_recordp p = &ltvp->len; //destination word pointer (in LTV record)
-hcf_16 *q = NULL; /* source word pointer Note!! DOS COM can't cope with FAR
- * as a consequence MailBox must be near which is usually true anyway
- */
-int i;
-
- HCFLOGENTRY( HCF_TRACE_GET_INFO, ltvp->typ )
- HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic )
- HCFASSERT_INT
- HCFASSERT( ltvp, 0 )
- HCFASSERT( 1 < ltvp->len && ltvp->len <= HCF_MAX_LTV + 1, MERGE_2( ltvp->typ, ltvp->len ) )
-
- ltvp->len = 0; //default to: No Info Available
-#if defined MSF_COMPONENT_ID || (HCF_EXT) & HCF_EXT_MB //filter out all specials
+ int rc = HCF_SUCCESS;
+ hcf_16 len = ltvp->len;
+ hcf_16 type = ltvp->typ;
+ wci_recordp p = &ltvp->len; //destination word pointer (in LTV record)
+ hcf_16 *q = NULL; /* source word pointer Note!! DOS COM can't cope with FAR
+ * as a consequence MailBox must be near which is usually true anyway
+ */
+ int i;
+
+ HCFLOGENTRY( HCF_TRACE_GET_INFO, ltvp->typ );
+ HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic );
+ HCFASSERT_INT;
+ HCFASSERT( ltvp, 0 );
+ HCFASSERT( 1 < ltvp->len && ltvp->len <= HCF_MAX_LTV + 1, MERGE_2( ltvp->typ, ltvp->len ) );
+
+ ltvp->len = 0; //default to: No Info Available
+ //filter out all specials
for ( i = 0; ( q = xxxx[i] ) != NULL && q[1] != type; i++ ) /*NOP*/;
-#endif // MSF_COMPONENT_ID / HCF_EXT_MB
+
#if HCF_TALLIES
- if ( type == CFG_TALLIES ) { /*3*/
+ if ( type == CFG_TALLIES ) { /*3*/
(void)hcf_action( ifbp, HCF_ACT_TALLIES );
q = (hcf_16*)&ifbp->IFB_TallyLen;
}
#endif // HCF_TALLIES
-#if (HCF_EXT) & HCF_EXT_MB
+
if ( type == CFG_MB_INFO ) {
if ( ifbp->IFB_MBInfoLen ) {
if ( ifbp->IFB_MBp[ifbp->IFB_MBRp] == 0xFFFF ) {
ifbp->IFB_MBRp = 0; //;?Probably superfluous
}
q = &ifbp->IFB_MBp[ifbp->IFB_MBRp];
- ifbp->IFB_MBRp += *q + 1; //update read pointer
+ ifbp->IFB_MBRp += *q + 1; //update read pointer
if ( ifbp->IFB_MBp[ifbp->IFB_MBRp] == 0xFFFF ) {
ifbp->IFB_MBRp = 0;
}
ifbp->IFB_MBInfoLen = ifbp->IFB_MBp[ifbp->IFB_MBRp];
}
}
-#endif // HCF_EXT_MB
- if ( q != NULL ) { //a special or CFG_TALLIES or CFG_MB_INFO
- i = min( len, *q ) + 1; //total size of destination (including T-field)
+
+ if ( q != NULL ) { //a special or CFG_TALLIES or CFG_MB_INFO
+ i = min( len, *q ) + 1; //total size of destination (including T-field)
while ( i-- ) {
*p++ = *q;
#if (HCF_TALLIES) & HCF_TALLIES_RESET
@@ -2077,50 +2001,50 @@ int i;
#endif // HCF_TALLIES_RESET
q++;
}
- } else { // not a special nor CFG_TALLIES nor CFG_MB_INFO
- if ( type == CFG_CNTL_OPT ) { //read back effective options
+ } else { // not a special nor CFG_TALLIES nor CFG_MB_INFO
+ if ( type == CFG_CNTL_OPT ) { //read back effective options
ltvp->len = 2;
ltvp->val[0] = ifbp->IFB_CntlOpt;
#if (HCF_EXT) & HCF_EXT_NIC_ACCESS
} else if ( type == CFG_PROD_DATA ) { //only needed for some test tool on top of H-II NDIS driver
-hcf_io io_port;
-wci_bufp pt; //pointer with the "right" type, just to help ease writing macros with embedded assembly
+ hcf_io io_port;
+ wci_bufp pt; //pointer with the "right" type, just to help ease writing macros with embedded assembly
OPW( HREG_AUX_PAGE, (hcf_16)(PLUG_DATA_OFFSET >> 7) );
OPW( HREG_AUX_OFFSET, (hcf_16)(PLUG_DATA_OFFSET & 0x7E) );
- io_port = ifbp->IFB_IOBase + HREG_AUX_DATA; //to prevent side effects of the MSF-defined macro
- p = ltvp->val; //destination char pointer (in LTV record)
+ io_port = ifbp->IFB_IOBase + HREG_AUX_DATA; //to prevent side effects of the MSF-defined macro
+ p = ltvp->val; //destination char pointer (in LTV record)
i = len - 1;
if (i > 0 ) {
- pt = (wci_bufp)p; //just to help ease writing macros with embedded assembly
+ pt = (wci_bufp)p; //just to help ease writing macros with embedded assembly
IN_PORT_STRING_8_16( io_port, pt, i ); //space used by T: -1
}
} else if ( type == CFG_CMD_HCF ) {
#define P ((CFG_CMD_HCF_STRCT FAR *)ltvp)
- HCFASSERT( P->cmd == CFG_CMD_HCF_REG_ACCESS, P->cmd ) //only Hermes register access supported
+ HCFASSERT( P->cmd == CFG_CMD_HCF_REG_ACCESS, P->cmd ); //only Hermes register access supported
if ( P->cmd == CFG_CMD_HCF_REG_ACCESS ) {
- HCFASSERT( P->mode < ifbp->IFB_IOBase, P->mode ) //Check Register space
- ltvp->len = min( len, 4 ); //RESTORE ltv length
+ HCFASSERT( P->mode < ifbp->IFB_IOBase, P->mode ); //Check Register space
+ ltvp->len = min( len, 4 ); //RESTORE ltv length
P->add_info = IPW( P->mode );
}
#undef P
#endif // HCF_EXT_NIC_ACCESS
#if (HCF_ASSERT) & HCF_ASSERT_PRINTF
- } else if (type == CFG_FW_PRINTF) {
- rc = fw_printf(ifbp, (CFG_FW_PRINTF_STRCT*)ltvp);
+ } else if (type == CFG_FW_PRINTF) {
+ rc = fw_printf(ifbp, (CFG_FW_PRINTF_STRCT*)ltvp);
#endif // HCF_ASSERT_PRINTF
} else if ( type >= CFG_RID_FW_MIN ) {
//;? by using HCMD_BUSY option when calling cmd_exe, using a get_frag with length 0 just to set up the
//;? BAP and calling cmd_cmpl, you could merge the 2 Busy waits. Whether this really helps (and what
//;? would be the optimal sequence in cmd_exe and get_frag) would have to be MEASURED
-/*17*/ if ( ( rc = cmd_exe( ifbp, HCMD_ACCESS, type ) ) == HCF_SUCCESS &&
+ /*17*/ if ( ( rc = cmd_exe( ifbp, HCMD_ACCESS, type ) ) == HCF_SUCCESS &&
( rc = setup_bap( ifbp, type, 0, IO_IN ) ) == HCF_SUCCESS ) {
get_frag( ifbp, (wci_bufp)&ltvp->len, 2*len+2 BE_PAR(2) );
- if ( IPW( HREG_STAT ) == 0xFFFF ) { //NIC removal test
+ if ( IPW( HREG_STAT ) == 0xFFFF ) { //NIC removal test
ltvp->len = 0;
- HCFASSERT( DO_ASSERT, type )
+ HCFASSERT( DO_ASSERT, type );
}
}
-/*12*/ } else HCFASSERT( DO_ASSERT, type ) /*NOP*/; //NOP in case HCFASSERT is dummy
+ /*12*/ } else HCFASSERT( DO_ASSERT, type ) /*NOP*/; //NOP in case HCFASSERT is dummy
}
if ( len < ltvp->len ) {
ltvp->len = len;
@@ -2129,721 +2053,676 @@ wci_bufp pt; //pointer with the "right" type, just to help ease writing macr
}
}
HCFASSERT( rc == HCF_SUCCESS || ( rc == HCF_ERR_LEN && ifbp->IFB_AssertTrace & 1<<HCF_TRACE_PUT_INFO ),
- MERGE_2( type, rc ) ) /*20*/
- HCFLOGEXIT( HCF_TRACE_GET_INFO )
+ MERGE_2( type, rc ) ); /*20*/
+ HCFLOGEXIT( HCF_TRACE_GET_INFO );
return rc;
} // hcf_get_info
/************************************************************************************************************
-*
-*.MODULE int hcf_put_info( IFBP ifbp, LTVP ltvp )
-*.PURPOSE Transfers operation and configuration information to the Card and to the HCF.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* ltvp specifies the RID (as defined by Hermes I/F) or pseudo-RID (as defined by WCI)
-*
-*.RETURNS
-* HCF_SUCCESS
-*!! via cmd_exe
-* HCF_ERR_NO_NIC NIC removed during data retrieval
-* HCF_ERR_TIME_OUT Expected F/W event did not occur in time
-* HCF_ERR_DEFUNCT_...
-*!! via download CFG_DLNV_START <= type <= CFG_DL_STOP
-*!! via put_info CFG_RID_CFG_MIN <= type <= CFG_RID_CFG_MAX
-*!! via put_frag
-*
-*.DESCRIPTION
-* The L-field of the LTV-record (provided by the MSF in parameter ltvp) specifies the size of the buffer.
-* The L-value includes the size of the T-field, but not the size of the L-field.
-* The T- field specifies the RID placed in the V-field by the MSF.
-*
-* Not all CFG-codes can be used for hcf_put_info. The following CFG-codes are valid for hcf_put_info:
-* o One of the CFG-codes in the group "Network Parameters, Static Configuration Entities"
-* Changes made by hcf_put_info to CFG_codes in this group will not affect the F/W
-* and HCF behavior until hcf_cntl_port( HCF_PORT_ENABLE) is called.
-* o One of the CFG-codes in the group "Network Parameters, Dynamic Configuration Entities"
-* Changes made by hcf_put_info to CFG_codes will affect the F/W and HCF behavior immediately.
-* o CFG_PROG.
-* This code is used to initiate and terminate the process to download data either to
-* volatile or to non-volatile RAM on the NIC as well as for the actual download.
-* o CFG-codes related to the HCF behavior.
-* The related CFG-codes are:
-* - CFG_REG_MB
-* - CFG_REG_ASSERT_RTNP
-* - CFG_REG_INFO_LOG
-* - CFG_CMD_NIC
-* - CFG_CMD_DONGLE
-* - CFG_CMD_HCF
-* - CFG_NOTIFY
-*
-* All LTV-records "unknown" to the HCF are forwarded to the F/W.
-*
-* Assert fails if
-* - ifbp has a recognizable out-of-range value.
-* - ltvp is a NULL pointer.
-* - hcf_put_info was called without prior call to hcf_connect
-* - type field of the LTV-record is invalid, i.e. neither HCF nor F/W can handle the value.
-* - length field of the LTV-record at entry is less than 1 or exceeds MAX_LTV_SIZE.
-* - registering a MailBox with size less than 60 or a non-aligned buffer address is used.
-* - reentrancy, may be caused by calling hcf_functions without adequate protection against
-* NIC interrupts or multi-threading.
-*
-*.DIAGRAM
-*
-*.NOTICE
-* Remarks: In case of Hermes Configuration LTVs, the codes for the type are "cleverly" chosen to be
-* identical to the RID. Hermes Configuration information is copied from the provided data structure into the
-* Card.
-* In case of HCF Configuration LTVs, the type values are chosen in a range which does not overlap the
-* RID-range.
-*
-*20:
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.MODULE int hcf_put_info( IFBP ifbp, LTVP ltvp )
+ *.PURPOSE Transfers operation and configuration information to the Card and to the HCF.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * ltvp specifies the RID (as defined by Hermes I/F) or pseudo-RID (as defined by WCI)
+ *
+ *.RETURNS
+ * HCF_SUCCESS
+ *!! via cmd_exe
+ * HCF_ERR_NO_NIC NIC removed during data retrieval
+ * HCF_ERR_TIME_OUT Expected F/W event did not occur in time
+ * HCF_ERR_DEFUNCT_...
+ *!! via download CFG_DLNV_START <= type <= CFG_DL_STOP
+ *!! via put_info CFG_RID_CFG_MIN <= type <= CFG_RID_CFG_MAX
+ *!! via put_frag
+ *
+ *.DESCRIPTION
+ * The L-field of the LTV-record (provided by the MSF in parameter ltvp) specifies the size of the buffer.
+ * The L-value includes the size of the T-field, but not the size of the L-field.
+ * The T- field specifies the RID placed in the V-field by the MSF.
+ *
+ * Not all CFG-codes can be used for hcf_put_info. The following CFG-codes are valid for hcf_put_info:
+ * o One of the CFG-codes in the group "Network Parameters, Static Configuration Entities"
+ * Changes made by hcf_put_info to CFG_codes in this group will not affect the F/W
+ * and HCF behavior until hcf_cntl_port( HCF_PORT_ENABLE) is called.
+ * o One of the CFG-codes in the group "Network Parameters, Dynamic Configuration Entities"
+ * Changes made by hcf_put_info to CFG_codes will affect the F/W and HCF behavior immediately.
+ * o CFG_PROG.
+ * This code is used to initiate and terminate the process to download data either to
+ * volatile or to non-volatile RAM on the NIC as well as for the actual download.
+ * o CFG-codes related to the HCF behavior.
+ * The related CFG-codes are:
+ * - CFG_REG_MB
+ * - CFG_REG_ASSERT_RTNP
+ * - CFG_REG_INFO_LOG
+ * - CFG_CMD_NIC
+ * - CFG_CMD_DONGLE
+ * - CFG_CMD_HCF
+ * - CFG_NOTIFY
+ *
+ * All LTV-records "unknown" to the HCF are forwarded to the F/W.
+ *
+ * Assert fails if
+ * - ifbp has a recognizable out-of-range value.
+ * - ltvp is a NULL pointer.
+ * - hcf_put_info was called without prior call to hcf_connect
+ * - type field of the LTV-record is invalid, i.e. neither HCF nor F/W can handle the value.
+ * - length field of the LTV-record at entry is less than 1 or exceeds MAX_LTV_SIZE.
+ * - registering a MailBox with size less than 60 or a non-aligned buffer address is used.
+ * - reentrancy, may be caused by calling hcf_functions without adequate protection against
+ * NIC interrupts or multi-threading.
+ *
+ *.DIAGRAM
+ *
+ *.NOTICE
+ * Remarks: In case of Hermes Configuration LTVs, the codes for the type are "cleverly" chosen to be
+ * identical to the RID. Hermes Configuration information is copied from the provided data structure into the
+ * Card.
+ * In case of HCF Configuration LTVs, the type values are chosen in a range which does not overlap the
+ * RID-range.
+ *
+ *20:
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
int
hcf_put_info( IFBP ifbp, LTVP ltvp )
{
-int rc = HCF_SUCCESS;
+ int rc = HCF_SUCCESS;
- HCFLOGENTRY( HCF_TRACE_PUT_INFO, ltvp->typ )
- HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic )
- HCFASSERT_INT
- HCFASSERT( ltvp, 0 )
- HCFASSERT( 1 < ltvp->len && ltvp->len <= HCF_MAX_LTV + 1, ltvp->len )
+ HCFLOGENTRY( HCF_TRACE_PUT_INFO, ltvp->typ );
+ HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic );
+ HCFASSERT_INT;
+ HCFASSERT( ltvp, 0 );
+ HCFASSERT( 1 < ltvp->len && ltvp->len <= HCF_MAX_LTV + 1, ltvp->len );
- //all codes between 0xFA00 and 0xFCFF are passed to Hermes
+ //all codes between 0xFA00 and 0xFCFF are passed to Hermes
#if (HCF_TYPE) & HCF_TYPE_WPA
- { hcf_16 i;
- hcf_32 FAR * key_p;
-
- if ( ltvp->typ == CFG_ADD_TKIP_DEFAULT_KEY || ltvp->typ == CFG_ADD_TKIP_MAPPED_KEY ) {
- key_p = (hcf_32*)((CFG_ADD_TKIP_MAPPED_KEY_STRCT FAR *)ltvp)->tx_mic_key;
- i = TX_KEY; //i.e. TxKeyIndicator == 1, KeyID == 0
- if ( ltvp->typ == CFG_ADD_TKIP_DEFAULT_KEY ) {
- key_p = (hcf_32*)((CFG_ADD_TKIP_DEFAULT_KEY_STRCT FAR *)ltvp)->tx_mic_key;
- i = CNV_LITTLE_TO_SHORT(((CFG_ADD_TKIP_DEFAULT_KEY_STRCT FAR *)ltvp)->tkip_key_id_info);
- }
- if ( i & TX_KEY ) { /* TxKeyIndicator == 1
- (either really set by MSF in case of DEFAULT or faked by HCF in case of MAPPED ) */
- ifbp->IFB_MICTxCntl = (hcf_16)( HFS_TX_CNTL_MIC | (i & KEY_ID )<<8 );
- ifbp->IFB_MICTxKey[0] = CNV_LONGP_TO_LITTLE( key_p );
- ifbp->IFB_MICTxKey[1] = CNV_LONGP_TO_LITTLE( (key_p+1) );
+ {
+ hcf_16 i;
+ hcf_32 FAR * key_p;
+
+ if ( ltvp->typ == CFG_ADD_TKIP_DEFAULT_KEY || ltvp->typ == CFG_ADD_TKIP_MAPPED_KEY ) {
+ key_p = (hcf_32*)((CFG_ADD_TKIP_MAPPED_KEY_STRCT FAR *)ltvp)->tx_mic_key;
+ i = TX_KEY; //i.e. TxKeyIndicator == 1, KeyID == 0
+ if ( ltvp->typ == CFG_ADD_TKIP_DEFAULT_KEY ) {
+ key_p = (hcf_32*)((CFG_ADD_TKIP_DEFAULT_KEY_STRCT FAR *)ltvp)->tx_mic_key;
+ i = CNV_LITTLE_TO_SHORT(((CFG_ADD_TKIP_DEFAULT_KEY_STRCT FAR *)ltvp)->tkip_key_id_info);
+ }
+ if ( i & TX_KEY ) { /* TxKeyIndicator == 1
+ (either really set by MSF in case of DEFAULT or faked by HCF in case of MAPPED ) */
+ ifbp->IFB_MICTxCntl = (hcf_16)( HFS_TX_CNTL_MIC | (i & KEY_ID )<<8 );
+ ifbp->IFB_MICTxKey[0] = CNV_LONGP_TO_LITTLE( key_p );
+ ifbp->IFB_MICTxKey[1] = CNV_LONGP_TO_LITTLE( (key_p+1) );
+ }
+ i = ( i & KEY_ID ) * 2;
+ ifbp->IFB_MICRxKey[i] = CNV_LONGP_TO_LITTLE( (key_p+2) );
+ ifbp->IFB_MICRxKey[i+1] = CNV_LONGP_TO_LITTLE( (key_p+3) );
}
- i = ( i & KEY_ID ) * 2;
- ifbp->IFB_MICRxKey[i] = CNV_LONGP_TO_LITTLE( (key_p+2) );
- ifbp->IFB_MICRxKey[i+1] = CNV_LONGP_TO_LITTLE( (key_p+3) );
- }
#define P ((CFG_REMOVE_TKIP_DEFAULT_KEY_STRCT FAR *)ltvp)
- if ( ( ltvp->typ == CFG_REMOVE_TKIP_MAPPED_KEY ) ||
- ( ltvp->typ == CFG_REMOVE_TKIP_DEFAULT_KEY &&
- ( (ifbp->IFB_MICTxCntl >> 8) & KEY_ID ) == CNV_SHORT_TO_LITTLE(P->tkip_key_id )
- )
- ) { ifbp->IFB_MICTxCntl = 0; } //disable MIC-engine
+ if ( ( ltvp->typ == CFG_REMOVE_TKIP_MAPPED_KEY ) ||
+ ( ltvp->typ == CFG_REMOVE_TKIP_DEFAULT_KEY &&
+ ( (ifbp->IFB_MICTxCntl >> 8) & KEY_ID ) == CNV_SHORT_TO_LITTLE(P->tkip_key_id )
+ )
+ ) { ifbp->IFB_MICTxCntl = 0; } //disable MIC-engine
#undef P
- }
+ }
#endif // HCF_TYPE_WPA
if ( ltvp->typ == CFG_PROG ) {
rc = download( ifbp, (CFG_PROG_STRCT FAR *)ltvp );
} else switch (ltvp->typ) {
#if (HCF_ASSERT) & HCF_ASSERT_RT_MSF_RTN
- case CFG_REG_ASSERT_RTNP: //Register MSF Routines
+ case CFG_REG_ASSERT_RTNP: //Register MSF Routines
#define P ((CFG_REG_ASSERT_RTNP_STRCT FAR *)ltvp)
- ifbp->IFB_AssertRtn = P->rtnp;
-// ifbp->IFB_AssertLvl = P->lvl; //TODO not yet supported so default is set in hcf_connect
- HCFASSERT( DO_ASSERT, MERGE_2( HCF_ASSERT, 0xCAF1 ) ) //just to proof that the complete assert machinery is working
+ ifbp->IFB_AssertRtn = P->rtnp;
+// ifbp->IFB_AssertLvl = P->lvl; //TODO not yet supported so default is set in hcf_connect
+ HCFASSERT( DO_ASSERT, MERGE_2( HCF_ASSERT, 0xCAF1 ) ); //just to proof that the complete assert machinery is working
#undef P
- break;
+ break;
#endif // HCF_ASSERT_RT_MSF_RTN
#if (HCF_EXT) & HCF_EXT_INFO_LOG
- case CFG_REG_INFO_LOG: //Register Log filter
- ifbp->IFB_RIDLogp = ((CFG_RID_LOG_STRCT FAR*)ltvp)->recordp;
- break;
+ case CFG_REG_INFO_LOG: //Register Log filter
+ ifbp->IFB_RIDLogp = ((CFG_RID_LOG_STRCT FAR*)ltvp)->recordp;
+ break;
#endif // HCF_EXT_INFO_LOG
- case CFG_CNTL_OPT: //overrule option
- HCFASSERT( ( ltvp->val[0] & ~(USE_DMA | USE_16BIT) ) == 0, ltvp->val[0] )
- if ( ( ltvp->val[0] & USE_DMA ) == 0 ) ifbp->IFB_CntlOpt &= ~USE_DMA;
- ifbp->IFB_CntlOpt |= ltvp->val[0] & USE_16BIT;
- break;
-#if (HCF_EXT) & HCF_EXT_MB
- case CFG_REG_MB: //Register MailBox
+ case CFG_CNTL_OPT: //overrule option
+ HCFASSERT( ( ltvp->val[0] & ~(USE_DMA | USE_16BIT) ) == 0, ltvp->val[0] );
+ if ( ( ltvp->val[0] & USE_DMA ) == 0 ) ifbp->IFB_CntlOpt &= ~USE_DMA;
+ ifbp->IFB_CntlOpt |= ltvp->val[0] & USE_16BIT;
+ break;
+
+ case CFG_REG_MB: //Register MailBox
#define P ((CFG_REG_MB_STRCT FAR *)ltvp)
- HCFASSERT( ( (hcf_32)P->mb_addr & 0x0001 ) == 0, (hcf_32)P->mb_addr )
- HCFASSERT( (P)->mb_size >= 60, (P)->mb_size )
- ifbp->IFB_MBp = P->mb_addr;
- /* if no MB present, size must be 0 for ;?the old;? put_info_mb to work correctly */
- ifbp->IFB_MBSize = ifbp->IFB_MBp == NULL ? 0 : P->mb_size;
- ifbp->IFB_MBWp = ifbp->IFB_MBRp = 0;
- ifbp->IFB_MBp[0] = 0; //flag the MailBox as empty
- ifbp->IFB_MBInfoLen = 0;
- HCFASSERT( ifbp->IFB_MBSize >= 60 || ifbp->IFB_MBp == NULL, ifbp->IFB_MBSize )
+ HCFASSERT( ( (hcf_32)P->mb_addr & 0x0001 ) == 0, (hcf_32)P->mb_addr );
+ HCFASSERT( (P)->mb_size >= 60, (P)->mb_size );
+ ifbp->IFB_MBp = P->mb_addr;
+ /* if no MB present, size must be 0 for ;?the old;? put_info_mb to work correctly */
+ ifbp->IFB_MBSize = ifbp->IFB_MBp == NULL ? 0 : P->mb_size;
+ ifbp->IFB_MBWp = ifbp->IFB_MBRp = 0;
+ ifbp->IFB_MBp[0] = 0; //flag the MailBox as empty
+ ifbp->IFB_MBInfoLen = 0;
+ HCFASSERT( ifbp->IFB_MBSize >= 60 || ifbp->IFB_MBp == NULL, ifbp->IFB_MBSize );
#undef P
- break;
- case CFG_MB_INFO: //store MailBoxInfoBlock
- rc = put_info_mb( ifbp, (CFG_MB_INFO_STRCT FAR *)ltvp );
- break;
-#endif // HCF_EXT_MB
+ break;
+ case CFG_MB_INFO: //store MailBoxInfoBlock
+ rc = put_info_mb( ifbp, (CFG_MB_INFO_STRCT FAR *)ltvp );
+ break;
#if (HCF_EXT) & HCF_EXT_NIC_ACCESS
- case CFG_CMD_NIC:
+ case CFG_CMD_NIC:
#define P ((CFG_CMD_NIC_STRCT FAR *)ltvp)
- OPW( HREG_PARAM_2, P->parm2 );
- OPW( HREG_PARAM_1, P->parm1 );
- rc = cmd_exe( ifbp, P->cmd, P->parm0 );
- P->hcf_stat = (hcf_16)rc;
- P->stat = IPW( HREG_STAT );
- P->resp0 = IPW( HREG_RESP_0 );
- P->resp1 = IPW( HREG_RESP_1 );
- P->resp2 = IPW( HREG_RESP_2 );
- P->ifb_err_cmd = ifbp->IFB_ErrCmd;
- P->ifb_err_qualifier = ifbp->IFB_ErrQualifier;
+ OPW( HREG_PARAM_2, P->parm2 );
+ OPW( HREG_PARAM_1, P->parm1 );
+ rc = cmd_exe( ifbp, P->cmd, P->parm0 );
+ P->hcf_stat = (hcf_16)rc;
+ P->stat = IPW( HREG_STAT );
+ P->resp0 = IPW( HREG_RESP_0 );
+ P->resp1 = IPW( HREG_RESP_1 );
+ P->resp2 = IPW( HREG_RESP_2 );
+ P->ifb_err_cmd = ifbp->IFB_ErrCmd;
+ P->ifb_err_qualifier = ifbp->IFB_ErrQualifier;
#undef P
- break;
- case CFG_CMD_HCF:
+ break;
+ case CFG_CMD_HCF:
#define P ((CFG_CMD_HCF_STRCT FAR *)ltvp)
- HCFASSERT( P->cmd == CFG_CMD_HCF_REG_ACCESS, P->cmd ) //only Hermes register access supported
- if ( P->cmd == CFG_CMD_HCF_REG_ACCESS ) {
- HCFASSERT( P->mode < ifbp->IFB_IOBase, P->mode ) //Check Register space
- OPW( P->mode, P->add_info);
- }
+ HCFASSERT( P->cmd == CFG_CMD_HCF_REG_ACCESS, P->cmd ); //only Hermes register access supported
+ if ( P->cmd == CFG_CMD_HCF_REG_ACCESS ) {
+ HCFASSERT( P->mode < ifbp->IFB_IOBase, P->mode ); //Check Register space
+ OPW( P->mode, P->add_info);
+ }
#undef P
- break;
+ break;
#endif // HCF_EXT_NIC_ACCESS
#if (HCF_ASSERT) & HCF_ASSERT_PRINTF
- case CFG_FW_PRINTF_BUFFER_LOCATION:
- ifbp->IFB_FwPfBuff = *(CFG_FW_PRINTF_BUFFER_LOCATION_STRCT*)ltvp;
- break;
+ case CFG_FW_PRINTF_BUFFER_LOCATION:
+ ifbp->IFB_FwPfBuff = *(CFG_FW_PRINTF_BUFFER_LOCATION_STRCT*)ltvp;
+ break;
#endif // HCF_ASSERT_PRINTF
- default: //pass everything unknown above the "FID" range to the Hermes or Dongle
- rc = put_info( ifbp, ltvp );
- }
- //DO NOT !!! HCFASSERT( rc == HCF_SUCCESS, rc ) /* 20 */
- HCFLOGEXIT( HCF_TRACE_PUT_INFO )
+ default: //pass everything unknown above the "FID" range to the Hermes or Dongle
+ rc = put_info( ifbp, ltvp );
+ }
+ //DO NOT !!! HCFASSERT( rc == HCF_SUCCESS, rc ) /* 20 */
+ HCFLOGEXIT( HCF_TRACE_PUT_INFO );
return rc;
} // hcf_put_info
-#if (HCF_DL_ONLY) == 0
/************************************************************************************************************
-*
-*.MODULE int hcf_rcv_msg( IFBP ifbp, DESC_STRCT *descp, unsigned int offset )
-*.PURPOSE All: decapsulate a message.
-* pre-HermesII.5: verify MIC.
-* non-USB, non-DMA mode: Transfer a message from the NIC to the Host and acknowledge reception.
-* USB: Transform a message from proprietary USB format to 802.3 format
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* descp Pointer to the Descriptor List location.
-* offset USB: not used
-* non-USB: specifies the beginning of the data to be obtained (0 corresponds with DestAddr field
-* of frame).
-*
-*.RETURNS
-* HCF_SUCCESS No SSN error ( or HCF_ERR_MIC already reported by hcf_service_nic)
-* HCF_ERR_MIC message contains an erroneous MIC ( HCF_SUCCESS is reported if HCF_ERR_MIC is already
-* reported by hcf_service_nic)
-* HCF_ERR_NO_NIC NIC removed during data retrieval
-* HCF_ERR_DEFUNCT...
-*
-*.DESCRIPTION
-* The Receive Message Function can be executed by the MSF to obtain the Data Info fields of the message that
-* is reported to be available by the Service NIC Function.
-*
-* The Receive Message Function copies the message data available in the Card memory into a buffer structure
-* provided by the MSF.
-* Only data of the message indicated by the Service NIC Function can be obtained.
-* Execution of the Service NIC function may result in the availability of a new message, but it definitely
-* makes the message reported by the preceding Service NIC function, unavailable.
-*
-* in non-USB/non-DMA mode, hcf_rcv_msg starts the copy process at the (non-negative) offset requested by the
-* parameter offset, relative to HFS_ADDR_DEST, e.g offset 0 starts copying from the Destination Address, the
-* very begin of the 802.3 frame message. Offset must either lay within the part of the 802.3 frame as stored
-* by hcf_service_nic in the lookahead buffer or be just behind it, i.e. the first byte not yet read.
-* When offset is within lookahead, data is copied from lookahead.
-* When offset is beyond lookahead, data is read directly from RxFS in NIC with disregard of the actual value
-* of offset
-*
-*.NOTICE:
-* o at entry: look ahead buffer as passed with hcf_service_nic is still accessible and unchanged
-* o at exit: Receive Frame in NIC memory is released
-*
-* Description:
-* Starting at the byte indicated by the Offset value, the bytes are copied from the Data Info
-* Part of the current Receive Frame Structure to the Host memory data buffer structure
-* identified by descp.
-* The maximum value for Offset is the number of characters of the 802.3 frame read into the
-* look ahead buffer by hcf_service_nic (i.e. the look ahead buffer size minus
-* Control and 802.11 fields)
-* If Offset is less than the maximum value, copying starts from the look ahead buffer till the
-* end of that buffer is reached
-* Then (or if the maximum value is specified for Offset), the
-* message is directly copied from NIC memory to Host memory.
-* If an invalid (i.e. too large) offset is specified, an assert catches but the buffer contents are
-* undefined.
-* Copying stops if either:
-* o the end of the 802.3 frame is reached
-* o the Descriptor with a NULL pointer in the next_desc_addr field is reached
-*
-* When the copying stops, the receiver is ack'ed, thus freeing the NIC memory where the frame is stored
-* As a consequence, hcf_rcv_msg can only be called once for any particular Rx frame.
-*
-* For the time being (PCI Bus mastering not yet supported), only the following fields of each
-* of the descriptors in the descriptor list must be set by the MSF:
-* o buf_cntl.buf_dim[1]
-* o *next_desc_addr
-* o *buf_addr
-* At return from hcf_rcv_msg, the field buf_cntl.buf_dim[0] of the used Descriptors reflects
-* the number of bytes in the buffer corresponding with the Descriptor.
-* On the last used Descriptor, buf_cntl.buf_dim[0] is less or equal to buf_cntl.buf_dim[1].
-* On all preceding Descriptors buf_cntl.buf_dim[0] is equal to buf_cntl.buf_dim[1].
-* On all succeeding (unused) Descriptors, buf_cntl.buf_dim[0] is zero.
-* Note: this I/F is based on the assumptions how the I/F needed for PCI Bus mastering will
-* be, so it may change.
-*
-* The most likely handling of HCF_ERR_NO_NIC by the MSF is to drop the already copied
-* data as elegantly as possible under the constraints and requirements posed by the (N)OS.
-* If no received Frame Structure is pending, "Success" rather than "Read error" is returned.
-* This error constitutes a logic flaw in the MSF
-* The HCF can only catch a minority of this
-* type of errors
-* Based on consistency ideas, the HCF catches none of these errors.
-*
-* Assert fails if
-* - ifbp has a recognizable out-of-range value
-* - there is no unacknowledged Rx-message available
-* - offset is out of range (outside look ahead buffer)
-* - descp is a NULL pointer
-* - any of the descriptors is not double word aligned
-* - reentrancy, may be caused by calling hcf_functions without adequate protection
-* against NIC interrupts or multi-threading.
-* - Interrupts are enabled.
-*
-*.DIAGRAM
-*
-*.NOTICE
-* - by using unsigned int as type for offset, no need to worry about negative offsets
-* - Asserting on being enabled/present is superfluous, since a non-zero IFB_lal implies that hcf_service_nic
-* was called and detected a Rx-message. A zero IFB_lal will set the BUF_CNT field of at least the first
-* descriptor to zero.
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.MODULE int hcf_rcv_msg( IFBP ifbp, DESC_STRCT *descp, unsigned int offset )
+ *.PURPOSE All: decapsulate a message.
+ * pre-HermesII.5: verify MIC.
+ * non-USB, non-DMA mode: Transfer a message from the NIC to the Host and acknowledge reception.
+ * USB: Transform a message from proprietary USB format to 802.3 format
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * descp Pointer to the Descriptor List location.
+ * offset USB: not used
+ * non-USB: specifies the beginning of the data to be obtained (0 corresponds with DestAddr field
+ * of frame).
+ *
+ *.RETURNS
+ * HCF_SUCCESS No WPA error ( or HCF_ERR_MIC already reported by hcf_service_nic)
+ * HCF_ERR_MIC message contains an erroneous MIC ( HCF_SUCCESS is reported if HCF_ERR_MIC is already
+ * reported by hcf_service_nic)
+ * HCF_ERR_NO_NIC NIC removed during data retrieval
+ * HCF_ERR_DEFUNCT...
+ *
+ *.DESCRIPTION
+ * The Receive Message Function can be executed by the MSF to obtain the Data Info fields of the message that
+ * is reported to be available by the Service NIC Function.
+ *
+ * The Receive Message Function copies the message data available in the Card memory into a buffer structure
+ * provided by the MSF.
+ * Only data of the message indicated by the Service NIC Function can be obtained.
+ * Execution of the Service NIC function may result in the availability of a new message, but it definitely
+ * makes the message reported by the preceding Service NIC function, unavailable.
+ *
+ * in non-USB/non-DMA mode, hcf_rcv_msg starts the copy process at the (non-negative) offset requested by the
+ * parameter offset, relative to HFS_ADDR_DEST, e.g offset 0 starts copying from the Destination Address, the
+ * very begin of the 802.3 frame message. Offset must either lay within the part of the 802.3 frame as stored
+ * by hcf_service_nic in the lookahead buffer or be just behind it, i.e. the first byte not yet read.
+ * When offset is within lookahead, data is copied from lookahead.
+ * When offset is beyond lookahead, data is read directly from RxFS in NIC with disregard of the actual value
+ * of offset
+ *
+ *.NOTICE:
+ * o at entry: look ahead buffer as passed with hcf_service_nic is still accessible and unchanged
+ * o at exit: Receive Frame in NIC memory is released
+ *
+ * Description:
+ * Starting at the byte indicated by the Offset value, the bytes are copied from the Data Info
+ * Part of the current Receive Frame Structure to the Host memory data buffer structure
+ * identified by descp.
+ * The maximum value for Offset is the number of characters of the 802.3 frame read into the
+ * look ahead buffer by hcf_service_nic (i.e. the look ahead buffer size minus
+ * Control and 802.11 fields)
+ * If Offset is less than the maximum value, copying starts from the look ahead buffer till the
+ * end of that buffer is reached
+ * Then (or if the maximum value is specified for Offset), the
+ * message is directly copied from NIC memory to Host memory.
+ * If an invalid (i.e. too large) offset is specified, an assert catches but the buffer contents are
+ * undefined.
+ * Copying stops if either:
+ * o the end of the 802.3 frame is reached
+ * o the Descriptor with a NULL pointer in the next_desc_addr field is reached
+ *
+ * When the copying stops, the receiver is ack'ed, thus freeing the NIC memory where the frame is stored
+ * As a consequence, hcf_rcv_msg can only be called once for any particular Rx frame.
+ *
+ * For the time being (PCI Bus mastering not yet supported), only the following fields of each
+ * of the descriptors in the descriptor list must be set by the MSF:
+ * o buf_cntl.buf_dim[1]
+ * o *next_desc_addr
+ * o *buf_addr
+ * At return from hcf_rcv_msg, the field buf_cntl.buf_dim[0] of the used Descriptors reflects
+ * the number of bytes in the buffer corresponding with the Descriptor.
+ * On the last used Descriptor, buf_cntl.buf_dim[0] is less or equal to buf_cntl.buf_dim[1].
+ * On all preceding Descriptors buf_cntl.buf_dim[0] is equal to buf_cntl.buf_dim[1].
+ * On all succeeding (unused) Descriptors, buf_cntl.buf_dim[0] is zero.
+ * Note: this I/F is based on the assumptions how the I/F needed for PCI Bus mastering will
+ * be, so it may change.
+ *
+ * The most likely handling of HCF_ERR_NO_NIC by the MSF is to drop the already copied
+ * data as elegantly as possible under the constraints and requirements posed by the (N)OS.
+ * If no received Frame Structure is pending, "Success" rather than "Read error" is returned.
+ * This error constitutes a logic flaw in the MSF
+ * The HCF can only catch a minority of this
+ * type of errors
+ * Based on consistency ideas, the HCF catches none of these errors.
+ *
+ * Assert fails if
+ * - ifbp has a recognizable out-of-range value
+ * - there is no unacknowledged Rx-message available
+ * - offset is out of range (outside look ahead buffer)
+ * - descp is a NULL pointer
+ * - any of the descriptors is not double word aligned
+ * - reentrancy, may be caused by calling hcf_functions without adequate protection
+ * against NIC interrupts or multi-threading.
+ * - Interrupts are enabled.
+ *
+ *.DIAGRAM
+ *
+ *.NOTICE
+ * - by using unsigned int as type for offset, no need to worry about negative offsets
+ * - Asserting on being enabled/present is superfluous, since a non-zero IFB_lal implies that hcf_service_nic
+ * was called and detected a Rx-message. A zero IFB_lal will set the BUF_CNT field of at least the first
+ * descriptor to zero.
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
int
hcf_rcv_msg( IFBP ifbp, DESC_STRCT *descp, unsigned int offset )
{
-int rc = HCF_SUCCESS;
-wci_bufp cp; //char oriented working pointer
-hcf_16 i;
-int tot_len = ifbp->IFB_RxLen - offset; //total length
-wci_bufp lap = ifbp->IFB_lap + offset; //start address in LookAhead Buffer
-hcf_16 lal = ifbp->IFB_lal - offset; //available data within LookAhead Buffer
-hcf_16 j;
-
- HCFLOGENTRY( HCF_TRACE_RCV_MSG, offset )
- HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic )
- HCFASSERT_INT
- HCFASSERT( descp, HCF_TRACE_RCV_MSG )
- HCFASSERT( ifbp->IFB_RxLen, HCF_TRACE_RCV_MSG )
- HCFASSERT( ifbp->IFB_RxLen >= offset, MERGE_2( offset, ifbp->IFB_RxLen ) )
- HCFASSERT( ifbp->IFB_lal >= offset, offset )
- HCFASSERT( (ifbp->IFB_CntlOpt & USE_DMA) == 0, 0xDADA )
+ int rc = HCF_SUCCESS;
+ wci_bufp cp; //char oriented working pointer
+ hcf_16 i;
+ int tot_len = ifbp->IFB_RxLen - offset; //total length
+ wci_bufp lap = ifbp->IFB_lap + offset; //start address in LookAhead Buffer
+ hcf_16 lal = ifbp->IFB_lal - offset; //available data within LookAhead Buffer
+ hcf_16 j;
+
+ HCFLOGENTRY( HCF_TRACE_RCV_MSG, offset );
+ HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic );
+ HCFASSERT_INT;
+ HCFASSERT( descp, HCF_TRACE_RCV_MSG );
+ HCFASSERT( ifbp->IFB_RxLen, HCF_TRACE_RCV_MSG );
+ HCFASSERT( ifbp->IFB_RxLen >= offset, MERGE_2( offset, ifbp->IFB_RxLen ) );
+ HCFASSERT( ifbp->IFB_lal >= offset, offset );
+ HCFASSERT( (ifbp->IFB_CntlOpt & USE_DMA) == 0, 0xDADA );
if ( tot_len < 0 ) {
- lal = 0; tot_len = 0; //suppress all copying activity in the do--while loop
+ lal = 0; tot_len = 0; //suppress all copying activity in the do--while loop
}
- do { //loop over all available fragments
+ do { //loop over all available fragments
// obnoxious hcf.c(1480) : warning C4769: conversion of near pointer to long integer
- HCFASSERT( ((hcf_32)descp & 3 ) == 0, (hcf_32)descp )
+ HCFASSERT( ((hcf_32)descp & 3 ) == 0, (hcf_32)descp );
cp = descp->buf_addr;
- j = min( (hcf_16)tot_len, descp->BUF_SIZE ); //minimum of "what's` available" and fragment size
+ j = min( (hcf_16)tot_len, descp->BUF_SIZE ); //minimum of "what's` available" and fragment size
descp->BUF_CNT = j;
- tot_len -= j; //adjust length still to go
- if ( lal ) { //if lookahead Buffer not yet completely copied
- i = min( lal, j ); //minimum of "what's available" in LookAhead and fragment size
- lal -= i; //adjust length still available in LookAhead
- j -= i; //adjust length still available in current fragment
+ tot_len -= j; //adjust length still to go
+ if ( lal ) { //if lookahead Buffer not yet completely copied
+ i = min( lal, j ); //minimum of "what's available" in LookAhead and fragment size
+ lal -= i; //adjust length still available in LookAhead
+ j -= i; //adjust length still available in current fragment
/*;? while loop could be improved by moving words but that is complicated on platforms with
* alignment requirements*/
while ( i-- ) *cp++ = *lap++;
}
- if ( j ) { //if LookAhead Buffer exhausted but still space in fragment, copy directly from NIC RAM
+ if ( j ) { //if LookAhead Buffer exhausted but still space in fragment, copy directly from NIC RAM
get_frag( ifbp, cp, j BE_PAR(0) );
CALC_RX_MIC( cp, j );
}
} while ( ( descp = descp->next_desc_addr ) != NULL );
#if (HCF_TYPE) & HCF_TYPE_WPA
if ( ifbp->IFB_RxFID ) {
- rc = check_mic( ifbp ); //prevents MIC error report if hcf_service_nic already consumed all
+ rc = check_mic( ifbp ); //prevents MIC error report if hcf_service_nic already consumed all
}
#endif // HCF_TYPE_WPA
- (void)hcf_action( ifbp, HCF_ACT_RX_ACK ); //only 1 shot to get the data, so free the resources in the NIC
- HCFASSERT( rc == HCF_SUCCESS, rc )
- HCFLOGEXIT( HCF_TRACE_RCV_MSG )
+ (void)hcf_action( ifbp, HCF_ACT_RX_ACK ); //only 1 shot to get the data, so free the resources in the NIC
+ HCFASSERT( rc == HCF_SUCCESS, rc );
+ HCFLOGEXIT( HCF_TRACE_RCV_MSG );
return rc;
} // hcf_rcv_msg
-#endif // HCF_DL_ONLY
-#if (HCF_DL_ONLY) == 0
/************************************************************************************************************
-*
-*.MODULE int hcf_send_msg( IFBP ifbp, DESC_STRCT *descp, hcf_16 tx_cntl )
-*.PURPOSE Encapsulate a message and append padding and MIC.
-* non-USB: Transfers the resulting message from Host to NIC and initiates transmission.
-* USB: Transfer resulting message into a flat buffer.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* descp pointer to the DescriptorList or NULL
-* tx_cntl indicates MAC-port and (Hermes) options
-* HFS_TX_CNTL_SPECTRALINK
-* HFS_TX_CNTL_PRIO
-* HFS_TX_CNTL_TX_OK
-* HFS_TX_CNTL_TX_EX
-* HFS_TX_CNTL_TX_DELAY
-* HFS_TX_CNTL_TX_CONT
-* HCF_PORT_0 MAC Port 0 (default)
-* HCF_PORT_1 (AP only) MAC Port 1
-* HCF_PORT_2 (AP only) MAC Port 2
-* HCF_PORT_3 (AP only) MAC Port 3
-* HCF_PORT_4 (AP only) MAC Port 4
-* HCF_PORT_5 (AP only) MAC Port 5
-* HCF_PORT_6 (AP only) MAC Port 6
-*
-*.RETURNS
-* HCF_SUCCESS
-* HCF_ERR_DEFUNCT_..
-* HCF_ERR_TIME_OUT
-*
-*.DESCRIPTION:
-* The Send Message Function embodies 2 functions:
-* o transfers a message (including MAC header) from the provided buffer structure in Host memory to the Transmit
-* Frame Structure (TxFS) in NIC memory.
-* o Issue a send command to the F/W to actually transmit the contents of the TxFS.
-*
-* Control is based on the Resource Indicator IFB_RscInd.
-* The Resource Indicator is maintained by the HCF and should only be interpreted but not changed by the MSF.
-* The MSF must check IFB_RscInd to be non-zero before executing the call to the Send Message Function.
-* When no resources are available, the MSF must handle the queuing of the Transmit frame and check the
-* Resource Indicator periodically after calling hcf_service_nic.
-*
-* The Send Message Functions transfers a message to NIC memory when it is called with a non-NULL descp.
-* Before the Send Message Function is invoked this way, the Resource Indicator (IFB_RscInd) must be checked.
-* If the Resource is not available, Send Message Function execution must be postponed until after processing of
-* a next hcf_service_nic it appears that the Resource has become available.
-* The message is copied from the buffer structure identified by descp to the NIC.
-* Copying stops if a NULL pointer in the next_desc_addr field is reached.
-* Hcf_send_msg does not check for transmit buffer overflow, because the F/W does this protection.
-* In case of a transmit buffer overflow, the surplus which does not fit in the buffer is simply dropped.
-*
-* The Send Message Function activates the F/W to actually send the message to the medium when the
-* HFS_TX_CNTL_TX_DELAY bit of the tx_cntl parameter is not set.
-* If the descp parameter of the current call is non-NULL, the message as represented by descp is send.
-* If the descp parameter of the current call is NULL, and if the preceding call of the Send Message Function had
-* a non-NULL descp and the preceding call had the HFS_TX_CNTL_TX_DELAY bit of tx_cntl set, then the message as
-* represented by the descp of the preceding call is send.
-*
-* Hcf_send_msg supports encapsulation (see HCF_ENCAP) of Ethernet-II frames.
-* An Ethernet-II frame is transferred to the Transmit Frame structure as an 802.3 frame.
-* Hcf_send_msg distinguishes between an 802.3 and an Ethernet-II frame by looking at the data length/type field
-* of the frame. If this field contains a value larger than 1514, the frame is considered to be an Ethernet-II
-* frame, otherwise it is treated as an 802.3 frame.
-* To ease implementation of the HCF, this type/type field must be located in the first descriptor structure,
-* i.e. the 1st fragment must have a size of at least 14 (to contain DestAddr, SrcAddr and Len/Type field).
-* An Ethernet-II frame is encapsulated by inserting a SNAP header between the addressing information and the
-* type field. This insertion is transparent for the MSF.
-* The HCF contains a fixed table that stores a number of types. If the value specified by the type/type field
-* occurs in this table, Bridge Tunnel Encapsulation is used, otherwise RFC1042 encapsulation is used.
-* Bridge Tunnel uses AA AA 03 00 00 F8 as SNAP header,
-* RFC1042 uses AA AA 03 00 00 00 as SNAP header.
-* The table currently contains:
-* 0 0x80F3 AppleTalk Address Resolution Protocol (AARP)
-* 0 0x8137 IPX
-*
-* The algorithm to distinguish between 802.3 and Ethernet-II frames limits the maximum length for frames of
-* 802.3 frames to 1514 bytes.
-* Encapsulation can be suppressed by means of the system constant HCF_ENCAP, e.g. to support proprietary
-* protocols with 802.3 like frames with a size larger than 1514 bytes.
-*
-* In case the HCF encapsulates the frame, the number of bytes that is actually transmitted is determined by the
-* cumulative value of the buf_cntl.buf_dim[0] fields.
-* In case the HCF does not encapsulate the frame, the number of bytes that is actually transmitted is not
-* determined by the cumulative value of the buf_cntl.buf_dim[DESC_CNTL_CNT] fields of the desc_strct's but by
-* the Length field of the 802.3 frame.
-* If there is a conflict between the cumulative value of the buf_cntl.buf_dim[0] fields and the
-* 802.3 Length field the 802.3 Length field determines the number of bytes actually transmitted by the NIC while
-* the cumulative value of the buf_cntl.buf_dim[0] fields determines the position of the MIC, hence a mismatch
-* will result in MIC errors on the Receiving side.
-* Currently this problem is flagged on the Transmit side by an Assert.
-* The following fields of each of the descriptors in the descriptor list must be set by the MSF:
-* o buf_cntl.buf_dim[0]
-* o *next_desc_addr
-* o *buf_addr
-*
-* All bits of the tx_cntl parameter except HFS_TX_CNTL_TX_DELAY and the HCF_PORT# bits are passed to the F/W via
-* the HFS_TX_CNTL field of the TxFS.
-*
-* Note that hcf_send_msg does not detect NIC absence. The MSF is supposed to have its own -platform dependent-
-* way to recognize card removal/insertion.
-* The total system must be robust against card removal and there is no principal difference between card removal
-* just after hcf_send_msg returns but before the actual transmission took place or sometime earlier.
-*
-* Assert fails if
-* - ifbp has a recognizable out-of-range value
-* - descp is a NULL pointer
-* - no resources for PIF available.
-* - Interrupts are enabled.
-* - reentrancy, may be caused by calling hcf_functions without adequate protection
-* against NIC interrupts or multi-threading.
-*
-*.DIAGRAM
-*4: for the normal case (i.e. no HFS_TX_CNTL_TX_DELAY option active), a fid is acquired via the
-* routine get_fid. If no FID is acquired, the remainder is skipped without an error notification. After
-* all, the MSF is not supposed to call hcf_send_msg when no Resource is available.
-*7: The ControlField of the TxFS is written. Since put_frag can only return the fatal Defunct or "No NIC", the
-* return status can be ignored because when it fails, cmd_wait will fail as well. (see also the note on the
-* need for a return code below).
-* Note that HFS_TX_CNTL has different values for H-I, H-I/SSN and H-II and HFS_ADDR_DEST has different
-* values for H-I (regardless of SSN) and H-II.
-* By writing 17, 1 or 2 ( implying 16, 0 or 1 garbage word after HFS_TX_CNTL) the BAP just gets to
-* HFS_ADDR_DEST for H-I, H-I/SSN and H-II respectively.
-*10: if neither encapsulation nor MIC calculation is needed, splitting the first fragment in two does not
-* really help but it makes the flow easier to follow to do not optimize on this difference
-*
-* hcf_send_msg checks whether the frame is an Ethernet-II rather than an "official" 802.3 frame.
-* The E-II check is based on the length/type field in the MAC header. If this field has a value larger than
-* 1500, E-II is assumed. The implementation of this test fails if the length/type field is not in the first
-* descriptor. If E-II is recognized, a SNAP header is inserted. This SNAP header represents either RFC1042
-* or Bridge-Tunnel encapsulation, depending on the return status of the support routine hcf_encap.
-*
-*.NOTICE
-* hcf_send_msg leaves the responsibility to only send messages on enabled ports at the MSF level.
-* This is considered the strategy which is sufficiently adequate for all "robust" MSFs, have the least
-* processor utilization and being still acceptable robust at the WCI !!!!!
-*
-* hcf_send_msg does not NEED a return value to report NIC absence or removal during the execution of
-* hcf_send_msg(), because the MSF and higher layers must be able to cope anyway with the NIC being removed
-* after a successful completion of hcf_send_msg() but before the actual transmission took place.
-* To accommodate user expectations the current implementation does report NIC absence.
-* Defunct blocks all NIC access and will (also) be reported on a number of other calls.
-*
-* hcf_send_msg does not check for transmit buffer overflow because the Hermes does this protection.
-* In case of a transmit buffer overflow, the surplus which does not fit in the buffer is simply dropped.
-* Note that this possibly results in the transmission of incomplete frames.
-*
-* After some deliberation with F/W team, it is decided that - being in the twilight zone of not knowing
-* whether the problem at hand is an MSF bug, HCF buf, F/W bug, H/W malfunction or even something else - there
-* is no "best thing to do" in case of a failing send, hence the HCF considers the TxFID ownership to be taken
-* over by the F/W and hopes for an Allocate event in due time
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.MODULE int hcf_send_msg( IFBP ifbp, DESC_STRCT *descp, hcf_16 tx_cntl )
+ *.PURPOSE Encapsulate a message and append padding and MIC.
+ * non-USB: Transfers the resulting message from Host to NIC and initiates transmission.
+ * USB: Transfer resulting message into a flat buffer.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * descp pointer to the DescriptorList or NULL
+ * tx_cntl indicates MAC-port and (Hermes) options
+ * HFS_TX_CNTL_SPECTRALINK
+ * HFS_TX_CNTL_PRIO
+ * HFS_TX_CNTL_TX_OK
+ * HFS_TX_CNTL_TX_EX
+ * HFS_TX_CNTL_TX_DELAY
+ * HFS_TX_CNTL_TX_CONT
+ * HCF_PORT_0 MAC Port 0 (default)
+ * HCF_PORT_1 (AP only) MAC Port 1
+ * HCF_PORT_2 (AP only) MAC Port 2
+ * HCF_PORT_3 (AP only) MAC Port 3
+ * HCF_PORT_4 (AP only) MAC Port 4
+ * HCF_PORT_5 (AP only) MAC Port 5
+ * HCF_PORT_6 (AP only) MAC Port 6
+ *
+ *.RETURNS
+ * HCF_SUCCESS
+ * HCF_ERR_DEFUNCT_..
+ * HCF_ERR_TIME_OUT
+ *
+ *.DESCRIPTION:
+ * The Send Message Function embodies 2 functions:
+ * o transfers a message (including MAC header) from the provided buffer structure in Host memory to the Transmit
+ * Frame Structure (TxFS) in NIC memory.
+ * o Issue a send command to the F/W to actually transmit the contents of the TxFS.
+ *
+ * Control is based on the Resource Indicator IFB_RscInd.
+ * The Resource Indicator is maintained by the HCF and should only be interpreted but not changed by the MSF.
+ * The MSF must check IFB_RscInd to be non-zero before executing the call to the Send Message Function.
+ * When no resources are available, the MSF must handle the queuing of the Transmit frame and check the
+ * Resource Indicator periodically after calling hcf_service_nic.
+ *
+ * The Send Message Functions transfers a message to NIC memory when it is called with a non-NULL descp.
+ * Before the Send Message Function is invoked this way, the Resource Indicator (IFB_RscInd) must be checked.
+ * If the Resource is not available, Send Message Function execution must be postponed until after processing of
+ * a next hcf_service_nic it appears that the Resource has become available.
+ * The message is copied from the buffer structure identified by descp to the NIC.
+ * Copying stops if a NULL pointer in the next_desc_addr field is reached.
+ * Hcf_send_msg does not check for transmit buffer overflow, because the F/W does this protection.
+ * In case of a transmit buffer overflow, the surplus which does not fit in the buffer is simply dropped.
+ *
+ * The Send Message Function activates the F/W to actually send the message to the medium when the
+ * HFS_TX_CNTL_TX_DELAY bit of the tx_cntl parameter is not set.
+ * If the descp parameter of the current call is non-NULL, the message as represented by descp is send.
+ * If the descp parameter of the current call is NULL, and if the preceding call of the Send Message Function had
+ * a non-NULL descp and the preceding call had the HFS_TX_CNTL_TX_DELAY bit of tx_cntl set, then the message as
+ * represented by the descp of the preceding call is send.
+ *
+ * Hcf_send_msg supports encapsulation (see HCF_ENCAP) of Ethernet-II frames.
+ * An Ethernet-II frame is transferred to the Transmit Frame structure as an 802.3 frame.
+ * Hcf_send_msg distinguishes between an 802.3 and an Ethernet-II frame by looking at the data length/type field
+ * of the frame. If this field contains a value larger than 1514, the frame is considered to be an Ethernet-II
+ * frame, otherwise it is treated as an 802.3 frame.
+ * To ease implementation of the HCF, this type/type field must be located in the first descriptor structure,
+ * i.e. the 1st fragment must have a size of at least 14 (to contain DestAddr, SrcAddr and Len/Type field).
+ * An Ethernet-II frame is encapsulated by inserting a SNAP header between the addressing information and the
+ * type field. This insertion is transparent for the MSF.
+ * The HCF contains a fixed table that stores a number of types. If the value specified by the type/type field
+ * occurs in this table, Bridge Tunnel Encapsulation is used, otherwise RFC1042 encapsulation is used.
+ * Bridge Tunnel uses AA AA 03 00 00 F8 as SNAP header,
+ * RFC1042 uses AA AA 03 00 00 00 as SNAP header.
+ * The table currently contains:
+ * 0 0x80F3 AppleTalk Address Resolution Protocol (AARP)
+ * 0 0x8137 IPX
+ *
+ * The algorithm to distinguish between 802.3 and Ethernet-II frames limits the maximum length for frames of
+ * 802.3 frames to 1514 bytes.
+ * Encapsulation can be suppressed by means of the system constant HCF_ENCAP, e.g. to support proprietary
+ * protocols with 802.3 like frames with a size larger than 1514 bytes.
+ *
+ * In case the HCF encapsulates the frame, the number of bytes that is actually transmitted is determined by the
+ * cumulative value of the buf_cntl.buf_dim[0] fields.
+ * In case the HCF does not encapsulate the frame, the number of bytes that is actually transmitted is not
+ * determined by the cumulative value of the buf_cntl.buf_dim[DESC_CNTL_CNT] fields of the desc_strct's but by
+ * the Length field of the 802.3 frame.
+ * If there is a conflict between the cumulative value of the buf_cntl.buf_dim[0] fields and the
+ * 802.3 Length field the 802.3 Length field determines the number of bytes actually transmitted by the NIC while
+ * the cumulative value of the buf_cntl.buf_dim[0] fields determines the position of the MIC, hence a mismatch
+ * will result in MIC errors on the Receiving side.
+ * Currently this problem is flagged on the Transmit side by an Assert.
+ * The following fields of each of the descriptors in the descriptor list must be set by the MSF:
+ * o buf_cntl.buf_dim[0]
+ * o *next_desc_addr
+ * o *buf_addr
+ *
+ * All bits of the tx_cntl parameter except HFS_TX_CNTL_TX_DELAY and the HCF_PORT# bits are passed to the F/W via
+ * the HFS_TX_CNTL field of the TxFS.
+ *
+ * Note that hcf_send_msg does not detect NIC absence. The MSF is supposed to have its own -platform dependent-
+ * way to recognize card removal/insertion.
+ * The total system must be robust against card removal and there is no principal difference between card removal
+ * just after hcf_send_msg returns but before the actual transmission took place or sometime earlier.
+ *
+ * Assert fails if
+ * - ifbp has a recognizable out-of-range value
+ * - descp is a NULL pointer
+ * - no resources for PIF available.
+ * - Interrupts are enabled.
+ * - reentrancy, may be caused by calling hcf_functions without adequate protection
+ * against NIC interrupts or multi-threading.
+ *
+ *.DIAGRAM
+ *4: for the normal case (i.e. no HFS_TX_CNTL_TX_DELAY option active), a fid is acquired via the
+ * routine get_fid. If no FID is acquired, the remainder is skipped without an error notification. After
+ * all, the MSF is not supposed to call hcf_send_msg when no Resource is available.
+ *7: The ControlField of the TxFS is written. Since put_frag can only return the fatal Defunct or "No NIC", the
+ * return status can be ignored because when it fails, cmd_wait will fail as well. (see also the note on the
+ * need for a return code below).
+ * Note that HFS_TX_CNTL has different values for H-I, H-I/WPA and H-II and HFS_ADDR_DEST has different
+ * values for H-I (regardless of WPA) and H-II.
+ * By writing 17, 1 or 2 ( implying 16, 0 or 1 garbage word after HFS_TX_CNTL) the BAP just gets to
+ * HFS_ADDR_DEST for H-I, H-I/WPA and H-II respectively.
+ *10: if neither encapsulation nor MIC calculation is needed, splitting the first fragment in two does not
+ * really help but it makes the flow easier to follow to do not optimize on this difference
+ *
+ * hcf_send_msg checks whether the frame is an Ethernet-II rather than an "official" 802.3 frame.
+ * The E-II check is based on the length/type field in the MAC header. If this field has a value larger than
+ * 1500, E-II is assumed. The implementation of this test fails if the length/type field is not in the first
+ * descriptor. If E-II is recognized, a SNAP header is inserted. This SNAP header represents either RFC1042
+ * or Bridge-Tunnel encapsulation, depending on the return status of the support routine hcf_encap.
+ *
+ *.NOTICE
+ * hcf_send_msg leaves the responsibility to only send messages on enabled ports at the MSF level.
+ * This is considered the strategy which is sufficiently adequate for all "robust" MSFs, have the least
+ * processor utilization and being still acceptable robust at the WCI !!!!!
+ *
+ * hcf_send_msg does not NEED a return value to report NIC absence or removal during the execution of
+ * hcf_send_msg(), because the MSF and higher layers must be able to cope anyway with the NIC being removed
+ * after a successful completion of hcf_send_msg() but before the actual transmission took place.
+ * To accommodate user expectations the current implementation does report NIC absence.
+ * Defunct blocks all NIC access and will (also) be reported on a number of other calls.
+ *
+ * hcf_send_msg does not check for transmit buffer overflow because the Hermes does this protection.
+ * In case of a transmit buffer overflow, the surplus which does not fit in the buffer is simply dropped.
+ * Note that this possibly results in the transmission of incomplete frames.
+ *
+ * After some deliberation with F/W team, it is decided that - being in the twilight zone of not knowing
+ * whether the problem at hand is an MSF bug, HCF buf, F/W bug, H/W malfunction or even something else - there
+ * is no "best thing to do" in case of a failing send, hence the HCF considers the TxFID ownership to be taken
+ * over by the F/W and hopes for an Allocate event in due time
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
int
hcf_send_msg( IFBP ifbp, DESC_STRCT *descp, hcf_16 tx_cntl )
{
-int rc = HCF_SUCCESS;
-DESC_STRCT *p /* = descp*/; //working pointer
-hcf_16 len; // total byte count
-hcf_16 i;
+ int rc = HCF_SUCCESS;
+ DESC_STRCT *p /* = descp*/; //working pointer
+ hcf_16 len; // total byte count
+ hcf_16 i;
-hcf_16 fid = 0;
+ hcf_16 fid = 0;
- HCFASSERT( ifbp->IFB_RscInd || descp == NULL, ifbp->IFB_RscInd )
- HCFASSERT( (ifbp->IFB_CntlOpt & USE_DMA) == 0, 0xDADB )
+ HCFASSERT( ifbp->IFB_RscInd || descp == NULL, ifbp->IFB_RscInd );
+ HCFASSERT( (ifbp->IFB_CntlOpt & USE_DMA) == 0, 0xDADB );
- HCFLOGENTRY( HCF_TRACE_SEND_MSG, tx_cntl )
- HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic )
- HCFASSERT_INT
+ HCFLOGENTRY( HCF_TRACE_SEND_MSG, tx_cntl );
+ HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic );
+ HCFASSERT_INT;
/* obnoxious c:/hcf/hcf.c(1480) : warning C4769: conversion of near pointer to long integer,
* so skip */
- HCFASSERT( ((hcf_32)descp & 3 ) == 0, (hcf_32)descp )
+ HCFASSERT( ((hcf_32)descp & 3 ) == 0, (hcf_32)descp );
#if HCF_ASSERT
-{ int x = ifbp->IFB_FWIdentity.comp_id == COMP_ID_FW_AP ? tx_cntl & ~HFS_TX_CNTL_PORT : tx_cntl;
- HCFASSERT( (x & ~HCF_TX_CNTL_MASK ) == 0, tx_cntl )
-}
+ { int x = ifbp->IFB_FWIdentity.comp_id == COMP_ID_FW_AP ? tx_cntl & ~HFS_TX_CNTL_PORT : tx_cntl;
+ HCFASSERT( (x & ~HCF_TX_CNTL_MASK ) == 0, tx_cntl );
+ }
#endif // HCF_ASSERT
- if ( descp ) ifbp->IFB_TxFID = 0; //cancel a pre-put message
-
-#if (HCF_EXT) & HCF_EXT_TX_CONT // Continuous transmit test
- if ( tx_cntl == HFS_TX_CNTL_TX_CONT ) {
- fid = get_fid(ifbp);
- if (fid != 0 ) {
- //setup BAP to begin of TxFS
- (void)setup_bap( ifbp, fid, 0, IO_OUT );
- //copy all the fragments in a transparent fashion
- for ( p = descp; p; p = p->next_desc_addr ) {
- /* obnoxious warning C4769: conversion of near pointer to long integer */
- HCFASSERT( ((hcf_32)p & 3 ) == 0, (hcf_32)p )
- put_frag( ifbp, p->buf_addr, p->BUF_CNT BE_PAR(0) );
- }
- rc = cmd_exe( ifbp, HCMD_THESEUS | HCMD_BUSY | HCMD_STARTPREAMBLE, fid );
- if ( ifbp->IFB_RscInd == 0 ) {
- ifbp->IFB_RscInd = get_fid( ifbp );
- }
- }
- // een slecht voorbeeld doet goed volgen ;?
- HCFLOGEXIT( HCF_TRACE_SEND_MSG )
- return rc;
- }
-#endif // HCF_EXT_TX_CONT
- /* the following initialization code is redundant for a pre-put message
- * but moving it inside the "if fid" logic makes the merging with the
- * USB flow awkward
- */
+ if ( descp ) ifbp->IFB_TxFID = 0; //cancel a pre-put message
+
+ /* the following initialization code is redundant for a pre-put message
+ * but moving it inside the "if fid" logic makes the merging with the
+ * USB flow awkward
+ */
#if (HCF_TYPE) & HCF_TYPE_WPA
tx_cntl |= ifbp->IFB_MICTxCntl;
#endif // HCF_TYPE_WPA
fid = ifbp->IFB_TxFID;
- if (fid == 0 && ( fid = get_fid( ifbp ) ) != 0 ) /* 4 */
- /* skip the next compound statement if:
- - pre-put message or
- - no fid available (which should never occur if the MSF adheres to the WCI)
- */
- { // to match the closing curly bracket of above "if" in case of HCF_TYPE_USB
- //calculate total length ;? superfluous unless CCX or Encapsulation
+ if (fid == 0 && ( fid = get_fid( ifbp ) ) != 0 ) /* 4 */
+ /* skip the next compound statement if:
+ - pre-put message or
+ - no fid available (which should never occur if the MSF adheres to the WCI)
+ */
+ { // to match the closing curly bracket of above "if" in case of HCF_TYPE_USB
+ //calculate total length ;? superfluous unless CCX or Encapsulation
len = 0;
p = descp;
do len += p->BUF_CNT; while ( ( p = p->next_desc_addr ) != NULL );
p = descp;
-//;? HCFASSERT( len <= HCF_MAX_MSG, len )
-/*7*/ (void)setup_bap( ifbp, fid, HFS_TX_CNTL, IO_OUT );
+//;? HCFASSERT( len <= HCF_MAX_MSG, len );
+ /*7*/ (void)setup_bap( ifbp, fid, HFS_TX_CNTL, IO_OUT );
#if (HCF_TYPE) & HCF_TYPE_TX_DELAY
- HCFASSERT( ( descp != NULL ) ^ ( tx_cntl & HFS_TX_CNTL_TX_DELAY ), tx_cntl )
+ HCFASSERT( ( descp != NULL ) ^ ( tx_cntl & HFS_TX_CNTL_TX_DELAY ), tx_cntl );
if ( tx_cntl & HFS_TX_CNTL_TX_DELAY ) {
- tx_cntl &= ~HFS_TX_CNTL_TX_DELAY; //!!HFS_TX_CNTL_TX_DELAY no longer available
+ tx_cntl &= ~HFS_TX_CNTL_TX_DELAY; //!!HFS_TX_CNTL_TX_DELAY no longer available
ifbp->IFB_TxFID = fid;
- fid = 0; //!!fid no longer available, be careful when modifying code
+ fid = 0; //!!fid no longer available, be careful when modifying code
}
#endif // HCF_TYPE_TX_DELAY
OPW( HREG_DATA_1, tx_cntl ) ;
OPW( HREG_DATA_1, 0 );
-#if ! ( (HCF_TYPE) & HCF_TYPE_CCX )
- HCFASSERT( p->BUF_CNT >= 14, p->BUF_CNT )
- /* assume DestAddr/SrcAddr/Len/Type ALWAYS contained in 1st fragment
- * otherwise life gets too cumbersome for MIC and Encapsulation !!!!!!!!
- if ( p->BUF_CNT >= 14 ) { alternatively: add a safety escape !!!!!!!!!!!! } */
-#endif // HCF_TYPE_CCX
- CALC_TX_MIC( NULL, -1 ); //initialize MIC
-/*10*/ put_frag( ifbp, p->buf_addr, HCF_DASA_SIZE BE_PAR(0) ); //write DA, SA with MIC calculation
- CALC_TX_MIC( p->buf_addr, HCF_DASA_SIZE ); //MIC over DA, SA
- CALC_TX_MIC( null_addr, 4 ); //MIC over (virtual) priority field
-#if (HCF_TYPE) & HCF_TYPE_CCX
- //!!be careful do not use positive test on HCF_ACT_CCX_OFF, because IFB_CKIPStat is initially 0
- if(( ifbp->IFB_CKIPStat == HCF_ACT_CCX_ON ) ||
- ((GET_BUF_CNT(p) >= 20 ) && ( ifbp->IFB_CKIPStat == HCF_ACT_CCX_OFF ) &&
- (p->buf_addr[12] == 0xAA) && (p->buf_addr[13] == 0xAA) &&
- (p->buf_addr[14] == 0x03) && (p->buf_addr[15] == 0x00) &&
- (p->buf_addr[16] == 0x40) && (p->buf_addr[17] == 0x96) &&
- (p->buf_addr[18] == 0x00) && (p->buf_addr[19] == 0x00)))
- {
- i = HCF_DASA_SIZE;
-
- OPW( HREG_DATA_1, CNV_SHORT_TO_BIG( len - i ));
-
- /* need to send out the remainder of the fragment */
- put_frag( ifbp, &p->buf_addr[i], GET_BUF_CNT(p) - i BE_PAR(0) );
- }
- else
-#endif // HCF_TYPE_CCX
- {
- //if encapsulation needed
+
+ HCFASSERT( p->BUF_CNT >= 14, p->BUF_CNT );
+ /* assume DestAddr/SrcAddr/Len/Type ALWAYS contained in 1st fragment
+ * otherwise life gets too cumbersome for MIC and Encapsulation !!!!!!!!
+ if ( p->BUF_CNT >= 14 ) { alternatively: add a safety escape !!!!!!!!!!!! } */
+
+ CALC_TX_MIC( NULL, -1 ); //initialize MIC
+ /*10*/ put_frag( ifbp, p->buf_addr, HCF_DASA_SIZE BE_PAR(0) ); //write DA, SA with MIC calculation
+ CALC_TX_MIC( p->buf_addr, HCF_DASA_SIZE ); //MIC over DA, SA
+ CALC_TX_MIC( null_addr, 4 ); //MIC over (virtual) priority field
+
+ //if encapsulation needed
#if (HCF_ENCAP) == HCF_ENC
- //write length (with SNAP-header,Type, without //DA,SA,Length ) no MIC calc.
- if ( ( snap_header[sizeof(snap_header)-1] = hcf_encap( &p->buf_addr[HCF_DASA_SIZE] ) ) != ENC_NONE ) {
- OPW( HREG_DATA_1, CNV_END_SHORT( len + (sizeof(snap_header) + 2) - ( 2*6 + 2 ) ) );
- //write splice with MIC calculation
- put_frag( ifbp, snap_header, sizeof(snap_header) BE_PAR(0) );
- CALC_TX_MIC( snap_header, sizeof(snap_header) ); //MIC over 6 byte SNAP
- i = HCF_DASA_SIZE;
- } else
+ //write length (with SNAP-header,Type, without //DA,SA,Length ) no MIC calc.
+ if ( ( snap_header[sizeof(snap_header)-1] = hcf_encap( &p->buf_addr[HCF_DASA_SIZE] ) ) != ENC_NONE ) {
+ OPW( HREG_DATA_1, CNV_END_SHORT( len + (sizeof(snap_header) + 2) - ( 2*6 + 2 ) ) );
+ //write splice with MIC calculation
+ put_frag( ifbp, snap_header, sizeof(snap_header) BE_PAR(0) );
+ CALC_TX_MIC( snap_header, sizeof(snap_header) ); //MIC over 6 byte SNAP
+ i = HCF_DASA_SIZE;
+ } else
#endif // HCF_ENC
- {
- OPW( HREG_DATA_1, *(wci_recordp)&p->buf_addr[HCF_DASA_SIZE] );
- i = 14;
- }
- //complete 1st fragment starting with Type with MIC calculation
- put_frag( ifbp, &p->buf_addr[i], p->BUF_CNT - i BE_PAR(0) );
- CALC_TX_MIC( &p->buf_addr[i], p->BUF_CNT - i );
+ {
+ OPW( HREG_DATA_1, *(wci_recordp)&p->buf_addr[HCF_DASA_SIZE] );
+ i = 14;
}
- //do the remaining fragments with MIC calculation
+ //complete 1st fragment starting with Type with MIC calculation
+ put_frag( ifbp, &p->buf_addr[i], p->BUF_CNT - i BE_PAR(0) );
+ CALC_TX_MIC( &p->buf_addr[i], p->BUF_CNT - i );
+
+ //do the remaining fragments with MIC calculation
while ( ( p = p->next_desc_addr ) != NULL ) {
/* obnoxious c:/hcf/hcf.c(1480) : warning C4769: conversion of near pointer to long integer,
* so skip */
- HCFASSERT( ((hcf_32)p & 3 ) == 0, (hcf_32)p )
+ HCFASSERT( ((hcf_32)p & 3 ) == 0, (hcf_32)p );
put_frag( ifbp, p->buf_addr, p->BUF_CNT BE_PAR(0) );
CALC_TX_MIC( p->buf_addr, p->BUF_CNT );
}
- //pad message, finalize MIC calculation and write MIC to NIC
+ //pad message, finalize MIC calculation and write MIC to NIC
put_frag_finalize( ifbp );
}
if ( fid ) {
-/*16*/ rc = cmd_exe( ifbp, HCMD_BUSY | HCMD_TX | HCMD_RECL, fid );
+ /*16*/ rc = cmd_exe( ifbp, HCMD_BUSY | HCMD_TX | HCMD_RECL, fid );
ifbp->IFB_TxFID = 0;
- /* probably this (i.e. no RscInd AND "HREG_EV_ALLOC") at this point in time occurs so infrequent,
- * that it might just as well be acceptable to skip this
- * "optimization" code and handle that additional interrupt once in a while
- */
+ /* probably this (i.e. no RscInd AND "HREG_EV_ALLOC") at this point in time occurs so infrequent,
+ * that it might just as well be acceptable to skip this
+ * "optimization" code and handle that additional interrupt once in a while
+ */
// 180 degree error in logic ;? #if ALLOC_15
-/*20*/ if ( ifbp->IFB_RscInd == 0 ) {
+ /*20*/ if ( ifbp->IFB_RscInd == 0 ) {
ifbp->IFB_RscInd = get_fid( ifbp );
}
// #endif // ALLOC_15
}
-// HCFASSERT( level::ifbp->IFB_RscInd, ifbp->IFB_RscInd )
- HCFLOGEXIT( HCF_TRACE_SEND_MSG )
+// HCFASSERT( level::ifbp->IFB_RscInd, ifbp->IFB_RscInd );
+ HCFLOGEXIT( HCF_TRACE_SEND_MSG );
return rc;
} // hcf_send_msg
-#endif // HCF_DL_ONLY
-#if (HCF_DL_ONLY) == 0
/************************************************************************************************************
-*
-*.MODULE int hcf_service_nic( IFBP ifbp, wci_bufp bufp, unsigned int len )
-*.PURPOSE Services (most) NIC events.
-* Provides received message
-* Provides status information.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* In non-DMA mode:
-* bufp address of char buffer, sufficiently large to hold the first part of the RxFS up through HFS_TYPE
-* len length in bytes of buffer specified by bufp
-* value between HFS_TYPE + 2 and HFS_ADDR_DEST + HCF_MAX_MSG
-*
-*.RETURNS
-* HCF_SUCCESS
-* HCF_ERR_MIC message contains an erroneous MIC (only if frame fits completely in bufp)
-*
-*.DESCRIPTION
-*
-* MSF-accessible fields of Result Block
-* - IFB_RxLen 0 or Frame size.
-* - IFB_MBInfoLen 0 or the L-field of the oldest MBIB.
-* - IFB_RscInd
-* - IFB_HCF_Tallies updated if a corresponding event occurred.
-* - IFB_NIC_Tallies updated if a Tally Info frame received from the NIC.
-* - IFB_DmaPackets
-* - IFB_TxFsStat
-* - IFB_TxFsSwSup
-* - IFB_LinkStat reflects new link status or 0x0000 if no change relative to previous hcf_service_nic call.
+ *
+ *.MODULE int hcf_service_nic( IFBP ifbp, wci_bufp bufp, unsigned int len )
+ *.PURPOSE Services (most) NIC events.
+ * Provides received message
+ * Provides status information.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * In non-DMA mode:
+ * bufp address of char buffer, sufficiently large to hold the first part of the RxFS up through HFS_TYPE
+ * len length in bytes of buffer specified by bufp
+ * value between HFS_TYPE + 2 and HFS_ADDR_DEST + HCF_MAX_MSG
+ *
+ *.RETURNS
+ * HCF_SUCCESS
+ * HCF_ERR_MIC message contains an erroneous MIC (only if frame fits completely in bufp)
+ *
+ *.DESCRIPTION
+ *
+ * MSF-accessible fields of Result Block
+ * - IFB_RxLen 0 or Frame size.
+ * - IFB_MBInfoLen 0 or the L-field of the oldest MBIB.
+ * - IFB_RscInd
+ * - IFB_HCF_Tallies updated if a corresponding event occurred.
+ * - IFB_NIC_Tallies updated if a Tally Info frame received from the NIC.
+ * - IFB_DmaPackets
+ * - IFB_TxFsStat
+ * - IFB_TxFsSwSup
+ * - IFB_LinkStat reflects new link status or 0x0000 if no change relative to previous hcf_service_nic call.
or
-* - IFB_LinkStat link status, 0x8000 reflects change relative to previous hcf_service_nic call.
+* - IFB_LinkStat link status, 0x8000 reflects change relative to previous hcf_service_nic call.
*
* When IFB_MBInfoLen is non-zero, at least one MBIB is available.
*
-* IFB_RxLen reflects the number of received bytes in 802.3 view (Including DestAddr, SrcAddr and Length,
+* IFB_RxLen reflects the number of received bytes in 802.3 view (Including DestAddr, SrcAddr and Length,
* excluding MIC-padding, MIC and sum check) of active Rx Frame Structure. If no Rx Data s available, IFB_RxLen
* equals 0x0000.
* Repeated execution causes the Service NIC Function to provide information about subsequently received
@@ -2891,150 +2770,150 @@ or
* specific requirements of that environment to translate the interrupt strategy to a polled strategy.
*
* hcf_service_nic services the following Hermes events:
-* - HREG_EV_INFO Asynchronous Information Frame
-* - HREG_EV_INFO_DROP WMAC did not have sufficient RAM to build Unsolicited Information Frame
-* - HREG_EV_TX_EXC (if applicable, i.e. selected via HCF_EXT_INT_TX_EX bit of HCF_EXT)
-* - HREG_EV_SLEEP_REQ (if applicable, i.e. selected via HCF_DDS/HCF_CDS bit of HCF_SLEEP)
+* - HREG_EV_INFO Asynchronous Information Frame
+* - HREG_EV_INFO_DROP WMAC did not have sufficient RAM to build Unsolicited Information Frame
+* - HREG_EV_TX_EXC (if applicable, i.e. selected via HCF_EXT_INT_TX_EX bit of HCF_EXT)
+* - HREG_EV_SLEEP_REQ (if applicable, i.e. selected via HCF_DDS/HCF_CDS bit of HCF_SLEEP)
* ** in non_DMA mode
-* - HREG_EV_ALLOC Asynchronous part of Allocation/Reclaim completed while out of resources at
-* completion of hcf_send_msg/notify
-* - HREG_EV_RX the detection of the availability of received messages
-* including WaveLAN Management Protocol (WMP) message processing
+* - HREG_EV_ALLOC Asynchronous part of Allocation/Reclaim completed while out of resources at
+* completion of hcf_send_msg/notify
+* - HREG_EV_RX the detection of the availability of received messages
+* including WaveLAN Management Protocol (WMP) message processing
* ** in DMA mode
* - HREG_EV_RDMAD
* - HREG_EV_TDMAD
*!! hcf_service_nic does not service the following Hermes events:
-*!! HREG_EV_TX (the "OK" Tx Event) is no longer supported by the WCI, if it occurs it is unclear
-*!! what the cause is, so no meaningful strategy is available. Not acking the bit is
-*!! probably the best help that can be given to the debugger.
-*!! HREG_EV_CMD handled in cmd_wait.
-*!! HREG_EV_FW_DMA (i.e. HREG_EV_RXDMA, HREG_EV_TXDMA and_EV_LPESC) are either not used or used
-*!! between the F/W and the DMA engine.
-*!! HREG_EV_ACK_REG_READY is only applicable for H-II (i.e. not HII.5 and up, see DAWA)
-*
-* If, in non-DMA mode, a Rx message is available, its length is reflected by the IFB_RxLen field of the IFB.
-* This length reflects the data itself and the Destination Address, Source Address and DataLength/Type field
-* but not the SNAP-header in case of decapsulation by the HCF. If no message is available, IFB_RxLen is
-* zero. Former versions of the HCF handled WMP messages and supported a "monitor" mode in hcf_service_nic,
-* which deposited certain or all Rx messages in the MailBox. The responsibility to handle these frames is
-* moved to the MSF. The HCF offers as supports hcf_put_info with CFG_MB_INFO as parameter to emulate the old
-* implementation under control of the MSF.
+*!! HREG_EV_TX (the "OK" Tx Event) is no longer supported by the WCI, if it occurs it is unclear
+*!! what the cause is, so no meaningful strategy is available. Not acking the bit is
+*!! probably the best help that can be given to the debugger.
+*!! HREG_EV_CMD handled in cmd_wait.
+*!! HREG_EV_FW_DMA (i.e. HREG_EV_RXDMA, HREG_EV_TXDMA and_EV_LPESC) are either not used or used
+*!! between the F/W and the DMA engine.
+*!! HREG_EV_ACK_REG_READY is only applicable for H-II (i.e. not HII.5 and up, see DAWA)
+*
+* If, in non-DMA mode, a Rx message is available, its length is reflected by the IFB_RxLen field of the IFB.
+* This length reflects the data itself and the Destination Address, Source Address and DataLength/Type field
+* but not the SNAP-header in case of decapsulation by the HCF. If no message is available, IFB_RxLen is
+* zero. Former versions of the HCF handled WMP messages and supported a "monitor" mode in hcf_service_nic,
+* which deposited certain or all Rx messages in the MailBox. The responsibility to handle these frames is
+* moved to the MSF. The HCF offers as supports hcf_put_info with CFG_MB_INFO as parameter to emulate the old
+* implementation under control of the MSF.
*
* **Rx Buffer free strategy
-* When hcf_service_nic reports the availability of a non-DMA message, the MSF can access that message by
-* means of hcf_rcv_msg. It must be prevented that the LAN Controller writes new data in the NIC buffer
-* before the MSF is finished with the current message. The NIC buffer is returned to the LAN Controller
-* when:
-* - the complete frame fits in the lookahead buffer or
-* - hcf_rcv_msg is called or
-* - hcf_action with HCF_ACT_RX is called or
-* - hcf_service_nic is called again
-* It can be reasoned that hcf_action( INT_ON ) should not be given before the MSF has completely processed
-* a reported Rx-frame. The reason is that the INT_ON action is guaranteed to cause a (Rx-)interrupt (the
-* MSF is processing a Rx-frame, hence the Rx-event bit in the Hermes register must be active). This
-* interrupt will cause hcf_service_nic to be called, which will cause the ack-ing of the "last" Rx-event
-* to the Hermes, causing the Hermes to discard the associated NIC RAM buffer.
+* When hcf_service_nic reports the availability of a non-DMA message, the MSF can access that message by
+* means of hcf_rcv_msg. It must be prevented that the LAN Controller writes new data in the NIC buffer
+* before the MSF is finished with the current message. The NIC buffer is returned to the LAN Controller
+* when:
+* - the complete frame fits in the lookahead buffer or
+* - hcf_rcv_msg is called or
+* - hcf_action with HCF_ACT_RX is called or
+* - hcf_service_nic is called again
+* It can be reasoned that hcf_action( INT_ON ) should not be given before the MSF has completely processed
+* a reported Rx-frame. The reason is that the INT_ON action is guaranteed to cause a (Rx-)interrupt (the
+* MSF is processing a Rx-frame, hence the Rx-event bit in the Hermes register must be active). This
+* interrupt will cause hcf_service_nic to be called, which will cause the ack-ing of the "last" Rx-event
+* to the Hermes, causing the Hermes to discard the associated NIC RAM buffer.
* Assert fails if
* - ifbp is zero or other recognizable out-of-range value.
* - hcf_service_nic is called without a prior call to hcf_connect.
* - interrupts are enabled.
* - reentrancy, may be caused by calling hcf_functions without adequate protection
-* against NIC interrupts or multi-threading.
+* against NIC interrupts or multi-threading.
*
*
*.DIAGRAM
-*1: IFB_LinkStat is cleared, if a LinkStatus frame is received, IFB_LinkStat will be updated accordingly
-* by isr_info.
+*1: IFB_LinkStat is cleared, if a LinkStatus frame is received, IFB_LinkStat will be updated accordingly
+* by isr_info.
or
-*1: IFB_LinkStat change indication is cleared. If a LinkStatus frame is received, IFB_LinkStat will be updated
-* accordingly by isr_info.
+*1: IFB_LinkStat change indication is cleared. If a LinkStatus frame is received, IFB_LinkStat will be updated
+* accordingly by isr_info.
*2: IFB_RxLen must be cleared before the NIC presence check otherwise:
-* - this value may stay non-zero if the NIC is pulled out at an inconvenient moment.
-* - the RxAck on a zero-FID needs a zero-value for IFB_RxLen to work
-* Note that as side-effect of the hcf_action call, the remainder of Rx related info is re-initialized as
-* well.
-*4: In case of Defunct mode, the information supplied by Hermes is unreliable, so the body of
-* hcf_service_nic is skipped. Since hcf_cntl turns into a NOP if Primary or Station F/W is incompatible,
-* hcf_service_nic is also skipped in those cases.
-* To prevent that hcf_service_nic reports bogus information to the MSF with all - possibly difficult to
-* debug - undesirable side effects, it is paramount to check the NIC presence. In former days the presence
-* test was based on the Hermes register HREG_SW_0. Since in HCF_ACT_INT_OFF is chosen for strategy based on
-* HREG_EV_STAT, this is now also used in hcf_service_nic. The motivation to change strategy is partly
-* due to inconsistent F/W implementations with respect to HREG_SW_0 manipulation around reset and download.
-* Note that in polled environments Card Removal is not detected by INT_OFF which makes the check in
-* hcf_service_nic even more important.
-*8: The event status register of the Hermes is sampled
-* The assert checks for unexpected events ;?????????????????????????????????????.
-* - HREG_EV_INFO_DROP is explicitly excluded from the acceptable HREG_EV_STAT bits because it indicates
-* a too heavily loaded system.
-* - HREG_EV_ACK_REG_READY is 0x0000 for H-I (and hopefully H-II.5)
-*
-*
-* HREG_EV_TX_EXC is accepted (via HREG_EV_TX_EXT) if and only if HCF_EXT_INT_TX_EX set in the HCF_EXT
-* definition at compile time.
-* The following activities are handled:
-* - Alloc events are handled by hcf_send_msg (and notify). Only if there is no "spare" resource, the
-* alloc event is superficially serviced by hcf_service_nic to create a pseudo-resource with value
-* 0x001. This value is recognized by get_fid (called by hcf_send_msg and notify) where the real
-* TxFid is retrieved and the Hermes is acked and - hopefully - the "normal" case with a spare TxFid
-* in IFB_RscInd is restored.
-* - Info drop events are handled by incrementing a tally
-* - LinkEvent (including solicited and unsolicited tallies) are handled by procedure isr_info.
-* - TxEx (if selected at compile time) is handled by copying the significant part of the TxFS
-* into the IFB for further processing by the MSF.
-* Note the complication of the zero-FID protection sub-scheme in DAWA.
-* Note, the Ack of all of above events is handled at the end of hcf_service_nic
+* - this value may stay non-zero if the NIC is pulled out at an inconvenient moment.
+* - the RxAck on a zero-FID needs a zero-value for IFB_RxLen to work
+* Note that as side-effect of the hcf_action call, the remainder of Rx related info is re-initialized as
+* well.
+*4: In case of Defunct mode, the information supplied by Hermes is unreliable, so the body of
+* hcf_service_nic is skipped. Since hcf_cntl turns into a NOP if Primary or Station F/W is incompatible,
+* hcf_service_nic is also skipped in those cases.
+* To prevent that hcf_service_nic reports bogus information to the MSF with all - possibly difficult to
+* debug - undesirable side effects, it is paramount to check the NIC presence. In former days the presence
+* test was based on the Hermes register HREG_SW_0. Since in HCF_ACT_INT_OFF is chosen for strategy based on
+* HREG_EV_STAT, this is now also used in hcf_service_nic. The motivation to change strategy is partly
+* due to inconsistent F/W implementations with respect to HREG_SW_0 manipulation around reset and download.
+* Note that in polled environments Card Removal is not detected by INT_OFF which makes the check in
+* hcf_service_nic even more important.
+*8: The event status register of the Hermes is sampled
+* The assert checks for unexpected events ;?????????????????????????????????????.
+* - HREG_EV_INFO_DROP is explicitly excluded from the acceptable HREG_EV_STAT bits because it indicates
+* a too heavily loaded system.
+* - HREG_EV_ACK_REG_READY is 0x0000 for H-I (and hopefully H-II.5)
+*
+*
+* HREG_EV_TX_EXC is accepted (via HREG_EV_TX_EXT) if and only if HCF_EXT_INT_TX_EX set in the HCF_EXT
+* definition at compile time.
+* The following activities are handled:
+* - Alloc events are handled by hcf_send_msg (and notify). Only if there is no "spare" resource, the
+* alloc event is superficially serviced by hcf_service_nic to create a pseudo-resource with value
+* 0x001. This value is recognized by get_fid (called by hcf_send_msg and notify) where the real
+* TxFid is retrieved and the Hermes is acked and - hopefully - the "normal" case with a spare TxFid
+* in IFB_RscInd is restored.
+* - Info drop events are handled by incrementing a tally
+* - LinkEvent (including solicited and unsolicited tallies) are handled by procedure isr_info.
+* - TxEx (if selected at compile time) is handled by copying the significant part of the TxFS
+* into the IFB for further processing by the MSF.
+* Note the complication of the zero-FID protection sub-scheme in DAWA.
+* Note, the Ack of all of above events is handled at the end of hcf_service_nic
*16: In case of non-DMA ( either not compiled in or due to a run-time choice):
-* If an Rx-frame is available, first the FID of that frame is read, including the complication of the
-* zero-FID protection sub-scheme in DAWA. Note that such a zero-FID is acknowledged at the end of
-* hcf_service_nic and that this depends on the IFB_RxLen initialization in the begin of hcf_service_nic.
-* The Assert validates the HCF assumption about Hermes implementation upon which the range of
-* Pseudo-RIDs is based.
-* Then the control fields up to the start of the 802.3 frame are read from the NIC into the lookahead buffer.
-* The status field is converted to native Endianess.
-* The length is, after implicit Endianess conversion if needed, and adjustment for the 14 bytes of the
-* 802.3 MAC header, stored in IFB_RxLen.
-* In MAC Monitor mode, 802.11 control frames with a TOTAL length of 14 are received, so without this
-* length adjustment, IFB_RxLen could not be used to distinguish these frames from "no frame".
-* No MIC calculation processes are associated with the reading of these Control fields.
+* If an Rx-frame is available, first the FID of that frame is read, including the complication of the
+* zero-FID protection sub-scheme in DAWA. Note that such a zero-FID is acknowledged at the end of
+* hcf_service_nic and that this depends on the IFB_RxLen initialization in the begin of hcf_service_nic.
+* The Assert validates the HCF assumption about Hermes implementation upon which the range of
+* Pseudo-RIDs is based.
+* Then the control fields up to the start of the 802.3 frame are read from the NIC into the lookahead buffer.
+* The status field is converted to native Endianess.
+* The length is, after implicit Endianess conversion if needed, and adjustment for the 14 bytes of the
+* 802.3 MAC header, stored in IFB_RxLen.
+* In MAC Monitor mode, 802.11 control frames with a TOTAL length of 14 are received, so without this
+* length adjustment, IFB_RxLen could not be used to distinguish these frames from "no frame".
+* No MIC calculation processes are associated with the reading of these Control fields.
*26: This length test feels like superfluous robustness against malformed frames, but it turned out to be
-* needed in the real (hostile) world.
-* The decapsulation check needs sufficient data to represent DA, SA, L, SNAP and Type which amounts to
-* 22 bytes. In MAC Monitor mode, 802.11 control frames with a smaller length are received. To prevent
-* that the implementation goes haywire, a check on the length is needed.
-* The actual decapsulation takes place on the fly in the copying process by overwriting the SNAP header.
-* Note that in case of decapsulation the SNAP header is not passed to the MSF, hence IFB_RxLen must be
-* compensated for the SNAP header length.
-* The 22 bytes needed for decapsulation are (more than) sufficient for the exceptional handling of the
-* MIC algorithm of the L-field (replacing the 2 byte L-field with 4 0x00 bytes).
-*30: The 12 in the no-SSN branch corresponds with the get_frag, the 2 with the IPW of the SSN branch
+* needed in the real (hostile) world.
+* The decapsulation check needs sufficient data to represent DA, SA, L, SNAP and Type which amounts to
+* 22 bytes. In MAC Monitor mode, 802.11 control frames with a smaller length are received. To prevent
+* that the implementation goes haywire, a check on the length is needed.
+* The actual decapsulation takes place on the fly in the copying process by overwriting the SNAP header.
+* Note that in case of decapsulation the SNAP header is not passed to the MSF, hence IFB_RxLen must be
+* compensated for the SNAP header length.
+* The 22 bytes needed for decapsulation are (more than) sufficient for the exceptional handling of the
+* MIC algorithm of the L-field (replacing the 2 byte L-field with 4 0x00 bytes).
+*30: The 12 in the no-WPA branch corresponds with the get_frag, the 2 with the IPW of the WPA branch
*32: If Hermes reported MIC-presence, than the MIC engine is initialized with the non-dummy MIC calculation
-* routine address and appropriate key.
+* routine address and appropriate key.
*34: The 8 bytes after the DA, SA, L are read and it is checked whether decapsulation is needed i.e.:
-* - the Hermes reported Tunnel encapsulation or
-* - the Hermes reported 1042 Encapsulation and hcf_encap reports that the HCF would not have used
-* 1042 as the encapsulation mechanism
-* Note that the first field of the RxFS in bufp has Native Endianess due to the conversion done by the
-* BE_PAR in get_frag.
+* - the Hermes reported Tunnel encapsulation or
+* - the Hermes reported 1042 Encapsulation and hcf_encap reports that the HCF would not have used
+* 1042 as the encapsulation mechanism
+* Note that the first field of the RxFS in bufp has Native Endianess due to the conversion done by the
+* BE_PAR in get_frag.
*36: The Type field is the only word kept (after moving) of the just read 8 bytes, it is moved to the
-* L-field. The original L-field and 6 byte SNAP header are discarded, so IFB_RxLen and buf_addr must
-* be adjusted by 8.
+* L-field. The original L-field and 6 byte SNAP header are discarded, so IFB_RxLen and buf_addr must
+* be adjusted by 8.
*40: Determine how much of the frame (starting with DA) fits in the Lookahead buffer, then read the not-yet
-* read data into the lookahead buffer.
-* If the lookahead buffer contains the complete message, check the MIC. The majority considered this
-* I/F more appropriate then have the MSF call hcf_get_data only to check the MIC.
+* read data into the lookahead buffer.
+* If the lookahead buffer contains the complete message, check the MIC. The majority considered this
+* I/F more appropriate then have the MSF call hcf_get_data only to check the MIC.
*44: Since the complete message is copied from NIC RAM to PC RAM, the Rx can be acknowledged to the Hermes
-* to optimize the flow ( a better chance to get new Rx data in the next pass through hcf_service_nic ).
-* This acknowledgement can not be done via hcf_action( HCF_ACT_RX_ACK ) because this also clears
-* IFB_RxLEN thus corrupting the I/F to the MSF.
+* to optimize the flow ( a better chance to get new Rx data in the next pass through hcf_service_nic ).
+* This acknowledgement can not be done via hcf_action( HCF_ACT_RX_ACK ) because this also clears
+* IFB_RxLEN thus corrupting the I/F to the MSF.
*;?: In case of DMA (compiled in and activated):
*54: Limiting the number of places where the F/W is acked (e.g. the merging of the Rx-ACK with the other
-* ACKs), is supposed to diminish the potential of race conditions in the F/W.
-* Note 1: The CMD event is acknowledged in cmd_cmpl
-* Note 2: HREG_EV_ACK_REG_READY is 0x0000 for H-I (and hopefully H-II.5)
-* Note 3: The ALLOC event is acknowledged in get_fid (except for the initialization flow)
+* ACKs), is supposed to diminish the potential of race conditions in the F/W.
+* Note 1: The CMD event is acknowledged in cmd_cmpl
+* Note 2: HREG_EV_ACK_REG_READY is 0x0000 for H-I (and hopefully H-II.5)
+* Note 3: The ALLOC event is acknowledged in get_fid (except for the initialization flow)
*
*.NOTICE
* The Non-DMA HREG_EV_RX is handled different compared with the other F/W events.
@@ -3047,51 +2926,51 @@ or
*
*.NOTICE
* The minimum size for Len must supply space for:
-* - an F/W dependent number of bytes of Control Info field including the 802.11 Header field
-* - Destination Address
-* - Source Address
-* - Length field
-* - [ SNAP Header]
-* - [ Ethernet-II Type]
+* - an F/W dependent number of bytes of Control Info field including the 802.11 Header field
+* - Destination Address
+* - Source Address
+* - Length field
+* - [ SNAP Header]
+* - [ Ethernet-II Type]
* This results in 68 for Hermes-I and 80 for Hermes-II
* This way the minimum amount of information is available needed by the HCF to determine whether the frame
* must be decapsulated.
-*.ENDDOC END DOCUMENTATION
+*.ENDDOC END DOCUMENTATION
*
************************************************************************************************************/
int
hcf_service_nic( IFBP ifbp, wci_bufp bufp, unsigned int len )
{
-int rc = HCF_SUCCESS;
-hcf_16 stat;
-wci_bufp buf_addr;
-hcf_16 i;
+ int rc = HCF_SUCCESS;
+ hcf_16 stat;
+ wci_bufp buf_addr;
+ hcf_16 i;
- HCFLOGENTRY( HCF_TRACE_SERVICE_NIC, ifbp->IFB_IntOffCnt )
- HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic )
- HCFASSERT_INT
+ HCFLOGENTRY( HCF_TRACE_SERVICE_NIC, ifbp->IFB_IntOffCnt );
+ HCFASSERT( ifbp->IFB_Magic == HCF_MAGIC, ifbp->IFB_Magic );
+ HCFASSERT_INT;
- ifbp->IFB_LinkStat = 0; // ;? to be obsoleted ASAP /* 1*/
- ifbp->IFB_DSLinkStat &= ~CFG_LINK_STAT_CHANGE; /* 1*/
- (void)hcf_action( ifbp, HCF_ACT_RX_ACK ); /* 2*/
- if ( ifbp->IFB_CardStat == 0 && ( stat = IPW( HREG_EV_STAT ) ) != 0xFFFF ) { /* 4*/
+ ifbp->IFB_LinkStat = 0; // ;? to be obsoleted ASAP /* 1*/
+ ifbp->IFB_DSLinkStat &= ~CFG_LINK_STAT_CHANGE; /* 1*/
+ (void)hcf_action( ifbp, HCF_ACT_RX_ACK ); /* 2*/
+ if ( ifbp->IFB_CardStat == 0 && ( stat = IPW( HREG_EV_STAT ) ) != 0xFFFF ) { /* 4*/
/* IF_NOT_DMA( HCFASSERT( !( stat & ~HREG_EV_BASIC_MASK, stat ) )
* IF_NOT_USE_DMA( HCFASSERT( !( stat & ~HREG_EV_BASIC_MASK, stat ) )
* IF_USE_DMA( HCFASSERT( !( stat & ~( HREG_EV_BASIC_MASK ^ ( HREG_EV_...DMA.... ), stat ) )
*/
- /* 8*/
+ /* 8*/
if ( ifbp->IFB_RscInd == 0 && stat & HREG_EV_ALLOC ) { //Note: IFB_RscInd is ALWAYS 1 for DMA
ifbp->IFB_RscInd = 1;
}
- IF_TALLY( if ( stat & HREG_EV_INFO_DROP ) ifbp->IFB_HCF_Tallies.NoBufInfo++; )
+ IF_TALLY( if ( stat & HREG_EV_INFO_DROP ) { ifbp->IFB_HCF_Tallies.NoBufInfo++; } );
#if (HCF_EXT) & HCF_EXT_INT_TICK
if ( stat & HREG_EV_TICK ) {
ifbp->IFB_TickCnt++;
}
#if 0 // (HCF_SLEEP) & HCF_DDS
if ( ifbp->IFB_TickCnt == 3 && ( ifbp->IFB_DSLinkStat & CFG_LINK_STAT_CONNECTED ) == 0 ) {
-CFG_DDS_TICK_TIME_STRCT ltv;
+ CFG_DDS_TICK_TIME_STRCT ltv;
// 2 second period (with 1 tick uncertanty) in not-connected mode -->go into DS_OOR
hcf_action( ifbp, HCF_ACT_SLEEP );
ifbp->IFB_DSLinkStat |= CFG_LINK_STAT_DS_OOR; //set OutOfRange
@@ -3100,7 +2979,7 @@ CFG_DDS_TICK_TIME_STRCT ltv;
ltv.tick_time = ( ( ifbp->IFB_DSLinkStat & CFG_LINK_STAT_TIMER ) + 0x10 ) *64; //78 is more right
hcf_put_info( ifbp, (LTVP)&ltv );
printk( "<5>Preparing for sleep, link_status: %04X, timer : %d\n",
- ifbp->IFB_DSLinkStat, ltv.tick_time );//;?remove me 1 day
+ ifbp->IFB_DSLinkStat, ltv.tick_time );//;?remove me 1 day
ifbp->IFB_TickCnt++; //;?just to make sure we do not keep on printing above message
if ( ltv.tick_time < 300 * 125 ) ifbp->IFB_DSLinkStat += 0x0010;
@@ -3112,7 +2991,7 @@ CFG_DDS_TICK_TIME_STRCT ltv;
}
#if (HCF_EXT) & HCF_EXT_INT_TX_EX
if ( stat & HREG_EV_TX_EXT && ( i = IPW( HREG_TX_COMPL_FID ) ) != 0 /*DAWA*/ ) {
- DAWA_ZERO_FID( HREG_TX_COMPL_FID )
+ DAWA_ZERO_FID( HREG_TX_COMPL_FID );
(void)setup_bap( ifbp, i, 0, IO_IN );
get_frag( ifbp, &ifbp->IFB_TxFsStat, HFS_SWSUP BE_PAR(1) );
}
@@ -3121,110 +3000,103 @@ CFG_DDS_TICK_TIME_STRCT ltv;
#if HCF_DMA
if ( !( ifbp->IFB_CntlOpt & USE_DMA ) ) //!! be aware of the logical indentations
#endif // HCF_DMA
-/*16*/ if ( stat & HREG_EV_RX && ( ifbp->IFB_RxFID = IPW( HREG_RX_FID ) ) != 0 ) { //if 0 then DAWA_ACK
- HCFASSERT( bufp, len )
- HCFASSERT( len >= HFS_DAT + 2, len )
- DAWA_ZERO_FID( HREG_RX_FID )
- HCFASSERT( ifbp->IFB_RxFID < CFG_PROD_DATA, ifbp->IFB_RxFID)
- (void)setup_bap( ifbp, ifbp->IFB_RxFID, 0, IO_IN );
- get_frag( ifbp, bufp, HFS_ADDR_DEST BE_PAR(1) );
- ifbp->IFB_lap = buf_addr = bufp + HFS_ADDR_DEST;
- ifbp->IFB_RxLen = (hcf_16)(bufp[HFS_DAT_LEN] + (bufp[HFS_DAT_LEN+1]<<8) + 2*6 + 2);
-/*26*/ if ( ifbp->IFB_RxLen >= 22 ) { // convenient for MIC calculation (5 DWs + 1 "skipped" W)
- //. get DA,SA,Len/Type and (SNAP,Type or 8 data bytes)
-/*30*/ get_frag( ifbp, buf_addr, 22 BE_PAR(0) );
-/*32*/ CALC_RX_MIC( bufp, -1 ); //. initialize MIC
- CALC_RX_MIC( buf_addr, HCF_DASA_SIZE ); //. MIC over DA, SA
- CALC_RX_MIC( null_addr, 4 ); //. MIC over (virtual) priority field
- CALC_RX_MIC( buf_addr+14, 8 ); //. skip Len, MIC over SNAP,Type or 8 data bytes)
- buf_addr += 22;
-#if (HCF_TYPE) & HCF_TYPE_CCX
-//!!be careful do not use positive test on HCF_ACT_CCX_OFF, because IFB_CKIPStat is initially 0
- if( ifbp->IFB_CKIPStat != HCF_ACT_CCX_ON )
-#endif // HCF_TYPE_CCX
- {
+ /*16*/ if ( stat & HREG_EV_RX && ( ifbp->IFB_RxFID = IPW( HREG_RX_FID ) ) != 0 ) { //if 0 then DAWA_ACK
+ HCFASSERT( bufp, len );
+ HCFASSERT( len >= HFS_DAT + 2, len );
+ DAWA_ZERO_FID( HREG_RX_FID );
+ HCFASSERT( ifbp->IFB_RxFID < CFG_PROD_DATA, ifbp->IFB_RxFID);
+ (void)setup_bap( ifbp, ifbp->IFB_RxFID, 0, IO_IN );
+ get_frag( ifbp, bufp, HFS_ADDR_DEST BE_PAR(1) );
+ ifbp->IFB_lap = buf_addr = bufp + HFS_ADDR_DEST;
+ ifbp->IFB_RxLen = (hcf_16)(bufp[HFS_DAT_LEN] + (bufp[HFS_DAT_LEN+1]<<8) + 2*6 + 2);
+ /*26*/ if ( ifbp->IFB_RxLen >= 22 ) { // convenient for MIC calculation (5 DWs + 1 "skipped" W)
+ //. get DA,SA,Len/Type and (SNAP,Type or 8 data bytes)
+ /*30*/ get_frag( ifbp, buf_addr, 22 BE_PAR(0) );
+ /*32*/ CALC_RX_MIC( bufp, -1 ); //. initialize MIC
+ CALC_RX_MIC( buf_addr, HCF_DASA_SIZE ); //. MIC over DA, SA
+ CALC_RX_MIC( null_addr, 4 ); //. MIC over (virtual) priority field
+ CALC_RX_MIC( buf_addr+14, 8 ); //. skip Len, MIC over SNAP,Type or 8 data bytes)
+ buf_addr += 22;
#if (HCF_ENCAP) == HCF_ENC
- HCFASSERT( len >= HFS_DAT + 2 + sizeof(snap_header), len )
-/*34*/ i = *(wci_recordp)&bufp[HFS_STAT] & ( HFS_STAT_MSG_TYPE | HFS_STAT_ERR );
+ HCFASSERT( len >= HFS_DAT + 2 + sizeof(snap_header), len );
+ /*34*/ i = *(wci_recordp)&bufp[HFS_STAT] & ( HFS_STAT_MSG_TYPE | HFS_STAT_ERR );
if ( i == HFS_STAT_TUNNEL ||
- ( i == HFS_STAT_1042 && hcf_encap( (wci_bufp)&bufp[HFS_TYPE] ) != ENC_TUNNEL ) ) {
- //. copy E-II Type to 802.3 LEN field
-/*36*/ bufp[HFS_LEN ] = bufp[HFS_TYPE ];
+ ( i == HFS_STAT_1042 && hcf_encap( (wci_bufp)&bufp[HFS_TYPE] ) != ENC_TUNNEL ) ) {
+ //. copy E-II Type to 802.3 LEN field
+ /*36*/ bufp[HFS_LEN ] = bufp[HFS_TYPE ];
bufp[HFS_LEN+1] = bufp[HFS_TYPE+1];
- //. discard Snap by overwriting with data
+ //. discard Snap by overwriting with data
ifbp->IFB_RxLen -= (HFS_TYPE - HFS_LEN);
buf_addr -= ( HFS_TYPE - HFS_LEN ); // this happens to bring us at a DW boundary of 36
}
#endif // HCF_ENC
}
- }
-/*40*/ ifbp->IFB_lal = min( (hcf_16)(len - HFS_ADDR_DEST), ifbp->IFB_RxLen );
- i = ifbp->IFB_lal - ( buf_addr - ( bufp + HFS_ADDR_DEST ) );
- get_frag( ifbp, buf_addr, i BE_PAR(0) );
- CALC_RX_MIC( buf_addr, i );
+ /*40*/ ifbp->IFB_lal = min( (hcf_16)(len - HFS_ADDR_DEST), ifbp->IFB_RxLen );
+ i = ifbp->IFB_lal - ( buf_addr - ( bufp + HFS_ADDR_DEST ) );
+ get_frag( ifbp, buf_addr, i BE_PAR(0) );
+ CALC_RX_MIC( buf_addr, i );
#if (HCF_TYPE) & HCF_TYPE_WPA
- if ( ifbp->IFB_lal == ifbp->IFB_RxLen ) {
- rc = check_mic( ifbp );
- }
+ if ( ifbp->IFB_lal == ifbp->IFB_RxLen ) {
+ rc = check_mic( ifbp );
+ }
#endif // HCF_TYPE_WPA
-/*44*/ if ( len - HFS_ADDR_DEST >= ifbp->IFB_RxLen ) {
- ifbp->IFB_RxFID = 0;
- } else { /* IFB_RxFID is cleared, so you do not get another Rx_Ack at next entry of hcf_service_nic */
- stat &= (hcf_16)~HREG_EV_RX; //don't ack Rx if processing not yet completed
+ /*44*/ if ( len - HFS_ADDR_DEST >= ifbp->IFB_RxLen ) {
+ ifbp->IFB_RxFID = 0;
+ } else { /* IFB_RxFID is cleared, so you do not get another Rx_Ack at next entry of hcf_service_nic */
+ stat &= (hcf_16)~HREG_EV_RX; //don't ack Rx if processing not yet completed
+ }
}
- }
// in case of DMA: signal availability of rx and/or tx packets to MSF
- IF_USE_DMA( ifbp->IFB_DmaPackets |= stat & ( HREG_EV_RDMAD | HREG_EV_TDMAD ); )
+ IF_USE_DMA( ifbp->IFB_DmaPackets |= stat & ( HREG_EV_RDMAD | HREG_EV_TDMAD ) );
// rlav : pending HREG_EV_RDMAD or HREG_EV_TDMAD events get acknowledged here.
-/*54*/ stat &= (hcf_16)~( HREG_EV_SLEEP_REQ | HREG_EV_CMD | HREG_EV_ACK_REG_READY | HREG_EV_ALLOC | HREG_EV_FW_DMA );
-//a positive mask would be easier to understand /*54*/ stat &= (hcf_16)~( HREG_EV_SLEEP_REQ | HREG_EV_CMD | HREG_EV_ACK_REG_READY | HREG_EV_ALLOC | HREG_EV_FW_DMA );
- IF_USE_DMA( stat &= (hcf_16)~HREG_EV_RX; )
+ /*54*/ stat &= (hcf_16)~( HREG_EV_SLEEP_REQ | HREG_EV_CMD | HREG_EV_ACK_REG_READY | HREG_EV_ALLOC | HREG_EV_FW_DMA );
+//a positive mask would be easier to understand /*54*/ stat &= (hcf_16)~( HREG_EV_SLEEP_REQ | HREG_EV_CMD | HREG_EV_ACK_REG_READY | HREG_EV_ALLOC | HREG_EV_FW_DMA );
+ IF_USE_DMA( stat &= (hcf_16)~HREG_EV_RX );
if ( stat ) {
- DAWA_ACK( stat ); /*DAWA*/
+ DAWA_ACK( stat ); /*DAWA*/
}
}
- HCFLOGEXIT( HCF_TRACE_SERVICE_NIC )
+ HCFLOGEXIT( HCF_TRACE_SERVICE_NIC );
return rc;
} // hcf_service_nic
-#endif // HCF_DL_ONLY
/************************************************************************************************************
-************************** H C F S U P P O R T R O U T I N E S ******************************************
-************************************************************************************************************/
+ ************************** H C F S U P P O R T R O U T I N E S ******************************************
+ ************************************************************************************************************/
/************************************************************************************************************
-*
-*.SUBMODULE void calc_mic( hcf_32* p, hcf_32 m )
-*.PURPOSE calculate MIC on a quad byte.
-*
-*.ARGUMENTS
-* p address of the MIC
-* m 32 bit value to be processed by the MIC calculation engine
-*
-*.RETURNS N.A.
-*
-*.DESCRIPTION
-* calc_mic is the implementation of the MIC algorithm. It is a monkey-see monkey-do copy of
-* Michael::appendByte()
-* of Appendix C of ..........
-*
-*
-*.DIAGRAM
-*
-*.NOTICE
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE void calc_mic( hcf_32* p, hcf_32 m )
+ *.PURPOSE calculate MIC on a quad byte.
+ *
+ *.ARGUMENTS
+ * p address of the MIC
+ * m 32 bit value to be processed by the MIC calculation engine
+ *
+ *.RETURNS N.A.
+ *
+ *.DESCRIPTION
+ * calc_mic is the implementation of the MIC algorithm. It is a monkey-see monkey-do copy of
+ * Michael::appendByte()
+ * of Appendix C of ..........
+ *
+ *
+ *.DIAGRAM
+ *
+ *.NOTICE
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
#if (HCF_TYPE) & HCF_TYPE_WPA
#define ROL32( A, n ) ( ((A) << (n)) | ( ((A)>>(32-(n))) & ( (1UL << (n)) - 1 ) ) )
#define ROR32( A, n ) ROL32( (A), 32-(n) )
-#define L *p
-#define R *(p+1)
+#define L *p
+#define R *(p+1)
void
calc_mic( hcf_32* p, hcf_32 m )
@@ -3250,38 +3122,38 @@ calc_mic( hcf_32* p, hcf_32 m )
#if (HCF_TYPE) & HCF_TYPE_WPA
/************************************************************************************************************
-*
-*.SUBMODULE void calc_mic_rx_frag( IFBP ifbp, wci_bufp p, int len )
-*.PURPOSE calculate MIC on a single fragment.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* bufp (byte) address of buffer
-* len length in bytes of buffer specified by bufp
-*
-*.RETURNS N.A.
-*
-*.DESCRIPTION
-* calc_mic_rx_frag ........
-*
-* The MIC is located in the IFB.
-* The MIC is separate for Tx and Rx, thus allowing hcf_send_msg to occur between hcf_service_nic and
-* hcf_rcv_msg.
-*
-*
-*.DIAGRAM
-*
-*.NOTICE
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE void calc_mic_rx_frag( IFBP ifbp, wci_bufp p, int len )
+ *.PURPOSE calculate MIC on a single fragment.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * bufp (byte) address of buffer
+ * len length in bytes of buffer specified by bufp
+ *
+ *.RETURNS N.A.
+ *
+ *.DESCRIPTION
+ * calc_mic_rx_frag ........
+ *
+ * The MIC is located in the IFB.
+ * The MIC is separate for Tx and Rx, thus allowing hcf_send_msg to occur between hcf_service_nic and
+ * hcf_rcv_msg.
+ *
+ *
+ *.DIAGRAM
+ *
+ *.NOTICE
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
void
calc_mic_rx_frag( IFBP ifbp, wci_bufp p, int len )
{
-static union { hcf_32 x32; hcf_16 x16[2]; hcf_8 x8[4]; } x; //* area to accumulate 4 bytes input for MIC engine
-int i;
+ static union { hcf_32 x32; hcf_16 x16[2]; hcf_8 x8[4]; } x; //* area to accumulate 4 bytes input for MIC engine
+ int i;
- if ( len == -1 ) { //initialize MIC housekeeping
+ if ( len == -1 ) { //initialize MIC housekeeping
i = *(wci_recordp)&p[HFS_STAT];
/* i = CNV_SHORTP_TO_LITTLE(&p[HFS_STAT]); should not be neede to prevent alignment poroblems
* since len == -1 if and only if p is lookahaead buffer which MUST be word aligned
@@ -3289,12 +3161,12 @@ int i;
*/
if ( ( i & HFS_STAT_MIC ) == 0 ) {
- ifbp->IFB_MICRxCarry = 0xFFFF; //suppress MIC calculation
+ ifbp->IFB_MICRxCarry = 0xFFFF; //suppress MIC calculation
} else {
ifbp->IFB_MICRxCarry = 0;
-//* Note that "coincidentally" the bit positions used in HFS_STAT
-//* correspond with the offset of the key in IFB_MICKey
- i = ( i & HFS_STAT_MIC_KEY_ID ) >> 10; /* coincidentally no shift needed for i itself */
+//* Note that "coincidentally" the bit positions used in HFS_STAT
+//* correspond with the offset of the key in IFB_MICKey
+ i = ( i & HFS_STAT_MIC_KEY_ID ) >> 10; /* coincidentally no shift needed for i itself */
ifbp->IFB_MICRx[0] = CNV_LONG_TO_LITTLE(ifbp->IFB_MICRxKey[i ]);
ifbp->IFB_MICRx[1] = CNV_LONG_TO_LITTLE(ifbp->IFB_MICRxKey[i+1]);
}
@@ -3308,11 +3180,11 @@ int i;
ifbp->IFB_MICRxCarry = 4;
len -= 4;
}
- } else while ( ifbp->IFB_MICRxCarry < 4 && len ) { //note for hcf_16 applies: 0xFFFF > 4
- x.x8[ifbp->IFB_MICRxCarry++] = *p++;
- len--;
- }
- while ( ifbp->IFB_MICRxCarry == 4 ) { //contrived so we have only 1 call to calc_mic so we could bring it in-line
+ } else while ( ifbp->IFB_MICRxCarry < 4 && len ) { //note for hcf_16 applies: 0xFFFF > 4
+ x.x8[ifbp->IFB_MICRxCarry++] = *p++;
+ len--;
+ }
+ while ( ifbp->IFB_MICRxCarry == 4 ) { //contrived so we have only 1 call to calc_mic so we could bring it in-line
calc_mic( ifbp->IFB_MICRx, x.x32 );
x.x32 = CNV_LONGP_TO_LITTLE(p);
p += 4;
@@ -3328,92 +3200,92 @@ int i;
#if (HCF_TYPE) & HCF_TYPE_WPA
/************************************************************************************************************
-*
-*.SUBMODULE void calc_mic_tx_frag( IFBP ifbp, wci_bufp p, int len )
-*.PURPOSE calculate MIC on a single fragment.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* bufp (byte) address of buffer
-* len length in bytes of buffer specified by bufp
-*
-*.RETURNS N.A.
-*
-*.DESCRIPTION
-* calc_mic_tx_frag ........
-*
-* The MIC is located in the IFB.
-* The MIC is separate for Tx and Rx, thus allowing hcf_send_msg to occur between hcf_service_nic and
-* hcf_rcv_msg.
-*
-*
-*.DIAGRAM
-*
-*.NOTICE
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE void calc_mic_tx_frag( IFBP ifbp, wci_bufp p, int len )
+ *.PURPOSE calculate MIC on a single fragment.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * bufp (byte) address of buffer
+ * len length in bytes of buffer specified by bufp
+ *
+ *.RETURNS N.A.
+ *
+ *.DESCRIPTION
+ * calc_mic_tx_frag ........
+ *
+ * The MIC is located in the IFB.
+ * The MIC is separate for Tx and Rx, thus allowing hcf_send_msg to occur between hcf_service_nic and
+ * hcf_rcv_msg.
+ *
+ *
+ *.DIAGRAM
+ *
+ *.NOTICE
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
void
calc_mic_tx_frag( IFBP ifbp, wci_bufp p, int len )
{
-static union { hcf_32 x32; hcf_16 x16[2]; hcf_8 x8[4]; } x; //* area to accumulate 4 bytes input for MIC engine
+ static union { hcf_32 x32; hcf_16 x16[2]; hcf_8 x8[4]; } x; //* area to accumulate 4 bytes input for MIC engine
- //if initialization request
+ //if initialization request
if ( len == -1 ) {
- //. presume MIC calculation disabled
+ //. presume MIC calculation disabled
ifbp->IFB_MICTxCarry = 0xFFFF;
- //. if MIC calculation enabled
+ //. if MIC calculation enabled
if ( ifbp->IFB_MICTxCntl ) {
- //. . clear MIC carry
+ //. . clear MIC carry
ifbp->IFB_MICTxCarry = 0;
- //. . initialize MIC-engine
- ifbp->IFB_MICTx[0] = CNV_LONG_TO_LITTLE(ifbp->IFB_MICTxKey[0]); /*Tx always uses Key 0 */
+ //. . initialize MIC-engine
+ ifbp->IFB_MICTx[0] = CNV_LONG_TO_LITTLE(ifbp->IFB_MICTxKey[0]); /*Tx always uses Key 0 */
ifbp->IFB_MICTx[1] = CNV_LONG_TO_LITTLE(ifbp->IFB_MICTxKey[1]);
}
- //else
+ //else
} else {
- //. if MIC enabled (Tx) / if MIC present (Rx)
- //. and no carry from previous calc_mic_frag
+ //. if MIC enabled (Tx) / if MIC present (Rx)
+ //. and no carry from previous calc_mic_frag
if ( ifbp->IFB_MICTxCarry == 0 ) {
- //. . preset accu with 4 bytes from buffer
+ //. . preset accu with 4 bytes from buffer
x.x32 = CNV_LONGP_TO_LITTLE(p);
- //. . adjust pointer accordingly
+ //. . adjust pointer accordingly
p += 4;
- //. . if buffer contained less then 4 bytes
+ //. . if buffer contained less then 4 bytes
if ( len < 4 ) {
- //. . . promote valid bytes in accu to carry
- //. . . flag accu to contain incomplete double word
+ //. . . promote valid bytes in accu to carry
+ //. . . flag accu to contain incomplete double word
ifbp->IFB_MICTxCarry = (hcf_16)len;
- //. . else
+ //. . else
} else {
- //. . . flag accu to contain complete double word
+ //. . . flag accu to contain complete double word
ifbp->IFB_MICTxCarry = 4;
- //. . adjust remaining buffer length
+ //. . adjust remaining buffer length
len -= 4;
}
- //. else if MIC enabled
- //. and if carry bytes from previous calc_mic_tx_frag
- //. . move (1-3) bytes from carry into accu
- } else while ( ifbp->IFB_MICTxCarry < 4 && len ) { /* note for hcf_16 applies: 0xFFFF > 4 */
- x.x8[ifbp->IFB_MICTxCarry++] = *p++;
- len--;
- }
- //. while accu contains complete double word
- //. and MIC enabled
+ //. else if MIC enabled
+ //. and if carry bytes from previous calc_mic_tx_frag
+ //. . move (1-3) bytes from carry into accu
+ } else while ( ifbp->IFB_MICTxCarry < 4 && len ) { /* note for hcf_16 applies: 0xFFFF > 4 */
+ x.x8[ifbp->IFB_MICTxCarry++] = *p++;
+ len--;
+ }
+ //. while accu contains complete double word
+ //. and MIC enabled
while ( ifbp->IFB_MICTxCarry == 4 ) {
- //. . pass accu to MIC engine
+ //. . pass accu to MIC engine
calc_mic( ifbp->IFB_MICTx, x.x32 );
- //. . copy next 4 bytes from buffer to accu
+ //. . copy next 4 bytes from buffer to accu
x.x32 = CNV_LONGP_TO_LITTLE(p);
- //. . adjust buffer pointer
+ //. . adjust buffer pointer
p += 4;
- //. . if buffer contained less then 4 bytes
- //. . . promote valid bytes in accu to carry
- //. . . flag accu to contain incomplete double word
+ //. . if buffer contained less then 4 bytes
+ //. . . promote valid bytes in accu to carry
+ //. . . flag accu to contain incomplete double word
if ( len < 4 ) {
ifbp->IFB_MICTxCarry = (hcf_16)len;
}
- //. . adjust remaining buffer length
+ //. . adjust remaining buffer length
len -= 4;
}
}
@@ -3423,381 +3295,379 @@ static union { hcf_32 x32; hcf_16 x16[2]; hcf_8 x8[4]; } x; //* area to accumula
#if HCF_PROT_TIME
/************************************************************************************************************
-*
-*.SUBMODULE void calibrate( IFBP ifbp )
-*.PURPOSE calibrates the S/W protection counter against the Hermes Timer tick.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-*
-*.RETURNS N.A.
-*
-*.DESCRIPTION
-* calibrates the S/W protection counter against the Hermes Timer tick
-* IFB_TickIni is the value used to initialize the S/W protection counter such that the expiration period
-* more or less independent of the processor speed. If IFB_TickIni is not yet calibrated, it is done now.
-* This calibration is "reasonably" accurate because the Hermes is in a quiet state as a result of the
-* Initialize command.
-*
-*
-*.DIAGRAM
-*
-*1: IFB_TickIni is initialized at INI_TICK_INI by hcf_connect. If calibrate succeeds, IFB_TickIni is
-* guaranteed to be changed. As a consequence there will be only 1 shot at calibration (regardless of the
-* number of init calls) under normal circumstances.
-*2: Calibration is done HCF_PROT_TIME_CNT times. This diminish the effects of jitter and interference,
-* especially in a pre-emptive environment. HCF_PROT_TIME_CNT is in the range of 16 through 32 and derived
-* from the HCF_PROT_TIME specified by the MSF programmer. The divisor needed to scale HCF_PROT_TIME into the
-* 16-32 range, is used as a multiplicator after the calibration, to scale the found value back to the
-* requested range. This way a compromise is achieved between accuracy and duration of the calibration
-* process.
-*3: Acknowledge the Timer Tick Event.
-* Each cycle is limited to at most INI_TICK_INI samples of the TimerTick status of the Hermes.
-* Since the start of calibrate is unrelated to the Hermes Internal Timer, the first interval may last from 0
-* to the normal interval, all subsequent intervals should be the full length of the Hermes Tick interval.
-* The Hermes Timer Tick is not reprogrammed by the HCF, hence it is running at the default of 10 k
-* microseconds.
-*4: If the Timer Tick Event is continuously up (prot_cnt still has the value INI_TICK_INI) or no Timer Tick
-* Event occurred before the protection counter expired, reset IFB_TickIni to INI_TICK_INI,
-* set the defunct bit of IFB_CardStat (thus rendering the Hermes inoperable) and exit the calibrate routine.
-*8: ifbp->IFB_TickIni is multiplied to scale the found value back to the requested range as explained under 2.
-*
-*.NOTICE
-* o Although there are a number of viewpoints possible, calibrate() uses as error strategy that a single
-* failure of the Hermes TimerTick is considered fatal.
-* o There is no hard and concrete time-out value defined for Hermes activities. The default 1 seconds is
-* believed to be sufficiently "relaxed" for real life and to be sufficiently short to be still useful in an
-* environment with humans.
-* o Note that via IFB_DefunctStat time outs in cmd_wait and in hcfio_string block all Hermes access till the
-* next init so functions which call a mix of cmd_wait and hcfio_string only need to check the return status
-* of the last call
-* o The return code is preset at Time out.
-* The additional complication that no calibrated value for the protection count can be assumed since
-* calibrate() does not yet have determined a calibrated value (a catch 22), is handled by setting the
-* initial value at INI_TICK_INI (by hcf_connect). This approach is considered safe, because:
-* - the HCF does not use the pipeline mechanism of Hermes commands.
-* - the likelihood of failure (the only time when protection count is relevant) is small.
-* - the time will be sufficiently large on a fast machine (busy bit drops on good NIC before counter
-* expires)
-* - the time will be sufficiently small on a slow machine (counter expires on bad NIC before the end user
-* switches the power off in despair
-* The time needed to wrap a 32 bit counter around is longer than many humans want to wait, hence the more or
-* less arbitrary value of 0x40000L is chosen, assuming it does not take too long on an XT and is not too
-* short on a scream-machine.
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE void calibrate( IFBP ifbp )
+ *.PURPOSE calibrates the S/W protection counter against the Hermes Timer tick.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ *
+ *.RETURNS N.A.
+ *
+ *.DESCRIPTION
+ * calibrates the S/W protection counter against the Hermes Timer tick
+ * IFB_TickIni is the value used to initialize the S/W protection counter such that the expiration period
+ * more or less independent of the processor speed. If IFB_TickIni is not yet calibrated, it is done now.
+ * This calibration is "reasonably" accurate because the Hermes is in a quiet state as a result of the
+ * Initialize command.
+ *
+ *
+ *.DIAGRAM
+ *
+ *1: IFB_TickIni is initialized at INI_TICK_INI by hcf_connect. If calibrate succeeds, IFB_TickIni is
+ * guaranteed to be changed. As a consequence there will be only 1 shot at calibration (regardless of the
+ * number of init calls) under normal circumstances.
+ *2: Calibration is done HCF_PROT_TIME_CNT times. This diminish the effects of jitter and interference,
+ * especially in a pre-emptive environment. HCF_PROT_TIME_CNT is in the range of 16 through 32 and derived
+ * from the HCF_PROT_TIME specified by the MSF programmer. The divisor needed to scale HCF_PROT_TIME into the
+ * 16-32 range, is used as a multiplicator after the calibration, to scale the found value back to the
+ * requested range. This way a compromise is achieved between accuracy and duration of the calibration
+ * process.
+ *3: Acknowledge the Timer Tick Event.
+ * Each cycle is limited to at most INI_TICK_INI samples of the TimerTick status of the Hermes.
+ * Since the start of calibrate is unrelated to the Hermes Internal Timer, the first interval may last from 0
+ * to the normal interval, all subsequent intervals should be the full length of the Hermes Tick interval.
+ * The Hermes Timer Tick is not reprogrammed by the HCF, hence it is running at the default of 10 k
+ * microseconds.
+ *4: If the Timer Tick Event is continuously up (prot_cnt still has the value INI_TICK_INI) or no Timer Tick
+ * Event occurred before the protection counter expired, reset IFB_TickIni to INI_TICK_INI,
+ * set the defunct bit of IFB_CardStat (thus rendering the Hermes inoperable) and exit the calibrate routine.
+ *8: ifbp->IFB_TickIni is multiplied to scale the found value back to the requested range as explained under 2.
+ *
+ *.NOTICE
+ * o Although there are a number of viewpoints possible, calibrate() uses as error strategy that a single
+ * failure of the Hermes TimerTick is considered fatal.
+ * o There is no hard and concrete time-out value defined for Hermes activities. The default 1 seconds is
+ * believed to be sufficiently "relaxed" for real life and to be sufficiently short to be still useful in an
+ * environment with humans.
+ * o Note that via IFB_DefunctStat time outs in cmd_wait and in hcfio_string block all Hermes access till the
+ * next init so functions which call a mix of cmd_wait and hcfio_string only need to check the return status
+ * of the last call
+ * o The return code is preset at Time out.
+ * The additional complication that no calibrated value for the protection count can be assumed since
+ * calibrate() does not yet have determined a calibrated value (a catch 22), is handled by setting the
+ * initial value at INI_TICK_INI (by hcf_connect). This approach is considered safe, because:
+ * - the HCF does not use the pipeline mechanism of Hermes commands.
+ * - the likelihood of failure (the only time when protection count is relevant) is small.
+ * - the time will be sufficiently large on a fast machine (busy bit drops on good NIC before counter
+ * expires)
+ * - the time will be sufficiently small on a slow machine (counter expires on bad NIC before the end user
+ * switches the power off in despair
+ * The time needed to wrap a 32 bit counter around is longer than many humans want to wait, hence the more or
+ * less arbitrary value of 0x40000L is chosen, assuming it does not take too long on an XT and is not too
+ * short on a scream-machine.
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
HCF_STATIC void
calibrate( IFBP ifbp )
{
-int cnt = HCF_PROT_TIME_CNT;
-hcf_32 prot_cnt;
+ int cnt = HCF_PROT_TIME_CNT;
+ hcf_32 prot_cnt;
HCFTRACE( ifbp, HCF_TRACE_CALIBRATE );
- if ( ifbp->IFB_TickIni == INI_TICK_INI ) { /*1*/
- ifbp->IFB_TickIni = 0; /*2*/
- while ( cnt-- ) {
- prot_cnt = INI_TICK_INI;
- OPW( HREG_EV_ACK, HREG_EV_TICK ); /*3*/
- while ( (IPW( HREG_EV_STAT ) & HREG_EV_TICK) == 0 && --prot_cnt ) {
- ifbp->IFB_TickIni++;
- }
- if ( prot_cnt == 0 || prot_cnt == INI_TICK_INI ) { /*4*/
- ifbp->IFB_TickIni = INI_TICK_INI;
- ifbp->IFB_DefunctStat = HCF_ERR_DEFUNCT_TIMER;
- ifbp->IFB_CardStat |= CARD_STAT_DEFUNCT;
- HCFASSERT( DO_ASSERT, prot_cnt )
- }
+ if ( ifbp->IFB_TickIni == INI_TICK_INI ) { /*1*/
+ ifbp->IFB_TickIni = 0; /*2*/
+ while ( cnt-- ) {
+ prot_cnt = INI_TICK_INI;
+ OPW( HREG_EV_ACK, HREG_EV_TICK ); /*3*/
+ while ( (IPW( HREG_EV_STAT ) & HREG_EV_TICK) == 0 && --prot_cnt ) {
+ ifbp->IFB_TickIni++;
}
- ifbp->IFB_TickIni <<= HCF_PROT_TIME_SHFT; /*8*/
+ if ( prot_cnt == 0 || prot_cnt == INI_TICK_INI ) { /*4*/
+ ifbp->IFB_TickIni = INI_TICK_INI;
+ ifbp->IFB_DefunctStat = HCF_ERR_DEFUNCT_TIMER;
+ ifbp->IFB_CardStat |= CARD_STAT_DEFUNCT;
+ HCFASSERT( DO_ASSERT, prot_cnt );
+ }
+ }
+ ifbp->IFB_TickIni <<= HCF_PROT_TIME_SHFT; /*8*/
}
HCFTRACE( ifbp, HCF_TRACE_CALIBRATE | HCF_TRACE_EXIT );
} // calibrate
#endif // HCF_PROT_TIME
-#if (HCF_DL_ONLY) == 0
#if (HCF_TYPE) & HCF_TYPE_WPA
/************************************************************************************************************
-*
-*.SUBMODULE int check_mic( IFBP ifbp )
-*.PURPOSE verifies the MIC of a received non-USB frame.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-*
-*.RETURNS
-* HCF_SUCCESS
-* HCF_ERR_MIC
-*
-*.DESCRIPTION
-*
-*
-*.DIAGRAM
-*
-*4: test whether or not a MIC is reported by the Hermes
-*14: the calculated MIC and the received MIC are compared, the return status is set when there is a mismatch
-*
-*.NOTICE
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE int check_mic( IFBP ifbp )
+ *.PURPOSE verifies the MIC of a received non-USB frame.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ *
+ *.RETURNS
+ * HCF_SUCCESS
+ * HCF_ERR_MIC
+ *
+ *.DESCRIPTION
+ *
+ *
+ *.DIAGRAM
+ *
+ *4: test whether or not a MIC is reported by the Hermes
+ *14: the calculated MIC and the received MIC are compared, the return status is set when there is a mismatch
+ *
+ *.NOTICE
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
int
check_mic( IFBP ifbp )
{
-int rc = HCF_SUCCESS;
-hcf_32 x32[2]; //* area to save rcvd 8 bytes MIC
+ int rc = HCF_SUCCESS;
+ hcf_32 x32[2]; //* area to save rcvd 8 bytes MIC
- //if MIC present in RxFS
+ //if MIC present in RxFS
if ( *(wci_recordp)&ifbp->IFB_lap[-HFS_ADDR_DEST] & HFS_STAT_MIC ) {
- //or if ( ifbp->IFB_MICRxCarry != 0xFFFF )
- CALC_RX_MIC( mic_pad, 8 ); //. process up to 3 remaining bytes of data and append 5 to 8 bytes of padding to MIC calculation
+ //or if ( ifbp->IFB_MICRxCarry != 0xFFFF )
+ CALC_RX_MIC( mic_pad, 8 ); //. process up to 3 remaining bytes of data and append 5 to 8 bytes of padding to MIC calculation
get_frag( ifbp, (wci_bufp)x32, 8 BE_PAR(0));//. get 8 byte MIC from NIC
- //. if calculated and received MIC do not match
- //. . set status at HCF_ERR_MIC
-/*14*/ if ( x32[0] != CNV_LITTLE_TO_LONG(ifbp->IFB_MICRx[0]) ||
- x32[1] != CNV_LITTLE_TO_LONG(ifbp->IFB_MICRx[1]) ) {
+ //. if calculated and received MIC do not match
+ //. . set status at HCF_ERR_MIC
+ /*14*/ if ( x32[0] != CNV_LITTLE_TO_LONG(ifbp->IFB_MICRx[0]) ||
+ x32[1] != CNV_LITTLE_TO_LONG(ifbp->IFB_MICRx[1]) ) {
rc = HCF_ERR_MIC;
}
}
- //return status
+ //return status
return rc;
} // check_mic
#endif // HCF_TYPE_WPA
-#endif // HCF_DL_ONLY
/************************************************************************************************************
-*
-*.SUBMODULE int cmd_cmpl( IFBP ifbp )
-*.PURPOSE waits for Hermes Command Completion.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-*
-*.RETURNS
-* IFB_DefunctStat
-* HCF_ERR_TIME_OUT
-* HCF_ERR_DEFUNCT_CMD_SEQ
-* HCF_SUCCESS
-*
-*.DESCRIPTION
-*
-*
-*.DIAGRAM
-*
-*2: Once cmd_cmpl is called, the Busy option bit in IFB_Cmd must be cleared
-*4: If Status register and command code don't match either:
-* - the Hermes and Host are out of sync ( a fatal error)
-* - error bits are reported via the Status Register.
-* Out of sync is considered fatal and brings the HCF in Defunct mode
-* Errors reported via the Status Register should be caused by sequence violations in Hermes command
-* sequences and hence these bugs should have been found during engineering testing. Since there is no
-* strategy to cope with this problem, it might as well be ignored at run time. Note that for any particular
-* situation where a strategy is formulated to handle the consequences of a particular bug causing a
-* particular Error situation reported via the Status Register, the bug should be removed rather than adding
-* logic to cope with the consequences of the bug.
-* There have been HCF versions where an error report via the Status Register even brought the HCF in defunct
-* mode (although it was not yet named like that at that time). This is particular undesirable behavior for a
-* general library.
-* Simply reporting the error (as "interesting") is debatable. There also have been HCF versions with this
-* strategy using the "vague" HCF_FAILURE code.
-* The error is reported via:
-* - MiscErr tally of the HCF Tally set
-* - the (informative) fields IFB_ErrCmd and IFB_ErrQualifier
-* - the assert mechanism
-*8: Here the Defunct case and the Status error are separately treated
-*
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE int cmd_cmpl( IFBP ifbp )
+ *.PURPOSE waits for Hermes Command Completion.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ *
+ *.RETURNS
+ * IFB_DefunctStat
+ * HCF_ERR_TIME_OUT
+ * HCF_ERR_DEFUNCT_CMD_SEQ
+ * HCF_SUCCESS
+ *
+ *.DESCRIPTION
+ *
+ *
+ *.DIAGRAM
+ *
+ *2: Once cmd_cmpl is called, the Busy option bit in IFB_Cmd must be cleared
+ *4: If Status register and command code don't match either:
+ * - the Hermes and Host are out of sync ( a fatal error)
+ * - error bits are reported via the Status Register.
+ * Out of sync is considered fatal and brings the HCF in Defunct mode
+ * Errors reported via the Status Register should be caused by sequence violations in Hermes command
+ * sequences and hence these bugs should have been found during engineering testing. Since there is no
+ * strategy to cope with this problem, it might as well be ignored at run time. Note that for any particular
+ * situation where a strategy is formulated to handle the consequences of a particular bug causing a
+ * particular Error situation reported via the Status Register, the bug should be removed rather than adding
+ * logic to cope with the consequences of the bug.
+ * There have been HCF versions where an error report via the Status Register even brought the HCF in defunct
+ * mode (although it was not yet named like that at that time). This is particular undesirable behavior for a
+ * general library.
+ * Simply reporting the error (as "interesting") is debatable. There also have been HCF versions with this
+ * strategy using the "vague" HCF_FAILURE code.
+ * The error is reported via:
+ * - MiscErr tally of the HCF Tally set
+ * - the (informative) fields IFB_ErrCmd and IFB_ErrQualifier
+ * - the assert mechanism
+ *8: Here the Defunct case and the Status error are separately treated
+ *
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
HCF_STATIC int
cmd_cmpl( IFBP ifbp )
{
-PROT_CNT_INI
-int rc = HCF_SUCCESS;
-hcf_16 stat;
+ PROT_CNT_INI;
+ int rc = HCF_SUCCESS;
+ hcf_16 stat;
- HCFLOGENTRY( HCF_TRACE_CMD_CPL, ifbp->IFB_Cmd )
- ifbp->IFB_Cmd &= ~HCMD_BUSY; /* 2 */
- HCF_WAIT_WHILE( (IPW( HREG_EV_STAT) & HREG_EV_CMD) == 0 ); /* 4 */
+ HCFLOGENTRY( HCF_TRACE_CMD_CPL, ifbp->IFB_Cmd );
+ ifbp->IFB_Cmd &= ~HCMD_BUSY; /* 2 */
+ HCF_WAIT_WHILE( (IPW( HREG_EV_STAT) & HREG_EV_CMD) == 0 ); /* 4 */
stat = IPW( HREG_STAT );
#if HCF_PROT_TIME
if ( prot_cnt == 0 ) {
- IF_TALLY( ifbp->IFB_HCF_Tallies.MiscErr++; )
+ IF_TALLY( ifbp->IFB_HCF_Tallies.MiscErr++ );
rc = HCF_ERR_TIME_OUT;
- HCFASSERT( DO_ASSERT, ifbp->IFB_Cmd )
+ HCFASSERT( DO_ASSERT, ifbp->IFB_Cmd );
} else
#endif // HCF_PROT_TIME
{
DAWA_ACK( HREG_EV_CMD );
-/*4*/ if ( stat != (ifbp->IFB_Cmd & HCMD_CMD_CODE) ) {
-/*8*/ if ( ( (stat ^ ifbp->IFB_Cmd ) & HCMD_CMD_CODE) != 0 ) {
+ /*4*/ if ( stat != (ifbp->IFB_Cmd & HCMD_CMD_CODE) ) {
+ /*8*/ if ( ( (stat ^ ifbp->IFB_Cmd ) & HCMD_CMD_CODE) != 0 ) {
rc = ifbp->IFB_DefunctStat = HCF_ERR_DEFUNCT_CMD_SEQ;
ifbp->IFB_CardStat |= CARD_STAT_DEFUNCT;
}
- IF_TALLY( ifbp->IFB_HCF_Tallies.MiscErr++; )
+ IF_TALLY( ifbp->IFB_HCF_Tallies.MiscErr++ );
ifbp->IFB_ErrCmd = stat;
ifbp->IFB_ErrQualifier = IPW( HREG_RESP_0 );
- HCFASSERT( DO_ASSERT, MERGE_2( IPW( HREG_PARAM_0 ), ifbp->IFB_Cmd ) )
- HCFASSERT( DO_ASSERT, MERGE_2( ifbp->IFB_ErrQualifier, ifbp->IFB_ErrCmd ) )
+ HCFASSERT( DO_ASSERT, MERGE_2( IPW( HREG_PARAM_0 ), ifbp->IFB_Cmd ) );
+ HCFASSERT( DO_ASSERT, MERGE_2( ifbp->IFB_ErrQualifier, ifbp->IFB_ErrCmd ) );
}
}
- HCFASSERT( rc == HCF_SUCCESS, rc)
- HCFLOGEXIT( HCF_TRACE_CMD_CPL )
+ HCFASSERT( rc == HCF_SUCCESS, rc);
+ HCFLOGEXIT( HCF_TRACE_CMD_CPL );
return rc;
} // cmd_cmpl
/************************************************************************************************************
-*
-*.SUBMODULE int cmd_exe( IFBP ifbp, int cmd_code, int par_0 )
-*.PURPOSE Executes synchronous part of Hermes Command and - optionally - waits for Command Completion.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* cmd_code
-* par_0
-*
-*.RETURNS
-* IFB_DefunctStat
-* HCF_ERR_DEFUNCT_CMD_SEQ
-* HCF_SUCCESS
-* HCF_ERR_TO_BE_ADDED <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
-*
-*.DESCRIPTION
-* Executes synchronous Hermes Command and waits for Command Completion
-*
-* The general HCF strategy is to wait for command completion. As a consequence:
-* - the read of the busy bit before writing the command register is superfluous
-* - the Hermes requirement that no Inquiry command may be executed if there is still an unacknowledged
-* Inquiry command outstanding, is automatically met.
-* The Tx command uses the "Busy" bit in the cmd_code parameter to deviate from this general HCF strategy.
-* The idea is that by not busy-waiting on completion of this frequently used command the processor
-* utilization is diminished while using the busy-wait on all other seldom used commands the flow is kept
-* simple.
-*
-*
-*
-*.DIAGRAM
-*
-*1: skip the body of cmd_exe when in defunct mode or when - based on the S/W Support register write and
-* read back test - there is apparently no NIC.
-* Note: we gave up on the "old" strategy to write the S/W Support register at magic only when needed. Due to
-* the intricateness of Hermes F/W varieties ( which behave differently as far as corruption of the S/W
-* Support register is involved), the increasing number of Hermes commands which do an implicit initialize
-* (thus modifying the S/W Support register) and the workarounds of some OS/Support S/W induced aspects (e.g.
-* the System Soft library at WinNT which postpones the actual mapping of I/O space up to 30 seconds after
-* giving the go-ahead), the "magic" strategy is now reduced to a simple write and read back. This means that
-* problems like a bug tramping over the memory mapped Hermes registers will no longer be noticed as side
-* effect of the S/W Support register check.
-*2: check whether the preceding command skipped the busy wait and if so, check for command completion
-*
-*.NOTICE
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE int cmd_exe( IFBP ifbp, int cmd_code, int par_0 )
+ *.PURPOSE Executes synchronous part of Hermes Command and - optionally - waits for Command Completion.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * cmd_code
+ * par_0
+ *
+ *.RETURNS
+ * IFB_DefunctStat
+ * HCF_ERR_DEFUNCT_CMD_SEQ
+ * HCF_SUCCESS
+ * HCF_ERR_TO_BE_ADDED <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
+ *
+ *.DESCRIPTION
+ * Executes synchronous Hermes Command and waits for Command Completion
+ *
+ * The general HCF strategy is to wait for command completion. As a consequence:
+ * - the read of the busy bit before writing the command register is superfluous
+ * - the Hermes requirement that no Inquiry command may be executed if there is still an unacknowledged
+ * Inquiry command outstanding, is automatically met.
+ * The Tx command uses the "Busy" bit in the cmd_code parameter to deviate from this general HCF strategy.
+ * The idea is that by not busy-waiting on completion of this frequently used command the processor
+ * utilization is diminished while using the busy-wait on all other seldom used commands the flow is kept
+ * simple.
+ *
+ *
+ *
+ *.DIAGRAM
+ *
+ *1: skip the body of cmd_exe when in defunct mode or when - based on the S/W Support register write and
+ * read back test - there is apparently no NIC.
+ * Note: we gave up on the "old" strategy to write the S/W Support register at magic only when needed. Due to
+ * the intricateness of Hermes F/W varieties ( which behave differently as far as corruption of the S/W
+ * Support register is involved), the increasing number of Hermes commands which do an implicit initialize
+ * (thus modifying the S/W Support register) and the workarounds of some OS/Support S/W induced aspects (e.g.
+ * the System Soft library at WinNT which postpones the actual mapping of I/O space up to 30 seconds after
+ * giving the go-ahead), the "magic" strategy is now reduced to a simple write and read back. This means that
+ * problems like a bug tramping over the memory mapped Hermes registers will no longer be noticed as side
+ * effect of the S/W Support register check.
+ *2: check whether the preceding command skipped the busy wait and if so, check for command completion
+ *
+ *.NOTICE
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
HCF_STATIC int
-cmd_exe( IFBP ifbp, hcf_16 cmd_code, hcf_16 par_0 ) //if HCMD_BUSY of cmd_code set, then do NOT wait for completion
+cmd_exe( IFBP ifbp, hcf_16 cmd_code, hcf_16 par_0 ) //if HCMD_BUSY of cmd_code set, then do NOT wait for completion
{
-int rc;
+ int rc;
- HCFLOGENTRY( HCF_TRACE_CMD_EXE, cmd_code )
- HCFASSERT( (cmd_code & HCMD_CMD_CODE) != HCMD_TX || cmd_code & HCMD_BUSY, cmd_code ) //Tx must have Busy bit set
+ HCFLOGENTRY( HCF_TRACE_CMD_EXE, cmd_code );
+ HCFASSERT( (cmd_code & HCMD_CMD_CODE) != HCMD_TX || cmd_code & HCMD_BUSY, cmd_code ); //Tx must have Busy bit set
OPW( HREG_SW_0, HCF_MAGIC );
- if ( IPW( HREG_SW_0 ) == HCF_MAGIC ) { /* 1 */
+ if ( IPW( HREG_SW_0 ) == HCF_MAGIC ) { /* 1 */
rc = ifbp->IFB_DefunctStat;
}
else rc = HCF_ERR_NO_NIC;
if ( rc == HCF_SUCCESS ) {
//;?is this a hot idea, better MEASURE performance impact
-/*2*/ if ( ifbp->IFB_Cmd & HCMD_BUSY ) {
+ /*2*/ if ( ifbp->IFB_Cmd & HCMD_BUSY ) {
rc = cmd_cmpl( ifbp );
}
OPW( HREG_PARAM_0, par_0 );
OPW( HREG_CMD, cmd_code &~HCMD_BUSY );
ifbp->IFB_Cmd = cmd_code;
- if ( (cmd_code & HCMD_BUSY) == 0 ) { //;?is this a hot idea, better MEASURE performance impact
+ if ( (cmd_code & HCMD_BUSY) == 0 ) { //;?is this a hot idea, better MEASURE performance impact
rc = cmd_cmpl( ifbp );
}
}
- HCFASSERT( rc == HCF_SUCCESS, MERGE_2( rc, cmd_code ) )
- HCFLOGEXIT( HCF_TRACE_CMD_EXE )
+ HCFASSERT( rc == HCF_SUCCESS, MERGE_2( rc, cmd_code ) );
+ HCFLOGEXIT( HCF_TRACE_CMD_EXE );
return rc;
} // cmd_exe
/************************************************************************************************************
-*
-*.SUBMODULE int download( IFBP ifbp, CFG_PROG_STRCT FAR *ltvp )
-*.PURPOSE downloads F/W image into NIC and initiates execution of the downloaded F/W.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* ltvp specifies the pseudo-RID (as defined by WCI)
-*
-*.RETURNS
-*
-*.DESCRIPTION
-*
-*
-*.DIAGRAM
-*1: First, Ack everything to unblock a (possibly) blocked cmd pipe line
-* Note 1: it is very likely that an Alloc event is pending and very well possible that a (Send) Cmd event is
-* pending
-* Note 2: it is assumed that this strategy takes away the need to ack every conceivable event after an
-* Hermes Initialize
-*
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE int download( IFBP ifbp, CFG_PROG_STRCT FAR *ltvp )
+ *.PURPOSE downloads F/W image into NIC and initiates execution of the downloaded F/W.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * ltvp specifies the pseudo-RID (as defined by WCI)
+ *
+ *.RETURNS
+ *
+ *.DESCRIPTION
+ *
+ *
+ *.DIAGRAM
+ *1: First, Ack everything to unblock a (possibly) blocked cmd pipe line
+ * Note 1: it is very likely that an Alloc event is pending and very well possible that a (Send) Cmd event is
+ * pending
+ * Note 2: it is assumed that this strategy takes away the need to ack every conceivable event after an
+ * Hermes Initialize
+ *
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
HCF_STATIC int
-download( IFBP ifbp, CFG_PROG_STRCT FAR *ltvp ) //Hermes-II download (volatile only)
+download( IFBP ifbp, CFG_PROG_STRCT FAR *ltvp ) //Hermes-II download (volatile only)
{
-hcf_16 i;
-int rc = HCF_SUCCESS;
-wci_bufp cp;
-hcf_io io_port = ifbp->IFB_IOBase + HREG_AUX_DATA;
+ hcf_16 i;
+ int rc = HCF_SUCCESS;
+ wci_bufp cp;
+ hcf_io io_port = ifbp->IFB_IOBase + HREG_AUX_DATA;
- HCFLOGENTRY( HCF_TRACE_DL, ltvp->typ )
+ HCFLOGENTRY( HCF_TRACE_DL, ltvp->typ );
#if (HCF_TYPE) & HCF_TYPE_PRELOADED
- HCFASSERT( DO_ASSERT, ltvp->mode )
+ HCFASSERT( DO_ASSERT, ltvp->mode );
#else
- //if initial "program" LTV
+ //if initial "program" LTV
if ( ifbp->IFB_DLMode == CFG_PROG_STOP && ltvp->mode == CFG_PROG_VOLATILE) {
- //. switch Hermes to initial mode
-/*1*/ OPW( HREG_EV_ACK, ~HREG_EV_SLEEP_REQ );
- rc = cmd_exe( ifbp, HCMD_INI, 0 ); /* HCMD_INI can not be part of init() because that is called on
- * other occasions as well */
+ //. switch Hermes to initial mode
+ /*1*/ OPW( HREG_EV_ACK, ~HREG_EV_SLEEP_REQ );
+ rc = cmd_exe( ifbp, HCMD_INI, 0 ); /* HCMD_INI can not be part of init() because that is called on
+ * other occasions as well */
rc = init( ifbp );
}
- //if final "program" LTV
+ //if final "program" LTV
if ( ltvp->mode == CFG_PROG_STOP && ifbp->IFB_DLMode == CFG_PROG_VOLATILE) {
- //. start tertiary (or secondary)
+ //. start tertiary (or secondary)
OPW( HREG_PARAM_1, (hcf_16)(ltvp->nic_addr >> 16) );
rc = cmd_exe( ifbp, HCMD_EXECUTE, (hcf_16) ltvp->nic_addr );
if (rc == HCF_SUCCESS) {
- rc = init( ifbp ); /*;? do we really want to skip init if cmd_exe failed, i.e.
- * IFB_FW_Comp_Id is than possibly incorrect */
- }
- //else (non-final)
+ rc = init( ifbp ); /*;? do we really want to skip init if cmd_exe failed, i.e.
+ * IFB_FW_Comp_Id is than possibly incorrect */
+ }
+ //else (non-final)
} else {
- //. if mode == Readback SEEPROM
-#if 0 //;? as long as the next if contains a hard coded 0, might as well leave it out even more obvious
+ //. if mode == Readback SEEPROM
+#if 0 //;? as long as the next if contains a hard coded 0, might as well leave it out even more obvious
if ( 0 /*len is definitely not want we want;?*/ && ltvp->mode == CFG_PROG_SEEPROM_READBACK ) {
OPW( HREG_PARAM_1, (hcf_16)(ltvp->nic_addr >> 16) );
- OPW( HREG_PARAM_2, MUL_BY_2(ltvp->len - 4));
- //. . perform Hermes prog cmd with appropriate mode bits
+ OPW( HREG_PARAM_2, (hcf_16)((ltvp->len - 4) << 1) );
+ //. . perform Hermes prog cmd with appropriate mode bits
rc = cmd_exe( ifbp, HCMD_PROGRAM | ltvp->mode, (hcf_16)ltvp->nic_addr );
- //. . set up NIC RAM addressability according Resp0-1
+ //. . set up NIC RAM addressability according Resp0-1
OPW( HREG_AUX_PAGE, IPW( HREG_RESP_1) );
OPW( HREG_AUX_OFFSET, IPW( HREG_RESP_0) );
- //. . set up L-field of LTV according Resp2
+ //. . set up L-field of LTV according Resp2
i = ( IPW( HREG_RESP_2 ) + 1 ) / 2; // i contains max buffer size in words, a probably not very useful piece of information ;?
/*Nico's code based on i is the "real amount of data available"
if ( ltvp->len - 4 < i ) rc = HCF_ERR_LEN;
@@ -3810,126 +3680,125 @@ hcf_io io_port = ifbp->IFB_IOBase + HREG_AUX_DATA;
ltvp->len = i + 4;
}
*/
- //. . copy data from NIC via AUX port to LTV
- cp = (wci_bufp)ltvp->host_addr; /*IN_PORT_STRING_8_16 macro may modify its parameters*/
+ //. . copy data from NIC via AUX port to LTV
+ cp = (wci_bufp)ltvp->host_addr; /*IN_PORT_STRING_8_16 macro may modify its parameters*/
i = ltvp->len - 4;
- IN_PORT_STRING_8_16( io_port, cp, i ); //!!!WORD length, cp MUST be a char pointer // $$ char
- //. else (non-final programming)
+ IN_PORT_STRING_8_16( io_port, cp, i ); //!!!WORD length, cp MUST be a char pointer // $$ char
+ //. else (non-final programming)
} else
#endif //;? as long as the above if contains a hard coded 0, might as well leave it out even more obvious
- { //. . get number of words to program
- HCFASSERT( ltvp->segment_size, *ltvp->host_addr )
+ { //. . get number of words to program
+ HCFASSERT( ltvp->segment_size, *ltvp->host_addr );
i = ltvp->segment_size/2;
- //. . copy data (words) from LTV via AUX port to NIC
- cp = (wci_bufp)ltvp->host_addr; //OUT_PORT_STRING_8_16 macro may modify its parameters
- //. . if mode == volatile programming
+ //. . copy data (words) from LTV via AUX port to NIC
+ cp = (wci_bufp)ltvp->host_addr; //OUT_PORT_STRING_8_16 macro may modify its parameters
+ //. . if mode == volatile programming
if ( ltvp->mode == CFG_PROG_VOLATILE ) {
- //. . . set up NIC RAM addressability via AUX port
+ //. . . set up NIC RAM addressability via AUX port
OPW( HREG_AUX_PAGE, (hcf_16)(ltvp->nic_addr >> 16 << 9 | (ltvp->nic_addr & 0xFFFF) >> 7 ) );
OPW( HREG_AUX_OFFSET, (hcf_16)(ltvp->nic_addr & 0x007E) );
- OUT_PORT_STRING_8_16( io_port, cp, i ); //!!!WORD length, cp MUST be a char pointer
+ OUT_PORT_STRING_8_16( io_port, cp, i ); //!!!WORD length, cp MUST be a char pointer
}
}
}
- ifbp->IFB_DLMode = ltvp->mode; //save state in IFB_DLMode
+ ifbp->IFB_DLMode = ltvp->mode; //save state in IFB_DLMode
#endif // HCF_TYPE_PRELOADED
- HCFASSERT( rc == HCF_SUCCESS, rc )
- HCFLOGEXIT( HCF_TRACE_DL )
+ HCFASSERT( rc == HCF_SUCCESS, rc );
+ HCFLOGEXIT( HCF_TRACE_DL );
return rc;
} // download
#if (HCF_ASSERT) & HCF_ASSERT_PRINTF
/**************************************************
-* Certain Hermes-II firmware versions can generate
-* debug information. This debug information is
-* contained in a buffer in nic-RAM, and can be read
-* via the aux port.
-**************************************************/
+ * Certain Hermes-II firmware versions can generate
+ * debug information. This debug information is
+ * contained in a buffer in nic-RAM, and can be read
+ * via the aux port.
+ **************************************************/
HCF_STATIC int
fw_printf(IFBP ifbp, CFG_FW_PRINTF_STRCT FAR *ltvp)
{
- int rc = HCF_SUCCESS;
- hcf_16 fw_cnt;
-// hcf_32 DbMsgBuffer = 0x29D2, DbMsgCount= 0x000029D0;
-// hcf_16 DbMsgSize=0x00000080;
- hcf_32 DbMsgBuffer;
- CFG_FW_PRINTF_BUFFER_LOCATION_STRCT *p = &ifbp->IFB_FwPfBuff;
- ltvp->len = 1;
- if ( p->DbMsgSize != 0 ) {
- // first, check the counter in nic-RAM and compare it to the latest counter value of the HCF
- OPW( HREG_AUX_PAGE, (hcf_16)(p->DbMsgCount >> 7) );
- OPW( HREG_AUX_OFFSET, (hcf_16)(p->DbMsgCount & 0x7E) );
- fw_cnt = ((IPW( HREG_AUX_DATA) >>1 ) & ((hcf_16)p->DbMsgSize - 1));
- if ( fw_cnt != ifbp->IFB_DbgPrintF_Cnt ) {
-// DbgPrint("fw_cnt=%d IFB_DbgPrintF_Cnt=%d\n", fw_cnt, ifbp->IFB_DbgPrintF_Cnt);
- DbMsgBuffer = p->DbMsgBuffer + ifbp->IFB_DbgPrintF_Cnt * 6; // each entry is 3 words
- OPW( HREG_AUX_PAGE, (hcf_16)(DbMsgBuffer >> 7) );
- OPW( HREG_AUX_OFFSET, (hcf_16)(DbMsgBuffer & 0x7E) );
- ltvp->msg_id = IPW(HREG_AUX_DATA);
- ltvp->msg_par = IPW(HREG_AUX_DATA);
- ltvp->msg_tstamp = IPW(HREG_AUX_DATA);
- ltvp->len = 4;
- ifbp->IFB_DbgPrintF_Cnt++;
- ifbp->IFB_DbgPrintF_Cnt &= (p->DbMsgSize - 1);
- }
- }
- return rc;
+ int rc = HCF_SUCCESS;
+ hcf_16 fw_cnt;
+// hcf_32 DbMsgBuffer = 0x29D2, DbMsgCount= 0x000029D0;
+// hcf_16 DbMsgSize=0x00000080;
+ hcf_32 DbMsgBuffer;
+ CFG_FW_PRINTF_BUFFER_LOCATION_STRCT *p = &ifbp->IFB_FwPfBuff;
+ ltvp->len = 1;
+ if ( p->DbMsgSize != 0 ) {
+ // first, check the counter in nic-RAM and compare it to the latest counter value of the HCF
+ OPW( HREG_AUX_PAGE, (hcf_16)(p->DbMsgCount >> 7) );
+ OPW( HREG_AUX_OFFSET, (hcf_16)(p->DbMsgCount & 0x7E) );
+ fw_cnt = ((IPW( HREG_AUX_DATA) >>1 ) & ((hcf_16)p->DbMsgSize - 1));
+ if ( fw_cnt != ifbp->IFB_DbgPrintF_Cnt ) {
+// DbgPrint("fw_cnt=%d IFB_DbgPrintF_Cnt=%d\n", fw_cnt, ifbp->IFB_DbgPrintF_Cnt);
+ DbMsgBuffer = p->DbMsgBuffer + ifbp->IFB_DbgPrintF_Cnt * 6; // each entry is 3 words
+ OPW( HREG_AUX_PAGE, (hcf_16)(DbMsgBuffer >> 7) );
+ OPW( HREG_AUX_OFFSET, (hcf_16)(DbMsgBuffer & 0x7E) );
+ ltvp->msg_id = IPW(HREG_AUX_DATA);
+ ltvp->msg_par = IPW(HREG_AUX_DATA);
+ ltvp->msg_tstamp = IPW(HREG_AUX_DATA);
+ ltvp->len = 4;
+ ifbp->IFB_DbgPrintF_Cnt++;
+ ifbp->IFB_DbgPrintF_Cnt &= (p->DbMsgSize - 1);
+ }
+ }
+ return rc;
};
#endif // HCF_ASSERT_PRINTF
-#if (HCF_DL_ONLY) == 0
/************************************************************************************************************
-*
-*.SUBMODULE hcf_16 get_fid( IFBP ifbp )
-*.PURPOSE get allocated FID for either transmit or notify.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-*
-*.RETURNS
-* 0 no FID available
-* <>0 FID number
-*
-*.DESCRIPTION
-*
-*
-*.DIAGRAM
-* The preference is to use a "pending" alloc. If no alloc is pending, then - if available - the "spare" FID
-* is used.
-* If the spare FID is used, IFB_RscInd (representing the spare FID) must be cleared
-* If the pending alloc is used, the alloc event must be acknowledged to the Hermes.
-* In case the spare FID was depleted and the IFB_RscInd has been "faked" as pseudo resource with a 0x0001
-* value by hcf_service_nic, IFB_RscInd has to be "corrected" again to its 0x0000 value.
-*
-* Note that due to the Hermes-II H/W problems which are intended to be worked around by DAWA, the Alloc bit
-* in the Event register is no longer a reliable indication of the presence/absence of a FID. The "Clear FID"
-* part of the DAWA logic, together with the choice of the definition of the return information from get_fid,
-* handle this automatically, i.e. without additional code in get_fid.
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE hcf_16 get_fid( IFBP ifbp )
+ *.PURPOSE get allocated FID for either transmit or notify.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ *
+ *.RETURNS
+ * 0 no FID available
+ * <>0 FID number
+ *
+ *.DESCRIPTION
+ *
+ *
+ *.DIAGRAM
+ * The preference is to use a "pending" alloc. If no alloc is pending, then - if available - the "spare" FID
+ * is used.
+ * If the spare FID is used, IFB_RscInd (representing the spare FID) must be cleared
+ * If the pending alloc is used, the alloc event must be acknowledged to the Hermes.
+ * In case the spare FID was depleted and the IFB_RscInd has been "faked" as pseudo resource with a 0x0001
+ * value by hcf_service_nic, IFB_RscInd has to be "corrected" again to its 0x0000 value.
+ *
+ * Note that due to the Hermes-II H/W problems which are intended to be worked around by DAWA, the Alloc bit
+ * in the Event register is no longer a reliable indication of the presence/absence of a FID. The "Clear FID"
+ * part of the DAWA logic, together with the choice of the definition of the return information from get_fid,
+ * handle this automatically, i.e. without additional code in get_fid.
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
HCF_STATIC hcf_16
get_fid( IFBP ifbp )
{
-hcf_16 fid = 0;
+ hcf_16 fid = 0;
#if ( (HCF_TYPE) & HCF_TYPE_HII5 ) == 0
-PROT_CNT_INI
+ PROT_CNT_INI;
#endif // HCF_TYPE_HII5
- IF_DMA( HCFASSERT(!(ifbp->IFB_CntlOpt & USE_DMA), ifbp->IFB_CntlOpt) )
+ IF_DMA( HCFASSERT(!(ifbp->IFB_CntlOpt & USE_DMA), ifbp->IFB_CntlOpt) );
if ( IPW( HREG_EV_STAT) & HREG_EV_ALLOC) {
fid = IPW( HREG_ALLOC_FID );
- HCFASSERT( fid, ifbp->IFB_RscInd )
- DAWA_ZERO_FID( HREG_ALLOC_FID )
+ HCFASSERT( fid, ifbp->IFB_RscInd );
+ DAWA_ZERO_FID( HREG_ALLOC_FID );
#if ( (HCF_TYPE) & HCF_TYPE_HII5 ) == 0
HCF_WAIT_WHILE( ( IPW( HREG_EV_STAT ) & HREG_EV_ACK_REG_READY ) == 0 );
- HCFASSERT( prot_cnt, IPW( HREG_EV_STAT ) )
+ HCFASSERT( prot_cnt, IPW( HREG_EV_STAT ) );
#endif // HCF_TYPE_HII5
- DAWA_ACK( HREG_EV_ALLOC ); //!!note that HREG_EV_ALLOC is written only once
+ DAWA_ACK( HREG_EV_ALLOC ); //!!note that HREG_EV_ALLOC is written only once
// 180 degree error in logic ;? #if ALLOC_15
if ( ifbp->IFB_RscInd == 1 ) {
ifbp->IFB_RscInd = 0;
@@ -3943,120 +3812,119 @@ PROT_CNT_INI
}
return fid;
} // get_fid
-#endif // HCF_DL_ONLY
/************************************************************************************************************
-*
-*.SUBMODULE void get_frag( IFBP ifbp, wci_bufp bufp, int len BE_PAR( int word_len ) )
-*.PURPOSE reads with 16/32 bit I/O via BAP1 port from NIC RAM to Host memory.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* bufp (byte) address of buffer
-* len length in bytes of buffer specified by bufp
-* word_len Big Endian only: number of leading bytes to swap in pairs
-*
-*.RETURNS N.A.
-*
-*.DESCRIPTION
-* process the single byte (if applicable) read by the previous get_frag and copy len (or len-1) bytes from
-* NIC to bufp.
-* On a Big Endian platform, the parameter word_len controls the number of leading bytes whose endianess is
-* converted (i.e. byte swapped)
-*
-*
-*.DIAGRAM
-*10: The PCMCIA card can be removed in the middle of the transfer. By depositing a "magic number" in the
-* HREG_SW_0 register of the Hermes at initialization time and by verifying this register, it can be
-* determined whether the card is still present. The return status is set accordingly.
-* Clearing the buffer is a (relative) cheap way to prevent that failing I/O results in run-away behavior
-* because the garbage in the buffer is interpreted by the caller irrespective of the return status (e.g.
-* hcf_service_nic has this behavior).
-*
-*.NOTICE
-* It turns out DOS ODI uses zero length fragments. The HCF code can cope with it, but as a consequence, no
-* Assert on len is possible
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE void get_frag( IFBP ifbp, wci_bufp bufp, int len BE_PAR( int word_len ) )
+ *.PURPOSE reads with 16/32 bit I/O via BAP1 port from NIC RAM to Host memory.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * bufp (byte) address of buffer
+ * len length in bytes of buffer specified by bufp
+ * word_len Big Endian only: number of leading bytes to swap in pairs
+ *
+ *.RETURNS N.A.
+ *
+ *.DESCRIPTION
+ * process the single byte (if applicable) read by the previous get_frag and copy len (or len-1) bytes from
+ * NIC to bufp.
+ * On a Big Endian platform, the parameter word_len controls the number of leading bytes whose endianess is
+ * converted (i.e. byte swapped)
+ *
+ *
+ *.DIAGRAM
+ *10: The PCMCIA card can be removed in the middle of the transfer. By depositing a "magic number" in the
+ * HREG_SW_0 register of the Hermes at initialization time and by verifying this register, it can be
+ * determined whether the card is still present. The return status is set accordingly.
+ * Clearing the buffer is a (relative) cheap way to prevent that failing I/O results in run-away behavior
+ * because the garbage in the buffer is interpreted by the caller irrespective of the return status (e.g.
+ * hcf_service_nic has this behavior).
+ *
+ *.NOTICE
+ * It turns out DOS ODI uses zero length fragments. The HCF code can cope with it, but as a consequence, no
+ * Assert on len is possible
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
HCF_STATIC void
get_frag( IFBP ifbp, wci_bufp bufp, int len BE_PAR( int word_len ) )
{
-hcf_io io_port = ifbp->IFB_IOBase + HREG_DATA_1; //BAP data register
-wci_bufp p = bufp; //working pointer
-int i; //prevent side effects from macro
-int j;
+ hcf_io io_port = ifbp->IFB_IOBase + HREG_DATA_1; //BAP data register
+ wci_bufp p = bufp; //working pointer
+ int i; //prevent side effects from macro
+ int j;
- HCFASSERT( ((hcf_32)bufp & (HCF_ALIGN-1) ) == 0, (hcf_32)bufp )
+ HCFASSERT( ((hcf_32)bufp & (HCF_ALIGN-1) ) == 0, (hcf_32)bufp );
-/*1: here recovery logic for intervening BAP access between hcf_service_nic and hcf_rcv_msg COULD be added
- * if current access is RxInitial
- * . persistent_offset += len
+/*1: here recovery logic for intervening BAP access between hcf_service_nic and hcf_rcv_msg COULD be added
+ * if current access is RxInitial
+ * . persistent_offset += len
*/
i = len;
- //if buffer length > 0 and carry from previous get_frag
+ //if buffer length > 0 and carry from previous get_frag
if ( i && ifbp->IFB_CarryIn ) {
- //. move carry to buffer
- //. adjust buffer length and pointer accordingly
+ //. move carry to buffer
+ //. adjust buffer length and pointer accordingly
*p++ = (hcf_8)(ifbp->IFB_CarryIn>>8);
i--;
- //. clear carry flag
+ //. clear carry flag
ifbp->IFB_CarryIn = 0;
}
#if (HCF_IO) & HCF_IO_32BITS
//skip zero-length I/O, single byte I/O and I/O not worthwhile (i.e. less than 6 bytes)for DW logic
- //if buffer length >= 6 and 32 bits I/O support
+ //if buffer length >= 6 and 32 bits I/O support
if ( !(ifbp->IFB_CntlOpt & USE_16BIT) && i >= 6 ) {
-hcf_32 FAR *p4; //prevent side effects from macro
- if ( ( (hcf_32)p & 0x1 ) == 0 ) { //. if buffer at least word aligned
- if ( (hcf_32)p & 0x2 ) { //. . if buffer not double word aligned
- //. . . read single word to get double word aligned
+ hcf_32 FAR *p4; //prevent side effects from macro
+ if ( ( (hcf_32)p & 0x1 ) == 0 ) { //. if buffer at least word aligned
+ if ( (hcf_32)p & 0x2 ) { //. . if buffer not double word aligned
+ //. . . read single word to get double word aligned
*(wci_recordp)p = IN_PORT_WORD( io_port );
- //. . . adjust buffer length and pointer accordingly
+ //. . . adjust buffer length and pointer accordingly
p += 2;
i -= 2;
}
- //. . read as many double word as possible
+ //. . read as many double word as possible
p4 = (hcf_32 FAR *)p;
j = i/4;
IN_PORT_STRING_32( io_port, p4, j );
- //. . adjust buffer length and pointer accordingly
+ //. . adjust buffer length and pointer accordingly
p += i & ~0x0003;
i &= 0x0003;
}
}
#endif // HCF_IO_32BITS
- //if no 32-bit support OR byte aligned OR 1-3 bytes left
+ //if no 32-bit support OR byte aligned OR 1-3 bytes left
if ( i ) {
- //. read as many word as possible in "alignment safe" way
+ //. read as many word as possible in "alignment safe" way
j = i/2;
IN_PORT_STRING_8_16( io_port, p, j );
- //. if 1 byte left
+ //. if 1 byte left
if ( i & 0x0001 ) {
- //. . read 1 word
+ //. . read 1 word
ifbp->IFB_CarryIn = IN_PORT_WORD( io_port );
- //. . store LSB in last char of buffer
+ //. . store LSB in last char of buffer
bufp[len-1] = (hcf_8)ifbp->IFB_CarryIn;
- //. . save MSB in carry, set carry flag
+ //. . save MSB in carry, set carry flag
ifbp->IFB_CarryIn |= 0x1;
}
}
#if HCF_BIG_ENDIAN
- HCFASSERT( word_len == 0 || word_len == 2 || word_len == 4, word_len )
- HCFASSERT( word_len == 0 || ((hcf_32)bufp & 1 ) == 0, (hcf_32)bufp )
- HCFASSERT( word_len <= len, MERGE2( word_len, len ) )
+ HCFASSERT( word_len == 0 || word_len == 2 || word_len == 4, word_len );
+ HCFASSERT( word_len == 0 || ((hcf_32)bufp & 1 ) == 0, (hcf_32)bufp );
+ HCFASSERT( word_len <= len, MERGE2( word_len, len ) );
//see put_frag for an alternative implementation, but be careful about what are int's and what are
//hcf_16's
- if ( word_len ) { //. if there is anything to convert
-hcf_8 c;
- c = bufp[1]; //. . convert the 1st hcf_16
+ if ( word_len ) { //. if there is anything to convert
+ hcf_8 c;
+ c = bufp[1]; //. . convert the 1st hcf_16
bufp[1] = bufp[0];
bufp[0] = c;
- if ( word_len > 1 ) { //. . if there is to convert more than 1 word ( i.e 2 )
- c = bufp[3]; //. . . convert the 2nd hcf_16
+ if ( word_len > 1 ) { //. . if there is to convert more than 1 word ( i.e 2 )
+ c = bufp[3]; //. . . convert the 2nd hcf_16
bufp[3] = bufp[2];
bufp[2] = c;
}
@@ -4065,108 +3933,108 @@ hcf_8 c;
} // get_frag
/************************************************************************************************************
-*
-*.SUBMODULE int init( IFBP ifbp )
-*.PURPOSE Handles common initialization aspects (H-I init, calibration, config.mngmt, allocation).
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-*
-*.RETURNS
-* HCF_ERR_INCOMP_PRI
-* HCF_ERR_INCOMP_FW
-* HCF_ERR_TIME_OUT
-* >>hcf_get_info
-* HCF_ERR_NO_NIC
-* HCF_ERR_LEN
-*
-*.DESCRIPTION
-* init will successively:
-* - in case of a (non-preloaded) H-I, initialize the NIC
-* - calibrate the S/W protection timer against the Hermes Timer
-* - collect HSI, "active" F/W Configuration Management Information
-* - in case active F/W is Primary F/W: collect Primary F/W Configuration Management Information
-* - check HSI and Primary F/W compatibility with the HCF
-* - in case active F/W is Station or AP F/W: check Station or AP F/W compatibility with the HCF
-* - in case active F/W is not Primary F/W: allocate FIDs to be used in transmit/notify process
-*
-*
-*.DIAGRAM
-*2: drop all error status bits in IFB_CardStat since they are expected to be re-evaluated.
-*4: Ack everything except HREG_EV_SLEEP_REQ. It is very likely that an Alloc event is pending and
-* very well possible that a Send Cmd event is pending. Acking HREG_EV_SLEEP_REQ is handled by hcf_action(
-* HCF_ACT_INT_ON ) !!!
-*10: Calibrate the S/W time-out protection mechanism by calling calibrate(). Note that possible errors
-* in the calibration process are nor reported by init but will show up via the defunct mechanism in
-* subsequent hcf-calls.
-*14: usb_check_comp() is called to have the minimal visual clutter for the legacy H-I USB dongle
-* compatibility check.
-*16: The following configuration management related information is retrieved from the NIC:
-* - HSI supplier
-* - F/W Identity
-* - F/W supplier
-* if appropriate:
-* - PRI Identity
-* - PRI supplier
-* appropriate means on H-I: always
-* and on H-II if F/W supplier reflects a primary (i.e. only after an Hermes Reset or Init
-* command).
-* QUESTION ;? !!!!!! should, For each of the above RIDs the Endianess is converted to native Endianess.
-* Only the return code of the first hcf_get_info is used. All hcf_get_info calls are made, regardless of
-* the success or failure of the 1st hcf_get_info. The assumptions are:
-* - if any call fails, they all fail, so remembering the result of the 1st call is adequate
-* - a failing call will overwrite the L-field with a 0x0000 value, which services both as an
-* error indication for the values cached in the IFB as making mmd_check_comp fail.
-* In case of H-I, when getting the F/W identity fails, the F/W is assumed to be H-I AP F/W pre-dating
-* version 9.0 and the F/W Identity and Supplier are faked accordingly.
-* In case of H-II, the Primary, Station and AP Identity are merged into a single F/W Identity.
-* The same applies to the Supplier information. As a consequence the PRI information can no longer be
-* retrieved when a Tertiary runs. To accommodate MSFs and Utilities who depend on PRI information being
-* available at any time, this information is cached in the IFB. In this cache the generic "F/W" value of
-* the typ-fields is overwritten with the specific (legacy) "PRI" values. To actually re-route the (legacy)
-* PRI request via hcf_get_info, the xxxx-table must be set. In case of H-I, this caching, modifying and
-* re-routing is not needed because PRI information is always available directly from the NIC. For
-* consistency the caching fields in the IFB are filled with the PRI information anyway.
-*18: mdd_check_comp() is called to check the Supplier Variant and Range of the Host-S/W I/F (HSI) and the
-* Primary Firmware Variant and Range against the Top and Bottom level supported by this HCF. If either of
-* these tests fails, the CARD_STAT_INCOMP_PRI bit of IFB_CardStat is set
-* Note: There should always be a primary except during production, so this makes the HCF in its current form
-* unsuitable for manufacturing test systems like the FTS. This can be remedied by an adding a test like
-* ifbp->IFB_PRISup.id == COMP_ID_PRI
-*20: In case there is Tertiary F/W and this F/W is Station F/W, the Supplier Variant and Range of the Station
-* Firmware function as retrieved from the Hermes is checked against the Top and Bottom level supported by
-* this HCF.
-* Note: ;? the tertiary F/W compatibility checks could be moved to the DHF, which already has checked the
-* CFI and MFI compatibility of the image with the NIC before the image was downloaded.
-*28: In case of non-Primary F/W: allocates and acknowledge a (TX or Notify) FID and allocates without
-* acknowledge another (TX or Notify) FID (the so-called 1.5 alloc scheme) with the following steps:
-* - execute the allocate command by calling cmd_exe
-* - wait till either the alloc event or a time-out occurs
-* - regardless whether the alloc event occurs, call get_fid to
-* - read the FID and save it in IFB_RscInd to be used as "spare FID"
-* - acknowledge the alloc event
-* - do another "half" allocate to complete the "1.5 Alloc scheme"
-* Note that above 3 steps do not harm and thus give the "cheapest" acceptable strategy.
-* If a time-out occurred, then report time out status (after all)
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE int init( IFBP ifbp )
+ *.PURPOSE Handles common initialization aspects (H-I init, calibration, config.mngmt, allocation).
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ *
+ *.RETURNS
+ * HCF_ERR_INCOMP_PRI
+ * HCF_ERR_INCOMP_FW
+ * HCF_ERR_TIME_OUT
+ * >>hcf_get_info
+ * HCF_ERR_NO_NIC
+ * HCF_ERR_LEN
+ *
+ *.DESCRIPTION
+ * init will successively:
+ * - in case of a (non-preloaded) H-I, initialize the NIC
+ * - calibrate the S/W protection timer against the Hermes Timer
+ * - collect HSI, "active" F/W Configuration Management Information
+ * - in case active F/W is Primary F/W: collect Primary F/W Configuration Management Information
+ * - check HSI and Primary F/W compatibility with the HCF
+ * - in case active F/W is Station or AP F/W: check Station or AP F/W compatibility with the HCF
+ * - in case active F/W is not Primary F/W: allocate FIDs to be used in transmit/notify process
+ *
+ *
+ *.DIAGRAM
+ *2: drop all error status bits in IFB_CardStat since they are expected to be re-evaluated.
+ *4: Ack everything except HREG_EV_SLEEP_REQ. It is very likely that an Alloc event is pending and
+ * very well possible that a Send Cmd event is pending. Acking HREG_EV_SLEEP_REQ is handled by hcf_action(
+ * HCF_ACT_INT_ON ) !!!
+ *10: Calibrate the S/W time-out protection mechanism by calling calibrate(). Note that possible errors
+ * in the calibration process are nor reported by init but will show up via the defunct mechanism in
+ * subsequent hcf-calls.
+ *14: usb_check_comp() is called to have the minimal visual clutter for the legacy H-I USB dongle
+ * compatibility check.
+ *16: The following configuration management related information is retrieved from the NIC:
+ * - HSI supplier
+ * - F/W Identity
+ * - F/W supplier
+ * if appropriate:
+ * - PRI Identity
+ * - PRI supplier
+ * appropriate means on H-I: always
+ * and on H-II if F/W supplier reflects a primary (i.e. only after an Hermes Reset or Init
+ * command).
+ * QUESTION ;? !!!!!! should, For each of the above RIDs the Endianess is converted to native Endianess.
+ * Only the return code of the first hcf_get_info is used. All hcf_get_info calls are made, regardless of
+ * the success or failure of the 1st hcf_get_info. The assumptions are:
+ * - if any call fails, they all fail, so remembering the result of the 1st call is adequate
+ * - a failing call will overwrite the L-field with a 0x0000 value, which services both as an
+ * error indication for the values cached in the IFB as making mmd_check_comp fail.
+ * In case of H-I, when getting the F/W identity fails, the F/W is assumed to be H-I AP F/W pre-dating
+ * version 9.0 and the F/W Identity and Supplier are faked accordingly.
+ * In case of H-II, the Primary, Station and AP Identity are merged into a single F/W Identity.
+ * The same applies to the Supplier information. As a consequence the PRI information can no longer be
+ * retrieved when a Tertiary runs. To accommodate MSFs and Utilities who depend on PRI information being
+ * available at any time, this information is cached in the IFB. In this cache the generic "F/W" value of
+ * the typ-fields is overwritten with the specific (legacy) "PRI" values. To actually re-route the (legacy)
+ * PRI request via hcf_get_info, the xxxx-table must be set. In case of H-I, this caching, modifying and
+ * re-routing is not needed because PRI information is always available directly from the NIC. For
+ * consistency the caching fields in the IFB are filled with the PRI information anyway.
+ *18: mdd_check_comp() is called to check the Supplier Variant and Range of the Host-S/W I/F (HSI) and the
+ * Primary Firmware Variant and Range against the Top and Bottom level supported by this HCF. If either of
+ * these tests fails, the CARD_STAT_INCOMP_PRI bit of IFB_CardStat is set
+ * Note: There should always be a primary except during production, so this makes the HCF in its current form
+ * unsuitable for manufacturing test systems like the FTS. This can be remedied by an adding a test like
+ * ifbp->IFB_PRISup.id == COMP_ID_PRI
+ *20: In case there is Tertiary F/W and this F/W is Station F/W, the Supplier Variant and Range of the Station
+ * Firmware function as retrieved from the Hermes is checked against the Top and Bottom level supported by
+ * this HCF.
+ * Note: ;? the tertiary F/W compatibility checks could be moved to the DHF, which already has checked the
+ * CFI and MFI compatibility of the image with the NIC before the image was downloaded.
+ *28: In case of non-Primary F/W: allocates and acknowledge a (TX or Notify) FID and allocates without
+ * acknowledge another (TX or Notify) FID (the so-called 1.5 alloc scheme) with the following steps:
+ * - execute the allocate command by calling cmd_exe
+ * - wait till either the alloc event or a time-out occurs
+ * - regardless whether the alloc event occurs, call get_fid to
+ * - read the FID and save it in IFB_RscInd to be used as "spare FID"
+ * - acknowledge the alloc event
+ * - do another "half" allocate to complete the "1.5 Alloc scheme"
+ * Note that above 3 steps do not harm and thus give the "cheapest" acceptable strategy.
+ * If a time-out occurred, then report time out status (after all)
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
HCF_STATIC int
init( IFBP ifbp )
{
-int rc = HCF_SUCCESS;
+ int rc = HCF_SUCCESS;
- HCFLOGENTRY( HCF_TRACE_INIT, 0 )
+ HCFLOGENTRY( HCF_TRACE_INIT, 0 );
- ifbp->IFB_CardStat = 0; /* 2*/
- OPW( HREG_EV_ACK, ~HREG_EV_SLEEP_REQ ); /* 4*/
- IF_PROT_TIME( calibrate( ifbp ); ) /*10*/
+ ifbp->IFB_CardStat = 0; /* 2*/
+ OPW( HREG_EV_ACK, ~HREG_EV_SLEEP_REQ ); /* 4*/
+ IF_PROT_TIME( calibrate( ifbp ) ); /*10*/
#if 0 // OOR
- ifbp->IFB_FWIdentity.len = 2; //misuse the IFB space for a put
+ ifbp->IFB_FWIdentity.len = 2; //misuse the IFB space for a put
ifbp->IFB_FWIdentity.typ = CFG_TICK_TIME;
- ifbp->IFB_FWIdentity.comp_id = (1000*1000)/1024 + 1; //roughly 1 second
+ ifbp->IFB_FWIdentity.comp_id = (1000*1000)/1024 + 1; //roughly 1 second
hcf_put_info( ifbp, (LTVP)&ifbp->IFB_FWIdentity.len );
#endif // OOR
ifbp->IFB_FWIdentity.len = sizeof(CFG_FW_IDENTITY_STRCT)/sizeof(hcf_16) - 1;
@@ -4179,8 +4047,8 @@ int rc = HCF_SUCCESS;
ifbp->IFB_FWIdentity.version_major = CNV_LITTLE_TO_SHORT( ifbp->IFB_FWIdentity.version_major );
ifbp->IFB_FWIdentity.version_minor = CNV_LITTLE_TO_SHORT( ifbp->IFB_FWIdentity.version_minor );
#endif // HCF_BIG_ENDIAN
-#if defined MSF_COMPONENT_ID /*14*/
- if ( rc == HCF_SUCCESS ) { /*16*/
+#if defined MSF_COMPONENT_ID /*14*/
+ if ( rc == HCF_SUCCESS ) { /*16*/
ifbp->IFB_HSISup.len = sizeof(CFG_SUP_RANGE_STRCT)/sizeof(hcf_16) - 1;
ifbp->IFB_HSISup.typ = CFG_NIC_HSI_SUP_RANGE;
rc = hcf_get_info( ifbp, (LTVP)&ifbp->IFB_HSISup.len );
@@ -4207,133 +4075,132 @@ int rc = HCF_SUCCESS;
ifbp->IFB_FWSup.top = CNV_LITTLE_TO_SHORT( ifbp->IFB_FWSup.top );
#endif // HCF_BIG_ENDIAN
- if ( ifbp->IFB_FWSup.id == COMP_ID_PRI ) { /* 20*/
-int i = sizeof( CFG_FW_IDENTITY_STRCT) + sizeof(CFG_SUP_RANGE_STRCT );
+ if ( ifbp->IFB_FWSup.id == COMP_ID_PRI ) { /* 20*/
+ int i = sizeof( CFG_FW_IDENTITY_STRCT) + sizeof(CFG_SUP_RANGE_STRCT );
while ( i-- ) ((hcf_8*)(&ifbp->IFB_PRIIdentity))[i] = ((hcf_8*)(&ifbp->IFB_FWIdentity))[i];
ifbp->IFB_PRIIdentity.typ = CFG_PRI_IDENTITY;
ifbp->IFB_PRISup.typ = CFG_PRI_SUP_RANGE;
xxxx[xxxx_PRI_IDENTITY_OFFSET] = &ifbp->IFB_PRIIdentity.len;
xxxx[xxxx_PRI_IDENTITY_OFFSET+1] = &ifbp->IFB_PRISup.len;
}
- if ( !mmd_check_comp( (void*)&cfg_drv_act_ranges_hsi, &ifbp->IFB_HSISup) /* 22*/
+ if ( !mmd_check_comp( (void*)&cfg_drv_act_ranges_hsi, &ifbp->IFB_HSISup) /* 22*/
#if ( (HCF_TYPE) & HCF_TYPE_PRELOADED ) == 0
//;? the PRI compatibility check is only relevant for DHF
- || !mmd_check_comp( (void*)&cfg_drv_act_ranges_pri, &ifbp->IFB_PRISup)
+ || !mmd_check_comp( (void*)&cfg_drv_act_ranges_pri, &ifbp->IFB_PRISup)
#endif // HCF_TYPE_PRELOADED
- ) {
+ ) {
ifbp->IFB_CardStat = CARD_STAT_INCOMP_PRI;
rc = HCF_ERR_INCOMP_PRI;
}
- if ( ( ifbp->IFB_FWSup.id == COMP_ID_STA && !mmd_check_comp( (void*)&cfg_drv_act_ranges_sta, &ifbp->IFB_FWSup) ) ||
- ( ifbp->IFB_FWSup.id == COMP_ID_APF && !mmd_check_comp( (void*)&cfg_drv_act_ranges_apf, &ifbp->IFB_FWSup) )
- ) { /* 24 */
+ if ( ( ifbp->IFB_FWSup.id == COMP_ID_STA && !mmd_check_comp( (void*)&cfg_drv_act_ranges_sta, &ifbp->IFB_FWSup) ) ||
+ ( ifbp->IFB_FWSup.id == COMP_ID_APF && !mmd_check_comp( (void*)&cfg_drv_act_ranges_apf, &ifbp->IFB_FWSup) )
+ ) { /* 24 */
ifbp->IFB_CardStat |= CARD_STAT_INCOMP_FW;
rc = HCF_ERR_INCOMP_FW;
}
}
#endif // MSF_COMPONENT_ID
-#if (HCF_DL_ONLY) == 0 /* 28 */
+
if ( rc == HCF_SUCCESS && ifbp->IFB_FWIdentity.comp_id >= COMP_ID_FW_STA ) {
-PROT_CNT_INI
+ PROT_CNT_INI;
/**************************************************************************************
- * rlav: the DMA engine needs the host to cause a 'hanging alloc event' for it to consume.
- * not sure if this is the right spot in the HCF, thinking about hcf_enable...
- **************************************************************************************/
+ * rlav: the DMA engine needs the host to cause a 'hanging alloc event' for it to consume.
+ * not sure if this is the right spot in the HCF, thinking about hcf_enable...
+ **************************************************************************************/
rc = cmd_exe( ifbp, HCMD_ALLOC, 0 );
// 180 degree error in logic ;? #if ALLOC_15
-// ifbp->IFB_RscInd = 1; //let's hope that by the time hcf_send_msg isa called, there will be a FID
+// ifbp->IFB_RscInd = 1; //let's hope that by the time hcf_send_msg isa called, there will be a FID
//#else
if ( rc == HCF_SUCCESS ) {
HCF_WAIT_WHILE( (IPW( HREG_EV_STAT ) & HREG_EV_ALLOC) == 0 );
- IF_PROT_TIME( HCFASSERT(prot_cnt, IPW( HREG_EV_STAT ) ) /*NOP*/;)
+ IF_PROT_TIME( HCFASSERT(prot_cnt, IPW( HREG_EV_STAT )) );
#if HCF_DMA
if ( ! ( ifbp->IFB_CntlOpt & USE_DMA ) )
#endif // HCF_DMA
{
ifbp->IFB_RscInd = get_fid( ifbp );
- HCFASSERT( ifbp->IFB_RscInd, 0 )
+ HCFASSERT( ifbp->IFB_RscInd, 0 );
cmd_exe( ifbp, HCMD_ALLOC, 0 );
- IF_PROT_TIME( if ( prot_cnt == 0 ) rc = HCF_ERR_TIME_OUT; )
+ IF_PROT_TIME( if ( prot_cnt == 0 ) rc = HCF_ERR_TIME_OUT );
}
}
//#endif // ALLOC_15
}
-#endif // HCF_DL_ONLY
- HCFASSERT( rc == HCF_SUCCESS, rc )
- HCFLOGEXIT( HCF_TRACE_INIT )
+
+ HCFASSERT( rc == HCF_SUCCESS, rc );
+ HCFLOGEXIT( HCF_TRACE_INIT );
return rc;
} // init
-#if (HCF_DL_ONLY) == 0
/************************************************************************************************************
-*
-*.SUBMODULE void isr_info( IFBP ifbp )
-*.PURPOSE handles link events.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-*
-*.RETURNS N.A.
-*
-*.DESCRIPTION
-*
-*
-*.DIAGRAM
-*1: First the FID number corresponding with the InfoEvent is determined.
-* Note the complication of the zero-FID protection sub-scheme in DAWA.
-* Next the L-field and the T-field are fetched into scratch buffer info.
-*2: In case of tallies, the 16 bits Hermes values are accumulated in the IFB into 32 bits values. Info[0]
-* is (expected to be) HCF_NIC_TAL_CNT + 1. The contraption "while ( info[0]-- >1 )" rather than
-* "while ( --info[0] )" is used because it is dangerous to determine the length of the Value field by
-* decrementing info[0]. As a result of a bug in some version of the F/W, info[0] may be 0, resulting
-* in a very long loop in the pre-decrement logic.
-*4: In case of a link status frame, the information is copied to the IFB field IFB_linkStat
-*6: All other than Tallies (including "unknown" ones) are checked against the selection set by the MSF
-* via CFG_RID_LOG. If a match is found or the selection set has the wild-card type (i.e non-NULL buffer
-* pointer at the terminating zero-type), the frame is copied to the (type-specific) log buffer.
-* Note that to accumulate tallies into IFB AND to log them or to log a frame when a specific match occures
-* AND based on the wild-card selection, you have to call setup_bap again after the 1st copy.
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE void isr_info( IFBP ifbp )
+ *.PURPOSE handles link events.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ *
+ *.RETURNS N.A.
+ *
+ *.DESCRIPTION
+ *
+ *
+ *.DIAGRAM
+ *1: First the FID number corresponding with the InfoEvent is determined.
+ * Note the complication of the zero-FID protection sub-scheme in DAWA.
+ * Next the L-field and the T-field are fetched into scratch buffer info.
+ *2: In case of tallies, the 16 bits Hermes values are accumulated in the IFB into 32 bits values. Info[0]
+ * is (expected to be) HCF_NIC_TAL_CNT + 1. The contraption "while ( info[0]-- >1 )" rather than
+ * "while ( --info[0] )" is used because it is dangerous to determine the length of the Value field by
+ * decrementing info[0]. As a result of a bug in some version of the F/W, info[0] may be 0, resulting
+ * in a very long loop in the pre-decrement logic.
+ *4: In case of a link status frame, the information is copied to the IFB field IFB_linkStat
+ *6: All other than Tallies (including "unknown" ones) are checked against the selection set by the MSF
+ * via CFG_RID_LOG. If a match is found or the selection set has the wild-card type (i.e non-NULL buffer
+ * pointer at the terminating zero-type), the frame is copied to the (type-specific) log buffer.
+ * Note that to accumulate tallies into IFB AND to log them or to log a frame when a specific match occures
+ * AND based on the wild-card selection, you have to call setup_bap again after the 1st copy.
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
HCF_STATIC void
isr_info( IFBP ifbp )
{
-hcf_16 info[2], fid;
+ hcf_16 info[2], fid;
#if (HCF_EXT) & HCF_EXT_INFO_LOG
-RID_LOGP ridp = ifbp->IFB_RIDLogp; //NULL or pointer to array of RID_LOG structures (terminated by zero typ)
+ RID_LOGP ridp = ifbp->IFB_RIDLogp; //NULL or pointer to array of RID_LOG structures (terminated by zero typ)
#endif // HCF_EXT_INFO_LOG
- HCFTRACE( ifbp, HCF_TRACE_ISR_INFO ); /* 1 */
+ HCFTRACE( ifbp, HCF_TRACE_ISR_INFO ); /* 1 */
fid = IPW( HREG_INFO_FID );
- DAWA_ZERO_FID( HREG_INFO_FID )
+ DAWA_ZERO_FID( HREG_INFO_FID );
if ( fid ) {
(void)setup_bap( ifbp, fid, 0, IO_IN );
get_frag( ifbp, (wci_bufp)info, 4 BE_PAR(2) );
- HCFASSERT( info[0] <= HCF_MAX_LTV + 1, MERGE_2( info[1], info[0] ) ) //;? a smaller value makes more sense
-#if (HCF_TALLIES) & HCF_TALLIES_NIC //Hermes tally support
+ HCFASSERT( info[0] <= HCF_MAX_LTV + 1, MERGE_2( info[1], info[0] ) ); //;? a smaller value makes more sense
+#if (HCF_TALLIES) & HCF_TALLIES_NIC //Hermes tally support
if ( info[1] == CFG_TALLIES ) {
-hcf_32 *p;
-/*2*/ if ( info[0] > HCF_NIC_TAL_CNT ) {
+ hcf_32 *p;
+ /*2*/ if ( info[0] > HCF_NIC_TAL_CNT ) {
info[0] = HCF_NIC_TAL_CNT + 1;
}
p = (hcf_32*)&ifbp->IFB_NIC_Tallies;
- while ( info[0]-- >1 ) *p++ += IPW( HREG_DATA_1 ); //request may return zero length
+ while ( info[0]-- >1 ) *p++ += IPW( HREG_DATA_1 ); //request may return zero length
}
else
#endif // HCF_TALLIES_NIC
{
-/*4*/ if ( info[1] == CFG_LINK_STAT ) {
+ /*4*/ if ( info[1] == CFG_LINK_STAT ) {
ifbp->IFB_LinkStat = IPW( HREG_DATA_1 );
}
#if (HCF_EXT) & HCF_EXT_INFO_LOG
-/*6*/ while ( 1 ) {
+ /*6*/ while ( 1 ) {
if ( ridp->typ == 0 || ridp->typ == info[1] ) {
if ( ridp->bufp ) {
- HCFASSERT( ridp->len >= 2, ridp->typ )
- ridp->bufp[0] = min((hcf_16)(ridp->len - 1), info[0] ); //save L
- ridp->bufp[1] = info[1]; //save T
+ HCFASSERT( ridp->len >= 2, ridp->typ );
+ ridp->bufp[0] = min((hcf_16)(ridp->len - 1), info[0] ); //save L
+ ridp->bufp[1] = info[1]; //save T
get_frag( ifbp, (wci_bufp)&ridp->bufp[2], (ridp->bufp[0] - 1)*2 BE_PAR(0) );
}
break;
@@ -4346,84 +4213,82 @@ hcf_32 *p;
}
return;
} // isr_info
-#endif // HCF_DL_ONLY
//
//
// #endif // HCF_TALLIES_NIC
-// /*4*/ if ( info[1] == CFG_LINK_STAT ) {
-// ifbp->IFB_DSLinkStat = IPW( HREG_DATA_1 ) | CFG_LINK_STAT_CHANGE; //corrupts BAP !! ;?
-// ifbp->IFB_LinkStat = ifbp->IFB_DSLinkStat & CFG_LINK_STAT_FW; //;? to be obsoleted
-// printk( "<4>linkstatus: %04x\n", ifbp->IFB_DSLinkStat ); //;?remove me 1 day
+// /*4*/ if ( info[1] == CFG_LINK_STAT ) {
+// ifbp->IFB_DSLinkStat = IPW( HREG_DATA_1 ) | CFG_LINK_STAT_CHANGE; //corrupts BAP !! ;?
+// ifbp->IFB_LinkStat = ifbp->IFB_DSLinkStat & CFG_LINK_STAT_FW; //;? to be obsoleted
+// printk( "<4>linkstatus: %04x\n", ifbp->IFB_DSLinkStat ); //;?remove me 1 day
// #if (HCF_SLEEP) & HCF_DDS
-// if ( ( ifbp->IFB_DSLinkStat & CFG_LINK_STAT_CONNECTED ) == 0 ) { //even values are disconnected etc.
-// ifbp->IFB_TickCnt = 0; //start 2 second period (with 1 tick uncertanty)
-// printk( "<5>isr_info: AwaitConnection phase started, IFB_TickCnt = 0\n" ); //;?remove me 1 day
-// }
+// if ( ( ifbp->IFB_DSLinkStat & CFG_LINK_STAT_CONNECTED ) == 0 ) { //even values are disconnected etc.
+// ifbp->IFB_TickCnt = 0; //start 2 second period (with 1 tick uncertanty)
+// printk( "<5>isr_info: AwaitConnection phase started, IFB_TickCnt = 0\n" ); //;?remove me 1 day
+// }
// #endif // HCF_DDS
-// }
+// }
// #if (HCF_EXT) & HCF_EXT_INFO_LOG
-// /*6*/ while ( 1 ) {
-// if ( ridp->typ == 0 || ridp->typ == info[1] ) {
-// if ( ridp->bufp ) {
-// HCFASSERT( ridp->len >= 2, ridp->typ )
-// (void)setup_bap( ifbp, fid, 2, IO_IN ); //restore BAP for tallies, linkstat and specific type followed by wild card
-// ridp->bufp[0] = min( ridp->len - 1, info[0] ); //save L
-// get_frag( ifbp, (wci_bufp)&ridp->bufp[1], ridp->bufp[0]*2 BE_PAR(0) );
-// }
-// break; //;?this break is no longer needed due to setup_bap but lets concentrate on DDS first
-// }
-// ridp++;
-// }
+// /*6*/ while ( 1 ) {
+// if ( ridp->typ == 0 || ridp->typ == info[1] ) {
+// if ( ridp->bufp ) {
+// HCFASSERT( ridp->len >= 2, ridp->typ );
+// (void)setup_bap( ifbp, fid, 2, IO_IN ); //restore BAP for tallies, linkstat and specific type followed by wild card
+// ridp->bufp[0] = min( ridp->len - 1, info[0] ); //save L
+// get_frag( ifbp, (wci_bufp)&ridp->bufp[1], ridp->bufp[0]*2 BE_PAR(0) );
+// }
+// break; //;?this break is no longer needed due to setup_bap but lets concentrate on DDS first
+// }
+// ridp++;
+// }
// #endif // HCF_EXT_INFO_LOG
-// }
-// HCFTRACE( ifbp, HCF_TRACE_ISR_INFO | HCF_TRACE_EXIT );
+// }
+// HCFTRACE( ifbp, HCF_TRACE_ISR_INFO | HCF_TRACE_EXIT );
//
//
//
//
-// return;
+// return;
//} // isr_info
-//#endif // HCF_DL_ONLY
/************************************************************************************************************
-*
-*.SUBMODULE void mdd_assert( IFBP ifbp, unsigned int line_number, hcf_32 q )
-*.PURPOSE filters assert on level and interfaces to the MSF supplied msf_assert routine.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* line_number line number of the line which caused the assert
-* q qualifier, additional information which may give a clue about the problem
-*
-*.RETURNS N.A.
-*
-*.DESCRIPTION
-*
-*
-*.DIAGRAM
-*
-*.NOTICE
-* mdd_assert has been through a turmoil, renaming hcf_assert to assert and hcf_assert again and supporting off
-* and on being called from the MSF level and other ( immature ) ModularDriverDevelopment modules like DHF and
-* MMD.
+ *
+ *.SUBMODULE void mdd_assert( IFBP ifbp, unsigned int line_number, hcf_32 q )
+ *.PURPOSE filters assert on level and interfaces to the MSF supplied msf_assert routine.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * line_number line number of the line which caused the assert
+ * q qualifier, additional information which may give a clue about the problem
+ *
+ *.RETURNS N.A.
+ *
+ *.DESCRIPTION
+ *
+ *
+ *.DIAGRAM
+ *
+ *.NOTICE
+ * mdd_assert has been through a turmoil, renaming hcf_assert to assert and hcf_assert again and supporting off
+ * and on being called from the MSF level and other ( immature ) ModularDriverDevelopment modules like DHF and
+ * MMD.
* !!!! The assert routine is not an hcf_..... routine in the sense that it may be called by the MSF,
- * however it is called from mmd.c and dhf.c, so it must be external.
- * To prevent namespace pollution it needs a prefix, to prevent that MSF programmers think that
- * they are allowed to call the assert logic, the prefix HCF can't be used, so MDD is selected!!!!
+ * however it is called from mmd.c and dhf.c, so it must be external.
+ * To prevent namespace pollution it needs a prefix, to prevent that MSF programmers think that
+ * they are allowed to call the assert logic, the prefix HCF can't be used, so MDD is selected!!!!
*
-* When called from the DHF module the line number is incremented by DHF_FILE_NAME_OFFSET and when called from
-* the MMD module by MMD_FILE_NAME_OFFSET.
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ * When called from the DHF module the line number is incremented by DHF_FILE_NAME_OFFSET and when called from
+ * the MMD module by MMD_FILE_NAME_OFFSET.
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
#if HCF_ASSERT
void
mdd_assert( IFBP ifbp, unsigned int line_number, hcf_32 q )
{
-hcf_16 run_time_flag = ifbp->IFB_AssertLvl;
+ hcf_16 run_time_flag = ifbp->IFB_AssertLvl;
if ( run_time_flag /* > ;?????? */ ) { //prevent recursive behavior, later to be extended to level filtering
ifbp->IFB_AssertQualifier = q;
@@ -4440,74 +4305,74 @@ hcf_16 run_time_flag = ifbp->IFB_AssertLvl;
OPW( HREG_SW_2, (hcf_16)(q >> 16 ) );
#endif // HCF_ASSERT_SW_SUP
-#if (HCF_EXT) & HCF_EXT_MB && (HCF_ASSERT) & HCF_ASSERT_MB
- ifbp->IFB_AssertLvl = 0; // prevent recursive behavior
+#if (HCF_ASSERT) & HCF_ASSERT_MB
+ ifbp->IFB_AssertLvl = 0; // prevent recursive behavior
hcf_put_info( ifbp, (LTVP)&ifbp->IFB_AssertStrct );
- ifbp->IFB_AssertLvl = run_time_flag; // restore appropriate filter level
-#endif // HCF_EXT_MB / HCF_ASSERT_MB
+ ifbp->IFB_AssertLvl = run_time_flag; // restore appropriate filter level
+#endif // HCF_ASSERT_MB
}
} // mdd_assert
#endif // HCF_ASSERT
/************************************************************************************************************
-*
-*.SUBMODULE void put_frag( IFBP ifbp, wci_bufp bufp, int len BE_PAR( int word_len ) )
-*.PURPOSE writes with 16/32 bit I/O via BAP1 port from Host memory to NIC RAM.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* bufp (byte) address of buffer
-* len length in bytes of buffer specified by bufp
-* word_len Big Endian only: number of leading bytes to swap in pairs
-*
-*.RETURNS N.A.
-*
-*.DESCRIPTION
-* process the single byte (if applicable) not yet written by the previous put_frag and copy len
-* (or len-1) bytes from bufp to NIC.
-*
-*
-*.DIAGRAM
-*
-*.NOTICE
-* It turns out DOS ODI uses zero length fragments. The HCF code can cope with it, but as a consequence, no
-* Assert on len is possible
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE void put_frag( IFBP ifbp, wci_bufp bufp, int len BE_PAR( int word_len ) )
+ *.PURPOSE writes with 16/32 bit I/O via BAP1 port from Host memory to NIC RAM.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * bufp (byte) address of buffer
+ * len length in bytes of buffer specified by bufp
+ * word_len Big Endian only: number of leading bytes to swap in pairs
+ *
+ *.RETURNS N.A.
+ *
+ *.DESCRIPTION
+ * process the single byte (if applicable) not yet written by the previous put_frag and copy len
+ * (or len-1) bytes from bufp to NIC.
+ *
+ *
+ *.DIAGRAM
+ *
+ *.NOTICE
+ * It turns out DOS ODI uses zero length fragments. The HCF code can cope with it, but as a consequence, no
+ * Assert on len is possible
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
HCF_STATIC void
put_frag( IFBP ifbp, wci_bufp bufp, int len BE_PAR( int word_len ) )
{
-hcf_io io_port = ifbp->IFB_IOBase + HREG_DATA_1; //BAP data register
-int i; //prevent side effects from macro
-hcf_16 j;
- HCFASSERT( ((hcf_32)bufp & (HCF_ALIGN-1) ) == 0, (hcf_32)bufp )
+ hcf_io io_port = ifbp->IFB_IOBase + HREG_DATA_1; //BAP data register
+ int i; //prevent side effects from macro
+ hcf_16 j;
+ HCFASSERT( ((hcf_32)bufp & (HCF_ALIGN-1) ) == 0, (hcf_32)bufp );
#if HCF_BIG_ENDIAN
- HCFASSERT( word_len == 0 || word_len == 2 || word_len == 4, word_len )
- HCFASSERT( word_len == 0 || ((hcf_32)bufp & 1 ) == 0, (hcf_32)bufp )
- HCFASSERT( word_len <= len, MERGE_2( word_len, len ) )
+ HCFASSERT( word_len == 0 || word_len == 2 || word_len == 4, word_len );
+ HCFASSERT( word_len == 0 || ((hcf_32)bufp & 1 ) == 0, (hcf_32)bufp );
+ HCFASSERT( word_len <= len, MERGE_2( word_len, len ) );
- if ( word_len ) { //if there is anything to convert
- //. convert and write the 1st hcf_16
+ if ( word_len ) { //if there is anything to convert
+ //. convert and write the 1st hcf_16
j = bufp[1] | bufp[0]<<8;
OUT_PORT_WORD( io_port, j );
- //. update pointer and counter accordingly
+ //. update pointer and counter accordingly
len -= 2;
bufp += 2;
- if ( word_len > 1 ) { //. if there is to convert more than 1 word ( i.e 2 )
- //. . convert and write the 2nd hcf_16
- j = bufp[1] | bufp[0]<<8; /*bufp is already incremented by 2*/
+ if ( word_len > 1 ) { //. if there is to convert more than 1 word ( i.e 2 )
+ //. . convert and write the 2nd hcf_16
+ j = bufp[1] | bufp[0]<<8; /*bufp is already incremented by 2*/
OUT_PORT_WORD( io_port, j );
- //. . update pointer and counter accordingly
+ //. . update pointer and counter accordingly
len -= 2;
bufp += 2;
}
}
#endif // HCF_BIG_ENDIAN
i = len;
- if ( i && ifbp->IFB_CarryOut ) { //skip zero-length
+ if ( i && ifbp->IFB_CarryOut ) { //skip zero-length
j = ((*bufp)<<8) + ( ifbp->IFB_CarryOut & 0xFF );
OUT_PORT_WORD( io_port, j );
bufp++; i--;
@@ -4515,35 +4380,35 @@ hcf_16 j;
}
#if (HCF_IO) & HCF_IO_32BITS
//skip zero-length I/O, single byte I/O and I/O not worthwhile (i.e. less than 6 bytes)for DW logic
- //if buffer length >= 6 and 32 bits I/O support
+ //if buffer length >= 6 and 32 bits I/O support
if ( !(ifbp->IFB_CntlOpt & USE_16BIT) && i >= 6 ) {
-hcf_32 FAR *p4; //prevent side effects from macro
- if ( ( (hcf_32)bufp & 0x1 ) == 0 ) { //. if buffer at least word aligned
- if ( (hcf_32)bufp & 0x2 ) { //. . if buffer not double word aligned
- //. . . write a single word to get double word aligned
- j = *(wci_recordp)bufp; //just to help ease writing macros with embedded assembly
+ hcf_32 FAR *p4; //prevent side effects from macro
+ if ( ( (hcf_32)bufp & 0x1 ) == 0 ) { //. if buffer at least word aligned
+ if ( (hcf_32)bufp & 0x2 ) { //. . if buffer not double word aligned
+ //. . . write a single word to get double word aligned
+ j = *(wci_recordp)bufp; //just to help ease writing macros with embedded assembly
OUT_PORT_WORD( io_port, j );
- //. . . adjust buffer length and pointer accordingly
+ //. . . adjust buffer length and pointer accordingly
bufp += 2; i -= 2;
}
- //. . write as many double word as possible
+ //. . write as many double word as possible
p4 = (hcf_32 FAR *)bufp;
j = (hcf_16)i/4;
OUT_PORT_STRING_32( io_port, p4, j );
- //. . adjust buffer length and pointer accordingly
+ //. . adjust buffer length and pointer accordingly
bufp += i & ~0x0003;
i &= 0x0003;
}
}
#endif // HCF_IO_32BITS
- //if no 32-bit support OR byte aligned OR 1 word left
+ //if no 32-bit support OR byte aligned OR 1 word left
if ( i ) {
- //. if odd number of bytes left
+ //. if odd number of bytes left
if ( i & 0x0001 ) {
- //. . save left over byte (before bufp is corrupted) in carry, set carry flag
- ifbp->IFB_CarryOut = (hcf_16)bufp[i-1] | 0x0100; //note that i and bufp are always simultaneously modified, &bufp[i-1] is invariant
+ //. . save left over byte (before bufp is corrupted) in carry, set carry flag
+ ifbp->IFB_CarryOut = (hcf_16)bufp[i-1] | 0x0100; //note that i and bufp are always simultaneously modified, &bufp[i-1] is invariant
}
- //. write as many word as possible in "alignment safe" way
+ //. write as many word as possible in "alignment safe" way
j = (hcf_16)i/2;
OUT_PORT_STRING_8_16( io_port, bufp, j );
}
@@ -4551,117 +4416,117 @@ hcf_32 FAR *p4; //prevent side effects from macro
/************************************************************************************************************
-*
-*.SUBMODULE void put_frag_finalize( IFBP ifbp )
-*.PURPOSE cleanup after put_frag for trailing odd byte and MIC transfer to NIC.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-*
-*.RETURNS N.A.
-*
-*.DESCRIPTION
-* finalize the MIC calculation with the padding pattern, output the last byte (if applicable)
-* of the message and the MIC to the TxFS
-*
-*
-*.DIAGRAM
-*2: 1 byte of the last put_frag may be still in IFB_CarryOut ( the put_frag carry holder ), so ........
-* 1 - 3 bytes of the last put_frag may be still in IFB_tx_32 ( the MIC engine carry holder ), so ........
-* The call to the MIC calculation routine feeds these remaining bytes (if any) of put_frag and the
-* just as many bytes of the padding as needed to the MIC calculation engine. Note that the "unneeded" pad
-* bytes simply end up in the MIC engine carry holder and are never used.
-*8: write the remainder of the MIC and possible some garbage to NIC RAM
-* Note: i is always 4 (a loop-invariant of the while in point 2)
-*
-*.NOTICE
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE void put_frag_finalize( IFBP ifbp )
+ *.PURPOSE cleanup after put_frag for trailing odd byte and MIC transfer to NIC.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ *
+ *.RETURNS N.A.
+ *
+ *.DESCRIPTION
+ * finalize the MIC calculation with the padding pattern, output the last byte (if applicable)
+ * of the message and the MIC to the TxFS
+ *
+ *
+ *.DIAGRAM
+ *2: 1 byte of the last put_frag may be still in IFB_CarryOut ( the put_frag carry holder ), so ........
+ * 1 - 3 bytes of the last put_frag may be still in IFB_tx_32 ( the MIC engine carry holder ), so ........
+ * The call to the MIC calculation routine feeds these remaining bytes (if any) of put_frag and the
+ * just as many bytes of the padding as needed to the MIC calculation engine. Note that the "unneeded" pad
+ * bytes simply end up in the MIC engine carry holder and are never used.
+ *8: write the remainder of the MIC and possible some garbage to NIC RAM
+ * Note: i is always 4 (a loop-invariant of the while in point 2)
+ *
+ *.NOTICE
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
HCF_STATIC void
put_frag_finalize( IFBP ifbp )
{
#if (HCF_TYPE) & HCF_TYPE_WPA
- if ( ifbp->IFB_MICTxCarry != 0xFFFF) { //if MIC calculation active
- CALC_TX_MIC( mic_pad, 8); //. feed (up to 8 bytes of) virtual padding to MIC engine
- //. write (possibly) trailing byte + (most of) MIC
+ if ( ifbp->IFB_MICTxCarry != 0xFFFF) { //if MIC calculation active
+ CALC_TX_MIC( mic_pad, 8); //. feed (up to 8 bytes of) virtual padding to MIC engine
+ //. write (possibly) trailing byte + (most of) MIC
put_frag( ifbp, (wci_bufp)ifbp->IFB_MICTx, 8 BE_PAR(0) );
}
#endif // HCF_TYPE_WPA
- put_frag( ifbp, null_addr, 1 BE_PAR(0) ); //write (possibly) trailing data or MIC byte
+ put_frag( ifbp, null_addr, 1 BE_PAR(0) ); //write (possibly) trailing data or MIC byte
} // put_frag_finalize
/************************************************************************************************************
-*
-*.SUBMODULE int put_info( IFBP ifbp, LTVP ltvp )
-*.PURPOSE support routine to handle the "basic" task of hcf_put_info to pass RIDs to the NIC.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* ltvp address in NIC RAM where LVT-records are located
-*
-*.RETURNS
-* HCF_SUCCESS
-* >>put_frag
-* >>cmd_wait
-*
-*.DESCRIPTION
-*
-*
-*.DIAGRAM
-*20: do not write RIDs to NICs which have incompatible Firmware
-*24: If the RID does not exist, the L-field is set to zero.
-* Note that some RIDs can not be read, e.g. the pseudo RIDs for direct Hermes commands and CFG_DEFAULT_KEYS
-*28: If the RID is written successful, pass it to the NIC by means of an Access Write command
-*
-*.NOTICE
-* The mechanism to HCF_ASSERT on invalid typ-codes in the LTV record is based on the following strategy:
-* - some codes (e.g. CFG_REG_MB) are explicitly handled by the HCF which implies that these codes
-* are valid. These codes are already consumed by hcf_put_info.
-* - all other codes are passed to the Hermes. Before the put action is executed, hcf_get_info is called
-* with an LTV record with a value of 1 in the L-field and the intended put action type in the Typ-code
-* field. If the put action type is valid, it is also valid as a get action type code - except
-* for CFG_DEFAULT_KEYS and CFG_ADD_TKIP_DEFAULT_KEY - so the HCF_ASSERT logic of hcf_get_info should
-* not catch.
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE int put_info( IFBP ifbp, LTVP ltvp )
+ *.PURPOSE support routine to handle the "basic" task of hcf_put_info to pass RIDs to the NIC.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * ltvp address in NIC RAM where LVT-records are located
+ *
+ *.RETURNS
+ * HCF_SUCCESS
+ * >>put_frag
+ * >>cmd_wait
+ *
+ *.DESCRIPTION
+ *
+ *
+ *.DIAGRAM
+ *20: do not write RIDs to NICs which have incompatible Firmware
+ *24: If the RID does not exist, the L-field is set to zero.
+ * Note that some RIDs can not be read, e.g. the pseudo RIDs for direct Hermes commands and CFG_DEFAULT_KEYS
+ *28: If the RID is written successful, pass it to the NIC by means of an Access Write command
+ *
+ *.NOTICE
+ * The mechanism to HCF_ASSERT on invalid typ-codes in the LTV record is based on the following strategy:
+ * - some codes (e.g. CFG_REG_MB) are explicitly handled by the HCF which implies that these codes
+ * are valid. These codes are already consumed by hcf_put_info.
+ * - all other codes are passed to the Hermes. Before the put action is executed, hcf_get_info is called
+ * with an LTV record with a value of 1 in the L-field and the intended put action type in the Typ-code
+ * field. If the put action type is valid, it is also valid as a get action type code - except
+ * for CFG_DEFAULT_KEYS and CFG_ADD_TKIP_DEFAULT_KEY - so the HCF_ASSERT logic of hcf_get_info should
+ * not catch.
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
HCF_STATIC int
-put_info( IFBP ifbp, LTVP ltvp )
+put_info( IFBP ifbp, LTVP ltvp )
{
-int rc = HCF_SUCCESS;
+ int rc = HCF_SUCCESS;
- HCFASSERT( ifbp->IFB_CardStat == 0, MERGE_2( ltvp->typ, ifbp->IFB_CardStat ) )
- HCFASSERT( CFG_RID_CFG_MIN <= ltvp->typ && ltvp->typ <= CFG_RID_CFG_MAX, ltvp->typ )
+ HCFASSERT( ifbp->IFB_CardStat == 0, MERGE_2( ltvp->typ, ifbp->IFB_CardStat ) );
+ HCFASSERT( CFG_RID_CFG_MIN <= ltvp->typ && ltvp->typ <= CFG_RID_CFG_MAX, ltvp->typ );
- if ( ifbp->IFB_CardStat == 0 && /* 20*/
- ( ( CFG_RID_CFG_MIN <= ltvp->typ && ltvp->typ <= CFG_RID_CFG_MAX ) ||
- ( CFG_RID_ENG_MIN <= ltvp->typ /* && ltvp->typ <= 0xFFFF */ ) ) ) {
+ if ( ifbp->IFB_CardStat == 0 && /* 20*/
+ ( ( CFG_RID_CFG_MIN <= ltvp->typ && ltvp->typ <= CFG_RID_CFG_MAX ) ||
+ ( CFG_RID_ENG_MIN <= ltvp->typ /* && ltvp->typ <= 0xFFFF */ ) ) ) {
#if HCF_ASSERT //FCC8, FCB0, FCB4, FCB6, FCB7, FCB8, FCC0, FCC4, FCBC, FCBD, FCBE, FCBF
- {
- hcf_16 t = ltvp->typ;
- LTV_STRCT x = { 2, t, {0} }; /*24*/
- hcf_get_info( ifbp, (LTVP)&x );
- if ( x.len == 0 &&
- ( t != CFG_DEFAULT_KEYS && t != CFG_ADD_TKIP_DEFAULT_KEY && t != CFG_REMOVE_TKIP_DEFAULT_KEY &&
- t != CFG_ADD_TKIP_MAPPED_KEY && t != CFG_REMOVE_TKIP_MAPPED_KEY &&
- t != CFG_HANDOVER_ADDR && t != CFG_DISASSOCIATE_ADDR &&
- t != CFG_FCBC && t != CFG_FCBD && t != CFG_FCBE && t != CFG_FCBF &&
- t != CFG_DEAUTHENTICATE_ADDR
- )
- ) {
- HCFASSERT( DO_ASSERT, ltvp->typ )
+ {
+ hcf_16 t = ltvp->typ;
+ LTV_STRCT x = { 2, t, {0} }; /*24*/
+ hcf_get_info( ifbp, (LTVP)&x );
+ if ( x.len == 0 &&
+ ( t != CFG_DEFAULT_KEYS && t != CFG_ADD_TKIP_DEFAULT_KEY && t != CFG_REMOVE_TKIP_DEFAULT_KEY &&
+ t != CFG_ADD_TKIP_MAPPED_KEY && t != CFG_REMOVE_TKIP_MAPPED_KEY &&
+ t != CFG_HANDOVER_ADDR && t != CFG_DISASSOCIATE_ADDR &&
+ t != CFG_FCBC && t != CFG_FCBD && t != CFG_FCBE && t != CFG_FCBF &&
+ t != CFG_DEAUTHENTICATE_ADDR
+ )
+ ) {
+ HCFASSERT( DO_ASSERT, ltvp->typ );
+ }
}
- }
#endif // HCF_ASSERT
rc = setup_bap( ifbp, ltvp->typ, 0, IO_OUT );
put_frag( ifbp, (wci_bufp)ltvp, 2*ltvp->len + 2 BE_PAR(2) );
-/*28*/ if ( rc == HCF_SUCCESS ) {
+ /*28*/ if ( rc == HCF_SUCCESS ) {
rc = cmd_exe( ifbp, HCMD_ACCESS + HCMD_ACCESS_WRITE, ltvp->typ );
}
}
@@ -4669,214 +4534,209 @@ int rc = HCF_SUCCESS;
} // put_info
-#if (HCF_DL_ONLY) == 0
/************************************************************************************************************
-*
-*.SUBMODULE int put_info_mb( IFBP ifbp, CFG_MB_INFO_STRCT FAR * ltvp )
-*.PURPOSE accumulates a ( series of) buffers into a single Info block into the MailBox.
-*
-*.ARGUMENTS
-* ifbp address of the Interface Block
-* ltvp address of structure specifying the "type" and the fragments of the information to be synthesized
-* as an LTV into the MailBox
-*
-*.RETURNS
-*
-*.DESCRIPTION
-* If the data does not fit (including no MailBox is available), the IFB_MBTally is incremented and an
-* error status is returned.
-* HCF_ASSERT does not catch.
-* Calling put_info_mb when their is no MailBox available, is considered a design error in the MSF.
-*
-* Note that there is always at least 1 word of unused space in the mail box.
-* As a consequence:
-* - no problem in pointer arithmetic (MB_RP == MB_WP means unambiguously mail box is completely empty
-* - There is always free space to write an L field with a value of zero after each MB_Info block. This
-* allows for an easy scan mechanism in the "get MB_Info block" logic.
-*
-*
-*.DIAGRAM
-*1: Calculate L field of the MBIB, i.e. 1 for the T-field + the cumulative length of the fragments.
-*2: The free space in the MailBox is calculated (2a: free part from Write Ptr to Read Ptr, 2b: free part
-* turns out to wrap around) . If this space suffices to store the number of words reflected by len (T-field
-* + Value-field) plus the additional MailBox Info L-field + a trailing 0 to act as the L-field of a trailing
-* dummy or empty LTV record, then a MailBox Info block is build in the MailBox consisting of
-* - the value len in the first word
-* - type in the second word
-* - a copy of the contents of the fragments in the second and higher word
-*
-*4: Since put_info_mb() can more or less directly be called from the MSF level, the I/F must be robust
-* against out-of-range variables. As failsafe coding, the MB update is skipped by changing tlen to 0 if
-* len == 0; This will indirectly cause an assert as result of the violation of the next if clause.
-*6: Check whether the free space in MailBox suffices (this covers the complete absence of the MailBox).
-* Note that len is unsigned, so even MSF I/F violation works out O.K.
-* The '2' in the expression "len+2" is used because 1 word is needed for L itself and 1 word is needed
-* for the zero-sentinel
-*8: update MailBox Info length report to MSF with "oldest" MB Info Block size. Be careful here, if you get
-* here before the MailBox is registered, you can't read from the buffer addressed by IFB_MBp (it is the
-* Null buffer) so don't move this code till the end of this routine but keep it where there is garuanteed
-* a buffer.
-*
-*.NOTICE
-* boundary testing depends on the fact that IFB_MBSize is guaranteed to be zero if no MailBox is present,
-* and to a lesser degree, that IFB_MBWp = IFB_MBRp = 0
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
-#if (HCF_EXT) & HCF_EXT_MB
+ *
+ *.SUBMODULE int put_info_mb( IFBP ifbp, CFG_MB_INFO_STRCT FAR * ltvp )
+ *.PURPOSE accumulates a ( series of) buffers into a single Info block into the MailBox.
+ *
+ *.ARGUMENTS
+ * ifbp address of the Interface Block
+ * ltvp address of structure specifying the "type" and the fragments of the information to be synthesized
+ * as an LTV into the MailBox
+ *
+ *.RETURNS
+ *
+ *.DESCRIPTION
+ * If the data does not fit (including no MailBox is available), the IFB_MBTally is incremented and an
+ * error status is returned.
+ * HCF_ASSERT does not catch.
+ * Calling put_info_mb when their is no MailBox available, is considered a design error in the MSF.
+ *
+ * Note that there is always at least 1 word of unused space in the mail box.
+ * As a consequence:
+ * - no problem in pointer arithmetic (MB_RP == MB_WP means unambiguously mail box is completely empty
+ * - There is always free space to write an L field with a value of zero after each MB_Info block. This
+ * allows for an easy scan mechanism in the "get MB_Info block" logic.
+ *
+ *
+ *.DIAGRAM
+ *1: Calculate L field of the MBIB, i.e. 1 for the T-field + the cumulative length of the fragments.
+ *2: The free space in the MailBox is calculated (2a: free part from Write Ptr to Read Ptr, 2b: free part
+ * turns out to wrap around) . If this space suffices to store the number of words reflected by len (T-field
+ * + Value-field) plus the additional MailBox Info L-field + a trailing 0 to act as the L-field of a trailing
+ * dummy or empty LTV record, then a MailBox Info block is build in the MailBox consisting of
+ * - the value len in the first word
+ * - type in the second word
+ * - a copy of the contents of the fragments in the second and higher word
+ *
+ *4: Since put_info_mb() can more or less directly be called from the MSF level, the I/F must be robust
+ * against out-of-range variables. As failsafe coding, the MB update is skipped by changing tlen to 0 if
+ * len == 0; This will indirectly cause an assert as result of the violation of the next if clause.
+ *6: Check whether the free space in MailBox suffices (this covers the complete absence of the MailBox).
+ * Note that len is unsigned, so even MSF I/F violation works out O.K.
+ * The '2' in the expression "len+2" is used because 1 word is needed for L itself and 1 word is needed
+ * for the zero-sentinel
+ *8: update MailBox Info length report to MSF with "oldest" MB Info Block size. Be careful here, if you get
+ * here before the MailBox is registered, you can't read from the buffer addressed by IFB_MBp (it is the
+ * Null buffer) so don't move this code till the end of this routine but keep it where there is garuanteed
+ * a buffer.
+ *
+ *.NOTICE
+ * boundary testing depends on the fact that IFB_MBSize is guaranteed to be zero if no MailBox is present,
+ * and to a lesser degree, that IFB_MBWp = IFB_MBRp = 0
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
HCF_STATIC int
put_info_mb( IFBP ifbp, CFG_MB_INFO_STRCT FAR * ltvp )
{
-int rc = HCF_SUCCESS;
-hcf_16 i; //work counter
-hcf_16 *dp; //destination pointer (in MailBox)
-wci_recordp sp; //source pointer
-hcf_16 len; //total length to copy to MailBox
-hcf_16 tlen; //free length/working length/offset in WMP frame
+ int rc = HCF_SUCCESS;
+ hcf_16 i; //work counter
+ hcf_16 *dp; //destination pointer (in MailBox)
+ wci_recordp sp; //source pointer
+ hcf_16 len; //total length to copy to MailBox
+ hcf_16 tlen; //free length/working length/offset in WMP frame
if ( ifbp->IFB_MBp == NULL ) return rc; //;?not sufficient
- HCFASSERT( ifbp->IFB_MBp != NULL, 0 ) //!!!be careful, don't get into an endless recursion
- HCFASSERT( ifbp->IFB_MBSize, 0 )
+ HCFASSERT( ifbp->IFB_MBp != NULL, 0 ); //!!!be careful, don't get into an endless recursion
+ HCFASSERT( ifbp->IFB_MBSize, 0 );
- len = 1; /* 1 */
+ len = 1; /* 1 */
for ( i = 0; i < ltvp->frag_cnt; i++ ) {
len += ltvp->frag_buf[i].frag_len;
}
if ( ifbp->IFB_MBRp > ifbp->IFB_MBWp ) {
- tlen = ifbp->IFB_MBRp - ifbp->IFB_MBWp; /* 2a*/
+ tlen = ifbp->IFB_MBRp - ifbp->IFB_MBWp; /* 2a*/
} else {
if ( ifbp->IFB_MBRp == ifbp->IFB_MBWp ) {
- ifbp->IFB_MBRp = ifbp->IFB_MBWp = 0; // optimize Wrapping
+ ifbp->IFB_MBRp = ifbp->IFB_MBWp = 0; // optimize Wrapping
}
- tlen = ifbp->IFB_MBSize - ifbp->IFB_MBWp; /* 2b*/
- if ( ( tlen <= len + 2 ) && ( len + 2 < ifbp->IFB_MBRp ) ) { //if trailing space is too small but
- // leading space is sufficiently large
- ifbp->IFB_MBp[ifbp->IFB_MBWp] = 0xFFFF; //flag dummy LTV to fill the trailing space
- ifbp->IFB_MBWp = 0; //reset WritePointer to begin of MailBox
- tlen = ifbp->IFB_MBRp; //get new available space size
+ tlen = ifbp->IFB_MBSize - ifbp->IFB_MBWp; /* 2b*/
+ if ( ( tlen <= len + 2 ) && ( len + 2 < ifbp->IFB_MBRp ) ) { //if trailing space is too small but
+ // leading space is sufficiently large
+ ifbp->IFB_MBp[ifbp->IFB_MBWp] = 0xFFFF; //flag dummy LTV to fill the trailing space
+ ifbp->IFB_MBWp = 0; //reset WritePointer to begin of MailBox
+ tlen = ifbp->IFB_MBRp; //get new available space size
}
}
dp = &ifbp->IFB_MBp[ifbp->IFB_MBWp];
if ( len == 0 ) {
tlen = 0; //;? what is this good for
}
- if ( len + 2 >= tlen ){ /* 6 */
+ if ( len + 2 >= tlen ){ /* 6 */
//Do Not ASSERT, this is a normal condition
- IF_TALLY( ifbp->IFB_HCF_Tallies.NoBufMB++; ) /*NOP to cover against analomies with empty compound*/;
+ IF_TALLY( ifbp->IFB_HCF_Tallies.NoBufMB++ );
rc = HCF_ERR_LEN;
} else {
- *dp++ = len; //write Len (= size of T+V in words to MB_Info block
- *dp++ = ltvp->base_typ; //write Type to MB_Info block
- ifbp->IFB_MBWp += len + 1; //update WritePointer of MailBox
- for ( i = 0; i < ltvp->frag_cnt; i++ ) { // process each of the fragments
+ *dp++ = len; //write Len (= size of T+V in words to MB_Info block
+ *dp++ = ltvp->base_typ; //write Type to MB_Info block
+ ifbp->IFB_MBWp += len + 1; //update WritePointer of MailBox
+ for ( i = 0; i < ltvp->frag_cnt; i++ ) { // process each of the fragments
sp = ltvp->frag_buf[i].frag_addr;
len = ltvp->frag_buf[i].frag_len;
while ( len-- ) *dp++ = *sp++;
}
- ifbp->IFB_MBp[ifbp->IFB_MBWp] = 0; //to assure get_info for CFG_MB_INFO stops
- ifbp->IFB_MBInfoLen = ifbp->IFB_MBp[ifbp->IFB_MBRp]; /* 8 */
+ ifbp->IFB_MBp[ifbp->IFB_MBWp] = 0; //to assure get_info for CFG_MB_INFO stops
+ ifbp->IFB_MBInfoLen = ifbp->IFB_MBp[ifbp->IFB_MBRp]; /* 8 */
}
return rc;
} // put_info_mb
-#endif // HCF_EXT_MB
-#endif // HCF_DL_ONLY
-
/************************************************************************************************************
-*
-*.SUBMODULE int setup_bap( IFBP ifbp, hcf_16 fid, int offset, int type )
-*.PURPOSE set up data access to NIC RAM via BAP_1.
-*
-*.ARGUMENTS
-* ifbp address of I/F Block
-* fid FID/RID
-* offset !!even!! offset in FID/RID
-* type IO_IN, IO_OUT
-*
-*.RETURNS
-* HCF_SUCCESS O.K
-* HCF_ERR_NO_NIC card is removed
-* HCF_ERR_DEFUNCT_TIME_OUT Fatal malfunction detected
-* HCF_ERR_DEFUNCT_..... if and only if IFB_DefunctStat <> 0
-*
-*.DESCRIPTION
-*
-* A non-zero return status indicates:
-* - the NIC is considered nonoperational, e.g. due to a time-out of some Hermes activity in the past
-* - BAP_1 could not properly be initialized
-* - the card is removed before completion of the data transfer
-* In all other cases, a zero is returned.
-* BAP Initialization failure indicates an H/W error which is very likely to signal complete H/W failure.
-* Once a BAP Initialization failure has occurred all subsequent interactions with the Hermes will return a
-* "defunct" status till the Hermes is re-initialized by means of an hcf_connect.
-*
-* A BAP is a set of registers (Offset, Select and Data) offering read/write access to a particular FID or
-* RID. This access is based on a auto-increment feature.
-* There are two BAPs but these days the HCF uses only BAP_1 and leaves BAP_0 to the PCI Busmastering H/W.
-*
-* The BAP-mechanism is based on the Busy bit in the Offset register (see the Hermes definition). The waiting
-* for Busy must occur between writing the Offset register and accessing the Data register. The
-* implementation to wait for the Busy bit drop after each write to the Offset register, implies that the
-* requirement that the Busy bit is low before the Select register is written, is automatically met.
-* BAP-setup may be time consuming (e.g. 380 usec for large offsets occurs frequently). The wait for Busy bit
-* drop is protected by a loop counter, which is initialized with IFB_TickIni, which is calibrated in init.
-*
-* The NIC I/F is optimized for word transfer and can only handle word transfer at a word boundary in NIC
-* RAM. The intended solution for transfer of a single byte has multiple H/W flaws. There have been different
-* S/W Workaround strategies. RID access is hcf_16 based by "nature", so no byte access problems. For Tx/Rx
-* FID access, the byte logic became obsolete by absorbing it in the double word oriented nature of the MIC
-* feature.
-*
-*
-*.DIAGRAM
-*
-*2: the test on rc checks whether the HCF went into "defunct" mode ( e.g. BAP initialization or a call to
-* cmd_wait did ever fail).
-*4: the select register and offset register are set
-* the offset register is monitored till a successful condition (no busy bit) is detected or till the
-* (calibrated) protection counter expires
-* If the counter expires, this is reflected in IFB_DefunctStat, so all subsequent calls to setup_bap fail
-* immediately ( see 2)
-*6: initialization of the carry as used by pet/get_frag
-*8: HREG_OFFSET_ERR is ignored as error because:
-* a: the Hermes is robust against it
-* b: it is not known what causes it (probably a bug), hence no strategy can be specified which level is
-* to handle this error in which way. In the past, it could be induced by the MSF level, e.g. by calling
-* hcf_rcv_msg while there was no Rx-FID available. Since this is an MSF-error which is caught by ASSERT,
-* there is no run-time action required by the HCF.
-* Lumping the Offset error in with the Busy bit error, as has been done in the past turns out to be a
-* disaster or a life saver, just depending on what the cause of the error is. Since no prediction can be
-* done about the future, it is "felt" to be the best strategy to ignore this error. One day the code was
-* accompanied by the following comment:
-* // ignore HREG_OFFSET_ERR, someone, supposedly the MSF programmer ;) made a bug. Since we don't know
-* // what is going on, we might as well go on - under management pressure - by ignoring it
-*
-*.ENDDOC END DOCUMENTATION
-*
-************************************************************************************************************/
+ *
+ *.SUBMODULE int setup_bap( IFBP ifbp, hcf_16 fid, int offset, int type )
+ *.PURPOSE set up data access to NIC RAM via BAP_1.
+ *
+ *.ARGUMENTS
+ * ifbp address of I/F Block
+ * fid FID/RID
+ * offset !!even!! offset in FID/RID
+ * type IO_IN, IO_OUT
+ *
+ *.RETURNS
+ * HCF_SUCCESS O.K
+ * HCF_ERR_NO_NIC card is removed
+ * HCF_ERR_DEFUNCT_TIME_OUT Fatal malfunction detected
+ * HCF_ERR_DEFUNCT_..... if and only if IFB_DefunctStat <> 0
+ *
+ *.DESCRIPTION
+ *
+ * A non-zero return status indicates:
+ * - the NIC is considered nonoperational, e.g. due to a time-out of some Hermes activity in the past
+ * - BAP_1 could not properly be initialized
+ * - the card is removed before completion of the data transfer
+ * In all other cases, a zero is returned.
+ * BAP Initialization failure indicates an H/W error which is very likely to signal complete H/W failure.
+ * Once a BAP Initialization failure has occurred all subsequent interactions with the Hermes will return a
+ * "defunct" status till the Hermes is re-initialized by means of an hcf_connect.
+ *
+ * A BAP is a set of registers (Offset, Select and Data) offering read/write access to a particular FID or
+ * RID. This access is based on a auto-increment feature.
+ * There are two BAPs but these days the HCF uses only BAP_1 and leaves BAP_0 to the PCI Busmastering H/W.
+ *
+ * The BAP-mechanism is based on the Busy bit in the Offset register (see the Hermes definition). The waiting
+ * for Busy must occur between writing the Offset register and accessing the Data register. The
+ * implementation to wait for the Busy bit drop after each write to the Offset register, implies that the
+ * requirement that the Busy bit is low before the Select register is written, is automatically met.
+ * BAP-setup may be time consuming (e.g. 380 usec for large offsets occurs frequently). The wait for Busy bit
+ * drop is protected by a loop counter, which is initialized with IFB_TickIni, which is calibrated in init.
+ *
+ * The NIC I/F is optimized for word transfer and can only handle word transfer at a word boundary in NIC
+ * RAM. The intended solution for transfer of a single byte has multiple H/W flaws. There have been different
+ * S/W Workaround strategies. RID access is hcf_16 based by "nature", so no byte access problems. For Tx/Rx
+ * FID access, the byte logic became obsolete by absorbing it in the double word oriented nature of the MIC
+ * feature.
+ *
+ *
+ *.DIAGRAM
+ *
+ *2: the test on rc checks whether the HCF went into "defunct" mode ( e.g. BAP initialization or a call to
+ * cmd_wait did ever fail).
+ *4: the select register and offset register are set
+ * the offset register is monitored till a successful condition (no busy bit) is detected or till the
+ * (calibrated) protection counter expires
+ * If the counter expires, this is reflected in IFB_DefunctStat, so all subsequent calls to setup_bap fail
+ * immediately ( see 2)
+ *6: initialization of the carry as used by pet/get_frag
+ *8: HREG_OFFSET_ERR is ignored as error because:
+ * a: the Hermes is robust against it
+ * b: it is not known what causes it (probably a bug), hence no strategy can be specified which level is
+ * to handle this error in which way. In the past, it could be induced by the MSF level, e.g. by calling
+ * hcf_rcv_msg while there was no Rx-FID available. Since this is an MSF-error which is caught by ASSERT,
+ * there is no run-time action required by the HCF.
+ * Lumping the Offset error in with the Busy bit error, as has been done in the past turns out to be a
+ * disaster or a life saver, just depending on what the cause of the error is. Since no prediction can be
+ * done about the future, it is "felt" to be the best strategy to ignore this error. One day the code was
+ * accompanied by the following comment:
+ * // ignore HREG_OFFSET_ERR, someone, supposedly the MSF programmer ;) made a bug. Since we don't know
+ * // what is going on, we might as well go on - under management pressure - by ignoring it
+ *
+ *.ENDDOC END DOCUMENTATION
+ *
+ ************************************************************************************************************/
HCF_STATIC int
setup_bap( IFBP ifbp, hcf_16 fid, int offset, int type )
{
-PROT_CNT_INI
-int rc;
+ PROT_CNT_INI;
+ int rc;
HCFTRACE( ifbp, HCF_TRACE_STRIO );
rc = ifbp->IFB_DefunctStat;
- if (rc == HCF_SUCCESS) { /*2*/
- OPW( HREG_SELECT_1, fid ); /*4*/
+ if (rc == HCF_SUCCESS) { /*2*/
+ OPW( HREG_SELECT_1, fid ); /*4*/
OPW( HREG_OFFSET_1, offset );
if ( type == IO_IN ) {
ifbp->IFB_CarryIn = 0;
}
else ifbp->IFB_CarryOut = 0;
HCF_WAIT_WHILE( IPW( HREG_OFFSET_1) & HCMD_BUSY );
- HCFASSERT( !( IPW( HREG_OFFSET_1) & HREG_OFFSET_ERR ), MERGE_2( fid, offset ) ) /*8*/
+ HCFASSERT( !( IPW( HREG_OFFSET_1) & HREG_OFFSET_ERR ), MERGE_2( fid, offset ) ); /*8*/
if ( prot_cnt == 0 ) {
- HCFASSERT( DO_ASSERT, MERGE_2( fid, offset ) )
+ HCFASSERT( DO_ASSERT, MERGE_2( fid, offset ) );
rc = ifbp->IFB_DefunctStat = HCF_ERR_DEFUNCT_TIME_OUT;
ifbp->IFB_CardStat |= CARD_STAT_DEFUNCT;
}
diff --git a/drivers/staging/wlags49_h2/hcf.h b/drivers/staging/wlags49_h2/hcf.h
index 2cd573944cd..00099473116 100644
--- a/drivers/staging/wlags49_h2/hcf.h
+++ b/drivers/staging/wlags49_h2/hcf.h
@@ -301,13 +301,11 @@ typedef struct {
#if (HCF_EXT) & HCF_EXT_INT_TICK
int IFB_TickCnt; // Hermes Timer Tick Counter
#endif // HCF_EXT_INT_TICK
-#if (HCF_EXT) & HCF_EXT_MB
hcf_16 *IFB_MBp; // pointer to the MailBox
hcf_16 IFB_MBSize; // size of the MailBox
hcf_16 IFB_MBWp; // zero-based write index into the MailBox
hcf_16 IFB_MBRp; // zero-based read index into the MailBox
hcf_16 IFB_MBInfoLen; // contents of L-field of the oldest available MailBoxInfoBlock
-#endif // HCF_EXT_MB
#if (HCF_TYPE) & HCF_TYPE_WPA
hcf_16 IFB_MICTxCntl; // MIC bit and Key index in TxControl field of TxFS
hcf_32 IFB_MICTxKey[2]; // calculating key
@@ -335,12 +333,7 @@ typedef struct {
CFG_FW_PRINTF_BUFFER_LOCATION_STRCT IFB_FwPfBuff;
#endif // HCF_ASSERT_PRINTF
#endif // HCF_ASSERT
-#if ! defined HCF_INT_OFF
hcf_16 volatile IFB_IntOffCnt; // 0xFFFF based HCF_ACT_INT_OFF nesting counter, DeepSleep flag
-#endif // HCF_INT_OFF
-#if (HCF_TYPE) & HCF_TYPE_CCX
- hcf_16 IFB_CKIPStat; // CKIP Status flag
-#endif // HCF_TYPE_CCX
#if (HCF_TALLIES) & ( HCF_TALLIES_NIC | HCF_TALLIES_HCF ) //Hermes and/or HCF tally support
hcf_32 IFB_Silly_you_should_align; //;?
hcf_16 IFB_TallyLen; // Tally length (to build an LTV)
@@ -382,9 +375,6 @@ typedef IFB_STRCT* IFBP;
EXTERN_C int hcf_action (IFBP ifbp, hcf_16 cmd );
EXTERN_C int hcf_connect (IFBP ifbp, hcf_io io_base );
-#if (HCF_ENCAP) & HCF_ENC_SUP
-EXTERN_C hcf_8 hcf_encap (wci_bufp type );
-#endif // HCF_ENC_SUP
EXTERN_C int hcf_get_info (IFBP ifbp, LTVP ltvp );
EXTERN_C int hcf_service_nic (IFBP ifbp, wci_bufp bufp, unsigned int len );
EXTERN_C int hcf_cntl (IFBP ifbp, hcf_16 cmd );
diff --git a/drivers/staging/wlags49_h2/hcfcfg.h b/drivers/staging/wlags49_h2/hcfcfg.h
index 83475b1060a..7545bc55411 100644
--- a/drivers/staging/wlags49_h2/hcfcfg.h
+++ b/drivers/staging/wlags49_h2/hcfcfg.h
@@ -300,493 +300,6 @@ typedef unsigned long hcf_32;
#define HCF_TALLIES_HCF 0x0002 // HCF Tallies accumulated in IFB
#define HCF_TALLIES_RESET 0x8000 // Tallies in IFB are reset when reported via hcf_get_info
-
-/************************************************************************************************/
-/****************************** M I N I P O R T N D I S *************************************/
-/************************************************************************************************/
-
-#if defined WVLAN_41 || defined WVLAN_48 || defined WVLAN_52 || defined _WIN32_WCE
-
-#ifndef WVLAN_46
-#define HCF_EXT (HCF_EXT_INFO_LOG | HCF_EXT_MB | HCF_EXT_NIC_ACCESS )
-#else
-#define HCF_EXT ( HCF_EXT_TX_CONT | HCF_EXT_INFO_LOG | HCF_EXT_MB | HCF_EXT_NIC_ACCESS )
-#endif
-#define HCF_DLV 1 //H-I legacy, superfluous for H-II
-
-#ifdef _WIN32_WCE
-#define HCF_IO HCF_IO_MEM
-#define HCF_DMA 0 // To enable DMA
-#endif
-
-#if _VARIANT == 7
-#define HCF_SLEEP HCF_CDS
-#endif // _VARIANT == 7
-
-#if _VARIANT == 5 || _VARIANT == 6
-#define _WARP
-#define _AES
-#define HCF_SLEEP HCF_CDS
-#if _VARIANT == 6
-//! #define _RSN
-#endif // _VARIANT == 6
-#ifndef _WIN32_WCE
-#define HCF_IO HCF_IO_32BITS
-#define HCF_DMA 1 // To enable DMA
-#endif
-#endif // _VARIANT == 5 || _VARIANT == 6
-
-
-//HWi for migration purposes I defined a define which will be TRUE for ALL drivers
-//Meaning that _CCX defined code which we think will get a all driver OK flag can be defined from _CCX to _CCX_OK
-#if defined WVLAN_48 // && !defined _WIN32_WCE
-#if _VARIANT == 4 || _VARIANT == 6
-#define _CCX_OK 1
-#endif // _VARIANT == 4 || _VARIANT == 6
-#endif // WVLAN_48
-
-//#if !defined WVLAN_46
-#if defined WVLAN_48
-#if _VARIANT == 4 || _VARIANT == 6
-#define _CCX
-#define HCF_MAX_MSG_CKIP_PADDING 86 //, use 86 for rx fragmentation. 28 is enuf for MIC+PPK encapsulation
-#define HCF_MAX_MSG ( 1514 + HCF_MAX_MSG_CKIP_PADDING ) // need extra padding for CKIP (need to subtract 28 for NDIS)
-#endif // _VARIANT == 4 || _VARIANT == 6
-#endif // WVLAN_48
-//#endif // WVLAN_46
-
-#if !defined WVLAN_46
-#define _PEEK
-#endif
-
-#ifndef _WIN32_WCE
-// ASSERT already used by WinCE...
-#ifdef ASSERT
-#undef ASSERT
-#define ASSERT(x) ASSERTDEBUGMSG((x), (TEXT("SIMULATE ASSERT:")))
-#endif
-#endif
-
-
-#if defined WVLAN_41
-#define MSF_COMPONENT_ID COMP_ID_MINIPORT_NDIS_31
-#endif // WVLAN_41
-#if defined WVLAN_48 && !defined _WIN32_WCE
-#define MSF_COMPONENT_ID COMP_ID_MINIPORT_NDIS_50
-#endif // WVLAN_48 / _WIN32_WCE
-#if defined WVLAN_52 && !defined _WIN32_WCE
-#define MSF_COMPONENT_ID COMP_ID_MINIPORT_NDIS_40
-#endif // WVLAN_52 / _WIN32_WCE
-#if defined WVLAN_46
-#define MSF_COMPONENT_ID COMP_ID_WIN_CE
-#endif // _WIN32_WCE
-
-#define MSF_COMPONENT_VAR _VARIANT
-
-#define T1__HCF_TYPE (HCF_TYPE_NONE)
-
-#define T2__HCF_TYPE (T1__HCF_TYPE)
-
-#ifdef _WARP
-#define T3__HCF_TYPE (T2__HCF_TYPE | HCF_TYPE_HII5 | HCF_TYPE_WARP )
-#else
-#if _VARIANT == 7
-#define T3__HCF_TYPE (T2__HCF_TYPE | HCF_TYPE_HII5)
-#else // _VARIANT == 7
-#define T3__HCF_TYPE (T2__HCF_TYPE)
-#endif // _VARIANT == 7
-#endif // _WARP
-
-#ifdef _CCX_OK
-#define T4__HCF_TYPE (T3__HCF_TYPE | HCF_TYPE_CCX)
-#else
-#define T4__HCF_TYPE (T3__HCF_TYPE)
-#endif // _CCX_OK
-
-//not suitable for H-II #define HCF_CFG_STA_1_BOTTOM 16
-
-// Default WPA in ON for all drivers except for WARP driver
-#ifdef _WARP
-#define T5__HCF_TYPE (T4__HCF_TYPE)
-#else // _WARP
-#define T5__HCF_TYPE (T4__HCF_TYPE | HCF_TYPE_WPA)
-#endif // _WARP
-
-#define HCF_TYPE (T5__HCF_TYPE)
-
-// This is needed to get aux_ctrl() from the HCF for WlFreezeAndDump()
-#if (defined DBG && DBG != 0)
-#ifndef STATIC
-#define STATIC
-#endif
-#endif
-
-#if !defined SOFTRONICS_CODE && !defined _APIDLL && !defined _WIN32_WCE
-#include <ndis.h>
-#endif // SOFTRONICS_CODE / _APIDLL / _WIN32_WCE
-#if defined _WIN32_WCE
-#include <windows.h>
-#include <winnt.h>
-#endif // _WIN32_WCE
-#include "version.h"
-
-#define MSF_COMPONENT_MAJOR_VER TPI_MAJOR_VERSION
-#define MSF_COMPONENT_MINOR_VER TPI_MINOR_VERSION
-
-#if !defined _APIDLL && !defined _WIN32_WCE
-
-__inline UCHAR NDIS_IN_BYTE( ULONG port )
-{
- UCHAR value;
- NdisRawReadPortUchar(port , &value);
- return (value);
-}
-
-__inline ULONG NDIS_IN_LONG( ULONG port )
-{
- ULONG value;
- NdisRawReadPortUlong(port , &value);
- return (value);
-}
-__inline USHORT NDIS_IN_WORD( ULONG port )
-{
- USHORT value;
- NdisRawReadPortUshort(port , &value);
- return (value);
-}
-
-#define IN_PORT_DWORD(port) NDIS_IN_LONG( (ULONG) (port) )
-#define IN_PORT_WORD(port) NDIS_IN_WORD( (ULONG) (port) )
-#define OUT_PORT_DWORD(port, value) NdisRawWritePortUlong((ULONG) (port) , value)
-#define OUT_PORT_WORD(port, value) NdisRawWritePortUshort((ULONG) (port) , (USHORT) (value))
-
-#define IN_PORT_STRING_8_16(port, addr, len) IN_PORT_STRING_16(port, addr, len)
-#define OUT_PORT_STRING_8_16(port, addr, len) OUT_PORT_STRING_16(port, addr, len)
-
-#define IN_PORT_STRING_32(port, addr, len) { \
- NdisRawReadPortBufferUlong(port, addr, (len)); \
-}
-
-#define OUT_PORT_STRING_32(port, addr, len) { \
- NdisRawWritePortBufferUlong(port, addr, (len)); \
-}
-
-#define IN_PORT_STRING_16(port, addr, len) NdisRawReadPortBufferUshort(port, addr, (len));
-#define OUT_PORT_STRING_16(port, addr, len) NdisRawWritePortBufferUshort(port, addr, (len));
-
-#endif // _APIDLL / _WIN32_WCE
-
-#if defined _WIN32_WCE
-
-#define HCF_ALIGN 2
-#define HCF_MEM_IO 1 // overrule standard Port I/O with Memory mapped I/O
-#define HCF_PROT_TIME 49
-
-#define IN_PORT_BYTE CE_IN_PORT_BYTE
-#define OUT_PORT_BYTE CE_OUT_PORT_BYTE
-#define IN_PORT_WORD CE_IN_PORT_WORD
-#define OUT_PORT_WORD CE_OUT_PORT_WORD
-#define IN_PORT_STRING_16 CE_IN_PORT_STRING
-#define OUT_PORT_STRING_16 CE_OUT_PORT_STRING
-
-extern hcf_8 CE_IN_PORT_BYTE(hcf_32 port);
-extern void CE_OUT_PORT_BYTE(hcf_32 port, hcf_8 value);
-extern hcf_16 CE_IN_PORT_WORD(hcf_32 port);
-extern void CE_OUT_PORT_WORD(hcf_32 port, hcf_16 value);
-extern void CE_IN_PORT_STRING(hcf_32 port, void *addr, hcf_16 len);
-extern void CE_OUT_PORT_STRING(hcf_32 port, void *addr, hcf_16 len);
-
-
-#endif
-
-#if defined _DEBUG || (defined DBG && DBG != 0)
-#define HCF_ASSERT ( HCF_ASSERT_LNK_MSF_RTN | HCF_ASSERT_RT_MSF_RTN | HCF_ASSERT_PRINTF ) //0xC001
-//#define HCF_ASSERT ( HCF_ASSERT_LNK_MSF_RTN | HCF_ASSERT_RT_MSF_RTN | HCF_ASSERT_PRINTF | HCF_ASSERT_MB ) //just to test
-#endif // _DEBUG || DBG
-
-#if defined DEBUG || defined _DEBUG || (defined DBG && DBG != 0)
-#ifdef _WIN32_WCE
-#define DBGA2W(DBGSTR) CeConvertAnsiToUnicodeLen((char*)DBGSTR)
-#define OUTPUTDEBUGMSG(dprintf_exp) ((void)((! ZONE_DEBUG) ? 0:ce_debug_out dprintf_exp))
-#define ASSERTDEBUGMSG(cond, dprintf_exp) ((void)((cond) ? 0:ce_debug_out dprintf_exp))
-
-#define ZONE_ERROR DEBUGZONE(0)
-#define ZONE_WARN DEBUGZONE(1)
-#define ZONE_FUNCTION DEBUGZONE(2)
-#define ZONE_INIT DEBUGZONE(3)
-#define ZONE_INTR DEBUGZONE(4)
-#define ZONE_RCV DEBUGZONE(5)
-#define ZONE_XMIT DEBUGZONE(6)
-#define ZONE_ASSERT DEBUGZONE(7)
-#define ZONE_DEBUG DEBUGZONE(8)
-#define ZONE_OEM DEBUGZONE(9)
-#define ZONE_HCF DEBUGZONE(10)
-#define ZONE_PORTIO DEBUGZONE(11)
-#define ZONE_LOGTOFILE DEBUGZONE(15)
-
-#else // !(_WIN32_WCE)
-
-#define OUTPUTDEBUGMSG(dprintf_exp) ((void) (DbgPrint dprintf_exp))
-// the assertdebugmsg macro will print filename, line followed by a caller-defined text, when cond == 0
-#define ASSERTDEBUGMSG(cond, print) ((void)((cond) ? 0: (DbgPrint("%s %s:%d - ", print, __FILE__, __LINE__))))
-
-#define ZONE_ERROR 1
-#define ZONE_WARN 1
-#define ZONE_FUNCTION 1
-#define ZONE_INIT 1
-#define ZONE_INTR 1
-#define ZONE_RCV 1
-#define ZONE_XMIT 1
-#define ZONE_ASSERT 1
-#define ZONE_DEBUG 1
-#define ZONE_OEM 1
-#define ZONE_HCF 1
-#define ZONE_PORTIO 1
-#define ZONE_LOGTOFILE 1
-
-#endif // _WIN32_WCE
-#ifndef DBGA2W
-#define DBGA2W
-#endif // DBGA2W
-
-#else // !(defined DEBUG || defined _DEBUG || (defined DBG && DBG != 0) )
-#define OUTPUTDEBUGMSG(dprintf_exp)
-#define ASSERTDEBUGMSG(cond, dprintf_exp)
-#endif // DEBUG / DBG
-
-#if !defined HCF_MAX_MSG_CKIP_PADDING
-#define HCF_MAX_MSG_CKIP_PADDING 0
-#endif // HCF_MAX_MSG_CKIP_PADDING
-
-#if !defined HCF_MAX_MSG
-#define HCF_MAX_MSG 1514
-#endif // HCF_MAX_MSG
-
-#define HCF_LEGACY 1 //;?nv je moet wat
-
-#endif //WVLAN_41 / WVLAN_48 / WVLAN_52 / _WIN32_WCE
-
-
-/************************************************************************************************/
-/**************************** P A C K E T D R I V E R ***************************************/
-/********************************** D O S O D I *********************************************/
-/************************************************************************************************/
-
-#if defined WVLAN_42 || defined WVLAN_43
-
-#pragma warning ( disable: 4001 )
-#define FAR __far //segmented 16 bits mode
-#define BASED __based(__segname("_CODE")) //force all the "const" structures in the CODE segment
-
-//#define HCF_IO 0 //no DMA, no 32 bits
-
-//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To ease testing the different options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#define HCF_EXT HCF_EXT_MB
-#define HCF_PROT_TIME 49 //49*10240 microseconds H/W failure protection timer
-
-//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To ease testing the different options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-/******************************** CONFIGURATION MANAGEMENT *****************************************/
-#ifdef WVLAN_42
-#define MSF_COMPONENT_ID COMP_ID_PACKET
-#define MSF_COMPONENT_VAR 1
-#define MSF_COMPONENT_MAJOR_VER 6
-#define MSF_COMPONENT_MINOR_VER 12
-#endif // WVLAN_42
-
-#ifdef WVLAN_43
-#define MSF_COMPONENT_ID COMP_ID_ODI_16
-#define MSF_COMPONENT_VAR 1
-#define MSF_COMPONENT_MAJOR_VER 6
-#define MSF_COMPONENT_MINOR_VER 10
-#endif // WVLAN_43
-
-/************************************** INPUT / OUTPUT **********************************************/
-#ifndef H_2_INC
-#include <stdio.h>
-#include <conio.h>
-#if 1 //temorary use functions defined in hcf.c
-#ifndef _DEBUG
-#pragma intrinsic( _inp, _inpw, _outp, _outpw )
-#endif // _DEBUG
-
-#define IN_PORT_WORD(port) ((hcf_16)_inpw( (hcf_io)(port) ))
-#define OUT_PORT_WORD(port, value) ((void)_outpw( (hcf_io)(port), value ))
-
-#if 1 // C implementation which let the processor handle the word-at-byte-boundary problem
-#define IN_PORT_STRING_8_16( port, addr, n) while (n--) \
- { *(hcf_16 FAR*)addr = IN_PORT_WORD( port ); ((hcf_8 FAR*)addr)+=2; }
-#define OUT_PORT_STRING_8_16( port, addr, n) while (n--) \
- { OUT_PORT_WORD( port, *(hcf_16 FAR*)addr ) ; ((hcf_8 FAR*)addr)+=2; }
-#elif 0 // C implementation which handles the word-at-byte-boundary problem
-#define IN_PORT_STRING_8_16( port, addr, n) while ( n-- ) \
- { hcf_16 i = IN_PORT_WORD(port); *((hcf_8 FAR*)addr)++ = (hcf_8)i; *((hcf_8 FAR*)addr)++ = (hcf_8)(i>>8);}
-#define OUT_PORT_STRING_8_16( port, addr, n) while ( n-- ) \
- { OUT_PORT_WORD( port, *((hcf_8 FAR*)addr) | *(((hcf_8 FAR*)addr)+1)<<8 ); (hcf_8 FAR*)addr+=2; }
-#else // Assembler implementation
-#define IN_PORT_STRING_8_16( port, addr, n) __asm \
-{ \
- __asm push di \
- __asm push es \
- __asm mov cx,n \
- __asm les di,addr \
- __asm mov dx,port \
- __asm rep insw \
- __asm pop es \
- __asm pop di \
-}
-
-#define OUT_PORT_STRING_8_16( port, addr, n) __asm \
-{ \
- __asm push si \
- __asm push ds \
- __asm mov cx,n \
- __asm lds si,addr \
- __asm mov dx,port \
- __asm rep outsw \
- __asm pop ds \
- __asm pop si \
-}
-
-#endif // Asm or C implementation
-#define IN_PORT_STRING_32( port, addr, n) { int n2 = 2*n; IN_PORT_STRING_8_16(port, addr, n2) }
-#define OUT_PORT_STRING_32( port, addr, n) { int n2 = 2*n; OUT_PORT_STRING_8_16(port, addr, n2) }
-#endif // 0 //temorary use functions defined in hcf.c
-#endif // H_2_INC
-
-#endif // WVLAN_42 / WVLAN_43
-
-
-
-/************************************************************************************************/
-/**************************** D O S H - I / II L O A D E R **********************************/
-/************************************************************************************************/
-
-#if defined H0_LDR || defined H1_LDR || defined H2_LDR || defined H5_LDR
-
-#if defined H0_LDR //implies H-I
-#define HCF_DLV 0 //H-I legacy, meaningless under H-II
-#define HCF_DLNV 1 //H-I legacy, meaningless under H-II
-#endif // H0_LDR
-
-#if defined H1_LDR //implies H-I
-#define HCF_DLV 1 //H-I legacy, meaningless under H-II
-#define HCF_DLNV 0 //H-I legacy, meaningless under H-II
-#endif // H1_LDR / H2_LDR
-
-//#if defined H2_LDR : not needed, H-II defaults are O.K for H2_LDR
-
-#ifdef H5_LDR
-#define HCF_TYPE (HCF_TYPE_HII5 | HCF_TYPE_WARP )
-//;? why does only this subset of the H_LDRs need HCF_TYPE to be defined here
-#endif
-
-#define HCF_ASSERT HCF_ASSERT_LNK_MSF_RTN //support dynamic linking of msf_assert routine
-#define HCF_ENCAP 0
-#define HCF_INT_ON 0
-#define HCF_TALLIES 0
-
-#define MSF_COMPONENT_ID COMP_ID_ODI_16 //;?By lack of any better
-#define MSF_COMPONENT_VAR 1
-#define MSF_COMPONENT_MAJOR_VER 0
-#define MSF_COMPONENT_MINOR_VER 0
-
-#include <stdio.h>
-#include <conio.h>
-#if defined NDEBUG
-#pragma intrinsic( _inp, _inpw, _outp, _outpw )
-#endif // NDEBUG
-
-#if 0 //use 0 to replace I/O Macros with logging facility
-#define IN_PORT_WORD(port) ((hcf_16)_inpw( (hcf_io)(port) ))
-#define OUT_PORT_WORD(port, value) ((void)_outpw( (hcf_io)(port), value ))
-#define IN_PORT_STRING_16( port, addr, n) \
- while ( n-- ) { *(hcf_16 FAR*)addr = IN_PORT_WORD( port ); (cast)addr += 2; }
-#define OUT_PORT_STRING_16( port, addr, n) \
- while ( n-- ) { OUT_PORT_WORD( port, *(hcf_16 FAR*)addr ) ; (cast)addr += 2; }
-#endif //use 0 to replace I/O Macros with logging facility
-
-#endif // H0_LDR / H1_LDR / H2_LDR
-
-
-
-/************************************************************************************************/
-/**************************** H C F D E M O P R O G R A M ***********************************/
-/************************************************************************************************/
-
-#if defined HCF_DEMO
-
-#define HCF_DLV 1 //;?should become the default !defaults to 1 anyway for H-II
-//#define HCF_DLNV 0 //defaults to 0 anyway for H-II
-
-#define HCF_ASSERT HCF_ASSERT_LNK_MSF_RTN //support dynamic linking of msf_assert routine
-
-#define HCF_ENCAP 0
-#define HCF_INT_ON 0
-#define HCF_TALLIES ( HCF_TALLIES_NIC | HCF_TALLIES_HCF )
-
-//#define MSF_COMPONENT_ID NO configuration management
-
-#include <stdio.h>
-#include <conio.h>
-#if defined NDEBUG
-#pragma intrinsic( _inp, _inpw, _outp, _outpw )
-#endif // NDEBUG
-
-#if 0 //use 0 to replace I/O Macros with logging facility
-#define IN_PORT_WORD(port) ((hcf_16)_inpw( (hcf_io)(port) ))
-#define OUT_PORT_WORD(port, value) ((void)_outpw( (hcf_io)(port), value ))
-#define IN_PORT_STRING_16( port, addr, n) \
- while ( n-- ) { *(hcf_16 FAR*)addr = IN_PORT_WORD( port ); (cast)addr += 2; }
-#define OUT_PORT_STRING_16( port, addr, n) \
- while ( n-- ) { OUT_PORT_WORD( port, *(hcf_16 FAR*)addr ) ; (cast)addr += 2; }
-#endif //use 0 to replace I/O Macros with logging facility
-
-#endif // HCF_DEMO
-
-
-
-/************************************************************************************************/
-/*********************************** M A C O S **********************************************/
-/************************************************************************************************/
-
-#if defined WVLAN_45
-
-#include "Version.h"
-
-#define MSF_COMPONENT_ID COMP_ID_MAC_OS
-#define MSF_COMPONENT_VAR VARIANT
-#define MSF_COMPONENT_MAJOR_VER VERSION_MAJOR
-#define MSF_COMPONENT_MINOR_VER VERSION_MINOR
-
-#define MAC_OS 1
-
-#define HCF_BIG_ENDIAN 1 // selects Big Endian (a.k.a. Motorola), most significant byte first
-
-#if defined DEBUG
-#define HCF_ASSERT HCF_ASSERT_MB // logging via Mailbox
-#endif // DEBUG
-
-#ifdef __cplusplus
-extern "C" {
-#endif // __cplusplus
-extern volatile unsigned char *MacIOaddr;
-extern hcf_16 IN_PORT_WORD(hcf_16 port);
-extern void OUT_PORT_WORD(hcf_16 port, hcf_16 value);
-extern void IN_PORT_STRING_16(hcf_16 port, void *addr, hcf_16 len);
-extern void OUT_PORT_STRING_16(hcf_16 port, void *addr, hcf_16 len);
-
-#define SwapBytes(t) (((t) >> 8) + (((t) & 0xff) << 8))
-
-#ifdef __cplusplus
-}
-#endif // __cplusplus
-
-#endif // WVLAN_45
-
-
-
/************************************************************************************************/
/****************************************** L I N U X *****************************************/
/************************************************************************************************/
@@ -803,16 +316,25 @@ extern void OUT_PORT_STRING_16(hcf_16 port, void *addr, hcf_16 len);
//#define HCF_SLEEP (HCF_CDS | HCF_DDS )
#define HCF_SLEEP (HCF_CDS)
-//#define HCF_TYPE (HCF_TYPE_HII5|HCF_TYPE_STA|HCF_TYPE_AP)
+/* Note: Non-WARP firmware all support WPA. However the original Agere
+ * linux driver does not enable WPA. Enabling WPA here causes whatever
+ * preliminary WPA logic to be included, some of which may be specific
+ * to HERMESI.
+ *
+ * Various comment are clear that WARP and WPA are not compatible
+ * (which may just mean WARP does WPA in a different fashion).
+ */
+
+/* #define HCF_TYPE (HCF_TYPE_HII5|HCF_TYPE_STA|HCF_TYPE_AP) */
#ifdef HERMES25
#ifdef WARP
#define HCF_TYPE ( HCF_TYPE_WARP | HCF_TYPE_HII5 )
#else
-#define HCF_TYPE HCF_TYPE_HII5
-#endif // WARP
+#define HCF_TYPE (HCF_TYPE_HII5 | HCF_TYPE_WPA)
+#endif /* WARP */
#else
-#define HCF_TYPE HCF_TYPE_NONE
-#endif // HERMES25
+#define HCF_TYPE HCF_TYPE_WPA
+#endif /* HERMES25 */
#ifdef ENABLE_DMA
#define HCF_DMA 1
@@ -934,16 +456,6 @@ extern void OUT_PORT_STRING_16(hcf_16 port, void *addr, hcf_16 len);
#define IN_PORT_STRING_8_16(port, addr, len) IN_PORT_STRING_16(port, addr, len)
#define OUT_PORT_STRING_8_16(port, addr, len) OUT_PORT_STRING_16(port, addr, len)
-
-#ifndef OUTPUTDEBUGMSG
-#define OUTPUTDEBUGMSG(dprintf_exp)
-#endif
-
-
-#ifndef ASSERTDEBUGMSG
-#define ASSERTDEBUGMSG(cond, dprintf_exp)
-#endif
-
#ifndef CFG_SCAN_CHANNELS_2GHZ
#define CFG_SCAN_CHANNELS_2GHZ 0xFCC2
#endif /* CFG_SCAN_CHANNELS_2GHZ */
@@ -951,1043 +463,6 @@ extern void OUT_PORT_STRING_16(hcf_16 port, void *addr, hcf_16 len);
#define HCF_MAX_MSG 1600 //get going ;?
#endif // WVLAN_49
-
-
-/************************************************************************************************/
-/********************************************* Q N X ******************************************/
-/************************************************************************************************/
-
-#if defined __QNX__ || defined WVLAN_50
-
-#define MSF_COMPONENT_ID 0 //Although there is no DUI support, we need this to get ...
-#define MSF_COMPONENT_VAR 0 //...compatibilty check to function
-#define MSF_COMPONENT_MAJOR_VER 0 //...;?this is worth looking into to make this a more
-#define MSF_COMPONENT_MINOR_VER 0 //..."defined" I/F so OEMers can figure out what to do
-
-#include <conio.h>
-
-#define IN_PORT_WORD(port) ((hcf_16)inpw( (hcf_io)(port) ))
-#define OUT_PORT_WORD(port, value) (outpw( (hcf_io)(port), (hcf_16) (value) ))
-/*
-#define IN_PORT_STRING_16( port, addr, n) \
- while ( n-- ) { *(hcf_16*)addr = IN_PORT_WORD( port ); (cast)addr += 2; }
-#define OUT_PORT_STRING_16( port, addr, n) \
- while ( n-- ) { OUT_PORT_WORD( port, *(hcf_16*)addr ) ; (cast)addr += 2; }
-*/
-
-#endif // QNX / WVLAN_50
-
-
-
-/************************************************************************************************/
-/********************************************* B E O S ****************************************/
-/************************************************************************************************/
-
-#if defined __BEOS__
-
-#define MSF_COMPONENT_ID 0 //Although there is no DUI support, we need this to get ...
-#define MSF_COMPONENT_VAR 0 //...compatibilty check to function
-#define MSF_COMPONENT_MAJOR_VER 0 //...;?this is worth looking into to make this a more
-#define MSF_COMPONENT_MINOR_VER 0 //..."defined" I/F so OEMers can figure out what to do
-
-#include <drivers/Drivers.h>
-#include <drivers/KernelExport.h>
-
-uint8 read_io_8 (int);
-void write_io_8 (int, uint8);
-uint16 read_io_16 (int);
-void write_io_16 (int, uint16);
-
-#define IN_PORT_WORD(port) ((hcf_16)read_io_16( (hcf_io)(port) ))
-#define OUT_PORT_WORD(port, value) (write_io_16( (hcf_io)(port), (hcf_16) (value) ))
-/*
-#define IN_PORT_STRING_16( port, addr, n) \
- while ( n-- ) { *(hcf_16*)addr = IN_PORT_WORD( port ); (cast)addr += 2; }
-#define OUT_PORT_STRING_16( port, addr, n) \
- while ( n-- ) { OUT_PORT_WORD( port, *(hcf_16*)addr ) ; (cast)addr += 2; }
-*/
-#endif // __BEOS__
-
-
-
-/************************************************************************************************/
-/******************************** U S B D O N G L E *****************************************/
-/************************************************************************************************/
-
-#if defined USB
-#include "gpif.h"
-
-#define MSF_COMPONENT_MAJOR_VER 0
-#define MSF_COMPONENT_MINOR_VER 1
-
-#define IN_PORT_WORD(port) (Hermes_IO_Read( (hcf_8)(port)))
-#define OUT_PORT_WORD(port, value) (Hermes_IO_Write( (hcf_8)port, /*(hcf_16)*/(value) ) )
-/* !!!! NOTE USB supports only 16-bits I/O and no 8-bits I/O
- * as a consequence the IN_/OUT_PORT_STRING_16 macros use hcf_16* rather than hcf_8 pointers
- * to get more optimal code
- * therefore the pointers are incremented by 1 (which means two "bytes") rather than by 2
- */
-//#define IN_PORT_STRING_16( port, addr, n) while ( n-- ) { *((hcf_16*)addr)++ = IN_PORT_WORD( port ); }
-//#define OUT_PORT_STRING_16( port, addr, n) while ( n-- ) { OUT_PORT_WORD( port, *((hcf_16*)addr)++ ); }
-#define IN_PORT_STRING_16( port, dst, n) while ( n-- ) { *dst++ = IN_PORT_WORD( port ); }
-#define OUT_PORT_STRING_16( port, src, n) while ( n-- ) { OUT_PORT_WORD( port, *src++ ); }
-
-//#define HCF_TYPE ( HCF_TYPE_AP | HCF_TYPE_WPA )
-#define HCF_TYPE HCF_TYPE_WPA
-
-#endif // USB
-
-
-/************************************************************************************************/
-/****************************************** FreeBSD *******************************************/
-/************************************************************************************************/
-
-#if defined __FREE_BSD__
-
-#define MSF_COMPONENT_ID COMP_ID_FreeBSD
-#define MSF_COMPONENT_VAR 1
-#define MSF_COMPONENT_MAJOR_VER 1
-#define MSF_COMPONENT_MINOR_VER 0
-
-#define HCF_IO HCF_IO_MEM // overrule standard Port I/O with Memory mapped I/O
-
-#include <machine/cpufunc.h>
-
-#define IN_PORT_WORD(port) ((hcf_16)inw( (hcf_io)(port) ))
-#define OUT_PORT_WORD(port, value) (outw((hcf_io)(port), (hcf_16)(value)))
-
-/*
-#define IN_PORT_STRING_16( port, addr, n) \
- while ( n-- ) { *(hcf_16*)addr = IN_PORT_WORD( port ); (cast)addr += 2; }
-#define OUT_PORT_STRING_16( port, addr, n) \
- while ( n-- ) { OUT_PORT_WORD( port, *(hcf_16*)addr ) ; (cast)addr += 2; }
-*/
-#endif // __FREE_BSD__
-
-
-
-/************************************************************************************************/
-/********************************* W A V E P O I N T ******************************************/
-/************************************************************************************************/
-
-#if defined WVLAN_81 /* BORLANDC */
-
-#define EXTERN_C extern // needed because DHF uses this instead of 'extern'
-
-#define MSF_COMPONENT_ID COMP_ID_AP1
-#define MSF_COMPONENT_VAR 1
-#define MSF_COMPONENT_MAJOR_VER 4
-#define MSF_COMPONENT_MINOR_VER 0
-
-#define HCF_PROT_TIME 49 //49*10240 microseconds H/W failure protection timer
-
-//#define HCF_ASSERT HCF_ASSERT_MB // logging via Mailbox /* debug build only */
-
-#if !defined FAR
-#define FAR far // segmented 16 bits mode
-#endif // FAR
-
-#define IN_PORT_WORD(port) (inport( (hcf_io)(port) ))
-#define OUT_PORT_WORD(port, value) (outport( (hcf_io)(port), value ))
-
-#define IN_PORT_STRING_16(port, addr, len) \
- asm { push di; push es; mov cx,len; les di,addr; mov dx,port; rep insw; pop es; pop di }
-
-#define OUT_PORT_STRING_16(port, addr, len) \
- asm { push si; push ds; mov cx,len; lds si,addr; mov dx,port; rep outsw; pop ds; pop si }
-
-#endif // WVLAN_81
-
-
-/************************************************************************************************/
-/******************************** W A V E L A U N C H *****************************************/
-/************************************************************************************************/
-
-#if defined WVLAUNCH
-
-#include "DriverX.h"
-extern HWDEVICE* g_pDevice;
-
-//#define MSF_COMPONENT_ID 0 //;? to get around browser problem
-
-#define IN_PORT_WORD(port) HwInpw( g_pDevice, port )
-#define OUT_PORT_WORD(port, value) HwOutpw( g_pDevice, port, value )
-
-
-// C implementation which let the processor handle the word-at-byte-boundary problem
-/*
-#define IN_PORT_STRING_16( port, addr, n) \
- while ( n-- ) { *(hcf_16 FAR*)addr = IN_PORT_WORD( port ); (cast)addr += 2; }
-#define OUT_PORT_STRING_16( port, addr, n) \
- while ( n-- ) { OUT_PORT_WORD( port, *(hcf_16 FAR*)addr ) ; (cast)addr += 2; }
-*/
-#endif // WVLAUNCH
-
-
-
-/************************************************************************************************/
-/************************************* W C I T S T *********************************************/
-/************************************************************************************************/
-
-#if defined WCITST
-#define MSF_COMPONENT_ID 0 //Although there is no DUI support, we need this to get ...
-#define MSF_COMPONENT_VAR 0 //...compatibilty check to function
-#define MSF_COMPONENT_MAJOR_VER 0 //...;?this is worth looking into to make this a more
-#define MSF_COMPONENT_MINOR_VER 0 //..."defined" I/F so OEMers can figure out what to do
-
-//#define HCF_ENCAP HCF_ENC_NONE //to get going
-//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To ease testing the different options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#define HCF_TYPE (HCF_TYPE_WPA | HCF_TYPE_PRELOADED) // Hermes-I for HCF6, II for HCF7
-#define HCF_DMA 1
-//#define LLB //!!!!MIC Debug Only
-#if defined LLB && !((HCF_TYPE) & HCF_TYPE_WPA)
-err: no LLB unless SSN;
-#endif // LLB / HCF_TYPE_WPA
-//#define HCF_ALIGN 2
-#define HCF_DLV 1 //just to change memory layout ????;?
-//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To ease testing the different options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#define HCF_ASSERT HCF_ASSERT_SW_SUP // logging via Hermes support registerr
-//#define HCF_ASSERT HCF_ASSERT_MB // logging via Mailbox
-
-#if defined __GNUC__
-#include "stdio.h"
-//#include "unistd.h" //ioperm libc5
-#include "sys/io.h" //ioperm glibc
-#define extern //see IO Port Programming mini-HOWTO
-//#include "asm/io.h" //
-#define IN_PORT_WORD(port) inw( (hcf_io)(port) )
-#define IN_PORT_DWORD(port) inl( (hcf_io)(port) )
-#define OUT_PORT_WORD(port, value) outw( (hcf_io)(port), (hcf_16)(value) )
-#define OUT_PORT_DWORD(port, value) outl( (hcf_io)(port), (hcf_16)(value) )
-#else
-#pragma warning ( disable: 4001 )
-#define FAR __far // segmented 16 bits mode
-
-#include <stdio.h>
-#include <conio.h>
-#ifndef _DEBUG
-#pragma intrinsic( _inp, _inpw, _outp, _outpw )
-#endif // _DEBUG
-
-#ifdef LOG
-extern FILE* utm_logfile;
-hcf_16 ipw( hcf_16 port );
-hcf_8 ipb( hcf_16 port );
-void opw( hcf_16 port, hcf_16 value );
-void opb( hcf_16 port, hcf_8 value );
-
-#define IN_PORT_WORD(port) ipw( (hcf_io)(port) )
-#define OUT_PORT_WORD(port, value) opw( (hcf_io)(port), (hcf_16)(value) )
-#else // LOG
-#define IN_PORT_WORD(port) ((hcf_16)_inpw( (hcf_io)(port) ))
-#define OUT_PORT_WORD(port, value) ((void)_outpw( (hcf_io)(port), value ))
-#endif // LOG
-
-#if 1 //ASM example
-#define IN_PORT_STRING_16( port, addr, len) __asm \
-{ \
- __asm push di \
- __asm push es \
- __asm mov cx,len \
- __asm les di,addr \
- __asm mov dx,port \
- __asm rep insw \
- __asm pop es \
- __asm pop di \
-}
-
-#define OUT_PORT_STRING_16( port, addr, len) __asm \
-{ \
- __asm push si \
- __asm push ds \
- __asm mov cx,len \
- __asm lds si,addr \
- __asm mov dx,port \
- __asm rep outsw \
- __asm pop ds \
- __asm pop si \
-}
-
-#endif // asm example
-
-#endif // __GCC__
-
-#if ! defined IN_PORT_STRING_16
-#define IN_PORT_STRING_16( port, addr, n) while (n--) \
- { *(hcf_16 FAR*)addr = IN_PORT_WORD( port ); ((hcf_16 FAR*)addr)++; }
-#define OUT_PORT_STRING_16( port, addr, n) while (n--) \
- { OUT_PORT_WORD( port, *(hcf_16 FAR*)addr ); ((hcf_16 FAR*)addr)++; }
-#endif // IN_PORT_STRING_16
-
-#endif // WCITST
-
-
-/************************************************************************************************/
-/******************************* Motorola Power PC 800 family *********************************/
-/************************************************************************************************/
-/* known users: LH@I
- */
-
-#if defined I_MPC8XX
-
-#define MSF_COMPONENT_VAR 0
-#define MSF_COMPONENT_ID 0
-#define MSF_COMPONENT_MAJOR_VER 1
-#define MSF_COMPONENT_MINOR_VER 0
-
-#define HCF_HSI_VAR 1
-
-#define HCF_BIG_ENDIAN 1
-#define HCF_IO HCF_IO_MEM // overrule standard Port I/O with Memory mapped I/O
-
-#include "o_portbl.h"
-#include "ipcmcia.h"
-typedef o_uint8_t hcf_8;
-typedef o_uint16_t hcf_16;
-typedef o_uint32_t hcf_32;
-
-/***************************************************************************/
-
-
-#ifdef _lint
-#else
-asm hcf_16 IN_PORT_WORD(int port)
-{
-% reg port
- lhbrx r3,r0,port
- eieio
-}
-#endif // _lint
-
-
-#ifdef _lint
-#else
-asm void OUT_PORT_WORD(int port, hcf_16 value)
-{
-% reg port, value
- sthbrx value,r0,port
- eieio
-}
-#endif // _lint
-
-/***************************************************************************/
-
-#define IN_PORT_STRING_16(port, addr, len) \
- { \
- unsigned l = (len); \
- hcf_16 *d = (volatile hcf_16 *)(addr); \
- while (l--) \
- { \
- *d++ = *(volatile hcf_16 *)(port); \
- EIEIO(); \
- } \
- }
-
-#define OUT_PORT_STRING_16(port, addr, len) \
- { \
- unsigned l = (len); \
- hcf_16 *s = (volatile hcf_16 *)(addr); \
- while (l--) \
- { \
- *(volatile hcf_16 *)(port) = *s++; \
- EIEIO(); \
- } \
- }
-
-#endif // I_MPC8XX
-
-
-
-/************************************************************************************************/
-/********************************** Diab or High C 29K **************************************/
-/************************************************************************************************/
-/* known users: GK@C
- */
-
-#if defined _AM29K
-
-#define MSF_COMPONENT_VAR 0
-#define MSF_COMPONENT_ID COMP_ID_AP1
-#define MSF_COMPONENT_MAJOR_VER 1
-#define MSF_COMPONENT_MINOR_VER 0
-
-#define HCF_BIG_ENDIAN 1
-#define HCF_IO HCF_IO_MEM // overrule standard Port I/O with Memory mapped I/O
-
-#define SwapBytes(t) /*lint -e572*/(((t) >> 8) + (((t) & 0xff) << 8))/*lint +e572*/
-
-#if defined __ppc
- #ifndef __GNUC__
- #define __asm__ asm
- #endif
-
- #if ! defined _lint
- #define EIEIO() __asm__(" eieio")
- #else
- #define EIEIO()
- #endif
-
- static hcf_16 IN_PORT_WORD(int port) {
- hcf_16 value = *(volatile hcf_16 *)(port); EIEIO();
- value = SwapBytes(value);
- return value;
- }
-
- #define OUT_PORT_WORD(port, value) \
- { *(volatile hcf_16 *)(port) = SwapBytes(value); EIEIO(); }
-#else
- #define IN_PORT_WORD(port) (*(volatile hcf_16 *)(port))
- #define OUT_PORT_WORD(port, value) (*(volatile hcf_16 *)(port) = (value))
-#endif // __ppc
-
-/***************************************************************************/
-
-#define IN_PORT_STRING_16( port, addr, len) { \
- unsigned l = (len); \
- hcf_16 t, *d = (volatile hcf_16 *)(addr); \
- while (l--) { \
- t = IN_PORT_WORD(port); \
- *d++ = SwapBytes(t); \
- } \
- }
-
-#define OUT_PORT_STRING_16( port, addr, len) { \
- unsigned l = (len); \
- hcf_16 t, *s = (volatile hcf_16 *)(addr); \
- while (l--) { \
- t = *s++; \
- OUT_PORT_WORD(port, SwapBytes(t)); \
- } \
- }
-
-#if PRODUCT == 9150
- #define HCF_ASSERT HCF_ASSERT_MB // logging via Mailbox
- #undef MSF_COMPONENT_ID
-#endif // 9150
-
-#endif // _AM29K
-
-
-
-/************************************************************************************************/
-/***************************************** MPC860 **********************************************/
-/************************************************************************************************/
-/* known users: RR
- */
-
-#if defined CPU
-#if CPU == PPC860
-
-#define MSF_COMPONENT_VAR 0
-#define MSF_COMPONENT_ID 0
-#define MSF_COMPONENT_MAJOR_VER 1
-#define MSF_COMPONENT_MINOR_VER 0
-
-#define HCF_BIG_ENDIAN 1
-#define HCF_IO HCF_IO_MEM // overrule standard Port I/O with Memory mapped I/O
-
-#define SwapBytes(t) /*lint -e572*/(((t) >> 8) + (((t) & 0xff) << 8))/*lint +e572*/
-
-#ifndef __GNUC__
- #define __asm__ asm
-#endif
-
-#if ! defined _lint
- #define EIEIO() __asm__(" eieio")
-#else
- #define EIEIO()
-#endif
-
-static hcf_16 IN_PORT_WORD(int port) {
- hcf_16 value = *(volatile hcf_16 *)(port); EIEIO();
- value = SwapBytes(value);
- return value;
- #ifdef __GNUC__
- /* the following serves to avoid the compiler warnings that
- * IN_PORT_WORD() is not used in some files */
- (void)IN_PORT_WORD;
- #endif
-}
-
-#define OUT_PORT_WORD(port, value) \
- { *(volatile hcf_16 *)(port) = SwapBytes(value); EIEIO(); }
-
-/***************************************************************************/
-
-#define IN_PORT_STRING_16( port, addr, len) { \
- unsigned l = (len); \
- hcf_16 t; \
- volatile hcf_16 *d = (volatile hcf_16 *)(addr); \
- while (l--) { \
- t = IN_PORT_WORD(port); \
- *d++ = SwapBytes(t); \
- } \
- }
-
-#define OUT_PORT_STRING_16( port, addr, len) { \
- unsigned l = (len); \
- hcf_16 t; \
- volatile hcf_16 *s = (volatile hcf_16 *)(addr); \
- while (l--) { \
- t = *s++; \
- OUT_PORT_WORD(port, SwapBytes(t)); \
- } \
- }
-
-#if PRODUCT == 9150
- #define HCF_ASSERT HCF_ASSERT_MB // logging via Mailbox
- #undef MSF_COMPONENT_ID
-#endif
-
-#endif /* PPC860 */
-#endif /* CPU */
-
-
-
-/************************************************************************************************/
-/**************************** Microtec Research 80X86 Compiler *********************************/
-/************************************************************************************************/
-
-#if 0
-
-//#undef HCF_TYPE // Hermes-I Station F/W without SSN support
-
-#define MSF_COMPONENT_VAR 0
-#define MSF_COMPONENT_ID 0
-#define MSF_COMPONENT_MAJOR_VER 1
-#define MSF_COMPONENT_MINOR_VER 0
-
-extern int far inp( int );
-extern void far outp( int, int );
-extern int far inpw( int );
-extern void far outpw( int, int );
-
-#define IN_PORT_WORD(port) ((hcf_16)inpw( (hcf_io)(port) ))
-#define OUT_PORT_WORD(port, value) ((void)outpw( (hcf_io)(port), value ))
-
-#define IN_PORT_STRING_16( port, addr, len) { \
- unsigned l = (len); \
- hcf_16 *d = (hcf_16 *)(addr); \
- while (l--) *d++ = IN_PORT_WORD(port); \
- }
-
-#define OUT_PORT_STRING_16( port, addr, len) { \
- unsigned l = (len); \
- hcf_16 *s = (hcf_16 *)(addr); \
- while (l--) OUT_PORT_WORD(port, *s++); \
- }
-#endif /* Microtec 80X86 C Compiler */
-
-
-
-/************************************************************************************************/
-/****************************** W A V E L A N E C ********************************************/
-/************************************************************************************************/
-/* known users: KM
- */
-
-#ifdef mc68302
-
-#define MSF_COMPONENT_ID COMP_ID_EC
-
-#include <version.h>
-
-#define MSF_COMPONENT_VAR 1
-#define MSF_COMPONENT_MAJOR_VER MAJOR_VERSION
-#define MSF_COMPONENT_MINOR_VER MINOR_VERSION
-
-#define HCF_BIG_ENDIAN 1
-#define HCF_IO HCF_IO_MEM // overrule standard Port I/O with Memory mapped I/O
-
-#define SwapBytes(t) /*lint -e572*/(((t) >> 8) + (((t) & 0xff) << 8))/*lint +e572*/
-
-#define PCMCIA_ADDRESS 0xc80000UL
-
-#define IN_PORT_2BYTES(port) (*(hcf_16 *)(port))
-#if 0
-static hcf_16 IN_PORT_WORD(hcf_32 port) // should be hcf_io, not hcf_32
-{
- hcf_16 word = IN_PORT_2BYTES(port);
- return SwapBytes(word);
-}
-#else
-static hcf_16 swap_var;
-#define IN_PORT_WORD(port) \
- (((swap_var = IN_PORT_2BYTES(port)) >> 8) + (((swap_var) & 0xff) << 8))
-#endif
-#define OUT_PORT_2BYTES(port, value) (*(hcf_16 *)(port) = (hcf_16)(value))
-#define OUT_PORT_WORD(port, value) OUT_PORT_2BYTES(port, SwapBytes(value))
-
-/*
-#define IN_PORT_STRING_16(port, addr, len) \
- while ((len)--) {*(hcf_16 *)(addr) = IN_PORT_2BYTES(port); ((cast)addr) += 2; }
-#define OUT_PORT_STRING_16(port, addr, len) \
- while ((len)--) {OUT_PORT_2BYTES((port), *(hcf_16 *)(addr)) ; ((cast)addr) += 2; }
-*/
-
-#endif /* mc68302 */
-
-
-
-/************************************************************************************************/
-/********************************* NGAP ***************************************/
-/************************************************************************************************/
-
-#if defined __VX_WORKS__ /* VxWorks */
-
-#if defined WLC_STATION
-//#undef HCF_TYPE /* Hermes-I Station F/W without SSN support */
-#define MSF_COMPONENT_ID COMP_ID_VX_WORKS_ENDSTA
-#else
-#define MSF_COMPONENT_ID COMP_ID_VX_WORKS_ENDAP
-#endif // WLC_STATION
-
-#define HCF_YIELD (taskDelay(0) == 0)
-
-#define MSF_COMPONENT_VAR 1
-#define MSF_COMPONENT_MAJOR_VER 1
-#define MSF_COMPONENT_MINOR_VER 0
-
-// #define HCF_ASSERT HCF_ASSERT_MB // logging via Mailbox
-
-#if defined PC486BSP
-
-#define IN_PORT_WORD(port) (sysInWord ((hcf_io)(port)))
-#define OUT_PORT_WORD(port, value) (sysOutWord ((hcf_io)(port), (hcf_16) (value)))
-#define IN_PORT_STRING_16(port, addr, n) (sysInWordString ((hcf_io)(port), addr, n))
-#define OUT_PORT_STRING_16(port, addr, n) (sysOutWordString ((hcf_io)(port), addr, n))
-
-#elif defined AS2000BSP
-
-#define HCF_IO HCF_IO_MEM // overrule standard Port I/O with Memory mapped I/O
-
-/* Define PCI stuff here. */
-unsigned short sysRead16( unsigned short *port );
-void sysWrite16( unsigned short *port, unsigned short value );
-
-#define PCI_IN_BYTE( port ) \
- *(unsigned char *)( port )
-
-#define PCI_IN_WORD( port ) \
- sysRead16( (unsigned short *)( port ) )
-
-#define PCI_OUT_BYTE( port, value ) \
- *(unsigned char *)( port ) = (unsigned char)( value )
-
-#define PCI_OUT_WORD( port, value ) \
- sysWrite16( (unsigned short *)( port ), (unsigned short)( value ) )
-
-#define IN_PORT_WORD( port ) \
- PCI_IN_WORD( port )
-
-#define OUT_PORT_WORD( port, value ) \
- PCI_OUT_WORD( port, value )
-
-#define IN_PORT_STRING_16( port, buf, len ) \
- do { \
- hcf_16 *p; \
- \
- for ( p = (hcf_16 *)(buf); p < &( (hcf_16 *)(buf) )[ (int)len ]; p++ ) { \
- *p = PCI_IN_WORD( port ); \
- } \
- } while ( 0 )
-
-#define OUT_PORT_STRING_16( port, buf, len ) \
- do { \
- const hcf_16 *p; \
- \
- for ( p = (const hcf_16 *)( buf ); p < &( (const hcf_16 *)(buf) )[ (int)len ]; p++ ) { \
- PCI_OUT_WORD( port, *p ); \
- } \
- } while ( 0 )
-
-#elif defined FADS860BSP /* elif defined AS2000BSP */
-
-#define HCF_BIG_ENDIAN 1
-
-#define HCF_IO HCF_IO_MEM // overrule standard Port I/O with Memory mapped I/O
-
-#ifndef __GNUC__
- #define __asm__ asm
-#endif
-
-#if ! defined _lint
- #define EIEIO() __asm__(" eieio")
-#else
- #define EIEIO()
-#endif
-
-static hcf_16 IN_PORT_WORD(int port) {
- hcf_16 value = *(volatile hcf_16 *)(port); EIEIO();
- value = ((value & 0xff00) >> 8) + ((value & 0x00ff) << 8);
-/* value = CNV_LITTLE_TO_SHORT(value); */
- return value;
- #ifdef __GNUC__
- /* the following serves to avoid the compiler warnings that
- * IN_PORT_WORD() is not used in some files */
- (void)IN_PORT_WORD;
- #endif
-}
-
-#define OUT_PORT_WORD(port, value) \
- { *(volatile hcf_16 *)(port) = CNV_SHORT_TO_LITTLE(value); EIEIO(); }
-
-/***********************************************************************/
-
-#define IN_PORT_STRING_16( port, addr, len) { \
- unsigned l = (len); \
- volatile hcf_16 *d = (volatile hcf_16 *)(addr); \
- while (l--) { \
- *d++ = *(volatile hcf_16 *)(port); \
- EIEIO(); \
- } \
- }
-
-#define OUT_PORT_STRING_16( port, addr, len) { \
- unsigned l = (len); \
- volatile hcf_16 *s = (volatile hcf_16 *)(addr); \
- while (l--) { \
- *(volatile hcf_16 *)(port) = *s++; \
- EIEIO(); \
- } \
- }
-
-#elif defined DAYTONABSP
-
-#define HCF_BIG_ENDIAN 1
-
-#define HCF_IO HCF_IO_MEM // overrule standard Port I/O with Memory mapped I/O
-
-#ifndef __GNUC__
- #define __asm__ asm
-#endif
-
-#define IN_PORT_WORD(port) (sysOrinocoInWord((unsigned long)(port)))
-#define OUT_PORT_WORD(port,value) (sysOrinocoOutWord((unsigned long)(port), (unsigned short)(value)))
-
-#define IN_PORT_STRING_16(port,addr,len) (sysOrinocoInString((port), (addr), (len)))
-#define OUT_PORT_STRING_16(port,addr,len) (sysOrinocoOutString((port), (addr), (len)))
-
-extern unsigned char sysOrinocoInByte (unsigned long port);
-extern unsigned short sysOrinocoInWord (unsigned long port);
-extern void sysOrinocoInString (unsigned long port, void *addr, unsigned short len);
-
-extern void sysOrinocoOutByte (unsigned long port, unsigned char value);
-extern void sysOrinocoOutWord (unsigned long port, unsigned short value);
-extern void sysOrinocoOutString (unsigned long port, void *addr, unsigned short len);
-
-#elif defined ALPHA_BSP
-
-#define HCF_BIG_ENDIAN 1
-
-#define HCF_IO HCF_IO_MEM // overrule standard Port I/O with Memory mapped I/O
-
-#ifndef __GNUC__
- #define __asm__ asm
-#endif
-
-#define IN_PORT_WORD(port) (sysOrinocoInWord((unsigned long)(port)))
-#define OUT_PORT_WORD(port,value) (sysOrinocoOutWord((unsigned long)(port), (unsigned short)(value)))
-
-#define IN_PORT_STRING_16(port,addr,len) (sysOrinocoInString((port), (addr), (len)))
-#define OUT_PORT_STRING_16(port,addr,len) (sysOrinocoOutString((port), (addr), (len)))
-
-extern unsigned char sysOrinocoInByte (unsigned long port);
-extern unsigned short sysOrinocoInWord (unsigned long port);
-extern void sysOrinocoInString (unsigned long port, void *addr, unsigned short len);
-
-extern void sysOrinocoOutByte (unsigned long port, unsigned char value);
-extern void sysOrinocoOutWord (unsigned long port, unsigned short value);
-extern void sysOrinocoOutString (unsigned long port, void *addr, unsigned short len);
-
-#else
-
-err: /* commented here */ /* "BSP is not defined..." */
-
-#endif /* else PC486BSP */
-
-#endif // __VX_WORKS__
-
-
-
-/************************************************************************************************/
-/****************************** VXWORKS. Motorola Sandpoint PowerPC 824X ***********************/
-/************************************************************************************************/
-#ifdef __VX_WORKS_SANDPOINT_824X__
-
-#include <vxWorks.h>
-#include <sysLib.h>
-#include <taskLib.h>
-
-#ifdef WVLAN_53
-#define MSF_COMPONENT_ID COMP_ID_VX_WORKS_ENDSTA
-#endif /* WVLAN_53 */
-
-#ifdef WVLAN_54
-#define MSF_COMPONENT_ID COMP_ID_VX_WORKS_ENDAP
-#endif /* WVLAN_54 */
-
-#ifdef WVLAN_56
-#define MSF_COMPONENT_ID COMP_ID_VX_WORKS_END
-#endif /* WVLAN_56 */
-
-#if !defined MSF_COMPONENT_ID
-#error "you must define an MSF component ID: WVLAN_53, WVLAN_54, WVLAN_56"
-#endif
-
-#define MSF_COMPONENT_VAR 1
-
-#define HCF_EXT HCF_EXT_INFO_LOG
-#define HCF_SLEEP ( HCF_CDS | HCF_DDS )
-//#define HCF_SLEEP ( HCF_DDS )
-
-#ifndef HCF_ACT_WAKEUP
-#define HCF_ACT_WAKEUP 0x1D
-#endif // HCF_ACT_WAKEUP
-
-#if defined FATNIC | defined BEAGLE_H253
-#define T1__HCF_TYPE HCF_TYPE_STA
-#else
-#define T1__HCF_TYPE HCF_TYPE_AP | HCF_TYPE_STA
-#endif
-
-#ifdef HERMES_USB
-#define T2__HCF_TYPE (T1__HCF_TYPE | HCF_TYPE_USB)
-#else // HERMES_USB
-#define T2__HCF_TYPE (T1__HCF_TYPE)
-#endif // HERMES_USB
-
-#ifdef _WARP
-#define T3__HCF_TYPE (T2__HCF_TYPE | HCF_TYPE_HII5)
-#else // _WARP
-#define T3__HCF_TYPE (T2__HCF_TYPE | HCF_TYPE_WPA | HCF_TYPE_HII)
-#endif // WARP
-
-#ifdef _CCX
-#define T4__HCF_TYPE (T3__HCF_TYPE | HCF_TYPE_CCX)
-#else // _WARP
-#define T4__HCF_TYPE (T3__HCF_TYPE)
-#endif // _CCX
-
-#define T5__HCF_TYPE (T4__HCF_TYPE)
-
-// Default to TYPE_AP + SSN!
-#define HCF_TYPE (T5__HCF_TYPE )
-
-
-
-#define MSF_COMPONENT_MAJOR_VER 2
-#define MSF_COMPONENT_MINOR_VER 0
-
-#define HCF_IO HCF_IO_MEM
-#define HCF_DMA 0
-#define HCF_MEM_IO 1
-#define HCF_BIG_ENDIAN 1
-
-//#define support_32bits 1
-
-#define IN_PORT_WORD(port) (sysInWord( (hcf_io)(port) ))
-#define OUT_PORT_WORD(port, value) (sysOutWord( (hcf_io)(port), (hcf_16)(value) ))
-#define IN_PORT_DWORD(port) (sysInLong( (hcf_io)(port) ))
-#define OUT_PORT_DWORD(port, value) (sysOutLong( (hcf_io)(port), (hcf_16)(value) ))
-#define IN_PORT_STRING_16(port, dst, n) (sysInWordString((hcf_io)(port), (hcf_16 *)dst, n))
-#define OUT_PORT_STRING_16(port, src, n) (sysOutWordString((hcf_io)(port), (hcf_16 *)src, n))
-
-#ifdef WVLAN_DEBUG
-#define DBG 1
-#define _DEBUG 1
-#endif
-
-/* we'll need to add these prints someday */
-#define OUTPUTDEBUGMSG(dprintf_exp)
-#define ASSERTDEBUGMSG(cond, dprintf_exp)
-
-#define HCF_INTERFACE_CONNECT(ifbp)
-#define HCF_INTERFACE_DISCONNECT(ifbp)
-#define HCF_ENTER_INTERFACE_FUNCT(ibfb)
-#define HCF_LEAVE_INTERFACE_FUNCT(ifbp)
-
-#define CNV_END_INT(w) ( ((hcf_16)(w) & 0x00FF) << 8 | ((hcf_16)(w) & 0xFF00) >> 8 )
-#define CNV_LITTLE_TO_INT(w) CNV_END_INT(w)
-#define CNV_INT_TO_LITTLE(w) CNV_LITTLE_TO_INT(w)
-
-#endif /* __VX_WORKS_SANDPOINT_824X__ */
-
-/************************************************************************************************/
-/************************************* VXWORKS. ARM T8300 IPPhone *****************************/
-/************************************************************************************************/
-#if defined( IPT_T8300 ) || defined( IPT_T8307 )
-
-#include <vxWorks.h>
-#include <sysLib.h>
-#include <taskLib.h>
-
-#define HCF_ALIGN 4 /* default to 4 byte alignment */
-
-#define BEAGLE_H253 /* Hermes 2.5.3 build, better to be in the project file */
-#define OOR_DDS /* Hermes 2.5.3 build, better to be in the project file */
-#define FATNIC
-
-
-#ifdef WVLAN_53
-#define MSF_COMPONENT_ID COMP_ID_VX_WORKS_ENDSTA
-#endif /* WVLAN_53 */
-
-#ifdef WVLAN_54
-#define MSF_COMPONENT_ID COMP_ID_VX_WORKS_ENDAP
-#endif /* WVLAN_54 */
-
-#ifdef WVLAN_56
-#define MSF_COMPONENT_ID COMP_ID_VX_WORKS_END
-#endif /* WVLAN_56 */
-
-#if !defined MSF_COMPONENT_ID
-#error "you must define an MSF component ID: WVLAN_53, WVLAN_54, WVLAN_56"
-#endif
-
-#define MSF_COMPONENT_VAR 1
-
-#define HCF_EXT HCF_EXT_INFO_LOG
-//#define HCF_EXT HCF_EXT_INFO_LOG | HCF_EXT_MB
-#define HCF_SLEEP ( HCF_CDS | HCF_DDS )
-//#define HCF_SLEEP ( HCF_DDS )
-
-#ifndef HCF_ACT_WAKEUP
-#define HCF_ACT_WAKEUP 0x1D
-#endif // HCF_ACT_WAKEUP
-
-#if defined FATNIC || defined BEAGLE_H253
-#define T1__HCF_TYPE HCF_TYPE_STA
-#else
-//#define T1__HCF_TYPE HCF_TYPE_AP | HCF_TYPE_STA
-#define T1__HCF_TYPE HCF_TYPE_STA /* dz, Station code only */
-#endif
-
-#ifdef HERMES_USB
-#define T2__HCF_TYPE (T1__HCF_TYPE | HCF_TYPE_USB)
-#else // HERMES_USB
-#define T2__HCF_TYPE (T1__HCF_TYPE)
-#endif // HERMES_USB
-
-#ifdef _WARP
-#define T3__HCF_TYPE (T2__HCF_TYPE | HCF_TYPE_HII5)
-#else // _WARP
-#define T3__HCF_TYPE (T2__HCF_TYPE | HCF_TYPE_WPA | HCF_TYPE_HII)
-//#define T3__HCF_TYPE (T2__HCF_TYPE | HCF_TYPE_HII) /* dz. no WPA support at this time, test code */
-#endif // WARP
-
-#ifdef _CCX
-#define T4__HCF_TYPE (T3__HCF_TYPE | HCF_TYPE_CCX)
-#else // _WARP
-#define T4__HCF_TYPE (T3__HCF_TYPE)
-#endif // _CCX
-
-#define T5__HCF_TYPE (T4__HCF_TYPE)
-
-// Default to TYPE_AP + SSN!
-#define HCF_TYPE (T5__HCF_TYPE )
-
-
-#define MSF_COMPONENT_MAJOR_VER 2
-#define MSF_COMPONENT_MINOR_VER 0
-
-#define HCF_IO HCF_IO_MEM
-#define HCF_DMA 0
-#define HCF_MEM_IO 1
-
-
-/* Endian is determined by vxWorks project compile option */
-#if (_BYTE_ORDER == _BIG_ENDIAN)
-#undef HCF_LITTLE_ENDIAN
-#define HCF_BIG_ENDIAN 1
-#endif
-
-
-#define CNV_END(w) ( ((hcf_16)(w) & 0x00FF) << 8 | ((hcf_16)(w) & 0xFF00) >> 8 )
-#if defined HCF_BIG_ENDIAN
-//******************************************** B I G E N D I A N *******************************************
-#define CNV_LITTLE_TO_INT(w) CNV_END(w) // endianess conversion needed
-#define CNV_BIG_TO_INT(w) (w) // no endianess conversion needed
-#else
-//****************************************** L I T T L E E N D I A N ****************************************
-#define CNV_LITTLE_TO_INT(w) (w) // no endianess conversion needed
-#define CNV_BIG_TO_INT(w) CNV_END(w) // endianess conversion needed
-#endif // HCF_BIG_ENDIAN
-
-// conversion macros which can be expressed in other macros
-#define CNV_INT_TO_LITTLE(w) CNV_LITTLE_TO_INT(w)
-#define CNV_INT_TO_BIG(w) CNV_BIG_TO_INT(w)
-
-
-
-#define IN_PORT_WORD( port ) *((volatile hcf_16 *)( port ))
-#define OUT_PORT_WORD( port, value ) *((volatile hcf_16 *)( port )) = ((hcf_16)( value ))
-//#define IN_PORT_BYTE( port ) *((volatile hcf_8 *)( port ))
-
-#define IN_PORT_STRING( port, addr, len) { \
- unsigned l = len; \
- hcf_16 *d = (hcf_16 *)(addr); \
- hcf_16 t; \
- while (l--) { \
- t = IN_PORT_WORD(port); \
- *d++ = CNV_LITTLE_TO_INT(t); \
- } \
-} // IN_PORT_STRING
-
-#define OUT_PORT_STRING( port, addr, len) { \
- unsigned l = (len); \
- hcf_16 *s = (hcf_16 *)(addr); \
- hcf_16 t; \
- while (l--) { \
- t = *s++; \
- t = CNV_LITTLE_TO_INT(t); \
- OUT_PORT_WORD(port, t); \
- } \
-} // OUT_PORT_STRING
-
-#define IN_PORT_STRING_16(port, dst, n) { \
- unsigned l = (n); \
- hcf_16 *d = (hcf_16 *)(dst); \
- while (l--) { \
- *d++ = IN_PORT_WORD(port); \
- } \
-} // IN_PORT_STRING_16
-
-#define OUT_PORT_STRING_16(port, src, n) { \
- hcf_16 t; \
- int l = (n); \
- hcf_16 *s = (hcf_16 *)(src); \
- while (l--) { \
- t = *s++; \
- OUT_PORT_WORD(port, t); \
- } \
-} // OUT_PORT_STRING_16
-
-/* #define HCF_YIELD (taskDelay(0) == 0) */
-
-
-
-#ifdef WVLAN_DEBUG
-#define DBG 1
-#define _DEBUG 1
-#endif
-
-/* we'll need to add these prints someday */
-#define OUTPUTDEBUGMSG(dprintf_exp)
-#define ASSERTDEBUGMSG(cond, dprintf_exp)
-
-#define HCF_INTERFACE_CONNECT(ifbp)
-#define HCF_INTERFACE_DISCONNECT(ifbp)
-#define HCF_ENTER_INTERFACE_FUNCT(ibfb)
-#define HCF_LEAVE_INTERFACE_FUNCT(ifbp)
-
-#define sysInWord(offsetAddr) IN_PORT_WORD(offsetAddr)
-#define sysInByte(offsetAddr) IN_PORT_BYTE(offsetAddr)
-#define sysOutWord(addr, value) OUT_PORT_WORD(addr, value)
-
-#endif /*IPT_T8300 */
-
/************************************************************************************************************/
/*********************************** **************************************/
/************************************************************************************************************/
@@ -2003,10 +478,6 @@ err: /* commented here */ /* "BSP is not defined..." */
#define HCF_BIG_ENDIAN 0
#endif // HCF_BIG_ENDIAN
-#if ! defined HCF_DL_ONLY
-#define HCF_DL_ONLY 0
-#endif // HCF_DL_ONLY
-
#if ! defined HCF_DMA
#define HCF_DMA 0
#endif // HCF_DMA
@@ -2015,14 +486,6 @@ err: /* commented here */ /* "BSP is not defined..." */
#define HCF_ENCAP HCF_ENC
#endif // HCF_ENCAP
-#if ! defined HCF_ENTRY
-#define HCF_ENTRY( ifbp )
-#endif // HCF_ENTRY
-
-#if ! defined HCF_EXIT
-#define HCF_EXIT( ifbp )
-#endif // HCF_EXIT
-
#if ! defined HCF_EXT
#define HCF_EXT 0
#endif // HCF_EXT
@@ -2064,11 +527,6 @@ err: /* commented here */ /* "BSP is not defined..." */
#define HCF_BIG_ENDIAN 1 //just for convenience of generating cfg_hcf_opt
#endif // HCF_BIG_ENDIAN
-#if HCF_DL_ONLY
-#undef HCF_DL_ONLY
-#define HCF_DL_ONLY 1 //just for convenience of generating cfg_hcf_opt
-#endif // HCF_DL_ONLY
-
#if HCF_DMA
#undef HCF_DMA
#define HCF_DMA 1 //just for convenience of generating cfg_hcf_opt
@@ -2244,21 +702,9 @@ err: primary variants 1 and 2 correspond with H-I only;
#define TEXT(x) x
#endif // TEXT
-#if !defined _TCHAR_DEFINED
-#define TCHAR char
-#endif // _TCHAR_DEFINED
-
/************************************************************************************************************/
/*********************** C O N F L I C T D E T E C T I O N & R E S O L U T I O N ************************/
/************************************************************************************************************/
-#if defined HCF_LITTLE_ENDIAN
-err: HCF_LITTLE_ENDIAN is obsolete;
-#endif // HCF_LITTLE_ENDIAN
-
-#if defined HCF_INT_OFF
-err: HCF_INT_OFF is obsolete;
-#endif //HCF_INT_OFF
-
#if HCF_ALIGN != 1 && HCF_ALIGN != 2 && HCF_ALIGN != 4 && HCF_ALIGN != 8
err: invalid value for HCF_ALIGN;
#endif // HCF_ALIGN
@@ -2276,10 +722,6 @@ err: these macros are not used consistently;
err: invalid value for HCF_BIG_ENDIAN;
#endif // HCF_BIG_ENDIAN
-#if HCF_DL_ONLY != 0 && HCF_DL_ONLY != 1
-err: invalid value for HCF_DL_ONLY;
-#endif // HCF_DL_ONLY
-
#if HCF_DMA != 0 && HCF_DMA != 1
err: invalid value for HCF_DMA;
#endif // HCF_DMA
diff --git a/drivers/staging/wlags49_h2/hcfdef.h b/drivers/staging/wlags49_h2/hcfdef.h
index cb1966d8473..a62b53a2289 100644
--- a/drivers/staging/wlags49_h2/hcfdef.h
+++ b/drivers/staging/wlags49_h2/hcfdef.h
@@ -1,286 +1,249 @@
-
-// vim:tw=110:ts=4:
#ifndef HCFDEFC_H
#define HCFDEFC_H 1
/*************************************************************************************************
-*
-* FILE : HCFDEF.H
-*
-* DATE : $Date: 2004/08/05 11:47:10 $ $Revision: 1.8 $
-* Original: 2004/05/28 14:05:35 Revision: 1.59 Tag: hcf7_t20040602_01
-* Original: 2004/05/13 15:31:45 Revision: 1.53 Tag: hcf7_t7_20040513_01
-* Original: 2004/04/15 09:24:42 Revision: 1.44 Tag: hcf7_t7_20040415_01
-* Original: 2004/04/13 14:22:45 Revision: 1.43 Tag: t7_20040413_01
-* Original: 2004/04/01 15:32:55 Revision: 1.40 Tag: t7_20040401_01
-* Original: 2004/03/10 15:39:28 Revision: 1.36 Tag: t20040310_01
-* Original: 2004/03/03 14:10:12 Revision: 1.34 Tag: t20040304_01
-* Original: 2004/03/02 09:27:12 Revision: 1.32 Tag: t20040302_03
-* Original: 2004/02/24 13:00:29 Revision: 1.29 Tag: t20040224_01
-* Original: 2004/02/18 17:13:57 Revision: 1.26 Tag: t20040219_01
-*
-* AUTHOR : Nico Valster
-*
-* SPECIFICATION: ...........
-*
-* DESC : Definitions and Prototypes for HCF only
-*
-**************************************************************************************************
-*
-*
-* SOFTWARE LICENSE
-*
-* This software is provided subject to the following terms and conditions,
-* which you should read carefully before using the software. Using this
-* software indicates your acceptance of these terms and conditions. If you do
-* not agree with these terms and conditions, do not use the software.
-*
-* COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
-* COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
-* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
-* All rights reserved.
-*
-* Redistribution and use in source or binary forms, with or without
-* modifications, are permitted provided that the following conditions are met:
-*
-* . Redistributions of source code must retain the above copyright notice, this
-* list of conditions and the following Disclaimer as comments in the code as
-* well as in the documentation and/or other materials provided with the
-* distribution.
-*
-* . Redistributions in binary form must reproduce the above copyright notice,
-* this list of conditions and the following Disclaimer in the documentation
-* and/or other materials provided with the distribution.
-*
-* . Neither the name of Agere Systems Inc. nor the names of the contributors
-* may be used to endorse or promote products derived from this software
-* without specific prior written permission.
-*
-* Disclaimer
-*
-* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
-* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
-* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
-* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
-* RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
-* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-* ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
-* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-* DAMAGE.
-*
-*
-*************************************************************************************************/
+ *
+ * FILE : HCFDEF.H
+ *
+ * DATE : $Date: 2004/08/05 11:47:10 $ $Revision: 1.8 $
+ * Original: 2004/05/28 14:05:35 Revision: 1.59 Tag: hcf7_t20040602_01
+ * Original: 2004/05/13 15:31:45 Revision: 1.53 Tag: hcf7_t7_20040513_01
+ * Original: 2004/04/15 09:24:42 Revision: 1.44 Tag: hcf7_t7_20040415_01
+ * Original: 2004/04/13 14:22:45 Revision: 1.43 Tag: t7_20040413_01
+ * Original: 2004/04/01 15:32:55 Revision: 1.40 Tag: t7_20040401_01
+ * Original: 2004/03/10 15:39:28 Revision: 1.36 Tag: t20040310_01
+ * Original: 2004/03/03 14:10:12 Revision: 1.34 Tag: t20040304_01
+ * Original: 2004/03/02 09:27:12 Revision: 1.32 Tag: t20040302_03
+ * Original: 2004/02/24 13:00:29 Revision: 1.29 Tag: t20040224_01
+ * Original: 2004/02/18 17:13:57 Revision: 1.26 Tag: t20040219_01
+ *
+ * AUTHOR : Nico Valster
+ *
+ * SPECIFICATION: ...........
+ *
+ * DESC : Definitions and Prototypes for HCF only
+ *
+ **************************************************************************************************
+ *
+ *
+ * SOFTWARE LICENSE
+ *
+ * This software is provided subject to the following terms and conditions,
+ * which you should read carefully before using the software. Using this
+ * software indicates your acceptance of these terms and conditions. If you do
+ * not agree with these terms and conditions, do not use the software.
+ *
+ * COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+ * COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+ * COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+ * All rights reserved.
+ *
+ * Redistribution and use in source or binary forms, with or without
+ * modifications, are permitted provided that the following conditions are met:
+ *
+ * . Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following Disclaimer as comments in the code as
+ * well as in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * . Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following Disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * . Neither the name of Agere Systems Inc. nor the names of the contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Disclaimer
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
+ * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
+ * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ *
+ *************************************************************************************************/
/************************************************************************************************/
/********************************* P R E F I X E S ********************************************/
/************************************************************************************************/
-//IFB_ Interface Block
-//HCMD_ Hermes Command
-//HFS_ Hermes (Transmit/Receive) Frame Structure
-//HREG_ Hermes Register
+//IFB_ Interface Block
+//HCMD_ Hermes Command
+//HFS_ Hermes (Transmit/Receive) Frame Structure
+//HREG_ Hermes Register
/*************************************************************************************************/
-#if 0 //
-#define BIT0 0x0001
-#define BIT1 0x0002
-#define BIT2 0x0004
-#define BIT3 0x0008
-#define BIT4 0x0010
-#define BIT5 0x0020
-#define BIT6 0x0040
-#define BIT7 0x0080
-#define BIT8 0x0100
-#define BIT9 0x0200
-#define BIT10 0x0400
-#define BIT11 0x0800
-#define BIT12 0x1000
-#define BIT13 0x2000
-#define BIT14 0x4000
-#define BIT15 0x8000
-#define BIT16 0x00010000
-#define BIT17 0x00020000
-#define BIT18 0x00040000
-#define BIT19 0x00080000
-#define BIT20 0x00100000
-#define BIT21 0x00200000
-#define BIT22 0x00400000
-#define BIT23 0x00800000
-#define BIT24 0x01000000
-#define BIT25 0x02000000
-#define BIT26 0x04000000
-#define BIT27 0x08000000
-#define BIT28 0x10000000
-#define BIT29 0x20000000
-#define BIT30 0x40000000
-#define BIT31 0x80000000
-#endif // 0
-
/************************************************************************************************/
/********************************* GENERAL EQUATES **********************************************/
/************************************************************************************************/
-#define HCF_MAGIC 0x7D37 // "}7" Handle validation
+#define HCF_MAGIC 0x7D37 // "}7" Handle validation
-#define PLUG_DATA_OFFSET 0x00000800 //needed by some test tool on top of H-II NDIS driver
+#define PLUG_DATA_OFFSET 0x00000800 //needed by some test tool on top of H-II NDIS driver
-#define INI_TICK_INI 0x00040000L
+#define INI_TICK_INI 0x00040000L
-#define IO_IN 0 //hcfio_in_string
-#define IO_OUT 1 //hcfio_out_string
+#define IO_IN 0 //hcfio_in_string
+#define IO_OUT 1 //hcfio_out_string
//DO_ASSERT, create an artificial FALSE to force an ASSERT without the nasty compiler warning
-#define DO_ASSERT ( assert_ifbp->IFB_Magic != HCF_MAGIC && assert_ifbp->IFB_Magic == HCF_MAGIC )
-#define NT_ASSERT 0x0000 //, NEVER_TESTED
-#define NEVER_TESTED MERGE_2( 0xEFFE, 0xFEEF )
-#define SE_ASSERT 0x5EFF /* Side Effect, HCFASSERT invokation which are only called for the
- * side effect and which should never trigger */
-#define DHF_FILE_NAME_OFFSET 10000 //to distinguish DHF from HCF asserts by means of line number
-#define MMD_FILE_NAME_OFFSET 20000 //to distinguish MMD from HCF asserts by means of line number
+#define DO_ASSERT ( assert_ifbp->IFB_Magic != HCF_MAGIC && assert_ifbp->IFB_Magic == HCF_MAGIC )
+#define NT_ASSERT 0x0000 //, NEVER_TESTED
+#define NEVER_TESTED MERGE_2( 0xEFFE, 0xFEEF )
+#define SE_ASSERT 0x5EFF /* Side Effect, HCFASSERT invokation which are only called for the
+ * side effect and which should never trigger */
+#define DHF_FILE_NAME_OFFSET 10000 //to distinguish DHF from HCF asserts by means of line number
+#define MMD_FILE_NAME_OFFSET 20000 //to distinguish MMD from HCF asserts by means of line number
// trace codes used to
// 1: profile execution times via HCF_TRACE and HCF_TRACE_VALUE
// 2: hierarchical flow information via HCFLOGENTRY / HCFLOGEXIT
-//#define HCF_TRACE_CONNECT useless
-//#define HCF_TRACE_DISCONNECT useless
-#define HCF_TRACE_ACTION 0x0000 // 0x0001
-#define HCF_TRACE_CNTL 0x0001 // 0x0002
-#define HCF_TRACE_DMA_RX_GET 0x0002 // 0x0004
-#define HCF_TRACE_DMA_RX_PUT 0x0003 // 0x0008
-#define HCF_TRACE_DMA_TX_GET 0x0004 // 0x0010
-#define HCF_TRACE_DMA_TX_PUT 0x0005 // 0x0020
-#define HCF_TRACE_GET_INFO 0x0006 // 0x0040
-#define HCF_TRACE_PUT_INFO 0x0007 // 0x0080
-#define HCF_TRACE_RCV_MSG 0x0008 // 0x0100
-#define HCF_TRACE_SEND_MSG 0x0009 // 0x0200
-#define HCF_TRACE_SERVICE_NIC 0x000A // 0x0400
+//#define HCF_TRACE_CONNECT useless
+//#define HCF_TRACE_DISCONNECT useless
+#define HCF_TRACE_ACTION 0x0000 // 0x0001
+#define HCF_TRACE_CNTL 0x0001 // 0x0002
+#define HCF_TRACE_DMA_RX_GET 0x0002 // 0x0004
+#define HCF_TRACE_DMA_RX_PUT 0x0003 // 0x0008
+#define HCF_TRACE_DMA_TX_GET 0x0004 // 0x0010
+#define HCF_TRACE_DMA_TX_PUT 0x0005 // 0x0020
+#define HCF_TRACE_GET_INFO 0x0006 // 0x0040
+#define HCF_TRACE_PUT_INFO 0x0007 // 0x0080
+#define HCF_TRACE_RCV_MSG 0x0008 // 0x0100
+#define HCF_TRACE_SEND_MSG 0x0009 // 0x0200
+#define HCF_TRACE_SERVICE_NIC 0x000A // 0x0400
// #define HCF_TRACE_ 0x000C // 0x1000
// #define HCF_TRACE_ 0x000D // 0x2000
// #define HCF_TRACE_ 0x000E // 0x4000
// #define HCF_TRACE_ 0x000F // 0x8000
-// ============================================ HCF_TRACE_... codes below 0x0010 are asserted on re-entry
-#define HCF_TRACE_ACTION_KLUDGE 0x0010 /* once you start introducing kludges there is no end to it
- * this is an escape to do not assert on re-entrancy problem caused
- * by HCF_ACT_INT_FORCE_ON used to get Microsofts NDIS drivers going
- */
-#define HCF_TRACE_STRIO 0x0020
-#define HCF_TRACE_ALLOC 0X0021
-#define HCF_TRACE_DL 0X0023
-#define HCF_TRACE_ISR_INFO 0X0024
-#define HCF_TRACE_CALIBRATE 0x0026
-
-#define HCF_TRACE_CMD_CPL 0x0040
-#define HCF_TRACE_CMD_EXE 0x0041
-#define HCF_TRACE_GET_FID 0x0042
-#define HCF_TRACE_GET_FRAG 0x0043
-#define HCF_TRACE_INIT 0x0044
-#define HCF_TRACE_PUT_FRAG 0x0045
-#define HCF_TRACE_SETUP_BAP 0x0046
-
-#define HCF_TRACE_EXIT 0x8000 // Keil C warns "long constant truncated to int"
-
-//#define BAP_0 HREG_DATA_0 //Used by DMA controller to access NIC RAM
-#define BAP_1 HREG_DATA_1 //Used by HCF to access NIC RAM
+// ============================================ HCF_TRACE_... codes below 0x0010 are asserted on re-entry
+#define HCF_TRACE_ACTION_KLUDGE 0x0010 /* once you start introducing kludges there is no end to it
+ * this is an escape to do not assert on re-entrancy problem caused
+ * by HCF_ACT_INT_FORCE_ON used to get Microsofts NDIS drivers going
+ */
+#define HCF_TRACE_STRIO 0x0020
+#define HCF_TRACE_ALLOC 0X0021
+#define HCF_TRACE_DL 0X0023
+#define HCF_TRACE_ISR_INFO 0X0024
+#define HCF_TRACE_CALIBRATE 0x0026
+
+#define HCF_TRACE_CMD_CPL 0x0040
+#define HCF_TRACE_CMD_EXE 0x0041
+#define HCF_TRACE_GET_FID 0x0042
+#define HCF_TRACE_GET_FRAG 0x0043
+#define HCF_TRACE_INIT 0x0044
+#define HCF_TRACE_PUT_FRAG 0x0045
+#define HCF_TRACE_SETUP_BAP 0x0046
+
+#define HCF_TRACE_EXIT 0x8000 // Keil C warns "long constant truncated to int"
+
+//#define BAP_0 HREG_DATA_0 //Used by DMA controller to access NIC RAM
+#define BAP_1 HREG_DATA_1 //Used by HCF to access NIC RAM
//************************* Hermes Receive/Transmit Frame Structures
//HFS_STAT
//see MMD.H for HFS_STAT_ERR
-#define HFS_STAT_MSG_TYPE 0xE000 //Hermes reported Message Type
-#define HFS_STAT_MIC_KEY_ID 0x1800 //MIC key used (if any)
-#define HFS_STAT_1042 0x2000 //RFC1042 Encoded
-#define HFS_STAT_TUNNEL 0x4000 //Bridge-Tunnel Encoded
-#define HFS_STAT_WMP_MSG 0x6000 //WaveLAN-II Management Protocol Frame
+#define HFS_STAT_MSG_TYPE 0xE000 //Hermes reported Message Type
+#define HFS_STAT_MIC_KEY_ID 0x1800 //MIC key used (if any)
+#define HFS_STAT_1042 0x2000 //RFC1042 Encoded
+#define HFS_STAT_TUNNEL 0x4000 //Bridge-Tunnel Encoded
+#define HFS_STAT_WMP_MSG 0x6000 //WaveLAN-II Management Protocol Frame
#if (HCF_TYPE) & HCF_TYPE_WPA
-#define HFS_STAT_MIC 0x0010 //Frame contains MIC //;? re-instate when F/W ready
+#define HFS_STAT_MIC 0x0010 //Frame contains MIC //;? re-instate when F/W ready
#endif
//************************* Hermes Register Offsets and Command bits
-#define HREG_IO_RANGE 0x80 //I/O Range used by Hermes
+#define HREG_IO_RANGE 0x80 //I/O Range used by Hermes
//************************* Command/Status
-#define HREG_CMD 0x00 //
-#define HCMD_CMD_CODE 0x3F
-#define HREG_PARAM_0 0x02 //
-#define HREG_PARAM_1 0x04 //
-#define HREG_PARAM_2 0x06 //
-#define HREG_STAT 0x08 //
-#define HREG_STAT_CMD_CODE 0x003F //
-#define HREG_STAT_DIAG_ERR 0x0100
-#define HREG_STAT_INQUIRE_ERR 0x0500
-#define HREG_STAT_CMD_RESULT 0x7F00 //
-#define HREG_RESP_0 0x0A //
-#define HREG_RESP_1 0x0C //
-#define HREG_RESP_2 0x0E //
+#define HREG_CMD 0x00 //
+#define HCMD_CMD_CODE 0x3F
+#define HREG_PARAM_0 0x02 //
+#define HREG_PARAM_1 0x04 //
+#define HREG_PARAM_2 0x06 //
+#define HREG_STAT 0x08 //
+#define HREG_STAT_CMD_CODE 0x003F //
+#define HREG_STAT_DIAG_ERR 0x0100
+#define HREG_STAT_INQUIRE_ERR 0x0500
+#define HREG_STAT_CMD_RESULT 0x7F00 //
+#define HREG_RESP_0 0x0A //
+#define HREG_RESP_1 0x0C //
+#define HREG_RESP_2 0x0E //
//************************* FID Management
-#define HREG_INFO_FID 0x10 //
-#define HREG_RX_FID 0x20 //
-#define HREG_ALLOC_FID 0x22 //
-#define HREG_TX_COMPL_FID 0x24 //
+#define HREG_INFO_FID 0x10 //
+#define HREG_RX_FID 0x20 //
+#define HREG_ALLOC_FID 0x22 //
+#define HREG_TX_COMPL_FID 0x24 //
//************************* BAP
//20031030 HWi Inserted this again because the dongle code uses this (GPIF.C)
-//#define HREG_SELECT_0 0x18 //
-//#define HREG_OFFSET_0 0x1C //
-//#define HREG_DATA_0 0x36 //
+//#define HREG_SELECT_0 0x18 //
+//#define HREG_OFFSET_0 0x1C //
+//#define HREG_DATA_0 0x36 //
-//#define HREG_OFFSET_BUSY 0x8000 // use HCMD_BUSY
-#define HREG_OFFSET_ERR 0x4000 //
-//rsrvd #define HREG_OFFSET_DATA_OFFSET 0x0FFF //
+//#define HREG_OFFSET_BUSY 0x8000 // use HCMD_BUSY
+#define HREG_OFFSET_ERR 0x4000 //
+//rsrvd #define HREG_OFFSET_DATA_OFFSET 0x0FFF //
-#define HREG_SELECT_1 0x1A //
-#define HREG_OFFSET_1 0x1E //
-#define HREG_DATA_1 0x38 //
+#define HREG_SELECT_1 0x1A //
+#define HREG_OFFSET_1 0x1E //
+#define HREG_DATA_1 0x38 //
//************************* Event
-#define HREG_EV_STAT 0x30 //
-#define HREG_INT_EN 0x32 //
-#define HREG_EV_ACK 0x34 //
+#define HREG_EV_STAT 0x30 //
+#define HREG_INT_EN 0x32 //
+#define HREG_EV_ACK 0x34 //
-#define HREG_EV_TICK 0x8000 //Auxiliary Timer Tick
-//#define HREG_EV_RES 0x4000 //H-I only: H/W error (Wait Time-out)
-#define HREG_EV_INFO_DROP 0x2000 //WMAC did not have sufficient RAM to build Unsollicited Frame
+#define HREG_EV_TICK 0x8000 //Auxiliary Timer Tick
+//#define HREG_EV_RES 0x4000 //H-I only: H/W error (Wait Time-out)
+#define HREG_EV_INFO_DROP 0x2000 //WMAC did not have sufficient RAM to build Unsollicited Frame
#if (HCF_TYPE) & HCF_TYPE_HII5
-#define HREG_EV_ACK_REG_READY 0x0000
+#define HREG_EV_ACK_REG_READY 0x0000
#else
-#define HREG_EV_ACK_REG_READY 0x1000 //Workaround Kludge bit for H-II (not H-II.5)
+#define HREG_EV_ACK_REG_READY 0x1000 //Workaround Kludge bit for H-II (not H-II.5)
#endif // HCF_TYPE_HII5
#if (HCF_SLEEP) & ( HCF_CDS | HCF_DDS )
-#define HREG_EV_SLEEP_REQ 0x0800
+#define HREG_EV_SLEEP_REQ 0x0800
#else
-#define HREG_EV_SLEEP_REQ 0x0000
+#define HREG_EV_SLEEP_REQ 0x0000
#endif // HCF_CDS / HCF_DDS
#if HCF_DMA
-//#define HREG_EV_LPESC 0x0400 // firmware sets this bit and clears it, not for host usage.
-#define HREG_EV_RDMAD 0x0200 // rx frame in host memory
-#define HREG_EV_TDMAD 0x0100 // tx frame in host memory processed
-//#define HREG_EV_RXDMA 0x0040 // firmware kicks off DMA engine (bit is not for host usage)
-//#define HREG_EV_TXDMA 0x0020 // firmware kicks off DMA engine (bit is not for host usage)
-#define HREG_EV_FW_DMA 0x0460 // firmware / DMA engine I/F (bits are not for host usage)
+//#define HREG_EV_LPESC 0x0400 // firmware sets this bit and clears it, not for host usage.
+#define HREG_EV_RDMAD 0x0200 // rx frame in host memory
+#define HREG_EV_TDMAD 0x0100 // tx frame in host memory processed
+//#define HREG_EV_RXDMA 0x0040 // firmware kicks off DMA engine (bit is not for host usage)
+//#define HREG_EV_TXDMA 0x0020 // firmware kicks off DMA engine (bit is not for host usage)
+#define HREG_EV_FW_DMA 0x0460 // firmware / DMA engine I/F (bits are not for host usage)
#else
-#define HREG_EV_FW_DMA 0x0000
+#define HREG_EV_FW_DMA 0x0000
#endif // HCF_DMA
-#define HREG_EV_INFO 0x0080 // Asynchronous Information Frame
-#define HREG_EV_CMD 0x0010 // Command completed, Status and Response available
-#define HREG_EV_ALLOC 0x0008 // Asynchronous part of Allocation/Reclaim completed
-#define HREG_EV_TX_EXC 0x0004 // Asynchronous Transmission unsuccessful completed
-#define HREG_EV_TX 0x0002 // Asynchronous Transmission successful completed
-#define HREG_EV_RX 0x0001 // Asynchronous Receive Frame
-
-#define HREG_EV_TX_EXT ( (HCF_EXT) & (HCF_EXT_INT_TX_EX | HCF_EXT_INT_TICK ) )
- /* HREG_EV_TX_EXT := 0x0000 or HREG_EV_TX_EXC and/or HREG_EV_TICK
- * could be extended with HREG_EV_TX */
+#define HREG_EV_INFO 0x0080 // Asynchronous Information Frame
+#define HREG_EV_CMD 0x0010 // Command completed, Status and Response available
+#define HREG_EV_ALLOC 0x0008 // Asynchronous part of Allocation/Reclaim completed
+#define HREG_EV_TX_EXC 0x0004 // Asynchronous Transmission unsuccessful completed
+#define HREG_EV_TX 0x0002 // Asynchronous Transmission successful completed
+#define HREG_EV_RX 0x0001 // Asynchronous Receive Frame
+
+#define HREG_EV_TX_EXT ( (HCF_EXT) & (HCF_EXT_INT_TX_EX | HCF_EXT_INT_TICK ) )
+/* HREG_EV_TX_EXT := 0x0000 or HREG_EV_TX_EXC and/or HREG_EV_TICK
+ * could be extended with HREG_EV_TX */
#if HCF_EXT_INT_TX_EX != HREG_EV_TX_EXC
err: these values should match;
#endif // HCF_EXT_INT_TX_EX / HREG_EV_TX_EXC
@@ -290,97 +253,97 @@ err: these values should match;
#endif // HCF_EXT_INT_TICK / HREG_EV_TICK
//************************* Host Software
-#define HREG_SW_0 0x28 //
-#define HREG_SW_1 0x2A //
-#define HREG_SW_2 0x2C //
-//rsrvd #define HREG_SW_3 0x2E //
+#define HREG_SW_0 0x28 //
+#define HREG_SW_1 0x2A //
+#define HREG_SW_2 0x2C //
+//rsrvd #define HREG_SW_3 0x2E //
//************************* Control and Auxiliary Port
-#define HREG_IO 0x12
-#define HREG_IO_SRESET 0x0001
-#define HREG_IO_WAKEUP_ASYNC 0x0002
-#define HREG_IO_WOKEN_UP 0x0004
-#define HREG_CNTL 0x14 //
-//#define HREG_CNTL_WAKEUP_SYNC 0x0001
-#define HREG_CNTL_AUX_ENA_STAT 0xC000
-#define HREG_CNTL_AUX_DIS_STAT 0x0000
-#define HREG_CNTL_AUX_ENA_CNTL 0x8000
-#define HREG_CNTL_AUX_DIS_CNTL 0x4000
-#define HREG_CNTL_AUX_DSD 0x2000
-#define HREG_CNTL_AUX_ENA (HREG_CNTL_AUX_ENA_CNTL | HREG_CNTL_AUX_DIS_CNTL )
-#define HREG_SPARE 0x16 //
-#define HREG_AUX_PAGE 0x3A //
-#define HREG_AUX_OFFSET 0x3C //
-#define HREG_AUX_DATA 0x3E //
+#define HREG_IO 0x12
+#define HREG_IO_SRESET 0x0001
+#define HREG_IO_WAKEUP_ASYNC 0x0002
+#define HREG_IO_WOKEN_UP 0x0004
+#define HREG_CNTL 0x14 //
+//#define HREG_CNTL_WAKEUP_SYNC 0x0001
+#define HREG_CNTL_AUX_ENA_STAT 0xC000
+#define HREG_CNTL_AUX_DIS_STAT 0x0000
+#define HREG_CNTL_AUX_ENA_CNTL 0x8000
+#define HREG_CNTL_AUX_DIS_CNTL 0x4000
+#define HREG_CNTL_AUX_DSD 0x2000
+#define HREG_CNTL_AUX_ENA (HREG_CNTL_AUX_ENA_CNTL | HREG_CNTL_AUX_DIS_CNTL )
+#define HREG_SPARE 0x16 //
+#define HREG_AUX_PAGE 0x3A //
+#define HREG_AUX_OFFSET 0x3C //
+#define HREG_AUX_DATA 0x3E //
#if HCF_DMA
//************************* DMA (bus mastering)
- // Be careful to use these registers only at a genuine 32 bits NIC
- // On 16 bits NICs, these addresses are mapped into the range 0x00 through 0x3F with all consequences
- // thereof, e.g. HREG_DMA_CTRL register maps to HREG_CMD.
-#define HREG_DMA_CTRL 0x0040
-#define HREG_TXDMA_PTR32 0x0044
-#define HREG_TXDMA_PRIO_PTR32 0x0048
-#define HREG_TXDMA_HIPRIO_PTR32 0x004C
-#define HREG_RXDMA_PTR32 0x0050
-#define HREG_CARDDETECT_1 0x007C // contains 7D37
-#define HREG_CARDDETECT_2 0x007E // contains 7DE7
-#define HREG_FREETIMER 0x0058
-#define HREG_DMA_RX_CNT 0x0026
+// Be careful to use these registers only at a genuine 32 bits NIC
+// On 16 bits NICs, these addresses are mapped into the range 0x00 through 0x3F with all consequences
+// thereof, e.g. HREG_DMA_CTRL register maps to HREG_CMD.
+#define HREG_DMA_CTRL 0x0040
+#define HREG_TXDMA_PTR32 0x0044
+#define HREG_TXDMA_PRIO_PTR32 0x0048
+#define HREG_TXDMA_HIPRIO_PTR32 0x004C
+#define HREG_RXDMA_PTR32 0x0050
+#define HREG_CARDDETECT_1 0x007C // contains 7D37
+#define HREG_CARDDETECT_2 0x007E // contains 7DE7
+#define HREG_FREETIMER 0x0058
+#define HREG_DMA_RX_CNT 0x0026
/******************************************************************************
-* Defines for the bits in the DmaControl register (@40h)
-******************************************************************************/
-#define HREG_DMA_CTRL_RXHWEN 0x80000000 // high word enable bit
-#define HREG_DMA_CTRL_RXRESET 0x40000000 // tx dma init bit
-#define HREG_DMA_CTRL_RXBAP1 BIT29
-#define HREG_DMA_CTRL_RX_STALLED BIT28
-#define HREG_DMA_CTRL_RXAUTOACK_DMADONE BIT27 // no host involvement req. for TDMADONE event
-#define HREG_DMA_CTRL_RXAUTOACK_INFO BIT26 // no host involvement req. for alloc event
-#define HREG_DMA_CTRL_RXAUTOACK_DMAEN 0x02000000 // no host involvement req. for TxDMAen event
-#define HREG_DMA_CTRL_RXAUTOACK_RX 0x01000000 // no host involvement req. for tx event
-#define HREG_DMA_CTRL_RX_BUSY BIT23 // read only bit
-//#define HREG_DMA_CTRL_RX_RBUFCONT_PLAIN 0 // bits 21..20
-//#define HREG_DMA_CTRL_RX_MODE_PLAIN_DMA 0 // mode 0
-#define HREG_DMA_CTRL_RX_MODE_SINGLE_PACKET 0x00010000 // mode 1
-#define HREG_DMA_CTRL_RX_MODE_MULTI_PACKET 0x00020000 // mode 2
-//#define HREG_DMA_CTRL_RX_MODE_DISABLE (0x00020000|0x00010000) // disable tx dma engine
-#define HREG_DMA_CTRL_TXHWEN 0x8000 // low word enable bit
-#define HREG_DMA_CTRL_TXRESET 0x4000 // rx dma init bit
-#define HREG_DMA_CTRL_TXBAP1 BIT13
-#define HREG_DMA_CTRL_TXAUTOACK_DMADONE BIT11 // no host involvement req. for RxDMADONE event
-#define HREG_DMA_CTRL_TXAUTOACK_DMAEN 0x00000400 // no host involvement req. for RxDMAen event
-#define HREG_DMA_CTRL_TXAUTOACK_DMAALLOC 0x00000200 // no host involvement req. for info event
-#define HREG_DMA_CTRL_TXAUTOACK_TX 0x00000100 // no host involvement req. for rx event
-#define HREG_DMA_CTRL_TX_BUSY BIT7 // read only bit
-//#define HREG_DMA_CTRL_TX_TBUFCONT_PLAIN 0 // bits 6..5
-//#define HREG_DMA_CTRL_TX_MODE_PLAIN_DMA 0 // mode 0
-#define HREG_DMA_CTRL_TX_MODE_SINGLE_PACKET BIT0 // mode 1
-#define HREG_DMA_CTRL_TX_MODE_MULTI_PACKET 0x00000002 // mode 2
-//#define HREG_DMA_CTRL_TX_MODE_DISABLE (0x00000001|0x00000002) // disable tx dma engine
+ * Defines for the bits in the DmaControl register (@40h)
+ ******************************************************************************/
+#define HREG_DMA_CTRL_RXHWEN 0x80000000 // high word enable bit
+#define HREG_DMA_CTRL_RXRESET 0x40000000 // tx dma init bit
+#define HREG_DMA_CTRL_RXBAP1 BIT29
+#define HREG_DMA_CTRL_RX_STALLED BIT28
+#define HREG_DMA_CTRL_RXAUTOACK_DMADONE BIT27 // no host involvement req. for TDMADONE event
+#define HREG_DMA_CTRL_RXAUTOACK_INFO BIT26 // no host involvement req. for alloc event
+#define HREG_DMA_CTRL_RXAUTOACK_DMAEN 0x02000000 // no host involvement req. for TxDMAen event
+#define HREG_DMA_CTRL_RXAUTOACK_RX 0x01000000 // no host involvement req. for tx event
+#define HREG_DMA_CTRL_RX_BUSY BIT23 // read only bit
+//#define HREG_DMA_CTRL_RX_RBUFCONT_PLAIN 0 // bits 21..20
+//#define HREG_DMA_CTRL_RX_MODE_PLAIN_DMA 0 // mode 0
+#define HREG_DMA_CTRL_RX_MODE_SINGLE_PACKET 0x00010000 // mode 1
+#define HREG_DMA_CTRL_RX_MODE_MULTI_PACKET 0x00020000 // mode 2
+//#define HREG_DMA_CTRL_RX_MODE_DISABLE (0x00020000|0x00010000) // disable tx dma engine
+#define HREG_DMA_CTRL_TXHWEN 0x8000 // low word enable bit
+#define HREG_DMA_CTRL_TXRESET 0x4000 // rx dma init bit
+#define HREG_DMA_CTRL_TXBAP1 BIT13
+#define HREG_DMA_CTRL_TXAUTOACK_DMADONE BIT11 // no host involvement req. for RxDMADONE event
+#define HREG_DMA_CTRL_TXAUTOACK_DMAEN 0x00000400 // no host involvement req. for RxDMAen event
+#define HREG_DMA_CTRL_TXAUTOACK_DMAALLOC 0x00000200 // no host involvement req. for info event
+#define HREG_DMA_CTRL_TXAUTOACK_TX 0x00000100 // no host involvement req. for rx event
+#define HREG_DMA_CTRL_TX_BUSY BIT7 // read only bit
+//#define HREG_DMA_CTRL_TX_TBUFCONT_PLAIN 0 // bits 6..5
+//#define HREG_DMA_CTRL_TX_MODE_PLAIN_DMA 0 // mode 0
+#define HREG_DMA_CTRL_TX_MODE_SINGLE_PACKET BIT0 // mode 1
+#define HREG_DMA_CTRL_TX_MODE_MULTI_PACKET 0x00000002 // mode 2
+//#define HREG_DMA_CTRL_TX_MODE_DISABLE (0x00000001|0x00000002) // disable tx dma engine
//configuration DWORD to configure DMA for mode2 operation, using BAP0 as the DMA BAP.
#define DMA_CTRLSTAT_GO (HREG_DMA_CTRL_RXHWEN | HREG_DMA_CTRL_RX_MODE_MULTI_PACKET | \
HREG_DMA_CTRL_RXAUTOACK_DMAEN | HREG_DMA_CTRL_RXAUTOACK_RX | \
HREG_DMA_CTRL_TXHWEN | /*;?HREG_DMA_CTRL_TX_TBUFCONT_PLAIN |*/ \
- HREG_DMA_CTRL_TX_MODE_MULTI_PACKET | HREG_DMA_CTRL_TXAUTOACK_DMAEN |\
+ HREG_DMA_CTRL_TX_MODE_MULTI_PACKET | HREG_DMA_CTRL_TXAUTOACK_DMAEN | \
HREG_DMA_CTRL_TXAUTOACK_DMAALLOC)
//configuration DWORD to reset both the Tx and Rx DMA engines
#define DMA_CTRLSTAT_RESET (HREG_DMA_CTRL_RXHWEN | HREG_DMA_CTRL_RXRESET | HREG_DMA_CTRL_TXHWEN | HREG_DMA_CTRL_TXRESET)
-//#define DESC_DMA_OWNED 0x80000000 // BIT31
-#define DESC_DMA_OWNED 0x8000 // BIT31
-#define DESC_SOP 0x8000 // BIT15
-#define DESC_EOP 0x4000 // BIT14
+//#define DESC_DMA_OWNED 0x80000000 // BIT31
+#define DESC_DMA_OWNED 0x8000 // BIT31
+#define DESC_SOP 0x8000 // BIT15
+#define DESC_EOP 0x4000 // BIT14
-#define DMA_RX 0
-#define DMA_TX 1
+#define DMA_RX 0
+#define DMA_TX 1
-// #define IFB_RxFirstDesc IFB_FirstDesc[DMA_RX]
-// #define IFB_TxFirstDesc IFB_FirstDesc[DMA_TX]
-// #define IFB_RxLastDesc IFB_LastDesc[DMA_RX]
-// #define IFB_TxLastDesc IFB_LastDesc[DMA_TX]
+// #define IFB_RxFirstDesc IFB_FirstDesc[DMA_RX]
+// #define IFB_TxFirstDesc IFB_FirstDesc[DMA_TX]
+// #define IFB_RxLastDesc IFB_LastDesc[DMA_RX]
+// #define IFB_TxLastDesc IFB_LastDesc[DMA_TX]
#endif // HCF_DMA
//
@@ -390,96 +353,96 @@ err: these values should match;
// Hermes Command Codes and Qualifier bits
-#define HCMD_BUSY 0x8000 // Busy bit, applicable for all commands
-#define HCMD_INI 0x0000 //
-#define HCMD_ENABLE HCF_CNTL_ENABLE // 0x0001
-#define HCMD_DISABLE HCF_CNTL_DISABLE // 0x0002
-#define HCMD_CONNECT HCF_CNTL_CONNECT // 0x0003
-#define HCMD_EXECUTE 0x0004 //
-#define HCMD_DISCONNECT HCF_CNTL_DISCONNECT // 0x0005
-#define HCMD_SLEEP 0x0006 //
-#define HCMD_CONTINUE HCF_CNTL_CONTINUE // 0x0007
-#define HCMD_RETRY 0x0100 // Retry bit
-#define HCMD_ALLOC 0x000A //
-#define HCMD_TX 0x000B //
-#define HCMD_RECL 0x0100 // Reclaim bit, applicable for Tx and Inquire
-#define HCMD_INQUIRE 0x0011 //
-#define HCMD_ACCESS 0x0021 //
-#define HCMD_ACCESS_WRITE 0x0100 // Write bit
-#define HCMD_PROGRAM 0x0022 //
-#define HCMD_READ_MIF 0x0030
-#define HCMD_WRITE_MIF 0x0031
-#define HCMD_THESEUS 0x0038
-#define HCMD_STARTPREAMBLE 0x0E00 // Start continuous preamble Tx
-#define HCMD_STOP 0x0F00 // Stop Theseus test mode
+#define HCMD_BUSY 0x8000 // Busy bit, applicable for all commands
+#define HCMD_INI 0x0000 //
+#define HCMD_ENABLE HCF_CNTL_ENABLE // 0x0001
+#define HCMD_DISABLE HCF_CNTL_DISABLE // 0x0002
+#define HCMD_CONNECT HCF_CNTL_CONNECT // 0x0003
+#define HCMD_EXECUTE 0x0004 //
+#define HCMD_DISCONNECT HCF_CNTL_DISCONNECT // 0x0005
+#define HCMD_SLEEP 0x0006 //
+#define HCMD_CONTINUE HCF_CNTL_CONTINUE // 0x0007
+#define HCMD_RETRY 0x0100 // Retry bit
+#define HCMD_ALLOC 0x000A //
+#define HCMD_TX 0x000B //
+#define HCMD_RECL 0x0100 // Reclaim bit, applicable for Tx and Inquire
+#define HCMD_INQUIRE 0x0011 //
+#define HCMD_ACCESS 0x0021 //
+#define HCMD_ACCESS_WRITE 0x0100 // Write bit
+#define HCMD_PROGRAM 0x0022 //
+#define HCMD_READ_MIF 0x0030
+#define HCMD_WRITE_MIF 0x0031
+#define HCMD_THESEUS 0x0038
+#define HCMD_STARTPREAMBLE 0x0E00 // Start continuous preamble Tx
+#define HCMD_STOP 0x0F00 // Stop Theseus test mode
//Configuration Management
//
-#define CFG_DRV_ACT_RANGES_PRI_3_BOTTOM 1 // Default Bottom Compatibility for Primary Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_PRI_3_TOP 1 // Default Top Compatibility for Primary Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_PRI_3_BOTTOM 1 // Default Bottom Compatibility for Primary Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_PRI_3_TOP 1 // Default Top Compatibility for Primary Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_HSI_4_BOTTOM 1 // Default Bottom Compatibility for H/W - driver I/F
-#define CFG_DRV_ACT_RANGES_HSI_4_TOP 1 // Default Top Compatibility for H/W - driver I/F
+#define CFG_DRV_ACT_RANGES_HSI_4_BOTTOM 1 // Default Bottom Compatibility for H/W - driver I/F
+#define CFG_DRV_ACT_RANGES_HSI_4_TOP 1 // Default Top Compatibility for H/W - driver I/F
-#define CFG_DRV_ACT_RANGES_HSI_5_BOTTOM 1 // Default Bottom Compatibility for H/W - driver I/F
-#define CFG_DRV_ACT_RANGES_HSI_5_TOP 1 // Default Top Compatibility for H/W - driver I/F
+#define CFG_DRV_ACT_RANGES_HSI_5_BOTTOM 1 // Default Bottom Compatibility for H/W - driver I/F
+#define CFG_DRV_ACT_RANGES_HSI_5_TOP 1 // Default Top Compatibility for H/W - driver I/F
#if (HCF_TYPE) & HCF_TYPE_WPA
-#define CFG_DRV_ACT_RANGES_APF_1_BOTTOM 16 // Default Bottom Compatibility for AP Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_APF_1_TOP 16 // Default Top Compatibility for AP Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_APF_1_BOTTOM 16 // Default Bottom Compatibility for AP Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_APF_1_TOP 16 // Default Top Compatibility for AP Firmware - driver I/F
#else //;? is this REALLY O.K.
-#define CFG_DRV_ACT_RANGES_APF_1_BOTTOM 1 // Default Bottom Compatibility for AP Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_APF_1_TOP 1 // Default Top Compatibility for AP Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_APF_1_BOTTOM 1 // Default Bottom Compatibility for AP Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_APF_1_TOP 1 // Default Top Compatibility for AP Firmware - driver I/F
#endif // HCF_TYPE_WPA
-#define CFG_DRV_ACT_RANGES_APF_2_BOTTOM 2 // Default Bottom Compatibility for AP Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_APF_2_TOP 2 // Default Top Compatibility for AP Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_APF_2_BOTTOM 2 // Default Bottom Compatibility for AP Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_APF_2_TOP 2 // Default Top Compatibility for AP Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_APF_3_BOTTOM 1 // Default Bottom Compatibility for AP Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_APF_3_TOP 1 // Default Top Compatibility for AP Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_APF_3_BOTTOM 1 // Default Bottom Compatibility for AP Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_APF_3_TOP 1 // Default Top Compatibility for AP Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_APF_4_BOTTOM 1 // Default Bottom Compatibility for AP Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_APF_4_TOP 1 // Default Top Compatibility for AP Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_APF_4_BOTTOM 1 // Default Bottom Compatibility for AP Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_APF_4_TOP 1 // Default Top Compatibility for AP Firmware - driver I/F
#if (HCF_TYPE) & HCF_TYPE_HII5
-#define CFG_DRV_ACT_RANGES_STA_2_BOTTOM 6 // Default Bottom Compatibility for Station Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_STA_2_TOP 6 // Default Top Compatibility for Station Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_STA_2_BOTTOM 6 // Default Bottom Compatibility for Station Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_STA_2_TOP 6 // Default Top Compatibility for Station Firmware - driver I/F
#else // (HCF_TYPE) & HCF_TYPE_HII5
-#define CFG_DRV_ACT_RANGES_STA_2_BOTTOM 1 // Default Bottom Compatibility for Station Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_STA_2_TOP 2 // Default Top Compatibility for Station Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_STA_2_BOTTOM 1 // Default Bottom Compatibility for Station Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_STA_2_TOP 2 // Default Top Compatibility for Station Firmware - driver I/F
#endif // (HCF_TYPE) & HCF_TYPE_HII5
-#define CFG_DRV_ACT_RANGES_STA_3_BOTTOM 1 // Default Bottom Compatibility for Station Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_STA_3_TOP 1 // Default Top Compatibility for Station Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_STA_3_BOTTOM 1 // Default Bottom Compatibility for Station Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_STA_3_TOP 1 // Default Top Compatibility for Station Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_STA_4_BOTTOM 1 // Default Bottom Compatibility for Station Firmware - driver I/F
-#define CFG_DRV_ACT_RANGES_STA_4_TOP 1 // Default Top Compatibility for Station Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_STA_4_BOTTOM 1 // Default Bottom Compatibility for Station Firmware - driver I/F
+#define CFG_DRV_ACT_RANGES_STA_4_TOP 1 // Default Top Compatibility for Station Firmware - driver I/F
//---------------------------------------------------------------------------------------------------------------------
#if defined HCF_CFG_PRI_1_TOP || defined HCF_CFG_PRI_1_BOTTOM
-err: PRI_1 not supported for H-I; // Compatibility for Primary Firmware - driver I/F
+err: PRI_1 not supported for H-I; // Compatibility for Primary Firmware - driver I/F
#endif // HCF_CFG_PRI_1_TOP / HCF_CFG_PRI_1_BOTTOM
#if defined HCF_CFG_PRI_2_TOP || defined HCF_CFG_PRI_2_BOTTOM
-err: PRI_2 not supported for H-I; // Compatibility for Primary Firmware - driver I/F
+err: PRI_2 not supported for H-I; // Compatibility for Primary Firmware - driver I/F
#endif // HCF_CFG_PRI_2_TOP / HCF_CFG_PRI_2_BOTTOM
-#ifdef HCF_CFG_PRI_3_TOP // Top Compatibility for Primary Firmware - driver I/F
-#if HCF_CFG_PRI_3_TOP == 0 || \
+#ifdef HCF_CFG_PRI_3_TOP // Top Compatibility for Primary Firmware - driver I/F
+#if HCF_CFG_PRI_3_TOP == 0 || \
CFG_DRV_ACT_RANGES_PRI_3_BOTTOM <= HCF_CFG_PRI_3_TOP && HCF_CFG_PRI_3_TOP <= CFG_DRV_ACT_RANGES_PRI_3_TOP
#undef CFG_DRV_ACT_RANGES_PRI_3_TOP
-#define CFG_DRV_ACT_RANGES_PRI_3_TOP HCF_CFG_PRI_3_TOP
+#define CFG_DRV_ACT_RANGES_PRI_3_TOP HCF_CFG_PRI_3_TOP
#else
err: ;
#endif
#endif // HCF_CFG_PRI_3_TOP
-#ifdef HCF_CFG_PRI_3_BOTTOM // Bottom Compatibility for Primary Firmware - driver I/F
+#ifdef HCF_CFG_PRI_3_BOTTOM // Bottom Compatibility for Primary Firmware - driver I/F
#if CFG_DRV_ACT_RANGES_PRI_3_BOTTOM <= HCF_CFG_PRI_3_BOTTOM && HCF_CFG_PRI_3_BOTTOM <= CFG_DRV_ACT_RANGES_PRI_3_TOP
#undef CFG_DRV_ACT_RANGES_PRI_3_BOTTOM
-#define CFG_DRV_ACT_RANGES_PRI_3_BOTTOM HCF_CFG_PRI_3_BOTTOM
+#define CFG_DRV_ACT_RANGES_PRI_3_BOTTOM HCF_CFG_PRI_3_BOTTOM
#else
err: ;
#endif
@@ -488,77 +451,77 @@ err: ;
//---------------------------------------------------------------------------------------------------------------------
#if defined HCF_CFG_HSI_0_TOP || defined HCF_CFG_HSI_0_BOTTOM
-err: HSI_0 not supported for H-I; // Compatibility for HSI I/F
+err: HSI_0 not supported for H-I; // Compatibility for HSI I/F
#endif // HCF_CFG_HSI_0_TOP / HCF_CFG_HSI_0_BOTTOM
#if defined HCF_CFG_HSI_1_TOP || defined HCF_CFG_HSI_1_BOTTOM
-err: HSI_1 not supported for H-I; // Compatibility for HSI I/F
+err: HSI_1 not supported for H-I; // Compatibility for HSI I/F
#endif // HCF_CFG_HSI_1_TOP / HCF_CFG_HSI_1_BOTTOM
#if defined HCF_CFG_HSI_2_TOP || defined HCF_CFG_HSI_2_BOTTOM
-err: HSI_2 not supported for H-I; // Compatibility for HSI I/F
+err: HSI_2 not supported for H-I; // Compatibility for HSI I/F
#endif // HCF_CFG_HSI_2_TOP / HCF_CFG_HSI_2_BOTTOM
#if defined HCF_CFG_HSI_3_TOP || defined HCF_CFG_HSI_3_BOTTOM
-err: HSI_3 not supported for H-I; // Compatibility for HSI I/F
+err: HSI_3 not supported for H-I; // Compatibility for HSI I/F
#endif // HCF_CFG_HSI_3_TOP / HCF_CFG_HSI_3_BOTTOM
-#ifdef HCF_CFG_HSI_4_TOP // Top Compatibility for HSI I/F
-#if HCF_CFG_HSI_4_TOP == 0 || \
+#ifdef HCF_CFG_HSI_4_TOP // Top Compatibility for HSI I/F
+#if HCF_CFG_HSI_4_TOP == 0 || \
CFG_DRV_ACT_RANGES_HSI_4_BOTTOM <= CF_CFG_HSI_4_TOP && HCF_CFG_HSI_4_TOP <= CFG_DRV_ACT_RANGES_HSI_4_TOP
#undef CFG_DRV_ACT_RANGES_HSI_4_TOP
-#define CFG_DRV_ACT_RANGES_HSI_4_TOP HCF_CFG_HSI_4_TOP
+#define CFG_DRV_ACT_RANGES_HSI_4_TOP HCF_CFG_HSI_4_TOP
#else
err: ;
#endif
#endif // HCF_CFG_HSI_4_TOP
-#ifdef HCF_CFG_HSI_4_BOTTOM // Bottom Compatibility for HSI I/F
+#ifdef HCF_CFG_HSI_4_BOTTOM // Bottom Compatibility for HSI I/F
#if CFG_DRV_ACT_RANGES_HSI_4_BOTTOM <= HCF_CFG_HSI_4_BOTTOM && HCF_CFG_HSI_4_BOTTOM <= CFG_DRV_ACT_RANGES_HSI_4_TOP
#undef CFG_DRV_ACT_RANGES_HSI_4_BOTTOM
-#define CFG_DRV_ACT_RANGES_HSI_4_BOTTOM HCF_CFG_HSI_4_BOTTOM
+#define CFG_DRV_ACT_RANGES_HSI_4_BOTTOM HCF_CFG_HSI_4_BOTTOM
#else
err: ;
#endif
#endif // HCF_CFG_HSI_4_BOTTOM
-#ifdef HCF_CFG_HSI_5_TOP // Top Compatibility for HSI I/F
-#if HCF_CFG_HSI_5_TOP == 0 || \
+#ifdef HCF_CFG_HSI_5_TOP // Top Compatibility for HSI I/F
+#if HCF_CFG_HSI_5_TOP == 0 || \
CFG_DRV_ACT_RANGES_HSI_5_BOTTOM <= CF_CFG_HSI_5_TOP && HCF_CFG_HSI_5_TOP <= CFG_DRV_ACT_RANGES_HSI_5_TOP
#undef CFG_DRV_ACT_RANGES_HSI_5_TOP
-#define CFG_DRV_ACT_RANGES_HSI_5_TOP HCF_CFG_HSI_5_TOP
+#define CFG_DRV_ACT_RANGES_HSI_5_TOP HCF_CFG_HSI_5_TOP
#else
err: ;
#endif
#endif // HCF_CFG_HSI_5_TOP
-#ifdef HCF_CFG_HSI_5_BOTTOM // Bottom Compatibility for HSI I/F
+#ifdef HCF_CFG_HSI_5_BOTTOM // Bottom Compatibility for HSI I/F
#if CFG_DRV_ACT_RANGES_HSI_5_BOTTOM <= HCF_CFG_HSI_5_BOTTOM && HCF_CFG_HSI_5_BOTTOM <= CFG_DRV_ACT_RANGES_HSI_5_TOP
#undef CFG_DRV_ACT_RANGES_HSI_5_BOTTOM
-#define CFG_DRV_ACT_RANGES_HSI_5_BOTTOM HCF_CFG_HSI_5_BOTTOM
+#define CFG_DRV_ACT_RANGES_HSI_5_BOTTOM HCF_CFG_HSI_5_BOTTOM
#else
err: ;
#endif
#endif // HCF_CFG_HSI_5_BOTTOM
//---------------------------------------------------------------------------------------------------------------------
#if defined HCF_CFG_APF_1_TOP || defined HCF_CFG_APF_1_BOTTOM
-err: APF_1 not supported for H-I; // Compatibility for AP Firmware - driver I/F
+err: APF_1 not supported for H-I; // Compatibility for AP Firmware - driver I/F
#endif // HCF_CFG_APF_1_TOP / HCF_CFG_APF_1_BOTTOM
-#ifdef HCF_CFG_APF_2_TOP // Top Compatibility for AP Firmware - driver I/F
-#if HCF_CFG_APF_2_TOP == 0 || \
+#ifdef HCF_CFG_APF_2_TOP // Top Compatibility for AP Firmware - driver I/F
+#if HCF_CFG_APF_2_TOP == 0 || \
CFG_DRV_ACT_RANGES_APF_2_BOTTOM <= HCF_CFG_APF_2_TOP && HCF_CFG_APF_2_TOP <= CFG_DRV_ACT_RANGES_APF_2_TOP
#undef CFG_DRV_ACT_RANGES_APF_2_TOP
-#define CFG_DRV_ACT_RANGES_APF_2_TOP HCF_CFG_APF_2_TOP
+#define CFG_DRV_ACT_RANGES_APF_2_TOP HCF_CFG_APF_2_TOP
#else
err: ;
#endif
#endif // HCF_CFG_APF_TOP
-#ifdef HCF_CFG_APF_2_BOTTOM // Bottom Compatibility for AP Firmware - driver I/F
+#ifdef HCF_CFG_APF_2_BOTTOM // Bottom Compatibility for AP Firmware - driver I/F
#if CFG_DRV_ACT_RANGES_APF_2_BOTTOM <= HCF_CFG_APF_2_BOTTOM && HCF_CFG_APF_2_BOTTOM <= CFG_DRV_ACT_RANGES_APF_2_TOP
#undef CFG_DRV_ACT_RANGES_APF_2_BOTTOM
-#define CFG_DRV_ACT_RANGES_APF_2_BOTTOM HCF_CFG_APF_2_BOTTOM
+#define CFG_DRV_ACT_RANGES_APF_2_BOTTOM HCF_CFG_APF_2_BOTTOM
#else
err: ;
#endif
@@ -566,23 +529,23 @@ err: ;
//---------------------------------------------------------------------------------------------------------------------
#if defined HCF_CFG_STA_1_TOP || defined HCF_CFG_STA_1_BOTTOM
-err: STA_1 not supported for H-I; // Compatibility for Station Firmware - driver I/F
+err: STA_1 not supported for H-I; // Compatibility for Station Firmware - driver I/F
#endif // HCF_CFG_STA_1_TOP / HCF_CFG_STA_1_BOTTOM
-#ifdef HCF_CFG_STA_2_TOP // Top Compatibility for Station Firmware - driver I/F
-#if HCF_CFG_STA_2_TOP == 0 || \
+#ifdef HCF_CFG_STA_2_TOP // Top Compatibility for Station Firmware - driver I/F
+#if HCF_CFG_STA_2_TOP == 0 || \
CFG_DRV_ACT_RANGES_STA_2_BOTTOM <= HCF_CFG_STA_2_TOP && HCF_CFG_STA_2_TOP <= CFG_DRV_ACT_RANGES_STA_2_TOP
#undef CFG_DRV_ACT_RANGES_STA_2_TOP
-#define CFG_DRV_ACT_RANGES_STA_2_TOP HCF_CFG_STA_2_TOP
+#define CFG_DRV_ACT_RANGES_STA_2_TOP HCF_CFG_STA_2_TOP
#else
err: ;
#endif
#endif // HCF_CFG_STA_TOP
-#ifdef HCF_CFG_STA_2_BOTTOM // Bottom Compatibility for Station Firmware - driver I/F
+#ifdef HCF_CFG_STA_2_BOTTOM // Bottom Compatibility for Station Firmware - driver I/F
#if CFG_DRV_ACT_RANGES_STA_2_BOTTOM <= HCF_CFG_STA_2_BOTTOM && HCF_CFG_STA_2_BOTTOM <= CFG_DRV_ACT_RANGES_STA_2_TOP
#undef CFG_DRV_ACT_RANGES_STA_2_BOTTOM
-#define CFG_DRV_ACT_RANGES_STA_2_BOTTOM HCF_CFG_STA_2_BOTTOM
+#define CFG_DRV_ACT_RANGES_STA_2_BOTTOM HCF_CFG_STA_2_BOTTOM
#else
err: ;
#endif
@@ -594,91 +557,81 @@ err: ;
/************************************************************************************************/
#ifdef HCF_SLEEP
-#if defined MSF_WAIT
-err: MSF should no longer supply this macro;
-#else
-#define MSF_WAIT(x) \
- { PROT_CNT_INI \
- HCF_WAIT_WHILE( ( IPW( HREG_IO ) & HREG_IO_WOKEN_UP ) == 0 ); \
- HCFASSERT( prot_cnt, IPW( HREG_IO ) ) \
- }
-#endif // MSF_WAIT
+#define MSF_WAIT(x) do { \
+ PROT_CNT_INI; \
+ HCF_WAIT_WHILE((IPW(HREG_IO) & HREG_IO_WOKEN_UP) == 0); \
+ HCFASSERT( prot_cnt, IPW( HREG_IO ) ); \
+ } while (0)
#else
-#define MSF_WAIT(x) /*NOP*/
+#define MSF_WAIT(x) do { } while (0)
#endif // HCF_SLEEP
-#define LOF(x) (sizeof(x)/sizeof(hcf_16)-1)
-
-#define MUL_BY_2( x ) ( (x) << 1 ) //used to multiply by 2
-#define DIV_BY_2( x ) ( (x) >> 1 ) //used to divide by 2
+#define LOF(x) (sizeof(x)/sizeof(hcf_16)-1)
//resolve problems on for some 16 bits compilers to create 32 bit values
-#define MERGE_2( hw, lw ) ( ( ((hcf_32)(hw)) << 16 ) | ((hcf_16)(lw)) )
+#define MERGE_2( hw, lw ) ( ( ((hcf_32)(hw)) << 16 ) | ((hcf_16)(lw)) )
#if ! defined HCF_STATIC
-#define HCF_STATIC static
-#endif // HCF_STATIC
+#define HCF_STATIC static
+#endif // HCF_STATIC
#if ( (HCF_TYPE) & HCF_TYPE_HII5 ) == 0
-#define DAWA_ACK( mask) { \
- OPW( HREG_EV_ACK, mask | HREG_EV_ACK_REG_READY ); \
- OPW( HREG_EV_ACK, (mask & ~HREG_EV_ALLOC) | HREG_EV_ACK_REG_READY ); }
-#define DAWA_ZERO_FID(reg) OPW( reg, 0 );
+#define DAWA_ACK( mask) do { \
+ OPW( HREG_EV_ACK, mask | HREG_EV_ACK_REG_READY ); \
+ OPW( HREG_EV_ACK, (mask & ~HREG_EV_ALLOC) | HREG_EV_ACK_REG_READY ); \
+ } while (0)
+#define DAWA_ZERO_FID(reg) OPW( reg, 0 )
#else
-#define DAWA_ACK( mask) OPW( HREG_EV_ACK, mask );
-#define DAWA_ZERO_FID(reg)
+#define DAWA_ACK( mask) OPW( HREG_EV_ACK, mask )
+#define DAWA_ZERO_FID(reg) do { } while (0)
#endif // HCF_TYPE_HII5
#if (HCF_TYPE) & HCF_TYPE_WPA
-#define CALC_RX_MIC( p, len ) calc_mic_rx_frag( ifbp, p, len )
-#define CALC_TX_MIC( p, len ) calc_mic_tx_frag( ifbp, p, len )
-#define IF_SSN(x) x
-#define IF_NOT_SSN(x)
+#define CALC_RX_MIC( p, len ) calc_mic_rx_frag( ifbp, p, len )
+#define CALC_TX_MIC( p, len ) calc_mic_tx_frag( ifbp, p, len )
#else
-#define CALC_RX_MIC( p, len )
-#define CALC_TX_MIC( p, len )
+#define CALC_RX_MIC( p, len )
+#define CALC_TX_MIC( p, len )
#define MIC_RX_RTN( mic, dw )
#define MIC_TX_RTN( mic, dw )
-#define IF_SSN(x)
-#define IF_NOT_SSN(x) x
#endif // HCF_TYPE_WPA
-#if HCF_TALLIES & HCF_TALLIES_HCF //HCF tally support
-#define IF_TALLY(x) x
+#if HCF_TALLIES & HCF_TALLIES_HCF //HCF tally support
+#define IF_TALLY(x) do { x; } while (0)
#else
-#define IF_TALLY(x)
+#define IF_TALLY(x) do { } while (0)
#endif // HCF_TALLIES_HCF
#if HCF_DMA
-#define IF_DMA(x) x
-#define IF_NOT_DMA(x)
-#define IF_USE_DMA(x) if ( ifbp->IFB_CntlOpt & USE_DMA ) x
-#define IF_NOT_USE_DMA(x) if ( !(ifbp->IFB_CntlOpt & USE_DMA) ) x
+#define IF_DMA(x) do { x; } while(0)
+#define IF_NOT_DMA(x) do { } while(0)
+#define IF_USE_DMA(x) if ( ifbp->IFB_CntlOpt & USE_DMA ) { x; }
+#define IF_NOT_USE_DMA(x) if ( !(ifbp->IFB_CntlOpt & USE_DMA) ) { x; }
#else
-#define IF_DMA(x)
-#define IF_NOT_DMA(x) x
-#define IF_USE_DMA(x)
-#define IF_NOT_USE_DMA(x) x
+#define IF_DMA(x) do { } while(0)
+#define IF_NOT_DMA(x) do { x; } while(0)
+#define IF_USE_DMA(x) do { } while(0)
+#define IF_NOT_USE_DMA(x) do { x; } while(0)
#endif // HCF_DMA
#define IPW(x) ((hcf_16)IN_PORT_WORD( ifbp->IFB_IOBase + (x) ) )
#define OPW(x, y) OUT_PORT_WORD( ifbp->IFB_IOBase + (x), y )
- /* make sure the implementation of HCF_WAIT_WHILE is such that there may be multiple HCF_WAIT_WHILE calls
- * in a row and that when one fails all subsequent fail immediately without reinitialization of prot_cnt
- */
+/* make sure the implementation of HCF_WAIT_WHILE is such that there may be multiple HCF_WAIT_WHILE calls
+ * in a row and that when one fails all subsequent fail immediately without reinitialization of prot_cnt
+ */
#if HCF_PROT_TIME == 0
-#define PROT_CNT_INI
-#define IF_PROT_TIME(x)
+#define PROT_CNT_INI do { } while(0)
+#define IF_PROT_TIME(x) do { } while(0)
#if defined HCF_YIELD
-#define HCF_WAIT_WHILE( x ) while ( (x) && (HCF_YIELD) ) /*NOP*/;
+#define HCF_WAIT_WHILE( x ) do { } while( (x) && (HCF_YIELD) )
#else
-#define HCF_WAIT_WHILE( x ) while ( x ) /*NOP*/;
+#define HCF_WAIT_WHILE( x ) do { } while ( x )
#endif // HCF_YIELD
#else
-#define PROT_CNT_INI hcf_32 prot_cnt = ifbp->IFB_TickIni;
-#define IF_PROT_TIME(x) x
+#define PROT_CNT_INI hcf_32 prot_cnt = ifbp->IFB_TickIni
+#define IF_PROT_TIME(x) do { x; } while(0)
#if defined HCF_YIELD
#define HCF_WAIT_WHILE( x ) while ( prot_cnt && (x) && (HCF_YIELD) ) prot_cnt--;
#else
@@ -697,7 +650,7 @@ err: you used an invalid bitmask;
#endif // HCF_EX_INT
#if 0 //get compiler going
-#if HCF_EX_INT_TICK != HREG_EV_TICK
+#if HCF_EX_INT_TICK != HREG_EV_TICK
;? out dated checking
err: someone redefined these macros while the implemenation assumes they are equal;
#endif
@@ -721,39 +674,37 @@ err: someone redefined these macros while the implemenation assumes they are equ
* on the if-statement
*/
#if HCF_ASSERT
-#define HCFASSERT(x,q) {if (!(x)) {mdd_assert( ifbp, __LINE__ , q );}}
+#define HCFASSERT(x,q) do { if (!(x)) {mdd_assert(ifbp, __LINE__, q );} } while(0)
#define MMDASSERT(x,q) {if (!(x)) {mdd_assert( assert_ifbp, __LINE__ + FILE_NAME_OFFSET, q );}}
-#define HCFLOGENTRY( where, what ) \
-{if ( (ifbp->IFB_AssertWhere = where) <= 15 ) { \
- HCF_ENTRY( ifbp ); \
- HCFASSERT( (ifbp->IFB_AssertTrace & 1<<((where)&0xF)) == 0, ifbp->IFB_AssertTrace ); \
- ifbp->IFB_AssertTrace |= 1<<((where)&0xF); \
- } \
-HCFTRACE(ifbp, where ) \
-HCFTRACEVALUE(ifbp, what ) \
-}
-
-#define HCFLOGEXIT( where ) \
-{if ( (ifbp->IFB_AssertWhere = where) <= 15 ) { \
- HCF_EXIT( ifbp ); \
- ifbp->IFB_AssertTrace &= ~(1<<((where)&0xF)); \
- } \
-HCFTRACE(ifbp, (where)|HCF_TRACE_EXIT ) \
-}
+#define HCFLOGENTRY( where, what ) do { \
+ if ( (ifbp->IFB_AssertWhere = where) <= 15 ) { \
+ HCFASSERT( (ifbp->IFB_AssertTrace & 1<<((where)&0xF)) == 0, ifbp->IFB_AssertTrace ); \
+ ifbp->IFB_AssertTrace |= 1<<((where)&0xF); \
+ } \
+ HCFTRACE(ifbp, where ); \
+ HCFTRACEVALUE(ifbp, what ); \
+ } while (0)
+
+#define HCFLOGEXIT( where ) do { \
+ if ( (ifbp->IFB_AssertWhere = where) <= 15 ) { \
+ ifbp->IFB_AssertTrace &= ~(1<<((where)&0xF)); \
+ } \
+ HCFTRACE(ifbp, (where)|HCF_TRACE_EXIT ); \
+ } while (0)
#else // HCF_ASSERT
-#define HCFASSERT( x, q )
+#define HCFASSERT( x, q ) do { } while(0)
#define MMDASSERT( x, q )
-#define HCFLOGENTRY( where, what ) {HCF_ENTRY( ifbp );}
-#define HCFLOGEXIT( where ) {HCF_EXIT( ifbp );}
+#define HCFLOGENTRY( where, what ) do { } while(0)
+#define HCFLOGEXIT( where ) do { } while(0)
#endif // HCF_ASSERT
#if HCF_INT_ON
/* ;? HCFASSERT_INT
* #if (HCF_SLEEP) & HCF_DDS
* #define HCFASSERT_INT HCFASSERT( ifbp->IFB_IntOffCnt != 0xFFFF && ifbp->IFB_IntOffCnt != 0xFFFE, \
- * ifbp->IFB_IntOffCnt )
+ * ifbp->IFB_IntOffCnt )
* #else
*/
#define HCFASSERT_INT HCFASSERT( ifbp->IFB_IntOffCnt != 0xFFFF, ifbp->IFB_IntOffCnt )
@@ -764,18 +715,18 @@ HCFTRACE(ifbp, (where)|HCF_TRACE_EXIT ) \
#if defined HCF_TRACE
-#define HCFTRACE(ifbp, where ) {OPW( HREG_SW_1, where );}
-//#define HCFTRACE(ifbp, where ) {HCFASSERT( DO_ASSERT, where );}
-#define HCFTRACEVALUE(ifbp, what ) {OPW( HREG_SW_2, what );}
+#define HCFTRACE(ifbp, where ) do {OPW( HREG_SW_1, where );} while(0)
+//#define HCFTRACE(ifbp, where ) {HCFASSERT( DO_ASSERT, where );}
+#define HCFTRACEVALUE(ifbp, what ) do {OPW( HREG_SW_2, what );} while (0)
//#define HCFTRACEVALUE(ifbp, what ) {HCFASSERT( DO_ASSERT, what );}
#else
-#define HCFTRACE(ifbp, where )
-#define HCFTRACEVALUE(ifbp, what )
+#define HCFTRACE(ifbp, where ) do { } while(0)
+#define HCFTRACEVALUE(ifbp, what ) do { } while(0)
#endif // HCF_TRACE
#if HCF_BIG_ENDIAN
-#define BE_PAR(x) ,x
+#define BE_PAR(x) ,x
#else
#define BE_PAR(x)
#endif // HCF_BIG_ENDIAN
@@ -789,13 +740,13 @@ HCFTRACE(ifbp, (where)|HCF_TRACE_EXIT ) \
/************************************************************************************************/
#if HCF_ASSERT
-extern IFBP BASED assert_ifbp; //to make asserts easily work under MMD and DHF
-EXTERN_C void mdd_assert (IFBP ifbp, unsigned int line_number, hcf_32 q );
+extern IFBP BASED assert_ifbp; //to make asserts easily work under MMD and DHF
+EXTERN_C void mdd_assert (IFBP ifbp, unsigned int line_number, hcf_32 q );
#endif //HCF_ASSERT
-#if ! ( (HCF_IO) & HCF_IO_32BITS ) // defined 16 bits only
+#if ! ( (HCF_IO) & HCF_IO_32BITS ) // defined 16 bits only
#undef OUT_PORT_STRING_32
#undef IN_PORT_STRING_32
#endif // HCF_IO
-#endif //HCFDEFC_H
+#endif //HCFDEFC_H
diff --git a/drivers/staging/wlags49_h2/mdd.h b/drivers/staging/wlags49_h2/mdd.h
index 5aa9eb846de..b02e3ea9e47 100644
--- a/drivers/staging/wlags49_h2/mdd.h
+++ b/drivers/staging/wlags49_h2/mdd.h
@@ -444,7 +444,7 @@ XX1( CFG_DEFAULT_KEYS, KEY_STRCT, key[4] ) /*defines set of encryption keys
tx_mic_key[4], rx_mic_key[4] ) /* */
X6( CFG_ADD_TKIP_MAPPED_KEY, bssid[3], tkip_key[8], \
tsc[4], rsc[4], tx_mic_key[4], rx_mic_key[4] ) /* */
- X1( CFG_SET_SSN_AUTHENTICATION_SUITE, \
+ X1( CFG_SET_WPA_AUTHENTICATION_SUITE, \
ssn_authentication_suite ) /* */
X1( CFG_REMOVE_TKIP_DEFAULT_KEY,tkip_key_id ) /* */
X1( CFG_TICK_TIME, tick_time ) /*Auxiliary Timer tick interval */
@@ -525,7 +525,7 @@ X2( CFG_WOL_PATTERNS, nPatterns, buffer[WOL_BUF_SIZE] ) /*[STA] WakeOnLan pat
X1( CFG_OWN_MAC_ADDR, mac_addr[3] ) /*[AP] Unique local node MAC Address */
X3( CFG_PCF_INFO, medium_occupancy_limit, \
cfp_period, cfp_max_duration ) /*[AP] Point Coordination Function capability info */
- X1( CFG_CUR_SSN_INFO_ELEMENT, ssn_info_element[1] ) /* */
+ X1( CFG_CUR_WPA_INFO_ELEMENT, ssn_info_element[1] ) /* */
X4( CFG_CUR_TKIP_IV_INFO, \
tkip_seq_cnt0[4], tkip_seq_cnt1[4], \
tkip_seq_cnt2[4], tkip_seq_cnt3[4] ) /* */
@@ -802,7 +802,7 @@ XX1( CFG_SCAN, SCAN_RS_STRCT, scan_result[32] ) /*Scan results *
#define CFG_PCF_INFO 0xFD87 //[AP] Point Coordination Function capability info
//*RESERVED* #define CFG_HIGHEST_BASIC_RATE 0xFD88 //
#define CFG_CUR_COUNTRY_INFO 0xFD89 //
-#define CFG_CUR_SSN_INFO_ELEMENT 0xFD8A //
+#define CFG_CUR_WPA_INFO_ELEMENT 0xFD8A //
#define CFG_CUR_TKIP_IV_INFO 0xFD8B //
#define CFG_CUR_ASSOC_REQ_INFO 0xFD8C //
#define CFG_CUR_ASSOC_RESP_INFO 0xFD8D //
@@ -893,20 +893,20 @@ XX1( CFG_SCAN, SCAN_RS_STRCT, scan_result[32] ) /*Scan results *
//HFS_TX_CNTL
/* Note that the HCF_.... System Constants influence the HFS_.... values below
- * H-I H-I | H-II H-II H-II.5
- * WPA | WPA
- * HFS_TX_CNTL_TX_OK 0002 0002 | 0002 0002 N/A <<<<<<<<deprecated
- * HFS_TX_CNTL_TX_EX 0004 0004 | 0004 0004 N/A
- * HFS_TX_CNTL_MIC N/A 0010 | N/A 0010 N/A
- * HFS_TX_CNTL_TID N/A N/A | N/A N/A 000F
- * HFS_TX_CNTL_SERVICE_CLASS N/A N/A | N/A N/A 00C0
- * HFS_TX_CNTL_PORT 0700 0700 | 0700 0700 0700
- * HFS_TX_CNTL_MIC_KEY_ID 1800 1800 | 0000 1800 N/A
- * HFS_TX_CNTL_CKIP 0000 0000 | 0000 2000 2000
- * HFS_TX_CNTL_TX_DELAY 4000 4000 | 4000 4000 N/A
- * HFS_TX_CNTL_ACTION N/A N/A | N/A N/A 4000
- * ==== ==== | ==== ==== ====
- * 5F06 5F16 | 4706 7F06 67CF
+ * H-I H-I | H-II H-II H-II.5
+ * WPA | WPA
+ * HFS_TX_CNTL_TX_OK 0002 0002 | 0002 0002 N/A <<<<<<<<deprecated
+ * HFS_TX_CNTL_TX_EX 0004 0004 | 0004 0004 N/A
+ * HFS_TX_CNTL_MIC N/A 0010 | N/A 0010 N/A
+ * HFS_TX_CNTL_TID N/A N/A | N/A N/A 000F
+ * HFS_TX_CNTL_SERVICE_CLASS N/A N/A | N/A N/A 00C0
+ * HFS_TX_CNTL_PORT 0700 0700 | 0700 0700 0700
+ * HFS_TX_CNTL_MIC_KEY_ID 1800 1800 | 0000 1800 N/A
+ * HFS_TX_CNTL_CKIP 0000 0000 | 0000 2000 2000
+ * HFS_TX_CNTL_TX_DELAY 4000 4000 | 4000 4000 N/A
+ * HFS_TX_CNTL_ACTION N/A N/A | N/A N/A 4000
+ * ==== ==== | ==== ==== ====
+ * 5F06 5F16 | 4706 7F06 67CF
*
* HCF_TX_CNTL_MASK specifies the bits allowed on the Host I/F
* note: bit 0x4000 has different meaning for H-II and H-II.5
diff --git a/drivers/staging/wlags49_h2/wl_cs.c b/drivers/staging/wlags49_h2/wl_cs.c
index a3a727c3b40..321580267fe 100644
--- a/drivers/staging/wlags49_h2/wl_cs.c
+++ b/drivers/staging/wlags49_h2/wl_cs.c
@@ -501,117 +501,3 @@ int wl_adapter_is_open(struct net_device *dev)
return link->open;
} /* wl_adapter_is_open */
/*============================================================================*/
-
-
-#if DBG
-
-/*******************************************************************************
- * DbgEvent()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Converts the card serivces events to text for debugging.
- *
- * PARAMETERS:
- *
- * mask - a integer representing the error(s) being reported by Card
- * Services.
- *
- * RETURNS:
- *
- * a pointer to a string describing the error(s)
- *
- ******************************************************************************/
-const char *DbgEvent(int mask)
-{
- static char DbgBuffer[256];
- char *pBuf;
- /*--------------------------------------------------------------------*/
-
- pBuf = DbgBuffer;
- *pBuf = '\0';
-
-
- if (mask & CS_EVENT_WRITE_PROTECT)
- strcat(pBuf, "WRITE_PROTECT ");
-
- if (mask & CS_EVENT_CARD_LOCK)
- strcat(pBuf, "CARD_LOCK ");
-
- if (mask & CS_EVENT_CARD_INSERTION)
- strcat(pBuf, "CARD_INSERTION ");
-
- if (mask & CS_EVENT_CARD_REMOVAL)
- strcat(pBuf, "CARD_REMOVAL ");
-
- if (mask & CS_EVENT_BATTERY_DEAD)
- strcat(pBuf, "BATTERY_DEAD ");
-
- if (mask & CS_EVENT_BATTERY_LOW)
- strcat(pBuf, "BATTERY_LOW ");
-
- if (mask & CS_EVENT_READY_CHANGE)
- strcat(pBuf, "READY_CHANGE ");
-
- if (mask & CS_EVENT_CARD_DETECT)
- strcat(pBuf, "CARD_DETECT ");
-
- if (mask & CS_EVENT_RESET_REQUEST)
- strcat(pBuf, "RESET_REQUEST ");
-
- if (mask & CS_EVENT_RESET_PHYSICAL)
- strcat(pBuf, "RESET_PHYSICAL ");
-
- if (mask & CS_EVENT_CARD_RESET)
- strcat(pBuf, "CARD_RESET ");
-
- if (mask & CS_EVENT_REGISTRATION_COMPLETE)
- strcat(pBuf, "REGISTRATION_COMPLETE ");
-
- /* if (mask & CS_EVENT_RESET_COMPLETE)
- strcat(pBuf, "RESET_COMPLETE "); */
-
- if (mask & CS_EVENT_PM_SUSPEND)
- strcat(pBuf, "PM_SUSPEND ");
-
- if (mask & CS_EVENT_PM_RESUME)
- strcat(pBuf, "PM_RESUME ");
-
- if (mask & CS_EVENT_INSERTION_REQUEST)
- strcat(pBuf, "INSERTION_REQUEST ");
-
- if (mask & CS_EVENT_EJECTION_REQUEST)
- strcat(pBuf, "EJECTION_REQUEST ");
-
- if (mask & CS_EVENT_MTD_REQUEST)
- strcat(pBuf, "MTD_REQUEST ");
-
- if (mask & CS_EVENT_ERASE_COMPLETE)
- strcat(pBuf, "ERASE_COMPLETE ");
-
- if (mask & CS_EVENT_REQUEST_ATTENTION)
- strcat(pBuf, "REQUEST_ATTENTION ");
-
- if (mask & CS_EVENT_CB_DETECT)
- strcat(pBuf, "CB_DETECT ");
-
- if (mask & CS_EVENT_3VCARD)
- strcat(pBuf, "3VCARD ");
-
- if (mask & CS_EVENT_XVCARD)
- strcat(pBuf, "XVCARD ");
-
-
- if (*pBuf) {
- pBuf[strlen(pBuf) - 1] = '\0';
- } else {
- if (mask != 0x0)
- sprintf(pBuf, "<<0x%08x>>", mask);
- }
-
- return pBuf;
-} /* DbgEvent */
-/*============================================================================*/
-
-#endif /* DBG */
diff --git a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h
index b61c9eb75ad..57534083405 100644
--- a/drivers/staging/wlags49_h2/wl_internal.h
+++ b/drivers/staging/wlags49_h2/wl_internal.h
@@ -74,15 +74,8 @@
#include <pcmcia/ds.h>
#endif // BUS_PCMCIA
-#ifdef HAS_WIRELESS_EXTENSIONS
#include <linux/wireless.h>
-#if WIRELESS_EXT > 13
#include <net/iw_handler.h>
-#endif // WIRELESS_EXT > 13
-#define USE_DBM
-#define RETURN_CURRENT_NETWORKNAME
-#define USE_FREQUENCY
-#endif // HAS_WIRELESS_EXTENSIONS/
#include <linux/list.h>
@@ -890,7 +883,7 @@ struct wl_private
int is_registered;
int is_handling_int;
int firmware_present;
- char sysfsCreated;
+ bool sysfsCreated;
CFG_DRV_INFO_STRCT driverInfo;
CFG_IDENTITY_STRCT driverIdentity;
CFG_FW_IDENTITY_STRCT StationIdentity;
@@ -987,6 +980,12 @@ struct wl_private
#ifdef USE_WDS
WVLAN_WDS_IF wds_port[NUM_WDS_PORTS];
#endif // USE_WDS
+
+ /* Track whether the card is using WEP encryption or WPA
+ * so we know what to disable next time through.
+ * IW_ENCODE_ALG_NONE, IW_ENCODE_ALG_WEP, IW_ENCODE_ALG_TKIP
+ */
+ int wext_enc;
}; // wl_private
#define wl_priv(dev) ((struct wl_private *) netdev_priv(dev))
diff --git a/drivers/staging/wlags49_h2/wl_main.c b/drivers/staging/wlags49_h2/wl_main.c
index 6d45ab3f027..483eee1bf63 100644
--- a/drivers/staging/wlags49_h2/wl_main.c
+++ b/drivers/staging/wlags49_h2/wl_main.c
@@ -1993,8 +1993,10 @@ int wl_put_ltv( struct wl_private *lp )
lp->ltvRecord.typ = CFG_SET_WPA_AUTH_KEY_MGMT_SUITE;
lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->AuthKeyMgmtSuite );
hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
- /* WEP Keys */
- wl_set_wep_keys( lp );
+
+ /* If WEP (or no) keys are being used, write (or clear) them */
+ if (lp->wext_enc != IW_ENCODE_ALG_TKIP)
+ wl_set_wep_keys(lp);
/* Country Code */
/* countryInfo, ltvCountryInfo, CFG_CNF_COUNTRY_INFO */
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index cf917e613f2..5a2b334f206 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -217,7 +217,7 @@ int wl_config( struct net_device *dev, struct ifmap *map )
/* The only thing we care about here is a port change. Since this not needed,
ignore the request. */
- DBG_TRACE( DbgInfo, "%s: %s called.\n", dev->name, __FUNC__ );
+ DBG_TRACE(DbgInfo, "%s: %s called.\n", dev->name, __func__);
DBG_LEAVE( DbgInfo );
return 0;
@@ -1179,7 +1179,7 @@ static const struct net_device_ops wl_netdev_ops =
.ndo_set_config = &wl_config,
.ndo_get_stats = &wl_stats,
- .ndo_set_multicast_list = &wl_multicast,
+ .ndo_set_rx_mode = &wl_multicast,
.ndo_init = &wl_insert,
.ndo_open = &wl_adapter_open,
diff --git a/drivers/staging/wlags49_h2/wl_sysfs.c b/drivers/staging/wlags49_h2/wl_sysfs.c
index 9b833b30ae6..1508f04b3c6 100644
--- a/drivers/staging/wlags49_h2/wl_sysfs.c
+++ b/drivers/staging/wlags49_h2/wl_sysfs.c
@@ -120,17 +120,19 @@ static struct attribute_group wlags_group = {
void register_wlags_sysfs(struct net_device *net)
{
- struct device *dev = &(net->dev);
- struct wl_private *lp = wl_priv(net);
-
- lp->sysfsCreated = sysfs_create_group(&dev->kobj, &wlags_group);
+ struct device *dev = &(net->dev);
+ struct wl_private *lp = wl_priv(net);
+ int err;
+ err = sysfs_create_group(&dev->kobj, &wlags_group);
+ if (!err)
+ lp->sysfsCreated = true;
}
void unregister_wlags_sysfs(struct net_device *net)
{
- struct device *dev = &(net->dev);
- struct wl_private *lp = wl_priv(net);
+ struct device *dev = &(net->dev);
+ struct wl_private *lp = wl_priv(net);
- if (lp->sysfsCreated)
- sysfs_remove_group(&dev->kobj, &wlags_group);
+ if (lp->sysfsCreated)
+ sysfs_remove_group(&dev->kobj, &wlags_group);
}
diff --git a/drivers/staging/wlags49_h2/wl_version.h b/drivers/staging/wlags49_h2/wl_version.h
index a5faada136d..fd37040afd0 100644
--- a/drivers/staging/wlags49_h2/wl_version.h
+++ b/drivers/staging/wlags49_h2/wl_version.h
@@ -143,7 +143,8 @@ err: define bus type;
" for " BUS_TYPE ", " \
MODULE_DATE " by " VENDOR_NAME
-#define WIRELESS_SUPPORT 15 // The version of wireless extensions we support
+/* The version of wireless extensions we support */
+#define WIRELESS_SUPPORT 21
//#define DBG_MOD_NAME DRIVER_NAME ":" BUS_TYPE ":" HW_TYPE ":" FW_TYPE
#define DBG_MOD_NAME MODULE_NAME
@@ -157,9 +158,6 @@ err: define bus type;
* There doesn't seem to be a difference for PCMCIA and PCI anymore, at least
* for PCMCIA the same defines are needed now as previously only used for PCI
*/
-#if USE_WEXT
-#define HAS_WIRELESS_EXTENSIONS
-#endif // USE_WEXT
#define NEW_MULTICAST
#define ALLOC_SKB(len) dev_alloc_skb(len+2)
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 522a31090c5..8ac5e1081aa 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -75,20 +75,6 @@
#include <wl_wext.h>
#include <wl_priv.h>
-
-
-/* If WIRELESS_EXT is not defined (as a result of HAS_WIRELESS_EXTENSIONS
- #including linux/wireless.h), then these functions do not need to be included
- in the build. */
-#ifdef WIRELESS_EXT
-
-#define IWE_STREAM_ADD_EVENT(info, buf, end, iwe, len) \
- iwe_stream_add_event(info, buf, end, iwe, len)
-#define IWE_STREAM_ADD_POINT(info, buf, end, iwe, msg) \
- iwe_stream_add_point(info, buf, end, iwe, msg)
-
-
-
/*******************************************************************************
* global definitions
******************************************************************************/
@@ -97,7 +83,223 @@ extern dbg_info_t *DbgInfo;
#endif // DBG
+/* Set up the LTV to program the appropriate key */
+static int hermes_set_tkip_keys(ltv_t *ltv, u16 key_idx, u8 *addr,
+ int set_tx, u8 *seq, u8 *key, size_t key_len)
+{
+ int ret = -EINVAL;
+ int buf_idx = 0;
+ hcf_8 tsc[IW_ENCODE_SEQ_MAX_SIZE] =
+ { 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00 };
+
+ DBG_ENTER(DbgInfo);
+
+ /*
+ * Check the key index here; if 0, load as Pairwise Key, otherwise,
+ * load as a group key. Note that for the Hermes, the RIDs for
+ * group/pairwise keys are different from each other and different
+ * than the default WEP keys as well.
+ */
+ switch (key_idx) {
+ case 0:
+ ltv->len = 28;
+ ltv->typ = CFG_ADD_TKIP_MAPPED_KEY;
+
+ /* Load the BSSID */
+ memcpy(&ltv->u.u8[buf_idx], addr, ETH_ALEN);
+ buf_idx += ETH_ALEN;
+
+ /* Load the TKIP key */
+ memcpy(&ltv->u.u8[buf_idx], &key[0], 16);
+ buf_idx += 16;
+
+ /* Load the TSC */
+ memcpy(&ltv->u.u8[buf_idx], tsc, IW_ENCODE_SEQ_MAX_SIZE);
+ buf_idx += IW_ENCODE_SEQ_MAX_SIZE;
+
+ /* Load the RSC */
+ memcpy(&ltv->u.u8[buf_idx], seq, IW_ENCODE_SEQ_MAX_SIZE);
+ buf_idx += IW_ENCODE_SEQ_MAX_SIZE;
+
+ /* Load the TxMIC key */
+ memcpy(&ltv->u.u8[buf_idx], &key[16], 8);
+ buf_idx += 8;
+
+ /* Load the RxMIC key */
+ memcpy(&ltv->u.u8[buf_idx], &key[24], 8);
+
+ ret = 0;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ ltv->len = 26;
+ ltv->typ = CFG_ADD_TKIP_DEFAULT_KEY;
+
+ /* Load the key Index */
+
+ /* If this is a Tx Key, set bit 8000 */
+ if (set_tx)
+ key_idx |= 0x8000;
+ ltv->u.u16[buf_idx] = cpu_to_le16(key_idx);
+ buf_idx += 2;
+
+ /* Load the RSC */
+ memcpy(&ltv->u.u8[buf_idx], seq, IW_ENCODE_SEQ_MAX_SIZE);
+ buf_idx += IW_ENCODE_SEQ_MAX_SIZE;
+
+ /* Load the TKIP, TxMIC, and RxMIC keys in one shot, because in
+ CFG_ADD_TKIP_DEFAULT_KEY they are back-to-back */
+ memcpy(&ltv->u.u8[buf_idx], key, key_len);
+ buf_idx += key_len;
+
+ /* Load the TSC */
+ memcpy(&ltv->u.u8[buf_idx], tsc, IW_ENCODE_SEQ_MAX_SIZE);
+
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ DBG_LEAVE(DbgInfo);
+ return ret;
+}
+
+/* Set up the LTV to clear the appropriate key */
+static int hermes_clear_tkip_keys(ltv_t *ltv, u16 key_idx, u8 *addr)
+{
+ int ret;
+
+ switch (key_idx) {
+ case 0:
+ if (memcmp(addr, "\xff\xff\xff\xff\xff\xff", ETH_ALEN) != 0) {
+ ltv->len = 7;
+ ltv->typ = CFG_REMOVE_TKIP_MAPPED_KEY;
+ memcpy(&ltv->u.u8[0], addr, ETH_ALEN);
+ ret = 0;
+ }
+ break;
+ case 1:
+ case 2:
+ case 3:
+ /* Clear the Group TKIP keys by index */
+ ltv->len = 2;
+ ltv->typ = CFG_REMOVE_TKIP_DEFAULT_KEY;
+ ltv->u.u16[0] = cpu_to_le16(key_idx);
+
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* Set the WEP keys in the wl_private structure */
+static int hermes_set_wep_keys(struct wl_private *lp, u16 key_idx,
+ u8 *key, size_t key_len,
+ bool enable, bool set_tx)
+{
+ hcf_8 encryption_state = lp->EnableEncryption;
+ int tk = lp->TransmitKeyID - 1; /* current key */
+ int ret = 0;
+
+ /* Is encryption supported? */
+ if (!wl_has_wep(&(lp->hcfCtx))) {
+ DBG_WARNING(DbgInfo, "WEP not supported on this device\n");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ DBG_NOTICE(DbgInfo, "pointer: %p, length: %d\n",
+ key, key_len);
+
+ /* Check the size of the key */
+ switch (key_len) {
+ case MIN_KEY_SIZE:
+ case MAX_KEY_SIZE:
+
+ /* Check the index */
+ if ((key_idx < 0) || (key_idx >= MAX_KEYS))
+ key_idx = tk;
+
+ /* Cleanup */
+ memset(lp->DefaultKeys.key[key_idx].key, 0, MAX_KEY_SIZE);
+
+ /* Copy the key in the driver */
+ memcpy(lp->DefaultKeys.key[key_idx].key, key, key_len);
+
+ /* Set the length */
+ lp->DefaultKeys.key[key_idx].len = key_len;
+
+ DBG_NOTICE(DbgInfo, "encoding.length: %d\n", key_len);
+ DBG_NOTICE(DbgInfo, "set key: %s(%d) [%d]\n",
+ lp->DefaultKeys.key[key_idx].key,
+ lp->DefaultKeys.key[key_idx].len, key_idx);
+
+ /* Enable WEP (if possible) */
+ if ((key_idx == tk) && (lp->DefaultKeys.key[tk].len > 0))
+ lp->EnableEncryption = 1;
+
+ break;
+
+ case 0:
+ /* Do we want to just set the current transmit key? */
+ if (set_tx && (key_idx >= 0) && (key_idx < MAX_KEYS)) {
+ DBG_NOTICE(DbgInfo, "index: %d; len: %d\n", key_idx,
+ lp->DefaultKeys.key[key_idx].len);
+
+ if (lp->DefaultKeys.key[key_idx].len > 0) {
+ lp->TransmitKeyID = key_idx + 1;
+ lp->EnableEncryption = 1;
+ } else {
+ DBG_WARNING(DbgInfo, "Problem setting the current TxKey\n");
+ ret = -EINVAL;
+ }
+ }
+ break;
+
+ default:
+ DBG_WARNING(DbgInfo, "Invalid Key length\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Read the flags */
+ if (enable) {
+ lp->EnableEncryption = 1;
+ lp->wext_enc = IW_ENCODE_ALG_WEP;
+ } else {
+ lp->EnableEncryption = 0; /* disable encryption */
+ lp->wext_enc = IW_ENCODE_ALG_NONE;
+ }
+
+ DBG_TRACE(DbgInfo, "encryption_state : %d\n", encryption_state);
+ DBG_TRACE(DbgInfo, "lp->EnableEncryption : %d\n", lp->EnableEncryption);
+ DBG_TRACE(DbgInfo, "erq->length : %d\n", key_len);
+
+ /* Write the changes to the card */
+ if (ret == 0) {
+ DBG_NOTICE(DbgInfo, "encrypt: %d, ID: %d\n", lp->EnableEncryption,
+ lp->TransmitKeyID);
+
+ if (lp->EnableEncryption == encryption_state) {
+ if (key_len != 0) {
+ /* Dynamic WEP key update */
+ wl_set_wep_keys(lp);
+ }
+ } else {
+ /* To switch encryption on/off, soft reset is
+ * required */
+ wl_apply(lp);
+ }
+ }
+
+out:
+ return ret;
+}
/*******************************************************************************
* wireless_commit()
@@ -324,16 +526,8 @@ static int wireless_get_frequency(struct net_device *dev, struct iw_request_info
if( ret == HCF_SUCCESS ) {
hcf_16 channel = CNV_LITTLE_TO_INT( lp->ltvRecord.u.u16[0] );
-#ifdef USE_FREQUENCY
-
freq->m = wl_get_freq_from_chan( channel ) * 100000;
freq->e = 1;
-#else
-
- freq->m = channel;
- freq->e = 0;
-
-#endif /* USE_FREQUENCY */
}
wl_act_int_on( lp );
@@ -460,8 +654,6 @@ retry:
/* Link quality */
-#ifdef USE_DBM
-
range->max_qual.qual = (u_char)HCF_MAX_COMM_QUALITY;
/* If the value returned in /proc/net/wireless is greater than the maximum range,
@@ -470,13 +662,6 @@ retry:
range->max_qual.level = (u_char)( dbm( HCF_MIN_SIGNAL_LEVEL ) - 1 );
range->max_qual.noise = (u_char)( dbm( HCF_MIN_NOISE_LEVEL ) - 1 );
-#else
-
- range->max_qual.qual = 100;
- range->max_qual.level = 100;
- range->max_qual.noise = 100;
-
-#endif /* USE_DBM */
/* Set available rates */
@@ -508,8 +693,6 @@ retry:
/* Encryption */
-#if WIRELESS_EXT > 8
-
/* Holding the lock too long, make a gap to allow other processes */
wl_unlock(lp, &flags);
wl_lock( lp, &flags );
@@ -526,26 +709,17 @@ retry:
range->max_encoding_tokens = MAX_KEYS;
}
-#endif /* WIRELESS_EXT > 8 */
-
/* Tx Power Info */
range->txpower_capa = IW_TXPOW_MWATT;
range->num_txpower = 1;
range->txpower[0] = RADIO_TX_POWER_MWATT;
-#if WIRELESS_EXT > 10
-
/* Wireless Extension Info */
range->we_version_compiled = WIRELESS_EXT;
range->we_version_source = WIRELESS_SUPPORT;
// Retry Limits and Lifetime - NOT SUPPORTED
-#endif
-
-
-#if WIRELESS_EXT > 11
-
/* Holding the lock too long, make a gap to allow other processes */
wl_unlock(lp, &flags);
wl_lock( lp, &flags );
@@ -555,18 +729,18 @@ retry:
range->avg_qual = lp->wstats.qual;
DBG_TRACE( DbgInfo, "wl_wireless_stats done\n" );
-#endif
-
/* Event capability (kernel + driver) */
- range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
- IW_EVENT_CAPA_MASK(SIOCGIWAP) |
- IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
- range->event_capa[1] = IW_EVENT_CAPA_K_1;
- range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVREGISTERED) |
- IW_EVENT_CAPA_MASK(IWEVCUSTOM) |
- IW_EVENT_CAPA_MASK(IWEVEXPIRED));
+ IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVREGISTERED);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVEXPIRED);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCREQIE);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCRESPIE);
range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_CIPHER_TKIP;
+ range->scan_capa = IW_SCAN_CAPA_NONE;
out_unlock:
wl_act_int_on( lp );
@@ -1075,9 +1249,6 @@ static int wireless_get_essid(struct net_device *dev, struct iw_request_info *in
/* Copy the information into the user buffer */
data->length = pName->length;
- /* NOTE: Null terminating is necessary for proper display of the SSID in
- the wireless tools */
- data->length = pName->length + 1;
if( pName->length < HCF_MAX_NAME_LEN ) {
pName->name[pName->length] = '\0';
}
@@ -1087,7 +1258,6 @@ static int wireless_get_essid(struct net_device *dev, struct iw_request_info *in
#if 1 //;? (HCF_TYPE) & HCF_TYPE_STA
//;?should we return an error status in AP mode
-#ifdef RETURN_CURRENT_NETWORKNAME
/* if desired is null ("any"), return current or "any" */
if( pName->name[0] == '\0' ) {
@@ -1104,11 +1274,7 @@ static int wireless_get_essid(struct net_device *dev, struct iw_request_info *in
pName->length = CNV_LITTLE_TO_INT( pName->length );
/* Copy the information into the user buffer */
- data->length = pName->length + 1;
- if( pName->length < HCF_MAX_NAME_LEN ) {
- pName->name[pName->length] = '\0';
- }
-
+ data->length = pName->length;
data->flags = 1;
} else {
ret = -EFAULT;
@@ -1116,11 +1282,8 @@ static int wireless_get_essid(struct net_device *dev, struct iw_request_info *in
}
}
-#endif // RETURN_CURRENT_NETWORKNAME
#endif // HCF_STA
- data->length--;
-
if (pName->length > IW_ESSID_MAX_SIZE) {
ret = -EFAULT;
goto out_unlock;
@@ -1169,156 +1332,39 @@ static int wireless_set_encode(struct net_device *dev, struct iw_request_info *i
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- int ret = 0;
-
-#if 1 //;? #if WIRELESS_EXT > 8 - used unconditionally in the rest of the code...
- hcf_8 encryption_state;
-#endif // WIRELESS_EXT > 8
- /*------------------------------------------------------------------------*/
-
+ int key_idx = (erq->flags & IW_ENCODE_INDEX) - 1;
+ int ret = 0;
+ bool enable = true;
- DBG_FUNC( "wireless_set_encode" );
- DBG_ENTER( DbgInfo );
+ DBG_ENTER(DbgInfo);
- if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
+ if (lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
goto out;
}
- wl_lock( lp, &flags );
-
- wl_act_int_off( lp );
-
- /* Is encryption supported? */
- if( !wl_has_wep( &( lp->hcfCtx ))) {
- DBG_WARNING( DbgInfo, "WEP not supported on this device\n" );
- ret = -EOPNOTSUPP;
- goto out_unlock;
- }
-
- DBG_NOTICE( DbgInfo, "pointer: %p, length: %d, flags: %#x\n",
- keybuf, erq->length,
- erq->flags);
-
- /* Save state of Encryption switch */
- encryption_state = lp->EnableEncryption;
-
- /* Basic checking: do we have a key to set? */
- if((erq->length) != 0) {
- int index = ( erq->flags & IW_ENCODE_INDEX ) - 1;
- int tk = lp->TransmitKeyID - 1; // current key
-
-
- /* Check the size of the key */
- switch(erq->length) {
- case 0:
- break;
-
- case MIN_KEY_SIZE:
- case MAX_KEY_SIZE:
-
- /* Check the index */
- if(( index < 0 ) || ( index >= MAX_KEYS )) {
- index = tk;
- }
-
- /* Cleanup */
- memset( lp->DefaultKeys.key[index].key, 0, MAX_KEY_SIZE );
-
- /* Copy the key in the driver */
- memcpy( lp->DefaultKeys.key[index].key, keybuf, erq->length);
-
- /* Set the length */
- lp->DefaultKeys.key[index].len = erq->length;
-
- DBG_NOTICE( DbgInfo, "encoding.length: %d\n", erq->length );
- DBG_NOTICE( DbgInfo, "set key: %s(%d) [%d]\n", lp->DefaultKeys.key[index].key,
- lp->DefaultKeys.key[index].len, index );
-
- /* Enable WEP (if possible) */
- if(( index == tk ) && ( lp->DefaultKeys.key[tk].len > 0 )) {
- lp->EnableEncryption = 1;
- }
-
- break;
-
- default:
- DBG_WARNING( DbgInfo, "Invalid Key length\n" );
- ret = -EINVAL;
- goto out_unlock;
- }
- } else {
- int index = ( erq->flags & IW_ENCODE_INDEX ) - 1;
-
-
- /* Do we want to just set the current transmit key? */
- if(( index >= 0 ) && ( index < MAX_KEYS )) {
- DBG_NOTICE( DbgInfo, "index: %d; len: %d\n", index,
- lp->DefaultKeys.key[index].len );
-
- if( lp->DefaultKeys.key[index].len > 0 ) {
- lp->TransmitKeyID = index + 1;
- lp->EnableEncryption = 1;
- } else {
- DBG_WARNING( DbgInfo, "Problem setting the current TxKey\n" );
- DBG_LEAVE( DbgInfo );
- ret = -EINVAL;
- }
- }
- }
-
- /* Read the flags */
- if( erq->flags & IW_ENCODE_DISABLED ) {
- lp->EnableEncryption = 0; // disable encryption
- } else {
- lp->EnableEncryption = 1;
- }
-
- if( erq->flags & IW_ENCODE_RESTRICTED ) {
- DBG_WARNING( DbgInfo, "IW_ENCODE_RESTRICTED invalid\n" );
- ret = -EINVAL; // Invalid
- }
+ if (erq->flags & IW_ENCODE_DISABLED)
+ enable = false;
- DBG_TRACE( DbgInfo, "encryption_state : %d\n", encryption_state );
- DBG_TRACE( DbgInfo, "lp->EnableEncryption : %d\n", lp->EnableEncryption );
- DBG_TRACE( DbgInfo, "erq->length : %d\n",
- erq->length);
- DBG_TRACE( DbgInfo, "erq->flags : 0x%x\n",
- erq->flags);
+ wl_lock(lp, &flags);
- /* Write the changes to the card */
- if( ret == 0 ) {
- DBG_NOTICE( DbgInfo, "encrypt: %d, ID: %d\n", lp->EnableEncryption,
- lp->TransmitKeyID );
+ wl_act_int_off(lp);
- if( lp->EnableEncryption == encryption_state ) {
- if( erq->length != 0 ) {
- /* Dynamic WEP key update */
- wl_set_wep_keys( lp );
- }
- } else {
- /* To switch encryption on/off, soft reset is required */
- wl_apply( lp );
- }
- }
+ ret = hermes_set_wep_keys(lp, key_idx, keybuf, erq->length,
+ enable, true);
/* Send an event that Encryption has been set */
- wl_wext_event_encode( dev );
-
-out_unlock:
+ if (ret == 0)
+ wl_wext_event_encode(dev);
- wl_act_int_on( lp );
+ wl_act_int_on(lp);
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return ret;
-} // wireless_set_encode
-/*============================================================================*/
-
-
-
+}
/*******************************************************************************
* wireless_get_encode()
@@ -2023,11 +2069,9 @@ static int wireless_set_rts_threshold (struct net_device *dev, struct iw_request
goto out;
}
-#if WIRELESS_EXT > 8
if( rts->disabled ) {
rthr = 2347;
}
-#endif /* WIRELESS_EXT > 8 */
if(( rthr < 256 ) || ( rthr > 2347 )) {
ret = -EINVAL;
@@ -2095,12 +2139,8 @@ static int wireless_get_rts_threshold (struct net_device *dev, struct iw_request
rts->value = lp->RTSThreshold;
-#if WIRELESS_EXT > 8
-
rts->disabled = ( rts->value == 2347 );
-#endif /* WIRELESS_EXT > 8 */
-
rts->fixed = 1;
wl_act_int_on( lp );
@@ -2527,8 +2567,6 @@ out:
-#if WIRELESS_EXT > 13
-
/*******************************************************************************
* wireless_set_scan()
*******************************************************************************
@@ -2737,8 +2775,8 @@ static int wireless_get_scan(struct net_device *dev, struct iw_request_info *inf
memcpy( iwe.u.ap_addr.sa_data, probe_resp->BSSID, ETH_ALEN);
iwe.len = IW_EV_ADDR_LEN;
- buf = IWE_STREAM_ADD_EVENT(info, buf, buf_end, &iwe, IW_EV_ADDR_LEN);
-
+ buf = iwe_stream_add_event(info, buf, buf_end,
+ &iwe, IW_EV_ADDR_LEN);
/* Use the mode to indicate if it's a station or AP */
/* Won't always be an AP if in IBSS mode */
@@ -2754,8 +2792,8 @@ static int wireless_get_scan(struct net_device *dev, struct iw_request_info *inf
iwe.len = IW_EV_UINT_LEN;
- buf = IWE_STREAM_ADD_EVENT(info, buf, buf_end, &iwe, IW_EV_UINT_LEN);
-
+ buf = iwe_stream_add_event(info, buf, buf_end,
+ &iwe, IW_EV_UINT_LEN);
/* Any quality information */
memset(&iwe, 0, sizeof(iwe));
@@ -2767,7 +2805,8 @@ static int wireless_get_scan(struct net_device *dev, struct iw_request_info *inf
iwe.u.qual.updated = lp->probe_results.scan_complete | IW_QUAL_DBM;
iwe.len = IW_EV_QUAL_LEN;
- buf = IWE_STREAM_ADD_EVENT(info, buf, buf_end, &iwe, IW_EV_QUAL_LEN);
+ buf = iwe_stream_add_event(info, buf, buf_end,
+ &iwe, IW_EV_QUAL_LEN);
/* ESSID information */
@@ -2778,7 +2817,8 @@ static int wireless_get_scan(struct net_device *dev, struct iw_request_info *inf
iwe.u.data.length = probe_resp->rawData[1];
iwe.u.data.flags = 1;
- buf = IWE_STREAM_ADD_POINT(info, buf, buf_end, &iwe, &probe_resp->rawData[2]);
+ buf = iwe_stream_add_point(info, buf, buf_end,
+ &iwe, &probe_resp->rawData[2]);
}
@@ -2796,7 +2836,7 @@ static int wireless_get_scan(struct net_device *dev, struct iw_request_info *inf
iwe.u.data.flags |= IW_ENCODE_DISABLED;
}
- buf = IWE_STREAM_ADD_POINT(info, buf, buf_end, &iwe, NULL);
+ buf = iwe_stream_add_point(info, buf, buf_end, &iwe, NULL);
/* Frequency Info */
@@ -2807,10 +2847,10 @@ static int wireless_get_scan(struct net_device *dev, struct iw_request_info *inf
iwe.u.freq.m = wl_parse_ds_ie( probe_resp );
iwe.u.freq.e = 0;
- buf = IWE_STREAM_ADD_EVENT(info, buf, buf_end, &iwe, IW_EV_FREQ_LEN);
+ buf = iwe_stream_add_event(info, buf, buf_end,
+ &iwe, IW_EV_FREQ_LEN);
-#if WIRELESS_EXT > 14
/* Custom info (Beacon Interval) */
memset( &iwe, 0, sizeof( iwe ));
memset( msg, 0, sizeof( msg ));
@@ -2819,27 +2859,25 @@ static int wireless_get_scan(struct net_device *dev, struct iw_request_info *inf
sprintf( msg, "beacon_interval=%d", probe_resp->beaconInterval );
iwe.u.data.length = strlen( msg );
- buf = IWE_STREAM_ADD_POINT(info, buf, buf_end, &iwe, msg);
+ buf = iwe_stream_add_point(info, buf, buf_end, &iwe, msg);
- /* Custom info (WPA-IE) */
+ /* WPA-IE */
wpa_ie = NULL;
wpa_ie_len = 0;
wpa_ie = wl_parse_wpa_ie( probe_resp, &wpa_ie_len );
if( wpa_ie != NULL ) {
- memset( &iwe, 0, sizeof( iwe ));
- memset( msg, 0, sizeof( msg ));
+ memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- sprintf( msg, "wpa_ie=%s", wl_print_wpa_ie( wpa_ie, wpa_ie_len ));
- iwe.u.data.length = strlen( msg );
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = wpa_ie_len;
- buf = IWE_STREAM_ADD_POINT(info, buf, buf_end, &iwe, msg);
+ buf = iwe_stream_add_point(info, buf, buf_end,
+ &iwe, wpa_ie);
}
/* Add other custom info in formatted string format as needed... */
-#endif
}
data->length = buf - extra;
@@ -2856,10 +2894,24 @@ out:
} // wireless_get_scan
/*============================================================================*/
-#endif // WIRELESS_EXT > 13
-
-
-#if WIRELESS_EXT > 17
+#if DBG
+static const char * const auth_names[] = {
+ "IW_AUTH_WPA_VERSION",
+ "IW_AUTH_CIPHER_PAIRWISE",
+ "IW_AUTH_CIPHER_GROUP",
+ "IW_AUTH_KEY_MGMT",
+ "IW_AUTH_TKIP_COUNTERMEASURES",
+ "IW_AUTH_DROP_UNENCRYPTED",
+ "IW_AUTH_80211_AUTH_ALG",
+ "IW_AUTH_WPA_ENABLED",
+ "IW_AUTH_RX_UNENCRYPTED_EAPOL",
+ "IW_AUTH_ROAMING_CONTROL",
+ "IW_AUTH_PRIVACY_INVOKED",
+ "IW_AUTH_CIPHER_GROUP_MGMT",
+ "IW_AUTH_MFP",
+ "Unsupported"
+};
+#endif
static int wireless_set_auth(struct net_device *dev,
struct iw_request_info *info,
@@ -2867,14 +2919,15 @@ static int wireless_set_auth(struct net_device *dev,
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- int ret;
- int iwa_idx = data->flags & IW_AUTH_INDEX;
- int iwa_val = data->value;
+ ltv_t ltv;
+ int ret;
+ int iwa_idx = data->flags & IW_AUTH_INDEX;
+ int iwa_val = data->value;
DBG_FUNC( "wireless_set_auth" );
DBG_ENTER( DbgInfo );
- if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
+ if (lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
goto out;
}
@@ -2883,89 +2936,102 @@ static int wireless_set_auth(struct net_device *dev,
wl_act_int_off( lp );
+ if (iwa_idx > IW_AUTH_MFP)
+ iwa_idx = IW_AUTH_MFP + 1;
+ DBG_TRACE(DbgInfo, "%s\n", auth_names[iwa_idx]);
switch (iwa_idx) {
- case IW_AUTH_WPA_VERSION:
- DBG_TRACE( DbgInfo, "IW_AUTH_WPA_VERSION\n");
- /* We do support WPA only; how should DISABLED be treated? */
- if (iwa_val == IW_AUTH_WPA_VERSION_WPA)
- ret = 0;
- else
- ret = -EINVAL;
- break;
-
- case IW_AUTH_WPA_ENABLED:
- DBG_TRACE( DbgInfo, "IW_AUTH_WPA_ENABLED: val = %d\n", iwa_val);
- if (iwa_val)
- lp->EnableEncryption = 2;
- else
- lp->EnableEncryption = 0;
+ case IW_AUTH_WPA_VERSION:
+ /* We do support WPA */
+ if ((iwa_val == IW_AUTH_WPA_VERSION_WPA) ||
+ (iwa_val == IW_AUTH_WPA_VERSION_DISABLED))
ret = 0;
- break;
+ else
+ ret = -EINVAL;
+ break;
- case IW_AUTH_TKIP_COUNTERMEASURES:
- DBG_TRACE( DbgInfo, "IW_AUTH_TKIP_COUNTERMEASURES\n");
- lp->driverEnable = !iwa_val;
- if(lp->driverEnable)
- hcf_cntl(&(lp->hcfCtx), HCF_CNTL_ENABLE | HCF_PORT_0);
- else
- hcf_cntl(&(lp->hcfCtx), HCF_CNTL_DISABLE | HCF_PORT_0);
- ret = 0;
- break;
+ case IW_AUTH_WPA_ENABLED:
+ DBG_TRACE(DbgInfo, "val = %d\n", iwa_val);
+ if (iwa_val)
+ lp->EnableEncryption = 2;
+ else
+ lp->EnableEncryption = 0;
- case IW_AUTH_DROP_UNENCRYPTED:
- DBG_TRACE( DbgInfo, "IW_AUTH_DROP_UNENCRYPTED\n");
- /* We do not actually do anything here, just to silence
- * wpa_supplicant */
- ret = 0;
- break;
+ /* Write straight to the card */
+ ltv.len = 2;
+ ltv.typ = CFG_CNF_ENCRYPTION;
+ ltv.u.u16[0] = cpu_to_le16(lp->EnableEncryption);
+ ret = hcf_put_info(&lp->hcfCtx, (LTVP)&ltv);
- case IW_AUTH_CIPHER_PAIRWISE:
- DBG_TRACE( DbgInfo, "IW_AUTH_CIPHER_PAIRWISE\n");
- /* not implemented, return an error */
- ret = -EINVAL;
- break;
+ break;
- case IW_AUTH_CIPHER_GROUP:
- DBG_TRACE( DbgInfo, "IW_AUTH_CIPHER_GROUP\n");
- /* not implemented, return an error */
- ret = -EINVAL;
- break;
+ case IW_AUTH_TKIP_COUNTERMEASURES:
- case IW_AUTH_KEY_MGMT:
- DBG_TRACE( DbgInfo, "IW_AUTH_KEY_MGMT\n");
- /* not implemented, return an error */
- ret = -EINVAL;
- break;
+ /* Immediately disable card */
+ lp->driverEnable = !iwa_val;
+ if (lp->driverEnable)
+ hcf_cntl(&(lp->hcfCtx), HCF_CNTL_ENABLE | HCF_PORT_0);
+ else
+ hcf_cntl(&(lp->hcfCtx), HCF_CNTL_DISABLE | HCF_PORT_0);
+ ret = 0;
+ break;
- case IW_AUTH_80211_AUTH_ALG:
- DBG_TRACE( DbgInfo, "IW_AUTH_80211_AUTH_ALG\n");
- /* not implemented, return an error */
+ case IW_AUTH_MFP:
+ /* Management Frame Protection not supported.
+ * Only fail if set to required.
+ */
+ if (iwa_val == IW_AUTH_MFP_REQUIRED)
ret = -EINVAL;
- break;
+ else
+ ret = 0;
+ break;
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- DBG_TRACE( DbgInfo, "IW_AUTH_RX_UNENCRYPTED_EAPOL\n");
- /* not implemented, return an error */
- ret = -EINVAL;
- break;
+ case IW_AUTH_KEY_MGMT:
- case IW_AUTH_ROAMING_CONTROL:
- DBG_TRACE( DbgInfo, "IW_AUTH_ROAMING_CONTROL\n");
- /* not implemented, return an error */
- ret = -EINVAL;
- break;
+ /* Record required management suite.
+ * Will take effect on next commit */
+ if (iwa_val != 0)
+ lp->AuthKeyMgmtSuite = 4;
+ else
+ lp->AuthKeyMgmtSuite = 0;
- case IW_AUTH_PRIVACY_INVOKED:
- DBG_TRACE( DbgInfo, "IW_AUTH_PRIVACY_INVOKED\n");
- /* not implemented, return an error */
- ret = -EINVAL;
- break;
+ ret = -EINPROGRESS;
+ break;
- default:
- DBG_TRACE( DbgInfo, "IW_AUTH_?? (%d) unknown\n", iwa_idx);
- /* return an error */
+ case IW_AUTH_80211_AUTH_ALG:
+
+ /* Just record whether open or shared is required.
+ * Will take effect on next commit */
+ ret = -EINPROGRESS;
+
+ if (iwa_val & IW_AUTH_ALG_SHARED_KEY)
+ lp->authentication = 1;
+ else if (iwa_val & IW_AUTH_ALG_OPEN_SYSTEM)
+ lp->authentication = 0;
+ else
ret = -EINVAL;
- break;
+ break;
+
+ case IW_AUTH_DROP_UNENCRYPTED:
+ /* Only needed for AP */
+ lp->ExcludeUnencrypted = iwa_val;
+ ret = -EINPROGRESS;
+ break;
+
+ case IW_AUTH_CIPHER_PAIRWISE:
+ case IW_AUTH_CIPHER_GROUP:
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ case IW_AUTH_ROAMING_CONTROL:
+ case IW_AUTH_PRIVACY_INVOKED:
+ /* Not used. May need to do something with
+ * CIPHER_PAIRWISE and CIPHER_GROUP*/
+ ret = -EINPROGRESS;
+ break;
+
+ default:
+ DBG_TRACE(DbgInfo, "IW_AUTH_?? (%d) unknown\n", iwa_idx);
+ /* return an error */
+ ret = -EOPNOTSUPP;
+ break;
}
wl_act_int_on( lp );
@@ -2979,290 +3045,176 @@ out:
/*============================================================================*/
-
-static int hermes_set_key(ltv_t *ltv, int alg, int key_idx, u8 *addr,
- int set_tx, u8 *seq, u8 *key, size_t key_len)
+static void flush_tx(struct wl_private *lp)
{
- int ret = -EINVAL;
- // int count = 0;
- int buf_idx = 0;
- hcf_8 tsc[IW_ENCODE_SEQ_MAX_SIZE] =
- { 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00 };
-
- DBG_FUNC( "hermes_set_key" );
- DBG_ENTER( DbgInfo );
+ ltv_t ltv;
+ int count;
/*
- * Check the key index here; if 0, load as Pairwise Key, otherwise,
- * load as a group key. Note that for the Hermes, the RIDs for
- * group/pariwise keys are different from each other and different
- * than the default WEP keys as well.
- */
- switch (alg)
- {
- case IW_ENCODE_ALG_TKIP:
- DBG_TRACE( DbgInfo, "IW_ENCODE_ALG_TKIP: key(%d)\n", key_idx);
-#if 0
- /*
- * Make sure that there is no data queued up in the firmware
- * before setting the TKIP keys. If this check is not
- * performed, some data may be sent out with incorrect MIC
- * and cause synchronizarion errors with the AP
- */
- /* Check every 1ms for 100ms */
- for( count = 0; count < 100; count++ )
- {
- usleep( 1000 );
+ * Make sure that there is no data queued up in the firmware
+ * before setting the TKIP keys. If this check is not
+ * performed, some data may be sent out with incorrect MIC
+ * and cause synchronizarion errors with the AP
+ */
+ /* Check every 1ms for 100ms */
+ for (count = 0; count < 100; count++) {
+ udelay(1000);
+
+ ltv.len = 2;
+ ltv.typ = 0xFD91; /* This RID not defined in HCF yet!!! */
+ ltv.u.u16[0] = 0;
+
+ hcf_get_info(&(lp->hcfCtx), (LTVP)&ltv);
+
+ if (ltv.u.u16[0] == 0)
+ break;
+ }
+
+ if (count >= 100)
+ DBG_TRACE(DbgInfo, "Timed out waiting for TxQ flush!\n");
- ltv.len = 2;
- ltv.typ = 0xFD91; // This RID not defined in HCF yet!!!
- ltv.u.u16[0] = 0;
+}
- wl_get_info( sock, &ltv, ifname );
+static int wireless_set_encodeext(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *keybuf)
+{
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
+ int ret;
+ int key_idx = (erq->flags & IW_ENCODE_INDEX) - 1;
+ ltv_t ltv;
+ struct iw_encode_ext *ext = (struct iw_encode_ext *)keybuf;
+ bool enable = true;
+ bool set_tx = false;
- if( ltv.u.u16[0] == 0 )
- {
- break;
- }
- }
+ DBG_ENTER(DbgInfo);
- if( count == 100 )
- {
- wpa_printf( MSG_DEBUG, "Timed out waiting for TxQ!" );
- }
-#endif
+ if (lp->portState == WVLAN_PORT_STATE_DISABLED) {
+ ret = -EBUSY;
+ goto out;
+ }
- switch (key_idx) {
- case 0:
- ltv->len = 28;
- ltv->typ = CFG_ADD_TKIP_MAPPED_KEY;
+ if (erq->flags & IW_ENCODE_DISABLED) {
+ ext->alg = IW_ENCODE_ALG_NONE;
+ enable = false;
+ }
- /* Load the BSSID */
- memcpy(&ltv->u.u8[buf_idx], addr, ETH_ALEN);
- buf_idx += ETH_ALEN;
+ if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
+ set_tx = true;
- /* Load the TKIP key */
- memcpy(&ltv->u.u8[buf_idx], &key[0], 16);
- buf_idx += 16;
+ wl_lock(lp, &flags);
- /* Load the TSC */
- memcpy(&ltv->u.u8[buf_idx], tsc, IW_ENCODE_SEQ_MAX_SIZE);
- buf_idx += IW_ENCODE_SEQ_MAX_SIZE;
+ wl_act_int_off(lp);
- /* Load the RSC */
- memcpy(&ltv->u.u8[buf_idx], seq, IW_ENCODE_SEQ_MAX_SIZE);
- buf_idx += IW_ENCODE_SEQ_MAX_SIZE;
+ memset(&ltv, 0, sizeof(ltv));
- /* Load the TxMIC key */
- memcpy(&ltv->u.u8[buf_idx], &key[16], 8);
- buf_idx += 8;
+ switch (ext->alg) {
+ case IW_ENCODE_ALG_TKIP:
+ DBG_TRACE(DbgInfo, "IW_ENCODE_ALG_TKIP: key(%d)\n", key_idx);
- /* Load the RxMIC key */
- memcpy(&ltv->u.u8[buf_idx], &key[24], 8);
+ if (sizeof(ext->rx_seq) != 8) {
+ DBG_TRACE(DbgInfo, "rx_seq size mismatch\n");
+ DBG_LEAVE(DbgInfo);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
- ret = 0;
- break;
- case 1:
- case 2:
- case 3:
- ltv->len = 26;
- ltv->typ = CFG_ADD_TKIP_DEFAULT_KEY;
+ ret = hermes_set_tkip_keys(&ltv, key_idx, ext->addr.sa_data,
+ set_tx,
+ ext->rx_seq, ext->key, ext->key_len);
- /* Load the key Index */
- ltv->u.u16[buf_idx] = key_idx;
- /* If this is a Tx Key, set bit 8000 */
- if(set_tx)
- ltv->u.u16[buf_idx] |= 0x8000;
- buf_idx += 2;
+ if (ret != 0) {
+ DBG_TRACE(DbgInfo, "hermes_set_tkip_keys returned != 0, key not set\n");
+ goto out_unlock;
+ }
- /* Load the RSC */
- memcpy(&ltv->u.u8[buf_idx], seq, IW_ENCODE_SEQ_MAX_SIZE);
- buf_idx += IW_ENCODE_SEQ_MAX_SIZE;
+ flush_tx(lp);
- /* Load the TKIP, TxMIC, and RxMIC keys in one shot, because in
- CFG_ADD_TKIP_DEFAULT_KEY they are back-to-back */
- memcpy(&ltv->u.u8[buf_idx], key, key_len);
- buf_idx += key_len;
+ lp->wext_enc = IW_ENCODE_ALG_TKIP;
- /* Load the TSC */
- memcpy(&ltv->u.u8[buf_idx], tsc, IW_ENCODE_SEQ_MAX_SIZE);
+ /* Write the key */
+ ret = hcf_put_info(&(lp->hcfCtx), (LTVP)&ltv);
+ break;
- ltv->u.u16[0] = CNV_INT_TO_LITTLE(ltv->u.u16[0]);
+ case IW_ENCODE_ALG_WEP:
+ DBG_TRACE(DbgInfo, "IW_ENCODE_ALG_WEP: key(%d)\n", key_idx);
- ret = 0;
- break;
- default:
- break;
+ if (erq->flags & IW_ENCODE_RESTRICTED) {
+ DBG_WARNING(DbgInfo, "IW_ENCODE_RESTRICTED invalid\n");
+ ret = -EINVAL;
+ goto out_unlock;
}
- break;
+ ret = hermes_set_wep_keys(lp, key_idx, ext->key, ext->key_len,
+ enable, set_tx);
- case IW_ENCODE_ALG_WEP:
- DBG_TRACE( DbgInfo, "IW_ENCODE_ALG_WEP: key(%d)\n", key_idx);
break;
case IW_ENCODE_ALG_CCMP:
- DBG_TRACE( DbgInfo, "IW_ENCODE_ALG_CCMP: key(%d)\n", key_idx);
+ DBG_TRACE(DbgInfo, "IW_ENCODE_ALG_CCMP: key(%d)\n", key_idx);
+ ret = -EOPNOTSUPP;
break;
case IW_ENCODE_ALG_NONE:
- DBG_TRACE( DbgInfo, "IW_ENCODE_ALG_NONE: key(%d)\n", key_idx);
- switch (key_idx) {
- case 0:
- if (memcmp(addr, "\xff\xff\xff\xff\xff\xff", ETH_ALEN) != 0) {
- //if (addr != NULL) {
- ltv->len = 7;
- ltv->typ = CFG_REMOVE_TKIP_MAPPED_KEY;
- memcpy(&ltv->u.u8[0], addr, ETH_ALEN);
- ret = 0;
- }
- break;
- case 1:
- case 2:
- case 3:
- /* Clear the Group TKIP keys by index */
- ltv->len = 2;
- ltv->typ = CFG_REMOVE_TKIP_DEFAULT_KEY;
- ltv->u.u16[0] = key_idx;
-
+ DBG_TRACE(DbgInfo, "IW_ENCODE_ALG_NONE: key(%d)\n", key_idx);
+
+ if (lp->wext_enc == IW_ENCODE_ALG_TKIP) {
+ ret = hermes_clear_tkip_keys(&ltv, key_idx,
+ ext->addr.sa_data);
+ flush_tx(lp);
+ lp->wext_enc = IW_ENCODE_ALG_NONE;
+ ret = hcf_put_info(&(lp->hcfCtx), (LTVP)&ltv);
+
+ } else if (lp->wext_enc == IW_ENCODE_ALG_WEP) {
+ ret = hermes_set_wep_keys(lp, key_idx,
+ ext->key, ext->key_len,
+ false, false);
+ } else {
ret = 0;
- break;
- default:
- break;
}
+
break;
+
default:
DBG_TRACE( DbgInfo, "IW_ENCODE_??: key(%d)\n", key_idx);
+ ret = -EOPNOTSUPP;
break;
}
- DBG_LEAVE( DbgInfo );
- return ret;
-} // hermes_set_key
-/*============================================================================*/
-
-
-
-static int wireless_set_encodeext (struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *keybuf)
-{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
- int ret;
- int key_idx = (erq->flags&IW_ENCODE_INDEX) - 1;
- ltv_t ltv;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)keybuf;
-
- DBG_FUNC( "wireless_set_encodeext" );
- DBG_ENTER( DbgInfo );
-
- if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
- ret = -EBUSY;
- goto out;
- }
-
- if (sizeof(ext->rx_seq) != 8) {
- DBG_TRACE(DbgInfo, "rz_seq size mismatch\n");
- DBG_LEAVE(DbgInfo);
- return -EINVAL;
- }
-
- /* Handle WEP keys via the old set encode procedure */
- if(ext->alg == IW_ENCODE_ALG_WEP) {
- struct iw_point wep_erq;
- char *wep_keybuf;
-
- /* Build request structure */
- wep_erq.flags = erq->flags; // take over flags with key index
- wep_erq.length = ext->key_len; // take length from extended key info
- wep_keybuf = ext->key; // pointer to the key text
-
- /* Call wireless_set_encode tot handle the WEP key */
- ret = wireless_set_encode(dev, info, &wep_erq, wep_keybuf);
- goto out;
- }
-
- /* Proceed for extended encode functions for WAP and NONE */
- wl_lock( lp, &flags );
-
- wl_act_int_off( lp );
-
- memset(&ltv, 0, sizeof(ltv));
- ret = hermes_set_key(&ltv, ext->alg, key_idx, ext->addr.sa_data,
- ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
- ext->rx_seq, ext->key, ext->key_len);
-
- if (ret != 0) {
- DBG_TRACE( DbgInfo, "hermes_set_key returned != 0, key not set\n");
- goto out_unlock;
- }
-
- /* Put the key in HCF */
- ret = hcf_put_info(&(lp->hcfCtx), (LTVP)&ltv);
-
out_unlock:
- if(ret == HCF_SUCCESS) {
- DBG_TRACE( DbgInfo, "Put key info succes\n");
- } else {
- DBG_TRACE( DbgInfo, "Put key info failed, key not set\n");
- }
- wl_act_int_on( lp );
+ wl_act_int_on(lp);
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return ret;
-} // wireless_set_encodeext
+}
/*============================================================================*/
-static int wireless_get_genie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra)
+static int wireless_set_genie(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
int ret = 0;
- ltv_t ltv;
-
- DBG_FUNC( "wireless_get_genie" );
- DBG_ENTER( DbgInfo );
-
- if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
- ret = -EBUSY;
- goto out;
- }
-
- wl_lock( lp, &flags );
-
- wl_act_int_off( lp );
-
- memset(&ltv, 0, sizeof(ltv));
- ltv.len = 2;
- ltv.typ = CFG_SET_WPA_AUTH_KEY_MGMT_SUITE;
- lp->AuthKeyMgmtSuite = ltv.u.u16[0] = 4;
- ltv.u.u16[0] = CNV_INT_TO_LITTLE(ltv.u.u16[0]);
- ret = hcf_put_info(&(lp->hcfCtx), (LTVP)&ltv);
-
- wl_act_int_on( lp );
+ DBG_ENTER(DbgInfo);
- wl_unlock(lp, &flags);
+ /* We can't write this to the card, but apparently this
+ * operation needs to succeed */
+ ret = 0;
-out:
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return ret;
}
/*============================================================================*/
-#endif // WIRELESS_EXT > 17
-
/*******************************************************************************
* wl_wireless_stats()
*******************************************************************************
@@ -3316,7 +3268,6 @@ struct iw_statistics * wl_wireless_stats( struct net_device *dev )
if( status == HCF_SUCCESS ) {
pQual = (CFG_COMMS_QUALITY_STRCT *)&( lp->ltvRecord );
-#ifdef USE_DBM
pStats->qual.qual = (u_char) CNV_LITTLE_TO_INT( pQual->coms_qual );
pStats->qual.level = (u_char) dbm( CNV_LITTLE_TO_INT( pQual->signal_lvl ));
pStats->qual.noise = (u_char) dbm( CNV_LITTLE_TO_INT( pQual->noise_lvl ));
@@ -3325,23 +3276,6 @@ struct iw_statistics * wl_wireless_stats( struct net_device *dev )
IW_QUAL_LEVEL_UPDATED |
IW_QUAL_NOISE_UPDATED |
IW_QUAL_DBM);
-#else
- pStats->qual.qual = percent( CNV_LITTLE_TO_INT( pQual->coms_qual ),
- HCF_MIN_COMM_QUALITY,
- HCF_MAX_COMM_QUALITY );
-
- pStats->qual.level = percent( CNV_LITTLE_TO_INT( pQual->signal_lvl ),
- HCF_MIN_SIGNAL_LEVEL,
- HCF_MAX_SIGNAL_LEVEL );
-
- pStats->qual.noise = percent( CNV_LITTLE_TO_INT( pQual->noise_lvl ),
- HCF_MIN_NOISE_LEVEL,
- HCF_MAX_NOISE_LEVEL );
-
- pStats->qual.updated |= (IW_QUAL_QUAL_UPDATED |
- IW_QUAL_LEVEL_UPDATED |
- IW_QUAL_NOISE_UPDATED);
-#endif /* USE_DBM */
} else {
memset( &( pStats->qual ), 0, sizeof( pStats->qual ));
}
@@ -3512,7 +3446,6 @@ inline void wl_spy_gather( struct net_device *dev, u_char *mac )
******************************************************************************/
void wl_wext_event_freq( struct net_device *dev )
{
-#if WIRELESS_EXT > 13
union iwreq_data wrqu;
struct wl_private *lp = wl_priv(dev);
/*------------------------------------------------------------------------*/
@@ -3524,7 +3457,6 @@ void wl_wext_event_freq( struct net_device *dev )
wrqu.freq.e = 0;
wireless_send_event( dev, SIOCSIWFREQ, &wrqu, NULL );
-#endif /* WIRELESS_EXT > 13 */
return;
} // wl_wext_event_freq
@@ -3554,7 +3486,6 @@ void wl_wext_event_freq( struct net_device *dev )
******************************************************************************/
void wl_wext_event_mode( struct net_device *dev )
{
-#if WIRELESS_EXT > 13
union iwreq_data wrqu;
struct wl_private *lp = wl_priv(dev);
/*------------------------------------------------------------------------*/
@@ -3569,7 +3500,6 @@ void wl_wext_event_mode( struct net_device *dev )
}
wireless_send_event( dev, SIOCSIWMODE, &wrqu, NULL );
-#endif /* WIRELESS_EXT > 13 */
return;
} // wl_wext_event_mode
@@ -3599,7 +3529,6 @@ void wl_wext_event_mode( struct net_device *dev )
******************************************************************************/
void wl_wext_event_essid( struct net_device *dev )
{
-#if WIRELESS_EXT > 13
union iwreq_data wrqu;
struct wl_private *lp = wl_priv(dev);
/*------------------------------------------------------------------------*/
@@ -3616,7 +3545,6 @@ void wl_wext_event_essid( struct net_device *dev )
wrqu.essid.flags = 1;
wireless_send_event( dev, SIOCSIWESSID, &wrqu, lp->NetworkName );
-#endif /* WIRELESS_EXT > 13 */
return;
} // wl_wext_event_essid
@@ -3646,7 +3574,6 @@ void wl_wext_event_essid( struct net_device *dev )
******************************************************************************/
void wl_wext_event_encode( struct net_device *dev )
{
-#if WIRELESS_EXT > 13
union iwreq_data wrqu;
struct wl_private *lp = wl_priv(dev);
int index = 0;
@@ -3688,7 +3615,6 @@ void wl_wext_event_encode( struct net_device *dev )
wireless_send_event( dev, SIOCSIWENCODE, &wrqu,
lp->DefaultKeys.key[index].key );
-#endif /* WIRELESS_EXT > 13 */
return;
} // wl_wext_event_encode
@@ -3718,7 +3644,6 @@ void wl_wext_event_encode( struct net_device *dev )
******************************************************************************/
void wl_wext_event_ap( struct net_device *dev )
{
-#if WIRELESS_EXT > 13
union iwreq_data wrqu;
struct wl_private *lp = wl_priv(dev);
int status;
@@ -3747,8 +3672,6 @@ void wl_wext_event_ap( struct net_device *dev )
wireless_send_event( dev, SIOCGIWAP, &wrqu, NULL );
}
-#endif /* WIRELESS_EXT > 13 */
-
return;
} // wl_wext_event_ap
/*============================================================================*/
@@ -3776,7 +3699,6 @@ void wl_wext_event_ap( struct net_device *dev )
******************************************************************************/
void wl_wext_event_scan_complete( struct net_device *dev )
{
-#if WIRELESS_EXT > 13
union iwreq_data wrqu;
/*------------------------------------------------------------------------*/
@@ -3785,7 +3707,6 @@ void wl_wext_event_scan_complete( struct net_device *dev )
wrqu.addr.sa_family = ARPHRD_ETHER;
wireless_send_event( dev, SIOCGIWSCAN, &wrqu, NULL );
-#endif /* WIRELESS_EXT > 13 */
return;
} // wl_wext_event_scan_complete
@@ -3815,7 +3736,6 @@ void wl_wext_event_scan_complete( struct net_device *dev )
******************************************************************************/
void wl_wext_event_new_sta( struct net_device *dev )
{
-#if WIRELESS_EXT > 14
union iwreq_data wrqu;
/*------------------------------------------------------------------------*/
@@ -3826,7 +3746,6 @@ void wl_wext_event_new_sta( struct net_device *dev )
memcpy( wrqu.addr.sa_data, dev->dev_addr, ETH_ALEN );
wrqu.addr.sa_family = ARPHRD_ETHER;
wireless_send_event( dev, IWEVREGISTERED, &wrqu, NULL );
-#endif /* WIRELESS_EXT > 14 */
return;
} // wl_wext_event_new_sta
@@ -3856,7 +3775,6 @@ void wl_wext_event_new_sta( struct net_device *dev )
******************************************************************************/
void wl_wext_event_expired_sta( struct net_device *dev )
{
-#if WIRELESS_EXT > 14
union iwreq_data wrqu;
/*------------------------------------------------------------------------*/
@@ -3866,7 +3784,6 @@ void wl_wext_event_expired_sta( struct net_device *dev )
memcpy( wrqu.addr.sa_data, dev->dev_addr, ETH_ALEN );
wrqu.addr.sa_family = ARPHRD_ETHER;
wireless_send_event( dev, IWEVEXPIRED, &wrqu, NULL );
-#endif /* WIRELESS_EXT > 14 */
return;
} // wl_wext_event_expired_sta
@@ -3895,10 +3812,9 @@ void wl_wext_event_expired_sta( struct net_device *dev )
******************************************************************************/
void wl_wext_event_mic_failed( struct net_device *dev )
{
-#if WIRELESS_EXT > 14
- char msg[512];
union iwreq_data wrqu;
struct wl_private *lp = wl_priv(dev);
+ struct iw_michaelmicfailure wxmic;
int key_idx;
char *addr1;
char *addr2;
@@ -3920,31 +3836,17 @@ void wl_wext_event_mic_failed( struct net_device *dev )
DBG_PRINT( "MIC FAIL - KEY USED : %d, STATUS : 0x%04x\n", key_idx,
hdr->status );
- memset( &wrqu, 0, sizeof( wrqu ));
- memset( msg, 0, sizeof( msg ));
-
-
- /* Because MIC failures are not part of the Wireless Extensions yet, they
- must be passed as a string using an IWEVCUSTOM event. In order for the
- event to be effective, the string format must be known by both the
- driver and the supplicant. The following is the string format used by the
- hostap project's WPA supplicant, and will be used here until the Wireless
- Extensions interface adds this support:
+ memset(&wrqu, 0, sizeof(wrqu));
+ memset(&wxmic, 0, sizeof(wxmic));
- MLME-MICHAELMICFAILURE.indication(keyid=# broadcast/unicast addr=addr2)
- */
+ wxmic.flags = key_idx & IW_MICFAILURE_KEY_ID;
+ wxmic.flags |= (addr1[0] & 1) ?
+ IW_MICFAILURE_GROUP : IW_MICFAILURE_PAIRWISE;
+ wxmic.src_addr.sa_family = ARPHRD_ETHER;
+ memcpy(wxmic.src_addr.sa_data, addr2, ETH_ALEN);
- /* NOTE: Format of MAC address (using colons to separate bytes) may cause
- a problem in future versions of the supplicant, if they ever
- actually parse these parameters */
-#if DBG
- sprintf(msg, "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast "
- "addr=%pM)", key_idx, addr1[0] & 0x01 ? "broad" : "uni",
- addr2);
-#endif
- wrqu.data.length = strlen( msg );
- wireless_send_event( dev, IWEVCUSTOM, &wrqu, msg );
-#endif /* WIRELESS_EXT > 14 */
+ wrqu.data.length = sizeof(wxmic);
+ wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&wxmic);
return;
} // wl_wext_event_mic_failed
@@ -3974,8 +3876,6 @@ void wl_wext_event_mic_failed( struct net_device *dev )
******************************************************************************/
void wl_wext_event_assoc_ie( struct net_device *dev )
{
-#if WIRELESS_EXT > 14
- char msg[512];
union iwreq_data wrqu;
struct wl_private *lp = wl_priv(dev);
int status;
@@ -3986,7 +3886,6 @@ void wl_wext_event_assoc_ie( struct net_device *dev )
memset( &wrqu, 0, sizeof( wrqu ));
- memset( msg, 0, sizeof( msg ));
/* Retrieve the Association Request IE */
lp->ltvRecord.len = 45;
@@ -3999,24 +3898,18 @@ void wl_wext_event_assoc_ie( struct net_device *dev )
memcpy( &data.rawData, &( lp->ltvRecord.u.u8[1] ), 88 );
wpa_ie = wl_parse_wpa_ie( &data, &length );
- /* Because this event (Association WPA-IE) is not part of the Wireless
- Extensions yet, it must be passed as a string using an IWEVCUSTOM event.
- In order for the event to be effective, the string format must be known
- by both the driver and the supplicant. The following is the string format
- used by the hostap project's WPA supplicant, and will be used here until
- the Wireless Extensions interface adds this support:
-
- ASSOCINFO(ReqIEs=WPA-IE RespIEs=WPA-IE)
- */
-
if( length != 0 )
{
- sprintf( msg, "ASSOCINFO(ReqIEs=%s)", wl_print_wpa_ie( wpa_ie, length ));
- wrqu.data.length = strlen( msg );
- wireless_send_event( dev, IWEVCUSTOM, &wrqu, msg );
+ wrqu.data.length = wpa_ie[1] + 2;
+ wireless_send_event(dev, IWEVASSOCREQIE,
+ &wrqu, wpa_ie);
+
+ /* This bit is a hack. We send the respie
+ * event at the same time */
+ wireless_send_event(dev, IWEVASSOCRESPIE,
+ &wrqu, wpa_ie);
}
}
-#endif /* WIRELESS_EXT > 14 */
return;
} // wl_wext_event_assoc_ie
@@ -4025,66 +3918,39 @@ void wl_wext_event_assoc_ie( struct net_device *dev )
static const iw_handler wl_handler[] =
{
- (iw_handler) wireless_commit, /* SIOCSIWCOMMIT */
- (iw_handler) wireless_get_protocol, /* SIOCGIWNAME */
- (iw_handler) NULL, /* SIOCSIWNWID */
- (iw_handler) NULL, /* SIOCGIWNWID */
- (iw_handler) wireless_set_frequency, /* SIOCSIWFREQ */
- (iw_handler) wireless_get_frequency, /* SIOCGIWFREQ */
- (iw_handler) wireless_set_porttype, /* SIOCSIWMODE */
- (iw_handler) wireless_get_porttype, /* SIOCGIWMODE */
- (iw_handler) wireless_set_sensitivity, /* SIOCSIWSENS */
- (iw_handler) wireless_get_sensitivity, /* SIOCGIWSENS */
- (iw_handler) NULL , /* SIOCSIWRANGE */
- (iw_handler) wireless_get_range, /* SIOCGIWRANGE */
- (iw_handler) NULL , /* SIOCSIWPRIV */
- (iw_handler) NULL /* kernel code */, /* SIOCGIWPRIV */
- (iw_handler) NULL , /* SIOCSIWSTATS */
- (iw_handler) NULL /* kernel code */, /* SIOCGIWSTATS */
- iw_handler_set_spy, /* SIOCSIWSPY */
- iw_handler_get_spy, /* SIOCGIWSPY */
- NULL, /* SIOCSIWTHRSPY */
- NULL, /* SIOCGIWTHRSPY */
- (iw_handler) NULL, /* SIOCSIWAP */
+ IW_HANDLER(SIOCSIWCOMMIT, (iw_handler) wireless_commit),
+ IW_HANDLER(SIOCGIWNAME, (iw_handler) wireless_get_protocol),
+ IW_HANDLER(SIOCSIWFREQ, (iw_handler) wireless_set_frequency),
+ IW_HANDLER(SIOCGIWFREQ, (iw_handler) wireless_get_frequency),
+ IW_HANDLER(SIOCSIWMODE, (iw_handler) wireless_set_porttype),
+ IW_HANDLER(SIOCGIWMODE, (iw_handler) wireless_get_porttype),
+ IW_HANDLER(SIOCSIWSENS, (iw_handler) wireless_set_sensitivity),
+ IW_HANDLER(SIOCGIWSENS, (iw_handler) wireless_get_sensitivity),
+ IW_HANDLER(SIOCGIWRANGE, (iw_handler) wireless_get_range),
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
#if 1 //;? (HCF_TYPE) & HCF_TYPE_STA
- (iw_handler) wireless_get_bssid, /* SIOCGIWAP */
-#else
- (iw_handler) NULL, /* SIOCGIWAP */
+ IW_HANDLER(SIOCGIWAP, (iw_handler) wireless_get_bssid),
#endif
- (iw_handler) NULL, /* SIOCSIWMLME */
- (iw_handler) wireless_get_ap_list, /* SIOCGIWAPLIST */
- (iw_handler) wireless_set_scan, /* SIOCSIWSCAN */
- (iw_handler) wireless_get_scan, /* SIOCGIWSCAN */
- (iw_handler) wireless_set_essid, /* SIOCSIWESSID */
- (iw_handler) wireless_get_essid, /* SIOCGIWESSID */
- (iw_handler) wireless_set_nickname, /* SIOCSIWNICKN */
- (iw_handler) wireless_get_nickname, /* SIOCGIWNICKN */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) wireless_set_rate, /* SIOCSIWRATE */
- (iw_handler) wireless_get_rate, /* SIOCGIWRATE */
- (iw_handler) wireless_set_rts_threshold,/* SIOCSIWRTS */
- (iw_handler) wireless_get_rts_threshold,/* SIOCGIWRTS */
- (iw_handler) NULL, /* SIOCSIWFRAG */
- (iw_handler) NULL, /* SIOCGIWFRAG */
- (iw_handler) NULL, /* SIOCSIWTXPOW */
- (iw_handler) wireless_get_tx_power, /* SIOCGIWTXPOW */
- (iw_handler) NULL, /* SIOCSIWRETRY */
- (iw_handler) NULL, /* SIOCGIWRETRY */
- (iw_handler) wireless_set_encode, /* SIOCSIWENCODE */
- (iw_handler) wireless_get_encode, /* SIOCGIWENCODE */
- (iw_handler) wireless_set_power, /* SIOCSIWPOWER */
- (iw_handler) wireless_get_power, /* SIOCGIWPOWER */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) wireless_get_genie, /* SIOCSIWGENIE */
- (iw_handler) NULL, /* SIOCGIWGENIE */
- (iw_handler) wireless_set_auth, /* SIOCSIWAUTH */
- (iw_handler) NULL, /* SIOCGIWAUTH */
- (iw_handler) wireless_set_encodeext, /* SIOCSIWENCODEEXT */
- (iw_handler) NULL, /* SIOCGIWENCODEEXT */
- (iw_handler) NULL, /* SIOCSIWPMKSA */
- (iw_handler) NULL, /* -- hole -- */
+ IW_HANDLER(SIOCGIWAPLIST, (iw_handler) wireless_get_ap_list),
+ IW_HANDLER(SIOCSIWSCAN, (iw_handler) wireless_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, (iw_handler) wireless_get_scan),
+ IW_HANDLER(SIOCSIWESSID, (iw_handler) wireless_set_essid),
+ IW_HANDLER(SIOCGIWESSID, (iw_handler) wireless_get_essid),
+ IW_HANDLER(SIOCSIWNICKN, (iw_handler) wireless_set_nickname),
+ IW_HANDLER(SIOCGIWNICKN, (iw_handler) wireless_get_nickname),
+ IW_HANDLER(SIOCSIWRATE, (iw_handler) wireless_set_rate),
+ IW_HANDLER(SIOCGIWRATE, (iw_handler) wireless_get_rate),
+ IW_HANDLER(SIOCSIWRTS, (iw_handler) wireless_set_rts_threshold),
+ IW_HANDLER(SIOCGIWRTS, (iw_handler) wireless_get_rts_threshold),
+ IW_HANDLER(SIOCGIWTXPOW, (iw_handler) wireless_get_tx_power),
+ IW_HANDLER(SIOCSIWENCODE, (iw_handler) wireless_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, (iw_handler) wireless_get_encode),
+ IW_HANDLER(SIOCSIWPOWER, (iw_handler) wireless_set_power),
+ IW_HANDLER(SIOCGIWPOWER, (iw_handler) wireless_get_power),
+ IW_HANDLER(SIOCSIWGENIE, (iw_handler) wireless_set_genie),
+ IW_HANDLER(SIOCSIWAUTH, (iw_handler) wireless_set_auth),
+ IW_HANDLER(SIOCSIWENCODEEXT, (iw_handler) wireless_set_encodeext),
};
static const iw_handler wl_private_handler[] =
@@ -4120,5 +3986,3 @@ const struct iw_handler_def wl_iw_handler_def =
.standard = (iw_handler *) wl_handler,
.get_wireless_stats = wl_get_wireless_stats,
};
-
-#endif // WIRELESS_EXT
diff --git a/drivers/staging/wlags49_h2/wl_wext.h b/drivers/staging/wlags49_h2/wl_wext.h
index 39d39a47b05..a713058c802 100644
--- a/drivers/staging/wlags49_h2/wl_wext.h
+++ b/drivers/staging/wlags49_h2/wl_wext.h
@@ -62,9 +62,6 @@
#define __WL_WEXT_H__
-#ifdef WIRELESS_EXT
-
-
/*******************************************************************************
* function protoypes
******************************************************************************/
@@ -88,9 +85,4 @@ void wl_wext_event_assoc_ie( struct net_device *dev );
extern const struct iw_handler_def wl_iw_handler_def;
-#else
-#error WIRELESS_EXT
-#endif // WIRELESS_EXT
-
-
#endif // __WL_WEXT_H__
diff --git a/drivers/staging/wlags49_h25/Makefile b/drivers/staging/wlags49_h25/Makefile
index 7d4b5632c94..6e0159d0a34 100644
--- a/drivers/staging/wlags49_h25/Makefile
+++ b/drivers/staging/wlags49_h25/Makefile
@@ -11,11 +11,9 @@
#
# If you want to build AP support (untested), comment out -DSTA_ONLY
-INSTALLDIR := /lib/modules/$(shell uname -r)/kernel/drivers/net/wireless
ccflags-y := -I$(KERNELDIR)/include
ccflags-y += -I$(src) \
-DBUS_PCMCIA \
- -DUSE_WPA \
-DUSE_WEXT \
-DSTA_ONLY \
-DWVLAN_49 \
@@ -39,10 +37,6 @@ $(WLNAME)-y += ap_h25.o
endif
endif
-# If KERNELRELEASE is defined, we've been invoked from the
-# kernel build system and can use its language.
-ifneq ($(KERNELRELEASE),)
-
obj-m += $(WLNAME).o
$(WLNAME)-y += wl_profile.o \
@@ -59,23 +53,3 @@ $(WLNAME)-y += wl_profile.o \
$(WLNAME)-$(CONFIG_SYSFS) += wl_sysfs.o
-# Otherwise we were called directly from the command
-# line; invoke the kernel build system.
-else
- KERNELDIR ?= /lib/modules/$(shell uname -r)/build
- PWD := $(shell pwd)
-
-default:
- $(MAKE) -C $(KERNELDIR) M=$(PWD) modules
-endif
-
-clean:
- rm -fr *.o *.ko *.mod.c *.mod.o .*.*.cmd Module.symvers \
- Module.markers modules.order .tmp_versions
-
-install: default
- -rmmod $(WLNAME)
- install -d $(INSTALLDIR)
- install -m 0644 -o root -g root $(WLNAME).ko $(INSTALLDIR)
- /sbin/depmod -aq
-
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index b0af292bc7e..14bfeb2e704 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -715,7 +715,7 @@ static const struct net_device_ops p80211_netdev_ops = {
.ndo_stop = p80211knetdev_stop,
.ndo_get_stats = p80211knetdev_get_stats,
.ndo_start_xmit = p80211knetdev_hard_start_xmit,
- .ndo_set_multicast_list = p80211knetdev_set_multicast_list,
+ .ndo_set_rx_mode = p80211knetdev_set_multicast_list,
.ndo_do_ioctl = p80211knetdev_do_ioctl,
.ndo_set_mac_address = p80211knetdev_set_mac_address,
.ndo_tx_timeout = p80211knetdev_tx_timeout,
diff --git a/drivers/staging/xgifb/TODO b/drivers/staging/xgifb/TODO
index c85ff5e9e70..13d9bc25797 100644
--- a/drivers/staging/xgifb/TODO
+++ b/drivers/staging/xgifb/TODO
@@ -5,7 +5,6 @@ Arnaud
TODO:
- clean ups
-- fix build warnings when module
- sort out dup ids with SiS driver
- remove useless/wrong/unused #ifdef/code/...
- fix printk usages
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index f6cd22d7963..71aebe3e84d 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -11,12 +11,6 @@
#define XGIFAIL(x) do { printk(x "\n"); return -EINVAL; } while (0)
-#define VER_MAJOR 0
-#define VER_MINOR 8
-#define VER_LEVEL 1
-
-#define DRIVER_DESC "XGI Volari Frame Buffer Module Version 0.8.1"
-
#ifndef PCI_VENDOR_ID_XG
#define PCI_VENDOR_ID_XG 0x18CA
#endif
@@ -37,12 +31,6 @@
#define PCI_DEVICE_ID_XG_27 0x027
#endif
-
-
-#define XGI_IOTYPE1 void __iomem
-#define XGI_IOTYPE2 __iomem
-#define XGIINITSTATIC static
-
static DEFINE_PCI_DEVICE_TABLE(xgifb_pci_table) = {
{PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_20, PCI_ANY_ID, PCI_ANY_ID,
0, 0, 0},
@@ -58,61 +46,29 @@ static DEFINE_PCI_DEVICE_TABLE(xgifb_pci_table) = {
MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
/* To be included in fb.h */
-#ifndef FB_ACCEL_XGI_GLAMOUR_2
-#define FB_ACCEL_XGI_GLAMOUR_2 40 /* XGI 315, 650, 740 */
-#endif
#ifndef FB_ACCEL_XGI_XABRE
#define FB_ACCEL_XGI_XABRE 41 /* XGI 330 ("Xabre") */
#endif
-#define MAX_ROM_SCAN 0x10000
-
-#define SEQ_ADR 0x14
#define SEQ_DATA 0x15
-#define DAC_ADR 0x18
-#define DAC_DATA 0x19
-#define CRTC_ADR 0x24
-#define CRTC_DATA 0x25
-#define DAC2_ADR (0x16-0x30)
-#define DAC2_DATA (0x17-0x30)
-#define VB_PART1_ADR (0x04-0x30)
-#define VB_PART1_DATA (0x05-0x30)
-#define VB_PART2_ADR (0x10-0x30)
-#define VB_PART2_DATA (0x11-0x30)
-#define VB_PART3_ADR (0x12-0x30)
-#define VB_PART3_DATA (0x13-0x30)
-#define VB_PART4_ADR (0x14-0x30)
-#define VB_PART4_DATA (0x15-0x30)
-
-#define XGISR XGI_Pr.P3c4
-#define XGICR XGI_Pr.P3d4
-#define XGIDACA XGI_Pr.P3c8
-#define XGIDACD XGI_Pr.P3c9
-#define XGIPART1 XGI_Pr.Part1Port
-#define XGIPART2 XGI_Pr.Part2Port
-#define XGIPART3 XGI_Pr.Part3Port
-#define XGIPART4 XGI_Pr.Part4Port
-#define XGIPART5 XGI_Pr.Part5Port
+
+#define XGISR (xgifb_info->dev_info.P3c4)
+#define XGICR (xgifb_info->dev_info.P3d4)
+#define XGIDACA (xgifb_info->dev_info.P3c8)
+#define XGIDACD (xgifb_info->dev_info.P3c9)
+#define XGIPART1 (xgifb_info->dev_info.Part1Port)
+#define XGIPART2 (xgifb_info->dev_info.Part2Port)
+#define XGIPART3 (xgifb_info->dev_info.Part3Port)
+#define XGIPART4 (xgifb_info->dev_info.Part4Port)
+#define XGIPART5 (xgifb_info->dev_info.Part5Port)
#define XGIDAC2A XGIPART5
#define XGIDAC2D (XGIPART5 + 1)
-#define XGIMISCR (XGI_Pr.RelIO + 0x1c)
-#define XGIINPSTAT (XGI_Pr.RelIO + 0x2a)
#define IND_XGI_PASSWORD 0x05 /* SRs */
-#define IND_XGI_COLOR_MODE 0x06
#define IND_XGI_RAMDAC_CONTROL 0x07
#define IND_XGI_DRAM_SIZE 0x14
-#define IND_XGI_SCRATCH_REG_16 0x16
-#define IND_XGI_SCRATCH_REG_17 0x17
-#define IND_XGI_SCRATCH_REG_1A 0x1A
#define IND_XGI_MODULE_ENABLE 0x1E
#define IND_XGI_PCI_ADDRESS_SET 0x20
-#define IND_XGI_TURBOQUEUE_ADR 0x26
-#define IND_XGI_TURBOQUEUE_SET 0x27
-#define IND_XGI_POWER_ON_TRAP 0x38
-#define IND_XGI_POWER_ON_TRAP2 0x39
-#define IND_XGI_CMDQUEUE_SET 0x26
-#define IND_XGI_CMDQUEUE_THRESHOLD 0x27
#define IND_XGI_SCRATCH_REG_CR30 0x30 /* CRs */
#define IND_XGI_SCRATCH_REG_CR31 0x31
@@ -120,23 +76,10 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
#define IND_XGI_SCRATCH_REG_CR33 0x33
#define IND_XGI_LCD_PANEL 0x36
#define IND_XGI_SCRATCH_REG_CR37 0x37
-#define IND_XGI_AGP_IO_PAD 0x48
-#define IND_BRI_DRAM_STATUS 0x63 /* PCI config memory size offset */
-
-#define MMIO_QUEUE_PHYBASE 0x85C0
-#define MMIO_QUEUE_WRITEPORT 0x85C4
-#define MMIO_QUEUE_READPORT 0x85C8
-
-#define IND_XGI_CRT2_WRITE_ENABLE_300 0x24
#define IND_XGI_CRT2_WRITE_ENABLE_315 0x2F
#define XGI_PASSWORD 0x86 /* SR05 */
-#define XGI_INTERLACED_MODE 0x20 /* SR06 */
-#define XGI_8BPP_COLOR_MODE 0x0
-#define XGI_15BPP_COLOR_MODE 0x1
-#define XGI_16BPP_COLOR_MODE 0x2
-#define XGI_32BPP_COLOR_MODE 0x4
#define XGI_DRAM_SIZE_MASK 0xF0 /*SR14 */
#define XGI_DRAM_SIZE_1MB 0x00
@@ -148,27 +91,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
#define XGI_DRAM_SIZE_64MB 0x06
#define XGI_DRAM_SIZE_128MB 0x07
#define XGI_DRAM_SIZE_256MB 0x08
-#define XGI_DATA_BUS_MASK 0x02
-#define XGI_DATA_BUS_64 0x00
-#define XGI_DATA_BUS_128 0x01
-#define XGI_DUAL_CHANNEL_MASK 0x0C
-#define XGI_SINGLE_CHANNEL_1_RANK 0x0
-#define XGI_SINGLE_CHANNEL_2_RANK 0x1
-#define XGI_ASYM_DDR 0x02
-#define XGI_DUAL_CHANNEL_1_RANK 0x3
-
-#define XGI550_DRAM_SIZE_MASK 0x3F /* 550/650/740 SR14 */
-#define XGI550_DRAM_SIZE_4MB 0x00
-#define XGI550_DRAM_SIZE_8MB 0x01
-#define XGI550_DRAM_SIZE_16MB 0x03
-#define XGI550_DRAM_SIZE_24MB 0x05
-#define XGI550_DRAM_SIZE_32MB 0x07
-#define XGI550_DRAM_SIZE_64MB 0x0F
-#define XGI550_DRAM_SIZE_96MB 0x17
-#define XGI550_DRAM_SIZE_128MB 0x1F
-#define XGI550_DRAM_SIZE_256MB 0x3F
-
-#define XGI_SCRATCH_REG_1A_MASK 0x10
#define XGI_ENABLE_2D 0x40 /* SR1E */
@@ -176,7 +98,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
#define XGI_PCI_ADDR_ENABLE 0x80
#define XGI_SIMULTANEOUS_VIEW_ENABLE 0x01 /* CR30 */
-#define XGI_MODE_SELECT_CRT2 0x02
#define XGI_VB_OUTPUT_COMPOSITE 0x04
#define XGI_VB_OUTPUT_SVIDEO 0x08
#define XGI_VB_OUTPUT_SCART 0x10
@@ -199,118 +120,34 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
XGI_VB_SCART | XGI_VB_HIVISION|XGI_VB_YPBPR)
#define XGI_EXTERNAL_CHIP_MASK 0x0E /* CR37 */
-#define XGI_EXTERNAL_CHIP_XGI301 0x01 /* in CR37 << 1 ! */
-#define XGI_EXTERNAL_CHIP_LVDS 0x02 /* in CR37 << 1 ! */
-#define XGI_EXTERNAL_CHIP_TRUMPION 0x03 /* in CR37 << 1 ! */
-#define XGI_EXTERNAL_CHIP_LVDS_CHRONTEL 0x04 /* in CR37 << 1 ! */
-#define XGI_EXTERNAL_CHIP_CHRONTEL 0x05 /* in CR37 << 1 ! */
#define XGI310_EXTERNAL_CHIP_LVDS 0x02 /* in CR37 << 1 ! */
#define XGI310_EXTERNAL_CHIP_LVDS_CHRONTEL 0x03 /* in CR37 << 1 ! */
-#define XGI_AGP_2X 0x20 /* CR48 */
-
-#define BRI_DRAM_SIZE_MASK 0x70 /* PCI bridge config data */
-#define BRI_DRAM_SIZE_2MB 0x00
-#define BRI_DRAM_SIZE_4MB 0x01
-#define BRI_DRAM_SIZE_8MB 0x02
-#define BRI_DRAM_SIZE_16MB 0x03
-#define BRI_DRAM_SIZE_32MB 0x04
-#define BRI_DRAM_SIZE_64MB 0x05
-
-#define SR_BUFFER_SIZE 5
-#define CR_BUFFER_SIZE 5
-
/* ------------------- Global Variables ----------------------------- */
-/* Fbcon variables */
-static struct fb_info *fb_info;
-
-
-static int video_type = FB_TYPE_PACKED_PIXELS;
-
-static struct fb_var_screeninfo default_var = {
- .xres = 0,
- .yres = 0,
- .xres_virtual = 0,
- .yres_virtual = 0,
- .xoffset = 0,
- .yoffset = 0,
- .bits_per_pixel = 0,
- .grayscale = 0,
- .red = {0, 8, 0},
- .green = {0, 8, 0},
- .blue = {0, 8, 0},
- .transp = {0, 0, 0},
- .nonstd = 0,
- .activate = FB_ACTIVATE_NOW,
- .height = -1,
- .width = -1,
- .accel_flags = 0,
- .pixclock = 0,
- .left_margin = 0,
- .right_margin = 0,
- .upper_margin = 0,
- .lower_margin = 0,
- .hsync_len = 0,
- .vsync_len = 0,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
-};
-
-static struct fb_fix_screeninfo XGIfb_fix = {
- .id = "XGI",
- .type = FB_TYPE_PACKED_PIXELS,
- .xpanstep = 1,
- .ypanstep = 1,
-};
-static char myid[20];
-static u32 pseudo_palette[17];
-
-
/* display status */
-static int XGIfb_off = 0;
-static int XGIfb_crt1off = 0;
+static int XGIfb_crt1off;
static int XGIfb_forcecrt1 = -1;
-static int XGIfb_userom = 0;
-/*static int XGIfb_useoem = -1; */
+static int XGIfb_userom ;
/* global flags */
-static int XGIfb_registered;
-static int XGIfb_tvmode = 0;
-static int XGIfb_pdc = 0;
-static int enable_dstn = 0;
+static int XGIfb_tvmode;
+static int enable_dstn;
static int XGIfb_ypan = -1;
-
-static int XGIfb_CRT2_write_enable = 0;
-
/* TW: CRT2 type (for overriding autodetection) */
static int XGIfb_crt2type = -1;
/* PR: Tv plug type (for overriding autodetection) */
static int XGIfb_tvplug = -1;
-static unsigned char XGIfb_detectedpdc = 0;
-
-static unsigned char XGIfb_detectedlcda = 0xff;
-
-
-
-
/* TW: For ioctl XGIFB_GET_INFO */
/* XGIfb_info XGIfbinfo; */
-/* TW: Hardware extension; contains data on hardware */
-static struct xgi_hw_device_info XGIhw_ext;
-
-/* TW: XGI private structure */
-static struct vb_device_info XGI_Pr;
-
#define MD_XGI300 1
#define MD_XGI315 2
/* mode table */
-/* NOT const - will be patched for 1280x960 mode number chaos reasons */
-static struct _XGIbios_mode {
+static const struct _XGIbios_mode {
char name[15];
u8 mode_no;
u16 vesa_mode_no_1; /* "XGI defined" VESA mode number */
@@ -323,9 +160,6 @@ static struct _XGIbios_mode {
u16 rows;
u8 chipset;
} XGIbios_mode[] = {
-#define MODE_INDEX_NONE 0 /* TW: index for mode=none */
- {"none", 0xFF, 0x0000, 0x0000, 0, 0, 0, 0, 0, 0,
- MD_XGI300|MD_XGI315}, /* TW: for mode "none" */
{"320x240x16", 0x56, 0x0000, 0x0000, 320, 240, 16, 1, 40, 15,
MD_XGI315},
{"320x480x8", 0x5A, 0x0000, 0x0000, 320, 480, 8, 1, 40, 30,
@@ -365,11 +199,9 @@ static struct _XGIbios_mode {
MD_XGI300|MD_XGI315},
{"800x480x32", 0x76, 0x0000, 0x0000, 800, 480, 32, 1, 100, 30,
MD_XGI300|MD_XGI315},
-#define DEFAULT_MODE 21 /* TW: index for 800x600x8 */
-#define DEFAULT_LCDMODE 21 /* TW: index for 800x600x8 */
-#define DEFAULT_TVMODE 21 /* TW: index for 800x600x8 */
{"800x600x8", 0x30, 0x0103, 0x0103, 800, 600, 8, 1, 100, 37,
MD_XGI300|MD_XGI315},
+#define DEFAULT_MODE 20 /* index for 800x600x16 */
{"800x600x16", 0x47, 0x0114, 0x0114, 800, 600, 16, 1, 100, 37,
MD_XGI300|MD_XGI315},
{"800x600x24", 0x63, 0x013b, 0x0115, 800, 600, 32, 1, 100, 37,
@@ -424,9 +256,8 @@ static struct _XGIbios_mode {
MD_XGI315},
{"1280x768x32", 0x25, 0x0000, 0x0000, 1280, 768, 32, 1, 160, 48,
MD_XGI315},
-#define MODEINDEX_1280x960 48
{"1280x960x8", 0x7C, 0x0000, 0x0000, 1280, 960, 8, 1, 160, 60,
- MD_XGI300|MD_XGI315}, /* TW: Modenumbers being patched */
+ MD_XGI300|MD_XGI315},
{"1280x960x16", 0x7D, 0x0000, 0x0000, 1280, 960, 16, 1, 160, 60,
MD_XGI300|MD_XGI315},
{"1280x960x24", 0x7E, 0x0000, 0x0000, 1280, 960, 32, 1, 160, 60,
@@ -476,16 +307,6 @@ static struct _XGIbios_mode {
{"\0", 0x00, 0, 0, 0, 0, 0, 0, 0}
};
-/* mode-related variables */
-#ifdef MODULE
-static int xgifb_mode_idx = 1;
-#else
-static int xgifb_mode_idx = -1; /* Use a default mode if we are
- inside the kernel */
-#endif
-static u8 XGIfb_mode_no = 0;
-static u8 XGIfb_rate_idx = 0;
-
/* TW: CR36 evaluation */
static const unsigned short XGI300paneltype[] = {
LCD_UNKNOWN, LCD_800x600, LCD_1024x768, LCD_1280x1024,
@@ -505,19 +326,19 @@ static const struct _XGI_crt2type {
int tvplug_no;
} XGI_crt2type[] = {
{"NONE", 0, -1},
- {"LCD", DISPTYPE_LCD, -1},
- {"TV", DISPTYPE_TV, -1},
- {"VGA", DISPTYPE_CRT2, -1},
- {"SVIDEO", DISPTYPE_TV, TVPLUG_SVIDEO},
- {"COMPOSITE", DISPTYPE_TV, TVPLUG_COMPOSITE},
- {"SCART", DISPTYPE_TV, TVPLUG_SCART},
+ {"LCD", XGIFB_DISP_LCD, -1},
+ {"TV", XGIFB_DISP_TV, -1},
+ {"VGA", XGIFB_DISP_CRT, -1},
+ {"SVIDEO", XGIFB_DISP_TV, TVPLUG_SVIDEO},
+ {"COMPOSITE", XGIFB_DISP_TV, TVPLUG_COMPOSITE},
+ {"SCART", XGIFB_DISP_TV, TVPLUG_SCART},
{"none", 0, -1},
- {"lcd", DISPTYPE_LCD, -1},
- {"tv", DISPTYPE_TV, -1},
- {"vga", DISPTYPE_CRT2, -1},
- {"svideo", DISPTYPE_TV, TVPLUG_SVIDEO},
- {"composite", DISPTYPE_TV, TVPLUG_COMPOSITE},
- {"scart", DISPTYPE_TV, TVPLUG_SCART},
+ {"lcd", XGIFB_DISP_LCD, -1},
+ {"tv", XGIFB_DISP_TV, -1},
+ {"vga", XGIFB_DISP_CRT, -1},
+ {"svideo", XGIFB_DISP_TV, TVPLUG_SVIDEO},
+ {"composite", XGIFB_DISP_TV, TVPLUG_COMPOSITE},
+ {"scart", XGIFB_DISP_TV, TVPLUG_SCART},
{"\0", -1, -1}
};
@@ -740,6 +561,5 @@ static const struct _XGI_TV_filter {
};
static int filter = -1;
-static unsigned char filter_tb;
#endif
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 4403e5f8059..36db231cd80 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -28,10 +28,6 @@
#include <linux/types.h>
#include <linux/proc_fs.h>
-#ifndef XGIFB_PAN
-#define XGIFB_PAN
-#endif
-
#include <linux/io.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
@@ -45,15 +41,17 @@
#include "vb_setmode.h"
#define Index_CR_GPIO_Reg1 0x48
-#define Index_CR_GPIO_Reg2 0x49
#define Index_CR_GPIO_Reg3 0x4a
#define GPIOG_EN (1<<6)
-#define GPIOG_WRITE (1<<6)
#define GPIOG_READ (1<<1)
#define XGIFB_ROM_SIZE 65536
+static char *mode;
+static int vesa = -1;
+static unsigned int refresh_rate;
+
/* -------------------- Macro definitions ---------------------------- */
#undef XGIFBDEBUG
@@ -143,9 +141,6 @@ static inline void dumpVGAReg(void)
}
#endif
-/* data for XGI components */
-struct video_info xgi_video_info;
-
#if 1
#define DEBUGPRN(x)
#else
@@ -390,44 +385,6 @@ static void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
}
-/* ------------ Interface for init & mode switching code ------------- */
-
-static unsigned char XGIfb_query_VGA_config_space(
- struct xgi_hw_device_info *pXGIhw_ext, unsigned long offset,
- unsigned long set, unsigned long *value)
-{
- static struct pci_dev *pdev = NULL;
- static unsigned char init = 0, valid_pdev = 0;
-
- if (!set)
- DPRINTK("XGIfb: Get VGA offset 0x%lx\n", offset);
- else
- DPRINTK("XGIfb: Set offset 0x%lx to 0x%lx\n", offset, *value);
-
- if (!init) {
- init = 1;
- pdev = pci_get_device(PCI_VENDOR_ID_XG, xgi_video_info.chip_id,
- pdev);
- if (pdev) {
- valid_pdev = 1;
- pci_dev_put(pdev);
- }
- }
-
- if (!valid_pdev) {
- printk(KERN_DEBUG "XGIfb: Can't find XGI %d VGA device.\n",
- xgi_video_info.chip_id);
- return 0;
- }
-
- if (set == 0)
- pci_read_config_dword(pdev, offset, (u32 *) value);
- else
- pci_write_config_dword(pdev, offset, (u32)(*value));
-
- return 1;
-}
-
/* ------------------ Internal helper routines ----------------- */
static int XGIfb_GetXG21DefaultLVDSModeIdx(void)
@@ -445,48 +402,26 @@ static int XGIfb_GetXG21DefaultLVDSModeIdx(void)
&& (XGIbios_mode[XGIfb_mode_idx].yres
== XGI21_LCDCapList[0].LVDSVDE)
&& (XGIbios_mode[XGIfb_mode_idx].bpp == 8)) {
- XGIfb_mode_no = XGIbios_mode[XGIfb_mode_idx].mode_no;
found_mode = 1;
break;
}
XGIfb_mode_idx++;
}
if (!found_mode)
- XGIfb_mode_idx = 0;
+ XGIfb_mode_idx = -1;
return XGIfb_mode_idx;
}
-static void XGIfb_search_mode(const char *name)
+static void XGIfb_search_mode(struct xgifb_video_info *xgifb_info,
+ const char *name)
{
int i = 0, j = 0, l;
- if (name == NULL) {
- printk(KERN_ERR "XGIfb: Internal error, using default mode.\n");
- xgifb_mode_idx = DEFAULT_MODE;
- if ((xgi_video_info.chip == XG21)
- && ((xgi_video_info.disp_state & DISPTYPE_DISP2)
- == DISPTYPE_LCD)) {
- xgifb_mode_idx = XGIfb_GetXG21DefaultLVDSModeIdx();
- }
- return;
- }
-
- if (!strcmp(name, XGIbios_mode[MODE_INDEX_NONE].name)) {
- printk(KERN_ERR "XGIfb: Mode 'none' not supported anymore. Using default.\n");
- xgifb_mode_idx = DEFAULT_MODE;
- if ((xgi_video_info.chip == XG21)
- && ((xgi_video_info.disp_state & DISPTYPE_DISP2)
- == DISPTYPE_LCD)) {
- xgifb_mode_idx = XGIfb_GetXG21DefaultLVDSModeIdx();
- }
- return;
- }
-
while (XGIbios_mode[i].mode_no != 0) {
l = min(strlen(name), strlen(XGIbios_mode[i].name));
if (!strncmp(name, XGIbios_mode[i].name, l)) {
- xgifb_mode_idx = i;
+ xgifb_info->mode_idx = i;
j = 1;
break;
}
@@ -496,82 +431,66 @@ static void XGIfb_search_mode(const char *name)
printk(KERN_INFO "XGIfb: Invalid mode '%s'\n", name);
}
-static void XGIfb_search_vesamode(unsigned int vesamode)
+static void XGIfb_search_vesamode(struct xgifb_video_info *xgifb_info,
+ unsigned int vesamode)
{
int i = 0, j = 0;
- if (vesamode == 0) {
-
- printk(KERN_ERR "XGIfb: Mode 'none' not supported anymore. Using default.\n");
- xgifb_mode_idx = DEFAULT_MODE;
- if ((xgi_video_info.chip == XG21)
- && ((xgi_video_info.disp_state & DISPTYPE_DISP2)
- == DISPTYPE_LCD)) {
- xgifb_mode_idx = XGIfb_GetXG21DefaultLVDSModeIdx();
- }
- return;
- }
+ if (vesamode == 0)
+ goto invalid;
vesamode &= 0x1dff; /* Clean VESA mode number from other flags */
while (XGIbios_mode[i].mode_no != 0) {
if ((XGIbios_mode[i].vesa_mode_no_1 == vesamode) ||
(XGIbios_mode[i].vesa_mode_no_2 == vesamode)) {
- xgifb_mode_idx = i;
+ xgifb_info->mode_idx = i;
j = 1;
break;
}
i++;
}
+
+invalid:
if (!j)
printk(KERN_INFO "XGIfb: Invalid VESA mode 0x%x'\n", vesamode);
}
-static int XGIfb_GetXG21LVDSData(void)
+static int XGIfb_GetXG21LVDSData(struct xgifb_video_info *xgifb_info)
{
u8 tmp;
- unsigned char *pData;
+ void __iomem *data = xgifb_info->mmio_vbase + 0x20000;
int i, j, k;
tmp = xgifb_reg_get(XGISR, 0x1e);
xgifb_reg_set(XGISR, 0x1e, tmp | 4);
- pData = xgi_video_info.mmio_vbase + 0x20000;
- if ((pData[0x0] == 0x55) &&
- (pData[0x1] == 0xAA) &&
- (pData[0x65] & 0x1)) {
- i = pData[0x316] | (pData[0x317] << 8);
- j = pData[i - 1];
+ if ((readb(data) == 0x55) &&
+ (readb(data + 1) == 0xAA) &&
+ (readb(data + 0x65) & 0x1)) {
+ i = readw(data + 0x316);
+ j = readb(data + i - 1);
if (j == 0xff)
j = 1;
k = 0;
do {
- XGI21_LCDCapList[k].LVDS_Capability = pData[i]
- | (pData[i + 1] << 8);
- XGI21_LCDCapList[k].LVDSHT = pData[i + 2] | (pData[i
- + 3] << 8);
- XGI21_LCDCapList[k].LVDSVT = pData[i + 4] | (pData[i
- + 5] << 8);
- XGI21_LCDCapList[k].LVDSHDE = pData[i + 6] | (pData[i
- + 7] << 8);
- XGI21_LCDCapList[k].LVDSVDE = pData[i + 8] | (pData[i
- + 9] << 8);
- XGI21_LCDCapList[k].LVDSHFP = pData[i + 10] | (pData[i
- + 11] << 8);
- XGI21_LCDCapList[k].LVDSVFP = pData[i + 12] | (pData[i
- + 13] << 8);
- XGI21_LCDCapList[k].LVDSHSYNC = pData[i + 14]
- | (pData[i + 15] << 8);
- XGI21_LCDCapList[k].LVDSVSYNC = pData[i + 16]
- | (pData[i + 17] << 8);
- XGI21_LCDCapList[k].VCLKData1 = pData[i + 18];
- XGI21_LCDCapList[k].VCLKData2 = pData[i + 19];
- XGI21_LCDCapList[k].PSC_S1 = pData[i + 20];
- XGI21_LCDCapList[k].PSC_S2 = pData[i + 21];
- XGI21_LCDCapList[k].PSC_S3 = pData[i + 22];
- XGI21_LCDCapList[k].PSC_S4 = pData[i + 23];
- XGI21_LCDCapList[k].PSC_S5 = pData[i + 24];
+ XGI21_LCDCapList[k].LVDS_Capability = readw(data + i);
+ XGI21_LCDCapList[k].LVDSHT = readw(data + i + 2);
+ XGI21_LCDCapList[k].LVDSVT = readw(data + i + 4);
+ XGI21_LCDCapList[k].LVDSHDE = readw(data + i + 6);
+ XGI21_LCDCapList[k].LVDSVDE = readw(data + i + 8);
+ XGI21_LCDCapList[k].LVDSHFP = readw(data + i + 10);
+ XGI21_LCDCapList[k].LVDSVFP = readw(data + i + 12);
+ XGI21_LCDCapList[k].LVDSHSYNC = readw(data + i + 14);
+ XGI21_LCDCapList[k].LVDSVSYNC = readw(data + i + 16);
+ XGI21_LCDCapList[k].VCLKData1 = readb(data + i + 18);
+ XGI21_LCDCapList[k].VCLKData2 = readb(data + i + 19);
+ XGI21_LCDCapList[k].PSC_S1 = readb(data + i + 20);
+ XGI21_LCDCapList[k].PSC_S2 = readb(data + i + 21);
+ XGI21_LCDCapList[k].PSC_S3 = readb(data + i + 22);
+ XGI21_LCDCapList[k].PSC_S4 = readb(data + i + 23);
+ XGI21_LCDCapList[k].PSC_S5 = readb(data + i + 24);
i += 25;
j--;
k++;
@@ -582,13 +501,13 @@ static int XGIfb_GetXG21LVDSData(void)
return 0;
}
-static int XGIfb_validate_mode(int myindex)
+static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
{
u16 xres, yres;
+ struct xgi_hw_device_info *hw_info = &xgifb_info->hw_info;
- if (xgi_video_info.chip == XG21) {
- if ((xgi_video_info.disp_state & DISPTYPE_DISP2)
- == DISPTYPE_LCD) {
+ if (xgifb_info->chip == XG21) {
+ if (xgifb_info->display2 == XGIFB_DISP_LCD) {
xres = XGI21_LCDCapList[0].LVDSHDE;
yres = XGI21_LCDCapList[0].LVDSVDE;
if (XGIbios_mode[myindex].xres > xres)
@@ -607,15 +526,15 @@ static int XGIfb_validate_mode(int myindex)
}
/* FIXME: for now, all is valid on XG27 */
- if (xgi_video_info.chip == XG27)
+ if (xgifb_info->chip == XG27)
return myindex;
if (!(XGIbios_mode[myindex].chipset & MD_XGI315))
return -1;
- switch (xgi_video_info.disp_state & DISPTYPE_DISP2) {
- case DISPTYPE_LCD:
- switch (XGIhw_ext.ulCRT2LCDType) {
+ switch (xgifb_info->display2) {
+ case XGIFB_DISP_LCD:
+ switch (hw_info->ulCRT2LCDType) {
case LCD_640x480:
xres = 640;
yres = 480;
@@ -671,13 +590,13 @@ static int XGIfb_validate_mode(int myindex)
return -1;
if (XGIbios_mode[myindex].yres > yres)
return -1;
- if ((XGIhw_ext.ulExternalChip == 0x01) || /* LVDS */
- (XGIhw_ext.ulExternalChip == 0x05)) { /* LVDS+Chrontel */
+ if ((hw_info->ulExternalChip == 0x01) || /* LVDS */
+ (hw_info->ulExternalChip == 0x05)) { /* LVDS+Chrontel */
switch (XGIbios_mode[myindex].xres) {
case 512:
if (XGIbios_mode[myindex].yres != 512)
return -1;
- if (XGIhw_ext.ulCRT2LCDType == LCD_1024x600)
+ if (hw_info->ulCRT2LCDType == LCD_1024x600)
return -1;
break;
case 640:
@@ -695,13 +614,13 @@ static int XGIfb_validate_mode(int myindex)
(XGIbios_mode[myindex].yres != 768))
return -1;
if ((XGIbios_mode[myindex].yres == 600) &&
- (XGIhw_ext.ulCRT2LCDType != LCD_1024x600))
+ (hw_info->ulCRT2LCDType != LCD_1024x600))
return -1;
break;
case 1152:
if ((XGIbios_mode[myindex].yres) != 768)
return -1;
- if (XGIhw_ext.ulCRT2LCDType != LCD_1152x768)
+ if (hw_info->ulCRT2LCDType != LCD_1152x768)
return -1;
break;
case 1280:
@@ -709,7 +628,7 @@ static int XGIfb_validate_mode(int myindex)
(XGIbios_mode[myindex].yres != 1024))
return -1;
if ((XGIbios_mode[myindex].yres == 768) &&
- (XGIhw_ext.ulCRT2LCDType != LCD_1280x768))
+ (hw_info->ulCRT2LCDType != LCD_1280x768))
return -1;
break;
case 1400:
@@ -747,7 +666,7 @@ static int XGIfb_validate_mode(int myindex)
(XGIbios_mode[myindex].yres != 1024))
return -1;
if (XGIbios_mode[myindex].yres == 960) {
- if (XGIhw_ext.ulCRT2LCDType ==
+ if (hw_info->ulCRT2LCDType ==
LCD_1400x1050)
return -1;
}
@@ -765,28 +684,28 @@ static int XGIfb_validate_mode(int myindex)
}
}
break;
- case DISPTYPE_TV:
+ case XGIFB_DISP_TV:
switch (XGIbios_mode[myindex].xres) {
case 512:
case 640:
case 800:
break;
case 720:
- if (xgi_video_info.TV_type == TVMODE_NTSC) {
+ if (xgifb_info->TV_type == TVMODE_NTSC) {
if (XGIbios_mode[myindex].yres != 480)
return -1;
- } else if (xgi_video_info.TV_type == TVMODE_PAL) {
+ } else if (xgifb_info->TV_type == TVMODE_PAL) {
if (XGIbios_mode[myindex].yres != 576)
return -1;
}
/* TW: LVDS/CHRONTEL does not support 720 */
- if (xgi_video_info.hasVB == HASVB_LVDS_CHRONTEL ||
- xgi_video_info.hasVB == HASVB_CHRONTEL) {
+ if (xgifb_info->hasVB == HASVB_LVDS_CHRONTEL ||
+ xgifb_info->hasVB == HASVB_CHRONTEL) {
return -1;
}
break;
case 1024:
- if (xgi_video_info.TV_type == TVMODE_NTSC) {
+ if (xgifb_info->TV_type == TVMODE_NTSC) {
if (XGIbios_mode[myindex].bpp == 32)
return -1;
}
@@ -795,10 +714,12 @@ static int XGIfb_validate_mode(int myindex)
return -1;
}
break;
- case DISPTYPE_CRT2:
+ case XGIFB_DISP_CRT:
if (XGIbios_mode[myindex].xres > 1280)
return -1;
break;
+ case XGIFB_DISP_NONE:
+ break;
}
return myindex;
@@ -823,49 +744,52 @@ static void XGIfb_search_crt2type(const char *name)
printk(KERN_INFO "XGIfb: Invalid CRT2 type: %s\n", name);
}
-static u8 XGIfb_search_refresh_rate(unsigned int rate)
+static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
+ unsigned int rate)
{
u16 xres, yres;
int i = 0;
- xres = XGIbios_mode[xgifb_mode_idx].xres;
- yres = XGIbios_mode[xgifb_mode_idx].yres;
+ xres = XGIbios_mode[xgifb_info->mode_idx].xres;
+ yres = XGIbios_mode[xgifb_info->mode_idx].yres;
- XGIfb_rate_idx = 0;
+ xgifb_info->rate_idx = 0;
while ((XGIfb_vrate[i].idx != 0) && (XGIfb_vrate[i].xres <= xres)) {
if ((XGIfb_vrate[i].xres == xres) &&
(XGIfb_vrate[i].yres == yres)) {
if (XGIfb_vrate[i].refresh == rate) {
- XGIfb_rate_idx = XGIfb_vrate[i].idx;
+ xgifb_info->rate_idx = XGIfb_vrate[i].idx;
break;
} else if (XGIfb_vrate[i].refresh > rate) {
if ((XGIfb_vrate[i].refresh - rate) <= 3) {
DPRINTK("XGIfb: Adjusting rate from %d up to %d\n",
rate, XGIfb_vrate[i].refresh);
- XGIfb_rate_idx = XGIfb_vrate[i].idx;
- xgi_video_info.refresh_rate =
+ xgifb_info->rate_idx =
+ XGIfb_vrate[i].idx;
+ xgifb_info->refresh_rate =
XGIfb_vrate[i].refresh;
} else if (((rate - XGIfb_vrate[i - 1].refresh)
<= 2) && (XGIfb_vrate[i].idx
!= 1)) {
DPRINTK("XGIfb: Adjusting rate from %d down to %d\n",
rate, XGIfb_vrate[i-1].refresh);
- XGIfb_rate_idx = XGIfb_vrate[i - 1].idx;
- xgi_video_info.refresh_rate =
+ xgifb_info->rate_idx =
+ XGIfb_vrate[i - 1].idx;
+ xgifb_info->refresh_rate =
XGIfb_vrate[i - 1].refresh;
}
break;
} else if ((rate - XGIfb_vrate[i].refresh) <= 2) {
DPRINTK("XGIfb: Adjusting rate from %d down to %d\n",
rate, XGIfb_vrate[i].refresh);
- XGIfb_rate_idx = XGIfb_vrate[i].idx;
+ xgifb_info->rate_idx = XGIfb_vrate[i].idx;
break;
}
}
i++;
}
- if (XGIfb_rate_idx > 0) {
- return XGIfb_rate_idx;
+ if (xgifb_info->rate_idx > 0) {
+ return xgifb_info->rate_idx;
} else {
printk(KERN_INFO "XGIfb: Unsupported rate %d for %dx%d\n",
rate, xres, yres);
@@ -891,13 +815,14 @@ static void XGIfb_search_tvstd(const char *name)
/* ----------- FBDev related routines for all series ----------- */
-static void XGIfb_bpp_to_var(struct fb_var_screeninfo *var)
+static void XGIfb_bpp_to_var(struct xgifb_video_info *xgifb_info,
+ struct fb_var_screeninfo *var)
{
switch (var->bits_per_pixel) {
case 8:
var->red.offset = var->green.offset = var->blue.offset = 0;
var->red.length = var->green.length = var->blue.length = 6;
- xgi_video_info.video_cmap_len = 256;
+ xgifb_info->video_cmap_len = 256;
break;
case 16:
var->red.offset = 11;
@@ -908,7 +833,7 @@ static void XGIfb_bpp_to_var(struct fb_var_screeninfo *var)
var->blue.length = 5;
var->transp.offset = 0;
var->transp.length = 0;
- xgi_video_info.video_cmap_len = 16;
+ xgifb_info->video_cmap_len = 16;
break;
case 32:
var->red.offset = 16;
@@ -919,45 +844,45 @@ static void XGIfb_bpp_to_var(struct fb_var_screeninfo *var)
var->blue.length = 8;
var->transp.offset = 24;
var->transp.length = 8;
- xgi_video_info.video_cmap_len = 16;
+ xgifb_info->video_cmap_len = 16;
break;
}
}
/* --------------------- SetMode routines ------------------------- */
-static void XGIfb_pre_setmode(void)
+static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
{
u8 cr30 = 0, cr31 = 0;
cr31 = xgifb_reg_get(XGICR, 0x31);
cr31 &= ~0x60;
- switch (xgi_video_info.disp_state & DISPTYPE_DISP2) {
- case DISPTYPE_CRT2:
+ switch (xgifb_info->display2) {
+ case XGIFB_DISP_CRT:
cr30 = (XGI_VB_OUTPUT_CRT2 | XGI_SIMULTANEOUS_VIEW_ENABLE);
cr31 |= XGI_DRIVER_MODE;
break;
- case DISPTYPE_LCD:
+ case XGIFB_DISP_LCD:
cr30 = (XGI_VB_OUTPUT_LCD | XGI_SIMULTANEOUS_VIEW_ENABLE);
cr31 |= XGI_DRIVER_MODE;
break;
- case DISPTYPE_TV:
- if (xgi_video_info.TV_type == TVMODE_HIVISION)
+ case XGIFB_DISP_TV:
+ if (xgifb_info->TV_type == TVMODE_HIVISION)
cr30 = (XGI_VB_OUTPUT_HIVISION
| XGI_SIMULTANEOUS_VIEW_ENABLE);
- else if (xgi_video_info.TV_plug == TVPLUG_SVIDEO)
+ else if (xgifb_info->TV_plug == TVPLUG_SVIDEO)
cr30 = (XGI_VB_OUTPUT_SVIDEO
| XGI_SIMULTANEOUS_VIEW_ENABLE);
- else if (xgi_video_info.TV_plug == TVPLUG_COMPOSITE)
+ else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE)
cr30 = (XGI_VB_OUTPUT_COMPOSITE
| XGI_SIMULTANEOUS_VIEW_ENABLE);
- else if (xgi_video_info.TV_plug == TVPLUG_SCART)
+ else if (xgifb_info->TV_plug == TVPLUG_SCART)
cr30 = (XGI_VB_OUTPUT_SCART
| XGI_SIMULTANEOUS_VIEW_ENABLE);
cr31 |= XGI_DRIVER_MODE;
- if (XGIfb_tvmode == 1 || xgi_video_info.TV_type == TVMODE_PAL)
+ if (XGIfb_tvmode == 1 || xgifb_info->TV_type == TVMODE_PAL)
cr31 |= 0x01;
else
cr31 &= ~0x01;
@@ -969,10 +894,11 @@ static void XGIfb_pre_setmode(void)
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30);
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR31, cr31);
- xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR33, (XGIfb_rate_idx & 0x0F));
+ xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR33,
+ (xgifb_info->rate_idx & 0x0F));
}
-static void XGIfb_post_setmode(void)
+static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
{
u8 reg;
unsigned char doit = 1;
@@ -982,21 +908,21 @@ static void XGIfb_post_setmode(void)
xgifb_reg_and_or(XGISR,0x0E, 0xF0, 0x01);
*test*
*/
- if (xgi_video_info.video_bpp == 8) {
+ if (xgifb_info->video_bpp == 8) {
/* TW: We can't switch off CRT1 on LVDS/Chrontel
* in 8bpp Modes */
- if ((xgi_video_info.hasVB == HASVB_LVDS) ||
- (xgi_video_info.hasVB == HASVB_LVDS_CHRONTEL)) {
+ if ((xgifb_info->hasVB == HASVB_LVDS) ||
+ (xgifb_info->hasVB == HASVB_LVDS_CHRONTEL)) {
doit = 0;
}
/* TW: We can't switch off CRT1 on 301B-DH
* in 8bpp Modes if using LCD */
- if (xgi_video_info.disp_state & DISPTYPE_LCD)
+ if (xgifb_info->display2 == XGIFB_DISP_LCD)
doit = 0;
}
/* TW: We can't switch off CRT1 if bridge is in slave mode */
- if (xgi_video_info.hasVB != HASVB_NONE) {
+ if (xgifb_info->hasVB != HASVB_NONE) {
reg = xgifb_reg_get(XGIPART1, 0x00);
if ((reg & 0x50) == 0x10)
@@ -1015,49 +941,54 @@ static void XGIfb_post_setmode(void)
xgifb_reg_and(XGISR, IND_XGI_RAMDAC_CONTROL, ~0x04);
- if ((xgi_video_info.disp_state & DISPTYPE_TV) && (xgi_video_info.hasVB
- == HASVB_301)) {
+ if (xgifb_info->display2 == XGIFB_DISP_TV &&
+ xgifb_info->hasVB == HASVB_301) {
reg = xgifb_reg_get(XGIPART4, 0x01);
if (reg < 0xB0) { /* Set filter for XGI301 */
- switch (xgi_video_info.video_width) {
+ int filter_tb;
+
+ switch (xgifb_info->video_width) {
case 320:
- filter_tb = (xgi_video_info.TV_type ==
+ filter_tb = (xgifb_info->TV_type ==
TVMODE_NTSC) ? 4 : 12;
break;
case 640:
- filter_tb = (xgi_video_info.TV_type ==
+ filter_tb = (xgifb_info->TV_type ==
TVMODE_NTSC) ? 5 : 13;
break;
case 720:
- filter_tb = (xgi_video_info.TV_type ==
+ filter_tb = (xgifb_info->TV_type ==
TVMODE_NTSC) ? 6 : 14;
break;
case 800:
- filter_tb = (xgi_video_info.TV_type ==
+ filter_tb = (xgifb_info->TV_type ==
TVMODE_NTSC) ? 7 : 15;
break;
default:
+ filter_tb = 0;
filter = -1;
break;
}
- xgifb_reg_or(XGIPART1, XGIfb_CRT2_write_enable, 0x01);
+ xgifb_reg_or(XGIPART1,
+ IND_XGI_CRT2_WRITE_ENABLE_315,
+ 0x01);
- if (xgi_video_info.TV_type == TVMODE_NTSC) {
+ if (xgifb_info->TV_type == TVMODE_NTSC) {
xgifb_reg_and(XGIPART2, 0x3a, 0x1f);
- if (xgi_video_info.TV_plug == TVPLUG_SVIDEO) {
+ if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
xgifb_reg_and(XGIPART2, 0x30, 0xdf);
- } else if (xgi_video_info.TV_plug
+ } else if (xgifb_info->TV_plug
== TVPLUG_COMPOSITE) {
xgifb_reg_or(XGIPART2, 0x30, 0x20);
- switch (xgi_video_info.video_width) {
+ switch (xgifb_info->video_width) {
case 640:
xgifb_reg_set(XGIPART2,
0x35,
@@ -1103,20 +1034,20 @@ static void XGIfb_post_setmode(void)
}
}
- } else if (xgi_video_info.TV_type == TVMODE_PAL) {
+ } else if (xgifb_info->TV_type == TVMODE_PAL) {
xgifb_reg_and(XGIPART2, 0x3A, 0x1F);
- if (xgi_video_info.TV_plug == TVPLUG_SVIDEO) {
+ if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
xgifb_reg_and(XGIPART2, 0x30, 0xDF);
- } else if (xgi_video_info.TV_plug
+ } else if (xgifb_info->TV_plug
== TVPLUG_COMPOSITE) {
xgifb_reg_or(XGIPART2, 0x30, 0x20);
- switch (xgi_video_info.video_width) {
+ switch (xgifb_info->video_width) {
case 640:
xgifb_reg_set(XGIPART2,
0x35,
@@ -1203,7 +1134,8 @@ static void XGIfb_post_setmode(void)
static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
struct fb_info *info)
{
-
+ struct xgifb_video_info *xgifb_info = info->par;
+ struct xgi_hw_device_info *hw_info = &xgifb_info->hw_info;
unsigned int htotal = var->left_margin + var->xres + var->right_margin
+ var->hsync_len;
unsigned int vtotal = var->upper_margin + var->yres + var->lower_margin
@@ -1241,58 +1173,61 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
if (var->pixclock && htotal && vtotal) {
drate = 1000000000 / var->pixclock;
hrate = (drate * 1000) / htotal;
- xgi_video_info.refresh_rate = (unsigned int) (hrate * 2
+ xgifb_info->refresh_rate = (unsigned int) (hrate * 2
/ vtotal);
} else {
- xgi_video_info.refresh_rate = 60;
+ xgifb_info->refresh_rate = 60;
}
printk(KERN_DEBUG "XGIfb: Change mode to %dx%dx%d-%dHz\n",
var->xres,
var->yres,
var->bits_per_pixel,
- xgi_video_info.refresh_rate);
+ xgifb_info->refresh_rate);
- old_mode = xgifb_mode_idx;
- xgifb_mode_idx = 0;
+ old_mode = xgifb_info->mode_idx;
+ xgifb_info->mode_idx = 0;
- while ((XGIbios_mode[xgifb_mode_idx].mode_no != 0)
- && (XGIbios_mode[xgifb_mode_idx].xres <= var->xres)) {
- if ((XGIbios_mode[xgifb_mode_idx].xres == var->xres)
- && (XGIbios_mode[xgifb_mode_idx].yres
- == var->yres)
- && (XGIbios_mode[xgifb_mode_idx].bpp
+ while ((XGIbios_mode[xgifb_info->mode_idx].mode_no != 0) &&
+ (XGIbios_mode[xgifb_info->mode_idx].xres <= var->xres)) {
+ if ((XGIbios_mode[xgifb_info->mode_idx].xres == var->xres) &&
+ (XGIbios_mode[xgifb_info->mode_idx].yres == var->yres) &&
+ (XGIbios_mode[xgifb_info->mode_idx].bpp
== var->bits_per_pixel)) {
- XGIfb_mode_no = XGIbios_mode[xgifb_mode_idx].mode_no;
found_mode = 1;
break;
}
- xgifb_mode_idx++;
+ xgifb_info->mode_idx++;
}
if (found_mode)
- xgifb_mode_idx = XGIfb_validate_mode(xgifb_mode_idx);
+ xgifb_info->mode_idx = XGIfb_validate_mode(xgifb_info,
+ xgifb_info->mode_idx);
else
- xgifb_mode_idx = -1;
+ xgifb_info->mode_idx = -1;
- if (xgifb_mode_idx < 0) {
+ if (xgifb_info->mode_idx < 0) {
printk(KERN_ERR "XGIfb: Mode %dx%dx%d not supported\n",
var->xres, var->yres, var->bits_per_pixel);
- xgifb_mode_idx = old_mode;
+ xgifb_info->mode_idx = old_mode;
return -EINVAL;
}
- if (XGIfb_search_refresh_rate(xgi_video_info.refresh_rate) == 0) {
- XGIfb_rate_idx = XGIbios_mode[xgifb_mode_idx].rate_idx;
- xgi_video_info.refresh_rate = 60;
+ if (XGIfb_search_refresh_rate(xgifb_info,
+ xgifb_info->refresh_rate) == 0) {
+ xgifb_info->rate_idx =
+ XGIbios_mode[xgifb_info->mode_idx].rate_idx;
+ xgifb_info->refresh_rate = 60;
}
if (isactive) {
- XGIfb_pre_setmode();
- if (XGISetModeNew(&XGIhw_ext, XGIfb_mode_no) == 0) {
+ XGIfb_pre_setmode(xgifb_info);
+ if (XGISetModeNew(hw_info,
+ XGIbios_mode[xgifb_info->mode_idx].mode_no)
+ == 0) {
printk(KERN_ERR "XGIfb: Setting mode[0x%x] failed\n",
- XGIfb_mode_no);
+ XGIbios_mode[xgifb_info->mode_idx].mode_no);
return -EINVAL;
}
info->fix.line_length = ((info->var.xres_virtual
@@ -1305,66 +1240,68 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
0x0E,
(info->fix.line_length & 0xff00) >> 8);
- XGIfb_post_setmode();
+ XGIfb_post_setmode(xgifb_info);
DPRINTK("XGIfb: Set new mode: %dx%dx%d-%d\n",
- XGIbios_mode[xgifb_mode_idx].xres,
- XGIbios_mode[xgifb_mode_idx].yres,
- XGIbios_mode[xgifb_mode_idx].bpp,
- xgi_video_info.refresh_rate);
-
- xgi_video_info.video_bpp = XGIbios_mode[xgifb_mode_idx].bpp;
- xgi_video_info.video_vwidth = info->var.xres_virtual;
- xgi_video_info.video_width = XGIbios_mode[xgifb_mode_idx].xres;
- xgi_video_info.video_vheight = info->var.yres_virtual;
- xgi_video_info.video_height = XGIbios_mode[xgifb_mode_idx].yres;
- xgi_video_info.org_x = xgi_video_info.org_y = 0;
- xgi_video_info.video_linelength = info->var.xres_virtual
- * (xgi_video_info.video_bpp >> 3);
- switch (xgi_video_info.video_bpp) {
+ XGIbios_mode[xgifb_info->mode_idx].xres,
+ XGIbios_mode[xgifb_info->mode_idx].yres,
+ XGIbios_mode[xgifb_info->mode_idx].bpp,
+ xgifb_info->refresh_rate);
+
+ xgifb_info->video_bpp = XGIbios_mode[xgifb_info->mode_idx].bpp;
+ xgifb_info->video_vwidth = info->var.xres_virtual;
+ xgifb_info->video_width =
+ XGIbios_mode[xgifb_info->mode_idx].xres;
+ xgifb_info->video_vheight = info->var.yres_virtual;
+ xgifb_info->video_height =
+ XGIbios_mode[xgifb_info->mode_idx].yres;
+ xgifb_info->org_x = xgifb_info->org_y = 0;
+ xgifb_info->video_linelength = info->var.xres_virtual
+ * (xgifb_info->video_bpp >> 3);
+ switch (xgifb_info->video_bpp) {
case 8:
- xgi_video_info.DstColor = 0x0000;
- xgi_video_info.XGI310_AccelDepth = 0x00000000;
- xgi_video_info.video_cmap_len = 256;
+ xgifb_info->DstColor = 0x0000;
+ xgifb_info->XGI310_AccelDepth = 0x00000000;
+ xgifb_info->video_cmap_len = 256;
#if defined(__powerpc__)
cr_data = xgifb_reg_get(XGICR, 0x4D);
xgifb_reg_set(XGICR, 0x4D, (cr_data & 0xE0));
#endif
break;
case 16:
- xgi_video_info.DstColor = 0x8000;
- xgi_video_info.XGI310_AccelDepth = 0x00010000;
+ xgifb_info->DstColor = 0x8000;
+ xgifb_info->XGI310_AccelDepth = 0x00010000;
#if defined(__powerpc__)
cr_data = xgifb_reg_get(XGICR, 0x4D);
xgifb_reg_set(XGICR, 0x4D, ((cr_data & 0xE0) | 0x0B));
#endif
- xgi_video_info.video_cmap_len = 16;
+ xgifb_info->video_cmap_len = 16;
break;
case 32:
- xgi_video_info.DstColor = 0xC000;
- xgi_video_info.XGI310_AccelDepth = 0x00020000;
- xgi_video_info.video_cmap_len = 16;
+ xgifb_info->DstColor = 0xC000;
+ xgifb_info->XGI310_AccelDepth = 0x00020000;
+ xgifb_info->video_cmap_len = 16;
#if defined(__powerpc__)
cr_data = xgifb_reg_get(XGICR, 0x4D);
xgifb_reg_set(XGICR, 0x4D, ((cr_data & 0xE0) | 0x15));
#endif
break;
default:
- xgi_video_info.video_cmap_len = 16;
+ xgifb_info->video_cmap_len = 16;
printk(KERN_ERR "XGIfb: Unsupported depth %d",
- xgi_video_info.video_bpp);
+ xgifb_info->video_bpp);
break;
}
}
- XGIfb_bpp_to_var(var); /*update ARGB info*/
+ XGIfb_bpp_to_var(xgifb_info, var); /*update ARGB info*/
DEBUGPRN("End of do_set_var");
dumpVGAReg();
return 0;
}
-#ifdef XGIFB_PAN
-static int XGIfb_pan_var(struct fb_var_screeninfo *var)
+static int XGIfb_pan_var(struct xgifb_video_info *xgifb_info,
+ struct fb_var_screeninfo *var)
{
unsigned int base;
@@ -1403,8 +1340,8 @@ static int XGIfb_pan_var(struct fb_var_screeninfo *var)
xgifb_reg_set(XGISR, 0x37, (base >> 24) & 0x03);
xgifb_reg_and_or(XGISR, 0x37, 0xDF, (base >> 21) & 0x04);
- if (xgi_video_info.disp_state & DISPTYPE_DISP2) {
- xgifb_reg_or(XGIPART1, XGIfb_CRT2_write_enable, 0x01);
+ if (xgifb_info->display2 != XGIFB_DISP_NONE) {
+ xgifb_reg_or(XGIPART1, IND_XGI_CRT2_WRITE_ENABLE_315, 0x01);
xgifb_reg_set(XGIPART1, 0x06, (base & 0xFF));
xgifb_reg_set(XGIPART1, 0x05, ((base >> 8) & 0xFF));
xgifb_reg_set(XGIPART1, 0x04, ((base >> 16) & 0xFF));
@@ -1416,7 +1353,6 @@ static int XGIfb_pan_var(struct fb_var_screeninfo *var)
/* printk("End of pan_var"); */
return 0;
}
-#endif
static int XGIfb_open(struct fb_info *info, int user)
{
@@ -1449,6 +1385,8 @@ static int XGIfb_get_cmap_len(const struct fb_var_screeninfo *var)
static int XGIfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp, struct fb_info *info)
{
+ struct xgifb_video_info *xgifb_info = info->par;
+
if (regno >= XGIfb_get_cmap_len(&info->var))
return 1;
@@ -1458,7 +1396,7 @@ static int XGIfb_setcolreg(unsigned regno, unsigned red, unsigned green,
outb((red >> 10), XGIDACD);
outb((green >> 10), XGIDACD);
outb((blue >> 10), XGIDACD);
- if (xgi_video_info.disp_state & DISPTYPE_DISP2) {
+ if (xgifb_info->display2 != XGIFB_DISP_NONE) {
outb(regno, XGIDAC2A);
outb((red >> 8), XGIDAC2D);
outb((green >> 8), XGIDAC2D);
@@ -1486,30 +1424,28 @@ static int XGIfb_setcolreg(unsigned regno, unsigned red, unsigned green,
static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con,
struct fb_info *info)
{
+ struct xgifb_video_info *xgifb_info = info->par;
+
DEBUGPRN("inside get_fix");
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- strcpy(fix->id, myid);
+ fix->smem_start = xgifb_info->video_base;
- fix->smem_start = xgi_video_info.video_base;
+ fix->smem_len = xgifb_info->video_size;
- fix->smem_len = xgi_video_info.video_size;
-
- fix->type = video_type;
+ fix->type = FB_TYPE_PACKED_PIXELS;
fix->type_aux = 0;
- if (xgi_video_info.video_bpp == 8)
+ if (xgifb_info->video_bpp == 8)
fix->visual = FB_VISUAL_PSEUDOCOLOR;
else
fix->visual = FB_VISUAL_DIRECTCOLOR;
fix->xpanstep = 0;
-#ifdef XGIFB_PAN
if (XGIfb_ypan)
fix->ypanstep = 1;
-#endif
fix->ywrapstep = 0;
- fix->line_length = xgi_video_info.video_linelength;
- fix->mmio_start = xgi_video_info.mmio_base;
- fix->mmio_len = xgi_video_info.mmio_size;
+ fix->line_length = xgifb_info->video_linelength;
+ fix->mmio_start = xgifb_info->mmio_base;
+ fix->mmio_len = xgifb_info->mmio_size;
fix->accel = FB_ACCEL_XGI_XABRE;
DEBUGPRN("end of get_fix");
@@ -1531,6 +1467,7 @@ static int XGIfb_set_par(struct fb_info *info)
static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
+ struct xgifb_video_info *xgifb_info = info->par;
unsigned int htotal = var->left_margin + var->xres + var->right_margin
+ var->hsync_len;
unsigned int vtotal = 0;
@@ -1561,15 +1498,15 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (var->pixclock && htotal && vtotal) {
drate = 1000000000 / var->pixclock;
hrate = (drate * 1000) / htotal;
- xgi_video_info.refresh_rate =
+ xgifb_info->refresh_rate =
(unsigned int) (hrate * 2 / vtotal);
printk(KERN_DEBUG
"%s: pixclock = %d ,htotal=%d, vtotal=%d\n"
"%s: drate=%d, hrate=%d, refresh_rate=%d\n",
__func__, var->pixclock, htotal, vtotal,
- __func__, drate, hrate, xgi_video_info.refresh_rate);
+ __func__, drate, hrate, xgifb_info->refresh_rate);
} else {
- xgi_video_info.refresh_rate = 60;
+ xgifb_info->refresh_rate = 60;
}
/*
@@ -1591,7 +1528,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if ((XGIbios_mode[search_idx].xres == var->xres) &&
(XGIbios_mode[search_idx].yres == var->yres) &&
(XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) {
- if (XGIfb_validate_mode(search_idx) > 0) {
+ if (XGIfb_validate_mode(xgifb_info, search_idx) > 0) {
found_mode = 1;
break;
}
@@ -1609,7 +1546,8 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
(var->yres <= XGIbios_mode[search_idx].yres) &&
(var->bits_per_pixel ==
XGIbios_mode[search_idx].bpp)) {
- if (XGIfb_validate_mode(search_idx) > 0) {
+ if (XGIfb_validate_mode(xgifb_info,
+ search_idx) > 0) {
found_mode = 1;
break;
}
@@ -1632,7 +1570,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
/* TW: TODO: Check the refresh rate */
/* Adapt RGB settings */
- XGIfb_bpp_to_var(var);
+ XGIfb_bpp_to_var(xgifb_info, var);
/* Sanity check for offsets */
if (var->xoffset < 0)
@@ -1648,7 +1586,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
} /* else { */
/* TW: Now patch yres_virtual if we use panning */
/* May I do this? */
- /* var->yres_virtual = xgi_video_info.heapstart /
+ /* var->yres_virtual = xgifb_info->heapstart /
(var->xres * (var->bits_per_pixel >> 3)); */
/* if (var->yres_virtual <= var->yres) { */
/* TW: Paranoia check */
@@ -1673,11 +1611,11 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
return 0;
}
-#ifdef XGIFB_PAN
static int XGIfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
int err;
+ struct xgifb_video_info *xgifb_info = info->par;
/* printk("\nInside pan_display:\n"); */
@@ -1696,7 +1634,7 @@ static int XGIfb_pan_display(struct fb_var_screeninfo *var,
> info->var.yres_virtual)
return -EINVAL;
}
- err = XGIfb_pan_var(var);
+ err = XGIfb_pan_var(xgifb_info, var);
if (err < 0)
return err;
@@ -1710,10 +1648,10 @@ static int XGIfb_pan_display(struct fb_var_screeninfo *var,
/* printk("End of pan_display\n"); */
return 0;
}
-#endif
static int XGIfb_blank(int blank, struct fb_info *info)
{
+ struct xgifb_video_info *xgifb_info = info->par;
u8 reg;
reg = xgifb_reg_get(XGICR, 0x17);
@@ -1736,9 +1674,7 @@ static struct fb_ops XGIfb_ops = {
.fb_check_var = XGIfb_check_var,
.fb_set_par = XGIfb_set_par,
.fb_setcolreg = XGIfb_setcolreg,
-#ifdef XGIFB_PAN
.fb_pan_display = XGIfb_pan_display,
-#endif
.fb_blank = XGIfb_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
@@ -1750,51 +1686,51 @@ static struct fb_ops XGIfb_ops = {
/* for XGI 315/550/650/740/330 */
-static int XGIfb_get_dram_size(void)
+static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
{
u8 ChannelNum, tmp;
u8 reg = 0;
/* xorg driver sets 32MB * 1 channel */
- if (xgi_video_info.chip == XG27)
+ if (xgifb_info->chip == XG27)
xgifb_reg_set(XGISR, IND_XGI_DRAM_SIZE, 0x51);
reg = xgifb_reg_get(XGISR, IND_XGI_DRAM_SIZE);
switch ((reg & XGI_DRAM_SIZE_MASK) >> 4) {
case XGI_DRAM_SIZE_1MB:
- xgi_video_info.video_size = 0x100000;
+ xgifb_info->video_size = 0x100000;
break;
case XGI_DRAM_SIZE_2MB:
- xgi_video_info.video_size = 0x200000;
+ xgifb_info->video_size = 0x200000;
break;
case XGI_DRAM_SIZE_4MB:
- xgi_video_info.video_size = 0x400000;
+ xgifb_info->video_size = 0x400000;
break;
case XGI_DRAM_SIZE_8MB:
- xgi_video_info.video_size = 0x800000;
+ xgifb_info->video_size = 0x800000;
break;
case XGI_DRAM_SIZE_16MB:
- xgi_video_info.video_size = 0x1000000;
+ xgifb_info->video_size = 0x1000000;
break;
case XGI_DRAM_SIZE_32MB:
- xgi_video_info.video_size = 0x2000000;
+ xgifb_info->video_size = 0x2000000;
break;
case XGI_DRAM_SIZE_64MB:
- xgi_video_info.video_size = 0x4000000;
+ xgifb_info->video_size = 0x4000000;
break;
case XGI_DRAM_SIZE_128MB:
- xgi_video_info.video_size = 0x8000000;
+ xgifb_info->video_size = 0x8000000;
break;
case XGI_DRAM_SIZE_256MB:
- xgi_video_info.video_size = 0x10000000;
+ xgifb_info->video_size = 0x10000000;
break;
default:
return -1;
}
tmp = (reg & 0x0c) >> 2;
- switch (xgi_video_info.chip) {
+ switch (xgifb_info->chip) {
case XG20:
case XG21:
case XG27:
@@ -1830,25 +1766,25 @@ static int XGIfb_get_dram_size(void)
break;
}
- xgi_video_info.video_size = xgi_video_info.video_size * ChannelNum;
+ xgifb_info->video_size = xgifb_info->video_size * ChannelNum;
/* PLiad fixed for benchmarking and fb set */
- /* xgi_video_info.video_size = 0x200000; */ /* 1024x768x16 */
- /* xgi_video_info.video_size = 0x1000000; */ /* benchmark */
+ /* xgifb_info->video_size = 0x200000; */ /* 1024x768x16 */
+ /* xgifb_info->video_size = 0x1000000; */ /* benchmark */
printk("XGIfb: SR14=%x DramSzie %x ChannelNum %x\n",
reg,
- xgi_video_info.video_size, ChannelNum);
+ xgifb_info->video_size, ChannelNum);
return 0;
}
-static void XGIfb_detect_VB(void)
+static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
{
u8 cr32, temp = 0;
- xgi_video_info.TV_plug = xgi_video_info.TV_type = 0;
+ xgifb_info->TV_plug = xgifb_info->TV_type = 0;
- switch (xgi_video_info.hasVB) {
+ switch (xgifb_info->hasVB) {
case HASVB_LVDS_CHRONTEL:
case HASVB_CHRONTEL:
break;
@@ -1871,35 +1807,35 @@ static void XGIfb_detect_VB(void)
if (XGIfb_crt2type != -1)
/* TW: Override with option */
- xgi_video_info.disp_state = XGIfb_crt2type;
+ xgifb_info->display2 = XGIfb_crt2type;
else if (cr32 & XGI_VB_TV)
- xgi_video_info.disp_state = DISPTYPE_TV;
+ xgifb_info->display2 = XGIFB_DISP_TV;
else if (cr32 & XGI_VB_LCD)
- xgi_video_info.disp_state = DISPTYPE_LCD;
+ xgifb_info->display2 = XGIFB_DISP_LCD;
else if (cr32 & XGI_VB_CRT2)
- xgi_video_info.disp_state = DISPTYPE_CRT2;
+ xgifb_info->display2 = XGIFB_DISP_CRT;
else
- xgi_video_info.disp_state = 0;
+ xgifb_info->display2 = XGIFB_DISP_NONE;
if (XGIfb_tvplug != -1)
/* PR/TW: Override with option */
- xgi_video_info.TV_plug = XGIfb_tvplug;
+ xgifb_info->TV_plug = XGIfb_tvplug;
else if (cr32 & XGI_VB_HIVISION) {
- xgi_video_info.TV_type = TVMODE_HIVISION;
- xgi_video_info.TV_plug = TVPLUG_SVIDEO;
+ xgifb_info->TV_type = TVMODE_HIVISION;
+ xgifb_info->TV_plug = TVPLUG_SVIDEO;
} else if (cr32 & XGI_VB_SVIDEO)
- xgi_video_info.TV_plug = TVPLUG_SVIDEO;
+ xgifb_info->TV_plug = TVPLUG_SVIDEO;
else if (cr32 & XGI_VB_COMPOSITE)
- xgi_video_info.TV_plug = TVPLUG_COMPOSITE;
+ xgifb_info->TV_plug = TVPLUG_COMPOSITE;
else if (cr32 & XGI_VB_SCART)
- xgi_video_info.TV_plug = TVPLUG_SCART;
+ xgifb_info->TV_plug = TVPLUG_SCART;
- if (xgi_video_info.TV_type == 0) {
+ if (xgifb_info->TV_type == 0) {
temp = xgifb_reg_get(XGICR, 0x38);
if (temp & 0x10)
- xgi_video_info.TV_type = TVMODE_PAL;
+ xgifb_info->TV_type = TVMODE_PAL;
else
- xgi_video_info.TV_type = TVMODE_NTSC;
+ xgifb_info->TV_type = TVMODE_NTSC;
}
/* TW: Copy forceCRT1 option to CRT1off if option is given */
@@ -1911,37 +1847,37 @@ static void XGIfb_detect_VB(void)
}
}
-static int XGIfb_has_VB(void)
+static int XGIfb_has_VB(struct xgifb_video_info *xgifb_info)
{
u8 vb_chipid;
vb_chipid = xgifb_reg_get(XGIPART4, 0x00);
switch (vb_chipid) {
case 0x01:
- xgi_video_info.hasVB = HASVB_301;
+ xgifb_info->hasVB = HASVB_301;
break;
case 0x02:
- xgi_video_info.hasVB = HASVB_302;
+ xgifb_info->hasVB = HASVB_302;
break;
default:
- xgi_video_info.hasVB = HASVB_NONE;
+ xgifb_info->hasVB = HASVB_NONE;
return 0;
}
return 1;
}
-static void XGIfb_get_VB_type(void)
+static void XGIfb_get_VB_type(struct xgifb_video_info *xgifb_info)
{
u8 reg;
- if (!XGIfb_has_VB()) {
+ if (!XGIfb_has_VB(xgifb_info)) {
reg = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR37);
switch ((reg & XGI_EXTERNAL_CHIP_MASK) >> 1) {
case XGI310_EXTERNAL_CHIP_LVDS:
- xgi_video_info.hasVB = HASVB_LVDS;
+ xgifb_info->hasVB = HASVB_LVDS;
break;
case XGI310_EXTERNAL_CHIP_LVDS_CHRONTEL:
- xgi_video_info.hasVB = HASVB_LVDS_CHRONTEL;
+ xgifb_info->hasVB = HASVB_LVDS_CHRONTEL;
break;
default:
break;
@@ -1949,49 +1885,47 @@ static void XGIfb_get_VB_type(void)
}
}
-XGIINITSTATIC int __init XGIfb_setup(char *options)
+static int __init xgifb_optval(char *fullopt, int validx)
{
- char *this_opt;
+ unsigned long lres;
- xgi_video_info.refresh_rate = 0;
+ if (kstrtoul(fullopt + validx, 0, &lres) < 0 || lres > INT_MAX) {
+ pr_err("xgifb: invalid value for option: %s\n", fullopt);
+ return 0;
+ }
+ return lres;
+}
- printk(KERN_INFO "XGIfb: Options %s\n", options);
+static int __init XGIfb_setup(char *options)
+{
+ char *this_opt;
if (!options || !*options)
return 0;
+ pr_info("xgifb: options: %s\n", options);
+
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt)
continue;
if (!strncmp(this_opt, "mode:", 5)) {
- XGIfb_search_mode(this_opt + 5);
- } else if (!strncmp(this_opt, "vesa:", 5)) {
- XGIfb_search_vesamode(simple_strtoul(
- this_opt + 5, NULL, 0));
- } else if (!strncmp(this_opt, "mode:", 5)) {
- XGIfb_search_mode(this_opt + 5);
+ mode = this_opt + 5;
} else if (!strncmp(this_opt, "vesa:", 5)) {
- XGIfb_search_vesamode(simple_strtoul(
- this_opt + 5, NULL, 0));
+ vesa = xgifb_optval(this_opt, 5);
} else if (!strncmp(this_opt, "vrate:", 6)) {
- xgi_video_info.refresh_rate = simple_strtoul(
- this_opt + 6, NULL, 0);
+ refresh_rate = xgifb_optval(this_opt, 6);
} else if (!strncmp(this_opt, "rate:", 5)) {
- xgi_video_info.refresh_rate = simple_strtoul(
- this_opt + 5, NULL, 0);
- } else if (!strncmp(this_opt, "off", 3)) {
- XGIfb_off = 1;
+ refresh_rate = xgifb_optval(this_opt, 5);
} else if (!strncmp(this_opt, "crt1off", 7)) {
XGIfb_crt1off = 1;
} else if (!strncmp(this_opt, "filter:", 7)) {
- filter = (int)simple_strtoul(this_opt + 7, NULL, 0);
+ filter = xgifb_optval(this_opt, 7);
} else if (!strncmp(this_opt, "forcecrt2type:", 14)) {
XGIfb_search_crt2type(this_opt + 14);
} else if (!strncmp(this_opt, "forcecrt1:", 10)) {
- XGIfb_forcecrt1 = (int)simple_strtoul(
- this_opt + 10, NULL, 0);
+ XGIfb_forcecrt1 = xgifb_optval(this_opt, 10);
} else if (!strncmp(this_opt, "tvmode:", 7)) {
XGIfb_search_tvstd(this_opt + 7);
} else if (!strncmp(this_opt, "tvstandard:", 11)) {
@@ -1999,32 +1933,15 @@ XGIINITSTATIC int __init XGIfb_setup(char *options)
} else if (!strncmp(this_opt, "dstn", 4)) {
enable_dstn = 1;
/* TW: DSTN overrules forcecrt2type */
- XGIfb_crt2type = DISPTYPE_LCD;
- } else if (!strncmp(this_opt, "pdc:", 4)) {
- XGIfb_pdc = simple_strtoul(this_opt + 4, NULL, 0);
- if (XGIfb_pdc & ~0x3c) {
- printk(KERN_INFO "XGIfb: Illegal pdc parameter\n");
- XGIfb_pdc = 0;
- }
+ XGIfb_crt2type = XGIFB_DISP_LCD;
} else if (!strncmp(this_opt, "noypan", 6)) {
XGIfb_ypan = 0;
} else if (!strncmp(this_opt, "userom:", 7)) {
- XGIfb_userom = (int)simple_strtoul(
- this_opt + 7, NULL, 0);
- /* } else if (!strncmp(this_opt, "useoem:", 7)) { */
- /* XGIfb_useoem = (int)simple_strtoul(
- this_opt + 7, NULL, 0); */
+ XGIfb_userom = xgifb_optval(this_opt, 7);
} else {
- XGIfb_search_mode(this_opt);
- /* printk(KERN_INFO "XGIfb: Invalid option %s\n",
- this_opt); */
+ mode = this_opt;
}
-
- /* TW: Panning only with acceleration */
- XGIfb_ypan = 0;
-
}
- printk("\nxgifb: outa xgifb_setup 3450");
return 0;
}
@@ -2056,44 +1973,46 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
u8 reg, reg1;
u8 CR48, CR38;
int ret;
+ bool xgi21_drvlcdcaplist = false;
+ struct fb_info *fb_info;
+ struct xgifb_video_info *xgifb_info;
+ struct xgi_hw_device_info *hw_info;
- if (XGIfb_off)
- return -ENXIO;
-
- XGIfb_registered = 0;
-
- memset(&XGIhw_ext, 0, sizeof(struct xgi_hw_device_info));
- fb_info = framebuffer_alloc(sizeof(struct fb_info), &pdev->dev);
+ fb_info = framebuffer_alloc(sizeof(*xgifb_info), &pdev->dev);
if (!fb_info)
return -ENOMEM;
- xgi_video_info.chip_id = pdev->device;
+ xgifb_info = fb_info->par;
+ hw_info = &xgifb_info->hw_info;
+ xgifb_info->fb_info = fb_info;
+ xgifb_info->chip_id = pdev->device;
pci_read_config_byte(pdev,
PCI_REVISION_ID,
- &xgi_video_info.revision_id);
- XGIhw_ext.jChipRevision = xgi_video_info.revision_id;
-
- xgi_video_info.pcibus = pdev->bus->number;
- xgi_video_info.pcislot = PCI_SLOT(pdev->devfn);
- xgi_video_info.pcifunc = PCI_FUNC(pdev->devfn);
- xgi_video_info.subsysvendor = pdev->subsystem_vendor;
- xgi_video_info.subsysdevice = pdev->subsystem_device;
-
- xgi_video_info.video_base = pci_resource_start(pdev, 0);
- xgi_video_info.mmio_base = pci_resource_start(pdev, 1);
- xgi_video_info.mmio_size = pci_resource_len(pdev, 1);
- xgi_video_info.vga_base = pci_resource_start(pdev, 2) + 0x30;
- XGIhw_ext.pjIOAddress = (unsigned char *)xgi_video_info.vga_base;
+ &xgifb_info->revision_id);
+ hw_info->jChipRevision = xgifb_info->revision_id;
+
+ xgifb_info->pcibus = pdev->bus->number;
+ xgifb_info->pcislot = PCI_SLOT(pdev->devfn);
+ xgifb_info->pcifunc = PCI_FUNC(pdev->devfn);
+ xgifb_info->subsysvendor = pdev->subsystem_vendor;
+ xgifb_info->subsysdevice = pdev->subsystem_device;
+
+ xgifb_info->video_base = pci_resource_start(pdev, 0);
+ xgifb_info->mmio_base = pci_resource_start(pdev, 1);
+ xgifb_info->mmio_size = pci_resource_len(pdev, 1);
+ xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30;
+ hw_info->pjIOAddress = (unsigned char *)xgifb_info->vga_base;
/* XGI_Pr.RelIO = ioremap(pci_resource_start(pdev, 2), 128) + 0x30; */
printk("XGIfb: Relocate IO address: %lx [%08lx]\n",
- (unsigned long)pci_resource_start(pdev, 2), XGI_Pr.RelIO);
+ (unsigned long)pci_resource_start(pdev, 2),
+ xgifb_info->dev_info.RelIO);
if (pci_enable_device(pdev)) {
ret = -EIO;
goto error;
}
- XGIRegInit(&XGI_Pr, (unsigned long)XGIhw_ext.pjIOAddress);
+ XGIRegInit(&xgifb_info->dev_info, (unsigned long)hw_info->pjIOAddress);
xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
reg1 = xgifb_reg_get(XGISR, IND_XGI_PASSWORD);
@@ -2104,417 +2023,413 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
goto error;
}
- switch (xgi_video_info.chip_id) {
+ switch (xgifb_info->chip_id) {
case PCI_DEVICE_ID_XG_20:
xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN);
CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1);
if (CR48&GPIOG_READ)
- xgi_video_info.chip = XG21;
+ xgifb_info->chip = XG21;
else
- xgi_video_info.chip = XG20;
- XGIfb_CRT2_write_enable = IND_XGI_CRT2_WRITE_ENABLE_315;
+ xgifb_info->chip = XG20;
break;
case PCI_DEVICE_ID_XG_40:
- xgi_video_info.chip = XG40;
- XGIfb_CRT2_write_enable = IND_XGI_CRT2_WRITE_ENABLE_315;
+ xgifb_info->chip = XG40;
break;
case PCI_DEVICE_ID_XG_41:
- xgi_video_info.chip = XG41;
- XGIfb_CRT2_write_enable = IND_XGI_CRT2_WRITE_ENABLE_315;
+ xgifb_info->chip = XG41;
break;
case PCI_DEVICE_ID_XG_42:
- xgi_video_info.chip = XG42;
- XGIfb_CRT2_write_enable = IND_XGI_CRT2_WRITE_ENABLE_315;
+ xgifb_info->chip = XG42;
break;
case PCI_DEVICE_ID_XG_27:
- xgi_video_info.chip = XG27;
- XGIfb_CRT2_write_enable = IND_XGI_CRT2_WRITE_ENABLE_315;
+ xgifb_info->chip = XG27;
break;
default:
ret = -ENODEV;
goto error;
}
- printk("XGIfb:chipid = %x\n", xgi_video_info.chip);
- XGIhw_ext.jChipType = xgi_video_info.chip;
+ printk("XGIfb:chipid = %x\n", xgifb_info->chip);
+ hw_info->jChipType = xgifb_info->chip;
- if ((xgi_video_info.chip == XG21) || (XGIfb_userom)) {
- XGIhw_ext.pjVirtualRomBase = xgifb_copy_rom(pdev);
- if (XGIhw_ext.pjVirtualRomBase)
+ if ((xgifb_info->chip == XG21) || (XGIfb_userom)) {
+ hw_info->pjVirtualRomBase = xgifb_copy_rom(pdev);
+ if (hw_info->pjVirtualRomBase)
printk(KERN_INFO "XGIfb: Video ROM found and mapped to %p\n",
- XGIhw_ext.pjVirtualRomBase);
+ hw_info->pjVirtualRomBase);
else
printk(KERN_INFO "XGIfb: Video ROM not found\n");
} else {
- XGIhw_ext.pjVirtualRomBase = NULL;
+ hw_info->pjVirtualRomBase = NULL;
printk(KERN_INFO "XGIfb: Video ROM usage disabled\n");
}
- XGIhw_ext.pQueryVGAConfigSpace = &XGIfb_query_VGA_config_space;
- if (XGIfb_get_dram_size()) {
+ if (XGIfb_get_dram_size(xgifb_info)) {
printk(KERN_INFO "XGIfb: Fatal error: Unable to determine RAM size.\n");
ret = -ENODEV;
goto error;
}
- if ((xgifb_mode_idx < 0) ||
- ((XGIbios_mode[xgifb_mode_idx].mode_no) != 0xFF)) {
- /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
- xgifb_reg_or(XGISR,
- IND_XGI_PCI_ADDRESS_SET,
- (XGI_PCI_ADDR_ENABLE | XGI_MEM_MAP_IO_ENABLE));
- /* Enable 2D accelerator engine */
- xgifb_reg_or(XGISR, IND_XGI_MODULE_ENABLE, XGI_ENABLE_2D);
- }
+ /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
+ xgifb_reg_or(XGISR,
+ IND_XGI_PCI_ADDRESS_SET,
+ (XGI_PCI_ADDR_ENABLE | XGI_MEM_MAP_IO_ENABLE));
+ /* Enable 2D accelerator engine */
+ xgifb_reg_or(XGISR, IND_XGI_MODULE_ENABLE, XGI_ENABLE_2D);
- XGIhw_ext.ulVideoMemorySize = xgi_video_info.video_size;
+ hw_info->ulVideoMemorySize = xgifb_info->video_size;
- if (!request_mem_region(xgi_video_info.video_base,
- xgi_video_info.video_size,
+ if (!request_mem_region(xgifb_info->video_base,
+ xgifb_info->video_size,
"XGIfb FB")) {
printk("unable request memory size %x",
- xgi_video_info.video_size);
+ xgifb_info->video_size);
printk(KERN_ERR "XGIfb: Fatal error: Unable to reserve frame buffer memory\n");
printk(KERN_ERR "XGIfb: Is there another framebuffer driver active?\n");
ret = -ENODEV;
goto error;
}
- if (!request_mem_region(xgi_video_info.mmio_base,
- xgi_video_info.mmio_size,
+ if (!request_mem_region(xgifb_info->mmio_base,
+ xgifb_info->mmio_size,
"XGIfb MMIO")) {
printk(KERN_ERR "XGIfb: Fatal error: Unable to reserve MMIO region\n");
ret = -ENODEV;
goto error_0;
}
- xgi_video_info.video_vbase = XGIhw_ext.pjVideoMemoryAddress =
- ioremap(xgi_video_info.video_base, xgi_video_info.video_size);
- xgi_video_info.mmio_vbase = ioremap(xgi_video_info.mmio_base,
- xgi_video_info.mmio_size);
+ xgifb_info->video_vbase = hw_info->pjVideoMemoryAddress =
+ ioremap(xgifb_info->video_base, xgifb_info->video_size);
+ xgifb_info->mmio_vbase = ioremap(xgifb_info->mmio_base,
+ xgifb_info->mmio_size);
printk(KERN_INFO "XGIfb: Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
- xgi_video_info.video_base,
- xgi_video_info.video_vbase,
- xgi_video_info.video_size / 1024);
+ xgifb_info->video_base,
+ xgifb_info->video_vbase,
+ xgifb_info->video_size / 1024);
printk(KERN_INFO "XGIfb: MMIO at 0x%lx, mapped to 0x%p, size %ldk\n",
- xgi_video_info.mmio_base, xgi_video_info.mmio_vbase,
- xgi_video_info.mmio_size / 1024);
+ xgifb_info->mmio_base, xgifb_info->mmio_vbase,
+ xgifb_info->mmio_size / 1024);
printk("XGIfb: XGIInitNew() ...");
- if (XGIInitNew(&XGIhw_ext))
+ pci_set_drvdata(pdev, xgifb_info);
+ if (XGIInitNew(pdev))
printk("OK\n");
else
printk("Fail\n");
- xgi_video_info.mtrr = (unsigned int) 0;
-
- if ((xgifb_mode_idx < 0) ||
- ((XGIbios_mode[xgifb_mode_idx].mode_no) != 0xFF)) {
- xgi_video_info.hasVB = HASVB_NONE;
- if ((xgi_video_info.chip == XG20) ||
- (xgi_video_info.chip == XG27)) {
- xgi_video_info.hasVB = HASVB_NONE;
- } else if (xgi_video_info.chip == XG21) {
- CR38 = xgifb_reg_get(XGICR, 0x38);
- if ((CR38&0xE0) == 0xC0) {
- xgi_video_info.disp_state = DISPTYPE_LCD;
- if (!XGIfb_GetXG21LVDSData()) {
- int m;
- for (m = 0; m < sizeof(XGI21_LCDCapList)/sizeof(struct XGI21_LVDSCapStruct); m++) {
- if ((XGI21_LCDCapList[m].LVDSHDE == XGIbios_mode[xgifb_mode_idx].xres) &&
- (XGI21_LCDCapList[m].LVDSVDE == XGIbios_mode[xgifb_mode_idx].yres)) {
- xgifb_reg_set(XGI_Pr.P3d4, 0x36, m);
- }
- }
- }
- } else if ((CR38&0xE0) == 0x60) {
- xgi_video_info.hasVB = HASVB_CHRONTEL;
- } else {
- xgi_video_info.hasVB = HASVB_NONE;
- }
+ xgifb_info->mtrr = (unsigned int) 0;
+
+ xgifb_info->hasVB = HASVB_NONE;
+ if ((xgifb_info->chip == XG20) ||
+ (xgifb_info->chip == XG27)) {
+ xgifb_info->hasVB = HASVB_NONE;
+ } else if (xgifb_info->chip == XG21) {
+ CR38 = xgifb_reg_get(XGICR, 0x38);
+ if ((CR38&0xE0) == 0xC0) {
+ xgifb_info->display2 = XGIFB_DISP_LCD;
+ if (!XGIfb_GetXG21LVDSData(xgifb_info))
+ xgi21_drvlcdcaplist = true;
+ } else if ((CR38&0xE0) == 0x60) {
+ xgifb_info->hasVB = HASVB_CHRONTEL;
} else {
- XGIfb_get_VB_type();
+ xgifb_info->hasVB = HASVB_NONE;
}
+ } else {
+ XGIfb_get_VB_type(xgifb_info);
+ }
- XGIhw_ext.ujVBChipID = VB_CHIP_UNKNOWN;
-
- XGIhw_ext.ulExternalChip = 0;
+ hw_info->ujVBChipID = VB_CHIP_UNKNOWN;
- switch (xgi_video_info.hasVB) {
- case HASVB_301:
- reg = xgifb_reg_get(XGIPART4, 0x01);
- if (reg >= 0xE0) {
- XGIhw_ext.ujVBChipID = VB_CHIP_302LV;
- printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
- } else if (reg >= 0xD0) {
- XGIhw_ext.ujVBChipID = VB_CHIP_301LV;
- printk(KERN_INFO "XGIfb: XGI301LV bridge detected (revision 0x%02x)\n", reg);
- }
- /* else if (reg >= 0xB0) {
- XGIhw_ext.ujVBChipID = VB_CHIP_301B;
- reg1 = xgifb_reg_get(XGIPART4, 0x23);
- printk("XGIfb: XGI301B bridge detected\n");
- } */
- else {
- XGIhw_ext.ujVBChipID = VB_CHIP_301;
- printk("XGIfb: XGI301 bridge detected\n");
- }
- break;
- case HASVB_302:
- reg = xgifb_reg_get(XGIPART4, 0x01);
- if (reg >= 0xE0) {
- XGIhw_ext.ujVBChipID = VB_CHIP_302LV;
- printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
- } else if (reg >= 0xD0) {
- XGIhw_ext.ujVBChipID = VB_CHIP_301LV;
- printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
- } else if (reg >= 0xB0) {
- reg1 = xgifb_reg_get(XGIPART4, 0x23);
-
- XGIhw_ext.ujVBChipID = VB_CHIP_302B;
+ hw_info->ulExternalChip = 0;
- } else {
- XGIhw_ext.ujVBChipID = VB_CHIP_302;
- printk(KERN_INFO "XGIfb: XGI302 bridge detected\n");
- }
- break;
- case HASVB_LVDS:
- XGIhw_ext.ulExternalChip = 0x1;
- printk(KERN_INFO "XGIfb: LVDS transmitter detected\n");
- break;
- case HASVB_TRUMPION:
- XGIhw_ext.ulExternalChip = 0x2;
- printk(KERN_INFO "XGIfb: Trumpion Zurac LVDS scaler detected\n");
- break;
- case HASVB_CHRONTEL:
- XGIhw_ext.ulExternalChip = 0x4;
- printk(KERN_INFO "XGIfb: Chrontel TV encoder detected\n");
- break;
- case HASVB_LVDS_CHRONTEL:
- XGIhw_ext.ulExternalChip = 0x5;
- printk(KERN_INFO "XGIfb: LVDS transmitter and Chrontel TV encoder detected\n");
- break;
- default:
- printk(KERN_INFO "XGIfb: No or unknown bridge type detected\n");
- break;
+ switch (xgifb_info->hasVB) {
+ case HASVB_301:
+ reg = xgifb_reg_get(XGIPART4, 0x01);
+ if (reg >= 0xE0) {
+ hw_info->ujVBChipID = VB_CHIP_302LV;
+ printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
+ } else if (reg >= 0xD0) {
+ hw_info->ujVBChipID = VB_CHIP_301LV;
+ printk(KERN_INFO "XGIfb: XGI301LV bridge detected (revision 0x%02x)\n", reg);
}
-
- if (xgi_video_info.hasVB != HASVB_NONE)
- XGIfb_detect_VB();
-
- if (xgi_video_info.disp_state & DISPTYPE_DISP2) {
- if (XGIfb_crt1off)
- xgi_video_info.disp_state |= DISPMODE_SINGLE;
- else
- xgi_video_info.disp_state |= (DISPMODE_MIRROR |
- DISPTYPE_CRT1);
- } else {
- xgi_video_info.disp_state = DISPMODE_SINGLE |
- DISPTYPE_CRT1;
+ /* else if (reg >= 0xB0) {
+ hw_info->ujVBChipID = VB_CHIP_301B;
+ reg1 = xgifb_reg_get(XGIPART4, 0x23);
+ printk("XGIfb: XGI301B bridge detected\n");
+ } */
+ else {
+ hw_info->ujVBChipID = VB_CHIP_301;
+ printk("XGIfb: XGI301 bridge detected\n");
}
+ break;
+ case HASVB_302:
+ reg = xgifb_reg_get(XGIPART4, 0x01);
+ if (reg >= 0xE0) {
+ hw_info->ujVBChipID = VB_CHIP_302LV;
+ printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
+ } else if (reg >= 0xD0) {
+ hw_info->ujVBChipID = VB_CHIP_301LV;
+ printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
+ } else if (reg >= 0xB0) {
+ reg1 = xgifb_reg_get(XGIPART4, 0x23);
- if (xgi_video_info.disp_state & DISPTYPE_LCD) {
- if (!enable_dstn) {
- reg = xgifb_reg_get(XGICR, IND_XGI_LCD_PANEL);
- reg &= 0x0f;
- XGIhw_ext.ulCRT2LCDType = XGI310paneltype[reg];
+ hw_info->ujVBChipID = VB_CHIP_302B;
- } else {
- /* TW: FSTN/DSTN */
- XGIhw_ext.ulCRT2LCDType = LCD_320x480;
- }
+ } else {
+ hw_info->ujVBChipID = VB_CHIP_302;
+ printk(KERN_INFO "XGIfb: XGI302 bridge detected\n");
}
+ break;
+ case HASVB_LVDS:
+ hw_info->ulExternalChip = 0x1;
+ printk(KERN_INFO "XGIfb: LVDS transmitter detected\n");
+ break;
+ case HASVB_TRUMPION:
+ hw_info->ulExternalChip = 0x2;
+ printk(KERN_INFO "XGIfb: Trumpion Zurac LVDS scaler detected\n");
+ break;
+ case HASVB_CHRONTEL:
+ hw_info->ulExternalChip = 0x4;
+ printk(KERN_INFO "XGIfb: Chrontel TV encoder detected\n");
+ break;
+ case HASVB_LVDS_CHRONTEL:
+ hw_info->ulExternalChip = 0x5;
+ printk(KERN_INFO "XGIfb: LVDS transmitter and Chrontel TV encoder detected\n");
+ break;
+ default:
+ printk(KERN_INFO "XGIfb: No or unknown bridge type detected\n");
+ break;
+ }
- XGIfb_detectedpdc = 0;
+ if (xgifb_info->hasVB != HASVB_NONE)
+ XGIfb_detect_VB(xgifb_info);
- XGIfb_detectedlcda = 0xff;
+ if (xgifb_info->display2 == XGIFB_DISP_LCD) {
+ if (!enable_dstn) {
+ reg = xgifb_reg_get(XGICR, IND_XGI_LCD_PANEL);
+ reg &= 0x0f;
+ hw_info->ulCRT2LCDType = XGI310paneltype[reg];
- /* TW: Try to find about LCDA */
+ } else {
+ /* TW: FSTN/DSTN */
+ hw_info->ulCRT2LCDType = LCD_320x480;
+ }
+ }
- if ((XGIhw_ext.ujVBChipID == VB_CHIP_302B) ||
- (XGIhw_ext.ujVBChipID == VB_CHIP_301LV) ||
- (XGIhw_ext.ujVBChipID == VB_CHIP_302LV)) {
- int tmp;
- tmp = xgifb_reg_get(XGICR, 0x34);
- if (tmp <= 0x13) {
+ if ((hw_info->ujVBChipID == VB_CHIP_302B) ||
+ (hw_info->ujVBChipID == VB_CHIP_301LV) ||
+ (hw_info->ujVBChipID == VB_CHIP_302LV)) {
+ int tmp;
+ tmp = xgifb_reg_get(XGICR, 0x34);
+ if (tmp <= 0x13) {
+ /* Currently on LCDA?
+ *(Some BIOSes leave CR38) */
+ tmp = xgifb_reg_get(XGICR, 0x38);
+ if ((tmp & 0x03) == 0x03) {
+ /* XGI_Pr.XGI_UseLCDA = 1; */
+ } else {
/* Currently on LCDA?
- *(Some BIOSes leave CR38) */
- tmp = xgifb_reg_get(XGICR, 0x38);
- if ((tmp & 0x03) == 0x03) {
+ *(Some newer BIOSes set D0 in CR35) */
+ tmp = xgifb_reg_get(XGICR, 0x35);
+ if (tmp & 0x01) {
/* XGI_Pr.XGI_UseLCDA = 1; */
} else {
- /* Currently on LCDA?
- *(Some newer BIOSes set D0 in CR35) */
- tmp = xgifb_reg_get(XGICR, 0x35);
- if (tmp & 0x01) {
- /* XGI_Pr.XGI_UseLCDA = 1; */
- } else {
- tmp = xgifb_reg_get(XGICR,
- 0x30);
- if (tmp & 0x20) {
- tmp = xgifb_reg_get(
- XGIPART1, 0x13);
- if (tmp & 0x04) {
- /* XGI_Pr.XGI_UseLCDA = 1; */
- }
+ tmp = xgifb_reg_get(XGICR,
+ 0x30);
+ if (tmp & 0x20) {
+ tmp = xgifb_reg_get(
+ XGIPART1, 0x13);
+ if (tmp & 0x04) {
+ /* XGI_Pr.XGI_UseLCDA = 1; */
}
}
}
}
-
}
- if (xgifb_mode_idx >= 0)
- xgifb_mode_idx = XGIfb_validate_mode(xgifb_mode_idx);
+ }
- if (xgifb_mode_idx < 0) {
- switch (xgi_video_info.disp_state & DISPTYPE_DISP2) {
- case DISPTYPE_LCD:
- xgifb_mode_idx = DEFAULT_LCDMODE;
- if (xgi_video_info.chip == XG21)
- xgifb_mode_idx =
- XGIfb_GetXG21DefaultLVDSModeIdx();
- break;
- case DISPTYPE_TV:
- xgifb_mode_idx = DEFAULT_TVMODE;
- break;
- default:
- xgifb_mode_idx = DEFAULT_MODE;
+ xgifb_info->mode_idx = -1;
+
+ if (mode)
+ XGIfb_search_mode(xgifb_info, mode);
+ else if (vesa != -1)
+ XGIfb_search_vesamode(xgifb_info, vesa);
+
+ if (xgifb_info->mode_idx >= 0)
+ xgifb_info->mode_idx =
+ XGIfb_validate_mode(xgifb_info, xgifb_info->mode_idx);
+
+ if (xgifb_info->mode_idx < 0) {
+ if (xgifb_info->display2 == XGIFB_DISP_LCD &&
+ xgifb_info->chip == XG21)
+ xgifb_info->mode_idx =
+ XGIfb_GetXG21DefaultLVDSModeIdx();
+ else
+ xgifb_info->mode_idx = DEFAULT_MODE;
+ }
+
+ if (xgifb_info->mode_idx < 0) {
+ dev_err(&pdev->dev, "no supported video mode found\n");
+ goto error_1;
+ }
+
+ if (xgi21_drvlcdcaplist) {
+ int m;
+
+ for (m = 0; m < ARRAY_SIZE(XGI21_LCDCapList); m++)
+ if ((XGI21_LCDCapList[m].LVDSHDE ==
+ XGIbios_mode[xgifb_info->mode_idx].xres) &&
+ (XGI21_LCDCapList[m].LVDSVDE ==
+ XGIbios_mode[xgifb_info->mode_idx].yres)) {
+ xgifb_reg_set(xgifb_info->dev_info.P3d4,
+ 0x36,
+ m);
break;
}
- }
+ }
- XGIfb_mode_no = XGIbios_mode[xgifb_mode_idx].mode_no;
+ /* yilin set default refresh rate */
+ xgifb_info->refresh_rate = refresh_rate;
+ if (xgifb_info->refresh_rate == 0)
+ xgifb_info->refresh_rate = 60;
+ if (XGIfb_search_refresh_rate(xgifb_info,
+ xgifb_info->refresh_rate) == 0) {
+ xgifb_info->rate_idx =
+ XGIbios_mode[xgifb_info->mode_idx].rate_idx;
+ xgifb_info->refresh_rate = 60;
+ }
- /* yilin set default refresh rate */
- if (xgi_video_info.refresh_rate == 0)
- xgi_video_info.refresh_rate = 60;
- if (XGIfb_search_refresh_rate(
- xgi_video_info.refresh_rate) == 0) {
- XGIfb_rate_idx = XGIbios_mode[xgifb_mode_idx].rate_idx;
- xgi_video_info.refresh_rate = 60;
- }
+ xgifb_info->video_bpp = XGIbios_mode[xgifb_info->mode_idx].bpp;
+ xgifb_info->video_vwidth =
+ xgifb_info->video_width =
+ XGIbios_mode[xgifb_info->mode_idx].xres;
+ xgifb_info->video_vheight =
+ xgifb_info->video_height =
+ XGIbios_mode[xgifb_info->mode_idx].yres;
+ xgifb_info->org_x = xgifb_info->org_y = 0;
+ xgifb_info->video_linelength =
+ xgifb_info->video_width *
+ (xgifb_info->video_bpp >> 3);
+ switch (xgifb_info->video_bpp) {
+ case 8:
+ xgifb_info->DstColor = 0x0000;
+ xgifb_info->XGI310_AccelDepth = 0x00000000;
+ xgifb_info->video_cmap_len = 256;
+ break;
+ case 16:
+ xgifb_info->DstColor = 0x8000;
+ xgifb_info->XGI310_AccelDepth = 0x00010000;
+ xgifb_info->video_cmap_len = 16;
+ break;
+ case 32:
+ xgifb_info->DstColor = 0xC000;
+ xgifb_info->XGI310_AccelDepth = 0x00020000;
+ xgifb_info->video_cmap_len = 16;
+ break;
+ default:
+ xgifb_info->video_cmap_len = 16;
+ printk(KERN_INFO "XGIfb: Unsupported depth %d",
+ xgifb_info->video_bpp);
+ break;
+ }
- xgi_video_info.video_bpp = XGIbios_mode[xgifb_mode_idx].bpp;
- xgi_video_info.video_vwidth =
- xgi_video_info.video_width =
- XGIbios_mode[xgifb_mode_idx].xres;
- xgi_video_info.video_vheight =
- xgi_video_info.video_height =
- XGIbios_mode[xgifb_mode_idx].yres;
- xgi_video_info.org_x = xgi_video_info.org_y = 0;
- xgi_video_info.video_linelength =
- xgi_video_info.video_width *
- (xgi_video_info.video_bpp >> 3);
- switch (xgi_video_info.video_bpp) {
- case 8:
- xgi_video_info.DstColor = 0x0000;
- xgi_video_info.XGI310_AccelDepth = 0x00000000;
- xgi_video_info.video_cmap_len = 256;
- break;
- case 16:
- xgi_video_info.DstColor = 0x8000;
- xgi_video_info.XGI310_AccelDepth = 0x00010000;
- xgi_video_info.video_cmap_len = 16;
- break;
- case 32:
- xgi_video_info.DstColor = 0xC000;
- xgi_video_info.XGI310_AccelDepth = 0x00020000;
- xgi_video_info.video_cmap_len = 16;
- break;
- default:
- xgi_video_info.video_cmap_len = 16;
- printk(KERN_INFO "XGIfb: Unsupported depth %d",
- xgi_video_info.video_bpp);
- break;
+ printk(KERN_INFO "XGIfb: Default mode is %dx%dx%d (%dHz)\n",
+ xgifb_info->video_width,
+ xgifb_info->video_height,
+ xgifb_info->video_bpp,
+ xgifb_info->refresh_rate);
+
+ fb_info->var.red.length = 8;
+ fb_info->var.green.length = 8;
+ fb_info->var.blue.length = 8;
+ fb_info->var.activate = FB_ACTIVATE_NOW;
+ fb_info->var.height = -1;
+ fb_info->var.width = -1;
+ fb_info->var.vmode = FB_VMODE_NONINTERLACED;
+ fb_info->var.xres = xgifb_info->video_width;
+ fb_info->var.xres_virtual = xgifb_info->video_width;
+ fb_info->var.yres = xgifb_info->video_height;
+ fb_info->var.yres_virtual = xgifb_info->video_height;
+ fb_info->var.bits_per_pixel = xgifb_info->video_bpp;
+
+ XGIfb_bpp_to_var(xgifb_info, &fb_info->var);
+
+ fb_info->var.pixclock = (u32) (1000000000 /
+ XGIfb_mode_rate_to_dclock(&xgifb_info->dev_info,
+ hw_info,
+ XGIbios_mode[xgifb_info->mode_idx].mode_no,
+ xgifb_info->rate_idx));
+
+ if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info, hw_info,
+ XGIbios_mode[xgifb_info->mode_idx].mode_no,
+ xgifb_info->rate_idx,
+ &fb_info->var.left_margin,
+ &fb_info->var.right_margin,
+ &fb_info->var.upper_margin,
+ &fb_info->var.lower_margin,
+ &fb_info->var.hsync_len,
+ &fb_info->var.vsync_len,
+ &fb_info->var.sync,
+ &fb_info->var.vmode)) {
+
+ if ((fb_info->var.vmode & FB_VMODE_MASK) ==
+ FB_VMODE_INTERLACED) {
+ fb_info->var.yres <<= 1;
+ fb_info->var.yres_virtual <<= 1;
+ } else if ((fb_info->var.vmode & FB_VMODE_MASK) ==
+ FB_VMODE_DOUBLE) {
+ fb_info->var.pixclock >>= 1;
+ fb_info->var.yres >>= 1;
+ fb_info->var.yres_virtual >>= 1;
}
- printk(KERN_INFO "XGIfb: Default mode is %dx%dx%d (%dHz)\n",
- xgi_video_info.video_width,
- xgi_video_info.video_height,
- xgi_video_info.video_bpp,
- xgi_video_info.refresh_rate);
-
- default_var.xres =
- default_var.xres_virtual =
- xgi_video_info.video_width;
- default_var.yres =
- default_var.yres_virtual =
- xgi_video_info.video_height;
- default_var.bits_per_pixel = xgi_video_info.video_bpp;
-
- XGIfb_bpp_to_var(&default_var);
-
- default_var.pixclock = (u32) (1000000000 /
- XGIfb_mode_rate_to_dclock(&XGI_Pr, &XGIhw_ext,
- XGIfb_mode_no, XGIfb_rate_idx));
-
- if (XGIfb_mode_rate_to_ddata(&XGI_Pr, &XGIhw_ext,
- XGIfb_mode_no, XGIfb_rate_idx,
- &default_var.left_margin, &default_var.right_margin,
- &default_var.upper_margin, &default_var.lower_margin,
- &default_var.hsync_len, &default_var.vsync_len,
- &default_var.sync, &default_var.vmode)) {
-
- if ((default_var.vmode & FB_VMODE_MASK) ==
- FB_VMODE_INTERLACED) {
- default_var.yres <<= 1;
- default_var.yres_virtual <<= 1;
- } else if ((default_var.vmode & FB_VMODE_MASK) ==
- FB_VMODE_DOUBLE) {
- default_var.pixclock >>= 1;
- default_var.yres >>= 1;
- default_var.yres_virtual >>= 1;
- }
+ }
- }
+ strncpy(fb_info->fix.id, "XGI", sizeof(fb_info->fix.id) - 1);
+ fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
+ fb_info->fix.xpanstep = 1;
+ fb_info->fix.ypanstep = 1;
- fb_info->flags = FBINFO_FLAG_DEFAULT;
- fb_info->var = default_var;
- fb_info->fix = XGIfb_fix;
- fb_info->par = &xgi_video_info;
- fb_info->screen_base = xgi_video_info.video_vbase;
- fb_info->fbops = &XGIfb_ops;
- XGIfb_get_fix(&fb_info->fix, -1, fb_info);
- fb_info->pseudo_palette = pseudo_palette;
+ fb_info->flags = FBINFO_FLAG_DEFAULT;
+ fb_info->screen_base = xgifb_info->video_vbase;
+ fb_info->fbops = &XGIfb_ops;
+ XGIfb_get_fix(&fb_info->fix, -1, fb_info);
+ fb_info->pseudo_palette = xgifb_info->pseudo_palette;
- fb_alloc_cmap(&fb_info->cmap, 256 , 0);
+ fb_alloc_cmap(&fb_info->cmap, 256 , 0);
#ifdef CONFIG_MTRR
- xgi_video_info.mtrr = mtrr_add(
- (unsigned int) xgi_video_info.video_base,
- (unsigned int) xgi_video_info.video_size,
- MTRR_TYPE_WRCOMB, 1);
- if (xgi_video_info.mtrr)
- printk(KERN_INFO "XGIfb: Added MTRRs\n");
+ xgifb_info->mtrr = mtrr_add(xgifb_info->video_base,
+ xgifb_info->video_size, MTRR_TYPE_WRCOMB, 1);
+ if (xgifb_info->mtrr >= 0)
+ dev_info(&pdev->dev, "added MTRR\n");
#endif
- if (register_framebuffer(fb_info) < 0) {
- ret = -EINVAL;
- goto error_1;
- }
-
- XGIfb_registered = 1;
-
- printk(KERN_INFO "fb%d: %s frame buffer device, Version %d.%d.%02d\n",
- fb_info->node, myid, VER_MAJOR, VER_MINOR, VER_LEVEL);
-
+ if (register_framebuffer(fb_info) < 0) {
+ ret = -EINVAL;
+ goto error_mtrr;
}
dumpVGAReg();
return 0;
+error_mtrr:
+#ifdef CONFIG_MTRR
+ if (xgifb_info->mtrr >= 0)
+ mtrr_del(xgifb_info->mtrr, xgifb_info->video_base,
+ xgifb_info->video_size);
+#endif /* CONFIG_MTRR */
error_1:
- iounmap(xgi_video_info.mmio_vbase);
- iounmap(xgi_video_info.video_vbase);
- release_mem_region(xgi_video_info.mmio_base, xgi_video_info.mmio_size);
+ iounmap(xgifb_info->mmio_vbase);
+ iounmap(xgifb_info->video_vbase);
+ release_mem_region(xgifb_info->mmio_base, xgifb_info->mmio_size);
error_0:
- release_mem_region(xgi_video_info.video_base,
- xgi_video_info.video_size);
+ release_mem_region(xgifb_info->video_base, xgifb_info->video_size);
error:
- vfree(XGIhw_ext.pjVirtualRomBase);
+ vfree(hw_info->pjVirtualRomBase);
framebuffer_release(fb_info);
return ret;
}
@@ -2525,13 +2440,20 @@ error:
static void __devexit xgifb_remove(struct pci_dev *pdev)
{
+ struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
+ struct fb_info *fb_info = xgifb_info->fb_info;
+
unregister_framebuffer(fb_info);
- iounmap(xgi_video_info.mmio_vbase);
- iounmap(xgi_video_info.video_vbase);
- release_mem_region(xgi_video_info.mmio_base, xgi_video_info.mmio_size);
- release_mem_region(xgi_video_info.video_base,
- xgi_video_info.video_size);
- vfree(XGIhw_ext.pjVirtualRomBase);
+#ifdef CONFIG_MTRR
+ if (xgifb_info->mtrr >= 0)
+ mtrr_del(xgifb_info->mtrr, xgifb_info->video_base,
+ xgifb_info->video_size);
+#endif /* CONFIG_MTRR */
+ iounmap(xgifb_info->mmio_vbase);
+ iounmap(xgifb_info->video_vbase);
+ release_mem_region(xgifb_info->mmio_base, xgifb_info->mmio_size);
+ release_mem_region(xgifb_info->video_base, xgifb_info->video_size);
+ vfree(xgifb_info->hw_info.pjVirtualRomBase);
framebuffer_release(fb_info);
pci_set_drvdata(pdev, NULL);
}
@@ -2543,7 +2465,7 @@ static struct pci_driver xgifb_driver = {
.remove = __devexit_p(xgifb_remove)
};
-XGIINITSTATIC int __init xgifb_init(void)
+static int __init xgifb_init(void)
{
char *option = NULL;
@@ -2554,9 +2476,7 @@ XGIINITSTATIC int __init xgifb_init(void)
return pci_register_driver(&xgifb_driver);
}
-#ifndef MODULE
module_init(xgifb_init);
-#endif
/*****************************************************/
/* MODULE */
@@ -2564,154 +2484,32 @@ module_init(xgifb_init);
#ifdef MODULE
-static char *mode = NULL;
-static int vesa = 0;
-static unsigned int rate = 0;
-static unsigned int mem = 0;
-static char *forcecrt2type = NULL;
-static int forcecrt1 = -1;
-static int pdc = -1;
-static int pdc1 = -1;
-static int noypan = -1;
-static int userom = -1;
-static int useoem = -1;
-static char *tvstandard = NULL;
-static int nocrt2rate = 0;
-static int scalelcd = -1;
-static char *specialtiming = NULL;
-static int lvdshl = -1;
-static int tvxposoffset = 0, tvyposoffset = 0;
-#if !defined(__i386__) && !defined(__x86_64__)
-static int resetcard = 0;
-static int videoram = 0;
-#endif
-
MODULE_DESCRIPTION("Z7 Z9 Z9S Z11 framebuffer device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("XGITECH , Others");
-module_param(mem, int, 0);
-module_param(noypan, int, 0);
-module_param(userom, int, 0);
-module_param(useoem, int, 0);
module_param(mode, charp, 0);
module_param(vesa, int, 0);
-module_param(rate, int, 0);
-module_param(forcecrt1, int, 0);
-module_param(forcecrt2type, charp, 0);
-module_param(scalelcd, int, 0);
-module_param(pdc, int, 0);
-module_param(pdc1, int, 0);
-module_param(specialtiming, charp, 0);
-module_param(lvdshl, int, 0);
-module_param(tvstandard, charp, 0);
-module_param(tvxposoffset, int, 0);
-module_param(tvyposoffset, int, 0);
module_param(filter, int, 0);
-module_param(nocrt2rate, int, 0);
-#if !defined(__i386__) && !defined(__x86_64__)
-module_param(resetcard, int, 0);
-module_param(videoram, int, 0);
-#endif
-
-MODULE_PARM_DESC(noypan,
- "\nIf set to anything other than 0, y-panning will be disabled and scrolling\n"
- "will be performed by redrawing the screen. (default: 0)\n");
MODULE_PARM_DESC(mode,
- "\nSelects the desired default display mode in the format XxYxDepth,\n"
- "eg. 1024x768x16. Other formats supported include XxY-Depth and\n"
- "XxY-Depth@Rate. If the parameter is only one (decimal or hexadecimal)\n"
- "number, it will be interpreted as a VESA mode number. (default: 800x600x8)\n");
+ "\nSelects the desired default display mode in the format XxYxDepth,\n"
+ "eg. 1024x768x16.\n");
MODULE_PARM_DESC(vesa,
- "\nSelects the desired default display mode by VESA defined mode number, eg.\n"
- "0x117 (default: 0x0103)\n");
-
-MODULE_PARM_DESC(rate,
- "\nSelects the desired vertical refresh rate for CRT1 (external VGA) in Hz.\n"
- "If the mode is specified in the format XxY-Depth@Rate, this parameter\n"
- "will be ignored (default: 60)\n");
-
-MODULE_PARM_DESC(forcecrt1,
- "\nNormally, the driver autodetects whether or not CRT1 (external VGA) is\n"
- "connected. With this option, the detection can be overridden (1=CRT1 ON,\n"
- "0=CRT1 OFF) (default: [autodetected])\n");
-
-MODULE_PARM_DESC(forcecrt2type,
- "\nIf this option is omitted, the driver autodetects CRT2 output devices, such as\n"
- "LCD, TV or secondary VGA. With this option, this autodetection can be\n"
- "overridden. Possible parameters are LCD, TV, VGA or NONE. NONE disables CRT2.\n"
- "On systems with a SiS video bridge, parameters SVIDEO, COMPOSITE or SCART can\n"
- "be used instead of TV to override the TV detection. Furthermore, on systems\n"
- "with a SiS video bridge, SVIDEO+COMPOSITE, HIVISION, YPBPR480I, YPBPR480P,\n"
- "YPBPR720P and YPBPR1080I are understood. However, whether or not these work\n"
- "depends on the very hardware in use. (default: [autodetected])\n");
-
-MODULE_PARM_DESC(scalelcd,
- "\nSetting this to 1 will force the driver to scale the LCD image to the panel's\n"
- "native resolution. Setting it to 0 will disable scaling; LVDS panels will\n"
- "show black bars around the image, TMDS panels will probably do the scaling\n"
- "themselves. Default: 1 on LVDS panels, 0 on TMDS panels\n");
-
-MODULE_PARM_DESC(pdc,
- "\nThis is for manually selecting the LCD panel delay compensation. The driver\n"
- "should detect this correctly in most cases; however, sometimes this is not\n"
- "possible. If you see 'small waves' on the LCD, try setting this to 4, 32 or 24\n"
- "on a 300 series chipset; 6 on a 315 series chipset. If the problem persists,\n"
- "try other values (on 300 series: between 4 and 60 in steps of 4; on 315 series:\n"
- "any value from 0 to 31). (default: autodetected, if LCD is active during start)\n");
-
-MODULE_PARM_DESC(pdc1,
- "\nThis is same as pdc, but for LCD-via CRT1. Hence, this is for the 315/330\n"
- "series only. (default: autodetected if LCD is in LCD-via-CRT1 mode during\n"
- "startup) - Note: currently, this has no effect because LCD-via-CRT1 is not\n"
- "implemented yet.\n");
-
-MODULE_PARM_DESC(specialtiming,
- "\nPlease refer to documentation for more information on this option.\n");
-
-MODULE_PARM_DESC(lvdshl,
- "\nPlease refer to documentation for more information on this option.\n");
-
-MODULE_PARM_DESC(tvstandard,
- "\nThis allows overriding the BIOS default for the TV standard. Valid choices are\n"
- "pal, ntsc, palm and paln. (default: [auto; pal or ntsc only])\n");
-
-MODULE_PARM_DESC(tvxposoffset,
- "\nRelocate TV output horizontally. Possible parameters: -32 through 32.\n"
- "Default: 0\n");
-
-MODULE_PARM_DESC(tvyposoffset,
- "\nRelocate TV output vertically. Possible parameters: -32 through 32.\n"
- "Default: 0\n");
+ "\nSelects the desired default display mode by VESA mode number, eg.\n"
+ "0x117.\n");
MODULE_PARM_DESC(filter,
"\nSelects TV flicker filter type (only for systems with a SiS301 video bridge).\n"
"(Possible values 0-7, default: [no filter])\n");
-MODULE_PARM_DESC(nocrt2rate,
- "\nSetting this to 1 will force the driver to use the default refresh rate for\n"
- "CRT2 if CRT2 type is VGA. (default: 0, use same rate as CRT1)\n");
-
-static int __init xgifb_init_module(void)
-{
- printk("\nXGIfb_init_module");
- if (mode)
- XGIfb_search_mode(mode);
- else if (vesa != -1)
- XGIfb_search_vesamode(vesa);
-
- return xgifb_init();
-}
-
static void __exit xgifb_remove_module(void)
{
pci_unregister_driver(&xgifb_driver);
printk(KERN_DEBUG "xgifb: Module unloaded\n");
}
-module_init(xgifb_init_module);
module_exit(xgifb_remove_module);
#endif /* /MODULE */
diff --git a/drivers/staging/xgifb/XGIfb.h b/drivers/staging/xgifb/XGIfb.h
index 45b6015d160..7611846a703 100644
--- a/drivers/staging/xgifb/XGIfb.h
+++ b/drivers/staging/xgifb/XGIfb.h
@@ -3,15 +3,15 @@
#include <linux/ioctl.h>
#include <linux/types.h>
-#define DISPTYPE_CRT1 0x00000008L
-#define DISPTYPE_CRT2 0x00000004L
-#define DISPTYPE_LCD 0x00000002L
-#define DISPTYPE_TV 0x00000001L
-#define DISPTYPE_DISP1 DISPTYPE_CRT1
-#define DISPTYPE_DISP2 (DISPTYPE_CRT2 | DISPTYPE_LCD | DISPTYPE_TV)
-#define DISPMODE_SINGLE 0x00000020L
-#define DISPMODE_MIRROR 0x00000010L
-#define DISPMODE_DUALVIEW 0x00000040L
+#include "vb_struct.h"
+#include "vgatypes.h"
+
+enum xgifb_display_type {
+ XGIFB_DISP_NONE = 0,
+ XGIFB_DISP_CRT,
+ XGIFB_DISP_LCD,
+ XGIFB_DISP_TV,
+};
#define HASVB_NONE 0x00
#define HASVB_301 0x01
@@ -19,13 +19,8 @@
#define HASVB_TRUMPION 0x04
#define HASVB_LVDS_CHRONTEL 0x10
#define HASVB_302 0x20
-#define HASVB_303 0x40
#define HASVB_CHRONTEL 0x80
-#ifndef XGIFB_ID
-#define XGIFB_ID 0x53495346 /* Identify myself with 'XGIF' */
-#endif
-
enum XGI_CHIP_TYPE {
XG40 = 32,
XG41,
@@ -47,11 +42,6 @@ enum xgi_tvtype {
};
enum xgi_tv_plug { /* vicki@030226 */
-/* TVPLUG_Legacy = 0, */
-/* TVPLUG_COMPOSITE, */
-/* TVPLUG_SVIDEO, */
-/* TVPLUG_SCART, */
-/* TVPLUG_TOTAL */
TVPLUG_UNKNOWN = 0,
TVPLUG_COMPOSITE = 1,
TVPLUG_SVIDEO = 2,
@@ -64,14 +54,23 @@ enum xgi_tv_plug { /* vicki@030226 */
TVPLUG_TOTAL
};
-struct video_info {
+struct xgifb_video_info {
+ struct fb_info *fb_info;
+ struct xgi_hw_device_info hw_info;
+ struct vb_device_info dev_info;
+
+ int mode_idx;
+ int rate_idx;
+
+ u32 pseudo_palette[17];
+
int chip_id;
unsigned int video_size;
unsigned long video_base;
- char *video_vbase;
+ void __iomem *video_vbase;
unsigned long mmio_base;
unsigned long mmio_size;
- char *mmio_vbase;
+ void __iomem *mmio_vbase;
unsigned long vga_base;
unsigned long mtrr;
@@ -86,7 +85,7 @@ struct video_info {
int video_linelength;
unsigned int refresh_rate;
- unsigned long disp_state;
+ enum xgifb_display_type display2; /* the second display output type */
unsigned char hasVB;
unsigned char TV_type;
unsigned char TV_plug;
@@ -108,7 +107,4 @@ struct video_info {
char reserved[236];
};
-
-extern struct video_info xgi_video_info;
-
#endif
diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h
index 339c071854f..5beeef99bb1 100644
--- a/drivers/staging/xgifb/vb_def.h
+++ b/drivers/staging/xgifb/vb_def.h
@@ -3,261 +3,48 @@
#ifndef _INITDEF_
#define _INITDEF_
-#ifndef NewScratch
-#define NewScratch
-#endif
-/* shampoo */
-
-#define SEQ_ADDRESS_PORT 0x0014
-#define SEQ_DATA_PORT 0x0015
-#define MISC_OUTPUT_REG_READ_PORT 0x001C
-#define MISC_OUTPUT_REG_WRITE_PORT 0x0012
-#define GRAPH_DATA_PORT 0x1F
-#define GRAPH_ADDRESS_PORT 0x1E
-#define XGI_MASK_DUAL_CHIP 0x04 /* SR3A */
-#define CRTC_ADDRESS_PORT_COLOR 0x0024
-#define VIDEO_SUBSYSTEM_ENABLE_PORT 0x0013
-#define PCI_COMMAND 0x04
-
-/* ~shampoo */
-
-
-#define VB_XGI301 0x0001 /*301b*/
-#define VB_XGI301B 0x0002
-#define VB_XGI302B 0x0004
-#define VB_XGI301LV 0x0008 /*301lv*/
-#define VB_XGI302LV 0x0010
#define VB_XGI301C 0x0020 /* for 301C */
-#define VB_NoLCD 0x8000
/*end 301b*/
-#define VB_YPbPrInfo 0x07 /*301lv*/
-#define VB_YPbPr525i 0x00
#define VB_YPbPr525p 0x01
#define VB_YPbPr750p 0x02
#define VB_YPbPr1080i 0x03
-/* #define CRT1Len 17 */
#define LVDSCRT1Len 15
-#define CHTVRegDataLen 5
-
-/* #define ModeInfoFlag 0x07 */
-/* #define IsTextMode 0x07 */
-/* #define ModeText 0x00 */
-/* #define ModeCGA 0x01 */
-/* #define ModeEGA 0x02 */
-/* #define ModeVGA 0x03 */
-/* #define Mode15Bpp 0x04 */
-/* #define Mode16Bpp 0x05 */
-/* #define Mode24Bpp 0x06 */
-/* #define Mode32Bpp 0x07 */
-
-/* #define DACInfoFlag 0x18 */
-/* #define MemoryInfoFlag 0x1E0 */
-/* #define MemorySizeShift 0x05 */
-
-#define Charx8Dot 0x0200
-#define LineCompareOff 0x0400
-#define CRT2Mode 0x0800
-#define HalfDCLK 0x1000
-#define NoSupportSimuTV 0x2000
-#define DoubleScanMode 0x8000
-
-#define SupportAllCRT2 0x0078
-#define SupportTV 0x0008
-#define SupportHiVisionTV 0x0010
-#define SupportLCD 0x0020
-#define SupportRAMDAC2 0x0040
-#define NoSupportTV 0x0070
-#define NoSupportHiVisionTV 0x0060
-#define NoSupportLCD 0x0058
+
#define SupportCHTV 0x0800
#define SupportCRT2in301C 0x0100 /* for 301C */
-#define SupportTV1024 0x0800 /*301b*/
-#define SupportYPbPr 0x1000 /*301lv*/
-#define InterlaceMode 0x0080
-#define SyncPP 0x0000
-#define SyncPN 0x4000
-#define SyncNP 0x8000
-/* #define SyncNN 0xc000 */
-#define ECLKindex0 0x0000
-#define ECLKindex1 0x0100
-#define ECLKindex2 0x0200
-#define ECLKindex3 0x0300
-#define ECLKindex4 0x0400
-
-#define SetSimuScanMode 0x0001
-#define SwitchToCRT2 0x0002
-/* #define SetCRT2ToTV 0x009C */
-#define SetCRT2ToAVIDEO 0x0004
-#define SetCRT2ToSVIDEO 0x0008
-#define SetCRT2ToSCART 0x0010
-#define SetCRT2ToLCD 0x0020
-#define SetCRT2ToRAMDAC 0x0040
-#define SetCRT2ToHiVisionTV 0x0080
-#define SetNTSCTV 0x0000
-/* #define SetPALTV 0x0100 */
-#define SetInSlaveMode 0x0200
-#define SetNotSimuMode 0x0400
-#define SetNotSimuTVMode 0x0400
-#define SetDispDevSwitch 0x0800
-#define LoadDACFlag 0x1000
-#define DisableCRT2Display 0x2000
-#define DriverMode 0x4000
-#define HotKeySwitch 0x8000
#define SetCHTVOverScan 0x8000
-/* #define SetCRT2ToLCDA 0x8000 301b */
#define PanelRGB18Bit 0x0100
#define PanelRGB24Bit 0x0000
-#define TVOverScan 0x10
-#define TVOverScanShift 4
-#define ClearBufferFlag 0x20
-#define EnableDualEdge 0x01 /*301b*/
-#define SetToLCDA 0x02
-
-#define YPbPrModeInfo 0x38
-/* #define YPbPrMode525i 0x00 */
-/* #define YPbPrMode525p 0x08 */
-/* #define YPbPrMode750p 0x10 */
-/* #define YPbPrMode1080i 0x18 */
-
-#define SetSCARTOutput 0x01
-#define BoardTVType 0x02
-#define EnablePALMN 0x40
-/* #define ProgrammingCRT2 0x01 */
-/* #define TVSimuMode 0x02 */
-/* #define RPLLDIV2XO 0x04 */
-/* #define LCDVESATiming 0x08 */
-/* #define EnableLVDSDDA 0x10 */
-#define SetDispDevSwitchFlag 0x20
-#define CheckWinDos 0x40
-#define SetJDOSMode 0x80
-
#define Panel320x480 0x07 /*fstn*/
/* [ycchen] 02/12/03 Modify for Multi-Sync. LCD Support */
#define PanelResInfo 0x1F /* CR36 Panel Type/LCDResInfo */
-#define PanelRefInfo 0x60
#define Panel800x600 0x01
#define Panel1024x768 0x02
#define Panel1024x768x75 0x22
#define Panel1280x1024 0x03
#define Panel1280x1024x75 0x23
#define Panel640x480 0x04
-#define Panel1024x600 0x05
-#define Panel1152x864 0x06
#define Panel1280x960 0x07
-#define Panel1152x768 0x08
#define Panel1400x1050 0x09
-#define Panel1280x768 0x0A
#define Panel1600x1200 0x0B
#define PanelRef60Hz 0x00
#define PanelRef75Hz 0x20
-#define LCDRGB18Bit 0x01
-
-#define ExtChipTrumpion 0x06
-#define ExtChipCH7005 0x08
-#define ExtChipMitacTV 0x0a
-#define LCDNonExpanding 0x10
-#define LCDNonExpandingShift 4
-#define LCDSync 0x20
-#define LCDSyncBit 0xe0
-#define LCDSyncShift 6
-
-/* #define DDC2DelayTime 300 */
#define CRT2DisplayFlag 0x2000
-/* #define LCDDataLen 8 */
-/* #define HiTVDataLen 12 */
-/* #define TVDataLen 16 */
-/* #define SetPALTV 0x0100 */
-#define HalfDCLK 0x1000
-#define NTSCHT 1716
-#define NTSCVT 525
-#define PALHT 1728
-#define PALVT 625
-#define StHiTVHT 892
-#define StHiTVVT 1126
-#define StHiTextTVHT 1000
-#define StHiTextTVVT 1126
-#define ExtHiTVHT 2100
-#define ExtHiTVVT 1125
-
-#define St750pTVHT 1716
-#define St750pTVVT 525
-#define Ext750pTVHT 1716
-#define Ext750pTVVT 525
-#define St525pTVHT 1716
-#define St525pTVVT 525
-#define Ext525pTVHT 1716
-#define Ext525pTVVT 525
-#define St525iTVHT 1716
-#define St525iTVVT 525
-#define Ext525iTVHT 1716
-#define Ext525iTVVT 525
-
-#define VCLKStartFreq 25
-#define SoftDramType 0x80
-#define VCLK40 0x04
-
-#define VCLK162 0x21
-
-#define LCDRGB18Bit 0x01
-#define LoadDACFlag 0x1000
-#define AfterLockCRT2 0x4000
-#define SetCRT2ToAVIDEO 0x0004
-#define SetCRT2ToSCART 0x0010
-#define Ext2StructSize 5
-
#define YPbPr525iVCLK 0x03B
#define YPbPr525iVCLK_2 0x03A
-#define SwitchToCRT2 0x0002
-/* #define LCDVESATiming 0x08 */
-#define SetSCARTOutput 0x01
-#define AVIDEOSense 0x01
-#define SVIDEOSense 0x02
-#define SCARTSense 0x04
-#define LCDSense 0x08
-#define Monitor1Sense 0x20
-#define Monitor2Sense 0x10
-#define HiTVSense 0x40
-#define BoardTVType 0x02
-#define HotPlugFunction 0x08
-#define StStructSize 0x06
-
-
#define XGI_CRT2_PORT_00 (0x00 - 0x030)
#define XGI_CRT2_PORT_04 (0x04 - 0x030)
#define XGI_CRT2_PORT_10 (0x10 - 0x30)
#define XGI_CRT2_PORT_12 (0x12 - 0x30)
#define XGI_CRT2_PORT_14 (0x14 - 0x30)
-
-#define LCDNonExpanding 0x10
-#define ADR_CRT2PtrData 0x20E
-#define offset_Zurac 0x210
-#define ADR_LVDSDesPtrData 0x212
-#define ADR_LVDSCRT1DataPtr 0x214
-#define ADR_CHTVVCLKPtr 0x216
-#define ADR_CHTVRegDataPtr 0x218
-
-#define LVDSDataLen 6
-/* #define EnableLVDSDDA 0x10 */
-/* #define LVDSDesDataLen 3 */
-#define ActiveNonExpanding 0x40
-#define ActiveNonExpandingShift 6
-/* #define ActivePAL 0x20 */
-#define ActivePALShift 5
-/* #define ModeSwitchStatus 0x0F */
-#define SoftTVType 0x40
-#define SoftSettingAddr 0x52
-#define ModeSettingAddr 0x53
-
-/* #define SelectCRT1Rate 0x4 */
-
#define _PanelType00 0x00
#define _PanelType01 0x08
#define _PanelType02 0x10
@@ -275,173 +62,26 @@
#define _PanelType0E 0x70
#define _PanelType0F 0x78
-/* 1: XGI is primary vga 0:XGI is secondary vga */
-#define PRIMARY_VGA 0
-#define BIOSIDCodeAddr 0x235
-#define OEMUtilIDCodeAddr 0x237
-#define VBModeIDTableAddr 0x239
-#define OEMTVPtrAddr 0x241
-#define PhaseTableAddr 0x243
-#define NTSCFilterTableAddr 0x245
-#define PALFilterTableAddr 0x247
-#define OEMLCDPtr_1Addr 0x249
-#define OEMLCDPtr_2Addr 0x24B
-#define LCDHPosTable_1Addr 0x24D
-#define LCDHPosTable_2Addr 0x24F
-#define LCDVPosTable_1Addr 0x251
-#define LCDVPosTable_2Addr 0x253
-#define OEMLCDPIDTableAddr 0x255
-
-#define VBModeStructSize 5
-#define PhaseTableSize 4
-#define FilterTableSize 4
-#define LCDHPosTableSize 7
-#define LCDVPosTableSize 5
-#define OEMLVDSPIDTableSize 4
-#define LVDSHPosTableSize 4
-#define LVDSVPosTableSize 6
-
-#define VB_ModeID 0
-#define VB_TVTableIndex 1
-#define VB_LCDTableIndex 2
-#define VB_LCDHIndex 3
-#define VB_LCDVIndex 4
-
-#define OEMLCDEnable 0x0001
-#define OEMLCDDelayEnable 0x0002
-#define OEMLCDPOSEnable 0x0004
-#define OEMTVEnable 0x0100
-#define OEMTVDelayEnable 0x0200
-#define OEMTVFlickerEnable 0x0400
-#define OEMTVPhaseEnable 0x0800
-#define OEMTVFilterEnable 0x1000
-
-#define OEMLCDPanelIDSupport 0x0080
-
-/* #define LCDVESATiming 0x0001 //LCD Info CR37 */
-/* #define EnableLVDSDDA 0x0002 */
-#define EnableScalingLCD 0x0008
-#define SetPWDEnable 0x0004
-#define SetLCDtoNonExpanding 0x0010
-/* #define SetLCDPolarity 0x00E0 */
-#define SetLCDDualLink 0x0100
-#define SetLCDLowResolution 0x0200
-#define SetLCDStdMode 0x0400
-#define SetTVStdMode 0x0200
-#define SetTVLowResolution 0x0400
/* =============================================================
for 310
============================================================== */
-#define SoftDRAMType 0x80
-#define SoftSetting_OFFSET 0x52
-#define SR07_OFFSET 0x7C
-#define SR15_OFFSET 0x7D
-#define SR16_OFFSET 0x81
-#define SR17_OFFSET 0x85
-#define SR19_OFFSET 0x8D
-#define SR1F_OFFSET 0x99
-#define SR21_OFFSET 0x9A
-#define SR22_OFFSET 0x9B
-#define SR23_OFFSET 0x9C
-#define SR24_OFFSET 0x9D
-#define SR25_OFFSET 0x9E
-#define SR31_OFFSET 0x9F
-#define SR32_OFFSET 0xA0
-#define SR33_OFFSET 0xA1
-
-#define CR40_OFFSET 0xA2
-#define SR25_1_OFFSET 0xF6
-#define CR49_OFFSET 0xF7
-
-#define VB310Data_1_2_Offset 0xB6
-#define VB310Data_4_D_Offset 0xB7
-#define VB310Data_4_E_Offset 0xB8
-#define VB310Data_4_10_Offset 0xBB
-
-#define RGBSenseDataOffset 0xBD
-#define YCSenseDataOffset 0xBF
-#define VideoSenseDataOffset 0xC1
-#define OutputSelectOffset 0xF3
-
-#define ECLK_MCLK_DISTANCE 0x14
-#define VBIOSTablePointerStart 0x200
-#define StandTablePtrOffset (VBIOSTablePointerStart+0x02)
-#define EModeIDTablePtrOffset (VBIOSTablePointerStart+0x04)
-#define CRT1TablePtrOffset (VBIOSTablePointerStart+0x06)
-#define ScreenOffsetPtrOffset (VBIOSTablePointerStart+0x08)
-#define VCLKDataPtrOffset (VBIOSTablePointerStart+0x0A)
-#define MCLKDataPtrOffset (VBIOSTablePointerStart+0x0E)
-#define CRT2PtrDataPtrOffset (VBIOSTablePointerStart+0x10)
-#define TVAntiFlickPtrOffset (VBIOSTablePointerStart+0x12)
-#define TVDelayPtr1Offset (VBIOSTablePointerStart+0x14)
-#define TVPhaseIncrPtr1Offset (VBIOSTablePointerStart+0x16)
-#define TVYFilterPtr1Offset (VBIOSTablePointerStart+0x18)
-#define LCDDelayPtr1Offset (VBIOSTablePointerStart+0x20)
-#define TVEdgePtr1Offset (VBIOSTablePointerStart+0x24)
-#define CRT2Delay1Offset (VBIOSTablePointerStart+0x28)
-#define LCDDataDesOffset (VBIOSTablePointerStart-0x02)
-#define LCDDataPtrOffset (VBIOSTablePointerStart+0x2A)
-#define LCDDesDataPtrOffset (VBIOSTablePointerStart+0x2C)
/* add LCDDataList for GetLCDPtr */
#define LCDDataList (VBIOSTablePointerStart+0x22)
-/* add TVDataList for GetTVPtr */
-#define TVDataList (VBIOSTablePointerStart+0x36)
/* */
/* Modify from 310.inc */
/* */
/* */
-
-#define ShowMsgFlag 0x20 /* SoftSetting */
-#define ShowVESAFlag 0x10
-#define HotPlugFunction 0x08
#define ModeSoftSetting 0x04
-#define TVSoftSetting 0x02
-#define LCDSoftSetting 0x01
-#define GatingCRTinLCDA 0x10
-#define SetHiTVOutput 0x08
-#define SetYPbPrOutput 0x04
#define BoardTVType 0x02
-#define SetSCARTOutput 0x01
-
-/* TVModeSetting, Others as same as CR30 */
-#define ModeSettingYPbPr 0x02
-
-/* TVModeSetting same as CR35 */
-
-/* LCDModeSetting same as CR37 */
-
-#define EnableNewTVFont 0x10 /* MiscCapability */
-
-#define EnableLCDOutput 0x80 /* LCDCfgSetting */
#define SoftDRAMType 0x80 /* DRAMSetting */
-#define SoftDRAMConfig 0x40
-#define MosSelDRAMType 0x20
-#define SDRAM 000h
-#define SGRAM 0x01
-#define ESDRAM 0x02
-
-#define EnableAGPCfgSetting 0x01 /* AGPCfgSetting */
/* ---------------- SetMode Stack */
#define CRT1Len 15
#define VCLKLen 4
-#define DefThreshold 0x0100
-#define ExtRegsSize ((57+8+37+70+63+28+768+1)/64+1)
-
-#define VGA_XGI315 0x0001 /* VGA Type Info */
-#define VGA_SNewis315e 0x0002 /* 315 series */
-#define VGA_XGI550 0x0004
-#define VGA_XGI640 0x0008
-#define VGA_XGI740 0x0010
-#define VGA_XGI650 0x0020
-#define VGA_XGI650M 0x0040
-#define VGA_XGI651 0x0080
#define VGA_XGI340 0x0001 /* 340 series */
-#define VGA_XGI330 0x0001 /* 330 series */
-#define VGA_XGI660 0x0001 /* 660 series */
#define VB_XGI301 0x0001 /* VB Type Info */
#define VB_XGI301B 0x0002 /* 301 series */
@@ -450,34 +90,16 @@
#define VB_XGI301LV 0x0008
#define VB_XGI302LV 0x0010
#define VB_LVDS_NS 0x0001 /* 3rd party chip */
-#define VB_CH7017 0x0002
-#define VB_CH7007 0x0080 /* [Billy] 07/05/03 */
-/* #define VB_LVDS_SI 0x0004 */
#define ModeInfoFlag 0x0007
-#define IsTextMode 0x0007
#define ModeText 0x0000
-#define ModeCGA 0x0001
#define ModeEGA 0x0002 /* 16 colors mode */
#define ModeVGA 0x0003 /* 256 colors mode */
-#define Mode15Bpp 0x0004 /* 15 Bpp Color Mode */
-#define Mode16Bpp 0x0005 /* 16 Bpp Color Mode */
-#define Mode24Bpp 0x0006 /* 24 Bpp Color Mode */
-#define Mode32Bpp 0x0007 /* 32 Bpp Color Mode */
#define DACInfoFlag 0x0018
-#define MONODAC 0x0000
-#define CGADAC 0x0008
-#define EGADAC 0x0010
-#define VGADAC 0x0018
#define MemoryInfoFlag 0x01e0
#define MemorySizeShift 5
-#define Need1MSize 0x0000
-#define Need2MSize 0x0020
-#define Need4MSize 0x0060
-#define Need8MSize 0x00e0
-#define Need16MSize 0x01e0
#define Charx8Dot 0x0200
#define LineCompareOff 0x0400
@@ -487,11 +109,7 @@
#define DoubleScanMode 0x8000
/* -------------- Ext_InfoFlag */
-#define SupportModeInfo 0x0007
-#define Support256 0x0003
-#define Support15Bpp 0x0004
#define Support16Bpp 0x0005
-#define Support24Bpp 0x0006
#define Support32Bpp 0x0007
#define SupportAllCRT2 0x0078
@@ -513,7 +131,6 @@
/* -------------- SetMode Stack/Scratch */
#define SetSimuScanMode 0x0001 /* VBInfo/CR30 & CR31 */
#define SwitchToCRT2 0x0002
-#define SetCRT2ToTV1 0x009C
#define SetCRT2ToTV 0x089C
#define SetCRT2ToAVIDEO 0x0004
#define SetCRT2ToSVIDEO 0x0008
@@ -524,23 +141,14 @@
#define SetCRT2ToLCDA 0x0100
#define SetInSlaveMode 0x0200
#define SetNotSimuMode 0x0400
-#define HKEventMode 0x0800
#define SetCRT2ToYPbPr 0x0800
#define LoadDACFlag 0x1000
#define DisableCRT2Display 0x2000
#define DriverMode 0x4000
#define SetCRT2ToDualEdge 0x8000
-#define HotKeySwitch 0x8000
#define ProgrammingCRT2 0x0001 /* Set Flag */
-#define EnableVCMode 0x0002
-#define SetHKEventMode 0x0004
#define ReserveTVOption 0x0008
-#define DisableRelocateIO 0x0010
-#define Win9xDOSMode 0x0020
-#define JDOSMode 0x0040
-/* #define SetWin9xforJap 0x0080 // not used now */
-/* #define SetWin9xforKorea 0x0100 // not used now */
#define GatingCRT 0x0800
#define DisableChB 0x1000
#define EnableChB 0x2000
@@ -552,15 +160,11 @@
#define SetNTSCJ 0x0002
#define SetPALMTV 0x0004
#define SetPALNTV 0x0008
-#define SetCHTVUnderScan 0x0000
-/* #define SetCHTVOverScan 0x0010 */
#define SetYPbPrMode525i 0x0020
#define SetYPbPrMode525p 0x0040
#define SetYPbPrMode750p 0x0080
#define SetYPbPrMode1080i 0x0100
-#define SetTVStdMode 0x0200
#define SetTVLowResolution 0x0400
-#define SetTVSimuMode 0x0800
#define TVSimuMode 0x0800
#define RPLLDIV2XO 0x1000
#define NTSC1024x768 0x2000
@@ -571,38 +175,20 @@
#define EnableScalingLCD 0x0008
#define SetPWDEnable 0x0004
#define SetLCDtoNonExpanding 0x0010
-#define SetLCDPolarity 0x00e0
#define SetLCDDualLink 0x0100
#define SetLCDLowResolution 0x0200
#define SetLCDStdMode 0x0400
/* LCD Capability shampoo */
#define DefaultLCDCap 0x80ea
-#define RLVDSDHL00 0x0000
-#define RLVDSDHL01 0x0001
-#define RLVDSDHL10 0x0002 /* default */
-#define RLVDSDHL11 0x0003
#define EnableLCD24bpp 0x0004 /* default */
#define DisableLCD24bpp 0x0000
-#define RLVDSClkSFT0 0x0000
-#define RLVDSClkSFT1 0x0008 /* default */
-#define EnableLVDSDCBal 0x0010
-#define DisableLVDSDCBal 0x0000 /* default */
-#define SinglePolarity 0x0020 /* default */
-#define MultiPolarity 0x0000
#define LCDPolarity 0x00c0 /* default: SyncNN */
-#define LCDSingleLink 0x0000 /* default */
#define LCDDualLink 0x0100
#define EnableSpectrum 0x0200
-#define DisableSpectrum 0x0000 /* default */
#define PWDEnable 0x0400
-#define PWDDisable 0x0000 /* default */
-#define PWMEnable 0x0800
-#define PWMDisable 0x0000 /* default */
#define EnableVBCLKDRVLOW 0x4000
-#define EnableVBCLKDRVHigh 0x0000 /* default */
#define EnablePLLSPLOW 0x8000
-#define EnablePLLSPHigh 0x0000 /* default */
#define LCDBToA 0x20 /* LCD SetFlag */
#define StLCDBToA 0x40
@@ -616,187 +202,51 @@
#define Monitor1Sense 0x20
#define HiTVSense 0x40
-#ifdef NewScratch
#define YPbPrSense 0x80 /* NEW SCRATCH */
-#endif
#define TVSense 0xc7
#define TVOverScan 0x10 /* CR35 */
-#define TVOverScanShift 4
-#ifdef NewScratch
-#define NTSCMode 0x00
-#define PALMode 0x00
-#define NTSCJMode 0x02
-#define PALMNMode 0x0c
#define YPbPrMode 0xe0
#define YPbPrMode525i 0x00
#define YPbPrMode525p 0x20
#define YPbPrMode750p 0x40
#define YPbPrMode1080i 0x60
-#else /* Old Scratch */
-#define ClearBufferFlag 0x20
-#endif
#define LCDRGB18Bit 0x01 /* CR37 */
#define LCDNonExpanding 0x10
-#define LCDNonExpandingShift 4
#define LCDSync 0x20
#define LCDSyncBit 0xe0 /* H/V polarity & sync ID */
-#define LCDSyncShift 6
-#ifdef NewScratch
#define ScalingLCD 0x08
-#else /* Old Scratch */
-#define ExtChipType 0x0e
-#define ExtChip301 0x02
-#define ExtChipLVDS 0x04
-#define ExtChipCH7019 0x06
-#define ScalingLCD 0x10
-#endif
#define EnableDualEdge 0x01 /* CR38 */
#define SetToLCDA 0x02
-#ifdef NewScratch
#define SetYPbPr 0x04
-#define DisableChannelA 0x08
-#define DisableChannelB 0x10
-#define ExtChipType 0xe0
-#define ExtChip301 0x20
-#define ExtChipLVDS 0x40
-#define ExtChipCH7019 0x60
-#else /* Old Scratch */
-#define YPbPrSense 0x04
-#define SetYPbPr 0x08
-#define YPbPrMode 0x30
-#define YPbPrMode525i 0x00
-#define YPbPrMode525p 0x10
-#define YPbPrMode750p 0x20
-#define YPbPrMode1080i 0x30
-#define PALMNMode 0xc0
-#endif
-#define BacklightControlBit 0x01 /* CR3A */
-#define Win9xforJap 0x40
-#define Win9xforKorea 0x80
-
-#define ForceMDBits 0x07 /* CR3B */
-#define ForceMD_JDOS 0x00
-#define ForceMD_640x400T 0x01
-#define ForceMD_640x350T 0x02
-#define ForceMD_720x400T 0x03
-#define ForceMD_640x480E 0x04
-#define ForceMD_640x400E 0x05
-#define ForceP1Bit 0x10
-#define ForceP2Bit 0x20
-#define EnableForceMDinBIOS 0x40
-#define EnableForceMDinDrv 0x80
-
-#ifdef NewScratch /* New Scratch */
/* ---------------------- VUMA Information */
-#define LCDSettingFromCMOS 0x04 /* CR3C */
-#define TVSettingFromCMOS 0x08
#define DisplayDeviceFromCMOS 0x10
-#define HKSupportInSBIOS 0x20
-#define OSDSupportInSBIOS 0x40
-#define DisableLogo 0x80
/* ---------------------- HK Evnet Definition */
-#define HKEvent 0x0f /* CR3D */
-#define HK_ModeSwitch 0x01
-#define HK_Expanding 0x02
-#define HK_OverScan 0x03
-#define HK_Brightness 0x04
-#define HK_Contrast 0x05
-#define HK_Mute 0x06
-#define HK_Volume 0x07
#define ModeSwitchStatus 0xf0
#define ActiveCRT1 0x10
#define ActiveLCD 0x0020
#define ActiveTV 0x40
#define ActiveCRT2 0x80
-#define TVSwitchStatus 0x1f /* CR3E */
#define ActiveAVideo 0x01
#define ActiveSVideo 0x02
#define ActiveSCART 0x04
#define ActiveHiTV 0x08
#define ActiveYPbPr 0x10
-#define EnableHKEvent 0x01 /* CR3F */
-#define EnableOSDEvent 0x02
-#define StartOSDEvent 0x04
-#define IgnoreHKEvent 0x08
-#define IgnoreOSDEvent 0x10
-#else /* Old Scratch */
-#define OSD_SBIOS 0x02 /* SR17 */
-#define DisableLogo 0x04
-#define SelectKDOS 0x08
-#define KorWinMode 0x10
-#define KorMode3Bit 0x0020
-#define PSCCtrlBit 0x40
-#define NPSCCtrlBitShift 6
-#define BlueScreenBit 0x80
-
-#define HKEvent 0x0f /* CR79 */
-#define HK_ModeSwitch 0x01
-#define HK_Expanding 0x02
-#define HK_OverScan 0x03
-#define HK_Brightness 0x04
-#define HK_Contrast 0x05
-#define HK_Mute 0x06
-#define HK_Volume 0x07
-#define ActivePAL 0x0020
-#define ActivePALShift 5
-#define ActiveNonExpanding 0x40
-#define ActiveNonExpandingShift 6
-#define ActiveOverScan 0x80
-#define ActiveOverScanShift 7
-
-#define ModeSwitchStatus 0x0b /* SR15 */
-#define ActiveCRT1 0x01
-#define ActiveLCD 0x02
-#define ActiveCRT2 0x08
-
-#define TVSwitchStatus 0xf0 /* SR16 */
-#define TVConfigShift 3
-#define ActiveTV 0x01
-#define ActiveYPbPr 0x04
-#define ActiveAVideo 0x10
-#define ActiveSVideo 0x0020
-#define ActiveSCART 0x40
-#define ActiveHiTV 0x80
-
-#define EnableHKEvent 0x01 /* CR7A */
-#define EnableOSDEvent 0x02
-#define StartOSDEvent 0x04
-#define CMOSSupport 0x08
-#define HotKeySupport 0x10
-#define IngoreHKOSDEvent 0x20
-#endif
-
-/* //------------- Misc. Definition */
-#define SelectCRT1Rate 00h
-/* #define SelectCRT2Rate 04h */
-
-#define DDC1DelayTime 1000
-#ifdef TRUMPION
-#define DDC2DelayTime 15
-#else
-#define DDC2DelayTime 150
-#endif
-
-#define R_FACTOR 04Dh
-#define G_FACTOR 097h
-#define B_FACTOR 01Ch
/* --------------------------------------------------------- */
/* translated from asm code 301def.h */
/* */
/* --------------------------------------------------------- */
#define LCDDataLen 8
-#define HiTVDataLen 12
#define TVDataLen 12
#define LVDSCRT1Len_H 8
#define LVDSCRT1Len_V 7
@@ -806,7 +256,6 @@
#define LVDSDesDataLen2 8
#define LCDDesDataLen2 8
#define CHTVRegLen 16
-#define CHLVRegLen 12
#define StHiTVHT 892
#define StHiTVVT 1126
@@ -817,7 +266,6 @@
#define NTSCHT 1716
#define NTSCVT 525
#define NTSC1024x768HT 1908
-#define NTSC1024x768VT 525
#define PALHT 1728
#define PALVT 625
@@ -828,8 +276,6 @@
#define YPbPrTV750pHT 1650
#define YPbPrTV750pVT 750
-#define CRT2VCLKSel 0xc0
-
#define CRT2Delay1 0x04 /* XGI301 */
#define CRT2Delay2 0x0A /* 301B,302 */
@@ -846,57 +292,41 @@
#define VCLK52_406 0x09
#define VCLK56_25 0x0A
#define VCLK65 0x0B
-#define VCLK67_765 0x0C
#define VCLK68_179 0x0D
#define VCLK72_852 0x0E
#define VCLK75 0x0F
-#define VCLK75_8 0x10
#define VCLK78_75 0x11
#define VCLK79_411 0x12
#define VCLK83_95 0x13
-#define VCLK84_8 0x14
#define VCLK86_6 0x15
#define VCLK94_5 0x16
-#define VCLK104_998 0x17
-#define VCLK105_882 0x18
#define VCLK108_2 0x19
-#define VCLK109_175 0x1A
#define VCLK113_309 0x1B
#define VCLK116_406 0x1C
-#define VCLK132_258 0x1D
#define VCLK135_5 0x1E
#define VCLK139_054 0x1F
#define VCLK157_5 0x20
#define VCLK162 0x21
#define VCLK175 0x22
#define VCLK189 0x23
-#define VCLK194_4 0x24
#define VCLK202_5 0x25
#define VCLK229_5 0x26
#define VCLK234 0x27
-#define VCLK252_699 0x28
#define VCLK254_817 0x29
-#define VCLK265_728 0x2A
#define VCLK266_952 0x2B
#define VCLK269_655 0x2C
-#define VCLK272_042 0x2D
#define VCLK277_015 0x2E
-#define VCLK286_359 0x2F
#define VCLK291_132 0x30
#define VCLK291_766 0x31
-#define VCLK309_789 0x32
#define VCLK315_195 0x33
#define VCLK323_586 0x34
#define VCLK330_615 0x35
-#define VCLK332_177 0x36
#define VCLK340_477 0x37
#define VCLK375_847 0x38
#define VCLK388_631 0x39
#define VCLK125_999 0x51
#define VCLK148_5 0x52
-#define VCLK178_992 0x54
#define VCLK217_325 0x55
-#define VCLK299_505 0x56
#define YPbPr750pVCLK 0x57
#define TVVCLKDIV2 0x3A
@@ -906,45 +336,13 @@
#define HiTVSimuVCLK 0x3E
#define HiTVTextVCLK 0x3F
#define VCLK39_77 0x40
-/* #define YPbPr750pVCLK 0x0F */
#define YPbPr525pVCLK 0x3A
-/* #define ;;YPbPr525iVCLK 0x3B */
-/* #define ;;YPbPr525iVCLK_2 0x3A */
#define NTSC1024VCLK 0x41
-#define VCLK25_175_41 0x42 /* ; ScaleLCD */
-#define VCLK25_175_42 0x43
-#define VCLK28_322_43 0x44
-#define VCLK40_44 0x45
-#define VCLKQVGA_1 0x46 /* ; QVGA */
-#define VCLKQVGA_2 0x47
-#define VCLKQVGA_3 0x48
#define VCLK35_2 0x49 /* ; 800x480 */
#define VCLK122_61 0x4A
#define VCLK80_350 0x4B
#define VCLK107_385 0x4C
-#define CHTVVCLK30_2 0x50 /* ;;CHTV */
-#define CHTVVCLK28_1 0x51
-#define CHTVVCLK43_6 0x52
-#define CHTVVCLK26_4 0x53
-#define CHTVVCLK24_6 0x54
-#define CHTVVCLK47_8 0x55
-#define CHTVVCLK31_5 0x56
-#define CHTVVCLK26_2 0x57
-#define CHTVVCLK39 0x58
-#define CHTVVCLK36 0x59
-
-#define CH7007TVVCLK30_2 0x00 /* [Billy] 2007/05/18 For CH7007 */
-#define CH7007TVVCLK28_1 0x01
-#define CH7007TVVCLK43_6 0x02
-#define CH7007TVVCLK26_4 0x03
-#define CH7007TVVCLK24_6 0x04
-#define CH7007TVVCLK47_8 0x05
-#define CH7007TVVCLK31_5 0x06
-#define CH7007TVVCLK26_2 0x07
-#define CH7007TVVCLK39 0x08
-#define CH7007TVVCLK36 0x09
-
#define RES320x200 0x00
#define RES320x240 0x01
#define RES400x300 0x02
@@ -1018,5 +416,4 @@
#define RES1280x960x85 0x46
#define RES1280x960x120 0x47
-#define LFBDRAMTrap 0x30
#endif
diff --git a/drivers/staging/xgifb/vb_ext.h b/drivers/staging/xgifb/vb_ext.h
index 814a446b70c..0b1f55b4242 100644
--- a/drivers/staging/xgifb/vb_ext.h
+++ b/drivers/staging/xgifb/vb_ext.h
@@ -1,26 +1,6 @@
#ifndef _VBEXT_
#define _VBEXT_
-struct DWORDREGS {
- unsigned long Eax, Ebx, Ecx, Edx, Esi, Edi, Ebp;
-};
-
-struct WORDREGS {
- unsigned short ax, hi_ax, bx, hi_bx, cx, hi_cx, dx, hi_dx, si,
- hi_si, di, hi_di, bp, hi_bp;
-};
-
-struct BYTEREGS {
- unsigned char al, ah, hi_al, hi_ah, bl, bh, hi_bl, hi_bh, cl, ch,
- hi_cl, hi_ch, dl, dh, hi_dl, hi_dh;
-};
-
-typedef union _X86_REGS {
- struct DWORDREGS e;
- struct WORDREGS x;
- struct BYTEREGS h;
-} X86_REGS, *PX86_REGS;
-
extern void XGI_GetSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo);
extern unsigned short XGINew_SenseLCD(struct xgi_hw_device_info *,
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 493b5322039..9e890a17fbc 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -1,7 +1,8 @@
-#include "vgatypes.h"
-
#include <linux/types.h>
#include <linux/delay.h> /* udelay */
+#include <linux/pci.h>
+
+#include "vgatypes.h"
#include "XGIfb.h"
#include "vb_def.h"
@@ -14,15 +15,13 @@
#include <linux/io.h>
-static unsigned char XGINew_ChannelAB, XGINew_DataBusWidth;
-
-static unsigned short XGINew_DDRDRAM_TYPE340[4][5] = {
+static const unsigned short XGINew_DDRDRAM_TYPE340[4][5] = {
{ 2, 13, 9, 64, 0x45},
{ 2, 12, 9, 32, 0x35},
{ 2, 12, 8, 16, 0x31},
{ 2, 11, 8, 8, 0x21} };
-static unsigned short XGINew_DDRDRAM_TYPE20[12][5] = {
+static const unsigned short XGINew_DDRDRAM_TYPE20[12][5] = {
{ 2, 14, 11, 128, 0x5D},
{ 2, 14, 10, 64, 0x59},
{ 2, 13, 11, 64, 0x4D},
@@ -36,8 +35,6 @@ static unsigned short XGINew_DDRDRAM_TYPE20[12][5] = {
{ 2, 12, 9, 8, 0x35},
{ 2, 12, 8, 4, 0x31} };
-static int XGINew_RAMType;
-
static unsigned char
XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
@@ -111,14 +108,18 @@ static void XGINew_DDR1x_MRS_340(unsigned long P3c4,
}
udelay(60);
- xgifb_reg_set(P3c4, 0x18, pVBInfo->SR15[2][XGINew_RAMType]); /* SR18 */
+ xgifb_reg_set(P3c4,
+ 0x18,
+ pVBInfo->SR15[2][pVBInfo->ram_type]); /* SR18 */
xgifb_reg_set(P3c4, 0x19, 0x01);
xgifb_reg_set(P3c4, 0x16, pVBInfo->SR16[0]);
xgifb_reg_set(P3c4, 0x16, pVBInfo->SR16[1]);
mdelay(1);
xgifb_reg_set(P3c4, 0x1B, 0x03);
udelay(500);
- xgifb_reg_set(P3c4, 0x18, pVBInfo->SR15[2][XGINew_RAMType]); /* SR18 */
+ xgifb_reg_set(P3c4,
+ 0x18,
+ pVBInfo->SR15[2][pVBInfo->ram_type]); /* SR18 */
xgifb_reg_set(P3c4, 0x19, 0x00);
xgifb_reg_set(P3c4, 0x16, pVBInfo->SR16[2]);
xgifb_reg_set(P3c4, 0x16, pVBInfo->SR16[3]);
@@ -131,23 +132,23 @@ static void XGINew_SetMemoryClock(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3c4,
0x28,
- pVBInfo->MCLKData[XGINew_RAMType].SR28);
+ pVBInfo->MCLKData[pVBInfo->ram_type].SR28);
xgifb_reg_set(pVBInfo->P3c4,
0x29,
- pVBInfo->MCLKData[XGINew_RAMType].SR29);
+ pVBInfo->MCLKData[pVBInfo->ram_type].SR29);
xgifb_reg_set(pVBInfo->P3c4,
0x2A,
- pVBInfo->MCLKData[XGINew_RAMType].SR2A);
+ pVBInfo->MCLKData[pVBInfo->ram_type].SR2A);
xgifb_reg_set(pVBInfo->P3c4,
0x2E,
- pVBInfo->ECLKData[XGINew_RAMType].SR2E);
+ pVBInfo->ECLKData[pVBInfo->ram_type].SR2E);
xgifb_reg_set(pVBInfo->P3c4,
0x2F,
- pVBInfo->ECLKData[XGINew_RAMType].SR2F);
+ pVBInfo->ECLKData[pVBInfo->ram_type].SR2F);
xgifb_reg_set(pVBInfo->P3c4,
0x30,
- pVBInfo->ECLKData[XGINew_RAMType].SR30);
+ pVBInfo->ECLKData[pVBInfo->ram_type].SR30);
/* [Vicent] 2004/07/07,
* When XG42 ECLK = MCLK = 207MHz, Set SR32 D[1:0] = 10b */
@@ -155,12 +156,12 @@ static void XGINew_SetMemoryClock(struct xgi_hw_device_info *HwDeviceExtension,
* Modify SR32 value, when MCLK=207MHZ, ELCK=250MHz,
* Set SR32 D[1:0] = 10b */
if (HwDeviceExtension->jChipType == XG42) {
- if ((pVBInfo->MCLKData[XGINew_RAMType].SR28 == 0x1C) &&
- (pVBInfo->MCLKData[XGINew_RAMType].SR29 == 0x01) &&
- (((pVBInfo->ECLKData[XGINew_RAMType].SR2E == 0x1C) &&
- (pVBInfo->ECLKData[XGINew_RAMType].SR2F == 0x01)) ||
- ((pVBInfo->ECLKData[XGINew_RAMType].SR2E == 0x22) &&
- (pVBInfo->ECLKData[XGINew_RAMType].SR2F == 0x01))))
+ if ((pVBInfo->MCLKData[pVBInfo->ram_type].SR28 == 0x1C) &&
+ (pVBInfo->MCLKData[pVBInfo->ram_type].SR29 == 0x01) &&
+ (((pVBInfo->ECLKData[pVBInfo->ram_type].SR2E == 0x1C) &&
+ (pVBInfo->ECLKData[pVBInfo->ram_type].SR2F == 0x01)) ||
+ ((pVBInfo->ECLKData[pVBInfo->ram_type].SR2E == 0x22) &&
+ (pVBInfo->ECLKData[pVBInfo->ram_type].SR2F == 0x01))))
xgifb_reg_set(pVBInfo->P3c4,
0x32,
((unsigned char) xgifb_reg_get(
@@ -173,8 +174,7 @@ static void XGINew_DDRII_Bootup_XG27(
unsigned long P3c4, struct vb_device_info *pVBInfo)
{
unsigned long P3d4 = P3c4 + 0x10;
- XGINew_RAMType = (int) XGINew_GetXG20DRAMType(HwDeviceExtension,
- pVBInfo);
+ pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
/* Set Double Frequency */
@@ -249,8 +249,7 @@ static void XGINew_DDR2_MRS_XG20(struct xgi_hw_device_info *HwDeviceExtension,
{
unsigned long P3d4 = P3c4 + 0x10;
- XGINew_RAMType = (int) XGINew_GetXG20DRAMType(HwDeviceExtension,
- pVBInfo);
+ pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
xgifb_reg_set(P3d4, 0x97, 0x11); /* CR97 */
@@ -306,7 +305,9 @@ static void XGINew_DDR1x_MRS_XG20(unsigned long P3c4,
xgifb_reg_set(P3c4, 0x16, 0x00);
xgifb_reg_set(P3c4, 0x16, 0x80);
udelay(60);
- xgifb_reg_set(P3c4, 0x18, pVBInfo->SR15[2][XGINew_RAMType]); /* SR18 */
+ xgifb_reg_set(P3c4,
+ 0x18,
+ pVBInfo->SR15[2][pVBInfo->ram_type]); /* SR18 */
/* xgifb_reg_set(P3c4, 0x18, 0x31); */
xgifb_reg_set(P3c4, 0x19, 0x01);
xgifb_reg_set(P3c4, 0x16, 0x03);
@@ -315,7 +316,9 @@ static void XGINew_DDR1x_MRS_XG20(unsigned long P3c4,
xgifb_reg_set(P3c4, 0x1B, 0x03);
udelay(500);
/* xgifb_reg_set(P3c4, 0x18, 0x31); */
- xgifb_reg_set(P3c4, 0x18, pVBInfo->SR15[2][XGINew_RAMType]); /* SR18 */
+ xgifb_reg_set(P3c4,
+ 0x18,
+ pVBInfo->SR15[2][pVBInfo->ram_type]); /* SR18 */
xgifb_reg_set(P3c4, 0x19, 0x00);
xgifb_reg_set(P3c4, 0x16, 0x03);
xgifb_reg_set(P3c4, 0x16, 0x83);
@@ -332,13 +335,13 @@ static void XGINew_DDR1x_DefaultRegister(
XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
xgifb_reg_set(P3d4,
0x82,
- pVBInfo->CR40[11][XGINew_RAMType]); /* CR82 */
+ pVBInfo->CR40[11][pVBInfo->ram_type]); /* CR82 */
xgifb_reg_set(P3d4,
0x85,
- pVBInfo->CR40[12][XGINew_RAMType]); /* CR85 */
+ pVBInfo->CR40[12][pVBInfo->ram_type]); /* CR85 */
xgifb_reg_set(P3d4,
0x86,
- pVBInfo->CR40[13][XGINew_RAMType]); /* CR86 */
+ pVBInfo->CR40[13][pVBInfo->ram_type]); /* CR86 */
xgifb_reg_set(P3d4, 0x98, 0x01);
xgifb_reg_set(P3d4, 0x9A, 0x02);
@@ -353,15 +356,15 @@ static void XGINew_DDR1x_DefaultRegister(
/* CR82 */
xgifb_reg_set(P3d4,
0x82,
- pVBInfo->CR40[11][XGINew_RAMType]);
+ pVBInfo->CR40[11][pVBInfo->ram_type]);
/* CR85 */
xgifb_reg_set(P3d4,
0x85,
- pVBInfo->CR40[12][XGINew_RAMType]);
+ pVBInfo->CR40[12][pVBInfo->ram_type]);
/* CR86 */
xgifb_reg_set(P3d4,
0x86,
- pVBInfo->CR40[13][XGINew_RAMType]);
+ pVBInfo->CR40[13][pVBInfo->ram_type]);
break;
default:
xgifb_reg_set(P3d4, 0x82, 0x88);
@@ -372,7 +375,7 @@ static void XGINew_DDR1x_DefaultRegister(
xgifb_reg_get(P3d4, 0x86);
xgifb_reg_set(P3d4,
0x86,
- pVBInfo->CR40[13][XGINew_RAMType]);
+ pVBInfo->CR40[13][pVBInfo->ram_type]);
xgifb_reg_set(P3d4, 0x82, 0x77);
xgifb_reg_set(P3d4, 0x85, 0x00);
@@ -385,11 +388,11 @@ static void XGINew_DDR1x_DefaultRegister(
/* CR85 */
xgifb_reg_set(P3d4,
0x85,
- pVBInfo->CR40[12][XGINew_RAMType]);
+ pVBInfo->CR40[12][pVBInfo->ram_type]);
/* CR82 */
xgifb_reg_set(P3d4,
0x82,
- pVBInfo->CR40[11][XGINew_RAMType]);
+ pVBInfo->CR40[11][pVBInfo->ram_type]);
break;
}
@@ -414,16 +417,18 @@ static void XGINew_DDR2_DefaultRegister(
xgifb_reg_set(P3d4, 0x86, 0x88);
xgifb_reg_get(P3d4, 0x86); /* Insert read command for delay */
/* CR86 */
- xgifb_reg_set(P3d4, 0x86, pVBInfo->CR40[13][XGINew_RAMType]);
+ xgifb_reg_set(P3d4, 0x86, pVBInfo->CR40[13][pVBInfo->ram_type]);
xgifb_reg_set(P3d4, 0x82, 0x77);
xgifb_reg_set(P3d4, 0x85, 0x00);
xgifb_reg_get(P3d4, 0x85); /* Insert read command for delay */
xgifb_reg_set(P3d4, 0x85, 0x88);
xgifb_reg_get(P3d4, 0x85); /* Insert read command for delay */
- xgifb_reg_set(P3d4, 0x85, pVBInfo->CR40[12][XGINew_RAMType]); /* CR85 */
+ xgifb_reg_set(P3d4,
+ 0x85,
+ pVBInfo->CR40[12][pVBInfo->ram_type]); /* CR85 */
if (HwDeviceExtension->jChipType == XG27)
/* CR82 */
- xgifb_reg_set(P3d4, 0x82, pVBInfo->CR40[11][XGINew_RAMType]);
+ xgifb_reg_set(P3d4, 0x82, pVBInfo->CR40[11][pVBInfo->ram_type]);
else
xgifb_reg_set(P3d4, 0x82, 0xA8); /* CR82 */
@@ -443,15 +448,15 @@ static void XGINew_SetDRAMDefaultRegister340(
unsigned long P3d4 = Port, P3c4 = Port - 0x10;
- xgifb_reg_set(P3d4, 0x6D, pVBInfo->CR40[8][XGINew_RAMType]);
- xgifb_reg_set(P3d4, 0x68, pVBInfo->CR40[5][XGINew_RAMType]);
- xgifb_reg_set(P3d4, 0x69, pVBInfo->CR40[6][XGINew_RAMType]);
- xgifb_reg_set(P3d4, 0x6A, pVBInfo->CR40[7][XGINew_RAMType]);
+ xgifb_reg_set(P3d4, 0x6D, pVBInfo->CR40[8][pVBInfo->ram_type]);
+ xgifb_reg_set(P3d4, 0x68, pVBInfo->CR40[5][pVBInfo->ram_type]);
+ xgifb_reg_set(P3d4, 0x69, pVBInfo->CR40[6][pVBInfo->ram_type]);
+ xgifb_reg_set(P3d4, 0x6A, pVBInfo->CR40[7][pVBInfo->ram_type]);
temp2 = 0;
for (i = 0; i < 4; i++) {
/* CR6B DQS fine tune delay */
- temp = pVBInfo->CR6B[XGINew_RAMType][i];
+ temp = pVBInfo->CR6B[pVBInfo->ram_type][i];
for (j = 0; j < 4; j++) {
temp1 = ((temp >> (2 * j)) & 0x03) << 2;
temp2 |= temp1;
@@ -466,7 +471,7 @@ static void XGINew_SetDRAMDefaultRegister340(
temp2 = 0;
for (i = 0; i < 4; i++) {
/* CR6E DQM fine tune delay */
- temp = pVBInfo->CR6E[XGINew_RAMType][i];
+ temp = pVBInfo->CR6E[pVBInfo->ram_type][i];
for (j = 0; j < 4; j++) {
temp1 = ((temp >> (2 * j)) & 0x03) << 2;
temp2 |= temp1;
@@ -485,7 +490,7 @@ static void XGINew_SetDRAMDefaultRegister340(
temp2 = 0;
for (i = 0; i < 8; i++) {
/* CR6F DQ fine tune delay */
- temp = pVBInfo->CR6F[XGINew_RAMType][8 * k + i];
+ temp = pVBInfo->CR6F[pVBInfo->ram_type][8 * k + i];
for (j = 0; j < 4; j++) {
temp1 = (temp >> (2 * j)) & 0x03;
temp2 |= temp1;
@@ -499,12 +504,16 @@ static void XGINew_SetDRAMDefaultRegister340(
temp3 += 0x01;
}
- xgifb_reg_set(P3d4, 0x80, pVBInfo->CR40[9][XGINew_RAMType]); /* CR80 */
- xgifb_reg_set(P3d4, 0x81, pVBInfo->CR40[10][XGINew_RAMType]); /* CR81 */
+ xgifb_reg_set(P3d4,
+ 0x80,
+ pVBInfo->CR40[9][pVBInfo->ram_type]); /* CR80 */
+ xgifb_reg_set(P3d4,
+ 0x81,
+ pVBInfo->CR40[10][pVBInfo->ram_type]); /* CR81 */
temp2 = 0x80;
/* CR89 terminator type select */
- temp = pVBInfo->CR89[XGINew_RAMType][0];
+ temp = pVBInfo->CR89[pVBInfo->ram_type][0];
for (j = 0; j < 4; j++) {
temp1 = (temp >> (2 * j)) & 0x03;
temp2 |= temp1;
@@ -514,45 +523,49 @@ static void XGINew_SetDRAMDefaultRegister340(
temp2 += 0x10;
}
- temp = pVBInfo->CR89[XGINew_RAMType][1];
+ temp = pVBInfo->CR89[pVBInfo->ram_type][1];
temp1 = temp & 0x03;
temp2 |= temp1;
xgifb_reg_set(P3d4, 0x89, temp2);
- temp = pVBInfo->CR40[3][XGINew_RAMType];
+ temp = pVBInfo->CR40[3][pVBInfo->ram_type];
temp1 = temp & 0x0F;
temp2 = (temp >> 4) & 0x07;
temp3 = temp & 0x80;
xgifb_reg_set(P3d4, 0x45, temp1); /* CR45 */
xgifb_reg_set(P3d4, 0x99, temp2); /* CR99 */
xgifb_reg_or(P3d4, 0x40, temp3); /* CR40_D[7] */
- xgifb_reg_set(P3d4, 0x41, pVBInfo->CR40[0][XGINew_RAMType]); /* CR41 */
+ xgifb_reg_set(P3d4,
+ 0x41,
+ pVBInfo->CR40[0][pVBInfo->ram_type]); /* CR41 */
if (HwDeviceExtension->jChipType == XG27)
xgifb_reg_set(P3d4, 0x8F, *pVBInfo->pCR8F); /* CR8F */
for (j = 0; j <= 6; j++) /* CR90 - CR96 */
xgifb_reg_set(P3d4, (0x90 + j),
- pVBInfo->CR40[14 + j][XGINew_RAMType]);
+ pVBInfo->CR40[14 + j][pVBInfo->ram_type]);
for (j = 0; j <= 2; j++) /* CRC3 - CRC5 */
xgifb_reg_set(P3d4, (0xC3 + j),
- pVBInfo->CR40[21 + j][XGINew_RAMType]);
+ pVBInfo->CR40[21 + j][pVBInfo->ram_type]);
for (j = 0; j < 2; j++) /* CR8A - CR8B */
xgifb_reg_set(P3d4, (0x8A + j),
- pVBInfo->CR40[1 + j][XGINew_RAMType]);
+ pVBInfo->CR40[1 + j][pVBInfo->ram_type]);
if ((HwDeviceExtension->jChipType == XG41) ||
(HwDeviceExtension->jChipType == XG42))
xgifb_reg_set(P3d4, 0x8C, 0x87);
- xgifb_reg_set(P3d4, 0x59, pVBInfo->CR40[4][XGINew_RAMType]); /* CR59 */
+ xgifb_reg_set(P3d4,
+ 0x59,
+ pVBInfo->CR40[4][pVBInfo->ram_type]); /* CR59 */
xgifb_reg_set(P3d4, 0x83, 0x09); /* CR83 */
xgifb_reg_set(P3d4, 0x87, 0x00); /* CR87 */
xgifb_reg_set(P3d4, 0xCF, *pVBInfo->pCRCF); /* CRCF */
- if (XGINew_RAMType) {
+ if (pVBInfo->ram_type) {
/* xgifb_reg_set(P3c4, 0x17, 0xC0); */ /* SR17 DDRII */
xgifb_reg_set(P3c4, 0x17, 0x80); /* SR17 DDRII */
if (HwDeviceExtension->jChipType == XG27)
@@ -570,11 +583,13 @@ static void XGINew_SetDRAMDefaultRegister340(
xgifb_reg_set(P3d4, 0xB0, 0x80); /* DDRII Dual frequency mode */
XGINew_DDR2_DefaultRegister(HwDeviceExtension, P3d4, pVBInfo);
}
- xgifb_reg_set(P3c4, 0x1B, pVBInfo->SR15[3][XGINew_RAMType]); /* SR1B */
+ xgifb_reg_set(P3c4,
+ 0x1B,
+ pVBInfo->SR15[3][pVBInfo->ram_type]); /* SR1B */
}
static void XGINew_SetDRAMSizingType(int index,
- unsigned short DRAMTYPE_TABLE[][5],
+ const unsigned short DRAMTYPE_TABLE[][5],
struct vb_device_info *pVBInfo)
{
unsigned short data;
@@ -586,14 +601,14 @@ static void XGINew_SetDRAMSizingType(int index,
}
static unsigned short XGINew_SetDRAMSizeReg(int index,
- unsigned short DRAMTYPE_TABLE[][5],
+ const unsigned short DRAMTYPE_TABLE[][5],
struct vb_device_info *pVBInfo)
{
unsigned short data = 0, memsize = 0;
int RankSize;
unsigned char ChannelNo;
- RankSize = DRAMTYPE_TABLE[index][3] * XGINew_DataBusWidth / 32;
+ RankSize = DRAMTYPE_TABLE[index][3] * pVBInfo->ram_bus / 32;
data = xgifb_reg_get(pVBInfo->P3c4, 0x13);
data &= 0x80;
@@ -602,10 +617,10 @@ static unsigned short XGINew_SetDRAMSizeReg(int index,
data = 0;
- if (XGINew_ChannelAB == 3)
+ if (pVBInfo->ram_channel == 3)
ChannelNo = 4;
else
- ChannelNo = XGINew_ChannelAB;
+ ChannelNo = pVBInfo->ram_channel;
if (ChannelNo * RankSize <= 256) {
while ((RankSize >>= 1) > 0)
@@ -619,8 +634,8 @@ static unsigned short XGINew_SetDRAMSizeReg(int index,
(xgifb_reg_get(pVBInfo->P3c4, 0x14) & 0x0F) |
(data & 0xF0));
- /* data |= XGINew_ChannelAB << 2; */
- /* data |= (XGINew_DataBusWidth / 64) << 1; */
+ /* data |= pVBInfo->ram_channel << 2; */
+ /* data |= (pVBInfo->ram_bus / 64) << 1; */
/* xgifb_reg_set(pVBInfo->P3c4, 0x14, data); */
/* should delay */
@@ -630,14 +645,14 @@ static unsigned short XGINew_SetDRAMSizeReg(int index,
}
static unsigned short XGINew_SetDRAMSize20Reg(int index,
- unsigned short DRAMTYPE_TABLE[][5],
+ const unsigned short DRAMTYPE_TABLE[][5],
struct vb_device_info *pVBInfo)
{
unsigned short data = 0, memsize = 0;
int RankSize;
unsigned char ChannelNo;
- RankSize = DRAMTYPE_TABLE[index][3] * XGINew_DataBusWidth / 8;
+ RankSize = DRAMTYPE_TABLE[index][3] * pVBInfo->ram_bus / 8;
data = xgifb_reg_get(pVBInfo->P3c4, 0x13);
data &= 0x80;
@@ -646,10 +661,10 @@ static unsigned short XGINew_SetDRAMSize20Reg(int index,
data = 0;
- if (XGINew_ChannelAB == 3)
+ if (pVBInfo->ram_channel == 3)
ChannelNo = 4;
else
- ChannelNo = XGINew_ChannelAB;
+ ChannelNo = pVBInfo->ram_channel;
if (ChannelNo * RankSize <= 256) {
while ((RankSize >>= 1) > 0)
@@ -664,8 +679,8 @@ static unsigned short XGINew_SetDRAMSize20Reg(int index,
(data & 0xF0));
udelay(15);
- /* data |= XGINew_ChannelAB << 2; */
- /* data |= (XGINew_DataBusWidth / 64) << 1; */
+ /* data |= pVBInfo->ram_channel << 2; */
+ /* data |= (pVBInfo->ram_bus / 64) << 1; */
/* xgifb_reg_set(pVBInfo->P3c4, 0x14, data); */
/* should delay */
@@ -679,12 +694,13 @@ static int XGINew_ReadWriteRest(unsigned short StopAddr,
{
int i;
unsigned long Position = 0;
+ void __iomem *fbaddr = pVBInfo->FBAddr;
- *((unsigned long *) (pVBInfo->FBAddr + Position)) = Position;
+ writel(Position, fbaddr + Position);
for (i = StartAddr; i <= StopAddr; i++) {
Position = 1 << i;
- *((unsigned long *) (pVBInfo->FBAddr + Position)) = Position;
+ writel(Position, fbaddr + Position);
}
udelay(500); /* [Vicent] 2004/04/16.
@@ -692,13 +708,12 @@ static int XGINew_ReadWriteRest(unsigned short StopAddr,
Position = 0;
- if ((*(unsigned long *) (pVBInfo->FBAddr + Position)) != Position)
+ if (readl(fbaddr + Position) != Position)
return 0;
for (i = StartAddr; i <= StopAddr; i++) {
Position = 1 << i;
- if ((*(unsigned long *) (pVBInfo->FBAddr + Position)) !=
- Position)
+ if (readl(fbaddr + Position) != Position)
return 0;
}
return 1;
@@ -729,14 +744,14 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
case XG21:
data = xgifb_reg_get(pVBInfo->P3d4, 0x97);
data = data & 0x01;
- XGINew_ChannelAB = 1; /* XG20 "JUST" one channel */
+ pVBInfo->ram_channel = 1; /* XG20 "JUST" one channel */
if (data == 0) { /* Single_32_16 */
if ((HwDeviceExtension->ulVideoMemorySize - 1)
> 0x1000000) {
- XGINew_DataBusWidth = 32; /* 32 bits */
+ pVBInfo->ram_bus = 32; /* 32 bits */
/* 22bit + 2 rank + 32bit */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x52);
@@ -765,7 +780,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
if ((HwDeviceExtension->ulVideoMemorySize - 1) >
0x800000) {
- XGINew_DataBusWidth = 16; /* 16 bits */
+ pVBInfo->ram_bus = 16; /* 16 bits */
/* 22bit + 2 rank + 16bit */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x41);
@@ -783,7 +798,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
} else { /* Dual_16_8 */
if ((HwDeviceExtension->ulVideoMemorySize - 1) >
0x800000) {
- XGINew_DataBusWidth = 16; /* 16 bits */
+ pVBInfo->ram_bus = 16; /* 16 bits */
/* (0x31:12x8x2) 22bit + 2 rank */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
/* 0x41:16Mx16 bit*/
@@ -814,7 +829,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
if ((HwDeviceExtension->ulVideoMemorySize - 1) >
0x400000) {
- XGINew_DataBusWidth = 8; /* 8 bits */
+ pVBInfo->ram_bus = 8; /* 8 bits */
/* (0x31:12x8x2) 22bit + 2 rank */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
/* 0x30:8Mx8 bit*/
@@ -833,21 +848,21 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
break;
case XG27:
- XGINew_DataBusWidth = 16; /* 16 bits */
- XGINew_ChannelAB = 1; /* Single channel */
+ pVBInfo->ram_bus = 16; /* 16 bits */
+ pVBInfo->ram_channel = 1; /* Single channel */
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit*/
break;
case XG41:
if (XGINew_CheckFrequence(pVBInfo) == 1) {
- XGINew_DataBusWidth = 32; /* 32 bits */
- XGINew_ChannelAB = 3; /* Quad Channel */
+ pVBInfo->ram_bus = 32; /* 32 bits */
+ pVBInfo->ram_channel = 3; /* Quad Channel */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x4C);
if (XGINew_ReadWriteRest(25, 23, pVBInfo) == 1)
return;
- XGINew_ChannelAB = 2; /* Dual channels */
+ pVBInfo->ram_channel = 2; /* Dual channels */
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x48);
if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
@@ -858,7 +873,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
return;
- XGINew_ChannelAB = 3;
+ pVBInfo->ram_channel = 3;
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x3C);
@@ -872,15 +887,15 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
else
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x39);
} else { /* DDR */
- XGINew_DataBusWidth = 64; /* 64 bits */
- XGINew_ChannelAB = 2; /* Dual channels */
+ pVBInfo->ram_bus = 64; /* 64 bits */
+ pVBInfo->ram_channel = 2; /* Dual channels */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x5A);
if (XGINew_ReadWriteRest(25, 24, pVBInfo) == 1)
return;
- XGINew_ChannelAB = 1; /* Single channels */
+ pVBInfo->ram_channel = 1; /* Single channels */
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x52);
if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
@@ -891,14 +906,14 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
return;
- XGINew_ChannelAB = 2; /* Dual channels */
+ pVBInfo->ram_channel = 2; /* Dual channels */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x4A);
if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
return;
- XGINew_ChannelAB = 1; /* Single channels */
+ pVBInfo->ram_channel = 1; /* Single channels */
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x42);
if (XGINew_ReadWriteRest(8, 4, pVBInfo) == 1)
@@ -918,8 +933,8 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
It's Different from Other XG40 Series.
*/
if (XGINew_CheckFrequence(pVBInfo) == 1) { /* DDRII, DDR2x */
- XGINew_DataBusWidth = 32; /* 32 bits */
- XGINew_ChannelAB = 2; /* 2 Channel */
+ pVBInfo->ram_bus = 32; /* 32 bits */
+ pVBInfo->ram_channel = 2; /* 2 Channel */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x44);
@@ -931,7 +946,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
if (XGINew_ReadWriteRest(23, 22, pVBInfo) == 1)
return;
- XGINew_ChannelAB = 1; /* Single Channel */
+ pVBInfo->ram_channel = 1; /* Single Channel */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x40);
@@ -942,8 +957,8 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x30);
}
} else { /* DDR */
- XGINew_DataBusWidth = 64; /* 64 bits */
- XGINew_ChannelAB = 1; /* 1 channels */
+ pVBInfo->ram_bus = 64; /* 64 bits */
+ pVBInfo->ram_channel = 1; /* 1 channels */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x52);
@@ -960,15 +975,15 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
default: /* XG40 */
if (XGINew_CheckFrequence(pVBInfo) == 1) { /* DDRII */
- XGINew_DataBusWidth = 32; /* 32 bits */
- XGINew_ChannelAB = 3;
+ pVBInfo->ram_bus = 32; /* 32 bits */
+ pVBInfo->ram_channel = 3;
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x4C);
if (XGINew_ReadWriteRest(25, 23, pVBInfo) == 1)
return;
- XGINew_ChannelAB = 2; /* 2 channels */
+ pVBInfo->ram_channel = 2; /* 2 channels */
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x48);
if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
@@ -978,14 +993,14 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x3C);
if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1) {
- XGINew_ChannelAB = 3; /* 4 channels */
+ pVBInfo->ram_channel = 3; /* 4 channels */
} else {
- XGINew_ChannelAB = 2; /* 2 channels */
+ pVBInfo->ram_channel = 2; /* 2 channels */
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x38);
}
} else { /* DDR */
- XGINew_DataBusWidth = 64; /* 64 bits */
- XGINew_ChannelAB = 2; /* 2 channels */
+ pVBInfo->ram_bus = 64; /* 64 bits */
+ pVBInfo->ram_channel = 2; /* 2 channels */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x5A);
@@ -1021,7 +1036,7 @@ static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
if (memsize == 0)
continue;
- addr = memsize + (XGINew_ChannelAB - 2) + 20;
+ addr = memsize + (pVBInfo->ram_channel - 2) + 20;
if ((HwDeviceExtension->ulVideoMemorySize - 1) <
(unsigned long) (1 << addr))
continue;
@@ -1041,7 +1056,7 @@ static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
if (memsize == 0)
continue;
- addr = memsize + (XGINew_ChannelAB - 2) + 20;
+ addr = memsize + (pVBInfo->ram_channel - 2) + 20;
if ((HwDeviceExtension->ulVideoMemorySize - 1) <
(unsigned long) (1 << addr))
continue;
@@ -1428,8 +1443,10 @@ static unsigned char GetXG27FPBits(struct vb_device_info *pVBInfo)
return temp;
}
-unsigned char XGIInitNew(struct xgi_hw_device_info *HwDeviceExtension)
+unsigned char XGIInitNew(struct pci_dev *pdev)
{
+ struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
+ struct xgi_hw_device_info *HwDeviceExtension = &xgifb_info->hw_info;
struct vb_device_info VBINF;
struct vb_device_info *pVBInfo = &VBINF;
unsigned char i, temp = 0, temp1;
@@ -1438,8 +1455,6 @@ unsigned char XGIInitNew(struct xgi_hw_device_info *HwDeviceExtension)
/* unsigned long j, k; */
- unsigned long Temp;
-
pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress;
@@ -1551,8 +1566,7 @@ unsigned char XGIInitNew(struct xgi_hw_device_info *HwDeviceExtension)
/* 3.SetMemoryClock
- XGINew_RAMType = (int)XGINew_GetXG20DRAMType(HwDeviceExtension,
- pVBInfo);
+ pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
*/
printk("11");
@@ -1579,6 +1593,8 @@ unsigned char XGIInitNew(struct xgi_hw_device_info *HwDeviceExtension)
printk("12");
if (HwDeviceExtension->jChipType < XG20) { /* kuku 2004/06/25 */
+ u32 Temp;
+
/* Set AGP Rate */
/*
temp1 = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
@@ -1645,10 +1661,7 @@ unsigned char XGIInitNew(struct xgi_hw_device_info *HwDeviceExtension)
/* if (ChipsetID == 0x25308086) */
/* xgifb_reg_set(pVBInfo->P3d4, 0x77, 0xF0); */
- HwDeviceExtension->pQueryVGAConfigSpace(HwDeviceExtension,
- 0x50,
- 0,
- &Temp); /* Get */
+ pci_read_config_dword(pdev, 0x50, &Temp);
Temp >>= 20;
Temp &= 0xF;
@@ -1732,15 +1745,6 @@ unsigned char XGIInitNew(struct xgi_hw_device_info *HwDeviceExtension)
printk("183");
/* XGINew_DetectMonitor(HwDeviceExtension); */
- pVBInfo->IF_DEF_CH7007 = 0;
- if ((HwDeviceExtension->jChipType == XG21) &&
- (pVBInfo->IF_DEF_CH7007)) {
- printk("184");
- /* sense CRT2 */
- XGI_GetSenseStatus(HwDeviceExtension, pVBInfo);
- printk("185");
-
- }
if (HwDeviceExtension->jChipType == XG21) {
printk("186");
@@ -1763,8 +1767,7 @@ unsigned char XGIInitNew(struct xgi_hw_device_info *HwDeviceExtension)
}
printk("19");
- XGINew_RAMType = (int) XGINew_GetXG20DRAMType(HwDeviceExtension,
- pVBInfo);
+ pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
XGINew_SetDRAMDefaultRegister340(HwDeviceExtension,
pVBInfo->P3d4,
diff --git a/drivers/staging/xgifb/vb_init.h b/drivers/staging/xgifb/vb_init.h
index 6b7723057f7..a27b4fe0bb7 100644
--- a/drivers/staging/xgifb/vb_init.h
+++ b/drivers/staging/xgifb/vb_init.h
@@ -1,6 +1,6 @@
#ifndef _VBINIT_
#define _VBINIT_
-extern unsigned char XGIInitNew(struct xgi_hw_device_info *HwDeviceExtension);
+extern unsigned char XGIInitNew(struct pci_dev *pdev);
extern struct XGI21_LVDSCapStruct XGI21_LCDCapList[13];
#endif
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index dc4d6e6fc9b..81c0cc41bb4 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -8,17 +8,15 @@
#include "vb_def.h"
#include "vgatypes.h"
#include "vb_struct.h"
+#include "vb_init.h"
#include "vb_util.h"
#include "vb_table.h"
#include "vb_setmode.h"
#define IndexMask 0xff
-#ifndef XGI_MASK_DUAL_CHIP
-#define XGI_MASK_DUAL_CHIP 0x04 /* SR3A */
-#endif
-static unsigned short XGINew_MDA_DAC[] = {
+static const unsigned short XGINew_MDA_DAC[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
@@ -28,7 +26,7 @@ static unsigned short XGINew_MDA_DAC[] = {
0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F};
-static unsigned short XGINew_CGA_DAC[] = {
+static const unsigned short XGINew_CGA_DAC[] = {
0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
@@ -38,7 +36,7 @@ static unsigned short XGINew_CGA_DAC[] = {
0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F};
-static unsigned short XGINew_EGA_DAC[] = {
+static const unsigned short XGINew_EGA_DAC[] = {
0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x05, 0x15,
0x20, 0x30, 0x24, 0x34, 0x21, 0x31, 0x25, 0x35,
0x08, 0x18, 0x0C, 0x1C, 0x09, 0x19, 0x0D, 0x1D,
@@ -48,7 +46,7 @@ static unsigned short XGINew_EGA_DAC[] = {
0x0A, 0x1A, 0x0E, 0x1E, 0x0B, 0x1B, 0x0F, 0x1F,
0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F};
-static unsigned short XGINew_VGA_DAC[] = {
+static const unsigned short XGINew_VGA_DAC[] = {
0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
0x00, 0x05, 0x08, 0x0B, 0x0E, 0x11, 0x14, 0x18,
@@ -144,11 +142,6 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
pVBInfo->TimingV = (struct XGI_TimingVStruct *) XGI_TimingV;
pVBInfo->UpdateCRT1 = (struct XGI_XG21CRT1Struct *) XGI_UpdateCRT1Table;
- pVBInfo->CHTVVCLKUNTSC = XGI330_CHTVVCLKUNTSC;
- pVBInfo->CHTVVCLKONTSC = XGI330_CHTVVCLKONTSC;
- pVBInfo->CHTVVCLKUPAL = XGI330_CHTVVCLKUPAL;
- pVBInfo->CHTVVCLKOPAL = XGI330_CHTVVCLKOPAL;
-
/* 310 customization related */
if ((pVBInfo->VBType & VB_XGI301LV) || (pVBInfo->VBType & VB_XGI302LV))
pVBInfo->LCDCapList = XGI_LCDDLCapList;
@@ -207,19 +200,6 @@ static unsigned char XGI_GetModePtr(unsigned short ModeNo,
return index; /* Get pVBInfo->StandTable index */
}
-/*
-unsigned char XGI_SetBIOSData(unsigned short ModeNo,
- unsigned short ModeIdIndex) {
- return (0);
-}
-*/
-
-/* unsigned char XGI_ClearBankRegs(unsigned short ModeNo,
- unsigned short ModeIdIndex) {
- return( 0 ) ;
-}
-*/
-
static void XGI_SetSeqRegs(unsigned short ModeNo,
unsigned short StandTableIndex,
unsigned short ModeIdIndex,
@@ -490,11 +470,6 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
}
}
} else { /* for LVDS */
- if (pVBInfo->IF_DEF_CH7005 == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToTV)
- tempax |= SupportCHTV;
- }
-
if (pVBInfo->VBInfo & SetCRT2ToLCD) {
tempax |= SupportLCD;
@@ -1001,65 +976,10 @@ static void XGI_SetXG27CRTC(unsigned short ModeNo,
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SetXG21LCD */
-/* Input : */
-/* Output : FCLK duty cycle, FCLK delay compensation */
-/* Description : All values set zero */
-/* --------------------------------------------------------------------- */
-static void XGI_SetXG21LCD(struct vb_device_info *pVBInfo,
- unsigned short RefreshRateTableIndex, unsigned short ModeNo)
-{
- unsigned short Data, Temp, b3CC;
- unsigned short XGI_P3cc;
-
- XGI_P3cc = pVBInfo->P3cc;
-
- xgifb_reg_set(pVBInfo->P3d4, 0x2E, 0x00);
- xgifb_reg_set(pVBInfo->P3d4, 0x2F, 0x00);
- xgifb_reg_set(pVBInfo->P3d4, 0x46, 0x00);
- xgifb_reg_set(pVBInfo->P3d4, 0x47, 0x00);
- if (((*pVBInfo->pDVOSetting) & 0xC0) == 0xC0) {
- xgifb_reg_set(pVBInfo->P3d4, 0x2E, *pVBInfo->pCR2E);
- xgifb_reg_set(pVBInfo->P3d4, 0x2F, *pVBInfo->pCR2F);
- xgifb_reg_set(pVBInfo->P3d4, 0x46, *pVBInfo->pCR46);
- xgifb_reg_set(pVBInfo->P3d4, 0x47, *pVBInfo->pCR47);
- }
-
- Temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
-
- if (Temp & 0x01) {
- xgifb_reg_or(pVBInfo->P3c4, 0x06, 0x40); /* 18 bits FP */
- xgifb_reg_or(pVBInfo->P3c4, 0x09, 0x40);
- }
-
- xgifb_reg_or(pVBInfo->P3c4, 0x1E, 0x01); /* Negative blank polarity */
-
- xgifb_reg_and(pVBInfo->P3c4, 0x30, ~0x20);
- xgifb_reg_and(pVBInfo->P3c4, 0x35, ~0x80);
-
- if (ModeNo <= 0x13) {
- b3CC = (unsigned char) inb(XGI_P3cc);
- if (b3CC & 0x40)
- /* Hsync polarity */
- xgifb_reg_or(pVBInfo->P3c4, 0x30, 0x20);
- if (b3CC & 0x80)
- /* Vsync polarity */
- xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80);
- } else {
- Data = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
- if (Data & 0x4000)
- /* Hsync polarity */
- xgifb_reg_or(pVBInfo->P3c4, 0x30, 0x20);
- if (Data & 0x8000)
- /* Vsync polarity */
- xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80);
- }
-}
-
-static void XGI_SetXG27LCD(struct vb_device_info *pVBInfo,
- unsigned short RefreshRateTableIndex,
- unsigned short ModeNo)
+static void xgifb_set_lcd(int chip_id,
+ struct vb_device_info *pVBInfo,
+ unsigned short RefreshRateTableIndex,
+ unsigned short ModeNo)
{
unsigned short Data, Temp, b3CC;
unsigned short XGI_P3cc;
@@ -1071,10 +991,12 @@ static void XGI_SetXG27LCD(struct vb_device_info *pVBInfo,
xgifb_reg_set(pVBInfo->P3d4, 0x46, 0x00);
xgifb_reg_set(pVBInfo->P3d4, 0x47, 0x00);
- Temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
- if ((Temp & 0x03) == 0) { /* dual 12 */
- xgifb_reg_set(pVBInfo->P3d4, 0x46, 0x13);
- xgifb_reg_set(pVBInfo->P3d4, 0x47, 0x13);
+ if (chip_id == XG27) {
+ Temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
+ if ((Temp & 0x03) == 0) { /* dual 12 */
+ xgifb_reg_set(pVBInfo->P3d4, 0x46, 0x13);
+ xgifb_reg_set(pVBInfo->P3d4, 0x47, 0x13);
+ }
}
if (((*pVBInfo->pDVOSetting) & 0xC0) == 0xC0) {
@@ -1084,7 +1006,16 @@ static void XGI_SetXG27LCD(struct vb_device_info *pVBInfo,
xgifb_reg_set(pVBInfo->P3d4, 0x47, *pVBInfo->pCR47);
}
- XGI_SetXG27FPBits(pVBInfo);
+ if (chip_id == XG27) {
+ XGI_SetXG27FPBits(pVBInfo);
+ } else {
+ Temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
+ if (Temp & 0x01) {
+ /* 18 bits FP */
+ xgifb_reg_or(pVBInfo->P3c4, 0x06, 0x40);
+ xgifb_reg_or(pVBInfo->P3c4, 0x09, 0x40);
+ }
+ }
xgifb_reg_or(pVBInfo->P3c4, 0x1E, 0x01); /* Negative blank polarity */
@@ -1336,8 +1267,6 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
- unsigned short tempbx;
-
unsigned short LCDXlat1VCLK[4] = { VCLK65 + 2,
VCLK65 + 2,
VCLK65 + 2,
@@ -1358,7 +1287,6 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
unsigned short CRT2Index, VCLKIndex;
unsigned short modeflag, resinfo;
- unsigned char *CHTVVCLKPtr = NULL;
if (ModeNo <= 0x13) {
/* si+St_ResInfo */
@@ -1459,47 +1387,15 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
else
VCLKIndex = CRT2Index;
- if (pVBInfo->IF_DEF_CH7005 == 1) {
- if (!(pVBInfo->VBInfo & SetCRT2ToLCD)) {
- VCLKIndex &= 0x1f;
- tempbx = 0;
-
- if (pVBInfo->VBInfo & SetPALTV)
- tempbx += 2;
-
- if (pVBInfo->VBInfo & SetCHTVOverScan)
- tempbx += 1;
-
- switch (tempbx) {
- case 0:
- CHTVVCLKPtr = pVBInfo->CHTVVCLKUNTSC;
- break;
- case 1:
- CHTVVCLKPtr = pVBInfo->CHTVVCLKONTSC;
- break;
- case 2:
- CHTVVCLKPtr = pVBInfo->CHTVVCLKUPAL;
- break;
- case 3:
- CHTVVCLKPtr = pVBInfo->CHTVVCLKOPAL;
- break;
- default:
- break;
- }
-
- VCLKIndex = CHTVVCLKPtr[VCLKIndex];
- }
- } else {
- VCLKIndex = VCLKIndex >> 6;
- if ((pVBInfo->LCDResInfo == Panel800x600) ||
- (pVBInfo->LCDResInfo == Panel320x480))
- VCLKIndex = LVDSXlat1VCLK[VCLKIndex];
- else if ((pVBInfo->LCDResInfo == Panel1024x768) ||
- (pVBInfo->LCDResInfo == Panel1024x768x75))
- VCLKIndex = LVDSXlat2VCLK[VCLKIndex];
- else
- VCLKIndex = LVDSXlat3VCLK[VCLKIndex];
- }
+ VCLKIndex = VCLKIndex >> 6;
+ if ((pVBInfo->LCDResInfo == Panel800x600) ||
+ (pVBInfo->LCDResInfo == Panel320x480))
+ VCLKIndex = LVDSXlat1VCLK[VCLKIndex];
+ else if ((pVBInfo->LCDResInfo == Panel1024x768) ||
+ (pVBInfo->LCDResInfo == Panel1024x768x75))
+ VCLKIndex = LVDSXlat2VCLK[VCLKIndex];
+ else
+ VCLKIndex = LVDSXlat3VCLK[VCLKIndex];
}
/* VCLKIndex = VCLKIndex&IndexMask; */
@@ -1771,60 +1667,6 @@ static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
}
-/*
-void XGI_VesaLowResolution(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short modeflag;
-
- if (ModeNo > 0x13)
- modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- else
- modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
-
- if (ModeNo > 0x13) {
- if (modeflag & DoubleScanMode) {
- if (modeflag & HalfDCLK) {
- if (pVBInfo->VBType & VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
- VB_XGI301C)) {
- if (!(pVBInfo->VBInfo &
- SetCRT2ToRAMDAC)) {
- if (pVBInfo->VBInfo &
- SetInSlaveMode) {
- xgifb_reg_and_or(
- pVBInfo->P3c4,
- 0x01,
- 0xf7,
- 0x00);
- xgifb_reg_and_or(
- pVBInfo->P3c4,
- 0x0f,
- 0x7f,
- 0x00);
- return;
- }
- }
- }
- xgifb_reg_and_or(pVBInfo->P3c4,
- 0x0f,
- 0xff,
- 0x80);
- xgifb_reg_and_or(pVBInfo->P3c4,
- 0x01,
- 0xf7,
- 0x00);
- return;
- }
- }
- }
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0f, 0x7f, 0x00);
-}
-*/
-
static void XGI_WriteDAC(unsigned short dl,
unsigned short ah,
unsigned short al,
@@ -1859,7 +1701,8 @@ static void XGI_LoadDAC(unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short data, data2, time, i, j, k, m, n, o, si, di, bx, dl, al,
- ah, dh, *table = NULL;
+ ah, dh;
+ const unsigned short *table = NULL;
if (ModeNo <= 0x13)
data = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
@@ -2061,10 +1904,8 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
switch (tempbx) {
case 0:
- tempdi = XGI_EPLLCDCRT1Ptr_H;
- break;
case 1:
- tempdi = XGI_EPLLCDCRT1Ptr_V;
+ tempdi = xgifb_epllcd_crt1;
break;
case 2:
tempdi = XGI_EPLLCDDataPtr;
@@ -2314,10 +2155,8 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
return &XGI_CetLCD1280x1024Data[tempal];
break;
case 6:
- return &XGI_ExtLCD1400x1050Data[tempal];
- break;
case 7:
- return &XGI_StLCD1400x1050Data[tempal];
+ return &xgifb_lcd_1400x1050[tempal];
break;
case 8:
return &XGI_CetLCD1400x1050Data[tempal];
@@ -2341,10 +2180,8 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
return &XGI_CetLCD1024x768x75Data[tempal];
break;
case 15:
- return &XGI_ExtLCD1280x1024x75Data[tempal];
- break;
case 16:
- return &XGI_StLCD1280x1024x75Data[tempal];
+ return &xgifb_lcd_1280x1024x75[tempal];
break;
case 17:
return &XGI_CetLCD1280x1024x75Data[tempal];
@@ -2388,18 +2225,12 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
return &XGI_CetLCDDes1280x1024Data[tempal];
break;
case 6:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
- return &XGI_ExtLCDDLDes1400x1050Data[tempal];
- else
- return &XGI_ExtLCDDes1400x1050Data[tempal];
- break;
case 7:
if ((pVBInfo->VBType & VB_XGI301LV) ||
(pVBInfo->VBType & VB_XGI302LV))
- return &XGI_StLCDDLDes1400x1050Data[tempal];
+ return &xgifb_lcddldes_1400x1050[tempal];
else
- return &XGI_StLCDDes1400x1050Data[tempal];
+ return &xgifb_lcddes_1400x1050[tempal];
break;
case 8:
return &XGI_CetLCDDes1400x1050Data[tempal];
@@ -2425,27 +2256,19 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
return &XGI_NoScalingDesData[tempal];
break;
case 13:
- return &XGI_ExtLCDDes1024x768x75Data[tempal];
- break;
case 14:
- return &XGI_StLCDDes1024x768x75Data[tempal];
+ return &xgifb_lcddes_1024x768x75[tempal];
break;
case 15:
return &XGI_CetLCDDes1024x768x75Data[tempal];
break;
case 16:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
- return &XGI_ExtLCDDLDes1280x1024x75Data[tempal];
- else
- return &XGI_ExtLCDDes1280x1024x75Data[tempal];
- break;
case 17:
if ((pVBInfo->VBType & VB_XGI301LV) ||
(pVBInfo->VBType & VB_XGI302LV))
- return &XGI_StLCDDLDes1280x1024x75Data[tempal];
+ return &xgifb_lcddldes_1280x1024x75[tempal];
else
- return &XGI_StLCDDes1280x1024x75Data[tempal];
+ return &xgifb_lcddes_1280x1024x75[tempal];
break;
case 18:
if ((pVBInfo->VBType & VB_XGI301LV) ||
@@ -2499,18 +2322,13 @@ static void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo,
switch (tempbx) {
case 0:
tempdi = NULL; /*EPLCHTVCRT1Ptr_H;*/
- if (pVBInfo->IF_DEF_CH7007 == 1)
- tempdi = XGI_EPLCHTVCRT1Ptr;
-
break;
case 1:
tempdi = NULL; /*EPLCHTVCRT1Ptr_V;*/
- if (pVBInfo->IF_DEF_CH7007 == 1)
- tempdi = XGI_EPLCHTVCRT1Ptr;
-
break;
case 2:
- tempdi = XGI_EPLCHTVDataPtr;
+ case 6:
+ tempdi = xgifb_chrontel_tv;
break;
case 3:
tempdi = NULL;
@@ -2521,9 +2339,6 @@ static void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo,
case 5:
tempdi = NULL;
break;
- case 6:
- tempdi = XGI_EPLCHTVRegPtr;
- break;
default:
break;
}
@@ -2625,7 +2440,6 @@ static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
{
unsigned short tempbx;
struct XGI330_LVDSDataStruct *LCDPtr = NULL;
- struct XGI330_CHTVDataStruct *TVPtr = NULL;
tempbx = 2;
@@ -2638,17 +2452,6 @@ static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->HT = LCDPtr->LCDHT;
pVBInfo->VT = LCDPtr->LCDVT;
}
- if (pVBInfo->IF_DEF_CH7017 == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- TVPtr = (struct XGI330_CHTVDataStruct *) XGI_GetTVPtr(
- tempbx, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
- pVBInfo->VGAHT = TVPtr->VGAHT;
- pVBInfo->VGAVT = TVPtr->VGAVT;
- pVBInfo->HT = TVPtr->LCDHT;
- pVBInfo->VT = TVPtr->LCDVT;
- }
- }
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
if (!(pVBInfo->LCDInfo & (SetLCDtoNonExpanding
@@ -2681,9 +2484,6 @@ static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
unsigned short tempbx, i;
struct XGI_LVDSCRT1HDataStruct *LCDPtr = NULL;
struct XGI_LVDSCRT1VDataStruct *LCDPtr1 = NULL;
- /* struct XGI330_CHTVDataStruct *TVPtr = NULL; */
- struct XGI_CH7007TV_TimingHStruct *CH7007TV_TimingHPtr = NULL;
- struct XGI_CH7007TV_TimingVStruct *CH7007TV_TimingVPtr = NULL;
if (ModeNo <= 0x13)
index = pVBInfo->SModeIDTable[ModeIdIndex].St_CRT2CRTC;
@@ -2692,113 +2492,36 @@ static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
index = index & IndexMask;
- if ((pVBInfo->IF_DEF_ScaleLCD == 0) ||
- ((pVBInfo->IF_DEF_ScaleLCD == 1) &&
- (!(pVBInfo->LCDInfo & EnableScalingLCD)))) {
- tempbx = 0;
-
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
- LCDPtr = (struct XGI_LVDSCRT1HDataStruct *)
- XGI_GetLcdPtr(tempbx, ModeNo,
- ModeIdIndex,
- RefreshRateTableIndex,
- pVBInfo);
-
- for (i = 0; i < 8; i++)
- pVBInfo->TimingH[0].data[i] = LCDPtr[0].Reg[i];
- }
-
- if (pVBInfo->IF_DEF_CH7007 == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- CH7007TV_TimingHPtr =
- (struct XGI_CH7007TV_TimingHStruct *)
- XGI_GetTVPtr(
- tempbx,
- ModeNo,
- ModeIdIndex,
- RefreshRateTableIndex,
- pVBInfo);
-
- for (i = 0; i < 8; i++)
- pVBInfo->TimingH[0].data[i] =
- CH7007TV_TimingHPtr[0].data[i];
- }
- }
-
- /* if (pVBInfo->IF_DEF_CH7017 == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToTV)
- TVPtr = (struct XGI330_CHTVDataStruct *)
- XGI_GetTVPtr(
- tempbx,
- ModeNo,
- ModeIdIndex,
- RefreshRateTableIndex,
- pVBInfo);
- }
- */
-
- XGI_SetCRT1Timing_H(pVBInfo, HwDeviceExtension);
-
- if (pVBInfo->IF_DEF_CH7007 == 1) {
- xgifb_reg_set(pVBInfo->P3c4, 0x2E,
- CH7007TV_TimingHPtr[0].data[8]);
- xgifb_reg_set(pVBInfo->P3c4, 0x2F,
- CH7007TV_TimingHPtr[0].data[9]);
- }
-
- tempbx = 1;
+ tempbx = 0;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
- LCDPtr1 = (struct XGI_LVDSCRT1VDataStruct *)
- XGI_GetLcdPtr(
- tempbx,
- ModeNo,
- ModeIdIndex,
- RefreshRateTableIndex,
- pVBInfo);
- for (i = 0; i < 7; i++)
- pVBInfo->TimingV[0].data[i] = LCDPtr1[0].Reg[i];
- }
-
- if (pVBInfo->IF_DEF_CH7007 == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- CH7007TV_TimingVPtr =
- (struct XGI_CH7007TV_TimingVStruct *)
- XGI_GetTVPtr(
- tempbx,
- ModeNo,
- ModeIdIndex,
- RefreshRateTableIndex,
- pVBInfo);
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ LCDPtr = (struct XGI_LVDSCRT1HDataStruct *)
+ XGI_GetLcdPtr(tempbx, ModeNo,
+ ModeIdIndex,
+ RefreshRateTableIndex,
+ pVBInfo);
- for (i = 0; i < 7; i++)
- pVBInfo->TimingV[0].data[i] =
- CH7007TV_TimingVPtr[0].data[i];
- }
- }
- /* if (pVBInfo->IF_DEF_CH7017 == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToTV)
- TVPtr = (struct XGI330_CHTVDataStruct *)
- XGI_GetTVPtr(tempbx,
- ModeNo,
- ModeIdIndex,
- RefreshRateTableIndex,
- pVBInfo);
- }
- */
+ for (i = 0; i < 8; i++)
+ pVBInfo->TimingH[0].data[i] = LCDPtr[0].Reg[i];
+ }
- XGI_SetCRT1Timing_V(ModeIdIndex, ModeNo, pVBInfo);
+ XGI_SetCRT1Timing_H(pVBInfo, HwDeviceExtension);
- if (pVBInfo->IF_DEF_CH7007 == 1) {
- xgifb_reg_and_or(pVBInfo->P3c4, 0x33, ~0x01,
- CH7007TV_TimingVPtr[0].data[7] & 0x01);
- xgifb_reg_set(pVBInfo->P3c4, 0x34,
- CH7007TV_TimingVPtr[0].data[8]);
- xgifb_reg_set(pVBInfo->P3c4, 0x3F,
- CH7007TV_TimingVPtr[0].data[9]);
+ tempbx = 1;
- }
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ LCDPtr1 = (struct XGI_LVDSCRT1VDataStruct *)
+ XGI_GetLcdPtr(
+ tempbx,
+ ModeNo,
+ ModeIdIndex,
+ RefreshRateTableIndex,
+ pVBInfo);
+ for (i = 0; i < 7; i++)
+ pVBInfo->TimingV[0].data[i] = LCDPtr1[0].Reg[i];
}
+
+ XGI_SetCRT1Timing_V(ModeIdIndex, ModeNo, pVBInfo);
}
static unsigned short XGI_GetLCDCapPtr(struct vb_device_info *pVBInfo)
@@ -2887,287 +2610,263 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
else
modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
- if (!(pVBInfo->SetFlag & Win9xDOSMode)) {
- if ((pVBInfo->IF_DEF_CH7017 == 0) || (pVBInfo->VBInfo
- & (SetCRT2ToLCD | SetCRT2ToLCDA))) {
- if (pVBInfo->IF_DEF_OEMUtil == 1) {
- tempbx = 8;
- LCDPtr = (struct XGI330_LCDDataDesStruct *)
- XGI_GetLcdPtr(tempbx,
- ModeNo,
- ModeIdIndex,
- RefreshRateTableIndex,
- pVBInfo);
- }
-
- if ((pVBInfo->IF_DEF_OEMUtil == 0) ||
- (LCDPtr == NULL)) {
- tempbx = 3;
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- LCDPtr1 =
- (struct XGI330_LCDDataDesStruct2 *)
- XGI_GetLcdPtr(
- tempbx,
- ModeNo,
- ModeIdIndex,
- RefreshRateTableIndex,
- pVBInfo);
- else
- LCDPtr =
- (struct XGI330_LCDDataDesStruct *)
- XGI_GetLcdPtr(
- tempbx,
- ModeNo,
- ModeIdIndex,
- RefreshRateTableIndex,
- pVBInfo);
- }
+ tempbx = 3;
+ if (pVBInfo->LCDInfo & EnableScalingLCD)
+ LCDPtr1 =
+ (struct XGI330_LCDDataDesStruct2 *)
+ XGI_GetLcdPtr(
+ tempbx,
+ ModeNo,
+ ModeIdIndex,
+ RefreshRateTableIndex,
+ pVBInfo);
+ else
+ LCDPtr =
+ (struct XGI330_LCDDataDesStruct *)
+ XGI_GetLcdPtr(
+ tempbx,
+ ModeNo,
+ ModeIdIndex,
+ RefreshRateTableIndex,
+ pVBInfo);
- XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
- push1 = tempbx;
- push2 = tempax;
+ XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
+ push1 = tempbx;
+ push2 = tempax;
+
+ /* GetLCDResInfo */
+ if ((pVBInfo->LCDResInfo == Panel1024x768) ||
+ (pVBInfo->LCDResInfo == Panel1024x768x75)) {
+ tempax = 1024;
+ tempbx = 768;
+ } else if ((pVBInfo->LCDResInfo == Panel1280x1024) ||
+ (pVBInfo->LCDResInfo == Panel1280x1024x75)) {
+ tempax = 1280;
+ tempbx = 1024;
+ } else if (pVBInfo->LCDResInfo == Panel1400x1050) {
+ tempax = 1400;
+ tempbx = 1050;
+ } else {
+ tempax = 1600;
+ tempbx = 1200;
+ }
- /* GetLCDResInfo */
- if ((pVBInfo->LCDResInfo == Panel1024x768) ||
- (pVBInfo->LCDResInfo == Panel1024x768x75)) {
- tempax = 1024;
- tempbx = 768;
- } else if ((pVBInfo->LCDResInfo == Panel1280x1024) ||
- (pVBInfo->LCDResInfo == Panel1280x1024x75)) {
- tempax = 1280;
- tempbx = 1024;
- } else if (pVBInfo->LCDResInfo == Panel1400x1050) {
- tempax = 1400;
- tempbx = 1050;
- } else {
- tempax = 1600;
- tempbx = 1200;
- }
+ if (pVBInfo->LCDInfo & SetLCDtoNonExpanding) {
+ pVBInfo->HDE = tempax;
+ pVBInfo->VDE = tempbx;
+ pVBInfo->VGAHDE = tempax;
+ pVBInfo->VGAVDE = tempbx;
+ }
- if (pVBInfo->LCDInfo & SetLCDtoNonExpanding) {
- pVBInfo->HDE = tempax;
- pVBInfo->VDE = tempbx;
- pVBInfo->VGAHDE = tempax;
- pVBInfo->VGAVDE = tempbx;
- }
+ tempax = pVBInfo->HT;
- if ((pVBInfo->IF_DEF_ScaleLCD == 1) &&
- (pVBInfo->LCDInfo & EnableScalingLCD)) {
- tempax = pVBInfo->HDE;
- tempbx = pVBInfo->VDE;
- }
+ if (pVBInfo->LCDInfo & EnableScalingLCD)
+ tempbx = LCDPtr1->LCDHDES;
+ else
+ tempbx = LCDPtr->LCDHDES;
- tempax = pVBInfo->HT;
+ tempcx = pVBInfo->HDE;
+ tempbx = tempbx & 0x0fff;
+ tempcx += tempbx;
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempbx = LCDPtr1->LCDHDES;
- else
- tempbx = LCDPtr->LCDHDES;
+ if (tempcx >= tempax)
+ tempcx -= tempax;
- tempcx = pVBInfo->HDE;
- tempbx = tempbx & 0x0fff;
- tempcx += tempbx;
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1A, tempbx & 0x07);
- if (tempcx >= tempax)
- tempcx -= tempax;
+ tempcx = tempcx >> 3;
+ tempbx = tempbx >> 3;
- xgifb_reg_set(pVBInfo->Part1Port, 0x1A, tempbx & 0x07);
+ xgifb_reg_set(pVBInfo->Part1Port, 0x16,
+ (unsigned short) (tempbx & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x17,
+ (unsigned short) (tempcx & 0xff));
- tempcx = tempcx >> 3;
- tempbx = tempbx >> 3;
+ tempax = pVBInfo->HT;
- xgifb_reg_set(pVBInfo->Part1Port, 0x16,
- (unsigned short) (tempbx & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x17,
- (unsigned short) (tempcx & 0xff));
+ if (pVBInfo->LCDInfo & EnableScalingLCD)
+ tempbx = LCDPtr1->LCDHRS;
+ else
+ tempbx = LCDPtr->LCDHRS;
- tempax = pVBInfo->HT;
+ tempcx = push2;
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempbx = LCDPtr1->LCDHRS;
- else
- tempbx = LCDPtr->LCDHRS;
+ if (pVBInfo->LCDInfo & EnableScalingLCD)
+ tempcx = LCDPtr1->LCDHSync;
- tempcx = push2;
+ tempcx += tempbx;
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempcx = LCDPtr1->LCDHSync;
+ if (tempcx >= tempax)
+ tempcx -= tempax;
- tempcx += tempbx;
+ tempax = tempbx & 0x07;
+ tempax = tempax >> 5;
+ tempcx = tempcx >> 3;
+ tempbx = tempbx >> 3;
- if (tempcx >= tempax)
- tempcx -= tempax;
+ tempcx &= 0x1f;
+ tempax |= tempcx;
- tempax = tempbx & 0x07;
- tempax = tempax >> 5;
- tempcx = tempcx >> 3;
- tempbx = tempbx >> 3;
+ xgifb_reg_set(pVBInfo->Part1Port, 0x15, tempax);
+ xgifb_reg_set(pVBInfo->Part1Port, 0x14,
+ (unsigned short) (tempbx & 0xff));
- tempcx &= 0x1f;
- tempax |= tempcx;
+ tempax = pVBInfo->VT;
+ if (pVBInfo->LCDInfo & EnableScalingLCD)
+ tempbx = LCDPtr1->LCDVDES;
+ else
+ tempbx = LCDPtr->LCDVDES;
+ tempcx = pVBInfo->VDE;
- xgifb_reg_set(pVBInfo->Part1Port, 0x15, tempax);
- xgifb_reg_set(pVBInfo->Part1Port, 0x14,
- (unsigned short) (tempbx & 0xff));
+ tempbx = tempbx & 0x0fff;
+ tempcx += tempbx;
+ if (tempcx >= tempax)
+ tempcx -= tempax;
- tempax = pVBInfo->VT;
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempbx = LCDPtr1->LCDVDES;
- else
- tempbx = LCDPtr->LCDVDES;
- tempcx = pVBInfo->VDE;
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1b,
+ (unsigned short) (tempbx & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1c,
+ (unsigned short) (tempcx & 0xff));
- tempbx = tempbx & 0x0fff;
- tempcx += tempbx;
- if (tempcx >= tempax)
- tempcx -= tempax;
+ tempbx = (tempbx >> 8) & 0x07;
+ tempcx = (tempcx >> 8) & 0x07;
- xgifb_reg_set(pVBInfo->Part1Port, 0x1b,
- (unsigned short) (tempbx & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x1c,
- (unsigned short) (tempcx & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1d,
+ (unsigned short) ((tempcx << 3)
+ | tempbx));
- tempbx = (tempbx >> 8) & 0x07;
- tempcx = (tempcx >> 8) & 0x07;
+ tempax = pVBInfo->VT;
+ if (pVBInfo->LCDInfo & EnableScalingLCD)
+ tempbx = LCDPtr1->LCDVRS;
+ else
+ tempbx = LCDPtr->LCDVRS;
- xgifb_reg_set(pVBInfo->Part1Port, 0x1d,
- (unsigned short) ((tempcx << 3)
- | tempbx));
+ /* tempbx = tempbx >> 4; */
+ tempcx = push1;
- tempax = pVBInfo->VT;
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempbx = LCDPtr1->LCDVRS;
- else
- tempbx = LCDPtr->LCDVRS;
+ if (pVBInfo->LCDInfo & EnableScalingLCD)
+ tempcx = LCDPtr1->LCDVSync;
- /* tempbx = tempbx >> 4; */
- tempcx = push1;
+ tempcx += tempbx;
+ if (tempcx >= tempax)
+ tempcx -= tempax;
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempcx = LCDPtr1->LCDVSync;
+ xgifb_reg_set(pVBInfo->Part1Port, 0x18,
+ (unsigned short) (tempbx & 0xff));
+ xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f,
+ (unsigned short) (tempcx & 0x0f));
- tempcx += tempbx;
- if (tempcx >= tempax)
- tempcx -= tempax;
+ tempax = ((tempbx >> 8) & 0x07) << 3;
- xgifb_reg_set(pVBInfo->Part1Port, 0x18,
- (unsigned short) (tempbx & 0xff));
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f,
- (unsigned short) (tempcx & 0x0f));
+ tempbx = pVBInfo->VGAVDE;
+ if (tempbx != pVBInfo->VDE)
+ tempax |= 0x40;
- tempax = ((tempbx >> 8) & 0x07) << 3;
+ if (pVBInfo->LCDInfo & EnableLVDSDDA)
+ tempax |= 0x40;
- tempbx = pVBInfo->VGAVDE;
- if (tempbx != pVBInfo->VDE)
- tempax |= 0x40;
+ xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07,
+ tempax);
- if (pVBInfo->LCDInfo & EnableLVDSDDA)
- tempax |= 0x40;
+ tempcx = pVBInfo->VGAVT;
+ tempbx = pVBInfo->VDE;
+ tempax = pVBInfo->VGAVDE;
+ tempcx -= tempax;
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07,
- tempax);
+ temp = tempax; /* 0430 ylshieh */
+ temp1 = (temp << 18) / tempbx;
- tempcx = pVBInfo->VGAVT;
- tempbx = pVBInfo->VDE;
- tempax = pVBInfo->VGAVDE;
- tempcx -= tempax;
+ tempdx = (unsigned short) ((temp << 18) % tempbx);
- temp = tempax; /* 0430 ylshieh */
- temp1 = (temp << 18) / tempbx;
+ if (tempdx != 0)
+ temp1 += 1;
- tempdx = (unsigned short) ((temp << 18) % tempbx);
+ temp2 = temp1;
+ push3 = temp2;
- if (tempdx != 0)
- temp1 += 1;
+ xgifb_reg_set(pVBInfo->Part1Port, 0x37,
+ (unsigned short) (temp2 & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x36,
+ (unsigned short) ((temp2 >> 8) & 0xff));
- temp2 = temp1;
- push3 = temp2;
+ tempbx = (unsigned short) (temp2 >> 16);
+ tempax = tempbx & 0x03;
- xgifb_reg_set(pVBInfo->Part1Port, 0x37,
- (unsigned short) (temp2 & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x36,
- (unsigned short) ((temp2 >> 8) & 0xff));
+ tempbx = pVBInfo->VGAVDE;
+ if (tempbx == pVBInfo->VDE)
+ tempax |= 0x04;
- tempbx = (unsigned short) (temp2 >> 16);
- tempax = tempbx & 0x03;
+ xgifb_reg_set(pVBInfo->Part1Port, 0x35, tempax);
- tempbx = pVBInfo->VGAVDE;
- if (tempbx == pVBInfo->VDE)
- tempax |= 0x04;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x35, tempax);
-
- if (pVBInfo->VBType & VB_XGI301C) {
- temp2 = push3;
- xgifb_reg_set(pVBInfo->Part4Port,
- 0x3c,
- (unsigned short) (temp2 & 0xff));
- xgifb_reg_set(pVBInfo->Part4Port,
- 0x3b,
- (unsigned short) ((temp2 >> 8) &
- 0xff));
- tempbx = (unsigned short) (temp2 >> 16);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a,
- ~0xc0,
- (unsigned short) ((tempbx &
- 0xff) << 6));
-
- tempcx = pVBInfo->VGAVDE;
- if (tempcx == pVBInfo->VDE)
- xgifb_reg_and_or(pVBInfo->Part4Port,
- 0x30, ~0x0c, 0x00);
- else
- xgifb_reg_and_or(pVBInfo->Part4Port,
- 0x30, ~0x0c, 0x08);
- }
+ if (pVBInfo->VBType & VB_XGI301C) {
+ temp2 = push3;
+ xgifb_reg_set(pVBInfo->Part4Port,
+ 0x3c,
+ (unsigned short) (temp2 & 0xff));
+ xgifb_reg_set(pVBInfo->Part4Port,
+ 0x3b,
+ (unsigned short) ((temp2 >> 8) &
+ 0xff));
+ tempbx = (unsigned short) (temp2 >> 16);
+ xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a,
+ ~0xc0,
+ (unsigned short) ((tempbx &
+ 0xff) << 6));
+
+ tempcx = pVBInfo->VGAVDE;
+ if (tempcx == pVBInfo->VDE)
+ xgifb_reg_and_or(pVBInfo->Part4Port,
+ 0x30, ~0x0c, 0x00);
+ else
+ xgifb_reg_and_or(pVBInfo->Part4Port,
+ 0x30, ~0x0c, 0x08);
+ }
- tempcx = pVBInfo->VGAHDE;
- tempbx = pVBInfo->HDE;
+ tempcx = pVBInfo->VGAHDE;
+ tempbx = pVBInfo->HDE;
- temp1 = tempcx << 16;
+ temp1 = tempcx << 16;
- tempax = (unsigned short) (temp1 / tempbx);
+ tempax = (unsigned short) (temp1 / tempbx);
- if ((tempbx & 0xffff) == (tempcx & 0xffff))
- tempax = 65535;
+ if ((tempbx & 0xffff) == (tempcx & 0xffff))
+ tempax = 65535;
- temp3 = tempax;
- temp1 = pVBInfo->VGAHDE << 16;
+ temp3 = tempax;
+ temp1 = pVBInfo->VGAHDE << 16;
- temp1 /= temp3;
- temp3 = temp3 << 16;
- temp1 -= 1;
+ temp1 /= temp3;
+ temp3 = temp3 << 16;
+ temp1 -= 1;
- temp3 = (temp3 & 0xffff0000) + (temp1 & 0xffff);
+ temp3 = (temp3 & 0xffff0000) + (temp1 & 0xffff);
- tempax = (unsigned short) (temp3 & 0xff);
- xgifb_reg_set(pVBInfo->Part1Port, 0x1f, tempax);
+ tempax = (unsigned short) (temp3 & 0xff);
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1f, tempax);
- temp1 = pVBInfo->VGAVDE << 18;
- temp1 = temp1 / push3;
- tempbx = (unsigned short) (temp1 & 0xffff);
+ temp1 = pVBInfo->VGAVDE << 18;
+ temp1 = temp1 / push3;
+ tempbx = (unsigned short) (temp1 & 0xffff);
- if (pVBInfo->LCDResInfo == Panel1024x768)
- tempbx -= 1;
+ if (pVBInfo->LCDResInfo == Panel1024x768)
+ tempbx -= 1;
- tempax = ((tempbx >> 8) & 0xff) << 3;
- tempax |= (unsigned short) ((temp3 >> 8) & 0x07);
- xgifb_reg_set(pVBInfo->Part1Port, 0x20,
- (unsigned short) (tempax & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x21,
- (unsigned short) (tempbx & 0xff));
+ tempax = ((tempbx >> 8) & 0xff) << 3;
+ tempax |= (unsigned short) ((temp3 >> 8) & 0x07);
+ xgifb_reg_set(pVBInfo->Part1Port, 0x20,
+ (unsigned short) (tempax & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x21,
+ (unsigned short) (tempbx & 0xff));
- temp3 = temp3 >> 16;
+ temp3 = temp3 >> 16;
- if (modeflag & HalfDCLK)
- temp3 = temp3 >> 1;
+ if (modeflag & HalfDCLK)
+ temp3 = temp3 >> 1;
- xgifb_reg_set(pVBInfo->Part1Port, 0x22,
- (unsigned short) ((temp3 >> 8) & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x23,
- (unsigned short) (temp3 & 0xff));
- }
- }
+ xgifb_reg_set(pVBInfo->Part1Port, 0x22,
+ (unsigned short) ((temp3 >> 8) & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x23,
+ (unsigned short) (temp3 & 0xff));
}
/* --------------------------------------------------------------------- */
@@ -3182,11 +2881,6 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
unsigned short index;
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
- if (pVBInfo->IF_DEF_ScaleLCD == 1) {
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- return;
- }
-
/* index = XGI_GetLCDCapPtr(pVBInfo); */
index = XGI_GetLCDCapPtr1(pVBInfo);
@@ -3207,9 +2901,7 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
{
unsigned short index, modeflag;
- unsigned short tempbx;
unsigned char tempal;
- unsigned char *CHTVVCLKPtr = NULL;
if (ModeNo <= 0x13)
/* si+St_ResInfo */
@@ -3267,95 +2959,8 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
if (pVBInfo->VBInfo & SetCRT2ToTV)
return tempal;
}
- /* else if ((pVBInfo->IF_DEF_CH7017==1) &&
- (pVBInfo->VBType&VB_CH7017)) {
- if (ModeNo<=0x13)
- *tempal = pVBInfo->SModeIDTable[ModeIdIndex].
- St_CRT2CRTC;
- else
- *tempal = pVBInfo->RefIndex[
- RefreshRateTableIndex].Ext_CRT2CRTC;
- *tempal = *tempal & 0x1F;
- tempbx = 0;
- if (pVBInfo->TVInfo & SetPALTV)
- tempbx = tempbx + 2;
- if (pVBInfo->TVInfo & SetCHTVOverScan)
- tempbx++;
- tempbx = tempbx << 1;
- } */
} /* {End of VB} */
- if ((pVBInfo->IF_DEF_CH7007 == 1) &&
- (pVBInfo->VBType & VB_CH7007)) { /* [Billy] 07/05/08 CH7007 */
- /* VideoDebugPrint((
- 0,
- "XGI_GetVCLKPtr: pVBInfo->IF_DEF_CH7007==1\n")); */
- if ((pVBInfo->VBInfo & SetCRT2ToTV)) {
- if (ModeNo <= 0x13) {
- tempal = pVBInfo->SModeIDTable[ModeIdIndex].
- St_CRT2CRTC;
- } else {
- tempal = pVBInfo->RefIndex[
- RefreshRateTableIndex].Ext_CRT2CRTC;
- }
-
- tempal = tempal & 0x0F;
- tempbx = 0;
-
- if (pVBInfo->TVInfo & SetPALTV)
- tempbx = tempbx + 2;
-
- if (pVBInfo->TVInfo & SetCHTVOverScan)
- tempbx++;
-
- /** tempbx = tempbx << 1; CH7007 ? **/
-
- /* [Billy]07/05/29 CH7007 */
- if (pVBInfo->IF_DEF_CH7007 == 1) {
- switch (tempbx) {
- case 0:
- CHTVVCLKPtr = XGI7007_CHTVVCLKUNTSC;
- break;
- case 1:
- CHTVVCLKPtr = XGI7007_CHTVVCLKONTSC;
- break;
- case 2:
- CHTVVCLKPtr = XGI7007_CHTVVCLKUPAL;
- break;
- case 3:
- CHTVVCLKPtr = XGI7007_CHTVVCLKOPAL;
- break;
- default:
- break;
-
- }
- }
- /* else {
- switch(tempbx) {
- case 0:
- CHTVVCLKPtr = pVBInfo->CHTVVCLKUNTSC;
- break;
- case 1:
- CHTVVCLKPtr = pVBInfo->CHTVVCLKONTSC;
- break;
- case 2:
- CHTVVCLKPtr = pVBInfo->CHTVVCLKUPAL;
- break;
- case 3:
- CHTVVCLKPtr = pVBInfo->CHTVVCLKOPAL;
- break;
- default:
- break;
- }
- }
- */
-
- tempal = CHTVVCLKPtr[tempal];
- return tempal;
- }
-
- }
-
tempal = (unsigned char) inb((pVBInfo->P3ca + 0x02));
tempal = tempal >> 2;
tempal &= 0x03;
@@ -3374,13 +2979,7 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
unsigned char *di_1, struct vb_device_info *pVBInfo)
{
- if (pVBInfo->IF_DEF_CH7007 == 1) { /* [Billy] 2007/05/16 */
- /* VideoDebugPrint((
- 0,
- "XGI_GetVCLKLen: pVBInfo->IF_DEF_CH7007==1\n")); */
- *di_0 = (unsigned char) XGI_CH7007VCLKData[tempal].SR2B;
- *di_1 = (unsigned char) XGI_CH7007VCLKData[tempal].SR2C;
- } else if (pVBInfo->VBType & (VB_XGI301 | VB_XGI301B | VB_XGI302B
+ if (pVBInfo->VBType & (VB_XGI301 | VB_XGI301B | VB_XGI302B
| VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
if ((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) && (pVBInfo->SetFlag
& ProgrammingCRT2)) {
@@ -3408,10 +3007,7 @@ static void XGI_SetCRT2ECLK(unsigned short ModeNo, unsigned short ModeIdIndex,
for (i = 0; i < 4; i++) {
xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30,
(unsigned short) (0x10 * i));
- if (pVBInfo->IF_DEF_CH7007 == 1) {
- xgifb_reg_set(pVBInfo->P3c4, 0x2b, di_0);
- xgifb_reg_set(pVBInfo->P3c4, 0x2c, di_1);
- } else if ((!(pVBInfo->VBInfo & SetCRT2ToLCDA))
+ if ((!(pVBInfo->VBInfo & SetCRT2ToLCDA))
&& (!(pVBInfo->VBInfo & SetInSlaveMode))) {
xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0);
xgifb_reg_set(pVBInfo->P3c4, 0x2f, di_1);
@@ -3526,10 +3122,6 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
{
unsigned short flag, tempbx, tempah;
- if (pVBInfo->IF_DEF_CH7007 == 1) {
- pVBInfo->VBType = VB_CH7007;
- return;
- }
if (pVBInfo->IF_DEF_LVDS == 0) {
tempbx = VB_XGI302B;
flag = xgifb_reg_get(pVBInfo->Part4Port, 0x00);
@@ -3566,13 +3158,6 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
}
pVBInfo->VBType = tempbx;
}
- /*
- else if (pVBInfo->IF_DEF_CH7017 == 1)
- pVBInfo->VBType = VB_CH7017;
- else //LVDS
- pVBInfo->VBType = VB_LVDS_NS;
- */
-
}
void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
@@ -3630,17 +3215,6 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
SetCRT2ToLCDA;
}
}
- } else if (pVBInfo->IF_DEF_CH7017 == 1) {
- if (pVBInfo->VBType & VB_CH7017) {
- if (temp & EnableDualEdge) {
- tempbx |=
- SetCRT2ToDualEdge;
-
- if (temp & SetToLCDA)
- tempbx |=
- SetCRT2ToLCDA;
- }
- }
}
}
}
@@ -3650,11 +3224,7 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (((pVBInfo->IF_DEF_LVDS == 0) &&
((pVBInfo->VBType & VB_XGI301LV) ||
(pVBInfo->VBType & VB_XGI302LV) ||
- (pVBInfo->VBType & VB_XGI301C))) ||
- ((pVBInfo->IF_DEF_CH7017 == 1) &&
- (pVBInfo->VBType & VB_CH7017)) ||
- ((pVBInfo->IF_DEF_CH7007 == 1) &&
- (pVBInfo->VBType & VB_CH7007))) {
+ (pVBInfo->VBType & VB_XGI301C)))) {
if (temp & SetYPbPr) { /* temp = CR38 */
if (pVBInfo->IF_DEF_HiVision == 1) {
/* shampoo add for new
@@ -3693,15 +3263,7 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = 0x017C;
}
} else { /* 3nd party chip */
- if (pVBInfo->IF_DEF_CH7017 == 1)
- temp = (SetCRT2ToTV |
- SetCRT2ToLCD |
- SetCRT2ToLCDA);
- /* [Billy] 07/05/03 */
- else if (pVBInfo->IF_DEF_CH7007 == 1)
- temp = SetCRT2ToTV;
- else
- temp = SetCRT2ToLCD;
+ temp = SetCRT2ToLCD;
}
if (!(tempbx & temp)) {
@@ -3789,34 +3351,6 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx |= (SetInSlaveMode |
SetSimuScanMode);
}
-
- if (pVBInfo->IF_DEF_VideoCapture == 1) {
- if (((HwDeviceExtension->jChipType ==
- XG40) &&
- (pVBInfo->Set_VGAType == XG40)) ||
- ((HwDeviceExtension->jChipType ==
- XG41) &&
- (pVBInfo->Set_VGAType == XG41)) ||
- ((HwDeviceExtension->jChipType ==
- XG42) &&
- (pVBInfo->Set_VGAType == XG42)) ||
- ((HwDeviceExtension->jChipType ==
- XG45) &&
- (pVBInfo->Set_VGAType == XG45))) {
- if (ModeNo <= 13) {
- if (!(tempbx &
- SetCRT2ToRAMDAC)) {
- /*CRT2 not need
- * to support*/
- tempbx &=
- (0x00FF |
- (~SetInSlaveMode));
- pVBInfo->SetFlag
- |= EnableVCMode;
- }
- }
- }
- }
}
/* LCD+TV can't support in slave mode
@@ -3883,20 +3417,6 @@ void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
*/
}
- if (pVBInfo->IF_DEF_CH7017 == 1) {
- tempbx = xgifb_reg_get(pVBInfo->P3d4, 0x35);
-
- if (tempbx & TVOverScan)
- tempbx |= SetCHTVOverScan;
- }
-
- if (pVBInfo->IF_DEF_CH7007 == 1) { /* [Billy] 07/05/04 */
- tempbx = xgifb_reg_get(pVBInfo->P3d4, 0x35);
-
- if (tempbx & TVOverScan)
- tempbx |= SetCHTVOverScan;
- }
-
if (pVBInfo->IF_DEF_LVDS == 0) {
if (pVBInfo->VBInfo & SetCRT2ToSCART)
tempbx |= SetPALTV;
@@ -4003,9 +3523,6 @@ unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
/* End of LCD75 */
- if (pVBInfo->IF_DEF_OEMUtil == 1)
- pVBInfo->LCDTypeInfo = (temp & 0xf0) >> 4;
-
if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)))
return 0;
@@ -4015,9 +3532,6 @@ unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
temp &= (ScalingLCD | LCDNonExpanding | LCDSyncBit | SetPWDEnable);
- if ((pVBInfo->IF_DEF_ScaleLCD == 1) && (temp & LCDNonExpanding))
- temp &= ~EnableScalingLCD;
-
tempbx |= temp;
LCDIdIndex = XGI_GetLCDCapPtr1(pVBInfo);
@@ -4031,11 +3545,6 @@ unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
- if (pVBInfo->IF_DEF_CH7017 == 1) {
- if (tempax & LCDDualLink)
- tempbx |= SetLCDDualLink;
- }
-
if (pVBInfo->IF_DEF_LVDS == 0) {
if ((pVBInfo->LCDResInfo == Panel1400x1050) && (pVBInfo->VBInfo
& SetCRT2ToLCD) && (ModeNo > 0x13) && (resinfo
@@ -4077,16 +3586,6 @@ unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->LCDInfo = tempbx;
- if (pVBInfo->IF_DEF_PWD == 1) {
- if (pVBInfo->LCDInfo & SetPWDEnable) {
- if ((pVBInfo->VBType & VB_XGI302LV) ||
- (pVBInfo->VBType & VB_XGI301C)) {
- if (!(tempax & PWDEnable))
- pVBInfo->LCDInfo &= ~SetPWDEnable;
- }
- }
- }
-
if (pVBInfo->IF_DEF_LVDS == 0) {
if (tempax & (LockLCDBToA | StLCDBToA)) {
if (pVBInfo->VBInfo & SetInSlaveMode) {
@@ -4169,102 +3668,6 @@ unsigned char XGI_SearchModeID(unsigned short ModeNo,
return 1;
}
-/* win2000 MM adapter not support standard mode! */
-
-#if 0
-static unsigned char XGINew_CheckMemorySize(
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short memorysize, modeflag, temp, temp1, tmp;
-
- /*
- if ((HwDeviceExtension->jChipType == XGI_650) ||
- (HwDeviceExtension->jChipType == XGI_650M)) {
- return 1;
- }
- */
-
- if (ModeNo <= 0x13)
- modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
- else
- modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
-
- /* ModeType = modeflag&ModeInfoFlag; // Get mode type */
-
- memorysize = modeflag & MemoryInfoFlag;
- memorysize = memorysize > MemorySizeShift;
- memorysize++; /* Get memory size */
-
- temp = xgifb_reg_get(pVBInfo->P3c4, 0x14); /* Get DRAM Size */
- tmp = temp;
-
- if (HwDeviceExtension->jChipType == XG40) {
- /* memory size per channel SR14[7:4] */
- temp = 1 << ((temp & 0x0F0) >> 4);
- if ((tmp & 0x0c) == 0x0C) { /* Qual channels */
- temp <<= 2;
- } else if ((tmp & 0x0c) == 0x08) { /* Dual channels */
- temp <<= 1;
- }
- } else if (HwDeviceExtension->jChipType == XG42) {
- /* memory size per channel SR14[7:4] */
- temp = 1 << ((temp & 0x0F0) >> 4);
- if ((tmp & 0x04) == 0x04) { /* Dual channels */
- temp <<= 1;
- }
- } else if (HwDeviceExtension->jChipType == XG45) {
- /* memory size per channel SR14[7:4] */
- temp = 1 << ((temp & 0x0F0) >> 4);
- if ((tmp & 0x0c) == 0x0C) { /* Qual channels */
- temp <<= 2;
- } else if ((tmp & 0x0c) == 0x08) { /* triple channels */
- temp1 = temp;
- temp <<= 1;
- temp += temp1;
- } else if ((tmp & 0x0c) == 0x04) { /* Dual channels */
- temp <<= 1;
- }
- }
- if (temp < memorysize)
- return 0;
- else
- return 1;
-}
-#endif
-
-/*
-void XGINew_IsLowResolution(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- unsigned char XGINew_CheckMemorySize(
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short data ;
- unsigned short ModeFlag ;
-
- data = xgifb_reg_get(pVBInfo->P3c4, 0x0F);
- data &= 0x7F;
- xgifb_reg_set(pVBInfo->P3c4, 0x0F, data);
-
- if (ModeNo > 0x13) {
- ModeFlag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- if ((ModeFlag & HalfDCLK) && (ModeFlag & DoubleScanMode)) {
- data = xgifb_reg_get(pVBInfo->P3c4, 0x0F);
- data |= 0x80;
- xgifb_reg_set(pVBInfo->P3c4, 0x0F, data);
- data = xgifb_reg_get(pVBInfo->P3c4, 0x01);
- data &= 0xF7;
- xgifb_reg_set(pVBInfo->P3c4, 0x01, data);
- }
- }
-}
-*/
-
static unsigned char XG21GPIODataTransfer(unsigned char ujDate)
{
unsigned char ujRet = 0;
@@ -4348,10 +3751,6 @@ void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
}
- /* [Billy] 07/05/23 For CH7007 */
- if (pVBInfo->IF_DEF_CH7007 == 1) {
- }
-
if (pXGIHWDE->jChipType == XG27) {
if (pVBInfo->IF_DEF_LVDS == 1) {
if (!(XGI_XG27GetPSCValue(pVBInfo) & 0x1)) {
@@ -4388,12 +3787,6 @@ void XGI_DisplayOff(struct xgi_hw_device_info *pXGIHWDE,
}
}
- if (pVBInfo->IF_DEF_CH7007 == 1) { /* [Billy] 07/05/23 For CH7007 */
- /* if (IsCH7007TVMode(pVBInfo) == 0) */
- {
- }
- }
-
if (pXGIHWDE->jChipType == XG27) {
if ((XGI_XG27GetPSCValue(pVBInfo) & 0x2)) {
/* LVDS backlight off */
@@ -4418,20 +3811,9 @@ static void XGI_WaitDisply(struct vb_device_info *pVBInfo)
break;
}
-#if 0
-static void XGI_WaitDisplay(struct vb_device_info *pVBInfo)
-{
- while (!(inb(pVBInfo->P3da) & 0x01))
- ;
- while (inb(pVBInfo->P3da) & 0x01)
- ;
-}
-#endif
-
static void XGI_AutoThreshold(struct vb_device_info *pVBInfo)
{
- if (!(pVBInfo->SetFlag & Win9xDOSMode))
- xgifb_reg_or(pVBInfo->Part1Port, 0x01, 0x40);
+ xgifb_reg_or(pVBInfo->Part1Port, 0x01, 0x40);
}
static void XGI_SaveCRT2Info(unsigned short ModeNo,
@@ -5026,11 +4408,6 @@ static void XGI_SetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
tempcx = (pVBInfo->VGAVT - 1);
temp = tempcx & 0x00FF;
- if (pVBInfo->IF_DEF_CH7005 == 1) {
- if (pVBInfo->VBInfo & 0x0C)
- temp--;
- }
-
xgifb_reg_set(pVBInfo->Part1Port, 0x0E, temp);
tempbx = pVBInfo->VGAVDE - 1;
temp = tempbx & 0x00FF;
@@ -6176,27 +5553,24 @@ static struct XGI301C_Tap4TimingStruct *XGI_GetTap4Ptr(unsigned short tempcx,
tempbx = pVBInfo->VDE;
}
- if (tempax < tempbx)
- return &EnlargeTap4Timing[0];
- else if (tempax == tempbx)
- return &NoScaleTap4Timing[0]; /* 1:1 */
+ if (tempax <= tempbx)
+ return &xgifb_tap4_timing[0];
else
- Tap4TimingPtr = NTSCTap4Timing; /* NTSC */
+ Tap4TimingPtr = xgifb_ntsc_525_tap4_timing; /* NTSC */
if (pVBInfo->TVInfo & SetPALTV)
Tap4TimingPtr = PALTap4Timing;
if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
- if (pVBInfo->TVInfo & SetYPbPrMode525i)
- Tap4TimingPtr = YPbPr525iTap4Timing;
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
- Tap4TimingPtr = YPbPr525pTap4Timing;
+ if ((pVBInfo->TVInfo & SetYPbPrMode525i) ||
+ (pVBInfo->TVInfo & SetYPbPrMode525p))
+ Tap4TimingPtr = xgifb_ntsc_525_tap4_timing;
if (pVBInfo->TVInfo & SetYPbPrMode750p)
Tap4TimingPtr = YPbPr750pTap4Timing;
}
if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
- Tap4TimingPtr = HiTVTap4Timing;
+ Tap4TimingPtr = xgifb_tap4_timing;
i = 0;
while (Tap4TimingPtr[i].DE != 0xFFFF) {
@@ -6216,10 +5590,6 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
if (!(pVBInfo->VBType & VB_XGI301C))
return;
-#ifndef Tap4
- xgifb_reg_and(pVBInfo->Part2Port, 0x4E, 0xEB); /* Disable Tap4 */
-#else /* Tap4 Setting */
-
Tap4TimingPtr = XGI_GetTap4Ptr(0, pVBInfo); /* Set Horizontal Scaling */
for (i = 0x80, j = 0; i <= 0xBF; i++, j++)
xgifb_reg_set(pVBInfo->Part2Port, i, Tap4TimingPtr->Reg[j]);
@@ -6241,7 +5611,6 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
else
/* Enable H.Scaling */
xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x14, 0x10);
-#endif
}
static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
@@ -6742,204 +6111,10 @@ void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo)
}
-static void XGI_SetXG21LVDSPara(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned char temp, Miscdata;
- unsigned short xres, yres, modeflag, resindex, lvdstableindex;
- unsigned short LVDSHT, LVDSHBS, LVDSHRS, LVDSHRE, LVDSHBE;
- unsigned short LVDSVT, LVDSVBS, LVDSVRS, LVDSVRE, LVDSVBE;
- unsigned short value;
-
- lvdstableindex = XGI_GetLVDSOEMTableIndex(pVBInfo);
-
- temp = (unsigned char) ((pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDS_Capability &
- (LCDPolarity << 8)) >> 8);
- temp &= LCDPolarity;
- Miscdata = (unsigned char) inb(pVBInfo->P3cc);
-
- outb((Miscdata & 0x3F) | temp, pVBInfo->P3c2);
-
- temp = (unsigned char) (pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDS_Capability & LCDPolarity);
- /* SR35[7] FP VSync polarity */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x80, temp & 0x80);
- /* SR30[5] FP HSync polarity */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x30, ~0x20, (temp & 0x40) >> 1);
-
- XGI_SetXG21FPBits(pVBInfo);
- resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);
- if (ModeNo <= 0x13) {
- xres = pVBInfo->StResInfo[resindex].HTotal;
- yres = pVBInfo->StResInfo[resindex].VTotal;
- /* si+St_ResInfo */
- modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
- } else {
- xres = pVBInfo->ModeResInfo[resindex].HTotal; /* xres->ax */
- yres = pVBInfo->ModeResInfo[resindex].VTotal; /* yres->bx */
- /* si+St_ModeFlag */
- modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- }
-
- if (!(modeflag & Charx8Dot))
- xres = xres * 8 / 9;
-
- LVDSHT = pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHT;
-
- LVDSHBS = xres + (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE
- - xres) / 2;
- if ((ModeNo <= 0x13) && (modeflag & HalfDCLK))
- LVDSHBS -= xres / 4;
-
- if (LVDSHBS > LVDSHT)
- LVDSHBS -= LVDSHT;
-
- LVDSHRS = LVDSHBS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHFP;
- if (LVDSHRS > LVDSHT)
- LVDSHRS -= LVDSHT;
-
- LVDSHRE = LVDSHRS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHSYNC;
- if (LVDSHRE > LVDSHT)
- LVDSHRE -= LVDSHT;
-
- LVDSHBE = LVDSHBS + LVDSHT
- - pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE;
-
- LVDSVT = pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVT;
-
- LVDSVBS = yres + (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE
- - yres) / 2;
- if ((ModeNo > 0x13) && (modeflag & DoubleScanMode))
- LVDSVBS += yres / 2;
-
- if (LVDSVBS > LVDSVT)
- LVDSVBS -= LVDSVT;
-
- LVDSVRS = LVDSVBS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVFP;
- if (LVDSVRS > LVDSVT)
- LVDSVRS -= LVDSVT;
-
- LVDSVRE = LVDSVRS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVSYNC;
- if (LVDSVRE > LVDSVT)
- LVDSVRE -= LVDSVT;
-
- LVDSVBE = LVDSVBS + LVDSVT
- - pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE;
-
- temp = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
- xgifb_reg_set(pVBInfo->P3d4, 0x11, temp & 0x7f); /* Unlock CRTC */
-
- if (!(modeflag & Charx8Dot))
- xgifb_reg_or(pVBInfo->P3c4, 0x1, 0x1);
-
- /* HT SR0B[1:0] CR00 */
- value = (LVDSHT >> 3) - 5;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0B, ~0x03, (value & 0x300) >> 8);
- xgifb_reg_set(pVBInfo->P3d4, 0x0, (value & 0xFF));
-
- /* HBS SR0B[5:4] CR02 */
- value = (LVDSHBS >> 3) - 1;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0B, ~0x30, (value & 0x300) >> 4);
- xgifb_reg_set(pVBInfo->P3d4, 0x2, (value & 0xFF));
-
- /* HBE SR0C[1:0] CR05[7] CR03[4:0] */
- value = (LVDSHBE >> 3) - 1;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0C, ~0x03, (value & 0xC0) >> 6);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x05, ~0x80, (value & 0x20) << 2);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x03, ~0x1F, value & 0x1F);
-
- /* HRS SR0B[7:6] CR04 */
- value = (LVDSHRS >> 3) + 2;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0B, ~0xC0, (value & 0x300) >> 2);
- xgifb_reg_set(pVBInfo->P3d4, 0x4, (value & 0xFF));
-
- /* Panel HRS SR2F[1:0] SR2E[7:0] */
- value--;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x2F, ~0x03, (value & 0x300) >> 8);
- xgifb_reg_set(pVBInfo->P3c4, 0x2E, (value & 0xFF));
-
- /* HRE SR0C[2] CR05[4:0] */
- value = (LVDSHRE >> 3) + 2;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0C, ~0x04, (value & 0x20) >> 3);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x05, ~0x1F, value & 0x1F);
-
- /* Panel HRE SR2F[7:2] */
- value--;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x2F, ~0xFC, value << 2);
-
- /* VT SR0A[0] CR07[5][0] CR06 */
- value = LVDSVT - 2;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x01, (value & 0x400) >> 10);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x20, (value & 0x200) >> 4);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x01, (value & 0x100) >> 8);
- xgifb_reg_set(pVBInfo->P3d4, 0x06, (value & 0xFF));
-
- /* VBS SR0A[2] CR09[5] CR07[3] CR15 */
- value = LVDSVBS - 1;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x04, (value & 0x400) >> 8);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x09, ~0x20, (value & 0x200) >> 4);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x08, (value & 0x100) >> 5);
- xgifb_reg_set(pVBInfo->P3d4, 0x15, (value & 0xFF));
-
- /* VBE SR0A[4] CR16 */
- value = LVDSVBE - 1;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x10, (value & 0x100) >> 4);
- xgifb_reg_set(pVBInfo->P3d4, 0x16, (value & 0xFF));
-
- /* VRS SR0A[3] CR7[7][2] CR10 */
- value = LVDSVRS - 1;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x08, (value & 0x400) >> 7);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x80, (value & 0x200) >> 2);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x04, (value & 0x100) >> 6);
- xgifb_reg_set(pVBInfo->P3d4, 0x10, (value & 0xFF));
-
- /* Panel VRS SR3F[1:0] SR34[7:0] SR33[0] */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0x03, (value & 0x600) >> 9);
- xgifb_reg_set(pVBInfo->P3c4, 0x34, (value >> 1) & 0xFF);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x33, ~0x01, value & 0x01);
-
- /* VRE SR0A[5] CR11[3:0] */
- value = LVDSVRE - 1;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x20, (value & 0x10) << 1);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x11, ~0x0F, value & 0x0F);
-
- /* Panel VRE SR3F[7:2] *//* SR3F[7] has to be 0, h/w bug */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC, (value << 2) & 0x7C);
-
- for (temp = 0, value = 0; temp < 3; temp++) {
-
- xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value);
- xgifb_reg_set(pVBInfo->P3c4,
- 0x2B,
- pVBInfo->XG21_LVDSCapList[lvdstableindex].
- VCLKData1);
- xgifb_reg_set(pVBInfo->P3c4,
- 0x2C,
- pVBInfo->XG21_LVDSCapList[lvdstableindex].
- VCLKData2);
- value += 0x10;
- }
-
- if (!(modeflag & Charx8Dot)) {
- inb(pVBInfo->P3da); /* reset 3da */
- outb(0x13, pVBInfo->P3c0); /* set index */
- /* set data, panning = 0, shift left 1 dot*/
- outb(0x00, pVBInfo->P3c0);
-
- inb(pVBInfo->P3da); /* Enable Attribute */
- outb(0x20, pVBInfo->P3c0);
-
- inb(pVBInfo->P3da); /* reset 3da */
- }
-
-}
-
-/* no shadow case */
-static void XGI_SetXG27LVDSPara(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void xgifb_set_lvds(int chip_id,
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned char temp, Miscdata;
unsigned short xres, yres, modeflag, resindex, lvdstableindex;
@@ -6963,7 +6138,11 @@ static void XGI_SetXG27LVDSPara(unsigned short ModeNo,
/* SR30[5] FP HSync polarity */
xgifb_reg_and_or(pVBInfo->P3c4, 0x30, ~0x20, (temp & 0x40) >> 1);
- XGI_SetXG27FPBits(pVBInfo);
+ if (chip_id == XG27)
+ XGI_SetXG27FPBits(pVBInfo);
+ else
+ XGI_SetXG21FPBits(pVBInfo);
+
resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);
if (ModeNo <= 0x13) {
xres = pVBInfo->StResInfo[resindex].HTotal;
@@ -7090,9 +6269,18 @@ static void XGI_SetXG27LVDSPara(unsigned short ModeNo,
xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x04, (value & 0x100) >> 6);
xgifb_reg_set(pVBInfo->P3d4, 0x10, (value & 0xFF));
- /* Panel VRS SR35[2:0] SR34[7:0] */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x07, (value & 0x700) >> 8);
- xgifb_reg_set(pVBInfo->P3c4, 0x34, value & 0xFF);
+ if (chip_id == XG27) {
+ /* Panel VRS SR35[2:0] SR34[7:0] */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x07,
+ (value & 0x700) >> 8);
+ xgifb_reg_set(pVBInfo->P3c4, 0x34, value & 0xFF);
+ } else {
+ /* Panel VRS SR3F[1:0] SR34[7:0] SR33[0] */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0x03,
+ (value & 0x600) >> 9);
+ xgifb_reg_set(pVBInfo->P3c4, 0x34, (value >> 1) & 0xFF);
+ xgifb_reg_and_or(pVBInfo->P3d4, 0x33, ~0x01, value & 0x01);
+ }
/* VRE SR0A[5] CR11[3:0] */
value = LVDSVRE - 1;
@@ -7100,7 +6288,13 @@ static void XGI_SetXG27LVDSPara(unsigned short ModeNo,
xgifb_reg_and_or(pVBInfo->P3d4, 0x11, ~0x0F, value & 0x0F);
/* Panel VRE SR3F[7:2] */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC, (value << 2) & 0xFC);
+ if (chip_id == XG27)
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC,
+ (value << 2) & 0xFC);
+ else
+ /* SR3F[7] has to be 0, h/w bug */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC,
+ (value << 2) & 0x7C);
for (temp = 0, value = 0; temp < 3; temp++) {
@@ -7209,30 +6403,6 @@ void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
{
unsigned short tempah = 0;
- if (pVBInfo->SetFlag == Win9xDOSMode)
- return;
-
- /*
- if (CH7017) {
- if (!(pVBInfo->VBInfo &
- (SetCRT2ToLCD | SetCRT2toLCDA)) ||
- (XGI_DisableChISLCD(pVBInfo))) {
- if (!XGI_IsLCDON(pVBInfo)) {
- if (DISCHARGE) {
- tempbx = XGINew_GetCH7005(0x61);
- // first time we power up
- if (tempbx < 0x01)
- // and disable power sequence
- XGINew_SetCH7005(0x0066);
- else
- // leave VDD on - disable power
- XGINew_SetCH7005(0x5f66);
- }
- }
- }
- }
- */
-
if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
| VB_XGI302LV | VB_XGI301C)) {
tempah = 0x3F;
@@ -7712,28 +6882,16 @@ static void XGI_SetYFilter(unsigned short ModeNo, unsigned short ModeIdIndex,
case 0x02:
case 0x05:
case 0x0D:
- filterPtr = PALMYFilter1;
- break;
-
case 0x03:
- filterPtr = PALNYFilter1;
+ filterPtr = xgifb_palmn_yfilter1;
break;
case 0x08:
case 0x0C:
- filterPtr = NTSCYFilter2;
- break;
-
case 0x0A:
- filterPtr = PALMYFilter2;
- break;
-
case 0x0B:
- filterPtr = PALNYFilter2;
- break;
-
case 0x09:
- filterPtr = PALYFilter2;
+ filterPtr = xgifb_yfilter2;
break;
default:
@@ -7782,9 +6940,6 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
- if (pVBInfo->SetFlag & Win9xDOSMode)
- return;
-
/* GetPart1IO(); */
XGI_SetDelayComp(pVBInfo);
@@ -8033,13 +7188,6 @@ static void XGI_CloseCRTC(struct xgi_hw_device_info *HwDeviceExtension,
}
-void XGI_OpenCRTC(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempbx;
- tempbx = 0;
-}
-
void XGI_UnLockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
@@ -8137,13 +7285,6 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
else
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- if (pVBInfo->IF_DEF_CH7005 == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (modeflag & HalfDCLK)
- return 0;
- }
- }
-
if (ModeNo < 0x14)
return 0xFFFF;
@@ -8158,11 +7299,6 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
index--;
if (pVBInfo->SetFlag & ProgrammingCRT2) {
- if (pVBInfo->IF_DEF_CH7005 == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToTV)
- index = 0;
- }
-
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
if (pVBInfo->IF_DEF_LVDS == 0) {
if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
@@ -8401,16 +7537,6 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
{
unsigned short tempah;
- if (pVBInfo->SetFlag == Win9xDOSMode) {
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
- return;
- } else
- /* LVDS or CH7017 */
- return;
- }
-
if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
| VB_XGI302LV | VB_XGI301C)) {
if (!(pVBInfo->SetFlag & DisableChA)) {
@@ -8511,11 +7637,6 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
/* EnablePart4_1F */
xgifb_reg_or(pVBInfo->Part4Port, 0x1F, tempah);
- if (pVBInfo->SetFlag & Win9xDOSMode) {
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
- return;
- }
-
if (!(pVBInfo->SetFlag & DisableChA)) {
XGI_VBLongWait(pVBInfo);
if (!(pVBInfo->SetFlag & GatingCRT)) {
@@ -8629,21 +7750,12 @@ static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
XGI_UpdateXG21CRTC(ModeNo, pVBInfo,
RefreshRateTableIndex);
- if (HwDeviceExtension->jChipType == XG27)
- XGI_SetXG27LCD(pVBInfo, RefreshRateTableIndex,
- ModeNo);
- else
- XGI_SetXG21LCD(pVBInfo, RefreshRateTableIndex,
- ModeNo);
+ xgifb_set_lcd(HwDeviceExtension->jChipType,
+ pVBInfo, RefreshRateTableIndex, ModeNo);
- if (pVBInfo->IF_DEF_LVDS == 1) {
- if (HwDeviceExtension->jChipType == XG27)
- XGI_SetXG27LVDSPara(ModeNo,
- ModeIdIndex, pVBInfo);
- else
- XGI_SetXG21LVDSPara(ModeNo,
- ModeIdIndex, pVBInfo);
- }
+ if (pVBInfo->IF_DEF_LVDS == 1)
+ xgifb_set_lvds(HwDeviceExtension->jChipType,
+ ModeNo, ModeIdIndex, pVBInfo);
/* P. ON */
/* xgifb_reg_or(pVBInfo->P3d4, 0x48, 0x20); */
}
@@ -8671,14 +7783,7 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
pVBInfo->BaseAddr = (unsigned long) HwDeviceExtension->pjIOAddress;
pVBInfo->IF_DEF_LVDS = 0;
- pVBInfo->IF_DEF_CH7005 = 0;
pVBInfo->IF_DEF_LCDA = 1;
- pVBInfo->IF_DEF_CH7017 = 0;
- pVBInfo->IF_DEF_CH7007 = 0; /* [Billy] 2007/05/14 */
- pVBInfo->IF_DEF_VideoCapture = 0;
- pVBInfo->IF_DEF_ScaleLCD = 0;
- pVBInfo->IF_DEF_OEMUtil = 0;
- pVBInfo->IF_DEF_PWD = 0;
if (HwDeviceExtension->jChipType >= XG20) { /* kuku 2004/06/25 */
pVBInfo->IF_DEF_YPbPr = 0;
@@ -8748,7 +7853,6 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
XGI_GetTVInfo(ModeNo, ModeIdIndex, pVBInfo);
XGI_GetLCDInfo(ModeNo, ModeIdIndex, pVBInfo);
XGI_DisableBridge(HwDeviceExtension, pVBInfo);
- /* XGI_OpenCRTC(HwDeviceExtension, pVBInfo); */
if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA)) {
XGI_SetCRT1Group(HwDeviceExtension, ModeNo,
@@ -8808,8 +7912,7 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
}
pVBInfo->SetFlag = 0;
- if (pVBInfo->IF_DEF_CH7007 != 1)
- pVBInfo->VBInfo = DisableCRT2Display;
+ pVBInfo->VBInfo = DisableCRT2Display;
XGI_DisplayOff(HwDeviceExtension, pVBInfo);
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
index 377d27c0c33..f9ade6f9f7e 100644
--- a/drivers/staging/xgifb/vb_struct.h
+++ b/drivers/staging/xgifb/vb_struct.h
@@ -1,16 +1,6 @@
#ifndef _VB_STRUCT_
#define _VB_STRUCT_
-#ifdef _INITNEW_
-#define EXTERN
-#else
-#define EXTERN extern
-#endif
-
-struct XGI_PanelDelayTblStruct {
- unsigned char timer[2];
-};
-
struct XGI_LCDDataStruct {
unsigned short RVBHCMAX;
unsigned short RVBHCFACT;
@@ -45,24 +35,6 @@ struct XGI_TVDataStruct {
unsigned char RY4COE;
};
-struct XGI_LVDSDataStruct {
- unsigned short VGAHT;
- unsigned short VGAVT;
- unsigned short LCDHT;
- unsigned short LCDVT;
-};
-
-struct XGI_LVDSDesStruct {
- unsigned short LCDHDES;
- unsigned short LCDVDES;
-};
-
-struct XGI_LVDSCRT1DataStruct {
- unsigned char CR[15];
-};
-
-/*add for LCDA*/
-
struct XGI_StStruct {
unsigned char St_ModeID;
unsigned short St_ModeFlag;
@@ -146,10 +118,6 @@ struct XGI_ModeResInfoStruct {
unsigned char YChar;
};
-struct XGI_LCDNBDesStruct {
- unsigned char NB[12];
-};
-
/*add for new UNIVGABIOS*/
struct XGI_LCDDesStruct {
unsigned short LCDHDES;
@@ -165,12 +133,6 @@ struct XGI_LCDDataTablStruct {
unsigned short DATAPTR;
};
-struct XGI_TVTablDataStruct {
- unsigned short MASK;
- unsigned short CAP;
- unsigned short DATAPTR;
-};
-
struct XGI330_LCDDataDesStruct {
unsigned short LCDHDES;
unsigned short LCDHRS;
@@ -246,22 +208,10 @@ struct XGI_TimingVStruct {
unsigned char data[7];
};
-struct XGI_CH7007TV_TimingHStruct {
- unsigned char data[10];
-};
-
-struct XGI_CH7007TV_TimingVStruct {
- unsigned char data[10];
-};
-
struct XGI_XG21CRT1Struct {
unsigned char ModeID, CR02, CR03, CR15, CR16;
};
-struct XGI330_CHTVRegDataStruct {
- unsigned char Reg[16];
-};
-
struct XGI330_LCDCapStruct {
unsigned char LCD_ID;
unsigned short LCD_Capability;
@@ -324,18 +274,6 @@ struct XGI301C_Tap4TimingStruct {
unsigned char Reg[64]; /* C0-FF */
};
-struct XGI_New_StandTableStruct {
- unsigned char CRT_COLS;
- unsigned char ROWS;
- unsigned char CHAR_HEIGHT;
- unsigned short CRT_LEN;
- unsigned char SR[4];
- unsigned char MISC;
- unsigned char CRTC[0x19];
- unsigned char ATTR[0x14];
- unsigned char GRC[9];
-};
-
struct vb_device_info {
unsigned char ISXPDOS;
unsigned long P3c4, P3d4, P3c0, P3ce, P3c2, P3cc;
@@ -350,12 +288,10 @@ struct vb_device_info {
unsigned short ModeType;
/* ,IF_DEF_FSTN; add for dstn */
unsigned short IF_DEF_LVDS, IF_DEF_TRUMPION, IF_DEF_DSTN;
- unsigned short IF_DEF_CRT2Monitor, IF_DEF_VideoCapture;
- unsigned short IF_DEF_LCDA, IF_DEF_CH7017, IF_DEF_YPbPr,
- IF_DEF_ScaleLCD, IF_DEF_OEMUtil, IF_DEF_PWD;
+ unsigned short IF_DEF_CRT2Monitor;
+ unsigned short IF_DEF_LCDA, IF_DEF_YPbPr;
unsigned short IF_DEF_ExpLink;
- unsigned short IF_DEF_CH7005, IF_DEF_HiVision;
- unsigned short IF_DEF_CH7007; /* Billy 2007/05/03 */
+ unsigned short IF_DEF_HiVision;
unsigned short LCDResInfo, LCDTypeInfo, VBType;/*301b*/
unsigned short VBInfo, TVInfo, LCDInfo, Set_VGAType;
unsigned short VBExtInfo;/*301lv*/
@@ -364,7 +300,7 @@ struct vb_device_info {
unsigned short SelectCRT2Rate;
unsigned char *ROMAddr;
- unsigned char *FBAddr;
+ void __iomem *FBAddr;
unsigned long BaseAddr;
unsigned long RelIO;
@@ -420,10 +356,6 @@ struct vb_device_info {
unsigned char *XGI_TVDelayList;
unsigned char *XGI_TVDelayList2;
- unsigned char *CHTVVCLKUNTSC;
- unsigned char *CHTVVCLKONTSC;
- unsigned char *CHTVVCLKUPAL;
- unsigned char *CHTVVCLKOPAL;
unsigned char *NTSCTiming;
unsigned char *PALTiming;
unsigned char *HiTVExtTiming;
@@ -460,23 +392,10 @@ struct vb_device_info {
struct XGI_StResInfoStruct *StResInfo;
struct XGI_ModeResInfoStruct *ModeResInfo;
struct XGI_XG21CRT1Struct *UpdateCRT1;
-}; /* _struct vb_device_info */
+ int ram_type;
+ int ram_channel;
+ int ram_bus;
+}; /* _struct vb_device_info */
-struct TimingInfo {
- unsigned short Horizontal_ACTIVE;
- unsigned short Horizontal_FP;
- unsigned short Horizontal_SYNC;
- unsigned short Horizontal_BP;
- unsigned short Vertical_ACTIVE;
- unsigned short Vertical_FP;
- unsigned short Vertical_SYNC;
- unsigned short Vertical_BP;
- double DCLK;
- unsigned char FrameRate;
- unsigned char Interlace;
- unsigned short Margin;
-};
-
-#define _VB_STRUCT_
#endif /* _VB_STRUCT_ */
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index d10de4888dc..b81ac7726d1 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -1,5 +1,3 @@
-#define Tap4
-
/* yilin modify for xgi20 */
static struct XGI_MCLKDataStruct XGI340New_MCLKData[] = {
{0x16, 0x01, 0x01, 166},
@@ -109,62 +107,12 @@ static unsigned char XGI340_CR6B[8][4] = {
{0x00, 0x00, 0x00, 0x00}
};
-static unsigned char XGI340_CR6E[8][4] = {
- {0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00}
-};
+static unsigned char XGI340_CR6E[8][4];
+
+static unsigned char XGI340_CR6F[8][32];
+
+static unsigned char XGI340_CR89[8][2];
-static unsigned char XGI340_CR6F[8][32] = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-};
-
-static unsigned char XGI340_CR89[8][2] = {
- {0x00, 0x00},
- {0x00, 0x00},
- {0x00, 0x00},
- {0x00, 0x00},
- {0x00, 0x00},
- {0x00, 0x00},
- {0x00, 0x00},
- {0x00, 0x00}
-};
/* CR47,CR48,CR49,CR4A,CR4B,CR4C,CR70,CR71,CR74,CR75,CR76,CR77 */
static unsigned char XGI340_AGPReg[12] = {
0x28, 0x23, 0x00, 0x20, 0x00, 0x20,
@@ -173,41 +121,10 @@ static unsigned char XGI340_AGPReg[12] = {
static unsigned char XGI340_SR16[4] = {0x03, 0x83, 0x03, 0x83};
-#if 0
-static unsigned char XGI330_SR15_1[8][8] = {
- {0x0, 0x0, 0x00, 0x00, 0x20, 0x20, 0x00, 0x00},
- {0x5, 0x15, 0x15, 0x15, 0x15, 0x15, 0x00, 0x00},
- {0xba, 0xba, 0xba, 0xba, 0xBA, 0xBA, 0x00, 0x00},
- {0x55, 0x57, 0x57, 0xAB, 0xAB, 0xAB, 0x00, 0x00},
- {0x60, 0x34, 0x34, 0x34, 0x34, 0x34, 0x00, 0x00},
- {0x0, 0x80, 0x80, 0x80, 0x83, 0x83, 0x00, 0x00},
- {0x50, 0x50, 0x50, 0x3C, 0x3C, 0x3C, 0x00, 0x00},
- {0x0, 0xa5, 0xfb, 0xf6, 0xF6, 0xF6, 0x00, 0x00}
-};
-
-static unsigned char XGI330_cr40_1[15][8] = {
- {0x66, 0x40, 0x40, 0x28, 0x24, 0x24, 0x00, 0x00},
- {0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x0F, 0x0F, 0x00, 0x00},
- {0x00, 0xf0, 0xf0, 0xf0, 0xF0, 0xF0, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x10, 0x10, 0x10, 0x10, 0x20, 0x20, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x88, 0x88, 0x88, 0xAA, 0xAC, 0xAC, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x77, 0x77, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0xA2, 0x00, 0x00, 0xA2, 0xA2, 0x00, 0x00},
-};
-#endif
-
-static unsigned char XGI330_sr25[] = {0x00, 0x0};
+static unsigned char XGI330_sr25[2];
static unsigned char XGI330_sr31 = 0xc0;
static unsigned char XGI330_sr32 = 0x11;
-static unsigned char XGI330_SR33 = 0x00;
+static unsigned char XGI330_SR33;
static unsigned char XG40_CRCF = 0x13;
static unsigned char XG40_DRAMTypeDefinition = 0xFF ;
@@ -816,13 +733,9 @@ static struct XGI_StandTableStruct XGI330_StandTable[] = {
}
};
-static struct XGI_TimingHStruct XGI_TimingH[] = {
- { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }
-};
+static struct XGI_TimingHStruct XGI_TimingH[1];
-static struct XGI_TimingVStruct XGI_TimingV[] = {
- { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }
-};
+static struct XGI_TimingVStruct XGI_TimingV[1];
static struct XGI_XG21CRT1Struct XGI_UpdateCRT1Table[] = {
{0x01, 0x27, 0x91, 0x8f, 0xc0}, /* 00 */
@@ -1007,112 +920,6 @@ static struct XGI_CRT1TableStruct XGI_CRT1Table[] = {
0x03, 0xDE, 0xC0, 0x84, 0xBF, 0x04, 0x90} } /* 0x47 */
};
-#if 0
-static struct XGI330_CHTVRegDataStruct XGI_CHTVRegUNTSC[] = {
- /* Index: 000h, 001h, 002h, 004h, 003h, 005h, 006h, 007h,
- 008h, 015h, 01Fh, 00Ch, 00Dh, 00Eh, 00Fh, 010h */
- /* 00 (640x200,640x400) */
- { {0x4A, 0x77, 0xBB, 0x94, 0x84, 0x48, 0xFE, 0x50,
- 0x04, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01 } },
- /* 01 (640x350) */
- { {0x4A, 0x77, 0xBB, 0x94, 0x84, 0x48, 0xFE, 0x50,
- 0x04, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01 } },
- /* 02 (720x400) */
- { {0x4A, 0x77, 0xBB, 0x94, 0x84, 0x48, 0xFE, 0x50,
- 0x04, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01 } },
- /* 03 (720x350) */
- { {0x4A, 0x77, 0xBB, 0x94, 0x84, 0x48, 0xFE, 0x50,
- 0x04, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01 } },
- /* 04 (640x480) ;;5/6/02 */
- { {0x6A, 0x77, 0xBB, 0x6E, 0x84, 0x2E, 0x02, 0x5A,
- 0x04, 0x00, 0x80, 0x20, 0x7E, 0x80, 0x97, 0x00 } },
- /* 05 (800x600) ;;1/12/02 */
- { {0xCF, 0x77, 0xB7, 0xC8, 0x84, 0x3B, 0x02, 0x5A,
- 0x04, 0x00, 0x80, 0x19, 0x88, 0xAE, 0xA3, 0x00 } },
- /* 06 (1024x768) ;;5/6/02 */
- { {0xEE, 0x77, 0xBB, 0x66, 0x87, 0x32, 0x01, 0x5A,
- 0x04, 0x00, 0x80, 0x1B, 0xD4, 0x2F, 0x6F, 0x00 } }
-};
-
-static struct XGI330_CHTVRegDataStruct XGI_CHTVRegONTSC[] = {
- /* Index: 000h, 001h, 002h, 004h, 003h, 005h, 006h, 007h,
- 008h, 015h, 01Fh, 00Ch, 00Dh, 00Eh, 00Fh, 010h */
- /* 00 (640x200,640x400) */
- { {0x49, 0x77, 0xBB, 0x7B, 0x84, 0x34, 0x00, 0x50,
- 0x04, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01} },
- /* 01 (640x350) */
- { {0x49, 0x77, 0xBB, 0x7B, 0x84, 0x34, 0x00 , 0x50,
- 0x04, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01} },
- /* 02 (720x400) */
- { {0x49, 0x77, 0xBB, 0x7B, 0x84, 0x34, 0x00 , 0x50,
- 0x04, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01} },
- /* 03 (720x350) */
- { {0x49, 0x77, 0xBB, 0x7B, 0x84, 0x34, 0x00 , 0x50,
- 0x04, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01} },
- /* 04 (640x480) ;;5/6/02 */
- { {0x69, 0x77, 0xBB, 0x6E, 0x84, 0x1E, 0x00 , 0x5A,
- 0x04, 0x00, 0x80, 0x25, 0x1A, 0x80, 0x26, 0x00} },
- /* 05 (800x600) ;;5/6/02 */
- { {0xCE, 0x77, 0xB7, 0xB6, 0x83, 0x2C, 0x02 , 0x5A,
- 0x04, 0x00, 0x80, 0x1C, 0x00, 0x82, 0x97, 0x00} },
- /* 06 (1024x768) ;;5/6/02 */
- { {0xED, 0x77, 0xBB, 0x66, 0x8C, 0x21, 0x02 , 0x5A,
- 0x04, 0x00, 0x80, 0x1F, 0xA0, 0x7E, 0x73, 0x00} }
-};
-
-static struct XGI330_CHTVRegDataStruct XGI_CHTVRegUPAL[] = {
- /* Index: 000h, 001h, 002h, 004h, 003h, 005h, 006h, 007h,
- 008h, 015h, 01Fh, 00Ch, 00Dh, 00Eh, 00Fh, 010h */
- /* ; 00 (640x200,640x400) */
- { {0x41, 0x7F, 0xB7, 0x34, 0xAD, 0x50, 0x34, 0x83,
- 0x05, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01} },
- /* ; 01 (640x350) */
- { {0x41, 0x7F, 0xB7, 0x80, 0x85, 0x50, 0x00, 0x83,
- 0x05, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01} },
- /* ; 02 (720x400) */
- { {0x41, 0x7F, 0xB7, 0x34, 0xAD, 0x50, 0x34, 0x83,
- 0x05, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01} },
- /* ; 03 (720x350) */
- { {0x41, 0x7F, 0xB7, 0x12, 0x85, 0x50, 0x00, 0x83,
- 0x05, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01} },
- /* ; 04 (640x480) */
- { {0x61, 0x7F, 0xB7, 0x99, 0x84, 0x35, 0x04, 0x5A,
- 0x05, 0x00, 0x80, 0x26, 0x2A, 0x55, 0x5D, 0x00} },
- /* ; 05 (800x600) ;;1/12/02 */
- { {0xC3, 0x7F, 0xB7, 0x7A, 0x84, 0x40, 0x02, 0x5A,
- 0x05, 0x00, 0x80, 0x1F, 0x84, 0x3D, 0x28, 0x00} },
- /* ; 06 (1024x768) ;;1/12/02 */
- { {0xE5, 0x7F, 0xB7, 0x1D, 0xA7, 0x3E, 0x04, 0x5A,
- 0x05, 0x00, 0x80, 0x20, 0x3E, 0xE4, 0x22, 0x00} }
-};
-
-static struct XGI330_CHTVRegDataStruct XGI_CHTVRegOPAL[] = {
- /* Index: 000, 0x01, 0x02, 0x04, 0x03, 0x05, 0x06, 0x07,
- 0x08, 0x15, 0x1F, 0x0C, 0x0D, 0x0E, 0x0F, 0x10h */
- /* 00 (640x200,640x400) */
- { {0x41, 0x7F, 0xB7, 0x36, 0xAD, 0x50, 0x34, 0x83,
- 0x05, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01} },
- /* 01 (640x350) */
- { {0x41, 0x7F, 0xB7, 0x86, 0x85, 0x50, 0x00, 0x83,
- 0x05, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01} },
- /* 02 (720x400) */
- { {0x41, 0x7F, 0xB7, 0x36, 0xAD, 0x50, 0x34, 0x83,
- 0x05, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01} },
- /* 03 (720x350) */
- { {0x41, 0x7F, 0xB7, 0x86, 0x85, 0x50, 0x00, 0x83,
- 0x05, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x01} },
- /* 04 (640x480) */
- { {0x61, 0x7F, 0xB7, 0x99, 0x84, 0x35, 0x04, 0x5A,
- 0x05, 0x00, 0x80, 0x26, 0x2A, 0x55, 0x5D, 0x00} },
- /* 05 (800x600) ;;1/12/02 */
- { {0xC1, 0x7F, 0xB7, 0x4D, 0x8C, 0x1E, 0x31, 0x5A,
- 0x05, 0x00, 0x80, 0x26, 0x78, 0x19, 0x34, 0x00} },
- /* 06 (1024x768) ;;1/12/02 */
- { {0xE4, 0x7F, 0xB7, 0x1E, 0xAF, 0x29, 0x37, 0x5A,
- 0x05, 0x00, 0x80, 0x25, 0x8C, 0xB2, 0x2A, 0x00} }
-};
-#endif
-
static unsigned char XGI_CH7017LV1024x768[] = {
0x60, 0x02, 0x00, 0x07, 0x40, 0xED,
0xA3, 0xC8, 0xC7, 0xAC, 0xE0, 0x02};
@@ -1151,16 +958,6 @@ static struct XGI330_LCDDataStruct XGI_ExtLCD1024x768Data[] = {
{1, 1, 1344, 806, 1344, 806}
};
-/*struct XGI330_LCDDataStruct XGI_St2LCD1024x768Data[] = {
- {62, 25, 800, 546, 1344, 806},
- {32, 15, 930, 546, 1344, 806},
- {62, 25, 800, 546, 1344, 806},
- {104, 45, 945, 496, 1344, 806},
- {62, 25, 800, 546, 1344, 806},
- {31, 18, 1008, 624, 1344, 806},
- {1, 1, 1344, 806, 1344, 806}
-};*/
-
static struct XGI330_LCDDataStruct XGI_CetLCD1024x768Data[] = {
{1, 1, 1344, 806, 1344, 806}, /* ; 00 (320x200,320x400,
640x200,640x400) */
@@ -1194,19 +991,6 @@ static struct XGI330_LCDDataStruct XGI_ExtLCD1280x1024Data[] = {
{1, 1, 1688, 1066, 1688, 1066}
};
-#if 0
-static struct XGI330_LCDDataStruct XGI_St2LCD1280x1024Data[] = {
- {22, 5, 800, 510, 1650, 1088},
- {22, 5, 800, 510, 1650, 1088},
- {176, 45, 900, 510, 1650, 1088},
- {176, 45, 900, 510, 1650, 1088},
- {22, 5, 800, 510, 1650, 1088},
- {13, 5, 1024, 675, 1560, 1152},
- {16, 9, 1266, 804, 1688, 1072},
- {1, 1, 1688, 1066, 1688, 1066}
-};
-#endif
-
static struct XGI330_LCDDataStruct XGI_CetLCD1280x1024Data[] = {
{1, 1, 1688, 1066, 1688, 1066}, /* 00 (320x200,320x400,
640x200,640x400) */
@@ -1220,21 +1004,7 @@ static struct XGI330_LCDDataStruct XGI_CetLCD1280x1024Data[] = {
{1, 1, 1688, 1066, 1688, 1066} /* 08 (1400x1050x60Hz) */
};
-static struct XGI330_LCDDataStruct XGI_StLCD1400x1050Data[] = {
- {211, 100, 2100, 408, 1688, 1066}, /* 00 (320x200,320x400,
- 640x200,640x400) */
- {211, 64, 1536, 358, 1688, 1066}, /* 01 (320x350,640x350) */
- {211, 100, 2100, 408, 1688, 1066}, /* 02 (360x400,720x400) */
- {211, 64, 1536, 358, 1688, 1066}, /* 03 (720x350) */
- {211, 48, 840, 488, 1688, 1066}, /* 04 (640x480x60Hz) */
- {211, 72, 1008, 609, 1688, 1066}, /* 05 (800x600x60Hz) */
- {211, 128, 1400, 776, 1688, 1066}, /* 06 (1024x768x60Hz) */
- {1, 1, 1688, 1066, 1688, 1066}, /* 07 (1280x1024x60Hz
- w/o Scaling) */
- {1, 1, 1688, 1066, 1688, 1066} /* 08 (1400x1050x60Hz) */
-};
-
-static struct XGI330_LCDDataStruct XGI_ExtLCD1400x1050Data[] = {
+static struct XGI330_LCDDataStruct xgifb_lcd_1400x1050[] = {
{211, 100, 2100, 408, 1688, 1066}, /* 00 (320x200,320x400,
640x200,640x400) */
{211, 64, 1536, 358, 1688, 1066}, /* 01 (320x350,640x350) */
@@ -1315,19 +1085,6 @@ static struct XGI330_LCDDataStruct XGI_ExtLCD1024x768x75Data[] = {
{1, 1, 1312, 800, 1312, 800} /* ; 06 (1024x768x75Hz) */
};
-#if 0
-static struct XGI330_LCDDataStruct XGI_StLCD1024x768x75Data[] = {
- {42, 25, 1536, 419, 1344, 806}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
- {48, 25, 1536, 369, 1344, 806}, /* ; 01 (320x350,640x350) */
- {42, 25, 1536, 419, 1344, 806}, /* ; 02 (360x400,720x400) */
- {48, 25, 1536, 369, 1344, 806}, /* ; 03 (720x350) */
- {8, 5, 1312, 500, 1312, 800}, /* ; 04 (640x480x75Hz) */
- {41, 25, 1024, 625, 1312, 800}, /* ; 05 (800x600x75Hz) */
- {1, 1, 1312, 800, 1312, 800} /* ; 06 (1024x768x75Hz) */
-};
-#endif
-
static struct XGI330_LCDDataStruct XGI_CetLCD1024x768x75Data[] = {
{1, 1, 1312, 800, 1312, 800}, /* ; 00 (320x200,320x400,
640x200,640x400) */
@@ -1339,19 +1096,7 @@ static struct XGI330_LCDDataStruct XGI_CetLCD1024x768x75Data[] = {
{1, 1, 1312, 800, 1312, 800} /* ; 06 (1024x768x75Hz) */
};
-static struct XGI330_LCDDataStruct XGI_ExtLCD1280x1024x75Data[] = {
- {211, 60, 1024, 501, 1688, 1066}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
- {211, 60, 1024, 508, 1688, 1066}, /* ; 01 (320x350,640x350) */
- {211, 60, 1024, 501, 1688, 1066}, /* ; 02 (360x400,720x400) */
- {211, 60, 1024, 508, 1688, 1066}, /* ; 03 (720x350) */
- {211, 45, 768, 498, 1688, 1066}, /* ; 04 (640x480x75Hz) */
- {211, 75, 1024, 625, 1688, 1066}, /* ; 05 (800x600x75Hz) */
- {211, 120, 1280, 798, 1688, 1066}, /* ; 06 (1024x768x75Hz) */
- {1, 1, 1688, 1066, 1688, 1066} /* ; 07 (1280x1024x75Hz) */
-};
-
-static struct XGI330_LCDDataStruct XGI_StLCD1280x1024x75Data[] = {
+static struct XGI330_LCDDataStruct xgifb_lcd_1280x1024x75[] = {
{211, 60, 1024, 501, 1688, 1066}, /* ; 00 (320x200,320x400,
640x200,640x400) */
{211, 60, 1024, 508, 1688, 1066}, /* ; 01 (320x350,640x350) */
@@ -1487,7 +1232,7 @@ static struct XGI330_LCDDataDesStruct XGI_CetLCDDes1280x1024Data[] = {
{9, 1337, 1065, 1024} /* 07 (1280x1024x60Hz) */
};
-static struct XGI330_LCDDataDesStruct XGI_StLCDDLDes1400x1050Data[] = {
+static struct XGI330_LCDDataDesStruct xgifb_lcddldes_1400x1050[] = {
{18, 1464, 0, 1051}, /* 00 (320x200,320x400,640x200,640x400) */
{18, 1464, 0, 1051}, /* 01 (320x350,640x350) */
{18, 1464, 0, 1051}, /* 02 (360x400,720x400) */
@@ -1499,31 +1244,7 @@ static struct XGI330_LCDDataDesStruct XGI_StLCDDLDes1400x1050Data[] = {
{18, 1464, 0, 1051} /* 08 (1400x1050x60Hz) */
};
-static struct XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1400x1050Data[] = {
- {18, 1464, 0, 1051}, /* 00 (320x200,320x400,640x200,640x400) */
- {18, 1464, 0, 1051}, /* 01 (320x350,640x350) */
- {18, 1464, 0, 1051}, /* 02 (360x400,720x400) */
- {18, 1464, 0, 1051}, /* 03 (720x350) */
- {18, 1464, 0, 1051}, /* 04 (640x480x60Hz) */
- {18, 1464, 0, 1051}, /* 05 (800x600x60Hz) */
- {18, 1464, 0, 1051}, /* 06 (1024x768x60Hz) */
- {1646, 1406, 1053, 1038}, /* 07 (1280x1024x60Hz) */
- {18, 1464, 0, 1051} /* 08 (1400x1050x60Hz) */
-};
-
-static struct XGI330_LCDDataDesStruct XGI_StLCDDes1400x1050Data[] = {
- {9, 1455, 0, 1051}, /* 00 (320x200,320x400,640x200,640x400) */
- {9, 1455, 0, 1051}, /* 01 (320x350,640x350) */
- {9, 1455, 0, 1051}, /* 02 (360x400,720x400) */
- {9, 1455, 0, 1051}, /* 03 (720x350) */
- {9, 1455, 0, 1051}, /* 04 (640x480x60Hz) */
- {9, 1455, 0, 1051}, /* 05 (800x600x60Hz) */
- {9, 1455, 0, 1051}, /* 06 (1024x768x60Hz) */
- {1637, 1397, 1053, 1038}, /* 07 (1280x1024x60Hz) */
- {9, 1455, 0, 1051} /* 08 (1400x1050x60Hz) */
-};
-
-static struct XGI330_LCDDataDesStruct XGI_ExtLCDDes1400x1050Data[] = {
+static struct XGI330_LCDDataDesStruct xgifb_lcddes_1400x1050[] = {
{9, 1455, 0, 1051}, /* 00 (320x200,320x400,640x200,640x400) */
{9, 1455, 0, 1051}, /* 01 (320x350,640x350) */
{9, 1455, 0, 1051}, /* 02 (360x400,720x400) */
@@ -1624,17 +1345,7 @@ static struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesData[] = {
};
/* ;;1024x768x75Hz */
-static struct XGI330_LCDDataDesStruct XGI_ExtLCDDes1024x768x75Data[] = {
- {9, 1049, 0, 769}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {9, 1049, 0, 769}, /* ; 01 (320x350,640x350) */
- {9, 1049, 0, 769}, /* ; 02 (360x400,720x400) */
- {9, 1049, 0, 769}, /* ; 03 (720x350) */
- {9, 1049, 0, 769}, /* ; 04 (640x480x75Hz) */
- {9, 1049, 0, 769}, /* ; 05 (800x600x75Hz) */
- {9, 1049, 0, 769} /* ; 06 (1024x768x75Hz) */
-};
-
-static struct XGI330_LCDDataDesStruct XGI_StLCDDes1024x768x75Data[] = {
+static struct XGI330_LCDDataDesStruct xgifb_lcddes_1024x768x75[] = {
{9, 1049, 0, 769}, /* ; 00 (320x200,320x400,640x200,640x400) */
{9, 1049, 0, 769}, /* ; 01 (320x350,640x350) */
{9, 1049, 0, 769}, /* ; 02 (360x400,720x400) */
@@ -1656,18 +1367,7 @@ static struct XGI330_LCDDataDesStruct XGI_CetLCDDes1024x768x75Data[] = {
};
/* ;;1280x1024x75Hz */
-static struct XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1280x1024x75Data[] = {
- {18, 1314, 0, 1025}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {18, 1314, 0, 1025}, /* ; 01 (320x350,640x350) */
- {18, 1314, 0, 1025}, /* ; 02 (360x400,720x400) */
- {18, 1314, 0, 1025}, /* ; 03 (720x350) */
- {18, 1314, 0, 1025}, /* ; 04 (640x480x60Hz) */
- {18, 1314, 0, 1025}, /* ; 05 (800x600x60Hz) */
- {18, 1314, 0, 1025}, /* ; 06 (1024x768x60Hz) */
- {18, 1314, 0, 1025} /* ; 07 (1280x1024x60Hz) */
-};
-
-static struct XGI330_LCDDataDesStruct XGI_StLCDDLDes1280x1024x75Data[] = {
+static struct XGI330_LCDDataDesStruct xgifb_lcddldes_1280x1024x75[] = {
{18, 1314, 0, 1025}, /* ; 00 (320x200,320x400,640x200,640x400) */
{18, 1314, 0, 1025}, /* ; 01 (320x350,640x350) */
{18, 1314, 0, 1025}, /* ; 02 (360x400,720x400) */
@@ -1691,18 +1391,7 @@ static struct XGI330_LCDDataDesStruct XGI_CetLCDDLDes1280x1024x75Data[] = {
};
/* ;;1280x1024x75Hz */
-static struct XGI330_LCDDataDesStruct XGI_ExtLCDDes1280x1024x75Data[] = {
- {9, 1305, 0, 1025}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {9, 1305, 0, 1025}, /* ; 01 (320x350,640x350) */
- {9, 1305, 0, 1025}, /* ; 02 (360x400,720x400) */
- {9, 1305, 0, 1025}, /* ; 03 (720x350) */
- {9, 1305, 0, 1025}, /* ; 04 (640x480x60Hz) */
- {9, 1305, 0, 1025}, /* ; 05 (800x600x60Hz) */
- {9, 1305, 0, 1025}, /* ; 06 (1024x768x60Hz) */
- {9, 1305, 0, 1025} /* ; 07 (1280x1024x60Hz) */
-};
-
-static struct XGI330_LCDDataDesStruct XGI_StLCDDes1280x1024x75Data[] = {
+static struct XGI330_LCDDataDesStruct xgifb_lcddes_1280x1024x75[] = {
{9, 1305, 0, 1025}, /* ; 00 (320x200,320x400,640x200,640x400) */
{9, 1305, 0, 1025}, /* ; 01 (320x350,640x350) */
{9, 1305, 0, 1025}, /* ; 02 (360x400,720x400) */
@@ -2041,63 +1730,6 @@ static unsigned char XGI330_Ren750pGroup3[] = {
0x18, 0x1D, 0x23, 0x28, 0x4C, 0xAA, 0x01
};
-#if 0
-static struct XGI_PanelDelayTblStruct XGI330_PanelDelayTbl[] = {
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} },
- { {0x00, 0x00} }
-};
-
-static struct XGI330_LVDSDataStruct XGI330_LVDS320x480Data_1[] = {
- {848, 433, 400, 525},
- {848, 389, 400, 525},
- {848, 433, 400, 525},
- {848, 389, 400, 525},
- {848, 518, 400, 525},
- {1056, 628, 400, 525},
- {400, 525, 400, 525},
- {800, 449, 1000, 644},
- {800, 525, 1000, 635}
-};
-
-static struct XGI330_LVDSDataStruct XGI330_LVDS800x600Data_1[] = {
- {848, 433, 1060, 629},
- {848, 389, 1060, 629},
- {848, 433, 1060, 629},
- {848, 389, 1060, 629},
- {848, 518, 1060, 629},
- {1056, 628, 1056, 628},
- {1056, 628, 1056, 628},
- {800, 449, 1000, 644},
- {800, 525, 1000, 635}
-};
-
-static struct XGI330_LVDSDataStruct XGI330_LVDS800x600Data_2[] = {
- {1056, 628, 1056, 628},
- {1056, 628, 1056, 628},
- {1056, 628, 1056, 628},
- {1056, 628, 1056, 628},
- {1056, 628, 1056, 628},
- {1056, 628, 1056, 628},
- {1056, 628, 1056, 628},
- {800, 449, 1000, 644},
- {800, 525, 1000, 635}
-};
-#endif
-
static struct XGI330_LVDSDataStruct XGI_LVDS1024x768Data_1[] = {
{ 960, 438, 1344, 806}, /* 00 (320x200,320x400,640x200,640x400) */
{ 960, 388, 1344, 806}, /* 01 (320x350,640x350) */
@@ -2143,79 +1775,7 @@ static struct XGI330_LVDSDataStruct XGI_LVDS1280x1024Data_2[] = {
{800, 449, 1280, 801},
{800, 525, 1280, 813}
};
-/*
-struct XGI330_LVDSDataStruct XGI_LVDS1280x768Data_1[] = {
- {768, 438, 1408, 806},
- {768, 388, 1408, 806},
- {768, 438, 1408, 806},
- {768, 388, 1408, 806},
- {768, 518, 1408, 806},
- {928, 638, 1408, 806},
- {1408, 806, 1408, 806},
- {1408, 806, 1408, 806},
- {1408, 806, 1408, 806}
-};
-
-struct XGI330_LVDSDataStruct XGI_LVDS1280x768Data_2[] = {
- {1408, 806, 1408, 806},
- {1408, 806, 1408, 806},
- {1408, 806, 1408, 806},
- {1408, 806, 1408, 806},
- {1408, 806, 1408, 806},
- {1408, 806, 1408, 806},
- {1408, 806, 1408, 806},
- {1408, 806, 1408, 806},
- {1408, 806, 1408, 806}
-};
-
-struct XGI330_LVDSDataStruct XGI_LVDS1280x768NData_1[] = {
- {704, 438, 1344, 806},
- {704, 388, 1344, 806},
- {704, 438, 1344, 806},
- {704, 388, 1344, 806},
- {704, 518, 1344, 806},
- {864, 638, 1344, 806},
- {1088, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806}
-};
-struct XGI330_LVDSDataStruct XGI_LVDS1280x768NData_2[] = {
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806}
-};
-
-struct XGI330_LVDSDataStruct XGI_LVDS1280x768SData_1[] = {
- {1048, 438, 1688, 806},
- {1048, 388, 1688, 806},
- {1148, 438, 1688, 806},
- {1148, 388, 1688, 806},
- {1048, 518, 1688, 806},
- {1208, 638, 1688, 806},
- {1432, 806, 1688, 806},
- {1688, 806, 1688, 806},
- {1688, 806, 1688, 806}
-};
-
-struct XGI330_LVDSDataStruct XGI_LVDS1280x768SData_2[] = {
- {1688, 806, 1688, 806},
- {1688, 806, 1688, 806},
- {1688, 806, 1688, 806},
- {1688, 806, 1688, 806},
- {1688, 806, 1688, 806},
- {1688, 806, 1688, 806},
- {1688, 806, 1688, 806},
- {1688, 806, 1688, 806},
- {1688, 806, 1688, 806}
-};
-*/
static struct XGI330_LVDSDataStruct XGI_LVDS1400x1050Data_1[] = {
{928, 416, 1688, 1066},
{928, 366, 1688, 1066},
@@ -2502,20 +2062,6 @@ static struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesDatax75[] = {
{0, 1328, 0, 771, 112, 6} /* ; 0A (1280x768x75Hz) */
};
-#if 0
-static struct XGI330_LVDSDataStruct XGI330_LVDS640x480Data_1[] = {
- { 800, 449, 800, 449},
- { 800, 449, 800, 449},
- { 800, 449, 800, 449},
- { 800, 449, 800, 449},
- { 800, 525, 800, 525},
- {1056, 628, 1056, 628},
- {1056, 628, 1056, 628},
- {1056, 628, 1056, 628},
- {1056, 628, 1056, 628}
-};
-#endif
-
static struct XGI330_CHTVDataStruct XGI_CHTVUNTSCData[] = {
{ 840, 600, 840, 600},
{ 840, 600, 840, 600},
@@ -2805,68 +2351,6 @@ static struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_2_Vx75[] = {
{ {0x28, 0x5A, 0x13, 0x87, 0xFF, 0x29, 0xA9} } /* ; 05 (x1024) */
};
-#if 0
-static struct XGI_LVDSCRT1DataStruct XGI_CHTVCRT1UNTSC[] = {
- { {0x64, 0x4f, 0x88, 0x56, 0x9f, 0x56, 0x3e,
- 0xe8, 0x84, 0x8f, 0x57, 0x20, 0x00, 0x01, 0x00 } },
- { {0x64, 0x4f, 0x88, 0x56, 0x9f, 0x56, 0x3e,
- 0xd0, 0x82, 0x5d, 0x57, 0x00, 0x00, 0x01, 0x00 } },
- { {0x64, 0x4f, 0x88, 0x56, 0x9f, 0x56, 0x3e,
- 0xe8, 0x84, 0x8f, 0x57, 0x20, 0x00, 0x01, 0x00 } },
- { {0x64, 0x4f, 0x88, 0x56, 0x9f, 0x56, 0x3e,
- 0xd0, 0x82, 0x5d, 0x57, 0x00, 0x00, 0x01, 0x00 } },
- { {0x5d, 0x4f, 0x81, 0x53, 0x9c, 0x56, 0xba,
- 0x18, 0x84, 0xdf, 0x57, 0x00, 0x00, 0x01, 0x00 } },
- { {0x80, 0x63, 0x84, 0x6c, 0x17, 0xec, 0xf0,
- x90, 0x8c, 0x57, 0xed, 0x20, 0x00, 0x06, 0x01 } }
-};
-
-static struct XGI_LVDSCRT1DataStruct XGI_CHTVCRT1ONTSC[] = {
- { {0x64, 0x4f, 0x88, 0x5a, 0x9f, 0x0b, 0x3e,
- 0xc0, 0x84, 0x8f, 0x0c, 0x20, 0x00, 0x01, 0x00 } },
- { {0x64, 0x4f, 0x88, 0x5a, 0x9f, 0x0b, 0x3e,
- 0xb0, 0x8d, 0x5d, 0x0c, 0x00, 0x00, 0x01, 0x00 } },
- { {0x64, 0x4f, 0x88, 0x5a, 0x9f, 0x0b, 0x3e,
- 0xc0, 0x84, 0x8f, 0x0c, 0x20, 0x00, 0x01, 0x00 } },
- { {0x64, 0x4f, 0x88, 0x5a, 0x9f, 0x0b, 0x3e,
- 0xb0, 0x8d, 0x5d, 0x0c, 0x00, 0x00, 0x01, 0x00 } },
- { {0x5d, 0x4f, 0x81, 0x56, 0x9c, 0x0b, 0x3e,
- 0xe8, 0x84, 0xdf, 0x0c, 0x00, 0x00, 0x01, 0x00 } },
- { {0x7d, 0x63, 0x81, 0x6a, 0x16, 0xba, 0xf0,
- x7f, 0x86, 0x57, 0xbb, 0x00, 0x00, 0x06, 0x01 } }
-};
-
-static struct XGI_LVDSCRT1DataStruct XGI_CHTVCRT1UPAL[] = {
- { {0x79, 0x4f, 0x9d, 0x5a, 0x90, 0x6f, 0x3e,
- 0xf8, 0x83, 0x8f, 0x70, 0x20, 0x00, 0x05, 0x00 } },
- { {0x79, 0x4f, 0x9d, 0x5a, 0x90, 0x6f, 0x3e,
- 0xde, 0x81, 0x5d, 0x70, 0x00, 0x00, 0x05, 0x00 } },
- { {0x79, 0x4f, 0x9d, 0x5a, 0x90, 0x6f, 0x3e,
- 0xf8, 0x83, 0x8f, 0x70, 0x20, 0x00, 0x05, 0x00 } },
- { {0x79, 0x4f, 0x9d, 0x5a, 0x90, 0x6f, 0x3e,
- 0xde, 0x81, 0x5d, 0x70, 0x00, 0x00, 0x05, 0x00 } },
- { {0x64, 0x4f, 0x88, 0x55, 0x80, 0xec, 0xba,
- 0x50, 0x84, 0xdf, 0xed, 0x00, 0x00, 0x05, 0x00 } },
- { {0x70, 0x63, 0x94, 0x68, 0x8d, 0x42, 0xf1,
- xc8, 0x8c, 0x57, 0xe9, 0x20, 0x00, 0x05, 0x01 } }
-};
-
-static struct XGI_LVDSCRT1DataStruct XGI_CHTVCRT1OPAL[] = {
- { {0x79, 0x4f, 0x9d, 0x5a, 0x90, 0x6f, 0x3e,
- 0xf0, 0x83, 0x8f, 0x70, 0x20, 0x00, 0x05, 0x00 } },
- { {0x79, 0x4f, 0x9d, 0x5a, 0x90, 0x6f, 0x3e,
- 0xde, 0x81, 0x5d, 0x70, 0x00, 0x00, 0x05, 0x00 } },
- { {0x79, 0x4f, 0x9d, 0x5a, 0x90, 0x6f, 0x3e,
- 0xf0, 0x83, 0x8f, 0x70, 0x20, 0x00, 0x05, 0x00 } },
- { {0x79, 0x4f, 0x9d, 0x5a, 0x90, 0x6f, 0x3e,
- 0xde, 0x81, 0x5d, 0x70, 0x00, 0x00, 0x05, 0x00 } },
- { {0x64, 0x4f, 0x88, 0x55, 0x80, 0x6f, 0xba,
- 0x20, 0x83, 0xdf, 0x70, 0x00, 0x00, 0x05, 0x00 } },
- { {0x73, 0x63, 0x97, 0x69, 0x8e, 0xec, 0xf0,
- x90, 0x8c, 0x57, 0xed, 0x20, 0x00, 0x05, 0x01 } }
-};
-#endif
-
/*add for new UNIVGABIOS*/
static struct XGI330_LCDDataTablStruct XGI_LCDDataTable[] = {
{Panel1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCD1024x768Data */
@@ -2918,33 +2402,18 @@ static struct XGI330_LCDDataTablStruct XGI_LCDDesDataTable[] = {
{0xFF, 0x0000, 0x0000, 0}
};
-static struct XGI330_LCDDataTablStruct XGI_EPLLCDCRT1Ptr_H[] = {
- {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDSCRT11024x768_1_H */
- {Panel1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDSCRT11024x768_2_H */
- {Panel1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDSCRT11280x1024_1_H */
- {Panel1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDSCRT11280x1024_2_H */
- {Panel1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDSCRT11400x1050_1_H */
- {Panel1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDSCRT11400x1050_2_H */
- {Panel1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDSCRT11600x1200_1_H */
- {Panel1024x768x75, 0x0018, 0x0000, 7}, /* XGI_LVDSCRT11024x768_1_Hx75 */
- {Panel1024x768x75, 0x0018, 0x0010, 8}, /* XGI_LVDSCRT11024x768_2_Hx75 */
- {Panel1280x1024x75, 0x0018, 0x0000, 9}, /*XGI_LVDSCRT11280x1024_1_Hx75*/
- {Panel1280x1024x75, 0x0018, 0x0010, 10},/*XGI_LVDSCRT11280x1024_2_Hx75*/
- {0xFF, 0x0000, 0x0000, 0}
-};
-
-static struct XGI330_LCDDataTablStruct XGI_EPLLCDCRT1Ptr_V[] = {
- {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDSCRT11024x768_1_V */
- {Panel1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDSCRT11024x768_2_V */
- {Panel1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDSCRT11280x1024_1_V */
- {Panel1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDSCRT11280x1024_2_V */
- {Panel1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDSCRT11400x1050_1_V */
- {Panel1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDSCRT11400x1050_2_V */
- {Panel1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDSCRT11600x1200_1_V */
- {Panel1024x768x75, 0x0018, 0x0000, 7}, /* XGI_LVDSCRT11024x768_1_Vx75 */
- {Panel1024x768x75, 0x0018, 0x0010, 8}, /* XGI_LVDSCRT11024x768_2_Vx75 */
- {Panel1280x1024x75, 0x0018, 0x0000, 9}, /*XGI_LVDSCRT11280x1024_1_Vx75*/
- {Panel1280x1024x75, 0x0018, 0x0010, 10},/*XGI_LVDSCRT11280x1024_2_Vx75*/
+static struct XGI330_LCDDataTablStruct xgifb_epllcd_crt1[] = {
+ {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDSCRT11024x768_1 */
+ {Panel1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDSCRT11024x768_2 */
+ {Panel1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDSCRT11280x1024_1 */
+ {Panel1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDSCRT11280x1024_2 */
+ {Panel1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDSCRT11400x1050_1 */
+ {Panel1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDSCRT11400x1050_2 */
+ {Panel1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDSCRT11600x1200_1 */
+ {Panel1024x768x75, 0x0018, 0x0000, 7}, /* XGI_LVDSCRT11024x768_1x75 */
+ {Panel1024x768x75, 0x0018, 0x0010, 8}, /* XGI_LVDSCRT11024x768_2x75 */
+ {Panel1280x1024x75, 0x0018, 0x0000, 9}, /*XGI_LVDSCRT11280x1024_1x75*/
+ {Panel1280x1024x75, 0x0018, 0x0010, 10},/*XGI_LVDSCRT11280x1024_2x75*/
{0xFF, 0x0000, 0x0000, 0}
};
@@ -3007,43 +2476,12 @@ static struct XGI330_TVDataTablStruct XGI_TVDataTable[] = {
{0xffff, 0x0000, 12} /* END */
};
-#if 0
-static unsigned short TVLenList[] = {
- LVDSCRT1Len_H,
- LVDSCRT1Len_V,
- LVDSDataLen,
- 0,
- TVDataLen,
- 0,
- 0,
- CHTVRegLen
-};
-#endif
-
-/* Chrontel 7017 TV CRT1 Timing List */
-static struct XGI330_TVDataTablStruct XGI_EPLCHTVCRT1Ptr[] = {
- {0x0011, 0x0000, 0}, /* XGI_CHTVCRT1UNTSC */
- {0x0011, 0x0010, 1}, /* XGI_CHTVCRT1ONTSC */
- {0x0011, 0x0001, 2}, /* XGI_CHTVCRT1UPAL */
- {0x0011, 0x0011, 3}, /* XGI_CHTVCRT1OPAL */
- {0xFFFF, 0x0000, 4}
-};
-
-/* ;;Chrontel 7017 TV Timing List */
-static struct XGI330_TVDataTablStruct XGI_EPLCHTVDataPtr[] = {
- {0x0011, 0x0000, 0}, /* XGI_CHTVUNTSCData */
- {0x0011, 0x0010, 1}, /* XGI_CHTVONTSCData */
- {0x0011, 0x0001, 2}, /* XGI_CHTVUPALData */
- {0x0011, 0x0011, 3}, /* XGI_CHTVOPALData */
- {0xFFFF, 0x0000, 4}
-};
-
-/* ;;Chrontel 7017 TV Reg. List */
-static struct XGI330_TVDataTablStruct XGI_EPLCHTVRegPtr[] = {
- {0x0011, 0x0000, 0}, /* XGI_CHTVRegUNTSC */
- {0x0011, 0x0010, 1}, /* XGI_CHTVRegONTSC */
- {0x0011, 0x0001, 2}, /* XGI_CHTVRegUPAL */
- {0x0011, 0x0011, 3}, /* XGI_CHTVRegOPAL */
+/* Chrontel 7017 TV List */
+static struct XGI330_TVDataTablStruct xgifb_chrontel_tv[] = {
+ {0x0011, 0x0000, 0}, /* UNTSC */
+ {0x0011, 0x0010, 1}, /* ONTSC */
+ {0x0011, 0x0001, 2}, /* UPAL */
+ {0x0011, 0x0011, 3}, /* OPAL */
{0xFFFF, 0x0000, 4}
};
@@ -3060,44 +2498,6 @@ static unsigned short LCDLenList[] = {
0
};
-#if 0
-/* 660, Dual link */
-static struct XGI330_LCDCapStruct XGI660_LCDDLCapList[] = {
-/* LCDCap1024x768 */
- {Panel1024x768, DefaultLCDCap, 0, 0x014, 0x88, 0x06, VCLK65,
- 0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
-/* LCDCap1280x1024 */
- {Panel1280x1024, LCDDualLink+DefaultLCDCap, StLCDBToA,
- 0x053, 0x70, 0x03, VCLK108_2,
- 0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCap1400x1050 */
- {Panel1400x1050, LCDDualLink+DefaultLCDCap, StLCDBToA,
- 0x053, 0x70, 0x03, VCLK108_2,
- 0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCap1600x1200 */
- {Panel1600x1200, LCDDualLink+DefaultLCDCap, LCDToFull,
- 0x053, 0xC0, 0x03, VCLK162,
- 0x43, 0x22, 0x70, 0x24, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCap1024x768x75 */
- {Panel1024x768x75, DefaultLCDCap, 0, 0x014, 0x60, 0, VCLK78_75,
- 0x2B, 0x61, 0x2B, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
-/* LCDCap1280x1024x75 */
- {Panel1280x1024x75, LCDDualLink+DefaultLCDCap, StLCDBToA,
- 0x053, 0x90, 0x03, VCLK135_5,
- 0x54, 0x42, 0x4A, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCapDefault */
- {0xFF, DefaultLCDCap, 0, 0x053, 0x88, 0x06, VCLK65,
- 0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
-};
-#endif
-
/* Dual link only */
static struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
/* LCDCap1024x768 */
@@ -3134,40 +2534,6 @@ static struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
-#if 0
-static struct XGI330_LCDCapStruct XGI660_LCDCapList[] = {
-/* LCDCap1024x768 */
- {Panel1024x768, DefaultLCDCap, 0, 0x014, 0x88, 0x06, VCLK65,
- 0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
-/* LCDCap1280x1024 */
- {Panel1280x1024, DefaultLCDCap, StLCDBToA, 0x053, 0x70, 0x03, VCLK108_2,
- 0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCap1400x1050 */
- {Panel1400x1050, DefaultLCDCap, StLCDBToA, 0x053, 0x70, 0x03, VCLK108_2,
- 0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCap1600x1200 */
- {Panel1600x1200, DefaultLCDCap, LCDToFull, 0x053, 0xC0, 0x03, VCLK162,
- 0x5A, 0x23, 0x5A, 0x23, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCap1024x768x75 */
- {Panel1024x768x75, DefaultLCDCap, 0, 0x014, 0x60, 0, VCLK78_75,
- 0x2B, 0x61, 0x2B, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
-/* LCDCap1280x1024x75 */
- {Panel1280x1024x75, + DefaultLCDCap, StLCDBToA,
- 0x053, 0x90, 0x03, VCLK135_5,
- 0x54, 0x42, 0x4A, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCapDefault */
- {0xFF, DefaultLCDCap, 0, 0x053, 0x88, 0x06, VCLK65,
- 0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
- 0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
-};
-#endif
-
static struct XGI330_LCDCapStruct XGI_LCDCapList[] = {
/* LCDCap1024x768 */
{Panel1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
@@ -3384,169 +2750,6 @@ static struct XGI_Ext2Struct XGI330_RefIndex[] = {
0x30, 0x47, 0x37, 1024, 768},/* 48 1024x768x160Hz */
};
-
-#if 0
-static struct XGI330_VCLKDataStruct XGI330_VCLKData[] = {
- {0x1b, 0xe1, 25}, /* 0x0 */
- {0x4e, 0xe4, 28}, /* 0x1 */
- {0x57, 0xe4, 31}, /* 0x2 */
- {0xc3, 0xc8, 36}, /* 0x3 */
- {0x42, 0xe2, 40}, /* 0x4 */
- {0xfe, 0xcd, 43}, /* 0x5 */
- {0x5d, 0xc4, 44}, /* 0x6 */
- {0x52, 0xe2, 49}, /* 0x7 */
- {0x53, 0xe2, 50}, /* 0x8 */
- {0x74, 0x67, 52}, /* 0x9 */
- {0x6d, 0x66, 56}, /* 0xa */
- {0x6c, 0xc3, 65}, /* 0xb */
- {0x46, 0x44, 67}, /* 0xc */
- {0xb1, 0x46, 68}, /* 0xd */
- {0xd3, 0x4a, 72}, /* 0xe */
- {0x29, 0x61, 75}, /* 0xf */
- {0x6e, 0x46, 76}, /* 0x10 */
- {0x2b, 0x61, 78}, /* 0x11 */
- {0x31, 0x42, 79}, /* 0x12 */
- {0xab, 0x44, 83}, /* 0x13 */
- {0x46, 0x25, 84}, /* 0x14 */
- {0x78, 0x29, 86}, /* 0x15 */
- {0x62, 0x44, 94}, /* 0x16 */
- {0x2b, 0x41, 104}, /* 0x17 */
- {0x3a, 0x23, 105}, /* 0x18 */
- {0x70, 0x44, 108}, /* 0x19 */
- {0x3c, 0x23, 109}, /* 0x1a */
- {0x5e, 0x43, 113}, /* 0x1b */
- {0xbc, 0x44, 116}, /* 0x1c */
- {0xe0, 0x46, 132}, /* 0x1d */
- {0x54, 0x42, 135}, /* 0x1e */
- {0xea, 0x2a, 139}, /* 0x1f */
- {0x41, 0x22, 157}, /* 0x20 */
- {0x70, 0x24, 162}, /* 0x21 */
- {0x30, 0x21, 175}, /* 0x22 */
- {0x4e, 0x22, 189}, /* 0x23 */
- {0xde, 0x26, 194}, /* 0x24 */
- {0x62, 0x06, 202}, /* 0x25 */
- {0x3f, 0x03, 229}, /* 0x26 */
- {0xb8, 0x06, 234}, /* 0x27 */
- {0x34, 0x02, 253}, /* 0x28 */
- {0x58, 0x04, 255}, /* 0x29 */
- {0x24, 0x01, 265}, /* 0x2a */
- {0x9b, 0x02, 267}, /* 0x2b */
- {0x70, 0x05, 270}, /* 0x2c */
- {0x25, 0x01, 272}, /* 0x2d */
- {0x9c, 0x02, 277}, /* 0x2e */
- {0x27, 0x01, 286}, /* 0x2f */
- {0x3c, 0x02, 291}, /* 0x30 */
- {0xef, 0x0a, 292}, /* 0x31 */
- {0xf6, 0x0a, 310}, /* 0x32 */
- {0x95, 0x01, 315}, /* 0x33 */
- {0xf0, 0x09, 324}, /* 0x34 */
- {0xfe, 0x0a, 331}, /* 0x35 */
- {0xf3, 0x09, 332}, /* 0x36 */
- {0xea, 0x08, 340}, /* 0x37 */
- {0xe8, 0x07, 376}, /* 0x38 */
- {0xde, 0x06, 389}, /* 0x39 */
- {0x52, 0x2a, 54}, /* 0x3a */
- {0x52, 0x6a, 27}, /* 0x3b */
- {0x62, 0x24, 70}, /* 0x3c */
- {0x62, 0x64, 70}, /* 0x3d */
- {0xa8, 0x4c, 30}, /* 0x3e */
- {0x20, 0x26, 33}, /* 0x3f */
- {0x31, 0xc2, 39}, /* 0x40 */
- {0x60, 0x36, 30}, /* 0x41 */
- {0x40, 0x4A, 28}, /* 0x42 */
- {0x9F, 0x46, 44}, /* 0x43 */
- {0x97, 0x2C, 26}, /* 0x44 */
- {0x44, 0xE4, 25}, /* 0x45 */
- {0x7E, 0x32, 47}, /* 0x46 */
- {0x08, 0x24, 31}, /* 0x47 */
- {0x97, 0x2c, 26}, /* 0x48 */
- {0xCE, 0x3c, 39}, /* 0x49 */
- {0x52, 0x4A, 36}, /* 0x4a */
- {0x2C, 0x61, 95}, /* 0x4b */
- {0x78, 0x27, 108}, /* 0x4c */
- {0x66, 0x43, 123}, /* 0x4d */
- {0x2c, 0x61, 80}, /* 0x4e */
- {0x3b, 0x61, 108} /* 0x4f */
-};
-
-static struct XGI_VBVCLKDataStruct XGI330_VBVCLKData[] = {
- {0x1b, 0xe1, 25}, /* 0x0 */
- {0x4e, 0xe4, 28}, /* 0x1 */
- {0x57, 0xe4, 31}, /* 0x2 */
- {0xc3, 0xc8, 36}, /* 0x3 */
- {0x42, 0x47, 40}, /* 0x4 */
- {0xfe, 0xcd, 43}, /* 0x5 */
- {0x5d, 0xc4, 44}, /* 0x6 */
- {0x52, 0x47, 49}, /* 0x7 */
- {0x53, 0x47, 50}, /* 0x8 */
- {0x74, 0x67, 52}, /* 0x9 */
- {0x6d, 0x66, 56}, /* 0xa */
- {0x5a, 0x64, 65}, /* 0xb */
- {0x46, 0x44, 67}, /* 0xc */
- {0xb1, 0x46, 68}, /* 0xd */
- {0xd3, 0x4a, 72}, /* 0xe */
- {0x29, 0x61, 75}, /* 0xf */
- {0x6d, 0x46, 75}, /* 0x10 */
- {0x41, 0x43, 78}, /* 0x11 */
- {0x31, 0x42, 79}, /* 0x12 */
- {0xab, 0x44, 83}, /* 0x13 */
- {0x46, 0x25, 84}, /* 0x14 */
- {0x78, 0x29, 86}, /* 0x15 */
- {0x62, 0x44, 94}, /* 0x16 */
- {0x2b, 0x22, 104}, /* 0x17 */
- {0x49, 0x24, 105}, /* 0x18 */
- {0xf8, 0x2f, 108}, /* 0x19 */
- {0x3c, 0x23, 109}, /* 0x1a */
- {0x5e, 0x43, 113}, /* 0x1b */
- {0xbc, 0x44, 116}, /* 0x1c */
- {0xe0, 0x46, 132}, /* 0x1d */
- {0xd4, 0x28, 135}, /* 0x1e */
- {0xea, 0x2a, 139}, /* 0x1f */
- {0x41, 0x22, 157}, /* 0x20 */
- {0x70, 0x24, 162}, /* 0x21 */
- {0x30, 0x21, 175}, /* 0x22 */
- {0x4e, 0x22, 189}, /* 0x23 */
- {0xde, 0x26, 194}, /* 0x24 */
- {0x70, 0x07, 202}, /* 0x25 */
- {0x3f, 0x03, 229}, /* 0x26 */
- {0xb8, 0x06, 234}, /* 0x27 */
- {0x34, 0x02, 253}, /* 0x28 */
- {0x58, 0x04, 255}, /* 0x29 */
- {0x24, 0x01, 265}, /* 0x2a */
- {0x9b, 0x02, 267}, /* 0x2b */
- {0x70, 0x05, 270}, /* 0x2c */
- {0x25, 0x01, 272}, /* 0x2d */
- {0x9c, 0x02, 277}, /* 0x2e */
- {0x27, 0x01, 286}, /* 0x2f */
- {0x3c, 0x02, 291}, /* 0x30 */
- {0xef, 0x0a, 292}, /* 0x31 */
- {0xf6, 0x0a, 310}, /* 0x32 */
- {0x95, 0x01, 315}, /* 0x33 */
- {0xf0, 0x09, 324}, /* 0x34 */
- {0xfe, 0x0a, 331}, /* 0x35 */
- {0xf3, 0x09, 332}, /* 0x36 */
- {0xea, 0x08, 340}, /* 0x37 */
- {0xe8, 0x07, 376}, /* 0x38 */
- {0xde, 0x06, 389}, /* 0x39 */
- {0x52, 0x2a, 54}, /* 0x3a */
- {0x52, 0x6a, 27}, /* 0x3b */
- {0x62, 0x24, 70}, /* 0x3c */
- {0x62, 0x64, 70}, /* 0x3d */
- {0xa8, 0x4c, 30}, /* 0x3e */
- {0x20, 0x26, 33}, /* 0x3f */
- {0x31, 0xc2, 39}, /* 0x40 */
- {0x2e, 0x48, 25}, /* 0x41 */
- {0x24, 0x46, 25}, /* 0x42 */
- {0x26, 0x64, 28}, /* 0x43 */
- {0x37, 0x64, 40}, /* 0x44 */
- {0xa1, 0x42, 108}, /* 0x45 */
- {0x37, 0x61, 100}, /* 0x46 */
- {0x78, 0x27, 108}, /* 0x47 */
- {0x5e, 0x64, 68}, /* 0x48 chiawen for fuj1280x768*/
- {0x70, 0x44, 108}, /* 0x49 chiawen for 1400x1050*/
-};
-#endif
-
static unsigned char XGI330_ScreenOffset[] = {
0x14, 0x19, 0x20, 0x28, 0x32, 0x40,
0x50, 0x64, 0x78, 0x80, 0x2d, 0x35,
@@ -3591,49 +2794,16 @@ static unsigned char XGI330_OutputSelect = 0x40;
static unsigned char XGI330_SoftSetting = 0x30;
static unsigned char XGI330_SR07 = 0x18;
-#if 0
-static unsigned char XGI330New_SR15[8][8] = {
- { 0x0, 0x4, 0x60, 0x60},
- { 0xf, 0xf, 0xf, 0xf},
- {0xba, 0xba, 0xba, 0xba},
- {0xa9, 0xa9, 0xac, 0xac},
- {0xa0, 0xa0, 0xa0, 0xa8},
- { 0x0, 0x0, 0x2, 0x2},
- {0x30, 0x30, 0x40, 0x40},
- { 0x0, 0xa5, 0xfb, 0xf6}
-};
-
-static unsigned char XGI330New_CR40[5][8] = {
- {0x77, 0x77, 0x44, 0x44},
- {0x77, 0x77, 0x44, 0x44},
- { 0x0, 0x0, 0x0, 0x0},
- {0x5b, 0x5b, 0xab, 0xab},
- { 0x0, 0x0, 0xf0, 0xf8}
-};
-#endif
-
static unsigned char XGI330_CR49[] = {0xaa, 0x88};
-static unsigned char XGI330_SR1F = 0x0;
+static unsigned char XGI330_SR1F;
static unsigned char XGI330_SR21 = 0xa3;
-#if 0
-static unsigned char XGI330_650_SR21 = 0xa7;
-#endif
static unsigned char XGI330_SR22 = 0xfb;
static unsigned char XGI330_SR23 = 0xf6;
static unsigned char XGI330_SR24 = 0xd;
-#if 0
-static unsigned char XGI660_SR21 = 0xa3;/* 2003.0312 */
-static unsigned char XGI660_SR22 = 0xf3;/* 2003.0312 */
-
-static unsigned char XGI330_LVDS_SR32 = 0x00; /* ynlai for 650 LVDS */
-static unsigned char XGI330_LVDS_SR33 = 0x00; /* chiawen for 650 LVDS */
-static unsigned char XGI330_650_SR31 = 0x40;
-static unsigned char XGI330_650_SR33 = 0x04;
-#endif
-static unsigned char XGI330_CRT2Data_1_2 = 0x0;
-static unsigned char XGI330_CRT2Data_4_D = 0x0;
-static unsigned char XGI330_CRT2Data_4_E = 0x0;
+static unsigned char XGI330_CRT2Data_1_2;
+static unsigned char XGI330_CRT2Data_4_D;
+static unsigned char XGI330_CRT2Data_4_E;
static unsigned char XGI330_CRT2Data_4_10 = 0x80;
static unsigned short XGI330_RGBSenseData = 0xd1;
static unsigned short XGI330_VideoSenseData = 0xb9;
@@ -3641,22 +2811,14 @@ static unsigned short XGI330_YCSenseData = 0xb3;
static unsigned short XGI330_RGBSenseData2 = 0x0190; /*301b*/
static unsigned short XGI330_VideoSenseData2 = 0x0110;
static unsigned short XGI330_YCSenseData2 = 0x016B;
-#if 0
-static unsigned char XGI330_NTSCPhase[] = {0x21, 0xed, 0x8a, 0x8};
-static unsigned char XGI330_PALPhase[] = {0x2a, 0x5, 0xd3, 0x0};
-static unsigned char XGI330_NTSCPhase2[] = {0x21, 0xF0, 0x7B, 0xD6};/*301b*/
-static unsigned char XGI330_PALPhase2[] = {0x2a, 0x09, 0x86, 0xe9};
-static unsigned char XGI330_PALMPhase[] = {0x21, 0xE4, 0x2E, 0x9B}; /*palmn*/
-static unsigned char XGI330_PALNPhase[] = {0x21, 0xF4, 0x3E, 0xBA};
-#endif
-static unsigned char XG40_I2CDefinition = 0x00 ;
+static unsigned char XG40_I2CDefinition;
static unsigned char XG20_CR97 = 0x10 ;
-static unsigned char XG21_DVOSetting = 0x00 ;
-static unsigned char XG21_CR2E = 0x00 ;
-static unsigned char XG21_CR2F = 0x00 ;
-static unsigned char XG21_CR46 = 0x00 ;
-static unsigned char XG21_CR47 = 0x00 ;
+static unsigned char XG21_DVOSetting;
+static unsigned char XG21_CR2E;
+static unsigned char XG21_CR2F;
+static unsigned char XG21_CR46;
+static unsigned char XG21_CR47;
static unsigned char XG27_CR97 = 0xC1 ;
static unsigned char XG27_SR36 = 0x30 ;
@@ -3664,68 +2826,10 @@ static unsigned char XG27_CR8F = 0x0C ;
static unsigned char XG27_CRD0[] = {
0, 0, 0, 0, 0, 0, 0, 0x82, 0x00, 0x66, 0x01, 0x00
};
-static unsigned char XG27_CRDE[] = {0, 0};
+static unsigned char XG27_CRDE[2];
static unsigned char XG27_SR40 = 0x04 ;
static unsigned char XG27_SR41 = 0x00 ;
-static unsigned char XGI330_CHTVVCLKUNTSC[] = {0x00};
-
-static unsigned char XGI330_CHTVVCLKONTSC[] = {0x00};
-
-static unsigned char XGI330_CHTVVCLKUPAL[] = {0x00};
-
-static unsigned char XGI330_CHTVVCLKOPAL[] = {0x00};
-
-static unsigned char XGI7007_CHTVVCLKUNTSC[] = {
- CH7007TVVCLK30_2,
- CH7007TVVCLK30_2,
- CH7007TVVCLK30_2,
- CH7007TVVCLK30_2,
- CH7007TVVCLK28_1,
- CH7007TVVCLK47_8
-};
-
-static unsigned char XGI7007_CHTVVCLKONTSC[] = {
- CH7007TVVCLK26_4,
- CH7007TVVCLK26_4,
- CH7007TVVCLK26_4,
- CH7007TVVCLK26_4,
- CH7007TVVCLK24_6,
- CH7007TVVCLK43_6
-};
-
-static unsigned char XGI7007_CHTVVCLKUPAL[] = {
- CH7007TVVCLK31_5,
- CH7007TVVCLK31_5,
- CH7007TVVCLK31_5,
- CH7007TVVCLK31_5,
- CH7007TVVCLK26_2,
- CH7007TVVCLK39
-};
-
-static unsigned char XGI7007_CHTVVCLKOPAL[] = {
- CH7007TVVCLK31_5,
- CH7007TVVCLK31_5,
- CH7007TVVCLK31_5,
- CH7007TVVCLK31_5,
- CH7007TVVCLK26_2,
- CH7007TVVCLK36
-};
-
-static struct XGI330_VCLKDataStruct XGI_CH7007VCLKData[] = {
- {0x60, 0x36, 30}, /* 0 30.2 MHZ */
- {0x40, 0x4A, 28}, /* 1 28.19 MHZ */
- {0x9F, 0x46, 44}, /* 2 43.6 MHZ */
- {0x97, 0x2C, 26}, /* 3 26.4 MHZ */
- {0x44, 0xE4, 25}, /* 4 24.6 MHZ */
- {0x7E, 0x32, 47}, /* 5 47.832 MHZ */
- {0x8A, 0x24, 31}, /* 6 31.5 MHZ */
- {0x97, 0x2C, 26}, /* 7 26.2 MHZ */
- {0xCE, 0x3C, 39}, /* 8 39 MHZ */
- {0x52, 0x4A, 36}, /* 9 36 MHZ */
- {0xFF, 0x00, 0} /* End mark */
-};
-
static struct XGI330_VCLKDataStruct XGI_VCLKData[] = {
/* SR2B,SR2C,SR2D */
{0x1B, 0xE1, 25}, /* 00 (25.175MHz) */
@@ -3805,21 +2909,6 @@ static struct XGI330_VCLKDataStruct XGI_VCLKData[] = {
{0x66, 0x43, 123}, /* 4A (122.61Mhz) */
{0x2C, 0x61, 80}, /* 4B (80.350Mhz) */
{0x3B, 0x61, 108}, /* 4C (107.385Mhz) */
-/*
- {0x60, 0x36, 30},// 4D (30.200MHz) }// No use
- {0x60, 0x36, 30},// 4E (30.200MHz) }// No use
- {0x60, 0x36, 30},// 4F (30.200MHz) }// No use
- {0x60, 0x36, 30},// 50 (30.200MHz) }// CHTV
- {0x40, 0x4A, 28},// 51 (28.190MHz)
- {0x9F, 0x46, 44},// 52 (43.600MHz)
- {0x97, 0x2C, 26},// 53 (26.400MHz)
- {0x44, 0xE4, 25},// 54 (24.600MHz)
- {0x7E, 0x32, 47},// 55 (47.832MHz)
- {0x8A, 0x24, 31},// 56 (31.500MHz)
- {0x97, 0x2C, 26},// 57 (26.200MHz)
- {0xCE, 0x3C, 39},// 58 (39.000MHz)
- {0x52, 0x4A, 36},// 59 (36.000MHz)
-*/
{0x69, 0x61, 191}, /* 4D (190.96MHz ) */
{0x4F, 0x22, 192}, /* 4E (192.069MHz) */
{0x28, 0x26, 322}, /* 4F (322.273MHz) */
@@ -3912,21 +3001,6 @@ static struct XGI330_VCLKDataStruct XGI_VBVCLKData[] = {
{0x66, 0x43, 123}, /* 4A (122.61Mhz) */
{0x2C, 0x61, 80 }, /* 4B (80.350Mhz) */
{0x3B, 0x61, 108}, /* 4C (107.385Mhz) */
-/*
- {0x60, 0x36, 30}, // 4D (30.200MHz) }// No use
- {0x60, 0x36, 30}, // 4E (30.200MHz) }// No use
- {0x60, 0x36, 30}, // 4F (30.200MHz) }// No use
- {0x60, 0x36, 30}, // 50 (30.200MHz) }// CHTV
- {0x40, 0x4A, 28}, // 51 (28.190MHz)
- {0x9F, 0x46, 44}, // 52 (43.600MHz)
- {0x97, 0x2C, 26}, // 53 (26.400MHz)
- {0x44, 0xE4, 25}, // 54 (24.600MHz)
- {0x7E, 0x32, 47}, // 55 (47.832MHz)
- {0x8A, 0x24, 31}, // 56 (31.500MHz)
- {0x97, 0x2C, 26}, // 57 (26.200MHz)
- {0xCE, 0x3C, 39}, // 58 (39.000MHz)
- {0x52, 0x4A, 36}, // 59 (36.000MHz)
-*/
{0x69, 0x61, 191}, /* 4D (190.96MHz ) */
{0x4F, 0x22, 192}, /* 4E (192.069MHz) */
{0x28, 0x26, 322}, /* 4F (322.273MHz) */
@@ -3941,38 +3015,6 @@ static struct XGI330_VCLKDataStruct XGI_VBVCLKData[] = {
{0xFF, 0x00, 0} /* End mark */
};
-#if 0
-static unsigned char XGI660_TVDelayList[] = {
- 0x44, /* ; 0 ExtNTSCDelay */
- 0x44, /* ; 1 StNTSCDelay */
- 0x44, /* ; 2 ExtPALDelay */
- 0x44, /* ; 3 StPALDelay */
- 0x44, /* ; 4 ExtHiTVDelay(1080i) */
- 0x44, /* ; 5 StHiTVDelay(1080i) */
- 0x44, /* ; 6 ExtYPbPrDelay(525i) */
- 0x44, /* ; 7 StYPbPrDealy(525i) */
- 0x44, /* ; 8 ExtYPbPrDelay(525p) */
- 0x44, /* ; 9 StYPbPrDealy(525p) */
- 0x44, /* ; A ExtYPbPrDelay(750p) */
- 0x44 /* ; B StYPbPrDealy(750p) */
-};
-
-static unsigned char XGI660_TVDelayList2[] = {
- 0x44, /* ; 0 ExtNTSCDelay */
- 0x44, /* ; 1 StNTSCDelay */
- 0x44, /* ; 2 ExtPALDelay */
- 0x44, /* ; 3 StPALDelay */
- 0x44, /* ; 4 ExtHiTVDelay */
- 0x44, /* ; 5 StHiTVDelay */
- 0x44, /* ; 6 ExtYPbPrDelay(525i) */
- 0x44, /* ; 7 StYPbPrDealy(525i) */
- 0x44, /* ; 8 ExtYPbPrDelay(525p) */
- 0x44, /* ; 9 StYPbPrDealy(525p) */
- 0x44, /* ; A ExtYPbPrDelay(750p) */
- 0x44 /* ; B StYPbPrDealy(750p) */
-};
-#endif
-
static unsigned char XGI301TVDelayList[] = {
0x22, /* ; 0 ExtNTSCDelay */
0x22, /* ; 1 StNTSCDelay */
@@ -4062,7 +3104,7 @@ static unsigned char PALYFilter1[] = {
0xFC, 0xFB, 0x14, 0x2A /* 6 : 800x gra. mode */
};
-static unsigned char PALMYFilter1[] = {
+static unsigned char xgifb_palmn_yfilter1[] = {
0x00, 0xF4, 0x10, 0x38, /* 0 : 320x text mode */
0x00, 0xF4, 0x10, 0x38, /* 1 : 360x text mode */
0xEB, 0x04, 0x10, 0x18, /* 2 : 640x text mode */
@@ -4073,51 +3115,7 @@ static unsigned char PALMYFilter1[] = {
0xFF, 0xFF, 0xFF, 0xFF /* End of Table */
};
-static unsigned char PALNYFilter1[] = {
- 0x00, 0xF4, 0x10, 0x38, /* 0 : 320x text mode */
- 0x00, 0xF4, 0x10, 0x38, /* 1 : 360x text mode */
- 0xEB, 0x04, 0x10, 0x18, /* 2 : 640x text mode */
- 0xF7, 0x06, 0x19, 0x14, /* 3 : 720x text mode */
- 0x00, 0xF4, 0x10, 0x38, /* 4 : 320x gra. mode */
- 0xEB, 0x04, 0x25, 0x18, /* 5 : 640x gra. mode */
- 0xEB, 0x15, 0x25, 0xF6, /* 6 : 800x gra. mode */
- 0xFF, 0xFF, 0xFF, 0xFF /* End of Table */
-};
-
-static unsigned char NTSCYFilter2[] = {
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 0 : 320x text mode */
- 0x01, 0x02, 0xFE, 0xF7, 0x03, 0x27, 0x3C, /* 1 : 360x text mode */
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 2 : 640x text mode */
- 0x01, 0x02, 0xFE, 0xF7, 0x03, 0x27, 0x3C, /* 3 : 720x text mode */
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 4 : 320x gra. mode */
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 5 : 640x gra. mode */
- 0x01, 0x01, 0xFC, 0xF8, 0x08, 0x26, 0x38, /* 6 : 800x gra. mode */
- 0xFF, 0xFF, 0xFC, 0x00, 0x0F, 0x22, 0x28 /* 7 : 1024xgra. mode */
-};
-
-static unsigned char PALYFilter2[] = {
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 0 : 320x text mode */
- 0x01, 0x02, 0xFE, 0xF7, 0x03, 0x27, 0x3C, /* 1 : 360x text mode */
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 2 : 640x text mode */
- 0x01, 0x02, 0xFE, 0xF7, 0x03, 0x27, 0x3C, /* 3 : 720x text mode */
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 4 : 320x gra. mode */
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 5 : 640x gra. mode */
- 0x01, 0x01, 0xFC, 0xF8, 0x08, 0x26, 0x38, /* 6 : 800x gra. mode */
- 0xFF, 0xFF, 0xFC, 0x00, 0x0F, 0x22, 0x28 /* 7 : 1024xgra. mode */
-};
-
-static unsigned char PALMYFilter2[] = {
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 0 : 320x text mode */
- 0x01, 0x02, 0xFE, 0xF7, 0x03, 0x27, 0x3C, /* 1 : 360x text mode */
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 2 : 640x text mode */
- 0x01, 0x02, 0xFE, 0xF7, 0x03, 0x27, 0x3C, /* 3 : 720x text mode */
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 4 : 320x gra. mode */
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 5 : 640x gra. mode */
- 0x01, 0x01, 0xFC, 0xF8, 0x08, 0x26, 0x38, /* 6 : 800x gra. mode */
- 0xFF, 0xFF, 0xFC, 0x00, 0x0F, 0x22, 0x28 /* 7 : 1024xgra. mode */
-};
-
-static unsigned char PALNYFilter2[] = {
+static unsigned char xgifb_yfilter2[] = {
0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 0 : 320x text mode */
0x01, 0x02, 0xFE, 0xF7, 0x03, 0x27, 0x3C, /* 1 : 360x text mode */
0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 2 : 640x text mode */
@@ -4134,35 +3132,7 @@ static unsigned char XGI_NTSC1024AdjTime[] = {
0x58, 0xe4, 0x73, 0xd0, 0x13
};
-static struct XGI301C_Tap4TimingStruct HiTVTap4Timing[] = {
- {0, {
- 0x00, 0x20, 0x00, 0x00, 0x7F, 0x20, 0x02, 0x7F, /* ; C0-C7 */
- 0x7D, 0x20, 0x04, 0x7F, 0x7D, 0x1F, 0x06, 0x7E, /* ; C8-CF */
- 0x7C, 0x1D, 0x09, 0x7E, 0x7C, 0x1B, 0x0B, 0x7E, /* ; D0-D7 */
- 0x7C, 0x19, 0x0E, 0x7D, 0x7C, 0x17, 0x11, 0x7C, /* ; D8-DF */
- 0x7C, 0x14, 0x14, 0x7C, 0x7C, 0x11, 0x17, 0x7C, /* ; E0-E7 */
- 0x7D, 0x0E, 0x19, 0x7C, 0x7E, 0x0B, 0x1B, 0x7C, /* ; EA-EF */
- 0x7E, 0x09, 0x1D, 0x7C, 0x7F, 0x06, 0x1F, 0x7C, /* ; F0-F7 */
- 0x7F, 0x04, 0x20, 0x7D, 0x00, 0x02, 0x20, 0x7E /* ; F8-FF */
- }
- }
-};
-
-static struct XGI301C_Tap4TimingStruct EnlargeTap4Timing[] = {
- {0, {
- 0x00, 0x20, 0x00, 0x00, 0x7F, 0x20, 0x02, 0x7F, /* ; C0-C7 */
- 0x7D, 0x20, 0x04, 0x7F, 0x7D, 0x1F, 0x06, 0x7E, /* ; C8-CF */
- 0x7C, 0x1D, 0x09, 0x7E, 0x7C, 0x1B, 0x0B, 0x7E, /* ; D0-D7 */
- 0x7C, 0x19, 0x0E, 0x7D, 0x7C, 0x17, 0x11, 0x7C, /* ; D8-DF */
- 0x7C, 0x14, 0x14, 0x7C, 0x7C, 0x11, 0x17, 0x7C, /* ; E0-E7 */
- 0x7D, 0x0E, 0x19, 0x7C, 0x7E, 0x0B, 0x1B, 0x7C, /* ; EA-EF */
- 0x7E, 0x09, 0x1D, 0x7C, 0x7F, 0x06, 0x1F, 0x7C, /* ; F0-F7 */
- 0x7F, 0x04, 0x20, 0x7D, 0x00, 0x02, 0x20, 0x7E /* ; F8-FF */
- }
- }
-};
-
-static struct XGI301C_Tap4TimingStruct NoScaleTap4Timing[] = {
+static struct XGI301C_Tap4TimingStruct xgifb_tap4_timing[] = {
{0, {
0x00, 0x20, 0x00, 0x00, 0x7F, 0x20, 0x02, 0x7F, /* ; C0-C7 */
0x7D, 0x20, 0x04, 0x7F, 0x7D, 0x1F, 0x06, 0x7E, /* ; C8-CF */
@@ -4212,7 +3182,7 @@ static struct XGI301C_Tap4TimingStruct PALTap4Timing[] = {
}
};
-static struct XGI301C_Tap4TimingStruct NTSCTap4Timing[] = {
+static struct XGI301C_Tap4TimingStruct xgifb_ntsc_525_tap4_timing[] = {
{480, {
0x04, 0x1A, 0x04, 0x7E, 0x03, 0x1A, 0x06, 0x7D, /* ; C0-C7 */
0x01, 0x1A, 0x08, 0x7D, 0x00, 0x19, 0x0A, 0x7D, /* ; C8-CF */
@@ -4248,78 +3218,6 @@ static struct XGI301C_Tap4TimingStruct NTSCTap4Timing[] = {
}
};
-static struct XGI301C_Tap4TimingStruct YPbPr525pTap4Timing[] = {
- {480, {
- 0x04, 0x1A, 0x04, 0x7E, 0x03, 0x1A, 0x06, 0x7D, /* ; C0-C7 */
- 0x01, 0x1A, 0x08, 0x7D, 0x00, 0x19, 0x0A, 0x7D, /* ; C8-CF */
- 0x7F, 0x19, 0x0C, 0x7C, 0x7E, 0x18, 0x0E, 0x7C, /* ; D0-D7 */
- 0x7E, 0x17, 0x10, 0x7B, 0x7D, 0x15, 0x12, 0x7C, /* ; D8-DF */
- 0x7D, 0x13, 0x13, 0x7D, 0x7C, 0x12, 0x15, 0x7D, /* ; E0-E7 */
- 0x7C, 0x10, 0x17, 0x7D, 0x7C, 0x0E, 0x18, 0x7E, /* ; EA-EF */
- 0x7D, 0x0C, 0x19, 0x7E, 0x7D, 0x0A, 0x19, 0x00, /* ; F0-F7 */
- 0x7D, 0x08, 0x1A, 0x01, 0x7E, 0x06, 0x1A, 0x02 /* ; F8-FF */
- }
- },
- {600, {
- 0x07, 0x14, 0x07, 0x7E, 0x06, 0x14, 0x09, 0x7D, /* ; C0-C7 */
- 0x05, 0x14, 0x0A, 0x7D, 0x04, 0x13, 0x0B, 0x7E, /* ; C8-CF */
- 0x03, 0x13, 0x0C, 0x7E, 0x02, 0x12, 0x0D, 0x7F, /* ; D0-D7 */
- 0x01, 0x12, 0x0E, 0x7F, 0x01, 0x11, 0x0F, 0x7F, /* ; D8-DF */
- 0x01, 0x10, 0x10, 0x00, 0x7F, 0x0F, 0x11, 0x01, /* ; E0-E7 */
- 0x7F, 0x0E, 0x12, 0x01, 0x7E, 0x0D, 0x12, 0x03, /* ; EA-EF */
- 0x7E, 0x0C, 0x13, 0x03, 0x7E, 0x0B, 0x13, 0x04, /* ; F0-F7 */
- 0x7E, 0x0A, 0x14, 0x04, 0x7D, 0x09, 0x14, 0x06 /* ; F8-FF */
- }
- },
- {0xFFFF, {
- 0x09, 0x0F, 0x09, 0x7F, 0x08, 0x0F, 0x09, 0x00, /* ; C0-C7 */
- 0x07, 0x0F, 0x0A, 0x00, 0x06, 0x0F, 0x0A, 0x01, /* ; C8-CF */
- 0x06, 0x0E, 0x0B, 0x01, 0x05, 0x0E, 0x0B, 0x02, /* ; D0-D7 */
- 0x04, 0x0E, 0x0C, 0x02, 0x04, 0x0D, 0x0C, 0x03, /* ; D8-DF */
- 0x03, 0x0D, 0x0D, 0x03, 0x02, 0x0C, 0x0D, 0x05, /* ; E0-E7 */
- 0x02, 0x0C, 0x0E, 0x04, 0x01, 0x0B, 0x0E, 0x06, /* ; EA-EF */
- 0x01, 0x0B, 0x0E, 0x06, 0x00, 0x0A, 0x0F, 0x07, /* ; F0-F7 */
- 0x00, 0x0A, 0x0F, 0x07, 0x00, 0x09, 0x0F, 0x08 /* ; F8-FF */
- }
- }
-};
-
-static struct XGI301C_Tap4TimingStruct YPbPr525iTap4Timing[] = {
- {480, {
- 0x04, 0x1A, 0x04, 0x7E, 0x03, 0x1A, 0x06, 0x7D, /* ; C0-C7 */
- 0x01, 0x1A, 0x08, 0x7D, 0x00, 0x19, 0x0A, 0x7D, /* ; C8-CF */
- 0x7F, 0x19, 0x0C, 0x7C, 0x7E, 0x18, 0x0E, 0x7C, /* ; D0-D7 */
- 0x7E, 0x17, 0x10, 0x7B, 0x7D, 0x15, 0x12, 0x7C, /* ; D8-DF */
- 0x7D, 0x13, 0x13, 0x7D, 0x7C, 0x12, 0x15, 0x7D, /* ; E0-E7 */
- 0x7C, 0x10, 0x17, 0x7D, 0x7C, 0x0E, 0x18, 0x7E, /* ; EA-EF */
- 0x7D, 0x0C, 0x19, 0x7E, 0x7D, 0x0A, 0x19, 0x00, /* ; F0-F7 */
- 0x7D, 0x08, 0x1A, 0x01, 0x7E, 0x06, 0x1A, 0x02 /* ; F8-FF */
- }
- },
- {600, {
- 0x07, 0x14, 0x07, 0x7E, 0x06, 0x14, 0x09, 0x7D, /* ; C0-C7 */
- 0x05, 0x14, 0x0A, 0x7D, 0x04, 0x13, 0x0B, 0x7E, /* ; C8-CF */
- 0x03, 0x13, 0x0C, 0x7E, 0x02, 0x12, 0x0D, 0x7F, /* ; D0-D7 */
- 0x01, 0x12, 0x0E, 0x7F, 0x01, 0x11, 0x0F, 0x7F, /* ; D8-DF */
- 0x01, 0x10, 0x10, 0x00, 0x7F, 0x0F, 0x11, 0x01, /* ; E0-E7 */
- 0x7F, 0x0E, 0x12, 0x01, 0x7E, 0x0D, 0x12, 0x03, /* ; EA-EF */
- 0x7E, 0x0C, 0x13, 0x03, 0x7E, 0x0B, 0x13, 0x04, /* ; F0-F7 */
- 0x7E, 0x0A, 0x14, 0x04, 0x7D, 0x09, 0x14, 0x06 /* ; F8-FF */
- }
- },
- {0xFFFF, {
- 0x09, 0x0F, 0x09, 0x7F, 0x08, 0x0F, 0x09, 0x00, /* ; C0-C7 */
- 0x07, 0x0F, 0x0A, 0x00, 0x06, 0x0F, 0x0A, 0x01, /* ; C8-CF */
- 0x06, 0x0E, 0x0B, 0x01, 0x05, 0x0E, 0x0B, 0x02, /* ; D0-D7 */
- 0x04, 0x0E, 0x0C, 0x02, 0x04, 0x0D, 0x0C, 0x03, /* ; D8-DF */
- 0x03, 0x0D, 0x0D, 0x03, 0x02, 0x0C, 0x0D, 0x05, /* ; E0-E7 */
- 0x02, 0x0C, 0x0E, 0x04, 0x01, 0x0B, 0x0E, 0x06, /* ; EA-EF */
- 0x01, 0x0B, 0x0E, 0x06, 0x00, 0x0A, 0x0F, 0x07, /* ; F0-F7 */
- 0x00, 0x0A, 0x0F, 0x07, 0x00, 0x09, 0x0F, 0x08 /* ; F8-FF */
- }
- }
-};
-
static struct XGI301C_Tap4TimingStruct YPbPr750pTap4Timing[] = {
{0xFFFF, {
0x05, 0x19, 0x05, 0x7D, 0x03, 0x19, 0x06, 0x7E, /* ; C0-C7 */
diff --git a/drivers/staging/xgifb/vb_util.c b/drivers/staging/xgifb/vb_util.c
index ea2b795bfa5..b5c99891ead 100644
--- a/drivers/staging/xgifb/vb_util.c
+++ b/drivers/staging/xgifb/vb_util.c
@@ -1,10 +1,11 @@
+#include <linux/io.h>
+#include <linux/types.h>
+
#include "vb_def.h"
#include "vgatypes.h"
#include "vb_struct.h"
#include "XGIfb.h"
-#include <linux/io.h>
-#include <linux/types.h>
#include "vb_util.h"
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
index 5aeb3a4d66f..9b939b75c30 100644
--- a/drivers/staging/xgifb/vgatypes.h
+++ b/drivers/staging/xgifb/vgatypes.h
@@ -47,18 +47,13 @@ enum XGI_LCD_TYPE {
};
#endif
-struct XGI_DSReg {
- unsigned char jIdx;
- unsigned char jVal;
-};
-
struct xgi_hw_device_info {
unsigned long ulExternalChip; /* NO VB or other video bridge*/
/* if ujVBChipID = VB_CHIP_UNKNOWN, */
unsigned char *pjVirtualRomBase; /* ROM image */
- unsigned char *pjVideoMemoryAddress;/* base virtual memory address */
+ void __iomem *pjVideoMemoryAddress;/* base virtual memory address */
/* of Linear VGA memory */
unsigned long ulVideoMemorySize; /* size, in bytes, of the
@@ -78,10 +73,6 @@ struct xgi_hw_device_info {
/* "XGI_VB_CHIP_TYPE" */
unsigned long ulCRT2LCDType; /* defined in the data structure type */
-
- unsigned char(*pQueryVGAConfigSpace)(struct xgi_hw_device_info *,
- unsigned long, unsigned long,
- unsigned long *);
};
/* Additional IOCTL for communication xgifb <> X driver */
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 462fbc20561..56c1f9c80dc 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -651,7 +651,7 @@ static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
/*
* byte count defining poor *mean* compression; pages with greater zsize
* will be rejected until sufficient better-compressed pages are accepted
- * driving the man below this threshold
+ * driving the mean below this threshold
*/
static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
@@ -962,15 +962,6 @@ out:
static unsigned long zcache_failed_get_free_pages;
static unsigned long zcache_failed_alloc;
static unsigned long zcache_put_to_flush;
-static unsigned long zcache_aborted_preload;
-static unsigned long zcache_aborted_shrink;
-
-/*
- * Ensure that memory allocation requests in zcache don't result
- * in direct reclaim requests via the shrinker, which would cause
- * an infinite loop. Maybe a GFP flag would be better?
- */
-static DEFINE_SPINLOCK(zcache_direct_reclaim_lock);
/*
* for now, used named slabs so can easily track usage; later can
@@ -1009,10 +1000,6 @@ static int zcache_do_preload(struct tmem_pool *pool)
goto out;
if (unlikely(zcache_obj_cache == NULL))
goto out;
- if (!spin_trylock(&zcache_direct_reclaim_lock)) {
- zcache_aborted_preload++;
- goto out;
- }
preempt_disable();
kp = &__get_cpu_var(zcache_preloads);
while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
@@ -1021,7 +1008,7 @@ static int zcache_do_preload(struct tmem_pool *pool)
ZCACHE_GFP_MASK);
if (unlikely(objnode == NULL)) {
zcache_failed_alloc++;
- goto unlock_out;
+ goto out;
}
preempt_disable();
kp = &__get_cpu_var(zcache_preloads);
@@ -1034,13 +1021,13 @@ static int zcache_do_preload(struct tmem_pool *pool)
obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
if (unlikely(obj == NULL)) {
zcache_failed_alloc++;
- goto unlock_out;
+ goto out;
}
page = (void *)__get_free_page(ZCACHE_GFP_MASK);
if (unlikely(page == NULL)) {
zcache_failed_get_free_pages++;
kmem_cache_free(zcache_obj_cache, obj);
- goto unlock_out;
+ goto out;
}
preempt_disable();
kp = &__get_cpu_var(zcache_preloads);
@@ -1053,8 +1040,6 @@ static int zcache_do_preload(struct tmem_pool *pool)
else
free_page((unsigned long)page);
ret = 0;
-unlock_out:
- spin_unlock(&zcache_direct_reclaim_lock);
out:
return ret;
}
@@ -1357,8 +1342,14 @@ static int zcache_cpu_notifier(struct notifier_block *nb,
kp->objnodes[kp->nr - 1] = NULL;
kp->nr--;
}
- kmem_cache_free(zcache_obj_cache, kp->obj);
- free_page((unsigned long)kp->page);
+ if (kp->obj) {
+ kmem_cache_free(zcache_obj_cache, kp->obj);
+ kp->obj = NULL;
+ }
+ if (kp->page) {
+ free_page((unsigned long)kp->page);
+ kp->page = NULL;
+ }
break;
default:
break;
@@ -1423,8 +1414,6 @@ ZCACHE_SYSFS_RO(evicted_buddied_pages);
ZCACHE_SYSFS_RO(failed_get_free_pages);
ZCACHE_SYSFS_RO(failed_alloc);
ZCACHE_SYSFS_RO(put_to_flush);
-ZCACHE_SYSFS_RO(aborted_preload);
-ZCACHE_SYSFS_RO(aborted_shrink);
ZCACHE_SYSFS_RO(compress_poor);
ZCACHE_SYSFS_RO(mean_compress_poor);
ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
@@ -1466,8 +1455,6 @@ static struct attribute *zcache_attrs[] = {
&zcache_failed_get_free_pages_attr.attr,
&zcache_failed_alloc_attr.attr,
&zcache_put_to_flush_attr.attr,
- &zcache_aborted_preload_attr.attr,
- &zcache_aborted_shrink_attr.attr,
&zcache_zbud_unbuddied_list_counts_attr.attr,
&zcache_zbud_cumul_chunk_counts_attr.attr,
&zcache_zv_curr_dist_counts_attr.attr,
@@ -1507,11 +1494,7 @@ static int shrink_zcache_memory(struct shrinker *shrink,
if (!(gfp_mask & __GFP_FS))
/* does this case really need to be skipped? */
goto out;
- if (spin_trylock(&zcache_direct_reclaim_lock)) {
- zbud_evict_pages(nr);
- spin_unlock(&zcache_direct_reclaim_lock);
- } else
- zcache_aborted_shrink++;
+ zbud_evict_pages(nr);
}
ret = (int)atomic_read(&zcache_zbud_curr_raw_pages);
out:
@@ -1668,7 +1651,7 @@ static int zcache_new_pool(uint16_t cli_id, uint32_t flags)
if (cli == NULL)
goto out;
atomic_inc(&cli->refcount);
- pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
+ pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
if (pool == NULL) {
pr_info("zcache: pool creation failed: out of memory\n");
goto out;
@@ -1798,8 +1781,10 @@ static int zcache_frontswap_poolid = -1;
/*
* Swizzling increases objects per swaptype, increasing tmem concurrency
* for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
+ * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
+ * frontswap_get_page()
*/
-#define SWIZ_BITS 4
+#define SWIZ_BITS 27
#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind) (_ind >> SWIZ_BITS)
@@ -1993,7 +1978,7 @@ static int __init zcache_init(void)
pr_info("zcache: frontswap enabled using kernel "
"transcendent memory and xvmalloc\n");
if (old_ops.init != NULL)
- pr_warning("ktmem: frontswap_ops overridden");
+ pr_warning("zcache: frontswap_ops overridden");
}
#endif
out:
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index d70ec1ad10d..b9926ee0052 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -37,10 +37,10 @@
/* Globals */
static int zram_major;
-struct zram *devices;
+struct zram *zram_devices;
/* Module params (documentation at end) */
-unsigned int num_devices;
+unsigned int zram_num_devices;
static void zram_stat_inc(u32 *v)
{
@@ -560,27 +560,34 @@ static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
struct zram *zram = queue->queuedata;
+ if (unlikely(!zram->init_done) && zram_init_device(zram))
+ goto error;
+
+ down_read(&zram->init_lock);
+ if (unlikely(!zram->init_done))
+ goto error_unlock;
+
if (!valid_io_request(zram, bio)) {
zram_stat64_inc(zram, &zram->stats.invalid_io);
- bio_io_error(bio);
- return 0;
- }
-
- if (unlikely(!zram->init_done) && zram_init_device(zram)) {
- bio_io_error(bio);
- return 0;
+ goto error_unlock;
}
__zram_make_request(zram, bio, bio_data_dir(bio));
+ up_read(&zram->init_lock);
return 0;
+
+error_unlock:
+ up_read(&zram->init_lock);
+error:
+ bio_io_error(bio);
+ return 0;
}
-void zram_reset_device(struct zram *zram)
+void __zram_reset_device(struct zram *zram)
{
size_t index;
- mutex_lock(&zram->init_lock);
zram->init_done = 0;
/* Free various per-device buffers */
@@ -617,7 +624,13 @@ void zram_reset_device(struct zram *zram)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
- mutex_unlock(&zram->init_lock);
+}
+
+void zram_reset_device(struct zram *zram)
+{
+ down_write(&zram->init_lock);
+ __zram_reset_device(zram);
+ up_write(&zram->init_lock);
}
int zram_init_device(struct zram *zram)
@@ -625,10 +638,10 @@ int zram_init_device(struct zram *zram)
int ret;
size_t num_pages;
- mutex_lock(&zram->init_lock);
+ down_write(&zram->init_lock);
if (zram->init_done) {
- mutex_unlock(&zram->init_lock);
+ up_write(&zram->init_lock);
return 0;
}
@@ -638,24 +651,22 @@ int zram_init_device(struct zram *zram)
if (!zram->compress_workmem) {
pr_err("Error allocating compressor working memory!\n");
ret = -ENOMEM;
- goto fail;
+ goto fail_no_table;
}
zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
if (!zram->compress_buffer) {
pr_err("Error allocating compressor buffer space\n");
ret = -ENOMEM;
- goto fail;
+ goto fail_no_table;
}
num_pages = zram->disksize >> PAGE_SHIFT;
zram->table = vzalloc(num_pages * sizeof(*zram->table));
if (!zram->table) {
pr_err("Error allocating zram address table\n");
- /* To prevent accessing table entries during cleanup */
- zram->disksize = 0;
ret = -ENOMEM;
- goto fail;
+ goto fail_no_table;
}
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
@@ -671,20 +682,23 @@ int zram_init_device(struct zram *zram)
}
zram->init_done = 1;
- mutex_unlock(&zram->init_lock);
+ up_write(&zram->init_lock);
pr_debug("Initialization done!\n");
return 0;
+fail_no_table:
+ /* To prevent accessing table entries during cleanup */
+ zram->disksize = 0;
fail:
- mutex_unlock(&zram->init_lock);
- zram_reset_device(zram);
-
+ __zram_reset_device(zram);
+ up_write(&zram->init_lock);
pr_err("Initialization failed: err=%d\n", ret);
return ret;
}
-void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
+static void zram_slot_free_notify(struct block_device *bdev,
+ unsigned long index)
{
struct zram *zram;
@@ -703,7 +717,7 @@ static int create_device(struct zram *zram, int device_id)
int ret = 0;
init_rwsem(&zram->lock);
- mutex_init(&zram->init_lock);
+ init_rwsem(&zram->init_lock);
spin_lock_init(&zram->stat64_lock);
zram->queue = blk_alloc_queue(GFP_KERNEL);
@@ -780,9 +794,9 @@ static int __init zram_init(void)
{
int ret, dev_id;
- if (num_devices > max_num_devices) {
+ if (zram_num_devices > max_num_devices) {
pr_warning("Invalid value for num_devices: %u\n",
- num_devices);
+ zram_num_devices);
ret = -EINVAL;
goto out;
}
@@ -794,21 +808,21 @@ static int __init zram_init(void)
goto out;
}
- if (!num_devices) {
+ if (!zram_num_devices) {
pr_info("num_devices not specified. Using default: 1\n");
- num_devices = 1;
+ zram_num_devices = 1;
}
/* Allocate the device array and initialize each one */
- pr_info("Creating %u devices ...\n", num_devices);
- devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
- if (!devices) {
+ pr_info("Creating %u devices ...\n", zram_num_devices);
+ zram_devices = kzalloc(zram_num_devices * sizeof(struct zram), GFP_KERNEL);
+ if (!zram_devices) {
ret = -ENOMEM;
goto unregister;
}
- for (dev_id = 0; dev_id < num_devices; dev_id++) {
- ret = create_device(&devices[dev_id], dev_id);
+ for (dev_id = 0; dev_id < zram_num_devices; dev_id++) {
+ ret = create_device(&zram_devices[dev_id], dev_id);
if (ret)
goto free_devices;
}
@@ -817,8 +831,8 @@ static int __init zram_init(void)
free_devices:
while (dev_id)
- destroy_device(&devices[--dev_id]);
- kfree(devices);
+ destroy_device(&zram_devices[--dev_id]);
+ kfree(zram_devices);
unregister:
unregister_blkdev(zram_major, "zram");
out:
@@ -830,8 +844,8 @@ static void __exit zram_exit(void)
int i;
struct zram *zram;
- for (i = 0; i < num_devices; i++) {
- zram = &devices[i];
+ for (i = 0; i < zram_num_devices; i++) {
+ zram = &zram_devices[i];
destroy_device(zram);
if (zram->init_done)
@@ -840,12 +854,12 @@ static void __exit zram_exit(void)
unregister_blkdev(zram_major, "zram");
- kfree(devices);
+ kfree(zram_devices);
pr_debug("Cleanup done!\n");
}
-module_param(num_devices, uint, 0);
-MODULE_PARM_DESC(num_devices, "Number of zram devices");
+module_param(zram_num_devices, uint, 0);
+MODULE_PARM_DESC(zram_num_devices, "Number of zram devices");
module_init(zram_init);
module_exit(zram_exit);
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index abe5221c100..e5cd2469b6a 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -47,7 +47,7 @@ static const unsigned default_disksize_perc_ram = 25;
* Pages that compress to size greater than this are stored
* uncompressed in memory.
*/
-static const unsigned max_zpage_size = PAGE_SIZE / 4 * 3;
+static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/*
* NOTE: max_zpage_size must be less than or equal to:
@@ -112,8 +112,8 @@ struct zram {
struct request_queue *queue;
struct gendisk *disk;
int init_done;
- /* Prevent concurrent execution of device init and reset */
- struct mutex init_lock;
+ /* Prevent concurrent execution of device init, reset and R/W request */
+ struct rw_semaphore init_lock;
/*
* This is the limit on amount of *uncompressed* worth of data
* we can store in a disk.
@@ -123,13 +123,13 @@ struct zram {
struct zram_stats stats;
};
-extern struct zram *devices;
-extern unsigned int num_devices;
+extern struct zram *zram_devices;
+extern unsigned int zram_num_devices;
#ifdef CONFIG_SYSFS
extern struct attribute_group zram_disk_attr_group;
#endif
extern int zram_init_device(struct zram *zram);
-extern void zram_reset_device(struct zram *zram);
+extern void __zram_reset_device(struct zram *zram);
#endif
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index a70cc010d18..0ea8ed296a9 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -34,8 +34,8 @@ static struct zram *dev_to_zram(struct device *dev)
int i;
struct zram *zram = NULL;
- for (i = 0; i < num_devices; i++) {
- zram = &devices[i];
+ for (i = 0; i < zram_num_devices; i++) {
+ zram = &zram_devices[i];
if (disk_to_dev(zram->disk) == dev)
break;
}
@@ -55,19 +55,23 @@ static ssize_t disksize_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
int ret;
+ u64 disksize;
struct zram *zram = dev_to_zram(dev);
+ ret = strict_strtoull(buf, 10, &disksize);
+ if (ret)
+ return ret;
+
+ down_write(&zram->init_lock);
if (zram->init_done) {
+ up_write(&zram->init_lock);
pr_info("Cannot change disksize for initialized device\n");
return -EBUSY;
}
- ret = strict_strtoull(buf, 10, &zram->disksize);
- if (ret)
- return ret;
-
- zram->disksize = PAGE_ALIGN(zram->disksize);
+ zram->disksize = PAGE_ALIGN(disksize);
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+ up_write(&zram->init_lock);
return len;
}
@@ -106,8 +110,10 @@ static ssize_t reset_store(struct device *dev,
if (bdev)
fsync_bdev(bdev);
+ down_write(&zram->init_lock);
if (zram->init_done)
- zram_reset_device(zram);
+ __zram_reset_device(zram);
+ up_write(&zram->init_lock);
return len;
}
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 1060c7b7f80..62e54053bcd 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -6,7 +6,6 @@ target_core_mod-y := target_core_configfs.o \
target_core_hba.o \
target_core_pr.o \
target_core_alua.o \
- target_core_scdb.o \
target_core_tmr.o \
target_core_tpg.o \
target_core_transport.o \
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 6a4ea29c2f3..4d01768fcd9 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -765,7 +765,7 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :
cmd->se_cmd.t_data_nents;
- iov_count += TRANSPORT_IOV_DATA_BUFFER;
+ iov_count += ISCSI_IOV_DATA_BUFFER;
cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
if (!cmd->iov_data) {
@@ -3538,16 +3538,8 @@ get_immediate:
spin_lock_bh(&conn->cmd_lock);
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
- /*
- * Determine if a struct se_cmd is assoicated with
- * this struct iscsi_cmd.
- */
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
- !(cmd->tmr_req))
- iscsit_release_cmd(cmd);
- else
- transport_generic_free_cmd(&cmd->se_cmd,
- 1, 0);
+
+ iscsit_free_cmd(cmd);
goto get_immediate;
case ISTATE_SEND_NOPIN_WANT_RESPONSE:
spin_unlock_bh(&cmd->istate_lock);
@@ -3940,7 +3932,6 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
{
struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
struct iscsi_session *sess = conn->sess;
- struct se_cmd *se_cmd;
/*
* We expect this function to only ever be called from either RX or TX
* thread context via iscsit_close_connection() once the other context
@@ -3948,35 +3939,13 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
*/
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) {
- list_del(&cmd->i_list);
- spin_unlock_bh(&conn->cmd_lock);
- iscsit_increment_maxcmdsn(cmd, sess);
- se_cmd = &cmd->se_cmd;
- /*
- * Special cases for active iSCSI TMR, and
- * transport_lookup_cmd_lun() failing from
- * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd().
- */
- if (cmd->tmr_req && se_cmd->transport_wait_for_tasks)
- se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
- else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)
- transport_release_cmd(se_cmd);
- else
- iscsit_release_cmd(cmd);
-
- spin_lock_bh(&conn->cmd_lock);
- continue;
- }
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
- se_cmd = &cmd->se_cmd;
- if (se_cmd->transport_wait_for_tasks)
- se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
+ iscsit_free_cmd(cmd);
spin_lock_bh(&conn->cmd_lock);
}
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 11fd7430781..beb39469e7f 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -18,6 +18,7 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/err.h>
@@ -27,40 +28,11 @@
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
-static unsigned char chap_asciihex_to_binaryhex(unsigned char val[2])
-{
- unsigned char result = 0;
- /*
- * MSB
- */
- if ((val[0] >= 'a') && (val[0] <= 'f'))
- result = ((val[0] - 'a' + 10) & 0xf) << 4;
- else
- if ((val[0] >= 'A') && (val[0] <= 'F'))
- result = ((val[0] - 'A' + 10) & 0xf) << 4;
- else /* digit */
- result = ((val[0] - '0') & 0xf) << 4;
- /*
- * LSB
- */
- if ((val[1] >= 'a') && (val[1] <= 'f'))
- result |= ((val[1] - 'a' + 10) & 0xf);
- else
- if ((val[1] >= 'A') && (val[1] <= 'F'))
- result |= ((val[1] - 'A' + 10) & 0xf);
- else /* digit */
- result |= ((val[1] - '0') & 0xf);
-
- return result;
-}
-
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
- int i, j = 0;
+ int j = DIV_ROUND_UP(len, 2);
- for (i = 0; i < len; i += 2) {
- dst[j++] = (unsigned char) chap_asciihex_to_binaryhex(&src[i]);
- }
+ hex2bin(dst, src, j);
dst[j] = '\0';
return j;
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 470ed551eeb..3723d90d5ae 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -57,6 +57,9 @@
#define TA_PROD_MODE_WRITE_PROTECT 0
#define TA_CACHE_CORE_NPS 0
+
+#define ISCSI_IOV_DATA_BUFFER 5
+
enum tpg_np_network_transport_table {
ISCSI_TCP = 0,
ISCSI_SCTP_TCP = 1,
@@ -425,7 +428,6 @@ struct iscsi_cmd {
/* Number of times struct iscsi_cmd is present in immediate queue */
atomic_t immed_queue_count;
atomic_t response_queue_count;
- atomic_t transport_sent;
spinlock_t datain_lock;
spinlock_t dataout_timeout_lock;
/* spinlock for protecting struct iscsi_cmd->i_state */
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 91a4d170bda..0b8404c3012 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -143,12 +143,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_del(&cmd->i_list);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
- iscsit_release_cmd(cmd);
- else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1, 1);
+ iscsit_free_cmd(cmd);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -170,12 +165,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_del(&cmd->i_list);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
- iscsit_release_cmd(cmd);
- else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1, 1);
+ iscsit_free_cmd(cmd);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -260,12 +250,7 @@ void iscsit_discard_cr_cmds_by_expstatsn(
iscsit_remove_cmd_from_connection_recovery(cmd, sess);
spin_unlock(&cr->conn_recovery_cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
- iscsit_release_cmd(cmd);
- else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1, 0);
+ iscsit_free_cmd(cmd);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -319,12 +304,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
- iscsit_release_cmd(cmd);
- else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1, 1);
+ iscsit_free_cmd(cmd);
spin_lock_bh(&conn->cmd_lock);
}
spin_unlock_bh(&conn->cmd_lock);
@@ -377,13 +357,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
-
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
- iscsit_release_cmd(cmd);
- else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1, 0);
+ iscsit_free_cmd(cmd);
spin_lock_bh(&conn->cmd_lock);
continue;
}
@@ -403,13 +377,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
(cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
-
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
- iscsit_release_cmd(cmd);
- else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1, 1);
+ iscsit_free_cmd(cmd);
spin_lock_bh(&conn->cmd_lock);
continue;
}
@@ -434,10 +402,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
iscsit_free_all_datain_reqs(cmd);
- if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
- cmd->se_cmd.transport_wait_for_tasks)
- cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd,
- 0, 0);
+ transport_wait_for_tasks(&cmd->se_cmd);
/*
* Add the struct iscsi_cmd to the connection recovery cmd list
*/
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 4d087ac1106..426cd4bf6a9 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -504,7 +504,7 @@ static int iscsi_target_do_authentication(
break;
case 1:
pr_debug("iSCSI security negotiation"
- " completed sucessfully.\n");
+ " completed successfully.\n");
login->auth_complete = 1;
if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
(login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index db1fe1ec84d..490207eacde 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -250,7 +250,7 @@ static int iscsit_task_reassign_complete_write(
* so if we have received all DataOUT we can safety ignore Initiator.
*/
if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
- if (!atomic_read(&cmd->transport_sent)) {
+ if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {
pr_debug("WRITE ITT: 0x%08x: t_state: %d"
" never sent to transport\n",
cmd->init_task_tag, cmd->se_cmd.t_state);
@@ -314,11 +314,11 @@ static int iscsit_task_reassign_complete_read(
cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
}
- if (!atomic_read(&cmd->transport_sent)) {
+ if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {
pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
" transport\n", cmd->init_task_tag,
cmd->se_cmd.t_state);
- transport_generic_handle_cdb(se_cmd);
+ transport_handle_cdb_direct(se_cmd);
return 0;
}
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index f00137f377b..02348f727bd 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -289,7 +289,8 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
}
se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
- (void *)cmd->tmr_req, tcm_function);
+ (void *)cmd->tmr_req, tcm_function,
+ GFP_KERNEL);
if (!se_cmd->se_tmr_req)
goto out;
@@ -839,6 +840,23 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
kmem_cache_free(lio_cmd_cache, cmd);
}
+void iscsit_free_cmd(struct iscsi_cmd *cmd)
+{
+ /*
+ * Determine if a struct se_cmd is assoicated with
+ * this struct iscsi_cmd.
+ */
+ switch (cmd->iscsi_opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ case ISCSI_OP_SCSI_TMFUNC:
+ transport_generic_free_cmd(&cmd->se_cmd, 1);
+ break;
+ default:
+ iscsit_release_cmd(cmd);
+ break;
+ }
+}
+
int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
spin_lock_bh(&sess->session_usage_lock);
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 2cd49d607bd..835bf7de028 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -30,6 +30,7 @@ extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_c
extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern void iscsit_free_cmd(struct iscsi_cmd *);
extern int iscsit_check_session_usage_count(struct iscsi_session *);
extern void iscsit_dec_session_usage_count(struct iscsi_session *);
extern void iscsit_inc_session_usage_count(struct iscsi_session *);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index aa2d6799723..b15d8cbf630 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -200,7 +200,7 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
* Release the struct se_cmd, which will make a callback to release
* struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
*/
- transport_generic_free_cmd(se_cmd, 0, 0);
+ transport_generic_free_cmd(se_cmd, 0);
}
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
@@ -290,6 +290,15 @@ static int tcm_loop_queuecommand(
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ /*
+ * Ensure that this tl_tpg reference from the incoming sc->device->id
+ * has already been configured via tcm_loop_make_naa_tpg().
+ */
+ if (!tl_tpg->tl_hba) {
+ set_host_byte(sc, DID_NO_CONNECT);
+ sc->scsi_done(sc);
+ return 0;
+ }
se_tpg = &tl_tpg->tl_se_tpg;
/*
* Determine the SAM Task Attribute and allocate tl_cmd and
@@ -366,7 +375,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
* Allocate the LUN_RESET TMR
*/
se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
- TMR_LUN_RESET);
+ TMR_LUN_RESET, GFP_KERNEL);
if (IS_ERR(se_cmd->se_tmr_req))
goto release;
/*
@@ -388,7 +397,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
SUCCESS : FAILED;
release:
if (se_cmd)
- transport_generic_free_cmd(se_cmd, 1, 0);
+ transport_generic_free_cmd(se_cmd, 1);
else
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
kfree(tl_tmr);
@@ -1245,6 +1254,9 @@ void tcm_loop_drop_naa_tpg(
*/
core_tpg_deregister(se_tpg);
+ tl_tpg->tl_hba = NULL;
+ tl_tpg->tl_tpgt = 0;
+
pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 98c98a3a025..8f4447749c7 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -24,7 +24,6 @@
*
******************************************************************************/
-#include <linux/version.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
@@ -68,6 +67,15 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
unsigned char *buf;
u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
Target port group descriptor */
+ /*
+ * Need at least 4 bytes of response data or else we can't
+ * even fit the return data length.
+ */
+ if (cmd->data_length < 4) {
+ pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
+ " too small\n", cmd->data_length);
+ return -EINVAL;
+ }
buf = transport_kmap_first_data_page(cmd);
@@ -75,6 +83,17 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
/*
+ * Check if the Target port group and Target port descriptor list
+ * based on tg_pt_gp_members count will fit into the response payload.
+ * Otherwise, bump rd_len to let the initiator know we have exceeded
+ * the allocation length and the response is truncated.
+ */
+ if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
+ cmd->data_length) {
+ rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
+ continue;
+ }
+ /*
* PREF: Preferred target port bit, determine if this
* bit should be set for port group.
*/
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index f04d4ef99dc..38535eb1392 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -24,7 +24,7 @@
*/
#include <linux/kernel.h>
-#include <linux/ctype.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -156,11 +156,12 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
}
static void
-target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off)
+target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf)
{
unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
- unsigned char *buf = buf_off;
- int cnt = 0, next = 1;
+ int cnt;
+ bool next = true;
+
/*
* Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
* byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
@@ -169,19 +170,18 @@ target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_of
* NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
* per device uniqeness.
*/
- while (*p != '\0') {
- if (cnt >= 13)
- break;
- if (!isxdigit(*p)) {
- p++;
+ for (cnt = 0; *p && cnt < 13; p++) {
+ int val = hex_to_bin(*p);
+
+ if (val < 0)
continue;
- }
- if (next != 0) {
- buf[cnt++] |= hex_to_bin(*p++);
- next = 0;
+
+ if (next) {
+ next = false;
+ buf[cnt++] |= val;
} else {
- buf[cnt] = hex_to_bin(*p++) << 4;
- next = 1;
+ next = true;
+ buf[cnt] = val << 4;
}
}
}
@@ -1266,3 +1266,52 @@ transport_emulate_control_cdb(struct se_task *task)
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
+
+/*
+ * Write a CDB into @cdb that is based on the one the intiator sent us,
+ * but updated to only cover the sectors that the current task handles.
+ */
+void target_get_task_cdb(struct se_task *task, unsigned char *cdb)
+{
+ struct se_cmd *cmd = task->task_se_cmd;
+ unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb);
+
+ memcpy(cdb, cmd->t_task_cdb, cdb_len);
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+ unsigned long long lba = task->task_lba;
+ u32 sectors = task->task_sectors;
+
+ switch (cdb_len) {
+ case 6:
+ /* 21-bit LBA and 8-bit sectors */
+ cdb[1] = (lba >> 16) & 0x1f;
+ cdb[2] = (lba >> 8) & 0xff;
+ cdb[3] = lba & 0xff;
+ cdb[4] = sectors & 0xff;
+ break;
+ case 10:
+ /* 32-bit LBA and 16-bit sectors */
+ put_unaligned_be32(lba, &cdb[2]);
+ put_unaligned_be16(sectors, &cdb[7]);
+ break;
+ case 12:
+ /* 32-bit LBA and 32-bit sectors */
+ put_unaligned_be32(lba, &cdb[2]);
+ put_unaligned_be32(sectors, &cdb[6]);
+ break;
+ case 16:
+ /* 64-bit LBA and 32-bit sectors */
+ put_unaligned_be64(lba, &cdb[2]);
+ put_unaligned_be32(sectors, &cdb[10]);
+ break;
+ case 32:
+ /* 64-bit LBA and 32-bit sectors, extended CDB */
+ put_unaligned_be64(lba, &cdb[12]);
+ put_unaligned_be32(sectors, &cdb[28]);
+ break;
+ default:
+ BUG();
+ }
+ }
+}
+EXPORT_SYMBOL(target_get_task_cdb);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index b2575d8568c..e0c1e8a8dd4 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
@@ -133,14 +132,6 @@ static struct config_group *target_core_register_fabric(
pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
" %s\n", group, name);
/*
- * Ensure that TCM subsystem plugins are loaded at this point for
- * using the RAMDISK_DR virtual LUN 0 and all other struct se_port
- * LUN symlinks.
- */
- if (transport_subsystem_check_init() < 0)
- return ERR_PTR(-EINVAL);
-
- /*
* Below are some hardcoded request_module() calls to automatically
* local fabric modules when the following is called:
*
@@ -725,9 +716,6 @@ SE_DEV_ATTR_RO(hw_queue_depth);
DEF_DEV_ATTRIB(queue_depth);
SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
-DEF_DEV_ATTRIB(task_timeout);
-SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR);
-
DEF_DEV_ATTRIB(max_unmap_lba_count);
SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
@@ -761,7 +749,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_optimal_sectors.attr,
&target_core_dev_attrib_hw_queue_depth.attr,
&target_core_dev_attrib_queue_depth.attr,
- &target_core_dev_attrib_task_timeout.attr,
&target_core_dev_attrib_max_unmap_lba_count.attr,
&target_core_dev_attrib_max_unmap_block_desc_count.attr,
&target_core_dev_attrib_unmap_granularity.attr,
@@ -3080,8 +3067,7 @@ static struct config_group *target_core_call_addhbatotarget(
/*
* Load up TCM subsystem plugins if they have not already been loaded.
*/
- if (transport_subsystem_check_init() < 0)
- return ERR_PTR(-EINVAL);
+ transport_subsystem_check_init();
hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
if (IS_ERR(hba))
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ca6e4a4df13..f870c3bcfd8 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -914,21 +914,6 @@ void se_dev_set_default_attribs(
dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}
-int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
-{
- if (task_timeout > DA_TASK_TIMEOUT_MAX) {
- pr_err("dev[%p]: Passed task_timeout: %u larger then"
- " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
- return -EINVAL;
- } else {
- dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
- pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
- dev, task_timeout);
- }
-
- return 0;
-}
-
int se_dev_set_max_unmap_lba_count(
struct se_device *dev,
u32 max_unmap_lba_count)
@@ -972,36 +957,24 @@ int se_dev_set_unmap_granularity_alignment(
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
- if ((flag != 0) && (flag != 1)) {
+ if (flag != 0 && flag != 1) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->dpo_emulated == NULL) {
- pr_err("dev->transport->dpo_emulated is NULL\n");
- return -EINVAL;
- }
- if (dev->transport->dpo_emulated(dev) == 0) {
- pr_err("dev->transport->dpo_emulated not supported\n");
- return -EINVAL;
- }
- dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
- pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
- " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
- return 0;
+
+ pr_err("dpo_emulated not supported\n");
+ return -EINVAL;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
- if ((flag != 0) && (flag != 1)) {
+ if (flag != 0 && flag != 1) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->fua_write_emulated == NULL) {
- pr_err("dev->transport->fua_write_emulated is NULL\n");
- return -EINVAL;
- }
- if (dev->transport->fua_write_emulated(dev) == 0) {
- pr_err("dev->transport->fua_write_emulated not supported\n");
+
+ if (dev->transport->fua_write_emulated == 0) {
+ pr_err("fua_write_emulated not supported\n");
return -EINVAL;
}
dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
@@ -1012,36 +985,23 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
- if ((flag != 0) && (flag != 1)) {
+ if (flag != 0 && flag != 1) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->fua_read_emulated == NULL) {
- pr_err("dev->transport->fua_read_emulated is NULL\n");
- return -EINVAL;
- }
- if (dev->transport->fua_read_emulated(dev) == 0) {
- pr_err("dev->transport->fua_read_emulated not supported\n");
- return -EINVAL;
- }
- dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
- pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
- return 0;
+
+ pr_err("ua read emulated not supported\n");
+ return -EINVAL;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
- if ((flag != 0) && (flag != 1)) {
+ if (flag != 0 && flag != 1) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->write_cache_emulated == NULL) {
- pr_err("dev->transport->write_cache_emulated is NULL\n");
- return -EINVAL;
- }
- if (dev->transport->write_cache_emulated(dev) == 0) {
- pr_err("dev->transport->write_cache_emulated not supported\n");
+ if (dev->transport->write_cache_emulated == 0) {
+ pr_err("write_cache_emulated not supported\n");
return -EINVAL;
}
dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 55bbe0847a6..09b6f8729f9 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index c4ea3a9a555..39f021b855e 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -63,6 +63,7 @@ u32 sas_get_pr_transport_id(
unsigned char *buf)
{
unsigned char *ptr;
+ int ret;
/*
* Set PROTOCOL IDENTIFIER to 6h for SAS
@@ -74,7 +75,9 @@ u32 sas_get_pr_transport_id(
*/
ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
- hex2bin(&buf[4], ptr, 8);
+ ret = hex2bin(&buf[4], ptr, 8);
+ if (ret < 0)
+ pr_debug("sas transport_id: invalid hex string\n");
/*
* The SAS Transport ID is a hardcoded 24-byte length
@@ -156,8 +159,9 @@ u32 fc_get_pr_transport_id(
unsigned char *buf)
{
unsigned char *ptr;
- int i;
+ int i, ret;
u32 off = 8;
+
/*
* PROTOCOL IDENTIFIER is 0h for FCP-2
*
@@ -174,7 +178,9 @@ u32 fc_get_pr_transport_id(
i++;
continue;
}
- hex2bin(&buf[off++], &ptr[i], 1);
+ ret = hex2bin(&buf[off++], &ptr[i], 1);
+ if (ret < 0)
+ pr_debug("fc transport_id: invalid hex string\n");
i += 2;
}
/*
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index bc1b33639b8..19a0be9c657 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -26,7 +26,6 @@
*
******************************************************************************/
-#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
@@ -273,13 +272,14 @@ fd_alloc_task(unsigned char *cdb)
static int fd_do_readv(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
- struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+ struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+ struct fd_dev *dev = se_dev->dev_ptr;
struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (task->task_lba *
- task->se_dev->se_sub_dev->se_dev_attrib.block_size);
+ se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret = 0, i;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
@@ -325,13 +325,14 @@ static int fd_do_readv(struct se_task *task)
static int fd_do_writev(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
- struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+ struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+ struct fd_dev *dev = se_dev->dev_ptr;
struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (task->task_lba *
- task->se_dev->se_sub_dev->se_dev_attrib.block_size);
+ se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret, i = 0;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
@@ -399,33 +400,6 @@ static void fd_emulate_sync_cache(struct se_task *task)
}
/*
- * Tell TCM Core that we are capable of WriteCache emulation for
- * an underlying struct se_device.
- */
-static int fd_emulated_write_cache(struct se_device *dev)
-{
- return 1;
-}
-
-static int fd_emulated_dpo(struct se_device *dev)
-{
- return 0;
-}
-/*
- * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
- * for TYPE_DISK.
- */
-static int fd_emulated_fua_write(struct se_device *dev)
-{
- return 1;
-}
-
-static int fd_emulated_fua_read(struct se_device *dev)
-{
- return 0;
-}
-
-/*
* WRITE Force Unit Access (FUA) emulation on a per struct se_task
* LBA range basis..
*/
@@ -608,17 +582,6 @@ static ssize_t fd_show_configfs_dev_params(
return bl;
}
-/* fd_get_cdb(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static unsigned char *fd_get_cdb(struct se_task *task)
-{
- struct fd_request *req = FILE_REQ(task);
-
- return req->fd_scsi_cdb;
-}
-
/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
*
*
@@ -650,15 +613,13 @@ static struct se_subsystem_api fileio_template = {
.name = "fileio",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
+ .write_cache_emulated = 1,
+ .fua_write_emulated = 1,
.attach_hba = fd_attach_hba,
.detach_hba = fd_detach_hba,
.allocate_virtdevice = fd_allocate_virtdevice,
.create_virtdevice = fd_create_virtdevice,
.free_device = fd_free_device,
- .dpo_emulated = fd_emulated_dpo,
- .fua_write_emulated = fd_emulated_fua_write,
- .fua_read_emulated = fd_emulated_fua_read,
- .write_cache_emulated = fd_emulated_write_cache,
.alloc_task = fd_alloc_task,
.do_task = fd_do_task,
.do_sync_cache = fd_emulate_sync_cache,
@@ -666,7 +627,6 @@ static struct se_subsystem_api fileio_template = {
.check_configfs_dev_params = fd_check_configfs_dev_params,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
- .get_cdb = fd_get_cdb,
.get_device_rev = fd_get_device_rev,
.get_device_type = fd_get_device_type,
.get_blocks = fd_get_blocks,
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index daebd710b89..59e6e73106c 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -14,9 +14,7 @@
struct fd_request {
struct se_task fd_task;
- /* SCSI CDB from iSCSI Command PDU */
- unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
-} ____cacheline_aligned;
+};
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7e123410544..41ad02b5fb8 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -27,7 +27,6 @@
*
******************************************************************************/
-#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
@@ -314,104 +313,42 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
return blocks_long;
}
+static void iblock_end_io_flush(struct bio *bio, int err)
+{
+ struct se_cmd *cmd = bio->bi_private;
+
+ if (err)
+ pr_err("IBLOCK: cache flush failed: %d\n", err);
+
+ if (cmd)
+ transport_complete_sync_cache(cmd, err == 0);
+ bio_put(bio);
+}
+
/*
- * Emulate SYCHRONIZE_CACHE_*
+ * Implement SYCHRONIZE CACHE. Note that we can't handle lba ranges and must
+ * always flush the whole cache.
*/
static void iblock_emulate_sync_cache(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
int immed = (cmd->t_task_cdb[1] & 0x2);
- sector_t error_sector;
- int ret;
+ struct bio *bio;
/*
* If the Immediate bit is set, queue up the GOOD response
- * for this SYNCHRONIZE_CACHE op
+ * for this SYNCHRONIZE_CACHE op.
*/
if (immed)
transport_complete_sync_cache(cmd, 1);
- /*
- * blkdev_issue_flush() does not support a specifying a range, so
- * we have to flush the entire cache.
- */
- ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
- if (ret != 0) {
- pr_err("IBLOCK: block_issue_flush() failed: %d "
- " error_sector: %llu\n", ret,
- (unsigned long long)error_sector);
- }
-
+ bio = bio_alloc(GFP_KERNEL, 0);
+ bio->bi_end_io = iblock_end_io_flush;
+ bio->bi_bdev = ib_dev->ibd_bd;
if (!immed)
- transport_complete_sync_cache(cmd, ret == 0);
-}
-
-/*
- * Tell TCM Core that we are capable of WriteCache emulation for
- * an underlying struct se_device.
- */
-static int iblock_emulated_write_cache(struct se_device *dev)
-{
- return 1;
-}
-
-static int iblock_emulated_dpo(struct se_device *dev)
-{
- return 0;
-}
-
-/*
- * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
- * for TYPE_DISK.
- */
-static int iblock_emulated_fua_write(struct se_device *dev)
-{
- return 1;
-}
-
-static int iblock_emulated_fua_read(struct se_device *dev)
-{
- return 0;
-}
-
-static int iblock_do_task(struct se_task *task)
-{
- struct se_device *dev = task->task_se_cmd->se_dev;
- struct iblock_req *req = IBLOCK_REQ(task);
- struct bio *bio = req->ib_bio, *nbio = NULL;
- struct blk_plug plug;
- int rw;
-
- if (task->task_data_direction == DMA_TO_DEVICE) {
- /*
- * Force data to disk if we pretend to not have a volatile
- * write cache, or the initiator set the Force Unit Access bit.
- */
- if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
- (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- task->task_se_cmd->t_tasks_fua))
- rw = WRITE_FUA;
- else
- rw = WRITE;
- } else {
- rw = READ;
- }
-
- blk_start_plug(&plug);
- while (bio) {
- nbio = bio->bi_next;
- bio->bi_next = NULL;
- pr_debug("Calling submit_bio() task: %p bio: %p"
- " bio->bi_sector: %llu\n", task, bio,
- (unsigned long long)bio->bi_sector);
-
- submit_bio(rw, bio);
- bio = nbio;
- }
- blk_finish_plug(&plug);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ bio->bi_private = cmd;
+ submit_bio(WRITE_FLUSH, bio);
}
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
@@ -425,20 +362,7 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
static void iblock_free_task(struct se_task *task)
{
- struct iblock_req *req = IBLOCK_REQ(task);
- struct bio *bio, *hbio = req->ib_bio;
- /*
- * We only release the bio(s) here if iblock_bio_done() has not called
- * bio_put() -> iblock_bio_destructor().
- */
- while (hbio != NULL) {
- bio = hbio;
- hbio = hbio->bi_next;
- bio->bi_next = NULL;
- bio_put(bio);
- }
-
- kfree(req);
+ kfree(IBLOCK_REQ(task));
}
enum {
@@ -552,25 +476,21 @@ static ssize_t iblock_show_configfs_dev_params(
static void iblock_bio_destructor(struct bio *bio)
{
struct se_task *task = bio->bi_private;
- struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+ struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
bio_free(bio, ib_dev->ibd_bio_set);
}
-static struct bio *iblock_get_bio(
- struct se_task *task,
- struct iblock_req *ib_req,
- struct iblock_dev *ib_dev,
- int *ret,
- sector_t lba,
- u32 sg_num)
+static struct bio *
+iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
{
+ struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
+ struct iblock_req *ib_req = IBLOCK_REQ(task);
struct bio *bio;
bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
- *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
return NULL;
}
@@ -591,17 +511,33 @@ static struct bio *iblock_get_bio(
return bio;
}
-static int iblock_map_data_SG(struct se_task *task)
+static int iblock_do_task(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
- struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
- struct iblock_req *ib_req = IBLOCK_REQ(task);
- struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+ struct bio *bio;
+ struct bio_list list;
struct scatterlist *sg;
- int ret = 0;
u32 i, sg_num = task->task_sg_nents;
sector_t block_lba;
+ struct blk_plug plug;
+ int rw;
+
+ if (task->task_data_direction == DMA_TO_DEVICE) {
+ /*
+ * Force data to disk if we pretend to not have a volatile
+ * write cache, or the initiator set the Force Unit Access bit.
+ */
+ if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ task->task_se_cmd->t_tasks_fua))
+ rw = WRITE_FUA;
+ else
+ rw = WRITE;
+ } else {
+ rw = READ;
+ }
+
/*
* Do starting conversion up from non 512-byte blocksize with
* struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
@@ -620,68 +556,43 @@ static int iblock_map_data_SG(struct se_task *task)
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
- bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
+ bio = iblock_get_bio(task, block_lba, sg_num);
if (!bio)
- return ret;
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+ bio_list_init(&list);
+ bio_list_add(&list, bio);
- ib_req->ib_bio = bio;
- hbio = tbio = bio;
- /*
- * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist
- * from task->task_sg -> struct scatterlist memory.
- */
for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
- pr_debug("task: %p bio: %p Calling bio_add_page(): page:"
- " %p len: %u offset: %u\n", task, bio, sg_page(sg),
- sg->length, sg->offset);
-again:
- ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
- if (ret != sg->length) {
-
- pr_debug("*** Set bio->bi_sector: %llu\n",
- (unsigned long long)bio->bi_sector);
- pr_debug("** task->task_size: %u\n",
- task->task_size);
- pr_debug("*** bio->bi_max_vecs: %u\n",
- bio->bi_max_vecs);
- pr_debug("*** bio->bi_vcnt: %u\n",
- bio->bi_vcnt);
-
- bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
- block_lba, sg_num);
+ /*
+ * XXX: if the length the device accepts is shorter than the
+ * length of the S/G list entry this will cause and
+ * endless loop. Better hope no driver uses huge pages.
+ */
+ while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+ != sg->length) {
+ bio = iblock_get_bio(task, block_lba, sg_num);
if (!bio)
goto fail;
-
- tbio = tbio->bi_next = bio;
- pr_debug("-----------------> Added +1 bio: %p to"
- " list, Going to again\n", bio);
- goto again;
+ bio_list_add(&list, bio);
}
+
/* Always in 512 byte units for Linux/Block */
block_lba += sg->length >> IBLOCK_LBA_SHIFT;
sg_num--;
- pr_debug("task: %p bio-add_page() passed!, decremented"
- " sg_num to %u\n", task, sg_num);
- pr_debug("task: %p bio_add_page() passed!, increased lba"
- " to %llu\n", task, (unsigned long long)block_lba);
- pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:"
- " %u\n", task, bio->bi_vcnt);
}
- return 0;
+ blk_start_plug(&plug);
+ while ((bio = bio_list_pop(&list)))
+ submit_bio(rw, bio);
+ blk_finish_plug(&plug);
+
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+
fail:
- while (hbio) {
- bio = hbio;
- hbio = hbio->bi_next;
- bio->bi_next = NULL;
+ while ((bio = bio_list_pop(&list)))
bio_put(bio);
- }
- return ret;
-}
-
-static unsigned char *iblock_get_cdb(struct se_task *task)
-{
- return IBLOCK_REQ(task)->ib_scsi_cdb;
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
}
static u32 iblock_get_device_rev(struct se_device *dev)
@@ -707,6 +618,7 @@ static void iblock_bio_done(struct bio *bio, int err)
{
struct se_task *task = bio->bi_private;
struct iblock_req *ibr = IBLOCK_REQ(task);
+
/*
* Set -EIO if !BIO_UPTODATE and the passed is still err=0
*/
@@ -721,50 +633,31 @@ static void iblock_bio_done(struct bio *bio, int err)
*/
atomic_inc(&ibr->ib_bio_err_cnt);
smp_mb__after_atomic_inc();
- bio_put(bio);
- /*
- * Wait to complete the task until the last bio as completed.
- */
- if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
- return;
-
- ibr->ib_bio = NULL;
- transport_complete_task(task, 0);
- return;
}
- pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
- task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err);
- /*
- * bio_put() will call iblock_bio_destructor() to release the bio back
- * to ibr->ib_bio_set.
- */
+
bio_put(bio);
- /*
- * Wait to complete the task until the last bio as completed.
- */
+
if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
return;
- /*
- * Return GOOD status for task if zero ib_bio_err_cnt exists.
- */
- ibr->ib_bio = NULL;
- transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
+
+ pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+ task, bio, task->task_lba,
+ (unsigned long long)bio->bi_sector, err);
+
+ transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
}
static struct se_subsystem_api iblock_template = {
.name = "iblock",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
- .map_data_SG = iblock_map_data_SG,
+ .write_cache_emulated = 1,
+ .fua_write_emulated = 1,
.attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba,
.allocate_virtdevice = iblock_allocate_virtdevice,
.create_virtdevice = iblock_create_virtdevice,
.free_device = iblock_free_device,
- .dpo_emulated = iblock_emulated_dpo,
- .fua_write_emulated = iblock_emulated_fua_write,
- .fua_read_emulated = iblock_emulated_fua_read,
- .write_cache_emulated = iblock_emulated_write_cache,
.alloc_task = iblock_alloc_task,
.do_task = iblock_do_task,
.do_discard = iblock_do_discard,
@@ -773,7 +666,6 @@ static struct se_subsystem_api iblock_template = {
.check_configfs_dev_params = iblock_check_configfs_dev_params,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
- .get_cdb = iblock_get_cdb,
.get_device_rev = iblock_get_device_rev,
.get_device_type = iblock_get_device_type,
.get_blocks = iblock_get_blocks,
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index a121cd1b657..5cf1860c10d 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -8,10 +8,8 @@
struct iblock_req {
struct se_task ib_task;
- unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];
atomic_t ib_bio_cnt;
atomic_t ib_bio_err_cnt;
- struct bio *ib_bio;
} ____cacheline_aligned;
#define IBDF_HAS_UDEV_PATH 0x01
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 7fd3a161f7c..0c4f783f924 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -25,7 +25,6 @@
*
******************************************************************************/
-#include <linux/version.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 2b7b0da9146..dad671dee9e 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -26,7 +26,6 @@
*
******************************************************************************/
-#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
@@ -567,7 +566,7 @@ static struct se_device *pscsi_create_virtdevice(
if (IS_ERR(sh)) {
pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
- return (struct se_device *) sh;
+ return ERR_CAST(sh);
}
}
} else {
@@ -677,7 +676,7 @@ static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
*/
static int pscsi_transport_complete(struct se_task *task)
{
- struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
struct scsi_device *sd = pdv->pdv_sd;
int result;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
@@ -777,95 +776,6 @@ pscsi_alloc_task(unsigned char *cdb)
return &pt->pscsi_task;
}
-static inline void pscsi_blk_init_request(
- struct se_task *task,
- struct pscsi_plugin_task *pt,
- struct request *req,
- int bidi_read)
-{
- /*
- * Defined as "scsi command" in include/linux/blkdev.h.
- */
- req->cmd_type = REQ_TYPE_BLOCK_PC;
- /*
- * For the extra BIDI-COMMAND READ struct request we do not
- * need to setup the remaining structure members
- */
- if (bidi_read)
- return;
- /*
- * Setup the done function pointer for struct request,
- * also set the end_io_data pointer.to struct se_task.
- */
- req->end_io = pscsi_req_done;
- req->end_io_data = task;
- /*
- * Load the referenced struct se_task's SCSI CDB into
- * include/linux/blkdev.h:struct request->cmd
- */
- req->cmd_len = scsi_command_size(pt->pscsi_cdb);
- req->cmd = &pt->pscsi_cdb[0];
- /*
- * Setup pointer for outgoing sense data.
- */
- req->sense = &pt->pscsi_sense[0];
- req->sense_len = 0;
-}
-
-/*
- * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB
-*/
-static int pscsi_blk_get_request(struct se_task *task)
-{
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
-
- pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
- (task->task_data_direction == DMA_TO_DEVICE),
- GFP_KERNEL);
- if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) {
- pr_err("PSCSI: blk_get_request() failed: %ld\n",
- IS_ERR(pt->pscsi_req));
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- }
- /*
- * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
- * and setup rq callback, CDB and sense.
- */
- pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
- return 0;
-}
-
-/* pscsi_do_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static int pscsi_do_task(struct se_task *task)
-{
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
- /*
- * Set the struct request->timeout value based on peripheral
- * device type from SCSI.
- */
- if (pdv->pdv_sd->type == TYPE_DISK)
- pt->pscsi_req->timeout = PS_TIMEOUT_DISK;
- else
- pt->pscsi_req->timeout = PS_TIMEOUT_OTHER;
-
- pt->pscsi_req->retries = PS_RETRY;
- /*
- * Queue the struct request into the struct scsi_device->request_queue.
- * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd
- * descriptor
- */
- blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
- (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
- pscsi_req_done);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
-}
-
static void pscsi_free_task(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
@@ -1049,15 +959,12 @@ static inline struct bio *pscsi_get_bio(int sg_num)
return bio;
}
-static int __pscsi_map_SG(
- struct se_task *task,
- struct scatterlist *task_sg,
- u32 task_sg_num,
- int bidi_read)
+static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
+ struct bio **hbio)
{
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
- struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+ struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
+ u32 task_sg_num = task->task_sg_nents;
+ struct bio *bio = NULL, *tbio = NULL;
struct page *page;
struct scatterlist *sg;
u32 data_len = task->task_size, i, len, bytes, off;
@@ -1066,19 +973,8 @@ static int __pscsi_map_SG(
int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
int rw = (task->task_data_direction == DMA_TO_DEVICE);
- if (!task->task_size)
- return 0;
- /*
- * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup
- * the bio_vec maplist from task->task_sg ->
- * struct scatterlist memory. The struct se_task->task_sg[] currently needs
- * to be attached to struct bios for submission to Linux/SCSI using
- * struct request to struct scsi_device->request_queue.
- *
- * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI
- * is ported to upstream SCSI passthrough functionality that accepts
- * struct scatterlist->page_link or struct page as a paraemeter.
- */
+ *hbio = NULL;
+
pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
for_each_sg(task_sg, sg, task_sg_num, i) {
@@ -1115,8 +1011,8 @@ static int __pscsi_map_SG(
* bios need to be added to complete a given
* struct se_task
*/
- if (!hbio)
- hbio = tbio = bio;
+ if (!*hbio)
+ *hbio = tbio = bio;
else
tbio = tbio->bi_next = bio;
}
@@ -1152,92 +1048,82 @@ static int __pscsi_map_SG(
off = 0;
}
}
- /*
- * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
- * primary SCSI WRITE poayload mapped for struct se_task->task_sg[]
- */
- if (!bidi_read) {
- /*
- * Starting with v2.6.31, call blk_make_request() passing in *hbio to
- * allocate the pSCSI task a struct request.
- */
- pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
- hbio, GFP_KERNEL);
- if (!pt->pscsi_req) {
- pr_err("pSCSI: blk_make_request() failed\n");
- goto fail;
- }
- /*
- * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
- * and setup rq callback, CDB and sense.
- */
- pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
-
- return task->task_sg_nents;
- }
- /*
- * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
- * SCSI READ paylaod mapped for struct se_task->task_sg_bidi[]
- */
- pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
- hbio, GFP_KERNEL);
- if (!pt->pscsi_req->next_rq) {
- pr_err("pSCSI: blk_make_request() failed for BIDI\n");
- goto fail;
- }
- pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
return task->task_sg_nents;
fail:
- while (hbio) {
- bio = hbio;
- hbio = hbio->bi_next;
+ while (*hbio) {
+ bio = *hbio;
+ *hbio = (*hbio)->bi_next;
bio->bi_next = NULL;
- bio_endio(bio, 0);
+ bio_endio(bio, 0); /* XXX: should be error */
}
return ret;
}
-/*
- * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call.
- */
-static int pscsi_map_SG(struct se_task *task)
+static int pscsi_do_task(struct se_task *task)
{
+ struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ struct request *req;
+ struct bio *hbio;
int ret;
- /*
- * Setup the main struct request for the task->task_sg[] payload
- */
+ target_get_task_cdb(task, pt->pscsi_cdb);
+
+ if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
+ req = blk_get_request(pdv->pdv_sd->request_queue,
+ (task->task_data_direction == DMA_TO_DEVICE),
+ GFP_KERNEL);
+ if (!req || IS_ERR(req)) {
+ pr_err("PSCSI: blk_get_request() failed: %ld\n",
+ req ? IS_ERR(req) : -ENOMEM);
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ } else {
+ BUG_ON(!task->task_size);
- ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0);
- if (ret >= 0 && task->task_sg_bidi) {
/*
- * If present, set up the extra BIDI-COMMAND SCSI READ
- * struct request and payload.
+ * Setup the main struct request for the task->task_sg[] payload
*/
- ret = __pscsi_map_SG(task, task->task_sg_bidi,
- task->task_sg_nents, 1);
+ ret = pscsi_map_sg(task, task->task_sg, &hbio);
+ if (ret < 0)
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+ req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
+ GFP_KERNEL);
+ if (!req) {
+ pr_err("pSCSI: blk_make_request() failed\n");
+ goto fail;
+ }
}
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- return 0;
-}
+ req->cmd_type = REQ_TYPE_BLOCK_PC;
+ req->end_io = pscsi_req_done;
+ req->end_io_data = task;
+ req->cmd_len = scsi_command_size(pt->pscsi_cdb);
+ req->cmd = &pt->pscsi_cdb[0];
+ req->sense = &pt->pscsi_sense[0];
+ req->sense_len = 0;
+ if (pdv->pdv_sd->type == TYPE_DISK)
+ req->timeout = PS_TIMEOUT_DISK;
+ else
+ req->timeout = PS_TIMEOUT_OTHER;
+ req->retries = PS_RETRY;
-static int pscsi_CDB_none(struct se_task *task)
-{
- return pscsi_blk_get_request(task);
-}
+ blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
+ (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
+ pscsi_req_done);
-/* pscsi_get_cdb():
- *
- *
- */
-static unsigned char *pscsi_get_cdb(struct se_task *task)
-{
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
- return pt->pscsi_cdb;
+fail:
+ while (hbio) {
+ struct bio *bio = hbio;
+ hbio = hbio->bi_next;
+ bio->bi_next = NULL;
+ bio_endio(bio, 0); /* XXX: should be error */
+ }
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
}
/* pscsi_get_sense_buffer():
@@ -1328,23 +1214,13 @@ static void pscsi_req_done(struct request *req, int uptodate)
pt->pscsi_resid = req->resid_len;
pscsi_process_SAM_status(task, pt);
- /*
- * Release BIDI-READ if present
- */
- if (req->next_rq != NULL)
- __blk_put_request(req->q, req->next_rq);
-
__blk_put_request(req->q, req);
- pt->pscsi_req = NULL;
}
static struct se_subsystem_api pscsi_template = {
.name = "pscsi",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_PHBA_PDEV,
- .cdb_none = pscsi_CDB_none,
- .map_control_SG = pscsi_map_SG,
- .map_data_SG = pscsi_map_SG,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
@@ -1358,7 +1234,6 @@ static struct se_subsystem_api pscsi_template = {
.check_configfs_dev_params = pscsi_check_configfs_dev_params,
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
- .get_cdb = pscsi_get_cdb,
.get_sense_buffer = pscsi_get_sense_buffer,
.get_device_rev = pscsi_get_device_rev,
.get_device_type = pscsi_get_device_type,
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index ebf4f1ae2c8..fdc17b6aefb 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -27,7 +27,6 @@ struct pscsi_plugin_task {
int pscsi_direction;
int pscsi_result;
u32 pscsi_resid;
- struct request *pscsi_req;
unsigned char pscsi_cdb[0];
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index e567e129c69..5158d3846f1 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -27,7 +27,6 @@
*
******************************************************************************/
-#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
@@ -351,7 +350,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
static int rd_MEMCPY_read(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
+ struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@@ -467,7 +466,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
static int rd_MEMCPY_write(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
+ struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@@ -582,7 +581,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
*/
static int rd_MEMCPY_do_task(struct se_task *task)
{
- struct se_device *dev = task->se_dev;
+ struct se_device *dev = task->task_se_cmd->se_dev;
struct rd_request *req = RD_REQ(task);
unsigned long long lba;
int ret;
@@ -692,17 +691,6 @@ static ssize_t rd_show_configfs_dev_params(
return bl;
}
-/* rd_get_cdb(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static unsigned char *rd_get_cdb(struct se_task *task)
-{
- struct rd_request *req = RD_REQ(task);
-
- return req->rd_scsi_cdb;
-}
-
static u32 rd_get_device_rev(struct se_device *dev)
{
return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
@@ -736,7 +724,6 @@ static struct se_subsystem_api rd_mcp_template = {
.check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
- .get_cdb = rd_get_cdb,
.get_device_rev = rd_get_device_rev,
.get_device_type = rd_get_device_type,
.get_blocks = rd_get_blocks,
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 0d027732cd0..784e56a0410 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -22,8 +22,6 @@ void rd_module_exit(void);
struct rd_request {
struct se_task rd_task;
- /* SCSI CDB from iSCSI Command PDU */
- unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
/* Offset from start of page */
u32 rd_offset;
/* Starting page in Ramdisk for request */
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c
deleted file mode 100644
index 72843441d4f..00000000000
--- a/drivers/target/target_core_scdb.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*******************************************************************************
- * Filename: target_core_scdb.c
- *
- * This file contains the generic target engine Split CDB related functions.
- *
- * Copyright (c) 2004-2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
- *
- * Nicholas A. Bellinger <nab@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- ******************************************************************************/
-
-#include <linux/net.h>
-#include <linux/string.h>
-#include <scsi/scsi.h>
-#include <asm/unaligned.h>
-
-#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-
-#include "target_core_scdb.h"
-
-/* split_cdb_XX_6():
- *
- * 21-bit LBA w/ 8-bit SECTORS
- */
-void split_cdb_XX_6(
- unsigned long long lba,
- u32 sectors,
- unsigned char *cdb)
-{
- cdb[1] = (lba >> 16) & 0x1f;
- cdb[2] = (lba >> 8) & 0xff;
- cdb[3] = lba & 0xff;
- cdb[4] = sectors & 0xff;
-}
-
-/* split_cdb_XX_10():
- *
- * 32-bit LBA w/ 16-bit SECTORS
- */
-void split_cdb_XX_10(
- unsigned long long lba,
- u32 sectors,
- unsigned char *cdb)
-{
- put_unaligned_be32(lba, &cdb[2]);
- put_unaligned_be16(sectors, &cdb[7]);
-}
-
-/* split_cdb_XX_12():
- *
- * 32-bit LBA w/ 32-bit SECTORS
- */
-void split_cdb_XX_12(
- unsigned long long lba,
- u32 sectors,
- unsigned char *cdb)
-{
- put_unaligned_be32(lba, &cdb[2]);
- put_unaligned_be32(sectors, &cdb[6]);
-}
-
-/* split_cdb_XX_16():
- *
- * 64-bit LBA w/ 32-bit SECTORS
- */
-void split_cdb_XX_16(
- unsigned long long lba,
- u32 sectors,
- unsigned char *cdb)
-{
- put_unaligned_be64(lba, &cdb[2]);
- put_unaligned_be32(sectors, &cdb[10]);
-}
-
-/*
- * split_cdb_XX_32():
- *
- * 64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32
- */
-void split_cdb_XX_32(
- unsigned long long lba,
- u32 sectors,
- unsigned char *cdb)
-{
- put_unaligned_be64(lba, &cdb[12]);
- put_unaligned_be32(sectors, &cdb[28]);
-}
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h
deleted file mode 100644
index 48e9ccc9585..00000000000
--- a/drivers/target/target_core_scdb.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef TARGET_CORE_SCDB_H
-#define TARGET_CORE_SCDB_H
-
-extern void split_cdb_XX_6(unsigned long long, u32, unsigned char *);
-extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *);
-extern void split_cdb_XX_12(unsigned long long, u32, unsigned char *);
-extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *);
-extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *);
-
-#endif /* TARGET_CORE_SCDB_H */
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index a8d6e1dee93..874152aed94 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -32,7 +32,6 @@
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/proc_fs.h>
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 27d4925e51c..570b144a1ed 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -24,7 +24,6 @@
*
******************************************************************************/
-#include <linux/version.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
@@ -44,12 +43,12 @@
struct se_tmr_req *core_tmr_alloc_req(
struct se_cmd *se_cmd,
void *fabric_tmr_ptr,
- u8 function)
+ u8 function,
+ gfp_t gfp_flags)
{
struct se_tmr_req *tmr;
- tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
- GFP_ATOMIC : GFP_KERNEL);
+ tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags);
if (!tmr) {
pr_err("Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM);
@@ -67,15 +66,16 @@ void core_tmr_release_req(
struct se_tmr_req *tmr)
{
struct se_device *dev = tmr->tmr_dev;
+ unsigned long flags;
if (!dev) {
kmem_cache_free(se_tmr_req_cache, tmr);
return;
}
- spin_lock_irq(&dev->se_tmr_lock);
+ spin_lock_irqsave(&dev->se_tmr_lock, flags);
list_del(&tmr->tmr_list);
- spin_unlock_irq(&dev->se_tmr_lock);
+ spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
kmem_cache_free(se_tmr_req_cache, tmr);
}
@@ -100,54 +100,20 @@ static void core_tmr_handle_tas_abort(
transport_cmd_finish_abort(cmd, 0);
}
-int core_tmr_lun_reset(
+static void core_tmr_drain_tmr_list(
struct se_device *dev,
struct se_tmr_req *tmr,
- struct list_head *preempt_and_abort_list,
- struct se_cmd *prout_cmd)
+ struct list_head *preempt_and_abort_list)
{
- struct se_cmd *cmd, *tcmd;
- struct se_node_acl *tmr_nacl = NULL;
- struct se_portal_group *tmr_tpg = NULL;
- struct se_queue_obj *qobj = &dev->dev_queue_obj;
+ LIST_HEAD(drain_tmr_list);
struct se_tmr_req *tmr_p, *tmr_pp;
- struct se_task *task, *task_tmp;
+ struct se_cmd *cmd;
unsigned long flags;
- int fe_count, tas;
- /*
- * TASK_ABORTED status bit, this is configurable via ConfigFS
- * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
- *
- * A task aborted status (TAS) bit set to zero specifies that aborted
- * tasks shall be terminated by the device server without any response
- * to the application client. A TAS bit set to one specifies that tasks
- * aborted by the actions of an I_T nexus other than the I_T nexus on
- * which the command was received shall be completed with TASK ABORTED
- * status (see SAM-4).
- */
- tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
- /*
- * Determine if this se_tmr is coming from a $FABRIC_MOD
- * or struct se_device passthrough..
- */
- if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
- tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
- if (tmr_nacl && tmr_tpg) {
- pr_debug("LUN_RESET: TMR caller fabric: %s"
- " initiator port %s\n",
- tmr_tpg->se_tpg_tfo->get_fabric_name(),
- tmr_nacl->initiatorname);
- }
- }
- pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
- (preempt_and_abort_list) ? "Preempt" : "TMR",
- dev->transport->name, tas);
/*
* Release all pending and outgoing TMRs aside from the received
* LUN_RESET tmr..
*/
- spin_lock_irq(&dev->se_tmr_lock);
+ spin_lock_irqsave(&dev->se_tmr_lock, flags);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
/*
* Allow the received TMR to return with FUNCTION_COMPLETE.
@@ -169,29 +135,48 @@ int core_tmr_lun_reset(
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
- spin_unlock_irq(&dev->se_tmr_lock);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
+ spin_lock(&cmd->t_state_lock);
if (!atomic_read(&cmd->t_transport_active)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- spin_lock_irq(&dev->se_tmr_lock);
+ spin_unlock(&cmd->t_state_lock);
continue;
}
if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- spin_lock_irq(&dev->se_tmr_lock);
+ spin_unlock(&cmd->t_state_lock);
continue;
}
+ spin_unlock(&cmd->t_state_lock);
+
+ list_move_tail(&tmr->tmr_list, &drain_tmr_list);
+ }
+ spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+
+ while (!list_empty(&drain_tmr_list)) {
+ tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list);
+ list_del(&tmr->tmr_list);
+ cmd = tmr_p->task_cmd;
+
pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
" Response: 0x%02x, t_state: %d\n",
- (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
- tmr_p->function, tmr_p->response, cmd->t_state);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ (preempt_and_abort_list) ? "Preempt" : "", tmr,
+ tmr->function, tmr->response, cmd->t_state);
- transport_cmd_finish_abort_tmr(cmd);
- spin_lock_irq(&dev->se_tmr_lock);
+ transport_cmd_finish_abort(cmd, 1);
}
- spin_unlock_irq(&dev->se_tmr_lock);
+}
+
+static void core_tmr_drain_task_list(
+ struct se_device *dev,
+ struct se_cmd *prout_cmd,
+ struct se_node_acl *tmr_nacl,
+ int tas,
+ struct list_head *preempt_and_abort_list)
+{
+ LIST_HEAD(drain_task_list);
+ struct se_cmd *cmd;
+ struct se_task *task, *task_tmp;
+ unsigned long flags;
+ int fe_count;
/*
* Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
* This is following sam4r17, section 5.6 Aborting commands, Table 38
@@ -236,18 +221,28 @@ int core_tmr_lun_reset(
if (prout_cmd == cmd)
continue;
- list_del(&task->t_state_list);
+ list_move_tail(&task->t_state_list, &drain_task_list);
atomic_set(&task->task_state_active, 0);
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ /*
+ * Remove from task execute list before processing drain_task_list
+ */
+ if (!list_empty(&task->t_execute_list))
+ __transport_remove_task_from_execute_queue(task, dev);
+ }
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+ while (!list_empty(&drain_task_list)) {
+ task = list_entry(drain_task_list.next, struct se_task, t_state_list);
+ list_del(&task->t_state_list);
+ cmd = task->task_se_cmd;
- spin_lock_irqsave(&cmd->t_state_lock, flags);
pr_debug("LUN_RESET: %s cmd: %p task: %p"
- " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
- "def_t_state: %d/%d cdb: 0x%02x\n",
+ " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
+ "cdb: 0x%02x\n",
(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
cmd->se_tfo->get_task_tag(cmd), 0,
cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
- cmd->deferred_t_state, cmd->t_task_cdb[0]);
+ cmd->t_task_cdb[0]);
pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
" t_task_cdbs: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d -- t_transport_active: %d"
@@ -260,35 +255,24 @@ int core_tmr_lun_reset(
atomic_read(&cmd->t_transport_stop),
atomic_read(&cmd->t_transport_sent));
- if (atomic_read(&task->task_active)) {
- atomic_set(&task->task_stop, 1);
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
-
- pr_debug("LUN_RESET: Waiting for task: %p to shutdown"
- " for dev: %p\n", task, dev);
- wait_for_completion(&task->task_stop_comp);
- pr_debug("LUN_RESET Completed task: %p shutdown for"
- " dev: %p\n", task, dev);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- atomic_dec(&cmd->t_task_cdbs_left);
-
- atomic_set(&task->task_active, 0);
- atomic_set(&task->task_stop, 0);
- } else {
- if (atomic_read(&task->task_execute_queue) != 0)
- transport_remove_task_from_execute_queue(task, dev);
- }
- __transport_stop_task_timer(task, &flags);
+ /*
+ * If the command may be queued onto a workqueue cancel it now.
+ *
+ * This is equivalent to removal from the execute queue in the
+ * loop above, but we do it down here given that
+ * cancel_work_sync may block.
+ */
+ if (cmd->t_state == TRANSPORT_COMPLETE)
+ cancel_work_sync(&cmd->work);
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ target_stop_task(task, &flags);
if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
" t_task_cdbs_ex_left: %d\n", task, dev,
atomic_read(&cmd->t_task_cdbs_ex_left));
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
fe_count = atomic_read(&cmd->t_fe_count);
@@ -298,22 +282,31 @@ int core_tmr_lun_reset(
" task: %p, t_fe_count: %d dev: %p\n", task,
fe_count, dev);
atomic_set(&cmd->t_transport_aborted, 1);
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- spin_lock_irqsave(&dev->execute_task_lock, flags);
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
continue;
}
pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
" t_fe_count: %d dev: %p\n", task, fe_count, dev);
atomic_set(&cmd->t_transport_aborted, 1);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
- spin_lock_irqsave(&dev->execute_task_lock, flags);
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
}
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+static void core_tmr_drain_cmd_list(
+ struct se_device *dev,
+ struct se_cmd *prout_cmd,
+ struct se_node_acl *tmr_nacl,
+ int tas,
+ struct list_head *preempt_and_abort_list)
+{
+ LIST_HEAD(drain_cmd_list);
+ struct se_queue_obj *qobj = &dev->dev_queue_obj;
+ struct se_cmd *cmd, *tcmd;
+ unsigned long flags;
/*
* Release all commands remaining in the struct se_device cmd queue.
*
@@ -337,11 +330,26 @@ int core_tmr_lun_reset(
*/
if (prout_cmd == cmd)
continue;
+ /*
+ * Skip direct processing of TRANSPORT_FREE_CMD_INTR for
+ * HW target mode fabrics.
+ */
+ spin_lock(&cmd->t_state_lock);
+ if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) {
+ spin_unlock(&cmd->t_state_lock);
+ continue;
+ }
+ spin_unlock(&cmd->t_state_lock);
- atomic_dec(&cmd->t_transport_queue_active);
+ atomic_set(&cmd->t_transport_queue_active, 0);
atomic_dec(&qobj->queue_cnt);
- list_del(&cmd->se_queue_node);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+ list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
+ }
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+ while (!list_empty(&drain_cmd_list)) {
+ cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
+ list_del_init(&cmd->se_queue_node);
pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
@@ -354,9 +362,53 @@ int core_tmr_lun_reset(
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
atomic_read(&cmd->t_fe_count));
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
}
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+}
+
+int core_tmr_lun_reset(
+ struct se_device *dev,
+ struct se_tmr_req *tmr,
+ struct list_head *preempt_and_abort_list,
+ struct se_cmd *prout_cmd)
+{
+ struct se_node_acl *tmr_nacl = NULL;
+ struct se_portal_group *tmr_tpg = NULL;
+ int tas;
+ /*
+ * TASK_ABORTED status bit, this is configurable via ConfigFS
+ * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
+ *
+ * A task aborted status (TAS) bit set to zero specifies that aborted
+ * tasks shall be terminated by the device server without any response
+ * to the application client. A TAS bit set to one specifies that tasks
+ * aborted by the actions of an I_T nexus other than the I_T nexus on
+ * which the command was received shall be completed with TASK ABORTED
+ * status (see SAM-4).
+ */
+ tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+ /*
+ * Determine if this se_tmr is coming from a $FABRIC_MOD
+ * or struct se_device passthrough..
+ */
+ if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+ tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+ tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+ if (tmr_nacl && tmr_tpg) {
+ pr_debug("LUN_RESET: TMR caller fabric: %s"
+ " initiator port %s\n",
+ tmr_tpg->se_tpg_tfo->get_fabric_name(),
+ tmr_nacl->initiatorname);
+ }
+ }
+ pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
+ (preempt_and_abort_list) ? "Preempt" : "TMR",
+ dev->transport->name, tas);
+
+ core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+ core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
+ preempt_and_abort_list);
+ core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
+ preempt_and_abort_list);
/*
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
@@ -379,3 +431,4 @@ int core_tmr_lun_reset(
dev->transport->name);
return 0;
}
+
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 162b736c734..49fd0a9b0a5 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -593,7 +593,7 @@ int core_tpg_set_initiator_node_queue_depth(
if (init_sess)
tpg->se_tpg_tfo->close_session(init_sess);
- pr_debug("Successfuly changed queue depth to: %d for Initiator"
+ pr_debug("Successfully changed queue depth to: %d for Initiator"
" Node: %s on %s Target Portal Group: %u\n", queue_depth,
initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg));
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index a4b0a8d27f2..d7525580448 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -26,7 +26,6 @@
*
******************************************************************************/
-#include <linux/version.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
@@ -55,11 +54,11 @@
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
-#include "target_core_scdb.h"
#include "target_core_ua.h"
static int sub_api_initialized;
+static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
@@ -70,30 +69,19 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
-/* Used for transport_dev_get_map_*() */
-typedef int (*map_func_t)(struct se_task *, u32);
-
static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
-static int transport_complete_qf(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
- struct se_device *dev, int (*qf_callback)(struct se_cmd *));
-static void transport_direct_request_timeout(struct se_cmd *cmd);
+ struct se_device *dev);
static void transport_free_dev_tasks(struct se_cmd *cmd);
-static u32 transport_allocate_tasks(struct se_cmd *cmd,
- unsigned long long starting_lba,
- enum dma_data_direction data_direction,
- struct scatterlist *sgl, unsigned int nents);
static int transport_generic_get_mem(struct se_cmd *cmd);
-static int transport_generic_remove(struct se_cmd *cmd,
- int session_reinstatement);
-static void transport_release_fe_cmd(struct se_cmd *cmd);
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
- struct se_queue_obj *qobj);
+static void transport_put_cmd(struct se_cmd *cmd);
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_stop_all_task_timers(struct se_cmd *cmd);
+static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
@@ -109,7 +97,7 @@ int init_se_kmem_caches(void)
if (!se_tmr_req_cache) {
pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
- goto out;
+ goto out_free_cmd_cache;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
@@ -117,14 +105,14 @@ int init_se_kmem_caches(void)
if (!se_sess_cache) {
pr_err("kmem_cache_create() for struct se_session"
" failed\n");
- goto out;
+ goto out_free_tmr_req_cache;
}
se_ua_cache = kmem_cache_create("se_ua_cache",
sizeof(struct se_ua), __alignof__(struct se_ua),
0, NULL);
if (!se_ua_cache) {
pr_err("kmem_cache_create() for struct se_ua failed\n");
- goto out;
+ goto out_free_sess_cache;
}
t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
sizeof(struct t10_pr_registration),
@@ -132,7 +120,7 @@ int init_se_kmem_caches(void)
if (!t10_pr_reg_cache) {
pr_err("kmem_cache_create() for struct t10_pr_registration"
" failed\n");
- goto out;
+ goto out_free_ua_cache;
}
t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
@@ -140,7 +128,7 @@ int init_se_kmem_caches(void)
if (!t10_alua_lu_gp_cache) {
pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
" failed\n");
- goto out;
+ goto out_free_pr_reg_cache;
}
t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
sizeof(struct t10_alua_lu_gp_member),
@@ -148,7 +136,7 @@ int init_se_kmem_caches(void)
if (!t10_alua_lu_gp_mem_cache) {
pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
"cache failed\n");
- goto out;
+ goto out_free_lu_gp_cache;
}
t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
sizeof(struct t10_alua_tg_pt_gp),
@@ -156,7 +144,7 @@ int init_se_kmem_caches(void)
if (!t10_alua_tg_pt_gp_cache) {
pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
"cache failed\n");
- goto out;
+ goto out_free_lu_gp_mem_cache;
}
t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
"t10_alua_tg_pt_gp_mem_cache",
@@ -166,34 +154,41 @@ int init_se_kmem_caches(void)
if (!t10_alua_tg_pt_gp_mem_cache) {
pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
"mem_t failed\n");
- goto out;
+ goto out_free_tg_pt_gp_cache;
}
+ target_completion_wq = alloc_workqueue("target_completion",
+ WQ_MEM_RECLAIM, 0);
+ if (!target_completion_wq)
+ goto out_free_tg_pt_gp_mem_cache;
+
return 0;
+
+out_free_tg_pt_gp_mem_cache:
+ kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+out_free_tg_pt_gp_cache:
+ kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+out_free_lu_gp_mem_cache:
+ kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+out_free_lu_gp_cache:
+ kmem_cache_destroy(t10_alua_lu_gp_cache);
+out_free_pr_reg_cache:
+ kmem_cache_destroy(t10_pr_reg_cache);
+out_free_ua_cache:
+ kmem_cache_destroy(se_ua_cache);
+out_free_sess_cache:
+ kmem_cache_destroy(se_sess_cache);
+out_free_tmr_req_cache:
+ kmem_cache_destroy(se_tmr_req_cache);
+out_free_cmd_cache:
+ kmem_cache_destroy(se_cmd_cache);
out:
- if (se_cmd_cache)
- kmem_cache_destroy(se_cmd_cache);
- if (se_tmr_req_cache)
- kmem_cache_destroy(se_tmr_req_cache);
- if (se_sess_cache)
- kmem_cache_destroy(se_sess_cache);
- if (se_ua_cache)
- kmem_cache_destroy(se_ua_cache);
- if (t10_pr_reg_cache)
- kmem_cache_destroy(t10_pr_reg_cache);
- if (t10_alua_lu_gp_cache)
- kmem_cache_destroy(t10_alua_lu_gp_cache);
- if (t10_alua_lu_gp_mem_cache)
- kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
- if (t10_alua_tg_pt_gp_cache)
- kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
- if (t10_alua_tg_pt_gp_mem_cache)
- kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
return -ENOMEM;
}
void release_se_kmem_caches(void)
{
+ destroy_workqueue(target_completion_wq);
kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
@@ -234,10 +229,13 @@ void transport_init_queue_obj(struct se_queue_obj *qobj)
}
EXPORT_SYMBOL(transport_init_queue_obj);
-static int transport_subsystem_reqmods(void)
+void transport_subsystem_check_init(void)
{
int ret;
+ if (sub_api_initialized)
+ return;
+
ret = request_module("target_core_iblock");
if (ret != 0)
pr_err("Unable to load target_core_iblock\n");
@@ -254,24 +252,8 @@ static int transport_subsystem_reqmods(void)
if (ret != 0)
pr_err("Unable to load target_core_stgt\n");
- return 0;
-}
-
-int transport_subsystem_check_init(void)
-{
- int ret;
-
- if (sub_api_initialized)
- return 0;
- /*
- * Request the loading of known TCM subsystem plugins..
- */
- ret = transport_subsystem_reqmods();
- if (ret < 0)
- return ret;
-
sub_api_initialized = 1;
- return 0;
+ return;
}
struct se_session *transport_init_session(void)
@@ -438,16 +420,15 @@ EXPORT_SYMBOL(transport_deregister_session);
*/
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
- struct se_device *dev;
+ struct se_device *dev = cmd->se_dev;
struct se_task *task;
unsigned long flags;
- list_for_each_entry(task, &cmd->t_task_list, t_list) {
- dev = task->se_dev;
- if (!dev)
- continue;
+ if (!dev)
+ return;
- if (atomic_read(&task->task_active))
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
+ if (task->task_flags & TF_ACTIVE)
continue;
if (!atomic_read(&task->task_state_active))
@@ -489,8 +470,6 @@ static int transport_cmd_check_stop(
" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
cmd->se_tfo->get_task_tag(cmd));
- cmd->deferred_t_state = cmd->t_state;
- cmd->t_state = TRANSPORT_DEFERRED_CMD;
atomic_set(&cmd->t_transport_active, 0);
if (transport_off == 2)
transport_all_task_dev_remove_state(cmd);
@@ -508,8 +487,6 @@ static int transport_cmd_check_stop(
" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
cmd->se_tfo->get_task_tag(cmd));
- cmd->deferred_t_state = cmd->t_state;
- cmd->t_state = TRANSPORT_DEFERRED_CMD;
if (transport_off == 2)
transport_all_task_dev_remove_state(cmd);
@@ -594,35 +571,24 @@ check_lun:
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
- transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
- transport_lun_remove_cmd(cmd);
+ if (!cmd->se_tmr_req)
+ transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
return;
- if (remove)
- transport_generic_remove(cmd, 0);
-}
-
-void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
-{
- transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
-
- if (transport_cmd_check_stop_to_fabric(cmd))
- return;
-
- transport_generic_remove(cmd, 0);
+ if (remove) {
+ transport_remove_cmd_from_queue(cmd);
+ transport_put_cmd(cmd);
+ }
}
-static void transport_add_cmd_to_queue(
- struct se_cmd *cmd,
- int t_state)
+static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
+ bool at_head)
{
struct se_device *dev = cmd->se_dev;
struct se_queue_obj *qobj = &dev->dev_queue_obj;
unsigned long flags;
- INIT_LIST_HEAD(&cmd->se_queue_node);
-
if (t_state) {
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->t_state = t_state;
@@ -631,15 +597,20 @@ static void transport_add_cmd_to_queue(
}
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
- cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
+
+ /* If the cmd is already on the list, remove it before we add it */
+ if (!list_empty(&cmd->se_queue_node))
+ list_del(&cmd->se_queue_node);
+ else
+ atomic_inc(&qobj->queue_cnt);
+
+ if (at_head)
list_add(&cmd->se_queue_node, &qobj->qobj_list);
- } else
+ else
list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
- atomic_inc(&cmd->t_transport_queue_active);
+ atomic_set(&cmd->t_transport_queue_active, 1);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- atomic_inc(&qobj->queue_cnt);
wake_up_interruptible(&qobj->thread_wq);
}
@@ -656,19 +627,18 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
}
cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
- atomic_dec(&cmd->t_transport_queue_active);
+ atomic_set(&cmd->t_transport_queue_active, 0);
- list_del(&cmd->se_queue_node);
+ list_del_init(&cmd->se_queue_node);
atomic_dec(&qobj->queue_cnt);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return cmd;
}
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
- struct se_queue_obj *qobj)
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
{
- struct se_cmd *t;
+ struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
unsigned long flags;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
@@ -676,14 +646,9 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return;
}
-
- list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
- if (t == cmd) {
- atomic_dec(&cmd->t_transport_queue_active);
- atomic_dec(&qobj->queue_cnt);
- list_del(&cmd->se_queue_node);
- break;
- }
+ atomic_set(&cmd->t_transport_queue_active, 0);
+ atomic_dec(&qobj->queue_cnt);
+ list_del_init(&cmd->se_queue_node);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
if (atomic_read(&cmd->t_transport_queue_active)) {
@@ -716,6 +681,13 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
}
EXPORT_SYMBOL(transport_complete_sync_cache);
+static void target_complete_failure_work(struct work_struct *work)
+{
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+
+ transport_generic_request_failure(cmd, 1, 1);
+}
+
/* transport_complete_task():
*
* Called from interrupt and non interrupt context depending
@@ -724,8 +696,7 @@ EXPORT_SYMBOL(transport_complete_sync_cache);
void transport_complete_task(struct se_task *task, int success)
{
struct se_cmd *cmd = task->task_se_cmd;
- struct se_device *dev = task->se_dev;
- int t_state;
+ struct se_device *dev = cmd->se_dev;
unsigned long flags;
#if 0
pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
@@ -735,7 +706,7 @@ void transport_complete_task(struct se_task *task, int success)
atomic_inc(&dev->depth_left);
spin_lock_irqsave(&cmd->t_state_lock, flags);
- atomic_set(&task->task_active, 0);
+ task->task_flags &= ~TF_ACTIVE;
/*
* See if any sense data exists, if so set the TASK_SENSE flag.
@@ -754,68 +725,39 @@ void transport_complete_task(struct se_task *task, int success)
* See if we are waiting for outstanding struct se_task
* to complete for an exception condition
*/
- if (atomic_read(&task->task_stop)) {
- /*
- * Decrement cmd->t_se_count if this task had
- * previously thrown its timeout exception handler.
- */
- if (atomic_read(&task->task_timeout)) {
- atomic_dec(&cmd->t_se_count);
- atomic_set(&task->task_timeout, 0);
- }
+ if (task->task_flags & TF_REQUEST_STOP) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
complete(&task->task_stop_comp);
return;
}
/*
- * If the task's timeout handler has fired, use the t_task_cdbs_timeout
- * left counter to determine when the struct se_cmd is ready to be queued to
- * the processing thread.
- */
- if (atomic_read(&task->task_timeout)) {
- if (!atomic_dec_and_test(
- &cmd->t_task_cdbs_timeout_left)) {
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- return;
- }
- t_state = TRANSPORT_COMPLETE_TIMEOUT;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- transport_add_cmd_to_queue(cmd, t_state);
- return;
- }
- atomic_dec(&cmd->t_task_cdbs_timeout_left);
-
- /*
* Decrement the outstanding t_task_cdbs_left count. The last
* struct se_task from struct se_cmd will complete itself into the
* device queue depending upon int success.
*/
if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
- if (!success)
- cmd->t_tasks_failed = 1;
-
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
if (!success || cmd->t_tasks_failed) {
- t_state = TRANSPORT_COMPLETE_FAILURE;
if (!task->task_error_status) {
task->task_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
cmd->transport_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
+ INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
atomic_set(&cmd->t_transport_complete, 1);
- t_state = TRANSPORT_COMPLETE_OK;
+ INIT_WORK(&cmd->work, target_complete_ok_work);
}
+
+ cmd->t_state = TRANSPORT_COMPLETE;
+ atomic_set(&cmd->t_transport_active, 1);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_add_cmd_to_queue(cmd, t_state);
+ queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(transport_complete_task);
@@ -902,14 +844,12 @@ static void __transport_add_task_to_execute_queue(
static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
- struct se_device *dev;
+ struct se_device *dev = cmd->se_dev;
struct se_task *task;
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry(task, &cmd->t_task_list, t_list) {
- dev = task->se_dev;
-
if (atomic_read(&task->task_state_active))
continue;
@@ -934,38 +874,36 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry(task, &cmd->t_task_list, t_list) {
- if (atomic_read(&task->task_execute_queue))
+ if (!list_empty(&task->t_execute_list))
continue;
/*
* __transport_add_task_to_execute_queue() handles the
* SAM Task Attribute emulation if enabled
*/
__transport_add_task_to_execute_queue(task, task_prev, dev);
- atomic_set(&task->task_execute_queue, 1);
task_prev = task;
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
-/* transport_remove_task_from_execute_queue():
- *
- *
- */
+void __transport_remove_task_from_execute_queue(struct se_task *task,
+ struct se_device *dev)
+{
+ list_del_init(&task->t_execute_list);
+ atomic_dec(&dev->execute_tasks);
+}
+
void transport_remove_task_from_execute_queue(
struct se_task *task,
struct se_device *dev)
{
unsigned long flags;
- if (atomic_read(&task->task_execute_queue) == 0) {
- dump_stack();
+ if (WARN_ON(list_empty(&task->t_execute_list)))
return;
- }
spin_lock_irqsave(&dev->execute_task_lock, flags);
- list_del(&task->t_execute_list);
- atomic_set(&task->task_execute_queue, 0);
- atomic_dec(&dev->execute_tasks);
+ __transport_remove_task_from_execute_queue(task, dev);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
@@ -991,14 +929,11 @@ static void target_qf_do_work(struct work_struct *work)
pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
- (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
+ (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
: "UNKNOWN");
- /*
- * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd
- * has been added to head of queue
- */
- transport_add_cmd_to_queue(cmd, cmd->t_state);
+
+ transport_add_cmd_to_queue(cmd, cmd->t_state, true);
}
}
@@ -1053,41 +988,6 @@ void transport_dump_dev_state(
*bl += sprintf(b + *bl, " ");
}
-/* transport_release_all_cmds():
- *
- *
- */
-static void transport_release_all_cmds(struct se_device *dev)
-{
- struct se_cmd *cmd, *tcmd;
- int bug_out = 0, t_state;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
- list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
- se_queue_node) {
- t_state = cmd->t_state;
- list_del(&cmd->se_queue_node);
- spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
- flags);
-
- pr_err("Releasing ITT: 0x%08x, i_state: %u,"
- " t_state: %u directly\n",
- cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd), t_state);
-
- transport_release_fe_cmd(cmd);
- bug_out = 1;
-
- spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
- }
- spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags);
-#if 0
- if (bug_out)
- BUG();
-#endif
-}
-
void transport_dump_vpd_proto_id(
struct t10_vpd *vpd,
unsigned char *p_buf,
@@ -1573,7 +1473,6 @@ transport_generic_get_task(struct se_cmd *cmd,
INIT_LIST_HEAD(&task->t_state_list);
init_completion(&task->task_stop_comp);
task->task_se_cmd = cmd;
- task->se_dev = dev;
task->task_data_direction = data_direction;
return task;
@@ -1598,6 +1497,7 @@ void transport_init_se_cmd(
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
+ INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->t_task_list);
init_completion(&cmd->transport_lun_fe_stop_comp);
@@ -1641,21 +1541,6 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
return 0;
}
-void transport_free_se_cmd(
- struct se_cmd *se_cmd)
-{
- if (se_cmd->se_tmr_req)
- core_tmr_release_req(se_cmd->se_tmr_req);
- /*
- * Check and free any extended CDB buffer that was allocated
- */
- if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb)
- kfree(se_cmd->t_task_cdb);
-}
-EXPORT_SYMBOL(transport_free_se_cmd);
-
-static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);
-
/* transport_generic_allocate_tasks():
*
* Called from fabric RX Thread.
@@ -1667,12 +1552,6 @@ int transport_generic_allocate_tasks(
int ret;
transport_generic_prepare_cdb(cdb);
-
- /*
- * This is needed for early exceptions.
- */
- cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
-
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD
@@ -1730,26 +1609,6 @@ int transport_generic_allocate_tasks(
EXPORT_SYMBOL(transport_generic_allocate_tasks);
/*
- * Used by fabric module frontends not defining a TFO->new_cmd_map()
- * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD statis
- */
-int transport_generic_handle_cdb(
- struct se_cmd *cmd)
-{
- if (!cmd->se_lun) {
- dump_stack();
- pr_err("cmd->se_lun is NULL\n");
- return -EINVAL;
- }
-
- transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
- return 0;
-}
-EXPORT_SYMBOL(transport_generic_handle_cdb);
-
-static void transport_generic_request_failure(struct se_cmd *,
- struct se_device *, int, int);
-/*
* Used by fabric module frontends to queue tasks directly.
* Many only be used from process context only
*/
@@ -1773,7 +1632,7 @@ int transport_handle_cdb_direct(
* Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
* transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
* in existing usage to ensure that outstanding descriptors are handled
- * correctly during shutdown via transport_generic_wait_for_tasks()
+ * correctly during shutdown via transport_wait_for_tasks()
*
* Also, we don't take cmd->t_state_lock here as we only expect
* this to be called for initial descriptor submission.
@@ -1790,7 +1649,7 @@ int transport_handle_cdb_direct(
return 0;
else if (ret < 0) {
cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, NULL, 0,
+ transport_generic_request_failure(cmd, 0,
(cmd->data_direction != DMA_TO_DEVICE));
}
return 0;
@@ -1811,7 +1670,7 @@ int transport_generic_handle_cdb_map(
return -EINVAL;
}
- transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
+ transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);
@@ -1841,7 +1700,7 @@ int transport_generic_handle_data(
if (transport_check_aborted_status(cmd, 1) != 0)
return 0;
- transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
+ transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);
@@ -1853,12 +1712,7 @@ EXPORT_SYMBOL(transport_generic_handle_data);
int transport_generic_handle_tmr(
struct se_cmd *cmd)
{
- /*
- * This is needed for early exceptions.
- */
- cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
-
- transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
+ transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
@@ -1866,10 +1720,36 @@ EXPORT_SYMBOL(transport_generic_handle_tmr);
void transport_generic_free_cmd_intr(
struct se_cmd *cmd)
{
- transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
+ transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR, false);
}
EXPORT_SYMBOL(transport_generic_free_cmd_intr);
+/*
+ * If the task is active, request it to be stopped and sleep until it
+ * has completed.
+ */
+bool target_stop_task(struct se_task *task, unsigned long *flags)
+{
+ struct se_cmd *cmd = task->task_se_cmd;
+ bool was_active = false;
+
+ if (task->task_flags & TF_ACTIVE) {
+ task->task_flags |= TF_REQUEST_STOP;
+ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+
+ pr_debug("Task %p waiting to complete\n", task);
+ wait_for_completion(&task->task_stop_comp);
+ pr_debug("Task %p stopped successfully\n", task);
+
+ spin_lock_irqsave(&cmd->t_state_lock, *flags);
+ atomic_dec(&cmd->t_task_cdbs_left);
+ task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
+ was_active = true;
+ }
+
+ return was_active;
+}
+
static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
struct se_task *task, *task_tmp;
@@ -1885,51 +1765,26 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry_safe(task, task_tmp,
&cmd->t_task_list, t_list) {
- pr_debug("task_no[%d] - Processing task %p\n",
- task->task_no, task);
+ pr_debug("Processing task %p\n", task);
/*
* If the struct se_task has not been sent and is not active,
* remove the struct se_task from the execution queue.
*/
- if (!atomic_read(&task->task_sent) &&
- !atomic_read(&task->task_active)) {
+ if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
transport_remove_task_from_execute_queue(task,
- task->se_dev);
+ cmd->se_dev);
- pr_debug("task_no[%d] - Removed from execute queue\n",
- task->task_no);
+ pr_debug("Task %p removed from execute queue\n", task);
spin_lock_irqsave(&cmd->t_state_lock, flags);
continue;
}
- /*
- * If the struct se_task is active, sleep until it is returned
- * from the plugin.
- */
- if (atomic_read(&task->task_active)) {
- atomic_set(&task->task_stop, 1);
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
-
- pr_debug("task_no[%d] - Waiting to complete\n",
- task->task_no);
- wait_for_completion(&task->task_stop_comp);
- pr_debug("task_no[%d] - Stopped successfully\n",
- task->task_no);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- atomic_dec(&cmd->t_task_cdbs_left);
-
- atomic_set(&task->task_active, 0);
- atomic_set(&task->task_stop, 0);
- } else {
- pr_debug("task_no[%d] - Did nothing\n", task->task_no);
+ if (!target_stop_task(task, &flags)) {
+ pr_debug("Task %p - did nothing\n", task);
ret++;
}
-
- __transport_stop_task_timer(task, &flags);
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -1941,7 +1796,6 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
*/
static void transport_generic_request_failure(
struct se_cmd *cmd,
- struct se_device *dev,
int complete,
int sc)
{
@@ -1950,10 +1804,9 @@ static void transport_generic_request_failure(
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state/def_t_state:"
- " %d/%d transport_error_status: %d\n",
+ pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state, cmd->deferred_t_state,
+ cmd->t_state,
cmd->transport_error_status);
pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
@@ -1966,10 +1819,6 @@ static void transport_generic_request_failure(
atomic_read(&cmd->t_transport_stop),
atomic_read(&cmd->t_transport_sent));
- transport_stop_all_task_timers(cmd);
-
- if (dev)
- atomic_inc(&dev->depth_left);
/*
* For SAM Task Attribute emulation for failed struct se_cmd
*/
@@ -1977,7 +1826,6 @@ static void transport_generic_request_failure(
transport_complete_task_attr(cmd);
if (complete) {
- transport_direct_request_timeout(cmd);
cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -2076,46 +1924,8 @@ check_stop:
return;
queue_full:
- cmd->t_state = TRANSPORT_COMPLETE_OK;
- transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
-}
-
-static void transport_direct_request_timeout(struct se_cmd *cmd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (!atomic_read(&cmd->t_transport_timeout)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
- if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
-
- atomic_sub(atomic_read(&cmd->t_transport_timeout),
- &cmd->t_se_count);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
-static void transport_generic_request_timeout(struct se_cmd *cmd)
-{
- unsigned long flags;
-
- /*
- * Reset cmd->t_se_count to allow transport_generic_remove()
- * to allow last call to free memory resources.
- */
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (atomic_read(&cmd->t_transport_timeout) > 1) {
- int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
-
- atomic_sub(tmp, &cmd->t_se_count);
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- transport_generic_remove(cmd, 0);
+ cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
+ transport_handle_queue_full(cmd, cmd->se_dev);
}
static inline u32 transport_lba_21(unsigned char *cdb)
@@ -2160,127 +1970,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
-/*
- * Called from interrupt context.
- */
-static void transport_task_timeout_handler(unsigned long data)
-{
- struct se_task *task = (struct se_task *)data;
- struct se_cmd *cmd = task->task_se_cmd;
- unsigned long flags;
-
- pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (task->task_flags & TF_STOP) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
- task->task_flags &= ~TF_RUNNING;
-
- /*
- * Determine if transport_complete_task() has already been called.
- */
- if (!atomic_read(&task->task_active)) {
- pr_debug("transport task: %p cmd: %p timeout task_active"
- " == 0\n", task, cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
-
- atomic_inc(&cmd->t_se_count);
- atomic_inc(&cmd->t_transport_timeout);
- cmd->t_tasks_failed = 1;
-
- atomic_set(&task->task_timeout, 1);
- task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
- task->task_scsi_status = 1;
-
- if (atomic_read(&task->task_stop)) {
- pr_debug("transport task: %p cmd: %p timeout task_stop"
- " == 1\n", task, cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&task->task_stop_comp);
- return;
- }
-
- if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
- pr_debug("transport task: %p cmd: %p timeout non zero"
- " t_task_cdbs_left\n", task, cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
- pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
- task, cmd);
-
- cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
-}
-
-/*
- * Called with cmd->t_state_lock held.
- */
-static void transport_start_task_timer(struct se_task *task)
-{
- struct se_device *dev = task->se_dev;
- int timeout;
-
- if (task->task_flags & TF_RUNNING)
- return;
- /*
- * If the task_timeout is disabled, exit now.
- */
- timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
- if (!timeout)
- return;
-
- init_timer(&task->task_timer);
- task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
- task->task_timer.data = (unsigned long) task;
- task->task_timer.function = transport_task_timeout_handler;
-
- task->task_flags |= TF_RUNNING;
- add_timer(&task->task_timer);
-#if 0
- pr_debug("Starting task timer for cmd: %p task: %p seconds:"
- " %d\n", task->task_se_cmd, task, timeout);
-#endif
-}
-
-/*
- * Called with spin_lock_irq(&cmd->t_state_lock) held.
- */
-void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
-{
- struct se_cmd *cmd = task->task_se_cmd;
-
- if (!task->task_flags & TF_RUNNING)
- return;
-
- task->task_flags |= TF_STOP;
- spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
-
- del_timer_sync(&task->task_timer);
-
- spin_lock_irqsave(&cmd->t_state_lock, *flags);
- task->task_flags &= ~TF_RUNNING;
- task->task_flags &= ~TF_STOP;
-}
-
-static void transport_stop_all_task_timers(struct se_cmd *cmd)
-{
- struct se_task *task = NULL, *task_tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- list_for_each_entry_safe(task, task_tmp,
- &cmd->t_task_list, t_list)
- __transport_stop_task_timer(task, &flags);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
static inline int transport_tcq_window_closed(struct se_device *dev)
{
if (dev->dev_tcq_window_closed++ <
@@ -2385,7 +2074,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)
if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- transport_generic_request_failure(cmd, NULL, 0, 1);
+ transport_generic_request_failure(cmd, 0, 1);
return 0;
}
@@ -2448,9 +2137,7 @@ check_depth:
}
task = list_first_entry(&dev->execute_task_list,
struct se_task, t_execute_list);
- list_del(&task->t_execute_list);
- atomic_set(&task->task_execute_queue, 0);
- atomic_dec(&dev->execute_tasks);
+ __transport_remove_task_from_execute_queue(task, dev);
spin_unlock_irq(&dev->execute_task_lock);
atomic_dec(&dev->depth_left);
@@ -2458,15 +2145,13 @@ check_depth:
cmd = task->task_se_cmd;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- atomic_set(&task->task_active, 1);
- atomic_set(&task->task_sent, 1);
+ task->task_flags |= (TF_ACTIVE | TF_SENT);
atomic_inc(&cmd->t_task_cdbs_sent);
if (atomic_read(&cmd->t_task_cdbs_sent) ==
cmd->t_task_list_num)
- atomic_set(&cmd->transport_sent, 1);
+ atomic_set(&cmd->t_transport_sent, 1);
- transport_start_task_timer(task);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
/*
* The struct se_cmd->transport_emulate_cdb() function pointer is used
@@ -2477,10 +2162,13 @@ check_depth:
error = cmd->transport_emulate_cdb(cmd);
if (error != 0) {
cmd->transport_error_status = error;
- atomic_set(&task->task_active, 0);
- atomic_set(&cmd->transport_sent, 0);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ task->task_flags &= ~TF_ACTIVE;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
- transport_generic_request_failure(cmd, dev, 0, 1);
+ atomic_inc(&dev->depth_left);
+ transport_generic_request_failure(cmd, 0, 1);
goto check_depth;
}
/*
@@ -2513,10 +2201,13 @@ check_depth:
if (error != 0) {
cmd->transport_error_status = error;
- atomic_set(&task->task_active, 0);
- atomic_set(&cmd->transport_sent, 0);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ task->task_flags &= ~TF_ACTIVE;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
- transport_generic_request_failure(cmd, dev, 0, 1);
+ atomic_inc(&dev->depth_left);
+ transport_generic_request_failure(cmd, 0, 1);
}
}
@@ -2538,8 +2229,6 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
-static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
-
static inline u32 transport_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
@@ -2752,13 +2441,16 @@ out:
static int transport_get_sense_data(struct se_cmd *cmd)
{
unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
- struct se_device *dev;
+ struct se_device *dev = cmd->se_dev;
struct se_task *task = NULL, *task_tmp;
unsigned long flags;
u32 offset = 0;
WARN_ON(!cmd->se_lun);
+ if (!dev)
+ return 0;
+
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -2767,14 +2459,9 @@ static int transport_get_sense_data(struct se_cmd *cmd)
list_for_each_entry_safe(task, task_tmp,
&cmd->t_task_list, t_list) {
-
if (!task->task_sense)
continue;
- dev = task->se_dev;
- if (!dev)
- continue;
-
if (!dev->transport->get_sense_buffer) {
pr_err("dev->transport->get_sense_buffer"
" is NULL\n");
@@ -2783,9 +2470,9 @@ static int transport_get_sense_data(struct se_cmd *cmd)
sense_buffer = dev->transport->get_sense_buffer(task);
if (!sense_buffer) {
- pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate"
+ pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
" sense buffer for task with sense\n",
- cmd->se_tfo->get_task_tag(cmd), task->task_no);
+ cmd->se_tfo->get_task_tag(cmd), task);
continue;
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -2814,7 +2501,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
static int
transport_handle_reservation_conflict(struct se_cmd *cmd)
{
- cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
@@ -2915,8 +2601,6 @@ static int transport_generic_cmd_sequencer(
* Check for an existing UNIT ATTENTION condition
*/
if (core_scsi3_ua_check(cmd, cdb) < 0) {
- cmd->transport_wait_for_tasks =
- &transport_nop_wait_for_tasks;
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
return -EINVAL;
@@ -2926,7 +2610,6 @@ static int transport_generic_cmd_sequencer(
*/
ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
if (ret != 0) {
- cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
/*
* Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
* The ALUA additional sense code qualifier (ASCQ) is determined
@@ -2965,7 +2648,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_6;
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
@@ -2974,7 +2656,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_10;
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
@@ -2983,7 +2664,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_12;
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
@@ -2992,7 +2672,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_16;
cmd->t_task_lba = transport_lba_64(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
@@ -3001,7 +2680,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_6;
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
@@ -3010,7 +2688,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_10;
cmd->t_task_lba = transport_lba_32(cdb);
cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
@@ -3020,7 +2697,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_12;
cmd->t_task_lba = transport_lba_32(cdb);
cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
@@ -3030,7 +2706,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_16;
cmd->t_task_lba = transport_lba_64(cdb);
cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
@@ -3043,18 +2718,14 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_10;
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- passthrough = (dev->transport->transport_type ==
- TRANSPORT_PLUGIN_PHBA_PDEV);
- /*
- * Skip the remaining assignments for TCM/PSCSI passthrough
- */
- if (passthrough)
- break;
+
+ if (dev->transport->transport_type ==
+ TRANSPORT_PLUGIN_PHBA_PDEV)
+ goto out_unsupported_cdb;
/*
- * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
+ * Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
cmd->t_tasks_fua = (cdb[1] & 0x8);
@@ -3078,19 +2749,14 @@ static int transport_generic_cmd_sequencer(
* Use WRITE_32 and READ_32 opcodes for the emulated
* XDWRITE_READ_32 logic.
*/
- cmd->transport_split_cdb = &split_cdb_XX_32;
cmd->t_task_lba = transport_lba_64_ext(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- /*
- * Skip the remaining assignments for TCM/PSCSI passthrough
- */
if (passthrough)
- break;
-
+ goto out_unsupported_cdb;
/*
- * Setup BIDI XOR callback to be run during
- * transport_generic_complete_ok()
+ * Setup BIDI XOR callback to be run during after I/O
+ * completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
cmd->t_tasks_fua = (cdb[10] & 0x8);
@@ -3430,7 +3096,6 @@ static int transport_generic_cmd_sequencer(
pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
" 0x%02x, sending CHECK_CONDITION.\n",
cmd->se_tfo->get_fabric_name(), cdb[0]);
- cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
goto out_unsupported_cdb;
}
@@ -3488,8 +3153,7 @@ out_invalid_cdb_field:
}
/*
- * Called from transport_generic_complete_ok() and
- * transport_generic_request_failure() to determine which dormant/delayed
+ * Called from I/O completion to determine which dormant/delayed
* and ordered cmds need to have their tasks added to the execution queue.
*/
static void transport_complete_task_attr(struct se_cmd *cmd)
@@ -3557,12 +3221,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
}
-static int transport_complete_qf(struct se_cmd *cmd)
+static void transport_complete_qf(struct se_cmd *cmd)
{
int ret = 0;
- if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
- return cmd->se_tfo->queue_status(cmd);
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ transport_complete_task_attr(cmd);
+
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret)
+ goto out;
+ }
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
@@ -3572,7 +3242,7 @@ static int transport_complete_qf(struct se_cmd *cmd)
if (cmd->t_bidi_data_sg) {
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret < 0)
- return ret;
+ break;
}
/* Fall through for DMA_TO_DEVICE */
case DMA_NONE:
@@ -3582,17 +3252,20 @@ static int transport_complete_qf(struct se_cmd *cmd)
break;
}
- return ret;
+out:
+ if (ret < 0) {
+ transport_handle_queue_full(cmd, cmd->se_dev);
+ return;
+ }
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
}
static void transport_handle_queue_full(
struct se_cmd *cmd,
- struct se_device *dev,
- int (*qf_callback)(struct se_cmd *))
+ struct se_device *dev)
{
spin_lock_irq(&dev->qf_cmd_lock);
- cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
- cmd->transport_qf_callback = qf_callback;
list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
atomic_inc(&dev->dev_qf_count);
smp_mb__after_atomic_inc();
@@ -3601,9 +3274,11 @@ static void transport_handle_queue_full(
schedule_work(&cmd->se_dev->qf_work_queue);
}
-static void transport_generic_complete_ok(struct se_cmd *cmd)
+static void target_complete_ok_work(struct work_struct *work)
{
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
int reason = 0, ret;
+
/*
* Check if we need to move delayed/dormant tasks from cmds on the
* delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
@@ -3618,14 +3293,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
schedule_work(&cmd->se_dev->qf_work_queue);
- if (cmd->transport_qf_callback) {
- ret = cmd->transport_qf_callback(cmd);
- if (ret < 0)
- goto queue_full;
-
- cmd->transport_qf_callback = NULL;
- goto done;
- }
/*
* Check if we need to retrieve a sense buffer from
* the struct se_cmd in question.
@@ -3701,7 +3368,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
break;
}
-done:
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
@@ -3709,34 +3375,35 @@ done:
queue_full:
pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
" data_direction: %d\n", cmd, cmd->data_direction);
- transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
+ cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
+ transport_handle_queue_full(cmd, cmd->se_dev);
}
static void transport_free_dev_tasks(struct se_cmd *cmd)
{
struct se_task *task, *task_tmp;
unsigned long flags;
+ LIST_HEAD(dispose_list);
spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry_safe(task, task_tmp,
&cmd->t_task_list, t_list) {
- if (atomic_read(&task->task_active))
- continue;
+ if (!(task->task_flags & TF_ACTIVE))
+ list_move_tail(&task->t_list, &dispose_list);
+ }
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ while (!list_empty(&dispose_list)) {
+ task = list_first_entry(&dispose_list, struct se_task, t_list);
- kfree(task->task_sg_bidi);
- kfree(task->task_sg);
+ if (task->task_sg != cmd->t_data_sg &&
+ task->task_sg != cmd->t_bidi_data_sg)
+ kfree(task->task_sg);
list_del(&task->t_list);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (task->se_dev)
- task->se_dev->transport->free_task(task);
- else
- pr_err("task[%u] - task->se_dev is NULL\n",
- task->task_no);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
+ cmd->se_dev->transport->free_task(task);
}
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
@@ -3764,89 +3431,43 @@ static inline void transport_free_pages(struct se_cmd *cmd)
cmd->t_bidi_data_nents = 0;
}
-static inline void transport_release_tasks(struct se_cmd *cmd)
-{
- transport_free_dev_tasks(cmd);
-}
-
-static inline int transport_dec_and_check(struct se_cmd *cmd)
+/**
+ * transport_put_cmd - release a reference to a command
+ * @cmd: command to release
+ *
+ * This routine releases our reference to the command and frees it if possible.
+ */
+static void transport_put_cmd(struct se_cmd *cmd)
{
unsigned long flags;
+ int free_tasks = 0;
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (atomic_read(&cmd->t_fe_count)) {
- if (!atomic_dec_and_test(&cmd->t_fe_count)) {
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- return 1;
- }
+ if (!atomic_dec_and_test(&cmd->t_fe_count))
+ goto out_busy;
}
if (atomic_read(&cmd->t_se_count)) {
- if (!atomic_dec_and_test(&cmd->t_se_count)) {
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- return 1;
- }
+ if (!atomic_dec_and_test(&cmd->t_se_count))
+ goto out_busy;
}
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return 0;
-}
-
-static void transport_release_fe_cmd(struct se_cmd *cmd)
-{
- unsigned long flags;
-
- if (transport_dec_and_check(cmd))
- return;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (!atomic_read(&cmd->transport_dev_active)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- goto free_pages;
- }
- atomic_set(&cmd->transport_dev_active, 0);
- transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- transport_release_tasks(cmd);
-free_pages:
- transport_free_pages(cmd);
- transport_free_se_cmd(cmd);
- cmd->se_tfo->release_cmd(cmd);
-}
-
-static int
-transport_generic_remove(struct se_cmd *cmd, int session_reinstatement)
-{
- unsigned long flags;
-
- if (transport_dec_and_check(cmd)) {
- if (session_reinstatement) {
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- }
- return 1;
- }
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (!atomic_read(&cmd->transport_dev_active)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- goto free_pages;
+ if (atomic_read(&cmd->transport_dev_active)) {
+ atomic_set(&cmd->transport_dev_active, 0);
+ transport_all_task_dev_remove_state(cmd);
+ free_tasks = 1;
}
- atomic_set(&cmd->transport_dev_active, 0);
- transport_all_task_dev_remove_state(cmd);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_release_tasks(cmd);
+ if (free_tasks != 0)
+ transport_free_dev_tasks(cmd);
-free_pages:
transport_free_pages(cmd);
transport_release_cmd(cmd);
- return 0;
+ return;
+out_busy:
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
/*
@@ -3888,62 +3509,6 @@ int transport_generic_map_mem_to_cmd(
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
-static int transport_new_cmd_obj(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- int set_counts = 1, rc, task_cdbs;
-
- /*
- * Setup any BIDI READ tasks and memory from
- * cmd->t_mem_bidi_list so the READ struct se_tasks
- * are queued first for the non pSCSI passthrough case.
- */
- if (cmd->t_bidi_data_sg &&
- (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
- rc = transport_allocate_tasks(cmd,
- cmd->t_task_lba,
- DMA_FROM_DEVICE,
- cmd->t_bidi_data_sg,
- cmd->t_bidi_data_nents);
- if (rc <= 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
- }
- atomic_inc(&cmd->t_fe_count);
- atomic_inc(&cmd->t_se_count);
- set_counts = 0;
- }
- /*
- * Setup the tasks and memory from cmd->t_mem_list
- * Note for BIDI transfers this will contain the WRITE payload
- */
- task_cdbs = transport_allocate_tasks(cmd,
- cmd->t_task_lba,
- cmd->data_direction,
- cmd->t_data_sg,
- cmd->t_data_nents);
- if (task_cdbs <= 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
- }
-
- if (set_counts) {
- atomic_inc(&cmd->t_fe_count);
- atomic_inc(&cmd->t_se_count);
- }
-
- cmd->t_task_list_num = task_cdbs;
-
- atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
- atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
- atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
- return 0;
-}
-
void *transport_kmap_first_data_page(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
@@ -4054,15 +3619,13 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
/*
* For the padded tasks, use the extra SGL vector allocated
* in transport_allocate_data_tasks() for the sg_prev_nents
- * offset into sg_chain() above.. The last task of a
- * multi-task list, or a single task will not have
- * task->task_sg_padded set..
+ * offset into sg_chain() above.
+ *
+ * We do not need the padding for the last task (or a single
+ * task), but in that case we will never use the sg_prev_nents
+ * value below which would be incorrect.
*/
- if (task->task_padded_sg)
- sg_prev_nents = (task->task_sg_nents + 1);
- else
- sg_prev_nents = task->task_sg_nents;
-
+ sg_prev_nents = (task->task_sg_nents + 1);
sg_prev = task->task_sg;
}
/*
@@ -4092,30 +3655,60 @@ EXPORT_SYMBOL(transport_do_task_sg_chain);
/*
* Break up cmd into chunks transport can handle
*/
-static int transport_allocate_data_tasks(
- struct se_cmd *cmd,
- unsigned long long lba,
+static int
+transport_allocate_data_tasks(struct se_cmd *cmd,
enum dma_data_direction data_direction,
- struct scatterlist *sgl,
- unsigned int sgl_nents)
+ struct scatterlist *cmd_sg, unsigned int sgl_nents)
{
- unsigned char *cdb = NULL;
- struct se_task *task;
struct se_device *dev = cmd->se_dev;
- unsigned long flags;
- int task_count, i, ret;
- sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
- u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
- struct scatterlist *sg;
- struct scatterlist *cmd_sg;
+ int task_count, i;
+ unsigned long long lba;
+ sector_t sectors, dev_max_sectors;
+ u32 sector_size;
+
+ if (transport_cmd_get_valid_sectors(cmd) < 0)
+ return -EINVAL;
+
+ dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
+ sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
WARN_ON(cmd->data_length % sector_size);
+
+ lba = cmd->t_task_lba;
sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
-
- cmd_sg = sgl;
+
+ /*
+ * If we need just a single task reuse the SG list in the command
+ * and avoid a lot of work.
+ */
+ if (task_count == 1) {
+ struct se_task *task;
+ unsigned long flags;
+
+ task = transport_generic_get_task(cmd, data_direction);
+ if (!task)
+ return -ENOMEM;
+
+ task->task_sg = cmd_sg;
+ task->task_sg_nents = sgl_nents;
+
+ task->task_lba = lba;
+ task->task_sectors = sectors;
+ task->task_size = task->task_sectors * sector_size;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ list_add_tail(&task->t_list, &cmd->t_task_list);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ return task_count;
+ }
+
for (i = 0; i < task_count; i++) {
+ struct se_task *task;
unsigned int task_size, task_sg_nents_padded;
+ struct scatterlist *sg;
+ unsigned long flags;
int count;
task = transport_generic_get_task(cmd, data_direction);
@@ -4126,14 +3719,6 @@ static int transport_allocate_data_tasks(
task->task_sectors = min(sectors, dev_max_sectors);
task->task_size = task->task_sectors * sector_size;
- cdb = dev->transport->get_cdb(task);
- BUG_ON(!cdb);
-
- memcpy(cdb, cmd->t_task_cdb,
- scsi_command_size(cmd->t_task_cdb));
-
- /* Update new cdb with updated lba/sectors */
- cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
/*
* This now assumes that passed sg_ents are in PAGE_SIZE chunks
* in order to calculate the number per task SGL entries
@@ -4149,7 +3734,6 @@ static int transport_allocate_data_tasks(
*/
if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
task_sg_nents_padded = (task->task_sg_nents + 1);
- task->task_padded_sg = 1;
} else
task_sg_nents_padded = task->task_sg_nents;
@@ -4181,20 +3765,6 @@ static int transport_allocate_data_tasks(
list_add_tail(&task->t_list, &cmd->t_task_list);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
- /*
- * Now perform the memory map of task->task_sg[] into backend
- * subsystem memory..
- */
- list_for_each_entry(task, &cmd->t_task_list, t_list) {
- if (atomic_read(&task->task_sent))
- continue;
- if (!dev->transport->map_data_SG)
- continue;
-
- ret = dev->transport->map_data_SG(task);
- if (ret < 0)
- return 0;
- }
return task_count;
}
@@ -4202,30 +3772,14 @@ static int transport_allocate_data_tasks(
static int
transport_allocate_control_task(struct se_cmd *cmd)
{
- struct se_device *dev = cmd->se_dev;
- unsigned char *cdb;
struct se_task *task;
unsigned long flags;
- int ret = 0;
task = transport_generic_get_task(cmd, cmd->data_direction);
if (!task)
return -ENOMEM;
- cdb = dev->transport->get_cdb(task);
- BUG_ON(!cdb);
- memcpy(cdb, cmd->t_task_cdb,
- scsi_command_size(cmd->t_task_cdb));
-
- task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
- GFP_KERNEL);
- if (!task->task_sg) {
- cmd->se_dev->transport->free_task(task);
- return -ENOMEM;
- }
-
- memcpy(task->task_sg, cmd->t_data_sg,
- sizeof(struct scatterlist) * cmd->t_data_nents);
+ task->task_sg = cmd->t_data_sg;
task->task_size = cmd->data_length;
task->task_sg_nents = cmd->t_data_nents;
@@ -4233,53 +3787,20 @@ transport_allocate_control_task(struct se_cmd *cmd)
list_add_tail(&task->t_list, &cmd->t_task_list);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
- if (dev->transport->map_control_SG)
- ret = dev->transport->map_control_SG(task);
- } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
- if (dev->transport->cdb_none)
- ret = dev->transport->cdb_none(task);
- } else {
- pr_err("target: Unknown control cmd type!\n");
- BUG();
- }
-
/* Success! Return number of tasks allocated */
- if (ret == 0)
- return 1;
- return ret;
-}
-
-static u32 transport_allocate_tasks(
- struct se_cmd *cmd,
- unsigned long long lba,
- enum dma_data_direction data_direction,
- struct scatterlist *sgl,
- unsigned int sgl_nents)
-{
- if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
- if (transport_cmd_get_valid_sectors(cmd) < 0)
- return -EINVAL;
-
- return transport_allocate_data_tasks(cmd, lba, data_direction,
- sgl, sgl_nents);
- } else
- return transport_allocate_control_task(cmd);
-
+ return 1;
}
-
-/* transport_generic_new_cmd(): Called from transport_processing_thread()
- *
- * Allocate storage transport resources from a set of values predefined
- * by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
- * Any non zero return here is treated as an "out of resource' op here.
+/*
+ * Allocate any required ressources to execute the command, and either place
+ * it on the execution queue if possible. For writes we might not have the
+ * payload yet, thus notify the fabric via a call to ->write_pending instead.
*/
- /*
- * Generate struct se_task(s) and/or their payloads for this CDB.
- */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
+ struct se_device *dev = cmd->se_dev;
+ int task_cdbs, task_cdbs_bidi = 0;
+ int set_counts = 1;
int ret = 0;
/*
@@ -4293,16 +3814,45 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
if (ret < 0)
return ret;
}
+
/*
- * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for
- * control or data CDB types, and perform the map to backend subsystem
- * code from SGL memory allocated here by transport_generic_get_mem(), or
- * via pre-existing SGL memory setup explictly by fabric module code with
- * transport_generic_map_mem_to_cmd().
+ * For BIDI command set up the read tasks first.
*/
- ret = transport_new_cmd_obj(cmd);
- if (ret < 0)
- return ret;
+ if (cmd->t_bidi_data_sg &&
+ dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+ BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
+
+ task_cdbs_bidi = transport_allocate_data_tasks(cmd,
+ DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
+ cmd->t_bidi_data_nents);
+ if (task_cdbs_bidi <= 0)
+ goto out_fail;
+
+ atomic_inc(&cmd->t_fe_count);
+ atomic_inc(&cmd->t_se_count);
+ set_counts = 0;
+ }
+
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+ task_cdbs = transport_allocate_data_tasks(cmd,
+ cmd->data_direction, cmd->t_data_sg,
+ cmd->t_data_nents);
+ } else {
+ task_cdbs = transport_allocate_control_task(cmd);
+ }
+
+ if (task_cdbs <= 0)
+ goto out_fail;
+
+ if (set_counts) {
+ atomic_inc(&cmd->t_fe_count);
+ atomic_inc(&cmd->t_se_count);
+ }
+
+ cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
+ atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
+ atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
+
/*
* For WRITEs, let the fabric know its buffer is ready..
* This WRITE struct se_cmd (and all of its associated struct se_task's)
@@ -4320,6 +3870,11 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
*/
transport_execute_tasks(cmd);
return 0;
+
+out_fail:
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
@@ -4333,15 +3888,15 @@ void transport_generic_process_write(struct se_cmd *cmd)
}
EXPORT_SYMBOL(transport_generic_process_write);
-static int transport_write_pending_qf(struct se_cmd *cmd)
+static void transport_write_pending_qf(struct se_cmd *cmd)
{
- return cmd->se_tfo->write_pending(cmd);
+ if (cmd->se_tfo->write_pending(cmd) == -EAGAIN) {
+ pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
+ cmd);
+ transport_handle_queue_full(cmd, cmd->se_dev);
+ }
}
-/* transport_generic_write_pending():
- *
- *
- */
static int transport_generic_write_pending(struct se_cmd *cmd)
{
unsigned long flags;
@@ -4351,17 +3906,6 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
cmd->t_state = TRANSPORT_WRITE_PENDING;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (cmd->transport_qf_callback) {
- ret = cmd->transport_qf_callback(cmd);
- if (ret == -EAGAIN)
- goto queue_full;
- else if (ret < 0)
- return ret;
-
- cmd->transport_qf_callback = NULL;
- return 0;
- }
-
/*
* Clear the se_cmd for WRITE_PENDING status in order to set
* cmd->t_transport_active=0 so that transport_generic_handle_data
@@ -4386,61 +3930,52 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
- transport_handle_queue_full(cmd, cmd->se_dev,
- transport_write_pending_qf);
+ transport_handle_queue_full(cmd, cmd->se_dev);
return ret;
}
+/**
+ * transport_release_cmd - free a command
+ * @cmd: command to free
+ *
+ * This routine unconditionally frees a command, and reference counting
+ * or list removal must be done in the caller.
+ */
void transport_release_cmd(struct se_cmd *cmd)
{
BUG_ON(!cmd->se_tfo);
- transport_free_se_cmd(cmd);
+ if (cmd->se_tmr_req)
+ core_tmr_release_req(cmd->se_tmr_req);
+ if (cmd->t_task_cdb != cmd->__t_task_cdb)
+ kfree(cmd->t_task_cdb);
cmd->se_tfo->release_cmd(cmd);
}
EXPORT_SYMBOL(transport_release_cmd);
-/* transport_generic_free_cmd():
- *
- * Called from processing frontend to release storage engine resources
- */
-void transport_generic_free_cmd(
- struct se_cmd *cmd,
- int wait_for_tasks,
- int session_reinstatement)
+void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
+ if (wait_for_tasks && cmd->se_tmr_req)
+ transport_wait_for_tasks(cmd);
+
transport_release_cmd(cmd);
- else {
+ } else {
+ if (wait_for_tasks)
+ transport_wait_for_tasks(cmd);
+
core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
- if (cmd->se_lun) {
-#if 0
- pr_debug("cmd: %p ITT: 0x%08x contains"
- " cmd->se_lun\n", cmd,
- cmd->se_tfo->get_task_tag(cmd));
-#endif
+ if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
- }
-
- if (wait_for_tasks && cmd->transport_wait_for_tasks)
- cmd->transport_wait_for_tasks(cmd, 0, 0);
transport_free_dev_tasks(cmd);
- transport_generic_remove(cmd, session_reinstatement);
+ transport_put_cmd(cmd);
}
}
EXPORT_SYMBOL(transport_generic_free_cmd);
-static void transport_nop_wait_for_tasks(
- struct se_cmd *cmd,
- int remove_cmd,
- int session_reinstatement)
-{
- return;
-}
-
/* transport_lun_wait_for_tasks():
*
* Called from ConfigFS context to stop the passed struct se_cmd to allow
@@ -4479,7 +4014,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
cmd->se_tfo->get_task_tag(cmd));
}
- transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
+ transport_remove_cmd_from_queue(cmd);
return 0;
}
@@ -4610,22 +4145,30 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
return 0;
}
-/* transport_generic_wait_for_tasks():
+/**
+ * transport_wait_for_tasks - wait for completion to occur
+ * @cmd: command to wait
*
- * Called from frontend or passthrough context to wait for storage engine
- * to pause and/or release frontend generated struct se_cmd.
+ * Called from frontend fabric context to wait for storage engine
+ * to pause and/or release frontend generated struct se_cmd.
*/
-static void transport_generic_wait_for_tasks(
- struct se_cmd *cmd,
- int remove_cmd,
- int session_reinstatement)
+void transport_wait_for_tasks(struct se_cmd *cmd)
{
unsigned long flags;
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
- return;
-
spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
+ /*
+ * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
+ * has been set in transport_set_supported_SAM_opcode().
+ */
+ if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
/*
* If we are already stopped due to an external event (ie: LUN shutdown)
* sleep until the connection can have the passed struct se_cmd back.
@@ -4665,16 +4208,17 @@ static void transport_generic_wait_for_tasks(
atomic_set(&cmd->transport_lun_stop, 0);
}
if (!atomic_read(&cmd->t_transport_active) ||
- atomic_read(&cmd->t_transport_aborted))
- goto remove;
+ atomic_read(&cmd->t_transport_aborted)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
atomic_set(&cmd->t_transport_stop, 1);
pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
- " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
- " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
- cmd->deferred_t_state);
+ " i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
+ cmd, cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -4689,13 +4233,10 @@ static void transport_generic_wait_for_tasks(
pr_debug("wait_for_tasks: Stopped wait_for_compltion("
"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
cmd->se_tfo->get_task_tag(cmd));
-remove:
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (!remove_cmd)
- return;
- transport_generic_free_cmd(cmd, 0, session_reinstatement);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
+EXPORT_SYMBOL(transport_wait_for_tasks);
static int transport_get_sense_codes(
struct se_cmd *cmd,
@@ -4920,6 +4461,15 @@ EXPORT_SYMBOL(transport_check_aborted_status);
void transport_send_task_abort(struct se_cmd *cmd)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
/*
* If there are still expected incoming fabric WRITEs, we wait
* until until they have completed before sending a TASK_ABORTED
@@ -4984,184 +4534,10 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
cmd->se_tfo->queue_tm_rsp(cmd);
- transport_cmd_check_stop(cmd, 2, 0);
+ transport_cmd_check_stop_to_fabric(cmd);
return 0;
}
-/*
- * Called with spin_lock_irq(&dev->execute_task_lock); held
- *
- */
-static struct se_task *
-transport_get_task_from_state_list(struct se_device *dev)
-{
- struct se_task *task;
-
- if (list_empty(&dev->state_task_list))
- return NULL;
-
- list_for_each_entry(task, &dev->state_task_list, t_state_list)
- break;
-
- list_del(&task->t_state_list);
- atomic_set(&task->task_state_active, 0);
-
- return task;
-}
-
-static void transport_processing_shutdown(struct se_device *dev)
-{
- struct se_cmd *cmd;
- struct se_task *task;
- unsigned long flags;
- /*
- * Empty the struct se_device's struct se_task state list.
- */
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- while ((task = transport_get_task_from_state_list(dev))) {
- if (!task->task_se_cmd) {
- pr_err("task->task_se_cmd is NULL!\n");
- continue;
- }
- cmd = task->task_se_cmd;
-
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
-
- pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
- " i_state: %d, t_state/def_t_state:"
- " %d/%d cdb: 0x%02x\n", cmd, task,
- cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state, cmd->deferred_t_state,
- cmd->t_task_cdb[0]);
- pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
- " %d t_task_cdbs_sent: %d -- t_transport_active: %d"
- " t_transport_stop: %d t_transport_sent: %d\n",
- cmd->se_tfo->get_task_tag(cmd),
- cmd->t_task_list_num,
- atomic_read(&cmd->t_task_cdbs_left),
- atomic_read(&cmd->t_task_cdbs_sent),
- atomic_read(&cmd->t_transport_active),
- atomic_read(&cmd->t_transport_stop),
- atomic_read(&cmd->t_transport_sent));
-
- if (atomic_read(&task->task_active)) {
- atomic_set(&task->task_stop, 1);
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
-
- pr_debug("Waiting for task: %p to shutdown for dev:"
- " %p\n", task, dev);
- wait_for_completion(&task->task_stop_comp);
- pr_debug("Completed task: %p shutdown for dev: %p\n",
- task, dev);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- atomic_dec(&cmd->t_task_cdbs_left);
-
- atomic_set(&task->task_active, 0);
- atomic_set(&task->task_stop, 0);
- } else {
- if (atomic_read(&task->task_execute_queue) != 0)
- transport_remove_task_from_execute_queue(task, dev);
- }
- __transport_stop_task_timer(task, &flags);
-
- if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
-
- pr_debug("Skipping task: %p, dev: %p for"
- " t_task_cdbs_ex_left: %d\n", task, dev,
- atomic_read(&cmd->t_task_cdbs_ex_left));
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- continue;
- }
-
- if (atomic_read(&cmd->t_transport_active)) {
- pr_debug("got t_transport_active = 1 for task: %p, dev:"
- " %p\n", task, dev);
-
- if (atomic_read(&cmd->t_fe_count)) {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
- transport_send_check_condition_and_sense(
- cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
- 0);
- transport_remove_cmd_from_queue(cmd,
- &cmd->se_dev->dev_queue_obj);
-
- transport_lun_remove_cmd(cmd);
- transport_cmd_check_stop(cmd, 1, 0);
- } else {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
-
- transport_remove_cmd_from_queue(cmd,
- &cmd->se_dev->dev_queue_obj);
-
- transport_lun_remove_cmd(cmd);
-
- if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0);
- }
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- continue;
- }
- pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n",
- task, dev);
-
- if (atomic_read(&cmd->t_fe_count)) {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
- transport_send_check_condition_and_sense(cmd,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
- transport_remove_cmd_from_queue(cmd,
- &cmd->se_dev->dev_queue_obj);
-
- transport_lun_remove_cmd(cmd);
- transport_cmd_check_stop(cmd, 1, 0);
- } else {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
-
- transport_remove_cmd_from_queue(cmd,
- &cmd->se_dev->dev_queue_obj);
- transport_lun_remove_cmd(cmd);
-
- if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0);
- }
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- }
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
- /*
- * Empty the struct se_device's struct se_cmd list.
- */
- while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
-
- pr_debug("From Device Queue: cmd: %p t_state: %d\n",
- cmd, cmd->t_state);
-
- if (atomic_read(&cmd->t_fe_count)) {
- transport_send_check_condition_and_sense(cmd,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
-
- transport_lun_remove_cmd(cmd);
- transport_cmd_check_stop(cmd, 1, 0);
- } else {
- transport_lun_remove_cmd(cmd);
- if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0);
- }
- }
-}
-
/* transport_processing_thread():
*
*
@@ -5181,14 +4557,6 @@ static int transport_processing_thread(void *param)
if (ret < 0)
goto out;
- spin_lock_irq(&dev->dev_status_lock);
- if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
- spin_unlock_irq(&dev->dev_status_lock);
- transport_processing_shutdown(dev);
- continue;
- }
- spin_unlock_irq(&dev->dev_status_lock);
-
get_cmd:
__transport_execute_tasks(dev);
@@ -5197,6 +4565,9 @@ get_cmd:
continue;
switch (cmd->t_state) {
+ case TRANSPORT_NEW_CMD:
+ BUG();
+ break;
case TRANSPORT_NEW_CMD_MAP:
if (!cmd->se_tfo->new_cmd_map) {
pr_err("cmd->se_tfo->new_cmd_map is"
@@ -5206,19 +4577,17 @@ get_cmd:
ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, NULL,
+ transport_generic_request_failure(cmd,
0, (cmd->data_direction !=
DMA_TO_DEVICE));
break;
}
- /* Fall through */
- case TRANSPORT_NEW_CMD:
ret = transport_generic_new_cmd(cmd);
if (ret == -EAGAIN)
break;
else if (ret < 0) {
cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, NULL,
+ transport_generic_request_failure(cmd,
0, (cmd->data_direction !=
DMA_TO_DEVICE));
}
@@ -5226,33 +4595,22 @@ get_cmd:
case TRANSPORT_PROCESS_WRITE:
transport_generic_process_write(cmd);
break;
- case TRANSPORT_COMPLETE_OK:
- transport_stop_all_task_timers(cmd);
- transport_generic_complete_ok(cmd);
- break;
- case TRANSPORT_REMOVE:
- transport_generic_remove(cmd, 0);
- break;
case TRANSPORT_FREE_CMD_INTR:
- transport_generic_free_cmd(cmd, 0, 0);
+ transport_generic_free_cmd(cmd, 0);
break;
case TRANSPORT_PROCESS_TMR:
transport_generic_do_tmr(cmd);
break;
- case TRANSPORT_COMPLETE_FAILURE:
- transport_generic_request_failure(cmd, NULL, 1, 1);
- break;
- case TRANSPORT_COMPLETE_TIMEOUT:
- transport_stop_all_task_timers(cmd);
- transport_generic_request_timeout(cmd);
- break;
case TRANSPORT_COMPLETE_QF_WP:
- transport_generic_write_pending(cmd);
+ transport_write_pending_qf(cmd);
+ break;
+ case TRANSPORT_COMPLETE_QF_OK:
+ transport_complete_qf(cmd);
break;
default:
- pr_err("Unknown t_state: %d deferred_t_state:"
- " %d for ITT: 0x%08x i_state: %d on SE LUN:"
- " %u\n", cmd->t_state, cmd->deferred_t_state,
+ pr_err("Unknown t_state: %d for ITT: 0x%08x "
+ "i_state: %d on SE LUN: %u\n",
+ cmd->t_state,
cmd->se_tfo->get_task_tag(cmd),
cmd->se_tfo->get_cmd_state(cmd),
cmd->se_lun->unpacked_lun);
@@ -5263,7 +4621,8 @@ get_cmd:
}
out:
- transport_release_all_cmds(dev);
+ WARN_ON(!list_empty(&dev->state_task_list));
+ WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
dev->process_thread = NULL;
return 0;
}
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 31e3c652527..50a480db7a6 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -24,7 +24,6 @@
*
******************************************************************************/
-#include <linux/version.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 80fbcde00cb..6195026cc7b 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
@@ -115,7 +114,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)
void ft_check_stop_free(struct se_cmd *se_cmd)
{
- transport_generic_free_cmd(se_cmd, 0, 0);
+ transport_generic_free_cmd(se_cmd, 0);
}
/*
@@ -268,9 +267,8 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
if (IS_ERR(fp)) {
/* XXX need to find cmd if queued */
- cmd->se_cmd.t_state = TRANSPORT_REMOVE;
cmd->seq = NULL;
- transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
return;
}
@@ -288,7 +286,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
__func__, fh->fh_r_ctl);
ft_invl_hw_context(cmd);
fc_frame_free(fp);
- transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
}
}
@@ -397,7 +395,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
}
pr_debug("alloc tm cmd fn %d\n", tm_func);
- tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
+ tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);
if (!tmr) {
pr_debug("alloc failed\n");
ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
@@ -421,7 +419,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
sess = cmd->sess;
transport_send_check_condition_and_sense(&cmd->se_cmd,
cmd->se_cmd.scsi_sense_reason, 0);
- transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
ft_sess_put(sess);
return;
}
@@ -628,7 +626,7 @@ static void ft_send_work(struct work_struct *work)
if (ret == -ENOMEM) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
- transport_generic_free_cmd(se_cmd, 0, 0);
+ transport_generic_free_cmd(se_cmd, 0);
return;
}
if (ret == -EINVAL) {
@@ -637,10 +635,10 @@ static void ft_send_work(struct work_struct *work)
else
transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0);
- transport_generic_free_cmd(se_cmd, 0, 0);
+ transport_generic_free_cmd(se_cmd, 0);
return;
}
- transport_generic_handle_cdb(se_cmd);
+ transport_handle_cdb_direct(se_cmd);
return;
err:
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 8fa39b74f22..5f770412ca4 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
@@ -32,6 +31,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
+#include <linux/kernel.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -71,10 +71,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
{
const char *cp;
char c;
- u32 nibble;
u32 byte = 0;
u32 pos = 0;
u32 err;
+ int val;
*wwn = 0;
for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
@@ -95,13 +95,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
return cp - name;
}
err = 3;
- if (isdigit(c))
- nibble = c - '0';
- else if (isxdigit(c) && (islower(c) || !strict))
- nibble = tolower(c) - 'a' + 10;
- else
+ val = hex_to_bin(c);
+ if (val < 0 || (strict && isupper(c)))
goto fail;
- *wwn = (*wwn << 4) | nibble;
+ *wwn = (*wwn << 4) | val;
}
err = 4;
fail:
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index d35ea5a3d56..1369b1cb103 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index dbb5eaeee39..326921385af 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index bd7cc052799..8816f53e004 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -60,9 +60,13 @@ config VT_CONSOLE
If unsure, say Y.
+config VT_CONSOLE_SLEEP
+ def_bool y
+ depends on VT_CONSOLE && PM_SLEEP
+
config HW_CONSOLE
bool
- depends on VT && !S390 && !UML
+ depends on VT && !UML
default y
config VT_HW_CONSOLE_BINDING
@@ -350,3 +354,37 @@ config TRACE_SINK
If you select this option, you need to select
"Trace data router for MIPI P1149.7 cJTAG standard".
+
+config PPC_EPAPR_HV_BYTECHAN
+ tristate "ePAPR hypervisor byte channel driver"
+ depends on PPC
+ help
+ This driver creates /dev entries for each ePAPR hypervisor byte
+ channel, thereby allowing applications to communicate with byte
+ channels as if they were serial ports.
+
+config PPC_EARLY_DEBUG_EHV_BC
+ bool "Early console (udbg) support for ePAPR hypervisors"
+ depends on PPC_EPAPR_HV_BYTECHAN
+ help
+ Select this option to enable early console (a.k.a. "udbg") support
+ via an ePAPR byte channel. You also need to choose the byte channel
+ handle below.
+
+config PPC_EARLY_DEBUG_EHV_BC_HANDLE
+ int "Byte channel handle for early console (udbg)"
+ depends on PPC_EARLY_DEBUG_EHV_BC
+ default 0
+ help
+ If you want early console (udbg) output through a byte channel,
+ specify the handle of the byte channel to use.
+
+ For this to work, the byte channel driver must be compiled
+ in-kernel, not as a module.
+
+ Note that only one early console driver can be enabled, so don't
+ enable any others if you enable this one.
+
+ If the number you specify is not a valid byte channel handle, then
+ there simply will be no early console output. This is true also
+ if you don't boot under a hypervisor at all.
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index ea89b0bd15f..2953059530e 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -26,5 +26,6 @@ obj-$(CONFIG_ROCKETPORT) += rocket.o
obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
obj-$(CONFIG_SYNCLINK) += synclink.o
+obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
obj-y += ipwireless/
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 220579592c2..b84c83456dc 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1529,7 +1529,6 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct async_struct * info = tty->driver_data;
unsigned long orig_jiffies, char_time;
- int tty_was_locked = tty_locked();
int lsr;
if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent"))
@@ -1541,12 +1540,6 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
orig_jiffies = jiffies;
/*
- * tty_wait_until_sent is called from lots of places,
- * with or without the BTM.
- */
- if (!tty_was_locked)
- tty_lock();
- /*
* Set the check interval to be 1/5 of the estimated time to
* send a single character, and make it at least 1. The check
* interval should also be less than the timeout.
@@ -1586,8 +1579,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
break;
}
__set_current_state(TASK_RUNNING);
- if (!tty_was_locked)
- tty_unlock();
+
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
#endif
@@ -2025,7 +2017,7 @@ static int __init amiga_serial_probe(struct platform_device *pdev)
if (error)
goto fail_unregister;
- error = request_irq(IRQ_AMIGA_RBF, ser_rx_int, IRQF_DISABLED,
+ error = request_irq(IRQ_AMIGA_RBF, ser_rx_int, 0,
"serial RX", state);
if (error)
goto fail_free_irq;
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index c0e8f2eeb88..c9bf779481d 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -45,7 +45,6 @@
#undef CY_DEBUG_IO
#undef CY_DEBUG_COUNT
#undef CY_DEBUG_DTR
-#undef CY_DEBUG_WAIT_UNTIL_SENT
#undef CY_DEBUG_INTERRUPTS
#undef CY_16Y_HACK
#undef CY_ENABLE_MONITORING
@@ -1678,16 +1677,10 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
*/
if (!timeout || timeout > 2 * info->timeout)
timeout = 2 * info->timeout;
-#ifdef CY_DEBUG_WAIT_UNTIL_SENT
- printk(KERN_DEBUG "In cy_wait_until_sent(%d) check=%d, jiff=%lu...",
- timeout, char_time, jiffies);
-#endif
+
card = info->card;
if (!cy_is_Z(card)) {
while (cyy_readb(info, CySRER) & CyTxRdy) {
-#ifdef CY_DEBUG_WAIT_UNTIL_SENT
- printk(KERN_DEBUG "Not clean (jiff=%lu)...", jiffies);
-#endif
if (msleep_interruptible(jiffies_to_msecs(char_time)))
break;
if (timeout && time_after(jiffies, orig_jiffies +
@@ -1697,9 +1690,6 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
}
/* Run one more char cycle */
msleep_interruptible(jiffies_to_msecs(char_time * 5));
-#ifdef CY_DEBUG_WAIT_UNTIL_SENT
- printk(KERN_DEBUG "Clean (jiff=%lu)...done\n", jiffies);
-#endif
}
static void cy_flush_buffer(struct tty_struct *tty)
@@ -3377,7 +3367,7 @@ static int __init cy_detect_isa(void)
/* allocate IRQ */
if (request_irq(cy_isa_irq, cyy_interrupt,
- IRQF_DISABLED, "Cyclom-Y", &cy_card[j])) {
+ 0, "Cyclom-Y", &cy_card[j])) {
printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but "
"could not allocate IRQ#%d.\n",
(unsigned long)cy_isa_address, cy_isa_irq);
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
new file mode 100644
index 00000000000..1595dba0072
--- /dev/null
+++ b/drivers/tty/ehv_bytechan.c
@@ -0,0 +1,881 @@
+/* ePAPR hypervisor byte channel device driver
+ *
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * This driver support three distinct interfaces, all of which are related to
+ * ePAPR hypervisor byte channels.
+ *
+ * 1) An early-console (udbg) driver. This provides early console output
+ * through a byte channel. The byte channel handle must be specified in a
+ * Kconfig option.
+ *
+ * 2) A normal console driver. Output is sent to the byte channel designated
+ * for stdout in the device tree. The console driver is for handling kernel
+ * printk calls.
+ *
+ * 3) A tty driver, which is used to handle user-space input and output. The
+ * byte channel used for the console is designated as the default tty.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <asm/epapr_hcalls.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/circ_buf.h>
+#include <asm/udbg.h>
+
+/* The size of the transmit circular buffer. This must be a power of two. */
+#define BUF_SIZE 2048
+
+/* Per-byte channel private data */
+struct ehv_bc_data {
+ struct device *dev;
+ struct tty_port port;
+ uint32_t handle;
+ unsigned int rx_irq;
+ unsigned int tx_irq;
+
+ spinlock_t lock; /* lock for transmit buffer */
+ unsigned char buf[BUF_SIZE]; /* transmit circular buffer */
+ unsigned int head; /* circular buffer head */
+ unsigned int tail; /* circular buffer tail */
+
+ int tx_irq_enabled; /* true == TX interrupt is enabled */
+};
+
+/* Array of byte channel objects */
+static struct ehv_bc_data *bcs;
+
+/* Byte channel handle for stdout (and stdin), taken from device tree */
+static unsigned int stdout_bc;
+
+/* Virtual IRQ for the byte channel handle for stdin, taken from device tree */
+static unsigned int stdout_irq;
+
+/**************************** SUPPORT FUNCTIONS ****************************/
+
+/*
+ * Enable the transmit interrupt
+ *
+ * Unlike a serial device, byte channels have no mechanism for disabling their
+ * own receive or transmit interrupts. To emulate that feature, we toggle
+ * the IRQ in the kernel.
+ *
+ * We cannot just blindly call enable_irq() or disable_irq(), because these
+ * calls are reference counted. This means that we cannot call enable_irq()
+ * if interrupts are already enabled. This can happen in two situations:
+ *
+ * 1. The tty layer makes two back-to-back calls to ehv_bc_tty_write()
+ * 2. A transmit interrupt occurs while executing ehv_bc_tx_dequeue()
+ *
+ * To work around this, we keep a flag to tell us if the IRQ is enabled or not.
+ */
+static void enable_tx_interrupt(struct ehv_bc_data *bc)
+{
+ if (!bc->tx_irq_enabled) {
+ enable_irq(bc->tx_irq);
+ bc->tx_irq_enabled = 1;
+ }
+}
+
+static void disable_tx_interrupt(struct ehv_bc_data *bc)
+{
+ if (bc->tx_irq_enabled) {
+ disable_irq_nosync(bc->tx_irq);
+ bc->tx_irq_enabled = 0;
+ }
+}
+
+/*
+ * find the byte channel handle to use for the console
+ *
+ * The byte channel to be used for the console is specified via a "stdout"
+ * property in the /chosen node.
+ *
+ * For compatible with legacy device trees, we also look for a "stdout" alias.
+ */
+static int find_console_handle(void)
+{
+ struct device_node *np, *np2;
+ const char *sprop = NULL;
+ const uint32_t *iprop;
+
+ np = of_find_node_by_path("/chosen");
+ if (np)
+ sprop = of_get_property(np, "stdout-path", NULL);
+
+ if (!np || !sprop) {
+ of_node_put(np);
+ np = of_find_node_by_name(NULL, "aliases");
+ if (np)
+ sprop = of_get_property(np, "stdout", NULL);
+ }
+
+ if (!sprop) {
+ of_node_put(np);
+ return 0;
+ }
+
+ /* We don't care what the aliased node is actually called. We only
+ * care if it's compatible with "epapr,hv-byte-channel", because that
+ * indicates that it's a byte channel node. We use a temporary
+ * variable, 'np2', because we can't release 'np' until we're done with
+ * 'sprop'.
+ */
+ np2 = of_find_node_by_path(sprop);
+ of_node_put(np);
+ np = np2;
+ if (!np) {
+ pr_warning("ehv-bc: stdout node '%s' does not exist\n", sprop);
+ return 0;
+ }
+
+ /* Is it a byte channel? */
+ if (!of_device_is_compatible(np, "epapr,hv-byte-channel")) {
+ of_node_put(np);
+ return 0;
+ }
+
+ stdout_irq = irq_of_parse_and_map(np, 0);
+ if (stdout_irq == NO_IRQ) {
+ pr_err("ehv-bc: no 'interrupts' property in %s node\n", sprop);
+ of_node_put(np);
+ return 0;
+ }
+
+ /*
+ * The 'hv-handle' property contains the handle for this byte channel.
+ */
+ iprop = of_get_property(np, "hv-handle", NULL);
+ if (!iprop) {
+ pr_err("ehv-bc: no 'hv-handle' property in %s node\n",
+ np->name);
+ of_node_put(np);
+ return 0;
+ }
+ stdout_bc = be32_to_cpu(*iprop);
+
+ of_node_put(np);
+ return 1;
+}
+
+/*************************** EARLY CONSOLE DRIVER ***************************/
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC
+
+/*
+ * send a byte to a byte channel, wait if necessary
+ *
+ * This function sends a byte to a byte channel, and it waits and
+ * retries if the byte channel is full. It returns if the character
+ * has been sent, or if some error has occurred.
+ *
+ */
+static void byte_channel_spin_send(const char data)
+{
+ int ret, count;
+
+ do {
+ count = 1;
+ ret = ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
+ &count, &data);
+ } while (ret == EV_EAGAIN);
+}
+
+/*
+ * The udbg subsystem calls this function to display a single character.
+ * We convert CR to a CR/LF.
+ */
+static void ehv_bc_udbg_putc(char c)
+{
+ if (c == '\n')
+ byte_channel_spin_send('\r');
+
+ byte_channel_spin_send(c);
+}
+
+/*
+ * early console initialization
+ *
+ * PowerPC kernels support an early printk console, also known as udbg.
+ * This function must be called via the ppc_md.init_early function pointer.
+ * At this point, the device tree has been unflattened, so we can obtain the
+ * byte channel handle for stdout.
+ *
+ * We only support displaying of characters (putc). We do not support
+ * keyboard input.
+ */
+void __init udbg_init_ehv_bc(void)
+{
+ unsigned int rx_count, tx_count;
+ unsigned int ret;
+
+ /* Verify the byte channel handle */
+ ret = ev_byte_channel_poll(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
+ &rx_count, &tx_count);
+ if (ret)
+ return;
+
+ udbg_putc = ehv_bc_udbg_putc;
+ register_early_udbg_console();
+
+ udbg_printf("ehv-bc: early console using byte channel handle %u\n",
+ CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE);
+}
+
+#endif
+
+/****************************** CONSOLE DRIVER ******************************/
+
+static struct tty_driver *ehv_bc_driver;
+
+/*
+ * Byte channel console sending worker function.
+ *
+ * For consoles, if the output buffer is full, we should just spin until it
+ * clears.
+ */
+static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s,
+ unsigned int count)
+{
+ unsigned int len;
+ int ret = 0;
+
+ while (count) {
+ len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES);
+ do {
+ ret = ev_byte_channel_send(handle, &len, s);
+ } while (ret == EV_EAGAIN);
+ count -= len;
+ s += len;
+ }
+
+ return ret;
+}
+
+/*
+ * write a string to the console
+ *
+ * This function gets called to write a string from the kernel, typically from
+ * a printk(). This function spins until all data is written.
+ *
+ * We copy the data to a temporary buffer because we need to insert a \r in
+ * front of every \n. It's more efficient to copy the data to the buffer than
+ * it is to make multiple hcalls for each character or each newline.
+ */
+static void ehv_bc_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ char s2[EV_BYTE_CHANNEL_MAX_BYTES];
+ unsigned int i, j = 0;
+ char c;
+
+ for (i = 0; i < count; i++) {
+ c = *s++;
+
+ if (c == '\n')
+ s2[j++] = '\r';
+
+ s2[j++] = c;
+ if (j >= (EV_BYTE_CHANNEL_MAX_BYTES - 1)) {
+ if (ehv_bc_console_byte_channel_send(stdout_bc, s2, j))
+ return;
+ j = 0;
+ }
+ }
+
+ if (j)
+ ehv_bc_console_byte_channel_send(stdout_bc, s2, j);
+}
+
+/*
+ * When /dev/console is opened, the kernel iterates the console list looking
+ * for one with ->device and then calls that method. On success, it expects
+ * the passed-in int* to contain the minor number to use.
+ */
+static struct tty_driver *ehv_bc_console_device(struct console *co, int *index)
+{
+ *index = co->index;
+
+ return ehv_bc_driver;
+}
+
+static struct console ehv_bc_console = {
+ .name = "ttyEHV",
+ .write = ehv_bc_console_write,
+ .device = ehv_bc_console_device,
+ .flags = CON_PRINTBUFFER | CON_ENABLED,
+};
+
+/*
+ * Console initialization
+ *
+ * This is the first function that is called after the device tree is
+ * available, so here is where we determine the byte channel handle and IRQ for
+ * stdout/stdin, even though that information is used by the tty and character
+ * drivers.
+ */
+static int __init ehv_bc_console_init(void)
+{
+ if (!find_console_handle()) {
+ pr_debug("ehv-bc: stdout is not a byte channel\n");
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC
+ /* Print a friendly warning if the user chose the wrong byte channel
+ * handle for udbg.
+ */
+ if (stdout_bc != CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE)
+ pr_warning("ehv-bc: udbg handle %u is not the stdout handle\n",
+ CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE);
+#endif
+
+ /* add_preferred_console() must be called before register_console(),
+ otherwise it won't work. However, we don't want to enumerate all the
+ byte channels here, either, since we only care about one. */
+
+ add_preferred_console(ehv_bc_console.name, ehv_bc_console.index, NULL);
+ register_console(&ehv_bc_console);
+
+ pr_info("ehv-bc: registered console driver for byte channel %u\n",
+ stdout_bc);
+
+ return 0;
+}
+console_initcall(ehv_bc_console_init);
+
+/******************************** TTY DRIVER ********************************/
+
+/*
+ * byte channel receive interupt handler
+ *
+ * This ISR is called whenever data is available on a byte channel.
+ */
+static irqreturn_t ehv_bc_tty_rx_isr(int irq, void *data)
+{
+ struct ehv_bc_data *bc = data;
+ struct tty_struct *ttys = tty_port_tty_get(&bc->port);
+ unsigned int rx_count, tx_count, len;
+ int count;
+ char buffer[EV_BYTE_CHANNEL_MAX_BYTES];
+ int ret;
+
+ /* ttys could be NULL during a hangup */
+ if (!ttys)
+ return IRQ_HANDLED;
+
+ /* Find out how much data needs to be read, and then ask the TTY layer
+ * if it can handle that much. We want to ensure that every byte we
+ * read from the byte channel will be accepted by the TTY layer.
+ */
+ ev_byte_channel_poll(bc->handle, &rx_count, &tx_count);
+ count = tty_buffer_request_room(ttys, rx_count);
+
+ /* 'count' is the maximum amount of data the TTY layer can accept at
+ * this time. However, during testing, I was never able to get 'count'
+ * to be less than 'rx_count'. I'm not sure whether I'm calling it
+ * correctly.
+ */
+
+ while (count > 0) {
+ len = min_t(unsigned int, count, sizeof(buffer));
+
+ /* Read some data from the byte channel. This function will
+ * never return more than EV_BYTE_CHANNEL_MAX_BYTES bytes.
+ */
+ ev_byte_channel_receive(bc->handle, &len, buffer);
+
+ /* 'len' is now the amount of data that's been received. 'len'
+ * can't be zero, and most likely it's equal to one.
+ */
+
+ /* Pass the received data to the tty layer. */
+ ret = tty_insert_flip_string(ttys, buffer, len);
+
+ /* 'ret' is the number of bytes that the TTY layer accepted.
+ * If it's not equal to 'len', then it means the buffer is
+ * full, which should never happen. If it does happen, we can
+ * exit gracefully, but we drop the last 'len - ret' characters
+ * that we read from the byte channel.
+ */
+ if (ret != len)
+ break;
+
+ count -= len;
+ }
+
+ /* Tell the tty layer that we're done. */
+ tty_flip_buffer_push(ttys);
+
+ tty_kref_put(ttys);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * dequeue the transmit buffer to the hypervisor
+ *
+ * This function, which can be called in interrupt context, dequeues as much
+ * data as possible from the transmit buffer to the byte channel.
+ */
+static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc)
+{
+ unsigned int count;
+ unsigned int len, ret;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&bc->lock, flags);
+ len = min_t(unsigned int,
+ CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE),
+ EV_BYTE_CHANNEL_MAX_BYTES);
+
+ ret = ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail);
+
+ /* 'len' is valid only if the return code is 0 or EV_EAGAIN */
+ if (!ret || (ret == EV_EAGAIN))
+ bc->tail = (bc->tail + len) & (BUF_SIZE - 1);
+
+ count = CIRC_CNT(bc->head, bc->tail, BUF_SIZE);
+ spin_unlock_irqrestore(&bc->lock, flags);
+ } while (count && !ret);
+
+ spin_lock_irqsave(&bc->lock, flags);
+ if (CIRC_CNT(bc->head, bc->tail, BUF_SIZE))
+ /*
+ * If we haven't emptied the buffer, then enable the TX IRQ.
+ * We'll get an interrupt when there's more room in the
+ * hypervisor's output buffer.
+ */
+ enable_tx_interrupt(bc);
+ else
+ disable_tx_interrupt(bc);
+ spin_unlock_irqrestore(&bc->lock, flags);
+}
+
+/*
+ * byte channel transmit interupt handler
+ *
+ * This ISR is called whenever space becomes available for transmitting
+ * characters on a byte channel.
+ */
+static irqreturn_t ehv_bc_tty_tx_isr(int irq, void *data)
+{
+ struct ehv_bc_data *bc = data;
+ struct tty_struct *ttys = tty_port_tty_get(&bc->port);
+
+ ehv_bc_tx_dequeue(bc);
+ if (ttys) {
+ tty_wakeup(ttys);
+ tty_kref_put(ttys);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * This function is called when the tty layer has data for us send. We store
+ * the data first in a circular buffer, and then dequeue as much of that data
+ * as possible.
+ *
+ * We don't need to worry about whether there is enough room in the buffer for
+ * all the data. The purpose of ehv_bc_tty_write_room() is to tell the tty
+ * layer how much data it can safely send to us. We guarantee that
+ * ehv_bc_tty_write_room() will never lie, so the tty layer will never send us
+ * too much data.
+ */
+static int ehv_bc_tty_write(struct tty_struct *ttys, const unsigned char *s,
+ int count)
+{
+ struct ehv_bc_data *bc = ttys->driver_data;
+ unsigned long flags;
+ unsigned int len;
+ unsigned int written = 0;
+
+ while (1) {
+ spin_lock_irqsave(&bc->lock, flags);
+ len = CIRC_SPACE_TO_END(bc->head, bc->tail, BUF_SIZE);
+ if (count < len)
+ len = count;
+ if (len) {
+ memcpy(bc->buf + bc->head, s, len);
+ bc->head = (bc->head + len) & (BUF_SIZE - 1);
+ }
+ spin_unlock_irqrestore(&bc->lock, flags);
+ if (!len)
+ break;
+
+ s += len;
+ count -= len;
+ written += len;
+ }
+
+ ehv_bc_tx_dequeue(bc);
+
+ return written;
+}
+
+/*
+ * This function can be called multiple times for a given tty_struct, which is
+ * why we initialize bc->ttys in ehv_bc_tty_port_activate() instead.
+ *
+ * The tty layer will still call this function even if the device was not
+ * registered (i.e. tty_register_device() was not called). This happens
+ * because tty_register_device() is optional and some legacy drivers don't
+ * use it. So we need to check for that.
+ */
+static int ehv_bc_tty_open(struct tty_struct *ttys, struct file *filp)
+{
+ struct ehv_bc_data *bc = &bcs[ttys->index];
+
+ if (!bc->dev)
+ return -ENODEV;
+
+ return tty_port_open(&bc->port, ttys, filp);
+}
+
+/*
+ * Amazingly, if ehv_bc_tty_open() returns an error code, the tty layer will
+ * still call this function to close the tty device. So we can't assume that
+ * the tty port has been initialized.
+ */
+static void ehv_bc_tty_close(struct tty_struct *ttys, struct file *filp)
+{
+ struct ehv_bc_data *bc = &bcs[ttys->index];
+
+ if (bc->dev)
+ tty_port_close(&bc->port, ttys, filp);
+}
+
+/*
+ * Return the amount of space in the output buffer
+ *
+ * This is actually a contract between the driver and the tty layer outlining
+ * how much write room the driver can guarantee will be sent OR BUFFERED. This
+ * driver MUST honor the return value.
+ */
+static int ehv_bc_tty_write_room(struct tty_struct *ttys)
+{
+ struct ehv_bc_data *bc = ttys->driver_data;
+ unsigned long flags;
+ int count;
+
+ spin_lock_irqsave(&bc->lock, flags);
+ count = CIRC_SPACE(bc->head, bc->tail, BUF_SIZE);
+ spin_unlock_irqrestore(&bc->lock, flags);
+
+ return count;
+}
+
+/*
+ * Stop sending data to the tty layer
+ *
+ * This function is called when the tty layer's input buffers are getting full,
+ * so the driver should stop sending it data. The easiest way to do this is to
+ * disable the RX IRQ, which will prevent ehv_bc_tty_rx_isr() from being
+ * called.
+ *
+ * The hypervisor will continue to queue up any incoming data. If there is any
+ * data in the queue when the RX interrupt is enabled, we'll immediately get an
+ * RX interrupt.
+ */
+static void ehv_bc_tty_throttle(struct tty_struct *ttys)
+{
+ struct ehv_bc_data *bc = ttys->driver_data;
+
+ disable_irq(bc->rx_irq);
+}
+
+/*
+ * Resume sending data to the tty layer
+ *
+ * This function is called after previously calling ehv_bc_tty_throttle(). The
+ * tty layer's input buffers now have more room, so the driver can resume
+ * sending it data.
+ */
+static void ehv_bc_tty_unthrottle(struct tty_struct *ttys)
+{
+ struct ehv_bc_data *bc = ttys->driver_data;
+
+ /* If there is any data in the queue when the RX interrupt is enabled,
+ * we'll immediately get an RX interrupt.
+ */
+ enable_irq(bc->rx_irq);
+}
+
+static void ehv_bc_tty_hangup(struct tty_struct *ttys)
+{
+ struct ehv_bc_data *bc = ttys->driver_data;
+
+ ehv_bc_tx_dequeue(bc);
+ tty_port_hangup(&bc->port);
+}
+
+/*
+ * TTY driver operations
+ *
+ * If we could ask the hypervisor how much data is still in the TX buffer, or
+ * at least how big the TX buffers are, then we could implement the
+ * .wait_until_sent and .chars_in_buffer functions.
+ */
+static const struct tty_operations ehv_bc_ops = {
+ .open = ehv_bc_tty_open,
+ .close = ehv_bc_tty_close,
+ .write = ehv_bc_tty_write,
+ .write_room = ehv_bc_tty_write_room,
+ .throttle = ehv_bc_tty_throttle,
+ .unthrottle = ehv_bc_tty_unthrottle,
+ .hangup = ehv_bc_tty_hangup,
+};
+
+/*
+ * initialize the TTY port
+ *
+ * This function will only be called once, no matter how many times
+ * ehv_bc_tty_open() is called. That's why we register the ISR here, and also
+ * why we initialize tty_struct-related variables here.
+ */
+static int ehv_bc_tty_port_activate(struct tty_port *port,
+ struct tty_struct *ttys)
+{
+ struct ehv_bc_data *bc = container_of(port, struct ehv_bc_data, port);
+ int ret;
+
+ ttys->driver_data = bc;
+
+ ret = request_irq(bc->rx_irq, ehv_bc_tty_rx_isr, 0, "ehv-bc", bc);
+ if (ret < 0) {
+ dev_err(bc->dev, "could not request rx irq %u (ret=%i)\n",
+ bc->rx_irq, ret);
+ return ret;
+ }
+
+ /* request_irq also enables the IRQ */
+ bc->tx_irq_enabled = 1;
+
+ ret = request_irq(bc->tx_irq, ehv_bc_tty_tx_isr, 0, "ehv-bc", bc);
+ if (ret < 0) {
+ dev_err(bc->dev, "could not request tx irq %u (ret=%i)\n",
+ bc->tx_irq, ret);
+ free_irq(bc->rx_irq, bc);
+ return ret;
+ }
+
+ /* The TX IRQ is enabled only when we can't write all the data to the
+ * byte channel at once, so by default it's disabled.
+ */
+ disable_tx_interrupt(bc);
+
+ return 0;
+}
+
+static void ehv_bc_tty_port_shutdown(struct tty_port *port)
+{
+ struct ehv_bc_data *bc = container_of(port, struct ehv_bc_data, port);
+
+ free_irq(bc->tx_irq, bc);
+ free_irq(bc->rx_irq, bc);
+}
+
+static const struct tty_port_operations ehv_bc_tty_port_ops = {
+ .activate = ehv_bc_tty_port_activate,
+ .shutdown = ehv_bc_tty_port_shutdown,
+};
+
+static int __devinit ehv_bc_tty_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct ehv_bc_data *bc;
+ const uint32_t *iprop;
+ unsigned int handle;
+ int ret;
+ static unsigned int index = 1;
+ unsigned int i;
+
+ iprop = of_get_property(np, "hv-handle", NULL);
+ if (!iprop) {
+ dev_err(&pdev->dev, "no 'hv-handle' property in %s node\n",
+ np->name);
+ return -ENODEV;
+ }
+
+ /* We already told the console layer that the index for the console
+ * device is zero, so we need to make sure that we use that index when
+ * we probe the console byte channel node.
+ */
+ handle = be32_to_cpu(*iprop);
+ i = (handle == stdout_bc) ? 0 : index++;
+ bc = &bcs[i];
+
+ bc->handle = handle;
+ bc->head = 0;
+ bc->tail = 0;
+ spin_lock_init(&bc->lock);
+
+ bc->rx_irq = irq_of_parse_and_map(np, 0);
+ bc->tx_irq = irq_of_parse_and_map(np, 1);
+ if ((bc->rx_irq == NO_IRQ) || (bc->tx_irq == NO_IRQ)) {
+ dev_err(&pdev->dev, "no 'interrupts' property in %s node\n",
+ np->name);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ bc->dev = tty_register_device(ehv_bc_driver, i, &pdev->dev);
+ if (IS_ERR(bc->dev)) {
+ ret = PTR_ERR(bc->dev);
+ dev_err(&pdev->dev, "could not register tty (ret=%i)\n", ret);
+ goto error;
+ }
+
+ tty_port_init(&bc->port);
+ bc->port.ops = &ehv_bc_tty_port_ops;
+
+ dev_set_drvdata(&pdev->dev, bc);
+
+ dev_info(&pdev->dev, "registered /dev/%s%u for byte channel %u\n",
+ ehv_bc_driver->name, i, bc->handle);
+
+ return 0;
+
+error:
+ irq_dispose_mapping(bc->tx_irq);
+ irq_dispose_mapping(bc->rx_irq);
+
+ memset(bc, 0, sizeof(struct ehv_bc_data));
+ return ret;
+}
+
+static int ehv_bc_tty_remove(struct platform_device *pdev)
+{
+ struct ehv_bc_data *bc = dev_get_drvdata(&pdev->dev);
+
+ tty_unregister_device(ehv_bc_driver, bc - bcs);
+
+ irq_dispose_mapping(bc->tx_irq);
+ irq_dispose_mapping(bc->rx_irq);
+
+ return 0;
+}
+
+static const struct of_device_id ehv_bc_tty_of_ids[] = {
+ { .compatible = "epapr,hv-byte-channel" },
+ {}
+};
+
+static struct platform_driver ehv_bc_tty_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ehv-bc",
+ .of_match_table = ehv_bc_tty_of_ids,
+ },
+ .probe = ehv_bc_tty_probe,
+ .remove = ehv_bc_tty_remove,
+};
+
+/**
+ * ehv_bc_init - ePAPR hypervisor byte channel driver initialization
+ *
+ * This function is called when this module is loaded.
+ */
+static int __init ehv_bc_init(void)
+{
+ struct device_node *np;
+ unsigned int count = 0; /* Number of elements in bcs[] */
+ int ret;
+
+ pr_info("ePAPR hypervisor byte channel driver\n");
+
+ /* Count the number of byte channels */
+ for_each_compatible_node(np, NULL, "epapr,hv-byte-channel")
+ count++;
+
+ if (!count)
+ return -ENODEV;
+
+ /* The array index of an element in bcs[] is the same as the tty index
+ * for that element. If you know the address of an element in the
+ * array, then you can use pointer math (e.g. "bc - bcs") to get its
+ * tty index.
+ */
+ bcs = kzalloc(count * sizeof(struct ehv_bc_data), GFP_KERNEL);
+ if (!bcs)
+ return -ENOMEM;
+
+ ehv_bc_driver = alloc_tty_driver(count);
+ if (!ehv_bc_driver) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ehv_bc_driver->owner = THIS_MODULE;
+ ehv_bc_driver->driver_name = "ehv-bc";
+ ehv_bc_driver->name = ehv_bc_console.name;
+ ehv_bc_driver->type = TTY_DRIVER_TYPE_CONSOLE;
+ ehv_bc_driver->subtype = SYSTEM_TYPE_CONSOLE;
+ ehv_bc_driver->init_termios = tty_std_termios;
+ ehv_bc_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(ehv_bc_driver, &ehv_bc_ops);
+
+ ret = tty_register_driver(ehv_bc_driver);
+ if (ret) {
+ pr_err("ehv-bc: could not register tty driver (ret=%i)\n", ret);
+ goto error;
+ }
+
+ ret = platform_driver_register(&ehv_bc_tty_driver);
+ if (ret) {
+ pr_err("ehv-bc: could not register platform driver (ret=%i)\n",
+ ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ if (ehv_bc_driver) {
+ tty_unregister_driver(ehv_bc_driver);
+ put_tty_driver(ehv_bc_driver);
+ }
+
+ kfree(bcs);
+
+ return ret;
+}
+
+
+/**
+ * ehv_bc_exit - ePAPR hypervisor byte channel driver termination
+ *
+ * This function is called when this driver is unloaded.
+ */
+static void __exit ehv_bc_exit(void)
+{
+ tty_unregister_driver(ehv_bc_driver);
+ put_tty_driver(ehv_bc_driver);
+ kfree(bcs);
+}
+
+module_init(ehv_bc_init);
+module_exit(ehv_bc_exit);
+
+MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
+MODULE_DESCRIPTION("ePAPR hypervisor byte channel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index e1aaf4f309b..7430bc3c8d5 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
* there is no buffered data otherwise sleeps on a wait queue
* waking periodically to check chars_in_buffer().
*/
- tty_wait_until_sent(tty, HVC_CLOSE_WAIT);
+ tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
} else {
if (hp->count < 0)
printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
@@ -852,7 +852,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
* find index to use:
* see if this vterm id matches one registered for console.
*/
- for (i=0; i < MAX_NR_HVC_CONSOLES; i++)
+ for (i = 0; i < MAX_NR_HVC_CONSOLES; i++)
if (vtermnos[i] == hp->vtermno &&
cons_ops[i] == hp->ops)
break;
@@ -862,9 +862,13 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
i = ++last_hvc;
hp->index = i;
+ hvc_console.index = i;
+ vtermnos[i] = vtermno;
+ cons_ops[i] = ops;
list_add_tail(&(hp->next), &hvc_structs);
spin_unlock(&hvc_structs_lock);
+ register_console(&hvc_console);
return hp;
}
@@ -875,6 +879,7 @@ int hvc_remove(struct hvc_struct *hp)
unsigned long flags;
struct tty_struct *tty;
+ unregister_console(&hvc_console);
spin_lock_irqsave(&hp->lock, flags);
tty = tty_kref_get(hp->tty);
diff --git a/drivers/tty/hvc/hvc_irq.c b/drivers/tty/hvc/hvc_irq.c
index 2623e177e8d..c9adb0559f6 100644
--- a/drivers/tty/hvc/hvc_irq.c
+++ b/drivers/tty/hvc/hvc_irq.c
@@ -28,7 +28,7 @@ int notifier_add_irq(struct hvc_struct *hp, int irq)
hp->irq_requested = 0;
return 0;
}
- rc = request_irq(irq, hvc_handle_interrupt, IRQF_DISABLED,
+ rc = request_irq(irq, hvc_handle_interrupt, 0,
"hvc_console", hp);
if (!rc)
hp->irq_requested = 1;
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 4c8b6654693..55882b5930a 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -1057,7 +1057,7 @@ static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
* the conn was registered and now.
*/
if (!(rc = request_irq(irq, &hvcs_handle_interrupt,
- IRQF_DISABLED, "ibmhvcs", hvcsd))) {
+ 0, "ibmhvcs", hvcsd))) {
/*
* It is possible the vty-server was removed after the irq was
* requested but before we have time to enable interrupts.
@@ -1237,7 +1237,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
irq = hvcsd->vdev->irq;
spin_unlock_irqrestore(&hvcsd->lock, flags);
- tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);
+ tty_wait_until_sent_from_close(tty, HVCS_CLOSE_WAIT);
/*
* This line is important because it tells hvcs_open that this
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index c94e2f5853d..cdfa3e02d62 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -1105,7 +1105,7 @@ static int __init hvsi_init(void)
struct hvsi_struct *hp = &hvsi_ports[i];
int ret = 1;
- ret = request_irq(hp->virq, hvsi_interrupt, IRQF_DISABLED, "hvsi", hp);
+ ret = request_irq(hp->virq, hvsi_interrupt, 0, "hvsi", hp);
if (ret)
printk(KERN_ERR "HVSI: couldn't reserve irq 0x%x (error %i)\n",
hp->virq, ret);
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index db1cf9c328d..e5c295ab5de 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -1598,7 +1598,7 @@ static int __devinit isicom_probe(struct pci_dev *pdev,
}
retval = request_irq(board->irq, isicom_interrupt,
- IRQF_SHARED | IRQF_DISABLED, ISICOM_NAME, board);
+ IRQF_SHARED, ISICOM_NAME, board);
if (retval < 0) {
dev_err(&pdev->dev, "Could not install handler at Irq %d. "
"Card%d will be disabled.\n", board->irq, index + 1);
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 7fc8c02fea6..8998d527232 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -2005,16 +2005,9 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
*/
if (!timeout || timeout > 2 * info->timeout)
timeout = 2 * info->timeout;
-#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
- printk(KERN_DEBUG "In rs_wait_until_sent(%d) check=%lu...",
- timeout, char_time);
- printk("jiff=%lu...", jiffies);
-#endif
+
spin_lock_irqsave(&info->slock, flags);
while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) {
-#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
- printk("lsr = %d (jiff=%lu)...", lsr, jiffies);
-#endif
spin_unlock_irqrestore(&info->slock, flags);
schedule_timeout_interruptible(char_time);
spin_lock_irqsave(&info->slock, flags);
@@ -2025,10 +2018,6 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
}
spin_unlock_irqrestore(&info->slock, flags);
set_current_state(TASK_RUNNING);
-
-#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
- printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
-#endif
}
/*
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 8a50e4eebf1..4cb0d0a3e57 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -21,7 +21,6 @@
* Mostly done: ioctls for setting modes/timing
* Partly done: hooks so you can pull off frames to non tty devs
* Restart DLCI 0 when it closes ?
- * Test basic encoding
* Improve the tx engine
* Resolve tx side locking by adding a queue_head and routing
* all control traffic via it
@@ -810,38 +809,41 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
{
struct gsm_msg *msg;
u8 *dp;
- int len, size;
+ int len, total_size, size;
int h = dlci->adaption - 1;
- len = kfifo_len(dlci->fifo);
- if (len == 0)
- return 0;
-
- /* MTU/MRU count only the data bits */
- if (len > gsm->mtu)
- len = gsm->mtu;
-
- size = len + h;
-
- msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
- /* FIXME: need a timer or something to kick this so it can't
- get stuck with no work outstanding and no buffer free */
- if (msg == NULL)
- return -ENOMEM;
- dp = msg->data;
- switch (dlci->adaption) {
- case 1: /* Unstructured */
- break;
- case 2: /* Unstructed with modem bits. Always one byte as we never
- send inline break data */
- *dp += gsm_encode_modem(dlci);
- len--;
- break;
+ total_size = 0;
+ while(1) {
+ len = kfifo_len(dlci->fifo);
+ if (len == 0)
+ return total_size;
+
+ /* MTU/MRU count only the data bits */
+ if (len > gsm->mtu)
+ len = gsm->mtu;
+
+ size = len + h;
+
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+ /* FIXME: need a timer or something to kick this so it can't
+ get stuck with no work outstanding and no buffer free */
+ if (msg == NULL)
+ return -ENOMEM;
+ dp = msg->data;
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+ case 2: /* Unstructed with modem bits. Always one byte as we never
+ send inline break data */
+ *dp++ = gsm_encode_modem(dlci);
+ break;
+ }
+ WARN_ON(kfifo_out_locked(dlci->fifo, dp , len, &dlci->lock) != len);
+ __gsm_data_queue(dlci, msg);
+ total_size += size;
}
- WARN_ON(kfifo_out_locked(dlci->fifo, dp , len, &dlci->lock) != len);
- __gsm_data_queue(dlci, msg);
/* Bytes of data we used up */
- return size;
+ return total_size;
}
/**
@@ -2004,6 +2006,7 @@ void gsm_cleanup_mux(struct gsm_mux *gsm)
int i;
struct gsm_dlci *dlci = gsm->dlci[0];
struct gsm_msg *txq;
+ struct gsm_control *gc;
gsm->dead = 1;
@@ -2017,6 +2020,13 @@ void gsm_cleanup_mux(struct gsm_mux *gsm)
spin_unlock(&gsm_mux_lock);
WARN_ON(i == MAX_MUX);
+ /* In theory disconnecting DLCI 0 is sufficient but for some
+ modems this is apparently not the case. */
+ if (dlci) {
+ gc = gsm_control_send(gsm, CMD_CLD, NULL, 0);
+ if (gc)
+ gsm_control_wait(gsm, gc);
+ }
del_timer_sync(&gsm->t2_timer);
/* Now we are sure T2 has stopped */
if (dlci) {
@@ -2982,7 +2992,7 @@ static int gsmtty_tiocmset(struct tty_struct *tty,
struct gsm_dlci *dlci = tty->driver_data;
unsigned int modem_tx = dlci->modem_tx;
- modem_tx &= clear;
+ modem_tx &= ~clear;
modem_tx |= set;
if (modem_tx != dlci->modem_tx) {
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e809e9d4683..e18604b3fc7 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -670,12 +670,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
nonseekable_open(inode, filp);
+ retval = tty_alloc_file(filp);
+ if (retval)
+ return retval;
+
/* find a device that is not in use. */
tty_lock();
index = devpts_new_index(inode);
tty_unlock();
- if (index < 0)
- return index;
+ if (index < 0) {
+ retval = index;
+ goto err_file;
+ }
mutex_lock(&tty_mutex);
tty_lock();
@@ -689,27 +695,27 @@ static int ptmx_open(struct inode *inode, struct file *filp)
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- retval = tty_add_file(tty, filp);
- if (retval)
- goto out;
+ tty_add_file(tty, filp);
retval = devpts_pty_new(inode, tty->link);
if (retval)
- goto out1;
+ goto err_release;
retval = ptm_driver->ops->open(tty, filp);
if (retval)
- goto out2;
-out1:
+ goto err_release;
+
tty_unlock();
- return retval;
-out2:
+ return 0;
+err_release:
tty_unlock();
tty_release(inode, filp);
return retval;
out:
devpts_kill_index(inode, index);
tty_unlock();
+err_file:
+ tty_free_file(filp);
return retval;
}
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index e0a77540b8c..a88ef9782a4 100644
--- a/drivers/tty/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -235,22 +235,6 @@ static void batten_down_hatches(void)
static void status_handle(struct m68k_serial *info, unsigned short status)
{
-#if 0
- if(status & DCD) {
- if((info->port.tty->termios->c_cflag & CRTSCTS) &&
- ((info->curregs[3] & AUTO_ENAB)==0)) {
- info->curregs[3] |= AUTO_ENAB;
- info->pendregs[3] |= AUTO_ENAB;
- write_zsreg(info->m68k_channel, 3, info->curregs[3]);
- }
- } else {
- if((info->curregs[3] & AUTO_ENAB)) {
- info->curregs[3] &= ~AUTO_ENAB;
- info->pendregs[3] &= ~AUTO_ENAB;
- write_zsreg(info->m68k_channel, 3, info->curregs[3]);
- }
- }
-#endif
/* If this is console input and this is a
* 'break asserted' status change interrupt
* see if we can drop into the debugger
@@ -340,9 +324,6 @@ static void transmit_chars(struct m68k_serial *info)
info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
info->xmit_cnt--;
- if (info->xmit_cnt < WAKEUP_CHARS)
- schedule_work(&info->tqueue);
-
if(info->xmit_cnt <= 0) {
/* All done for now... TX ints off */
uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
@@ -378,21 +359,6 @@ irqreturn_t rs_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void do_softint(struct work_struct *work)
-{
- struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue);
- struct tty_struct *tty;
-
- tty = info->tty;
- if (!tty)
- return;
-#if 0
- if (clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event)) {
- tty_wakeup(tty);
- }
-#endif
-}
-
static int startup(struct m68k_serial * info)
{
m68328_uart *uart = &uart_addr[info->line];
@@ -1324,7 +1290,6 @@ rs68328_init(void)
info->event = 0;
info->count = 0;
info->blocked_open = 0;
- INIT_WORK(&info->tqueue, do_softint);
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
info->line = i;
@@ -1341,7 +1306,7 @@ rs68328_init(void)
if (request_irq(uart_irqs[i],
rs_interrupt,
- IRQF_DISABLED,
+ 0,
"M68328_UART", info))
panic("Unable to attach 68328 serial interrupt\n");
}
diff --git a/drivers/tty/serial/68328serial.h b/drivers/tty/serial/68328serial.h
index 8c9c3c0745d..3d2faabd766 100644
--- a/drivers/tty/serial/68328serial.h
+++ b/drivers/tty/serial/68328serial.h
@@ -158,7 +158,6 @@ struct m68k_serial {
int xmit_head;
int xmit_tail;
int xmit_cnt;
- struct work_struct tqueue;
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;
};
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index 7f50999eebc..a87a56cb541 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -309,6 +309,13 @@ static const struct serial8250_config uart_config[] = {
UART_FCR_T_TRIG_01,
.flags = UART_CAP_FIFO | UART_CAP_RTOIE,
},
+ [PORT_XR17D15X] = {
+ .name = "XR17D15X",
+ .fifo_size = 64,
+ .tx_loadsz = 64,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR,
+ },
};
#if defined(CONFIG_MIPS_ALCHEMY)
@@ -461,42 +468,6 @@ static void tsi_serial_out(struct uart_port *p, int offset, int value)
writeb(value, p->membase + offset);
}
-/* Save the LCR value so it can be re-written when a Busy Detect IRQ occurs. */
-static inline void dwapb_save_out_value(struct uart_port *p, int offset,
- int value)
-{
- struct uart_8250_port *up =
- container_of(p, struct uart_8250_port, port);
-
- if (offset == UART_LCR)
- up->lcr = value;
-}
-
-/* Read the IER to ensure any interrupt is cleared before returning from ISR. */
-static inline void dwapb_check_clear_ier(struct uart_port *p, int offset)
-{
- if (offset == UART_TX || offset == UART_IER)
- p->serial_in(p, UART_IER);
-}
-
-static void dwapb_serial_out(struct uart_port *p, int offset, int value)
-{
- int save_offset = offset;
- offset = map_8250_out_reg(p, offset) << p->regshift;
- dwapb_save_out_value(p, save_offset, value);
- writeb(value, p->membase + offset);
- dwapb_check_clear_ier(p, save_offset);
-}
-
-static void dwapb32_serial_out(struct uart_port *p, int offset, int value)
-{
- int save_offset = offset;
- offset = map_8250_out_reg(p, offset) << p->regshift;
- dwapb_save_out_value(p, save_offset, value);
- writel(value, p->membase + offset);
- dwapb_check_clear_ier(p, save_offset);
-}
-
static unsigned int io_serial_in(struct uart_port *p, int offset)
{
offset = map_8250_in_reg(p, offset) << p->regshift;
@@ -509,6 +480,8 @@ static void io_serial_out(struct uart_port *p, int offset, int value)
outb(value, p->iobase + offset);
}
+static int serial8250_default_handle_irq(struct uart_port *port);
+
static void set_io_from_upio(struct uart_port *p)
{
struct uart_8250_port *up =
@@ -540,16 +513,6 @@ static void set_io_from_upio(struct uart_port *p)
p->serial_out = tsi_serial_out;
break;
- case UPIO_DWAPB:
- p->serial_in = mem_serial_in;
- p->serial_out = dwapb_serial_out;
- break;
-
- case UPIO_DWAPB32:
- p->serial_in = mem32_serial_in;
- p->serial_out = dwapb32_serial_out;
- break;
-
default:
p->serial_in = io_serial_in;
p->serial_out = io_serial_out;
@@ -557,6 +520,7 @@ static void set_io_from_upio(struct uart_port *p)
}
/* Remember loaded iotype */
up->cur_iotype = p->iotype;
+ p->handle_irq = serial8250_default_handle_irq;
}
static void
@@ -567,8 +531,6 @@ serial_out_sync(struct uart_8250_port *up, int offset, int value)
case UPIO_MEM:
case UPIO_MEM32:
case UPIO_AU:
- case UPIO_DWAPB:
- case UPIO_DWAPB32:
p->serial_out(p, offset, value);
p->serial_in(p, UART_LCR); /* safe, no side-effects */
break;
@@ -1120,6 +1082,14 @@ static void autoconfig_16550a(struct uart_8250_port *up)
serial_outp(up, UART_IER, iersave);
/*
+ * Exar uarts have EFR in a weird location
+ */
+ if (up->port.flags & UPF_EXAR_EFR) {
+ up->port.type = PORT_XR17D15X;
+ up->capabilities |= UART_CAP_AFE | UART_CAP_EFR;
+ }
+
+ /*
* We distinguish between 16550A and U6 16550A by counting
* how many bytes are in the FIFO.
*/
@@ -1621,6 +1591,29 @@ static void serial8250_handle_port(struct uart_8250_port *up)
spin_unlock_irqrestore(&up->port.lock, flags);
}
+int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ if (!(iir & UART_IIR_NO_INT)) {
+ serial8250_handle_port(up);
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(serial8250_handle_irq);
+
+static int serial8250_default_handle_irq(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ unsigned int iir = serial_in(up, UART_IIR);
+
+ return serial8250_handle_irq(port, iir);
+}
+
/*
* This is the serial driver's interrupt routine.
*
@@ -1648,30 +1641,13 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
l = i->head;
do {
struct uart_8250_port *up;
- unsigned int iir;
+ struct uart_port *port;
up = list_entry(l, struct uart_8250_port, list);
+ port = &up->port;
- iir = serial_in(up, UART_IIR);
- if (!(iir & UART_IIR_NO_INT)) {
- serial8250_handle_port(up);
-
+ if (port->handle_irq(port)) {
handled = 1;
-
- end = NULL;
- } else if ((up->port.iotype == UPIO_DWAPB ||
- up->port.iotype == UPIO_DWAPB32) &&
- (iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
- /* The DesignWare APB UART has an Busy Detect (0x07)
- * interrupt meaning an LCR write attempt occurred while the
- * UART was busy. The interrupt must be cleared by reading
- * the UART status register (USR) and the LCR re-written. */
- unsigned int status;
- status = *(volatile u32 *)up->port.private_data;
- serial_out(up, UART_LCR, up->lcr);
-
- handled = 1;
-
end = NULL;
} else if (end == NULL)
end = l;
@@ -2081,8 +2057,8 @@ static int serial8250_startup(struct uart_port *port)
*/
if (!(up->port.flags & UPF_BUGGY_UART) &&
(serial_inp(up, UART_LSR) == 0xff)) {
- printk(KERN_INFO "ttyS%d: LSR safety check engaged!\n",
- serial_index(&up->port));
+ printk_ratelimited(KERN_INFO "ttyS%d: LSR safety check engaged!\n",
+ serial_index(&up->port));
return -ENODEV;
}
@@ -2458,7 +2434,10 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
efr |= UART_EFR_CTS;
serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_outp(up, UART_EFR, efr);
+ if (up->port.flags & UPF_EXAR_EFR)
+ serial_outp(up, UART_XR_EFR, efr);
+ else
+ serial_outp(up, UART_EFR, efr);
}
#ifdef CONFIG_ARCH_OMAP
@@ -2570,8 +2549,6 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
case UPIO_TSI:
case UPIO_MEM32:
case UPIO_MEM:
- case UPIO_DWAPB:
- case UPIO_DWAPB32:
if (!up->port.mapbase)
break;
@@ -2608,8 +2585,6 @@ static void serial8250_release_std_resource(struct uart_8250_port *up)
case UPIO_TSI:
case UPIO_MEM32:
case UPIO_MEM:
- case UPIO_DWAPB:
- case UPIO_DWAPB32:
if (!up->port.mapbase)
break;
@@ -3050,6 +3025,10 @@ int __init early_serial_setup(struct uart_port *port)
p->serial_in = port->serial_in;
if (port->serial_out)
p->serial_out = port->serial_out;
+ if (port->handle_irq)
+ p->handle_irq = port->handle_irq;
+ else
+ p->handle_irq = serial8250_default_handle_irq;
return 0;
}
@@ -3118,6 +3097,7 @@ static int __devinit serial8250_probe(struct platform_device *dev)
port.type = p->type;
port.serial_in = p->serial_in;
port.serial_out = p->serial_out;
+ port.handle_irq = p->handle_irq;
port.set_termios = p->set_termios;
port.pm = p->pm;
port.dev = &dev->dev;
@@ -3283,6 +3263,8 @@ int serial8250_register_port(struct uart_port *port)
uart->port.serial_in = port->serial_in;
if (port->serial_out)
uart->port.serial_out = port->serial_out;
+ if (port->handle_irq)
+ uart->port.handle_irq = port->handle_irq;
/* Possibly override set_termios call */
if (port->set_termios)
uart->port.set_termios = port->set_termios;
diff --git a/drivers/tty/serial/8250_dw.c b/drivers/tty/serial/8250_dw.c
new file mode 100644
index 00000000000..bf1fba640c2
--- /dev/null
+++ b/drivers/tty/serial/8250_dw.c
@@ -0,0 +1,194 @@
+/*
+ * Synopsys DesignWare 8250 driver.
+ *
+ * Copyright 2011 Picochip, Jamie Iles.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The Synopsys DesignWare 8250 has an extra feature whereby it detects if the
+ * LCR is written whilst busy. If it is, then a busy detect interrupt is
+ * raised, the LCR needs to be rewritten and the uart status register read.
+ */
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct dw8250_data {
+ int last_lcr;
+ int line;
+};
+
+static void dw8250_serial_out(struct uart_port *p, int offset, int value)
+{
+ struct dw8250_data *d = p->private_data;
+
+ if (offset == UART_LCR)
+ d->last_lcr = value;
+
+ offset <<= p->regshift;
+ writeb(value, p->membase + offset);
+}
+
+static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
+{
+ offset <<= p->regshift;
+
+ return readb(p->membase + offset);
+}
+
+static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
+{
+ struct dw8250_data *d = p->private_data;
+
+ if (offset == UART_LCR)
+ d->last_lcr = value;
+
+ offset <<= p->regshift;
+ writel(value, p->membase + offset);
+}
+
+static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
+{
+ offset <<= p->regshift;
+
+ return readl(p->membase + offset);
+}
+
+/* Offset for the DesignWare's UART Status Register. */
+#define UART_USR 0x1f
+
+static int dw8250_handle_irq(struct uart_port *p)
+{
+ struct dw8250_data *d = p->private_data;
+ unsigned int iir = p->serial_in(p, UART_IIR);
+
+ if (serial8250_handle_irq(p, iir)) {
+ return 1;
+ } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
+ /* Clear the USR and write the LCR again. */
+ (void)p->serial_in(p, UART_USR);
+ p->serial_out(p, d->last_lcr, UART_LCR);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int __devinit dw8250_probe(struct platform_device *pdev)
+{
+ struct uart_port port = {};
+ struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ struct device_node *np = pdev->dev.of_node;
+ u32 val;
+ struct dw8250_data *data;
+
+ if (!regs || !irq) {
+ dev_err(&pdev->dev, "no registers/irq defined\n");
+ return -EINVAL;
+ }
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ port.private_data = data;
+
+ spin_lock_init(&port.lock);
+ port.mapbase = regs->start;
+ port.irq = irq->start;
+ port.handle_irq = dw8250_handle_irq;
+ port.type = PORT_8250;
+ port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP |
+ UPF_FIXED_PORT | UPF_FIXED_TYPE;
+ port.dev = &pdev->dev;
+
+ port.iotype = UPIO_MEM;
+ port.serial_in = dw8250_serial_in;
+ port.serial_out = dw8250_serial_out;
+ if (!of_property_read_u32(np, "reg-io-width", &val)) {
+ switch (val) {
+ case 1:
+ break;
+ case 4:
+ port.iotype = UPIO_MEM32;
+ port.serial_in = dw8250_serial_in32;
+ port.serial_out = dw8250_serial_out32;
+ break;
+ default:
+ dev_err(&pdev->dev, "unsupported reg-io-width (%u)\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(np, "reg-shift", &val))
+ port.regshift = val;
+
+ if (of_property_read_u32(np, "clock-frequency", &val)) {
+ dev_err(&pdev->dev, "no clock-frequency property set\n");
+ return -EINVAL;
+ }
+ port.uartclk = val;
+
+ data->line = serial8250_register_port(&port);
+ if (data->line < 0)
+ return data->line;
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+}
+
+static int __devexit dw8250_remove(struct platform_device *pdev)
+{
+ struct dw8250_data *data = platform_get_drvdata(pdev);
+
+ serial8250_unregister_port(data->line);
+
+ return 0;
+}
+
+static const struct of_device_id dw8250_match[] = {
+ { .compatible = "snps,dw-apb-uart" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dw8250_match);
+
+static struct platform_driver dw8250_platform_driver = {
+ .driver = {
+ .name = "dw-apb-uart",
+ .owner = THIS_MODULE,
+ .of_match_table = dw8250_match,
+ },
+ .probe = dw8250_probe,
+ .remove = __devexit_p(dw8250_remove),
+};
+
+static int __init dw8250_init(void)
+{
+ return platform_driver_register(&dw8250_platform_driver);
+}
+module_init(dw8250_init);
+
+static void __exit dw8250_exit(void)
+{
+ platform_driver_unregister(&dw8250_platform_driver);
+}
+module_exit(dw8250_exit);
+
+MODULE_AUTHOR("Jamie Iles");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Synopsys DesignWare 8250 serial port driver");
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 3abeca2a2a1..825937a5f21 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
@@ -1101,6 +1101,15 @@ static int pci_eg20t_init(struct pci_dev *dev)
#endif
}
+static int
+pci_xr17c154_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ port->flags |= UPF_EXAR_EFR;
+ return pci_default_setup(priv, board, port, idx);
+}
+
/* This should be in linux/pci_ids.h */
#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
@@ -1506,6 +1515,30 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_timedia_setup,
},
/*
+ * Exar cards
+ */
+ {
+ .vendor = PCI_VENDOR_ID_EXAR,
+ .device = PCI_DEVICE_ID_EXAR_XR17C152,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_xr17c154_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_EXAR,
+ .device = PCI_DEVICE_ID_EXAR_XR17C154,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_xr17c154_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_EXAR,
+ .device = PCI_DEVICE_ID_EXAR_XR17C158,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_xr17c154_setup,
+ },
+ /*
* Xircom cards
*/
{
@@ -1558,46 +1591,55 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.vendor = PCI_VENDOR_ID_INTEL,
.device = 0x8811,
.init = pci_eg20t_init,
+ .setup = pci_default_setup,
},
{
.vendor = PCI_VENDOR_ID_INTEL,
.device = 0x8812,
.init = pci_eg20t_init,
+ .setup = pci_default_setup,
},
{
.vendor = PCI_VENDOR_ID_INTEL,
.device = 0x8813,
.init = pci_eg20t_init,
+ .setup = pci_default_setup,
},
{
.vendor = PCI_VENDOR_ID_INTEL,
.device = 0x8814,
.init = pci_eg20t_init,
+ .setup = pci_default_setup,
},
{
.vendor = 0x10DB,
.device = 0x8027,
.init = pci_eg20t_init,
+ .setup = pci_default_setup,
},
{
.vendor = 0x10DB,
.device = 0x8028,
.init = pci_eg20t_init,
+ .setup = pci_default_setup,
},
{
.vendor = 0x10DB,
.device = 0x8029,
.init = pci_eg20t_init,
+ .setup = pci_default_setup,
},
{
.vendor = 0x10DB,
.device = 0x800C,
.init = pci_eg20t_init,
+ .setup = pci_default_setup,
},
{
.vendor = 0x10DB,
.device = 0x800D,
.init = pci_eg20t_init,
+ .setup = pci_default_setup,
},
/*
* Cronyx Omega PCI (PLX-chip based)
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 4dcb37bbdf9..5f479dada6f 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -267,6 +267,13 @@ config SERIAL_8250_RM9K
port hardware found on MIPS RM9122 and similar processors.
If unsure, say N.
+config SERIAL_8250_DW
+ tristate "Support for Synopsys DesignWare 8250 quirks"
+ depends on SERIAL_8250 && OF
+ help
+ Selecting this option will enable handling of the extra features
+ present in the Synopsys DesignWare APB UART.
+
comment "Non-8250 serial port support"
config SERIAL_AMBA_PL010
@@ -522,8 +529,8 @@ config SERIAL_S3C6400
config SERIAL_S5PV210
tristate "Samsung S5PV210 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_EXYNOS4210)
- select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_EXYNOS4210)
+ depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_EXYNOS4210 || SOC_EXYNOS4212)
+ select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_EXYNOS4210 || SOC_EXYNOS4212)
default y
help
Serial port support for Samsung's S5P Family of SoC's
@@ -722,7 +729,7 @@ config SERIAL_BFIN
Add support for the built-in UARTs on the Blackfin.
To compile this driver as a module, choose M here: the
- module will be called bfin_5xx.
+ module is named bfin_uart.ko.
config SERIAL_BFIN_CONSOLE
bool "Console on Blackfin serial port"
@@ -1035,23 +1042,6 @@ config SERIAL_MCF_CONSOLE
help
Enable a ColdFire internal serial port to be the system console.
-config SERIAL_68360_SMC
- bool "68360 SMC uart support"
- depends on M68360
- help
- This driver supports the SMC serial ports of the Motorola 68360 CPU.
-
-config SERIAL_68360_SCC
- bool "68360 SCC uart support"
- depends on M68360
- help
- This driver supports the SCC serial ports of the Motorola 68360 CPU.
-
-config SERIAL_68360
- bool
- depends on SERIAL_68360_SMC || SERIAL_68360_SCC
- default y
-
config SERIAL_PMACZILOG
tristate "Mac or PowerMac z85c30 ESCC support"
depends on (M68K && MAC) || (PPC_OF && PPC_PMAC)
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 83b4da6a106..e10cf5b54b6 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o
+obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
@@ -35,7 +36,7 @@ obj-$(CONFIG_SERIAL_PXA) += pxa.o
obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
-obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o
+obj-$(CONFIG_SERIAL_BFIN) += bfin_uart.o
obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o
@@ -49,7 +50,6 @@ obj-$(CONFIG_SERIAL_MAX3107_AAVA) += max3107-aava.o
obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
obj-$(CONFIG_SERIAL_MUX) += mux.o
obj-$(CONFIG_SERIAL_68328) += 68328serial.o
-obj-$(CONFIG_SERIAL_68360) += 68360serial.o
obj-$(CONFIG_SERIAL_MCF) += mcf.o
obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o
obj-$(CONFIG_SERIAL_DZ) += dz.o
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 60e049b041a..00a73ecb2df 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -218,7 +218,7 @@ static int altera_jtaguart_startup(struct uart_port *port)
unsigned long flags;
int ret;
- ret = request_irq(port->irq, altera_jtaguart_interrupt, IRQF_DISABLED,
+ ret = request_irq(port->irq, altera_jtaguart_interrupt, 0,
DRV_NAME, port);
if (ret) {
pr_err(DRV_NAME ": unable to attach Altera JTAG UART %d "
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 50bc5a5ac65..d902558ccfd 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -315,7 +315,7 @@ static int altera_uart_startup(struct uart_port *port)
return 0;
}
- ret = request_irq(port->irq, altera_uart_interrupt, IRQF_DISABLED,
+ ret = request_irq(port->irq, altera_uart_interrupt, 0,
DRV_NAME, port);
if (ret) {
pr_err(DRV_NAME ": unable to attach Altera UART %d "
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 19a943693e4..77554fd68d1 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/serial.h>
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index b922f5d2e61..9988c0c305c 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -33,6 +33,8 @@
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/atmel_pdc.h>
#include <linux/atmel_serial.h>
@@ -157,11 +159,22 @@ struct atmel_uart_port {
};
static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
+static unsigned long atmel_ports_in_use;
#ifdef SUPPORT_SYSRQ
static struct console atmel_console;
#endif
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_serial_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-usart" },
+ { .compatible = "atmel,at91sam9260-usart" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_serial_dt_ids);
+#endif
+
static inline struct atmel_uart_port *
to_atmel_uart_port(struct uart_port *uart)
{
@@ -339,7 +352,8 @@ static void atmel_stop_tx(struct uart_port *port)
/* Disable interrupts */
UART_PUT_IDR(port, atmel_port->tx_done_mask);
- if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+ if ((atmel_port->rs485.flags & SER_RS485_ENABLED) &&
+ !(atmel_port->rs485.flags & SER_RS485_RX_DURING_TX))
atmel_start_rx(port);
}
@@ -356,7 +370,8 @@ static void atmel_start_tx(struct uart_port *port)
really need this.*/
return;
- if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+ if ((atmel_port->rs485.flags & SER_RS485_ENABLED) &&
+ !(atmel_port->rs485.flags & SER_RS485_RX_DURING_TX))
atmel_stop_rx(port);
/* re-enable PDC transmit */
@@ -680,7 +695,8 @@ static void atmel_tx_dma(struct uart_port *port)
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
} else {
- if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ if ((atmel_port->rs485.flags & SER_RS485_ENABLED) &&
+ !(atmel_port->rs485.flags & SER_RS485_RX_DURING_TX)) {
/* DMA done, stop TX, start RX for RS485 */
atmel_start_rx(port);
}
@@ -1407,6 +1423,48 @@ static struct uart_ops atmel_pops = {
#endif
};
+static void __devinit atmel_of_init_port(struct atmel_uart_port *atmel_port,
+ struct device_node *np)
+{
+ u32 rs485_delay[2];
+
+ /* DMA/PDC usage specification */
+ if (of_get_property(np, "atmel,use-dma-rx", NULL))
+ atmel_port->use_dma_rx = 1;
+ else
+ atmel_port->use_dma_rx = 0;
+ if (of_get_property(np, "atmel,use-dma-tx", NULL))
+ atmel_port->use_dma_tx = 1;
+ else
+ atmel_port->use_dma_tx = 0;
+
+ /* rs485 properties */
+ if (of_property_read_u32_array(np, "rs485-rts-delay",
+ rs485_delay, 2) == 0) {
+ struct serial_rs485 *rs485conf = &atmel_port->rs485;
+
+ rs485conf->delay_rts_before_send = rs485_delay[0];
+ rs485conf->delay_rts_after_send = rs485_delay[1];
+ rs485conf->flags = 0;
+
+ if (rs485conf->delay_rts_before_send == 0 &&
+ rs485conf->delay_rts_after_send == 0) {
+ rs485conf->flags |= SER_RS485_RTS_ON_SEND;
+ } else {
+ if (rs485conf->delay_rts_before_send)
+ rs485conf->flags |= SER_RS485_RTS_BEFORE_SEND;
+ if (rs485conf->delay_rts_after_send)
+ rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
+ }
+
+ if (of_get_property(np, "rs485-rx-during-tx", NULL))
+ rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
+ if (of_get_property(np, "linux,rs485-enabled-at-boot-time", NULL))
+ rs485conf->flags |= SER_RS485_ENABLED;
+ }
+}
+
/*
* Configure the port from the platform device resource info.
*/
@@ -1414,13 +1472,20 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
struct platform_device *pdev)
{
struct uart_port *port = &atmel_port->uart;
- struct atmel_uart_data *data = pdev->dev.platform_data;
+ struct atmel_uart_data *pdata = pdev->dev.platform_data;
+
+ if (pdev->dev.of_node) {
+ atmel_of_init_port(atmel_port, pdev->dev.of_node);
+ } else {
+ atmel_port->use_dma_rx = pdata->use_dma_rx;
+ atmel_port->use_dma_tx = pdata->use_dma_tx;
+ atmel_port->rs485 = pdata->rs485;
+ }
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &atmel_pops;
port->fifosize = 1;
- port->line = data->num;
port->dev = &pdev->dev;
port->mapbase = pdev->resource[0].start;
port->irq = pdev->resource[1].start;
@@ -1430,10 +1495,10 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
- if (data->regs)
+ if (pdata && pdata->regs) {
/* Already mapped by setup code */
- port->membase = data->regs;
- else {
+ port->membase = pdata->regs;
+ } else {
port->flags |= UPF_IOREMAP;
port->membase = NULL;
}
@@ -1447,9 +1512,6 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
/* only enable clock when USART is in use */
}
- atmel_port->use_dma_rx = data->use_dma_rx;
- atmel_port->use_dma_tx = data->use_dma_tx;
- atmel_port->rs485 = data->rs485;
/* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
if (atmel_port->rs485.flags & SER_RS485_ENABLED)
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
@@ -1611,10 +1673,14 @@ static int __init atmel_console_init(void)
if (atmel_default_console_device) {
struct atmel_uart_data *pdata =
atmel_default_console_device->dev.platform_data;
+ int id = pdata->num;
+ struct atmel_uart_port *port = &atmel_ports[id];
+
+ port->backup_imr = 0;
+ port->uart.line = id;
- add_preferred_console(ATMEL_DEVICENAME, pdata->num, NULL);
- atmel_init_port(&atmel_ports[pdata->num],
- atmel_default_console_device);
+ add_preferred_console(ATMEL_DEVICENAME, id, NULL);
+ atmel_init_port(port, atmel_default_console_device);
register_console(&atmel_console);
}
@@ -1711,14 +1777,39 @@ static int atmel_serial_resume(struct platform_device *pdev)
static int __devinit atmel_serial_probe(struct platform_device *pdev)
{
struct atmel_uart_port *port;
+ struct device_node *np = pdev->dev.of_node;
struct atmel_uart_data *pdata = pdev->dev.platform_data;
void *data;
- int ret;
+ int ret = -ENODEV;
BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
- port = &atmel_ports[pdata->num];
+ if (np)
+ ret = of_alias_get_id(np, "serial");
+ else
+ if (pdata)
+ ret = pdata->num;
+
+ if (ret < 0)
+ /* port id not found in platform data nor device-tree aliases:
+ * auto-enumerate it */
+ ret = find_first_zero_bit(&atmel_ports_in_use,
+ sizeof(atmel_ports_in_use));
+
+ if (ret > ATMEL_MAX_UART) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (test_and_set_bit(ret, &atmel_ports_in_use)) {
+ /* port already in use */
+ ret = -EBUSY;
+ goto err;
+ }
+
+ port = &atmel_ports[ret];
port->backup_imr = 0;
+ port->uart.line = ret;
atmel_init_port(port, pdev);
@@ -1764,7 +1855,7 @@ err_alloc_ring:
clk_put(port->clk);
port->clk = NULL;
}
-
+err:
return ret;
}
@@ -1784,6 +1875,8 @@ static int __devexit atmel_serial_remove(struct platform_device *pdev)
/* "port" is allocated statically, so we shouldn't free it */
+ clear_bit(port->line, &atmel_ports_in_use);
+
clk_put(atmel_port->clk);
return ret;
@@ -1797,6 +1890,7 @@ static struct platform_driver atmel_serial_driver = {
.driver = {
.name = "atmel_usart",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_serial_dt_ids),
},
};
diff --git a/drivers/tty/serial/bfin_5xx.c b/drivers/tty/serial/bfin_5xx.c
deleted file mode 100644
index ff6979181ac..00000000000
--- a/drivers/tty/serial/bfin_5xx.c
+++ /dev/null
@@ -1,1596 +0,0 @@
-/*
- * Blackfin On-Chip Serial Driver
- *
- * Copyright 2006-2010 Analog Devices Inc.
- *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
- */
-
-#if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
-#define DRIVER_NAME "bfin-uart"
-#define pr_fmt(fmt) DRIVER_NAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/gfp.h>
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/platform_device.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/gpio.h>
-#include <linux/irq.h>
-#include <linux/kgdb.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/portmux.h>
-#include <asm/cacheflush.h>
-#include <asm/dma.h>
-
-#define port_membase(uart) (((struct bfin_serial_port *)(uart))->port.membase)
-#define get_lsr_cache(uart) (((struct bfin_serial_port *)(uart))->lsr)
-#define put_lsr_cache(uart, v) (((struct bfin_serial_port *)(uart))->lsr = (v))
-#include <asm/bfin_serial.h>
-
-#ifdef CONFIG_SERIAL_BFIN_MODULE
-# undef CONFIG_EARLY_PRINTK
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_MODULE
-# undef CONFIG_EARLY_PRINTK
-#endif
-
-/* UART name and device definitions */
-#define BFIN_SERIAL_DEV_NAME "ttyBF"
-#define BFIN_SERIAL_MAJOR 204
-#define BFIN_SERIAL_MINOR 64
-
-static struct bfin_serial_port *bfin_serial_ports[BFIN_UART_NR_PORTS];
-
-#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
- defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
-
-# ifndef CONFIG_SERIAL_BFIN_PIO
-# error KGDB only support UART in PIO mode.
-# endif
-
-static int kgdboc_port_line;
-static int kgdboc_break_enabled;
-#endif
-/*
- * Setup for console. Argument comes from the menuconfig
- */
-#define DMA_RX_XCOUNT 512
-#define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT)
-
-#define DMA_RX_FLUSH_JIFFIES (HZ / 50)
-
-#ifdef CONFIG_SERIAL_BFIN_DMA
-static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart);
-#else
-static void bfin_serial_tx_chars(struct bfin_serial_port *uart);
-#endif
-
-static void bfin_serial_reset_irda(struct uart_port *port);
-
-#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
- defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
-static unsigned int bfin_serial_get_mctrl(struct uart_port *port)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
- if (uart->cts_pin < 0)
- return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
-
- /* CTS PIN is negative assertive. */
- if (UART_GET_CTS(uart))
- return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
- else
- return TIOCM_DSR | TIOCM_CAR;
-}
-
-static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
- if (uart->rts_pin < 0)
- return;
-
- /* RTS PIN is negative assertive. */
- if (mctrl & TIOCM_RTS)
- UART_ENABLE_RTS(uart);
- else
- UART_DISABLE_RTS(uart);
-}
-
-/*
- * Handle any change of modem status signal.
- */
-static irqreturn_t bfin_serial_mctrl_cts_int(int irq, void *dev_id)
-{
- struct bfin_serial_port *uart = dev_id;
- unsigned int status;
-
- status = bfin_serial_get_mctrl(&uart->port);
- uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- uart->scts = 1;
- UART_CLEAR_SCTS(uart);
- UART_CLEAR_IER(uart, EDSSI);
-#endif
-
- return IRQ_HANDLED;
-}
-#else
-static unsigned int bfin_serial_get_mctrl(struct uart_port *port)
-{
- return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
-}
-
-static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
-}
-#endif
-
-/*
- * interrupts are disabled on entry
- */
-static void bfin_serial_stop_tx(struct uart_port *port)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- struct circ_buf *xmit = &uart->port.state->xmit;
-#endif
-
- while (!(UART_GET_LSR(uart) & TEMT))
- cpu_relax();
-
-#ifdef CONFIG_SERIAL_BFIN_DMA
- disable_dma(uart->tx_dma_channel);
- xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
- uart->port.icount.tx += uart->tx_count;
- uart->tx_count = 0;
- uart->tx_done = 1;
-#else
-#ifdef CONFIG_BF54x
- /* Clear TFI bit */
- UART_PUT_LSR(uart, TFI);
-#endif
- UART_CLEAR_IER(uart, ETBEI);
-#endif
-}
-
-/*
- * port is locked and interrupts are disabled
- */
-static void bfin_serial_start_tx(struct uart_port *port)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
- struct tty_struct *tty = uart->port.state->port.tty;
-
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
- uart->scts = 0;
- uart_handle_cts_change(&uart->port, uart->scts);
- }
-#endif
-
- /*
- * To avoid losting RX interrupt, we reset IR function
- * before sending data.
- */
- if (tty->termios->c_line == N_IRDA)
- bfin_serial_reset_irda(port);
-
-#ifdef CONFIG_SERIAL_BFIN_DMA
- if (uart->tx_done)
- bfin_serial_dma_tx_chars(uart);
-#else
- UART_SET_IER(uart, ETBEI);
- bfin_serial_tx_chars(uart);
-#endif
-}
-
-/*
- * Interrupts are enabled
- */
-static void bfin_serial_stop_rx(struct uart_port *port)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
-
- UART_CLEAR_IER(uart, ERBFI);
-}
-
-/*
- * Set the modem control timer to fire immediately.
- */
-static void bfin_serial_enable_ms(struct uart_port *port)
-{
-}
-
-
-#if ANOMALY_05000363 && defined(CONFIG_SERIAL_BFIN_PIO)
-# define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold)
-# define UART_SET_ANOMALY_THRESHOLD(uart, v) ((uart)->anomaly_threshold = (v))
-#else
-# define UART_GET_ANOMALY_THRESHOLD(uart) 0
-# define UART_SET_ANOMALY_THRESHOLD(uart, v)
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_PIO
-static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
-{
- struct tty_struct *tty = NULL;
- unsigned int status, ch, flg;
- static struct timeval anomaly_start = { .tv_sec = 0 };
-
- status = UART_GET_LSR(uart);
- UART_CLEAR_LSR(uart);
-
- ch = UART_GET_CHAR(uart);
- uart->port.icount.rx++;
-
-#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
- defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
- if (kgdb_connected && kgdboc_port_line == uart->port.line
- && kgdboc_break_enabled)
- if (ch == 0x3) {/* Ctrl + C */
- kgdb_breakpoint();
- return;
- }
-
- if (!uart->port.state || !uart->port.state->port.tty)
- return;
-#endif
- tty = uart->port.state->port.tty;
-
- if (ANOMALY_05000363) {
- /* The BF533 (and BF561) family of processors have a nice anomaly
- * where they continuously generate characters for a "single" break.
- * We have to basically ignore this flood until the "next" valid
- * character comes across. Due to the nature of the flood, it is
- * not possible to reliably catch bytes that are sent too quickly
- * after this break. So application code talking to the Blackfin
- * which sends a break signal must allow at least 1.5 character
- * times after the end of the break for things to stabilize. This
- * timeout was picked as it must absolutely be larger than 1
- * character time +/- some percent. So 1.5 sounds good. All other
- * Blackfin families operate properly. Woo.
- */
- if (anomaly_start.tv_sec) {
- struct timeval curr;
- suseconds_t usecs;
-
- if ((~ch & (~ch + 1)) & 0xff)
- goto known_good_char;
-
- do_gettimeofday(&curr);
- if (curr.tv_sec - anomaly_start.tv_sec > 1)
- goto known_good_char;
-
- usecs = 0;
- if (curr.tv_sec != anomaly_start.tv_sec)
- usecs += USEC_PER_SEC;
- usecs += curr.tv_usec - anomaly_start.tv_usec;
-
- if (usecs > UART_GET_ANOMALY_THRESHOLD(uart))
- goto known_good_char;
-
- if (ch)
- anomaly_start.tv_sec = 0;
- else
- anomaly_start = curr;
-
- return;
-
- known_good_char:
- status &= ~BI;
- anomaly_start.tv_sec = 0;
- }
- }
-
- if (status & BI) {
- if (ANOMALY_05000363)
- if (bfin_revid() < 5)
- do_gettimeofday(&anomaly_start);
- uart->port.icount.brk++;
- if (uart_handle_break(&uart->port))
- goto ignore_char;
- status &= ~(PE | FE);
- }
- if (status & PE)
- uart->port.icount.parity++;
- if (status & OE)
- uart->port.icount.overrun++;
- if (status & FE)
- uart->port.icount.frame++;
-
- status &= uart->port.read_status_mask;
-
- if (status & BI)
- flg = TTY_BREAK;
- else if (status & PE)
- flg = TTY_PARITY;
- else if (status & FE)
- flg = TTY_FRAME;
- else
- flg = TTY_NORMAL;
-
- if (uart_handle_sysrq_char(&uart->port, ch))
- goto ignore_char;
-
- uart_insert_char(&uart->port, status, OE, ch, flg);
-
- ignore_char:
- tty_flip_buffer_push(tty);
-}
-
-static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
-{
- struct circ_buf *xmit = &uart->port.state->xmit;
-
- if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
-#ifdef CONFIG_BF54x
- /* Clear TFI bit */
- UART_PUT_LSR(uart, TFI);
-#endif
- /* Anomaly notes:
- * 05000215 - we always clear ETBEI within last UART TX
- * interrupt to end a string. It is always set
- * when start a new tx.
- */
- UART_CLEAR_IER(uart, ETBEI);
- return;
- }
-
- if (uart->port.x_char) {
- UART_PUT_CHAR(uart, uart->port.x_char);
- uart->port.icount.tx++;
- uart->port.x_char = 0;
- }
-
- while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
- UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- uart->port.icount.tx++;
- }
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&uart->port);
-}
-
-static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
-{
- struct bfin_serial_port *uart = dev_id;
-
- while (UART_GET_LSR(uart) & DR)
- bfin_serial_rx_chars(uart);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
-{
- struct bfin_serial_port *uart = dev_id;
-
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
- uart->scts = 0;
- uart_handle_cts_change(&uart->port, uart->scts);
- }
-#endif
- spin_lock(&uart->port.lock);
- if (UART_GET_LSR(uart) & THRE)
- bfin_serial_tx_chars(uart);
- spin_unlock(&uart->port.lock);
-
- return IRQ_HANDLED;
-}
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_DMA
-static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
-{
- struct circ_buf *xmit = &uart->port.state->xmit;
-
- uart->tx_done = 0;
-
- if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
- uart->tx_count = 0;
- uart->tx_done = 1;
- return;
- }
-
- if (uart->port.x_char) {
- UART_PUT_CHAR(uart, uart->port.x_char);
- uart->port.icount.tx++;
- uart->port.x_char = 0;
- }
-
- uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
- if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
- uart->tx_count = UART_XMIT_SIZE - xmit->tail;
- blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail),
- (unsigned long)(xmit->buf+xmit->tail+uart->tx_count));
- set_dma_config(uart->tx_dma_channel,
- set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
- INTR_ON_BUF,
- DIMENSION_LINEAR,
- DATA_SIZE_8,
- DMA_SYNC_RESTART));
- set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail));
- set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
- set_dma_x_modify(uart->tx_dma_channel, 1);
- SSYNC();
- enable_dma(uart->tx_dma_channel);
-
- UART_SET_IER(uart, ETBEI);
-}
-
-static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
-{
- struct tty_struct *tty = uart->port.state->port.tty;
- int i, flg, status;
-
- status = UART_GET_LSR(uart);
- UART_CLEAR_LSR(uart);
-
- uart->port.icount.rx +=
- CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail,
- UART_XMIT_SIZE);
-
- if (status & BI) {
- uart->port.icount.brk++;
- if (uart_handle_break(&uart->port))
- goto dma_ignore_char;
- status &= ~(PE | FE);
- }
- if (status & PE)
- uart->port.icount.parity++;
- if (status & OE)
- uart->port.icount.overrun++;
- if (status & FE)
- uart->port.icount.frame++;
-
- status &= uart->port.read_status_mask;
-
- if (status & BI)
- flg = TTY_BREAK;
- else if (status & PE)
- flg = TTY_PARITY;
- else if (status & FE)
- flg = TTY_FRAME;
- else
- flg = TTY_NORMAL;
-
- for (i = uart->rx_dma_buf.tail; ; i++) {
- if (i >= UART_XMIT_SIZE)
- i = 0;
- if (i == uart->rx_dma_buf.head)
- break;
- if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i]))
- uart_insert_char(&uart->port, status, OE,
- uart->rx_dma_buf.buf[i], flg);
- }
-
- dma_ignore_char:
- tty_flip_buffer_push(tty);
-}
-
-void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
-{
- int x_pos, pos;
-
- dma_disable_irq_nosync(uart->rx_dma_channel);
- spin_lock_bh(&uart->rx_lock);
-
- /* 2D DMA RX buffer ring is used. Because curr_y_count and
- * curr_x_count can't be read as an atomic operation,
- * curr_y_count should be read before curr_x_count. When
- * curr_x_count is read, curr_y_count may already indicate
- * next buffer line. But, the position calculated here is
- * still indicate the old line. The wrong position data may
- * be smaller than current buffer tail, which cause garbages
- * are received if it is not prohibit.
- */
- uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
- x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
- uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
- if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
- uart->rx_dma_nrows = 0;
- x_pos = DMA_RX_XCOUNT - x_pos;
- if (x_pos == DMA_RX_XCOUNT)
- x_pos = 0;
-
- pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
- /* Ignore receiving data if new position is in the same line of
- * current buffer tail and small.
- */
- if (pos > uart->rx_dma_buf.tail ||
- uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
- uart->rx_dma_buf.head = pos;
- bfin_serial_dma_rx_chars(uart);
- uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
- }
-
- spin_unlock_bh(&uart->rx_lock);
- dma_enable_irq(uart->rx_dma_channel);
-
- mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
-}
-
-static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
-{
- struct bfin_serial_port *uart = dev_id;
- struct circ_buf *xmit = &uart->port.state->xmit;
-
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && !(bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
- uart->scts = 0;
- uart_handle_cts_change(&uart->port, uart->scts);
- }
-#endif
-
- spin_lock(&uart->port.lock);
- if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
- disable_dma(uart->tx_dma_channel);
- clear_dma_irqstat(uart->tx_dma_channel);
- /* Anomaly notes:
- * 05000215 - we always clear ETBEI within last UART TX
- * interrupt to end a string. It is always set
- * when start a new tx.
- */
- UART_CLEAR_IER(uart, ETBEI);
- xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
- uart->port.icount.tx += uart->tx_count;
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&uart->port);
-
- bfin_serial_dma_tx_chars(uart);
- }
-
- spin_unlock(&uart->port.lock);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
-{
- struct bfin_serial_port *uart = dev_id;
- unsigned short irqstat;
- int x_pos, pos;
-
- spin_lock(&uart->rx_lock);
- irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
- clear_dma_irqstat(uart->rx_dma_channel);
-
- uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
- x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
- uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
- if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
- uart->rx_dma_nrows = 0;
-
- pos = uart->rx_dma_nrows * DMA_RX_XCOUNT;
- if (pos > uart->rx_dma_buf.tail ||
- uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
- uart->rx_dma_buf.head = pos;
- bfin_serial_dma_rx_chars(uart);
- uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
- }
-
- spin_unlock(&uart->rx_lock);
-
- return IRQ_HANDLED;
-}
-#endif
-
-/*
- * Return TIOCSER_TEMT when transmitter is not busy.
- */
-static unsigned int bfin_serial_tx_empty(struct uart_port *port)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
- unsigned short lsr;
-
- lsr = UART_GET_LSR(uart);
- if (lsr & TEMT)
- return TIOCSER_TEMT;
- else
- return 0;
-}
-
-static void bfin_serial_break_ctl(struct uart_port *port, int break_state)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
- u16 lcr = UART_GET_LCR(uart);
- if (break_state)
- lcr |= SB;
- else
- lcr &= ~SB;
- UART_PUT_LCR(uart, lcr);
- SSYNC();
-}
-
-static int bfin_serial_startup(struct uart_port *port)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
-
-#ifdef CONFIG_SERIAL_BFIN_DMA
- dma_addr_t dma_handle;
-
- if (request_dma(uart->rx_dma_channel, "BFIN_UART_RX") < 0) {
- printk(KERN_NOTICE "Unable to attach Blackfin UART RX DMA channel\n");
- return -EBUSY;
- }
-
- if (request_dma(uart->tx_dma_channel, "BFIN_UART_TX") < 0) {
- printk(KERN_NOTICE "Unable to attach Blackfin UART TX DMA channel\n");
- free_dma(uart->rx_dma_channel);
- return -EBUSY;
- }
-
- set_dma_callback(uart->rx_dma_channel, bfin_serial_dma_rx_int, uart);
- set_dma_callback(uart->tx_dma_channel, bfin_serial_dma_tx_int, uart);
-
- uart->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA);
- uart->rx_dma_buf.head = 0;
- uart->rx_dma_buf.tail = 0;
- uart->rx_dma_nrows = 0;
-
- set_dma_config(uart->rx_dma_channel,
- set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
- INTR_ON_ROW, DIMENSION_2D,
- DATA_SIZE_8,
- DMA_SYNC_RESTART));
- set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT);
- set_dma_x_modify(uart->rx_dma_channel, 1);
- set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT);
- set_dma_y_modify(uart->rx_dma_channel, 1);
- set_dma_start_addr(uart->rx_dma_channel, (unsigned long)uart->rx_dma_buf.buf);
- enable_dma(uart->rx_dma_channel);
-
- uart->rx_dma_timer.data = (unsigned long)(uart);
- uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout;
- uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
- add_timer(&(uart->rx_dma_timer));
-#else
-# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
- defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
- if (kgdboc_port_line == uart->port.line && kgdboc_break_enabled)
- kgdboc_break_enabled = 0;
- else {
-# endif
- if (request_irq(uart->port.irq, bfin_serial_rx_int, IRQF_DISABLED,
- "BFIN_UART_RX", uart)) {
- printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n");
- return -EBUSY;
- }
-
- if (request_irq
- (uart->port.irq+1, bfin_serial_tx_int, IRQF_DISABLED,
- "BFIN_UART_TX", uart)) {
- printk(KERN_NOTICE "Unable to attach BlackFin UART TX interrupt\n");
- free_irq(uart->port.irq, uart);
- return -EBUSY;
- }
-
-# ifdef CONFIG_BF54x
- {
- /*
- * UART2 and UART3 on BF548 share interrupt PINs and DMA
- * controllers with SPORT2 and SPORT3. UART rx and tx
- * interrupts are generated in PIO mode only when configure
- * their peripheral mapping registers properly, which means
- * request corresponding DMA channels in PIO mode as well.
- */
- unsigned uart_dma_ch_rx, uart_dma_ch_tx;
-
- switch (uart->port.irq) {
- case IRQ_UART3_RX:
- uart_dma_ch_rx = CH_UART3_RX;
- uart_dma_ch_tx = CH_UART3_TX;
- break;
- case IRQ_UART2_RX:
- uart_dma_ch_rx = CH_UART2_RX;
- uart_dma_ch_tx = CH_UART2_TX;
- break;
- default:
- uart_dma_ch_rx = uart_dma_ch_tx = 0;
- break;
- };
-
- if (uart_dma_ch_rx &&
- request_dma(uart_dma_ch_rx, "BFIN_UART_RX") < 0) {
- printk(KERN_NOTICE"Fail to attach UART interrupt\n");
- free_irq(uart->port.irq, uart);
- free_irq(uart->port.irq + 1, uart);
- return -EBUSY;
- }
- if (uart_dma_ch_tx &&
- request_dma(uart_dma_ch_tx, "BFIN_UART_TX") < 0) {
- printk(KERN_NOTICE "Fail to attach UART interrupt\n");
- free_dma(uart_dma_ch_rx);
- free_irq(uart->port.irq, uart);
- free_irq(uart->port.irq + 1, uart);
- return -EBUSY;
- }
- }
-# endif
-# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
- defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
- }
-# endif
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- if (uart->cts_pin >= 0) {
- if (request_irq(gpio_to_irq(uart->cts_pin),
- bfin_serial_mctrl_cts_int,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
- IRQF_DISABLED, "BFIN_UART_CTS", uart)) {
- uart->cts_pin = -1;
- pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
- }
- }
- if (uart->rts_pin >= 0) {
- gpio_direction_output(uart->rts_pin, 0);
- }
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
- bfin_serial_mctrl_cts_int,
- IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
- uart->cts_pin = -1;
- pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
- }
-
- /* CTS RTS PINs are negative assertive. */
- UART_PUT_MCR(uart, ACTS);
- UART_SET_IER(uart, EDSSI);
-#endif
-
- UART_SET_IER(uart, ERBFI);
- return 0;
-}
-
-static void bfin_serial_shutdown(struct uart_port *port)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
-
-#ifdef CONFIG_SERIAL_BFIN_DMA
- disable_dma(uart->tx_dma_channel);
- free_dma(uart->tx_dma_channel);
- disable_dma(uart->rx_dma_channel);
- free_dma(uart->rx_dma_channel);
- del_timer(&(uart->rx_dma_timer));
- dma_free_coherent(NULL, PAGE_SIZE, uart->rx_dma_buf.buf, 0);
-#else
-#ifdef CONFIG_BF54x
- switch (uart->port.irq) {
- case IRQ_UART3_RX:
- free_dma(CH_UART3_RX);
- free_dma(CH_UART3_TX);
- break;
- case IRQ_UART2_RX:
- free_dma(CH_UART2_RX);
- free_dma(CH_UART2_TX);
- break;
- default:
- break;
- };
-#endif
- free_irq(uart->port.irq, uart);
- free_irq(uart->port.irq+1, uart);
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- if (uart->cts_pin >= 0)
- free_irq(gpio_to_irq(uart->cts_pin), uart);
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->cts_pin >= 0)
- free_irq(uart->status_irq, uart);
-#endif
-}
-
-static void
-bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
- unsigned long flags;
- unsigned int baud, quot;
- unsigned short val, ier, lcr = 0;
-
- switch (termios->c_cflag & CSIZE) {
- case CS8:
- lcr = WLS(8);
- break;
- case CS7:
- lcr = WLS(7);
- break;
- case CS6:
- lcr = WLS(6);
- break;
- case CS5:
- lcr = WLS(5);
- break;
- default:
- printk(KERN_ERR "%s: word lengh not supported\n",
- __func__);
- }
-
- /* Anomaly notes:
- * 05000231 - STOP bit is always set to 1 whatever the user is set.
- */
- if (termios->c_cflag & CSTOPB) {
- if (ANOMALY_05000231)
- printk(KERN_WARNING "STOP bits other than 1 is not "
- "supported in case of anomaly 05000231.\n");
- else
- lcr |= STB;
- }
- if (termios->c_cflag & PARENB)
- lcr |= PEN;
- if (!(termios->c_cflag & PARODD))
- lcr |= EPS;
- if (termios->c_cflag & CMSPAR)
- lcr |= STP;
-
- spin_lock_irqsave(&uart->port.lock, flags);
-
- port->read_status_mask = OE;
- if (termios->c_iflag & INPCK)
- port->read_status_mask |= (FE | PE);
- if (termios->c_iflag & (BRKINT | PARMRK))
- port->read_status_mask |= BI;
-
- /*
- * Characters to ignore
- */
- port->ignore_status_mask = 0;
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= FE | PE;
- if (termios->c_iflag & IGNBRK) {
- port->ignore_status_mask |= BI;
- /*
- * If we're ignoring parity and break indicators,
- * ignore overruns too (for real raw support).
- */
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= OE;
- }
-
- baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
- quot = uart_get_divisor(port, baud);
-
- /* If discipline is not IRDA, apply ANOMALY_05000230 */
- if (termios->c_line != N_IRDA)
- quot -= ANOMALY_05000230;
-
- UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);
-
- /* Disable UART */
- ier = UART_GET_IER(uart);
- UART_DISABLE_INTS(uart);
-
- /* Set DLAB in LCR to Access DLL and DLH */
- UART_SET_DLAB(uart);
-
- UART_PUT_DLL(uart, quot & 0xFF);
- UART_PUT_DLH(uart, (quot >> 8) & 0xFF);
- SSYNC();
-
- /* Clear DLAB in LCR to Access THR RBR IER */
- UART_CLEAR_DLAB(uart);
-
- UART_PUT_LCR(uart, lcr);
-
- /* Enable UART */
- UART_ENABLE_INTS(uart, ier);
-
- val = UART_GET_GCTL(uart);
- val |= UCEN;
- UART_PUT_GCTL(uart, val);
-
- /* Port speed changed, update the per-port timeout. */
- uart_update_timeout(port, termios->c_cflag, baud);
-
- spin_unlock_irqrestore(&uart->port.lock, flags);
-}
-
-static const char *bfin_serial_type(struct uart_port *port)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
-
- return uart->port.type == PORT_BFIN ? "BFIN-UART" : NULL;
-}
-
-/*
- * Release the memory region(s) being used by 'port'.
- */
-static void bfin_serial_release_port(struct uart_port *port)
-{
-}
-
-/*
- * Request the memory region(s) being used by 'port'.
- */
-static int bfin_serial_request_port(struct uart_port *port)
-{
- return 0;
-}
-
-/*
- * Configure/autoconfigure the port.
- */
-static void bfin_serial_config_port(struct uart_port *port, int flags)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
-
- if (flags & UART_CONFIG_TYPE &&
- bfin_serial_request_port(&uart->port) == 0)
- uart->port.type = PORT_BFIN;
-}
-
-/*
- * Verify the new serial_struct (for TIOCSSERIAL).
- * The only change we allow are to the flags and type, and
- * even then only between PORT_BFIN and PORT_UNKNOWN
- */
-static int
-bfin_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
- return 0;
-}
-
-/*
- * Enable the IrDA function if tty->ldisc.num is N_IRDA.
- * In other cases, disable IrDA function.
- */
-static void bfin_serial_set_ldisc(struct uart_port *port, int ld)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
- unsigned short val;
-
- switch (ld) {
- case N_IRDA:
- val = UART_GET_GCTL(uart);
- val |= (IREN | RPOLC);
- UART_PUT_GCTL(uart, val);
- break;
- default:
- val = UART_GET_GCTL(uart);
- val &= ~(IREN | RPOLC);
- UART_PUT_GCTL(uart, val);
- }
-}
-
-static void bfin_serial_reset_irda(struct uart_port *port)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
- unsigned short val;
-
- val = UART_GET_GCTL(uart);
- val &= ~(IREN | RPOLC);
- UART_PUT_GCTL(uart, val);
- SSYNC();
- val |= (IREN | RPOLC);
- UART_PUT_GCTL(uart, val);
- SSYNC();
-}
-
-#ifdef CONFIG_CONSOLE_POLL
-/* Anomaly notes:
- * 05000099 - Because we only use THRE in poll_put and DR in poll_get,
- * losing other bits of UART_LSR is not a problem here.
- */
-static void bfin_serial_poll_put_char(struct uart_port *port, unsigned char chr)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
-
- while (!(UART_GET_LSR(uart) & THRE))
- cpu_relax();
-
- UART_CLEAR_DLAB(uart);
- UART_PUT_CHAR(uart, (unsigned char)chr);
-}
-
-static int bfin_serial_poll_get_char(struct uart_port *port)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
- unsigned char chr;
-
- while (!(UART_GET_LSR(uart) & DR))
- cpu_relax();
-
- UART_CLEAR_DLAB(uart);
- chr = UART_GET_CHAR(uart);
-
- return chr;
-}
-#endif
-
-#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
- defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
-static void bfin_kgdboc_port_shutdown(struct uart_port *port)
-{
- if (kgdboc_break_enabled) {
- kgdboc_break_enabled = 0;
- bfin_serial_shutdown(port);
- }
-}
-
-static int bfin_kgdboc_port_startup(struct uart_port *port)
-{
- kgdboc_port_line = port->line;
- kgdboc_break_enabled = !bfin_serial_startup(port);
- return 0;
-}
-#endif
-
-static struct uart_ops bfin_serial_pops = {
- .tx_empty = bfin_serial_tx_empty,
- .set_mctrl = bfin_serial_set_mctrl,
- .get_mctrl = bfin_serial_get_mctrl,
- .stop_tx = bfin_serial_stop_tx,
- .start_tx = bfin_serial_start_tx,
- .stop_rx = bfin_serial_stop_rx,
- .enable_ms = bfin_serial_enable_ms,
- .break_ctl = bfin_serial_break_ctl,
- .startup = bfin_serial_startup,
- .shutdown = bfin_serial_shutdown,
- .set_termios = bfin_serial_set_termios,
- .set_ldisc = bfin_serial_set_ldisc,
- .type = bfin_serial_type,
- .release_port = bfin_serial_release_port,
- .request_port = bfin_serial_request_port,
- .config_port = bfin_serial_config_port,
- .verify_port = bfin_serial_verify_port,
-#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
- defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
- .kgdboc_port_startup = bfin_kgdboc_port_startup,
- .kgdboc_port_shutdown = bfin_kgdboc_port_shutdown,
-#endif
-#ifdef CONFIG_CONSOLE_POLL
- .poll_put_char = bfin_serial_poll_put_char,
- .poll_get_char = bfin_serial_poll_get_char,
-#endif
-};
-
-#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
-/*
- * If the port was already initialised (eg, by a boot loader),
- * try to determine the current setup.
- */
-static void __init
-bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
- int *parity, int *bits)
-{
- unsigned short status;
-
- status = UART_GET_IER(uart) & (ERBFI | ETBEI);
- if (status == (ERBFI | ETBEI)) {
- /* ok, the port was enabled */
- u16 lcr, dlh, dll;
-
- lcr = UART_GET_LCR(uart);
-
- *parity = 'n';
- if (lcr & PEN) {
- if (lcr & EPS)
- *parity = 'e';
- else
- *parity = 'o';
- }
- switch (lcr & 0x03) {
- case 0: *bits = 5; break;
- case 1: *bits = 6; break;
- case 2: *bits = 7; break;
- case 3: *bits = 8; break;
- }
- /* Set DLAB in LCR to Access DLL and DLH */
- UART_SET_DLAB(uart);
-
- dll = UART_GET_DLL(uart);
- dlh = UART_GET_DLH(uart);
-
- /* Clear DLAB in LCR to Access THR RBR IER */
- UART_CLEAR_DLAB(uart);
-
- *baud = get_sclk() / (16*(dll | dlh << 8));
- }
- pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __func__, *baud, *parity, *bits);
-}
-
-static struct uart_driver bfin_serial_reg;
-
-static void bfin_serial_console_putchar(struct uart_port *port, int ch)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
- while (!(UART_GET_LSR(uart) & THRE))
- barrier();
- UART_PUT_CHAR(uart, ch);
-}
-
-#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
- defined (CONFIG_EARLY_PRINTK) */
-
-#ifdef CONFIG_SERIAL_BFIN_CONSOLE
-#define CLASS_BFIN_CONSOLE "bfin-console"
-/*
- * Interrupts are disabled on entering
- */
-static void
-bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
-{
- struct bfin_serial_port *uart = bfin_serial_ports[co->index];
- unsigned long flags;
-
- spin_lock_irqsave(&uart->port.lock, flags);
- uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
- spin_unlock_irqrestore(&uart->port.lock, flags);
-
-}
-
-static int __init
-bfin_serial_console_setup(struct console *co, char *options)
-{
- struct bfin_serial_port *uart;
- int baud = 57600;
- int bits = 8;
- int parity = 'n';
-# if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
- defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
- int flow = 'r';
-# else
- int flow = 'n';
-# endif
-
- /*
- * Check whether an invalid uart number has been specified, and
- * if so, search for the first available port that does have
- * console support.
- */
- if (co->index < 0 || co->index >= BFIN_UART_NR_PORTS)
- return -ENODEV;
-
- uart = bfin_serial_ports[co->index];
- if (!uart)
- return -ENODEV;
-
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
- else
- bfin_serial_console_get_options(uart, &baud, &parity, &bits);
-
- return uart_set_options(&uart->port, co, baud, parity, bits, flow);
-}
-
-static struct console bfin_serial_console = {
- .name = BFIN_SERIAL_DEV_NAME,
- .write = bfin_serial_console_write,
- .device = uart_console_device,
- .setup = bfin_serial_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &bfin_serial_reg,
-};
-#define BFIN_SERIAL_CONSOLE &bfin_serial_console
-#else
-#define BFIN_SERIAL_CONSOLE NULL
-#endif /* CONFIG_SERIAL_BFIN_CONSOLE */
-
-#ifdef CONFIG_EARLY_PRINTK
-static struct bfin_serial_port bfin_earlyprintk_port;
-#define CLASS_BFIN_EARLYPRINTK "bfin-earlyprintk"
-
-/*
- * Interrupts are disabled on entering
- */
-static void
-bfin_earlyprintk_console_write(struct console *co, const char *s, unsigned int count)
-{
- unsigned long flags;
-
- if (bfin_earlyprintk_port.port.line != co->index)
- return;
-
- spin_lock_irqsave(&bfin_earlyprintk_port.port.lock, flags);
- uart_console_write(&bfin_earlyprintk_port.port, s, count,
- bfin_serial_console_putchar);
- spin_unlock_irqrestore(&bfin_earlyprintk_port.port.lock, flags);
-}
-
-/*
- * This should have a .setup or .early_setup in it, but then things get called
- * without the command line options, and the baud rate gets messed up - so
- * don't let the common infrastructure play with things. (see calls to setup
- * & earlysetup in ./kernel/printk.c:register_console()
- */
-static struct __initdata console bfin_early_serial_console = {
- .name = "early_BFuart",
- .write = bfin_earlyprintk_console_write,
- .device = uart_console_device,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &bfin_serial_reg,
-};
-#endif
-
-static struct uart_driver bfin_serial_reg = {
- .owner = THIS_MODULE,
- .driver_name = DRIVER_NAME,
- .dev_name = BFIN_SERIAL_DEV_NAME,
- .major = BFIN_SERIAL_MAJOR,
- .minor = BFIN_SERIAL_MINOR,
- .nr = BFIN_UART_NR_PORTS,
- .cons = BFIN_SERIAL_CONSOLE,
-};
-
-static int bfin_serial_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct bfin_serial_port *uart = platform_get_drvdata(pdev);
-
- return uart_suspend_port(&bfin_serial_reg, &uart->port);
-}
-
-static int bfin_serial_resume(struct platform_device *pdev)
-{
- struct bfin_serial_port *uart = platform_get_drvdata(pdev);
-
- return uart_resume_port(&bfin_serial_reg, &uart->port);
-}
-
-static int bfin_serial_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct bfin_serial_port *uart = NULL;
- int ret = 0;
-
- if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
- dev_err(&pdev->dev, "Wrong bfin uart platform device id.\n");
- return -ENOENT;
- }
-
- if (bfin_serial_ports[pdev->id] == NULL) {
-
- uart = kzalloc(sizeof(*uart), GFP_KERNEL);
- if (!uart) {
- dev_err(&pdev->dev,
- "fail to malloc bfin_serial_port\n");
- return -ENOMEM;
- }
- bfin_serial_ports[pdev->id] = uart;
-
-#ifdef CONFIG_EARLY_PRINTK
- if (!(bfin_earlyprintk_port.port.membase
- && bfin_earlyprintk_port.port.line == pdev->id)) {
- /*
- * If the peripheral PINs of current port is allocated
- * in earlyprintk probe stage, don't do it again.
- */
-#endif
- ret = peripheral_request_list(
- (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev,
- "fail to request bfin serial peripherals\n");
- goto out_error_free_mem;
- }
-#ifdef CONFIG_EARLY_PRINTK
- }
-#endif
-
- spin_lock_init(&uart->port.lock);
- uart->port.uartclk = get_sclk();
- uart->port.fifosize = BFIN_UART_TX_FIFO_SIZE;
- uart->port.ops = &bfin_serial_pops;
- uart->port.line = pdev->id;
- uart->port.iotype = UPIO_MEM;
- uart->port.flags = UPF_BOOT_AUTOCONF;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
- ret = -ENOENT;
- goto out_error_free_peripherals;
- }
-
- uart->port.membase = ioremap(res->start, resource_size(res));
- if (!uart->port.membase) {
- dev_err(&pdev->dev, "Cannot map uart IO\n");
- ret = -ENXIO;
- goto out_error_free_peripherals;
- }
- uart->port.mapbase = res->start;
-
- uart->port.irq = platform_get_irq(pdev, 0);
- if (uart->port.irq < 0) {
- dev_err(&pdev->dev, "No uart RX/TX IRQ specified\n");
- ret = -ENOENT;
- goto out_error_unmap;
- }
-
- uart->status_irq = platform_get_irq(pdev, 1);
- if (uart->status_irq < 0) {
- dev_err(&pdev->dev, "No uart status IRQ specified\n");
- ret = -ENOENT;
- goto out_error_unmap;
- }
-
-#ifdef CONFIG_SERIAL_BFIN_DMA
- spin_lock_init(&uart->rx_lock);
- uart->tx_done = 1;
- uart->tx_count = 0;
-
- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "No uart TX DMA channel specified\n");
- ret = -ENOENT;
- goto out_error_unmap;
- }
- uart->tx_dma_channel = res->start;
-
- res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (res == NULL) {
- dev_err(&pdev->dev, "No uart RX DMA channel specified\n");
- ret = -ENOENT;
- goto out_error_unmap;
- }
- uart->rx_dma_channel = res->start;
-
- init_timer(&(uart->rx_dma_timer));
-#endif
-
-#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
- defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (res == NULL)
- uart->cts_pin = -1;
- else
- uart->cts_pin = res->start;
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 1);
- if (res == NULL)
- uart->rts_pin = -1;
- else
- uart->rts_pin = res->start;
-# if defined(CONFIG_SERIAL_BFIN_CTSRTS)
- if (uart->rts_pin >= 0)
- gpio_request(uart->rts_pin, DRIVER_NAME);
-# endif
-#endif
- }
-
-#ifdef CONFIG_SERIAL_BFIN_CONSOLE
- if (!is_early_platform_device(pdev)) {
-#endif
- uart = bfin_serial_ports[pdev->id];
- uart->port.dev = &pdev->dev;
- dev_set_drvdata(&pdev->dev, uart);
- ret = uart_add_one_port(&bfin_serial_reg, &uart->port);
-#ifdef CONFIG_SERIAL_BFIN_CONSOLE
- }
-#endif
-
- if (!ret)
- return 0;
-
- if (uart) {
-out_error_unmap:
- iounmap(uart->port.membase);
-out_error_free_peripherals:
- peripheral_free_list(
- (unsigned short *)pdev->dev.platform_data);
-out_error_free_mem:
- kfree(uart);
- bfin_serial_ports[pdev->id] = NULL;
- }
-
- return ret;
-}
-
-static int __devexit bfin_serial_remove(struct platform_device *pdev)
-{
- struct bfin_serial_port *uart = platform_get_drvdata(pdev);
-
- dev_set_drvdata(&pdev->dev, NULL);
-
- if (uart) {
- uart_remove_one_port(&bfin_serial_reg, &uart->port);
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- if (uart->rts_pin >= 0)
- gpio_free(uart->rts_pin);
-#endif
- iounmap(uart->port.membase);
- peripheral_free_list(
- (unsigned short *)pdev->dev.platform_data);
- kfree(uart);
- bfin_serial_ports[pdev->id] = NULL;
- }
-
- return 0;
-}
-
-static struct platform_driver bfin_serial_driver = {
- .probe = bfin_serial_probe,
- .remove = __devexit_p(bfin_serial_remove),
- .suspend = bfin_serial_suspend,
- .resume = bfin_serial_resume,
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-#if defined(CONFIG_SERIAL_BFIN_CONSOLE)
-static __initdata struct early_platform_driver early_bfin_serial_driver = {
- .class_str = CLASS_BFIN_CONSOLE,
- .pdrv = &bfin_serial_driver,
- .requested_id = EARLY_PLATFORM_ID_UNSET,
-};
-
-static int __init bfin_serial_rs_console_init(void)
-{
- early_platform_driver_register(&early_bfin_serial_driver, DRIVER_NAME);
-
- early_platform_driver_probe(CLASS_BFIN_CONSOLE, BFIN_UART_NR_PORTS, 0);
-
- register_console(&bfin_serial_console);
-
- return 0;
-}
-console_initcall(bfin_serial_rs_console_init);
-#endif
-
-#ifdef CONFIG_EARLY_PRINTK
-/*
- * Memory can't be allocated dynamically during earlyprink init stage.
- * So, do individual probe for earlyprink with a static uart port variable.
- */
-static int bfin_earlyprintk_probe(struct platform_device *pdev)
-{
- struct resource *res;
- int ret;
-
- if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
- dev_err(&pdev->dev, "Wrong earlyprintk platform device id.\n");
- return -ENOENT;
- }
-
- ret = peripheral_request_list(
- (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev,
- "fail to request bfin serial peripherals\n");
- return ret;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
- ret = -ENOENT;
- goto out_error_free_peripherals;
- }
-
- bfin_earlyprintk_port.port.membase = ioremap(res->start,
- resource_size(res));
- if (!bfin_earlyprintk_port.port.membase) {
- dev_err(&pdev->dev, "Cannot map uart IO\n");
- ret = -ENXIO;
- goto out_error_free_peripherals;
- }
- bfin_earlyprintk_port.port.mapbase = res->start;
- bfin_earlyprintk_port.port.line = pdev->id;
- bfin_earlyprintk_port.port.uartclk = get_sclk();
- bfin_earlyprintk_port.port.fifosize = BFIN_UART_TX_FIFO_SIZE;
- spin_lock_init(&bfin_earlyprintk_port.port.lock);
-
- return 0;
-
-out_error_free_peripherals:
- peripheral_free_list(
- (unsigned short *)pdev->dev.platform_data);
-
- return ret;
-}
-
-static struct platform_driver bfin_earlyprintk_driver = {
- .probe = bfin_earlyprintk_probe,
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-static __initdata struct early_platform_driver early_bfin_earlyprintk_driver = {
- .class_str = CLASS_BFIN_EARLYPRINTK,
- .pdrv = &bfin_earlyprintk_driver,
- .requested_id = EARLY_PLATFORM_ID_UNSET,
-};
-
-struct console __init *bfin_earlyserial_init(unsigned int port,
- unsigned int cflag)
-{
- struct ktermios t;
- char port_name[20];
-
- if (port < 0 || port >= BFIN_UART_NR_PORTS)
- return NULL;
-
- /*
- * Only probe resource of the given port in earlyprintk boot arg.
- * The expected port id should be indicated in port name string.
- */
- snprintf(port_name, 20, DRIVER_NAME ".%d", port);
- early_platform_driver_register(&early_bfin_earlyprintk_driver,
- port_name);
- early_platform_driver_probe(CLASS_BFIN_EARLYPRINTK, 1, 0);
-
- if (!bfin_earlyprintk_port.port.membase)
- return NULL;
-
-#ifdef CONFIG_SERIAL_BFIN_CONSOLE
- /*
- * If we are using early serial, don't let the normal console rewind
- * log buffer, since that causes things to be printed multiple times
- */
- bfin_serial_console.flags &= ~CON_PRINTBUFFER;
-#endif
-
- bfin_early_serial_console.index = port;
- t.c_cflag = cflag;
- t.c_iflag = 0;
- t.c_oflag = 0;
- t.c_lflag = ICANON;
- t.c_line = port;
- bfin_serial_set_termios(&bfin_earlyprintk_port.port, &t, &t);
-
- return &bfin_early_serial_console;
-}
-#endif /* CONFIG_EARLY_PRINTK */
-
-static int __init bfin_serial_init(void)
-{
- int ret;
-
- pr_info("Blackfin serial driver\n");
-
- ret = uart_register_driver(&bfin_serial_reg);
- if (ret) {
- pr_err("failed to register %s:%d\n",
- bfin_serial_reg.driver_name, ret);
- }
-
- ret = platform_driver_register(&bfin_serial_driver);
- if (ret) {
- pr_err("fail to register bfin uart\n");
- uart_unregister_driver(&bfin_serial_reg);
- }
-
- return ret;
-}
-
-static void __exit bfin_serial_exit(void)
-{
- platform_driver_unregister(&bfin_serial_driver);
- uart_unregister_driver(&bfin_serial_reg);
-}
-
-
-module_init(bfin_serial_init);
-module_exit(bfin_serial_exit);
-
-MODULE_AUTHOR("Sonic Zhang, Aubrey Li");
-MODULE_DESCRIPTION("Blackfin generic serial port driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CHARDEV_MAJOR(BFIN_SERIAL_MAJOR);
-MODULE_ALIAS("platform:bfin-uart");
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index 891d194ae75..ee101c0d358 100644
--- a/drivers/tty/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -294,7 +294,7 @@ static int sport_startup(struct uart_port *port)
if (request_irq(gpio_to_irq(up->cts_pin),
sport_mctrl_cts_int,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
- IRQF_DISABLED, "BFIN_SPORT_UART_CTS", up)) {
+ 0, "BFIN_SPORT_UART_CTS", up)) {
up->cts_pin = -1;
dev_info(port->dev, "Unable to attach BlackFin UART over SPORT CTS interrupt. So, disable it.\n");
}
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
new file mode 100644
index 00000000000..66afb98b77b
--- /dev/null
+++ b/drivers/tty/serial/bfin_uart.c
@@ -0,0 +1,1611 @@
+/*
+ * Blackfin On-Chip Serial Driver
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#define DRIVER_NAME "bfin-uart"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/kgdb.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/portmux.h>
+#include <asm/cacheflush.h>
+#include <asm/dma.h>
+
+#define port_membase(uart) (((struct bfin_serial_port *)(uart))->port.membase)
+#define get_lsr_cache(uart) (((struct bfin_serial_port *)(uart))->lsr)
+#define put_lsr_cache(uart, v) (((struct bfin_serial_port *)(uart))->lsr = (v))
+#include <asm/bfin_serial.h>
+
+#ifdef CONFIG_SERIAL_BFIN_MODULE
+# undef CONFIG_EARLY_PRINTK
+#endif
+
+#ifdef CONFIG_SERIAL_BFIN_MODULE
+# undef CONFIG_EARLY_PRINTK
+#endif
+
+/* UART name and device definitions */
+#define BFIN_SERIAL_DEV_NAME "ttyBF"
+#define BFIN_SERIAL_MAJOR 204
+#define BFIN_SERIAL_MINOR 64
+
+static struct bfin_serial_port *bfin_serial_ports[BFIN_UART_NR_PORTS];
+
+#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
+ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+
+# ifndef CONFIG_SERIAL_BFIN_PIO
+# error KGDB only support UART in PIO mode.
+# endif
+
+static int kgdboc_port_line;
+static int kgdboc_break_enabled;
+#endif
+/*
+ * Setup for console. Argument comes from the menuconfig
+ */
+#define DMA_RX_XCOUNT 512
+#define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT)
+
+#define DMA_RX_FLUSH_JIFFIES (HZ / 50)
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart);
+#else
+static void bfin_serial_tx_chars(struct bfin_serial_port *uart);
+#endif
+
+static void bfin_serial_reset_irda(struct uart_port *port);
+
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+ defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+static unsigned int bfin_serial_get_mctrl(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ if (uart->cts_pin < 0)
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+
+ /* CTS PIN is negative assertive. */
+ if (UART_GET_CTS(uart))
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+ else
+ return TIOCM_DSR | TIOCM_CAR;
+}
+
+static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ if (uart->rts_pin < 0)
+ return;
+
+ /* RTS PIN is negative assertive. */
+ if (mctrl & TIOCM_RTS)
+ UART_ENABLE_RTS(uart);
+ else
+ UART_DISABLE_RTS(uart);
+}
+
+/*
+ * Handle any change of modem status signal.
+ */
+static irqreturn_t bfin_serial_mctrl_cts_int(int irq, void *dev_id)
+{
+ struct bfin_serial_port *uart = dev_id;
+ unsigned int status;
+
+ status = bfin_serial_get_mctrl(&uart->port);
+ uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+ uart->scts = 1;
+ UART_CLEAR_SCTS(uart);
+ UART_CLEAR_IER(uart, EDSSI);
+#endif
+
+ return IRQ_HANDLED;
+}
+#else
+static unsigned int bfin_serial_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+}
+
+static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+#endif
+
+/*
+ * interrupts are disabled on entry
+ */
+static void bfin_serial_stop_tx(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ struct circ_buf *xmit = &uart->port.state->xmit;
+#endif
+
+ while (!(UART_GET_LSR(uart) & TEMT))
+ cpu_relax();
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ disable_dma(uart->tx_dma_channel);
+ xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
+ uart->port.icount.tx += uart->tx_count;
+ uart->tx_count = 0;
+ uart->tx_done = 1;
+#else
+#ifdef CONFIG_BF54x
+ /* Clear TFI bit */
+ UART_PUT_LSR(uart, TFI);
+#endif
+ UART_CLEAR_IER(uart, ETBEI);
+#endif
+}
+
+/*
+ * port is locked and interrupts are disabled
+ */
+static void bfin_serial_start_tx(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ struct tty_struct *tty = uart->port.state->port.tty;
+
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+ if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
+ uart->scts = 0;
+ uart_handle_cts_change(&uart->port, uart->scts);
+ }
+#endif
+
+ /*
+ * To avoid losting RX interrupt, we reset IR function
+ * before sending data.
+ */
+ if (tty->termios->c_line == N_IRDA)
+ bfin_serial_reset_irda(port);
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ if (uart->tx_done)
+ bfin_serial_dma_tx_chars(uart);
+#else
+ UART_SET_IER(uart, ETBEI);
+ bfin_serial_tx_chars(uart);
+#endif
+}
+
+/*
+ * Interrupts are enabled
+ */
+static void bfin_serial_stop_rx(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+
+ UART_CLEAR_IER(uart, ERBFI);
+}
+
+/*
+ * Set the modem control timer to fire immediately.
+ */
+static void bfin_serial_enable_ms(struct uart_port *port)
+{
+}
+
+
+#if ANOMALY_05000363 && defined(CONFIG_SERIAL_BFIN_PIO)
+# define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold)
+# define UART_SET_ANOMALY_THRESHOLD(uart, v) ((uart)->anomaly_threshold = (v))
+#else
+# define UART_GET_ANOMALY_THRESHOLD(uart) 0
+# define UART_SET_ANOMALY_THRESHOLD(uart, v)
+#endif
+
+#ifdef CONFIG_SERIAL_BFIN_PIO
+static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
+{
+ struct tty_struct *tty = NULL;
+ unsigned int status, ch, flg;
+ static struct timeval anomaly_start = { .tv_sec = 0 };
+
+ status = UART_GET_LSR(uart);
+ UART_CLEAR_LSR(uart);
+
+ ch = UART_GET_CHAR(uart);
+ uart->port.icount.rx++;
+
+#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
+ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+ if (kgdb_connected && kgdboc_port_line == uart->port.line
+ && kgdboc_break_enabled)
+ if (ch == 0x3) {/* Ctrl + C */
+ kgdb_breakpoint();
+ return;
+ }
+
+ if (!uart->port.state || !uart->port.state->port.tty)
+ return;
+#endif
+ tty = uart->port.state->port.tty;
+
+ if (ANOMALY_05000363) {
+ /* The BF533 (and BF561) family of processors have a nice anomaly
+ * where they continuously generate characters for a "single" break.
+ * We have to basically ignore this flood until the "next" valid
+ * character comes across. Due to the nature of the flood, it is
+ * not possible to reliably catch bytes that are sent too quickly
+ * after this break. So application code talking to the Blackfin
+ * which sends a break signal must allow at least 1.5 character
+ * times after the end of the break for things to stabilize. This
+ * timeout was picked as it must absolutely be larger than 1
+ * character time +/- some percent. So 1.5 sounds good. All other
+ * Blackfin families operate properly. Woo.
+ */
+ if (anomaly_start.tv_sec) {
+ struct timeval curr;
+ suseconds_t usecs;
+
+ if ((~ch & (~ch + 1)) & 0xff)
+ goto known_good_char;
+
+ do_gettimeofday(&curr);
+ if (curr.tv_sec - anomaly_start.tv_sec > 1)
+ goto known_good_char;
+
+ usecs = 0;
+ if (curr.tv_sec != anomaly_start.tv_sec)
+ usecs += USEC_PER_SEC;
+ usecs += curr.tv_usec - anomaly_start.tv_usec;
+
+ if (usecs > UART_GET_ANOMALY_THRESHOLD(uart))
+ goto known_good_char;
+
+ if (ch)
+ anomaly_start.tv_sec = 0;
+ else
+ anomaly_start = curr;
+
+ return;
+
+ known_good_char:
+ status &= ~BI;
+ anomaly_start.tv_sec = 0;
+ }
+ }
+
+ if (status & BI) {
+ if (ANOMALY_05000363)
+ if (bfin_revid() < 5)
+ do_gettimeofday(&anomaly_start);
+ uart->port.icount.brk++;
+ if (uart_handle_break(&uart->port))
+ goto ignore_char;
+ status &= ~(PE | FE);
+ }
+ if (status & PE)
+ uart->port.icount.parity++;
+ if (status & OE)
+ uart->port.icount.overrun++;
+ if (status & FE)
+ uart->port.icount.frame++;
+
+ status &= uart->port.read_status_mask;
+
+ if (status & BI)
+ flg = TTY_BREAK;
+ else if (status & PE)
+ flg = TTY_PARITY;
+ else if (status & FE)
+ flg = TTY_FRAME;
+ else
+ flg = TTY_NORMAL;
+
+ if (uart_handle_sysrq_char(&uart->port, ch))
+ goto ignore_char;
+
+ uart_insert_char(&uart->port, status, OE, ch, flg);
+
+ ignore_char:
+ tty_flip_buffer_push(tty);
+}
+
+static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
+{
+ struct circ_buf *xmit = &uart->port.state->xmit;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
+#ifdef CONFIG_BF54x
+ /* Clear TFI bit */
+ UART_PUT_LSR(uart, TFI);
+#endif
+ /* Anomaly notes:
+ * 05000215 - we always clear ETBEI within last UART TX
+ * interrupt to end a string. It is always set
+ * when start a new tx.
+ */
+ UART_CLEAR_IER(uart, ETBEI);
+ return;
+ }
+
+ if (uart->port.x_char) {
+ UART_PUT_CHAR(uart, uart->port.x_char);
+ uart->port.icount.tx++;
+ uart->port.x_char = 0;
+ }
+
+ while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
+ UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ uart->port.icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&uart->port);
+}
+
+static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
+{
+ struct bfin_serial_port *uart = dev_id;
+
+ while (UART_GET_LSR(uart) & DR)
+ bfin_serial_rx_chars(uart);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
+{
+ struct bfin_serial_port *uart = dev_id;
+
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+ if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
+ uart->scts = 0;
+ uart_handle_cts_change(&uart->port, uart->scts);
+ }
+#endif
+ spin_lock(&uart->port.lock);
+ if (UART_GET_LSR(uart) & THRE)
+ bfin_serial_tx_chars(uart);
+ spin_unlock(&uart->port.lock);
+
+ return IRQ_HANDLED;
+}
+#endif
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
+{
+ struct circ_buf *xmit = &uart->port.state->xmit;
+
+ uart->tx_done = 0;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
+ uart->tx_count = 0;
+ uart->tx_done = 1;
+ return;
+ }
+
+ if (uart->port.x_char) {
+ UART_PUT_CHAR(uart, uart->port.x_char);
+ uart->port.icount.tx++;
+ uart->port.x_char = 0;
+ }
+
+ uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
+ uart->tx_count = UART_XMIT_SIZE - xmit->tail;
+ blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail),
+ (unsigned long)(xmit->buf+xmit->tail+uart->tx_count));
+ set_dma_config(uart->tx_dma_channel,
+ set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
+ INTR_ON_BUF,
+ DIMENSION_LINEAR,
+ DATA_SIZE_8,
+ DMA_SYNC_RESTART));
+ set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail));
+ set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
+ set_dma_x_modify(uart->tx_dma_channel, 1);
+ SSYNC();
+ enable_dma(uart->tx_dma_channel);
+
+ UART_SET_IER(uart, ETBEI);
+}
+
+static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
+{
+ struct tty_struct *tty = uart->port.state->port.tty;
+ int i, flg, status;
+
+ status = UART_GET_LSR(uart);
+ UART_CLEAR_LSR(uart);
+
+ uart->port.icount.rx +=
+ CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail,
+ UART_XMIT_SIZE);
+
+ if (status & BI) {
+ uart->port.icount.brk++;
+ if (uart_handle_break(&uart->port))
+ goto dma_ignore_char;
+ status &= ~(PE | FE);
+ }
+ if (status & PE)
+ uart->port.icount.parity++;
+ if (status & OE)
+ uart->port.icount.overrun++;
+ if (status & FE)
+ uart->port.icount.frame++;
+
+ status &= uart->port.read_status_mask;
+
+ if (status & BI)
+ flg = TTY_BREAK;
+ else if (status & PE)
+ flg = TTY_PARITY;
+ else if (status & FE)
+ flg = TTY_FRAME;
+ else
+ flg = TTY_NORMAL;
+
+ for (i = uart->rx_dma_buf.tail; ; i++) {
+ if (i >= UART_XMIT_SIZE)
+ i = 0;
+ if (i == uart->rx_dma_buf.head)
+ break;
+ if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i]))
+ uart_insert_char(&uart->port, status, OE,
+ uart->rx_dma_buf.buf[i], flg);
+ }
+
+ dma_ignore_char:
+ tty_flip_buffer_push(tty);
+}
+
+void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
+{
+ int x_pos, pos;
+
+ dma_disable_irq_nosync(uart->rx_dma_channel);
+ spin_lock_bh(&uart->rx_lock);
+
+ /* 2D DMA RX buffer ring is used. Because curr_y_count and
+ * curr_x_count can't be read as an atomic operation,
+ * curr_y_count should be read before curr_x_count. When
+ * curr_x_count is read, curr_y_count may already indicate
+ * next buffer line. But, the position calculated here is
+ * still indicate the old line. The wrong position data may
+ * be smaller than current buffer tail, which cause garbages
+ * are received if it is not prohibit.
+ */
+ uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
+ x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
+ uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
+ if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
+ uart->rx_dma_nrows = 0;
+ x_pos = DMA_RX_XCOUNT - x_pos;
+ if (x_pos == DMA_RX_XCOUNT)
+ x_pos = 0;
+
+ pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
+ /* Ignore receiving data if new position is in the same line of
+ * current buffer tail and small.
+ */
+ if (pos > uart->rx_dma_buf.tail ||
+ uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
+ uart->rx_dma_buf.head = pos;
+ bfin_serial_dma_rx_chars(uart);
+ uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
+ }
+
+ spin_unlock_bh(&uart->rx_lock);
+ dma_enable_irq(uart->rx_dma_channel);
+
+ mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
+}
+
+static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
+{
+ struct bfin_serial_port *uart = dev_id;
+ struct circ_buf *xmit = &uart->port.state->xmit;
+
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+ if (uart->scts && !(bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
+ uart->scts = 0;
+ uart_handle_cts_change(&uart->port, uart->scts);
+ }
+#endif
+
+ spin_lock(&uart->port.lock);
+ if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
+ disable_dma(uart->tx_dma_channel);
+ clear_dma_irqstat(uart->tx_dma_channel);
+ /* Anomaly notes:
+ * 05000215 - we always clear ETBEI within last UART TX
+ * interrupt to end a string. It is always set
+ * when start a new tx.
+ */
+ UART_CLEAR_IER(uart, ETBEI);
+ xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
+ uart->port.icount.tx += uart->tx_count;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&uart->port);
+
+ bfin_serial_dma_tx_chars(uart);
+ }
+
+ spin_unlock(&uart->port.lock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
+{
+ struct bfin_serial_port *uart = dev_id;
+ unsigned short irqstat;
+ int x_pos, pos;
+
+ spin_lock(&uart->rx_lock);
+ irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
+ clear_dma_irqstat(uart->rx_dma_channel);
+
+ uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
+ x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
+ uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
+ if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
+ uart->rx_dma_nrows = 0;
+
+ pos = uart->rx_dma_nrows * DMA_RX_XCOUNT;
+ if (pos > uart->rx_dma_buf.tail ||
+ uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
+ uart->rx_dma_buf.head = pos;
+ bfin_serial_dma_rx_chars(uart);
+ uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
+ }
+
+ spin_unlock(&uart->rx_lock);
+
+ return IRQ_HANDLED;
+}
+#endif
+
+/*
+ * Return TIOCSER_TEMT when transmitter is not busy.
+ */
+static unsigned int bfin_serial_tx_empty(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ unsigned short lsr;
+
+ lsr = UART_GET_LSR(uart);
+ if (lsr & TEMT)
+ return TIOCSER_TEMT;
+ else
+ return 0;
+}
+
+static void bfin_serial_break_ctl(struct uart_port *port, int break_state)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ u16 lcr = UART_GET_LCR(uart);
+ if (break_state)
+ lcr |= SB;
+ else
+ lcr &= ~SB;
+ UART_PUT_LCR(uart, lcr);
+ SSYNC();
+}
+
+static int bfin_serial_startup(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ dma_addr_t dma_handle;
+
+ if (request_dma(uart->rx_dma_channel, "BFIN_UART_RX") < 0) {
+ printk(KERN_NOTICE "Unable to attach Blackfin UART RX DMA channel\n");
+ return -EBUSY;
+ }
+
+ if (request_dma(uart->tx_dma_channel, "BFIN_UART_TX") < 0) {
+ printk(KERN_NOTICE "Unable to attach Blackfin UART TX DMA channel\n");
+ free_dma(uart->rx_dma_channel);
+ return -EBUSY;
+ }
+
+ set_dma_callback(uart->rx_dma_channel, bfin_serial_dma_rx_int, uart);
+ set_dma_callback(uart->tx_dma_channel, bfin_serial_dma_tx_int, uart);
+
+ uart->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA);
+ uart->rx_dma_buf.head = 0;
+ uart->rx_dma_buf.tail = 0;
+ uart->rx_dma_nrows = 0;
+
+ set_dma_config(uart->rx_dma_channel,
+ set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
+ INTR_ON_ROW, DIMENSION_2D,
+ DATA_SIZE_8,
+ DMA_SYNC_RESTART));
+ set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT);
+ set_dma_x_modify(uart->rx_dma_channel, 1);
+ set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT);
+ set_dma_y_modify(uart->rx_dma_channel, 1);
+ set_dma_start_addr(uart->rx_dma_channel, (unsigned long)uart->rx_dma_buf.buf);
+ enable_dma(uart->rx_dma_channel);
+
+ uart->rx_dma_timer.data = (unsigned long)(uart);
+ uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout;
+ uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
+ add_timer(&(uart->rx_dma_timer));
+#else
+# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
+ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+ if (kgdboc_port_line == uart->port.line && kgdboc_break_enabled)
+ kgdboc_break_enabled = 0;
+ else {
+# endif
+ if (request_irq(uart->rx_irq, bfin_serial_rx_int, 0,
+ "BFIN_UART_RX", uart)) {
+ printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n");
+ return -EBUSY;
+ }
+
+ if (request_irq
+ (uart->tx_irq, bfin_serial_tx_int, 0,
+ "BFIN_UART_TX", uart)) {
+ printk(KERN_NOTICE "Unable to attach BlackFin UART TX interrupt\n");
+ free_irq(uart->rx_irq, uart);
+ return -EBUSY;
+ }
+
+# ifdef CONFIG_BF54x
+ {
+ /*
+ * UART2 and UART3 on BF548 share interrupt PINs and DMA
+ * controllers with SPORT2 and SPORT3. UART rx and tx
+ * interrupts are generated in PIO mode only when configure
+ * their peripheral mapping registers properly, which means
+ * request corresponding DMA channels in PIO mode as well.
+ */
+ unsigned uart_dma_ch_rx, uart_dma_ch_tx;
+
+ switch (uart->rx_irq) {
+ case IRQ_UART3_RX:
+ uart_dma_ch_rx = CH_UART3_RX;
+ uart_dma_ch_tx = CH_UART3_TX;
+ break;
+ case IRQ_UART2_RX:
+ uart_dma_ch_rx = CH_UART2_RX;
+ uart_dma_ch_tx = CH_UART2_TX;
+ break;
+ default:
+ uart_dma_ch_rx = uart_dma_ch_tx = 0;
+ break;
+ };
+
+ if (uart_dma_ch_rx &&
+ request_dma(uart_dma_ch_rx, "BFIN_UART_RX") < 0) {
+ printk(KERN_NOTICE"Fail to attach UART interrupt\n");
+ free_irq(uart->rx_irq, uart);
+ free_irq(uart->tx_irq, uart);
+ return -EBUSY;
+ }
+ if (uart_dma_ch_tx &&
+ request_dma(uart_dma_ch_tx, "BFIN_UART_TX") < 0) {
+ printk(KERN_NOTICE "Fail to attach UART interrupt\n");
+ free_dma(uart_dma_ch_rx);
+ free_irq(uart->rx_irq, uart);
+ free_irq(uart->tx_irq, uart);
+ return -EBUSY;
+ }
+ }
+# endif
+# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
+ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+ }
+# endif
+#endif
+
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+ if (uart->cts_pin >= 0) {
+ if (request_irq(gpio_to_irq(uart->cts_pin),
+ bfin_serial_mctrl_cts_int,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ 0, "BFIN_UART_CTS", uart)) {
+ uart->cts_pin = -1;
+ pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
+ }
+ }
+ if (uart->rts_pin >= 0)
+ gpio_direction_output(uart->rts_pin, 0);
+#endif
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+ if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
+ bfin_serial_mctrl_cts_int,
+ 0, "BFIN_UART_MODEM_STATUS", uart)) {
+ uart->cts_pin = -1;
+ pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
+ }
+
+ /* CTS RTS PINs are negative assertive. */
+ UART_PUT_MCR(uart, ACTS);
+ UART_SET_IER(uart, EDSSI);
+#endif
+
+ UART_SET_IER(uart, ERBFI);
+ return 0;
+}
+
+static void bfin_serial_shutdown(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ disable_dma(uart->tx_dma_channel);
+ free_dma(uart->tx_dma_channel);
+ disable_dma(uart->rx_dma_channel);
+ free_dma(uart->rx_dma_channel);
+ del_timer(&(uart->rx_dma_timer));
+ dma_free_coherent(NULL, PAGE_SIZE, uart->rx_dma_buf.buf, 0);
+#else
+#ifdef CONFIG_BF54x
+ switch (uart->port.irq) {
+ case IRQ_UART3_RX:
+ free_dma(CH_UART3_RX);
+ free_dma(CH_UART3_TX);
+ break;
+ case IRQ_UART2_RX:
+ free_dma(CH_UART2_RX);
+ free_dma(CH_UART2_TX);
+ break;
+ default:
+ break;
+ };
+#endif
+ free_irq(uart->rx_irq, uart);
+ free_irq(uart->tx_irq, uart);
+#endif
+
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+ if (uart->cts_pin >= 0)
+ free_irq(gpio_to_irq(uart->cts_pin), uart);
+#endif
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+ if (uart->cts_pin >= 0)
+ free_irq(uart->status_irq, uart);
+#endif
+}
+
+static void
+bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ unsigned long flags;
+ unsigned int baud, quot;
+ unsigned short val, ier, lcr = 0;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS8:
+ lcr = WLS(8);
+ break;
+ case CS7:
+ lcr = WLS(7);
+ break;
+ case CS6:
+ lcr = WLS(6);
+ break;
+ case CS5:
+ lcr = WLS(5);
+ break;
+ default:
+ printk(KERN_ERR "%s: word lengh not supported\n",
+ __func__);
+ }
+
+ /* Anomaly notes:
+ * 05000231 - STOP bit is always set to 1 whatever the user is set.
+ */
+ if (termios->c_cflag & CSTOPB) {
+ if (ANOMALY_05000231)
+ printk(KERN_WARNING "STOP bits other than 1 is not "
+ "supported in case of anomaly 05000231.\n");
+ else
+ lcr |= STB;
+ }
+ if (termios->c_cflag & PARENB)
+ lcr |= PEN;
+ if (!(termios->c_cflag & PARODD))
+ lcr |= EPS;
+ if (termios->c_cflag & CMSPAR)
+ lcr |= STP;
+
+ spin_lock_irqsave(&uart->port.lock, flags);
+
+ port->read_status_mask = OE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= (FE | PE);
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= BI;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= FE | PE;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= OE;
+ }
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ quot = uart_get_divisor(port, baud);
+
+ /* If discipline is not IRDA, apply ANOMALY_05000230 */
+ if (termios->c_line != N_IRDA)
+ quot -= ANOMALY_05000230;
+
+ UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);
+
+ /* Disable UART */
+ ier = UART_GET_IER(uart);
+ UART_DISABLE_INTS(uart);
+
+ /* Set DLAB in LCR to Access DLL and DLH */
+ UART_SET_DLAB(uart);
+
+ UART_PUT_DLL(uart, quot & 0xFF);
+ UART_PUT_DLH(uart, (quot >> 8) & 0xFF);
+ SSYNC();
+
+ /* Clear DLAB in LCR to Access THR RBR IER */
+ UART_CLEAR_DLAB(uart);
+
+ UART_PUT_LCR(uart, lcr);
+
+ /* Enable UART */
+ UART_ENABLE_INTS(uart, ier);
+
+ val = UART_GET_GCTL(uart);
+ val |= UCEN;
+ UART_PUT_GCTL(uart, val);
+
+ /* Port speed changed, update the per-port timeout. */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&uart->port.lock, flags);
+}
+
+static const char *bfin_serial_type(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+
+ return uart->port.type == PORT_BFIN ? "BFIN-UART" : NULL;
+}
+
+/*
+ * Release the memory region(s) being used by 'port'.
+ */
+static void bfin_serial_release_port(struct uart_port *port)
+{
+}
+
+/*
+ * Request the memory region(s) being used by 'port'.
+ */
+static int bfin_serial_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void bfin_serial_config_port(struct uart_port *port, int flags)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+
+ if (flags & UART_CONFIG_TYPE &&
+ bfin_serial_request_port(&uart->port) == 0)
+ uart->port.type = PORT_BFIN;
+}
+
+/*
+ * Verify the new serial_struct (for TIOCSSERIAL).
+ * The only change we allow are to the flags and type, and
+ * even then only between PORT_BFIN and PORT_UNKNOWN
+ */
+static int
+bfin_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ return 0;
+}
+
+/*
+ * Enable the IrDA function if tty->ldisc.num is N_IRDA.
+ * In other cases, disable IrDA function.
+ */
+static void bfin_serial_set_ldisc(struct uart_port *port, int ld)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ unsigned short val;
+
+ switch (ld) {
+ case N_IRDA:
+ val = UART_GET_GCTL(uart);
+ val |= (IREN | RPOLC);
+ UART_PUT_GCTL(uart, val);
+ break;
+ default:
+ val = UART_GET_GCTL(uart);
+ val &= ~(IREN | RPOLC);
+ UART_PUT_GCTL(uart, val);
+ }
+}
+
+static void bfin_serial_reset_irda(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ unsigned short val;
+
+ val = UART_GET_GCTL(uart);
+ val &= ~(IREN | RPOLC);
+ UART_PUT_GCTL(uart, val);
+ SSYNC();
+ val |= (IREN | RPOLC);
+ UART_PUT_GCTL(uart, val);
+ SSYNC();
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+/* Anomaly notes:
+ * 05000099 - Because we only use THRE in poll_put and DR in poll_get,
+ * losing other bits of UART_LSR is not a problem here.
+ */
+static void bfin_serial_poll_put_char(struct uart_port *port, unsigned char chr)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+
+ while (!(UART_GET_LSR(uart) & THRE))
+ cpu_relax();
+
+ UART_CLEAR_DLAB(uart);
+ UART_PUT_CHAR(uart, (unsigned char)chr);
+}
+
+static int bfin_serial_poll_get_char(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ unsigned char chr;
+
+ while (!(UART_GET_LSR(uart) & DR))
+ cpu_relax();
+
+ UART_CLEAR_DLAB(uart);
+ chr = UART_GET_CHAR(uart);
+
+ return chr;
+}
+#endif
+
+#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
+ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+static void bfin_kgdboc_port_shutdown(struct uart_port *port)
+{
+ if (kgdboc_break_enabled) {
+ kgdboc_break_enabled = 0;
+ bfin_serial_shutdown(port);
+ }
+}
+
+static int bfin_kgdboc_port_startup(struct uart_port *port)
+{
+ kgdboc_port_line = port->line;
+ kgdboc_break_enabled = !bfin_serial_startup(port);
+ return 0;
+}
+#endif
+
+static struct uart_ops bfin_serial_pops = {
+ .tx_empty = bfin_serial_tx_empty,
+ .set_mctrl = bfin_serial_set_mctrl,
+ .get_mctrl = bfin_serial_get_mctrl,
+ .stop_tx = bfin_serial_stop_tx,
+ .start_tx = bfin_serial_start_tx,
+ .stop_rx = bfin_serial_stop_rx,
+ .enable_ms = bfin_serial_enable_ms,
+ .break_ctl = bfin_serial_break_ctl,
+ .startup = bfin_serial_startup,
+ .shutdown = bfin_serial_shutdown,
+ .set_termios = bfin_serial_set_termios,
+ .set_ldisc = bfin_serial_set_ldisc,
+ .type = bfin_serial_type,
+ .release_port = bfin_serial_release_port,
+ .request_port = bfin_serial_request_port,
+ .config_port = bfin_serial_config_port,
+ .verify_port = bfin_serial_verify_port,
+#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
+ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+ .kgdboc_port_startup = bfin_kgdboc_port_startup,
+ .kgdboc_port_shutdown = bfin_kgdboc_port_shutdown,
+#endif
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_put_char = bfin_serial_poll_put_char,
+ .poll_get_char = bfin_serial_poll_get_char,
+#endif
+};
+
+#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
+/*
+ * If the port was already initialised (eg, by a boot loader),
+ * try to determine the current setup.
+ */
+static void __init
+bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
+ int *parity, int *bits)
+{
+ unsigned short status;
+
+ status = UART_GET_IER(uart) & (ERBFI | ETBEI);
+ if (status == (ERBFI | ETBEI)) {
+ /* ok, the port was enabled */
+ u16 lcr, dlh, dll;
+
+ lcr = UART_GET_LCR(uart);
+
+ *parity = 'n';
+ if (lcr & PEN) {
+ if (lcr & EPS)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+ switch (lcr & 0x03) {
+ case 0:
+ *bits = 5;
+ break;
+ case 1:
+ *bits = 6;
+ break;
+ case 2:
+ *bits = 7;
+ break;
+ case 3:
+ *bits = 8;
+ break;
+ }
+ /* Set DLAB in LCR to Access DLL and DLH */
+ UART_SET_DLAB(uart);
+
+ dll = UART_GET_DLL(uart);
+ dlh = UART_GET_DLH(uart);
+
+ /* Clear DLAB in LCR to Access THR RBR IER */
+ UART_CLEAR_DLAB(uart);
+
+ *baud = get_sclk() / (16*(dll | dlh << 8));
+ }
+ pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __func__, *baud, *parity, *bits);
+}
+
+static struct uart_driver bfin_serial_reg;
+
+static void bfin_serial_console_putchar(struct uart_port *port, int ch)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ while (!(UART_GET_LSR(uart) & THRE))
+ barrier();
+ UART_PUT_CHAR(uart, ch);
+}
+
+#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
+ defined (CONFIG_EARLY_PRINTK) */
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+#define CLASS_BFIN_CONSOLE "bfin-console"
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct bfin_serial_port *uart = bfin_serial_ports[co->index];
+ unsigned long flags;
+
+ spin_lock_irqsave(&uart->port.lock, flags);
+ uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
+ spin_unlock_irqrestore(&uart->port.lock, flags);
+
+}
+
+static int __init
+bfin_serial_console_setup(struct console *co, char *options)
+{
+ struct bfin_serial_port *uart;
+ int baud = 57600;
+ int bits = 8;
+ int parity = 'n';
+# if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+ defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+ int flow = 'r';
+# else
+ int flow = 'n';
+# endif
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index < 0 || co->index >= BFIN_UART_NR_PORTS)
+ return -ENODEV;
+
+ uart = bfin_serial_ports[co->index];
+ if (!uart)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ bfin_serial_console_get_options(uart, &baud, &parity, &bits);
+
+ return uart_set_options(&uart->port, co, baud, parity, bits, flow);
+}
+
+static struct console bfin_serial_console = {
+ .name = BFIN_SERIAL_DEV_NAME,
+ .write = bfin_serial_console_write,
+ .device = uart_console_device,
+ .setup = bfin_serial_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &bfin_serial_reg,
+};
+#define BFIN_SERIAL_CONSOLE (&bfin_serial_console)
+#else
+#define BFIN_SERIAL_CONSOLE NULL
+#endif /* CONFIG_SERIAL_BFIN_CONSOLE */
+
+#ifdef CONFIG_EARLY_PRINTK
+static struct bfin_serial_port bfin_earlyprintk_port;
+#define CLASS_BFIN_EARLYPRINTK "bfin-earlyprintk"
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+bfin_earlyprintk_console_write(struct console *co, const char *s, unsigned int count)
+{
+ unsigned long flags;
+
+ if (bfin_earlyprintk_port.port.line != co->index)
+ return;
+
+ spin_lock_irqsave(&bfin_earlyprintk_port.port.lock, flags);
+ uart_console_write(&bfin_earlyprintk_port.port, s, count,
+ bfin_serial_console_putchar);
+ spin_unlock_irqrestore(&bfin_earlyprintk_port.port.lock, flags);
+}
+
+/*
+ * This should have a .setup or .early_setup in it, but then things get called
+ * without the command line options, and the baud rate gets messed up - so
+ * don't let the common infrastructure play with things. (see calls to setup
+ * & earlysetup in ./kernel/printk.c:register_console()
+ */
+static struct __initdata console bfin_early_serial_console = {
+ .name = "early_BFuart",
+ .write = bfin_earlyprintk_console_write,
+ .device = uart_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &bfin_serial_reg,
+};
+#endif
+
+static struct uart_driver bfin_serial_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = DRIVER_NAME,
+ .dev_name = BFIN_SERIAL_DEV_NAME,
+ .major = BFIN_SERIAL_MAJOR,
+ .minor = BFIN_SERIAL_MINOR,
+ .nr = BFIN_UART_NR_PORTS,
+ .cons = BFIN_SERIAL_CONSOLE,
+};
+
+static int bfin_serial_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+ return uart_suspend_port(&bfin_serial_reg, &uart->port);
+}
+
+static int bfin_serial_resume(struct platform_device *pdev)
+{
+ struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+ return uart_resume_port(&bfin_serial_reg, &uart->port);
+}
+
+static int bfin_serial_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct bfin_serial_port *uart = NULL;
+ int ret = 0;
+
+ if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
+ dev_err(&pdev->dev, "Wrong bfin uart platform device id.\n");
+ return -ENOENT;
+ }
+
+ if (bfin_serial_ports[pdev->id] == NULL) {
+
+ uart = kzalloc(sizeof(*uart), GFP_KERNEL);
+ if (!uart) {
+ dev_err(&pdev->dev,
+ "fail to malloc bfin_serial_port\n");
+ return -ENOMEM;
+ }
+ bfin_serial_ports[pdev->id] = uart;
+
+#ifdef CONFIG_EARLY_PRINTK
+ if (!(bfin_earlyprintk_port.port.membase
+ && bfin_earlyprintk_port.port.line == pdev->id)) {
+ /*
+ * If the peripheral PINs of current port is allocated
+ * in earlyprintk probe stage, don't do it again.
+ */
+#endif
+ ret = peripheral_request_list(
+ (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail to request bfin serial peripherals\n");
+ goto out_error_free_mem;
+ }
+#ifdef CONFIG_EARLY_PRINTK
+ }
+#endif
+
+ spin_lock_init(&uart->port.lock);
+ uart->port.uartclk = get_sclk();
+ uart->port.fifosize = BFIN_UART_TX_FIFO_SIZE;
+ uart->port.ops = &bfin_serial_pops;
+ uart->port.line = pdev->id;
+ uart->port.iotype = UPIO_MEM;
+ uart->port.flags = UPF_BOOT_AUTOCONF;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ ret = -ENOENT;
+ goto out_error_free_peripherals;
+ }
+
+ uart->port.membase = ioremap(res->start, resource_size(res));
+ if (!uart->port.membase) {
+ dev_err(&pdev->dev, "Cannot map uart IO\n");
+ ret = -ENXIO;
+ goto out_error_free_peripherals;
+ }
+ uart->port.mapbase = res->start;
+
+ uart->tx_irq = platform_get_irq(pdev, 0);
+ if (uart->tx_irq < 0) {
+ dev_err(&pdev->dev, "No uart TX IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+
+ uart->rx_irq = platform_get_irq(pdev, 1);
+ if (uart->rx_irq < 0) {
+ dev_err(&pdev->dev, "No uart RX IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+ uart->port.irq = uart->rx_irq;
+
+ uart->status_irq = platform_get_irq(pdev, 2);
+ if (uart->status_irq < 0) {
+ dev_err(&pdev->dev, "No uart status IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ spin_lock_init(&uart->rx_lock);
+ uart->tx_done = 1;
+ uart->tx_count = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "No uart TX DMA channel specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+ uart->tx_dma_channel = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "No uart RX DMA channel specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+ uart->rx_dma_channel = res->start;
+
+ init_timer(&(uart->rx_dma_timer));
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+ defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res == NULL)
+ uart->cts_pin = -1;
+ else
+ uart->cts_pin = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+ if (res == NULL)
+ uart->rts_pin = -1;
+ else
+ uart->rts_pin = res->start;
+# if defined(CONFIG_SERIAL_BFIN_CTSRTS)
+ if (uart->rts_pin >= 0)
+ gpio_request(uart->rts_pin, DRIVER_NAME);
+# endif
+#endif
+ }
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+ if (!is_early_platform_device(pdev)) {
+#endif
+ uart = bfin_serial_ports[pdev->id];
+ uart->port.dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, uart);
+ ret = uart_add_one_port(&bfin_serial_reg, &uart->port);
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+ }
+#endif
+
+ if (!ret)
+ return 0;
+
+ if (uart) {
+out_error_unmap:
+ iounmap(uart->port.membase);
+out_error_free_peripherals:
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+out_error_free_mem:
+ kfree(uart);
+ bfin_serial_ports[pdev->id] = NULL;
+ }
+
+ return ret;
+}
+
+static int __devexit bfin_serial_remove(struct platform_device *pdev)
+{
+ struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (uart) {
+ uart_remove_one_port(&bfin_serial_reg, &uart->port);
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+ if (uart->rts_pin >= 0)
+ gpio_free(uart->rts_pin);
+#endif
+ iounmap(uart->port.membase);
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+ kfree(uart);
+ bfin_serial_ports[pdev->id] = NULL;
+ }
+
+ return 0;
+}
+
+static struct platform_driver bfin_serial_driver = {
+ .probe = bfin_serial_probe,
+ .remove = __devexit_p(bfin_serial_remove),
+ .suspend = bfin_serial_suspend,
+ .resume = bfin_serial_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+#if defined(CONFIG_SERIAL_BFIN_CONSOLE)
+static __initdata struct early_platform_driver early_bfin_serial_driver = {
+ .class_str = CLASS_BFIN_CONSOLE,
+ .pdrv = &bfin_serial_driver,
+ .requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+static int __init bfin_serial_rs_console_init(void)
+{
+ early_platform_driver_register(&early_bfin_serial_driver, DRIVER_NAME);
+
+ early_platform_driver_probe(CLASS_BFIN_CONSOLE, BFIN_UART_NR_PORTS, 0);
+
+ register_console(&bfin_serial_console);
+
+ return 0;
+}
+console_initcall(bfin_serial_rs_console_init);
+#endif
+
+#ifdef CONFIG_EARLY_PRINTK
+/*
+ * Memory can't be allocated dynamically during earlyprink init stage.
+ * So, do individual probe for earlyprink with a static uart port variable.
+ */
+static int bfin_earlyprintk_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
+ if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
+ dev_err(&pdev->dev, "Wrong earlyprintk platform device id.\n");
+ return -ENOENT;
+ }
+
+ ret = peripheral_request_list(
+ (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail to request bfin serial peripherals\n");
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ ret = -ENOENT;
+ goto out_error_free_peripherals;
+ }
+
+ bfin_earlyprintk_port.port.membase = ioremap(res->start,
+ resource_size(res));
+ if (!bfin_earlyprintk_port.port.membase) {
+ dev_err(&pdev->dev, "Cannot map uart IO\n");
+ ret = -ENXIO;
+ goto out_error_free_peripherals;
+ }
+ bfin_earlyprintk_port.port.mapbase = res->start;
+ bfin_earlyprintk_port.port.line = pdev->id;
+ bfin_earlyprintk_port.port.uartclk = get_sclk();
+ bfin_earlyprintk_port.port.fifosize = BFIN_UART_TX_FIFO_SIZE;
+ spin_lock_init(&bfin_earlyprintk_port.port.lock);
+
+ return 0;
+
+out_error_free_peripherals:
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+
+ return ret;
+}
+
+static struct platform_driver bfin_earlyprintk_driver = {
+ .probe = bfin_earlyprintk_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static __initdata struct early_platform_driver early_bfin_earlyprintk_driver = {
+ .class_str = CLASS_BFIN_EARLYPRINTK,
+ .pdrv = &bfin_earlyprintk_driver,
+ .requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+struct console __init *bfin_earlyserial_init(unsigned int port,
+ unsigned int cflag)
+{
+ struct ktermios t;
+ char port_name[20];
+
+ if (port < 0 || port >= BFIN_UART_NR_PORTS)
+ return NULL;
+
+ /*
+ * Only probe resource of the given port in earlyprintk boot arg.
+ * The expected port id should be indicated in port name string.
+ */
+ snprintf(port_name, 20, DRIVER_NAME ".%d", port);
+ early_platform_driver_register(&early_bfin_earlyprintk_driver,
+ port_name);
+ early_platform_driver_probe(CLASS_BFIN_EARLYPRINTK, 1, 0);
+
+ if (!bfin_earlyprintk_port.port.membase)
+ return NULL;
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+ /*
+ * If we are using early serial, don't let the normal console rewind
+ * log buffer, since that causes things to be printed multiple times
+ */
+ bfin_serial_console.flags &= ~CON_PRINTBUFFER;
+#endif
+
+ bfin_early_serial_console.index = port;
+ t.c_cflag = cflag;
+ t.c_iflag = 0;
+ t.c_oflag = 0;
+ t.c_lflag = ICANON;
+ t.c_line = port;
+ bfin_serial_set_termios(&bfin_earlyprintk_port.port, &t, &t);
+
+ return &bfin_early_serial_console;
+}
+#endif /* CONFIG_EARLY_PRINTK */
+
+static int __init bfin_serial_init(void)
+{
+ int ret;
+
+ pr_info("Blackfin serial driver\n");
+
+ ret = uart_register_driver(&bfin_serial_reg);
+ if (ret) {
+ pr_err("failed to register %s:%d\n",
+ bfin_serial_reg.driver_name, ret);
+ }
+
+ ret = platform_driver_register(&bfin_serial_driver);
+ if (ret) {
+ pr_err("fail to register bfin uart\n");
+ uart_unregister_driver(&bfin_serial_reg);
+ }
+
+ return ret;
+}
+
+static void __exit bfin_serial_exit(void)
+{
+ platform_driver_unregister(&bfin_serial_driver);
+ uart_unregister_driver(&bfin_serial_reg);
+}
+
+
+module_init(bfin_serial_init);
+module_exit(bfin_serial_exit);
+
+MODULE_AUTHOR("Sonic Zhang, Aubrey Li");
+MODULE_DESCRIPTION("Blackfin generic serial port driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(BFIN_SERIAL_MAJOR);
+MODULE_ALIAS("platform:bfin-uart");
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 9488da74d4f..b418947b710 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/serial.h>
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 58be715913c..b7435043f2f 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -258,7 +258,7 @@ static struct e100_serial rs_table[] = {
.dma_out_enabled = 1,
.dma_out_nbr = SER0_TX_DMA_NBR,
.dma_out_irq_nbr = SER0_DMA_TX_IRQ_NBR,
- .dma_out_irq_flags = IRQF_DISABLED,
+ .dma_out_irq_flags = 0,
.dma_out_irq_description = "serial 0 dma tr",
#else
.dma_out_enabled = 0,
@@ -271,7 +271,7 @@ static struct e100_serial rs_table[] = {
.dma_in_enabled = 1,
.dma_in_nbr = SER0_RX_DMA_NBR,
.dma_in_irq_nbr = SER0_DMA_RX_IRQ_NBR,
- .dma_in_irq_flags = IRQF_DISABLED,
+ .dma_in_irq_flags = 0,
.dma_in_irq_description = "serial 0 dma rec",
#else
.dma_in_enabled = 0,
@@ -313,7 +313,7 @@ static struct e100_serial rs_table[] = {
.dma_out_enabled = 1,
.dma_out_nbr = SER1_TX_DMA_NBR,
.dma_out_irq_nbr = SER1_DMA_TX_IRQ_NBR,
- .dma_out_irq_flags = IRQF_DISABLED,
+ .dma_out_irq_flags = 0,
.dma_out_irq_description = "serial 1 dma tr",
#else
.dma_out_enabled = 0,
@@ -326,7 +326,7 @@ static struct e100_serial rs_table[] = {
.dma_in_enabled = 1,
.dma_in_nbr = SER1_RX_DMA_NBR,
.dma_in_irq_nbr = SER1_DMA_RX_IRQ_NBR,
- .dma_in_irq_flags = IRQF_DISABLED,
+ .dma_in_irq_flags = 0,
.dma_in_irq_description = "serial 1 dma rec",
#else
.dma_in_enabled = 0,
@@ -369,7 +369,7 @@ static struct e100_serial rs_table[] = {
.dma_out_enabled = 1,
.dma_out_nbr = SER2_TX_DMA_NBR,
.dma_out_irq_nbr = SER2_DMA_TX_IRQ_NBR,
- .dma_out_irq_flags = IRQF_DISABLED,
+ .dma_out_irq_flags = 0,
.dma_out_irq_description = "serial 2 dma tr",
#else
.dma_out_enabled = 0,
@@ -382,7 +382,7 @@ static struct e100_serial rs_table[] = {
.dma_in_enabled = 1,
.dma_in_nbr = SER2_RX_DMA_NBR,
.dma_in_irq_nbr = SER2_DMA_RX_IRQ_NBR,
- .dma_in_irq_flags = IRQF_DISABLED,
+ .dma_in_irq_flags = 0,
.dma_in_irq_description = "serial 2 dma rec",
#else
.dma_in_enabled = 0,
@@ -423,7 +423,7 @@ static struct e100_serial rs_table[] = {
.dma_out_enabled = 1,
.dma_out_nbr = SER3_TX_DMA_NBR,
.dma_out_irq_nbr = SER3_DMA_TX_IRQ_NBR,
- .dma_out_irq_flags = IRQF_DISABLED,
+ .dma_out_irq_flags = 0,
.dma_out_irq_description = "serial 3 dma tr",
#else
.dma_out_enabled = 0,
@@ -436,7 +436,7 @@ static struct e100_serial rs_table[] = {
.dma_in_enabled = 1,
.dma_in_nbr = SER3_RX_DMA_NBR,
.dma_in_irq_nbr = SER3_DMA_RX_IRQ_NBR,
- .dma_in_irq_flags = IRQF_DISABLED,
+ .dma_in_irq_flags = 0,
.dma_in_irq_description = "serial 3 dma rec",
#else
.dma_in_enabled = 0,
@@ -1788,7 +1788,7 @@ static unsigned int handle_descr_data(struct e100_serial *info,
struct etrax_recv_buffer *buffer = phys_to_virt(descr->buf) - sizeof *buffer;
if (info->recv_cnt + recvl > 65536) {
- printk(KERN_CRIT
+ printk(KERN_WARNING
"%s: Too much pending incoming serial data! Dropping %u bytes.\n", __func__, recvl);
return 0;
}
@@ -3813,13 +3813,13 @@ rs_close(struct tty_struct *tty, struct file * filp)
* one, we've got real problems, since it means the
* serial port won't be shutdown.
*/
- printk(KERN_CRIT
+ printk(KERN_ERR
"rs_close: bad serial port count; tty->count is 1, "
"info->count is %d\n", info->count);
info->count = 1;
}
if (--info->count < 0) {
- printk(KERN_CRIT "rs_close: bad serial port count for ttyS%d: %d\n",
+ printk(KERN_ERR "rs_close: bad serial port count for ttyS%d: %d\n",
info->line, info->count);
info->count = 0;
}
@@ -4452,7 +4452,7 @@ static int __init rs_init(void)
#if defined(CONFIG_ETRAX_RS485_ON_PA)
if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit,
rs485_pa_bit)) {
- printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
+ printk(KERN_ERR "ETRAX100LX serial: Could not allocate "
"RS485 pin\n");
put_tty_driver(driver);
return -EBUSY;
@@ -4461,7 +4461,7 @@ static int __init rs_init(void)
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,
rs485_port_g_bit)) {
- printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
+ printk(KERN_ERR "ETRAX100LX serial: Could not allocate "
"RS485 pin\n");
put_tty_driver(driver);
return -EBUSY;
@@ -4494,7 +4494,7 @@ static int __init rs_init(void)
if (info->enabled) {
if (cris_request_io_interface(info->io_if,
info->io_if_description)) {
- printk(KERN_CRIT "ETRAX100LX async serial: "
+ printk(KERN_ERR "ETRAX100LX async serial: "
"Could not allocate IO pins for "
"%s, port %d\n",
info->io_if_description, i);
@@ -4558,7 +4558,7 @@ static int __init rs_init(void)
/* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */
if (request_irq(SERIAL_IRQ_NBR, ser_interrupt,
- IRQF_SHARED | IRQF_DISABLED, "serial ", driver))
+ IRQF_SHARED, "serial ", driver))
panic("%s: Failed to request irq8", __func__);
#endif
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index ddc487a2d42..e3699a84049 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -47,6 +47,7 @@
#include <linux/serial_core.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/atomic.h>
#include <asm/bootinfo.h>
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 8a869e58f6d..d55709a7a75 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -1554,7 +1554,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
/* save off irq and request irq line */
if ( (retval = request_irq(dev->irq, icom_interrupt,
- IRQF_DISABLED | IRQF_SHARED, ICOM_DRIVER_NAME,
+ IRQF_SHARED, ICOM_DRIVER_NAME,
(void *) icom_adapter))) {
goto probe_exit2;
}
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 7e91b3d368c..54ffdc6243f 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -508,8 +508,10 @@ static irqreturn_t imx_rxint(int irq, void *dev_id)
if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
continue;
- if (rx & (URXD_PRERR | URXD_OVRRUN | URXD_FRMERR) ) {
- if (rx & URXD_PRERR)
+ if (unlikely(rx & URXD_ERR)) {
+ if (rx & URXD_BRK)
+ sport->port.icount.brk++;
+ else if (rx & URXD_PRERR)
sport->port.icount.parity++;
else if (rx & URXD_FRMERR)
sport->port.icount.frame++;
@@ -524,7 +526,9 @@ static irqreturn_t imx_rxint(int irq, void *dev_id)
rx &= sport->port.read_status_mask;
- if (rx & URXD_PRERR)
+ if (rx & URXD_BRK)
+ flg = TTY_BREAK;
+ else if (rx & URXD_PRERR)
flg = TTY_PARITY;
else if (rx & URXD_FRMERR)
flg = TTY_FRAME;
diff --git a/drivers/tty/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c
index ee43efc7bdc..758ff310f7f 100644
--- a/drivers/tty/serial/ioc3_serial.c
+++ b/drivers/tty/serial/ioc3_serial.c
@@ -13,6 +13,7 @@
*/
#include <linux/errno.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/circ_buf.h>
#include <linux/serial_reg.h>
diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
index fcfe82653ac..6b36c1554d7 100644
--- a/drivers/tty/serial/ioc4_serial.c
+++ b/drivers/tty/serial/ioc4_serial.c
@@ -14,6 +14,7 @@
*/
#include <linux/errno.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/serialP.h>
#include <linux/circ_buf.h>
diff --git a/drivers/tty/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h
index b704c8ce0d7..529bec6edaf 100644
--- a/drivers/tty/serial/jsm/jsm.h
+++ b/drivers/tty/serial/jsm/jsm.h
@@ -88,7 +88,6 @@ enum {
/* 4 extra for alignment play space */
#define WRITEBUFLEN ((4096) + 4)
-#define MYFLIPLEN N_TTY_BUF_SIZE
#define JSM_VERSION "jsm: 1.2-1-INKERNEL"
#define JSM_PARTNUM "40002438_A-INKERNEL"
@@ -150,7 +149,6 @@ struct jsm_board
u32 bd_uart_offset; /* Space between each UART */
struct jsm_channel *channels[MAXPORTS]; /* array of pointers to our channels. */
- char *flipbuf; /* Our flip buffer, alloced if board is found */
u32 bd_dividend; /* Board/UARTs specific dividend */
@@ -177,16 +175,13 @@ struct jsm_board
#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */
#define CH_BREAK_SENDING 0x1000 /* Break is being sent */
#define CH_LOOPBACK 0x2000 /* Channel is in lookback mode */
-#define CH_FLIPBUF_IN_USE 0x4000 /* Channel's flipbuf is in use */
#define CH_BAUD0 0x08000 /* Used for checking B0 transitions */
/* Our Read/Error/Write queue sizes */
#define RQUEUEMASK 0x1FFF /* 8 K - 1 */
#define EQUEUEMASK 0x1FFF /* 8 K - 1 */
-#define WQUEUEMASK 0x0FFF /* 4 K - 1 */
#define RQUEUESIZE (RQUEUEMASK + 1)
#define EQUEUESIZE RQUEUESIZE
-#define WQUEUESIZE (WQUEUEMASK + 1)
/************************************************************************
@@ -226,10 +221,6 @@ struct jsm_channel {
u16 ch_e_head; /* Head location of the error queue */
u16 ch_e_tail; /* Tail location of the error queue */
- u8 *ch_wqueue; /* Our write queue buffer - malloc'ed */
- u16 ch_w_head; /* Head location of the write queue */
- u16 ch_w_tail; /* Tail location of the write queue */
-
u64 ch_rxcount; /* total of data received so far */
u64 ch_txcount; /* total of data transmitted so far */
@@ -378,7 +369,6 @@ extern int jsm_debug;
* Prototypes for non-static functions used in more than one module
*
*************************************************************************/
-int jsm_tty_write(struct uart_port *port);
int jsm_tty_init(struct jsm_board *);
int jsm_uart_port_init(struct jsm_board *);
int jsm_remove_uart_port(struct jsm_board *);
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 96da17868cf..648b6a3efa3 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -160,27 +160,10 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
dev_info(&pdev->dev, "board %d: Digi Neo (rev %d), irq %d\n",
adapter_count, brd->rev, brd->irq);
- /*
- * allocate flip buffer for board.
- *
- * Okay to malloc with GFP_KERNEL, we are not at interrupt
- * context, and there are no locks held.
- */
- brd->flipbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
- if (!brd->flipbuf) {
- /* XXX: leaking all resources from jsm_tty_init and
- jsm_uart_port_init here! */
- dev_err(&pdev->dev, "memory allocation for flipbuf failed\n");
- rc = -ENOMEM;
- goto out_free_uart;
- }
-
pci_set_drvdata(pdev, brd);
pci_save_state(pdev);
return 0;
- out_free_uart:
- jsm_remove_uart_port(brd);
out_free_irq:
jsm_remove_uart_port(brd);
free_irq(brd->irq, brd);
@@ -211,14 +194,12 @@ static void __devexit jsm_remove_one(struct pci_dev *pdev)
if (brd->channels[i]) {
kfree(brd->channels[i]->ch_rqueue);
kfree(brd->channels[i]->ch_equeue);
- kfree(brd->channels[i]->ch_wqueue);
kfree(brd->channels[i]);
}
}
pci_release_regions(pdev);
pci_disable_device(pdev);
- kfree(brd->flipbuf);
kfree(brd);
}
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index 4538c3e3646..81dfafa11b0 100644
--- a/drivers/tty/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
@@ -496,12 +496,15 @@ static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch)
int s;
int qlen;
u32 len_written = 0;
+ struct circ_buf *circ;
if (!ch)
return;
+ circ = &ch->uart_port.state->xmit;
+
/* No data to write to the UART */
- if (ch->ch_w_tail == ch->ch_w_head)
+ if (uart_circ_empty(circ))
return;
/* If port is "stopped", don't send any data to the UART */
@@ -517,11 +520,10 @@ static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch)
if (ch->ch_cached_lsr & UART_LSR_THRE) {
ch->ch_cached_lsr &= ~(UART_LSR_THRE);
- writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx);
+ writeb(circ->buf[circ->tail], &ch->ch_neo_uart->txrx);
jsm_printk(WRITE, INFO, &ch->ch_bd->pci_dev,
- "Tx data: %x\n", ch->ch_wqueue[ch->ch_w_head]);
- ch->ch_w_tail++;
- ch->ch_w_tail &= WQUEUEMASK;
+ "Tx data: %x\n", circ->buf[circ->tail]);
+ circ->tail = (circ->tail + 1) & (UART_XMIT_SIZE - 1);
ch->ch_txcount++;
}
return;
@@ -536,36 +538,36 @@ static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch)
n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel;
/* cache head and tail of queue */
- head = ch->ch_w_head & WQUEUEMASK;
- tail = ch->ch_w_tail & WQUEUEMASK;
- qlen = (head - tail) & WQUEUEMASK;
+ head = circ->head & (UART_XMIT_SIZE - 1);
+ tail = circ->tail & (UART_XMIT_SIZE - 1);
+ qlen = uart_circ_chars_pending(circ);
/* Find minimum of the FIFO space, versus queue length */
n = min(n, qlen);
while (n > 0) {
- s = ((head >= tail) ? head : WQUEUESIZE) - tail;
+ s = ((head >= tail) ? head : UART_XMIT_SIZE) - tail;
s = min(s, n);
if (s <= 0)
break;
- memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s);
+ memcpy_toio(&ch->ch_neo_uart->txrxburst, circ->buf + tail, s);
/* Add and flip queue if needed */
- tail = (tail + s) & WQUEUEMASK;
+ tail = (tail + s) & (UART_XMIT_SIZE - 1);
n -= s;
ch->ch_txcount += s;
len_written += s;
}
/* Update the final tail */
- ch->ch_w_tail = tail & WQUEUEMASK;
+ circ->tail = tail & (UART_XMIT_SIZE - 1);
if (len_written >= ch->ch_t_tlevel)
ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- if (!jsm_tty_write(&ch->uart_port))
+ if (uart_circ_empty(circ))
uart_write_wakeup(&ch->uart_port);
}
@@ -946,7 +948,6 @@ static void neo_param(struct jsm_channel *ch)
if ((ch->ch_c_cflag & (CBAUD)) == 0) {
ch->ch_r_head = ch->ch_r_tail = 0;
ch->ch_e_head = ch->ch_e_tail = 0;
- ch->ch_w_head = ch->ch_w_tail = 0;
neo_flush_uart_write(ch);
neo_flush_uart_read(ch);
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index 7a4a914ecff..434bd881fca 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -118,6 +118,19 @@ static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl)
udelay(10);
}
+/*
+ * jsm_tty_write()
+ *
+ * Take data from the user or kernel and send it out to the FEP.
+ * In here exists all the Transparent Print magic as well.
+ */
+static void jsm_tty_write(struct uart_port *port)
+{
+ struct jsm_channel *channel;
+ channel = container_of(port, struct jsm_channel, uart_port);
+ channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
+}
+
static void jsm_tty_start_tx(struct uart_port *port)
{
struct jsm_channel *channel = (struct jsm_channel *)port;
@@ -216,14 +229,6 @@ static int jsm_tty_open(struct uart_port *port)
return -ENOMEM;
}
}
- if (!channel->ch_wqueue) {
- channel->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL);
- if (!channel->ch_wqueue) {
- jsm_printk(INIT, ERR, &channel->ch_bd->pci_dev,
- "unable to allocate write queue buf");
- return -ENOMEM;
- }
- }
channel->ch_flags &= ~(CH_OPENING);
/*
@@ -237,7 +242,6 @@ static int jsm_tty_open(struct uart_port *port)
*/
channel->ch_r_head = channel->ch_r_tail = 0;
channel->ch_e_head = channel->ch_e_tail = 0;
- channel->ch_w_head = channel->ch_w_tail = 0;
brd->bd_ops->flush_uart_write(channel);
brd->bd_ops->flush_uart_read(channel);
@@ -836,75 +840,3 @@ void jsm_check_queue_flow_control(struct jsm_channel *ch)
}
}
}
-
-/*
- * jsm_tty_write()
- *
- * Take data from the user or kernel and send it out to the FEP.
- * In here exists all the Transparent Print magic as well.
- */
-int jsm_tty_write(struct uart_port *port)
-{
- int bufcount;
- int data_count = 0,data_count1 =0;
- u16 head;
- u16 tail;
- u16 tmask;
- u32 remain;
- int temp_tail = port->state->xmit.tail;
- struct jsm_channel *channel = (struct jsm_channel *)port;
-
- tmask = WQUEUEMASK;
- head = (channel->ch_w_head) & tmask;
- tail = (channel->ch_w_tail) & tmask;
-
- if ((bufcount = tail - head - 1) < 0)
- bufcount += WQUEUESIZE;
-
- bufcount = min(bufcount, 56);
- remain = WQUEUESIZE - head;
-
- data_count = 0;
- if (bufcount >= remain) {
- bufcount -= remain;
- while ((port->state->xmit.head != temp_tail) &&
- (data_count < remain)) {
- channel->ch_wqueue[head++] =
- port->state->xmit.buf[temp_tail];
-
- temp_tail++;
- temp_tail &= (UART_XMIT_SIZE - 1);
- data_count++;
- }
- if (data_count == remain) head = 0;
- }
-
- data_count1 = 0;
- if (bufcount > 0) {
- remain = bufcount;
- while ((port->state->xmit.head != temp_tail) &&
- (data_count1 < remain)) {
- channel->ch_wqueue[head++] =
- port->state->xmit.buf[temp_tail];
-
- temp_tail++;
- temp_tail &= (UART_XMIT_SIZE - 1);
- data_count1++;
-
- }
- }
-
- port->state->xmit.tail = temp_tail;
-
- data_count += data_count1;
- if (data_count) {
- head &= tmask;
- channel->ch_w_head = head;
- }
-
- if (data_count) {
- channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
- }
-
- return data_count;
-}
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 58cf279ed87..96c1cacc736 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -338,21 +338,21 @@ lqasc_startup(struct uart_port *port)
ASCCON_ROEN, port->membase + LTQ_ASC_CON);
retval = request_irq(ltq_port->tx_irq, lqasc_tx_int,
- IRQF_DISABLED, "asc_tx", port);
+ 0, "asc_tx", port);
if (retval) {
pr_err("failed to request lqasc_tx_int\n");
return retval;
}
retval = request_irq(ltq_port->rx_irq, lqasc_rx_int,
- IRQF_DISABLED, "asc_rx", port);
+ 0, "asc_rx", port);
if (retval) {
pr_err("failed to request lqasc_rx_int\n");
goto err1;
}
retval = request_irq(ltq_port->err_irq, lqasc_err_int,
- IRQF_DISABLED, "asc_err", port);
+ 0, "asc_err", port);
if (retval) {
pr_err("failed to request lqasc_err_int\n");
goto err2;
@@ -478,8 +478,10 @@ lqasc_set_termios(struct uart_port *port,
spin_unlock_irqrestore(&ltq_asc_lock, flags);
/* Don't rewrite B0 */
- if (tty_termios_baud_rate(new))
+ if (tty_termios_baud_rate(new))
tty_termios_encode_baud_rate(new, baud, baud);
+
+ uart_update_timeout(port, cflag, baud);
}
static const char*
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 8e07517f8ac..08018934e01 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 7b951adac54..2af5aa5f3a8 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -47,6 +47,8 @@
#include <linux/serial.h>
#include <linux/spi/spi.h>
#include <linux/freezer.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/serial_max3100.h>
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index a8164601c0e..db00b595cab 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -31,6 +31,8 @@
#include <linux/device.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/freezer.h>
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 3394b7cc172..9afca093d6e 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -380,7 +380,7 @@ static void mcf_config_port(struct uart_port *port, int flags)
/* Clear mask, so no surprise interrupts. */
writeb(0, port->membase + MCFUART_UIMR);
- if (request_irq(port->irq, mcf_interrupt, IRQF_DISABLED, "UART", port))
+ if (request_irq(port->irq, mcf_interrupt, 0, "UART", port))
printk(KERN_ERR "MCF: unable to attach ColdFire UART %d "
"interrupt vector=%d\n", port->line, port->irq);
}
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index cab52f4a88b..286c386d9c4 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -38,6 +38,7 @@
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
#define HSU_DMA_BUF_SIZE 2048
@@ -764,6 +765,8 @@ static int serial_hsu_startup(struct uart_port *port)
container_of(port, struct uart_hsu_port, port);
unsigned long flags;
+ pm_runtime_get_sync(up->dev);
+
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
@@ -871,6 +874,8 @@ static void serial_hsu_shutdown(struct uart_port *port)
UART_FCR_CLEAR_RCVR |
UART_FCR_CLEAR_XMIT);
serial_out(up, UART_FCR, 0);
+
+ pm_runtime_put(up->dev);
}
static void
@@ -1249,6 +1254,39 @@ static int serial_hsu_resume(struct pci_dev *pdev)
#define serial_hsu_resume NULL
#endif
+#ifdef CONFIG_PM_RUNTIME
+static int serial_hsu_runtime_idle(struct device *dev)
+{
+ int err;
+
+ err = pm_schedule_suspend(dev, 500);
+ if (err)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int serial_hsu_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int serial_hsu_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+#else
+#define serial_hsu_runtime_idle NULL
+#define serial_hsu_runtime_suspend NULL
+#define serial_hsu_runtime_resume NULL
+#endif
+
+static const struct dev_pm_ops serial_hsu_pm_ops = {
+ .runtime_suspend = serial_hsu_runtime_suspend,
+ .runtime_resume = serial_hsu_runtime_resume,
+ .runtime_idle = serial_hsu_runtime_idle,
+};
+
/* temp global pointer before we settle down on using one or four PCI dev */
static struct hsu_port *phsu;
@@ -1315,6 +1353,9 @@ static int serial_hsu_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, uport);
}
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
return 0;
err_disable:
@@ -1411,6 +1452,9 @@ static void serial_hsu_remove(struct pci_dev *pdev)
if (!priv)
return;
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
/* For port 0/1/2, priv is the address of uart_hsu_port */
if (pdev->device != 0x081E) {
up = priv;
@@ -1423,7 +1467,7 @@ static void serial_hsu_remove(struct pci_dev *pdev)
}
/* First 3 are UART ports, and the 4th is the DMA */
-static const struct pci_device_id pci_ids[] __devinitdata = {
+static const struct pci_device_id pci_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
@@ -1438,6 +1482,9 @@ static struct pci_driver hsu_pci_driver = {
.remove = __devexit_p(serial_hsu_remove),
.suspend = serial_hsu_suspend,
.resume = serial_hsu_resume,
+ .driver = {
+ .pm = &serial_hsu_pm_ops,
+ },
};
static int __init hsu_pci_init(void)
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index a0bcd8a3758..1093a88a1fe 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -34,6 +34,7 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/sysrq.h>
#include <linux/console.h>
@@ -273,7 +274,7 @@ static unsigned int mpc5200b_psc_set_baudrate(struct uart_port *port,
static void mpc52xx_psc_get_irq(struct uart_port *port, struct device_node *np)
{
- port->irqflags = IRQF_DISABLED;
+ port->irqflags = 0;
port->irq = irq_of_parse_and_map(np, 0);
}
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 23bc743f2a2..4c309e86990 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -23,10 +23,14 @@
* 1 word. If SPI master controller doesn't support sclk frequency change,
* then the char need be sent out one by one with some delay
*
- * 2. Currently only RX available interrrupt is used, no need for waiting TXE
+ * 2. Currently only RX available interrupt is used, no need for waiting TXE
* interrupt for a low speed UART device
*/
+#ifdef CONFIG_MAGIC_SYSRQ
+#define SUPPORT_SYSRQ
+#endif
+
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/irq.h>
@@ -73,9 +77,9 @@ struct uart_max3110 {
/* global data structure, may need be removed */
static struct uart_max3110 *pmax;
-static void receive_chars(struct uart_max3110 *max,
- unsigned char *str, int len);
-static int max3110_read_multi(struct uart_max3110 *max, u8 *buf);
+static int receive_chars(struct uart_max3110 *max,
+ unsigned short *str, int len);
+static int max3110_read_multi(struct uart_max3110 *max);
static void max3110_con_receive(struct uart_max3110 *max);
static int max3110_write_then_read(struct uart_max3110 *max,
@@ -108,7 +112,6 @@ static int max3110_out(struct uart_max3110 *max, const u16 out)
{
void *buf;
u16 *obuf, *ibuf;
- u8 ch;
int ret;
buf = kzalloc(8, GFP_KERNEL | GFP_DMA);
@@ -125,11 +128,7 @@ static int max3110_out(struct uart_max3110 *max, const u16 out)
goto exit;
}
- /* If some valid data is read back */
- if (*ibuf & MAX3110_READ_DATA_AVAILABLE) {
- ch = *ibuf & 0xff;
- receive_chars(max, &ch, 1);
- }
+ receive_chars(max, ibuf, 1);
exit:
kfree(buf);
@@ -142,12 +141,11 @@ exit:
*
* Return how many valide bytes are read back
*/
-static int max3110_read_multi(struct uart_max3110 *max, u8 *rxbuf)
+static int max3110_read_multi(struct uart_max3110 *max)
{
void *buf;
u16 *obuf, *ibuf;
- u8 *pbuf, valid_str[M3110_RX_FIFO_DEPTH];
- int i, j, blen;
+ int ret, blen;
blen = M3110_RX_FIFO_DEPTH * sizeof(u16);
buf = kzalloc(blen * 2, GFP_KERNEL | GFP_DMA);
@@ -165,19 +163,10 @@ static int max3110_read_multi(struct uart_max3110 *max, u8 *rxbuf)
return 0;
}
- /* If caller doesn't provide a buffer, then handle received char */
- pbuf = rxbuf ? rxbuf : valid_str;
-
- for (i = 0, j = 0; i < M3110_RX_FIFO_DEPTH; i++) {
- if (ibuf[i] & MAX3110_READ_DATA_AVAILABLE)
- pbuf[j++] = ibuf[i] & 0xff;
- }
-
- if (j && (pbuf == valid_str))
- receive_chars(max, valid_str, j);
+ ret = receive_chars(max, ibuf, M3110_RX_FIFO_DEPTH);
kfree(buf);
- return j;
+ return ret;
}
static void serial_m3110_con_putchar(struct uart_port *port, int ch)
@@ -207,7 +196,7 @@ static void serial_m3110_con_write(struct console *co,
uart_console_write(&pmax->port, s, count, serial_m3110_con_putchar);
if (!test_and_set_bit(CON_TX_NEEDED, &pmax->uart_flags))
- wake_up_process(pmax->main_thread);
+ wake_up(&pmax->wq);
}
static int __init
@@ -276,8 +265,7 @@ static void send_circ_buf(struct uart_max3110 *max,
{
void *buf;
u16 *obuf, *ibuf;
- u8 valid_str[WORDS_PER_XFER];
- int i, j, len, blen, dma_size, left, ret = 0;
+ int i, len, blen, dma_size, left, ret = 0;
dma_size = WORDS_PER_XFER * sizeof(u16) * 2;
@@ -301,18 +289,13 @@ static void send_circ_buf(struct uart_max3110 *max,
}
/* Fail to send msg to console is not very critical */
+
ret = max3110_write_then_read(max, obuf, ibuf, blen, 0);
if (ret)
pr_warning(PR_FMT "%s(): get err msg %d\n",
__func__, ret);
- for (i = 0, j = 0; i < len; i++) {
- if (ibuf[i] & MAX3110_READ_DATA_AVAILABLE)
- valid_str[j++] = ibuf[i] & 0xff;
- }
-
- if (j)
- receive_chars(max, valid_str, j);
+ receive_chars(max, ibuf, len);
max->port.icount.tx += len;
left -= len;
@@ -349,33 +332,54 @@ static void serial_m3110_start_tx(struct uart_port *port)
container_of(port, struct uart_max3110, port);
if (!test_and_set_bit(UART_TX_NEEDED, &max->uart_flags))
- wake_up_process(max->main_thread);
+ wake_up(&max->wq);
}
-static void receive_chars(struct uart_max3110 *max, unsigned char *str, int len)
+static int
+receive_chars(struct uart_max3110 *max, unsigned short *str, int len)
{
struct uart_port *port = &max->port;
struct tty_struct *tty;
- int usable;
+ char buf[M3110_RX_FIFO_DEPTH];
+ int r, w, usable;
/* If uart is not opened, just return */
if (!port->state)
- return;
+ return 0;
- tty = port->state->port.tty;
+ tty = tty_port_tty_get(&port->state->port);
if (!tty)
- return;
+ return 0;
+
+ for (r = 0, w = 0; r < len; r++) {
+ if (str[r] & MAX3110_BREAK &&
+ uart_handle_break(port))
+ continue;
+
+ if (str[r] & MAX3110_READ_DATA_AVAILABLE) {
+ if (uart_handle_sysrq_char(port, str[r] & 0xff))
+ continue;
+
+ buf[w++] = str[r] & 0xff;
+ }
+ }
+
+ if (!w) {
+ tty_kref_put(tty);
+ return 0;
+ }
- while (len) {
- usable = tty_buffer_request_room(tty, len);
+ for (r = 0; w; r += usable, w -= usable) {
+ usable = tty_buffer_request_room(tty, w);
if (usable) {
- tty_insert_flip_string(tty, str, usable);
- str += usable;
+ tty_insert_flip_string(tty, buf + r, usable);
port->icount.rx += usable;
}
- len -= usable;
}
tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+
+ return r;
}
/*
@@ -390,28 +394,15 @@ static void receive_chars(struct uart_max3110 *max, unsigned char *str, int len)
*/
static void max3110_con_receive(struct uart_max3110 *max)
{
- int loop = 1, num, total = 0;
- u8 recv_buf[512], *pbuf;
+ int loop = 1, num;
- pbuf = recv_buf;
do {
- num = max3110_read_multi(max, pbuf);
+ num = max3110_read_multi(max);
if (num) {
loop = 5;
- pbuf += num;
- total += num;
-
- if (total >= 504) {
- receive_chars(max, recv_buf, total);
- pbuf = recv_buf;
- total = 0;
- }
}
} while (--loop);
-
- if (total)
- receive_chars(max, recv_buf, total);
}
static int max3110_main_thread(void *_max)
@@ -424,7 +415,8 @@ static int max3110_main_thread(void *_max)
pr_info(PR_FMT "start main thread\n");
do {
- wait_event_interruptible(*wq, max->uart_flags || kthread_should_stop());
+ wait_event_interruptible(*wq,
+ max->uart_flags || kthread_should_stop());
mutex_lock(&max->thread_mutex);
@@ -452,8 +444,9 @@ static irqreturn_t serial_m3110_irq(int irq, void *dev_id)
/* max3110's irq is a falling edge, not level triggered,
* so no need to disable the irq */
+
if (!test_and_set_bit(BIT_IRQ_PENDING, &max->uart_flags))
- wake_up_process(max->main_thread);
+ wake_up(&max->wq);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/mrst_max3110.h b/drivers/tty/serial/mrst_max3110.h
index c37ea48c825..35af0739513 100644
--- a/drivers/tty/serial/mrst_max3110.h
+++ b/drivers/tty/serial/mrst_max3110.h
@@ -7,6 +7,7 @@
/* status bits for all 4 MAX3110 operate modes */
#define MAX3110_READ_DATA_AVAILABLE (1 << 15)
#define MAX3110_WRITE_BUF_EMPTY (1 << 14)
+#define MAX3110_BREAK (1 << 10)
#define WC_TAG (3 << 14)
#define RC_TAG (1 << 14)
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index e6ba8387650..29cbfd8c4e7 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -804,8 +804,6 @@ static int __init msm_console_setup(struct console *co, char *options)
if (unlikely(!port->membase))
return -ENXIO;
- port->cons = co;
-
msm_init_clock(port);
if (options)
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 624701f8138..60c6eb85026 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -30,6 +30,8 @@
#include <linux/serial.h>
#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 9711e06a837..06f6aefd5ba 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -21,6 +21,8 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/serial.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/console.h>
#include <linux/delay.h> /* for udelay */
#include <linux/device.h>
diff --git a/drivers/tty/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c
index de173671e3d..9beaff1cec2 100644
--- a/drivers/tty/serial/nwpserial.c
+++ b/drivers/tty/serial/nwpserial.c
@@ -15,6 +15,7 @@
#include <linux/serial_reg.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/irqreturn.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index b46218d679e..21febef926a 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -20,6 +20,8 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/dmi.h>
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 531931c1b25..5c8e3bba6c8 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -100,6 +100,16 @@ static inline void receive_chars(struct uart_pxa_port *up, int *status)
int max_count = 256;
do {
+ /* work around Errata #20 according to
+ * Intel(R) PXA27x Processor Family
+ * Specification Update (May 2005)
+ *
+ * Step 2
+ * Disable the Reciever Time Out Interrupt via IER[RTOEI]
+ */
+ up->ier &= ~UART_IER_RTOIE;
+ serial_out(up, UART_IER, up->ier);
+
ch = serial_in(up, UART_RX);
flag = TTY_NORMAL;
up->port.icount.rx++;
@@ -156,6 +166,16 @@ static inline void receive_chars(struct uart_pxa_port *up, int *status)
*status = serial_in(up, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
tty_flip_buffer_push(tty);
+
+ /* work around Errata #20 according to
+ * Intel(R) PXA27x Processor Family
+ * Specification Update (May 2005)
+ *
+ * Step 6:
+ * No more data in FIFO: Re-enable RTO interrupt via IER[RTOIE]
+ */
+ up->ier |= UART_IER_RTOIE;
+ serial_out(up, UART_IER, up->ier);
}
static void transmit_chars(struct uart_pxa_port *up)
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 6edafb5ace1..b31f1c3a2c4 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -83,6 +83,16 @@ static int s3c24xx_serial_txempty_nofifo(struct uart_port *port)
return (rd_regl(port, S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXE);
}
+/*
+ * s3c64xx and later SoC's include the interrupt mask and status registers in
+ * the controller itself, unlike the s3c24xx SoC's which have these registers
+ * in the interrupt controller. Check if the port type is s3c64xx or higher.
+ */
+static int s3c24xx_serial_has_interrupt_mask(struct uart_port *port)
+{
+ return to_ourport(port)->info->type == PORT_S3C6400;
+}
+
static void s3c24xx_serial_rx_enable(struct uart_port *port)
{
unsigned long flags;
@@ -126,7 +136,11 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)
struct s3c24xx_uart_port *ourport = to_ourport(port);
if (tx_enabled(port)) {
- disable_irq_nosync(ourport->tx_irq);
+ if (s3c24xx_serial_has_interrupt_mask(port))
+ __set_bit(S3C64XX_UINTM_TXD,
+ portaddrl(port, S3C64XX_UINTM));
+ else
+ disable_irq_nosync(ourport->tx_irq);
tx_enabled(port) = 0;
if (port->flags & UPF_CONS_FLOW)
s3c24xx_serial_rx_enable(port);
@@ -141,19 +155,26 @@ static void s3c24xx_serial_start_tx(struct uart_port *port)
if (port->flags & UPF_CONS_FLOW)
s3c24xx_serial_rx_disable(port);
- enable_irq(ourport->tx_irq);
+ if (s3c24xx_serial_has_interrupt_mask(port))
+ __clear_bit(S3C64XX_UINTM_TXD,
+ portaddrl(port, S3C64XX_UINTM));
+ else
+ enable_irq(ourport->tx_irq);
tx_enabled(port) = 1;
}
}
-
static void s3c24xx_serial_stop_rx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
if (rx_enabled(port)) {
dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
- disable_irq_nosync(ourport->rx_irq);
+ if (s3c24xx_serial_has_interrupt_mask(port))
+ __set_bit(S3C64XX_UINTM_RXD,
+ portaddrl(port, S3C64XX_UINTM));
+ else
+ disable_irq_nosync(ourport->rx_irq);
rx_enabled(port) = 0;
}
}
@@ -320,6 +341,28 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
return IRQ_HANDLED;
}
+/* interrupt handler for s3c64xx and later SoC's.*/
+static irqreturn_t s3c64xx_serial_handle_irq(int irq, void *id)
+{
+ struct s3c24xx_uart_port *ourport = id;
+ struct uart_port *port = &ourport->port;
+ unsigned int pend = rd_regl(port, S3C64XX_UINTP);
+ unsigned long flags;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (pend & S3C64XX_UINTM_RXD_MSK) {
+ ret = s3c24xx_serial_rx_chars(irq, id);
+ wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_RXD_MSK);
+ }
+ if (pend & S3C64XX_UINTM_TXD_MSK) {
+ ret = s3c24xx_serial_tx_chars(irq, id);
+ wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_TXD_MSK);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
@@ -377,18 +420,25 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
struct s3c24xx_uart_port *ourport = to_ourport(port);
if (ourport->tx_claimed) {
- free_irq(ourport->tx_irq, ourport);
+ if (!s3c24xx_serial_has_interrupt_mask(port))
+ free_irq(ourport->tx_irq, ourport);
tx_enabled(port) = 0;
ourport->tx_claimed = 0;
}
if (ourport->rx_claimed) {
- free_irq(ourport->rx_irq, ourport);
+ if (!s3c24xx_serial_has_interrupt_mask(port))
+ free_irq(ourport->rx_irq, ourport);
ourport->rx_claimed = 0;
rx_enabled(port) = 0;
}
-}
+ /* Clear pending interrupts and mask all interrupts */
+ if (s3c24xx_serial_has_interrupt_mask(port)) {
+ wr_regl(port, S3C64XX_UINTP, 0xf);
+ wr_regl(port, S3C64XX_UINTM, 0xf);
+ }
+}
static int s3c24xx_serial_startup(struct uart_port *port)
{
@@ -436,6 +486,33 @@ static int s3c24xx_serial_startup(struct uart_port *port)
return ret;
}
+static int s3c64xx_serial_startup(struct uart_port *port)
+{
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+ int ret;
+
+ dbg("s3c64xx_serial_startup: port=%p (%08lx,%p)\n",
+ port->mapbase, port->membase);
+
+ ret = request_irq(port->irq, s3c64xx_serial_handle_irq, IRQF_SHARED,
+ s3c24xx_serial_portname(port), ourport);
+ if (ret) {
+ printk(KERN_ERR "cannot get irq %d\n", port->irq);
+ return ret;
+ }
+
+ /* For compatibility with s3c24xx Soc's */
+ rx_enabled(port) = 1;
+ ourport->rx_claimed = 1;
+ tx_enabled(port) = 0;
+ ourport->tx_claimed = 1;
+
+ /* Enable Rx Interrupt */
+ __clear_bit(S3C64XX_UINTM_RXD, portaddrl(port, S3C64XX_UINTM));
+ dbg("s3c64xx_serial_startup ok\n");
+ return ret;
+}
+
/* power power management control */
static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
@@ -879,7 +956,6 @@ static struct uart_ops s3c24xx_serial_ops = {
.verify_port = s3c24xx_serial_verify_port,
};
-
static struct uart_driver s3c24xx_uart_drv = {
.owner = THIS_MODULE,
.driver_name = "s3c2410_serial",
@@ -895,7 +971,6 @@ static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS
.port = {
.lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[0].port.lock),
.iotype = UPIO_MEM,
- .irq = IRQ_S3CUART_RX0,
.uartclk = 0,
.fifosize = 16,
.ops = &s3c24xx_serial_ops,
@@ -907,7 +982,6 @@ static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS
.port = {
.lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[1].port.lock),
.iotype = UPIO_MEM,
- .irq = IRQ_S3CUART_RX1,
.uartclk = 0,
.fifosize = 16,
.ops = &s3c24xx_serial_ops,
@@ -921,7 +995,6 @@ static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS
.port = {
.lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[2].port.lock),
.iotype = UPIO_MEM,
- .irq = IRQ_S3CUART_RX2,
.uartclk = 0,
.fifosize = 16,
.ops = &s3c24xx_serial_ops,
@@ -935,7 +1008,6 @@ static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS
.port = {
.lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[3].port.lock),
.iotype = UPIO_MEM,
- .irq = IRQ_S3CUART_RX3,
.uartclk = 0,
.fifosize = 16,
.ops = &s3c24xx_serial_ops,
@@ -1077,6 +1149,10 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
port->dev = &platdev->dev;
ourport->info = info;
+ /* Startup sequence is different for s3c64xx and higher SoC's */
+ if (s3c24xx_serial_has_interrupt_mask(port))
+ s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
+
/* copy the info in from provided structure */
ourport->port.fifosize = info->fifosize;
@@ -1116,6 +1192,13 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
ourport->clk = clk_get(&platdev->dev, "uart");
+ /* Keep all interrupts masked and cleared */
+ if (s3c24xx_serial_has_interrupt_mask(port)) {
+ wr_regl(port, S3C64XX_UINTM, 0xf);
+ wr_regl(port, S3C64XX_UINTP, 0xf);
+ wr_regl(port, S3C64XX_UINTSP, 0xf);
+ }
+
dbg("port: map=%08x, mem=%08x, irq=%d (%d,%d), clock=%ld\n",
port->mapbase, port->membase, port->irq,
ourport->rx_irq, ourport->tx_irq, port->uartclk);
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index a69d9a54be9..8e87b788e5c 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -61,6 +61,7 @@ struct s3c24xx_uart_port {
/* register access controls */
#define portaddr(port, reg) ((port)->membase + (reg))
+#define portaddrl(port, reg) ((unsigned long *)((port)->membase + (reg)))
#define rd_regb(port, reg) (__raw_readb(portaddr(port, reg)))
#define rd_regl(port, reg) (__raw_readl(portaddr(port, reg)))
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 6bc2e3f876f..0be8a2f00d0 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -37,6 +37,7 @@
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/types.h>
#include <linux/atomic.h>
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index a3efbea5dbb..0406d7ff505 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -57,7 +57,7 @@ static struct lock_class_key port_lock_key;
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
struct ktermios *old_termios);
-static void __uart_wait_until_sent(struct uart_port *port, int timeout);
+static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
static void uart_change_pm(struct uart_state *state, int pm_state);
/*
@@ -72,7 +72,7 @@ void uart_write_wakeup(struct uart_port *port)
* closed. No cookie for you.
*/
BUG_ON(!state);
- tasklet_schedule(&state->tlet);
+ tty_wakeup(state->port.tty);
}
static void uart_stop(struct tty_struct *tty)
@@ -107,12 +107,6 @@ static void uart_start(struct tty_struct *tty)
spin_unlock_irqrestore(&port->lock, flags);
}
-static void uart_tasklet_action(unsigned long data)
-{
- struct uart_state *state = (struct uart_state *)data;
- tty_wakeup(state->port.tty);
-}
-
static inline void
uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
{
@@ -255,9 +249,11 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
}
/*
- * kill off our tasklet
+ * It's possible for shutdown to be called after suspend if we get
+ * a DCD drop (hangup) at just the right time. Clear suspended bit so
+ * we don't try to resume a port that has been shutdown.
*/
- tasklet_kill(&state->tlet);
+ clear_bit(ASYNCB_SUSPENDED, &port->flags);
/*
* Free the transmit buffer page.
@@ -1261,8 +1257,6 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
struct uart_port *uport;
unsigned long flags;
- BUG_ON(!tty_locked());
-
if (!state)
return;
@@ -1271,12 +1265,11 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
pr_debug("uart_close(%d) called\n", uport->line);
- mutex_lock(&port->mutex);
spin_lock_irqsave(&port->lock, flags);
if (tty_hung_up_p(filp)) {
spin_unlock_irqrestore(&port->lock, flags);
- goto done;
+ return;
}
if ((tty->count == 1) && (port->count != 1)) {
@@ -1298,7 +1291,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
}
if (port->count) {
spin_unlock_irqrestore(&port->lock, flags);
- goto done;
+ return;
}
/*
@@ -1306,19 +1299,13 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
* the line discipline to only process XON/XOFF characters by
* setting tty->closing.
*/
+ set_bit(ASYNCB_CLOSING, &port->flags);
tty->closing = 1;
spin_unlock_irqrestore(&port->lock, flags);
- if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
- /*
- * hack: open-coded tty_wait_until_sent to avoid
- * recursive tty_lock
- */
- long timeout = msecs_to_jiffies(port->closing_wait);
- if (wait_event_interruptible_timeout(tty->write_wait,
- !tty_chars_in_buffer(tty), timeout) >= 0)
- __uart_wait_until_sent(uport, timeout);
- }
+ if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
+ tty_wait_until_sent_from_close(tty,
+ msecs_to_jiffies(port->closing_wait));
/*
* At this point, we stop accepting input. To do this, we
@@ -1334,9 +1321,10 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
* has completely drained; this is especially
* important if there is a transmit FIFO!
*/
- __uart_wait_until_sent(uport, uport->timeout);
+ uart_wait_until_sent(tty, uport->timeout);
}
+ mutex_lock(&port->mutex);
uart_shutdown(tty, state);
uart_flush_buffer(tty);
@@ -1361,15 +1349,18 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
* Wake up anyone trying to open this port.
*/
clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
+ clear_bit(ASYNCB_CLOSING, &port->flags);
spin_unlock_irqrestore(&port->lock, flags);
wake_up_interruptible(&port->open_wait);
+ wake_up_interruptible(&port->close_wait);
-done:
mutex_unlock(&port->mutex);
}
-static void __uart_wait_until_sent(struct uart_port *port, int timeout)
+static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
{
+ struct uart_state *state = tty->driver_data;
+ struct uart_port *port = state->uart_port;
unsigned long char_time, expire;
if (port->type == PORT_UNKNOWN || port->fifosize == 0)
@@ -1421,16 +1412,6 @@ static void __uart_wait_until_sent(struct uart_port *port, int timeout)
}
}
-static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->uart_port;
-
- tty_lock();
- __uart_wait_until_sent(port, timeout);
- tty_unlock();
-}
-
/*
* This is called with the BKL held in
* linux/drivers/char/tty_io.c:do_tty_hangup()
@@ -1443,7 +1424,6 @@ static void uart_hangup(struct tty_struct *tty)
struct tty_port *port = &state->port;
unsigned long flags;
- BUG_ON(!tty_locked());
pr_debug("uart_hangup(%d)\n", state->uart_port->line);
mutex_lock(&port->mutex);
@@ -1530,7 +1510,6 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
struct tty_port *port;
int retval, line = tty->index;
- BUG_ON(!tty_locked());
pr_debug("uart_open(%d) called\n", line);
/*
@@ -2008,6 +1987,8 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
if (port->tty && port->tty->termios && termios.c_cflag == 0)
termios = *(port->tty->termios);
+ if (console_suspend_enabled)
+ uart_change_pm(state, 0);
uport->ops->set_termios(uport, &termios, NULL);
if (console_suspend_enabled)
console_start(uport->cons);
@@ -2068,8 +2049,6 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
case UPIO_MEM32:
case UPIO_AU:
case UPIO_TSI:
- case UPIO_DWAPB:
- case UPIO_DWAPB32:
snprintf(address, sizeof(address),
"MMIO 0x%llx", (unsigned long long)port->mapbase);
break;
@@ -2298,8 +2277,6 @@ int uart_register_driver(struct uart_driver *drv)
port->ops = &uart_port_ops;
port->close_delay = 500; /* .5 seconds */
port->closing_wait = 30000; /* 30 seconds */
- tasklet_init(&state->tlet, uart_tasklet_action,
- (unsigned long)state);
}
retval = tty_register_driver(normal);
@@ -2460,11 +2437,6 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
*/
uport->type = PORT_UNKNOWN;
- /*
- * Kill the tasklet, and free resources.
- */
- tasklet_kill(&state->tlet);
-
state->uart_port = NULL;
mutex_unlock(&port_mutex);
@@ -2489,8 +2461,6 @@ int uart_match_port(struct uart_port *port1, struct uart_port *port2)
case UPIO_MEM32:
case UPIO_AU:
case UPIO_TSI:
- case UPIO_DWAPB:
- case UPIO_DWAPB32:
return (port1->mapbase == port2->mapbase);
}
return 0;
diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
index 2430319f2f5..7c13639c597 100644
--- a/drivers/tty/serial/serial_ks8695.c
+++ b/drivers/tty/serial/serial_ks8695.c
@@ -13,6 +13,7 @@
*/
#include <linux/module.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/serial.h>
@@ -336,19 +337,19 @@ static int ks8695uart_startup(struct uart_port *port)
/*
* Allocate the IRQ
*/
- retval = request_irq(KS8695_IRQ_UART_TX, ks8695uart_tx_chars, IRQF_DISABLED, "UART TX", port);
+ retval = request_irq(KS8695_IRQ_UART_TX, ks8695uart_tx_chars, 0, "UART TX", port);
if (retval)
goto err_tx;
- retval = request_irq(KS8695_IRQ_UART_RX, ks8695uart_rx_chars, IRQF_DISABLED, "UART RX", port);
+ retval = request_irq(KS8695_IRQ_UART_RX, ks8695uart_rx_chars, 0, "UART RX", port);
if (retval)
goto err_rx;
- retval = request_irq(KS8695_IRQ_UART_LINE_STATUS, ks8695uart_rx_chars, IRQF_DISABLED, "UART LineStatus", port);
+ retval = request_irq(KS8695_IRQ_UART_LINE_STATUS, ks8695uart_rx_chars, 0, "UART LineStatus", port);
if (retval)
goto err_ls;
- retval = request_irq(KS8695_IRQ_UART_MODEM_STATUS, ks8695uart_modem_status, IRQF_DISABLED, "UART ModemStatus", port);
+ retval = request_irq(KS8695_IRQ_UART_MODEM_STATUS, ks8695uart_modem_status, 0, "UART ModemStatus", port);
if (retval)
goto err_ms;
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 8e3fc1944e6..34bd345da77 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -28,6 +28,8 @@
#include <linux/pci.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <asm/io.h>
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 5ea6ec3442e..9871c57b348 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1976,7 +1976,7 @@ static int __devinit sci_init_single(struct platform_device *dev,
* For the muxed case there's nothing more to do.
*/
port->irq = p->irqs[SCIx_RXI_IRQ];
- port->irqflags = IRQF_DISABLED;
+ port->irqflags = 0;
port->serial_in = sci_serial_in;
port->serial_out = sci_serial_out;
diff --git a/drivers/tty/serial/sn_console.c b/drivers/tty/serial/sn_console.c
index 377ae74e715..238c7df73ef 100644
--- a/drivers/tty/serial/sn_console.c
+++ b/drivers/tty/serial/sn_console.c
@@ -39,6 +39,7 @@
#include <linux/interrupt.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/console.h>
#include <linux/module.h>
@@ -737,7 +738,7 @@ static void __init sn_sal_switch_to_interrupts(struct sn_cons_port *port)
DPRINTF("sn_console: switching to interrupt driven console\n");
if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt,
- IRQF_DISABLED | IRQF_SHARED,
+ IRQF_SHARED,
"SAL console driver", port) >= 0) {
spin_lock_irqsave(&port->sc_port.lock, flags);
port->sc_port.irq = SGI_UART_VECTOR;
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 1f36b7eb735..a4b63bfeaa2 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -23,6 +23,8 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 8af1ed83a4c..b908615ccaa 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -15,6 +15,7 @@
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 9af9f0879a2..cea8918b823 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -20,8 +20,10 @@
#include <linux/module.h>
#include <linux/serial.h>
-#include <linux/slab.h>
#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 19cc1e8149d..8c03b127fd0 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -12,9 +12,11 @@
*/
#include <linux/platform_device.h>
+#include <linux/serial.h>
#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/console.h>
-#include <linux/serial.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 0aebd7121b5..b7455b52608 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -63,6 +63,7 @@
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/types.h>
#include <linux/atomic.h>
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 272e417a9b0..e67fb20490d 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -2124,7 +2124,6 @@ static int mgsl_write(struct tty_struct * tty,
if ( info->params.mode == MGSL_MODE_HDLC ||
info->params.mode == MGSL_MODE_RAW ) {
/* operating in synchronous (frame oriented) mode */
- /* operating in synchronous (frame oriented) mode */
if (info->tx_active) {
if ( info->params.mode == MGSL_MODE_HDLC ) {
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index c77831c7675..0f6b796c95c 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -4950,7 +4950,7 @@ CheckAgain:
if ( debug_level >= DEBUG_LEVEL_DATA )
trace_block(info,info->rx_buf_list_ex[StartIndex].virt_addr,
- min_t(int, framesize,SCABUFSIZE),0);
+ min_t(unsigned int, framesize, SCABUFSIZE), 0);
if (framesize) {
if (framesize > info->max_frame_size)
@@ -5015,14 +5015,14 @@ static void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int co
SCADESC_EX *desc_ex;
if ( debug_level >= DEBUG_LEVEL_DATA )
- trace_block(info,buf, min_t(int, count,SCABUFSIZE), 1);
+ trace_block(info, buf, min_t(unsigned int, count, SCABUFSIZE), 1);
/* Copy source buffer to one or more DMA buffers, starting with
* the first transmit dma buffer.
*/
for(i=0;;)
{
- copy_count = min_t(unsigned short,count,SCABUFSIZE);
+ copy_count = min_t(unsigned int, count, SCABUFSIZE);
desc = &info->tx_buf_list[i];
desc_ex = &info->tx_buf_list_ex[i];
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 4f1fc81112e..05085beb83d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -194,8 +194,7 @@ static inline struct tty_struct *file_tty(struct file *file)
return ((struct tty_file_private *)file->private_data)->tty;
}
-/* Associate a new file with the tty structure */
-int tty_add_file(struct tty_struct *tty, struct file *file)
+int tty_alloc_file(struct file *file)
{
struct tty_file_private *priv;
@@ -203,15 +202,36 @@ int tty_add_file(struct tty_struct *tty, struct file *file)
if (!priv)
return -ENOMEM;
+ file->private_data = priv;
+
+ return 0;
+}
+
+/* Associate a new file with the tty structure */
+void tty_add_file(struct tty_struct *tty, struct file *file)
+{
+ struct tty_file_private *priv = file->private_data;
+
priv->tty = tty;
priv->file = file;
- file->private_data = priv;
spin_lock(&tty_files_lock);
list_add(&priv->list, &tty->tty_files);
spin_unlock(&tty_files_lock);
+}
- return 0;
+/**
+ * tty_free_file - free file->private_data
+ *
+ * This shall be used only for fail path handling when tty_add_file was not
+ * called yet.
+ */
+void tty_free_file(struct file *file)
+{
+ struct tty_file_private *priv = file->private_data;
+
+ file->private_data = NULL;
+ kfree(priv);
}
/* Delete file from its tty */
@@ -222,8 +242,7 @@ void tty_del_file(struct file *file)
spin_lock(&tty_files_lock);
list_del(&priv->list);
spin_unlock(&tty_files_lock);
- file->private_data = NULL;
- kfree(priv);
+ tty_free_file(file);
}
@@ -1811,6 +1830,10 @@ static int tty_open(struct inode *inode, struct file *filp)
nonseekable_open(inode, filp);
retry_open:
+ retval = tty_alloc_file(filp);
+ if (retval)
+ return -ENOMEM;
+
noctty = filp->f_flags & O_NOCTTY;
index = -1;
retval = 0;
@@ -1823,6 +1846,7 @@ retry_open:
if (!tty) {
tty_unlock();
mutex_unlock(&tty_mutex);
+ tty_free_file(filp);
return -ENXIO;
}
driver = tty_driver_kref_get(tty->driver);
@@ -1855,6 +1879,7 @@ retry_open:
}
tty_unlock();
mutex_unlock(&tty_mutex);
+ tty_free_file(filp);
return -ENODEV;
}
@@ -1862,6 +1887,7 @@ retry_open:
if (!driver) {
tty_unlock();
mutex_unlock(&tty_mutex);
+ tty_free_file(filp);
return -ENODEV;
}
got_driver:
@@ -1872,6 +1898,8 @@ got_driver:
if (IS_ERR(tty)) {
tty_unlock();
mutex_unlock(&tty_mutex);
+ tty_driver_kref_put(driver);
+ tty_free_file(filp);
return PTR_ERR(tty);
}
}
@@ -1887,15 +1915,11 @@ got_driver:
tty_driver_kref_put(driver);
if (IS_ERR(tty)) {
tty_unlock();
+ tty_free_file(filp);
return PTR_ERR(tty);
}
- retval = tty_add_file(tty, filp);
- if (retval) {
- tty_unlock();
- tty_release(inode, filp);
- return retval;
- }
+ tty_add_file(tty, filp);
check_tty_count(tty, "tty_open");
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
@@ -2716,6 +2740,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
ld = tty_ldisc_ref_wait(tty);
if (ld->ops->compat_ioctl)
retval = ld->ops->compat_ioctl(tty, file, cmd, arg);
+ else
+ retval = n_tty_compat_ioctl_helper(tty, file, cmd, arg);
tty_ldisc_deref(ld);
return retval;
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 53f2442c609..9314d93c1a2 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
+#include <linux/compat.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -1179,3 +1180,19 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
}
}
EXPORT_SYMBOL(n_tty_ioctl_helper);
+
+#ifdef CONFIG_COMPAT
+long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case TIOCGLCKTRMIOS:
+ case TIOCSLCKTRMIOS:
+ return tty_mode_ioctl(tty, file, cmd, (unsigned long) compat_ptr(arg));
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+EXPORT_SYMBOL(n_tty_compat_ioctl_helper);
+#endif
+
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index ef925d58171..512c49f98e8 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -450,7 +450,6 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
if (ld->ops->open) {
int ret;
/* BTM here locks versus a hangup event */
- WARN_ON(!tty_locked());
ret = ld->ops->open(tty);
if (ret)
clear_bit(TTY_LDISC_OPEN, &tty->flags);
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 3b2bb771944..9ff986c32a2 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -15,30 +15,18 @@
* Don't use in new code.
*/
static DEFINE_MUTEX(big_tty_mutex);
-struct task_struct *__big_tty_mutex_owner;
-EXPORT_SYMBOL_GPL(__big_tty_mutex_owner);
/*
* Getting the big tty mutex.
*/
void __lockfunc tty_lock(void)
{
- struct task_struct *task = current;
-
- WARN_ON(__big_tty_mutex_owner == task);
-
mutex_lock(&big_tty_mutex);
- __big_tty_mutex_owner = task;
}
EXPORT_SYMBOL(tty_lock);
void __lockfunc tty_unlock(void)
{
- struct task_struct *task = current;
-
- WARN_ON(__big_tty_mutex_owner != task);
- __big_tty_mutex_owner = NULL;
-
mutex_unlock(&big_tty_mutex);
}
EXPORT_SYMBOL(tty_unlock);
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 33d37d230f8..ef9dd628ba0 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -350,7 +350,7 @@ int tty_port_close_start(struct tty_port *port,
tty_driver_flush_buffer(tty);
if (test_bit(ASYNCB_INITIALIZED, &port->flags) &&
port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, port->closing_wait);
+ tty_wait_until_sent_from_close(tty, port->closing_wait);
if (port->drain_delay) {
unsigned int bps = tty_get_baud_rate(tty);
long timeout;
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 3761ccf0f34..a605549ee28 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -33,7 +33,6 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/irq.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
@@ -43,6 +42,8 @@
#include <linux/notifier.h>
#include <linux/jiffies.h>
+#include <asm/irq_regs.h>
+
extern void ctrl_alt_del(void);
/*
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index fb864e7fcd1..7a0a12ae545 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -301,6 +301,8 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
/* Insert the contents of the selection buffer into the
* queue of the tty associated with the current console.
* Invoked by ioctl().
+ *
+ * Locking: always called with BTM from vt_ioctl
*/
int paste_selection(struct tty_struct *tty)
{
@@ -310,8 +312,6 @@ int paste_selection(struct tty_struct *tty)
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
- /* always called with BTM from vt_ioctl */
- WARN_ON(!tty_locked());
console_lock();
poke_blanked_console();
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index b3915b7ad3e..e716839fab6 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
static void notify_write(struct vc_data *vc, unsigned int unicode)
{
- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
}
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index d2efe823c20..a783d533a1a 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -69,7 +69,7 @@ static ssize_t map_name_show(struct uio_mem *mem, char *buf)
static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
{
- return sprintf(buf, "0x%lx\n", mem->addr);
+ return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr);
}
static ssize_t map_size_show(struct uio_mem *mem, char *buf)
@@ -79,7 +79,7 @@ static ssize_t map_size_show(struct uio_mem *mem, char *buf)
static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
{
- return sprintf(buf, "0x%lx\n", mem->addr & ~PAGE_MASK);
+ return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr & ~PAGE_MASK);
}
struct map_sysfs_entry {
@@ -634,8 +634,7 @@ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
page = virt_to_page(idev->info->mem[mi].addr + offset);
else
- page = vmalloc_to_page((void *)idev->info->mem[mi].addr
- + offset);
+ page = vmalloc_to_page((void *)(unsigned long)idev->info->mem[mi].addr + offset);
get_page(page);
vmf->page = page;
return 0;
@@ -750,14 +749,13 @@ static int uio_major_init(void)
uio_major = MAJOR(uio_dev);
uio_cdev = cdev;
- result = 0;
-out:
- return result;
+ return 0;
out_put:
kobject_put(&cdev->kobj);
out_unregister:
unregister_chrdev_region(uio_dev, UIO_MAX_DEVICES);
- goto out;
+out:
+ return result;
}
static void uio_major_cleanup(void)
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index fc22e1e6f21..02bd47bdee1 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -24,7 +24,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uio_driver.h>
-#include <linux/spinlock.h>
#define DRIVER_VERSION "0.01.0"
#define DRIVER_AUTHOR "Michael S. Tsirkin <mst@redhat.com>"
@@ -33,7 +32,6 @@
struct uio_pci_generic_dev {
struct uio_info info;
struct pci_dev *pdev;
- spinlock_t lock; /* guards command register accesses */
};
static inline struct uio_pci_generic_dev *
@@ -57,7 +55,6 @@ static irqreturn_t irqhandler(int irq, struct uio_info *info)
BUILD_BUG_ON(PCI_COMMAND % 4);
BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
- spin_lock_irq(&gdev->lock);
pci_block_user_cfg_access(pdev);
/* Read both command and status registers in a single 32-bit operation.
@@ -83,7 +80,6 @@ static irqreturn_t irqhandler(int irq, struct uio_info *info)
done:
pci_unblock_user_cfg_access(pdev);
- spin_unlock_irq(&gdev->lock);
return ret;
}
@@ -158,7 +154,6 @@ static int __devinit probe(struct pci_dev *pdev,
gdev->info.irq_flags = IRQF_SHARED;
gdev->info.handler = irqhandler;
gdev->pdev = pdev;
- spin_lock_init(&gdev->lock);
if (uio_register_device(&pdev->dev, &gdev->info))
goto err_register;
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index bae96d24676..0b2ed71e3bf 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -253,7 +253,7 @@ static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = {
};
#ifdef CONFIG_OF
-static const struct of_device_id __devinitconst uio_of_genirq_match[] = {
+static const struct of_device_id uio_of_genirq_match[] = {
{ /* empty for now */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 48f1781352f..4ac2750491d 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -12,6 +12,11 @@ menuconfig USB_SUPPORT
if USB_SUPPORT
+config USB_COMMON
+ tristate
+ default y
+ depends on USB || USB_GADGET
+
# Host-side USB depends on having a host controller
# NOTE: dummy_hcd is always an option, but it's ignored here ...
# NOTE: SL-811 option should be board-specific ...
@@ -19,6 +24,7 @@ config USB_ARCH_HAS_HCD
boolean
default y if USB_ARCH_HAS_OHCI
default y if USB_ARCH_HAS_EHCI
+ default y if USB_ARCH_HAS_XHCI
default y if PCMCIA && !M32R # sl811_cs
default y if ARM # SL-811
default y if BLACKFIN # SL-811
@@ -54,7 +60,7 @@ config USB_ARCH_HAS_OHCI
# some non-PCI hcds implement EHCI
config USB_ARCH_HAS_EHCI
boolean
- default y if PPC_83xx
+ default y if FSL_SOC
default y if PPC_MPC512x
default y if SOC_AU1200
default y if ARCH_IXP4XX
@@ -69,6 +75,12 @@ config USB_ARCH_HAS_EHCI
default y if ARCH_MSM
default y if MICROBLAZE
default y if SPARC_LEON
+ default y if ARCH_MMP
+ default PCI
+
+# some non-PCI HCDs implement xHCI
+config USB_ARCH_HAS_XHCI
+ boolean
default PCI
# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
@@ -110,6 +122,8 @@ config USB
source "drivers/usb/core/Kconfig"
+source "drivers/usb/dwc3/Kconfig"
+
source "drivers/usb/mon/Kconfig"
source "drivers/usb/wusbcore/Kconfig"
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 30ddf8dc4f7..75eca764522 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -6,6 +6,8 @@
obj-$(CONFIG_USB) += core/
+obj-$(CONFIG_USB_DWC3) += dwc3/
+
obj-$(CONFIG_USB_MON) += mon/
obj-$(CONFIG_PCI) += host/
@@ -51,3 +53,5 @@ obj-$(CONFIG_USB_MUSB_HDRC) += musb/
obj-$(CONFIG_USB_RENESAS_USBHS) += renesas_usbhs/
obj-$(CONFIG_USB_OTG_UTILS) += otg/
obj-$(CONFIG_USB_GADGET) += gadget/
+
+obj-$(CONFIG_USB_COMMON) += usb-common.o
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index dac7676ce21..6960715c506 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1058,11 +1058,11 @@ made_compressed_probe:
goto alloc_fail;
}
- ctrlsize = le16_to_cpu(epctrl->wMaxPacketSize);
- readsize = le16_to_cpu(epread->wMaxPacketSize) *
+ ctrlsize = usb_endpoint_maxp(epctrl);
+ readsize = usb_endpoint_maxp(epread) *
(quirks == SINGLE_RX_URB ? 1 : 2);
acm->combined_interfaces = combined_interfaces;
- acm->writesize = le16_to_cpu(epwrite->wMaxPacketSize) * 20;
+ acm->writesize = usb_endpoint_maxp(epwrite) * 20;
acm->control = control_interface;
acm->data = data_interface;
acm->minor = minor;
@@ -1305,7 +1305,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
struct acm *acm = usb_get_intfdata(intf);
int cnt;
- if (message.event & PM_EVENT_AUTO) {
+ if (PMSG_IS_AUTO(message)) {
int b;
spin_lock_irq(&acm->write_lock);
@@ -1534,6 +1534,9 @@ static const struct usb_device_id acm_ids[] = {
{ NOKIA_PCSUITE_ACM_INFO(0x03cd), }, /* Nokia C7 */
{ SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
+ /* Support for Owen devices */
+ { USB_DEVICE(0x03eb, 0x0030), }, /* Owen SI30 */
+
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
/* Support Lego NXT using pbLua firmware */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 2b9ff518b50..efe684908c1 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -682,7 +682,7 @@ next_desc:
if (!ep || !usb_endpoint_is_int_in(ep))
goto err;
- desc->wMaxPacketSize = le16_to_cpu(ep->wMaxPacketSize);
+ desc->wMaxPacketSize = usb_endpoint_maxp(ep);
desc->bMaxPacketSize0 = udev->descriptor.bMaxPacketSize0;
desc->orq = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
@@ -798,11 +798,11 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor);
/* if this is an autosuspend the caller does the locking */
- if (!(message.event & PM_EVENT_AUTO))
+ if (!PMSG_IS_AUTO(message))
mutex_lock(&desc->lock);
spin_lock_irq(&desc->iuspin);
- if ((message.event & PM_EVENT_AUTO) &&
+ if (PMSG_IS_AUTO(message) &&
(test_bit(WDM_IN_USE, &desc->flags)
|| test_bit(WDM_RESPONDING, &desc->flags))) {
spin_unlock_irq(&desc->iuspin);
@@ -815,7 +815,7 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
}
- if (!(message.event & PM_EVENT_AUTO))
+ if (!PMSG_IS_AUTO(message))
mutex_unlock(&desc->lock);
return rv;
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 3f94ac34dce..12cf5e7395a 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -186,8 +186,7 @@ static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data)
for (n = 0; n < current_setting->desc.bNumEndpoints; n++)
if (current_setting->endpoint[n].desc.bEndpointAddress ==
data->bulk_in)
- max_size = le16_to_cpu(current_setting->endpoint[n].
- desc.wMaxPacketSize);
+ max_size = usb_endpoint_maxp(&current_setting->endpoint[n].desc);
if (max_size == 0) {
dev_err(dev, "Couldn't get wMaxPacketSize\n");
@@ -636,7 +635,7 @@ static int usbtmc_ioctl_clear(struct usbtmc_device_data *data)
for (n = 0; n < current_setting->desc.bNumEndpoints; n++) {
desc = &current_setting->endpoint[n].desc;
if (desc->bEndpointAddress == data->bulk_in)
- max_size = le16_to_cpu(desc->wMaxPacketSize);
+ max_size = usb_endpoint_maxp(desc);
}
if (max_size == 0) {
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 26678cadfb2..f4bdd0ce8d5 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -124,9 +124,9 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
if (usb_endpoint_xfer_isoc(&ep->desc))
max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
- le16_to_cpu(ep->desc.wMaxPacketSize);
+ usb_endpoint_maxp(&ep->desc);
else if (usb_endpoint_xfer_int(&ep->desc))
- max_tx = le16_to_cpu(ep->desc.wMaxPacketSize) *
+ max_tx = usb_endpoint_maxp(&ep->desc) *
(desc->bMaxBurst + 1);
else
max_tx = 999999;
@@ -241,7 +241,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
cfgno, inum, asnum, d->bEndpointAddress);
endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT;
endpoint->desc.bInterval = 1;
- if (le16_to_cpu(endpoint->desc.wMaxPacketSize) > 8)
+ if (usb_endpoint_maxp(&endpoint->desc) > 8)
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
@@ -254,7 +254,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
&& usb_endpoint_xfer_bulk(d)) {
unsigned maxp;
- maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize) & 0x07ff;
+ maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff;
if (maxp != 512)
dev_warn(ddev, "config %d interface %d altsetting %d "
"bulk endpoint 0x%X has invalid maxpacket %d\n",
@@ -755,3 +755,106 @@ err2:
dev_err(ddev, "out of memory\n");
return result;
}
+
+void usb_release_bos_descriptor(struct usb_device *dev)
+{
+ if (dev->bos) {
+ kfree(dev->bos->desc);
+ kfree(dev->bos);
+ dev->bos = NULL;
+ }
+}
+
+/* Get BOS descriptor set */
+int usb_get_bos_descriptor(struct usb_device *dev)
+{
+ struct device *ddev = &dev->dev;
+ struct usb_bos_descriptor *bos;
+ struct usb_dev_cap_header *cap;
+ unsigned char *buffer;
+ int length, total_len, num, i;
+ int ret;
+
+ bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
+ if (!bos)
+ return -ENOMEM;
+
+ /* Get BOS descriptor */
+ ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE);
+ if (ret < USB_DT_BOS_SIZE) {
+ dev_err(ddev, "unable to get BOS descriptor\n");
+ if (ret >= 0)
+ ret = -ENOMSG;
+ kfree(bos);
+ return ret;
+ }
+
+ length = bos->bLength;
+ total_len = le16_to_cpu(bos->wTotalLength);
+ num = bos->bNumDeviceCaps;
+ kfree(bos);
+ if (total_len < length)
+ return -EINVAL;
+
+ dev->bos = kzalloc(sizeof(struct usb_host_bos), GFP_KERNEL);
+ if (!dev->bos)
+ return -ENOMEM;
+
+ /* Now let's get the whole BOS descriptor set */
+ buffer = kzalloc(total_len, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ dev->bos->desc = (struct usb_bos_descriptor *)buffer;
+
+ ret = usb_get_descriptor(dev, USB_DT_BOS, 0, buffer, total_len);
+ if (ret < total_len) {
+ dev_err(ddev, "unable to get BOS descriptor set\n");
+ if (ret >= 0)
+ ret = -ENOMSG;
+ goto err;
+ }
+ total_len -= length;
+
+ for (i = 0; i < num; i++) {
+ buffer += length;
+ cap = (struct usb_dev_cap_header *)buffer;
+ length = cap->bLength;
+
+ if (total_len < length)
+ break;
+ total_len -= length;
+
+ if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
+ dev_warn(ddev, "descriptor type invalid, skip\n");
+ continue;
+ }
+
+ switch (cap->bDevCapabilityType) {
+ case USB_CAP_TYPE_WIRELESS_USB:
+ /* Wireless USB cap descriptor is handled by wusb */
+ break;
+ case USB_CAP_TYPE_EXT:
+ dev->bos->ext_cap =
+ (struct usb_ext_cap_descriptor *)buffer;
+ break;
+ case USB_SS_CAP_TYPE:
+ dev->bos->ss_cap =
+ (struct usb_ss_cap_descriptor *)buffer;
+ break;
+ case CONTAINER_ID_TYPE:
+ dev->bos->ss_id =
+ (struct usb_ss_container_id_descriptor *)buffer;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+
+err:
+ usb_release_bos_descriptor(dev);
+ return ret;
+}
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 0149c0976e9..d9569658476 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -190,7 +190,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
dir = usb_endpoint_dir_in(desc) ? 'I' : 'O';
if (speed == USB_SPEED_HIGH) {
- switch (le16_to_cpu(desc->wMaxPacketSize) & (0x03 << 11)) {
+ switch (usb_endpoint_maxp(desc) & (0x03 << 11)) {
case 1 << 11:
bandwidth = 2; break;
case 2 << 11:
@@ -240,7 +240,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
start += sprintf(start, format_endpt, desc->bEndpointAddress, dir,
desc->bmAttributes, type,
- (le16_to_cpu(desc->wMaxPacketSize) & 0x07ff) *
+ (usb_endpoint_maxp(desc) & 0x07ff) *
bandwidth,
interval, unit);
return start;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 37518dfdeb9..e3beaf229ee 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -46,6 +46,7 @@
#include <linux/cdev.h>
#include <linux/notifier.h>
#include <linux/security.h>
+#include <linux/user_namespace.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <linux/moduleparam.h>
@@ -68,7 +69,7 @@ struct dev_state {
wait_queue_head_t wait; /* wake up if a request completed */
unsigned int discsignr;
struct pid *disc_pid;
- uid_t disc_uid, disc_euid;
+ const struct cred *cred;
void __user *disccontext;
unsigned long ifclaimed;
u32 secid;
@@ -79,7 +80,7 @@ struct async {
struct list_head asynclist;
struct dev_state *ps;
struct pid *pid;
- uid_t uid, euid;
+ const struct cred *cred;
unsigned int signr;
unsigned int ifnum;
void __user *userbuffer;
@@ -248,6 +249,7 @@ static struct async *alloc_async(unsigned int numisoframes)
static void free_async(struct async *as)
{
put_pid(as->pid);
+ put_cred(as->cred);
kfree(as->urb->transfer_buffer);
kfree(as->urb->setup_packet);
usb_free_urb(as->urb);
@@ -393,9 +395,8 @@ static void async_completed(struct urb *urb)
struct dev_state *ps = as->ps;
struct siginfo sinfo;
struct pid *pid = NULL;
- uid_t uid = 0;
- uid_t euid = 0;
u32 secid = 0;
+ const struct cred *cred = NULL;
int signr;
spin_lock(&ps->lock);
@@ -407,9 +408,8 @@ static void async_completed(struct urb *urb)
sinfo.si_errno = as->status;
sinfo.si_code = SI_ASYNCIO;
sinfo.si_addr = as->userurb;
- pid = as->pid;
- uid = as->uid;
- euid = as->euid;
+ pid = get_pid(as->pid);
+ cred = get_cred(as->cred);
secid = as->secid;
}
snoop(&urb->dev->dev, "urb complete\n");
@@ -422,9 +422,11 @@ static void async_completed(struct urb *urb)
cancel_bulk_urbs(ps, as->bulk_addr);
spin_unlock(&ps->lock);
- if (signr)
- kill_pid_info_as_uid(sinfo.si_signo, &sinfo, pid, uid,
- euid, secid);
+ if (signr) {
+ kill_pid_info_as_cred(sinfo.si_signo, &sinfo, pid, cred, secid);
+ put_pid(pid);
+ put_cred(cred);
+ }
wake_up(&ps->wait);
}
@@ -607,9 +609,10 @@ static int findintfep(struct usb_device *dev, unsigned int ep)
}
static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
- unsigned int index)
+ unsigned int request, unsigned int index)
{
int ret = 0;
+ struct usb_host_interface *alt_setting;
if (ps->dev->state != USB_STATE_UNAUTHENTICATED
&& ps->dev->state != USB_STATE_ADDRESS
@@ -618,6 +621,19 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
if (USB_TYPE_VENDOR == (USB_TYPE_MASK & requesttype))
return 0;
+ /*
+ * check for the special corner case 'get_device_id' in the printer
+ * class specification, where wIndex is (interface << 8 | altsetting)
+ * instead of just interface
+ */
+ if (requesttype == 0xa1 && request == 0) {
+ alt_setting = usb_find_alt_setting(ps->dev->actconfig,
+ index >> 8, index & 0xff);
+ if (alt_setting
+ && alt_setting->desc.bInterfaceClass == USB_CLASS_PRINTER)
+ index >>= 8;
+ }
+
index &= 0xff;
switch (requesttype & USB_RECIP_MASK) {
case USB_RECIP_ENDPOINT:
@@ -656,7 +672,6 @@ static int usbdev_open(struct inode *inode, struct file *file)
{
struct usb_device *dev = NULL;
struct dev_state *ps;
- const struct cred *cred = current_cred();
int ret;
ret = -ENOMEM;
@@ -706,8 +721,7 @@ static int usbdev_open(struct inode *inode, struct file *file)
init_waitqueue_head(&ps->wait);
ps->discsignr = 0;
ps->disc_pid = get_pid(task_pid(current));
- ps->disc_uid = cred->uid;
- ps->disc_euid = cred->euid;
+ ps->cred = get_current_cred();
ps->disccontext = NULL;
ps->ifclaimed = 0;
security_task_getsecid(current, &ps->secid);
@@ -749,6 +763,7 @@ static int usbdev_release(struct inode *inode, struct file *file)
usb_unlock_device(dev);
usb_put_dev(dev);
put_pid(ps->disc_pid);
+ put_cred(ps->cred);
as = async_getcompleted(ps);
while (as) {
@@ -770,7 +785,8 @@ static int proc_control(struct dev_state *ps, void __user *arg)
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
- ret = check_ctrlrecip(ps, ctrl.bRequestType, ctrl.wIndex);
+ ret = check_ctrlrecip(ps, ctrl.bRequestType, ctrl.bRequest,
+ ctrl.wIndex);
if (ret)
return ret;
wLength = ctrl.wLength; /* To suppress 64k PAGE_SIZE warning */
@@ -1048,7 +1064,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
struct usb_host_endpoint *ep;
struct async *as;
struct usb_ctrlrequest *dr = NULL;
- const struct cred *cred = current_cred();
unsigned int u, totlen, isofrmlen;
int ret, ifnum = -1;
int is_in;
@@ -1100,7 +1115,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
kfree(dr);
return -EINVAL;
}
- ret = check_ctrlrecip(ps, dr->bRequestType,
+ ret = check_ctrlrecip(ps, dr->bRequestType, dr->bRequest,
le16_to_cpup(&dr->wIndex));
if (ret) {
kfree(dr);
@@ -1262,8 +1277,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
as->signr = uurb->signr;
as->ifnum = ifnum;
as->pid = get_pid(task_pid(current));
- as->uid = cred->uid;
- as->euid = cred->euid;
+ as->cred = get_current_cred();
security_task_getsecid(current, &as->secid);
if (!is_in && uurb->buffer_length > 0) {
if (copy_from_user(as->urb->transfer_buffer, uurb->buffer,
@@ -1981,9 +1995,8 @@ static void usbdev_remove(struct usb_device *udev)
sinfo.si_errno = EPIPE;
sinfo.si_code = SI_ASYNCIO;
sinfo.si_addr = ps->disccontext;
- kill_pid_info_as_uid(ps->discsignr, &sinfo,
- ps->disc_pid, ps->disc_uid,
- ps->disc_euid, ps->secid);
+ kill_pid_info_as_cred(ps->discsignr, &sinfo,
+ ps->disc_pid, ps->cred, ps->secid);
}
}
}
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 34e3da5aa72..3b029a0a478 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1046,8 +1046,7 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
/* Non-root devices on a full/low-speed bus must wait for their
* companion high-speed root hub, in case a handoff is needed.
*/
- if (!(msg.event & PM_EVENT_AUTO) && udev->parent &&
- udev->bus->hs_companion)
+ if (!PMSG_IS_AUTO(msg) && udev->parent && udev->bus->hs_companion)
device_pm_wait_for_dev(&udev->dev,
&udev->bus->hs_companion->root_hub->dev);
@@ -1075,7 +1074,7 @@ static int usb_suspend_interface(struct usb_device *udev,
if (driver->suspend) {
status = driver->suspend(intf, msg);
- if (status && !(msg.event & PM_EVENT_AUTO))
+ if (status && !PMSG_IS_AUTO(msg))
dev_err(&intf->dev, "%s error %d\n",
"suspend", status);
} else {
@@ -1189,7 +1188,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
status = usb_suspend_interface(udev, intf, msg);
/* Ignore errors during system sleep transitions */
- if (!(msg.event & PM_EVENT_AUTO))
+ if (!PMSG_IS_AUTO(msg))
status = 0;
if (status != 0)
break;
@@ -1199,7 +1198,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
status = usb_suspend_device(udev, msg);
/* Again, ignore errors during system sleep transitions */
- if (!(msg.event & PM_EVENT_AUTO))
+ if (!PMSG_IS_AUTO(msg))
status = 0;
}
@@ -1583,7 +1582,7 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
- if (status > 0)
+ if (status > 0 || status == -EINPROGRESS)
status = 0;
return status;
}
@@ -1700,6 +1699,20 @@ int usb_runtime_idle(struct device *dev)
return 0;
}
+int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+{
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ int ret = -EPERM;
+
+ if (hcd->driver->set_usb2_hw_lpm) {
+ ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
+ if (!ret)
+ udev->usb2_hw_lpm_enabled = enable;
+ }
+
+ return ret;
+}
+
#endif /* CONFIG_USB_SUSPEND */
struct bus_type usb_bus_type = {
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index df502a98d0d..db7fe50c23d 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -56,7 +56,7 @@ static ssize_t show_ep_wMaxPacketSize(struct device *dev,
{
struct ep_device *ep = to_ep_device(dev);
return sprintf(buf, "%04x\n",
- le16_to_cpu(ep->desc->wMaxPacketSize) & 0x07ff);
+ usb_endpoint_maxp(ep->desc) & 0x07ff);
}
static DEVICE_ATTR(wMaxPacketSize, S_IRUGO, show_ep_wMaxPacketSize, NULL);
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index ce22f4a84ed..a004db35f6d 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -242,7 +242,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
pci_set_master(dev);
- retval = usb_add_hcd(hcd, dev->irq, IRQF_DISABLED | IRQF_SHARED);
+ retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED);
if (retval != 0)
goto unmap_registers;
set_hs_companion(dev, hcd);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 73cbbd85219..13222d352a6 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -442,7 +442,11 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
struct usb_ctrlrequest *cmd;
u16 typeReq, wValue, wIndex, wLength;
u8 *ubuf = urb->transfer_buffer;
- u8 tbuf [sizeof (struct usb_hub_descriptor)]
+ /*
+ * tbuf should be as big as the BOS descriptor and
+ * the USB hub descriptor.
+ */
+ u8 tbuf[USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE]
__attribute__((aligned(4)));
const u8 *bufp = tbuf;
unsigned len = 0;
@@ -562,6 +566,8 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
else /* unsupported IDs --> "protocol stall" */
goto error;
break;
+ case USB_DT_BOS << 8:
+ goto nongeneric;
default:
goto error;
}
@@ -596,6 +602,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
/* CLASS REQUESTS (and errors) */
default:
+nongeneric:
/* non-generic request */
switch (typeReq) {
case GetHubStatus:
@@ -605,6 +612,9 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
case GetHubDescriptor:
len = sizeof (struct usb_hub_descriptor);
break;
+ case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
+ /* len is returned by hub_control */
+ break;
}
status = hcd->driver->hub_control (hcd,
typeReq, wValue, wIndex,
@@ -615,7 +625,7 @@ error:
status = -EPIPE;
}
- if (status) {
+ if (status < 0) {
len = 0;
if (status != -EPIPE) {
dev_dbg (hcd->self.controller,
@@ -624,6 +634,10 @@ error:
typeReq, wValue, wIndex,
wLength, status);
}
+ } else if (status > 0) {
+ /* hub_control may return the length of data copied. */
+ len = status;
+ status = 0;
}
if (len) {
if (urb->transfer_buffer_length < len)
@@ -1961,8 +1975,9 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
int status;
int old_state = hcd->state;
- dev_dbg(&rhdev->dev, "bus %s%s\n",
- (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "suspend");
+ dev_dbg(&rhdev->dev, "bus %ssuspend, wakeup %d\n",
+ (PMSG_IS_AUTO(msg) ? "auto-" : ""),
+ rhdev->do_remote_wakeup);
if (HCD_DEAD(hcd)) {
dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "suspend");
return 0;
@@ -1997,8 +2012,8 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
int status;
int old_state = hcd->state;
- dev_dbg(&rhdev->dev, "usb %s%s\n",
- (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
+ dev_dbg(&rhdev->dev, "usb %sresume\n",
+ (PMSG_IS_AUTO(msg) ? "auto-" : ""));
if (HCD_DEAD(hcd)) {
dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "resume");
return 0;
@@ -2429,7 +2444,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
* but drivers can override it in reset() if needed, along with
* recording the overall controller's system wakeup capability.
*/
- device_init_wakeup(&rhdev->dev, 1);
+ device_set_wakeup_capable(&rhdev->dev, 1);
/* HCD_FLAG_RH_RUNNING doesn't matter until the root hub is
* registered. But since the controller can die at any time,
@@ -2478,6 +2493,13 @@ int usb_add_hcd(struct usb_hcd *hcd,
}
if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
usb_hcd_poll_rh_status(hcd);
+
+ /*
+ * Host controllers don't generate their own wakeup requests;
+ * they only forward requests from the root hub. Therefore
+ * controllers should always be enabled for remote wakeup.
+ */
+ device_wakeup_enable(hcd->self.controller);
return retval;
error_create_attr_group:
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a428aa080a3..96f05b29c9a 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1636,11 +1636,6 @@ void usb_disconnect(struct usb_device **pdev)
int i;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
- if (!udev) {
- pr_debug ("%s nodev\n", __func__);
- return;
- }
-
/* mark the device as inactive, so any further urb submissions for
* this device (and any of its children) will fail immediately.
* this quiesces everything except pending urbs.
@@ -2030,11 +2025,23 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
#define HUB_ROOT_RESET_TIME 50 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
+#define HUB_BH_RESET_TIME 50
#define HUB_LONG_RESET_TIME 200
#define HUB_RESET_TIMEOUT 500
+static int hub_port_reset(struct usb_hub *hub, int port1,
+ struct usb_device *udev, unsigned int delay, bool warm);
+
+/* Is a USB 3.0 port in the Inactive state? */
+static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
+{
+ return hub_is_superspeed(hub->hdev) &&
+ (portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_SS_INACTIVE;
+}
+
static int hub_port_wait_reset(struct usb_hub *hub, int port1,
- struct usb_device *udev, unsigned int delay)
+ struct usb_device *udev, unsigned int delay, bool warm)
{
int delay_time, ret;
u16 portstatus;
@@ -2051,28 +2058,71 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (ret < 0)
return ret;
- /* Device went away? */
- if (!(portstatus & USB_PORT_STAT_CONNECTION))
- return -ENOTCONN;
-
- /* bomb out completely if the connection bounced */
- if ((portchange & USB_PORT_STAT_C_CONNECTION))
- return -ENOTCONN;
-
- /* if we`ve finished resetting, then break out of the loop */
- if (!(portstatus & USB_PORT_STAT_RESET) &&
- (portstatus & USB_PORT_STAT_ENABLE)) {
- if (hub_is_wusb(hub))
- udev->speed = USB_SPEED_WIRELESS;
- else if (hub_is_superspeed(hub->hdev))
- udev->speed = USB_SPEED_SUPER;
- else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
- udev->speed = USB_SPEED_HIGH;
- else if (portstatus & USB_PORT_STAT_LOW_SPEED)
- udev->speed = USB_SPEED_LOW;
- else
- udev->speed = USB_SPEED_FULL;
- return 0;
+ /*
+ * Some buggy devices require a warm reset to be issued even
+ * when the port appears not to be connected.
+ */
+ if (!warm) {
+ /*
+ * Some buggy devices can cause an NEC host controller
+ * to transition to the "Error" state after a hot port
+ * reset. This will show up as the port state in
+ * "Inactive", and the port may also report a
+ * disconnect. Forcing a warm port reset seems to make
+ * the device work.
+ *
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=41752
+ */
+ if (hub_port_inactive(hub, portstatus)) {
+ int ret;
+
+ if ((portchange & USB_PORT_STAT_C_CONNECTION))
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_CONNECTION);
+ if (portchange & USB_PORT_STAT_C_LINK_STATE)
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_PORT_LINK_STATE);
+ if (portchange & USB_PORT_STAT_C_RESET)
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_RESET);
+ dev_dbg(hub->intfdev, "hot reset failed, warm reset port %d\n",
+ port1);
+ ret = hub_port_reset(hub, port1,
+ udev, HUB_BH_RESET_TIME,
+ true);
+ if ((portchange & USB_PORT_STAT_C_CONNECTION))
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_CONNECTION);
+ return ret;
+ }
+ /* Device went away? */
+ if (!(portstatus & USB_PORT_STAT_CONNECTION))
+ return -ENOTCONN;
+
+ /* bomb out completely if the connection bounced */
+ if ((portchange & USB_PORT_STAT_C_CONNECTION))
+ return -ENOTCONN;
+
+ /* if we`ve finished resetting, then break out of
+ * the loop
+ */
+ if (!(portstatus & USB_PORT_STAT_RESET) &&
+ (portstatus & USB_PORT_STAT_ENABLE)) {
+ if (hub_is_wusb(hub))
+ udev->speed = USB_SPEED_WIRELESS;
+ else if (hub_is_superspeed(hub->hdev))
+ udev->speed = USB_SPEED_SUPER;
+ else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
+ udev->speed = USB_SPEED_HIGH;
+ else if (portstatus & USB_PORT_STAT_LOW_SPEED)
+ udev->speed = USB_SPEED_LOW;
+ else
+ udev->speed = USB_SPEED_FULL;
+ return 0;
+ }
+ } else {
+ if (portchange & USB_PORT_STAT_C_BH_RESET)
+ return 0;
}
/* switch to the long delay after two short delay failures */
@@ -2080,35 +2130,84 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
delay = HUB_LONG_RESET_TIME;
dev_dbg (hub->intfdev,
- "port %d not reset yet, waiting %dms\n",
- port1, delay);
+ "port %d not %sreset yet, waiting %dms\n",
+ port1, warm ? "warm " : "", delay);
}
return -EBUSY;
}
+static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+ struct usb_device *udev, int *status, bool warm)
+{
+ switch (*status) {
+ case 0:
+ if (!warm) {
+ struct usb_hcd *hcd;
+ /* TRSTRCY = 10 ms; plus some extra */
+ msleep(10 + 40);
+ update_devnum(udev, 0);
+ hcd = bus_to_hcd(udev->bus);
+ if (hcd->driver->reset_device) {
+ *status = hcd->driver->reset_device(hcd, udev);
+ if (*status < 0) {
+ dev_err(&udev->dev, "Cannot reset "
+ "HCD device state\n");
+ break;
+ }
+ }
+ }
+ /* FALL THROUGH */
+ case -ENOTCONN:
+ case -ENODEV:
+ clear_port_feature(hub->hdev,
+ port1, USB_PORT_FEAT_C_RESET);
+ /* FIXME need disconnect() for NOTATTACHED device */
+ if (warm) {
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_BH_PORT_RESET);
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_PORT_LINK_STATE);
+ } else {
+ usb_set_device_state(udev, *status
+ ? USB_STATE_NOTATTACHED
+ : USB_STATE_DEFAULT);
+ }
+ break;
+ }
+}
+
+/* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */
static int hub_port_reset(struct usb_hub *hub, int port1,
- struct usb_device *udev, unsigned int delay)
+ struct usb_device *udev, unsigned int delay, bool warm)
{
int i, status;
- struct usb_hcd *hcd;
- hcd = bus_to_hcd(udev->bus);
- /* Block EHCI CF initialization during the port reset.
- * Some companion controllers don't like it when they mix.
- */
- down_read(&ehci_cf_port_reset_rwsem);
+ if (!warm) {
+ /* Block EHCI CF initialization during the port reset.
+ * Some companion controllers don't like it when they mix.
+ */
+ down_read(&ehci_cf_port_reset_rwsem);
+ } else {
+ if (!hub_is_superspeed(hub->hdev)) {
+ dev_err(hub->intfdev, "only USB3 hub support "
+ "warm reset\n");
+ return -EINVAL;
+ }
+ }
/* Reset the port */
for (i = 0; i < PORT_RESET_TRIES; i++) {
- status = set_port_feature(hub->hdev,
- port1, USB_PORT_FEAT_RESET);
- if (status)
+ status = set_port_feature(hub->hdev, port1, (warm ?
+ USB_PORT_FEAT_BH_PORT_RESET :
+ USB_PORT_FEAT_RESET));
+ if (status) {
dev_err(hub->intfdev,
- "cannot reset port %d (err = %d)\n",
- port1, status);
- else {
- status = hub_port_wait_reset(hub, port1, udev, delay);
+ "cannot %sreset port %d (err = %d)\n",
+ warm ? "warm " : "", port1, status);
+ } else {
+ status = hub_port_wait_reset(hub, port1, udev, delay,
+ warm);
if (status && status != -ENOTCONN)
dev_dbg(hub->intfdev,
"port_wait_reset: err = %d\n",
@@ -2116,34 +2215,14 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
}
/* return on disconnect or reset */
- switch (status) {
- case 0:
- /* TRSTRCY = 10 ms; plus some extra */
- msleep(10 + 40);
- update_devnum(udev, 0);
- if (hcd->driver->reset_device) {
- status = hcd->driver->reset_device(hcd, udev);
- if (status < 0) {
- dev_err(&udev->dev, "Cannot reset "
- "HCD device state\n");
- break;
- }
- }
- /* FALL THROUGH */
- case -ENOTCONN:
- case -ENODEV:
- clear_port_feature(hub->hdev,
- port1, USB_PORT_FEAT_C_RESET);
- /* FIXME need disconnect() for NOTATTACHED device */
- usb_set_device_state(udev, status
- ? USB_STATE_NOTATTACHED
- : USB_STATE_DEFAULT);
+ if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
+ hub_port_finish_reset(hub, port1, udev, &status, warm);
goto done;
}
dev_dbg (hub->intfdev,
- "port %d not enabled, trying reset again...\n",
- port1);
+ "port %d not enabled, trying %sreset again...\n",
+ port1, warm ? "warm " : "");
delay = HUB_LONG_RESET_TIME;
}
@@ -2151,45 +2230,11 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
"Cannot enable port %i. Maybe the USB cable is bad?\n",
port1);
- done:
- up_read(&ehci_cf_port_reset_rwsem);
- return status;
-}
-
-/* Warm reset a USB3 protocol port */
-static int hub_port_warm_reset(struct usb_hub *hub, int port)
-{
- int ret;
- u16 portstatus, portchange;
-
- if (!hub_is_superspeed(hub->hdev)) {
- dev_err(hub->intfdev, "only USB3 hub support warm reset\n");
- return -EINVAL;
- }
-
- /* Warm reset the port */
- ret = set_port_feature(hub->hdev,
- port, USB_PORT_FEAT_BH_PORT_RESET);
- if (ret) {
- dev_err(hub->intfdev, "cannot warm reset port %d\n", port);
- return ret;
- }
-
- msleep(20);
- ret = hub_port_status(hub, port, &portstatus, &portchange);
-
- if (portchange & USB_PORT_STAT_C_RESET)
- clear_port_feature(hub->hdev, port, USB_PORT_FEAT_C_RESET);
-
- if (portchange & USB_PORT_STAT_C_BH_RESET)
- clear_port_feature(hub->hdev, port,
- USB_PORT_FEAT_C_BH_PORT_RESET);
-
- if (portchange & USB_PORT_STAT_C_LINK_STATE)
- clear_port_feature(hub->hdev, port,
- USB_PORT_FEAT_C_PORT_LINK_STATE);
+done:
+ if (!warm)
+ up_read(&ehci_cf_port_reset_rwsem);
- return ret;
+ return status;
}
/* Check if a port is power on */
@@ -2324,8 +2369,6 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
int port1 = udev->portnum;
int status;
- // dev_dbg(hub->intfdev, "suspend port %d\n", port1);
-
/* enable remote wakeup when appropriate; this lets the device
* wake up the upstream hub (including maybe the root hub).
*
@@ -2342,11 +2385,15 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
dev_dbg(&udev->dev, "won't remote wakeup, status %d\n",
status);
/* bail if autosuspend is requested */
- if (msg.event & PM_EVENT_AUTO)
+ if (PMSG_IS_AUTO(msg))
return status;
}
}
+ /* disable USB2 hardware LPM */
+ if (udev->usb2_hw_lpm_enabled == 1)
+ usb_set_usb2_hardware_lpm(udev, 0);
+
/* see 7.1.7.6 */
if (hub_is_superspeed(hub->hdev))
status = set_port_feature(hub->hdev,
@@ -2367,12 +2414,13 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
USB_CTRL_SET_TIMEOUT);
/* System sleep transitions should never fail */
- if (!(msg.event & PM_EVENT_AUTO))
+ if (!PMSG_IS_AUTO(msg))
status = 0;
} else {
/* device has up to 10 msec to fully suspend */
- dev_dbg(&udev->dev, "usb %ssuspend\n",
- (msg.event & PM_EVENT_AUTO ? "auto-" : ""));
+ dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
+ (PMSG_IS_AUTO(msg) ? "auto-" : ""),
+ udev->do_remote_wakeup);
usb_set_device_state(udev, USB_STATE_SUSPENDED);
msleep(10);
}
@@ -2523,7 +2571,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
} else {
/* drive resume for at least 20 msec */
dev_dbg(&udev->dev, "usb %sresume\n",
- (msg.event & PM_EVENT_AUTO ? "auto-" : ""));
+ (PMSG_IS_AUTO(msg) ? "auto-" : ""));
msleep(25);
/* Virtual root hubs can trigger on GET_PORT_STATUS to
@@ -2558,7 +2606,12 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
if (status < 0) {
dev_dbg(&udev->dev, "can't resume, status %d\n", status);
hub_port_logical_disconnect(hub, port1);
+ } else {
+ /* Try to enable USB2 hardware LPM */
+ if (udev->usb2_hw_lpm_capable == 1)
+ usb_set_usb2_hardware_lpm(udev, 1);
}
+
return status;
}
@@ -2625,7 +2678,7 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
udev = hdev->children [port1-1];
if (udev && udev->can_submit) {
dev_warn(&intf->dev, "port %d nyet suspended\n", port1);
- if (msg.event & PM_EVENT_AUTO)
+ if (PMSG_IS_AUTO(msg))
return -EBUSY;
}
}
@@ -2798,7 +2851,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
int i, j, retval;
unsigned delay = HUB_SHORT_RESET_TIME;
enum usb_device_speed oldspeed = udev->speed;
- char *speed, *type;
+ const char *speed;
int devnum = udev->devnum;
/* root hub ports have a slightly longer reset period
@@ -2819,7 +2872,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
- retval = hub_port_reset(hub, port1, udev, delay);
+ retval = hub_port_reset(hub, port1, udev, delay, false);
if (retval < 0) /* error or disconnect */
goto fail;
/* success, speed is known */
@@ -2858,25 +2911,16 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
default:
goto fail;
}
-
- type = "";
- switch (udev->speed) {
- case USB_SPEED_LOW: speed = "low"; break;
- case USB_SPEED_FULL: speed = "full"; break;
- case USB_SPEED_HIGH: speed = "high"; break;
- case USB_SPEED_SUPER:
- speed = "super";
- break;
- case USB_SPEED_WIRELESS:
- speed = "variable";
- type = "Wireless ";
- break;
- default: speed = "?"; break;
- }
+
+ if (udev->speed == USB_SPEED_WIRELESS)
+ speed = "variable speed Wireless";
+ else
+ speed = usb_speed_string(udev->speed);
+
if (udev->speed != USB_SPEED_SUPER)
dev_info(&udev->dev,
- "%s %s speed %sUSB device number %d using %s\n",
- (udev->config) ? "reset" : "new", speed, type,
+ "%s %s USB device number %d using %s\n",
+ (udev->config) ? "reset" : "new", speed,
devnum, udev->bus->controller->driver->name);
/* Set up TT records, if needed */
@@ -2949,7 +2993,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
buf->bMaxPacketSize0;
kfree(buf);
- retval = hub_port_reset(hub, port1, udev, delay);
+ retval = hub_port_reset(hub, port1, udev, delay, false);
if (retval < 0) /* error or disconnect */
goto fail;
if (oldspeed != udev->speed) {
@@ -3023,7 +3067,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
i = 512;
else
i = udev->descriptor.bMaxPacketSize0;
- if (le16_to_cpu(udev->ep0.desc.wMaxPacketSize) != i) {
+ if (usb_endpoint_maxp(&udev->ep0.desc) != i) {
if (udev->speed == USB_SPEED_LOW ||
!(i == 8 || i == 16 || i == 32 || i == 64)) {
dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i);
@@ -3047,6 +3091,15 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
goto fail;
}
+ if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
+ retval = usb_get_bos_descriptor(udev);
+ if (!retval) {
+ if (udev->bos->ext_cap && (USB_LPM_SUPPORT &
+ le32_to_cpu(udev->bos->ext_cap->bmAttributes)))
+ udev->lpm_capable = 1;
+ }
+ }
+
retval = 0;
/* notify HCD that we have a device connected and addressed */
if (hcd->driver->update_device)
@@ -3570,7 +3623,8 @@ static void hub_events(void)
(portstatus & USB_PORT_STAT_LINK_STATE)
== USB_SS_PORT_LS_SS_INACTIVE) {
dev_dbg(hub_dev, "warm reset port %d\n", i);
- hub_port_warm_reset(hub, i);
+ hub_port_reset(hub, i, NULL,
+ HUB_BH_RESET_TIME, true);
}
if (connect_change)
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 0b5ec234c78..b3bdfede45e 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -435,7 +435,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
len = sg->length;
if (length) {
- len = min_t(unsigned, len, length);
+ len = min_t(size_t, len, length);
length -= len;
if (length == 0)
io->entries = i + 1;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 81ce6a8e1d9..d6a8d8269bf 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -38,6 +38,27 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech Webcam C200 */
+ { USB_DEVICE(0x046d, 0x0802), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Webcam C250 */
+ { USB_DEVICE(0x046d, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Webcam C300 */
+ { USB_DEVICE(0x046d, 0x0805), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Webcam B/C500 */
+ { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Webcam Pro 9000 */
+ { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Webcam C310 */
+ { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Webcam C270 */
+ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech Harmony 700-series */
{ USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -69,6 +90,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Guillemot Webcam Hercules Dualpix Exchange*/
+ { USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index cf05b97693e..662c0cf3a3e 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -412,6 +412,56 @@ set_level(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(level, S_IRUGO | S_IWUSR, show_level, set_level);
+static ssize_t
+show_usb2_hardware_lpm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ const char *p;
+
+ if (udev->usb2_hw_lpm_enabled == 1)
+ p = "enabled";
+ else
+ p = "disabled";
+
+ return sprintf(buf, "%s\n", p);
+}
+
+static ssize_t
+set_usb2_hardware_lpm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ bool value;
+ int ret;
+
+ usb_lock_device(udev);
+
+ ret = strtobool(buf, &value);
+
+ if (!ret)
+ ret = usb_set_usb2_hardware_lpm(udev, value);
+
+ usb_unlock_device(udev);
+
+ if (!ret)
+ return count;
+
+ return ret;
+}
+
+static DEVICE_ATTR(usb2_hardware_lpm, S_IRUGO | S_IWUSR, show_usb2_hardware_lpm,
+ set_usb2_hardware_lpm);
+
+static struct attribute *usb2_hardware_lpm_attr[] = {
+ &dev_attr_usb2_hardware_lpm.attr,
+ NULL,
+};
+static struct attribute_group usb2_hardware_lpm_attr_group = {
+ .name = power_group_name,
+ .attrs = usb2_hardware_lpm_attr,
+};
+
static struct attribute *power_attrs[] = {
&dev_attr_autosuspend.attr,
&dev_attr_level.attr,
@@ -428,13 +478,20 @@ static int add_power_attributes(struct device *dev)
{
int rc = 0;
- if (is_usb_device(dev))
+ if (is_usb_device(dev)) {
+ struct usb_device *udev = to_usb_device(dev);
rc = sysfs_merge_group(&dev->kobj, &power_attr_group);
+ if (udev->usb2_hw_lpm_capable == 1)
+ rc = sysfs_merge_group(&dev->kobj,
+ &usb2_hardware_lpm_attr_group);
+ }
+
return rc;
}
static void remove_power_attributes(struct device *dev)
{
+ sysfs_unmerge_group(&dev->kobj, &usb2_hardware_lpm_attr_group);
sysfs_unmerge_group(&dev->kobj, &power_attr_group);
}
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index ae334b067c1..909625b91eb 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -350,7 +350,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
dev->state < USB_STATE_CONFIGURED)
return -ENODEV;
- max = le16_to_cpu(ep->desc.wMaxPacketSize);
+ max = usb_endpoint_maxp(&ep->desc);
if (max <= 0) {
dev_dbg(&dev->dev,
"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 8706fc97e60..73cd90012ec 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -225,6 +225,7 @@ static void usb_release_dev(struct device *dev)
hcd = bus_to_hcd(udev->bus);
usb_destroy_configuration(udev);
+ usb_release_bos_descriptor(udev);
usb_put_hcd(hcd);
kfree(udev->product);
kfree(udev->manufacturer);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index d44d4b7bbf1..3888778582c 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -28,6 +28,8 @@ extern int usb_remove_device(struct usb_device *udev);
extern int usb_get_device_descriptor(struct usb_device *dev,
unsigned int size);
+extern int usb_get_bos_descriptor(struct usb_device *dev);
+extern void usb_release_bos_descriptor(struct usb_device *dev);
extern char *usb_cache_string(struct usb_device *udev, int index);
extern int usb_set_configuration(struct usb_device *dev, int configuration);
extern int usb_choose_configuration(struct usb_device *udev);
@@ -80,6 +82,7 @@ extern int usb_remote_wakeup(struct usb_device *dev);
extern int usb_runtime_suspend(struct device *dev);
extern int usb_runtime_resume(struct device *dev);
extern int usb_runtime_idle(struct device *dev);
+extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
#else
@@ -94,6 +97,10 @@ static inline int usb_remote_wakeup(struct usb_device *udev)
return 0;
}
+static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+{
+ return 0;
+}
#endif
extern struct bus_type usb_bus_type;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
new file mode 100644
index 00000000000..3c1d67d324f
--- /dev/null
+++ b/drivers/usb/dwc3/Kconfig
@@ -0,0 +1,25 @@
+config USB_DWC3
+ tristate "DesignWare USB3 DRD Core Support"
+ depends on (USB || USB_GADGET)
+ select USB_OTG_UTILS
+ help
+ Say Y or M here if your system has a Dual Role SuperSpeed
+ USB controller based on the DesignWare USB3 IP Core.
+
+ If you choose to build this driver is a dynamically linked
+ module, the module will be called dwc3.ko.
+
+if USB_DWC3
+
+config USB_DWC3_DEBUG
+ bool "Enable Debugging Messages"
+ help
+ Say Y here to enable debugging messages on DWC3 Driver.
+
+config USB_DWC3_VERBOSE
+ bool "Enable Verbose Debugging Messages"
+ depends on USB_DWC3_DEBUG
+ help
+ Say Y here to enable verbose debugging messages on DWC3 Driver.
+
+endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
new file mode 100644
index 00000000000..593d1dbc465
--- /dev/null
+++ b/drivers/usb/dwc3/Makefile
@@ -0,0 +1,36 @@
+ccflags-$(CONFIG_USB_DWC3_DEBUG) := -DDEBUG
+ccflags-$(CONFIG_USB_DWC3_VERBOSE) += -DVERBOSE_DEBUG
+
+obj-$(CONFIG_USB_DWC3) += dwc3.o
+
+dwc3-y := core.o
+
+ifneq ($(CONFIG_USB_GADGET_DWC3),)
+ dwc3-y += gadget.o ep0.o
+endif
+
+ifneq ($(CONFIG_DEBUG_FS),)
+ dwc3-y += debugfs.o
+endif
+
+##
+# Platform-specific glue layers go here
+#
+# NOTICE: Make sure your glue layer doesn't depend on anything
+# which is arch-specific and that it compiles on all situations.
+#
+# We want to keep this requirement in order to be able to compile
+# the entire driver (with all its glue layers) on several architectures
+# and make sure it compiles fine. This will also help with allmodconfig
+# and allyesconfig builds.
+#
+# The only exception is the PCI glue layer, but that's only because
+# PCI doesn't provide nops if CONFIG_PCI isn't enabled.
+##
+
+obj-$(CONFIG_USB_DWC3) += dwc3-omap.o
+
+ifneq ($(CONFIG_PCI),)
+ obj-$(CONFIG_USB_DWC3) += dwc3-pci.o
+endif
+
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
new file mode 100644
index 00000000000..717ebc9ff94
--- /dev/null
+++ b/drivers/usb/dwc3/core.c
@@ -0,0 +1,484 @@
+/**
+ * core.c - DesignWare USB3 DRD Controller Core file
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/module.h>
+
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+
+#include "debug.h"
+
+/**
+ * dwc3_core_soft_reset - Issues core soft reset and PHY reset
+ * @dwc: pointer to our context structure
+ */
+static void dwc3_core_soft_reset(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ /* Before Resetting PHY, put Core in Reset */
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg |= DWC3_GCTL_CORESOFTRESET;
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ /* Assert USB3 PHY reset */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+ reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+
+ /* Assert USB2 PHY reset */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+ mdelay(100);
+
+ /* Clear USB3 PHY reset */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+ reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+
+ /* Clear USB2 PHY reset */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+ /* After PHYs are stable we can take Core out of reset state */
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg &= ~DWC3_GCTL_CORESOFTRESET;
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+}
+
+/**
+ * dwc3_free_one_event_buffer - Frees one event buffer
+ * @dwc: Pointer to our controller context structure
+ * @evt: Pointer to event buffer to be freed
+ */
+static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
+ struct dwc3_event_buffer *evt)
+{
+ dma_free_coherent(dwc->dev, evt->length, evt->buf, evt->dma);
+ kfree(evt);
+}
+
+/**
+ * dwc3_alloc_one_event_buffer - Allocated one event buffer structure
+ * @dwc: Pointer to our controller context structure
+ * @length: size of the event buffer
+ *
+ * Returns a pointer to the allocated event buffer structure on succes
+ * otherwise ERR_PTR(errno).
+ */
+static struct dwc3_event_buffer *__devinit
+dwc3_alloc_one_event_buffer(struct dwc3 *dwc, unsigned length)
+{
+ struct dwc3_event_buffer *evt;
+
+ evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+ if (!evt)
+ return ERR_PTR(-ENOMEM);
+
+ evt->dwc = dwc;
+ evt->length = length;
+ evt->buf = dma_alloc_coherent(dwc->dev, length,
+ &evt->dma, GFP_KERNEL);
+ if (!evt->buf) {
+ kfree(evt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return evt;
+}
+
+/**
+ * dwc3_free_event_buffers - frees all allocated event buffers
+ * @dwc: Pointer to our controller context structure
+ */
+static void dwc3_free_event_buffers(struct dwc3 *dwc)
+{
+ struct dwc3_event_buffer *evt;
+ int i;
+
+ for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
+ evt = dwc->ev_buffs[i];
+ if (evt) {
+ dwc3_free_one_event_buffer(dwc, evt);
+ dwc->ev_buffs[i] = NULL;
+ }
+ }
+}
+
+/**
+ * dwc3_alloc_event_buffers - Allocates @num event buffers of size @length
+ * @dwc: Pointer to out controller context structure
+ * @num: number of event buffers to allocate
+ * @length: size of event buffer
+ *
+ * Returns 0 on success otherwise negative errno. In error the case, dwc
+ * may contain some buffers allocated but not all which were requested.
+ */
+static int __devinit dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned num,
+ unsigned length)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct dwc3_event_buffer *evt;
+
+ evt = dwc3_alloc_one_event_buffer(dwc, length);
+ if (IS_ERR(evt)) {
+ dev_err(dwc->dev, "can't allocate event buffer\n");
+ return PTR_ERR(evt);
+ }
+ dwc->ev_buffs[i] = evt;
+ }
+
+ return 0;
+}
+
+/**
+ * dwc3_event_buffers_setup - setup our allocated event buffers
+ * @dwc: Pointer to out controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int __devinit dwc3_event_buffers_setup(struct dwc3 *dwc)
+{
+ struct dwc3_event_buffer *evt;
+ int n;
+
+ for (n = 0; n < DWC3_EVENT_BUFFERS_NUM; n++) {
+ evt = dwc->ev_buffs[n];
+ dev_dbg(dwc->dev, "Event buf %p dma %08llx length %d\n",
+ evt->buf, (unsigned long long) evt->dma,
+ evt->length);
+
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n),
+ lower_32_bits(evt->dma));
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
+ upper_32_bits(evt->dma));
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
+ evt->length & 0xffff);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
+ }
+
+ return 0;
+}
+
+static void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
+{
+ struct dwc3_event_buffer *evt;
+ int n;
+
+ for (n = 0; n < DWC3_EVENT_BUFFERS_NUM; n++) {
+ evt = dwc->ev_buffs[n];
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n), 0);
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n), 0);
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n), 0);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
+ }
+}
+
+static void __devinit dwc3_cache_hwparams(struct dwc3 *dwc)
+{
+ struct dwc3_hwparams *parms = &dwc->hwparams;
+
+ parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
+ parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
+ parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
+ parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
+ parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
+ parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
+ parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
+ parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
+ parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
+}
+
+/**
+ * dwc3_core_init - Low-level initialization of DWC3 Core
+ * @dwc: Pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int __devinit dwc3_core_init(struct dwc3 *dwc)
+{
+ unsigned long timeout;
+ u32 reg;
+ int ret;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
+ /* This should read as U3 followed by revision number */
+ if ((reg & DWC3_GSNPSID_MASK) != 0x55330000) {
+ dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
+ ret = -ENODEV;
+ goto err0;
+ }
+ dwc->revision = reg & DWC3_GSNPSREV_MASK;
+
+ dwc3_core_soft_reset(dwc);
+
+ /* issue device SoftReset too */
+ timeout = jiffies + msecs_to_jiffies(500);
+ dwc3_writel(dwc->regs, DWC3_DCTL, DWC3_DCTL_CSFTRST);
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (!(reg & DWC3_DCTL_CSFTRST))
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(dwc->dev, "Reset Timed Out\n");
+ ret = -ETIMEDOUT;
+ goto err0;
+ }
+
+ cpu_relax();
+ } while (true);
+
+ ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_NUM,
+ DWC3_EVENT_BUFFERS_SIZE);
+ if (ret) {
+ dev_err(dwc->dev, "failed to allocate event buffers\n");
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ ret = dwc3_event_buffers_setup(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to setup event buffers\n");
+ goto err1;
+ }
+
+ dwc3_cache_hwparams(dwc);
+
+ return 0;
+
+err1:
+ dwc3_free_event_buffers(dwc);
+
+err0:
+ return ret;
+}
+
+static void dwc3_core_exit(struct dwc3 *dwc)
+{
+ dwc3_event_buffers_cleanup(dwc);
+ dwc3_free_event_buffers(dwc);
+}
+
+#define DWC3_ALIGN_MASK (16 - 1)
+
+static int __devinit dwc3_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct resource *res;
+ struct dwc3 *dwc;
+ void __iomem *regs;
+ unsigned int features = id->driver_data;
+ int ret = -ENOMEM;
+ int irq;
+ void *mem;
+
+ mem = kzalloc(sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
+ if (!mem) {
+ dev_err(&pdev->dev, "not enough memory\n");
+ goto err0;
+ }
+ dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
+ dwc->mem = mem;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "missing resource\n");
+ goto err1;
+ }
+
+ res = request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev));
+ if (!res) {
+ dev_err(&pdev->dev, "can't request mem region\n");
+ goto err1;
+ }
+
+ regs = ioremap(res->start, resource_size(res));
+ if (!regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ goto err2;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "missing IRQ\n");
+ goto err3;
+ }
+
+ spin_lock_init(&dwc->lock);
+ platform_set_drvdata(pdev, dwc);
+
+ dwc->regs = regs;
+ dwc->regs_size = resource_size(res);
+ dwc->dev = &pdev->dev;
+ dwc->irq = irq;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_forbid(&pdev->dev);
+
+ ret = dwc3_core_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize core\n");
+ goto err3;
+ }
+
+ if (features & DWC3_HAS_PERIPHERAL) {
+ ret = dwc3_gadget_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialized gadget\n");
+ goto err4;
+ }
+ }
+
+ ret = dwc3_debugfs_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize debugfs\n");
+ goto err5;
+ }
+
+ pm_runtime_allow(&pdev->dev);
+
+ return 0;
+
+err5:
+ if (features & DWC3_HAS_PERIPHERAL)
+ dwc3_gadget_exit(dwc);
+
+err4:
+ dwc3_core_exit(dwc);
+
+err3:
+ iounmap(regs);
+
+err2:
+ release_mem_region(res->start, resource_size(res));
+
+err1:
+ kfree(dwc->mem);
+
+err0:
+ return ret;
+}
+
+static int __devexit dwc3_remove(struct platform_device *pdev)
+{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct dwc3 *dwc = platform_get_drvdata(pdev);
+ struct resource *res;
+ unsigned int features = id->driver_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ dwc3_debugfs_exit(dwc);
+
+ if (features & DWC3_HAS_PERIPHERAL)
+ dwc3_gadget_exit(dwc);
+
+ dwc3_core_exit(dwc);
+ release_mem_region(res->start, resource_size(res));
+ iounmap(dwc->regs);
+ kfree(dwc->mem);
+
+ return 0;
+}
+
+static const struct platform_device_id dwc3_id_table[] __devinitconst = {
+ {
+ .name = "dwc3-omap",
+ .driver_data = (DWC3_HAS_PERIPHERAL
+ | DWC3_HAS_XHCI
+ | DWC3_HAS_OTG),
+ },
+ {
+ .name = "dwc3-pci",
+ .driver_data = DWC3_HAS_PERIPHERAL,
+ },
+ { }, /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(platform, dwc3_id_table);
+
+static struct platform_driver dwc3_driver = {
+ .probe = dwc3_probe,
+ .remove = __devexit_p(dwc3_remove),
+ .driver = {
+ .name = "dwc3",
+ },
+ .id_table = dwc3_id_table,
+};
+
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
+
+static int __devinit dwc3_init(void)
+{
+ return platform_driver_register(&dwc3_driver);
+}
+module_init(dwc3_init);
+
+static void __exit dwc3_exit(void)
+{
+ platform_driver_unregister(&dwc3_driver);
+}
+module_exit(dwc3_exit);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
new file mode 100644
index 00000000000..29a8e1679e1
--- /dev/null
+++ b/drivers/usb/dwc3/core.h
@@ -0,0 +1,768 @@
+/**
+ * core.h - DesignWare USB3 DRD Core Header
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DRIVERS_USB_DWC3_CORE_H
+#define __DRIVERS_USB_DWC3_CORE_H
+
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/debugfs.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+/* Global constants */
+#define DWC3_ENDPOINTS_NUM 32
+
+#define DWC3_EVENT_BUFFERS_NUM 2
+#define DWC3_EVENT_BUFFERS_SIZE PAGE_SIZE
+#define DWC3_EVENT_TYPE_MASK 0xfe
+
+#define DWC3_EVENT_TYPE_DEV 0
+#define DWC3_EVENT_TYPE_CARKIT 3
+#define DWC3_EVENT_TYPE_I2C 4
+
+#define DWC3_DEVICE_EVENT_DISCONNECT 0
+#define DWC3_DEVICE_EVENT_RESET 1
+#define DWC3_DEVICE_EVENT_CONNECT_DONE 2
+#define DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE 3
+#define DWC3_DEVICE_EVENT_WAKEUP 4
+#define DWC3_DEVICE_EVENT_EOPF 6
+#define DWC3_DEVICE_EVENT_SOF 7
+#define DWC3_DEVICE_EVENT_ERRATIC_ERROR 9
+#define DWC3_DEVICE_EVENT_CMD_CMPL 10
+#define DWC3_DEVICE_EVENT_OVERFLOW 11
+
+#define DWC3_GEVNTCOUNT_MASK 0xfffc
+#define DWC3_GSNPSID_MASK 0xffff0000
+#define DWC3_GSNPSREV_MASK 0xffff
+
+/* Global Registers */
+#define DWC3_GSBUSCFG0 0xc100
+#define DWC3_GSBUSCFG1 0xc104
+#define DWC3_GTXTHRCFG 0xc108
+#define DWC3_GRXTHRCFG 0xc10c
+#define DWC3_GCTL 0xc110
+#define DWC3_GEVTEN 0xc114
+#define DWC3_GSTS 0xc118
+#define DWC3_GSNPSID 0xc120
+#define DWC3_GGPIO 0xc124
+#define DWC3_GUID 0xc128
+#define DWC3_GUCTL 0xc12c
+#define DWC3_GBUSERRADDR0 0xc130
+#define DWC3_GBUSERRADDR1 0xc134
+#define DWC3_GPRTBIMAP0 0xc138
+#define DWC3_GPRTBIMAP1 0xc13c
+#define DWC3_GHWPARAMS0 0xc140
+#define DWC3_GHWPARAMS1 0xc144
+#define DWC3_GHWPARAMS2 0xc148
+#define DWC3_GHWPARAMS3 0xc14c
+#define DWC3_GHWPARAMS4 0xc150
+#define DWC3_GHWPARAMS5 0xc154
+#define DWC3_GHWPARAMS6 0xc158
+#define DWC3_GHWPARAMS7 0xc15c
+#define DWC3_GDBGFIFOSPACE 0xc160
+#define DWC3_GDBGLTSSM 0xc164
+#define DWC3_GPRTBIMAP_HS0 0xc180
+#define DWC3_GPRTBIMAP_HS1 0xc184
+#define DWC3_GPRTBIMAP_FS0 0xc188
+#define DWC3_GPRTBIMAP_FS1 0xc18c
+
+#define DWC3_GUSB2PHYCFG(n) (0xc200 + (n * 0x04))
+#define DWC3_GUSB2I2CCTL(n) (0xc240 + (n * 0x04))
+
+#define DWC3_GUSB2PHYACC(n) (0xc280 + (n * 0x04))
+
+#define DWC3_GUSB3PIPECTL(n) (0xc2c0 + (n * 0x04))
+
+#define DWC3_GTXFIFOSIZ(n) (0xc300 + (n * 0x04))
+#define DWC3_GRXFIFOSIZ(n) (0xc380 + (n * 0x04))
+
+#define DWC3_GEVNTADRLO(n) (0xc400 + (n * 0x10))
+#define DWC3_GEVNTADRHI(n) (0xc404 + (n * 0x10))
+#define DWC3_GEVNTSIZ(n) (0xc408 + (n * 0x10))
+#define DWC3_GEVNTCOUNT(n) (0xc40c + (n * 0x10))
+
+#define DWC3_GHWPARAMS8 0xc600
+
+/* Device Registers */
+#define DWC3_DCFG 0xc700
+#define DWC3_DCTL 0xc704
+#define DWC3_DEVTEN 0xc708
+#define DWC3_DSTS 0xc70c
+#define DWC3_DGCMDPAR 0xc710
+#define DWC3_DGCMD 0xc714
+#define DWC3_DALEPENA 0xc720
+#define DWC3_DEPCMDPAR2(n) (0xc800 + (n * 0x10))
+#define DWC3_DEPCMDPAR1(n) (0xc804 + (n * 0x10))
+#define DWC3_DEPCMDPAR0(n) (0xc808 + (n * 0x10))
+#define DWC3_DEPCMD(n) (0xc80c + (n * 0x10))
+
+/* OTG Registers */
+#define DWC3_OCFG 0xcc00
+#define DWC3_OCTL 0xcc04
+#define DWC3_OEVTEN 0xcc08
+#define DWC3_OSTS 0xcc0C
+
+/* Bit fields */
+
+/* Global Configuration Register */
+#define DWC3_GCTL_PWRDNSCALE(n) (n << 19)
+#define DWC3_GCTL_U2RSTECN (1 << 16)
+#define DWC3_GCTL_RAMCLKSEL(x) ((x & DWC3_GCTL_CLK_MASK) << 6)
+#define DWC3_GCTL_CLK_BUS (0)
+#define DWC3_GCTL_CLK_PIPE (1)
+#define DWC3_GCTL_CLK_PIPEHALF (2)
+#define DWC3_GCTL_CLK_MASK (3)
+
+#define DWC3_GCTL_PRTCAPDIR(n) (n << 12)
+#define DWC3_GCTL_PRTCAP_HOST 1
+#define DWC3_GCTL_PRTCAP_DEVICE 2
+#define DWC3_GCTL_PRTCAP_OTG 3
+
+#define DWC3_GCTL_CORESOFTRESET (1 << 11)
+#define DWC3_GCTL_SCALEDOWN(n) (n << 4)
+#define DWC3_GCTL_DISSCRAMBLE (1 << 3)
+#define DWC3_GCTL_DSBLCLKGTNG (1 << 0)
+
+/* Global USB2 PHY Configuration Register */
+#define DWC3_GUSB2PHYCFG_PHYSOFTRST (1 << 31)
+#define DWC3_GUSB2PHYCFG_SUSPHY (1 << 6)
+
+/* Global USB3 PIPE Control Register */
+#define DWC3_GUSB3PIPECTL_PHYSOFTRST (1 << 31)
+#define DWC3_GUSB3PIPECTL_SUSPHY (1 << 17)
+
+/* Global HWPARAMS1 Register */
+#define DWC3_GHWPARAMS1_EN_PWROPT(n) ((n & (3 << 24)) >> 24)
+#define DWC3_GHWPARAMS1_EN_PWROPT_NO 0
+#define DWC3_GHWPARAMS1_EN_PWROPT_CLK 1
+
+/* Device Configuration Register */
+#define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
+#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
+
+#define DWC3_DCFG_SPEED_MASK (7 << 0)
+#define DWC3_DCFG_SUPERSPEED (4 << 0)
+#define DWC3_DCFG_HIGHSPEED (0 << 0)
+#define DWC3_DCFG_FULLSPEED2 (1 << 0)
+#define DWC3_DCFG_LOWSPEED (2 << 0)
+#define DWC3_DCFG_FULLSPEED1 (3 << 0)
+
+/* Device Control Register */
+#define DWC3_DCTL_RUN_STOP (1 << 31)
+#define DWC3_DCTL_CSFTRST (1 << 30)
+#define DWC3_DCTL_LSFTRST (1 << 29)
+
+#define DWC3_DCTL_HIRD_THRES_MASK (0x1f << 24)
+#define DWC3_DCTL_HIRD_THRES(n) (((n) & DWC3_DCTL_HIRD_THRES_MASK) >> 24)
+
+#define DWC3_DCTL_APPL1RES (1 << 23)
+
+#define DWC3_DCTL_INITU2ENA (1 << 12)
+#define DWC3_DCTL_ACCEPTU2ENA (1 << 11)
+#define DWC3_DCTL_INITU1ENA (1 << 10)
+#define DWC3_DCTL_ACCEPTU1ENA (1 << 9)
+#define DWC3_DCTL_TSTCTRL_MASK (0xf << 1)
+
+#define DWC3_DCTL_ULSTCHNGREQ_MASK (0x0f << 5)
+#define DWC3_DCTL_ULSTCHNGREQ(n) (((n) << 5) & DWC3_DCTL_ULSTCHNGREQ_MASK)
+
+#define DWC3_DCTL_ULSTCHNG_NO_ACTION (DWC3_DCTL_ULSTCHNGREQ(0))
+#define DWC3_DCTL_ULSTCHNG_SS_DISABLED (DWC3_DCTL_ULSTCHNGREQ(4))
+#define DWC3_DCTL_ULSTCHNG_RX_DETECT (DWC3_DCTL_ULSTCHNGREQ(5))
+#define DWC3_DCTL_ULSTCHNG_SS_INACTIVE (DWC3_DCTL_ULSTCHNGREQ(6))
+#define DWC3_DCTL_ULSTCHNG_RECOVERY (DWC3_DCTL_ULSTCHNGREQ(8))
+#define DWC3_DCTL_ULSTCHNG_COMPLIANCE (DWC3_DCTL_ULSTCHNGREQ(10))
+#define DWC3_DCTL_ULSTCHNG_LOOPBACK (DWC3_DCTL_ULSTCHNGREQ(11))
+
+/* Device Event Enable Register */
+#define DWC3_DEVTEN_VNDRDEVTSTRCVEDEN (1 << 12)
+#define DWC3_DEVTEN_EVNTOVERFLOWEN (1 << 11)
+#define DWC3_DEVTEN_CMDCMPLTEN (1 << 10)
+#define DWC3_DEVTEN_ERRTICERREN (1 << 9)
+#define DWC3_DEVTEN_SOFEN (1 << 7)
+#define DWC3_DEVTEN_EOPFEN (1 << 6)
+#define DWC3_DEVTEN_WKUPEVTEN (1 << 4)
+#define DWC3_DEVTEN_ULSTCNGEN (1 << 3)
+#define DWC3_DEVTEN_CONNECTDONEEN (1 << 2)
+#define DWC3_DEVTEN_USBRSTEN (1 << 1)
+#define DWC3_DEVTEN_DISCONNEVTEN (1 << 0)
+
+/* Device Status Register */
+#define DWC3_DSTS_PWRUPREQ (1 << 24)
+#define DWC3_DSTS_COREIDLE (1 << 23)
+#define DWC3_DSTS_DEVCTRLHLT (1 << 22)
+
+#define DWC3_DSTS_USBLNKST_MASK (0x0f << 18)
+#define DWC3_DSTS_USBLNKST(n) (((n) & DWC3_DSTS_USBLNKST_MASK) >> 18)
+
+#define DWC3_DSTS_RXFIFOEMPTY (1 << 17)
+
+#define DWC3_DSTS_SOFFN_MASK (0x3ff << 3)
+#define DWC3_DSTS_SOFFN(n) (((n) & DWC3_DSTS_SOFFN_MASK) >> 3)
+
+#define DWC3_DSTS_CONNECTSPD (7 << 0)
+
+#define DWC3_DSTS_SUPERSPEED (4 << 0)
+#define DWC3_DSTS_HIGHSPEED (0 << 0)
+#define DWC3_DSTS_FULLSPEED2 (1 << 0)
+#define DWC3_DSTS_LOWSPEED (2 << 0)
+#define DWC3_DSTS_FULLSPEED1 (3 << 0)
+
+/* Device Generic Command Register */
+#define DWC3_DGCMD_SET_LMP 0x01
+#define DWC3_DGCMD_SET_PERIODIC_PAR 0x02
+#define DWC3_DGCMD_XMIT_FUNCTION 0x03
+#define DWC3_DGCMD_SELECTED_FIFO_FLUSH 0x09
+#define DWC3_DGCMD_ALL_FIFO_FLUSH 0x0a
+#define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c
+#define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10
+
+/* Device Endpoint Command Register */
+#define DWC3_DEPCMD_PARAM_SHIFT 16
+#define DWC3_DEPCMD_PARAM(x) (x << DWC3_DEPCMD_PARAM_SHIFT)
+#define DWC3_DEPCMD_GET_RSC_IDX(x) ((x >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
+#define DWC3_DEPCMD_STATUS_MASK (0x0f << 12)
+#define DWC3_DEPCMD_STATUS(x) ((x & DWC3_DEPCMD_STATUS_MASK) >> 12)
+#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
+#define DWC3_DEPCMD_CMDACT (1 << 10)
+#define DWC3_DEPCMD_CMDIOC (1 << 8)
+
+#define DWC3_DEPCMD_DEPSTARTCFG (0x09 << 0)
+#define DWC3_DEPCMD_ENDTRANSFER (0x08 << 0)
+#define DWC3_DEPCMD_UPDATETRANSFER (0x07 << 0)
+#define DWC3_DEPCMD_STARTTRANSFER (0x06 << 0)
+#define DWC3_DEPCMD_CLEARSTALL (0x05 << 0)
+#define DWC3_DEPCMD_SETSTALL (0x04 << 0)
+#define DWC3_DEPCMD_GETSEQNUMBER (0x03 << 0)
+#define DWC3_DEPCMD_SETTRANSFRESOURCE (0x02 << 0)
+#define DWC3_DEPCMD_SETEPCONFIG (0x01 << 0)
+
+/* The EP number goes 0..31 so ep0 is always out and ep1 is always in */
+#define DWC3_DALEPENA_EP(n) (1 << n)
+
+#define DWC3_DEPCMD_TYPE_CONTROL 0
+#define DWC3_DEPCMD_TYPE_ISOC 1
+#define DWC3_DEPCMD_TYPE_BULK 2
+#define DWC3_DEPCMD_TYPE_INTR 3
+
+/* Structures */
+
+struct dwc3_trb_hw;
+
+/**
+ * struct dwc3_event_buffer - Software event buffer representation
+ * @list: a list of event buffers
+ * @buf: _THE_ buffer
+ * @length: size of this buffer
+ * @dma: dma_addr_t
+ * @dwc: pointer to DWC controller
+ */
+struct dwc3_event_buffer {
+ void *buf;
+ unsigned length;
+ unsigned int lpos;
+
+ dma_addr_t dma;
+
+ struct dwc3 *dwc;
+};
+
+#define DWC3_EP_FLAG_STALLED (1 << 0)
+#define DWC3_EP_FLAG_WEDGED (1 << 1)
+
+#define DWC3_EP_DIRECTION_TX true
+#define DWC3_EP_DIRECTION_RX false
+
+#define DWC3_TRB_NUM 32
+#define DWC3_TRB_MASK (DWC3_TRB_NUM - 1)
+
+/**
+ * struct dwc3_ep - device side endpoint representation
+ * @endpoint: usb endpoint
+ * @request_list: list of requests for this endpoint
+ * @req_queued: list of requests on this ep which have TRBs setup
+ * @trb_pool: array of transaction buffers
+ * @trb_pool_dma: dma address of @trb_pool
+ * @free_slot: next slot which is going to be used
+ * @busy_slot: first slot which is owned by HW
+ * @desc: usb_endpoint_descriptor pointer
+ * @dwc: pointer to DWC controller
+ * @flags: endpoint flags (wedged, stalled, ...)
+ * @current_trb: index of current used trb
+ * @number: endpoint number (1 - 15)
+ * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
+ * @res_trans_idx: Resource transfer index
+ * @interval: the intervall on which the ISOC transfer is started
+ * @name: a human readable name e.g. ep1out-bulk
+ * @direction: true for TX, false for RX
+ * @stream_capable: true when streams are enabled
+ */
+struct dwc3_ep {
+ struct usb_ep endpoint;
+ struct list_head request_list;
+ struct list_head req_queued;
+
+ struct dwc3_trb_hw *trb_pool;
+ dma_addr_t trb_pool_dma;
+ u32 free_slot;
+ u32 busy_slot;
+ const struct usb_endpoint_descriptor *desc;
+ struct dwc3 *dwc;
+
+ unsigned flags;
+#define DWC3_EP_ENABLED (1 << 0)
+#define DWC3_EP_STALL (1 << 1)
+#define DWC3_EP_WEDGE (1 << 2)
+#define DWC3_EP_BUSY (1 << 4)
+#define DWC3_EP_PENDING_REQUEST (1 << 5)
+
+ /* This last one is specific to EP0 */
+#define DWC3_EP0_DIR_IN (1 << 31)
+
+ unsigned current_trb;
+
+ u8 number;
+ u8 type;
+ u8 res_trans_idx;
+ u32 interval;
+
+ char name[20];
+
+ unsigned direction:1;
+ unsigned stream_capable:1;
+};
+
+enum dwc3_phy {
+ DWC3_PHY_UNKNOWN = 0,
+ DWC3_PHY_USB3,
+ DWC3_PHY_USB2,
+};
+
+enum dwc3_ep0_next {
+ DWC3_EP0_UNKNOWN = 0,
+ DWC3_EP0_COMPLETE,
+ DWC3_EP0_NRDY_SETUP,
+ DWC3_EP0_NRDY_DATA,
+ DWC3_EP0_NRDY_STATUS,
+};
+
+enum dwc3_ep0_state {
+ EP0_UNCONNECTED = 0,
+ EP0_SETUP_PHASE,
+ EP0_DATA_PHASE,
+ EP0_STATUS_PHASE,
+};
+
+enum dwc3_link_state {
+ /* In SuperSpeed */
+ DWC3_LINK_STATE_U0 = 0x00, /* in HS, means ON */
+ DWC3_LINK_STATE_U1 = 0x01,
+ DWC3_LINK_STATE_U2 = 0x02, /* in HS, means SLEEP */
+ DWC3_LINK_STATE_U3 = 0x03, /* in HS, means SUSPEND */
+ DWC3_LINK_STATE_SS_DIS = 0x04,
+ DWC3_LINK_STATE_RX_DET = 0x05, /* in HS, means Early Suspend */
+ DWC3_LINK_STATE_SS_INACT = 0x06,
+ DWC3_LINK_STATE_POLL = 0x07,
+ DWC3_LINK_STATE_RECOV = 0x08,
+ DWC3_LINK_STATE_HRESET = 0x09,
+ DWC3_LINK_STATE_CMPLY = 0x0a,
+ DWC3_LINK_STATE_LPBK = 0x0b,
+ DWC3_LINK_STATE_MASK = 0x0f,
+};
+
+enum dwc3_device_state {
+ DWC3_DEFAULT_STATE,
+ DWC3_ADDRESS_STATE,
+ DWC3_CONFIGURED_STATE,
+};
+
+/**
+ * struct dwc3_trb - transfer request block
+ * @bpl: lower 32bit of the buffer
+ * @bph: higher 32bit of the buffer
+ * @length: buffer size (up to 16mb - 1)
+ * @pcm1: packet count m1
+ * @trbsts: trb status
+ * 0 = ok
+ * 1 = missed isoc
+ * 2 = setup pending
+ * @hwo: hardware owner of descriptor
+ * @lst: last trb
+ * @chn: chain buffers
+ * @csp: continue on short packets (only supported on isoc eps)
+ * @trbctl: trb control
+ * 1 = normal
+ * 2 = control-setup
+ * 3 = control-status-2
+ * 4 = control-status-3
+ * 5 = control-data (first trb of data stage)
+ * 6 = isochronous-first (first trb of service interval)
+ * 7 = isochronous
+ * 8 = link trb
+ * others = reserved
+ * @isp_imi: interrupt on short packet / interrupt on missed isoc
+ * @ioc: interrupt on complete
+ * @sid_sofn: Stream ID / SOF Number
+ */
+struct dwc3_trb {
+ u64 bplh;
+
+ union {
+ struct {
+ u32 length:24;
+ u32 pcm1:2;
+ u32 reserved27_26:2;
+ u32 trbsts:4;
+#define DWC3_TRB_STS_OKAY 0
+#define DWC3_TRB_STS_MISSED_ISOC 1
+#define DWC3_TRB_STS_SETUP_PENDING 2
+ };
+ u32 len_pcm;
+ };
+
+ union {
+ struct {
+ u32 hwo:1;
+ u32 lst:1;
+ u32 chn:1;
+ u32 csp:1;
+ u32 trbctl:6;
+ u32 isp_imi:1;
+ u32 ioc:1;
+ u32 reserved13_12:2;
+ u32 sid_sofn:16;
+ u32 reserved31_30:2;
+ };
+ u32 control;
+ };
+} __packed;
+
+/**
+ * struct dwc3_trb_hw - transfer request block (hw format)
+ * @bpl: DW0-3
+ * @bph: DW4-7
+ * @size: DW8-B
+ * @trl: DWC-F
+ */
+struct dwc3_trb_hw {
+ __le32 bpl;
+ __le32 bph;
+ __le32 size;
+ __le32 ctrl;
+} __packed;
+
+static inline void dwc3_trb_to_hw(struct dwc3_trb *nat, struct dwc3_trb_hw *hw)
+{
+ hw->bpl = cpu_to_le32(lower_32_bits(nat->bplh));
+ hw->bph = cpu_to_le32(upper_32_bits(nat->bplh));
+ hw->size = cpu_to_le32p(&nat->len_pcm);
+ /* HWO is written last */
+ hw->ctrl = cpu_to_le32p(&nat->control);
+}
+
+static inline void dwc3_trb_to_nat(struct dwc3_trb_hw *hw, struct dwc3_trb *nat)
+{
+ u64 bplh;
+
+ bplh = le32_to_cpup(&hw->bpl);
+ bplh |= (u64) le32_to_cpup(&hw->bph) << 32;
+ nat->bplh = bplh;
+
+ nat->len_pcm = le32_to_cpup(&hw->size);
+ nat->control = le32_to_cpup(&hw->ctrl);
+}
+
+/**
+ * dwc3_hwparams - copy of HWPARAMS registers
+ * @hwparams0 - GHWPARAMS0
+ * @hwparams1 - GHWPARAMS1
+ * @hwparams2 - GHWPARAMS2
+ * @hwparams3 - GHWPARAMS3
+ * @hwparams4 - GHWPARAMS4
+ * @hwparams5 - GHWPARAMS5
+ * @hwparams6 - GHWPARAMS6
+ * @hwparams7 - GHWPARAMS7
+ * @hwparams8 - GHWPARAMS8
+ */
+struct dwc3_hwparams {
+ u32 hwparams0;
+ u32 hwparams1;
+ u32 hwparams2;
+ u32 hwparams3;
+ u32 hwparams4;
+ u32 hwparams5;
+ u32 hwparams6;
+ u32 hwparams7;
+ u32 hwparams8;
+};
+
+/**
+ * struct dwc3 - representation of our controller
+ * @ctrl_req: usb control request which is used for ep0
+ * @ep0_trb: trb which is used for the ctrl_req
+ * @ep0_bounce: bounce buffer for ep0
+ * @setup_buf: used while precessing STD USB requests
+ * @ctrl_req_addr: dma address of ctrl_req
+ * @ep0_trb: dma address of ep0_trb
+ * @ep0_usb_req: dummy req used while handling STD USB requests
+ * @setup_buf_addr: dma address of setup_buf
+ * @ep0_bounce_addr: dma address of ep0_bounce
+ * @lock: for synchronizing
+ * @dev: pointer to our struct device
+ * @event_buffer_list: a list of event buffers
+ * @gadget: device side representation of the peripheral controller
+ * @gadget_driver: pointer to the gadget driver
+ * @regs: base address for our registers
+ * @regs_size: address space size
+ * @irq: IRQ number
+ * @revision: revision register contents
+ * @is_selfpowered: true when we are selfpowered
+ * @three_stage_setup: set if we perform a three phase setup
+ * @ep0_status_pending: ep0 status response without a req is pending
+ * @ep0_bounced: true when we used bounce buffer
+ * @ep0_expect_in: true when we expect a DATA IN transfer
+ * @start_config_issued: true when StartConfig command has been issued
+ * @ep0_next_event: hold the next expected event
+ * @ep0state: state of endpoint zero
+ * @link_state: link state
+ * @speed: device speed (super, high, full, low)
+ * @mem: points to start of memory which is used for this struct.
+ * @hwparams: copy of hwparams registers
+ * @root: debugfs root folder pointer
+ */
+struct dwc3 {
+ struct usb_ctrlrequest *ctrl_req;
+ struct dwc3_trb_hw *ep0_trb;
+ void *ep0_bounce;
+ u8 *setup_buf;
+ dma_addr_t ctrl_req_addr;
+ dma_addr_t ep0_trb_addr;
+ dma_addr_t setup_buf_addr;
+ dma_addr_t ep0_bounce_addr;
+ struct usb_request ep0_usb_req;
+ /* device lock */
+ spinlock_t lock;
+ struct device *dev;
+
+ struct dwc3_event_buffer *ev_buffs[DWC3_EVENT_BUFFERS_NUM];
+ struct dwc3_ep *eps[DWC3_ENDPOINTS_NUM];
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *gadget_driver;
+
+ void __iomem *regs;
+ size_t regs_size;
+
+ int irq;
+
+ u32 revision;
+
+#define DWC3_REVISION_173A 0x5533173a
+#define DWC3_REVISION_175A 0x5533175a
+#define DWC3_REVISION_180A 0x5533180a
+#define DWC3_REVISION_183A 0x5533183a
+#define DWC3_REVISION_185A 0x5533185a
+#define DWC3_REVISION_188A 0x5533188a
+#define DWC3_REVISION_190A 0x5533190a
+
+ unsigned is_selfpowered:1;
+ unsigned three_stage_setup:1;
+ unsigned ep0_status_pending:1;
+ unsigned ep0_bounced:1;
+ unsigned ep0_expect_in:1;
+ unsigned start_config_issued:1;
+
+ enum dwc3_ep0_next ep0_next_event;
+ enum dwc3_ep0_state ep0state;
+ enum dwc3_link_state link_state;
+ enum dwc3_device_state dev_state;
+
+ u8 speed;
+ void *mem;
+
+ struct dwc3_hwparams hwparams;
+ struct dentry *root;
+};
+
+/* -------------------------------------------------------------------------- */
+
+#define DWC3_TRBSTS_OK 0
+#define DWC3_TRBSTS_MISSED_ISOC 1
+#define DWC3_TRBSTS_SETUP_PENDING 2
+
+#define DWC3_TRBCTL_NORMAL 1
+#define DWC3_TRBCTL_CONTROL_SETUP 2
+#define DWC3_TRBCTL_CONTROL_STATUS2 3
+#define DWC3_TRBCTL_CONTROL_STATUS3 4
+#define DWC3_TRBCTL_CONTROL_DATA 5
+#define DWC3_TRBCTL_ISOCHRONOUS_FIRST 6
+#define DWC3_TRBCTL_ISOCHRONOUS 7
+#define DWC3_TRBCTL_LINK_TRB 8
+
+/* -------------------------------------------------------------------------- */
+
+struct dwc3_event_type {
+ u32 is_devspec:1;
+ u32 type:6;
+ u32 reserved8_31:25;
+} __packed;
+
+#define DWC3_DEPEVT_XFERCOMPLETE 0x01
+#define DWC3_DEPEVT_XFERINPROGRESS 0x02
+#define DWC3_DEPEVT_XFERNOTREADY 0x03
+#define DWC3_DEPEVT_RXTXFIFOEVT 0x04
+#define DWC3_DEPEVT_STREAMEVT 0x06
+#define DWC3_DEPEVT_EPCMDCMPLT 0x07
+
+/**
+ * struct dwc3_event_depvt - Device Endpoint Events
+ * @one_bit: indicates this is an endpoint event (not used)
+ * @endpoint_number: number of the endpoint
+ * @endpoint_event: The event we have:
+ * 0x00 - Reserved
+ * 0x01 - XferComplete
+ * 0x02 - XferInProgress
+ * 0x03 - XferNotReady
+ * 0x04 - RxTxFifoEvt (IN->Underrun, OUT->Overrun)
+ * 0x05 - Reserved
+ * 0x06 - StreamEvt
+ * 0x07 - EPCmdCmplt
+ * @reserved11_10: Reserved, don't use.
+ * @status: Indicates the status of the event. Refer to databook for
+ * more information.
+ * @parameters: Parameters of the current event. Refer to databook for
+ * more information.
+ */
+struct dwc3_event_depevt {
+ u32 one_bit:1;
+ u32 endpoint_number:5;
+ u32 endpoint_event:4;
+ u32 reserved11_10:2;
+ u32 status:4;
+#define DEPEVT_STATUS_BUSERR (1 << 0)
+#define DEPEVT_STATUS_SHORT (1 << 1)
+#define DEPEVT_STATUS_IOC (1 << 2)
+#define DEPEVT_STATUS_LST (1 << 3)
+
+/* Stream event only */
+#define DEPEVT_STREAMEVT_FOUND 1
+#define DEPEVT_STREAMEVT_NOTFOUND 2
+
+/* Control-only Status */
+#define DEPEVT_STATUS_CONTROL_SETUP 0
+#define DEPEVT_STATUS_CONTROL_DATA 1
+#define DEPEVT_STATUS_CONTROL_STATUS 2
+
+ u32 parameters:16;
+} __packed;
+
+/**
+ * struct dwc3_event_devt - Device Events
+ * @one_bit: indicates this is a non-endpoint event (not used)
+ * @device_event: indicates it's a device event. Should read as 0x00
+ * @type: indicates the type of device event.
+ * 0 - DisconnEvt
+ * 1 - USBRst
+ * 2 - ConnectDone
+ * 3 - ULStChng
+ * 4 - WkUpEvt
+ * 5 - Reserved
+ * 6 - EOPF
+ * 7 - SOF
+ * 8 - Reserved
+ * 9 - ErrticErr
+ * 10 - CmdCmplt
+ * 11 - EvntOverflow
+ * 12 - VndrDevTstRcved
+ * @reserved15_12: Reserved, not used
+ * @event_info: Information about this event
+ * @reserved31_24: Reserved, not used
+ */
+struct dwc3_event_devt {
+ u32 one_bit:1;
+ u32 device_event:7;
+ u32 type:4;
+ u32 reserved15_12:4;
+ u32 event_info:8;
+ u32 reserved31_24:8;
+} __packed;
+
+/**
+ * struct dwc3_event_gevt - Other Core Events
+ * @one_bit: indicates this is a non-endpoint event (not used)
+ * @device_event: indicates it's (0x03) Carkit or (0x04) I2C event.
+ * @phy_port_number: self-explanatory
+ * @reserved31_12: Reserved, not used.
+ */
+struct dwc3_event_gevt {
+ u32 one_bit:1;
+ u32 device_event:7;
+ u32 phy_port_number:4;
+ u32 reserved31_12:20;
+} __packed;
+
+/**
+ * union dwc3_event - representation of Event Buffer contents
+ * @raw: raw 32-bit event
+ * @type: the type of the event
+ * @depevt: Device Endpoint Event
+ * @devt: Device Event
+ * @gevt: Global Event
+ */
+union dwc3_event {
+ u32 raw;
+ struct dwc3_event_type type;
+ struct dwc3_event_depevt depevt;
+ struct dwc3_event_devt devt;
+ struct dwc3_event_gevt gevt;
+};
+
+/*
+ * DWC3 Features to be used as Driver Data
+ */
+
+#define DWC3_HAS_PERIPHERAL BIT(0)
+#define DWC3_HAS_XHCI BIT(1)
+#define DWC3_HAS_OTG BIT(3)
+
+#endif /* __DRIVERS_USB_DWC3_CORE_H */
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
new file mode 100644
index 00000000000..5894ee8222a
--- /dev/null
+++ b/drivers/usb/dwc3/debug.h
@@ -0,0 +1,50 @@
+/**
+ * debug.h - DesignWare USB3 DRD Controller Debug Header
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+
+#ifdef CONFIG_DEBUG_FS
+extern int dwc3_debugfs_init(struct dwc3 *);
+extern void dwc3_debugfs_exit(struct dwc3 *);
+#else
+static inline int dwc3_debugfs_init(struct dwc3 *d)
+{ return 0; }
+static inline void dwc3_debugfs_exit(struct dwc3 *d)
+{ }
+#endif
+
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
new file mode 100644
index 00000000000..da1ad77d8d5
--- /dev/null
+++ b/drivers/usb/dwc3/debugfs.c
@@ -0,0 +1,441 @@
+/**
+ * debugfs.c - DesignWare USB3 DRD Controller DebugFS file
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/ptrace.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+
+#include <asm/uaccess.h>
+
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+
+struct dwc3_register {
+ const char *name;
+ u32 offset;
+};
+
+#define dump_register(nm) \
+{ \
+ .name = __stringify(nm), \
+ .offset = DWC3_ ##nm, \
+}
+
+static const struct dwc3_register dwc3_regs[] = {
+ dump_register(GSBUSCFG0),
+ dump_register(GSBUSCFG1),
+ dump_register(GTXTHRCFG),
+ dump_register(GRXTHRCFG),
+ dump_register(GCTL),
+ dump_register(GEVTEN),
+ dump_register(GSTS),
+ dump_register(GSNPSID),
+ dump_register(GGPIO),
+ dump_register(GUID),
+ dump_register(GUCTL),
+ dump_register(GBUSERRADDR0),
+ dump_register(GBUSERRADDR1),
+ dump_register(GPRTBIMAP0),
+ dump_register(GPRTBIMAP1),
+ dump_register(GHWPARAMS0),
+ dump_register(GHWPARAMS1),
+ dump_register(GHWPARAMS2),
+ dump_register(GHWPARAMS3),
+ dump_register(GHWPARAMS4),
+ dump_register(GHWPARAMS5),
+ dump_register(GHWPARAMS6),
+ dump_register(GHWPARAMS7),
+ dump_register(GDBGFIFOSPACE),
+ dump_register(GDBGLTSSM),
+ dump_register(GPRTBIMAP_HS0),
+ dump_register(GPRTBIMAP_HS1),
+ dump_register(GPRTBIMAP_FS0),
+ dump_register(GPRTBIMAP_FS1),
+
+ dump_register(GUSB2PHYCFG(0)),
+ dump_register(GUSB2PHYCFG(1)),
+ dump_register(GUSB2PHYCFG(2)),
+ dump_register(GUSB2PHYCFG(3)),
+ dump_register(GUSB2PHYCFG(4)),
+ dump_register(GUSB2PHYCFG(5)),
+ dump_register(GUSB2PHYCFG(6)),
+ dump_register(GUSB2PHYCFG(7)),
+ dump_register(GUSB2PHYCFG(8)),
+ dump_register(GUSB2PHYCFG(9)),
+ dump_register(GUSB2PHYCFG(10)),
+ dump_register(GUSB2PHYCFG(11)),
+ dump_register(GUSB2PHYCFG(12)),
+ dump_register(GUSB2PHYCFG(13)),
+ dump_register(GUSB2PHYCFG(14)),
+ dump_register(GUSB2PHYCFG(15)),
+
+ dump_register(GUSB2I2CCTL(0)),
+ dump_register(GUSB2I2CCTL(1)),
+ dump_register(GUSB2I2CCTL(2)),
+ dump_register(GUSB2I2CCTL(3)),
+ dump_register(GUSB2I2CCTL(4)),
+ dump_register(GUSB2I2CCTL(5)),
+ dump_register(GUSB2I2CCTL(6)),
+ dump_register(GUSB2I2CCTL(7)),
+ dump_register(GUSB2I2CCTL(8)),
+ dump_register(GUSB2I2CCTL(9)),
+ dump_register(GUSB2I2CCTL(10)),
+ dump_register(GUSB2I2CCTL(11)),
+ dump_register(GUSB2I2CCTL(12)),
+ dump_register(GUSB2I2CCTL(13)),
+ dump_register(GUSB2I2CCTL(14)),
+ dump_register(GUSB2I2CCTL(15)),
+
+ dump_register(GUSB2PHYACC(0)),
+ dump_register(GUSB2PHYACC(1)),
+ dump_register(GUSB2PHYACC(2)),
+ dump_register(GUSB2PHYACC(3)),
+ dump_register(GUSB2PHYACC(4)),
+ dump_register(GUSB2PHYACC(5)),
+ dump_register(GUSB2PHYACC(6)),
+ dump_register(GUSB2PHYACC(7)),
+ dump_register(GUSB2PHYACC(8)),
+ dump_register(GUSB2PHYACC(9)),
+ dump_register(GUSB2PHYACC(10)),
+ dump_register(GUSB2PHYACC(11)),
+ dump_register(GUSB2PHYACC(12)),
+ dump_register(GUSB2PHYACC(13)),
+ dump_register(GUSB2PHYACC(14)),
+ dump_register(GUSB2PHYACC(15)),
+
+ dump_register(GUSB3PIPECTL(0)),
+ dump_register(GUSB3PIPECTL(1)),
+ dump_register(GUSB3PIPECTL(2)),
+ dump_register(GUSB3PIPECTL(3)),
+ dump_register(GUSB3PIPECTL(4)),
+ dump_register(GUSB3PIPECTL(5)),
+ dump_register(GUSB3PIPECTL(6)),
+ dump_register(GUSB3PIPECTL(7)),
+ dump_register(GUSB3PIPECTL(8)),
+ dump_register(GUSB3PIPECTL(9)),
+ dump_register(GUSB3PIPECTL(10)),
+ dump_register(GUSB3PIPECTL(11)),
+ dump_register(GUSB3PIPECTL(12)),
+ dump_register(GUSB3PIPECTL(13)),
+ dump_register(GUSB3PIPECTL(14)),
+ dump_register(GUSB3PIPECTL(15)),
+
+ dump_register(GTXFIFOSIZ(0)),
+ dump_register(GTXFIFOSIZ(1)),
+ dump_register(GTXFIFOSIZ(2)),
+ dump_register(GTXFIFOSIZ(3)),
+ dump_register(GTXFIFOSIZ(4)),
+ dump_register(GTXFIFOSIZ(5)),
+ dump_register(GTXFIFOSIZ(6)),
+ dump_register(GTXFIFOSIZ(7)),
+ dump_register(GTXFIFOSIZ(8)),
+ dump_register(GTXFIFOSIZ(9)),
+ dump_register(GTXFIFOSIZ(10)),
+ dump_register(GTXFIFOSIZ(11)),
+ dump_register(GTXFIFOSIZ(12)),
+ dump_register(GTXFIFOSIZ(13)),
+ dump_register(GTXFIFOSIZ(14)),
+ dump_register(GTXFIFOSIZ(15)),
+ dump_register(GTXFIFOSIZ(16)),
+ dump_register(GTXFIFOSIZ(17)),
+ dump_register(GTXFIFOSIZ(18)),
+ dump_register(GTXFIFOSIZ(19)),
+ dump_register(GTXFIFOSIZ(20)),
+ dump_register(GTXFIFOSIZ(21)),
+ dump_register(GTXFIFOSIZ(22)),
+ dump_register(GTXFIFOSIZ(23)),
+ dump_register(GTXFIFOSIZ(24)),
+ dump_register(GTXFIFOSIZ(25)),
+ dump_register(GTXFIFOSIZ(26)),
+ dump_register(GTXFIFOSIZ(27)),
+ dump_register(GTXFIFOSIZ(28)),
+ dump_register(GTXFIFOSIZ(29)),
+ dump_register(GTXFIFOSIZ(30)),
+ dump_register(GTXFIFOSIZ(31)),
+
+ dump_register(GRXFIFOSIZ(0)),
+ dump_register(GRXFIFOSIZ(1)),
+ dump_register(GRXFIFOSIZ(2)),
+ dump_register(GRXFIFOSIZ(3)),
+ dump_register(GRXFIFOSIZ(4)),
+ dump_register(GRXFIFOSIZ(5)),
+ dump_register(GRXFIFOSIZ(6)),
+ dump_register(GRXFIFOSIZ(7)),
+ dump_register(GRXFIFOSIZ(8)),
+ dump_register(GRXFIFOSIZ(9)),
+ dump_register(GRXFIFOSIZ(10)),
+ dump_register(GRXFIFOSIZ(11)),
+ dump_register(GRXFIFOSIZ(12)),
+ dump_register(GRXFIFOSIZ(13)),
+ dump_register(GRXFIFOSIZ(14)),
+ dump_register(GRXFIFOSIZ(15)),
+ dump_register(GRXFIFOSIZ(16)),
+ dump_register(GRXFIFOSIZ(17)),
+ dump_register(GRXFIFOSIZ(18)),
+ dump_register(GRXFIFOSIZ(19)),
+ dump_register(GRXFIFOSIZ(20)),
+ dump_register(GRXFIFOSIZ(21)),
+ dump_register(GRXFIFOSIZ(22)),
+ dump_register(GRXFIFOSIZ(23)),
+ dump_register(GRXFIFOSIZ(24)),
+ dump_register(GRXFIFOSIZ(25)),
+ dump_register(GRXFIFOSIZ(26)),
+ dump_register(GRXFIFOSIZ(27)),
+ dump_register(GRXFIFOSIZ(28)),
+ dump_register(GRXFIFOSIZ(29)),
+ dump_register(GRXFIFOSIZ(30)),
+ dump_register(GRXFIFOSIZ(31)),
+
+ dump_register(GEVNTADRLO(0)),
+ dump_register(GEVNTADRHI(0)),
+ dump_register(GEVNTSIZ(0)),
+ dump_register(GEVNTCOUNT(0)),
+
+ dump_register(GHWPARAMS8),
+ dump_register(DCFG),
+ dump_register(DCTL),
+ dump_register(DEVTEN),
+ dump_register(DSTS),
+ dump_register(DGCMDPAR),
+ dump_register(DGCMD),
+ dump_register(DALEPENA),
+
+ dump_register(DEPCMDPAR2(0)),
+ dump_register(DEPCMDPAR2(1)),
+ dump_register(DEPCMDPAR2(2)),
+ dump_register(DEPCMDPAR2(3)),
+ dump_register(DEPCMDPAR2(4)),
+ dump_register(DEPCMDPAR2(5)),
+ dump_register(DEPCMDPAR2(6)),
+ dump_register(DEPCMDPAR2(7)),
+ dump_register(DEPCMDPAR2(8)),
+ dump_register(DEPCMDPAR2(9)),
+ dump_register(DEPCMDPAR2(10)),
+ dump_register(DEPCMDPAR2(11)),
+ dump_register(DEPCMDPAR2(12)),
+ dump_register(DEPCMDPAR2(13)),
+ dump_register(DEPCMDPAR2(14)),
+ dump_register(DEPCMDPAR2(15)),
+ dump_register(DEPCMDPAR2(16)),
+ dump_register(DEPCMDPAR2(17)),
+ dump_register(DEPCMDPAR2(18)),
+ dump_register(DEPCMDPAR2(19)),
+ dump_register(DEPCMDPAR2(20)),
+ dump_register(DEPCMDPAR2(21)),
+ dump_register(DEPCMDPAR2(22)),
+ dump_register(DEPCMDPAR2(23)),
+ dump_register(DEPCMDPAR2(24)),
+ dump_register(DEPCMDPAR2(25)),
+ dump_register(DEPCMDPAR2(26)),
+ dump_register(DEPCMDPAR2(27)),
+ dump_register(DEPCMDPAR2(28)),
+ dump_register(DEPCMDPAR2(29)),
+ dump_register(DEPCMDPAR2(30)),
+ dump_register(DEPCMDPAR2(31)),
+
+ dump_register(DEPCMDPAR1(0)),
+ dump_register(DEPCMDPAR1(1)),
+ dump_register(DEPCMDPAR1(2)),
+ dump_register(DEPCMDPAR1(3)),
+ dump_register(DEPCMDPAR1(4)),
+ dump_register(DEPCMDPAR1(5)),
+ dump_register(DEPCMDPAR1(6)),
+ dump_register(DEPCMDPAR1(7)),
+ dump_register(DEPCMDPAR1(8)),
+ dump_register(DEPCMDPAR1(9)),
+ dump_register(DEPCMDPAR1(10)),
+ dump_register(DEPCMDPAR1(11)),
+ dump_register(DEPCMDPAR1(12)),
+ dump_register(DEPCMDPAR1(13)),
+ dump_register(DEPCMDPAR1(14)),
+ dump_register(DEPCMDPAR1(15)),
+ dump_register(DEPCMDPAR1(16)),
+ dump_register(DEPCMDPAR1(17)),
+ dump_register(DEPCMDPAR1(18)),
+ dump_register(DEPCMDPAR1(19)),
+ dump_register(DEPCMDPAR1(20)),
+ dump_register(DEPCMDPAR1(21)),
+ dump_register(DEPCMDPAR1(22)),
+ dump_register(DEPCMDPAR1(23)),
+ dump_register(DEPCMDPAR1(24)),
+ dump_register(DEPCMDPAR1(25)),
+ dump_register(DEPCMDPAR1(26)),
+ dump_register(DEPCMDPAR1(27)),
+ dump_register(DEPCMDPAR1(28)),
+ dump_register(DEPCMDPAR1(29)),
+ dump_register(DEPCMDPAR1(30)),
+ dump_register(DEPCMDPAR1(31)),
+
+ dump_register(DEPCMDPAR0(0)),
+ dump_register(DEPCMDPAR0(1)),
+ dump_register(DEPCMDPAR0(2)),
+ dump_register(DEPCMDPAR0(3)),
+ dump_register(DEPCMDPAR0(4)),
+ dump_register(DEPCMDPAR0(5)),
+ dump_register(DEPCMDPAR0(6)),
+ dump_register(DEPCMDPAR0(7)),
+ dump_register(DEPCMDPAR0(8)),
+ dump_register(DEPCMDPAR0(9)),
+ dump_register(DEPCMDPAR0(10)),
+ dump_register(DEPCMDPAR0(11)),
+ dump_register(DEPCMDPAR0(12)),
+ dump_register(DEPCMDPAR0(13)),
+ dump_register(DEPCMDPAR0(14)),
+ dump_register(DEPCMDPAR0(15)),
+ dump_register(DEPCMDPAR0(16)),
+ dump_register(DEPCMDPAR0(17)),
+ dump_register(DEPCMDPAR0(18)),
+ dump_register(DEPCMDPAR0(19)),
+ dump_register(DEPCMDPAR0(20)),
+ dump_register(DEPCMDPAR0(21)),
+ dump_register(DEPCMDPAR0(22)),
+ dump_register(DEPCMDPAR0(23)),
+ dump_register(DEPCMDPAR0(24)),
+ dump_register(DEPCMDPAR0(25)),
+ dump_register(DEPCMDPAR0(26)),
+ dump_register(DEPCMDPAR0(27)),
+ dump_register(DEPCMDPAR0(28)),
+ dump_register(DEPCMDPAR0(29)),
+ dump_register(DEPCMDPAR0(30)),
+ dump_register(DEPCMDPAR0(31)),
+
+ dump_register(DEPCMD(0)),
+ dump_register(DEPCMD(1)),
+ dump_register(DEPCMD(2)),
+ dump_register(DEPCMD(3)),
+ dump_register(DEPCMD(4)),
+ dump_register(DEPCMD(5)),
+ dump_register(DEPCMD(6)),
+ dump_register(DEPCMD(7)),
+ dump_register(DEPCMD(8)),
+ dump_register(DEPCMD(9)),
+ dump_register(DEPCMD(10)),
+ dump_register(DEPCMD(11)),
+ dump_register(DEPCMD(12)),
+ dump_register(DEPCMD(13)),
+ dump_register(DEPCMD(14)),
+ dump_register(DEPCMD(15)),
+ dump_register(DEPCMD(16)),
+ dump_register(DEPCMD(17)),
+ dump_register(DEPCMD(18)),
+ dump_register(DEPCMD(19)),
+ dump_register(DEPCMD(20)),
+ dump_register(DEPCMD(21)),
+ dump_register(DEPCMD(22)),
+ dump_register(DEPCMD(23)),
+ dump_register(DEPCMD(24)),
+ dump_register(DEPCMD(25)),
+ dump_register(DEPCMD(26)),
+ dump_register(DEPCMD(27)),
+ dump_register(DEPCMD(28)),
+ dump_register(DEPCMD(29)),
+ dump_register(DEPCMD(30)),
+ dump_register(DEPCMD(31)),
+
+ dump_register(OCFG),
+ dump_register(OCTL),
+ dump_register(OEVTEN),
+ dump_register(OSTS),
+};
+
+static int dwc3_regdump_show(struct seq_file *s, void *unused)
+{
+ struct dwc3 *dwc = s->private;
+ int i;
+
+ seq_printf(s, "DesignWare USB3 Core Register Dump\n");
+
+ for (i = 0; i < ARRAY_SIZE(dwc3_regs); i++) {
+ seq_printf(s, "%-20s : %08x\n", dwc3_regs[i].name,
+ dwc3_readl(dwc->regs, dwc3_regs[i].offset));
+ }
+
+ return 0;
+}
+
+static int dwc3_regdump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_regdump_show, inode->i_private);
+}
+
+static const struct file_operations dwc3_regdump_fops = {
+ .open = dwc3_regdump_open,
+ .read = seq_read,
+ .release = single_release,
+};
+
+int __devinit dwc3_debugfs_init(struct dwc3 *dwc)
+{
+ struct dentry *root;
+ struct dentry *file;
+ int ret;
+
+ root = debugfs_create_dir(dev_name(dwc->dev), NULL);
+ if (IS_ERR(root)){
+ ret = PTR_ERR(root);
+ goto err0;
+ }
+
+ dwc->root = root;
+
+ file = debugfs_create_file("regdump", S_IRUGO, root, dwc,
+ &dwc3_regdump_fops);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err1;
+ }
+ return 0;
+
+err1:
+ debugfs_remove_recursive(root);
+
+err0:
+ return ret;
+}
+
+void __devexit dwc3_debugfs_exit(struct dwc3 *dwc)
+{
+ debugfs_remove_recursive(dwc->root);
+ dwc->root = NULL;
+}
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
new file mode 100644
index 00000000000..062552b5fc8
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -0,0 +1,401 @@
+/**
+ * dwc3-omap.c - OMAP Specific Glue layer
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/dwc3-omap.h>
+#include <linux/dma-mapping.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+#include "io.h"
+
+/*
+ * All these registers belong to OMAP's Wrapper around the
+ * DesignWare USB3 Core.
+ */
+
+#define USBOTGSS_REVISION 0x0000
+#define USBOTGSS_SYSCONFIG 0x0010
+#define USBOTGSS_IRQ_EOI 0x0020
+#define USBOTGSS_IRQSTATUS_RAW_0 0x0024
+#define USBOTGSS_IRQSTATUS_0 0x0028
+#define USBOTGSS_IRQENABLE_SET_0 0x002c
+#define USBOTGSS_IRQENABLE_CLR_0 0x0030
+#define USBOTGSS_IRQSTATUS_RAW_1 0x0034
+#define USBOTGSS_IRQSTATUS_1 0x0038
+#define USBOTGSS_IRQENABLE_SET_1 0x003c
+#define USBOTGSS_IRQENABLE_CLR_1 0x0040
+#define USBOTGSS_UTMI_OTG_CTRL 0x0080
+#define USBOTGSS_UTMI_OTG_STATUS 0x0084
+#define USBOTGSS_MMRAM_OFFSET 0x0100
+#define USBOTGSS_FLADJ 0x0104
+#define USBOTGSS_DEBUG_CFG 0x0108
+#define USBOTGSS_DEBUG_DATA 0x010c
+
+/* SYSCONFIG REGISTER */
+#define USBOTGSS_SYSCONFIG_DMADISABLE (1 << 16)
+#define USBOTGSS_SYSCONFIG_STANDBYMODE(x) ((x) << 4)
+
+#define USBOTGSS_STANDBYMODE_FORCE_STANDBY 0
+#define USBOTGSS_STANDBYMODE_NO_STANDBY 1
+#define USBOTGSS_STANDBYMODE_SMART_STANDBY 2
+#define USBOTGSS_STANDBYMODE_SMART_WAKEUP 3
+
+#define USBOTGSS_STANDBYMODE_MASK (0x03 << 4)
+
+#define USBOTGSS_SYSCONFIG_IDLEMODE(x) ((x) << 2)
+
+#define USBOTGSS_IDLEMODE_FORCE_IDLE 0
+#define USBOTGSS_IDLEMODE_NO_IDLE 1
+#define USBOTGSS_IDLEMODE_SMART_IDLE 2
+#define USBOTGSS_IDLEMODE_SMART_WAKEUP 3
+
+#define USBOTGSS_IDLEMODE_MASK (0x03 << 2)
+
+/* IRQ_EOI REGISTER */
+#define USBOTGSS_IRQ_EOI_LINE_NUMBER (1 << 0)
+
+/* IRQS0 BITS */
+#define USBOTGSS_IRQO_COREIRQ_ST (1 << 0)
+
+/* IRQ1 BITS */
+#define USBOTGSS_IRQ1_DMADISABLECLR (1 << 17)
+#define USBOTGSS_IRQ1_OEVT (1 << 16)
+#define USBOTGSS_IRQ1_DRVVBUS_RISE (1 << 13)
+#define USBOTGSS_IRQ1_CHRGVBUS_RISE (1 << 12)
+#define USBOTGSS_IRQ1_DISCHRGVBUS_RISE (1 << 11)
+#define USBOTGSS_IRQ1_IDPULLUP_RISE (1 << 8)
+#define USBOTGSS_IRQ1_DRVVBUS_FALL (1 << 5)
+#define USBOTGSS_IRQ1_CHRGVBUS_FALL (1 << 4)
+#define USBOTGSS_IRQ1_DISCHRGVBUS_FALL (1 << 3)
+#define USBOTGSS_IRQ1_IDPULLUP_FALL (1 << 0)
+
+/* UTMI_OTG_CTRL REGISTER */
+#define USBOTGSS_UTMI_OTG_CTRL_DRVVBUS (1 << 5)
+#define USBOTGSS_UTMI_OTG_CTRL_CHRGVBUS (1 << 4)
+#define USBOTGSS_UTMI_OTG_CTRL_DISCHRGVBUS (1 << 3)
+#define USBOTGSS_UTMI_OTG_CTRL_IDPULLUP (1 << 0)
+
+/* UTMI_OTG_STATUS REGISTER */
+#define USBOTGSS_UTMI_OTG_STATUS_SW_MODE (1 << 31)
+#define USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT (1 << 9)
+#define USBOTGSS_UTMI_OTG_STATUS_TXBITSTUFFENABLE (1 << 8)
+#define USBOTGSS_UTMI_OTG_STATUS_IDDIG (1 << 4)
+#define USBOTGSS_UTMI_OTG_STATUS_SESSEND (1 << 3)
+#define USBOTGSS_UTMI_OTG_STATUS_SESSVALID (1 << 2)
+#define USBOTGSS_UTMI_OTG_STATUS_VBUSVALID (1 << 1)
+
+struct dwc3_omap {
+ /* device lock */
+ spinlock_t lock;
+
+ struct platform_device *dwc3;
+ struct device *dev;
+
+ int irq;
+ void __iomem *base;
+
+ void *context;
+ u32 resource_size;
+
+ u32 dma_status:1;
+};
+
+static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
+{
+ struct dwc3_omap *omap = _omap;
+ u32 reg;
+
+ spin_lock(&omap->lock);
+
+ reg = dwc3_readl(omap->base, USBOTGSS_IRQSTATUS_1);
+
+ if (reg & USBOTGSS_IRQ1_DMADISABLECLR) {
+ dev_dbg(omap->dev, "DMA Disable was Cleared\n");
+ omap->dma_status = false;
+ }
+
+ if (reg & USBOTGSS_IRQ1_OEVT)
+ dev_dbg(omap->dev, "OTG Event\n");
+
+ if (reg & USBOTGSS_IRQ1_DRVVBUS_RISE)
+ dev_dbg(omap->dev, "DRVVBUS Rise\n");
+
+ if (reg & USBOTGSS_IRQ1_CHRGVBUS_RISE)
+ dev_dbg(omap->dev, "CHRGVBUS Rise\n");
+
+ if (reg & USBOTGSS_IRQ1_DISCHRGVBUS_RISE)
+ dev_dbg(omap->dev, "DISCHRGVBUS Rise\n");
+
+ if (reg & USBOTGSS_IRQ1_IDPULLUP_RISE)
+ dev_dbg(omap->dev, "IDPULLUP Rise\n");
+
+ if (reg & USBOTGSS_IRQ1_DRVVBUS_FALL)
+ dev_dbg(omap->dev, "DRVVBUS Fall\n");
+
+ if (reg & USBOTGSS_IRQ1_CHRGVBUS_FALL)
+ dev_dbg(omap->dev, "CHRGVBUS Fall\n");
+
+ if (reg & USBOTGSS_IRQ1_DISCHRGVBUS_FALL)
+ dev_dbg(omap->dev, "DISCHRGVBUS Fall\n");
+
+ if (reg & USBOTGSS_IRQ1_IDPULLUP_FALL)
+ dev_dbg(omap->dev, "IDPULLUP Fall\n");
+
+ dwc3_writel(omap->base, USBOTGSS_IRQSTATUS_1, reg);
+
+ reg = dwc3_readl(omap->base, USBOTGSS_IRQSTATUS_0);
+ dwc3_writel(omap->base, USBOTGSS_IRQSTATUS_0, reg);
+
+ spin_unlock(&omap->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit dwc3_omap_probe(struct platform_device *pdev)
+{
+ struct dwc3_omap_data *pdata = pdev->dev.platform_data;
+ struct platform_device *dwc3;
+ struct dwc3_omap *omap;
+ struct resource *res;
+
+ int ret = -ENOMEM;
+ int irq;
+
+ u32 reg;
+
+ void __iomem *base;
+ void *context;
+
+ omap = kzalloc(sizeof(*omap), GFP_KERNEL);
+ if (!omap) {
+ dev_err(&pdev->dev, "not enough memory\n");
+ goto err0;
+ }
+
+ platform_set_drvdata(pdev, omap);
+
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "missing IRQ resource\n");
+ ret = -EINVAL;
+ goto err1;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "missing memory base resource\n");
+ ret = -EINVAL;
+ goto err1;
+ }
+
+ base = ioremap_nocache(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ goto err1;
+ }
+
+ dwc3 = platform_device_alloc("dwc3-omap", -1);
+ if (!dwc3) {
+ dev_err(&pdev->dev, "couldn't allocate dwc3 device\n");
+ goto err2;
+ }
+
+ context = kzalloc(resource_size(res), GFP_KERNEL);
+ if (!context) {
+ dev_err(&pdev->dev, "couldn't allocate dwc3 context memory\n");
+ goto err3;
+ }
+
+ spin_lock_init(&omap->lock);
+ dma_set_coherent_mask(&dwc3->dev, pdev->dev.coherent_dma_mask);
+
+ dwc3->dev.parent = &pdev->dev;
+ dwc3->dev.dma_mask = pdev->dev.dma_mask;
+ dwc3->dev.dma_parms = pdev->dev.dma_parms;
+ omap->resource_size = resource_size(res);
+ omap->context = context;
+ omap->dev = &pdev->dev;
+ omap->irq = irq;
+ omap->base = base;
+ omap->dwc3 = dwc3;
+
+ reg = dwc3_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
+
+ if (!pdata) {
+ dev_dbg(&pdev->dev, "missing platform data\n");
+ } else {
+ switch (pdata->utmi_mode) {
+ case DWC3_OMAP_UTMI_MODE_SW:
+ reg |= USBOTGSS_UTMI_OTG_STATUS_SW_MODE;
+ break;
+ case DWC3_OMAP_UTMI_MODE_HW:
+ reg &= ~USBOTGSS_UTMI_OTG_STATUS_SW_MODE;
+ break;
+ default:
+ dev_dbg(&pdev->dev, "UNKNOWN utmi mode %d\n",
+ pdata->utmi_mode);
+ }
+ }
+
+ dwc3_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS, reg);
+
+ /* check the DMA Status */
+ reg = dwc3_readl(omap->base, USBOTGSS_SYSCONFIG);
+ omap->dma_status = !!(reg & USBOTGSS_SYSCONFIG_DMADISABLE);
+
+ /* Set No-Idle and No-Standby */
+ reg &= ~(USBOTGSS_STANDBYMODE_MASK
+ | USBOTGSS_IDLEMODE_MASK);
+
+ reg |= (USBOTGSS_SYSCONFIG_STANDBYMODE(USBOTGSS_STANDBYMODE_NO_STANDBY)
+ | USBOTGSS_SYSCONFIG_IDLEMODE(USBOTGSS_IDLEMODE_NO_IDLE));
+
+ dwc3_writel(omap->base, USBOTGSS_SYSCONFIG, reg);
+
+ ret = request_irq(omap->irq, dwc3_omap_interrupt, 0,
+ "dwc3-omap", omap);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ #%d --> %d\n",
+ omap->irq, ret);
+ goto err4;
+ }
+
+ /* enable all IRQs */
+ reg = USBOTGSS_IRQO_COREIRQ_ST;
+ dwc3_writel(omap->base, USBOTGSS_IRQENABLE_SET_0, reg);
+
+ reg = (USBOTGSS_IRQ1_OEVT |
+ USBOTGSS_IRQ1_DRVVBUS_RISE |
+ USBOTGSS_IRQ1_CHRGVBUS_RISE |
+ USBOTGSS_IRQ1_DISCHRGVBUS_RISE |
+ USBOTGSS_IRQ1_IDPULLUP_RISE |
+ USBOTGSS_IRQ1_DRVVBUS_FALL |
+ USBOTGSS_IRQ1_CHRGVBUS_FALL |
+ USBOTGSS_IRQ1_DISCHRGVBUS_FALL |
+ USBOTGSS_IRQ1_IDPULLUP_FALL);
+
+ dwc3_writel(omap->base, USBOTGSS_IRQENABLE_SET_1, reg);
+
+ ret = platform_device_add_resources(dwc3, pdev->resource,
+ pdev->num_resources);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't add resources to dwc3 device\n");
+ goto err5;
+ }
+
+ ret = platform_device_add(dwc3);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register dwc3 device\n");
+ goto err5;
+ }
+
+ return 0;
+
+err5:
+ free_irq(omap->irq, omap);
+
+err4:
+ kfree(omap->context);
+
+err3:
+ platform_device_put(dwc3);
+
+err2:
+ iounmap(base);
+
+err1:
+ kfree(omap);
+
+err0:
+ return ret;
+}
+
+static int __devexit dwc3_omap_remove(struct platform_device *pdev)
+{
+ struct dwc3_omap *omap = platform_get_drvdata(pdev);
+
+ platform_device_unregister(omap->dwc3);
+
+ free_irq(omap->irq, omap);
+ iounmap(omap->base);
+
+ kfree(omap->context);
+ kfree(omap);
+
+ return 0;
+}
+
+static const struct of_device_id of_dwc3_matach[] = {
+ {
+ "ti,dwc3",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_dwc3_matach);
+
+static struct platform_driver dwc3_omap_driver = {
+ .probe = dwc3_omap_probe,
+ .remove = __devexit_p(dwc3_omap_remove),
+ .driver = {
+ .name = "omap-dwc3",
+ .of_match_table = of_dwc3_matach,
+ },
+};
+
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("DesignWare USB3 OMAP Glue Layer");
+
+static int __devinit dwc3_omap_init(void)
+{
+ return platform_driver_register(&dwc3_omap_driver);
+}
+module_init(dwc3_omap_init);
+
+static void __exit dwc3_omap_exit(void)
+{
+ platform_driver_unregister(&dwc3_omap_driver);
+}
+module_exit(dwc3_omap_exit);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
new file mode 100644
index 00000000000..f77c0004268
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -0,0 +1,219 @@
+/**
+ * dwc3-pci.c - PCI Specific glue layer
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+/* FIXME define these in <linux/pci_ids.h> */
+#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
+
+#define DWC3_PCI_DEVS_POSSIBLE 32
+
+struct dwc3_pci {
+ struct device *dev;
+ struct platform_device *dwc3;
+};
+
+static DECLARE_BITMAP(dwc3_pci_devs, DWC3_PCI_DEVS_POSSIBLE);
+
+static int dwc3_pci_get_device_id(struct dwc3_pci *glue)
+{
+ int id;
+
+again:
+ id = find_first_zero_bit(dwc3_pci_devs, DWC3_PCI_DEVS_POSSIBLE);
+ if (id < DWC3_PCI_DEVS_POSSIBLE) {
+ int old;
+
+ old = test_and_set_bit(id, dwc3_pci_devs);
+ if (old)
+ goto again;
+ } else {
+ dev_err(glue->dev, "no space for new device\n");
+ id = -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dwc3_pci_put_device_id(struct dwc3_pci *glue, int id)
+{
+ int ret;
+
+ if (id < 0)
+ return;
+
+ ret = test_bit(id, dwc3_pci_devs);
+ WARN(!ret, "Device: %s\nID %d not in use\n",
+ dev_driver_string(glue->dev), id);
+ clear_bit(id, dwc3_pci_devs);
+}
+
+static int __devinit dwc3_pci_probe(struct pci_dev *pci,
+ const struct pci_device_id *id)
+{
+ struct resource res[2];
+ struct platform_device *dwc3;
+ struct dwc3_pci *glue;
+ int ret = -ENOMEM;
+ int devid;
+
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&pci->dev, "not enough memory\n");
+ goto err0;
+ }
+
+ glue->dev = &pci->dev;
+
+ ret = pci_enable_device(pci);
+ if (ret) {
+ dev_err(&pci->dev, "failed to enable pci device\n");
+ goto err1;
+ }
+
+ pci_set_power_state(pci, PCI_D0);
+ pci_set_master(pci);
+
+ devid = dwc3_pci_get_device_id(glue);
+ if (devid < 0)
+ goto err2;
+
+ dwc3 = platform_device_alloc("dwc3-pci", devid);
+ if (!dwc3) {
+ dev_err(&pci->dev, "couldn't allocate dwc3 device\n");
+ goto err3;
+ }
+
+ memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
+
+ res[0].start = pci_resource_start(pci, 0);
+ res[0].end = pci_resource_end(pci, 0);
+ res[0].name = "dwc_usb3";
+ res[0].flags = IORESOURCE_MEM;
+
+ res[1].start = pci->irq;
+ res[1].name = "dwc_usb3";
+ res[1].flags = IORESOURCE_IRQ;
+
+ ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(&pci->dev, "couldn't add resources to dwc3 device\n");
+ goto err4;
+ }
+
+ pci_set_drvdata(pci, glue);
+
+ dma_set_coherent_mask(&dwc3->dev, pci->dev.coherent_dma_mask);
+
+ dwc3->dev.dma_mask = pci->dev.dma_mask;
+ dwc3->dev.dma_parms = pci->dev.dma_parms;
+ dwc3->dev.parent = &pci->dev;
+ glue->dwc3 = dwc3;
+
+ ret = platform_device_add(dwc3);
+ if (ret) {
+ dev_err(&pci->dev, "failed to register dwc3 device\n");
+ goto err4;
+ }
+
+ return 0;
+
+err4:
+ pci_set_drvdata(pci, NULL);
+ platform_device_put(dwc3);
+
+err3:
+ dwc3_pci_put_device_id(glue, devid);
+
+err2:
+ pci_disable_device(pci);
+
+err1:
+ kfree(pci);
+
+err0:
+ return ret;
+}
+
+static void __devexit dwc3_pci_remove(struct pci_dev *pci)
+{
+ struct dwc3_pci *glue = pci_get_drvdata(pci);
+
+ dwc3_pci_put_device_id(glue, glue->dwc3->id);
+ platform_device_unregister(glue->dwc3);
+ pci_set_drvdata(pci, NULL);
+ pci_disable_device(pci);
+ kfree(glue);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
+ PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
+ },
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
+
+static struct pci_driver dwc3_pci_driver = {
+ .name = "pci-dwc3",
+ .id_table = dwc3_pci_id_table,
+ .probe = dwc3_pci_probe,
+ .remove = __devexit_p(dwc3_pci_remove),
+};
+
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("DesignWare USB3 PCI Glue Layer");
+
+static int __devinit dwc3_pci_init(void)
+{
+ return pci_register_driver(&dwc3_pci_driver);
+}
+module_init(dwc3_pci_init);
+
+static void __exit dwc3_pci_exit(void)
+{
+ pci_unregister_driver(&dwc3_pci_driver);
+}
+module_exit(dwc3_pci_exit);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
new file mode 100644
index 00000000000..69a4e43ddf5
--- /dev/null
+++ b/drivers/usb/dwc3/ep0.c
@@ -0,0 +1,804 @@
+/**
+ * ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+
+static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event);
+
+static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
+{
+ switch (state) {
+ case EP0_UNCONNECTED:
+ return "Unconnected";
+ case EP0_SETUP_PHASE:
+ return "Setup Phase";
+ case EP0_DATA_PHASE:
+ return "Data Phase";
+ case EP0_STATUS_PHASE:
+ return "Status Phase";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
+ u32 len, u32 type)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_trb_hw *trb_hw;
+ struct dwc3_trb trb;
+ struct dwc3_ep *dep;
+
+ int ret;
+
+ dep = dwc->eps[epnum];
+ if (dep->flags & DWC3_EP_BUSY) {
+ dev_vdbg(dwc->dev, "%s: still busy\n", dep->name);
+ return 0;
+ }
+
+ trb_hw = dwc->ep0_trb;
+ memset(&trb, 0, sizeof(trb));
+
+ trb.trbctl = type;
+ trb.bplh = buf_dma;
+ trb.length = len;
+
+ trb.hwo = 1;
+ trb.lst = 1;
+ trb.ioc = 1;
+ trb.isp_imi = 1;
+
+ dwc3_trb_to_hw(&trb, trb_hw);
+
+ memset(&params, 0, sizeof(params));
+ params.param0 = upper_32_bits(dwc->ep0_trb_addr);
+ params.param1 = lower_32_bits(dwc->ep0_trb_addr);
+
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_STARTTRANSFER, &params);
+ if (ret < 0) {
+ dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
+ return ret;
+ }
+
+ dep->flags |= DWC3_EP_BUSY;
+ dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
+ dep->number);
+
+ dwc->ep0_next_event = DWC3_EP0_COMPLETE;
+
+ return 0;
+}
+
+static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
+ struct dwc3_request *req)
+{
+ int ret = 0;
+
+ req->request.actual = 0;
+ req->request.status = -EINPROGRESS;
+ req->epnum = dep->number;
+
+ list_add_tail(&req->list, &dep->request_list);
+
+ /*
+ * Gadget driver might not be quick enough to queue a request
+ * before we get a Transfer Not Ready event on this endpoint.
+ *
+ * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
+ * flag is set, it's telling us that as soon as Gadget queues the
+ * required request, we should kick the transfer here because the
+ * IRQ we were waiting for is long gone.
+ */
+ if (dep->flags & DWC3_EP_PENDING_REQUEST) {
+ struct dwc3 *dwc = dep->dwc;
+ unsigned direction;
+ u32 type;
+
+ direction = !!(dep->flags & DWC3_EP0_DIR_IN);
+
+ if (dwc->ep0state == EP0_STATUS_PHASE) {
+ type = dwc->three_stage_setup
+ ? DWC3_TRBCTL_CONTROL_STATUS3
+ : DWC3_TRBCTL_CONTROL_STATUS2;
+ } else if (dwc->ep0state == EP0_DATA_PHASE) {
+ type = DWC3_TRBCTL_CONTROL_DATA;
+ } else {
+ /* should never happen */
+ WARN_ON(1);
+ return 0;
+ }
+
+ ret = dwc3_ep0_start_trans(dwc, direction,
+ req->request.dma, req->request.length, type);
+ dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
+ DWC3_EP0_DIR_IN);
+ }
+
+ return ret;
+}
+
+int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
+ gfp_t gfp_flags)
+{
+ struct dwc3_request *req = to_dwc3_request(request);
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ unsigned long flags;
+
+ int ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (!dep->desc) {
+ dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
+ request, dep->name);
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ /* we share one TRB for ep0/1 */
+ if (!list_empty(&dwc->eps[0]->request_list) ||
+ !list_empty(&dwc->eps[1]->request_list) ||
+ dwc->ep0_status_pending) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ dev_vdbg(dwc->dev, "queueing request %p to %s length %d, state '%s'\n",
+ request, dep->name, request->length,
+ dwc3_ep0_state_string(dwc->ep0state));
+
+ ret = __dwc3_gadget_ep0_queue(dep, req);
+
+out:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+{
+ struct dwc3_ep *dep = dwc->eps[0];
+
+ /* stall is always issued on EP0 */
+ __dwc3_gadget_ep_set_halt(dwc->eps[0], 1);
+ dwc->eps[0]->flags = DWC3_EP_ENABLED;
+
+ if (!list_empty(&dep->request_list)) {
+ struct dwc3_request *req;
+
+ req = next_request(&dep->request_list);
+ dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ }
+
+ dwc->ep0state = EP0_SETUP_PHASE;
+ dwc3_ep0_out_start(dwc);
+}
+
+void dwc3_ep0_out_start(struct dwc3 *dwc)
+{
+ int ret;
+
+ ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
+ DWC3_TRBCTL_CONTROL_SETUP);
+ WARN_ON(ret < 0);
+}
+
+static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
+{
+ struct dwc3_ep *dep;
+ u32 windex = le16_to_cpu(wIndex_le);
+ u32 epnum;
+
+ epnum = (windex & USB_ENDPOINT_NUMBER_MASK) << 1;
+ if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ epnum |= 1;
+
+ dep = dwc->eps[epnum];
+ if (dep->flags & DWC3_EP_ENABLED)
+ return dep;
+
+ return NULL;
+}
+
+static void dwc3_ep0_send_status_response(struct dwc3 *dwc)
+{
+ dwc3_ep0_start_trans(dwc, 1, dwc->setup_buf_addr,
+ dwc->ep0_usb_req.length,
+ DWC3_TRBCTL_CONTROL_DATA);
+}
+
+/*
+ * ch 9.4.5
+ */
+static int dwc3_ep0_handle_status(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+{
+ struct dwc3_ep *dep;
+ u32 recip;
+ u16 usb_status = 0;
+ __le16 *response_pkt;
+
+ recip = ctrl->bRequestType & USB_RECIP_MASK;
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ /*
+ * We are self-powered. U1/U2/LTM will be set later
+ * once we handle this states. RemoteWakeup is 0 on SS
+ */
+ usb_status |= dwc->is_selfpowered << USB_DEVICE_SELF_POWERED;
+ break;
+
+ case USB_RECIP_INTERFACE:
+ /*
+ * Function Remote Wake Capable D0
+ * Function Remote Wakeup D1
+ */
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
+ if (!dep)
+ return -EINVAL;
+
+ if (dep->flags & DWC3_EP_STALL)
+ usb_status = 1 << USB_ENDPOINT_HALT;
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ response_pkt = (__le16 *) dwc->setup_buf;
+ *response_pkt = cpu_to_le16(usb_status);
+ dwc->ep0_usb_req.length = sizeof(*response_pkt);
+ dwc->ep0_status_pending = 1;
+
+ return 0;
+}
+
+static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ struct dwc3_ep *dep;
+ u32 recip;
+ u32 wValue;
+ u32 wIndex;
+ u32 reg;
+ int ret;
+ u32 mode;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ wIndex = le16_to_cpu(ctrl->wIndex);
+ recip = ctrl->bRequestType & USB_RECIP_MASK;
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+
+ /*
+ * 9.4.1 says only only for SS, in AddressState only for
+ * default control pipe
+ */
+ switch (wValue) {
+ case USB_DEVICE_U1_ENABLE:
+ case USB_DEVICE_U2_ENABLE:
+ case USB_DEVICE_LTM_ENABLE:
+ if (dwc->dev_state != DWC3_CONFIGURED_STATE)
+ return -EINVAL;
+ if (dwc->speed != DWC3_DSTS_SUPERSPEED)
+ return -EINVAL;
+ }
+
+ /* XXX add U[12] & LTM */
+ switch (wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ break;
+ case USB_DEVICE_LTM_ENABLE:
+ break;
+
+ case USB_DEVICE_TEST_MODE:
+ if ((wIndex & 0xff) != 0)
+ return -EINVAL;
+ if (!set)
+ return -EINVAL;
+
+ mode = wIndex >> 8;
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_TSTCTRL_MASK;
+
+ switch (mode) {
+ case TEST_J:
+ case TEST_K:
+ case TEST_SE0_NAK:
+ case TEST_PACKET:
+ case TEST_FORCE_EN:
+ reg |= mode << 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ case USB_RECIP_INTERFACE:
+ switch (wValue) {
+ case USB_INTRF_FUNC_SUSPEND:
+ if (wIndex & USB_INTRF_FUNC_SUSPEND_LP)
+ /* XXX enable Low power suspend */
+ ;
+ if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
+ /* XXX enable remote wakeup */
+ ;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ switch (wValue) {
+ case USB_ENDPOINT_HALT:
+
+ dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
+ if (!dep)
+ return -EINVAL;
+ ret = __dwc3_gadget_ep_set_halt(dep, set);
+ if (ret)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ };
+
+ return 0;
+}
+
+static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+{
+ u32 addr;
+ u32 reg;
+
+ addr = le16_to_cpu(ctrl->wValue);
+ if (addr > 127)
+ return -EINVAL;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~(DWC3_DCFG_DEVADDR_MASK);
+ reg |= DWC3_DCFG_DEVADDR(addr);
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+
+ if (addr)
+ dwc->dev_state = DWC3_ADDRESS_STATE;
+ else
+ dwc->dev_state = DWC3_DEFAULT_STATE;
+
+ return 0;
+}
+
+static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ spin_unlock(&dwc->lock);
+ ret = dwc->gadget_driver->setup(&dwc->gadget, ctrl);
+ spin_lock(&dwc->lock);
+ return ret;
+}
+
+static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+{
+ u32 cfg;
+ int ret;
+
+ dwc->start_config_issued = false;
+ cfg = le16_to_cpu(ctrl->wValue);
+
+ switch (dwc->dev_state) {
+ case DWC3_DEFAULT_STATE:
+ return -EINVAL;
+ break;
+
+ case DWC3_ADDRESS_STATE:
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ /* if the cfg matches and the cfg is non zero */
+ if (!ret && cfg)
+ dwc->dev_state = DWC3_CONFIGURED_STATE;
+ break;
+
+ case DWC3_CONFIGURED_STATE:
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ if (!cfg)
+ dwc->dev_state = DWC3_ADDRESS_STATE;
+ break;
+ }
+ return 0;
+}
+
+static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ dev_vdbg(dwc->dev, "USB_REQ_GET_STATUS\n");
+ ret = dwc3_ep0_handle_status(dwc, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ dev_vdbg(dwc->dev, "USB_REQ_CLEAR_FEATURE\n");
+ ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
+ break;
+ case USB_REQ_SET_FEATURE:
+ dev_vdbg(dwc->dev, "USB_REQ_SET_FEATURE\n");
+ ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ dev_vdbg(dwc->dev, "USB_REQ_SET_ADDRESS\n");
+ ret = dwc3_ep0_set_address(dwc, ctrl);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ dev_vdbg(dwc->dev, "USB_REQ_SET_CONFIGURATION\n");
+ ret = dwc3_ep0_set_config(dwc, ctrl);
+ break;
+ default:
+ dev_vdbg(dwc->dev, "Forwarding to gadget driver\n");
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ break;
+ };
+
+ return ret;
+}
+
+static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct usb_ctrlrequest *ctrl = dwc->ctrl_req;
+ int ret;
+ u32 len;
+
+ if (!dwc->gadget_driver)
+ goto err;
+
+ len = le16_to_cpu(ctrl->wLength);
+ if (!len) {
+ dwc->three_stage_setup = false;
+ dwc->ep0_expect_in = false;
+ dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
+ } else {
+ dwc->three_stage_setup = true;
+ dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
+ dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
+ }
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ ret = dwc3_ep0_std_request(dwc, ctrl);
+ else
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+
+ if (ret >= 0)
+ return;
+
+err:
+ dwc3_ep0_stall_and_restart(dwc);
+}
+
+static void dwc3_ep0_complete_data(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_request *r = NULL;
+ struct usb_request *ur;
+ struct dwc3_trb trb;
+ struct dwc3_ep *dep;
+ u32 transferred;
+ u8 epnum;
+
+ epnum = event->endpoint_number;
+ dep = dwc->eps[epnum];
+
+ dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
+
+ if (!dwc->ep0_status_pending) {
+ r = next_request(&dwc->eps[0]->request_list);
+ ur = &r->request;
+ } else {
+ ur = &dwc->ep0_usb_req;
+ dwc->ep0_status_pending = 0;
+ }
+
+ dwc3_trb_to_nat(dwc->ep0_trb, &trb);
+
+ if (dwc->ep0_bounced) {
+ struct dwc3_ep *ep0 = dwc->eps[0];
+
+ transferred = min_t(u32, ur->length,
+ ep0->endpoint.maxpacket - trb.length);
+ memcpy(ur->buf, dwc->ep0_bounce, transferred);
+ dwc->ep0_bounced = false;
+ } else {
+ transferred = ur->length - trb.length;
+ ur->actual += transferred;
+ }
+
+ if ((epnum & 1) && ur->actual < ur->length) {
+ /* for some reason we did not get everything out */
+
+ dwc3_ep0_stall_and_restart(dwc);
+ } else {
+ /*
+ * handle the case where we have to send a zero packet. This
+ * seems to be case when req.length > maxpacket. Could it be?
+ */
+ if (r)
+ dwc3_gadget_giveback(dep, r, 0);
+ }
+}
+
+static void dwc3_ep0_complete_req(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_request *r;
+ struct dwc3_ep *dep;
+
+ dep = dwc->eps[0];
+
+ if (!list_empty(&dep->request_list)) {
+ r = next_request(&dep->request_list);
+
+ dwc3_gadget_giveback(dep, r, 0);
+ }
+
+ dwc->ep0state = EP0_SETUP_PHASE;
+ dwc3_ep0_out_start(dwc);
+}
+
+static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
+
+ dep->flags &= ~DWC3_EP_BUSY;
+
+ switch (dwc->ep0state) {
+ case EP0_SETUP_PHASE:
+ dev_vdbg(dwc->dev, "Inspecting Setup Bytes\n");
+ dwc3_ep0_inspect_setup(dwc, event);
+ break;
+
+ case EP0_DATA_PHASE:
+ dev_vdbg(dwc->dev, "Data Phase\n");
+ dwc3_ep0_complete_data(dwc, event);
+ break;
+
+ case EP0_STATUS_PHASE:
+ dev_vdbg(dwc->dev, "Status Phase\n");
+ dwc3_ep0_complete_req(dwc, event);
+ break;
+ default:
+ WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
+ }
+}
+
+static void dwc3_ep0_do_control_setup(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ dwc->ep0state = EP0_SETUP_PHASE;
+ dwc3_ep0_out_start(dwc);
+}
+
+static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_ep *dep;
+ struct dwc3_request *req;
+ int ret;
+
+ dep = dwc->eps[0];
+ dwc->ep0state = EP0_DATA_PHASE;
+
+ if (dwc->ep0_status_pending) {
+ dwc3_ep0_send_status_response(dwc);
+ return;
+ }
+
+ if (list_empty(&dep->request_list)) {
+ dev_vdbg(dwc->dev, "pending request for EP0 Data phase\n");
+ dep->flags |= DWC3_EP_PENDING_REQUEST;
+
+ if (event->endpoint_number)
+ dep->flags |= DWC3_EP0_DIR_IN;
+ return;
+ }
+
+ req = next_request(&dep->request_list);
+ req->direction = !!event->endpoint_number;
+
+ dwc->ep0state = EP0_DATA_PHASE;
+ if (req->request.length == 0) {
+ ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
+ dwc->ctrl_req_addr, 0,
+ DWC3_TRBCTL_CONTROL_DATA);
+ } else if ((req->request.length % dep->endpoint.maxpacket)
+ && (event->endpoint_number == 0)) {
+ dwc3_map_buffer_to_dma(req);
+
+ WARN_ON(req->request.length > dep->endpoint.maxpacket);
+
+ dwc->ep0_bounced = true;
+
+ /*
+ * REVISIT in case request length is bigger than EP0
+ * wMaxPacketSize, we will need two chained TRBs to handle
+ * the transfer.
+ */
+ ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
+ dwc->ep0_bounce_addr, dep->endpoint.maxpacket,
+ DWC3_TRBCTL_CONTROL_DATA);
+ } else {
+ dwc3_map_buffer_to_dma(req);
+
+ ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
+ req->request.dma, req->request.length,
+ DWC3_TRBCTL_CONTROL_DATA);
+ }
+
+ WARN_ON(ret < 0);
+}
+
+static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ u32 type;
+ int ret;
+
+ dwc->ep0state = EP0_STATUS_PHASE;
+
+ type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
+ : DWC3_TRBCTL_CONTROL_STATUS2;
+
+ ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
+ dwc->ctrl_req_addr, 0, type);
+
+ WARN_ON(ret < 0);
+}
+
+static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ switch (event->status) {
+ case DEPEVT_STATUS_CONTROL_SETUP:
+ dev_vdbg(dwc->dev, "Control Setup\n");
+ dwc3_ep0_do_control_setup(dwc, event);
+ break;
+
+ case DEPEVT_STATUS_CONTROL_DATA:
+ dev_vdbg(dwc->dev, "Control Data\n");
+
+ if (dwc->ep0_next_event != DWC3_EP0_NRDY_DATA) {
+ dev_vdbg(dwc->dev, "Expected %d got %d\n",
+ dwc->ep0_next_event,
+ DWC3_EP0_NRDY_DATA);
+
+ dwc3_ep0_stall_and_restart(dwc);
+ return;
+ }
+
+ /*
+ * One of the possible error cases is when Host _does_
+ * request for Data Phase, but it does so on the wrong
+ * direction.
+ *
+ * Here, we already know ep0_next_event is DATA (see above),
+ * so we only need to check for direction.
+ */
+ if (dwc->ep0_expect_in != event->endpoint_number) {
+ dev_vdbg(dwc->dev, "Wrong direction for Data phase\n");
+ dwc3_ep0_stall_and_restart(dwc);
+ return;
+ }
+
+ dwc3_ep0_do_control_data(dwc, event);
+ break;
+
+ case DEPEVT_STATUS_CONTROL_STATUS:
+ dev_vdbg(dwc->dev, "Control Status\n");
+
+ if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS) {
+ dev_vdbg(dwc->dev, "Expected %d got %d\n",
+ dwc->ep0_next_event,
+ DWC3_EP0_NRDY_STATUS);
+
+ dwc3_ep0_stall_and_restart(dwc);
+ return;
+ }
+ dwc3_ep0_do_control_status(dwc, event);
+ }
+}
+
+void dwc3_ep0_interrupt(struct dwc3 *dwc,
+ const const struct dwc3_event_depevt *event)
+{
+ u8 epnum = event->endpoint_number;
+
+ dev_dbg(dwc->dev, "%s while ep%d%s in state '%s'\n",
+ dwc3_ep_event_string(event->endpoint_event),
+ epnum >> 1, (epnum & 1) ? "in" : "out",
+ dwc3_ep0_state_string(dwc->ep0state));
+
+ switch (event->endpoint_event) {
+ case DWC3_DEPEVT_XFERCOMPLETE:
+ dwc3_ep0_xfer_complete(dwc, event);
+ break;
+
+ case DWC3_DEPEVT_XFERNOTREADY:
+ dwc3_ep0_xfernotready(dwc, event);
+ break;
+
+ case DWC3_DEPEVT_XFERINPROGRESS:
+ case DWC3_DEPEVT_RXTXFIFOEVT:
+ case DWC3_DEPEVT_STREAMEVT:
+ case DWC3_DEPEVT_EPCMDCMPLT:
+ break;
+ }
+}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
new file mode 100644
index 00000000000..fa824cfdd2e
--- /dev/null
+++ b/drivers/usb/dwc3/gadget.c
@@ -0,0 +1,2104 @@
+/**
+ * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+void dwc3_map_buffer_to_dma(struct dwc3_request *req)
+{
+ struct dwc3 *dwc = req->dep->dwc;
+
+ if (req->request.length == 0) {
+ /* req->request.dma = dwc->setup_buf_addr; */
+ return;
+ }
+
+ if (req->request.dma == DMA_ADDR_INVALID) {
+ req->request.dma = dma_map_single(dwc->dev, req->request.buf,
+ req->request.length, req->direction
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = true;
+ }
+}
+
+void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
+{
+ struct dwc3 *dwc = req->dep->dwc;
+
+ if (req->request.length == 0) {
+ req->request.dma = DMA_ADDR_INVALID;
+ return;
+ }
+
+ if (req->mapped) {
+ dma_unmap_single(dwc->dev, req->request.dma,
+ req->request.length, req->direction
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 0;
+ req->request.dma = DMA_ADDR_INVALID;
+ }
+}
+
+void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ int status)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ if (req->queued) {
+ dep->busy_slot++;
+ /*
+ * Skip LINK TRB. We can't use req->trb and check for
+ * DWC3_TRBCTL_LINK_TRB because it points the TRB we just
+ * completed (not the LINK TRB).
+ */
+ if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
+ usb_endpoint_xfer_isoc(dep->desc))
+ dep->busy_slot++;
+ }
+ list_del(&req->list);
+
+ if (req->request.status == -EINPROGRESS)
+ req->request.status = status;
+
+ dwc3_unmap_buffer_from_dma(req);
+
+ dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
+ req, dep->name, req->request.actual,
+ req->request.length, status);
+
+ spin_unlock(&dwc->lock);
+ req->request.complete(&req->dep->endpoint, &req->request);
+ spin_lock(&dwc->lock);
+}
+
+static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
+{
+ switch (cmd) {
+ case DWC3_DEPCMD_DEPSTARTCFG:
+ return "Start New Configuration";
+ case DWC3_DEPCMD_ENDTRANSFER:
+ return "End Transfer";
+ case DWC3_DEPCMD_UPDATETRANSFER:
+ return "Update Transfer";
+ case DWC3_DEPCMD_STARTTRANSFER:
+ return "Start Transfer";
+ case DWC3_DEPCMD_CLEARSTALL:
+ return "Clear Stall";
+ case DWC3_DEPCMD_SETSTALL:
+ return "Set Stall";
+ case DWC3_DEPCMD_GETSEQNUMBER:
+ return "Get Data Sequence Number";
+ case DWC3_DEPCMD_SETTRANSFRESOURCE:
+ return "Set Endpoint Transfer Resource";
+ case DWC3_DEPCMD_SETEPCONFIG:
+ return "Set Endpoint Configuration";
+ default:
+ return "UNKNOWN command";
+ }
+}
+
+int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
+ unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
+{
+ struct dwc3_ep *dep = dwc->eps[ep];
+ u32 timeout = 500;
+ u32 reg;
+
+ dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
+ dep->name,
+ dwc3_gadget_ep_cmd_string(cmd), params->param0,
+ params->param1, params->param2);
+
+ dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
+ dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
+ dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
+
+ dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
+ if (!(reg & DWC3_DEPCMD_CMDACT)) {
+ dev_vdbg(dwc->dev, "Command Complete --> %d\n",
+ DWC3_DEPCMD_STATUS(reg));
+ return 0;
+ }
+
+ /*
+ * We can't sleep here, because it is also called from
+ * interrupt context.
+ */
+ timeout--;
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ udelay(1);
+ } while (1);
+}
+
+static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
+ struct dwc3_trb_hw *trb)
+{
+ u32 offset = (char *) trb - (char *) dep->trb_pool;
+
+ return dep->trb_pool_dma + offset;
+}
+
+static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ if (dep->trb_pool)
+ return 0;
+
+ if (dep->number == 0 || dep->number == 1)
+ return 0;
+
+ dep->trb_pool = dma_alloc_coherent(dwc->dev,
+ sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+ &dep->trb_pool_dma, GFP_KERNEL);
+ if (!dep->trb_pool) {
+ dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
+ dep->name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dwc3_free_trb_pool(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+ dep->trb_pool, dep->trb_pool_dma);
+
+ dep->trb_pool = NULL;
+ dep->trb_pool_dma = 0;
+}
+
+static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd;
+
+ memset(&params, 0x00, sizeof(params));
+
+ if (dep->number != 1) {
+ cmd = DWC3_DEPCMD_DEPSTARTCFG;
+ /* XferRscIdx == 0 for ep0 and 2 for the remaining */
+ if (dep->number > 1) {
+ if (dwc->start_config_issued)
+ return 0;
+ dwc->start_config_issued = true;
+ cmd |= DWC3_DEPCMD_PARAM(2);
+ }
+
+ return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+ }
+
+ return 0;
+}
+
+static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+
+ memset(&params, 0x00, sizeof(params));
+
+ params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
+ | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
+ | DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst);
+
+ params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
+ | DWC3_DEPCFG_XFER_NOT_READY_EN;
+
+ if (usb_endpoint_xfer_bulk(desc) && dep->endpoint.max_streams) {
+ params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
+ | DWC3_DEPCFG_STREAM_EVENT_EN;
+ dep->stream_capable = true;
+ }
+
+ if (usb_endpoint_xfer_isoc(desc))
+ params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
+
+ /*
+ * We are doing 1:1 mapping for endpoints, meaning
+ * Physical Endpoints 2 maps to Logical Endpoint 2 and
+ * so on. We consider the direction bit as part of the physical
+ * endpoint number. So USB endpoint 0x81 is 0x03.
+ */
+ params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
+
+ /*
+ * We must use the lower 16 TX FIFOs even though
+ * HW might have more
+ */
+ if (dep->direction)
+ params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
+
+ if (desc->bInterval) {
+ params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
+ dep->interval = 1 << (desc->bInterval - 1);
+ }
+
+ return dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_SETEPCONFIG, &params);
+}
+
+static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+
+ memset(&params, 0x00, sizeof(params));
+
+ params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
+
+ return dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
+}
+
+/**
+ * __dwc3_gadget_ep_enable - Initializes a HW endpoint
+ * @dep: endpoint to be initialized
+ * @desc: USB Endpoint Descriptor
+ *
+ * Caller should take care of locking
+ */
+static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct dwc3 *dwc = dep->dwc;
+ u32 reg;
+ int ret = -ENOMEM;
+
+ if (!(dep->flags & DWC3_EP_ENABLED)) {
+ ret = dwc3_gadget_start_config(dwc, dep);
+ if (ret)
+ return ret;
+ }
+
+ ret = dwc3_gadget_set_ep_config(dwc, dep, desc);
+ if (ret)
+ return ret;
+
+ if (!(dep->flags & DWC3_EP_ENABLED)) {
+ struct dwc3_trb_hw *trb_st_hw;
+ struct dwc3_trb_hw *trb_link_hw;
+ struct dwc3_trb trb_link;
+
+ ret = dwc3_gadget_set_xfer_resource(dwc, dep);
+ if (ret)
+ return ret;
+
+ dep->desc = desc;
+ dep->type = usb_endpoint_type(desc);
+ dep->flags |= DWC3_EP_ENABLED;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+ reg |= DWC3_DALEPENA_EP(dep->number);
+ dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+
+ if (!usb_endpoint_xfer_isoc(desc))
+ return 0;
+
+ memset(&trb_link, 0, sizeof(trb_link));
+
+ /* Link TRB for ISOC. The HWO but is never reset */
+ trb_st_hw = &dep->trb_pool[0];
+
+ trb_link.bplh = dwc3_trb_dma_offset(dep, trb_st_hw);
+ trb_link.trbctl = DWC3_TRBCTL_LINK_TRB;
+ trb_link.hwo = true;
+
+ trb_link_hw = &dep->trb_pool[DWC3_TRB_NUM - 1];
+ dwc3_trb_to_hw(&trb_link, trb_link_hw);
+ }
+
+ return 0;
+}
+
+static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
+static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
+{
+ struct dwc3_request *req;
+
+ if (!list_empty(&dep->req_queued))
+ dwc3_stop_active_transfer(dwc, dep->number);
+
+ while (!list_empty(&dep->request_list)) {
+ req = next_request(&dep->request_list);
+
+ dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ }
+}
+
+/**
+ * __dwc3_gadget_ep_disable - Disables a HW endpoint
+ * @dep: the endpoint to disable
+ *
+ * This function also removes requests which are currently processed ny the
+ * hardware and those which are not yet scheduled.
+ * Caller should take care of locking.
+ */
+static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+ u32 reg;
+
+ dwc3_remove_requests(dwc, dep);
+
+ reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+ reg &= ~DWC3_DALEPENA_EP(dep->number);
+ dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+
+ dep->stream_capable = false;
+ dep->desc = NULL;
+ dep->type = 0;
+ dep->flags = 0;
+
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ return -EINVAL;
+}
+
+static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
+{
+ return -EINVAL;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static int dwc3_gadget_ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct dwc3_ep *dep;
+ struct dwc3 *dwc;
+ unsigned long flags;
+ int ret;
+
+ if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
+ pr_debug("dwc3: invalid parameters\n");
+ return -EINVAL;
+ }
+
+ if (!desc->wMaxPacketSize) {
+ pr_debug("dwc3: missing wMaxPacketSize\n");
+ return -EINVAL;
+ }
+
+ dep = to_dwc3_ep(ep);
+ dwc = dep->dwc;
+
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ strncat(dep->name, "-control", sizeof(dep->name));
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ strncat(dep->name, "-isoc", sizeof(dep->name));
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ strncat(dep->name, "-bulk", sizeof(dep->name));
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ strncat(dep->name, "-int", sizeof(dep->name));
+ break;
+ default:
+ dev_err(dwc->dev, "invalid endpoint transfer type\n");
+ }
+
+ if (dep->flags & DWC3_EP_ENABLED) {
+ dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
+ dep->name);
+ return 0;
+ }
+
+ dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = __dwc3_gadget_ep_enable(dep, desc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+static int dwc3_gadget_ep_disable(struct usb_ep *ep)
+{
+ struct dwc3_ep *dep;
+ struct dwc3 *dwc;
+ unsigned long flags;
+ int ret;
+
+ if (!ep) {
+ pr_debug("dwc3: invalid parameters\n");
+ return -EINVAL;
+ }
+
+ dep = to_dwc3_ep(ep);
+ dwc = dep->dwc;
+
+ if (!(dep->flags & DWC3_EP_ENABLED)) {
+ dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
+ dep->name);
+ return 0;
+ }
+
+ snprintf(dep->name, sizeof(dep->name), "ep%d%s",
+ dep->number >> 1,
+ (dep->number & 1) ? "in" : "out");
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = __dwc3_gadget_ep_disable(dep);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
+ gfp_t gfp_flags)
+{
+ struct dwc3_request *req;
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req) {
+ dev_err(dwc->dev, "not enough memory\n");
+ return NULL;
+ }
+
+ req->epnum = dep->number;
+ req->dep = dep;
+ req->request.dma = DMA_ADDR_INVALID;
+
+ return &req->request;
+}
+
+static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ struct dwc3_request *req = to_dwc3_request(request);
+
+ kfree(req);
+}
+
+/*
+ * dwc3_prepare_trbs - setup TRBs from requests
+ * @dep: endpoint for which requests are being prepared
+ * @starting: true if the endpoint is idle and no requests are queued.
+ *
+ * The functions goes through the requests list and setups TRBs for the
+ * transfers. The functions returns once there are not more TRBs available or
+ * it run out of requests.
+ */
+static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
+ bool starting)
+{
+ struct dwc3_request *req, *n, *ret = NULL;
+ struct dwc3_trb_hw *trb_hw;
+ struct dwc3_trb trb;
+ u32 trbs_left;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
+
+ /* the first request must not be queued */
+ trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
+ /*
+ * if busy & slot are equal than it is either full or empty. If we are
+ * starting to proceed requests then we are empty. Otherwise we ar
+ * full and don't do anything
+ */
+ if (!trbs_left) {
+ if (!starting)
+ return NULL;
+ trbs_left = DWC3_TRB_NUM;
+ /*
+ * In case we start from scratch, we queue the ISOC requests
+ * starting from slot 1. This is done because we use ring
+ * buffer and have no LST bit to stop us. Instead, we place
+ * IOC bit TRB_NUM/4. We try to avoid to having an interrupt
+ * after the first request so we start at slot 1 and have
+ * 7 requests proceed before we hit the first IOC.
+ * Other transfer types don't use the ring buffer and are
+ * processed from the first TRB until the last one. Since we
+ * don't wrap around we have to start at the beginning.
+ */
+ if (usb_endpoint_xfer_isoc(dep->desc)) {
+ dep->busy_slot = 1;
+ dep->free_slot = 1;
+ } else {
+ dep->busy_slot = 0;
+ dep->free_slot = 0;
+ }
+ }
+
+ /* The last TRB is a link TRB, not used for xfer */
+ if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
+ return NULL;
+
+ list_for_each_entry_safe(req, n, &dep->request_list, list) {
+ unsigned int last_one = 0;
+ unsigned int cur_slot;
+
+ trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
+ cur_slot = dep->free_slot;
+ dep->free_slot++;
+
+ /* Skip the LINK-TRB on ISOC */
+ if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
+ usb_endpoint_xfer_isoc(dep->desc))
+ continue;
+
+ dwc3_gadget_move_request_queued(req);
+ memset(&trb, 0, sizeof(trb));
+ trbs_left--;
+
+ /* Is our TRB pool empty? */
+ if (!trbs_left)
+ last_one = 1;
+ /* Is this the last request? */
+ if (list_empty(&dep->request_list))
+ last_one = 1;
+
+ /*
+ * FIXME we shouldn't need to set LST bit always but we are
+ * facing some weird problem with the Hardware where it doesn't
+ * complete even though it has been previously started.
+ *
+ * While we're debugging the problem, as a workaround to
+ * multiple TRBs handling, use only one TRB at a time.
+ */
+ last_one = 1;
+
+ req->trb = trb_hw;
+ if (!ret)
+ ret = req;
+
+ trb.bplh = req->request.dma;
+
+ if (usb_endpoint_xfer_isoc(dep->desc)) {
+ trb.isp_imi = true;
+ trb.csp = true;
+ } else {
+ trb.lst = last_one;
+ }
+
+ if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
+ trb.sid_sofn = req->request.stream_id;
+
+ switch (usb_endpoint_type(dep->desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
+ break;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+
+ /* IOC every DWC3_TRB_NUM / 4 so we can refill */
+ if (!(cur_slot % (DWC3_TRB_NUM / 4)))
+ trb.ioc = last_one;
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ trb.trbctl = DWC3_TRBCTL_NORMAL;
+ break;
+ default:
+ /*
+ * This is only possible with faulty memory because we
+ * checked it already :)
+ */
+ BUG();
+ }
+
+ trb.length = req->request.length;
+ trb.hwo = true;
+
+ dwc3_trb_to_hw(&trb, trb_hw);
+ req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
+
+ if (last_one)
+ break;
+ }
+
+ return ret;
+}
+
+static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
+ int start_new)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_request *req;
+ struct dwc3 *dwc = dep->dwc;
+ int ret;
+ u32 cmd;
+
+ if (start_new && (dep->flags & DWC3_EP_BUSY)) {
+ dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
+ return -EBUSY;
+ }
+ dep->flags &= ~DWC3_EP_PENDING_REQUEST;
+
+ /*
+ * If we are getting here after a short-out-packet we don't enqueue any
+ * new requests as we try to set the IOC bit only on the last request.
+ */
+ if (start_new) {
+ if (list_empty(&dep->req_queued))
+ dwc3_prepare_trbs(dep, start_new);
+
+ /* req points to the first request which will be sent */
+ req = next_request(&dep->req_queued);
+ } else {
+ /*
+ * req points to the first request where HWO changed
+ * from 0 to 1
+ */
+ req = dwc3_prepare_trbs(dep, start_new);
+ }
+ if (!req) {
+ dep->flags |= DWC3_EP_PENDING_REQUEST;
+ return 0;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.param0 = upper_32_bits(req->trb_dma);
+ params.param1 = lower_32_bits(req->trb_dma);
+
+ if (start_new)
+ cmd = DWC3_DEPCMD_STARTTRANSFER;
+ else
+ cmd = DWC3_DEPCMD_UPDATETRANSFER;
+
+ cmd |= DWC3_DEPCMD_PARAM(cmd_param);
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+ if (ret < 0) {
+ dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
+
+ /*
+ * FIXME we need to iterate over the list of requests
+ * here and stop, unmap, free and del each of the linked
+ * requests instead of we do now.
+ */
+ dwc3_unmap_buffer_from_dma(req);
+ list_del(&req->list);
+ return ret;
+ }
+
+ dep->flags |= DWC3_EP_BUSY;
+ dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
+ dep->number);
+ if (!dep->res_trans_idx)
+ printk_once(KERN_ERR "%s() res_trans_idx is invalid\n", __func__);
+ return 0;
+}
+
+static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
+{
+ req->request.actual = 0;
+ req->request.status = -EINPROGRESS;
+ req->direction = dep->direction;
+ req->epnum = dep->number;
+
+ /*
+ * We only add to our list of requests now and
+ * start consuming the list once we get XferNotReady
+ * IRQ.
+ *
+ * That way, we avoid doing anything that we don't need
+ * to do now and defer it until the point we receive a
+ * particular token from the Host side.
+ *
+ * This will also avoid Host cancelling URBs due to too
+ * many NACKs.
+ */
+ dwc3_map_buffer_to_dma(req);
+ list_add_tail(&req->list, &dep->request_list);
+
+ /*
+ * There is one special case: XferNotReady with
+ * empty list of requests. We need to kick the
+ * transfer here in that situation, otherwise
+ * we will be NAKing forever.
+ *
+ * If we get XferNotReady before gadget driver
+ * has a chance to queue a request, we will ACK
+ * the IRQ but won't be able to receive the data
+ * until the next request is queued. The following
+ * code is handling exactly that.
+ */
+ if (dep->flags & DWC3_EP_PENDING_REQUEST) {
+ int ret;
+ int start_trans;
+
+ start_trans = 1;
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ dep->flags & DWC3_EP_BUSY)
+ start_trans = 0;
+
+ ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans);
+ if (ret && ret != -EBUSY) {
+ struct dwc3 *dwc = dep->dwc;
+
+ dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
+ dep->name);
+ }
+ };
+
+ return 0;
+}
+
+static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
+ gfp_t gfp_flags)
+{
+ struct dwc3_request *req = to_dwc3_request(request);
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ unsigned long flags;
+
+ int ret;
+
+ if (!dep->desc) {
+ dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
+ request, ep->name);
+ return -ESHUTDOWN;
+ }
+
+ dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
+ request, ep->name, request->length);
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = __dwc3_gadget_ep_queue(dep, req);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ struct dwc3_request *req = to_dwc3_request(request);
+ struct dwc3_request *r = NULL;
+
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ list_for_each_entry(r, &dep->request_list, list) {
+ if (r == req)
+ break;
+ }
+
+ if (r != req) {
+ list_for_each_entry(r, &dep->req_queued, list) {
+ if (r == req)
+ break;
+ }
+ if (r == req) {
+ /* wait until it is processed */
+ dwc3_stop_active_transfer(dwc, dep->number);
+ goto out0;
+ }
+ dev_err(dwc->dev, "request %p was not queued to %s\n",
+ request, ep->name);
+ ret = -EINVAL;
+ goto out0;
+ }
+
+ /* giveback the request */
+ dwc3_gadget_giveback(dep, req, -ECONNRESET);
+
+out0:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3 *dwc = dep->dwc;
+ int ret;
+
+ memset(&params, 0x00, sizeof(params));
+
+ if (value) {
+ if (dep->number == 0 || dep->number == 1) {
+ /*
+ * Whenever EP0 is stalled, we will restart
+ * the state machine, thus moving back to
+ * Setup Phase
+ */
+ dwc->ep0state = EP0_SETUP_PHASE;
+ }
+
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_SETSTALL, &params);
+ if (ret)
+ dev_err(dwc->dev, "failed to %s STALL on %s\n",
+ value ? "set" : "clear",
+ dep->name);
+ else
+ dep->flags |= DWC3_EP_STALL;
+ } else {
+ if (dep->flags & DWC3_EP_WEDGE)
+ return 0;
+
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_CLEARSTALL, &params);
+ if (ret)
+ dev_err(dwc->dev, "failed to %s STALL on %s\n",
+ value ? "set" : "clear",
+ dep->name);
+ else
+ dep->flags &= ~DWC3_EP_STALL;
+ }
+
+ return ret;
+}
+
+static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ unsigned long flags;
+
+ int ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ if (usb_endpoint_xfer_isoc(dep->desc)) {
+ dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = __dwc3_gadget_ep_set_halt(dep, value);
+out:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+
+ dep->flags |= DWC3_EP_WEDGE;
+
+ return dwc3_gadget_ep_set_halt(ep, 1);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+};
+
+static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
+ .enable = dwc3_gadget_ep0_enable,
+ .disable = dwc3_gadget_ep0_disable,
+ .alloc_request = dwc3_gadget_ep_alloc_request,
+ .free_request = dwc3_gadget_ep_free_request,
+ .queue = dwc3_gadget_ep0_queue,
+ .dequeue = dwc3_gadget_ep_dequeue,
+ .set_halt = dwc3_gadget_ep_set_halt,
+ .set_wedge = dwc3_gadget_ep_set_wedge,
+};
+
+static const struct usb_ep_ops dwc3_gadget_ep_ops = {
+ .enable = dwc3_gadget_ep_enable,
+ .disable = dwc3_gadget_ep_disable,
+ .alloc_request = dwc3_gadget_ep_alloc_request,
+ .free_request = dwc3_gadget_ep_free_request,
+ .queue = dwc3_gadget_ep_queue,
+ .dequeue = dwc3_gadget_ep_dequeue,
+ .set_halt = dwc3_gadget_ep_set_halt,
+ .set_wedge = dwc3_gadget_ep_set_wedge,
+};
+
+/* -------------------------------------------------------------------------- */
+
+static int dwc3_gadget_get_frame(struct usb_gadget *g)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ return DWC3_DSTS_SOFFN(reg);
+}
+
+static int dwc3_gadget_wakeup(struct usb_gadget *g)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+
+ unsigned long timeout;
+ unsigned long flags;
+
+ u32 reg;
+
+ int ret = 0;
+
+ u8 link_state;
+ u8 speed;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ /*
+ * According to the Databook Remote wakeup request should
+ * be issued only when the device is in early suspend state.
+ *
+ * We can check that via USB Link State bits in DSTS register.
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+
+ speed = reg & DWC3_DSTS_CONNECTSPD;
+ if (speed == DWC3_DSTS_SUPERSPEED) {
+ dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ link_state = DWC3_DSTS_USBLNKST(reg);
+
+ switch (link_state) {
+ case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
+ case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
+ break;
+ default:
+ dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
+ link_state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+
+ /*
+ * Switch link state to Recovery. In HS/FS/LS this means
+ * RemoteWakeup Request
+ */
+ reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ /* wait for at least 2000us */
+ usleep_range(2000, 2500);
+
+ /* write zeroes to Link Change Request */
+ reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ /* pool until Link State change to ON */
+ timeout = jiffies + msecs_to_jiffies(100);
+
+ while (!(time_after(jiffies, timeout))) {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+
+ /* in HS, means ON */
+ if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
+ break;
+ }
+
+ if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
+ dev_err(dwc->dev, "failed to send remote wakeup\n");
+ ret = -EINVAL;
+ }
+
+out:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
+ int is_selfpowered)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+
+ dwc->is_selfpowered = !!is_selfpowered;
+
+ return 0;
+}
+
+static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+{
+ u32 reg;
+ u32 timeout = 500;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (is_on)
+ reg |= DWC3_DCTL_RUN_STOP;
+ else
+ reg &= ~DWC3_DCTL_RUN_STOP;
+
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (is_on) {
+ if (!(reg & DWC3_DSTS_DEVCTRLHLT))
+ break;
+ } else {
+ if (reg & DWC3_DSTS_DEVCTRLHLT)
+ break;
+ }
+ timeout--;
+ if (!timeout)
+ break;
+ udelay(1);
+ } while (1);
+
+ dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
+ dwc->gadget_driver
+ ? dwc->gadget_driver->function : "no-function",
+ is_on ? "connect" : "disconnect");
+}
+
+static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+
+ is_on = !!is_on;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_gadget_run_stop(dwc, is_on);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+
+static int dwc3_gadget_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ struct dwc3_ep *dep;
+ unsigned long flags;
+ int ret = 0;
+ u32 reg;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ if (dwc->gadget_driver) {
+ dev_err(dwc->dev, "%s is already bound to %s\n",
+ dwc->gadget.name,
+ dwc->gadget_driver->driver.name);
+ ret = -EBUSY;
+ goto err0;
+ }
+
+ dwc->gadget_driver = driver;
+ dwc->gadget.dev.driver = &driver->driver;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+
+ reg &= ~DWC3_GCTL_SCALEDOWN(3);
+ reg &= ~DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG);
+ reg &= ~DWC3_GCTL_DISSCRAMBLE;
+ reg |= DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_DEVICE);
+
+ switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams0)) {
+ case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
+ reg &= ~DWC3_GCTL_DSBLCLKGTNG;
+ break;
+ default:
+ dev_dbg(dwc->dev, "No power optimization available\n");
+ }
+
+ /*
+ * WORKAROUND: DWC3 revisions <1.90a have a bug
+ * when The device fails to connect at SuperSpeed
+ * and falls back to high-speed mode which causes
+ * the device to enter in a Connect/Disconnect loop
+ */
+ if (dwc->revision < DWC3_REVISION_190A)
+ reg |= DWC3_GCTL_U2RSTECN;
+
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~(DWC3_DCFG_SPEED_MASK);
+ reg |= DWC3_DCFG_SUPERSPEED;
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+
+ dwc->start_config_issued = false;
+
+ /* Start with SuperSpeed Default */
+ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+
+ dep = dwc->eps[0];
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ goto err0;
+ }
+
+ dep = dwc->eps[1];
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ goto err1;
+ }
+
+ /* begin to receive SETUP packets */
+ dwc->ep0state = EP0_SETUP_PHASE;
+ dwc3_ep0_out_start(dwc);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+
+err1:
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+
+err0:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+static int dwc3_gadget_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+ __dwc3_gadget_ep_disable(dwc->eps[1]);
+
+ dwc->gadget_driver = NULL;
+ dwc->gadget.dev.driver = NULL;
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+static const struct usb_gadget_ops dwc3_gadget_ops = {
+ .get_frame = dwc3_gadget_get_frame,
+ .wakeup = dwc3_gadget_wakeup,
+ .set_selfpowered = dwc3_gadget_set_selfpowered,
+ .pullup = dwc3_gadget_pullup,
+ .udc_start = dwc3_gadget_start,
+ .udc_stop = dwc3_gadget_stop,
+};
+
+/* -------------------------------------------------------------------------- */
+
+static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
+{
+ struct dwc3_ep *dep;
+ u8 epnum;
+
+ INIT_LIST_HEAD(&dwc->gadget.ep_list);
+
+ for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ dep = kzalloc(sizeof(*dep), GFP_KERNEL);
+ if (!dep) {
+ dev_err(dwc->dev, "can't allocate endpoint %d\n",
+ epnum);
+ return -ENOMEM;
+ }
+
+ dep->dwc = dwc;
+ dep->number = epnum;
+ dwc->eps[epnum] = dep;
+
+ snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
+ (epnum & 1) ? "in" : "out");
+ dep->endpoint.name = dep->name;
+ dep->direction = (epnum & 1);
+
+ if (epnum == 0 || epnum == 1) {
+ dep->endpoint.maxpacket = 512;
+ dep->endpoint.ops = &dwc3_gadget_ep0_ops;
+ if (!epnum)
+ dwc->gadget.ep0 = &dep->endpoint;
+ } else {
+ int ret;
+
+ dep->endpoint.maxpacket = 1024;
+ dep->endpoint.ops = &dwc3_gadget_ep_ops;
+ list_add_tail(&dep->endpoint.ep_list,
+ &dwc->gadget.ep_list);
+
+ ret = dwc3_alloc_trb_pool(dep);
+ if (ret) {
+ dev_err(dwc->dev, "%s: failed to allocate TRB pool\n", dep->name);
+ return ret;
+ }
+ }
+ INIT_LIST_HEAD(&dep->request_list);
+ INIT_LIST_HEAD(&dep->req_queued);
+ }
+
+ return 0;
+}
+
+static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
+{
+ struct dwc3_ep *dep;
+ u8 epnum;
+
+ for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ dep = dwc->eps[epnum];
+ dwc3_free_trb_pool(dep);
+
+ if (epnum != 0 && epnum != 1)
+ list_del(&dep->endpoint.ep_list);
+
+ kfree(dep);
+ }
+}
+
+static void dwc3_gadget_release(struct device *dev)
+{
+ dev_dbg(dev, "%s\n", __func__);
+}
+
+/* -------------------------------------------------------------------------- */
+static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event, int status)
+{
+ struct dwc3_request *req;
+ struct dwc3_trb trb;
+ unsigned int count;
+ unsigned int s_pkt = 0;
+
+ do {
+ req = next_request(&dep->req_queued);
+ if (!req)
+ break;
+
+ dwc3_trb_to_nat(req->trb, &trb);
+
+ if (trb.hwo && status != -ESHUTDOWN)
+ /*
+ * We continue despite the error. There is not much we
+ * can do. If we don't clean in up we loop for ever. If
+ * we skip the TRB than it gets overwritten reused after
+ * a while since we use them in a ring buffer. a BUG()
+ * would help. Lets hope that if this occures, someone
+ * fixes the root cause instead of looking away :)
+ */
+ dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
+ dep->name, req->trb);
+ count = trb.length;
+
+ if (dep->direction) {
+ if (count) {
+ dev_err(dwc->dev, "incomplete IN transfer %s\n",
+ dep->name);
+ status = -ECONNRESET;
+ }
+ } else {
+ if (count && (event->status & DEPEVT_STATUS_SHORT))
+ s_pkt = 1;
+ }
+
+ /*
+ * We assume here we will always receive the entire data block
+ * which we should receive. Meaning, if we program RX to
+ * receive 4K but we receive only 2K, we assume that's all we
+ * should receive and we simply bounce the request back to the
+ * gadget driver for further processing.
+ */
+ req->request.actual += req->request.length - count;
+ dwc3_gadget_giveback(dep, req, status);
+ if (s_pkt)
+ break;
+ if ((event->status & DEPEVT_STATUS_LST) && trb.lst)
+ break;
+ if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
+ break;
+ } while (1);
+
+ if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
+ return 0;
+ return 1;
+}
+
+static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
+ struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
+ int start_new)
+{
+ unsigned status = 0;
+ int clean_busy;
+
+ if (event->status & DEPEVT_STATUS_BUSERR)
+ status = -ECONNRESET;
+
+ clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
+ if (clean_busy) {
+ dep->flags &= ~DWC3_EP_BUSY;
+ dep->res_trans_idx = 0;
+ }
+}
+
+static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
+ struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
+{
+ u32 uf;
+
+ if (list_empty(&dep->request_list)) {
+ dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
+ dep->name);
+ return;
+ }
+
+ if (event->parameters) {
+ u32 mask;
+
+ mask = ~(dep->interval - 1);
+ uf = event->parameters & mask;
+ /* 4 micro frames in the future */
+ uf += dep->interval * 4;
+ } else {
+ uf = 0;
+ }
+
+ __dwc3_gadget_kick_transfer(dep, uf, 1);
+}
+
+static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_event_depevt mod_ev = *event;
+
+ /*
+ * We were asked to remove one requests. It is possible that this
+ * request and a few other were started together and have the same
+ * transfer index. Since we stopped the complete endpoint we don't
+ * know how many requests were already completed (and not yet)
+ * reported and how could be done (later). We purge them all until
+ * the end of the list.
+ */
+ mod_ev.status = DEPEVT_STATUS_LST;
+ dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
+ dep->flags &= ~DWC3_EP_BUSY;
+ /* pending requets are ignored and are queued on XferNotReady */
+}
+
+static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ u32 param = event->parameters;
+ u32 cmd_type = (param >> 8) & ((1 << 5) - 1);
+
+ switch (cmd_type) {
+ case DWC3_DEPCMD_ENDTRANSFER:
+ dwc3_process_ep_cmd_complete(dep, event);
+ break;
+ case DWC3_DEPCMD_STARTTRANSFER:
+ dep->res_trans_idx = param & 0x7f;
+ break;
+ default:
+ printk(KERN_ERR "%s() unknown /unexpected type: %d\n",
+ __func__, cmd_type);
+ break;
+ };
+}
+
+static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_ep *dep;
+ u8 epnum = event->endpoint_number;
+
+ dep = dwc->eps[epnum];
+
+ dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
+ dwc3_ep_event_string(event->endpoint_event));
+
+ if (epnum == 0 || epnum == 1) {
+ dwc3_ep0_interrupt(dwc, event);
+ return;
+ }
+
+ switch (event->endpoint_event) {
+ case DWC3_DEPEVT_XFERCOMPLETE:
+ if (usb_endpoint_xfer_isoc(dep->desc)) {
+ dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
+ dep->name);
+ return;
+ }
+
+ dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
+ break;
+ case DWC3_DEPEVT_XFERINPROGRESS:
+ if (!usb_endpoint_xfer_isoc(dep->desc)) {
+ dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
+ dep->name);
+ return;
+ }
+
+ dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
+ break;
+ case DWC3_DEPEVT_XFERNOTREADY:
+ if (usb_endpoint_xfer_isoc(dep->desc)) {
+ dwc3_gadget_start_isoc(dwc, dep, event);
+ } else {
+ int ret;
+
+ dev_vdbg(dwc->dev, "%s: reason %s\n",
+ dep->name, event->status
+ ? "Transfer Active"
+ : "Transfer Not Active");
+
+ ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
+ if (!ret || ret == -EBUSY)
+ return;
+
+ dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
+ dep->name);
+ }
+
+ break;
+ case DWC3_DEPEVT_STREAMEVT:
+ if (!usb_endpoint_xfer_bulk(dep->desc)) {
+ dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
+ dep->name);
+ return;
+ }
+
+ switch (event->status) {
+ case DEPEVT_STREAMEVT_FOUND:
+ dev_vdbg(dwc->dev, "Stream %d found and started\n",
+ event->parameters);
+
+ break;
+ case DEPEVT_STREAMEVT_NOTFOUND:
+ /* FALLTHROUGH */
+ default:
+ dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
+ }
+ break;
+ case DWC3_DEPEVT_RXTXFIFOEVT:
+ dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
+ break;
+ case DWC3_DEPEVT_EPCMDCMPLT:
+ dwc3_ep_cmd_compl(dep, event);
+ break;
+ }
+}
+
+static void dwc3_disconnect_gadget(struct dwc3 *dwc)
+{
+ if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
+ spin_unlock(&dwc->lock);
+ dwc->gadget_driver->disconnect(&dwc->gadget);
+ spin_lock(&dwc->lock);
+ }
+}
+
+static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
+{
+ struct dwc3_ep *dep;
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd;
+ int ret;
+
+ dep = dwc->eps[epnum];
+
+ WARN_ON(!dep->res_trans_idx);
+ if (dep->res_trans_idx) {
+ cmd = DWC3_DEPCMD_ENDTRANSFER;
+ cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
+ cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
+ memset(&params, 0, sizeof(params));
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+ WARN_ON_ONCE(ret);
+ dep->res_trans_idx = 0;
+ }
+}
+
+static void dwc3_stop_active_transfers(struct dwc3 *dwc)
+{
+ u32 epnum;
+
+ for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ struct dwc3_ep *dep;
+
+ dep = dwc->eps[epnum];
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ dwc3_remove_requests(dwc, dep);
+ }
+}
+
+static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
+{
+ u32 epnum;
+
+ for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ struct dwc3_ep *dep;
+ struct dwc3_gadget_ep_cmd_params params;
+ int ret;
+
+ dep = dwc->eps[epnum];
+
+ if (!(dep->flags & DWC3_EP_STALL))
+ continue;
+
+ dep->flags &= ~DWC3_EP_STALL;
+
+ memset(&params, 0, sizeof(params));
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_CLEARSTALL, &params);
+ WARN_ON_ONCE(ret);
+ }
+}
+
+static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
+{
+ dev_vdbg(dwc->dev, "%s\n", __func__);
+#if 0
+ XXX
+ U1/U2 is powersave optimization. Skip it for now. Anyway we need to
+ enable it before we can disable it.
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_INITU1ENA;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ reg &= ~DWC3_DCTL_INITU2ENA;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+#endif
+
+ dwc3_stop_active_transfers(dwc);
+ dwc3_disconnect_gadget(dwc);
+ dwc->start_config_issued = false;
+
+ dwc->gadget.speed = USB_SPEED_UNKNOWN;
+}
+
+static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+
+ if (on)
+ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+ else
+ reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+}
+
+static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+
+ if (on)
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ else
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+}
+
+static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ dev_vdbg(dwc->dev, "%s\n", __func__);
+
+ /* Enable PHYs */
+ dwc3_gadget_usb2_phy_power(dwc, true);
+ dwc3_gadget_usb3_phy_power(dwc, true);
+
+ if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
+ dwc3_disconnect_gadget(dwc);
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_TSTCTRL_MASK;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ dwc3_stop_active_transfers(dwc);
+ dwc3_clear_stall_all_ep(dwc);
+ dwc->start_config_issued = false;
+
+ /* Reset device address to zero */
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~(DWC3_DCFG_DEVADDR_MASK);
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+}
+
+static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
+{
+ u32 reg;
+ u32 usb30_clock = DWC3_GCTL_CLK_BUS;
+
+ /*
+ * We change the clock only at SS but I dunno why I would want to do
+ * this. Maybe it becomes part of the power saving plan.
+ */
+
+ if (speed != DWC3_DSTS_SUPERSPEED)
+ return;
+
+ /*
+ * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
+ * each time on Connect Done.
+ */
+ if (!usb30_clock)
+ return;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+}
+
+static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
+{
+ switch (speed) {
+ case USB_SPEED_SUPER:
+ dwc3_gadget_usb2_phy_power(dwc, false);
+ break;
+ case USB_SPEED_HIGH:
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ dwc3_gadget_usb3_phy_power(dwc, false);
+ break;
+ }
+}
+
+static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_ep *dep;
+ int ret;
+ u32 reg;
+ u8 speed;
+
+ dev_vdbg(dwc->dev, "%s\n", __func__);
+
+ memset(&params, 0x00, sizeof(params));
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ speed = reg & DWC3_DSTS_CONNECTSPD;
+ dwc->speed = speed;
+
+ dwc3_update_ram_clk_sel(dwc, speed);
+
+ switch (speed) {
+ case DWC3_DCFG_SUPERSPEED:
+ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+ dwc->gadget.ep0->maxpacket = 512;
+ dwc->gadget.speed = USB_SPEED_SUPER;
+ break;
+ case DWC3_DCFG_HIGHSPEED:
+ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
+ dwc->gadget.ep0->maxpacket = 64;
+ dwc->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case DWC3_DCFG_FULLSPEED2:
+ case DWC3_DCFG_FULLSPEED1:
+ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
+ dwc->gadget.ep0->maxpacket = 64;
+ dwc->gadget.speed = USB_SPEED_FULL;
+ break;
+ case DWC3_DCFG_LOWSPEED:
+ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
+ dwc->gadget.ep0->maxpacket = 8;
+ dwc->gadget.speed = USB_SPEED_LOW;
+ break;
+ }
+
+ /* Disable unneded PHY */
+ dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);
+
+ dep = dwc->eps[0];
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ return;
+ }
+
+ dep = dwc->eps[1];
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ return;
+ }
+
+ /*
+ * Configure PHY via GUSB3PIPECTLn if required.
+ *
+ * Update GTXFIFOSIZn
+ *
+ * In both cases reset values should be sufficient.
+ */
+}
+
+static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
+{
+ dev_vdbg(dwc->dev, "%s\n", __func__);
+
+ /*
+ * TODO take core out of low power mode when that's
+ * implemented.
+ */
+
+ dwc->gadget_driver->resume(&dwc->gadget);
+}
+
+static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
+ unsigned int evtinfo)
+{
+ /* The fith bit says SuperSpeed yes or no. */
+ dwc->link_state = evtinfo & DWC3_LINK_STATE_MASK;
+
+ dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
+}
+
+static void dwc3_gadget_interrupt(struct dwc3 *dwc,
+ const struct dwc3_event_devt *event)
+{
+ switch (event->type) {
+ case DWC3_DEVICE_EVENT_DISCONNECT:
+ dwc3_gadget_disconnect_interrupt(dwc);
+ break;
+ case DWC3_DEVICE_EVENT_RESET:
+ dwc3_gadget_reset_interrupt(dwc);
+ break;
+ case DWC3_DEVICE_EVENT_CONNECT_DONE:
+ dwc3_gadget_conndone_interrupt(dwc);
+ break;
+ case DWC3_DEVICE_EVENT_WAKEUP:
+ dwc3_gadget_wakeup_interrupt(dwc);
+ break;
+ case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
+ dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
+ break;
+ case DWC3_DEVICE_EVENT_EOPF:
+ dev_vdbg(dwc->dev, "End of Periodic Frame\n");
+ break;
+ case DWC3_DEVICE_EVENT_SOF:
+ dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
+ break;
+ case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
+ dev_vdbg(dwc->dev, "Erratic Error\n");
+ break;
+ case DWC3_DEVICE_EVENT_CMD_CMPL:
+ dev_vdbg(dwc->dev, "Command Complete\n");
+ break;
+ case DWC3_DEVICE_EVENT_OVERFLOW:
+ dev_vdbg(dwc->dev, "Overflow\n");
+ break;
+ default:
+ dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
+ }
+}
+
+static void dwc3_process_event_entry(struct dwc3 *dwc,
+ const union dwc3_event *event)
+{
+ /* Endpoint IRQ, handle it and return early */
+ if (event->type.is_devspec == 0) {
+ /* depevt */
+ return dwc3_endpoint_interrupt(dwc, &event->depevt);
+ }
+
+ switch (event->type.type) {
+ case DWC3_EVENT_TYPE_DEV:
+ dwc3_gadget_interrupt(dwc, &event->devt);
+ break;
+ /* REVISIT what to do with Carkit and I2C events ? */
+ default:
+ dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
+ }
+}
+
+static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
+{
+ struct dwc3_event_buffer *evt;
+ int left;
+ u32 count;
+
+ count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
+ count &= DWC3_GEVNTCOUNT_MASK;
+ if (!count)
+ return IRQ_NONE;
+
+ evt = dwc->ev_buffs[buf];
+ left = count;
+
+ while (left > 0) {
+ union dwc3_event event;
+
+ memcpy(&event.raw, (evt->buf + evt->lpos), sizeof(event.raw));
+ dwc3_process_event_entry(dwc, &event);
+ /*
+ * XXX we wrap around correctly to the next entry as almost all
+ * entries are 4 bytes in size. There is one entry which has 12
+ * bytes which is a regular entry followed by 8 bytes data. ATM
+ * I don't know how things are organized if were get next to the
+ * a boundary so I worry about that once we try to handle that.
+ */
+ evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
+ left -= 4;
+
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
+{
+ struct dwc3 *dwc = _dwc;
+ int i;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&dwc->lock);
+
+ for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
+ irqreturn_t status;
+
+ status = dwc3_process_event_buf(dwc, i);
+ if (status == IRQ_HANDLED)
+ ret = status;
+ }
+
+ spin_unlock(&dwc->lock);
+
+ return ret;
+}
+
+/**
+ * dwc3_gadget_init - Initializes gadget related registers
+ * @dwc: Pointer to out controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+int __devinit dwc3_gadget_init(struct dwc3 *dwc)
+{
+ u32 reg;
+ int ret;
+ int irq;
+
+ dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+ &dwc->ctrl_req_addr, GFP_KERNEL);
+ if (!dwc->ctrl_req) {
+ dev_err(dwc->dev, "failed to allocate ctrl request\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ &dwc->ep0_trb_addr, GFP_KERNEL);
+ if (!dwc->ep0_trb) {
+ dev_err(dwc->dev, "failed to allocate ep0 trb\n");
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ dwc->setup_buf = dma_alloc_coherent(dwc->dev,
+ sizeof(*dwc->setup_buf) * 2,
+ &dwc->setup_buf_addr, GFP_KERNEL);
+ if (!dwc->setup_buf) {
+ dev_err(dwc->dev, "failed to allocate setup buffer\n");
+ ret = -ENOMEM;
+ goto err2;
+ }
+
+ dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
+ 512, &dwc->ep0_bounce_addr, GFP_KERNEL);
+ if (!dwc->ep0_bounce) {
+ dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
+ ret = -ENOMEM;
+ goto err3;
+ }
+
+ dev_set_name(&dwc->gadget.dev, "gadget");
+
+ dwc->gadget.ops = &dwc3_gadget_ops;
+ dwc->gadget.is_dualspeed = true;
+ dwc->gadget.speed = USB_SPEED_UNKNOWN;
+ dwc->gadget.dev.parent = dwc->dev;
+
+ dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
+
+ dwc->gadget.dev.dma_parms = dwc->dev->dma_parms;
+ dwc->gadget.dev.dma_mask = dwc->dev->dma_mask;
+ dwc->gadget.dev.release = dwc3_gadget_release;
+ dwc->gadget.name = "dwc3-gadget";
+
+ /*
+ * REVISIT: Here we should clear all pending IRQs to be
+ * sure we're starting from a well known location.
+ */
+
+ ret = dwc3_gadget_init_endpoints(dwc);
+ if (ret)
+ goto err4;
+
+ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+
+ ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
+ "dwc3", dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+ irq, ret);
+ goto err5;
+ }
+
+ /* Enable all but Start and End of Frame IRQs */
+ reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
+ DWC3_DEVTEN_EVNTOVERFLOWEN |
+ DWC3_DEVTEN_CMDCMPLTEN |
+ DWC3_DEVTEN_ERRTICERREN |
+ DWC3_DEVTEN_WKUPEVTEN |
+ DWC3_DEVTEN_ULSTCNGEN |
+ DWC3_DEVTEN_CONNECTDONEEN |
+ DWC3_DEVTEN_USBRSTEN |
+ DWC3_DEVTEN_DISCONNEVTEN);
+ dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+
+ ret = device_register(&dwc->gadget.dev);
+ if (ret) {
+ dev_err(dwc->dev, "failed to register gadget device\n");
+ put_device(&dwc->gadget.dev);
+ goto err6;
+ }
+
+ ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
+ if (ret) {
+ dev_err(dwc->dev, "failed to register udc\n");
+ goto err7;
+ }
+
+ return 0;
+
+err7:
+ device_unregister(&dwc->gadget.dev);
+
+err6:
+ dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
+ free_irq(irq, dwc);
+
+err5:
+ dwc3_gadget_free_endpoints(dwc);
+
+err4:
+ dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
+ dwc->ep0_bounce_addr);
+
+err3:
+ dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
+ dwc->setup_buf, dwc->setup_buf_addr);
+
+err2:
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ dwc->ep0_trb, dwc->ep0_trb_addr);
+
+err1:
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+ dwc->ctrl_req, dwc->ctrl_req_addr);
+
+err0:
+ return ret;
+}
+
+void dwc3_gadget_exit(struct dwc3 *dwc)
+{
+ int irq;
+ int i;
+
+ usb_del_gadget_udc(&dwc->gadget);
+ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+
+ dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
+ free_irq(irq, dwc);
+
+ for (i = 0; i < ARRAY_SIZE(dwc->eps); i++)
+ __dwc3_gadget_ep_disable(dwc->eps[i]);
+
+ dwc3_gadget_free_endpoints(dwc);
+
+ dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
+ dwc->ep0_bounce_addr);
+
+ dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
+ dwc->setup_buf, dwc->setup_buf_addr);
+
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ dwc->ep0_trb, dwc->ep0_trb_addr);
+
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+ dwc->ctrl_req, dwc->ctrl_req_addr);
+
+ device_unregister(&dwc->gadget.dev);
+}
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
new file mode 100644
index 00000000000..71145a449d9
--- /dev/null
+++ b/drivers/usb/dwc3/gadget.h
@@ -0,0 +1,211 @@
+/**
+ * gadget.h - DesignWare USB3 DRD Gadget Header
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DRIVERS_USB_DWC3_GADGET_H
+#define __DRIVERS_USB_DWC3_GADGET_H
+
+#include <linux/list.h>
+#include <linux/usb/gadget.h>
+#include "io.h"
+
+struct dwc3;
+#define to_dwc3_ep(ep) (container_of(ep, struct dwc3_ep, endpoint))
+#define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget))
+
+/* DEPCFG parameter 1 */
+#define DWC3_DEPCFG_INT_NUM(n) ((n) << 0)
+#define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8)
+#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9)
+#define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10)
+#define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11)
+#define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13)
+#define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16)
+#define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24)
+#define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25)
+#define DWC3_DEPCFG_BULK_BASED (1 << 30)
+#define DWC3_DEPCFG_FIFO_BASED (1 << 31)
+
+/* DEPCFG parameter 0 */
+#define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1)
+#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3)
+#define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17)
+#define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22)
+#define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26)
+#define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31)
+
+/* DEPXFERCFG parameter 0 */
+#define DWC3_DEPXFERCFG_NUM_XFER_RES(n) ((n) & 0xffff)
+
+struct dwc3_gadget_ep_cmd_params {
+ u32 param2;
+ u32 param1;
+ u32 param0;
+};
+
+/* -------------------------------------------------------------------------- */
+
+struct dwc3_request {
+ struct usb_request request;
+ struct list_head list;
+ struct dwc3_ep *dep;
+
+ u8 epnum;
+ struct dwc3_trb_hw *trb;
+ dma_addr_t trb_dma;
+
+ unsigned direction:1;
+ unsigned mapped:1;
+ unsigned queued:1;
+};
+#define to_dwc3_request(r) (container_of(r, struct dwc3_request, request))
+
+static inline struct dwc3_request *next_request(struct list_head *list)
+{
+ if (list_empty(list))
+ return NULL;
+
+ return list_first_entry(list, struct dwc3_request, list);
+}
+
+static inline void dwc3_gadget_move_request_queued(struct dwc3_request *req)
+{
+ struct dwc3_ep *dep = req->dep;
+
+ req->queued = true;
+ list_move_tail(&req->list, &dep->req_queued);
+}
+
+#if defined(CONFIG_USB_GADGET_DWC3) || defined(CONFIG_USB_GADGET_DWC3_MODULE)
+int dwc3_gadget_init(struct dwc3 *dwc);
+void dwc3_gadget_exit(struct dwc3 *dwc);
+#else
+static inline int dwc3_gadget_init(struct dwc3 *dwc) { return 0; }
+static inline void dwc3_gadget_exit(struct dwc3 *dwc) { }
+static inline int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
+ unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
+{
+ return 0;
+}
+#endif
+
+void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ int status);
+
+void dwc3_ep0_interrupt(struct dwc3 *dwc, const struct dwc3_event_depevt *event);
+void dwc3_ep0_out_start(struct dwc3 *dwc);
+int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
+ gfp_t gfp_flags);
+int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value);
+int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
+ unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
+void dwc3_map_buffer_to_dma(struct dwc3_request *req);
+void dwc3_unmap_buffer_from_dma(struct dwc3_request *req);
+
+/**
+ * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
+ * @dwc: DesignWare USB3 Pointer
+ * @number: DWC endpoint number
+ *
+ * Caller should take care of locking
+ */
+static inline u32 dwc3_gadget_ep_get_transfer_index(struct dwc3 *dwc, u8 number)
+{
+ u32 res_id;
+
+ res_id = dwc3_readl(dwc->regs, DWC3_DEPCMD(number));
+
+ return DWC3_DEPCMD_GET_RSC_IDX(res_id);
+}
+
+/**
+ * dwc3_gadget_event_string - returns event name
+ * @event: the event code
+ */
+static inline const char *dwc3_gadget_event_string(u8 event)
+{
+ switch (event) {
+ case DWC3_DEVICE_EVENT_DISCONNECT:
+ return "Disconnect";
+ case DWC3_DEVICE_EVENT_RESET:
+ return "Reset";
+ case DWC3_DEVICE_EVENT_CONNECT_DONE:
+ return "Connection Done";
+ case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
+ return "Link Status Change";
+ case DWC3_DEVICE_EVENT_WAKEUP:
+ return "WakeUp";
+ case DWC3_DEVICE_EVENT_EOPF:
+ return "End-Of-Frame";
+ case DWC3_DEVICE_EVENT_SOF:
+ return "Start-Of-Frame";
+ case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
+ return "Erratic Error";
+ case DWC3_DEVICE_EVENT_CMD_CMPL:
+ return "Command Complete";
+ case DWC3_DEVICE_EVENT_OVERFLOW:
+ return "Overflow";
+ }
+
+ return "UNKNOWN";
+}
+
+/**
+ * dwc3_ep_event_string - returns event name
+ * @event: then event code
+ */
+static inline const char *dwc3_ep_event_string(u8 event)
+{
+ switch (event) {
+ case DWC3_DEPEVT_XFERCOMPLETE:
+ return "Transfer Complete";
+ case DWC3_DEPEVT_XFERINPROGRESS:
+ return "Transfer In-Progress";
+ case DWC3_DEPEVT_XFERNOTREADY:
+ return "Transfer Not Ready";
+ case DWC3_DEPEVT_RXTXFIFOEVT:
+ return "FIFO";
+ case DWC3_DEPEVT_STREAMEVT:
+ return "Stream";
+ case DWC3_DEPEVT_EPCMDCMPLT:
+ return "Endpoint Command Complete";
+ }
+
+ return "UNKNOWN";
+}
+
+#endif /* __DRIVERS_USB_DWC3_GADGET_H */
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
new file mode 100644
index 00000000000..bc957db1ea4
--- /dev/null
+++ b/drivers/usb/dwc3/io.h
@@ -0,0 +1,54 @@
+/**
+ * io.h - DesignWare USB3 DRD IO Header
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DRIVERS_USB_DWC3_IO_H
+#define __DRIVERS_USB_DWC3_IO_H
+
+#include <asm/io.h>
+
+static inline u32 dwc3_readl(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value)
+{
+ writel(value, base + offset);
+}
+
+#endif /* __DRIVERS_USB_DWC3_IO_H */
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 5a084b9cfa3..b21cd376c11 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -96,6 +96,22 @@ config USB_GADGET_VBUS_DRAW
This value will be used except for system-specific gadget
drivers that have more specific information.
+config USB_GADGET_STORAGE_NUM_BUFFERS
+ int "Number of storage pipeline buffers"
+ range 2 4
+ default 2
+ help
+ Usually 2 buffers are enough to establish a good buffering
+ pipeline. The number may be increased in order to compensate
+ for a bursty VFS behaviour. For instance there may be CPU wake up
+ latencies that makes the VFS to appear bursty in a system with
+ an CPU on-demand governor. Especially if DMA is doing IO to
+ offload the CPU. In this case the CPU will go into power
+ save often and spin up occasionally to move data within VFS.
+ If selecting USB_GADGET_DEBUG_FILES this value may be set by
+ a module parameter as well.
+ If unsure, say 2.
+
#
# USB Peripheral Controller Support
#
@@ -255,12 +271,11 @@ config USB_S3C_HSOTG
integrated into the S3C64XX series SoC.
config USB_IMX
- tristate "Freescale IMX USB Peripheral Controller"
- depends on ARCH_MX1
+ tristate "Freescale i.MX1 USB Peripheral Controller"
+ depends on ARCH_MXC
help
- Freescale's IMX series include an integrated full speed
- USB 1.1 device controller. The controller in the IMX series
- is register-compatible.
+ Freescale's i.MX1 includes an integrated full speed
+ USB 1.1 device controller.
It has Six fixed-function endpoints, as well as endpoint
zero (for control transfers).
@@ -303,6 +318,18 @@ config USB_PXA_U2O
PXA9xx Processor series include a high speed USB2.0 device
controller, which support high speed and full speed USB peripheral.
+config USB_GADGET_DWC3
+ tristate "DesignWare USB3.0 (DRD) Controller"
+ depends on USB_DWC3
+ select USB_GADGET_DUALSPEED
+ select USB_GADGET_SUPERSPEED
+ help
+ DesignWare USB3.0 controller is a SuperSpeed USB3.0 Controller
+ which can be configured for peripheral-only, host-only, hub-only
+ and Dual-Role operation. This Controller was first integrated into
+ the OMAP5 series of processors. More information about the OMAP5
+ version of this controller, refer to http://www.ti.com/omap5.
+
#
# Controllers available in both integrated and discrete versions
#
@@ -846,6 +873,16 @@ config USB_G_NOKIA
It's only really useful for N900 hardware. If you're building
a kernel for N900, say Y or M here. If unsure, say N.
+config USB_G_ACM_MS
+ tristate "CDC Composite Device (ACM and mass storage)"
+ depends on BLOCK
+ help
+ This driver provides two functions in one configuration:
+ a mass storage, and a CDC ACM (serial port) link.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "g_acm_ms".
+
config USB_G_MULTI
tristate "Multifunction Composite Gadget (EXPERIMENTAL)"
depends on BLOCK && NET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 9ba725af4a0..b54ac619089 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -28,7 +28,7 @@ obj-$(CONFIG_USB_S3C_HSUDC) += s3c-hsudc.o
obj-$(CONFIG_USB_LANGWELL) += langwell_udc.o
obj-$(CONFIG_USB_EG20T) += pch_udc.o
obj-$(CONFIG_USB_PXA_U2O) += mv_udc.o
-mv_udc-y := mv_udc_core.o mv_udc_phy.o
+mv_udc-y := mv_udc_core.o
obj-$(CONFIG_USB_CI13XXX_MSM) += ci13xxx_msm.o
obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o
@@ -51,6 +51,7 @@ g_dbgp-y := dbgp.o
g_nokia-y := nokia.o
g_webcam-y := webcam.o
g_ncm-y := ncm.o
+g_acm_ms-y := acm_ms.o
obj-$(CONFIG_USB_ZERO) += g_zero.o
obj-$(CONFIG_USB_AUDIO) += g_audio.o
@@ -69,3 +70,4 @@ obj-$(CONFIG_USB_G_MULTI) += g_multi.o
obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o
obj-$(CONFIG_USB_G_NCM) += g_ncm.o
+obj-$(CONFIG_USB_G_ACM_MS) += g_acm_ms.o
diff --git a/drivers/usb/gadget/acm_ms.c b/drivers/usb/gadget/acm_ms.c
new file mode 100644
index 00000000000..fdb7aec3bd0
--- /dev/null
+++ b/drivers/usb/gadget/acm_ms.c
@@ -0,0 +1,256 @@
+/*
+ * acm_ms.c -- Composite driver, with ACM and mass storage support
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: David Brownell
+ * Modified: Klaus Schwarzkopf <schwarzkopf@sensortherm.de>
+ *
+ * Heavily based on multi.c and cdc2.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+
+#include "u_serial.h"
+
+#define DRIVER_DESC "Composite Gadget (ACM + MS)"
+#define DRIVER_VERSION "2011/10/10"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
+ * Instead: allocate your own, using normal USB-IF procedures.
+ */
+#define ACM_MS_VENDOR_NUM 0x1d6b /* Linux Foundation */
+#define ACM_MS_PRODUCT_NUM 0x0106 /* Composite Gadget: ACM + MS*/
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "u_serial.c"
+#include "f_acm.c"
+#include "f_mass_storage.c"
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = sizeof device_desc,
+ .bDescriptorType = USB_DT_DEVICE,
+
+ .bcdUSB = cpu_to_le16(0x0200),
+
+ .bDeviceClass = USB_CLASS_MISC /* 0xEF */,
+ .bDeviceSubClass = 2,
+ .bDeviceProtocol = 1,
+
+ /* .bMaxPacketSize0 = f(hardware) */
+
+ /* Vendor and product id can be overridden by module parameters. */
+ .idVendor = cpu_to_le16(ACM_MS_VENDOR_NUM),
+ .idProduct = cpu_to_le16(ACM_MS_PRODUCT_NUM),
+ /* .bcdDevice = f(hardware) */
+ /* .iManufacturer = DYNAMIC */
+ /* .iProduct = DYNAMIC */
+ /* NO SERIAL NUMBER */
+ /*.bNumConfigurations = DYNAMIC*/
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+ .bLength = sizeof otg_descriptor,
+ .bDescriptorType = USB_DT_OTG,
+
+ /*
+ * REVISIT SRP-only hardware is possible, although
+ * it would not be called "OTG" ...
+ */
+ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+ (struct usb_descriptor_header *) &otg_descriptor,
+ NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer,
+ [STRING_PRODUCT_IDX].s = DRIVER_DESC,
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+/****************************** Configurations ******************************/
+
+static struct fsg_module_parameters fsg_mod_data = { .stall = 1 };
+FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
+
+static struct fsg_common fsg_common;
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * We _always_ have both ACM and mass storage functions.
+ */
+static int __init acm_ms_do_config(struct usb_configuration *c)
+{
+ int status;
+
+ if (gadget_is_otg(c->cdev->gadget)) {
+ c->descriptors = otg_desc;
+ c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ }
+
+
+ status = acm_bind_config(c, 0);
+ if (status < 0)
+ return status;
+
+ status = fsg_bind_config(c->cdev, c, &fsg_common);
+ if (status < 0)
+ return status;
+
+ return 0;
+}
+
+static struct usb_configuration acm_ms_config_driver = {
+ .label = DRIVER_DESC,
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init acm_ms_bind(struct usb_composite_dev *cdev)
+{
+ int gcnum;
+ struct usb_gadget *gadget = cdev->gadget;
+ int status;
+ void *retp;
+
+ /* set up serial link layer */
+ status = gserial_setup(cdev->gadget, 1);
+ if (status < 0)
+ return status;
+
+ /* set up mass storage function */
+ retp = fsg_common_from_params(&fsg_common, cdev, &fsg_mod_data);
+ if (IS_ERR(retp)) {
+ status = PTR_ERR(retp);
+ goto fail0;
+ }
+
+ /* set bcdDevice */
+ gcnum = usb_gadget_controller_number(gadget);
+ if (gcnum >= 0) {
+ device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+ } else {
+ WARNING(cdev, "controller '%s' not recognized; trying %s\n",
+ gadget->name,
+ acm_ms_config_driver.label);
+ device_desc.bcdDevice =
+ cpu_to_le16(0x0300 | 0x0099);
+ }
+
+ /*
+ * Allocate string descriptor numbers ... note that string
+ * contents can be overridden by the composite_dev glue.
+ */
+
+ /* device descriptor strings: manufacturer, product */
+ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+ init_utsname()->sysname, init_utsname()->release,
+ gadget->name);
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto fail1;
+ strings_dev[STRING_MANUFACTURER_IDX].id = status;
+ device_desc.iManufacturer = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto fail1;
+ strings_dev[STRING_PRODUCT_IDX].id = status;
+ device_desc.iProduct = status;
+
+ /* register our configuration */
+ status = usb_add_config(cdev, &acm_ms_config_driver, acm_ms_do_config);
+ if (status < 0)
+ goto fail1;
+
+ dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
+ DRIVER_DESC);
+ fsg_common_put(&fsg_common);
+ return 0;
+
+ /* error recovery */
+fail1:
+ fsg_common_put(&fsg_common);
+fail0:
+ gserial_cleanup();
+ return status;
+}
+
+static int __exit acm_ms_unbind(struct usb_composite_dev *cdev)
+{
+ gserial_cleanup();
+
+ return 0;
+}
+
+static struct usb_composite_driver acm_ms_driver = {
+ .name = "g_acm_ms",
+ .dev = &device_desc,
+ .strings = dev_strings,
+ .unbind = __exit_p(acm_ms_unbind),
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Klaus Schwarzkopf <schwarzkopf@sensortherm.de>");
+MODULE_LICENSE("GPL v2");
+
+static int __init init(void)
+{
+ return usb_composite_probe(&acm_ms_driver, acm_ms_bind);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+ usb_composite_unregister(&acm_ms_driver);
+}
+module_exit(cleanup);
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 70f2b376c86..4730016d7cd 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -8,15 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
@@ -354,7 +345,7 @@ udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
writel(tmp, &dev->ep[ep->num].regs->ctl);
/* set max packet size */
- maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+ maxpacket = usb_endpoint_maxp(desc);
tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
ep->ep.maxpacket = maxpacket;
@@ -3014,13 +3005,8 @@ __acquires(dev->lock)
/* link up all endpoints */
udc_setup_endpoints(dev);
- if (dev->gadget.speed == USB_SPEED_HIGH) {
- dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
- "high");
- } else if (dev->gadget.speed == USB_SPEED_FULL) {
- dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
- "full");
- }
+ dev_info(&dev->pdev->dev, "Connect: %s\n",
+ usb_speed_string(dev->gadget.speed));
/* init ep 0 */
activate_control_endpoints(dev);
diff --git a/drivers/usb/gadget/amd5536udc.h b/drivers/usb/gadget/amd5536udc.h
index 1d1c7543468..f87e29c6532 100644
--- a/drivers/usb/gadget/amd5536udc.h
+++ b/drivers/usb/gadget/amd5536udc.h
@@ -8,15 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef AMD5536UDC_H
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index ddb118a7680..8efe0fa9228 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -9,16 +9,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
*/
#undef VERBOSE_DEBUG
@@ -460,7 +450,7 @@ static void nuke(struct at91_ep *ep, int status)
{
struct at91_request *req;
- // terminer chaque requete dans la queue
+ /* terminate any request in the queue */
ep->stopped = 1;
if (list_empty(&ep->queue))
return;
@@ -487,7 +477,7 @@ static int at91_ep_enable(struct usb_ep *_ep,
|| !desc || ep->desc
|| _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT
- || (maxpacket = le16_to_cpu(desc->wMaxPacketSize)) == 0
+ || (maxpacket = usb_endpoint_maxp(desc)) == 0
|| maxpacket > ep->maxpacket) {
DBG("bad ep or descriptor\n");
return -EINVAL;
@@ -788,7 +778,7 @@ static const struct usb_ep_ops at91_ep_ops = {
.queue = at91_ep_queue,
.dequeue = at91_ep_dequeue,
.set_halt = at91_ep_set_halt,
- // there's only imprecise fifo status reporting
+ /* there's only imprecise fifo status reporting */
};
/*-------------------------------------------------------------------------*/
@@ -846,7 +836,7 @@ static void udc_reinit(struct at91_udc *udc)
ep->fifo_bank = 0;
ep->ep.maxpacket = ep->maxpacket;
ep->creg = (void __iomem *) udc->udp_baseaddr + AT91_UDP_CSR(i);
- // initialiser une queue par endpoint
+ /* initialize one queue per endpoint */
INIT_LIST_HEAD(&ep->queue);
}
}
@@ -952,7 +942,7 @@ static int at91_vbus_session(struct usb_gadget *gadget, int is_active)
struct at91_udc *udc = to_udc(gadget);
unsigned long flags;
- // VDBG("vbus %s\n", is_active ? "on" : "off");
+ /* VDBG("vbus %s\n", is_active ? "on" : "off"); */
spin_lock_irqsave(&udc->lock, flags);
udc->vbus = (is_active != 0);
if (udc->driver)
@@ -1003,7 +993,7 @@ static const struct usb_gadget_ops at91_udc_ops = {
* VBUS-powered devices may also also want to support bigger
* power budgets after an appropriate SET_CONFIGURATION.
*/
- // .vbus_power = at91_vbus_power,
+ /* .vbus_power = at91_vbus_power, */
};
/*-------------------------------------------------------------------------*/
@@ -1072,7 +1062,7 @@ static void handle_setup(struct at91_udc *udc, struct at91_ep *ep, u32 csr)
ep->is_in = 0;
}
} else {
- // REVISIT this happens sometimes under load; why??
+ /* REVISIT this happens sometimes under load; why?? */
ERR("SETUP len %d, csr %08x\n", rxcount, csr);
status = -EINVAL;
}
@@ -1451,7 +1441,7 @@ static irqreturn_t at91_udc_irq (int irq, void *_udc)
at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXSUSP);
at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_RXRSM);
at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXSUSP);
- // VDBG("bus suspend\n");
+ /* VDBG("bus suspend\n"); */
if (udc->suspended)
continue;
udc->suspended = 1;
@@ -1473,7 +1463,7 @@ static irqreturn_t at91_udc_irq (int irq, void *_udc)
at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXRSM);
at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_RXSUSP);
at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXRSM);
- // VDBG("bus resume\n");
+ /* VDBG("bus resume\n"); */
if (!udc->suspended)
continue;
udc->suspended = 0;
@@ -1820,7 +1810,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
/* request UDC and maybe VBUS irqs */
udc->udp_irq = platform_get_irq(pdev, 0);
retval = request_irq(udc->udp_irq, at91_udc_irq,
- IRQF_DISABLED, driver_name, udc);
+ 0, driver_name, udc);
if (retval < 0) {
DBG("request irq %d failed\n", udc->udp_irq);
goto fail1;
@@ -1848,7 +1838,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
jiffies + VBUS_POLL_TIMEOUT);
} else {
if (request_irq(udc->board.vbus_pin, at91_vbus_irq,
- IRQF_DISABLED, driver_name, udc)) {
+ 0, driver_name, udc)) {
DBG("request vbus irq %d failed\n",
udc->board.vbus_pin);
retval = -EBUSY;
diff --git a/drivers/usb/gadget/at91_udc.h b/drivers/usb/gadget/at91_udc.h
index 108ca54f909..3c0315b86ac 100644
--- a/drivers/usb/gadget/at91_udc.h
+++ b/drivers/usb/gadget/at91_udc.h
@@ -7,16 +7,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef AT91_UDC_H
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 5b1665eb1be..271a9d87360 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -527,7 +527,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
- maxpacket = le16_to_cpu(desc->wMaxPacketSize) & 0x7ff;
+ maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
|| ep->index == 0
@@ -571,7 +571,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
* Bits 11:12 specify number of _additional_
* transactions per microframe.
*/
- nr_trans = ((le16_to_cpu(desc->wMaxPacketSize) >> 11) & 3) + 1;
+ nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1;
if (nr_trans > 3)
return -EINVAL;
@@ -1718,13 +1718,12 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
spin_lock(&udc->lock);
}
- if (status & USBA_HIGH_SPEED) {
- DBG(DBG_BUS, "High-speed bus reset detected\n");
+ if (status & USBA_HIGH_SPEED)
udc->gadget.speed = USB_SPEED_HIGH;
- } else {
- DBG(DBG_BUS, "Full-speed bus reset detected\n");
+ else
udc->gadget.speed = USB_SPEED_FULL;
- }
+ DBG(DBG_BUS, "%s bus reset detected\n",
+ usb_speed_string(udc->gadget.speed));
ep0 = &usba_ep[0];
ep0->desc = &usba_ep0_desc;
diff --git a/drivers/usb/gadget/cdc2.c b/drivers/usb/gadget/cdc2.c
index b1c1afbb875..672674c2fb3 100644
--- a/drivers/usb/gadget/cdc2.c
+++ b/drivers/usb/gadget/cdc2.c
@@ -8,15 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 470981ad6f7..4eedfe55715 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -3,17 +3,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 1265a8502ea..83428f56253 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -2101,7 +2101,7 @@ static int ep_enable(struct usb_ep *ep,
mEp->num = usb_endpoint_num(desc);
mEp->type = usb_endpoint_type(desc);
- mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);
+ mEp->ep.maxpacket = usb_endpoint_maxp(desc);
dbg_event(_usb_addr(mEp), "ENABLE", 0);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index aef47414f5d..8a5529d214f 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -7,15 +7,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* #define VERBOSE_DEBUG */
@@ -164,7 +155,7 @@ int config_ep_by_speed(struct usb_gadget *g,
ep_found:
/* commit results */
- _ep->maxpacket = le16_to_cpu(chosen_desc->wMaxPacketSize);
+ _ep->maxpacket = usb_endpoint_maxp(chosen_desc);
_ep->desc = chosen_desc;
_ep->comp_desc = NULL;
_ep->maxburst = 0;
@@ -551,9 +542,9 @@ static int bos_desc(struct usb_composite_dev *cdev)
if (cdev->gadget->ops->get_config_params)
cdev->gadget->ops->get_config_params(&dcd_config_params);
else {
- dcd_config_params.bU1devExitLat = USB_DEFULT_U1_DEV_EXIT_LAT;
+ dcd_config_params.bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT;
dcd_config_params.bU2DevExitLat =
- cpu_to_le16(USB_DEFULT_U2_DEV_EXIT_LAT);
+ cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT);
}
ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
@@ -626,25 +617,9 @@ static int set_config(struct usb_composite_dev *cdev,
result = 0;
}
- INFO(cdev, "%s speed config #%d: %s\n",
- ({ char *speed;
- switch (gadget->speed) {
- case USB_SPEED_LOW:
- speed = "low";
- break;
- case USB_SPEED_FULL:
- speed = "full";
- break;
- case USB_SPEED_HIGH:
- speed = "high";
- break;
- case USB_SPEED_SUPER:
- speed = "super";
- break;
- default:
- speed = "?";
- break;
- } ; speed; }), number, c ? c->label : "unconfigured");
+ INFO(cdev, "%s config #%d: %s\n",
+ usb_speed_string(gadget->speed),
+ number, c ? c->label : "unconfigured");
if (!c)
goto done;
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index b2c00133487..7542a72ce51 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -7,15 +7,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/errno.h>
diff --git a/drivers/usb/gadget/dbgp.c b/drivers/usb/gadget/dbgp.c
index 8beefdd3678..f855ecf7a63 100644
--- a/drivers/usb/gadget/dbgp.c
+++ b/drivers/usb/gadget/dbgp.c
@@ -4,7 +4,6 @@
* Copyright (C) 2010 Stephane Duverger
*
* Released under the GPLv2.
- *
*/
/* verbose messages */
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index e755a9d267f..ab8f1b488d5 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -10,15 +10,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
@@ -439,7 +430,7 @@ dummy_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
* maximum packet size.
* For SS devices the wMaxPacketSize is limited by 1024.
*/
- max = le16_to_cpu(desc->wMaxPacketSize) & 0x7ff;
+ max = usb_endpoint_maxp(desc) & 0x7ff;
/* drivers must not request bad settings, since lower levels
* (hardware or its drivers) may not check. some endpoints
@@ -1277,7 +1268,7 @@ static int periodic_bytes (struct dummy *dum, struct dummy_ep *ep)
int tmp;
/* high bandwidth mode */
- tmp = le16_to_cpu(ep->desc->wMaxPacketSize);
+ tmp = usb_endpoint_maxp(ep->desc);
tmp = (tmp >> 11) & 0x03;
tmp *= 8 /* applies to entire frame */;
limit += limit * tmp;
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 7a7e6b7e1fd..596a0b464e6 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -7,16 +7,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/kernel.h>
@@ -158,7 +148,7 @@ ep_matches (
* where it's an output parameter representing the full speed limit.
* the usb spec fixes high speed bulk maxpacket at 512 bytes.
*/
- max = 0x7ff & le16_to_cpu(desc->wMaxPacketSize);
+ max = 0x7ff & usb_endpoint_maxp(desc);
switch (type) {
case USB_ENDPOINT_XFER_INT:
/* INT: limit 64 bytes full speed, 1024 high/super speed */
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index aafc84f33e2..0cd764d5935 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -9,15 +9,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* #define VERBOSE_DEBUG */
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index a9a4eade7e8..ec7ffcd0d0c 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -460,7 +460,7 @@ static int audio_set_endpoint_req(struct usb_function *f,
switch (ctrl->bRequest) {
case UAC_SET_CUR:
- value = 0;
+ value = len;
break;
case UAC_SET_MIN:
@@ -499,7 +499,7 @@ static int audio_get_endpoint_req(struct usb_function *f,
case UAC_GET_MIN:
case UAC_GET_MAX:
case UAC_GET_RES:
- value = 3;
+ value = len;
break;
case UAC_GET_MEM:
break;
@@ -681,17 +681,18 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f)
status = -ENOMEM;
- /* supcard all relevant hardware speeds... we expect that when
+ /* copy descriptors, and track endpoint copies */
+ f->descriptors = usb_copy_descriptors(f_audio_desc);
+
+ /*
+ * support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
*/
-
- /* copy descriptors, and track endpoint copies */
if (gadget_is_dualspeed(c->cdev->gadget)) {
c->highspeed = true;
f->hs_descriptors = usb_copy_descriptors(f_audio_desc);
- } else
- f->descriptors = usb_copy_descriptors(f_audio_desc);
+ }
return 0;
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 3691a0cb946..11c07cb7d33 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -8,15 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* #define VERBOSE_DEBUG */
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 046c6d0e696..1a7b2dd7d40 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -9,15 +9,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index c161a9aaeb7..6b1c20b6c9b 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -12,15 +12,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c
index 83a266bdb40..b2113420b80 100644
--- a/drivers/usb/gadget/f_hid.c
+++ b/drivers/usb/gadget/f_hid.c
@@ -7,15 +7,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/f_loopback.c b/drivers/usb/gadget/f_loopback.c
index ca660d40b11..6d87f288df4 100644
--- a/drivers/usb/gadget/f_loopback.c
+++ b/drivers/usb/gadget/f_loopback.c
@@ -8,15 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* #define VERBOSE_DEBUG */
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 5b933958200..52583a23533 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -112,8 +112,7 @@
* is not loaded (an empty string as "filename" in the fsg_config
* structure causes error). The CD-ROM emulation includes a single
* data track and no audio tracks; hence there need be only one
- * backing file per LUN. Note also that the CD-ROM block length is
- * set to 512 rather than the more common value 2048.
+ * backing file per LUN.
*
*
* MSF includes support for module parameters. If gadget using it
@@ -363,7 +362,7 @@ struct fsg_common {
struct fsg_buffhd *next_buffhd_to_fill;
struct fsg_buffhd *next_buffhd_to_drain;
- struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];
+ struct fsg_buffhd *buffhds;
int cmnd_size;
u8 cmnd[MAX_COMMAND_SIZE];
@@ -745,7 +744,6 @@ static int do_read(struct fsg_common *common)
u32 amount_left;
loff_t file_offset, file_offset_tmp;
unsigned int amount;
- unsigned int partial_page;
ssize_t nread;
/*
@@ -771,7 +769,7 @@ static int do_read(struct fsg_common *common)
curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
return -EINVAL;
}
- file_offset = ((loff_t) lba) << 9;
+ file_offset = ((loff_t) lba) << curlun->blkbits;
/* Carry out the file reads */
amount_left = common->data_size_from_cmnd;
@@ -784,18 +782,10 @@ static int do_read(struct fsg_common *common)
* Try to read the remaining amount.
* But don't read more than the buffer size.
* And don't try to read past the end of the file.
- * Finally, if we're not at a page boundary, don't read past
- * the next page.
- * If this means reading 0 then we were asked to read past
- * the end of file.
*/
amount = min(amount_left, FSG_BUFLEN);
amount = min((loff_t)amount,
curlun->file_length - file_offset);
- partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
- if (partial_page > 0)
- amount = min(amount, (unsigned int)PAGE_CACHE_SIZE -
- partial_page);
/* Wait for the next buffer to become available */
bh = common->next_buffhd_to_fill;
@@ -812,7 +802,8 @@ static int do_read(struct fsg_common *common)
if (amount == 0) {
curlun->sense_data =
SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
- curlun->sense_data_info = file_offset >> 9;
+ curlun->sense_data_info =
+ file_offset >> curlun->blkbits;
curlun->info_valid = 1;
bh->inreq->length = 0;
bh->state = BUF_STATE_FULL;
@@ -835,18 +826,25 @@ static int do_read(struct fsg_common *common)
} else if (nread < amount) {
LDBG(curlun, "partial file read: %d/%u\n",
(int)nread, amount);
- nread -= (nread & 511); /* Round down to a block */
+ nread = round_down(nread, curlun->blksize);
}
file_offset += nread;
amount_left -= nread;
common->residue -= nread;
+
+ /*
+ * Except at the end of the transfer, nread will be
+ * equal to the buffer size, which is divisible by the
+ * bulk-in maxpacket size.
+ */
bh->inreq->length = nread;
bh->state = BUF_STATE_FULL;
/* If an error occurred, report it and its position */
if (nread < amount) {
curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
- curlun->sense_data_info = file_offset >> 9;
+ curlun->sense_data_info =
+ file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
@@ -877,7 +875,6 @@ static int do_write(struct fsg_common *common)
u32 amount_left_to_req, amount_left_to_write;
loff_t usb_offset, file_offset, file_offset_tmp;
unsigned int amount;
- unsigned int partial_page;
ssize_t nwritten;
int rc;
@@ -921,7 +918,7 @@ static int do_write(struct fsg_common *common)
/* Carry out the file writes */
get_some_more = 1;
- file_offset = usb_offset = ((loff_t) lba) << 9;
+ file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
amount_left_to_req = common->data_size_from_cmnd;
amount_left_to_write = common->data_size_from_cmnd;
@@ -933,41 +930,21 @@ static int do_write(struct fsg_common *common)
/*
* Figure out how much we want to get:
- * Try to get the remaining amount.
- * But don't get more than the buffer size.
- * And don't try to go past the end of the file.
- * If we're not at a page boundary,
- * don't go past the next page.
- * If this means getting 0, then we were asked
- * to write past the end of file.
- * Finally, round down to a block boundary.
+ * Try to get the remaining amount,
+ * but not more than the buffer size.
*/
amount = min(amount_left_to_req, FSG_BUFLEN);
- amount = min((loff_t)amount,
- curlun->file_length - usb_offset);
- partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
- if (partial_page > 0)
- amount = min(amount,
- (unsigned int)PAGE_CACHE_SIZE - partial_page);
-
- if (amount == 0) {
+
+ /* Beyond the end of the backing file? */
+ if (usb_offset >= curlun->file_length) {
get_some_more = 0;
curlun->sense_data =
SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
- curlun->sense_data_info = usb_offset >> 9;
+ curlun->sense_data_info =
+ usb_offset >> curlun->blkbits;
curlun->info_valid = 1;
continue;
}
- amount -= amount & 511;
- if (amount == 0) {
-
- /*
- * Why were we were asked to transfer a
- * partial block?
- */
- get_some_more = 0;
- continue;
- }
/* Get the next buffer */
usb_offset += amount;
@@ -977,12 +954,11 @@ static int do_write(struct fsg_common *common)
get_some_more = 0;
/*
- * amount is always divisible by 512, hence by
- * the bulk-out maxpacket size
+ * Except at the end of the transfer, amount will be
+ * equal to the buffer size, which is divisible by
+ * the bulk-out maxpacket size.
*/
- bh->outreq->length = amount;
- bh->bulk_out_intended_length = amount;
- bh->outreq->short_not_ok = 1;
+ set_bulk_out_req_length(common, bh, amount);
if (!start_out_transfer(common, bh))
/* Dunno what to do if common->fsg is NULL */
return -EIO;
@@ -1002,7 +978,8 @@ static int do_write(struct fsg_common *common)
/* Did something go wrong with the transfer? */
if (bh->outreq->status != 0) {
curlun->sense_data = SS_COMMUNICATION_FAILURE;
- curlun->sense_data_info = file_offset >> 9;
+ curlun->sense_data_info =
+ file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
@@ -1016,6 +993,16 @@ static int do_write(struct fsg_common *common)
amount = curlun->file_length - file_offset;
}
+ /* Don't accept excess data. The spec doesn't say
+ * what to do in this case. We'll ignore the error.
+ */
+ amount = min(amount, bh->bulk_out_intended_length);
+
+ /* Don't write a partial block */
+ amount = round_down(amount, curlun->blksize);
+ if (amount == 0)
+ goto empty_write;
+
/* Perform the write */
file_offset_tmp = file_offset;
nwritten = vfs_write(curlun->filp,
@@ -1033,8 +1020,7 @@ static int do_write(struct fsg_common *common)
} else if (nwritten < amount) {
LDBG(curlun, "partial file write: %d/%u\n",
(int)nwritten, amount);
- nwritten -= (nwritten & 511);
- /* Round down to a block */
+ nwritten = round_down(nwritten, curlun->blksize);
}
file_offset += nwritten;
amount_left_to_write -= nwritten;
@@ -1043,13 +1029,15 @@ static int do_write(struct fsg_common *common)
/* If an error occurred, report it and its position */
if (nwritten < amount) {
curlun->sense_data = SS_WRITE_ERROR;
- curlun->sense_data_info = file_offset >> 9;
+ curlun->sense_data_info =
+ file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
+ empty_write:
/* Did the host decide to stop early? */
- if (bh->outreq->actual != bh->outreq->length) {
+ if (bh->outreq->actual < bh->bulk_out_intended_length) {
common->short_packet_received = 1;
break;
}
@@ -1129,8 +1117,8 @@ static int do_verify(struct fsg_common *common)
return -EIO; /* No default reply */
/* Prepare to carry out the file verify */
- amount_left = verification_length << 9;
- file_offset = ((loff_t) lba) << 9;
+ amount_left = verification_length << curlun->blkbits;
+ file_offset = ((loff_t) lba) << curlun->blkbits;
/* Write out all the dirty buffers before invalidating them */
fsg_lun_fsync_sub(curlun);
@@ -1148,8 +1136,6 @@ static int do_verify(struct fsg_common *common)
* Try to read the remaining amount, but not more than
* the buffer size.
* And don't try to read past the end of the file.
- * If this means reading 0 then we were asked to read
- * past the end of file.
*/
amount = min(amount_left, FSG_BUFLEN);
amount = min((loff_t)amount,
@@ -1157,7 +1143,8 @@ static int do_verify(struct fsg_common *common)
if (amount == 0) {
curlun->sense_data =
SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
- curlun->sense_data_info = file_offset >> 9;
+ curlun->sense_data_info =
+ file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
@@ -1179,11 +1166,12 @@ static int do_verify(struct fsg_common *common)
} else if (nread < amount) {
LDBG(curlun, "partial file verify: %d/%u\n",
(int)nread, amount);
- nread -= nread & 511; /* Round down to a sector */
+ nread = round_down(nread, curlun->blksize);
}
if (nread == 0) {
curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
- curlun->sense_data_info = file_offset >> 9;
+ curlun->sense_data_info =
+ file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
@@ -1289,7 +1277,7 @@ static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
/* Max logical block */
- put_unaligned_be32(512, &buf[4]); /* Block length */
+ put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
return 8;
}
@@ -1527,7 +1515,7 @@ static int do_read_format_capacities(struct fsg_common *common,
put_unaligned_be32(curlun->num_sectors, &buf[0]);
/* Number of blocks */
- put_unaligned_be32(512, &buf[4]); /* Block length */
+ put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
buf[4] = 0x02; /* Current capacity */
return 12;
}
@@ -1607,7 +1595,7 @@ static int throw_away_data(struct fsg_common *common)
common->next_buffhd_to_drain = bh->next;
/* A short packet or an error ends everything */
- if (bh->outreq->actual != bh->outreq->length ||
+ if (bh->outreq->actual < bh->bulk_out_intended_length ||
bh->outreq->status != 0) {
raise_exception(common,
FSG_STATE_ABORT_BULK_OUT);
@@ -1623,12 +1611,11 @@ static int throw_away_data(struct fsg_common *common)
amount = min(common->usb_amount_left, FSG_BUFLEN);
/*
- * amount is always divisible by 512, hence by
+ * Except at the end of the transfer, amount will be
+ * equal to the buffer size, which is divisible by
* the bulk-out maxpacket size.
*/
- bh->outreq->length = amount;
- bh->bulk_out_intended_length = amount;
- bh->outreq->short_not_ok = 1;
+ set_bulk_out_req_length(common, bh, amount);
if (!start_out_transfer(common, bh))
/* Dunno what to do if common->fsg is NULL */
return -EIO;
@@ -2022,7 +2009,8 @@ static int do_scsi_command(struct fsg_common *common)
case READ_6:
i = common->cmnd[4];
- common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+ common->data_size_from_cmnd = (i == 0 ? 256 : i) <<
+ common->curlun->blkbits;
reply = check_command(common, 6, DATA_DIR_TO_HOST,
(7<<1) | (1<<4), 1,
"READ(6)");
@@ -2032,7 +2020,8 @@ static int do_scsi_command(struct fsg_common *common)
case READ_10:
common->data_size_from_cmnd =
- get_unaligned_be16(&common->cmnd[7]) << 9;
+ get_unaligned_be16(&common->cmnd[7]) <<
+ common->curlun->blkbits;
reply = check_command(common, 10, DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"READ(10)");
@@ -2042,7 +2031,8 @@ static int do_scsi_command(struct fsg_common *common)
case READ_12:
common->data_size_from_cmnd =
- get_unaligned_be32(&common->cmnd[6]) << 9;
+ get_unaligned_be32(&common->cmnd[6]) <<
+ common->curlun->blkbits;
reply = check_command(common, 12, DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"READ(12)");
@@ -2142,7 +2132,8 @@ static int do_scsi_command(struct fsg_common *common)
case WRITE_6:
i = common->cmnd[4];
- common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+ common->data_size_from_cmnd = (i == 0 ? 256 : i) <<
+ common->curlun->blkbits;
reply = check_command(common, 6, DATA_DIR_FROM_HOST,
(7<<1) | (1<<4), 1,
"WRITE(6)");
@@ -2152,7 +2143,8 @@ static int do_scsi_command(struct fsg_common *common)
case WRITE_10:
common->data_size_from_cmnd =
- get_unaligned_be16(&common->cmnd[7]) << 9;
+ get_unaligned_be16(&common->cmnd[7]) <<
+ common->curlun->blkbits;
reply = check_command(common, 10, DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"WRITE(10)");
@@ -2162,7 +2154,8 @@ static int do_scsi_command(struct fsg_common *common)
case WRITE_12:
common->data_size_from_cmnd =
- get_unaligned_be32(&common->cmnd[6]) << 9;
+ get_unaligned_be32(&common->cmnd[6]) <<
+ common->curlun->blkbits;
reply = check_command(common, 12, DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"WRITE(12)");
@@ -2297,7 +2290,6 @@ static int get_next_command(struct fsg_common *common)
/* Queue a request to read a Bulk-only CBW */
set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
- bh->outreq->short_not_ok = 1;
if (!start_out_transfer(common, bh))
/* Don't know what to do if common->fsg is NULL */
return -EIO;
@@ -2348,7 +2340,7 @@ reset:
if (common->fsg) {
fsg = common->fsg;
- for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ for (i = 0; i < fsg_num_buffers; ++i) {
struct fsg_buffhd *bh = &common->buffhds[i];
if (bh->inreq) {
@@ -2401,12 +2393,11 @@ reset:
goto reset;
fsg->bulk_out->driver_data = common;
fsg->bulk_out_enabled = 1;
- common->bulk_out_maxpacket =
- le16_to_cpu(fsg->bulk_out->desc->wMaxPacketSize);
+ common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc);
clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
/* Allocate the requests */
- for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ for (i = 0; i < fsg_num_buffers; ++i) {
struct fsg_buffhd *bh = &common->buffhds[i];
rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
@@ -2475,7 +2466,7 @@ static void handle_exception(struct fsg_common *common)
/* Cancel all the pending transfers */
if (likely(common->fsg)) {
- for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ for (i = 0; i < fsg_num_buffers; ++i) {
bh = &common->buffhds[i];
if (bh->inreq_busy)
usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
@@ -2487,7 +2478,7 @@ static void handle_exception(struct fsg_common *common)
/* Wait until everything is idle */
for (;;) {
int num_active = 0;
- for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ for (i = 0; i < fsg_num_buffers; ++i) {
bh = &common->buffhds[i];
num_active += bh->inreq_busy + bh->outreq_busy;
}
@@ -2510,7 +2501,7 @@ static void handle_exception(struct fsg_common *common)
*/
spin_lock_irq(&common->lock);
- for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ for (i = 0; i < fsg_num_buffers; ++i) {
bh = &common->buffhds[i];
bh->state = BUF_STATE_EMPTY;
}
@@ -2719,6 +2710,10 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
int nluns, i, rc;
char *pathbuf;
+ rc = fsg_num_buffers_validate();
+ if (rc != 0)
+ return ERR_PTR(rc);
+
/* Find out how many LUNs there should be */
nluns = cfg->nluns;
if (nluns < 1 || nluns > FSG_MAX_LUNS) {
@@ -2737,6 +2732,14 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
common->free_storage_on_release = 0;
}
+ common->buffhds = kcalloc(fsg_num_buffers,
+ sizeof *(common->buffhds), GFP_KERNEL);
+ if (!common->buffhds) {
+ if (common->free_storage_on_release)
+ kfree(common);
+ return ERR_PTR(-ENOMEM);
+ }
+
common->ops = cfg->ops;
common->private_data = cfg->private_data;
@@ -2814,7 +2817,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
/* Data buffers cyclic list */
bh = common->buffhds;
- i = FSG_NUM_BUFFERS;
+ i = fsg_num_buffers;
goto buffhds_first_it;
do {
bh->next = bh + 1;
@@ -2940,12 +2943,13 @@ static void fsg_common_release(struct kref *ref)
{
struct fsg_buffhd *bh = common->buffhds;
- unsigned i = FSG_NUM_BUFFERS;
+ unsigned i = fsg_num_buffers;
do {
kfree(bh->buf);
} while (++bh, --i);
}
+ kfree(common->buffhds);
if (common->free_storage_on_release)
kfree(common);
}
@@ -3019,6 +3023,28 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
}
}
+ if (gadget_is_superspeed(gadget)) {
+ unsigned max_burst;
+
+ /* Calculate bMaxBurst, we know packet size is 1024 */
+ max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15);
+
+ fsg_ss_bulk_in_desc.bEndpointAddress =
+ fsg_fs_bulk_in_desc.bEndpointAddress;
+ fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;
+
+ fsg_ss_bulk_out_desc.bEndpointAddress =
+ fsg_fs_bulk_out_desc.bEndpointAddress;
+ fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
+
+ f->ss_descriptors = usb_copy_descriptors(fsg_ss_function);
+ if (unlikely(!f->ss_descriptors)) {
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->descriptors);
+ return -ENOMEM;
+ }
+ }
+
return 0;
autoconf_fail:
diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
new file mode 100644
index 00000000000..67b222908cf
--- /dev/null
+++ b/drivers/usb/gadget/f_midi.c
@@ -0,0 +1,998 @@
+/*
+ * f_midi.c -- USB MIDI class function driver
+ *
+ * Copyright (C) 2006 Thumtronics Pty Ltd.
+ * Developed for Thumtronics by Grey Innovation
+ * Ben Williamson <ben.williamson@greyinnovation.com>
+ *
+ * Rewritten for the composite framework
+ * Copyright (C) 2011 Daniel Mack <zonque@gmail.com>
+ *
+ * Based on drivers/usb/gadget/f_audio.c,
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * and drivers/usb/gadget/midi.c,
+ * Copyright (C) 2006 Thumtronics Pty Ltd.
+ * Ben Williamson <ben.williamson@greyinnovation.com>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/rawmidi.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/midi.h>
+
+MODULE_AUTHOR("Ben Williamson");
+MODULE_LICENSE("GPL v2");
+
+static const char f_midi_shortname[] = "f_midi";
+static const char f_midi_longname[] = "MIDI Gadget";
+
+/*
+ * We can only handle 16 cables on one single endpoint, as cable numbers are
+ * stored in 4-bit fields. And as the interface currently only holds one
+ * single endpoint, this is the maximum number of ports we can allow.
+ */
+#define MAX_PORTS 16
+
+/*
+ * This is a gadget, and the IN/OUT naming is from the host's perspective.
+ * USB -> OUT endpoint -> rawmidi
+ * USB <- IN endpoint <- rawmidi
+ */
+struct gmidi_in_port {
+ struct f_midi *midi;
+ int active;
+ uint8_t cable;
+ uint8_t state;
+#define STATE_UNKNOWN 0
+#define STATE_1PARAM 1
+#define STATE_2PARAM_1 2
+#define STATE_2PARAM_2 3
+#define STATE_SYSEX_0 4
+#define STATE_SYSEX_1 5
+#define STATE_SYSEX_2 6
+ uint8_t data[2];
+};
+
+struct f_midi {
+ struct usb_function func;
+ struct usb_gadget *gadget;
+ struct usb_ep *in_ep, *out_ep;
+ struct snd_card *card;
+ struct snd_rawmidi *rmidi;
+
+ struct snd_rawmidi_substream *in_substream[MAX_PORTS];
+ struct snd_rawmidi_substream *out_substream[MAX_PORTS];
+ struct gmidi_in_port *in_port[MAX_PORTS];
+
+ unsigned long out_triggered;
+ struct tasklet_struct tasklet;
+ unsigned int in_ports;
+ unsigned int out_ports;
+ int index;
+ char *id;
+ unsigned int buflen, qlen;
+};
+
+static inline struct f_midi *func_to_midi(struct usb_function *f)
+{
+ return container_of(f, struct f_midi, func);
+}
+
+static void f_midi_transmit(struct f_midi *midi, struct usb_request *req);
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
+DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
+DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(16);
+DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
+
+/* B.3.1 Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc __initdata = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ /* .bInterfaceNumber = DYNAMIC */
+ /* .bNumEndpoints = DYNAMIC */
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+ /* .iInterface = DYNAMIC */
+};
+
+/* B.3.2 Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_1 ac_header_desc __initdata = {
+ .bLength = UAC_DT_AC_HEADER_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = USB_MS_HEADER,
+ .bcdADC = cpu_to_le16(0x0100),
+ .wTotalLength = cpu_to_le16(UAC_DT_AC_HEADER_SIZE(1)),
+ .bInCollection = 1,
+ /* .baInterfaceNr = DYNAMIC */
+};
+
+/* B.4.1 Standard MS Interface Descriptor */
+static struct usb_interface_descriptor ms_interface_desc __initdata = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_MIDISTREAMING,
+ /* .iInterface = DYNAMIC */
+};
+
+/* B.4.2 Class-Specific MS Interface Descriptor */
+static struct usb_ms_header_descriptor ms_header_desc __initdata = {
+ .bLength = USB_DT_MS_HEADER_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = USB_MS_HEADER,
+ .bcdMSC = cpu_to_le16(0x0100),
+ /* .wTotalLength = DYNAMIC */
+};
+
+/* B.4.3 Embedded MIDI IN Jack Descriptor */
+static struct usb_midi_in_jack_descriptor jack_in_emb_desc = {
+ .bLength = USB_DT_MIDI_IN_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = USB_MS_MIDI_IN_JACK,
+ .bJackType = USB_MS_EMBEDDED,
+ /* .bJackID = DYNAMIC */
+};
+
+/* B.4.4 Embedded MIDI OUT Jack Descriptor */
+static struct usb_midi_out_jack_descriptor_16 jack_out_emb_desc = {
+ /* .bLength = DYNAMIC */
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = USB_MS_MIDI_OUT_JACK,
+ .bJackType = USB_MS_EMBEDDED,
+ /* .bJackID = DYNAMIC */
+ /* .bNrInputPins = DYNAMIC */
+ /* .pins = DYNAMIC */
+};
+
+/* B.5.1 Standard Bulk OUT Endpoint Descriptor */
+static struct usb_endpoint_descriptor bulk_out_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+/* B.5.2 Class-specific MS Bulk OUT Endpoint Descriptor */
+static struct usb_ms_endpoint_descriptor_16 ms_out_desc = {
+ /* .bLength = DYNAMIC */
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubtype = USB_MS_GENERAL,
+ /* .bNumEmbMIDIJack = DYNAMIC */
+ /* .baAssocJackID = DYNAMIC */
+};
+
+/* B.6.1 Standard Bulk IN Endpoint Descriptor */
+static struct usb_endpoint_descriptor bulk_in_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+/* B.6.2 Class-specific MS Bulk IN Endpoint Descriptor */
+static struct usb_ms_endpoint_descriptor_16 ms_in_desc = {
+ /* .bLength = DYNAMIC */
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubtype = USB_MS_GENERAL,
+ /* .bNumEmbMIDIJack = DYNAMIC */
+ /* .baAssocJackID = DYNAMIC */
+};
+
+/* string IDs are assigned dynamically */
+
+#define STRING_FUNC_IDX 0
+
+static struct usb_string midi_string_defs[] = {
+ [STRING_FUNC_IDX].s = "MIDI function",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings midi_stringtab = {
+ .language = 0x0409, /* en-us */
+ .strings = midi_string_defs,
+};
+
+static struct usb_gadget_strings *midi_strings[] = {
+ &midi_stringtab,
+ NULL,
+};
+
+static struct usb_request *alloc_ep_req(struct usb_ep *ep, unsigned length)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ if (req) {
+ req->length = length;
+ req->buf = kmalloc(length, GFP_ATOMIC);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ req = NULL;
+ }
+ }
+ return req;
+}
+
+static void free_ep_req(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+
+static const uint8_t f_midi_cin_length[] = {
+ 0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1
+};
+
+/*
+ * Receives a chunk of MIDI data.
+ */
+static void f_midi_read_data(struct usb_ep *ep, int cable,
+ uint8_t *data, int length)
+{
+ struct f_midi *midi = ep->driver_data;
+ struct snd_rawmidi_substream *substream = midi->out_substream[cable];
+
+ if (!substream)
+ /* Nobody is listening - throw it on the floor. */
+ return;
+
+ if (!test_bit(cable, &midi->out_triggered))
+ return;
+
+ snd_rawmidi_receive(substream, data, length);
+}
+
+static void f_midi_handle_out_data(struct usb_ep *ep, struct usb_request *req)
+{
+ unsigned int i;
+ u8 *buf = req->buf;
+
+ for (i = 0; i + 3 < req->actual; i += 4)
+ if (buf[i] != 0) {
+ int cable = buf[i] >> 4;
+ int length = f_midi_cin_length[buf[i] & 0x0f];
+ f_midi_read_data(ep, cable, &buf[i + 1], length);
+ }
+}
+
+static void
+f_midi_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_midi *midi = ep->driver_data;
+ struct usb_composite_dev *cdev = midi->func.config->cdev;
+ int status = req->status;
+
+ switch (status) {
+ case 0: /* normal completion */
+ if (ep == midi->out_ep) {
+ /* We received stuff. req is queued again, below */
+ f_midi_handle_out_data(ep, req);
+ } else if (ep == midi->in_ep) {
+ /* Our transmit completed. See if there's more to go.
+ * f_midi_transmit eats req, don't queue it again. */
+ f_midi_transmit(midi, req);
+ return;
+ }
+ break;
+
+ /* this endpoint is normally active while we're configured */
+ case -ECONNABORTED: /* hardware forced ep reset */
+ case -ECONNRESET: /* request dequeued */
+ case -ESHUTDOWN: /* disconnect from host */
+ VDBG(cdev, "%s gone (%d), %d/%d\n", ep->name, status,
+ req->actual, req->length);
+ if (ep == midi->out_ep)
+ f_midi_handle_out_data(ep, req);
+
+ free_ep_req(ep, req);
+ return;
+
+ case -EOVERFLOW: /* buffer overrun on read means that
+ * we didn't provide a big enough buffer.
+ */
+ default:
+ DBG(cdev, "%s complete --> %d, %d/%d\n", ep->name,
+ status, req->actual, req->length);
+ break;
+ case -EREMOTEIO: /* short read */
+ break;
+ }
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status) {
+ ERROR(cdev, "kill %s: resubmit %d bytes --> %d\n",
+ ep->name, req->length, status);
+ usb_ep_set_halt(ep);
+ /* FIXME recover later ... somehow */
+ }
+}
+
+static int f_midi_start_ep(struct f_midi *midi,
+ struct usb_function *f,
+ struct usb_ep *ep)
+{
+ int err;
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ if (ep->driver_data)
+ usb_ep_disable(ep);
+
+ err = config_ep_by_speed(midi->gadget, f, ep);
+ if (err) {
+ ERROR(cdev, "can't configure %s: %d\n", ep->name, err);
+ return err;
+ }
+
+ err = usb_ep_enable(ep);
+ if (err) {
+ ERROR(cdev, "can't start %s: %d\n", ep->name, err);
+ return err;
+ }
+
+ ep->driver_data = midi;
+
+ return 0;
+}
+
+static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_midi *midi = func_to_midi(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ unsigned i;
+ int err;
+
+ err = f_midi_start_ep(midi, f, midi->in_ep);
+ if (err)
+ return err;
+
+ err = f_midi_start_ep(midi, f, midi->out_ep);
+ if (err)
+ return err;
+
+ if (midi->out_ep->driver_data)
+ usb_ep_disable(midi->out_ep);
+
+ err = config_ep_by_speed(midi->gadget, f, midi->out_ep);
+ if (err) {
+ ERROR(cdev, "can't configure %s: %d\n",
+ midi->out_ep->name, err);
+ return err;
+ }
+
+ err = usb_ep_enable(midi->out_ep);
+ if (err) {
+ ERROR(cdev, "can't start %s: %d\n",
+ midi->out_ep->name, err);
+ return err;
+ }
+
+ midi->out_ep->driver_data = midi;
+
+ /* allocate a bunch of read buffers and queue them all at once. */
+ for (i = 0; i < midi->qlen && err == 0; i++) {
+ struct usb_request *req =
+ alloc_ep_req(midi->out_ep, midi->buflen);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req->complete = f_midi_complete;
+ err = usb_ep_queue(midi->out_ep, req, GFP_ATOMIC);
+ if (err) {
+ ERROR(midi, "%s queue req: %d\n",
+ midi->out_ep->name, err);
+ }
+ }
+
+ return 0;
+}
+
+static void f_midi_disable(struct usb_function *f)
+{
+ struct f_midi *midi = func_to_midi(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ DBG(cdev, "disable\n");
+
+ /*
+ * just disable endpoints, forcing completion of pending i/o.
+ * all our completion handlers free their requests in this case.
+ */
+ usb_ep_disable(midi->in_ep);
+ usb_ep_disable(midi->out_ep);
+}
+
+static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct f_midi *midi = func_to_midi(f);
+ struct snd_card *card;
+
+ DBG(cdev, "unbind\n");
+
+ /* just to be sure */
+ f_midi_disable(f);
+
+ card = midi->card;
+ midi->card = NULL;
+ if (card)
+ snd_card_free(card);
+
+ kfree(midi->id);
+ midi->id = NULL;
+
+ usb_free_descriptors(f->descriptors);
+ kfree(midi);
+}
+
+static int f_midi_snd_free(struct snd_device *device)
+{
+ return 0;
+}
+
+static void f_midi_transmit_packet(struct usb_request *req, uint8_t p0,
+ uint8_t p1, uint8_t p2, uint8_t p3)
+{
+ unsigned length = req->length;
+ u8 *buf = (u8 *)req->buf + length;
+
+ buf[0] = p0;
+ buf[1] = p1;
+ buf[2] = p2;
+ buf[3] = p3;
+ req->length = length + 4;
+}
+
+/*
+ * Converts MIDI commands to USB MIDI packets.
+ */
+static void f_midi_transmit_byte(struct usb_request *req,
+ struct gmidi_in_port *port, uint8_t b)
+{
+ uint8_t p0 = port->cable << 4;
+
+ if (b >= 0xf8) {
+ f_midi_transmit_packet(req, p0 | 0x0f, b, 0, 0);
+ } else if (b >= 0xf0) {
+ switch (b) {
+ case 0xf0:
+ port->data[0] = b;
+ port->state = STATE_SYSEX_1;
+ break;
+ case 0xf1:
+ case 0xf3:
+ port->data[0] = b;
+ port->state = STATE_1PARAM;
+ break;
+ case 0xf2:
+ port->data[0] = b;
+ port->state = STATE_2PARAM_1;
+ break;
+ case 0xf4:
+ case 0xf5:
+ port->state = STATE_UNKNOWN;
+ break;
+ case 0xf6:
+ f_midi_transmit_packet(req, p0 | 0x05, 0xf6, 0, 0);
+ port->state = STATE_UNKNOWN;
+ break;
+ case 0xf7:
+ switch (port->state) {
+ case STATE_SYSEX_0:
+ f_midi_transmit_packet(req,
+ p0 | 0x05, 0xf7, 0, 0);
+ break;
+ case STATE_SYSEX_1:
+ f_midi_transmit_packet(req,
+ p0 | 0x06, port->data[0], 0xf7, 0);
+ break;
+ case STATE_SYSEX_2:
+ f_midi_transmit_packet(req,
+ p0 | 0x07, port->data[0],
+ port->data[1], 0xf7);
+ break;
+ }
+ port->state = STATE_UNKNOWN;
+ break;
+ }
+ } else if (b >= 0x80) {
+ port->data[0] = b;
+ if (b >= 0xc0 && b <= 0xdf)
+ port->state = STATE_1PARAM;
+ else
+ port->state = STATE_2PARAM_1;
+ } else { /* b < 0x80 */
+ switch (port->state) {
+ case STATE_1PARAM:
+ if (port->data[0] < 0xf0) {
+ p0 |= port->data[0] >> 4;
+ } else {
+ p0 |= 0x02;
+ port->state = STATE_UNKNOWN;
+ }
+ f_midi_transmit_packet(req, p0, port->data[0], b, 0);
+ break;
+ case STATE_2PARAM_1:
+ port->data[1] = b;
+ port->state = STATE_2PARAM_2;
+ break;
+ case STATE_2PARAM_2:
+ if (port->data[0] < 0xf0) {
+ p0 |= port->data[0] >> 4;
+ port->state = STATE_2PARAM_1;
+ } else {
+ p0 |= 0x03;
+ port->state = STATE_UNKNOWN;
+ }
+ f_midi_transmit_packet(req,
+ p0, port->data[0], port->data[1], b);
+ break;
+ case STATE_SYSEX_0:
+ port->data[0] = b;
+ port->state = STATE_SYSEX_1;
+ break;
+ case STATE_SYSEX_1:
+ port->data[1] = b;
+ port->state = STATE_SYSEX_2;
+ break;
+ case STATE_SYSEX_2:
+ f_midi_transmit_packet(req,
+ p0 | 0x04, port->data[0], port->data[1], b);
+ port->state = STATE_SYSEX_0;
+ break;
+ }
+ }
+}
+
+static void f_midi_transmit(struct f_midi *midi, struct usb_request *req)
+{
+ struct usb_ep *ep = midi->in_ep;
+ int i;
+
+ if (!ep)
+ return;
+
+ if (!req)
+ req = alloc_ep_req(ep, midi->buflen);
+
+ if (!req) {
+ ERROR(midi, "gmidi_transmit: alloc_ep_request failed\n");
+ return;
+ }
+ req->length = 0;
+ req->complete = f_midi_complete;
+
+ for (i = 0; i < MAX_PORTS; i++) {
+ struct gmidi_in_port *port = midi->in_port[i];
+ struct snd_rawmidi_substream *substream = midi->in_substream[i];
+
+ if (!port || !port->active || !substream)
+ continue;
+
+ while (req->length + 3 < midi->buflen) {
+ uint8_t b;
+ if (snd_rawmidi_transmit(substream, &b, 1) != 1) {
+ port->active = 0;
+ break;
+ }
+ f_midi_transmit_byte(req, port, b);
+ }
+ }
+
+ if (req->length > 0)
+ usb_ep_queue(ep, req, GFP_ATOMIC);
+ else
+ free_ep_req(ep, req);
+}
+
+static void f_midi_in_tasklet(unsigned long data)
+{
+ struct f_midi *midi = (struct f_midi *) data;
+ f_midi_transmit(midi, NULL);
+}
+
+static int f_midi_in_open(struct snd_rawmidi_substream *substream)
+{
+ struct f_midi *midi = substream->rmidi->private_data;
+
+ if (!midi->in_port[substream->number])
+ return -EINVAL;
+
+ VDBG(midi, "%s()\n", __func__);
+ midi->in_substream[substream->number] = substream;
+ midi->in_port[substream->number]->state = STATE_UNKNOWN;
+ return 0;
+}
+
+static int f_midi_in_close(struct snd_rawmidi_substream *substream)
+{
+ struct f_midi *midi = substream->rmidi->private_data;
+
+ VDBG(midi, "%s()\n", __func__);
+ return 0;
+}
+
+static void f_midi_in_trigger(struct snd_rawmidi_substream *substream, int up)
+{
+ struct f_midi *midi = substream->rmidi->private_data;
+
+ if (!midi->in_port[substream->number])
+ return;
+
+ VDBG(midi, "%s() %d\n", __func__, up);
+ midi->in_port[substream->number]->active = up;
+ if (up)
+ tasklet_hi_schedule(&midi->tasklet);
+}
+
+static int f_midi_out_open(struct snd_rawmidi_substream *substream)
+{
+ struct f_midi *midi = substream->rmidi->private_data;
+
+ if (substream->number >= MAX_PORTS)
+ return -EINVAL;
+
+ VDBG(midi, "%s()\n", __func__);
+ midi->out_substream[substream->number] = substream;
+ return 0;
+}
+
+static int f_midi_out_close(struct snd_rawmidi_substream *substream)
+{
+ struct f_midi *midi = substream->rmidi->private_data;
+
+ VDBG(midi, "%s()\n", __func__);
+ return 0;
+}
+
+static void f_midi_out_trigger(struct snd_rawmidi_substream *substream, int up)
+{
+ struct f_midi *midi = substream->rmidi->private_data;
+
+ VDBG(midi, "%s()\n", __func__);
+
+ if (up)
+ set_bit(substream->number, &midi->out_triggered);
+ else
+ clear_bit(substream->number, &midi->out_triggered);
+}
+
+static struct snd_rawmidi_ops gmidi_in_ops = {
+ .open = f_midi_in_open,
+ .close = f_midi_in_close,
+ .trigger = f_midi_in_trigger,
+};
+
+static struct snd_rawmidi_ops gmidi_out_ops = {
+ .open = f_midi_out_open,
+ .close = f_midi_out_close,
+ .trigger = f_midi_out_trigger
+};
+
+/* register as a sound "card" */
+static int f_midi_register_card(struct f_midi *midi)
+{
+ struct snd_card *card;
+ struct snd_rawmidi *rmidi;
+ int err;
+ static struct snd_device_ops ops = {
+ .dev_free = f_midi_snd_free,
+ };
+
+ err = snd_card_create(midi->index, midi->id, THIS_MODULE, 0, &card);
+ if (err < 0) {
+ ERROR(midi, "snd_card_create() failed\n");
+ goto fail;
+ }
+ midi->card = card;
+
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, midi, &ops);
+ if (err < 0) {
+ ERROR(midi, "snd_device_new() failed: error %d\n", err);
+ goto fail;
+ }
+
+ strcpy(card->driver, f_midi_longname);
+ strcpy(card->longname, f_midi_longname);
+ strcpy(card->shortname, f_midi_shortname);
+
+ /* Set up rawmidi */
+ snd_component_add(card, "MIDI");
+ err = snd_rawmidi_new(card, card->longname, 0,
+ midi->out_ports, midi->in_ports, &rmidi);
+ if (err < 0) {
+ ERROR(midi, "snd_rawmidi_new() failed: error %d\n", err);
+ goto fail;
+ }
+ midi->rmidi = rmidi;
+ strcpy(rmidi->name, card->shortname);
+ rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT |
+ SNDRV_RAWMIDI_INFO_INPUT |
+ SNDRV_RAWMIDI_INFO_DUPLEX;
+ rmidi->private_data = midi;
+
+ /*
+ * Yes, rawmidi OUTPUT = USB IN, and rawmidi INPUT = USB OUT.
+ * It's an upside-down world being a gadget.
+ */
+ snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &gmidi_in_ops);
+ snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &gmidi_out_ops);
+
+ snd_card_set_dev(card, &midi->gadget->dev);
+
+ /* register it - we're ready to go */
+ err = snd_card_register(card);
+ if (err < 0) {
+ ERROR(midi, "snd_card_register() failed\n");
+ goto fail;
+ }
+
+ VDBG(midi, "%s() finished ok\n", __func__);
+ return 0;
+
+fail:
+ if (midi->card) {
+ snd_card_free(midi->card);
+ midi->card = NULL;
+ }
+ return err;
+}
+
+/* MIDI function driver setup/binding */
+
+static int __init
+f_midi_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_descriptor_header *midi_function[(MAX_PORTS * 2) + 12];
+ struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
+ struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_midi *midi = func_to_midi(f);
+ int status, n, jack = 1, i = 0;
+
+ /* maybe allocate device-global string ID */
+ if (midi_string_defs[0].id == 0) {
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ goto fail;
+ midi_string_defs[0].id = status;
+ }
+
+ /* We have two interfaces, AudioControl and MIDIStreaming */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ ac_interface_desc.bInterfaceNumber = status;
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ ms_interface_desc.bInterfaceNumber = status;
+ ac_header_desc.baInterfaceNr[0] = status;
+
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
+ midi->in_ep = usb_ep_autoconfig(cdev->gadget, &bulk_in_desc);
+ if (!midi->in_ep)
+ goto fail;
+ midi->in_ep->driver_data = cdev; /* claim */
+
+ midi->out_ep = usb_ep_autoconfig(cdev->gadget, &bulk_out_desc);
+ if (!midi->out_ep)
+ goto fail;
+ midi->out_ep->driver_data = cdev; /* claim */
+
+ /*
+ * construct the function's descriptor set. As the number of
+ * input and output MIDI ports is configurable, we have to do
+ * it that way.
+ */
+
+ /* add the headers - these are always the same */
+ midi_function[i++] = (struct usb_descriptor_header *) &ac_interface_desc;
+ midi_function[i++] = (struct usb_descriptor_header *) &ac_header_desc;
+ midi_function[i++] = (struct usb_descriptor_header *) &ms_interface_desc;
+
+ /* calculate the header's wTotalLength */
+ n = USB_DT_MS_HEADER_SIZE
+ + (1 + midi->in_ports) * USB_DT_MIDI_IN_SIZE
+ + (1 + midi->out_ports) * USB_DT_MIDI_OUT_SIZE(1);
+ ms_header_desc.wTotalLength = cpu_to_le16(n);
+
+ midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
+
+ /* we have one embedded IN jack */
+ jack_in_emb_desc.bJackID = jack++;
+ midi_function[i++] = (struct usb_descriptor_header *) &jack_in_emb_desc;
+
+ /* and a dynamic amount of external IN jacks */
+ for (n = 0; n < midi->in_ports; n++) {
+ struct usb_midi_in_jack_descriptor *ext = &jack_in_ext_desc[n];
+
+ ext->bLength = USB_DT_MIDI_IN_SIZE;
+ ext->bDescriptorType = USB_DT_CS_INTERFACE;
+ ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
+ ext->bJackType = USB_MS_EXTERNAL;
+ ext->bJackID = jack++;
+ ext->iJack = 0;
+
+ midi_function[i++] = (struct usb_descriptor_header *) ext;
+ }
+
+ /* one embedded OUT jack ... */
+ jack_out_emb_desc.bLength = USB_DT_MIDI_OUT_SIZE(midi->in_ports);
+ jack_out_emb_desc.bJackID = jack++;
+ jack_out_emb_desc.bNrInputPins = midi->in_ports;
+ /* ... which referencess all external IN jacks */
+ for (n = 0; n < midi->in_ports; n++) {
+ jack_out_emb_desc.pins[n].baSourceID = jack_in_ext_desc[n].bJackID;
+ jack_out_emb_desc.pins[n].baSourcePin = 1;
+ }
+
+ midi_function[i++] = (struct usb_descriptor_header *) &jack_out_emb_desc;
+
+ /* and multiple external OUT jacks ... */
+ for (n = 0; n < midi->out_ports; n++) {
+ struct usb_midi_out_jack_descriptor_1 *ext = &jack_out_ext_desc[n];
+ int m;
+
+ ext->bLength = USB_DT_MIDI_OUT_SIZE(1);
+ ext->bDescriptorType = USB_DT_CS_INTERFACE;
+ ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
+ ext->bJackType = USB_MS_EXTERNAL;
+ ext->bJackID = jack++;
+ ext->bNrInputPins = 1;
+ ext->iJack = 0;
+ /* ... which all reference the same embedded IN jack */
+ for (m = 0; m < midi->out_ports; m++) {
+ ext->pins[m].baSourceID = jack_in_emb_desc.bJackID;
+ ext->pins[m].baSourcePin = 1;
+ }
+
+ midi_function[i++] = (struct usb_descriptor_header *) ext;
+ }
+
+ /* configure the endpoint descriptors ... */
+ ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
+ ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
+ for (n = 0; n < midi->in_ports; n++)
+ ms_out_desc.baAssocJackID[n] = jack_in_emb_desc.bJackID;
+
+ ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
+ ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
+ for (n = 0; n < midi->out_ports; n++)
+ ms_in_desc.baAssocJackID[n] = jack_out_emb_desc.bJackID;
+
+ /* ... and add them to the list */
+ midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc;
+ midi_function[i++] = (struct usb_descriptor_header *) &ms_out_desc;
+ midi_function[i++] = (struct usb_descriptor_header *) &bulk_in_desc;
+ midi_function[i++] = (struct usb_descriptor_header *) &ms_in_desc;
+ midi_function[i++] = NULL;
+
+ /*
+ * support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ /* copy descriptors, and track endpoint copies */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ c->highspeed = true;
+ bulk_in_desc.wMaxPacketSize = cpu_to_le16(512);
+ bulk_out_desc.wMaxPacketSize = cpu_to_le16(512);
+ f->hs_descriptors = usb_copy_descriptors(midi_function);
+ } else {
+ f->descriptors = usb_copy_descriptors(midi_function);
+ }
+
+ return 0;
+
+fail:
+ /* we might as well release our claims on endpoints */
+ if (midi->out_ep)
+ midi->out_ep->driver_data = NULL;
+ if (midi->in_ep)
+ midi->in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+
+ return status;
+}
+
+/**
+ * f_midi_bind_config - add USB MIDI function to a configuration
+ * @c: the configuration to supcard the USB audio function
+ * @index: the soundcard index to use for the ALSA device creation
+ * @id: the soundcard id to use for the ALSA device creation
+ * @buflen: the buffer length to use
+ * @qlen the number of read requests to pre-allocate
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ */
+int __init f_midi_bind_config(struct usb_configuration *c,
+ int index, char *id,
+ unsigned int in_ports,
+ unsigned int out_ports,
+ unsigned int buflen,
+ unsigned int qlen)
+{
+ struct f_midi *midi;
+ int status, i;
+
+ /* sanity check */
+ if (in_ports > MAX_PORTS || out_ports > MAX_PORTS)
+ return -EINVAL;
+
+ /* allocate and initialize one new instance */
+ midi = kzalloc(sizeof *midi, GFP_KERNEL);
+ if (!midi) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0; i < in_ports; i++) {
+ struct gmidi_in_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ status = -ENOMEM;
+ goto setup_fail;
+ }
+
+ port->midi = midi;
+ port->active = 0;
+ port->cable = i;
+ midi->in_port[i] = port;
+ }
+
+ midi->gadget = c->cdev->gadget;
+ tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi);
+
+ /* set up ALSA midi devices */
+ midi->in_ports = in_ports;
+ midi->out_ports = out_ports;
+ status = f_midi_register_card(midi);
+ if (status < 0)
+ goto setup_fail;
+
+ midi->func.name = "gmidi function";
+ midi->func.strings = midi_strings;
+ midi->func.bind = f_midi_bind;
+ midi->func.unbind = f_midi_unbind;
+ midi->func.set_alt = f_midi_set_alt;
+ midi->func.disable = f_midi_disable;
+
+ midi->id = kstrdup(id, GFP_KERNEL);
+ midi->index = index;
+ midi->buflen = buflen;
+ midi->qlen = qlen;
+
+ status = usb_add_function(c, &midi->func);
+ if (status)
+ goto setup_fail;
+
+ return 0;
+
+setup_fail:
+ for (--i; i >= 0; i--)
+ kfree(midi->in_port[i]);
+ kfree(midi);
+fail:
+ return status;
+}
+
diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c
index ae69ed7e6b9..aab8eded045 100644
--- a/drivers/usb/gadget/f_ncm.c
+++ b/drivers/usb/gadget/f_ncm.c
@@ -13,15 +13,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/f_obex.c b/drivers/usb/gadget/f_obex.c
index 394502abeb9..e3f74bf5da2 100644
--- a/drivers/usb/gadget/f_obex.c
+++ b/drivers/usb/gadget/f_obex.c
@@ -10,15 +10,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* #define VERBOSE_DEBUG */
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 8f3eab1af88..34907703333 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -8,16 +8,6 @@
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/mm.h>
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 3ea4666be3d..704d1d94f72 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -11,15 +11,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* #define VERBOSE_DEBUG */
diff --git a/drivers/usb/gadget/f_sourcesink.c b/drivers/usb/gadget/f_sourcesink.c
index e18b4f52095..168906d2b5d 100644
--- a/drivers/usb/gadget/f_sourcesink.c
+++ b/drivers/usb/gadget/f_sourcesink.c
@@ -8,15 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* #define VERBOSE_DEBUG */
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index 3dc53754ab6..c1540648125 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -8,15 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
diff --git a/drivers/usb/gadget/f_uvc.c b/drivers/usb/gadget/f_uvc.c
index 7a8b9aa4aea..2022fe49214 100644
--- a/drivers/usb/gadget/f_uvc.c
+++ b/drivers/usb/gadget/f_uvc.c
@@ -8,7 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/f_uvc.h b/drivers/usb/gadget/f_uvc.h
index e18a6636c28..abf83293513 100644
--- a/drivers/usb/gadget/f_uvc.h
+++ b/drivers/usb/gadget/f_uvc.h
@@ -8,7 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
*/
#ifndef _F_UVC_H_
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 639e14a2fd1..3ac4f51cd0b 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -69,8 +69,7 @@
* each LUN would be settable independently as a disk drive or a CD-ROM
* drive, but currently all LUNs have to be the same type. The CD-ROM
* emulation includes a single data track and no audio tracks; hence there
- * need be only one backing file per LUN. Note also that the CD-ROM block
- * length is set to 512 rather than the more common value 2048.
+ * need be only one backing file per LUN.
*
* Requirements are modest; only a bulk-in and a bulk-out endpoint are
* needed (an interrupt-out endpoint is also needed for CBI). The memory
@@ -461,7 +460,6 @@ struct fsg_dev {
struct fsg_buffhd *next_buffhd_to_fill;
struct fsg_buffhd *next_buffhd_to_drain;
- struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];
int thread_wakeup_needed;
struct completion thread_notifier;
@@ -488,6 +486,8 @@ struct fsg_dev {
unsigned int nluns;
struct fsg_lun *luns;
struct fsg_lun *curlun;
+ /* Must be the last entry */
+ struct fsg_buffhd buffhds[];
};
typedef void (*fsg_routine_t)(struct fsg_dev *);
@@ -586,7 +586,19 @@ dev_qualifier = {
.bNumConfigurations = 1,
};
+static int populate_bos(struct fsg_dev *fsg, u8 *buf)
+{
+ memcpy(buf, &fsg_bos_desc, USB_DT_BOS_SIZE);
+ buf += USB_DT_BOS_SIZE;
+
+ memcpy(buf, &fsg_ext_cap_desc, USB_DT_USB_EXT_CAP_SIZE);
+ buf += USB_DT_USB_EXT_CAP_SIZE;
+ memcpy(buf, &fsg_ss_cap_desc, USB_DT_USB_SS_CAP_SIZE);
+
+ return USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE
+ + USB_DT_USB_EXT_CAP_SIZE;
+}
/*
* Config descriptors must agree with the code that sets configurations
@@ -935,7 +947,8 @@ static int standard_setup_req(struct fsg_dev *fsg,
break;
case USB_DT_DEVICE_QUALIFIER:
VDBG(fsg, "get device qualifier\n");
- if (!gadget_is_dualspeed(fsg->gadget))
+ if (!gadget_is_dualspeed(fsg->gadget) ||
+ fsg->gadget->speed == USB_SPEED_SUPER)
break;
/*
* Assume ep0 uses the same maxpacket value for both
@@ -948,7 +961,8 @@ static int standard_setup_req(struct fsg_dev *fsg,
case USB_DT_OTHER_SPEED_CONFIG:
VDBG(fsg, "get other-speed config descriptor\n");
- if (!gadget_is_dualspeed(fsg->gadget))
+ if (!gadget_is_dualspeed(fsg->gadget) ||
+ fsg->gadget->speed == USB_SPEED_SUPER)
break;
goto get_config;
case USB_DT_CONFIG:
@@ -967,7 +981,15 @@ get_config:
value = usb_gadget_get_string(&fsg_stringtab,
w_value & 0xff, req->buf);
break;
+
+ case USB_DT_BOS:
+ VDBG(fsg, "get bos descriptor\n");
+
+ if (gadget_is_superspeed(fsg->gadget))
+ value = populate_bos(fsg, req->buf);
+ break;
}
+
break;
/* One config, two speeds */
@@ -1136,7 +1158,6 @@ static int do_read(struct fsg_dev *fsg)
u32 amount_left;
loff_t file_offset, file_offset_tmp;
unsigned int amount;
- unsigned int partial_page;
ssize_t nread;
/* Get the starting Logical Block Address and check that it's
@@ -1158,7 +1179,7 @@ static int do_read(struct fsg_dev *fsg)
curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
return -EINVAL;
}
- file_offset = ((loff_t) lba) << 9;
+ file_offset = ((loff_t) lba) << curlun->blkbits;
/* Carry out the file reads */
amount_left = fsg->data_size_from_cmnd;
@@ -1171,17 +1192,10 @@ static int do_read(struct fsg_dev *fsg)
* Try to read the remaining amount.
* But don't read more than the buffer size.
* And don't try to read past the end of the file.
- * Finally, if we're not at a page boundary, don't read past
- * the next page.
- * If this means reading 0 then we were asked to read past
- * the end of file. */
+ */
amount = min((unsigned int) amount_left, mod_data.buflen);
amount = min((loff_t) amount,
curlun->file_length - file_offset);
- partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
- if (partial_page > 0)
- amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
- partial_page);
/* Wait for the next buffer to become available */
bh = fsg->next_buffhd_to_fill;
@@ -1196,7 +1210,7 @@ static int do_read(struct fsg_dev *fsg)
if (amount == 0) {
curlun->sense_data =
SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
- curlun->sense_data_info = file_offset >> 9;
+ curlun->sense_data_info = file_offset >> curlun->blkbits;
curlun->info_valid = 1;
bh->inreq->length = 0;
bh->state = BUF_STATE_FULL;
@@ -1221,18 +1235,23 @@ static int do_read(struct fsg_dev *fsg)
} else if (nread < amount) {
LDBG(curlun, "partial file read: %d/%u\n",
(int) nread, amount);
- nread -= (nread & 511); // Round down to a block
+ nread = round_down(nread, curlun->blksize);
}
file_offset += nread;
amount_left -= nread;
fsg->residue -= nread;
+
+ /* Except at the end of the transfer, nread will be
+ * equal to the buffer size, which is divisible by the
+ * bulk-in maxpacket size.
+ */
bh->inreq->length = nread;
bh->state = BUF_STATE_FULL;
/* If an error occurred, report it and its position */
if (nread < amount) {
curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
- curlun->sense_data_info = file_offset >> 9;
+ curlun->sense_data_info = file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
@@ -1262,7 +1281,6 @@ static int do_write(struct fsg_dev *fsg)
u32 amount_left_to_req, amount_left_to_write;
loff_t usb_offset, file_offset, file_offset_tmp;
unsigned int amount;
- unsigned int partial_page;
ssize_t nwritten;
int rc;
@@ -1303,7 +1321,7 @@ static int do_write(struct fsg_dev *fsg)
/* Carry out the file writes */
get_some_more = 1;
- file_offset = usb_offset = ((loff_t) lba) << 9;
+ file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
while (amount_left_to_write > 0) {
@@ -1313,38 +1331,20 @@ static int do_write(struct fsg_dev *fsg)
if (bh->state == BUF_STATE_EMPTY && get_some_more) {
/* Figure out how much we want to get:
- * Try to get the remaining amount.
- * But don't get more than the buffer size.
- * And don't try to go past the end of the file.
- * If we're not at a page boundary,
- * don't go past the next page.
- * If this means getting 0, then we were asked
- * to write past the end of file.
- * Finally, round down to a block boundary. */
+ * Try to get the remaining amount,
+ * but not more than the buffer size.
+ */
amount = min(amount_left_to_req, mod_data.buflen);
- amount = min((loff_t) amount, curlun->file_length -
- usb_offset);
- partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
- if (partial_page > 0)
- amount = min(amount,
- (unsigned int) PAGE_CACHE_SIZE - partial_page);
-
- if (amount == 0) {
+
+ /* Beyond the end of the backing file? */
+ if (usb_offset >= curlun->file_length) {
get_some_more = 0;
curlun->sense_data =
SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
- curlun->sense_data_info = usb_offset >> 9;
+ curlun->sense_data_info = usb_offset >> curlun->blkbits;
curlun->info_valid = 1;
continue;
}
- amount -= (amount & 511);
- if (amount == 0) {
-
- /* Why were we were asked to transfer a
- * partial block? */
- get_some_more = 0;
- continue;
- }
/* Get the next buffer */
usb_offset += amount;
@@ -1353,11 +1353,11 @@ static int do_write(struct fsg_dev *fsg)
if (amount_left_to_req == 0)
get_some_more = 0;
- /* amount is always divisible by 512, hence by
- * the bulk-out maxpacket size */
- bh->outreq->length = bh->bulk_out_intended_length =
- amount;
- bh->outreq->short_not_ok = 1;
+ /* Except at the end of the transfer, amount will be
+ * equal to the buffer size, which is divisible by
+ * the bulk-out maxpacket size.
+ */
+ set_bulk_out_req_length(fsg, bh, amount);
start_transfer(fsg, fsg->bulk_out, bh->outreq,
&bh->outreq_busy, &bh->state);
fsg->next_buffhd_to_fill = bh->next;
@@ -1376,7 +1376,7 @@ static int do_write(struct fsg_dev *fsg)
/* Did something go wrong with the transfer? */
if (bh->outreq->status != 0) {
curlun->sense_data = SS_COMMUNICATION_FAILURE;
- curlun->sense_data_info = file_offset >> 9;
+ curlun->sense_data_info = file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
@@ -1390,6 +1390,16 @@ static int do_write(struct fsg_dev *fsg)
amount = curlun->file_length - file_offset;
}
+ /* Don't accept excess data. The spec doesn't say
+ * what to do in this case. We'll ignore the error.
+ */
+ amount = min(amount, bh->bulk_out_intended_length);
+
+ /* Don't write a partial block */
+ amount = round_down(amount, curlun->blksize);
+ if (amount == 0)
+ goto empty_write;
+
/* Perform the write */
file_offset_tmp = file_offset;
nwritten = vfs_write(curlun->filp,
@@ -1408,8 +1418,7 @@ static int do_write(struct fsg_dev *fsg)
} else if (nwritten < amount) {
LDBG(curlun, "partial file write: %d/%u\n",
(int) nwritten, amount);
- nwritten -= (nwritten & 511);
- // Round down to a block
+ nwritten = round_down(nwritten, curlun->blksize);
}
file_offset += nwritten;
amount_left_to_write -= nwritten;
@@ -1418,13 +1427,14 @@ static int do_write(struct fsg_dev *fsg)
/* If an error occurred, report it and its position */
if (nwritten < amount) {
curlun->sense_data = SS_WRITE_ERROR;
- curlun->sense_data_info = file_offset >> 9;
+ curlun->sense_data_info = file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
+ empty_write:
/* Did the host decide to stop early? */
- if (bh->outreq->actual != bh->outreq->length) {
+ if (bh->outreq->actual < bh->bulk_out_intended_length) {
fsg->short_packet_received = 1;
break;
}
@@ -1500,8 +1510,8 @@ static int do_verify(struct fsg_dev *fsg)
return -EIO; // No default reply
/* Prepare to carry out the file verify */
- amount_left = verification_length << 9;
- file_offset = ((loff_t) lba) << 9;
+ amount_left = verification_length << curlun->blkbits;
+ file_offset = ((loff_t) lba) << curlun->blkbits;
/* Write out all the dirty buffers before invalidating them */
fsg_lun_fsync_sub(curlun);
@@ -1519,15 +1529,14 @@ static int do_verify(struct fsg_dev *fsg)
* Try to read the remaining amount, but not more than
* the buffer size.
* And don't try to read past the end of the file.
- * If this means reading 0 then we were asked to read
- * past the end of file. */
+ */
amount = min((unsigned int) amount_left, mod_data.buflen);
amount = min((loff_t) amount,
curlun->file_length - file_offset);
if (amount == 0) {
curlun->sense_data =
SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
- curlun->sense_data_info = file_offset >> 9;
+ curlun->sense_data_info = file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
@@ -1550,11 +1559,11 @@ static int do_verify(struct fsg_dev *fsg)
} else if (nread < amount) {
LDBG(curlun, "partial file verify: %d/%u\n",
(int) nread, amount);
- nread -= (nread & 511); // Round down to a sector
+ nread = round_down(nread, curlun->blksize);
}
if (nread == 0) {
curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
- curlun->sense_data_info = file_offset >> 9;
+ curlun->sense_data_info = file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
@@ -1668,7 +1677,7 @@ static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
/* Max logical block */
- put_unaligned_be32(512, &buf[4]); /* Block length */
+ put_unaligned_be32(curlun->blksize, &buf[4]); /* Block length */
return 8;
}
@@ -1890,7 +1899,7 @@ static int do_read_format_capacities(struct fsg_dev *fsg,
put_unaligned_be32(curlun->num_sectors, &buf[0]);
/* Number of blocks */
- put_unaligned_be32(512, &buf[4]); /* Block length */
+ put_unaligned_be32(curlun->blksize, &buf[4]); /* Block length */
buf[4] = 0x02; /* Current capacity */
return 12;
}
@@ -1969,7 +1978,7 @@ static int throw_away_data(struct fsg_dev *fsg)
fsg->next_buffhd_to_drain = bh->next;
/* A short packet or an error ends everything */
- if (bh->outreq->actual != bh->outreq->length ||
+ if (bh->outreq->actual < bh->bulk_out_intended_length ||
bh->outreq->status != 0) {
raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
return -EINTR;
@@ -1983,11 +1992,11 @@ static int throw_away_data(struct fsg_dev *fsg)
amount = min(fsg->usb_amount_left,
(u32) mod_data.buflen);
- /* amount is always divisible by 512, hence by
- * the bulk-out maxpacket size */
- bh->outreq->length = bh->bulk_out_intended_length =
- amount;
- bh->outreq->short_not_ok = 1;
+ /* Except at the end of the transfer, amount will be
+ * equal to the buffer size, which is divisible by
+ * the bulk-out maxpacket size.
+ */
+ set_bulk_out_req_length(fsg, bh, amount);
start_transfer(fsg, fsg->bulk_out, bh->outreq,
&bh->outreq_busy, &bh->state);
fsg->next_buffhd_to_fill = bh->next;
@@ -2415,7 +2424,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
case READ_6:
i = fsg->cmnd[4];
- fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+ fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << fsg->curlun->blkbits;
if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
(7<<1) | (1<<4), 1,
"READ(6)")) == 0)
@@ -2424,7 +2433,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
case READ_10:
fsg->data_size_from_cmnd =
- get_unaligned_be16(&fsg->cmnd[7]) << 9;
+ get_unaligned_be16(&fsg->cmnd[7]) << fsg->curlun->blkbits;
if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"READ(10)")) == 0)
@@ -2433,7 +2442,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
case READ_12:
fsg->data_size_from_cmnd =
- get_unaligned_be32(&fsg->cmnd[6]) << 9;
+ get_unaligned_be32(&fsg->cmnd[6]) << fsg->curlun->blkbits;
if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"READ(12)")) == 0)
@@ -2519,7 +2528,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
case WRITE_6:
i = fsg->cmnd[4];
- fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+ fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << fsg->curlun->blkbits;
if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
(7<<1) | (1<<4), 1,
"WRITE(6)")) == 0)
@@ -2528,7 +2537,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
case WRITE_10:
fsg->data_size_from_cmnd =
- get_unaligned_be16(&fsg->cmnd[7]) << 9;
+ get_unaligned_be16(&fsg->cmnd[7]) << fsg->curlun->blkbits;
if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"WRITE(10)")) == 0)
@@ -2537,7 +2546,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
case WRITE_12:
fsg->data_size_from_cmnd =
- get_unaligned_be32(&fsg->cmnd[6]) << 9;
+ get_unaligned_be32(&fsg->cmnd[6]) << fsg->curlun->blkbits;
if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"WRITE(12)")) == 0)
@@ -2666,7 +2675,6 @@ static int get_next_command(struct fsg_dev *fsg)
/* Queue a request to read a Bulk-only CBW */
set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
- bh->outreq->short_not_ok = 1;
start_transfer(fsg, fsg->bulk_out, bh->outreq,
&bh->outreq_busy, &bh->state);
@@ -2752,7 +2760,7 @@ static int do_set_interface(struct fsg_dev *fsg, int altsetting)
reset:
/* Deallocate the requests */
- for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ for (i = 0; i < fsg_num_buffers; ++i) {
struct fsg_buffhd *bh = &fsg->buffhds[i];
if (bh->inreq) {
@@ -2791,29 +2799,32 @@ reset:
/* Enable the endpoints */
d = fsg_ep_desc(fsg->gadget,
- &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
+ &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc,
+ &fsg_ss_bulk_in_desc);
if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
goto reset;
fsg->bulk_in_enabled = 1;
d = fsg_ep_desc(fsg->gadget,
- &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
+ &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc,
+ &fsg_ss_bulk_out_desc);
if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
goto reset;
fsg->bulk_out_enabled = 1;
- fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
+ fsg->bulk_out_maxpacket = usb_endpoint_maxp(d);
clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
if (transport_is_cbi()) {
d = fsg_ep_desc(fsg->gadget,
- &fsg_fs_intr_in_desc, &fsg_hs_intr_in_desc);
+ &fsg_fs_intr_in_desc, &fsg_hs_intr_in_desc,
+ &fsg_ss_intr_in_desc);
if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
goto reset;
fsg->intr_in_enabled = 1;
}
/* Allocate the requests */
- for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ for (i = 0; i < fsg_num_buffers; ++i) {
struct fsg_buffhd *bh = &fsg->buffhds[i];
if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
@@ -2862,17 +2873,10 @@ static int do_set_config(struct fsg_dev *fsg, u8 new_config)
fsg->config = new_config;
if ((rc = do_set_interface(fsg, 0)) != 0)
fsg->config = 0; // Reset on errors
- else {
- char *speed;
-
- switch (fsg->gadget->speed) {
- case USB_SPEED_LOW: speed = "low"; break;
- case USB_SPEED_FULL: speed = "full"; break;
- case USB_SPEED_HIGH: speed = "high"; break;
- default: speed = "?"; break;
- }
- INFO(fsg, "%s speed config #%d\n", speed, fsg->config);
- }
+ else
+ INFO(fsg, "%s config #%d\n",
+ usb_speed_string(fsg->gadget->speed),
+ fsg->config);
}
return rc;
}
@@ -2909,7 +2913,7 @@ static void handle_exception(struct fsg_dev *fsg)
/* Cancel all the pending transfers */
if (fsg->intreq_busy)
usb_ep_dequeue(fsg->intr_in, fsg->intreq);
- for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ for (i = 0; i < fsg_num_buffers; ++i) {
bh = &fsg->buffhds[i];
if (bh->inreq_busy)
usb_ep_dequeue(fsg->bulk_in, bh->inreq);
@@ -2920,7 +2924,7 @@ static void handle_exception(struct fsg_dev *fsg)
/* Wait until everything is idle */
for (;;) {
num_active = fsg->intreq_busy;
- for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ for (i = 0; i < fsg_num_buffers; ++i) {
bh = &fsg->buffhds[i];
num_active += bh->inreq_busy + bh->outreq_busy;
}
@@ -2942,7 +2946,7 @@ static void handle_exception(struct fsg_dev *fsg)
* state, and the exception. Then invoke the handler. */
spin_lock_irq(&fsg->lock);
- for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ for (i = 0; i < fsg_num_buffers; ++i) {
bh = &fsg->buffhds[i];
bh->state = BUF_STATE_EMPTY;
}
@@ -3149,6 +3153,15 @@ static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
DBG(fsg, "unbind\n");
clear_bit(REGISTERED, &fsg->atomic_bitflags);
+ /* If the thread isn't already dead, tell it to exit now */
+ if (fsg->state != FSG_STATE_TERMINATED) {
+ raise_exception(fsg, FSG_STATE_EXIT);
+ wait_for_completion(&fsg->thread_notifier);
+
+ /* The cleanup routine waits for this completion also */
+ complete(&fsg->thread_notifier);
+ }
+
/* Unregister the sysfs attribute files and the LUNs */
for (i = 0; i < fsg->nluns; ++i) {
curlun = &fsg->luns[i];
@@ -3162,17 +3175,8 @@ static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
}
}
- /* If the thread isn't already dead, tell it to exit now */
- if (fsg->state != FSG_STATE_TERMINATED) {
- raise_exception(fsg, FSG_STATE_EXIT);
- wait_for_completion(&fsg->thread_notifier);
-
- /* The cleanup routine waits for this completion also */
- complete(&fsg->thread_notifier);
- }
-
/* Free the data buffers */
- for (i = 0; i < FSG_NUM_BUFFERS; ++i)
+ for (i = 0; i < fsg_num_buffers; ++i)
kfree(fsg->buffhds[i].buf);
/* Free the request and buffer for endpoint 0 */
@@ -3445,6 +3449,24 @@ static int __init fsg_bind(struct usb_gadget *gadget)
fsg_fs_intr_in_desc.bEndpointAddress;
}
+ if (gadget_is_superspeed(gadget)) {
+ unsigned max_burst;
+
+ fsg_ss_function[i + FSG_SS_FUNCTION_PRE_EP_ENTRIES] = NULL;
+
+ /* Calculate bMaxBurst, we know packet size is 1024 */
+ max_burst = min_t(unsigned, mod_data.buflen / 1024, 15);
+
+ /* Assume endpoint addresses are the same for both speeds */
+ fsg_ss_bulk_in_desc.bEndpointAddress =
+ fsg_fs_bulk_in_desc.bEndpointAddress;
+ fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;
+
+ fsg_ss_bulk_out_desc.bEndpointAddress =
+ fsg_fs_bulk_out_desc.bEndpointAddress;
+ fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
+ }
+
if (gadget_is_otg(gadget))
fsg_otg_desc.bmAttributes |= USB_OTG_HNP;
@@ -3460,7 +3482,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
req->complete = ep0_complete;
/* Allocate the data buffers */
- for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ for (i = 0; i < fsg_num_buffers; ++i) {
struct fsg_buffhd *bh = &fsg->buffhds[i];
/* Allocate for the bulk-in endpoint. We assume that
@@ -3471,7 +3493,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
goto out;
bh->next = bh + 1;
}
- fsg->buffhds[FSG_NUM_BUFFERS - 1].next = &fsg->buffhds[0];
+ fsg->buffhds[fsg_num_buffers - 1].next = &fsg->buffhds[0];
/* This should reflect the actual gadget power source */
usb_gadget_set_selfpowered(gadget);
@@ -3561,11 +3583,7 @@ static void fsg_resume(struct usb_gadget *gadget)
/*-------------------------------------------------------------------------*/
static struct usb_gadget_driver fsg_driver = {
-#ifdef CONFIG_USB_GADGET_DUALSPEED
- .speed = USB_SPEED_HIGH,
-#else
- .speed = USB_SPEED_FULL,
-#endif
+ .speed = USB_SPEED_SUPER,
.function = (char *) fsg_string_product,
.unbind = fsg_unbind,
.disconnect = fsg_disconnect,
@@ -3587,7 +3605,9 @@ static int __init fsg_alloc(void)
{
struct fsg_dev *fsg;
- fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
+ fsg = kzalloc(sizeof *fsg +
+ fsg_num_buffers * sizeof *(fsg->buffhds), GFP_KERNEL);
+
if (!fsg)
return -ENOMEM;
spin_lock_init(&fsg->lock);
@@ -3605,6 +3625,10 @@ static int __init fsg_init(void)
int rc;
struct fsg_dev *fsg;
+ rc = fsg_num_buffers_validate();
+ if (rc != 0)
+ return rc;
+
if ((rc = fsg_alloc()) != 0)
return rc;
fsg = the_fsg;
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 3bf872e1ad3..2a03e4de11c 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -540,7 +540,7 @@ static int qe_ep_init(struct qe_udc *udc,
int reval = 0;
u16 max = 0;
- max = le16_to_cpu(desc->wMaxPacketSize);
+ max = usb_endpoint_maxp(desc);
/* check the max package size validate for this endpoint */
/* Refer to USB2.0 spec table 9-13,
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index de24a4233c2..b2c44e1d581 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -559,7 +559,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
return -ESHUTDOWN;
- max = le16_to_cpu(desc->wMaxPacketSize);
+ max = usb_endpoint_maxp(desc);
/* Disable automatic zlp generation. Driver is responsible to indicate
* explicitly through req->req.zero. This is needed to enable multi-td
@@ -1715,34 +1715,31 @@ static void dtd_complete_irq(struct fsl_udc *udc)
}
}
+static inline enum usb_device_speed portscx_device_speed(u32 reg)
+{
+ switch (speed & PORTSCX_PORT_SPEED_MASK) {
+ case PORTSCX_PORT_SPEED_HIGH:
+ return USB_SPEED_HIGH;
+ case PORTSCX_PORT_SPEED_FULL:
+ return USB_SPEED_FULL;
+ case PORTSCX_PORT_SPEED_LOW:
+ return USB_SPEED_LOW;
+ default:
+ return USB_SPEED_UNKNOWN;
+ }
+}
+
/* Process a port change interrupt */
static void port_change_irq(struct fsl_udc *udc)
{
- u32 speed;
-
if (udc->bus_reset)
udc->bus_reset = 0;
/* Bus resetting is finished */
- if (!(fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET)) {
+ if (!(fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET))
/* Get the speed */
- speed = (fsl_readl(&dr_regs->portsc1)
- & PORTSCX_PORT_SPEED_MASK);
- switch (speed) {
- case PORTSCX_PORT_SPEED_HIGH:
- udc->gadget.speed = USB_SPEED_HIGH;
- break;
- case PORTSCX_PORT_SPEED_FULL:
- udc->gadget.speed = USB_SPEED_FULL;
- break;
- case PORTSCX_PORT_SPEED_LOW:
- udc->gadget.speed = USB_SPEED_LOW;
- break;
- default:
- udc->gadget.speed = USB_SPEED_UNKNOWN;
- break;
- }
- }
+ udc->gadget.speed =
+ portscx_device_speed(fsl_readl(&dr_regs->portsc1));
/* Update USB state */
if (!udc->resume_state)
@@ -2167,20 +2164,8 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
default:
s = "None"; break;
}
- s;} ), ( {
- char *s;
- switch (tmp_reg & PORTSCX_PORT_SPEED_UNDEF) {
- case PORTSCX_PORT_SPEED_FULL:
- s = "Full Speed"; break;
- case PORTSCX_PORT_SPEED_LOW:
- s = "Low Speed"; break;
- case PORTSCX_PORT_SPEED_HIGH:
- s = "High Speed"; break;
- default:
- s = "Undefined"; break;
- }
- s;
- } ),
+ s;} ),
+ usb_speed_string(portscx_device_speed(tmp_reg)),
(tmp_reg & PORTSCX_PHY_LOW_POWER_SPD) ?
"Normal PHY mode" : "Low power mode",
(tmp_reg & PORTSCX_PORT_RESET) ? "In Reset" :
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index 4ec888f9000..e593f2849fa 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -8,16 +8,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/dma-mapping.h>
#include <linux/err.h>
@@ -220,7 +210,7 @@ static int config_ep(struct fusb300_ep *ep,
info.type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
info.dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
- info.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+ info.maxpacket = usb_endpoint_maxp(desc);
info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
if ((info.type == USB_ENDPOINT_XFER_INT) ||
@@ -1479,7 +1469,7 @@ static int __init fusb300_probe(struct platform_device *pdev)
fusb300->gadget.name = udc_name;
fusb300->reg = reg;
- ret = request_irq(ires->start, fusb300_irq, IRQF_DISABLED | IRQF_SHARED,
+ ret = request_irq(ires->start, fusb300_irq, IRQF_SHARED,
udc_name, fusb300);
if (ret < 0) {
pr_err("request_irq error (%d)\n", ret);
@@ -1487,7 +1477,7 @@ static int __init fusb300_probe(struct platform_device *pdev)
}
ret = request_irq(ires1->start, fusb300_irq,
- IRQF_DISABLED | IRQF_SHARED, udc_name, fusb300);
+ IRQF_SHARED, udc_name, fusb300);
if (ret < 0) {
pr_err("request_irq1 error (%d)\n", ret);
goto clean_up;
diff --git a/drivers/usb/gadget/fusb300_udc.h b/drivers/usb/gadget/fusb300_udc.h
index f51aa2ef1f9..92745bd0306 100644
--- a/drivers/usb/gadget/fusb300_udc.h
+++ b/drivers/usb/gadget/fusb300_udc.h
@@ -8,16 +8,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
index 704c2800ac0..0519d77915e 100644
--- a/drivers/usb/gadget/g_ffs.c
+++ b/drivers/usb/gadget/g_ffs.c
@@ -8,15 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) "g_ffs: " fmt
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index f3a83cd0ef5..a8855d0b7f3 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -31,6 +31,7 @@
#define gadget_is_ci13xxx_msm(g) (!strcmp("ci13xxx_msm", (g)->name))
#define gadget_is_ci13xxx_pci(g) (!strcmp("ci13xxx_pci", (g)->name))
#define gadget_is_dummy(g) (!strcmp("dummy_udc", (g)->name))
+#define gadget_is_dwc3(g) (!strcmp("dwc3-gadget", (g)->name))
#define gadget_is_fsl_qe(g) (!strcmp("fsl_qe_udc", (g)->name))
#define gadget_is_fsl_usb2(g) (!strcmp("fsl-usb2-udc", (g)->name))
#define gadget_is_goku(g) (!strcmp("goku_udc", (g)->name))
@@ -115,6 +116,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
return 0x30;
else if (gadget_is_net2272(gadget))
return 0x31;
+ else if (gadget_is_dwc3(gadget))
+ return 0x32;
return -ENOENT;
}
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 8b9220e128a..8fcde37aa6d 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -36,134 +36,43 @@
#include "gadget_chips.h"
-
-/*
- * Kbuild is not very cooperative with respect to linking separately
- * compiled library objects into one module. So for now we won't use
- * separate compilation ... ensuring init/exit sections work to shrink
- * the runtime footprint, and giving us at least some parts of what
- * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
- */
+#include "composite.c"
#include "usbstring.c"
#include "config.c"
#include "epautoconf.c"
+#include "f_midi.c"
/*-------------------------------------------------------------------------*/
-
MODULE_AUTHOR("Ben Williamson");
MODULE_LICENSE("GPL v2");
-#define DRIVER_VERSION "25 Jul 2006"
-
static const char shortname[] = "g_midi";
static const char longname[] = "MIDI Gadget";
static int index = SNDRV_DEFAULT_IDX1;
-static char *id = SNDRV_DEFAULT_STR1;
-
-module_param(index, int, 0444);
+module_param(index, int, S_IRUGO);
MODULE_PARM_DESC(index, "Index value for the USB MIDI Gadget adapter.");
-module_param(id, charp, 0444);
-MODULE_PARM_DESC(id, "ID string for the USB MIDI Gadget adapter.");
-
-/* Some systems will want different product identifiers published in the
- * device descriptor, either numbers or strings or both. These string
- * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
- */
-
-static ushort idVendor;
-module_param(idVendor, ushort, S_IRUGO);
-MODULE_PARM_DESC(idVendor, "USB Vendor ID");
-
-static ushort idProduct;
-module_param(idProduct, ushort, S_IRUGO);
-MODULE_PARM_DESC(idProduct, "USB Product ID");
-
-static ushort bcdDevice;
-module_param(bcdDevice, ushort, S_IRUGO);
-MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
-
-static char *iManufacturer;
-module_param(iManufacturer, charp, S_IRUGO);
-MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
-
-static char *iProduct;
-module_param(iProduct, charp, S_IRUGO);
-MODULE_PARM_DESC(iProduct, "USB Product string");
-
-static char *iSerialNumber;
-module_param(iSerialNumber, charp, S_IRUGO);
-MODULE_PARM_DESC(iSerialNumber, "SerialNumber");
-
-/*
- * this version autoconfigures as much as possible,
- * which is reasonable for most "bulk-only" drivers.
- */
-static const char *EP_IN_NAME;
-static const char *EP_OUT_NAME;
+static char *id = SNDRV_DEFAULT_STR1;
+module_param(id, charp, S_IRUGO);
+MODULE_PARM_DESC(id, "ID string for the USB MIDI Gadget adapter.");
-/* big enough to hold our biggest descriptor */
-#define USB_BUFSIZ 256
-
-
-/* This is a gadget, and the IN/OUT naming is from the host's perspective.
- USB -> OUT endpoint -> rawmidi
- USB <- IN endpoint <- rawmidi */
-struct gmidi_in_port {
- struct gmidi_device* dev;
- int active;
- uint8_t cable; /* cable number << 4 */
- uint8_t state;
-#define STATE_UNKNOWN 0
-#define STATE_1PARAM 1
-#define STATE_2PARAM_1 2
-#define STATE_2PARAM_2 3
-#define STATE_SYSEX_0 4
-#define STATE_SYSEX_1 5
-#define STATE_SYSEX_2 6
- uint8_t data[2];
-};
-
-struct gmidi_device {
- spinlock_t lock;
- struct usb_gadget *gadget;
- struct usb_request *req; /* for control responses */
- u8 config;
- struct usb_ep *in_ep, *out_ep;
- struct snd_card *card;
- struct snd_rawmidi *rmidi;
- struct snd_rawmidi_substream *in_substream;
- struct snd_rawmidi_substream *out_substream;
-
- /* For the moment we only support one port in
- each direction, but in_port is kept as a
- separate struct so we can have more later. */
- struct gmidi_in_port in_port;
- unsigned long out_triggered;
- struct tasklet_struct tasklet;
-};
-
-static void gmidi_transmit(struct gmidi_device* dev, struct usb_request* req);
-
-
-#define DBG(d, fmt, args...) \
- dev_dbg(&(d)->gadget->dev , fmt , ## args)
-#define VDBG(d, fmt, args...) \
- dev_vdbg(&(d)->gadget->dev , fmt , ## args)
-#define ERROR(d, fmt, args...) \
- dev_err(&(d)->gadget->dev , fmt , ## args)
-#define INFO(d, fmt, args...) \
- dev_info(&(d)->gadget->dev , fmt , ## args)
-
-
-static unsigned buflen = 256;
-static unsigned qlen = 32;
-
+static unsigned int buflen = 256;
module_param(buflen, uint, S_IRUGO);
+MODULE_PARM_DESC(buflen, "MIDI buffer length");
+
+static unsigned int qlen = 32;
module_param(qlen, uint, S_IRUGO);
+MODULE_PARM_DESC(qlen, "USB read request queue length");
+static unsigned int in_ports = 1;
+module_param(in_ports, uint, S_IRUGO);
+MODULE_PARM_DESC(in_ports, "Number of MIDI input ports");
+
+static unsigned int out_ports = 1;
+module_param(out_ports, uint, S_IRUGO);
+MODULE_PARM_DESC(out_ports, "Number of MIDI output ports");
/* Thanks to Grey Innovation for donating this product ID.
*
@@ -173,1149 +82,124 @@ module_param(qlen, uint, S_IRUGO);
#define DRIVER_VENDOR_NUM 0x17b3 /* Grey Innovation */
#define DRIVER_PRODUCT_NUM 0x0004 /* Linux-USB "MIDI Gadget" */
+/* string IDs are assigned dynamically */
-/*
- * DESCRIPTORS ... most are static, but strings and (full)
- * configuration descriptors are built on demand.
- */
-
-#define STRING_MANUFACTURER 25
-#define STRING_PRODUCT 42
-#define STRING_SERIAL 101
-#define STRING_MIDI_GADGET 250
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+#define STRING_DESCRIPTION_IDX 2
-/* We only have the one configuration, it's number 1. */
-#define GMIDI_CONFIG 1
-
-/* We have two interfaces- AudioControl and MIDIStreaming */
-#define GMIDI_AC_INTERFACE 0
-#define GMIDI_MS_INTERFACE 1
-#define GMIDI_NUM_INTERFACES 2
-
-DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
-DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
-DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(1);
-
-/* B.1 Device Descriptor */
static struct usb_device_descriptor device_desc = {
.bLength = USB_DT_DEVICE_SIZE,
.bDescriptorType = USB_DT_DEVICE,
- .bcdUSB = cpu_to_le16(0x0200),
+ .bcdUSB = __constant_cpu_to_le16(0x0200),
.bDeviceClass = USB_CLASS_PER_INTERFACE,
- .idVendor = cpu_to_le16(DRIVER_VENDOR_NUM),
- .idProduct = cpu_to_le16(DRIVER_PRODUCT_NUM),
- .iManufacturer = STRING_MANUFACTURER,
- .iProduct = STRING_PRODUCT,
+ .idVendor = __constant_cpu_to_le16(DRIVER_VENDOR_NUM),
+ .idProduct = __constant_cpu_to_le16(DRIVER_PRODUCT_NUM),
+ /* .iManufacturer = DYNAMIC */
+ /* .iProduct = DYNAMIC */
.bNumConfigurations = 1,
};
-/* B.2 Configuration Descriptor */
-static struct usb_config_descriptor config_desc = {
- .bLength = USB_DT_CONFIG_SIZE,
- .bDescriptorType = USB_DT_CONFIG,
- /* compute wTotalLength on the fly */
- .bNumInterfaces = GMIDI_NUM_INTERFACES,
- .bConfigurationValue = GMIDI_CONFIG,
- .iConfiguration = STRING_MIDI_GADGET,
- /*
- * FIXME: When embedding this driver in a device,
- * these need to be set to reflect the actual
- * power properties of the device. Is it selfpowered?
- */
- .bmAttributes = USB_CONFIG_ATT_ONE,
- .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
-};
-
-/* B.3.1 Standard AC Interface Descriptor */
-static const struct usb_interface_descriptor ac_interface_desc = {
- .bLength = USB_DT_INTERFACE_SIZE,
- .bDescriptorType = USB_DT_INTERFACE,
- .bInterfaceNumber = GMIDI_AC_INTERFACE,
- .bNumEndpoints = 0,
- .bInterfaceClass = USB_CLASS_AUDIO,
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
- .iInterface = STRING_MIDI_GADGET,
-};
-
-/* B.3.2 Class-Specific AC Interface Descriptor */
-static const struct uac1_ac_header_descriptor_1 ac_header_desc = {
- .bLength = UAC_DT_AC_HEADER_SIZE(1),
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = USB_MS_HEADER,
- .bcdADC = cpu_to_le16(0x0100),
- .wTotalLength = cpu_to_le16(UAC_DT_AC_HEADER_SIZE(1)),
- .bInCollection = 1,
- .baInterfaceNr = {
- [0] = GMIDI_MS_INTERFACE,
- }
-};
-
-/* B.4.1 Standard MS Interface Descriptor */
-static const struct usb_interface_descriptor ms_interface_desc = {
- .bLength = USB_DT_INTERFACE_SIZE,
- .bDescriptorType = USB_DT_INTERFACE,
- .bInterfaceNumber = GMIDI_MS_INTERFACE,
- .bNumEndpoints = 2,
- .bInterfaceClass = USB_CLASS_AUDIO,
- .bInterfaceSubClass = USB_SUBCLASS_MIDISTREAMING,
- .iInterface = STRING_MIDI_GADGET,
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = "Grey Innovation",
+ [STRING_PRODUCT_IDX].s = "MIDI Gadget",
+ [STRING_DESCRIPTION_IDX].s = "MIDI",
+ { } /* end of list */
};
-/* B.4.2 Class-Specific MS Interface Descriptor */
-static const struct usb_ms_header_descriptor ms_header_desc = {
- .bLength = USB_DT_MS_HEADER_SIZE,
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = USB_MS_HEADER,
- .bcdMSC = cpu_to_le16(0x0100),
- .wTotalLength = cpu_to_le16(USB_DT_MS_HEADER_SIZE
- + 2*USB_DT_MIDI_IN_SIZE
- + 2*USB_DT_MIDI_OUT_SIZE(1)),
-};
-
-#define JACK_IN_EMB 1
-#define JACK_IN_EXT 2
-#define JACK_OUT_EMB 3
-#define JACK_OUT_EXT 4
-
-/* B.4.3 MIDI IN Jack Descriptors */
-static const struct usb_midi_in_jack_descriptor jack_in_emb_desc = {
- .bLength = USB_DT_MIDI_IN_SIZE,
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = USB_MS_MIDI_IN_JACK,
- .bJackType = USB_MS_EMBEDDED,
- .bJackID = JACK_IN_EMB,
-};
-
-static const struct usb_midi_in_jack_descriptor jack_in_ext_desc = {
- .bLength = USB_DT_MIDI_IN_SIZE,
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = USB_MS_MIDI_IN_JACK,
- .bJackType = USB_MS_EXTERNAL,
- .bJackID = JACK_IN_EXT,
-};
-
-/* B.4.4 MIDI OUT Jack Descriptors */
-static const struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc = {
- .bLength = USB_DT_MIDI_OUT_SIZE(1),
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = USB_MS_MIDI_OUT_JACK,
- .bJackType = USB_MS_EMBEDDED,
- .bJackID = JACK_OUT_EMB,
- .bNrInputPins = 1,
- .pins = {
- [0] = {
- .baSourceID = JACK_IN_EXT,
- .baSourcePin = 1,
- }
- }
-};
-
-static const struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc = {
- .bLength = USB_DT_MIDI_OUT_SIZE(1),
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = USB_MS_MIDI_OUT_JACK,
- .bJackType = USB_MS_EXTERNAL,
- .bJackID = JACK_OUT_EXT,
- .bNrInputPins = 1,
- .pins = {
- [0] = {
- .baSourceID = JACK_IN_EMB,
- .baSourcePin = 1,
- }
- }
-};
-
-/* B.5.1 Standard Bulk OUT Endpoint Descriptor */
-static struct usb_endpoint_descriptor bulk_out_desc = {
- .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_OUT,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
-};
-
-/* B.5.2 Class-specific MS Bulk OUT Endpoint Descriptor */
-static const struct usb_ms_endpoint_descriptor_1 ms_out_desc = {
- .bLength = USB_DT_MS_ENDPOINT_SIZE(1),
- .bDescriptorType = USB_DT_CS_ENDPOINT,
- .bDescriptorSubtype = USB_MS_GENERAL,
- .bNumEmbMIDIJack = 1,
- .baAssocJackID = {
- [0] = JACK_IN_EMB,
- }
-};
-
-/* B.6.1 Standard Bulk IN Endpoint Descriptor */
-static struct usb_endpoint_descriptor bulk_in_desc = {
- .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
-};
-
-/* B.6.2 Class-specific MS Bulk IN Endpoint Descriptor */
-static const struct usb_ms_endpoint_descriptor_1 ms_in_desc = {
- .bLength = USB_DT_MS_ENDPOINT_SIZE(1),
- .bDescriptorType = USB_DT_CS_ENDPOINT,
- .bDescriptorSubtype = USB_MS_GENERAL,
- .bNumEmbMIDIJack = 1,
- .baAssocJackID = {
- [0] = JACK_OUT_EMB,
- }
-};
-
-static const struct usb_descriptor_header *gmidi_function [] = {
- (struct usb_descriptor_header *)&ac_interface_desc,
- (struct usb_descriptor_header *)&ac_header_desc,
- (struct usb_descriptor_header *)&ms_interface_desc,
-
- (struct usb_descriptor_header *)&ms_header_desc,
- (struct usb_descriptor_header *)&jack_in_emb_desc,
- (struct usb_descriptor_header *)&jack_in_ext_desc,
- (struct usb_descriptor_header *)&jack_out_emb_desc,
- (struct usb_descriptor_header *)&jack_out_ext_desc,
- /* If you add more jacks, update ms_header_desc.wTotalLength */
-
- (struct usb_descriptor_header *)&bulk_out_desc,
- (struct usb_descriptor_header *)&ms_out_desc,
- (struct usb_descriptor_header *)&bulk_in_desc,
- (struct usb_descriptor_header *)&ms_in_desc,
- NULL,
-};
-
-static char manufacturer[50];
-static char product_desc[40] = "MIDI Gadget";
-static char serial_number[20];
-
-/* static strings, in UTF-8 */
-static struct usb_string strings [] = {
- { STRING_MANUFACTURER, manufacturer, },
- { STRING_PRODUCT, product_desc, },
- { STRING_SERIAL, serial_number, },
- { STRING_MIDI_GADGET, longname, },
- { } /* end of list */
-};
-
-static struct usb_gadget_strings stringtab = {
+static struct usb_gadget_strings stringtab_dev = {
.language = 0x0409, /* en-us */
- .strings = strings,
+ .strings = strings_dev,
};
-static int config_buf(struct usb_gadget *gadget,
- u8 *buf, u8 type, unsigned index)
-{
- int len;
-
- /* only one configuration */
- if (index != 0) {
- return -EINVAL;
- }
- len = usb_gadget_config_buf(&config_desc,
- buf, USB_BUFSIZ, gmidi_function);
- if (len < 0) {
- return len;
- }
- ((struct usb_config_descriptor *)buf)->bDescriptorType = type;
- return len;
-}
-
-static struct usb_request *alloc_ep_req(struct usb_ep *ep, unsigned length)
-{
- struct usb_request *req;
-
- req = usb_ep_alloc_request(ep, GFP_ATOMIC);
- if (req) {
- req->length = length;
- req->buf = kmalloc(length, GFP_ATOMIC);
- if (!req->buf) {
- usb_ep_free_request(ep, req);
- req = NULL;
- }
- }
- return req;
-}
-
-static void free_ep_req(struct usb_ep *ep, struct usb_request *req)
-{
- kfree(req->buf);
- usb_ep_free_request(ep, req);
-}
-
-static const uint8_t gmidi_cin_length[] = {
- 0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
};
-/*
- * Receives a chunk of MIDI data.
- */
-static void gmidi_read_data(struct usb_ep *ep, int cable,
- uint8_t *data, int length)
-{
- struct gmidi_device *dev = ep->driver_data;
- /* cable is ignored, because for now we only have one. */
-
- if (!dev->out_substream) {
- /* Nobody is listening - throw it on the floor. */
- return;
- }
- if (!test_bit(dev->out_substream->number, &dev->out_triggered)) {
- return;
- }
- snd_rawmidi_receive(dev->out_substream, data, length);
-}
-
-static void gmidi_handle_out_data(struct usb_ep *ep, struct usb_request *req)
-{
- unsigned i;
- u8 *buf = req->buf;
-
- for (i = 0; i + 3 < req->actual; i += 4) {
- if (buf[i] != 0) {
- int cable = buf[i] >> 4;
- int length = gmidi_cin_length[buf[i] & 0x0f];
- gmidi_read_data(ep, cable, &buf[i + 1], length);
- }
- }
-}
-
-static void gmidi_complete(struct usb_ep *ep, struct usb_request *req)
-{
- struct gmidi_device *dev = ep->driver_data;
- int status = req->status;
-
- switch (status) {
- case 0: /* normal completion */
- if (ep == dev->out_ep) {
- /* we received stuff.
- req is queued again, below */
- gmidi_handle_out_data(ep, req);
- } else if (ep == dev->in_ep) {
- /* our transmit completed.
- see if there's more to go.
- gmidi_transmit eats req, don't queue it again. */
- gmidi_transmit(dev, req);
- return;
- }
- break;
-
- /* this endpoint is normally active while we're configured */
- case -ECONNABORTED: /* hardware forced ep reset */
- case -ECONNRESET: /* request dequeued */
- case -ESHUTDOWN: /* disconnect from host */
- VDBG(dev, "%s gone (%d), %d/%d\n", ep->name, status,
- req->actual, req->length);
- if (ep == dev->out_ep) {
- gmidi_handle_out_data(ep, req);
- }
- free_ep_req(ep, req);
- return;
-
- case -EOVERFLOW: /* buffer overrun on read means that
- * we didn't provide a big enough
- * buffer.
- */
- default:
- DBG(dev, "%s complete --> %d, %d/%d\n", ep->name,
- status, req->actual, req->length);
- break;
- case -EREMOTEIO: /* short read */
- break;
- }
-
- status = usb_ep_queue(ep, req, GFP_ATOMIC);
- if (status) {
- ERROR(dev, "kill %s: resubmit %d bytes --> %d\n",
- ep->name, req->length, status);
- usb_ep_set_halt(ep);
- /* FIXME recover later ... somehow */
- }
-}
-
-static int set_gmidi_config(struct gmidi_device *dev, gfp_t gfp_flags)
-{
- int err = 0;
- struct usb_request *req;
- struct usb_ep *ep;
- unsigned i;
-
- dev->in_ep->desc = &bulk_in_desc;
- err = usb_ep_enable(dev->in_ep);
- if (err) {
- ERROR(dev, "can't start %s: %d\n", dev->in_ep->name, err);
- goto fail;
- }
- dev->in_ep->driver_data = dev;
-
- dev->out_ep->desc = &bulk_out_desc;
- err = usb_ep_enable(dev->out_ep);
- if (err) {
- ERROR(dev, "can't start %s: %d\n", dev->out_ep->name, err);
- goto fail;
- }
- dev->out_ep->driver_data = dev;
-
- /* allocate a bunch of read buffers and queue them all at once. */
- ep = dev->out_ep;
- for (i = 0; i < qlen && err == 0; i++) {
- req = alloc_ep_req(ep, buflen);
- if (req) {
- req->complete = gmidi_complete;
- err = usb_ep_queue(ep, req, GFP_ATOMIC);
- if (err) {
- DBG(dev, "%s queue req: %d\n", ep->name, err);
- }
- } else {
- err = -ENOMEM;
- }
- }
-fail:
- /* caller is responsible for cleanup on error */
- return err;
-}
-
-
-static void gmidi_reset_config(struct gmidi_device *dev)
-{
- if (dev->config == 0) {
- return;
- }
-
- DBG(dev, "reset config\n");
-
- /* just disable endpoints, forcing completion of pending i/o.
- * all our completion handlers free their requests in this case.
- */
- usb_ep_disable(dev->in_ep);
- usb_ep_disable(dev->out_ep);
- dev->config = 0;
-}
-
-/* change our operational config. this code must agree with the code
- * that returns config descriptors, and altsetting code.
- *
- * it's also responsible for power management interactions. some
- * configurations might not work with our current power sources.
- *
- * note that some device controller hardware will constrain what this
- * code can do, perhaps by disallowing more than one configuration or
- * by limiting configuration choices (like the pxa2xx).
- */
-static int
-gmidi_set_config(struct gmidi_device *dev, unsigned number, gfp_t gfp_flags)
-{
- int result = 0;
- struct usb_gadget *gadget = dev->gadget;
-
-#if 0
- /* FIXME */
- /* Hacking this bit out fixes a bug where on receipt of two
- USB_REQ_SET_CONFIGURATION messages, we end up with no
- buffered OUT requests waiting for data. This is clearly
- hiding a bug elsewhere, because if the config didn't
- change then we really shouldn't do anything. */
- /* Having said that, when we do "change" from config 1
- to config 1, we at least gmidi_reset_config() which
- clears out any requests on endpoints, so it's not like
- we leak or anything. */
- if (number == dev->config) {
- return 0;
- }
-#endif
-
- gmidi_reset_config(dev);
-
- switch (number) {
- case GMIDI_CONFIG:
- result = set_gmidi_config(dev, gfp_flags);
- break;
- default:
- result = -EINVAL;
- /* FALL THROUGH */
- case 0:
- return result;
- }
-
- if (!result && (!dev->in_ep || !dev->out_ep)) {
- result = -ENODEV;
- }
- if (result) {
- gmidi_reset_config(dev);
- } else {
- char *speed;
-
- switch (gadget->speed) {
- case USB_SPEED_LOW: speed = "low"; break;
- case USB_SPEED_FULL: speed = "full"; break;
- case USB_SPEED_HIGH: speed = "high"; break;
- default: speed = "?"; break;
- }
-
- dev->config = number;
- INFO(dev, "%s speed\n", speed);
- }
- return result;
-}
-
-
-static void gmidi_setup_complete(struct usb_ep *ep, struct usb_request *req)
-{
- if (req->status || req->actual != req->length) {
- DBG((struct gmidi_device *) ep->driver_data,
- "setup complete --> %d, %d/%d\n",
- req->status, req->actual, req->length);
- }
-}
-
-/*
- * The setup() callback implements all the ep0 functionality that's
- * not handled lower down, in hardware or the hardware driver (like
- * device and endpoint feature flags, and their status). It's all
- * housekeeping for the gadget function we're implementing. Most of
- * the work is in config-specific setup.
- */
-static int gmidi_setup(struct usb_gadget *gadget,
- const struct usb_ctrlrequest *ctrl)
-{
- struct gmidi_device *dev = get_gadget_data(gadget);
- struct usb_request *req = dev->req;
- int value = -EOPNOTSUPP;
- u16 w_index = le16_to_cpu(ctrl->wIndex);
- u16 w_value = le16_to_cpu(ctrl->wValue);
- u16 w_length = le16_to_cpu(ctrl->wLength);
-
- /* usually this stores reply data in the pre-allocated ep0 buffer,
- * but config change events will reconfigure hardware.
- */
- req->zero = 0;
- switch (ctrl->bRequest) {
-
- case USB_REQ_GET_DESCRIPTOR:
- if (ctrl->bRequestType != USB_DIR_IN) {
- goto unknown;
- }
- switch (w_value >> 8) {
-
- case USB_DT_DEVICE:
- device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
- value = min(w_length, (u16) sizeof(device_desc));
- memcpy(req->buf, &device_desc, value);
- break;
- case USB_DT_CONFIG:
- value = config_buf(gadget, req->buf,
- w_value >> 8,
- w_value & 0xff);
- if (value >= 0) {
- value = min(w_length, (u16)value);
- }
- break;
-
- case USB_DT_STRING:
- /* wIndex == language code.
- * this driver only handles one language, you can
- * add string tables for other languages, using
- * any UTF-8 characters
- */
- value = usb_gadget_get_string(&stringtab,
- w_value & 0xff, req->buf);
- if (value >= 0) {
- value = min(w_length, (u16)value);
- }
- break;
- }
- break;
-
- /* currently two configs, two speeds */
- case USB_REQ_SET_CONFIGURATION:
- if (ctrl->bRequestType != 0) {
- goto unknown;
- }
- if (gadget->a_hnp_support) {
- DBG(dev, "HNP available\n");
- } else if (gadget->a_alt_hnp_support) {
- DBG(dev, "HNP needs a different root port\n");
- } else {
- VDBG(dev, "HNP inactive\n");
- }
- spin_lock(&dev->lock);
- value = gmidi_set_config(dev, w_value, GFP_ATOMIC);
- spin_unlock(&dev->lock);
- break;
- case USB_REQ_GET_CONFIGURATION:
- if (ctrl->bRequestType != USB_DIR_IN) {
- goto unknown;
- }
- *(u8 *)req->buf = dev->config;
- value = min(w_length, (u16)1);
- break;
-
- /* until we add altsetting support, or other interfaces,
- * only 0/0 are possible. pxa2xx only supports 0/0 (poorly)
- * and already killed pending endpoint I/O.
- */
- case USB_REQ_SET_INTERFACE:
- if (ctrl->bRequestType != USB_RECIP_INTERFACE) {
- goto unknown;
- }
- spin_lock(&dev->lock);
- if (dev->config && w_index < GMIDI_NUM_INTERFACES
- && w_value == 0)
- {
- u8 config = dev->config;
-
- /* resets interface configuration, forgets about
- * previous transaction state (queued bufs, etc)
- * and re-inits endpoint state (toggle etc)
- * no response queued, just zero status == success.
- * if we had more than one interface we couldn't
- * use this "reset the config" shortcut.
- */
- gmidi_reset_config(dev);
- gmidi_set_config(dev, config, GFP_ATOMIC);
- value = 0;
- }
- spin_unlock(&dev->lock);
- break;
- case USB_REQ_GET_INTERFACE:
- if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)) {
- goto unknown;
- }
- if (!dev->config) {
- break;
- }
- if (w_index >= GMIDI_NUM_INTERFACES) {
- value = -EDOM;
- break;
- }
- *(u8 *)req->buf = 0;
- value = min(w_length, (u16)1);
- break;
-
- default:
-unknown:
- VDBG(dev, "unknown control req%02x.%02x v%04x i%04x l%d\n",
- ctrl->bRequestType, ctrl->bRequest,
- w_value, w_index, w_length);
- }
-
- /* respond with data transfer before status phase? */
- if (value >= 0) {
- req->length = value;
- req->zero = value < w_length;
- value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
- if (value < 0) {
- DBG(dev, "ep_queue --> %d\n", value);
- req->status = 0;
- gmidi_setup_complete(gadget->ep0, req);
- }
- }
-
- /* device either stalls (value < 0) or reports success */
- return value;
-}
-
-static void gmidi_disconnect(struct usb_gadget *gadget)
-{
- struct gmidi_device *dev = get_gadget_data(gadget);
- unsigned long flags;
-
- spin_lock_irqsave(&dev->lock, flags);
- gmidi_reset_config(dev);
-
- /* a more significant application might have some non-usb
- * activities to quiesce here, saving resources like power
- * or pushing the notification up a network stack.
- */
- spin_unlock_irqrestore(&dev->lock, flags);
-
- /* next we may get setup() calls to enumerate new connections;
- * or an unbind() during shutdown (including removing module).
- */
-}
-
-static void /* __init_or_exit */ gmidi_unbind(struct usb_gadget *gadget)
-{
- struct gmidi_device *dev = get_gadget_data(gadget);
- struct snd_card *card;
-
- DBG(dev, "unbind\n");
-
- card = dev->card;
- dev->card = NULL;
- if (card) {
- snd_card_free(card);
- }
-
- /* we've already been disconnected ... no i/o is active */
- if (dev->req) {
- dev->req->length = USB_BUFSIZ;
- free_ep_req(gadget->ep0, dev->req);
- }
- kfree(dev);
- set_gadget_data(gadget, NULL);
-}
-
-static int gmidi_snd_free(struct snd_device *device)
+static int __exit midi_unbind(struct usb_composite_dev *dev)
{
return 0;
}
-static void gmidi_transmit_packet(struct usb_request *req, uint8_t p0,
- uint8_t p1, uint8_t p2, uint8_t p3)
-{
- unsigned length = req->length;
- u8 *buf = (u8 *)req->buf + length;
-
- buf[0] = p0;
- buf[1] = p1;
- buf[2] = p2;
- buf[3] = p3;
- req->length = length + 4;
-}
-
-/*
- * Converts MIDI commands to USB MIDI packets.
- */
-static void gmidi_transmit_byte(struct usb_request *req,
- struct gmidi_in_port *port, uint8_t b)
-{
- uint8_t p0 = port->cable;
-
- if (b >= 0xf8) {
- gmidi_transmit_packet(req, p0 | 0x0f, b, 0, 0);
- } else if (b >= 0xf0) {
- switch (b) {
- case 0xf0:
- port->data[0] = b;
- port->state = STATE_SYSEX_1;
- break;
- case 0xf1:
- case 0xf3:
- port->data[0] = b;
- port->state = STATE_1PARAM;
- break;
- case 0xf2:
- port->data[0] = b;
- port->state = STATE_2PARAM_1;
- break;
- case 0xf4:
- case 0xf5:
- port->state = STATE_UNKNOWN;
- break;
- case 0xf6:
- gmidi_transmit_packet(req, p0 | 0x05, 0xf6, 0, 0);
- port->state = STATE_UNKNOWN;
- break;
- case 0xf7:
- switch (port->state) {
- case STATE_SYSEX_0:
- gmidi_transmit_packet(req,
- p0 | 0x05, 0xf7, 0, 0);
- break;
- case STATE_SYSEX_1:
- gmidi_transmit_packet(req,
- p0 | 0x06, port->data[0], 0xf7, 0);
- break;
- case STATE_SYSEX_2:
- gmidi_transmit_packet(req,
- p0 | 0x07, port->data[0],
- port->data[1], 0xf7);
- break;
- }
- port->state = STATE_UNKNOWN;
- break;
- }
- } else if (b >= 0x80) {
- port->data[0] = b;
- if (b >= 0xc0 && b <= 0xdf)
- port->state = STATE_1PARAM;
- else
- port->state = STATE_2PARAM_1;
- } else { /* b < 0x80 */
- switch (port->state) {
- case STATE_1PARAM:
- if (port->data[0] < 0xf0) {
- p0 |= port->data[0] >> 4;
- } else {
- p0 |= 0x02;
- port->state = STATE_UNKNOWN;
- }
- gmidi_transmit_packet(req, p0, port->data[0], b, 0);
- break;
- case STATE_2PARAM_1:
- port->data[1] = b;
- port->state = STATE_2PARAM_2;
- break;
- case STATE_2PARAM_2:
- if (port->data[0] < 0xf0) {
- p0 |= port->data[0] >> 4;
- port->state = STATE_2PARAM_1;
- } else {
- p0 |= 0x03;
- port->state = STATE_UNKNOWN;
- }
- gmidi_transmit_packet(req,
- p0, port->data[0], port->data[1], b);
- break;
- case STATE_SYSEX_0:
- port->data[0] = b;
- port->state = STATE_SYSEX_1;
- break;
- case STATE_SYSEX_1:
- port->data[1] = b;
- port->state = STATE_SYSEX_2;
- break;
- case STATE_SYSEX_2:
- gmidi_transmit_packet(req,
- p0 | 0x04, port->data[0], port->data[1], b);
- port->state = STATE_SYSEX_0;
- break;
- }
- }
-}
-
-static void gmidi_transmit(struct gmidi_device *dev, struct usb_request *req)
-{
- struct usb_ep *ep = dev->in_ep;
- struct gmidi_in_port *port = &dev->in_port;
-
- if (!ep) {
- return;
- }
- if (!req) {
- req = alloc_ep_req(ep, buflen);
- }
- if (!req) {
- ERROR(dev, "gmidi_transmit: alloc_ep_request failed\n");
- return;
- }
- req->length = 0;
- req->complete = gmidi_complete;
-
- if (port->active) {
- while (req->length + 3 < buflen) {
- uint8_t b;
- if (snd_rawmidi_transmit(dev->in_substream, &b, 1)
- != 1)
- {
- port->active = 0;
- break;
- }
- gmidi_transmit_byte(req, port, b);
- }
- }
- if (req->length > 0) {
- usb_ep_queue(ep, req, GFP_ATOMIC);
- } else {
- free_ep_req(ep, req);
- }
-}
-
-static void gmidi_in_tasklet(unsigned long data)
-{
- struct gmidi_device *dev = (struct gmidi_device *)data;
-
- gmidi_transmit(dev, NULL);
-}
-
-static int gmidi_in_open(struct snd_rawmidi_substream *substream)
-{
- struct gmidi_device *dev = substream->rmidi->private_data;
-
- VDBG(dev, "gmidi_in_open\n");
- dev->in_substream = substream;
- dev->in_port.state = STATE_UNKNOWN;
- return 0;
-}
-
-static int gmidi_in_close(struct snd_rawmidi_substream *substream)
-{
- struct gmidi_device *dev = substream->rmidi->private_data;
-
- VDBG(dev, "gmidi_in_close\n");
- return 0;
-}
-
-static void gmidi_in_trigger(struct snd_rawmidi_substream *substream, int up)
-{
- struct gmidi_device *dev = substream->rmidi->private_data;
-
- VDBG(dev, "gmidi_in_trigger %d\n", up);
- dev->in_port.active = up;
- if (up) {
- tasklet_hi_schedule(&dev->tasklet);
- }
-}
-
-static int gmidi_out_open(struct snd_rawmidi_substream *substream)
-{
- struct gmidi_device *dev = substream->rmidi->private_data;
-
- VDBG(dev, "gmidi_out_open\n");
- dev->out_substream = substream;
- return 0;
-}
-
-static int gmidi_out_close(struct snd_rawmidi_substream *substream)
-{
- struct gmidi_device *dev = substream->rmidi->private_data;
-
- VDBG(dev, "gmidi_out_close\n");
- return 0;
-}
-
-static void gmidi_out_trigger(struct snd_rawmidi_substream *substream, int up)
-{
- struct gmidi_device *dev = substream->rmidi->private_data;
-
- VDBG(dev, "gmidi_out_trigger %d\n", up);
- if (up) {
- set_bit(substream->number, &dev->out_triggered);
- } else {
- clear_bit(substream->number, &dev->out_triggered);
- }
-}
-
-static struct snd_rawmidi_ops gmidi_in_ops = {
- .open = gmidi_in_open,
- .close = gmidi_in_close,
- .trigger = gmidi_in_trigger,
+static struct usb_configuration midi_config = {
+ .label = "MIDI Gadget",
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_ONE,
+ .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
};
-static struct snd_rawmidi_ops gmidi_out_ops = {
- .open = gmidi_out_open,
- .close = gmidi_out_close,
- .trigger = gmidi_out_trigger
-};
-
-/* register as a sound "card" */
-static int gmidi_register_card(struct gmidi_device *dev)
+static int __init midi_bind_config(struct usb_configuration *c)
{
- struct snd_card *card;
- struct snd_rawmidi *rmidi;
- int err;
- int out_ports = 1;
- int in_ports = 1;
- static struct snd_device_ops ops = {
- .dev_free = gmidi_snd_free,
- };
-
- err = snd_card_create(index, id, THIS_MODULE, 0, &card);
- if (err < 0) {
- ERROR(dev, "snd_card_create failed\n");
- goto fail;
- }
- dev->card = card;
-
- err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, dev, &ops);
- if (err < 0) {
- ERROR(dev, "snd_device_new failed: error %d\n", err);
- goto fail;
- }
-
- strcpy(card->driver, longname);
- strcpy(card->longname, longname);
- strcpy(card->shortname, shortname);
-
- /* Set up rawmidi */
- dev->in_port.dev = dev;
- dev->in_port.active = 0;
- snd_component_add(card, "MIDI");
- err = snd_rawmidi_new(card, "USB MIDI Gadget", 0,
- out_ports, in_ports, &rmidi);
- if (err < 0) {
- ERROR(dev, "snd_rawmidi_new failed: error %d\n", err);
- goto fail;
- }
- dev->rmidi = rmidi;
- strcpy(rmidi->name, card->shortname);
- rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT |
- SNDRV_RAWMIDI_INFO_INPUT |
- SNDRV_RAWMIDI_INFO_DUPLEX;
- rmidi->private_data = dev;
-
- /* Yes, rawmidi OUTPUT = USB IN, and rawmidi INPUT = USB OUT.
- It's an upside-down world being a gadget. */
- snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &gmidi_in_ops);
- snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &gmidi_out_ops);
-
- snd_card_set_dev(card, &dev->gadget->dev);
-
- /* register it - we're ready to go */
- err = snd_card_register(card);
- if (err < 0) {
- ERROR(dev, "snd_card_register failed\n");
- goto fail;
- }
-
- VDBG(dev, "gmidi_register_card finished ok\n");
- return 0;
-
-fail:
- if (dev->card) {
- snd_card_free(dev->card);
- dev->card = NULL;
- }
- return err;
+ return f_midi_bind_config(c, index, id,
+ in_ports, out_ports,
+ buflen, qlen);
}
-/*
- * Creates an output endpoint, and initializes output ports.
- */
-static int __init gmidi_bind(struct usb_gadget *gadget)
+static int __init midi_bind(struct usb_composite_dev *cdev)
{
- struct gmidi_device *dev;
- struct usb_ep *in_ep, *out_ep;
- int gcnum, err = 0;
+ struct usb_gadget *gadget = cdev->gadget;
+ int gcnum, status;
- /* support optional vendor/distro customization */
- if (idVendor) {
- if (!idProduct) {
- pr_err("idVendor needs idProduct!\n");
- return -ENODEV;
- }
- device_desc.idVendor = cpu_to_le16(idVendor);
- device_desc.idProduct = cpu_to_le16(idProduct);
- if (bcdDevice) {
- device_desc.bcdDevice = cpu_to_le16(bcdDevice);
- }
- }
- if (iManufacturer) {
- strlcpy(manufacturer, iManufacturer, sizeof(manufacturer));
- } else {
- snprintf(manufacturer, sizeof(manufacturer), "%s %s with %s",
- init_utsname()->sysname, init_utsname()->release,
- gadget->name);
- }
- if (iProduct) {
- strlcpy(product_desc, iProduct, sizeof(product_desc));
- }
- if (iSerialNumber) {
- device_desc.iSerialNumber = STRING_SERIAL,
- strlcpy(serial_number, iSerialNumber, sizeof(serial_number));
- }
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ strings_dev[STRING_MANUFACTURER_IDX].id = status;
+ device_desc.iManufacturer = status;
- /* Bulk-only drivers like this one SHOULD be able to
- * autoconfigure on any sane usb controller driver,
- * but there may also be important quirks to address.
- */
- usb_ep_autoconfig_reset(gadget);
- in_ep = usb_ep_autoconfig(gadget, &bulk_in_desc);
- if (!in_ep) {
-autoconf_fail:
- pr_err("%s: can't autoconfigure on %s\n",
- shortname, gadget->name);
- return -ENODEV;
- }
- EP_IN_NAME = in_ep->name;
- in_ep->driver_data = in_ep; /* claim */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ strings_dev[STRING_PRODUCT_IDX].id = status;
+ device_desc.iProduct = status;
- out_ep = usb_ep_autoconfig(gadget, &bulk_out_desc);
- if (!out_ep) {
- goto autoconf_fail;
- }
- EP_OUT_NAME = out_ep->name;
- out_ep->driver_data = out_ep; /* claim */
+ /* config description */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ strings_dev[STRING_DESCRIPTION_IDX].id = status;
+
+ midi_config.iConfiguration = status;
gcnum = usb_gadget_controller_number(gadget);
- if (gcnum >= 0) {
- device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
- } else {
+ if (gcnum < 0) {
/* gmidi is so simple (no altsettings) that
* it SHOULD NOT have problems with bulk-capable hardware.
* so warn about unrecognized controllers, don't panic.
*/
pr_warning("%s: controller '%s' not recognized\n",
- shortname, gadget->name);
+ __func__, gadget->name);
device_desc.bcdDevice = cpu_to_le16(0x9999);
+ } else {
+ device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
}
+ status = usb_add_config(cdev, &midi_config, midi_bind_config);
+ if (status < 0)
+ return status;
- /* ok, we made sense of the hardware ... */
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- return -ENOMEM;
- }
- spin_lock_init(&dev->lock);
- dev->gadget = gadget;
- dev->in_ep = in_ep;
- dev->out_ep = out_ep;
- set_gadget_data(gadget, dev);
- tasklet_init(&dev->tasklet, gmidi_in_tasklet, (unsigned long)dev);
-
- /* preallocate control response and buffer */
- dev->req = alloc_ep_req(gadget->ep0, USB_BUFSIZ);
- if (!dev->req) {
- err = -ENOMEM;
- goto fail;
- }
-
- dev->req->complete = gmidi_setup_complete;
-
- gadget->ep0->driver_data = dev;
-
- INFO(dev, "%s, version: " DRIVER_VERSION "\n", longname);
- INFO(dev, "using %s, OUT %s IN %s\n", gadget->name,
- EP_OUT_NAME, EP_IN_NAME);
-
- /* register as an ALSA sound card */
- err = gmidi_register_card(dev);
- if (err < 0) {
- goto fail;
- }
-
- VDBG(dev, "gmidi_bind finished ok\n");
+ pr_info("%s\n", longname);
return 0;
-
-fail:
- gmidi_unbind(gadget);
- return err;
-}
-
-
-static void gmidi_suspend(struct usb_gadget *gadget)
-{
- struct gmidi_device *dev = get_gadget_data(gadget);
-
- if (gadget->speed == USB_SPEED_UNKNOWN) {
- return;
- }
-
- DBG(dev, "suspend\n");
-}
-
-static void gmidi_resume(struct usb_gadget *gadget)
-{
- struct gmidi_device *dev = get_gadget_data(gadget);
-
- DBG(dev, "resume\n");
}
-
-static struct usb_gadget_driver gmidi_driver = {
- .speed = USB_SPEED_FULL,
- .function = (char *)longname,
- .unbind = gmidi_unbind,
-
- .setup = gmidi_setup,
- .disconnect = gmidi_disconnect,
-
- .suspend = gmidi_suspend,
- .resume = gmidi_resume,
-
- .driver = {
- .name = (char *)shortname,
- .owner = THIS_MODULE,
- },
+static struct usb_composite_driver midi_driver = {
+ .name = (char *) longname,
+ .dev = &device_desc,
+ .strings = dev_strings,
+ .max_speed = USB_SPEED_HIGH,
+ .unbind = __exit_p(midi_unbind),
};
-static int __init gmidi_init(void)
+static int __init midi_init(void)
{
- return usb_gadget_probe_driver(&gmidi_driver, gmidi_bind);
+ return usb_composite_probe(&midi_driver, midi_bind);
}
-module_init(gmidi_init);
+module_init(midi_init);
-static void __exit gmidi_cleanup(void)
+static void __exit midi_cleanup(void)
{
- usb_gadget_unregister_driver(&gmidi_driver);
+ usb_composite_unregister(&midi_driver);
}
-module_exit(gmidi_cleanup);
+module_exit(midi_cleanup);
diff --git a/drivers/usb/gadget/hid.c b/drivers/usb/gadget/hid.c
index 9fb575034a0..f888c3ede86 100644
--- a/drivers/usb/gadget/hid.c
+++ b/drivers/usb/gadget/hid.c
@@ -9,15 +9,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index 692fd9b2248..2d978c0e7ce 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -689,7 +689,7 @@ static int imx_ep_enable(struct usb_ep *usb_ep,
return -EINVAL;
}
- if (imx_ep->fifosize < le16_to_cpu(desc->wMaxPacketSize)) {
+ if (imx_ep->fifosize < usb_endpoint_maxp(desc)) {
D_ERR(imx_usb->dev,
"<%s> bad %s maxpacket\n", __func__, usb_ep->name);
return -ERANGE;
@@ -1478,7 +1478,7 @@ static int __init imx_udc_probe(struct platform_device *pdev)
for (i = 0; i < IMX_USB_NB_EP + 1; i++) {
ret = request_irq(imx_usb->usbd_int[i], intr_handler(i),
- IRQF_DISABLED, driver_name, imx_usb);
+ 0, driver_name, imx_usb);
if (ret) {
dev_err(&pdev->dev, "can't get irq %i, err %d\n",
imx_usb->usbd_int[i], ret);
diff --git a/drivers/usb/gadget/imx_udc.h b/drivers/usb/gadget/imx_udc.h
index 7136c242b4e..d118fb77784 100644
--- a/drivers/usb/gadget/imx_udc.h
+++ b/drivers/usb/gadget/imx_udc.h
@@ -8,11 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_USB_GADGET_IMX_H
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 1b240990448..a392ec0d2d5 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -8,15 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
index a06e2c27b43..c9fa3bf5b37 100644
--- a/drivers/usb/gadget/langwell_udc.c
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -5,16 +5,6 @@
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
@@ -60,9 +50,6 @@ static const char driver_name[] = "langwell_udc";
static const char driver_desc[] = DRIVER_DESC;
-/* controller device global variable */
-static struct langwell_udc *the_controller;
-
/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor
langwell_ep0_desc = {
@@ -283,7 +270,7 @@ static int langwell_ep_enable(struct usb_ep *_ep,
if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- max = le16_to_cpu(desc->wMaxPacketSize);
+ max = usb_endpoint_maxp(desc);
/*
* disable HW zero length termination select
@@ -1321,9 +1308,12 @@ static int langwell_pullup(struct usb_gadget *_gadget, int is_on)
return 0;
}
-static int langwell_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *));
-static int langwell_stop(struct usb_gadget_driver *driver);
+static int langwell_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+
+static int langwell_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops langwell_ops = {
@@ -1345,8 +1335,8 @@ static const struct usb_gadget_ops langwell_ops = {
/* D+ pullup, software-controlled connect/disconnect to USB host */
.pullup = langwell_pullup,
- .start = langwell_start,
- .stop = langwell_stop,
+ .udc_start = langwell_start,
+ .udc_stop = langwell_stop,
};
@@ -1561,7 +1551,7 @@ static void stop_activity(struct langwell_udc *dev,
static ssize_t show_function(struct device *_dev,
struct device_attribute *attr, char *buf)
{
- struct langwell_udc *dev = the_controller;
+ struct langwell_udc *dev = dev_get_drvdata(_dev);
if (!dev->driver || !dev->driver->function
|| strlen(dev->driver->function) > PAGE_SIZE)
@@ -1572,11 +1562,25 @@ static ssize_t show_function(struct device *_dev,
static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
+static inline enum usb_device_speed lpm_device_speed(u32 reg)
+{
+ switch (LPM_PSPD(reg)) {
+ case LPM_SPEED_HIGH:
+ return USB_SPEED_HIGH;
+ case LPM_SPEED_FULL:
+ return USB_SPEED_FULL;
+ case LPM_SPEED_LOW:
+ return USB_SPEED_LOW;
+ default:
+ return USB_SPEED_UNKNOWN;
+ }
+}
+
/* device "langwell_udc" sysfs attribute file */
static ssize_t show_langwell_udc(struct device *_dev,
struct device_attribute *attr, char *buf)
{
- struct langwell_udc *dev = the_controller;
+ struct langwell_udc *dev = dev_get_drvdata(_dev);
struct langwell_request *req;
struct langwell_ep *ep = NULL;
char *next;
@@ -1700,20 +1704,7 @@ static ssize_t show_langwell_udc(struct device *_dev,
"BmAttributes: %d\n\n",
LPM_PTS(tmp_reg),
(tmp_reg & LPM_STS) ? 1 : 0,
- ({
- char *s;
- switch (LPM_PSPD(tmp_reg)) {
- case LPM_SPEED_FULL:
- s = "Full Speed"; break;
- case LPM_SPEED_LOW:
- s = "Low Speed"; break;
- case LPM_SPEED_HIGH:
- s = "High Speed"; break;
- default:
- s = "Unknown Speed"; break;
- }
- s;
- }),
+ usb_speed_string(lpm_device_speed(tmp_reg)),
(tmp_reg & LPM_PFSC) ? "Force Full Speed" : "Not Force",
(tmp_reg & LPM_PHCD) ? "Disabled" : "Enabled",
LPM_BA(tmp_reg));
@@ -1821,7 +1812,7 @@ static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL);
static ssize_t store_remote_wakeup(struct device *_dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct langwell_udc *dev = the_controller;
+ struct langwell_udc *dev = dev_get_drvdata(_dev);
unsigned long flags;
ssize_t rc = count;
@@ -1857,21 +1848,15 @@ static DEVICE_ATTR(remote_wakeup, S_IWUSR, NULL, store_remote_wakeup);
* the driver might get unbound.
*/
-static int langwell_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
+static int langwell_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct langwell_udc *dev = the_controller;
+ struct langwell_udc *dev = gadget_to_langwell(g);
unsigned long flags;
int retval;
- if (!dev)
- return -ENODEV;
-
dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
- if (dev->driver)
- return -EBUSY;
-
spin_lock_irqsave(&dev->lock, flags);
/* hook up the driver ... */
@@ -1881,18 +1866,9 @@ static int langwell_start(struct usb_gadget_driver *driver,
spin_unlock_irqrestore(&dev->lock, flags);
- retval = bind(&dev->gadget);
- if (retval) {
- dev_dbg(&dev->pdev->dev, "bind to driver %s --> %d\n",
- driver->driver.name, retval);
- dev->driver = NULL;
- dev->gadget.dev.driver = NULL;
- return retval;
- }
-
retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
if (retval)
- goto err_unbind;
+ goto err;
dev->usb_state = USB_STATE_ATTACHED;
dev->ep0_state = WAIT_FOR_SETUP;
@@ -1909,31 +1885,27 @@ static int langwell_start(struct usb_gadget_driver *driver,
dev_info(&dev->pdev->dev, "register driver: %s\n",
driver->driver.name);
dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+
return 0;
-err_unbind:
- driver->unbind(&dev->gadget);
+err:
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+
return retval;
}
/* unregister gadget driver */
-static int langwell_stop(struct usb_gadget_driver *driver)
+static int langwell_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct langwell_udc *dev = the_controller;
+ struct langwell_udc *dev = gadget_to_langwell(g);
unsigned long flags;
- if (!dev)
- return -ENODEV;
-
dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
- if (unlikely(!driver || !driver->unbind))
- return -EINVAL;
-
/* exit PHY low power suspend */
if (dev->pdev->device != 0x0829)
langwell_phy_low_power(dev, 0);
@@ -1956,8 +1928,6 @@ static int langwell_stop(struct usb_gadget_driver *driver)
stop_activity(dev, driver);
spin_unlock_irqrestore(&dev->lock, flags);
- /* unbind gadget driver */
- driver->unbind(&dev->gadget);
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
@@ -1966,6 +1936,7 @@ static int langwell_stop(struct usb_gadget_driver *driver)
dev_info(&dev->pdev->dev, "unregistered driver '%s'\n",
driver->driver.name);
dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+
return 0;
}
@@ -2657,12 +2628,10 @@ done:
dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
}
-
/* port change detect interrupt handler */
static void handle_port_change(struct langwell_udc *dev)
{
u32 portsc1, devlc;
- u32 speed;
dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
@@ -2677,24 +2646,9 @@ static void handle_port_change(struct langwell_udc *dev)
/* bus reset is finished */
if (!(portsc1 & PORTS_PR)) {
/* get the speed */
- speed = LPM_PSPD(devlc);
- switch (speed) {
- case LPM_SPEED_HIGH:
- dev->gadget.speed = USB_SPEED_HIGH;
- break;
- case LPM_SPEED_FULL:
- dev->gadget.speed = USB_SPEED_FULL;
- break;
- case LPM_SPEED_LOW:
- dev->gadget.speed = USB_SPEED_LOW;
- break;
- default:
- dev->gadget.speed = USB_SPEED_UNKNOWN;
- break;
- }
- dev_vdbg(&dev->pdev->dev,
- "speed = %d, dev->gadget.speed = %d\n",
- speed, dev->gadget.speed);
+ dev->gadget.speed = lpm_device_speed(devlc);
+ dev_vdbg(&dev->pdev->dev, "dev->gadget.speed = %d\n",
+ dev->gadget.speed);
}
/* LPM L0 to L1 */
@@ -2969,7 +2923,7 @@ static irqreturn_t langwell_irq(int irq, void *_dev)
handle_port_change(dev);
}
- /* suspend interrrupt */
+ /* suspend interrupt */
if (irq_sts & STS_SLI) {
dev_vdbg(&dev->pdev->dev, "suspend interrupt\n");
handle_bus_suspend(dev);
@@ -2999,7 +2953,7 @@ static irqreturn_t langwell_irq(int irq, void *_dev)
/* release device structure */
static void gadget_release(struct device *_dev)
{
- struct langwell_udc *dev = the_controller;
+ struct langwell_udc *dev = dev_get_drvdata(_dev);
dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
@@ -3057,7 +3011,7 @@ static void sram_deinit(struct langwell_udc *dev)
/* tear down the binding between this driver and the pci device */
static void langwell_udc_remove(struct pci_dev *pdev)
{
- struct langwell_udc *dev = the_controller;
+ struct langwell_udc *dev = pci_get_drvdata(pdev);
DECLARE_COMPLETION(done);
@@ -3124,8 +3078,6 @@ static void langwell_udc_remove(struct pci_dev *pdev)
/* free dev, wait for the release() finished */
wait_for_completion(&done);
-
- the_controller = NULL;
}
@@ -3144,11 +3096,6 @@ static int langwell_udc_probe(struct pci_dev *pdev,
size_t size;
int retval;
- if (the_controller) {
- dev_warn(&pdev->dev, "ignoring\n");
- return -EBUSY;
- }
-
/* alloc, and start init */
dev = kzalloc(sizeof *dev, GFP_KERNEL);
if (dev == NULL) {
@@ -3368,8 +3315,6 @@ static int langwell_udc_probe(struct pci_dev *pdev,
"After langwell_udc_probe(), print all registers:\n");
print_all_registers(dev);
- the_controller = dev;
-
retval = device_register(&dev->gadget.dev);
if (retval)
goto error;
@@ -3404,7 +3349,7 @@ error:
/* device controller suspend */
static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
{
- struct langwell_udc *dev = the_controller;
+ struct langwell_udc *dev = pci_get_drvdata(pdev);
dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
@@ -3452,7 +3397,7 @@ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
/* device controller resume */
static int langwell_udc_resume(struct pci_dev *pdev)
{
- struct langwell_udc *dev = the_controller;
+ struct langwell_udc *dev = pci_get_drvdata(pdev);
size_t size;
dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
@@ -3534,7 +3479,7 @@ static int langwell_udc_resume(struct pci_dev *pdev)
/* pci driver shutdown */
static void langwell_udc_shutdown(struct pci_dev *pdev)
{
- struct langwell_udc *dev = the_controller;
+ struct langwell_udc *dev = pci_get_drvdata(pdev);
u32 usbmode;
dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
diff --git a/drivers/usb/gadget/langwell_udc.h b/drivers/usb/gadget/langwell_udc.h
index f1d9c1bb04f..ef79e242b7b 100644
--- a/drivers/usb/gadget/langwell_udc.h
+++ b/drivers/usb/gadget/langwell_udc.h
@@ -5,16 +5,6 @@
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#include <linux/usb/langwell_udc.h>
@@ -231,3 +221,5 @@ struct langwell_udc {
u16 dev_status;
};
+#define gadget_to_langwell(g) container_of((g), struct langwell_udc, gadget)
+
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 491f825ed5c..91d0af2a24a 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -8,16 +8,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/module.h>
@@ -370,7 +360,7 @@ static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep,
ep->pipectr = get_pipectr_addr(pipenum);
ep->pipenum = pipenum;
- ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+ ep->ep.maxpacket = usb_endpoint_maxp(desc);
m66592->pipenum2ep[pipenum] = ep;
m66592->epaddr2ep[desc->bEndpointAddress&USB_ENDPOINT_NUMBER_MASK] = ep;
INIT_LIST_HEAD(&ep->queue);
@@ -447,7 +437,7 @@ static int alloc_pipe_config(struct m66592_ep *ep,
ep->type = info.type;
info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
- info.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+ info.maxpacket = usb_endpoint_maxp(desc);
info.interval = desc->bInterval;
if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
info.dir_in = 1;
@@ -1674,7 +1664,7 @@ static int __init m66592_probe(struct platform_device *pdev)
m66592->timer.data = (unsigned long)m66592;
m66592->reg = reg;
- ret = request_irq(ires->start, m66592_irq, IRQF_DISABLED | IRQF_SHARED,
+ ret = request_irq(ires->start, m66592_irq, IRQF_SHARED,
udc_name, m66592);
if (ret < 0) {
pr_err("request_irq error (%d)\n", ret);
diff --git a/drivers/usb/gadget/m66592-udc.h b/drivers/usb/gadget/m66592-udc.h
index 7b93d579af3..9d9f7e39f03 100644
--- a/drivers/usb/gadget/m66592-udc.h
+++ b/drivers/usb/gadget/m66592-udc.h
@@ -8,16 +8,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __M66592_UDC_H__
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
index d3eb27427c5..e24f72f82a4 100644
--- a/drivers/usb/gadget/mass_storage.c
+++ b/drivers/usb/gadget/mass_storage.c
@@ -10,15 +10,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
@@ -169,7 +160,7 @@ static struct usb_composite_driver msg_driver = {
.name = "g_mass_storage",
.dev = &msg_device_desc,
.iProduct = DRIVER_DESC,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.needs_serial = 1,
};
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 8c7b74717d8..7e7f515b8b1 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -10,15 +10,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
diff --git a/drivers/usb/gadget/mv_udc.h b/drivers/usb/gadget/mv_udc.h
index 65f1f7c3bd4..daa75c12f33 100644
--- a/drivers/usb/gadget/mv_udc.h
+++ b/drivers/usb/gadget/mv_udc.h
@@ -1,3 +1,11 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
#ifndef __MV_UDC_H
#define __MV_UDC_H
@@ -194,14 +202,25 @@ struct mv_udc {
unsigned int ep0_dir;
unsigned int dev_addr;
+ unsigned int test_mode;
int errors;
unsigned softconnect:1,
vbus_active:1,
remote_wakeup:1,
softconnected:1,
- force_fs:1;
- struct clk *clk;
+ force_fs:1,
+ clock_gating:1,
+ active:1;
+
+ struct work_struct vbus_work;
+ struct workqueue_struct *qwork;
+
+ struct mv_usb_platform_data *pdata;
+
+ /* some SOC has mutiple clock sources for USB*/
+ unsigned int clknum;
+ struct clk *clk[0];
};
/* endpoint data structure */
@@ -225,6 +244,7 @@ struct mv_req {
struct mv_dtd *dtd, *head, *tail;
struct mv_ep *ep;
struct list_head queue;
+ unsigned int test_mode;
unsigned dtd_count;
unsigned mapped:1;
};
@@ -289,6 +309,4 @@ struct mv_dtd {
struct mv_dtd *next_dtd_virt;
};
-extern int mv_udc_phy_init(unsigned int base);
-
#endif
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index ce1ac2bcb31..892412103dd 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -1,3 +1,14 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Author: Chao Xie <chao.xie@marvell.com>
+ * Neil Zhang <zhangwm@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -22,6 +33,7 @@
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/platform_data/mv_usb.h>
#include <asm/system.h>
#include <asm/unaligned.h>
@@ -45,6 +57,8 @@
#define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
#define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
+static DECLARE_COMPLETION(release_done);
+
static const char driver_name[] = "mv_udc";
static const char driver_desc[] = DRIVER_DESC;
@@ -53,6 +67,7 @@ static struct mv_udc *the_controller;
int mv_usb_otgsc;
static void nuke(struct mv_ep *ep, int status);
+static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
@@ -82,14 +97,16 @@ static void ep0_reset(struct mv_udc *udc)
(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
| EP_QUEUE_HEAD_IOS;
+ ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
+
epctrlx = readl(&udc->op_regs->epctrlx[0]);
if (i) { /* TX */
- epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
+ epctrlx |= EPCTRL_TX_ENABLE
| (USB_ENDPOINT_XFER_CONTROL
<< EPCTRL_TX_EP_TYPE_SHIFT);
} else { /* RX */
- epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
+ epctrlx |= EPCTRL_RX_ENABLE
| (USB_ENDPOINT_XFER_CONTROL
<< EPCTRL_RX_EP_TYPE_SHIFT);
}
@@ -122,6 +139,7 @@ static int process_ep_req(struct mv_udc *udc, int index,
int i, direction;
int retval = 0;
u32 errors;
+ u32 bit_pos;
curr_dqh = &udc->ep_dqh[index];
direction = index % 2;
@@ -139,10 +157,20 @@ static int process_ep_req(struct mv_udc *udc, int index,
errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
if (!errors) {
- remaining_length +=
+ remaining_length =
(curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
>> DTD_LENGTH_BIT_POS;
actual -= remaining_length;
+
+ if (remaining_length) {
+ if (direction) {
+ dev_dbg(&udc->dev->dev,
+ "TX dTD remains data\n");
+ retval = -EPROTO;
+ break;
+ } else
+ break;
+ }
} else {
dev_info(&udc->dev->dev,
"complete_tr error: ep=%d %s: error = 0x%x\n",
@@ -164,6 +192,20 @@ static int process_ep_req(struct mv_udc *udc, int index,
if (retval)
return retval;
+ if (direction == EP_DIR_OUT)
+ bit_pos = 1 << curr_req->ep->ep_num;
+ else
+ bit_pos = 1 << (16 + curr_req->ep->ep_num);
+
+ while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
+ if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
+ while (readl(&udc->op_regs->epstatus) & bit_pos)
+ udelay(1);
+ break;
+ }
+ udelay(1);
+ }
+
curr_req->req.actual = actual;
return 0;
@@ -335,7 +377,7 @@ static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
} else {
/* Write dQH next pointer and terminate bit to 0 */
dqh->next_dtd_ptr = req->head->td_dma
- & EP_QUEUE_HEAD_NEXT_POINTER_MASK;;
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
dqh->size_ioc_int_sts = 0;
/* Ensure that updates to the QH will occur before priming. */
@@ -376,7 +418,7 @@ static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
}
}
done:
- return retval;;
+ return retval;
}
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
@@ -481,6 +523,7 @@ static int mv_ep_enable(struct usb_ep *_ep,
u16 max = 0;
u32 bit_pos, epctrlx, direction;
unsigned char zlt = 0, ios = 0, mult = 0;
+ unsigned long flags;
ep = container_of(_ep, struct mv_ep, ep);
udc = ep->udc;
@@ -493,7 +536,7 @@ static int mv_ep_enable(struct usb_ep *_ep,
return -ESHUTDOWN;
direction = ep_dir(ep);
- max = le16_to_cpu(desc->wMaxPacketSize);
+ max = usb_endpoint_maxp(desc);
/*
* disable HW zero length termination select
@@ -501,9 +544,6 @@ static int mv_ep_enable(struct usb_ep *_ep,
*/
zlt = 1;
- /* Get the endpoint queue head address */
- dqh = (struct mv_dqh *)ep->dqh;
-
bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
/* Check if the Endpoint is Primed */
@@ -532,7 +572,7 @@ static int mv_ep_enable(struct usb_ep *_ep,
case USB_ENDPOINT_XFER_ISOC:
/* Calculate transactions needed for high bandwidth iso */
mult = (unsigned char)(1 + ((max >> 11) & 0x03));
- max = max & 0x8ff; /* bit 0~10 */
+ max = max & 0x7ff; /* bit 0~10 */
/* 3 transactions at most */
if (mult > 3)
goto en_done;
@@ -540,6 +580,10 @@ static int mv_ep_enable(struct usb_ep *_ep,
default:
goto en_done;
}
+
+ spin_lock_irqsave(&udc->lock, flags);
+ /* Get the endpoint queue head address */
+ dqh = ep->dqh;
dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
| (mult << EP_QUEUE_HEAD_MULT_POS)
| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
@@ -572,18 +616,20 @@ static int mv_ep_enable(struct usb_ep *_ep,
*/
epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
- epctrlx |= ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ epctrlx |= (USB_ENDPOINT_XFER_BULK
<< EPCTRL_RX_EP_TYPE_SHIFT);
writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
}
epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
- epctrlx |= ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ epctrlx |= (USB_ENDPOINT_XFER_BULK
<< EPCTRL_TX_EP_TYPE_SHIFT);
writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
}
+ spin_unlock_irqrestore(&udc->lock, flags);
+
return 0;
en_done:
return -EINVAL;
@@ -595,6 +641,7 @@ static int mv_ep_disable(struct usb_ep *_ep)
struct mv_ep *ep;
struct mv_dqh *dqh;
u32 bit_pos, epctrlx, direction;
+ unsigned long flags;
ep = container_of(_ep, struct mv_ep, ep);
if ((_ep == NULL) || !ep->desc)
@@ -605,6 +652,8 @@ static int mv_ep_disable(struct usb_ep *_ep)
/* Get the endpoint queue head address */
dqh = ep->dqh;
+ spin_lock_irqsave(&udc->lock, flags);
+
direction = ep_dir(ep);
bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
@@ -623,6 +672,9 @@ static int mv_ep_disable(struct usb_ep *_ep)
ep->desc = NULL;
ep->stopped = 1;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
return 0;
}
@@ -655,37 +707,28 @@ static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
struct mv_udc *udc;
u32 bit_pos, direction;
- struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+ struct mv_ep *ep;
unsigned int loops;
+ if (!_ep)
+ return;
+
+ ep = container_of(_ep, struct mv_ep, ep);
+ if (!ep->desc)
+ return;
+
udc = ep->udc;
direction = ep_dir(ep);
- bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
- /*
- * Flushing will halt the pipe
- * Write 1 to the Flush register
- */
- writel(bit_pos, &udc->op_regs->epflush);
- /* Wait until flushing completed */
- loops = LOOPS(FLUSH_TIMEOUT);
- while (readl(&udc->op_regs->epflush) & bit_pos) {
- /*
- * ENDPTFLUSH bit should be cleared to indicate this
- * operation is complete
- */
- if (loops == 0) {
- dev_err(&udc->dev->dev,
- "TIMEOUT for ENDPTFLUSH=0x%x, bit_pos=0x%x\n",
- (unsigned)readl(&udc->op_regs->epflush),
- (unsigned)bit_pos);
- return;
- }
- loops--;
- udelay(LOOPS_USEC);
- }
+ if (ep->ep_num == 0)
+ bit_pos = (1 << 16) | 1;
+ else if (direction == EP_DIR_OUT)
+ bit_pos = 1 << ep->ep_num;
+ else
+ bit_pos = 1 << (16 + ep->ep_num);
+
loops = LOOPS(EPSTATUS_TIMEOUT);
- while (readl(&udc->op_regs->epstatus) & bit_pos) {
+ do {
unsigned int inter_loops;
if (loops == 0) {
@@ -700,7 +743,7 @@ static void mv_ep_fifo_flush(struct usb_ep *_ep)
/* Wait until flushing completed */
inter_loops = LOOPS(FLUSH_TIMEOUT);
- while (readl(&udc->op_regs->epflush) & bit_pos) {
+ while (readl(&udc->op_regs->epflush)) {
/*
* ENDPTFLUSH bit should be cleared to indicate this
* operation is complete
@@ -717,7 +760,7 @@ static void mv_ep_fifo_flush(struct usb_ep *_ep)
udelay(LOOPS_USEC);
}
loops--;
- }
+ } while (readl(&udc->op_regs->epstatus) & bit_pos);
}
/* queues (submits) an I/O request to an endpoint */
@@ -987,6 +1030,22 @@ static struct usb_ep_ops mv_ep_ops = {
.fifo_flush = mv_ep_fifo_flush, /* flush fifo */
};
+static void udc_clock_enable(struct mv_udc *udc)
+{
+ unsigned int i;
+
+ for (i = 0; i < udc->clknum; i++)
+ clk_enable(udc->clk[i]);
+}
+
+static void udc_clock_disable(struct mv_udc *udc)
+{
+ unsigned int i;
+
+ for (i = 0; i < udc->clknum; i++)
+ clk_disable(udc->clk[i]);
+}
+
static void udc_stop(struct mv_udc *udc)
{
u32 tmp;
@@ -1075,6 +1134,40 @@ static int udc_reset(struct mv_udc *udc)
return 0;
}
+static int mv_udc_enable(struct mv_udc *udc)
+{
+ int retval;
+
+ if (udc->clock_gating == 0 || udc->active)
+ return 0;
+
+ dev_dbg(&udc->dev->dev, "enable udc\n");
+ udc_clock_enable(udc);
+ if (udc->pdata->phy_init) {
+ retval = udc->pdata->phy_init(udc->phy_regs);
+ if (retval) {
+ dev_err(&udc->dev->dev,
+ "init phy error %d\n", retval);
+ udc_clock_disable(udc);
+ return retval;
+ }
+ }
+ udc->active = 1;
+
+ return 0;
+}
+
+static void mv_udc_disable(struct mv_udc *udc)
+{
+ if (udc->clock_gating && udc->active) {
+ dev_dbg(&udc->dev->dev, "disable udc\n");
+ if (udc->pdata->phy_deinit)
+ udc->pdata->phy_deinit(udc->phy_regs);
+ udc_clock_disable(udc);
+ udc->active = 0;
+ }
+}
+
static int mv_udc_get_frame(struct usb_gadget *gadget)
{
struct mv_udc *udc;
@@ -1110,22 +1203,68 @@ static int mv_udc_wakeup(struct usb_gadget *gadget)
return 0;
}
+static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ struct mv_udc *udc;
+ unsigned long flags;
+ int retval = 0;
+
+ udc = container_of(gadget, struct mv_udc, gadget);
+ spin_lock_irqsave(&udc->lock, flags);
+
+ dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
+ __func__, udc->softconnect, udc->vbus_active);
+
+ udc->vbus_active = (is_active != 0);
+ if (udc->driver && udc->softconnect && udc->vbus_active) {
+ retval = mv_udc_enable(udc);
+ if (retval == 0) {
+ /* Clock is disabled, need re-init registers */
+ udc_reset(udc);
+ ep0_reset(udc);
+ udc_start(udc);
+ }
+ } else if (udc->driver && udc->softconnect) {
+ /* stop all the transfer in queue*/
+ stop_activity(udc, udc->driver);
+ udc_stop(udc);
+ mv_udc_disable(udc);
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return retval;
+}
+
static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
struct mv_udc *udc;
unsigned long flags;
+ int retval = 0;
udc = container_of(gadget, struct mv_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
+ dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
+ __func__, udc->softconnect, udc->vbus_active);
+
udc->softconnect = (is_on != 0);
- if (udc->driver && udc->softconnect)
- udc_start(udc);
- else
+ if (udc->driver && udc->softconnect && udc->vbus_active) {
+ retval = mv_udc_enable(udc);
+ if (retval == 0) {
+ /* Clock is disabled, need re-init registers */
+ udc_reset(udc);
+ ep0_reset(udc);
+ udc_start(udc);
+ }
+ } else if (udc->driver && udc->vbus_active) {
+ /* stop all the transfer in queue*/
+ stop_activity(udc, udc->driver);
udc_stop(udc);
+ mv_udc_disable(udc);
+ }
spin_unlock_irqrestore(&udc->lock, flags);
- return 0;
+ return retval;
}
static int mv_udc_start(struct usb_gadget_driver *driver,
@@ -1140,17 +1279,15 @@ static const struct usb_gadget_ops mv_ops = {
/* tries to wake up the host connected to this gadget */
.wakeup = mv_udc_wakeup,
+ /* notify controller that VBUS is powered or not */
+ .vbus_session = mv_udc_vbus_session,
+
/* D+ pullup, software-controlled connect/disconnect to USB host */
.pullup = mv_udc_pullup,
.start = mv_udc_start,
.stop = mv_udc_stop,
};
-static void mv_udc_testmode(struct mv_udc *udc, u16 index, bool enter)
-{
- dev_info(&udc->dev->dev, "Test Mode is not support yet\n");
-}
-
static int eps_init(struct mv_udc *udc)
{
struct mv_ep *ep;
@@ -1257,7 +1394,7 @@ static int mv_udc_start(struct usb_gadget_driver *driver,
udc->usb_state = USB_STATE_ATTACHED;
udc->ep0_state = WAIT_FOR_SETUP;
- udc->ep0_dir = USB_DIR_OUT;
+ udc->ep0_dir = EP_DIR_OUT;
spin_unlock_irqrestore(&udc->lock, flags);
@@ -1269,9 +1406,13 @@ static int mv_udc_start(struct usb_gadget_driver *driver,
udc->gadget.dev.driver = NULL;
return retval;
}
- udc_reset(udc);
- ep0_reset(udc);
- udc_start(udc);
+
+ /* pullup is always on */
+ mv_udc_pullup(&udc->gadget, 1);
+
+ /* When boot with cable attached, there will be no vbus irq occurred */
+ if (udc->qwork)
+ queue_work(udc->qwork, &udc->vbus_work);
return 0;
}
@@ -1284,13 +1425,16 @@ static int mv_udc_stop(struct usb_gadget_driver *driver)
if (!udc)
return -ENODEV;
- udc_stop(udc);
-
spin_lock_irqsave(&udc->lock, flags);
+ mv_udc_enable(udc);
+ udc_stop(udc);
+
/* stop all usb activities */
udc->gadget.speed = USB_SPEED_UNKNOWN;
stop_activity(udc, driver);
+ mv_udc_disable(udc);
+
spin_unlock_irqrestore(&udc->lock, flags);
/* unbind gadget driver */
@@ -1301,6 +1445,31 @@ static int mv_udc_stop(struct usb_gadget_driver *driver)
return 0;
}
+static void mv_set_ptc(struct mv_udc *udc, u32 mode)
+{
+ u32 portsc;
+
+ portsc = readl(&udc->op_regs->portsc[0]);
+ portsc |= mode << 16;
+ writel(portsc, &udc->op_regs->portsc[0]);
+}
+
+static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
+{
+ struct mv_udc *udc = the_controller;
+ struct mv_req *req = container_of(_req, struct mv_req, req);
+ unsigned long flags;
+
+ dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (req->test_mode) {
+ mv_set_ptc(udc, req->test_mode);
+ req->test_mode = 0;
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
@@ -1310,6 +1479,7 @@ udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
ep = &udc->eps[0];
udc->ep0_dir = direction;
+ udc->ep0_state = WAIT_FOR_OUT_STATUS;
req = udc->status_req;
@@ -1323,9 +1493,21 @@ udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
req->ep = ep;
req->req.status = -EINPROGRESS;
req->req.actual = 0;
- req->req.complete = NULL;
+ if (udc->test_mode) {
+ req->req.complete = prime_status_complete;
+ req->test_mode = udc->test_mode;
+ udc->test_mode = 0;
+ } else
+ req->req.complete = NULL;
req->dtd_count = 0;
+ if (req->req.dma == DMA_ADDR_INVALID) {
+ req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+ req->req.buf, req->req.length,
+ ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 1;
+ }
+
/* prime the data phase */
if (!req_to_dtd(req))
retval = queue_dtd(ep, req);
@@ -1346,6 +1528,17 @@ out:
return retval;
}
+static void mv_udc_testmode(struct mv_udc *udc, u16 index)
+{
+ if (index <= TEST_FORCE_EN) {
+ udc->test_mode = index;
+ if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+ ep0_stall(udc);
+ } else
+ dev_err(&udc->dev->dev,
+ "This test mode(%d) is not supported\n", index);
+}
+
static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
udc->dev_addr = (u8)setup->wValue;
@@ -1360,7 +1553,7 @@ static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
struct usb_ctrlrequest *setup)
{
- u16 status;
+ u16 status = 0;
int retval;
if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
@@ -1388,6 +1581,8 @@ static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
retval = udc_prime_status(udc, EP_DIR_IN, status, false);
if (retval)
ep0_stall(udc);
+ else
+ udc->ep0_state = DATA_STATE_XMIT;
}
static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
@@ -1402,9 +1597,6 @@ static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
case USB_DEVICE_REMOTE_WAKEUP:
udc->remote_wakeup = 0;
break;
- case USB_DEVICE_TEST_MODE:
- mv_udc_testmode(udc, 0, false);
- break;
default:
goto out;
}
@@ -1433,8 +1625,6 @@ static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
if (udc_prime_status(udc, EP_DIR_IN, 0, true))
ep0_stall(udc);
- else
- udc->ep0_state = DATA_STATE_XMIT;
out:
return;
}
@@ -1452,16 +1642,16 @@ static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
break;
case USB_DEVICE_TEST_MODE:
if (setup->wIndex & 0xFF
- && udc->gadget.speed != USB_SPEED_HIGH)
- goto out;
- if (udc->usb_state == USB_STATE_CONFIGURED
- || udc->usb_state == USB_STATE_ADDRESS
- || udc->usb_state == USB_STATE_DEFAULT)
- mv_udc_testmode(udc,
- setup->wIndex & 0xFF00, true);
- else
- goto out;
- break;
+ || udc->gadget.speed != USB_SPEED_HIGH)
+ ep0_stall(udc);
+
+ if (udc->usb_state != USB_STATE_CONFIGURED
+ && udc->usb_state != USB_STATE_ADDRESS
+ && udc->usb_state != USB_STATE_DEFAULT)
+ ep0_stall(udc);
+
+ mv_udc_testmode(udc, (setup->wIndex >> 8));
+ goto out;
default:
goto out;
}
@@ -1599,8 +1789,7 @@ static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];
/* Clear bit in ENDPTSETUPSTAT */
- temp = readl(&udc->op_regs->epsetupstat);
- writel(temp | (1 << ep_num), &udc->op_regs->epsetupstat);
+ writel((1 << ep_num), &udc->op_regs->epsetupstat);
/* while a hazard exists when setup package arrives */
do {
@@ -1871,23 +2060,57 @@ static irqreturn_t mv_udc_irq(int irq, void *dev)
return IRQ_HANDLED;
}
+static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
+{
+ struct mv_udc *udc = (struct mv_udc *)dev;
+
+ /* polling VBUS and init phy may cause too much time*/
+ if (udc->qwork)
+ queue_work(udc->qwork, &udc->vbus_work);
+
+ return IRQ_HANDLED;
+}
+
+static void mv_udc_vbus_work(struct work_struct *work)
+{
+ struct mv_udc *udc;
+ unsigned int vbus;
+
+ udc = container_of(work, struct mv_udc, vbus_work);
+ if (!udc->pdata->vbus)
+ return;
+
+ vbus = udc->pdata->vbus->poll();
+ dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
+
+ if (vbus == VBUS_HIGH)
+ mv_udc_vbus_session(&udc->gadget, 1);
+ else if (vbus == VBUS_LOW)
+ mv_udc_vbus_session(&udc->gadget, 0);
+}
+
/* release device structure */
static void gadget_release(struct device *_dev)
{
struct mv_udc *udc = the_controller;
complete(udc->done);
- kfree(udc);
}
-static int mv_udc_remove(struct platform_device *dev)
+static int __devexit mv_udc_remove(struct platform_device *dev)
{
struct mv_udc *udc = the_controller;
- DECLARE_COMPLETION(done);
+ int clk_i;
usb_del_gadget_udc(&udc->gadget);
- udc->done = &done;
+ if (udc->qwork) {
+ flush_workqueue(udc->qwork);
+ destroy_workqueue(udc->qwork);
+ }
+
+ if (udc->pdata && udc->pdata->vbus && udc->clock_gating)
+ free_irq(udc->pdata->vbus->irq, &dev->dev);
/* free memory allocated in probe */
if (udc->dtd_pool)
@@ -1902,6 +2125,8 @@ static int mv_udc_remove(struct platform_device *dev)
if (udc->irq)
free_irq(udc->irq, &dev->dev);
+ mv_udc_disable(udc);
+
if (udc->cap_regs)
iounmap(udc->cap_regs);
udc->cap_regs = NULL;
@@ -1915,45 +2140,62 @@ static int mv_udc_remove(struct platform_device *dev)
kfree(udc->status_req);
}
+ for (clk_i = 0; clk_i <= udc->clknum; clk_i++)
+ clk_put(udc->clk[clk_i]);
+
device_unregister(&udc->gadget.dev);
/* free dev, wait for the release() finished */
- wait_for_completion(&done);
+ wait_for_completion(udc->done);
+ kfree(udc);
the_controller = NULL;
return 0;
}
-int mv_udc_probe(struct platform_device *dev)
+static int __devinit mv_udc_probe(struct platform_device *dev)
{
+ struct mv_usb_platform_data *pdata = dev->dev.platform_data;
struct mv_udc *udc;
int retval = 0;
+ int clk_i = 0;
struct resource *r;
size_t size;
- udc = kzalloc(sizeof *udc, GFP_KERNEL);
+ if (pdata == NULL) {
+ dev_err(&dev->dev, "missing platform_data\n");
+ return -ENODEV;
+ }
+
+ size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
+ udc = kzalloc(size, GFP_KERNEL);
if (udc == NULL) {
dev_err(&dev->dev, "failed to allocate memory for udc\n");
- retval = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
+ the_controller = udc;
+ udc->done = &release_done;
+ udc->pdata = dev->dev.platform_data;
spin_lock_init(&udc->lock);
udc->dev = dev;
- udc->clk = clk_get(&dev->dev, "U2OCLK");
- if (IS_ERR(udc->clk)) {
- retval = PTR_ERR(udc->clk);
- goto error;
+ udc->clknum = pdata->clknum;
+ for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
+ udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
+ if (IS_ERR(udc->clk[clk_i])) {
+ retval = PTR_ERR(udc->clk[clk_i]);
+ goto err_put_clk;
+ }
}
- r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "u2o");
+ r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
if (r == NULL) {
dev_err(&dev->dev, "no I/O memory resource defined\n");
retval = -ENODEV;
- goto error;
+ goto err_put_clk;
}
udc->cap_regs = (struct mv_cap_regs __iomem *)
@@ -1961,29 +2203,31 @@ int mv_udc_probe(struct platform_device *dev)
if (udc->cap_regs == NULL) {
dev_err(&dev->dev, "failed to map I/O memory\n");
retval = -EBUSY;
- goto error;
+ goto err_put_clk;
}
- r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "u2ophy");
+ r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
if (r == NULL) {
dev_err(&dev->dev, "no phy I/O memory resource defined\n");
retval = -ENODEV;
- goto error;
+ goto err_iounmap_capreg;
}
udc->phy_regs = (unsigned int)ioremap(r->start, resource_size(r));
if (udc->phy_regs == 0) {
dev_err(&dev->dev, "failed to map phy I/O memory\n");
retval = -EBUSY;
- goto error;
+ goto err_iounmap_capreg;
}
/* we will acces controller register, so enable the clk */
- clk_enable(udc->clk);
- retval = mv_udc_phy_init(udc->phy_regs);
- if (retval) {
- dev_err(&dev->dev, "phy initialization error %d\n", retval);
- goto error;
+ udc_clock_enable(udc);
+ if (pdata->phy_init) {
+ retval = pdata->phy_init(udc->phy_regs);
+ if (retval) {
+ dev_err(&dev->dev, "phy init error %d\n", retval);
+ goto err_iounmap_phyreg;
+ }
}
udc->op_regs = (struct mv_op_regs __iomem *)((u32)udc->cap_regs
@@ -1991,6 +2235,13 @@ int mv_udc_probe(struct platform_device *dev)
& CAPLENGTH_MASK));
udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
+ /*
+ * some platform will use usb to download image, it may not disconnect
+ * usb gadget before loading kernel. So first stop udc here.
+ */
+ udc_stop(udc);
+ writel(0xFFFFFFFF, &udc->op_regs->usbsts);
+
size = udc->max_eps * sizeof(struct mv_dqh) *2;
size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
@@ -1999,7 +2250,7 @@ int mv_udc_probe(struct platform_device *dev)
if (udc->ep_dqh == NULL) {
dev_err(&dev->dev, "allocate dQH memory failed\n");
retval = -ENOMEM;
- goto error;
+ goto err_disable_clock;
}
udc->ep_dqh_size = size;
@@ -2012,7 +2263,7 @@ int mv_udc_probe(struct platform_device *dev)
if (!udc->dtd_pool) {
retval = -ENOMEM;
- goto error;
+ goto err_free_dma;
}
size = udc->max_eps * sizeof(struct mv_ep) *2;
@@ -2020,7 +2271,7 @@ int mv_udc_probe(struct platform_device *dev)
if (udc->eps == NULL) {
dev_err(&dev->dev, "allocate ep memory failed\n");
retval = -ENOMEM;
- goto error;
+ goto err_destroy_dma;
}
/* initialize ep0 status request structure */
@@ -2028,13 +2279,13 @@ int mv_udc_probe(struct platform_device *dev)
if (!udc->status_req) {
dev_err(&dev->dev, "allocate status_req memory failed\n");
retval = -ENOMEM;
- goto error;
+ goto err_free_eps;
}
INIT_LIST_HEAD(&udc->status_req->queue);
/* allocate a small amount of memory to get valid address */
udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
- udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
+ udc->status_req->req.dma = DMA_ADDR_INVALID;
udc->resume_state = USB_STATE_NOTATTACHED;
udc->usb_state = USB_STATE_POWERED;
@@ -2045,15 +2296,15 @@ int mv_udc_probe(struct platform_device *dev)
if (r == NULL) {
dev_err(&dev->dev, "no IRQ resource defined\n");
retval = -ENODEV;
- goto error;
+ goto err_free_status_req;
}
udc->irq = r->start;
if (request_irq(udc->irq, mv_udc_irq,
- IRQF_DISABLED | IRQF_SHARED, driver_name, udc)) {
+ IRQF_SHARED, driver_name, udc)) {
dev_err(&dev->dev, "Request irq %d for UDC failed\n",
udc->irq);
retval = -ENODEV;
- goto error;
+ goto err_free_status_req;
}
/* initialize gadget structure */
@@ -2072,18 +2323,82 @@ int mv_udc_probe(struct platform_device *dev)
retval = device_register(&udc->gadget.dev);
if (retval)
- goto error;
+ goto err_free_irq;
eps_init(udc);
- the_controller = udc;
+ /* VBUS detect: we can disable/enable clock on demand.*/
+ if (pdata->vbus) {
+ udc->clock_gating = 1;
+ retval = request_threaded_irq(pdata->vbus->irq, NULL,
+ mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
+ if (retval) {
+ dev_info(&dev->dev,
+ "Can not request irq for VBUS, "
+ "disable clock gating\n");
+ udc->clock_gating = 0;
+ }
+
+ udc->qwork = create_singlethread_workqueue("mv_udc_queue");
+ if (!udc->qwork) {
+ dev_err(&dev->dev, "cannot create workqueue\n");
+ retval = -ENOMEM;
+ goto err_unregister;
+ }
+
+ INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
+ }
+
+ /*
+ * When clock gating is supported, we can disable clk and phy.
+ * If not, it means that VBUS detection is not supported, we
+ * have to enable vbus active all the time to let controller work.
+ */
+ if (udc->clock_gating) {
+ if (udc->pdata->phy_deinit)
+ udc->pdata->phy_deinit(udc->phy_regs);
+ udc_clock_disable(udc);
+ } else
+ udc->vbus_active = 1;
retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
- if (!retval)
- return retval;
-error:
- if (udc)
- mv_udc_remove(udc->dev);
+ if (retval)
+ goto err_unregister;
+
+ dev_info(&dev->dev, "successful probe UDC device %s clock gating.\n",
+ udc->clock_gating ? "with" : "without");
+
+ return 0;
+
+err_unregister:
+ if (udc->pdata && udc->pdata->vbus && udc->clock_gating)
+ free_irq(pdata->vbus->irq, &dev->dev);
+ device_unregister(&udc->gadget.dev);
+err_free_irq:
+ free_irq(udc->irq, &dev->dev);
+err_free_status_req:
+ kfree(udc->status_req->req.buf);
+ kfree(udc->status_req);
+err_free_eps:
+ kfree(udc->eps);
+err_destroy_dma:
+ dma_pool_destroy(udc->dtd_pool);
+err_free_dma:
+ dma_free_coherent(&dev->dev, udc->ep_dqh_size,
+ udc->ep_dqh, udc->ep_dqh_dma);
+err_disable_clock:
+ if (udc->pdata->phy_deinit)
+ udc->pdata->phy_deinit(udc->phy_regs);
+ udc_clock_disable(udc);
+err_iounmap_phyreg:
+ iounmap((void *)udc->phy_regs);
+err_iounmap_capreg:
+ iounmap(udc->cap_regs);
+err_put_clk:
+ for (clk_i--; clk_i >= 0; clk_i--)
+ clk_put(udc->clk[clk_i]);
+ the_controller = NULL;
+ kfree(udc);
return retval;
}
@@ -2102,11 +2417,16 @@ static int mv_udc_resume(struct device *_dev)
struct mv_udc *udc = the_controller;
int retval;
- retval = mv_udc_phy_init(udc->phy_regs);
- if (retval) {
- dev_err(_dev, "phy initialization error %d\n", retval);
- return retval;
+ if (udc->pdata->phy_init) {
+ retval = udc->pdata->phy_init(udc->phy_regs);
+ if (retval) {
+ dev_err(&udc->dev->dev,
+ "init phy error %d when resume back\n",
+ retval);
+ return retval;
+ }
}
+
udc_reset(udc);
ep0_reset(udc);
udc_start(udc);
@@ -2120,9 +2440,21 @@ static const struct dev_pm_ops mv_udc_pm_ops = {
};
#endif
+static void mv_udc_shutdown(struct platform_device *dev)
+{
+ struct mv_udc *udc = the_controller;
+ u32 mode;
+
+ /* reset controller mode to IDLE */
+ mode = readl(&udc->op_regs->usbmode);
+ mode &= ~3;
+ writel(mode, &udc->op_regs->usbmode);
+}
+
static struct platform_driver udc_driver = {
.probe = mv_udc_probe,
.remove = __exit_p(mv_udc_remove),
+ .shutdown = mv_udc_shutdown,
.driver = {
.owner = THIS_MODULE,
.name = "pxa-u2o",
diff --git a/drivers/usb/gadget/mv_udc_phy.c b/drivers/usb/gadget/mv_udc_phy.c
deleted file mode 100644
index d4dea97e38a..00000000000
--- a/drivers/usb/gadget/mv_udc_phy.c
+++ /dev/null
@@ -1,214 +0,0 @@
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/io.h>
-#include <linux/errno.h>
-
-#include <mach/cputype.h>
-
-#ifdef CONFIG_ARCH_MMP
-
-#define UTMI_REVISION 0x0
-#define UTMI_CTRL 0x4
-#define UTMI_PLL 0x8
-#define UTMI_TX 0xc
-#define UTMI_RX 0x10
-#define UTMI_IVREF 0x14
-#define UTMI_T0 0x18
-#define UTMI_T1 0x1c
-#define UTMI_T2 0x20
-#define UTMI_T3 0x24
-#define UTMI_T4 0x28
-#define UTMI_T5 0x2c
-#define UTMI_RESERVE 0x30
-#define UTMI_USB_INT 0x34
-#define UTMI_DBG_CTL 0x38
-#define UTMI_OTG_ADDON 0x3c
-
-/* For UTMICTRL Register */
-#define UTMI_CTRL_USB_CLK_EN (1 << 31)
-/* pxa168 */
-#define UTMI_CTRL_SUSPEND_SET1 (1 << 30)
-#define UTMI_CTRL_SUSPEND_SET2 (1 << 29)
-#define UTMI_CTRL_RXBUF_PDWN (1 << 24)
-#define UTMI_CTRL_TXBUF_PDWN (1 << 11)
-
-#define UTMI_CTRL_INPKT_DELAY_SHIFT 30
-#define UTMI_CTRL_INPKT_DELAY_SOF_SHIFT 28
-#define UTMI_CTRL_PU_REF_SHIFT 20
-#define UTMI_CTRL_ARC_PULLDN_SHIFT 12
-#define UTMI_CTRL_PLL_PWR_UP_SHIFT 1
-#define UTMI_CTRL_PWR_UP_SHIFT 0
-/* For UTMI_PLL Register */
-#define UTMI_PLL_CLK_BLK_EN_SHIFT 24
-#define UTMI_PLL_FBDIV_SHIFT 4
-#define UTMI_PLL_REFDIV_SHIFT 0
-#define UTMI_PLL_FBDIV_MASK 0x00000FF0
-#define UTMI_PLL_REFDIV_MASK 0x0000000F
-#define UTMI_PLL_ICP_MASK 0x00007000
-#define UTMI_PLL_KVCO_MASK 0x00031000
-#define UTMI_PLL_PLLCALI12_SHIFT 29
-#define UTMI_PLL_PLLCALI12_MASK (0x3 << 29)
-#define UTMI_PLL_PLLVDD18_SHIFT 27
-#define UTMI_PLL_PLLVDD18_MASK (0x3 << 27)
-#define UTMI_PLL_PLLVDD12_SHIFT 25
-#define UTMI_PLL_PLLVDD12_MASK (0x3 << 25)
-#define UTMI_PLL_KVCO_SHIFT 15
-#define UTMI_PLL_ICP_SHIFT 12
-/* For UTMI_TX Register */
-#define UTMI_TX_REG_EXT_FS_RCAL_SHIFT 27
-#define UTMI_TX_REG_EXT_FS_RCAL_MASK (0xf << 27)
-#define UTMI_TX_REG_EXT_FS_RCAL_EN_MASK 26
-#define UTMI_TX_REG_EXT_FS_RCAL_EN (0x1 << 26)
-#define UTMI_TX_LOW_VDD_EN_SHIFT 11
-#define UTMI_TX_IMPCAL_VTH_SHIFT 14
-#define UTMI_TX_IMPCAL_VTH_MASK (0x7 << 14)
-#define UTMI_TX_CK60_PHSEL_SHIFT 17
-#define UTMI_TX_CK60_PHSEL_MASK (0xf << 17)
-#define UTMI_TX_TXVDD12_SHIFT 22
-#define UTMI_TX_TXVDD12_MASK (0x3 << 22)
-#define UTMI_TX_AMP_SHIFT 0
-#define UTMI_TX_AMP_MASK (0x7 << 0)
-/* For UTMI_RX Register */
-#define UTMI_RX_SQ_THRESH_SHIFT 4
-#define UTMI_RX_SQ_THRESH_MASK (0xf << 4)
-#define UTMI_REG_SQ_LENGTH_SHIFT 15
-#define UTMI_REG_SQ_LENGTH_MASK (0x3 << 15)
-
-#define REG_RCAL_START 0x00001000
-#define VCOCAL_START 0x00200000
-#define KVCO_EXT 0x00400000
-#define PLL_READY 0x00800000
-#define CLK_BLK_EN 0x01000000
-#endif
-
-static unsigned int u2o_read(unsigned int base, unsigned int offset)
-{
- return readl(base + offset);
-}
-
-static void u2o_set(unsigned int base, unsigned int offset, unsigned int value)
-{
- unsigned int reg;
-
- reg = readl(base + offset);
- reg |= value;
- writel(reg, base + offset);
- readl(base + offset);
-}
-
-static void u2o_clear(unsigned int base, unsigned int offset,
- unsigned int value)
-{
- unsigned int reg;
-
- reg = readl(base + offset);
- reg &= ~value;
- writel(reg, base + offset);
- readl(base + offset);
-}
-
-static void u2o_write(unsigned int base, unsigned int offset,
- unsigned int value)
-{
- writel(value, base + offset);
- readl(base + offset);
-}
-
-#ifdef CONFIG_ARCH_MMP
-int mv_udc_phy_init(unsigned int base)
-{
- unsigned long timeout;
-
- /* Initialize the USB PHY power */
- if (cpu_is_pxa910()) {
- u2o_set(base, UTMI_CTRL, (1 << UTMI_CTRL_INPKT_DELAY_SOF_SHIFT)
- | (1 << UTMI_CTRL_PU_REF_SHIFT));
- }
-
- u2o_set(base, UTMI_CTRL, 1 << UTMI_CTRL_PLL_PWR_UP_SHIFT);
- u2o_set(base, UTMI_CTRL, 1 << UTMI_CTRL_PWR_UP_SHIFT);
-
- /* UTMI_PLL settings */
- u2o_clear(base, UTMI_PLL, UTMI_PLL_PLLVDD18_MASK
- | UTMI_PLL_PLLVDD12_MASK | UTMI_PLL_PLLCALI12_MASK
- | UTMI_PLL_FBDIV_MASK | UTMI_PLL_REFDIV_MASK
- | UTMI_PLL_ICP_MASK | UTMI_PLL_KVCO_MASK);
-
- u2o_set(base, UTMI_PLL, (0xee << UTMI_PLL_FBDIV_SHIFT)
- | (0xb << UTMI_PLL_REFDIV_SHIFT)
- | (3 << UTMI_PLL_PLLVDD18_SHIFT)
- | (3 << UTMI_PLL_PLLVDD12_SHIFT)
- | (3 << UTMI_PLL_PLLCALI12_SHIFT)
- | (1 << UTMI_PLL_ICP_SHIFT) | (3 << UTMI_PLL_KVCO_SHIFT));
-
- /* UTMI_TX */
- u2o_clear(base, UTMI_TX, UTMI_TX_REG_EXT_FS_RCAL_EN_MASK
- | UTMI_TX_TXVDD12_MASK
- | UTMI_TX_CK60_PHSEL_MASK | UTMI_TX_IMPCAL_VTH_MASK
- | UTMI_TX_REG_EXT_FS_RCAL_MASK | UTMI_TX_AMP_MASK);
- u2o_set(base, UTMI_TX, (3 << UTMI_TX_TXVDD12_SHIFT)
- | (4 << UTMI_TX_CK60_PHSEL_SHIFT)
- | (4 << UTMI_TX_IMPCAL_VTH_SHIFT)
- | (8 << UTMI_TX_REG_EXT_FS_RCAL_SHIFT)
- | (3 << UTMI_TX_AMP_SHIFT));
-
- /* UTMI_RX */
- u2o_clear(base, UTMI_RX, UTMI_RX_SQ_THRESH_MASK
- | UTMI_REG_SQ_LENGTH_MASK);
- if (cpu_is_pxa168())
- u2o_set(base, UTMI_RX, (7 << UTMI_RX_SQ_THRESH_SHIFT)
- | (2 << UTMI_REG_SQ_LENGTH_SHIFT));
- else
- u2o_set(base, UTMI_RX, (0x7 << UTMI_RX_SQ_THRESH_SHIFT)
- | (2 << UTMI_REG_SQ_LENGTH_SHIFT));
-
- /* UTMI_IVREF */
- if (cpu_is_pxa168())
- /*
- * fixing Microsoft Altair board interface with NEC hub issue -
- * Set UTMI_IVREF from 0x4a3 to 0x4bf
- */
- u2o_write(base, UTMI_IVREF, 0x4bf);
-
- /* calibrate */
- timeout = jiffies + 100;
- while ((u2o_read(base, UTMI_PLL) & PLL_READY) == 0) {
- if (time_after(jiffies, timeout))
- return -ETIME;
- cpu_relax();
- }
-
- /* toggle VCOCAL_START bit of UTMI_PLL */
- udelay(200);
- u2o_set(base, UTMI_PLL, VCOCAL_START);
- udelay(40);
- u2o_clear(base, UTMI_PLL, VCOCAL_START);
-
- /* toggle REG_RCAL_START bit of UTMI_TX */
- udelay(200);
- u2o_set(base, UTMI_TX, REG_RCAL_START);
- udelay(40);
- u2o_clear(base, UTMI_TX, REG_RCAL_START);
- udelay(200);
-
- /* make sure phy is ready */
- timeout = jiffies + 100;
- while ((u2o_read(base, UTMI_PLL) & PLL_READY) == 0) {
- if (time_after(jiffies, timeout))
- return -ETIME;
- cpu_relax();
- }
-
- if (cpu_is_pxa168()) {
- u2o_set(base, UTMI_RESERVE, 1 << 5);
- /* Turn on UTMI PHY OTG extension */
- u2o_write(base, UTMI_OTG_ADDON, 1);
- }
- return 0;
-}
-#else
-int mv_udc_phy_init(unsigned int base)
-{
- return 0;
-}
-#endif
diff --git a/drivers/usb/gadget/ncm.c b/drivers/usb/gadget/ncm.c
index 62ee5087dca..89530034dff 100644
--- a/drivers/usb/gadget/ncm.c
+++ b/drivers/usb/gadget/ncm.c
@@ -14,15 +14,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* #define DEBUG */
diff --git a/drivers/usb/gadget/ndis.h b/drivers/usb/gadget/ndis.h
index df886cec5ef..b0e52fc277b 100644
--- a/drivers/usb/gadget/ndis.h
+++ b/drivers/usb/gadget/ndis.h
@@ -10,12 +10,6 @@
*
* This source code is offered for use in the public domain. You may
* use, modify or distribute it freely.
- *
- * This code is distributed in the hope that it will be useful but
- * WITHOUT ANY WARRANTY. ALL WARRANTIES, EXPRESS OR IMPLIED ARE HEREBY
- * DISCLAIMED. This includes but is not limited to warranties of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
*/
#ifndef _LINUX_NDIS_H
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
index ab98ea926a1..d1b76368472 100644
--- a/drivers/usb/gadget/net2272.c
+++ b/drivers/usb/gadget/net2272.c
@@ -204,7 +204,7 @@ net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- max = le16_to_cpu(desc->wMaxPacketSize) & 0x1fff;
+ max = usb_endpoint_maxp(desc) & 0x1fff;
spin_lock_irqsave(&dev->lock, flags);
_ep->maxpacket = max & 0x7fff;
@@ -1172,17 +1172,18 @@ net2272_pullup(struct usb_gadget *_gadget, int is_on)
return 0;
}
-static int net2272_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *));
-static int net2272_stop(struct usb_gadget_driver *driver);
+static int net2272_start(struct usb_gadget *_gadget,
+ struct usb_gadget_driver *driver);
+static int net2272_stop(struct usb_gadget *_gadget,
+ struct usb_gadget_driver *driver);
static const struct usb_gadget_ops net2272_ops = {
- .get_frame = net2272_get_frame,
- .wakeup = net2272_wakeup,
+ .get_frame = net2272_get_frame,
+ .wakeup = net2272_wakeup,
.set_selfpowered = net2272_set_selfpowered,
- .pullup = net2272_pullup,
- .start = net2272_start,
- .stop = net2272_stop,
+ .pullup = net2272_pullup,
+ .udc_start = net2272_start,
+ .udc_stop = net2272_stop,
};
/*---------------------------------------------------------------------------*/
@@ -1356,8 +1357,6 @@ net2272_set_fifo_mode(struct net2272 *dev, int mode)
/*---------------------------------------------------------------------------*/
-static struct net2272 *the_controller;
-
static void
net2272_usb_reset(struct net2272 *dev)
{
@@ -1453,20 +1452,17 @@ net2272_ep0_start(struct net2272 *dev)
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
-static int net2272_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
+static int net2272_start(struct usb_gadget *_gadget,
+ struct usb_gadget_driver *driver)
{
- struct net2272 *dev = the_controller;
- int ret;
+ struct net2272 *dev;
unsigned i;
- if (!driver || !bind || !driver->unbind || !driver->setup ||
+ if (!driver || !driver->unbind || !driver->setup ||
driver->speed != USB_SPEED_HIGH)
return -EINVAL;
- if (!dev)
- return -ENODEV;
- if (dev->driver)
- return -EBUSY;
+
+ dev = container_of(_gadget, struct net2272, gadget);
for (i = 0; i < 4; ++i)
dev->ep[i].irqs = 0;
@@ -1475,14 +1471,6 @@ static int net2272_start(struct usb_gadget_driver *driver,
driver->driver.bus = NULL;
dev->driver = driver;
dev->gadget.dev.driver = &driver->driver;
- ret = bind(&dev->gadget);
- if (ret) {
- dev_dbg(dev->dev, "bind to driver %s --> %d\n",
- driver->driver.name, ret);
- dev->driver = NULL;
- dev->gadget.dev.driver = NULL;
- return ret;
- }
/* ... then enable host detection and ep0; and we're ready
* for set_configuration as well as eventual disconnect.
@@ -1510,33 +1498,21 @@ stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
for (i = 0; i < 4; ++i)
net2272_dequeue_all(&dev->ep[i]);
- /* report disconnect; the driver is already quiesced */
- if (driver) {
- spin_unlock(&dev->lock);
- driver->disconnect(&dev->gadget);
- spin_lock(&dev->lock);
-
- }
net2272_usb_reinit(dev);
}
-static int net2272_stop(struct usb_gadget_driver *driver)
+static int net2272_stop(struct usb_gadget *_gadget,
+ struct usb_gadget_driver *driver)
{
- struct net2272 *dev = the_controller;
+ struct net2272 *dev;
unsigned long flags;
- if (!dev)
- return -ENODEV;
- if (!driver || driver != dev->driver)
- return -EINVAL;
+ dev = container_of(_gadget, struct net2272, gadget);
spin_lock_irqsave(&dev->lock, flags);
stop_activity(dev, driver);
spin_unlock_irqrestore(&dev->lock, flags);
- net2272_pullup(&dev->gadget, 0);
-
- driver->unbind(&dev->gadget);
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
@@ -1764,8 +1740,8 @@ net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
dev->gadget.speed = USB_SPEED_HIGH;
else
dev->gadget.speed = USB_SPEED_FULL;
- dev_dbg(dev->dev, "%s speed\n",
- (dev->gadget.speed == USB_SPEED_HIGH) ? "high" : "full");
+ dev_dbg(dev->dev, "%s\n",
+ usb_speed_string(dev->gadget.speed));
}
ep = &dev->ep[0];
@@ -2238,7 +2214,6 @@ net2272_remove(struct net2272 *dev)
device_remove_file(dev->dev, &dev_attr_registers);
dev_info(dev->dev, "unbind\n");
- the_controller = NULL;
}
static struct net2272 * __devinit
@@ -2246,11 +2221,6 @@ net2272_probe_init(struct device *dev, unsigned int irq)
{
struct net2272 *ret;
- if (the_controller) {
- dev_warn(dev, "ignoring\n");
- return ERR_PTR(-EBUSY);
- }
-
if (!irq) {
dev_dbg(dev, "No IRQ!\n");
return ERR_PTR(-ENODEV);
@@ -2307,8 +2277,6 @@ net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
dma_mode_string());
dev_info(dev->dev, "version: %s\n", driver_vers);
- the_controller = dev;
-
ret = device_register(&dev->gadget.dev);
if (ret)
goto err_irq;
@@ -2684,8 +2652,6 @@ net2272_plat_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
- the_controller = dev;
-
return 0;
err_io:
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 3dd40b4e675..7f1bc9a73cd 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -33,15 +33,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#undef DEBUG /* messages on error and most fault paths */
@@ -169,7 +160,7 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
return -EDOM;
/* sanity check ep-e/ep-f since their fifos are small */
- max = le16_to_cpu (desc->wMaxPacketSize) & 0x1fff;
+ max = usb_endpoint_maxp (desc) & 0x1fff;
if (ep->num > 4 && max > 64)
return -ERANGE;
@@ -1410,17 +1401,18 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
return 0;
}
-static int net2280_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *));
-static int net2280_stop(struct usb_gadget_driver *driver);
+static int net2280_start(struct usb_gadget *_gadget,
+ struct usb_gadget_driver *driver);
+static int net2280_stop(struct usb_gadget *_gadget,
+ struct usb_gadget_driver *driver);
static const struct usb_gadget_ops net2280_ops = {
.get_frame = net2280_get_frame,
.wakeup = net2280_wakeup,
.set_selfpowered = net2280_set_selfpowered,
.pullup = net2280_pullup,
- .start = net2280_start,
- .stop = net2280_stop,
+ .udc_start = net2280_start,
+ .udc_stop = net2280_stop,
};
/*-------------------------------------------------------------------------*/
@@ -1640,7 +1632,7 @@ show_queues (struct device *_dev, struct device_attribute *attr, char *buf)
default:
val = "iso"; break;
}; val; }),
- le16_to_cpu (d->wMaxPacketSize) & 0x1fff,
+ usb_endpoint_maxp (d) & 0x1fff,
ep->dma ? "dma" : "pio", ep->fifo_size
);
} else /* ep0 should only have one transfer queued */
@@ -1753,8 +1745,6 @@ static void set_fifo_mode (struct net2280 *dev, int mode)
* perhaps to bind specific drivers to specific devices.
*/
-static struct net2280 *the_controller;
-
static void usb_reset (struct net2280 *dev)
{
u32 tmp;
@@ -1880,10 +1870,10 @@ static void ep0_start (struct net2280 *dev)
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
-static int net2280_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
+static int net2280_start(struct usb_gadget *_gadget,
+ struct usb_gadget_driver *driver)
{
- struct net2280 *dev = the_controller;
+ struct net2280 *dev;
int retval;
unsigned i;
@@ -1891,14 +1881,11 @@ static int net2280_start(struct usb_gadget_driver *driver,
* (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
* "must not be used in normal operation"
*/
- if (!driver
- || driver->speed != USB_SPEED_HIGH
- || !bind || !driver->setup)
+ if (!driver || driver->speed != USB_SPEED_HIGH
+ || !driver->setup)
return -EINVAL;
- if (!dev)
- return -ENODEV;
- if (dev->driver)
- return -EBUSY;
+
+ dev = container_of (_gadget, struct net2280, gadget);
for (i = 0; i < 7; i++)
dev->ep [i].irqs = 0;
@@ -1908,14 +1895,6 @@ static int net2280_start(struct usb_gadget_driver *driver,
driver->driver.bus = NULL;
dev->driver = driver;
dev->gadget.dev.driver = &driver->driver;
- retval = bind(&dev->gadget);
- if (retval) {
- DEBUG (dev, "bind to driver %s --> %d\n",
- driver->driver.name, retval);
- dev->driver = NULL;
- dev->gadget.dev.driver = NULL;
- return retval;
- }
retval = device_create_file (&dev->pdev->dev, &dev_attr_function);
if (retval) goto err_unbind;
@@ -1961,33 +1940,21 @@ stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
for (i = 0; i < 7; i++)
nuke (&dev->ep [i]);
- /* report disconnect; the driver is already quiesced */
- if (driver) {
- spin_unlock (&dev->lock);
- driver->disconnect (&dev->gadget);
- spin_lock (&dev->lock);
- }
-
usb_reinit (dev);
}
-static int net2280_stop(struct usb_gadget_driver *driver)
+static int net2280_stop(struct usb_gadget *_gadget,
+ struct usb_gadget_driver *driver)
{
- struct net2280 *dev = the_controller;
+ struct net2280 *dev;
unsigned long flags;
- if (!dev)
- return -ENODEV;
- if (!driver || driver != dev->driver || !driver->unbind)
- return -EINVAL;
+ dev = container_of (_gadget, struct net2280, gadget);
spin_lock_irqsave (&dev->lock, flags);
stop_activity (dev, driver);
spin_unlock_irqrestore (&dev->lock, flags);
- net2280_pullup (&dev->gadget, 0);
-
- driver->unbind (&dev->gadget);
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
@@ -2266,9 +2233,7 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
else
dev->gadget.speed = USB_SPEED_FULL;
net2280_led_speed (dev, dev->gadget.speed);
- DEBUG (dev, "%s speed\n",
- (dev->gadget.speed == USB_SPEED_HIGH)
- ? "high" : "full");
+ DEBUG(dev, "%s\n", usb_speed_string(dev->gadget.speed));
}
ep = &dev->ep [0];
@@ -2481,7 +2446,7 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
mask = (1 << HIGH_SPEED) | (1 << FULL_SPEED);
/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
- * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRRUPT set and
+ * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
* both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
* only indicates a change in the reset state).
*/
@@ -2709,8 +2674,6 @@ static void net2280_remove (struct pci_dev *pdev)
pci_set_drvdata (pdev, NULL);
INFO (dev, "unbind\n");
-
- the_controller = NULL;
}
/* wrap this driver around the specified device, but
@@ -2724,14 +2687,6 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem *base = NULL;
int retval, i;
- /* if you want to support more than one controller in a system,
- * usb_gadget_driver_{register,unregister}() must change.
- */
- if (the_controller) {
- dev_warn (&pdev->dev, "ignoring\n");
- return -EBUSY;
- }
-
/* alloc, and start init */
dev = kzalloc (sizeof *dev, GFP_KERNEL);
if (dev == NULL){
@@ -2858,8 +2813,6 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
use_dma
? (use_dma_chaining ? "chaining" : "enabled")
: "disabled");
- the_controller = dev;
-
retval = device_register (&dev->gadget.dev);
if (retval) goto done;
retval = device_create_file (&pdev->dev, &dev_attr_registers);
diff --git a/drivers/usb/gadget/net2280.h b/drivers/usb/gadget/net2280.h
index c36852263d9..a844be0d683 100644
--- a/drivers/usb/gadget/net2280.h
+++ b/drivers/usb/gadget/net2280.h
@@ -11,15 +11,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/usb/net2280.h>
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 740c7daed27..788989a1022 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -10,15 +10,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#undef DEBUG
@@ -166,15 +157,14 @@ static int omap_ep_enable(struct usb_ep *_ep,
if (!_ep || !desc || ep->desc
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| ep->bEndpointAddress != desc->bEndpointAddress
- || ep->maxpacket < le16_to_cpu
- (desc->wMaxPacketSize)) {
+ || ep->maxpacket < usb_endpoint_maxp(desc)) {
DBG("%s, bad ep or descriptor\n", __func__);
return -EINVAL;
}
- maxp = le16_to_cpu (desc->wMaxPacketSize);
+ maxp = usb_endpoint_maxp(desc);
if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
&& maxp != ep->maxpacket)
- || le16_to_cpu(desc->wMaxPacketSize) > ep->maxpacket
+ || usb_endpoint_maxp(desc) > ep->maxpacket
|| !desc->wMaxPacketSize) {
DBG("%s, bad %s maxpacket\n", __func__, _ep->name);
return -ERANGE;
@@ -2968,7 +2958,7 @@ known:
}
#ifdef USE_ISO
status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
- IRQF_DISABLED, "omap_udc iso", udc);
+ 0, "omap_udc iso", udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[3].start, status);
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index f96615ab6b7..550d6dcdf10 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -4,15 +4,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
@@ -947,7 +938,7 @@ static void pch_udc_ep_enable(struct pch_udc_ep *ep,
else
buff_size = UDC_EPOUT_BUFF_SIZE;
pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
- pch_udc_ep_set_maxpkt(ep, le16_to_cpu(desc->wMaxPacketSize));
+ pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
pch_udc_ep_set_nak(ep);
pch_udc_ep_fifo_flush(ep, ep->in);
/* Configure the endpoint */
@@ -957,7 +948,7 @@ static void pch_udc_ep_enable(struct pch_udc_ep *ep,
(cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
(cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
(cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
- le16_to_cpu(desc->wMaxPacketSize) << UDC_CSR_NE_MAX_PKT_SHIFT;
+ usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
if (ep->in)
pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
@@ -1466,7 +1457,7 @@ static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
ep->desc = desc;
ep->halted = 0;
pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
- ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+ ep->ep.maxpacket = usb_endpoint_maxp(desc);
pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
spin_unlock_irqrestore(&dev->lock, iflags);
return 0;
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index a341dde6f9c..65a8834f274 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -8,15 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
@@ -971,23 +962,15 @@ printer_set_config(struct printer_dev *dev, unsigned number)
usb_gadget_vbus_draw(dev->gadget,
dev->gadget->is_otg ? 8 : 100);
} else {
- char *speed;
unsigned power;
power = 2 * config_desc.bMaxPower;
usb_gadget_vbus_draw(dev->gadget, power);
- switch (gadget->speed) {
- case USB_SPEED_FULL: speed = "full"; break;
-#ifdef CONFIG_USB_GADGET_DUALSPEED
- case USB_SPEED_HIGH: speed = "high"; break;
-#endif
- default: speed = "?"; break;
- }
-
dev->config = number;
- INFO(dev, "%s speed config #%d: %d mA, %s\n",
- speed, number, power, driver_desc);
+ INFO(dev, "%s config #%d: %d mA, %s\n",
+ usb_speed_string(gadget->speed),
+ number, power, driver_desc);
}
return result;
}
@@ -1611,7 +1594,7 @@ cleanup(void)
if (status)
ERROR(dev, "usb_gadget_unregister_driver %x\n", status);
- unregister_chrdev_region(g_printer_devno, 2);
+ unregister_chrdev_region(g_printer_devno, 1);
class_destroy(usb_gadget_class);
mutex_unlock(&usb_printer_gadget.lock_printer_io);
}
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index e4e59b4de25..c090a7e3ecf 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -11,16 +11,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
/* #define VERBOSE_DEBUG */
@@ -232,8 +222,7 @@ static int pxa25x_ep_enable (struct usb_ep *_ep,
if (!_ep || !desc || ep->desc || _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| ep->bEndpointAddress != desc->bEndpointAddress
- || ep->fifo_size < le16_to_cpu
- (desc->wMaxPacketSize)) {
+ || ep->fifo_size < usb_endpoint_maxp (desc)) {
DMSG("%s, bad ep or descriptor\n", __func__);
return -EINVAL;
}
@@ -248,7 +237,7 @@ static int pxa25x_ep_enable (struct usb_ep *_ep,
/* hardware _could_ do smaller, but driver doesn't */
if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
- && le16_to_cpu (desc->wMaxPacketSize)
+ && usb_endpoint_maxp (desc)
!= BULK_FIFO_SIZE)
|| !desc->wMaxPacketSize) {
DMSG("%s, bad %s maxpacket\n", __func__, _ep->name);
@@ -264,7 +253,7 @@ static int pxa25x_ep_enable (struct usb_ep *_ep,
ep->desc = desc;
ep->stopped = 0;
ep->pio_irqs = 0;
- ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize);
+ ep->ep.maxpacket = usb_endpoint_maxp (desc);
/* flush fifo (mostly for OUT buffers) */
pxa25x_ep_fifo_flush (_ep);
@@ -401,7 +390,7 @@ write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{
unsigned max;
- max = le16_to_cpu(ep->desc->wMaxPacketSize);
+ max = usb_endpoint_maxp(ep->desc);
do {
unsigned count;
int is_last, is_short;
@@ -671,8 +660,7 @@ pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
* we can report per-packet status. that also helps with dma.
*/
if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
- && req->req.length > le16_to_cpu
- (ep->desc->wMaxPacketSize)))
+ && req->req.length > usb_endpoint_maxp (ep->desc)))
return -EMSGSIZE;
DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
@@ -1105,7 +1093,7 @@ udc_seq_show(struct seq_file *m, void *_d)
tmp = *dev->ep [i].reg_udccs;
seq_printf(m,
"%s max %d %s udccs %02x irqs %lu\n",
- ep->ep.name, le16_to_cpu(desc->wMaxPacketSize),
+ ep->ep.name, usb_endpoint_maxp(desc),
"pio", tmp, ep->pio_irqs);
/* TODO translate all five groups of udccs bits! */
@@ -2202,7 +2190,7 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
/* irq setup after old hardware state is cleaned up */
retval = request_irq(irq, pxa25x_udc_irq,
- IRQF_DISABLED, driver_name, dev);
+ 0, driver_name, dev);
if (retval != 0) {
pr_err("%s: can't get irq %d, err %d\n",
driver_name, irq, retval);
@@ -2214,7 +2202,7 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
if (machine_is_lubbock()) {
retval = request_irq(LUBBOCK_USB_DISC_IRQ,
lubbock_vbus_irq,
- IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
+ IRQF_SAMPLE_RANDOM,
driver_name, dev);
if (retval != 0) {
pr_err("%s: can't get irq %i, err %d\n",
@@ -2223,7 +2211,7 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
}
retval = request_irq(LUBBOCK_USB_IRQ,
lubbock_vbus_irq,
- IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
+ IRQF_SAMPLE_RANDOM,
driver_name, dev);
if (retval != 0) {
pr_err("%s: can't get irq %i, err %d\n",
diff --git a/drivers/usb/gadget/pxa25x_udc.h b/drivers/usb/gadget/pxa25x_udc.h
index f572c561746..8eaf4e43726 100644
--- a/drivers/usb/gadget/pxa25x_udc.h
+++ b/drivers/usb/gadget/pxa25x_udc.h
@@ -9,15 +9,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_USB_GADGET_PXA25X_H
@@ -161,8 +152,6 @@ static struct pxa25x_udc *the_controller;
#ifdef DEBUG
-static int is_vbus_present(void);
-
static const char *state_name[] = {
"EP0_IDLE",
"EP0_IN_DATA_PHASE", "EP0_OUT_DATA_PHASE",
@@ -214,8 +203,7 @@ dump_state(struct pxa25x_udc *dev)
u32 tmp;
unsigned i;
- DMSG("%s %s, uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
- is_vbus_present() ? "host " : "disconnected",
+ DMSG("%s, uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
state_name[dev->ep0state],
UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
dump_udccr("udccr");
@@ -232,9 +220,6 @@ dump_state(struct pxa25x_udc *dev)
} else
DMSG("ep0 driver '%s'\n", dev->driver->driver.name);
- if (!is_vbus_present())
- return;
-
dump_udccs0 ("udccs0");
DMSG("ep0 IN %lu/%lu, OUT %lu/%lu\n",
dev->stats.write.bytes, dev->stats.write.ops,
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 85b68c75dc9..18b6b091f2a 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -8,16 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
#include <linux/kernel.h>
@@ -1439,7 +1429,7 @@ static int pxa_ep_enable(struct usb_ep *_ep,
return -EINVAL;
}
- if (ep->fifo_size < le16_to_cpu(desc->wMaxPacketSize)) {
+ if (ep->fifo_size < usb_endpoint_maxp(desc)) {
ep_err(ep, "bad maxpacket\n");
return -ERANGE;
}
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index b01696eab06..7f4e8f424e8 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -9,15 +9,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_USB_GADGET_PXA27X_H
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 50991e5bd5e..68a826a1b86 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -8,16 +8,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/module.h>
@@ -28,13 +18,14 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include "r8a66597-udc.h"
-#define DRIVER_VERSION "2009-08-18"
+#define DRIVER_VERSION "2011-09-26"
static const char udc_name[] = "r8a66597_udc";
static const char *r8a66597_ep_name[] = {
@@ -115,13 +106,15 @@ static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
u16 pid = 0;
unsigned long offset;
- if (pipenum == 0)
+ if (pipenum == 0) {
pid = r8a66597_read(r8a66597, DCPCTR) & PID;
- else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+ } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
offset = get_pipectr_addr(pipenum);
pid = r8a66597_read(r8a66597, offset) & PID;
- } else
- printk(KERN_ERR "unexpect pipe num (%d)\n", pipenum);
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
+ pipenum);
+ }
return pid;
}
@@ -131,13 +124,15 @@ static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
{
unsigned long offset;
- if (pipenum == 0)
+ if (pipenum == 0) {
r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
- else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+ } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
offset = get_pipectr_addr(pipenum);
r8a66597_mdfy(r8a66597, pid, PID, offset);
- } else
- printk(KERN_ERR "unexpect pipe num (%d)\n", pipenum);
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
+ pipenum);
+ }
}
static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
@@ -160,13 +155,15 @@ static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
u16 ret = 0;
unsigned long offset;
- if (pipenum == 0)
+ if (pipenum == 0) {
ret = r8a66597_read(r8a66597, DCPCTR);
- else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+ } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
offset = get_pipectr_addr(pipenum);
ret = r8a66597_read(r8a66597, offset);
- } else
- printk(KERN_ERR "unexpect pipe num (%d)\n", pipenum);
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
+ pipenum);
+ }
return ret;
}
@@ -177,13 +174,63 @@ static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
pipe_stop(r8a66597, pipenum);
- if (pipenum == 0)
+ if (pipenum == 0) {
r8a66597_bset(r8a66597, SQCLR, DCPCTR);
- else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+ } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
offset = get_pipectr_addr(pipenum);
r8a66597_bset(r8a66597, SQCLR, offset);
- } else
- printk(KERN_ERR "unexpect pipe num(%d)\n", pipenum);
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
+ pipenum);
+ }
+}
+
+static void control_reg_sqset(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ unsigned long offset;
+
+ pipe_stop(r8a66597, pipenum);
+
+ if (pipenum == 0) {
+ r8a66597_bset(r8a66597, SQSET, DCPCTR);
+ } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ r8a66597_bset(r8a66597, SQSET, offset);
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597),
+ "unexpect pipe num(%d)\n", pipenum);
+ }
+}
+
+static u16 control_reg_sqmon(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ unsigned long offset;
+
+ if (pipenum == 0) {
+ return r8a66597_read(r8a66597, DCPCTR) & SQMON;
+ } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ return r8a66597_read(r8a66597, offset) & SQMON;
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597),
+ "unexpect pipe num(%d)\n", pipenum);
+ }
+
+ return 0;
+}
+
+static u16 save_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ return control_reg_sqmon(r8a66597, pipenum);
+}
+
+static void restore_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum,
+ u16 toggle)
+{
+ if (toggle)
+ control_reg_sqset(r8a66597, pipenum);
+ else
+ control_reg_sqclr(r8a66597, pipenum);
}
static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
@@ -222,18 +269,51 @@ static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
return MBW_16;
}
+static void r8a66597_change_curpipe(struct r8a66597 *r8a66597, u16 pipenum,
+ u16 isel, u16 fifosel)
+{
+ u16 tmp, mask, loop;
+ int i = 0;
+
+ if (!pipenum) {
+ mask = ISEL | CURPIPE;
+ loop = isel;
+ } else {
+ mask = CURPIPE;
+ loop = pipenum;
+ }
+ r8a66597_mdfy(r8a66597, loop, mask, fifosel);
+
+ do {
+ tmp = r8a66597_read(r8a66597, fifosel);
+ if (i++ > 1000000) {
+ dev_err(r8a66597_to_dev(r8a66597),
+ "r8a66597: register%x, loop %x "
+ "is timeout\n", fifosel, loop);
+ break;
+ }
+ ndelay(1);
+ } while ((tmp & mask) != loop);
+}
+
static inline void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
{
struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
if (ep->use_dma)
- return;
+ r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);
ndelay(450);
- r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
+ if (r8a66597_is_sudmac(r8a66597) && ep->use_dma)
+ r8a66597_bclr(r8a66597, mbw_value(r8a66597), ep->fifosel);
+ else
+ r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
+
+ if (ep->use_dma)
+ r8a66597_bset(r8a66597, DREQE, ep->fifosel);
}
static int pipe_buffer_setting(struct r8a66597 *r8a66597,
@@ -297,17 +377,18 @@ static void pipe_buffer_release(struct r8a66597 *r8a66597,
if (info->pipe == 0)
return;
- if (is_bulk_pipe(info->pipe))
+ if (is_bulk_pipe(info->pipe)) {
r8a66597->bulk--;
- else if (is_interrupt_pipe(info->pipe))
+ } else if (is_interrupt_pipe(info->pipe)) {
r8a66597->interrupt--;
- else if (is_isoc_pipe(info->pipe)) {
+ } else if (is_isoc_pipe(info->pipe)) {
r8a66597->isochronous--;
if (info->type == R8A66597_BULK)
r8a66597->bulk--;
- } else
- printk(KERN_ERR "ep_release: unexpect pipenum (%d)\n",
- info->pipe);
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597),
+ "ep_release: unexpect pipenum (%d)\n", info->pipe);
+ }
}
static void pipe_initialize(struct r8a66597_ep *ep)
@@ -337,11 +418,17 @@ static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
ep->fifoaddr = CFIFO;
ep->fifosel = CFIFOSEL;
ep->fifoctr = CFIFOCTR;
- ep->fifotrn = 0;
ep->pipectr = get_pipectr_addr(pipenum);
+ if (is_bulk_pipe(pipenum) || is_isoc_pipe(pipenum)) {
+ ep->pipetre = get_pipetre_addr(pipenum);
+ ep->pipetrn = get_pipetrn_addr(pipenum);
+ } else {
+ ep->pipetre = 0;
+ ep->pipetrn = 0;
+ }
ep->pipenum = pipenum;
- ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+ ep->ep.maxpacket = usb_endpoint_maxp(desc);
r8a66597->pipenum2ep[pipenum] = ep;
r8a66597->epaddr2ep[desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK]
= ep;
@@ -381,7 +468,8 @@ static int alloc_pipe_config(struct r8a66597_ep *ep,
case USB_ENDPOINT_XFER_BULK:
if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
- printk(KERN_ERR "bulk pipe is insufficient\n");
+ dev_err(r8a66597_to_dev(r8a66597),
+ "bulk pipe is insufficient\n");
return -ENODEV;
} else {
info.pipe = R8A66597_BASE_PIPENUM_ISOC
@@ -397,7 +485,8 @@ static int alloc_pipe_config(struct r8a66597_ep *ep,
break;
case USB_ENDPOINT_XFER_INT:
if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
- printk(KERN_ERR "interrupt pipe is insufficient\n");
+ dev_err(r8a66597_to_dev(r8a66597),
+ "interrupt pipe is insufficient\n");
return -ENODEV;
}
info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
@@ -406,7 +495,8 @@ static int alloc_pipe_config(struct r8a66597_ep *ep,
break;
case USB_ENDPOINT_XFER_ISOC:
if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
- printk(KERN_ERR "isochronous pipe is insufficient\n");
+ dev_err(r8a66597_to_dev(r8a66597),
+ "isochronous pipe is insufficient\n");
return -ENODEV;
}
info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
@@ -414,13 +504,13 @@ static int alloc_pipe_config(struct r8a66597_ep *ep,
counter = &r8a66597->isochronous;
break;
default:
- printk(KERN_ERR "unexpect xfer type\n");
+ dev_err(r8a66597_to_dev(r8a66597), "unexpect xfer type\n");
return -EINVAL;
}
ep->type = info.type;
info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
- info.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+ info.maxpacket = usb_endpoint_maxp(desc);
info.interval = desc->bInterval;
if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
info.dir_in = 1;
@@ -429,7 +519,8 @@ static int alloc_pipe_config(struct r8a66597_ep *ep,
ret = pipe_buffer_setting(r8a66597, &info);
if (ret < 0) {
- printk(KERN_ERR "pipe_buffer_setting fail\n");
+ dev_err(r8a66597_to_dev(r8a66597),
+ "pipe_buffer_setting fail\n");
return ret;
}
@@ -495,6 +586,124 @@ static void start_ep0_write(struct r8a66597_ep *ep,
}
}
+static void disable_fifosel(struct r8a66597 *r8a66597, u16 pipenum,
+ u16 fifosel)
+{
+ u16 tmp;
+
+ tmp = r8a66597_read(r8a66597, fifosel) & CURPIPE;
+ if (tmp == pipenum)
+ r8a66597_change_curpipe(r8a66597, 0, 0, fifosel);
+}
+
+static void change_bfre_mode(struct r8a66597 *r8a66597, u16 pipenum,
+ int enable)
+{
+ struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
+ u16 tmp, toggle;
+
+ /* check current BFRE bit */
+ r8a66597_write(r8a66597, pipenum, PIPESEL);
+ tmp = r8a66597_read(r8a66597, PIPECFG) & R8A66597_BFRE;
+ if ((enable && tmp) || (!enable && !tmp))
+ return;
+
+ /* change BFRE bit */
+ pipe_stop(r8a66597, pipenum);
+ disable_fifosel(r8a66597, pipenum, CFIFOSEL);
+ disable_fifosel(r8a66597, pipenum, D0FIFOSEL);
+ disable_fifosel(r8a66597, pipenum, D1FIFOSEL);
+
+ toggle = save_usb_toggle(r8a66597, pipenum);
+
+ r8a66597_write(r8a66597, pipenum, PIPESEL);
+ if (enable)
+ r8a66597_bset(r8a66597, R8A66597_BFRE, PIPECFG);
+ else
+ r8a66597_bclr(r8a66597, R8A66597_BFRE, PIPECFG);
+
+ /* initialize for internal BFRE flag */
+ r8a66597_bset(r8a66597, ACLRM, ep->pipectr);
+ r8a66597_bclr(r8a66597, ACLRM, ep->pipectr);
+
+ restore_usb_toggle(r8a66597, pipenum, toggle);
+}
+
+static int sudmac_alloc_channel(struct r8a66597 *r8a66597,
+ struct r8a66597_ep *ep,
+ struct r8a66597_request *req)
+{
+ struct r8a66597_dma *dma;
+
+ if (!r8a66597_is_sudmac(r8a66597))
+ return -ENODEV;
+
+ /* Check transfer type */
+ if (!is_bulk_pipe(ep->pipenum))
+ return -EIO;
+
+ if (r8a66597->dma.used)
+ return -EBUSY;
+
+ /* set SUDMAC parameters */
+ dma = &r8a66597->dma;
+ dma->used = 1;
+ if (ep->desc->bEndpointAddress & USB_DIR_IN) {
+ dma->dir = 1;
+ } else {
+ dma->dir = 0;
+ change_bfre_mode(r8a66597, ep->pipenum, 1);
+ }
+
+ /* set r8a66597_ep paramters */
+ ep->use_dma = 1;
+ ep->dma = dma;
+ ep->fifoaddr = D0FIFO;
+ ep->fifosel = D0FIFOSEL;
+ ep->fifoctr = D0FIFOCTR;
+
+ /* dma mapping */
+ req->req.dma = dma_map_single(r8a66597_to_dev(ep->r8a66597),
+ req->req.buf, req->req.length,
+ dma->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ return 0;
+}
+
+static void sudmac_free_channel(struct r8a66597 *r8a66597,
+ struct r8a66597_ep *ep,
+ struct r8a66597_request *req)
+{
+ if (!r8a66597_is_sudmac(r8a66597))
+ return;
+
+ dma_unmap_single(r8a66597_to_dev(ep->r8a66597),
+ req->req.dma, req->req.length,
+ ep->dma->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
+ r8a66597_change_curpipe(r8a66597, 0, 0, ep->fifosel);
+
+ ep->dma->used = 0;
+ ep->use_dma = 0;
+ ep->fifoaddr = CFIFO;
+ ep->fifosel = CFIFOSEL;
+ ep->fifoctr = CFIFOCTR;
+}
+
+static void sudmac_start(struct r8a66597 *r8a66597, struct r8a66597_ep *ep,
+ struct r8a66597_request *req)
+{
+ BUG_ON(req->req.length == 0);
+
+ r8a66597_sudmac_write(r8a66597, LBA_WAIT, CH0CFG);
+ r8a66597_sudmac_write(r8a66597, req->req.dma, CH0BA);
+ r8a66597_sudmac_write(r8a66597, req->req.length, CH0BBC);
+ r8a66597_sudmac_write(r8a66597, CH0ENDE, DINTCTRL);
+
+ r8a66597_sudmac_write(r8a66597, DEN, CH0DEN);
+}
+
static void start_packet_write(struct r8a66597_ep *ep,
struct r8a66597_request *req)
{
@@ -505,11 +714,29 @@ static void start_packet_write(struct r8a66597_ep *ep,
disable_irq_empty(r8a66597, ep->pipenum);
pipe_start(r8a66597, ep->pipenum);
- tmp = r8a66597_read(r8a66597, ep->fifoctr);
- if (unlikely((tmp & FRDY) == 0))
- pipe_irq_enable(r8a66597, ep->pipenum);
- else
- irq_packet_write(ep, req);
+ if (req->req.length == 0) {
+ transfer_complete(ep, req, 0);
+ } else {
+ r8a66597_write(r8a66597, ~(1 << ep->pipenum), BRDYSTS);
+ if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
+ /* PIO mode */
+ pipe_change(r8a66597, ep->pipenum);
+ disable_irq_empty(r8a66597, ep->pipenum);
+ pipe_start(r8a66597, ep->pipenum);
+ tmp = r8a66597_read(r8a66597, ep->fifoctr);
+ if (unlikely((tmp & FRDY) == 0))
+ pipe_irq_enable(r8a66597, ep->pipenum);
+ else
+ irq_packet_write(ep, req);
+ } else {
+ /* DMA mode */
+ pipe_change(r8a66597, ep->pipenum);
+ disable_irq_nrdy(r8a66597, ep->pipenum);
+ pipe_start(r8a66597, ep->pipenum);
+ enable_irq_nrdy(r8a66597, ep->pipenum);
+ sudmac_start(r8a66597, ep, req);
+ }
+ }
}
static void start_packet_read(struct r8a66597_ep *ep,
@@ -524,17 +751,26 @@ static void start_packet_read(struct r8a66597_ep *ep,
pipe_start(r8a66597, pipenum);
pipe_irq_enable(r8a66597, pipenum);
} else {
- if (ep->use_dma) {
- r8a66597_bset(r8a66597, TRCLR, ep->fifosel);
- pipe_change(r8a66597, pipenum);
- r8a66597_bset(r8a66597, TRENB, ep->fifosel);
+ pipe_stop(r8a66597, pipenum);
+ if (ep->pipetre) {
+ enable_irq_nrdy(r8a66597, pipenum);
+ r8a66597_write(r8a66597, TRCLR, ep->pipetre);
r8a66597_write(r8a66597,
- (req->req.length + ep->ep.maxpacket - 1)
- / ep->ep.maxpacket,
- ep->fifotrn);
+ DIV_ROUND_UP(req->req.length, ep->ep.maxpacket),
+ ep->pipetrn);
+ r8a66597_bset(r8a66597, TRENB, ep->pipetre);
+ }
+
+ if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
+ /* PIO mode */
+ change_bfre_mode(r8a66597, ep->pipenum, 0);
+ pipe_start(r8a66597, pipenum); /* trigger once */
+ pipe_irq_enable(r8a66597, pipenum);
+ } else {
+ pipe_change(r8a66597, pipenum);
+ sudmac_start(r8a66597, ep, req);
+ pipe_start(r8a66597, pipenum); /* trigger once */
}
- pipe_start(r8a66597, pipenum); /* trigger once */
- pipe_irq_enable(r8a66597, pipenum);
}
}
@@ -564,7 +800,8 @@ static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
control_end(ep->r8a66597, 0);
break;
default:
- printk(KERN_ERR "start_ep0: unexpect ctsq(%x)\n", ctsq);
+ dev_err(r8a66597_to_dev(ep->r8a66597),
+ "start_ep0: unexpect ctsq(%x)\n", ctsq);
break;
}
}
@@ -690,6 +927,9 @@ __acquires(r8a66597->lock)
if (!list_empty(&ep->queue))
restart = 1;
+ if (ep->use_dma)
+ sudmac_free_channel(ep->r8a66597, ep, req);
+
spin_unlock(&ep->r8a66597->lock);
req->req.complete(&ep->ep, &req->req);
spin_lock(&ep->r8a66597->lock);
@@ -718,7 +958,8 @@ static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
do {
tmp = r8a66597_read(r8a66597, ep->fifoctr);
if (i++ > 100000) {
- printk(KERN_ERR "pipe0 is busy. maybe cpu i/o bus"
+ dev_err(r8a66597_to_dev(r8a66597),
+ "pipe0 is busy. maybe cpu i/o bus "
"conflict. please power off this controller.");
return;
}
@@ -733,7 +974,7 @@ static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
/* write fifo */
if (req->req.buf) {
if (size > 0)
- r8a66597_write_fifo(r8a66597, ep->fifoaddr, buf, size);
+ r8a66597_write_fifo(r8a66597, ep, buf, size);
if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
}
@@ -769,7 +1010,8 @@ static void irq_packet_write(struct r8a66597_ep *ep,
if (unlikely((tmp & FRDY) == 0)) {
pipe_stop(r8a66597, pipenum);
pipe_irq_disable(r8a66597, pipenum);
- printk(KERN_ERR "write fifo not ready. pipnum=%d\n", pipenum);
+ dev_err(r8a66597_to_dev(r8a66597),
+ "write fifo not ready. pipnum=%d\n", pipenum);
return;
}
@@ -780,7 +1022,7 @@ static void irq_packet_write(struct r8a66597_ep *ep,
/* write fifo */
if (req->req.buf) {
- r8a66597_write_fifo(r8a66597, ep->fifoaddr, buf, size);
+ r8a66597_write_fifo(r8a66597, ep, buf, size);
if ((size == 0)
|| ((size % ep->ep.maxpacket) != 0)
|| ((bufsize != ep->ep.maxpacket)
@@ -819,7 +1061,7 @@ static void irq_packet_read(struct r8a66597_ep *ep,
req->req.status = -EPIPE;
pipe_stop(r8a66597, pipenum);
pipe_irq_disable(r8a66597, pipenum);
- printk(KERN_ERR "read fifo not ready");
+ dev_err(r8a66597_to_dev(r8a66597), "read fifo not ready");
return;
}
@@ -1095,7 +1337,7 @@ static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
break;
default:
r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
- printk(KERN_ERR "USB speed unknown\n");
+ dev_err(r8a66597_to_dev(r8a66597), "USB speed unknown\n");
}
}
@@ -1158,11 +1400,71 @@ __acquires(r8a66597->lock)
control_end(r8a66597, 0);
break;
default:
- printk(KERN_ERR "ctrl_stage: unexpect ctsq(%x)\n", ctsq);
+ dev_err(r8a66597_to_dev(r8a66597),
+ "ctrl_stage: unexpect ctsq(%x)\n", ctsq);
break;
}
}
+static void sudmac_finish(struct r8a66597 *r8a66597, struct r8a66597_ep *ep)
+{
+ u16 pipenum;
+ struct r8a66597_request *req;
+ u32 len;
+ int i = 0;
+
+ pipenum = ep->pipenum;
+ pipe_change(r8a66597, pipenum);
+
+ while (!(r8a66597_read(r8a66597, ep->fifoctr) & FRDY)) {
+ udelay(1);
+ if (unlikely(i++ >= 10000)) { /* timeout = 10 msec */
+ dev_err(r8a66597_to_dev(r8a66597),
+ "%s: FRDY was not set (%d)\n",
+ __func__, pipenum);
+ return;
+ }
+ }
+
+ r8a66597_bset(r8a66597, BCLR, ep->fifoctr);
+ req = get_request_from_ep(ep);
+
+ /* prepare parameters */
+ len = r8a66597_sudmac_read(r8a66597, CH0CBC);
+ req->req.actual += len;
+
+ /* clear */
+ r8a66597_sudmac_write(r8a66597, CH0STCLR, DSTSCLR);
+
+ /* check transfer finish */
+ if ((!req->req.zero && (req->req.actual == req->req.length))
+ || (len % ep->ep.maxpacket)) {
+ if (ep->dma->dir) {
+ disable_irq_ready(r8a66597, pipenum);
+ enable_irq_empty(r8a66597, pipenum);
+ } else {
+ /* Clear the interrupt flag for next transfer */
+ r8a66597_write(r8a66597, ~(1 << pipenum), BRDYSTS);
+ transfer_complete(ep, req, 0);
+ }
+ }
+}
+
+static void r8a66597_sudmac_irq(struct r8a66597 *r8a66597)
+{
+ u32 irqsts;
+ struct r8a66597_ep *ep;
+ u16 pipenum;
+
+ irqsts = r8a66597_sudmac_read(r8a66597, DINTSTS);
+ if (irqsts & CH0ENDS) {
+ r8a66597_sudmac_write(r8a66597, CH0ENDC, DINTSTSCLR);
+ pipenum = (r8a66597_read(r8a66597, D0FIFOSEL) & CURPIPE);
+ ep = r8a66597->pipenum2ep[pipenum];
+ sudmac_finish(r8a66597, ep);
+ }
+}
+
static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
{
struct r8a66597 *r8a66597 = _r8a66597;
@@ -1173,6 +1475,9 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
u16 savepipe;
u16 mask0;
+ if (r8a66597_is_sudmac(r8a66597))
+ r8a66597_sudmac_irq(r8a66597);
+
spin_lock(&r8a66597->lock);
intsts0 = r8a66597_read(r8a66597, INTSTS0);
@@ -1433,23 +1738,18 @@ static struct usb_ep_ops r8a66597_ep_ops = {
};
/*-------------------------------------------------------------------------*/
-static struct r8a66597 *the_controller;
-
-static int r8a66597_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
+static int r8a66597_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
{
- struct r8a66597 *r8a66597 = the_controller;
+ struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
int retval;
if (!driver
|| driver->speed != USB_SPEED_HIGH
- || !bind
|| !driver->setup)
return -EINVAL;
if (!r8a66597)
return -ENODEV;
- if (r8a66597->driver)
- return -EBUSY;
/* hook up the driver */
driver->driver.bus = NULL;
@@ -1458,14 +1758,8 @@ static int r8a66597_start(struct usb_gadget_driver *driver,
retval = device_add(&r8a66597->gadget.dev);
if (retval) {
- printk(KERN_ERR "device_add error (%d)\n", retval);
- goto error;
- }
-
- retval = bind(&r8a66597->gadget);
- if (retval) {
- printk(KERN_ERR "bind to driver error (%d)\n", retval);
- device_del(&r8a66597->gadget.dev);
+ dev_err(r8a66597_to_dev(r8a66597), "device_add error (%d)\n",
+ retval);
goto error;
}
@@ -1489,23 +1783,17 @@ error:
return retval;
}
-static int r8a66597_stop(struct usb_gadget_driver *driver)
+static int r8a66597_stop(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
{
- struct r8a66597 *r8a66597 = the_controller;
+ struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
unsigned long flags;
- if (driver != r8a66597->driver || !driver->unbind)
- return -EINVAL;
-
spin_lock_irqsave(&r8a66597->lock, flags);
- if (r8a66597->gadget.speed != USB_SPEED_UNKNOWN)
- r8a66597_usb_disconnect(r8a66597);
r8a66597_bclr(r8a66597, VBSE, INTENB0);
disable_controller(r8a66597);
spin_unlock_irqrestore(&r8a66597->lock, flags);
- driver->unbind(&r8a66597->gadget);
-
device_del(&r8a66597->gadget.dev);
r8a66597->driver = NULL;
return 0;
@@ -1535,8 +1823,8 @@ static int r8a66597_pullup(struct usb_gadget *gadget, int is_on)
static struct usb_gadget_ops r8a66597_gadget_ops = {
.get_frame = r8a66597_get_frame,
- .start = r8a66597_start,
- .stop = r8a66597_stop,
+ .udc_start = r8a66597_start,
+ .udc_stop = r8a66597_stop,
.pullup = r8a66597_pullup,
};
@@ -1547,6 +1835,8 @@ static int __exit r8a66597_remove(struct platform_device *pdev)
usb_del_gadget_udc(&r8a66597->gadget);
del_timer_sync(&r8a66597->timer);
iounmap(r8a66597->reg);
+ if (r8a66597->pdata->sudmac)
+ iounmap(r8a66597->sudmac_reg);
free_irq(platform_get_irq(pdev, 0), r8a66597);
r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
#ifdef CONFIG_HAVE_CLK
@@ -1563,6 +1853,26 @@ static void nop_completion(struct usb_ep *ep, struct usb_request *r)
{
}
+static int __init r8a66597_sudmac_ioremap(struct r8a66597 *r8a66597,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sudmac");
+ if (!res) {
+ dev_err(&pdev->dev, "platform_get_resource error(sudmac).\n");
+ return -ENODEV;
+ }
+
+ r8a66597->sudmac_reg = ioremap(res->start, resource_size(res));
+ if (r8a66597->sudmac_reg == NULL) {
+ dev_err(&pdev->dev, "ioremap error(sudmac).\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int __init r8a66597_probe(struct platform_device *pdev)
{
#ifdef CONFIG_HAVE_CLK
@@ -1579,7 +1889,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
- printk(KERN_ERR "platform_get_resource error.\n");
+ dev_err(&pdev->dev, "platform_get_resource error.\n");
goto clean_up;
}
@@ -1589,14 +1899,14 @@ static int __init r8a66597_probe(struct platform_device *pdev)
if (irq < 0) {
ret = -ENODEV;
- printk(KERN_ERR "platform_get_irq error.\n");
+ dev_err(&pdev->dev, "platform_get_irq error.\n");
goto clean_up;
}
reg = ioremap(res->start, resource_size(res));
if (reg == NULL) {
ret = -ENOMEM;
- printk(KERN_ERR "ioremap error.\n");
+ dev_err(&pdev->dev, "ioremap error.\n");
goto clean_up;
}
@@ -1604,7 +1914,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
if (r8a66597 == NULL) {
ret = -ENOMEM;
- printk(KERN_ERR "kzalloc error\n");
+ dev_err(&pdev->dev, "kzalloc error\n");
goto clean_up;
}
@@ -1640,13 +1950,18 @@ static int __init r8a66597_probe(struct platform_device *pdev)
clk_enable(r8a66597->clk);
}
#endif
+ if (r8a66597->pdata->sudmac) {
+ ret = r8a66597_sudmac_ioremap(r8a66597, pdev);
+ if (ret < 0)
+ goto clean_up2;
+ }
disable_controller(r8a66597); /* make sure controller is disabled */
- ret = request_irq(irq, r8a66597_irq, IRQF_DISABLED | IRQF_SHARED,
+ ret = request_irq(irq, r8a66597_irq, IRQF_SHARED,
udc_name, r8a66597);
if (ret < 0) {
- printk(KERN_ERR "request_irq error (%d)\n", ret);
+ dev_err(&pdev->dev, "request_irq error (%d)\n", ret);
goto clean_up2;
}
@@ -1672,13 +1987,10 @@ static int __init r8a66597_probe(struct platform_device *pdev)
r8a66597->ep[0].fifoaddr = CFIFO;
r8a66597->ep[0].fifosel = CFIFOSEL;
r8a66597->ep[0].fifoctr = CFIFOCTR;
- r8a66597->ep[0].fifotrn = 0;
r8a66597->ep[0].pipectr = get_pipectr_addr(0);
r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
r8a66597->epaddr2ep[0] = &r8a66597->ep[0];
- the_controller = r8a66597;
-
r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
GFP_KERNEL);
if (r8a66597->ep0_req == NULL)
@@ -1705,6 +2017,8 @@ clean_up2:
#endif
clean_up:
if (r8a66597) {
+ if (r8a66597->sudmac_reg)
+ iounmap(r8a66597->sudmac_reg);
if (r8a66597->ep0_req)
r8a66597_free_request(&r8a66597->ep[0].ep,
r8a66597->ep0_req);
diff --git a/drivers/usb/gadget/r8a66597-udc.h b/drivers/usb/gadget/r8a66597-udc.h
index 503f766c23a..8e3de61cd4b 100644
--- a/drivers/usb/gadget/r8a66597-udc.h
+++ b/drivers/usb/gadget/r8a66597-udc.h
@@ -8,16 +8,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __R8A66597_H__
@@ -53,6 +43,7 @@
((pipenum >= R8A66597_BASE_PIPENUM_ISOC) && \
(pipenum < (R8A66597_BASE_PIPENUM_ISOC + R8A66597_MAX_NUM_ISOC)))
+#define r8a66597_is_sudmac(r8a66597) (r8a66597->pdata->sudmac)
struct r8a66597_pipe_info {
u16 pipe;
u16 epnum;
@@ -70,6 +61,7 @@ struct r8a66597_request {
struct r8a66597_ep {
struct usb_ep ep;
struct r8a66597 *r8a66597;
+ struct r8a66597_dma *dma;
struct list_head queue;
unsigned busy:1;
@@ -85,13 +77,20 @@ struct r8a66597_ep {
unsigned char fifoaddr;
unsigned char fifosel;
unsigned char fifoctr;
- unsigned char fifotrn;
unsigned char pipectr;
+ unsigned char pipetre;
+ unsigned char pipetrn;
+};
+
+struct r8a66597_dma {
+ unsigned used:1;
+ unsigned dir:1; /* 1 = IN(write), 0 = OUT(read) */
};
struct r8a66597 {
spinlock_t lock;
void __iomem *reg;
+ void __iomem *sudmac_reg;
#ifdef CONFIG_HAVE_CLK
struct clk *clk;
@@ -104,6 +103,7 @@ struct r8a66597 {
struct r8a66597_ep ep[R8A66597_MAX_NUM_PIPE];
struct r8a66597_ep *pipenum2ep[R8A66597_MAX_NUM_PIPE];
struct r8a66597_ep *epaddr2ep[16];
+ struct r8a66597_dma dma;
struct timer_list timer;
struct usb_request *ep0_req; /* for internal request */
@@ -124,6 +124,7 @@ struct r8a66597 {
#define gadget_to_r8a66597(_gadget) \
container_of(_gadget, struct r8a66597, gadget)
#define r8a66597_to_gadget(r8a66597) (&r8a66597->gadget)
+#define r8a66597_to_dev(r8a66597) (r8a66597->gadget.dev.parent)
static inline u16 r8a66597_read(struct r8a66597 *r8a66597, unsigned long offset)
{
@@ -182,12 +183,27 @@ static inline void r8a66597_write(struct r8a66597 *r8a66597, u16 val,
iowrite16(val, r8a66597->reg + offset);
}
+static inline void r8a66597_mdfy(struct r8a66597 *r8a66597,
+ u16 val, u16 pat, unsigned long offset)
+{
+ u16 tmp;
+ tmp = r8a66597_read(r8a66597, offset);
+ tmp = tmp & (~pat);
+ tmp = tmp | val;
+ r8a66597_write(r8a66597, tmp, offset);
+}
+
+#define r8a66597_bclr(r8a66597, val, offset) \
+ r8a66597_mdfy(r8a66597, 0, val, offset)
+#define r8a66597_bset(r8a66597, val, offset) \
+ r8a66597_mdfy(r8a66597, val, 0, offset)
+
static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597,
- unsigned long offset,
+ struct r8a66597_ep *ep,
unsigned char *buf,
int len)
{
- void __iomem *fifoaddr = r8a66597->reg + offset;
+ void __iomem *fifoaddr = r8a66597->reg + ep->fifoaddr;
int adj = 0;
int i;
@@ -215,18 +231,12 @@ static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597,
adj = 0x01; /* 16-bit wide */
}
+ if (r8a66597->pdata->wr0_shorted_to_wr1)
+ r8a66597_bclr(r8a66597, MBW_16, ep->fifosel);
for (i = 0; i < len; i++)
iowrite8(buf[i], fifoaddr + adj - (i & adj));
-}
-
-static inline void r8a66597_mdfy(struct r8a66597 *r8a66597,
- u16 val, u16 pat, unsigned long offset)
-{
- u16 tmp;
- tmp = r8a66597_read(r8a66597, offset);
- tmp = tmp & (~pat);
- tmp = tmp | val;
- r8a66597_write(r8a66597, tmp, offset);
+ if (r8a66597->pdata->wr0_shorted_to_wr1)
+ r8a66597_bclr(r8a66597, MBW_16, ep->fifosel);
}
static inline u16 get_xtal_from_pdata(struct r8a66597_platdata *pdata)
@@ -251,12 +261,21 @@ static inline u16 get_xtal_from_pdata(struct r8a66597_platdata *pdata)
return clock;
}
-#define r8a66597_bclr(r8a66597, val, offset) \
- r8a66597_mdfy(r8a66597, 0, val, offset)
-#define r8a66597_bset(r8a66597, val, offset) \
- r8a66597_mdfy(r8a66597, val, 0, offset)
+static inline u32 r8a66597_sudmac_read(struct r8a66597 *r8a66597,
+ unsigned long offset)
+{
+ return ioread32(r8a66597->sudmac_reg + offset);
+}
+
+static inline void r8a66597_sudmac_write(struct r8a66597 *r8a66597, u32 val,
+ unsigned long offset)
+{
+ iowrite32(val, r8a66597->sudmac_reg + offset);
+}
#define get_pipectr_addr(pipenum) (PIPE1CTR + (pipenum - 1) * 2)
+#define get_pipetre_addr(pipenum) (PIPE1TRE + (pipenum - 1) * 4)
+#define get_pipetrn_addr(pipenum) (PIPE1TRN + (pipenum - 1) * 4)
#define enable_irq_ready(r8a66597, pipenum) \
enable_pipe_irq(r8a66597, pipenum, BRDYENB)
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 8bdee67ce09..a552453dc94 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -1951,30 +1951,26 @@ static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
case S3C_DSTS_EnumSpd_FS:
case S3C_DSTS_EnumSpd_FS48:
hsotg->gadget.speed = USB_SPEED_FULL;
- dev_info(hsotg->dev, "new device is full-speed\n");
-
ep0_mps = EP0_MPS_LIMIT;
ep_mps = 64;
break;
case S3C_DSTS_EnumSpd_HS:
- dev_info(hsotg->dev, "new device is high-speed\n");
hsotg->gadget.speed = USB_SPEED_HIGH;
-
ep0_mps = EP0_MPS_LIMIT;
ep_mps = 512;
break;
case S3C_DSTS_EnumSpd_LS:
hsotg->gadget.speed = USB_SPEED_LOW;
- dev_info(hsotg->dev, "new device is low-speed\n");
-
/* note, we don't actually support LS in this driver at the
* moment, and the documentation seems to imply that it isn't
* supported by the PHYs on some of the devices.
*/
break;
}
+ dev_info(hsotg->dev, "new device is %s\n",
+ usb_speed_string(hsotg->gadget.speed));
/* we should now know the maximum packet size for an
* endpoint, so set the endpoints to a default value. */
@@ -2297,7 +2293,7 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep,
return -EINVAL;
}
- mps = le16_to_cpu(desc->wMaxPacketSize);
+ mps = usb_endpoint_maxp(desc);
/* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 3fa717c5f4b..8d54f893cef 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -26,6 +26,7 @@
#include <linux/clk.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
#include <linux/prefetch.h>
#include <mach/regs-s3c2443-clock.h>
@@ -137,6 +138,7 @@ struct s3c_hsudc {
struct usb_gadget_driver *driver;
struct device *dev;
struct s3c24xx_hsudc_platdata *pd;
+ struct otg_transceiver *transceiver;
spinlock_t lock;
void __iomem *regs;
struct resource *mem_rsrc;
@@ -759,11 +761,11 @@ static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
if (!_ep || !desc || hsep->desc || _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| hsep->bEndpointAddress != desc->bEndpointAddress
- || ep_maxpacket(hsep) < le16_to_cpu(desc->wMaxPacketSize))
+ || ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
return -EINVAL;
if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
- && le16_to_cpu(desc->wMaxPacketSize) != ep_maxpacket(hsep))
+ && usb_endpoint_maxp(desc) != ep_maxpacket(hsep))
|| !desc->wMaxPacketSize)
return -ERANGE;
@@ -779,7 +781,7 @@ static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
hsep->stopped = hsep->wedge = 0;
hsep->desc = desc;
- hsep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+ hsep->ep.maxpacket = usb_endpoint_maxp(desc);
s3c_hsudc_set_halt(_ep, 0);
__set_bit(ep_index(hsep), hsudc->regs + S3C_EIER);
@@ -1171,6 +1173,22 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
return ret;
}
+ /* connect to bus through transceiver */
+ if (hsudc->transceiver) {
+ ret = otg_set_peripheral(hsudc->transceiver, &hsudc->gadget);
+ if (ret) {
+ dev_err(hsudc->dev, "%s: can't bind to transceiver\n",
+ hsudc->gadget.name);
+ driver->unbind(&hsudc->gadget);
+
+ device_del(&hsudc->gadget.dev);
+
+ hsudc->driver = NULL;
+ hsudc->gadget.dev.driver = NULL;
+ return ret;
+ }
+ }
+
enable_irq(hsudc->irq);
dev_info(hsudc->dev, "bound driver %s\n", driver->driver.name);
@@ -1201,6 +1219,9 @@ static int s3c_hsudc_stop(struct usb_gadget_driver *driver)
s3c_hsudc_stop_activity(hsudc, driver);
spin_unlock_irqrestore(&hsudc->lock, flags);
+ if (hsudc->transceiver)
+ (void) otg_set_peripheral(hsudc->transceiver, NULL);
+
driver->unbind(&hsudc->gadget);
device_del(&hsudc->gadget.dev);
disable_irq(hsudc->irq);
@@ -1220,10 +1241,24 @@ static int s3c_hsudc_gadget_getframe(struct usb_gadget *gadget)
return s3c_hsudc_read_frameno(to_hsudc(gadget));
}
+static int s3c_hsudc_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ struct s3c_hsudc *hsudc = the_controller;
+
+ if (!hsudc)
+ return -ENODEV;
+
+ if (hsudc->transceiver)
+ return otg_set_power(hsudc->transceiver, mA);
+
+ return -EOPNOTSUPP;
+}
+
static struct usb_gadget_ops s3c_hsudc_gadget_ops = {
.get_frame = s3c_hsudc_gadget_getframe,
.start = s3c_hsudc_start,
.stop = s3c_hsudc_stop,
+ .vbus_draw = s3c_hsudc_vbus_draw,
};
static int s3c_hsudc_probe(struct platform_device *pdev)
@@ -1247,6 +1282,8 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
hsudc->dev = dev;
hsudc->pd = pdev->dev.platform_data;
+ hsudc->transceiver = otg_get_transceiver();
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "unable to obtain driver resource data\n");
@@ -1269,19 +1306,6 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
goto err_remap;
}
- ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "unable to obtain IRQ number\n");
- goto err_irq;
- }
- hsudc->irq = ret;
-
- ret = request_irq(hsudc->irq, s3c_hsudc_irq, 0, driver_name, hsudc);
- if (ret < 0) {
- dev_err(dev, "irq request failed\n");
- goto err_irq;
- }
-
spin_lock_init(&hsudc->lock);
device_initialize(&hsudc->gadget.dev);
@@ -1299,6 +1323,19 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
s3c_hsudc_setup_ep(hsudc);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "unable to obtain IRQ number\n");
+ goto err_irq;
+ }
+ hsudc->irq = ret;
+
+ ret = request_irq(hsudc->irq, s3c_hsudc_irq, 0, driver_name, hsudc);
+ if (ret < 0) {
+ dev_err(dev, "irq request failed\n");
+ goto err_irq;
+ }
+
hsudc->uclk = clk_get(&pdev->dev, "usb-device");
if (IS_ERR(hsudc->uclk)) {
dev_err(dev, "failed to find usb-device clock source\n");
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 8d31848aab0..b8643771fa8 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -10,16 +10,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
@@ -1082,7 +1072,7 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- max = le16_to_cpu(desc->wMaxPacketSize) & 0x1fff;
+ max = usb_endpoint_maxp(desc) & 0x1fff;
local_irq_save (flags);
_ep->maxpacket = max & 0x7ff;
@@ -1903,7 +1893,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
/* irq setup after old hardware state is cleaned up */
retval = request_irq(IRQ_USBD, s3c2410_udc_irq,
- IRQF_DISABLED, gadget_name, udc);
+ 0, gadget_name, udc);
if (retval != 0) {
dev_err(dev, "cannot get irq %i, err %d\n", IRQ_USBD, retval);
@@ -1927,7 +1917,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
}
retval = request_irq(irq, s3c2410_udc_vbus_irq,
- IRQF_DISABLED | IRQF_TRIGGER_RISING
+ IRQF_TRIGGER_RISING
| IRQF_TRIGGER_FALLING | IRQF_SHARED,
gadget_name, udc);
diff --git a/drivers/usb/gadget/s3c2410_udc.h b/drivers/usb/gadget/s3c2410_udc.h
index 9e0bece4f24..a48f619cb1c 100644
--- a/drivers/usb/gadget/s3c2410_udc.h
+++ b/drivers/usb/gadget/s3c2410_udc.h
@@ -9,16 +9,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _S3C2410_UDC_H
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index d3dd227a2bf..c7f291a331d 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -9,15 +9,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
@@ -52,6 +43,12 @@
* characters rather then a pointer to void.
*/
+/*
+ * When USB_GADGET_DEBUG_FILES is defined the module param num_buffers
+ * sets the number of pipeline buffers (length of the fsg_buffhd array).
+ * The valid range of num_buffers is: num >= 2 && num <= 4.
+ */
+
#include <linux/usb/storage.h>
#include <scsi/scsi.h>
@@ -247,6 +244,8 @@ struct fsg_lun {
u32 sense_data_info;
u32 unit_attention_data;
+ unsigned int blkbits; /* Bits of logical block size of bound block device */
+ unsigned int blksize; /* logical block size of bound block device */
struct device dev;
};
@@ -262,8 +261,31 @@ static struct fsg_lun *fsg_lun_from_dev(struct device *dev)
#define EP0_BUFSIZE 256
#define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */
-/* Number of buffers we will use. 2 is enough for double-buffering */
-#define FSG_NUM_BUFFERS 2
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
+module_param_named(num_buffers, fsg_num_buffers, uint, S_IRUGO);
+MODULE_PARM_DESC(num_buffers, "Number of pipeline buffers");
+
+#else
+
+/*
+ * Number of buffers we will use.
+ * 2 is usually enough for good buffering pipeline
+ */
+#define fsg_num_buffers CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
+
+#endif /* CONFIG_USB_DEBUG */
+
+/* check if fsg_num_buffers is within a valid range */
+static inline int fsg_num_buffers_validate(void)
+{
+ if (fsg_num_buffers >= 2 && fsg_num_buffers <= 4)
+ return 0;
+ pr_err("fsg_num_buffers %u is out of range (%d to %d)\n",
+ fsg_num_buffers, 2 ,4);
+ return -EINVAL;
+}
/* Default size of buffer length. */
#define FSG_BUFLEN ((u32)16384)
@@ -493,12 +515,128 @@ static struct usb_descriptor_header *fsg_hs_function[] = {
NULL,
};
+static struct usb_endpoint_descriptor
+fsg_ss_bulk_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor fsg_ss_bulk_in_comp_desc = {
+ .bLength = sizeof(fsg_ss_bulk_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /*.bMaxBurst = DYNAMIC, */
+};
+
+static struct usb_endpoint_descriptor
+fsg_ss_bulk_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor fsg_ss_bulk_out_comp_desc = {
+ .bLength = sizeof(fsg_ss_bulk_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /*.bMaxBurst = DYNAMIC, */
+};
+
+#ifndef FSG_NO_INTR_EP
+
+static struct usb_endpoint_descriptor
+fsg_ss_intr_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ /* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(2),
+ .bInterval = 9, /* 2**(9-1) = 256 uframes -> 32 ms */
+};
+
+static struct usb_ss_ep_comp_descriptor fsg_ss_intr_in_comp_desc = {
+ .bLength = sizeof(fsg_ss_bulk_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ .wBytesPerInterval = cpu_to_le16(2),
+};
+
+#ifndef FSG_NO_OTG
+# define FSG_SS_FUNCTION_PRE_EP_ENTRIES 2
+#else
+# define FSG_SS_FUNCTION_PRE_EP_ENTRIES 1
+#endif
+
+#endif
+
+static __maybe_unused struct usb_ext_cap_descriptor fsg_ext_cap_desc = {
+ .bLength = USB_DT_USB_EXT_CAP_SIZE,
+ .bDescriptorType = USB_DT_DEVICE_CAPABILITY,
+ .bDevCapabilityType = USB_CAP_TYPE_EXT,
+
+ .bmAttributes = cpu_to_le32(USB_LPM_SUPPORT),
+};
+
+static __maybe_unused struct usb_ss_cap_descriptor fsg_ss_cap_desc = {
+ .bLength = USB_DT_USB_SS_CAP_SIZE,
+ .bDescriptorType = USB_DT_DEVICE_CAPABILITY,
+ .bDevCapabilityType = USB_SS_CAP_TYPE,
+
+ /* .bmAttributes = LTM is not supported yet */
+
+ .wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION
+ | USB_FULL_SPEED_OPERATION
+ | USB_HIGH_SPEED_OPERATION
+ | USB_5GBPS_OPERATION),
+ .bFunctionalitySupport = USB_LOW_SPEED_OPERATION,
+ .bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT,
+ .bU2DevExitLat = USB_DEFAULT_U2_DEV_EXIT_LAT,
+};
+
+static __maybe_unused struct usb_bos_descriptor fsg_bos_desc = {
+ .bLength = USB_DT_BOS_SIZE,
+ .bDescriptorType = USB_DT_BOS,
+
+ .wTotalLength = USB_DT_BOS_SIZE
+ + USB_DT_USB_EXT_CAP_SIZE
+ + USB_DT_USB_SS_CAP_SIZE,
+
+ .bNumDeviceCaps = 2,
+};
+
+static struct usb_descriptor_header *fsg_ss_function[] = {
+#ifndef FSG_NO_OTG
+ (struct usb_descriptor_header *) &fsg_otg_desc,
+#endif
+ (struct usb_descriptor_header *) &fsg_intf_desc,
+ (struct usb_descriptor_header *) &fsg_ss_bulk_in_desc,
+ (struct usb_descriptor_header *) &fsg_ss_bulk_in_comp_desc,
+ (struct usb_descriptor_header *) &fsg_ss_bulk_out_desc,
+ (struct usb_descriptor_header *) &fsg_ss_bulk_out_comp_desc,
+#ifndef FSG_NO_INTR_EP
+ (struct usb_descriptor_header *) &fsg_ss_intr_in_desc,
+ (struct usb_descriptor_header *) &fsg_ss_intr_in_comp_desc,
+#endif
+ NULL,
+};
+
/* Maxpacket and other transfer characteristics vary by speed. */
static __maybe_unused struct usb_endpoint_descriptor *
fsg_ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
- struct usb_endpoint_descriptor *hs)
+ struct usb_endpoint_descriptor *hs,
+ struct usb_endpoint_descriptor *ss)
{
- if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+ return ss;
+ else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
return hs;
return fs;
}
@@ -580,13 +718,24 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
rc = (int) size;
goto out;
}
- num_sectors = size >> 9; /* File size in 512-byte blocks */
+
+ if (curlun->cdrom) {
+ curlun->blksize = 2048;
+ curlun->blkbits = 11;
+ } else if (inode->i_bdev) {
+ curlun->blksize = bdev_logical_block_size(inode->i_bdev);
+ curlun->blkbits = blksize_bits(curlun->blksize);
+ } else {
+ curlun->blksize = 512;
+ curlun->blkbits = 9;
+ }
+
+ num_sectors = size >> curlun->blkbits; /* File size in logic-block-size blocks */
min_sectors = 1;
if (curlun->cdrom) {
- num_sectors &= ~3; /* Reduce to a multiple of 2048 */
- min_sectors = 300*4; /* Smallest track is 300 frames */
- if (num_sectors >= 256*60*75*4) {
- num_sectors = (256*60*75 - 1) * 4;
+ min_sectors = 300; /* Smallest track is 300 frames */
+ if (num_sectors >= 256*60*75) {
+ num_sectors = 256*60*75 - 1;
LINFO(curlun, "file too big: %s\n", filename);
LINFO(curlun, "using only first %d blocks\n",
(int) num_sectors);
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index dfed4c1d96c..29c854bbca4 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -9,15 +9,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* #define VERBOSE_DEBUG */
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index c966440ddd7..8012357e98a 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -9,15 +9,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __U_ETHER_H
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index a8aa46962d8..3a4a664bab4 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -552,9 +552,8 @@ recycle:
/* Push from tty to ldisc; without low_latency set this is handled by
* a workqueue, so we won't get callbacks and can hold port_lock
*/
- if (tty && do_push) {
+ if (tty && do_push)
tty_flip_buffer_push(tty);
- }
/* We want our data queue to become empty ASAP, keeping data
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 05ba4721436..022baeca7c9 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -356,7 +356,7 @@ static DEVICE_ATTR(srp, S_IWUSR, NULL, usb_udc_srp_store);
static ssize_t usb_udc_softconn_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
- struct usb_udc *udc = dev_get_drvdata(dev);
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
if (sysfs_streq(buf, "connect")) {
usb_gadget_connect(udc->gadget);
@@ -375,23 +375,8 @@ static ssize_t usb_udc_speed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
- struct usb_gadget *gadget = udc->gadget;
-
- switch (gadget->speed) {
- case USB_SPEED_LOW:
- return snprintf(buf, PAGE_SIZE, "low-speed\n");
- case USB_SPEED_FULL:
- return snprintf(buf, PAGE_SIZE, "full-speed\n");
- case USB_SPEED_HIGH:
- return snprintf(buf, PAGE_SIZE, "high-speed\n");
- case USB_SPEED_WIRELESS:
- return snprintf(buf, PAGE_SIZE, "wireless\n");
- case USB_SPEED_SUPER:
- return snprintf(buf, PAGE_SIZE, "super-speed\n");
- case USB_SPEED_UNKNOWN: /* FALLTHROUGH */
- default:
- return snprintf(buf, PAGE_SIZE, "UNKNOWN\n");
- }
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ usb_speed_string(udc->gadget->speed));
}
static DEVICE_ATTR(speed, S_IRUSR, usb_udc_speed_show, NULL);
diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h
index 5b7919460fd..bc78c606c12 100644
--- a/drivers/usb/gadget/uvc.h
+++ b/drivers/usb/gadget/uvc.h
@@ -8,7 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
*/
#ifndef _UVC_GADGET_H_
@@ -56,6 +55,7 @@ struct uvc_event
#include <linux/usb.h> /* For usb_endpoint_* */
#include <linux/usb/gadget.h>
#include <linux/videodev2.h>
+#include <linux/version.h>
#include <media/v4l2-fh.h>
#include "uvc_queue.h"
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index aa0ad34e0f1..d776adb2da6 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -8,7 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index 52f8f9e513a..f6e083b5019 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -8,7 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
*/
#include <linux/kernel.h>
@@ -16,7 +15,6 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
diff --git a/drivers/usb/gadget/uvc_video.c b/drivers/usb/gadget/uvc_video.c
index b08f35438d7..b0e53a8ea4f 100644
--- a/drivers/usb/gadget/uvc_video.c
+++ b/drivers/usb/gadget/uvc_video.c
@@ -8,7 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/webcam.c b/drivers/usb/gadget/webcam.c
index df6882de50b..668fe128f2e 100644
--- a/drivers/usb/gadget/webcam.c
+++ b/drivers/usb/gadget/webcam.c
@@ -8,8 +8,8 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
*/
+
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/usb/video.h>
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 00e2fd2d479..20697cc132d 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -8,15 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index ab085f12d57..060e0e2b1ae 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -19,7 +19,7 @@ config USB_C67X00_HCD
config USB_XHCI_HCD
tristate "xHCI HCD (USB 3.0) support (EXPERIMENTAL)"
- depends on USB && PCI && EXPERIMENTAL
+ depends on USB && USB_ARCH_HAS_XHCI && EXPERIMENTAL
---help---
The eXtensible Host Controller Interface (xHCI) is standard for USB 3.0
"SuperSpeed" host controller hardware.
@@ -515,6 +515,19 @@ config USB_R8A66597_HCD
To compile this driver as a module, choose M here: the
module will be called r8a66597-hcd.
+config USB_RENESAS_USBHS_HCD
+ tristate "Renesas USBHS HCD support"
+ depends on USB
+ depends on USB_RENESAS_USBHS
+ help
+ The Renesas USBHS is a USB 2.0 host and peripheral controller.
+
+ Enable this option if your board has this chip, and you want
+ to use it as a host controller. If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called renesas-usbhs.
+
config USB_WHCI_HCD
tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -544,11 +557,11 @@ config USB_HWA_HCD
will be called "hwa-hc".
config USB_IMX21_HCD
- tristate "iMX21 HCD support"
- depends on USB && ARM && MACH_MX21
+ tristate "i.MX21 HCD support"
+ depends on USB && ARM && ARCH_MXC
help
This driver enables support for the on-chip USB host in the
- iMX21 processor.
+ i.MX21 processor.
To compile this driver as a module, choose M here: the
module will be called "imx21-hcd".
@@ -578,3 +591,10 @@ config USB_OCTEON_OHCI
config USB_OCTEON2_COMMON
bool
default y if USB_OCTEON_EHCI || USB_OCTEON_OHCI
+
+config USB_PXA168_EHCI
+ bool "Marvell PXA168 on-chip EHCI HCD support"
+ depends on USB_EHCI_HCD && ARCH_MMP
+ help
+ Enable support for Marvell PXA168 SoC's on-chip EHCI
+ host controller
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 624a362f2fe..ed48a5d79e1 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -11,8 +11,9 @@ fhci-y += fhci-mem.o fhci-tds.o fhci-sched.o
fhci-$(CONFIG_FHCI_DEBUG) += fhci-dbg.o
-xhci-hcd-y := xhci.o xhci-mem.o xhci-pci.o
+xhci-hcd-y := xhci.o xhci-mem.o
xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
+xhci-hcd-$(CONFIG_PCI) += xhci-pci.o
obj-$(CONFIG_USB_WHCI_HCD) += whci/
diff --git a/drivers/usb/host/ehci-ath79.c b/drivers/usb/host/ehci-ath79.c
index 4d2e88d04da..afb6743cf09 100644
--- a/drivers/usb/host/ehci-ath79.c
+++ b/drivers/usb/host/ehci-ath79.c
@@ -163,7 +163,7 @@ static int ehci_ath79_probe(struct platform_device *pdev)
goto err_release_region;
}
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto err_iounmap;
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 42ae5740990..65719e8d24e 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -181,7 +181,7 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
ehci->hcs_params = readl(&ehci->caps->hcs_params);
ret = usb_add_hcd(hcd, pdev->resource[1].start,
- IRQF_DISABLED | IRQF_SHARED);
+ IRQF_SHARED);
if (ret == 0) {
platform_set_drvdata(pdev, hcd);
return ret;
@@ -293,7 +293,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
/* here we "know" root ports should always stay powered */
ehci_port_power(ehci, 1);
- hcd->state = HC_STATE_SUSPENDED;
+ ehci->rh_state = EHCI_RH_SUSPENDED;
return 0;
}
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 40a844c1dbb..d6d74d2e09f 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -697,6 +697,19 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
}
#undef DBG_SCHED_LIMIT
+static const char *rh_state_string(struct ehci_hcd *ehci)
+{
+ switch (ehci->rh_state) {
+ case EHCI_RH_HALTED:
+ return "halted";
+ case EHCI_RH_SUSPENDED:
+ return "suspended";
+ case EHCI_RH_RUNNING:
+ return "running";
+ }
+ return "?";
+}
+
static ssize_t fill_registers_buffer(struct debug_buffer *buf)
{
struct usb_hcd *hcd;
@@ -730,11 +743,11 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
temp = scnprintf (next, size,
"bus %s, device %s\n"
"%s\n"
- "EHCI %x.%02x, hcd state %d\n",
+ "EHCI %x.%02x, rh state %s\n",
hcd->self.controller->bus->name,
dev_name(hcd->self.controller),
hcd->product_desc,
- i >> 8, i & 0x0ff, hcd->state);
+ i >> 8, i & 0x0ff, rh_state_string(ehci));
size -= temp;
next += temp;
@@ -808,7 +821,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
next += temp;
temp = scnprintf (next, size, "uframe %04x\n",
- ehci_readl(ehci, &ehci->regs->frame_index));
+ ehci_read_frame_index(ehci));
size -= temp;
next += temp;
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 34a3140d1e5..e90344a1763 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -134,7 +134,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
/* Don't need to set host mode here. It will be done by tdi_reset() */
- retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval != 0)
goto err4;
@@ -392,7 +392,7 @@ static int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
dev_dbg(dev, "suspending...\n");
- hcd->state = HC_STATE_SUSPENDED;
+ ehci->rh_state = EHCI_RH_SUSPENDED;
dev->power.power_state = PMSG_SUSPEND;
/* ignore non-host interrupts */
@@ -481,7 +481,7 @@ static int ehci_fsl_mpc512x_drv_resume(struct device *dev)
ehci_writel(ehci, pdata->pm_portsc, &ehci->regs->port_status[0]);
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- hcd->state = HC_STATE_RUNNING;
+ ehci->rh_state = EHCI_RH_RUNNING;
dev->power.power_state = PMSG_ON;
tmp = ehci_readl(ehci, &ehci->regs->command);
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index f72ae0b6ee7..59e81615e09 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -95,7 +95,7 @@ static const char hcd_name [] = "ehci_hcd";
#define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
#define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
#define EHCI_SHRINK_JIFFIES (DIV_ROUND_UP(HZ, 200) + 1)
- /* 200-ms async qh unlink delay */
+ /* 5-ms async qh unlink delay */
/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh = 0; // 0 to 6
@@ -238,7 +238,7 @@ static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
error = handshake(ehci, ptr, mask, done, usec);
if (error) {
ehci_halt(ehci);
- ehci_to_hcd(ehci)->state = HC_STATE_HALT;
+ ehci->rh_state = EHCI_RH_HALTED;
ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n",
ptr, mask, done, error);
}
@@ -278,7 +278,7 @@ static int ehci_reset (struct ehci_hcd *ehci)
command |= CMD_RESET;
dbg_cmd (ehci, "reset", command);
ehci_writel(ehci, command, &ehci->regs->command);
- ehci_to_hcd(ehci)->state = HC_STATE_HALT;
+ ehci->rh_state = EHCI_RH_HALTED;
ehci->next_statechange = jiffies;
retval = handshake (ehci, &ehci->regs->command,
CMD_RESET, 0, 250 * 1000);
@@ -307,7 +307,7 @@ static void ehci_quiesce (struct ehci_hcd *ehci)
u32 temp;
#ifdef DEBUG
- if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
+ if (ehci->rh_state != EHCI_RH_RUNNING)
BUG ();
#endif
@@ -356,7 +356,7 @@ static void ehci_iaa_watchdog(unsigned long param)
*/
if (ehci->reclaim
&& !timer_pending(&ehci->iaa_watchdog)
- && HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
+ && ehci->rh_state == EHCI_RH_RUNNING) {
u32 cmd, status;
/* If we get here, IAA is *REALLY* late. It's barely
@@ -496,7 +496,7 @@ static void ehci_work (struct ehci_hcd *ehci)
* misplace IRQs, and should let us run completely without IRQs.
* such lossage has been observed on both VT6202 and VT8235.
*/
- if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) &&
+ if (ehci->rh_state == EHCI_RH_RUNNING &&
(ehci->async->qh_next.ptr != NULL ||
ehci->periodic_sched != 0))
timer_action (ehci, TIMER_IO_WATCHDOG);
@@ -516,7 +516,7 @@ static void ehci_stop (struct usb_hcd *hcd)
del_timer_sync(&ehci->iaa_watchdog);
spin_lock_irq(&ehci->lock);
- if (HC_IS_RUNNING (hcd->state))
+ if (ehci->rh_state == EHCI_RH_RUNNING)
ehci_quiesce (ehci);
ehci_silence_controller(ehci);
@@ -741,7 +741,7 @@ static int ehci_run (struct usb_hcd *hcd)
* be started before the port switching actions could complete.
*/
down_write(&ehci_cf_port_reset_rwsem);
- hcd->state = HC_STATE_RUNNING;
+ ehci->rh_state = EHCI_RH_RUNNING;
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
msleep(5);
@@ -768,6 +768,35 @@ static int ehci_run (struct usb_hcd *hcd)
return 0;
}
+static int __maybe_unused ehci_setup (struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ ehci->regs = (void __iomem *)ehci->caps +
+ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+ dbg_hcs_params(ehci, "reset");
+ dbg_hcc_params(ehci, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+ ehci->sbrn = HCD_USB2;
+
+ retval = ehci_halt(ehci);
+ if (retval)
+ return retval;
+
+ /* data structure init */
+ retval = ehci_init(hcd);
+ if (retval)
+ return retval;
+
+ ehci_reset(ehci);
+
+ return 0;
+}
+
/*-------------------------------------------------------------------------*/
static irqreturn_t ehci_irq (struct usb_hcd *hcd)
@@ -788,7 +817,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* Shared IRQ? */
masked_status = status & INTR_MASK;
- if (!masked_status || unlikely(hcd->state == HC_STATE_HALT)) {
+ if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
spin_unlock(&ehci->lock);
return IRQ_NONE;
}
@@ -952,7 +981,7 @@ static int ehci_urb_enqueue (
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
/* failfast */
- if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim)
+ if (ehci->rh_state != EHCI_RH_RUNNING && ehci->reclaim)
end_unlink_async(ehci);
/* If the QH isn't linked then there's nothing we can do
@@ -1079,7 +1108,7 @@ rescan:
goto idle_timeout;
}
- if (!HC_IS_RUNNING (hcd->state))
+ if (ehci->rh_state != EHCI_RH_RUNNING)
qh->qh_state = QH_STATE_IDLE;
switch (qh->qh_state) {
case QH_STATE_LINKED:
@@ -1166,8 +1195,7 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
static int ehci_get_frame (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
- return (ehci_readl(ehci, &ehci->regs->frame_index) >> 3) %
- ehci->periodic_size;
+ return (ehci_read_frame_index(ehci) >> 3) % ehci->periodic_size;
}
/*-------------------------------------------------------------------------*/
@@ -1291,6 +1319,16 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_grlib_driver
#endif
+#ifdef CONFIG_USB_PXA168_EHCI
+#include "ehci-pxa168.c"
+#define PLATFORM_DRIVER ehci_pxa168_driver
+#endif
+
+#ifdef CONFIG_NLM_XLR
+#include "ehci-xls.c"
+#define PLATFORM_DRIVER ehci_xls_driver
+#endif
+
#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
!defined(XILINX_OF_PLATFORM_DRIVER)
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 4c32cb19b40..77bbb2357e4 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -236,10 +236,8 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
}
/* stop schedules, clean any completed work */
- if (HC_IS_RUNNING(hcd->state)) {
+ if (ehci->rh_state == EHCI_RH_RUNNING)
ehci_quiesce (ehci);
- hcd->state = HC_STATE_QUIESCING;
- }
ehci->command = ehci_readl(ehci, &ehci->regs->command);
ehci_work(ehci);
@@ -313,7 +311,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
/* turn off now-idle HC */
ehci_halt (ehci);
- hcd->state = HC_STATE_SUSPENDED;
+ ehci->rh_state = EHCI_RH_SUSPENDED;
if (ehci->reclaim)
end_unlink_async(ehci);
@@ -382,6 +380,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
/* restore CMD_RUN, framelist size, and irq threshold */
ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ ehci->rh_state = EHCI_RH_RUNNING;
/* Some controller/firmware combinations need a delay during which
* they set up the port statuses. See Bugzilla #8190. */
@@ -451,7 +450,6 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
}
ehci->next_statechange = jiffies + msecs_to_jiffies(5);
- hcd->state = HC_STATE_RUNNING;
/* Now we can safely re-enable irqs */
ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
@@ -563,7 +561,7 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
u32 ppcd = 0;
/* if !USB_SUSPEND, root hub timers won't get shut down ... */
- if (!HC_IS_RUNNING(hcd->state))
+ if (ehci->rh_state != EHCI_RH_RUNNING)
return 0;
/* init status to no-changes */
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index 555a73c864b..55978fcfa4b 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -236,7 +236,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
priv->hcd = hcd;
platform_set_drvdata(pdev, priv);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto err_add;
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index c3ba3ed5f3a..ba1f5136113 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -155,7 +155,7 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
goto err3;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 45240321ca0..e39b0297bad 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -228,7 +228,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
/* cache this readonly data; minimize chip reads */
omap_ehci->hcs_params = readl(&omap_ehci->caps->hcs_params);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_err(dev, "failed to add hcd with err %d\n", ret);
goto err_add_hcd;
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 395bdb0248d..a68a2a5c4b8 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -277,7 +277,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
printk(KERN_WARNING "Orion ehci -USB phy version isn't supported.\n");
}
- err = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
goto err4;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 1102ce65a3a..f4b627d343a 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -224,6 +224,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
pci_dev_put(p_smbus);
}
break;
+ case PCI_VENDOR_ID_NETMOS:
+ /* MosChip frame-index-register bug */
+ ehci_info(ehci, "applying MosChip frame-index workaround\n");
+ ehci->frame_index_bug = 1;
+ break;
}
/* optional debug port, normally in the first BAR */
@@ -439,7 +444,7 @@ static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
/* here we "know" root ports should always stay powered */
ehci_port_power(ehci, 1);
- hcd->state = HC_STATE_SUSPENDED;
+ ehci->rh_state = EHCI_RH_SUSPENDED;
return 0;
}
#endif
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 64626a777d6..2dc32da75cf 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -167,7 +167,7 @@ static int __devinit ps3_ehci_probe(struct ps3_system_bus_device *dev)
ps3_system_bus_set_drvdata(dev, hcd);
- result = usb_add_hcd(hcd, virq, IRQF_DISABLED);
+ result = usb_add_hcd(hcd, virq, 0);
if (result) {
dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
diff --git a/drivers/usb/host/ehci-pxa168.c b/drivers/usb/host/ehci-pxa168.c
new file mode 100644
index 00000000000..ac0c16e8f53
--- /dev/null
+++ b/drivers/usb/host/ehci-pxa168.c
@@ -0,0 +1,363 @@
+/*
+ * drivers/usb/host/ehci-pxa168.c
+ *
+ * Tanmay Upadhyay <tanmay.upadhyay@einfochips.com>
+ *
+ * Based on drivers/usb/host/ehci-orion.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <mach/pxa168.h>
+
+#define USB_PHY_CTRL_REG 0x4
+#define USB_PHY_PLL_REG 0x8
+#define USB_PHY_TX_REG 0xc
+
+#define FBDIV_SHIFT 4
+
+#define ICP_SHIFT 12
+#define ICP_15 2
+#define ICP_20 3
+#define ICP_25 4
+
+#define KVCO_SHIFT 15
+
+#define PLLCALI12_SHIFT 25
+#define CALI12_VDD 0
+#define CALI12_09 1
+#define CALI12_10 2
+#define CALI12_11 3
+
+#define PLLVDD12_SHIFT 27
+#define VDD12_VDD 0
+#define VDD12_10 1
+#define VDD12_11 2
+#define VDD12_12 3
+
+#define PLLVDD18_SHIFT 29
+#define VDD18_19 0
+#define VDD18_20 1
+#define VDD18_21 2
+#define VDD18_22 3
+
+
+#define PLL_READY (1 << 23)
+#define VCOCAL_START (1 << 21)
+#define REG_RCAL_START (1 << 12)
+
+struct pxa168_usb_drv_data {
+ struct ehci_hcd ehci;
+ struct clk *pxa168_usb_clk;
+ struct resource *usb_phy_res;
+ void __iomem *usb_phy_reg_base;
+};
+
+static int ehci_pxa168_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ ehci_reset(ehci);
+ retval = ehci_halt(ehci);
+ if (retval)
+ return retval;
+
+ /*
+ * data structure init
+ */
+ retval = ehci_init(hcd);
+ if (retval)
+ return retval;
+
+ hcd->has_tt = 1;
+
+ ehci_port_power(ehci, 0);
+
+ return retval;
+}
+
+static const struct hc_driver ehci_pxa168_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Marvell PXA168 EHCI",
+ .hcd_priv_size = sizeof(struct pxa168_usb_drv_data),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_pxa168_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int pxa168_usb_phy_init(struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *usb_phy_reg_base;
+ struct pxa168_usb_pdata *pdata;
+ struct pxa168_usb_drv_data *drv_data;
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ unsigned long reg_val;
+ int pll_retry_cont = 10000, err = 0;
+
+ drv_data = (struct pxa168_usb_drv_data *)hcd->hcd_priv;
+ pdata = (struct pxa168_usb_pdata *)pdev->dev.platform_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Found HC with no PHY register addr. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res),
+ ehci_pxa168_hc_driver.description)) {
+ dev_dbg(&pdev->dev, "controller already in use\n");
+ return -EBUSY;
+ }
+
+ usb_phy_reg_base = ioremap(res->start, resource_size(res));
+ if (usb_phy_reg_base == NULL) {
+ dev_dbg(&pdev->dev, "error mapping memory\n");
+ err = -EFAULT;
+ goto err1;
+ }
+ drv_data->usb_phy_reg_base = usb_phy_reg_base;
+ drv_data->usb_phy_res = res;
+
+ /* If someone wants to init USB phy in board specific way */
+ if (pdata && pdata->phy_init)
+ return pdata->phy_init(usb_phy_reg_base);
+
+ /* Power up the PHY and PLL */
+ writel(readl(usb_phy_reg_base + USB_PHY_CTRL_REG) | 0x3,
+ usb_phy_reg_base + USB_PHY_CTRL_REG);
+
+ /* Configure PHY PLL */
+ reg_val = readl(usb_phy_reg_base + USB_PHY_PLL_REG) & ~(0x7e03ffff);
+ reg_val |= (VDD18_22 << PLLVDD18_SHIFT | VDD12_12 << PLLVDD12_SHIFT |
+ CALI12_11 << PLLCALI12_SHIFT | 3 << KVCO_SHIFT |
+ ICP_15 << ICP_SHIFT | 0xee << FBDIV_SHIFT | 0xb);
+ writel(reg_val, usb_phy_reg_base + USB_PHY_PLL_REG);
+
+ /* Make sure PHY PLL is ready */
+ while (!(readl(usb_phy_reg_base + USB_PHY_PLL_REG) & PLL_READY)) {
+ if (!(pll_retry_cont--)) {
+ dev_dbg(&pdev->dev, "USB PHY PLL not ready\n");
+ err = -EIO;
+ goto err2;
+ }
+ }
+
+ /* Toggle VCOCAL_START bit of U2PLL for PLL calibration */
+ udelay(200);
+ writel(readl(usb_phy_reg_base + USB_PHY_PLL_REG) | VCOCAL_START,
+ usb_phy_reg_base + USB_PHY_PLL_REG);
+ udelay(40);
+ writel(readl(usb_phy_reg_base + USB_PHY_PLL_REG) & ~VCOCAL_START,
+ usb_phy_reg_base + USB_PHY_PLL_REG);
+
+ /* Toggle REG_RCAL_START bit of U2PTX for impedance calibration */
+ udelay(400);
+ writel(readl(usb_phy_reg_base + USB_PHY_TX_REG) | REG_RCAL_START,
+ usb_phy_reg_base + USB_PHY_TX_REG);
+ udelay(40);
+ writel(readl(usb_phy_reg_base + USB_PHY_TX_REG) & ~REG_RCAL_START,
+ usb_phy_reg_base + USB_PHY_TX_REG);
+
+ /* Make sure PHY PLL is ready again */
+ pll_retry_cont = 0;
+ while (!(readl(usb_phy_reg_base + USB_PHY_PLL_REG) & PLL_READY)) {
+ if (!(pll_retry_cont--)) {
+ dev_dbg(&pdev->dev, "USB PHY PLL not ready\n");
+ err = -EIO;
+ goto err2;
+ }
+ }
+
+ return 0;
+err2:
+ iounmap(usb_phy_reg_base);
+err1:
+ release_mem_region(res->start, resource_size(res));
+ return err;
+}
+
+static int __devinit ehci_pxa168_drv_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct pxa168_usb_drv_data *drv_data;
+ void __iomem *regs;
+ int irq, err = 0;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_debug("Initializing pxa168-SoC USB Host Controller\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev,
+ "Found HC with no IRQ. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ err = -ENODEV;
+ goto err1;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ err = -ENODEV;
+ goto err1;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res),
+ ehci_pxa168_hc_driver.description)) {
+ dev_dbg(&pdev->dev, "controller already in use\n");
+ err = -EBUSY;
+ goto err1;
+ }
+
+ regs = ioremap(res->start, resource_size(res));
+ if (regs == NULL) {
+ dev_dbg(&pdev->dev, "error mapping memory\n");
+ err = -EFAULT;
+ goto err2;
+ }
+
+ hcd = usb_create_hcd(&ehci_pxa168_hc_driver,
+ &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ err = -ENOMEM;
+ goto err3;
+ }
+
+ drv_data = (struct pxa168_usb_drv_data *)hcd->hcd_priv;
+
+ /* Enable USB clock */
+ drv_data->pxa168_usb_clk = clk_get(&pdev->dev, "PXA168-USBCLK");
+ if (IS_ERR(drv_data->pxa168_usb_clk)) {
+ dev_err(&pdev->dev, "Couldn't get USB clock\n");
+ err = PTR_ERR(drv_data->pxa168_usb_clk);
+ goto err4;
+ }
+ clk_enable(drv_data->pxa168_usb_clk);
+
+ err = pxa168_usb_phy_init(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "USB PHY initialization failed\n");
+ goto err5;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = regs;
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = hcd->regs + 0x100;
+ ehci->regs = hcd->regs + 0x100 +
+ HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ hcd->has_tt = 1;
+ ehci->sbrn = 0x20;
+
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
+ if (err)
+ goto err5;
+
+ return 0;
+
+err5:
+ clk_disable(drv_data->pxa168_usb_clk);
+ clk_put(drv_data->pxa168_usb_clk);
+err4:
+ usb_put_hcd(hcd);
+err3:
+ iounmap(regs);
+err2:
+ release_mem_region(res->start, resource_size(res));
+err1:
+ dev_err(&pdev->dev, "init %s fail, %d\n",
+ dev_name(&pdev->dev), err);
+
+ return err;
+}
+
+static int __exit ehci_pxa168_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct pxa168_usb_drv_data *drv_data =
+ (struct pxa168_usb_drv_data *)hcd->hcd_priv;
+
+ usb_remove_hcd(hcd);
+
+ /* Power down PHY & PLL */
+ writel(readl(drv_data->usb_phy_reg_base + USB_PHY_CTRL_REG) & (~0x3),
+ drv_data->usb_phy_reg_base + USB_PHY_CTRL_REG);
+
+ clk_disable(drv_data->pxa168_usb_clk);
+ clk_put(drv_data->pxa168_usb_clk);
+
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+
+ iounmap(drv_data->usb_phy_reg_base);
+ release_mem_region(drv_data->usb_phy_res->start,
+ resource_size(drv_data->usb_phy_res));
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:pxa168-ehci");
+
+static struct platform_driver ehci_pxa168_driver = {
+ .probe = ehci_pxa168_drv_probe,
+ .remove = __exit_p(ehci_pxa168_drv_remove),
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver.name = "pxa168-ehci",
+};
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 0917e3a3246..4e4066c35a0 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -111,8 +111,6 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
}
}
- /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
- wmb ();
hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
@@ -153,7 +151,7 @@ static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
spin_lock_irqsave(&ehci->lock, flags);
qh->clearing_tt = 0;
if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
- && HC_IS_RUNNING(hcd->state))
+ && ehci->rh_state == EHCI_RH_RUNNING)
qh_link_async(ehci, qh);
spin_unlock_irqrestore(&ehci->lock, flags);
}
@@ -425,7 +423,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* stop scanning when we reach qtds the hc is using */
} else if (likely (!stopped
- && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
+ && ehci->rh_state == EHCI_RH_RUNNING)) {
break;
/* scan the whole queue for unlinks whenever it stops */
@@ -433,7 +431,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
stopped = 1;
/* cancel everything if we halt, suspend, etc */
- if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))
+ if (ehci->rh_state != EHCI_RH_RUNNING)
last_status = -ESHUTDOWN;
/* this qtd is active; skip it unless a previous qtd
@@ -724,7 +722,8 @@ qh_urb_transaction (
/*
* control requests may need a terminating data "status" ack;
- * bulk ones may need a terminating short packet (zero length).
+ * other OUT ones may need a terminating short packet
+ * (zero length).
*/
if (likely (urb->transfer_buffer_length != 0)) {
int one_more = 0;
@@ -733,7 +732,7 @@ qh_urb_transaction (
one_more = 1;
token ^= 0x0100; /* "in" <--> "out" */
token |= QTD_TOGGLE; /* force DATA1 */
- } else if (usb_pipebulk (urb->pipe)
+ } else if (usb_pipeout(urb->pipe)
&& (urb->transfer_flags & URB_ZERO_PACKET)
&& !(urb->transfer_buffer_length % maxpacket)) {
one_more = 1;
@@ -977,9 +976,8 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* in case a clear of CMD_ASE didn't take yet */
(void)handshake(ehci, &ehci->regs->status,
STS_ASS, 0, 150);
- cmd |= CMD_ASE | CMD_RUN;
+ cmd |= CMD_ASE;
ehci_writel(ehci, cmd, &ehci->regs->command);
- ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
/* posted write need not be known to HC yet ... */
}
}
@@ -1058,7 +1056,7 @@ static struct ehci_qh *qh_append_tds (
*/
token = qtd->hw_token;
qtd->hw_token = HALT_BIT(ehci);
- wmb ();
+
dummy = qh->dummy;
dma = dummy->qtd_dma;
@@ -1168,14 +1166,13 @@ static void end_unlink_async (struct ehci_hcd *ehci)
qh_completions (ehci, qh);
- if (!list_empty (&qh->qtd_list)
- && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
+ if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
qh_link_async (ehci, qh);
- else {
+ } else {
/* it's not free to turn the async schedule on/off; leave it
* active but idle for a while once it empties.
*/
- if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
+ if (ehci->rh_state == EHCI_RH_RUNNING
&& ehci->async->qh_next.qh == NULL)
timer_action (ehci, TIMER_ASYNC_OFF);
}
@@ -1211,7 +1208,7 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* stop async schedule right now? */
if (unlikely (qh == ehci->async)) {
/* can't get here without STS_ASS set */
- if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
+ if (ehci->rh_state != EHCI_RH_HALTED
&& !ehci->reclaim) {
/* ... and CMD_IAAD clear */
ehci_writel(ehci, cmd & ~CMD_ASE,
@@ -1237,7 +1234,7 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
wmb ();
/* If the controller isn't running, we don't have to wait for it */
- if (unlikely(!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))) {
+ if (unlikely(ehci->rh_state != EHCI_RH_RUNNING)) {
/* if (unlikely (qh->reclaim != 0))
* this will recurse, probably not much
*/
@@ -1260,7 +1257,7 @@ static void scan_async (struct ehci_hcd *ehci)
enum ehci_timer_action action = TIMER_IO_WATCHDOG;
timer_action_done (ehci, TIMER_ASYNC_SHRINK);
- stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state);
+ stopped = (ehci->rh_state != EHCI_RH_RUNNING);
ehci->qh_scan_next = ehci->async->qh_next.qh;
while (ehci->qh_scan_next) {
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
index 9e77f1c8bdb..024b65c4990 100644
--- a/drivers/usb/host/ehci-s5p.c
+++ b/drivers/usb/host/ehci-s5p.c
@@ -136,7 +136,7 @@ static int __devinit s5p_ehci_probe(struct platform_device *pdev)
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = readl(&ehci->caps->hcs_params);
- err = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
goto fail;
@@ -270,7 +270,7 @@ static int s5p_ehci_resume(struct device *dev)
/* here we "know" root ports should always stay powered */
ehci_port_power(ehci, 1);
- hcd->state = HC_STATE_SUSPENDED;
+ ehci->rh_state = EHCI_RH_SUSPENDED;
return 0;
}
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 2abf8543f08..2e829fae648 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -36,6 +36,27 @@
static int ehci_get_frame (struct usb_hcd *hcd);
+#ifdef CONFIG_PCI
+
+static unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
+{
+ unsigned uf;
+
+ /*
+ * The MosChip MCS9990 controller updates its microframe counter
+ * a little before the frame counter, and occasionally we will read
+ * the invalid intermediate value. Avoid problems by checking the
+ * microframe number (the low-order 3 bits); if they are 0 then
+ * re-read the register to get the correct value.
+ */
+ uf = ehci_readl(ehci, &ehci->regs->frame_index);
+ if (unlikely(ehci->frame_index_bug && ((uf & 7) == 0)))
+ uf = ehci_readl(ehci, &ehci->regs->frame_index);
+ return uf;
+}
+
+#endif
+
/*-------------------------------------------------------------------------*/
/*
@@ -479,10 +500,9 @@ static int enable_periodic (struct ehci_hcd *ehci)
cmd = ehci_readl(ehci, &ehci->regs->command) | CMD_PSE;
ehci_writel(ehci, cmd, &ehci->regs->command);
/* posted write ... PSS happens later */
- ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
/* make sure ehci_work scans these */
- ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index)
+ ehci->next_uframe = ehci_read_frame_index(ehci)
% (ehci->periodic_size << 3);
if (unlikely(ehci->broken_periodic))
ehci->last_periodic_enable = ktime_get_real();
@@ -677,7 +697,7 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* reschedule QH iff another request is queued */
if (!list_empty(&qh->qtd_list) &&
- HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
+ ehci->rh_state == EHCI_RH_RUNNING) {
rc = qh_schedule(ehci, qh);
/* An error here likely indicates handshake failure
@@ -1409,7 +1429,7 @@ iso_stream_schedule (
goto fail;
}
- now = ehci_readl(ehci, &ehci->regs->frame_index) & (mod - 1);
+ now = ehci_read_frame_index(ehci) & (mod - 1);
/* Typical case: reuse current schedule, stream is still active.
* Hopefully there are no gaps from the host falling behind
@@ -2275,8 +2295,8 @@ scan_periodic (struct ehci_hcd *ehci)
* Touches as few pages as possible: cache-friendly.
*/
now_uframe = ehci->next_uframe;
- if (HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
- clock = ehci_readl(ehci, &ehci->regs->frame_index);
+ if (ehci->rh_state == EHCI_RH_RUNNING) {
+ clock = ehci_read_frame_index(ehci);
clock_frame = (clock >> 3) & (ehci->periodic_size - 1);
} else {
clock = now_uframe + mod - 1;
@@ -2310,7 +2330,7 @@ restart:
union ehci_shadow temp;
int live;
- live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state);
+ live = (ehci->rh_state == EHCI_RH_RUNNING);
switch (hc32_to_cpu(ehci, type)) {
case Q_TYPE_QH:
/* handle any completions */
@@ -2435,7 +2455,7 @@ restart:
* We can't advance our scan without collecting the ISO
* transfers that are still pending in this frame.
*/
- if (incomplete && HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
+ if (incomplete && ehci->rh_state == EHCI_RH_RUNNING) {
ehci->next_uframe = now_uframe;
break;
}
@@ -2451,12 +2471,11 @@ restart:
if (now_uframe == clock) {
unsigned now;
- if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
+ if (ehci->rh_state != EHCI_RH_RUNNING
|| ehci->periodic_sched == 0)
break;
ehci->next_uframe = now_uframe;
- now = ehci_readl(ehci, &ehci->regs->frame_index) &
- (mod - 1);
+ now = ehci_read_frame_index(ehci) & (mod - 1);
if (now_uframe == now)
break;
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index 86a95bb80a6..9d9cf47d80d 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -168,7 +168,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
clk_enable(priv->fclk);
clk_enable(priv->iclk);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to add hcd");
goto fail_add_hcd;
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index dbf1e4ef3c1..b115b0b76e3 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -154,7 +154,7 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
ehci->clk = usbh_clk;
spear_start_ehci(ehci);
- retval = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
goto fail_add_hcd;
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 02b2bfd49a1..db9d1b4bfbd 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -674,7 +674,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
}
#endif
- err = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
goto fail;
diff --git a/drivers/usb/host/ehci-vt8500.c b/drivers/usb/host/ehci-vt8500.c
index 47d749631bc..54d1ab8aec4 100644
--- a/drivers/usb/host/ehci-vt8500.c
+++ b/drivers/usb/host/ehci-vt8500.c
@@ -133,7 +133,7 @@ static int vt8500_ehci_drv_probe(struct platform_device *pdev)
ehci_port_power(ehci, 1);
ret = usb_add_hcd(hcd, pdev->resource[1].start,
- IRQF_DISABLED | IRQF_SHARED);
+ IRQF_SHARED);
if (ret == 0) {
platform_set_drvdata(pdev, hcd);
return ret;
diff --git a/drivers/usb/host/ehci-xls.c b/drivers/usb/host/ehci-xls.c
new file mode 100644
index 00000000000..fe74bd67601
--- /dev/null
+++ b/drivers/usb/host/ehci-xls.c
@@ -0,0 +1,161 @@
+/*
+ * EHCI HCD for Netlogic XLS processors.
+ *
+ * (C) Copyright 2011 Netlogic Microsystems Inc.
+ *
+ * Based on various ehci-*.c drivers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/platform_device.h>
+
+static int ehci_xls_setup(struct usb_hcd *hcd)
+{
+ int retval;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ ehci->caps = hcd->regs;
+ ehci->regs = hcd->regs +
+ HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ dbg_hcs_params(ehci, "reset");
+ dbg_hcc_params(ehci, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+ retval = ehci_halt(ehci);
+ if (retval)
+ return retval;
+
+ /* data structure init */
+ retval = ehci_init(hcd);
+ if (retval)
+ return retval;
+
+ ehci_reset(ehci);
+
+ return retval;
+}
+
+int ehci_xls_probe_internal(const struct hc_driver *driver,
+ struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int retval, irq;
+
+ /* Get our IRQ from an earlier registered Platform Resource */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Found HC with no IRQ. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ return -ENODEV;
+ }
+
+ /* Get our Memory Handle */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Error: MMIO Handle %s setup!\n",
+ dev_name(&pdev->dev));
+ return -ENODEV;
+ }
+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto err1;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = res->end - res->start + 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+ driver->description)) {
+ dev_dbg(&pdev->dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto err2;
+ }
+ hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+
+ if (hcd->regs == NULL) {
+ dev_dbg(&pdev->dev, "error mapping memory\n");
+ retval = -EFAULT;
+ goto err3;
+ }
+
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval != 0)
+ goto err4;
+ return retval;
+
+err4:
+ iounmap(hcd->regs);
+err3:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err2:
+ usb_put_hcd(hcd);
+err1:
+ dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev),
+ retval);
+ return retval;
+}
+
+static struct hc_driver ehci_xls_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "XLS EHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+ .irq = ehci_irq,
+ .flags = HCD_USB2 | HCD_MEMORY,
+ .reset = ehci_xls_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ .get_frame_number = ehci_get_frame,
+
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_xls_probe(struct platform_device *pdev)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ return ehci_xls_probe_internal(&ehci_xls_hc_driver, pdev);
+}
+
+static int ehci_xls_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+ return 0;
+}
+
+MODULE_ALIAS("ehci-xls");
+
+static struct platform_driver ehci_xls_driver = {
+ .probe = ehci_xls_probe,
+ .remove = ehci_xls_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "ehci-xls",
+ },
+};
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index cc7d337ec35..0a5fda73b3f 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -62,6 +62,12 @@ struct ehci_stats {
#define EHCI_MAX_ROOT_PORTS 15 /* see HCS_N_PORTS */
+enum ehci_rh_state {
+ EHCI_RH_HALTED,
+ EHCI_RH_SUSPENDED,
+ EHCI_RH_RUNNING
+};
+
struct ehci_hcd { /* one per controller */
/* glue to PCI and HCD framework */
struct ehci_caps __iomem *caps;
@@ -70,6 +76,7 @@ struct ehci_hcd { /* one per controller */
__u32 hcs_params; /* cached register copy */
spinlock_t lock;
+ enum ehci_rh_state rh_state;
/* async schedule support */
struct ehci_qh *async;
@@ -139,6 +146,7 @@ struct ehci_hcd { /* one per controller */
unsigned fs_i_thresh:1; /* Intel iso scheduling */
unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
+ unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
@@ -740,6 +748,22 @@ static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_PCI
+
+/* For working around the MosChip frame-index-register bug */
+static unsigned ehci_read_frame_index(struct ehci_hcd *ehci);
+
+#else
+
+static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
+{
+ return ehci_readl(ehci, &ehci->regs->frame_index);
+}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
#ifndef DEBUG
#define STUB_DEBUG_FILES
#endif /* DEBUG */
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 572ea53b022..4ed6d19f2a5 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -621,12 +621,15 @@ static int __devinit of_fhci_probe(struct platform_device *ofdev)
goto err_pram;
}
- pram_addr = cpm_muram_alloc_fixed(iprop[2], FHCI_PRAM_SIZE);
+ pram_addr = cpm_muram_alloc(FHCI_PRAM_SIZE, 64);
if (IS_ERR_VALUE(pram_addr)) {
dev_err(dev, "failed to allocate usb pram\n");
ret = -ENOMEM;
goto err_pram;
}
+
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, QE_CR_SUBBLOCK_USB,
+ QE_CR_PROTOCOL_UNSPECIFIED, pram_addr);
fhci->pram = cpm_muram_addr(pram_addr);
/* GPIOs and pins */
@@ -686,7 +689,7 @@ static int __devinit of_fhci_probe(struct platform_device *ofdev)
}
ret = request_irq(fhci->timer->irq, fhci_frame_limit_timer_irq,
- IRQF_DISABLED, "qe timer (usb)", hcd);
+ 0, "qe timer (usb)", hcd);
if (ret) {
dev_err(dev, "failed to request timer irq");
goto err_timer_irq;
@@ -745,7 +748,7 @@ static int __devinit of_fhci_probe(struct platform_device *ofdev)
out_be16(&fhci->regs->usb_event, 0xffff);
out_be16(&fhci->regs->usb_mask, 0);
- ret = usb_add_hcd(hcd, usb_irq, IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, usb_irq, 0);
if (ret < 0)
goto err_add_hcd;
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index a42ef380e91..2df851b4bc7 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -1,7 +1,7 @@
/*
* Freescale QUICC Engine USB Host Controller Driver
*
- * Copyright (c) Freescale Semicondutor, Inc. 2006.
+ * Copyright (c) Freescale Semicondutor, Inc. 2006, 2011.
* Shlomi Gridish <gridish@freescale.com>
* Jerry Huang <Chang-Ming.Huang@freescale.com>
* Copyright (c) Logic Product Development, Inc. 2007
@@ -810,9 +810,11 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
ed->dev_addr = usb_pipedevice(urb->pipe);
ed->max_pkt_size = usb_maxpacket(urb->dev, urb->pipe,
usb_pipeout(urb->pipe));
+ /* setup stage */
td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++, FHCI_TA_SETUP,
USB_TD_TOGGLE_DATA0, urb->setup_packet, 8, 0, 0, true);
+ /* data stage */
if (data_len > 0) {
td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
@@ -820,9 +822,18 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
USB_TD_TOGGLE_DATA1, data, data_len, 0, 0,
true);
}
- td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
- usb_pipeout(urb->pipe) ? FHCI_TA_IN : FHCI_TA_OUT,
- USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+
+ /* status stage */
+ if (data_len > 0)
+ td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
+ (usb_pipeout(urb->pipe) ? FHCI_TA_IN :
+ FHCI_TA_OUT),
+ USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+ else
+ td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
+ FHCI_TA_IN,
+ USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+
urb_state = US_CTRL_SETUP;
break;
case FHCI_TF_ISO:
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index af05718bdc7..2ee18cfa1ef 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1891,7 +1891,7 @@ static int imx21_probe(struct platform_device *pdev)
dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
(readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, irq, 0);
if (ret != 0) {
dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
goto failed_add_hcd;
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index baae4ccd16a..d91e5f211a7 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1639,7 +1639,7 @@ static int __devinit isp116x_probe(struct platform_device *pdev)
goto err6;
}
- ret = usb_add_hcd(hcd, irq, irqflags | IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, irq, irqflags);
if (ret)
goto err6;
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 9c37dad3e81..e5fd8aa57af 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2358,7 +2358,7 @@ static int isp1362_hc_reset(struct usb_hcd *hcd)
unsigned long flags;
int clkrdy = 0;
- pr_info("%s:\n", __func__);
+ pr_debug("%s:\n", __func__);
if (isp1362_hcd->board && isp1362_hcd->board->reset) {
isp1362_hcd->board->reset(hcd->self.controller, 1);
@@ -2395,7 +2395,7 @@ static void isp1362_hc_stop(struct usb_hcd *hcd)
unsigned long flags;
u32 tmp;
- pr_info("%s:\n", __func__);
+ pr_debug("%s:\n", __func__);
del_timer_sync(&hcd->rh_timer);
@@ -2523,7 +2523,7 @@ static int isp1362_hc_start(struct usb_hcd *hcd)
u16 chipid;
unsigned long flags;
- pr_info("%s:\n", __func__);
+ pr_debug("%s:\n", __func__);
spin_lock_irqsave(&isp1362_hcd->lock, flags);
chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
@@ -2773,7 +2773,7 @@ static int __devinit isp1362_probe(struct platform_device *pdev)
if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
irq_flags |= IRQF_TRIGGER_LOW;
- retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED);
+ retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
if (retval != 0)
goto err6;
pr_info("%s, irq %d\n", hcd->product_desc, irq);
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 840beda66dd..27dfab80ed8 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -21,8 +21,10 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mm.h>
+#include <linux/timer.h>
#include <asm/unaligned.h>
#include <asm/cacheflush.h>
+#include <linux/gpio.h>
#include "isp1760-hcd.h"
@@ -39,7 +41,6 @@ struct isp1760_hcd {
int int_done_map;
struct memory_chunk memory_pool[BLOCKS];
struct list_head controlqhs, bulkqhs, interruptqhs;
- int active_ptds;
/* periodic schedule support */
#define DEFAULT_I_TDPS 1024
@@ -48,6 +49,8 @@ struct isp1760_hcd {
unsigned long reset_done;
unsigned long next_statechange;
unsigned int devflags;
+
+ int rst_gpio;
};
static inline struct isp1760_hcd *hcd_to_priv(struct usb_hcd *hcd)
@@ -114,6 +117,7 @@ struct isp1760_qh {
u32 toggle;
u32 ping;
int slot;
+ int tt_buffer_dirty; /* See USB2.0 spec section 11.17.5 */
};
struct urb_listitem {
@@ -432,6 +436,18 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
int result;
u32 scratch, hwmode;
+ /* low-level chip reset */
+ if (gpio_is_valid(priv->rst_gpio)) {
+ unsigned int rst_lvl;
+
+ rst_lvl = (priv->devflags &
+ ISP1760_FLAG_RESET_ACTIVE_HIGH) ? 1 : 0;
+
+ gpio_set_value(priv->rst_gpio, rst_lvl);
+ mdelay(50);
+ gpio_set_value(priv->rst_gpio, !rst_lvl);
+ }
+
/* Setup HW Mode Control: This assumes a level active-low interrupt */
hwmode = HW_DATA_BUS_32BIT;
@@ -489,10 +505,6 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
16 : 32, (priv->devflags & ISP1760_FLAG_ANALOG_OC) ?
"analog" : "digital");
- /* This is weird: at the first plug-in of a device there seems to be
- one packet queued that never gets returned? */
- priv->active_ptds = -1;
-
/* ATL reset */
reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET);
mdelay(10);
@@ -514,83 +526,6 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
return priv_init(hcd);
}
-static void isp1760_init_maps(struct usb_hcd *hcd)
-{
- /*set last maps, for iso its only 1, else 32 tds bitmap*/
- reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000);
- reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000);
- reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001);
-
- reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, 0xffffffff);
- reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, 0xffffffff);
- reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, 0xffffffff);
-
- reg_write32(hcd->regs, HC_BUFFER_STATUS_REG,
- ATL_BUF_FILL | INT_BUF_FILL);
-}
-
-static void isp1760_enable_interrupts(struct usb_hcd *hcd)
-{
- reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0);
- reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0xffffffff);
- reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0);
- reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0xffffffff);
- reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0);
- reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff);
- /* step 23 passed */
-}
-
-static int isp1760_run(struct usb_hcd *hcd)
-{
- int retval;
- u32 temp;
- u32 command;
- u32 chipid;
-
- hcd->uses_new_polling = 1;
-
- hcd->state = HC_STATE_RUNNING;
- isp1760_enable_interrupts(hcd);
- temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
- reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp | HW_GLOBAL_INTR_EN);
-
- command = reg_read32(hcd->regs, HC_USBCMD);
- command &= ~(CMD_LRESET|CMD_RESET);
- command |= CMD_RUN;
- reg_write32(hcd->regs, HC_USBCMD, command);
-
- retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 250 * 1000);
- if (retval)
- return retval;
-
- /*
- * XXX
- * Spec says to write FLAG_CF as last config action, priv code grabs
- * the semaphore while doing so.
- */
- down_write(&ehci_cf_port_reset_rwsem);
- reg_write32(hcd->regs, HC_CONFIGFLAG, FLAG_CF);
-
- retval = handshake(hcd, HC_CONFIGFLAG, FLAG_CF, FLAG_CF, 250 * 1000);
- up_write(&ehci_cf_port_reset_rwsem);
- if (retval)
- return retval;
-
- chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
- dev_info(hcd->self.controller, "USB ISP %04x HW rev. %d started\n",
- chipid & 0xffff, chipid >> 16);
-
- /* PTD Register Init Part 2, Step 28 */
- /* enable INTs */
- isp1760_init_maps(hcd);
-
- /* GRR this is run-once init(), being done every time the HC starts.
- * So long as they're part of class devices, we can't do it init()
- * since the class device isn't created that early.
- */
- return 0;
-}
-
static u32 base_to_chip(u32 base)
{
return ((base - 0x400) >> 3);
@@ -813,28 +748,29 @@ static void start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot,
WARN_ON(slots[slot].qh);
WARN_ON(qtd->status != QTD_PAYLOAD_ALLOC);
- slots[slot].qtd = qtd;
- slots[slot].qh = qh;
- qh->slot = slot;
- qtd->status = QTD_XFER_STARTED; /* Set this before writing ptd, since
- interrupt routine may preempt and expects this value. */
- ptd_write(hcd->regs, ptd_offset, slot, ptd);
- priv->active_ptds++;
-
/* Make sure done map has not triggered from some unlinked transfer */
if (ptd_offset == ATL_PTD_OFFSET) {
priv->atl_done_map |= reg_read32(hcd->regs,
HC_ATL_PTD_DONEMAP_REG);
- priv->atl_done_map &= ~(1 << qh->slot);
+ priv->atl_done_map &= ~(1 << slot);
+ } else {
+ priv->int_done_map |= reg_read32(hcd->regs,
+ HC_INT_PTD_DONEMAP_REG);
+ priv->int_done_map &= ~(1 << slot);
+ }
+
+ qh->slot = slot;
+ qtd->status = QTD_XFER_STARTED;
+ slots[slot].timestamp = jiffies;
+ slots[slot].qtd = qtd;
+ slots[slot].qh = qh;
+ ptd_write(hcd->regs, ptd_offset, slot, ptd);
+ if (ptd_offset == ATL_PTD_OFFSET) {
skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
skip_map &= ~(1 << qh->slot);
reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
} else {
- priv->int_done_map |= reg_read32(hcd->regs,
- HC_INT_PTD_DONEMAP_REG);
- priv->int_done_map &= ~(1 << qh->slot);
-
skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
skip_map &= ~(1 << qh->slot);
reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
@@ -858,10 +794,7 @@ static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
if (qtd->status < QTD_XFER_COMPLETE)
break;
- if (list_is_last(&qtd->qtd_list, &qh->qtd_list))
- last_qtd = 1;
- else
- last_qtd = qtd->urb != qtd_next->urb;
+ last_qtd = last_qtd_of_urb(qtd, qh);
if ((!last_qtd) && (qtd->status == QTD_RETIRE))
qtd_next->status = QTD_RETIRE;
@@ -902,7 +835,7 @@ static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
urb_listitem = kmem_cache_zalloc(urb_listitem_cachep,
GFP_ATOMIC);
if (unlikely(!urb_listitem))
- break;
+ break; /* Try again on next call */
urb_listitem->urb = qtd->urb;
list_add_tail(&urb_listitem->urb_list, urb_list);
}
@@ -928,6 +861,10 @@ static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
return;
}
+ /* Make sure this endpoint's TT buffer is clean before queueing ptds */
+ if (qh->tt_buffer_dirty)
+ return;
+
if (usb_pipeint(list_entry(qh->qtd_list.next, struct isp1760_qtd,
qtd_list)->urb->pipe)) {
ptd_offset = INT_PTD_OFFSET;
@@ -1168,11 +1105,9 @@ static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
return PTD_STATE_QTD_DONE;
}
-static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
+static void handle_done_ptds(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
- u32 imask;
- irqreturn_t irqret = IRQ_NONE;
struct ptd ptd;
struct isp1760_qh *qh;
int slot;
@@ -1181,27 +1116,14 @@ static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
u32 ptd_offset;
struct isp1760_qtd *qtd;
int modified;
- static int last_active_ptds;
- int int_skip_map, atl_skip_map;
-
- spin_lock(&priv->lock);
-
- if (!(hcd->state & HC_STATE_RUNNING))
- goto leave;
-
- imask = reg_read32(hcd->regs, HC_INTERRUPT_REG);
- if (unlikely(!imask))
- goto leave;
- reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); /* Clear */
+ int skip_map;
- int_skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
- atl_skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
- priv->int_done_map |= reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG);
- priv->atl_done_map |= reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG);
- priv->int_done_map &= ~int_skip_map;
- priv->atl_done_map &= ~atl_skip_map;
+ skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
+ priv->int_done_map &= ~skip_map;
+ skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
+ priv->atl_done_map &= ~skip_map;
- modified = priv->int_done_map | priv->atl_done_map;
+ modified = priv->int_done_map || priv->atl_done_map;
while (priv->int_done_map || priv->atl_done_map) {
if (priv->int_done_map) {
@@ -1240,7 +1162,6 @@ static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
slots[slot].qtd = NULL;
qh = slots[slot].qh;
slots[slot].qh = NULL;
- priv->active_ptds--;
qh->slot = -1;
WARN_ON(qtd->status != QTD_XFER_STARTED);
@@ -1281,6 +1202,15 @@ static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
case PTD_STATE_URB_RETIRE:
qtd->status = QTD_RETIRE;
+ if ((qtd->urb->dev->speed != USB_SPEED_HIGH) &&
+ (qtd->urb->status != -EPIPE) &&
+ (qtd->urb->status != -EREMOTEIO)) {
+ qh->tt_buffer_dirty = 1;
+ if (usb_hub_clear_tt_buffer(qtd->urb))
+ /* Clear failed; let's hope things work
+ anyway */
+ qh->tt_buffer_dirty = 0;
+ }
qtd = NULL;
qh->toggle = 0;
qh->ping = 0;
@@ -1311,22 +1241,28 @@ static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
if (modified)
schedule_ptds(hcd);
+}
- /* ISP1760 Errata 2 explains that interrupts may be missed (or not
- happen?) if two USB devices are running simultaneously. Perhaps
- this happens when a PTD is finished during interrupt handling;
- enable SOF interrupts if PTDs are still scheduled when exiting this
- interrupt handler, just to be safe. */
+static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ u32 imask;
+ irqreturn_t irqret = IRQ_NONE;
- if (priv->active_ptds != last_active_ptds) {
- if (priv->active_ptds > 0)
- reg_write32(hcd->regs, HC_INTERRUPT_ENABLE,
- INTERRUPT_ENABLE_SOT_MASK);
- else
- reg_write32(hcd->regs, HC_INTERRUPT_ENABLE,
- INTERRUPT_ENABLE_MASK);
- last_active_ptds = priv->active_ptds;
- }
+ spin_lock(&priv->lock);
+
+ if (!(hcd->state & HC_STATE_RUNNING))
+ goto leave;
+
+ imask = reg_read32(hcd->regs, HC_INTERRUPT_REG);
+ if (unlikely(!imask))
+ goto leave;
+ reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); /* Clear */
+
+ priv->int_done_map |= reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG);
+ priv->atl_done_map |= reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG);
+
+ handle_done_ptds(hcd);
irqret = IRQ_HANDLED;
leave:
@@ -1335,6 +1271,138 @@ leave:
return irqret;
}
+/*
+ * Workaround for problem described in chip errata 2:
+ *
+ * Sometimes interrupts are not generated when ATL (not INT?) completion occurs.
+ * One solution suggested in the errata is to use SOF interrupts _instead_of_
+ * ATL done interrupts (the "instead of" might be important since it seems
+ * enabling ATL interrupts also causes the chip to sometimes - rarely - "forget"
+ * to set the PTD's done bit in addition to not generating an interrupt!).
+ *
+ * So if we use SOF + ATL interrupts, we sometimes get stale PTDs since their
+ * done bit is not being set. This is bad - it blocks the endpoint until reboot.
+ *
+ * If we use SOF interrupts only, we get latency between ptd completion and the
+ * actual handling. This is very noticeable in testusb runs which takes several
+ * minutes longer without ATL interrupts.
+ *
+ * A better solution is to run the code below every SLOT_CHECK_PERIOD ms. If it
+ * finds active ATL slots which are older than SLOT_TIMEOUT ms, it checks the
+ * slot's ACTIVE and VALID bits. If these are not set, the ptd is considered
+ * completed and its done map bit is set.
+ *
+ * The values of SLOT_TIMEOUT and SLOT_CHECK_PERIOD have been arbitrarily chosen
+ * not to cause too much lag when this HW bug occurs, while still hopefully
+ * ensuring that the check does not falsely trigger.
+ */
+#define SLOT_TIMEOUT 300
+#define SLOT_CHECK_PERIOD 200
+static struct timer_list errata2_timer;
+
+void errata2_function(unsigned long data)
+{
+ struct usb_hcd *hcd = (struct usb_hcd *) data;
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ int slot;
+ struct ptd ptd;
+ unsigned long spinflags;
+
+ spin_lock_irqsave(&priv->lock, spinflags);
+
+ for (slot = 0; slot < 32; slot++)
+ if (priv->atl_slots[slot].qh && time_after(jiffies,
+ priv->atl_slots[slot].timestamp +
+ SLOT_TIMEOUT * HZ / 1000)) {
+ ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
+ if (!FROM_DW0_VALID(ptd.dw0) &&
+ !FROM_DW3_ACTIVE(ptd.dw3))
+ priv->atl_done_map |= 1 << slot;
+ }
+
+ if (priv->atl_done_map)
+ handle_done_ptds(hcd);
+
+ spin_unlock_irqrestore(&priv->lock, spinflags);
+
+ errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
+ add_timer(&errata2_timer);
+}
+
+static int isp1760_run(struct usb_hcd *hcd)
+{
+ int retval;
+ u32 temp;
+ u32 command;
+ u32 chipid;
+
+ hcd->uses_new_polling = 1;
+
+ hcd->state = HC_STATE_RUNNING;
+
+ /* Set PTD interrupt AND & OR maps */
+ reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0);
+ reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0);
+ reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0);
+ reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff);
+ /* step 23 passed */
+
+ temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp | HW_GLOBAL_INTR_EN);
+
+ command = reg_read32(hcd->regs, HC_USBCMD);
+ command &= ~(CMD_LRESET|CMD_RESET);
+ command |= CMD_RUN;
+ reg_write32(hcd->regs, HC_USBCMD, command);
+
+ retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 250 * 1000);
+ if (retval)
+ return retval;
+
+ /*
+ * XXX
+ * Spec says to write FLAG_CF as last config action, priv code grabs
+ * the semaphore while doing so.
+ */
+ down_write(&ehci_cf_port_reset_rwsem);
+ reg_write32(hcd->regs, HC_CONFIGFLAG, FLAG_CF);
+
+ retval = handshake(hcd, HC_CONFIGFLAG, FLAG_CF, FLAG_CF, 250 * 1000);
+ up_write(&ehci_cf_port_reset_rwsem);
+ if (retval)
+ return retval;
+
+ init_timer(&errata2_timer);
+ errata2_timer.function = errata2_function;
+ errata2_timer.data = (unsigned long) hcd;
+ errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
+ add_timer(&errata2_timer);
+
+ chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
+ dev_info(hcd->self.controller, "USB ISP %04x HW rev. %d started\n",
+ chipid & 0xffff, chipid >> 16);
+
+ /* PTD Register Init Part 2, Step 28 */
+
+ /* Setup registers controlling PTD checking */
+ reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000);
+ reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000);
+ reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001);
+ reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_BUFFER_STATUS_REG,
+ ATL_BUF_FILL | INT_BUF_FILL);
+
+ /* GRR this is run-once init(), being done every time the HC starts.
+ * So long as they're part of class devices, we can't do it init()
+ * since the class device isn't created that early.
+ */
+ return 0;
+}
+
static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len)
{
qtd->data_buffer = databuffer;
@@ -1503,7 +1571,6 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
packetize_urb(hcd, urb, &new_qtds, mem_flags);
if (list_empty(&new_qtds))
return -ENOMEM;
- urb->hcpriv = NULL; /* Used to signal unlink to interrupt handler */
retval = 0;
spin_lock_irqsave(&priv->lock, spinflags);
@@ -1531,6 +1598,7 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
qh = qh_alloc(GFP_ATOMIC);
if (!qh) {
retval = -ENOMEM;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
goto out;
}
list_add_tail(&qh->qh_list, ep_queue);
@@ -1570,7 +1638,41 @@ static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
}
qh->slot = -1;
- priv->active_ptds--;
+}
+
+/*
+ * Retire the qtds beginning at 'qtd' and belonging all to the same urb, killing
+ * any active transfer belonging to the urb in the process.
+ */
+static void dequeue_urb_from_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
+ struct isp1760_qtd *qtd)
+{
+ struct urb *urb;
+ int urb_was_running;
+
+ urb = qtd->urb;
+ urb_was_running = 0;
+ list_for_each_entry_from(qtd, &qh->qtd_list, qtd_list) {
+ if (qtd->urb != urb)
+ break;
+
+ if (qtd->status >= QTD_XFER_STARTED)
+ urb_was_running = 1;
+ if (last_qtd_of_urb(qtd, qh) &&
+ (qtd->status >= QTD_XFER_COMPLETE))
+ urb_was_running = 0;
+
+ if (qtd->status == QTD_XFER_STARTED)
+ kill_transfer(hcd, urb, qh);
+ qtd->status = QTD_RETIRE;
+ }
+
+ if ((urb->dev->speed != USB_SPEED_HIGH) && urb_was_running) {
+ qh->tt_buffer_dirty = 1;
+ if (usb_hub_clear_tt_buffer(urb))
+ /* Clear failed; let's hope things work anyway */
+ qh->tt_buffer_dirty = 0;
+ }
}
static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
@@ -1595,9 +1697,8 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
if (qtd->urb == urb) {
- if (qtd->status == QTD_XFER_STARTED)
- kill_transfer(hcd, urb, qh);
- qtd->status = QTD_RETIRE;
+ dequeue_urb_from_qtd(hcd, qh, qtd);
+ break;
}
urb->status = status;
@@ -1622,12 +1723,11 @@ static void isp1760_endpoint_disable(struct usb_hcd *hcd,
if (!qh)
goto out;
- list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
- if (qtd->status == QTD_XFER_STARTED)
- kill_transfer(hcd, qtd->urb, qh);
- qtd->status = QTD_RETIRE;
- qtd->urb->status = -ECONNRESET;
- }
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
+ if (qtd->status != QTD_RETIRE) {
+ dequeue_urb_from_qtd(hcd, qh, qtd);
+ qtd->urb->status = -ECONNRESET;
+ }
ep->hcpriv = NULL;
/* Cannot free qh here since it will be parsed by schedule_ptds() */
@@ -2021,6 +2121,8 @@ static void isp1760_stop(struct usb_hcd *hcd)
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 temp;
+ del_timer(&errata2_timer);
+
isp1760_hub_control(hcd, ClearPortFeature, USB_PORT_FEAT_POWER, 1,
NULL, 0);
mdelay(20);
@@ -2048,6 +2150,23 @@ static void isp1760_shutdown(struct usb_hcd *hcd)
reg_write32(hcd->regs, HC_USBCMD, command);
}
+static void isp1760_clear_tt_buffer_complete(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ struct isp1760_qh *qh = ep->hcpriv;
+ unsigned long spinflags;
+
+ if (!qh)
+ return;
+
+ spin_lock_irqsave(&priv->lock, spinflags);
+ qh->tt_buffer_dirty = 0;
+ schedule_ptds(hcd);
+ spin_unlock_irqrestore(&priv->lock, spinflags);
+}
+
+
static const struct hc_driver isp1760_hc_driver = {
.description = "isp1760-hcd",
.product_desc = "NXP ISP1760 USB Host Controller",
@@ -2064,6 +2183,7 @@ static const struct hc_driver isp1760_hc_driver = {
.get_frame_number = isp1760_get_frame,
.hub_status_data = isp1760_hub_status_data,
.hub_control = isp1760_hub_control,
+ .clear_tt_buffer_complete = isp1760_clear_tt_buffer_complete,
};
int __init init_kmem_once(void)
@@ -2102,6 +2222,7 @@ void deinit_kmem_cache(void)
struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
int irq, unsigned long irqflags,
+ int rst_gpio,
struct device *dev, const char *busname,
unsigned int devflags)
{
@@ -2121,6 +2242,7 @@ struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
priv = hcd_to_priv(hcd);
priv->devflags = devflags;
+ priv->rst_gpio = rst_gpio;
init_memory(priv);
hcd->regs = ioremap(res_start, res_len);
if (!hcd->regs) {
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
index 014a7dfadf9..33dc79ccaa6 100644
--- a/drivers/usb/host/isp1760-hcd.h
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -4,6 +4,7 @@
/* exports for if */
struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
int irq, unsigned long irqflags,
+ int rst_gpio,
struct device *dev, const char *busname,
unsigned int devflags);
int init_kmem_once(void);
@@ -73,7 +74,6 @@ void deinit_kmem_cache(void);
#define HC_EOT_INT (1 << 3)
#define HC_SOT_INT (1 << 1)
#define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT)
-#define INTERRUPT_ENABLE_SOT_MASK (HC_SOT_INT)
#define HC_ISO_IRQ_MASK_OR_REG 0x318
#define HC_INT_IRQ_MASK_OR_REG 0x31C
@@ -107,6 +107,7 @@ struct ptd {
struct slotinfo {
struct isp1760_qh *qh;
struct isp1760_qtd *qtd;
+ unsigned long timestamp;
};
@@ -126,6 +127,7 @@ typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
#define ISP1760_FLAG_ISP1761 0x00000040 /* Chip is ISP1761 */
#define ISP1760_FLAG_INTR_POL_HIGH 0x00000080 /* Interrupt polarity active high */
#define ISP1760_FLAG_INTR_EDGE_TRIG 0x00000100 /* Interrupt edge triggered */
+#define ISP1760_FLAG_RESET_ACTIVE_HIGH 0x80000000 /* RESET GPIO active high */
/* chip memory management */
struct memory_chunk {
@@ -188,6 +190,7 @@ struct memory_chunk {
#define DW3_BABBLE_BIT (1 << 29)
#define DW3_HALT_BIT (1 << 30)
#define DW3_ACTIVE_BIT (1 << 31)
+#define FROM_DW3_ACTIVE(x) (((x) >> 31) & 0x01)
#define INT_UNDERRUN (1 << 2)
#define INT_BABBLE (1 << 1)
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 7ee30056f37..2c7fc830c9e 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -17,19 +17,28 @@
#include "isp1760-hcd.h"
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_OF
+#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
#endif
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_OF
+struct isp1760 {
+ struct usb_hcd *hcd;
+ int rst_gpio;
+};
+
static int of_isp1760_probe(struct platform_device *dev)
{
- struct usb_hcd *hcd;
+ struct isp1760 *drvdata;
struct device_node *dp = dev->dev.of_node;
struct resource *res;
struct resource memory;
@@ -39,6 +48,11 @@ static int of_isp1760_probe(struct platform_device *dev)
int ret;
const unsigned int *prop;
unsigned int devflags = 0;
+ enum of_gpio_flags gpio_flags;
+
+ drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
ret = of_address_to_resource(dp, 0, &memory);
if (ret)
@@ -78,32 +92,57 @@ static int of_isp1760_probe(struct platform_device *dev)
if (of_get_property(dp, "dreq-polarity", NULL) != NULL)
devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
- hcd = isp1760_register(memory.start, res_len, virq,
- IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev_name(&dev->dev),
- devflags);
- if (IS_ERR(hcd)) {
- ret = PTR_ERR(hcd);
- goto release_reg;
+ drvdata->rst_gpio = of_get_gpio_flags(dp, 0, &gpio_flags);
+ if (gpio_is_valid(drvdata->rst_gpio)) {
+ ret = gpio_request(drvdata->rst_gpio, dev_name(&dev->dev));
+ if (!ret) {
+ if (!(gpio_flags & OF_GPIO_ACTIVE_LOW)) {
+ devflags |= ISP1760_FLAG_RESET_ACTIVE_HIGH;
+ gpio_direction_output(drvdata->rst_gpio, 0);
+ } else {
+ gpio_direction_output(drvdata->rst_gpio, 1);
+ }
+ } else {
+ drvdata->rst_gpio = ret;
+ }
}
- dev_set_drvdata(&dev->dev, hcd);
+ drvdata->hcd = isp1760_register(memory.start, res_len, virq,
+ IRQF_SHARED, drvdata->rst_gpio,
+ &dev->dev, dev_name(&dev->dev),
+ devflags);
+ if (IS_ERR(drvdata->hcd)) {
+ ret = PTR_ERR(drvdata->hcd);
+ goto free_gpio;
+ }
+
+ dev_set_drvdata(&dev->dev, drvdata);
return ret;
+free_gpio:
+ if (gpio_is_valid(drvdata->rst_gpio))
+ gpio_free(drvdata->rst_gpio);
release_reg:
release_mem_region(memory.start, res_len);
+ kfree(drvdata);
return ret;
}
static int of_isp1760_remove(struct platform_device *dev)
{
- struct usb_hcd *hcd = dev_get_drvdata(&dev->dev);
+ struct isp1760 *drvdata = dev_get_drvdata(&dev->dev);
dev_set_drvdata(&dev->dev, NULL);
- usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
+ usb_remove_hcd(drvdata->hcd);
+ iounmap(drvdata->hcd->regs);
+ release_mem_region(drvdata->hcd->rsrc_start, drvdata->hcd->rsrc_len);
+ usb_put_hcd(drvdata->hcd);
+
+ if (gpio_is_valid(drvdata->rst_gpio))
+ gpio_free(drvdata->rst_gpio);
+
+ kfree(drvdata);
return 0;
}
@@ -240,7 +279,7 @@ static int __devinit isp1761_pci_probe(struct pci_dev *dev,
dev->dev.dma_mask = NULL;
hcd = isp1760_register(pci_mem_phy0, memlength, dev->irq,
- IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev_name(&dev->dev),
+ IRQF_SHARED, -ENOENT, &dev->dev, dev_name(&dev->dev),
devflags);
if (IS_ERR(hcd)) {
ret_status = -ENODEV;
@@ -313,7 +352,7 @@ static int __devinit isp1760_plat_probe(struct platform_device *pdev)
resource_size_t mem_size;
struct isp1760_platform_data *priv = pdev->dev.platform_data;
unsigned int devflags = 0;
- unsigned long irqflags = IRQF_SHARED | IRQF_DISABLED;
+ unsigned long irqflags = IRQF_SHARED;
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem_res) {
@@ -351,7 +390,8 @@ static int __devinit isp1760_plat_probe(struct platform_device *pdev)
}
hcd = isp1760_register(mem_res->start, mem_size, irq_res->start,
- irqflags, &pdev->dev, dev_name(&pdev->dev), devflags);
+ irqflags, -ENOENT,
+ &pdev->dev, dev_name(&pdev->dev), devflags);
if (IS_ERR(hcd)) {
pr_warning("isp1760: Failed to register the HCD device\n");
ret = -ENODEV;
@@ -396,7 +436,7 @@ static int __init isp1760_init(void)
ret = platform_driver_register(&isp1760_plat_driver);
if (!ret)
any_ret = 0;
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_OF
ret = platform_driver_register(&isp1760_of_driver);
if (!ret)
any_ret = 0;
@@ -416,7 +456,7 @@ module_init(isp1760_init);
static void __exit isp1760_exit(void)
{
platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_OF
platform_driver_unregister(&isp1760_of_driver);
#endif
#ifdef CONFIG_PCI
diff --git a/drivers/usb/host/ohci-ath79.c b/drivers/usb/host/ohci-ath79.c
index c620c50f677..18d574d6958 100644
--- a/drivers/usb/host/ohci-ath79.c
+++ b/drivers/usb/host/ohci-ath79.c
@@ -111,7 +111,7 @@ static int ohci_ath79_probe(struct platform_device *pdev)
ohci_hcd_init(hcd_to_ohci(hcd));
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, irq, 0);
if (ret)
goto err_stop_hcd;
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index 958d985f295..6b7bc50dfea 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -218,7 +218,7 @@ static int ohci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
ohci_hcd_init(hcd_to_ohci(hcd));
ret = usb_add_hcd(hcd, pdev->resource[1].start,
- IRQF_DISABLED | IRQF_SHARED);
+ IRQF_SHARED);
if (ret == 0) {
platform_set_drvdata(pdev, hcd);
return ret;
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 6aca2c4453f..843509778a3 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -344,7 +344,7 @@ static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
error = -ENODEV;
goto err4;
}
- error = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ error = usb_add_hcd(hcd, irq, 0);
if (error)
goto err4;
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
index 4e681613e7a..dc45d489d00 100644
--- a/drivers/usb/host/ohci-ep93xx.c
+++ b/drivers/usb/host/ohci-ep93xx.c
@@ -81,7 +81,7 @@ static int usb_hcd_ep93xx_probe(const struct hc_driver *driver,
ohci_hcd_init(hcd_to_ohci(hcd));
- retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED);
+ retval = usb_add_hcd(hcd, pdev->resource[1].start, 0);
if (retval == 0)
return retval;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index f9cf3f04b74..34efd479e06 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1114,6 +1114,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_hcd_ath79_driver
#endif
+#ifdef CONFIG_NLM_XLR
+#include "ohci-xls.c"
+#define PLATFORM_DRIVER ohci_xls_driver
+#endif
+
#if !defined(PCI_DRIVER) && \
!defined(PLATFORM_DRIVER) && \
!defined(OMAP1_PLATFORM_DRIVER) && \
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 9154615292d..2f00040fc40 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -356,10 +356,7 @@ static void ohci_finish_controller_resume(struct usb_hcd *hcd)
msleep(20);
}
- /* Does the root hub have a port wakeup pending? */
- if (ohci_readl(ohci, &ohci->regs->intrstatus) &
- (OHCI_INTR_RD | OHCI_INTR_RHSC))
- usb_hcd_resume_root_hub(hcd);
+ usb_hcd_resume_root_hub(hcd);
}
/* Carry out polling-, autostop-, and autoresume-related state changes */
diff --git a/drivers/usb/host/ohci-octeon.c b/drivers/usb/host/ohci-octeon.c
index d8b45647d1d..d469bf9b9e5 100644
--- a/drivers/usb/host/ohci-octeon.c
+++ b/drivers/usb/host/ohci-octeon.c
@@ -164,7 +164,7 @@ static int ohci_octeon_drv_probe(struct platform_device *pdev)
ohci_hcd_init(ohci);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
goto err3;
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 5645f70b921..e4b8782cc6e 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -14,7 +14,7 @@
* This file is licenced under the GPL.
*/
-#include <linux/signal.h> /* IRQF_DISABLED */
+#include <linux/signal.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
@@ -363,7 +363,7 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
retval = -ENXIO;
goto err3;
}
- retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ retval = usb_add_hcd(hcd, irq, 0);
if (retval)
goto err3;
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 6048f2f64f7..516ebc4d6cc 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -149,7 +149,7 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "ohci");
- if (!ret) {
+ if (!res) {
dev_err(dev, "UHH OHCI get resource failed\n");
return -ENOMEM;
}
@@ -180,7 +180,7 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
ohci_hcd_init(hcd_to_ohci(hcd));
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, irq, 0);
if (ret) {
dev_dbg(dev, "failed to add hcd with err %d\n", ret);
goto err_add_hcd;
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 653d6a60edb..9ad8bee22c1 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -398,7 +398,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev)
ohci_hcd_init(ohci);
dev_info(&pdev->dev, "at 0x%p, irq %d\n", hcd->regs, hcd->irq);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, irq, 0);
if (ret == 0)
return ret;
diff --git a/drivers/usb/host/ohci-pnx8550.c b/drivers/usb/host/ohci-pnx8550.c
index 28467e288a9..f13d08f94d6 100644
--- a/drivers/usb/host/ohci-pnx8550.c
+++ b/drivers/usb/host/ohci-pnx8550.c
@@ -107,7 +107,7 @@ int usb_hcd_pnx8550_probe (const struct hc_driver *driver,
ohci_hcd_init(hcd_to_ohci(hcd));
- retval = usb_add_hcd(hcd, dev->resource[1].start, IRQF_DISABLED);
+ retval = usb_add_hcd(hcd, dev->resource[1].start, 0);
if (retval == 0)
return retval;
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 0c12f4e14dc..d24cc89de16 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -143,7 +143,7 @@ static int __devinit ohci_hcd_ppc_of_probe(struct platform_device *op)
ohci_hcd_init(ohci);
- rv = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ rv = usb_add_hcd(hcd, irq, 0);
if (rv == 0)
return 0;
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index c0f595c4448..1514b706747 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -80,7 +80,7 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
#endif
ohci_hcd_init(ohci);
- retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ retval = usb_add_hcd(hcd, irq, 0);
if (retval == 0)
return retval;
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index 700950455f4..6fd4fa1f19b 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -164,7 +164,7 @@ static int __devinit ps3_ohci_probe(struct ps3_system_bus_device *dev)
ps3_system_bus_set_drvdata(dev, hcd);
- result = usb_add_hcd(hcd, virq, IRQF_DISABLED);
+ result = usb_add_hcd(hcd, virq, 0);
if (result) {
dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 80be5472783..29dfefe1c72 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -359,7 +359,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
ohci_hcd_init(hcd_to_ohci(hcd));
- retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ retval = usb_add_hcd(hcd, irq, 0);
if (retval == 0)
return retval;
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index dd24fc115e4..15dc51ded61 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -428,7 +428,7 @@ static struct ed *ed_get (
ed->type = usb_pipetype(pipe);
info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
- info |= le16_to_cpu(ep->desc.wMaxPacketSize) << 16;
+ info |= usb_endpoint_maxp(&ep->desc) << 16;
if (udev->speed == USB_SPEED_LOW)
info |= ED_LOWSPEED;
/* only control transfers store pids in tds */
@@ -444,7 +444,7 @@ static struct ed *ed_get (
ed->load = usb_calc_bus_time (
udev->speed, !is_out,
ed->type == PIPE_ISOCHRONOUS,
- le16_to_cpu(ep->desc.wMaxPacketSize))
+ usb_endpoint_maxp(&ep->desc))
/ 1000;
}
}
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 7c9a4d55526..a1877c47601 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -384,7 +384,7 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
ohci_hcd_init(hcd_to_ohci(hcd));
- retval = usb_add_hcd(hcd, dev->resource[1].start, IRQF_DISABLED);
+ retval = usb_add_hcd(hcd, dev->resource[1].start, 0);
if (retval != 0)
goto err_ioremap;
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 4204d9720d2..4bde4f9821b 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -143,7 +143,7 @@ int usb_hcd_sa1111_probe (const struct hc_driver *driver,
sa1111_start_hc(dev);
ohci_hcd_init(hcd_to_ohci(hcd));
- retval = usb_add_hcd(hcd, dev->irq[1], IRQF_DISABLED);
+ retval = usb_add_hcd(hcd, dev->irq[1], 0);
if (retval == 0)
return retval;
diff --git a/drivers/usb/host/ohci-sh.c b/drivers/usb/host/ohci-sh.c
index 14cecb52a9f..afc4eb6bb9d 100644
--- a/drivers/usb/host/ohci-sh.c
+++ b/drivers/usb/host/ohci-sh.c
@@ -109,7 +109,7 @@ static int ohci_hcd_sh_probe(struct platform_device *pdev)
hcd->regs = (void __iomem *)res->start;
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret != 0) {
err("Failed to add hcd");
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 78918ca0da2..968cea2b6d4 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -165,7 +165,7 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
ohci_hcd_init(hcd_to_ohci(hcd));
- retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
goto err5;
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 4fd4bea9ac7..69874654f3b 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -152,7 +152,7 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
spear_start_ohci(ohci_p);
ohci_hcd_init(hcd_to_ohci(hcd));
- retval = usb_add_hcd(hcd, platform_get_irq(pdev, 0), IRQF_DISABLED);
+ retval = usb_add_hcd(hcd, platform_get_irq(pdev, 0), 0);
if (retval == 0)
return retval;
diff --git a/drivers/usb/host/ohci-ssb.c b/drivers/usb/host/ohci-ssb.c
index c4aea3b8315..5ba18595d6f 100644
--- a/drivers/usb/host/ohci-ssb.c
+++ b/drivers/usb/host/ohci-ssb.c
@@ -169,7 +169,7 @@ static int ssb_ohci_attach(struct ssb_device *dev)
hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs)
goto err_put_hcd;
- err = usb_add_hcd(hcd, dev->irq, IRQF_DISABLED | IRQF_SHARED);
+ err = usb_add_hcd(hcd, dev->irq, IRQF_SHARED);
if (err)
goto err_iounmap;
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 57ad1271fc9..06331d93117 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -244,7 +244,7 @@ static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
ohci = hcd_to_ohci(hcd);
ohci_hcd_init(ohci);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, irq, 0);
if (ret)
goto err_add_hcd;
diff --git a/drivers/usb/host/ohci-xls.c b/drivers/usb/host/ohci-xls.c
new file mode 100644
index 00000000000..a3a9c6f45b9
--- /dev/null
+++ b/drivers/usb/host/ohci-xls.c
@@ -0,0 +1,151 @@
+/*
+ * OHCI HCD for Netlogic XLS processors.
+ *
+ * (C) Copyright 2011 Netlogic Microsystems Inc.
+ *
+ * Based on ohci-au1xxx.c, and other Linux OHCI drivers.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/signal.h>
+
+static int ohci_xls_probe_internal(const struct hc_driver *driver,
+ struct platform_device *dev)
+{
+ struct resource *res;
+ struct usb_hcd *hcd;
+ int retval, irq;
+
+ /* Get our IRQ from an earlier registered Platform Resource */
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ dev_err(&dev->dev, "Found HC with no IRQ\n");
+ return -ENODEV;
+ }
+
+ /* Get our Memory Handle */
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&dev->dev, "MMIO Handle incorrect!\n");
+ return -ENODEV;
+ }
+
+ hcd = usb_create_hcd(driver, &dev->dev, "XLS");
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto err1;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = res->end - res->start + 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+ driver->description)) {
+ dev_dbg(&dev->dev, "Controller already in use\n");
+ retval = -EBUSY;
+ goto err2;
+ }
+
+ hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ if (hcd->regs == NULL) {
+ dev_dbg(&dev->dev, "error mapping memory\n");
+ retval = -EFAULT;
+ goto err3;
+ }
+
+ retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ if (retval != 0)
+ goto err4;
+ return retval;
+
+err4:
+ iounmap(hcd->regs);
+err3:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err2:
+ usb_put_hcd(hcd);
+err1:
+ dev_err(&dev->dev, "init fail, %d\n", retval);
+ return retval;
+}
+
+static int ohci_xls_reset(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ ohci_hcd_init(ohci);
+ return ohci_init(ohci);
+}
+
+static int __devinit ohci_xls_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci;
+ int ret;
+
+ ohci = hcd_to_ohci(hcd);
+ ret = ohci_run(ohci);
+ if (ret < 0) {
+ err("can't start %s", hcd->self.bus_name);
+ ohci_stop(hcd);
+ return ret;
+ }
+ return 0;
+}
+
+static struct hc_driver ohci_xls_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "XLS OHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+ .irq = ohci_irq,
+ .flags = HCD_MEMORY | HCD_USB11,
+ .reset = ohci_xls_reset,
+ .start = ohci_xls_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+ .get_frame_number = ohci_get_frame,
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+static int ohci_xls_probe(struct platform_device *dev)
+{
+ int ret;
+
+ pr_debug("In ohci_xls_probe");
+ if (usb_disabled())
+ return -ENODEV;
+ ret = ohci_xls_probe_internal(&ohci_xls_hc_driver, dev);
+ return ret;
+}
+
+static int ohci_xls_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+ return 0;
+}
+
+static struct platform_driver ohci_xls_driver = {
+ .probe = ohci_xls_probe,
+ .remove = ohci_xls_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "ohci-xls-0",
+ .owner = THIS_MODULE,
+ },
+};
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 40a0d8b03ad..e84ca1928db 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -959,7 +959,7 @@ static void init_pipe_info(struct r8a66597 *r8a66597, struct urb *urb,
info.pipenum = get_empty_pipenum(r8a66597, ep);
info.address = get_urb_to_r8a66597_addr(r8a66597, urb);
info.epnum = usb_endpoint_num(ep);
- info.maxpacket = le16_to_cpu(ep->wMaxPacketSize);
+ info.maxpacket = usb_endpoint_maxp(ep);
info.type = get_r8a66597_type(usb_endpoint_type(ep));
info.bufnum = get_bufnum(info.pipenum);
info.buf_bsize = get_buf_bsize(info.pipenum);
@@ -2519,7 +2519,7 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->has_tt = 1;
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
+ ret = usb_add_hcd(hcd, irq, irq_trigger);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to add hcd\n");
goto clean_up3;
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 1a996245ab9..961d6638d8f 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1729,7 +1729,7 @@ sl811h_probe(struct platform_device *dev)
* Use resource IRQ flags if set by platform device setup.
*/
irqflags |= IRQF_SHARED;
- retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | irqflags);
+ retval = usb_add_hcd(hcd, irq, irqflags);
if (retval != 0)
goto err6;
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index fba99b12058..c8ae199cfbb 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -294,50 +294,50 @@ __acquires(uhci->lock)
* and that remote wakeups should be enabled.
*/
egsm_enable = USBCMD_EGSM;
- uhci->RD_enable = 1;
int_enable = USBINTR_RESUME;
wakeup_enable = 1;
- /* In auto-stop mode wakeups must always be detected, but
- * Resume-Detect interrupts may be prohibited. (In the absence
- * of CONFIG_PM, they are always disallowed.)
+ /*
+ * In auto-stop mode, we must be able to detect new connections.
+ * The user can force us to poll by disabling remote wakeup;
+ * otherwise we will use the EGSM/RD mechanism.
*/
if (auto_stop) {
if (!device_may_wakeup(&rhdev->dev))
- int_enable = 0;
+ egsm_enable = int_enable = 0;
+ }
- /* In bus-suspend mode wakeups may be disabled, but if they are
- * allowed then so are Resume-Detect interrupts.
- */
- } else {
#ifdef CONFIG_PM
+ /*
+ * In bus-suspend mode, we use the wakeup setting specified
+ * for the root hub.
+ */
+ else {
if (!rhdev->do_remote_wakeup)
wakeup_enable = 0;
-#endif
}
+#endif
- /* EGSM causes the root hub to echo a 'K' signal (resume) out any
- * port which requests a remote wakeup. According to the USB spec,
- * every hub is supposed to do this. But if we are ignoring
- * remote-wakeup requests anyway then there's no point to it.
- * We also shouldn't enable EGSM if it's broken.
- */
- if (!wakeup_enable || global_suspend_mode_is_broken(uhci))
- egsm_enable = 0;
-
- /* If we're ignoring wakeup events then there's no reason to
- * enable Resume-Detect interrupts. We also shouldn't enable
- * them if they are broken or disallowed.
+ /*
+ * UHCI doesn't distinguish between wakeup requests from downstream
+ * devices and local connect/disconnect events. There's no way to
+ * enable one without the other; both are controlled by EGSM. Thus
+ * if wakeups are disallowed then EGSM must be turned off -- in which
+ * case remote wakeup requests from downstream during system sleep
+ * will be lost.
+ *
+ * In addition, if EGSM is broken then we can't use it. Likewise,
+ * if Resume-Detect interrupts are broken then we can't use them.
*
- * This logic may lead us to enabling RD but not EGSM. The UHCI
- * spec foolishly says that RD works only when EGSM is on, but
- * there's no harm in enabling it anyway -- perhaps some chips
- * will implement it!
+ * Finally, neither EGSM nor RD is useful by itself. Without EGSM,
+ * the RD status bit will never get set. Without RD, the controller
+ * won't generate interrupts to tell the system about wakeup events.
*/
- if (!wakeup_enable || resume_detect_interrupts_are_broken(uhci) ||
- !int_enable)
- uhci->RD_enable = int_enable = 0;
+ if (!wakeup_enable || global_suspend_mode_is_broken(uhci) ||
+ resume_detect_interrupts_are_broken(uhci))
+ egsm_enable = int_enable = 0;
+ uhci->RD_enable = !!int_enable;
uhci_writew(uhci, int_enable, USBINTR);
uhci_writew(uhci, egsm_enable | USBCMD_CF, USBCMD);
mb();
@@ -364,10 +364,12 @@ __acquires(uhci->lock)
uhci->rh_state = new_state;
uhci->is_stopped = UHCI_IS_STOPPED;
- /* If interrupts don't work and remote wakeup is enabled then
- * the suspended root hub needs to be polled.
+ /*
+ * If remote wakeup is enabled but either EGSM or RD interrupts
+ * doesn't work, then we won't get an interrupt when a wakeup event
+ * occurs. Thus the suspended root hub needs to be polled.
*/
- if (!int_enable && wakeup_enable)
+ if (wakeup_enable && (!int_enable || !egsm_enable))
set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
else
clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 84ed28b34f9..f6ca80ee4ce 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -280,7 +280,7 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
qh->load = usb_calc_bus_time(udev->speed,
usb_endpoint_dir_in(&hep->desc),
qh->type == USB_ENDPOINT_XFER_ISOC,
- le16_to_cpu(hep->desc.wMaxPacketSize))
+ usb_endpoint_maxp(&hep->desc))
/ 1000 + 1;
} else { /* Skeleton QH */
@@ -792,7 +792,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
{
struct uhci_td *td;
unsigned long destination, status;
- int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
+ int maxsze = usb_endpoint_maxp(&qh->hep->desc);
int len = urb->transfer_buffer_length;
dma_addr_t data = urb->transfer_dma;
__hc32 *plink;
@@ -918,7 +918,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
{
struct uhci_td *td;
unsigned long destination, status;
- int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
+ int maxsze = usb_endpoint_maxp(&qh->hep->desc);
int len = urb->transfer_buffer_length;
int this_sg_len;
dma_addr_t data;
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index ce5c9e51748..c7f33123d4c 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -65,6 +65,12 @@
/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
#define XHCI_LEGACY_DISABLE_SMI ((0x3 << 1) + (0xff << 5) + (0x7 << 17))
+/* USB 2.0 xHCI 0.96 L1C capability - section 7.2.2.1.3.2 */
+#define XHCI_L1C (1 << 16)
+
+/* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */
+#define XHCI_HLC (1 << 19)
+
/* command register values to disable interrupts and halt the HC */
/* start/stop HC execution - do not write unless HC is halted*/
#define XHCI_CMD_RUN (1 << 0)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 723f8231193..431efe72b1f 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -28,6 +28,25 @@
#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
PORT_RC | PORT_PLC | PORT_PE)
+/* usb 1.1 root hub device descriptor */
+static u8 usb_bos_descriptor [] = {
+ USB_DT_BOS_SIZE, /* __u8 bLength, 5 bytes */
+ USB_DT_BOS, /* __u8 bDescriptorType */
+ 0x0F, 0x00, /* __le16 wTotalLength, 15 bytes */
+ 0x1, /* __u8 bNumDeviceCaps */
+ /* First device capability */
+ USB_DT_USB_SS_CAP_SIZE, /* __u8 bLength, 10 bytes */
+ USB_DT_DEVICE_CAPABILITY, /* Device Capability */
+ USB_SS_CAP_TYPE, /* bDevCapabilityType, SUPERSPEED_USB */
+ 0x00, /* bmAttributes, LTM off by default */
+ USB_5GBPS_OPERATION, 0x00, /* wSpeedsSupported, 5Gbps only */
+ 0x03, /* bFunctionalitySupport,
+ USB 3.0 speed only */
+ 0x00, /* bU1DevExitLat, set later. */
+ 0x00, 0x00 /* __le16 bU2DevExitLat, set later. */
+};
+
+
static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
struct usb_hub_descriptor *desc, int ports)
{
@@ -232,7 +251,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
continue;
speed = xhci->devs[i]->udev->speed;
if (((speed == USB_SPEED_SUPER) == (hcd->speed == HCD_USB3))
- && xhci->devs[i]->port == port) {
+ && xhci->devs[i]->fake_port == port) {
slot_id = i;
break;
}
@@ -392,13 +411,39 @@ static int xhci_get_ports(struct usb_hcd *hcd, __le32 __iomem ***port_array)
return max_ports;
}
+void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
+ int port_id, u32 link_state)
+{
+ u32 temp;
+
+ temp = xhci_readl(xhci, port_array[port_id]);
+ temp = xhci_port_state_to_neutral(temp);
+ temp &= ~PORT_PLS_MASK;
+ temp |= PORT_LINK_STROBE | link_state;
+ xhci_writel(xhci, temp, port_array[port_id]);
+}
+
+/* Test and clear port RWC bit */
+void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
+ int port_id, u32 port_bit)
+{
+ u32 temp;
+
+ temp = xhci_readl(xhci, port_array[port_id]);
+ if (temp & port_bit) {
+ temp = xhci_port_state_to_neutral(temp);
+ temp |= port_bit;
+ xhci_writel(xhci, temp, port_array[port_id]);
+ }
+}
+
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int max_ports;
unsigned long flags;
- u32 temp, temp1, status;
+ u32 temp, status;
int retval = 0;
__le32 __iomem **port_array;
int slot_id;
@@ -429,6 +474,21 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_hub_descriptor(hcd, xhci,
(struct usb_hub_descriptor *) buf);
break;
+ case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
+ if ((wValue & 0xff00) != (USB_DT_BOS << 8))
+ goto error;
+
+ if (hcd->speed != HCD_USB3)
+ goto error;
+
+ memcpy(buf, &usb_bos_descriptor,
+ USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE);
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+ buf[12] = HCS_U1_LATENCY(temp);
+ put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
case GetPortStatus:
if (!wIndex || wIndex > max_ports)
goto error;
@@ -472,11 +532,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_dbg(xhci, "Resume USB2 port %d\n",
wIndex + 1);
bus_state->resume_done[wIndex] = 0;
- temp1 = xhci_port_state_to_neutral(temp);
- temp1 &= ~PORT_PLS_MASK;
- temp1 |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp1, port_array[wIndex]);
-
+ xhci_set_link_state(xhci, port_array, wIndex,
+ XDEV_U0);
xhci_dbg(xhci, "set port %d resume\n",
wIndex + 1);
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
@@ -551,10 +608,19 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
temp = xhci_readl(xhci, port_array[wIndex]);
+ if ((temp & PORT_PLS_MASK) != XDEV_U0) {
+ /* Resume the port to U0 first */
+ xhci_set_link_state(xhci, port_array, wIndex,
+ XDEV_U0);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&xhci->lock, flags);
+ }
/* In spec software should not attempt to suspend
* a port unless the port reports that it is in the
* enabled (PED = ‘1’,PLS < ‘3’) state.
*/
+ temp = xhci_readl(xhci, port_array[wIndex]);
if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
|| (temp & PORT_PLS_MASK) >= XDEV_U3) {
xhci_warn(xhci, "USB core suspending device "
@@ -573,10 +639,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_stop_device(xhci, slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_U3;
- xhci_writel(xhci, temp, port_array[wIndex]);
+ xhci_set_link_state(xhci, port_array, wIndex, XDEV_U3);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(10); /* wait device to enter */
@@ -610,10 +673,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
}
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | link_state;
- xhci_writel(xhci, temp, port_array[wIndex]);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ link_state);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(20); /* wait device to enter */
@@ -677,24 +738,13 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if ((temp & PORT_PE) == 0)
goto error;
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_RESUME;
- xhci_writel(xhci, temp,
- port_array[wIndex]);
-
- spin_unlock_irqrestore(&xhci->lock,
- flags);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ XDEV_RESUME);
+ spin_unlock_irqrestore(&xhci->lock, flags);
msleep(20);
spin_lock_irqsave(&xhci->lock, flags);
-
- temp = xhci_readl(xhci,
- port_array[wIndex]);
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp,
- port_array[wIndex]);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ XDEV_U0);
}
bus_state->port_c_suspend |= 1 << wIndex;
@@ -910,25 +960,18 @@ int xhci_bus_resume(struct usb_hcd *hcd)
if (test_bit(port_index, &bus_state->bus_suspended) &&
(temp & PORT_PLS_MASK)) {
if (DEV_SUPERSPEED(temp)) {
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, port_array[port_index]);
+ xhci_set_link_state(xhci, port_array,
+ port_index, XDEV_U0);
} else {
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_RESUME;
- xhci_writel(xhci, temp, port_array[port_index]);
+ xhci_set_link_state(xhci, port_array,
+ port_index, XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(20);
spin_lock_irqsave(&xhci->lock, flags);
- temp = xhci_readl(xhci, port_array[port_index]);
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, port_array[port_index]);
+ xhci_set_link_state(xhci, port_array,
+ port_index, XDEV_U0);
}
/* wait for the port to enter U0 and report port link
* state change.
@@ -938,12 +981,8 @@ int xhci_bus_resume(struct usb_hcd *hcd)
spin_lock_irqsave(&xhci->lock, flags);
/* Clear PLC */
- temp = xhci_readl(xhci, port_array[port_index]);
- if (temp & PORT_PLC) {
- temp = xhci_port_state_to_neutral(temp);
- temp |= PORT_PLC;
- xhci_writel(xhci, temp, port_array[port_index]);
- }
+ xhci_test_and_clear_bit(xhci, port_array, port_index,
+ PORT_PLC);
slot_id = xhci_find_slot_id_by_port(hcd,
xhci, port_index + 1);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d446886b22b..42a22b8e692 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -61,8 +61,6 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
- if (!seg)
- return;
if (seg->trbs) {
xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
seg->trbs, (unsigned long long)seg->dma);
@@ -81,7 +79,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
* related flags, such as End TRB, Toggle Cycle, and no snoop.
*/
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
- struct xhci_segment *next, bool link_trbs)
+ struct xhci_segment *next, bool link_trbs, bool isoc)
{
u32 val;
@@ -97,7 +95,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
val &= ~TRB_TYPE_BITMASK;
val |= TRB_TYPE(TRB_LINK);
/* Always set the chain bit with 0.95 hardware */
- if (xhci_link_trb_quirk(xhci))
+ /* Set chain bit for isoc rings on AMD 0.96 host */
+ if (xhci_link_trb_quirk(xhci) ||
+ (isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
val |= TRB_CHAIN;
prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
}
@@ -112,18 +112,20 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
struct xhci_segment *seg;
struct xhci_segment *first_seg;
- if (!ring || !ring->first_seg)
+ if (!ring)
return;
- first_seg = ring->first_seg;
- seg = first_seg->next;
- xhci_dbg(xhci, "Freeing ring at %p\n", ring);
- while (seg != first_seg) {
- struct xhci_segment *next = seg->next;
- xhci_segment_free(xhci, seg);
- seg = next;
+ if (ring->first_seg) {
+ first_seg = ring->first_seg;
+ seg = first_seg->next;
+ xhci_dbg(xhci, "Freeing ring at %p\n", ring);
+ while (seg != first_seg) {
+ struct xhci_segment *next = seg->next;
+ xhci_segment_free(xhci, seg);
+ seg = next;
+ }
+ xhci_segment_free(xhci, first_seg);
+ ring->first_seg = NULL;
}
- xhci_segment_free(xhci, first_seg);
- ring->first_seg = NULL;
kfree(ring);
}
@@ -152,7 +154,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
* See section 4.9.1 and figures 15 and 16.
*/
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
- unsigned int num_segs, bool link_trbs, gfp_t flags)
+ unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
{
struct xhci_ring *ring;
struct xhci_segment *prev;
@@ -178,12 +180,12 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
next = xhci_segment_alloc(xhci, flags);
if (!next)
goto fail;
- xhci_link_segments(xhci, prev, next, link_trbs);
+ xhci_link_segments(xhci, prev, next, link_trbs, isoc);
prev = next;
num_segs--;
}
- xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+ xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
if (link_trbs) {
/* See section 4.9.2.1 and 6.4.4.1 */
@@ -229,14 +231,14 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
* pointers to the beginning of the ring.
*/
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
- struct xhci_ring *ring)
+ struct xhci_ring *ring, bool isoc)
{
struct xhci_segment *seg = ring->first_seg;
do {
memset(seg->trbs, 0,
sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
/* All endpoint rings have link TRBs */
- xhci_link_segments(xhci, seg, seg->next, 1);
+ xhci_link_segments(xhci, seg, seg->next, 1, isoc);
seg = seg->next;
} while (seg != ring->first_seg);
xhci_initialize_ring_info(ring);
@@ -315,7 +317,7 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
- pci_free_consistent(pdev,
+ dma_free_coherent(&pdev->dev,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
stream_ctx, dma);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
@@ -343,9 +345,9 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
- return pci_alloc_consistent(pdev,
+ return dma_alloc_coherent(&pdev->dev,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
- dma);
+ dma, mem_flags);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_alloc(xhci->small_streams_pool,
mem_flags, dma);
@@ -540,7 +542,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
*/
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
stream_info->stream_rings[cur_stream] =
- xhci_ring_alloc(xhci, 1, true, mem_flags);
+ xhci_ring_alloc(xhci, 1, true, false, mem_flags);
cur_ring = stream_info->stream_rings[cur_stream];
if (!cur_ring)
goto cleanup_rings;
@@ -687,11 +689,103 @@ static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
ep->xhci = xhci;
}
-/* All the xhci_tds in the ring's TD list should be freed at this point */
+static void xhci_free_tt_info(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int slot_id)
+{
+ struct list_head *tt;
+ struct list_head *tt_list_head;
+ struct list_head *tt_next;
+ struct xhci_tt_bw_info *tt_info;
+
+ /* If the device never made it past the Set Address stage,
+ * it may not have the real_port set correctly.
+ */
+ if (virt_dev->real_port == 0 ||
+ virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
+ xhci_dbg(xhci, "Bad real port.\n");
+ return;
+ }
+
+ tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
+ if (list_empty(tt_list_head))
+ return;
+
+ list_for_each(tt, tt_list_head) {
+ tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
+ if (tt_info->slot_id == slot_id)
+ break;
+ }
+ /* Cautionary measure in case the hub was disconnected before we
+ * stored the TT information.
+ */
+ if (tt_info->slot_id != slot_id)
+ return;
+
+ tt_next = tt->next;
+ tt_info = list_entry(tt, struct xhci_tt_bw_info,
+ tt_list);
+ /* Multi-TT hubs will have more than one entry */
+ do {
+ list_del(tt);
+ kfree(tt_info);
+ tt = tt_next;
+ if (list_empty(tt_list_head))
+ break;
+ tt_next = tt->next;
+ tt_info = list_entry(tt, struct xhci_tt_bw_info,
+ tt_list);
+ } while (tt_info->slot_id == slot_id);
+}
+
+int xhci_alloc_tt_info(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags)
+{
+ struct xhci_tt_bw_info *tt_info;
+ unsigned int num_ports;
+ int i, j;
+
+ if (!tt->multi)
+ num_ports = 1;
+ else
+ num_ports = hdev->maxchild;
+
+ for (i = 0; i < num_ports; i++, tt_info++) {
+ struct xhci_interval_bw_table *bw_table;
+
+ tt_info = kzalloc(sizeof(*tt_info), mem_flags);
+ if (!tt_info)
+ goto free_tts;
+ INIT_LIST_HEAD(&tt_info->tt_list);
+ list_add(&tt_info->tt_list,
+ &xhci->rh_bw[virt_dev->real_port - 1].tts);
+ tt_info->slot_id = virt_dev->udev->slot_id;
+ if (tt->multi)
+ tt_info->ttport = i+1;
+ bw_table = &tt_info->bw_table;
+ for (j = 0; j < XHCI_MAX_INTERVAL; j++)
+ INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
+ }
+ return 0;
+
+free_tts:
+ xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
+ return -ENOMEM;
+}
+
+
+/* All the xhci_tds in the ring's TD list should be freed at this point.
+ * Should be called with xhci->lock held if there is any chance the TT lists
+ * will be manipulated by the configure endpoint, allocate device, or update
+ * hub functions while this function is removing the TT entries from the list.
+ */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *dev;
int i;
+ int old_active_eps = 0;
/* Slot ID 0 is reserved */
if (slot_id == 0 || !xhci->devs[slot_id])
@@ -702,13 +796,29 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (!dev)
return;
+ if (dev->tt_info)
+ old_active_eps = dev->tt_info->active_eps;
+
for (i = 0; i < 31; ++i) {
if (dev->eps[i].ring)
xhci_ring_free(xhci, dev->eps[i].ring);
if (dev->eps[i].stream_info)
xhci_free_stream_info(xhci,
dev->eps[i].stream_info);
+ /* Endpoints on the TT/root port lists should have been removed
+ * when usb_disable_device() was called for the device.
+ * We can't drop them anyway, because the udev might have gone
+ * away by this point, and we can't tell what speed it was.
+ */
+ if (!list_empty(&dev->eps[i].bw_endpoint_list))
+ xhci_warn(xhci, "Slot %u endpoint %u "
+ "not removed from BW list!\n",
+ slot_id, i);
}
+ /* If this is a hub, free the TT(s) from the TT list */
+ xhci_free_tt_info(xhci, dev, slot_id);
+ /* If necessary, update the number of active TTs on this root port */
+ xhci_update_tt_active_eps(xhci, dev, old_active_eps);
if (dev->ring_cache) {
for (i = 0; i < dev->num_rings_cached; i++)
@@ -762,10 +872,11 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
for (i = 0; i < 31; i++) {
xhci_init_endpoint_timer(xhci, &dev->eps[i]);
INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
+ INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
}
/* Allocate endpoint 0 ring */
- dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+ dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
if (!dev->eps[0].ring)
goto fail;
@@ -921,9 +1032,40 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
top_dev = top_dev->parent)
/* Found device below root hub */;
- dev->port = top_dev->portnum;
+ dev->fake_port = top_dev->portnum;
+ dev->real_port = port_num;
xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
- xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->port);
+ xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
+
+ /* Find the right bandwidth table that this device will be a part of.
+ * If this is a full speed device attached directly to a root port (or a
+ * decendent of one), it counts as a primary bandwidth domain, not a
+ * secondary bandwidth domain under a TT. An xhci_tt_info structure
+ * will never be created for the HS root hub.
+ */
+ if (!udev->tt || !udev->tt->hub->parent) {
+ dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
+ } else {
+ struct xhci_root_port_bw_info *rh_bw;
+ struct xhci_tt_bw_info *tt_bw;
+
+ rh_bw = &xhci->rh_bw[port_num - 1];
+ /* Find the right TT. */
+ list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
+ if (tt_bw->slot_id != udev->tt->hub->slot_id)
+ continue;
+
+ if (!dev->udev->tt->multi ||
+ (udev->tt->multi &&
+ tt_bw->ttport == dev->udev->ttport)) {
+ dev->bw_table = &tt_bw->bw_table;
+ dev->tt_info = tt_bw;
+ break;
+ }
+ }
+ if (!dev->tt_info)
+ xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
+ }
/* Is this a LS/FS device under an external HS hub? */
if (udev->tt && udev->tt->hub->parent) {
@@ -1141,8 +1283,8 @@ static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
if (udev->speed == USB_SPEED_SUPER)
return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
- max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
- max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize) & 0x1800) >> 11;
+ max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+ max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
/* A 0 in max burst means 1 transfer per ESIT */
return max_packet * (max_burst + 1);
}
@@ -1175,10 +1317,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
*/
if (usb_endpoint_xfer_isoc(&ep->desc))
virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 8, true, mem_flags);
+ xhci_ring_alloc(xhci, 8, true, true, mem_flags);
else
virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 1, true, mem_flags);
+ xhci_ring_alloc(xhci, 1, true, false, mem_flags);
if (!virt_dev->eps[ep_index].new_ring) {
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
@@ -1187,7 +1329,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
virt_dev->ring_cache[virt_dev->num_rings_cached];
virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
virt_dev->num_rings_cached--;
- xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+ xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+ usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
}
virt_dev->eps[ep_index].skip = false;
ep_ring = virt_dev->eps[ep_index].new_ring;
@@ -1211,7 +1354,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
/* Set the max packet size and max burst */
switch (udev->speed) {
case USB_SPEED_SUPER:
- max_packet = le16_to_cpu(ep->desc.wMaxPacketSize);
+ max_packet = usb_endpoint_maxp(&ep->desc);
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
/* dig out max burst from ep companion desc */
max_packet = ep->ss_ep_comp.bMaxBurst;
@@ -1223,14 +1366,14 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
*/
if (usb_endpoint_xfer_isoc(&ep->desc) ||
usb_endpoint_xfer_int(&ep->desc)) {
- max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize)
+ max_burst = (usb_endpoint_maxp(&ep->desc)
& 0x1800) >> 11;
ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
}
/* Fall through */
case USB_SPEED_FULL:
case USB_SPEED_LOW:
- max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
+ max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
break;
default:
@@ -1286,6 +1429,70 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
*/
}
+void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
+{
+ bw_info->ep_interval = 0;
+ bw_info->mult = 0;
+ bw_info->num_packets = 0;
+ bw_info->max_packet_size = 0;
+ bw_info->type = 0;
+ bw_info->max_esit_payload = 0;
+}
+
+void xhci_update_bw_info(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_input_control_ctx *ctrl_ctx,
+ struct xhci_virt_device *virt_dev)
+{
+ struct xhci_bw_info *bw_info;
+ struct xhci_ep_ctx *ep_ctx;
+ unsigned int ep_type;
+ int i;
+
+ for (i = 1; i < 31; ++i) {
+ bw_info = &virt_dev->eps[i].bw_info;
+
+ /* We can't tell what endpoint type is being dropped, but
+ * unconditionally clearing the bandwidth info for non-periodic
+ * endpoints should be harmless because the info will never be
+ * set in the first place.
+ */
+ if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
+ /* Dropped endpoint */
+ xhci_clear_endpoint_bw_info(bw_info);
+ continue;
+ }
+
+ if (EP_IS_ADDED(ctrl_ctx, i)) {
+ ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
+ ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
+
+ /* Ignore non-periodic endpoints */
+ if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
+ ep_type != ISOC_IN_EP &&
+ ep_type != INT_IN_EP)
+ continue;
+
+ /* Added or changed endpoint */
+ bw_info->ep_interval = CTX_TO_EP_INTERVAL(
+ le32_to_cpu(ep_ctx->ep_info));
+ /* Number of packets and mult are zero-based in the
+ * input context, but we want one-based for the
+ * interval table.
+ */
+ bw_info->mult = CTX_TO_EP_MULT(
+ le32_to_cpu(ep_ctx->ep_info)) + 1;
+ bw_info->num_packets = CTX_TO_MAX_BURST(
+ le32_to_cpu(ep_ctx->ep_info2)) + 1;
+ bw_info->max_packet_size = MAX_PACKET_DECODED(
+ le32_to_cpu(ep_ctx->ep_info2));
+ bw_info->type = ep_type;
+ bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
+ le32_to_cpu(ep_ctx->tx_info));
+ }
+ }
+}
+
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
* Useful when you want to change one particular aspect of the endpoint and then
* issue a configure endpoint command.
@@ -1344,10 +1551,9 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
if (!xhci->scratchpad)
goto fail_sp;
- xhci->scratchpad->sp_array =
- pci_alloc_consistent(to_pci_dev(dev),
+ xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
num_sp * sizeof(u64),
- &xhci->scratchpad->sp_dma);
+ &xhci->scratchpad->sp_dma, flags);
if (!xhci->scratchpad->sp_array)
goto fail_sp2;
@@ -1364,8 +1570,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
for (i = 0; i < num_sp; i++) {
dma_addr_t dma;
- void *buf = pci_alloc_consistent(to_pci_dev(dev),
- xhci->page_size, &dma);
+ void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
+ flags);
if (!buf)
goto fail_sp5;
@@ -1378,7 +1584,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
fail_sp5:
for (i = i - 1; i >= 0; i--) {
- pci_free_consistent(to_pci_dev(dev), xhci->page_size,
+ dma_free_coherent(dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_dma_buffers[i]);
}
@@ -1388,7 +1594,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
kfree(xhci->scratchpad->sp_buffers);
fail_sp3:
- pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
+ dma_free_coherent(dev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
@@ -1412,13 +1618,13 @@ static void scratchpad_free(struct xhci_hcd *xhci)
num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
for (i = 0; i < num_sp; i++) {
- pci_free_consistent(pdev, xhci->page_size,
+ dma_free_coherent(&pdev->dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_dma_buffers[i]);
}
kfree(xhci->scratchpad->sp_dma_buffers);
kfree(xhci->scratchpad->sp_buffers);
- pci_free_consistent(pdev, num_sp * sizeof(u64),
+ dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
kfree(xhci->scratchpad);
@@ -1463,18 +1669,10 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
- int last;
-
- if (!urb_priv)
- return;
-
- last = urb_priv->length - 1;
- if (last >= 0) {
- int i;
- for (i = 0; i <= last; i++)
- kfree(urb_priv->td[i]);
+ if (urb_priv) {
+ kfree(urb_priv->td[0]);
+ kfree(urb_priv);
}
- kfree(urb_priv);
}
void xhci_free_command(struct xhci_hcd *xhci,
@@ -1489,6 +1687,8 @@ void xhci_free_command(struct xhci_hcd *xhci,
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct dev_info *dev_info, *next;
+ unsigned long flags;
int size;
int i;
@@ -1500,7 +1700,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
if (xhci->erst.entries)
- pci_free_consistent(pdev, size,
+ dma_free_coherent(&pdev->dev, size,
xhci->erst.entries, xhci->erst.erst_dma_addr);
xhci->erst.entries = NULL;
xhci_dbg(xhci, "Freed ERST\n");
@@ -1540,17 +1740,25 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
if (xhci->dcbaa)
- pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
+ dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
xhci->dcbaa, xhci->dcbaa->dma);
xhci->dcbaa = NULL;
scratchpad_free(xhci);
+ spin_lock_irqsave(&xhci->lock, flags);
+ list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
+ list_del(&dev_info->list);
+ kfree(dev_info);
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
xhci->num_usb2_ports = 0;
xhci->num_usb3_ports = 0;
kfree(xhci->usb2_ports);
kfree(xhci->usb3_ports);
kfree(xhci->port_array);
+ kfree(xhci->rh_bw);
xhci->page_size = 0;
xhci->page_shift = 0;
@@ -1762,6 +1970,23 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
/* WTF? "Valid values are ‘1’ to MaxPorts" */
return;
+
+ /* Check the host's USB2 LPM capability */
+ if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
+ (temp & XHCI_L1C)) {
+ xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
+ xhci->sw_lpm_support = 1;
+ }
+
+ if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
+ xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
+ xhci->sw_lpm_support = 1;
+ if (temp & XHCI_HLC) {
+ xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
+ xhci->hw_lpm_support = 1;
+ }
+ }
+
port_offset--;
for (i = port_offset; i < (port_offset + port_count); i++) {
/* Duplicate entry. Ignore the port if the revisions differ. */
@@ -1806,7 +2031,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
__le32 __iomem *addr;
u32 offset;
unsigned int num_ports;
- int i, port_index;
+ int i, j, port_index;
addr = &xhci->cap_regs->hcc_params;
offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
@@ -1821,6 +2046,18 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
if (!xhci->port_array)
return -ENOMEM;
+ xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
+ if (!xhci->rh_bw)
+ return -ENOMEM;
+ for (i = 0; i < num_ports; i++) {
+ struct xhci_interval_bw_table *bw_table;
+
+ INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
+ bw_table = &xhci->rh_bw[i].bw_table;
+ for (j = 0; j < XHCI_MAX_INTERVAL; j++)
+ INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
+ }
+
/*
* For whatever reason, the first capability offset is from the
* capability register base, not from the HCCPARAMS register.
@@ -1959,8 +2196,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* Section 5.4.8 - doorbell array must be
* "physically contiguous and 64-byte (cache line) aligned".
*/
- xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
- sizeof(*xhci->dcbaa), &dma);
+ xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
+ GFP_KERNEL);
if (!xhci->dcbaa)
goto fail;
memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
@@ -1994,14 +2231,14 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
dma_pool_create("xHCI 1KB stream ctx arrays",
dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
- * will be allocated with pci_alloc_consistent()
+ * will be allocated with dma_alloc_coherent()
*/
if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
goto fail;
/* Set up the command ring to have one segments for now. */
- xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
if (!xhci->cmd_ring)
goto fail;
xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2032,14 +2269,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* the event ring segment table (ERST). Section 4.9.3.
*/
xhci_dbg(xhci, "// Allocating event ring\n");
- xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+ xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
+ flags);
if (!xhci->event_ring)
goto fail;
if (xhci_check_trb_in_td_math(xhci, flags) < 0)
goto fail;
- xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
- sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
+ xhci->erst.entries = dma_alloc_coherent(dev,
+ sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
+ GFP_KERNEL);
if (!xhci->erst.entries)
goto fail;
xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
@@ -2102,6 +2341,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
if (xhci_setup_port_arrays(xhci, flags))
goto fail;
+ INIT_LIST_HEAD(&xhci->lpm_failed_devs);
+
return 0;
fail:
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cb16de213f6..9f51f88cc0f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -51,61 +51,9 @@ static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
return 0;
}
-/* called during probe() after chip reset completes */
-static int xhci_pci_setup(struct usb_hcd *hcd)
+static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
{
- struct xhci_hcd *xhci;
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- int retval;
- u32 temp;
-
- hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
-
- if (usb_hcd_is_primary_hcd(hcd)) {
- xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
- if (!xhci)
- return -ENOMEM;
- *((struct xhci_hcd **) hcd->hcd_priv) = xhci;
- xhci->main_hcd = hcd;
- /* Mark the first roothub as being USB 2.0.
- * The xHCI driver will register the USB 3.0 roothub.
- */
- hcd->speed = HCD_USB2;
- hcd->self.root_hub->speed = USB_SPEED_HIGH;
- /*
- * USB 2.0 roothub under xHCI has an integrated TT,
- * (rate matching hub) as opposed to having an OHCI/UHCI
- * companion controller.
- */
- hcd->has_tt = 1;
- } else {
- /* xHCI private pointer was set in xhci_pci_probe for the second
- * registered roothub.
- */
- xhci = hcd_to_xhci(hcd);
- temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
- if (HCC_64BIT_ADDR(temp)) {
- xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
- dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
- } else {
- dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
- }
- return 0;
- }
-
- xhci->cap_regs = hcd->regs;
- xhci->op_regs = hcd->regs +
- HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
- xhci->run_regs = hcd->regs +
- (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
- /* Cache read-only capability registers */
- xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
- xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
- xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
- xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
- xhci->hci_version = HC_VERSION(xhci->hcc_params);
- xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
- xhci_print_registers(xhci);
+ struct pci_dev *pdev = to_pci_dev(dev);
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
@@ -128,6 +76,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST;
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
+ xhci->quirks |= XHCI_AMD_0x96_HOST;
+
/* AMD PLL quirk */
if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
xhci->quirks |= XHCI_AMD_PLL_FIX;
@@ -136,39 +87,29 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
xhci->limit_active_eps = 64;
+ xhci->quirks |= XHCI_SW_BW_CHECKING;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
}
+}
- /* Make sure the HC is halted. */
- retval = xhci_halt(xhci);
- if (retval)
- goto error;
+/* called during probe() after chip reset completes */
+static int xhci_pci_setup(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci;
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ int retval;
- xhci_dbg(xhci, "Resetting HCD\n");
- /* Reset the internal HC memory state and registers. */
- retval = xhci_reset(xhci);
+ retval = xhci_gen_setup(hcd, xhci_pci_quirks);
if (retval)
- goto error;
- xhci_dbg(xhci, "Reset complete\n");
-
- temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
- if (HCC_64BIT_ADDR(temp)) {
- xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
- dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
- } else {
- dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
- }
+ return retval;
- xhci_dbg(xhci, "Calling HCD init\n");
- /* Initialize HCD and host controller data structures. */
- retval = xhci_init(hcd);
- if (retval)
- goto error;
- xhci_dbg(xhci, "Called HCD init\n");
+ xhci = hcd_to_xhci(hcd);
+ if (!usb_hcd_is_primary_hcd(hcd))
+ return 0;
pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
@@ -178,7 +119,6 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (!retval)
return retval;
-error:
kfree(xhci);
return retval;
}
@@ -222,7 +162,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
retval = usb_add_hcd(xhci->shared_hcd, dev->irq,
- IRQF_DISABLED | IRQF_SHARED);
+ IRQF_SHARED);
if (retval)
goto put_usb3_hcd;
/* Roothub already marked as USB 3.0 speed */
@@ -344,6 +284,11 @@ static const struct hc_driver xhci_pci_hc_driver = {
.hub_status_data = xhci_hub_status_data,
.bus_suspend = xhci_bus_suspend,
.bus_resume = xhci_bus_resume,
+ /*
+ * call back when device connected and addressed
+ */
+ .update_device = xhci_update_device,
+ .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
};
/*-------------------------------------------------------------------------*/
@@ -375,12 +320,12 @@ static struct pci_driver xhci_pci_driver = {
#endif
};
-int xhci_register_pci(void)
+int __init xhci_register_pci(void)
{
return pci_register_driver(&xhci_pci_driver);
}
-void xhci_unregister_pci(void)
+void __exit xhci_unregister_pci(void)
{
pci_unregister_driver(&xhci_pci_driver);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 952e2ded61a..940321b3ec6 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -185,7 +185,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
* prepare_transfer()?
*/
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
- bool consumer, bool more_trbs_coming)
+ bool consumer, bool more_trbs_coming, bool isoc)
{
u32 chain;
union xhci_trb *next;
@@ -212,11 +212,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
if (!chain && !more_trbs_coming)
break;
- /* If we're not dealing with 0.95 hardware,
+ /* If we're not dealing with 0.95 hardware or
+ * isoc rings on AMD 0.96 host,
* carry over the chain bit of the previous TRB
* (which may mean the chain bit is cleared).
*/
- if (!xhci_link_trb_quirk(xhci)) {
+ if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
+ && !xhci_link_trb_quirk(xhci)) {
next->link.control &=
cpu_to_le32(~TRB_CHAIN);
next->link.control |=
@@ -1329,10 +1331,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
if (DEV_SUPERSPEED(temp)) {
xhci_dbg(xhci, "resume SS port %d\n", port_id);
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, port_array[faked_port_index]);
+ xhci_set_link_state(xhci, port_array, faked_port_index,
+ XDEV_U0);
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
faked_port_index);
if (!slot_id) {
@@ -1342,10 +1342,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
xhci_ring_device(xhci, slot_id);
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
/* Clear PORT_PLC */
- temp = xhci_readl(xhci, port_array[faked_port_index]);
- temp = xhci_port_state_to_neutral(temp);
- temp |= PORT_PLC;
- xhci_writel(xhci, temp, port_array[faked_port_index]);
+ xhci_test_and_clear_bit(xhci, port_array,
+ faked_port_index, PORT_PLC);
} else {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
bus_state->resume_done[faked_port_index] = jiffies +
@@ -1356,6 +1354,10 @@ static void handle_port_status(struct xhci_hcd *xhci,
}
}
+ if (hcd->speed != HCD_USB3)
+ xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
+ PORT_PLC);
+
cleanup:
/* Update event ring dequeue pointer before dropping the lock */
inc_deq(xhci, xhci->event_ring, true);
@@ -2192,7 +2194,8 @@ cleanup:
if ((urb->actual_length != urb->transfer_buffer_length &&
(urb->transfer_flags &
URB_SHORT_NOT_OK)) ||
- status != 0)
+ (status != 0 &&
+ !usb_endpoint_xfer_isoc(&urb->ep->desc)))
xhci_dbg(xhci, "Giveback URB %p, len = %d, "
"expected = %x, status = %d\n",
urb, urb->actual_length,
@@ -2409,7 +2412,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
* prepare_transfer()?
*/
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
- bool consumer, bool more_trbs_coming,
+ bool consumer, bool more_trbs_coming, bool isoc,
u32 field1, u32 field2, u32 field3, u32 field4)
{
struct xhci_generic_trb *trb;
@@ -2419,7 +2422,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
trb->field[3] = cpu_to_le32(field4);
- inc_enq(xhci, ring, consumer, more_trbs_coming);
+ inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
}
/*
@@ -2427,7 +2430,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
* FIXME allocate segments if the ring is full.
*/
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
- u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+ u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
{
/* Make sure the endpoint has been added to xHC schedule */
switch (ep_state) {
@@ -2469,10 +2472,11 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
next = ring->enqueue;
while (last_trb(xhci, ring, ring->enq_seg, next)) {
- /* If we're not dealing with 0.95 hardware,
- * clear the chain bit.
+ /* If we're not dealing with 0.95 hardware or isoc rings
+ * on AMD 0.96 host, clear the chain bit.
*/
- if (!xhci_link_trb_quirk(xhci))
+ if (!xhci_link_trb_quirk(xhci) && !(isoc &&
+ (xhci->quirks & XHCI_AMD_0x96_HOST)))
next->link.control &= cpu_to_le32(~TRB_CHAIN);
else
next->link.control |= cpu_to_le32(TRB_CHAIN);
@@ -2505,6 +2509,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
unsigned int num_trbs,
struct urb *urb,
unsigned int td_index,
+ bool isoc,
gfp_t mem_flags)
{
int ret;
@@ -2522,7 +2527,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
ret = prepare_ring(xhci, ep_ring,
le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
- num_trbs, mem_flags);
+ num_trbs, isoc, mem_flags);
if (ret)
return ret;
@@ -2711,7 +2716,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
* running_total.
*/
packets_transferred = (running_total + trb_buff_len) /
- le16_to_cpu(urb->ep->desc.wMaxPacketSize);
+ usb_endpoint_maxp(&urb->ep->desc);
return xhci_td_remainder(total_packet_count - packets_transferred);
}
@@ -2741,11 +2746,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs = count_sg_trbs_needed(xhci, urb);
num_sgs = urb->num_sgs;
total_packet_count = roundup(urb->transfer_buffer_length,
- le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+ usb_endpoint_maxp(&urb->ep->desc));
trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, 0, mem_flags);
+ num_trbs, urb, 0, false, mem_flags);
if (trb_buff_len < 0)
return trb_buff_len;
@@ -2840,7 +2845,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
more_trbs_coming = true;
else
more_trbs_coming = false;
- queue_trb(xhci, ep_ring, false, more_trbs_coming,
+ queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -2931,7 +2936,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, 0, mem_flags);
+ num_trbs, urb, 0, false, mem_flags);
if (ret < 0)
return ret;
@@ -2948,7 +2953,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
running_total = 0;
total_packet_count = roundup(urb->transfer_buffer_length,
- le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+ usb_endpoint_maxp(&urb->ep->desc));
/* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE -
@@ -3003,7 +3008,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
more_trbs_coming = true;
else
more_trbs_coming = false;
- queue_trb(xhci, ep_ring, false, more_trbs_coming,
+ queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -3063,7 +3068,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs++;
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, 0, mem_flags);
+ num_trbs, urb, 0, false, mem_flags);
if (ret < 0)
return ret;
@@ -3096,7 +3101,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
}
- queue_trb(xhci, ep_ring, false, true,
+ queue_trb(xhci, ep_ring, false, true, false,
setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
TRB_LEN(8) | TRB_INTR_TARGET(0),
@@ -3116,7 +3121,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_DIR_IN;
- queue_trb(xhci, ep_ring, false, true,
+ queue_trb(xhci, ep_ring, false, true, false,
lower_32_bits(urb->transfer_dma),
upper_32_bits(urb->transfer_dma),
length_field,
@@ -3132,7 +3137,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field = 0;
else
field = TRB_DIR_IN;
- queue_trb(xhci, ep_ring, false, false,
+ queue_trb(xhci, ep_ring, false, false, false,
0,
0,
TRB_INTR_TARGET(0),
@@ -3269,7 +3274,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td_len = urb->iso_frame_desc[i].length;
td_remain_len = td_len;
total_packet_count = roundup(td_len,
- le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+ usb_endpoint_maxp(&urb->ep->desc));
/* A zero-length transfer still involves at least one packet. */
if (total_packet_count == 0)
total_packet_count++;
@@ -3281,7 +3286,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
- urb->stream_id, trbs_per_td, urb, i, mem_flags);
+ urb->stream_id, trbs_per_td, urb, i, true,
+ mem_flags);
if (ret < 0) {
if (i == 0)
return ret;
@@ -3351,7 +3357,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
remainder |
TRB_INTR_TARGET(0);
- queue_trb(xhci, ep_ring, false, more_trbs_coming,
+ queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -3433,7 +3439,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* Do not insert any td of the urb to the ring if the check failed.
*/
ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
- num_trbs, mem_flags);
+ num_trbs, true, mem_flags);
if (ret)
return ret;
@@ -3492,7 +3498,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
reserved_trbs++;
ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
- reserved_trbs, GFP_ATOMIC);
+ reserved_trbs, false, GFP_ATOMIC);
if (ret < 0) {
xhci_err(xhci, "ERR: No room for command on command ring\n");
if (command_must_succeed)
@@ -3500,8 +3506,8 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
"unfailable commands failed.\n");
return ret;
}
- queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
- field4 | xhci->cmd_ring->cycle_state);
+ queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
+ field3, field4 | xhci->cmd_ring->cycle_state);
return 0;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3a0f695138f..1ff95a0df57 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -175,28 +175,19 @@ int xhci_reset(struct xhci_hcd *xhci)
return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}
-/*
- * Free IRQs
- * free all IRQs request
- */
-static void xhci_free_irq(struct xhci_hcd *xhci)
+#ifdef CONFIG_PCI
+static int xhci_free_msi(struct xhci_hcd *xhci)
{
int i;
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
- /* return if using legacy interrupt */
- if (xhci_to_hcd(xhci)->irq >= 0)
- return;
-
- if (xhci->msix_entries) {
- for (i = 0; i < xhci->msix_count; i++)
- if (xhci->msix_entries[i].vector)
- free_irq(xhci->msix_entries[i].vector,
- xhci_to_hcd(xhci));
- } else if (pdev->irq >= 0)
- free_irq(pdev->irq, xhci_to_hcd(xhci));
+ if (!xhci->msix_entries)
+ return -EINVAL;
- return;
+ for (i = 0; i < xhci->msix_count; i++)
+ if (xhci->msix_entries[i].vector)
+ free_irq(xhci->msix_entries[i].vector,
+ xhci_to_hcd(xhci));
+ return 0;
}
/*
@@ -224,6 +215,28 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
}
/*
+ * Free IRQs
+ * free all IRQs request
+ */
+static void xhci_free_irq(struct xhci_hcd *xhci)
+{
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ int ret;
+
+ /* return if using legacy interrupt */
+ if (xhci_to_hcd(xhci)->irq >= 0)
+ return;
+
+ ret = xhci_free_msi(xhci);
+ if (!ret)
+ return;
+ if (pdev->irq >= 0)
+ free_irq(pdev->irq, xhci_to_hcd(xhci));
+
+ return;
+}
+
+/*
* Set up MSI-X
*/
static int xhci_setup_msix(struct xhci_hcd *xhci)
@@ -302,6 +315,72 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
return;
}
+static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+{
+ int i;
+
+ if (xhci->msix_entries) {
+ for (i = 0; i < xhci->msix_count; i++)
+ synchronize_irq(xhci->msix_entries[i].vector);
+ }
+}
+
+static int xhci_try_enable_msi(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ int ret;
+
+ /*
+ * Some Fresco Logic host controllers advertise MSI, but fail to
+ * generate interrupts. Don't even try to enable MSI.
+ */
+ if (xhci->quirks & XHCI_BROKEN_MSI)
+ return 0;
+
+ /* unregister the legacy interrupt */
+ if (hcd->irq)
+ free_irq(hcd->irq, hcd);
+ hcd->irq = -1;
+
+ ret = xhci_setup_msix(xhci);
+ if (ret)
+ /* fall back to msi*/
+ ret = xhci_setup_msi(xhci);
+
+ if (!ret)
+ /* hcd->irq is -1, we have MSI */
+ return 0;
+
+ /* fall back to legacy interrupt*/
+ ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
+ hcd->irq_descr, hcd);
+ if (ret) {
+ xhci_err(xhci, "request interrupt %d failed\n",
+ pdev->irq);
+ return ret;
+ }
+ hcd->irq = pdev->irq;
+ return 0;
+}
+
+#else
+
+static int xhci_try_enable_msi(struct usb_hcd *hcd)
+{
+ return 0;
+}
+
+static void xhci_cleanup_msix(struct xhci_hcd *xhci)
+{
+}
+
+static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+{
+}
+
+#endif
+
/*
* Initialize memory for HCD and xHC (one-time init).
*
@@ -316,7 +395,7 @@ int xhci_init(struct usb_hcd *hcd)
xhci_dbg(xhci, "xhci_init\n");
spin_lock_init(&xhci->lock);
- if (link_quirk) {
+ if (xhci->hci_version == 0x95 && link_quirk) {
xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
xhci->quirks |= XHCI_LINK_TRB_QUIRK;
} else {
@@ -413,9 +492,8 @@ int xhci_run(struct usb_hcd *hcd)
{
u32 temp;
u64 temp_64;
- u32 ret;
+ int ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
/* Start the xHCI host controller running only after the USB 2.0 roothub
* is setup.
@@ -426,34 +504,10 @@ int xhci_run(struct usb_hcd *hcd)
return xhci_run_finished(xhci);
xhci_dbg(xhci, "xhci_run\n");
- /* unregister the legacy interrupt */
- if (hcd->irq)
- free_irq(hcd->irq, hcd);
- hcd->irq = -1;
-
- /* Some Fresco Logic host controllers advertise MSI, but fail to
- * generate interrupts. Don't even try to enable MSI.
- */
- if (xhci->quirks & XHCI_BROKEN_MSI)
- goto legacy_irq;
- ret = xhci_setup_msix(xhci);
+ ret = xhci_try_enable_msi(hcd);
if (ret)
- /* fall back to msi*/
- ret = xhci_setup_msi(xhci);
-
- if (ret) {
-legacy_irq:
- /* fall back to legacy interrupt*/
- ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
- hcd->irq_descr, hcd);
- if (ret) {
- xhci_err(xhci, "request interrupt %d failed\n",
- pdev->irq);
- return ret;
- }
- hcd->irq = pdev->irq;
- }
+ return ret;
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
init_timer(&xhci->event_ring_timer);
@@ -694,7 +748,6 @@ int xhci_suspend(struct xhci_hcd *xhci)
int rc = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 command;
- int i;
spin_lock_irq(&xhci->lock);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -730,10 +783,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
/* step 5: remove core well power */
/* synchronize irq when using MSI-X */
- if (xhci->msix_entries) {
- for (i = 0; i < xhci->msix_count; i++)
- synchronize_irq(xhci->msix_entries[i].vector);
- }
+ xhci_msix_sync_irqs(xhci);
return rc;
}
@@ -945,8 +995,7 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
return -ENODEV;
if (check_virt_dev) {
- if (!udev->slot_id || !xhci->devs
- || !xhci->devs[udev->slot_id]) {
+ if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
printk(KERN_DEBUG "xHCI %s called with unaddressed "
"device\n", func);
return -EINVAL;
@@ -987,7 +1036,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
out_ctx = xhci->devs[slot_id]->out_ctx;
ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
- max_packet_size = le16_to_cpu(urb->dev->ep0.desc.wMaxPacketSize);
+ max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
if (hw_max_packet_size != max_packet_size) {
xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
@@ -1035,6 +1084,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_td *buffer;
unsigned long flags;
int ret = 0;
unsigned int slot_id, ep_index;
@@ -1065,13 +1115,15 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
if (!urb_priv)
return -ENOMEM;
+ buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
+ if (!buffer) {
+ kfree(urb_priv);
+ return -ENOMEM;
+ }
+
for (i = 0; i < size; i++) {
- urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
- if (!urb_priv->td[i]) {
- urb_priv->length = i;
- xhci_urb_free_priv(xhci, urb_priv);
- return -ENOMEM;
- }
+ urb_priv->td[i] = buffer;
+ buffer++;
}
urb_priv->length = size;
@@ -1747,6 +1799,564 @@ static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
xhci->num_active_eps);
}
+unsigned int xhci_get_block_size(struct usb_device *udev)
+{
+ switch (udev->speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ return FS_BLOCK;
+ case USB_SPEED_HIGH:
+ return HS_BLOCK;
+ case USB_SPEED_SUPER:
+ return SS_BLOCK;
+ case USB_SPEED_UNKNOWN:
+ case USB_SPEED_WIRELESS:
+ default:
+ /* Should never happen */
+ return 1;
+ }
+}
+
+unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
+{
+ if (interval_bw->overhead[LS_OVERHEAD_TYPE])
+ return LS_OVERHEAD;
+ if (interval_bw->overhead[FS_OVERHEAD_TYPE])
+ return FS_OVERHEAD;
+ return HS_OVERHEAD;
+}
+
+/* If we are changing a LS/FS device under a HS hub,
+ * make sure (if we are activating a new TT) that the HS bus has enough
+ * bandwidth for this new TT.
+ */
+static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int old_active_eps)
+{
+ struct xhci_interval_bw_table *bw_table;
+ struct xhci_tt_bw_info *tt_info;
+
+ /* Find the bandwidth table for the root port this TT is attached to. */
+ bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
+ tt_info = virt_dev->tt_info;
+ /* If this TT already had active endpoints, the bandwidth for this TT
+ * has already been added. Removing all periodic endpoints (and thus
+ * making the TT enactive) will only decrease the bandwidth used.
+ */
+ if (old_active_eps)
+ return 0;
+ if (old_active_eps == 0 && tt_info->active_eps != 0) {
+ if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
+ return -ENOMEM;
+ return 0;
+ }
+ /* Not sure why we would have no new active endpoints...
+ *
+ * Maybe because of an Evaluate Context change for a hub update or a
+ * control endpoint 0 max packet size change?
+ * FIXME: skip the bandwidth calculation in that case.
+ */
+ return 0;
+}
+
+static int xhci_check_ss_bw(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev)
+{
+ unsigned int bw_reserved;
+
+ bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
+ if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
+ return -ENOMEM;
+
+ bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
+ if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * This algorithm is a very conservative estimate of the worst-case scheduling
+ * scenario for any one interval. The hardware dynamically schedules the
+ * packets, so we can't tell which microframe could be the limiting factor in
+ * the bandwidth scheduling. This only takes into account periodic endpoints.
+ *
+ * Obviously, we can't solve an NP complete problem to find the minimum worst
+ * case scenario. Instead, we come up with an estimate that is no less than
+ * the worst case bandwidth used for any one microframe, but may be an
+ * over-estimate.
+ *
+ * We walk the requirements for each endpoint by interval, starting with the
+ * smallest interval, and place packets in the schedule where there is only one
+ * possible way to schedule packets for that interval. In order to simplify
+ * this algorithm, we record the largest max packet size for each interval, and
+ * assume all packets will be that size.
+ *
+ * For interval 0, we obviously must schedule all packets for each interval.
+ * The bandwidth for interval 0 is just the amount of data to be transmitted
+ * (the sum of all max ESIT payload sizes, plus any overhead per packet times
+ * the number of packets).
+ *
+ * For interval 1, we have two possible microframes to schedule those packets
+ * in. For this algorithm, if we can schedule the same number of packets for
+ * each possible scheduling opportunity (each microframe), we will do so. The
+ * remaining number of packets will be saved to be transmitted in the gaps in
+ * the next interval's scheduling sequence.
+ *
+ * As we move those remaining packets to be scheduled with interval 2 packets,
+ * we have to double the number of remaining packets to transmit. This is
+ * because the intervals are actually powers of 2, and we would be transmitting
+ * the previous interval's packets twice in this interval. We also have to be
+ * sure that when we look at the largest max packet size for this interval, we
+ * also look at the largest max packet size for the remaining packets and take
+ * the greater of the two.
+ *
+ * The algorithm continues to evenly distribute packets in each scheduling
+ * opportunity, and push the remaining packets out, until we get to the last
+ * interval. Then those packets and their associated overhead are just added
+ * to the bandwidth used.
+ */
+static int xhci_check_bw_table(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int old_active_eps)
+{
+ unsigned int bw_reserved;
+ unsigned int max_bandwidth;
+ unsigned int bw_used;
+ unsigned int block_size;
+ struct xhci_interval_bw_table *bw_table;
+ unsigned int packet_size = 0;
+ unsigned int overhead = 0;
+ unsigned int packets_transmitted = 0;
+ unsigned int packets_remaining = 0;
+ unsigned int i;
+
+ if (virt_dev->udev->speed == USB_SPEED_SUPER)
+ return xhci_check_ss_bw(xhci, virt_dev);
+
+ if (virt_dev->udev->speed == USB_SPEED_HIGH) {
+ max_bandwidth = HS_BW_LIMIT;
+ /* Convert percent of bus BW reserved to blocks reserved */
+ bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
+ } else {
+ max_bandwidth = FS_BW_LIMIT;
+ bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
+ }
+
+ bw_table = virt_dev->bw_table;
+ /* We need to translate the max packet size and max ESIT payloads into
+ * the units the hardware uses.
+ */
+ block_size = xhci_get_block_size(virt_dev->udev);
+
+ /* If we are manipulating a LS/FS device under a HS hub, double check
+ * that the HS bus has enough bandwidth if we are activing a new TT.
+ */
+ if (virt_dev->tt_info) {
+ xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
+ virt_dev->real_port);
+ if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
+ xhci_warn(xhci, "Not enough bandwidth on HS bus for "
+ "newly activated TT.\n");
+ return -ENOMEM;
+ }
+ xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
+ virt_dev->tt_info->slot_id,
+ virt_dev->tt_info->ttport);
+ } else {
+ xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
+ virt_dev->real_port);
+ }
+
+ /* Add in how much bandwidth will be used for interval zero, or the
+ * rounded max ESIT payload + number of packets * largest overhead.
+ */
+ bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
+ bw_table->interval_bw[0].num_packets *
+ xhci_get_largest_overhead(&bw_table->interval_bw[0]);
+
+ for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
+ unsigned int bw_added;
+ unsigned int largest_mps;
+ unsigned int interval_overhead;
+
+ /*
+ * How many packets could we transmit in this interval?
+ * If packets didn't fit in the previous interval, we will need
+ * to transmit that many packets twice within this interval.
+ */
+ packets_remaining = 2 * packets_remaining +
+ bw_table->interval_bw[i].num_packets;
+
+ /* Find the largest max packet size of this or the previous
+ * interval.
+ */
+ if (list_empty(&bw_table->interval_bw[i].endpoints))
+ largest_mps = 0;
+ else {
+ struct xhci_virt_ep *virt_ep;
+ struct list_head *ep_entry;
+
+ ep_entry = bw_table->interval_bw[i].endpoints.next;
+ virt_ep = list_entry(ep_entry,
+ struct xhci_virt_ep, bw_endpoint_list);
+ /* Convert to blocks, rounding up */
+ largest_mps = DIV_ROUND_UP(
+ virt_ep->bw_info.max_packet_size,
+ block_size);
+ }
+ if (largest_mps > packet_size)
+ packet_size = largest_mps;
+
+ /* Use the larger overhead of this or the previous interval. */
+ interval_overhead = xhci_get_largest_overhead(
+ &bw_table->interval_bw[i]);
+ if (interval_overhead > overhead)
+ overhead = interval_overhead;
+
+ /* How many packets can we evenly distribute across
+ * (1 << (i + 1)) possible scheduling opportunities?
+ */
+ packets_transmitted = packets_remaining >> (i + 1);
+
+ /* Add in the bandwidth used for those scheduled packets */
+ bw_added = packets_transmitted * (overhead + packet_size);
+
+ /* How many packets do we have remaining to transmit? */
+ packets_remaining = packets_remaining % (1 << (i + 1));
+
+ /* What largest max packet size should those packets have? */
+ /* If we've transmitted all packets, don't carry over the
+ * largest packet size.
+ */
+ if (packets_remaining == 0) {
+ packet_size = 0;
+ overhead = 0;
+ } else if (packets_transmitted > 0) {
+ /* Otherwise if we do have remaining packets, and we've
+ * scheduled some packets in this interval, take the
+ * largest max packet size from endpoints with this
+ * interval.
+ */
+ packet_size = largest_mps;
+ overhead = interval_overhead;
+ }
+ /* Otherwise carry over packet_size and overhead from the last
+ * time we had a remainder.
+ */
+ bw_used += bw_added;
+ if (bw_used > max_bandwidth) {
+ xhci_warn(xhci, "Not enough bandwidth. "
+ "Proposed: %u, Max: %u\n",
+ bw_used, max_bandwidth);
+ return -ENOMEM;
+ }
+ }
+ /*
+ * Ok, we know we have some packets left over after even-handedly
+ * scheduling interval 15. We don't know which microframes they will
+ * fit into, so we over-schedule and say they will be scheduled every
+ * microframe.
+ */
+ if (packets_remaining > 0)
+ bw_used += overhead + packet_size;
+
+ if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
+ unsigned int port_index = virt_dev->real_port - 1;
+
+ /* OK, we're manipulating a HS device attached to a
+ * root port bandwidth domain. Include the number of active TTs
+ * in the bandwidth used.
+ */
+ bw_used += TT_HS_OVERHEAD *
+ xhci->rh_bw[port_index].num_active_tts;
+ }
+
+ xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
+ "Available: %u " "percent\n",
+ bw_used, max_bandwidth, bw_reserved,
+ (max_bandwidth - bw_used - bw_reserved) * 100 /
+ max_bandwidth);
+
+ bw_used += bw_reserved;
+ if (bw_used > max_bandwidth) {
+ xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
+ bw_used, max_bandwidth);
+ return -ENOMEM;
+ }
+
+ bw_table->bw_used = bw_used;
+ return 0;
+}
+
+static bool xhci_is_async_ep(unsigned int ep_type)
+{
+ return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
+ ep_type != ISOC_IN_EP &&
+ ep_type != INT_IN_EP);
+}
+
+static bool xhci_is_sync_in_ep(unsigned int ep_type)
+{
+ return (ep_type == ISOC_IN_EP || ep_type != INT_IN_EP);
+}
+
+static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
+{
+ unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
+
+ if (ep_bw->ep_interval == 0)
+ return SS_OVERHEAD_BURST +
+ (ep_bw->mult * ep_bw->num_packets *
+ (SS_OVERHEAD + mps));
+ return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
+ (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
+ 1 << ep_bw->ep_interval);
+
+}
+
+void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
+ struct xhci_bw_info *ep_bw,
+ struct xhci_interval_bw_table *bw_table,
+ struct usb_device *udev,
+ struct xhci_virt_ep *virt_ep,
+ struct xhci_tt_bw_info *tt_info)
+{
+ struct xhci_interval_bw *interval_bw;
+ int normalized_interval;
+
+ if (xhci_is_async_ep(ep_bw->type))
+ return;
+
+ if (udev->speed == USB_SPEED_SUPER) {
+ if (xhci_is_sync_in_ep(ep_bw->type))
+ xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
+ xhci_get_ss_bw_consumed(ep_bw);
+ else
+ xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
+ xhci_get_ss_bw_consumed(ep_bw);
+ return;
+ }
+
+ /* SuperSpeed endpoints never get added to intervals in the table, so
+ * this check is only valid for HS/FS/LS devices.
+ */
+ if (list_empty(&virt_ep->bw_endpoint_list))
+ return;
+ /* For LS/FS devices, we need to translate the interval expressed in
+ * microframes to frames.
+ */
+ if (udev->speed == USB_SPEED_HIGH)
+ normalized_interval = ep_bw->ep_interval;
+ else
+ normalized_interval = ep_bw->ep_interval - 3;
+
+ if (normalized_interval == 0)
+ bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
+ interval_bw = &bw_table->interval_bw[normalized_interval];
+ interval_bw->num_packets -= ep_bw->num_packets;
+ switch (udev->speed) {
+ case USB_SPEED_LOW:
+ interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
+ break;
+ case USB_SPEED_FULL:
+ interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
+ break;
+ case USB_SPEED_HIGH:
+ interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
+ break;
+ case USB_SPEED_SUPER:
+ case USB_SPEED_UNKNOWN:
+ case USB_SPEED_WIRELESS:
+ /* Should never happen because only LS/FS/HS endpoints will get
+ * added to the endpoint list.
+ */
+ return;
+ }
+ if (tt_info)
+ tt_info->active_eps -= 1;
+ list_del_init(&virt_ep->bw_endpoint_list);
+}
+
+static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
+ struct xhci_bw_info *ep_bw,
+ struct xhci_interval_bw_table *bw_table,
+ struct usb_device *udev,
+ struct xhci_virt_ep *virt_ep,
+ struct xhci_tt_bw_info *tt_info)
+{
+ struct xhci_interval_bw *interval_bw;
+ struct xhci_virt_ep *smaller_ep;
+ int normalized_interval;
+
+ if (xhci_is_async_ep(ep_bw->type))
+ return;
+
+ if (udev->speed == USB_SPEED_SUPER) {
+ if (xhci_is_sync_in_ep(ep_bw->type))
+ xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
+ xhci_get_ss_bw_consumed(ep_bw);
+ else
+ xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
+ xhci_get_ss_bw_consumed(ep_bw);
+ return;
+ }
+
+ /* For LS/FS devices, we need to translate the interval expressed in
+ * microframes to frames.
+ */
+ if (udev->speed == USB_SPEED_HIGH)
+ normalized_interval = ep_bw->ep_interval;
+ else
+ normalized_interval = ep_bw->ep_interval - 3;
+
+ if (normalized_interval == 0)
+ bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
+ interval_bw = &bw_table->interval_bw[normalized_interval];
+ interval_bw->num_packets += ep_bw->num_packets;
+ switch (udev->speed) {
+ case USB_SPEED_LOW:
+ interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
+ break;
+ case USB_SPEED_FULL:
+ interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
+ break;
+ case USB_SPEED_HIGH:
+ interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
+ break;
+ case USB_SPEED_SUPER:
+ case USB_SPEED_UNKNOWN:
+ case USB_SPEED_WIRELESS:
+ /* Should never happen because only LS/FS/HS endpoints will get
+ * added to the endpoint list.
+ */
+ return;
+ }
+
+ if (tt_info)
+ tt_info->active_eps += 1;
+ /* Insert the endpoint into the list, largest max packet size first. */
+ list_for_each_entry(smaller_ep, &interval_bw->endpoints,
+ bw_endpoint_list) {
+ if (ep_bw->max_packet_size >=
+ smaller_ep->bw_info.max_packet_size) {
+ /* Add the new ep before the smaller endpoint */
+ list_add_tail(&virt_ep->bw_endpoint_list,
+ &smaller_ep->bw_endpoint_list);
+ return;
+ }
+ }
+ /* Add the new endpoint at the end of the list. */
+ list_add_tail(&virt_ep->bw_endpoint_list,
+ &interval_bw->endpoints);
+}
+
+void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int old_active_eps)
+{
+ struct xhci_root_port_bw_info *rh_bw_info;
+ if (!virt_dev->tt_info)
+ return;
+
+ rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
+ if (old_active_eps == 0 &&
+ virt_dev->tt_info->active_eps != 0) {
+ rh_bw_info->num_active_tts += 1;
+ rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
+ } else if (old_active_eps != 0 &&
+ virt_dev->tt_info->active_eps == 0) {
+ rh_bw_info->num_active_tts -= 1;
+ rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
+ }
+}
+
+static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct xhci_container_ctx *in_ctx)
+{
+ struct xhci_bw_info ep_bw_info[31];
+ int i;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ int old_active_eps = 0;
+
+ if (virt_dev->tt_info)
+ old_active_eps = virt_dev->tt_info->active_eps;
+
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+
+ for (i = 0; i < 31; i++) {
+ if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
+ continue;
+
+ /* Make a copy of the BW info in case we need to revert this */
+ memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
+ sizeof(ep_bw_info[i]));
+ /* Drop the endpoint from the interval table if the endpoint is
+ * being dropped or changed.
+ */
+ if (EP_IS_DROPPED(ctrl_ctx, i))
+ xhci_drop_ep_from_interval_table(xhci,
+ &virt_dev->eps[i].bw_info,
+ virt_dev->bw_table,
+ virt_dev->udev,
+ &virt_dev->eps[i],
+ virt_dev->tt_info);
+ }
+ /* Overwrite the information stored in the endpoints' bw_info */
+ xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
+ for (i = 0; i < 31; i++) {
+ /* Add any changed or added endpoints to the interval table */
+ if (EP_IS_ADDED(ctrl_ctx, i))
+ xhci_add_ep_to_interval_table(xhci,
+ &virt_dev->eps[i].bw_info,
+ virt_dev->bw_table,
+ virt_dev->udev,
+ &virt_dev->eps[i],
+ virt_dev->tt_info);
+ }
+
+ if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
+ /* Ok, this fits in the bandwidth we have.
+ * Update the number of active TTs.
+ */
+ xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
+ return 0;
+ }
+
+ /* We don't have enough bandwidth for this, revert the stored info. */
+ for (i = 0; i < 31; i++) {
+ if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
+ continue;
+
+ /* Drop the new copies of any added or changed endpoints from
+ * the interval table.
+ */
+ if (EP_IS_ADDED(ctrl_ctx, i)) {
+ xhci_drop_ep_from_interval_table(xhci,
+ &virt_dev->eps[i].bw_info,
+ virt_dev->bw_table,
+ virt_dev->udev,
+ &virt_dev->eps[i],
+ virt_dev->tt_info);
+ }
+ /* Revert the endpoint back to its old information */
+ memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
+ sizeof(ep_bw_info[i]));
+ /* Add any changed or dropped endpoints back into the table */
+ if (EP_IS_DROPPED(ctrl_ctx, i))
+ xhci_add_ep_to_interval_table(xhci,
+ &virt_dev->eps[i].bw_info,
+ virt_dev->bw_table,
+ virt_dev->udev,
+ &virt_dev->eps[i],
+ virt_dev->tt_info);
+ }
+ return -ENOMEM;
+}
+
+
/* Issue a configure endpoint command or evaluate context command
* and wait for it to finish.
*/
@@ -1765,17 +2375,30 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
spin_lock_irqsave(&xhci->lock, flags);
virt_dev = xhci->devs[udev->slot_id];
- if (command) {
+
+ if (command)
in_ctx = command->in_ctx;
- if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
- xhci_reserve_host_resources(xhci, in_ctx)) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_warn(xhci, "Not enough host resources, "
- "active endpoint contexts = %u\n",
- xhci->num_active_eps);
- return -ENOMEM;
- }
+ else
+ in_ctx = virt_dev->in_ctx;
+
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+ xhci_reserve_host_resources(xhci, in_ctx)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ return -ENOMEM;
+ }
+ if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
+ xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+ xhci_free_host_resources(xhci, in_ctx);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough bandwidth\n");
+ return -ENOMEM;
+ }
+ if (command) {
cmd_completion = command->completion;
cmd_status = &command->status;
command->command_trb = xhci->cmd_ring->enqueue;
@@ -1789,15 +2412,6 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
} else {
- in_ctx = virt_dev->in_ctx;
- if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
- xhci_reserve_host_resources(xhci, in_ctx)) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_warn(xhci, "Not enough host resources, "
- "active endpoint contexts = %u\n",
- xhci->num_active_eps);
- return -ENOMEM;
- }
cmd_completion = &virt_dev->cmd_completion;
cmd_status = &virt_dev->cmd_status;
}
@@ -1888,6 +2502,12 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
+
+ /* Don't issue the command if there's no endpoints to update. */
+ if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
+ ctrl_ctx->drop_flags == 0)
+ return 0;
+
xhci_dbg(xhci, "New Input Control Context:\n");
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
xhci_dbg_ctx(xhci, virt_dev->in_ctx,
@@ -2525,6 +3145,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
int timeleft;
int last_freed_endpoint;
struct xhci_slot_ctx *slot_ctx;
+ int old_active_eps = 0;
ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
if (ret <= 0)
@@ -2666,7 +3287,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
last_freed_endpoint = i;
}
- }
+ if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
+ xhci_drop_ep_from_interval_table(xhci,
+ &virt_dev->eps[i].bw_info,
+ virt_dev->bw_table,
+ udev,
+ &virt_dev->eps[i],
+ virt_dev->tt_info);
+ xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
+ }
+ /* If necessary, update the number of active TTs on this root port */
+ xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
+
xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
ret = 0;
@@ -2704,6 +3336,11 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
+ if (udev->usb2_hw_lpm_enabled) {
+ xhci_set_usb2_hardware_lpm(hcd, udev, 0);
+ udev->usb2_hw_lpm_enabled = 0;
+ }
+
spin_lock_irqsave(&xhci->lock, flags);
/* Don't disable the slot if the host controller is dead. */
state = xhci_readl(xhci, &xhci->op_regs->status);
@@ -2889,7 +3526,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
* command on a timeout.
*/
if (timeleft <= 0) {
- xhci_warn(xhci, "%s while waiting for a slot\n",
+ xhci_warn(xhci, "%s while waiting for address device command\n",
timeleft == 0 ? "Timeout" : "Signal");
/* FIXME cancel the address device command */
return -ETIME;
@@ -2957,6 +3594,254 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
return 0;
}
+#ifdef CONFIG_USB_SUSPEND
+
+/* BESL to HIRD Encoding array for USB2 LPM */
+static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
+ 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
+
+/* Calculate HIRD/BESL for USB2 PORTPMSC*/
+static int xhci_calculate_hird_besl(int u2del, bool use_besl)
+{
+ int hird;
+
+ if (use_besl) {
+ for (hird = 0; hird < 16; hird++) {
+ if (xhci_besl_encoding[hird] >= u2del)
+ break;
+ }
+ } else {
+ if (u2del <= 50)
+ hird = 0;
+ else
+ hird = (u2del - 51) / 75 + 1;
+
+ if (hird > 15)
+ hird = 15;
+ }
+
+ return hird;
+}
+
+static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct dev_info *dev_info;
+ __le32 __iomem **port_array;
+ __le32 __iomem *addr, *pm_addr;
+ u32 temp, dev_id;
+ unsigned int port_num;
+ unsigned long flags;
+ int u2del, hird;
+ int ret;
+
+ if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
+ !udev->lpm_capable)
+ return -EINVAL;
+
+ /* we only support lpm for non-hub device connected to root hub yet */
+ if (!udev->parent || udev->parent->parent ||
+ udev->descriptor.bDeviceClass == USB_CLASS_HUB)
+ return -EINVAL;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ /* Look for devices in lpm_failed_devs list */
+ dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
+ le16_to_cpu(udev->descriptor.idProduct);
+ list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
+ if (dev_info->dev_id == dev_id) {
+ ret = -EINVAL;
+ goto finish;
+ }
+ }
+
+ port_array = xhci->usb2_ports;
+ port_num = udev->portnum - 1;
+
+ if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
+ xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
+ ret = -EINVAL;
+ goto finish;
+ }
+
+ /*
+ * Test USB 2.0 software LPM.
+ * FIXME: some xHCI 1.0 hosts may implement a new register to set up
+ * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
+ * in the June 2011 errata release.
+ */
+ xhci_dbg(xhci, "test port %d software LPM\n", port_num);
+ /*
+ * Set L1 Device Slot and HIRD/BESL.
+ * Check device's USB 2.0 extension descriptor to determine whether
+ * HIRD or BESL shoule be used. See USB2.0 LPM errata.
+ */
+ pm_addr = port_array[port_num] + 1;
+ u2del = HCS_U2_LATENCY(xhci->hcs_params3);
+ if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2))
+ hird = xhci_calculate_hird_besl(u2del, 1);
+ else
+ hird = xhci_calculate_hird_besl(u2del, 0);
+
+ temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
+ xhci_writel(xhci, temp, pm_addr);
+
+ /* Set port link state to U2(L1) */
+ addr = port_array[port_num];
+ xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);
+
+ /* wait for ACK */
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ /* Check L1 Status */
+ ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
+ if (ret != -ETIMEDOUT) {
+ /* enter L1 successfully */
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
+ port_num, temp);
+ ret = 0;
+ } else {
+ temp = xhci_readl(xhci, pm_addr);
+ xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
+ port_num, temp & PORT_L1S_MASK);
+ ret = -EINVAL;
+ }
+
+ /* Resume the port */
+ xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ /* Clear PLC */
+ xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);
+
+ /* Check PORTSC to make sure the device is in the right state */
+ if (!ret) {
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
+ if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
+ (temp & PORT_PLS_MASK) != XDEV_U0) {
+ xhci_dbg(xhci, "port L1 resume fail\n");
+ ret = -EINVAL;
+ }
+ }
+
+ if (ret) {
+ /* Insert dev to lpm_failed_devs list */
+ xhci_warn(xhci, "device LPM test failed, may disconnect and "
+ "re-enumerate\n");
+ dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
+ if (!dev_info) {
+ ret = -ENOMEM;
+ goto finish;
+ }
+ dev_info->dev_id = dev_id;
+ INIT_LIST_HEAD(&dev_info->list);
+ list_add(&dev_info->list, &xhci->lpm_failed_devs);
+ } else {
+ xhci_ring_device(xhci, udev->slot_id);
+ }
+
+finish:
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return ret;
+}
+
+int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+ struct usb_device *udev, int enable)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ __le32 __iomem **port_array;
+ __le32 __iomem *pm_addr;
+ u32 temp;
+ unsigned int port_num;
+ unsigned long flags;
+ int u2del, hird;
+
+ if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
+ !udev->lpm_capable)
+ return -EPERM;
+
+ if (!udev->parent || udev->parent->parent ||
+ udev->descriptor.bDeviceClass == USB_CLASS_HUB)
+ return -EPERM;
+
+ if (udev->usb2_hw_lpm_capable != 1)
+ return -EPERM;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ port_array = xhci->usb2_ports;
+ port_num = udev->portnum - 1;
+ pm_addr = port_array[port_num] + 1;
+ temp = xhci_readl(xhci, pm_addr);
+
+ xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
+ enable ? "enable" : "disable", port_num);
+
+ u2del = HCS_U2_LATENCY(xhci->hcs_params3);
+ if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2))
+ hird = xhci_calculate_hird_besl(u2del, 1);
+ else
+ hird = xhci_calculate_hird_besl(u2del, 0);
+
+ if (enable) {
+ temp &= ~PORT_HIRD_MASK;
+ temp |= PORT_HIRD(hird) | PORT_RWE;
+ xhci_writel(xhci, temp, pm_addr);
+ temp = xhci_readl(xhci, pm_addr);
+ temp |= PORT_HLE;
+ xhci_writel(xhci, temp, pm_addr);
+ } else {
+ temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
+ xhci_writel(xhci, temp, pm_addr);
+ }
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return 0;
+}
+
+int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ret;
+
+ ret = xhci_usb2_software_lpm_test(hcd, udev);
+ if (!ret) {
+ xhci_dbg(xhci, "software LPM test succeed\n");
+ if (xhci->hw_lpm_support == 1) {
+ udev->usb2_hw_lpm_capable = 1;
+ ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
+ if (!ret)
+ udev->usb2_hw_lpm_enabled = 1;
+ }
+ }
+
+ return 0;
+}
+
+#else
+
+int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+ struct usb_device *udev, int enable)
+{
+ return 0;
+}
+
+int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ return 0;
+}
+
+#endif /* CONFIG_USB_SUSPEND */
+
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
* internal data structures for the device.
*/
@@ -2988,6 +3873,14 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
}
spin_lock_irqsave(&xhci->lock, flags);
+ if (hdev->speed == USB_SPEED_HIGH &&
+ xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
+ xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
+ xhci_free_command(xhci, config_cmd);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return -ENOMEM;
+ }
+
xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
@@ -3051,22 +3944,108 @@ int xhci_get_frame(struct usb_hcd *hcd)
return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}
+int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+{
+ struct xhci_hcd *xhci;
+ struct device *dev = hcd->self.controller;
+ int retval;
+ u32 temp;
+
+ hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
+
+ if (usb_hcd_is_primary_hcd(hcd)) {
+ xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
+ if (!xhci)
+ return -ENOMEM;
+ *((struct xhci_hcd **) hcd->hcd_priv) = xhci;
+ xhci->main_hcd = hcd;
+ /* Mark the first roothub as being USB 2.0.
+ * The xHCI driver will register the USB 3.0 roothub.
+ */
+ hcd->speed = HCD_USB2;
+ hcd->self.root_hub->speed = USB_SPEED_HIGH;
+ /*
+ * USB 2.0 roothub under xHCI has an integrated TT,
+ * (rate matching hub) as opposed to having an OHCI/UHCI
+ * companion controller.
+ */
+ hcd->has_tt = 1;
+ } else {
+ /* xHCI private pointer was set in xhci_pci_probe for the second
+ * registered roothub.
+ */
+ xhci = hcd_to_xhci(hcd);
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ if (HCC_64BIT_ADDR(temp)) {
+ xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
+ dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
+ } else {
+ dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
+ }
+ return 0;
+ }
+
+ xhci->cap_regs = hcd->regs;
+ xhci->op_regs = hcd->regs +
+ HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
+ xhci->run_regs = hcd->regs +
+ (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
+ /* Cache read-only capability registers */
+ xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+ xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+ xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+ xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ xhci->hci_version = HC_VERSION(xhci->hcc_params);
+ xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ xhci_print_registers(xhci);
+
+ get_quirks(dev, xhci);
+
+ /* Make sure the HC is halted. */
+ retval = xhci_halt(xhci);
+ if (retval)
+ goto error;
+
+ xhci_dbg(xhci, "Resetting HCD\n");
+ /* Reset the internal HC memory state and registers. */
+ retval = xhci_reset(xhci);
+ if (retval)
+ goto error;
+ xhci_dbg(xhci, "Reset complete\n");
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ if (HCC_64BIT_ADDR(temp)) {
+ xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
+ dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
+ } else {
+ dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
+ }
+
+ xhci_dbg(xhci, "Calling HCD init\n");
+ /* Initialize HCD and host controller data structures. */
+ retval = xhci_init(hcd);
+ if (retval)
+ goto error;
+ xhci_dbg(xhci, "Called HCD init\n");
+ return 0;
+error:
+ kfree(xhci);
+ return retval;
+}
+
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
static int __init xhci_hcd_init(void)
{
-#ifdef CONFIG_PCI
- int retval = 0;
+ int retval;
retval = xhci_register_pci();
-
if (retval < 0) {
printk(KERN_DEBUG "Problem registering PCI driver.");
return retval;
}
-#endif
/*
* Check the compiler generated sizes of structures that must be laid
* out in specific ways for hardware access.
@@ -3091,8 +4070,6 @@ module_init(xhci_hcd_init);
static void __exit xhci_hcd_cleanup(void)
{
-#ifdef CONFIG_PCI
xhci_unregister_pci();
-#endif
}
module_exit(xhci_hcd_cleanup);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index cae8e23308b..3c8fbd2772e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -272,6 +272,7 @@ struct xhci_op_regs {
*/
#define PORT_PLS_MASK (0xf << 5)
#define XDEV_U0 (0x0 << 5)
+#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
#define XDEV_RESUME (0xf << 5)
/* true: port has power (see HCC_PPC) */
@@ -362,7 +363,13 @@ struct xhci_op_regs {
/* Bits 24:31 for port testing */
/* USB2 Protocol PORTSPMSC */
-#define PORT_RWE (1 << 0x3)
+#define PORT_L1S_MASK 7
+#define PORT_L1S_SUCCESS 1
+#define PORT_RWE (1 << 3)
+#define PORT_HIRD(p) (((p) & 0xf) << 4)
+#define PORT_HIRD_MASK (0xf << 4)
+#define PORT_L1DS(p) (((p) & 0xff) << 8)
+#define PORT_HLE (1 << 16)
/**
* struct xhci_intr_reg - Interrupt Register Set
@@ -611,11 +618,13 @@ struct xhci_ep_ctx {
#define EP_STATE_ERROR 4
/* Mult - Max number of burtst within an interval, in EP companion desc. */
#define EP_MULT(p) (((p) & 0x3) << 8)
+#define CTX_TO_EP_MULT(p) (((p) >> 8) & 0x3)
/* bits 10:14 are Max Primary Streams */
/* bit 15 is Linear Stream Array */
/* Interval - period between requests to an endpoint - 125u increments. */
#define EP_INTERVAL(p) (((p) & 0xff) << 16)
#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
+#define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff)
#define EP_MAXPSTREAMS_MASK (0x1f << 10)
#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
@@ -640,6 +649,7 @@ struct xhci_ep_ctx {
/* bit 6 reserved */
/* bit 7 is Host Initiate Disable - for disabling stream selection */
#define MAX_BURST(p) (((p)&0xff) << 8)
+#define CTX_TO_MAX_BURST(p) (((p) >> 8) & 0xff)
#define MAX_PACKET(p) (((p)&0xffff) << 16)
#define MAX_PACKET_MASK (0xffff << 16)
#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
@@ -652,6 +662,7 @@ struct xhci_ep_ctx {
/* tx_info bitmasks */
#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff)
#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16)
+#define CTX_TO_MAX_ESIT_PAYLOAD(p) (((p) >> 16) & 0xffff)
/* deq bitmasks */
#define EP_CTX_CYCLE_MASK (1 << 0)
@@ -670,6 +681,11 @@ struct xhci_input_control_ctx {
__le32 rsvd2[6];
};
+#define EP_IS_ADDED(ctrl_ctx, i) \
+ (le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))
+#define EP_IS_DROPPED(ctrl_ctx, i) \
+ (le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1)))
+
/* Represents everything that is needed to issue a command on the command ring.
* It's useful to pre-allocate these for commands that cannot fail due to
* out-of-memory errors, like freeing streams.
@@ -731,6 +747,67 @@ struct xhci_stream_info {
#define SMALL_STREAM_ARRAY_SIZE 256
#define MEDIUM_STREAM_ARRAY_SIZE 1024
+/* Some Intel xHCI host controllers need software to keep track of the bus
+ * bandwidth. Keep track of endpoint info here. Each root port is allocated
+ * the full bus bandwidth. We must also treat TTs (including each port under a
+ * multi-TT hub) as a separate bandwidth domain. The direct memory interface
+ * (DMI) also limits the total bandwidth (across all domains) that can be used.
+ */
+struct xhci_bw_info {
+ /* ep_interval is zero-based */
+ unsigned int ep_interval;
+ /* mult and num_packets are one-based */
+ unsigned int mult;
+ unsigned int num_packets;
+ unsigned int max_packet_size;
+ unsigned int max_esit_payload;
+ unsigned int type;
+};
+
+/* "Block" sizes in bytes the hardware uses for different device speeds.
+ * The logic in this part of the hardware limits the number of bits the hardware
+ * can use, so must represent bandwidth in a less precise manner to mimic what
+ * the scheduler hardware computes.
+ */
+#define FS_BLOCK 1
+#define HS_BLOCK 4
+#define SS_BLOCK 16
+#define DMI_BLOCK 32
+
+/* Each device speed has a protocol overhead (CRC, bit stuffing, etc) associated
+ * with each byte transferred. SuperSpeed devices have an initial overhead to
+ * set up bursts. These are in blocks, see above. LS overhead has already been
+ * translated into FS blocks.
+ */
+#define DMI_OVERHEAD 8
+#define DMI_OVERHEAD_BURST 4
+#define SS_OVERHEAD 8
+#define SS_OVERHEAD_BURST 32
+#define HS_OVERHEAD 26
+#define FS_OVERHEAD 20
+#define LS_OVERHEAD 128
+/* The TTs need to claim roughly twice as much bandwidth (94 bytes per
+ * microframe ~= 24Mbps) of the HS bus as the devices can actually use because
+ * of overhead associated with split transfers crossing microframe boundaries.
+ * 31 blocks is pure protocol overhead.
+ */
+#define TT_HS_OVERHEAD (31 + 94)
+#define TT_DMI_OVERHEAD (25 + 12)
+
+/* Bandwidth limits in blocks */
+#define FS_BW_LIMIT 1285
+#define TT_BW_LIMIT 1320
+#define HS_BW_LIMIT 1607
+#define SS_BW_LIMIT_IN 3906
+#define DMI_BW_LIMIT_IN 3906
+#define SS_BW_LIMIT_OUT 3906
+#define DMI_BW_LIMIT_OUT 3906
+
+/* Percentage of bus bandwidth reserved for non-periodic transfers */
+#define FS_BW_RESERVED 10
+#define HS_BW_RESERVED 20
+#define SS_BW_RESERVED 10
+
struct xhci_virt_ep {
struct xhci_ring *ring;
/* Related to endpoints that are configured to use stream IDs only */
@@ -772,8 +849,39 @@ struct xhci_virt_ep {
* process the missed tds on the endpoint ring.
*/
bool skip;
+ /* Bandwidth checking storage */
+ struct xhci_bw_info bw_info;
+ struct list_head bw_endpoint_list;
+};
+
+enum xhci_overhead_type {
+ LS_OVERHEAD_TYPE = 0,
+ FS_OVERHEAD_TYPE,
+ HS_OVERHEAD_TYPE,
+};
+
+struct xhci_interval_bw {
+ unsigned int num_packets;
+ /* Sorted by max packet size.
+ * Head of the list is the greatest max packet size.
+ */
+ struct list_head endpoints;
+ /* How many endpoints of each speed are present. */
+ unsigned int overhead[3];
+};
+
+#define XHCI_MAX_INTERVAL 16
+
+struct xhci_interval_bw_table {
+ unsigned int interval0_esit_payload;
+ struct xhci_interval_bw interval_bw[XHCI_MAX_INTERVAL];
+ /* Includes reserved bandwidth for async endpoints */
+ unsigned int bw_used;
+ unsigned int ss_bw_in;
+ unsigned int ss_bw_out;
};
+
struct xhci_virt_device {
struct usb_device *udev;
/*
@@ -798,7 +906,32 @@ struct xhci_virt_device {
/* Status of the last command issued for this device */
u32 cmd_status;
struct list_head cmd_list;
- u8 port;
+ u8 fake_port;
+ u8 real_port;
+ struct xhci_interval_bw_table *bw_table;
+ struct xhci_tt_bw_info *tt_info;
+};
+
+/*
+ * For each roothub, keep track of the bandwidth information for each periodic
+ * interval.
+ *
+ * If a high speed hub is attached to the roothub, each TT associated with that
+ * hub is a separate bandwidth domain. The interval information for the
+ * endpoints on the devices under that TT will appear in the TT structure.
+ */
+struct xhci_root_port_bw_info {
+ struct list_head tts;
+ unsigned int num_active_tts;
+ struct xhci_interval_bw_table bw_table;
+};
+
+struct xhci_tt_bw_info {
+ struct list_head tt_list;
+ int slot_id;
+ int ttport;
+ struct xhci_interval_bw_table bw_table;
+ int active_eps;
};
@@ -1198,6 +1331,12 @@ struct s3_save {
u64 erst_dequeue;
};
+/* Use for lpm */
+struct dev_info {
+ u32 dev_id;
+ struct list_head list;
+};
+
struct xhci_bus_state {
unsigned long bus_suspended;
unsigned long next_statechange;
@@ -1261,12 +1400,16 @@ struct xhci_hcd {
struct xhci_erst erst;
/* Scratchpad */
struct xhci_scratchpad *scratchpad;
+ /* Store LPM test failed devices' information */
+ struct list_head lpm_failed_devs;
/* slot enabling and address device helpers */
struct completion addr_dev;
int slot_id;
/* Internal mirror of the HW's dcbaa */
struct xhci_virt_device *devs[MAX_HC_SLOTS];
+ /* For keeping track of bandwidth domains per roothub. */
+ struct xhci_root_port_bw_info *rh_bw;
/* DMA pools */
struct dma_pool *device_pool;
@@ -1318,6 +1461,8 @@ struct xhci_hcd {
#define XHCI_EP_LIMIT_QUIRK (1 << 5)
#define XHCI_BROKEN_MSI (1 << 6)
#define XHCI_RESET_ON_RESUME (1 << 7)
+#define XHCI_SW_BW_CHECKING (1 << 8)
+#define XHCI_AMD_0x96_HOST (1 << 9)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
@@ -1330,6 +1475,10 @@ struct xhci_hcd {
/* Array of pointers to USB 2.0 PORTSC registers */
__le32 __iomem **usb2_ports;
unsigned int num_usb2_ports;
+ /* support xHCI 0.96 spec USB2 software LPM */
+ unsigned sw_lpm_support:1;
+ /* support xHCI 1.0 spec USB2 hardware LPM */
+ unsigned hw_lpm_support:1;
};
/* convert between an HCD pointer and the corresponding EHCI_HCD */
@@ -1401,9 +1550,7 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
{
- u32 temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
- return ((HC_VERSION(temp) == 0x95) &&
- (xhci->quirks & XHCI_LINK_TRB_QUIRK));
+ return xhci->quirks & XHCI_LINK_TRB_QUIRK;
}
/* xHCI debugging */
@@ -1438,6 +1585,20 @@ unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
+void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
+ struct xhci_bw_info *ep_bw,
+ struct xhci_interval_bw_table *bw_table,
+ struct usb_device *udev,
+ struct xhci_virt_ep *virt_ep,
+ struct xhci_tt_bw_info *tt_info);
+void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int old_active_eps);
+void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info);
+void xhci_update_bw_info(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_input_control_ctx *ctrl_ctx,
+ struct xhci_virt_device *virt_dev);
void xhci_endpoint_copy(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
@@ -1483,9 +1644,13 @@ void xhci_free_command(struct xhci_hcd *xhci,
/* xHCI PCI glue */
int xhci_register_pci(void);
void xhci_unregister_pci(void);
+#else
+static inline int xhci_register_pci(void) { return 0; }
+static inline void xhci_unregister_pci(void) {}
#endif
/* xHCI host controller glue */
+typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
void xhci_quiesce(struct xhci_hcd *xhci);
int xhci_halt(struct xhci_hcd *xhci);
int xhci_reset(struct xhci_hcd *xhci);
@@ -1493,6 +1658,7 @@ int xhci_init(struct usb_hcd *hcd);
int xhci_run(struct usb_hcd *hcd);
void xhci_stop(struct usb_hcd *hcd);
void xhci_shutdown(struct usb_hcd *hcd);
+int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
#ifdef CONFIG_PM
int xhci_suspend(struct xhci_hcd *xhci);
@@ -1507,6 +1673,10 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd);
irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd);
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_alloc_tt_info(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
unsigned int num_streams, gfp_t mem_flags);
@@ -1514,6 +1684,9 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
gfp_t mem_flags);
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+ struct usb_device *udev, int enable);
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
@@ -1572,6 +1745,10 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id);
/* xHCI roothub code */
+void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
+ int port_id, u32 link_state);
+void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
+ int port_id, u32 port_bit);
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index a6afd15f6a4..fe858711651 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -213,7 +213,7 @@ static void adu_interrupt_in_callback(struct urb *urb)
if (urb->actual_length > 0 && dev->interrupt_in_buffer[0] != 0x00) {
if (dev->read_buffer_length <
- (4 * le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize)) -
+ (4 * usb_endpoint_maxp(dev->interrupt_in_endpoint)) -
(urb->actual_length)) {
memcpy (dev->read_buffer_primary +
dev->read_buffer_length,
@@ -315,7 +315,7 @@ static int adu_open(struct inode *inode, struct file *file)
usb_rcvintpipe(dev->udev,
dev->interrupt_in_endpoint->bEndpointAddress),
dev->interrupt_in_buffer,
- le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize),
+ usb_endpoint_maxp(dev->interrupt_in_endpoint),
adu_interrupt_in_callback, dev,
dev->interrupt_in_endpoint->bInterval);
dev->read_urb_finished = 0;
@@ -483,7 +483,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
usb_rcvintpipe(dev->udev,
dev->interrupt_in_endpoint->bEndpointAddress),
dev->interrupt_in_buffer,
- le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize),
+ usb_endpoint_maxp(dev->interrupt_in_endpoint),
adu_interrupt_in_callback,
dev,
dev->interrupt_in_endpoint->bInterval);
@@ -536,7 +536,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
usb_rcvintpipe(dev->udev,
dev->interrupt_in_endpoint->bEndpointAddress),
dev->interrupt_in_buffer,
- le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize),
+ usb_endpoint_maxp(dev->interrupt_in_endpoint),
adu_interrupt_in_callback,
dev,
dev->interrupt_in_endpoint->bInterval);
@@ -622,7 +622,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
dbg(4," %s : sending, count = %Zd", __func__, count);
/* write the data into interrupt_out_buffer from userspace */
- buffer_size = le16_to_cpu(dev->interrupt_out_endpoint->wMaxPacketSize);
+ buffer_size = usb_endpoint_maxp(dev->interrupt_out_endpoint);
bytes_to_write = count > buffer_size ? buffer_size : count;
dbg(4," %s : buffer_size = %Zd, count = %Zd, bytes_to_write = %Zd",
__func__, buffer_size, count, bytes_to_write);
@@ -752,8 +752,8 @@ static int adu_probe(struct usb_interface *interface,
goto error;
}
- in_end_size = le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize);
- out_end_size = le16_to_cpu(dev->interrupt_out_endpoint->wMaxPacketSize);
+ in_end_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
+ out_end_size = usb_endpoint_maxp(dev->interrupt_out_endpoint);
dev->read_buffer_primary = kmalloc((4 * in_end_size), GFP_KERNEL);
if (!dev->read_buffer_primary) {
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 2f41089cd85..2dbe600fbc1 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -2777,7 +2777,7 @@ static int ftdi_elan_probe(struct usb_interface *interface,
endpoint = &iface_desc->endpoint[i].desc;
if (!ftdi->bulk_in_endpointAddr &&
usb_endpoint_is_bulk_in(endpoint)) {
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ buffer_size = usb_endpoint_maxp(endpoint);
ftdi->bulk_in_size = buffer_size;
ftdi->bulk_in_endpointAddr = endpoint->bEndpointAddress;
ftdi->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index c6184b4d169..515b67fffab 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -359,7 +359,7 @@ static int idmouse_probe(struct usb_interface *interface,
endpoint = &iface_desc->endpoint[0].desc;
if (!dev->bulk_in_endpointAddr && usb_endpoint_is_bulk_in(endpoint)) {
/* we found a bulk in endpoint */
- dev->orig_bi_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ dev->orig_bi_size = usb_endpoint_maxp(endpoint);
dev->bulk_in_size = 0x200; /* works _much_ faster */
dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
dev->bulk_in_buffer =
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index a2190b983f5..81457904d6b 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -803,7 +803,7 @@ static int iowarrior_probe(struct usb_interface *interface,
dev->int_out_endpoint = endpoint;
}
/* we have to check the report_size often, so remember it in the endianess suitable for our machine */
- dev->report_size = le16_to_cpu(dev->int_in_endpoint->wMaxPacketSize);
+ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56))
/* IOWarrior56 has wMaxPacketSize different from report size */
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index cb4096201e2..48c166f0d76 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -721,7 +721,7 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
if (dev->interrupt_out_endpoint == NULL)
dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
- dev->interrupt_in_endpoint_size = le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize);
+ dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
dev->ring_buffer = kmalloc(ring_buffer_size*(sizeof(size_t)+dev->interrupt_in_endpoint_size), GFP_KERNEL);
if (!dev->ring_buffer) {
dev_err(&intf->dev, "Couldn't allocate ring_buffer\n");
@@ -737,7 +737,7 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
dev_err(&intf->dev, "Couldn't allocate interrupt_in_urb\n");
goto error;
}
- dev->interrupt_out_endpoint_size = dev->interrupt_out_endpoint ? le16_to_cpu(dev->interrupt_out_endpoint->wMaxPacketSize) :
+ dev->interrupt_out_endpoint_size = dev->interrupt_out_endpoint ? usb_endpoint_maxp(dev->interrupt_out_endpoint) :
udev->descriptor.bMaxPacketSize0;
dev->interrupt_out_buffer = kmalloc(write_buffer_size*dev->interrupt_out_endpoint_size, GFP_KERNEL);
if (!dev->interrupt_out_buffer) {
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 6482c6e2e6b..a989356f693 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -409,7 +409,7 @@ static int tower_open (struct inode *inode, struct file *file)
dev->udev,
usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress),
dev->interrupt_in_buffer,
- le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize),
+ usb_endpoint_maxp(dev->interrupt_in_endpoint),
tower_interrupt_in_callback,
dev,
dev->interrupt_in_interval);
@@ -928,7 +928,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
err("Couldn't allocate read_buffer");
goto error;
}
- dev->interrupt_in_buffer = kmalloc (le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize), GFP_KERNEL);
+ dev->interrupt_in_buffer = kmalloc (usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
if (!dev->interrupt_in_buffer) {
err("Couldn't allocate interrupt_in_buffer");
goto error;
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 51648154bb4..1871cdf10da 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -18,7 +18,7 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/usb.h>
#define DRIVER_VERSION "USBLCD Driver Version 1.05"
@@ -34,22 +34,29 @@ static const struct usb_device_id id_table[] = {
{ .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, },
{ },
};
-MODULE_DEVICE_TABLE (usb, id_table);
+MODULE_DEVICE_TABLE(usb, id_table);
static DEFINE_MUTEX(open_disc_mutex);
struct usb_lcd {
- struct usb_device * udev; /* init: probe_lcd */
- struct usb_interface * interface; /* the interface for this device */
- unsigned char * bulk_in_buffer; /* the buffer to receive data */
- size_t bulk_in_size; /* the size of the receive buffer */
- __u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */
- __u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */
+ struct usb_device *udev; /* init: probe_lcd */
+ struct usb_interface *interface; /* the interface for
+ this device */
+ unsigned char *bulk_in_buffer; /* the buffer to receive
+ data */
+ size_t bulk_in_size; /* the size of the
+ receive buffer */
+ __u8 bulk_in_endpointAddr; /* the address of the
+ bulk in endpoint */
+ __u8 bulk_out_endpointAddr; /* the address of the
+ bulk out endpoint */
struct kref kref;
- struct semaphore limit_sem; /* to stop writes at full throttle from
- * using up all RAM */
- struct usb_anchor submitted; /* URBs to wait for before suspend */
+ struct semaphore limit_sem; /* to stop writes at
+ full throttle from
+ using up all RAM */
+ struct usb_anchor submitted; /* URBs to wait for
+ before suspend */
};
#define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
@@ -63,8 +70,8 @@ static void lcd_delete(struct kref *kref)
struct usb_lcd *dev = to_lcd_dev(kref);
usb_put_dev(dev->udev);
- kfree (dev->bulk_in_buffer);
- kfree (dev);
+ kfree(dev->bulk_in_buffer);
+ kfree(dev);
}
@@ -80,7 +87,7 @@ static int lcd_open(struct inode *inode, struct file *file)
interface = usb_find_interface(&lcd_driver, subminor);
if (!interface) {
mutex_unlock(&lcd_mutex);
- err ("USBLCD: %s - error, can't find device for minor %d",
+ err("USBLCD: %s - error, can't find device for minor %d",
__func__, subminor);
return -ENODEV;
}
@@ -126,7 +133,8 @@ static int lcd_release(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t lcd_read(struct file *file, char __user * buffer, size_t count, loff_t *ppos)
+static ssize_t lcd_read(struct file *file, char __user * buffer,
+ size_t count, loff_t *ppos)
{
struct usb_lcd *dev;
int retval = 0;
@@ -135,8 +143,9 @@ static ssize_t lcd_read(struct file *file, char __user * buffer, size_t count, l
dev = file->private_data;
/* do a blocking bulk read to get data from the device */
- retval = usb_bulk_msg(dev->udev,
- usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr),
+ retval = usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in_endpointAddr),
dev->bulk_in_buffer,
min(dev->bulk_in_size, count),
&bytes_read, 10000);
@@ -161,23 +170,23 @@ static long lcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
dev = file->private_data;
if (dev == NULL)
return -ENODEV;
-
+
switch (cmd) {
case IOCTL_GET_HARD_VERSION:
mutex_lock(&lcd_mutex);
bcdDevice = le16_to_cpu((dev->udev)->descriptor.bcdDevice);
- sprintf(buf,"%1d%1d.%1d%1d",
+ sprintf(buf, "%1d%1d.%1d%1d",
(bcdDevice & 0xF000)>>12,
(bcdDevice & 0xF00)>>8,
(bcdDevice & 0xF0)>>4,
(bcdDevice & 0xF));
mutex_unlock(&lcd_mutex);
- if (copy_to_user((void __user *)arg,buf,strlen(buf))!=0)
+ if (copy_to_user((void __user *)arg, buf, strlen(buf)) != 0)
return -EFAULT;
break;
case IOCTL_GET_DRV_VERSION:
- sprintf(buf,DRIVER_VERSION);
- if (copy_to_user((void __user *)arg,buf,strlen(buf))!=0)
+ sprintf(buf, DRIVER_VERSION);
+ if (copy_to_user((void __user *)arg, buf, strlen(buf)) != 0)
return -EFAULT;
break;
default:
@@ -199,7 +208,7 @@ static void lcd_write_bulk_callback(struct urb *urb)
if (status &&
!(status == -ENOENT ||
status == -ECONNRESET ||
- status == -ESHUTDOWN)) {
+ status == -ESHUTDOWN)) {
dbg("USBLCD: %s - nonzero write bulk status received: %d",
__func__, status);
}
@@ -210,15 +219,16 @@ static void lcd_write_bulk_callback(struct urb *urb)
up(&dev->limit_sem);
}
-static ssize_t lcd_write(struct file *file, const char __user * user_buffer, size_t count, loff_t *ppos)
+static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
+ size_t count, loff_t *ppos)
{
struct usb_lcd *dev;
- int retval = 0, r;
+ int retval = 0, r;
struct urb *urb = NULL;
char *buf = NULL;
-
+
dev = file->private_data;
-
+
/* verify that we actually have some data to write */
if (count == 0)
goto exit;
@@ -233,34 +243,38 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, siz
retval = -ENOMEM;
goto err_no_buf;
}
-
- buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL, &urb->transfer_dma);
+
+ buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL,
+ &urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
goto error;
}
-
+
if (copy_from_user(buf, user_buffer, count)) {
retval = -EFAULT;
goto error;
}
-
+
/* initialize the urb properly */
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
+ usb_sndbulkpipe(dev->udev,
+ dev->bulk_out_endpointAddr),
buf, count, lcd_write_bulk_callback, dev);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->submitted);
-
+
/* send the data out the bulk port */
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval) {
- err("USBLCD: %s - failed submitting write urb, error %d", __func__, retval);
+ err("USBLCD: %s - failed submitting write urb, error %d",
+ __func__, retval);
goto error_unanchor;
}
-
- /* release our reference to this urb, the USB core will eventually free it entirely */
+
+ /* release our reference to this urb,
+ the USB core will eventually free it entirely */
usb_free_urb(urb);
exit:
@@ -276,13 +290,13 @@ err_no_buf:
}
static const struct file_operations lcd_fops = {
- .owner = THIS_MODULE,
- .read = lcd_read,
- .write = lcd_write,
- .open = lcd_open,
+ .owner = THIS_MODULE,
+ .read = lcd_read,
+ .write = lcd_write,
+ .open = lcd_open,
.unlocked_ioctl = lcd_ioctl,
- .release = lcd_release,
- .llseek = noop_llseek,
+ .release = lcd_release,
+ .llseek = noop_llseek,
};
/*
@@ -290,12 +304,13 @@ static const struct file_operations lcd_fops = {
* and to have the device registered with the driver core
*/
static struct usb_class_driver lcd_class = {
- .name = "lcd%d",
- .fops = &lcd_fops,
- .minor_base = USBLCD_MINOR,
+ .name = "lcd%d",
+ .fops = &lcd_fops,
+ .minor_base = USBLCD_MINOR,
};
-static int lcd_probe(struct usb_interface *interface, const struct usb_device_id *id)
+static int lcd_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
{
struct usb_lcd *dev = NULL;
struct usb_host_interface *iface_desc;
@@ -322,7 +337,7 @@ static int lcd_probe(struct usb_interface *interface, const struct usb_device_id
retval = -ENODEV;
goto error;
}
-
+
/* set up the endpoint information */
/* use only the first bulk-in and bulk-out endpoints */
iface_desc = interface->cur_altsetting;
@@ -332,7 +347,7 @@ static int lcd_probe(struct usb_interface *interface, const struct usb_device_id
if (!dev->bulk_in_endpointAddr &&
usb_endpoint_is_bulk_in(endpoint)) {
/* we found a bulk in endpoint */
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ buffer_size = usb_endpoint_maxp(endpoint);
dev->bulk_in_size = buffer_size;
dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
@@ -369,7 +384,7 @@ static int lcd_probe(struct usb_interface *interface, const struct usb_device_id
dev_info(&interface->dev, "USBLCD Version %1d%1d.%1d%1d found "
"at address %d\n", (i & 0xF000)>>12, (i & 0xF00)>>8,
- (i & 0xF0)>>4,(i & 0xF), dev->udev->devnum);
+ (i & 0xF0)>>4, (i & 0xF), dev->udev->devnum);
/* let the user know what node this device is now attached to */
dev_info(&interface->dev, "USB LCD device now attached to USBLCD-%d\n",
@@ -401,7 +416,7 @@ static int lcd_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
}
-static int lcd_resume (struct usb_interface *intf)
+static int lcd_resume(struct usb_interface *intf)
{
return 0;
}
@@ -409,16 +424,16 @@ static int lcd_resume (struct usb_interface *intf)
static void lcd_disconnect(struct usb_interface *interface)
{
struct usb_lcd *dev;
- int minor = interface->minor;
+ int minor = interface->minor;
mutex_lock(&open_disc_mutex);
- dev = usb_get_intfdata(interface);
- usb_set_intfdata(interface, NULL);
+ dev = usb_get_intfdata(interface);
+ usb_set_intfdata(interface, NULL);
mutex_unlock(&open_disc_mutex);
- /* give back our minor */
- usb_deregister_dev(interface, &lcd_class);
-
+ /* give back our minor */
+ usb_deregister_dev(interface, &lcd_class);
+
/* decrement our usage count */
kref_put(&dev->kref, lcd_delete);
@@ -438,7 +453,7 @@ static struct usb_driver lcd_driver = {
static int __init usb_lcd_init(void)
{
int result;
-
+
result = usb_register(&lcd_driver);
if (result)
err("usb_register failed. Error number %d", result);
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 1616ad1793a..43f84e50d51 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -33,10 +33,10 @@ static const struct usb_device_id id_table[] = {
.driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
{ },
};
-MODULE_DEVICE_TABLE (usb, id_table);
+MODULE_DEVICE_TABLE(usb, id_table);
struct usb_led {
- struct usb_device * udev;
+ struct usb_device *udev;
unsigned char blue;
unsigned char red;
unsigned char green;
@@ -113,14 +113,16 @@ static void change_color(struct usb_led *led)
}
#define show_set(value) \
-static ssize_t show_##value(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t show_##value(struct device *dev, struct device_attribute *attr,\
+ char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_led *led = usb_get_intfdata(intf); \
\
return sprintf(buf, "%d\n", led->value); \
} \
-static ssize_t set_##value(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) \
+static ssize_t set_##value(struct device *dev, struct device_attribute *attr,\
+ const char *buf, size_t count) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_led *led = usb_get_intfdata(intf); \
@@ -135,7 +137,8 @@ show_set(blue);
show_set(red);
show_set(green);
-static int led_probe(struct usb_interface *interface, const struct usb_device_id *id)
+static int led_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_led *dev = NULL;
@@ -150,7 +153,7 @@ static int led_probe(struct usb_interface *interface, const struct usb_device_id
dev->udev = usb_get_dev(udev);
dev->type = id->driver_info;
- usb_set_intfdata (interface, dev);
+ usb_set_intfdata(interface, dev);
retval = device_create_file(&interface->dev, &dev_attr_blue);
if (retval)
@@ -194,7 +197,7 @@ error:
device_remove_file(&interface->dev, &dev_attr_blue);
device_remove_file(&interface->dev, &dev_attr_red);
device_remove_file(&interface->dev, &dev_attr_green);
- usb_set_intfdata (interface, NULL);
+ usb_set_intfdata(interface, NULL);
usb_put_dev(dev->udev);
kfree(dev);
error_mem:
@@ -205,14 +208,14 @@ static void led_disconnect(struct usb_interface *interface)
{
struct usb_led *dev;
- dev = usb_get_intfdata (interface);
+ dev = usb_get_intfdata(interface);
device_remove_file(&interface->dev, &dev_attr_blue);
device_remove_file(&interface->dev, &dev_attr_red);
device_remove_file(&interface->dev, &dev_attr_green);
/* first remove the files, then set the pointer to NULL */
- usb_set_intfdata (interface, NULL);
+ usb_set_intfdata(interface, NULL);
usb_put_dev(dev->udev);
@@ -243,8 +246,8 @@ static void __exit usb_led_exit(void)
usb_deregister(&led_driver);
}
-module_init (usb_led_init);
-module_exit (usb_led_exit);
+module_init(usb_led_init);
+module_exit(usb_led_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index bb10846affc..bd6d00802ea 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -359,8 +359,10 @@ static int simple_io(
urb->context = &completion;
while (retval == 0 && iterations-- > 0) {
init_completion(&completion);
- if (usb_pipeout(urb->pipe))
+ if (usb_pipeout(urb->pipe)) {
simple_fill_buf(urb);
+ urb->transfer_flags |= URB_ZERO_PACKET;
+ }
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval != 0)
break;
@@ -1583,8 +1585,8 @@ static struct urb *iso_alloc_urb(
if (bytes < 0 || !desc)
return NULL;
- maxp = 0x7ff & le16_to_cpu(desc->wMaxPacketSize);
- maxp *= 1 + (0x3 & (le16_to_cpu(desc->wMaxPacketSize) >> 11));
+ maxp = 0x7ff & usb_endpoint_maxp(desc);
+ maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
packets = DIV_ROUND_UP(bytes, maxp);
urb = usb_alloc_urb(packets, GFP_KERNEL);
@@ -1654,7 +1656,7 @@ test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
"... iso period %d %sframes, wMaxPacket %04x\n",
1 << (desc->bInterval - 1),
(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
- le16_to_cpu(desc->wMaxPacketSize));
+ usb_endpoint_maxp(desc));
for (i = 0; i < param->sglen; i++) {
urbs[i] = iso_alloc_urb(udev, pipe, desc,
@@ -2298,25 +2300,8 @@ usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_set_intfdata(intf, dev);
dev_info(&intf->dev, "%s\n", info->name);
- dev_info(&intf->dev, "%s speed {control%s%s%s%s%s} tests%s\n",
- ({ char *tmp;
- switch (udev->speed) {
- case USB_SPEED_LOW:
- tmp = "low";
- break;
- case USB_SPEED_FULL:
- tmp = "full";
- break;
- case USB_SPEED_HIGH:
- tmp = "high";
- break;
- case USB_SPEED_SUPER:
- tmp = "super";
- break;
- default:
- tmp = "unknown";
- break;
- }; tmp; }),
+ dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
+ usb_speed_string(udev->speed),
info->ctrl_out ? " in/out" : "",
rtest, wtest,
irtest, iwtest,
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index a09dbd243eb..a04b2ff9dd8 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1101,7 +1101,7 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
nevents = mon_bin_queued(rp);
sp = (struct mon_bin_stats __user *)arg;
- if (put_user(rp->cnt_lost, &sp->dropped))
+ if (put_user(ndropped, &sp->dropped))
return -EFAULT;
if (put_user(nevents, &sp->queued))
return -EFAULT;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index e81820370d6..ae4a20acef6 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -634,6 +634,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
u16 len;
u16 csr = musb_readw(epio, MUSB_RXCSR);
struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+ u8 use_mode_1;
if (hw_ep->is_shared_fifo)
musb_ep = &hw_ep->ep_in;
@@ -683,6 +684,18 @@ static void rxstate(struct musb *musb, struct musb_request *req)
if (csr & MUSB_RXCSR_RXPKTRDY) {
len = musb_readw(epio, MUSB_RXCOUNT);
+
+ /*
+ * Enable Mode 1 on RX transfers only when short_not_ok flag
+ * is set. Currently short_not_ok flag is set only from
+ * file_storage and f_mass_storage drivers
+ */
+
+ if (request->short_not_ok && len == musb_ep->packet_sz)
+ use_mode_1 = 1;
+ else
+ use_mode_1 = 0;
+
if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
if (is_buffer_mapped(req)) {
@@ -704,7 +717,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
* most these gadgets, end of is signified either by a short packet,
* or filling the last byte of the buffer. (Sending extra data in
* that last pckate should trigger an overflow fault.) But in mode 1,
- * we don't get DMA completion interrrupt for short packets.
+ * we don't get DMA completion interrupt for short packets.
*
* Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
* to get endpoint interrupt on every DMA req, but that didn't seem
@@ -714,37 +727,41 @@ static void rxstate(struct musb *musb, struct musb_request *req)
* then becomes usable as a runtime "use mode 1" hint...
*/
- csr |= MUSB_RXCSR_DMAENAB;
-#ifdef USE_MODE1
- csr |= MUSB_RXCSR_AUTOCLEAR;
- /* csr |= MUSB_RXCSR_DMAMODE; */
-
- /* this special sequence (enabling and then
- * disabling MUSB_RXCSR_DMAMODE) is required
- * to get DMAReq to activate
- */
- musb_writew(epio, MUSB_RXCSR,
- csr | MUSB_RXCSR_DMAMODE);
-#else
- if (!musb_ep->hb_mult &&
- musb_ep->hw_ep->rx_double_buffered)
+ /* Experimental: Mode1 works with mass storage use cases */
+ if (use_mode_1) {
csr |= MUSB_RXCSR_AUTOCLEAR;
-#endif
- musb_writew(epio, MUSB_RXCSR, csr);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ csr |= MUSB_RXCSR_DMAENAB;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ /*
+ * this special sequence (enabling and then
+ * disabling MUSB_RXCSR_DMAMODE) is required
+ * to get DMAReq to activate
+ */
+ musb_writew(epio, MUSB_RXCSR,
+ csr | MUSB_RXCSR_DMAMODE);
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ } else {
+ if (!musb_ep->hb_mult &&
+ musb_ep->hw_ep->rx_double_buffered)
+ csr |= MUSB_RXCSR_AUTOCLEAR;
+ csr |= MUSB_RXCSR_DMAENAB;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
if (request->actual < request->length) {
int transfer_size = 0;
-#ifdef USE_MODE1
- transfer_size = min(request->length - request->actual,
- channel->max_len);
-#else
- transfer_size = min(request->length - request->actual,
- (unsigned)len);
-#endif
- if (transfer_size <= musb_ep->packet_sz)
- musb_ep->dma->desired_mode = 0;
- else
+ if (use_mode_1) {
+ transfer_size = min(request->length - request->actual,
+ channel->max_len);
musb_ep->dma->desired_mode = 1;
+ } else {
+ transfer_size = min(request->length - request->actual,
+ (unsigned)len);
+ musb_ep->dma->desired_mode = 0;
+ }
use_dma = c->channel_program(
channel,
@@ -1020,7 +1037,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
goto fail;
/* REVISIT this rules out high bandwidth periodic transfers */
- tmp = le16_to_cpu(desc->wMaxPacketSize);
+ tmp = usb_endpoint_maxp(desc);
if (tmp & ~0x07ff) {
int ok;
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 9378b359c1f..6a0d0467ec7 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -679,6 +679,14 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
musb_readb(mbase, MUSB_FADDR),
decode_ep0stage(musb->ep0_state));
+ if (csr & MUSB_CSR0_P_DATAEND) {
+ /*
+ * If DATAEND is set we should not call the callback,
+ * hence the status stage is not complete.
+ */
+ return IRQ_HANDLED;
+ }
+
/* I sent a stall.. need to acknowledge it now.. */
if (csr & MUSB_CSR0_P_SENTSTALL) {
musb_writew(regs, MUSB_CSR0,
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 8b2473fa0f4..60ddba8066e 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1932,7 +1932,7 @@ static int musb_urb_enqueue(
INIT_LIST_HEAD(&qh->ring);
qh->is_ready = 1;
- qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+ qh->maxpacket = usb_endpoint_maxp(epd);
qh->type = usb_endpoint_type(epd);
/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index f70c5a57773..57a608584e1 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -408,7 +408,7 @@ dma_controller_create(struct musb *musb, void __iomem *base)
controller->controller.channel_program = dma_channel_program;
controller->controller.channel_abort = dma_channel_abort;
- if (request_irq(irq, dma_controller_irq, IRQF_DISABLED,
+ if (request_irq(irq, dma_controller_irq, 0,
dev_name(musb->controller), &controller->controller)) {
dev_err(dev, "request_irq %d failed!\n", irq);
dma_controller_destroy(&controller->controller);
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index ca9b690a7e4..8c86787c2f0 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -899,7 +899,7 @@ static int otg_bind(struct isp1301 *isp)
if (otg_dev)
status = request_irq(otg_dev->resource[1].start, omap_otg_irq,
- IRQF_DISABLED, DRIVER_NAME, isp);
+ 0, DRIVER_NAME, isp);
else
status = -ENODEV;
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
index b4d2c0972b3..ed2b26cfe81 100644
--- a/drivers/usb/otg/twl6030-usb.c
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -137,22 +137,6 @@ static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address)
return ret;
}
-/*-------------------------------------------------------------------------*/
-static int twl6030_set_phy_clk(struct otg_transceiver *x, int on)
-{
- struct twl6030_usb *twl;
- struct device *dev;
- struct twl4030_usb_data *pdata;
-
- twl = xceiv_to_twl(x);
- dev = twl->dev;
- pdata = dev->platform_data;
-
- pdata->phy_set_clock(twl->dev, on);
-
- return 0;
-}
-
static int twl6030_phy_init(struct otg_transceiver *x)
{
struct twl6030_usb *twl;
diff --git a/drivers/usb/renesas_usbhs/Kconfig b/drivers/usb/renesas_usbhs/Kconfig
index 286cbf1ca7d..6f4afa43638 100644
--- a/drivers/usb/renesas_usbhs/Kconfig
+++ b/drivers/usb/renesas_usbhs/Kconfig
@@ -4,7 +4,7 @@
config USB_RENESAS_USBHS
tristate 'Renesas USBHS controller'
- depends on SUPERH || ARCH_SHMOBILE
+ depends on USB && USB_GADGET
default n
help
Renesas USBHS is a discrete USB host and peripheral controller chip
diff --git a/drivers/usb/renesas_usbhs/Makefile b/drivers/usb/renesas_usbhs/Makefile
index ce08345fa15..bc8aef4311a 100644
--- a/drivers/usb/renesas_usbhs/Makefile
+++ b/drivers/usb/renesas_usbhs/Makefile
@@ -6,4 +6,10 @@ obj-$(CONFIG_USB_RENESAS_USBHS) += renesas_usbhs.o
renesas_usbhs-y := common.o mod.o pipe.o fifo.o
-renesas_usbhs-$(CONFIG_USB_RENESAS_USBHS_UDC) += mod_gadget.o
+ifneq ($(CONFIG_USB_RENESAS_USBHS_HCD),)
+ renesas_usbhs-y += mod_host.o
+endif
+
+ifneq ($(CONFIG_USB_RENESAS_USBHS_UDC),)
+ renesas_usbhs-y += mod_gadget.o
+endif
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index d8239e5efa6..d2e2efaba65 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -61,8 +61,8 @@
*/
#define usbhs_platform_call(priv, func, args...)\
(!(priv) ? -ENODEV : \
- !((priv)->pfunc->func) ? 0 : \
- (priv)->pfunc->func(args))
+ !((priv)->pfunc.func) ? 0 : \
+ (priv)->pfunc.func(args))
/*
* common functions
@@ -114,6 +114,10 @@ void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable)
{
u16 mask = DCFM | DRPD | DPRPU;
u16 val = DCFM | DRPD;
+ int has_otg = usbhs_get_dparam(priv, has_otg);
+
+ if (has_otg)
+ usbhs_bset(priv, DVSTCTR, (EXTLP | PWEN), (EXTLP | PWEN));
/*
* if enable
@@ -147,19 +151,133 @@ int usbhs_frame_get_num(struct usbhs_priv *priv)
}
/*
+ * usb request functions
+ */
+void usbhs_usbreq_get_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
+{
+ u16 val;
+
+ val = usbhs_read(priv, USBREQ);
+ req->bRequest = (val >> 8) & 0xFF;
+ req->bRequestType = (val >> 0) & 0xFF;
+
+ req->wValue = usbhs_read(priv, USBVAL);
+ req->wIndex = usbhs_read(priv, USBINDX);
+ req->wLength = usbhs_read(priv, USBLENG);
+}
+
+void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
+{
+ usbhs_write(priv, USBREQ, (req->bRequest << 8) | req->bRequestType);
+ usbhs_write(priv, USBVAL, req->wValue);
+ usbhs_write(priv, USBINDX, req->wIndex);
+ usbhs_write(priv, USBLENG, req->wLength);
+
+ usbhs_bset(priv, DCPCTR, SUREQ, SUREQ);
+}
+
+/*
+ * bus/vbus functions
+ */
+void usbhs_bus_send_sof_enable(struct usbhs_priv *priv)
+{
+ u16 status = usbhs_read(priv, DVSTCTR) & (USBRST | UACT);
+
+ if (status != USBRST) {
+ struct device *dev = usbhs_priv_to_dev(priv);
+ dev_err(dev, "usbhs should be reset\n");
+ }
+
+ usbhs_bset(priv, DVSTCTR, (USBRST | UACT), UACT);
+}
+
+void usbhs_bus_send_reset(struct usbhs_priv *priv)
+{
+ usbhs_bset(priv, DVSTCTR, (USBRST | UACT), USBRST);
+}
+
+int usbhs_bus_get_speed(struct usbhs_priv *priv)
+{
+ u16 dvstctr = usbhs_read(priv, DVSTCTR);
+
+ switch (RHST & dvstctr) {
+ case RHST_LOW_SPEED:
+ return USB_SPEED_LOW;
+ case RHST_FULL_SPEED:
+ return USB_SPEED_FULL;
+ case RHST_HIGH_SPEED:
+ return USB_SPEED_HIGH;
+ }
+
+ return USB_SPEED_UNKNOWN;
+}
+
+int usbhs_vbus_ctrl(struct usbhs_priv *priv, int enable)
+{
+ struct platform_device *pdev = usbhs_priv_to_pdev(priv);
+
+ return usbhs_platform_call(priv, set_vbus, pdev, enable);
+}
+
+static void usbhsc_bus_init(struct usbhs_priv *priv)
+{
+ usbhs_write(priv, DVSTCTR, 0);
+
+ usbhs_vbus_ctrl(priv, 0);
+}
+
+/*
+ * device configuration
+ */
+int usbhs_set_device_speed(struct usbhs_priv *priv, int devnum,
+ u16 upphub, u16 hubport, u16 speed)
+{
+ struct device *dev = usbhs_priv_to_dev(priv);
+ u16 usbspd = 0;
+ u32 reg = DEVADD0 + (2 * devnum);
+
+ if (devnum > 10) {
+ dev_err(dev, "cannot set speed to unknown device %d\n", devnum);
+ return -EIO;
+ }
+
+ if (upphub > 0xA) {
+ dev_err(dev, "unsupported hub number %d\n", upphub);
+ return -EIO;
+ }
+
+ switch (speed) {
+ case USB_SPEED_LOW:
+ usbspd = USBSPD_SPEED_LOW;
+ break;
+ case USB_SPEED_FULL:
+ usbspd = USBSPD_SPEED_FULL;
+ break;
+ case USB_SPEED_HIGH:
+ usbspd = USBSPD_SPEED_HIGH;
+ break;
+ default:
+ dev_err(dev, "unsupported speed %d\n", speed);
+ return -EIO;
+ }
+
+ usbhs_write(priv, reg, UPPHUB(upphub) |
+ HUBPORT(hubport)|
+ USBSPD(usbspd));
+
+ return 0;
+}
+
+/*
* local functions
*/
-static void usbhsc_bus_ctrl(struct usbhs_priv *priv, int enable)
+static void usbhsc_set_buswait(struct usbhs_priv *priv)
{
int wait = usbhs_get_dparam(priv, buswait_bwait);
- u16 data = 0;
- if (enable) {
- /* set bus wait if platform have */
- if (wait)
- usbhs_bset(priv, BUSWAIT, 0x000F, wait);
- }
- usbhs_write(priv, DVSTCTR, data);
+ /* set bus wait if platform have */
+ if (wait)
+ usbhs_bset(priv, BUSWAIT, 0x000F, wait);
}
/*
@@ -191,10 +309,8 @@ static void usbhsc_power_ctrl(struct usbhs_priv *priv, int enable)
/* USB on */
usbhs_sys_clock_ctrl(priv, enable);
- usbhsc_bus_ctrl(priv, enable);
} else {
/* USB off */
- usbhsc_bus_ctrl(priv, enable);
usbhs_sys_clock_ctrl(priv, enable);
/* disable PM */
@@ -203,13 +319,10 @@ static void usbhsc_power_ctrl(struct usbhs_priv *priv, int enable)
}
/*
- * notify hotplug
+ * hotplug
*/
-static void usbhsc_notify_hotplug(struct work_struct *work)
+static void usbhsc_hotplug(struct usbhs_priv *priv)
{
- struct usbhs_priv *priv = container_of(work,
- struct usbhs_priv,
- notify_hotplug_work.work);
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
int id;
@@ -237,6 +350,10 @@ static void usbhsc_notify_hotplug(struct work_struct *work)
if (usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
usbhsc_power_ctrl(priv, enable);
+ /* bus init */
+ usbhsc_set_buswait(priv);
+ usbhsc_bus_init(priv);
+
/* module start */
usbhs_mod_call(priv, start, priv);
@@ -246,6 +363,9 @@ static void usbhsc_notify_hotplug(struct work_struct *work)
/* module stop */
usbhs_mod_call(priv, stop, priv);
+ /* bus init */
+ usbhsc_bus_init(priv);
+
/* power off */
if (usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
usbhsc_power_ctrl(priv, enable);
@@ -257,6 +377,17 @@ static void usbhsc_notify_hotplug(struct work_struct *work)
}
}
+/*
+ * notify hotplug
+ */
+static void usbhsc_notify_hotplug(struct work_struct *work)
+{
+ struct usbhs_priv *priv = container_of(work,
+ struct usbhs_priv,
+ notify_hotplug_work.work);
+ usbhsc_hotplug(priv);
+}
+
int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
@@ -315,24 +446,28 @@ static int __devinit usbhs_probe(struct platform_device *pdev)
/*
* care platform info
*/
- priv->pfunc = &info->platform_callback;
- priv->dparam = &info->driver_param;
+ memcpy(&priv->pfunc,
+ &info->platform_callback,
+ sizeof(struct renesas_usbhs_platform_callback));
+ memcpy(&priv->dparam,
+ &info->driver_param,
+ sizeof(struct renesas_usbhs_driver_param));
/* set driver callback functions for platform */
dfunc = &info->driver_callback;
dfunc->notify_hotplug = usbhsc_drvcllbck_notify_hotplug;
/* set default param if platform doesn't have */
- if (!priv->dparam->pipe_type) {
- priv->dparam->pipe_type = usbhsc_default_pipe_type;
- priv->dparam->pipe_size = ARRAY_SIZE(usbhsc_default_pipe_type);
+ if (!priv->dparam.pipe_type) {
+ priv->dparam.pipe_type = usbhsc_default_pipe_type;
+ priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_default_pipe_type);
}
- if (!priv->dparam->pio_dma_border)
- priv->dparam->pio_dma_border = 64; /* 64byte */
+ if (!priv->dparam.pio_dma_border)
+ priv->dparam.pio_dma_border = 64; /* 64byte */
/* FIXME */
/* runtime power control ? */
- if (priv->pfunc->get_vbus)
+ if (priv->pfunc.get_vbus)
usbhsc_flags_set(priv, USBHSF_RUNTIME_PWCTRL);
/*
@@ -443,9 +578,60 @@ static int __devexit usbhs_remove(struct platform_device *pdev)
return 0;
}
+static int usbhsc_suspend(struct device *dev)
+{
+ struct usbhs_priv *priv = dev_get_drvdata(dev);
+ struct usbhs_mod *mod = usbhs_mod_get_current(priv);
+
+ if (mod) {
+ usbhs_mod_call(priv, stop, priv);
+ usbhs_mod_change(priv, -1);
+ }
+
+ if (mod || !usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
+ usbhsc_power_ctrl(priv, 0);
+
+ return 0;
+}
+
+static int usbhsc_resume(struct device *dev)
+{
+ struct usbhs_priv *priv = dev_get_drvdata(dev);
+ struct platform_device *pdev = usbhs_priv_to_pdev(priv);
+
+ usbhs_platform_call(priv, phy_reset, pdev);
+
+ if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
+ usbhsc_power_ctrl(priv, 1);
+
+ usbhsc_hotplug(priv);
+
+ return 0;
+}
+
+static int usbhsc_runtime_nop(struct device *dev)
+{
+ /* Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * This driver re-initializes all registers after
+ * pm_runtime_get_sync() anyway so there is no need
+ * to save and restore registers here.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops usbhsc_pm_ops = {
+ .suspend = usbhsc_suspend,
+ .resume = usbhsc_resume,
+ .runtime_suspend = usbhsc_runtime_nop,
+ .runtime_resume = usbhsc_runtime_nop,
+};
+
static struct platform_driver renesas_usbhs_driver = {
.driver = {
.name = "renesas_usbhs",
+ .pm = &usbhsc_pm_ops,
},
.probe = usbhs_probe,
.remove = __devexit_p(usbhs_remove),
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index b410463a121..8729da5c3be 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -90,6 +90,17 @@ struct usbhs_priv;
#define PIPE9TRN 0x00BA
#define PIPEATRE 0x00BC
#define PIPEATRN 0x00BE
+#define DEVADD0 0x00D0 /* Device address n configuration */
+#define DEVADD1 0x00D2
+#define DEVADD2 0x00D4
+#define DEVADD3 0x00D6
+#define DEVADD4 0x00D8
+#define DEVADD5 0x00DA
+#define DEVADD6 0x00DC
+#define DEVADD7 0x00DE
+#define DEVADD8 0x00E0
+#define DEVADD9 0x00E2
+#define DEVADDA 0x00E4
/* SYSCFG */
#define SCKE (1 << 10) /* USB Module Clock Enable */
@@ -102,6 +113,8 @@ struct usbhs_priv;
/* DVSTCTR */
#define EXTLP (1 << 10) /* Controls the EXTLP pin output state */
#define PWEN (1 << 9) /* Controls the PWEN pin output state */
+#define USBRST (1 << 6) /* Bus Reset Output */
+#define UACT (1 << 4) /* USB Bus Enable */
#define RHST (0x7) /* Reset Handshake */
#define RHST_LOW_SPEED 1 /* Low-speed connection */
#define RHST_FULL_SPEED 2 /* Full-speed connection */
@@ -159,6 +172,15 @@ struct usbhs_priv;
#define NODATA_STATUS_STAGE 5 /* Control write NoData status stage */
#define SEQUENCE_ERROR 6 /* Control transfer sequence error */
+/* INTSTS1 */
+#define OVRCR (1 << 15) /* OVRCR Interrupt Status */
+#define BCHG (1 << 14) /* USB Bus Change Interrupt Status */
+#define DTCH (1 << 12) /* USB Disconnection Detect Interrupt Status */
+#define ATTCH (1 << 11) /* ATTCH Interrupt Status */
+#define EOFERR (1 << 6) /* EOF Error Detect Interrupt Status */
+#define SIGN (1 << 5) /* Setup Transaction Error Interrupt Status */
+#define SACK (1 << 4) /* Setup Transaction ACK Response Interrupt Status */
+
/* PIPECFG */
/* DCPCFG */
#define TYPE_NONE (0 << 14) /* Transfer Type */
@@ -183,9 +205,11 @@ struct usbhs_priv;
/* PIPEnCTR */
/* DCPCTR */
#define BSTS (1 << 15) /* Buffer Status */
+#define SUREQ (1 << 14) /* Sending SETUP Token */
#define CSSTS (1 << 12) /* CSSTS Status */
-#define SQCLR (1 << 8) /* Toggle Bit Clear */
#define ACLRM (1 << 9) /* Buffer Auto-Clear Mode */
+#define SQCLR (1 << 8) /* Toggle Bit Clear */
+#define SQSET (1 << 7) /* Toggle Bit Set */
#define PBUSY (1 << 5) /* Pipe Busy */
#define PID_MASK (0x3) /* Response PID */
#define PID_NAK 0
@@ -202,6 +226,14 @@ struct usbhs_priv;
/* FRMNUM */
#define FRNM_MASK (0x7FF)
+/* DEVADDn */
+#define UPPHUB(x) (((x) & 0xF) << 11) /* HUB Register */
+#define HUBPORT(x) (((x) & 0x7) << 8) /* HUB Port for Target Device */
+#define USBSPD(x) (((x) & 0x3) << 6) /* Device Transfer Rate */
+#define USBSPD_SPEED_LOW 0x1
+#define USBSPD_SPEED_FULL 0x2
+#define USBSPD_SPEED_HIGH 0x3
+
/*
* struct
*/
@@ -210,8 +242,8 @@ struct usbhs_priv {
void __iomem *base;
unsigned int irq;
- struct renesas_usbhs_platform_callback *pfunc;
- struct renesas_usbhs_driver_param *dparam;
+ struct renesas_usbhs_platform_callback pfunc;
+ struct renesas_usbhs_driver_param dparam;
struct delayed_work notify_hotplug_work;
struct platform_device *pdev;
@@ -258,15 +290,35 @@ void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable);
void usbhs_sys_function_ctrl(struct usbhs_priv *priv, int enable);
/*
+ * usb request
+ */
+void usbhs_usbreq_get_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req);
+void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req);
+
+/*
+ * bus
+ */
+void usbhs_bus_send_sof_enable(struct usbhs_priv *priv);
+void usbhs_bus_send_reset(struct usbhs_priv *priv);
+int usbhs_bus_get_speed(struct usbhs_priv *priv);
+int usbhs_vbus_ctrl(struct usbhs_priv *priv, int enable);
+
+/*
* frame
*/
int usbhs_frame_get_num(struct usbhs_priv *priv);
/*
+ * device config
+ */
+int usbhs_set_device_speed(struct usbhs_priv *priv, int devnum, u16 upphub,
+ u16 hubport, u16 speed);
+
+/*
* data
*/
struct usbhs_priv *usbhs_pdev_to_priv(struct platform_device *pdev);
-#define usbhs_get_dparam(priv, param) (priv->dparam->param)
+#define usbhs_get_dparam(priv, param) (priv->dparam.param)
#define usbhs_priv_to_pdev(priv) (priv->pdev)
#define usbhs_priv_to_dev(priv) (&priv->pdev->dev)
#define usbhs_priv_to_lock(priv) (&priv->lock)
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index a34430f55fb..8da685e796d 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -54,35 +54,45 @@ static struct usbhs_pkt_handle usbhsf_null_handler = {
};
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
- struct usbhs_pkt_handle *handler,
+ void (*done)(struct usbhs_priv *priv,
+ struct usbhs_pkt *pkt),
void *buf, int len, int zero)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
unsigned long flags;
+ if (!done) {
+ dev_err(dev, "no done function\n");
+ return;
+ }
+
/******************** spin lock ********************/
usbhs_lock(priv, flags);
- if (!handler) {
+ if (!pipe->handler) {
dev_err(dev, "no handler function\n");
- handler = &usbhsf_null_handler;
+ pipe->handler = &usbhsf_null_handler;
}
list_del_init(&pkt->node);
list_add_tail(&pkt->node, &pipe->list);
+ /*
+ * each pkt must hold own handler.
+ * because handler might be changed by its situation.
+ * dma handler -> pio handler.
+ */
pkt->pipe = pipe;
pkt->buf = buf;
- pkt->handler = handler;
+ pkt->handler = pipe->handler;
pkt->length = len;
pkt->zero = zero;
pkt->actual = 0;
+ pkt->done = done;
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
-
- usbhs_pkt_start(pipe);
}
static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
@@ -118,10 +128,15 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
return pkt;
}
-int __usbhs_pkt_handler(struct usbhs_pipe *pipe, int type)
+enum {
+ USBHSF_PKT_PREPARE,
+ USBHSF_PKT_TRY_RUN,
+ USBHSF_PKT_DMA_DONE,
+};
+
+static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
- struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
struct usbhs_pkt *pkt;
struct device *dev = usbhs_priv_to_dev(priv);
int (*func)(struct usbhs_pkt *pkt, int *is_done);
@@ -161,13 +176,18 @@ __usbhs_pkt_handler_end:
/******************** spin unlock ******************/
if (is_done) {
- info->done(pkt);
+ pkt->done(priv, pkt);
usbhs_pkt_start(pipe);
}
return ret;
}
+void usbhs_pkt_start(struct usbhs_pipe *pipe)
+{
+ usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
+}
+
/*
* irq enable/disable function
*/
@@ -276,9 +296,13 @@ static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
usbhsf_fifo_is_busy(fifo))
return -EBUSY;
- if (usbhs_pipe_is_dcp(pipe))
+ if (usbhs_pipe_is_dcp(pipe)) {
base |= (1 == write) << 5; /* ISEL */
+ if (usbhs_mod_is_host(priv))
+ usbhs_dcp_dir_for_host(pipe, write);
+ }
+
/* "base" will be used below */
usbhs_write(priv, fifo->sel, base | MBW_32);
@@ -297,6 +321,151 @@ static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
}
/*
+ * DCP status stage
+ */
+static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
+{
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int ret;
+
+ usbhs_pipe_disable(pipe);
+
+ ret = usbhsf_fifo_select(pipe, fifo, 1);
+ if (ret < 0) {
+ dev_err(dev, "%s() faile\n", __func__);
+ return ret;
+ }
+
+ usbhs_pipe_sequence_data1(pipe); /* DATA1 */
+
+ usbhsf_fifo_clear(pipe, fifo);
+ usbhsf_send_terminator(pipe, fifo);
+
+ usbhsf_fifo_unselect(pipe, fifo);
+
+ usbhsf_tx_irq_ctrl(pipe, 1);
+ usbhs_pipe_enable(pipe);
+
+ return ret;
+}
+
+static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
+{
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int ret;
+
+ usbhs_pipe_disable(pipe);
+
+ ret = usbhsf_fifo_select(pipe, fifo, 0);
+ if (ret < 0) {
+ dev_err(dev, "%s() fail\n", __func__);
+ return ret;
+ }
+
+ usbhs_pipe_sequence_data1(pipe); /* DATA1 */
+ usbhsf_fifo_clear(pipe, fifo);
+
+ usbhsf_fifo_unselect(pipe, fifo);
+
+ usbhsf_rx_irq_ctrl(pipe, 1);
+ usbhs_pipe_enable(pipe);
+
+ return ret;
+
+}
+
+static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
+{
+ struct usbhs_pipe *pipe = pkt->pipe;
+
+ if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
+ usbhsf_tx_irq_ctrl(pipe, 0);
+ else
+ usbhsf_rx_irq_ctrl(pipe, 0);
+
+ pkt->actual = pkt->length;
+ *is_done = 1;
+
+ return 0;
+}
+
+struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
+ .prepare = usbhs_dcp_dir_switch_to_write,
+ .try_run = usbhs_dcp_dir_switch_done,
+};
+
+struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
+ .prepare = usbhs_dcp_dir_switch_to_read,
+ .try_run = usbhs_dcp_dir_switch_done,
+};
+
+/*
+ * DCP data stage (push)
+ */
+static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
+{
+ struct usbhs_pipe *pipe = pkt->pipe;
+
+ usbhs_pipe_sequence_data1(pipe); /* DATA1 */
+
+ /*
+ * change handler to PIO push
+ */
+ pkt->handler = &usbhs_fifo_pio_push_handler;
+
+ return pkt->handler->prepare(pkt, is_done);
+}
+
+struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
+ .prepare = usbhsf_dcp_data_stage_try_push,
+};
+
+/*
+ * DCP data stage (pop)
+ */
+static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
+ int *is_done)
+{
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
+
+ if (usbhs_pipe_is_busy(pipe))
+ return 0;
+
+ /*
+ * prepare pop for DCP should
+ * - change DCP direction,
+ * - clear fifo
+ * - DATA1
+ */
+ usbhs_pipe_disable(pipe);
+
+ usbhs_pipe_sequence_data1(pipe); /* DATA1 */
+
+ usbhsf_fifo_select(pipe, fifo, 0);
+ usbhsf_fifo_clear(pipe, fifo);
+ usbhsf_fifo_unselect(pipe, fifo);
+
+ /*
+ * change handler to PIO pop
+ */
+ pkt->handler = &usbhs_fifo_pio_pop_handler;
+
+ return pkt->handler->prepare(pkt, is_done);
+}
+
+struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
+ .prepare = usbhsf_dcp_data_stage_prepare_pop,
+};
+
+/*
* PIO push handler
*/
static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
@@ -452,6 +621,20 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
total_len = len;
/*
+ * update actual length first here to decide disable pipe.
+ * if this pipe keeps BUF status and all data were popped,
+ * then, next interrupt/token will be issued again
+ */
+ pkt->actual += total_len;
+
+ if ((pkt->actual == pkt->length) || /* receive all data */
+ (total_len < maxp)) { /* short packet */
+ *is_done = 1;
+ usbhsf_rx_irq_ctrl(pipe, 0);
+ usbhs_pipe_disable(pipe); /* disable pipe first */
+ }
+
+ /*
* Buffer clear if Zero-Length packet
*
* see
@@ -481,16 +664,7 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
}
- pkt->actual += total_len;
-
usbhs_fifo_read_end:
- if ((pkt->actual == pkt->length) || /* receive all data */
- (total_len < maxp)) { /* short packet */
- *is_done = 1;
- usbhsf_rx_irq_ctrl(pipe, 0);
- usbhs_pipe_disable(pipe);
- }
-
dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n",
usbhs_pipe_number(pipe),
pkt->length, pkt->actual, *is_done, pkt->zero);
@@ -646,7 +820,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
if (len % 4) /* 32bit alignment */
goto usbhsf_pio_prepare_push;
- if (((u32)pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
+ if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
goto usbhsf_pio_prepare_push;
/* get enable DMA fifo */
@@ -723,7 +897,7 @@ static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
if (!fifo)
goto usbhsf_pio_prepare_pop;
- if (((u32)pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
+ if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
goto usbhsf_pio_prepare_pop;
ret = usbhsf_fifo_select(pipe, fifo, 0);
@@ -884,7 +1058,7 @@ static int usbhsf_irq_empty(struct usbhs_priv *priv,
if (!(irq_state->bempsts & (1 << i)))
continue;
- ret = usbhs_pkt_run(pipe);
+ ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
if (ret < 0)
dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
}
@@ -914,7 +1088,7 @@ static int usbhsf_irq_ready(struct usbhs_priv *priv,
if (!(irq_state->brdysts & (1 << i)))
continue;
- ret = usbhs_pkt_run(pipe);
+ ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
if (ret < 0)
dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
}
@@ -929,7 +1103,7 @@ static void usbhsf_dma_complete(void *arg)
struct device *dev = usbhs_priv_to_dev(priv);
int ret;
- ret = usbhs_pkt_dmadone(pipe);
+ ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
if (ret < 0)
dev_err(dev, "dma_complete run_error %d : %d\n",
usbhs_pipe_number(pipe), ret);
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index ed6d8e56c13..32a7b246b28 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -51,6 +51,8 @@ struct usbhs_pkt {
struct list_head node;
struct usbhs_pipe *pipe;
struct usbhs_pkt_handle *handler;
+ void (*done)(struct usbhs_priv *priv,
+ struct usbhs_pkt *pkt);
dma_addr_t dma;
void *buf;
int length;
@@ -76,12 +78,6 @@ void usbhs_fifo_quit(struct usbhs_priv *priv);
/*
* packet info
*/
-enum {
- USBHSF_PKT_PREPARE,
- USBHSF_PKT_TRY_RUN,
- USBHSF_PKT_DMA_DONE,
-};
-
extern struct usbhs_pkt_handle usbhs_fifo_pio_push_handler;
extern struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler;
extern struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler;
@@ -89,16 +85,18 @@ extern struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler;
extern struct usbhs_pkt_handle usbhs_fifo_dma_push_handler;
extern struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler;
+extern struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler;
+extern struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler;
+
+extern struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler;
+extern struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler;
void usbhs_pkt_init(struct usbhs_pkt *pkt);
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
- struct usbhs_pkt_handle *handler,
+ void (*done)(struct usbhs_priv *priv,
+ struct usbhs_pkt *pkt),
void *buf, int len, int zero);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
-int __usbhs_pkt_handler(struct usbhs_pipe *pipe, int type);
-
-#define usbhs_pkt_start(p) __usbhs_pkt_handler(p, USBHSF_PKT_PREPARE)
-#define usbhs_pkt_run(p) __usbhs_pkt_handler(p, USBHSF_PKT_TRY_RUN)
-#define usbhs_pkt_dmadone(p) __usbhs_pkt_handler(p, USBHSF_PKT_DMA_DONE)
+void usbhs_pkt_start(struct usbhs_pipe *pipe);
#endif /* RENESAS_USB_FIFO_H */
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index a577f8f4064..053f86d7000 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -58,7 +58,7 @@ void usbhs_mod_autonomy_mode(struct usbhs_priv *priv)
struct usbhs_mod_info *info = usbhs_priv_to_modinfo(priv);
info->irq_vbus = usbhsm_autonomy_irq_vbus;
- priv->pfunc->get_vbus = usbhsm_autonomy_get_vbus;
+ priv->pfunc.get_vbus = usbhsm_autonomy_get_vbus;
usbhs_irq_callback_update(priv, NULL);
}
@@ -93,8 +93,9 @@ struct usbhs_mod *usbhs_mod_get(struct usbhs_priv *priv, int id)
return ret;
}
-int usbhs_mod_is_host(struct usbhs_priv *priv, struct usbhs_mod *mod)
+int usbhs_mod_is_host(struct usbhs_priv *priv)
{
+ struct usbhs_mod *mod = usbhs_mod_get_current(priv);
struct usbhs_mod_info *info = usbhs_priv_to_modinfo(priv);
if (!mod)
@@ -139,13 +140,17 @@ int usbhs_mod_probe(struct usbhs_priv *priv)
/*
* install host/gadget driver
*/
- ret = usbhs_mod_gadget_probe(priv);
+ ret = usbhs_mod_host_probe(priv);
if (ret < 0)
return ret;
+ ret = usbhs_mod_gadget_probe(priv);
+ if (ret < 0)
+ goto mod_init_host_err;
+
/* irq settings */
ret = request_irq(priv->irq, usbhs_interrupt,
- IRQF_DISABLED, dev_name(dev), priv);
+ 0, dev_name(dev), priv);
if (ret) {
dev_err(dev, "irq request err\n");
goto mod_init_gadget_err;
@@ -155,12 +160,15 @@ int usbhs_mod_probe(struct usbhs_priv *priv)
mod_init_gadget_err:
usbhs_mod_gadget_remove(priv);
+mod_init_host_err:
+ usbhs_mod_host_remove(priv);
return ret;
}
void usbhs_mod_remove(struct usbhs_priv *priv)
{
+ usbhs_mod_host_remove(priv);
usbhs_mod_gadget_remove(priv);
free_irq(priv->irq, priv);
}
@@ -168,20 +176,6 @@ void usbhs_mod_remove(struct usbhs_priv *priv)
/*
* status functions
*/
-int usbhs_status_get_usb_speed(struct usbhs_irq_state *irq_state)
-{
- switch (irq_state->dvstctr & RHST) {
- case RHST_LOW_SPEED:
- return USB_SPEED_LOW;
- case RHST_FULL_SPEED:
- return USB_SPEED_FULL;
- case RHST_HIGH_SPEED:
- return USB_SPEED_HIGH;
- }
-
- return USB_SPEED_UNKNOWN;
-}
-
int usbhs_status_get_device_state(struct usbhs_irq_state *irq_state)
{
int state = irq_state->intsts0 & DVSQ_MASK;
@@ -221,8 +215,6 @@ static void usbhs_status_get_each_irq(struct usbhs_priv *priv,
state->intsts0 = usbhs_read(priv, INTSTS0);
state->intsts1 = usbhs_read(priv, INTSTS1);
- state->dvstctr = usbhs_read(priv, DVSTCTR);
-
/* mask */
if (mod) {
state->brdysts = usbhs_read(priv, BRDYSTS);
@@ -269,6 +261,8 @@ static irqreturn_t usbhs_interrupt(int irq, void *data)
* see also
* usbhs_irq_setting_update
*/
+
+ /* INTSTS0 */
if (irq_state.intsts0 & VBINT)
usbhs_mod_info_call(priv, irq_vbus, priv, &irq_state);
@@ -284,15 +278,38 @@ static irqreturn_t usbhs_interrupt(int irq, void *data)
if (irq_state.intsts0 & BRDY)
usbhs_mod_call(priv, irq_ready, priv, &irq_state);
+ /* INTSTS1 */
+ if (irq_state.intsts1 & ATTCH)
+ usbhs_mod_call(priv, irq_attch, priv, &irq_state);
+
+ if (irq_state.intsts1 & DTCH)
+ usbhs_mod_call(priv, irq_dtch, priv, &irq_state);
+
+ if (irq_state.intsts1 & SIGN)
+ usbhs_mod_call(priv, irq_sign, priv, &irq_state);
+
+ if (irq_state.intsts1 & SACK)
+ usbhs_mod_call(priv, irq_sack, priv, &irq_state);
+
return IRQ_HANDLED;
}
void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
{
u16 intenb0 = 0;
+ u16 intenb1 = 0;
struct usbhs_mod_info *info = usbhs_priv_to_modinfo(priv);
+ /*
+ * BEMPENB/BRDYENB are picky.
+ * below method is required
+ *
+ * - clear INTSTS0
+ * - update BEMPENB/BRDYENB
+ * - update INTSTS0
+ */
usbhs_write(priv, INTENB0, 0);
+ usbhs_write(priv, INTENB1, 0);
usbhs_write(priv, BEMPENB, 0);
usbhs_write(priv, BRDYENB, 0);
@@ -310,6 +327,9 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
intenb0 |= VBSE;
if (mod) {
+ /*
+ * INTSTS0
+ */
if (mod->irq_ctrl_stage)
intenb0 |= CTRE;
@@ -322,7 +342,26 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
usbhs_write(priv, BRDYENB, mod->irq_brdysts);
intenb0 |= BRDYE;
}
+
+ /*
+ * INTSTS1
+ */
+ if (mod->irq_attch)
+ intenb1 |= ATTCHE;
+
+ if (mod->irq_attch)
+ intenb1 |= DTCHE;
+
+ if (mod->irq_sign)
+ intenb1 |= SIGNE;
+
+ if (mod->irq_sack)
+ intenb1 |= SACKE;
}
- usbhs_write(priv, INTENB0, intenb0);
+ if (intenb0)
+ usbhs_write(priv, INTENB0, intenb0);
+
+ if (intenb1)
+ usbhs_write(priv, INTENB1, intenb1);
}
diff --git a/drivers/usb/renesas_usbhs/mod.h b/drivers/usb/renesas_usbhs/mod.h
index 5c845a28a21..8ae3733031c 100644
--- a/drivers/usb/renesas_usbhs/mod.h
+++ b/drivers/usb/renesas_usbhs/mod.h
@@ -30,7 +30,6 @@ struct usbhs_irq_state {
u16 brdysts;
u16 nrdysts;
u16 bempsts;
- u16 dvstctr;
};
struct usbhs_mod {
@@ -42,26 +41,48 @@ struct usbhs_mod {
int (*start)(struct usbhs_priv *priv);
int (*stop)(struct usbhs_priv *priv);
- /* INTSTS0 :: DVST (DVSQ) */
+ /*
+ * INTSTS0
+ */
+
+ /* DVST (DVSQ) */
int (*irq_dev_state)(struct usbhs_priv *priv,
struct usbhs_irq_state *irq_state);
- /* INTSTS0 :: CTRT (CTSQ) */
+ /* CTRT (CTSQ) */
int (*irq_ctrl_stage)(struct usbhs_priv *priv,
struct usbhs_irq_state *irq_state);
- /* INTSTS0 :: BEMP */
- /* BEMPSTS */
+ /* BEMP / BEMPSTS */
int (*irq_empty)(struct usbhs_priv *priv,
struct usbhs_irq_state *irq_state);
u16 irq_bempsts;
- /* INTSTS0 :: BRDY */
- /* BRDYSTS */
+ /* BRDY / BRDYSTS */
int (*irq_ready)(struct usbhs_priv *priv,
struct usbhs_irq_state *irq_state);
u16 irq_brdysts;
+ /*
+ * INTSTS1
+ */
+
+ /* ATTCHE */
+ int (*irq_attch)(struct usbhs_priv *priv,
+ struct usbhs_irq_state *irq_state);
+
+ /* DTCHE */
+ int (*irq_dtch)(struct usbhs_priv *priv,
+ struct usbhs_irq_state *irq_state);
+
+ /* SIGN */
+ int (*irq_sign)(struct usbhs_priv *priv,
+ struct usbhs_irq_state *irq_state);
+
+ /* SACK */
+ int (*irq_sack)(struct usbhs_priv *priv,
+ struct usbhs_irq_state *irq_state);
+
struct usbhs_priv *priv;
};
@@ -89,7 +110,7 @@ struct usbhs_mod_info {
struct usbhs_mod *usbhs_mod_get(struct usbhs_priv *priv, int id);
struct usbhs_mod *usbhs_mod_get_current(struct usbhs_priv *priv);
void usbhs_mod_register(struct usbhs_priv *priv, struct usbhs_mod *usb, int id);
-int usbhs_mod_is_host(struct usbhs_priv *priv, struct usbhs_mod *mod);
+int usbhs_mod_is_host(struct usbhs_priv *priv);
int usbhs_mod_change(struct usbhs_priv *priv, int id);
int usbhs_mod_probe(struct usbhs_priv *priv);
void usbhs_mod_remove(struct usbhs_priv *priv);
@@ -99,7 +120,6 @@ void usbhs_mod_autonomy_mode(struct usbhs_priv *priv);
/*
* status functions
*/
-int usbhs_status_get_usb_speed(struct usbhs_irq_state *irq_state);
int usbhs_status_get_device_state(struct usbhs_irq_state *irq_state);
int usbhs_status_get_ctrl_stage(struct usbhs_irq_state *irq_state);
@@ -119,9 +139,24 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod);
})
/*
- * gadget control
+ * host / gadget control
*/
-#ifdef CONFIG_USB_RENESAS_USBHS_UDC
+#if defined(CONFIG_USB_RENESAS_USBHS_HCD) || \
+ defined(CONFIG_USB_RENESAS_USBHS_HCD_MODULE)
+extern int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv);
+extern int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv);
+#else
+static inline int usbhs_mod_host_probe(struct usbhs_priv *priv)
+{
+ return 0;
+}
+static inline void usbhs_mod_host_remove(struct usbhs_priv *priv)
+{
+}
+#endif
+
+#if defined(CONFIG_USB_RENESAS_USBHS_UDC) || \
+ defined(CONFIG_USB_RENESAS_USBHS_UDC_MODULE)
extern int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv);
extern void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv);
#else
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index cb2d451d511..4cc7ee0babc 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -39,7 +39,6 @@ struct usbhsg_uep {
char ep_name[EP_NAME_SIZE];
struct usbhsg_gpriv *gpriv;
- struct usbhs_pkt_handle *handler;
};
struct usbhsg_gpriv {
@@ -128,25 +127,6 @@ LIST_HEAD(the_controller_link);
/*
* queue push/pop
*/
-static void usbhsg_queue_push(struct usbhsg_uep *uep,
- struct usbhsg_request *ureq)
-{
- struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
- struct device *dev = usbhsg_gpriv_to_dev(gpriv);
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
- struct usbhs_pkt *pkt = usbhsg_ureq_to_pkt(ureq);
- struct usb_request *req = &ureq->req;
-
- req->actual = 0;
- req->status = -EINPROGRESS;
- usbhs_pkt_push(pipe, pkt, uep->handler,
- req->buf, req->length, req->zero);
-
- dev_dbg(dev, "pipe %d : queue push (%d)\n",
- usbhs_pipe_number(pipe),
- req->length);
-}
-
static void usbhsg_queue_pop(struct usbhsg_uep *uep,
struct usbhsg_request *ureq,
int status)
@@ -161,7 +141,7 @@ static void usbhsg_queue_pop(struct usbhsg_uep *uep,
ureq->req.complete(&uep->ep, &ureq->req);
}
-static void usbhsg_queue_done(struct usbhs_pkt *pkt)
+static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
@@ -172,6 +152,26 @@ static void usbhsg_queue_done(struct usbhs_pkt *pkt)
usbhsg_queue_pop(uep, ureq, 0);
}
+static void usbhsg_queue_push(struct usbhsg_uep *uep,
+ struct usbhsg_request *ureq)
+{
+ struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+ struct device *dev = usbhsg_gpriv_to_dev(gpriv);
+ struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhs_pkt *pkt = usbhsg_ureq_to_pkt(ureq);
+ struct usb_request *req = &ureq->req;
+
+ req->actual = 0;
+ req->status = -EINPROGRESS;
+ usbhs_pkt_push(pipe, pkt, usbhsg_queue_done,
+ req->buf, req->length, req->zero);
+ usbhs_pkt_start(pipe);
+
+ dev_dbg(dev, "pipe %d : queue push (%d)\n",
+ usbhs_pipe_number(pipe),
+ req->length);
+}
+
/*
* dma map/unmap
*/
@@ -265,7 +265,7 @@ static int usbhsg_recip_handler_std_clear_endpoint(struct usbhs_priv *priv,
if (!usbhsg_status_has(gpriv, USBHSG_STATUS_WEDGE)) {
usbhs_pipe_disable(pipe);
- usbhs_pipe_clear_sequence(pipe);
+ usbhs_pipe_sequence_data0(pipe);
usbhs_pipe_enable(pipe);
}
@@ -355,7 +355,7 @@ static int usbhsg_irq_dev_state(struct usbhs_priv *priv,
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
- gpriv->gadget.speed = usbhs_status_get_usb_speed(irq_state);
+ gpriv->gadget.speed = usbhs_bus_get_speed(priv);
dev_dbg(dev, "state = %x : speed : %d\n",
usbhs_status_get_device_state(irq_state),
@@ -389,13 +389,13 @@ static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv,
switch (stage) {
case READ_DATA_STAGE:
- dcp->handler = &usbhs_fifo_pio_push_handler;
+ pipe->handler = &usbhs_fifo_pio_push_handler;
break;
case WRITE_DATA_STAGE:
- dcp->handler = &usbhs_fifo_pio_pop_handler;
+ pipe->handler = &usbhs_fifo_pio_pop_handler;
break;
case NODATA_STATUS_STAGE:
- dcp->handler = &usbhs_ctrl_stage_end_handler;
+ pipe->handler = &usbhs_ctrl_stage_end_handler;
break;
default:
return ret;
@@ -479,24 +479,31 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
*/
if (uep->pipe) {
usbhs_pipe_clear(uep->pipe);
- usbhs_pipe_clear_sequence(uep->pipe);
+ usbhs_pipe_sequence_data0(uep->pipe);
return 0;
}
- pipe = usbhs_pipe_malloc(priv, desc);
+ pipe = usbhs_pipe_malloc(priv,
+ usb_endpoint_type(desc),
+ usb_endpoint_dir_in(desc));
if (pipe) {
uep->pipe = pipe;
pipe->mod_private = uep;
+ /* set epnum / maxp */
+ usbhs_pipe_config_update(pipe, 0,
+ usb_endpoint_num(desc),
+ usb_endpoint_maxp(desc));
+
/*
* usbhs_fifo_dma_push/pop_handler try to
* use dmaengine if possible.
* It will use pio handler if impossible.
*/
if (usb_endpoint_dir_in(desc))
- uep->handler = &usbhs_fifo_dma_push_handler;
+ pipe->handler = &usbhs_fifo_dma_push_handler;
else
- uep->handler = &usbhs_fifo_dma_pop_handler;
+ pipe->handler = &usbhs_fifo_dma_pop_handler;
ret = 0;
}
@@ -659,7 +666,6 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
* pipe initialize and enable DCP
*/
usbhs_pipe_init(priv,
- usbhsg_queue_done,
usbhsg_dma_map_ctrl);
usbhs_fifo_init(priv);
usbhsg_uep_init(gpriv);
@@ -667,6 +673,7 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
/* dcp init */
dcp->pipe = usbhs_dcp_malloc(priv);
dcp->pipe->mod_private = dcp;
+ usbhs_pipe_config_update(dcp->pipe, 0, 0, 64);
/*
* system config enble
@@ -730,10 +737,6 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
usbhsg_pipe_disable(dcp);
- if (gpriv->driver &&
- gpriv->driver->disconnect)
- gpriv->driver->disconnect(&gpriv->gadget);
-
dev_dbg(dev, "stop gadget\n");
return 0;
@@ -744,31 +747,19 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
* linux usb function
*
*/
-static int usbhsg_gadget_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
+static int usbhsg_gadget_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
{
- struct usbhsg_gpriv *gpriv;
+ struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
struct usbhs_priv *priv;
struct device *dev;
int ret;
- if (!bind ||
- !driver ||
+ if (!driver ||
!driver->setup ||
driver->speed != USB_SPEED_HIGH)
return -EINVAL;
- /*
- * find unused controller
- */
- usbhsg_for_each_controller(gpriv) {
- if (!gpriv->driver)
- goto find_unused_controller;
- }
- return -ENODEV;
-
-find_unused_controller:
-
dev = usbhsg_gpriv_to_dev(gpriv);
priv = usbhsg_gpriv_to_priv(gpriv);
@@ -782,19 +773,8 @@ find_unused_controller:
goto add_fail;
}
- ret = bind(&gpriv->gadget);
- if (ret) {
- dev_err(dev, "bind to driver %s error %d\n",
- driver->driver.name, ret);
- goto bind_fail;
- }
-
- dev_dbg(dev, "bind %s\n", driver->driver.name);
-
return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
-bind_fail:
- device_del(&gpriv->gadget.dev);
add_fail:
gpriv->driver = NULL;
gpriv->gadget.dev.driver = NULL;
@@ -802,9 +782,10 @@ add_fail:
return ret;
}
-static int usbhsg_gadget_stop(struct usb_gadget_driver *driver)
+static int usbhsg_gadget_stop(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
{
- struct usbhsg_gpriv *gpriv;
+ struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
struct usbhs_priv *priv;
struct device *dev;
@@ -812,17 +793,6 @@ static int usbhsg_gadget_stop(struct usb_gadget_driver *driver)
!driver->unbind)
return -EINVAL;
- /*
- * find controller
- */
- usbhsg_for_each_controller(gpriv) {
- if (gpriv->driver == driver)
- goto find_matching_controller;
- }
- return -ENODEV;
-
-find_matching_controller:
-
dev = usbhsg_gpriv_to_dev(gpriv);
priv = usbhsg_gpriv_to_priv(gpriv);
@@ -830,12 +800,6 @@ find_matching_controller:
device_del(&gpriv->gadget.dev);
gpriv->driver = NULL;
- if (driver->disconnect)
- driver->disconnect(&gpriv->gadget);
-
- driver->unbind(&gpriv->gadget);
- dev_dbg(dev, "unbind %s\n", driver->driver.name);
-
return 0;
}
@@ -852,8 +816,8 @@ static int usbhsg_get_frame(struct usb_gadget *gadget)
static struct usb_gadget_ops usbhsg_gadget_ops = {
.get_frame = usbhsg_get_frame,
- .start = usbhsg_gadget_start,
- .stop = usbhsg_gadget_stop,
+ .udc_start = usbhsg_gadget_start,
+ .udc_stop = usbhsg_gadget_stop,
};
static int usbhsg_start(struct usbhs_priv *priv)
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
new file mode 100644
index 00000000000..1a7208a50af
--- /dev/null
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -0,0 +1,1312 @@
+/*
+ * Renesas USB driver
+ *
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include "common.h"
+
+/*
+ *** HARDWARE LIMITATION ***
+ *
+ * 1) renesas_usbhs has a limited number of controllable devices.
+ * it can control only 9 devices in generally.
+ * see DEVADDn / DCPMAXP / PIPEMAXP.
+ *
+ * 2) renesas_usbhs pipe number is limited.
+ * the pipe will be re-used for each devices.
+ * so, software should control DATA0/1 sequence of each devices.
+ */
+
+
+/*
+ * image of mod_host
+ *
+ * +--------+
+ * | udev 0 | --> it is used when set address
+ * +--------+
+ *
+ * +--------+ pipes are reused for each uep.
+ * | udev 1 |-+- [uep 0 (dcp) ] --+ pipe will be switched when
+ * +--------+ | | target device was changed
+ * +- [uep 1 (bulk)] --|---+ +--------------+
+ * | +--------------> | pipe0 (dcp) |
+ * +- [uep 2 (bulk)] --|---|---+ +--------------+
+ * | | | | pipe1 (isoc) |
+ * +--------+ | | | +--------------+
+ * | udev 2 |-+- [uep 0 (dcp) ] --+ +-- |------> | pipe2 (bulk) |
+ * +--------+ | | | | +--------------+
+ * +- [uep 1 (int) ] --|-+ | +------> | pipe3 (bulk) |
+ * | | | | +--------------+
+ * +--------+ | +-|---|------> | pipe4 (int) |
+ * | udev 3 |-+- [uep 0 (dcp) ] --+ | | +--------------+
+ * +--------+ | | | | .... |
+ * +- [uep 1 (bulk)] ------+ | | .... |
+ * | |
+ * +- [uep 2 (bulk)]-----------+
+ */
+
+
+/*
+ * struct
+ */
+struct usbhsh_pipe_info {
+ unsigned int usr_cnt; /* see usbhsh_endpoint_alloc() */
+};
+
+struct usbhsh_request {
+ struct urb *urb;
+ struct usbhs_pkt pkt;
+ struct list_head ureq_link; /* see hpriv :: ureq_link_xxx */
+};
+
+struct usbhsh_device {
+ struct usb_device *usbv;
+ struct list_head ep_list_head; /* list of usbhsh_ep */
+};
+
+struct usbhsh_ep {
+ struct usbhs_pipe *pipe;
+ struct usbhsh_device *udev; /* attached udev */
+ struct list_head ep_list; /* list to usbhsh_device */
+
+ int maxp;
+};
+
+#define USBHSH_DEVICE_MAX 10 /* see DEVADDn / DCPMAXP / PIPEMAXP */
+#define USBHSH_PORT_MAX 7 /* see DEVADDn :: HUBPORT */
+struct usbhsh_hpriv {
+ struct usbhs_mod mod;
+ struct usbhs_pipe *dcp;
+
+ struct usbhsh_device udev[USBHSH_DEVICE_MAX];
+
+ struct usbhsh_pipe_info *pipe_info;
+ int pipe_size;
+
+ u32 port_stat; /* USB_PORT_STAT_xxx */
+
+ struct completion *done;
+
+ /* see usbhsh_req_alloc/free */
+ struct list_head ureq_link_active;
+ struct list_head ureq_link_free;
+};
+
+
+static const char usbhsh_hcd_name[] = "renesas_usbhs host";
+
+/*
+ * macro
+ */
+#define usbhsh_priv_to_hpriv(priv) \
+ container_of(usbhs_mod_get(priv, USBHS_HOST), struct usbhsh_hpriv, mod)
+
+#define __usbhsh_for_each_hpipe(start, pos, h, i) \
+ for (i = start, pos = (h)->hpipe + i; \
+ i < (h)->hpipe_size; \
+ i++, pos = (h)->hpipe + i)
+
+#define usbhsh_for_each_hpipe(pos, hpriv, i) \
+ __usbhsh_for_each_hpipe(1, pos, hpriv, i)
+
+#define usbhsh_for_each_hpipe_with_dcp(pos, hpriv, i) \
+ __usbhsh_for_each_hpipe(0, pos, hpriv, i)
+
+#define __usbhsh_for_each_udev(start, pos, h, i) \
+ for (i = start, pos = (h)->udev + i; \
+ i < USBHSH_DEVICE_MAX; \
+ i++, pos = (h)->udev + i)
+
+#define usbhsh_for_each_udev(pos, hpriv, i) \
+ __usbhsh_for_each_udev(1, pos, hpriv, i)
+
+#define usbhsh_for_each_udev_with_dev0(pos, hpriv, i) \
+ __usbhsh_for_each_udev(0, pos, hpriv, i)
+
+#define usbhsh_hcd_to_hpriv(h) (struct usbhsh_hpriv *)((h)->hcd_priv)
+#define usbhsh_hcd_to_dev(h) ((h)->self.controller)
+
+#define usbhsh_hpriv_to_priv(h) ((h)->mod.priv)
+#define usbhsh_hpriv_to_dcp(h) ((h)->dcp)
+#define usbhsh_hpriv_to_hcd(h) \
+ container_of((void *)h, struct usb_hcd, hcd_priv)
+
+#define usbhsh_ep_to_uep(u) ((u)->hcpriv)
+#define usbhsh_uep_to_pipe(u) ((u)->pipe)
+#define usbhsh_uep_to_udev(u) ((u)->udev)
+#define usbhsh_urb_to_ureq(u) ((u)->hcpriv)
+#define usbhsh_urb_to_usbv(u) ((u)->dev)
+
+#define usbhsh_usbv_to_udev(d) dev_get_drvdata(&(d)->dev)
+
+#define usbhsh_udev_to_usbv(h) ((h)->usbv)
+
+#define usbhsh_pipe_info(p) ((p)->mod_private)
+
+#define usbhsh_device_number(h, d) ((int)((d) - (h)->udev))
+#define usbhsh_device_nth(h, d) ((h)->udev + d)
+#define usbhsh_device0(h) usbhsh_device_nth(h, 0)
+
+#define usbhsh_port_stat_init(h) ((h)->port_stat = 0)
+#define usbhsh_port_stat_set(h, s) ((h)->port_stat |= (s))
+#define usbhsh_port_stat_clear(h, s) ((h)->port_stat &= ~(s))
+#define usbhsh_port_stat_get(h) ((h)->port_stat)
+
+#define usbhsh_pkt_to_req(p) \
+ container_of((void *)p, struct usbhsh_request, pkt)
+
+/*
+ * req alloc/free
+ */
+static void usbhsh_req_list_init(struct usbhsh_hpriv *hpriv)
+{
+ INIT_LIST_HEAD(&hpriv->ureq_link_active);
+ INIT_LIST_HEAD(&hpriv->ureq_link_free);
+}
+
+static void usbhsh_req_list_quit(struct usbhsh_hpriv *hpriv)
+{
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct usbhsh_request *ureq, *next;
+
+ /* kfree all active ureq */
+ list_for_each_entry_safe(ureq, next,
+ &hpriv->ureq_link_active,
+ ureq_link) {
+ dev_err(dev, "active ureq (%p) is force freed\n", ureq);
+ kfree(ureq);
+ }
+
+ /* kfree all free ureq */
+ list_for_each_entry_safe(ureq, next, &hpriv->ureq_link_free, ureq_link)
+ kfree(ureq);
+}
+
+static struct usbhsh_request *usbhsh_req_alloc(struct usbhsh_hpriv *hpriv,
+ struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct usbhsh_request *ureq;
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+
+ if (list_empty(&hpriv->ureq_link_free)) {
+ /*
+ * create new one if there is no free ureq
+ */
+ ureq = kzalloc(sizeof(struct usbhsh_request), mem_flags);
+ if (ureq)
+ INIT_LIST_HEAD(&ureq->ureq_link);
+ } else {
+ /*
+ * reuse "free" ureq if exist
+ */
+ ureq = list_entry(hpriv->ureq_link_free.next,
+ struct usbhsh_request,
+ ureq_link);
+ if (ureq)
+ list_del_init(&ureq->ureq_link);
+ }
+
+ if (!ureq) {
+ dev_err(dev, "ureq alloc fail\n");
+ return NULL;
+ }
+
+ usbhs_pkt_init(&ureq->pkt);
+
+ /*
+ * push it to "active" list
+ */
+ list_add_tail(&ureq->ureq_link, &hpriv->ureq_link_active);
+ ureq->urb = urb;
+
+ return ureq;
+}
+
+static void usbhsh_req_free(struct usbhsh_hpriv *hpriv,
+ struct usbhsh_request *ureq)
+{
+ struct usbhs_pkt *pkt = &ureq->pkt;
+
+ usbhs_pkt_init(pkt);
+
+ /*
+ * removed from "active" list,
+ * and push it to "free" list
+ */
+ ureq->urb = NULL;
+ list_del_init(&ureq->ureq_link);
+ list_add_tail(&ureq->ureq_link, &hpriv->ureq_link_free);
+}
+
+/*
+ * device control
+ */
+
+static int usbhsh_device_has_endpoint(struct usbhsh_device *udev)
+{
+ return !list_empty(&udev->ep_list_head);
+}
+
+static struct usbhsh_device *usbhsh_device_alloc(struct usbhsh_hpriv *hpriv,
+ struct urb *urb)
+{
+ struct usbhsh_device *udev = NULL;
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ int i;
+
+ /*
+ * device 0
+ */
+ if (0 == usb_pipedevice(urb->pipe)) {
+ udev = usbhsh_device0(hpriv);
+ goto usbhsh_device_find;
+ }
+
+ /*
+ * find unused device
+ */
+ usbhsh_for_each_udev(udev, hpriv, i) {
+ if (usbhsh_udev_to_usbv(udev))
+ continue;
+ goto usbhsh_device_find;
+ }
+
+ dev_err(dev, "no free usbhsh_device\n");
+
+ return NULL;
+
+usbhsh_device_find:
+ if (usbhsh_device_has_endpoint(udev))
+ dev_warn(dev, "udev have old endpoint\n");
+
+ /* uep will be attached */
+ INIT_LIST_HEAD(&udev->ep_list_head);
+
+ /*
+ * usbhsh_usbv_to_udev()
+ * usbhsh_udev_to_usbv()
+ * will be enable
+ */
+ dev_set_drvdata(&usbv->dev, udev);
+ udev->usbv = usbv;
+
+ /* set device config */
+ usbhs_set_device_speed(priv,
+ usbhsh_device_number(hpriv, udev),
+ usbhsh_device_number(hpriv, udev),
+ 0, /* FIXME no parent */
+ usbv->speed);
+
+ dev_dbg(dev, "%s [%d](%p)\n", __func__,
+ usbhsh_device_number(hpriv, udev), udev);
+
+ return udev;
+}
+
+static void usbhsh_device_free(struct usbhsh_hpriv *hpriv,
+ struct usbhsh_device *udev)
+{
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct usb_device *usbv = usbhsh_udev_to_usbv(udev);
+
+ dev_dbg(dev, "%s [%d](%p)\n", __func__,
+ usbhsh_device_number(hpriv, udev), udev);
+
+ if (usbhsh_device_has_endpoint(udev))
+ dev_warn(dev, "udev still have endpoint\n");
+
+ /*
+ * usbhsh_usbv_to_udev()
+ * usbhsh_udev_to_usbv()
+ * will be disable
+ */
+ dev_set_drvdata(&usbv->dev, NULL);
+ udev->usbv = NULL;
+}
+
+/*
+ * end-point control
+ */
+struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
+ struct usbhsh_device *udev,
+ struct usb_host_endpoint *ep,
+ gfp_t mem_flags)
+{
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct usbhsh_ep *uep;
+ struct usbhsh_pipe_info *info;
+ struct usbhs_pipe *pipe, *best_pipe;
+ struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct usb_endpoint_descriptor *desc = &ep->desc;
+ int type, i;
+ unsigned int min_usr;
+
+ uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags);
+ if (!uep) {
+ dev_err(dev, "usbhsh_ep alloc fail\n");
+ return NULL;
+ }
+ type = usb_endpoint_type(desc);
+
+ /*
+ * find best pipe for endpoint
+ * see
+ * HARDWARE LIMITATION
+ */
+ min_usr = ~0;
+ best_pipe = NULL;
+ usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
+ if (!usbhs_pipe_type_is(pipe, type))
+ continue;
+
+ info = usbhsh_pipe_info(pipe);
+
+ if (min_usr > info->usr_cnt) {
+ min_usr = info->usr_cnt;
+ best_pipe = pipe;
+ }
+ }
+
+ if (unlikely(!best_pipe)) {
+ dev_err(dev, "couldn't find best pipe\n");
+ kfree(uep);
+ return NULL;
+ }
+
+ /*
+ * init uep
+ */
+ uep->pipe = best_pipe;
+ uep->maxp = usb_endpoint_maxp(desc);
+ usbhsh_uep_to_udev(uep) = udev;
+ usbhsh_ep_to_uep(ep) = uep;
+
+ /*
+ * update pipe user count
+ */
+ info = usbhsh_pipe_info(best_pipe);
+ info->usr_cnt++;
+
+ /* init this endpoint, and attach it to udev */
+ INIT_LIST_HEAD(&uep->ep_list);
+ list_add_tail(&uep->ep_list, &udev->ep_list_head);
+
+ /*
+ * usbhs_pipe_config_update() should be called after
+ * usbhs_device_config()
+ * see
+ * DCPMAXP/PIPEMAXP
+ */
+ usbhs_pipe_config_update(uep->pipe,
+ usbhsh_device_number(hpriv, udev),
+ usb_endpoint_num(desc),
+ uep->maxp);
+
+ dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
+ usbhsh_device_number(hpriv, udev),
+ usbhs_pipe_name(pipe), uep);
+
+ return uep;
+}
+
+void usbhsh_endpoint_free(struct usbhsh_hpriv *hpriv,
+ struct usb_host_endpoint *ep)
+{
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(ep);
+ struct usbhsh_pipe_info *info;
+
+ if (!uep)
+ return;
+
+ dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
+ usbhsh_device_number(hpriv, usbhsh_uep_to_udev(uep)),
+ usbhs_pipe_name(uep->pipe), uep);
+
+ info = usbhsh_pipe_info(uep->pipe);
+ info->usr_cnt--;
+
+ /* remove this endpoint from udev */
+ list_del_init(&uep->ep_list);
+
+ usbhsh_uep_to_udev(uep) = NULL;
+ usbhsh_ep_to_uep(ep) = NULL;
+
+ kfree(uep);
+}
+
+/*
+ * queue push/pop
+ */
+static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
+{
+ struct usbhsh_request *ureq = usbhsh_pkt_to_req(pkt);
+ struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct urb *urb = ureq->urb;
+ struct device *dev = usbhs_priv_to_dev(priv);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (!urb) {
+ dev_warn(dev, "pkt doesn't have urb\n");
+ return;
+ }
+
+ urb->actual_length = pkt->actual;
+ usbhsh_req_free(hpriv, ureq);
+ usbhsh_urb_to_ureq(urb) = NULL;
+
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ usb_hcd_giveback_urb(hcd, urb, 0);
+}
+
+static int usbhsh_queue_push(struct usb_hcd *hcd,
+ struct usbhs_pipe *pipe,
+ struct urb *urb)
+{
+ struct usbhsh_request *ureq = usbhsh_urb_to_ureq(urb);
+ struct usbhs_pkt *pkt = &ureq->pkt;
+ struct device *dev = usbhsh_hcd_to_dev(hcd);
+ void *buf;
+ int len;
+
+ if (usb_pipeisoc(urb->pipe)) {
+ dev_err(dev, "pipe iso is not supported now\n");
+ return -EIO;
+ }
+
+ if (usb_pipein(urb->pipe))
+ pipe->handler = &usbhs_fifo_pio_pop_handler;
+ else
+ pipe->handler = &usbhs_fifo_pio_push_handler;
+
+ buf = (void *)(urb->transfer_buffer + urb->actual_length);
+ len = urb->transfer_buffer_length - urb->actual_length;
+
+ dev_dbg(dev, "%s\n", __func__);
+ usbhs_pkt_push(pipe, pkt, usbhsh_queue_done,
+ buf, len, (urb->transfer_flags & URB_ZERO_PACKET));
+ usbhs_pkt_start(pipe);
+
+ return 0;
+}
+
+/*
+ * DCP setup stage
+ */
+static int usbhsh_is_request_address(struct urb *urb)
+{
+ struct usb_ctrlrequest *cmd;
+
+ cmd = (struct usb_ctrlrequest *)urb->setup_packet;
+
+ if ((DeviceOutRequest == cmd->bRequestType << 8) &&
+ (USB_REQ_SET_ADDRESS == cmd->bRequest))
+ return 1;
+ else
+ return 0;
+}
+
+static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
+ struct urb *urb,
+ struct usbhs_pipe *pipe)
+{
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct usb_ctrlrequest req;
+ struct device *dev = usbhs_priv_to_dev(priv);
+
+ /*
+ * wait setup packet ACK
+ * see
+ * usbhsh_irq_setup_ack()
+ * usbhsh_irq_setup_err()
+ */
+ DECLARE_COMPLETION(done);
+ hpriv->done = &done;
+
+ /* copy original request */
+ memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest));
+
+ /*
+ * renesas_usbhs can not use original usb address.
+ * see HARDWARE LIMITATION.
+ * modify usb address here.
+ */
+ if (usbhsh_is_request_address(urb)) {
+ /* FIXME */
+ req.wValue = 1;
+ dev_dbg(dev, "create new address - %d\n", req.wValue);
+ }
+
+ /* set request */
+ usbhs_usbreq_set_val(priv, &req);
+
+ /*
+ * wait setup packet ACK
+ */
+ wait_for_completion(&done);
+ hpriv->done = NULL;
+
+ dev_dbg(dev, "%s done\n", __func__);
+}
+
+/*
+ * DCP data stage
+ */
+static void usbhsh_data_stage_packet_done(struct usbhs_priv *priv,
+ struct usbhs_pkt *pkt)
+{
+ struct usbhsh_request *ureq = usbhsh_pkt_to_req(pkt);
+ struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
+ struct urb *urb = ureq->urb;
+
+ /* this ureq was connected to urb when usbhsh_urb_enqueue() */
+
+ usbhsh_req_free(hpriv, ureq);
+ usbhsh_urb_to_ureq(urb) = NULL;
+}
+
+static void usbhsh_data_stage_packet_push(struct usbhsh_hpriv *hpriv,
+ struct urb *urb,
+ struct usbhs_pipe *pipe)
+{
+ struct usbhsh_request *ureq;
+ struct usbhs_pkt *pkt;
+
+ /*
+ * FIXME
+ *
+ * data stage uses ureq which is connected to urb
+ * see usbhsh_urb_enqueue() :: alloc new request.
+ * it will be freed in usbhsh_data_stage_packet_done()
+ */
+ ureq = usbhsh_urb_to_ureq(urb);
+ pkt = &ureq->pkt;
+
+ if (usb_pipein(urb->pipe))
+ pipe->handler = &usbhs_dcp_data_stage_in_handler;
+ else
+ pipe->handler = &usbhs_dcp_data_stage_out_handler;
+
+ usbhs_pkt_push(pipe, pkt,
+ usbhsh_data_stage_packet_done,
+ urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ (urb->transfer_flags & URB_ZERO_PACKET));
+}
+
+/*
+ * DCP status stage
+ */
+static void usbhsh_status_stage_packet_push(struct usbhsh_hpriv *hpriv,
+ struct urb *urb,
+ struct usbhs_pipe *pipe)
+{
+ struct usbhsh_request *ureq;
+ struct usbhs_pkt *pkt;
+
+ /*
+ * FIXME
+ *
+ * status stage uses allocated ureq.
+ * it will be freed on usbhsh_queue_done()
+ */
+ ureq = usbhsh_req_alloc(hpriv, urb, GFP_KERNEL);
+ pkt = &ureq->pkt;
+
+ if (usb_pipein(urb->pipe))
+ pipe->handler = &usbhs_dcp_status_stage_in_handler;
+ else
+ pipe->handler = &usbhs_dcp_status_stage_out_handler;
+
+ usbhs_pkt_push(pipe, pkt,
+ usbhsh_queue_done,
+ NULL,
+ urb->transfer_buffer_length,
+ 0);
+}
+
+static int usbhsh_dcp_queue_push(struct usb_hcd *hcd,
+ struct usbhsh_hpriv *hpriv,
+ struct usbhs_pipe *pipe,
+ struct urb *urb)
+{
+ struct device *dev = usbhsh_hcd_to_dev(hcd);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /*
+ * setup stage
+ *
+ * usbhsh_send_setup_stage_packet() wait SACK/SIGN
+ */
+ usbhsh_setup_stage_packet_push(hpriv, urb, pipe);
+
+ /*
+ * data stage
+ *
+ * It is pushed only when urb has buffer.
+ */
+ if (urb->transfer_buffer_length)
+ usbhsh_data_stage_packet_push(hpriv, urb, pipe);
+
+ /*
+ * status stage
+ */
+ usbhsh_status_stage_packet_push(hpriv, urb, pipe);
+
+ /*
+ * start pushed packets
+ */
+ usbhs_pkt_start(pipe);
+
+ return 0;
+}
+
+/*
+ * dma map functions
+ */
+static int usbhsh_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
+{
+ return 0;
+}
+
+/*
+ * for hc_driver
+ */
+static int usbhsh_host_start(struct usb_hcd *hcd)
+{
+ return 0;
+}
+
+static void usbhsh_host_stop(struct usb_hcd *hcd)
+{
+}
+
+static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
+ struct usb_host_endpoint *ep = urb->ep;
+ struct usbhsh_request *ureq;
+ struct usbhsh_device *udev, *new_udev = NULL;
+ struct usbhs_pipe *pipe;
+ struct usbhsh_ep *uep;
+
+ int ret;
+
+ dev_dbg(dev, "%s (%s)\n",
+ __func__, usb_pipein(urb->pipe) ? "in" : "out");
+
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto usbhsh_urb_enqueue_error_not_linked;
+
+ /*
+ * get udev
+ */
+ udev = usbhsh_usbv_to_udev(usbv);
+ if (!udev) {
+ new_udev = usbhsh_device_alloc(hpriv, urb);
+ if (!new_udev)
+ goto usbhsh_urb_enqueue_error_not_linked;
+
+ udev = new_udev;
+ }
+
+ /*
+ * get uep
+ */
+ uep = usbhsh_ep_to_uep(ep);
+ if (!uep) {
+ uep = usbhsh_endpoint_alloc(hpriv, udev, ep, mem_flags);
+ if (!uep)
+ goto usbhsh_urb_enqueue_error_free_device;
+ }
+ pipe = usbhsh_uep_to_pipe(uep);
+
+ /*
+ * alloc new request
+ */
+ ureq = usbhsh_req_alloc(hpriv, urb, mem_flags);
+ if (unlikely(!ureq)) {
+ ret = -ENOMEM;
+ goto usbhsh_urb_enqueue_error_free_endpoint;
+ }
+ usbhsh_urb_to_ureq(urb) = ureq;
+
+ /*
+ * push packet
+ */
+ if (usb_pipecontrol(urb->pipe))
+ usbhsh_dcp_queue_push(hcd, hpriv, pipe, urb);
+ else
+ usbhsh_queue_push(hcd, pipe, urb);
+
+ return 0;
+
+usbhsh_urb_enqueue_error_free_endpoint:
+ usbhsh_endpoint_free(hpriv, ep);
+usbhsh_urb_enqueue_error_free_device:
+ if (new_udev)
+ usbhsh_device_free(hpriv, new_udev);
+usbhsh_urb_enqueue_error_not_linked:
+
+ dev_dbg(dev, "%s error\n", __func__);
+
+ return ret;
+}
+
+static int usbhsh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
+ struct usbhsh_request *ureq = usbhsh_urb_to_ureq(urb);
+
+ if (ureq) {
+ usbhsh_req_free(hpriv, ureq);
+ usbhsh_urb_to_ureq(urb) = NULL;
+ }
+
+ return 0;
+}
+
+static void usbhsh_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(ep);
+ struct usbhsh_device *udev;
+ struct usbhsh_hpriv *hpriv;
+
+ /*
+ * this function might be called manytimes by same hcd/ep
+ * in-endpoitn == out-endpoint if ep == dcp.
+ */
+ if (!uep)
+ return;
+
+ udev = usbhsh_uep_to_udev(uep);
+ hpriv = usbhsh_hcd_to_hpriv(hcd);
+
+ usbhsh_endpoint_free(hpriv, ep);
+ ep->hcpriv = NULL;
+
+ /*
+ * if there is no endpoint,
+ * free device
+ */
+ if (!usbhsh_device_has_endpoint(udev))
+ usbhsh_device_free(hpriv, udev);
+}
+
+static int usbhsh_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int roothub_id = 1; /* only 1 root hub */
+
+ /*
+ * does port stat was changed ?
+ * check USB_PORT_STAT_C_xxx << 16
+ */
+ if (usbhsh_port_stat_get(hpriv) & 0xFFFF0000)
+ *buf = (1 << roothub_id);
+ else
+ *buf = 0;
+
+ dev_dbg(dev, "%s (%02x)\n", __func__, *buf);
+
+ return !!(*buf);
+}
+
+static int __usbhsh_hub_hub_feature(struct usbhsh_hpriv *hpriv,
+ u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ case C_HUB_LOCAL_POWER:
+ dev_dbg(dev, "%s :: C_HUB_xx\n", __func__);
+ return 0;
+ }
+
+ return -EPIPE;
+}
+
+static int __usbhsh_hub_port_feature(struct usbhsh_hpriv *hpriv,
+ u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int enable = (typeReq == SetPortFeature);
+ int speed, i, timeout = 128;
+ int roothub_id = 1; /* only 1 root hub */
+
+ /* common error */
+ if (wIndex > roothub_id || wLength != 0)
+ return -EPIPE;
+
+ /* check wValue */
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ usbhs_vbus_ctrl(priv, enable);
+ dev_dbg(dev, "%s :: USB_PORT_FEAT_POWER\n", __func__);
+ break;
+
+ case USB_PORT_FEAT_ENABLE:
+ case USB_PORT_FEAT_SUSPEND:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ case USB_PORT_FEAT_C_RESET:
+ dev_dbg(dev, "%s :: USB_PORT_FEAT_xxx\n", __func__);
+ break;
+
+ case USB_PORT_FEAT_RESET:
+ if (!enable)
+ break;
+
+ usbhsh_port_stat_clear(hpriv,
+ USB_PORT_STAT_HIGH_SPEED |
+ USB_PORT_STAT_LOW_SPEED);
+
+ usbhs_bus_send_reset(priv);
+ msleep(20);
+ usbhs_bus_send_sof_enable(priv);
+
+ for (i = 0; i < timeout ; i++) {
+ switch (usbhs_bus_get_speed(priv)) {
+ case USB_SPEED_LOW:
+ speed = USB_PORT_STAT_LOW_SPEED;
+ goto got_usb_bus_speed;
+ case USB_SPEED_HIGH:
+ speed = USB_PORT_STAT_HIGH_SPEED;
+ goto got_usb_bus_speed;
+ case USB_SPEED_FULL:
+ speed = 0;
+ goto got_usb_bus_speed;
+ }
+
+ msleep(20);
+ }
+ return -EPIPE;
+
+got_usb_bus_speed:
+ usbhsh_port_stat_set(hpriv, speed);
+ usbhsh_port_stat_set(hpriv, USB_PORT_STAT_ENABLE);
+
+ dev_dbg(dev, "%s :: USB_PORT_FEAT_RESET (speed = %d)\n",
+ __func__, speed);
+
+ /* status change is not needed */
+ return 0;
+
+ default:
+ return -EPIPE;
+ }
+
+ /* set/clear status */
+ if (enable)
+ usbhsh_port_stat_set(hpriv, (1 << wValue));
+ else
+ usbhsh_port_stat_clear(hpriv, (1 << wValue));
+
+ return 0;
+}
+
+static int __usbhsh_hub_get_status(struct usbhsh_hpriv *hpriv,
+ u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct usb_hub_descriptor *desc = (struct usb_hub_descriptor *)buf;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int roothub_id = 1; /* only 1 root hub */
+
+ switch (typeReq) {
+ case GetHubStatus:
+ dev_dbg(dev, "%s :: GetHubStatus\n", __func__);
+
+ *buf = 0x00;
+ break;
+
+ case GetPortStatus:
+ if (wIndex != roothub_id)
+ return -EPIPE;
+
+ dev_dbg(dev, "%s :: GetPortStatus\n", __func__);
+ *(__le32 *)buf = cpu_to_le32(usbhsh_port_stat_get(hpriv));
+ break;
+
+ case GetHubDescriptor:
+ desc->bDescriptorType = 0x29;
+ desc->bHubContrCurrent = 0;
+ desc->bNbrPorts = roothub_id;
+ desc->bDescLength = 9;
+ desc->bPwrOn2PwrGood = 0;
+ desc->wHubCharacteristics = cpu_to_le16(0x0011);
+ desc->u.hs.DeviceRemovable[0] = (roothub_id << 1);
+ desc->u.hs.DeviceRemovable[1] = ~0;
+ dev_dbg(dev, "%s :: GetHubDescriptor\n", __func__);
+ break;
+ }
+
+ return 0;
+}
+
+static int usbhsh_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int ret = -EPIPE;
+
+ switch (typeReq) {
+
+ /* Hub Feature */
+ case ClearHubFeature:
+ case SetHubFeature:
+ ret = __usbhsh_hub_hub_feature(hpriv, typeReq,
+ wValue, wIndex, buf, wLength);
+ break;
+
+ /* Port Feature */
+ case SetPortFeature:
+ case ClearPortFeature:
+ ret = __usbhsh_hub_port_feature(hpriv, typeReq,
+ wValue, wIndex, buf, wLength);
+ break;
+
+ /* Get status */
+ case GetHubStatus:
+ case GetPortStatus:
+ case GetHubDescriptor:
+ ret = __usbhsh_hub_get_status(hpriv, typeReq,
+ wValue, wIndex, buf, wLength);
+ break;
+ }
+
+ dev_dbg(dev, "typeReq = %x, ret = %d, port_stat = %x\n",
+ typeReq, ret, usbhsh_port_stat_get(hpriv));
+
+ return ret;
+}
+
+static struct hc_driver usbhsh_driver = {
+ .description = usbhsh_hcd_name,
+ .hcd_priv_size = sizeof(struct usbhsh_hpriv),
+
+ /*
+ * generic hardware linkage
+ */
+ .flags = HCD_USB2,
+
+ .start = usbhsh_host_start,
+ .stop = usbhsh_host_stop,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = usbhsh_urb_enqueue,
+ .urb_dequeue = usbhsh_urb_dequeue,
+ .endpoint_disable = usbhsh_endpoint_disable,
+
+ /*
+ * root hub
+ */
+ .hub_status_data = usbhsh_hub_status_data,
+ .hub_control = usbhsh_hub_control,
+};
+
+/*
+ * interrupt functions
+ */
+static int usbhsh_irq_attch(struct usbhs_priv *priv,
+ struct usbhs_irq_state *irq_state)
+{
+ struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+
+ dev_dbg(dev, "device attached\n");
+
+ usbhsh_port_stat_set(hpriv, USB_PORT_STAT_CONNECTION);
+ usbhsh_port_stat_set(hpriv, USB_PORT_STAT_C_CONNECTION << 16);
+
+ return 0;
+}
+
+static int usbhsh_irq_dtch(struct usbhs_priv *priv,
+ struct usbhs_irq_state *irq_state)
+{
+ struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+
+ dev_dbg(dev, "device detached\n");
+
+ usbhsh_port_stat_clear(hpriv, USB_PORT_STAT_CONNECTION);
+ usbhsh_port_stat_set(hpriv, USB_PORT_STAT_C_CONNECTION << 16);
+
+ return 0;
+}
+
+static int usbhsh_irq_setup_ack(struct usbhs_priv *priv,
+ struct usbhs_irq_state *irq_state)
+{
+ struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+
+ dev_dbg(dev, "setup packet OK\n");
+
+ if (unlikely(!hpriv->done))
+ dev_err(dev, "setup ack happen without necessary data\n");
+ else
+ complete(hpriv->done); /* see usbhsh_urb_enqueue() */
+
+ return 0;
+}
+
+static int usbhsh_irq_setup_err(struct usbhs_priv *priv,
+ struct usbhs_irq_state *irq_state)
+{
+ struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+
+ dev_dbg(dev, "setup packet Err\n");
+
+ if (unlikely(!hpriv->done))
+ dev_err(dev, "setup err happen without necessary data\n");
+ else
+ complete(hpriv->done); /* see usbhsh_urb_enqueue() */
+
+ return 0;
+}
+
+/*
+ * module start/stop
+ */
+static void usbhsh_pipe_init_for_host(struct usbhs_priv *priv)
+{
+ struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
+ struct usbhsh_pipe_info *pipe_info = hpriv->pipe_info;
+ struct usbhs_pipe *pipe;
+ u32 *pipe_type = usbhs_get_dparam(priv, pipe_type);
+ int pipe_size = usbhs_get_dparam(priv, pipe_size);
+ int old_type, dir_in, i;
+
+ /* init all pipe */
+ old_type = USB_ENDPOINT_XFER_CONTROL;
+ for (i = 0; i < pipe_size; i++) {
+ pipe_info[i].usr_cnt = 0;
+
+ /*
+ * data "output" will be finished as soon as possible,
+ * but there is no guaranty at data "input" case.
+ *
+ * "input" needs "standby" pipe.
+ * So, "input" direction pipe > "output" direction pipe
+ * is good idea.
+ *
+ * 1st USB_ENDPOINT_XFER_xxx will be output direction,
+ * and the other will be input direction here.
+ *
+ * ex)
+ * ...
+ * USB_ENDPOINT_XFER_ISOC -> dir out
+ * USB_ENDPOINT_XFER_ISOC -> dir in
+ * USB_ENDPOINT_XFER_BULK -> dir out
+ * USB_ENDPOINT_XFER_BULK -> dir in
+ * USB_ENDPOINT_XFER_BULK -> dir in
+ * ...
+ */
+ dir_in = (pipe_type[i] == old_type);
+ old_type = pipe_type[i];
+
+ if (USB_ENDPOINT_XFER_CONTROL == pipe_type[i]) {
+ pipe = usbhs_dcp_malloc(priv);
+ usbhsh_hpriv_to_dcp(hpriv) = pipe;
+ } else {
+ pipe = usbhs_pipe_malloc(priv,
+ pipe_type[i],
+ dir_in);
+ }
+
+ pipe->mod_private = pipe_info + i;
+ }
+}
+
+static int usbhsh_start(struct usbhs_priv *priv)
+{
+ struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct usbhs_mod *mod = usbhs_mod_get_current(priv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int ret;
+
+ /* add hcd */
+ ret = usb_add_hcd(hcd, 0, 0);
+ if (ret < 0)
+ return 0;
+
+ /*
+ * pipe initialize and enable DCP
+ */
+ usbhs_pipe_init(priv,
+ usbhsh_dma_map_ctrl);
+ usbhs_fifo_init(priv);
+ usbhsh_pipe_init_for_host(priv);
+
+ /*
+ * system config enble
+ * - HI speed
+ * - host
+ * - usb module
+ */
+ usbhs_sys_hispeed_ctrl(priv, 1);
+ usbhs_sys_host_ctrl(priv, 1);
+ usbhs_sys_usb_ctrl(priv, 1);
+
+ /*
+ * enable irq callback
+ */
+ mod->irq_attch = usbhsh_irq_attch;
+ mod->irq_dtch = usbhsh_irq_dtch;
+ mod->irq_sack = usbhsh_irq_setup_ack;
+ mod->irq_sign = usbhsh_irq_setup_err;
+ usbhs_irq_callback_update(priv, mod);
+
+ dev_dbg(dev, "start host\n");
+
+ return ret;
+}
+
+static int usbhsh_stop(struct usbhs_priv *priv)
+{
+ struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+
+ usb_remove_hcd(hcd);
+
+ /* disable sys */
+ usbhs_sys_hispeed_ctrl(priv, 0);
+ usbhs_sys_host_ctrl(priv, 0);
+ usbhs_sys_usb_ctrl(priv, 0);
+
+ dev_dbg(dev, "quit host\n");
+
+ return 0;
+}
+
+int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
+{
+ struct usbhsh_hpriv *hpriv;
+ struct usb_hcd *hcd;
+ struct usbhsh_pipe_info *pipe_info;
+ struct usbhsh_device *udev;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int pipe_size = usbhs_get_dparam(priv, pipe_size);
+ int i;
+
+ /* initialize hcd */
+ hcd = usb_create_hcd(&usbhsh_driver, dev, usbhsh_hcd_name);
+ if (!hcd) {
+ dev_err(dev, "Failed to create hcd\n");
+ return -ENOMEM;
+ }
+
+ pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
+ if (!pipe_info) {
+ dev_err(dev, "Could not allocate pipe_info\n");
+ goto usbhs_mod_host_probe_err;
+ }
+
+ /*
+ * CAUTION
+ *
+ * There is no guarantee that it is possible to access usb module here.
+ * Don't accesses to it.
+ * The accesse will be enable after "usbhsh_start"
+ */
+
+ hpriv = usbhsh_hcd_to_hpriv(hcd);
+
+ /*
+ * register itself
+ */
+ usbhs_mod_register(priv, &hpriv->mod, USBHS_HOST);
+
+ /* init hpriv */
+ hpriv->mod.name = "host";
+ hpriv->mod.start = usbhsh_start;
+ hpriv->mod.stop = usbhsh_stop;
+ hpriv->pipe_info = pipe_info;
+ hpriv->pipe_size = pipe_size;
+ hpriv->done = NULL;
+ usbhsh_req_list_init(hpriv);
+ usbhsh_port_stat_init(hpriv);
+
+ /* init all device */
+ usbhsh_for_each_udev_with_dev0(udev, hpriv, i) {
+ udev->usbv = NULL;
+ INIT_LIST_HEAD(&udev->ep_list_head);
+ }
+
+ dev_info(dev, "host probed\n");
+
+ return 0;
+
+usbhs_mod_host_probe_err:
+ usb_put_hcd(hcd);
+
+ return -ENOMEM;
+}
+
+int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv)
+{
+ struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+
+ usbhsh_req_list_quit(hpriv);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index 1b14cae4570..c74389ce217 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -29,9 +29,6 @@
#define usbhsp_flags_has(p, f) ((p)->flags & USBHS_PIPE_FLAGS_##f)
#define usbhsp_flags_init(p) do {(p)->flags = 0; } while (0)
-#define usbhsp_type(p) ((p)->pipe_type)
-#define usbhsp_type_is(p, t) ((p)->pipe_type == t)
-
/*
* for debug
*/
@@ -42,28 +39,9 @@ static char *usbhsp_pipe_name[] = {
[USB_ENDPOINT_XFER_ISOC] = "ISO",
};
-/*
- * usb request functions
- */
-void usbhs_usbreq_get_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
+char *usbhs_pipe_name(struct usbhs_pipe *pipe)
{
- u16 val;
-
- val = usbhs_read(priv, USBREQ);
- req->bRequest = (val >> 8) & 0xFF;
- req->bRequestType = (val >> 0) & 0xFF;
-
- req->wValue = usbhs_read(priv, USBVAL);
- req->wIndex = usbhs_read(priv, USBINDX);
- req->wLength = usbhs_read(priv, USBLENG);
-}
-
-void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
-{
- usbhs_write(priv, USBREQ, (req->bRequest << 8) | req->bRequestType);
- usbhs_write(priv, USBVAL, req->wValue);
- usbhs_write(priv, USBINDX, req->wIndex);
- usbhs_write(priv, USBLENG, req->wLength);
+ return usbhsp_pipe_name[usbhs_pipe_type(pipe)];
}
/*
@@ -106,17 +84,6 @@ static void __usbhsp_pipe_xxx_set(struct usbhs_pipe *pipe,
usbhs_bset(priv, pipe_reg, mask, val);
}
-static u16 __usbhsp_pipe_xxx_get(struct usbhs_pipe *pipe,
- u16 dcp_reg, u16 pipe_reg)
-{
- struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
-
- if (usbhs_pipe_is_dcp(pipe))
- return usbhs_read(priv, dcp_reg);
- else
- return usbhs_read(priv, pipe_reg);
-}
-
/*
* DCPCFG/PIPECFG functions
*/
@@ -144,11 +111,6 @@ static void usbhsp_pipe_maxp_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
__usbhsp_pipe_xxx_set(pipe, DCPMAXP, PIPEMAXP, mask, val);
}
-static u16 usbhsp_pipe_maxp_get(struct usbhs_pipe *pipe)
-{
- return __usbhsp_pipe_xxx_get(pipe, DCPMAXP, PIPEMAXP);
-}
-
/*
* pipe control functions
*/
@@ -303,16 +265,16 @@ static int usbhsp_possible_double_buffer(struct usbhs_pipe *pipe)
/*
* only ISO / BULK pipe can use double buffer
*/
- if (usbhsp_type_is(pipe, USB_ENDPOINT_XFER_BULK) ||
- usbhsp_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
+ if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK) ||
+ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
return 1;
return 0;
}
static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe,
- const struct usb_endpoint_descriptor *desc,
- int is_host)
+ int is_host,
+ int dir_in)
{
u16 type = 0;
u16 bfre = 0;
@@ -341,40 +303,40 @@ static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe,
*/
/* TYPE */
- type = type_array[usbhsp_type(pipe)];
+ type = type_array[usbhs_pipe_type(pipe)];
/* BFRE */
- if (usbhsp_type_is(pipe, USB_ENDPOINT_XFER_ISOC) ||
- usbhsp_type_is(pipe, USB_ENDPOINT_XFER_BULK))
+ if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC) ||
+ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK))
bfre = 0; /* FIXME */
/* DBLB */
- if (usbhsp_type_is(pipe, USB_ENDPOINT_XFER_ISOC) ||
- usbhsp_type_is(pipe, USB_ENDPOINT_XFER_BULK))
+ if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC) ||
+ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK))
dblb = (is_double) ? DBLB : 0;
/* CNTMD */
- if (usbhsp_type_is(pipe, USB_ENDPOINT_XFER_BULK))
+ if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK))
cntmd = 0; /* FIXME */
/* DIR */
- if (usb_endpoint_dir_in(desc))
+ if (dir_in)
usbhsp_flags_set(pipe, IS_DIR_HOST);
- if ((is_host && usb_endpoint_dir_out(desc)) ||
- (!is_host && usb_endpoint_dir_in(desc)))
+ if ((is_host && !dir_in) ||
+ (!is_host && dir_in))
dir |= DIR_OUT;
if (!dir)
usbhsp_flags_set(pipe, IS_DIR_IN);
/* SHTNAK */
- if (usbhsp_type_is(pipe, USB_ENDPOINT_XFER_BULK) &&
+ if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK) &&
!dir)
shtnak = SHTNAK;
/* EPNUM */
- epnum = 0xF & usb_endpoint_num(desc);
+ epnum = 0; /* see usbhs_pipe_config_update() */
return type |
bfre |
@@ -385,19 +347,7 @@ static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe,
epnum;
}
-static u16 usbhsp_setup_pipemaxp(struct usbhs_pipe *pipe,
- const struct usb_endpoint_descriptor *desc,
- int is_host)
-{
- /* host should set DEVSEL */
-
- /* reutn MXPS */
- return PIPE_MAXP_MASK & le16_to_cpu(desc->wMaxPacketSize);
-}
-
-static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe,
- const struct usb_endpoint_descriptor *desc,
- int is_host)
+static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
@@ -441,9 +391,9 @@ static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe,
* INT : 64 byte
* ISOC: 512 byte
*/
- if (usbhsp_type_is(pipe, USB_ENDPOINT_XFER_CONTROL))
+ if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_CONTROL))
buff_size = 256;
- else if (usbhsp_type_is(pipe, USB_ENDPOINT_XFER_INT))
+ else if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_INT))
buff_size = 64;
else
buff_size = 512;
@@ -453,7 +403,7 @@ static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe,
/* BUFNMB has been reserved for INT pipe
* see above */
- if (usbhsp_type_is(pipe, USB_ENDPOINT_XFER_INT)) {
+ if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_INT)) {
bufnmb = pipe_num - 2;
} else {
bufnmb = info->bufnmb_last;
@@ -473,16 +423,42 @@ static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe,
(0xff & bufnmb) << 0;
}
+void usbhs_pipe_config_update(struct usbhs_pipe *pipe, u16 devsel,
+ u16 epnum, u16 maxp)
+{
+ if (devsel > 0xA) {
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct device *dev = usbhs_priv_to_dev(priv);
+
+ dev_err(dev, "devsel error %d\n", devsel);
+
+ devsel = 0;
+ }
+
+ usbhsp_pipe_barrier(pipe);
+
+ pipe->maxp = maxp;
+
+ usbhsp_pipe_select(pipe);
+ usbhsp_pipe_maxp_set(pipe, 0xFFFF,
+ (devsel << 12) |
+ maxp);
+
+ if (!usbhs_pipe_is_dcp(pipe))
+ usbhsp_pipe_cfg_set(pipe, 0x000F, epnum);
+}
+
/*
* pipe control
*/
int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe)
{
- u16 mask = usbhs_pipe_is_dcp(pipe) ? DCP_MAXP_MASK : PIPE_MAXP_MASK;
-
- usbhsp_pipe_select(pipe);
-
- return (int)(usbhsp_pipe_maxp_get(pipe) & mask);
+ /*
+ * see
+ * usbhs_pipe_config_update()
+ * usbhs_dcp_malloc()
+ */
+ return pipe->maxp;
}
int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe)
@@ -495,9 +471,12 @@ int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
return usbhsp_flags_has(pipe, IS_DIR_HOST);
}
-void usbhs_pipe_clear_sequence(struct usbhs_pipe *pipe)
+void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int data)
{
- usbhsp_pipectrl_set(pipe, SQCLR, SQCLR);
+ u16 mask = (SQCLR | SQSET);
+ u16 val = (data) ? SQSET : SQCLR;
+
+ usbhsp_pipectrl_set(pipe, mask, val);
}
void usbhs_pipe_clear(struct usbhs_pipe *pipe)
@@ -516,7 +495,7 @@ static struct usbhs_pipe *usbhsp_get_pipe(struct usbhs_priv *priv, u32 type)
*/
pipe = NULL;
usbhs_for_each_pipe_with_dcp(pos, priv, i) {
- if (!usbhsp_type_is(pos, type))
+ if (!usbhs_pipe_type_is(pos, type))
continue;
if (usbhsp_flags_has(pos, IS_USED))
continue;
@@ -538,19 +517,12 @@ static struct usbhs_pipe *usbhsp_get_pipe(struct usbhs_priv *priv, u32 type)
}
void usbhs_pipe_init(struct usbhs_priv *priv,
- void (*done)(struct usbhs_pkt *pkt),
int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map))
{
struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
- struct device *dev = usbhs_priv_to_dev(priv);
struct usbhs_pipe *pipe;
int i;
- if (!done) {
- dev_err(dev, "no done function\n");
- return;
- }
-
/*
* FIXME
*
@@ -565,7 +537,7 @@ void usbhs_pipe_init(struct usbhs_priv *priv,
*/
info->bufnmb_last = 4;
usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
- if (usbhsp_type_is(pipe, USB_ENDPOINT_XFER_INT))
+ if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_INT))
info->bufnmb_last++;
usbhsp_flags_init(pipe);
@@ -577,24 +549,23 @@ void usbhs_pipe_init(struct usbhs_priv *priv,
usbhs_pipe_clear(pipe);
}
- info->done = done;
info->dma_map_ctrl = dma_map_ctrl;
}
struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
- const struct usb_endpoint_descriptor *desc)
+ int endpoint_type,
+ int dir_in)
{
struct device *dev = usbhs_priv_to_dev(priv);
- struct usbhs_mod *mod = usbhs_mod_get_current(priv);
struct usbhs_pipe *pipe;
- int is_host = usbhs_mod_is_host(priv, mod);
+ int is_host = usbhs_mod_is_host(priv);
int ret;
- u16 pipecfg, pipebuf, pipemaxp;
+ u16 pipecfg, pipebuf;
- pipe = usbhsp_get_pipe(priv, usb_endpoint_type(desc));
+ pipe = usbhsp_get_pipe(priv, endpoint_type);
if (!pipe) {
dev_err(dev, "can't get pipe (%s)\n",
- usbhsp_pipe_name[usb_endpoint_type(desc)]);
+ usbhsp_pipe_name[endpoint_type]);
return NULL;
}
@@ -609,22 +580,25 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
return NULL;
}
- pipecfg = usbhsp_setup_pipecfg(pipe, desc, is_host);
- pipebuf = usbhsp_setup_pipebuff(pipe, desc, is_host);
- pipemaxp = usbhsp_setup_pipemaxp(pipe, desc, is_host);
+ pipecfg = usbhsp_setup_pipecfg(pipe, is_host, dir_in);
+ pipebuf = usbhsp_setup_pipebuff(pipe);
usbhsp_pipe_select(pipe);
usbhsp_pipe_cfg_set(pipe, 0xFFFF, pipecfg);
usbhsp_pipe_buf_set(pipe, 0xFFFF, pipebuf);
- usbhsp_pipe_maxp_set(pipe, 0xFFFF, pipemaxp);
- usbhs_pipe_clear_sequence(pipe);
+ usbhs_pipe_sequence_data0(pipe);
dev_dbg(dev, "enable pipe %d : %s (%s)\n",
usbhs_pipe_number(pipe),
- usbhsp_pipe_name[usb_endpoint_type(desc)],
+ usbhs_pipe_name(pipe),
usbhs_pipe_is_dir_in(pipe) ? "in" : "out");
+ /*
+ * epnum / maxp are still not set to this pipe.
+ * call usbhs_pipe_config_update() after this function !!
+ */
+
return pipe;
}
@@ -651,25 +625,31 @@ struct usbhs_pipe *usbhs_dcp_malloc(struct usbhs_priv *priv)
if (!pipe)
return NULL;
+ INIT_LIST_HEAD(&pipe->list);
+
/*
- * dcpcfg : default
- * dcpmaxp : default
- * pipebuf : nothing to do
+ * call usbhs_pipe_config_update() after this function !!
*/
- usbhsp_pipe_select(pipe);
- usbhs_pipe_clear_sequence(pipe);
- INIT_LIST_HEAD(&pipe->list);
-
return pipe;
}
void usbhs_dcp_control_transfer_done(struct usbhs_pipe *pipe)
{
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+
WARN_ON(!usbhs_pipe_is_dcp(pipe));
usbhs_pipe_enable(pipe);
- usbhsp_pipectrl_set(pipe, CCPL, CCPL);
+
+ if (!usbhs_mod_is_host(priv)) /* funconly */
+ usbhsp_pipectrl_set(pipe, CCPL, CCPL);
+}
+
+void usbhs_dcp_dir_for_host(struct usbhs_pipe *pipe, int dir_out)
+{
+ usbhsp_pipe_cfg_set(pipe, DIR_OUT,
+ dir_out ? DIR_OUT : 0);
}
/*
@@ -703,7 +683,9 @@ int usbhs_pipe_probe(struct usbhs_priv *priv)
*/
usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
pipe->priv = priv;
- usbhsp_type(pipe) = pipe_type[i] & USB_ENDPOINT_XFERTYPE_MASK;
+
+ usbhs_pipe_type(pipe) =
+ pipe_type[i] & USB_ENDPOINT_XFERTYPE_MASK;
dev_dbg(dev, "pipe %x\t: %s\n",
i, usbhsp_pipe_name[pipe_type[i]]);
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 41534cb0e73..6334fc644cc 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -30,11 +30,15 @@ struct usbhs_pipe {
struct usbhs_fifo *fifo;
struct list_head list;
+ int maxp;
+
u32 flags;
#define USBHS_PIPE_FLAGS_IS_USED (1 << 0)
#define USBHS_PIPE_FLAGS_IS_DIR_IN (1 << 1)
#define USBHS_PIPE_FLAGS_IS_DIR_HOST (1 << 2)
+ struct usbhs_pkt_handle *handler;
+
void *mod_private;
};
@@ -43,7 +47,6 @@ struct usbhs_pipe_info {
int size; /* array size of "pipe" */
int bufnmb_last; /* FIXME : driver needs good allocator */
- void (*done)(struct usbhs_pkt *pkt);
int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map);
};
@@ -67,32 +70,30 @@ struct usbhs_pipe_info {
#define usbhs_priv_to_pipeinfo(pr) (&(pr)->pipe_info)
/*
- * usb request
- */
-void usbhs_usbreq_get_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req);
-void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req);
-
-/*
* pipe control
*/
+char *usbhs_pipe_name(struct usbhs_pipe *pipe);
struct usbhs_pipe
-*usbhs_pipe_malloc(struct usbhs_priv *priv,
- const struct usb_endpoint_descriptor *desc);
+*usbhs_pipe_malloc(struct usbhs_priv *priv, int endpoint_type, int dir_in);
int usbhs_pipe_probe(struct usbhs_priv *priv);
void usbhs_pipe_remove(struct usbhs_priv *priv);
int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe);
int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe);
void usbhs_pipe_init(struct usbhs_priv *priv,
- void (*done)(struct usbhs_pkt *pkt),
int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map));
int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe);
-void usbhs_pipe_clear_sequence(struct usbhs_pipe *pipe);
void usbhs_pipe_clear(struct usbhs_pipe *pipe);
int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
void usbhs_pipe_enable(struct usbhs_pipe *pipe);
void usbhs_pipe_disable(struct usbhs_pipe *pipe);
void usbhs_pipe_stall(struct usbhs_pipe *pipe);
void usbhs_pipe_select_fifo(struct usbhs_pipe *pipe, struct usbhs_fifo *fifo);
+void usbhs_pipe_config_update(struct usbhs_pipe *pipe, u16 devsel,
+ u16 epnum, u16 maxp);
+
+#define usbhs_pipe_sequence_data0(pipe) usbhs_pipe_data_sequence(pipe, 0)
+#define usbhs_pipe_sequence_data1(pipe) usbhs_pipe_data_sequence(pipe, 1)
+void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int data);
#define usbhs_pipe_to_priv(p) ((p)->priv)
#define usbhs_pipe_number(p) (int)((p) - (p)->priv->pipe_info.pipe)
@@ -100,10 +101,14 @@ void usbhs_pipe_select_fifo(struct usbhs_pipe *pipe, struct usbhs_fifo *fifo);
#define usbhs_pipe_to_fifo(p) ((p)->fifo)
#define usbhs_pipe_is_busy(p) usbhs_pipe_to_fifo(p)
+#define usbhs_pipe_type(p) ((p)->pipe_type)
+#define usbhs_pipe_type_is(p, t) ((p)->pipe_type == t)
+
/*
* dcp control
*/
struct usbhs_pipe *usbhs_dcp_malloc(struct usbhs_priv *priv);
void usbhs_dcp_control_transfer_done(struct usbhs_pipe *pipe);
+void usbhs_dcp_dir_for_host(struct usbhs_pipe *pipe, int dir_out);
#endif /* RENESAS_USB_PIPE_H */
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index b71e309116a..677f577c024 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -252,6 +252,7 @@ config USB_SERIAL_GARMIN
config USB_SERIAL_IPW
tristate "USB IPWireless (3G UMTS TDD) Driver"
+ select USB_SERIAL_WWAN
help
Say Y here if you want to use a IPWireless USB modem such as
the ones supplied by Axity3G/Sentech South Africa.
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 86fbba6336c..e92cbefc0f8 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -227,7 +227,7 @@
* - All sleeps use a timeout of DIGI_RETRY_TIMEOUT before looping to
* recheck the condition they are sleeping on. This is defensive,
* in case a wake up is lost.
-* - Following Documentation/DocBook/kernel-locking.pdf no spin locks
+* - Following Documentation/DocBook/kernel-locking.tmpl no spin locks
* are held when calling copy_to/from_user or printk.
*/
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 5fc13e71791..8fe034d2d3e 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -73,6 +73,7 @@ struct ftdi_private {
*/
int flags; /* some ASYNC_xxxx flags are supported */
unsigned long last_dtr_rts; /* saved modem control outputs */
+ struct async_icount icount;
wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
char prev_status, diff_status; /* Used for TIOCMIWAIT */
char transmit_empty; /* If transmitter is empty or not */
@@ -207,6 +208,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_XF_640_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_642_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DSS20_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_URBAN_0_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_URBAN_1_PID) },
{ USB_DEVICE(FTDI_NF_RIC_VID, FTDI_NF_RIC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_VNHCPCUSB_D_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_0_PID) },
@@ -745,6 +748,8 @@ static struct usb_device_id id_table_combined [] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, LMI_LM3S_ICDI_BOARD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
@@ -885,6 +890,8 @@ static void ftdi_set_termios(struct tty_struct *tty,
static int ftdi_tiocmget(struct tty_struct *tty);
static int ftdi_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
+static int ftdi_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount);
static int ftdi_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
static void ftdi_break_ctl(struct tty_struct *tty, int break_state);
@@ -919,6 +926,7 @@ static struct usb_serial_driver ftdi_sio_device = {
.prepare_write_buffer = ftdi_prepare_write_buffer,
.tiocmget = ftdi_tiocmget,
.tiocmset = ftdi_tiocmset,
+ .get_icount = ftdi_get_icount,
.ioctl = ftdi_ioctl,
.set_termios = ftdi_set_termios,
.break_ctl = ftdi_break_ctl,
@@ -1486,7 +1494,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
}
/* set max packet size based on descriptor */
- priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);
+ priv->max_packet_size = usb_endpoint_maxp(ep_desc);
dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
}
@@ -1646,6 +1654,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
kref_init(&priv->kref);
mutex_init(&priv->cfg_lock);
+ memset(&priv->icount, 0x00, sizeof(priv->icount));
init_waitqueue_head(&priv->delta_msr_wait);
priv->flags = ASYNC_LOW_LATENCY;
@@ -1909,6 +1918,7 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
c = kfifo_out(&port->write_fifo, &buffer[i + 1], len);
if (!c)
break;
+ priv->icount.tx += c;
buffer[i] = (c << 2) + 1;
count += c + 1;
}
@@ -1916,6 +1926,7 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
} else {
count = kfifo_out_locked(&port->write_fifo, dest, size,
&port->lock);
+ priv->icount.tx += count;
}
return count;
@@ -1943,6 +1954,14 @@ static int ftdi_process_packet(struct tty_struct *tty,
N.B. packet may be processed more than once, but differences
are only processed once. */
status = packet[0] & FTDI_STATUS_B0_MASK;
+ if (status & FTDI_RS0_CTS)
+ priv->icount.cts++;
+ if (status & FTDI_RS0_DSR)
+ priv->icount.dsr++;
+ if (status & FTDI_RS0_RI)
+ priv->icount.rng++;
+ if (status & FTDI_RS0_RLSD)
+ priv->icount.dcd++;
if (status != priv->prev_status) {
priv->diff_status |= status ^ priv->prev_status;
wake_up_interruptible(&priv->delta_msr_wait);
@@ -1955,15 +1974,20 @@ static int ftdi_process_packet(struct tty_struct *tty,
* over framing errors */
if (packet[1] & FTDI_RS_BI) {
flag = TTY_BREAK;
+ priv->icount.brk++;
usb_serial_handle_break(port);
} else if (packet[1] & FTDI_RS_PE) {
flag = TTY_PARITY;
+ priv->icount.parity++;
} else if (packet[1] & FTDI_RS_FE) {
flag = TTY_FRAME;
+ priv->icount.frame++;
}
/* Overrun is special, not associated with a char */
- if (packet[1] & FTDI_RS_OE)
+ if (packet[1] & FTDI_RS_OE) {
+ priv->icount.overrun++;
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
}
/* save if the transmitter is empty or not */
@@ -1975,6 +1999,7 @@ static int ftdi_process_packet(struct tty_struct *tty,
len -= 2;
if (!len)
return 0; /* status only */
+ priv->icount.rx += len;
ch = packet + 2;
if (port->port.console && port->sysrq) {
@@ -2277,11 +2302,34 @@ static int ftdi_tiocmset(struct tty_struct *tty,
return update_mctrl(port, set, clear);
}
+static int ftdi_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct async_icount *ic = &priv->icount;
+
+ icount->cts = ic->cts;
+ icount->dsr = ic->dsr;
+ icount->rng = ic->rng;
+ icount->dcd = ic->dcd;
+ icount->tx = ic->tx;
+ icount->rx = ic->rx;
+ icount->frame = ic->frame;
+ icount->parity = ic->parity;
+ icount->overrun = ic->overrun;
+ icount->brk = ic->brk;
+ icount->buf_overrun = ic->buf_overrun;
+ return 0;
+}
+
static int ftdi_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct async_icount cnow;
+ struct async_icount cprev;
dbg("%s cmd 0x%04x", __func__, cmd);
@@ -2301,41 +2349,30 @@ static int ftdi_ioctl(struct tty_struct *tty,
* - mask passed in arg for lines of interest
* (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
* Caller should use TIOCGICOUNT to see which one it was.
- * (except that the driver doesn't support it !)
*
* This code is borrowed from linux/drivers/char/serial.c
*/
case TIOCMIWAIT:
- while (priv != NULL) {
+ cprev = priv->icount;
+ while (1) {
interruptible_sleep_on(&priv->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
- else {
- char diff = priv->diff_status;
-
- if (diff == 0)
- return -EIO; /* no change => error */
-
- /* Consume all events */
- priv->diff_status = 0;
-
- /* Return 0 if caller wanted to know about
- these bits */
- if (((arg & TIOCM_RNG) && (diff & FTDI_RS0_RI)) ||
- ((arg & TIOCM_DSR) && (diff & FTDI_RS0_DSR)) ||
- ((arg & TIOCM_CD) && (diff & FTDI_RS0_RLSD)) ||
- ((arg & TIOCM_CTS) && (diff & FTDI_RS0_CTS))) {
- return 0;
- }
- /*
- * Otherwise caller can't care less about what
- * happened,and so we continue to wait for more
- * events.
- */
+ cnow = priv->icount;
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
+ return -EIO; /* no change => error */
+ if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+ ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
+ ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
+ return 0;
}
+ cprev = cnow;
}
- return 0;
+ /* not reached */
+ break;
case TIOCSERGETLSR:
return get_lsr_info(port, (struct serial_struct __user *)arg);
break;
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index bf5227ad3ef..571fa96b49c 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -54,6 +54,7 @@
/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
+#define LMI_LM3S_ICDI_BOARD_PID 0xbcda
#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */
@@ -420,9 +421,11 @@
#define PROTEGO_SPECIAL_4 0xFC73 /* special/unknown device */
/*
- * DSS-20 Sync Station for Sony Ericsson P800
+ * Sony Ericsson product ids
*/
-#define FTDI_DSS20_PID 0xFC82
+#define FTDI_DSS20_PID 0xFC82 /* DSS-20 Sync Station for Sony Ericsson P800 */
+#define FTDI_URBAN_0_PID 0xFC8A /* Sony Ericsson Urban, uart #0 */
+#define FTDI_URBAN_1_PID 0xFC8B /* Sony Ericsson Urban, uart #1 */
/* www.irtrans.de device */
#define FTDI_IRTRANS_PID 0xFC60 /* Product Id */
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index abf095be575..2ee807523f5 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -3042,7 +3042,7 @@ static int edge_startup(struct usb_serial *serial)
endpoint = &serial->interface->altsetting[0].
endpoint[i].desc;
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ buffer_size = usb_endpoint_maxp(endpoint);
if (!interrupt_in_found &&
(usb_endpoint_is_int_in(endpoint))) {
/* we found a interrupt in endpoint */
@@ -3107,7 +3107,7 @@ static int edge_startup(struct usb_serial *serial)
usb_rcvbulkpipe(dev,
endpoint->bEndpointAddress),
edge_serial->bulk_in_buffer,
- le16_to_cpu(endpoint->wMaxPacketSize),
+ usb_endpoint_maxp(endpoint),
edge_bulk_in_callback,
edge_serial);
bulk_in_found = true;
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index ca77e88836b..5170799d6e9 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -47,6 +47,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
+#include "usb-wwan.h"
/*
* Version Information
@@ -185,7 +186,7 @@ static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port)
/*--2: Start reading from the device */
dbg("%s: setting up bulk read callback", __func__);
- usb_serial_generic_open(tty, port);
+ usb_wwan_open(tty, port);
/*--3: Tell the modem to open the floodgates on the rx bulk channel */
dbg("%s:asking modem for RxRead (RXBULK_ON)", __func__);
@@ -219,6 +220,29 @@ static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port)
return 0;
}
+/* fake probe - only to allocate data structures */
+static int ipw_probe(struct usb_serial *serial, const struct usb_device_id *id)
+{
+ struct usb_wwan_intf_private *data;
+
+ data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&data->susp_lock);
+ usb_set_serial_data(serial, data);
+ return 0;
+}
+
+static void ipw_release(struct usb_serial *serial)
+{
+ struct usb_wwan_intf_private *data = usb_get_serial_data(serial);
+
+ usb_wwan_release(serial);
+ usb_set_serial_data(serial, NULL);
+ kfree(data);
+}
+
static void ipw_dtr_rts(struct usb_serial_port *port, int on)
{
struct usb_device *dev = port->serial->dev;
@@ -285,7 +309,7 @@ static void ipw_close(struct usb_serial_port *port)
dev_err(&port->dev,
"Disabling bulk RxRead failed (error = %d)\n", result);
- usb_serial_generic_close(port);
+ usb_wwan_close(port);
}
static struct usb_serial_driver ipw_device = {
@@ -297,9 +321,14 @@ static struct usb_serial_driver ipw_device = {
.usb_driver = &usb_ipw_driver,
.id_table = usb_ipw_ids,
.num_ports = 1,
+ .disconnect = usb_wwan_disconnect,
.open = ipw_open,
.close = ipw_close,
+ .probe = ipw_probe,
+ .attach = usb_wwan_startup,
+ .release = ipw_release,
.dtr_rts = ipw_dtr_rts,
+ .write = usb_wwan_write,
};
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 40abedbc594..3524a105d04 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -2006,7 +2006,6 @@ static int mos7720_ioctl(struct tty_struct *tty,
dbg("%s (%d) TIOCSERGETLSR", __func__, port->number);
return get_lsr_info(tty, mos7720_port,
(unsigned int __user *)arg);
- return 0;
/* FIXME: These should be using the mode methods */
case TIOCMBIS:
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 7b50aa12275..c72abd52498 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -2263,7 +2263,6 @@ static int mos7840_ioctl(struct tty_struct *tty,
case TIOCSERGETLSR:
dbg("%s (%d) TIOCSERGETLSR", __func__, port->number);
return mos7840_get_lsr_info(tty, argp);
- return 0;
case TIOCGSERIAL:
dbg("%s (%d) TIOCGSERIAL", __func__, port->number);
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 96423f3c8ef..c248a914743 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -523,7 +523,7 @@ static int opticon_startup(struct usb_serial *serial)
goto error;
}
- priv->buffer_size = le16_to_cpu(endpoint->wMaxPacketSize) * 2;
+ priv->buffer_size = usb_endpoint_maxp(endpoint) * 2;
priv->bulk_in_buffer = kmalloc(priv->buffer_size, GFP_KERNEL);
if (!priv->bulk_in_buffer) {
dev_err(&priv->udev->dev, "out of memory\n");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index fe22e90bc87..89ae1f65e1b 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -475,31 +475,54 @@ enum option_blacklist_reason {
OPTION_BLACKLIST_RESERVED_IF = 2
};
+#define MAX_BL_NUM 8
struct option_blacklist_info {
- const u32 infolen; /* number of interface numbers on blacklist */
- const u8 *ifaceinfo; /* pointer to the array holding the numbers */
- enum option_blacklist_reason reason;
+ /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */
+ const unsigned long sendsetup;
+ /* bitfield of interface numbers for OPTION_BLACKLIST_RESERVED_IF */
+ const unsigned long reserved;
};
-static const u8 four_g_w14_no_sendsetup[] = { 0, 1 };
static const struct option_blacklist_info four_g_w14_blacklist = {
- .infolen = ARRAY_SIZE(four_g_w14_no_sendsetup),
- .ifaceinfo = four_g_w14_no_sendsetup,
- .reason = OPTION_BLACKLIST_SENDSETUP
+ .sendsetup = BIT(0) | BIT(1),
};
-static const u8 alcatel_x200_no_sendsetup[] = { 0, 1 };
static const struct option_blacklist_info alcatel_x200_blacklist = {
- .infolen = ARRAY_SIZE(alcatel_x200_no_sendsetup),
- .ifaceinfo = alcatel_x200_no_sendsetup,
- .reason = OPTION_BLACKLIST_SENDSETUP
+ .sendsetup = BIT(0) | BIT(1),
+};
+
+static const struct option_blacklist_info zte_0037_blacklist = {
+ .sendsetup = BIT(0) | BIT(1),
};
-static const u8 zte_k3765_z_no_sendsetup[] = { 0, 1, 2 };
static const struct option_blacklist_info zte_k3765_z_blacklist = {
- .infolen = ARRAY_SIZE(zte_k3765_z_no_sendsetup),
- .ifaceinfo = zte_k3765_z_no_sendsetup,
- .reason = OPTION_BLACKLIST_SENDSETUP
+ .sendsetup = BIT(0) | BIT(1) | BIT(2),
+ .reserved = BIT(4),
+};
+
+static const struct option_blacklist_info huawei_cdc12_blacklist = {
+ .reserved = BIT(1) | BIT(2),
+};
+
+static const struct option_blacklist_info net_intf1_blacklist = {
+ .reserved = BIT(1),
+};
+
+static const struct option_blacklist_info net_intf3_blacklist = {
+ .reserved = BIT(3),
+};
+
+static const struct option_blacklist_info net_intf4_blacklist = {
+ .reserved = BIT(4),
+};
+
+static const struct option_blacklist_info net_intf5_blacklist = {
+ .reserved = BIT(5),
+};
+
+static const struct option_blacklist_info zte_mf626_blacklist = {
+ .sendsetup = BIT(0) | BIT(1),
+ .reserved = BIT(4),
};
static const struct usb_device_id option_ids[] = {
@@ -599,12 +622,15 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
@@ -705,7 +731,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
@@ -720,51 +747,62 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
/* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff,
- 0xff, 0xff), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist },
+ 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mf626_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_0037_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
/* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
@@ -779,11 +817,13 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
@@ -1214,10 +1254,35 @@ static void __exit option_exit(void)
module_init(option_init);
module_exit(option_exit);
+static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason,
+ const struct option_blacklist_info *blacklist)
+{
+ unsigned long num;
+ const unsigned long *intf_list;
+
+ if (blacklist) {
+ if (reason == OPTION_BLACKLIST_SENDSETUP)
+ intf_list = &blacklist->sendsetup;
+ else if (reason == OPTION_BLACKLIST_RESERVED_IF)
+ intf_list = &blacklist->reserved;
+ else {
+ BUG_ON(reason);
+ return false;
+ }
+
+ for_each_set_bit(num, intf_list, MAX_BL_NUM + 1) {
+ if (num == ifnum)
+ return true;
+ }
+ }
+ return false;
+}
+
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
struct usb_wwan_intf_private *data;
+
/* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
@@ -1230,14 +1295,14 @@ static int option_probe(struct usb_serial *serial,
serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
return -ENODEV;
- /* Don't bind network interfaces on Huawei K3765, K4505 & K4605 */
- if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&
- (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 ||
- serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505 ||
- serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4605) &&
- (serial->interface->cur_altsetting->desc.bInterfaceNumber == 1 ||
- serial->interface->cur_altsetting->desc.bInterfaceNumber == 2))
- return -ENODEV;
+ /* Don't bind reserved interfaces (like network ones) which often have
+ * the same class/subclass/protocol as the serial interfaces. Look at
+ * the Windows driver .INF files for reserved interface numbers.
+ */
+ if (is_blacklisted(
+ serial->interface->cur_altsetting->desc.bInterfaceNumber,
+ OPTION_BLACKLIST_RESERVED_IF,
+ (const struct option_blacklist_info *) id->driver_info))
/* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */
if (serial->dev->descriptor.idVendor == SAMSUNG_VENDOR_ID &&
@@ -1246,7 +1311,6 @@ static int option_probe(struct usb_serial *serial,
return -ENODEV;
data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
-
if (!data)
return -ENOMEM;
data->send_setup = option_send_setup;
@@ -1255,23 +1319,6 @@ static int option_probe(struct usb_serial *serial,
return 0;
}
-static enum option_blacklist_reason is_blacklisted(const u8 ifnum,
- const struct option_blacklist_info *blacklist)
-{
- const u8 *info;
- int i;
-
- if (blacklist) {
- info = blacklist->ifaceinfo;
-
- for (i = 0; i < blacklist->infolen; i++) {
- if (info[i] == ifnum)
- return blacklist->reason;
- }
- }
- return OPTION_BLACKLIST_NONE;
-}
-
static void option_instat_callback(struct urb *urb)
{
int err;
@@ -1343,9 +1390,8 @@ static int option_send_setup(struct usb_serial_port *port)
int val = 0;
dbg("%s", __func__);
- if (is_blacklisted(ifNum,
- (struct option_blacklist_info *) intfdata->private)
- == OPTION_BLACKLIST_SENDSETUP) {
+ if (is_blacklisted(ifNum, OPTION_BLACKLIST_SENDSETUP,
+ (struct option_blacklist_info *) intfdata->private)) {
dbg("No send_setup on blacklisted interface #%d\n", ifNum);
return -EIO;
}
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 1d33260de01..9083d1e616b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -92,6 +92,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
{ USB_DEVICE(WINCHIPHEAD_VENDOR_ID, WINCHIPHEAD_USBSER_PRODUCT_ID) },
+ { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -360,9 +361,6 @@ static void pl2303_set_termios(struct tty_struct *tty,
tmp >>= 2;
buf[1] <<= 1;
}
- if (tmp > 256) {
- tmp %= 256;
- }
buf[0] = tmp;
}
}
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index ca0d237683b..3d10d7f0207 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -148,3 +148,8 @@
/* WinChipHead USB->RS 232 adapter */
#define WINCHIPHEAD_VENDOR_ID 0x4348
#define WINCHIPHEAD_USBSER_PRODUCT_ID 0x5523
+
+/* SMART USB Serial Adapter */
+#define SMART_VENDOR_ID 0x0b8c
+#define SMART_PRODUCT_ID 0x2303
+
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index aeccc7f0a93..b9bb24729c9 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -28,6 +28,7 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
{USB_DEVICE(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */
+ {USB_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
{USB_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
{USB_DEVICE(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */
{USB_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
@@ -84,6 +85,7 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
{USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
+ {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index d5d136a53b6..b18179bda0d 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -1009,7 +1009,7 @@ static int sierra_suspend(struct usb_serial *serial, pm_message_t message)
struct sierra_intf_private *intfdata;
int b;
- if (message.event & PM_EVENT_AUTO) {
+ if (PMSG_IS_AUTO(message)) {
intfdata = serial->private;
spin_lock_irq(&intfdata->susp_lock);
b = intfdata->in_flight;
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index d9457bd4fe1..7096f799b07 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -226,7 +226,7 @@ static int symbol_startup(struct usb_serial *serial)
goto error;
}
- priv->buffer_size = le16_to_cpu(endpoint->wMaxPacketSize) * 2;
+ priv->buffer_size = usb_endpoint_maxp(endpoint) * 2;
priv->int_buffer = kmalloc(priv->buffer_size, GFP_KERNEL);
if (!priv->int_buffer) {
dev_err(&priv->udev->dev, "out of memory\n");
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 1c031309ab2..cc274fdf262 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -912,7 +912,7 @@ int usb_serial_probe(struct usb_interface *interface,
goto probe_error;
}
buffer_size = max_t(int, serial->type->bulk_in_size,
- le16_to_cpu(endpoint->wMaxPacketSize));
+ usb_endpoint_maxp(endpoint));
port->bulk_in_size = buffer_size;
port->bulk_in_endpointAddress = endpoint->bEndpointAddress;
port->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
@@ -942,7 +942,7 @@ int usb_serial_probe(struct usb_interface *interface,
goto probe_error;
buffer_size = serial->type->bulk_out_size;
if (!buffer_size)
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ buffer_size = usb_endpoint_maxp(endpoint);
port->bulk_out_size = buffer_size;
port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
port->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
@@ -990,7 +990,7 @@ int usb_serial_probe(struct usb_interface *interface,
"No free urbs available\n");
goto probe_error;
}
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ buffer_size = usb_endpoint_maxp(endpoint);
port->interrupt_in_endpointAddress =
endpoint->bEndpointAddress;
port->interrupt_in_buffer = kmalloc(buffer_size,
@@ -1021,7 +1021,7 @@ int usb_serial_probe(struct usb_interface *interface,
"No free urbs available\n");
goto probe_error;
}
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ buffer_size = usb_endpoint_maxp(endpoint);
port->interrupt_out_size = buffer_size;
port->interrupt_out_endpointAddress =
endpoint->bEndpointAddress;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index e4fad5e643d..d555ca9567b 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -651,7 +651,7 @@ int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
dbg("%s entered", __func__);
- if (message.event & PM_EVENT_AUTO) {
+ if (PMSG_IS_AUTO(message)) {
spin_lock_irq(&intfdata->susp_lock);
b = intfdata->in_flight;
spin_unlock_irq(&intfdata->susp_lock);
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index bedc4b9f2ac..fe2d803a634 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -42,7 +42,7 @@ config USB_STORAGE_REALTEK
config REALTEK_AUTOPM
bool "Realtek Card Reader autosuspend support"
- depends on USB_STORAGE_REALTEK && CONFIG_PM_RUNTIME
+ depends on USB_STORAGE_REALTEK && PM_RUNTIME
default y
config USB_STORAGE_DATAFAB
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 34adc4b42ce..0ce5f79197e 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -25,7 +25,6 @@
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -293,6 +292,52 @@ static int rts51x_bulk_transport(struct us_data *us, u8 lun,
return USB_STOR_TRANSPORT_ERROR;
}
+static int rts51x_bulk_transport_special(struct us_data *us, u8 lun,
+ u8 *cmd, int cmd_len, u8 *buf, int buf_len,
+ enum dma_data_direction dir, int *act_len)
+{
+ struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+ struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
+ int result;
+ unsigned int cswlen;
+ unsigned int cbwlen = US_BULK_CB_WRAP_LEN;
+
+ /* set up the command wrapper */
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->DataTransferLength = cpu_to_le32(buf_len);
+ bcb->Flags = (dir == DMA_FROM_DEVICE) ? 1 << 7 : 0;
+ bcb->Tag = ++us->tag;
+ bcb->Lun = lun;
+ bcb->Length = cmd_len;
+
+ /* copy the command payload */
+ memset(bcb->CDB, 0, sizeof(bcb->CDB));
+ memcpy(bcb->CDB, cmd, bcb->Length);
+
+ /* send it to out endpoint */
+ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
+ bcb, cbwlen, NULL);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ /* DATA STAGE */
+ /* send/receive data payload, if there is any */
+
+ if (buf && buf_len) {
+ unsigned int pipe = (dir == DMA_FROM_DEVICE) ?
+ us->recv_bulk_pipe : us->send_bulk_pipe;
+ result = usb_stor_bulk_transfer_buf(us, pipe,
+ buf, buf_len, NULL);
+ if (result == USB_STOR_XFER_ERROR)
+ return USB_STOR_TRANSPORT_ERROR;
+ }
+
+ /* get CSW for device status */
+ result = usb_bulk_msg(us->pusb_dev, us->recv_bulk_pipe, bcs,
+ US_BULK_CS_WRAP_LEN, &cswlen, 250);
+ return result;
+}
+
/* Determine what the maximum LUN supported is */
static int rts51x_get_max_lun(struct us_data *us)
{
@@ -320,6 +365,11 @@ static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
{
int retval;
u8 cmnd[12] = { 0 };
+ u8 *buf;
+
+ buf = kmalloc(len, GFP_NOIO);
+ if (buf == NULL)
+ return USB_STOR_TRANSPORT_ERROR;
US_DEBUGP("%s, addr = 0x%x, len = %d\n", __func__, addr, len);
@@ -331,10 +381,14 @@ static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
cmnd[5] = (u8) len;
retval = rts51x_bulk_transport(us, 0, cmnd, 12,
- data, len, DMA_FROM_DEVICE, NULL);
- if (retval != USB_STOR_TRANSPORT_GOOD)
+ buf, len, DMA_FROM_DEVICE, NULL);
+ if (retval != USB_STOR_TRANSPORT_GOOD) {
+ kfree(buf);
return -EIO;
+ }
+ memcpy(data, buf, len);
+ kfree(buf);
return 0;
}
@@ -342,6 +396,12 @@ static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
{
int retval;
u8 cmnd[12] = { 0 };
+ u8 *buf;
+
+ buf = kmalloc(len, GFP_NOIO);
+ if (buf == NULL)
+ return USB_STOR_TRANSPORT_ERROR;
+ memcpy(buf, data, len);
US_DEBUGP("%s, addr = 0x%x, len = %d\n", __func__, addr, len);
@@ -353,7 +413,8 @@ static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
cmnd[5] = (u8) len;
retval = rts51x_bulk_transport(us, 0, cmnd, 12,
- data, len, DMA_TO_DEVICE, NULL);
+ buf, len, DMA_TO_DEVICE, NULL);
+ kfree(buf);
if (retval != USB_STOR_TRANSPORT_GOOD)
return -EIO;
@@ -365,6 +426,11 @@ static int rts51x_read_status(struct us_data *us,
{
int retval;
u8 cmnd[12] = { 0 };
+ u8 *buf;
+
+ buf = kmalloc(len, GFP_NOIO);
+ if (buf == NULL)
+ return USB_STOR_TRANSPORT_ERROR;
US_DEBUGP("%s, lun = %d\n", __func__, lun);
@@ -372,10 +438,14 @@ static int rts51x_read_status(struct us_data *us,
cmnd[1] = 0x09;
retval = rts51x_bulk_transport(us, lun, cmnd, 12,
- status, len, DMA_FROM_DEVICE, actlen);
- if (retval != USB_STOR_TRANSPORT_GOOD)
+ buf, len, DMA_FROM_DEVICE, actlen);
+ if (retval != USB_STOR_TRANSPORT_GOOD) {
+ kfree(buf);
return -EIO;
+ }
+ memcpy(status, buf, len);
+ kfree(buf);
return 0;
}
@@ -434,6 +504,29 @@ static int enable_oscillator(struct us_data *us)
return 0;
}
+static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len)
+{
+ int retval;
+ u16 addr = 0xFE47;
+ u8 cmnd[12] = {0};
+
+ US_DEBUGP("%s, addr = 0x%x, len = %d\n", __FUNCTION__, addr, len);
+
+ cmnd[0] = 0xF0;
+ cmnd[1] = 0x0E;
+ cmnd[2] = (u8)(addr >> 8);
+ cmnd[3] = (u8)addr;
+ cmnd[4] = (u8)(len >> 8);
+ cmnd[5] = (u8)len;
+
+ retval = rts51x_bulk_transport_special(us, 0, cmnd, 12, data, len, DMA_TO_DEVICE, NULL);
+ if (retval != USB_STOR_TRANSPORT_GOOD) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int do_config_autodelink(struct us_data *us, int enable, int force)
{
int retval;
@@ -454,7 +547,8 @@ static int do_config_autodelink(struct us_data *us, int enable, int force)
US_DEBUGP("In %s,set 0xfe47 to 0x%x\n", __func__, value);
- retval = rts51x_write_mem(us, 0xFE47, &value, 1);
+ /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */
+ retval = __do_config_autodelink(us, &value, 1);
if (retval < 0)
return -EIO;
@@ -486,7 +580,8 @@ static int config_autodelink_after_power_on(struct us_data *us)
SET_BIT(value, 7);
- retval = rts51x_write_mem(us, 0xFE47, &value, 1);
+ /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */
+ retval = __do_config_autodelink(us, &value, 1);
if (retval < 0)
return -EIO;
@@ -507,7 +602,8 @@ static int config_autodelink_after_power_on(struct us_data *us)
CLR_BIT(value, 7);
}
- retval = rts51x_write_mem(us, 0xFE47, &value, 1);
+ /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */
+ retval = __do_config_autodelink(us, &value, 1);
if (retval < 0)
return -EIO;
@@ -584,7 +680,8 @@ static int config_autodelink_before_power_down(struct us_data *us)
if (CHECK_ID(chip, 0x0138, 0x3882))
SET_BIT(value, 2);
- retval = rts51x_write_mem(us, 0xFE47, &value, 1);
+ /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */
+ retval = __do_config_autodelink(us, &value, 1);
if (retval < 0)
return -EIO;
}
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index e8ae21b2d38..ff32390d61e 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -691,6 +691,9 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
int temp_result;
struct scsi_eh_save ses;
int sense_size = US_SENSE_SIZE;
+ struct scsi_sense_hdr sshdr;
+ const u8 *scdd;
+ u8 fm_ili;
/* device supports and needs bigger sense buffer */
if (us->fflags & US_FL_SANE_SENSE)
@@ -774,32 +777,30 @@ Retry_Sense:
srb->sense_buffer[7] = (US_SENSE_SIZE - 8);
}
+ scsi_normalize_sense(srb->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+ &sshdr);
+
US_DEBUGP("-- Result from auto-sense is %d\n", temp_result);
US_DEBUGP("-- code: 0x%x, key: 0x%x, ASC: 0x%x, ASCQ: 0x%x\n",
- srb->sense_buffer[0],
- srb->sense_buffer[2] & 0xf,
- srb->sense_buffer[12],
- srb->sense_buffer[13]);
+ sshdr.response_code, sshdr.sense_key,
+ sshdr.asc, sshdr.ascq);
#ifdef CONFIG_USB_STORAGE_DEBUG
- usb_stor_show_sense(
- srb->sense_buffer[2] & 0xf,
- srb->sense_buffer[12],
- srb->sense_buffer[13]);
+ usb_stor_show_sense(sshdr.sense_key, sshdr.asc, sshdr.ascq);
#endif
/* set the result so the higher layers expect this data */
srb->result = SAM_STAT_CHECK_CONDITION;
+ scdd = scsi_sense_desc_find(srb->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, 4);
+ fm_ili = (scdd ? scdd[3] : srb->sense_buffer[2]) & 0xA0;
+
/* We often get empty sense data. This could indicate that
* everything worked or that there was an unspecified
* problem. We have to decide which.
*/
- if ( /* Filemark 0, ignore EOM, ILI 0, no sense */
- (srb->sense_buffer[2] & 0xaf) == 0 &&
- /* No ASC or ASCQ */
- srb->sense_buffer[12] == 0 &&
- srb->sense_buffer[13] == 0) {
-
+ if (sshdr.sense_key == 0 && sshdr.asc == 0 && sshdr.ascq == 0 &&
+ fm_ili == 0) {
/* If things are really okay, then let's show that.
* Zero out the sense buffer so the higher layers
* won't realize we did an unsolicited auto-sense.
@@ -814,7 +815,10 @@ Retry_Sense:
*/
} else {
srb->result = DID_ERROR << 16;
- srb->sense_buffer[2] = HARDWARE_ERROR;
+ if ((sshdr.response_code & 0x72) == 0x72)
+ srb->sense_buffer[1] = HARDWARE_ERROR;
+ else
+ srb->sense_buffer[2] = HARDWARE_ERROR;
}
}
}
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 0ca095820f3..c325e69415a 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -831,12 +831,22 @@ static int usb_stor_scan_thread(void * __us)
dev_dbg(dev, "device found\n");
- set_freezable();
- /* Wait for the timeout to expire or for a disconnect */
+ set_freezable_with_signal();
+ /*
+ * Wait for the timeout to expire or for a disconnect
+ *
+ * We can't freeze in this thread or we risk causing khubd to
+ * fail to freeze, but we can't be non-freezable either. Nor can
+ * khubd freeze while waiting for scanning to complete as it may
+ * hold the device lock, causing a hang when suspending devices.
+ * So we request a fake signal when freezing and use
+ * interruptible sleep to kick us out of our wait early when
+ * freezing happens.
+ */
if (delay_use > 0) {
dev_dbg(dev, "waiting for device to settle "
"before scanning\n");
- wait_event_freezable_timeout(us->delay_wait,
+ wait_event_interruptible_timeout(us->delay_wait,
test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
delay_use * HZ);
}
diff --git a/drivers/usb/usb-common.c b/drivers/usb/usb-common.c
new file mode 100644
index 00000000000..d29503e954a
--- /dev/null
+++ b/drivers/usb/usb-common.c
@@ -0,0 +1,35 @@
+/*
+ * Provides code common for host and device side USB.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ * If either host side (ie. CONFIG_USB=y) or device side USB stack
+ * (ie. CONFIG_USB_GADGET=y) is compiled in the kernel, this module is
+ * compiled-in as well. Otherwise, if either of the two stacks is
+ * compiled as module, this file is compiled as module as well.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb/ch9.h>
+
+const char *usb_speed_string(enum usb_device_speed speed)
+{
+ static const char *const names[] = {
+ [USB_SPEED_UNKNOWN] = "UNKNOWN",
+ [USB_SPEED_LOW] = "low-speed",
+ [USB_SPEED_FULL] = "full-speed",
+ [USB_SPEED_HIGH] = "high-speed",
+ [USB_SPEED_WIRELESS] = "wireless",
+ [USB_SPEED_SUPER] = "super-speed",
+ };
+
+ if (speed < 0 || speed >= ARRAY_SIZE(names))
+ speed = USB_SPEED_UNKNOWN;
+ return names[speed];
+}
+EXPORT_SYMBOL_GPL(usb_speed_string);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index e24ce312307..32d6fc95390 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -555,7 +555,7 @@ static int skel_probe(struct usb_interface *interface,
if (!dev->bulk_in_endpointAddr &&
usb_endpoint_is_bulk_in(endpoint)) {
/* we found a bulk in endpoint */
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ buffer_size = usb_endpoint_maxp(endpoint);
dev->bulk_in_size = buffer_size;
dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 59a748a0e5d..0d1863c9edd 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -43,7 +43,7 @@ int wa_create(struct wahc *wa, struct usb_interface *iface)
/* Fill up Data Transfer EP pointers */
wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc;
wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc;
- wa->xfer_result_size = le16_to_cpu(wa->dti_epd->wMaxPacketSize);
+ wa->xfer_result_size = usb_endpoint_maxp(wa->dti_epd);
wa->xfer_result = kmalloc(wa->xfer_result_size, GFP_KERNEL);
if (wa->xfer_result == NULL)
goto error_xfer_result_alloc;
diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h
index 157485c862c..a7494bf1008 100644
--- a/drivers/uwb/uwb-internal.h
+++ b/drivers/uwb/uwb-internal.h
@@ -28,7 +28,6 @@
#ifndef __UWB_INTERNAL_H__
#define __UWB_INTERNAL_H__
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/uwb.h>
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 915fd74da7a..d449a74d4a3 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -50,7 +50,7 @@
#include <video/vga.h>
#include <asm/io.h>
-static DEFINE_SPINLOCK(vga_lock);
+static DEFINE_RAW_SPINLOCK(vga_lock);
static int cursor_size_lastfrom;
static int cursor_size_lastto;
static u32 vgacon_xres;
@@ -157,7 +157,7 @@ static inline void write_vga(unsigned char reg, unsigned int val)
* ddprintk might set the console position from interrupt
* handlers, thus the write has to be IRQ-atomic.
*/
- spin_lock_irqsave(&vga_lock, flags);
+ raw_spin_lock_irqsave(&vga_lock, flags);
#ifndef SLOW_VGA
v1 = reg + (val & 0xff00);
@@ -170,7 +170,7 @@ static inline void write_vga(unsigned char reg, unsigned int val)
outb_p(reg + 1, vga_video_port_reg);
outb_p(val & 0xff, vga_video_port_val);
#endif
- spin_unlock_irqrestore(&vga_lock, flags);
+ raw_spin_unlock_irqrestore(&vga_lock, flags);
}
static inline void vga_set_mem_top(struct vc_data *c)
@@ -664,7 +664,7 @@ static void vgacon_set_cursor_size(int xpos, int from, int to)
cursor_size_lastfrom = from;
cursor_size_lastto = to;
- spin_lock_irqsave(&vga_lock, flags);
+ raw_spin_lock_irqsave(&vga_lock, flags);
if (vga_video_type >= VIDEO_TYPE_VGAC) {
outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg);
curs = inb_p(vga_video_port_val);
@@ -682,7 +682,7 @@ static void vgacon_set_cursor_size(int xpos, int from, int to)
outb_p(curs, vga_video_port_val);
outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg);
outb_p(cure, vga_video_port_val);
- spin_unlock_irqrestore(&vga_lock, flags);
+ raw_spin_unlock_irqrestore(&vga_lock, flags);
}
static void vgacon_cursor(struct vc_data *c, int mode)
@@ -757,7 +757,7 @@ static int vgacon_doresize(struct vc_data *c,
unsigned int scanlines = height * c->vc_font.height;
u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
- spin_lock_irqsave(&vga_lock, flags);
+ raw_spin_lock_irqsave(&vga_lock, flags);
vgacon_xres = width * VGA_FONTWIDTH;
vgacon_yres = height * c->vc_font.height;
@@ -808,7 +808,7 @@ static int vgacon_doresize(struct vc_data *c,
outb_p(vsync_end, vga_video_port_val);
}
- spin_unlock_irqrestore(&vga_lock, flags);
+ raw_spin_unlock_irqrestore(&vga_lock, flags);
return 0;
}
@@ -891,11 +891,11 @@ static void vga_vesa_blank(struct vgastate *state, int mode)
{
/* save original values of VGA controller registers */
if (!vga_vesa_blanked) {
- spin_lock_irq(&vga_lock);
+ raw_spin_lock_irq(&vga_lock);
vga_state.SeqCtrlIndex = vga_r(state->vgabase, VGA_SEQ_I);
vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg);
vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R);
- spin_unlock_irq(&vga_lock);
+ raw_spin_unlock_irq(&vga_lock);
outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */
vga_state.HorizontalTotal = inb_p(vga_video_port_val);
@@ -918,7 +918,7 @@ static void vga_vesa_blank(struct vgastate *state, int mode)
/* assure that video is enabled */
/* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */
- spin_lock_irq(&vga_lock);
+ raw_spin_lock_irq(&vga_lock);
vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20);
/* test for vertical retrace in process.... */
@@ -954,13 +954,13 @@ static void vga_vesa_blank(struct vgastate *state, int mode)
/* restore both index registers */
vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex);
outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg);
- spin_unlock_irq(&vga_lock);
+ raw_spin_unlock_irq(&vga_lock);
}
static void vga_vesa_unblank(struct vgastate *state)
{
/* restore original values of VGA controller registers */
- spin_lock_irq(&vga_lock);
+ raw_spin_lock_irq(&vga_lock);
vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO);
outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */
@@ -985,7 +985,7 @@ static void vga_vesa_unblank(struct vgastate *state)
/* restore index/control registers */
vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex);
outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg);
- spin_unlock_irq(&vga_lock);
+ raw_spin_unlock_irq(&vga_lock);
}
static void vga_pal_blank(struct vgastate *state)
@@ -1104,7 +1104,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
charmap += 4 * cmapsz;
#endif
- spin_lock_irq(&vga_lock);
+ raw_spin_lock_irq(&vga_lock);
/* First, the Sequencer */
vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1);
/* CPU writes only to map 2 */
@@ -1120,7 +1120,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00);
/* map start at A000:0000 */
vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00);
- spin_unlock_irq(&vga_lock);
+ raw_spin_unlock_irq(&vga_lock);
if (arg) {
if (set)
@@ -1147,7 +1147,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
}
}
- spin_lock_irq(&vga_lock);
+ raw_spin_lock_irq(&vga_lock);
/* First, the sequencer, Synchronous reset */
vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01);
/* CPU writes to maps 0 and 1 */
@@ -1186,7 +1186,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
inb_p(video_port_status);
vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
}
- spin_unlock_irq(&vga_lock);
+ raw_spin_unlock_irq(&vga_lock);
return 0;
}
@@ -1211,26 +1211,26 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
registers; they are write-only on EGA, but it appears that they
are all don't care bits on EGA, so I guess it doesn't matter. */
- spin_lock_irq(&vga_lock);
+ raw_spin_lock_irq(&vga_lock);
outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */
ovr = inb_p(vga_video_port_val);
outb_p(0x09, vga_video_port_reg); /* Font size register */
fsr = inb_p(vga_video_port_val);
- spin_unlock_irq(&vga_lock);
+ raw_spin_unlock_irq(&vga_lock);
vde = maxscan & 0xff; /* Vertical display end reg */
ovr = (ovr & 0xbd) + /* Overflow register */
((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3);
fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */
- spin_lock_irq(&vga_lock);
+ raw_spin_lock_irq(&vga_lock);
outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */
outb_p(ovr, vga_video_port_val);
outb_p(0x09, vga_video_port_reg); /* Font size */
outb_p(fsr, vga_video_port_val);
outb_p(0x12, vga_video_port_reg); /* Vertical display limit */
outb_p(vde, vga_video_port_val);
- spin_unlock_irq(&vga_lock);
+ raw_spin_unlock_irq(&vga_lock);
vga_video_font_height = fontheight;
for (i = 0; i < MAX_NR_CONSOLES; i++) {
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index d885c770eb8..2d97752f79a 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -428,7 +428,7 @@ static int __init igafb_init(void)
*
* IGS2000 has its I/O memory mapped and we want
* to generate memory cycles on PCI, e.g. do ioremap(),
- * then readb/writeb() as in Documentation/IO-mapping.txt.
+ * then readb/writeb() as in Documentation/io-mapping.txt.
*
* IGS1682 is more traditional, it responds to PCI I/O
* cycles, so we want to access it with inb()/outb().
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 68b9136847a..4acf88884f9 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -529,4 +529,14 @@ void vring_transport_features(struct virtio_device *vdev)
}
EXPORT_SYMBOL_GPL(vring_transport_features);
+/* return the size of the vring within the virtqueue */
+unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
+{
+
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ return vq->vring.num;
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index bd44417c84d..5c5f4b14fd0 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -54,12 +54,35 @@ static atomic_t ticks;
static inline void bcm47xx_wdt_hw_start(void)
{
/* this is 2,5s on 100Mhz clock and 2s on 133 Mhz */
- ssb_watchdog_timer_set(&ssb_bcm47xx, 0xfffffff);
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 0xfffffff);
+ break;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc,
+ 0xfffffff);
+ break;
+#endif
+ }
}
static inline int bcm47xx_wdt_hw_stop(void)
{
- return ssb_watchdog_timer_set(&ssb_bcm47xx, 0);
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ return ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 0);
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 0);
+ return 0;
+#endif
+ }
+ return -EINVAL;
}
static void bcm47xx_timer_tick(unsigned long unused)
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index e97b0499bd0..97b8184614a 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -40,7 +40,7 @@
* mknod /dev/watchdog c 10 130
*
* For an example userspace keep-alive daemon, see:
- * Documentation/watchdog/watchdog.txt
+ * Documentation/watchdog/wdt.txt
*/
#include <linux/module.h>
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 5f7ff8e2fc1..8795480c235 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -137,16 +137,6 @@ config XEN_GRANT_DEV_ALLOC
to other domains. This can be used to implement frontend drivers
or as part of an inter-domain shared memory channel.
-config XEN_PLATFORM_PCI
- tristate "xen platform pci device driver"
- depends on XEN_PVHVM && PCI
- default m
- help
- Driver for the Xen PCI Platform device: it is responsible for
- initializing xenbus and grant_table when running in a Xen HVM
- domain. As a consequence this driver is required to run any Xen PV
- frontend on Xen HVM.
-
config SWIOTLB_XEN
def_bool y
depends on PCI
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 72bbb27d7a6..974fffdf22b 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
-obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o
+obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_DOM0) += pci.o
@@ -23,5 +23,3 @@ obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
xen-gntalloc-y := gntalloc.o
-
-xen-platform-pci-y := platform-pci.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 5dfd8f8ff07..5876e1ae6c2 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -501,20 +501,24 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target);
* alloc_xenballooned_pages - get pages that have been ballooned out
* @nr_pages: Number of pages to get
* @pages: pages returned
+ * @highmem: highmem or lowmem pages
* @return 0 on success, error otherwise
*/
-int alloc_xenballooned_pages(int nr_pages, struct page** pages)
+int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
{
int pgno = 0;
struct page* page;
mutex_lock(&balloon_mutex);
while (pgno < nr_pages) {
- page = balloon_retrieve(true);
- if (page) {
+ page = balloon_retrieve(highmem);
+ if (page && PageHighMem(page) == highmem) {
pages[pgno++] = page;
} else {
enum bp_state st;
- st = decrease_reservation(nr_pages - pgno, GFP_HIGHUSER);
+ if (page)
+ balloon_append(page);
+ st = decrease_reservation(nr_pages - pgno,
+ highmem ? GFP_HIGHUSER : GFP_USER);
if (st != BP_DONE)
goto out_undo;
}
@@ -555,17 +559,40 @@ void free_xenballooned_pages(int nr_pages, struct page** pages)
}
EXPORT_SYMBOL(free_xenballooned_pages);
-static int __init balloon_init(void)
+static void __init balloon_add_region(unsigned long start_pfn,
+ unsigned long pages)
{
unsigned long pfn, extra_pfn_end;
struct page *page;
+ /*
+ * If the amount of usable memory has been limited (e.g., with
+ * the 'mem' command line parameter), don't add pages beyond
+ * this limit.
+ */
+ extra_pfn_end = min(max_pfn, start_pfn + pages);
+
+ for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
+ page = pfn_to_page(pfn);
+ /* totalram_pages and totalhigh_pages do not
+ include the boot-time balloon extension, so
+ don't subtract from it. */
+ __balloon_append(page);
+ }
+}
+
+static int __init balloon_init(void)
+{
+ int i;
+
if (!xen_domain())
return -ENODEV;
pr_info("xen/balloon: Initialising balloon driver.\n");
- balloon_stats.current_pages = xen_pv_domain() ? min(xen_start_info->nr_pages, max_pfn) : max_pfn;
+ balloon_stats.current_pages = xen_pv_domain()
+ ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
+ : max_pfn;
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
@@ -584,24 +611,13 @@ static int __init balloon_init(void)
#endif
/*
- * Initialise the balloon with excess memory space. We need
- * to make sure we don't add memory which doesn't exist or
- * logically exist. The E820 map can be trimmed to be smaller
- * than the amount of physical memory due to the mem= command
- * line parameter. And if this is a 32-bit non-HIGHMEM kernel
- * on a system with memory which requires highmem to access,
- * don't try to use it.
+ * Initialize the balloon with pages from the extra memory
+ * regions (see arch/x86/xen/setup.c).
*/
- extra_pfn_end = min(min(max_pfn, e820_end_of_ram_pfn()),
- (unsigned long)PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size));
- for (pfn = PFN_UP(xen_extra_mem_start);
- pfn < extra_pfn_end;
- pfn++) {
- page = pfn_to_page(pfn);
- /* totalram_pages and totalhigh_pages do not include the boot-time
- balloon extension, so don't subtract from it. */
- __balloon_append(page);
- }
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
+ if (xen_extra_mem[i].size)
+ balloon_add_region(PFN_UP(xen_extra_mem[i].start),
+ PFN_DOWN(xen_extra_mem[i].size));
return 0;
}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 7523719bf8a..0eb8a57cc80 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -432,7 +432,8 @@ static int __must_check xen_allocate_irq_dynamic(void)
irq = irq_alloc_desc_from(first, -1);
- xen_irq_init(irq);
+ if (irq >= 0)
+ xen_irq_init(irq);
return irq;
}
@@ -713,7 +714,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
mutex_lock(&irq_mapping_update_lock);
irq = xen_allocate_irq_dynamic();
- if (irq == -1)
+ if (irq < 0)
goto out;
irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
@@ -729,7 +730,7 @@ out:
error_irq:
mutex_unlock(&irq_mapping_update_lock);
xen_free_irq(irq);
- return -1;
+ return ret;
}
#endif
@@ -779,7 +780,7 @@ int xen_irq_from_pirq(unsigned pirq)
mutex_lock(&irq_mapping_update_lock);
list_for_each_entry(info, &xen_irq_list_head, list) {
- if (info == NULL || info->type != IRQT_PIRQ)
+ if (info->type != IRQT_PIRQ)
continue;
irq = info->irq;
if (info->u.pirq.pirq == pirq)
@@ -872,11 +873,32 @@ static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}
+static int find_virq(unsigned int virq, unsigned int cpu)
+{
+ struct evtchn_status status;
+ int port, rc = -ENOENT;
+
+ memset(&status, 0, sizeof(status));
+ for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
+ status.dom = DOMID_SELF;
+ status.port = port;
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
+ if (rc < 0)
+ continue;
+ if (status.status != EVTCHNSTAT_virq)
+ continue;
+ if (status.u.virq == virq && status.vcpu == cpu) {
+ rc = port;
+ break;
+ }
+ }
+ return rc;
+}
int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
struct evtchn_bind_virq bind_virq;
- int evtchn, irq;
+ int evtchn, irq, ret;
mutex_lock(&irq_mapping_update_lock);
@@ -892,10 +914,16 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
bind_virq.virq = virq;
bind_virq.vcpu = cpu;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
- &bind_virq) != 0)
- BUG();
- evtchn = bind_virq.port;
+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+ &bind_virq);
+ if (ret == 0)
+ evtchn = bind_virq.port;
+ else {
+ if (ret == -EEXIST)
+ ret = find_virq(virq, cpu);
+ BUG_ON(ret < 0);
+ evtchn = ret;
+ }
xen_irq_info_virq_init(cpu, irq, evtchn, virq);
@@ -1021,7 +1049,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
if (irq < 0)
return irq;
- irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
+ irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
@@ -1670,6 +1698,7 @@ void __init xen_init_IRQ(void)
evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
GFP_KERNEL);
+ BUG_ON(!evtchn_to_irq);
for (i = 0; i < NR_EVENT_CHANNELS; i++)
evtchn_to_irq[i] = -1;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index f914b26cf0c..880798aae2f 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -83,6 +83,7 @@ struct grant_map {
struct ioctl_gntdev_grant_ref *grants;
struct gnttab_map_grant_ref *map_ops;
struct gnttab_unmap_grant_ref *unmap_ops;
+ struct gnttab_map_grant_ref *kmap_ops;
struct page **pages;
};
@@ -116,19 +117,22 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL);
add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL);
add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
+ add->kmap_ops = kzalloc(sizeof(add->kmap_ops[0]) * count, GFP_KERNEL);
add->pages = kzalloc(sizeof(add->pages[0]) * count, GFP_KERNEL);
if (NULL == add->grants ||
NULL == add->map_ops ||
NULL == add->unmap_ops ||
+ NULL == add->kmap_ops ||
NULL == add->pages)
goto err;
- if (alloc_xenballooned_pages(count, add->pages))
+ if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))
goto err;
for (i = 0; i < count; i++) {
add->map_ops[i].handle = -1;
add->unmap_ops[i].handle = -1;
+ add->kmap_ops[i].handle = -1;
}
add->index = 0;
@@ -142,6 +146,7 @@ err:
kfree(add->grants);
kfree(add->map_ops);
kfree(add->unmap_ops);
+ kfree(add->kmap_ops);
kfree(add);
return NULL;
}
@@ -243,10 +248,35 @@ static int map_grant_pages(struct grant_map *map)
gnttab_set_unmap_op(&map->unmap_ops[i], addr,
map->flags, -1 /* handle */);
}
+ } else {
+ /*
+ * Setup the map_ops corresponding to the pte entries pointing
+ * to the kernel linear addresses of the struct pages.
+ * These ptes are completely different from the user ptes dealt
+ * with find_grant_ptes.
+ */
+ for (i = 0; i < map->count; i++) {
+ unsigned level;
+ unsigned long address = (unsigned long)
+ pfn_to_kaddr(page_to_pfn(map->pages[i]));
+ pte_t *ptep;
+ u64 pte_maddr = 0;
+ BUG_ON(PageHighMem(map->pages[i]));
+
+ ptep = lookup_address(address, &level);
+ pte_maddr = arbitrary_virt_to_machine(ptep).maddr;
+ gnttab_set_map_op(&map->kmap_ops[i], pte_maddr,
+ map->flags |
+ GNTMAP_host_map |
+ GNTMAP_contains_pte,
+ map->grants[i].ref,
+ map->grants[i].domid);
+ }
}
pr_debug("map %d+%d\n", map->index, map->count);
- err = gnttab_map_refs(map->map_ops, map->pages, map->count);
+ err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
+ map->pages, map->count);
if (err)
return err;
@@ -462,13 +492,11 @@ static int gntdev_release(struct inode *inode, struct file *flip)
pr_debug("priv %p\n", priv);
- spin_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
list_del(&map->next);
gntdev_put_map(map);
}
- spin_unlock(&priv->lock);
if (use_ptemod)
mmu_notifier_unregister(&priv->mn, priv->mm);
@@ -532,10 +560,11 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
if (map) {
list_del(&map->next);
- gntdev_put_map(map);
err = 0;
}
spin_unlock(&priv->lock);
+ if (map)
+ gntdev_put_map(map);
return err;
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 4f44b347b24..8c71ab80175 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -448,7 +448,8 @@ unsigned int gnttab_max_grant_frames(void)
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
- struct page **pages, unsigned int count)
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count)
{
int i, ret;
pte_t *pte;
@@ -488,8 +489,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
*/
return -EOPNOTSUPP;
}
- ret = m2p_add_override(mfn, pages[i],
- map_ops[i].flags & GNTMAP_contains_pte);
+ ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
if (ret)
return ret;
}
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index cef4bafc07d..66057075d6e 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -18,6 +18,7 @@
*/
#include <linux/pci.h>
+#include <linux/acpi.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>
@@ -26,26 +27,85 @@
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"
+static bool __read_mostly pci_seg_supported = true;
+
static int xen_add_device(struct device *dev)
{
int r;
struct pci_dev *pci_dev = to_pci_dev(dev);
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *physfn = pci_dev->physfn;
+#endif
+
+ if (pci_seg_supported) {
+ struct physdev_pci_device_add add = {
+ .seg = pci_domain_nr(pci_dev->bus),
+ .bus = pci_dev->bus->number,
+ .devfn = pci_dev->devfn
+ };
+#ifdef CONFIG_ACPI
+ acpi_handle handle;
+#endif
+
+#ifdef CONFIG_PCI_IOV
+ if (pci_dev->is_virtfn) {
+ add.flags = XEN_PCI_DEV_VIRTFN;
+ add.physfn.bus = physfn->bus->number;
+ add.physfn.devfn = physfn->devfn;
+ } else
+#endif
+ if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
+ add.flags = XEN_PCI_DEV_EXTFN;
+
+#ifdef CONFIG_ACPI
+ handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
+ if (!handle)
+ handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
+#ifdef CONFIG_PCI_IOV
+ if (!handle && pci_dev->is_virtfn)
+ handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge);
+#endif
+ if (handle) {
+ acpi_status status;
+
+ do {
+ unsigned long long pxm;
+
+ status = acpi_evaluate_integer(handle, "_PXM",
+ NULL, &pxm);
+ if (ACPI_SUCCESS(status)) {
+ add.optarr[0] = pxm;
+ add.flags |= XEN_PCI_DEV_PXM;
+ break;
+ }
+ status = acpi_get_parent(handle, &handle);
+ } while (ACPI_SUCCESS(status));
+ }
+#endif /* CONFIG_ACPI */
+
+ r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add);
+ if (r != -ENOSYS)
+ return r;
+ pci_seg_supported = false;
+ }
+ if (pci_domain_nr(pci_dev->bus))
+ r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
- if (pci_dev->is_virtfn) {
+ else if (pci_dev->is_virtfn) {
struct physdev_manage_pci_ext manage_pci_ext = {
.bus = pci_dev->bus->number,
.devfn = pci_dev->devfn,
.is_virtfn = 1,
- .physfn.bus = pci_dev->physfn->bus->number,
- .physfn.devfn = pci_dev->physfn->devfn,
+ .physfn.bus = physfn->bus->number,
+ .physfn.devfn = physfn->devfn,
};
r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
&manage_pci_ext);
- } else
+ }
#endif
- if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
+ else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
struct physdev_manage_pci_ext manage_pci_ext = {
.bus = pci_dev->bus->number,
.devfn = pci_dev->devfn,
@@ -71,13 +131,27 @@ static int xen_remove_device(struct device *dev)
{
int r;
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct physdev_manage_pci manage_pci;
- manage_pci.bus = pci_dev->bus->number;
- manage_pci.devfn = pci_dev->devfn;
+ if (pci_seg_supported) {
+ struct physdev_pci_device device = {
+ .seg = pci_domain_nr(pci_dev->bus),
+ .bus = pci_dev->bus->number,
+ .devfn = pci_dev->devfn
+ };
- r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
- &manage_pci);
+ r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
+ &device);
+ } else if (pci_domain_nr(pci_dev->bus))
+ r = -ENOSYS;
+ else {
+ struct physdev_manage_pci manage_pci = {
+ .bus = pci_dev->bus->number,
+ .devfn = pci_dev->devfn
+ };
+
+ r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
+ &manage_pci);
+ }
return r;
}
@@ -96,13 +170,16 @@ static int xen_pci_notifier(struct notifier_block *nb,
r = xen_remove_device(dev);
break;
default:
- break;
+ return NOTIFY_DONE;
}
-
- return r;
+ if (r)
+ dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
+ action == BUS_NOTIFY_ADD_DEVICE ? "add" :
+ (action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
+ return NOTIFY_OK;
}
-struct notifier_block device_nb = {
+static struct notifier_block device_nb = {
.notifier_call = xen_pci_notifier,
};
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 6e8c15a2320..c984768d98c 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -38,6 +38,7 @@
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
+#include <xen/hvc-console.h>
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
* swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
@@ -146,8 +147,10 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
void __init xen_swiotlb_init(int verbose)
{
unsigned long bytes;
- int rc;
+ int rc = -ENOMEM;
unsigned long nr_tbl;
+ char *m = NULL;
+ unsigned int repeat = 3;
nr_tbl = swioltb_nr_tbl();
if (nr_tbl)
@@ -156,16 +159,17 @@ void __init xen_swiotlb_init(int verbose)
xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
}
-
+retry:
bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
/*
* Get IO TLB memory from any location.
*/
xen_io_tlb_start = alloc_bootmem(bytes);
- if (!xen_io_tlb_start)
- panic("Cannot allocate SWIOTLB buffer");
-
+ if (!xen_io_tlb_start) {
+ m = "Cannot allocate Xen-SWIOTLB buffer!\n";
+ goto error;
+ }
xen_io_tlb_end = xen_io_tlb_start + bytes;
/*
* And replace that memory with pages under 4GB.
@@ -173,17 +177,28 @@ void __init xen_swiotlb_init(int verbose)
rc = xen_swiotlb_fixup(xen_io_tlb_start,
bytes,
xen_io_tlb_nslabs);
- if (rc)
+ if (rc) {
+ free_bootmem(__pa(xen_io_tlb_start), bytes);
+ m = "Failed to get contiguous memory for DMA from Xen!\n"\
+ "You either: don't have the permissions, do not have"\
+ " enough free memory under 4GB, or the hypervisor memory"\
+ "is too fragmented!";
goto error;
-
+ }
start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
return;
error:
- panic("DMA(%d): Failed to exchange pages allocated for DMA with Xen! "\
- "We either don't have the permission or you do not have enough"\
- "free memory under 4GB!\n", rc);
+ if (repeat--) {
+ xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
+ (xen_io_tlb_nslabs >> 1));
+ printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n",
+ (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
+ goto retry;
+ }
+ xen_raw_printk("%s (rc:%d)", m, rc);
+ panic("%s (rc:%d)", m, rc);
}
void *
@@ -194,6 +209,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
int order = get_order(size);
u64 dma_mask = DMA_BIT_MASK(32);
unsigned long vstart;
+ phys_addr_t phys;
+ dma_addr_t dev_addr;
/*
* Ignore region specifiers - the kernel's ideas of
@@ -209,18 +226,26 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
vstart = __get_free_pages(flags, order);
ret = (void *)vstart;
+ if (!ret)
+ return ret;
+
if (hwdev && hwdev->coherent_dma_mask)
- dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+ dma_mask = hwdev->coherent_dma_mask;
- if (ret) {
+ phys = virt_to_phys(ret);
+ dev_addr = xen_phys_to_bus(phys);
+ if (((dev_addr + size - 1 <= dma_mask)) &&
+ !range_straddles_page_boundary(phys, size))
+ *dma_handle = dev_addr;
+ else {
if (xen_create_contiguous_region(vstart, order,
fls64(dma_mask)) != 0) {
free_pages(vstart, order);
return NULL;
}
- memset(ret, 0, size);
*dma_handle = virt_to_machine(ret).maddr;
}
+ memset(ret, 0, size);
return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
@@ -230,11 +255,21 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dev_addr)
{
int order = get_order(size);
+ phys_addr_t phys;
+ u64 dma_mask = DMA_BIT_MASK(32);
if (dma_release_from_coherent(hwdev, order, vaddr))
return;
- xen_destroy_contiguous_region((unsigned long)vaddr, order);
+ if (hwdev && hwdev->coherent_dma_mask)
+ dma_mask = hwdev->coherent_dma_mask;
+
+ phys = virt_to_phys(vaddr);
+
+ if (((dev_addr + size - 1 > dma_mask)) ||
+ range_straddles_page_boundary(phys, size))
+ xen_destroy_contiguous_region((unsigned long)vaddr, order);
+
free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
@@ -278,9 +313,10 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
/*
* Ensure that the address returned is DMA'ble
*/
- if (!dma_capable(dev, dev_addr, size))
- panic("map_single: bounce buffer is not DMA'ble");
-
+ if (!dma_capable(dev, dev_addr, size)) {
+ swiotlb_tbl_unmap_single(dev, map, size, dir);
+ dev_addr = 0;
+ }
return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index a8031445d94..444345afbd5 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -15,7 +15,6 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
-#define DRV_NAME "xen-pciback"
static int permissive;
module_param(permissive, bool, 0644);
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index da3cbdfcb5d..3daf862d739 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -15,7 +15,6 @@ struct pci_bar_info {
int which;
};
-#define DRV_NAME "xen-pciback"
#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
@@ -25,7 +24,7 @@ static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
int ret;
ret = xen_pcibk_read_config_word(dev, offset, value, data);
- if (!atomic_read(&dev->enable_cnt))
+ if (!pci_is_enabled(dev))
return ret;
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
@@ -187,7 +186,7 @@ static inline void read_dev_bar(struct pci_dev *dev,
bar_info->val = res[pos].start |
(res[pos].flags & PCI_REGION_FLAG_MASK);
- bar_info->len_val = res[pos].end - res[pos].start + 1;
+ bar_info->len_val = resource_size(&res[pos]);
}
static void *bar_init(struct pci_dev *dev, int offset)
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c
index 921a889e65e..7476791cab4 100644
--- a/drivers/xen/xen-pciback/conf_space_quirks.c
+++ b/drivers/xen/xen-pciback/conf_space_quirks.c
@@ -12,7 +12,6 @@
#include "conf_space_quirks.h"
LIST_HEAD(xen_pcibk_quirks);
-#define DRV_NAME "xen-pciback"
static inline const struct pci_device_id *
match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
{
@@ -36,7 +35,7 @@ static struct xen_pcibk_config_quirk *xen_pcibk_find_quirk(struct pci_dev *dev)
goto out;
tmp_quirk = NULL;
printk(KERN_DEBUG DRV_NAME
- ":quirk didn't match any device xen_pciback knows about\n");
+ ": quirk didn't match any device known\n");
out:
return tmp_quirk;
}
diff --git a/drivers/xen/xen-pciback/passthrough.c b/drivers/xen/xen-pciback/passthrough.c
index 1d32a9a42c0..828dddc360d 100644
--- a/drivers/xen/xen-pciback/passthrough.c
+++ b/drivers/xen/xen-pciback/passthrough.c
@@ -7,13 +7,13 @@
#include <linux/list.h>
#include <linux/pci.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include "pciback.h"
struct passthrough_dev_data {
/* Access to dev_list must be protected by lock */
struct list_head dev_list;
- spinlock_t lock;
+ struct mutex lock;
};
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
@@ -24,9 +24,8 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry;
struct pci_dev *dev = NULL;
- unsigned long flags;
- spin_lock_irqsave(&dev_data->lock, flags);
+ mutex_lock(&dev_data->lock);
list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
@@ -37,7 +36,7 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
}
}
- spin_unlock_irqrestore(&dev_data->lock, flags);
+ mutex_unlock(&dev_data->lock);
return dev;
}
@@ -48,7 +47,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry;
- unsigned long flags;
unsigned int domain, bus, devfn;
int err;
@@ -57,9 +55,9 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
return -ENOMEM;
dev_entry->dev = dev;
- spin_lock_irqsave(&dev_data->lock, flags);
+ mutex_lock(&dev_data->lock);
list_add_tail(&dev_entry->list, &dev_data->dev_list);
- spin_unlock_irqrestore(&dev_data->lock, flags);
+ mutex_unlock(&dev_data->lock);
/* Publish this device. */
domain = (unsigned int)pci_domain_nr(dev->bus);
@@ -76,9 +74,8 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry, *t;
struct pci_dev *found_dev = NULL;
- unsigned long flags;
- spin_lock_irqsave(&dev_data->lock, flags);
+ mutex_lock(&dev_data->lock);
list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
if (dev_entry->dev == dev) {
@@ -88,7 +85,7 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
}
}
- spin_unlock_irqrestore(&dev_data->lock, flags);
+ mutex_unlock(&dev_data->lock);
if (found_dev)
pcistub_put_pci_dev(found_dev);
@@ -102,7 +99,7 @@ static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
if (!dev_data)
return -ENOMEM;
- spin_lock_init(&dev_data->lock);
+ mutex_init(&dev_data->lock);
INIT_LIST_HEAD(&dev_data->dev_list);
@@ -116,14 +113,14 @@ static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
{
int err = 0;
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
- struct pci_dev_entry *dev_entry, *e, *tmp;
+ struct pci_dev_entry *dev_entry, *e;
struct pci_dev *dev;
int found;
unsigned int domain, bus;
- spin_lock(&dev_data->lock);
+ mutex_lock(&dev_data->lock);
- list_for_each_entry_safe(dev_entry, tmp, &dev_data->dev_list, list) {
+ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
/* Only publish this device as a root if none of its
* parent bridges are exported
*/
@@ -142,16 +139,13 @@ static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
bus = (unsigned int)dev_entry->dev->bus->number;
if (!found) {
- spin_unlock(&dev_data->lock);
err = publish_root_cb(pdev, domain, bus);
if (err)
break;
- spin_lock(&dev_data->lock);
}
}
- if (!err)
- spin_unlock(&dev_data->lock);
+ mutex_unlock(&dev_data->lock);
return err;
}
@@ -182,7 +176,7 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
return 1;
}
-struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
+const struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
.name = "passthrough",
.init = __xen_pcibk_init_devices,
.free = __xen_pcibk_release_devices,
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index aec214ac0a1..8f06e1ed028 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -21,8 +21,6 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
-#define DRV_NAME "xen-pciback"
-
static char *pci_devs_to_hide;
wait_queue_head_t xen_pcibk_aer_wait_queue;
/*Add sem for sync AER handling and xen_pcibk remove/reconfigue ops,
@@ -222,6 +220,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
}
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ if (WARN_ON(!found_psdev))
+ return;
/*hold this lock for avoiding breaking link between
* pcistub and xen_pcibk when AER is in processing
@@ -514,12 +514,9 @@ static void kill_domain_by_device(struct pcistub_device *psdev)
int err;
char nodename[PCI_NODENAME_MAX];
- if (!psdev)
- dev_err(&psdev->dev->dev,
- "device is NULL when do AER recovery/kill_domain\n");
+ BUG_ON(!psdev);
snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
psdev->pdev->xdev->otherend_id);
- nodename[strlen(nodename)] = '\0';
again:
err = xenbus_transaction_start(&xbt);
@@ -605,7 +602,7 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
if (test_bit(_XEN_PCIF_active,
(unsigned long *)&psdev->pdev->sh_info->flags)) {
dev_dbg(&psdev->dev->dev,
- "schedule pci_conf service in xen_pcibk\n");
+ "schedule pci_conf service in " DRV_NAME "\n");
xen_pcibk_test_and_schedule_op(psdev->pdev);
}
@@ -995,8 +992,7 @@ out:
err = count;
return err;
}
-
-DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
+static DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
size_t count)
@@ -1015,8 +1011,7 @@ out:
err = count;
return err;
}
-
-DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
+static DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
{
@@ -1039,8 +1034,7 @@ static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
return count;
}
-
-DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
+static DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
{
@@ -1069,8 +1063,7 @@ static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
return count;
}
-
-DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);
+static DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);
static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
const char *buf,
@@ -1106,7 +1099,8 @@ out:
err = count;
return err;
}
-DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL, pcistub_irq_handler_switch);
+static DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL,
+ pcistub_irq_handler_switch);
static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
size_t count)
@@ -1170,8 +1164,8 @@ out:
return count;
}
-
-DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add);
+static DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show,
+ pcistub_quirk_add);
static ssize_t permissive_add(struct device_driver *drv, const char *buf,
size_t count)
@@ -1236,8 +1230,8 @@ static ssize_t permissive_show(struct device_driver *drv, char *buf)
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
return count;
}
-
-DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
+static DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show,
+ permissive_add);
static void pcistub_exit(void)
{
@@ -1374,3 +1368,4 @@ module_init(xen_pcibk_init);
module_exit(xen_pcibk_cleanup);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("xen-backend:pci");
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index a0e131a8150..e9b4011c5f9 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -15,6 +15,8 @@
#include <linux/atomic.h>
#include <xen/interface/io/pciif.h>
+#define DRV_NAME "xen-pciback"
+
struct pci_dev_entry {
struct list_head list;
struct pci_dev *dev;
@@ -27,7 +29,7 @@ struct pci_dev_entry {
struct xen_pcibk_device {
void *pci_dev_data;
- spinlock_t dev_lock;
+ struct mutex dev_lock;
struct xenbus_device *xdev;
struct xenbus_watch be_watch;
u8 be_watching;
@@ -89,7 +91,7 @@ typedef int (*publish_pci_root_cb) (struct xen_pcibk_device *pdev,
* passthrough - BDFs are exactly like in the host.
*/
struct xen_pcibk_backend {
- char *name;
+ const char *name;
int (*init)(struct xen_pcibk_device *pdev);
void (*free)(struct xen_pcibk_device *pdev);
int (*find)(struct pci_dev *pcidev, struct xen_pcibk_device *pdev,
@@ -104,9 +106,9 @@ struct xen_pcibk_backend {
unsigned int devfn);
};
-extern struct xen_pcibk_backend xen_pcibk_vpci_backend;
-extern struct xen_pcibk_backend xen_pcibk_passthrough_backend;
-extern struct xen_pcibk_backend *xen_pcibk_backend;
+extern const struct xen_pcibk_backend xen_pcibk_vpci_backend;
+extern const struct xen_pcibk_backend xen_pcibk_passthrough_backend;
+extern const struct xen_pcibk_backend *xen_pcibk_backend;
static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev,
@@ -116,13 +118,14 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
if (xen_pcibk_backend && xen_pcibk_backend->add)
return xen_pcibk_backend->add(pdev, dev, devid, publish_cb);
return -1;
-};
+}
+
static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev)
{
if (xen_pcibk_backend && xen_pcibk_backend->free)
return xen_pcibk_backend->release(pdev, dev);
-};
+}
static inline struct pci_dev *
xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain,
@@ -131,7 +134,8 @@ xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain,
if (xen_pcibk_backend && xen_pcibk_backend->get)
return xen_pcibk_backend->get(pdev, domain, bus, devfn);
return NULL;
-};
+}
+
/**
* Add for domain0 PCIE-AER handling. Get guest domain/bus/devfn in xen_pcibk
* before sending aer request to pcifront, so that guest could identify
@@ -148,25 +152,29 @@ static inline int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
return xen_pcibk_backend->find(pcidev, pdev, domain, bus,
devfn);
return -1;
-};
+}
+
static inline int xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
if (xen_pcibk_backend && xen_pcibk_backend->init)
return xen_pcibk_backend->init(pdev);
return -1;
-};
+}
+
static inline int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
publish_pci_root_cb cb)
{
if (xen_pcibk_backend && xen_pcibk_backend->publish)
return xen_pcibk_backend->publish(pdev, cb);
return -1;
-};
+}
+
static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
if (xen_pcibk_backend && xen_pcibk_backend->free)
return xen_pcibk_backend->free(pdev);
-};
+}
+
/* Handles events from front-end */
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
void xen_pcibk_do_op(struct work_struct *data);
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 8c95c3415b7..63616d7453e 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -10,7 +10,6 @@
#include <linux/sched.h>
#include "pciback.h"
-#define DRV_NAME "xen-pciback"
int verbose_request;
module_param(verbose_request, int, 0644);
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 4a42cfb0959..46d140baebd 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -8,16 +8,15 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include "pciback.h"
#define PCI_SLOT_MAX 32
-#define DRV_NAME "xen-pciback"
struct vpci_dev_data {
/* Access to dev_list must be protected by lock */
struct list_head dev_list[PCI_SLOT_MAX];
- spinlock_t lock;
+ struct mutex lock;
};
static inline struct list_head *list_first(struct list_head *head)
@@ -33,13 +32,12 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev_entry *entry;
struct pci_dev *dev = NULL;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
- unsigned long flags;
if (domain != 0 || bus != 0)
return NULL;
if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
- spin_lock_irqsave(&vpci_dev->lock, flags);
+ mutex_lock(&vpci_dev->lock);
list_for_each_entry(entry,
&vpci_dev->dev_list[PCI_SLOT(devfn)],
@@ -50,7 +48,7 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
}
}
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ mutex_unlock(&vpci_dev->lock);
}
return dev;
}
@@ -71,7 +69,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
int err = 0, slot, func = -1;
struct pci_dev_entry *t, *dev_entry;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
- unsigned long flags;
if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
err = -EFAULT;
@@ -90,7 +87,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
dev_entry->dev = dev;
- spin_lock_irqsave(&vpci_dev->lock, flags);
+ mutex_lock(&vpci_dev->lock);
/* Keep multi-function devices together on the virtual PCI bus */
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
@@ -129,7 +126,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
"No more space on root virtual PCI bus");
unlock:
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ mutex_unlock(&vpci_dev->lock);
/* Publish this device. */
if (!err)
@@ -145,14 +142,13 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
int slot;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
struct pci_dev *found_dev = NULL;
- unsigned long flags;
- spin_lock_irqsave(&vpci_dev->lock, flags);
+ mutex_lock(&vpci_dev->lock);
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
- struct pci_dev_entry *e, *tmp;
- list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
- list) {
+ struct pci_dev_entry *e;
+
+ list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
if (e->dev == dev) {
list_del(&e->list);
found_dev = e->dev;
@@ -163,7 +159,7 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
}
out:
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ mutex_unlock(&vpci_dev->lock);
if (found_dev)
pcistub_put_pci_dev(found_dev);
@@ -178,7 +174,7 @@ static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
if (!vpci_dev)
return -ENOMEM;
- spin_lock_init(&vpci_dev->lock);
+ mutex_init(&vpci_dev->lock);
for (slot = 0; slot < PCI_SLOT_MAX; slot++)
INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
@@ -222,10 +218,9 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
struct pci_dev_entry *entry;
struct pci_dev *dev = NULL;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
- unsigned long flags;
int found = 0, slot;
- spin_lock_irqsave(&vpci_dev->lock, flags);
+ mutex_lock(&vpci_dev->lock);
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
list_for_each_entry(entry,
&vpci_dev->dev_list[slot],
@@ -243,11 +238,11 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
}
}
}
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ mutex_unlock(&vpci_dev->lock);
return found;
}
-struct xen_pcibk_backend xen_pcibk_vpci_backend = {
+const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
.name = "vpci",
.init = __xen_pcibk_init_devices,
.free = __xen_pcibk_release_devices,
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 978d2c6f5dc..075525945e3 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -13,7 +13,6 @@
#include <asm/xen/pci.h>
#include "pciback.h"
-#define DRV_NAME "xen-pciback"
#define INVALID_EVTCHN_IRQ (-1)
struct workqueue_struct *xen_pcibk_wq;
@@ -44,7 +43,7 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
pdev->xdev = xdev;
dev_set_drvdata(&xdev->dev, pdev);
- spin_lock_init(&pdev->dev_lock);
+ mutex_init(&pdev->dev_lock);
pdev->sh_info = NULL;
pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
@@ -62,14 +61,12 @@ out:
static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)
{
- spin_lock(&pdev->dev_lock);
-
+ mutex_lock(&pdev->dev_lock);
/* Ensure the guest can't trigger our handler before removing devices */
if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ) {
unbind_from_irqhandler(pdev->evtchn_irq, pdev);
pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
}
- spin_unlock(&pdev->dev_lock);
/* If the driver domain started an op, make sure we complete it
* before releasing the shared memory */
@@ -77,13 +74,11 @@ static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)
/* Note, the workqueue does not use spinlocks at all.*/
flush_workqueue(xen_pcibk_wq);
- spin_lock(&pdev->dev_lock);
if (pdev->sh_info != NULL) {
xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_info);
pdev->sh_info = NULL;
}
- spin_unlock(&pdev->dev_lock);
-
+ mutex_unlock(&pdev->dev_lock);
}
static void free_pdev(struct xen_pcibk_device *pdev)
@@ -120,9 +115,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
goto out;
}
- spin_lock(&pdev->dev_lock);
pdev->sh_info = vaddr;
- spin_unlock(&pdev->dev_lock);
err = bind_interdomain_evtchn_to_irqhandler(
pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event,
@@ -132,10 +125,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
"Error binding event channel to IRQ");
goto out;
}
-
- spin_lock(&pdev->dev_lock);
pdev->evtchn_irq = err;
- spin_unlock(&pdev->dev_lock);
err = 0;
dev_dbg(&pdev->xdev->dev, "Attached!\n");
@@ -150,6 +140,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
char *magic = NULL;
+ mutex_lock(&pdev->dev_lock);
/* Make sure we only do this setup once */
if (xenbus_read_driver_state(pdev->xdev->nodename) !=
XenbusStateInitialised)
@@ -176,7 +167,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
xenbus_dev_fatal(pdev->xdev, -EFAULT,
"version mismatch (%s/%s) with pcifront - "
- "halting xen_pcibk",
+ "halting " DRV_NAME,
magic, XEN_PCI_MAGIC);
goto out;
}
@@ -194,6 +185,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
out:
+ mutex_unlock(&pdev->dev_lock);
kfree(magic);
@@ -249,6 +241,7 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
goto out;
dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
+ dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
if (xen_register_device_domain_owner(dev,
pdev->xdev->otherend_id) != 0) {
dev_err(&dev->dev, "device has been assigned to another " \
@@ -288,6 +281,7 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
}
dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id);
+ dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
xen_unregister_device_domain_owner(dev);
xen_pcibk_release_pci_dev(pdev, dev);
@@ -369,6 +363,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
+ mutex_lock(&pdev->dev_lock);
/* Make sure we only reconfigure once */
if (xenbus_read_driver_state(pdev->xdev->nodename) !=
XenbusStateReconfiguring)
@@ -506,6 +501,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
}
out:
+ mutex_unlock(&pdev->dev_lock);
return 0;
}
@@ -562,6 +558,7 @@ static int xen_pcibk_setup_backend(struct xen_pcibk_device *pdev)
char dev_str[64];
char state_str[64];
+ mutex_lock(&pdev->dev_lock);
/* It's possible we could get the call to setup twice, so make sure
* we're not already connected.
*/
@@ -642,10 +639,10 @@ static int xen_pcibk_setup_backend(struct xen_pcibk_device *pdev)
"Error switching to initialised state!");
out:
+ mutex_unlock(&pdev->dev_lock);
if (!err)
/* see if pcifront is already configured (if not, we'll wait) */
xen_pcibk_attach(pdev);
-
return err;
}
@@ -724,7 +721,7 @@ static struct xenbus_driver xenbus_xen_pcibk_driver = {
.otherend_changed = xen_pcibk_frontend_changed,
};
-struct xen_pcibk_backend *xen_pcibk_backend;
+const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;
int __init xen_pcibk_xenbus_register(void)
{
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 6ea852e2516..d93c70857e0 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -68,6 +68,8 @@
*/
#include <linux/kernel.h>
+#include <linux/bootmem.h>
+#include <linux/swap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
@@ -93,6 +95,15 @@ static unsigned int selfballoon_uphysteresis __read_mostly = 1;
/* In HZ, controls frequency of worker invocation. */
static unsigned int selfballoon_interval __read_mostly = 5;
+/*
+ * Minimum usable RAM in MB for selfballooning target for balloon.
+ * If non-zero, it is added to totalreserve_pages and self-ballooning
+ * will not balloon below the sum. If zero, a piecewise linear function
+ * is calculated as a minimum and added to totalreserve_pages. Note that
+ * setting this value indiscriminately may cause OOMs and crashes.
+ */
+static unsigned int selfballoon_min_usable_mb;
+
static void selfballoon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
@@ -189,20 +200,23 @@ static int __init xen_selfballooning_setup(char *s)
__setup("selfballooning", xen_selfballooning_setup);
#endif /* CONFIG_FRONTSWAP */
+#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+
/*
* Use current balloon size, the goal (vm_committed_as), and hysteresis
* parameters to set a new target balloon size
*/
static void selfballoon_process(struct work_struct *work)
{
- unsigned long cur_pages, goal_pages, tgt_pages;
+ unsigned long cur_pages, goal_pages, tgt_pages, floor_pages;
+ unsigned long useful_pages;
bool reset_timer = false;
if (xen_selfballooning_enabled) {
- cur_pages = balloon_stats.current_pages;
+ cur_pages = totalram_pages;
tgt_pages = cur_pages; /* default is no change */
goal_pages = percpu_counter_read_positive(&vm_committed_as) +
- balloon_stats.current_pages - totalram_pages;
+ totalreserve_pages;
#ifdef CONFIG_FRONTSWAP
/* allow space for frontswap pages to be repatriated */
if (frontswap_selfshrinking && frontswap_enabled)
@@ -217,7 +231,26 @@ static void selfballoon_process(struct work_struct *work)
((goal_pages - cur_pages) /
selfballoon_uphysteresis);
/* else if cur_pages == goal_pages, no change */
- balloon_set_new_target(tgt_pages);
+ useful_pages = max_pfn - totalreserve_pages;
+ if (selfballoon_min_usable_mb != 0)
+ floor_pages = totalreserve_pages +
+ MB2PAGES(selfballoon_min_usable_mb);
+ /* piecewise linear function ending in ~3% slope */
+ else if (useful_pages < MB2PAGES(16))
+ floor_pages = max_pfn; /* not worth ballooning */
+ else if (useful_pages < MB2PAGES(64))
+ floor_pages = totalreserve_pages + MB2PAGES(16) +
+ ((useful_pages - MB2PAGES(16)) >> 1);
+ else if (useful_pages < MB2PAGES(512))
+ floor_pages = totalreserve_pages + MB2PAGES(40) +
+ ((useful_pages - MB2PAGES(40)) >> 3);
+ else /* useful_pages >= MB2PAGES(512) */
+ floor_pages = totalreserve_pages + MB2PAGES(99) +
+ ((useful_pages - MB2PAGES(99)) >> 5);
+ if (tgt_pages < floor_pages)
+ tgt_pages = floor_pages;
+ balloon_set_new_target(tgt_pages +
+ balloon_stats.current_pages - totalram_pages);
reset_timer = true;
}
#ifdef CONFIG_FRONTSWAP
@@ -340,6 +373,31 @@ static ssize_t store_selfballoon_uphys(struct sys_device *dev,
static SYSDEV_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,
show_selfballoon_uphys, store_selfballoon_uphys);
+SELFBALLOON_SHOW(selfballoon_min_usable_mb, "%d\n",
+ selfballoon_min_usable_mb);
+
+static ssize_t store_selfballoon_min_usable_mb(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ selfballoon_min_usable_mb = val;
+ return count;
+}
+
+static SYSDEV_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
+ show_selfballoon_min_usable_mb,
+ store_selfballoon_min_usable_mb);
+
+
#ifdef CONFIG_FRONTSWAP
SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
@@ -421,6 +479,7 @@ static struct attribute *selfballoon_attrs[] = {
&attr_selfballoon_interval.attr,
&attr_selfballoon_downhysteresis.attr,
&attr_selfballoon_uphysteresis.attr,
+ &attr_selfballoon_min_usable_mb.attr,
#ifdef CONFIG_FRONTSWAP
&attr_frontswap_selfshrinking.attr,
&attr_frontswap_hysteresis.attr,
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 090c61ee8fd..2eff7a6aaa2 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -212,7 +212,9 @@ int xb_init_comms(void)
printk(KERN_WARNING "XENBUS response ring is not quiescent "
"(%08x:%08x): fixing up\n",
intf->rsp_cons, intf->rsp_prod);
- intf->rsp_cons = intf->rsp_prod;
+ /* breaks kdump */
+ if (!reset_devices)
+ intf->rsp_cons = intf->rsp_prod;
}
if (xenbus_irq) {
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index bd2f90c9ac8..cef9b0bf63d 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -684,64 +684,74 @@ static int __init xenbus_probe_initcall(void)
device_initcall(xenbus_probe_initcall);
-static int __init xenbus_init(void)
+/* Set up event channel for xenstored which is run as a local process
+ * (this is normally used only in dom0)
+ */
+static int __init xenstored_local_init(void)
{
int err = 0;
unsigned long page = 0;
+ struct evtchn_alloc_unbound alloc_unbound;
- DPRINTK("");
+ /* Allocate Xenstore page */
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page)
+ goto out_err;
- err = -ENODEV;
- if (!xen_domain())
- return err;
+ xen_store_mfn = xen_start_info->store_mfn =
+ pfn_to_mfn(virt_to_phys((void *)page) >>
+ PAGE_SHIFT);
- /*
- * Domain0 doesn't have a store_evtchn or store_mfn yet.
- */
- if (xen_initial_domain()) {
- struct evtchn_alloc_unbound alloc_unbound;
+ /* Next allocate a local port which xenstored can bind to */
+ alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.remote_dom = DOMID_SELF;
- /* Allocate Xenstore page */
- page = get_zeroed_page(GFP_KERNEL);
- if (!page)
- goto out_error;
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+ &alloc_unbound);
+ if (err == -ENOSYS)
+ goto out_err;
- xen_store_mfn = xen_start_info->store_mfn =
- pfn_to_mfn(virt_to_phys((void *)page) >>
- PAGE_SHIFT);
+ BUG_ON(err);
+ xen_store_evtchn = xen_start_info->store_evtchn =
+ alloc_unbound.port;
- /* Next allocate a local port which xenstored can bind to */
- alloc_unbound.dom = DOMID_SELF;
- alloc_unbound.remote_dom = 0;
+ return 0;
- err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
- &alloc_unbound);
- if (err == -ENOSYS)
- goto out_error;
+ out_err:
+ if (page != 0)
+ free_page(page);
+ return err;
+}
- BUG_ON(err);
- xen_store_evtchn = xen_start_info->store_evtchn =
- alloc_unbound.port;
+static int __init xenbus_init(void)
+{
+ int err = 0;
- xen_store_interface = mfn_to_virt(xen_store_mfn);
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (xen_hvm_domain()) {
+ uint64_t v = 0;
+ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+ if (err)
+ goto out_error;
+ xen_store_evtchn = (int)v;
+ err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+ if (err)
+ goto out_error;
+ xen_store_mfn = (unsigned long)v;
+ xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
} else {
- if (xen_hvm_domain()) {
- uint64_t v = 0;
- err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
- if (err)
- goto out_error;
- xen_store_evtchn = (int)v;
- err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+ xen_store_evtchn = xen_start_info->store_evtchn;
+ xen_store_mfn = xen_start_info->store_mfn;
+ if (xen_store_evtchn)
+ xenstored_ready = 1;
+ else {
+ err = xenstored_local_init();
if (err)
goto out_error;
- xen_store_mfn = (unsigned long)v;
- xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
- } else {
- xen_store_evtchn = xen_start_info->store_evtchn;
- xen_store_mfn = xen_start_info->store_mfn;
- xen_store_interface = mfn_to_virt(xen_store_mfn);
- xenstored_ready = 1;
}
+ xen_store_interface = mfn_to_virt(xen_store_mfn);
}
/* Initialize the interface to xenstore. */
@@ -760,12 +770,7 @@ static int __init xenbus_init(void)
proc_mkdir("xen", NULL);
#endif
- return 0;
-
- out_error:
- if (page != 0)
- free_page(page);
-
+ out_error:
return err;
}
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 60adf919d78..32417b5064f 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -104,8 +104,6 @@ static int xenbus_uevent_backend(struct device *dev,
xdev = to_xenbus_device(dev);
bus = container_of(xdev->dev.bus, struct xen_bus_type, bus);
- if (xdev == NULL)
- return -ENODEV;
if (add_uevent_var(env, "MODALIAS=xen-backend:%s", xdev->devicetype))
return -ENOMEM;
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index ed2ba474a56..540587e18a9 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -248,10 +248,131 @@ int __xenbus_register_frontend(struct xenbus_driver *drv,
}
EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq);
+static int backend_state;
+
+static void xenbus_reset_backend_state_changed(struct xenbus_watch *w,
+ const char **v, unsigned int l)
+{
+ xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state);
+ printk(KERN_DEBUG "XENBUS: backend %s %s\n",
+ v[XS_WATCH_PATH], xenbus_strstate(backend_state));
+ wake_up(&backend_state_wq);
+}
+
+static void xenbus_reset_wait_for_backend(char *be, int expected)
+{
+ long timeout;
+ timeout = wait_event_interruptible_timeout(backend_state_wq,
+ backend_state == expected, 5 * HZ);
+ if (timeout <= 0)
+ printk(KERN_INFO "XENBUS: backend %s timed out.\n", be);
+}
+
+/*
+ * Reset frontend if it is in Connected or Closed state.
+ * Wait for backend to catch up.
+ * State Connected happens during kdump, Closed after kexec.
+ */
+static void xenbus_reset_frontend(char *fe, char *be, int be_state)
+{
+ struct xenbus_watch be_watch;
+
+ printk(KERN_DEBUG "XENBUS: backend %s %s\n",
+ be, xenbus_strstate(be_state));
+
+ memset(&be_watch, 0, sizeof(be_watch));
+ be_watch.node = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/state", be);
+ if (!be_watch.node)
+ return;
+
+ be_watch.callback = xenbus_reset_backend_state_changed;
+ backend_state = XenbusStateUnknown;
+
+ printk(KERN_INFO "XENBUS: triggering reconnect on %s\n", be);
+ register_xenbus_watch(&be_watch);
+
+ /* fall through to forward backend to state XenbusStateInitialising */
+ switch (be_state) {
+ case XenbusStateConnected:
+ xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing);
+ xenbus_reset_wait_for_backend(be, XenbusStateClosing);
+
+ case XenbusStateClosing:
+ xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed);
+ xenbus_reset_wait_for_backend(be, XenbusStateClosed);
+
+ case XenbusStateClosed:
+ xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising);
+ xenbus_reset_wait_for_backend(be, XenbusStateInitWait);
+ }
+
+ unregister_xenbus_watch(&be_watch);
+ printk(KERN_INFO "XENBUS: reconnect done on %s\n", be);
+ kfree(be_watch.node);
+}
+
+static void xenbus_check_frontend(char *class, char *dev)
+{
+ int be_state, fe_state, err;
+ char *backend, *frontend;
+
+ frontend = kasprintf(GFP_NOIO | __GFP_HIGH, "device/%s/%s", class, dev);
+ if (!frontend)
+ return;
+
+ err = xenbus_scanf(XBT_NIL, frontend, "state", "%i", &fe_state);
+ if (err != 1)
+ goto out;
+
+ switch (fe_state) {
+ case XenbusStateConnected:
+ case XenbusStateClosed:
+ printk(KERN_DEBUG "XENBUS: frontend %s %s\n",
+ frontend, xenbus_strstate(fe_state));
+ backend = xenbus_read(XBT_NIL, frontend, "backend", NULL);
+ if (!backend || IS_ERR(backend))
+ goto out;
+ err = xenbus_scanf(XBT_NIL, backend, "state", "%i", &be_state);
+ if (err == 1)
+ xenbus_reset_frontend(frontend, backend, be_state);
+ kfree(backend);
+ break;
+ default:
+ break;
+ }
+out:
+ kfree(frontend);
+}
+
+static void xenbus_reset_state(void)
+{
+ char **devclass, **dev;
+ int devclass_n, dev_n;
+ int i, j;
+
+ devclass = xenbus_directory(XBT_NIL, "device", "", &devclass_n);
+ if (IS_ERR(devclass))
+ return;
+
+ for (i = 0; i < devclass_n; i++) {
+ dev = xenbus_directory(XBT_NIL, "device", devclass[i], &dev_n);
+ if (IS_ERR(dev))
+ continue;
+ for (j = 0; j < dev_n; j++)
+ xenbus_check_frontend(devclass[i], dev[j]);
+ kfree(dev);
+ }
+ kfree(devclass);
+}
+
static int frontend_probe_and_watch(struct notifier_block *notifier,
unsigned long event,
void *data)
{
+ /* reset devices in Connected or Closed state */
+ if (xen_hvm_domain())
+ xenbus_reset_state();
/* Enumerate devices in xenstore and watch for changes. */
xenbus_probe_devices(&xenbus_frontend);
register_xenbus_watch(&fe_watch);
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 5534690075a..b3b8f2f3ad1 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -45,6 +45,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <xen/xenbus.h>
+#include <xen/xen.h>
#include "xenbus_comms.h"
struct xs_stored_msg {
@@ -620,6 +621,15 @@ static struct xenbus_watch *find_watch(const char *token)
return NULL;
}
+static void xs_reset_watches(void)
+{
+ int err;
+
+ err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
+ if (err && err != -EEXIST)
+ printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
+}
+
/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
@@ -638,8 +648,7 @@ int register_xenbus_watch(struct xenbus_watch *watch)
err = xs_watch(watch->node, token);
- /* Ignore errors due to multiple registration. */
- if ((err != 0) && (err != -EEXIST)) {
+ if (err) {
spin_lock(&watches_lock);
list_del(&watch->list);
spin_unlock(&watches_lock);
@@ -897,5 +906,9 @@ int xs_init(void)
if (IS_ERR(task))
return PTR_ERR(task);
+ /* shutdown watches for kexec boot */
+ if (xen_hvm_domain())
+ xs_reset_watches();
+
return 0;
}
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index 7ee2b6e7178..229624f867d 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -37,6 +37,7 @@ zorro_match_device(const struct zorro_device_id *ids,
}
return NULL;
}
+EXPORT_SYMBOL(zorro_match_device);
static int zorro_device_probe(struct device *dev)
@@ -91,6 +92,7 @@ int zorro_register_driver(struct zorro_driver *drv)
/* register with core */
return driver_register(&drv->driver);
}
+EXPORT_SYMBOL(zorro_register_driver);
/**
@@ -107,6 +109,7 @@ void zorro_unregister_driver(struct zorro_driver *drv)
{
driver_unregister(&drv->driver);
}
+EXPORT_SYMBOL(zorro_unregister_driver);
/**
@@ -168,6 +171,7 @@ struct bus_type zorro_bus_type = {
.probe = zorro_device_probe,
.remove = zorro_device_remove,
};
+EXPORT_SYMBOL(zorro_bus_type);
static int __init zorro_driver_init(void)
@@ -177,7 +181,3 @@ static int __init zorro_driver_init(void)
postcore_initcall(zorro_driver_init);
-EXPORT_SYMBOL(zorro_match_device);
-EXPORT_SYMBOL(zorro_register_driver);
-EXPORT_SYMBOL(zorro_unregister_driver);
-EXPORT_SYMBOL(zorro_bus_type);
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index ef966188611..2b78014a124 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -132,21 +132,19 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
- int token;
+ int token, r;
if (!*p)
continue;
token = match_token(p, tokens, args);
- if (token < Opt_uname) {
- int r = match_int(&args[0], &option);
+ switch (token) {
+ case Opt_debug:
+ r = match_int(&args[0], &option);
if (r < 0) {
P9_DPRINTK(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
+ "integer field, but no integer?\n");
ret = r;
continue;
}
- }
- switch (token) {
- case Opt_debug:
v9ses->debug = option;
#ifdef CONFIG_NET_9P_DEBUG
p9_debug_level = option;
@@ -154,12 +152,33 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
break;
case Opt_dfltuid:
+ r = match_int(&args[0], &option);
+ if (r < 0) {
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
+ ret = r;
+ continue;
+ }
v9ses->dfltuid = option;
break;
case Opt_dfltgid:
+ r = match_int(&args[0], &option);
+ if (r < 0) {
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
+ ret = r;
+ continue;
+ }
v9ses->dfltgid = option;
break;
case Opt_afid:
+ r = match_int(&args[0], &option);
+ if (r < 0) {
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
+ ret = r;
+ continue;
+ }
v9ses->afid = option;
break;
case Opt_uname:
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 9c2bdda5cd9..598fff1a54e 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -165,9 +165,8 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
while (rdir->head < rdir->tail) {
p9stat_init(&st);
- err = p9stat_read(rdir->buf + rdir->head,
- rdir->tail - rdir->head, &st,
- fid->clnt->proto_version);
+ err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
+ rdir->tail - rdir->head, &st);
if (err) {
P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err);
err = -EIO;
@@ -231,7 +230,7 @@ static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent,
while (err == 0) {
if (rdir->tail == rdir->head) {
err = p9_client_readdir(fid, rdir->buf, buflen,
- filp->f_pos);
+ filp->f_pos);
if (err <= 0)
goto unlock_and_exit;
@@ -241,10 +240,9 @@ static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent,
while (rdir->head < rdir->tail) {
- err = p9dirent_read(rdir->buf + rdir->head,
- rdir->tail - rdir->head,
- &curdirent,
- fid->clnt->proto_version);
+ err = p9dirent_read(fid->clnt, rdir->buf + rdir->head,
+ rdir->tail - rdir->head,
+ &curdirent);
if (err < 0) {
P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err);
err = -EIO;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index e3c03db3c78..b5a1076aaa6 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -278,10 +278,8 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
case S_IFSOCK:
if (v9fs_proto_dotl(v9ses)) {
inode->i_op = &v9fs_file_inode_operations_dotl;
- inode->i_fop = &v9fs_file_operations_dotl;
} else if (v9fs_proto_dotu(v9ses)) {
inode->i_op = &v9fs_file_inode_operations;
- inode->i_fop = &v9fs_file_operations;
} else {
P9_DPRINTK(P9_DEBUG_ERROR,
"special files without extended mode\n");
diff --git a/fs/attr.c b/fs/attr.c
index 538e27959d3..7ee7ba48831 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -13,6 +13,7 @@
#include <linux/fsnotify.h>
#include <linux/fcntl.h>
#include <linux/security.h>
+#include <linux/evm.h>
/**
* inode_change_ok - check if attribute changes to an inode are allowed
@@ -237,8 +238,10 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
else
error = simple_setattr(dentry, attr);
- if (!error)
+ if (!error) {
fsnotify_change(dentry, ia_valid);
+ evm_inode_post_setattr(dentry, ia_valid);
+ }
return error;
}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 538f65a79ec..dae5dfe41ba 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1047,7 +1047,16 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (!max_to_defrag)
max_to_defrag = last_index - 1;
- while (i <= last_index && defrag_count < max_to_defrag) {
+ /*
+ * make writeback starts from i, so the defrag range can be
+ * written sequentially.
+ */
+ if (i < inode->i_mapping->writeback_index)
+ inode->i_mapping->writeback_index = i;
+
+ while (i <= last_index && defrag_count < max_to_defrag &&
+ (i < (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT)) {
/*
* make sure we stop running if someone unmounts
* the FS
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 69565e5fc6a..426aa464f1a 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -383,36 +383,36 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
XATTR_REPLACE);
}
-int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
- struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int btrfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *suffix;
+ const struct xattr *xattr;
+ struct btrfs_trans_handle *trans = fs_info;
char *name;
+ int err = 0;
- err = security_inode_init_security(inode, dir, qstr, &suffix, &value,
- &len);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
- }
-
- name = kmalloc(XATTR_SECURITY_PREFIX_LEN + strlen(suffix) + 1,
- GFP_NOFS);
- if (!name) {
- err = -ENOMEM;
- } else {
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
+ strlen(xattr->name) + 1, GFP_NOFS);
+ if (!name) {
+ err = -ENOMEM;
+ break;
+ }
strcpy(name, XATTR_SECURITY_PREFIX);
- strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
- err = __btrfs_setxattr(trans, inode, name, value, len, 0);
+ strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
+ err = __btrfs_setxattr(trans, inode, name,
+ xattr->value, xattr->value_len, 0);
kfree(name);
+ if (err < 0)
+ break;
}
-
- kfree(suffix);
- kfree(value);
return err;
}
+
+int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &btrfs_initxattrs, trans);
+}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f4af4cc3750..62abf9fd6ff 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2018,7 +2018,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
warned_on_ntlm = true;
cERROR(1, "default security mechanism requested. The default "
"security mechanism will be upgraded from ntlm to "
- "ntlmv2 in kernel release 3.1");
+ "ntlmv2 in kernel release 3.2");
}
ses->overrideSecFlg = volume_info->secFlg;
@@ -2877,9 +2877,9 @@ cleanup_volume_info_contents(struct smb_vol *volume_info)
{
kfree(volume_info->username);
kzfree(volume_info->password);
- kfree(volume_info->UNC);
if (volume_info->UNCip != volume_info->UNC + 2)
kfree(volume_info->UNCip);
+ kfree(volume_info->UNC);
kfree(volume_info->domainname);
kfree(volume_info->iocharset);
kfree(volume_info->prepath);
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 2a22fb2989e..c3230888214 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -22,6 +22,7 @@
#include <linux/fs.h>
#include <linux/posix_acl_xattr.h>
#include <linux/slab.h>
+#include <linux/xattr.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
@@ -31,16 +32,8 @@
#define MAX_EA_VALUE_SIZE 65535
#define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib"
#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
-#define CIFS_XATTR_USER_PREFIX "user."
-#define CIFS_XATTR_SYSTEM_PREFIX "system."
-#define CIFS_XATTR_OS2_PREFIX "os2."
-#define CIFS_XATTR_SECURITY_PREFIX "security."
-#define CIFS_XATTR_TRUSTED_PREFIX "trusted."
-#define XATTR_TRUSTED_PREFIX_LEN 8
-#define XATTR_SECURITY_PREFIX_LEN 9
-/* BB need to add server (Samba e.g) support for security and trusted prefix */
-
+/* BB need to add server (Samba e.g) support for security and trusted prefix */
int cifs_removexattr(struct dentry *direntry, const char *ea_name)
{
@@ -76,8 +69,8 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
}
if (ea_name == NULL) {
cFYI(1, "Null xattr names not supported");
- } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5)
- && (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4))) {
+ } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
+ && (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN))) {
cFYI(1,
"illegal xattr request %s (only user namespace supported)",
ea_name);
@@ -88,7 +81,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto remove_ea_exit;
- ea_name += 5; /* skip past user. prefix */
+ ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, NULL,
(__u16)0, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -149,21 +142,23 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
if (ea_name == NULL) {
cFYI(1, "Null xattr names not supported");
- } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
+ } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
+ == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto set_ea_exit;
if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0)
cFYI(1, "attempt to set cifs inode metadata");
- ea_name += 5; /* skip past user. prefix */
+ ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
(__u16)value_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
- } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
+ } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)
+ == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto set_ea_exit;
- ea_name += 4; /* skip past os2. prefix */
+ ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
(__u16)value_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -269,7 +264,8 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
/* return alt name if available as pseudo attr */
if (ea_name == NULL) {
cFYI(1, "Null xattr names not supported");
- } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
+ } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
+ == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto get_ea_exit;
@@ -277,15 +273,15 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
cFYI(1, "attempt to query cifs inode metadata");
/* revalidate/getattr then populate from inode */
} /* BB add else when above is implemented */
- ea_name += 5; /* skip past user. prefix */
+ ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
- } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
+ } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto get_ea_exit;
- ea_name += 4; /* skip past os2. prefix */
+ ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -339,10 +335,10 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
cFYI(1, "Query CIFS ACL not supported yet");
#endif /* CONFIG_CIFS_ACL */
} else if (strncmp(ea_name,
- CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
+ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
cFYI(1, "Trusted xattr namespace not supported yet");
} else if (strncmp(ea_name,
- CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
+ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
cFYI(1, "Security xattr namespace not supported yet");
} else
cFYI(1,
diff --git a/fs/coda/coda_linux.h b/fs/coda/coda_linux.h
index 44e17e9c21a..cc0ea9fe5ec 100644
--- a/fs/coda/coda_linux.h
+++ b/fs/coda/coda_linux.h
@@ -59,12 +59,11 @@ void coda_sysctl_clean(void);
#define CODA_ALLOC(ptr, cast, size) do { \
if (size < PAGE_SIZE) \
- ptr = kmalloc((unsigned long) size, GFP_KERNEL); \
+ ptr = kzalloc((unsigned long) size, GFP_KERNEL); \
else \
- ptr = (cast)vmalloc((unsigned long) size); \
+ ptr = (cast)vzalloc((unsigned long) size); \
if (!ptr) \
printk("kernel malloc returns 0 at %s:%d\n", __FILE__, __LINE__); \
- else memset( ptr, 0, size ); \
} while (0)
diff --git a/fs/compat.c b/fs/compat.c
index 58b1da45989..05e3f3d2cd7 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -37,7 +37,6 @@
#include <linux/dirent.h>
#include <linux/fsnotify.h>
#include <linux/highuid.h>
-#include <linux/nfsd/syscall.h>
#include <linux/personality.h>
#include <linux/rwsem.h>
#include <linux/tsacct_kern.h>
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index c83f4768eea..ca418aaf635 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -23,7 +23,8 @@
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*
- * Please see Documentation/filesystems/configfs.txt for more information.
+ * Please see Documentation/filesystems/configfs/configfs.txt for more
+ * information.
*/
#undef DEBUG
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index 76dc4c3e5d5..50cee7f9110 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -23,7 +23,7 @@
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*
- * Please see the file Documentation/filesystems/configfs.txt for
+ * Please see the file Documentation/filesystems/configfs/configfs.txt for
* critical information about using the config_item interface.
*/
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index e7a7a2f0732..f3a257d7a98 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -1,5 +1,5 @@
/*
- * file.c - part of debugfs, a tiny little debug file system
+ * inode.c - part of debugfs, a tiny little debug file system
*
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2004 IBM Inc.
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index fe047d966dc..9026fc91fe3 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -700,7 +700,7 @@ static const struct file_operations eventpoll_fops = {
.llseek = noop_llseek,
};
-/* Fast test to see if the file is an evenpoll file */
+/* Fast test to see if the file is an eventpoll file */
static inline int is_file_epoll(struct file *f)
{
return f->f_op == &eventpoll_fops;
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index 5d979b4347b..c922adc8ef4 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -46,28 +46,30 @@ ext2_xattr_security_set(struct dentry *dentry, const char *name,
value, size, flags);
}
-int
-ext2_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int ext2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *name;
+ const struct xattr *xattr;
+ int err = 0;
- err = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, 0);
+ if (err < 0)
+ break;
}
- err = ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY,
- name, value, len, 0);
- kfree(name);
- kfree(value);
return err;
}
+int
+ext2_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &ext2_initxattrs, NULL);
+}
+
const struct xattr_handler ext2_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = ext2_xattr_security_list,
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
index b8d9f83aa5c..3c218b8a51d 100644
--- a/fs/ext3/xattr_security.c
+++ b/fs/ext3/xattr_security.c
@@ -48,28 +48,32 @@ ext3_xattr_security_set(struct dentry *dentry, const char *name,
name, value, size, flags);
}
-int
-ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int ext3_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *name;
+ const struct xattr *xattr;
+ handle_t *handle = fs_info;
+ int err = 0;
- err = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = ext3_xattr_set_handle(handle, inode,
+ EXT3_XATTR_INDEX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, 0);
+ if (err < 0)
+ break;
}
- err = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_SECURITY,
- name, value, len, 0);
- kfree(name);
- kfree(value);
return err;
}
+int
+ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &ext3_initxattrs, handle);
+}
+
const struct xattr_handler ext3_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = ext3_xattr_security_list,
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index 007c3bfbf09..34e4350dd4d 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -48,28 +48,32 @@ ext4_xattr_security_set(struct dentry *dentry, const char *name,
name, value, size, flags);
}
-int
-ext4_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int ext4_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *name;
+ const struct xattr *xattr;
+ handle_t *handle = fs_info;
+ int err = 0;
- err = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = ext4_xattr_set_handle(handle, inode,
+ EXT4_XATTR_INDEX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, 0);
+ if (err < 0)
+ break;
}
- err = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_SECURITY,
- name, value, len, 0);
- kfree(name);
- kfree(value);
return err;
}
+int
+ext4_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &ext4_initxattrs, handle);
+}
+
const struct xattr_handler ext4_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = ext4_xattr_security_list,
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 900cf986aad..6525b804d5e 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -624,31 +624,29 @@ fail:
return error;
}
-static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip,
- const struct qstr *qstr)
+int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *name;
-
- err = security_inode_init_security(&ip->i_inode, &dip->i_inode, qstr,
- &name, &value, &len);
-
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
+ const struct xattr *xattr;
+ int err = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = __gfs2_xattr_set(inode, xattr->name, xattr->value,
+ xattr->value_len, 0,
+ GFS2_EATYPE_SECURITY);
+ if (err < 0)
+ break;
}
-
- err = __gfs2_xattr_set(&ip->i_inode, name, value, len, 0,
- GFS2_EATYPE_SECURITY);
- kfree(value);
- kfree(name);
-
return err;
}
+static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(&ip->i_inode, &dip->i_inode, qstr,
+ &gfs2_initxattrs, NULL);
+}
+
/**
* gfs2_create_inode - Create a new inode
* @dir: The parent directory
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index cfeb7164b08..0f20208df60 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -22,26 +22,29 @@
#include <linux/security.h>
#include "nodelist.h"
-/* ---- Initial Security Label Attachment -------------- */
-int jffs2_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+/* ---- Initial Security Label(s) Attachment callback --- */
+int jffs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int rc;
- size_t len;
- void *value;
- char *name;
+ const struct xattr *xattr;
+ int err = 0;
- rc = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
- if (rc) {
- if (rc == -EOPNOTSUPP)
- return 0;
- return rc;
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, 0);
+ if (err < 0)
+ break;
}
- rc = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, name, value, len, 0);
+ return err;
+}
- kfree(name);
- kfree(value);
- return rc;
+/* ---- Initial Security Label(s) Attachment ----------- */
+int jffs2_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &jffs2_initxattrs, NULL);
}
/* ---- XATTR Handler for "security.*" ----------------- */
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index e87fedef23d..26683e15b3a 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -1089,38 +1089,37 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
}
#ifdef CONFIG_JFS_SECURITY
-int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int jfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int rc;
- size_t len;
- void *value;
- char *suffix;
+ const struct xattr *xattr;
+ tid_t *tid = fs_info;
char *name;
-
- rc = security_inode_init_security(inode, dir, qstr, &suffix, &value,
- &len);
- if (rc) {
- if (rc == -EOPNOTSUPP)
- return 0;
- return rc;
- }
- name = kmalloc(XATTR_SECURITY_PREFIX_LEN + 1 + strlen(suffix),
- GFP_NOFS);
- if (!name) {
- rc = -ENOMEM;
- goto kmalloc_failed;
+ int err = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
+ strlen(xattr->name) + 1, GFP_NOFS);
+ if (!name) {
+ err = -ENOMEM;
+ break;
+ }
+ strcpy(name, XATTR_SECURITY_PREFIX);
+ strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
+
+ err = __jfs_setxattr(*tid, inode, name,
+ xattr->value, xattr->value_len, 0);
+ kfree(name);
+ if (err < 0)
+ break;
}
- strcpy(name, XATTR_SECURITY_PREFIX);
- strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
-
- rc = __jfs_setxattr(tid, inode, name, value, len, 0);
-
- kfree(name);
-kmalloc_failed:
- kfree(suffix);
- kfree(value);
+ return err;
+}
- return rc;
+int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &jfs_initxattrs, &tid);
}
#endif
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index b7c99bfb3da..6f29836ec0c 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -316,14 +316,8 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
struct hlist_node *pos;
struct nlm_host *host = NULL;
struct nsm_handle *nsm = NULL;
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- };
- struct sockaddr_in6 sin6 = {
- .sin6_family = AF_INET6,
- };
- struct sockaddr *src_sap;
- size_t src_len = rqstp->rq_addrlen;
+ struct sockaddr *src_sap = svc_daddr(rqstp);
+ size_t src_len = rqstp->rq_daddrlen;
struct nlm_lookup_host_info ni = {
.server = 1,
.sap = svc_addr(rqstp),
@@ -340,21 +334,6 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
mutex_lock(&nlm_host_mutex);
- switch (ni.sap->sa_family) {
- case AF_INET:
- sin.sin_addr.s_addr = rqstp->rq_daddr.addr.s_addr;
- src_sap = (struct sockaddr *)&sin;
- break;
- case AF_INET6:
- ipv6_addr_copy(&sin6.sin6_addr, &rqstp->rq_daddr.addr6);
- src_sap = (struct sockaddr *)&sin6;
- break;
- default:
- dprintk("lockd: %s failed; unrecognized address family\n",
- __func__);
- goto out;
- }
-
if (time_after_eq(jiffies, next_gc))
nlm_gc_hosts();
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index abfff9d7979..c061b9aa7dd 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -282,7 +282,7 @@ int lockd_up(void)
/*
* Create the kernel thread and wait for it to start.
*/
- nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0]);
+ nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
if (IS_ERR(nlmsvc_rqst)) {
error = PTR_ERR(nlmsvc_rqst);
nlmsvc_rqst = NULL;
diff --git a/fs/locks.c b/fs/locks.c
index 703f545097d..3b0d05dcd7c 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -60,7 +60,7 @@
*
* Initial implementation of mandatory locks. SunOS turned out to be
* a rotten model, so I implemented the "obvious" semantics.
- * See 'Documentation/mandatory.txt' for details.
+ * See 'Documentation/filesystems/mandatory-locking.txt' for details.
* Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
*
* Don't allow mandatory locks on mmap()'ed files. Added simple functions to
@@ -133,6 +133,20 @@
#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl) (fl->fl_flags & FL_LEASE)
+static bool lease_breaking(struct file_lock *fl)
+{
+ return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
+}
+
+static int target_leasetype(struct file_lock *fl)
+{
+ if (fl->fl_flags & FL_UNLOCK_PENDING)
+ return F_UNLCK;
+ if (fl->fl_flags & FL_DOWNGRADE_PENDING)
+ return F_RDLCK;
+ return fl->fl_type;
+}
+
int leases_enable = 1;
int lease_break_time = 45;
@@ -1119,6 +1133,17 @@ int locks_mandatory_area(int read_write, struct inode *inode,
EXPORT_SYMBOL(locks_mandatory_area);
+static void lease_clear_pending(struct file_lock *fl, int arg)
+{
+ switch (arg) {
+ case F_UNLCK:
+ fl->fl_flags &= ~FL_UNLOCK_PENDING;
+ /* fall through: */
+ case F_RDLCK:
+ fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
+ }
+}
+
/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
@@ -1127,6 +1152,7 @@ int lease_modify(struct file_lock **before, int arg)
if (error)
return error;
+ lease_clear_pending(fl, arg);
locks_wake_up_blocks(fl);
if (arg == F_UNLCK)
locks_delete_lock(before);
@@ -1135,19 +1161,25 @@ int lease_modify(struct file_lock **before, int arg)
EXPORT_SYMBOL(lease_modify);
+static bool past_time(unsigned long then)
+{
+ if (!then)
+ /* 0 is a special value meaning "this never expires": */
+ return false;
+ return time_after(jiffies, then);
+}
+
static void time_out_leases(struct inode *inode)
{
struct file_lock **before;
struct file_lock *fl;
before = &inode->i_flock;
- while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
- if ((fl->fl_break_time == 0)
- || time_before(jiffies, fl->fl_break_time)) {
- before = &fl->fl_next;
- continue;
- }
- lease_modify(before, fl->fl_type & ~F_INPROGRESS);
+ while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
+ if (past_time(fl->fl_downgrade_time))
+ lease_modify(before, F_RDLCK);
+ if (past_time(fl->fl_break_time))
+ lease_modify(before, F_UNLCK);
if (fl == *before) /* lease_modify may have freed fl */
before = &fl->fl_next;
}
@@ -1165,7 +1197,7 @@ static void time_out_leases(struct inode *inode)
*/
int __break_lease(struct inode *inode, unsigned int mode)
{
- int error = 0, future;
+ int error = 0;
struct file_lock *new_fl, *flock;
struct file_lock *fl;
unsigned long break_time;
@@ -1182,24 +1214,13 @@ int __break_lease(struct inode *inode, unsigned int mode)
if ((flock == NULL) || !IS_LEASE(flock))
goto out;
+ if (!locks_conflict(flock, new_fl))
+ goto out;
+
for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
if (fl->fl_owner == current->files)
i_have_this_lease = 1;
- if (want_write) {
- /* If we want write access, we have to revoke any lease. */
- future = F_UNLCK | F_INPROGRESS;
- } else if (flock->fl_type & F_INPROGRESS) {
- /* If the lease is already being broken, we just leave it */
- future = flock->fl_type;
- } else if (flock->fl_type & F_WRLCK) {
- /* Downgrade the exclusive lease to a read-only lease. */
- future = F_RDLCK | F_INPROGRESS;
- } else {
- /* the existing lease was read-only, so we can read too. */
- goto out;
- }
-
if (IS_ERR(new_fl) && !i_have_this_lease
&& ((mode & O_NONBLOCK) == 0)) {
error = PTR_ERR(new_fl);
@@ -1214,12 +1235,18 @@ int __break_lease(struct inode *inode, unsigned int mode)
}
for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
- if (fl->fl_type != future) {
- fl->fl_type = future;
+ if (want_write) {
+ if (fl->fl_flags & FL_UNLOCK_PENDING)
+ continue;
+ fl->fl_flags |= FL_UNLOCK_PENDING;
fl->fl_break_time = break_time;
- /* lease must have lmops break callback */
- fl->fl_lmops->lm_break(fl);
+ } else {
+ if (lease_breaking(flock))
+ continue;
+ fl->fl_flags |= FL_DOWNGRADE_PENDING;
+ fl->fl_downgrade_time = break_time;
}
+ fl->fl_lmops->lm_break(fl);
}
if (i_have_this_lease || (mode & O_NONBLOCK)) {
@@ -1243,10 +1270,13 @@ restart:
if (error >= 0) {
if (error == 0)
time_out_leases(inode);
- /* Wait for the next lease that has not been broken yet */
+ /*
+ * Wait for the next conflicting lease that has not been
+ * broken yet
+ */
for (flock = inode->i_flock; flock && IS_LEASE(flock);
flock = flock->fl_next) {
- if (flock->fl_type & F_INPROGRESS)
+ if (locks_conflict(new_fl, flock))
goto restart;
}
error = 0;
@@ -1314,7 +1344,7 @@ int fcntl_getlease(struct file *filp)
for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
fl = fl->fl_next) {
if (fl->fl_file == filp) {
- type = fl->fl_type & ~F_INPROGRESS;
+ type = target_leasetype(fl);
break;
}
}
@@ -1322,50 +1352,23 @@ int fcntl_getlease(struct file *filp)
return type;
}
-/**
- * generic_setlease - sets a lease on an open file
- * @filp: file pointer
- * @arg: type of lease to obtain
- * @flp: input - file_lock to use, output - file_lock inserted
- *
- * The (input) flp->fl_lmops->lm_break function is required
- * by break_lease().
- *
- * Called with file_lock_lock held.
- */
-int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
+int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
{
struct file_lock *fl, **before, **my_before = NULL, *lease;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
- int error, rdlease_count = 0, wrlease_count = 0;
+ int error;
lease = *flp;
- error = -EACCES;
- if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
- goto out;
- error = -EINVAL;
- if (!S_ISREG(inode->i_mode))
+ error = -EAGAIN;
+ if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
goto out;
- error = security_file_lock(filp, arg);
- if (error)
+ if ((arg == F_WRLCK)
+ && ((dentry->d_count > 1)
+ || (atomic_read(&inode->i_count) > 1)))
goto out;
- time_out_leases(inode);
-
- BUG_ON(!(*flp)->fl_lmops->lm_break);
-
- if (arg != F_UNLCK) {
- error = -EAGAIN;
- if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
- goto out;
- if ((arg == F_WRLCK)
- && ((dentry->d_count > 1)
- || (atomic_read(&inode->i_count) > 1)))
- goto out;
- }
-
/*
* At this point, we know that if there is an exclusive
* lease on this file, then we hold it on this filp
@@ -1374,27 +1377,28 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
* then the file is not open by anyone (including us)
* except for this filp.
*/
+ error = -EAGAIN;
for (before = &inode->i_flock;
((fl = *before) != NULL) && IS_LEASE(fl);
before = &fl->fl_next) {
- if (fl->fl_file == filp)
+ if (fl->fl_file == filp) {
my_before = before;
- else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
- /*
- * Someone is in the process of opening this
- * file for writing so we may not take an
- * exclusive lease on it.
- */
- wrlease_count++;
- else
- rdlease_count++;
+ continue;
+ }
+ /*
+ * No exclusive leases if someone else has a lease on
+ * this file:
+ */
+ if (arg == F_WRLCK)
+ goto out;
+ /*
+ * Modifying our existing lease is OK, but no getting a
+ * new lease if someone else is opening for write:
+ */
+ if (fl->fl_flags & FL_UNLOCK_PENDING)
+ goto out;
}
- error = -EAGAIN;
- if ((arg == F_RDLCK && (wrlease_count > 0)) ||
- (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
- goto out;
-
if (my_before != NULL) {
error = lease->fl_lmops->lm_change(my_before, arg);
if (!error)
@@ -1402,9 +1406,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
goto out;
}
- if (arg == F_UNLCK)
- goto out;
-
error = -EINVAL;
if (!leases_enable)
goto out;
@@ -1415,6 +1416,62 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
out:
return error;
}
+
+int generic_delete_lease(struct file *filp, struct file_lock **flp)
+{
+ struct file_lock *fl, **before;
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+
+ for (before = &inode->i_flock;
+ ((fl = *before) != NULL) && IS_LEASE(fl);
+ before = &fl->fl_next) {
+ if (fl->fl_file != filp)
+ continue;
+ return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
+ }
+ return -EAGAIN;
+}
+
+/**
+ * generic_setlease - sets a lease on an open file
+ * @filp: file pointer
+ * @arg: type of lease to obtain
+ * @flp: input - file_lock to use, output - file_lock inserted
+ *
+ * The (input) flp->fl_lmops->lm_break function is required
+ * by break_lease().
+ *
+ * Called with file_lock_lock held.
+ */
+int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
+{
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ int error;
+
+ if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
+ return -EACCES;
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+ error = security_file_lock(filp, arg);
+ if (error)
+ return error;
+
+ time_out_leases(inode);
+
+ BUG_ON(!(*flp)->fl_lmops->lm_break);
+
+ switch (arg) {
+ case F_UNLCK:
+ return generic_delete_lease(filp, flp);
+ case F_RDLCK:
+ case F_WRLCK:
+ return generic_add_lease(filp, arg, flp);
+ default:
+ BUG();
+ }
+}
EXPORT_SYMBOL(generic_setlease);
static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
@@ -2126,7 +2183,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
}
} else if (IS_LEASE(fl)) {
seq_printf(f, "LEASE ");
- if (fl->fl_type & F_INPROGRESS)
+ if (lease_breaking(fl))
seq_printf(f, "BREAKING ");
else if (fl->fl_file)
seq_printf(f, "ACTIVE ");
@@ -2142,7 +2199,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
: (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
} else {
seq_printf(f, "%s ",
- (fl->fl_type & F_INPROGRESS)
+ (lease_breaking(fl))
? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
: (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
}
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 9561c8fc8bd..281ae95932c 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -176,17 +176,6 @@ retry:
return bio;
}
-static void bl_set_lo_fail(struct pnfs_layout_segment *lseg)
-{
- if (lseg->pls_range.iomode == IOMODE_RW) {
- dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
- set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
- } else {
- dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
- set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
- }
-}
-
/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
@@ -206,7 +195,7 @@ static void bl_end_io_read(struct bio *bio, int err)
if (!uptodate) {
if (!rdata->pnfs_error)
rdata->pnfs_error = -EIO;
- bl_set_lo_fail(rdata->lseg);
+ pnfs_set_lo_fail(rdata->lseg);
}
bio_put(bio);
put_parallel(par);
@@ -303,6 +292,7 @@ bl_read_pagelist(struct nfs_read_data *rdata)
bl_end_io_read, par);
if (IS_ERR(bio)) {
rdata->pnfs_error = PTR_ERR(bio);
+ bio = NULL;
goto out;
}
}
@@ -370,7 +360,7 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
if (!uptodate) {
if (!wdata->pnfs_error)
wdata->pnfs_error = -EIO;
- bl_set_lo_fail(wdata->lseg);
+ pnfs_set_lo_fail(wdata->lseg);
}
bio_put(bio);
put_parallel(par);
@@ -386,7 +376,7 @@ static void bl_end_io_write(struct bio *bio, int err)
if (!uptodate) {
if (!wdata->pnfs_error)
wdata->pnfs_error = -EIO;
- bl_set_lo_fail(wdata->lseg);
+ pnfs_set_lo_fail(wdata->lseg);
}
bio_put(bio);
put_parallel(par);
@@ -543,6 +533,11 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
fill_invalid_ext:
dprintk("%s need to zero %d pages\n", __func__, npg_zero);
for (;npg_zero > 0; npg_zero--) {
+ if (bl_is_sector_init(be->be_inval, isect)) {
+ dprintk("isect %llu already init\n",
+ (unsigned long long)isect);
+ goto next_page;
+ }
/* page ref released in bl_end_io_write_zero */
index = isect >> PAGE_CACHE_SECTOR_SHIFT;
dprintk("%s zero %dth page: index %lu isect %llu\n",
@@ -562,8 +557,7 @@ fill_invalid_ext:
* PageUptodate: It was read before
* sector_initialized: already written out
*/
- if (PageDirty(page) || PageWriteback(page) ||
- bl_is_sector_init(be->be_inval, isect)) {
+ if (PageDirty(page) || PageWriteback(page)) {
print_page(page);
unlock_page(page);
page_cache_release(page);
@@ -592,6 +586,7 @@ fill_invalid_ext:
bl_end_io_write_zero, par);
if (IS_ERR(bio)) {
wdata->pnfs_error = PTR_ERR(bio);
+ bio = NULL;
goto out;
}
/* FIXME: This should be done in bi_end_io */
@@ -640,6 +635,7 @@ next_page:
bl_end_io_write, par);
if (IS_ERR(bio)) {
wdata->pnfs_error = PTR_ERR(bio);
+ bio = NULL;
goto out;
}
isect += PAGE_CACHE_SECTORS;
@@ -805,7 +801,7 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
struct nfs4_deviceid *d_id)
{
struct pnfs_device *dev;
- struct pnfs_block_dev *rv = NULL;
+ struct pnfs_block_dev *rv;
u32 max_resp_sz;
int max_pages;
struct page **pages = NULL;
@@ -823,18 +819,20 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
dev = kmalloc(sizeof(*dev), GFP_NOFS);
if (!dev) {
dprintk("%s kmalloc failed\n", __func__);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
if (pages == NULL) {
kfree(dev);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
for (i = 0; i < max_pages; i++) {
pages[i] = alloc_page(GFP_NOFS);
- if (!pages[i])
+ if (!pages[i]) {
+ rv = ERR_PTR(-ENOMEM);
goto out_free;
+ }
}
memcpy(&dev->dev_id, d_id, sizeof(*d_id));
@@ -847,8 +845,10 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
rc = nfs4_proc_getdeviceinfo(server, dev);
dprintk("%s getdevice info returns %d\n", __func__, rc);
- if (rc)
+ if (rc) {
+ rv = ERR_PTR(rc);
goto out_free;
+ }
rv = nfs4_blk_decode_device(server, dev);
out_free:
@@ -866,7 +866,7 @@ bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
struct pnfs_devicelist *dlist = NULL;
struct pnfs_block_dev *bdev;
LIST_HEAD(block_disklist);
- int status = 0, i;
+ int status, i;
dprintk("%s enter\n", __func__);
@@ -898,8 +898,8 @@ bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
for (i = 0; i < dlist->num_devs; i++) {
bdev = nfs4_blk_get_deviceinfo(server, fh,
&dlist->dev_id[i]);
- if (!bdev) {
- status = -ENODEV;
+ if (IS_ERR(bdev)) {
+ status = PTR_ERR(bdev);
goto out_error;
}
spin_lock(&b_mt_id->bm_lock);
@@ -960,7 +960,7 @@ static struct pnfs_layoutdriver_type blocklayout_type = {
};
static const struct rpc_pipe_ops bl_upcall_ops = {
- .upcall = bl_pipe_upcall,
+ .upcall = rpc_pipe_generic_upcall,
.downcall = bl_pipe_downcall,
.destroy_msg = bl_pipe_destroy_msg,
};
@@ -989,17 +989,20 @@ static int __init nfs4blocklayout_init(void)
mnt,
NFS_PIPE_DIRNAME, 0, &path);
if (ret)
- goto out_remove;
+ goto out_putrpc;
bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL,
&bl_upcall_ops, 0);
+ path_put(&path);
if (IS_ERR(bl_device_pipe)) {
ret = PTR_ERR(bl_device_pipe);
- goto out_remove;
+ goto out_putrpc;
}
out:
return ret;
+out_putrpc:
+ rpc_put_mount();
out_remove:
pnfs_unregister_layoutdriver(&blocklayout_type);
return ret;
@@ -1012,6 +1015,7 @@ static void __exit nfs4blocklayout_exit(void)
pnfs_unregister_layoutdriver(&blocklayout_type);
rpc_unlink(bl_device_pipe);
+ rpc_put_mount();
}
MODULE_ALIAS("nfs-layouttype4-3");
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index f27d827960a..42acf7ef599 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -150,7 +150,7 @@ BLK_LSEG2EXT(struct pnfs_layout_segment *lseg)
}
struct bl_dev_msg {
- int status;
+ int32_t status;
uint32_t major, minor;
};
@@ -169,8 +169,6 @@ extern wait_queue_head_t bl_wq;
#define BL_DEVICE_REQUEST_ERR 0x2 /* User level process fails */
/* blocklayoutdev.c */
-ssize_t bl_pipe_upcall(struct file *, struct rpc_pipe_msg *,
- char __user *, size_t);
ssize_t bl_pipe_downcall(struct file *, const char __user *, size_t);
void bl_pipe_destroy_msg(struct rpc_pipe_msg *);
struct block_device *nfs4_blkdev_get(dev_t dev);
diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c
index a83b393fb01..d08ba9107fd 100644
--- a/fs/nfs/blocklayout/blocklayoutdev.c
+++ b/fs/nfs/blocklayout/blocklayoutdev.c
@@ -79,28 +79,6 @@ int nfs4_blkdev_put(struct block_device *bdev)
return blkdev_put(bdev, FMODE_READ);
}
-/*
- * Shouldn't there be a rpc_generic_upcall() to do this for us?
- */
-ssize_t bl_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
- char __user *dst, size_t buflen)
-{
- char *data = (char *)msg->data + msg->copied;
- size_t mlen = min(msg->len - msg->copied, buflen);
- unsigned long left;
-
- left = copy_to_user(dst, data, mlen);
- if (left == mlen) {
- msg->errno = -EFAULT;
- return -EFAULT;
- }
-
- mlen -= left;
- msg->copied += mlen;
- msg->errno = 0;
- return mlen;
-}
-
static struct bl_dev_msg bl_mount_reply;
ssize_t bl_pipe_downcall(struct file *filp, const char __user *src,
@@ -131,7 +109,7 @@ struct pnfs_block_dev *
nfs4_blk_decode_device(struct nfs_server *server,
struct pnfs_device *dev)
{
- struct pnfs_block_dev *rv = NULL;
+ struct pnfs_block_dev *rv;
struct block_device *bd = NULL;
struct rpc_pipe_msg msg;
struct bl_msg_hdr bl_msg = {
@@ -141,7 +119,7 @@ nfs4_blk_decode_device(struct nfs_server *server,
uint8_t *dataptr;
DECLARE_WAITQUEUE(wq, current);
struct bl_dev_msg *reply = &bl_mount_reply;
- int offset, len, i;
+ int offset, len, i, rc;
dprintk("%s CREATING PIPEFS MESSAGE\n", __func__);
dprintk("%s: deviceid: %s, mincount: %d\n", __func__, dev->dev_id.data,
@@ -168,8 +146,10 @@ nfs4_blk_decode_device(struct nfs_server *server,
dprintk("%s CALLING USERSPACE DAEMON\n", __func__);
add_wait_queue(&bl_wq, &wq);
- if (rpc_queue_upcall(bl_device_pipe->d_inode, &msg) < 0) {
+ rc = rpc_queue_upcall(bl_device_pipe->d_inode, &msg);
+ if (rc < 0) {
remove_wait_queue(&bl_wq, &wq);
+ rv = ERR_PTR(rc);
goto out;
}
@@ -187,8 +167,9 @@ nfs4_blk_decode_device(struct nfs_server *server,
bd = nfs4_blkdev_get(MKDEV(reply->major, reply->minor));
if (IS_ERR(bd)) {
- dprintk("%s failed to open device : %ld\n",
- __func__, PTR_ERR(bd));
+ rc = PTR_ERR(bd);
+ dprintk("%s failed to open device : %d\n", __func__, rc);
+ rv = ERR_PTR(rc);
goto out;
}
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index e3d29426905..516f3375e06 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -125,7 +125,7 @@ nfs4_callback_up(struct svc_serv *serv)
else
goto out_err;
- return svc_prepare_thread(serv, &serv->sv_pools[0]);
+ return svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
out_err:
if (ret == 0)
@@ -199,7 +199,7 @@ nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
INIT_LIST_HEAD(&serv->sv_cb_list);
spin_lock_init(&serv->sv_cb_lock);
init_waitqueue_head(&serv->sv_cb_waitq);
- rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
+ rqstp = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
if (IS_ERR(rqstp)) {
svc_xprt_put(serv->sv_bc_xprt);
serv->sv_bc_xprt = NULL;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 5833fbbf59b..873bf00d51a 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -336,11 +336,12 @@ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sa1;
const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sa2;
- if (ipv6_addr_scope(&sin1->sin6_addr) == IPV6_ADDR_SCOPE_LINKLOCAL &&
- sin1->sin6_scope_id != sin2->sin6_scope_id)
+ if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
return 0;
+ else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ return sin1->sin6_scope_id == sin2->sin6_scope_id;
- return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
+ return 1;
}
#else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */
static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
@@ -1867,6 +1868,10 @@ static int nfs_server_list_show(struct seq_file *m, void *v)
/* display one transport per line on subsequent lines */
clp = list_entry(v, struct nfs_client, cl_share_link);
+ /* Check if the client is initialized */
+ if (clp->cl_cons_state != NFS_CS_READY)
+ return 0;
+
seq_printf(m, "v%u %s %s %3d %s\n",
clp->rpc_ops->version,
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR),
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 321a66bc384..7f265406980 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -240,7 +240,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
sizeof(delegation->stateid.data));
delegation->type = res->delegation_type;
delegation->maxsize = res->maxsize;
- delegation->change_attr = nfsi->change_attr;
+ delegation->change_attr = inode->i_version;
delegation->cred = get_rpccred(cred);
delegation->inode = inode;
delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c
index 5b1006480bc..7cf2c4699b0 100644
--- a/fs/nfs/fscache-index.c
+++ b/fs/nfs/fscache-index.c
@@ -212,7 +212,7 @@ static uint16_t nfs_fscache_inode_get_aux(const void *cookie_netfs_data,
auxdata.ctime = nfsi->vfs_inode.i_ctime;
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
- auxdata.change_attr = nfsi->change_attr;
+ auxdata.change_attr = nfsi->vfs_inode.i_version;
if (bufmax > sizeof(auxdata))
bufmax = sizeof(auxdata);
@@ -244,7 +244,7 @@ enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
auxdata.ctime = nfsi->vfs_inode.i_ctime;
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
- auxdata.change_attr = nfsi->change_attr;
+ auxdata.change_attr = nfsi->vfs_inode.i_version;
if (memcmp(data, &auxdata, datalen) != 0)
return FSCACHE_CHECKAUX_OBSOLETE;
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index f20801ae0a1..47d1c6ff2d8 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -336,8 +336,6 @@ struct idmap {
struct idmap_hashtable idmap_group_hash;
};
-static ssize_t idmap_pipe_upcall(struct file *, struct rpc_pipe_msg *,
- char __user *, size_t);
static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
size_t);
static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
@@ -345,7 +343,7 @@ static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
static unsigned int fnvhash32(const void *, size_t);
static const struct rpc_pipe_ops idmap_upcall_ops = {
- .upcall = idmap_pipe_upcall,
+ .upcall = rpc_pipe_generic_upcall,
.downcall = idmap_pipe_downcall,
.destroy_msg = idmap_pipe_destroy_msg,
};
@@ -595,27 +593,6 @@ nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h,
return ret;
}
-/* RPC pipefs upcall/downcall routines */
-static ssize_t
-idmap_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
- char __user *dst, size_t buflen)
-{
- char *data = (char *)msg->data + msg->copied;
- size_t mlen = min(msg->len, buflen);
- unsigned long left;
-
- left = copy_to_user(dst, data, mlen);
- if (left == mlen) {
- msg->errno = -EFAULT;
- return -EFAULT;
- }
-
- mlen -= left;
- msg->copied += mlen;
- msg->errno = 0;
- return mlen;
-}
-
static ssize_t
idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index fe1203797b2..4dc6d078f10 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -318,7 +318,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
memset(&inode->i_atime, 0, sizeof(inode->i_atime));
memset(&inode->i_mtime, 0, sizeof(inode->i_mtime));
memset(&inode->i_ctime, 0, sizeof(inode->i_ctime));
- nfsi->change_attr = 0;
+ inode->i_version = 0;
inode->i_size = 0;
inode->i_nlink = 0;
inode->i_uid = -2;
@@ -344,7 +344,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL;
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
- nfsi->change_attr = fattr->change_attr;
+ inode->i_version = fattr->change_attr;
else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
nfsi->cache_validity |= NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_DATA;
@@ -897,8 +897,8 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
&& (fattr->valid & NFS_ATTR_FATTR_CHANGE)
- && nfsi->change_attr == fattr->pre_change_attr) {
- nfsi->change_attr = fattr->change_attr;
+ && inode->i_version == fattr->pre_change_attr) {
+ inode->i_version = fattr->change_attr;
if (S_ISDIR(inode->i_mode))
nfsi->cache_validity |= NFS_INO_INVALID_DATA;
ret |= NFS_INO_INVALID_ATTR;
@@ -952,7 +952,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
return -EIO;
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
- nfsi->change_attr != fattr->change_attr)
+ inode->i_version != fattr->change_attr)
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
/* Verify a few of the more important attributes */
@@ -1163,7 +1163,7 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa
}
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
(fattr->valid & NFS_ATTR_FATTR_PRECHANGE) == 0) {
- fattr->pre_change_attr = NFS_I(inode)->change_attr;
+ fattr->pre_change_attr = inode->i_version;
fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
}
if ((fattr->valid & NFS_ATTR_FATTR_CTIME) != 0 &&
@@ -1244,13 +1244,13 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
/* More cache consistency checks */
if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
- if (nfsi->change_attr != fattr->change_attr) {
+ if (inode->i_version != fattr->change_attr) {
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
- nfsi->change_attr = fattr->change_attr;
+ inode->i_version = fattr->change_attr;
}
} else if (server->caps & NFS_CAP_CHANGE_ATTR)
invalid |= save_cache_validity;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index ab12913dd47..c1a1bd8ddf1 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -457,13 +457,3 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
PAGE_SIZE - 1) >> PAGE_SHIFT;
}
-/*
- * Helper for restarting RPC calls in the possible presence of NFSv4.1
- * sessions.
- */
-static inline int nfs_restart_rpc(struct rpc_task *task, const struct nfs_client *clp)
-{
- if (nfs4_has_session(clp))
- return rpc_restart_call_prepare(task);
- return rpc_restart_call(task);
-}
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 3e93e9a1bee..693ae22f873 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -13,30 +13,6 @@
struct idmap;
-/*
- * In a seqid-mutating op, this macro controls which error return
- * values trigger incrementation of the seqid.
- *
- * from rfc 3010:
- * The client MUST monotonically increment the sequence number for the
- * CLOSE, LOCK, LOCKU, OPEN, OPEN_CONFIRM, and OPEN_DOWNGRADE
- * operations. This is true even in the event that the previous
- * operation that used the sequence number received an error. The only
- * exception to this rule is if the previous operation received one of
- * the following errors: NFSERR_STALE_CLIENTID, NFSERR_STALE_STATEID,
- * NFSERR_BAD_STATEID, NFSERR_BAD_SEQID, NFSERR_BADXDR,
- * NFSERR_RESOURCE, NFSERR_NOFILEHANDLE.
- *
- */
-#define seqid_mutating_err(err) \
-(((err) != NFSERR_STALE_CLIENTID) && \
- ((err) != NFSERR_STALE_STATEID) && \
- ((err) != NFSERR_BAD_STATEID) && \
- ((err) != NFSERR_BAD_SEQID) && \
- ((err) != NFSERR_BAD_XDR) && \
- ((err) != NFSERR_RESOURCE) && \
- ((err) != NFSERR_NOFILEHANDLE))
-
enum nfs4_client_state {
NFS4CLNT_MANAGER_RUNNING = 0,
NFS4CLNT_CHECK_LEASE,
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index e8915d4840a..09119418402 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -77,19 +77,6 @@ filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
BUG();
}
-/* For data server errors we don't recover from */
-static void
-filelayout_set_lo_fail(struct pnfs_layout_segment *lseg)
-{
- if (lseg->pls_range.iomode == IOMODE_RW) {
- dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
- set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
- } else {
- dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
- set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
- }
-}
-
static int filelayout_async_handle_error(struct rpc_task *task,
struct nfs4_state *state,
struct nfs_client *clp,
@@ -135,7 +122,6 @@ static int filelayout_async_handle_error(struct rpc_task *task,
static int filelayout_read_done_cb(struct rpc_task *task,
struct nfs_read_data *data)
{
- struct nfs_client *clp = data->ds_clp;
int reset = 0;
dprintk("%s DS read\n", __func__);
@@ -145,11 +131,10 @@ static int filelayout_read_done_cb(struct rpc_task *task,
dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
__func__, data->ds_clp, data->ds_clp->cl_session);
if (reset) {
- filelayout_set_lo_fail(data->lseg);
+ pnfs_set_lo_fail(data->lseg);
nfs4_reset_read(task, data);
- clp = NFS_SERVER(data->inode)->nfs_client;
}
- nfs_restart_rpc(task, clp);
+ rpc_restart_call_prepare(task);
return -EAGAIN;
}
@@ -216,17 +201,13 @@ static int filelayout_write_done_cb(struct rpc_task *task,
if (filelayout_async_handle_error(task, data->args.context->state,
data->ds_clp, &reset) == -EAGAIN) {
- struct nfs_client *clp;
-
dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
__func__, data->ds_clp, data->ds_clp->cl_session);
if (reset) {
- filelayout_set_lo_fail(data->lseg);
+ pnfs_set_lo_fail(data->lseg);
nfs4_reset_write(task, data);
- clp = NFS_SERVER(data->inode)->nfs_client;
- } else
- clp = data->ds_clp;
- nfs_restart_rpc(task, clp);
+ }
+ rpc_restart_call_prepare(task);
return -EAGAIN;
}
@@ -256,9 +237,9 @@ static int filelayout_commit_done_cb(struct rpc_task *task,
__func__, data->ds_clp, data->ds_clp->cl_session);
if (reset) {
prepare_to_resend_writes(data);
- filelayout_set_lo_fail(data->lseg);
+ pnfs_set_lo_fail(data->lseg);
} else
- nfs_restart_rpc(task, data->ds_clp);
+ rpc_restart_call_prepare(task);
return -EAGAIN;
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4700fae1ada..d2ae413c986 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -73,9 +73,6 @@ static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
-static int _nfs4_proc_lookup(struct rpc_clnt *client, struct inode *dir,
- const struct qstr *name, struct nfs_fh *fhandle,
- struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
@@ -753,9 +750,9 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
spin_lock(&dir->i_lock);
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
- if (!cinfo->atomic || cinfo->before != nfsi->change_attr)
+ if (!cinfo->atomic || cinfo->before != dir->i_version)
nfs_force_lookup_revalidate(dir);
- nfsi->change_attr = cinfo->after;
+ dir->i_version = cinfo->after;
spin_unlock(&dir->i_lock);
}
@@ -1596,8 +1593,14 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
int status;
status = nfs4_run_open_task(data, 0);
- if (status != 0 || !data->rpc_done)
+ if (!data->rpc_done)
+ return status;
+ if (status != 0) {
+ if (status == -NFS4ERR_BADNAME &&
+ !(o_arg->open_flags & O_CREAT))
+ return -ENOENT;
return status;
+ }
if (o_arg->open_flags & O_CREAT) {
update_changeattr(dir, &o_res->cinfo);
@@ -2408,14 +2411,15 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
return status;
}
-static int _nfs4_proc_lookupfh(struct rpc_clnt *clnt, struct nfs_server *server,
- const struct nfs_fh *dirfh, const struct qstr *name,
- struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
+ const struct qstr *name, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr)
{
+ struct nfs_server *server = NFS_SERVER(dir);
int status;
struct nfs4_lookup_arg args = {
.bitmask = server->attr_bitmask,
- .dir_fh = dirfh,
+ .dir_fh = NFS_FH(dir),
.name = name,
};
struct nfs4_lookup_res res = {
@@ -2431,40 +2435,8 @@ static int _nfs4_proc_lookupfh(struct rpc_clnt *clnt, struct nfs_server *server,
nfs_fattr_init(fattr);
- dprintk("NFS call lookupfh %s\n", name->name);
- status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
- dprintk("NFS reply lookupfh: %d\n", status);
- return status;
-}
-
-static int nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh,
- struct qstr *name, struct nfs_fh *fhandle,
- struct nfs_fattr *fattr)
-{
- struct nfs4_exception exception = { };
- int err;
- do {
- err = _nfs4_proc_lookupfh(server->client, server, dirfh, name, fhandle, fattr);
- /* FIXME: !!!! */
- if (err == -NFS4ERR_MOVED) {
- err = -EREMOTE;
- break;
- }
- err = nfs4_handle_exception(server, err, &exception);
- } while (exception.retry);
- return err;
-}
-
-static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
- const struct qstr *name, struct nfs_fh *fhandle,
- struct nfs_fattr *fattr)
-{
- int status;
-
dprintk("NFS call lookup %s\n", name->name);
- status = _nfs4_proc_lookupfh(clnt, NFS_SERVER(dir), NFS_FH(dir), name, fhandle, fattr);
- if (status == -NFS4ERR_MOVED)
- status = nfs4_get_referral(dir, name, fattr, fhandle);
+ status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("NFS reply lookup: %d\n", status);
return status;
}
@@ -2485,11 +2457,20 @@ static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qst
struct nfs4_exception exception = { };
int err;
do {
- err = nfs4_handle_exception(NFS_SERVER(dir),
- _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr),
- &exception);
- if (err == -EPERM)
+ int status;
+
+ status = _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr);
+ switch (status) {
+ case -NFS4ERR_BADNAME:
+ return -ENOENT;
+ case -NFS4ERR_MOVED:
+ err = nfs4_get_referral(dir, name, fattr, fhandle);
+ break;
+ case -NFS4ERR_WRONGSEC:
nfs_fixup_secinfo_attributes(fattr, fhandle);
+ }
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+ status, &exception);
} while (exception.retry);
return err;
}
@@ -3210,7 +3191,7 @@ static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
struct nfs_server *server = NFS_SERVER(data->inode);
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
- nfs_restart_rpc(task, server->nfs_client);
+ rpc_restart_call_prepare(task);
return -EAGAIN;
}
@@ -3260,7 +3241,7 @@ static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data
struct inode *inode = data->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
- nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
+ rpc_restart_call_prepare(task);
return -EAGAIN;
}
if (task->tk_status >= 0) {
@@ -3317,7 +3298,7 @@ static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *dat
struct inode *inode = data->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
- nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
+ rpc_restart_call_prepare(task);
return -EAGAIN;
}
nfs_refresh_inode(inode, data->res.fattr);
@@ -3857,7 +3838,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
default:
if (nfs4_async_handle_error(task, data->res.server, NULL) ==
-EAGAIN) {
- nfs_restart_rpc(task, data->res.server->nfs_client);
+ rpc_restart_call_prepare(task);
return;
}
}
@@ -4111,8 +4092,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
break;
default:
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
- nfs_restart_rpc(task,
- calldata->server->nfs_client);
+ rpc_restart_call_prepare(task);
}
}
@@ -4945,7 +4925,7 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
task->tk_status = 0;
/* fall through */
case -NFS4ERR_RETRY_UNCACHED_REP:
- nfs_restart_rpc(task, data->clp);
+ rpc_restart_call_prepare(task);
return;
}
dprintk("<-- %s\n", __func__);
@@ -5786,7 +5766,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
server = NFS_SERVER(lrp->args.inode);
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
- nfs_restart_rpc(task, lrp->clp);
+ rpc_restart_call_prepare(task);
return;
}
spin_lock(&lo->plh_inode->i_lock);
@@ -5957,7 +5937,7 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
}
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
- nfs_restart_rpc(task, server->nfs_client);
+ rpc_restart_call_prepare(task);
return;
}
@@ -6270,7 +6250,6 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.getroot = nfs4_proc_get_root,
.getattr = nfs4_proc_getattr,
.setattr = nfs4_proc_setattr,
- .lookupfh = nfs4_proc_lookupfh,
.lookup = nfs4_proc_lookup,
.access = nfs4_proc_access,
.readlink = nfs4_proc_readlink,
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index e550e8836c3..ee73d9a4f70 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1168,23 +1168,17 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
/*
* Called by non rpc-based layout drivers
*/
-int
-pnfs_ld_write_done(struct nfs_write_data *data)
+void pnfs_ld_write_done(struct nfs_write_data *data)
{
- int status;
-
- if (!data->pnfs_error) {
+ if (likely(!data->pnfs_error)) {
pnfs_set_layoutcommit(data);
data->mds_ops->rpc_call_done(&data->task, data);
- data->mds_ops->rpc_release(data);
- return 0;
+ } else {
+ put_lseg(data->lseg);
+ data->lseg = NULL;
+ dprintk("pnfs write error = %d\n", data->pnfs_error);
}
-
- dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
- data->pnfs_error);
- status = nfs_initiate_write(data, NFS_CLIENT(data->inode),
- data->mds_ops, NFS_FILE_SYNC);
- return status ? : -EAGAIN;
+ data->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
@@ -1268,23 +1262,17 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
/*
* Called by non rpc-based layout drivers
*/
-int
-pnfs_ld_read_done(struct nfs_read_data *data)
+void pnfs_ld_read_done(struct nfs_read_data *data)
{
- int status;
-
- if (!data->pnfs_error) {
+ if (likely(!data->pnfs_error)) {
__nfs4_read_done_cb(data);
data->mds_ops->rpc_call_done(&data->task, data);
- data->mds_ops->rpc_release(data);
- return 0;
+ } else {
+ put_lseg(data->lseg);
+ data->lseg = NULL;
+ dprintk("pnfs write error = %d\n", data->pnfs_error);
}
-
- dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
- data->pnfs_error);
- status = nfs_initiate_read(data, NFS_CLIENT(data->inode),
- data->mds_ops);
- return status ? : -EAGAIN;
+ data->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
@@ -1381,6 +1369,18 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
}
}
+void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
+{
+ if (lseg->pls_range.iomode == IOMODE_RW) {
+ dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
+ set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
+ } else {
+ dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
+ set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ }
+}
+EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
+
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 01cbfd54f3c..1509530cb11 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -178,6 +178,7 @@ int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc);
void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *, struct nfs_page *);
int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc);
bool pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req);
+void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg);
int pnfs_layout_process(struct nfs4_layoutget *lgp);
void pnfs_free_lseg_list(struct list_head *tmp_list);
void pnfs_destroy_layout(struct nfs_inode *);
@@ -200,8 +201,8 @@ void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
int _pnfs_return_layout(struct inode *);
-int pnfs_ld_write_done(struct nfs_write_data *);
-int pnfs_ld_read_done(struct nfs_read_data *);
+void pnfs_ld_write_done(struct nfs_write_data *);
+void pnfs_ld_read_done(struct nfs_read_data *);
struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
struct nfs_open_context *ctx,
loff_t pos,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 2171c043ab0..8b48ec63f72 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -35,16 +35,13 @@ static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;
static struct kmem_cache *nfs_rdata_cachep;
-static mempool_t *nfs_rdata_mempool;
-
-#define MIN_POOL_READ (32)
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
- struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_KERNEL);
+ struct nfs_read_data *p;
+ p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
if (p) {
- memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->npages = pagecount;
if (pagecount <= ARRAY_SIZE(p->page_array))
@@ -52,7 +49,7 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
else {
p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
if (!p->pagevec) {
- mempool_free(p, nfs_rdata_mempool);
+ kmem_cache_free(nfs_rdata_cachep, p);
p = NULL;
}
}
@@ -64,7 +61,7 @@ void nfs_readdata_free(struct nfs_read_data *p)
{
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
- mempool_free(p, nfs_rdata_mempool);
+ kmem_cache_free(nfs_rdata_cachep, p);
}
void nfs_readdata_release(struct nfs_read_data *rdata)
@@ -276,7 +273,6 @@ nfs_async_read_error(struct list_head *head)
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- SetPageError(req->wb_page);
nfs_readpage_release(req);
}
}
@@ -322,7 +318,6 @@ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head
offset += len;
} while(nbytes != 0);
atomic_set(&req->wb_complete, requests);
- ClearPageError(page);
desc->pg_rpc_callops = &nfs_read_partial_ops;
return ret;
out_bad:
@@ -331,7 +326,6 @@ out_bad:
list_del(&data->list);
nfs_readdata_free(data);
}
- SetPageError(page);
nfs_readpage_release(req);
return -ENOMEM;
}
@@ -357,7 +351,6 @@ static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_list_add_request(req, &data->pages);
- ClearPageError(req->wb_page);
*pages++ = req->wb_page;
}
req = nfs_list_entry(data->pages.next);
@@ -435,7 +428,7 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
- nfs_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
+ rpc_restart_call_prepare(task);
}
/*
@@ -462,10 +455,10 @@ static void nfs_readpage_release_partial(void *calldata)
int status = data->task.tk_status;
if (status < 0)
- SetPageError(page);
+ set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);
if (atomic_dec_and_test(&req->wb_complete)) {
- if (!PageError(page))
+ if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
SetPageUptodate(page);
nfs_readpage_release(req);
}
@@ -541,13 +534,23 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
static void nfs_readpage_release_full(void *calldata)
{
struct nfs_read_data *data = calldata;
+ struct nfs_pageio_descriptor pgio;
+ if (data->pnfs_error) {
+ nfs_pageio_init_read_mds(&pgio, data->inode);
+ pgio.pg_recoalesce = 1;
+ }
while (!list_empty(&data->pages)) {
struct nfs_page *req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
- nfs_readpage_release(req);
+ if (!data->pnfs_error)
+ nfs_readpage_release(req);
+ else
+ nfs_pageio_add_request(&pgio, req);
}
+ if (data->pnfs_error)
+ nfs_pageio_complete(&pgio);
nfs_readdata_release(calldata);
}
@@ -648,7 +651,6 @@ readpage_async_filler(void *data, struct page *page)
return 0;
out_error:
error = PTR_ERR(new);
- SetPageError(page);
out_unlock:
unlock_page(page);
return error;
@@ -711,16 +713,10 @@ int __init nfs_init_readpagecache(void)
if (nfs_rdata_cachep == NULL)
return -ENOMEM;
- nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
- nfs_rdata_cachep);
- if (nfs_rdata_mempool == NULL)
- return -ENOMEM;
-
return 0;
}
void nfs_destroy_readpagecache(void)
{
- mempool_destroy(nfs_rdata_mempool);
kmem_cache_destroy(nfs_rdata_cachep);
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 5b19b6aabe1..480b3b6bf71 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -733,18 +733,22 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
return 0;
}
+
+#ifdef CONFIG_NFS_V4
#ifdef CONFIG_NFS_V4_1
-void show_sessions(struct seq_file *m, struct nfs_server *server)
+static void show_sessions(struct seq_file *m, struct nfs_server *server)
{
if (nfs4_has_session(server->nfs_client))
seq_printf(m, ",sessions");
}
#else
-void show_sessions(struct seq_file *m, struct nfs_server *server) {}
+static void show_sessions(struct seq_file *m, struct nfs_server *server) {}
+#endif
#endif
+#ifdef CONFIG_NFS_V4
#ifdef CONFIG_NFS_V4_1
-void show_pnfs(struct seq_file *m, struct nfs_server *server)
+static void show_pnfs(struct seq_file *m, struct nfs_server *server)
{
seq_printf(m, ",pnfs=");
if (server->pnfs_curr_ld)
@@ -752,9 +756,10 @@ void show_pnfs(struct seq_file *m, struct nfs_server *server)
else
seq_printf(m, "not configured");
}
-#else /* CONFIG_NFS_V4_1 */
-void show_pnfs(struct seq_file *m, struct nfs_server *server) {}
-#endif /* CONFIG_NFS_V4_1 */
+#else
+static void show_pnfs(struct seq_file *m, struct nfs_server *server) {}
+#endif
+#endif
static int nfs_show_devname(struct seq_file *m, struct vfsmount *mnt)
{
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index b2fbbde58e4..4f9319a2e56 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -87,7 +87,7 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
struct inode *dir = data->dir;
if (!NFS_PROTO(dir)->unlink_done(task, dir))
- nfs_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
+ rpc_restart_call_prepare(task);
}
/**
@@ -369,7 +369,7 @@ static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
struct dentry *new_dentry = data->new_dentry;
if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) {
- nfs_restart_rpc(task, NFS_SERVER(old_dir)->nfs_client);
+ rpc_restart_call_prepare(task);
return;
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c9bd2a6b7d4..2219c88d96b 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -390,7 +390,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
BUG_ON(error);
if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
- nfsi->change_attr++;
+ inode->i_version++;
set_bit(PG_MAPPED, &req->wb_flags);
SetPagePrivate(req->wb_page);
set_page_private(req->wb_page, (unsigned long)req);
@@ -428,7 +428,6 @@ static void
nfs_mark_request_dirty(struct nfs_page *req)
{
__set_page_dirty_nobuffers(req->wb_page);
- __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC);
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -762,6 +761,8 @@ int nfs_updatepage(struct file *file, struct page *page,
status = nfs_writepage_setup(ctx, page, offset, count);
if (status < 0)
nfs_set_pageerror(page);
+ else
+ __set_page_dirty_nobuffers(page);
dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
status, (long long)i_size_read(inode));
@@ -1010,7 +1011,6 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *r
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_list_add_request(req, &data->pages);
- ClearPageError(req->wb_page);
*pages++ = req->wb_page;
}
req = nfs_list_entry(data->pages.next);
@@ -1165,7 +1165,13 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
static void nfs_writeback_release_full(void *calldata)
{
struct nfs_write_data *data = calldata;
- int status = data->task.tk_status;
+ int ret, status = data->task.tk_status;
+ struct nfs_pageio_descriptor pgio;
+
+ if (data->pnfs_error) {
+ nfs_pageio_init_write_mds(&pgio, data->inode, FLUSH_STABLE);
+ pgio.pg_recoalesce = 1;
+ }
/* Update attributes as result of writeback. */
while (!list_empty(&data->pages)) {
@@ -1181,6 +1187,11 @@ static void nfs_writeback_release_full(void *calldata)
req->wb_bytes,
(long long)req_offset(req));
+ if (data->pnfs_error) {
+ dprintk(", pnfs error = %d\n", data->pnfs_error);
+ goto next;
+ }
+
if (status < 0) {
nfs_set_pageerror(page);
nfs_context_set_write_error(req->wb_context, status);
@@ -1200,7 +1211,19 @@ remove_request:
next:
nfs_clear_page_tag_locked(req);
nfs_end_page_writeback(page);
+ if (data->pnfs_error) {
+ lock_page(page);
+ nfs_pageio_cond_complete(&pgio, page->index);
+ ret = nfs_page_async_flush(&pgio, page, 0);
+ if (ret) {
+ nfs_set_pageerror(page);
+ dprintk("rewrite to MDS error = %d\n", ret);
+ }
+ unlock_page(page);
+ }
}
+ if (data->pnfs_error)
+ nfs_pageio_complete(&pgio);
nfs_writedata_release(calldata);
}
@@ -1281,7 +1304,7 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
*/
argp->stable = NFS_FILE_SYNC;
}
- nfs_restart_rpc(task, server->nfs_client);
+ rpc_restart_call_prepare(task);
return;
}
if (time_before(complain, jiffies)) {
@@ -1553,6 +1576,10 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
int flags = FLUSH_SYNC;
int ret = 0;
+ /* no commits means nothing needs to be done */
+ if (!nfsi->ncommit)
+ return ret;
+
if (wbc->sync_mode == WB_SYNC_NONE) {
/* Don't commit yet if this is a non-blocking flush and there
* are a lot of outstanding writes for this mapping.
@@ -1686,34 +1713,20 @@ out_error:
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
struct page *page)
{
- struct nfs_page *req;
- int ret;
+ /*
+ * If PagePrivate is set, then the page is currently associated with
+ * an in-progress read or write request. Don't try to migrate it.
+ *
+ * FIXME: we could do this in principle, but we'll need a way to ensure
+ * that we can safely release the inode reference while holding
+ * the page lock.
+ */
+ if (PagePrivate(page))
+ return -EBUSY;
nfs_fscache_release_page(page, GFP_KERNEL);
- req = nfs_find_and_lock_request(page, false);
- ret = PTR_ERR(req);
- if (IS_ERR(req))
- goto out;
-
- ret = migrate_page(mapping, newpage, page);
- if (!req)
- goto out;
- if (ret)
- goto out_unlock;
- page_cache_get(newpage);
- spin_lock(&mapping->host->i_lock);
- req->wb_page = newpage;
- SetPagePrivate(newpage);
- set_page_private(newpage, (unsigned long)req);
- ClearPagePrivate(page);
- set_page_private(page, 0);
- spin_unlock(&mapping->host->i_lock);
- page_cache_release(page);
-out_unlock:
- nfs_clear_page_tag_locked(req);
-out:
- return ret;
+ return migrate_page(mapping, newpage, page);
}
#endif
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index f4cc1e2bfc5..62f3b9074e8 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/exportfs.h>
-#include <linux/nfsd/syscall.h>
#include <net/ipv6.h>
#include "nfsd.h"
@@ -318,7 +317,6 @@ static void svc_export_put(struct kref *ref)
struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
path_put(&exp->ex_path);
auth_domain_put(exp->ex_client);
- kfree(exp->ex_pathname);
nfsd4_fslocs_free(&exp->ex_fslocs);
kfree(exp);
}
@@ -528,11 +526,6 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
exp.ex_client = dom;
- err = -ENOMEM;
- exp.ex_pathname = kstrdup(buf, GFP_KERNEL);
- if (!exp.ex_pathname)
- goto out2;
-
/* expiry */
err = -EINVAL;
exp.h.expiry_time = get_expiry(&mesg);
@@ -613,8 +606,6 @@ out4:
nfsd4_fslocs_free(&exp.ex_fslocs);
kfree(exp.ex_uuid);
out3:
- kfree(exp.ex_pathname);
-out2:
path_put(&exp.ex_path);
out1:
auth_domain_put(dom);
@@ -678,7 +669,6 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
new->ex_client = item->ex_client;
new->ex_path.dentry = dget(item->ex_path.dentry);
new->ex_path.mnt = mntget(item->ex_path.mnt);
- new->ex_pathname = NULL;
new->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = 0;
@@ -696,8 +686,6 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
new->ex_fsid = item->ex_fsid;
new->ex_uuid = item->ex_uuid;
item->ex_uuid = NULL;
- new->ex_pathname = item->ex_pathname;
- item->ex_pathname = NULL;
new->ex_fslocs.locations = item->ex_fslocs.locations;
item->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = item->ex_fslocs.locations_count;
@@ -1010,7 +998,7 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
return exp;
}
-static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
+struct svc_export *rqst_find_fsidzero_export(struct svc_rqst *rqstp)
{
u32 fsidv[2];
@@ -1030,7 +1018,7 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
struct svc_export *exp;
__be32 rv;
- exp = find_fsidzero_export(rqstp);
+ exp = rqst_find_fsidzero_export(rqstp);
if (IS_ERR(exp))
return nfserrno(PTR_ERR(exp));
rv = fh_compose(fhp, exp, exp->ex_path.dentry, NULL);
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 02eb4edf0ec..7748d6a18d9 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -39,6 +39,8 @@
#define NFSDDBG_FACILITY NFSDDBG_PROC
+static void nfsd4_mark_cb_fault(struct nfs4_client *, int reason);
+
#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1
@@ -351,7 +353,7 @@ static void encode_cb_recall4args(struct xdr_stream *xdr,
__be32 *p;
encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
- encode_stateid4(xdr, &dp->dl_stateid);
+ encode_stateid4(xdr, &dp->dl_stid.sc_stateid);
p = xdr_reserve_space(xdr, 4);
*p++ = xdr_zero; /* truncate */
@@ -460,6 +462,8 @@ static int decode_cb_sequence4resok(struct xdr_stream *xdr,
*/
status = 0;
out:
+ if (status)
+ nfsd4_mark_cb_fault(cb->cb_clp, status);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
@@ -686,6 +690,12 @@ static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
warn_no_callback_path(clp, reason);
}
+static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
+{
+ clp->cl_cb_state = NFSD4_CB_FAULT;
+ warn_no_callback_path(clp, reason);
+}
+
static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
@@ -787,7 +797,7 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
struct nfsd4_callback *cb = calldata;
struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
- struct nfs4_client *clp = dp->dl_client;
+ struct nfs4_client *clp = dp->dl_stid.sc_client;
u32 minorversion = clp->cl_minorversion;
cb->cb_minorversion = minorversion;
@@ -809,7 +819,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
struct nfsd4_callback *cb = calldata;
struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
- struct nfs4_client *clp = dp->dl_client;
+ struct nfs4_client *clp = dp->dl_stid.sc_client;
dprintk("%s: minorversion=%d\n", __func__,
clp->cl_minorversion);
@@ -832,7 +842,7 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
struct nfsd4_callback *cb = calldata;
struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
- struct nfs4_client *clp = dp->dl_client;
+ struct nfs4_client *clp = dp->dl_stid.sc_client;
struct rpc_clnt *current_rpc_client = clp->cl_cb_client;
nfsd4_cb_done(task, calldata);
@@ -1006,7 +1016,7 @@ void nfsd4_do_callback_rpc(struct work_struct *w)
void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
struct nfsd4_callback *cb = &dp->dl_recall;
- struct nfs4_client *clp = dp->dl_client;
+ struct nfs4_client *clp = dp->dl_stid.sc_client;
dp->dl_retries = 1;
cb->cb_op = dp;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index e8077766661..fa383361bc6 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -35,6 +35,7 @@
#include <linux/file.h>
#include <linux/slab.h>
+#include "idmap.h"
#include "cache.h"
#include "xdr4.h"
#include "vfs.h"
@@ -156,6 +157,8 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
return nfserr_inval;
+ accmode |= NFSD_MAY_READ_IF_EXEC;
+
if (open->op_share_access & NFS4_SHARE_ACCESS_READ)
accmode |= NFSD_MAY_READ;
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
@@ -168,12 +171,29 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
return status;
}
+static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
+{
+ umode_t mode = fh->fh_dentry->d_inode->i_mode;
+
+ if (S_ISREG(mode))
+ return nfs_ok;
+ if (S_ISDIR(mode))
+ return nfserr_isdir;
+ /*
+ * Using err_symlink as our catch-all case may look odd; but
+ * there's no other obvious error for this case in 4.0, and we
+ * happen to know that it will cause the linux v4 client to do
+ * the right thing on attempts to open something other than a
+ * regular file.
+ */
+ return nfserr_symlink;
+}
+
static __be32
do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
struct svc_fh resfh;
__be32 status;
- int created = 0;
fh_init(&resfh, NFS4_FHSIZE);
open->op_truncate = 0;
@@ -202,7 +222,7 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
open->op_fname.len, &open->op_iattr,
&resfh, open->op_createmode,
(u32 *)open->op_verf.data,
- &open->op_truncate, &created);
+ &open->op_truncate, &open->op_created);
/*
* Following rfc 3530 14.2.16, use the returned bitmask
@@ -216,6 +236,9 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
status = nfsd_lookup(rqstp, current_fh,
open->op_fname.data, open->op_fname.len, &resfh);
fh_unlock(current_fh);
+ if (status)
+ goto out;
+ status = nfsd_check_obj_isreg(&resfh);
}
if (status)
goto out;
@@ -227,9 +250,9 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
fh_dup2(current_fh, &resfh);
/* set reply cache */
- fh_copy_shallow(&open->op_stateowner->so_replay.rp_openfh,
+ fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
&resfh.fh_handle);
- if (!created)
+ if (!open->op_created)
status = do_open_permission(rqstp, current_fh, open,
NFSD_MAY_NOP);
@@ -254,7 +277,7 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_
memset(&open->op_cinfo, 0, sizeof(struct nfsd4_change_info));
/* set replay cache */
- fh_copy_shallow(&open->op_stateowner->so_replay.rp_openfh,
+ fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
&current_fh->fh_handle);
open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
@@ -283,14 +306,18 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfsd4_compoundres *resp;
- dprintk("NFSD: nfsd4_open filename %.*s op_stateowner %p\n",
+ dprintk("NFSD: nfsd4_open filename %.*s op_openowner %p\n",
(int)open->op_fname.len, open->op_fname.data,
- open->op_stateowner);
+ open->op_openowner);
/* This check required by spec. */
if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
return nfserr_inval;
+ /* We don't yet support WANT bits: */
+ open->op_share_access &= NFS4_SHARE_ACCESS_MASK;
+
+ open->op_created = 0;
/*
* RFC5661 18.51.3
* Before RECLAIM_COMPLETE done, server should deny new lock
@@ -309,7 +336,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
resp = rqstp->rq_resp;
status = nfsd4_process_open1(&resp->cstate, open);
if (status == nfserr_replay_me) {
- struct nfs4_replay *rp = &open->op_stateowner->so_replay;
+ struct nfs4_replay *rp = &open->op_openowner->oo_owner.so_replay;
fh_put(&cstate->current_fh);
fh_copy_shallow(&cstate->current_fh.fh_handle,
&rp->rp_openfh);
@@ -339,32 +366,23 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_DELEGATE_CUR:
case NFS4_OPEN_CLAIM_NULL:
- /*
- * (1) set CURRENT_FH to the file being opened,
- * creating it if necessary, (2) set open->op_cinfo,
- * (3) set open->op_truncate if the file is to be
- * truncated after opening, (4) do permission checking.
- */
status = do_open_lookup(rqstp, &cstate->current_fh,
open);
if (status)
goto out;
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
- open->op_stateowner->so_confirmed = 1;
- /*
- * The CURRENT_FH is already set to the file being
- * opened. (1) set open->op_cinfo, (2) set
- * open->op_truncate if the file is to be truncated
- * after opening, (3) do permission checking.
- */
+ open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
+ case NFS4_OPEN_CLAIM_FH:
+ case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
status = do_open_fhandle(rqstp, &cstate->current_fh,
open);
if (status)
goto out;
break;
+ case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
case NFS4_OPEN_CLAIM_DELEGATE_PREV:
- open->op_stateowner->so_confirmed = 1;
+ open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
dprintk("NFSD: unsupported OPEN claim type %d\n",
open->op_claim_type);
status = nfserr_notsupp;
@@ -381,12 +399,13 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* set, (2) sets open->op_stateid, (3) sets open->op_delegation.
*/
status = nfsd4_process_open2(rqstp, &cstate->current_fh, open);
+ WARN_ON(status && open->op_created);
out:
- if (open->op_stateowner) {
- nfs4_get_stateowner(open->op_stateowner);
- cstate->replay_owner = open->op_stateowner;
- }
- nfs4_unlock_state();
+ nfsd4_cleanup_open_state(open, status);
+ if (open->op_openowner)
+ cstate->replay_owner = &open->op_openowner->oo_owner;
+ else
+ nfs4_unlock_state();
return status;
}
@@ -467,17 +486,12 @@ static __be32
nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_commit *commit)
{
- __be32 status;
-
u32 *p = (u32 *)commit->co_verf.data;
*p++ = nfssvc_boot.tv_sec;
*p++ = nfssvc_boot.tv_usec;
- status = nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
+ return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
commit->co_count);
- if (status == nfserr_symlink)
- status = nfserr_inval;
- return status;
}
static __be32
@@ -492,8 +506,6 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = fh_verify(rqstp, &cstate->current_fh, S_IFDIR,
NFSD_MAY_CREATE);
- if (status == nfserr_symlink)
- status = nfserr_notdir;
if (status)
return status;
@@ -691,7 +703,7 @@ nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
readdir->rd_bmval[1] &= nfsd_suppattrs1(cstate->minorversion);
readdir->rd_bmval[2] &= nfsd_suppattrs2(cstate->minorversion);
- if ((cookie > ~(u32)0) || (cookie == 1) || (cookie == 2) ||
+ if ((cookie == 1) || (cookie == 2) ||
(cookie == 0 && memcmp(readdir->rd_verf.data, zeroverf.data, NFS4_VERIFIER_SIZE)))
return nfserr_bad_cookie;
@@ -719,8 +731,6 @@ nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_grace;
status = nfsd_unlink(rqstp, &cstate->current_fh, 0,
remove->rm_name, remove->rm_namelen);
- if (status == nfserr_symlink)
- return nfserr_notdir;
if (!status) {
fh_unlock(&cstate->current_fh);
set_change_info(&remove->rm_cinfo, &cstate->current_fh);
@@ -751,8 +761,6 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
(S_ISDIR(cstate->save_fh.fh_dentry->d_inode->i_mode) &&
S_ISDIR(cstate->current_fh.fh_dentry->d_inode->i_mode)))
status = nfserr_exist;
- else if (status == nfserr_symlink)
- status = nfserr_notdir;
if (!status) {
set_change_info(&rename->rn_sinfo, &cstate->current_fh);
@@ -892,8 +900,6 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
write->wr_bytes_written = cnt;
- if (status == nfserr_symlink)
- status = nfserr_inval;
return status;
}
@@ -930,7 +936,7 @@ _nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
count = 4 + (verify->ve_attrlen >> 2);
buf = kmalloc(count << 2, GFP_KERNEL);
if (!buf)
- return nfserr_resource;
+ return nfserr_jukebox;
status = nfsd4_encode_fattr(&cstate->current_fh,
cstate->current_fh.fh_export,
@@ -994,6 +1000,8 @@ static inline void nfsd4_increment_op_stats(u32 opnum)
typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
void *);
+typedef u32(*nfsd4op_rsize)(struct svc_rqst *, struct nfsd4_op *op);
+
enum nfsd4_op_flags {
ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */
@@ -1001,13 +1009,15 @@ enum nfsd4_op_flags {
/* For rfc 5661 section 2.6.3.1.1: */
OP_HANDLES_WRONGSEC = 1 << 3,
OP_IS_PUTFH_LIKE = 1 << 4,
-};
-
-struct nfsd4_operation {
- nfsd4op_func op_func;
- u32 op_flags;
- char *op_name;
/*
+ * These are the ops whose result size we estimate before
+ * encoding, to avoid performing an op then not being able to
+ * respond or cache a response. This includes writes and setattrs
+ * as well as the operations usually called "nonidempotent":
+ */
+ OP_MODIFIES_SOMETHING = 1 << 5,
+ /*
+ * Cache compounds containing these ops in the xid-based drc:
* We use the DRC for compounds containing non-idempotent
* operations, *except* those that are 4.1-specific (since
* sessions provide their own EOS), and except for stateful
@@ -1015,7 +1025,15 @@ struct nfsd4_operation {
* (since sequence numbers provide EOS for open, lock, etc in
* the v4.0 case).
*/
- bool op_cacheresult;
+ OP_CACHEME = 1 << 6,
+};
+
+struct nfsd4_operation {
+ nfsd4op_func op_func;
+ u32 op_flags;
+ char *op_name;
+ /* Try to get response size before operation */
+ nfsd4op_rsize op_rsize_bop;
};
static struct nfsd4_operation nfsd4_ops[];
@@ -1062,7 +1080,7 @@ static inline struct nfsd4_operation *OPDESC(struct nfsd4_op *op)
bool nfsd4_cache_this_op(struct nfsd4_op *op)
{
- return OPDESC(op)->op_cacheresult;
+ return OPDESC(op)->op_flags & OP_CACHEME;
}
static bool need_wrongsec_check(struct svc_rqst *rqstp)
@@ -1110,6 +1128,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
struct nfsd4_operation *opdesc;
struct nfsd4_compound_state *cstate = &resp->cstate;
int slack_bytes;
+ u32 plen = 0;
__be32 status;
resp->xbuf = &rqstp->rq_res;
@@ -1188,6 +1207,15 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
goto encode_op;
}
+ /* If op is non-idempotent */
+ if (opdesc->op_flags & OP_MODIFIES_SOMETHING) {
+ plen = opdesc->op_rsize_bop(rqstp, op);
+ op->status = nfsd4_check_resp_size(resp, plen);
+ }
+
+ if (op->status)
+ goto encode_op;
+
if (opdesc->op_func)
op->status = opdesc->op_func(rqstp, cstate, &op->u);
else
@@ -1217,7 +1245,7 @@ encode_op:
be32_to_cpu(status));
if (cstate->replay_owner) {
- nfs4_put_stateowner(cstate->replay_owner);
+ nfs4_unlock_state();
cstate->replay_owner = NULL;
}
/* XXX Ugh, we need to get rid of this kind of special case: */
@@ -1238,6 +1266,144 @@ out:
return status;
}
+#define op_encode_hdr_size (2)
+#define op_encode_stateid_maxsz (XDR_QUADLEN(NFS4_STATEID_SIZE))
+#define op_encode_verifier_maxsz (XDR_QUADLEN(NFS4_VERIFIER_SIZE))
+#define op_encode_change_info_maxsz (5)
+#define nfs4_fattr_bitmap_maxsz (4)
+
+#define op_encode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
+#define op_encode_lock_denied_maxsz (8 + op_encode_lockowner_maxsz)
+
+#define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
+
+#define op_encode_ace_maxsz (3 + nfs4_owner_maxsz)
+#define op_encode_delegation_maxsz (1 + op_encode_stateid_maxsz + 1 + \
+ op_encode_ace_maxsz)
+
+#define op_encode_channel_attrs_maxsz (6 + 1 + 1)
+
+static inline u32 nfsd4_only_status_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_status_stateid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + op_encode_stateid_maxsz)* sizeof(__be32);
+}
+
+static inline u32 nfsd4_commit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + op_encode_verifier_maxsz) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_create_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + op_encode_change_info_maxsz
+ + nfs4_fattr_bitmap_maxsz) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_link_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + op_encode_change_info_maxsz)
+ * sizeof(__be32);
+}
+
+static inline u32 nfsd4_lock_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + op_encode_lock_denied_maxsz)
+ * sizeof(__be32);
+}
+
+static inline u32 nfsd4_open_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + op_encode_stateid_maxsz
+ + op_encode_change_info_maxsz + 1
+ + nfs4_fattr_bitmap_maxsz
+ + op_encode_delegation_maxsz) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_read_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ u32 maxcount = 0, rlen = 0;
+
+ maxcount = svc_max_payload(rqstp);
+ rlen = op->u.read.rd_length;
+
+ if (rlen > maxcount)
+ rlen = maxcount;
+
+ return (op_encode_hdr_size + 2) * sizeof(__be32) + rlen;
+}
+
+static inline u32 nfsd4_readdir_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ u32 rlen = op->u.readdir.rd_maxcount;
+
+ if (rlen > PAGE_SIZE)
+ rlen = PAGE_SIZE;
+
+ return (op_encode_hdr_size + op_encode_verifier_maxsz)
+ * sizeof(__be32) + rlen;
+}
+
+static inline u32 nfsd4_remove_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + op_encode_change_info_maxsz)
+ * sizeof(__be32);
+}
+
+static inline u32 nfsd4_rename_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + op_encode_change_info_maxsz
+ + op_encode_change_info_maxsz) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + nfs4_fattr_bitmap_maxsz) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_setclientid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + 2 + 1024) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + op_encode_verifier_maxsz) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
+ 1 + 1 + 0 + /* eir_flags, spr_how, SP4_NONE (for now) */\
+ 2 + /*eir_server_owner.so_minor_id */\
+ /* eir_server_owner.so_major_id<> */\
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
+ /* eir_server_scope<> */\
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
+ 1 + /* eir_server_impl_id array length */\
+ 0 /* ignored eir_server_impl_id contents */) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_bind_conn_to_session_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + /* bctsr_sessid */\
+ 2 /* bctsr_dir, use_conn_in_rdma_mode */) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_create_session_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + /* sessionid */\
+ 2 + /* csr_sequence, csr_flags */\
+ op_encode_channel_attrs_maxsz + \
+ op_encode_channel_attrs_maxsz) * sizeof(__be32);
+}
+
static struct nfsd4_operation nfsd4_ops[] = {
[OP_ACCESS] = {
.op_func = (nfsd4op_func)nfsd4_access,
@@ -1245,20 +1411,27 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_CLOSE] = {
.op_func = (nfsd4op_func)nfsd4_close,
+ .op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_CLOSE",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
},
[OP_COMMIT] = {
.op_func = (nfsd4op_func)nfsd4_commit,
+ .op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_COMMIT",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_commit_rsize,
},
[OP_CREATE] = {
.op_func = (nfsd4op_func)nfsd4_create,
+ .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_CREATE",
- .op_cacheresult = true,
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_create_rsize,
},
[OP_DELEGRETURN] = {
.op_func = (nfsd4op_func)nfsd4_delegreturn,
+ .op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_DELEGRETURN",
+ .op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_GETATTR] = {
.op_func = (nfsd4op_func)nfsd4_getattr,
@@ -1271,12 +1444,16 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_LINK] = {
.op_func = (nfsd4op_func)nfsd4_link,
+ .op_flags = ALLOWED_ON_ABSENT_FS | OP_MODIFIES_SOMETHING
+ | OP_CACHEME,
.op_name = "OP_LINK",
- .op_cacheresult = true,
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_link_rsize,
},
[OP_LOCK] = {
.op_func = (nfsd4op_func)nfsd4_lock,
+ .op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LOCK",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_lock_rsize,
},
[OP_LOCKT] = {
.op_func = (nfsd4op_func)nfsd4_lockt,
@@ -1284,7 +1461,9 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_LOCKU] = {
.op_func = (nfsd4op_func)nfsd4_locku,
+ .op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_LOCKU",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
},
[OP_LOOKUP] = {
.op_func = (nfsd4op_func)nfsd4_lookup,
@@ -1302,42 +1481,54 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_OPEN] = {
.op_func = (nfsd4op_func)nfsd4_open,
- .op_flags = OP_HANDLES_WRONGSEC,
+ .op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_open_rsize,
},
[OP_OPEN_CONFIRM] = {
.op_func = (nfsd4op_func)nfsd4_open_confirm,
+ .op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN_CONFIRM",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
},
[OP_OPEN_DOWNGRADE] = {
.op_func = (nfsd4op_func)nfsd4_open_downgrade,
+ .op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_OPEN_DOWNGRADE",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
},
[OP_PUTFH] = {
.op_func = (nfsd4op_func)nfsd4_putfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
- | OP_IS_PUTFH_LIKE,
+ | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
.op_name = "OP_PUTFH",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_PUTPUBFH] = {
.op_func = (nfsd4op_func)nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
- | OP_IS_PUTFH_LIKE,
+ | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
.op_name = "OP_PUTPUBFH",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_PUTROOTFH] = {
.op_func = (nfsd4op_func)nfsd4_putrootfh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
- | OP_IS_PUTFH_LIKE,
+ | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
.op_name = "OP_PUTROOTFH",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_READ] = {
.op_func = (nfsd4op_func)nfsd4_read,
+ .op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_READ",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_read_rsize,
},
[OP_READDIR] = {
.op_func = (nfsd4op_func)nfsd4_readdir,
+ .op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_READDIR",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_readdir_rsize,
},
[OP_READLINK] = {
.op_func = (nfsd4op_func)nfsd4_readlink,
@@ -1345,29 +1536,36 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_REMOVE] = {
.op_func = (nfsd4op_func)nfsd4_remove,
+ .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_REMOVE",
- .op_cacheresult = true,
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_remove_rsize,
},
[OP_RENAME] = {
- .op_name = "OP_RENAME",
.op_func = (nfsd4op_func)nfsd4_rename,
- .op_cacheresult = true,
+ .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+ .op_name = "OP_RENAME",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_rename_rsize,
},
[OP_RENEW] = {
.op_func = (nfsd4op_func)nfsd4_renew,
- .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+ | OP_MODIFIES_SOMETHING,
.op_name = "OP_RENEW",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+
},
[OP_RESTOREFH] = {
.op_func = (nfsd4op_func)nfsd4_restorefh,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
- | OP_IS_PUTFH_LIKE,
+ | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
.op_name = "OP_RESTOREFH",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SAVEFH] = {
.op_func = (nfsd4op_func)nfsd4_savefh,
- .op_flags = OP_HANDLES_WRONGSEC,
+ .op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
.op_name = "OP_SAVEFH",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SECINFO] = {
.op_func = (nfsd4op_func)nfsd4_secinfo,
@@ -1377,19 +1575,22 @@ static struct nfsd4_operation nfsd4_ops[] = {
[OP_SETATTR] = {
.op_func = (nfsd4op_func)nfsd4_setattr,
.op_name = "OP_SETATTR",
- .op_cacheresult = true,
+ .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_setattr_rsize,
},
[OP_SETCLIENTID] = {
.op_func = (nfsd4op_func)nfsd4_setclientid,
- .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+ | OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_SETCLIENTID",
- .op_cacheresult = true,
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_setclientid_rsize,
},
[OP_SETCLIENTID_CONFIRM] = {
.op_func = (nfsd4op_func)nfsd4_setclientid_confirm,
- .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+ | OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_SETCLIENTID_CONFIRM",
- .op_cacheresult = true,
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_VERIFY] = {
.op_func = (nfsd4op_func)nfsd4_verify,
@@ -1397,35 +1598,46 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_WRITE] = {
.op_func = (nfsd4op_func)nfsd4_write,
+ .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_WRITE",
- .op_cacheresult = true,
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
},
[OP_RELEASE_LOCKOWNER] = {
.op_func = (nfsd4op_func)nfsd4_release_lockowner,
- .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+ | OP_MODIFIES_SOMETHING,
.op_name = "OP_RELEASE_LOCKOWNER",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
/* NFSv4.1 operations */
[OP_EXCHANGE_ID] = {
.op_func = (nfsd4op_func)nfsd4_exchange_id,
- .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
+ | OP_MODIFIES_SOMETHING,
.op_name = "OP_EXCHANGE_ID",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_exchange_id_rsize,
},
[OP_BIND_CONN_TO_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_bind_conn_to_session,
- .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
+ | OP_MODIFIES_SOMETHING,
.op_name = "OP_BIND_CONN_TO_SESSION",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_bind_conn_to_session_rsize,
},
[OP_CREATE_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_create_session,
- .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
+ | OP_MODIFIES_SOMETHING,
.op_name = "OP_CREATE_SESSION",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_create_session_rsize,
},
[OP_DESTROY_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_destroy_session,
- .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
+ | OP_MODIFIES_SOMETHING,
.op_name = "OP_DESTROY_SESSION",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SEQUENCE] = {
.op_func = (nfsd4op_func)nfsd4_sequence,
@@ -1433,14 +1645,17 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_name = "OP_SEQUENCE",
},
[OP_DESTROY_CLIENTID] = {
- .op_func = NULL,
- .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
+ .op_func = (nfsd4op_func)nfsd4_destroy_clientid,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
+ | OP_MODIFIES_SOMETHING,
.op_name = "OP_DESTROY_CLIENTID",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_RECLAIM_COMPLETE] = {
.op_func = (nfsd4op_func)nfsd4_reclaim_complete,
- .op_flags = ALLOWED_WITHOUT_FH,
+ .op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_RECLAIM_COMPLETE",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SECINFO_NO_NAME] = {
.op_func = (nfsd4op_func)nfsd4_secinfo_no_name,
@@ -1454,8 +1669,9 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
[OP_FREE_STATEID] = {
.op_func = (nfsd4op_func)nfsd4_free_stateid,
- .op_flags = ALLOWED_WITHOUT_FH,
+ .op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_FREE_STATEID",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
};
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 29d77f60585..ed083b9a731 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -45,6 +45,7 @@
/* Globals */
static struct file *rec_file;
+static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
static int
nfs4_save_creds(const struct cred **original_creds)
@@ -88,7 +89,7 @@ nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
struct xdr_netobj cksum;
struct hash_desc desc;
struct scatterlist sg;
- __be32 status = nfserr_resource;
+ __be32 status = nfserr_jukebox;
dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
clname->len, clname->data);
@@ -129,6 +130,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
if (!rec_file || clp->cl_firststate)
return 0;
+ clp->cl_firststate = 1;
status = nfs4_save_creds(&original_cred);
if (status < 0)
return status;
@@ -143,10 +145,8 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
goto out_unlock;
}
status = -EEXIST;
- if (dentry->d_inode) {
- dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n");
+ if (dentry->d_inode)
goto out_put;
- }
status = mnt_want_write(rec_file->f_path.mnt);
if (status)
goto out_put;
@@ -156,12 +156,14 @@ out_put:
dput(dentry);
out_unlock:
mutex_unlock(&dir->d_inode->i_mutex);
- if (status == 0) {
- clp->cl_firststate = 1;
+ if (status == 0)
vfs_fsync(rec_file, 0);
- }
+ else
+ printk(KERN_ERR "NFSD: failed to write recovery record"
+ " (err %d); please check that %s exists"
+ " and is writeable", status,
+ user_recovery_dirname);
nfs4_reset_creds(original_cred);
- dprintk("NFSD: nfsd4_create_clid_dir returns %d\n", status);
return status;
}
@@ -354,13 +356,13 @@ nfsd4_recdir_load(void) {
*/
void
-nfsd4_init_recdir(char *rec_dirname)
+nfsd4_init_recdir()
{
const struct cred *original_cred;
int status;
printk("NFSD: Using %s as the NFSv4 state recovery directory\n",
- rec_dirname);
+ user_recovery_dirname);
BUG_ON(rec_file);
@@ -372,10 +374,10 @@ nfsd4_init_recdir(char *rec_dirname)
return;
}
- rec_file = filp_open(rec_dirname, O_RDONLY | O_DIRECTORY, 0);
+ rec_file = filp_open(user_recovery_dirname, O_RDONLY | O_DIRECTORY, 0);
if (IS_ERR(rec_file)) {
printk("NFSD: unable to find recovery directory %s\n",
- rec_dirname);
+ user_recovery_dirname);
rec_file = NULL;
}
@@ -390,3 +392,30 @@ nfsd4_shutdown_recdir(void)
fput(rec_file);
rec_file = NULL;
}
+
+/*
+ * Change the NFSv4 recovery directory to recdir.
+ */
+int
+nfs4_reset_recoverydir(char *recdir)
+{
+ int status;
+ struct path path;
+
+ status = kern_path(recdir, LOOKUP_FOLLOW, &path);
+ if (status)
+ return status;
+ status = -ENOTDIR;
+ if (S_ISDIR(path.dentry->d_inode->i_mode)) {
+ strcpy(user_recovery_dirname, recdir);
+ status = 0;
+ }
+ path_put(&path);
+ return status;
+}
+
+char *
+nfs4_recoverydir(void)
+{
+ return user_recovery_dirname;
+}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3787ec11740..47e94e33a97 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -49,9 +49,6 @@
time_t nfsd4_lease = 90; /* default lease time */
time_t nfsd4_grace = 90;
static time_t boot_time;
-static u32 current_ownerid = 1;
-static u32 current_fileid = 1;
-static u32 current_delegid = 1;
static stateid_t zerostateid; /* bits all 0 */
static stateid_t onestateid; /* bits all 1 */
static u64 current_sessionid = 1;
@@ -60,13 +57,7 @@ static u64 current_sessionid = 1;
#define ONE_STATEID(stateid) (!memcmp((stateid), &onestateid, sizeof(stateid_t)))
/* forward declarations */
-static struct nfs4_stateid * find_stateid(stateid_t *stid, int flags);
-static struct nfs4_stateid * search_for_stateid(stateid_t *stid);
-static struct nfs4_delegation * search_for_delegation(stateid_t *stid);
-static struct nfs4_delegation * find_delegation_stateid(struct inode *ino, stateid_t *stid);
-static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
-static void nfs4_set_recdir(char *recdir);
-static int check_for_locks(struct nfs4_file *filp, struct nfs4_stateowner *lowner);
+static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
/* Locking: */
@@ -80,7 +71,8 @@ static DEFINE_MUTEX(client_mutex);
*/
static DEFINE_SPINLOCK(recall_lock);
-static struct kmem_cache *stateowner_slab = NULL;
+static struct kmem_cache *openowner_slab = NULL;
+static struct kmem_cache *lockowner_slab = NULL;
static struct kmem_cache *file_slab = NULL;
static struct kmem_cache *stateid_slab = NULL;
static struct kmem_cache *deleg_slab = NULL;
@@ -112,6 +104,11 @@ opaque_hashval(const void *ptr, int nbytes)
static struct list_head del_recall_lru;
+static void nfsd4_free_file(struct nfs4_file *f)
+{
+ kmem_cache_free(file_slab, f);
+}
+
static inline void
put_nfs4_file(struct nfs4_file *fi)
{
@@ -119,7 +116,7 @@ put_nfs4_file(struct nfs4_file *fi)
list_del(&fi->fi_hash);
spin_unlock(&recall_lock);
iput(fi->fi_inode);
- kmem_cache_free(file_slab, fi);
+ nfsd4_free_file(fi);
}
}
@@ -136,35 +133,33 @@ unsigned int max_delegations;
* Open owner state (share locks)
*/
-/* hash tables for nfs4_stateowner */
-#define OWNER_HASH_BITS 8
-#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
-#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
+/* hash tables for open owners */
+#define OPEN_OWNER_HASH_BITS 8
+#define OPEN_OWNER_HASH_SIZE (1 << OPEN_OWNER_HASH_BITS)
+#define OPEN_OWNER_HASH_MASK (OPEN_OWNER_HASH_SIZE - 1)
-#define ownerid_hashval(id) \
- ((id) & OWNER_HASH_MASK)
-#define ownerstr_hashval(clientid, ownername) \
- (((clientid) + opaque_hashval((ownername.data), (ownername.len))) & OWNER_HASH_MASK)
+static unsigned int open_ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
+{
+ unsigned int ret;
-static struct list_head ownerid_hashtbl[OWNER_HASH_SIZE];
-static struct list_head ownerstr_hashtbl[OWNER_HASH_SIZE];
+ ret = opaque_hashval(ownername->data, ownername->len);
+ ret += clientid;
+ return ret & OPEN_OWNER_HASH_MASK;
+}
+
+static struct list_head open_ownerstr_hashtbl[OPEN_OWNER_HASH_SIZE];
/* hash table for nfs4_file */
#define FILE_HASH_BITS 8
#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
-/* hash table for (open)nfs4_stateid */
-#define STATEID_HASH_BITS 10
-#define STATEID_HASH_SIZE (1 << STATEID_HASH_BITS)
-#define STATEID_HASH_MASK (STATEID_HASH_SIZE - 1)
-
-#define file_hashval(x) \
- hash_ptr(x, FILE_HASH_BITS)
-#define stateid_hashval(owner_id, file_id) \
- (((owner_id) + (file_id)) & STATEID_HASH_MASK)
+static unsigned int file_hashval(struct inode *ino)
+{
+ /* XXX: why are we hashing on inode pointer, anyway? */
+ return hash_ptr(ino, FILE_HASH_BITS);
+}
static struct list_head file_hashtbl[FILE_HASH_SIZE];
-static struct list_head stateid_hashtbl[STATEID_HASH_SIZE];
static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
{
@@ -192,8 +187,15 @@ static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
if (atomic_dec_and_test(&fp->fi_access[oflag])) {
- nfs4_file_put_fd(fp, O_RDWR);
nfs4_file_put_fd(fp, oflag);
+ /*
+ * It's also safe to get rid of the RDWR open *if*
+ * we no longer have need of the other kind of access
+ * or if we already have the other kind of open:
+ */
+ if (fp->fi_fds[1-oflag]
+ || atomic_read(&fp->fi_access[1 - oflag]) == 0)
+ nfs4_file_put_fd(fp, O_RDWR);
}
}
@@ -206,8 +208,73 @@ static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
__nfs4_file_put_access(fp, oflag);
}
+static inline int get_new_stid(struct nfs4_stid *stid)
+{
+ static int min_stateid = 0;
+ struct idr *stateids = &stid->sc_client->cl_stateids;
+ int new_stid;
+ int error;
+
+ error = idr_get_new_above(stateids, stid, min_stateid, &new_stid);
+ /*
+ * Note: the necessary preallocation was done in
+ * nfs4_alloc_stateid(). The idr code caps the number of
+ * preallocations that can exist at a time, but the state lock
+ * prevents anyone from using ours before we get here:
+ */
+ BUG_ON(error);
+ /*
+ * It shouldn't be a problem to reuse an opaque stateid value.
+ * I don't think it is for 4.1. But with 4.0 I worry that, for
+ * example, a stray write retransmission could be accepted by
+ * the server when it should have been rejected. Therefore,
+ * adopt a trick from the sctp code to attempt to maximize the
+ * amount of time until an id is reused, by ensuring they always
+ * "increase" (mod INT_MAX):
+ */
+
+ min_stateid = new_stid+1;
+ if (min_stateid == INT_MAX)
+ min_stateid = 0;
+ return new_stid;
+}
+
+static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
+{
+ stateid_t *s = &stid->sc_stateid;
+ int new_id;
+
+ stid->sc_type = type;
+ stid->sc_client = cl;
+ s->si_opaque.so_clid = cl->cl_clientid;
+ new_id = get_new_stid(stid);
+ s->si_opaque.so_id = (u32)new_id;
+ /* Will be incremented before return to client: */
+ s->si_generation = 0;
+}
+
+static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
+{
+ struct idr *stateids = &cl->cl_stateids;
+
+ if (!idr_pre_get(stateids, GFP_KERNEL))
+ return NULL;
+ /*
+ * Note: if we fail here (or any time between now and the time
+ * we actually get the new idr), we won't need to undo the idr
+ * preallocation, since the idr code caps the number of
+ * preallocated entries.
+ */
+ return kmem_cache_alloc(slab, GFP_KERNEL);
+}
+
+static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
+{
+ return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
+}
+
static struct nfs4_delegation *
-alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_fh *current_fh, u32 type)
+alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type)
{
struct nfs4_delegation *dp;
struct nfs4_file *fp = stp->st_file;
@@ -224,21 +291,23 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
return NULL;
if (num_delegations > max_delegations)
return NULL;
- dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
+ dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
if (dp == NULL)
return dp;
+ init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
+ /*
+ * delegation seqid's are never incremented. The 4.1 special
+ * meaning of seqid 0 isn't meaningful, really, but let's avoid
+ * 0 anyway just for consistency and use 1:
+ */
+ dp->dl_stid.sc_stateid.si_generation = 1;
num_delegations++;
INIT_LIST_HEAD(&dp->dl_perfile);
INIT_LIST_HEAD(&dp->dl_perclnt);
INIT_LIST_HEAD(&dp->dl_recall_lru);
- dp->dl_client = clp;
get_nfs4_file(fp);
dp->dl_file = fp;
dp->dl_type = type;
- dp->dl_stateid.si_boot = boot_time;
- dp->dl_stateid.si_stateownerid = current_delegid++;
- dp->dl_stateid.si_fileid = 0;
- dp->dl_stateid.si_generation = 0;
fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
dp->dl_time = 0;
atomic_set(&dp->dl_count, 1);
@@ -267,10 +336,18 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp)
}
}
+static void unhash_stid(struct nfs4_stid *s)
+{
+ struct idr *stateids = &s->sc_client->cl_stateids;
+
+ idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
+}
+
/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
+ unhash_stid(&dp->dl_stid);
list_del_init(&dp->dl_perclnt);
spin_lock(&recall_lock);
list_del_init(&dp->dl_perfile);
@@ -292,10 +369,16 @@ static DEFINE_SPINLOCK(client_lock);
#define CLIENT_HASH_SIZE (1 << CLIENT_HASH_BITS)
#define CLIENT_HASH_MASK (CLIENT_HASH_SIZE - 1)
-#define clientid_hashval(id) \
- ((id) & CLIENT_HASH_MASK)
-#define clientstr_hashval(name) \
- (opaque_hashval((name), 8) & CLIENT_HASH_MASK)
+static unsigned int clientid_hashval(u32 id)
+{
+ return id & CLIENT_HASH_MASK;
+}
+
+static unsigned int clientstr_hashval(const char *name)
+{
+ return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
+}
+
/*
* reclaim_str_hashtbl[] holds known client info from previous reset/reboot
* used in reboot/reset lease grace period processing
@@ -362,7 +445,7 @@ set_deny(unsigned int *deny, unsigned long bmap) {
}
static int
-test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
+test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
unsigned int access, deny;
set_access(&access, stp->st_access_bmap);
@@ -385,14 +468,13 @@ static int nfs4_access_to_omode(u32 access)
BUG();
}
-static void unhash_generic_stateid(struct nfs4_stateid *stp)
+static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
{
- list_del(&stp->st_hash);
list_del(&stp->st_perfile);
list_del(&stp->st_perstateowner);
}
-static void free_generic_stateid(struct nfs4_stateid *stp)
+static void close_generic_stateid(struct nfs4_ol_stateid *stp)
{
int i;
@@ -401,84 +483,106 @@ static void free_generic_stateid(struct nfs4_stateid *stp)
if (test_bit(i, &stp->st_access_bmap))
nfs4_file_put_access(stp->st_file,
nfs4_access_to_omode(i));
+ __clear_bit(i, &stp->st_access_bmap);
}
}
put_nfs4_file(stp->st_file);
+ stp->st_file = NULL;
+}
+
+static void free_generic_stateid(struct nfs4_ol_stateid *stp)
+{
kmem_cache_free(stateid_slab, stp);
}
-static void release_lock_stateid(struct nfs4_stateid *stp)
+static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
struct file *file;
unhash_generic_stateid(stp);
+ unhash_stid(&stp->st_stid);
file = find_any_file(stp->st_file);
if (file)
- locks_remove_posix(file, (fl_owner_t)stp->st_stateowner);
+ locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
+ close_generic_stateid(stp);
free_generic_stateid(stp);
}
-static void unhash_lockowner(struct nfs4_stateowner *sop)
+static void unhash_lockowner(struct nfs4_lockowner *lo)
{
- struct nfs4_stateid *stp;
+ struct nfs4_ol_stateid *stp;
- list_del(&sop->so_idhash);
- list_del(&sop->so_strhash);
- list_del(&sop->so_perstateid);
- while (!list_empty(&sop->so_stateids)) {
- stp = list_first_entry(&sop->so_stateids,
- struct nfs4_stateid, st_perstateowner);
+ list_del(&lo->lo_owner.so_strhash);
+ list_del(&lo->lo_perstateid);
+ while (!list_empty(&lo->lo_owner.so_stateids)) {
+ stp = list_first_entry(&lo->lo_owner.so_stateids,
+ struct nfs4_ol_stateid, st_perstateowner);
release_lock_stateid(stp);
}
}
-static void release_lockowner(struct nfs4_stateowner *sop)
+static void release_lockowner(struct nfs4_lockowner *lo)
{
- unhash_lockowner(sop);
- nfs4_put_stateowner(sop);
+ unhash_lockowner(lo);
+ nfs4_free_lockowner(lo);
}
static void
-release_stateid_lockowners(struct nfs4_stateid *open_stp)
+release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
{
- struct nfs4_stateowner *lock_sop;
+ struct nfs4_lockowner *lo;
while (!list_empty(&open_stp->st_lockowners)) {
- lock_sop = list_entry(open_stp->st_lockowners.next,
- struct nfs4_stateowner, so_perstateid);
- /* list_del(&open_stp->st_lockowners); */
- BUG_ON(lock_sop->so_is_open_owner);
- release_lockowner(lock_sop);
+ lo = list_entry(open_stp->st_lockowners.next,
+ struct nfs4_lockowner, lo_perstateid);
+ release_lockowner(lo);
}
}
-static void release_open_stateid(struct nfs4_stateid *stp)
+static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
{
unhash_generic_stateid(stp);
release_stateid_lockowners(stp);
+ close_generic_stateid(stp);
+}
+
+static void release_open_stateid(struct nfs4_ol_stateid *stp)
+{
+ unhash_open_stateid(stp);
+ unhash_stid(&stp->st_stid);
free_generic_stateid(stp);
}
-static void unhash_openowner(struct nfs4_stateowner *sop)
+static void unhash_openowner(struct nfs4_openowner *oo)
{
- struct nfs4_stateid *stp;
+ struct nfs4_ol_stateid *stp;
- list_del(&sop->so_idhash);
- list_del(&sop->so_strhash);
- list_del(&sop->so_perclient);
- list_del(&sop->so_perstateid); /* XXX: necessary? */
- while (!list_empty(&sop->so_stateids)) {
- stp = list_first_entry(&sop->so_stateids,
- struct nfs4_stateid, st_perstateowner);
+ list_del(&oo->oo_owner.so_strhash);
+ list_del(&oo->oo_perclient);
+ while (!list_empty(&oo->oo_owner.so_stateids)) {
+ stp = list_first_entry(&oo->oo_owner.so_stateids,
+ struct nfs4_ol_stateid, st_perstateowner);
release_open_stateid(stp);
}
}
-static void release_openowner(struct nfs4_stateowner *sop)
+static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
- unhash_openowner(sop);
- list_del(&sop->so_close_lru);
- nfs4_put_stateowner(sop);
+ struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;
+
+ if (s) {
+ unhash_stid(&s->st_stid);
+ free_generic_stateid(s);
+ oo->oo_last_closed_stid = NULL;
+ }
+}
+
+static void release_openowner(struct nfs4_openowner *oo)
+{
+ unhash_openowner(oo);
+ list_del(&oo->oo_close_lru);
+ release_last_closed_stateid(oo);
+ nfs4_free_openowner(oo);
}
#define SESSION_HASH_SIZE 512
@@ -843,9 +947,6 @@ renew_client_locked(struct nfs4_client *clp)
return;
}
- /*
- * Move client to the end to the LRU list.
- */
dprintk("renewing client (clientid %08x/%08x)\n",
clp->cl_clientid.cl_boot,
clp->cl_clientid.cl_id);
@@ -943,7 +1044,7 @@ unhash_client_locked(struct nfs4_client *clp)
static void
expire_client(struct nfs4_client *clp)
{
- struct nfs4_stateowner *sop;
+ struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
struct list_head reaplist;
@@ -961,8 +1062,8 @@ expire_client(struct nfs4_client *clp)
unhash_delegation(dp);
}
while (!list_empty(&clp->cl_openowners)) {
- sop = list_entry(clp->cl_openowners.next, struct nfs4_stateowner, so_perclient);
- release_openowner(sop);
+ oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
+ release_openowner(oo);
}
nfsd4_shutdown_callback(clp);
if (clp->cl_cb_conn.cb_xprt)
@@ -1038,6 +1139,23 @@ static void gen_confirm(struct nfs4_client *clp)
*p++ = i++;
}
+static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
+{
+ return idr_find(&cl->cl_stateids, t->si_opaque.so_id);
+}
+
+static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
+{
+ struct nfs4_stid *s;
+
+ s = find_stateid(cl, t);
+ if (!s)
+ return NULL;
+ if (typemask & s->sc_type)
+ return s;
+ return NULL;
+}
+
static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
struct svc_rqst *rqstp, nfs4_verifier *verf)
{
@@ -1060,6 +1178,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
}
}
+ idr_init(&clp->cl_stateids);
memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
atomic_set(&clp->cl_refcount, 0);
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
@@ -1083,17 +1202,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
return clp;
}
-static int check_name(struct xdr_netobj name)
-{
- if (name.len == 0)
- return 0;
- if (name.len > NFS4_OPAQUE_LIMIT) {
- dprintk("NFSD: check_name: name too long(%d)!\n", name.len);
- return 0;
- }
- return 1;
-}
-
static void
add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval)
{
@@ -1125,8 +1233,10 @@ find_confirmed_client(clientid_t *clid)
unsigned int idhashval = clientid_hashval(clid->cl_id);
list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
- if (same_clid(&clp->cl_clientid, clid))
+ if (same_clid(&clp->cl_clientid, clid)) {
+ renew_client(clp);
return clp;
+ }
}
return NULL;
}
@@ -1173,20 +1283,6 @@ find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
return NULL;
}
-static void rpc_svcaddr2sockaddr(struct sockaddr *sa, unsigned short family, union svc_addr_u *svcaddr)
-{
- switch (family) {
- case AF_INET:
- ((struct sockaddr_in *)sa)->sin_family = AF_INET;
- ((struct sockaddr_in *)sa)->sin_addr = svcaddr->addr;
- return;
- case AF_INET6:
- ((struct sockaddr_in6 *)sa)->sin6_family = AF_INET6;
- ((struct sockaddr_in6 *)sa)->sin6_addr = svcaddr->addr6;
- return;
- }
-}
-
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
@@ -1218,7 +1314,7 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_r
conn->cb_prog = se->se_callback_prog;
conn->cb_ident = se->se_callback_ident;
- rpc_svcaddr2sockaddr((struct sockaddr *)&conn->cb_saddr, expected_family, &rqstp->rq_daddr);
+ memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
return;
out_err:
conn->cb_addr.ss_family = AF_UNSPEC;
@@ -1350,7 +1446,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
__func__, rqstp, exid, exid->clname.len, exid->clname.data,
addr_str, exid->flags, exid->spa_how);
- if (!check_name(exid->clname) || (exid->flags & ~EXCHGID4_FLAG_MASK_A))
+ if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
return nfserr_inval;
/* Currently only support SP4_NONE */
@@ -1849,8 +1945,16 @@ out:
nfsd4_get_session(cstate->session);
atomic_inc(&clp->cl_refcount);
- if (clp->cl_cb_state == NFSD4_CB_DOWN)
- seq->status_flags |= SEQ4_STATUS_CB_PATH_DOWN;
+ switch (clp->cl_cb_state) {
+ case NFSD4_CB_DOWN:
+ seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
+ break;
+ case NFSD4_CB_FAULT:
+ seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
+ break;
+ default:
+ seq->status_flags = 0;
+ }
}
kfree(conn);
spin_unlock(&client_lock);
@@ -1858,6 +1962,50 @@ out:
return status;
}
+static inline bool has_resources(struct nfs4_client *clp)
+{
+ return !list_empty(&clp->cl_openowners)
+ || !list_empty(&clp->cl_delegations)
+ || !list_empty(&clp->cl_sessions);
+}
+
+__be32
+nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
+{
+ struct nfs4_client *conf, *unconf, *clp;
+ int status = 0;
+
+ nfs4_lock_state();
+ unconf = find_unconfirmed_client(&dc->clientid);
+ conf = find_confirmed_client(&dc->clientid);
+
+ if (conf) {
+ clp = conf;
+
+ if (!is_client_expired(conf) && has_resources(conf)) {
+ status = nfserr_clientid_busy;
+ goto out;
+ }
+
+ /* rfc5661 18.50.3 */
+ if (cstate->session && conf == cstate->session->se_client) {
+ status = nfserr_clientid_busy;
+ goto out;
+ }
+ } else if (unconf)
+ clp = unconf;
+ else {
+ status = nfserr_stale_clientid;
+ goto out;
+ }
+
+ expire_client(clp);
+out:
+ nfs4_unlock_state();
+ dprintk("%s return %d\n", __func__, ntohl(status));
+ return status;
+}
+
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
@@ -1900,19 +2048,13 @@ __be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_setclientid *setclid)
{
- struct xdr_netobj clname = {
- .len = setclid->se_namelen,
- .data = setclid->se_name,
- };
+ struct xdr_netobj clname = setclid->se_name;
nfs4_verifier clverifier = setclid->se_verf;
unsigned int strhashval;
struct nfs4_client *conf, *unconf, *new;
__be32 status;
char dname[HEXDIR_LEN];
- if (!check_name(clname))
- return nfserr_inval;
-
status = nfs4_make_rec_clidname(dname, &clname);
if (status)
return status;
@@ -1946,7 +2088,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* of 5 bullet points, labeled as CASE0 - CASE4 below.
*/
unconf = find_unconfirmed_client_by_str(dname, strhashval);
- status = nfserr_resource;
+ status = nfserr_jukebox;
if (!conf) {
/*
* RFC 3530 14.2.33 CASE 4:
@@ -2116,31 +2258,28 @@ out:
return status;
}
+static struct nfs4_file *nfsd4_alloc_file(void)
+{
+ return kmem_cache_alloc(file_slab, GFP_KERNEL);
+}
+
/* OPEN Share state helper functions */
-static inline struct nfs4_file *
-alloc_init_file(struct inode *ino)
+static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
{
- struct nfs4_file *fp;
unsigned int hashval = file_hashval(ino);
- fp = kmem_cache_alloc(file_slab, GFP_KERNEL);
- if (fp) {
- atomic_set(&fp->fi_ref, 1);
- INIT_LIST_HEAD(&fp->fi_hash);
- INIT_LIST_HEAD(&fp->fi_stateids);
- INIT_LIST_HEAD(&fp->fi_delegations);
- fp->fi_inode = igrab(ino);
- fp->fi_id = current_fileid++;
- fp->fi_had_conflict = false;
- fp->fi_lease = NULL;
- memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
- memset(fp->fi_access, 0, sizeof(fp->fi_access));
- spin_lock(&recall_lock);
- list_add(&fp->fi_hash, &file_hashtbl[hashval]);
- spin_unlock(&recall_lock);
- return fp;
- }
- return NULL;
+ atomic_set(&fp->fi_ref, 1);
+ INIT_LIST_HEAD(&fp->fi_hash);
+ INIT_LIST_HEAD(&fp->fi_stateids);
+ INIT_LIST_HEAD(&fp->fi_delegations);
+ fp->fi_inode = igrab(ino);
+ fp->fi_had_conflict = false;
+ fp->fi_lease = NULL;
+ memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
+ memset(fp->fi_access, 0, sizeof(fp->fi_access));
+ spin_lock(&recall_lock);
+ list_add(&fp->fi_hash, &file_hashtbl[hashval]);
+ spin_unlock(&recall_lock);
}
static void
@@ -2155,7 +2294,8 @@ nfsd4_free_slab(struct kmem_cache **slab)
void
nfsd4_free_slabs(void)
{
- nfsd4_free_slab(&stateowner_slab);
+ nfsd4_free_slab(&openowner_slab);
+ nfsd4_free_slab(&lockowner_slab);
nfsd4_free_slab(&file_slab);
nfsd4_free_slab(&stateid_slab);
nfsd4_free_slab(&deleg_slab);
@@ -2164,16 +2304,20 @@ nfsd4_free_slabs(void)
static int
nfsd4_init_slabs(void)
{
- stateowner_slab = kmem_cache_create("nfsd4_stateowners",
- sizeof(struct nfs4_stateowner), 0, 0, NULL);
- if (stateowner_slab == NULL)
+ openowner_slab = kmem_cache_create("nfsd4_openowners",
+ sizeof(struct nfs4_openowner), 0, 0, NULL);
+ if (openowner_slab == NULL)
+ goto out_nomem;
+ lockowner_slab = kmem_cache_create("nfsd4_lockowners",
+ sizeof(struct nfs4_openowner), 0, 0, NULL);
+ if (lockowner_slab == NULL)
goto out_nomem;
file_slab = kmem_cache_create("nfsd4_files",
sizeof(struct nfs4_file), 0, 0, NULL);
if (file_slab == NULL)
goto out_nomem;
stateid_slab = kmem_cache_create("nfsd4_stateids",
- sizeof(struct nfs4_stateid), 0, 0, NULL);
+ sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
if (stateid_slab == NULL)
goto out_nomem;
deleg_slab = kmem_cache_create("nfsd4_delegations",
@@ -2187,97 +2331,94 @@ out_nomem:
return -ENOMEM;
}
-void
-nfs4_free_stateowner(struct kref *kref)
+void nfs4_free_openowner(struct nfs4_openowner *oo)
{
- struct nfs4_stateowner *sop =
- container_of(kref, struct nfs4_stateowner, so_ref);
- kfree(sop->so_owner.data);
- kmem_cache_free(stateowner_slab, sop);
+ kfree(oo->oo_owner.so_owner.data);
+ kmem_cache_free(openowner_slab, oo);
}
-static inline struct nfs4_stateowner *
-alloc_stateowner(struct xdr_netobj *owner)
+void nfs4_free_lockowner(struct nfs4_lockowner *lo)
{
- struct nfs4_stateowner *sop;
+ kfree(lo->lo_owner.so_owner.data);
+ kmem_cache_free(lockowner_slab, lo);
+}
- if ((sop = kmem_cache_alloc(stateowner_slab, GFP_KERNEL))) {
- if ((sop->so_owner.data = kmalloc(owner->len, GFP_KERNEL))) {
- memcpy(sop->so_owner.data, owner->data, owner->len);
- sop->so_owner.len = owner->len;
- kref_init(&sop->so_ref);
- return sop;
- }
- kmem_cache_free(stateowner_slab, sop);
- }
- return NULL;
+static void init_nfs4_replay(struct nfs4_replay *rp)
+{
+ rp->rp_status = nfserr_serverfault;
+ rp->rp_buflen = 0;
+ rp->rp_buf = rp->rp_ibuf;
}
-static struct nfs4_stateowner *
-alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
+static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
+{
struct nfs4_stateowner *sop;
- struct nfs4_replay *rp;
- unsigned int idhashval;
- if (!(sop = alloc_stateowner(&open->op_owner)))
+ sop = kmem_cache_alloc(slab, GFP_KERNEL);
+ if (!sop)
+ return NULL;
+
+ sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
+ if (!sop->so_owner.data) {
+ kmem_cache_free(slab, sop);
return NULL;
- idhashval = ownerid_hashval(current_ownerid);
- INIT_LIST_HEAD(&sop->so_idhash);
- INIT_LIST_HEAD(&sop->so_strhash);
- INIT_LIST_HEAD(&sop->so_perclient);
+ }
+ sop->so_owner.len = owner->len;
+
INIT_LIST_HEAD(&sop->so_stateids);
- INIT_LIST_HEAD(&sop->so_perstateid); /* not used */
- INIT_LIST_HEAD(&sop->so_close_lru);
- sop->so_time = 0;
- list_add(&sop->so_idhash, &ownerid_hashtbl[idhashval]);
- list_add(&sop->so_strhash, &ownerstr_hashtbl[strhashval]);
- list_add(&sop->so_perclient, &clp->cl_openowners);
- sop->so_is_open_owner = 1;
- sop->so_id = current_ownerid++;
sop->so_client = clp;
- sop->so_seqid = open->op_seqid;
- sop->so_confirmed = 0;
- rp = &sop->so_replay;
- rp->rp_status = nfserr_serverfault;
- rp->rp_buflen = 0;
- rp->rp_buf = rp->rp_ibuf;
+ init_nfs4_replay(&sop->so_replay);
return sop;
}
-static inline void
-init_stateid(struct nfs4_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
- struct nfs4_stateowner *sop = open->op_stateowner;
- unsigned int hashval = stateid_hashval(sop->so_id, fp->fi_id);
+static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
+{
+ list_add(&oo->oo_owner.so_strhash, &open_ownerstr_hashtbl[strhashval]);
+ list_add(&oo->oo_perclient, &clp->cl_openowners);
+}
- INIT_LIST_HEAD(&stp->st_hash);
- INIT_LIST_HEAD(&stp->st_perstateowner);
+static struct nfs4_openowner *
+alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
+ struct nfs4_openowner *oo;
+
+ oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
+ if (!oo)
+ return NULL;
+ oo->oo_owner.so_is_open_owner = 1;
+ oo->oo_owner.so_seqid = open->op_seqid;
+ oo->oo_flags = NFS4_OO_NEW;
+ oo->oo_time = 0;
+ oo->oo_last_closed_stid = NULL;
+ INIT_LIST_HEAD(&oo->oo_close_lru);
+ hash_openowner(oo, clp, strhashval);
+ return oo;
+}
+
+static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
+ struct nfs4_openowner *oo = open->op_openowner;
+ struct nfs4_client *clp = oo->oo_owner.so_client;
+
+ init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
INIT_LIST_HEAD(&stp->st_lockowners);
- INIT_LIST_HEAD(&stp->st_perfile);
- list_add(&stp->st_hash, &stateid_hashtbl[hashval]);
- list_add(&stp->st_perstateowner, &sop->so_stateids);
+ list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
list_add(&stp->st_perfile, &fp->fi_stateids);
- stp->st_stateowner = sop;
+ stp->st_stateowner = &oo->oo_owner;
get_nfs4_file(fp);
stp->st_file = fp;
- stp->st_stateid.si_boot = boot_time;
- stp->st_stateid.si_stateownerid = sop->so_id;
- stp->st_stateid.si_fileid = fp->fi_id;
- stp->st_stateid.si_generation = 0;
stp->st_access_bmap = 0;
stp->st_deny_bmap = 0;
- __set_bit(open->op_share_access & ~NFS4_SHARE_WANT_MASK,
- &stp->st_access_bmap);
+ __set_bit(open->op_share_access, &stp->st_access_bmap);
__set_bit(open->op_share_deny, &stp->st_deny_bmap);
stp->st_openstp = NULL;
}
static void
-move_to_close_lru(struct nfs4_stateowner *sop)
+move_to_close_lru(struct nfs4_openowner *oo)
{
- dprintk("NFSD: move_to_close_lru nfs4_stateowner %p\n", sop);
+ dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
- list_move_tail(&sop->so_close_lru, &close_lru);
- sop->so_time = get_seconds();
+ list_move_tail(&oo->oo_close_lru, &close_lru);
+ oo->oo_time = get_seconds();
}
static int
@@ -2289,14 +2430,18 @@ same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
(sop->so_client->cl_clientid.cl_id == clid->cl_id);
}
-static struct nfs4_stateowner *
+static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open)
{
- struct nfs4_stateowner *so = NULL;
+ struct nfs4_stateowner *so;
+ struct nfs4_openowner *oo;
- list_for_each_entry(so, &ownerstr_hashtbl[hashval], so_strhash) {
- if (same_owner_str(so, &open->op_owner, &open->op_clientid))
- return so;
+ list_for_each_entry(so, &open_ownerstr_hashtbl[hashval], so_strhash) {
+ if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
+ oo = openowner(so);
+ renew_client(oo->oo_owner.so_client);
+ return oo;
+ }
}
return NULL;
}
@@ -2320,31 +2465,6 @@ find_file(struct inode *ino)
return NULL;
}
-static inline int access_valid(u32 x, u32 minorversion)
-{
- if ((x & NFS4_SHARE_ACCESS_MASK) < NFS4_SHARE_ACCESS_READ)
- return 0;
- if ((x & NFS4_SHARE_ACCESS_MASK) > NFS4_SHARE_ACCESS_BOTH)
- return 0;
- x &= ~NFS4_SHARE_ACCESS_MASK;
- if (minorversion && x) {
- if ((x & NFS4_SHARE_WANT_MASK) > NFS4_SHARE_WANT_CANCEL)
- return 0;
- if ((x & NFS4_SHARE_WHEN_MASK) > NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED)
- return 0;
- x &= ~(NFS4_SHARE_WANT_MASK | NFS4_SHARE_WHEN_MASK);
- }
- if (x)
- return 0;
- return 1;
-}
-
-static inline int deny_valid(u32 x)
-{
- /* Note: unlike access bits, deny bits may be zero. */
- return x <= NFS4_SHARE_DENY_BOTH;
-}
-
/*
* Called to check deny when READ with all zero stateid or
* WRITE with all zero or all one stateid
@@ -2354,7 +2474,7 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
struct inode *ino = current_fh->fh_dentry->d_inode;
struct nfs4_file *fp;
- struct nfs4_stateid *stp;
+ struct nfs4_ol_stateid *stp;
__be32 ret;
dprintk("NFSD: nfs4_share_conflict\n");
@@ -2429,6 +2549,16 @@ static const struct lock_manager_operations nfsd_lease_mng_ops = {
.lm_change = nfsd_change_deleg_cb,
};
+static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
+{
+ if (nfsd4_has_session(cstate))
+ return nfs_ok;
+ if (seqid == so->so_seqid - 1)
+ return nfserr_replay_me;
+ if (seqid == so->so_seqid)
+ return nfs_ok;
+ return nfserr_bad_seqid;
+}
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
@@ -2437,57 +2567,49 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
clientid_t *clientid = &open->op_clientid;
struct nfs4_client *clp = NULL;
unsigned int strhashval;
- struct nfs4_stateowner *sop = NULL;
-
- if (!check_name(open->op_owner))
- return nfserr_inval;
+ struct nfs4_openowner *oo = NULL;
+ __be32 status;
if (STALE_CLIENTID(&open->op_clientid))
return nfserr_stale_clientid;
+ /*
+ * In case we need it later, after we've already created the
+ * file and don't want to risk a further failure:
+ */
+ open->op_file = nfsd4_alloc_file();
+ if (open->op_file == NULL)
+ return nfserr_jukebox;
- strhashval = ownerstr_hashval(clientid->cl_id, open->op_owner);
- sop = find_openstateowner_str(strhashval, open);
- open->op_stateowner = sop;
- if (!sop) {
- /* Make sure the client's lease hasn't expired. */
+ strhashval = open_ownerstr_hashval(clientid->cl_id, &open->op_owner);
+ oo = find_openstateowner_str(strhashval, open);
+ open->op_openowner = oo;
+ if (!oo) {
clp = find_confirmed_client(clientid);
if (clp == NULL)
return nfserr_expired;
- goto renew;
+ goto new_owner;
}
- /* When sessions are used, skip open sequenceid processing */
- if (nfsd4_has_session(cstate))
- goto renew;
- if (!sop->so_confirmed) {
+ if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
/* Replace unconfirmed owners without checking for replay. */
- clp = sop->so_client;
- release_openowner(sop);
- open->op_stateowner = NULL;
- goto renew;
- }
- if (open->op_seqid == sop->so_seqid - 1) {
- if (sop->so_replay.rp_buflen)
- return nfserr_replay_me;
- /* The original OPEN failed so spectacularly
- * that we don't even have replay data saved!
- * Therefore, we have no choice but to continue
- * processing this OPEN; presumably, we'll
- * fail again for the same reason.
- */
- dprintk("nfsd4_process_open1: replay with no replay cache\n");
- goto renew;
- }
- if (open->op_seqid != sop->so_seqid)
- return nfserr_bad_seqid;
-renew:
- if (open->op_stateowner == NULL) {
- sop = alloc_init_open_stateowner(strhashval, clp, open);
- if (sop == NULL)
- return nfserr_resource;
- open->op_stateowner = sop;
+ clp = oo->oo_owner.so_client;
+ release_openowner(oo);
+ open->op_openowner = NULL;
+ goto new_owner;
}
- list_del_init(&sop->so_close_lru);
- renew_client(sop->so_client);
+ status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
+ if (status)
+ return status;
+ clp = oo->oo_owner.so_client;
+ goto alloc_stateid;
+new_owner:
+ oo = alloc_init_open_stateowner(strhashval, clp, open);
+ if (oo == NULL)
+ return nfserr_jukebox;
+ open->op_openowner = oo;
+alloc_stateid:
+ open->op_stp = nfs4_alloc_stateid(clp);
+ if (!open->op_stp)
+ return nfserr_jukebox;
return nfs_ok;
}
@@ -2500,36 +2622,37 @@ nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
return nfs_ok;
}
-static struct nfs4_delegation *
-find_delegation_file(struct nfs4_file *fp, stateid_t *stid)
+static int share_access_to_flags(u32 share_access)
{
- struct nfs4_delegation *dp;
+ share_access &= ~NFS4_SHARE_WANT_MASK;
- spin_lock(&recall_lock);
- list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
- if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) {
- spin_unlock(&recall_lock);
- return dp;
- }
- spin_unlock(&recall_lock);
- return NULL;
+ return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}
-static int share_access_to_flags(u32 share_access)
+static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
- share_access &= ~NFS4_SHARE_WANT_MASK;
+ struct nfs4_stid *ret;
- return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
+ ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
+ if (!ret)
+ return NULL;
+ return delegstateid(ret);
+}
+
+static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
+{
+ return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
+ open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}
static __be32
-nfs4_check_deleg(struct nfs4_file *fp, struct nfsd4_open *open,
+nfs4_check_deleg(struct nfs4_client *cl, struct nfs4_file *fp, struct nfsd4_open *open,
struct nfs4_delegation **dp)
{
int flags;
__be32 status = nfserr_bad_stateid;
- *dp = find_delegation_file(fp, &open->op_delegate_stateid);
+ *dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
if (*dp == NULL)
goto out;
flags = share_access_to_flags(open->op_share_access);
@@ -2537,41 +2660,37 @@ nfs4_check_deleg(struct nfs4_file *fp, struct nfsd4_open *open,
if (status)
*dp = NULL;
out:
- if (open->op_claim_type != NFS4_OPEN_CLAIM_DELEGATE_CUR)
+ if (!nfsd4_is_deleg_cur(open))
return nfs_ok;
if (status)
return status;
- open->op_stateowner->so_confirmed = 1;
+ open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
return nfs_ok;
}
static __be32
-nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_stateid **stpp)
+nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
{
- struct nfs4_stateid *local;
- __be32 status = nfserr_share_denied;
- struct nfs4_stateowner *sop = open->op_stateowner;
+ struct nfs4_ol_stateid *local;
+ struct nfs4_openowner *oo = open->op_openowner;
list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
/* ignore lock owners */
if (local->st_stateowner->so_is_open_owner == 0)
continue;
/* remember if we have seen this open owner */
- if (local->st_stateowner == sop)
+ if (local->st_stateowner == &oo->oo_owner)
*stpp = local;
/* check for conflicting share reservations */
if (!test_share(local, open))
- goto out;
+ return nfserr_share_denied;
}
- status = 0;
-out:
- return status;
+ return nfs_ok;
}
-static inline struct nfs4_stateid *
-nfs4_alloc_stateid(void)
+static void nfs4_free_stateid(struct nfs4_ol_stateid *s)
{
- return kmem_cache_alloc(stateid_slab, GFP_KERNEL);
+ kmem_cache_free(stateid_slab, s);
}
static inline int nfs4_access_to_access(u32 nfs4_access)
@@ -2592,12 +2711,6 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
int oflag = nfs4_access_to_omode(open->op_share_access);
int access = nfs4_access_to_access(open->op_share_access);
- /* CLAIM_DELEGATE_CUR is used in response to a broken lease;
- * allowing it to break the lease and return EAGAIN leaves the
- * client unable to make progress in returning the delegation */
- if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
- access |= NFSD_MAY_NOT_BREAK_LEASE;
-
if (!fp->fi_fds[oflag]) {
status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
&fp->fi_fds[oflag]);
@@ -2609,27 +2722,6 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
return nfs_ok;
}
-static __be32
-nfs4_new_open(struct svc_rqst *rqstp, struct nfs4_stateid **stpp,
- struct nfs4_file *fp, struct svc_fh *cur_fh,
- struct nfsd4_open *open)
-{
- struct nfs4_stateid *stp;
- __be32 status;
-
- stp = nfs4_alloc_stateid();
- if (stp == NULL)
- return nfserr_resource;
-
- status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
- if (status) {
- kmem_cache_free(stateid_slab, stp);
- return status;
- }
- *stpp = stp;
- return 0;
-}
-
static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
struct nfsd4_open *open)
@@ -2646,9 +2738,9 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
}
static __be32
-nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
+nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
- u32 op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
+ u32 op_share_access = open->op_share_access;
bool new_access;
__be32 status;
@@ -2677,8 +2769,8 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
static void
nfs4_set_claim_prev(struct nfsd4_open *open)
{
- open->op_stateowner->so_confirmed = 1;
- open->op_stateowner->so_client->cl_firststate = 1;
+ open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
+ open->op_openowner->oo_owner.so_client->cl_firststate = 1;
}
/* Should we give out recallable state?: */
@@ -2721,7 +2813,7 @@ static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
if (!fl)
return -ENOMEM;
fl->fl_file = find_readable_file(fp);
- list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
+ list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
if (status) {
list_del_init(&dp->dl_perclnt);
@@ -2750,7 +2842,7 @@ static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
atomic_inc(&fp->fi_delegees);
list_add(&dp->dl_perfile, &fp->fi_delegations);
spin_unlock(&recall_lock);
- list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
+ list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
return 0;
}
@@ -2758,14 +2850,14 @@ static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
* Attempt to hand out a delegation.
*/
static void
-nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_stateid *stp)
+nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
{
struct nfs4_delegation *dp;
- struct nfs4_stateowner *sop = stp->st_stateowner;
+ struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
int cb_up;
int status, flag = 0;
- cb_up = nfsd4_cb_channel_good(sop->so_client);
+ cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
flag = NFS4_OPEN_DELEGATE_NONE;
open->op_recall = 0;
switch (open->op_claim_type) {
@@ -2781,7 +2873,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
* had the chance to reclaim theirs.... */
if (locks_in_grace())
goto out;
- if (!cb_up || !sop->so_confirmed)
+ if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
goto out;
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
flag = NFS4_OPEN_DELEGATE_WRITE;
@@ -2792,17 +2884,17 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
goto out;
}
- dp = alloc_init_deleg(sop->so_client, stp, fh, flag);
+ dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag);
if (dp == NULL)
goto out_no_deleg;
status = nfs4_set_delegation(dp, flag);
if (status)
goto out_free;
- memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid));
+ memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
- STATEID_VAL(&dp->dl_stateid));
+ STATEID_VAL(&dp->dl_stid.sc_stateid));
out:
if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS
&& flag == NFS4_OPEN_DELEGATE_NONE
@@ -2824,16 +2916,13 @@ __be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
+ struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
struct nfs4_file *fp = NULL;
struct inode *ino = current_fh->fh_dentry->d_inode;
- struct nfs4_stateid *stp = NULL;
+ struct nfs4_ol_stateid *stp = NULL;
struct nfs4_delegation *dp = NULL;
__be32 status;
- status = nfserr_inval;
- if (!access_valid(open->op_share_access, resp->cstate.minorversion)
- || !deny_valid(open->op_share_deny))
- goto out;
/*
* Lookup file; if found, lookup stateid and check open request,
* and check for delegations in the process of being recalled.
@@ -2843,17 +2932,17 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
if (fp) {
if ((status = nfs4_check_open(fp, open, &stp)))
goto out;
- status = nfs4_check_deleg(fp, open, &dp);
+ status = nfs4_check_deleg(cl, fp, open, &dp);
if (status)
goto out;
} else {
status = nfserr_bad_stateid;
- if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
- goto out;
- status = nfserr_resource;
- fp = alloc_init_file(ino);
- if (fp == NULL)
+ if (nfsd4_is_deleg_cur(open))
goto out;
+ status = nfserr_jukebox;
+ fp = open->op_file;
+ open->op_file = NULL;
+ nfsd4_init_file(fp, ino);
}
/*
@@ -2865,24 +2954,24 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
if (status)
goto out;
- update_stateid(&stp->st_stateid);
} else {
- status = nfs4_new_open(rqstp, &stp, fp, current_fh, open);
+ status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
if (status)
goto out;
- init_stateid(stp, fp, open);
+ stp = open->op_stp;
+ open->op_stp = NULL;
+ init_open_stateid(stp, fp, open);
status = nfsd4_truncate(rqstp, current_fh, open);
if (status) {
release_open_stateid(stp);
goto out;
}
- if (nfsd4_has_session(&resp->cstate))
- update_stateid(&stp->st_stateid);
}
- memcpy(&open->op_stateid, &stp->st_stateid, sizeof(stateid_t));
+ update_stateid(&stp->st_stid.sc_stateid);
+ memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
if (nfsd4_has_session(&resp->cstate))
- open->op_stateowner->so_confirmed = 1;
+ open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
/*
* Attempt to hand out a delegation. No error return, because the
@@ -2893,7 +2982,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
status = nfs_ok;
dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
- STATEID_VAL(&stp->st_stateid));
+ STATEID_VAL(&stp->st_stid.sc_stateid));
out:
if (fp)
put_nfs4_file(fp);
@@ -2903,13 +2992,34 @@ out:
* To finish the open response, we just need to set the rflags.
*/
open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
- if (!open->op_stateowner->so_confirmed &&
+ if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
!nfsd4_has_session(&resp->cstate))
open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
return status;
}
+void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
+{
+ if (open->op_openowner) {
+ struct nfs4_openowner *oo = open->op_openowner;
+
+ if (!list_empty(&oo->oo_owner.so_stateids))
+ list_del_init(&oo->oo_close_lru);
+ if (oo->oo_flags & NFS4_OO_NEW) {
+ if (status) {
+ release_openowner(oo);
+ open->op_openowner = NULL;
+ } else
+ oo->oo_flags &= ~NFS4_OO_NEW;
+ }
+ }
+ if (open->op_file)
+ nfsd4_free_file(open->op_file);
+ if (open->op_stp)
+ nfs4_free_stateid(open->op_stp);
+}
+
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
clientid_t *clid)
@@ -2930,7 +3040,6 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
dprintk("nfsd4_renew: clientid not found!\n");
goto out;
}
- renew_client(clp);
status = nfserr_cb_path_down;
if (!list_empty(&clp->cl_delegations)
&& clp->cl_cb_state != NFSD4_CB_UP)
@@ -2962,7 +3071,7 @@ static time_t
nfs4_laundromat(void)
{
struct nfs4_client *clp;
- struct nfs4_stateowner *sop;
+ struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
struct list_head *pos, *next, reaplist;
time_t cutoff = get_seconds() - nfsd4_lease;
@@ -3019,16 +3128,14 @@ nfs4_laundromat(void)
}
test_val = nfsd4_lease;
list_for_each_safe(pos, next, &close_lru) {
- sop = list_entry(pos, struct nfs4_stateowner, so_close_lru);
- if (time_after((unsigned long)sop->so_time, (unsigned long)cutoff)) {
- u = sop->so_time - cutoff;
+ oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
+ if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
+ u = oo->oo_time - cutoff;
if (test_val > u)
test_val = u;
break;
}
- dprintk("NFSD: purging unused open stateowner (so_id %d)\n",
- sop->so_id);
- release_openowner(sop);
+ release_openowner(oo);
}
if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
@@ -3050,30 +3157,17 @@ laundromat_main(struct work_struct *not_used)
queue_delayed_work(laundry_wq, &laundromat_work, t*HZ);
}
-static struct nfs4_stateowner *
-search_close_lru(u32 st_id, int flags)
+static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
{
- struct nfs4_stateowner *local = NULL;
-
- if (flags & CLOSE_STATE) {
- list_for_each_entry(local, &close_lru, so_close_lru) {
- if (local->so_id == st_id)
- return local;
- }
- }
- return NULL;
-}
-
-static inline int
-nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp)
-{
- return fhp->fh_dentry->d_inode != stp->st_file->fi_inode;
+ if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
+ return nfserr_bad_stateid;
+ return nfs_ok;
}
static int
STALE_STATEID(stateid_t *stateid)
{
- if (stateid->si_boot == boot_time)
+ if (stateid->si_opaque.so_clid.cl_boot == boot_time)
return 0;
dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
STATEID_VAL(stateid));
@@ -3096,7 +3190,7 @@ access_permit_write(unsigned long access_bmap)
}
static
-__be32 nfs4_check_openmode(struct nfs4_stateid *stp, int flags)
+__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
__be32 status = nfserr_openmode;
@@ -3139,68 +3233,80 @@ grace_disallows_io(struct inode *inode)
return locks_in_grace() && mandatory_lock(inode);
}
-static int check_stateid_generation(stateid_t *in, stateid_t *ref, int flags)
+/* Returns true iff a is later than b: */
+static bool stateid_generation_after(stateid_t *a, stateid_t *b)
+{
+ return (s32)a->si_generation - (s32)b->si_generation > 0;
+}
+
+static int check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
/*
* When sessions are used the stateid generation number is ignored
* when it is zero.
*/
- if ((flags & HAS_SESSION) && in->si_generation == 0)
- goto out;
+ if (has_session && in->si_generation == 0)
+ return nfs_ok;
+
+ if (in->si_generation == ref->si_generation)
+ return nfs_ok;
/* If the client sends us a stateid from the future, it's buggy: */
- if (in->si_generation > ref->si_generation)
+ if (stateid_generation_after(in, ref))
return nfserr_bad_stateid;
/*
- * The following, however, can happen. For example, if the
- * client sends an open and some IO at the same time, the open
- * may bump si_generation while the IO is still in flight.
- * Thanks to hard links and renames, the client never knows what
- * file an open will affect. So it could avoid that situation
- * only by serializing all opens and IO from the same open
- * owner. To recover from the old_stateid error, the client
- * will just have to retry the IO:
+ * However, we could see a stateid from the past, even from a
+ * non-buggy client. For example, if the client sends a lock
+ * while some IO is outstanding, the lock may bump si_generation
+ * while the IO is still in flight. The client could avoid that
+ * situation by waiting for responses on all the IO requests,
+ * but better performance may result in retrying IO that
+ * receives an old_stateid error if requests are rarely
+ * reordered in flight:
*/
- if (in->si_generation < ref->si_generation)
- return nfserr_old_stateid;
-out:
- return nfs_ok;
+ return nfserr_old_stateid;
}
-static int is_delegation_stateid(stateid_t *stateid)
+__be32 nfs4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
- return stateid->si_fileid == 0;
-}
+ struct nfs4_stid *s;
+ struct nfs4_ol_stateid *ols;
+ __be32 status;
-static int is_open_stateid(struct nfs4_stateid *stateid)
-{
- return stateid->st_openstp == NULL;
+ if (STALE_STATEID(stateid))
+ return nfserr_stale_stateid;
+
+ s = find_stateid(cl, stateid);
+ if (!s)
+ return nfserr_stale_stateid;
+ status = check_stateid_generation(stateid, &s->sc_stateid, 1);
+ if (status)
+ return status;
+ if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID)))
+ return nfs_ok;
+ ols = openlockstateid(s);
+ if (ols->st_stateowner->so_is_open_owner
+ && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
+ return nfserr_bad_stateid;
+ return nfs_ok;
}
-__be32 nfs4_validate_stateid(stateid_t *stateid, int flags)
+static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s)
{
- struct nfs4_stateid *stp = NULL;
- __be32 status = nfserr_stale_stateid;
+ struct nfs4_client *cl;
+ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+ return nfserr_bad_stateid;
if (STALE_STATEID(stateid))
- goto out;
-
- status = nfserr_expired;
- stp = search_for_stateid(stateid);
- if (!stp)
- goto out;
- status = nfserr_bad_stateid;
-
- if (!stp->st_stateowner->so_confirmed)
- goto out;
-
- status = check_stateid_generation(stateid, &stp->st_stateid, flags);
- if (status)
- goto out;
+ return nfserr_stale_stateid;
+ cl = find_confirmed_client(&stateid->si_opaque.so_clid);
+ if (!cl)
+ return nfserr_expired;
+ *s = find_stateid_by_type(cl, stateid, typemask);
+ if (!*s)
+ return nfserr_bad_stateid;
+ return nfs_ok;
- status = nfs_ok;
-out:
- return status;
}
/*
@@ -3210,7 +3316,8 @@ __be32
nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
stateid_t *stateid, int flags, struct file **filpp)
{
- struct nfs4_stateid *stp = NULL;
+ struct nfs4_stid *s;
+ struct nfs4_ol_stateid *stp = NULL;
struct nfs4_delegation *dp = NULL;
struct svc_fh *current_fh = &cstate->current_fh;
struct inode *ino = current_fh->fh_dentry->d_inode;
@@ -3222,60 +3329,47 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
if (grace_disallows_io(ino))
return nfserr_grace;
- if (nfsd4_has_session(cstate))
- flags |= HAS_SESSION;
-
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
return check_special_stateids(current_fh, stateid, flags);
- status = nfserr_stale_stateid;
- if (STALE_STATEID(stateid))
+ status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s);
+ if (status)
+ return status;
+ status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
+ if (status)
goto out;
-
- /*
- * We assume that any stateid that has the current boot time,
- * but that we can't find, is expired:
- */
- status = nfserr_expired;
- if (is_delegation_stateid(stateid)) {
- dp = find_delegation_stateid(ino, stateid);
- if (!dp)
- goto out;
- status = check_stateid_generation(stateid, &dp->dl_stateid,
- flags);
- if (status)
- goto out;
+ switch (s->sc_type) {
+ case NFS4_DELEG_STID:
+ dp = delegstateid(s);
status = nfs4_check_delegmode(dp, flags);
if (status)
goto out;
- renew_client(dp->dl_client);
if (filpp) {
*filpp = dp->dl_file->fi_deleg_file;
BUG_ON(!*filpp);
}
- } else { /* open or lock stateid */
- stp = find_stateid(stateid, flags);
- if (!stp)
- goto out;
- status = nfserr_bad_stateid;
- if (nfs4_check_fh(current_fh, stp))
- goto out;
- if (!stp->st_stateowner->so_confirmed)
- goto out;
- status = check_stateid_generation(stateid, &stp->st_stateid,
- flags);
+ break;
+ case NFS4_OPEN_STID:
+ case NFS4_LOCK_STID:
+ stp = openlockstateid(s);
+ status = nfs4_check_fh(current_fh, stp);
if (status)
goto out;
+ if (stp->st_stateowner->so_is_open_owner
+ && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
+ goto out;
status = nfs4_check_openmode(stp, flags);
if (status)
goto out;
- renew_client(stp->st_stateowner->so_client);
if (filpp) {
if (flags & RD_STATE)
*filpp = find_readable_file(stp->st_file);
else
*filpp = find_writeable_file(stp->st_file);
}
+ break;
+ default:
+ return nfserr_bad_stateid;
}
status = nfs_ok;
out:
@@ -3283,18 +3377,9 @@ out:
}
static __be32
-nfsd4_free_delegation_stateid(stateid_t *stateid)
+nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
{
- struct nfs4_delegation *dp = search_for_delegation(stateid);
- if (dp)
- return nfserr_locks_held;
- return nfserr_bad_stateid;
-}
-
-static __be32
-nfsd4_free_lock_stateid(struct nfs4_stateid *stp)
-{
- if (check_for_locks(stp->st_file, stp->st_stateowner))
+ if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
return nfserr_locks_held;
release_lock_stateid(stp);
return nfs_ok;
@@ -3307,51 +3392,40 @@ __be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_test_stateid *test_stateid)
{
- test_stateid->ts_has_session = nfsd4_has_session(cstate);
+ /* real work is done during encoding */
return nfs_ok;
}
-/*
- * Free a state id
- */
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_free_stateid *free_stateid)
{
stateid_t *stateid = &free_stateid->fr_stateid;
- struct nfs4_stateid *stp;
- __be32 ret;
+ struct nfs4_stid *s;
+ struct nfs4_client *cl = cstate->session->se_client;
+ __be32 ret = nfserr_bad_stateid;
nfs4_lock_state();
- if (is_delegation_stateid(stateid)) {
- ret = nfsd4_free_delegation_stateid(stateid);
- goto out;
- }
-
- stp = search_for_stateid(stateid);
- if (!stp) {
- ret = nfserr_bad_stateid;
+ s = find_stateid(cl, stateid);
+ if (!s)
goto out;
- }
- if (stateid->si_generation != 0) {
- if (stateid->si_generation < stp->st_stateid.si_generation) {
- ret = nfserr_old_stateid;
- goto out;
- }
- if (stateid->si_generation > stp->st_stateid.si_generation) {
- ret = nfserr_bad_stateid;
- goto out;
- }
- }
-
- if (is_open_stateid(stp)) {
+ switch (s->sc_type) {
+ case NFS4_DELEG_STID:
ret = nfserr_locks_held;
goto out;
- } else {
- ret = nfsd4_free_lock_stateid(stp);
- goto out;
+ case NFS4_OPEN_STID:
+ case NFS4_LOCK_STID:
+ ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
+ if (ret)
+ goto out;
+ if (s->sc_type == NFS4_LOCK_STID)
+ ret = nfsd4_free_lock_stateid(openlockstateid(s));
+ else
+ ret = nfserr_locks_held;
+ break;
+ default:
+ ret = nfserr_bad_stateid;
}
-
out:
nfs4_unlock_state();
return ret;
@@ -3364,124 +3438,64 @@ setlkflg (int type)
RD_STATE : WR_STATE;
}
+static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
+{
+ struct svc_fh *current_fh = &cstate->current_fh;
+ struct nfs4_stateowner *sop = stp->st_stateowner;
+ __be32 status;
+
+ status = nfsd4_check_seqid(cstate, sop, seqid);
+ if (status)
+ return status;
+ if (stp->st_stid.sc_type == NFS4_CLOSED_STID)
+ /*
+ * "Closed" stateid's exist *only* to return
+ * nfserr_replay_me from the previous step.
+ */
+ return nfserr_bad_stateid;
+ status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
+ if (status)
+ return status;
+ return nfs4_check_fh(current_fh, stp);
+}
+
/*
* Checks for sequence id mutating operations.
*/
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
- stateid_t *stateid, int flags,
- struct nfs4_stateowner **sopp,
- struct nfs4_stateid **stpp, struct nfsd4_lock *lock)
+ stateid_t *stateid, char typemask,
+ struct nfs4_ol_stateid **stpp)
{
- struct nfs4_stateid *stp;
- struct nfs4_stateowner *sop;
- struct svc_fh *current_fh = &cstate->current_fh;
__be32 status;
+ struct nfs4_stid *s;
dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
seqid, STATEID_VAL(stateid));
*stpp = NULL;
- *sopp = NULL;
-
- if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
- dprintk("NFSD: preprocess_seqid_op: magic stateid!\n");
- return nfserr_bad_stateid;
- }
-
- if (STALE_STATEID(stateid))
- return nfserr_stale_stateid;
-
- if (nfsd4_has_session(cstate))
- flags |= HAS_SESSION;
-
- /*
- * We return BAD_STATEID if filehandle doesn't match stateid,
- * the confirmed flag is incorrecly set, or the generation
- * number is incorrect.
- */
- stp = find_stateid(stateid, flags);
- if (stp == NULL) {
- /*
- * Also, we should make sure this isn't just the result of
- * a replayed close:
- */
- sop = search_close_lru(stateid->si_stateownerid, flags);
- /* It's not stale; let's assume it's expired: */
- if (sop == NULL)
- return nfserr_expired;
- *sopp = sop;
- goto check_replay;
- }
-
- *stpp = stp;
- *sopp = sop = stp->st_stateowner;
-
- if (lock) {
- clientid_t *lockclid = &lock->v.new.clientid;
- struct nfs4_client *clp = sop->so_client;
- int lkflg = 0;
- __be32 status;
-
- lkflg = setlkflg(lock->lk_type);
-
- if (lock->lk_is_new) {
- if (!sop->so_is_open_owner)
- return nfserr_bad_stateid;
- if (!(flags & HAS_SESSION) &&
- !same_clid(&clp->cl_clientid, lockclid))
- return nfserr_bad_stateid;
- /* stp is the open stateid */
- status = nfs4_check_openmode(stp, lkflg);
- if (status)
- return status;
- } else {
- /* stp is the lock stateid */
- status = nfs4_check_openmode(stp->st_openstp, lkflg);
- if (status)
- return status;
- }
- }
+ status = nfsd4_lookup_stateid(stateid, typemask, &s);
+ if (status)
+ return status;
+ *stpp = openlockstateid(s);
+ cstate->replay_owner = (*stpp)->st_stateowner;
- if (nfs4_check_fh(current_fh, stp)) {
- dprintk("NFSD: preprocess_seqid_op: fh-stateid mismatch!\n");
- return nfserr_bad_stateid;
- }
+ return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
+}
- /*
- * We now validate the seqid and stateid generation numbers.
- * For the moment, we ignore the possibility of
- * generation number wraparound.
- */
- if (!(flags & HAS_SESSION) && seqid != sop->so_seqid)
- goto check_replay;
+static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp)
+{
+ __be32 status;
+ struct nfs4_openowner *oo;
- if (sop->so_confirmed && flags & CONFIRM) {
- dprintk("NFSD: preprocess_seqid_op: expected"
- " unconfirmed stateowner!\n");
- return nfserr_bad_stateid;
- }
- if (!sop->so_confirmed && !(flags & CONFIRM)) {
- dprintk("NFSD: preprocess_seqid_op: stateowner not"
- " confirmed yet!\n");
- return nfserr_bad_stateid;
- }
- status = check_stateid_generation(stateid, &stp->st_stateid, flags);
+ status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
+ NFS4_OPEN_STID, stpp);
if (status)
return status;
- renew_client(sop->so_client);
+ oo = openowner((*stpp)->st_stateowner);
+ if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
+ return nfserr_bad_stateid;
return nfs_ok;
-
-check_replay:
- if (seqid == sop->so_seqid - 1) {
- dprintk("NFSD: preprocess_seqid_op: retransmission?\n");
- /* indicate replay to calling function */
- return nfserr_replay_me;
- }
- dprintk("NFSD: preprocess_seqid_op: bad seqid (expected %d, got %d)\n",
- sop->so_seqid, seqid);
- *sopp = NULL;
- return nfserr_bad_seqid;
}
__be32
@@ -3489,8 +3503,8 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_open_confirm *oc)
{
__be32 status;
- struct nfs4_stateowner *sop;
- struct nfs4_stateid *stp;
+ struct nfs4_openowner *oo;
+ struct nfs4_ol_stateid *stp;
dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
(int)cstate->current_fh.fh_dentry->d_name.len,
@@ -3502,38 +3516,52 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfs4_lock_state();
- if ((status = nfs4_preprocess_seqid_op(cstate,
+ status = nfs4_preprocess_seqid_op(cstate,
oc->oc_seqid, &oc->oc_req_stateid,
- CONFIRM | OPEN_STATE,
- &oc->oc_stateowner, &stp, NULL)))
- goto out;
-
- sop = oc->oc_stateowner;
- sop->so_confirmed = 1;
- update_stateid(&stp->st_stateid);
- memcpy(&oc->oc_resp_stateid, &stp->st_stateid, sizeof(stateid_t));
+ NFS4_OPEN_STID, &stp);
+ if (status)
+ goto out;
+ oo = openowner(stp->st_stateowner);
+ status = nfserr_bad_stateid;
+ if (oo->oo_flags & NFS4_OO_CONFIRMED)
+ goto out;
+ oo->oo_flags |= NFS4_OO_CONFIRMED;
+ update_stateid(&stp->st_stid.sc_stateid);
+ memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
- __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stateid));
+ __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
- nfsd4_create_clid_dir(sop->so_client);
+ nfsd4_create_clid_dir(oo->oo_owner.so_client);
+ status = nfs_ok;
out:
- if (oc->oc_stateowner) {
- nfs4_get_stateowner(oc->oc_stateowner);
- cstate->replay_owner = oc->oc_stateowner;
- }
- nfs4_unlock_state();
+ if (!cstate->replay_owner)
+ nfs4_unlock_state();
return status;
}
-static inline void nfs4_file_downgrade(struct nfs4_stateid *stp, unsigned int to_access)
+static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
- int i;
+ if (!test_bit(access, &stp->st_access_bmap))
+ return;
+ nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
+ __clear_bit(access, &stp->st_access_bmap);
+}
- for (i = 1; i < 4; i++) {
- if (test_bit(i, &stp->st_access_bmap) && !(i & to_access)) {
- nfs4_file_put_access(stp->st_file, i);
- __clear_bit(i, &stp->st_access_bmap);
- }
+static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
+{
+ switch (to_access) {
+ case NFS4_SHARE_ACCESS_READ:
+ nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
+ nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
+ break;
+ case NFS4_SHARE_ACCESS_WRITE:
+ nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
+ nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
+ break;
+ case NFS4_SHARE_ACCESS_BOTH:
+ break;
+ default:
+ BUG();
}
}
@@ -3553,24 +3581,20 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
struct nfsd4_open_downgrade *od)
{
__be32 status;
- struct nfs4_stateid *stp;
+ struct nfs4_ol_stateid *stp;
dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
(int)cstate->current_fh.fh_dentry->d_name.len,
cstate->current_fh.fh_dentry->d_name.name);
- if (!access_valid(od->od_share_access, cstate->minorversion)
- || !deny_valid(od->od_share_deny))
- return nfserr_inval;
+ /* We don't yet support WANT bits: */
+ od->od_share_access &= NFS4_SHARE_ACCESS_MASK;
nfs4_lock_state();
- if ((status = nfs4_preprocess_seqid_op(cstate,
- od->od_seqid,
- &od->od_stateid,
- OPEN_STATE,
- &od->od_stateowner, &stp, NULL)))
+ status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
+ &od->od_stateid, &stp);
+ if (status)
goto out;
-
status = nfserr_inval;
if (!test_bit(od->od_share_access, &stp->st_access_bmap)) {
dprintk("NFSD:access not a subset current bitmap: 0x%lx, input access=%08x\n",
@@ -3582,22 +3606,45 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
stp->st_deny_bmap, od->od_share_deny);
goto out;
}
- nfs4_file_downgrade(stp, od->od_share_access);
+ nfs4_stateid_downgrade(stp, od->od_share_access);
reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap);
- update_stateid(&stp->st_stateid);
- memcpy(&od->od_stateid, &stp->st_stateid, sizeof(stateid_t));
+ update_stateid(&stp->st_stid.sc_stateid);
+ memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
status = nfs_ok;
out:
- if (od->od_stateowner) {
- nfs4_get_stateowner(od->od_stateowner);
- cstate->replay_owner = od->od_stateowner;
- }
- nfs4_unlock_state();
+ if (!cstate->replay_owner)
+ nfs4_unlock_state();
return status;
}
+void nfsd4_purge_closed_stateid(struct nfs4_stateowner *so)
+{
+ struct nfs4_openowner *oo;
+ struct nfs4_ol_stateid *s;
+
+ if (!so->so_is_open_owner)
+ return;
+ oo = openowner(so);
+ s = oo->oo_last_closed_stid;
+ if (!s)
+ return;
+ if (!(oo->oo_flags & NFS4_OO_PURGE_CLOSE)) {
+ /* Release the last_closed_stid on the next seqid bump: */
+ oo->oo_flags |= NFS4_OO_PURGE_CLOSE;
+ return;
+ }
+ oo->oo_flags &= ~NFS4_OO_PURGE_CLOSE;
+ release_last_closed_stateid(oo);
+}
+
+static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
+{
+ unhash_open_stateid(s);
+ s->st_stid.sc_type = NFS4_CLOSED_STID;
+}
+
/*
* nfs4_unlock_state() called after encode
*/
@@ -3606,39 +3653,37 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_close *close)
{
__be32 status;
- struct nfs4_stateid *stp;
+ struct nfs4_openowner *oo;
+ struct nfs4_ol_stateid *stp;
dprintk("NFSD: nfsd4_close on file %.*s\n",
(int)cstate->current_fh.fh_dentry->d_name.len,
cstate->current_fh.fh_dentry->d_name.name);
nfs4_lock_state();
- /* check close_lru for replay */
- if ((status = nfs4_preprocess_seqid_op(cstate,
- close->cl_seqid,
- &close->cl_stateid,
- OPEN_STATE | CLOSE_STATE,
- &close->cl_stateowner, &stp, NULL)))
+ status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
+ &close->cl_stateid,
+ NFS4_OPEN_STID|NFS4_CLOSED_STID,
+ &stp);
+ if (status)
goto out;
+ oo = openowner(stp->st_stateowner);
status = nfs_ok;
- update_stateid(&stp->st_stateid);
- memcpy(&close->cl_stateid, &stp->st_stateid, sizeof(stateid_t));
+ update_stateid(&stp->st_stid.sc_stateid);
+ memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
- /* release_stateid() calls nfsd_close() if needed */
- release_open_stateid(stp);
+ nfsd4_close_open_stateid(stp);
+ oo->oo_last_closed_stid = stp;
/* place unused nfs4_stateowners on so_close_lru list to be
* released by the laundromat service after the lease period
* to enable us to handle CLOSE replay
*/
- if (list_empty(&close->cl_stateowner->so_stateids))
- move_to_close_lru(close->cl_stateowner);
+ if (list_empty(&oo->oo_owner.so_stateids))
+ move_to_close_lru(oo);
out:
- if (close->cl_stateowner) {
- nfs4_get_stateowner(close->cl_stateowner);
- cstate->replay_owner = close->cl_stateowner;
- }
- nfs4_unlock_state();
+ if (!cstate->replay_owner)
+ nfs4_unlock_state();
return status;
}
@@ -3648,34 +3693,22 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
struct nfs4_delegation *dp;
stateid_t *stateid = &dr->dr_stateid;
+ struct nfs4_stid *s;
struct inode *inode;
__be32 status;
- int flags = 0;
if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
return status;
inode = cstate->current_fh.fh_dentry->d_inode;
- if (nfsd4_has_session(cstate))
- flags |= HAS_SESSION;
nfs4_lock_state();
- status = nfserr_bad_stateid;
- if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
- goto out;
- status = nfserr_stale_stateid;
- if (STALE_STATEID(stateid))
- goto out;
- status = nfserr_bad_stateid;
- if (!is_delegation_stateid(stateid))
- goto out;
- status = nfserr_expired;
- dp = find_delegation_stateid(inode, stateid);
- if (!dp)
+ status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s);
+ if (status)
goto out;
- status = check_stateid_generation(stateid, &dp->dl_stateid, flags);
+ dp = delegstateid(s);
+ status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
if (status)
goto out;
- renew_client(dp->dl_client);
unhash_delegation(dp);
out:
@@ -3713,9 +3746,6 @@ last_byte_offset(u64 start, u64 len)
return end > start ? end - 1: NFS4_MAX_UINT64;
}
-#define lockownerid_hashval(id) \
- ((id) & LOCK_HASH_MASK)
-
static inline unsigned int
lock_ownerstr_hashval(struct inode *inode, u32 cl_id,
struct xdr_netobj *ownername)
@@ -3725,101 +3755,7 @@ lock_ownerstr_hashval(struct inode *inode, u32 cl_id,
& LOCK_HASH_MASK;
}
-static struct list_head lock_ownerid_hashtbl[LOCK_HASH_SIZE];
static struct list_head lock_ownerstr_hashtbl[LOCK_HASH_SIZE];
-static struct list_head lockstateid_hashtbl[STATEID_HASH_SIZE];
-
-static int
-same_stateid(stateid_t *id_one, stateid_t *id_two)
-{
- if (id_one->si_stateownerid != id_two->si_stateownerid)
- return 0;
- return id_one->si_fileid == id_two->si_fileid;
-}
-
-static struct nfs4_stateid *
-find_stateid(stateid_t *stid, int flags)
-{
- struct nfs4_stateid *local;
- u32 st_id = stid->si_stateownerid;
- u32 f_id = stid->si_fileid;
- unsigned int hashval;
-
- dprintk("NFSD: find_stateid flags 0x%x\n",flags);
- if (flags & (LOCK_STATE | RD_STATE | WR_STATE)) {
- hashval = stateid_hashval(st_id, f_id);
- list_for_each_entry(local, &lockstateid_hashtbl[hashval], st_hash) {
- if ((local->st_stateid.si_stateownerid == st_id) &&
- (local->st_stateid.si_fileid == f_id))
- return local;
- }
- }
-
- if (flags & (OPEN_STATE | RD_STATE | WR_STATE)) {
- hashval = stateid_hashval(st_id, f_id);
- list_for_each_entry(local, &stateid_hashtbl[hashval], st_hash) {
- if ((local->st_stateid.si_stateownerid == st_id) &&
- (local->st_stateid.si_fileid == f_id))
- return local;
- }
- }
- return NULL;
-}
-
-static struct nfs4_stateid *
-search_for_stateid(stateid_t *stid)
-{
- struct nfs4_stateid *local;
- unsigned int hashval = stateid_hashval(stid->si_stateownerid, stid->si_fileid);
-
- list_for_each_entry(local, &lockstateid_hashtbl[hashval], st_hash) {
- if (same_stateid(&local->st_stateid, stid))
- return local;
- }
-
- list_for_each_entry(local, &stateid_hashtbl[hashval], st_hash) {
- if (same_stateid(&local->st_stateid, stid))
- return local;
- }
- return NULL;
-}
-
-static struct nfs4_delegation *
-search_for_delegation(stateid_t *stid)
-{
- struct nfs4_file *fp;
- struct nfs4_delegation *dp;
- struct list_head *pos;
- int i;
-
- for (i = 0; i < FILE_HASH_SIZE; i++) {
- list_for_each_entry(fp, &file_hashtbl[i], fi_hash) {
- list_for_each(pos, &fp->fi_delegations) {
- dp = list_entry(pos, struct nfs4_delegation, dl_perfile);
- if (same_stateid(&dp->dl_stateid, stid))
- return dp;
- }
- }
- }
- return NULL;
-}
-
-static struct nfs4_delegation *
-find_delegation_stateid(struct inode *ino, stateid_t *stid)
-{
- struct nfs4_file *fp;
- struct nfs4_delegation *dl;
-
- dprintk("NFSD: %s: stateid=" STATEID_FMT "\n", __func__,
- STATEID_VAL(stid));
-
- fp = find_file(ino);
- if (!fp)
- return NULL;
- dl = find_delegation_file(fp, stid);
- put_nfs4_file(fp);
- return dl;
-}
/*
* TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
@@ -3846,15 +3782,21 @@ static const struct lock_manager_operations nfsd_posix_mng_ops = {
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
- struct nfs4_stateowner *sop;
+ struct nfs4_lockowner *lo;
if (fl->fl_lmops == &nfsd_posix_mng_ops) {
- sop = (struct nfs4_stateowner *) fl->fl_owner;
- kref_get(&sop->so_ref);
- deny->ld_sop = sop;
- deny->ld_clientid = sop->so_client->cl_clientid;
+ lo = (struct nfs4_lockowner *) fl->fl_owner;
+ deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
+ lo->lo_owner.so_owner.len, GFP_KERNEL);
+ if (!deny->ld_owner.data)
+ /* We just don't care that much */
+ goto nevermind;
+ deny->ld_owner.len = lo->lo_owner.so_owner.len;
+ deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
} else {
- deny->ld_sop = NULL;
+nevermind:
+ deny->ld_owner.len = 0;
+ deny->ld_owner.data = NULL;
deny->ld_clientid.cl_boot = 0;
deny->ld_clientid.cl_id = 0;
}
@@ -3867,8 +3809,8 @@ nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
deny->ld_type = NFS4_WRITE_LT;
}
-static struct nfs4_stateowner *
-find_lockstateowner_str(struct inode *inode, clientid_t *clid,
+static struct nfs4_lockowner *
+find_lockowner_str(struct inode *inode, clientid_t *clid,
struct xdr_netobj *owner)
{
unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner);
@@ -3876,11 +3818,17 @@ find_lockstateowner_str(struct inode *inode, clientid_t *clid,
list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) {
if (same_owner_str(op, owner, clid))
- return op;
+ return lockowner(op);
}
return NULL;
}
+static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp)
+{
+ list_add(&lo->lo_owner.so_strhash, &lock_ownerstr_hashtbl[strhashval]);
+ list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
+}
+
/*
* Alloc a lock owner structure.
* Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
@@ -3889,67 +3837,40 @@ find_lockstateowner_str(struct inode *inode, clientid_t *clid,
* strhashval = lock_ownerstr_hashval
*/
-static struct nfs4_stateowner *
-alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_stateid *open_stp, struct nfsd4_lock *lock) {
- struct nfs4_stateowner *sop;
- struct nfs4_replay *rp;
- unsigned int idhashval;
+static struct nfs4_lockowner *
+alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
+ struct nfs4_lockowner *lo;
- if (!(sop = alloc_stateowner(&lock->lk_new_owner)))
+ lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
+ if (!lo)
return NULL;
- idhashval = lockownerid_hashval(current_ownerid);
- INIT_LIST_HEAD(&sop->so_idhash);
- INIT_LIST_HEAD(&sop->so_strhash);
- INIT_LIST_HEAD(&sop->so_perclient);
- INIT_LIST_HEAD(&sop->so_stateids);
- INIT_LIST_HEAD(&sop->so_perstateid);
- INIT_LIST_HEAD(&sop->so_close_lru); /* not used */
- sop->so_time = 0;
- list_add(&sop->so_idhash, &lock_ownerid_hashtbl[idhashval]);
- list_add(&sop->so_strhash, &lock_ownerstr_hashtbl[strhashval]);
- list_add(&sop->so_perstateid, &open_stp->st_lockowners);
- sop->so_is_open_owner = 0;
- sop->so_id = current_ownerid++;
- sop->so_client = clp;
+ INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
+ lo->lo_owner.so_is_open_owner = 0;
/* It is the openowner seqid that will be incremented in encode in the
* case of new lockowners; so increment the lock seqid manually: */
- sop->so_seqid = lock->lk_new_lock_seqid + 1;
- sop->so_confirmed = 1;
- rp = &sop->so_replay;
- rp->rp_status = nfserr_serverfault;
- rp->rp_buflen = 0;
- rp->rp_buf = rp->rp_ibuf;
- return sop;
+ lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
+ hash_lockowner(lo, strhashval, clp, open_stp);
+ return lo;
}
-static struct nfs4_stateid *
-alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struct nfs4_stateid *open_stp)
+static struct nfs4_ol_stateid *
+alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
{
- struct nfs4_stateid *stp;
- unsigned int hashval = stateid_hashval(sop->so_id, fp->fi_id);
+ struct nfs4_ol_stateid *stp;
+ struct nfs4_client *clp = lo->lo_owner.so_client;
- stp = nfs4_alloc_stateid();
+ stp = nfs4_alloc_stateid(clp);
if (stp == NULL)
- goto out;
- INIT_LIST_HEAD(&stp->st_hash);
- INIT_LIST_HEAD(&stp->st_perfile);
- INIT_LIST_HEAD(&stp->st_perstateowner);
- INIT_LIST_HEAD(&stp->st_lockowners); /* not used */
- list_add(&stp->st_hash, &lockstateid_hashtbl[hashval]);
+ return NULL;
+ init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
list_add(&stp->st_perfile, &fp->fi_stateids);
- list_add(&stp->st_perstateowner, &sop->so_stateids);
- stp->st_stateowner = sop;
+ list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
+ stp->st_stateowner = &lo->lo_owner;
get_nfs4_file(fp);
stp->st_file = fp;
- stp->st_stateid.si_boot = boot_time;
- stp->st_stateid.si_stateownerid = sop->so_id;
- stp->st_stateid.si_fileid = fp->fi_id;
- stp->st_stateid.si_generation = 0;
stp->st_access_bmap = 0;
stp->st_deny_bmap = open_stp->st_deny_bmap;
stp->st_openstp = open_stp;
-
-out:
return stp;
}
@@ -3960,7 +3881,7 @@ check_lock_length(u64 offset, u64 length)
LOFF_OVERFLOW(offset, length)));
}
-static void get_lock_access(struct nfs4_stateid *lock_stp, u32 access)
+static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
struct nfs4_file *fp = lock_stp->st_file;
int oflag = nfs4_access_to_omode(access);
@@ -3978,15 +3899,16 @@ __be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_lock *lock)
{
- struct nfs4_stateowner *open_sop = NULL;
- struct nfs4_stateowner *lock_sop = NULL;
- struct nfs4_stateid *lock_stp;
+ struct nfs4_openowner *open_sop = NULL;
+ struct nfs4_lockowner *lock_sop = NULL;
+ struct nfs4_ol_stateid *lock_stp;
struct nfs4_file *fp;
struct file *filp = NULL;
struct file_lock file_lock;
struct file_lock conflock;
__be32 status = 0;
unsigned int strhashval;
+ int lkflg;
int err;
dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
@@ -4010,7 +3932,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* Use open owner and open stateid to create lock owner and
* lock stateid.
*/
- struct nfs4_stateid *open_stp = NULL;
+ struct nfs4_ol_stateid *open_stp = NULL;
status = nfserr_stale_clientid;
if (!nfsd4_has_session(cstate) &&
@@ -4018,26 +3940,29 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
/* validate and update open stateid and open seqid */
- status = nfs4_preprocess_seqid_op(cstate,
+ status = nfs4_preprocess_confirmed_seqid_op(cstate,
lock->lk_new_open_seqid,
&lock->lk_new_open_stateid,
- OPEN_STATE,
- &lock->lk_replay_owner, &open_stp,
- lock);
+ &open_stp);
if (status)
goto out;
- open_sop = lock->lk_replay_owner;
+ open_sop = openowner(open_stp->st_stateowner);
+ status = nfserr_bad_stateid;
+ if (!nfsd4_has_session(cstate) &&
+ !same_clid(&open_sop->oo_owner.so_client->cl_clientid,
+ &lock->v.new.clientid))
+ goto out;
/* create lockowner and lock stateid */
fp = open_stp->st_file;
- strhashval = lock_ownerstr_hashval(fp->fi_inode,
- open_sop->so_client->cl_clientid.cl_id,
+ strhashval = lock_ownerstr_hashval(fp->fi_inode,
+ open_sop->oo_owner.so_client->cl_clientid.cl_id,
&lock->v.new.owner);
/* XXX: Do we need to check for duplicate stateowners on
* the same file, or should they just be allowed (and
* create new stateids)? */
- status = nfserr_resource;
+ status = nfserr_jukebox;
lock_sop = alloc_init_lock_stateowner(strhashval,
- open_sop->so_client, open_stp, lock);
+ open_sop->oo_owner.so_client, open_stp, lock);
if (lock_sop == NULL)
goto out;
lock_stp = alloc_init_lock_stateid(lock_sop, fp, open_stp);
@@ -4046,16 +3971,20 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
} else {
/* lock (lock owner + lock stateid) already exists */
status = nfs4_preprocess_seqid_op(cstate,
- lock->lk_old_lock_seqid,
- &lock->lk_old_lock_stateid,
- LOCK_STATE,
- &lock->lk_replay_owner, &lock_stp, lock);
+ lock->lk_old_lock_seqid,
+ &lock->lk_old_lock_stateid,
+ NFS4_LOCK_STID, &lock_stp);
if (status)
goto out;
- lock_sop = lock->lk_replay_owner;
+ lock_sop = lockowner(lock_stp->st_stateowner);
fp = lock_stp->st_file;
}
- /* lock->lk_replay_owner and lock_stp have been created or found */
+ /* lock_sop and lock_stp have been created or found */
+
+ lkflg = setlkflg(lock->lk_type);
+ status = nfs4_check_openmode(lock_stp, lkflg);
+ if (status)
+ goto out;
status = nfserr_grace;
if (locks_in_grace() && !lock->lk_reclaim)
@@ -4106,8 +4035,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
switch (-err) {
case 0: /* success! */
- update_stateid(&lock_stp->st_stateid);
- memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid,
+ update_stateid(&lock_stp->st_stid.sc_stateid);
+ memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
sizeof(stateid_t));
status = 0;
break;
@@ -4119,19 +4048,16 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
case (EDEADLK):
status = nfserr_deadlock;
break;
- default:
+ default:
dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
- status = nfserr_resource;
+ status = nfserrno(err);
break;
}
out:
if (status && lock->lk_is_new && lock_sop)
release_lockowner(lock_sop);
- if (lock->lk_replay_owner) {
- nfs4_get_stateowner(lock->lk_replay_owner);
- cstate->replay_owner = lock->lk_replay_owner;
- }
- nfs4_unlock_state();
+ if (!cstate->replay_owner)
+ nfs4_unlock_state();
return status;
}
@@ -4163,6 +4089,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
struct inode *inode;
struct file_lock file_lock;
+ struct nfs4_lockowner *lo;
int error;
__be32 status;
@@ -4172,19 +4099,14 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (check_lock_length(lockt->lt_offset, lockt->lt_length))
return nfserr_inval;
- lockt->lt_stateowner = NULL;
nfs4_lock_state();
status = nfserr_stale_clientid;
if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid))
goto out;
- if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) {
- dprintk("NFSD: nfsd4_lockt: fh_verify() failed!\n");
- if (status == nfserr_symlink)
- status = nfserr_inval;
+ if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
goto out;
- }
inode = cstate->current_fh.fh_dentry->d_inode;
locks_init_lock(&file_lock);
@@ -4203,10 +4125,9 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
}
- lockt->lt_stateowner = find_lockstateowner_str(inode,
- &lockt->lt_clientid, &lockt->lt_owner);
- if (lockt->lt_stateowner)
- file_lock.fl_owner = (fl_owner_t)lockt->lt_stateowner;
+ lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner);
+ if (lo)
+ file_lock.fl_owner = (fl_owner_t)lo;
file_lock.fl_pid = current->tgid;
file_lock.fl_flags = FL_POSIX;
@@ -4234,7 +4155,7 @@ __be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_locku *locku)
{
- struct nfs4_stateid *stp;
+ struct nfs4_ol_stateid *stp;
struct file *filp = NULL;
struct file_lock file_lock;
__be32 status;
@@ -4249,13 +4170,10 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfs4_lock_state();
- if ((status = nfs4_preprocess_seqid_op(cstate,
- locku->lu_seqid,
- &locku->lu_stateid,
- LOCK_STATE,
- &locku->lu_stateowner, &stp, NULL)))
+ status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
+ &locku->lu_stateid, NFS4_LOCK_STID, &stp);
+ if (status)
goto out;
-
filp = find_any_file(stp->st_file);
if (!filp) {
status = nfserr_lock_range;
@@ -4264,7 +4182,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
BUG_ON(!filp);
locks_init_lock(&file_lock);
file_lock.fl_type = F_UNLCK;
- file_lock.fl_owner = (fl_owner_t) locku->lu_stateowner;
+ file_lock.fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
file_lock.fl_pid = current->tgid;
file_lock.fl_file = filp;
file_lock.fl_flags = FL_POSIX;
@@ -4285,15 +4203,12 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/*
* OK, unlock succeeded; the only thing left to do is update the stateid.
*/
- update_stateid(&stp->st_stateid);
- memcpy(&locku->lu_stateid, &stp->st_stateid, sizeof(stateid_t));
+ update_stateid(&stp->st_stid.sc_stateid);
+ memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
out:
- if (locku->lu_stateowner) {
- nfs4_get_stateowner(locku->lu_stateowner);
- cstate->replay_owner = locku->lu_stateowner;
- }
- nfs4_unlock_state();
+ if (!cstate->replay_owner)
+ nfs4_unlock_state();
return status;
out_nfserr:
@@ -4307,7 +4222,7 @@ out_nfserr:
* 0: no locks held by lockowner
*/
static int
-check_for_locks(struct nfs4_file *filp, struct nfs4_stateowner *lowner)
+check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
{
struct file_lock **flpp;
struct inode *inode = filp->fi_inode;
@@ -4332,7 +4247,8 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
{
clientid_t *clid = &rlockowner->rl_clientid;
struct nfs4_stateowner *sop;
- struct nfs4_stateid *stp;
+ struct nfs4_lockowner *lo;
+ struct nfs4_ol_stateid *stp;
struct xdr_netobj *owner = &rlockowner->rl_owner;
struct list_head matches;
int i;
@@ -4356,16 +4272,15 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
* data structures. */
INIT_LIST_HEAD(&matches);
for (i = 0; i < LOCK_HASH_SIZE; i++) {
- list_for_each_entry(sop, &lock_ownerid_hashtbl[i], so_idhash) {
+ list_for_each_entry(sop, &lock_ownerstr_hashtbl[i], so_strhash) {
if (!same_owner_str(sop, owner, clid))
continue;
list_for_each_entry(stp, &sop->so_stateids,
st_perstateowner) {
- if (check_for_locks(stp->st_file, sop))
+ lo = lockowner(sop);
+ if (check_for_locks(stp->st_file, lo))
goto out;
- /* Note: so_perclient unused for lockowners,
- * so it's OK to fool with here. */
- list_add(&sop->so_perclient, &matches);
+ list_add(&lo->lo_list, &matches);
}
}
}
@@ -4374,12 +4289,12 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
* have been checked. */
status = nfs_ok;
while (!list_empty(&matches)) {
- sop = list_entry(matches.next, struct nfs4_stateowner,
- so_perclient);
+ lo = list_entry(matches.next, struct nfs4_lockowner,
+ lo_list);
/* unhash_stateowner deletes so_perclient only
* for openowners. */
- list_del(&sop->so_perclient);
- release_lockowner(sop);
+ list_del(&lo->lo_list);
+ release_lockowner(lo);
}
out:
nfs4_unlock_state();
@@ -4501,16 +4416,10 @@ nfs4_state_init(void)
for (i = 0; i < FILE_HASH_SIZE; i++) {
INIT_LIST_HEAD(&file_hashtbl[i]);
}
- for (i = 0; i < OWNER_HASH_SIZE; i++) {
- INIT_LIST_HEAD(&ownerstr_hashtbl[i]);
- INIT_LIST_HEAD(&ownerid_hashtbl[i]);
- }
- for (i = 0; i < STATEID_HASH_SIZE; i++) {
- INIT_LIST_HEAD(&stateid_hashtbl[i]);
- INIT_LIST_HEAD(&lockstateid_hashtbl[i]);
+ for (i = 0; i < OPEN_OWNER_HASH_SIZE; i++) {
+ INIT_LIST_HEAD(&open_ownerstr_hashtbl[i]);
}
for (i = 0; i < LOCK_HASH_SIZE; i++) {
- INIT_LIST_HEAD(&lock_ownerid_hashtbl[i]);
INIT_LIST_HEAD(&lock_ownerstr_hashtbl[i]);
}
memset(&onestateid, ~0, sizeof(stateid_t));
@@ -4527,7 +4436,7 @@ nfsd4_load_reboot_recovery_data(void)
int status;
nfs4_lock_state();
- nfsd4_init_recdir(user_recovery_dirname);
+ nfsd4_init_recdir();
status = nfsd4_recdir_load();
nfs4_unlock_state();
if (status)
@@ -4636,40 +4545,3 @@ nfs4_state_shutdown(void)
nfs4_unlock_state();
nfsd4_destroy_callback_queue();
}
-
-/*
- * user_recovery_dirname is protected by the nfsd_mutex since it's only
- * accessed when nfsd is starting.
- */
-static void
-nfs4_set_recdir(char *recdir)
-{
- strcpy(user_recovery_dirname, recdir);
-}
-
-/*
- * Change the NFSv4 recovery directory to recdir.
- */
-int
-nfs4_reset_recoverydir(char *recdir)
-{
- int status;
- struct path path;
-
- status = kern_path(recdir, LOOKUP_FOLLOW, &path);
- if (status)
- return status;
- status = -ENOTDIR;
- if (S_ISDIR(path.dentry->d_inode->i_mode)) {
- nfs4_set_recdir(recdir);
- status = 0;
- }
- path_put(&path);
- return status;
-}
-
-char *
-nfs4_recoverydir(void)
-{
- return user_recovery_dirname;
-}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index c8bf405d19d..66d095d7955 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -456,7 +456,6 @@ nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
{
DECODE_HEAD;
- close->cl_stateowner = NULL;
READ_BUF(4);
READ32(close->cl_seqid);
return nfsd4_decode_stateid(argp, &close->cl_stateid);
@@ -551,7 +550,6 @@ nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
{
DECODE_HEAD;
- lock->lk_replay_owner = NULL;
/*
* type, reclaim(boolean), offset, length, new_lock_owner(boolean)
*/
@@ -611,7 +609,6 @@ nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
{
DECODE_HEAD;
- locku->lu_stateowner = NULL;
READ_BUF(8);
READ32(locku->lu_type);
if ((locku->lu_type < NFS4_READ_LT) || (locku->lu_type > NFS4_WRITEW_LT))
@@ -642,6 +639,83 @@ nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup
DECODE_TAIL;
}
+static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *x)
+{
+ __be32 *p;
+ u32 w;
+
+ READ_BUF(4);
+ READ32(w);
+ *x = w;
+ switch (w & NFS4_SHARE_ACCESS_MASK) {
+ case NFS4_SHARE_ACCESS_READ:
+ case NFS4_SHARE_ACCESS_WRITE:
+ case NFS4_SHARE_ACCESS_BOTH:
+ break;
+ default:
+ return nfserr_bad_xdr;
+ }
+ w &= !NFS4_SHARE_ACCESS_MASK;
+ if (!w)
+ return nfs_ok;
+ if (!argp->minorversion)
+ return nfserr_bad_xdr;
+ switch (w & NFS4_SHARE_WANT_MASK) {
+ case NFS4_SHARE_WANT_NO_PREFERENCE:
+ case NFS4_SHARE_WANT_READ_DELEG:
+ case NFS4_SHARE_WANT_WRITE_DELEG:
+ case NFS4_SHARE_WANT_ANY_DELEG:
+ case NFS4_SHARE_WANT_NO_DELEG:
+ case NFS4_SHARE_WANT_CANCEL:
+ break;
+ default:
+ return nfserr_bad_xdr;
+ }
+ w &= ~NFS4_SHARE_WANT_MASK;
+ if (!w)
+ return nfs_ok;
+ switch (w) {
+ case NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL:
+ case NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED:
+ case (NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL |
+ NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED):
+ return nfs_ok;
+ }
+xdr_error:
+ return nfserr_bad_xdr;
+}
+
+static __be32 nfsd4_decode_share_deny(struct nfsd4_compoundargs *argp, u32 *x)
+{
+ __be32 *p;
+
+ READ_BUF(4);
+ READ32(*x);
+ /* Note: unlinke access bits, deny bits may be zero. */
+ if (*x & ~NFS4_SHARE_DENY_BOTH)
+ return nfserr_bad_xdr;
+ return nfs_ok;
+xdr_error:
+ return nfserr_bad_xdr;
+}
+
+static __be32 nfsd4_decode_opaque(struct nfsd4_compoundargs *argp, struct xdr_netobj *o)
+{
+ __be32 *p;
+
+ READ_BUF(4);
+ READ32(o->len);
+
+ if (o->len == 0 || o->len > NFS4_OPAQUE_LIMIT)
+ return nfserr_bad_xdr;
+
+ READ_BUF(o->len);
+ SAVEMEM(o->data, o->len);
+ return nfs_ok;
+xdr_error:
+ return nfserr_bad_xdr;
+}
+
static __be32
nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
{
@@ -649,19 +723,23 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
memset(open->op_bmval, 0, sizeof(open->op_bmval));
open->op_iattr.ia_valid = 0;
- open->op_stateowner = NULL;
+ open->op_openowner = NULL;
/* seqid, share_access, share_deny, clientid, ownerlen */
- READ_BUF(16 + sizeof(clientid_t));
+ READ_BUF(4);
READ32(open->op_seqid);
- READ32(open->op_share_access);
- READ32(open->op_share_deny);
+ status = nfsd4_decode_share_access(argp, &open->op_share_access);
+ if (status)
+ goto xdr_error;
+ status = nfsd4_decode_share_deny(argp, &open->op_share_deny);
+ if (status)
+ goto xdr_error;
+ READ_BUF(sizeof(clientid_t));
COPYMEM(&open->op_clientid, sizeof(clientid_t));
- READ32(open->op_owner.len);
-
- /* owner, open_flag */
- READ_BUF(open->op_owner.len + 4);
- SAVEMEM(open->op_owner.data, open->op_owner.len);
+ status = nfsd4_decode_opaque(argp, &open->op_owner);
+ if (status)
+ goto xdr_error;
+ READ_BUF(4);
READ32(open->op_create);
switch (open->op_create) {
case NFS4_OPEN_NOCREATE:
@@ -727,6 +805,19 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
if ((status = check_filename(open->op_fname.data, open->op_fname.len, nfserr_inval)))
return status;
break;
+ case NFS4_OPEN_CLAIM_FH:
+ case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
+ if (argp->minorversion < 1)
+ goto xdr_error;
+ /* void */
+ break;
+ case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
+ if (argp->minorversion < 1)
+ goto xdr_error;
+ status = nfsd4_decode_stateid(argp, &open->op_delegate_stateid);
+ if (status)
+ return status;
+ break;
default:
goto xdr_error;
}
@@ -739,7 +830,6 @@ nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_con
{
DECODE_HEAD;
- open_conf->oc_stateowner = NULL;
status = nfsd4_decode_stateid(argp, &open_conf->oc_req_stateid);
if (status)
return status;
@@ -754,15 +844,17 @@ nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_d
{
DECODE_HEAD;
- open_down->od_stateowner = NULL;
status = nfsd4_decode_stateid(argp, &open_down->od_stateid);
if (status)
return status;
- READ_BUF(12);
+ READ_BUF(4);
READ32(open_down->od_seqid);
- READ32(open_down->od_share_access);
- READ32(open_down->od_share_deny);
-
+ status = nfsd4_decode_share_access(argp, &open_down->od_share_access);
+ if (status)
+ return status;
+ status = nfsd4_decode_share_deny(argp, &open_down->od_share_deny);
+ if (status)
+ return status;
DECODE_TAIL;
}
@@ -903,12 +995,13 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
{
DECODE_HEAD;
- READ_BUF(12);
+ READ_BUF(8);
COPYMEM(setclientid->se_verf.data, 8);
- READ32(setclientid->se_namelen);
- READ_BUF(setclientid->se_namelen + 8);
- SAVEMEM(setclientid->se_name, setclientid->se_namelen);
+ status = nfsd4_decode_opaque(argp, &setclientid->se_name);
+ if (status)
+ return nfserr_bad_xdr;
+ READ_BUF(8);
READ32(setclientid->se_callback_prog);
READ32(setclientid->se_callback_netid_len);
@@ -1051,11 +1144,9 @@ nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(exid->verifier.data, NFS4_VERIFIER_SIZE);
- READ_BUF(4);
- READ32(exid->clname.len);
-
- READ_BUF(exid->clname.len);
- SAVEMEM(exid->clname.data, exid->clname.len);
+ status = nfsd4_decode_opaque(argp, &exid->clname);
+ if (status)
+ return nfserr_bad_xdr;
READ_BUF(4);
READ32(exid->flags);
@@ -1326,6 +1417,16 @@ xdr_error:
goto out;
}
+static __be32 nfsd4_decode_destroy_clientid(struct nfsd4_compoundargs *argp, struct nfsd4_destroy_clientid *dc)
+{
+ DECODE_HEAD;
+
+ READ_BUF(8);
+ COPYMEM(&dc->clientid, 8);
+
+ DECODE_TAIL;
+}
+
static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, struct nfsd4_reclaim_complete *rc)
{
DECODE_HEAD;
@@ -1447,7 +1548,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
[OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_test_stateid,
[OP_WANT_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_DESTROY_CLIENTID] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_DESTROY_CLIENTID] = (nfsd4_dec)nfsd4_decode_destroy_clientid,
[OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_reclaim_complete,
};
@@ -1630,15 +1731,20 @@ static void write_cinfo(__be32 **p, struct nfsd4_change_info *c)
* we know whether the error to be returned is a sequence id mutating error.
*/
-#define ENCODE_SEQID_OP_TAIL(stateowner) do { \
- if (seqid_mutating_err(nfserr) && stateowner) { \
- stateowner->so_seqid++; \
- stateowner->so_replay.rp_status = nfserr; \
- stateowner->so_replay.rp_buflen = \
- (((char *)(resp)->p - (char *)save)); \
- memcpy(stateowner->so_replay.rp_buf, save, \
- stateowner->so_replay.rp_buflen); \
- } } while (0);
+static void encode_seqid_op_tail(struct nfsd4_compoundres *resp, __be32 *save, __be32 nfserr)
+{
+ struct nfs4_stateowner *stateowner = resp->cstate.replay_owner;
+
+ if (seqid_mutating_err(ntohl(nfserr)) && stateowner) {
+ stateowner->so_seqid++;
+ stateowner->so_replay.rp_status = nfserr;
+ stateowner->so_replay.rp_buflen =
+ (char *)resp->p - (char *)save;
+ memcpy(stateowner->so_replay.rp_buf, save,
+ stateowner->so_replay.rp_buflen);
+ nfsd4_purge_closed_stateid(stateowner);
+ }
+}
/* Encode as an array of strings the string given with components
* separated @sep.
@@ -1697,36 +1803,89 @@ static __be32 nfsd4_encode_fs_location4(struct nfsd4_fs_location *location,
}
/*
- * Return the path to an export point in the pseudo filesystem namespace
- * Returned string is safe to use as long as the caller holds a reference
- * to @exp.
+ * Encode a path in RFC3530 'pathname4' format
*/
-static char *nfsd4_path(struct svc_rqst *rqstp, struct svc_export *exp, __be32 *stat)
+static __be32 nfsd4_encode_path(const struct path *root,
+ const struct path *path, __be32 **pp, int *buflen)
{
- struct svc_fh tmp_fh;
- char *path = NULL, *rootpath;
- size_t rootlen;
+ struct path cur = {
+ .mnt = path->mnt,
+ .dentry = path->dentry,
+ };
+ __be32 *p = *pp;
+ struct dentry **components = NULL;
+ unsigned int ncomponents = 0;
+ __be32 err = nfserr_jukebox;
- fh_init(&tmp_fh, NFS4_FHSIZE);
- *stat = exp_pseudoroot(rqstp, &tmp_fh);
- if (*stat)
- return NULL;
- rootpath = tmp_fh.fh_export->ex_pathname;
+ dprintk("nfsd4_encode_components(");
- path = exp->ex_pathname;
+ path_get(&cur);
+ /* First walk the path up to the nfsd root, and store the
+ * dentries/path components in an array.
+ */
+ for (;;) {
+ if (cur.dentry == root->dentry && cur.mnt == root->mnt)
+ break;
+ if (cur.dentry == cur.mnt->mnt_root) {
+ if (follow_up(&cur))
+ continue;
+ goto out_free;
+ }
+ if ((ncomponents & 15) == 0) {
+ struct dentry **new;
+ new = krealloc(components,
+ sizeof(*new) * (ncomponents + 16),
+ GFP_KERNEL);
+ if (!new)
+ goto out_free;
+ components = new;
+ }
+ components[ncomponents++] = cur.dentry;
+ cur.dentry = dget_parent(cur.dentry);
+ }
- rootlen = strlen(rootpath);
- if (strncmp(path, rootpath, rootlen)) {
- dprintk("nfsd: fs_locations failed;"
- "%s is not contained in %s\n", path, rootpath);
- *stat = nfserr_notsupp;
- path = NULL;
- goto out;
+ *buflen -= 4;
+ if (*buflen < 0)
+ goto out_free;
+ WRITE32(ncomponents);
+
+ while (ncomponents) {
+ struct dentry *dentry = components[ncomponents - 1];
+ unsigned int len = dentry->d_name.len;
+
+ *buflen -= 4 + (XDR_QUADLEN(len) << 2);
+ if (*buflen < 0)
+ goto out_free;
+ WRITE32(len);
+ WRITEMEM(dentry->d_name.name, len);
+ dprintk("/%s", dentry->d_name.name);
+ dput(dentry);
+ ncomponents--;
}
- path += rootlen;
-out:
- fh_put(&tmp_fh);
- return path;
+
+ *pp = p;
+ err = 0;
+out_free:
+ dprintk(")\n");
+ while (ncomponents)
+ dput(components[--ncomponents]);
+ kfree(components);
+ path_put(&cur);
+ return err;
+}
+
+static __be32 nfsd4_encode_fsloc_fsroot(struct svc_rqst *rqstp,
+ const struct path *path, __be32 **pp, int *buflen)
+{
+ struct svc_export *exp_ps;
+ __be32 res;
+
+ exp_ps = rqst_find_fsidzero_export(rqstp);
+ if (IS_ERR(exp_ps))
+ return nfserrno(PTR_ERR(exp_ps));
+ res = nfsd4_encode_path(&exp_ps->ex_path, path, pp, buflen);
+ exp_put(exp_ps);
+ return res;
}
/*
@@ -1740,11 +1899,8 @@ static __be32 nfsd4_encode_fs_locations(struct svc_rqst *rqstp,
int i;
__be32 *p = *pp;
struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs;
- char *root = nfsd4_path(rqstp, exp, &status);
- if (status)
- return status;
- status = nfsd4_encode_components('/', root, &p, buflen);
+ status = nfsd4_encode_fsloc_fsroot(rqstp, &exp->ex_path, &p, buflen);
if (status)
return status;
if ((*buflen -= 4) < 0)
@@ -1760,12 +1916,19 @@ static __be32 nfsd4_encode_fs_locations(struct svc_rqst *rqstp,
return 0;
}
-static u32 nfs4_ftypes[16] = {
- NF4BAD, NF4FIFO, NF4CHR, NF4BAD,
- NF4DIR, NF4BAD, NF4BLK, NF4BAD,
- NF4REG, NF4BAD, NF4LNK, NF4BAD,
- NF4SOCK, NF4BAD, NF4LNK, NF4BAD,
-};
+static u32 nfs4_file_type(umode_t mode)
+{
+ switch (mode & S_IFMT) {
+ case S_IFIFO: return NF4FIFO;
+ case S_IFCHR: return NF4CHR;
+ case S_IFDIR: return NF4DIR;
+ case S_IFBLK: return NF4BLK;
+ case S_IFLNK: return NF4LNK;
+ case S_IFREG: return NF4REG;
+ case S_IFSOCK: return NF4SOCK;
+ default: return NF4BAD;
+ };
+}
static __be32
nfsd4_encode_name(struct svc_rqst *rqstp, int whotype, uid_t id, int group,
@@ -1954,7 +2117,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
if (bmval0 & FATTR4_WORD0_TYPE) {
if ((buflen -= 4) < 0)
goto out_resource;
- dummy = nfs4_ftypes[(stat.mode & S_IFMT) >> 12];
+ dummy = nfs4_file_type(stat.mode);
if (dummy == NF4BAD)
goto out_serverfault;
WRITE32(dummy);
@@ -2488,7 +2651,7 @@ nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_c
if (!nfserr)
nfsd4_encode_stateid(resp, &close->cl_stateid);
- ENCODE_SEQID_OP_TAIL(close->cl_stateowner);
+ encode_seqid_op_tail(resp, save, nfserr);
return nfserr;
}
@@ -2564,17 +2727,18 @@ nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr, struct svc_fh
static void
nfsd4_encode_lock_denied(struct nfsd4_compoundres *resp, struct nfsd4_lock_denied *ld)
{
+ struct xdr_netobj *conf = &ld->ld_owner;
__be32 *p;
- RESERVE_SPACE(32 + XDR_LEN(ld->ld_sop ? ld->ld_sop->so_owner.len : 0));
+ RESERVE_SPACE(32 + XDR_LEN(conf->len));
WRITE64(ld->ld_start);
WRITE64(ld->ld_length);
WRITE32(ld->ld_type);
- if (ld->ld_sop) {
+ if (conf->len) {
WRITEMEM(&ld->ld_clientid, 8);
- WRITE32(ld->ld_sop->so_owner.len);
- WRITEMEM(ld->ld_sop->so_owner.data, ld->ld_sop->so_owner.len);
- kref_put(&ld->ld_sop->so_ref, nfs4_free_stateowner);
+ WRITE32(conf->len);
+ WRITEMEM(conf->data, conf->len);
+ kfree(conf->data);
} else { /* non - nfsv4 lock in conflict, no clientid nor owner */
WRITE64((u64)0); /* clientid */
WRITE32(0); /* length of owner name */
@@ -2592,7 +2756,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
else if (nfserr == nfserr_denied)
nfsd4_encode_lock_denied(resp, &lock->lk_denied);
- ENCODE_SEQID_OP_TAIL(lock->lk_replay_owner);
+ encode_seqid_op_tail(resp, save, nfserr);
return nfserr;
}
@@ -2612,7 +2776,7 @@ nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_l
if (!nfserr)
nfsd4_encode_stateid(resp, &locku->lu_stateid);
- ENCODE_SEQID_OP_TAIL(locku->lu_stateowner);
+ encode_seqid_op_tail(resp, save, nfserr);
return nfserr;
}
@@ -2693,7 +2857,7 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op
}
/* XXX save filehandle here */
out:
- ENCODE_SEQID_OP_TAIL(open->op_stateowner);
+ encode_seqid_op_tail(resp, save, nfserr);
return nfserr;
}
@@ -2705,7 +2869,7 @@ nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct
if (!nfserr)
nfsd4_encode_stateid(resp, &oc->oc_resp_stateid);
- ENCODE_SEQID_OP_TAIL(oc->oc_stateowner);
+ encode_seqid_op_tail(resp, save, nfserr);
return nfserr;
}
@@ -2717,7 +2881,7 @@ nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struc
if (!nfserr)
nfsd4_encode_stateid(resp, &od->od_stateid);
- ENCODE_SEQID_OP_TAIL(od->od_stateowner);
+ encode_seqid_op_tail(resp, save, nfserr);
return nfserr;
}
@@ -2759,8 +2923,6 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
read->rd_offset, resp->rqstp->rq_vec, read->rd_vlen,
&maxcount);
- if (nfserr == nfserr_symlink)
- nfserr = nfserr_inval;
if (nfserr)
return nfserr;
eof = (read->rd_offset + maxcount >=
@@ -2886,8 +3048,6 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
readdir->common.err == nfserr_toosmall &&
readdir->buffer == page)
nfserr = nfserr_toosmall;
- if (nfserr == nfserr_symlink)
- nfserr = nfserr_notdir;
if (nfserr)
goto err_no_verf;
@@ -3218,9 +3378,9 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
WRITEMEM(seq->sessionid.data, NFS4_MAX_SESSIONID_LEN);
WRITE32(seq->seqid);
WRITE32(seq->slotid);
- WRITE32(seq->maxslots);
- /* For now: target_maxslots = maxslots */
- WRITE32(seq->maxslots);
+ /* Note slotid's are numbered from zero: */
+ WRITE32(seq->maxslots - 1); /* sr_highest_slotid */
+ WRITE32(seq->maxslots - 1); /* sr_target_highest_slotid */
WRITE32(seq->status_flags);
ADJUST_ARGS();
@@ -3233,6 +3393,7 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_test_stateid *test_stateid)
{
struct nfsd4_compoundargs *argp;
+ struct nfs4_client *cl = resp->cstate.session->se_client;
stateid_t si;
__be32 *p;
int i;
@@ -3248,7 +3409,7 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr,
nfs4_lock_state();
for (i = 0; i < test_stateid->ts_num_ids; i++) {
nfsd4_decode_stateid(argp, &si);
- valid = nfs4_validate_stateid(&si, test_stateid->ts_has_session);
+ valid = nfs4_validate_stateid(cl, &si);
RESERVE_SPACE(4);
*p++ = htonl(valid);
resp->p = p;
@@ -3334,34 +3495,29 @@ static nfsd4_enc nfsd4_enc_ops[] = {
/*
* Calculate the total amount of memory that the compound response has taken
- * after encoding the current operation.
+ * after encoding the current operation with pad.
*
- * pad: add on 8 bytes for the next operation's op_code and status so that
- * there is room to cache a failure on the next operation.
+ * pad: if operation is non-idempotent, pad was calculate by op_rsize_bop()
+ * which was specified at nfsd4_operation, else pad is zero.
*
- * Compare this length to the session se_fmaxresp_cached.
+ * Compare this length to the session se_fmaxresp_sz and se_fmaxresp_cached.
*
* Our se_fmaxresp_cached will always be a multiple of PAGE_SIZE, and so
* will be at least a page and will therefore hold the xdr_buf head.
*/
-static int nfsd4_check_drc_limit(struct nfsd4_compoundres *resp)
+int nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
{
- int status = 0;
struct xdr_buf *xb = &resp->rqstp->rq_res;
- struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
struct nfsd4_session *session = NULL;
struct nfsd4_slot *slot = resp->cstate.slot;
- u32 length, tlen = 0, pad = 8;
+ u32 length, tlen = 0;
if (!nfsd4_has_session(&resp->cstate))
- return status;
+ return 0;
session = resp->cstate.session;
- if (session == NULL || slot->sl_cachethis == 0)
- return status;
-
- if (resp->opcnt >= args->opcnt)
- pad = 0; /* this is the last operation */
+ if (session == NULL)
+ return 0;
if (xb->page_len == 0) {
length = (char *)resp->p - (char *)xb->head[0].iov_base + pad;
@@ -3374,10 +3530,14 @@ static int nfsd4_check_drc_limit(struct nfsd4_compoundres *resp)
dprintk("%s length %u, xb->page_len %u tlen %u pad %u\n", __func__,
length, xb->page_len, tlen, pad);
- if (length <= session->se_fchannel.maxresp_cached)
- return status;
- else
+ if (length > session->se_fchannel.maxresp_sz)
+ return nfserr_rep_too_big;
+
+ if (slot->sl_cachethis == 1 &&
+ length > session->se_fchannel.maxresp_cached)
return nfserr_rep_too_big_to_cache;
+
+ return 0;
}
void
@@ -3397,8 +3557,8 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
!nfsd4_enc_ops[op->opnum]);
op->status = nfsd4_enc_ops[op->opnum](resp, op->status, &op->u);
/* nfsd4_check_drc_limit guarantees enough room for error status */
- if (!op->status && nfsd4_check_drc_limit(resp))
- op->status = nfserr_rep_too_big_to_cache;
+ if (!op->status)
+ op->status = nfsd4_check_resp_size(resp, 0);
status:
/*
* Note: We write the status directly, instead of using WRITE32(),
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index c7716143cbd..db34a585e11 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -9,7 +9,6 @@
#include <linux/ctype.h>
#include <linux/sunrpc/svcsock.h>
-#include <linux/nfsd/syscall.h>
#include <linux/lockd/lockd.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/gss_api.h>
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 7ecfa242030..58134a23fdf 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -11,13 +11,39 @@
#include <linux/types.h>
#include <linux/mount.h>
+#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs3.h>
+#include <linux/nfs4.h>
+#include <linux/sunrpc/msg_prot.h>
+
#include <linux/nfsd/debug.h>
#include <linux/nfsd/export.h>
#include <linux/nfsd/stats.h>
+
/*
* nfsd version
*/
#define NFSD_SUPPORTED_MINOR_VERSION 1
+/*
+ * Maximum blocksizes supported by daemon under various circumstances.
+ */
+#define NFSSVC_MAXBLKSIZE RPCSVC_MAXPAYLOAD
+/* NFSv2 is limited by the protocol specification, see RFC 1094 */
+#define NFSSVC_MAXBLKSIZE_V2 (8*1024)
+
+
+/*
+ * Largest number of bytes we need to allocate for an NFS
+ * call or reply. Used to control buffer sizes. We use
+ * the length of v3 WRITE, READDIR and READDIR replies
+ * which are an RPC header, up to 26 XDR units of reply
+ * data, and some page data.
+ *
+ * Note that accuracy here doesn't matter too much as the
+ * size is rounded up to a page size when allocating space.
+ */
+#define NFSD_BUFSIZE ((RPC_MAX_HEADER_WITH_AUTH+26)*XDR_UNIT + NFSSVC_MAXBLKSIZE)
struct readdir_cd {
__be32 err; /* 0, nfserr, or nfserr_eof */
@@ -335,6 +361,13 @@ static inline u32 nfsd_suppattrs2(u32 minorversion)
#define NFSD_SUPPATTR_EXCLCREAT_WORD2 \
NFSD_WRITEABLE_ATTRS_WORD2
+extern int nfsd4_is_junction(struct dentry *dentry);
+#else
+static inline int nfsd4_is_junction(struct dentry *dentry)
+{
+ return 0;
+}
+
#endif /* CONFIG_NFSD_V4 */
#endif /* LINUX_NFSD_NFSD_H */
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 90c6aa6d5e0..c763de5c115 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -59,28 +59,25 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
* the write call).
*/
static inline __be32
-nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, int type)
+nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, int requested)
{
- /* Type can be negative when creating hardlinks - not to a dir */
- if (type > 0 && (mode & S_IFMT) != type) {
- if (rqstp->rq_vers == 4 && (mode & S_IFMT) == S_IFLNK)
- return nfserr_symlink;
- else if (type == S_IFDIR)
- return nfserr_notdir;
- else if ((mode & S_IFMT) == S_IFDIR)
- return nfserr_isdir;
- else
- return nfserr_inval;
- }
- if (type < 0 && (mode & S_IFMT) == -type) {
- if (rqstp->rq_vers == 4 && (mode & S_IFMT) == S_IFLNK)
- return nfserr_symlink;
- else if (type == -S_IFDIR)
- return nfserr_isdir;
- else
- return nfserr_notdir;
- }
- return 0;
+ mode &= S_IFMT;
+
+ if (requested == 0) /* the caller doesn't care */
+ return nfs_ok;
+ if (mode == requested)
+ return nfs_ok;
+ /*
+ * v4 has an error more specific than err_notdir which we should
+ * return in preference to err_notdir:
+ */
+ if (rqstp->rq_vers == 4 && mode == S_IFLNK)
+ return nfserr_symlink;
+ if (requested == S_IFDIR)
+ return nfserr_notdir;
+ if (mode == S_IFDIR)
+ return nfserr_isdir;
+ return nfserr_inval;
}
static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 4eefaf1b42e..a3cf38476a1 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -35,6 +35,7 @@
#ifndef _NFSD4_STATE_H
#define _NFSD4_STATE_H
+#include <linux/idr.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/nfsd/nfsfh.h>
#include "nfsfh.h"
@@ -45,24 +46,20 @@ typedef struct {
} clientid_t;
typedef struct {
- u32 so_boot;
- u32 so_stateownerid;
- u32 so_fileid;
+ clientid_t so_clid;
+ u32 so_id;
} stateid_opaque_t;
typedef struct {
u32 si_generation;
stateid_opaque_t si_opaque;
} stateid_t;
-#define si_boot si_opaque.so_boot
-#define si_stateownerid si_opaque.so_stateownerid
-#define si_fileid si_opaque.so_fileid
#define STATEID_FMT "(%08x/%08x/%08x/%08x)"
#define STATEID_VAL(s) \
- (s)->si_boot, \
- (s)->si_stateownerid, \
- (s)->si_fileid, \
+ (s)->si_opaque.so_clid.cl_boot, \
+ (s)->si_opaque.so_clid.cl_id, \
+ (s)->si_opaque.so_id, \
(s)->si_generation
struct nfsd4_callback {
@@ -76,17 +73,27 @@ struct nfsd4_callback {
bool cb_done;
};
+struct nfs4_stid {
+#define NFS4_OPEN_STID 1
+#define NFS4_LOCK_STID 2
+#define NFS4_DELEG_STID 4
+/* For an open stateid kept around *only* to process close replays: */
+#define NFS4_CLOSED_STID 8
+ unsigned char sc_type;
+ stateid_t sc_stateid;
+ struct nfs4_client *sc_client;
+};
+
struct nfs4_delegation {
+ struct nfs4_stid dl_stid; /* must be first field */
struct list_head dl_perfile;
struct list_head dl_perclnt;
struct list_head dl_recall_lru; /* delegation recalled */
atomic_t dl_count; /* ref count */
- struct nfs4_client *dl_client;
struct nfs4_file *dl_file;
u32 dl_type;
time_t dl_time;
/* For recall: */
- stateid_t dl_stateid;
struct knfsd_fh dl_fh;
int dl_retries;
struct nfsd4_callback dl_recall;
@@ -104,6 +111,11 @@ struct nfs4_cb_conn {
struct svc_xprt *cb_xprt; /* minorversion 1 only */
};
+static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s)
+{
+ return container_of(s, struct nfs4_delegation, dl_stid);
+}
+
/* Maximum number of slots per session. 160 is useful for long haul TCP */
#define NFSD_MAX_SLOTS_PER_SESSION 160
/* Maximum number of operations per session compound */
@@ -220,6 +232,7 @@ struct nfs4_client {
struct list_head cl_idhash; /* hash by cl_clientid.id */
struct list_head cl_strhash; /* hash by cl_name */
struct list_head cl_openowners;
+ struct idr cl_stateids; /* stateid lookup */
struct list_head cl_delegations;
struct list_head cl_lru; /* tail queue */
struct xdr_netobj cl_name; /* id generated by client */
@@ -245,6 +258,7 @@ struct nfs4_client {
#define NFSD4_CB_UP 0
#define NFSD4_CB_UNKNOWN 1
#define NFSD4_CB_DOWN 2
+#define NFSD4_CB_FAULT 3
int cl_cb_state;
struct nfsd4_callback cl_cb_null;
struct nfsd4_session *cl_cb_session;
@@ -293,6 +307,9 @@ static inline void
update_stateid(stateid_t *stateid)
{
stateid->si_generation++;
+ /* Wraparound recommendation from 3530bis-13 9.1.3.2: */
+ if (stateid->si_generation == 0)
+ stateid->si_generation = 1;
}
/* A reasonable value for REPLAY_ISIZE was estimated as follows:
@@ -312,49 +329,57 @@ struct nfs4_replay {
__be32 rp_status;
unsigned int rp_buflen;
char *rp_buf;
- unsigned intrp_allocated;
struct knfsd_fh rp_openfh;
char rp_ibuf[NFSD4_REPLAY_ISIZE];
};
-/*
-* nfs4_stateowner can either be an open_owner, or a lock_owner
-*
-* so_idhash: stateid_hashtbl[] for open owner, lockstateid_hashtbl[]
-* for lock_owner
-* so_strhash: ownerstr_hashtbl[] for open_owner, lock_ownerstr_hashtbl[]
-* for lock_owner
-* so_perclient: nfs4_client->cl_perclient entry - used when nfs4_client
-* struct is reaped.
-* so_perfilestate: heads the list of nfs4_stateid (either open or lock)
-* and is used to ensure no dangling nfs4_stateid references when we
-* release a stateowner.
-* so_perlockowner: (open) nfs4_stateid->st_perlockowner entry - used when
-* close is called to reap associated byte-range locks
-* so_close_lru: (open) stateowner is placed on this list instead of being
-* reaped (when so_perfilestate is empty) to hold the last close replay.
-* reaped by laundramat thread after lease period.
-*/
struct nfs4_stateowner {
- struct kref so_ref;
- struct list_head so_idhash; /* hash by so_id */
struct list_head so_strhash; /* hash by op_name */
- struct list_head so_perclient;
struct list_head so_stateids;
- struct list_head so_perstateid; /* for lockowners only */
- struct list_head so_close_lru; /* tail queue */
- time_t so_time; /* time of placement on so_close_lru */
- int so_is_open_owner; /* 1=openowner,0=lockowner */
- u32 so_id;
struct nfs4_client * so_client;
/* after increment in ENCODE_SEQID_OP_TAIL, represents the next
* sequence id expected from the client: */
u32 so_seqid;
struct xdr_netobj so_owner; /* open owner name */
- int so_confirmed; /* successful OPEN_CONFIRM? */
struct nfs4_replay so_replay;
+ bool so_is_open_owner;
};
+struct nfs4_openowner {
+ struct nfs4_stateowner oo_owner; /* must be first field */
+ struct list_head oo_perclient;
+ /*
+ * We keep around openowners a little while after last close,
+ * which saves clients from having to confirm, and allows us to
+ * handle close replays if they come soon enough. The close_lru
+ * is a list of such openowners, to be reaped by the laundromat
+ * thread eventually if they remain unused:
+ */
+ struct list_head oo_close_lru;
+ struct nfs4_ol_stateid *oo_last_closed_stid;
+ time_t oo_time; /* time of placement on so_close_lru */
+#define NFS4_OO_CONFIRMED 1
+#define NFS4_OO_PURGE_CLOSE 2
+#define NFS4_OO_NEW 4
+ unsigned char oo_flags;
+};
+
+struct nfs4_lockowner {
+ struct nfs4_stateowner lo_owner; /* must be first element */
+ struct list_head lo_perstateid; /* for lockowners only */
+ struct list_head lo_list; /* for temporary uses */
+};
+
+static inline struct nfs4_openowner * openowner(struct nfs4_stateowner *so)
+{
+ return container_of(so, struct nfs4_openowner, oo_owner);
+}
+
+static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so)
+{
+ return container_of(so, struct nfs4_lockowner, lo_owner);
+}
+
/*
* nfs4_file: a file opened by some number of (open) nfs4_stateowners.
* o fi_perfile list is used to search for conflicting
@@ -368,17 +393,17 @@ struct nfs4_file {
/* One each for O_RDONLY, O_WRONLY, O_RDWR: */
struct file * fi_fds[3];
/*
- * Each open or lock stateid contributes 1 to either
- * fi_access[O_RDONLY], fi_access[O_WRONLY], or both, depending
- * on open or lock mode:
+ * Each open or lock stateid contributes 0-4 to the counts
+ * below depending on which bits are set in st_access_bitmap:
+ * 1 to fi_access[O_RDONLY] if NFS4_SHARE_ACCES_READ is set
+ * + 1 to fi_access[O_WRONLY] if NFS4_SHARE_ACCESS_WRITE is set
+ * + 1 to both of the above if NFS4_SHARE_ACCESS_BOTH is set.
*/
atomic_t fi_access[2];
struct file *fi_deleg_file;
struct file_lock *fi_lease;
atomic_t fi_delegees;
struct inode *fi_inode;
- u32 fi_id; /* used with stateowner->so_id
- * for stateid_hashtbl hash */
bool fi_had_conflict;
};
@@ -408,50 +433,27 @@ static inline struct file *find_any_file(struct nfs4_file *f)
return f->fi_fds[O_RDONLY];
}
-/*
-* nfs4_stateid can either be an open stateid or (eventually) a lock stateid
-*
-* (open)nfs4_stateid: one per (open)nfs4_stateowner, nfs4_file
-*
-* st_hash: stateid_hashtbl[] entry or lockstateid_hashtbl entry
-* st_perfile: file_hashtbl[] entry.
-* st_perfile_state: nfs4_stateowner->so_perfilestate
-* st_perlockowner: (open stateid) list of lock nfs4_stateowners
-* st_access_bmap: used only for open stateid
-* st_deny_bmap: used only for open stateid
-* st_openstp: open stateid lock stateid was derived from
-*
-* XXX: open stateids and lock stateids have diverged sufficiently that
-* we should consider defining separate structs for the two cases.
-*/
-
-struct nfs4_stateid {
- struct list_head st_hash;
+/* "ol" stands for "Open or Lock". Better suggestions welcome. */
+struct nfs4_ol_stateid {
+ struct nfs4_stid st_stid; /* must be first field */
struct list_head st_perfile;
struct list_head st_perstateowner;
struct list_head st_lockowners;
struct nfs4_stateowner * st_stateowner;
struct nfs4_file * st_file;
- stateid_t st_stateid;
unsigned long st_access_bmap;
unsigned long st_deny_bmap;
- struct nfs4_stateid * st_openstp;
+ struct nfs4_ol_stateid * st_openstp;
};
+static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
+{
+ return container_of(s, struct nfs4_ol_stateid, st_stid);
+}
+
/* flags for preprocess_seqid_op() */
-#define HAS_SESSION 0x00000001
-#define CONFIRM 0x00000002
-#define OPEN_STATE 0x00000004
-#define LOCK_STATE 0x00000008
#define RD_STATE 0x00000010
#define WR_STATE 0x00000020
-#define CLOSE_STATE 0x00000040
-
-#define seqid_mutating_err(err) \
- (((err) != nfserr_stale_clientid) && \
- ((err) != nfserr_bad_seqid) && \
- ((err) != nfserr_stale_stateid) && \
- ((err) != nfserr_bad_stateid))
struct nfsd4_compound_state;
@@ -461,7 +463,8 @@ extern void nfs4_lock_state(void);
extern void nfs4_unlock_state(void);
extern int nfs4_in_grace(void);
extern __be32 nfs4_check_open_reclaim(clientid_t *clid);
-extern void nfs4_free_stateowner(struct kref *kref);
+extern void nfs4_free_openowner(struct nfs4_openowner *);
+extern void nfs4_free_lockowner(struct nfs4_lockowner *);
extern int set_callback_cred(void);
extern void nfsd4_probe_callback(struct nfs4_client *clp);
extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
@@ -473,7 +476,7 @@ extern void nfsd4_destroy_callback_queue(void);
extern void nfsd4_shutdown_callback(struct nfs4_client *);
extern void nfs4_put_delegation(struct nfs4_delegation *dp);
extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
-extern void nfsd4_init_recdir(char *recdir_name);
+extern void nfsd4_init_recdir(void);
extern int nfsd4_recdir_load(void);
extern void nfsd4_shutdown_recdir(void);
extern int nfs4_client_to_reclaim(const char *name);
@@ -482,18 +485,7 @@ extern void nfsd4_recdir_purge_old(void);
extern int nfsd4_create_clid_dir(struct nfs4_client *clp);
extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);
extern void release_session_client(struct nfsd4_session *);
-extern __be32 nfs4_validate_stateid(stateid_t *, int);
-
-static inline void
-nfs4_put_stateowner(struct nfs4_stateowner *so)
-{
- kref_put(&so->so_ref, nfs4_free_stateowner);
-}
-
-static inline void
-nfs4_get_stateowner(struct nfs4_stateowner *so)
-{
- kref_get(&so->so_ref);
-}
+extern __be32 nfs4_validate_stateid(struct nfs4_client *, stateid_t *);
+extern void nfsd4_purge_closed_stateid(struct nfs4_stateowner *);
#endif /* NFSD4_STATE_H */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index fd0acca5370..7a2e442623c 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -168,6 +168,8 @@ int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
{
if (d_mountpoint(dentry))
return 1;
+ if (nfsd4_is_junction(dentry))
+ return 1;
if (!(exp->ex_flags & NFSEXP_V4ROOT))
return 0;
return dentry->d_inode != NULL;
@@ -502,7 +504,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int flags = 0;
/* Get inode */
- error = fh_verify(rqstp, fhp, 0 /* S_IFREG */, NFSD_MAY_SATTR);
+ error = fh_verify(rqstp, fhp, 0, NFSD_MAY_SATTR);
if (error)
return error;
@@ -592,6 +594,22 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_ac
return error;
}
+#define NFSD_XATTR_JUNCTION_PREFIX XATTR_TRUSTED_PREFIX "junction."
+#define NFSD_XATTR_JUNCTION_TYPE NFSD_XATTR_JUNCTION_PREFIX "type"
+int nfsd4_is_junction(struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (inode == NULL)
+ return 0;
+ if (inode->i_mode & S_IXUGO)
+ return 0;
+ if (!(inode->i_mode & S_ISVTX))
+ return 0;
+ if (vfs_getxattr(dentry, NFSD_XATTR_JUNCTION_TYPE, NULL, 0) <= 0)
+ return 0;
+ return 1;
+}
#endif /* defined(CONFIG_NFSD_V4) */
#ifdef CONFIG_NFSD_V3
@@ -1352,7 +1370,7 @@ __be32
do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
char *fname, int flen, struct iattr *iap,
struct svc_fh *resfhp, int createmode, u32 *verifier,
- int *truncp, int *created)
+ bool *truncp, bool *created)
{
struct dentry *dentry, *dchild = NULL;
struct inode *dirp;
@@ -1632,10 +1650,12 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_CREATE);
if (err)
goto out;
- err = fh_verify(rqstp, tfhp, -S_IFDIR, NFSD_MAY_NOP);
+ err = fh_verify(rqstp, tfhp, 0, NFSD_MAY_NOP);
if (err)
goto out;
-
+ err = nfserr_isdir;
+ if (S_ISDIR(tfhp->fh_dentry->d_inode->i_mode))
+ goto out;
err = nfserr_perm;
if (!len)
goto out;
@@ -2114,7 +2134,8 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
/* Allow read access to binaries even when mode 111 */
if (err == -EACCES && S_ISREG(inode->i_mode) &&
- acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE))
+ (acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) ||
+ acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC)))
err = inode_permission(inode, MAY_EXEC);
return err? nfserrno(err) : 0;
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index e0bbac04d1d..3f54ad03bb2 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -10,21 +10,22 @@
/*
* Flags for nfsd_permission
*/
-#define NFSD_MAY_NOP 0
-#define NFSD_MAY_EXEC 1 /* == MAY_EXEC */
-#define NFSD_MAY_WRITE 2 /* == MAY_WRITE */
-#define NFSD_MAY_READ 4 /* == MAY_READ */
-#define NFSD_MAY_SATTR 8
-#define NFSD_MAY_TRUNC 16
-#define NFSD_MAY_LOCK 32
-#define NFSD_MAY_MASK 63
+#define NFSD_MAY_NOP 0
+#define NFSD_MAY_EXEC 0x001 /* == MAY_EXEC */
+#define NFSD_MAY_WRITE 0x002 /* == MAY_WRITE */
+#define NFSD_MAY_READ 0x004 /* == MAY_READ */
+#define NFSD_MAY_SATTR 0x008
+#define NFSD_MAY_TRUNC 0x010
+#define NFSD_MAY_LOCK 0x020
+#define NFSD_MAY_MASK 0x03f
/* extra hints to permission and open routines: */
-#define NFSD_MAY_OWNER_OVERRIDE 64
-#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
-#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
-#define NFSD_MAY_NOT_BREAK_LEASE 512
-#define NFSD_MAY_BYPASS_GSS 1024
+#define NFSD_MAY_OWNER_OVERRIDE 0x040
+#define NFSD_MAY_LOCAL_ACCESS 0x080 /* for device special files */
+#define NFSD_MAY_BYPASS_GSS_ON_ROOT 0x100
+#define NFSD_MAY_NOT_BREAK_LEASE 0x200
+#define NFSD_MAY_BYPASS_GSS 0x400
+#define NFSD_MAY_READ_IF_EXEC 0x800
#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
@@ -61,7 +62,7 @@ __be32 nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *);
__be32 do_nfsd_create(struct svc_rqst *, struct svc_fh *,
char *name, int len, struct iattr *attrs,
struct svc_fh *res, int createmode,
- u32 *verifier, int *truncp, int *created);
+ u32 *verifier, bool *truncp, bool *created);
__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
loff_t, unsigned long);
#endif /* CONFIG_NFSD_V3 */
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index d2a8d04428c..2364747ee97 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -81,7 +81,6 @@ struct nfsd4_access {
struct nfsd4_close {
u32 cl_seqid; /* request */
stateid_t cl_stateid; /* request+response */
- struct nfs4_stateowner * cl_stateowner; /* response */
};
struct nfsd4_commit {
@@ -131,7 +130,7 @@ struct nfsd4_link {
struct nfsd4_lock_denied {
clientid_t ld_clientid;
- struct nfs4_stateowner *ld_sop;
+ struct xdr_netobj ld_owner;
u64 ld_start;
u64 ld_length;
u32 ld_type;
@@ -165,9 +164,6 @@ struct nfsd4_lock {
} ok;
struct nfsd4_lock_denied denied;
} u;
- /* The lk_replay_owner is the open owner in the open_to_lock_owner
- * case and the lock owner otherwise: */
- struct nfs4_stateowner *lk_replay_owner;
};
#define lk_new_open_seqid v.new.open_seqid
#define lk_new_open_stateid v.new.open_stateid
@@ -188,7 +184,6 @@ struct nfsd4_lockt {
struct xdr_netobj lt_owner;
u64 lt_offset;
u64 lt_length;
- struct nfs4_stateowner * lt_stateowner;
struct nfsd4_lock_denied lt_denied;
};
@@ -199,7 +194,6 @@ struct nfsd4_locku {
stateid_t lu_stateid;
u64 lu_offset;
u64 lu_length;
- struct nfs4_stateowner *lu_stateowner;
};
@@ -232,8 +226,11 @@ struct nfsd4_open {
u32 op_recall; /* recall */
struct nfsd4_change_info op_cinfo; /* response */
u32 op_rflags; /* response */
- int op_truncate; /* used during processing */
- struct nfs4_stateowner *op_stateowner; /* used during processing */
+ bool op_truncate; /* used during processing */
+ bool op_created; /* used during processing */
+ struct nfs4_openowner *op_openowner; /* used during processing */
+ struct nfs4_file *op_file; /* used during processing */
+ struct nfs4_ol_stateid *op_stp; /* used during processing */
struct nfs4_acl *op_acl;
};
#define op_iattr iattr
@@ -243,7 +240,6 @@ struct nfsd4_open_confirm {
stateid_t oc_req_stateid /* request */;
u32 oc_seqid /* request */;
stateid_t oc_resp_stateid /* response */;
- struct nfs4_stateowner * oc_stateowner; /* response */
};
struct nfsd4_open_downgrade {
@@ -251,7 +247,6 @@ struct nfsd4_open_downgrade {
u32 od_seqid;
u32 od_share_access;
u32 od_share_deny;
- struct nfs4_stateowner *od_stateowner;
};
@@ -325,8 +320,7 @@ struct nfsd4_setattr {
struct nfsd4_setclientid {
nfs4_verifier se_verf; /* request */
- u32 se_namelen; /* request */
- char * se_name; /* request */
+ struct xdr_netobj se_name;
u32 se_callback_prog; /* request */
u32 se_callback_netid_len; /* request */
char * se_callback_netid_val; /* request */
@@ -351,7 +345,6 @@ struct nfsd4_saved_compoundargs {
struct nfsd4_test_stateid {
__be32 ts_num_ids;
- __be32 ts_has_session;
struct nfsd4_compoundargs *ts_saved_args;
struct nfsd4_saved_compoundargs ts_savedp;
};
@@ -405,6 +398,10 @@ struct nfsd4_destroy_session {
struct nfs4_sessionid sessionid;
};
+struct nfsd4_destroy_clientid {
+ clientid_t clientid;
+};
+
struct nfsd4_reclaim_complete {
u32 rca_one_fs;
};
@@ -532,6 +529,7 @@ int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *,
struct nfsd4_compoundargs *);
int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *,
struct nfsd4_compoundres *);
+int nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *);
void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op);
__be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
@@ -558,11 +556,13 @@ extern __be32 nfsd4_sequence(struct svc_rqst *,
extern __be32 nfsd4_destroy_session(struct svc_rqst *,
struct nfsd4_compound_state *,
struct nfsd4_destroy_session *);
+extern __be32 nfsd4_destroy_clientid(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_destroy_clientid *);
__be32 nfsd4_reclaim_complete(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_reclaim_complete *);
extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *,
struct nfsd4_open *open);
extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
struct svc_fh *current_fh, struct nfsd4_open *open);
+extern void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status);
extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc);
extern __be32 nfsd4_close(struct svc_rqst *rqstp,
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 81ecf9c0bf0..194fb22ef79 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7185,20 +7185,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
{
int ret = 0;
struct buffer_head *dir_bh = NULL;
- struct ocfs2_security_xattr_info si = {
- .enable = 1,
- };
- ret = ocfs2_init_security_get(inode, dir, qstr, &si);
+ ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
if (!ret) {
- ret = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
- si.name, si.value, si.value_len,
- XATTR_CREATE);
- if (ret) {
- mlog_errno(ret);
- goto leave;
- }
- } else if (ret != -EOPNOTSUPP) {
mlog_errno(ret);
goto leave;
}
@@ -7255,6 +7244,22 @@ static int ocfs2_xattr_security_set(struct dentry *dentry, const char *name,
name, value, size, flags);
}
+int ocfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
+{
+ const struct xattr *xattr;
+ int err = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, XATTR_CREATE);
+ if (err)
+ break;
+ }
+ return err;
+}
+
int ocfs2_init_security_get(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
@@ -7263,8 +7268,13 @@ int ocfs2_init_security_get(struct inode *inode,
/* check whether ocfs2 support feature xattr */
if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb)))
return -EOPNOTSUPP;
- return security_inode_init_security(inode, dir, qstr, &si->name,
- &si->value, &si->value_len);
+ if (si)
+ return security_old_inode_init_security(inode, dir, qstr,
+ &si->name, &si->value,
+ &si->value_len);
+
+ return security_inode_init_security(inode, dir, qstr,
+ &ocfs2_initxattrs, NULL);
}
int ocfs2_init_security_set(handle_t *handle,
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index a159ba5a35e..eb711060a6f 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -291,14 +291,13 @@ int reiserfs_allocate_list_bitmaps(struct super_block *sb,
for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
jb = jb_array + i;
jb->journal_list = NULL;
- jb->bitmaps = vmalloc(mem);
+ jb->bitmaps = vzalloc(mem);
if (!jb->bitmaps) {
reiserfs_warning(sb, "clm-2000", "unable to "
"allocate bitmaps for journal lists");
failed = 1;
break;
}
- memset(jb->bitmaps, 0, mem);
}
if (failed) {
free_list_bitmaps(sb, jb_array);
@@ -353,11 +352,10 @@ static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
if (num_cnodes <= 0) {
return NULL;
}
- head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
+ head = vzalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
if (!head) {
return NULL;
}
- memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
head[0].prev = NULL;
head[0].next = head + 1;
for (i = 1; i < num_cnodes; i++) {
@@ -2685,14 +2683,13 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
* dependency inversion warnings.
*/
reiserfs_write_unlock(sb);
- journal = SB_JOURNAL(sb) = vmalloc(sizeof(struct reiserfs_journal));
+ journal = SB_JOURNAL(sb) = vzalloc(sizeof(struct reiserfs_journal));
if (!journal) {
reiserfs_warning(sb, "journal-1256",
"unable to get memory for journal structure");
reiserfs_write_lock(sb);
return 1;
}
- memset(journal, 0, sizeof(struct reiserfs_journal));
INIT_LIST_HEAD(&journal->j_bitmap_nodes);
INIT_LIST_HEAD(&journal->j_prealloc_list);
INIT_LIST_HEAD(&journal->j_working_list);
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index b6b9b1fe33b..7483279b482 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -111,15 +111,13 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
/* allocate additional bitmap blocks, reallocate array of bitmap
* block pointers */
bitmap =
- vmalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
+ vzalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
if (!bitmap) {
/* Journal bitmaps are still supersized, but the memory isn't
* leaked, so I guess it's ok */
printk("reiserfs_resize: unable to allocate memory.\n");
return -ENOMEM;
}
- memset(bitmap, 0,
- sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
for (i = 0; i < bmap_nr; i++)
bitmap[i] = old_bitmap[i];
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index ef66c18a933..534668fa41b 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -66,8 +66,8 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode,
if (IS_PRIVATE(dir))
return 0;
- error = security_inode_init_security(inode, dir, qstr, &sec->name,
- &sec->value, &sec->length);
+ error = security_old_inode_init_security(inode, dir, qstr, &sec->name,
+ &sec->value, &sec->length);
if (error) {
if (error == -EOPNOTSUPP)
error = 0;
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index 1360d4f88f4..048b59d5b2f 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -19,9 +19,9 @@ config SQUASHFS
If you want to compile this as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
- say M here and read <file:Documentation/modules.txt>. The module
- will be called squashfs. Note that the root file system (the one
- containing the directory /) cannot be compiled as a module.
+ say M here. The module will be called squashfs. Note that the root
+ file system (the one containing the directory /) cannot be compiled
+ as a module.
If unsure, say N.
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index ea9120a830d..48ffbdf0d01 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -43,20 +43,48 @@ static DEFINE_IDA(sysfs_ino_ida);
static void sysfs_link_sibling(struct sysfs_dirent *sd)
{
struct sysfs_dirent *parent_sd = sd->s_parent;
- struct sysfs_dirent **pos;
- BUG_ON(sd->s_sibling);
-
- /* Store directory entries in order by ino. This allows
- * readdir to properly restart without having to add a
- * cursor into the s_dir.children list.
- */
- for (pos = &parent_sd->s_dir.children; *pos; pos = &(*pos)->s_sibling) {
- if (sd->s_ino < (*pos)->s_ino)
- break;
+ struct rb_node **p;
+ struct rb_node *parent;
+
+ if (sysfs_type(sd) == SYSFS_DIR)
+ parent_sd->s_dir.subdirs++;
+
+ p = &parent_sd->s_dir.inode_tree.rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+#define node rb_entry(parent, struct sysfs_dirent, inode_node)
+ if (sd->s_ino < node->s_ino) {
+ p = &node->inode_node.rb_left;
+ } else if (sd->s_ino > node->s_ino) {
+ p = &node->inode_node.rb_right;
+ } else {
+ printk(KERN_CRIT "sysfs: inserting duplicate inode '%lx'\n",
+ (unsigned long) sd->s_ino);
+ BUG();
+ }
+#undef node
}
- sd->s_sibling = *pos;
- *pos = sd;
+ rb_link_node(&sd->inode_node, parent, p);
+ rb_insert_color(&sd->inode_node, &parent_sd->s_dir.inode_tree);
+
+ p = &parent_sd->s_dir.name_tree.rb_node;
+ parent = NULL;
+ while (*p) {
+ int c;
+ parent = *p;
+#define node rb_entry(parent, struct sysfs_dirent, name_node)
+ c = strcmp(sd->s_name, node->s_name);
+ if (c < 0) {
+ p = &node->name_node.rb_left;
+ } else {
+ p = &node->name_node.rb_right;
+ }
+#undef node
+ }
+ rb_link_node(&sd->name_node, parent, p);
+ rb_insert_color(&sd->name_node, &parent_sd->s_dir.name_tree);
}
/**
@@ -71,16 +99,11 @@ static void sysfs_link_sibling(struct sysfs_dirent *sd)
*/
static void sysfs_unlink_sibling(struct sysfs_dirent *sd)
{
- struct sysfs_dirent **pos;
+ if (sysfs_type(sd) == SYSFS_DIR)
+ sd->s_parent->s_dir.subdirs--;
- for (pos = &sd->s_parent->s_dir.children; *pos;
- pos = &(*pos)->s_sibling) {
- if (*pos == sd) {
- *pos = sd->s_sibling;
- sd->s_sibling = NULL;
- break;
- }
- }
+ rb_erase(&sd->inode_node, &sd->s_parent->s_dir.inode_tree);
+ rb_erase(&sd->name_node, &sd->s_parent->s_dir.name_tree);
}
/**
@@ -126,7 +149,6 @@ struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd)
*/
void sysfs_put_active(struct sysfs_dirent *sd)
{
- struct completion *cmpl;
int v;
if (unlikely(!sd))
@@ -138,10 +160,9 @@ void sysfs_put_active(struct sysfs_dirent *sd)
return;
/* atomic_dec_return() is a mb(), we'll always see the updated
- * sd->s_sibling.
+ * sd->u.completion.
*/
- cmpl = (void *)sd->s_sibling;
- complete(cmpl);
+ complete(sd->u.completion);
}
/**
@@ -155,16 +176,16 @@ static void sysfs_deactivate(struct sysfs_dirent *sd)
DECLARE_COMPLETION_ONSTACK(wait);
int v;
- BUG_ON(sd->s_sibling || !(sd->s_flags & SYSFS_FLAG_REMOVED));
+ BUG_ON(!(sd->s_flags & SYSFS_FLAG_REMOVED));
if (!(sysfs_type(sd) & SYSFS_ACTIVE_REF))
return;
- sd->s_sibling = (void *)&wait;
+ sd->u.completion = (void *)&wait;
rwsem_acquire(&sd->dep_map, 0, 0, _RET_IP_);
/* atomic_add_return() is a mb(), put_active() will always see
- * the updated sd->s_sibling.
+ * the updated sd->u.completion.
*/
v = atomic_add_return(SD_DEACTIVATED_BIAS, &sd->s_active);
@@ -173,8 +194,6 @@ static void sysfs_deactivate(struct sysfs_dirent *sd)
wait_for_completion(&wait);
}
- sd->s_sibling = NULL;
-
lock_acquired(&sd->dep_map, _RET_IP_);
rwsem_release(&sd->dep_map, 1, _RET_IP_);
}
@@ -384,6 +403,13 @@ int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
{
struct sysfs_inode_attrs *ps_iattr;
+ if (!!sysfs_ns_type(acxt->parent_sd) != !!sd->s_ns) {
+ WARN(1, KERN_WARNING "sysfs: ns %s in '%s' for '%s'\n",
+ sysfs_ns_type(acxt->parent_sd)? "required": "invalid",
+ acxt->parent_sd->s_name, sd->s_name);
+ return -EINVAL;
+ }
+
if (sysfs_find_dirent(acxt->parent_sd, sd->s_ns, sd->s_name))
return -EEXIST;
@@ -490,7 +516,7 @@ void sysfs_remove_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
}
sd->s_flags |= SYSFS_FLAG_REMOVED;
- sd->s_sibling = acxt->removed;
+ sd->u.removed_list = acxt->removed;
acxt->removed = sd;
}
@@ -514,8 +540,7 @@ void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
while (acxt->removed) {
struct sysfs_dirent *sd = acxt->removed;
- acxt->removed = sd->s_sibling;
- sd->s_sibling = NULL;
+ acxt->removed = sd->u.removed_list;
sysfs_deactivate(sd);
unmap_bin_file(sd);
@@ -540,15 +565,43 @@ struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
const void *ns,
const unsigned char *name)
{
- struct sysfs_dirent *sd;
+ struct rb_node *p = parent_sd->s_dir.name_tree.rb_node;
+ struct sysfs_dirent *found = NULL;
- for (sd = parent_sd->s_dir.children; sd; sd = sd->s_sibling) {
- if (ns && sd->s_ns && (sd->s_ns != ns))
- continue;
- if (!strcmp(sd->s_name, name))
- return sd;
+ if (!!sysfs_ns_type(parent_sd) != !!ns) {
+ WARN(1, KERN_WARNING "sysfs: ns %s in '%s' for '%s'\n",
+ sysfs_ns_type(parent_sd)? "required": "invalid",
+ parent_sd->s_name, name);
+ return NULL;
}
- return NULL;
+
+ while (p) {
+ int c;
+#define node rb_entry(p, struct sysfs_dirent, name_node)
+ c = strcmp(name, node->s_name);
+ if (c < 0) {
+ p = node->name_node.rb_left;
+ } else if (c > 0) {
+ p = node->name_node.rb_right;
+ } else {
+ found = node;
+ p = node->name_node.rb_left;
+ }
+#undef node
+ }
+
+ if (found) {
+ while (found->s_ns != ns) {
+ p = rb_next(&found->name_node);
+ if (!p)
+ return NULL;
+ found = rb_entry(p, struct sysfs_dirent, name_node);
+ if (strcmp(name, found->s_name))
+ return NULL;
+ }
+ }
+
+ return found;
}
/**
@@ -744,21 +797,19 @@ void sysfs_remove_subdir(struct sysfs_dirent *sd)
static void __sysfs_remove_dir(struct sysfs_dirent *dir_sd)
{
struct sysfs_addrm_cxt acxt;
- struct sysfs_dirent **pos;
+ struct rb_node *pos;
if (!dir_sd)
return;
pr_debug("sysfs %s: removing dir\n", dir_sd->s_name);
sysfs_addrm_start(&acxt, dir_sd);
- pos = &dir_sd->s_dir.children;
- while (*pos) {
- struct sysfs_dirent *sd = *pos;
-
+ pos = rb_first(&dir_sd->s_dir.inode_tree);
+ while (pos) {
+ struct sysfs_dirent *sd = rb_entry(pos, struct sysfs_dirent, inode_node);
+ pos = rb_next(pos);
if (sysfs_type(sd) != SYSFS_DIR)
sysfs_remove_one(&acxt, sd);
- else
- pos = &(*pos)->s_sibling;
}
sysfs_addrm_finish(&acxt);
@@ -881,12 +932,28 @@ static struct sysfs_dirent *sysfs_dir_pos(const void *ns,
pos = NULL;
}
if (!pos && (ino > 1) && (ino < INT_MAX)) {
- pos = parent_sd->s_dir.children;
- while (pos && (ino > pos->s_ino))
- pos = pos->s_sibling;
+ struct rb_node *p = parent_sd->s_dir.inode_tree.rb_node;
+ while (p) {
+#define node rb_entry(p, struct sysfs_dirent, inode_node)
+ if (ino < node->s_ino) {
+ pos = node;
+ p = node->inode_node.rb_left;
+ } else if (ino > node->s_ino) {
+ p = node->inode_node.rb_right;
+ } else {
+ pos = node;
+ break;
+ }
+#undef node
+ }
+ }
+ while (pos && pos->s_ns != ns) {
+ struct rb_node *p = rb_next(&pos->inode_node);
+ if (!p)
+ pos = NULL;
+ else
+ pos = rb_entry(p, struct sysfs_dirent, inode_node);
}
- while (pos && pos->s_ns && pos->s_ns != ns)
- pos = pos->s_sibling;
return pos;
}
@@ -894,10 +961,13 @@ static struct sysfs_dirent *sysfs_dir_next_pos(const void *ns,
struct sysfs_dirent *parent_sd, ino_t ino, struct sysfs_dirent *pos)
{
pos = sysfs_dir_pos(ns, parent_sd, ino, pos);
- if (pos)
- pos = pos->s_sibling;
- while (pos && pos->s_ns && pos->s_ns != ns)
- pos = pos->s_sibling;
+ if (pos) do {
+ struct rb_node *p = rb_next(&pos->inode_node);
+ if (!p)
+ pos = NULL;
+ else
+ pos = rb_entry(p, struct sysfs_dirent, inode_node);
+ } while (pos && pos->s_ns != ns);
return pos;
}
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 1ad8c93c1b8..d4e6080b4b2 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -466,9 +466,6 @@ void sysfs_notify(struct kobject *k, const char *dir, const char *attr)
mutex_lock(&sysfs_mutex);
if (sd && dir)
- /* Only directories are tagged, so no need to pass
- * a tag explicitly.
- */
sd = sysfs_find_dirent(sd, NULL, dir);
if (sd && attr)
sd = sysfs_find_dirent(sd, NULL, attr);
@@ -488,17 +485,56 @@ const struct file_operations sysfs_file_operations = {
.poll = sysfs_poll,
};
+int sysfs_attr_ns(struct kobject *kobj, const struct attribute *attr,
+ const void **pns)
+{
+ struct sysfs_dirent *dir_sd = kobj->sd;
+ const struct sysfs_ops *ops;
+ const void *ns = NULL;
+ int err;
+
+ err = 0;
+ if (!sysfs_ns_type(dir_sd))
+ goto out;
+
+ err = -EINVAL;
+ if (!kobj->ktype)
+ goto out;
+ ops = kobj->ktype->sysfs_ops;
+ if (!ops)
+ goto out;
+ if (!ops->namespace)
+ goto out;
+
+ err = 0;
+ ns = ops->namespace(kobj, attr);
+out:
+ if (err) {
+ WARN(1, KERN_ERR "missing sysfs namespace attribute operation for "
+ "kobject: %s\n", kobject_name(kobj));
+ }
+ *pns = ns;
+ return err;
+}
+
int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
const struct attribute *attr, int type, mode_t amode)
{
umode_t mode = (amode & S_IALLUGO) | S_IFREG;
struct sysfs_addrm_cxt acxt;
struct sysfs_dirent *sd;
+ const void *ns;
int rc;
+ rc = sysfs_attr_ns(dir_sd->s_dir.kobj, attr, &ns);
+ if (rc)
+ return rc;
+
sd = sysfs_new_dirent(attr->name, mode, type);
if (!sd)
return -ENOMEM;
+
+ sd->s_ns = ns;
sd->s_attr.attr = (void *)attr;
sysfs_dirent_init_lockdep(sd);
@@ -586,12 +622,17 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
{
struct sysfs_dirent *sd;
struct iattr newattrs;
+ const void *ns;
int rc;
+ rc = sysfs_attr_ns(kobj, attr, &ns);
+ if (rc)
+ return rc;
+
mutex_lock(&sysfs_mutex);
rc = -ENOENT;
- sd = sysfs_find_dirent(kobj->sd, NULL, attr->name);
+ sd = sysfs_find_dirent(kobj->sd, ns, attr->name);
if (!sd)
goto out;
@@ -616,7 +657,12 @@ EXPORT_SYMBOL_GPL(sysfs_chmod_file);
void sysfs_remove_file(struct kobject * kobj, const struct attribute * attr)
{
- sysfs_hash_and_remove(kobj->sd, NULL, attr->name);
+ const void *ns;
+
+ if (sysfs_attr_ns(kobj, attr, &ns))
+ return;
+
+ sysfs_hash_and_remove(kobj->sd, ns, attr->name);
}
void sysfs_remove_files(struct kobject * kobj, const struct attribute **ptr)
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index e3f091a81c7..e23f28894a3 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -202,18 +202,6 @@ static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
inode->i_ctime = iattr->ia_ctime;
}
-static int sysfs_count_nlink(struct sysfs_dirent *sd)
-{
- struct sysfs_dirent *child;
- int nr = 0;
-
- for (child = sd->s_dir.children; child; child = child->s_sibling)
- if (sysfs_type(child) == SYSFS_DIR)
- nr++;
-
- return nr + 2;
-}
-
static void sysfs_refresh_inode(struct sysfs_dirent *sd, struct inode *inode)
{
struct sysfs_inode_attrs *iattrs = sd->s_iattr;
@@ -230,7 +218,7 @@ static void sysfs_refresh_inode(struct sysfs_dirent *sd, struct inode *inode)
}
if (sysfs_type(sd) == SYSFS_DIR)
- inode->i_nlink = sysfs_count_nlink(sd);
+ inode->i_nlink = sd->s_dir.subdirs + 2;
}
int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
@@ -336,8 +324,6 @@ int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns, const cha
sysfs_addrm_start(&acxt, dir_sd);
sd = sysfs_find_dirent(dir_sd, ns, name);
- if (sd && (sd->s_ns != ns))
- sd = NULL;
if (sd)
sysfs_remove_one(&acxt, sd);
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 845ab3ad229..ce29e28b766 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -11,14 +11,18 @@
#include <linux/lockdep.h>
#include <linux/kobject_ns.h>
#include <linux/fs.h>
+#include <linux/rbtree.h>
struct sysfs_open_dirent;
/* type-specific structures for sysfs_dirent->s_* union members */
struct sysfs_elem_dir {
struct kobject *kobj;
- /* children list starts here and goes through sd->s_sibling */
- struct sysfs_dirent *children;
+
+ unsigned long subdirs;
+
+ struct rb_root inode_tree;
+ struct rb_root name_tree;
};
struct sysfs_elem_symlink {
@@ -56,9 +60,16 @@ struct sysfs_dirent {
struct lockdep_map dep_map;
#endif
struct sysfs_dirent *s_parent;
- struct sysfs_dirent *s_sibling;
const char *s_name;
+ struct rb_node inode_node;
+ struct rb_node name_node;
+
+ union {
+ struct completion *completion;
+ struct sysfs_dirent *removed_list;
+ } u;
+
const void *s_ns; /* namespace tag */
union {
struct sysfs_elem_dir s_dir;
diff --git a/fs/xattr.c b/fs/xattr.c
index f060663ab70..67583de8218 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -14,6 +14,7 @@
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>
+#include <linux/evm.h>
#include <linux/syscalls.h>
#include <linux/module.h>
#include <linux/fsnotify.h>
@@ -166,6 +167,64 @@ out_noalloc:
}
EXPORT_SYMBOL_GPL(xattr_getsecurity);
+/*
+ * vfs_getxattr_alloc - allocate memory, if necessary, before calling getxattr
+ *
+ * Allocate memory, if not already allocated, or re-allocate correct size,
+ * before retrieving the extended attribute.
+ *
+ * Returns the result of alloc, if failed, or the getxattr operation.
+ */
+ssize_t
+vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value,
+ size_t xattr_size, gfp_t flags)
+{
+ struct inode *inode = dentry->d_inode;
+ char *value = *xattr_value;
+ int error;
+
+ error = xattr_permission(inode, name, MAY_READ);
+ if (error)
+ return error;
+
+ if (!inode->i_op->getxattr)
+ return -EOPNOTSUPP;
+
+ error = inode->i_op->getxattr(dentry, name, NULL, 0);
+ if (error < 0)
+ return error;
+
+ if (!value || (error > xattr_size)) {
+ value = krealloc(*xattr_value, error + 1, flags);
+ if (!value)
+ return -ENOMEM;
+ memset(value, 0, error + 1);
+ }
+
+ error = inode->i_op->getxattr(dentry, name, value, error);
+ *xattr_value = value;
+ return error;
+}
+
+/* Compare an extended attribute value with the given value */
+int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
+ const char *value, size_t size, gfp_t flags)
+{
+ char *xattr_value = NULL;
+ int rc;
+
+ rc = vfs_getxattr_alloc(dentry, xattr_name, &xattr_value, 0, flags);
+ if (rc < 0)
+ return rc;
+
+ if ((rc != size) || (memcmp(xattr_value, value, rc) != 0))
+ rc = -EINVAL;
+ else
+ rc = 0;
+ kfree(xattr_value);
+ return rc;
+}
+
ssize_t
vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
{
@@ -243,8 +302,10 @@ vfs_removexattr(struct dentry *dentry, const char *name)
error = inode->i_op->removexattr(dentry, name);
mutex_unlock(&inode->i_mutex);
- if (!error)
+ if (!error) {
fsnotify_xattr(dentry);
+ evm_inode_post_removexattr(dentry, name);
+ }
return error;
}
EXPORT_SYMBOL_GPL(vfs_removexattr);
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index f7c8f7a9ea6..292eff19803 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -61,12 +61,7 @@ extern void kmem_free(const void *);
static inline void *kmem_zalloc_large(size_t size)
{
- void *ptr;
-
- ptr = vmalloc(size);
- if (ptr)
- memset(ptr, 0, size);
- return ptr;
+ return vzalloc(size);
}
static inline void kmem_free_large(void *ptr)
{
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index cac2ecfa674..ef43fce519a 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -629,7 +629,7 @@ xfs_buf_item_push(
* the xfsbufd to get this buffer written. We have to unlock the buffer
* to allow the xfsbufd to write it, too.
*/
-STATIC void
+STATIC bool
xfs_buf_item_pushbuf(
struct xfs_log_item *lip)
{
@@ -643,6 +643,7 @@ xfs_buf_item_pushbuf(
xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp);
+ return true;
}
STATIC void
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 9e0e2fa3f2c..bb3f71d236d 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait(
* search the buffer cache can be a time consuming thing, and AIL lock is a
* spinlock.
*/
-STATIC void
+STATIC bool
xfs_qm_dquot_logitem_pushbuf(
struct xfs_log_item *lip)
{
struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
struct xfs_dquot *dqp = qlip->qli_dquot;
struct xfs_buf *bp;
+ bool ret = true;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
@@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf(
if (completion_done(&dqp->q_flush) ||
!(lip->li_flags & XFS_LI_IN_AIL)) {
xfs_dqunlock(dqp);
- return;
+ return true;
}
bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
xfs_dqunlock(dqp);
if (!bp)
- return;
+ return true;
if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp);
+ if (xfs_buf_ispinned(bp))
+ ret = false;
xfs_buf_relse(bp);
+ return ret;
}
/*
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 588406dc6a3..836ad80d4f2 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -708,13 +708,14 @@ xfs_inode_item_committed(
* marked delayed write. If that's the case, we'll promote it and that will
* allow the caller to write the buffer by triggering the xfsbufd to run.
*/
-STATIC void
+STATIC bool
xfs_inode_item_pushbuf(
struct xfs_log_item *lip)
{
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
struct xfs_buf *bp;
+ bool ret = true;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
@@ -725,7 +726,7 @@ xfs_inode_item_pushbuf(
if (completion_done(&ip->i_flush) ||
!(lip->li_flags & XFS_LI_IN_AIL)) {
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- return;
+ return true;
}
bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
@@ -733,10 +734,13 @@ xfs_inode_item_pushbuf(
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (!bp)
- return;
+ return true;
if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp);
+ if (xfs_buf_ispinned(bp))
+ ret = false;
xfs_buf_relse(bp);
+ return ret;
}
/*
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 673704fab74..28856accb4f 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -102,37 +102,38 @@ xfs_mark_inode_dirty(
}
+
+int xfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
+{
+ const struct xattr *xattr;
+ struct xfs_inode *ip = XFS_I(inode);
+ int error = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ error = xfs_attr_set(ip, xattr->name, xattr->value,
+ xattr->value_len, ATTR_SECURE);
+ if (error < 0)
+ break;
+ }
+ return error;
+}
+
/*
* Hook in SELinux. This is not quite correct yet, what we really need
* here (as we do for default ACLs) is a mechanism by which creation of
* these attrs can be journalled at inode creation time (along with the
* inode, of course, such that log replay can't cause these to be lost).
*/
+
STATIC int
xfs_init_security(
struct inode *inode,
struct inode *dir,
const struct qstr *qstr)
{
- struct xfs_inode *ip = XFS_I(inode);
- size_t length;
- void *value;
- unsigned char *name;
- int error;
-
- error = security_inode_init_security(inode, dir, qstr, (char **)&name,
- &value, &length);
- if (error) {
- if (error == -EOPNOTSUPP)
- return 0;
- return -error;
- }
-
- error = xfs_attr_set(ip, name, value, length, ATTR_SECURE);
-
- kfree(name);
- kfree(value);
- return error;
+ return security_inode_init_security(inode, dir, qstr,
+ &xfs_initxattrs, NULL);
}
static void
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 1e8a45e74c3..828662f70d6 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -68,6 +68,8 @@
#include <linux/ctype.h>
#include <linux/writeback.h>
#include <linux/capability.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include <linux/list_sort.h>
#include <asm/page.h>
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 2366c54cc4f..5cf06b85fd9 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1652,24 +1652,13 @@ xfs_init_workqueues(void)
*/
xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
if (!xfs_syncd_wq)
- goto out;
-
- xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
- if (!xfs_ail_wq)
- goto out_destroy_syncd;
-
+ return -ENOMEM;
return 0;
-
-out_destroy_syncd:
- destroy_workqueue(xfs_syncd_wq);
-out:
- return -ENOMEM;
}
STATIC void
xfs_destroy_workqueues(void)
{
- destroy_workqueue(xfs_ail_wq);
destroy_workqueue(xfs_syncd_wq);
}
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 06a9759b635..53597f4db9b 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -350,7 +350,7 @@ typedef struct xfs_item_ops {
void (*iop_unlock)(xfs_log_item_t *);
xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
void (*iop_push)(xfs_log_item_t *);
- void (*iop_pushbuf)(xfs_log_item_t *);
+ bool (*iop_pushbuf)(xfs_log_item_t *);
void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
} xfs_item_ops_t;
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index c15aa29fa16..3a1e7ca54c2 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,8 +28,6 @@
#include "xfs_trans_priv.h"
#include "xfs_error.h"
-struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
-
#ifdef DEBUG
/*
* Check that the list is sorted as it should be.
@@ -356,16 +354,10 @@ xfs_ail_delete(
xfs_trans_ail_cursor_clear(ailp, lip);
}
-/*
- * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
- * to run at a later time if there is more work to do to complete the push.
- */
-STATIC void
-xfs_ail_worker(
- struct work_struct *work)
+static long
+xfsaild_push(
+ struct xfs_ail *ailp)
{
- struct xfs_ail *ailp = container_of(to_delayed_work(work),
- struct xfs_ail, xa_work);
xfs_mount_t *mp = ailp->xa_mount;
struct xfs_ail_cursor cur;
xfs_log_item_t *lip;
@@ -427,8 +419,13 @@ xfs_ail_worker(
case XFS_ITEM_PUSHBUF:
XFS_STATS_INC(xs_push_ail_pushbuf);
- IOP_PUSHBUF(lip);
- ailp->xa_last_pushed_lsn = lsn;
+
+ if (!IOP_PUSHBUF(lip)) {
+ stuck++;
+ flush_log = 1;
+ } else {
+ ailp->xa_last_pushed_lsn = lsn;
+ }
push_xfsbufd = 1;
break;
@@ -440,7 +437,6 @@ xfs_ail_worker(
case XFS_ITEM_LOCKED:
XFS_STATS_INC(xs_push_ail_locked);
- ailp->xa_last_pushed_lsn = lsn;
stuck++;
break;
@@ -501,20 +497,6 @@ out_done:
/* We're past our target or empty, so idle */
ailp->xa_last_pushed_lsn = 0;
- /*
- * We clear the XFS_AIL_PUSHING_BIT first before checking
- * whether the target has changed. If the target has changed,
- * this pushes the requeue race directly onto the result of the
- * atomic test/set bit, so we are guaranteed that either the
- * the pusher that changed the target or ourselves will requeue
- * the work (but not both).
- */
- clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
- smp_rmb();
- if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
- test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
- return;
-
tout = 50;
} else if (XFS_LSN_CMP(lsn, target) >= 0) {
/*
@@ -537,9 +519,30 @@ out_done:
tout = 20;
}
- /* There is more to do, requeue us. */
- queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
- msecs_to_jiffies(tout));
+ return tout;
+}
+
+static int
+xfsaild(
+ void *data)
+{
+ struct xfs_ail *ailp = data;
+ long tout = 0; /* milliseconds */
+
+ while (!kthread_should_stop()) {
+ if (tout && tout <= 20)
+ __set_current_state(TASK_KILLABLE);
+ else
+ __set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(tout ?
+ msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
+
+ try_to_freeze();
+
+ tout = xfsaild_push(ailp);
+ }
+
+ return 0;
}
/*
@@ -574,8 +577,9 @@ xfs_ail_push(
*/
smp_wmb();
xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
- if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
- queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
+ smp_wmb();
+
+ wake_up_process(ailp->xa_task);
}
/*
@@ -813,9 +817,18 @@ xfs_trans_ail_init(
INIT_LIST_HEAD(&ailp->xa_ail);
INIT_LIST_HEAD(&ailp->xa_cursors);
spin_lock_init(&ailp->xa_lock);
- INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
+
+ ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
+ ailp->xa_mount->m_fsname);
+ if (IS_ERR(ailp->xa_task))
+ goto out_free_ailp;
+
mp->m_ail = ailp;
return 0;
+
+out_free_ailp:
+ kmem_free(ailp);
+ return ENOMEM;
}
void
@@ -824,6 +837,6 @@ xfs_trans_ail_destroy(
{
struct xfs_ail *ailp = mp->m_ail;
- cancel_delayed_work_sync(&ailp->xa_work);
+ kthread_stop(ailp->xa_task);
kmem_free(ailp);
}
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 212946b9723..22750b5e4a8 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -64,23 +64,17 @@ struct xfs_ail_cursor {
*/
struct xfs_ail {
struct xfs_mount *xa_mount;
+ struct task_struct *xa_task;
struct list_head xa_ail;
xfs_lsn_t xa_target;
struct list_head xa_cursors;
spinlock_t xa_lock;
- struct delayed_work xa_work;
xfs_lsn_t xa_last_pushed_lsn;
- unsigned long xa_flags;
};
-#define XFS_AIL_PUSHING_BIT 0
-
/*
* From xfs_trans_ail.c
*/
-
-extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
-
void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur,
struct xfs_log_item **log_items, int nr_items,
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index 84793c7025e..9e5b0356e2b 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -145,11 +145,6 @@ struct f_owner_ex {
#define F_SHLCK 8 /* or 4 */
#endif
-/* for leases */
-#ifndef F_INPROGRESS
-#define F_INPROGRESS 16
-#endif
-
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index db22d136ad0..b5e2e4c6b01 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -222,7 +222,6 @@
VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
*(__tracepoints_ptrs) /* Tracepoints: pointer array */\
VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
- *(__markers_strings) /* Markers: strings */ \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
\
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index d34c187432e..f57c36881c4 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
+#include <linux/socket.h>
/*
* AppleTalk networking structures
@@ -28,7 +29,7 @@ struct atalk_addr {
};
struct sockaddr_at {
- sa_family_t sat_family;
+ __kernel_sa_family_t sat_family;
__u8 sat_port;
struct atalk_addr sat_addr;
char sat_zero[8];
diff --git a/include/linux/ax25.h b/include/linux/ax25.h
index 56c11f0dbd8..74c89a41732 100644
--- a/include/linux/ax25.h
+++ b/include/linux/ax25.h
@@ -47,7 +47,7 @@ typedef struct {
} ax25_address;
struct sockaddr_ax25 {
- sa_family_t sax25_family;
+ __kernel_sa_family_t sax25_family;
ax25_address sax25_call;
int sax25_ndigis;
/* Digipeater ax25_address sets follow */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 8c96654bef1..5dbd7055cb8 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -6,6 +6,7 @@
#include <linux/bcma/bcma_driver_chipcommon.h>
#include <linux/bcma/bcma_driver_pci.h>
+#include <linux/bcma/bcma_driver_mips.h>
#include <linux/ssb/ssb.h> /* SPROM sharing */
#include "bcma_regs.h"
@@ -14,9 +15,9 @@ struct bcma_device;
struct bcma_bus;
enum bcma_hosttype {
- BCMA_HOSTTYPE_NONE,
BCMA_HOSTTYPE_PCI,
BCMA_HOSTTYPE_SDIO,
+ BCMA_HOSTTYPE_SOC,
};
struct bcma_chipinfo {
@@ -130,6 +131,7 @@ struct bcma_device {
struct device dev;
struct device *dma_dev;
+
unsigned int irq;
bool dev_registered;
@@ -138,6 +140,9 @@ struct bcma_device {
u32 addr;
u32 wrap;
+ void __iomem *io_addr;
+ void __iomem *io_wrap;
+
void *drvdata;
struct list_head list;
};
@@ -190,9 +195,11 @@ struct bcma_bus {
struct bcma_device *mapped_core;
struct list_head cores;
u8 nr_cores;
+ u8 init_done:1;
struct bcma_drv_cc drv_cc;
struct bcma_drv_pci drv_pci;
+ struct bcma_drv_mips drv_mips;
/* We decided to share SPROM struct with SSB as long as we do not need
* any hacks for BCMA. This simplifies drivers code. */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index a0f684615ae..1526d965ed0 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -24,6 +24,7 @@
#define BCMA_CC_FLASHT_NONE 0x00000000 /* No flash */
#define BCMA_CC_FLASHT_STSER 0x00000100 /* ST serial flash */
#define BCMA_CC_FLASHT_ATSER 0x00000200 /* Atmel serial flash */
+#define BCMA_CC_FLASHT_NFLASH 0x00000200
#define BCMA_CC_FLASHT_PARA 0x00000700 /* Parallel flash */
#define BCMA_CC_CAP_PLLT 0x00038000 /* PLL Type */
#define BCMA_PLLTYPE_NONE 0x00000000
@@ -178,6 +179,7 @@
#define BCMA_CC_PROG_CFG 0x0120
#define BCMA_CC_PROG_WAITCNT 0x0124
#define BCMA_CC_FLASH_CFG 0x0128
+#define BCMA_CC_FLASH_CFG_DS 0x0010 /* Data size, 0=8bit, 1=16bit */
#define BCMA_CC_FLASH_WAITCNT 0x012C
/* 0x1E0 is defined as shared BCMA_CLKCTLST */
#define BCMA_CC_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */
@@ -239,6 +241,64 @@
#define BCMA_CC_SPROM 0x0800 /* SPROM beginning */
#define BCMA_CC_SPROM_PCIE6 0x0830 /* SPROM beginning on PCIe rev >= 6 */
+/* Divider allocation in 4716/47162/5356 */
+#define BCMA_CC_PMU5_MAINPLL_CPU 1
+#define BCMA_CC_PMU5_MAINPLL_MEM 2
+#define BCMA_CC_PMU5_MAINPLL_SSB 3
+
+/* PLL usage in 4716/47162 */
+#define BCMA_CC_PMU4716_MAINPLL_PLL0 12
+
+/* PLL usage in 5356/5357 */
+#define BCMA_CC_PMU5356_MAINPLL_PLL0 0
+#define BCMA_CC_PMU5357_MAINPLL_PLL0 0
+
+/* 4706 PMU */
+#define BCMA_CC_PMU4706_MAINPLL_PLL0 0
+
+/* ALP clock on pre-PMU chips */
+#define BCMA_CC_PMU_ALP_CLOCK 20000000
+/* HT clock for systems with PMU-enabled chipcommon */
+#define BCMA_CC_PMU_HT_CLOCK 80000000
+
+/* PMU rev 5 (& 6) */
+#define BCMA_CC_PPL_P1P2_OFF 0
+#define BCMA_CC_PPL_P1_MASK 0x0f000000
+#define BCMA_CC_PPL_P1_SHIFT 24
+#define BCMA_CC_PPL_P2_MASK 0x00f00000
+#define BCMA_CC_PPL_P2_SHIFT 20
+#define BCMA_CC_PPL_M14_OFF 1
+#define BCMA_CC_PPL_MDIV_MASK 0x000000ff
+#define BCMA_CC_PPL_MDIV_WIDTH 8
+#define BCMA_CC_PPL_NM5_OFF 2
+#define BCMA_CC_PPL_NDIV_MASK 0xfff00000
+#define BCMA_CC_PPL_NDIV_SHIFT 20
+#define BCMA_CC_PPL_FMAB_OFF 3
+#define BCMA_CC_PPL_MRAT_MASK 0xf0000000
+#define BCMA_CC_PPL_MRAT_SHIFT 28
+#define BCMA_CC_PPL_ABRAT_MASK 0x08000000
+#define BCMA_CC_PPL_ABRAT_SHIFT 27
+#define BCMA_CC_PPL_FDIV_MASK 0x07ffffff
+#define BCMA_CC_PPL_PLLCTL_OFF 4
+#define BCMA_CC_PPL_PCHI_OFF 5
+#define BCMA_CC_PPL_PCHI_MASK 0x0000003f
+
+/* BCM4331 ChipControl numbers. */
+#define BCMA_CHIPCTL_4331_BT_COEXIST BIT(0) /* 0 disable */
+#define BCMA_CHIPCTL_4331_SECI BIT(1) /* 0 SECI is disabled (JATG functional) */
+#define BCMA_CHIPCTL_4331_EXT_LNA BIT(2) /* 0 disable */
+#define BCMA_CHIPCTL_4331_SPROM_GPIO13_15 BIT(3) /* sprom/gpio13-15 mux */
+#define BCMA_CHIPCTL_4331_EXTPA_EN BIT(4) /* 0 ext pa disable, 1 ext pa enabled */
+#define BCMA_CHIPCTL_4331_GPIOCLK_ON_SPROMCS BIT(5) /* set drive out GPIO_CLK on sprom_cs pin */
+#define BCMA_CHIPCTL_4331_PCIE_MDIO_ON_SPROMCS BIT(6) /* use sprom_cs pin as PCIE mdio interface */
+#define BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5 BIT(7) /* aband extpa will be at gpio2/5 and sprom_dout */
+#define BCMA_CHIPCTL_4331_OVR_PIPEAUXCLKEN BIT(8) /* override core control on pipe_AuxClkEnable */
+#define BCMA_CHIPCTL_4331_OVR_PIPEAUXPWRDOWN BIT(9) /* override core control on pipe_AuxPowerDown */
+#define BCMA_CHIPCTL_4331_PCIE_AUXCLKEN BIT(10) /* pcie_auxclkenable */
+#define BCMA_CHIPCTL_4331_PCIE_PIPE_PLLDOWN BIT(11) /* pcie_pipe_pllpowerdown */
+#define BCMA_CHIPCTL_4331_BT_SHD0_ON_GPIO4 BIT(16) /* enable bt_shd0 at gpio4 */
+#define BCMA_CHIPCTL_4331_BT_SHD1_ON_GPIO5 BIT(17) /* enable bt_shd1 at gpio5 */
+
/* Data for the PMU, if available.
* Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
*/
@@ -247,14 +307,37 @@ struct bcma_chipcommon_pmu {
u32 crystalfreq; /* The active crystal frequency (in kHz) */
};
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+struct bcma_pflash {
+ u8 buswidth;
+ u32 window;
+ u32 window_size;
+};
+
+struct bcma_serial_port {
+ void *regs;
+ unsigned long clockspeed;
+ unsigned int irq;
+ unsigned int baud_base;
+ unsigned int reg_shift;
+};
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
+
struct bcma_drv_cc {
struct bcma_device *core;
u32 status;
u32 capabilities;
u32 capabilities_ext;
+ u8 setup_done:1;
/* Fast Powerup Delay constant */
u16 fast_pwrup_delay;
struct bcma_chipcommon_pmu pmu;
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+ struct bcma_pflash pflash;
+
+ int nr_serial_ports;
+ struct bcma_serial_port serial_ports[4];
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
};
/* Register access */
@@ -275,6 +358,8 @@ extern void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
extern void bcma_chipco_suspend(struct bcma_drv_cc *cc);
extern void bcma_chipco_resume(struct bcma_drv_cc *cc);
+void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+
extern void bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc,
u32 ticks);
@@ -293,4 +378,13 @@ u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value);
/* PMU support */
extern void bcma_pmu_init(struct bcma_drv_cc *cc);
+extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset,
+ u32 value);
+extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset,
+ u32 mask, u32 set);
+extern void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
+ u32 offset, u32 mask, u32 set);
+extern void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc,
+ u32 offset, u32 mask, u32 set);
+
#endif /* LINUX_BCMA_DRIVER_CC_H_ */
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
new file mode 100644
index 00000000000..c0043645cdc
--- /dev/null
+++ b/include/linux/bcma/bcma_driver_mips.h
@@ -0,0 +1,51 @@
+#ifndef LINUX_BCMA_DRIVER_MIPS_H_
+#define LINUX_BCMA_DRIVER_MIPS_H_
+
+#define BCMA_MIPS_IPSFLAG 0x0F08
+/* which sbflags get routed to mips interrupt 1 */
+#define BCMA_MIPS_IPSFLAG_IRQ1 0x0000003F
+#define BCMA_MIPS_IPSFLAG_IRQ1_SHIFT 0
+/* which sbflags get routed to mips interrupt 2 */
+#define BCMA_MIPS_IPSFLAG_IRQ2 0x00003F00
+#define BCMA_MIPS_IPSFLAG_IRQ2_SHIFT 8
+/* which sbflags get routed to mips interrupt 3 */
+#define BCMA_MIPS_IPSFLAG_IRQ3 0x003F0000
+#define BCMA_MIPS_IPSFLAG_IRQ3_SHIFT 16
+/* which sbflags get routed to mips interrupt 4 */
+#define BCMA_MIPS_IPSFLAG_IRQ4 0x3F000000
+#define BCMA_MIPS_IPSFLAG_IRQ4_SHIFT 24
+
+/* MIPS 74K core registers */
+#define BCMA_MIPS_MIPS74K_CORECTL 0x0000
+#define BCMA_MIPS_MIPS74K_EXCEPTBASE 0x0004
+#define BCMA_MIPS_MIPS74K_BIST 0x000C
+#define BCMA_MIPS_MIPS74K_INTMASK_INT0 0x0014
+#define BCMA_MIPS_MIPS74K_INTMASK(int) \
+ ((int) * 4 + BCMA_MIPS_MIPS74K_INTMASK_INT0)
+#define BCMA_MIPS_MIPS74K_NMIMASK 0x002C
+#define BCMA_MIPS_MIPS74K_GPIOSEL 0x0040
+#define BCMA_MIPS_MIPS74K_GPIOOUT 0x0044
+#define BCMA_MIPS_MIPS74K_GPIOEN 0x0048
+#define BCMA_MIPS_MIPS74K_CLKCTLST 0x01E0
+
+#define BCMA_MIPS_OOBSELOUTA30 0x100
+
+struct bcma_device;
+
+struct bcma_drv_mips {
+ struct bcma_device *core;
+ u8 setup_done:1;
+ unsigned int assigned_irqs;
+};
+
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+extern void bcma_core_mips_init(struct bcma_drv_mips *mcore);
+#else
+static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { }
+#endif
+
+extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore);
+
+extern unsigned int bcma_core_mips_irq(struct bcma_device *dev);
+
+#endif /* LINUX_BCMA_DRIVER_MIPS_H_ */
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
new file mode 100644
index 00000000000..4203c5593b9
--- /dev/null
+++ b/include/linux/bcma/bcma_soc.h
@@ -0,0 +1,16 @@
+#ifndef LINUX_BCMA_SOC_H_
+#define LINUX_BCMA_SOC_H_
+
+#include <linux/bcma/bcma.h>
+
+struct bcma_soc {
+ struct bcma_bus bus;
+ struct bcma_device core_cc;
+ struct bcma_device core_mips;
+};
+
+int __init bcma_host_soc_register(struct bcma_soc *soc);
+
+int bcma_bus_register(struct bcma_bus *bus);
+
+#endif /* LINUX_BCMA_SOC_H_ */
diff --git a/include/linux/caif/caif_socket.h b/include/linux/caif/caif_socket.h
index d9cb19b7cff..3f3bac6af7b 100644
--- a/include/linux/caif/caif_socket.h
+++ b/include/linux/caif/caif_socket.h
@@ -9,12 +9,7 @@
#define _LINUX_CAIF_SOCKET_H
#include <linux/types.h>
-
-#ifdef __KERNEL__
#include <linux/socket.h>
-#else
-#include <sys/socket.h>
-#endif
/**
* enum caif_link_selector - Physical Link Selection.
@@ -144,7 +139,7 @@ enum caif_debug_service {
* CAIF Channel. It defines the service to connect to on the modem.
*/
struct sockaddr_caif {
- sa_family_t family;
+ __kernel_sa_family_t family;
union {
struct {
__u8 type; /* type: enum caif_at_type */
diff --git a/include/linux/can.h b/include/linux/can.h
index d18333302cb..9a19bcb3eea 100644
--- a/include/linux/can.h
+++ b/include/linux/can.h
@@ -8,8 +8,6 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef CAN_H
@@ -78,7 +76,7 @@ struct can_frame {
* @can_addr: protocol specific address information
*/
struct sockaddr_can {
- sa_family_t can_family;
+ __kernel_sa_family_t can_family;
int can_ifindex;
union {
/* transport protocol class address information (e.g. ISOTP) */
diff --git a/include/linux/can/Kbuild b/include/linux/can/Kbuild
index 8cb05aae661..c62b7f1728f 100644
--- a/include/linux/can/Kbuild
+++ b/include/linux/can/Kbuild
@@ -1,4 +1,5 @@
header-y += raw.h
header-y += bcm.h
+header-y += gw.h
header-y += error.h
header-y += netlink.h
diff --git a/include/linux/can/bcm.h b/include/linux/can/bcm.h
index 1432b278c52..3ebe387fea4 100644
--- a/include/linux/can/bcm.h
+++ b/include/linux/can/bcm.h
@@ -7,14 +7,13 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef CAN_BCM_H
#define CAN_BCM_H
#include <linux/types.h>
+#include <linux/can.h>
/**
* struct bcm_msg_head - head of messages to/from the broadcast manager
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index 5ce6b5d62ec..0ccc1cd28b9 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -8,8 +8,6 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef CAN_CORE_H
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index cc0bb496166..a0969fcb72b 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -8,7 +8,6 @@
*
* Copyright (C) 2008 Wolfgang Grandegger <wg@grandegger.com>
*
- * Send feedback to <socketcan-users@lists.berlios.de>
*/
#ifndef CAN_DEV_H
diff --git a/include/linux/can/error.h b/include/linux/can/error.h
index 5958074302a..63e855ea6b8 100644
--- a/include/linux/can/error.h
+++ b/include/linux/can/error.h
@@ -7,8 +7,6 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef CAN_ERROR_H
diff --git a/include/linux/can/gw.h b/include/linux/can/gw.h
new file mode 100644
index 00000000000..8e1db18c3cb
--- /dev/null
+++ b/include/linux/can/gw.h
@@ -0,0 +1,162 @@
+/*
+ * linux/can/gw.h
+ *
+ * Definitions for CAN frame Gateway/Router/Bridge
+ *
+ * Author: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
+ * Copyright (c) 2011 Volkswagen Group Electronic Research
+ * All rights reserved.
+ *
+ */
+
+#ifndef CAN_GW_H
+#define CAN_GW_H
+
+#include <linux/types.h>
+#include <linux/can.h>
+
+struct rtcanmsg {
+ __u8 can_family;
+ __u8 gwtype;
+ __u16 flags;
+};
+
+/* CAN gateway types */
+enum {
+ CGW_TYPE_UNSPEC,
+ CGW_TYPE_CAN_CAN, /* CAN->CAN routing */
+ __CGW_TYPE_MAX
+};
+
+#define CGW_TYPE_MAX (__CGW_TYPE_MAX - 1)
+
+/* CAN rtnetlink attribute definitions */
+enum {
+ CGW_UNSPEC,
+ CGW_MOD_AND, /* CAN frame modification binary AND */
+ CGW_MOD_OR, /* CAN frame modification binary OR */
+ CGW_MOD_XOR, /* CAN frame modification binary XOR */
+ CGW_MOD_SET, /* CAN frame modification set alternate values */
+ CGW_CS_XOR, /* set data[] XOR checksum into data[index] */
+ CGW_CS_CRC8, /* set data[] CRC8 checksum into data[index] */
+ CGW_HANDLED, /* number of handled CAN frames */
+ CGW_DROPPED, /* number of dropped CAN frames */
+ CGW_SRC_IF, /* ifindex of source network interface */
+ CGW_DST_IF, /* ifindex of destination network interface */
+ CGW_FILTER, /* specify struct can_filter on source CAN device */
+ __CGW_MAX
+};
+
+#define CGW_MAX (__CGW_MAX - 1)
+
+#define CGW_FLAGS_CAN_ECHO 0x01
+#define CGW_FLAGS_CAN_SRC_TSTAMP 0x02
+
+#define CGW_MOD_FUNCS 4 /* AND OR XOR SET */
+
+/* CAN frame elements that are affected by curr. 3 CAN frame modifications */
+#define CGW_MOD_ID 0x01
+#define CGW_MOD_DLC 0x02
+#define CGW_MOD_DATA 0x04
+
+#define CGW_FRAME_MODS 3 /* ID DLC DATA */
+
+#define MAX_MODFUNCTIONS (CGW_MOD_FUNCS * CGW_FRAME_MODS)
+
+struct cgw_frame_mod {
+ struct can_frame cf;
+ __u8 modtype;
+} __attribute__((packed));
+
+#define CGW_MODATTR_LEN sizeof(struct cgw_frame_mod)
+
+struct cgw_csum_xor {
+ __s8 from_idx;
+ __s8 to_idx;
+ __s8 result_idx;
+ __u8 init_xor_val;
+} __attribute__((packed));
+
+struct cgw_csum_crc8 {
+ __s8 from_idx;
+ __s8 to_idx;
+ __s8 result_idx;
+ __u8 init_crc_val;
+ __u8 final_xor_val;
+ __u8 crctab[256];
+ __u8 profile;
+ __u8 profile_data[20];
+} __attribute__((packed));
+
+/* length of checksum operation parameters. idx = index in CAN frame data[] */
+#define CGW_CS_XOR_LEN sizeof(struct cgw_csum_xor)
+#define CGW_CS_CRC8_LEN sizeof(struct cgw_csum_crc8)
+
+/* CRC8 profiles (compute CRC for additional data elements - see below) */
+enum {
+ CGW_CRC8PRF_UNSPEC,
+ CGW_CRC8PRF_1U8, /* compute one additional u8 value */
+ CGW_CRC8PRF_16U8, /* u8 value table indexed by data[1] & 0xF */
+ CGW_CRC8PRF_SFFID_XOR, /* (can_id & 0xFF) ^ (can_id >> 8 & 0xFF) */
+ __CGW_CRC8PRF_MAX
+};
+
+#define CGW_CRC8PRF_MAX (__CGW_CRC8PRF_MAX - 1)
+
+/*
+ * CAN rtnetlink attribute contents in detail
+ *
+ * CGW_XXX_IF (length 4 bytes):
+ * Sets an interface index for source/destination network interfaces.
+ * For the CAN->CAN gwtype the indices of _two_ CAN interfaces are mandatory.
+ *
+ * CGW_FILTER (length 8 bytes):
+ * Sets a CAN receive filter for the gateway job specified by the
+ * struct can_filter described in include/linux/can.h
+ *
+ * CGW_MOD_XXX (length 17 bytes):
+ * Specifies a modification that's done to a received CAN frame before it is
+ * send out to the destination interface.
+ *
+ * <struct can_frame> data used as operator
+ * <u8> affected CAN frame elements
+ *
+ * CGW_CS_XOR (length 4 bytes):
+ * Set a simple XOR checksum starting with an initial value into
+ * data[result-idx] using data[start-idx] .. data[end-idx]
+ *
+ * The XOR checksum is calculated like this:
+ *
+ * xor = init_xor_val
+ *
+ * for (i = from_idx .. to_idx)
+ * xor ^= can_frame.data[i]
+ *
+ * can_frame.data[ result_idx ] = xor
+ *
+ * CGW_CS_CRC8 (length 282 bytes):
+ * Set a CRC8 value into data[result-idx] using a given 256 byte CRC8 table,
+ * a given initial value and a defined input data[start-idx] .. data[end-idx].
+ * Finally the result value is XOR'ed with the final_xor_val.
+ *
+ * The CRC8 checksum is calculated like this:
+ *
+ * crc = init_crc_val
+ *
+ * for (i = from_idx .. to_idx)
+ * crc = crctab[ crc ^ can_frame.data[i] ]
+ *
+ * can_frame.data[ result_idx ] = crc ^ final_xor_val
+ *
+ * The calculated CRC may contain additional source data elements that can be
+ * defined in the handling of 'checksum profiles' e.g. shown in AUTOSAR specs
+ * like http://www.autosar.org/download/R4.0/AUTOSAR_SWS_E2ELibrary.pdf
+ * E.g. the profile_data[] may contain additional u8 values (called DATA_IDs)
+ * that are used depending on counter values inside the CAN frame data[].
+ * So far only three profiles have been implemented for illustration.
+ *
+ * Remark: In general the attribute data is a linear buffer.
+ * Beware of sending unpacked or aligned structs!
+ */
+
+#endif
diff --git a/include/linux/can/netlink.h b/include/linux/can/netlink.h
index 34542d374dd..14966ddb7df 100644
--- a/include/linux/can/netlink.h
+++ b/include/linux/can/netlink.h
@@ -5,8 +5,6 @@
*
* Copyright (c) 2009 Wolfgang Grandegger <wg@grandegger.com>
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef CAN_NETLINK_H
diff --git a/include/linux/can/raw.h b/include/linux/can/raw.h
index b2a0f87492c..781f3a3701b 100644
--- a/include/linux/can/raw.h
+++ b/include/linux/can/raw.h
@@ -8,8 +8,6 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef CAN_RAW_H
diff --git a/include/linux/capability.h b/include/linux/capability.h
index c4211235000..a63d13d84ad 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -198,7 +198,7 @@ struct cpu_vfs_cap_data {
/* Allow modification of routing tables */
/* Allow setting arbitrary process / process group ownership on
sockets */
-/* Allow binding to any address for transparent proxying */
+/* Allow binding to any address for transparent proxying (also via NET_RAW) */
/* Allow setting TOS (type of service) */
/* Allow setting promiscuous mode */
/* Allow clearing driver statistics */
@@ -210,6 +210,7 @@ struct cpu_vfs_cap_data {
/* Allow use of RAW sockets */
/* Allow use of PACKET sockets */
+/* Allow binding to any address for transparent proxying (also via NET_ADMIN) */
#define CAP_NET_RAW 13
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index d7adf151d33..ca768ae729b 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -6,7 +6,6 @@
#include <linux/net.h>
#include <linux/radix-tree.h>
#include <linux/uio.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include "types.h"
diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h
index 12c517b51ca..d03612b196e 100644
--- a/include/linux/cn_proc.h
+++ b/include/linux/cn_proc.h
@@ -54,6 +54,7 @@ struct proc_event {
PROC_EVENT_GID = 0x00000040,
PROC_EVENT_SID = 0x00000080,
PROC_EVENT_PTRACE = 0x00000100,
+ PROC_EVENT_COMM = 0x00000200,
/* "next" should be 0x00000400 */
/* "last" is the last process event: exit */
PROC_EVENT_EXIT = 0x80000000
@@ -103,6 +104,12 @@ struct proc_event {
__kernel_pid_t tracer_tgid;
} ptrace;
+ struct comm_proc_event {
+ __kernel_pid_t process_pid;
+ __kernel_pid_t process_tgid;
+ char comm[16];
+ } comm;
+
struct exit_proc_event {
__kernel_pid_t process_pid;
__kernel_pid_t process_tgid;
@@ -118,6 +125,7 @@ void proc_exec_connector(struct task_struct *task);
void proc_id_connector(struct task_struct *task, int which_id);
void proc_sid_connector(struct task_struct *task);
void proc_ptrace_connector(struct task_struct *task, int which_id);
+void proc_comm_connector(struct task_struct *task);
void proc_exit_connector(struct task_struct *task);
#else
static inline void proc_fork_connector(struct task_struct *task)
@@ -133,6 +141,9 @@ static inline void proc_id_connector(struct task_struct *task,
static inline void proc_sid_connector(struct task_struct *task)
{}
+static inline void proc_comm_connector(struct task_struct *task)
+{}
+
static inline void proc_ptrace_connector(struct task_struct *task,
int ptrace_id)
{}
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
new file mode 100644
index 00000000000..afb94583960
--- /dev/null
+++ b/include/linux/devfreq.h
@@ -0,0 +1,238 @@
+/*
+ * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
+ * for Non-CPU Devices.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_DEVFREQ_H__
+#define __LINUX_DEVFREQ_H__
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/opp.h>
+
+#define DEVFREQ_NAME_LEN 16
+
+struct devfreq;
+
+/**
+ * struct devfreq_dev_status - Data given from devfreq user device to
+ * governors. Represents the performance
+ * statistics.
+ * @total_time The total time represented by this instance of
+ * devfreq_dev_status
+ * @busy_time The time that the device was working among the
+ * total_time.
+ * @current_frequency The operating frequency.
+ * @private_data An entry not specified by the devfreq framework.
+ * A device and a specific governor may have their
+ * own protocol with private_data. However, because
+ * this is governor-specific, a governor using this
+ * will be only compatible with devices aware of it.
+ */
+struct devfreq_dev_status {
+ /* both since the last measure */
+ unsigned long total_time;
+ unsigned long busy_time;
+ unsigned long current_frequency;
+ void *private_date;
+};
+
+/**
+ * struct devfreq_dev_profile - Devfreq's user device profile
+ * @initial_freq The operating frequency when devfreq_add_device() is
+ * called.
+ * @polling_ms The polling interval in ms. 0 disables polling.
+ * @target The device should set its operating frequency at
+ * freq or lowest-upper-than-freq value. If freq is
+ * higher than any operable frequency, set maximum.
+ * Before returning, target function should set
+ * freq at the current frequency.
+ * @get_dev_status The device should provide the current performance
+ * status to devfreq, which is used by governors.
+ * @exit An optional callback that is called when devfreq
+ * is removing the devfreq object due to error or
+ * from devfreq_remove_device() call. If the user
+ * has registered devfreq->nb at a notifier-head,
+ * this is the time to unregister it.
+ */
+struct devfreq_dev_profile {
+ unsigned long initial_freq;
+ unsigned int polling_ms;
+
+ int (*target)(struct device *dev, unsigned long *freq);
+ int (*get_dev_status)(struct device *dev,
+ struct devfreq_dev_status *stat);
+ void (*exit)(struct device *dev);
+};
+
+/**
+ * struct devfreq_governor - Devfreq policy governor
+ * @name Governor's name
+ * @get_target_freq Returns desired operating frequency for the device.
+ * Basically, get_target_freq will run
+ * devfreq_dev_profile.get_dev_status() to get the
+ * status of the device (load = busy_time / total_time).
+ * If no_central_polling is set, this callback is called
+ * only with update_devfreq() notified by OPP.
+ * @init Called when the devfreq is being attached to a device
+ * @exit Called when the devfreq is being removed from a
+ * device. Governor should stop any internal routines
+ * before return because related data may be
+ * freed after exit().
+ * @no_central_polling Do not use devfreq's central polling mechanism.
+ * When this is set, devfreq will not call
+ * get_target_freq with devfreq_monitor(). However,
+ * devfreq will call get_target_freq with
+ * devfreq_update() notified by OPP framework.
+ *
+ * Note that the callbacks are called with devfreq->lock locked by devfreq.
+ */
+struct devfreq_governor {
+ const char name[DEVFREQ_NAME_LEN];
+ int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+ int (*init)(struct devfreq *this);
+ void (*exit)(struct devfreq *this);
+ const bool no_central_polling;
+};
+
+/**
+ * struct devfreq - Device devfreq structure
+ * @node list node - contains the devices with devfreq that have been
+ * registered.
+ * @lock a mutex to protect accessing devfreq.
+ * @dev device registered by devfreq class. dev.parent is the device
+ * using devfreq.
+ * @profile device-specific devfreq profile
+ * @governor method how to choose frequency based on the usage.
+ * @nb notifier block used to notify devfreq object that it should
+ * reevaluate operable frequencies. Devfreq users may use
+ * devfreq.nb to the corresponding register notifier call chain.
+ * @polling_jiffies interval in jiffies.
+ * @previous_freq previously configured frequency value.
+ * @next_polling the number of remaining jiffies to poll with
+ * "devfreq_monitor" executions to reevaluate
+ * frequency/voltage of the device. Set by
+ * profile's polling_ms interval.
+ * @data Private data of the governor. The devfreq framework does not
+ * touch this.
+ * @being_removed a flag to mark that this object is being removed in
+ * order to prevent trying to remove the object multiple times.
+ *
+ * This structure stores the devfreq information for a give device.
+ *
+ * Note that when a governor accesses entries in struct devfreq in its
+ * functions except for the context of callbacks defined in struct
+ * devfreq_governor, the governor should protect its access with the
+ * struct mutex lock in struct devfreq. A governor may use this mutex
+ * to protect its own private data in void *data as well.
+ */
+struct devfreq {
+ struct list_head node;
+
+ struct mutex lock;
+ struct device dev;
+ struct devfreq_dev_profile *profile;
+ const struct devfreq_governor *governor;
+ struct notifier_block nb;
+
+ unsigned long polling_jiffies;
+ unsigned long previous_freq;
+ unsigned int next_polling;
+
+ void *data; /* private data for governors */
+
+ bool being_removed;
+};
+
+#if defined(CONFIG_PM_DEVFREQ)
+extern struct devfreq *devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const struct devfreq_governor *governor,
+ void *data);
+extern int devfreq_remove_device(struct devfreq *devfreq);
+
+/* Helper functions for devfreq user device driver with OPP. */
+extern struct opp *devfreq_recommended_opp(struct device *dev,
+ unsigned long *freq);
+extern int devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+extern int devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+
+#ifdef CONFIG_DEVFREQ_GOV_POWERSAVE
+extern const struct devfreq_governor devfreq_powersave;
+#endif
+#ifdef CONFIG_DEVFREQ_GOV_PERFORMANCE
+extern const struct devfreq_governor devfreq_performance;
+#endif
+#ifdef CONFIG_DEVFREQ_GOV_USERSPACE
+extern const struct devfreq_governor devfreq_userspace;
+#endif
+#ifdef CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND
+extern const struct devfreq_governor devfreq_simple_ondemand;
+/**
+ * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
+ * and devfreq_add_device
+ * @ upthreshold If the load is over this value, the frequency jumps.
+ * Specify 0 to use the default. Valid value = 0 to 100.
+ * @ downdifferential If the load is under upthreshold - downdifferential,
+ * the governor may consider slowing the frequency down.
+ * Specify 0 to use the default. Valid value = 0 to 100.
+ * downdifferential < upthreshold must hold.
+ *
+ * If the fed devfreq_simple_ondemand_data pointer is NULL to the governor,
+ * the governor uses the default values.
+ */
+struct devfreq_simple_ondemand_data {
+ unsigned int upthreshold;
+ unsigned int downdifferential;
+};
+#endif
+
+#else /* !CONFIG_PM_DEVFREQ */
+static struct devfreq *devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ struct devfreq_governor *governor,
+ void *data);
+{
+ return NULL;
+}
+
+static int devfreq_remove_device(struct devfreq *devfreq);
+{
+ return 0;
+}
+
+static struct opp *devfreq_recommended_opp(struct device *dev,
+ unsigned long *freq)
+{
+ return -EINVAL;
+}
+
+static int devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq)
+{
+ return -EINVAL;
+}
+
+static int devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq)
+{
+ return -EINVAL;
+}
+
+#define devfreq_powersave NULL
+#define devfreq_performance NULL
+#define devfreq_userspace NULL
+#define devfreq_simple_ondemand NULL
+
+#endif /* CONFIG_PM_DEVFREQ */
+
+#endif /* __LINUX_DEVFREQ_H__ */
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 3fa1f3d90ce..99e3e50b5c5 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -197,6 +197,11 @@ struct dm_target {
* whether or not its underlying devices have support.
*/
unsigned discards_supported:1;
+
+ /*
+ * Set if this target does not return zeroes on discarded blocks.
+ */
+ unsigned discard_zeroes_data_unsupported:1;
};
/* Each target can link one of these into the table */
diff --git a/include/linux/device.h b/include/linux/device.h
index c20dfbfc49b..bdcf361ca93 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -350,6 +350,8 @@ struct class_attribute {
char *buf);
ssize_t (*store)(struct class *class, struct class_attribute *attr,
const char *buf, size_t count);
+ const void *(*namespace)(struct class *class,
+ const struct class_attribute *attr);
};
#define CLASS_ATTR(_name, _mode, _show, _store) \
@@ -636,6 +638,11 @@ static inline void set_dev_node(struct device *dev, int node)
}
#endif
+static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
+{
+ return dev ? dev->power.subsys_data : NULL;
+}
+
static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
{
return dev->kobj.uevent_suppress;
@@ -785,6 +792,8 @@ extern const char *dev_driver_string(const struct device *dev);
#ifdef CONFIG_PRINTK
+extern int __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf);
extern int dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
__attribute__ ((format (printf, 3, 4)));
@@ -805,6 +814,9 @@ extern int _dev_info(const struct device *dev, const char *fmt, ...)
#else
+static inline int __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf)
+ { return 0; }
static inline int dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
__attribute__ ((format (printf, 3, 4)));
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index bbd8661b347..ef90cbd8e17 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -25,11 +25,12 @@ struct intel_iommu;
struct dmar_domain;
struct root_entry;
-extern void free_dmar_iommu(struct intel_iommu *iommu);
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
+extern void free_dmar_iommu(struct intel_iommu *iommu);
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
+extern int dmar_disabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
@@ -39,8 +40,11 @@ static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
return 0;
}
+static inline void free_dmar_iommu(struct intel_iommu *iommu)
+{
+}
+#define dmar_disabled (1)
#endif
-extern int dmar_disabled;
#endif
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 7b776d71d36..a8b1a847c10 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -26,8 +26,13 @@
#include <linux/msi.h>
#include <linux/irqreturn.h>
+/* DMAR Flags */
+#define DMAR_INTR_REMAP 0x1
+#define DMAR_X2APIC_OPT_OUT 0x2
+
struct intel_iommu;
-#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
+#ifdef CONFIG_DMAR_TABLE
+extern struct acpi_table_header *dmar_tbl;
struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
struct acpi_dmar_header *hdr; /* ACPI header */
@@ -76,7 +81,7 @@ static inline int enable_drhd_fault_handling(void)
{
return -1;
}
-#endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */
+#endif /* !CONFIG_DMAR_TABLE */
struct irte {
union {
@@ -107,10 +112,10 @@ struct irte {
};
};
-#ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_IRQ_REMAP
extern int intr_remapping_enabled;
extern int intr_remapping_supported(void);
-extern int enable_intr_remapping(int);
+extern int enable_intr_remapping(void);
extern void disable_intr_remapping(void);
extern int reenable_intr_remapping(int);
@@ -177,7 +182,7 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
#define intr_remapping_enabled (0)
-static inline int enable_intr_remapping(int eim)
+static inline int enable_intr_remapping(void)
{
return -1;
}
@@ -192,6 +197,11 @@ static inline int reenable_intr_remapping(int eim)
}
#endif
+enum {
+ IRQ_REMAP_XAPIC_MODE,
+ IRQ_REMAP_X2APIC_MODE,
+};
+
/* Can't use the common MSI interrupt functions
* since DMAR is not a pci device
*/
@@ -204,7 +214,7 @@ extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
extern int arch_setup_dmar_msi(unsigned int irq);
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern struct list_head dmar_rmrr_units;
struct dmar_rmrr_unit {
@@ -227,9 +237,26 @@ struct dmar_atsr_unit {
u8 include_all:1; /* include all ports */
};
+int dmar_parse_rmrr_atsr_dev(void);
+extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
+extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
+extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
+ struct pci_dev ***devices, u16 segment);
extern int intel_iommu_init(void);
-#else /* !CONFIG_DMAR: */
+#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
-#endif /* CONFIG_DMAR */
+static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+{
+ return 0;
+}
+static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
+{
+ return 0;
+}
+static inline int dmar_parse_rmrr_atsr_dev(void)
+{
+ return 0;
+}
+#endif /* CONFIG_INTEL_IOMMU */
#endif /* __DMAR_H__ */
diff --git a/include/linux/drbd_tag_magic.h b/include/linux/drbd_tag_magic.h
index 06954319051..81f52f2c572 100644
--- a/include/linux/drbd_tag_magic.h
+++ b/include/linux/drbd_tag_magic.h
@@ -28,7 +28,7 @@ enum packet_types {
#define NL_STRING(pn, pr, member, len) \
unsigned char member[len]; int member ## _len; \
int tag_and_len ## member;
-#include "linux/drbd_nl.h"
+#include <linux/drbd_nl.h>
/* declare tag-list-sizes */
static const int tag_list_sizes[] = {
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index e747ecd48e1..13aae8087b5 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -1,13 +1,6 @@
#ifndef _DYNAMIC_DEBUG_H
#define _DYNAMIC_DEBUG_H
-/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
-extern long long dynamic_debug_enabled;
-extern long long dynamic_debug_enabled2;
-
/*
* An instance of this structure is created in a special
* ELF section at every dynamic debug callsite. At runtime,
@@ -47,26 +40,55 @@ extern int ddebug_remove_module(const char *mod_name);
extern int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-#define dynamic_pr_debug(fmt, ...) do { \
- static struct _ddebug descriptor \
- __used \
- __attribute__((section("__verbose"), aligned(8))) = \
- { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
- _DPRINTK_FLAGS_DEFAULT }; \
- if (unlikely(descriptor.enabled)) \
- __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
- } while (0)
-
-
-#define dynamic_dev_dbg(dev, fmt, ...) do { \
- static struct _ddebug descriptor \
- __used \
- __attribute__((section("__verbose"), aligned(8))) = \
- { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
- _DPRINTK_FLAGS_DEFAULT }; \
- if (unlikely(descriptor.enabled)) \
- dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
- } while (0)
+struct device;
+
+extern int __dynamic_dev_dbg(struct _ddebug *descriptor,
+ const struct device *dev,
+ const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+
+struct net_device;
+
+extern int __dynamic_netdev_dbg(struct _ddebug *descriptor,
+ const struct net_device *dev,
+ const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ static struct _ddebug __used __aligned(8) \
+ __attribute__((section("__verbose"))) name = { \
+ .modname = KBUILD_MODNAME, \
+ .function = __func__, \
+ .filename = __FILE__, \
+ .format = (fmt), \
+ .lineno = __LINE__, \
+ .flags = _DPRINTK_FLAGS_DEFAULT, \
+ .enabled = false, \
+ }
+
+#define dynamic_pr_debug(fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.enabled)) \
+ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define dynamic_dev_dbg(dev, fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.enabled)) \
+ __dynamic_dev_dbg(&descriptor, dev, fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define dynamic_netdev_dbg(dev, fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.enabled)) \
+ __dynamic_netdev_dbg(&descriptor, dev, fmt, \
+ ##__VA_ARGS__); \
+} while (0)
#else
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index c6e427ab65f..45f00b61c09 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -117,99 +117,101 @@ struct ethtool_eeprom {
__u8 data[0];
};
-/* for configuring coalescing parameters of chip */
+/**
+ * struct ethtool_coalesce - coalescing parameters for IRQs and stats updates
+ * @cmd: ETHTOOL_{G,S}COALESCE
+ * @rx_coalesce_usecs: How many usecs to delay an RX interrupt after
+ * a packet arrives.
+ * @rx_max_coalesced_frames: Maximum number of packets to receive
+ * before an RX interrupt.
+ * @rx_coalesce_usecs_irq: Same as @rx_coalesce_usecs, except that
+ * this value applies while an IRQ is being serviced by the host.
+ * @rx_max_coalesced_frames_irq: Same as @rx_max_coalesced_frames,
+ * except that this value applies while an IRQ is being serviced
+ * by the host.
+ * @tx_coalesce_usecs: How many usecs to delay a TX interrupt after
+ * a packet is sent.
+ * @tx_max_coalesced_frames: Maximum number of packets to be sent
+ * before a TX interrupt.
+ * @tx_coalesce_usecs_irq: Same as @tx_coalesce_usecs, except that
+ * this value applies while an IRQ is being serviced by the host.
+ * @tx_max_coalesced_frames_irq: Same as @tx_max_coalesced_frames,
+ * except that this value applies while an IRQ is being serviced
+ * by the host.
+ * @stats_block_coalesce_usecs: How many usecs to delay in-memory
+ * statistics block updates. Some drivers do not have an
+ * in-memory statistic block, and in such cases this value is
+ * ignored. This value must not be zero.
+ * @use_adaptive_rx_coalesce: Enable adaptive RX coalescing.
+ * @use_adaptive_tx_coalesce: Enable adaptive TX coalescing.
+ * @pkt_rate_low: Threshold for low packet rate (packets per second).
+ * @rx_coalesce_usecs_low: How many usecs to delay an RX interrupt after
+ * a packet arrives, when the packet rate is below @pkt_rate_low.
+ * @rx_max_coalesced_frames_low: Maximum number of packets to be received
+ * before an RX interrupt, when the packet rate is below @pkt_rate_low.
+ * @tx_coalesce_usecs_low: How many usecs to delay a TX interrupt after
+ * a packet is sent, when the packet rate is below @pkt_rate_low.
+ * @tx_max_coalesced_frames_low: Maximum nuumber of packets to be sent before
+ * a TX interrupt, when the packet rate is below @pkt_rate_low.
+ * @pkt_rate_high: Threshold for high packet rate (packets per second).
+ * @rx_coalesce_usecs_high: How many usecs to delay an RX interrupt after
+ * a packet arrives, when the packet rate is above @pkt_rate_high.
+ * @rx_max_coalesced_frames_high: Maximum number of packets to be received
+ * before an RX interrupt, when the packet rate is above @pkt_rate_high.
+ * @tx_coalesce_usecs_high: How many usecs to delay a TX interrupt after
+ * a packet is sent, when the packet rate is above @pkt_rate_high.
+ * @tx_max_coalesced_frames_high: Maximum number of packets to be sent before
+ * a TX interrupt, when the packet rate is above @pkt_rate_high.
+ * @rate_sample_interval: How often to do adaptive coalescing packet rate
+ * sampling, measured in seconds. Must not be zero.
+ *
+ * Each pair of (usecs, max_frames) fields specifies this exit
+ * condition for interrupt coalescing:
+ * (usecs > 0 && time_since_first_completion >= usecs) ||
+ * (max_frames > 0 && completed_frames >= max_frames)
+ * It is illegal to set both usecs and max_frames to zero as this
+ * would cause interrupts to never be generated. To disable
+ * coalescing, set usecs = 0 and max_frames = 1.
+ *
+ * Some implementations ignore the value of max_frames and use the
+ * condition:
+ * time_since_first_completion >= usecs
+ * This is deprecated. Drivers for hardware that does not support
+ * counting completions should validate that max_frames == !rx_usecs.
+ *
+ * Adaptive RX/TX coalescing is an algorithm implemented by some
+ * drivers to improve latency under low packet rates and improve
+ * throughput under high packet rates. Some drivers only implement
+ * one of RX or TX adaptive coalescing. Anything not implemented by
+ * the driver causes these values to be silently ignored.
+ *
+ * When the packet rate is below @pkt_rate_high but above
+ * @pkt_rate_low (both measured in packets per second) the
+ * normal {rx,tx}_* coalescing parameters are used.
+ */
struct ethtool_coalesce {
- __u32 cmd; /* ETHTOOL_{G,S}COALESCE */
-
- /* How many usecs to delay an RX interrupt after
- * a packet arrives. If 0, only rx_max_coalesced_frames
- * is used.
- */
+ __u32 cmd;
__u32 rx_coalesce_usecs;
-
- /* How many packets to delay an RX interrupt after
- * a packet arrives. If 0, only rx_coalesce_usecs is
- * used. It is illegal to set both usecs and max frames
- * to zero as this would cause RX interrupts to never be
- * generated.
- */
__u32 rx_max_coalesced_frames;
-
- /* Same as above two parameters, except that these values
- * apply while an IRQ is being serviced by the host. Not
- * all cards support this feature and the values are ignored
- * in that case.
- */
__u32 rx_coalesce_usecs_irq;
__u32 rx_max_coalesced_frames_irq;
-
- /* How many usecs to delay a TX interrupt after
- * a packet is sent. If 0, only tx_max_coalesced_frames
- * is used.
- */
__u32 tx_coalesce_usecs;
-
- /* How many packets to delay a TX interrupt after
- * a packet is sent. If 0, only tx_coalesce_usecs is
- * used. It is illegal to set both usecs and max frames
- * to zero as this would cause TX interrupts to never be
- * generated.
- */
__u32 tx_max_coalesced_frames;
-
- /* Same as above two parameters, except that these values
- * apply while an IRQ is being serviced by the host. Not
- * all cards support this feature and the values are ignored
- * in that case.
- */
__u32 tx_coalesce_usecs_irq;
__u32 tx_max_coalesced_frames_irq;
-
- /* How many usecs to delay in-memory statistics
- * block updates. Some drivers do not have an in-memory
- * statistic block, and in such cases this value is ignored.
- * This value must not be zero.
- */
__u32 stats_block_coalesce_usecs;
-
- /* Adaptive RX/TX coalescing is an algorithm implemented by
- * some drivers to improve latency under low packet rates and
- * improve throughput under high packet rates. Some drivers
- * only implement one of RX or TX adaptive coalescing. Anything
- * not implemented by the driver causes these values to be
- * silently ignored.
- */
__u32 use_adaptive_rx_coalesce;
__u32 use_adaptive_tx_coalesce;
-
- /* When the packet rate (measured in packets per second)
- * is below pkt_rate_low, the {rx,tx}_*_low parameters are
- * used.
- */
__u32 pkt_rate_low;
__u32 rx_coalesce_usecs_low;
__u32 rx_max_coalesced_frames_low;
__u32 tx_coalesce_usecs_low;
__u32 tx_max_coalesced_frames_low;
-
- /* When the packet rate is below pkt_rate_high but above
- * pkt_rate_low (both measured in packets per second) the
- * normal {rx,tx}_* coalescing parameters are used.
- */
-
- /* When the packet rate is (measured in packets per second)
- * is above pkt_rate_high, the {rx,tx}_*_high parameters are
- * used.
- */
__u32 pkt_rate_high;
__u32 rx_coalesce_usecs_high;
__u32 rx_max_coalesced_frames_high;
__u32 tx_coalesce_usecs_high;
__u32 tx_max_coalesced_frames_high;
-
- /* How often to do adaptive coalescing packet rate sampling,
- * measured in seconds. Must not be zero.
- */
__u32 rate_sample_interval;
};
@@ -444,7 +446,7 @@ struct ethtool_flow_ext {
};
/**
- * struct ethtool_rx_flow_spec - specification for RX flow filter
+ * struct ethtool_rx_flow_spec - classification rule for RX flows
* @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW
* @h_u: Flow fields to match (dependent on @flow_type)
* @h_ext: Additional fields to match
@@ -454,7 +456,9 @@ struct ethtool_flow_ext {
* includes the %FLOW_EXT flag.
* @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC
* if packets should be discarded
- * @location: Index of filter in hardware table
+ * @location: Location of rule in the table. Locations must be
+ * numbered such that a flow matching multiple rules will be
+ * classified according to the first (lowest numbered) rule.
*/
struct ethtool_rx_flow_spec {
__u32 flow_type;
@@ -473,9 +477,9 @@ struct ethtool_rx_flow_spec {
* %ETHTOOL_GRXCLSRLALL, %ETHTOOL_SRXCLSRLDEL or %ETHTOOL_SRXCLSRLINS
* @flow_type: Type of flow to be affected, e.g. %TCP_V4_FLOW
* @data: Command-dependent value
- * @fs: Flow filter specification
+ * @fs: Flow classification rule
* @rule_cnt: Number of rules to be affected
- * @rule_locs: Array of valid rule indices
+ * @rule_locs: Array of used rule locations
*
* For %ETHTOOL_GRXFH and %ETHTOOL_SRXFH, @data is a bitmask indicating
* the fields included in the flow hash, e.g. %RXH_IP_SRC. The following
@@ -487,23 +491,20 @@ struct ethtool_rx_flow_spec {
* For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined
* rules on return.
*
- * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the index of an
- * existing filter rule on entry and @fs contains the rule on return.
+ * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the location of an
+ * existing rule on entry and @fs contains the rule on return.
*
* For %ETHTOOL_GRXCLSRLALL, @rule_cnt specifies the array size of the
* user buffer for @rule_locs on entry. On return, @data is the size
- * of the filter table and @rule_locs contains the indices of the
- * defined rules.
+ * of the rule table, @rule_cnt is the number of defined rules, and
+ * @rule_locs contains the locations of the defined rules. Drivers
+ * must use the second parameter to get_rxnfc() instead of @rule_locs.
*
- * For %ETHTOOL_SRXCLSRLINS, @fs specifies the filter rule to add or
- * update. @fs.@location specifies the index to use and must not be
- * ignored.
+ * For %ETHTOOL_SRXCLSRLINS, @fs specifies the rule to add or update.
+ * @fs.@location specifies the location to use and must not be ignored.
*
- * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the index of an
- * existing filter rule on entry.
- *
- * Implementation of indexed classification rules generally requires a
- * TCAM.
+ * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the location of an
+ * existing rule on entry.
*/
struct ethtool_rxnfc {
__u32 cmd;
@@ -726,6 +727,9 @@ enum ethtool_sfeatures_retval_bits {
/* needed by dev_disable_lro() */
extern int __ethtool_set_flags(struct net_device *dev, u32 flags);
+extern int __ethtool_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd);
+
/**
* enum ethtool_phys_id_state - indicator state for physical identification
* @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated
@@ -936,7 +940,7 @@ struct ethtool_ops {
int (*set_priv_flags)(struct net_device *, u32);
int (*get_sset_count)(struct net_device *, int);
int (*get_rxnfc)(struct net_device *,
- struct ethtool_rxnfc *, void *);
+ struct ethtool_rxnfc *, u32 *rule_locs);
int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
int (*flash_device)(struct net_device *, struct ethtool_flash *);
int (*reset)(struct net_device *, u32 *);
diff --git a/include/linux/evm.h b/include/linux/evm.h
new file mode 100644
index 00000000000..9fc13a76092
--- /dev/null
+++ b/include/linux/evm.h
@@ -0,0 +1,100 @@
+/*
+ * evm.h
+ *
+ * Copyright (c) 2009 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ */
+
+#ifndef _LINUX_EVM_H
+#define _LINUX_EVM_H
+
+#include <linux/integrity.h>
+#include <linux/xattr.h>
+
+struct integrity_iint_cache;
+
+#ifdef CONFIG_EVM
+extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint);
+extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr);
+extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid);
+extern int evm_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size);
+extern void evm_inode_post_setxattr(struct dentry *dentry,
+ const char *xattr_name,
+ const void *xattr_value,
+ size_t xattr_value_len);
+extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name);
+extern void evm_inode_post_removexattr(struct dentry *dentry,
+ const char *xattr_name);
+extern int evm_inode_init_security(struct inode *inode,
+ const struct xattr *xattr_array,
+ struct xattr *evm);
+#ifdef CONFIG_FS_POSIX_ACL
+extern int posix_xattr_acl(const char *xattrname);
+#else
+static inline int posix_xattr_acl(const char *xattrname)
+{
+ return 0;
+}
+#endif
+#else
+#ifdef CONFIG_INTEGRITY
+static inline enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ return INTEGRITY_UNKNOWN;
+}
+#endif
+
+static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+{
+ return;
+}
+
+static inline int evm_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_setxattr(struct dentry *dentry,
+ const char *xattr_name,
+ const void *xattr_value,
+ size_t xattr_value_len)
+{
+ return;
+}
+
+static inline int evm_inode_removexattr(struct dentry *dentry,
+ const char *xattr_name)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_removexattr(struct dentry *dentry,
+ const char *xattr_name)
+{
+ return;
+}
+
+static inline int evm_inode_init_security(struct inode *inode,
+ const struct xattr *xattr_array,
+ struct xattr *evm)
+{
+ return 0;
+}
+
+#endif /* CONFIG_EVM_H */
+#endif /* LINUX_EVM_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 741956fa5bf..8eeb205f298 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -155,7 +155,7 @@ extern unsigned int sk_run_filter(const struct sk_buff *skb,
const struct sock_filter *filter);
extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
extern int sk_detach_filter(struct sock *sk);
-extern int sk_chk_filter(struct sock_filter *filter, int flen);
+extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
#ifdef CONFIG_BPF_JIT
extern void bpf_jit_compile(struct sk_filter *fp);
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 1effc8b56b4..aa56cf31f7f 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -49,6 +49,7 @@ extern int thaw_process(struct task_struct *p);
extern void refrigerator(void);
extern int freeze_processes(void);
+extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
static inline int try_to_freeze(void)
@@ -171,7 +172,8 @@ static inline void clear_freeze_flag(struct task_struct *p) {}
static inline int thaw_process(struct task_struct *p) { return 1; }
static inline void refrigerator(void) {}
-static inline int freeze_processes(void) { BUG(); return 0; }
+static inline int freeze_processes(void) { return -ENOSYS; }
+static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline int try_to_freeze(void) { return 0; }
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 277f497923a..ba98668a182 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1063,6 +1063,8 @@ static inline int file_check_writeable(struct file *filp)
#define FL_LEASE 32 /* lease held on this file */
#define FL_CLOSE 64 /* unlock on close */
#define FL_SLEEP 128 /* A blocking lock */
+#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
+#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
/*
* Special return value from posix_lock_file() and vfs_lock_file() for
@@ -1109,7 +1111,7 @@ struct file_lock {
struct list_head fl_link; /* doubly linked list of all locks */
struct list_head fl_block; /* circular list of blocked processes */
fl_owner_t fl_owner;
- unsigned char fl_flags;
+ unsigned int fl_flags;
unsigned char fl_type;
unsigned int fl_pid;
struct pid *fl_nspid;
@@ -1119,7 +1121,9 @@ struct file_lock {
loff_t fl_end;
struct fasync_struct * fl_fasync; /* for lease break notifications */
- unsigned long fl_break_time; /* for nonblocking lease breaks */
+ /* for lease breaks: */
+ unsigned long fl_break_time;
+ unsigned long fl_downgrade_time;
const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 9cf8e7ae745..deed5f9a1e1 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -71,6 +71,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/input.h>
+#include <linux/semaphore.h>
/*
* We parse each description item into this structure. Short items data
@@ -312,6 +313,7 @@ struct hid_item {
#define HID_QUIRK_BADPAD 0x00000020
#define HID_QUIRK_MULTI_INPUT 0x00000040
#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
+#define HID_QUIRK_MULTITOUCH 0x00000100
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
@@ -475,6 +477,7 @@ struct hid_device { /* device report descriptor */
unsigned country; /* HID country */
struct hid_report_enum report_enum[HID_REPORT_TYPES];
+ struct semaphore driver_lock; /* protects the current driver */
struct device dev; /* device */
struct hid_driver *driver;
struct hid_ll_driver *ll_driver;
diff --git a/drivers/staging/hv/hyperv.h b/include/linux/hyperv.h
index 1747a2404f6..12ec328481d 100644
--- a/drivers/staging/hv/hyperv.h
+++ b/include/linux/hyperv.h
@@ -27,18 +27,14 @@
#include <linux/scatterlist.h>
#include <linux/list.h>
+#include <linux/uuid.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
-#include <asm/hyperv.h>
-
-struct hv_guid {
- unsigned char data[16];
-};
-
#define MAX_PAGE_BUFFER_COUNT 16
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
@@ -156,8 +152,8 @@ struct hv_ring_buffer_debug_info {
* struct contains the fundamental information about an offer.
*/
struct vmbus_channel_offer {
- struct hv_guid if_type;
- struct hv_guid if_instance;
+ uuid_le if_type;
+ uuid_le if_instance;
u64 int_latency; /* in 100ns units */
u32 if_revision;
u32 server_ctx_size; /* in bytes */
@@ -526,8 +522,8 @@ enum vmbus_channel_state {
struct vmbus_channel_debug_info {
u32 relid;
enum vmbus_channel_state state;
- struct hv_guid interfacetype;
- struct hv_guid interface_instance;
+ uuid_le interfacetype;
+ uuid_le interface_instance;
u32 monitorid;
u32 servermonitor_pending;
u32 servermonitor_latency;
@@ -582,11 +578,6 @@ struct vmbus_channel {
struct work_struct work;
enum vmbus_channel_state state;
- /*
- * For util channels, stash the
- * the service index for easy access.
- */
- s8 util_index;
struct vmbus_channel_offer_channel offermsg;
/*
@@ -615,8 +606,6 @@ struct vmbus_channel {
void *channel_callback_context;
};
-void free_channel(struct vmbus_channel *channel);
-
void vmbus_onmessage(void *context);
int vmbus_request_offers(void);
@@ -702,79 +691,6 @@ extern void vmbus_get_debug_info(struct vmbus_channel *channel,
extern void vmbus_ontimer(unsigned long data);
-
-#define LOWORD(dw) ((unsigned short)(dw))
-#define HIWORD(dw) ((unsigned short)(((unsigned int) (dw) >> 16) & 0xFFFF))
-
-
-#define VMBUS 0x0001
-#define STORVSC 0x0002
-#define NETVSC 0x0004
-#define INPUTVSC 0x0008
-#define BLKVSC 0x0010
-#define VMBUS_DRV 0x0100
-#define STORVSC_DRV 0x0200
-#define NETVSC_DRV 0x0400
-#define INPUTVSC_DRV 0x0800
-#define BLKVSC_DRV 0x1000
-
-#define ALL_MODULES (VMBUS |\
- STORVSC |\
- NETVSC |\
- INPUTVSC |\
- BLKVSC |\
- VMBUS_DRV |\
- STORVSC_DRV |\
- NETVSC_DRV |\
- INPUTVSC_DRV|\
- BLKVSC_DRV)
-
-/* Logging Level */
-#define ERROR_LVL 3
-#define WARNING_LVL 4
-#define INFO_LVL 6
-#define DEBUG_LVL 7
-#define DEBUG_LVL_ENTEREXIT 8
-#define DEBUG_RING_LVL 9
-
-extern unsigned int vmbus_loglevel;
-
-#define DPRINT(mod, lvl, fmt, args...) do {\
- if ((mod & (HIWORD(vmbus_loglevel))) && \
- (lvl <= LOWORD(vmbus_loglevel))) \
- printk(KERN_DEBUG #mod": %s() " fmt "\n", __func__, ## args);\
- } while (0)
-
-#define DPRINT_DBG(mod, fmt, args...) do {\
- if ((mod & (HIWORD(vmbus_loglevel))) && \
- (DEBUG_LVL <= LOWORD(vmbus_loglevel))) \
- printk(KERN_DEBUG #mod": %s() " fmt "\n", __func__, ## args);\
- } while (0)
-
-#define DPRINT_INFO(mod, fmt, args...) do {\
- if ((mod & (HIWORD(vmbus_loglevel))) && \
- (INFO_LVL <= LOWORD(vmbus_loglevel))) \
- printk(KERN_INFO #mod": " fmt "\n", ## args);\
- } while (0)
-
-#define DPRINT_WARN(mod, fmt, args...) do {\
- if ((mod & (HIWORD(vmbus_loglevel))) && \
- (WARNING_LVL <= LOWORD(vmbus_loglevel))) \
- printk(KERN_WARNING #mod": WARNING! " fmt "\n", ## args);\
- } while (0)
-
-#define DPRINT_ERR(mod, fmt, args...) do {\
- if ((mod & (HIWORD(vmbus_loglevel))) && \
- (ERROR_LVL <= LOWORD(vmbus_loglevel))) \
- printk(KERN_ERR #mod": %s() ERROR!! " fmt "\n", \
- __func__, ## args);\
- } while (0)
-
-
-
-struct hv_driver;
-struct hv_device;
-
struct hv_dev_port_info {
u32 int_mask;
u32 read_idx;
@@ -783,34 +699,17 @@ struct hv_dev_port_info {
u32 bytes_avail_towrite;
};
-struct hv_device_info {
- u32 chn_id;
- u32 chn_state;
- struct hv_guid chn_type;
- struct hv_guid chn_instance;
-
- u32 monitor_id;
- u32 server_monitor_pending;
- u32 server_monitor_latency;
- u32 server_monitor_conn_id;
- u32 client_monitor_pending;
- u32 client_monitor_latency;
- u32 client_monitor_conn_id;
-
- struct hv_dev_port_info inbound;
- struct hv_dev_port_info outbound;
-};
-
/* Base driver object */
struct hv_driver {
const char *name;
/* the device type supported by this driver */
- struct hv_guid dev_type;
+ uuid_le dev_type;
+ const struct hv_vmbus_device_id *id_table;
struct device_driver driver;
- int (*probe)(struct hv_device *);
+ int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
int (*remove)(struct hv_device *);
void (*shutdown)(struct hv_device *);
@@ -819,17 +718,14 @@ struct hv_driver {
/* Base device object */
struct hv_device {
/* the device type id of this device */
- struct hv_guid dev_type;
+ uuid_le dev_type;
/* the device instance id of this device */
- struct hv_guid dev_instance;
+ uuid_le dev_instance;
struct device device;
struct vmbus_channel *channel;
-
- /* Device extension; */
- void *ext;
};
@@ -843,10 +739,34 @@ static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
return container_of(d, struct hv_driver, driver);
}
+static inline void hv_set_drvdata(struct hv_device *dev, void *data)
+{
+ dev_set_drvdata(&dev->device, data);
+}
+
+static inline void *hv_get_drvdata(struct hv_device *dev)
+{
+ return dev_get_drvdata(&dev->device);
+}
/* Vmbus interface */
-int vmbus_child_driver_register(struct device_driver *drv);
-void vmbus_child_driver_unregister(struct device_driver *drv);
+#define vmbus_driver_register(driver) \
+ __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
+ struct module *owner,
+ const char *mod_name);
+void vmbus_driver_unregister(struct hv_driver *hv_driver);
+
+/**
+ * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
+ *
+ * This macro is used to create a struct hv_vmbus_device_id that matches a
+ * specific device.
+ */
+#define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7, \
+ g8, g9, ga, gb, gc, gd, ge, gf) \
+ .guid = { g0, g1, g2, g3, g4, g5, g6, g7, \
+ g8, g9, ga, gb, gc, gd, ge, gf },
/*
* Common header for Hyper-V ICs
@@ -868,6 +788,19 @@ void vmbus_child_driver_unregister(struct device_driver *drv);
#define HV_ERROR_NOT_SUPPORTED 0x80070032
#define HV_ERROR_MACHINE_LOCKED 0x800704F7
+/*
+ * While we want to handle util services as regular devices,
+ * there is only one instance of each of these services; so
+ * we statically allocate the service specific state.
+ */
+
+struct hv_util_service {
+ u8 *recv_buffer;
+ void (*util_cb)(void *);
+ int (*util_init)(struct hv_util_service *);
+ void (*util_deinit)(void);
+};
+
struct vmbuspipe_hdr {
u32 flags;
u32 msgsize;
@@ -926,23 +859,15 @@ struct ictimesync_data {
u8 flags;
} __packed;
-/* Index for each IC struct in array hv_cb_utils[] */
-#define HV_SHUTDOWN_MSG 0
-#define HV_TIMESYNC_MSG 1
-#define HV_HEARTBEAT_MSG 2
-#define HV_KVP_MSG 3
-
struct hyperv_service_callback {
u8 msg_type;
char *log_msg;
- unsigned char data[16];
+ uuid_le data;
struct vmbus_channel *channel;
void (*callback) (void *context);
};
-extern void prep_negotiate_resp(struct icmsg_hdr *,
- struct icmsg_negotiate *, u8 *);
-extern void chn_cb_negotiate(void *);
-extern struct hyperv_service_callback hv_cb_utils[];
+extern void vmbus_prep_negotiate_resp(struct icmsg_hdr *,
+ struct icmsg_negotiate *, u8 *);
#endif /* _HYPERV_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 54c87896087..48363c3c40f 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -130,6 +130,8 @@
#define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK 0x0060
/* A-MSDU 802.11n */
#define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080
+/* Mesh Control 802.11s */
+#define IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT 0x0100
/* U-APSD queue for WMM IEs sent by AP */
#define IEEE80211_WMM_IE_AP_QOSINFO_UAPSD (1<<7)
@@ -629,9 +631,14 @@ struct ieee80211_rann_ie {
u8 rann_ttl;
u8 rann_addr[6];
u32 rann_seq;
+ u32 rann_interval;
u32 rann_metric;
} __attribute__ ((packed));
+enum ieee80211_rann_flags {
+ RANN_FLAG_IS_GATE = 1 << 0,
+};
+
#define WLAN_SA_QUERY_TR_ID_LEN 2
struct ieee80211_mgmt {
@@ -736,19 +743,10 @@ struct ieee80211_mgmt {
__le16 params;
__le16 reason_code;
} __attribute__((packed)) delba;
- struct{
+ struct {
u8 action_code;
- /* capab_info for open and confirm,
- * reason for close
- */
- __le16 aux;
- /* Followed in plink_confirm by status
- * code, AID and supported rates,
- * and directly by supported rates in
- * plink_open and plink_close
- */
u8 variable[0];
- } __attribute__((packed)) plink_action;
+ } __attribute__((packed)) self_prot;
struct{
u8 action_code;
u8 variable[0];
@@ -761,6 +759,12 @@ struct ieee80211_mgmt {
u8 action;
u8 smps_control;
} __attribute__ ((packed)) ht_smps;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ __le16 capability;
+ u8 variable[0];
+ } __packed tdls_discover_resp;
} u;
} __attribute__ ((packed)) action;
} u;
@@ -779,6 +783,13 @@ struct ieee80211_mmie {
u8 mic[8];
} __attribute__ ((packed));
+struct ieee80211_vendor_ie {
+ u8 element_id;
+ u8 len;
+ u8 oui[3];
+ u8 oui_type;
+} __packed;
+
/* Control frames */
struct ieee80211_rts {
__le16 frame_control;
@@ -800,6 +811,52 @@ struct ieee80211_pspoll {
u8 ta[6];
} __attribute__ ((packed));
+/* TDLS */
+
+/* Link-id information element */
+struct ieee80211_tdls_lnkie {
+ u8 ie_type; /* Link Identifier IE */
+ u8 ie_len;
+ u8 bssid[6];
+ u8 init_sta[6];
+ u8 resp_sta[6];
+} __packed;
+
+struct ieee80211_tdls_data {
+ u8 da[6];
+ u8 sa[6];
+ __be16 ether_type;
+ u8 payload_type;
+ u8 category;
+ u8 action_code;
+ union {
+ struct {
+ u8 dialog_token;
+ __le16 capability;
+ u8 variable[0];
+ } __packed setup_req;
+ struct {
+ __le16 status_code;
+ u8 dialog_token;
+ __le16 capability;
+ u8 variable[0];
+ } __packed setup_resp;
+ struct {
+ __le16 status_code;
+ u8 dialog_token;
+ u8 variable[0];
+ } __packed setup_cfm;
+ struct {
+ __le16 reason_code;
+ u8 variable[0];
+ } __packed teardown;
+ struct {
+ u8 dialog_token;
+ u8 variable[0];
+ } __packed discover_req;
+ } u;
+} __packed;
+
/**
* struct ieee80211_bar - HT Block Ack Request
*
@@ -816,9 +873,11 @@ struct ieee80211_bar {
} __attribute__((packed));
/* 802.11 BAR control masks */
-#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
-#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
-
+#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
+#define IEEE80211_BAR_CTRL_MULTI_TID 0x0002
+#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
+#define IEEE80211_BAR_CTRL_TID_INFO_MASK 0xf000
+#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT 12
#define IEEE80211_HT_MCS_MASK_LEN 10
@@ -1189,16 +1248,13 @@ enum ieee80211_eid {
WLAN_EID_TS_DELAY = 43,
WLAN_EID_TCLAS_PROCESSING = 44,
WLAN_EID_QOS_CAPA = 46,
+ /* 802.11z */
+ WLAN_EID_LINK_ID = 101,
/* 802.11s */
WLAN_EID_MESH_CONFIG = 113,
WLAN_EID_MESH_ID = 114,
WLAN_EID_LINK_METRIC_REPORT = 115,
WLAN_EID_CONGESTION_NOTIFICATION = 116,
- /* Note that the Peer Link IE has been replaced with the similar
- * Peer Management IE. We will keep the former definition until mesh
- * code is changed to comply with latest 802.11s drafts.
- */
- WLAN_EID_PEER_LINK = 55, /* no longer in 802.11s drafts */
WLAN_EID_PEER_MGMT = 117,
WLAN_EID_CHAN_SWITCH_PARAM = 118,
WLAN_EID_MESH_AWAKE_WINDOW = 119,
@@ -1277,13 +1333,11 @@ enum ieee80211_category {
WLAN_CATEGORY_HT = 7,
WLAN_CATEGORY_SA_QUERY = 8,
WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
+ WLAN_CATEGORY_TDLS = 12,
WLAN_CATEGORY_MESH_ACTION = 13,
WLAN_CATEGORY_MULTIHOP_ACTION = 14,
WLAN_CATEGORY_SELF_PROTECTED = 15,
WLAN_CATEGORY_WMM = 17,
- /* TODO: remove MESH_PATH_SEL after mesh is updated
- * to current 802.11s draft */
- WLAN_CATEGORY_MESH_PATH_SEL = 32,
WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
@@ -1309,6 +1363,31 @@ enum ieee80211_ht_actioncode {
WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7,
};
+/* Self Protected Action codes */
+enum ieee80211_self_protected_actioncode {
+ WLAN_SP_RESERVED = 0,
+ WLAN_SP_MESH_PEERING_OPEN = 1,
+ WLAN_SP_MESH_PEERING_CONFIRM = 2,
+ WLAN_SP_MESH_PEERING_CLOSE = 3,
+ WLAN_SP_MGK_INFORM = 4,
+ WLAN_SP_MGK_ACK = 5,
+};
+
+/* Mesh action codes */
+enum ieee80211_mesh_actioncode {
+ WLAN_MESH_ACTION_LINK_METRIC_REPORT,
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION,
+ WLAN_MESH_ACTION_GATE_ANNOUNCEMENT,
+ WLAN_MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION,
+ WLAN_MESH_ACTION_MCCA_SETUP_REQUEST,
+ WLAN_MESH_ACTION_MCCA_SETUP_REPLY,
+ WLAN_MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST,
+ WLAN_MESH_ACTION_MCCA_ADVERTISEMENT,
+ WLAN_MESH_ACTION_MCCA_TEARDOWN,
+ WLAN_MESH_ACTION_TBTT_ADJUSTMENT_REQUEST,
+ WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE,
+};
+
/* Security key length */
enum ieee80211_key_len {
WLAN_KEY_LEN_WEP40 = 5,
@@ -1318,6 +1397,36 @@ enum ieee80211_key_len {
WLAN_KEY_LEN_AES_CMAC = 16,
};
+/* Public action codes */
+enum ieee80211_pub_actioncode {
+ WLAN_PUB_ACTION_TDLS_DISCOVER_RES = 14,
+};
+
+/* TDLS action codes */
+enum ieee80211_tdls_actioncode {
+ WLAN_TDLS_SETUP_REQUEST = 0,
+ WLAN_TDLS_SETUP_RESPONSE = 1,
+ WLAN_TDLS_SETUP_CONFIRM = 2,
+ WLAN_TDLS_TEARDOWN = 3,
+ WLAN_TDLS_PEER_TRAFFIC_INDICATION = 4,
+ WLAN_TDLS_CHANNEL_SWITCH_REQUEST = 5,
+ WLAN_TDLS_CHANNEL_SWITCH_RESPONSE = 6,
+ WLAN_TDLS_PEER_PSM_REQUEST = 7,
+ WLAN_TDLS_PEER_PSM_RESPONSE = 8,
+ WLAN_TDLS_PEER_TRAFFIC_RESPONSE = 9,
+ WLAN_TDLS_DISCOVERY_REQUEST = 10,
+};
+
+/*
+ * TDLS capabililites to be enabled in the 5th byte of the
+ * @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5)
+#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
+
+/* TDLS specific payload type in the LLC/SNAP header */
+#define WLAN_TDLS_SNAP_RFTYPE 0x2
+
/**
* enum - mesh path selection protocol identifier
*
@@ -1453,6 +1562,9 @@ enum ieee80211_sa_query_action {
#define WLAN_PMKID_LEN 16
+#define WLAN_OUI_WFA 0x506f9a
+#define WLAN_OUI_TYPE_WFA_P2P 9
+
/*
* WMM/802.11e Tspec Element
*/
diff --git a/include/linux/if.h b/include/linux/if.h
index 03489ca92de..db20bd4fd16 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -78,6 +78,7 @@
* datapath port */
#define IFF_TX_SKB_SHARING 0x10000 /* The interface supports sharing
* skbs on transmit */
+#define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index a3d99ff6e3b..e473003e4bd 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -83,11 +83,13 @@
#define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */
#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */
#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
+#define ETH_P_TDLS 0x890D /* TDLS */
#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
#define ETH_P_QINQ1 0x9100 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_QINQ2 0x9200 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_QINQ3 0x9300 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
+#define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
/*
* Non DIX types. Won't clash for 1500 types.
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 0ee969a5593..c52d4b5f872 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -279,6 +279,7 @@ enum {
IFLA_VF_MAC, /* Hardware queue specific attributes */
IFLA_VF_VLAN,
IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */
+ IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */
__IFLA_VF_MAX,
};
@@ -300,13 +301,22 @@ struct ifla_vf_tx_rate {
__u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */
};
+struct ifla_vf_spoofchk {
+ __u32 vf;
+ __u32 setting;
+};
+#ifdef __KERNEL__
+
+/* We don't want this structure exposed to user space */
struct ifla_vf_info {
__u32 vf;
__u8 mac[32];
__u32 vlan;
__u32 qos;
__u32 tx_rate;
+ __u32 spoofchk;
};
+#endif
/* VF ports management section
*
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index e28b2e4959d..d103dca5c56 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -64,6 +64,7 @@ struct macvlan_dev {
int (*forward)(struct net_device *dev, struct sk_buff *skb);
struct macvtap_queue *taps[MAX_MACVTAP_QUEUES];
int numvtaps;
+ int minor;
};
static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index c1486060f5e..f3799295d23 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -61,6 +61,17 @@ struct tpacket_stats {
unsigned int tp_drops;
};
+struct tpacket_stats_v3 {
+ unsigned int tp_packets;
+ unsigned int tp_drops;
+ unsigned int tp_freeze_q_cnt;
+};
+
+union tpacket_stats_u {
+ struct tpacket_stats stats1;
+ struct tpacket_stats_v3 stats3;
+};
+
struct tpacket_auxdata {
__u32 tp_status;
__u32 tp_len;
@@ -78,6 +89,7 @@ struct tpacket_auxdata {
#define TP_STATUS_LOSING 0x4
#define TP_STATUS_CSUMNOTREADY 0x8
#define TP_STATUS_VLAN_VALID 0x10 /* auxdata has valid tp_vlan_tci */
+#define TP_STATUS_BLK_TMO 0x20
/* Tx ring - header status */
#define TP_STATUS_AVAILABLE 0x0
@@ -85,6 +97,9 @@ struct tpacket_auxdata {
#define TP_STATUS_SENDING 0x2
#define TP_STATUS_WRONG_FORMAT 0x4
+/* Rx ring - feature request bits */
+#define TP_FT_REQ_FILL_RXHASH 0x1
+
struct tpacket_hdr {
unsigned long tp_status;
unsigned int tp_len;
@@ -111,11 +126,100 @@ struct tpacket2_hdr {
__u16 tp_padding;
};
+struct tpacket_hdr_variant1 {
+ __u32 tp_rxhash;
+ __u32 tp_vlan_tci;
+};
+
+struct tpacket3_hdr {
+ __u32 tp_next_offset;
+ __u32 tp_sec;
+ __u32 tp_nsec;
+ __u32 tp_snaplen;
+ __u32 tp_len;
+ __u32 tp_status;
+ __u16 tp_mac;
+ __u16 tp_net;
+ /* pkt_hdr variants */
+ union {
+ struct tpacket_hdr_variant1 hv1;
+ };
+};
+
+struct tpacket_bd_ts {
+ unsigned int ts_sec;
+ union {
+ unsigned int ts_usec;
+ unsigned int ts_nsec;
+ };
+};
+
+struct tpacket_hdr_v1 {
+ __u32 block_status;
+ __u32 num_pkts;
+ __u32 offset_to_first_pkt;
+
+ /* Number of valid bytes (including padding)
+ * blk_len <= tp_block_size
+ */
+ __u32 blk_len;
+
+ /*
+ * Quite a few uses of sequence number:
+ * 1. Make sure cache flush etc worked.
+ * Well, one can argue - why not use the increasing ts below?
+ * But look at 2. below first.
+ * 2. When you pass around blocks to other user space decoders,
+ * you can see which blk[s] is[are] outstanding etc.
+ * 3. Validate kernel code.
+ */
+ __aligned_u64 seq_num;
+
+ /*
+ * ts_last_pkt:
+ *
+ * Case 1. Block has 'N'(N >=1) packets and TMO'd(timed out)
+ * ts_last_pkt == 'time-stamp of last packet' and NOT the
+ * time when the timer fired and the block was closed.
+ * By providing the ts of the last packet we can absolutely
+ * guarantee that time-stamp wise, the first packet in the
+ * next block will never precede the last packet of the
+ * previous block.
+ * Case 2. Block has zero packets and TMO'd
+ * ts_last_pkt = time when the timer fired and the block
+ * was closed.
+ * Case 3. Block has 'N' packets and NO TMO.
+ * ts_last_pkt = time-stamp of the last pkt in the block.
+ *
+ * ts_first_pkt:
+ * Is always the time-stamp when the block was opened.
+ * Case a) ZERO packets
+ * No packets to deal with but atleast you know the
+ * time-interval of this block.
+ * Case b) Non-zero packets
+ * Use the ts of the first packet in the block.
+ *
+ */
+ struct tpacket_bd_ts ts_first_pkt, ts_last_pkt;
+};
+
+union tpacket_bd_header_u {
+ struct tpacket_hdr_v1 bh1;
+};
+
+struct tpacket_block_desc {
+ __u32 version;
+ __u32 offset_to_priv;
+ union tpacket_bd_header_u hdr;
+};
+
#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
+#define TPACKET3_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket3_hdr)) + sizeof(struct sockaddr_ll))
enum tpacket_versions {
TPACKET_V1,
TPACKET_V2,
+ TPACKET_V3
};
/*
@@ -138,6 +242,21 @@ struct tpacket_req {
unsigned int tp_frame_nr; /* Total number of frames */
};
+struct tpacket_req3 {
+ unsigned int tp_block_size; /* Minimal size of contiguous block */
+ unsigned int tp_block_nr; /* Number of blocks */
+ unsigned int tp_frame_size; /* Size of frame */
+ unsigned int tp_frame_nr; /* Total number of frames */
+ unsigned int tp_retire_blk_tov; /* timeout in msecs */
+ unsigned int tp_sizeof_priv; /* offset to private data area */
+ unsigned int tp_feature_req_word;
+};
+
+union tpacket_req_u {
+ struct tpacket_req req;
+ struct tpacket_req3 req3;
+};
+
struct packet_mreq {
int mr_ifindex;
unsigned short mr_type;
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index 184bc556620..23cefa1111b 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -39,7 +39,7 @@ struct pppol2tp_addr {
* bits. So we need a different sockaddr structure.
*/
struct pppol2tpv3_addr {
- pid_t pid; /* pid that owns the fd.
+ __kernel_pid_t pid; /* pid that owns the fd.
* 0 => current */
int fd; /* FD of UDP or IP socket to use */
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 397921b09ef..b5f927f59f2 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -20,8 +20,9 @@
#include <linux/types.h>
#include <asm/byteorder.h>
-#ifdef __KERNEL__
+#include <linux/socket.h>
#include <linux/if_ether.h>
+#ifdef __KERNEL__
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/ppp_channel.h>
@@ -63,7 +64,7 @@ struct pptp_addr {
#define PX_MAX_PROTO 3
struct sockaddr_pppox {
- sa_family_t sa_family; /* address family, AF_PPPOX */
+ __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */
unsigned int sa_protocol; /* protocol identifier */
union {
struct pppoe_addr pppoe;
@@ -77,7 +78,7 @@ struct sockaddr_pppox {
* type instead.
*/
struct sockaddr_pppol2tp {
- sa_family_t sa_family; /* address family, AF_PPPOX */
+ __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */
unsigned int sa_protocol; /* protocol identifier */
struct pppol2tp_addr pppol2tp;
} __attribute__((packed));
@@ -86,7 +87,7 @@ struct sockaddr_pppol2tp {
* bits. So we need a different sockaddr structure.
*/
struct sockaddr_pppol2tpv3 {
- sa_family_t sa_family; /* address family, AF_PPPOX */
+ __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */
unsigned int sa_protocol; /* protocol identifier */
struct pppol2tpv3_addr pppol2tp;
} __attribute__((packed));
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 09e6e62f995..6ac8e50c6cf 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -15,8 +15,6 @@ struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_inode_alloc(struct inode *inode);
-extern void ima_inode_free(struct inode *inode);
extern int ima_file_check(struct file *file, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
@@ -27,16 +25,6 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline int ima_inode_alloc(struct inode *inode)
-{
- return 0;
-}
-
-static inline void ima_inode_free(struct inode *inode)
-{
- return;
-}
-
static inline int ima_file_check(struct file *file, int mask)
{
return 0;
@@ -51,6 +39,5 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
{
return 0;
}
-
#endif /* CONFIG_IMA_H */
#endif /* _LINUX_IMA_H */
diff --git a/include/linux/in.h b/include/linux/in.h
index beeb6dee2b4..01129c0ea87 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -182,7 +182,7 @@ struct in_pktinfo {
/* Structure describing an Internet (IP) socket address. */
#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */
struct sockaddr_in {
- sa_family_t sin_family; /* Address family */
+ __kernel_sa_family_t sin_family; /* Address family */
__be16 sin_port; /* Port number */
struct in_addr sin_addr; /* Internet address */
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index bc8c4902208..80b480c9753 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -97,9 +97,10 @@ enum {
INET_DIAG_INFO,
INET_DIAG_VEGASINFO,
INET_DIAG_CONG,
+ INET_DIAG_TOS,
};
-#define INET_DIAG_MAX INET_DIAG_CONG
+#define INET_DIAG_MAX INET_DIAG_TOS
/* INET_DIAG_MEM */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index d14e058aaee..08ffab01e76 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -42,7 +42,7 @@ extern struct fs_struct init_fs;
.cputimer = { \
.cputime = INIT_CPUTIME, \
.running = 0, \
- .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
}, \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
new file mode 100644
index 00000000000..a0c41256cb9
--- /dev/null
+++ b/include/linux/integrity.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2009 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#ifndef _LINUX_INTEGRITY_H
+#define _LINUX_INTEGRITY_H
+
+#include <linux/fs.h>
+
+enum integrity_status {
+ INTEGRITY_PASS = 0,
+ INTEGRITY_FAIL,
+ INTEGRITY_NOLABEL,
+ INTEGRITY_NOXATTRS,
+ INTEGRITY_UNKNOWN,
+};
+
+/* List of EVM protected security xattrs */
+#ifdef CONFIG_INTEGRITY
+extern int integrity_inode_alloc(struct inode *inode);
+extern void integrity_inode_free(struct inode *inode);
+
+#else
+static inline int integrity_inode_alloc(struct inode *inode)
+{
+ return 0;
+}
+
+static inline void integrity_inode_free(struct inode *inode)
+{
+ return;
+}
+#endif /* CONFIG_INTEGRITY_H */
+#endif /* _LINUX_INTEGRITY_H */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 9310c699a37..e6ca56de993 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -271,7 +271,7 @@ struct qi_desc {
};
struct q_inval {
- spinlock_t q_lock;
+ raw_spinlock_t q_lock;
struct qi_desc *desc; /* invalidation queue */
int *desc_status; /* desc status */
int free_head; /* first free entry */
@@ -279,7 +279,7 @@ struct q_inval {
int free_cnt;
};
-#ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_IRQ_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
@@ -311,14 +311,14 @@ struct intel_iommu {
u64 cap;
u64 ecap;
u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
- spinlock_t register_lock; /* protect register handling */
+ raw_spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */
int msagaw; /* max sagaw of this iommu */
unsigned int irq;
unsigned char name[13]; /* Device Name */
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
struct dmar_domain **domains; /* ptr to domains */
spinlock_t lock; /* protect context, domain ids */
@@ -329,7 +329,7 @@ struct intel_iommu {
struct q_inval *qi; /* Queued invalidation info */
u32 *iommu_state; /* Store iommu states between suspend and resume.*/
-#ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_IRQ_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
#endif
int node;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a103732b758..a64b00e286f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -59,6 +59,8 @@
* IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
* IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
* IRQF_NO_THREAD - Interrupt cannot be threaded
+ * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
+ * resume time.
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
@@ -72,6 +74,7 @@
#define IRQF_NO_SUSPEND 0x00004000
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
+#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
@@ -95,6 +98,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* @flags: flags (see IRQF_* above)
* @name: name of the device
* @dev_id: cookie to identify the device
+ * @percpu_dev_id: cookie to identify the device
* @next: pointer to the next irqaction for shared interrupts
* @irq: interrupt number
* @dir: pointer to the proc/irq/NN/name entry
@@ -104,17 +108,18 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* @thread_mask: bitmask for keeping track of @thread activity
*/
struct irqaction {
- irq_handler_t handler;
- unsigned long flags;
- void *dev_id;
- struct irqaction *next;
- int irq;
- irq_handler_t thread_fn;
- struct task_struct *thread;
- unsigned long thread_flags;
- unsigned long thread_mask;
- const char *name;
- struct proc_dir_entry *dir;
+ irq_handler_t handler;
+ unsigned long flags;
+ void *dev_id;
+ void __percpu *percpu_dev_id;
+ struct irqaction *next;
+ int irq;
+ irq_handler_t thread_fn;
+ struct task_struct *thread;
+ unsigned long thread_flags;
+ unsigned long thread_mask;
+ const char *name;
+ struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;
extern irqreturn_t no_action(int cpl, void *dev_id);
@@ -136,6 +141,10 @@ extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *name, void *dev_id);
+extern int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+ const char *devname, void __percpu *percpu_dev_id);
+
extern void exit_irq_thread(void);
#else
@@ -164,10 +173,18 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
return request_irq(irq, handler, flags, name, dev_id);
}
+static inline int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+ const char *devname, void __percpu *percpu_dev_id)
+{
+ return request_irq(irq, handler, 0, devname, percpu_dev_id);
+}
+
static inline void exit_irq_thread(void) { }
#endif
extern void free_irq(unsigned int, void *);
+extern void free_percpu_irq(unsigned int, void __percpu *);
struct device;
@@ -207,7 +224,9 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
+extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
+extern void enable_percpu_irq(unsigned int irq, unsigned int type);
/* The following three functions are for the core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 8cdcc2a199a..c81ed2ac16b 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -27,7 +27,7 @@
* The io_mapping mechanism provides an abstraction for mapping
* individual pages from an io device to the CPU in an efficient fashion.
*
- * See Documentation/io_mapping.txt
+ * See Documentation/io-mapping.txt
*/
#ifdef CONFIG_HAVE_ATOMIC_IOMAP
diff --git a/include/linux/ip6_tunnel.h b/include/linux/ip6_tunnel.h
index acb9ad684d6..bf22b031790 100644
--- a/include/linux/ip6_tunnel.h
+++ b/include/linux/ip6_tunnel.h
@@ -16,6 +16,8 @@
#define IP6_TNL_F_MIP6_DEV 0x8
/* copy DSCP from the outer packet */
#define IP6_TNL_F_RCV_DSCP_COPY 0x10
+/* copy fwmark from inner packet */
+#define IP6_TNL_F_USE_ORIG_FWMARK 0x20
struct ip6_tnl_parm {
char name[IFNAMSIZ]; /* name of tunnel device */
diff --git a/include/linux/ipx.h b/include/linux/ipx.h
index aabb1d29402..3d48014cdd7 100644
--- a/include/linux/ipx.h
+++ b/include/linux/ipx.h
@@ -7,7 +7,7 @@
#define IPX_MTU 576
struct sockaddr_ipx {
- sa_family_t sipx_family;
+ __kernel_sa_family_t sipx_family;
__be16 sipx_port;
__be32 sipx_network;
unsigned char sipx_node[IPX_NODE_LEN];
diff --git a/include/linux/irda.h b/include/linux/irda.h
index 00bdad0e851..a014c325231 100644
--- a/include/linux/irda.h
+++ b/include/linux/irda.h
@@ -26,12 +26,9 @@
#define KERNEL_IRDA_H
#include <linux/types.h>
+#include <linux/socket.h>
-/* Please do *not* add any #include in this file, this file is
- * included as-is in user space.
- * Please fix the calling file to properly included needed files before
- * this one, or preferably to include <net/irda/irda.h> instead.
- * Jean II */
+/* Note that this file is shared with user space. */
/* Hint bit positions for first hint byte */
#define HINT_PNP 0x01
@@ -125,7 +122,7 @@ enum {
#define LSAP_ANY 0xff
struct sockaddr_irda {
- sa_family_t sir_family; /* AF_IRDA */
+ __kernel_sa_family_t sir_family; /* AF_IRDA */
__u8 sir_lsap_sel; /* LSAP selector */
__u32 sir_addr; /* Device address */
char sir_name[25]; /* Usually <service>:IrDA:TinyTP */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 59517300a31..59e49c80cc2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -66,6 +66,7 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
* IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
* IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
* IRQ_NESTED_TRHEAD - Interrupt nests into another thread
+ * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -88,12 +89,13 @@ enum {
IRQ_MOVE_PCNTXT = (1 << 14),
IRQ_NESTED_THREAD = (1 << 15),
IRQ_NOTHREAD = (1 << 16),
+ IRQ_PER_CPU_DEVID = (1 << 17),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD)
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
@@ -336,12 +338,14 @@ struct irq_chip {
* IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
* IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
* when irq enabled
+ * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
*/
enum {
IRQCHIP_SET_TYPE_MASKED = (1 << 0),
IRQCHIP_EOI_IF_HANDLED = (1 << 1),
IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
+ IRQCHIP_SKIP_SET_WAKE = (1 << 4),
};
/* This include will go away once we isolated irq_desc usage to core code */
@@ -365,6 +369,8 @@ enum {
struct irqaction;
extern int setup_irq(unsigned int irq, struct irqaction *new);
extern void remove_irq(unsigned int irq, struct irqaction *act);
+extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
+extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
@@ -392,6 +398,7 @@ extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
@@ -420,6 +427,8 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c
irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}
+extern int irq_set_percpu_devid(unsigned int irq);
+
extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name);
@@ -481,6 +490,13 @@ static inline void irq_set_nested_thread(unsigned int irq, bool nest)
irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
}
+static inline void irq_set_percpu_devid_flags(unsigned int irq)
+{
+ irq_set_status_flags(irq,
+ IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
+ IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
+}
+
/* Handle dynamic irq creation and destruction */
extern unsigned int create_irq_nr(unsigned int irq_want, int node);
extern int create_irq(void);
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 150134ac709..6b69c2c9dff 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -53,6 +53,7 @@ struct irq_desc {
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
raw_spinlock_t lock;
+ struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
const struct cpumask *affinity_hint;
struct irq_affinity_notify *affinity_notify;
diff --git a/include/linux/isdn.h b/include/linux/isdn.h
index 44cd663c53b..4ccf95d681b 100644
--- a/include/linux/isdn.h
+++ b/include/linux/isdn.h
@@ -68,7 +68,7 @@
#define ISDN_NET_ENCAP_SYNCPPP 4
#define ISDN_NET_ENCAP_UIHDLC 5
#define ISDN_NET_ENCAP_CISCOHDLCK 6 /* With SLARP and keepalive */
-#define ISDN_NET_ENCAP_X25IFACE 7 /* Documentation/networking/x25-iface.txt*/
+#define ISDN_NET_ENCAP_X25IFACE 7 /* Documentation/networking/x25-iface.txt */
#define ISDN_NET_ENCAP_MAX_ENCAP ISDN_NET_ENCAP_X25IFACE
/* Facility which currently uses an ISDN-channel */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 46ac9a50528..8eefcf7e95e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -382,7 +382,7 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
}
extern int hex_to_bin(char ch);
-extern void hex2bin(u8 *dst, const char *src, size_t count);
+extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
/*
* General tracing related utility functions - trace_printk(),
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index dd7c12e875b..dce6e4dbeda 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -181,7 +181,7 @@ struct kretprobe {
int nmissed;
size_t data_size;
struct hlist_head free_instances;
- spinlock_t lock;
+ raw_spinlock_t lock;
};
struct kretprobe_instance {
diff --git a/include/linux/l2tp.h b/include/linux/l2tp.h
index 4bdb31df8e7..e77d7f9bb24 100644
--- a/include/linux/l2tp.h
+++ b/include/linux/l2tp.h
@@ -8,8 +8,8 @@
#define _LINUX_L2TP_H_
#include <linux/types.h>
-#ifdef __KERNEL__
#include <linux/socket.h>
+#ifdef __KERNEL__
#include <linux/in.h>
#else
#include <netinet/in.h>
@@ -26,14 +26,15 @@
#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */
struct sockaddr_l2tpip {
/* The first fields must match struct sockaddr_in */
- sa_family_t l2tp_family; /* AF_INET */
+ __kernel_sa_family_t l2tp_family; /* AF_INET */
__be16 l2tp_unused; /* INET port number (unused) */
struct in_addr l2tp_addr; /* Internet address */
__u32 l2tp_conn_id; /* Connection ID of tunnel */
/* Pad to size of `struct sockaddr'. */
- unsigned char __pad[sizeof(struct sockaddr) - sizeof(sa_family_t) -
+ unsigned char __pad[sizeof(struct sockaddr) -
+ sizeof(__kernel_sa_family_t) -
sizeof(__be16) - sizeof(struct in_addr) -
sizeof(__u32)];
};
diff --git a/include/linux/lapb.h b/include/linux/lapb.h
index ce709e1885c..873c1eb635e 100644
--- a/include/linux/lapb.h
+++ b/include/linux/lapb.h
@@ -44,7 +44,8 @@ struct lapb_parms_struct {
unsigned int mode;
};
-extern int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks);
+extern int lapb_register(struct net_device *dev,
+ const struct lapb_register_struct *callbacks);
extern int lapb_unregister(struct net_device *dev);
extern int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms);
extern int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms);
diff --git a/include/linux/llc.h b/include/linux/llc.h
index ad7074ba81a..a2418ae13ee 100644
--- a/include/linux/llc.h
+++ b/include/linux/llc.h
@@ -12,16 +12,20 @@
*
* See the GNU General Public License for more details.
*/
+
+#include <linux/socket.h>
+
#define __LLC_SOCK_SIZE__ 16 /* sizeof(sockaddr_llc), word align. */
struct sockaddr_llc {
- sa_family_t sllc_family; /* AF_LLC */
- sa_family_t sllc_arphrd; /* ARPHRD_ETHER */
+ __kernel_sa_family_t sllc_family; /* AF_LLC */
+ __kernel_sa_family_t sllc_arphrd; /* ARPHRD_ETHER */
unsigned char sllc_test;
unsigned char sllc_xid;
unsigned char sllc_ua; /* UA data, only for SOCK_STREAM. */
unsigned char sllc_sap;
unsigned char sllc_mac[IFHWADDRLEN];
- unsigned char __pad[__LLC_SOCK_SIZE__ - sizeof(sa_family_t) * 2 -
+ unsigned char __pad[__LLC_SOCK_SIZE__ -
+ sizeof(__kernel_sa_family_t) * 2 -
sizeof(unsigned char) * 4 - IFHWADDRLEN];
};
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index ef820a3c378..b6a56e37284 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -548,7 +548,7 @@ do { \
#endif
#ifdef CONFIG_PROVE_RCU
-extern void lockdep_rcu_dereference(const char *file, const int line);
+void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#endif
#endif /* __LINUX_LOCKDEP_H */
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 8dda8ded5cd..ed8fe0d0409 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -18,6 +18,7 @@
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/regmap.h>
/*
* Register values.
@@ -361,12 +362,8 @@ struct wm831x {
struct mutex io_lock;
struct device *dev;
- int (*read_dev)(struct wm831x *wm831x, unsigned short reg,
- int bytes, void *dest);
- int (*write_dev)(struct wm831x *wm831x, unsigned short reg,
- int bytes, void *src);
- void *control_data;
+ struct regmap *regmap;
int irq; /* Our chip IRQ */
struct mutex irq_lock;
@@ -374,6 +371,8 @@ struct wm831x {
int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */
int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
+ bool soft_shutdown;
+
/* Chip revision based flags */
unsigned has_gpio_ena:1; /* Has GPIO enable bit */
unsigned has_cs_sts:1; /* Has current sink status bit */
@@ -412,8 +411,11 @@ int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq);
void wm831x_device_exit(struct wm831x *wm831x);
int wm831x_device_suspend(struct wm831x *wm831x);
+void wm831x_device_shutdown(struct wm831x *wm831x);
int wm831x_irq_init(struct wm831x *wm831x, int irq);
void wm831x_irq_exit(struct wm831x *wm831x);
void wm831x_auxadc_init(struct wm831x *wm831x);
+extern struct regmap_config wm831x_regmap_config;
+
#endif
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index 0ba24599fe5..1d7a3f7b3b5 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -123,6 +123,9 @@ struct wm831x_pdata {
/** Disable the touchscreen */
bool disable_touch;
+ /** The driver should initiate a power off sequence during shutdown */
+ bool soft_shutdown;
+
int irq_base;
int gpio_base;
int gpio_defaults[WM831X_GPIO_NUM];
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
index 2aab4e93a5c..0147b696851 100644
--- a/include/linux/mfd/wm8400-private.h
+++ b/include/linux/mfd/wm8400-private.h
@@ -25,16 +25,15 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
+struct regmap;
+
#define WM8400_REGISTER_COUNT 0x55
struct wm8400 {
struct device *dev;
- int (*read_dev)(void *data, char reg, int count, u16 *dst);
- int (*write_dev)(void *data, char reg, int count, const u16 *src);
-
struct mutex io_lock;
- void *io_data;
+ struct regmap *regmap;
u16 reg_cache[WM8400_REGISTER_COUNT];
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index f0b69cdae41..45df450d869 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -24,6 +24,7 @@ enum wm8994_type {
struct regulator_dev;
struct regulator_bulk_data;
+struct regmap;
#define WM8994_NUM_GPIO_REGS 11
#define WM8994_NUM_LDO_REGS 2
@@ -50,18 +51,12 @@ struct regulator_bulk_data;
#define WM8994_IRQ_GPIO(x) (x + WM8994_IRQ_TEMP_WARN)
struct wm8994 {
- struct mutex io_lock;
struct mutex irq_lock;
enum wm8994_type type;
struct device *dev;
- int (*read_dev)(struct wm8994 *wm8994, unsigned short reg,
- int bytes, void *dest);
- int (*write_dev)(struct wm8994 *wm8994, unsigned short reg,
- int bytes, const void *src);
-
- void *control_data;
+ struct regmap *regmap;
int gpio_base;
int irq_base;
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 103113a2fd1..27748230aa6 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -11,131 +11,130 @@
#include <linux/types.h>
/* Generic MII registers. */
-
-#define MII_BMCR 0x00 /* Basic mode control register */
-#define MII_BMSR 0x01 /* Basic mode status register */
-#define MII_PHYSID1 0x02 /* PHYS ID 1 */
-#define MII_PHYSID2 0x03 /* PHYS ID 2 */
-#define MII_ADVERTISE 0x04 /* Advertisement control reg */
-#define MII_LPA 0x05 /* Link partner ability reg */
-#define MII_EXPANSION 0x06 /* Expansion register */
-#define MII_CTRL1000 0x09 /* 1000BASE-T control */
-#define MII_STAT1000 0x0a /* 1000BASE-T status */
-#define MII_ESTATUS 0x0f /* Extended Status */
-#define MII_DCOUNTER 0x12 /* Disconnect counter */
-#define MII_FCSCOUNTER 0x13 /* False carrier counter */
-#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */
-#define MII_RERRCOUNTER 0x15 /* Receive error counter */
-#define MII_SREVISION 0x16 /* Silicon revision */
-#define MII_RESV1 0x17 /* Reserved... */
-#define MII_LBRERROR 0x18 /* Lpback, rx, bypass error */
-#define MII_PHYADDR 0x19 /* PHY address */
-#define MII_RESV2 0x1a /* Reserved... */
-#define MII_TPISTATUS 0x1b /* TPI status for 10mbps */
-#define MII_NCONFIG 0x1c /* Network interface config */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
+#define MII_STAT1000 0x0a /* 1000BASE-T status */
+#define MII_ESTATUS 0x0f /* Extended Status */
+#define MII_DCOUNTER 0x12 /* Disconnect counter */
+#define MII_FCSCOUNTER 0x13 /* False carrier counter */
+#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */
+#define MII_RERRCOUNTER 0x15 /* Receive error counter */
+#define MII_SREVISION 0x16 /* Silicon revision */
+#define MII_RESV1 0x17 /* Reserved... */
+#define MII_LBRERROR 0x18 /* Lpback, rx, bypass error */
+#define MII_PHYADDR 0x19 /* PHY address */
+#define MII_RESV2 0x1a /* Reserved... */
+#define MII_TPISTATUS 0x1b /* TPI status for 10mbps */
+#define MII_NCONFIG 0x1c /* Network interface config */
/* Basic mode control register. */
-#define BMCR_RESV 0x003f /* Unused... */
-#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
-#define BMCR_CTST 0x0080 /* Collision test */
-#define BMCR_FULLDPLX 0x0100 /* Full duplex */
-#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */
-#define BMCR_ISOLATE 0x0400 /* Disconnect DP83840 from MII */
-#define BMCR_PDOWN 0x0800 /* Powerdown the DP83840 */
-#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
-#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
-#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */
-#define BMCR_RESET 0x8000 /* Reset the DP83840 */
+#define BMCR_RESV 0x003f /* Unused... */
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+#define BMCR_CTST 0x0080 /* Collision test */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */
+#define BMCR_ISOLATE 0x0400 /* Isolate data paths from MII */
+#define BMCR_PDOWN 0x0800 /* Enable low power state */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
+#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */
+#define BMCR_RESET 0x8000 /* Reset to default state */
/* Basic mode status register. */
-#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
-#define BMSR_JCD 0x0002 /* Jabber detected */
-#define BMSR_LSTATUS 0x0004 /* Link status */
-#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
-#define BMSR_RFAULT 0x0010 /* Remote fault detected */
-#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */
-#define BMSR_RESV 0x00c0 /* Unused... */
-#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
-#define BMSR_100HALF2 0x0200 /* Can do 100BASE-T2 HDX */
-#define BMSR_100FULL2 0x0400 /* Can do 100BASE-T2 FDX */
-#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
-#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
-#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
-#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
-#define BMSR_100BASE4 0x8000 /* Can do 100mbps, 4k packets */
+#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
+#define BMSR_JCD 0x0002 /* Jabber detected */
+#define BMSR_LSTATUS 0x0004 /* Link status */
+#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
+#define BMSR_RFAULT 0x0010 /* Remote fault detected */
+#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */
+#define BMSR_RESV 0x00c0 /* Unused... */
+#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
+#define BMSR_100HALF2 0x0200 /* Can do 100BASE-T2 HDX */
+#define BMSR_100FULL2 0x0400 /* Can do 100BASE-T2 FDX */
+#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
+#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
+#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
+#define BMSR_100BASE4 0x8000 /* Can do 100mbps, 4k packets */
/* Advertisement control register. */
-#define ADVERTISE_SLCT 0x001f /* Selector bits */
-#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
-#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
-#define ADVERTISE_1000XFULL 0x0020 /* Try for 1000BASE-X full-duplex */
-#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
-#define ADVERTISE_1000XHALF 0x0040 /* Try for 1000BASE-X half-duplex */
-#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
-#define ADVERTISE_1000XPAUSE 0x0080 /* Try for 1000BASE-X pause */
-#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
-#define ADVERTISE_1000XPSE_ASYM 0x0100 /* Try for 1000BASE-X asym pause */
-#define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */
-#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
-#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymetric pause */
-#define ADVERTISE_RESV 0x1000 /* Unused... */
-#define ADVERTISE_RFAULT 0x2000 /* Say we can detect faults */
-#define ADVERTISE_LPACK 0x4000 /* Ack link partners response */
-#define ADVERTISE_NPAGE 0x8000 /* Next page bit */
-
-#define ADVERTISE_FULL (ADVERTISE_100FULL | ADVERTISE_10FULL | \
- ADVERTISE_CSMA)
-#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
- ADVERTISE_100HALF | ADVERTISE_100FULL)
+#define ADVERTISE_SLCT 0x001f /* Selector bits */
+#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define ADVERTISE_1000XFULL 0x0020 /* Try for 1000BASE-X full-duplex */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define ADVERTISE_1000XHALF 0x0040 /* Try for 1000BASE-X half-duplex */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_1000XPAUSE 0x0080 /* Try for 1000BASE-X pause */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define ADVERTISE_1000XPSE_ASYM 0x0100 /* Try for 1000BASE-X asym pause */
+#define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */
+#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
+#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymetric pause */
+#define ADVERTISE_RESV 0x1000 /* Unused... */
+#define ADVERTISE_RFAULT 0x2000 /* Say we can detect faults */
+#define ADVERTISE_LPACK 0x4000 /* Ack link partners response */
+#define ADVERTISE_NPAGE 0x8000 /* Next page bit */
+
+#define ADVERTISE_FULL (ADVERTISE_100FULL | ADVERTISE_10FULL | \
+ ADVERTISE_CSMA)
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+ ADVERTISE_100HALF | ADVERTISE_100FULL)
/* Link partner ability register. */
-#define LPA_SLCT 0x001f /* Same as advertise selector */
-#define LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
-#define LPA_1000XFULL 0x0020 /* Can do 1000BASE-X full-duplex */
-#define LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
-#define LPA_1000XHALF 0x0040 /* Can do 1000BASE-X half-duplex */
-#define LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
-#define LPA_1000XPAUSE 0x0080 /* Can do 1000BASE-X pause */
-#define LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
-#define LPA_1000XPAUSE_ASYM 0x0100 /* Can do 1000BASE-X pause asym*/
-#define LPA_100BASE4 0x0200 /* Can do 100mbps 4k packets */
-#define LPA_PAUSE_CAP 0x0400 /* Can pause */
-#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */
-#define LPA_RESV 0x1000 /* Unused... */
-#define LPA_RFAULT 0x2000 /* Link partner faulted */
-#define LPA_LPACK 0x4000 /* Link partner acked us */
-#define LPA_NPAGE 0x8000 /* Next page bit */
+#define LPA_SLCT 0x001f /* Same as advertise selector */
+#define LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
+#define LPA_1000XFULL 0x0020 /* Can do 1000BASE-X full-duplex */
+#define LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
+#define LPA_1000XHALF 0x0040 /* Can do 1000BASE-X half-duplex */
+#define LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
+#define LPA_1000XPAUSE 0x0080 /* Can do 1000BASE-X pause */
+#define LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
+#define LPA_1000XPAUSE_ASYM 0x0100 /* Can do 1000BASE-X pause asym*/
+#define LPA_100BASE4 0x0200 /* Can do 100mbps 4k packets */
+#define LPA_PAUSE_CAP 0x0400 /* Can pause */
+#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */
+#define LPA_RESV 0x1000 /* Unused... */
+#define LPA_RFAULT 0x2000 /* Link partner faulted */
+#define LPA_LPACK 0x4000 /* Link partner acked us */
+#define LPA_NPAGE 0x8000 /* Next page bit */
#define LPA_DUPLEX (LPA_10FULL | LPA_100FULL)
#define LPA_100 (LPA_100FULL | LPA_100HALF | LPA_100BASE4)
/* Expansion register for auto-negotiation. */
-#define EXPANSION_NWAY 0x0001 /* Can do N-way auto-nego */
-#define EXPANSION_LCWP 0x0002 /* Got new RX page code word */
-#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
-#define EXPANSION_NPCAPABLE 0x0008 /* Link partner supports npage */
-#define EXPANSION_MFAULTS 0x0010 /* Multiple faults detected */
-#define EXPANSION_RESV 0xffe0 /* Unused... */
+#define EXPANSION_NWAY 0x0001 /* Can do N-way auto-nego */
+#define EXPANSION_LCWP 0x0002 /* Got new RX page code word */
+#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
+#define EXPANSION_NPCAPABLE 0x0008 /* Link partner supports npage */
+#define EXPANSION_MFAULTS 0x0010 /* Multiple faults detected */
+#define EXPANSION_RESV 0xffe0 /* Unused... */
-#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
-#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
+#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
+#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
/* N-way test register. */
-#define NWAYTEST_RESV1 0x00ff /* Unused... */
-#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */
-#define NWAYTEST_RESV2 0xfe00 /* Unused... */
+#define NWAYTEST_RESV1 0x00ff /* Unused... */
+#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */
+#define NWAYTEST_RESV2 0xfe00 /* Unused... */
/* 1000BASE-T Control register */
-#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
-#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
+#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
+#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
#define CTL1000_AS_MASTER 0x0800
#define CTL1000_ENABLE_MASTER 0x1000
/* 1000BASE-T Status register */
-#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
-#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
-#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */
-#define LPA_1000HALF 0x0400 /* Link partner 1000BASE-T half duplex */
+#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
+#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
+#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */
+#define LPA_1000HALF 0x0400 /* Link partner 1000BASE-T half duplex */
/* Flow control flags */
#define FLOW_CTRL_TX 0x01
@@ -149,7 +148,7 @@ struct mii_ioctl_data {
__u16 val_out;
};
-#ifdef __KERNEL__
+#ifdef __KERNEL__
#include <linux/if.h>
@@ -180,7 +179,7 @@ extern unsigned int mii_check_media (struct mii_if_info *mii,
unsigned int ok_to_print,
unsigned int init_media);
extern int generic_mii_ioctl(struct mii_if_info *mii_if,
- struct mii_ioctl_data *mii_data, int cmd,
+ struct mii_ioctl_data *mii_data, int cmd,
unsigned int *duplex_changed);
@@ -189,7 +188,6 @@ static inline struct mii_ioctl_data *if_mii(struct ifreq *rq)
return (struct mii_ioctl_data *) &rq->ifr_ifru;
}
-
/**
* mii_nway_result
* @negotiated: value of MII ANAR and'd with ANLPAR
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 53ef894bfa0..2366f94a095 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -75,6 +75,7 @@ enum {
MLX4_DEV_CAP_FLAG_UD_MCAST = 1LL << 21,
MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30,
MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32,
+ MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34,
MLX4_DEV_CAP_FLAG_WOL = 1LL << 38,
MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40,
MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 774b8952deb..29971a589ff 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -135,6 +135,17 @@ struct page {
#endif
;
+struct page_frag {
+ struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 offset;
+ __u32 size;
+#else
+ __u16 offset;
+ __u16 size;
+#endif
+};
+
typedef unsigned long __nocast vm_flags_t;
/*
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index ae28e93fd07..468819cdde8 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -405,6 +405,15 @@ struct virtio_device_id {
};
#define VIRTIO_DEV_ANY_ID 0xffffffff
+/*
+ * For Hyper-V devices we use the device guid as the id.
+ */
+struct hv_vmbus_device_id {
+ __u8 guid[16];
+ kernel_ulong_t driver_data /* Data private to the driver */
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
+};
+
/* i2c */
#define I2C_NAME_SIZE 20
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index ddaae98c53f..fffb10bd551 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -262,6 +262,26 @@ static inline void __kernel_param_unlock(void)
.str = &__param_string_##name, 0, perm); \
__MODULE_PARM_TYPE(name, "string")
+/**
+ * parameq - checks if two parameter names match
+ * @name1: parameter name 1
+ * @name2: parameter name 2
+ *
+ * Returns true if the two parameter names are equal.
+ * Dashes (-) are considered equal to underscores (_).
+ */
+extern bool parameq(const char *name1, const char *name2);
+
+/**
+ * parameqn - checks if two parameter names match
+ * @name1: parameter name 1
+ * @name2: parameter name 2
+ * @n: the length to compare
+ *
+ * Similar to parameq(), except it compares @n characters.
+ */
+extern bool parameqn(const char *name1, const char *name2, size_t n);
+
/* Called on module insert or kernel boot */
extern int parse_args(const char *name,
char *args,
diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h
index a3b8546354a..ae5df122e42 100644
--- a/include/linux/net_tstamp.h
+++ b/include/linux/net_tstamp.h
@@ -45,7 +45,7 @@ struct hwtstamp_config {
};
/* possible values for hwtstamp_config->tx_type */
-enum {
+enum hwtstamp_tx_types {
/*
* No outgoing packet will need hardware time stamping;
* should a packet arrive which asks for it, no hardware
@@ -60,10 +60,19 @@ enum {
* before sending the packet.
*/
HWTSTAMP_TX_ON,
+
+ /*
+ * Enables time stamping for outgoing packets just as
+ * HWTSTAMP_TX_ON does, but also enables time stamp insertion
+ * directly into Sync packets. In this case, transmitted Sync
+ * packets will not received a time stamp via the socket error
+ * queue.
+ */
+ HWTSTAMP_TX_ONESTEP_SYNC,
};
/* possible values for hwtstamp_config->rx_filter */
-enum {
+enum hwtstamp_rx_filters {
/* time stamp no incoming packet at all */
HWTSTAMP_FILTER_NONE,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ddee79bb8f1..df1c836e694 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -31,7 +31,7 @@
#include <linux/if_link.h>
#ifdef __KERNEL__
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/atomic.h>
@@ -723,9 +723,8 @@ struct netdev_tc_txq {
*
* void (*ndo_set_rx_mode)(struct net_device *dev);
* This function is called device changes address list filtering.
- *
- * void (*ndo_set_multicast_list)(struct net_device *dev);
- * This function is called when the multicast address list changes.
+ * If driver handles unicast address filtering, it should set
+ * IFF_UNICAST_FLT to its priv_flags.
*
* int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
* This function is called when the Media Access Control address
@@ -782,6 +781,7 @@ struct netdev_tc_txq {
* int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
* int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
* int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
+ * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_config)(struct net_device *dev,
* int vf, struct ifla_vf_info *ivf);
* int (*ndo_set_vf_port)(struct net_device *dev, int vf,
@@ -868,7 +868,6 @@ struct net_device_ops {
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
void (*ndo_set_rx_mode)(struct net_device *dev);
- void (*ndo_set_multicast_list)(struct net_device *dev);
int (*ndo_set_mac_address)(struct net_device *dev,
void *addr);
int (*ndo_validate_addr)(struct net_device *dev);
@@ -902,6 +901,8 @@ struct net_device_ops {
int queue, u16 vlan, u8 qos);
int (*ndo_set_vf_tx_rate)(struct net_device *dev,
int vf, int rate);
+ int (*ndo_set_vf_spoofchk)(struct net_device *dev,
+ int vf, bool setting);
int (*ndo_get_vf_config)(struct net_device *dev,
int vf,
struct ifla_vf_info *ivf);
@@ -924,11 +925,15 @@ struct net_device_ops {
u16 xid,
struct scatterlist *sgl,
unsigned int sgc);
+#endif
+
+#if defined(CONFIG_LIBFCOE) || defined(CONFIG_LIBFCOE_MODULE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
int (*ndo_fcoe_get_wwn)(struct net_device *dev,
u64 *wwn, int type);
#endif
+
#ifdef CONFIG_RFS_ACCEL
int (*ndo_rx_flow_steer)(struct net_device *dev,
const struct sk_buff *skb,
@@ -964,7 +969,7 @@ struct net_device {
*/
char name[IFNAMSIZ];
- struct pm_qos_request_list pm_qos_req;
+ struct pm_qos_request pm_qos_req;
/* device name hash chain */
struct hlist_node name_hlist;
@@ -2587,9 +2592,6 @@ static inline int netif_is_bond_slave(struct net_device *dev)
extern struct pernet_operations __net_initdata loopback_net_ops;
-int dev_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd);
-
static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
{
if (dev->features & NETIF_F_RXCSUM)
@@ -2617,6 +2619,9 @@ static inline const char *netdev_name(const struct net_device *dev)
return dev->name;
}
+extern int __netdev_printk(const char *level, const struct net_device *dev,
+ struct va_format *vaf);
+
extern int netdev_printk(const char *level, const struct net_device *dev,
const char *format, ...)
__attribute__ ((format (printf, 3, 4)));
@@ -2644,8 +2649,7 @@ extern int netdev_info(const struct net_device *dev, const char *format, ...)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...) \
do { \
- dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
- netdev_name(__dev), ##args); \
+ dynamic_netdev_dbg(__dev, format, ##args); \
} while (0)
#else
#define netdev_dbg(__dev, format, args...) \
@@ -2712,9 +2716,7 @@ do { \
#define netif_dbg(priv, type, netdev, format, args...) \
do { \
if (netif_msg_##type(priv)) \
- dynamic_dev_dbg((netdev)->dev.parent, \
- "%s: " format, \
- netdev_name(netdev), ##args); \
+ dynamic_netdev_dbg(netdev, format, ##args); \
} while (0)
#else
#define netif_dbg(priv, type, dev, format, args...) \
diff --git a/include/linux/netfilter/xt_connlimit.h b/include/linux/netfilter/xt_connlimit.h
index 0ca66e97acb..d1366f05d1b 100644
--- a/include/linux/netfilter/xt_connlimit.h
+++ b/include/linux/netfilter/xt_connlimit.h
@@ -2,6 +2,7 @@
#define _XT_CONNLIMIT_H
#include <linux/types.h>
+#include <linux/netfilter.h>
struct xt_connlimit_data;
diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h
index 74b904d8f99..e3c041d5402 100644
--- a/include/linux/netfilter/xt_conntrack.h
+++ b/include/linux/netfilter/xt_conntrack.h
@@ -6,6 +6,7 @@
#define _XT_CONNTRACK_H
#include <linux/types.h>
+#include <linux/netfilter.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#define XT_CONNTRACK_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1))
diff --git a/include/linux/netfilter/xt_iprange.h b/include/linux/netfilter/xt_iprange.h
index c1f21a779a4..25fd7cf851f 100644
--- a/include/linux/netfilter/xt_iprange.h
+++ b/include/linux/netfilter/xt_iprange.h
@@ -2,6 +2,7 @@
#define _LINUX_NETFILTER_XT_IPRANGE_H 1
#include <linux/types.h>
+#include <linux/netfilter.h>
enum {
IPRANGE_SRC = 1 << 0, /* match source IP address */
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index adbf4bff87e..e08565d4517 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -52,7 +52,7 @@ struct arpt_arp {
struct in_addr smsk, tmsk;
/* Device hw address length, src+target device addresses */
- u_int8_t arhln, arhln_mask;
+ __u8 arhln, arhln_mask;
struct arpt_devaddr_info src_devaddr;
struct arpt_devaddr_info tgt_devaddr;
@@ -71,9 +71,9 @@ struct arpt_arp {
unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ];
/* Flags word */
- u_int8_t flags;
+ __u8 flags;
/* Inverse flags */
- u_int16_t invflags;
+ __u16 invflags;
};
/* Values for "flag" field in struct arpt_ip (general arp structure).
@@ -102,9 +102,9 @@ struct arpt_entry
struct arpt_arp arp;
/* Size of arpt_entry + matches */
- u_int16_t target_offset;
+ __u16 target_offset;
/* Size of arpt_entry + matches + target */
- u_int16_t next_offset;
+ __u16 next_offset;
/* Back pointer */
unsigned int comefrom;
@@ -260,8 +260,8 @@ extern unsigned int arpt_do_table(struct sk_buff *skb,
struct compat_arpt_entry {
struct arpt_arp arp;
- u_int16_t target_offset;
- u_int16_t next_offset;
+ __u16 target_offset;
+ __u16 next_offset;
compat_uint_t comefrom;
struct compat_xt_counters counters;
unsigned char elems[0];
diff --git a/include/linux/netfilter_decnet.h b/include/linux/netfilter_decnet.h
index 6f425369ee2..0b09732aacd 100644
--- a/include/linux/netfilter_decnet.h
+++ b/include/linux/netfilter_decnet.h
@@ -11,6 +11,9 @@
/* only for userspace compatibility */
#ifndef __KERNEL__
+
+#include <limits.h> /* for INT_MIN, INT_MAX */
+
/* IP Cache bits. */
/* Src IP address. */
#define NFC_DN_SRC 0x0001
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 29c7727ff0e..fa0946c549d 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -9,6 +9,9 @@
/* only for userspace compatibility */
#ifndef __KERNEL__
+
+#include <limits.h> /* for INT_MIN, INT_MAX */
+
/* IP Cache bits. */
/* Src IP address. */
#define NFC_IP_SRC 0x0001
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 64a5d95c58e..db79231914c 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -81,12 +81,12 @@ struct ipt_ip {
unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ];
/* Protocol, 0 = ANY */
- u_int16_t proto;
+ __u16 proto;
/* Flags word */
- u_int8_t flags;
+ __u8 flags;
/* Inverse flags */
- u_int8_t invflags;
+ __u8 invflags;
};
/* Values for "flag" field in struct ipt_ip (general ip structure). */
@@ -114,9 +114,9 @@ struct ipt_entry {
unsigned int nfcache;
/* Size of ipt_entry + matches */
- u_int16_t target_offset;
+ __u16 target_offset;
/* Size of ipt_entry + matches + target */
- u_int16_t next_offset;
+ __u16 next_offset;
/* Back pointer */
unsigned int comefrom;
@@ -149,9 +149,9 @@ struct ipt_entry {
/* ICMP matching stuff */
struct ipt_icmp {
- u_int8_t type; /* type to match */
- u_int8_t code[2]; /* range of code */
- u_int8_t invflags; /* Inverse flags */
+ __u8 type; /* type to match */
+ __u8 code[2]; /* range of code */
+ __u8 invflags; /* Inverse flags */
};
/* Values for "inv" field for struct ipt_icmp. */
@@ -288,8 +288,8 @@ extern unsigned int ipt_do_table(struct sk_buff *skb,
struct compat_ipt_entry {
struct ipt_ip ip;
compat_uint_t nfcache;
- u_int16_t target_offset;
- u_int16_t next_offset;
+ __u16 target_offset;
+ __u16 next_offset;
compat_uint_t comefrom;
struct compat_xt_counters counters;
unsigned char elems[0];
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 1f7e300094c..57c025127f1 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -12,6 +12,9 @@
/* only for userspace compatibility */
#ifndef __KERNEL__
+
+#include <limits.h> /* for INT_MIN, INT_MAX */
+
/* IP Cache bits. */
/* Src IP address. */
#define NFC_IP6_SRC 0x0001
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index c9784f7a9c1..f549adccc94 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -81,14 +81,14 @@ struct ip6t_ip6 {
* MH do not match any packets.
* - You also need to set IP6T_FLAGS_PROTO to "flags" to check protocol.
*/
- u_int16_t proto;
+ __u16 proto;
/* TOS to match iff flags & IP6T_F_TOS */
- u_int8_t tos;
+ __u8 tos;
/* Flags word */
- u_int8_t flags;
+ __u8 flags;
/* Inverse flags */
- u_int8_t invflags;
+ __u8 invflags;
};
/* Values for "flag" field in struct ip6t_ip6 (general ip6 structure). */
@@ -118,9 +118,9 @@ struct ip6t_entry {
unsigned int nfcache;
/* Size of ipt_entry + matches */
- u_int16_t target_offset;
+ __u16 target_offset;
/* Size of ipt_entry + matches + target */
- u_int16_t next_offset;
+ __u16 next_offset;
/* Back pointer */
unsigned int comefrom;
@@ -186,9 +186,9 @@ struct ip6t_error {
/* ICMP matching stuff */
struct ip6t_icmp {
- u_int8_t type; /* type to match */
- u_int8_t code[2]; /* range of code */
- u_int8_t invflags; /* Inverse flags */
+ __u8 type; /* type to match */
+ __u8 code[2]; /* range of code */
+ __u8 invflags; /* Inverse flags */
};
/* Values for "inv" field for struct ipt_icmp. */
@@ -298,8 +298,8 @@ extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
struct compat_ip6t_entry {
struct ip6t_ip6 ipv6;
compat_uint_t nfcache;
- u_int16_t target_offset;
- u_int16_t next_offset;
+ __u16 target_offset;
+ __u16 next_offset;
compat_uint_t comefrom;
struct compat_xt_counters counters;
unsigned char elems[0];
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 180540a84d3..8180cd9d73d 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_NETLINK_H
#define __LINUX_NETLINK_H
-#include <linux/socket.h> /* for sa_family_t */
+#include <linux/socket.h> /* for __kernel_sa_family_t */
#include <linux/types.h>
#define NETLINK_ROUTE 0 /* Routing/device hook */
diff --git a/include/linux/netrom.h b/include/linux/netrom.h
index 6939b32f66a..af7313cc9cb 100644
--- a/include/linux/netrom.h
+++ b/include/linux/netrom.h
@@ -7,6 +7,8 @@
#ifndef NETROM_KERNEL_H
#define NETROM_KERNEL_H
+#include <linux/ax25.h>
+
#define NETROM_MTU 236
#define NETROM_T1 1
diff --git a/include/linux/nfc.h b/include/linux/nfc.h
index 330a4c5db58..36cb955b05c 100644
--- a/include/linux/nfc.h
+++ b/include/linux/nfc.h
@@ -39,6 +39,10 @@
*
* @NFC_CMD_GET_DEVICE: request information about a device (requires
* %NFC_ATTR_DEVICE_INDEX) or dump request to get a list of all nfc devices
+ * @NFC_CMD_DEV_UP: turn on the nfc device
+ * (requires %NFC_ATTR_DEVICE_INDEX)
+ * @NFC_CMD_DEV_DOWN: turn off the nfc device
+ * (requires %NFC_ATTR_DEVICE_INDEX)
* @NFC_CMD_START_POLL: start polling for targets using the given protocols
* (requires %NFC_ATTR_DEVICE_INDEX and %NFC_ATTR_PROTOCOLS)
* @NFC_CMD_STOP_POLL: stop polling for targets (requires
@@ -56,6 +60,8 @@
enum nfc_commands {
NFC_CMD_UNSPEC,
NFC_CMD_GET_DEVICE,
+ NFC_CMD_DEV_UP,
+ NFC_CMD_DEV_DOWN,
NFC_CMD_START_POLL,
NFC_CMD_STOP_POLL,
NFC_CMD_GET_TARGET,
@@ -123,4 +129,6 @@ struct sockaddr_nfc {
#define NFC_SOCKPROTO_RAW 0
#define NFC_SOCKPROTO_MAX 1
+#define NFC_HEADER_SIZE 1
+
#endif /*__LINUX_NFC_H */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 76f99e8714f..32345c2805c 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -373,6 +373,22 @@ enum nfsstat4 {
NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */
};
+static inline bool seqid_mutating_err(u32 err)
+{
+ /* rfc 3530 section 8.1.5: */
+ switch (err) {
+ case NFS4ERR_STALE_CLIENTID:
+ case NFS4ERR_STALE_STATEID:
+ case NFS4ERR_BAD_STATEID:
+ case NFS4ERR_BAD_SEQID:
+ case NFS4ERR_BADXDR:
+ case NFS4ERR_RESOURCE:
+ case NFS4ERR_NOFILEHANDLE:
+ return false;
+ };
+ return true;
+}
+
/*
* Note: NF4BAD is not actually part of the protocol; it is just used
* internally by nfsd.
@@ -394,7 +410,10 @@ enum open_claim_type4 {
NFS4_OPEN_CLAIM_NULL = 0,
NFS4_OPEN_CLAIM_PREVIOUS = 1,
NFS4_OPEN_CLAIM_DELEGATE_CUR = 2,
- NFS4_OPEN_CLAIM_DELEGATE_PREV = 3
+ NFS4_OPEN_CLAIM_DELEGATE_PREV = 3,
+ NFS4_OPEN_CLAIM_FH = 4, /* 4.1 */
+ NFS4_OPEN_CLAIM_DELEG_CUR_FH = 5, /* 4.1 */
+ NFS4_OPEN_CLAIM_DELEG_PREV_FH = 6, /* 4.1 */
};
enum opentype4 {
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index eaac770f886..60a137b7f17 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -149,7 +149,6 @@ struct nfs_inode {
unsigned long read_cache_jiffies;
unsigned long attrtimeo;
unsigned long attrtimeo_timestamp;
- __u64 change_attr; /* v4 only */
unsigned long attr_gencount;
/* "Generation counter" for the attribute cache. This is
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index e2791a27a90..ab465fe8c3d 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -34,6 +34,7 @@ enum {
PG_NEED_COMMIT,
PG_NEED_RESCHED,
PG_PNFS_COMMIT,
+ PG_PARTIAL_READ_FAILED,
};
struct nfs_inode;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index abd615d74a2..c74595ba709 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1133,7 +1133,6 @@ struct nfs_page;
#define NFS_PAGEVEC_SIZE (8U)
struct nfs_read_data {
- int flags;
struct rpc_task task;
struct inode *inode;
struct rpc_cred *cred;
@@ -1156,7 +1155,6 @@ struct nfs_read_data {
};
struct nfs_write_data {
- int flags;
struct rpc_task task;
struct inode *inode;
struct rpc_cred *cred;
@@ -1197,9 +1195,6 @@ struct nfs_rpc_ops {
int (*getroot) (struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);
- int (*lookupfh)(struct nfs_server *, struct nfs_fh *,
- struct qstr *, struct nfs_fh *,
- struct nfs_fattr *);
int (*getattr) (struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
diff --git a/include/linux/nfsd/Kbuild b/include/linux/nfsd/Kbuild
index 55d1467de3c..b8d4001212b 100644
--- a/include/linux/nfsd/Kbuild
+++ b/include/linux/nfsd/Kbuild
@@ -1,6 +1,4 @@
-header-y += const.h
header-y += debug.h
header-y += export.h
header-y += nfsfh.h
header-y += stats.h
-header-y += syscall.h
diff --git a/include/linux/nfsd/const.h b/include/linux/nfsd/const.h
deleted file mode 100644
index 323f8cfa060..00000000000
--- a/include/linux/nfsd/const.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * include/linux/nfsd/const.h
- *
- * Various constants related to NFS.
- *
- * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
- */
-
-#ifndef _LINUX_NFSD_CONST_H
-#define _LINUX_NFSD_CONST_H
-
-#include <linux/nfs.h>
-#include <linux/nfs2.h>
-#include <linux/nfs3.h>
-#include <linux/nfs4.h>
-
-/*
- * Maximum protocol version supported by knfsd
- */
-#define NFSSVC_MAXVERS 3
-
-/*
- * Maximum blocksizes supported by daemon under various circumstances.
- */
-#define NFSSVC_MAXBLKSIZE RPCSVC_MAXPAYLOAD
-/* NFSv2 is limited by the protocol specification, see RFC 1094 */
-#define NFSSVC_MAXBLKSIZE_V2 (8*1024)
-
-#ifdef __KERNEL__
-
-#include <linux/sunrpc/msg_prot.h>
-
-/*
- * Largest number of bytes we need to allocate for an NFS
- * call or reply. Used to control buffer sizes. We use
- * the length of v3 WRITE, READDIR and READDIR replies
- * which are an RPC header, up to 26 XDR units of reply
- * data, and some page data.
- *
- * Note that accuracy here doesn't matter too much as the
- * size is rounded up to a page size when allocating space.
- */
-#define NFSD_BUFSIZE ((RPC_MAX_HEADER_WITH_AUTH+26)*XDR_UNIT + NFSSVC_MAXBLKSIZE)
-
-#ifdef CONFIG_NFSD_V4
-# define NFSSVC_XDRSIZE NFS4_SVC_XDRSIZE
-#elif defined(CONFIG_NFSD_V3)
-# define NFSSVC_XDRSIZE NFS3_SVC_XDRSIZE
-#else
-# define NFSSVC_XDRSIZE NFS2_SVC_XDRSIZE
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_NFSD_CONST_H */
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 8a31a20efe7..f85308e688f 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -96,7 +96,6 @@ struct svc_export {
struct auth_domain * ex_client;
int ex_flags;
struct path ex_path;
- char *ex_pathname;
uid_t ex_anon_uid;
gid_t ex_anon_gid;
int ex_fsid;
@@ -137,6 +136,7 @@ struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
struct path *);
struct svc_export * rqst_exp_parent(struct svc_rqst *,
struct path *);
+struct svc_export * rqst_find_fsidzero_export(struct svc_rqst *);
int exp_rootfh(struct auth_domain *,
char *path, struct knfsd_fh *, int maxsize);
__be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index f76d80ccec1..ce4743a2601 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -14,11 +14,14 @@
#ifndef _LINUX_NFSD_FH_H
#define _LINUX_NFSD_FH_H
-# include <linux/types.h>
+#include <linux/types.h>
+#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs3.h>
+#include <linux/nfs4.h>
#ifdef __KERNEL__
# include <linux/sunrpc/svc.h>
#endif
-#include <linux/nfsd/const.h>
/*
* This is the old "dentry style" Linux NFSv2 file handle.
diff --git a/include/linux/nfsd/syscall.h b/include/linux/nfsd/syscall.h
deleted file mode 100644
index 812bc1e160d..00000000000
--- a/include/linux/nfsd/syscall.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * include/linux/nfsd/syscall.h
- *
- * This file holds all declarations for the knfsd syscall interface.
- *
- * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
- */
-
-#ifndef NFSD_SYSCALL_H
-#define NFSD_SYSCALL_H
-
-#include <linux/types.h>
-#include <linux/nfsd/export.h>
-
-/*
- * Version of the syscall interface
- */
-#define NFSCTL_VERSION 0x0201
-
-/*
- * These are the commands understood by nfsctl().
- */
-#define NFSCTL_SVC 0 /* This is a server process. */
-#define NFSCTL_ADDCLIENT 1 /* Add an NFS client. */
-#define NFSCTL_DELCLIENT 2 /* Remove an NFS client. */
-#define NFSCTL_EXPORT 3 /* export a file system. */
-#define NFSCTL_UNEXPORT 4 /* unexport a file system. */
-/*#define NFSCTL_UGIDUPDATE 5 / * update a client's uid/gid map. DISCARDED */
-/*#define NFSCTL_GETFH 6 / * get an fh by ino DISCARDED */
-#define NFSCTL_GETFD 7 /* get an fh by path (used by mountd) */
-#define NFSCTL_GETFS 8 /* get an fh by path with max FH len */
-
-/* SVC */
-struct nfsctl_svc {
- unsigned short svc_port;
- int svc_nthreads;
-};
-
-/* ADDCLIENT/DELCLIENT */
-struct nfsctl_client {
- char cl_ident[NFSCLNT_IDMAX+1];
- int cl_naddr;
- struct in_addr cl_addrlist[NFSCLNT_ADDRMAX];
- int cl_fhkeytype;
- int cl_fhkeylen;
- unsigned char cl_fhkey[NFSCLNT_KEYMAX];
-};
-
-/* EXPORT/UNEXPORT */
-struct nfsctl_export {
- char ex_client[NFSCLNT_IDMAX+1];
- char ex_path[NFS_MAXPATHLEN+1];
- __kernel_old_dev_t ex_dev;
- __kernel_ino_t ex_ino;
- int ex_flags;
- __kernel_uid_t ex_anon_uid;
- __kernel_gid_t ex_anon_gid;
-};
-
-/* GETFD */
-struct nfsctl_fdparm {
- struct sockaddr gd_addr;
- char gd_path[NFS_MAXPATHLEN+1];
- int gd_version;
-};
-
-/* GETFS - GET Filehandle with Size */
-struct nfsctl_fsparm {
- struct sockaddr gd_addr;
- char gd_path[NFS_MAXPATHLEN+1];
- int gd_maxlen;
-};
-
-/*
- * This is the argument union.
- */
-struct nfsctl_arg {
- int ca_version; /* safeguard */
- union {
- struct nfsctl_svc u_svc;
- struct nfsctl_client u_client;
- struct nfsctl_export u_export;
- struct nfsctl_fdparm u_getfd;
- struct nfsctl_fsparm u_getfs;
- /*
- * The following dummy member is needed to preserve binary compatibility
- * on platforms where alignof(void*)>alignof(int). It's needed because
- * this union used to contain a member (u_umap) which contained a
- * pointer.
- */
- void *u_ptr;
- } u;
-#define ca_svc u.u_svc
-#define ca_client u.u_client
-#define ca_export u.u_export
-#define ca_getfd u.u_getfd
-#define ca_getfs u.u_getfs
-};
-
-union nfsctl_res {
- __u8 cr_getfh[NFS_FHSIZE];
- struct knfsd_fh cr_getfs;
-};
-
-#ifdef __KERNEL__
-/*
- * Kernel syscall implementation.
- */
-extern int exp_addclient(struct nfsctl_client *ncp);
-extern int exp_delclient(struct nfsctl_client *ncp);
-extern int exp_export(struct nfsctl_export *nxp);
-extern int exp_unexport(struct nfsctl_export *nxp);
-
-#endif /* __KERNEL__ */
-
-#endif /* NFSD_SYSCALL_H */
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 8ad70dcac3f..8049bf77d79 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -161,6 +161,13 @@
* @NL80211_CMD_SET_BEACON: set the beacon on an access point interface
* using the %NL80211_ATTR_BEACON_INTERVAL, %NL80211_ATTR_DTIM_PERIOD,
* %NL80211_ATTR_BEACON_HEAD and %NL80211_ATTR_BEACON_TAIL attributes.
+ * Following attributes are provided for drivers that generate full Beacon
+ * and Probe Response frames internally: %NL80211_ATTR_SSID,
+ * %NL80211_ATTR_HIDDEN_SSID, %NL80211_ATTR_CIPHERS_PAIRWISE,
+ * %NL80211_ATTR_CIPHER_GROUP, %NL80211_ATTR_WPA_VERSIONS,
+ * %NL80211_ATTR_AKM_SUITES, %NL80211_ATTR_PRIVACY,
+ * %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_IE, %NL80211_ATTR_IE_PROBE_RESP,
+ * %NL80211_ATTR_IE_ASSOC_RESP.
* @NL80211_CMD_NEW_BEACON: add a new beacon to an access point interface,
* parameters are like for %NL80211_CMD_SET_BEACON.
* @NL80211_CMD_DEL_BEACON: remove the beacon, stop sending it
@@ -231,6 +238,8 @@
*
* @NL80211_CMD_GET_SCAN: get scan results
* @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters
+ * %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the
+ * probe requests at CCK rate or not.
* @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to
* NL80211_CMD_GET_SCAN and on the "scan" multicast group)
* @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons,
@@ -425,6 +434,8 @@
* specified using %NL80211_ATTR_DURATION. When called, this operation
* returns a cookie (%NL80211_ATTR_COOKIE) that will be included with the
* TX status event pertaining to the TX request.
+ * %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the
+ * management frames at CCK rate or not in 2GHz band.
* @NL80211_CMD_FRAME_WAIT_CANCEL: When an off-channel TX was requested, this
* command may be used with the corresponding cookie to cancel the wait
* time if it is known that it is no longer necessary.
@@ -492,6 +503,12 @@
* this command may also be sent by the driver as an MLME event to
* inform userspace of the new replay counter.
*
+ * @NL80211_CMD_PMKSA_CANDIDATE: This is used as an event to inform userspace
+ * of PMKSA caching dandidates.
+ *
+ * @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup).
+ * @NL80211_CMD_TDLS_MGMT: Send a TDLS management frame.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -616,6 +633,11 @@ enum nl80211_commands {
NL80211_CMD_SET_REKEY_OFFLOAD,
+ NL80211_CMD_PMKSA_CANDIDATE,
+
+ NL80211_CMD_TDLS_OPER,
+ NL80211_CMD_TDLS_MGMT,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -762,6 +784,8 @@ enum nl80211_commands {
* that can be added to a scan request
* @NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN: maximum length of information
* elements that can be added to a scheduled scan request
+ * @NL80211_ATTR_MAX_MATCH_SETS: maximum number of sets that can be
+ * used with @NL80211_ATTR_SCHED_SCAN_MATCH, a wiphy attribute.
*
* @NL80211_ATTR_SCAN_FREQUENCIES: nested attribute with frequencies (in MHz)
* @NL80211_ATTR_SCAN_SSIDS: nested attribute with SSIDs, leave out for passive
@@ -842,18 +866,20 @@ enum nl80211_commands {
* @NL80211_ATTR_STATUS_CODE: StatusCode for the %NL80211_CMD_CONNECT
* event (u16)
* @NL80211_ATTR_PRIVACY: Flag attribute, used with connect(), indicating
- * that protected APs should be used.
+ * that protected APs should be used. This is also used with NEW_BEACON to
+ * indicate that the BSS is to use protection.
*
- * @NL80211_ATTR_CIPHERS_PAIRWISE: Used with CONNECT and ASSOCIATE to
- * indicate which unicast key ciphers will be used with the connection
+ * @NL80211_ATTR_CIPHERS_PAIRWISE: Used with CONNECT, ASSOCIATE, and NEW_BEACON
+ * to indicate which unicast key ciphers will be used with the connection
* (an array of u32).
- * @NL80211_ATTR_CIPHER_GROUP: Used with CONNECT and ASSOCIATE to indicate
- * which group key cipher will be used with the connection (a u32).
- * @NL80211_ATTR_WPA_VERSIONS: Used with CONNECT and ASSOCIATE to indicate
- * which WPA version(s) the AP we want to associate with is using
+ * @NL80211_ATTR_CIPHER_GROUP: Used with CONNECT, ASSOCIATE, and NEW_BEACON to
+ * indicate which group key cipher will be used with the connection (a
+ * u32).
+ * @NL80211_ATTR_WPA_VERSIONS: Used with CONNECT, ASSOCIATE, and NEW_BEACON to
+ * indicate which WPA version(s) the AP we want to associate with is using
* (a u32 with flags from &enum nl80211_wpa_versions).
- * @NL80211_ATTR_AKM_SUITES: Used with CONNECT and ASSOCIATE to indicate
- * which key management algorithm(s) to use (an array of u32).
+ * @NL80211_ATTR_AKM_SUITES: Used with CONNECT, ASSOCIATE, and NEW_BEACON to
+ * indicate which key management algorithm(s) to use (an array of u32).
*
* @NL80211_ATTR_REQ_IE: (Re)association request information elements as
* sent out by the card, for ROAM and successful CONNECT events.
@@ -1002,6 +1028,24 @@ enum nl80211_commands {
* @NL80211_ATTR_SCHED_SCAN_INTERVAL: Interval between scheduled scan
* cycles, in msecs.
+
+ * @NL80211_ATTR_SCHED_SCAN_MATCH: Nested attribute with one or more
+ * sets of attributes to match during scheduled scans. Only BSSs
+ * that match any of the sets will be reported. These are
+ * pass-thru filter rules.
+ * For a match to succeed, the BSS must match all attributes of a
+ * set. Since not every hardware supports matching all types of
+ * attributes, there is no guarantee that the reported BSSs are
+ * fully complying with the match sets and userspace needs to be
+ * able to ignore them by itself.
+ * Thus, the implementation is somewhat hardware-dependent, but
+ * this is only an optimization and the userspace application
+ * needs to handle all the non-filtered results anyway.
+ * If the match attributes don't make sense when combined with
+ * the values passed in @NL80211_ATTR_SCAN_SSIDS (eg. if an SSID
+ * is included in the probe request, but the match attributes
+ * will never let it go through), -EINVAL may be returned.
+ * If ommited, no filtering is done.
*
* @NL80211_ATTR_INTERFACE_COMBINATIONS: Nested attribute listing the supported
* interface combinations. In each nested item, it contains attributes
@@ -1019,6 +1063,52 @@ enum nl80211_commands {
* being a list of supported rates as defined by IEEE 802.11 7.3.2.2 but
* without the length restriction (at most %NL80211_MAX_SUPP_RATES).
*
+ * @NL80211_ATTR_HIDDEN_SSID: indicates whether SSID is to be hidden from Beacon
+ * and Probe Response (when response to wildcard Probe Request); see
+ * &enum nl80211_hidden_ssid, represented as a u32
+ *
+ * @NL80211_ATTR_IE_PROBE_RESP: Information element(s) for Probe Response frame.
+ * This is used with %NL80211_CMD_NEW_BEACON and %NL80211_CMD_SET_BEACON to
+ * provide extra IEs (e.g., WPS/P2P IE) into Probe Response frames when the
+ * driver (or firmware) replies to Probe Request frames.
+ * @NL80211_ATTR_IE_ASSOC_RESP: Information element(s) for (Re)Association
+ * Response frames. This is used with %NL80211_CMD_NEW_BEACON and
+ * %NL80211_CMD_SET_BEACON to provide extra IEs (e.g., WPS/P2P IE) into
+ * (Re)Association Response frames when the driver (or firmware) replies to
+ * (Re)Association Request frames.
+ *
+ * @NL80211_ATTR_STA_WME: Nested attribute containing the wme configuration
+ * of the station, see &enum nl80211_sta_wme_attr.
+ * @NL80211_ATTR_SUPPORT_AP_UAPSD: the device supports uapsd when working
+ * as AP.
+ *
+ * @NL80211_ATTR_ROAM_SUPPORT: Indicates whether the firmware is capable of
+ * roaming to another AP in the same ESS if the signal lever is low.
+ *
+ * @NL80211_ATTR_PMKSA_CANDIDATE: Nested attribute containing the PMKSA caching
+ * candidate information, see &enum nl80211_pmksa_candidate_attr.
+ *
+ * @NL80211_ATTR_TX_NO_CCK_RATE: Indicates whether to use CCK rate or not
+ * for management frames transmission. In order to avoid p2p probe/action
+ * frames are being transmitted at CCK rate in 2GHz band, the user space
+ * applications use this attribute.
+ * This attribute is used with %NL80211_CMD_TRIGGER_SCAN and
+ * %NL80211_CMD_FRAME commands.
+ *
+ * @NL80211_ATTR_TDLS_ACTION: Low level TDLS action code (e.g. link setup
+ * request, link setup confirm, link teardown, etc.). Values are
+ * described in the TDLS (802.11z) specification.
+ * @NL80211_ATTR_TDLS_DIALOG_TOKEN: Non-zero token for uniquely identifying a
+ * TDLS conversation between two devices.
+ * @NL80211_ATTR_TDLS_OPERATION: High level TDLS operation; see
+ * &enum nl80211_tdls_operation, represented as a u8.
+ * @NL80211_ATTR_TDLS_SUPPORT: A flag indicating the device can operate
+ * as a TDLS peer sta.
+ * @NL80211_ATTR_TDLS_EXTERNAL_SETUP: The TDLS discovery/setup and teardown
+ * procedures should be performed by sending TDLS packets via
+ * %NL80211_CMD_TDLS_MGMT. Otherwise %NL80211_CMD_TDLS_OPER should be
+ * used for asking the driver to perform a TDLS operation.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -1224,6 +1314,29 @@ enum nl80211_attrs {
NL80211_ATTR_SCAN_SUPP_RATES,
+ NL80211_ATTR_HIDDEN_SSID,
+
+ NL80211_ATTR_IE_PROBE_RESP,
+ NL80211_ATTR_IE_ASSOC_RESP,
+
+ NL80211_ATTR_STA_WME,
+ NL80211_ATTR_SUPPORT_AP_UAPSD,
+
+ NL80211_ATTR_ROAM_SUPPORT,
+
+ NL80211_ATTR_SCHED_SCAN_MATCH,
+ NL80211_ATTR_MAX_MATCH_SETS,
+
+ NL80211_ATTR_PMKSA_CANDIDATE,
+
+ NL80211_ATTR_TX_NO_CCK_RATE,
+
+ NL80211_ATTR_TDLS_ACTION,
+ NL80211_ATTR_TDLS_DIALOG_TOKEN,
+ NL80211_ATTR_TDLS_OPERATION,
+ NL80211_ATTR_TDLS_SUPPORT,
+ NL80211_ATTR_TDLS_EXTERNAL_SETUP,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -1321,6 +1434,7 @@ enum nl80211_iftype {
* @NL80211_STA_FLAG_WME: station is WME/QoS capable
* @NL80211_STA_FLAG_MFP: station uses management frame protection
* @NL80211_STA_FLAG_AUTHENTICATED: station is authenticated
+ * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer
* @NL80211_STA_FLAG_MAX: highest station flag number currently defined
* @__NL80211_STA_FLAG_AFTER_LAST: internal use
*/
@@ -1331,6 +1445,7 @@ enum nl80211_sta_flags {
NL80211_STA_FLAG_WME,
NL80211_STA_FLAG_MFP,
NL80211_STA_FLAG_AUTHENTICATED,
+ NL80211_STA_FLAG_TDLS_PEER,
/* keep last */
__NL80211_STA_FLAG_AFTER_LAST,
@@ -1433,6 +1548,7 @@ enum nl80211_sta_bss_param {
* @NL80211_STA_INFO_BSS_PARAM: current station's view of BSS, nested attribute
* containing info as possible, see &enum nl80211_sta_bss_param
* @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected
+ * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update.
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -1454,6 +1570,7 @@ enum nl80211_sta_info {
NL80211_STA_INFO_RX_BITRATE,
NL80211_STA_INFO_BSS_PARAM,
NL80211_STA_INFO_CONNECTED_TIME,
+ NL80211_STA_INFO_STA_FLAGS,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -1683,6 +1800,26 @@ enum nl80211_reg_rule_attr {
};
/**
+ * enum nl80211_sched_scan_match_attr - scheduled scan match attributes
+ * @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching,
+ * only report BSS with matching SSID.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
+ * attribute number currently defined
+ * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_sched_scan_match_attr {
+ __NL80211_SCHED_SCAN_MATCH_ATTR_INVALID,
+
+ NL80211_ATTR_SCHED_SCAN_MATCH_SSID,
+
+ /* keep last */
+ __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
+ NL80211_SCHED_SCAN_MATCH_ATTR_MAX =
+ __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST - 1
+};
+
+/**
* enum nl80211_reg_rule_flags - regulatory rule flags
*
* @NL80211_RRF_NO_OFDM: OFDM modulation not allowed
@@ -1833,6 +1970,13 @@ enum nl80211_mntr_flags {
* @NL80211_MESHCONF_ELEMENT_TTL: specifies the value of TTL field set at a
* source mesh point for path selection elements.
*
+ * @NL80211_MESHCONF_HWMP_RANN_INTERVAL: The interval of time (in TUs) between
+ * root announcements are transmitted.
+ *
+ * @NL80211_MESHCONF_GATE_ANNOUNCEMENTS: Advertise that this mesh station has
+ * access to a broader network beyond the MBSS. This is done via Root
+ * Announcement frames.
+ *
* @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
*
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
@@ -1854,6 +1998,8 @@ enum nl80211_meshconf_params {
NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
NL80211_MESHCONF_HWMP_ROOTMODE,
NL80211_MESHCONF_ELEMENT_TTL,
+ NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+ NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
/* keep last */
__NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -2430,4 +2576,78 @@ enum nl80211_rekey_data {
MAX_NL80211_REKEY_DATA = NUM_NL80211_REKEY_DATA - 1
};
+/**
+ * enum nl80211_hidden_ssid - values for %NL80211_ATTR_HIDDEN_SSID
+ * @NL80211_HIDDEN_SSID_NOT_IN_USE: do not hide SSID (i.e., broadcast it in
+ * Beacon frames)
+ * @NL80211_HIDDEN_SSID_ZERO_LEN: hide SSID by using zero-length SSID element
+ * in Beacon frames
+ * @NL80211_HIDDEN_SSID_ZERO_CONTENTS: hide SSID by using correct length of SSID
+ * element in Beacon frames but zero out each byte in the SSID
+ */
+enum nl80211_hidden_ssid {
+ NL80211_HIDDEN_SSID_NOT_IN_USE,
+ NL80211_HIDDEN_SSID_ZERO_LEN,
+ NL80211_HIDDEN_SSID_ZERO_CONTENTS
+};
+
+/**
+ * enum nl80211_sta_wme_attr - station WME attributes
+ * @__NL80211_STA_WME_INVALID: invalid number for nested attribute
+ * @NL80211_STA_WME_UAPSD_QUEUES: bitmap of uapsd queues. the format
+ * is the same as the AC bitmap in the QoS info field.
+ * @NL80211_STA_WME_MAX_SP: max service period. the format is the same
+ * as the MAX_SP field in the QoS info field (but already shifted down).
+ * @__NL80211_STA_WME_AFTER_LAST: internal
+ * @NL80211_STA_WME_MAX: highest station WME attribute
+ */
+enum nl80211_sta_wme_attr {
+ __NL80211_STA_WME_INVALID,
+ NL80211_STA_WME_UAPSD_QUEUES,
+ NL80211_STA_WME_MAX_SP,
+
+ /* keep last */
+ __NL80211_STA_WME_AFTER_LAST,
+ NL80211_STA_WME_MAX = __NL80211_STA_WME_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_pmksa_candidate_attr - attributes for PMKSA caching candidates
+ * @__NL80211_PMKSA_CANDIDATE_INVALID: invalid number for nested attributes
+ * @NL80211_PMKSA_CANDIDATE_INDEX: candidate index (u32; the smaller, the higher
+ * priority)
+ * @NL80211_PMKSA_CANDIDATE_BSSID: candidate BSSID (6 octets)
+ * @NL80211_PMKSA_CANDIDATE_PREAUTH: RSN pre-authentication supported (flag)
+ * @NUM_NL80211_PMKSA_CANDIDATE: number of PMKSA caching candidate attributes
+ * (internal)
+ * @MAX_NL80211_PMKSA_CANDIDATE: highest PMKSA caching candidate attribute
+ * (internal)
+ */
+enum nl80211_pmksa_candidate_attr {
+ __NL80211_PMKSA_CANDIDATE_INVALID,
+ NL80211_PMKSA_CANDIDATE_INDEX,
+ NL80211_PMKSA_CANDIDATE_BSSID,
+ NL80211_PMKSA_CANDIDATE_PREAUTH,
+
+ /* keep last */
+ NUM_NL80211_PMKSA_CANDIDATE,
+ MAX_NL80211_PMKSA_CANDIDATE = NUM_NL80211_PMKSA_CANDIDATE - 1
+};
+
+/**
+ * enum nl80211_tdls_operation - values for %NL80211_ATTR_TDLS_OPERATION
+ * @NL80211_TDLS_DISCOVERY_REQ: Send a TDLS discovery request
+ * @NL80211_TDLS_SETUP: Setup TDLS link
+ * @NL80211_TDLS_TEARDOWN: Teardown a TDLS link which is already established
+ * @NL80211_TDLS_ENABLE_LINK: Enable TDLS link
+ * @NL80211_TDLS_DISABLE_LINK: Disable TDLS link
+ */
+enum nl80211_tdls_operation {
+ NL80211_TDLS_DISCOVERY_REQ,
+ NL80211_TDLS_SETUP,
+ NL80211_TDLS_TEARDOWN,
+ NL80211_TDLS_ENABLE_LINK,
+ NL80211_TDLS_DISABLE_LINK,
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/linux/opp.h b/include/linux/opp.h
index 7020e9736fc..87a9208f8ae 100644
--- a/include/linux/opp.h
+++ b/include/linux/opp.h
@@ -16,9 +16,14 @@
#include <linux/err.h>
#include <linux/cpufreq.h>
+#include <linux/notifier.h>
struct opp;
+enum opp_event {
+ OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+};
+
#if defined(CONFIG_PM_OPP)
unsigned long opp_get_voltage(struct opp *opp);
@@ -40,6 +45,8 @@ int opp_enable(struct device *dev, unsigned long freq);
int opp_disable(struct device *dev, unsigned long freq);
+struct srcu_notifier_head *opp_get_notifier(struct device *dev);
+
#else
static inline unsigned long opp_get_voltage(struct opp *opp)
{
@@ -89,6 +96,11 @@ static inline int opp_disable(struct device *dev, unsigned long freq)
{
return 0;
}
+
+struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_PM */
#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 49c8727eeb5..a4c562453f6 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -166,7 +166,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
/** lock for read/write safety */
-extern spinlock_t oprofilefs_lock;
+extern raw_spinlock_t oprofilefs_lock;
/**
* Add the contents of a circular buffer to the event buffer.
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 9fc01226055..f1b1ca1a09e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -174,6 +174,8 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1,
/* Device configuration is irrevocably lost if disabled into D3 */
PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
+ /* Provide indication device is assigned by a Virtual Machine Manager */
+ PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
};
enum pci_irq_reroute_variant {
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 5edc9014263..b9df9ed1adc 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -16,7 +16,7 @@
#ifdef CONFIG_SMP
struct percpu_counter {
- spinlock_t lock;
+ raw_spinlock_t lock;
s64 count;
#ifdef CONFIG_HOTPLUG_CPU
struct list_head list; /* All percpu_counters are on a list */
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
index 6fb13841db4..f53a4167c5f 100644
--- a/include/linux/phonet.h
+++ b/include/linux/phonet.h
@@ -24,6 +24,7 @@
#define LINUX_PHONET_H
#include <linux/types.h>
+#include <linux/socket.h>
/* Automatic protocol selection */
#define PN_PROTO_TRANSPORT 0
@@ -96,11 +97,11 @@ struct phonetmsg {
/* Phonet socket address structure */
struct sockaddr_pn {
- sa_family_t spn_family;
+ __kernel_sa_family_t spn_family;
__u8 spn_obj;
__u8 spn_dev;
__u8 spn_resource;
- __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3];
+ __u8 spn_zero[sizeof(struct sockaddr) - sizeof(__kernel_sa_family_t) - 3];
} __attribute__((packed));
/* Well known address */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 54fc4138955..79f337c4738 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -420,7 +420,7 @@ struct phy_driver {
/*
* Requests a Tx timestamp for 'skb'. The phy driver promises
- * to deliver it to the socket's error queue as soon as a
+ * to deliver it using skb_complete_tx_timestamp() as soon as a
* timestamp becomes available. One of the PTP_CLASS_ values
* is passed in 'type'.
*/
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
new file mode 100644
index 00000000000..88863531d86
--- /dev/null
+++ b/include/linux/pinctrl/machine.h
@@ -0,0 +1,107 @@
+/*
+ * Machine interface for the pinctrl subsystem.
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINMUX_MACHINE_H
+#define __LINUX_PINMUX_MACHINE_H
+
+/**
+ * struct pinmux_map - boards/machines shall provide this map for devices
+ * @name: the name of this specific map entry for the particular machine.
+ * This is the second parameter passed to pinmux_get() when you want
+ * to have several mappings to the same device
+ * @ctrl_dev: the pin control device to be used by this mapping, may be NULL
+ * if you provide .ctrl_dev_name instead (this is more common)
+ * @ctrl_dev_name: the name of the device controlling this specific mapping,
+ * the name must be the same as in your struct device*, may be NULL if
+ * you provide .ctrl_dev instead
+ * @function: a function in the driver to use for this mapping, the driver
+ * will lookup the function referenced by this ID on the specified
+ * pin control device
+ * @group: sometimes a function can map to different pin groups, so this
+ * selects a certain specific pin group to activate for the function, if
+ * left as NULL, the first applicable group will be used
+ * @dev: the device using this specific mapping, may be NULL if you provide
+ * .dev_name instead (this is more common)
+ * @dev_name: the name of the device using this specific mapping, the name
+ * must be the same as in your struct device*, may be NULL if you
+ * provide .dev instead
+ * @hog_on_boot: if this is set to true, the pin control subsystem will itself
+ * hog the mappings as the pinmux device drivers are attached, so this is
+ * typically used with system maps (mux mappings without an assigned
+ * device) that you want to get hogged and enabled by default as soon as
+ * a pinmux device supporting it is registered. These maps will not be
+ * disabled and put until the system shuts down.
+ */
+struct pinmux_map {
+ const char *name;
+ struct device *ctrl_dev;
+ const char *ctrl_dev_name;
+ const char *function;
+ const char *group;
+ struct device *dev;
+ const char *dev_name;
+ const bool hog_on_boot;
+};
+
+/*
+ * Convenience macro to set a simple map from a certain pin controller and a
+ * certain function to a named device
+ */
+#define PINMUX_MAP(a, b, c, d) \
+ { .name = a, .ctrl_dev_name = b, .function = c, .dev_name = d }
+
+/*
+ * Convenience macro to map a system function onto a certain pinctrl device.
+ * System functions are not assigned to a particular device.
+ */
+#define PINMUX_MAP_SYS(a, b, c) \
+ { .name = a, .ctrl_dev_name = b, .function = c }
+
+/*
+ * Convenience macro to map a function onto the primary device pinctrl device
+ * this is especially helpful on systems that have only one pin controller
+ * or need to set up a lot of mappings on the primary controller.
+ */
+#define PINMUX_MAP_PRIMARY(a, b, c) \
+ { .name = a, .ctrl_dev_name = "pinctrl.0", .function = b, \
+ .dev_name = c }
+
+/*
+ * Convenience macro to map a system function onto the primary pinctrl device.
+ * System functions are not assigned to a particular device.
+ */
+#define PINMUX_MAP_PRIMARY_SYS(a, b) \
+ { .name = a, .ctrl_dev_name = "pinctrl.0", .function = b }
+
+/*
+ * Convenience macro to map a system function onto the primary pinctrl device,
+ * to be hogged by the pinmux core until the system shuts down.
+ */
+#define PINMUX_MAP_PRIMARY_SYS_HOG(a, b) \
+ { .name = a, .ctrl_dev_name = "pinctrl.0", .function = b, \
+ .hog_on_boot = true }
+
+
+#ifdef CONFIG_PINMUX
+
+extern int pinmux_register_mappings(struct pinmux_map const *map,
+ unsigned num_maps);
+
+#else
+
+static inline int pinmux_register_mappings(struct pinmux_map const *map,
+ unsigned num_maps)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_PINMUX */
+#endif
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
new file mode 100644
index 00000000000..3605e947fa9
--- /dev/null
+++ b/include/linux/pinctrl/pinctrl.h
@@ -0,0 +1,133 @@
+/*
+ * Interface the pinctrl subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * This interface is used in the core to keep track of pins.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINCTRL_PINCTRL_H
+#define __LINUX_PINCTRL_PINCTRL_H
+
+#ifdef CONFIG_PINCTRL
+
+#include <linux/radix-tree.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+
+struct pinctrl_dev;
+struct pinmux_ops;
+struct gpio_chip;
+
+/**
+ * struct pinctrl_pin_desc - boards/machines provide information on their
+ * pins, pads or other muxable units in this struct
+ * @number: unique pin number from the global pin number space
+ * @name: a name for this pin
+ */
+struct pinctrl_pin_desc {
+ unsigned number;
+ const char *name;
+};
+
+/* Convenience macro to define a single named or anonymous pin descriptor */
+#define PINCTRL_PIN(a, b) { .number = a, .name = b }
+#define PINCTRL_PIN_ANON(a) { .number = a }
+
+/**
+ * struct pinctrl_gpio_range - each pin controller can provide subranges of
+ * the GPIO number space to be handled by the controller
+ * @node: list node for internal use
+ * @name: a name for the chip in this range
+ * @id: an ID number for the chip in this range
+ * @base: base offset of the GPIO range
+ * @npins: number of pins in the GPIO range, including the base number
+ * @gc: an optional pointer to a gpio_chip
+ */
+struct pinctrl_gpio_range {
+ struct list_head node;
+ const char *name;
+ unsigned int id;
+ unsigned int base;
+ unsigned int npins;
+ struct gpio_chip *gc;
+};
+
+/**
+ * struct pinctrl_ops - global pin control operations, to be implemented by
+ * pin controller drivers.
+ * @list_groups: list the number of selectable named groups available
+ * in this pinmux driver, the core will begin on 0 and call this
+ * repeatedly as long as it returns >= 0 to enumerate the groups
+ * @get_group_name: return the group name of the pin group
+ * @get_group_pins: return an array of pins corresponding to a certain
+ * group selector @pins, and the size of the array in @num_pins
+ * @pin_dbg_show: optional debugfs display hook that will provide per-device
+ * info for a certain pin in debugfs
+ */
+struct pinctrl_ops {
+ int (*list_groups) (struct pinctrl_dev *pctldev, unsigned selector);
+ const char *(*get_group_name) (struct pinctrl_dev *pctldev,
+ unsigned selector);
+ int (*get_group_pins) (struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins);
+ void (*pin_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned offset);
+};
+
+/**
+ * struct pinctrl_desc - pin controller descriptor, register this to pin
+ * control subsystem
+ * @name: name for the pin controller
+ * @pins: an array of pin descriptors describing all the pins handled by
+ * this pin controller
+ * @npins: number of descriptors in the array, usually just ARRAY_SIZE()
+ * of the pins field above
+ * @maxpin: since pin spaces may be sparse, there can he "holes" in the
+ * pin range, this attribute gives the maximum pin number in the
+ * total range. This should not be lower than npins for example,
+ * but may be equal to npins if you have no holes in the pin range.
+ * @pctlops: pin control operation vtable, to support global concepts like
+ * grouping of pins, this is optional.
+ * @pmxops: pinmux operation vtable, if you support pinmuxing in your driver
+ * @owner: module providing the pin controller, used for refcounting
+ */
+struct pinctrl_desc {
+ const char *name;
+ struct pinctrl_pin_desc const *pins;
+ unsigned int npins;
+ unsigned int maxpin;
+ struct pinctrl_ops *pctlops;
+ struct pinmux_ops *pmxops;
+ struct module *owner;
+};
+
+/* External interface to pin controller */
+extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
+ struct device *dev, void *driver_data);
+extern void pinctrl_unregister(struct pinctrl_dev *pctldev);
+extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin);
+extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range);
+extern void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range);
+extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev);
+extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev);
+#else
+
+
+/* Sufficiently stupid default function when pinctrl is not in use */
+static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin)
+{
+ return pin >= 0;
+}
+
+#endif /* !CONFIG_PINCTRL */
+
+#endif /* __LINUX_PINCTRL_PINCTRL_H */
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
new file mode 100644
index 00000000000..3c430e797ef
--- /dev/null
+++ b/include/linux/pinctrl/pinmux.h
@@ -0,0 +1,117 @@
+/*
+ * Interface the pinmux subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINCTRL_PINMUX_H
+#define __LINUX_PINCTRL_PINMUX_H
+
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include "pinctrl.h"
+
+/* This struct is private to the core and should be regarded as a cookie */
+struct pinmux;
+
+#ifdef CONFIG_PINMUX
+
+struct pinctrl_dev;
+
+/**
+ * struct pinmux_ops - pinmux operations, to be implemented by pin controller
+ * drivers that support pinmuxing
+ * @request: called by the core to see if a certain pin can be made available
+ * available for muxing. This is called by the core to acquire the pins
+ * before selecting any actual mux setting across a function. The driver
+ * is allowed to answer "no" by returning a negative error code
+ * @free: the reverse function of the request() callback, frees a pin after
+ * being requested
+ * @list_functions: list the number of selectable named functions available
+ * in this pinmux driver, the core will begin on 0 and call this
+ * repeatedly as long as it returns >= 0 to enumerate mux settings
+ * @get_function_name: return the function name of the muxing selector,
+ * called by the core to figure out which mux setting it shall map a
+ * certain device to
+ * @get_function_groups: return an array of groups names (in turn
+ * referencing pins) connected to a certain function selector. The group
+ * name can be used with the generic @pinctrl_ops to retrieve the
+ * actual pins affected. The applicable groups will be returned in
+ * @groups and the number of groups in @num_groups
+ * @enable: enable a certain muxing function with a certain pin group. The
+ * driver does not need to figure out whether enabling this function
+ * conflicts some other use of the pins in that group, such collisions
+ * are handled by the pinmux subsystem. The @func_selector selects a
+ * certain function whereas @group_selector selects a certain set of pins
+ * to be used. On simple controllers the latter argument may be ignored
+ * @disable: disable a certain muxing selector with a certain pin group
+ * @gpio_request_enable: requests and enables GPIO on a certain pin.
+ * Implement this only if you can mux every pin individually as GPIO. The
+ * affected GPIO range is passed along with an offset into that
+ * specific GPIO range - function selectors and pin groups are orthogonal
+ * to this, the core will however make sure the pins do not collide
+ */
+struct pinmux_ops {
+ int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
+ int (*free) (struct pinctrl_dev *pctldev, unsigned offset);
+ int (*list_functions) (struct pinctrl_dev *pctldev, unsigned selector);
+ const char *(*get_function_name) (struct pinctrl_dev *pctldev,
+ unsigned selector);
+ int (*get_function_groups) (struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups);
+ int (*enable) (struct pinctrl_dev *pctldev, unsigned func_selector,
+ unsigned group_selector);
+ void (*disable) (struct pinctrl_dev *pctldev, unsigned func_selector,
+ unsigned group_selector);
+ int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset);
+};
+
+/* External interface to pinmux */
+extern int pinmux_request_gpio(unsigned gpio);
+extern void pinmux_free_gpio(unsigned gpio);
+extern struct pinmux * __must_check pinmux_get(struct device *dev, const char *name);
+extern void pinmux_put(struct pinmux *pmx);
+extern int pinmux_enable(struct pinmux *pmx);
+extern void pinmux_disable(struct pinmux *pmx);
+
+#else /* !CONFIG_PINMUX */
+
+static inline int pinmux_request_gpio(unsigned gpio)
+{
+ return 0;
+}
+
+static inline void pinmux_free_gpio(unsigned gpio)
+{
+}
+
+static inline struct pinmux * __must_check pinmux_get(struct device *dev, const char *name)
+{
+ return NULL;
+}
+
+static inline void pinmux_put(struct pinmux *pmx)
+{
+}
+
+static inline int pinmux_enable(struct pinmux *pmx)
+{
+ return 0;
+}
+
+static inline void pinmux_disable(struct pinmux *pmx)
+{
+}
+
+#endif /* CONFIG_PINMUX */
+
+#endif /* __LINUX_PINCTRL_PINMUX_H */
diff --git a/include/linux/platform_data/dwc3-omap.h b/include/linux/platform_data/dwc3-omap.h
new file mode 100644
index 00000000000..ada401244e0
--- /dev/null
+++ b/include/linux/platform_data/dwc3-omap.h
@@ -0,0 +1,47 @@
+/**
+ * dwc3-omap.h - OMAP Specific Glue layer, header.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ * All rights reserved.
+ *
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+enum dwc3_omap_utmi_mode {
+ DWC3_OMAP_UTMI_MODE_UNKNOWN = 0,
+ DWC3_OMAP_UTMI_MODE_HW,
+ DWC3_OMAP_UTMI_MODE_SW,
+};
+
+struct dwc3_omap_data {
+ enum dwc3_omap_utmi_mode utmi_mode;
+};
diff --git a/include/linux/platform_data/exynos4_tmu.h b/include/linux/platform_data/exynos4_tmu.h
new file mode 100644
index 00000000000..39e038cca59
--- /dev/null
+++ b/include/linux/platform_data/exynos4_tmu.h
@@ -0,0 +1,83 @@
+/*
+ * exynos4_tmu.h - Samsung EXYNOS4 TMU (Thermal Management Unit)
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * Donggeun Kim <dg77.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_EXYNOS4_TMU_H
+#define _LINUX_EXYNOS4_TMU_H
+
+enum calibration_type {
+ TYPE_ONE_POINT_TRIMMING,
+ TYPE_TWO_POINT_TRIMMING,
+ TYPE_NONE,
+};
+
+/**
+ * struct exynos4_tmu_platform_data
+ * @threshold: basic temperature for generating interrupt
+ * 25 <= threshold <= 125 [unit: degree Celsius]
+ * @trigger_levels: array for each interrupt levels
+ * [unit: degree Celsius]
+ * 0: temperature for trigger_level0 interrupt
+ * condition for trigger_level0 interrupt:
+ * current temperature > threshold + trigger_levels[0]
+ * 1: temperature for trigger_level1 interrupt
+ * condition for trigger_level1 interrupt:
+ * current temperature > threshold + trigger_levels[1]
+ * 2: temperature for trigger_level2 interrupt
+ * condition for trigger_level2 interrupt:
+ * current temperature > threshold + trigger_levels[2]
+ * 3: temperature for trigger_level3 interrupt
+ * condition for trigger_level3 interrupt:
+ * current temperature > threshold + trigger_levels[3]
+ * @trigger_level0_en:
+ * 1 = enable trigger_level0 interrupt,
+ * 0 = disable trigger_level0 interrupt
+ * @trigger_level1_en:
+ * 1 = enable trigger_level1 interrupt,
+ * 0 = disable trigger_level1 interrupt
+ * @trigger_level2_en:
+ * 1 = enable trigger_level2 interrupt,
+ * 0 = disable trigger_level2 interrupt
+ * @trigger_level3_en:
+ * 1 = enable trigger_level3 interrupt,
+ * 0 = disable trigger_level3 interrupt
+ * @gain: gain of amplifier in the positive-TC generator block
+ * 0 <= gain <= 15
+ * @reference_voltage: reference voltage of amplifier
+ * in the positive-TC generator block
+ * 0 <= reference_voltage <= 31
+ * @cal_type: calibration type for temperature
+ *
+ * This structure is required for configuration of exynos4_tmu driver.
+ */
+struct exynos4_tmu_platform_data {
+ u8 threshold;
+ u8 trigger_levels[4];
+ bool trigger_level0_en;
+ bool trigger_level1_en;
+ bool trigger_level2_en;
+ bool trigger_level3_en;
+
+ u8 gain;
+ u8 reference_voltage;
+
+ enum calibration_type cal_type;
+};
+#endif /* _LINUX_EXYNOS4_TMU_H */
diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h
new file mode 100644
index 00000000000..e9d9149ddf3
--- /dev/null
+++ b/include/linux/platform_data/mv_usb.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MV_PLATFORM_USB_H
+#define __MV_PLATFORM_USB_H
+
+enum pxa_ehci_type {
+ EHCI_UNDEFINED = 0,
+ PXA_U2OEHCI, /* pxa 168, 9xx */
+ PXA_SPH, /* pxa 168, 9xx SPH */
+ MMP3_HSIC, /* mmp3 hsic */
+ MMP3_FSIC, /* mmp3 fsic */
+};
+
+enum {
+ MV_USB_MODE_OTG,
+ MV_USB_MODE_HOST,
+};
+
+enum {
+ VBUS_LOW = 0,
+ VBUS_HIGH = 1 << 0,
+};
+
+struct mv_usb_addon_irq {
+ unsigned int irq;
+ int (*poll)(void);
+};
+
+struct mv_usb_platform_data {
+ unsigned int clknum;
+ char **clkname;
+ struct mv_usb_addon_irq *id; /* Only valid for OTG. ID pin change*/
+ struct mv_usb_addon_irq *vbus; /* valid for OTG/UDC. VBUS change*/
+
+ /* only valid for HCD. OTG or Host only*/
+ unsigned int mode;
+
+ int (*phy_init)(unsigned int regbase);
+ void (*phy_deinit)(unsigned int regbase);
+ int (*set_vbus)(unsigned int vbus);
+};
+
+#endif
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
index abd28621527..88734e871e3 100644
--- a/include/linux/platform_data/ntc_thermistor.h
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -36,7 +36,7 @@ struct ntc_thermistor_platform_data {
* read_uV()
*
* How to setup pullup_ohm, pulldown_ohm, and connect is
- * described at Documentation/hwmon/ntc
+ * described at Documentation/hwmon/ntc_thermistor
*
* pullup/down_ohm: 0 for infinite / not-connected
*/
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 27bb05aae70..651a066686a 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -49,10 +49,54 @@ extern struct resource *platform_get_resource_byname(struct platform_device *, u
extern int platform_get_irq_byname(struct platform_device *, const char *);
extern int platform_add_devices(struct platform_device **, int);
-extern struct platform_device *platform_device_register_resndata(
+struct platform_device_info {
+ struct device *parent;
+
+ const char *name;
+ int id;
+
+ const struct resource *res;
+ unsigned int num_res;
+
+ const void *data;
+ size_t size_data;
+ u64 dma_mask;
+};
+extern struct platform_device *platform_device_register_full(
+ struct platform_device_info *pdevinfo);
+
+/**
+ * platform_device_register_resndata - add a platform-level device with
+ * resources and platform-specific data
+ *
+ * @parent: parent device for the device we're adding
+ * @name: base name of the device we're adding
+ * @id: instance id
+ * @res: set of resources that needs to be allocated for the device
+ * @num: number of resources
+ * @data: platform specific data for this platform device
+ * @size: size of platform specific data
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
+ */
+static inline struct platform_device *platform_device_register_resndata(
struct device *parent, const char *name, int id,
const struct resource *res, unsigned int num,
- const void *data, size_t size);
+ const void *data, size_t size) {
+
+ struct platform_device_info pdevinfo = {
+ .parent = parent,
+ .name = name,
+ .id = id,
+ .res = res,
+ .num_res = num,
+ .data = data,
+ .size_data = size,
+ .dma_mask = 0,
+ };
+
+ return platform_device_register_full(&pdevinfo);
+}
/**
* platform_device_register_simple - add a platform-level device and its resources
diff --git a/include/linux/pm.h b/include/linux/pm.h
index f7c84c9abd3..f15acb64681 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -326,6 +326,7 @@ extern struct dev_pm_ops generic_subsys_pm_ops;
* requested by a driver.
*/
+#define PM_EVENT_INVALID (-1)
#define PM_EVENT_ON 0x0000
#define PM_EVENT_FREEZE 0x0001
#define PM_EVENT_SUSPEND 0x0002
@@ -346,6 +347,7 @@ extern struct dev_pm_ops generic_subsys_pm_ops;
#define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND)
#define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME)
+#define PMSG_INVALID ((struct pm_message){ .event = PM_EVENT_INVALID, })
#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
@@ -366,6 +368,8 @@ extern struct dev_pm_ops generic_subsys_pm_ops;
#define PMSG_AUTO_RESUME ((struct pm_message) \
{ .event = PM_EVENT_AUTO_RESUME, })
+#define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0)
+
/**
* Device run-time power management status.
*
@@ -421,6 +425,22 @@ enum rpm_request {
struct wakeup_source;
+struct pm_domain_data {
+ struct list_head list_node;
+ struct device *dev;
+};
+
+struct pm_subsys_data {
+ spinlock_t lock;
+ unsigned int refcount;
+#ifdef CONFIG_PM_CLK
+ struct list_head clock_list;
+#endif
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+ struct pm_domain_data *domain_data;
+#endif
+};
+
struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
@@ -432,6 +452,7 @@ struct dev_pm_info {
struct list_head entry;
struct completion completion;
struct wakeup_source *wakeup;
+ bool wakeup_path:1;
#else
unsigned int should_wakeup:1;
#endif
@@ -462,10 +483,13 @@ struct dev_pm_info {
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
#endif
- void *subsys_data; /* Owned by the subsystem. */
+ struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
+ struct pm_qos_constraints *constraints;
};
extern void update_pm_runtime_accounting(struct device *dev);
+extern int dev_pm_get_subsys_data(struct device *dev);
+extern int dev_pm_put_subsys_data(struct device *dev);
/*
* Power domains provide callbacks that are executed during system suspend,
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
new file mode 100644
index 00000000000..8348866e7b0
--- /dev/null
+++ b/include/linux/pm_clock.h
@@ -0,0 +1,71 @@
+/*
+ * pm_clock.h - Definitions and headers related to device clocks.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef _LINUX_PM_CLOCK_H
+#define _LINUX_PM_CLOCK_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+struct pm_clk_notifier_block {
+ struct notifier_block nb;
+ struct dev_pm_domain *pm_domain;
+ char *con_ids[];
+};
+
+#ifdef CONFIG_PM_CLK
+static inline bool pm_clk_no_clocks(struct device *dev)
+{
+ return dev && dev->power.subsys_data
+ && list_empty(&dev->power.subsys_data->clock_list);
+}
+
+extern void pm_clk_init(struct device *dev);
+extern int pm_clk_create(struct device *dev);
+extern void pm_clk_destroy(struct device *dev);
+extern int pm_clk_add(struct device *dev, const char *con_id);
+extern void pm_clk_remove(struct device *dev, const char *con_id);
+extern int pm_clk_suspend(struct device *dev);
+extern int pm_clk_resume(struct device *dev);
+#else
+static inline bool pm_clk_no_clocks(struct device *dev)
+{
+ return true;
+}
+static inline void pm_clk_init(struct device *dev)
+{
+}
+static inline int pm_clk_create(struct device *dev)
+{
+ return -EINVAL;
+}
+static inline void pm_clk_destroy(struct device *dev)
+{
+}
+static inline int pm_clk_add(struct device *dev, const char *con_id)
+{
+ return -EINVAL;
+}
+static inline void pm_clk_remove(struct device *dev, const char *con_id)
+{
+}
+#define pm_clk_suspend NULL
+#define pm_clk_resume NULL
+#endif
+
+#ifdef CONFIG_HAVE_CLK
+extern void pm_clk_add_notifier(struct bus_type *bus,
+ struct pm_clk_notifier_block *clknb);
+#else
+static inline void pm_clk_add_notifier(struct bus_type *bus,
+ struct pm_clk_notifier_block *clknb)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index f9ec1736a11..65633e5a2bc 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -13,6 +13,7 @@
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
+ GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */
GPD_STATE_BUSY, /* Something is happening to the PM domain */
GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
GPD_STATE_POWER_OFF, /* PM domain is off */
@@ -25,15 +26,14 @@ struct dev_power_governor {
struct generic_pm_domain {
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
- struct list_head sd_node; /* Node in the parent's subdomain list */
- struct generic_pm_domain *parent; /* Parent PM domain */
- struct list_head sd_list; /* List of dubdomains */
+ struct list_head master_links; /* Links with PM domain as a master */
+ struct list_head slave_links; /* Links with PM domain as a slave */
struct list_head dev_list; /* List of devices */
struct mutex lock;
struct dev_power_governor *gov;
struct work_struct power_off_work;
unsigned int in_progress; /* Number of devices being suspended now */
- unsigned int sd_count; /* Number of subdomains with power "on" */
+ atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
wait_queue_head_t status_wait_queue;
struct task_struct *poweroff_task; /* Powering off task */
@@ -42,6 +42,7 @@ struct generic_pm_domain {
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
bool suspend_power_off; /* Power status before system suspend */
+ bool dev_irq_safe; /* Device callbacks are IRQ-safe */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
int (*start_device)(struct device *dev);
@@ -54,12 +55,23 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
return container_of(pd, struct generic_pm_domain, domain);
}
-struct dev_list_entry {
- struct list_head node;
- struct device *dev;
+struct gpd_link {
+ struct generic_pm_domain *master;
+ struct list_head master_node;
+ struct generic_pm_domain *slave;
+ struct list_head slave_node;
+};
+
+struct generic_pm_domain_data {
+ struct pm_domain_data base;
bool need_restore;
};
+static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd)
+{
+ return container_of(pdd, struct generic_pm_domain_data, base);
+}
+
#ifdef CONFIG_PM_GENERIC_DOMAINS
extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev);
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
new file mode 100644
index 00000000000..83b0ea302a8
--- /dev/null
+++ b/include/linux/pm_qos.h
@@ -0,0 +1,155 @@
+#ifndef _LINUX_PM_QOS_H
+#define _LINUX_PM_QOS_H
+/* interface for the pm_qos_power infrastructure of the linux kernel.
+ *
+ * Mark Gross <mgross@linux.intel.com>
+ */
+#include <linux/plist.h>
+#include <linux/notifier.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+
+#define PM_QOS_RESERVED 0
+#define PM_QOS_CPU_DMA_LATENCY 1
+#define PM_QOS_NETWORK_LATENCY 2
+#define PM_QOS_NETWORK_THROUGHPUT 3
+
+#define PM_QOS_NUM_CLASSES 4
+#define PM_QOS_DEFAULT_VALUE -1
+
+#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
+#define PM_QOS_DEV_LAT_DEFAULT_VALUE 0
+
+struct pm_qos_request {
+ struct plist_node node;
+ int pm_qos_class;
+};
+
+struct dev_pm_qos_request {
+ struct plist_node node;
+ struct device *dev;
+};
+
+enum pm_qos_type {
+ PM_QOS_UNITIALIZED,
+ PM_QOS_MAX, /* return the largest value */
+ PM_QOS_MIN /* return the smallest value */
+};
+
+/*
+ * Note: The lockless read path depends on the CPU accessing
+ * target_value atomically. Atomic access is only guaranteed on all CPU
+ * types linux supports for 32 bit quantites
+ */
+struct pm_qos_constraints {
+ struct plist_head list;
+ s32 target_value; /* Do not change to 64 bit */
+ s32 default_value;
+ enum pm_qos_type type;
+ struct blocking_notifier_head *notifiers;
+};
+
+/* Action requested to pm_qos_update_target */
+enum pm_qos_req_action {
+ PM_QOS_ADD_REQ, /* Add a new request */
+ PM_QOS_UPDATE_REQ, /* Update an existing request */
+ PM_QOS_REMOVE_REQ /* Remove an existing request */
+};
+
+static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
+{
+ return req->dev != 0;
+}
+
+#ifdef CONFIG_PM
+int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
+ enum pm_qos_req_action action, int value);
+void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
+ s32 value);
+void pm_qos_update_request(struct pm_qos_request *req,
+ s32 new_value);
+void pm_qos_remove_request(struct pm_qos_request *req);
+
+int pm_qos_request(int pm_qos_class);
+int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
+int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+int pm_qos_request_active(struct pm_qos_request *req);
+s32 pm_qos_read_value(struct pm_qos_constraints *c);
+
+s32 dev_pm_qos_read_value(struct device *dev);
+int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+ s32 value);
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
+int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
+int dev_pm_qos_add_notifier(struct device *dev,
+ struct notifier_block *notifier);
+int dev_pm_qos_remove_notifier(struct device *dev,
+ struct notifier_block *notifier);
+int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
+int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
+void dev_pm_qos_constraints_init(struct device *dev);
+void dev_pm_qos_constraints_destroy(struct device *dev);
+#else
+static inline int pm_qos_update_target(struct pm_qos_constraints *c,
+ struct plist_node *node,
+ enum pm_qos_req_action action,
+ int value)
+ { return 0; }
+static inline void pm_qos_add_request(struct pm_qos_request *req,
+ int pm_qos_class, s32 value)
+ { return; }
+static inline void pm_qos_update_request(struct pm_qos_request *req,
+ s32 new_value)
+ { return; }
+static inline void pm_qos_remove_request(struct pm_qos_request *req)
+ { return; }
+
+static inline int pm_qos_request(int pm_qos_class)
+ { return 0; }
+static inline int pm_qos_add_notifier(int pm_qos_class,
+ struct notifier_block *notifier)
+ { return 0; }
+static inline int pm_qos_remove_notifier(int pm_qos_class,
+ struct notifier_block *notifier)
+ { return 0; }
+static inline int pm_qos_request_active(struct pm_qos_request *req)
+ { return 0; }
+static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
+ { return 0; }
+
+static inline s32 dev_pm_qos_read_value(struct device *dev)
+ { return 0; }
+static inline int dev_pm_qos_add_request(struct device *dev,
+ struct dev_pm_qos_request *req,
+ s32 value)
+ { return 0; }
+static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+ s32 new_value)
+ { return 0; }
+static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+ { return 0; }
+static inline int dev_pm_qos_add_notifier(struct device *dev,
+ struct notifier_block *notifier)
+ { return 0; }
+static inline int dev_pm_qos_remove_notifier(struct device *dev,
+ struct notifier_block *notifier)
+ { return 0; }
+static inline int dev_pm_qos_add_global_notifier(
+ struct notifier_block *notifier)
+ { return 0; }
+static inline int dev_pm_qos_remove_global_notifier(
+ struct notifier_block *notifier)
+ { return 0; }
+static inline void dev_pm_qos_constraints_init(struct device *dev)
+{
+ dev->power.power_state = PMSG_ON;
+}
+static inline void dev_pm_qos_constraints_destroy(struct device *dev)
+{
+ dev->power.power_state = PMSG_INVALID;
+}
+#endif
+
+#endif
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
deleted file mode 100644
index a7d87f911ca..00000000000
--- a/include/linux/pm_qos_params.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef _LINUX_PM_QOS_PARAMS_H
-#define _LINUX_PM_QOS_PARAMS_H
-/* interface for the pm_qos_power infrastructure of the linux kernel.
- *
- * Mark Gross <mgross@linux.intel.com>
- */
-#include <linux/plist.h>
-#include <linux/notifier.h>
-#include <linux/miscdevice.h>
-
-#define PM_QOS_RESERVED 0
-#define PM_QOS_CPU_DMA_LATENCY 1
-#define PM_QOS_NETWORK_LATENCY 2
-#define PM_QOS_NETWORK_THROUGHPUT 3
-
-#define PM_QOS_NUM_CLASSES 4
-#define PM_QOS_DEFAULT_VALUE -1
-
-#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
-#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
-#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
-
-struct pm_qos_request_list {
- struct plist_node list;
- int pm_qos_class;
-};
-
-void pm_qos_add_request(struct pm_qos_request_list *l, int pm_qos_class, s32 value);
-void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
- s32 new_value);
-void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req);
-
-int pm_qos_request(int pm_qos_class);
-int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_request_active(struct pm_qos_request_list *req);
-
-#endif
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index daac05d751b..70b284024d9 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -251,46 +251,4 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
__pm_runtime_use_autosuspend(dev, false);
}
-struct pm_clk_notifier_block {
- struct notifier_block nb;
- struct dev_pm_domain *pm_domain;
- char *con_ids[];
-};
-
-#ifdef CONFIG_PM_CLK
-extern int pm_clk_init(struct device *dev);
-extern void pm_clk_destroy(struct device *dev);
-extern int pm_clk_add(struct device *dev, const char *con_id);
-extern void pm_clk_remove(struct device *dev, const char *con_id);
-extern int pm_clk_suspend(struct device *dev);
-extern int pm_clk_resume(struct device *dev);
-#else
-static inline int pm_clk_init(struct device *dev)
-{
- return -EINVAL;
-}
-static inline void pm_clk_destroy(struct device *dev)
-{
-}
-static inline int pm_clk_add(struct device *dev, const char *con_id)
-{
- return -EINVAL;
-}
-static inline void pm_clk_remove(struct device *dev, const char *con_id)
-{
-}
-#define pm_clk_suspend NULL
-#define pm_clk_resume NULL
-#endif
-
-#ifdef CONFIG_HAVE_CLK
-extern void pm_clk_add_notifier(struct bus_type *bus,
- struct pm_clk_notifier_block *clknb);
-#else
-static inline void pm_clk_add_notifier(struct bus_type *bus,
- struct pm_clk_notifier_block *clknb)
-{
-}
-#endif
-
#endif
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index cf793bbbd05..ef35bb73f69 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -58,7 +58,7 @@ struct prop_local_percpu {
*/
int shift;
unsigned long period;
- spinlock_t lock; /* protect the snapshot state */
+ raw_spinlock_t lock; /* protect the snapshot state */
};
int prop_local_init_percpu(struct prop_local_percpu *pl);
@@ -106,11 +106,11 @@ struct prop_local_single {
*/
unsigned long period;
int shift;
- spinlock_t lock; /* protect the snapshot state */
+ raw_spinlock_t lock; /* protect the snapshot state */
};
#define INIT_PROP_LOCAL_SINGLE(name) \
-{ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
}
int prop_local_init_single(struct prop_local_single *pl);
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 2f007157fab..e11ccb4cf48 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -8,7 +8,7 @@
#define DEFAULT_RATELIMIT_BURST 10
struct ratelimit_state {
- spinlock_t lock; /* protect the state */
+ raw_spinlock_t lock; /* protect the state */
int interval;
int burst;
@@ -20,7 +20,7 @@ struct ratelimit_state {
#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
\
struct ratelimit_state name = { \
- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
.interval = interval_init, \
.burst = burst_init, \
}
@@ -28,7 +28,7 @@ struct ratelimit_state {
static inline void ratelimit_state_init(struct ratelimit_state *rs,
int interval, int burst)
{
- spin_lock_init(&rs->lock);
+ raw_spin_lock_init(&rs->lock);
rs->interval = interval;
rs->burst = burst;
rs->printed = 0;
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 8f4f881a0ad..2cf4226ade7 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -33,6 +33,7 @@
#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H
+#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
@@ -64,32 +65,74 @@ static inline void rcutorture_record_progress(unsigned long vernum)
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
+/* Exported common interfaces */
+
+#ifdef CONFIG_PREEMPT_RCU
+
/**
- * struct rcu_head - callback structure for use with RCU
- * @next: next update requests in a list
- * @func: actual update function to call after the grace period.
+ * call_rcu() - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all pre-existing RCU read-side
+ * critical sections have completed. However, the callback function
+ * might well execute concurrently with RCU read-side critical sections
+ * that started after call_rcu() was invoked. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
*/
-struct rcu_head {
- struct rcu_head *next;
- void (*func)(struct rcu_head *head);
-};
+extern void call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
-/* Exported common interfaces */
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+/* In classic RCU, call_rcu() is just call_rcu_sched(). */
+#define call_rcu call_rcu_sched
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+/**
+ * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by :
+ * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
+ * OR
+ * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+ * These may be nested.
+ */
+extern void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
+
+/**
+ * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_sched() assumes
+ * that the read-side critical sections end on enabling of preemption
+ * or on voluntary preemption.
+ * RCU read-side critical sections are delimited by :
+ * - rcu_read_lock_sched() and rcu_read_unlock_sched(),
+ * OR
+ * anything that disables preemption.
+ * These may be nested.
+ */
extern void call_rcu_sched(struct rcu_head *head,
void (*func)(struct rcu_head *rcu));
-extern void synchronize_sched(void);
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
-
-static inline void __rcu_read_lock_bh(void)
-{
- local_bh_disable();
-}
-static inline void __rcu_read_unlock_bh(void)
-{
- local_bh_enable();
-}
+extern void synchronize_sched(void);
#ifdef CONFIG_PREEMPT_RCU
@@ -152,6 +195,15 @@ static inline void rcu_exit_nohz(void)
#endif /* #else #ifdef CONFIG_NO_HZ */
+/*
+ * Infrastructure to implement the synchronize_() primitives in
+ * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
+ */
+
+typedef void call_rcu_func_t(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
+void wait_rcu_gp(call_rcu_func_t crf);
+
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
@@ -297,19 +349,31 @@ extern int rcu_my_thread_group_empty(void);
/**
* rcu_lockdep_assert - emit lockdep splat if specified condition not met
* @c: condition to check
+ * @s: informative message
*/
-#define rcu_lockdep_assert(c) \
+#define rcu_lockdep_assert(c, s) \
do { \
static bool __warned; \
if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
__warned = true; \
- lockdep_rcu_dereference(__FILE__, __LINE__); \
+ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
} while (0)
+#define rcu_sleep_check() \
+ do { \
+ rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
+ "Illegal context switch in RCU-bh" \
+ " read-side critical section"); \
+ rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \
+ "Illegal context switch in RCU-sched"\
+ " read-side critical section"); \
+ } while (0)
+
#else /* #ifdef CONFIG_PROVE_RCU */
-#define rcu_lockdep_assert(c) do { } while (0)
+#define rcu_lockdep_assert(c, s) do { } while (0)
+#define rcu_sleep_check() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -338,14 +402,16 @@ extern int rcu_my_thread_group_empty(void);
#define __rcu_dereference_check(p, c, space) \
({ \
typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
- rcu_lockdep_assert(c); \
+ rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
+ " usage"); \
rcu_dereference_sparse(p, space); \
smp_read_barrier_depends(); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
- rcu_lockdep_assert(c); \
+ rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
+ " usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
@@ -359,15 +425,15 @@ extern int rcu_my_thread_group_empty(void);
#define __rcu_dereference_index_check(p, c) \
({ \
typeof(p) _________p1 = ACCESS_ONCE(p); \
- rcu_lockdep_assert(c); \
+ rcu_lockdep_assert(c, \
+ "suspicious rcu_dereference_index_check()" \
+ " usage"); \
smp_read_barrier_depends(); \
(_________p1); \
})
#define __rcu_assign_pointer(p, v, space) \
({ \
- if (!__builtin_constant_p(v) || \
- ((v) != NULL)) \
- smp_wmb(); \
+ smp_wmb(); \
(p) = (typeof(*v) __force space *)(v); \
})
@@ -500,26 +566,6 @@ extern int rcu_my_thread_group_empty(void);
#define rcu_dereference_protected(p, c) \
__rcu_dereference_protected((p), (c), __rcu)
-/**
- * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * This is the RCU-bh counterpart to rcu_dereference_protected().
- */
-#define rcu_dereference_bh_protected(p, c) \
- __rcu_dereference_protected((p), (c), __rcu)
-
-/**
- * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * This is the RCU-sched counterpart to rcu_dereference_protected().
- */
-#define rcu_dereference_sched_protected(p, c) \
- __rcu_dereference_protected((p), (c), __rcu)
-
/**
* rcu_dereference() - fetch RCU-protected pointer for dereferencing
@@ -630,7 +676,7 @@ static inline void rcu_read_unlock(void)
*/
static inline void rcu_read_lock_bh(void)
{
- __rcu_read_lock_bh();
+ local_bh_disable();
__acquire(RCU_BH);
rcu_read_acquire_bh();
}
@@ -644,7 +690,7 @@ static inline void rcu_read_unlock_bh(void)
{
rcu_read_release_bh();
__release(RCU_BH);
- __rcu_read_unlock_bh();
+ local_bh_enable();
}
/**
@@ -698,11 +744,18 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* any prior initialization. Returns the value assigned.
*
* Inserts memory barriers on architectures that require them
- * (pretty much all of them other than x86), and also prevents
- * the compiler from reordering the code that initializes the
- * structure after the pointer assignment. More importantly, this
- * call documents which pointers will be dereferenced by RCU read-side
- * code.
+ * (which is most of them), and also prevents the compiler from
+ * reordering the code that initializes the structure after the pointer
+ * assignment. More importantly, this call documents which pointers
+ * will be dereferenced by RCU read-side code.
+ *
+ * In some special cases, you may use RCU_INIT_POINTER() instead
+ * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
+ * to the fact that it does not constrain either the CPU or the compiler.
+ * That said, using RCU_INIT_POINTER() when you should have used
+ * rcu_assign_pointer() is a very bad thing that results in
+ * impossible-to-diagnose memory corruption. So please be careful.
+ * See the RCU_INIT_POINTER() comment header for details.
*/
#define rcu_assign_pointer(p, v) \
__rcu_assign_pointer((p), (v), __rcu)
@@ -710,105 +763,38 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
/**
* RCU_INIT_POINTER() - initialize an RCU protected pointer
*
- * Initialize an RCU-protected pointer in such a way to avoid RCU-lockdep
- * splats.
+ * Initialize an RCU-protected pointer in special cases where readers
+ * do not need ordering constraints on the CPU or the compiler. These
+ * special cases are:
+ *
+ * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
+ * 2. The caller has taken whatever steps are required to prevent
+ * RCU readers from concurrently accessing this pointer -or-
+ * 3. The referenced data structure has already been exposed to
+ * readers either at compile time or via rcu_assign_pointer() -and-
+ * a. You have not made -any- reader-visible changes to
+ * this structure since then -or-
+ * b. It is OK for readers accessing this structure from its
+ * new location to see the old state of the structure. (For
+ * example, the changes were to statistical counters or to
+ * other state where exact synchronization is not required.)
+ *
+ * Failure to follow these rules governing use of RCU_INIT_POINTER() will
+ * result in impossible-to-diagnose memory corruption. As in the structures
+ * will look OK in crash dumps, but any concurrent RCU readers might
+ * see pre-initialized values of the referenced data structure. So
+ * please be very careful how you use RCU_INIT_POINTER()!!!
+ *
+ * If you are creating an RCU-protected linked structure that is accessed
+ * by a single external-to-structure RCU-protected pointer, then you may
+ * use RCU_INIT_POINTER() to initialize the internal RCU-protected
+ * pointers, but you must use rcu_assign_pointer() to initialize the
+ * external-to-structure pointer -after- you have completely initialized
+ * the reader-accessible portions of the linked structure.
*/
#define RCU_INIT_POINTER(p, v) \
p = (typeof(*v) __force __rcu *)(v)
-/* Infrastructure to implement the synchronize_() primitives. */
-
-struct rcu_synchronize {
- struct rcu_head head;
- struct completion completion;
-};
-
-extern void wakeme_after_rcu(struct rcu_head *head);
-
-#ifdef CONFIG_PREEMPT_RCU
-
-/**
- * call_rcu() - Queue an RCU callback for invocation after a grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all pre-existing RCU read-side
- * critical sections have completed. However, the callback function
- * might well execute concurrently with RCU read-side critical sections
- * that started after call_rcu() was invoked. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- */
-extern void call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
-
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-
-/* In classic RCU, call_rcu() is just call_rcu_sched(). */
-#define call_rcu call_rcu_sched
-
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
-/**
- * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_bh() assumes
- * that the read-side critical sections end on completion of a softirq
- * handler. This means that read-side critical sections in process
- * context must not be interrupted by softirqs. This interface is to be
- * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by :
- * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
- * OR
- * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
- * These may be nested.
- */
-extern void call_rcu_bh(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
-
-/*
- * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
- * by call_rcu() and rcu callback execution, and are therefore not part of the
- * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
- */
-
-#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-# define STATE_RCU_HEAD_READY 0
-# define STATE_RCU_HEAD_QUEUED 1
-
-extern struct debug_obj_descr rcuhead_debug_descr;
-
-static inline void debug_rcu_head_queue(struct rcu_head *head)
-{
- WARN_ON_ONCE((unsigned long)head & 0x3);
- debug_object_activate(head, &rcuhead_debug_descr);
- debug_object_active_state(head, &rcuhead_debug_descr,
- STATE_RCU_HEAD_READY,
- STATE_RCU_HEAD_QUEUED);
-}
-
-static inline void debug_rcu_head_unqueue(struct rcu_head *head)
-{
- debug_object_active_state(head, &rcuhead_debug_descr,
- STATE_RCU_HEAD_QUEUED,
- STATE_RCU_HEAD_READY);
- debug_object_deactivate(head, &rcuhead_debug_descr);
-}
-#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-static inline void debug_rcu_head_queue(struct rcu_head *head)
-{
-}
-
-static inline void debug_rcu_head_unqueue(struct rcu_head *head)
-{
-}
-#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-
static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
{
return offset < 4096;
@@ -827,18 +813,6 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
call_rcu(head, (rcu_callback)offset);
}
-extern void kfree(const void *);
-
-static inline void __rcu_reclaim(struct rcu_head *head)
-{
- unsigned long offset = (unsigned long)head->func;
-
- if (__is_kfree_rcu_offset(offset))
- kfree((void *)head - offset);
- else
- head->func(head);
-}
-
/**
* kfree_rcu() - kfree an object after a grace period.
* @ptr: pointer to kfree
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 52b3e0281fd..00b7a5e493d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,9 +27,23 @@
#include <linux/cache.h>
+#ifdef CONFIG_RCU_BOOST
static inline void rcu_init(void)
{
}
+#else /* #ifdef CONFIG_RCU_BOOST */
+void rcu_init(void);
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
+
+static inline void rcu_barrier_bh(void)
+{
+ wait_rcu_gp(call_rcu_bh);
+}
+
+static inline void rcu_barrier_sched(void)
+{
+ wait_rcu_gp(call_rcu_sched);
+}
#ifdef CONFIG_TINY_RCU
@@ -45,9 +59,13 @@ static inline void rcu_barrier(void)
#else /* #ifdef CONFIG_TINY_RCU */
-void rcu_barrier(void);
void synchronize_rcu_expedited(void);
+static inline void rcu_barrier(void)
+{
+ wait_rcu_gp(call_rcu);
+}
+
#endif /* #else #ifdef CONFIG_TINY_RCU */
static inline void synchronize_rcu_bh(void)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index e65d06634dd..67458468f1a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -67,6 +67,8 @@ static inline void synchronize_rcu_bh_expedited(void)
}
extern void rcu_barrier(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 60a65cd7e1a..3daac2d8dc3 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -20,9 +20,77 @@
struct i2c_client;
struct spi_device;
+/* An enum of all the supported cache types */
+enum regcache_type {
+ REGCACHE_NONE,
+ REGCACHE_INDEXED,
+ REGCACHE_RBTREE,
+ REGCACHE_LZO
+};
+
+/**
+ * Default value for a register. We use an array of structs rather
+ * than a simple array as many modern devices have very sparse
+ * register maps.
+ *
+ * @reg: Register address.
+ * @def: Register default value.
+ */
+struct reg_default {
+ unsigned int reg;
+ unsigned int def;
+};
+
+/**
+ * Configuration for the register map of a device.
+ *
+ * @reg_bits: Number of bits in a register address, mandatory.
+ * @val_bits: Number of bits in a register value, mandatory.
+ *
+ * @writeable_reg: Optional callback returning true if the register
+ * can be written to.
+ * @readable_reg: Optional callback returning true if the register
+ * can be read from.
+ * @volatile_reg: Optional callback returning true if the register
+ * value can't be cached.
+ * @precious_reg: Optional callback returning true if the rgister
+ * should not be read outside of a call from the driver
+ * (eg, a clear on read interrupt status register).
+ *
+ * @max_register: Optional, specifies the maximum valid register index.
+ * @reg_defaults: Power on reset values for registers (for use with
+ * register cache support).
+ * @num_reg_defaults: Number of elements in reg_defaults.
+ *
+ * @read_flag_mask: Mask to be set in the top byte of the register when doing
+ * a read.
+ * @write_flag_mask: Mask to be set in the top byte of the register when doing
+ * a write. If both read_flag_mask and write_flag_mask are
+ * empty the regmap_bus default masks are used.
+ *
+ * @cache_type: The actual cache type.
+ * @reg_defaults_raw: Power on reset values for registers (for use with
+ * register cache support).
+ * @num_reg_defaults_raw: Number of elements in reg_defaults_raw.
+ */
struct regmap_config {
int reg_bits;
int val_bits;
+
+ bool (*writeable_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+ bool (*volatile_reg)(struct device *dev, unsigned int reg);
+ bool (*precious_reg)(struct device *dev, unsigned int reg);
+
+ unsigned int max_register;
+ struct reg_default *reg_defaults;
+ unsigned int num_reg_defaults;
+ enum regcache_type cache_type;
+ const void *reg_defaults_raw;
+ unsigned int num_reg_defaults_raw;
+
+ u8 read_flag_mask;
+ u8 write_flag_mask;
};
typedef int (*regmap_hw_write)(struct device *dev, const void *data,
@@ -37,25 +105,18 @@ typedef int (*regmap_hw_read)(struct device *dev,
/**
* Description of a hardware bus for the register map infrastructure.
*
- * @list: Internal use.
- * @type: Bus type, used to identify bus to be used for a device.
* @write: Write operation.
* @gather_write: Write operation with split register/value, return -ENOTSUPP
* if not implemented on a given device.
* @read: Read operation. Data is returned in the buffer used to transmit
* data.
- * @owner: Module with the bus implementation, used to pin the implementation
- * in memory.
* @read_flag_mask: Mask to be set in the top byte of the register when doing
* a read.
*/
struct regmap_bus {
- struct list_head list;
- struct bus_type *type;
regmap_hw_write write;
regmap_hw_gather_write gather_write;
regmap_hw_read read;
- struct module *owner;
u8 read_flag_mask;
};
@@ -79,4 +140,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val);
+int regcache_sync(struct regmap *map);
+void regcache_cache_only(struct regmap *map, bool enable);
+void regcache_cache_bypass(struct regmap *map, bool enable);
+
#endif
diff --git a/include/linux/rfkill-gpio.h b/include/linux/rfkill-gpio.h
index a175d059803..4d09f6eab35 100644
--- a/include/linux/rfkill-gpio.h
+++ b/include/linux/rfkill-gpio.h
@@ -30,6 +30,8 @@
* @reset_gpio: GPIO which is used for reseting rfkill switch
* @shutdown_gpio: GPIO which is used for shutdown of rfkill switch
* @power_clk_name: [optional] name of clk to turn off while blocked
+ * @gpio_runtime_close: clean up platform specific gpio configuration
+ * @gpio_runtime_setup: set up platform specific gpio configuration
*/
struct rfkill_gpio_platform_data {
@@ -38,6 +40,8 @@ struct rfkill_gpio_platform_data {
int shutdown_gpio;
const char *power_clk_name;
enum rfkill_type type;
+ void (*gpio_runtime_close)(struct platform_device *);
+ int (*gpio_runtime_setup)(struct platform_device *);
};
#endif /* __RFKILL_GPIO_H */
diff --git a/include/linux/rose.h b/include/linux/rose.h
index c7b4b184c82..1fcfe95893b 100644
--- a/include/linux/rose.h
+++ b/include/linux/rose.h
@@ -7,6 +7,9 @@
#ifndef ROSE_KERNEL_H
#define ROSE_KERNEL_H
+#include <linux/socket.h>
+#include <linux/ax25.h>
+
#define ROSE_MTU 251
#define ROSE_MAX_DIGIS 6
@@ -44,7 +47,7 @@ typedef struct {
} rose_address;
struct sockaddr_rose {
- sa_family_t srose_family;
+ __kernel_sa_family_t srose_family;
rose_address srose_addr;
ax25_address srose_call;
int srose_ndigis;
@@ -52,7 +55,7 @@ struct sockaddr_rose {
};
struct full_sockaddr_rose {
- sa_family_t srose_family;
+ __kernel_sa_family_t srose_family;
rose_address srose_addr;
ax25_address srose_call;
unsigned int srose_ndigis;
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 34701241b67..d5b13bc07a0 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -22,7 +22,7 @@
*/
struct rw_semaphore {
__s32 activity;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 6a6741440cb..63d40655439 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -25,7 +25,7 @@ struct rw_semaphore;
/* All arch specific implementations share the same struct */
struct rw_semaphore {
long count;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -56,9 +56,11 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \
- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, \
+ __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 41d0237fd44..ede8a6585e3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -270,7 +270,6 @@ extern void init_idle_bootup_task(struct task_struct *idle);
extern int runqueue_is_locked(int cpu);
-extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void select_nohz_load_balancer(int stop_tick);
extern int get_nohz_timer_target(void);
@@ -510,7 +509,7 @@ struct task_cputime {
struct thread_group_cputimer {
struct task_cputime cputime;
int running;
- spinlock_t lock;
+ raw_spinlock_t lock;
};
#include <linux/rwsem.h>
@@ -1260,9 +1259,6 @@ struct task_struct {
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
char rcu_read_unlock_special;
-#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
- int rcu_boosted;
-#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
@@ -2165,7 +2161,8 @@ extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
-extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
+extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
+ const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
@@ -2565,7 +2562,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
static inline void thread_group_cputime_init(struct signal_struct *sig)
{
- spin_lock_init(&sig->cputimer.lock);
+ raw_spin_lock_init(&sig->cputimer.lock);
}
/*
diff --git a/include/linux/security.h b/include/linux/security.h
index ebd2a53a3d0..19d8e04e168 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -36,6 +36,7 @@
#include <linux/key.h>
#include <linux/xfrm.h>
#include <linux/slab.h>
+#include <linux/xattr.h>
#include <net/flow.h>
/* Maximum number of letters for an LSM name string */
@@ -147,6 +148,10 @@ extern int mmap_min_addr_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
+/* security_inode_init_security callback function to write xattrs */
+typedef int (*initxattrs) (struct inode *inode,
+ const struct xattr *xattr_array, void *fs_data);
+
#ifdef CONFIG_SECURITY
struct security_mnt_opts {
@@ -1367,7 +1372,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @inode_getsecctx:
* Returns a string containing all relavent security context information
*
- * @inode we wish to set the security context of.
+ * @inode we wish to get the security context of.
* @ctx is a pointer in which to place the allocated security context.
* @ctxlen points to the place to put the length of @ctx.
* This is the main security structure.
@@ -1655,6 +1660,8 @@ struct security_operations {
extern int security_init(void);
extern int security_module_enable(struct security_operations *ops);
extern int register_security(struct security_operations *ops);
+extern void __init security_fixup_ops(struct security_operations *ops);
+
/* Security operations */
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
@@ -1704,8 +1711,11 @@ int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
int security_inode_alloc(struct inode *inode);
void security_inode_free(struct inode *inode);
int security_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, char **name,
- void **value, size_t *len);
+ const struct qstr *qstr,
+ initxattrs initxattrs, void *fs_data);
+int security_old_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, char **name,
+ void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, int mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
@@ -2034,11 +2044,19 @@ static inline void security_inode_free(struct inode *inode)
static inline int security_inode_init_security(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
- char **name,
- void **value,
- size_t *len)
+ initxattrs initxattrs,
+ void *fs_data)
{
- return -EOPNOTSUPP;
+ return 0;
+}
+
+static inline int security_old_inode_init_security(struct inode *inode,
+ struct inode *dir,
+ const struct qstr *qstr,
+ char **name, void **value,
+ size_t *len)
+{
+ return 0;
}
static inline int security_inode_create(struct inode *dir,
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 39fa04966aa..dc368b8ce21 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -14,14 +14,14 @@
/* Please don't access any members of this structure directly */
struct semaphore {
- spinlock_t lock;
+ raw_spinlock_t lock;
unsigned int count;
struct list_head wait_list;
};
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
- .lock = __SPIN_LOCK_UNLOCKED((name).lock), \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \
.count = n, \
.wait_list = LIST_HEAD_INIT((name).wait_list), \
}
diff --git a/include/linux/serial.h b/include/linux/serial.h
index ef914061511..97ff8e27a6c 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -211,6 +211,7 @@ struct serial_rs485 {
#define SER_RS485_RTS_ON_SEND (1 << 1)
#define SER_RS485_RTS_AFTER_SEND (1 << 2)
#define SER_RS485_RTS_BEFORE_SEND (1 << 3)
+#define SER_RS485_RX_DURING_TX (1 << 4)
__u32 delay_rts_before_send; /* Milliseconds */
__u32 delay_rts_after_send; /* Milliseconds */
__u32 padding[5]; /* Memory is cheap, new structs
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 97f5b45bbc0..1f05bbeac01 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -35,6 +35,7 @@ struct plat_serial8250_port {
void (*set_termios)(struct uart_port *,
struct ktermios *new,
struct ktermios *old);
+ int (*handle_irq)(struct uart_port *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned old);
};
@@ -80,6 +81,7 @@ extern void serial8250_do_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old);
extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate);
+int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
extern void serial8250_set_isa_configurator(void (*v)
(int port, struct uart_port *up,
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index a5c31146a33..eadf33d0abb 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -46,7 +46,8 @@
#define PORT_AR7 18 /* Texas Instruments AR7 internal UART */
#define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */
#define PORT_TEGRA 20 /* NVIDIA Tegra internal UART */
-#define PORT_MAX_8250 20 /* max port ID */
+#define PORT_XR17D15X 21 /* Exar XR17D15x UART */
+#define PORT_MAX_8250 21 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
@@ -300,6 +301,7 @@ struct uart_port {
void (*set_termios)(struct uart_port *,
struct ktermios *new,
struct ktermios *old);
+ int (*handle_irq)(struct uart_port *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int old);
unsigned int irq; /* irq number */
@@ -317,9 +319,7 @@ struct uart_port {
#define UPIO_MEM32 (3)
#define UPIO_AU (4) /* Au1x00 type IO */
#define UPIO_TSI (5) /* Tsi108/109 type IO */
-#define UPIO_DWAPB (6) /* DesignWare APB UART */
-#define UPIO_RM9000 (7) /* RM9000 type IO */
-#define UPIO_DWAPB32 (8) /* DesignWare APB UART (32 bit accesses) */
+#define UPIO_RM9000 (6) /* RM9000 type IO */
unsigned int read_status_mask; /* driver specific */
unsigned int ignore_status_mask; /* driver specific */
@@ -350,6 +350,7 @@ struct uart_port {
#define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16))
#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))
+#define UPF_EXAR_EFR ((__force upf_t) (1 << 25))
/* The exact UART type is known and should not be probed. */
#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27))
#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28))
@@ -384,7 +385,6 @@ struct uart_state {
int pm_state;
struct circ_buf xmit;
- struct tasklet_struct tlet;
struct uart_port *uart_port;
};
diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h
index c75bda37c18..8ce70d76f83 100644
--- a/include/linux/serial_reg.h
+++ b/include/linux/serial_reg.h
@@ -152,6 +152,7 @@
* LCR=0xBF (or DLAB=1 for 16C660)
*/
#define UART_EFR 2 /* I/O: Extended Features Register */
+#define UART_XR_EFR 9 /* I/O: Extended Features Register (XR17D15x) */
#define UART_EFR_CTS 0x80 /* CTS flow control */
#define UART_EFR_RTS 0x40 /* RTS flow control */
#define UART_EFR_SCD 0x20 /* Special character detect */
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
new file mode 100644
index 00000000000..2076acf8294
--- /dev/null
+++ b/include/linux/sh_eth.h
@@ -0,0 +1,25 @@
+#ifndef __ASM_SH_ETH_H__
+#define __ASM_SH_ETH_H__
+
+#include <linux/phy.h>
+
+enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
+enum {
+ SH_ETH_REG_GIGABIT,
+ SH_ETH_REG_FAST_SH4,
+ SH_ETH_REG_FAST_SH3_SH2
+};
+
+struct sh_eth_plat_data {
+ int phy;
+ int edmac_endian;
+ int register_type;
+ phy_interface_t phy_interface;
+ void (*set_mdio_gate)(void *addr);
+
+ unsigned char mac_addr[6];
+ unsigned no_ether_link:1;
+ unsigned ether_link_active_low:1;
+};
+
+#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 8bd383caa36..6a6b352326d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -29,6 +29,7 @@
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
+#include <linux/dma-mapping.h>
/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
@@ -45,6 +46,11 @@
#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
+/* return minimum truesize of one skb containing X bytes of data */
+#define SKB_TRUESIZE(X) ((X) + \
+ SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
/* A. Checksumming of received packets by device.
*
* NONE: device failed to checksum this packet.
@@ -134,7 +140,9 @@ struct sk_buff;
typedef struct skb_frag_struct skb_frag_t;
struct skb_frag_struct {
- struct page *page;
+ struct {
+ struct page *p;
+ } page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
__u32 page_offset;
__u32 size;
@@ -144,6 +152,26 @@ struct skb_frag_struct {
#endif
};
+static inline unsigned int skb_frag_size(const skb_frag_t *frag)
+{
+ return frag->size;
+}
+
+static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
+{
+ frag->size = size;
+}
+
+static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
+{
+ frag->size += delta;
+}
+
+static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
+{
+ frag->size -= delta;
+}
+
#define HAVE_HW_TIME_STAMP
/**
@@ -322,6 +350,8 @@ typedef unsigned char *sk_buff_data_t;
* @queue_mapping: Queue mapping for multiqueue devices
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
+ * @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
+ * ports.
* @dma_cookie: a cookie to one of several possible DMA operations
* done by skb DMA functions
* @secmark: security marking
@@ -414,6 +444,7 @@ struct sk_buff {
__u8 ndisc_nodetype:2;
#endif
__u8 ooo_okay:1;
+ __u8 l4_rxhash:1;
kmemcheck_bitfield_end(flags2);
/* 0/13 bit hole */
@@ -521,6 +552,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}
+extern void skb_recycle(struct sk_buff *skb);
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
@@ -573,11 +605,11 @@ extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config,
struct ts_state *state);
-extern __u32 __skb_get_rxhash(struct sk_buff *skb);
+extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
if (!skb->rxhash)
- skb->rxhash = __skb_get_rxhash(skb);
+ __skb_get_rxhash(skb);
return skb->rxhash;
}
@@ -823,9 +855,9 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
-static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
+static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
- struct sk_buff *list = ((struct sk_buff *)list_)->next;
+ struct sk_buff *list = ((const struct sk_buff *)list_)->next;
if (list == (struct sk_buff *)list_)
list = NULL;
return list;
@@ -844,9 +876,9 @@ static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
-static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
+static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
- struct sk_buff *list = ((struct sk_buff *)list_)->prev;
+ struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
if (list == (struct sk_buff *)list_)
list = NULL;
return list;
@@ -1123,18 +1155,51 @@ static inline int skb_pagelen(const struct sk_buff *skb)
int i, len = 0;
for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
- len += skb_shinfo(skb)->frags[i].size;
+ len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
return len + skb_headlen(skb);
}
-static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
- struct page *page, int off, int size)
+/**
+ * __skb_fill_page_desc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset to the data with @page
+ * @size: the length of the data
+ *
+ * Initialises the @i'th fragment of @skb to point to &size bytes at
+ * offset @off within @page.
+ *
+ * Does not take any additional reference on the fragment.
+ */
+static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+ struct page *page, int off, int size)
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- frag->page = page;
+ frag->page.p = page;
frag->page_offset = off;
- frag->size = size;
+ skb_frag_size_set(frag, size);
+}
+
+/**
+ * skb_fill_page_desc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset to the data with @page
+ * @size: the length of the data
+ *
+ * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
+ * @skb to point to &size bytes at offset @off within @page. In
+ * addition updates @skb such that @i is the last fragment.
+ *
+ * Does not take any additional reference on the fragment.
+ */
+static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
+ struct page *page, int off, int size)
+{
+ __skb_fill_page_desc(skb, i, page, off, size);
skb_shinfo(skb)->nr_frags = i + 1;
}
@@ -1629,6 +1694,137 @@ static inline void netdev_free_page(struct net_device *dev, struct page *page)
}
/**
+ * skb_frag_page - retrieve the page refered to by a paged fragment
+ * @frag: the paged fragment
+ *
+ * Returns the &struct page associated with @frag.
+ */
+static inline struct page *skb_frag_page(const skb_frag_t *frag)
+{
+ return frag->page.p;
+}
+
+/**
+ * __skb_frag_ref - take an addition reference on a paged fragment.
+ * @frag: the paged fragment
+ *
+ * Takes an additional reference on the paged fragment @frag.
+ */
+static inline void __skb_frag_ref(skb_frag_t *frag)
+{
+ get_page(skb_frag_page(frag));
+}
+
+/**
+ * skb_frag_ref - take an addition reference on a paged fragment of an skb.
+ * @skb: the buffer
+ * @f: the fragment offset.
+ *
+ * Takes an additional reference on the @f'th paged fragment of @skb.
+ */
+static inline void skb_frag_ref(struct sk_buff *skb, int f)
+{
+ __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
+}
+
+/**
+ * __skb_frag_unref - release a reference on a paged fragment.
+ * @frag: the paged fragment
+ *
+ * Releases a reference on the paged fragment @frag.
+ */
+static inline void __skb_frag_unref(skb_frag_t *frag)
+{
+ put_page(skb_frag_page(frag));
+}
+
+/**
+ * skb_frag_unref - release a reference on a paged fragment of an skb.
+ * @skb: the buffer
+ * @f: the fragment offset
+ *
+ * Releases a reference on the @f'th paged fragment of @skb.
+ */
+static inline void skb_frag_unref(struct sk_buff *skb, int f)
+{
+ __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
+}
+
+/**
+ * skb_frag_address - gets the address of the data contained in a paged fragment
+ * @frag: the paged fragment buffer
+ *
+ * Returns the address of the data within @frag. The page must already
+ * be mapped.
+ */
+static inline void *skb_frag_address(const skb_frag_t *frag)
+{
+ return page_address(skb_frag_page(frag)) + frag->page_offset;
+}
+
+/**
+ * skb_frag_address_safe - gets the address of the data contained in a paged fragment
+ * @frag: the paged fragment buffer
+ *
+ * Returns the address of the data within @frag. Checks that the page
+ * is mapped and returns %NULL otherwise.
+ */
+static inline void *skb_frag_address_safe(const skb_frag_t *frag)
+{
+ void *ptr = page_address(skb_frag_page(frag));
+ if (unlikely(!ptr))
+ return NULL;
+
+ return ptr + frag->page_offset;
+}
+
+/**
+ * __skb_frag_set_page - sets the page contained in a paged fragment
+ * @frag: the paged fragment
+ * @page: the page to set
+ *
+ * Sets the fragment @frag to contain @page.
+ */
+static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
+{
+ frag->page.p = page;
+}
+
+/**
+ * skb_frag_set_page - sets the page contained in a paged fragment of an skb
+ * @skb: the buffer
+ * @f: the fragment offset
+ * @page: the page to set
+ *
+ * Sets the @f'th fragment of @skb to contain @page.
+ */
+static inline void skb_frag_set_page(struct sk_buff *skb, int f,
+ struct page *page)
+{
+ __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
+}
+
+/**
+ * skb_frag_dma_map - maps a paged fragment via the DMA API
+ * @device: the device to map the fragment to
+ * @frag: the paged fragment to map
+ * @offset: the offset within the fragment (starting at the
+ * fragment's own offset)
+ * @size: the number of bytes to map
+ * @direction: the direction of the mapping (%PCI_DMA_*)
+ *
+ * Maps the page associated with @frag to @device.
+ */
+static inline dma_addr_t skb_frag_dma_map(struct device *dev,
+ const skb_frag_t *frag,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ return dma_map_page(dev, skb_frag_page(frag),
+ frag->page_offset + offset, size, dir);
+}
+
+/**
* skb_clone_writable - is the header of a clone writable
* @skb: buffer to check
* @len: length up to which to write
@@ -1636,7 +1832,7 @@ static inline void netdev_free_page(struct net_device *dev, struct page *page)
* Returns true if modifying the header part of the cloned buffer
* does not requires the data to be copied.
*/
-static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
+static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
return !skb_header_cloned(skb) &&
skb_headroom(skb) + len <= skb->hdr_len;
@@ -1730,13 +1926,13 @@ static inline int skb_add_data(struct sk_buff *skb,
}
static inline int skb_can_coalesce(struct sk_buff *skb, int i,
- struct page *page, int off)
+ const struct page *page, int off)
{
if (i) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
- return page == frag->page &&
- off == frag->page_offset + frag->size;
+ return page == skb_frag_page(frag) &&
+ off == frag->page_offset + skb_frag_size(frag);
}
return 0;
}
@@ -2020,8 +2216,13 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
/**
* skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
*
+ * PHY drivers may accept clones of transmitted packets for
+ * timestamping via their phy_driver.txtstamp method. These drivers
+ * must call this function to return the skb back to the stack, with
+ * or without a timestamp.
+ *
* @skb: clone of the the original outgoing packet
- * @hwtstamps: hardware time stamps
+ * @hwtstamps: hardware time stamps, may be NULL if not available
*
*/
void skb_complete_tx_timestamp(struct sk_buff *skb,
@@ -2257,7 +2458,8 @@ static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
/* LRO sets gso_size but not gso_type, whereas if GSO is really
* wanted then gso_type will be set. */
- struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+
if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
unlikely(shinfo->gso_type == 0)) {
__skb_warn_lro_forwarding(skb);
@@ -2281,7 +2483,7 @@ static inline void skb_forward_csum(struct sk_buff *skb)
* Instead of forcing ip_summed to CHECKSUM_NONE, we can
* use this helper, to document places where we make this assertion.
*/
-static inline void skb_checksum_none_assert(struct sk_buff *skb)
+static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
BUG_ON(skb->ip_summed != CHECKSUM_NONE);
@@ -2290,5 +2492,25 @@ static inline void skb_checksum_none_assert(struct sk_buff *skb)
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
+static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
+{
+ if (irqs_disabled())
+ return false;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+ return false;
+
+ if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+ return false;
+
+ skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+ if (skb_end_pointer(skb) - skb->head < skb_size)
+ return false;
+
+ if (skb_shared(skb) || skb_cloned(skb))
+ return false;
+
+ return true;
+}
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 8623217f84d..f10ed7b4a71 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -25,7 +25,7 @@ struct ssb_sprom {
u8 et1phyaddr; /* MII address for enet1 */
u8 et0mdcport; /* MDIO for enet0 */
u8 et1mdcport; /* MDIO for enet1 */
- u8 board_rev; /* Board revision number from SPROM. */
+ u16 board_rev; /* Board revision number from SPROM. */
u8 country_code; /* Country Code */
u16 leddc_on_time; /* LED Powersave Duty Cycle On Count */
u16 leddc_off_time; /* LED Powersave Duty Cycle Off Count */
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index efbf459d571..98941203a27 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -462,6 +462,46 @@
#define SSB_SPROM8_OFDM5GLPO 0x014A /* 5.2GHz OFDM power offset */
#define SSB_SPROM8_OFDM5GHPO 0x014E /* 5.8GHz OFDM power offset */
+/* Values for boardflags_lo read from SPROM */
+#define SSB_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistance */
+#define SSB_BFL_PACTRL 0x0002 /* GPIO 9 controlling the PA */
+#define SSB_BFL_AIRLINEMODE 0x0004 /* implements GPIO 13 radio disable indication */
+#define SSB_BFL_RSSI 0x0008 /* software calculates nrssi slope. */
+#define SSB_BFL_ENETSPI 0x0010 /* has ephy roboswitch spi */
+#define SSB_BFL_XTAL_NOSLOW 0x0020 /* no slow clock available */
+#define SSB_BFL_CCKHIPWR 0x0040 /* can do high power CCK transmission */
+#define SSB_BFL_ENETADM 0x0080 /* has ADMtek switch */
+#define SSB_BFL_ENETVLAN 0x0100 /* can do vlan */
+#define SSB_BFL_AFTERBURNER 0x0200 /* supports Afterburner mode */
+#define SSB_BFL_NOPCI 0x0400 /* board leaves PCI floating */
+#define SSB_BFL_FEM 0x0800 /* supports the Front End Module */
+#define SSB_BFL_EXTLNA 0x1000 /* has an external LNA */
+#define SSB_BFL_HGPA 0x2000 /* had high gain PA */
+#define SSB_BFL_BTCMOD 0x4000 /* BFL_BTCOEXIST is given in alternate GPIOs */
+#define SSB_BFL_ALTIQ 0x8000 /* alternate I/Q settings */
+
+/* Values for boardflags_hi read from SPROM */
+#define SSB_BFH_NOPA 0x0001 /* has no PA */
+#define SSB_BFH_RSSIINV 0x0002 /* RSSI uses positive slope (not TSSI) */
+#define SSB_BFH_PAREF 0x0004 /* uses the PARef LDO */
+#define SSB_BFH_3TSWITCH 0x0008 /* uses a triple throw switch shared with bluetooth */
+#define SSB_BFH_PHASESHIFT 0x0010 /* can support phase shifter */
+#define SSB_BFH_BUCKBOOST 0x0020 /* has buck/booster */
+#define SSB_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna with bluetooth */
+
+/* Values for boardflags2_lo read from SPROM */
+#define SSB_BFL2_RXBB_INT_REG_DIS 0x0001 /* external RX BB regulator present */
+#define SSB_BFL2_APLL_WAR 0x0002 /* alternative A-band PLL settings implemented */
+#define SSB_BFL2_TXPWRCTRL_EN 0x0004 /* permits enabling TX Power Control */
+#define SSB_BFL2_2X4_DIV 0x0008 /* 2x4 diversity switch */
+#define SSB_BFL2_5G_PWRGAIN 0x0010 /* supports 5G band power gain */
+#define SSB_BFL2_PCIEWAR_OVR 0x0020 /* overrides ASPM and Clkreq settings */
+#define SSB_BFL2_CAESERS_BRD 0x0040 /* is Caesers board (unused) */
+#define SSB_BFL2_BTC3WIRE 0x0080 /* used 3-wire bluetooth coexist */
+#define SSB_BFL2_SKWRKFEM_BRD 0x0100 /* 4321mcm93 uses Skyworks FEM */
+#define SSB_BFL2_SPUR_WAR 0x0200 /* has a workaround for clock-harmonic spurs */
+#define SSB_BFL2_GPLL_WAR 0x0400 /* altenative G-band PLL settings implemented */
+
/* Values for SSB_SPROM1_BINF_CCODE */
enum {
SSB_SPROM1CCODE_WORLD = 0,
diff --git a/drivers/net/sungem_phy.h b/include/linux/sungem_phy.h
index af02f9479cb..bd9be9f59d3 100644
--- a/drivers/net/sungem_phy.h
+++ b/include/linux/sungem_phy.h
@@ -61,7 +61,7 @@ struct mii_phy
/* Pass in a struct mii_phy with dev, mdio_read and mdio_write
* filled, the remaining fields will be filled on return
*/
-extern int mii_phy_probe(struct mii_phy *phy, int mii_id);
+extern int sungem_phy_probe(struct mii_phy *phy, int mii_id);
/* MII definitions missing from mii.h */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index db7bcaf7c5b..492486a7448 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -9,6 +9,7 @@
#ifndef _LINUX_SUNRPC_CLNT_H
#define _LINUX_SUNRPC_CLNT_H
+#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
@@ -161,7 +162,7 @@ const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
size_t rpc_ntop(const struct sockaddr *, char *, const size_t);
size_t rpc_pton(const char *, const size_t,
struct sockaddr *, const size_t);
-char * rpc_sockaddr2uaddr(const struct sockaddr *);
+char * rpc_sockaddr2uaddr(const struct sockaddr *, gfp_t);
size_t rpc_uaddr2sockaddr(const char *, const size_t,
struct sockaddr *, const size_t);
@@ -218,7 +219,13 @@ static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
{
const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
- return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
+
+ if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
+ return false;
+ else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ return sin1->sin6_scope_id == sin2->sin6_scope_id;
+
+ return true;
}
static inline bool __rpc_copy_addr6(struct sockaddr *dst,
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index cf14db975da..e4ea43058d8 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -44,6 +44,8 @@ RPC_I(struct inode *inode)
return container_of(inode, struct rpc_inode, vfs_inode);
}
+extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *,
+ char __user *, size_t);
extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *);
struct rpc_clnt;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 223588a976a..d8d5d93071b 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -212,11 +212,6 @@ static inline void svc_putu32(struct kvec *iov, __be32 val)
iov->iov_len += sizeof(__be32);
}
-union svc_addr_u {
- struct in_addr addr;
- struct in6_addr addr6;
-};
-
/*
* The context of a single thread, including the request currently being
* processed.
@@ -225,8 +220,12 @@ struct svc_rqst {
struct list_head rq_list; /* idle list */
struct list_head rq_all; /* all threads list */
struct svc_xprt * rq_xprt; /* transport ptr */
+
struct sockaddr_storage rq_addr; /* peer address */
size_t rq_addrlen;
+ struct sockaddr_storage rq_daddr; /* dest addr of request
+ * - reply from here */
+ size_t rq_daddrlen;
struct svc_serv * rq_server; /* RPC service definition */
struct svc_pool * rq_pool; /* thread pool */
@@ -255,9 +254,6 @@ struct svc_rqst {
unsigned short
rq_secure : 1; /* secure port */
- union svc_addr_u rq_daddr; /* dest addr of request
- * - reply from here */
-
void * rq_argp; /* decoded arguments */
void * rq_resp; /* xdr'd results */
void * rq_auth_data; /* flavor-specific data */
@@ -300,6 +296,21 @@ static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
return (struct sockaddr *) &rqst->rq_addr;
}
+static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
+{
+ return (struct sockaddr_in *) &rqst->rq_daddr;
+}
+
+static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
+{
+ return (struct sockaddr_in6 *) &rqst->rq_daddr;
+}
+
+static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
+{
+ return (struct sockaddr *) &rqst->rq_daddr;
+}
+
/*
* Check buffer bounds after decoding arguments
*/
@@ -340,7 +351,8 @@ struct svc_deferred_req {
struct svc_xprt *xprt;
struct sockaddr_storage addr; /* where reply must go */
size_t addrlen;
- union svc_addr_u daddr; /* where reply must come from */
+ struct sockaddr_storage daddr; /* where reply must come from */
+ size_t daddrlen;
struct cache_deferred_req handle;
size_t xprt_hlen;
int argslen;
@@ -404,7 +416,7 @@ struct svc_procedure {
struct svc_serv *svc_create(struct svc_program *, unsigned int,
void (*shutdown)(struct svc_serv *));
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
- struct svc_pool *pool);
+ struct svc_pool *pool, int node);
void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
void (*shutdown)(struct svc_serv *),
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 6bbcef22e10..57a692432f8 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -8,15 +8,18 @@
#include <linux/mm.h>
#include <asm/errno.h>
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
+#ifdef CONFIG_VT
extern void pm_set_vt_switch(int);
-extern int pm_prepare_console(void);
-extern void pm_restore_console(void);
#else
static inline void pm_set_vt_switch(int do_switch)
{
}
+#endif
+#ifdef CONFIG_VT_CONSOLE_SLEEP
+extern int pm_prepare_console(void);
+extern void pm_restore_console(void);
+#else
static inline int pm_prepare_console(void)
{
return 0;
@@ -34,6 +37,58 @@ typedef int __bitwise suspend_state_t;
#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
+enum suspend_stat_step {
+ SUSPEND_FREEZE = 1,
+ SUSPEND_PREPARE,
+ SUSPEND_SUSPEND,
+ SUSPEND_SUSPEND_NOIRQ,
+ SUSPEND_RESUME_NOIRQ,
+ SUSPEND_RESUME
+};
+
+struct suspend_stats {
+ int success;
+ int fail;
+ int failed_freeze;
+ int failed_prepare;
+ int failed_suspend;
+ int failed_suspend_noirq;
+ int failed_resume;
+ int failed_resume_noirq;
+#define REC_FAILED_NUM 2
+ int last_failed_dev;
+ char failed_devs[REC_FAILED_NUM][40];
+ int last_failed_errno;
+ int errno[REC_FAILED_NUM];
+ int last_failed_step;
+ enum suspend_stat_step failed_steps[REC_FAILED_NUM];
+};
+
+extern struct suspend_stats suspend_stats;
+
+static inline void dpm_save_failed_dev(const char *name)
+{
+ strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
+ name,
+ sizeof(suspend_stats.failed_devs[0]));
+ suspend_stats.last_failed_dev++;
+ suspend_stats.last_failed_dev %= REC_FAILED_NUM;
+}
+
+static inline void dpm_save_failed_errno(int err)
+{
+ suspend_stats.errno[suspend_stats.last_failed_errno] = err;
+ suspend_stats.last_failed_errno++;
+ suspend_stats.last_failed_errno %= REC_FAILED_NUM;
+}
+
+static inline void dpm_save_failed_step(enum suspend_stat_step step)
+{
+ suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
+ suspend_stats.last_failed_step++;
+ suspend_stats.last_failed_step %= REC_FAILED_NUM;
+}
+
/**
* struct platform_suspend_ops - Callbacks for managing platform dependent
* system sleep states.
@@ -334,4 +389,38 @@ static inline void unlock_system_sleep(void)
}
#endif
+#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
+/*
+ * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture
+ * to save/restore additional information to/from the array of page
+ * frame numbers in the hibernation image. For s390 this is used to
+ * save and restore the storage key for each page that is included
+ * in the hibernation image.
+ */
+unsigned long page_key_additional_pages(unsigned long pages);
+int page_key_alloc(unsigned long pages);
+void page_key_free(void);
+void page_key_read(unsigned long *pfn);
+void page_key_memorize(unsigned long *pfn);
+void page_key_write(void *address);
+
+#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
+
+static inline unsigned long page_key_additional_pages(unsigned long pages)
+{
+ return 0;
+}
+
+static inline int page_key_alloc(unsigned long pages)
+{
+ return 0;
+}
+
+static inline void page_key_free(void) {}
+static inline void page_key_read(unsigned long *pfn) {}
+static inline void page_key_memorize(unsigned long *pfn) {}
+static inline void page_key_write(void *address) {}
+
+#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
+
#endif /* _LINUX_SUSPEND_H */
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 11684d9e6bd..9a1ec10fd50 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -435,7 +435,7 @@ enum {
NET_IPV4_ROUTE_MAX_SIZE=5,
NET_IPV4_ROUTE_GC_MIN_INTERVAL=6,
NET_IPV4_ROUTE_GC_TIMEOUT=7,
- NET_IPV4_ROUTE_GC_INTERVAL=8,
+ NET_IPV4_ROUTE_GC_INTERVAL=8, /* obsolete since 2.6.38 */
NET_IPV4_ROUTE_REDIRECT_LOAD=9,
NET_IPV4_ROUTE_REDIRECT_NUMBER=10,
NET_IPV4_ROUTE_REDIRECT_SILENCE=11,
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index d7d2f215814..dac0859e644 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -112,6 +112,7 @@ struct bin_attribute {
struct sysfs_ops {
ssize_t (*show)(struct kobject *, struct attribute *,char *);
ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
+ const void *(*namespace)(struct kobject *, const struct attribute *);
};
struct sysfs_dirent;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 531ede8006d..7f59ee94698 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -111,7 +111,8 @@ enum {
#define TCPI_OPT_TIMESTAMPS 1
#define TCPI_OPT_SACK 2
#define TCPI_OPT_WSCALE 4
-#define TCPI_OPT_ECN 8
+#define TCPI_OPT_ECN 8 /* ECN was negociated at TCP session init */
+#define TCPI_OPT_ECN_SEEN 16 /* we received at least one packet with ECT */
enum tcp_ca_state {
TCP_CA_Open = 0,
@@ -379,6 +380,10 @@ struct tcp_sock {
u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
u32 snd_cwnd_used;
u32 snd_cwnd_stamp;
+ u32 prior_cwnd; /* Congestion window at start of Recovery. */
+ u32 prr_delivered; /* Number of newly delivered packets to
+ * receiver in Recovery. */
+ u32 prr_out; /* Total number of pkts sent during Recovery. */
u32 rcv_wnd; /* Current receiver window */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
diff --git a/include/linux/tipc_config.h b/include/linux/tipc_config.h
index 0db239590b4..9730b0e51e4 100644
--- a/include/linux/tipc_config.h
+++ b/include/linux/tipc_config.h
@@ -41,6 +41,10 @@
#include <linux/string.h>
#include <asm/byteorder.h>
+#ifndef __KERNEL__
+#include <arpa/inet.h> /* for ntohs etc. */
+#endif
+
/*
* Configuration
*
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 5f2ede82b3d..5dbb3cb05a8 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -473,7 +473,9 @@ extern void proc_clear_tty(struct task_struct *p);
extern struct tty_struct *get_current_tty(void);
extern void tty_default_fops(struct file_operations *fops);
extern struct tty_struct *alloc_tty_struct(void);
-extern int tty_add_file(struct tty_struct *tty, struct file *file);
+extern int tty_alloc_file(struct file *file);
+extern void tty_add_file(struct tty_struct *tty, struct file *file);
+extern void tty_free_file(struct file *file);
extern void free_tty_struct(struct tty_struct *tty);
extern void initialize_tty_struct(struct tty_struct *tty,
struct tty_driver *driver, int idx);
@@ -581,6 +583,8 @@ extern int __init tty_init(void);
/* tty_ioctl.c */
extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
+extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
/* serial.c */
@@ -602,8 +606,24 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
/* functions for preparation of BKL removal */
extern void __lockfunc tty_lock(void) __acquires(tty_lock);
extern void __lockfunc tty_unlock(void) __releases(tty_lock);
-extern struct task_struct *__big_tty_mutex_owner;
-#define tty_locked() (current == __big_tty_mutex_owner)
+
+/*
+ * this shall be called only from where BTM is held (like close)
+ *
+ * We need this to ensure nobody waits for us to finish while we are waiting.
+ * Without this we were encountering system stalls.
+ *
+ * This should be indeed removed with BTM removal later.
+ *
+ * Locking: BTM required. Nobody is allowed to hold port->mutex.
+ */
+static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
+ long timeout)
+{
+ tty_unlock(); /* tty->ops->close holds the BTM, drop it while waiting */
+ tty_wait_until_sent(tty, timeout);
+ tty_lock();
+}
/*
* wait_event_interruptible_tty -- wait for a condition with the tty lock held
diff --git a/include/linux/types.h b/include/linux/types.h
index 176da8c1fbb..57a97234bec 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -238,6 +238,16 @@ struct ustat {
char f_fpack[6];
};
+/**
+ * struct rcu_head - callback structure for use with RCU
+ * @next: next update requests in a list
+ * @func: actual update function to call after the grace period.
+ */
+struct rcu_head {
+ struct rcu_head *next;
+ void (*func)(struct rcu_head *head);
+};
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 665517c05ea..fd99ff9298c 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -23,7 +23,10 @@ struct uio_map;
/**
* struct uio_mem - description of a UIO memory region
* @name: name of the memory region for identification
- * @addr: address of the device's memory
+ * @addr: address of the device's memory (phys_addr is used since
+ * addr can be logical, virtual, or physical & phys_addr_t
+ * should always be large enough to handle any of the
+ * address types)
* @size: size of IO
* @memtype: type of memory addr points to
* @internal_addr: ioremap-ped version of addr, for driver internal use
@@ -31,7 +34,7 @@ struct uio_map;
*/
struct uio_mem {
const char *name;
- unsigned long addr;
+ phys_addr_t addr;
unsigned long size;
int memtype;
void __iomem *internal_addr;
diff --git a/include/linux/un.h b/include/linux/un.h
index 45561c564b8..3ed3e46c1b1 100644
--- a/include/linux/un.h
+++ b/include/linux/un.h
@@ -1,10 +1,12 @@
#ifndef _LINUX_UN_H
#define _LINUX_UN_H
+#include <linux/socket.h>
+
#define UNIX_PATH_MAX 108
struct sockaddr_un {
- sa_family_t sun_family; /* AF_UNIX */
+ __kernel_sa_family_t sun_family; /* AF_UNIX */
char sun_path[UNIX_PATH_MAX]; /* pathname */
};
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 73c7df48960..6f49a1b39fa 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -292,6 +292,16 @@ struct usb_host_config {
int extralen;
};
+/* USB2.0 and USB3.0 device BOS descriptor set */
+struct usb_host_bos {
+ struct usb_bos_descriptor *desc;
+
+ /* wireless cap descriptor is handled by wusb */
+ struct usb_ext_cap_descriptor *ext_cap;
+ struct usb_ss_cap_descriptor *ss_cap;
+ struct usb_ss_container_id_descriptor *ss_id;
+};
+
int __usb_get_extra_descriptor(char *buffer, unsigned size,
unsigned char type, void **ptr);
#define usb_get_extra_descriptor(ifpoint, type, ptr) \
@@ -381,6 +391,7 @@ struct usb_tt;
* @ep0: endpoint 0 data (default control pipe)
* @dev: generic device interface
* @descriptor: USB device descriptor
+ * @bos: USB device BOS descriptor set
* @config: all of the device's configs
* @actconfig: the active configuration
* @ep_in: array of IN endpoints
@@ -399,6 +410,9 @@ struct usb_tt;
* FIXME -- complete doc
* @authenticated: Crypto authentication passed
* @wusb: device is Wireless USB
+ * @lpm_capable: device supports LPM
+ * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
+ * @usb2_hw_lpm_enabled: USB2 hardware LPM enabled
* @string_langid: language ID for strings
* @product: iProduct string, if present (static)
* @manufacturer: iManufacturer string, if present (static)
@@ -442,6 +456,7 @@ struct usb_device {
struct device dev;
struct usb_device_descriptor descriptor;
+ struct usb_host_bos *bos;
struct usb_host_config *config;
struct usb_host_config *actconfig;
@@ -460,6 +475,9 @@ struct usb_device {
unsigned authorized:1;
unsigned authenticated:1;
unsigned wusb:1;
+ unsigned lpm_capable:1;
+ unsigned usb2_hw_lpm_capable:1;
+ unsigned usb2_hw_lpm_enabled:1;
int string_langid;
/* static strings from the device */
@@ -1574,7 +1592,7 @@ usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
return 0;
/* NOTE: only 0x07ff bits are for packet size... */
- return le16_to_cpu(ep->desc.wMaxPacketSize);
+ return usb_endpoint_maxp(&ep->desc);
}
/* ----------------------------------------------------------------------- */
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 0fd3fbdd828..f32a64e57f9 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -34,6 +34,7 @@
#define __LINUX_USB_CH9_H
#include <linux/types.h> /* __u8 etc */
+#include <asm/byteorder.h> /* le16_to_cpu */
/*-------------------------------------------------------------------------*/
@@ -143,6 +144,11 @@
#define USB_INTRF_FUNC_SUSPEND 0 /* function suspend */
#define USB_INTR_FUNC_SUSPEND_OPT_MASK 0xFF00
+/*
+ * Suspend Options, Table 9-7 USB 3.0 spec
+ */
+#define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0))
+#define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1))
#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */
@@ -570,6 +576,17 @@ static inline int usb_endpoint_is_isoc_out(
return usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd);
}
+/**
+ * usb_endpoint_maxp - get endpoint's max packet size
+ * @epd: endpoint to be checked
+ *
+ * Returns @epd's max packet
+ */
+static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
+{
+ return le16_to_cpu(epd->wMaxPacketSize);
+}
+
/*-------------------------------------------------------------------------*/
/* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */
@@ -851,6 +868,18 @@ enum usb_device_speed {
USB_SPEED_SUPER, /* usb 3.0 */
};
+#ifdef __KERNEL__
+
+/**
+ * usb_speed_string() - Returns human readable-name of the speed.
+ * @speed: The speed to return human-readable name for. If it's not
+ * any of the speeds defined in usb_device_speed enum, string for
+ * USB_SPEED_UNKNOWN will be returned.
+ */
+extern const char *usb_speed_string(enum usb_device_speed speed);
+
+#endif
+
enum usb_device_state {
/* NOTATTACHED isn't in the USB spec, and this state acts
* the same as ATTACHED ... but it's clearer this way.
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 087f4b93183..1d3a67523ff 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -437,9 +437,9 @@ static inline void usb_ep_fifo_flush(struct usb_ep *ep)
struct usb_dcd_config_params {
__u8 bU1devExitLat; /* U1 Device exit Latency */
-#define USB_DEFULT_U1_DEV_EXIT_LAT 0x01 /* Less then 1 microsec */
+#define USB_DEFAULT_U1_DEV_EXIT_LAT 0x01 /* Less then 1 microsec */
__le16 bU2DevExitLat; /* U2 Device exit Latency */
-#define USB_DEFULT_U2_DEV_EXIT_LAT 0x1F4 /* Less then 500 microsec */
+#define USB_DEFAULT_U2_DEV_EXIT_LAT 0x1F4 /* Less then 500 microsec */
};
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 0097136ba45..03354d557b7 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -178,7 +178,7 @@ struct usb_hcd {
* this structure.
*/
unsigned long hcd_priv[0]
- __attribute__ ((aligned(sizeof(unsigned long))));
+ __attribute__ ((aligned(sizeof(s64))));
};
/* 2.4 does this a bit differently ... */
@@ -343,6 +343,7 @@ struct hc_driver {
* address is set
*/
int (*update_device)(struct usb_hcd *, struct usb_device *);
+ int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int);
};
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h
index b6b8660d0c6..55805f9dcf2 100644
--- a/include/linux/usb/r8a66597.h
+++ b/include/linux/usb/r8a66597.h
@@ -48,6 +48,9 @@ struct r8a66597_platdata {
/* (external controller only) set one = WR0_N shorted to WR1_N */
unsigned wr0_shorted_to_wr1:1;
+
+ /* set one = using SUDMAC */
+ unsigned sudmac:1;
};
/* Register definitions */
@@ -417,5 +420,62 @@ struct r8a66597_platdata {
#define USBSPD 0x00C0
#define RTPORT 0x0001
+/* SUDMAC registers */
+#define CH0CFG 0x00
+#define CH1CFG 0x04
+#define CH0BA 0x10
+#define CH1BA 0x14
+#define CH0BBC 0x18
+#define CH1BBC 0x1C
+#define CH0CA 0x20
+#define CH1CA 0x24
+#define CH0CBC 0x28
+#define CH1CBC 0x2C
+#define CH0DEN 0x30
+#define CH1DEN 0x34
+#define DSTSCLR 0x38
+#define DBUFCTRL 0x3C
+#define DINTCTRL 0x40
+#define DINTSTS 0x44
+#define DINTSTSCLR 0x48
+#define CH0SHCTRL 0x50
+#define CH1SHCTRL 0x54
+
+/* SUDMAC Configuration Registers */
+#define SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */
+#define RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */
+#define LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */
+
+/* DMA Enable Registers */
+#define DEN 0x0001 /* b1: DMA Transfer Enable */
+
+/* DMA Status Clear Register */
+#define CH1STCLR 0x0002 /* b2: Ch1 DMA Status Clear */
+#define CH0STCLR 0x0001 /* b1: Ch0 DMA Status Clear */
+
+/* DMA Buffer Control Register */
+#define CH1BUFW 0x0200 /* b9: Ch1 DMA Buffer Data Transfer Enable */
+#define CH0BUFW 0x0100 /* b8: Ch0 DMA Buffer Data Transfer Enable */
+#define CH1BUFS 0x0002 /* b2: Ch1 DMA Buffer Data Status */
+#define CH0BUFS 0x0001 /* b1: Ch0 DMA Buffer Data Status */
+
+/* DMA Interrupt Control Register */
+#define CH1ERRE 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Enable */
+#define CH0ERRE 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Enable */
+#define CH1ENDE 0x0002 /* b2: Ch1 DMA Transfer End Int Enable */
+#define CH0ENDE 0x0001 /* b1: Ch0 DMA Transfer End Int Enable */
+
+/* DMA Interrupt Status Register */
+#define CH1ERRS 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Status */
+#define CH0ERRS 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Status */
+#define CH1ENDS 0x0002 /* b2: Ch1 DMA Transfer End Int Status */
+#define CH0ENDS 0x0001 /* b1: Ch0 DMA Transfer End Int Status */
+
+/* DMA Interrupt Status Clear Register */
+#define CH1ERRC 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Stat Clear */
+#define CH0ERRC 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Stat Clear */
+#define CH1ENDC 0x0002 /* b2: Ch1 DMA Transfer End Int Stat Clear */
+#define CH0ENDC 0x0001 /* b1: Ch0 DMA Transfer End Int Stat Clear */
+
#endif /* __LINUX_USB_R8A66597_H */
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 8977431259c..e5a40c31854 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -82,6 +82,13 @@ struct renesas_usbhs_platform_callback {
* get VBUS status function.
*/
int (*get_vbus)(struct platform_device *pdev);
+
+ /*
+ * option:
+ *
+ * VBUS control is needed for Host
+ */
+ int (*set_vbus)(struct platform_device *pdev, int enable);
};
/*
@@ -101,6 +108,8 @@ struct renesas_usbhs_driver_param {
* option:
*
* for BUSWAIT :: BWAIT
+ * see
+ * renesas_usbhs/common.c :: usbhsc_set_buswait()
* */
int buswait_bwait;
@@ -127,6 +136,11 @@ struct renesas_usbhs_driver_param {
* pio <--> dma border.
*/
int pio_dma_border; /* default is 64byte */
+
+ /*
+ * option:
+ */
+ u32 has_otg:1; /* for controlling PWEN/EXTLP */
};
/*
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 71088574960..851ebf1a447 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -61,6 +61,9 @@ struct virtqueue {
* virtqueue_detach_unused_buf: detach first unused buffer
* vq: the struct virtqueue we're talking about.
* Returns NULL or the "data" token handed to add_buf
+ * virtqueue_get_vring_size: return the size of the virtqueue's vring
+ * vq: the struct virtqueue containing the vring of interest.
+ * Returns the size of the vring.
*
* Locking rules are straightforward: the driver is responsible for
* locking. No two operations may be invoked simultaneously, with the exception
@@ -97,6 +100,8 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
void *virtqueue_detach_unused_buf(struct virtqueue *vq);
+unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
+
/**
* virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
diff --git a/include/linux/x25.h b/include/linux/x25.h
index 6450a7f1207..810cce6737e 100644
--- a/include/linux/x25.h
+++ b/include/linux/x25.h
@@ -12,6 +12,7 @@
#define X25_KERNEL_H
#include <linux/types.h>
+#include <linux/socket.h>
#define SIOCX25GSUBSCRIP (SIOCPROTOPRIVATE + 0)
#define SIOCX25SSUBSCRIP (SIOCPROTOPRIVATE + 1)
@@ -57,7 +58,7 @@ struct x25_address {
* Linux X.25 Address structure, used for bind, and connect mostly.
*/
struct sockaddr_x25 {
- sa_family_t sx25_family; /* Must be AF_X25 */
+ __kernel_sa_family_t sx25_family; /* Must be AF_X25 */
struct x25_address sx25_addr; /* X.121 Address */
};
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index aed54c50aa6..e5d12203154 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -30,6 +30,9 @@
#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
/* Security namespace */
+#define XATTR_EVM_SUFFIX "evm"
+#define XATTR_NAME_EVM XATTR_SECURITY_PREFIX XATTR_EVM_SUFFIX
+
#define XATTR_SELINUX_SUFFIX "selinux"
#define XATTR_NAME_SELINUX XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX
@@ -49,6 +52,11 @@
#define XATTR_CAPS_SUFFIX "capability"
#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
+#define XATTR_POSIX_ACL_ACCESS "posix_acl_access"
+#define XATTR_NAME_POSIX_ACL_ACCESS XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_ACCESS
+#define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
+#define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
+
#ifdef __KERNEL__
#include <linux/types.h>
@@ -67,6 +75,12 @@ struct xattr_handler {
size_t size, int flags, int handler_flags);
};
+struct xattr {
+ char *name;
+ void *value;
+ size_t value_len;
+};
+
ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
@@ -78,7 +92,10 @@ ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer,
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
int generic_removexattr(struct dentry *dentry, const char *name);
-
+ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
+ char **xattr_value, size_t size, gfp_t flags);
+int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
+ const char *value, size_t size, gfp_t flags);
#endif /* __KERNEL__ */
#endif /* _LINUX_XATTR_H */
diff --git a/include/media/pwc-ioctl.h b/include/media/pwc-ioctl.h
index 0f19779c463..1ed1e616fe3 100644
--- a/include/media/pwc-ioctl.h
+++ b/include/media/pwc-ioctl.h
@@ -53,7 +53,6 @@
*/
#include <linux/types.h>
-#include <linux/version.h>
/* Enumeration of image sizes */
#define PSZ_SQCIF 0x00
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
index 1c647e8148c..d8fb6012c10 100644
--- a/include/media/videobuf-dma-sg.h
+++ b/include/media/videobuf-dma-sg.h
@@ -34,7 +34,7 @@
* does memory allocation too using vmalloc_32().
*
* videobuf_dma_*()
- * see Documentation/PCI/PCI-DMA-mapping.txt, these functions to
+ * see Documentation/DMA-API-HOWTO.txt, these functions to
* basically the same. The map function does also build a
* scatterlist for the buffer (and unmap frees it ...)
*
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index a6326ef8ade..2d70b95b3b5 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -76,11 +76,8 @@ do { \
} \
} while (0)
-#define P9_DUMP_PKT(way, pdu) p9pdu_dump(way, pdu)
-
#else
#define P9_DPRINTK(level, format, arg...) do { } while (0)
-#define P9_DUMP_PKT(way, pdu) do { } while (0)
#endif
@@ -359,6 +356,9 @@ enum p9_qid_t {
/* Room for readdir header */
#define P9_READDIRHDRSZ 24
+/* size of header for zero copy read/write */
+#define P9_ZC_HDR_SZ 4096
+
/**
* struct p9_qid - file system entity information
* @type: 8-bit type &p9_qid_t
@@ -555,10 +555,6 @@ struct p9_rstatfs {
* @tag: transaction id of the request
* @offset: used by marshalling routines to track current position in buffer
* @capacity: used by marshalling routines to track total malloc'd capacity
- * @pubuf: Payload user buffer given by the caller
- * @pkbuf: Payload kernel buffer given by the caller
- * @pbuf_size: pubuf/pkbuf(only one will be !NULL) size to be read/write.
- * @private: For transport layer's use.
* @sdata: payload
*
* &p9_fcall represents the structure for all 9P RPC
@@ -575,10 +571,6 @@ struct p9_fcall {
size_t offset;
size_t capacity;
- char __user *pubuf;
- char *pkbuf;
- size_t pbuf_size;
- void *private;
u8 *sdata;
};
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 55ce72ce986..fc9b90b0c05 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -151,7 +151,7 @@ struct p9_req_t {
struct p9_client {
spinlock_t lock; /* protect client structure */
- int msize;
+ unsigned int msize;
unsigned char proto_version;
struct p9_trans_module *trans_mod;
enum p9_trans_status status;
@@ -240,8 +240,8 @@ int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
u64 offset, u32 count);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
-int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
- int proto_version);
+int p9dirent_read(struct p9_client *clnt, char *buf, int len,
+ struct p9_dirent *dirent);
struct p9_wstat *p9_client_stat(struct p9_fid *fid);
int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst);
int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *attr);
@@ -259,7 +259,7 @@ struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
void p9_client_cb(struct p9_client *c, struct p9_req_t *req);
int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
-int p9stat_read(char *, int, struct p9_wstat *, int);
+int p9stat_read(struct p9_client *, char *, int, struct p9_wstat *);
void p9stat_free(struct p9_wstat *);
int p9_is_proto_dotu(struct p9_client *clnt);
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 83531ebeee9..adcbb20f651 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -26,13 +26,6 @@
#ifndef NET_9P_TRANSPORT_H
#define NET_9P_TRANSPORT_H
-#define P9_TRANS_PREF_PAYLOAD_MASK 0x1
-
-/* Default. Add Payload to PDU before sending it down to transport layer */
-#define P9_TRANS_PREF_PAYLOAD_DEF 0x0
-/* Send pay load separately to transport layer along with PDU.*/
-#define P9_TRANS_PREF_PAYLOAD_SEP 0x1
-
/**
* struct p9_trans_module - transport module interface
* @list: used to maintain a list of currently available transports
@@ -56,13 +49,14 @@ struct p9_trans_module {
struct list_head list;
char *name; /* name of transport */
int maxsize; /* max message size of transport */
- int pref; /* Preferences of this transport */
int def; /* this transport should be default */
struct module *owner;
int (*create)(struct p9_client *, const char *, char *);
void (*close) (struct p9_client *);
int (*request) (struct p9_client *, struct p9_req_t *req);
int (*cancel) (struct p9_client *, struct p9_req_t *req);
+ int (*zc_request)(struct p9_client *, struct p9_req_t *,
+ char *, char *, int , int, int, int);
};
void v9fs_register_trans(struct p9_trans_module *m);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 582e4ae7075..cbc6bb0a683 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -8,7 +8,7 @@
#define TEMP_VALID_LIFETIME (7*86400)
#define TEMP_PREFERRED_LIFETIME (86400)
-#define REGEN_MAX_RETRY (5)
+#define REGEN_MAX_RETRY (3)
#define MAX_DESYNC_FACTOR (600)
#define ADDR_CHECK_FREQUENCY (120*HZ)
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index be30aabe7b8..aaf79af7243 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -716,6 +716,16 @@ struct hci_rp_read_bd_addr {
bdaddr_t bdaddr;
} __packed;
+#define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY 0x0c1c
+struct hci_cp_write_page_scan_activity {
+ __le16 interval;
+ __le16 window;
+} __packed;
+
+#define HCI_OP_WRITE_PAGE_SCAN_TYPE 0x0c47
+ #define PAGE_SCAN_TYPE_STANDARD 0x00
+ #define PAGE_SCAN_TYPE_INTERLACED 0x01
+
#define HCI_OP_LE_SET_EVENT_MASK 0x2001
struct hci_cp_le_set_event_mask {
__u8 mask[8];
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 8f441b8b296..5b924423cf2 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -195,8 +195,6 @@ struct hci_dev {
__u16 init_last_cmd;
- struct crypto_blkcipher *tfm;
-
struct inquiry_cache inq_cache;
struct hci_conn_hash conn_hash;
struct list_head blacklist;
@@ -348,6 +346,7 @@ enum {
HCI_CONN_RSWITCH_PEND,
HCI_CONN_MODE_CHANGE_PEND,
HCI_CONN_SCO_SETUP_PEND,
+ HCI_CONN_LE_SMP_PEND,
};
static inline void hci_conn_hash_init(struct hci_dev *hdev)
@@ -395,6 +394,22 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
}
}
+static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ switch (type) {
+ case ACL_LINK:
+ return h->acl_num;
+ case LE_LINK:
+ return h->le_num;
+ case SCO_LINK:
+ case ESCO_LINK:
+ return h->sco_num;
+ default:
+ return 0;
+ }
+}
+
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
__u16 handle)
{
@@ -475,7 +490,7 @@ static inline void hci_conn_put(struct hci_conn *conn)
{
if (atomic_dec_and_test(&conn->refcnt)) {
unsigned long timeo;
- if (conn->type == ACL_LINK) {
+ if (conn->type == ACL_LINK || conn->type == LE_LINK) {
del_timer(&conn->idle_timer);
if (conn->state == BT_CONNECTED) {
timeo = msecs_to_jiffies(conn->disc_timeout);
@@ -838,7 +853,7 @@ int mgmt_powered(u16 index, u8 powered);
int mgmt_discoverable(u16 index, u8 discoverable);
int mgmt_connectable(u16 index, u8 connectable);
int mgmt_new_key(u16 index, struct link_key *key, u8 persistent);
-int mgmt_connected(u16 index, bdaddr_t *bdaddr);
+int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type);
int mgmt_disconnected(u16 index, bdaddr_t *bdaddr);
int mgmt_disconnect_failed(u16 index);
int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status);
@@ -858,6 +873,8 @@ int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
u8 *eir);
int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name);
int mgmt_discovering(u16 index, u8 discovering);
+int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr);
+int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr);
/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 4f34ad25e75..ab90ae0970a 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -354,8 +354,8 @@ struct l2cap_chan {
__u8 retry_count;
__u8 num_acked;
__u16 sdu_len;
- __u16 partial_sdu_len;
struct sk_buff *sdu;
+ struct sk_buff *sdu_last_frag;
__u8 remote_tx_win;
__u8 remote_max_tx;
@@ -409,14 +409,8 @@ struct l2cap_conn {
__u8 disc_reason;
- __u8 preq[7]; /* SMP Pairing Request */
- __u8 prsp[7]; /* SMP Pairing Response */
- __u8 prnd[16]; /* SMP Pairing Random */
- __u8 pcnf[16]; /* SMP Pairing Confirm */
- __u8 tk[16]; /* SMP Temporary Key */
- __u8 smp_key_size;
-
struct timer_list security_timer;
+ struct smp_chan *smp_chan;
struct list_head chan_l;
rwlock_t chan_lock;
@@ -454,7 +448,6 @@ enum {
#define L2CAP_CONF_MAX_CONF_RSP 2
enum {
- CONN_SAR_SDU,
CONN_SREJ_SENT,
CONN_WAIT_F,
CONN_SREJ_ACT,
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 5428fd32cce..d66da0f94f9 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -211,6 +211,11 @@ struct mgmt_cp_unblock_device {
bdaddr_t bdaddr;
} __packed;
+#define MGMT_OP_SET_FAST_CONNECTABLE 0x001F
+struct mgmt_cp_set_fast_connectable {
+ __u8 enable;
+} __packed;
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -249,6 +254,7 @@ struct mgmt_ev_new_key {
#define MGMT_EV_CONNECTED 0x000B
struct mgmt_ev_connected {
bdaddr_t bdaddr;
+ __u8 link_type;
} __packed;
#define MGMT_EV_DISCONNECTED 0x000C
@@ -301,3 +307,13 @@ struct mgmt_ev_remote_name {
} __packed;
#define MGMT_EV_DISCOVERING 0x0014
+
+#define MGMT_EV_DEVICE_BLOCKED 0x0015
+struct mgmt_ev_device_blocked {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_EV_DEVICE_UNBLOCKED 0x0016
+struct mgmt_ev_device_unblocked {
+ bdaddr_t bdaddr;
+} __packed;
diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
index 46c45761230..15b97d54944 100644
--- a/include/net/bluetooth/smp.h
+++ b/include/net/bluetooth/smp.h
@@ -115,9 +115,26 @@ struct smp_cmd_security_req {
#define SMP_MIN_ENC_KEY_SIZE 7
#define SMP_MAX_ENC_KEY_SIZE 16
+struct smp_chan {
+ struct l2cap_conn *conn;
+ u8 preq[7]; /* SMP Pairing Request */
+ u8 prsp[7]; /* SMP Pairing Response */
+ u8 prnd[16]; /* SMP Pairing Random (local) */
+ u8 rrnd[16]; /* SMP Pairing Random (remote) */
+ u8 pcnf[16]; /* SMP Pairing Confirm */
+ u8 tk[16]; /* SMP Temporary Key */
+ u8 smp_key_size;
+ struct crypto_blkcipher *tfm;
+ struct work_struct confirm;
+ struct work_struct random;
+
+};
+
/* SMP Commands */
int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level);
int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
int smp_distribute_keys(struct l2cap_conn *conn, __u8 force);
+void smp_chan_destroy(struct l2cap_conn *conn);
+
#endif /* __SMP_H */
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
index c5dedd87b4c..8d552519ff6 100644
--- a/include/net/caif/caif_hsi.h
+++ b/include/net/caif/caif_hsi.h
@@ -52,8 +52,9 @@ struct cfhsi_desc {
/*
* Maximum bytes transferred in one transfer.
*/
-/* TODO: 4096 is temporary... */
-#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * 4096)
+#define CFHSI_MAX_CAIF_FRAME_SZ 4096
+
+#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ)
/* Size of the complete HSI TX buffer. */
#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ)
@@ -75,18 +76,21 @@ struct cfhsi_desc {
#define CFHSI_WAKE_UP_ACK 1
#define CFHSI_WAKE_DOWN_ACK 2
#define CFHSI_AWAKE 3
-#define CFHSI_PENDING_RX 4
-#define CFHSI_SHUTDOWN 6
-#define CFHSI_FLUSH_FIFO 7
+#define CFHSI_WAKELOCK_HELD 4
+#define CFHSI_SHUTDOWN 5
+#define CFHSI_FLUSH_FIFO 6
#ifndef CFHSI_INACTIVITY_TOUT
#define CFHSI_INACTIVITY_TOUT (1 * HZ)
#endif /* CFHSI_INACTIVITY_TOUT */
-#ifndef CFHSI_WAKEUP_TOUT
-#define CFHSI_WAKEUP_TOUT (3 * HZ)
-#endif /* CFHSI_WAKEUP_TOUT */
+#ifndef CFHSI_WAKE_TOUT
+#define CFHSI_WAKE_TOUT (3 * HZ)
+#endif /* CFHSI_WAKE_TOUT */
+#ifndef CFHSI_MAX_RX_RETRIES
+#define CFHSI_MAX_RX_RETRIES (10 * HZ)
+#endif
/* Structure implemented by the CAIF HSI driver. */
struct cfhsi_drv {
@@ -104,11 +108,21 @@ struct cfhsi_dev {
int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_dev *dev);
int (*cfhsi_wake_up) (struct cfhsi_dev *dev);
int (*cfhsi_wake_down) (struct cfhsi_dev *dev);
+ int (*cfhsi_get_peer_wake) (struct cfhsi_dev *dev, bool *status);
int (*cfhsi_fifo_occupancy)(struct cfhsi_dev *dev, size_t *occupancy);
int (*cfhsi_rx_cancel)(struct cfhsi_dev *dev);
struct cfhsi_drv *drv;
};
+/* Structure holds status of received CAIF frames processing */
+struct cfhsi_rx_state {
+ int state;
+ int nfrms;
+ int pld_len;
+ int retries;
+ bool piggy_desc;
+};
+
/* Structure implemented by CAIF HSI drivers. */
struct cfhsi {
struct caif_dev_common cfdev;
@@ -118,7 +132,8 @@ struct cfhsi {
struct cfhsi_drv drv;
struct cfhsi_dev *dev;
int tx_state;
- int rx_state;
+ struct cfhsi_rx_state rx_state;
+ unsigned long inactivity_timeout;
int rx_len;
u8 *rx_ptr;
u8 *tx_buf;
@@ -130,13 +145,13 @@ struct cfhsi {
struct list_head list;
struct work_struct wake_up_work;
struct work_struct wake_down_work;
- struct work_struct rx_done_work;
- struct work_struct tx_done_work;
+ struct work_struct out_of_sync_work;
struct workqueue_struct *wq;
wait_queue_head_t wake_up_wait;
wait_queue_head_t wake_down_wait;
wait_queue_head_t flush_fifo_wait;
struct timer_list timer;
+ struct timer_list rx_slowpath_timer;
unsigned long bits;
};
diff --git a/include/net/cfg80211-wext.h b/include/net/cfg80211-wext.h
new file mode 100644
index 00000000000..25baddc4fbe
--- /dev/null
+++ b/include/net/cfg80211-wext.h
@@ -0,0 +1,55 @@
+#ifndef __NET_CFG80211_WEXT_H
+#define __NET_CFG80211_WEXT_H
+/*
+ * 802.11 device and configuration interface -- wext handlers
+ *
+ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+
+/*
+ * Temporary wext handlers & helper functions
+ *
+ * These are used only by drivers that aren't yet fully
+ * converted to cfg80211.
+ */
+int cfg80211_wext_giwname(struct net_device *dev,
+ struct iw_request_info *info,
+ char *name, char *extra);
+int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
+ u32 *mode, char *extra);
+int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
+ u32 *mode, char *extra);
+int cfg80211_wext_siwscan(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+int cfg80211_wext_giwscan(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra);
+int cfg80211_wext_giwrange(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra);
+int cfg80211_wext_siwrts(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rts, char *extra);
+int cfg80211_wext_giwrts(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rts, char *extra);
+int cfg80211_wext_siwfrag(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *frag, char *extra);
+int cfg80211_wext_giwfrag(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *frag, char *extra);
+int cfg80211_wext_giwretry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *retry, char *extra);
+
+#endif /* __NET_CFG80211_WEXT_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 401d73bd151..92cf1c2c30c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -20,11 +20,6 @@
#include <linux/ieee80211.h>
#include <net/regulatory.h>
-/* remove once we remove the wext stuff */
-#include <net/iw_handler.h>
-#include <linux/wireless.h>
-
-
/**
* DOC: Introduction
*
@@ -339,6 +334,36 @@ struct survey_info {
};
/**
+ * struct cfg80211_crypto_settings - Crypto settings
+ * @wpa_versions: indicates which, if any, WPA versions are enabled
+ * (from enum nl80211_wpa_versions)
+ * @cipher_group: group key cipher suite (or 0 if unset)
+ * @n_ciphers_pairwise: number of AP supported unicast ciphers
+ * @ciphers_pairwise: unicast key cipher suites
+ * @n_akm_suites: number of AKM suites
+ * @akm_suites: AKM suites
+ * @control_port: Whether user space controls IEEE 802.1X port, i.e.,
+ * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
+ * required to assume that the port is unauthorized until authorized by
+ * user space. Otherwise, port is marked authorized by default.
+ * @control_port_ethertype: the control port protocol that should be
+ * allowed through even on unauthorized ports
+ * @control_port_no_encrypt: TRUE to prevent encryption of control port
+ * protocol frames.
+ */
+struct cfg80211_crypto_settings {
+ u32 wpa_versions;
+ u32 cipher_group;
+ int n_ciphers_pairwise;
+ u32 ciphers_pairwise[NL80211_MAX_NR_CIPHER_SUITES];
+ int n_akm_suites;
+ u32 akm_suites[NL80211_MAX_NR_AKM_SUITES];
+ bool control_port;
+ __be16 control_port_ethertype;
+ bool control_port_no_encrypt;
+};
+
+/**
* struct beacon_parameters - beacon parameters
*
* Used to configure the beacon for an interface.
@@ -351,11 +376,38 @@ struct survey_info {
* @dtim_period: DTIM period or zero if not changed
* @head_len: length of @head
* @tail_len: length of @tail
+ * @ssid: SSID to be used in the BSS (note: may be %NULL if not provided from
+ * user space)
+ * @ssid_len: length of @ssid
+ * @hidden_ssid: whether to hide the SSID in Beacon/Probe Response frames
+ * @crypto: crypto settings
+ * @privacy: the BSS uses privacy
+ * @auth_type: Authentication type (algorithm)
+ * @beacon_ies: extra information element(s) to add into Beacon frames or %NULL
+ * @beacon_ies_len: length of beacon_ies in octets
+ * @proberesp_ies: extra information element(s) to add into Probe Response
+ * frames or %NULL
+ * @proberesp_ies_len: length of proberesp_ies in octets
+ * @assocresp_ies: extra information element(s) to add into (Re)Association
+ * Response frames or %NULL
+ * @assocresp_ies_len: length of assocresp_ies in octets
*/
struct beacon_parameters {
u8 *head, *tail;
int interval, dtim_period;
int head_len, tail_len;
+ const u8 *ssid;
+ size_t ssid_len;
+ enum nl80211_hidden_ssid hidden_ssid;
+ struct cfg80211_crypto_settings crypto;
+ bool privacy;
+ enum nl80211_auth_type auth_type;
+ const u8 *beacon_ies;
+ size_t beacon_ies_len;
+ const u8 *proberesp_ies;
+ size_t proberesp_ies_len;
+ const u8 *assocresp_ies;
+ size_t assocresp_ies_len;
};
/**
@@ -372,6 +424,17 @@ enum plink_actions {
};
/**
+ * enum station_parameters_apply_mask - station parameter values to apply
+ * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
+ *
+ * Not all station parameters have in-band "no change" signalling,
+ * for those that don't these flags will are used.
+ */
+enum station_parameters_apply_mask {
+ STATION_PARAM_APPLY_UAPSD = BIT(0),
+};
+
+/**
* struct station_parameters - station parameters
*
* Used to change and create a new station.
@@ -389,17 +452,24 @@ enum plink_actions {
* @plink_action: plink action to take
* @plink_state: set the peer link state for a station
* @ht_capa: HT capabilities of station
+ * @uapsd_queues: bitmap of queues configured for uapsd. same format
+ * as the AC bitmap in the QoS info field
+ * @max_sp: max Service Period. same format as the MAX_SP in the
+ * QoS info field (but already shifted down)
*/
struct station_parameters {
u8 *supported_rates;
struct net_device *vlan;
u32 sta_flags_mask, sta_flags_set;
+ u32 sta_modify_mask;
int listen_interval;
u16 aid;
u8 supported_rates_len;
u8 plink_action;
u8 plink_state;
struct ieee80211_ht_cap *ht_capa;
+ u8 uapsd_queues;
+ u8 max_sp;
};
/**
@@ -426,6 +496,8 @@ struct station_parameters {
* @STATION_INFO_RX_BITRATE: @rxrate fields are filled
* @STATION_INFO_BSS_PARAM: @bss_param filled
* @STATION_INFO_CONNECTED_TIME: @connected_time filled
+ * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled
+ * @STATION_INFO_STA_FLAGS: @sta_flags filled
*/
enum station_info_flags {
STATION_INFO_INACTIVE_TIME = 1<<0,
@@ -444,7 +516,9 @@ enum station_info_flags {
STATION_INFO_SIGNAL_AVG = 1<<13,
STATION_INFO_RX_BITRATE = 1<<14,
STATION_INFO_BSS_PARAM = 1<<15,
- STATION_INFO_CONNECTED_TIME = 1<<16
+ STATION_INFO_CONNECTED_TIME = 1<<16,
+ STATION_INFO_ASSOC_REQ_IES = 1<<17,
+ STATION_INFO_STA_FLAGS = 1<<18
};
/**
@@ -536,6 +610,11 @@ struct sta_bss_parameters {
* This number should increase every time the list of stations
* changes, i.e. when a station is added or removed, so that
* userspace can tell whether it got a consistent snapshot.
+ * @assoc_req_ies: IEs from (Re)Association Request.
+ * This is used only when in AP mode with drivers that do not use
+ * user space MLME/SME implementation. The information is provided for
+ * the cfg80211_new_sta() calls to notify user space of the IEs.
+ * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
*/
struct station_info {
u32 filled;
@@ -556,8 +635,17 @@ struct station_info {
u32 tx_failed;
u32 rx_dropped_misc;
struct sta_bss_parameters bss_param;
+ struct nl80211_sta_flag_update sta_flags;
int generation;
+
+ const u8 *assoc_req_ies;
+ size_t assoc_req_ies_len;
+
+ /*
+ * Note: Add a new enum station_info_flags value for each new field and
+ * use it to check which fields are initialized.
+ */
};
/**
@@ -688,6 +776,12 @@ struct mesh_config {
u16 dot11MeshHWMPpreqMinInterval;
u16 dot11MeshHWMPnetDiameterTraversalTime;
u8 dot11MeshHWMPRootMode;
+ u16 dot11MeshHWMPRannInterval;
+ /* This is missnamed in draft 12.0: dot11MeshGateAnnouncementProtocol
+ * set to true only means that the station will announce others it's a
+ * mesh gate, but not necessarily using the gate announcement protocol.
+ * Still keeping the same nomenclature to be in sync with the spec. */
+ bool dot11MeshGateAnnouncementProtocol;
};
/**
@@ -781,6 +875,7 @@ struct cfg80211_ssid {
* @wiphy: the wiphy this was for
* @dev: the interface
* @aborted: (internal) scan request was notified as aborted
+ * @no_cck: used to send probe requests at non CCK rate in 2GHz band
*/
struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
@@ -795,12 +890,22 @@ struct cfg80211_scan_request {
struct wiphy *wiphy;
struct net_device *dev;
bool aborted;
+ bool no_cck;
/* keep last */
struct ieee80211_channel *channels[0];
};
/**
+ * struct cfg80211_match_set - sets of attributes to match
+ *
+ * @ssid: SSID to be matched
+ */
+struct cfg80211_match_set {
+ struct cfg80211_ssid ssid;
+};
+
+/**
* struct cfg80211_sched_scan_request - scheduled scan request description
*
* @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
@@ -809,6 +914,11 @@ struct cfg80211_scan_request {
* @interval: interval between each scheduled scan cycle
* @ie: optional information element(s) to add into Probe Request or %NULL
* @ie_len: length of ie in octets
+ * @match_sets: sets of parameters to be matched for a scan result
+ * entry to be considered valid and to be passed to the host
+ * (others are filtered out).
+ * If ommited, all results are passed.
+ * @n_match_sets: number of match sets
* @wiphy: the wiphy this was for
* @dev: the interface
* @channels: channels to scan
@@ -820,6 +930,8 @@ struct cfg80211_sched_scan_request {
u32 interval;
const u8 *ie;
size_t ie_len;
+ struct cfg80211_match_set *match_sets;
+ int n_match_sets;
/* internal */
struct wiphy *wiphy;
@@ -896,36 +1008,6 @@ const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie);
/**
- * struct cfg80211_crypto_settings - Crypto settings
- * @wpa_versions: indicates which, if any, WPA versions are enabled
- * (from enum nl80211_wpa_versions)
- * @cipher_group: group key cipher suite (or 0 if unset)
- * @n_ciphers_pairwise: number of AP supported unicast ciphers
- * @ciphers_pairwise: unicast key cipher suites
- * @n_akm_suites: number of AKM suites
- * @akm_suites: AKM suites
- * @control_port: Whether user space controls IEEE 802.1X port, i.e.,
- * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
- * required to assume that the port is unauthorized until authorized by
- * user space. Otherwise, port is marked authorized by default.
- * @control_port_ethertype: the control port protocol that should be
- * allowed through even on unauthorized ports
- * @control_port_no_encrypt: TRUE to prevent encryption of control port
- * protocol frames.
- */
-struct cfg80211_crypto_settings {
- u32 wpa_versions;
- u32 cipher_group;
- int n_ciphers_pairwise;
- u32 ciphers_pairwise[NL80211_MAX_NR_CIPHER_SUITES];
- int n_akm_suites;
- u32 akm_suites[NL80211_MAX_NR_AKM_SUITES];
- bool control_port;
- __be16 control_port_ethertype;
- bool control_port_no_encrypt;
-};
-
-/**
* struct cfg80211_auth_request - Authentication request data
*
* This structure provides information needed to complete IEEE 802.11
@@ -1343,6 +1425,9 @@ struct cfg80211_gtk_rekey_data {
* @set_ringparam: Set tx and rx ring sizes.
*
* @get_ringparam: Get tx and rx ring current and maximum sizes.
+ *
+ * @tdls_mgmt: Transmit a TDLS management frame.
+ * @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup).
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -1419,7 +1504,7 @@ struct cfg80211_ops {
int (*change_bss)(struct wiphy *wiphy, struct net_device *dev,
struct bss_parameters *params);
- int (*set_txq_params)(struct wiphy *wiphy,
+ int (*set_txq_params)(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_txq_params *params);
int (*set_channel)(struct wiphy *wiphy, struct net_device *dev,
@@ -1495,7 +1580,8 @@ struct cfg80211_ops {
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
- const u8 *buf, size_t len, u64 *cookie);
+ const u8 *buf, size_t len, bool no_cck,
+ u64 *cookie);
int (*mgmt_tx_cancel_wait)(struct wiphy *wiphy,
struct net_device *dev,
u64 cookie);
@@ -1525,6 +1611,12 @@ struct cfg80211_ops {
int (*set_rekey_data)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_gtk_rekey_data *data);
+
+ int (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *buf, size_t len);
+ int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, enum nl80211_tdls_operation oper);
};
/*
@@ -1574,6 +1666,15 @@ struct cfg80211_ops {
* @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing
* auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH.
* @WIPHY_FLAG_SUPPORTS_SCHED_SCAN: The device supports scheduled scans.
+ * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the
+ * firmware.
+ * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP.
+ * @WIPHY_FLAG_SUPPORTS_TDLS: The device supports TDLS (802.11z) operation.
+ * @WIPHY_FLAG_TDLS_EXTERNAL_SETUP: The device does not handle TDLS (802.11z)
+ * link setup/discovery operations internally. Setup, discovery and
+ * teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT
+ * command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be
+ * used for asking the driver/firmware to perform a TDLS operation.
*/
enum wiphy_flags {
WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
@@ -1588,6 +1689,10 @@ enum wiphy_flags {
WIPHY_FLAG_MESH_AUTH = BIT(10),
WIPHY_FLAG_SUPPORTS_SCHED_SCAN = BIT(11),
WIPHY_FLAG_ENFORCE_COMBINATIONS = BIT(12),
+ WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13),
+ WIPHY_FLAG_AP_UAPSD = BIT(14),
+ WIPHY_FLAG_SUPPORTS_TDLS = BIT(15),
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(16),
};
/**
@@ -1749,6 +1854,7 @@ struct wiphy_wowlan_support {
* @debugfsdir: debugfs directory used for this wiphy, will be renamed
* automatically on wiphy renames
* @dev: (virtual) struct device for this wiphy
+ * @registered: helps synchronize suspend/resume with wiphy unregister
* @wext: wireless extension handlers
* @priv: driver private data (sized according to wiphy_new() parameter)
* @interface_modes: bitmask of interfaces types valid for this wiphy,
@@ -1765,6 +1871,9 @@ struct wiphy_wowlan_support {
* any given scan
* @max_sched_scan_ssids: maximum number of SSIDs the device can scan
* for in any given scheduled scan
+ * @max_match_sets: maximum number of match sets the device can handle
+ * when performing a scheduled scan, 0 if filtering is not
+ * supported.
* @max_scan_ie_len: maximum length of user-controlled IEs device can
* add to probe request frames transmitted during a scan, must not
* include fixed IEs like supported rates
@@ -1822,6 +1931,7 @@ struct wiphy {
int bss_priv_size;
u8 max_scan_ssids;
u8 max_sched_scan_ssids;
+ u8 max_match_sets;
u16 max_scan_ie_len;
u16 max_sched_scan_ie_len;
@@ -2235,6 +2345,69 @@ extern int ieee80211_radiotap_iterator_next(
extern const unsigned char rfc1042_header[6];
extern const unsigned char bridge_tunnel_header[6];
+/* Parsed Information Elements */
+struct ieee802_11_elems {
+ u8 *ie_start;
+ size_t total_len;
+
+ /* pointers to IEs */
+ u8 *ssid;
+ u8 *supp_rates;
+ u8 *fh_params;
+ u8 *ds_params;
+ u8 *cf_params;
+ struct ieee80211_tim_ie *tim;
+ u8 *ibss_params;
+ u8 *challenge;
+ u8 *wpa;
+ u8 *rsn;
+ u8 *erp_info;
+ u8 *ext_supp_rates;
+ u8 *wmm_info;
+ u8 *wmm_param;
+ struct ieee80211_ht_cap *ht_cap_elem;
+ struct ieee80211_ht_info *ht_info_elem;
+ struct ieee80211_meshconf_ie *mesh_config;
+ u8 *mesh_id;
+ u8 *peering;
+ u8 *preq;
+ u8 *prep;
+ u8 *perr;
+ struct ieee80211_rann_ie *rann;
+ u8 *ch_switch_elem;
+ u8 *country_elem;
+ u8 *pwr_constr_elem;
+ u8 *quiet_elem; /* first quite element */
+ u8 *timeout_int;
+
+ /* length of them, respectively */
+ u8 ssid_len;
+ u8 supp_rates_len;
+ u8 fh_params_len;
+ u8 ds_params_len;
+ u8 cf_params_len;
+ u8 tim_len;
+ u8 ibss_params_len;
+ u8 challenge_len;
+ u8 wpa_len;
+ u8 rsn_len;
+ u8 erp_info_len;
+ u8 ext_supp_rates_len;
+ u8 wmm_info_len;
+ u8 wmm_param_len;
+ u8 mesh_id_len;
+ u8 peering_len;
+ u8 preq_len;
+ u8 prep_len;
+ u8 perr_len;
+ u8 ch_switch_elem_len;
+ u8 country_elem_len;
+ u8 pwr_constr_elem_len;
+ u8 quiet_elem_len;
+ u8 num_of_quiet_elem; /* can be more the one */
+ u8 timeout_int_len;
+};
+
/**
* ieee80211_get_hdrlen_from_skb - get header length from data
*
@@ -2324,6 +2497,24 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb);
const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len);
/**
+ * cfg80211_find_vendor_ie - find vendor specific information element in data
+ *
+ * @oui: vendor OUI
+ * @oui_type: vendor-specific OUI type
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * This function will return %NULL if the vendor specific element ID
+ * could not be found or if the element is invalid (claims to be
+ * longer than the given data), or a pointer to the first byte
+ * of the requested element, that is the byte containing the
+ * element ID. There are no checks on the element length
+ * other than having to fit into the given data.
+ */
+const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
+ const u8 *ies, int len);
+
+/**
* DOC: Regulatory enforcement infrastructure
*
* TODO
@@ -2398,113 +2589,6 @@ extern int freq_reg_info(struct wiphy *wiphy,
const struct ieee80211_reg_rule **reg_rule);
/*
- * Temporary wext handlers & helper functions
- *
- * In the future cfg80211 will simply assign the entire wext handler
- * structure to netdevs it manages, but we're not there yet.
- */
-int cfg80211_wext_giwname(struct net_device *dev,
- struct iw_request_info *info,
- char *name, char *extra);
-int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
- u32 *mode, char *extra);
-int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
- u32 *mode, char *extra);
-int cfg80211_wext_siwscan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-int cfg80211_wext_giwscan(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra);
-int cfg80211_wext_siwmlme(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra);
-int cfg80211_wext_giwrange(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra);
-int cfg80211_wext_siwgenie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra);
-int cfg80211_wext_siwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *data, char *extra);
-int cfg80211_wext_giwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *data, char *extra);
-
-int cfg80211_wext_siwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *freq, char *extra);
-int cfg80211_wext_giwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *freq, char *extra);
-int cfg80211_wext_siwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *ssid);
-int cfg80211_wext_giwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *ssid);
-int cfg80211_wext_siwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rate, char *extra);
-int cfg80211_wext_giwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rate, char *extra);
-
-int cfg80211_wext_siwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rts, char *extra);
-int cfg80211_wext_giwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rts, char *extra);
-int cfg80211_wext_siwfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *frag, char *extra);
-int cfg80211_wext_giwfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *frag, char *extra);
-int cfg80211_wext_siwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *retry, char *extra);
-int cfg80211_wext_giwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *retry, char *extra);
-int cfg80211_wext_siwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *extra);
-int cfg80211_wext_siwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *keybuf);
-int cfg80211_wext_giwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *keybuf);
-int cfg80211_wext_siwtxpower(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *keybuf);
-int cfg80211_wext_giwtxpower(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *keybuf);
-struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev);
-
-int cfg80211_wext_siwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq, char *extra);
-int cfg80211_wext_giwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq, char *extra);
-
-int cfg80211_wext_siwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra);
-int cfg80211_wext_giwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra);
-
-int cfg80211_wext_siwpmksa(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra);
-
-/*
* callbacks for asynchronous cfg80211 methods, notification
* functions and BSS handling helpers
*/
@@ -3090,6 +3174,17 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev,
void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
const u8 *replay_ctr, gfp_t gfp);
+/**
+ * cfg80211_pmksa_candidate_notify - notify about PMKSA caching candidate
+ * @dev: network device
+ * @index: candidate index (the smaller the index, the higher the priority)
+ * @bssid: BSSID of AP
+ * @preauth: Whether AP advertises support for RSN pre-authentication
+ * @gfp: allocation flags
+ */
+void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
+ const u8 *bssid, bool preauth, gfp_t gfp);
+
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* wiphy_printk helpers, similar to dev_printk */
diff --git a/include/net/dcbevent.h b/include/net/dcbevent.h
index bc1e7ef4017..443626ed4cb 100644
--- a/include/net/dcbevent.h
+++ b/include/net/dcbevent.h
@@ -24,8 +24,26 @@ enum dcbevent_notif_type {
DCB_APP_EVENT = 1,
};
+#ifdef CONFIG_DCB
extern int register_dcbevent_notifier(struct notifier_block *nb);
extern int unregister_dcbevent_notifier(struct notifier_block *nb);
extern int call_dcbevent_notifiers(unsigned long val, void *v);
+#else
+static inline int
+register_dcbevent_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int unregister_dcbevent_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int call_dcbevent_notifiers(unsigned long val, void *v)
+{
+ return 0;
+}
+#endif /* CONFIG_DCB */
#endif
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index f5aa39997f0..2cd66d0be34 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -23,9 +23,10 @@
#include <linux/dcbnl.h>
struct dcb_app_type {
- char name[IFNAMSIZ];
+ int ifindex;
struct dcb_app app;
struct list_head list;
+ u8 dcbx;
};
int dcb_setapp(struct net_device *, struct dcb_app *);
diff --git a/include/net/dst.h b/include/net/dst.h
index 13d507d69dd..4fb6c438179 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -325,7 +325,14 @@ static inline void skb_dst_force(struct sk_buff *skb)
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
skb->dev = dev;
- skb->rxhash = 0;
+
+ /*
+ * Clear rxhash so that we can recalulate the hash for the
+ * encapsulated packet, unless we have already determine the hash
+ * over the L4 4-tuple.
+ */
+ if (!skb->l4_rxhash)
+ skb->rxhash = 0;
skb_set_queue_mapping(skb, 0);
skb_dst_drop(skb);
nf_reset(skb);
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index b0be5fb9de1..7e2c4d483ad 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -251,6 +251,7 @@ enum ieee80211_radiotap_type {
* retries */
#define IEEE80211_RADIOTAP_F_TX_CTS 0x0002 /* used cts 'protection' */
#define IEEE80211_RADIOTAP_F_TX_RTS 0x0004 /* used rts/cts handshake */
+#define IEEE80211_RADIOTAP_F_TX_NOACK 0x0008 /* don't expect an ack */
/* For IEEE80211_RADIOTAP_MCS */
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 11cf373970a..51a7031b4aa 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -41,6 +41,7 @@ struct inet6_ifaddr {
struct in6_addr addr;
__u32 prefix_len;
+ /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */
__u32 valid_lft;
__u32 prefered_lft;
atomic_t refcnt;
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 2fa8d1341a0..2fa14691869 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -30,6 +30,14 @@ static inline int INET_ECN_is_capable(__u8 dsfield)
return dsfield & INET_ECN_ECT_0;
}
+/*
+ * RFC 3168 9.1.1
+ * The full-functionality option for ECN encapsulation is to copy the
+ * ECN codepoint of the inside header to the outside header on
+ * encapsulation if the inside header is not-ECT or ECT, and to set the
+ * ECN codepoint of the outside header to ECT(0) if the ECN codepoint of
+ * the inside header is CE.
+ */
static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
{
outer &= ~INET_ECN_MASK;
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index f1a770977c4..180231c5bbb 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -126,7 +126,8 @@ struct inet_timewait_sock {
/* And these are ours. */
unsigned int tw_ipv6only : 1,
tw_transparent : 1,
- tw_pad : 14, /* 14 bits hole */
+ tw_pad : 6, /* 6 bits hole */
+ tw_tos : 8,
tw_ipv6_offset : 16;
kmemcheck_bitfield_end(flags);
unsigned long tw_ttd;
diff --git a/include/net/ip.h b/include/net/ip.h
index aa76c7a4d9c..eca0ef7a495 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -165,6 +165,7 @@ struct ip_reply_arg {
int csumoffset; /* u16 offset of csum in iov[0].iov_base */
/* -1 if not needed */
int bound_dev_if;
+ u8 tos;
};
#define IP_REPLY_ARG_NOSRCCHECK 1
@@ -175,7 +176,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
}
void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
- struct ip_reply_arg *arg, unsigned int len);
+ const struct ip_reply_arg *arg, unsigned int len);
struct ipv4_config {
int log_martians;
@@ -406,9 +407,18 @@ enum ip_defrag_users {
IP_DEFRAG_VS_OUT,
IP_DEFRAG_VS_FWD,
IP_DEFRAG_AF_PACKET,
+ IP_DEFRAG_MACVLAN,
};
int ip_defrag(struct sk_buff *skb, u32 user);
+#ifdef CONFIG_INET
+struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
+#else
+static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+{
+ return skb;
+}
+#endif
int ip_frag_mem(struct net *net);
int ip_frag_nqueues(struct net *net);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 1aaf915656f..8fa4430f99c 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -900,6 +900,7 @@ struct netns_ipvs {
volatile int sync_state;
volatile int master_syncid;
volatile int backup_syncid;
+ struct mutex sync_mutex;
/* multicast interface name */
char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index f82a1e87737..f2419cf44ce 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -14,6 +14,7 @@
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/socket.h>
+#include <net/iucv/iucv.h>
#ifndef AF_IUCV
#define AF_IUCV 32
@@ -33,6 +34,7 @@ enum {
};
#define IUCV_QUEUELEN_DEFAULT 65535
+#define IUCV_HIPER_MSGLIM_DEFAULT 128
#define IUCV_CONN_TIMEOUT (HZ * 40)
#define IUCV_DISCONN_TIMEOUT (HZ * 2)
#define IUCV_CONN_IDLE_TIMEOUT (HZ * 60)
@@ -57,8 +59,51 @@ struct sock_msg_q {
spinlock_t lock;
};
+#define AF_IUCV_FLAG_ACK 0x1
+#define AF_IUCV_FLAG_SYN 0x2
+#define AF_IUCV_FLAG_FIN 0x4
+#define AF_IUCV_FLAG_WIN 0x8
+
+struct af_iucv_trans_hdr {
+ u16 magic;
+ u8 version;
+ u8 flags;
+ u16 window;
+ char destNodeID[8];
+ char destUserID[8];
+ char destAppName[16];
+ char srcNodeID[8];
+ char srcUserID[8];
+ char srcAppName[16]; /* => 70 bytes */
+ struct iucv_message iucv_hdr; /* => 33 bytes */
+ u8 pad; /* total 104 bytes */
+} __packed;
+
+enum iucv_tx_notify {
+ /* transmission of skb is completed and was successful */
+ TX_NOTIFY_OK = 0,
+ /* target is unreachable */
+ TX_NOTIFY_UNREACHABLE = 1,
+ /* transfer pending queue full */
+ TX_NOTIFY_TPQFULL = 2,
+ /* general error */
+ TX_NOTIFY_GENERALERROR = 3,
+ /* transmission of skb is pending - may interleave
+ * with TX_NOTIFY_DELAYED_* */
+ TX_NOTIFY_PENDING = 4,
+ /* transmission of skb was done successfully (delayed) */
+ TX_NOTIFY_DELAYED_OK = 5,
+ /* target unreachable (detected delayed) */
+ TX_NOTIFY_DELAYED_UNREACHABLE = 6,
+ /* general error (detected delayed) */
+ TX_NOTIFY_DELAYED_GENERALERROR = 7,
+};
+
#define iucv_sk(__sk) ((struct iucv_sock *) __sk)
+#define AF_IUCV_TRANS_IUCV 0
+#define AF_IUCV_TRANS_HIPER 1
+
struct iucv_sock {
struct sock sk;
char src_user_id[8];
@@ -75,6 +120,13 @@ struct iucv_sock {
unsigned int send_tag;
u8 flags;
u16 msglimit;
+ u16 msglimit_peer;
+ atomic_t msg_sent;
+ atomic_t msg_recv;
+ atomic_t pendings;
+ int transport;
+ void (*sk_txnotify)(struct sk_buff *skb,
+ enum iucv_tx_notify n);
};
/* iucv socket options (SOL_IUCV) */
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index 1121baa9f69..0894ced3195 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -120,7 +120,7 @@ struct iucv_message {
u32 reply_size;
u8 rmmsg[8];
u8 flags;
-};
+} __packed;
/*
* struct iucv_handler
@@ -459,3 +459,37 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size,
void *answer, size_t asize, size_t *residual);
+
+struct iucv_interface {
+ int (*message_receive)(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *buffer, size_t size, size_t *residual);
+ int (*__message_receive)(struct iucv_path *path,
+ struct iucv_message *msg, u8 flags, void *buffer, size_t size,
+ size_t *residual);
+ int (*message_reply)(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *reply, size_t size);
+ int (*message_reject)(struct iucv_path *path, struct iucv_message *msg);
+ int (*message_send)(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size);
+ int (*__message_send)(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size);
+ int (*message_send2way)(struct iucv_path *path,
+ struct iucv_message *msg, u8 flags, u32 srccls, void *buffer,
+ size_t size, void *answer, size_t asize, size_t *residual);
+ int (*message_purge)(struct iucv_path *path, struct iucv_message *msg,
+ u32 srccls);
+ int (*path_accept)(struct iucv_path *path, struct iucv_handler *handler,
+ u8 userdata[16], void *private);
+ int (*path_connect)(struct iucv_path *path,
+ struct iucv_handler *handler,
+ u8 userid[8], u8 system[8], u8 userdata[16], void *private);
+ int (*path_quiesce)(struct iucv_path *path, u8 userdata[16]);
+ int (*path_resume)(struct iucv_path *path, u8 userdata[16]);
+ int (*path_sever)(struct iucv_path *path, u8 userdata[16]);
+ int (*iucv_register)(struct iucv_handler *handler, int smp);
+ void (*iucv_unregister)(struct iucv_handler *handler, int smp);
+ struct bus_type *bus;
+ struct device *root;
+};
+
+extern struct iucv_interface iucv_if;
diff --git a/include/net/lapb.h b/include/net/lapb.h
index 96cb5ddaa9f..fd2bf572ee1 100644
--- a/include/net/lapb.h
+++ b/include/net/lapb.h
@@ -95,7 +95,7 @@ struct lapb_cb {
struct sk_buff_head write_queue;
struct sk_buff_head ack_queue;
unsigned char window;
- struct lapb_register_struct callbacks;
+ const struct lapb_register_struct *callbacks;
/* FRMR control information */
struct lapb_frame frmr_data;
diff --git a/include/net/lib80211.h b/include/net/lib80211.h
index b95bbb083ee..2ec896bb72b 100644
--- a/include/net/lib80211.h
+++ b/include/net/lib80211.h
@@ -117,10 +117,7 @@ void lib80211_crypt_info_free(struct lib80211_crypt_info *info);
int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops);
int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops);
struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name);
-void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *, int);
-void lib80211_crypt_deinit_handler(unsigned long);
void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info,
struct lib80211_crypt_data **crypt);
-void lib80211_crypt_quiescing(struct lib80211_crypt_info *info);
#endif /* LIB80211_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 9259e97864d..dc1123aa818 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
-#include <linux/wireless.h>
#include <linux/device.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
@@ -110,6 +109,7 @@ enum ieee80211_ac_numbers {
IEEE80211_AC_BE = 2,
IEEE80211_AC_BK = 3,
};
+#define IEEE80211_NUM_ACS 4
/**
* struct ieee80211_tx_queue_params - transmit queue configuration
@@ -165,13 +165,14 @@ struct ieee80211_low_level_stats {
* @BSS_CHANGED_QOS: QoS for this association was enabled/disabled. Note
* that it is only ever disabled for station mode.
* @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
+ * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode)
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
BSS_CHANGED_ERP_CTS_PROT = 1<<1,
BSS_CHANGED_ERP_PREAMBLE = 1<<2,
BSS_CHANGED_ERP_SLOT = 1<<3,
- BSS_CHANGED_HT = 1<<4,
+ BSS_CHANGED_HT = 1<<4,
BSS_CHANGED_BASIC_RATES = 1<<5,
BSS_CHANGED_BEACON_INT = 1<<6,
BSS_CHANGED_BSSID = 1<<7,
@@ -182,6 +183,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_ARP_FILTER = 1<<12,
BSS_CHANGED_QOS = 1<<13,
BSS_CHANGED_IDLE = 1<<14,
+ BSS_CHANGED_SSID = 1<<15,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -255,6 +257,9 @@ enum ieee80211_rssi_event {
* @idle: This interface is idle. There's also a global idle flag in the
* hardware config which may be more appropriate depending on what
* your driver/device needs to do.
+ * @ssid: The SSID of the current vif. Only valid in AP-mode.
+ * @ssid_len: Length of SSID given in @ssid.
+ * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -281,6 +286,9 @@ struct ieee80211_bss_conf {
bool arp_filter_enabled;
bool qos;
bool idle;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ size_t ssid_len;
+ bool hidden_ssid;
};
/**
@@ -331,9 +339,9 @@ struct ieee80211_bss_conf {
* used to indicate that a frame was already retried due to PS
* @IEEE80211_TX_INTFL_DONT_ENCRYPT: completely internal to mac80211,
* used to indicate frame should not be encrypted
- * @IEEE80211_TX_CTL_PSPOLL_RESPONSE: (internal?)
- * This frame is a response to a PS-poll frame and should be sent
- * although the station is in powersave mode.
+ * @IEEE80211_TX_CTL_POLL_RESPONSE: This frame is a response to a poll
+ * frame (PS-Poll or uAPSD) and should be sent although the station
+ * is in powersave mode.
* @IEEE80211_TX_CTL_MORE_FRAMES: More frames will be passed to the
* transmit function after the current frame, this can be used
* by drivers to kick the DMA queue only if unset or when the
@@ -341,8 +349,6 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_INTFL_RETRANSMISSION: This frame is being retransmitted
* after TX status because the destination was asleep, it must not
* be modified again (no seqno assignment, crypto, etc.)
- * @IEEE80211_TX_INTFL_HAS_RADIOTAP: This frame was injected and still
- * has a radiotap header at skb->data.
* @IEEE80211_TX_INTFL_NL80211_FRAME_TX: Frame was requested through nl80211
* MLME command (internal to mac80211 to figure out whether to send TX
* status to user space)
@@ -356,6 +362,20 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_INTFL_TKIP_MIC_FAILURE: Marks this packet to be used for TKIP
* testing. It will be sent out with incorrect Michael MIC key to allow
* TKIP countermeasures to be tested.
+ * @IEEE80211_TX_CTL_NO_CCK_RATE: This frame will be sent at non CCK rate.
+ * This flag is actually used for management frame especially for P2P
+ * frames not being sent at CCK rate in 2GHz band.
+ * @IEEE80211_TX_STATUS_EOSP: This packet marks the end of service period,
+ * when its status is reported the service period ends. For frames in
+ * an SP that mac80211 transmits, it is already set; for driver frames
+ * the driver may set this flag. It is also used to do the same for
+ * PS-Poll responses.
+ * @IEEE80211_TX_CTL_USE_MINRATE: This frame will be sent at lowest rate.
+ * This flag is used to send nullfunc frame at minimum rate when
+ * the nullfunc is used for connection monitoring purpose.
+ * @IEEE80211_TX_CTL_DONTFRAG: Don't fragment this packet even if it
+ * would be fragmented by size (this is optional, only used for
+ * monitor injection).
*
* Note: If you have to add new flags to the enumeration, then don't
* forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
@@ -377,15 +397,19 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14),
IEEE80211_TX_INTFL_RETRIED = BIT(15),
IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16),
- IEEE80211_TX_CTL_PSPOLL_RESPONSE = BIT(17),
+ IEEE80211_TX_CTL_POLL_RESPONSE = BIT(17),
IEEE80211_TX_CTL_MORE_FRAMES = BIT(18),
IEEE80211_TX_INTFL_RETRANSMISSION = BIT(19),
- IEEE80211_TX_INTFL_HAS_RADIOTAP = BIT(20),
+ /* hole at 20, use later */
IEEE80211_TX_INTFL_NL80211_FRAME_TX = BIT(21),
IEEE80211_TX_CTL_LDPC = BIT(22),
IEEE80211_TX_CTL_STBC = BIT(23) | BIT(24),
IEEE80211_TX_CTL_TX_OFFCHAN = BIT(25),
IEEE80211_TX_INTFL_TKIP_MIC_FAILURE = BIT(26),
+ IEEE80211_TX_CTL_NO_CCK_RATE = BIT(27),
+ IEEE80211_TX_STATUS_EOSP = BIT(28),
+ IEEE80211_TX_CTL_USE_MINRATE = BIT(29),
+ IEEE80211_TX_CTL_DONTFRAG = BIT(30),
};
#define IEEE80211_TX_CTL_STBC_SHIFT 23
@@ -399,9 +423,9 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTL_SEND_AFTER_DTIM | IEEE80211_TX_CTL_AMPDU | \
IEEE80211_TX_STAT_TX_FILTERED | IEEE80211_TX_STAT_ACK | \
IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_STAT_AMPDU_NO_BACK | \
- IEEE80211_TX_CTL_RATE_CTRL_PROBE | IEEE80211_TX_CTL_PSPOLL_RESPONSE | \
+ IEEE80211_TX_CTL_RATE_CTRL_PROBE | IEEE80211_TX_CTL_POLL_RESPONSE | \
IEEE80211_TX_CTL_MORE_FRAMES | IEEE80211_TX_CTL_LDPC | \
- IEEE80211_TX_CTL_STBC)
+ IEEE80211_TX_CTL_STBC | IEEE80211_TX_STATUS_EOSP)
/**
* enum mac80211_rate_control_flags - per-rate flags set by the
@@ -948,6 +972,9 @@ enum set_key_cmd {
* @wme: indicates whether the STA supports WME. Only valid during AP-mode.
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *), size is determined in hw information.
+ * @uapsd_queues: bitmap of queues configured for uapsd. Only valid
+ * if wme is supported.
+ * @max_sp: max Service Period. Only valid if wme is supported.
*/
struct ieee80211_sta {
u32 supp_rates[IEEE80211_NUM_BANDS];
@@ -955,6 +982,8 @@ struct ieee80211_sta {
u16 aid;
struct ieee80211_sta_ht_cap ht_cap;
bool wme;
+ u8 uapsd_queues;
+ u8 max_sp;
/* must be last */
u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
@@ -1095,6 +1124,10 @@ enum sta_notify_cmd {
* stations based on the PM bit of incoming frames.
* Use ieee80211_start_ps()/ieee8021_end_ps() to manually configure
* the PS mode of connected stations.
+ *
+ * @IEEE80211_HW_TX_AMPDU_SETUP_IN_HW: The device handles TX A-MPDU session
+ * setup strictly in HW. mac80211 should not attempt to do this in
+ * software.
*/
enum ieee80211_hw_flags {
IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -1120,6 +1153,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_CQM_RSSI = 1<<20,
IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21,
IEEE80211_HW_AP_LINK_PS = 1<<22,
+ IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23,
};
/**
@@ -1511,6 +1545,95 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
*/
/**
+ * DOC: AP support for powersaving clients
+ *
+ * In order to implement AP and P2P GO modes, mac80211 has support for
+ * client powersaving, both "legacy" PS (PS-Poll/null data) and uAPSD.
+ * There currently is no support for sAPSD.
+ *
+ * There is one assumption that mac80211 makes, namely that a client
+ * will not poll with PS-Poll and trigger with uAPSD at the same time.
+ * Both are supported, and both can be used by the same client, but
+ * they can't be used concurrently by the same client. This simplifies
+ * the driver code.
+ *
+ * The first thing to keep in mind is that there is a flag for complete
+ * driver implementation: %IEEE80211_HW_AP_LINK_PS. If this flag is set,
+ * mac80211 expects the driver to handle most of the state machine for
+ * powersaving clients and will ignore the PM bit in incoming frames.
+ * Drivers then use ieee80211_sta_ps_transition() to inform mac80211 of
+ * stations' powersave transitions. In this mode, mac80211 also doesn't
+ * handle PS-Poll/uAPSD.
+ *
+ * In the mode without %IEEE80211_HW_AP_LINK_PS, mac80211 will check the
+ * PM bit in incoming frames for client powersave transitions. When a
+ * station goes to sleep, we will stop transmitting to it. There is,
+ * however, a race condition: a station might go to sleep while there is
+ * data buffered on hardware queues. If the device has support for this
+ * it will reject frames, and the driver should give the frames back to
+ * mac80211 with the %IEEE80211_TX_STAT_TX_FILTERED flag set which will
+ * cause mac80211 to retry the frame when the station wakes up. The
+ * driver is also notified of powersave transitions by calling its
+ * @sta_notify callback.
+ *
+ * When the station is asleep, it has three choices: it can wake up,
+ * it can PS-Poll, or it can possibly start a uAPSD service period.
+ * Waking up is implemented by simply transmitting all buffered (and
+ * filtered) frames to the station. This is the easiest case. When
+ * the station sends a PS-Poll or a uAPSD trigger frame, mac80211
+ * will inform the driver of this with the @allow_buffered_frames
+ * callback; this callback is optional. mac80211 will then transmit
+ * the frames as usual and set the %IEEE80211_TX_CTL_POLL_RESPONSE
+ * on each frame. The last frame in the service period (or the only
+ * response to a PS-Poll) also has %IEEE80211_TX_STATUS_EOSP set to
+ * indicate that it ends the service period; as this frame must have
+ * TX status report it also sets %IEEE80211_TX_CTL_REQ_TX_STATUS.
+ * When TX status is reported for this frame, the service period is
+ * marked has having ended and a new one can be started by the peer.
+ *
+ * Another race condition can happen on some devices like iwlwifi
+ * when there are frames queued for the station and it wakes up
+ * or polls; the frames that are already queued could end up being
+ * transmitted first instead, causing reordering and/or wrong
+ * processing of the EOSP. The cause is that allowing frames to be
+ * transmitted to a certain station is out-of-band communication to
+ * the device. To allow this problem to be solved, the driver can
+ * call ieee80211_sta_block_awake() if frames are buffered when it
+ * is notified that the station went to sleep. When all these frames
+ * have been filtered (see above), it must call the function again
+ * to indicate that the station is no longer blocked.
+ *
+ * If the driver buffers frames in the driver for aggregation in any
+ * way, it must use the ieee80211_sta_set_buffered() call when it is
+ * notified of the station going to sleep to inform mac80211 of any
+ * TIDs that have frames buffered. Note that when a station wakes up
+ * this information is reset (hence the requirement to call it when
+ * informed of the station going to sleep). Then, when a service
+ * period starts for any reason, @release_buffered_frames is called
+ * with the number of frames to be released and which TIDs they are
+ * to come from. In this case, the driver is responsible for setting
+ * the EOSP (for uAPSD) and MORE_DATA bits in the released frames,
+ * to help the @more_data paramter is passed to tell the driver if
+ * there is more data on other TIDs -- the TIDs to release frames
+ * from are ignored since mac80211 doesn't know how many frames the
+ * buffers for those TIDs contain.
+ *
+ * If the driver also implement GO mode, where absence periods may
+ * shorten service periods (or abort PS-Poll responses), it must
+ * filter those response frames except in the case of frames that
+ * are buffered in the driver -- those must remain buffered to avoid
+ * reordering. Because it is possible that no frames are released
+ * in this case, the driver must call ieee80211_sta_eosp_irqsafe()
+ * to indicate to mac80211 that the service period ended anyway.
+ *
+ * Finally, if frames from multiple TIDs are released from mac80211
+ * but the driver might reorder them, it must clear & set the flags
+ * appropriately (only the last frame may have %IEEE80211_TX_STATUS_EOSP)
+ * and also take care of the EOSP and MORE_DATA bits in the frame.
+ * The driver may also use ieee80211_sta_eosp_irqsafe() in this case.
+ */
+
+/**
* enum ieee80211_filter_flags - hardware filter flags
*
* These flags determine what the filter in hardware should be
@@ -1600,6 +1723,17 @@ enum ieee80211_tx_sync_type {
};
/**
+ * enum ieee80211_frame_release_type - frame release reason
+ * @IEEE80211_FRAME_RELEASE_PSPOLL: frame released for PS-Poll
+ * @IEEE80211_FRAME_RELEASE_UAPSD: frame(s) released due to
+ * frame received on trigger-enabled AC
+ */
+enum ieee80211_frame_release_type {
+ IEEE80211_FRAME_RELEASE_PSPOLL,
+ IEEE80211_FRAME_RELEASE_UAPSD,
+};
+
+/**
* struct ieee80211_ops - callbacks from mac80211 to the driver
*
* This structure contains various callbacks that the driver may
@@ -1896,11 +2030,6 @@ enum ieee80211_tx_sync_type {
* ieee80211_remain_on_channel_expired(). This callback may sleep.
* @cancel_remain_on_channel: Requests that an ongoing off-channel period is
* aborted before it expires. This callback may sleep.
- * @offchannel_tx: Transmit frame on another channel, wait for a response
- * and return. Reliable TX status must be reported for the frame. If the
- * return value is 1, then the @remain_on_channel will be used with a
- * regular transmission (if supported.)
- * @offchannel_tx_cancel_wait: cancel wait associated with offchannel TX
*
* @set_ringparam: Set tx and rx ring sizes.
*
@@ -1914,6 +2043,45 @@ enum ieee80211_tx_sync_type {
* The callback can sleep.
* @rssi_callback: Notify driver when the average RSSI goes above/below
* thresholds that were registered previously. The callback can sleep.
+ *
+ * @release_buffered_frames: Release buffered frames according to the given
+ * parameters. In the case where the driver buffers some frames for
+ * sleeping stations mac80211 will use this callback to tell the driver
+ * to release some frames, either for PS-poll or uAPSD.
+ * Note that if the @more_data paramter is %false the driver must check
+ * if there are more frames on the given TIDs, and if there are more than
+ * the frames being released then it must still set the more-data bit in
+ * the frame. If the @more_data parameter is %true, then of course the
+ * more-data bit must always be set.
+ * The @tids parameter tells the driver which TIDs to release frames
+ * from, for PS-poll it will always have only a single bit set.
+ * In the case this is used for a PS-poll initiated release, the
+ * @num_frames parameter will always be 1 so code can be shared. In
+ * this case the driver must also set %IEEE80211_TX_STATUS_EOSP flag
+ * on the TX status (and must report TX status) so that the PS-poll
+ * period is properly ended. This is used to avoid sending multiple
+ * responses for a retried PS-poll frame.
+ * In the case this is used for uAPSD, the @num_frames parameter may be
+ * bigger than one, but the driver may send fewer frames (it must send
+ * at least one, however). In this case it is also responsible for
+ * setting the EOSP flag in the QoS header of the frames. Also, when the
+ * service period ends, the driver must set %IEEE80211_TX_STATUS_EOSP
+ * on the last frame in the SP. Alternatively, it may call the function
+ * ieee80211_sta_eosp_irqsafe() to inform mac80211 of the end of the SP.
+ * This callback must be atomic.
+ * @allow_buffered_frames: Prepare device to allow the given number of frames
+ * to go out to the given station. The frames will be sent by mac80211
+ * via the usual TX path after this call. The TX information for frames
+ * released will also have the %IEEE80211_TX_CTL_POLL_RESPONSE flag set
+ * and the last one will also have %IEEE80211_TX_STATUS_EOSP set. In case
+ * frames from multiple TIDs are released and the driver might reorder
+ * them between the TIDs, it must set the %IEEE80211_TX_STATUS_EOSP flag
+ * on the last frame and clear it on all others and also handle the EOSP
+ * bit in the QoS header correctly. Alternatively, it can also call the
+ * ieee80211_sta_eosp_irqsafe() function.
+ * The @tids parameter is a bitmap and tells the driver which TIDs the
+ * frames will be on; it will at most have two bits set.
+ * This callback must be atomic.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -1986,11 +2154,13 @@ struct ieee80211_ops {
struct ieee80211_sta *sta);
void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd, struct ieee80211_sta *sta);
- int (*conf_tx)(struct ieee80211_hw *hw, u16 queue,
+ int (*conf_tx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params);
- u64 (*get_tsf)(struct ieee80211_hw *hw);
- void (*set_tsf)(struct ieee80211_hw *hw, u64 tsf);
- void (*reset_tsf)(struct ieee80211_hw *hw);
+ u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+ void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 tsf);
+ void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int (*tx_last_beacon)(struct ieee80211_hw *hw);
int (*ampdu_action)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2019,11 +2189,6 @@ struct ieee80211_ops {
enum nl80211_channel_type channel_type,
int duration);
int (*cancel_remain_on_channel)(struct ieee80211_hw *hw);
- int (*offchannel_tx)(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- unsigned int wait);
- int (*offchannel_tx_cancel_wait)(struct ieee80211_hw *hw);
int (*set_ringparam)(struct ieee80211_hw *hw, u32 tx, u32 rx);
void (*get_ringparam)(struct ieee80211_hw *hw,
u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
@@ -2032,6 +2197,17 @@ struct ieee80211_ops {
const struct cfg80211_bitrate_mask *mask);
void (*rssi_callback)(struct ieee80211_hw *hw,
enum ieee80211_rssi_event rssi_event);
+
+ void (*allow_buffered_frames)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data);
+ void (*release_buffered_frames)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data);
};
/**
@@ -2346,20 +2522,38 @@ static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta,
* The TX headroom reserved by mac80211 for its own tx_status functions.
* This is enough for the radiotap header.
*/
-#define IEEE80211_TX_STATUS_HEADROOM 13
+#define IEEE80211_TX_STATUS_HEADROOM 14
/**
- * ieee80211_sta_set_tim - set the TIM bit for a sleeping station
+ * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
* @sta: &struct ieee80211_sta pointer for the sleeping station
+ * @tid: the TID that has buffered frames
+ * @buffered: indicates whether or not frames are buffered for this TID
*
* If a driver buffers frames for a powersave station instead of passing
- * them back to mac80211 for retransmission, the station needs to be told
- * to wake up using the TIM bitmap in the beacon.
+ * them back to mac80211 for retransmission, the station may still need
+ * to be told that there are buffered frames via the TIM bit.
*
- * This function sets the station's TIM bit - it will be cleared when the
- * station wakes up.
+ * This function informs mac80211 whether or not there are frames that are
+ * buffered in the driver for a given TID; mac80211 can then use this data
+ * to set the TIM bit (NOTE: This may call back into the driver's set_tim
+ * call! Beware of the locking!)
+ *
+ * If all frames are released to the station (due to PS-poll or uAPSD)
+ * then the driver needs to inform mac80211 that there no longer are
+ * frames buffered. However, when the station wakes up mac80211 assumes
+ * that all buffered frames will be transmitted and clears this data,
+ * drivers need to make sure they inform mac80211 about all buffered
+ * frames on the sleep transition (sta_notify() with %STA_NOTIFY_SLEEP).
+ *
+ * Note that technically mac80211 only needs to know this per AC, not per
+ * TID, but since driver buffering will inevitably happen per TID (since
+ * it is related to aggregation) it is easier to make mac80211 map the
+ * TID to the AC as required instead of keeping track in all drivers that
+ * use this API.
*/
-void ieee80211_sta_set_tim(struct ieee80211_sta *sta);
+void ieee80211_sta_set_buffered(struct ieee80211_sta *sta,
+ u8 tid, bool buffered);
/**
* ieee80211_tx_status - transmit status callback
@@ -3017,6 +3211,24 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
struct ieee80211_sta *pubsta, bool block);
/**
+ * ieee80211_sta_eosp - notify mac80211 about end of SP
+ * @pubsta: the station
+ *
+ * When a device transmits frames in a way that it can't tell
+ * mac80211 in the TX status about the EOSP, it must clear the
+ * %IEEE80211_TX_STATUS_EOSP bit and call this function instead.
+ * This applies for PS-Poll as well as uAPSD.
+ *
+ * Note that there is no non-_irqsafe version right now as
+ * it wasn't needed, but just like _tx_status() and _rx()
+ * must not be mixed in irqsafe/non-irqsafe versions, this
+ * function must not be mixed with those either. Use the
+ * all irqsafe, or all non-irqsafe, don't mix! If you need
+ * the non-irqsafe version of this, you need to add it.
+ */
+void ieee80211_sta_eosp_irqsafe(struct ieee80211_sta *pubsta);
+
+/**
* ieee80211_iter_keys - iterate keys programmed into the device
* @hw: pointer obtained from ieee80211_alloc_hw()
* @vif: virtual interface to iterate, may be %NULL for all
@@ -3229,6 +3441,19 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw);
void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
const u8 *addr);
+/**
+ * ieee80211_send_bar - send a BlockAckReq frame
+ *
+ * can be used to flush pending frames from the peer's aggregation reorder
+ * buffer.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @ra: the peer's destination address
+ * @tid: the TID of the aggregation session
+ * @ssn: the new starting sequence number for the receiver
+ */
+void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn);
+
/* Rate control API */
/**
@@ -3419,4 +3644,9 @@ void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
int rssi_max_thold);
void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif);
+
+int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb);
+
+int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif,
+ struct sk_buff *skb);
#endif /* MAC80211_H */
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
new file mode 100644
index 00000000000..39b85bc0804
--- /dev/null
+++ b/include/net/nfc/nci.h
@@ -0,0 +1,313 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci.h, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __NCI_H
+#define __NCI_H
+
+/* NCI constants */
+#define NCI_MAX_NUM_MAPPING_CONFIGS 10
+#define NCI_MAX_NUM_RF_CONFIGS 10
+#define NCI_MAX_NUM_CONN 10
+
+/* NCI Status Codes */
+#define NCI_STATUS_OK 0x00
+#define NCI_STATUS_REJECTED 0x01
+#define NCI_STATUS_MESSAGE_CORRUPTED 0x02
+#define NCI_STATUS_BUFFER_FULL 0x03
+#define NCI_STATUS_FAILED 0x04
+#define NCI_STATUS_NOT_INITIALIZED 0x05
+#define NCI_STATUS_SYNTAX_ERROR 0x06
+#define NCI_STATUS_SEMANTIC_ERROR 0x07
+#define NCI_STATUS_UNKNOWN_GID 0x08
+#define NCI_STATUS_UNKNOWN_OID 0x09
+#define NCI_STATUS_INVALID_PARAM 0x0a
+#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0b
+/* Discovery Specific Status Codes */
+#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0
+#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1
+/* RF Interface Specific Status Codes */
+#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0
+#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1
+#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2
+#define NCI_STATUS_RF_LINK_LOSS_ERROR 0xb3
+/* NFCEE Interface Specific Status Codes */
+#define NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED 0xc0
+#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc1
+#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc2
+#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc3
+#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc4
+
+/* NCI RF Technology and Mode */
+#define NCI_NFC_A_PASSIVE_POLL_MODE 0x00
+#define NCI_NFC_B_PASSIVE_POLL_MODE 0x01
+#define NCI_NFC_F_PASSIVE_POLL_MODE 0x02
+#define NCI_NFC_A_ACTIVE_POLL_MODE 0x03
+#define NCI_NFC_F_ACTIVE_POLL_MODE 0x05
+#define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80
+#define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81
+#define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82
+#define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83
+#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85
+
+/* NCI RF Protocols */
+#define NCI_RF_PROTOCOL_UNKNOWN 0x00
+#define NCI_RF_PROTOCOL_T1T 0x01
+#define NCI_RF_PROTOCOL_T2T 0x02
+#define NCI_RF_PROTOCOL_T3T 0x03
+#define NCI_RF_PROTOCOL_ISO_DEP 0x04
+#define NCI_RF_PROTOCOL_NFC_DEP 0x05
+
+/* NCI RF Interfaces */
+#define NCI_RF_INTERFACE_RFU 0x00
+#define NCI_RF_INTERFACE_FRAME 0x01
+#define NCI_RF_INTERFACE_ISO_DEP 0x02
+#define NCI_RF_INTERFACE_NFC_DEP 0x03
+
+/* NCI RF_DISCOVER_MAP_CMD modes */
+#define NCI_DISC_MAP_MODE_POLL 0x01
+#define NCI_DISC_MAP_MODE_LISTEN 0x02
+#define NCI_DISC_MAP_MODE_BOTH 0x03
+
+/* NCI Discovery Types */
+#define NCI_DISCOVERY_TYPE_POLL_A_PASSIVE 0x00
+#define NCI_DISCOVERY_TYPE_POLL_B_PASSIVE 0x01
+#define NCI_DISCOVERY_TYPE_POLL_F_PASSIVE 0x02
+#define NCI_DISCOVERY_TYPE_POLL_A_ACTIVE 0x03
+#define NCI_DISCOVERY_TYPE_POLL_F_ACTIVE 0x05
+#define NCI_DISCOVERY_TYPE_WAKEUP_A_PASSIVE 0x06
+#define NCI_DISCOVERY_TYPE_WAKEUP_B_PASSIVE 0x07
+#define NCI_DISCOVERY_TYPE_WAKEUP_A_ACTIVE 0x09
+#define NCI_DISCOVERY_TYPE_LISTEN_A_PASSIVE 0x80
+#define NCI_DISCOVERY_TYPE_LISTEN_B_PASSIVE 0x81
+#define NCI_DISCOVERY_TYPE_LISTEN_F_PASSIVE 0x82
+#define NCI_DISCOVERY_TYPE_LISTEN_A_ACTIVE 0x83
+#define NCI_DISCOVERY_TYPE_LISTEN_F_ACTIVE 0x85
+
+/* NCI Deactivation Type */
+#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00
+#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01
+#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02
+#define NCI_DEACTIVATE_TYPE_RF_LINK_LOSS 0x03
+#define NCI_DEACTIVATE_TYPE_DISCOVERY_ERROR 0x04
+
+/* Message Type (MT) */
+#define NCI_MT_DATA_PKT 0x00
+#define NCI_MT_CMD_PKT 0x01
+#define NCI_MT_RSP_PKT 0x02
+#define NCI_MT_NTF_PKT 0x03
+
+#define nci_mt(hdr) (((hdr)[0]>>5)&0x07)
+#define nci_mt_set(hdr, mt) ((hdr)[0] |= (__u8)(((mt)&0x07)<<5))
+
+/* Packet Boundary Flag (PBF) */
+#define NCI_PBF_LAST 0x00
+#define NCI_PBF_CONT 0x01
+
+#define nci_pbf(hdr) (__u8)(((hdr)[0]>>4)&0x01)
+#define nci_pbf_set(hdr, pbf) ((hdr)[0] |= (__u8)(((pbf)&0x01)<<4))
+
+/* Control Opcode manipulation */
+#define nci_opcode_pack(gid, oid) (__u16)((((__u16)((gid)&0x0f))<<8)|\
+ ((__u16)((oid)&0x3f)))
+#define nci_opcode(hdr) nci_opcode_pack(hdr[0], hdr[1])
+#define nci_opcode_gid(op) (__u8)(((op)&0x0f00)>>8)
+#define nci_opcode_oid(op) (__u8)((op)&0x003f)
+
+/* Payload Length */
+#define nci_plen(hdr) (__u8)((hdr)[2])
+
+/* Connection ID */
+#define nci_conn_id(hdr) (__u8)(((hdr)[0])&0x0f)
+
+/* GID values */
+#define NCI_GID_CORE 0x0
+#define NCI_GID_RF_MGMT 0x1
+#define NCI_GID_NFCEE_MGMT 0x2
+#define NCI_GID_PROPRIETARY 0xf
+
+/* ---- NCI Packet structures ---- */
+#define NCI_CTRL_HDR_SIZE 3
+#define NCI_DATA_HDR_SIZE 3
+
+struct nci_ctrl_hdr {
+ __u8 gid; /* MT & PBF & GID */
+ __u8 oid;
+ __u8 plen;
+} __packed;
+
+struct nci_data_hdr {
+ __u8 conn_id; /* MT & PBF & ConnID */
+ __u8 rfu;
+ __u8 plen;
+} __packed;
+
+/* ------------------------ */
+/* ----- NCI Commands ---- */
+/* ------------------------ */
+#define NCI_OP_CORE_RESET_CMD nci_opcode_pack(NCI_GID_CORE, 0x00)
+
+#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01)
+
+#define NCI_OP_CORE_SET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x02)
+
+#define NCI_OP_CORE_CONN_CREATE_CMD nci_opcode_pack(NCI_GID_CORE, 0x04)
+struct nci_core_conn_create_cmd {
+ __u8 target_handle;
+ __u8 num_target_specific_params;
+} __packed;
+
+#define NCI_OP_CORE_CONN_CLOSE_CMD nci_opcode_pack(NCI_GID_CORE, 0x06)
+
+#define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
+struct disc_map_config {
+ __u8 rf_protocol;
+ __u8 mode;
+ __u8 rf_interface_type;
+} __packed;
+
+struct nci_rf_disc_map_cmd {
+ __u8 num_mapping_configs;
+ struct disc_map_config mapping_configs
+ [NCI_MAX_NUM_MAPPING_CONFIGS];
+} __packed;
+
+#define NCI_OP_RF_DISCOVER_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
+struct disc_config {
+ __u8 type;
+ __u8 frequency;
+} __packed;
+
+struct nci_rf_disc_cmd {
+ __u8 num_disc_configs;
+ struct disc_config disc_configs[NCI_MAX_NUM_RF_CONFIGS];
+} __packed;
+
+#define NCI_OP_RF_DEACTIVATE_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+struct nci_rf_deactivate_cmd {
+ __u8 type;
+} __packed;
+
+/* ----------------------- */
+/* ---- NCI Responses ---- */
+/* ----------------------- */
+#define NCI_OP_CORE_RESET_RSP nci_opcode_pack(NCI_GID_CORE, 0x00)
+struct nci_core_reset_rsp {
+ __u8 status;
+ __u8 nci_ver;
+} __packed;
+
+#define NCI_OP_CORE_INIT_RSP nci_opcode_pack(NCI_GID_CORE, 0x01)
+struct nci_core_init_rsp_1 {
+ __u8 status;
+ __le32 nfcc_features;
+ __u8 num_supported_rf_interfaces;
+ __u8 supported_rf_interfaces[0]; /* variable size array */
+ /* continuted in nci_core_init_rsp_2 */
+} __packed;
+
+struct nci_core_init_rsp_2 {
+ __u8 max_logical_connections;
+ __le16 max_routing_table_size;
+ __u8 max_control_packet_payload_length;
+ __le16 rf_sending_buffer_size;
+ __le16 rf_receiving_buffer_size;
+ __le16 manufacturer_id;
+} __packed;
+
+#define NCI_OP_CORE_SET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x02)
+
+#define NCI_OP_CORE_CONN_CREATE_RSP nci_opcode_pack(NCI_GID_CORE, 0x04)
+struct nci_core_conn_create_rsp {
+ __u8 status;
+ __u8 max_pkt_payload_size;
+ __u8 initial_num_credits;
+ __u8 conn_id;
+} __packed;
+
+#define NCI_OP_CORE_CONN_CLOSE_RSP nci_opcode_pack(NCI_GID_CORE, 0x06)
+
+#define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
+
+#define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
+
+#define NCI_OP_RF_DEACTIVATE_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+
+/* --------------------------- */
+/* ---- NCI Notifications ---- */
+/* --------------------------- */
+#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x07)
+struct conn_credit_entry {
+ __u8 conn_id;
+ __u8 credits;
+} __packed;
+
+struct nci_core_conn_credit_ntf {
+ __u8 num_entries;
+ struct conn_credit_entry conn_entries[NCI_MAX_NUM_CONN];
+} __packed;
+
+#define NCI_OP_RF_FIELD_INFO_NTF nci_opcode_pack(NCI_GID_CORE, 0x08)
+struct nci_rf_field_info_ntf {
+ __u8 rf_field_status;
+} __packed;
+
+#define NCI_OP_RF_ACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
+struct rf_tech_specific_params_nfca_poll {
+ __u16 sens_res;
+ __u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */
+ __u8 nfcid1[10];
+ __u8 sel_res_len; /* 0 or 1 Bytes */
+ __u8 sel_res;
+} __packed;
+
+struct activation_params_nfca_poll_iso_dep {
+ __u8 rats_res_len;
+ __u8 rats_res[20];
+};
+
+struct nci_rf_activate_ntf {
+ __u8 target_handle;
+ __u8 rf_protocol;
+ __u8 rf_tech_and_mode;
+ __u8 rf_tech_specific_params_len;
+
+ union {
+ struct rf_tech_specific_params_nfca_poll nfca_poll;
+ } rf_tech_specific_params;
+
+ __u8 rf_interface_type;
+ __u8 activation_params_len;
+
+ union {
+ struct activation_params_nfca_poll_iso_dep nfca_poll_iso_dep;
+ } activation_params;
+
+} __packed;
+
+#define NCI_OP_RF_DEACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+
+#endif /* __NCI_H */
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
new file mode 100644
index 00000000000..b8b4bbd7e0f
--- /dev/null
+++ b/include/net/nfc/nci_core.h
@@ -0,0 +1,184 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci_core.h, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __NCI_CORE_H
+#define __NCI_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+
+#include <net/nfc/nfc.h>
+#include <net/nfc/nci.h>
+
+/* NCI device state */
+enum {
+ NCI_INIT,
+ NCI_UP,
+ NCI_DISCOVERY,
+ NCI_POLL_ACTIVE,
+ NCI_DATA_EXCHANGE,
+};
+
+/* NCI timeouts */
+#define NCI_RESET_TIMEOUT 5000
+#define NCI_INIT_TIMEOUT 5000
+#define NCI_RF_DISC_TIMEOUT 5000
+#define NCI_RF_DEACTIVATE_TIMEOUT 5000
+#define NCI_CMD_TIMEOUT 5000
+
+struct nci_dev;
+
+struct nci_ops {
+ int (*open)(struct nci_dev *ndev);
+ int (*close)(struct nci_dev *ndev);
+ int (*send)(struct sk_buff *skb);
+};
+
+#define NCI_MAX_SUPPORTED_RF_INTERFACES 4
+
+/* NCI Core structures */
+struct nci_dev {
+ struct nfc_dev *nfc_dev;
+ struct nci_ops *ops;
+
+ int tx_headroom;
+ int tx_tailroom;
+
+ unsigned long flags;
+
+ atomic_t cmd_cnt;
+ atomic_t credits_cnt;
+
+ struct timer_list cmd_timer;
+
+ struct workqueue_struct *cmd_wq;
+ struct work_struct cmd_work;
+
+ struct workqueue_struct *rx_wq;
+ struct work_struct rx_work;
+
+ struct workqueue_struct *tx_wq;
+ struct work_struct tx_work;
+
+ struct sk_buff_head cmd_q;
+ struct sk_buff_head rx_q;
+ struct sk_buff_head tx_q;
+
+ struct mutex req_lock;
+ struct completion req_completion;
+ __u32 req_status;
+ __u32 req_result;
+
+ void *driver_data;
+
+ __u32 poll_prots;
+ __u32 target_available_prots;
+ __u32 target_active_prot;
+
+ /* received during NCI_OP_CORE_RESET_RSP */
+ __u8 nci_ver;
+
+ /* received during NCI_OP_CORE_INIT_RSP */
+ __u32 nfcc_features;
+ __u8 num_supported_rf_interfaces;
+ __u8 supported_rf_interfaces
+ [NCI_MAX_SUPPORTED_RF_INTERFACES];
+ __u8 max_logical_connections;
+ __u16 max_routing_table_size;
+ __u8 max_control_packet_payload_length;
+ __u16 rf_sending_buffer_size;
+ __u16 rf_receiving_buffer_size;
+ __u16 manufacturer_id;
+
+ /* received during NCI_OP_CORE_CONN_CREATE_RSP for static conn 0 */
+ __u8 max_pkt_payload_size;
+ __u8 initial_num_credits;
+ __u8 conn_id;
+
+ /* stored during nci_data_exchange */
+ data_exchange_cb_t data_exchange_cb;
+ void *data_exchange_cb_context;
+ struct sk_buff *rx_data_reassembly;
+};
+
+/* ----- NCI Devices ----- */
+struct nci_dev *nci_allocate_device(struct nci_ops *ops,
+ __u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom);
+void nci_free_device(struct nci_dev *ndev);
+int nci_register_device(struct nci_dev *ndev);
+void nci_unregister_device(struct nci_dev *ndev);
+int nci_recv_frame(struct sk_buff *skb);
+
+static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev,
+ unsigned int len,
+ gfp_t how)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len + ndev->tx_headroom + ndev->tx_tailroom, how);
+ if (skb)
+ skb_reserve(skb, ndev->tx_headroom);
+
+ return skb;
+}
+
+static inline void nci_set_parent_dev(struct nci_dev *ndev, struct device *dev)
+{
+ nfc_set_parent_dev(ndev->nfc_dev, dev);
+}
+
+static inline void nci_set_drvdata(struct nci_dev *ndev, void *data)
+{
+ ndev->driver_data = data;
+}
+
+static inline void *nci_get_drvdata(struct nci_dev *ndev)
+{
+ return ndev->driver_data;
+}
+
+void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb);
+void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb);
+void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
+int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload);
+int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
+void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ int err);
+
+/* ----- NCI requests ----- */
+#define NCI_REQ_DONE 0
+#define NCI_REQ_PEND 1
+#define NCI_REQ_CANCELED 2
+
+void nci_req_complete(struct nci_dev *ndev, int result);
+
+/* ----- NCI status code ----- */
+int nci_to_errno(__u8 code);
+
+#endif /* __NCI_CORE_H */
diff --git a/include/net/nfc.h b/include/net/nfc/nfc.h
index cc0130312f7..6a7f602aa84 100644
--- a/include/net/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -48,6 +48,8 @@ typedef void (*data_exchange_cb_t)(void *context, struct sk_buff *skb,
int err);
struct nfc_ops {
+ int (*dev_up)(struct nfc_dev *dev);
+ int (*dev_down)(struct nfc_dev *dev);
int (*start_poll)(struct nfc_dev *dev, u32 protocols);
void (*stop_poll)(struct nfc_dev *dev);
int (*activate_target)(struct nfc_dev *dev, u32 target_idx,
@@ -78,10 +80,15 @@ struct nfc_dev {
int targets_generation;
spinlock_t targets_lock;
struct device dev;
+ bool dev_up;
bool polling;
+ bool remote_activated;
struct nfc_genl_data genl_data;
u32 supported_protocols;
+ int tx_headroom;
+ int tx_tailroom;
+
struct nfc_ops *ops;
};
#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
@@ -89,7 +96,9 @@ struct nfc_dev {
extern struct class nfc_class;
struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
- u32 supported_protocols);
+ u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom);
/**
* nfc_free_device - free nfc device
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index 356d6e3dc20..eb7d3c2d427 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -3,11 +3,19 @@
/*
* regulatory support structures
*
- * Copyright 2008-2009 Luis R. Rodriguez <lrodriguez@atheros.com>
+ * Copyright 2008-2009 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 4fc88f3ccd5..2eb207ea4ea 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -46,14 +46,14 @@ struct qdisc_size_table {
struct Qdisc {
int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
struct sk_buff * (*dequeue)(struct Qdisc *dev);
- unsigned flags;
+ unsigned int flags;
#define TCQ_F_BUILTIN 1
#define TCQ_F_INGRESS 2
#define TCQ_F_CAN_BYPASS 4
#define TCQ_F_MQROOT 8
#define TCQ_F_WARN_NONWC (1 << 16)
int padded;
- struct Qdisc_ops *ops;
+ const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
struct list_head list;
u32 handle;
@@ -224,7 +224,7 @@ struct qdisc_skb_cb {
long data[];
};
-static inline int qdisc_qlen(struct Qdisc *q)
+static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
}
@@ -239,12 +239,12 @@ static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
return &qdisc->q.lock;
}
-static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
+static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc;
}
-static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
+static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc_sleeping;
}
@@ -260,7 +260,7 @@ static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
* root. This is enforced by holding the RTNL semaphore, which
* all users of this lock accessor must do.
*/
-static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
+static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root(qdisc);
@@ -268,7 +268,7 @@ static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
return qdisc_lock(root);
}
-static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
+static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root_sleeping(qdisc);
@@ -276,17 +276,17 @@ static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
return qdisc_lock(root);
}
-static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
+static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->dev;
}
-static inline void sch_tree_lock(struct Qdisc *q)
+static inline void sch_tree_lock(const struct Qdisc *q)
{
spin_lock_bh(qdisc_root_sleeping_lock(q));
}
-static inline void sch_tree_unlock(struct Qdisc *q)
+static inline void sch_tree_unlock(const struct Qdisc *q)
{
spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
@@ -319,7 +319,7 @@ static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
}
static inline struct Qdisc_class_common *
-qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
+qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
struct Qdisc_class_common *cl;
struct hlist_node *n;
@@ -393,7 +393,7 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev)
}
/* Are any of the TX qdiscs changing? */
-static inline bool qdisc_tx_changing(struct net_device *dev)
+static inline bool qdisc_tx_changing(const struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
diff --git a/include/net/scm.h b/include/net/scm.h
index 745460fa2f0..d456f4c71a3 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -49,7 +49,7 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,
struct pid *pid, const struct cred *cred)
{
scm->pid = get_pid(pid);
- scm->cred = get_cred(cred);
+ scm->cred = cred ? get_cred(cred) : NULL;
cred_to_ucred(pid, cred, &scm->creds);
}
@@ -73,8 +73,7 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)
static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
struct scm_cookie *scm)
{
- scm_set_cred(scm, task_tgid(current), current_cred());
- scm->fp = NULL;
+ memset(scm, 0, sizeof(*scm));
unix_get_peersec_dgram(sock, scm);
if (msg->msg_controllen <= 0)
return 0;
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index f7d9c3fc06f..e90e7a9935d 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1915,6 +1915,7 @@ struct sctp_association {
__u32 addip_serial;
union sctp_addr *asconf_addr_del_pending;
int src_out_of_asoc_ok;
+ struct sctp_transport *new_transport;
/* SCTP AUTH: list of the endpoint shared keys. These
* keys are provided out of band by the user applicaton
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index d97f6892c01..c2e542b27a5 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -10,7 +10,7 @@ extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
__be16 dport);
extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
-extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+extern __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
__be16 sport, __be16 dport);
extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
diff --git a/include/net/sock.h b/include/net/sock.h
index 8e4062f165b..5ac682f73d6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -686,16 +686,25 @@ static inline void sock_rps_reset_flow(const struct sock *sk)
#endif
}
-static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
+static inline void sock_rps_save_rxhash(struct sock *sk,
+ const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
- if (unlikely(sk->sk_rxhash != rxhash)) {
+ if (unlikely(sk->sk_rxhash != skb->rxhash)) {
sock_rps_reset_flow(sk);
- sk->sk_rxhash = rxhash;
+ sk->sk_rxhash = skb->rxhash;
}
#endif
}
+static inline void sock_rps_reset_rxhash(struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ sock_rps_reset_flow(sk);
+ sk->sk_rxhash = 0;
+#endif
+}
+
#define sk_wait_event(__sk, __timeo, __condition) \
({ int __rc; \
release_sock(__sk); \
diff --git a/include/net/tcp.h b/include/net/tcp.h
index acc620a4a45..e147f42d643 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -18,7 +18,6 @@
#ifndef _TCP_H
#define _TCP_H
-#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1
#include <linux/list.h>
@@ -327,9 +326,9 @@ extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len);
+ const struct tcphdr *th, unsigned int len);
extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len);
+ const struct tcphdr *th, unsigned int len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
@@ -356,6 +355,7 @@ static inline void tcp_dec_quickack_mode(struct sock *sk,
#define TCP_ECN_OK 1
#define TCP_ECN_QUEUE_CWR 2
#define TCP_ECN_DEMAND_CWR 4
+#define TCP_ECN_SEEN 8
static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
@@ -400,10 +400,10 @@ extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int nonblock, int flags, int *addr_len);
-extern void tcp_parse_options(struct sk_buff *skb,
- struct tcp_options_received *opt_rx, u8 **hvpp,
+extern void tcp_parse_options(const struct sk_buff *skb,
+ struct tcp_options_received *opt_rx, const u8 **hvpp,
int estab);
-extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
+extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
/*
* TCP v4 functions exported for the inet6 API
@@ -449,7 +449,7 @@ extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
-extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
+extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
__u16 *mss);
#else
static inline __u32 cookie_v6_init_sequence(struct sock *sk,
@@ -521,7 +521,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
}
/* tcp.c */
-extern void tcp_get_info(struct sock *, struct tcp_info *);
+extern void tcp_get_info(const struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
@@ -531,8 +531,8 @@ extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
extern void tcp_initialize_rcv_mss(struct sock *sk);
-extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
-extern int tcp_mss_to_mtu(struct sock *sk, int mss);
+extern int tcp_mtu_to_mss(const struct sock *sk, int pmtu);
+extern int tcp_mss_to_mtu(const struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
@@ -573,7 +573,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
- struct dst_entry *dst = __sk_dst_get(sk);
+ const struct dst_entry *dst = __sk_dst_get(sk);
u32 rto_min = TCP_RTO_MIN;
if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
@@ -635,13 +635,14 @@ struct tcp_skb_cb {
__u32 seq; /* Starting sequence number */
__u32 end_seq; /* SEQ + FIN + SYN + datalen */
__u32 when; /* used to compute rtt's */
- __u8 flags; /* TCP header flags. */
+ __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
__u8 sacked; /* State flags for SACK/FACK. */
#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
#define TCPCB_LOST 0x04 /* SKB is lost */
#define TCPCB_TAGBITS 0x07 /* All tag bits */
-
+ __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
+ /* 1 byte hole */
#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
@@ -818,6 +819,7 @@ static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
+
if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
return tp->snd_ssthresh;
else
@@ -830,7 +832,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
-extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
+extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
/* Slow start with delack produces 3 packets of burst, so that
* it is safe "de facto". This will be the default - same as
@@ -859,7 +861,7 @@ static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
static inline void tcp_check_probe_timer(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
if (!tp->packets_out && !icsk->icsk_pending)
@@ -1182,8 +1184,9 @@ struct tcp_md5sig_pool {
/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
- struct sock *sk, struct request_sock *req,
- struct sk_buff *skb);
+ const struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb);
extern struct tcp_md5sig_key * tcp_v4_md5_lookup(struct sock *sk,
struct sock *addr_sk);
extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey,
@@ -1200,17 +1203,17 @@ extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr);
#define tcp_twsk_md5_key(twsk) NULL
#endif
-extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
+extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);
extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
extern void tcp_put_md5sig_pool(void);
-extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
-extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
+extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
+extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
- struct tcp_md5sig_key *key);
+ const struct tcp_md5sig_key *key);
/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
@@ -1223,22 +1226,24 @@ static inline void tcp_write_queue_purge(struct sock *sk)
tcp_clear_all_retrans_hints(tcp_sk(sk));
}
-static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
+static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
return skb_peek(&sk->sk_write_queue);
}
-static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
+static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
return skb_peek_tail(&sk->sk_write_queue);
}
-static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
+static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
+ const struct sk_buff *skb)
{
return skb_queue_next(&sk->sk_write_queue, skb);
}
-static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
+static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
+ const struct sk_buff *skb)
{
return skb_queue_prev(&sk->sk_write_queue, skb);
}
@@ -1252,7 +1257,7 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu
#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
-static inline struct sk_buff *tcp_send_head(struct sock *sk)
+static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
return sk->sk_send_head;
}
@@ -1263,7 +1268,7 @@ static inline bool tcp_skb_is_last(const struct sock *sk,
return skb_queue_is_last(&sk->sk_write_queue, skb);
}
-static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
+static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
if (tcp_skb_is_last(sk, skb))
sk->sk_send_head = NULL;
@@ -1443,9 +1448,9 @@ struct tcp_sock_af_ops {
struct sock *addr_sk);
int (*calc_md5_hash) (char *location,
struct tcp_md5sig_key *md5,
- struct sock *sk,
- struct request_sock *req,
- struct sk_buff *skb);
+ const struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb);
int (*md5_add) (struct sock *sk,
struct sock *addr_sk,
u8 *newkey,
@@ -1462,9 +1467,9 @@ struct tcp_request_sock_ops {
struct request_sock *req);
int (*calc_md5_hash) (char *location,
struct tcp_md5sig_key *md5,
- struct sock *sk,
- struct request_sock *req,
- struct sk_buff *skb);
+ const struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb);
#endif
};
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 673a024c6b2..5f097ca7d5c 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -66,40 +66,34 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
return 0;
}
-static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh)
+/* Slow-path computation of checksum. Socket is locked. */
+static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
+ const struct udp_sock *up = udp_sk(skb->sk);
int cscov = up->len;
+ __wsum csum = 0;
- /*
- * Sender has set `partial coverage' option on UDP-Lite socket
- */
- if (up->pcflag & UDPLITE_SEND_CC) {
+ if (up->pcflag & UDPLITE_SEND_CC) {
+ /*
+ * Sender has set `partial coverage' option on UDP-Lite socket.
+ * The special case "up->pcslen == 0" signifies full coverage.
+ */
if (up->pcslen < up->len) {
- /* up->pcslen == 0 means that full coverage is required,
- * partial coverage only if 0 < up->pcslen < up->len */
- if (0 < up->pcslen) {
- cscov = up->pcslen;
- }
- uh->len = htons(up->pcslen);
+ if (0 < up->pcslen)
+ cscov = up->pcslen;
+ udp_hdr(skb)->len = htons(up->pcslen);
}
- /*
- * NOTE: Causes for the error case `up->pcslen > up->len':
- * (i) Application error (will not be penalized).
- * (ii) Payload too big for send buffer: data is split
- * into several packets, each with its own header.
- * In this case (e.g. last segment), coverage may
- * exceed packet length.
- * Since packets with coverage length > packet length are
- * illegal, we fall back to the defaults here.
- */
+ /*
+ * NOTE: Causes for the error case `up->pcslen > up->len':
+ * (i) Application error (will not be penalized).
+ * (ii) Payload too big for send buffer: data is split
+ * into several packets, each with its own header.
+ * In this case (e.g. last segment), coverage may
+ * exceed packet length.
+ * Since packets with coverage length > packet length are
+ * illegal, we fall back to the defaults here.
+ */
}
- return cscov;
-}
-
-static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
-{
- int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
- __wsum csum = 0;
skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
@@ -115,16 +109,21 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
return csum;
}
+/* Fast-path computation of checksum. Socket may not be locked. */
static inline __wsum udplite_csum(struct sk_buff *skb)
{
- struct sock *sk = skb->sk;
- int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
+ const struct udp_sock *up = udp_sk(skb->sk);
const int off = skb_transport_offset(skb);
- const int len = skb->len - off;
+ int len = skb->len - off;
+ if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
+ if (0 < up->pcslen)
+ len = up->pcslen;
+ udp_hdr(skb)->len = htons(up->pcslen);
+ }
skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
- return skb_checksum(skb, off, min(cscov, len), 0);
+ return skb_checksum(skb, off, len, 0);
}
extern void udplite4_register(void);
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index ae8c68f30f1..639a4491fc0 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -218,8 +218,12 @@ static inline int iboe_get_rate(struct net_device *dev)
{
struct ethtool_cmd cmd;
u32 speed;
+ int err;
- if (dev_ethtool_get_settings(dev, &cmd))
+ rtnl_lock();
+ err = __ethtool_get_settings(dev, &cmd);
+ rtnl_unlock();
+ if (err)
return IB_RATE_PORT_CURRENT;
speed = ethtool_cmd_speed(&cmd);
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 57e71fa33f7..54cb079b7bf 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -29,7 +29,7 @@
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/bitops.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#define snd_pcm_substream_chip(substream) ((substream)->private_data)
#define snd_pcm_chip(pcm) ((pcm)->private_data)
@@ -373,7 +373,7 @@ struct snd_pcm_substream {
int number;
char name[32]; /* substream name */
int stream; /* stream (direction) */
- struct pm_qos_request_list latency_pm_qos_req; /* pm_qos request */
+ struct pm_qos_request latency_pm_qos_req; /* pm_qos request */
size_t buffer_bytes_max; /* limit ring buffer size */
struct snd_dma_buffer dma_buffer;
unsigned int dma_buf_id;
diff --git a/include/target/configfs_macros.h b/include/target/configfs_macros.h
index 7fe74608b43..a0fc85bbe2d 100644
--- a/include/target/configfs_macros.h
+++ b/include/target/configfs_macros.h
@@ -30,8 +30,8 @@
* Added CONFIGFS_EATTR() macros from original configfs.h macros
* Copright (C) 2008-2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * Please read Documentation/filesystems/configfs.txt before using the
- * configfs interface, ESPECIALLY the parts about reference counts and
+ * Please read Documentation/filesystems/configfs/configfs.txt before using
+ * the configfs interface, ESPECIALLY the parts about reference counts and
* item destructors.
*/
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 27040653005..35aa786f93d 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -10,10 +10,7 @@
#include <net/tcp.h>
#define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml"
-#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
-/* Used by transport_generic_allocate_iovecs() */
-#define TRANSPORT_IOV_DATA_BUFFER 5
/* Maximum Number of LUNs per Target Portal Group */
/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
#define TRANSPORT_MAX_LUNS_PER_TPG 256
@@ -75,32 +72,26 @@ enum transport_tpg_type_table {
};
/* Used for generate timer flags */
-enum timer_flags_table {
- TF_RUNNING = 0x01,
- TF_STOP = 0x02,
+enum se_task_flags {
+ TF_ACTIVE = (1 << 0),
+ TF_SENT = (1 << 1),
+ TF_REQUEST_STOP = (1 << 2),
};
/* Special transport agnostic struct se_cmd->t_states */
enum transport_state_table {
TRANSPORT_NO_STATE = 0,
TRANSPORT_NEW_CMD = 1,
- TRANSPORT_DEFERRED_CMD = 2,
TRANSPORT_WRITE_PENDING = 3,
TRANSPORT_PROCESS_WRITE = 4,
TRANSPORT_PROCESSING = 5,
- TRANSPORT_COMPLETE_OK = 6,
- TRANSPORT_COMPLETE_FAILURE = 7,
- TRANSPORT_COMPLETE_TIMEOUT = 8,
+ TRANSPORT_COMPLETE = 6,
TRANSPORT_PROCESS_TMR = 9,
- TRANSPORT_TMR_COMPLETE = 10,
TRANSPORT_ISTATE_PROCESSING = 11,
- TRANSPORT_ISTATE_PROCESSED = 12,
- TRANSPORT_KILL = 13,
- TRANSPORT_REMOVE = 14,
- TRANSPORT_FREE = 15,
TRANSPORT_NEW_CMD_MAP = 16,
TRANSPORT_FREE_CMD_INTR = 17,
TRANSPORT_COMPLETE_QF_WP = 18,
+ TRANSPORT_COMPLETE_QF_OK = 19,
};
/* Used for struct se_cmd->se_cmd_flags */
@@ -125,7 +116,6 @@ enum se_cmd_flags_table {
SCF_UNUSED = 0x00100000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
SCF_EMULATE_CDB_ASYNC = 0x01000000,
- SCF_EMULATE_QUEUE_FULL = 0x02000000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -401,34 +391,22 @@ struct se_queue_obj {
} ____cacheline_aligned;
struct se_task {
- unsigned char task_sense;
- struct scatterlist *task_sg;
- u32 task_sg_nents;
- struct scatterlist *task_sg_bidi;
- u8 task_scsi_status;
- u8 task_flags;
- int task_error_status;
- int task_state_flags;
- bool task_padded_sg;
unsigned long long task_lba;
- u32 task_no;
- u32 task_sectors;
- u32 task_size;
+ u32 task_sectors;
+ u32 task_size;
+ struct se_cmd *task_se_cmd;
+ struct scatterlist *task_sg;
+ u32 task_sg_nents;
+ u16 task_flags;
+ u8 task_sense;
+ u8 task_scsi_status;
+ int task_error_status;
enum dma_data_direction task_data_direction;
- struct se_cmd *task_se_cmd;
- struct se_device *se_dev;
+ atomic_t task_state_active;
+ struct list_head t_list;
+ struct list_head t_execute_list;
+ struct list_head t_state_list;
struct completion task_stop_comp;
- atomic_t task_active;
- atomic_t task_execute_queue;
- atomic_t task_timeout;
- atomic_t task_sent;
- atomic_t task_stop;
- atomic_t task_state_active;
- struct timer_list task_timer;
- struct se_device *se_obj_ptr;
- struct list_head t_list;
- struct list_head t_execute_list;
- struct list_head t_state_list;
} ____cacheline_aligned;
struct se_cmd {
@@ -446,8 +424,6 @@ struct se_cmd {
int sam_task_attr;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
- /* Transport protocol dependent state for out of order CmdSNs */
- int deferred_t_state;
/* Transport specific error status */
int transport_error_status;
/* See se_cmd_flags_table */
@@ -461,7 +437,6 @@ struct se_cmd {
u32 orig_fe_lun;
/* Persistent Reservation key */
u64 pr_res_key;
- atomic_t transport_sent;
/* Used for sense data */
void *sense_buffer;
struct list_head se_delayed_node;
@@ -479,10 +454,7 @@ struct se_cmd {
struct list_head se_queue_node;
struct target_core_fabric_ops *se_tfo;
int (*transport_emulate_cdb)(struct se_cmd *);
- void (*transport_split_cdb)(unsigned long long, u32, unsigned char *);
- void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
void (*transport_complete_callback)(struct se_cmd *);
- int (*transport_qf_callback)(struct se_cmd *);
unsigned char *t_task_cdb;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
@@ -495,7 +467,6 @@ struct se_cmd {
atomic_t t_se_count;
atomic_t t_task_cdbs_left;
atomic_t t_task_cdbs_ex_left;
- atomic_t t_task_cdbs_timeout_left;
atomic_t t_task_cdbs_sent;
atomic_t t_transport_aborted;
atomic_t t_transport_active;
@@ -503,7 +474,6 @@ struct se_cmd {
atomic_t t_transport_queue_active;
atomic_t t_transport_sent;
atomic_t t_transport_stop;
- atomic_t t_transport_timeout;
atomic_t transport_dev_active;
atomic_t transport_lun_active;
atomic_t transport_lun_fe_stop;
@@ -514,6 +484,8 @@ struct se_cmd {
struct completion transport_lun_stop_comp;
struct scatterlist *t_tasks_sg_chained;
+ struct work_struct work;
+
/*
* Used for pre-registered fabric SGL passthrough WRITE and READ
* with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
@@ -670,7 +642,6 @@ struct se_dev_attrib {
u32 optimal_sectors;
u32 hw_queue_depth;
u32 queue_depth;
- u32 task_timeout;
u32 max_unmap_lba_count;
u32 max_unmap_block_desc_count;
u32 unmap_granularity;
diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h
index bd559680747..d5876e17d3f 100644
--- a/include/target/target_core_tmr.h
+++ b/include/target/target_core_tmr.h
@@ -27,7 +27,7 @@ enum tcm_tmrsp_table {
extern struct kmem_cache *se_tmr_req_cache;
-extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8);
+extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
extern void core_tmr_release_req(struct se_tmr_req *);
extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
struct list_head *, struct se_cmd *);
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 46aae4f94ed..a037a1a6fbb 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -22,10 +22,9 @@
#define PYX_TRANSPORT_LU_COMM_FAILURE -7
#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8
#define PYX_TRANSPORT_WRITE_PROTECTED -9
-#define PYX_TRANSPORT_TASK_TIMEOUT -10
-#define PYX_TRANSPORT_RESERVATION_CONFLICT -11
-#define PYX_TRANSPORT_ILLEGAL_REQUEST -12
-#define PYX_TRANSPORT_USE_SENSE_REASON -13
+#define PYX_TRANSPORT_RESERVATION_CONFLICT -10
+#define PYX_TRANSPORT_ILLEGAL_REQUEST -11
+#define PYX_TRANSPORT_USE_SENSE_REASON -12
#ifndef SAM_STAT_RESERVATION_CONFLICT
#define SAM_STAT_RESERVATION_CONFLICT 0x18
@@ -38,16 +37,6 @@
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
-/* For SE OBJ Plugins, in seconds */
-#define TRANSPORT_TIMEOUT_TUR 10
-#define TRANSPORT_TIMEOUT_TYPE_DISK 60
-#define TRANSPORT_TIMEOUT_TYPE_ROM 120
-#define TRANSPORT_TIMEOUT_TYPE_TAPE 600
-#define TRANSPORT_TIMEOUT_TYPE_OTHER 300
-
-/* For se_task->task_state_flags */
-#define TSF_EXCEPTION_CLEARED 0x01
-
/*
* struct se_subsystem_dev->su_dev_flags
*/
@@ -64,8 +53,6 @@
#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004
/* struct se_dev_attrib sanity values */
-/* 10 Minutes */
-#define DA_TASK_TIMEOUT_MAX 600
/* Default max_unmap_lba_count */
#define DA_MAX_UNMAP_LBA_COUNT 0
/* Default max_unmap_block_desc_count */
@@ -110,16 +97,13 @@
#define MOD_MAX_SECTORS(ms, bs) (ms % (PAGE_SIZE / bs))
-struct se_mem;
struct se_subsystem_api;
-extern struct kmem_cache *se_mem_cache;
-
extern int init_se_kmem_caches(void);
extern void release_se_kmem_caches(void);
extern u32 scsi_get_new_index(scsi_index_t);
extern void transport_init_queue_obj(struct se_queue_obj *);
-extern int transport_subsystem_check_init(void);
+extern void transport_subsystem_check_init(void);
extern int transport_subsystem_register(struct se_subsystem_api *);
extern void transport_subsystem_release(struct se_subsystem_api *);
extern void transport_load_plugins(void);
@@ -134,7 +118,6 @@ extern void transport_free_session(struct se_session *);
extern void transport_deregister_session_configfs(struct se_session *);
extern void transport_deregister_session(struct se_session *);
extern void transport_cmd_finish_abort(struct se_cmd *, int);
-extern void transport_cmd_finish_abort_tmr(struct se_cmd *);
extern void transport_complete_sync_cache(struct se_cmd *, int);
extern void transport_complete_task(struct se_task *, int);
extern void transport_add_task_to_execute_queue(struct se_task *,
@@ -142,6 +125,8 @@ extern void transport_add_task_to_execute_queue(struct se_task *,
struct se_device *);
extern void transport_remove_task_from_execute_queue(struct se_task *,
struct se_device *);
+extern void __transport_remove_task_from_execute_queue(struct se_task *,
+ struct se_device *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
extern void transport_dump_dev_state(struct se_device *, char *, int *);
extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -169,29 +154,24 @@ extern void transport_init_se_cmd(struct se_cmd *,
unsigned char *);
void *transport_kmap_first_data_page(struct se_cmd *cmd);
void transport_kunmap_first_data_page(struct se_cmd *cmd);
-extern void transport_free_se_cmd(struct se_cmd *);
extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
-extern int transport_generic_handle_cdb(struct se_cmd *);
extern int transport_handle_cdb_direct(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern void transport_generic_free_cmd_intr(struct se_cmd *);
-extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
+extern bool target_stop_task(struct se_task *task, unsigned long *flags);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
struct scatterlist *, u32);
extern int transport_clear_lun_from_sessions(struct se_lun *);
+extern void transport_wait_for_tasks(struct se_cmd *);
extern int transport_check_aborted_status(struct se_cmd *, int);
extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
extern void transport_send_task_abort(struct se_cmd *);
extern void transport_release_cmd(struct se_cmd *);
-extern void transport_generic_free_cmd(struct se_cmd *, int, int);
+extern void transport_generic_free_cmd(struct se_cmd *, int);
extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
-extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32);
-extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
- struct scatterlist *, struct se_mem *,
- struct se_mem **, u32 *, u32 *);
extern void transport_do_task_sg_chain(struct se_cmd *);
extern void transport_generic_process_write(struct se_cmd *);
extern int transport_generic_new_cmd(struct se_cmd *);
@@ -200,6 +180,7 @@ extern int transport_generic_do_tmr(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
/* From target_core_cdb.c */
extern int transport_emulate_control_cdb(struct se_task *);
+extern void target_get_task_cdb(struct se_task *task, unsigned char *cdb);
/*
* Each se_transport_task_t can have N number of possible struct se_task's
@@ -227,6 +208,10 @@ struct se_subsystem_api {
* Transport Type.
*/
u8 transport_type;
+
+ unsigned int fua_write_emulated : 1;
+ unsigned int write_cache_emulated : 1;
+
/*
* struct module for struct se_hba references
*/
@@ -236,18 +221,6 @@ struct se_subsystem_api {
*/
struct list_head sub_api_list;
/*
- * For SCF_SCSI_NON_DATA_CDB
- */
- int (*cdb_none)(struct se_task *);
- /*
- * For SCF_SCSI_DATA_SG_IO_CDB
- */
- int (*map_data_SG)(struct se_task *);
- /*
- * For SCF_SCSI_CONTROL_SG_IO_CDB
- */
- int (*map_control_SG)(struct se_task *);
- /*
* attach_hba():
*/
int (*attach_hba)(struct se_hba *, u32);
@@ -275,22 +248,6 @@ struct se_subsystem_api {
void (*free_device)(void *);
/*
- * dpo_emulated():
- */
- int (*dpo_emulated)(struct se_device *);
- /*
- * fua_write_emulated():
- */
- int (*fua_write_emulated)(struct se_device *);
- /*
- * fua_read_emulated():
- */
- int (*fua_read_emulated)(struct se_device *);
- /*
- * write_cache_emulated():
- */
- int (*write_cache_emulated)(struct se_device *);
- /*
* transport_complete():
*
* Use transport_generic_complete() for majority of DAS transport
@@ -331,10 +288,6 @@ struct se_subsystem_api {
ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
char *);
/*
- * get_cdb():
- */
- unsigned char *(*get_cdb)(struct se_task *);
- /*
* get_device_rev():
*/
u32 (*get_device_rev)(struct se_device *);
diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
new file mode 100644
index 00000000000..beeaed8398e
--- /dev/null
+++ b/include/trace/events/9p.h
@@ -0,0 +1,176 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM 9p
+
+#if !defined(_TRACE_9P_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_9P_H
+
+#include <linux/tracepoint.h>
+
+#define show_9p_op(type) \
+ __print_symbolic(type, \
+ { P9_TLERROR, "P9_TLERROR" }, \
+ { P9_RLERROR, "P9_RLERROR" }, \
+ { P9_TSTATFS, "P9_TSTATFS" }, \
+ { P9_RSTATFS, "P9_RSTATFS" }, \
+ { P9_TLOPEN, "P9_TLOPEN" }, \
+ { P9_RLOPEN, "P9_RLOPEN" }, \
+ { P9_TLCREATE, "P9_TLCREATE" }, \
+ { P9_RLCREATE, "P9_RLCREATE" }, \
+ { P9_TSYMLINK, "P9_TSYMLINK" }, \
+ { P9_RSYMLINK, "P9_RSYMLINK" }, \
+ { P9_TMKNOD, "P9_TMKNOD" }, \
+ { P9_RMKNOD, "P9_RMKNOD" }, \
+ { P9_TRENAME, "P9_TRENAME" }, \
+ { P9_RRENAME, "P9_RRENAME" }, \
+ { P9_TREADLINK, "P9_TREADLINK" }, \
+ { P9_RREADLINK, "P9_RREADLINK" }, \
+ { P9_TGETATTR, "P9_TGETATTR" }, \
+ { P9_RGETATTR, "P9_RGETATTR" }, \
+ { P9_TSETATTR, "P9_TSETATTR" }, \
+ { P9_RSETATTR, "P9_RSETATTR" }, \
+ { P9_TXATTRWALK, "P9_TXATTRWALK" }, \
+ { P9_RXATTRWALK, "P9_RXATTRWALK" }, \
+ { P9_TXATTRCREATE, "P9_TXATTRCREATE" }, \
+ { P9_RXATTRCREATE, "P9_RXATTRCREATE" }, \
+ { P9_TREADDIR, "P9_TREADDIR" }, \
+ { P9_RREADDIR, "P9_RREADDIR" }, \
+ { P9_TFSYNC, "P9_TFSYNC" }, \
+ { P9_RFSYNC, "P9_RFSYNC" }, \
+ { P9_TLOCK, "P9_TLOCK" }, \
+ { P9_RLOCK, "P9_RLOCK" }, \
+ { P9_TGETLOCK, "P9_TGETLOCK" }, \
+ { P9_RGETLOCK, "P9_RGETLOCK" }, \
+ { P9_TLINK, "P9_TLINK" }, \
+ { P9_RLINK, "P9_RLINK" }, \
+ { P9_TMKDIR, "P9_TMKDIR" }, \
+ { P9_RMKDIR, "P9_RMKDIR" }, \
+ { P9_TRENAMEAT, "P9_TRENAMEAT" }, \
+ { P9_RRENAMEAT, "P9_RRENAMEAT" }, \
+ { P9_TUNLINKAT, "P9_TUNLINKAT" }, \
+ { P9_RUNLINKAT, "P9_RUNLINKAT" }, \
+ { P9_TVERSION, "P9_TVERSION" }, \
+ { P9_RVERSION, "P9_RVERSION" }, \
+ { P9_TAUTH, "P9_TAUTH" }, \
+ { P9_RAUTH, "P9_RAUTH" }, \
+ { P9_TATTACH, "P9_TATTACH" }, \
+ { P9_RATTACH, "P9_RATTACH" }, \
+ { P9_TERROR, "P9_TERROR" }, \
+ { P9_RERROR, "P9_RERROR" }, \
+ { P9_TFLUSH, "P9_TFLUSH" }, \
+ { P9_RFLUSH, "P9_RFLUSH" }, \
+ { P9_TWALK, "P9_TWALK" }, \
+ { P9_RWALK, "P9_RWALK" }, \
+ { P9_TOPEN, "P9_TOPEN" }, \
+ { P9_ROPEN, "P9_ROPEN" }, \
+ { P9_TCREATE, "P9_TCREATE" }, \
+ { P9_RCREATE, "P9_RCREATE" }, \
+ { P9_TREAD, "P9_TREAD" }, \
+ { P9_RREAD, "P9_RREAD" }, \
+ { P9_TWRITE, "P9_TWRITE" }, \
+ { P9_RWRITE, "P9_RWRITE" }, \
+ { P9_TCLUNK, "P9_TCLUNK" }, \
+ { P9_RCLUNK, "P9_RCLUNK" }, \
+ { P9_TREMOVE, "P9_TREMOVE" }, \
+ { P9_RREMOVE, "P9_RREMOVE" }, \
+ { P9_TSTAT, "P9_TSTAT" }, \
+ { P9_RSTAT, "P9_RSTAT" }, \
+ { P9_TWSTAT, "P9_TWSTAT" }, \
+ { P9_RWSTAT, "P9_RWSTAT" })
+
+TRACE_EVENT(9p_client_req,
+ TP_PROTO(struct p9_client *clnt, int8_t type, int tag),
+
+ TP_ARGS(clnt, type, tag),
+
+ TP_STRUCT__entry(
+ __field( void *, clnt )
+ __field( __u8, type )
+ __field( __u32, tag )
+ ),
+
+ TP_fast_assign(
+ __entry->clnt = clnt;
+ __entry->type = type;
+ __entry->tag = tag;
+ ),
+
+ TP_printk("client %lu request %s tag %d",
+ (long)__entry->clnt, show_9p_op(__entry->type),
+ __entry->tag)
+ );
+
+TRACE_EVENT(9p_client_res,
+ TP_PROTO(struct p9_client *clnt, int8_t type, int tag, int err),
+
+ TP_ARGS(clnt, type, tag, err),
+
+ TP_STRUCT__entry(
+ __field( void *, clnt )
+ __field( __u8, type )
+ __field( __u32, tag )
+ __field( __u32, err )
+ ),
+
+ TP_fast_assign(
+ __entry->clnt = clnt;
+ __entry->type = type;
+ __entry->tag = tag;
+ __entry->err = err;
+ ),
+
+ TP_printk("client %lu response %s tag %d err %d",
+ (long)__entry->clnt, show_9p_op(__entry->type),
+ __entry->tag, __entry->err)
+);
+
+/* dump 32 bytes of protocol data */
+#define P9_PROTO_DUMP_SZ 32
+TRACE_EVENT(9p_protocol_dump,
+ TP_PROTO(struct p9_client *clnt, struct p9_fcall *pdu),
+
+ TP_ARGS(clnt, pdu),
+
+ TP_STRUCT__entry(
+ __field( void *, clnt )
+ __field( __u8, type )
+ __field( __u16, tag )
+ __array( unsigned char, line, P9_PROTO_DUMP_SZ )
+ ),
+
+ TP_fast_assign(
+ __entry->clnt = clnt;
+ __entry->type = pdu->id;
+ __entry->tag = pdu->tag;
+ memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ);
+ ),
+ TP_printk("clnt %lu %s(tag = %d)\n%.3x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n"
+ "%.3x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n",
+ (long)__entry->clnt, show_9p_op(__entry->type),
+ __entry->tag, 0,
+ __entry->line[0], __entry->line[1],
+ __entry->line[2], __entry->line[3],
+ __entry->line[4], __entry->line[5],
+ __entry->line[6], __entry->line[7],
+ __entry->line[8], __entry->line[9],
+ __entry->line[10], __entry->line[11],
+ __entry->line[12], __entry->line[13],
+ __entry->line[14], __entry->line[15],
+ 16,
+ __entry->line[16], __entry->line[17],
+ __entry->line[18], __entry->line[19],
+ __entry->line[20], __entry->line[21],
+ __entry->line[22], __entry->line[23],
+ __entry->line[24], __entry->line[25],
+ __entry->line[26], __entry->line[27],
+ __entry->line[28], __entry->line[29],
+ __entry->line[30], __entry->line[31])
+ );
+
+#endif /* _TRACE_9P_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
new file mode 100644
index 00000000000..669fbd62ec2
--- /dev/null
+++ b/include/trace/events/rcu.h
@@ -0,0 +1,459 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rcu
+
+#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RCU_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Tracepoint for start/end markers used for utilization calculations.
+ * By convention, the string is of the following forms:
+ *
+ * "Start <activity>" -- Mark the start of the specified activity,
+ * such as "context switch". Nesting is permitted.
+ * "End <activity>" -- Mark the end of the specified activity.
+ *
+ * An "@" character within "<activity>" is a comment character: Data
+ * reduction scripts will ignore the "@" and the remainder of the line.
+ */
+TRACE_EVENT(rcu_utilization,
+
+ TP_PROTO(char *s),
+
+ TP_ARGS(s),
+
+ TP_STRUCT__entry(
+ __field(char *, s)
+ ),
+
+ TP_fast_assign(
+ __entry->s = s;
+ ),
+
+ TP_printk("%s", __entry->s)
+);
+
+#ifdef CONFIG_RCU_TRACE
+
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
+
+/*
+ * Tracepoint for grace-period events: starting and ending a grace
+ * period ("start" and "end", respectively), a CPU noting the start
+ * of a new grace period or the end of an old grace period ("cpustart"
+ * and "cpuend", respectively), a CPU passing through a quiescent
+ * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
+ * and "cpuofl", respectively), and a CPU being kicked for being too
+ * long in dyntick-idle mode ("kick").
+ */
+TRACE_EVENT(rcu_grace_period,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),
+
+ TP_ARGS(rcuname, gpnum, gpevent),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(char *, gpevent)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->gpevent = gpevent;
+ ),
+
+ TP_printk("%s %lu %s",
+ __entry->rcuname, __entry->gpnum, __entry->gpevent)
+);
+
+/*
+ * Tracepoint for grace-period-initialization events. These are
+ * distinguished by the type of RCU, the new grace-period number, the
+ * rcu_node structure level, the starting and ending CPU covered by the
+ * rcu_node structure, and the mask of CPUs that will be waited for.
+ * All but the type of RCU are extracted from the rcu_node structure.
+ */
+TRACE_EVENT(rcu_grace_period_init,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
+ int grplo, int grphi, unsigned long qsmask),
+
+ TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(u8, level)
+ __field(int, grplo)
+ __field(int, grphi)
+ __field(unsigned long, qsmask)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->level = level;
+ __entry->grplo = grplo;
+ __entry->grphi = grphi;
+ __entry->qsmask = qsmask;
+ ),
+
+ TP_printk("%s %lu %u %d %d %lx",
+ __entry->rcuname, __entry->gpnum, __entry->level,
+ __entry->grplo, __entry->grphi, __entry->qsmask)
+);
+
+/*
+ * Tracepoint for tasks blocking within preemptible-RCU read-side
+ * critical sections. Track the type of RCU (which one day might
+ * include SRCU), the grace-period number that the task is blocking
+ * (the current or the next), and the task's PID.
+ */
+TRACE_EVENT(rcu_preempt_task,
+
+ TP_PROTO(char *rcuname, int pid, unsigned long gpnum),
+
+ TP_ARGS(rcuname, pid, gpnum),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(int, pid)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->pid = pid;
+ ),
+
+ TP_printk("%s %lu %d",
+ __entry->rcuname, __entry->gpnum, __entry->pid)
+);
+
+/*
+ * Tracepoint for tasks that blocked within a given preemptible-RCU
+ * read-side critical section exiting that critical section. Track the
+ * type of RCU (which one day might include SRCU) and the task's PID.
+ */
+TRACE_EVENT(rcu_unlock_preempted_task,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum, int pid),
+
+ TP_ARGS(rcuname, gpnum, pid),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(int, pid)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->pid = pid;
+ ),
+
+ TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid)
+);
+
+/*
+ * Tracepoint for quiescent-state-reporting events. These are
+ * distinguished by the type of RCU, the grace-period number, the
+ * mask of quiescent lower-level entities, the rcu_node structure level,
+ * the starting and ending CPU covered by the rcu_node structure, and
+ * whether there are any blocked tasks blocking the current grace period.
+ * All but the type of RCU are extracted from the rcu_node structure.
+ */
+TRACE_EVENT(rcu_quiescent_state_report,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum,
+ unsigned long mask, unsigned long qsmask,
+ u8 level, int grplo, int grphi, int gp_tasks),
+
+ TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(unsigned long, mask)
+ __field(unsigned long, qsmask)
+ __field(u8, level)
+ __field(int, grplo)
+ __field(int, grphi)
+ __field(u8, gp_tasks)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->mask = mask;
+ __entry->qsmask = qsmask;
+ __entry->level = level;
+ __entry->grplo = grplo;
+ __entry->grphi = grphi;
+ __entry->gp_tasks = gp_tasks;
+ ),
+
+ TP_printk("%s %lu %lx>%lx %u %d %d %u",
+ __entry->rcuname, __entry->gpnum,
+ __entry->mask, __entry->qsmask, __entry->level,
+ __entry->grplo, __entry->grphi, __entry->gp_tasks)
+);
+
+/*
+ * Tracepoint for quiescent states detected by force_quiescent_state().
+ * These trace events include the type of RCU, the grace-period number
+ * that was blocked by the CPU, the CPU itself, and the type of quiescent
+ * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
+ * or "kick" when kicking a CPU that has been in dyntick-idle mode for
+ * too long.
+ */
+TRACE_EVENT(rcu_fqs,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),
+
+ TP_ARGS(rcuname, gpnum, cpu, qsevent),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(int, cpu)
+ __field(char *, qsevent)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->cpu = cpu;
+ __entry->qsevent = qsevent;
+ ),
+
+ TP_printk("%s %lu %d %s",
+ __entry->rcuname, __entry->gpnum,
+ __entry->cpu, __entry->qsevent)
+);
+
+#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) */
+
+/*
+ * Tracepoint for dyntick-idle entry/exit events. These take a string
+ * as argument: "Start" for entering dyntick-idle mode and "End" for
+ * leaving it.
+ */
+TRACE_EVENT(rcu_dyntick,
+
+ TP_PROTO(char *polarity),
+
+ TP_ARGS(polarity),
+
+ TP_STRUCT__entry(
+ __field(char *, polarity)
+ ),
+
+ TP_fast_assign(
+ __entry->polarity = polarity;
+ ),
+
+ TP_printk("%s", __entry->polarity)
+);
+
+/*
+ * Tracepoint for the registration of a single RCU callback function.
+ * The first argument is the type of RCU, the second argument is
+ * a pointer to the RCU callback itself, and the third element is the
+ * new RCU callback queue length for the current CPU.
+ */
+TRACE_EVENT(rcu_callback,
+
+ TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),
+
+ TP_ARGS(rcuname, rhp, qlen),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(void *, rhp)
+ __field(void *, func)
+ __field(long, qlen)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->func = rhp->func;
+ __entry->qlen = qlen;
+ ),
+
+ TP_printk("%s rhp=%p func=%pf %ld",
+ __entry->rcuname, __entry->rhp, __entry->func, __entry->qlen)
+);
+
+/*
+ * Tracepoint for the registration of a single RCU callback of the special
+ * kfree() form. The first argument is the RCU type, the second argument
+ * is a pointer to the RCU callback, the third argument is the offset
+ * of the callback within the enclosing RCU-protected data structure,
+ * and the fourth argument is the new RCU callback queue length for the
+ * current CPU.
+ */
+TRACE_EVENT(rcu_kfree_callback,
+
+ TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
+ long qlen),
+
+ TP_ARGS(rcuname, rhp, offset, qlen),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(void *, rhp)
+ __field(unsigned long, offset)
+ __field(long, qlen)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->offset = offset;
+ __entry->qlen = qlen;
+ ),
+
+ TP_printk("%s rhp=%p func=%ld %ld",
+ __entry->rcuname, __entry->rhp, __entry->offset,
+ __entry->qlen)
+);
+
+/*
+ * Tracepoint for marking the beginning rcu_do_batch, performed to start
+ * RCU callback invocation. The first argument is the RCU flavor,
+ * the second is the total number of callbacks (including those that
+ * are not yet ready to be invoked), and the third argument is the
+ * current RCU-callback batch limit.
+ */
+TRACE_EVENT(rcu_batch_start,
+
+ TP_PROTO(char *rcuname, long qlen, int blimit),
+
+ TP_ARGS(rcuname, qlen, blimit),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(long, qlen)
+ __field(int, blimit)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->qlen = qlen;
+ __entry->blimit = blimit;
+ ),
+
+ TP_printk("%s CBs=%ld bl=%d",
+ __entry->rcuname, __entry->qlen, __entry->blimit)
+);
+
+/*
+ * Tracepoint for the invocation of a single RCU callback function.
+ * The first argument is the type of RCU, and the second argument is
+ * a pointer to the RCU callback itself.
+ */
+TRACE_EVENT(rcu_invoke_callback,
+
+ TP_PROTO(char *rcuname, struct rcu_head *rhp),
+
+ TP_ARGS(rcuname, rhp),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(void *, rhp)
+ __field(void *, func)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->func = rhp->func;
+ ),
+
+ TP_printk("%s rhp=%p func=%pf",
+ __entry->rcuname, __entry->rhp, __entry->func)
+);
+
+/*
+ * Tracepoint for the invocation of a single RCU callback of the special
+ * kfree() form. The first argument is the RCU flavor, the second
+ * argument is a pointer to the RCU callback, and the third argument
+ * is the offset of the callback within the enclosing RCU-protected
+ * data structure.
+ */
+TRACE_EVENT(rcu_invoke_kfree_callback,
+
+ TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
+
+ TP_ARGS(rcuname, rhp, offset),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(void *, rhp)
+ __field(unsigned long, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->offset = offset;
+ ),
+
+ TP_printk("%s rhp=%p func=%ld",
+ __entry->rcuname, __entry->rhp, __entry->offset)
+);
+
+/*
+ * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
+ * invoked. The first argument is the name of the RCU flavor and
+ * the second argument is number of callbacks actually invoked.
+ */
+TRACE_EVENT(rcu_batch_end,
+
+ TP_PROTO(char *rcuname, int callbacks_invoked),
+
+ TP_ARGS(rcuname, callbacks_invoked),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(int, callbacks_invoked)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->callbacks_invoked = callbacks_invoked;
+ ),
+
+ TP_printk("%s CBs-invoked=%d",
+ __entry->rcuname, __entry->callbacks_invoked)
+);
+
+#else /* #ifdef CONFIG_RCU_TRACE */
+
+#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, qsmask) do { } while (0)
+#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
+#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
+#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
+#define trace_rcu_dyntick(polarity) do { } while (0)
+#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
+#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
+#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
+#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
+#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
+#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
+
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
+
+#endif /* _TRACE_RCU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
new file mode 100644
index 00000000000..1e3193b8fcc
--- /dev/null
+++ b/include/trace/events/regmap.h
@@ -0,0 +1,136 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM regmap
+
+#if !defined(_TRACE_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_REGMAP_H
+
+#include <linux/device.h>
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+struct regmap;
+
+/*
+ * Log register events
+ */
+DECLARE_EVENT_CLASS(regmap_reg,
+
+ TP_PROTO(struct device *dev, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(dev, reg, val),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev) )
+ __field( unsigned int, reg )
+ __field( unsigned int, val )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->reg = reg;
+ __entry->val = val;
+ ),
+
+ TP_printk("%s reg=%x val=%x", __get_str(name),
+ (unsigned int)__entry->reg,
+ (unsigned int)__entry->val)
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_write,
+
+ TP_PROTO(struct device *dev, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(dev, reg, val)
+
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_read,
+
+ TP_PROTO(struct device *dev, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(dev, reg, val)
+
+);
+
+DECLARE_EVENT_CLASS(regmap_block,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev) )
+ __field( unsigned int, reg )
+ __field( int, count )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->reg = reg;
+ __entry->count = count;
+ ),
+
+ TP_printk("%s reg=%x count=%d", __get_str(name),
+ (unsigned int)__entry->reg,
+ (int)__entry->count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_read_start,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_read_done,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_start,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_done,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count)
+);
+
+TRACE_EVENT(regcache_sync,
+
+ TP_PROTO(struct device *dev, const char *type,
+ const char *status),
+
+ TP_ARGS(dev, type, status),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev) )
+ __string( status, status )
+ __string( type, type )
+ __field( int, type )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __assign_str(status, status);
+ __assign_str(type, type);
+ ),
+
+ TP_printk("%s type=%s status=%s", __get_str(name),
+ __get_str(type), __get_str(status))
+);
+
+#endif /* _TRACE_REGMAP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/rpm.h b/include/trace/events/rpm.h
new file mode 100644
index 00000000000..d62c558bf64
--- /dev/null
+++ b/include/trace/events/rpm.h
@@ -0,0 +1,99 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpm
+
+#if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RUNTIME_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/device.h>
+
+/*
+ * The rpm_internal events are used for tracing some important
+ * runtime pm internal functions.
+ */
+DECLARE_EVENT_CLASS(rpm_internal,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev) )
+ __field( int, flags )
+ __field( int , usage_count )
+ __field( int , disable_depth )
+ __field( int , runtime_auto )
+ __field( int , request_pending )
+ __field( int , irq_safe )
+ __field( int , child_count )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->flags = flags;
+ __entry->usage_count = atomic_read(
+ &dev->power.usage_count);
+ __entry->disable_depth = dev->power.disable_depth;
+ __entry->runtime_auto = dev->power.runtime_auto;
+ __entry->request_pending = dev->power.request_pending;
+ __entry->irq_safe = dev->power.irq_safe;
+ __entry->child_count = atomic_read(
+ &dev->power.child_count);
+ ),
+
+ TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d"
+ " irq-%-1d child-%d",
+ __get_str(name), __entry->flags,
+ __entry->usage_count,
+ __entry->disable_depth,
+ __entry->runtime_auto,
+ __entry->request_pending,
+ __entry->irq_safe,
+ __entry->child_count
+ )
+);
+DEFINE_EVENT(rpm_internal, rpm_suspend,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags)
+);
+DEFINE_EVENT(rpm_internal, rpm_resume,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags)
+);
+DEFINE_EVENT(rpm_internal, rpm_idle,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags)
+);
+
+TRACE_EVENT(rpm_return_int,
+ TP_PROTO(struct device *dev, unsigned long ip, int ret),
+ TP_ARGS(dev, ip, ret),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev))
+ __field( unsigned long, ip )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->ip = ip;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name),
+ __entry->ret)
+);
+
+#endif /* _TRACE_RUNTIME_POWER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 76f7538bb33..d29c153705b 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -25,8 +25,9 @@ extern struct balloon_stats balloon_stats;
void balloon_set_new_target(unsigned long target);
-int alloc_xenballooned_pages(int nr_pages, struct page** pages);
-void free_xenballooned_pages(int nr_pages, struct page** pages);
+int alloc_xenballooned_pages(int nr_pages, struct page **pages,
+ bool highmem);
+void free_xenballooned_pages(int nr_pages, struct page **pages);
struct sys_device;
#ifdef CONFIG_XEN_SELFBALLOONING
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index b1fab6b5b3e..6b99bfbd785 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -156,6 +156,7 @@ unsigned int gnttab_max_grant_frames(void);
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct page **pages, unsigned int count);
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index 99fcffb372d..f0b6890370b 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -26,7 +26,11 @@ enum xsd_sockmsg_type
XS_SET_PERMS,
XS_WATCH_EVENT,
XS_ERROR,
- XS_IS_DOMAIN_INTRODUCED
+ XS_IS_DOMAIN_INTRODUCED,
+ XS_RESUME,
+ XS_SET_TARGET,
+ XS_RESTRICT,
+ XS_RESET_WATCHES
};
#define XS_WRITE_NONE "NONE"
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index 534cac89a77..c1080d9c705 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -109,6 +109,7 @@ struct physdev_irq {
#define MAP_PIRQ_TYPE_MSI 0x0
#define MAP_PIRQ_TYPE_GSI 0x1
#define MAP_PIRQ_TYPE_UNKNOWN 0x2
+#define MAP_PIRQ_TYPE_MSI_SEG 0x3
#define PHYSDEVOP_map_pirq 13
struct physdev_map_pirq {
@@ -119,7 +120,7 @@ struct physdev_map_pirq {
int index;
/* IN or OUT */
int pirq;
- /* IN */
+ /* IN - high 16 bits hold segment for MAP_PIRQ_TYPE_MSI_SEG */
int bus;
/* IN */
int devfn;
@@ -198,6 +199,37 @@ struct physdev_get_free_pirq {
uint32_t pirq;
};
+#define XEN_PCI_DEV_EXTFN 0x1
+#define XEN_PCI_DEV_VIRTFN 0x2
+#define XEN_PCI_DEV_PXM 0x4
+
+#define PHYSDEVOP_pci_device_add 25
+struct physdev_pci_device_add {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+ uint32_t flags;
+ struct {
+ uint8_t bus;
+ uint8_t devfn;
+ } physfn;
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ uint32_t optarr[];
+#elif defined(__GNUC__)
+ uint32_t optarr[0];
+#endif
+};
+
+#define PHYSDEVOP_pci_device_remove 26
+#define PHYSDEVOP_restore_msi_ext 27
+struct physdev_pci_device {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+};
+
/*
* Notify that some PIRQ-bound event channels have been unmasked.
* ** This command is obsolete since interface version 0x00030202 and is **
diff --git a/include/xen/page.h b/include/xen/page.h
index 0be36b976f4..12765b6f951 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -3,6 +3,16 @@
#include <asm/xen/page.h>
-extern phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+struct xen_memory_region {
+ phys_addr_t start;
+ phys_addr_t size;
+};
+
+#define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */
+
+extern __initdata
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS];
+
+extern unsigned long xen_released_pages;
#endif /* _XEN_PAGE_H */
diff --git a/init/Kconfig b/init/Kconfig
index d62778390e5..dc7e27bf89a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -391,7 +391,7 @@ config TREE_RCU
config TREE_PREEMPT_RCU
bool "Preemptible tree-based hierarchical RCU"
- depends on PREEMPT
+ depends on PREEMPT && SMP
help
This option selects the RCU implementation that is
designed for very large SMP systems with hundreds or
@@ -401,7 +401,7 @@ config TREE_PREEMPT_RCU
config TINY_RCU
bool "UP-only small-memory-footprint RCU"
- depends on !SMP
+ depends on !PREEMPT && !SMP
help
This option selects the RCU implementation that is
designed for UP systems from which real-time response
@@ -410,7 +410,7 @@ config TINY_RCU
config TINY_PREEMPT_RCU
bool "Preemptible UP-only small-memory-footprint RCU"
- depends on !SMP && PREEMPT
+ depends on PREEMPT && !SMP
help
This option selects the RCU implementation that is designed
for real-time UP systems. This option greatly reduces the
diff --git a/init/main.c b/init/main.c
index 03b408dff82..63f5f6f8dc3 100644
--- a/init/main.c
+++ b/init/main.c
@@ -163,7 +163,7 @@ static int __init obsolete_checksetup(char *line)
p = __setup_start;
do {
int n = strlen(p->str);
- if (!strncmp(line, p->str, n)) {
+ if (parameqn(line, p->str, n)) {
if (p->early) {
/* Already done in parse_early_param?
* (Needs exact match on param part).
@@ -392,7 +392,7 @@ static int __init do_early_param(char *param, char *val)
const struct obs_kernel_param *p;
for (p = __setup_start; p < __setup_end; p++) {
- if ((p->early && strcmp(param, p->str) == 0) ||
+ if ((p->early && parameq(param, p->str)) ||
(strcmp(param, "console") == 0 &&
strcmp(p->str, "earlycon") == 0)
) {
diff --git a/kernel/Makefile b/kernel/Makefile
index eca595e2fd5..2da48d3515e 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -9,7 +9,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
- notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
+ notifier.o ksysfs.o sched_clock.o cred.o \
async.o range.o
obj-y += groups.o
diff --git a/kernel/async.c b/kernel/async.c
index d5fe7af0de2..4c2843c0043 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -120,7 +120,7 @@ static void async_run_entry_fn(struct work_struct *work)
struct async_entry *entry =
container_of(work, struct async_entry, work);
unsigned long flags;
- ktime_t calltime, delta, rettime;
+ ktime_t uninitialized_var(calltime), delta, rettime;
/* 1) move self to the running queue */
spin_lock_irqsave(&async_lock, flags);
@@ -269,7 +269,7 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
void async_synchronize_cookie_domain(async_cookie_t cookie,
struct list_head *running)
{
- ktime_t starttime, delta, endtime;
+ ktime_t uninitialized_var(starttime), delta, endtime;
if (initcall_debug && system_state == SYSTEM_BOOTING) {
printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 1d2b6ceea95..453100a4159 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -265,7 +265,7 @@ list_for_each_entry(_root, &roots, root_list)
/* the list of cgroups eligible for automatic release. Protected by
* release_list_lock */
static LIST_HEAD(release_list);
-static DEFINE_SPINLOCK(release_list_lock);
+static DEFINE_RAW_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);
@@ -4014,11 +4014,11 @@ again:
finish_wait(&cgroup_rmdir_waitq, &wait);
clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
- spin_lock(&release_list_lock);
+ raw_spin_lock(&release_list_lock);
set_bit(CGRP_REMOVED, &cgrp->flags);
if (!list_empty(&cgrp->release_list))
list_del_init(&cgrp->release_list);
- spin_unlock(&release_list_lock);
+ raw_spin_unlock(&release_list_lock);
cgroup_lock_hierarchy(cgrp->root);
/* delete this cgroup from parent->children */
@@ -4671,13 +4671,13 @@ static void check_for_release(struct cgroup *cgrp)
* already queued for a userspace notification, queue
* it now */
int need_schedule_work = 0;
- spin_lock(&release_list_lock);
+ raw_spin_lock(&release_list_lock);
if (!cgroup_is_removed(cgrp) &&
list_empty(&cgrp->release_list)) {
list_add(&cgrp->release_list, &release_list);
need_schedule_work = 1;
}
- spin_unlock(&release_list_lock);
+ raw_spin_unlock(&release_list_lock);
if (need_schedule_work)
schedule_work(&release_agent_work);
}
@@ -4729,7 +4729,7 @@ static void cgroup_release_agent(struct work_struct *work)
{
BUG_ON(work != &release_agent_work);
mutex_lock(&cgroup_mutex);
- spin_lock(&release_list_lock);
+ raw_spin_lock(&release_list_lock);
while (!list_empty(&release_list)) {
char *argv[3], *envp[3];
int i;
@@ -4738,7 +4738,7 @@ static void cgroup_release_agent(struct work_struct *work)
struct cgroup,
release_list);
list_del_init(&cgrp->release_list);
- spin_unlock(&release_list_lock);
+ raw_spin_unlock(&release_list_lock);
pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!pathbuf)
goto continue_free;
@@ -4768,9 +4768,9 @@ static void cgroup_release_agent(struct work_struct *work)
continue_free:
kfree(pathbuf);
kfree(agentbuf);
- spin_lock(&release_list_lock);
+ raw_spin_lock(&release_list_lock);
}
- spin_unlock(&release_list_lock);
+ raw_spin_unlock(&release_list_lock);
mutex_unlock(&cgroup_mutex);
}
diff --git a/kernel/cred.c b/kernel/cred.c
index 8ef31f53c44..bb55d052d85 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -644,6 +644,9 @@ void __init cred_init(void)
*/
struct cred *prepare_kernel_cred(struct task_struct *daemon)
{
+#ifdef CONFIG_KEYS
+ struct thread_group_cred *tgcred;
+#endif
const struct cred *old;
struct cred *new;
@@ -651,6 +654,14 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
if (!new)
return NULL;
+#ifdef CONFIG_KEYS
+ tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
+ if (!tgcred) {
+ kmem_cache_free(cred_jar, new);
+ return NULL;
+ }
+#endif
+
kdebug("prepare_kernel_cred() alloc %p", new);
if (daemon)
@@ -667,8 +678,11 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
get_group_info(new->group_info);
#ifdef CONFIG_KEYS
- atomic_inc(&init_tgcred.usage);
- new->tgcred = &init_tgcred;
+ atomic_set(&tgcred->usage, 1);
+ spin_lock_init(&tgcred->lock);
+ tgcred->process_keyring = NULL;
+ tgcred->session_keyring = NULL;
+ new->tgcred = tgcred;
new->request_key_auth = NULL;
new->thread_keyring = NULL;
new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 7b01de98bb6..66a594e8ad2 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -67,7 +67,7 @@ static void fake_signal_wake_up(struct task_struct *p)
unsigned long flags;
spin_lock_irqsave(&p->sighand->siglock, flags);
- signal_wake_up(p, 0);
+ signal_wake_up(p, 1);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
}
diff --git a/kernel/futex.c b/kernel/futex.c
index 11cbe052b2e..1511dff0cfd 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -854,7 +854,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
struct task_struct *new_owner;
struct futex_pi_state *pi_state = this->pi_state;
- u32 curval, newval;
+ u32 uninitialized_var(curval), newval;
if (!pi_state)
return -EINVAL;
@@ -916,7 +916,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
- u32 oldval;
+ u32 uninitialized_var(oldval);
/*
* There is no waiter, so we unlock the futex. The owner died
@@ -1576,7 +1576,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
struct futex_pi_state *pi_state = q->pi_state;
struct task_struct *oldowner = pi_state->owner;
- u32 uval, curval, newval;
+ u32 uval, uninitialized_var(curval), newval;
int ret;
/* Owner died? */
@@ -1793,7 +1793,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
*
* Returns:
* 0 - uaddr contains val and hb has been locked
- * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlcoked
+ * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
*/
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
struct futex_q *q, struct futex_hash_bucket **hb)
@@ -2481,7 +2481,7 @@ err_unlock:
*/
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
- u32 uval, nval, mval;
+ u32 uval, uninitialized_var(nval), mval;
retry:
if (get_user(uval, uaddr))
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index dc5114b4c16..f7c543a801d 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -26,7 +26,7 @@
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(irq_set_chip);
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
int ret = 0;
if (!desc)
@@ -78,7 +78,7 @@ EXPORT_SYMBOL(irq_set_irq_type);
int irq_set_handler_data(unsigned int irq, void *data)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(irq_set_handler_data);
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
@@ -119,7 +119,7 @@ int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
int irq_set_chip_data(unsigned int irq, void *data)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
@@ -204,6 +204,24 @@ void irq_disable(struct irq_desc *desc)
}
}
+void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
+{
+ if (desc->irq_data.chip->irq_enable)
+ desc->irq_data.chip->irq_enable(&desc->irq_data);
+ else
+ desc->irq_data.chip->irq_unmask(&desc->irq_data);
+ cpumask_set_cpu(cpu, desc->percpu_enabled);
+}
+
+void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
+{
+ if (desc->irq_data.chip->irq_disable)
+ desc->irq_data.chip->irq_disable(&desc->irq_data);
+ else
+ desc->irq_data.chip->irq_mask(&desc->irq_data);
+ cpumask_clear_cpu(cpu, desc->percpu_enabled);
+}
+
static inline void mask_ack_irq(struct irq_desc *desc)
{
if (desc->irq_data.chip->irq_mask_ack)
@@ -544,12 +562,44 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
+/**
+ * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ *
+ * Per CPU interrupts on SMP machines without locking requirements. Same as
+ * handle_percpu_irq() above but with the following extras:
+ *
+ * action->percpu_dev_id is a pointer to percpu variables which
+ * contain the real device id for the cpu on which this handler is
+ * called
+ */
+void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irqaction *action = desc->action;
+ void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
+ irqreturn_t res;
+
+ kstat_incr_irqs_this_cpu(irq, desc);
+
+ if (chip->irq_ack)
+ chip->irq_ack(&desc->irq_data);
+
+ trace_irq_handler_entry(irq, action);
+ res = action->handler(irq, dev_id);
+ trace_irq_handler_exit(irq, action, res);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
if (!desc)
return;
@@ -593,7 +643,7 @@ irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return;
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 6546431447d..a73dd6c7372 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -71,6 +71,8 @@ extern int irq_startup(struct irq_desc *desc);
extern void irq_shutdown(struct irq_desc *desc);
extern void irq_enable(struct irq_desc *desc);
extern void irq_disable(struct irq_desc *desc);
+extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
+extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
extern void mask_irq(struct irq_desc *desc);
extern void unmask_irq(struct irq_desc *desc);
@@ -114,14 +116,21 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
}
+#define _IRQ_DESC_CHECK (1 << 0)
+#define _IRQ_DESC_PERCPU (1 << 1)
+
+#define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK)
+#define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
+
struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus);
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+ unsigned int check);
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
static inline struct irq_desc *
-irq_get_desc_buslock(unsigned int irq, unsigned long *flags)
+irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
{
- return __irq_get_desc_lock(irq, flags, true);
+ return __irq_get_desc_lock(irq, flags, true, check);
}
static inline void
@@ -131,9 +140,9 @@ irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
}
static inline struct irq_desc *
-irq_get_desc_lock(unsigned int irq, unsigned long *flags)
+irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
{
- return __irq_get_desc_lock(irq, flags, false);
+ return __irq_get_desc_lock(irq, flags, false, check);
}
static inline void
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 039b889ea05..1550e8447a1 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -424,11 +424,22 @@ unsigned int irq_get_next_irq(unsigned int offset)
}
struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+ unsigned int check)
{
struct irq_desc *desc = irq_to_desc(irq);
if (desc) {
+ if (check & _IRQ_DESC_CHECK) {
+ if ((check & _IRQ_DESC_PERCPU) &&
+ !irq_settings_is_per_cpu_devid(desc))
+ return NULL;
+
+ if (!(check & _IRQ_DESC_PERCPU) &&
+ irq_settings_is_per_cpu_devid(desc))
+ return NULL;
+ }
+
if (bus)
chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, *flags);
@@ -443,6 +454,25 @@ void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
chip_bus_sync_unlock(desc);
}
+int irq_set_percpu_devid(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (!desc)
+ return -EINVAL;
+
+ if (desc->percpu_enabled)
+ return -EINVAL;
+
+ desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
+
+ if (!desc->percpu_enabled)
+ return -ENOMEM;
+
+ irq_set_percpu_devid_flags(irq);
+ return 0;
+}
+
/**
* dynamic_irq_cleanup - cleanup a dynamically allocated irq
* @irq: irq number to initialize
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9b956fa2030..67ce837ae52 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -195,7 +195,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
@@ -356,7 +356,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
static int __disable_irq_nosync(unsigned int irq)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
@@ -448,7 +448,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
void enable_irq(unsigned int irq)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return;
@@ -467,6 +467,9 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
struct irq_desc *desc = irq_to_desc(irq);
int ret = -ENXIO;
+ if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
+ return 0;
+
if (desc->irq_data.chip->irq_set_wake)
ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
@@ -488,7 +491,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
int ret = 0;
if (!desc)
@@ -529,7 +532,7 @@ EXPORT_SYMBOL(irq_set_irq_wake);
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
int canrequest = 0;
if (!desc)
@@ -1118,6 +1121,8 @@ int setup_irq(unsigned int irq, struct irqaction *act)
int retval;
struct irq_desc *desc = irq_to_desc(irq);
+ if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+ return -EINVAL;
chip_bus_lock(desc);
retval = __setup_irq(irq, desc, act);
chip_bus_sync_unlock(desc);
@@ -1126,7 +1131,7 @@ int setup_irq(unsigned int irq, struct irqaction *act)
}
EXPORT_SYMBOL_GPL(setup_irq);
- /*
+/*
* Internal function to unregister an irqaction - used to free
* regular and special interrupts that are part of the architecture.
*/
@@ -1224,7 +1229,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
*/
void remove_irq(unsigned int irq, struct irqaction *act)
{
- __free_irq(irq, act->dev_id);
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+ __free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
@@ -1246,7 +1254,7 @@ void free_irq(unsigned int irq, void *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
- if (!desc)
+ if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return;
#ifdef CONFIG_SMP
@@ -1324,7 +1332,8 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
if (!desc)
return -EINVAL;
- if (!irq_settings_can_request(desc))
+ if (!irq_settings_can_request(desc) ||
+ WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return -EINVAL;
if (!handler) {
@@ -1409,3 +1418,194 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
+
+void enable_percpu_irq(unsigned int irq, unsigned int type)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+ if (!desc)
+ return;
+
+ type &= IRQ_TYPE_SENSE_MASK;
+ if (type != IRQ_TYPE_NONE) {
+ int ret;
+
+ ret = __irq_set_trigger(desc, irq, type);
+
+ if (ret) {
+ WARN(1, "failed to set type for IRQ%d\n", irq);
+ goto out;
+ }
+ }
+
+ irq_percpu_enable(desc, cpu);
+out:
+ irq_put_desc_unlock(desc, flags);
+}
+
+void disable_percpu_irq(unsigned int irq)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+ if (!desc)
+ return;
+
+ irq_percpu_disable(desc, cpu);
+ irq_put_desc_unlock(desc, flags);
+}
+
+/*
+ * Internal function to unregister a percpu irqaction.
+ */
+static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irqaction *action;
+ unsigned long flags;
+
+ WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
+
+ if (!desc)
+ return NULL;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ action = desc->action;
+ if (!action || action->percpu_dev_id != dev_id) {
+ WARN(1, "Trying to free already-free IRQ %d\n", irq);
+ goto bad;
+ }
+
+ if (!cpumask_empty(desc->percpu_enabled)) {
+ WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
+ irq, cpumask_first(desc->percpu_enabled));
+ goto bad;
+ }
+
+ /* Found it - now remove it from the list of entries: */
+ desc->action = NULL;
+
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ unregister_handler_proc(irq, action);
+
+ module_put(desc->owner);
+ return action;
+
+bad:
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ return NULL;
+}
+
+/**
+ * remove_percpu_irq - free a per-cpu interrupt
+ * @irq: Interrupt line to free
+ * @act: irqaction for the interrupt
+ *
+ * Used to remove interrupts statically setup by the early boot process.
+ */
+void remove_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc && irq_settings_is_per_cpu_devid(desc))
+ __free_percpu_irq(irq, act->percpu_dev_id);
+}
+
+/**
+ * free_percpu_irq - free an interrupt allocated with request_percpu_irq
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * Remove a percpu interrupt handler. The handler is removed, but
+ * the interrupt line is not disabled. This must be done on each
+ * CPU before calling this function. The function does not return
+ * until any executing interrupts for this IRQ have completed.
+ *
+ * This function must not be called from interrupt context.
+ */
+void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (!desc || !irq_settings_is_per_cpu_devid(desc))
+ return;
+
+ chip_bus_lock(desc);
+ kfree(__free_percpu_irq(irq, dev_id));
+ chip_bus_sync_unlock(desc);
+}
+
+/**
+ * setup_percpu_irq - setup a per-cpu interrupt
+ * @irq: Interrupt line to setup
+ * @act: irqaction for the interrupt
+ *
+ * Used to statically setup per-cpu interrupts in the early boot process.
+ */
+int setup_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ int retval;
+
+ if (!desc || !irq_settings_is_per_cpu_devid(desc))
+ return -EINVAL;
+ chip_bus_lock(desc);
+ retval = __setup_irq(irq, desc, act);
+ chip_bus_sync_unlock(desc);
+
+ return retval;
+}
+
+/**
+ * request_percpu_irq - allocate a percpu interrupt line
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A percpu cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources, but doesn't
+ * automatically enable the interrupt. It has to be done on each
+ * CPU using enable_percpu_irq().
+ *
+ * Dev_id must be globally unique. It is a per-cpu variable, and
+ * the handler gets called with the interrupted CPU's instance of
+ * that variable.
+ */
+int request_percpu_irq(unsigned int irq, irq_handler_t handler,
+ const char *devname, void __percpu *dev_id)
+{
+ struct irqaction *action;
+ struct irq_desc *desc;
+ int retval;
+
+ if (!dev_id)
+ return -EINVAL;
+
+ desc = irq_to_desc(irq);
+ if (!desc || !irq_settings_can_request(desc) ||
+ !irq_settings_is_per_cpu_devid(desc))
+ return -EINVAL;
+
+ action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = IRQF_PERCPU;
+ action->name = devname;
+ action->percpu_dev_id = dev_id;
+
+ chip_bus_lock(desc);
+ retval = __setup_irq(irq, desc, action);
+ chip_bus_sync_unlock(desc);
+
+ if (retval)
+ kfree(action);
+
+ return retval;
+}
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index f76fc00c987..15e53b1766a 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -9,6 +9,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/syscore_ops.h>
#include "internals.h"
@@ -39,25 +40,58 @@ void suspend_device_irqs(void)
}
EXPORT_SYMBOL_GPL(suspend_device_irqs);
-/**
- * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
- *
- * Enable all interrupt lines previously disabled by suspend_device_irqs() that
- * have the IRQS_SUSPENDED flag set.
- */
-void resume_device_irqs(void)
+static void resume_irqs(bool want_early)
{
struct irq_desc *desc;
int irq;
for_each_irq_desc(irq, desc) {
unsigned long flags;
+ bool is_early = desc->action &&
+ desc->action->flags & IRQF_EARLY_RESUME;
+
+ if (is_early != want_early)
+ continue;
raw_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, true);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
+
+/**
+ * irq_pm_syscore_ops - enable interrupt lines early
+ *
+ * Enable all interrupt lines with %IRQF_EARLY_RESUME set.
+ */
+static void irq_pm_syscore_resume(void)
+{
+ resume_irqs(true);
+}
+
+static struct syscore_ops irq_pm_syscore_ops = {
+ .resume = irq_pm_syscore_resume,
+};
+
+static int __init irq_pm_init_ops(void)
+{
+ register_syscore_ops(&irq_pm_syscore_ops);
+ return 0;
+}
+
+device_initcall(irq_pm_init_ops);
+
+/**
+ * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
+ *
+ * Enable all non-%IRQF_EARLY_RESUME interrupt lines previously
+ * disabled by suspend_device_irqs() that have the IRQS_SUSPENDED flag
+ * set as well as those with %IRQF_FORCE_RESUME.
+ */
+void resume_device_irqs(void)
+{
+ resume_irqs(false);
+}
EXPORT_SYMBOL_GPL(resume_device_irqs);
/**
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index f1667833d44..1162f1030f1 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -13,6 +13,7 @@ enum {
_IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT,
_IRQ_NO_BALANCING = IRQ_NO_BALANCING,
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
+ _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
@@ -24,6 +25,7 @@ enum {
#define IRQ_NOTHREAD GOT_YOU_MORON
#define IRQ_NOAUTOEN GOT_YOU_MORON
#define IRQ_NESTED_THREAD GOT_YOU_MORON
+#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
@@ -39,6 +41,11 @@ static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
return desc->status_use_accessors & _IRQ_PER_CPU;
}
+static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_PER_CPU_DEVID;
+}
+
static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
{
desc->status_use_accessors |= _IRQ_PER_CPU;
diff --git a/kernel/kmod.c b/kernel/kmod.c
index ddc7644c130..a4bea97c75b 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -114,10 +114,12 @@ int __request_module(bool wait, const char *fmt, ...)
atomic_inc(&kmod_concurrent);
if (atomic_read(&kmod_concurrent) > max_modprobes) {
/* We may be blaming an innocent here, but unlikely */
- if (kmod_loop_msg++ < 5)
+ if (kmod_loop_msg < 5) {
printk(KERN_ERR
"request_module: runaway loop modprobe %s\n",
module_name);
+ kmod_loop_msg++;
+ }
atomic_dec(&kmod_concurrent);
return -ENOMEM;
}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b30fd54eb98..2f193d0ba7f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -78,10 +78,10 @@ static bool kprobes_all_disarmed;
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
- spinlock_t lock ____cacheline_aligned_in_smp;
+ raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];
-static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
+static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
return &(kretprobe_table_locks[hash].lock);
}
@@ -1013,9 +1013,9 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
hlist_del(&ri->hlist);
INIT_HLIST_NODE(&ri->hlist);
if (likely(rp)) {
- spin_lock(&rp->lock);
+ raw_spin_lock(&rp->lock);
hlist_add_head(&ri->hlist, &rp->free_instances);
- spin_unlock(&rp->lock);
+ raw_spin_unlock(&rp->lock);
} else
/* Unregistering */
hlist_add_head(&ri->hlist, head);
@@ -1026,19 +1026,19 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
__acquires(hlist_lock)
{
unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
- spinlock_t *hlist_lock;
+ raw_spinlock_t *hlist_lock;
*head = &kretprobe_inst_table[hash];
hlist_lock = kretprobe_table_lock_ptr(hash);
- spin_lock_irqsave(hlist_lock, *flags);
+ raw_spin_lock_irqsave(hlist_lock, *flags);
}
static void __kprobes kretprobe_table_lock(unsigned long hash,
unsigned long *flags)
__acquires(hlist_lock)
{
- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
- spin_lock_irqsave(hlist_lock, *flags);
+ raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+ raw_spin_lock_irqsave(hlist_lock, *flags);
}
void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
@@ -1046,18 +1046,18 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
__releases(hlist_lock)
{
unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
- spinlock_t *hlist_lock;
+ raw_spinlock_t *hlist_lock;
hlist_lock = kretprobe_table_lock_ptr(hash);
- spin_unlock_irqrestore(hlist_lock, *flags);
+ raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
static void __kprobes kretprobe_table_unlock(unsigned long hash,
unsigned long *flags)
__releases(hlist_lock)
{
- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
- spin_unlock_irqrestore(hlist_lock, *flags);
+ raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+ raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
/*
@@ -1663,12 +1663,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
/*TODO: consider to only swap the RA after the last pre_handler fired */
hash = hash_ptr(current, KPROBE_HASH_BITS);
- spin_lock_irqsave(&rp->lock, flags);
+ raw_spin_lock_irqsave(&rp->lock, flags);
if (!hlist_empty(&rp->free_instances)) {
ri = hlist_entry(rp->free_instances.first,
struct kretprobe_instance, hlist);
hlist_del(&ri->hlist);
- spin_unlock_irqrestore(&rp->lock, flags);
+ raw_spin_unlock_irqrestore(&rp->lock, flags);
ri->rp = rp;
ri->task = current;
@@ -1685,7 +1685,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
kretprobe_table_unlock(hash, &flags);
} else {
rp->nmissed++;
- spin_unlock_irqrestore(&rp->lock, flags);
+ raw_spin_unlock_irqrestore(&rp->lock, flags);
}
return 0;
}
@@ -1721,7 +1721,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
rp->maxactive = num_possible_cpus();
#endif
}
- spin_lock_init(&rp->lock);
+ raw_spin_lock_init(&rp->lock);
INIT_HLIST_HEAD(&rp->free_instances);
for (i = 0; i < rp->maxactive; i++) {
inst = kmalloc(sizeof(struct kretprobe_instance) +
@@ -1959,7 +1959,7 @@ static int __init init_kprobes(void)
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
INIT_HLIST_HEAD(&kprobe_table[i]);
INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
- spin_lock_init(&(kretprobe_table_locks[i].lock));
+ raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
}
/*
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 376066e1041..4ac8ebfcab5 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -58,7 +58,7 @@
#include <linux/list.h>
#include <linux/stacktrace.h>
-static DEFINE_SPINLOCK(latency_lock);
+static DEFINE_RAW_SPINLOCK(latency_lock);
#define MAXLR 128
static struct latency_record latency_record[MAXLR];
@@ -72,19 +72,19 @@ void clear_all_latency_tracing(struct task_struct *p)
if (!latencytop_enabled)
return;
- spin_lock_irqsave(&latency_lock, flags);
+ raw_spin_lock_irqsave(&latency_lock, flags);
memset(&p->latency_record, 0, sizeof(p->latency_record));
p->latency_record_count = 0;
- spin_unlock_irqrestore(&latency_lock, flags);
+ raw_spin_unlock_irqrestore(&latency_lock, flags);
}
static void clear_global_latency_tracing(void)
{
unsigned long flags;
- spin_lock_irqsave(&latency_lock, flags);
+ raw_spin_lock_irqsave(&latency_lock, flags);
memset(&latency_record, 0, sizeof(latency_record));
- spin_unlock_irqrestore(&latency_lock, flags);
+ raw_spin_unlock_irqrestore(&latency_lock, flags);
}
static void __sched
@@ -190,7 +190,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
lat.max = usecs;
store_stacktrace(tsk, &lat);
- spin_lock_irqsave(&latency_lock, flags);
+ raw_spin_lock_irqsave(&latency_lock, flags);
account_global_scheduler_latency(tsk, &lat);
@@ -231,7 +231,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
out_unlock:
- spin_unlock_irqrestore(&latency_lock, flags);
+ raw_spin_unlock_irqrestore(&latency_lock, flags);
}
static int lstats_show(struct seq_file *m, void *v)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 91d67ce3a8d..e69434b070d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -96,8 +96,13 @@ static int graph_lock(void)
static inline int graph_unlock(void)
{
- if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
+ if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
+ /*
+ * The lockdep graph lock isn't locked while we expect it to
+ * be, we're confused now, bye!
+ */
return DEBUG_LOCKS_WARN_ON(1);
+ }
current->lockdep_recursion--;
arch_spin_unlock(&lockdep_lock);
@@ -134,6 +139,9 @@ static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
if (!hlock->class_idx) {
+ /*
+ * Someone passed in garbage, we give up.
+ */
DEBUG_LOCKS_WARN_ON(1);
return NULL;
}
@@ -687,6 +695,10 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
*/
list_for_each_entry(class, hash_head, hash_entry) {
if (class->key == key) {
+ /*
+ * Huh! same key, different name? Did someone trample
+ * on some memory? We're most confused.
+ */
WARN_ON_ONCE(class->name != lock->name);
return class;
}
@@ -800,6 +812,10 @@ out_unlock_set:
else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
lock->class_cache[subclass] = class;
+ /*
+ * Hash collision, did we smoke some? We found a class with a matching
+ * hash but the subclass -- which is hashed in -- didn't match.
+ */
if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
return NULL;
@@ -926,7 +942,7 @@ static inline void mark_lock_accessed(struct lock_list *lock,
unsigned long nr;
nr = lock - list_entries;
- WARN_ON(nr >= nr_list_entries);
+ WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
lock->parent = parent;
lock->class->dep_gen_id = lockdep_dependency_gen_id;
}
@@ -936,7 +952,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock)
unsigned long nr;
nr = lock - list_entries;
- WARN_ON(nr >= nr_list_entries);
+ WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}
@@ -1129,10 +1145,11 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
if (debug_locks_silent)
return 0;
- printk("\n=======================================================\n");
- printk( "[ INFO: possible circular locking dependency detected ]\n");
+ printk("\n");
+ printk("======================================================\n");
+ printk("[ INFO: possible circular locking dependency detected ]\n");
print_kernel_version();
- printk( "-------------------------------------------------------\n");
+ printk("-------------------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
print_lock(check_src);
@@ -1196,6 +1213,9 @@ static noinline int print_bfs_bug(int ret)
if (!debug_locks_off_graph_unlock())
return 0;
+ /*
+ * Breadth-first-search failed, graph got corrupted?
+ */
WARN(1, "lockdep bfs error:%d\n", ret);
return 0;
@@ -1463,11 +1483,12 @@ print_bad_irq_dependency(struct task_struct *curr,
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
- printk("\n======================================================\n");
- printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
+ printk("\n");
+ printk("======================================================\n");
+ printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
irqclass, irqclass);
print_kernel_version();
- printk( "------------------------------------------------------\n");
+ printk("------------------------------------------------------\n");
printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
curr->comm, task_pid_nr(curr),
curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
@@ -1692,10 +1713,11 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
- printk("\n=============================================\n");
- printk( "[ INFO: possible recursive locking detected ]\n");
+ printk("\n");
+ printk("=============================================\n");
+ printk("[ INFO: possible recursive locking detected ]\n");
print_kernel_version();
- printk( "---------------------------------------------\n");
+ printk("---------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
print_lock(next);
@@ -1944,6 +1966,11 @@ out_bug:
if (!debug_locks_off_graph_unlock())
return 0;
+ /*
+ * Clearly we all shouldn't be here, but since we made it we
+ * can reliable say we messed up our state. See the above two
+ * gotos for reasons why we could possibly end up here.
+ */
WARN_ON(1);
return 0;
@@ -1975,6 +2002,11 @@ static inline int lookup_chain_cache(struct task_struct *curr,
struct held_lock *hlock_curr, *hlock_next;
int i, j;
+ /*
+ * We might need to take the graph lock, ensure we've got IRQs
+ * disabled to make this an IRQ-safe lock.. for recursion reasons
+ * lockdep won't complain about its own locking errors.
+ */
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
/*
@@ -2126,6 +2158,10 @@ static void check_chain_key(struct task_struct *curr)
hlock = curr->held_locks + i;
if (chain_key != hlock->prev_chain_key) {
debug_locks_off();
+ /*
+ * We got mighty confused, our chain keys don't match
+ * with what we expect, someone trample on our task state?
+ */
WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
curr->lockdep_depth, i,
(unsigned long long)chain_key,
@@ -2133,6 +2169,9 @@ static void check_chain_key(struct task_struct *curr)
return;
}
id = hlock->class_idx - 1;
+ /*
+ * Whoops ran out of static storage again?
+ */
if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
return;
@@ -2144,6 +2183,10 @@ static void check_chain_key(struct task_struct *curr)
}
if (chain_key != curr->curr_chain_key) {
debug_locks_off();
+ /*
+ * More smoking hash instead of calculating it, damn see these
+ * numbers float.. I bet that a pink elephant stepped on my memory.
+ */
WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
curr->lockdep_depth, i,
(unsigned long long)chain_key,
@@ -2177,10 +2220,11 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
- printk("\n=================================\n");
- printk( "[ INFO: inconsistent lock state ]\n");
+ printk("\n");
+ printk("=================================\n");
+ printk("[ INFO: inconsistent lock state ]\n");
print_kernel_version();
- printk( "---------------------------------\n");
+ printk("---------------------------------\n");
printk("inconsistent {%s} -> {%s} usage.\n",
usage_str[prev_bit], usage_str[new_bit]);
@@ -2241,10 +2285,11 @@ print_irq_inversion_bug(struct task_struct *curr,
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
- printk("\n=========================================================\n");
- printk( "[ INFO: possible irq lock inversion dependency detected ]\n");
+ printk("\n");
+ printk("=========================================================\n");
+ printk("[ INFO: possible irq lock inversion dependency detected ]\n");
print_kernel_version();
- printk( "---------------------------------------------------------\n");
+ printk("---------------------------------------------------------\n");
printk("%s/%d just changed the state of lock:\n",
curr->comm, task_pid_nr(curr));
print_lock(this);
@@ -2525,12 +2570,24 @@ void trace_hardirqs_on_caller(unsigned long ip)
return;
}
+ /*
+ * We're enabling irqs and according to our state above irqs weren't
+ * already enabled, yet we find the hardware thinks they are in fact
+ * enabled.. someone messed up their IRQ state tracing.
+ */
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
+ /*
+ * See the fine text that goes along with this variable definition.
+ */
if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
return;
+ /*
+ * Can't allow enabling interrupts while in an interrupt handler,
+ * that's general bad form and such. Recursion, limited stack etc..
+ */
if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
return;
@@ -2558,6 +2615,10 @@ void trace_hardirqs_off_caller(unsigned long ip)
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
+ /*
+ * So we're supposed to get called after you mask local IRQs, but for
+ * some reason the hardware doesn't quite think you did a proper job.
+ */
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
@@ -2590,6 +2651,10 @@ void trace_softirqs_on(unsigned long ip)
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
+ /*
+ * We fancy IRQs being disabled here, see softirq.c, avoids
+ * funny state and nesting things.
+ */
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
@@ -2626,6 +2691,9 @@ void trace_softirqs_off(unsigned long ip)
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
+ /*
+ * We fancy IRQs being disabled here, see softirq.c
+ */
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
@@ -2637,6 +2705,9 @@ void trace_softirqs_off(unsigned long ip)
curr->softirq_disable_ip = ip;
curr->softirq_disable_event = ++curr->irq_events;
debug_atomic_inc(softirqs_off_events);
+ /*
+ * Whoops, we wanted softirqs off, so why aren't they?
+ */
DEBUG_LOCKS_WARN_ON(!softirq_count());
} else
debug_atomic_inc(redundant_softirqs_off);
@@ -2661,6 +2732,9 @@ static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
if (!(gfp_mask & __GFP_FS))
return;
+ /*
+ * Oi! Can't be having __GFP_FS allocations with IRQs disabled.
+ */
if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
return;
@@ -2773,13 +2847,13 @@ static int separate_irq_context(struct task_struct *curr,
return 0;
}
-#else
+#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
static inline
int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit)
{
- WARN_ON(1);
+ WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
return 1;
}
@@ -2799,7 +2873,7 @@ void lockdep_trace_alloc(gfp_t gfp_mask)
{
}
-#endif
+#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
/*
* Mark a lock with a usage bit, and validate the state transition:
@@ -2880,6 +2954,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
lock->cpu = raw_smp_processor_id();
#endif
+ /*
+ * Can't be having no nameless bastards around this place!
+ */
if (DEBUG_LOCKS_WARN_ON(!name)) {
lock->name = "NULL";
return;
@@ -2887,6 +2964,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
lock->name = name;
+ /*
+ * No key, no joy, we need to hash something.
+ */
if (DEBUG_LOCKS_WARN_ON(!key))
return;
/*
@@ -2894,6 +2974,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
*/
if (!static_obj(key)) {
printk("BUG: key %p not in .data!\n", key);
+ /*
+ * What it says above ^^^^^, I suggest you read it.
+ */
DEBUG_LOCKS_WARN_ON(1);
return;
}
@@ -2932,6 +3015,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (unlikely(!debug_locks))
return 0;
+ /*
+ * Lockdep should run with IRQs disabled, otherwise we could
+ * get an interrupt which would want to take locks, which would
+ * end up in lockdep and have you got a head-ache already?
+ */
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
@@ -2963,6 +3051,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
* dependency checks are done)
*/
depth = curr->lockdep_depth;
+ /*
+ * Ran out of static storage for our per-task lock stack again have we?
+ */
if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
return 0;
@@ -2981,6 +3072,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
}
hlock = curr->held_locks + depth;
+ /*
+ * Plain impossible, we just registered it and checked it weren't no
+ * NULL like.. I bet this mushroom I ate was good!
+ */
if (DEBUG_LOCKS_WARN_ON(!class))
return 0;
hlock->class_idx = class_idx;
@@ -3015,11 +3110,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
* the hash, not class->key.
*/
id = class - lock_classes;
+ /*
+ * Whoops, we did it again.. ran straight out of our static allocation.
+ */
if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
return 0;
chain_key = curr->curr_chain_key;
if (!depth) {
+ /*
+ * How can we have a chain hash when we ain't got no keys?!
+ */
if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
return 0;
chain_head = 1;
@@ -3065,9 +3166,10 @@ print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
if (debug_locks_silent)
return 0;
- printk("\n=====================================\n");
- printk( "[ BUG: bad unlock balance detected! ]\n");
- printk( "-------------------------------------\n");
+ printk("\n");
+ printk("=====================================\n");
+ printk("[ BUG: bad unlock balance detected! ]\n");
+ printk("-------------------------------------\n");
printk("%s/%d is trying to release lock (",
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
@@ -3091,6 +3193,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
{
if (unlikely(!debug_locks))
return 0;
+ /*
+ * Lockdep should run with IRQs disabled, recursion, head-ache, etc..
+ */
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
@@ -3120,6 +3225,11 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
if (!class)
return 0;
+ /*
+ * References, but not a lock we're actually ref-counting?
+ * State got messed up, follow the sites that change ->references
+ * and try to make sense of it.
+ */
if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
return 0;
@@ -3142,6 +3252,10 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
int i;
depth = curr->lockdep_depth;
+ /*
+ * This function is about (re)setting the class of a held lock,
+ * yet we're not actually holding any locks. Naughty user!
+ */
if (DEBUG_LOCKS_WARN_ON(!depth))
return 0;
@@ -3177,6 +3291,10 @@ found_it:
return 0;
}
+ /*
+ * I took it apart and put it back together again, except now I have
+ * these 'spare' parts.. where shall I put them.
+ */
if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
return 0;
return 1;
@@ -3201,6 +3319,10 @@ lock_release_non_nested(struct task_struct *curr,
* of held locks:
*/
depth = curr->lockdep_depth;
+ /*
+ * So we're all set to release this lock.. wait what lock? We don't
+ * own any locks, you've been drinking again?
+ */
if (DEBUG_LOCKS_WARN_ON(!depth))
return 0;
@@ -3253,6 +3375,10 @@ found_it:
return 0;
}
+ /*
+ * We had N bottles of beer on the wall, we drank one, but now
+ * there's not N-1 bottles of beer left on the wall...
+ */
if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
return 0;
return 1;
@@ -3283,6 +3409,9 @@ static int lock_release_nested(struct task_struct *curr,
return lock_release_non_nested(curr, lock, ip);
curr->lockdep_depth--;
+ /*
+ * No more locks, but somehow we've got hash left over, who left it?
+ */
if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
return 0;
@@ -3365,10 +3494,13 @@ static void check_flags(unsigned long flags)
* check if not in hardirq contexts:
*/
if (!hardirq_count()) {
- if (softirq_count())
+ if (softirq_count()) {
+ /* like the above, but with softirqs */
DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
- else
+ } else {
+ /* lick the above, does it taste good? */
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
+ }
}
if (!debug_locks)
@@ -3478,9 +3610,10 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
if (debug_locks_silent)
return 0;
- printk("\n=================================\n");
- printk( "[ BUG: bad contention detected! ]\n");
- printk( "---------------------------------\n");
+ printk("\n");
+ printk("=================================\n");
+ printk("[ BUG: bad contention detected! ]\n");
+ printk("---------------------------------\n");
printk("%s/%d is trying to contend lock (",
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
@@ -3506,6 +3639,10 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
int i, contention_point, contending_point;
depth = curr->lockdep_depth;
+ /*
+ * Whee, we contended on this lock, except it seems we're not
+ * actually trying to acquire anything much at all..
+ */
if (DEBUG_LOCKS_WARN_ON(!depth))
return;
@@ -3555,6 +3692,10 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
int i, cpu;
depth = curr->lockdep_depth;
+ /*
+ * Yay, we acquired ownership of this lock we didn't try to
+ * acquire, how the heck did that happen?
+ */
if (DEBUG_LOCKS_WARN_ON(!depth))
return;
@@ -3759,8 +3900,12 @@ void lockdep_reset_lock(struct lockdep_map *lock)
match |= class == lock->class_cache[j];
if (unlikely(match)) {
- if (debug_locks_off_graph_unlock())
+ if (debug_locks_off_graph_unlock()) {
+ /*
+ * We all just reset everything, how did it match?
+ */
WARN_ON(1);
+ }
goto out_restore;
}
}
@@ -3839,9 +3984,10 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
if (debug_locks_silent)
return;
- printk("\n=========================\n");
- printk( "[ BUG: held lock freed! ]\n");
- printk( "-------------------------\n");
+ printk("\n");
+ printk("=========================\n");
+ printk("[ BUG: held lock freed! ]\n");
+ printk("-------------------------\n");
printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
print_lock(hlock);
@@ -3895,9 +4041,10 @@ static void print_held_locks_bug(struct task_struct *curr)
if (debug_locks_silent)
return;
- printk("\n=====================================\n");
- printk( "[ BUG: lock held at task exit time! ]\n");
- printk( "-------------------------------------\n");
+ printk("\n");
+ printk("=====================================\n");
+ printk("[ BUG: lock held at task exit time! ]\n");
+ printk("-------------------------------------\n");
printk("%s/%d is exiting with locks still held!\n",
curr->comm, task_pid_nr(curr));
lockdep_print_held_locks(curr);
@@ -3991,16 +4138,17 @@ void lockdep_sys_exit(void)
if (unlikely(curr->lockdep_depth)) {
if (!debug_locks_off())
return;
- printk("\n================================================\n");
- printk( "[ BUG: lock held when returning to user space! ]\n");
- printk( "------------------------------------------------\n");
+ printk("\n");
+ printk("================================================\n");
+ printk("[ BUG: lock held when returning to user space! ]\n");
+ printk("------------------------------------------------\n");
printk("%s/%d is leaving the kernel with locks still held!\n",
curr->comm, curr->pid);
lockdep_print_held_locks(curr);
}
}
-void lockdep_rcu_dereference(const char *file, const int line)
+void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
struct task_struct *curr = current;
@@ -4009,15 +4157,15 @@ void lockdep_rcu_dereference(const char *file, const int line)
return;
#endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
/* Note: the following can be executed concurrently, so be careful. */
- printk("\n===================================================\n");
- printk( "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
- printk( "---------------------------------------------------\n");
- printk("%s:%d invoked rcu_dereference_check() without protection!\n",
- file, line);
+ printk("\n");
+ printk("===============================\n");
+ printk("[ INFO: suspicious RCU usage. ]\n");
+ printk("-------------------------------\n");
+ printk("%s:%d %s!\n", file, line, s);
printk("\nother info that might help us debug this:\n\n");
printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
}
-EXPORT_SYMBOL_GPL(lockdep_rcu_dereference);
+EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
diff --git a/kernel/params.c b/kernel/params.c
index 22df3e0d142..821788947e4 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -67,20 +67,27 @@ static void maybe_kfree_parameter(void *param)
}
}
-static inline char dash2underscore(char c)
+static char dash2underscore(char c)
{
if (c == '-')
return '_';
return c;
}
-static inline int parameq(const char *input, const char *paramname)
+bool parameqn(const char *a, const char *b, size_t n)
{
- unsigned int i;
- for (i = 0; dash2underscore(input[i]) == paramname[i]; i++)
- if (input[i] == '\0')
- return 1;
- return 0;
+ size_t i;
+
+ for (i = 0; i < n; i++) {
+ if (dash2underscore(a[i]) != dash2underscore(b[i]))
+ return false;
+ }
+ return true;
+}
+
+bool parameq(const char *a, const char *b)
+{
+ return parameqn(a, b, strlen(a)+1);
}
static int parse_one(char *param,
diff --git a/kernel/pid.c b/kernel/pid.c
index e432057f3b2..8cafe7e72ad 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -418,7 +418,9 @@ EXPORT_SYMBOL(pid_task);
*/
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
- rcu_lockdep_assert(rcu_read_lock_held());
+ rcu_lockdep_assert(rcu_read_lock_held(),
+ "find_task_by_pid_ns() needs rcu_read_lock()"
+ " protection");
return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
deleted file mode 100644
index 37f05d0f079..00000000000
--- a/kernel/pm_qos_params.c
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- * This module exposes the interface to kernel space for specifying
- * QoS dependencies. It provides infrastructure for registration of:
- *
- * Dependents on a QoS value : register requests
- * Watchers of QoS value : get notified when target QoS value changes
- *
- * This QoS design is best effort based. Dependents register their QoS needs.
- * Watchers register to keep track of the current QoS needs of the system.
- *
- * There are 3 basic classes of QoS parameter: latency, timeout, throughput
- * each have defined units:
- * latency: usec
- * timeout: usec <-- currently not used.
- * throughput: kbs (kilo byte / sec)
- *
- * There are lists of pm_qos_objects each one wrapping requests, notifiers
- *
- * User mode requests on a QOS parameter register themselves to the
- * subsystem by opening the device node /dev/... and writing there request to
- * the node. As long as the process holds a file handle open to the node the
- * client continues to be accounted for. Upon file release the usermode
- * request is removed and a new qos target is computed. This way when the
- * request that the application has is cleaned up when closes the file
- * pointer or exits the pm_qos_object will get an opportunity to clean up.
- *
- * Mark Gross <mgross@linux.intel.com>
- */
-
-/*#define DEBUG*/
-
-#include <linux/pm_qos_params.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/device.h>
-#include <linux/miscdevice.h>
-#include <linux/string.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <linux/uaccess.h>
-
-/*
- * locking rule: all changes to requests or notifiers lists
- * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
- * held, taken with _irqsave. One lock to rule them all
- */
-enum pm_qos_type {
- PM_QOS_MAX, /* return the largest value */
- PM_QOS_MIN /* return the smallest value */
-};
-
-/*
- * Note: The lockless read path depends on the CPU accessing
- * target_value atomically. Atomic access is only guaranteed on all CPU
- * types linux supports for 32 bit quantites
- */
-struct pm_qos_object {
- struct plist_head requests;
- struct blocking_notifier_head *notifiers;
- struct miscdevice pm_qos_power_miscdev;
- char *name;
- s32 target_value; /* Do not change to 64 bit */
- s32 default_value;
- enum pm_qos_type type;
-};
-
-static DEFINE_SPINLOCK(pm_qos_lock);
-
-static struct pm_qos_object null_pm_qos;
-static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
-static struct pm_qos_object cpu_dma_pm_qos = {
- .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests),
- .notifiers = &cpu_dma_lat_notifier,
- .name = "cpu_dma_latency",
- .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
- .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
- .type = PM_QOS_MIN,
-};
-
-static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
-static struct pm_qos_object network_lat_pm_qos = {
- .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests),
- .notifiers = &network_lat_notifier,
- .name = "network_latency",
- .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
- .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
- .type = PM_QOS_MIN
-};
-
-
-static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
-static struct pm_qos_object network_throughput_pm_qos = {
- .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests),
- .notifiers = &network_throughput_notifier,
- .name = "network_throughput",
- .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
- .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
- .type = PM_QOS_MAX,
-};
-
-
-static struct pm_qos_object *pm_qos_array[] = {
- &null_pm_qos,
- &cpu_dma_pm_qos,
- &network_lat_pm_qos,
- &network_throughput_pm_qos
-};
-
-static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *f_pos);
-static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
- size_t count, loff_t *f_pos);
-static int pm_qos_power_open(struct inode *inode, struct file *filp);
-static int pm_qos_power_release(struct inode *inode, struct file *filp);
-
-static const struct file_operations pm_qos_power_fops = {
- .write = pm_qos_power_write,
- .read = pm_qos_power_read,
- .open = pm_qos_power_open,
- .release = pm_qos_power_release,
- .llseek = noop_llseek,
-};
-
-/* unlocked internal variant */
-static inline int pm_qos_get_value(struct pm_qos_object *o)
-{
- if (plist_head_empty(&o->requests))
- return o->default_value;
-
- switch (o->type) {
- case PM_QOS_MIN:
- return plist_first(&o->requests)->prio;
-
- case PM_QOS_MAX:
- return plist_last(&o->requests)->prio;
-
- default:
- /* runtime check for not using enum */
- BUG();
- }
-}
-
-static inline s32 pm_qos_read_value(struct pm_qos_object *o)
-{
- return o->target_value;
-}
-
-static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
-{
- o->target_value = value;
-}
-
-static void update_target(struct pm_qos_object *o, struct plist_node *node,
- int del, int value)
-{
- unsigned long flags;
- int prev_value, curr_value;
-
- spin_lock_irqsave(&pm_qos_lock, flags);
- prev_value = pm_qos_get_value(o);
- /* PM_QOS_DEFAULT_VALUE is a signal that the value is unchanged */
- if (value != PM_QOS_DEFAULT_VALUE) {
- /*
- * to change the list, we atomically remove, reinit
- * with new value and add, then see if the extremal
- * changed
- */
- plist_del(node, &o->requests);
- plist_node_init(node, value);
- plist_add(node, &o->requests);
- } else if (del) {
- plist_del(node, &o->requests);
- } else {
- plist_add(node, &o->requests);
- }
- curr_value = pm_qos_get_value(o);
- pm_qos_set_value(o, curr_value);
- spin_unlock_irqrestore(&pm_qos_lock, flags);
-
- if (prev_value != curr_value)
- blocking_notifier_call_chain(o->notifiers,
- (unsigned long)curr_value,
- NULL);
-}
-
-static int register_pm_qos_misc(struct pm_qos_object *qos)
-{
- qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
- qos->pm_qos_power_miscdev.name = qos->name;
- qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
-
- return misc_register(&qos->pm_qos_power_miscdev);
-}
-
-static int find_pm_qos_object_by_minor(int minor)
-{
- int pm_qos_class;
-
- for (pm_qos_class = 0;
- pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
- if (minor ==
- pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
- return pm_qos_class;
- }
- return -1;
-}
-
-/**
- * pm_qos_request - returns current system wide qos expectation
- * @pm_qos_class: identification of which qos value is requested
- *
- * This function returns the current target value.
- */
-int pm_qos_request(int pm_qos_class)
-{
- return pm_qos_read_value(pm_qos_array[pm_qos_class]);
-}
-EXPORT_SYMBOL_GPL(pm_qos_request);
-
-int pm_qos_request_active(struct pm_qos_request_list *req)
-{
- return req->pm_qos_class != 0;
-}
-EXPORT_SYMBOL_GPL(pm_qos_request_active);
-
-/**
- * pm_qos_add_request - inserts new qos request into the list
- * @dep: pointer to a preallocated handle
- * @pm_qos_class: identifies which list of qos request to use
- * @value: defines the qos request
- *
- * This function inserts a new entry in the pm_qos_class list of requested qos
- * performance characteristics. It recomputes the aggregate QoS expectations
- * for the pm_qos_class of parameters and initializes the pm_qos_request_list
- * handle. Caller needs to save this handle for later use in updates and
- * removal.
- */
-
-void pm_qos_add_request(struct pm_qos_request_list *dep,
- int pm_qos_class, s32 value)
-{
- struct pm_qos_object *o = pm_qos_array[pm_qos_class];
- int new_value;
-
- if (pm_qos_request_active(dep)) {
- WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
- return;
- }
- if (value == PM_QOS_DEFAULT_VALUE)
- new_value = o->default_value;
- else
- new_value = value;
- plist_node_init(&dep->list, new_value);
- dep->pm_qos_class = pm_qos_class;
- update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE);
-}
-EXPORT_SYMBOL_GPL(pm_qos_add_request);
-
-/**
- * pm_qos_update_request - modifies an existing qos request
- * @pm_qos_req : handle to list element holding a pm_qos request to use
- * @value: defines the qos request
- *
- * Updates an existing qos request for the pm_qos_class of parameters along
- * with updating the target pm_qos_class value.
- *
- * Attempts are made to make this code callable on hot code paths.
- */
-void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
- s32 new_value)
-{
- s32 temp;
- struct pm_qos_object *o;
-
- if (!pm_qos_req) /*guard against callers passing in null */
- return;
-
- if (!pm_qos_request_active(pm_qos_req)) {
- WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
- return;
- }
-
- o = pm_qos_array[pm_qos_req->pm_qos_class];
-
- if (new_value == PM_QOS_DEFAULT_VALUE)
- temp = o->default_value;
- else
- temp = new_value;
-
- if (temp != pm_qos_req->list.prio)
- update_target(o, &pm_qos_req->list, 0, temp);
-}
-EXPORT_SYMBOL_GPL(pm_qos_update_request);
-
-/**
- * pm_qos_remove_request - modifies an existing qos request
- * @pm_qos_req: handle to request list element
- *
- * Will remove pm qos request from the list of requests and
- * recompute the current target value for the pm_qos_class. Call this
- * on slow code paths.
- */
-void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req)
-{
- struct pm_qos_object *o;
-
- if (pm_qos_req == NULL)
- return;
- /* silent return to keep pcm code cleaner */
-
- if (!pm_qos_request_active(pm_qos_req)) {
- WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
- return;
- }
-
- o = pm_qos_array[pm_qos_req->pm_qos_class];
- update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE);
- memset(pm_qos_req, 0, sizeof(*pm_qos_req));
-}
-EXPORT_SYMBOL_GPL(pm_qos_remove_request);
-
-/**
- * pm_qos_add_notifier - sets notification entry for changes to target value
- * @pm_qos_class: identifies which qos target changes should be notified.
- * @notifier: notifier block managed by caller.
- *
- * will register the notifier into a notification chain that gets called
- * upon changes to the pm_qos_class target value.
- */
-int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
-{
- int retval;
-
- retval = blocking_notifier_chain_register(
- pm_qos_array[pm_qos_class]->notifiers, notifier);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
-
-/**
- * pm_qos_remove_notifier - deletes notification entry from chain.
- * @pm_qos_class: identifies which qos target changes are notified.
- * @notifier: notifier block to be removed.
- *
- * will remove the notifier from the notification chain that gets called
- * upon changes to the pm_qos_class target value.
- */
-int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
-{
- int retval;
-
- retval = blocking_notifier_chain_unregister(
- pm_qos_array[pm_qos_class]->notifiers, notifier);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
-
-static int pm_qos_power_open(struct inode *inode, struct file *filp)
-{
- long pm_qos_class;
-
- pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
- if (pm_qos_class >= 0) {
- struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
- pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
- filp->private_data = req;
-
- if (filp->private_data)
- return 0;
- }
- return -EPERM;
-}
-
-static int pm_qos_power_release(struct inode *inode, struct file *filp)
-{
- struct pm_qos_request_list *req;
-
- req = filp->private_data;
- pm_qos_remove_request(req);
- kfree(req);
-
- return 0;
-}
-
-
-static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
- size_t count, loff_t *f_pos)
-{
- s32 value;
- unsigned long flags;
- struct pm_qos_object *o;
- struct pm_qos_request_list *pm_qos_req = filp->private_data;
-
- if (!pm_qos_req)
- return -EINVAL;
- if (!pm_qos_request_active(pm_qos_req))
- return -EINVAL;
-
- o = pm_qos_array[pm_qos_req->pm_qos_class];
- spin_lock_irqsave(&pm_qos_lock, flags);
- value = pm_qos_get_value(o);
- spin_unlock_irqrestore(&pm_qos_lock, flags);
-
- return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
-}
-
-static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *f_pos)
-{
- s32 value;
- struct pm_qos_request_list *pm_qos_req;
-
- if (count == sizeof(s32)) {
- if (copy_from_user(&value, buf, sizeof(s32)))
- return -EFAULT;
- } else if (count <= 11) { /* ASCII perhaps? */
- char ascii_value[11];
- unsigned long int ulval;
- int ret;
-
- if (copy_from_user(ascii_value, buf, count))
- return -EFAULT;
-
- if (count > 10) {
- if (ascii_value[10] == '\n')
- ascii_value[10] = '\0';
- else
- return -EINVAL;
- } else {
- ascii_value[count] = '\0';
- }
- ret = strict_strtoul(ascii_value, 16, &ulval);
- if (ret) {
- pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
- return -EINVAL;
- }
- value = (s32)lower_32_bits(ulval);
- } else {
- return -EINVAL;
- }
-
- pm_qos_req = filp->private_data;
- pm_qos_update_request(pm_qos_req, value);
-
- return count;
-}
-
-
-static int __init pm_qos_power_init(void)
-{
- int ret = 0;
-
- ret = register_pm_qos_misc(&cpu_dma_pm_qos);
- if (ret < 0) {
- printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
- return ret;
- }
- ret = register_pm_qos_misc(&network_lat_pm_qos);
- if (ret < 0) {
- printk(KERN_ERR "pm_qos_param: network_latency setup failed\n");
- return ret;
- }
- ret = register_pm_qos_misc(&network_throughput_pm_qos);
- if (ret < 0)
- printk(KERN_ERR
- "pm_qos_param: network_throughput setup failed\n");
-
- return ret;
-}
-
-late_initcall(pm_qos_power_init);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c8008dd58ef..e7cb76dc18f 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -274,9 +274,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
struct task_cputime sum;
unsigned long flags;
- spin_lock_irqsave(&cputimer->lock, flags);
if (!cputimer->running) {
- cputimer->running = 1;
/*
* The POSIX timer interface allows for absolute time expiry
* values through the TIMER_ABSTIME flag, therefore we have
@@ -284,10 +282,13 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
* it.
*/
thread_group_cputime(tsk, &sum);
+ raw_spin_lock_irqsave(&cputimer->lock, flags);
+ cputimer->running = 1;
update_gt_cputime(&cputimer->cputime, &sum);
- }
+ } else
+ raw_spin_lock_irqsave(&cputimer->lock, flags);
*times = cputimer->cputime;
- spin_unlock_irqrestore(&cputimer->lock, flags);
+ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
/*
@@ -998,9 +999,9 @@ static void stop_process_timers(struct signal_struct *sig)
struct thread_group_cputimer *cputimer = &sig->cputimer;
unsigned long flags;
- spin_lock_irqsave(&cputimer->lock, flags);
+ raw_spin_lock_irqsave(&cputimer->lock, flags);
cputimer->running = 0;
- spin_unlock_irqrestore(&cputimer->lock, flags);
+ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
static u32 onecputick;
@@ -1290,9 +1291,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
if (sig->cputimer.running) {
struct task_cputime group_sample;
- spin_lock(&sig->cputimer.lock);
+ raw_spin_lock(&sig->cputimer.lock);
group_sample = sig->cputimer.cputime;
- spin_unlock(&sig->cputimer.lock);
+ raw_spin_unlock(&sig->cputimer.lock);
if (task_cputime_expired(&group_sample, &sig->cputime_expires))
return 1;
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 3744c594b19..cedd9982306 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -27,6 +27,7 @@ config HIBERNATION
select HIBERNATE_CALLBACKS
select LZO_COMPRESS
select LZO_DECOMPRESS
+ select CRC32
---help---
Enable the suspend to disk (STD) functionality, which is usually
called "hibernation" in user interfaces. STD checkpoints the
@@ -65,6 +66,9 @@ config HIBERNATION
For more information take a look at <file:Documentation/power/swsusp.txt>.
+config ARCH_SAVE_PAGE_KEYS
+ bool
+
config PM_STD_PARTITION
string "Default resume partition"
depends on HIBERNATION
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index c5ebc6a9064..07e0e28ffba 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -1,8 +1,8 @@
ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG
-obj-$(CONFIG_PM) += main.o
-obj-$(CONFIG_PM_SLEEP) += console.o
+obj-$(CONFIG_PM) += main.o qos.o
+obj-$(CONFIG_VT_CONSOLE_SLEEP) += console.o
obj-$(CONFIG_FREEZER) += process.o
obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 218e5af9015..b1dc456474b 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -1,5 +1,5 @@
/*
- * drivers/power/process.c - Functions for saving/restoring console.
+ * Functions for saving/restoring console.
*
* Originally from swsusp.
*/
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include "power.h"
-#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
static int orig_fgconsole, orig_kmsg;
@@ -32,4 +31,3 @@ void pm_restore_console(void)
vt_kmsg_redirect(orig_kmsg);
}
}
-#endif
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 8f7b1db1ece..1c53f7fad5f 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -14,6 +14,7 @@
#include <linux/reboot.h>
#include <linux/string.h>
#include <linux/device.h>
+#include <linux/async.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/fs.h>
@@ -29,12 +30,14 @@
#include "power.h"
-static int nocompress = 0;
-static int noresume = 0;
+static int nocompress;
+static int noresume;
+static int resume_wait;
+static int resume_delay;
static char resume_file[256] = CONFIG_PM_STD_PARTITION;
dev_t swsusp_resume_device;
sector_t swsusp_resume_block;
-int in_suspend __nosavedata = 0;
+int in_suspend __nosavedata;
enum {
HIBERNATION_INVALID,
@@ -334,13 +337,17 @@ int hibernation_snapshot(int platform_mode)
if (error)
goto Close;
- error = dpm_prepare(PMSG_FREEZE);
- if (error)
- goto Complete_devices;
-
/* Preallocate image memory before shutting down devices. */
error = hibernate_preallocate_memory();
if (error)
+ goto Close;
+
+ error = freeze_kernel_threads();
+ if (error)
+ goto Close;
+
+ error = dpm_prepare(PMSG_FREEZE);
+ if (error)
goto Complete_devices;
suspend_console();
@@ -463,7 +470,7 @@ static int resume_target_kernel(bool platform_mode)
* @platform_mode: If set, use platform driver to prepare for the transition.
*
* This routine must be called with pm_mutex held. If it is successful, control
- * reappears in the restored target kernel in hibernation_snaphot().
+ * reappears in the restored target kernel in hibernation_snapshot().
*/
int hibernation_restore(int platform_mode)
{
@@ -650,6 +657,9 @@ int hibernate(void)
flags |= SF_PLATFORM_MODE;
if (nocompress)
flags |= SF_NOCOMPRESS_MODE;
+ else
+ flags |= SF_CRC32_MODE;
+
pr_debug("PM: writing image.\n");
error = swsusp_write(flags);
swsusp_free();
@@ -724,6 +734,12 @@ static int software_resume(void)
pr_debug("PM: Checking hibernation image partition %s\n", resume_file);
+ if (resume_delay) {
+ printk(KERN_INFO "Waiting %dsec before reading resume device...\n",
+ resume_delay);
+ ssleep(resume_delay);
+ }
+
/* Check if the device is there */
swsusp_resume_device = name_to_dev_t(resume_file);
if (!swsusp_resume_device) {
@@ -732,6 +748,13 @@ static int software_resume(void)
* to wait for this to finish.
*/
wait_for_device_probe();
+
+ if (resume_wait) {
+ while ((swsusp_resume_device = name_to_dev_t(resume_file)) == 0)
+ msleep(10);
+ async_synchronize_full();
+ }
+
/*
* We can't depend on SCSI devices being available after loading
* one of their modules until scsi_complete_async_scans() is
@@ -1060,7 +1083,21 @@ static int __init noresume_setup(char *str)
return 1;
}
+static int __init resumewait_setup(char *str)
+{
+ resume_wait = 1;
+ return 1;
+}
+
+static int __init resumedelay_setup(char *str)
+{
+ resume_delay = simple_strtoul(str, NULL, 0);
+ return 1;
+}
+
__setup("noresume", noresume_setup);
__setup("resume_offset=", resume_offset_setup);
__setup("resume=", resume_setup);
__setup("hibernate=", hibernate_setup);
+__setup("resumewait", resumewait_setup);
+__setup("resumedelay=", resumedelay_setup);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 6c601f87196..a52e88425a3 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -12,6 +12,8 @@
#include <linux/string.h>
#include <linux/resume-trace.h>
#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include "power.h"
@@ -131,6 +133,101 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
power_attr(pm_test);
#endif /* CONFIG_PM_DEBUG */
+#ifdef CONFIG_DEBUG_FS
+static char *suspend_step_name(enum suspend_stat_step step)
+{
+ switch (step) {
+ case SUSPEND_FREEZE:
+ return "freeze";
+ case SUSPEND_PREPARE:
+ return "prepare";
+ case SUSPEND_SUSPEND:
+ return "suspend";
+ case SUSPEND_SUSPEND_NOIRQ:
+ return "suspend_noirq";
+ case SUSPEND_RESUME_NOIRQ:
+ return "resume_noirq";
+ case SUSPEND_RESUME:
+ return "resume";
+ default:
+ return "";
+ }
+}
+
+static int suspend_stats_show(struct seq_file *s, void *unused)
+{
+ int i, index, last_dev, last_errno, last_step;
+
+ last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+ last_dev %= REC_FAILED_NUM;
+ last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
+ last_errno %= REC_FAILED_NUM;
+ last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
+ last_step %= REC_FAILED_NUM;
+ seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
+ "%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
+ "success", suspend_stats.success,
+ "fail", suspend_stats.fail,
+ "failed_freeze", suspend_stats.failed_freeze,
+ "failed_prepare", suspend_stats.failed_prepare,
+ "failed_suspend", suspend_stats.failed_suspend,
+ "failed_suspend_noirq",
+ suspend_stats.failed_suspend_noirq,
+ "failed_resume", suspend_stats.failed_resume,
+ "failed_resume_noirq",
+ suspend_stats.failed_resume_noirq);
+ seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
+ suspend_stats.failed_devs[last_dev]);
+ for (i = 1; i < REC_FAILED_NUM; i++) {
+ index = last_dev + REC_FAILED_NUM - i;
+ index %= REC_FAILED_NUM;
+ seq_printf(s, "\t\t\t%-s\n",
+ suspend_stats.failed_devs[index]);
+ }
+ seq_printf(s, " last_failed_errno:\t%-d\n",
+ suspend_stats.errno[last_errno]);
+ for (i = 1; i < REC_FAILED_NUM; i++) {
+ index = last_errno + REC_FAILED_NUM - i;
+ index %= REC_FAILED_NUM;
+ seq_printf(s, "\t\t\t%-d\n",
+ suspend_stats.errno[index]);
+ }
+ seq_printf(s, " last_failed_step:\t%-s\n",
+ suspend_step_name(
+ suspend_stats.failed_steps[last_step]));
+ for (i = 1; i < REC_FAILED_NUM; i++) {
+ index = last_step + REC_FAILED_NUM - i;
+ index %= REC_FAILED_NUM;
+ seq_printf(s, "\t\t\t%-s\n",
+ suspend_step_name(
+ suspend_stats.failed_steps[index]));
+ }
+
+ return 0;
+}
+
+static int suspend_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, suspend_stats_show, NULL);
+}
+
+static const struct file_operations suspend_stats_operations = {
+ .open = suspend_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init pm_debugfs_init(void)
+{
+ debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
+ NULL, NULL, &suspend_stats_operations);
+ return 0;
+}
+
+late_initcall(pm_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
+
#endif /* CONFIG_PM_SLEEP */
struct kobject *power_kobj;
@@ -194,6 +291,11 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
}
if (state < PM_SUSPEND_MAX && *s)
error = enter_state(state);
+ if (error) {
+ suspend_stats.fail++;
+ dpm_save_failed_errno(error);
+ } else
+ suspend_stats.success++;
#endif
Exit:
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 9a00a0a2628..23a2db1ec44 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -146,6 +146,7 @@ extern int swsusp_swap_in_use(void);
*/
#define SF_PLATFORM_MODE 1
#define SF_NOCOMPRESS_MODE 2
+#define SF_CRC32_MODE 4
/* kernel/power/hibernate.c */
extern int swsusp_check(void);
@@ -228,7 +229,8 @@ extern int pm_test_level;
#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
- return freeze_processes();
+ int error = freeze_processes();
+ return error ? : freeze_kernel_threads();
}
static inline void suspend_thaw_processes(void)
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0cf3a27a6c9..addbbe5531b 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -135,7 +135,7 @@ static int try_to_freeze_tasks(bool sig_only)
}
/**
- * freeze_processes - tell processes to enter the refrigerator
+ * freeze_processes - Signal user space processes to enter the refrigerator.
*/
int freeze_processes(void)
{
@@ -143,20 +143,30 @@ int freeze_processes(void)
printk("Freezing user space processes ... ");
error = try_to_freeze_tasks(true);
- if (error)
- goto Exit;
- printk("done.\n");
+ if (!error) {
+ printk("done.");
+ oom_killer_disable();
+ }
+ printk("\n");
+ BUG_ON(in_atomic());
+
+ return error;
+}
+
+/**
+ * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
+ */
+int freeze_kernel_threads(void)
+{
+ int error;
printk("Freezing remaining freezable tasks ... ");
error = try_to_freeze_tasks(false);
- if (error)
- goto Exit;
- printk("done.");
+ if (!error)
+ printk("done.");
- oom_killer_disable();
- Exit:
- BUG_ON(in_atomic());
printk("\n");
+ BUG_ON(in_atomic());
return error;
}
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
new file mode 100644
index 00000000000..1c1797dd1d1
--- /dev/null
+++ b/kernel/power/qos.c
@@ -0,0 +1,490 @@
+/*
+ * This module exposes the interface to kernel space for specifying
+ * QoS dependencies. It provides infrastructure for registration of:
+ *
+ * Dependents on a QoS value : register requests
+ * Watchers of QoS value : get notified when target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ *
+ * There are 3 basic classes of QoS parameter: latency, timeout, throughput
+ * each have defined units:
+ * latency: usec
+ * timeout: usec <-- currently not used.
+ * throughput: kbs (kilo byte / sec)
+ *
+ * There are lists of pm_qos_objects each one wrapping requests, notifiers
+ *
+ * User mode requests on a QOS parameter register themselves to the
+ * subsystem by opening the device node /dev/... and writing there request to
+ * the node. As long as the process holds a file handle open to the node the
+ * client continues to be accounted for. Upon file release the usermode
+ * request is removed and a new qos target is computed. This way when the
+ * request that the application has is cleaned up when closes the file
+ * pointer or exits the pm_qos_object will get an opportunity to clean up.
+ *
+ * Mark Gross <mgross@linux.intel.com>
+ */
+
+/*#define DEBUG*/
+
+#include <linux/pm_qos.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <linux/uaccess.h>
+
+/*
+ * locking rule: all changes to constraints or notifiers lists
+ * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
+ * held, taken with _irqsave. One lock to rule them all
+ */
+struct pm_qos_object {
+ struct pm_qos_constraints *constraints;
+ struct miscdevice pm_qos_power_miscdev;
+ char *name;
+};
+
+static DEFINE_SPINLOCK(pm_qos_lock);
+
+static struct pm_qos_object null_pm_qos;
+
+static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
+static struct pm_qos_constraints cpu_dma_constraints = {
+ .list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
+ .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+ .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+ .type = PM_QOS_MIN,
+ .notifiers = &cpu_dma_lat_notifier,
+};
+static struct pm_qos_object cpu_dma_pm_qos = {
+ .constraints = &cpu_dma_constraints,
+};
+
+static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
+static struct pm_qos_constraints network_lat_constraints = {
+ .list = PLIST_HEAD_INIT(network_lat_constraints.list),
+ .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+ .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+ .type = PM_QOS_MIN,
+ .notifiers = &network_lat_notifier,
+};
+static struct pm_qos_object network_lat_pm_qos = {
+ .constraints = &network_lat_constraints,
+ .name = "network_latency",
+};
+
+
+static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
+static struct pm_qos_constraints network_tput_constraints = {
+ .list = PLIST_HEAD_INIT(network_tput_constraints.list),
+ .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+ .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+ .type = PM_QOS_MAX,
+ .notifiers = &network_throughput_notifier,
+};
+static struct pm_qos_object network_throughput_pm_qos = {
+ .constraints = &network_tput_constraints,
+ .name = "network_throughput",
+};
+
+
+static struct pm_qos_object *pm_qos_array[] = {
+ &null_pm_qos,
+ &cpu_dma_pm_qos,
+ &network_lat_pm_qos,
+ &network_throughput_pm_qos
+};
+
+static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos);
+static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos);
+static int pm_qos_power_open(struct inode *inode, struct file *filp);
+static int pm_qos_power_release(struct inode *inode, struct file *filp);
+
+static const struct file_operations pm_qos_power_fops = {
+ .write = pm_qos_power_write,
+ .read = pm_qos_power_read,
+ .open = pm_qos_power_open,
+ .release = pm_qos_power_release,
+ .llseek = noop_llseek,
+};
+
+/* unlocked internal variant */
+static inline int pm_qos_get_value(struct pm_qos_constraints *c)
+{
+ if (plist_head_empty(&c->list))
+ return c->default_value;
+
+ switch (c->type) {
+ case PM_QOS_MIN:
+ return plist_first(&c->list)->prio;
+
+ case PM_QOS_MAX:
+ return plist_last(&c->list)->prio;
+
+ default:
+ /* runtime check for not using enum */
+ BUG();
+ }
+}
+
+s32 pm_qos_read_value(struct pm_qos_constraints *c)
+{
+ return c->target_value;
+}
+
+static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
+{
+ c->target_value = value;
+}
+
+/**
+ * pm_qos_update_target - manages the constraints list and calls the notifiers
+ * if needed
+ * @c: constraints data struct
+ * @node: request to add to the list, to update or to remove
+ * @action: action to take on the constraints list
+ * @value: value of the request to add or update
+ *
+ * This function returns 1 if the aggregated constraint value has changed, 0
+ * otherwise.
+ */
+int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
+ enum pm_qos_req_action action, int value)
+{
+ unsigned long flags;
+ int prev_value, curr_value, new_value;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ prev_value = pm_qos_get_value(c);
+ if (value == PM_QOS_DEFAULT_VALUE)
+ new_value = c->default_value;
+ else
+ new_value = value;
+
+ switch (action) {
+ case PM_QOS_REMOVE_REQ:
+ plist_del(node, &c->list);
+ break;
+ case PM_QOS_UPDATE_REQ:
+ /*
+ * to change the list, we atomically remove, reinit
+ * with new value and add, then see if the extremal
+ * changed
+ */
+ plist_del(node, &c->list);
+ case PM_QOS_ADD_REQ:
+ plist_node_init(node, new_value);
+ plist_add(node, &c->list);
+ break;
+ default:
+ /* no action */
+ ;
+ }
+
+ curr_value = pm_qos_get_value(c);
+ pm_qos_set_value(c, curr_value);
+
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ if (prev_value != curr_value) {
+ blocking_notifier_call_chain(c->notifiers,
+ (unsigned long)curr_value,
+ NULL);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/**
+ * pm_qos_request - returns current system wide qos expectation
+ * @pm_qos_class: identification of which qos value is requested
+ *
+ * This function returns the current target value.
+ */
+int pm_qos_request(int pm_qos_class)
+{
+ return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
+}
+EXPORT_SYMBOL_GPL(pm_qos_request);
+
+int pm_qos_request_active(struct pm_qos_request *req)
+{
+ return req->pm_qos_class != 0;
+}
+EXPORT_SYMBOL_GPL(pm_qos_request_active);
+
+/**
+ * pm_qos_add_request - inserts new qos request into the list
+ * @req: pointer to a preallocated handle
+ * @pm_qos_class: identifies which list of qos request to use
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the pm_qos_class list of requested qos
+ * performance characteristics. It recomputes the aggregate QoS expectations
+ * for the pm_qos_class of parameters and initializes the pm_qos_request
+ * handle. Caller needs to save this handle for later use in updates and
+ * removal.
+ */
+
+void pm_qos_add_request(struct pm_qos_request *req,
+ int pm_qos_class, s32 value)
+{
+ if (!req) /*guard against callers passing in null */
+ return;
+
+ if (pm_qos_request_active(req)) {
+ WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
+ return;
+ }
+ req->pm_qos_class = pm_qos_class;
+ pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
+ &req->node, PM_QOS_ADD_REQ, value);
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_request);
+
+/**
+ * pm_qos_update_request - modifies an existing qos request
+ * @req : handle to list element holding a pm_qos request to use
+ * @value: defines the qos request
+ *
+ * Updates an existing qos request for the pm_qos_class of parameters along
+ * with updating the target pm_qos_class value.
+ *
+ * Attempts are made to make this code callable on hot code paths.
+ */
+void pm_qos_update_request(struct pm_qos_request *req,
+ s32 new_value)
+{
+ if (!req) /*guard against callers passing in null */
+ return;
+
+ if (!pm_qos_request_active(req)) {
+ WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
+ return;
+ }
+
+ if (new_value != req->node.prio)
+ pm_qos_update_target(
+ pm_qos_array[req->pm_qos_class]->constraints,
+ &req->node, PM_QOS_UPDATE_REQ, new_value);
+}
+EXPORT_SYMBOL_GPL(pm_qos_update_request);
+
+/**
+ * pm_qos_remove_request - modifies an existing qos request
+ * @req: handle to request list element
+ *
+ * Will remove pm qos request from the list of constraints and
+ * recompute the current target value for the pm_qos_class. Call this
+ * on slow code paths.
+ */
+void pm_qos_remove_request(struct pm_qos_request *req)
+{
+ if (!req) /*guard against callers passing in null */
+ return;
+ /* silent return to keep pcm code cleaner */
+
+ if (!pm_qos_request_active(req)) {
+ WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+ return;
+ }
+
+ pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
+ &req->node, PM_QOS_REMOVE_REQ,
+ PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+}
+EXPORT_SYMBOL_GPL(pm_qos_remove_request);
+
+/**
+ * pm_qos_add_notifier - sets notification entry for changes to target value
+ * @pm_qos_class: identifies which qos target changes should be notified.
+ * @notifier: notifier block managed by caller.
+ *
+ * will register the notifier into a notification chain that gets called
+ * upon changes to the pm_qos_class target value.
+ */
+int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+ int retval;
+
+ retval = blocking_notifier_chain_register(
+ pm_qos_array[pm_qos_class]->constraints->notifiers,
+ notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
+
+/**
+ * pm_qos_remove_notifier - deletes notification entry from chain.
+ * @pm_qos_class: identifies which qos target changes are notified.
+ * @notifier: notifier block to be removed.
+ *
+ * will remove the notifier from the notification chain that gets called
+ * upon changes to the pm_qos_class target value.
+ */
+int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+ int retval;
+
+ retval = blocking_notifier_chain_unregister(
+ pm_qos_array[pm_qos_class]->constraints->notifiers,
+ notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
+
+/* User space interface to PM QoS classes via misc devices */
+static int register_pm_qos_misc(struct pm_qos_object *qos)
+{
+ qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
+ qos->pm_qos_power_miscdev.name = qos->name;
+ qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
+
+ return misc_register(&qos->pm_qos_power_miscdev);
+}
+
+static int find_pm_qos_object_by_minor(int minor)
+{
+ int pm_qos_class;
+
+ for (pm_qos_class = 0;
+ pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
+ if (minor ==
+ pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
+ return pm_qos_class;
+ }
+ return -1;
+}
+
+static int pm_qos_power_open(struct inode *inode, struct file *filp)
+{
+ long pm_qos_class;
+
+ pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
+ if (pm_qos_class >= 0) {
+ struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
+ filp->private_data = req;
+
+ if (filp->private_data)
+ return 0;
+ }
+ return -EPERM;
+}
+
+static int pm_qos_power_release(struct inode *inode, struct file *filp)
+{
+ struct pm_qos_request *req;
+
+ req = filp->private_data;
+ pm_qos_remove_request(req);
+ kfree(req);
+
+ return 0;
+}
+
+
+static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ s32 value;
+ unsigned long flags;
+ struct pm_qos_request *req = filp->private_data;
+
+ if (!req)
+ return -EINVAL;
+ if (!pm_qos_request_active(req))
+ return -EINVAL;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
+}
+
+static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ s32 value;
+ struct pm_qos_request *req;
+
+ if (count == sizeof(s32)) {
+ if (copy_from_user(&value, buf, sizeof(s32)))
+ return -EFAULT;
+ } else if (count <= 11) { /* ASCII perhaps? */
+ char ascii_value[11];
+ unsigned long int ulval;
+ int ret;
+
+ if (copy_from_user(ascii_value, buf, count))
+ return -EFAULT;
+
+ if (count > 10) {
+ if (ascii_value[10] == '\n')
+ ascii_value[10] = '\0';
+ else
+ return -EINVAL;
+ } else {
+ ascii_value[count] = '\0';
+ }
+ ret = strict_strtoul(ascii_value, 16, &ulval);
+ if (ret) {
+ pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
+ return -EINVAL;
+ }
+ value = (s32)lower_32_bits(ulval);
+ } else {
+ return -EINVAL;
+ }
+
+ req = filp->private_data;
+ pm_qos_update_request(req, value);
+
+ return count;
+}
+
+
+static int __init pm_qos_power_init(void)
+{
+ int ret = 0;
+
+ ret = register_pm_qos_misc(&cpu_dma_pm_qos);
+ if (ret < 0) {
+ printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
+ return ret;
+ }
+ ret = register_pm_qos_misc(&network_lat_pm_qos);
+ if (ret < 0) {
+ printk(KERN_ERR "pm_qos_param: network_latency setup failed\n");
+ return ret;
+ }
+ ret = register_pm_qos_misc(&network_throughput_pm_qos);
+ if (ret < 0)
+ printk(KERN_ERR
+ "pm_qos_param: network_throughput setup failed\n");
+
+ return ret;
+}
+
+late_initcall(pm_qos_power_init);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 06efa54f93d..cbe2c144139 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1339,6 +1339,9 @@ int hibernate_preallocate_memory(void)
count += highmem;
count -= totalreserve_pages;
+ /* Add number of pages required for page keys (s390 only). */
+ size += page_key_additional_pages(saveable);
+
/* Compute the maximum number of saveable pages to leave in memory. */
max_size = (count - (size + PAGES_FOR_IO)) / 2
- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
@@ -1662,6 +1665,8 @@ pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
buf[j] = memory_bm_next_pfn(bm);
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
+ /* Save page key for data page (s390 only). */
+ page_key_read(buf + j);
}
}
@@ -1821,6 +1826,9 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
+ /* Extract and buffer page key for data page (s390 only). */
+ page_key_memorize(buf + j);
+
if (memory_bm_pfn_present(bm, buf[j]))
memory_bm_set_bit(bm, buf[j]);
else
@@ -2223,6 +2231,11 @@ int snapshot_write_next(struct snapshot_handle *handle)
if (error)
return error;
+ /* Allocate buffer for page keys. */
+ error = page_key_alloc(nr_copy_pages);
+ if (error)
+ return error;
+
} else if (handle->cur <= nr_meta_pages + 1) {
error = unpack_orig_pfns(buffer, &copy_bm);
if (error)
@@ -2243,6 +2256,8 @@ int snapshot_write_next(struct snapshot_handle *handle)
}
} else {
copy_last_highmem_page();
+ /* Restore page key for data page (s390 only). */
+ page_key_write(handle->buffer);
handle->buffer = get_buffer(&orig_bm, &ca);
if (IS_ERR(handle->buffer))
return PTR_ERR(handle->buffer);
@@ -2264,6 +2279,9 @@ int snapshot_write_next(struct snapshot_handle *handle)
void snapshot_write_finalize(struct snapshot_handle *handle)
{
copy_last_highmem_page();
+ /* Restore page key for data page (s390 only). */
+ page_key_write(handle->buffer);
+ page_key_free();
/* Free only if we have loaded the image entirely */
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index b6b71ad2208..fdd4263b995 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -104,7 +104,10 @@ static int suspend_prepare(void)
goto Finish;
error = suspend_freeze_processes();
- if (!error)
+ if (error) {
+ suspend_stats.failed_freeze++;
+ dpm_save_failed_step(SUSPEND_FREEZE);
+ } else
return 0;
suspend_thaw_processes();
@@ -315,8 +318,16 @@ int enter_state(suspend_state_t state)
*/
int pm_suspend(suspend_state_t state)
{
- if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
- return enter_state(state);
+ int ret;
+ if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX) {
+ ret = enter_state(state);
+ if (ret) {
+ suspend_stats.fail++;
+ dpm_save_failed_errno(ret);
+ } else
+ suspend_stats.success++;
+ return ret;
+ }
return -EINVAL;
}
EXPORT_SYMBOL(pm_suspend);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 7c97c3a0eee..11a594c4ba2 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -27,6 +27,10 @@
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
+#include <linux/cpumask.h>
+#include <linux/atomic.h>
+#include <linux/kthread.h>
+#include <linux/crc32.h>
#include "power.h"
@@ -43,8 +47,7 @@
* allocated and populated one at a time, so we only need one memory
* page to set up the entire structure.
*
- * During resume we also only need to use one swap_map_page structure
- * at a time.
+ * During resume we pick up all swap_map_page structures into a list.
*/
#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
@@ -54,6 +57,11 @@ struct swap_map_page {
sector_t next_swap;
};
+struct swap_map_page_list {
+ struct swap_map_page *map;
+ struct swap_map_page_list *next;
+};
+
/**
* The swap_map_handle structure is used for handling swap in
* a file-alike way
@@ -61,13 +69,18 @@ struct swap_map_page {
struct swap_map_handle {
struct swap_map_page *cur;
+ struct swap_map_page_list *maps;
sector_t cur_swap;
sector_t first_sector;
unsigned int k;
+ unsigned long nr_free_pages, written;
+ u32 crc32;
};
struct swsusp_header {
- char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)];
+ char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
+ sizeof(u32)];
+ u32 crc32;
sector_t image;
unsigned int flags; /* Flags to pass to the "boot" kernel */
char orig_sig[10];
@@ -199,6 +212,8 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
swsusp_header->image = handle->first_sector;
swsusp_header->flags = flags;
+ if (flags & SF_CRC32_MODE)
+ swsusp_header->crc32 = handle->crc32;
error = hib_bio_write_page(swsusp_resume_block,
swsusp_header, NULL);
} else {
@@ -245,6 +260,7 @@ static int swsusp_swap_check(void)
static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
{
void *src;
+ int ret;
if (!offset)
return -ENOSPC;
@@ -254,9 +270,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
if (src) {
copy_page(src, buf);
} else {
- WARN_ON_ONCE(1);
- bio_chain = NULL; /* Go synchronous */
- src = buf;
+ ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
+ if (ret)
+ return ret;
+ src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+ if (src) {
+ copy_page(src, buf);
+ } else {
+ WARN_ON_ONCE(1);
+ bio_chain = NULL; /* Go synchronous */
+ src = buf;
+ }
}
} else {
src = buf;
@@ -293,6 +317,8 @@ static int get_swap_writer(struct swap_map_handle *handle)
goto err_rel;
}
handle->k = 0;
+ handle->nr_free_pages = nr_free_pages() >> 1;
+ handle->written = 0;
handle->first_sector = handle->cur_swap;
return 0;
err_rel:
@@ -316,20 +342,23 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
return error;
handle->cur->entries[handle->k++] = offset;
if (handle->k >= MAP_PAGE_ENTRIES) {
- error = hib_wait_on_bio_chain(bio_chain);
- if (error)
- goto out;
offset = alloc_swapdev_block(root_swap);
if (!offset)
return -ENOSPC;
handle->cur->next_swap = offset;
- error = write_page(handle->cur, handle->cur_swap, NULL);
+ error = write_page(handle->cur, handle->cur_swap, bio_chain);
if (error)
goto out;
clear_page(handle->cur);
handle->cur_swap = offset;
handle->k = 0;
}
+ if (bio_chain && ++handle->written > handle->nr_free_pages) {
+ error = hib_wait_on_bio_chain(bio_chain);
+ if (error)
+ goto out;
+ handle->written = 0;
+ }
out:
return error;
}
@@ -372,6 +401,13 @@ static int swap_writer_finish(struct swap_map_handle *handle,
LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE)
+/* Maximum number of threads for compression/decompression. */
+#define LZO_THREADS 3
+
+/* Maximum number of pages for read buffering. */
+#define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8)
+
+
/**
* save_image - save the suspend image data
*/
@@ -419,6 +455,92 @@ static int save_image(struct swap_map_handle *handle,
return ret;
}
+/**
+ * Structure used for CRC32.
+ */
+struct crc_data {
+ struct task_struct *thr; /* thread */
+ atomic_t ready; /* ready to start flag */
+ atomic_t stop; /* ready to stop flag */
+ unsigned run_threads; /* nr current threads */
+ wait_queue_head_t go; /* start crc update */
+ wait_queue_head_t done; /* crc update done */
+ u32 *crc32; /* points to handle's crc32 */
+ size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */
+ unsigned char *unc[LZO_THREADS]; /* uncompressed data */
+};
+
+/**
+ * CRC32 update function that runs in its own thread.
+ */
+static int crc32_threadfn(void *data)
+{
+ struct crc_data *d = data;
+ unsigned i;
+
+ while (1) {
+ wait_event(d->go, atomic_read(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+ atomic_set(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+ atomic_set(&d->ready, 0);
+
+ for (i = 0; i < d->run_threads; i++)
+ *d->crc32 = crc32_le(*d->crc32,
+ d->unc[i], *d->unc_len[i]);
+ atomic_set(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+}
+/**
+ * Structure used for LZO data compression.
+ */
+struct cmp_data {
+ struct task_struct *thr; /* thread */
+ atomic_t ready; /* ready to start flag */
+ atomic_t stop; /* ready to stop flag */
+ int ret; /* return code */
+ wait_queue_head_t go; /* start compression */
+ wait_queue_head_t done; /* compression done */
+ size_t unc_len; /* uncompressed length */
+ size_t cmp_len; /* compressed length */
+ unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
+ unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
+ unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */
+};
+
+/**
+ * Compression function that runs in its own thread.
+ */
+static int lzo_compress_threadfn(void *data)
+{
+ struct cmp_data *d = data;
+
+ while (1) {
+ wait_event(d->go, atomic_read(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+ d->ret = -1;
+ atomic_set(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+ atomic_set(&d->ready, 0);
+
+ d->ret = lzo1x_1_compress(d->unc, d->unc_len,
+ d->cmp + LZO_HEADER, &d->cmp_len,
+ d->wrk);
+ atomic_set(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+}
/**
* save_image_lzo - Save the suspend image data compressed with LZO.
@@ -437,42 +559,93 @@ static int save_image_lzo(struct swap_map_handle *handle,
struct bio *bio;
struct timeval start;
struct timeval stop;
- size_t off, unc_len, cmp_len;
- unsigned char *unc, *cmp, *wrk, *page;
+ size_t off;
+ unsigned thr, run_threads, nr_threads;
+ unsigned char *page = NULL;
+ struct cmp_data *data = NULL;
+ struct crc_data *crc = NULL;
+
+ /*
+ * We'll limit the number of threads for compression to limit memory
+ * footprint.
+ */
+ nr_threads = num_online_cpus() - 1;
+ nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
if (!page) {
printk(KERN_ERR "PM: Failed to allocate LZO page\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_clean;
}
- wrk = vmalloc(LZO1X_1_MEM_COMPRESS);
- if (!wrk) {
- printk(KERN_ERR "PM: Failed to allocate LZO workspace\n");
- free_page((unsigned long)page);
- return -ENOMEM;
+ data = vmalloc(sizeof(*data) * nr_threads);
+ if (!data) {
+ printk(KERN_ERR "PM: Failed to allocate LZO data\n");
+ ret = -ENOMEM;
+ goto out_clean;
}
+ for (thr = 0; thr < nr_threads; thr++)
+ memset(&data[thr], 0, offsetof(struct cmp_data, go));
- unc = vmalloc(LZO_UNC_SIZE);
- if (!unc) {
- printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");
- vfree(wrk);
- free_page((unsigned long)page);
- return -ENOMEM;
+ crc = kmalloc(sizeof(*crc), GFP_KERNEL);
+ if (!crc) {
+ printk(KERN_ERR "PM: Failed to allocate crc\n");
+ ret = -ENOMEM;
+ goto out_clean;
+ }
+ memset(crc, 0, offsetof(struct crc_data, go));
+
+ /*
+ * Start the compression threads.
+ */
+ for (thr = 0; thr < nr_threads; thr++) {
+ init_waitqueue_head(&data[thr].go);
+ init_waitqueue_head(&data[thr].done);
+
+ data[thr].thr = kthread_run(lzo_compress_threadfn,
+ &data[thr],
+ "image_compress/%u", thr);
+ if (IS_ERR(data[thr].thr)) {
+ data[thr].thr = NULL;
+ printk(KERN_ERR
+ "PM: Cannot start compression threads\n");
+ ret = -ENOMEM;
+ goto out_clean;
+ }
}
- cmp = vmalloc(LZO_CMP_SIZE);
- if (!cmp) {
- printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");
- vfree(unc);
- vfree(wrk);
- free_page((unsigned long)page);
- return -ENOMEM;
+ /*
+ * Adjust number of free pages after all allocations have been done.
+ * We don't want to run out of pages when writing.
+ */
+ handle->nr_free_pages = nr_free_pages() >> 1;
+
+ /*
+ * Start the CRC32 thread.
+ */
+ init_waitqueue_head(&crc->go);
+ init_waitqueue_head(&crc->done);
+
+ handle->crc32 = 0;
+ crc->crc32 = &handle->crc32;
+ for (thr = 0; thr < nr_threads; thr++) {
+ crc->unc[thr] = data[thr].unc;
+ crc->unc_len[thr] = &data[thr].unc_len;
+ }
+
+ crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
+ if (IS_ERR(crc->thr)) {
+ crc->thr = NULL;
+ printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
+ ret = -ENOMEM;
+ goto out_clean;
}
printk(KERN_INFO
+ "PM: Using %u thread(s) for compression.\n"
"PM: Compressing and saving image data (%u pages) ... ",
- nr_to_write);
+ nr_threads, nr_to_write);
m = nr_to_write / 100;
if (!m)
m = 1;
@@ -480,55 +653,83 @@ static int save_image_lzo(struct swap_map_handle *handle,
bio = NULL;
do_gettimeofday(&start);
for (;;) {
- for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
- ret = snapshot_read_next(snapshot);
- if (ret < 0)
- goto out_finish;
-
- if (!ret)
+ for (thr = 0; thr < nr_threads; thr++) {
+ for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
+ ret = snapshot_read_next(snapshot);
+ if (ret < 0)
+ goto out_finish;
+
+ if (!ret)
+ break;
+
+ memcpy(data[thr].unc + off,
+ data_of(*snapshot), PAGE_SIZE);
+
+ if (!(nr_pages % m))
+ printk(KERN_CONT "\b\b\b\b%3d%%",
+ nr_pages / m);
+ nr_pages++;
+ }
+ if (!off)
break;
- memcpy(unc + off, data_of(*snapshot), PAGE_SIZE);
+ data[thr].unc_len = off;
- if (!(nr_pages % m))
- printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
- nr_pages++;
+ atomic_set(&data[thr].ready, 1);
+ wake_up(&data[thr].go);
}
- if (!off)
+ if (!thr)
break;
- unc_len = off;
- ret = lzo1x_1_compress(unc, unc_len,
- cmp + LZO_HEADER, &cmp_len, wrk);
- if (ret < 0) {
- printk(KERN_ERR "PM: LZO compression failed\n");
- break;
- }
+ crc->run_threads = thr;
+ atomic_set(&crc->ready, 1);
+ wake_up(&crc->go);
- if (unlikely(!cmp_len ||
- cmp_len > lzo1x_worst_compress(unc_len))) {
- printk(KERN_ERR "PM: Invalid LZO compressed length\n");
- ret = -1;
- break;
- }
+ for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
+ wait_event(data[thr].done,
+ atomic_read(&data[thr].stop));
+ atomic_set(&data[thr].stop, 0);
- *(size_t *)cmp = cmp_len;
+ ret = data[thr].ret;
- /*
- * Given we are writing one page at a time to disk, we copy
- * that much from the buffer, although the last bit will likely
- * be smaller than full page. This is OK - we saved the length
- * of the compressed data, so any garbage at the end will be
- * discarded when we read it.
- */
- for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) {
- memcpy(page, cmp + off, PAGE_SIZE);
+ if (ret < 0) {
+ printk(KERN_ERR "PM: LZO compression failed\n");
+ goto out_finish;
+ }
- ret = swap_write_page(handle, page, &bio);
- if (ret)
+ if (unlikely(!data[thr].cmp_len ||
+ data[thr].cmp_len >
+ lzo1x_worst_compress(data[thr].unc_len))) {
+ printk(KERN_ERR
+ "PM: Invalid LZO compressed length\n");
+ ret = -1;
goto out_finish;
+ }
+
+ *(size_t *)data[thr].cmp = data[thr].cmp_len;
+
+ /*
+ * Given we are writing one page at a time to disk, we
+ * copy that much from the buffer, although the last
+ * bit will likely be smaller than full page. This is
+ * OK - we saved the length of the compressed data, so
+ * any garbage at the end will be discarded when we
+ * read it.
+ */
+ for (off = 0;
+ off < LZO_HEADER + data[thr].cmp_len;
+ off += PAGE_SIZE) {
+ memcpy(page, data[thr].cmp + off, PAGE_SIZE);
+
+ ret = swap_write_page(handle, page, &bio);
+ if (ret)
+ goto out_finish;
+ }
}
+
+ wait_event(crc->done, atomic_read(&crc->stop));
+ atomic_set(&crc->stop, 0);
}
out_finish:
@@ -536,16 +737,25 @@ out_finish:
do_gettimeofday(&stop);
if (!ret)
ret = err2;
- if (!ret)
+ if (!ret) {
printk(KERN_CONT "\b\b\b\bdone\n");
- else
+ } else {
printk(KERN_CONT "\n");
+ }
swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
-
- vfree(cmp);
- vfree(unc);
- vfree(wrk);
- free_page((unsigned long)page);
+out_clean:
+ if (crc) {
+ if (crc->thr)
+ kthread_stop(crc->thr);
+ kfree(crc);
+ }
+ if (data) {
+ for (thr = 0; thr < nr_threads; thr++)
+ if (data[thr].thr)
+ kthread_stop(data[thr].thr);
+ vfree(data);
+ }
+ if (page) free_page((unsigned long)page);
return ret;
}
@@ -625,8 +835,15 @@ out_finish:
static void release_swap_reader(struct swap_map_handle *handle)
{
- if (handle->cur)
- free_page((unsigned long)handle->cur);
+ struct swap_map_page_list *tmp;
+
+ while (handle->maps) {
+ if (handle->maps->map)
+ free_page((unsigned long)handle->maps->map);
+ tmp = handle->maps;
+ handle->maps = handle->maps->next;
+ kfree(tmp);
+ }
handle->cur = NULL;
}
@@ -634,22 +851,46 @@ static int get_swap_reader(struct swap_map_handle *handle,
unsigned int *flags_p)
{
int error;
+ struct swap_map_page_list *tmp, *last;
+ sector_t offset;
*flags_p = swsusp_header->flags;
if (!swsusp_header->image) /* how can this happen? */
return -EINVAL;
- handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH);
- if (!handle->cur)
- return -ENOMEM;
+ handle->cur = NULL;
+ last = handle->maps = NULL;
+ offset = swsusp_header->image;
+ while (offset) {
+ tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
+ if (!tmp) {
+ release_swap_reader(handle);
+ return -ENOMEM;
+ }
+ memset(tmp, 0, sizeof(*tmp));
+ if (!handle->maps)
+ handle->maps = tmp;
+ if (last)
+ last->next = tmp;
+ last = tmp;
+
+ tmp->map = (struct swap_map_page *)
+ __get_free_page(__GFP_WAIT | __GFP_HIGH);
+ if (!tmp->map) {
+ release_swap_reader(handle);
+ return -ENOMEM;
+ }
- error = hib_bio_read_page(swsusp_header->image, handle->cur, NULL);
- if (error) {
- release_swap_reader(handle);
- return error;
+ error = hib_bio_read_page(offset, tmp->map, NULL);
+ if (error) {
+ release_swap_reader(handle);
+ return error;
+ }
+ offset = tmp->map->next_swap;
}
handle->k = 0;
+ handle->cur = handle->maps->map;
return 0;
}
@@ -658,6 +899,7 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf,
{
sector_t offset;
int error;
+ struct swap_map_page_list *tmp;
if (!handle->cur)
return -EINVAL;
@@ -668,13 +910,15 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf,
if (error)
return error;
if (++handle->k >= MAP_PAGE_ENTRIES) {
- error = hib_wait_on_bio_chain(bio_chain);
handle->k = 0;
- offset = handle->cur->next_swap;
- if (!offset)
+ free_page((unsigned long)handle->maps->map);
+ tmp = handle->maps;
+ handle->maps = handle->maps->next;
+ kfree(tmp);
+ if (!handle->maps)
release_swap_reader(handle);
- else if (!error)
- error = hib_bio_read_page(offset, handle->cur, NULL);
+ else
+ handle->cur = handle->maps->map;
}
return error;
}
@@ -697,7 +941,7 @@ static int load_image(struct swap_map_handle *handle,
unsigned int nr_to_read)
{
unsigned int m;
- int error = 0;
+ int ret = 0;
struct timeval start;
struct timeval stop;
struct bio *bio;
@@ -713,15 +957,15 @@ static int load_image(struct swap_map_handle *handle,
bio = NULL;
do_gettimeofday(&start);
for ( ; ; ) {
- error = snapshot_write_next(snapshot);
- if (error <= 0)
+ ret = snapshot_write_next(snapshot);
+ if (ret <= 0)
break;
- error = swap_read_page(handle, data_of(*snapshot), &bio);
- if (error)
+ ret = swap_read_page(handle, data_of(*snapshot), &bio);
+ if (ret)
break;
if (snapshot->sync_read)
- error = hib_wait_on_bio_chain(&bio);
- if (error)
+ ret = hib_wait_on_bio_chain(&bio);
+ if (ret)
break;
if (!(nr_pages % m))
printk("\b\b\b\b%3d%%", nr_pages / m);
@@ -729,17 +973,61 @@ static int load_image(struct swap_map_handle *handle,
}
err2 = hib_wait_on_bio_chain(&bio);
do_gettimeofday(&stop);
- if (!error)
- error = err2;
- if (!error) {
+ if (!ret)
+ ret = err2;
+ if (!ret) {
printk("\b\b\b\bdone\n");
snapshot_write_finalize(snapshot);
if (!snapshot_image_loaded(snapshot))
- error = -ENODATA;
+ ret = -ENODATA;
} else
printk("\n");
swsusp_show_speed(&start, &stop, nr_to_read, "Read");
- return error;
+ return ret;
+}
+
+/**
+ * Structure used for LZO data decompression.
+ */
+struct dec_data {
+ struct task_struct *thr; /* thread */
+ atomic_t ready; /* ready to start flag */
+ atomic_t stop; /* ready to stop flag */
+ int ret; /* return code */
+ wait_queue_head_t go; /* start decompression */
+ wait_queue_head_t done; /* decompression done */
+ size_t unc_len; /* uncompressed length */
+ size_t cmp_len; /* compressed length */
+ unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
+ unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
+};
+
+/**
+ * Deompression function that runs in its own thread.
+ */
+static int lzo_decompress_threadfn(void *data)
+{
+ struct dec_data *d = data;
+
+ while (1) {
+ wait_event(d->go, atomic_read(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+ d->ret = -1;
+ atomic_set(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+ atomic_set(&d->ready, 0);
+
+ d->unc_len = LZO_UNC_SIZE;
+ d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
+ d->unc, &d->unc_len);
+ atomic_set(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
}
/**
@@ -753,50 +1041,120 @@ static int load_image_lzo(struct swap_map_handle *handle,
unsigned int nr_to_read)
{
unsigned int m;
- int error = 0;
+ int ret = 0;
+ int eof = 0;
struct bio *bio;
struct timeval start;
struct timeval stop;
unsigned nr_pages;
- size_t i, off, unc_len, cmp_len;
- unsigned char *unc, *cmp, *page[LZO_CMP_PAGES];
-
- for (i = 0; i < LZO_CMP_PAGES; i++) {
- page[i] = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
- if (!page[i]) {
- printk(KERN_ERR "PM: Failed to allocate LZO page\n");
+ size_t off;
+ unsigned i, thr, run_threads, nr_threads;
+ unsigned ring = 0, pg = 0, ring_size = 0,
+ have = 0, want, need, asked = 0;
+ unsigned long read_pages;
+ unsigned char **page = NULL;
+ struct dec_data *data = NULL;
+ struct crc_data *crc = NULL;
+
+ /*
+ * We'll limit the number of threads for decompression to limit memory
+ * footprint.
+ */
+ nr_threads = num_online_cpus() - 1;
+ nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
+
+ page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
+ if (!page) {
+ printk(KERN_ERR "PM: Failed to allocate LZO page\n");
+ ret = -ENOMEM;
+ goto out_clean;
+ }
- while (i)
- free_page((unsigned long)page[--i]);
+ data = vmalloc(sizeof(*data) * nr_threads);
+ if (!data) {
+ printk(KERN_ERR "PM: Failed to allocate LZO data\n");
+ ret = -ENOMEM;
+ goto out_clean;
+ }
+ for (thr = 0; thr < nr_threads; thr++)
+ memset(&data[thr], 0, offsetof(struct dec_data, go));
- return -ENOMEM;
+ crc = kmalloc(sizeof(*crc), GFP_KERNEL);
+ if (!crc) {
+ printk(KERN_ERR "PM: Failed to allocate crc\n");
+ ret = -ENOMEM;
+ goto out_clean;
+ }
+ memset(crc, 0, offsetof(struct crc_data, go));
+
+ /*
+ * Start the decompression threads.
+ */
+ for (thr = 0; thr < nr_threads; thr++) {
+ init_waitqueue_head(&data[thr].go);
+ init_waitqueue_head(&data[thr].done);
+
+ data[thr].thr = kthread_run(lzo_decompress_threadfn,
+ &data[thr],
+ "image_decompress/%u", thr);
+ if (IS_ERR(data[thr].thr)) {
+ data[thr].thr = NULL;
+ printk(KERN_ERR
+ "PM: Cannot start decompression threads\n");
+ ret = -ENOMEM;
+ goto out_clean;
}
}
- unc = vmalloc(LZO_UNC_SIZE);
- if (!unc) {
- printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");
-
- for (i = 0; i < LZO_CMP_PAGES; i++)
- free_page((unsigned long)page[i]);
-
- return -ENOMEM;
+ /*
+ * Start the CRC32 thread.
+ */
+ init_waitqueue_head(&crc->go);
+ init_waitqueue_head(&crc->done);
+
+ handle->crc32 = 0;
+ crc->crc32 = &handle->crc32;
+ for (thr = 0; thr < nr_threads; thr++) {
+ crc->unc[thr] = data[thr].unc;
+ crc->unc_len[thr] = &data[thr].unc_len;
}
- cmp = vmalloc(LZO_CMP_SIZE);
- if (!cmp) {
- printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");
+ crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
+ if (IS_ERR(crc->thr)) {
+ crc->thr = NULL;
+ printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
+ ret = -ENOMEM;
+ goto out_clean;
+ }
- vfree(unc);
- for (i = 0; i < LZO_CMP_PAGES; i++)
- free_page((unsigned long)page[i]);
+ /*
+ * Adjust number of pages for read buffering, in case we are short.
+ */
+ read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
+ read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
- return -ENOMEM;
+ for (i = 0; i < read_pages; i++) {
+ page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
+ __GFP_WAIT | __GFP_HIGH :
+ __GFP_WAIT);
+ if (!page[i]) {
+ if (i < LZO_CMP_PAGES) {
+ ring_size = i;
+ printk(KERN_ERR
+ "PM: Failed to allocate LZO pages\n");
+ ret = -ENOMEM;
+ goto out_clean;
+ } else {
+ break;
+ }
+ }
}
+ want = ring_size = i;
printk(KERN_INFO
+ "PM: Using %u thread(s) for decompression.\n"
"PM: Loading and decompressing image data (%u pages) ... ",
- nr_to_read);
+ nr_threads, nr_to_read);
m = nr_to_read / 100;
if (!m)
m = 1;
@@ -804,85 +1162,189 @@ static int load_image_lzo(struct swap_map_handle *handle,
bio = NULL;
do_gettimeofday(&start);
- error = snapshot_write_next(snapshot);
- if (error <= 0)
+ ret = snapshot_write_next(snapshot);
+ if (ret <= 0)
goto out_finish;
- for (;;) {
- error = swap_read_page(handle, page[0], NULL); /* sync */
- if (error)
- break;
-
- cmp_len = *(size_t *)page[0];
- if (unlikely(!cmp_len ||
- cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) {
- printk(KERN_ERR "PM: Invalid LZO compressed length\n");
- error = -1;
- break;
+ for(;;) {
+ for (i = 0; !eof && i < want; i++) {
+ ret = swap_read_page(handle, page[ring], &bio);
+ if (ret) {
+ /*
+ * On real read error, finish. On end of data,
+ * set EOF flag and just exit the read loop.
+ */
+ if (handle->cur &&
+ handle->cur->entries[handle->k]) {
+ goto out_finish;
+ } else {
+ eof = 1;
+ break;
+ }
+ }
+ if (++ring >= ring_size)
+ ring = 0;
}
+ asked += i;
+ want -= i;
- for (off = PAGE_SIZE, i = 1;
- off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
- error = swap_read_page(handle, page[i], &bio);
- if (error)
+ /*
+ * We are out of data, wait for some more.
+ */
+ if (!have) {
+ if (!asked)
+ break;
+
+ ret = hib_wait_on_bio_chain(&bio);
+ if (ret)
goto out_finish;
+ have += asked;
+ asked = 0;
+ if (eof)
+ eof = 2;
}
- error = hib_wait_on_bio_chain(&bio); /* need all data now */
- if (error)
- goto out_finish;
-
- for (off = 0, i = 0;
- off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
- memcpy(cmp + off, page[i], PAGE_SIZE);
+ if (crc->run_threads) {
+ wait_event(crc->done, atomic_read(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ crc->run_threads = 0;
}
- unc_len = LZO_UNC_SIZE;
- error = lzo1x_decompress_safe(cmp + LZO_HEADER, cmp_len,
- unc, &unc_len);
- if (error < 0) {
- printk(KERN_ERR "PM: LZO decompression failed\n");
- break;
+ for (thr = 0; have && thr < nr_threads; thr++) {
+ data[thr].cmp_len = *(size_t *)page[pg];
+ if (unlikely(!data[thr].cmp_len ||
+ data[thr].cmp_len >
+ lzo1x_worst_compress(LZO_UNC_SIZE))) {
+ printk(KERN_ERR
+ "PM: Invalid LZO compressed length\n");
+ ret = -1;
+ goto out_finish;
+ }
+
+ need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
+ PAGE_SIZE);
+ if (need > have) {
+ if (eof > 1) {
+ ret = -1;
+ goto out_finish;
+ }
+ break;
+ }
+
+ for (off = 0;
+ off < LZO_HEADER + data[thr].cmp_len;
+ off += PAGE_SIZE) {
+ memcpy(data[thr].cmp + off,
+ page[pg], PAGE_SIZE);
+ have--;
+ want++;
+ if (++pg >= ring_size)
+ pg = 0;
+ }
+
+ atomic_set(&data[thr].ready, 1);
+ wake_up(&data[thr].go);
}
- if (unlikely(!unc_len ||
- unc_len > LZO_UNC_SIZE ||
- unc_len & (PAGE_SIZE - 1))) {
- printk(KERN_ERR "PM: Invalid LZO uncompressed length\n");
- error = -1;
- break;
+ /*
+ * Wait for more data while we are decompressing.
+ */
+ if (have < LZO_CMP_PAGES && asked) {
+ ret = hib_wait_on_bio_chain(&bio);
+ if (ret)
+ goto out_finish;
+ have += asked;
+ asked = 0;
+ if (eof)
+ eof = 2;
}
- for (off = 0; off < unc_len; off += PAGE_SIZE) {
- memcpy(data_of(*snapshot), unc + off, PAGE_SIZE);
+ for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
+ wait_event(data[thr].done,
+ atomic_read(&data[thr].stop));
+ atomic_set(&data[thr].stop, 0);
+
+ ret = data[thr].ret;
- if (!(nr_pages % m))
- printk("\b\b\b\b%3d%%", nr_pages / m);
- nr_pages++;
+ if (ret < 0) {
+ printk(KERN_ERR
+ "PM: LZO decompression failed\n");
+ goto out_finish;
+ }
- error = snapshot_write_next(snapshot);
- if (error <= 0)
+ if (unlikely(!data[thr].unc_len ||
+ data[thr].unc_len > LZO_UNC_SIZE ||
+ data[thr].unc_len & (PAGE_SIZE - 1))) {
+ printk(KERN_ERR
+ "PM: Invalid LZO uncompressed length\n");
+ ret = -1;
goto out_finish;
+ }
+
+ for (off = 0;
+ off < data[thr].unc_len; off += PAGE_SIZE) {
+ memcpy(data_of(*snapshot),
+ data[thr].unc + off, PAGE_SIZE);
+
+ if (!(nr_pages % m))
+ printk("\b\b\b\b%3d%%", nr_pages / m);
+ nr_pages++;
+
+ ret = snapshot_write_next(snapshot);
+ if (ret <= 0) {
+ crc->run_threads = thr + 1;
+ atomic_set(&crc->ready, 1);
+ wake_up(&crc->go);
+ goto out_finish;
+ }
+ }
}
+
+ crc->run_threads = thr;
+ atomic_set(&crc->ready, 1);
+ wake_up(&crc->go);
}
out_finish:
+ if (crc->run_threads) {
+ wait_event(crc->done, atomic_read(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ }
do_gettimeofday(&stop);
- if (!error) {
+ if (!ret) {
printk("\b\b\b\bdone\n");
snapshot_write_finalize(snapshot);
if (!snapshot_image_loaded(snapshot))
- error = -ENODATA;
+ ret = -ENODATA;
+ if (!ret) {
+ if (swsusp_header->flags & SF_CRC32_MODE) {
+ if(handle->crc32 != swsusp_header->crc32) {
+ printk(KERN_ERR
+ "PM: Invalid image CRC32!\n");
+ ret = -ENODATA;
+ }
+ }
+ }
} else
printk("\n");
swsusp_show_speed(&start, &stop, nr_to_read, "Read");
-
- vfree(cmp);
- vfree(unc);
- for (i = 0; i < LZO_CMP_PAGES; i++)
+out_clean:
+ for (i = 0; i < ring_size; i++)
free_page((unsigned long)page[i]);
+ if (crc) {
+ if (crc->thr)
+ kthread_stop(crc->thr);
+ kfree(crc);
+ }
+ if (data) {
+ for (thr = 0; thr < nr_threads; thr++)
+ if (data[thr].thr)
+ kthread_stop(data[thr].thr);
+ vfree(data);
+ }
+ if (page) vfree(page);
- return error;
+ return ret;
}
/**
diff --git a/kernel/printk.c b/kernel/printk.c
index 28a40d8171b..b7da18391c3 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -100,7 +100,7 @@ static int console_locked, console_suspended;
* It is also used in interesting ways to provide interlocking in
* console_unlock();.
*/
-static DEFINE_SPINLOCK(logbuf_lock);
+static DEFINE_RAW_SPINLOCK(logbuf_lock);
#define LOG_BUF_MASK (log_buf_len-1)
#define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
@@ -212,7 +212,7 @@ void __init setup_log_buf(int early)
return;
}
- spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&logbuf_lock, flags);
log_buf_len = new_log_buf_len;
log_buf = new_log_buf;
new_log_buf_len = 0;
@@ -230,7 +230,7 @@ void __init setup_log_buf(int early)
log_start -= offset;
con_start -= offset;
log_end -= offset;
- spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
pr_info("log_buf_len: %d\n", log_buf_len);
pr_info("early log buf free: %d(%d%%)\n",
@@ -365,18 +365,18 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
if (error)
goto out;
i = 0;
- spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&logbuf_lock);
while (!error && (log_start != log_end) && i < len) {
c = LOG_BUF(log_start);
log_start++;
- spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&logbuf_lock);
error = __put_user(c,buf);
buf++;
i++;
cond_resched();
- spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&logbuf_lock);
}
- spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&logbuf_lock);
if (!error)
error = i;
break;
@@ -399,7 +399,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
count = len;
if (count > log_buf_len)
count = log_buf_len;
- spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&logbuf_lock);
if (count > logged_chars)
count = logged_chars;
if (do_clear)
@@ -416,12 +416,12 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
if (j + log_buf_len < log_end)
break;
c = LOG_BUF(j);
- spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&logbuf_lock);
error = __put_user(c,&buf[count-1-i]);
cond_resched();
- spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&logbuf_lock);
}
- spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&logbuf_lock);
if (error)
break;
error = i;
@@ -689,7 +689,7 @@ static void zap_locks(void)
oops_timestamp = jiffies;
/* If a crash is occurring, make sure we can't deadlock */
- spin_lock_init(&logbuf_lock);
+ raw_spin_lock_init(&logbuf_lock);
/* And make sure that we print immediately */
sema_init(&console_sem, 1);
}
@@ -802,9 +802,9 @@ static int console_trylock_for_printk(unsigned int cpu)
}
}
printk_cpu = UINT_MAX;
- spin_unlock(&logbuf_lock);
if (wake)
up(&console_sem);
+ raw_spin_unlock(&logbuf_lock);
return retval;
}
static const char recursion_bug_msg [] =
@@ -864,7 +864,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
}
lockdep_off();
- spin_lock(&logbuf_lock);
+ raw_spin_lock(&logbuf_lock);
printk_cpu = this_cpu;
if (recursion_bug) {
@@ -1257,14 +1257,14 @@ void console_unlock(void)
again:
for ( ; ; ) {
- spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&logbuf_lock, flags);
wake_klogd |= log_start - log_end;
if (con_start == log_end)
break; /* Nothing to print */
_con_start = con_start;
_log_end = log_end;
con_start = log_end; /* Flush */
- spin_unlock(&logbuf_lock);
+ raw_spin_unlock(&logbuf_lock);
stop_critical_timings(); /* don't trace print latency */
call_console_drivers(_con_start, _log_end);
start_critical_timings();
@@ -1276,7 +1276,7 @@ again:
if (unlikely(exclusive_console))
exclusive_console = NULL;
- spin_unlock(&logbuf_lock);
+ raw_spin_unlock(&logbuf_lock);
up(&console_sem);
@@ -1286,13 +1286,13 @@ again:
* there's a new owner and the console_unlock() from them will do the
* flush, no worries.
*/
- spin_lock(&logbuf_lock);
+ raw_spin_lock(&logbuf_lock);
if (con_start != log_end)
retry = 1;
- spin_unlock_irqrestore(&logbuf_lock, flags);
if (retry && console_trylock())
goto again;
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
if (wake_klogd)
wake_up_klogd();
}
@@ -1522,9 +1522,9 @@ void register_console(struct console *newcon)
* console_unlock(); will print out the buffered messages
* for us.
*/
- spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&logbuf_lock, flags);
con_start = log_start;
- spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
/*
* We're about to replay the log buffer. Only do this to the
* just-registered console to avoid excessive message spam to
@@ -1731,10 +1731,10 @@ void kmsg_dump(enum kmsg_dump_reason reason)
/* Theoretically, the log could move on after we do this, but
there's not a lot we can do about that. The new messages
will overwrite the start of what we dump. */
- spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&logbuf_lock, flags);
end = log_end & LOG_BUF_MASK;
chars = logged_chars;
- spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
if (chars > end) {
s1 = log_buf + log_buf_len - chars + end;
diff --git a/kernel/rcu.h b/kernel/rcu.h
new file mode 100644
index 00000000000..f600868d550
--- /dev/null
+++ b/kernel/rcu.h
@@ -0,0 +1,85 @@
+/*
+ * Read-Copy Update definitions shared among RCU implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2011
+ *
+ * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#ifndef __LINUX_RCU_H
+#define __LINUX_RCU_H
+
+#ifdef CONFIG_RCU_TRACE
+#define RCU_TRACE(stmt) stmt
+#else /* #ifdef CONFIG_RCU_TRACE */
+#define RCU_TRACE(stmt)
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
+
+/*
+ * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
+ * by call_rcu() and rcu callback execution, and are therefore not part of the
+ * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
+ */
+
+#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+# define STATE_RCU_HEAD_READY 0
+# define STATE_RCU_HEAD_QUEUED 1
+
+extern struct debug_obj_descr rcuhead_debug_descr;
+
+static inline void debug_rcu_head_queue(struct rcu_head *head)
+{
+ WARN_ON_ONCE((unsigned long)head & 0x3);
+ debug_object_activate(head, &rcuhead_debug_descr);
+ debug_object_active_state(head, &rcuhead_debug_descr,
+ STATE_RCU_HEAD_READY,
+ STATE_RCU_HEAD_QUEUED);
+}
+
+static inline void debug_rcu_head_unqueue(struct rcu_head *head)
+{
+ debug_object_active_state(head, &rcuhead_debug_descr,
+ STATE_RCU_HEAD_QUEUED,
+ STATE_RCU_HEAD_READY);
+ debug_object_deactivate(head, &rcuhead_debug_descr);
+}
+#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void debug_rcu_head_queue(struct rcu_head *head)
+{
+}
+
+static inline void debug_rcu_head_unqueue(struct rcu_head *head)
+{
+}
+#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+
+extern void kfree(const void *);
+
+static inline void __rcu_reclaim(char *rn, struct rcu_head *head)
+{
+ unsigned long offset = (unsigned long)head->func;
+
+ if (__is_kfree_rcu_offset(offset)) {
+ RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset));
+ kfree((void *)head - offset);
+ } else {
+ RCU_TRACE(trace_rcu_invoke_callback(rn, head));
+ head->func(head);
+ }
+}
+
+#endif /* __LINUX_RCU_H */
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index ddddb320be6..ca0d23b6b3e 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -46,6 +46,11 @@
#include <linux/module.h>
#include <linux/hardirq.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/rcu.h>
+
+#include "rcu.h"
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
@@ -94,11 +99,16 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+struct rcu_synchronize {
+ struct rcu_head head;
+ struct completion completion;
+};
+
/*
* Awaken the corresponding synchronize_rcu() instance now that a
* grace period has elapsed.
*/
-void wakeme_after_rcu(struct rcu_head *head)
+static void wakeme_after_rcu(struct rcu_head *head)
{
struct rcu_synchronize *rcu;
@@ -106,6 +116,20 @@ void wakeme_after_rcu(struct rcu_head *head)
complete(&rcu->completion);
}
+void wait_rcu_gp(call_rcu_func_t crf)
+{
+ struct rcu_synchronize rcu;
+
+ init_rcu_head_on_stack(&rcu.head);
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ crf(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+ destroy_rcu_head_on_stack(&rcu.head);
+}
+EXPORT_SYMBOL_GPL(wait_rcu_gp);
+
#ifdef CONFIG_PROVE_RCU
/*
* wrapper function to avoid #include problems.
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 7bbac7d0f5a..da775c87f27 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -37,16 +37,17 @@
#include <linux/cpu.h>
#include <linux/prefetch.h>
-/* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
-static struct task_struct *rcu_kthread_task;
-static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
-static unsigned long have_rcu_kthread_work;
+#ifdef CONFIG_RCU_TRACE
+#include <trace/events/rcu.h>
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
+
+#include "rcu.h"
/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
-static void invoke_rcu_kthread(void);
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
-static int rcu_kthread(void *arg);
+static void invoke_rcu_callbacks(void);
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *rcu),
struct rcu_ctrlblk *rcp);
@@ -96,16 +97,6 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
}
/*
- * Wake up rcu_kthread() to process callbacks now eligible for invocation
- * or to boost readers.
- */
-static void invoke_rcu_kthread(void)
-{
- have_rcu_kthread_work = 1;
- wake_up(&rcu_kthread_wq);
-}
-
-/*
* Record an rcu quiescent state. And an rcu_bh quiescent state while we
* are at it, given that any rcu quiescent state is also an rcu_bh
* quiescent state. Use "+" instead of "||" to defeat short circuiting.
@@ -117,7 +108,7 @@ void rcu_sched_qs(int cpu)
local_irq_save(flags);
if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
rcu_qsctr_help(&rcu_bh_ctrlblk))
- invoke_rcu_kthread();
+ invoke_rcu_callbacks();
local_irq_restore(flags);
}
@@ -130,7 +121,7 @@ void rcu_bh_qs(int cpu)
local_irq_save(flags);
if (rcu_qsctr_help(&rcu_bh_ctrlblk))
- invoke_rcu_kthread();
+ invoke_rcu_callbacks();
local_irq_restore(flags);
}
@@ -154,18 +145,23 @@ void rcu_check_callbacks(int cpu, int user)
* Invoke the RCU callbacks on the specified rcu_ctrlkblk structure
* whose grace period has elapsed.
*/
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
+ char *rn = NULL;
struct rcu_head *next, *list;
unsigned long flags;
RCU_TRACE(int cb_count = 0);
/* If no RCU callbacks ready to invoke, just return. */
- if (&rcp->rcucblist == rcp->donetail)
+ if (&rcp->rcucblist == rcp->donetail) {
+ RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
+ RCU_TRACE(trace_rcu_batch_end(rcp->name, 0));
return;
+ }
/* Move the ready-to-invoke callbacks to a local list. */
local_irq_save(flags);
+ RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
list = rcp->rcucblist;
rcp->rcucblist = *rcp->donetail;
*rcp->donetail = NULL;
@@ -176,49 +172,26 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
local_irq_restore(flags);
/* Invoke the callbacks on the local list. */
+ RCU_TRACE(rn = rcp->name);
while (list) {
next = list->next;
prefetch(next);
debug_rcu_head_unqueue(list);
local_bh_disable();
- __rcu_reclaim(list);
+ __rcu_reclaim(rn, list);
local_bh_enable();
list = next;
RCU_TRACE(cb_count++);
}
RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
+ RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
}
-/*
- * This kthread invokes RCU callbacks whose grace periods have
- * elapsed. It is awakened as needed, and takes the place of the
- * RCU_SOFTIRQ that was used previously for this purpose.
- * This is a kthread, but it is never stopped, at least not until
- * the system goes down.
- */
-static int rcu_kthread(void *arg)
+static void rcu_process_callbacks(struct softirq_action *unused)
{
- unsigned long work;
- unsigned long morework;
- unsigned long flags;
-
- for (;;) {
- wait_event_interruptible(rcu_kthread_wq,
- have_rcu_kthread_work != 0);
- morework = rcu_boost();
- local_irq_save(flags);
- work = have_rcu_kthread_work;
- have_rcu_kthread_work = morework;
- local_irq_restore(flags);
- if (work) {
- rcu_process_callbacks(&rcu_sched_ctrlblk);
- rcu_process_callbacks(&rcu_bh_ctrlblk);
- rcu_preempt_process_callbacks();
- }
- schedule_timeout_interruptible(1); /* Leave CPU for others. */
- }
-
- return 0; /* Not reached, but needed to shut gcc up. */
+ __rcu_process_callbacks(&rcu_sched_ctrlblk);
+ __rcu_process_callbacks(&rcu_bh_ctrlblk);
+ rcu_preempt_process_callbacks();
}
/*
@@ -280,45 +253,3 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
-
-void rcu_barrier_bh(void)
-{
- struct rcu_synchronize rcu;
-
- init_rcu_head_on_stack(&rcu.head);
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu_bh(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
- destroy_rcu_head_on_stack(&rcu.head);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-
-void rcu_barrier_sched(void)
-{
- struct rcu_synchronize rcu;
-
- init_rcu_head_on_stack(&rcu.head);
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu_sched(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
- destroy_rcu_head_on_stack(&rcu.head);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
-
-/*
- * Spawn the kthread that invokes RCU callbacks.
- */
-static int __init rcu_spawn_kthreads(void)
-{
- struct sched_param sp;
-
- rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
- sp.sched_priority = RCU_BOOST_PRIO;
- sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
- return 0;
-}
-early_initcall(rcu_spawn_kthreads);
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index f259c676195..02aa7139861 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -26,29 +26,26 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#ifdef CONFIG_RCU_TRACE
-#define RCU_TRACE(stmt) stmt
-#else /* #ifdef CONFIG_RCU_TRACE */
-#define RCU_TRACE(stmt)
-#endif /* #else #ifdef CONFIG_RCU_TRACE */
-
/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */
struct rcu_head **donetail; /* ->next pointer of last "done" CB. */
struct rcu_head **curtail; /* ->next pointer of last CB. */
RCU_TRACE(long qlen); /* Number of pending CBs. */
+ RCU_TRACE(char *name); /* Name of RCU type. */
};
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
.donetail = &rcu_sched_ctrlblk.rcucblist,
.curtail = &rcu_sched_ctrlblk.rcucblist,
+ RCU_TRACE(.name = "rcu_sched")
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
.donetail = &rcu_bh_ctrlblk.rcucblist,
.curtail = &rcu_bh_ctrlblk.rcucblist,
+ RCU_TRACE(.name = "rcu_bh")
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -131,6 +128,7 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
+ RCU_TRACE(.rcb.name = "rcu_preempt")
};
static int rcu_preempted_readers_exp(void);
@@ -247,6 +245,13 @@ static void show_tiny_preempt_stats(struct seq_file *m)
#include "rtmutex_common.h"
+#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
+
+/* Controls for rcu_kthread() kthread. */
+static struct task_struct *rcu_kthread_task;
+static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
+static unsigned long have_rcu_kthread_work;
+
/*
* Carry out RCU priority boosting on the task indicated by ->boost_tasks,
* and advance ->boost_tasks to the next task in the ->blkd_tasks list.
@@ -334,7 +339,7 @@ static int rcu_initiate_boost(void)
if (rcu_preempt_ctrlblk.exp_tasks == NULL)
rcu_preempt_ctrlblk.boost_tasks =
rcu_preempt_ctrlblk.gp_tasks;
- invoke_rcu_kthread();
+ invoke_rcu_callbacks();
} else
RCU_TRACE(rcu_initiate_boost_trace());
return 1;
@@ -353,14 +358,6 @@ static void rcu_preempt_boost_start_gp(void)
#else /* #ifdef CONFIG_RCU_BOOST */
/*
- * If there is no RCU priority boosting, we don't boost.
- */
-static int rcu_boost(void)
-{
- return 0;
-}
-
-/*
* If there is no RCU priority boosting, we don't initiate boosting,
* but we do indicate whether there are blocked readers blocking the
* current grace period.
@@ -427,7 +424,7 @@ static void rcu_preempt_cpu_qs(void)
/* If there are done callbacks, cause them to be invoked. */
if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
- invoke_rcu_kthread();
+ invoke_rcu_callbacks();
}
/*
@@ -648,7 +645,7 @@ static void rcu_preempt_check_callbacks(void)
rcu_preempt_cpu_qs();
if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
rcu_preempt_ctrlblk.rcb.donetail)
- invoke_rcu_kthread();
+ invoke_rcu_callbacks();
if (rcu_preempt_gp_in_progress() &&
rcu_cpu_blocking_cur_gp() &&
rcu_preempt_running_reader())
@@ -674,7 +671,7 @@ static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
*/
static void rcu_preempt_process_callbacks(void)
{
- rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
+ __rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}
/*
@@ -697,20 +694,6 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu);
-void rcu_barrier(void)
-{
- struct rcu_synchronize rcu;
-
- init_rcu_head_on_stack(&rcu.head);
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
- destroy_rcu_head_on_stack(&rcu.head);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier);
-
/*
* synchronize_rcu - wait until a grace period has elapsed.
*
@@ -864,15 +847,6 @@ static void show_tiny_preempt_stats(struct seq_file *m)
#endif /* #ifdef CONFIG_RCU_TRACE */
/*
- * Because preemptible RCU does not exist, it is never necessary to
- * boost preempted RCU readers.
- */
-static int rcu_boost(void)
-{
- return 0;
-}
-
-/*
* Because preemptible RCU does not exist, it never has any callbacks
* to check.
*/
@@ -898,6 +872,78 @@ static void rcu_preempt_process_callbacks(void)
#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
+#ifdef CONFIG_RCU_BOOST
+
+/*
+ * Wake up rcu_kthread() to process callbacks now eligible for invocation
+ * or to boost readers.
+ */
+static void invoke_rcu_callbacks(void)
+{
+ have_rcu_kthread_work = 1;
+ wake_up(&rcu_kthread_wq);
+}
+
+/*
+ * This kthread invokes RCU callbacks whose grace periods have
+ * elapsed. It is awakened as needed, and takes the place of the
+ * RCU_SOFTIRQ that is used for this purpose when boosting is disabled.
+ * This is a kthread, but it is never stopped, at least not until
+ * the system goes down.
+ */
+static int rcu_kthread(void *arg)
+{
+ unsigned long work;
+ unsigned long morework;
+ unsigned long flags;
+
+ for (;;) {
+ wait_event_interruptible(rcu_kthread_wq,
+ have_rcu_kthread_work != 0);
+ morework = rcu_boost();
+ local_irq_save(flags);
+ work = have_rcu_kthread_work;
+ have_rcu_kthread_work = morework;
+ local_irq_restore(flags);
+ if (work)
+ rcu_process_callbacks(NULL);
+ schedule_timeout_interruptible(1); /* Leave CPU for others. */
+ }
+
+ return 0; /* Not reached, but needed to shut gcc up. */
+}
+
+/*
+ * Spawn the kthread that invokes RCU callbacks.
+ */
+static int __init rcu_spawn_kthreads(void)
+{
+ struct sched_param sp;
+
+ rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
+ sp.sched_priority = RCU_BOOST_PRIO;
+ sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
+ return 0;
+}
+early_initcall(rcu_spawn_kthreads);
+
+#else /* #ifdef CONFIG_RCU_BOOST */
+
+/*
+ * Start up softirq processing of callbacks.
+ */
+void invoke_rcu_callbacks(void)
+{
+ raise_softirq(RCU_SOFTIRQ);
+}
+
+void rcu_init(void)
+{
+ open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+}
+
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>
@@ -913,12 +959,6 @@ void __init rcu_scheduler_starting(void)
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-#ifdef CONFIG_RCU_BOOST
-#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
-#else /* #ifdef CONFIG_RCU_BOOST */
-#define RCU_BOOST_PRIO 1
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
-
#ifdef CONFIG_RCU_TRACE
#ifdef CONFIG_RCU_BOOST
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 98f51b13bb7..764825c2685 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -73,7 +73,7 @@ module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
-module_param(stat_interval, int, 0444);
+module_param(stat_interval, int, 0644);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
@@ -480,30 +480,6 @@ static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}
-struct rcu_bh_torture_synchronize {
- struct rcu_head head;
- struct completion completion;
-};
-
-static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
-{
- struct rcu_bh_torture_synchronize *rcu;
-
- rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
- complete(&rcu->completion);
-}
-
-static void rcu_bh_torture_synchronize(void)
-{
- struct rcu_bh_torture_synchronize rcu;
-
- init_rcu_head_on_stack(&rcu.head);
- init_completion(&rcu.completion);
- call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
- wait_for_completion(&rcu.completion);
- destroy_rcu_head_on_stack(&rcu.head);
-}
-
static struct rcu_torture_ops rcu_bh_ops = {
.init = NULL,
.cleanup = NULL,
@@ -512,7 +488,7 @@ static struct rcu_torture_ops rcu_bh_ops = {
.readunlock = rcu_bh_torture_read_unlock,
.completed = rcu_bh_torture_completed,
.deferred_free = rcu_bh_torture_deferred_free,
- .sync = rcu_bh_torture_synchronize,
+ .sync = synchronize_rcu_bh,
.cb_barrier = rcu_barrier_bh,
.fqs = rcu_bh_force_quiescent_state,
.stats = NULL,
@@ -528,7 +504,7 @@ static struct rcu_torture_ops rcu_bh_sync_ops = {
.readunlock = rcu_bh_torture_read_unlock,
.completed = rcu_bh_torture_completed,
.deferred_free = rcu_sync_torture_deferred_free,
- .sync = rcu_bh_torture_synchronize,
+ .sync = synchronize_rcu_bh,
.cb_barrier = NULL,
.fqs = rcu_bh_force_quiescent_state,
.stats = NULL,
@@ -536,6 +512,22 @@ static struct rcu_torture_ops rcu_bh_sync_ops = {
.name = "rcu_bh_sync"
};
+static struct rcu_torture_ops rcu_bh_expedited_ops = {
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = rcu_bh_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = rcu_bh_torture_read_unlock,
+ .completed = rcu_bh_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = synchronize_rcu_bh_expedited,
+ .cb_barrier = NULL,
+ .fqs = rcu_bh_force_quiescent_state,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "rcu_bh_expedited"
+};
+
/*
* Definitions for srcu torture testing.
*/
@@ -659,11 +651,6 @@ static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}
-static void sched_torture_synchronize(void)
-{
- synchronize_sched();
-}
-
static struct rcu_torture_ops sched_ops = {
.init = rcu_sync_torture_init,
.cleanup = NULL,
@@ -672,7 +659,7 @@ static struct rcu_torture_ops sched_ops = {
.readunlock = sched_torture_read_unlock,
.completed = rcu_no_completed,
.deferred_free = rcu_sched_torture_deferred_free,
- .sync = sched_torture_synchronize,
+ .sync = synchronize_sched,
.cb_barrier = rcu_barrier_sched,
.fqs = rcu_sched_force_quiescent_state,
.stats = NULL,
@@ -688,7 +675,7 @@ static struct rcu_torture_ops sched_sync_ops = {
.readunlock = sched_torture_read_unlock,
.completed = rcu_no_completed,
.deferred_free = rcu_sync_torture_deferred_free,
- .sync = sched_torture_synchronize,
+ .sync = synchronize_sched,
.cb_barrier = NULL,
.fqs = rcu_sched_force_quiescent_state,
.stats = NULL,
@@ -754,7 +741,7 @@ static int rcu_torture_boost(void *arg)
do {
/* Wait for the next test interval. */
oldstarttime = boost_starttime;
- while (jiffies - oldstarttime > ULONG_MAX / 2) {
+ while (ULONG_CMP_LT(jiffies, oldstarttime)) {
schedule_timeout_uninterruptible(1);
rcu_stutter_wait("rcu_torture_boost");
if (kthread_should_stop() ||
@@ -765,7 +752,7 @@ static int rcu_torture_boost(void *arg)
/* Do one boost-test interval. */
endtime = oldstarttime + test_boost_duration * HZ;
call_rcu_time = jiffies;
- while (jiffies - endtime > ULONG_MAX / 2) {
+ while (ULONG_CMP_LT(jiffies, endtime)) {
/* If we don't have a callback in flight, post one. */
if (!rbi.inflight) {
smp_mb(); /* RCU core before ->inflight = 1. */
@@ -792,7 +779,8 @@ static int rcu_torture_boost(void *arg)
* interval. Besides, we are running at RT priority,
* so delays should be relatively rare.
*/
- while (oldstarttime == boost_starttime) {
+ while (oldstarttime == boost_starttime &&
+ !kthread_should_stop()) {
if (mutex_trylock(&boost_mutex)) {
boost_starttime = jiffies +
test_boost_interval * HZ;
@@ -809,11 +797,11 @@ checkwait: rcu_stutter_wait("rcu_torture_boost");
/* Clean up and exit. */
VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
- destroy_rcu_head_on_stack(&rbi.rcu);
rcutorture_shutdown_absorb("rcu_torture_boost");
while (!kthread_should_stop() || rbi.inflight)
schedule_timeout_uninterruptible(1);
smp_mb(); /* order accesses to ->inflight before stack-frame death. */
+ destroy_rcu_head_on_stack(&rbi.rcu);
return 0;
}
@@ -831,11 +819,13 @@ rcu_torture_fqs(void *arg)
VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
do {
fqs_resume_time = jiffies + fqs_stutter * HZ;
- while (jiffies - fqs_resume_time > LONG_MAX) {
+ while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
+ !kthread_should_stop()) {
schedule_timeout_interruptible(1);
}
fqs_burst_remaining = fqs_duration;
- while (fqs_burst_remaining > 0) {
+ while (fqs_burst_remaining > 0 &&
+ !kthread_should_stop()) {
cur_ops->fqs();
udelay(fqs_holdoff);
fqs_burst_remaining -= fqs_holdoff;
@@ -1280,8 +1270,9 @@ static int rcutorture_booster_init(int cpu)
/* Don't allow time recalculation while creating a new task. */
mutex_lock(&boost_mutex);
VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
- boost_tasks[cpu] = kthread_create(rcu_torture_boost, NULL,
- "rcu_torture_boost");
+ boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
+ cpu_to_node(cpu),
+ "rcu_torture_boost");
if (IS_ERR(boost_tasks[cpu])) {
retval = PTR_ERR(boost_tasks[cpu]);
VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
@@ -1424,7 +1415,7 @@ rcu_torture_init(void)
int firsterr = 0;
static struct rcu_torture_ops *torture_ops[] =
{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
- &rcu_bh_ops, &rcu_bh_sync_ops,
+ &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
&srcu_ops, &srcu_expedited_ops,
&sched_ops, &sched_sync_ops, &sched_expedited_ops, };
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index ba06207b1dd..e234eb92a17 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -52,13 +52,16 @@
#include <linux/prefetch.h>
#include "rcutree.h"
+#include <trace/events/rcu.h>
+
+#include "rcu.h"
/* Data structures. */
static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
#define RCU_STATE_INITIALIZER(structname) { \
- .level = { &structname.node[0] }, \
+ .level = { &structname##_state.node[0] }, \
.levelcnt = { \
NUM_RCU_LVL_0, /* root of hierarchy. */ \
NUM_RCU_LVL_1, \
@@ -69,17 +72,17 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
.signaled = RCU_GP_IDLE, \
.gpnum = -300, \
.completed = -300, \
- .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \
- .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \
+ .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
+ .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \
.n_force_qs = 0, \
.n_force_qs_ngp = 0, \
.name = #structname, \
}
-struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
+struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched);
DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
-struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
+struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
static struct rcu_state *rcu_state;
@@ -128,8 +131,6 @@ static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
-#define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */
-
/*
* Track the rcutorture test sequence number and the update version
* number within a given test. The rcutorture_testseq is incremented
@@ -156,33 +157,41 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
* Note a quiescent state. Because we do not need to know
* how many quiescent states passed, just if there was at least
* one since the start of the grace period, this just sets a flag.
+ * The caller must have disabled preemption.
*/
void rcu_sched_qs(int cpu)
{
struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
- rdp->passed_quiesc_completed = rdp->gpnum - 1;
+ rdp->passed_quiesce_gpnum = rdp->gpnum;
barrier();
- rdp->passed_quiesc = 1;
+ if (rdp->passed_quiesce == 0)
+ trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
+ rdp->passed_quiesce = 1;
}
void rcu_bh_qs(int cpu)
{
struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
- rdp->passed_quiesc_completed = rdp->gpnum - 1;
+ rdp->passed_quiesce_gpnum = rdp->gpnum;
barrier();
- rdp->passed_quiesc = 1;
+ if (rdp->passed_quiesce == 0)
+ trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
+ rdp->passed_quiesce = 1;
}
/*
* Note a context switch. This is a quiescent state for RCU-sched,
* and requires special handling for preemptible RCU.
+ * The caller must have disabled preemption.
*/
void rcu_note_context_switch(int cpu)
{
+ trace_rcu_utilization("Start context switch");
rcu_sched_qs(cpu);
rcu_preempt_note_context_switch(cpu);
+ trace_rcu_utilization("End context switch");
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
@@ -193,7 +202,7 @@ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
};
#endif /* #ifdef CONFIG_NO_HZ */
-static int blimit = 10; /* Maximum callbacks per softirq. */
+static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
static int qhimark = 10000; /* If this many pending, ignore blimit. */
static int qlowmark = 100; /* Once only this many pending, use blimit. */
@@ -314,6 +323,7 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
* trust its state not to change because interrupts are disabled.
*/
if (cpu_is_offline(rdp->cpu)) {
+ trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
rdp->offline_fqs++;
return 1;
}
@@ -354,19 +364,13 @@ void rcu_enter_nohz(void)
local_irq_restore(flags);
return;
}
+ trace_rcu_dyntick("Start");
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
smp_mb__before_atomic_inc(); /* See above. */
atomic_inc(&rdtp->dynticks);
smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
local_irq_restore(flags);
-
- /* If the interrupt queued a callback, get out of dyntick mode. */
- if (in_irq() &&
- (__get_cpu_var(rcu_sched_data).nxtlist ||
- __get_cpu_var(rcu_bh_data).nxtlist ||
- rcu_preempt_needs_cpu(smp_processor_id())))
- set_need_resched();
}
/*
@@ -391,6 +395,7 @@ void rcu_exit_nohz(void)
/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
smp_mb__after_atomic_inc(); /* See above. */
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+ trace_rcu_dyntick("End");
local_irq_restore(flags);
}
@@ -481,11 +486,11 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
*/
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
- unsigned long curr;
- unsigned long snap;
+ unsigned int curr;
+ unsigned int snap;
- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
- snap = (unsigned long)rdp->dynticks_snap;
+ curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
+ snap = (unsigned int)rdp->dynticks_snap;
/*
* If the CPU passed through or entered a dynticks idle phase with
@@ -495,7 +500,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
* read-side critical section that started before the beginning
* of the current RCU grace period.
*/
- if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
+ if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
+ trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
rdp->dynticks_fqs++;
return 1;
}
@@ -537,6 +543,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
int cpu;
long delta;
unsigned long flags;
+ int ndetected;
struct rcu_node *rnp = rcu_get_root(rsp);
/* Only let one CPU complain about others per time interval. */
@@ -553,7 +560,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
* Now rat on any tasks that got kicked up to the root rcu_node
* due to CPU offlining.
*/
- rcu_print_task_stall(rnp);
+ ndetected = rcu_print_task_stall(rnp);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
/*
@@ -565,17 +572,22 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
rsp->name);
rcu_for_each_leaf_node(rsp, rnp) {
raw_spin_lock_irqsave(&rnp->lock, flags);
- rcu_print_task_stall(rnp);
+ ndetected += rcu_print_task_stall(rnp);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
if (rnp->qsmask == 0)
continue;
for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
- if (rnp->qsmask & (1UL << cpu))
+ if (rnp->qsmask & (1UL << cpu)) {
printk(" %d", rnp->grplo + cpu);
+ ndetected++;
+ }
}
printk("} (detected by %d, t=%ld jiffies)\n",
smp_processor_id(), (long)(jiffies - rsp->gp_start));
- trigger_all_cpu_backtrace();
+ if (ndetected == 0)
+ printk(KERN_ERR "INFO: Stall ended before state dump start\n");
+ else if (!trigger_all_cpu_backtrace())
+ dump_stack();
/* If so configured, complain about tasks blocking the grace period. */
@@ -596,7 +608,8 @@ static void print_cpu_stall(struct rcu_state *rsp)
*/
printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
- trigger_all_cpu_backtrace();
+ if (!trigger_all_cpu_backtrace())
+ dump_stack();
raw_spin_lock_irqsave(&rnp->lock, flags);
if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
@@ -678,9 +691,10 @@ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct
* go looking for one.
*/
rdp->gpnum = rnp->gpnum;
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
if (rnp->qsmask & rdp->grpmask) {
rdp->qs_pending = 1;
- rdp->passed_quiesc = 0;
+ rdp->passed_quiesce = 0;
} else
rdp->qs_pending = 0;
}
@@ -741,6 +755,7 @@ __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat
/* Remember that we saw this grace-period completion. */
rdp->completed = rnp->completed;
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");
/*
* If we were in an extended quiescent state, we may have
@@ -826,31 +841,31 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
struct rcu_node *rnp = rcu_get_root(rsp);
- if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
- if (cpu_needs_another_gp(rsp, rdp))
- rsp->fqs_need_gp = 1;
- if (rnp->completed == rsp->completed) {
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- return;
- }
- raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+ if (!rcu_scheduler_fully_active ||
+ !cpu_needs_another_gp(rsp, rdp)) {
+ /*
+ * Either the scheduler hasn't yet spawned the first
+ * non-idle task or this CPU does not need another
+ * grace period. Either way, don't start a new grace
+ * period.
+ */
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+ if (rsp->fqs_active) {
/*
- * Propagate new ->completed value to rcu_node structures
- * so that other CPUs don't have to wait until the start
- * of the next grace period to process their callbacks.
+ * This CPU needs a grace period, but force_quiescent_state()
+ * is running. Tell it to start one on this CPU's behalf.
*/
- rcu_for_each_node_breadth_first(rsp, rnp) {
- raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- rnp->completed = rsp->completed;
- raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- }
- local_irq_restore(flags);
+ rsp->fqs_need_gp = 1;
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
/* Advance to a new grace period and initialize state. */
rsp->gpnum++;
+ trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
@@ -865,6 +880,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
rcu_start_gp_per_cpu(rsp, rnp, rdp);
rcu_preempt_boost_start_gp(rnp);
+ trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+ rnp->level, rnp->grplo,
+ rnp->grphi, rnp->qsmask);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
@@ -901,6 +919,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
if (rnp == rdp->mynode)
rcu_start_gp_per_cpu(rsp, rnp, rdp);
rcu_preempt_boost_start_gp(rnp);
+ trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+ rnp->level, rnp->grplo,
+ rnp->grphi, rnp->qsmask);
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
}
@@ -922,6 +943,8 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
__releases(rcu_get_root(rsp)->lock)
{
unsigned long gp_duration;
+ struct rcu_node *rnp = rcu_get_root(rsp);
+ struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
@@ -933,7 +956,41 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
gp_duration = jiffies - rsp->gp_start;
if (gp_duration > rsp->gp_max)
rsp->gp_max = gp_duration;
- rsp->completed = rsp->gpnum;
+
+ /*
+ * We know the grace period is complete, but to everyone else
+ * it appears to still be ongoing. But it is also the case
+ * that to everyone else it looks like there is nothing that
+ * they can do to advance the grace period. It is therefore
+ * safe for us to drop the lock in order to mark the grace
+ * period as completed in all of the rcu_node structures.
+ *
+ * But if this CPU needs another grace period, it will take
+ * care of this while initializing the next grace period.
+ * We use RCU_WAIT_TAIL instead of the usual RCU_DONE_TAIL
+ * because the callbacks have not yet been advanced: Those
+ * callbacks are waiting on the grace period that just now
+ * completed.
+ */
+ if (*rdp->nxttail[RCU_WAIT_TAIL] == NULL) {
+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+
+ /*
+ * Propagate new ->completed value to rcu_node structures
+ * so that other CPUs don't have to wait until the start
+ * of the next grace period to process their callbacks.
+ */
+ rcu_for_each_node_breadth_first(rsp, rnp) {
+ raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+ rnp->completed = rsp->gpnum;
+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+ }
+ rnp = rcu_get_root(rsp);
+ raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+ }
+
+ rsp->completed = rsp->gpnum; /* Declare the grace period complete. */
+ trace_rcu_grace_period(rsp->name, rsp->completed, "end");
rsp->signaled = RCU_GP_IDLE;
rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
}
@@ -962,6 +1019,10 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
return;
}
rnp->qsmask &= ~mask;
+ trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
+ mask, rnp->qsmask, rnp->level,
+ rnp->grplo, rnp->grphi,
+ !!rnp->gp_tasks);
if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
/* Other bits still set at this level, so done. */
@@ -1000,7 +1061,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
* based on quiescent states detected in an earlier grace period!
*/
static void
-rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
+rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastgp)
{
unsigned long flags;
unsigned long mask;
@@ -1008,17 +1069,15 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long las
rnp = rdp->mynode;
raw_spin_lock_irqsave(&rnp->lock, flags);
- if (lastcomp != rnp->completed) {
+ if (lastgp != rnp->gpnum || rnp->completed == rnp->gpnum) {
/*
- * Someone beat us to it for this grace period, so leave.
- * The race with GP start is resolved by the fact that we
- * hold the leaf rcu_node lock, so that the per-CPU bits
- * cannot yet be initialized -- so we would simply find our
- * CPU's bit already cleared in rcu_report_qs_rnp() if this
- * race occurred.
+ * The grace period in which this quiescent state was
+ * recorded has ended, so don't report it upwards.
+ * We will instead need a new quiescent state that lies
+ * within the current grace period.
*/
- rdp->passed_quiesc = 0; /* try again later! */
+ rdp->passed_quiesce = 0; /* need qs for new gp. */
raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
@@ -1062,14 +1121,14 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
* Was there a quiescent state since the beginning of the grace
* period? If no, then exit and wait for the next call.
*/
- if (!rdp->passed_quiesc)
+ if (!rdp->passed_quiesce)
return;
/*
* Tell RCU we are done (but rcu_report_qs_rdp() will be the
* judge of that).
*/
- rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
+ rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesce_gpnum);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -1130,11 +1189,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
if (rnp->qsmaskinit != 0) {
if (rnp != rdp->mynode)
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+ else
+ trace_rcu_grace_period(rsp->name,
+ rnp->gpnum + 1 -
+ !!(rnp->qsmask & mask),
+ "cpuofl");
break;
}
- if (rnp == rdp->mynode)
+ if (rnp == rdp->mynode) {
+ trace_rcu_grace_period(rsp->name,
+ rnp->gpnum + 1 -
+ !!(rnp->qsmask & mask),
+ "cpuofl");
need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
- else
+ } else
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
mask = rnp->grpmask;
rnp = rnp->parent;
@@ -1190,17 +1258,22 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
{
unsigned long flags;
struct rcu_head *next, *list, **tail;
- int count;
+ int bl, count;
/* If no callbacks are ready, just return.*/
- if (!cpu_has_callbacks_ready_to_invoke(rdp))
+ if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
+ trace_rcu_batch_start(rsp->name, 0, 0);
+ trace_rcu_batch_end(rsp->name, 0);
return;
+ }
/*
* Extract the list of ready callbacks, disabling to prevent
* races with call_rcu() from interrupt handlers.
*/
local_irq_save(flags);
+ bl = rdp->blimit;
+ trace_rcu_batch_start(rsp->name, rdp->qlen, bl);
list = rdp->nxtlist;
rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
*rdp->nxttail[RCU_DONE_TAIL] = NULL;
@@ -1216,13 +1289,14 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
next = list->next;
prefetch(next);
debug_rcu_head_unqueue(list);
- __rcu_reclaim(list);
+ __rcu_reclaim(rsp->name, list);
list = next;
- if (++count >= rdp->blimit)
+ if (++count >= bl)
break;
}
local_irq_save(flags);
+ trace_rcu_batch_end(rsp->name, count);
/* Update count, and requeue any remaining callbacks. */
rdp->qlen -= count;
@@ -1250,7 +1324,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
local_irq_restore(flags);
- /* Re-raise the RCU softirq if there are callbacks remaining. */
+ /* Re-invoke RCU core processing if there are callbacks remaining. */
if (cpu_has_callbacks_ready_to_invoke(rdp))
invoke_rcu_core();
}
@@ -1258,7 +1332,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
/*
* Check to see if this CPU is in a non-context-switch quiescent state
* (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
- * Also schedule the RCU softirq handler.
+ * Also schedule RCU core processing.
*
* This function must be called with hardirqs disabled. It is normally
* invoked from the scheduling-clock interrupt. If rcu_pending returns
@@ -1266,6 +1340,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
*/
void rcu_check_callbacks(int cpu, int user)
{
+ trace_rcu_utilization("Start scheduler-tick");
if (user ||
(idle_cpu(cpu) && rcu_scheduler_active &&
!in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -1299,6 +1374,7 @@ void rcu_check_callbacks(int cpu, int user)
rcu_preempt_check_callbacks(cpu);
if (rcu_pending(cpu))
invoke_rcu_core();
+ trace_rcu_utilization("End scheduler-tick");
}
#ifdef CONFIG_SMP
@@ -1360,10 +1436,14 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
unsigned long flags;
struct rcu_node *rnp = rcu_get_root(rsp);
- if (!rcu_gp_in_progress(rsp))
+ trace_rcu_utilization("Start fqs");
+ if (!rcu_gp_in_progress(rsp)) {
+ trace_rcu_utilization("End fqs");
return; /* No grace period in progress, nothing to force. */
+ }
if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
+ trace_rcu_utilization("End fqs");
return; /* Someone else is already on the job. */
}
if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
@@ -1412,11 +1492,13 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
rsp->fqs_need_gp = 0;
rcu_start_gp(rsp, flags); /* releases rnp->lock */
+ trace_rcu_utilization("End fqs");
return;
}
raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
unlock_fqs_ret:
raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
+ trace_rcu_utilization("End fqs");
}
#else /* #ifdef CONFIG_SMP */
@@ -1429,9 +1511,9 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
#endif /* #else #ifdef CONFIG_SMP */
/*
- * This does the RCU processing work from softirq context for the
- * specified rcu_state and rcu_data structures. This may be called
- * only from the CPU to whom the rdp belongs.
+ * This does the RCU core processing work for the specified rcu_state
+ * and rcu_data structures. This may be called only from the CPU to
+ * whom the rdp belongs.
*/
static void
__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
@@ -1468,24 +1550,24 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
}
/*
- * Do softirq processing for the current CPU.
+ * Do RCU core processing for the current CPU.
*/
static void rcu_process_callbacks(struct softirq_action *unused)
{
+ trace_rcu_utilization("Start RCU core");
__rcu_process_callbacks(&rcu_sched_state,
&__get_cpu_var(rcu_sched_data));
__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
rcu_preempt_process_callbacks();
-
- /* If we are last CPU on way to dyntick-idle mode, accelerate it. */
- rcu_needs_cpu_flush();
+ trace_rcu_utilization("End RCU core");
}
/*
- * Wake up the current CPU's kthread. This replaces raise_softirq()
- * in earlier versions of RCU. Note that because we are running on
- * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
- * cannot disappear out from under us.
+ * Schedule RCU callback invocation. If the specified type of RCU
+ * does not support RCU priority boosting, just do a direct call,
+ * otherwise wake up the per-CPU kernel kthread. Note that because we
+ * are running on the current CPU with interrupts disabled, the
+ * rcu_cpu_kthread_task cannot disappear out from under us.
*/
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
@@ -1530,6 +1612,12 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
rdp->qlen++;
+ if (__is_kfree_rcu_offset((unsigned long)func))
+ trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
+ rdp->qlen);
+ else
+ trace_rcu_callback(rsp->name, head, rdp->qlen);
+
/* If interrupts were disabled, don't dive into RCU core. */
if (irqs_disabled_flags(flags)) {
local_irq_restore(flags);
@@ -1613,18 +1701,9 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
*/
void synchronize_sched(void)
{
- struct rcu_synchronize rcu;
-
if (rcu_blocking_is_gp())
return;
-
- init_rcu_head_on_stack(&rcu.head);
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu_sched(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
- destroy_rcu_head_on_stack(&rcu.head);
+ wait_rcu_gp(call_rcu_sched);
}
EXPORT_SYMBOL_GPL(synchronize_sched);
@@ -1639,18 +1718,9 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
*/
void synchronize_rcu_bh(void)
{
- struct rcu_synchronize rcu;
-
if (rcu_blocking_is_gp())
return;
-
- init_rcu_head_on_stack(&rcu.head);
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu_bh(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
- destroy_rcu_head_on_stack(&rcu.head);
+ wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
@@ -1671,7 +1741,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
check_cpu_stall(rsp, rdp);
/* Is the RCU core waiting for a quiescent state from this CPU? */
- if (rdp->qs_pending && !rdp->passed_quiesc) {
+ if (rcu_scheduler_fully_active &&
+ rdp->qs_pending && !rdp->passed_quiesce) {
/*
* If force_quiescent_state() coming soon and this CPU
@@ -1683,7 +1754,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
jiffies))
set_need_resched();
- } else if (rdp->qs_pending && rdp->passed_quiesc) {
+ } else if (rdp->qs_pending && rdp->passed_quiesce) {
rdp->n_rp_report_qs++;
return 1;
}
@@ -1846,6 +1917,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
#endif /* #ifdef CONFIG_NO_HZ */
rdp->cpu = cpu;
+ rdp->rsp = rsp;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
@@ -1865,8 +1937,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
/* Set up local state, ensuring consistent view of global state. */
raw_spin_lock_irqsave(&rnp->lock, flags);
- rdp->passed_quiesc = 0; /* We could be racing with new GP, */
- rdp->qs_pending = 1; /* so set up to respond to current GP. */
rdp->beenonline = 1; /* We have now been online. */
rdp->preemptible = preemptible;
rdp->qlen_last_fqs_check = 0;
@@ -1891,9 +1961,17 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
rnp->qsmaskinit |= mask;
mask = rnp->grpmask;
if (rnp == rdp->mynode) {
- rdp->gpnum = rnp->completed; /* if GP in progress... */
+ /*
+ * If there is a grace period in progress, we will
+ * set up to wait for it next time we run the
+ * RCU core code.
+ */
+ rdp->gpnum = rnp->completed;
rdp->completed = rnp->completed;
- rdp->passed_quiesc_completed = rnp->completed - 1;
+ rdp->passed_quiesce = 0;
+ rdp->qs_pending = 0;
+ rdp->passed_quiesce_gpnum = rnp->gpnum - 1;
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl");
}
raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
rnp = rnp->parent;
@@ -1919,6 +1997,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
struct rcu_node *rnp = rdp->mynode;
+ trace_rcu_utilization("Start CPU hotplug");
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
@@ -1954,6 +2033,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
default:
break;
}
+ trace_rcu_utilization("End CPU hotplug");
return NOTIFY_OK;
}
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 01b2ccda26f..849ce9ec51f 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -230,9 +230,9 @@ struct rcu_data {
/* in order to detect GP end. */
unsigned long gpnum; /* Highest gp number that this CPU */
/* is aware of having started. */
- unsigned long passed_quiesc_completed;
- /* Value of completed at time of qs. */
- bool passed_quiesc; /* User-mode/idle loop etc. */
+ unsigned long passed_quiesce_gpnum;
+ /* gpnum at time of quiescent state. */
+ bool passed_quiesce; /* User-mode/idle loop etc. */
bool qs_pending; /* Core waits for quiesc state. */
bool beenonline; /* CPU online at least once. */
bool preemptible; /* Preemptible RCU? */
@@ -299,6 +299,7 @@ struct rcu_data {
unsigned long n_rp_need_nothing;
int cpu;
+ struct rcu_state *rsp;
};
/* Values for signaled field in struct rcu_state. */
@@ -417,6 +418,13 @@ extern struct rcu_state rcu_preempt_state;
DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#ifdef CONFIG_RCU_BOOST
+DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
+DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
+DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
+DECLARE_PER_CPU(char, rcu_cpu_has_work);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
#ifndef RCU_TREE_NONCORE
/* Forward declarations for rcutree_plugin.h */
@@ -430,7 +438,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
static void rcu_stop_cpu_kthread(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
-static void rcu_print_task_stall(struct rcu_node *rnp);
+static int rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_stall_reset(void);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
@@ -450,7 +458,6 @@ static int rcu_preempt_needs_cpu(int cpu);
static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
static void rcu_preempt_send_cbs_to_online(void);
static void __init __rcu_init_preempt(void);
-static void rcu_needs_cpu_flush(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 8aafbb80b8b..4b9b9f8a418 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -27,6 +27,14 @@
#include <linux/delay.h>
#include <linux/stop_machine.h>
+#define RCU_KTHREAD_PRIO 1
+
+#ifdef CONFIG_RCU_BOOST
+#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
+#else
+#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
+#endif
+
/*
* Check the RCU kernel configuration parameters and print informative
* messages about anything out of the ordinary. If you like #ifdef, you
@@ -64,7 +72,7 @@ static void __init rcu_bootup_announce_oddness(void)
#ifdef CONFIG_TREE_PREEMPT_RCU
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
+struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;
@@ -122,9 +130,11 @@ static void rcu_preempt_qs(int cpu)
{
struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
- rdp->passed_quiesc_completed = rdp->gpnum - 1;
+ rdp->passed_quiesce_gpnum = rdp->gpnum;
barrier();
- rdp->passed_quiesc = 1;
+ if (rdp->passed_quiesce == 0)
+ trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
+ rdp->passed_quiesce = 1;
current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}
@@ -190,6 +200,11 @@ static void rcu_preempt_note_context_switch(int cpu)
if (rnp->qsmask & rdp->grpmask)
rnp->gp_tasks = &t->rcu_node_entry;
}
+ trace_rcu_preempt_task(rdp->rsp->name,
+ t->pid,
+ (rnp->qsmask & rdp->grpmask)
+ ? rnp->gpnum
+ : rnp->gpnum + 1);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
} else if (t->rcu_read_lock_nesting < 0 &&
t->rcu_read_unlock_special) {
@@ -299,6 +314,9 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
int empty_exp;
unsigned long flags;
struct list_head *np;
+#ifdef CONFIG_RCU_BOOST
+ struct rt_mutex *rbmp = NULL;
+#endif /* #ifdef CONFIG_RCU_BOOST */
struct rcu_node *rnp;
int special;
@@ -344,6 +362,9 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
np = rcu_next_node_entry(t, rnp);
list_del_init(&t->rcu_node_entry);
+ t->rcu_blocked_node = NULL;
+ trace_rcu_unlock_preempted_task("rcu_preempt",
+ rnp->gpnum, t->pid);
if (&t->rcu_node_entry == rnp->gp_tasks)
rnp->gp_tasks = np;
if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -351,30 +372,34 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
#ifdef CONFIG_RCU_BOOST
if (&t->rcu_node_entry == rnp->boost_tasks)
rnp->boost_tasks = np;
- /* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
- if (t->rcu_boosted) {
- special |= RCU_READ_UNLOCK_BOOSTED;
- t->rcu_boosted = 0;
+ /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
+ if (t->rcu_boost_mutex) {
+ rbmp = t->rcu_boost_mutex;
+ t->rcu_boost_mutex = NULL;
}
#endif /* #ifdef CONFIG_RCU_BOOST */
- t->rcu_blocked_node = NULL;
/*
* If this was the last task on the current list, and if
* we aren't waiting on any CPUs, report the quiescent state.
* Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
*/
- if (empty)
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- else
+ if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
+ trace_rcu_quiescent_state_report("preempt_rcu",
+ rnp->gpnum,
+ 0, rnp->qsmask,
+ rnp->level,
+ rnp->grplo,
+ rnp->grphi,
+ !!rnp->gp_tasks);
rcu_report_unblock_qs_rnp(rnp, flags);
+ } else
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
#ifdef CONFIG_RCU_BOOST
/* Unboost if we were boosted. */
- if (special & RCU_READ_UNLOCK_BOOSTED) {
- rt_mutex_unlock(t->rcu_boost_mutex);
- t->rcu_boost_mutex = NULL;
- }
+ if (rbmp)
+ rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */
/*
@@ -399,10 +424,10 @@ void __rcu_read_unlock(void)
{
struct task_struct *t = current;
- barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
if (t->rcu_read_lock_nesting != 1)
--t->rcu_read_lock_nesting;
else {
+ barrier(); /* critical section before exit code. */
t->rcu_read_lock_nesting = INT_MIN;
barrier(); /* assign before ->rcu_read_unlock_special load */
if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
@@ -466,16 +491,20 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
* Scan the current list of tasks blocked within RCU read-side critical
* sections, printing out the tid of each.
*/
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
{
struct task_struct *t;
+ int ndetected = 0;
if (!rcu_preempt_blocked_readers_cgp(rnp))
- return;
+ return 0;
t = list_entry(rnp->gp_tasks,
struct task_struct, rcu_node_entry);
- list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
+ list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
printk(" P%d", t->pid);
+ ndetected++;
+ }
+ return ndetected;
}
/*
@@ -656,18 +685,9 @@ EXPORT_SYMBOL_GPL(call_rcu);
*/
void synchronize_rcu(void)
{
- struct rcu_synchronize rcu;
-
if (!rcu_scheduler_active)
return;
-
- init_rcu_head_on_stack(&rcu.head);
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
- destroy_rcu_head_on_stack(&rcu.head);
+ wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
@@ -968,8 +988,9 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
* Because preemptible RCU does not exist, we never have to check for
* tasks blocked within RCU read-side critical sections.
*/
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
{
+ return 0;
}
/*
@@ -1136,6 +1157,8 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
+static struct lock_class_key rcu_boost_class;
+
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
@@ -1198,8 +1221,10 @@ static int rcu_boost(struct rcu_node *rnp)
*/
t = container_of(tb, struct task_struct, rcu_node_entry);
rt_mutex_init_proxy_locked(&mtx, t);
+ /* Avoid lockdep false positives. This rt_mutex is its own thing. */
+ lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
+ "rcu_boost_mutex");
t->rcu_boost_mutex = &mtx;
- t->rcu_boosted = 1;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
@@ -1228,9 +1253,12 @@ static int rcu_boost_kthread(void *arg)
int spincnt = 0;
int more2boost;
+ trace_rcu_utilization("Start boost kthread@init");
for (;;) {
rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
+ trace_rcu_utilization("End boost kthread@rcu_wait");
rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
+ trace_rcu_utilization("Start boost kthread@rcu_wait");
rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
more2boost = rcu_boost(rnp);
if (more2boost)
@@ -1238,11 +1266,14 @@ static int rcu_boost_kthread(void *arg)
else
spincnt = 0;
if (spincnt > 10) {
+ trace_rcu_utilization("End boost kthread@rcu_yield");
rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
+ trace_rcu_utilization("Start boost kthread@rcu_yield");
spincnt = 0;
}
}
/* NOTREACHED */
+ trace_rcu_utilization("End boost kthread@notreached");
return 0;
}
@@ -1291,11 +1322,9 @@ static void invoke_rcu_callbacks_kthread(void)
local_irq_save(flags);
__this_cpu_write(rcu_cpu_has_work, 1);
- if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
- local_irq_restore(flags);
- return;
- }
- wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+ if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
+ current != __this_cpu_read(rcu_cpu_kthread_task))
+ wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
local_irq_restore(flags);
}
@@ -1343,13 +1372,13 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
if (rnp->boost_kthread_task != NULL)
return 0;
t = kthread_create(rcu_boost_kthread, (void *)rnp,
- "rcub%d", rnp_index);
+ "rcub/%d", rnp_index);
if (IS_ERR(t))
return PTR_ERR(t);
raw_spin_lock_irqsave(&rnp->lock, flags);
rnp->boost_kthread_task = t;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- sp.sched_priority = RCU_KTHREAD_PRIO;
+ sp.sched_priority = RCU_BOOST_PRIO;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
return 0;
@@ -1444,6 +1473,7 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
struct sched_param sp;
struct timer_list yield_timer;
+ int prio = current->rt_priority;
setup_timer_on_stack(&yield_timer, f, arg);
mod_timer(&yield_timer, jiffies + 2);
@@ -1451,7 +1481,8 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
set_user_nice(current, 19);
schedule();
- sp.sched_priority = RCU_KTHREAD_PRIO;
+ set_user_nice(current, 0);
+ sp.sched_priority = prio;
sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
del_timer(&yield_timer);
}
@@ -1489,7 +1520,8 @@ static int rcu_cpu_kthread_should_stop(int cpu)
/*
* Per-CPU kernel thread that invokes RCU callbacks. This replaces the
- * earlier RCU softirq.
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
*/
static int rcu_cpu_kthread(void *arg)
{
@@ -1500,9 +1532,12 @@ static int rcu_cpu_kthread(void *arg)
char work;
char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+ trace_rcu_utilization("Start CPU kthread@init");
for (;;) {
*statusp = RCU_KTHREAD_WAITING;
+ trace_rcu_utilization("End CPU kthread@rcu_wait");
rcu_wait(*workp != 0 || kthread_should_stop());
+ trace_rcu_utilization("Start CPU kthread@rcu_wait");
local_bh_disable();
if (rcu_cpu_kthread_should_stop(cpu)) {
local_bh_enable();
@@ -1523,11 +1558,14 @@ static int rcu_cpu_kthread(void *arg)
spincnt = 0;
if (spincnt > 10) {
*statusp = RCU_KTHREAD_YIELDING;
+ trace_rcu_utilization("End CPU kthread@rcu_yield");
rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+ trace_rcu_utilization("Start CPU kthread@rcu_yield");
spincnt = 0;
}
}
*statusp = RCU_KTHREAD_STOPPED;
+ trace_rcu_utilization("End CPU kthread@term");
return 0;
}
@@ -1560,7 +1598,10 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
if (!rcu_scheduler_fully_active ||
per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
return 0;
- t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+ t = kthread_create_on_node(rcu_cpu_kthread,
+ (void *)(long)cpu,
+ cpu_to_node(cpu),
+ "rcuc/%d", cpu);
if (IS_ERR(t))
return PTR_ERR(t);
if (cpu_online(cpu))
@@ -1669,7 +1710,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
return 0;
if (rnp->node_kthread_task == NULL) {
t = kthread_create(rcu_node_kthread, (void *)rnp,
- "rcun%d", rnp_index);
+ "rcun/%d", rnp_index);
if (IS_ERR(t))
return PTR_ERR(t);
raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1907,15 +1948,6 @@ int rcu_needs_cpu(int cpu)
return rcu_needs_cpu_quick_check(cpu);
}
-/*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode. But fast dyntick-idle
- * entry is not configured, so we never do need to.
- */
-static void rcu_needs_cpu_flush(void)
-{
-}
-
#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
#define RCU_NEEDS_CPU_FLUSHES 5
@@ -1991,20 +2023,4 @@ int rcu_needs_cpu(int cpu)
return c;
}
-/*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode.
- */
-static void rcu_needs_cpu_flush(void)
-{
- int cpu = smp_processor_id();
- unsigned long flags;
-
- if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
- return;
- local_irq_save(flags);
- (void)rcu_needs_cpu(cpu);
- local_irq_restore(flags);
-}
-
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 3b0c0986afc..9feffa4c069 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -48,11 +48,6 @@
#ifdef CONFIG_RCU_BOOST
-DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
-DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu);
-DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-DECLARE_PER_CPU(char, rcu_cpu_has_work);
-
static char convert_kthread_status(unsigned int kthread_status)
{
if (kthread_status > RCU_KTHREAD_MAX)
@@ -66,11 +61,11 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
{
if (!rdp->beenonline)
return;
- seq_printf(m, "%3d%cc=%lu g=%lu pq=%d pqc=%lu qp=%d",
+ seq_printf(m, "%3d%cc=%lu g=%lu pq=%d pgp=%lu qp=%d",
rdp->cpu,
cpu_is_offline(rdp->cpu) ? '!' : ' ',
rdp->completed, rdp->gpnum,
- rdp->passed_quiesc, rdp->passed_quiesc_completed,
+ rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
rdp->qs_pending);
#ifdef CONFIG_NO_HZ
seq_printf(m, " dt=%d/%d/%d df=%lu",
@@ -144,7 +139,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
rdp->cpu,
cpu_is_offline(rdp->cpu) ? "\"N\"" : "\"Y\"",
rdp->completed, rdp->gpnum,
- rdp->passed_quiesc, rdp->passed_quiesc_completed,
+ rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
rdp->qs_pending);
#ifdef CONFIG_NO_HZ
seq_printf(m, ",%d,%d,%d,%lu",
@@ -175,7 +170,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
static int show_rcudata_csv(struct seq_file *m, void *unused)
{
- seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",");
+ seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\",");
#ifdef CONFIG_NO_HZ
seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
#endif /* #ifdef CONFIG_NO_HZ */
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 3c7cbc2c33b..a2e7e7210f3 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -29,61 +29,6 @@
#include "rtmutex_common.h"
-# define TRACE_WARN_ON(x) WARN_ON(x)
-# define TRACE_BUG_ON(x) BUG_ON(x)
-
-# define TRACE_OFF() \
-do { \
- if (rt_trace_on) { \
- rt_trace_on = 0; \
- console_verbose(); \
- if (raw_spin_is_locked(&current->pi_lock)) \
- raw_spin_unlock(&current->pi_lock); \
- } \
-} while (0)
-
-# define TRACE_OFF_NOLOCK() \
-do { \
- if (rt_trace_on) { \
- rt_trace_on = 0; \
- console_verbose(); \
- } \
-} while (0)
-
-# define TRACE_BUG_LOCKED() \
-do { \
- TRACE_OFF(); \
- BUG(); \
-} while (0)
-
-# define TRACE_WARN_ON_LOCKED(c) \
-do { \
- if (unlikely(c)) { \
- TRACE_OFF(); \
- WARN_ON(1); \
- } \
-} while (0)
-
-# define TRACE_BUG_ON_LOCKED(c) \
-do { \
- if (unlikely(c)) \
- TRACE_BUG_LOCKED(); \
-} while (0)
-
-#ifdef CONFIG_SMP
-# define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c)
-#else
-# define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0)
-#endif
-
-/*
- * deadlock detection flag. We turn it off when we detect
- * the first problem because we dont want to recurse back
- * into the tracing code when doing error printk or
- * executing a BUG():
- */
-static int rt_trace_on = 1;
-
static void printk_task(struct task_struct *p)
{
if (p)
@@ -111,8 +56,8 @@ static void printk_lock(struct rt_mutex *lock, int print_owner)
void rt_mutex_debug_task_free(struct task_struct *task)
{
- WARN_ON(!plist_head_empty(&task->pi_waiters));
- WARN_ON(task->pi_blocked_on);
+ DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters));
+ DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
/*
@@ -125,7 +70,7 @@ void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
{
struct task_struct *task;
- if (!rt_trace_on || detect || !act_waiter)
+ if (!debug_locks || detect || !act_waiter)
return;
task = rt_mutex_owner(act_waiter->lock);
@@ -139,7 +84,7 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
{
struct task_struct *task;
- if (!waiter->deadlock_lock || !rt_trace_on)
+ if (!waiter->deadlock_lock || !debug_locks)
return;
rcu_read_lock();
@@ -149,7 +94,10 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
return;
}
- TRACE_OFF_NOLOCK();
+ if (!debug_locks_off()) {
+ rcu_read_unlock();
+ return;
+ }
printk("\n============================================\n");
printk( "[ BUG: circular locking deadlock detected! ]\n");
@@ -180,7 +128,6 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
printk("[ turning off deadlock detection."
"Please report this trace. ]\n\n");
- local_irq_disable();
}
void debug_rt_mutex_lock(struct rt_mutex *lock)
@@ -189,7 +136,7 @@ void debug_rt_mutex_lock(struct rt_mutex *lock)
void debug_rt_mutex_unlock(struct rt_mutex *lock)
{
- TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
+ DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
}
void
@@ -199,7 +146,7 @@ debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner)
void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
{
- TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock));
+ DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
}
void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
@@ -213,8 +160,8 @@ void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
put_pid(waiter->deadlock_task_pid);
- TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
- TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
+ DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry));
+ DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
memset(waiter, 0x22, sizeof(*waiter));
}
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 255e1662acd..5e8d9cce747 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -579,6 +579,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct rt_mutex_waiter *waiter)
{
int ret = 0;
+ int was_disabled;
for (;;) {
/* Try to acquire the lock: */
@@ -601,10 +602,17 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
raw_spin_unlock(&lock->wait_lock);
+ was_disabled = irqs_disabled();
+ if (was_disabled)
+ local_irq_enable();
+
debug_rt_mutex_print_deadlock(waiter);
schedule_rt_mutex(lock);
+ if (was_disabled)
+ local_irq_disable();
+
raw_spin_lock(&lock->wait_lock);
set_current_state(state);
}
diff --git a/kernel/sched.c b/kernel/sched.c
index b50b0f0c9aa..03ad0113801 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1739,7 +1739,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
#ifdef CONFIG_SMP
/*
* After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
- * successfuly executed on another CPU. We must ensure that updates of
+ * successfully executed on another CPU. We must ensure that updates of
* per-task data have been completed by this moment.
*/
smp_wmb();
@@ -4213,6 +4213,7 @@ static inline void schedule_debug(struct task_struct *prev)
*/
if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
__schedule_bug(prev);
+ rcu_sleep_check();
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
@@ -5955,15 +5956,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
}
/*
- * In a system that switches off the HZ timer nohz_cpu_mask
- * indicates which cpus entered this state. This is used
- * in the rcu update to wait only for active cpus. For system
- * which do not switch off the HZ timer nohz_cpu_mask should
- * always be CPU_BITS_NONE.
- */
-cpumask_var_t nohz_cpu_mask;
-
-/*
* Increase the granularity value when there are more CPUs,
* because with more CPUs the 'effective latency' as visible
* to users decreases. But the relationship is not linear,
@@ -8175,8 +8167,6 @@ void __init sched_init(void)
*/
current->sched_class = &fair_sched_class;
- /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
- zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
#ifdef CONFIG_SMP
zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
#ifdef CONFIG_NO_HZ
@@ -8206,6 +8196,7 @@ void __might_sleep(const char *file, int line, int preempt_offset)
{
static unsigned long prev_jiffy; /* ratelimiting */
+ rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
system_state != SYSTEM_RUNNING || oops_in_progress)
return;
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 331e01bcd02..87f9e36ea56 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -282,10 +282,10 @@ static inline void account_group_user_time(struct task_struct *tsk,
if (!cputimer->running)
return;
- spin_lock(&cputimer->lock);
+ raw_spin_lock(&cputimer->lock);
cputimer->cputime.utime =
cputime_add(cputimer->cputime.utime, cputime);
- spin_unlock(&cputimer->lock);
+ raw_spin_unlock(&cputimer->lock);
}
/**
@@ -306,10 +306,10 @@ static inline void account_group_system_time(struct task_struct *tsk,
if (!cputimer->running)
return;
- spin_lock(&cputimer->lock);
+ raw_spin_lock(&cputimer->lock);
cputimer->cputime.stime =
cputime_add(cputimer->cputime.stime, cputime);
- spin_unlock(&cputimer->lock);
+ raw_spin_unlock(&cputimer->lock);
}
/**
@@ -330,7 +330,7 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
if (!cputimer->running)
return;
- spin_lock(&cputimer->lock);
+ raw_spin_lock(&cputimer->lock);
cputimer->cputime.sum_exec_runtime += ns;
- spin_unlock(&cputimer->lock);
+ raw_spin_unlock(&cputimer->lock);
}
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 94a62c0d4ad..d831841e55a 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -54,12 +54,12 @@ void down(struct semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->lock, flags);
+ raw_spin_lock_irqsave(&sem->lock, flags);
if (likely(sem->count > 0))
sem->count--;
else
__down(sem);
- spin_unlock_irqrestore(&sem->lock, flags);
+ raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);
@@ -77,12 +77,12 @@ int down_interruptible(struct semaphore *sem)
unsigned long flags;
int result = 0;
- spin_lock_irqsave(&sem->lock, flags);
+ raw_spin_lock_irqsave(&sem->lock, flags);
if (likely(sem->count > 0))
sem->count--;
else
result = __down_interruptible(sem);
- spin_unlock_irqrestore(&sem->lock, flags);
+ raw_spin_unlock_irqrestore(&sem->lock, flags);
return result;
}
@@ -103,12 +103,12 @@ int down_killable(struct semaphore *sem)
unsigned long flags;
int result = 0;
- spin_lock_irqsave(&sem->lock, flags);
+ raw_spin_lock_irqsave(&sem->lock, flags);
if (likely(sem->count > 0))
sem->count--;
else
result = __down_killable(sem);
- spin_unlock_irqrestore(&sem->lock, flags);
+ raw_spin_unlock_irqrestore(&sem->lock, flags);
return result;
}
@@ -132,11 +132,11 @@ int down_trylock(struct semaphore *sem)
unsigned long flags;
int count;
- spin_lock_irqsave(&sem->lock, flags);
+ raw_spin_lock_irqsave(&sem->lock, flags);
count = sem->count - 1;
if (likely(count >= 0))
sem->count = count;
- spin_unlock_irqrestore(&sem->lock, flags);
+ raw_spin_unlock_irqrestore(&sem->lock, flags);
return (count < 0);
}
@@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem, long jiffies)
unsigned long flags;
int result = 0;
- spin_lock_irqsave(&sem->lock, flags);
+ raw_spin_lock_irqsave(&sem->lock, flags);
if (likely(sem->count > 0))
sem->count--;
else
result = __down_timeout(sem, jiffies);
- spin_unlock_irqrestore(&sem->lock, flags);
+ raw_spin_unlock_irqrestore(&sem->lock, flags);
return result;
}
@@ -179,12 +179,12 @@ void up(struct semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->lock, flags);
+ raw_spin_lock_irqsave(&sem->lock, flags);
if (likely(list_empty(&sem->wait_list)))
sem->count++;
else
__up(sem);
- spin_unlock_irqrestore(&sem->lock, flags);
+ raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);
@@ -217,9 +217,9 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
if (timeout <= 0)
goto timed_out;
__set_task_state(task, state);
- spin_unlock_irq(&sem->lock);
+ raw_spin_unlock_irq(&sem->lock);
timeout = schedule_timeout(timeout);
- spin_lock_irq(&sem->lock);
+ raw_spin_lock_irq(&sem->lock);
if (waiter.up)
return 0;
}
diff --git a/kernel/signal.c b/kernel/signal.c
index 291c9700be7..d252be2d3de 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1344,13 +1344,24 @@ int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
return error;
}
+static int kill_as_cred_perm(const struct cred *cred,
+ struct task_struct *target)
+{
+ const struct cred *pcred = __task_cred(target);
+ if (cred->user_ns != pcred->user_ns)
+ return 0;
+ if (cred->euid != pcred->suid && cred->euid != pcred->uid &&
+ cred->uid != pcred->suid && cred->uid != pcred->uid)
+ return 0;
+ return 1;
+}
+
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
-int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
- uid_t uid, uid_t euid, u32 secid)
+int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
+ const struct cred *cred, u32 secid)
{
int ret = -EINVAL;
struct task_struct *p;
- const struct cred *pcred;
unsigned long flags;
if (!valid_signal(sig))
@@ -1362,10 +1373,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
ret = -ESRCH;
goto out_unlock;
}
- pcred = __task_cred(p);
- if (si_fromuser(info) &&
- euid != pcred->suid && euid != pcred->uid &&
- uid != pcred->suid && uid != pcred->uid) {
+ if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
ret = -EPERM;
goto out_unlock;
}
@@ -1384,7 +1392,7 @@ out_unlock:
rcu_read_unlock();
return ret;
}
-EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
+EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
* kill_something_info() interprets pid in interesting ways just like kill(2).
diff --git a/kernel/sys.c b/kernel/sys.c
index 18ee1d2f647..58459509b14 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1172,7 +1172,7 @@ DECLARE_RWSEM(uts_sem);
static int override_release(char __user *release, int len)
{
int ret = 0;
- char buf[len];
+ char buf[65];
if (current->personality & UNAME26) {
char *rest = UTS_RELEASE;
@@ -1759,6 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
sizeof(me->comm) - 1) < 0)
return -EFAULT;
set_task_comm(me, comm);
+ proc_comm_connector(me);
return 0;
case PR_GET_NAME:
get_task_comm(comm, me);
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index e8bffbe2ba4..6318b511afa 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -214,7 +214,7 @@ static const struct bin_table bin_net_ipv4_route_table[] = {
{ CTL_INT, NET_IPV4_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
{ CTL_INT, NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
{ CTL_INT, NET_IPV4_ROUTE_GC_TIMEOUT, "gc_timeout" },
- { CTL_INT, NET_IPV4_ROUTE_GC_INTERVAL, "gc_interval" },
+ /* NET_IPV4_ROUTE_GC_INTERVAL "gc_interval" no longer used */
{ CTL_INT, NET_IPV4_ROUTE_REDIRECT_LOAD, "redirect_load" },
{ CTL_INT, NET_IPV4_ROUTE_REDIRECT_NUMBER, "redirect_number" },
{ CTL_INT, NET_IPV4_ROUTE_REDIRECT_SILENCE, "redirect_silence" },
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d5097c44b40..eb98e55196b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -139,7 +139,6 @@ static void tick_nohz_update_jiffies(ktime_t now)
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
unsigned long flags;
- cpumask_clear_cpu(cpu, nohz_cpu_mask);
ts->idle_waketime = now;
local_irq_save(flags);
@@ -389,9 +388,6 @@ void tick_nohz_stop_sched_tick(int inidle)
else
expires.tv64 = KTIME_MAX;
- if (delta_jiffies > 1)
- cpumask_set_cpu(cpu, nohz_cpu_mask);
-
/* Skip reprogram of event if its not changed */
if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
goto out;
@@ -441,7 +437,6 @@ void tick_nohz_stop_sched_tick(int inidle)
* softirq.
*/
tick_do_update_jiffies64(ktime_get());
- cpumask_clear_cpu(cpu, nohz_cpu_mask);
}
raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
@@ -524,7 +519,6 @@ void tick_nohz_restart_sched_tick(void)
/* Update jiffies first */
select_nohz_load_balancer(0);
tick_do_update_jiffies64(now);
- cpumask_clear_cpu(cpu, nohz_cpu_mask);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
/*
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index a5d0a3a85dd..0b537f27b55 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -81,7 +81,7 @@ struct entry {
/*
* Spinlock protecting the tables - not taken during lookup:
*/
-static DEFINE_SPINLOCK(table_lock);
+static DEFINE_RAW_SPINLOCK(table_lock);
/*
* Per-CPU lookup locks for fast hash lookup:
@@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
prev = NULL;
curr = *head;
- spin_lock(&table_lock);
+ raw_spin_lock(&table_lock);
/*
* Make sure we have not raced with another CPU:
*/
@@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
*head = curr;
}
out_unlock:
- spin_unlock(&table_lock);
+ raw_spin_unlock(&table_lock);
return curr;
}
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index b384ed512ba..5f39a07fe5e 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -55,6 +55,9 @@ endif
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
obj-$(CONFIG_TRACEPOINTS) += power-traces.o
+ifeq ($(CONFIG_PM_RUNTIME),y)
+obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
+endif
ifeq ($(CONFIG_TRACING),y)
obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
endif
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index acf6b68dc4a..f5b7b5c1195 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -478,7 +478,7 @@ struct ring_buffer_per_cpu {
int cpu;
atomic_t record_disabled;
struct ring_buffer *buffer;
- spinlock_t reader_lock; /* serialize readers */
+ raw_spinlock_t reader_lock; /* serialize readers */
arch_spinlock_t lock;
struct lock_class_key lock_key;
struct list_head *pages;
@@ -1064,7 +1064,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
cpu_buffer->cpu = cpu;
cpu_buffer->buffer = buffer;
- spin_lock_init(&cpu_buffer->reader_lock);
+ raw_spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -1261,7 +1261,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
struct list_head *p;
unsigned i;
- spin_lock_irq(&cpu_buffer->reader_lock);
+ raw_spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer);
for (i = 0; i < nr_pages; i++) {
@@ -1279,7 +1279,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
rb_check_pages(cpu_buffer);
out:
- spin_unlock_irq(&cpu_buffer->reader_lock);
+ raw_spin_unlock_irq(&cpu_buffer->reader_lock);
}
static void
@@ -1290,7 +1290,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
struct list_head *p;
unsigned i;
- spin_lock_irq(&cpu_buffer->reader_lock);
+ raw_spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer);
for (i = 0; i < nr_pages; i++) {
@@ -1305,7 +1305,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
rb_check_pages(cpu_buffer);
out:
- spin_unlock_irq(&cpu_buffer->reader_lock);
+ raw_spin_unlock_irq(&cpu_buffer->reader_lock);
}
/**
@@ -2689,7 +2689,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
/*
* if the tail is on reader_page, oldest time stamp is on the reader
* page
@@ -2699,7 +2699,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
else
bpage = rb_set_head_page(cpu_buffer);
ret = bpage->page->time_stamp;
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return ret;
}
@@ -2869,9 +2869,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
cpu_buffer = iter->cpu_buffer;
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_iter_reset(iter);
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
@@ -3330,12 +3330,12 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
again:
local_irq_save(flags);
if (dolock)
- spin_lock(&cpu_buffer->reader_lock);
+ raw_spin_lock(&cpu_buffer->reader_lock);
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
rb_advance_reader(cpu_buffer);
if (dolock)
- spin_unlock(&cpu_buffer->reader_lock);
+ raw_spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
@@ -3360,9 +3360,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
unsigned long flags;
again:
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_iter_peek(iter, ts);
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
@@ -3402,7 +3402,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
if (dolock)
- spin_lock(&cpu_buffer->reader_lock);
+ raw_spin_lock(&cpu_buffer->reader_lock);
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event) {
@@ -3411,7 +3411,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
}
if (dolock)
- spin_unlock(&cpu_buffer->reader_lock);
+ raw_spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);
out:
@@ -3503,11 +3503,11 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)
cpu_buffer = iter->cpu_buffer;
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
arch_spin_lock(&cpu_buffer->lock);
rb_iter_reset(iter);
arch_spin_unlock(&cpu_buffer->lock);
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);
@@ -3542,7 +3542,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
again:
event = rb_iter_peek(iter, ts);
if (!event)
@@ -3553,7 +3553,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
rb_advance_iter(iter);
out:
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return event;
}
@@ -3624,7 +3624,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
atomic_inc(&cpu_buffer->record_disabled);
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
goto out;
@@ -3636,7 +3636,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
arch_spin_unlock(&cpu_buffer->lock);
out:
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
atomic_dec(&cpu_buffer->record_disabled);
}
@@ -3674,10 +3674,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
if (dolock)
- spin_lock(&cpu_buffer->reader_lock);
+ raw_spin_lock(&cpu_buffer->reader_lock);
ret = rb_per_cpu_empty(cpu_buffer);
if (dolock)
- spin_unlock(&cpu_buffer->reader_lock);
+ raw_spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);
if (!ret)
@@ -3708,10 +3708,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
if (dolock)
- spin_lock(&cpu_buffer->reader_lock);
+ raw_spin_lock(&cpu_buffer->reader_lock);
ret = rb_per_cpu_empty(cpu_buffer);
if (dolock)
- spin_unlock(&cpu_buffer->reader_lock);
+ raw_spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);
return ret;
@@ -3908,7 +3908,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
if (!bpage)
goto out;
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
reader = rb_get_reader_page(cpu_buffer);
if (!reader)
@@ -4032,7 +4032,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
out_unlock:
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
out:
return ret;
diff --git a/kernel/trace/rpm-traces.c b/kernel/trace/rpm-traces.c
new file mode 100644
index 00000000000..4b3b5eaf94d
--- /dev/null
+++ b/kernel/trace/rpm-traces.c
@@ -0,0 +1,20 @@
+/*
+ * Power trace points
+ *
+ * Copyright (C) 2009 Ming Lei <ming.lei@canonical.com>
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/rpm.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_return_int);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_idle);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_suspend);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_resume);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b24a72d3500..f2bd275bb60 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -341,7 +341,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
+static DEFINE_RAW_SPINLOCK(tracing_start_lock);
static void wakeup_work_handler(struct work_struct *work)
{
@@ -961,7 +961,7 @@ void tracing_start(void)
if (tracing_disabled)
return;
- spin_lock_irqsave(&tracing_start_lock, flags);
+ raw_spin_lock_irqsave(&tracing_start_lock, flags);
if (--trace_stop_count) {
if (trace_stop_count < 0) {
/* Someone screwed up their debugging */
@@ -986,7 +986,7 @@ void tracing_start(void)
ftrace_start();
out:
- spin_unlock_irqrestore(&tracing_start_lock, flags);
+ raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}
/**
@@ -1001,7 +1001,7 @@ void tracing_stop(void)
unsigned long flags;
ftrace_stop();
- spin_lock_irqsave(&tracing_start_lock, flags);
+ raw_spin_lock_irqsave(&tracing_start_lock, flags);
if (trace_stop_count++)
goto out;
@@ -1019,7 +1019,7 @@ void tracing_stop(void)
arch_spin_unlock(&ftrace_max_lock);
out:
- spin_unlock_irqrestore(&tracing_start_lock, flags);
+ raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}
void trace_stop_cmdline_recording(void);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index a1a3359996a..20dad0d7a16 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -23,7 +23,7 @@ static int tracer_enabled __read_mostly;
static DEFINE_PER_CPU(int, tracing_cpu);
-static DEFINE_SPINLOCK(max_trace_lock);
+static DEFINE_RAW_SPINLOCK(max_trace_lock);
enum {
TRACER_IRQS_OFF = (1 << 1),
@@ -321,7 +321,7 @@ check_critical_timing(struct trace_array *tr,
if (!report_latency(delta))
goto out;
- spin_lock_irqsave(&max_trace_lock, flags);
+ raw_spin_lock_irqsave(&max_trace_lock, flags);
/* check if we are still the max latency */
if (!report_latency(delta))
@@ -344,7 +344,7 @@ check_critical_timing(struct trace_array *tr,
max_sequence++;
out_unlock:
- spin_unlock_irqrestore(&max_trace_lock, flags);
+ raw_spin_unlock_irqrestore(&max_trace_lock, flags);
out:
data->critical_sequence = max_sequence;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c0cb9c4bc46..75330bd8756 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -117,31 +117,31 @@ config DEBUG_SECTION_MISMATCH
help
The section mismatch analysis checks if there are illegal
references from one section to another section.
- Linux will during link or during runtime drop some sections
- and any use of code/data previously in these sections will
+ During linktime or runtime, some sections are dropped;
+ any use of code/data previously in these sections would
most likely result in an oops.
- In the code functions and variables are annotated with
- __init, __devinit etc. (see full list in include/linux/init.h)
+ In the code, functions and variables are annotated with
+ __init, __devinit, etc. (see the full list in include/linux/init.h),
which results in the code/data being placed in specific sections.
- The section mismatch analysis is always done after a full
- kernel build but enabling this option will in addition
- do the following:
- - Add the option -fno-inline-functions-called-once to gcc
- When inlining a function annotated __init in a non-init
- function we would lose the section information and thus
+ The section mismatch analysis is always performed after a full
+ kernel build, and enabling this option causes the following
+ additional steps to occur:
+ - Add the option -fno-inline-functions-called-once to gcc commands.
+ When inlining a function annotated with __init in a non-init
+ function, we would lose the section information and thus
the analysis would not catch the illegal reference.
- This option tells gcc to inline less but will also
- result in a larger kernel.
- - Run the section mismatch analysis for each module/built-in.o
- When we run the section mismatch analysis on vmlinux.o we
+ This option tells gcc to inline less (but it does result in
+ a larger kernel).
+ - Run the section mismatch analysis for each module/built-in.o file.
+ When we run the section mismatch analysis on vmlinux.o, we
lose valueble information about where the mismatch was
introduced.
Running the analysis for each module/built-in.o file
- will tell where the mismatch happens much closer to the
- source. The drawback is that we will report the same
- mismatch at least twice.
- - Enable verbose reporting from modpost to help solving
- the section mismatches reported.
+ tells where the mismatch happens much closer to the
+ source. The drawback is that the same mismatch is
+ reported at least twice.
+ - Enable verbose reporting from modpost in order to help resolve
+ the section mismatches that are reported.
config DEBUG_KERNEL
bool "Kernel debugging"
@@ -835,7 +835,7 @@ config DEBUG_CREDENTIALS
#
# Select this config option from the architecture Kconfig, if it
-# it is preferred to always offer frame pointers as a config
+# is preferred to always offer frame pointers as a config
# option on the architecture (regardless of KERNEL_DEBUG):
#
config ARCH_WANT_FRAME_POINTERS
@@ -1081,7 +1081,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE
+ select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
help
Provide stacktrace filter for fault-injection capabilities
@@ -1091,7 +1091,7 @@ config LATENCYTOP
depends on DEBUG_KERNEL
depends on STACKTRACE_SUPPORT
depends on PROC_FS
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
diff --git a/lib/atomic64.c b/lib/atomic64.c
index e12ae0dd08a..3975470caf4 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -29,11 +29,11 @@
* Ensure each lock is in a separate cacheline.
*/
static union {
- spinlock_t lock;
+ raw_spinlock_t lock;
char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
-static inline spinlock_t *lock_addr(const atomic64_t *v)
+static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
unsigned long addr = (unsigned long) v;
@@ -45,12 +45,12 @@ static inline spinlock_t *lock_addr(const atomic64_t *v)
long long atomic64_read(const atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_read);
@@ -58,34 +58,34 @@ EXPORT_SYMBOL(atomic64_read);
void atomic64_set(atomic64_t *v, long long i)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
v->counter = i;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
void atomic64_add(long long a, atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
v->counter += a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);
long long atomic64_add_return(long long a, atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter += a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_add_return);
@@ -93,23 +93,23 @@ EXPORT_SYMBOL(atomic64_add_return);
void atomic64_sub(long long a, atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
v->counter -= a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);
long long atomic64_sub_return(long long a, atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter -= a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_sub_return);
@@ -117,14 +117,14 @@ EXPORT_SYMBOL(atomic64_sub_return);
long long atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter - 1;
if (val >= 0)
v->counter = val;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
@@ -132,14 +132,14 @@ EXPORT_SYMBOL(atomic64_dec_if_positive);
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter;
if (val == o)
v->counter = n;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
@@ -147,13 +147,13 @@ EXPORT_SYMBOL(atomic64_cmpxchg);
long long atomic64_xchg(atomic64_t *v, long long new)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter;
v->counter = new;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_xchg);
@@ -161,15 +161,15 @@ EXPORT_SYMBOL(atomic64_xchg);
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
int ret = 0;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
if (v->counter != u) {
v->counter += a;
ret = 1;
}
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
@@ -179,7 +179,7 @@ static int init_atomic64_lock(void)
int i;
for (i = 0; i < NR_LOCKS; ++i)
- spin_lock_init(&atomic64_lock[i].lock);
+ raw_spin_lock_init(&atomic64_lock[i].lock);
return 0;
}
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 75ca78f3a8c..dcdade39e47 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -10,11 +10,12 @@
* Copyright (C) 2011 Bart Van Assche. All Rights Reserved.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kallsyms.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
@@ -30,6 +31,8 @@
#include <linux/jump_label.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
extern struct _ddebug __start___verbose[];
extern struct _ddebug __stop___verbose[];
@@ -38,7 +41,6 @@ struct ddebug_table {
struct list_head link;
char *mod_name;
unsigned int num_ddebugs;
- unsigned int num_enabled;
struct _ddebug *ddebugs;
};
@@ -148,19 +150,13 @@ static void ddebug_change(const struct ddebug_query *query,
newflags = (dp->flags & mask) | flags;
if (newflags == dp->flags)
continue;
-
- if (!newflags)
- dt->num_enabled--;
- else if (!dp->flags)
- dt->num_enabled++;
dp->flags = newflags;
if (newflags)
dp->enabled = 1;
else
dp->enabled = 0;
if (verbose)
- printk(KERN_INFO
- "ddebug: changed %s:%d [%s]%s %s\n",
+ pr_info("changed %s:%d [%s]%s %s\n",
dp->filename, dp->lineno,
dt->mod_name, dp->function,
ddebug_describe_flags(dp, flagbuf,
@@ -170,7 +166,7 @@ static void ddebug_change(const struct ddebug_query *query,
mutex_unlock(&ddebug_lock);
if (!nfound && verbose)
- printk(KERN_INFO "ddebug: no matches for query\n");
+ pr_info("no matches for query\n");
}
/*
@@ -215,10 +211,10 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
if (verbose) {
int i;
- printk(KERN_INFO "%s: split into words:", __func__);
+ pr_info("split into words:");
for (i = 0 ; i < nwords ; i++)
- printk(" \"%s\"", words[i]);
- printk("\n");
+ pr_cont(" \"%s\"", words[i]);
+ pr_cont("\n");
}
return nwords;
@@ -330,16 +326,15 @@ static int ddebug_parse_query(char *words[], int nwords,
}
} else {
if (verbose)
- printk(KERN_ERR "%s: unknown keyword \"%s\"\n",
- __func__, words[i]);
+ pr_err("unknown keyword \"%s\"\n", words[i]);
return -EINVAL;
}
}
if (verbose)
- printk(KERN_INFO "%s: q->function=\"%s\" q->filename=\"%s\" "
- "q->module=\"%s\" q->format=\"%s\" q->lineno=%u-%u\n",
- __func__, query->function, query->filename,
+ pr_info("q->function=\"%s\" q->filename=\"%s\" "
+ "q->module=\"%s\" q->format=\"%s\" q->lineno=%u-%u\n",
+ query->function, query->filename,
query->module, query->format, query->first_lineno,
query->last_lineno);
@@ -368,7 +363,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
return -EINVAL;
}
if (verbose)
- printk(KERN_INFO "%s: op='%c'\n", __func__, op);
+ pr_info("op='%c'\n", op);
for ( ; *str ; ++str) {
for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
@@ -383,7 +378,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
if (flags == 0)
return -EINVAL;
if (verbose)
- printk(KERN_INFO "%s: flags=0x%x\n", __func__, flags);
+ pr_info("flags=0x%x\n", flags);
/* calculate final *flagsp, *maskp according to mask and op */
switch (op) {
@@ -401,8 +396,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
break;
}
if (verbose)
- printk(KERN_INFO "%s: *flagsp=0x%x *maskp=0x%x\n",
- __func__, *flagsp, *maskp);
+ pr_info("*flagsp=0x%x *maskp=0x%x\n", *flagsp, *maskp);
return 0;
}
@@ -427,40 +421,117 @@ static int ddebug_exec_query(char *query_string)
return 0;
}
+#define PREFIX_SIZE 64
+
+static int remaining(int wrote)
+{
+ if (PREFIX_SIZE - wrote > 0)
+ return PREFIX_SIZE - wrote;
+ return 0;
+}
+
+static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
+{
+ int pos_after_tid;
+ int pos = 0;
+
+ pos += snprintf(buf + pos, remaining(pos), "%s", KERN_DEBUG);
+ if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
+ if (in_interrupt())
+ pos += snprintf(buf + pos, remaining(pos), "%s ",
+ "<intr>");
+ else
+ pos += snprintf(buf + pos, remaining(pos), "[%d] ",
+ task_pid_vnr(current));
+ }
+ pos_after_tid = pos;
+ if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME)
+ pos += snprintf(buf + pos, remaining(pos), "%s:",
+ desc->modname);
+ if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
+ pos += snprintf(buf + pos, remaining(pos), "%s:",
+ desc->function);
+ if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO)
+ pos += snprintf(buf + pos, remaining(pos), "%d:", desc->lineno);
+ if (pos - pos_after_tid)
+ pos += snprintf(buf + pos, remaining(pos), " ");
+ if (pos >= PREFIX_SIZE)
+ buf[PREFIX_SIZE - 1] = '\0';
+
+ return buf;
+}
+
int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
{
va_list args;
int res;
+ struct va_format vaf;
+ char buf[PREFIX_SIZE];
BUG_ON(!descriptor);
BUG_ON(!fmt);
va_start(args, fmt);
- res = printk(KERN_DEBUG);
- if (descriptor->flags & _DPRINTK_FLAGS_INCL_TID) {
- if (in_interrupt())
- res += printk(KERN_CONT "<intr> ");
- else
- res += printk(KERN_CONT "[%d] ", task_pid_vnr(current));
- }
- if (descriptor->flags & _DPRINTK_FLAGS_INCL_MODNAME)
- res += printk(KERN_CONT "%s:", descriptor->modname);
- if (descriptor->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
- res += printk(KERN_CONT "%s:", descriptor->function);
- if (descriptor->flags & _DPRINTK_FLAGS_INCL_LINENO)
- res += printk(KERN_CONT "%d ", descriptor->lineno);
- res += vprintk(fmt, args);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ res = printk("%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
va_end(args);
return res;
}
EXPORT_SYMBOL(__dynamic_pr_debug);
+int __dynamic_dev_dbg(struct _ddebug *descriptor,
+ const struct device *dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int res;
+ char buf[PREFIX_SIZE];
+
+ BUG_ON(!descriptor);
+ BUG_ON(!fmt);
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ res = __dev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+ va_end(args);
+
+ return res;
+}
+EXPORT_SYMBOL(__dynamic_dev_dbg);
+
+#ifdef CONFIG_NET
+
+int __dynamic_netdev_dbg(struct _ddebug *descriptor,
+ const struct net_device *dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int res;
+ char buf[PREFIX_SIZE];
+
+ BUG_ON(!descriptor);
+ BUG_ON(!fmt);
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ res = __netdev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+ va_end(args);
+
+ return res;
+}
+EXPORT_SYMBOL(__dynamic_netdev_dbg);
+
+#endif
+
static __initdata char ddebug_setup_string[1024];
static __init int ddebug_setup_query(char *str)
{
if (strlen(str) >= 1024) {
- pr_warning("ddebug boot param string too large\n");
+ pr_warn("ddebug boot param string too large\n");
return 0;
}
strcpy(ddebug_setup_string, str);
@@ -488,8 +559,7 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
return -EFAULT;
tmpbuf[len] = '\0';
if (verbose)
- printk(KERN_INFO "%s: read %d bytes from userspace\n",
- __func__, (int)len);
+ pr_info("read %d bytes from userspace\n", (int)len);
ret = ddebug_exec_query(tmpbuf);
if (ret)
@@ -552,8 +622,7 @@ static void *ddebug_proc_start(struct seq_file *m, loff_t *pos)
int n = *pos;
if (verbose)
- printk(KERN_INFO "%s: called m=%p *pos=%lld\n",
- __func__, m, (unsigned long long)*pos);
+ pr_info("called m=%p *pos=%lld\n", m, (unsigned long long)*pos);
mutex_lock(&ddebug_lock);
@@ -578,8 +647,8 @@ static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
struct _ddebug *dp;
if (verbose)
- printk(KERN_INFO "%s: called m=%p p=%p *pos=%lld\n",
- __func__, m, p, (unsigned long long)*pos);
+ pr_info("called m=%p p=%p *pos=%lld\n",
+ m, p, (unsigned long long)*pos);
if (p == SEQ_START_TOKEN)
dp = ddebug_iter_first(iter);
@@ -602,8 +671,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
char flagsbuf[8];
if (verbose)
- printk(KERN_INFO "%s: called m=%p p=%p\n",
- __func__, m, p);
+ pr_info("called m=%p p=%p\n", m, p);
if (p == SEQ_START_TOKEN) {
seq_puts(m,
@@ -628,8 +696,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
static void ddebug_proc_stop(struct seq_file *m, void *p)
{
if (verbose)
- printk(KERN_INFO "%s: called m=%p p=%p\n",
- __func__, m, p);
+ pr_info("called m=%p p=%p\n", m, p);
mutex_unlock(&ddebug_lock);
}
@@ -652,7 +719,7 @@ static int ddebug_proc_open(struct inode *inode, struct file *file)
int err;
if (verbose)
- printk(KERN_INFO "%s: called\n", __func__);
+ pr_info("called\n");
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (iter == NULL)
@@ -696,7 +763,6 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
}
dt->mod_name = new_name;
dt->num_ddebugs = n;
- dt->num_enabled = 0;
dt->ddebugs = tab;
mutex_lock(&ddebug_lock);
@@ -704,8 +770,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
mutex_unlock(&ddebug_lock);
if (verbose)
- printk(KERN_INFO "%u debug prints in module %s\n",
- n, dt->mod_name);
+ pr_info("%u debug prints in module %s\n", n, dt->mod_name);
return 0;
}
EXPORT_SYMBOL_GPL(ddebug_add_module);
@@ -727,8 +792,7 @@ int ddebug_remove_module(const char *mod_name)
int ret = -ENOENT;
if (verbose)
- printk(KERN_INFO "%s: removing module \"%s\"\n",
- __func__, mod_name);
+ pr_info("removing module \"%s\"\n", mod_name);
mutex_lock(&ddebug_lock);
list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
@@ -804,8 +868,8 @@ static int __init dynamic_debug_init(void)
if (ddebug_setup_string[0] != '\0') {
ret = ddebug_exec_query(ddebug_setup_string);
if (ret)
- pr_warning("Invalid ddebug boot param %s",
- ddebug_setup_string);
+ pr_warn("Invalid ddebug boot param %s",
+ ddebug_setup_string);
else
pr_info("ddebug initialized with string %s",
ddebug_setup_string);
diff --git a/lib/hexdump.c b/lib/hexdump.c
index f5fe6ba7a3a..51d5ae21024 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -38,14 +38,21 @@ EXPORT_SYMBOL(hex_to_bin);
* @dst: binary result
* @src: ascii hexadecimal string
* @count: result length
+ *
+ * Return 0 on success, -1 in case of bad input.
*/
-void hex2bin(u8 *dst, const char *src, size_t count)
+int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
- *dst = hex_to_bin(*src++) << 4;
- *dst += hex_to_bin(*src++);
- dst++;
+ int hi = hex_to_bin(*src++);
+ int lo = hex_to_bin(*src++);
+
+ if ((hi < 0) || (lo < 0))
+ return -1;
+
+ *dst++ = (hi << 4) | lo;
}
+ return 0;
}
EXPORT_SYMBOL(hex2bin);
diff --git a/lib/idr.c b/lib/idr.c
index db040ce3fa7..5acf9bb1096 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -860,7 +860,7 @@ EXPORT_SYMBOL(ida_get_new_above);
* and go back to the idr_pre_get() call. If the idr is full, it will
* return %-ENOSPC.
*
- * @id returns a value in the range %0 ... %0x7fffffff.
+ * @p_id returns a value in the range %0 ... %0x7fffffff.
*/
int ida_get_new(struct ida *ida, int *p_id)
{
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 70af0a7f97c..ad72a03ce5e 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -282,7 +282,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
kobj_bcast_filter,
kobj);
/* ENOBUFS should be handled in userspace */
- if (retval == -ENOBUFS)
+ if (retval == -ENOBUFS || retval == -ESRCH)
retval = 0;
} else
retval = -ENOMEM;
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 28f2c33c6b5..f087105ed91 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -59,13 +59,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
int cpu;
- spin_lock(&fbc->lock);
+ raw_spin_lock(&fbc->lock);
for_each_possible_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
*pcount = 0;
}
fbc->count = amount;
- spin_unlock(&fbc->lock);
+ raw_spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);
@@ -76,10 +76,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
preempt_disable();
count = __this_cpu_read(*fbc->counters) + amount;
if (count >= batch || count <= -batch) {
- spin_lock(&fbc->lock);
+ raw_spin_lock(&fbc->lock);
fbc->count += count;
__this_cpu_write(*fbc->counters, 0);
- spin_unlock(&fbc->lock);
+ raw_spin_unlock(&fbc->lock);
} else {
__this_cpu_write(*fbc->counters, count);
}
@@ -96,13 +96,13 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
s64 ret;
int cpu;
- spin_lock(&fbc->lock);
+ raw_spin_lock(&fbc->lock);
ret = fbc->count;
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
- spin_unlock(&fbc->lock);
+ raw_spin_unlock(&fbc->lock);
return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
@@ -110,7 +110,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
struct lock_class_key *key)
{
- spin_lock_init(&fbc->lock);
+ raw_spin_lock_init(&fbc->lock);
lockdep_set_class(&fbc->lock, key);
fbc->count = amount;
fbc->counters = alloc_percpu(s32);
@@ -173,11 +173,11 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
s32 *pcount;
unsigned long flags;
- spin_lock_irqsave(&fbc->lock, flags);
+ raw_spin_lock_irqsave(&fbc->lock, flags);
pcount = per_cpu_ptr(fbc->counters, cpu);
fbc->count += *pcount;
*pcount = 0;
- spin_unlock_irqrestore(&fbc->lock, flags);
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
mutex_unlock(&percpu_counters_lock);
#endif
diff --git a/lib/proportions.c b/lib/proportions.c
index d50746a79de..05df84801b5 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
int prop_local_init_percpu(struct prop_local_percpu *pl)
{
- spin_lock_init(&pl->lock);
+ raw_spin_lock_init(&pl->lock);
pl->shift = 0;
pl->period = 0;
return percpu_counter_init(&pl->events, 0);
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
if (pl->period == global_period)
return;
- spin_lock_irqsave(&pl->lock, flags);
+ raw_spin_lock_irqsave(&pl->lock, flags);
prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
/*
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
percpu_counter_set(&pl->events, 0);
pl->period = global_period;
- spin_unlock_irqrestore(&pl->lock, flags);
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
}
/*
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd,
int prop_local_init_single(struct prop_local_single *pl)
{
- spin_lock_init(&pl->lock);
+ raw_spin_lock_init(&pl->lock);
pl->shift = 0;
pl->period = 0;
pl->events = 0;
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
if (pl->period == global_period)
return;
- spin_lock_irqsave(&pl->lock, flags);
+ raw_spin_lock_irqsave(&pl->lock, flags);
prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
/*
* For each missed period, we half the local counter.
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
else
pl->events = 0;
pl->period = global_period;
- spin_unlock_irqrestore(&pl->lock, flags);
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
}
/*
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 027a03f4c56..c96d500577d 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
* in addition to the one that will be printed by
* the entity that is holding the lock already:
*/
- if (!spin_trylock_irqsave(&rs->lock, flags))
+ if (!raw_spin_trylock_irqsave(&rs->lock, flags))
return 0;
if (!rs->begin)
@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
rs->missed++;
ret = 0;
}
- spin_unlock_irqrestore(&rs->lock, flags);
+ raw_spin_unlock_irqrestore(&rs->lock, flags);
return ret;
}
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index ffc9fc7f3b0..f2393c21fe8 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -22,9 +22,9 @@ int rwsem_is_locked(struct rw_semaphore *sem)
int ret = 1;
unsigned long flags;
- if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+ if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
ret = (sem->activity != 0);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
return ret;
}
@@ -44,7 +44,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->activity = 0;
- spin_lock_init(&sem->wait_lock);
+ raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
@@ -145,12 +145,12 @@ void __sched __down_read(struct rw_semaphore *sem)
struct task_struct *tsk;
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity++;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
goto out;
}
@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the lock */
for (;;) {
@@ -189,7 +189,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
int ret = 0;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
ret = 1;
}
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
}
@@ -212,12 +212,12 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
struct task_struct *tsk;
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity = -1;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
goto out;
}
@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the lock */
for (;;) {
@@ -260,7 +260,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
ret = 1;
}
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
}
@@ -280,12 +280,12 @@ void __up_read(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (--sem->activity == 0 && !list_empty(&sem->wait_list))
sem = __rwsem_wake_one_writer(sem);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
@@ -295,13 +295,13 @@ void __up_write(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 0;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 1);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
@@ -312,12 +312,12 @@ void __downgrade_write(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 1;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 0);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
diff --git a/lib/rwsem.c b/lib/rwsem.c
index aa7c3052261..410aa1189b1 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -22,7 +22,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
+ raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
@@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
- spin_lock_irq(&sem->wait_lock);
+ raw_spin_lock_irq(&sem->wait_lock);
waiter.task = tsk;
waiter.flags = flags;
get_task_struct(tsk);
@@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
- spin_unlock_irq(&sem->wait_lock);
+ raw_spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
@@ -245,13 +245,13 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return sem;
}
@@ -265,13 +265,13 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return sem;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 666e4e67741..14d0a6a632f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -120,10 +120,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
ptep = pte_offset_map(pmd, addr);
- if (!is_swap_pte(*ptep)) {
- pte_unmap(ptep);
- goto out;
- }
+ /*
+ * Peek to check is_swap_pte() before taking ptlock? No, we
+ * can race mremap's move_ptes(), which skips anon_vma lock.
+ */
ptl = pte_lockptr(mm, pmd);
}
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 39d216d535e..6bdc67dbbc2 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -513,11 +513,10 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
array_size = length * sizeof(void *);
- array = vmalloc(array_size);
+ array = vzalloc(array_size);
if (!array)
goto nomem;
- memset(array, 0, array_size);
ctrl = &swap_cgroup_ctrl[type];
mutex_lock(&swap_cgroup_mutex);
ctrl->length = length;
diff --git a/mm/shmem.c b/mm/shmem.c
index 32f6763f16f..2d357729529 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1458,7 +1458,7 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
if (inode) {
error = security_inode_init_security(inode, dir,
- &dentry->d_name, NULL,
+ &dentry->d_name,
NULL, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
@@ -1598,7 +1598,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
if (!inode)
return -ENOSPC;
- error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
+ error = security_inode_init_security(inode, dir, &dentry->d_name,
NULL, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b55699cd906..9fdfce7ba40 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1416,7 +1416,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken,
if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
return false;
- /* If we have relaimed everything on the isolated list, no stall */
+ /* If we have reclaimed everything on the isolated list, no stall */
if (nr_freed == nr_taken)
return false;
diff --git a/net/802/garp.c b/net/802/garp.c
index 16102951d36..070bf4403bf 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -553,7 +553,7 @@ static void garp_release_port(struct net_device *dev)
if (rtnl_dereference(port->applicants[i]))
return;
}
- rcu_assign_pointer(dev->garp_port, NULL);
+ RCU_INIT_POINTER(dev->garp_port, NULL);
kfree_rcu(port, rcu);
}
@@ -605,7 +605,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
ASSERT_RTNL();
- rcu_assign_pointer(port->applicants[appl->type], NULL);
+ RCU_INIT_POINTER(port->applicants[appl->type], NULL);
/* Delete timer and generate a final TRANSMIT_PDU event to flush out
* all pending messages before the applicant is gone. */
diff --git a/net/802/stp.c b/net/802/stp.c
index 978c30b1b36..0e136ef1e4b 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -88,9 +88,9 @@ void stp_proto_unregister(const struct stp_proto *proto)
{
mutex_lock(&stp_proto_mutex);
if (is_zero_ether_addr(proto->group_address))
- rcu_assign_pointer(stp_proto, NULL);
+ RCU_INIT_POINTER(stp_proto, NULL);
else
- rcu_assign_pointer(garp_protos[proto->group_address[5] -
+ RCU_INIT_POINTER(garp_protos[proto->group_address[5] -
GARP_ADDR_MIN], NULL);
synchronize_rcu();
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8970ba139d7..5471628d3ff 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -133,7 +133,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
if (grp->nr_vlans == 0) {
vlan_gvrp_uninit_applicant(real_dev);
- rcu_assign_pointer(real_dev->vlgrp, NULL);
+ RCU_INIT_POINTER(real_dev->vlgrp, NULL);
/* Free the group, after all cpu's are done. */
call_rcu(&grp->rcu, vlan_rcu_free);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9d40a071d03..c8cf9391417 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -610,7 +610,8 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
const struct vlan_dev_info *vlan = vlan_dev_info(dev);
- return dev_ethtool_get_settings(vlan->real_dev, cmd);
+
+ return __ethtool_get_settings(vlan->real_dev, cmd);
}
static void vlan_ethtool_get_drvinfo(struct net_device *dev,
@@ -674,7 +675,6 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = vlan_dev_set_mac_address,
.ndo_set_rx_mode = vlan_dev_set_rx_mode,
- .ndo_set_multicast_list = vlan_dev_set_rx_mode,
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
diff --git a/net/9p/client.c b/net/9p/client.c
index 0505a03c374..854ca7a911c 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -38,6 +38,9 @@
#include <net/9p/transport.h>
#include "protocol.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/9p.h>
+
/*
* Client Option Parsing (code inspired by NFS code)
* - a little lazy - parse all client options
@@ -123,21 +126,19 @@ static int parse_opts(char *opts, struct p9_client *clnt)
options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
- int token;
+ int token, r;
if (!*p)
continue;
token = match_token(p, tokens, args);
- if (token < Opt_trans) {
- int r = match_int(&args[0], &option);
+ switch (token) {
+ case Opt_msize:
+ r = match_int(&args[0], &option);
if (r < 0) {
P9_DPRINTK(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
+ "integer field, but no integer?\n");
ret = r;
continue;
}
- }
- switch (token) {
- case Opt_msize:
clnt->msize = option;
break;
case Opt_trans:
@@ -203,11 +204,13 @@ free_and_return:
*
*/
-static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
+static struct p9_req_t *
+p9_tag_alloc(struct p9_client *c, u16 tag, unsigned int max_size)
{
unsigned long flags;
int row, col;
struct p9_req_t *req;
+ int alloc_msize = min(c->msize, max_size);
/* This looks up the original request by tag so we know which
* buffer to read the data into */
@@ -245,23 +248,10 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
return ERR_PTR(-ENOMEM);
}
init_waitqueue_head(req->wq);
- if ((c->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
- P9_TRANS_PREF_PAYLOAD_SEP) {
- int alloc_msize = min(c->msize, 4096);
- req->tc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
- GFP_NOFS);
- req->tc->capacity = alloc_msize;
- req->rc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
- GFP_NOFS);
- req->rc->capacity = alloc_msize;
- } else {
- req->tc = kmalloc(sizeof(struct p9_fcall)+c->msize,
- GFP_NOFS);
- req->tc->capacity = c->msize;
- req->rc = kmalloc(sizeof(struct p9_fcall)+c->msize,
- GFP_NOFS);
- req->rc->capacity = c->msize;
- }
+ req->tc = kmalloc(sizeof(struct p9_fcall) + alloc_msize,
+ GFP_NOFS);
+ req->rc = kmalloc(sizeof(struct p9_fcall) + alloc_msize,
+ GFP_NOFS);
if ((!req->tc) || (!req->rc)) {
printk(KERN_ERR "Couldn't grow tag array\n");
kfree(req->tc);
@@ -271,6 +261,8 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
req->wq = NULL;
return ERR_PTR(-ENOMEM);
}
+ req->tc->capacity = alloc_msize;
+ req->rc->capacity = alloc_msize;
req->tc->sdata = (char *) req->tc + sizeof(struct p9_fcall);
req->rc->sdata = (char *) req->rc + sizeof(struct p9_fcall);
}
@@ -475,37 +467,22 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
int ecode;
err = p9_parse_header(req->rc, NULL, &type, NULL, 0);
+ /*
+ * dump the response from server
+ * This should be after check errors which poplulate pdu_fcall.
+ */
+ trace_9p_protocol_dump(c, req->rc);
if (err) {
P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
return err;
}
-
if (type != P9_RERROR && type != P9_RLERROR)
return 0;
if (!p9_is_proto_dotl(c)) {
char *ename;
-
- if (req->tc->pbuf_size) {
- /* Handle user buffers */
- size_t len = req->rc->size - req->rc->offset;
- if (req->tc->pubuf) {
- /* User Buffer */
- err = copy_from_user(
- &req->rc->sdata[req->rc->offset],
- req->tc->pubuf, len);
- if (err) {
- err = -EFAULT;
- goto out_err;
- }
- } else {
- /* Kernel Buffer */
- memmove(&req->rc->sdata[req->rc->offset],
- req->tc->pkbuf, len);
- }
- }
err = p9pdu_readf(req->rc, c->proto_version, "s?d",
- &ename, &ecode);
+ &ename, &ecode);
if (err)
goto out_err;
@@ -515,11 +492,10 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
if (!err || !IS_ERR_VALUE(err)) {
err = p9_errstr2errno(ename, strlen(ename));
- P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode,
- ename);
-
- kfree(ename);
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+ -ecode, ename);
}
+ kfree(ename);
} else {
err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
err = -ecode;
@@ -527,7 +503,6 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
}
-
return err;
out_err:
@@ -536,6 +511,115 @@ out_err:
return err;
}
+/**
+ * p9_check_zc_errors - check 9p packet for error return and process it
+ * @c: current client instance
+ * @req: request to parse and check for error conditions
+ * @in_hdrlen: Size of response protocol buffer.
+ *
+ * returns error code if one is discovered, otherwise returns 0
+ *
+ * this will have to be more complicated if we have multiple
+ * error packet types
+ */
+
+static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
+ char *uidata, int in_hdrlen, int kern_buf)
+{
+ int err;
+ int ecode;
+ int8_t type;
+ char *ename = NULL;
+
+ err = p9_parse_header(req->rc, NULL, &type, NULL, 0);
+ /*
+ * dump the response from server
+ * This should be after parse_header which poplulate pdu_fcall.
+ */
+ trace_9p_protocol_dump(c, req->rc);
+ if (err) {
+ P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
+ return err;
+ }
+
+ if (type != P9_RERROR && type != P9_RLERROR)
+ return 0;
+
+ if (!p9_is_proto_dotl(c)) {
+ /* Error is reported in string format */
+ uint16_t len;
+ /* 7 = header size for RERROR, 2 is the size of string len; */
+ int inline_len = in_hdrlen - (7 + 2);
+
+ /* Read the size of error string */
+ err = p9pdu_readf(req->rc, c->proto_version, "w", &len);
+ if (err)
+ goto out_err;
+
+ ename = kmalloc(len + 1, GFP_NOFS);
+ if (!ename) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ if (len <= inline_len) {
+ /* We have error in protocol buffer itself */
+ if (pdu_read(req->rc, ename, len)) {
+ err = -EFAULT;
+ goto out_free;
+
+ }
+ } else {
+ /*
+ * Part of the data is in user space buffer.
+ */
+ if (pdu_read(req->rc, ename, inline_len)) {
+ err = -EFAULT;
+ goto out_free;
+
+ }
+ if (kern_buf) {
+ memcpy(ename + inline_len, uidata,
+ len - inline_len);
+ } else {
+ err = copy_from_user(ename + inline_len,
+ uidata, len - inline_len);
+ if (err) {
+ err = -EFAULT;
+ goto out_free;
+ }
+ }
+ }
+ ename[len] = 0;
+ if (p9_is_proto_dotu(c)) {
+ /* For dotu we also have error code */
+ err = p9pdu_readf(req->rc,
+ c->proto_version, "d", &ecode);
+ if (err)
+ goto out_free;
+ err = -ecode;
+ }
+ if (!err || !IS_ERR_VALUE(err)) {
+ err = p9_errstr2errno(ename, strlen(ename));
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+ -ecode, ename);
+ }
+ kfree(ename);
+ } else {
+ err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
+ err = -ecode;
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+ }
+ return err;
+
+out_free:
+ kfree(ename);
+out_err:
+ P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
+ return err;
+}
+
static struct p9_req_t *
p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
@@ -579,23 +663,12 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
return 0;
}
-/**
- * p9_client_rpc - issue a request and wait for a response
- * @c: client session
- * @type: type of request
- * @fmt: protocol format string (see protocol.c)
- *
- * Returns request structure (which client must free using p9_free_req)
- */
-
-static struct p9_req_t *
-p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
+static struct p9_req_t *p9_client_prepare_req(struct p9_client *c,
+ int8_t type, int req_size,
+ const char *fmt, va_list ap)
{
- va_list ap;
int tag, err;
struct p9_req_t *req;
- unsigned long flags;
- int sigpending;
P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);
@@ -607,12 +680,6 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
if ((c->status == BeginDisconnect) && (type != P9_TCLUNK))
return ERR_PTR(-EIO);
- if (signal_pending(current)) {
- sigpending = 1;
- clear_thread_flag(TIF_SIGPENDING);
- } else
- sigpending = 0;
-
tag = P9_NOTAG;
if (type != P9_TVERSION) {
tag = p9_idpool_get(c->tagpool);
@@ -620,18 +687,51 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
return ERR_PTR(-ENOMEM);
}
- req = p9_tag_alloc(c, tag);
+ req = p9_tag_alloc(c, tag, req_size);
if (IS_ERR(req))
return req;
/* marshall the data */
p9pdu_prepare(req->tc, tag, type);
- va_start(ap, fmt);
err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap);
- va_end(ap);
if (err)
goto reterr;
- p9pdu_finalize(req->tc);
+ p9pdu_finalize(c, req->tc);
+ trace_9p_client_req(c, type, tag);
+ return req;
+reterr:
+ p9_free_req(c, req);
+ return ERR_PTR(err);
+}
+
+/**
+ * p9_client_rpc - issue a request and wait for a response
+ * @c: client session
+ * @type: type of request
+ * @fmt: protocol format string (see protocol.c)
+ *
+ * Returns request structure (which client must free using p9_free_req)
+ */
+
+static struct p9_req_t *
+p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
+{
+ va_list ap;
+ int sigpending, err;
+ unsigned long flags;
+ struct p9_req_t *req;
+
+ va_start(ap, fmt);
+ req = p9_client_prepare_req(c, type, c->msize, fmt, ap);
+ va_end(ap);
+ if (IS_ERR(req))
+ return req;
+
+ if (signal_pending(current)) {
+ sigpending = 1;
+ clear_thread_flag(TIF_SIGPENDING);
+ } else
+ sigpending = 0;
err = c->trans_mod->request(c, req);
if (err < 0) {
@@ -639,18 +739,14 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
c->status = Disconnected;
goto reterr;
}
-
- P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d\n", req->wq, tag);
+ /* Wait for the response */
err = wait_event_interruptible(*req->wq,
- req->status >= REQ_STATUS_RCVD);
- P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d\n",
- req->wq, tag, err);
+ req->status >= REQ_STATUS_RCVD);
if (req->status == REQ_STATUS_ERROR) {
P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
err = req->t_err;
}
-
if ((err == -ERESTARTSYS) && (c->status == Connected)) {
P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
sigpending = 1;
@@ -663,25 +759,102 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
if (req->status == REQ_STATUS_RCVD)
err = 0;
}
-
if (sigpending) {
spin_lock_irqsave(&current->sighand->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
-
if (err < 0)
goto reterr;
err = p9_check_errors(c, req);
- if (!err) {
- P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d\n", c, type);
+ trace_9p_client_res(c, type, req->rc->tag, err);
+ if (!err)
+ return req;
+reterr:
+ p9_free_req(c, req);
+ return ERR_PTR(err);
+}
+
+/**
+ * p9_client_zc_rpc - issue a request and wait for a response
+ * @c: client session
+ * @type: type of request
+ * @uidata: user bffer that should be ued for zero copy read
+ * @uodata: user buffer that shoud be user for zero copy write
+ * @inlen: read buffer size
+ * @olen: write buffer size
+ * @hdrlen: reader header size, This is the size of response protocol data
+ * @fmt: protocol format string (see protocol.c)
+ *
+ * Returns request structure (which client must free using p9_free_req)
+ */
+static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
+ char *uidata, char *uodata,
+ int inlen, int olen, int in_hdrlen,
+ int kern_buf, const char *fmt, ...)
+{
+ va_list ap;
+ int sigpending, err;
+ unsigned long flags;
+ struct p9_req_t *req;
+
+ va_start(ap, fmt);
+ /*
+ * We allocate a inline protocol data of only 4k bytes.
+ * The actual content is passed in zero-copy fashion.
+ */
+ req = p9_client_prepare_req(c, type, P9_ZC_HDR_SZ, fmt, ap);
+ va_end(ap);
+ if (IS_ERR(req))
return req;
+
+ if (signal_pending(current)) {
+ sigpending = 1;
+ clear_thread_flag(TIF_SIGPENDING);
+ } else
+ sigpending = 0;
+
+ /* If we are called with KERNEL_DS force kern_buf */
+ if (segment_eq(get_fs(), KERNEL_DS))
+ kern_buf = 1;
+
+ err = c->trans_mod->zc_request(c, req, uidata, uodata,
+ inlen, olen, in_hdrlen, kern_buf);
+ if (err < 0) {
+ if (err == -EIO)
+ c->status = Disconnected;
+ goto reterr;
+ }
+ if (req->status == REQ_STATUS_ERROR) {
+ P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+ err = req->t_err;
+ }
+ if ((err == -ERESTARTSYS) && (c->status == Connected)) {
+ P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
+ sigpending = 1;
+ clear_thread_flag(TIF_SIGPENDING);
+
+ if (c->trans_mod->cancel(c, req))
+ p9_client_flush(c, req);
+
+ /* if we received the response anyway, don't signal error */
+ if (req->status == REQ_STATUS_RCVD)
+ err = 0;
+ }
+ if (sigpending) {
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ recalc_sigpending();
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
+ if (err < 0)
+ goto reterr;
+ err = p9_check_zc_errors(c, req, uidata, in_hdrlen, kern_buf);
+ trace_9p_client_res(c, type, req->rc->tag, err);
+ if (!err)
+ return req;
reterr:
- P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d error: %d\n", c, type,
- err);
p9_free_req(c, req);
return ERR_PTR(err);
}
@@ -769,7 +942,7 @@ static int p9_client_version(struct p9_client *c)
err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version);
if (err) {
P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err);
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(c, req->rc);
goto error;
}
@@ -906,15 +1079,14 @@ EXPORT_SYMBOL(p9_client_begin_disconnect);
struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
char *uname, u32 n_uname, char *aname)
{
- int err;
+ int err = 0;
struct p9_req_t *req;
struct p9_fid *fid;
struct p9_qid qid;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
- afid ? afid->fid : -1, uname, aname);
- err = 0;
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
+ afid ? afid->fid : -1, uname, aname);
fid = p9_fid_create(clnt);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
@@ -931,7 +1103,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto error;
}
@@ -991,7 +1163,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
err = p9pdu_readf(req->rc, clnt->proto_version, "R", &nwqids, &wqids);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto clunk_fid;
}
@@ -1058,7 +1230,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1101,7 +1273,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", qid, &iounit);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1146,7 +1318,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1185,7 +1357,7 @@ int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1330,13 +1502,15 @@ int
p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
u32 count)
{
- int err, rsize;
- struct p9_client *clnt;
- struct p9_req_t *req;
char *dataptr;
+ int kernel_buf = 0;
+ struct p9_req_t *req;
+ struct p9_client *clnt;
+ int err, rsize, non_zc = 0;
+
- P9_DPRINTK(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", fid->fid,
- (long long unsigned) offset, count);
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
+ fid->fid, (long long unsigned) offset, count);
err = 0;
clnt = fid->clnt;
@@ -1348,13 +1522,24 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
rsize = count;
/* Don't bother zerocopy for small IO (< 1024) */
- if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
- P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
- req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset,
- rsize, data, udata);
+ if (clnt->trans_mod->zc_request && rsize > 1024) {
+ char *indata;
+ if (data) {
+ kernel_buf = 1;
+ indata = data;
+ } else
+ indata = (char *)udata;
+ /*
+ * response header len is 11
+ * PDU Header(7) + IO Size (4)
+ */
+ req = p9_client_zc_rpc(clnt, P9_TREAD, indata, NULL, rsize, 0,
+ 11, kernel_buf, "dqd", fid->fid,
+ offset, rsize);
} else {
+ non_zc = 1;
req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
- rsize);
+ rsize);
}
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -1363,14 +1548,13 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
- P9_DUMP_PKT(1, req->rc);
- if (!req->tc->pbuf_size) {
+ if (non_zc) {
if (data) {
memmove(data, dataptr, count);
} else {
@@ -1396,6 +1580,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
u64 offset, u32 count)
{
int err, rsize;
+ int kernel_buf = 0;
struct p9_client *clnt;
struct p9_req_t *req;
@@ -1411,19 +1596,24 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
if (count < rsize)
rsize = count;
- /* Don't bother zerocopy form small IO (< 1024) */
- if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
- P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
- req = p9_client_rpc(clnt, P9_TWRITE, "dqE", fid->fid, offset,
- rsize, data, udata);
+ /* Don't bother zerocopy for small IO (< 1024) */
+ if (clnt->trans_mod->zc_request && rsize > 1024) {
+ char *odata;
+ if (data) {
+ kernel_buf = 1;
+ odata = data;
+ } else
+ odata = (char *)udata;
+ req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
+ P9_ZC_HDR_SZ, kernel_buf, "dqd",
+ fid->fid, offset, rsize);
} else {
-
if (data)
req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid,
- offset, rsize, data);
+ offset, rsize, data);
else
req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid,
- offset, rsize, udata);
+ offset, rsize, udata);
}
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -1432,7 +1622,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1472,7 +1662,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
err = p9pdu_readf(req->rc, clnt->proto_version, "wS", &ignored, ret);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto error;
}
@@ -1523,7 +1713,7 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
err = p9pdu_readf(req->rc, clnt->proto_version, "A", ret);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto error;
}
@@ -1671,7 +1861,7 @@ int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
&sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail,
&sb->files, &sb->ffree, &sb->fsid, &sb->namelen);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto error;
}
@@ -1778,7 +1968,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
}
err = p9pdu_readf(req->rc, clnt->proto_version, "q", attr_size);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto clunk_fid;
}
@@ -1824,7 +2014,7 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
{
- int err, rsize;
+ int err, rsize, non_zc = 0;
struct p9_client *clnt;
struct p9_req_t *req;
char *dataptr;
@@ -1842,13 +2032,18 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
if (count < rsize)
rsize = count;
- if ((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
- P9_TRANS_PREF_PAYLOAD_SEP) {
- req = p9_client_rpc(clnt, P9_TREADDIR, "dqF", fid->fid,
- offset, rsize, data);
+ /* Don't bother zerocopy for small IO (< 1024) */
+ if (clnt->trans_mod->zc_request && rsize > 1024) {
+ /*
+ * response header len is 11
+ * PDU Header(7) + IO Size (4)
+ */
+ req = p9_client_zc_rpc(clnt, P9_TREADDIR, data, NULL, rsize, 0,
+ 11, 1, "dqd", fid->fid, offset, rsize);
} else {
+ non_zc = 1;
req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid,
- offset, rsize);
+ offset, rsize);
}
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -1857,13 +2052,13 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
- if (!req->tc->pbuf_size && data)
+ if (non_zc)
memmove(data, dataptr, count);
p9_free_req(clnt, req);
@@ -1894,7 +2089,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type,
@@ -1925,7 +2120,7 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type,
@@ -1960,7 +2155,7 @@ int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
err = p9pdu_readf(req->rc, clnt->proto_version, "b", status);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
@@ -1993,7 +2188,7 @@ int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
&glock->start, &glock->length, &glock->proc_id,
&glock->client_id);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld "
@@ -2021,7 +2216,7 @@ int p9_client_readlink(struct p9_fid *fid, char **target)
err = p9pdu_readf(req->rc, clnt->proto_version, "s", target);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index df58375ea6b..55e10a96c90 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -37,40 +37,11 @@
#include <net/9p/client.h>
#include "protocol.h"
+#include <trace/events/9p.h>
+
static int
p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
-#ifdef CONFIG_NET_9P_DEBUG
-void
-p9pdu_dump(int way, struct p9_fcall *pdu)
-{
- int len = pdu->size;
-
- if ((p9_debug_level & P9_DEBUG_VPKT) != P9_DEBUG_VPKT) {
- if ((p9_debug_level & P9_DEBUG_PKT) == P9_DEBUG_PKT) {
- if (len > 32)
- len = 32;
- } else {
- /* shouldn't happen */
- return;
- }
- }
-
- if (way)
- print_hex_dump_bytes("[9P] ", DUMP_PREFIX_OFFSET, pdu->sdata,
- len);
- else
- print_hex_dump_bytes("]9P[ ", DUMP_PREFIX_OFFSET, pdu->sdata,
- len);
-}
-#else
-void
-p9pdu_dump(int way, struct p9_fcall *pdu)
-{
-}
-#endif
-EXPORT_SYMBOL(p9pdu_dump);
-
void p9stat_free(struct p9_wstat *stbuf)
{
kfree(stbuf->name);
@@ -81,7 +52,7 @@ void p9stat_free(struct p9_wstat *stbuf)
}
EXPORT_SYMBOL(p9stat_free);
-static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
+size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
{
size_t len = min(pdu->size - pdu->offset, size);
memcpy(data, &pdu->sdata[pdu->offset], len);
@@ -108,26 +79,6 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
return size - len;
}
-static size_t
-pdu_write_urw(struct p9_fcall *pdu, const char *kdata, const char __user *udata,
- size_t size)
-{
- BUG_ON(pdu->size > P9_IOHDRSZ);
- pdu->pubuf = (char __user *)udata;
- pdu->pkbuf = (char *)kdata;
- pdu->pbuf_size = size;
- return 0;
-}
-
-static size_t
-pdu_write_readdir(struct p9_fcall *pdu, const char *kdata, size_t size)
-{
- BUG_ON(pdu->size > P9_READDIRHDRSZ);
- pdu->pkbuf = (char *)kdata;
- pdu->pbuf_size = size;
- return 0;
-}
-
/*
b - int8_t
w - int16_t
@@ -459,26 +410,6 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
errcode = -EFAULT;
}
break;
- case 'E':{
- int32_t cnt = va_arg(ap, int32_t);
- const char *k = va_arg(ap, const void *);
- const char __user *u = va_arg(ap,
- const void __user *);
- errcode = p9pdu_writef(pdu, proto_version, "d",
- cnt);
- if (!errcode && pdu_write_urw(pdu, k, u, cnt))
- errcode = -EFAULT;
- }
- break;
- case 'F':{
- int32_t cnt = va_arg(ap, int32_t);
- const char *k = va_arg(ap, const void *);
- errcode = p9pdu_writef(pdu, proto_version, "d",
- cnt);
- if (!errcode && pdu_write_readdir(pdu, k, cnt))
- errcode = -EFAULT;
- }
- break;
case 'U':{
int32_t count = va_arg(ap, int32_t);
const char __user *udata =
@@ -591,7 +522,7 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...)
return ret;
}
-int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version)
+int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
{
struct p9_fcall fake_pdu;
int ret;
@@ -601,10 +532,10 @@ int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version)
fake_pdu.sdata = buf;
fake_pdu.offset = 0;
- ret = p9pdu_readf(&fake_pdu, proto_version, "S", st);
+ ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "S", st);
if (ret) {
P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
- P9_DUMP_PKT(0, &fake_pdu);
+ trace_9p_protocol_dump(clnt, &fake_pdu);
}
return ret;
@@ -617,7 +548,7 @@ int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type)
return p9pdu_writef(pdu, 0, "dbw", 0, type, tag);
}
-int p9pdu_finalize(struct p9_fcall *pdu)
+int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu)
{
int size = pdu->size;
int err;
@@ -626,7 +557,7 @@ int p9pdu_finalize(struct p9_fcall *pdu)
err = p9pdu_writef(pdu, 0, "d", size);
pdu->size = size;
- P9_DUMP_PKT(0, pdu);
+ trace_9p_protocol_dump(clnt, pdu);
P9_DPRINTK(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size,
pdu->id, pdu->tag);
@@ -637,14 +568,10 @@ void p9pdu_reset(struct p9_fcall *pdu)
{
pdu->offset = 0;
pdu->size = 0;
- pdu->private = NULL;
- pdu->pubuf = NULL;
- pdu->pkbuf = NULL;
- pdu->pbuf_size = 0;
}
-int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
- int proto_version)
+int p9dirent_read(struct p9_client *clnt, char *buf, int len,
+ struct p9_dirent *dirent)
{
struct p9_fcall fake_pdu;
int ret;
@@ -655,11 +582,11 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
fake_pdu.sdata = buf;
fake_pdu.offset = 0;
- ret = p9pdu_readf(&fake_pdu, proto_version, "Qqbs", &dirent->qid,
- &dirent->d_off, &dirent->d_type, &nameptr);
+ ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid,
+ &dirent->d_off, &dirent->d_type, &nameptr);
if (ret) {
P9_DPRINTK(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
- P9_DUMP_PKT(1, &fake_pdu);
+ trace_9p_protocol_dump(clnt, &fake_pdu);
goto out;
}
diff --git a/net/9p/protocol.h b/net/9p/protocol.h
index 2431c0f38d5..2cc525fa49f 100644
--- a/net/9p/protocol.h
+++ b/net/9p/protocol.h
@@ -29,6 +29,6 @@ int p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
va_list ap);
int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type);
-int p9pdu_finalize(struct p9_fcall *pdu);
-void p9pdu_dump(int, struct p9_fcall *);
+int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu);
void p9pdu_reset(struct p9_fcall *pdu);
+size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size);
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index 9a70ebdec56..de8df957867 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -21,30 +21,25 @@
/**
* p9_release_req_pages - Release pages after the transaction.
- * @*private: PDU's private page of struct trans_rpage_info
*/
-void
-p9_release_req_pages(struct trans_rpage_info *rpinfo)
+void p9_release_pages(struct page **pages, int nr_pages)
{
int i = 0;
-
- while (rpinfo->rp_data[i] && rpinfo->rp_nr_pages--) {
- put_page(rpinfo->rp_data[i]);
+ while (pages[i] && nr_pages--) {
+ put_page(pages[i]);
i++;
}
}
-EXPORT_SYMBOL(p9_release_req_pages);
+EXPORT_SYMBOL(p9_release_pages);
/**
* p9_nr_pages - Return number of pages needed to accommodate the payload.
*/
-int
-p9_nr_pages(struct p9_req_t *req)
+int p9_nr_pages(char *data, int len)
{
unsigned long start_page, end_page;
- start_page = (unsigned long)req->tc->pubuf >> PAGE_SHIFT;
- end_page = ((unsigned long)req->tc->pubuf + req->tc->pbuf_size +
- PAGE_SIZE - 1) >> PAGE_SHIFT;
+ start_page = (unsigned long)data >> PAGE_SHIFT;
+ end_page = ((unsigned long)data + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
return end_page - start_page;
}
EXPORT_SYMBOL(p9_nr_pages);
@@ -58,35 +53,17 @@ EXPORT_SYMBOL(p9_nr_pages);
* @nr_pages: number of pages to accommodate the payload
* @rw: Indicates if the pages are for read or write.
*/
-int
-p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
- int nr_pages, u8 rw)
-{
- uint32_t first_page_bytes = 0;
- int32_t pdata_mapped_pages;
- struct trans_rpage_info *rpinfo;
-
- *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1);
- if (*pdata_off)
- first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off),
- req->tc->pbuf_size);
+int p9_payload_gup(char *data, int *nr_pages, struct page **pages, int write)
+{
+ int nr_mapped_pages;
- rpinfo = req->tc->private;
- pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf,
- nr_pages, rw, &rpinfo->rp_data[0]);
- if (pdata_mapped_pages <= 0)
- return pdata_mapped_pages;
+ nr_mapped_pages = get_user_pages_fast((unsigned long)data,
+ *nr_pages, write, pages);
+ if (nr_mapped_pages <= 0)
+ return nr_mapped_pages;
- rpinfo->rp_nr_pages = pdata_mapped_pages;
- if (*pdata_off) {
- *pdata_len = first_page_bytes;
- *pdata_len += min((req->tc->pbuf_size - *pdata_len),
- ((size_t)pdata_mapped_pages - 1) << PAGE_SHIFT);
- } else {
- *pdata_len = min(req->tc->pbuf_size,
- (size_t)pdata_mapped_pages << PAGE_SHIFT);
- }
+ *nr_pages = nr_mapped_pages;
return 0;
}
EXPORT_SYMBOL(p9_payload_gup);
diff --git a/net/9p/trans_common.h b/net/9p/trans_common.h
index 76309223bb0..173bb550a9e 100644
--- a/net/9p/trans_common.h
+++ b/net/9p/trans_common.h
@@ -12,21 +12,6 @@
*
*/
-/* TRUE if it is user context */
-#define P9_IS_USER_CONTEXT (!segment_eq(get_fs(), KERNEL_DS))
-
-/**
- * struct trans_rpage_info - To store mapped page information in PDU.
- * @rp_alloc:Set if this structure is allocd, not a reuse unused space in pdu.
- * @rp_nr_pages: Number of mapped pages
- * @rp_data: Array of page pointers
- */
-struct trans_rpage_info {
- u8 rp_alloc;
- int rp_nr_pages;
- struct page *rp_data[0];
-};
-
-void p9_release_req_pages(struct trans_rpage_info *);
-int p9_payload_gup(struct p9_req_t *, size_t *, int *, int, u8);
-int p9_nr_pages(struct p9_req_t *);
+void p9_release_pages(struct page **, int);
+int p9_payload_gup(char *, int *, struct page **, int);
+int p9_nr_pages(char *, int);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e317583fcc7..32aa9834229 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -150,12 +150,10 @@ static void req_done(struct virtqueue *vq)
while (1) {
spin_lock_irqsave(&chan->lock, flags);
rc = virtqueue_get_buf(chan->vq, &len);
-
if (rc == NULL) {
spin_unlock_irqrestore(&chan->lock, flags);
break;
}
-
chan->ring_bufs_avail = 1;
spin_unlock_irqrestore(&chan->lock, flags);
/* Wakeup if anyone waiting for VirtIO ring space. */
@@ -163,17 +161,6 @@ static void req_done(struct virtqueue *vq)
P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
req = p9_tag_lookup(chan->client, rc->tag);
- if (req->tc->private) {
- struct trans_rpage_info *rp = req->tc->private;
- int p = rp->rp_nr_pages;
- /*Release pages */
- p9_release_req_pages(rp);
- atomic_sub(p, &vp_pinned);
- wake_up(&vp_wq);
- if (rp->rp_alloc)
- kfree(rp);
- req->tc->private = NULL;
- }
req->status = REQ_STATUS_RCVD;
p9_client_cb(chan->client, req);
}
@@ -193,9 +180,8 @@ static void req_done(struct virtqueue *vq)
*
*/
-static int
-pack_sg_list(struct scatterlist *sg, int start, int limit, char *data,
- int count)
+static int pack_sg_list(struct scatterlist *sg, int start,
+ int limit, char *data, int count)
{
int s;
int index = start;
@@ -224,31 +210,36 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
* this takes a list of pages.
* @sg: scatter/gather list to pack into
* @start: which segment of the sg_list to start at
- * @pdata_off: Offset into the first page
* @**pdata: a list of pages to add into sg.
+ * @nr_pages: number of pages to pack into the scatter/gather list
+ * @data: data to pack into scatter/gather list
* @count: amount of data to pack into the scatter/gather list
*/
static int
-pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
- struct page **pdata, int count)
+pack_sg_list_p(struct scatterlist *sg, int start, int limit,
+ struct page **pdata, int nr_pages, char *data, int count)
{
- int s;
- int i = 0;
+ int i = 0, s;
+ int data_off;
int index = start;
- if (pdata_off) {
- s = min((int)(PAGE_SIZE - pdata_off), count);
- sg_set_page(&sg[index++], pdata[i++], s, pdata_off);
- count -= s;
- }
-
- while (count) {
- BUG_ON(index > limit);
- s = min((int)PAGE_SIZE, count);
- sg_set_page(&sg[index++], pdata[i++], s, 0);
+ BUG_ON(nr_pages > (limit - start));
+ /*
+ * if the first page doesn't start at
+ * page boundary find the offset
+ */
+ data_off = offset_in_page(data);
+ while (nr_pages) {
+ s = rest_of_page(data);
+ if (s > count)
+ s = count;
+ sg_set_page(&sg[index++], pdata[i++], s, data_off);
+ data_off = 0;
+ data += s;
count -= s;
+ nr_pages--;
}
- return index-start;
+ return index - start;
}
/**
@@ -261,114 +252,166 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
static int
p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
- int in, out, inp, outp;
- struct virtio_chan *chan = client->trans;
+ int err;
+ int in, out;
unsigned long flags;
- size_t pdata_off = 0;
- struct trans_rpage_info *rpinfo = NULL;
- int err, pdata_len = 0;
+ struct virtio_chan *chan = client->trans;
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
req->status = REQ_STATUS_SENT;
+req_retry:
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* Handle out VirtIO ring buffers */
+ out = pack_sg_list(chan->sg, 0,
+ VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
- if (req->tc->pbuf_size && (req->tc->pubuf && P9_IS_USER_CONTEXT)) {
- int nr_pages = p9_nr_pages(req);
- int rpinfo_size = sizeof(struct trans_rpage_info) +
- sizeof(struct page *) * nr_pages;
+ in = pack_sg_list(chan->sg, out,
+ VIRTQUEUE_NUM, req->rc->sdata, req->rc->capacity);
- if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
- err = wait_event_interruptible(vp_wq,
- atomic_read(&vp_pinned) < chan->p9_max_pages);
+ err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
+ if (err < 0) {
+ if (err == -ENOSPC) {
+ chan->ring_bufs_avail = 0;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ err = wait_event_interruptible(*chan->vc_wq,
+ chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
return err;
- P9_DPRINTK(P9_DEBUG_TRANS, "9p: May gup pages now.\n");
- }
- if (rpinfo_size <= (req->tc->capacity - req->tc->size)) {
- /* We can use sdata */
- req->tc->private = req->tc->sdata + req->tc->size;
- rpinfo = (struct trans_rpage_info *)req->tc->private;
- rpinfo->rp_alloc = 0;
+ P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
+ goto req_retry;
} else {
- req->tc->private = kmalloc(rpinfo_size, GFP_NOFS);
- if (!req->tc->private) {
- P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: "
- "private kmalloc returned NULL");
- return -ENOMEM;
- }
- rpinfo = (struct trans_rpage_info *)req->tc->private;
- rpinfo->rp_alloc = 1;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ P9_DPRINTK(P9_DEBUG_TRANS,
+ "9p debug: "
+ "virtio rpc add_buf returned failure");
+ return -EIO;
}
+ }
+ virtqueue_kick(chan->vq);
+ spin_unlock_irqrestore(&chan->lock, flags);
- err = p9_payload_gup(req, &pdata_off, &pdata_len, nr_pages,
- req->tc->id == P9_TREAD ? 1 : 0);
- if (err < 0) {
- if (rpinfo->rp_alloc)
- kfree(rpinfo);
+ P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
+ return 0;
+}
+
+static int p9_get_mapped_pages(struct virtio_chan *chan,
+ struct page **pages, char *data,
+ int nr_pages, int write, int kern_buf)
+{
+ int err;
+ if (!kern_buf) {
+ /*
+ * We allow only p9_max_pages pinned. We wait for the
+ * Other zc request to finish here
+ */
+ if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
+ err = wait_event_interruptible(vp_wq,
+ (atomic_read(&vp_pinned) < chan->p9_max_pages));
+ if (err == -ERESTARTSYS)
+ return err;
+ }
+ err = p9_payload_gup(data, &nr_pages, pages, write);
+ if (err < 0)
return err;
- } else {
- atomic_add(rpinfo->rp_nr_pages, &vp_pinned);
+ atomic_add(nr_pages, &vp_pinned);
+ } else {
+ /* kernel buffer, no need to pin pages */
+ int s, index = 0;
+ int count = nr_pages;
+ while (nr_pages) {
+ s = rest_of_page(data);
+ pages[index++] = virt_to_page(data);
+ data += s;
+ nr_pages--;
}
+ nr_pages = count;
}
+ return nr_pages;
+}
-req_retry_pinned:
- spin_lock_irqsave(&chan->lock, flags);
+/**
+ * p9_virtio_zc_request - issue a zero copy request
+ * @client: client instance issuing the request
+ * @req: request to be issued
+ * @uidata: user bffer that should be ued for zero copy read
+ * @uodata: user buffer that shoud be user for zero copy write
+ * @inlen: read buffer size
+ * @olen: write buffer size
+ * @hdrlen: reader header size, This is the size of response protocol data
+ *
+ */
+static int
+p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
+ char *uidata, char *uodata, int inlen,
+ int outlen, int in_hdr_len, int kern_buf)
+{
+ int in, out, err;
+ unsigned long flags;
+ int in_nr_pages = 0, out_nr_pages = 0;
+ struct page **in_pages = NULL, **out_pages = NULL;
+ struct virtio_chan *chan = client->trans;
- /* Handle out VirtIO ring buffers */
- out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
- req->tc->size);
-
- if (req->tc->pbuf_size && (req->tc->id == P9_TWRITE)) {
- /* We have additional write payload buffer to take care */
- if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
- outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
- pdata_off, rpinfo->rp_data, pdata_len);
- } else {
- char *pbuf;
- if (req->tc->pubuf)
- pbuf = (__force char *) req->tc->pubuf;
- else
- pbuf = req->tc->pkbuf;
- outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
- req->tc->pbuf_size);
+ P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
+
+ if (uodata) {
+ out_nr_pages = p9_nr_pages(uodata, outlen);
+ out_pages = kmalloc(sizeof(struct page *) * out_nr_pages,
+ GFP_NOFS);
+ if (!out_pages) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ out_nr_pages = p9_get_mapped_pages(chan, out_pages, uodata,
+ out_nr_pages, 0, kern_buf);
+ if (out_nr_pages < 0) {
+ err = out_nr_pages;
+ kfree(out_pages);
+ out_pages = NULL;
+ goto err_out;
}
- out += outp;
}
-
- /* Handle in VirtIO ring buffers */
- if (req->tc->pbuf_size &&
- ((req->tc->id == P9_TREAD) || (req->tc->id == P9_TREADDIR))) {
- /*
- * Take care of additional Read payload.
- * 11 is the read/write header = PDU Header(7) + IO Size (4).
- * Arrange in such a way that server places header in the
- * alloced memory and payload onto the user buffer.
- */
- inp = pack_sg_list(chan->sg, out,
- VIRTQUEUE_NUM, req->rc->sdata, 11);
- /*
- * Running executables in the filesystem may result in
- * a read request with kernel buffer as opposed to user buffer.
- */
- if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
- in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
- pdata_off, rpinfo->rp_data, pdata_len);
- } else {
- char *pbuf;
- if (req->tc->pubuf)
- pbuf = (__force char *) req->tc->pubuf;
- else
- pbuf = req->tc->pkbuf;
-
- in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
- pbuf, req->tc->pbuf_size);
+ if (uidata) {
+ in_nr_pages = p9_nr_pages(uidata, inlen);
+ in_pages = kmalloc(sizeof(struct page *) * in_nr_pages,
+ GFP_NOFS);
+ if (!in_pages) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ in_nr_pages = p9_get_mapped_pages(chan, in_pages, uidata,
+ in_nr_pages, 1, kern_buf);
+ if (in_nr_pages < 0) {
+ err = in_nr_pages;
+ kfree(in_pages);
+ in_pages = NULL;
+ goto err_out;
}
- in += inp;
- } else {
- in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
- req->rc->sdata, req->rc->capacity);
}
+ req->status = REQ_STATUS_SENT;
+req_retry_pinned:
+ spin_lock_irqsave(&chan->lock, flags);
+ /* out data */
+ out = pack_sg_list(chan->sg, 0,
+ VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
+
+ if (out_pages)
+ out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
+ out_pages, out_nr_pages, uodata, outlen);
+ /*
+ * Take care of in data
+ * For example TREAD have 11.
+ * 11 is the read/write header = PDU Header(7) + IO Size (4).
+ * Arrange in such a way that server places header in the
+ * alloced memory and payload onto the user buffer.
+ */
+ in = pack_sg_list(chan->sg, out,
+ VIRTQUEUE_NUM, req->rc->sdata, in_hdr_len);
+ if (in_pages)
+ in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
+ in_pages, in_nr_pages, uidata, inlen);
err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
if (err < 0) {
@@ -376,28 +419,45 @@ req_retry_pinned:
chan->ring_bufs_avail = 0;
spin_unlock_irqrestore(&chan->lock, flags);
err = wait_event_interruptible(*chan->vc_wq,
- chan->ring_bufs_avail);
+ chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
- return err;
+ goto err_out;
P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
goto req_retry_pinned;
} else {
spin_unlock_irqrestore(&chan->lock, flags);
P9_DPRINTK(P9_DEBUG_TRANS,
- "9p debug: "
- "virtio rpc add_buf returned failure");
- if (rpinfo && rpinfo->rp_alloc)
- kfree(rpinfo);
- return -EIO;
+ "9p debug: "
+ "virtio rpc add_buf returned failure");
+ err = -EIO;
+ goto err_out;
}
}
-
virtqueue_kick(chan->vq);
spin_unlock_irqrestore(&chan->lock, flags);
-
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
- return 0;
+ err = wait_event_interruptible(*req->wq,
+ req->status >= REQ_STATUS_RCVD);
+ /*
+ * Non kernel buffers are pinned, unpin them
+ */
+err_out:
+ if (!kern_buf) {
+ if (in_pages) {
+ p9_release_pages(in_pages, in_nr_pages);
+ atomic_sub(in_nr_pages, &vp_pinned);
+ }
+ if (out_pages) {
+ p9_release_pages(out_pages, out_nr_pages);
+ atomic_sub(out_nr_pages, &vp_pinned);
+ }
+ /* wakeup anybody waiting for slots to pin pages */
+ wake_up(&vp_wq);
+ }
+ kfree(in_pages);
+ kfree(out_pages);
+ return err;
}
static ssize_t p9_mount_tag_show(struct device *dev,
@@ -591,8 +651,8 @@ static struct p9_trans_module p9_virtio_trans = {
.create = p9_virtio_create,
.close = p9_virtio_close,
.request = p9_virtio_request,
+ .zc_request = p9_virtio_zc_request,
.cancel = p9_virtio_cancel,
-
/*
* We leave one entry for input and one entry for response
* headers. We also skip one more entry to accomodate, address
@@ -600,7 +660,6 @@ static struct p9_trans_module p9_virtio_trans = {
* page in zero copy.
*/
.maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
- .pref = P9_TRANS_PREF_PAYLOAD_SEP,
.def = 0,
.owner = THIS_MODULE,
};
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index b1fe7c35e8d..bfa9ab93eda 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -951,13 +951,12 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
/* checksum stuff in frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
-
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 215c9fad7cd..f1964caa0f8 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -643,7 +643,7 @@ static const struct net_device_ops lec_netdev_ops = {
.ndo_start_xmit = lec_start_xmit,
.ndo_change_mtu = lec_change_mtu,
.ndo_tx_timeout = lec_tx_timeout,
- .ndo_set_multicast_list = lec_set_multicast_list,
+ .ndo_set_rx_mode = lec_set_multicast_list,
};
static const unsigned char lec_ctrl_magic[] = {
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 2de93d00631..ce686116649 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -19,8 +19,8 @@
#
obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
-batman-adv-y += aggregation.o
batman-adv-y += bat_debugfs.o
+batman-adv-y += bat_iv_ogm.o
batman-adv-y += bat_sysfs.o
batman-adv-y += bitarray.o
batman-adv-y += gateway_client.o
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
deleted file mode 100644
index 69467fe71ff..00000000000
--- a/net/batman-adv/aggregation.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "translation-table.h"
-#include "aggregation.h"
-#include "send.h"
-#include "routing.h"
-#include "hard-interface.h"
-
-/* return true if new_packet can be aggregated with forw_packet */
-static bool can_aggregate_with(const struct batman_packet *new_batman_packet,
- struct bat_priv *bat_priv,
- int packet_len,
- unsigned long send_time,
- bool directlink,
- const struct hard_iface *if_incoming,
- const struct forw_packet *forw_packet)
-{
- struct batman_packet *batman_packet =
- (struct batman_packet *)forw_packet->skb->data;
- int aggregated_bytes = forw_packet->packet_len + packet_len;
- struct hard_iface *primary_if = NULL;
- bool res = false;
-
- /**
- * we can aggregate the current packet to this aggregated packet
- * if:
- *
- * - the send time is within our MAX_AGGREGATION_MS time
- * - the resulting packet wont be bigger than
- * MAX_AGGREGATION_BYTES
- */
-
- if (time_before(send_time, forw_packet->send_time) &&
- time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
- forw_packet->send_time) &&
- (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
-
- /**
- * check aggregation compatibility
- * -> direct link packets are broadcasted on
- * their interface only
- * -> aggregate packet if the current packet is
- * a "global" packet as well as the base
- * packet
- */
-
- primary_if = primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- /* packets without direct link flag and high TTL
- * are flooded through the net */
- if ((!directlink) &&
- (!(batman_packet->flags & DIRECTLINK)) &&
- (batman_packet->ttl != 1) &&
-
- /* own packets originating non-primary
- * interfaces leave only that interface */
- ((!forw_packet->own) ||
- (forw_packet->if_incoming == primary_if))) {
- res = true;
- goto out;
- }
-
- /* if the incoming packet is sent via this one
- * interface only - we still can aggregate */
- if ((directlink) &&
- (new_batman_packet->ttl == 1) &&
- (forw_packet->if_incoming == if_incoming) &&
-
- /* packets from direct neighbors or
- * own secondary interface packets
- * (= secondary interface packets in general) */
- (batman_packet->flags & DIRECTLINK ||
- (forw_packet->own &&
- forw_packet->if_incoming != primary_if))) {
- res = true;
- goto out;
- }
- }
-
-out:
- if (primary_if)
- hardif_free_ref(primary_if);
- return res;
-}
-
-/* create a new aggregated packet and add this packet to it */
-static void new_aggregated_packet(const unsigned char *packet_buff,
- int packet_len, unsigned long send_time,
- bool direct_link,
- struct hard_iface *if_incoming,
- int own_packet)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct forw_packet *forw_packet_aggr;
- unsigned char *skb_buff;
-
- if (!atomic_inc_not_zero(&if_incoming->refcount))
- return;
-
- /* own packet should always be scheduled */
- if (!own_packet) {
- if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "batman packet queue full\n");
- goto out;
- }
- }
-
- forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
- if (!forw_packet_aggr) {
- if (!own_packet)
- atomic_inc(&bat_priv->batman_queue_left);
- goto out;
- }
-
- if ((atomic_read(&bat_priv->aggregated_ogms)) &&
- (packet_len < MAX_AGGREGATION_BYTES))
- forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
- sizeof(struct ethhdr));
- else
- forw_packet_aggr->skb = dev_alloc_skb(packet_len +
- sizeof(struct ethhdr));
-
- if (!forw_packet_aggr->skb) {
- if (!own_packet)
- atomic_inc(&bat_priv->batman_queue_left);
- kfree(forw_packet_aggr);
- goto out;
- }
- skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
-
- INIT_HLIST_NODE(&forw_packet_aggr->list);
-
- skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
- forw_packet_aggr->packet_len = packet_len;
- memcpy(skb_buff, packet_buff, packet_len);
-
- forw_packet_aggr->own = own_packet;
- forw_packet_aggr->if_incoming = if_incoming;
- forw_packet_aggr->num_packets = 0;
- forw_packet_aggr->direct_link_flags = NO_FLAGS;
- forw_packet_aggr->send_time = send_time;
-
- /* save packet direct link flag status */
- if (direct_link)
- forw_packet_aggr->direct_link_flags |= 1;
-
- /* add new packet to packet list */
- spin_lock_bh(&bat_priv->forw_bat_list_lock);
- hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
- spin_unlock_bh(&bat_priv->forw_bat_list_lock);
-
- /* start timer for this packet */
- INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
- send_outstanding_bat_packet);
- queue_delayed_work(bat_event_workqueue,
- &forw_packet_aggr->delayed_work,
- send_time - jiffies);
-
- return;
-out:
- hardif_free_ref(if_incoming);
-}
-
-/* aggregate a new packet into the existing aggregation */
-static void aggregate(struct forw_packet *forw_packet_aggr,
- const unsigned char *packet_buff, int packet_len,
- bool direct_link)
-{
- unsigned char *skb_buff;
-
- skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
- memcpy(skb_buff, packet_buff, packet_len);
- forw_packet_aggr->packet_len += packet_len;
- forw_packet_aggr->num_packets++;
-
- /* save packet direct link flag status */
- if (direct_link)
- forw_packet_aggr->direct_link_flags |=
- (1 << forw_packet_aggr->num_packets);
-}
-
-void add_bat_packet_to_list(struct bat_priv *bat_priv,
- unsigned char *packet_buff, int packet_len,
- struct hard_iface *if_incoming, int own_packet,
- unsigned long send_time)
-{
- /**
- * _aggr -> pointer to the packet we want to aggregate with
- * _pos -> pointer to the position in the queue
- */
- struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
- struct hlist_node *tmp_node;
- struct batman_packet *batman_packet =
- (struct batman_packet *)packet_buff;
- bool direct_link = batman_packet->flags & DIRECTLINK ? 1 : 0;
-
- /* find position for the packet in the forward queue */
- spin_lock_bh(&bat_priv->forw_bat_list_lock);
- /* own packets are not to be aggregated */
- if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
- hlist_for_each_entry(forw_packet_pos, tmp_node,
- &bat_priv->forw_bat_list, list) {
- if (can_aggregate_with(batman_packet,
- bat_priv,
- packet_len,
- send_time,
- direct_link,
- if_incoming,
- forw_packet_pos)) {
- forw_packet_aggr = forw_packet_pos;
- break;
- }
- }
- }
-
- /* nothing to aggregate with - either aggregation disabled or no
- * suitable aggregation packet found */
- if (!forw_packet_aggr) {
- /* the following section can run without the lock */
- spin_unlock_bh(&bat_priv->forw_bat_list_lock);
-
- /**
- * if we could not aggregate this packet with one of the others
- * we hold it back for a while, so that it might be aggregated
- * later on
- */
- if ((!own_packet) &&
- (atomic_read(&bat_priv->aggregated_ogms)))
- send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
-
- new_aggregated_packet(packet_buff, packet_len,
- send_time, direct_link,
- if_incoming, own_packet);
- } else {
- aggregate(forw_packet_aggr,
- packet_buff, packet_len,
- direct_link);
- spin_unlock_bh(&bat_priv->forw_bat_list_lock);
- }
-}
-
-/* unpack the aggregated packets and process them one by one */
-void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
- unsigned char *packet_buff, int packet_len,
- struct hard_iface *if_incoming)
-{
- struct batman_packet *batman_packet;
- int buff_pos = 0;
- unsigned char *tt_buff;
-
- batman_packet = (struct batman_packet *)packet_buff;
-
- do {
- /* network to host order for our 32bit seqno and the
- orig_interval */
- batman_packet->seqno = ntohl(batman_packet->seqno);
- batman_packet->tt_crc = ntohs(batman_packet->tt_crc);
-
- tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
-
- receive_bat_packet(ethhdr, batman_packet, tt_buff, if_incoming);
-
- buff_pos += BAT_PACKET_LEN +
- tt_len(batman_packet->tt_num_changes);
-
- batman_packet = (struct batman_packet *)
- (packet_buff + buff_pos);
- } while (aggregated_packet(buff_pos, packet_len,
- batman_packet->tt_num_changes));
-}
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
deleted file mode 100644
index 216337bb841..00000000000
--- a/net/batman-adv/aggregation.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_AGGREGATION_H_
-#define _NET_BATMAN_ADV_AGGREGATION_H_
-
-#include "main.h"
-
-/* is there another aggregated packet here? */
-static inline int aggregated_packet(int buff_pos, int packet_len,
- int tt_num_changes)
-{
- int next_buff_pos = buff_pos + BAT_PACKET_LEN + (tt_num_changes *
- sizeof(struct tt_change));
-
- return (next_buff_pos <= packet_len) &&
- (next_buff_pos <= MAX_AGGREGATION_BYTES);
-}
-
-void add_bat_packet_to_list(struct bat_priv *bat_priv,
- unsigned char *packet_buff, int packet_len,
- struct hard_iface *if_incoming, int own_packet,
- unsigned long send_time);
-void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
- unsigned char *packet_buff, int packet_len,
- struct hard_iface *if_incoming);
-
-#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
new file mode 100644
index 00000000000..3512e251545
--- /dev/null
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -0,0 +1,1170 @@
+/*
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#include "main.h"
+#include "bat_ogm.h"
+#include "translation-table.h"
+#include "ring_buffer.h"
+#include "originator.h"
+#include "routing.h"
+#include "gateway_common.h"
+#include "gateway_client.h"
+#include "hard-interface.h"
+#include "send.h"
+
+void bat_ogm_init(struct hard_iface *hard_iface)
+{
+ struct batman_ogm_packet *batman_ogm_packet;
+
+ hard_iface->packet_len = BATMAN_OGM_LEN;
+ hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
+
+ batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
+ batman_ogm_packet->packet_type = BAT_OGM;
+ batman_ogm_packet->version = COMPAT_VERSION;
+ batman_ogm_packet->flags = NO_FLAGS;
+ batman_ogm_packet->ttl = 2;
+ batman_ogm_packet->tq = TQ_MAX_VALUE;
+ batman_ogm_packet->tt_num_changes = 0;
+ batman_ogm_packet->ttvn = 0;
+}
+
+void bat_ogm_init_primary(struct hard_iface *hard_iface)
+{
+ struct batman_ogm_packet *batman_ogm_packet;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
+ batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
+ batman_ogm_packet->ttl = TTL;
+}
+
+void bat_ogm_update_mac(struct hard_iface *hard_iface)
+{
+ struct batman_ogm_packet *batman_ogm_packet;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
+ memcpy(batman_ogm_packet->orig,
+ hard_iface->net_dev->dev_addr, ETH_ALEN);
+ memcpy(batman_ogm_packet->prev_sender,
+ hard_iface->net_dev->dev_addr, ETH_ALEN);
+}
+
+/* when do we schedule our own ogm to be sent */
+static unsigned long bat_ogm_emit_send_time(const struct bat_priv *bat_priv)
+{
+ return jiffies + msecs_to_jiffies(
+ atomic_read(&bat_priv->orig_interval) -
+ JITTER + (random32() % 2*JITTER));
+}
+
+/* when do we schedule a ogm packet to be sent */
+static unsigned long bat_ogm_fwd_send_time(void)
+{
+ return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
+}
+
+/* apply hop penalty for a normal link */
+static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
+{
+ int hop_penalty = atomic_read(&bat_priv->hop_penalty);
+ return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
+}
+
+/* is there another aggregated packet here? */
+static int bat_ogm_aggr_packet(int buff_pos, int packet_len,
+ int tt_num_changes)
+{
+ int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes);
+
+ return (next_buff_pos <= packet_len) &&
+ (next_buff_pos <= MAX_AGGREGATION_BYTES);
+}
+
+/* send a batman ogm to a given interface */
+static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
+ struct hard_iface *hard_iface)
+{
+ struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ char *fwd_str;
+ uint8_t packet_num;
+ int16_t buff_pos;
+ struct batman_ogm_packet *batman_ogm_packet;
+ struct sk_buff *skb;
+
+ if (hard_iface->if_status != IF_ACTIVE)
+ return;
+
+ packet_num = 0;
+ buff_pos = 0;
+ batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
+
+ /* adjust all flags and log packets */
+ while (bat_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
+ batman_ogm_packet->tt_num_changes)) {
+
+ /* we might have aggregated direct link packets with an
+ * ordinary base packet */
+ if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
+ (forw_packet->if_incoming == hard_iface))
+ batman_ogm_packet->flags |= DIRECTLINK;
+ else
+ batman_ogm_packet->flags &= ~DIRECTLINK;
+
+ fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
+ "Sending own" :
+ "Forwarding"));
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
+ " IDF %s, ttvn %d) on interface %s [%pM]\n",
+ fwd_str, (packet_num > 0 ? "aggregated " : ""),
+ batman_ogm_packet->orig,
+ ntohl(batman_ogm_packet->seqno),
+ batman_ogm_packet->tq, batman_ogm_packet->ttl,
+ (batman_ogm_packet->flags & DIRECTLINK ?
+ "on" : "off"),
+ batman_ogm_packet->ttvn, hard_iface->net_dev->name,
+ hard_iface->net_dev->dev_addr);
+
+ buff_pos += BATMAN_OGM_LEN +
+ tt_len(batman_ogm_packet->tt_num_changes);
+ packet_num++;
+ batman_ogm_packet = (struct batman_ogm_packet *)
+ (forw_packet->skb->data + buff_pos);
+ }
+
+ /* create clone because function is called more than once */
+ skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
+ if (skb)
+ send_skb_packet(skb, hard_iface, broadcast_addr);
+}
+
+/* send a batman ogm packet */
+void bat_ogm_emit(struct forw_packet *forw_packet)
+{
+ struct hard_iface *hard_iface;
+ struct net_device *soft_iface;
+ struct bat_priv *bat_priv;
+ struct hard_iface *primary_if = NULL;
+ struct batman_ogm_packet *batman_ogm_packet;
+ unsigned char directlink;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)
+ (forw_packet->skb->data);
+ directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
+
+ if (!forw_packet->if_incoming) {
+ pr_err("Error - can't forward packet: incoming iface not "
+ "specified\n");
+ goto out;
+ }
+
+ soft_iface = forw_packet->if_incoming->soft_iface;
+ bat_priv = netdev_priv(soft_iface);
+
+ if (forw_packet->if_incoming->if_status != IF_ACTIVE)
+ goto out;
+
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ /* multihomed peer assumed */
+ /* non-primary OGMs are only broadcasted on their interface */
+ if ((directlink && (batman_ogm_packet->ttl == 1)) ||
+ (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
+
+ /* FIXME: what about aggregated packets ? */
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "%s packet (originator %pM, seqno %d, TTL %d) "
+ "on interface %s [%pM]\n",
+ (forw_packet->own ? "Sending own" : "Forwarding"),
+ batman_ogm_packet->orig,
+ ntohl(batman_ogm_packet->seqno),
+ batman_ogm_packet->ttl,
+ forw_packet->if_incoming->net_dev->name,
+ forw_packet->if_incoming->net_dev->dev_addr);
+
+ /* skb is only used once and than forw_packet is free'd */
+ send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
+ broadcast_addr);
+ forw_packet->skb = NULL;
+
+ goto out;
+ }
+
+ /* broadcast on every interface */
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+ if (hard_iface->soft_iface != soft_iface)
+ continue;
+
+ bat_ogm_send_to_if(forw_packet, hard_iface);
+ }
+ rcu_read_unlock();
+
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+}
+
+/* return true if new_packet can be aggregated with forw_packet */
+static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
+ *new_batman_ogm_packet,
+ struct bat_priv *bat_priv,
+ int packet_len, unsigned long send_time,
+ bool directlink,
+ const struct hard_iface *if_incoming,
+ const struct forw_packet *forw_packet)
+{
+ struct batman_ogm_packet *batman_ogm_packet;
+ int aggregated_bytes = forw_packet->packet_len + packet_len;
+ struct hard_iface *primary_if = NULL;
+ bool res = false;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
+
+ /**
+ * we can aggregate the current packet to this aggregated packet
+ * if:
+ *
+ * - the send time is within our MAX_AGGREGATION_MS time
+ * - the resulting packet wont be bigger than
+ * MAX_AGGREGATION_BYTES
+ */
+
+ if (time_before(send_time, forw_packet->send_time) &&
+ time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
+ forw_packet->send_time) &&
+ (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
+
+ /**
+ * check aggregation compatibility
+ * -> direct link packets are broadcasted on
+ * their interface only
+ * -> aggregate packet if the current packet is
+ * a "global" packet as well as the base
+ * packet
+ */
+
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ /* packets without direct link flag and high TTL
+ * are flooded through the net */
+ if ((!directlink) &&
+ (!(batman_ogm_packet->flags & DIRECTLINK)) &&
+ (batman_ogm_packet->ttl != 1) &&
+
+ /* own packets originating non-primary
+ * interfaces leave only that interface */
+ ((!forw_packet->own) ||
+ (forw_packet->if_incoming == primary_if))) {
+ res = true;
+ goto out;
+ }
+
+ /* if the incoming packet is sent via this one
+ * interface only - we still can aggregate */
+ if ((directlink) &&
+ (new_batman_ogm_packet->ttl == 1) &&
+ (forw_packet->if_incoming == if_incoming) &&
+
+ /* packets from direct neighbors or
+ * own secondary interface packets
+ * (= secondary interface packets in general) */
+ (batman_ogm_packet->flags & DIRECTLINK ||
+ (forw_packet->own &&
+ forw_packet->if_incoming != primary_if))) {
+ res = true;
+ goto out;
+ }
+ }
+
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+ return res;
+}
+
+/* create a new aggregated packet and add this packet to it */
+static void bat_ogm_aggregate_new(const unsigned char *packet_buff,
+ int packet_len, unsigned long send_time,
+ bool direct_link,
+ struct hard_iface *if_incoming,
+ int own_packet)
+{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct forw_packet *forw_packet_aggr;
+ unsigned char *skb_buff;
+
+ if (!atomic_inc_not_zero(&if_incoming->refcount))
+ return;
+
+ /* own packet should always be scheduled */
+ if (!own_packet) {
+ if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "batman packet queue full\n");
+ goto out;
+ }
+ }
+
+ forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
+ if (!forw_packet_aggr) {
+ if (!own_packet)
+ atomic_inc(&bat_priv->batman_queue_left);
+ goto out;
+ }
+
+ if ((atomic_read(&bat_priv->aggregated_ogms)) &&
+ (packet_len < MAX_AGGREGATION_BYTES))
+ forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
+ sizeof(struct ethhdr));
+ else
+ forw_packet_aggr->skb = dev_alloc_skb(packet_len +
+ sizeof(struct ethhdr));
+
+ if (!forw_packet_aggr->skb) {
+ if (!own_packet)
+ atomic_inc(&bat_priv->batman_queue_left);
+ kfree(forw_packet_aggr);
+ goto out;
+ }
+ skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
+
+ INIT_HLIST_NODE(&forw_packet_aggr->list);
+
+ skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
+ forw_packet_aggr->packet_len = packet_len;
+ memcpy(skb_buff, packet_buff, packet_len);
+
+ forw_packet_aggr->own = own_packet;
+ forw_packet_aggr->if_incoming = if_incoming;
+ forw_packet_aggr->num_packets = 0;
+ forw_packet_aggr->direct_link_flags = NO_FLAGS;
+ forw_packet_aggr->send_time = send_time;
+
+ /* save packet direct link flag status */
+ if (direct_link)
+ forw_packet_aggr->direct_link_flags |= 1;
+
+ /* add new packet to packet list */
+ spin_lock_bh(&bat_priv->forw_bat_list_lock);
+ hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+
+ /* start timer for this packet */
+ INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
+ send_outstanding_bat_ogm_packet);
+ queue_delayed_work(bat_event_workqueue,
+ &forw_packet_aggr->delayed_work,
+ send_time - jiffies);
+
+ return;
+out:
+ hardif_free_ref(if_incoming);
+}
+
+/* aggregate a new packet into the existing ogm packet */
+static void bat_ogm_aggregate(struct forw_packet *forw_packet_aggr,
+ const unsigned char *packet_buff,
+ int packet_len, bool direct_link)
+{
+ unsigned char *skb_buff;
+
+ skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
+ memcpy(skb_buff, packet_buff, packet_len);
+ forw_packet_aggr->packet_len += packet_len;
+ forw_packet_aggr->num_packets++;
+
+ /* save packet direct link flag status */
+ if (direct_link)
+ forw_packet_aggr->direct_link_flags |=
+ (1 << forw_packet_aggr->num_packets);
+}
+
+static void bat_ogm_queue_add(struct bat_priv *bat_priv,
+ unsigned char *packet_buff,
+ int packet_len, struct hard_iface *if_incoming,
+ int own_packet, unsigned long send_time)
+{
+ /**
+ * _aggr -> pointer to the packet we want to aggregate with
+ * _pos -> pointer to the position in the queue
+ */
+ struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
+ struct hlist_node *tmp_node;
+ struct batman_ogm_packet *batman_ogm_packet;
+ bool direct_link;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
+ direct_link = batman_ogm_packet->flags & DIRECTLINK ? 1 : 0;
+
+ /* find position for the packet in the forward queue */
+ spin_lock_bh(&bat_priv->forw_bat_list_lock);
+ /* own packets are not to be aggregated */
+ if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
+ hlist_for_each_entry(forw_packet_pos, tmp_node,
+ &bat_priv->forw_bat_list, list) {
+ if (bat_ogm_can_aggregate(batman_ogm_packet,
+ bat_priv, packet_len,
+ send_time, direct_link,
+ if_incoming,
+ forw_packet_pos)) {
+ forw_packet_aggr = forw_packet_pos;
+ break;
+ }
+ }
+ }
+
+ /* nothing to aggregate with - either aggregation disabled or no
+ * suitable aggregation packet found */
+ if (!forw_packet_aggr) {
+ /* the following section can run without the lock */
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+
+ /**
+ * if we could not aggregate this packet with one of the others
+ * we hold it back for a while, so that it might be aggregated
+ * later on
+ */
+ if ((!own_packet) &&
+ (atomic_read(&bat_priv->aggregated_ogms)))
+ send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
+
+ bat_ogm_aggregate_new(packet_buff, packet_len,
+ send_time, direct_link,
+ if_incoming, own_packet);
+ } else {
+ bat_ogm_aggregate(forw_packet_aggr, packet_buff, packet_len,
+ direct_link);
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+ }
+}
+
+static void bat_ogm_forward(struct orig_node *orig_node,
+ const struct ethhdr *ethhdr,
+ struct batman_ogm_packet *batman_ogm_packet,
+ int directlink, struct hard_iface *if_incoming)
+{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct neigh_node *router;
+ uint8_t in_tq, in_ttl, tq_avg = 0;
+ uint8_t tt_num_changes;
+
+ if (batman_ogm_packet->ttl <= 1) {
+ bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
+ return;
+ }
+
+ router = orig_node_get_router(orig_node);
+
+ in_tq = batman_ogm_packet->tq;
+ in_ttl = batman_ogm_packet->ttl;
+ tt_num_changes = batman_ogm_packet->tt_num_changes;
+
+ batman_ogm_packet->ttl--;
+ memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
+
+ /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
+ * of our best tq value */
+ if (router && router->tq_avg != 0) {
+
+ /* rebroadcast ogm of best ranking neighbor as is */
+ if (!compare_eth(router->addr, ethhdr->h_source)) {
+ batman_ogm_packet->tq = router->tq_avg;
+
+ if (router->last_ttl)
+ batman_ogm_packet->ttl = router->last_ttl - 1;
+ }
+
+ tq_avg = router->tq_avg;
+ }
+
+ if (router)
+ neigh_node_free_ref(router);
+
+ /* apply hop penalty */
+ batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);
+
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Forwarding packet: tq_orig: %i, tq_avg: %i, "
+ "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
+ in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
+ batman_ogm_packet->ttl);
+
+ batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
+ batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
+
+ /* switch of primaries first hop flag when forwarding */
+ batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
+ if (directlink)
+ batman_ogm_packet->flags |= DIRECTLINK;
+ else
+ batman_ogm_packet->flags &= ~DIRECTLINK;
+
+ bat_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
+ BATMAN_OGM_LEN + tt_len(tt_num_changes),
+ if_incoming, 0, bat_ogm_fwd_send_time());
+}
+
+void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
+{
+ struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batman_ogm_packet *batman_ogm_packet;
+ struct hard_iface *primary_if;
+ int vis_server;
+
+ vis_server = atomic_read(&bat_priv->vis_mode);
+ primary_if = primary_if_get_selected(bat_priv);
+
+ batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
+
+ /* change sequence number to network order */
+ batman_ogm_packet->seqno =
+ htonl((uint32_t)atomic_read(&hard_iface->seqno));
+
+ batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
+ batman_ogm_packet->tt_crc = htons((uint16_t)
+ atomic_read(&bat_priv->tt_crc));
+ if (tt_num_changes >= 0)
+ batman_ogm_packet->tt_num_changes = tt_num_changes;
+
+ if (vis_server == VIS_TYPE_SERVER_SYNC)
+ batman_ogm_packet->flags |= VIS_SERVER;
+ else
+ batman_ogm_packet->flags &= ~VIS_SERVER;
+
+ if ((hard_iface == primary_if) &&
+ (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
+ batman_ogm_packet->gw_flags =
+ (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
+ else
+ batman_ogm_packet->gw_flags = NO_FLAGS;
+
+ atomic_inc(&hard_iface->seqno);
+
+ slide_own_bcast_window(hard_iface);
+ bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
+ hard_iface->packet_len, hard_iface, 1,
+ bat_ogm_emit_send_time(bat_priv));
+
+ if (primary_if)
+ hardif_free_ref(primary_if);
+}
+
+static void bat_ogm_orig_update(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
+ const struct ethhdr *ethhdr,
+ const struct batman_ogm_packet
+ *batman_ogm_packet,
+ struct hard_iface *if_incoming,
+ const unsigned char *tt_buff, int is_duplicate)
+{
+ struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+ struct neigh_node *router = NULL;
+ struct orig_node *orig_node_tmp;
+ struct hlist_node *node;
+ uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
+
+ bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
+ "Searching and updating originator entry of received packet\n");
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp_neigh_node, node,
+ &orig_node->neigh_list, list) {
+ if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
+ (tmp_neigh_node->if_incoming == if_incoming) &&
+ atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
+ if (neigh_node)
+ neigh_node_free_ref(neigh_node);
+ neigh_node = tmp_neigh_node;
+ continue;
+ }
+
+ if (is_duplicate)
+ continue;
+
+ spin_lock_bh(&tmp_neigh_node->tq_lock);
+ ring_buffer_set(tmp_neigh_node->tq_recv,
+ &tmp_neigh_node->tq_index, 0);
+ tmp_neigh_node->tq_avg =
+ ring_buffer_avg(tmp_neigh_node->tq_recv);
+ spin_unlock_bh(&tmp_neigh_node->tq_lock);
+ }
+
+ if (!neigh_node) {
+ struct orig_node *orig_tmp;
+
+ orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
+ if (!orig_tmp)
+ goto unlock;
+
+ neigh_node = create_neighbor(orig_node, orig_tmp,
+ ethhdr->h_source, if_incoming);
+
+ orig_node_free_ref(orig_tmp);
+ if (!neigh_node)
+ goto unlock;
+ } else
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Updating existing last-hop neighbor of originator\n");
+
+ rcu_read_unlock();
+
+ orig_node->flags = batman_ogm_packet->flags;
+ neigh_node->last_valid = jiffies;
+
+ spin_lock_bh(&neigh_node->tq_lock);
+ ring_buffer_set(neigh_node->tq_recv,
+ &neigh_node->tq_index,
+ batman_ogm_packet->tq);
+ neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
+ spin_unlock_bh(&neigh_node->tq_lock);
+
+ if (!is_duplicate) {
+ orig_node->last_ttl = batman_ogm_packet->ttl;
+ neigh_node->last_ttl = batman_ogm_packet->ttl;
+ }
+
+ bonding_candidate_add(orig_node, neigh_node);
+
+ /* if this neighbor already is our next hop there is nothing
+ * to change */
+ router = orig_node_get_router(orig_node);
+ if (router == neigh_node)
+ goto update_tt;
+
+ /* if this neighbor does not offer a better TQ we won't consider it */
+ if (router && (router->tq_avg > neigh_node->tq_avg))
+ goto update_tt;
+
+ /* if the TQ is the same and the link not more symmetric we
+ * won't consider it either */
+ if (router && (neigh_node->tq_avg == router->tq_avg)) {
+ orig_node_tmp = router->orig_node;
+ spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+ bcast_own_sum_orig =
+ orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+ spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+
+ orig_node_tmp = neigh_node->orig_node;
+ spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+ bcast_own_sum_neigh =
+ orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+ spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+
+ if (bcast_own_sum_orig >= bcast_own_sum_neigh)
+ goto update_tt;
+ }
+
+ update_route(bat_priv, orig_node, neigh_node);
+
+update_tt:
+ /* I have to check for transtable changes only if the OGM has been
+ * sent through a primary interface */
+ if (((batman_ogm_packet->orig != ethhdr->h_source) &&
+ (batman_ogm_packet->ttl > 2)) ||
+ (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
+ tt_update_orig(bat_priv, orig_node, tt_buff,
+ batman_ogm_packet->tt_num_changes,
+ batman_ogm_packet->ttvn,
+ batman_ogm_packet->tt_crc);
+
+ if (orig_node->gw_flags != batman_ogm_packet->gw_flags)
+ gw_node_update(bat_priv, orig_node,
+ batman_ogm_packet->gw_flags);
+
+ orig_node->gw_flags = batman_ogm_packet->gw_flags;
+
+ /* restart gateway selection if fast or late switching was enabled */
+ if ((orig_node->gw_flags) &&
+ (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
+ (atomic_read(&bat_priv->gw_sel_class) > 2))
+ gw_check_election(bat_priv, orig_node);
+
+ goto out;
+
+unlock:
+ rcu_read_unlock();
+out:
+ if (neigh_node)
+ neigh_node_free_ref(neigh_node);
+ if (router)
+ neigh_node_free_ref(router);
+}
+
+static int bat_ogm_calc_tq(struct orig_node *orig_node,
+ struct orig_node *orig_neigh_node,
+ struct batman_ogm_packet *batman_ogm_packet,
+ struct hard_iface *if_incoming)
+{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
+ struct hlist_node *node;
+ uint8_t total_count;
+ uint8_t orig_eq_count, neigh_rq_count, tq_own;
+ int tq_asym_penalty, ret = 0;
+
+ /* find corresponding one hop neighbor */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp_neigh_node, node,
+ &orig_neigh_node->neigh_list, list) {
+
+ if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
+ continue;
+
+ if (tmp_neigh_node->if_incoming != if_incoming)
+ continue;
+
+ if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+ continue;
+
+ neigh_node = tmp_neigh_node;
+ break;
+ }
+ rcu_read_unlock();
+
+ if (!neigh_node)
+ neigh_node = create_neighbor(orig_neigh_node,
+ orig_neigh_node,
+ orig_neigh_node->orig,
+ if_incoming);
+
+ if (!neigh_node)
+ goto out;
+
+ /* if orig_node is direct neighbor update neigh_node last_valid */
+ if (orig_node == orig_neigh_node)
+ neigh_node->last_valid = jiffies;
+
+ orig_node->last_valid = jiffies;
+
+ /* find packet count of corresponding one hop neighbor */
+ spin_lock_bh(&orig_node->ogm_cnt_lock);
+ orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
+ neigh_rq_count = neigh_node->real_packet_count;
+ spin_unlock_bh(&orig_node->ogm_cnt_lock);
+
+ /* pay attention to not get a value bigger than 100 % */
+ total_count = (orig_eq_count > neigh_rq_count ?
+ neigh_rq_count : orig_eq_count);
+
+ /* if we have too few packets (too less data) we set tq_own to zero */
+ /* if we receive too few packets it is not considered bidirectional */
+ if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
+ (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
+ tq_own = 0;
+ else
+ /* neigh_node->real_packet_count is never zero as we
+ * only purge old information when getting new
+ * information */
+ tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
+
+ /*
+ * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
+ * affect the nearly-symmetric links only a little, but
+ * punishes asymmetric links more. This will give a value
+ * between 0 and TQ_MAX_VALUE
+ */
+ tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
+ (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
+ (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
+ (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
+ (TQ_LOCAL_WINDOW_SIZE *
+ TQ_LOCAL_WINDOW_SIZE *
+ TQ_LOCAL_WINDOW_SIZE);
+
+ batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own
+ * tq_asym_penalty) /
+ (TQ_MAX_VALUE * TQ_MAX_VALUE));
+
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "bidirectional: "
+ "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
+ "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
+ "total tq: %3i\n",
+ orig_node->orig, orig_neigh_node->orig, total_count,
+ neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq);
+
+ /* if link has the minimum required transmission quality
+ * consider it bidirectional */
+ if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
+ ret = 1;
+
+out:
+ if (neigh_node)
+ neigh_node_free_ref(neigh_node);
+ return ret;
+}
+
+/* processes a batman packet for all interfaces, adjusts the sequence number and
+ * finds out whether it is a duplicate.
+ * returns:
+ * 1 the packet is a duplicate
+ * 0 the packet has not yet been received
+ * -1 the packet is old and has been received while the seqno window
+ * was protected. Caller should drop it.
+ */
+static int bat_ogm_update_seqnos(const struct ethhdr *ethhdr,
+ const struct batman_ogm_packet
+ *batman_ogm_packet,
+ const struct hard_iface *if_incoming)
+{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct orig_node *orig_node;
+ struct neigh_node *tmp_neigh_node;
+ struct hlist_node *node;
+ int is_duplicate = 0;
+ int32_t seq_diff;
+ int need_update = 0;
+ int set_mark, ret = -1;
+
+ orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
+ if (!orig_node)
+ return 0;
+
+ spin_lock_bh(&orig_node->ogm_cnt_lock);
+ seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;
+
+ /* signalize caller that the packet is to be dropped. */
+ if (window_protected(bat_priv, seq_diff,
+ &orig_node->batman_seqno_reset))
+ goto out;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp_neigh_node, node,
+ &orig_node->neigh_list, list) {
+
+ is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
+ orig_node->last_real_seqno,
+ batman_ogm_packet->seqno);
+
+ if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
+ (tmp_neigh_node->if_incoming == if_incoming))
+ set_mark = 1;
+ else
+ set_mark = 0;
+
+ /* if the window moved, set the update flag. */
+ need_update |= bit_get_packet(bat_priv,
+ tmp_neigh_node->real_bits,
+ seq_diff, set_mark);
+
+ tmp_neigh_node->real_packet_count =
+ bit_packet_count(tmp_neigh_node->real_bits);
+ }
+ rcu_read_unlock();
+
+ if (need_update) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "updating last_seqno: old %d, new %d\n",
+ orig_node->last_real_seqno, batman_ogm_packet->seqno);
+ orig_node->last_real_seqno = batman_ogm_packet->seqno;
+ }
+
+ ret = is_duplicate;
+
+out:
+ spin_unlock_bh(&orig_node->ogm_cnt_lock);
+ orig_node_free_ref(orig_node);
+ return ret;
+}
+
+static void bat_ogm_process(const struct ethhdr *ethhdr,
+ struct batman_ogm_packet *batman_ogm_packet,
+ const unsigned char *tt_buff,
+ struct hard_iface *if_incoming)
+{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct hard_iface *hard_iface;
+ struct orig_node *orig_neigh_node, *orig_node;
+ struct neigh_node *router = NULL, *router_router = NULL;
+ struct neigh_node *orig_neigh_router = NULL;
+ int has_directlink_flag;
+ int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
+ int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
+ int is_duplicate;
+ uint32_t if_incoming_seqno;
+
+ /* Silently drop when the batman packet is actually not a
+ * correct packet.
+ *
+ * This might happen if a packet is padded (e.g. Ethernet has a
+ * minimum frame length of 64 byte) and the aggregation interprets
+ * it as an additional length.
+ *
+ * TODO: A more sane solution would be to have a bit in the
+ * batman_ogm_packet to detect whether the packet is the last
+ * packet in an aggregation. Here we expect that the padding
+ * is always zero (or not 0x01)
+ */
+ if (batman_ogm_packet->packet_type != BAT_OGM)
+ return;
+
+ /* could be changed by schedule_own_packet() */
+ if_incoming_seqno = atomic_read(&if_incoming->seqno);
+
+ has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
+
+ is_single_hop_neigh = (compare_eth(ethhdr->h_source,
+ batman_ogm_packet->orig) ? 1 : 0);
+
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
+ "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
+ "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
+ ethhdr->h_source, if_incoming->net_dev->name,
+ if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
+ batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
+ batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
+ batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
+ batman_ogm_packet->ttl, batman_ogm_packet->version,
+ has_directlink_flag);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+ if (hard_iface->if_status != IF_ACTIVE)
+ continue;
+
+ if (hard_iface->soft_iface != if_incoming->soft_iface)
+ continue;
+
+ if (compare_eth(ethhdr->h_source,
+ hard_iface->net_dev->dev_addr))
+ is_my_addr = 1;
+
+ if (compare_eth(batman_ogm_packet->orig,
+ hard_iface->net_dev->dev_addr))
+ is_my_orig = 1;
+
+ if (compare_eth(batman_ogm_packet->prev_sender,
+ hard_iface->net_dev->dev_addr))
+ is_my_oldorig = 1;
+
+ if (is_broadcast_ether_addr(ethhdr->h_source))
+ is_broadcast = 1;
+ }
+ rcu_read_unlock();
+
+ if (batman_ogm_packet->version != COMPAT_VERSION) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: incompatible batman version (%i)\n",
+ batman_ogm_packet->version);
+ return;
+ }
+
+ if (is_my_addr) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: received my own broadcast (sender: %pM"
+ ")\n",
+ ethhdr->h_source);
+ return;
+ }
+
+ if (is_broadcast) {
+ bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
+ "ignoring all packets with broadcast source addr (sender: %pM"
+ ")\n", ethhdr->h_source);
+ return;
+ }
+
+ if (is_my_orig) {
+ unsigned long *word;
+ int offset;
+
+ orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
+ if (!orig_neigh_node)
+ return;
+
+ /* neighbor has to indicate direct link and it has to
+ * come via the corresponding interface */
+ /* save packet seqno for bidirectional check */
+ if (has_directlink_flag &&
+ compare_eth(if_incoming->net_dev->dev_addr,
+ batman_ogm_packet->orig)) {
+ offset = if_incoming->if_num * NUM_WORDS;
+
+ spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
+ word = &(orig_neigh_node->bcast_own[offset]);
+ bit_mark(word,
+ if_incoming_seqno -
+ batman_ogm_packet->seqno - 2);
+ orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
+ bit_packet_count(word);
+ spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
+ }
+
+ bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
+ "originator packet from myself (via neighbor)\n");
+ orig_node_free_ref(orig_neigh_node);
+ return;
+ }
+
+ if (is_my_oldorig) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all rebroadcast echos (sender: "
+ "%pM)\n", ethhdr->h_source);
+ return;
+ }
+
+ orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
+ if (!orig_node)
+ return;
+
+ is_duplicate = bat_ogm_update_seqnos(ethhdr, batman_ogm_packet,
+ if_incoming);
+
+ if (is_duplicate == -1) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: packet within seqno protection time "
+ "(sender: %pM)\n", ethhdr->h_source);
+ goto out;
+ }
+
+ if (batman_ogm_packet->tq == 0) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet with tq equal 0\n");
+ goto out;
+ }
+
+ router = orig_node_get_router(orig_node);
+ if (router)
+ router_router = orig_node_get_router(router->orig_node);
+
+ /* avoid temporary routing loops */
+ if (router && router_router &&
+ (compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
+ !(compare_eth(batman_ogm_packet->orig,
+ batman_ogm_packet->prev_sender)) &&
+ (compare_eth(router->addr, router_router->addr))) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all rebroadcast packets that "
+ "may make me loop (sender: %pM)\n", ethhdr->h_source);
+ goto out;
+ }
+
+ /* if sender is a direct neighbor the sender mac equals
+ * originator mac */
+ orig_neigh_node = (is_single_hop_neigh ?
+ orig_node :
+ get_orig_node(bat_priv, ethhdr->h_source));
+ if (!orig_neigh_node)
+ goto out;
+
+ orig_neigh_router = orig_node_get_router(orig_neigh_node);
+
+ /* drop packet if sender is not a direct neighbor and if we
+ * don't route towards it */
+ if (!is_single_hop_neigh && (!orig_neigh_router)) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: OGM via unknown neighbor!\n");
+ goto out_neigh;
+ }
+
+ is_bidirectional = bat_ogm_calc_tq(orig_node, orig_neigh_node,
+ batman_ogm_packet, if_incoming);
+
+ bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);
+
+ /* update ranking if it is not a duplicate or has the same
+ * seqno and similar ttl as the non-duplicate */
+ if (is_bidirectional &&
+ (!is_duplicate ||
+ ((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
+ (orig_node->last_ttl - 3 <= batman_ogm_packet->ttl))))
+ bat_ogm_orig_update(bat_priv, orig_node, ethhdr,
+ batman_ogm_packet, if_incoming,
+ tt_buff, is_duplicate);
+
+ /* is single hop (direct) neighbor */
+ if (is_single_hop_neigh) {
+
+ /* mark direct link on incoming interface */
+ bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
+ 1, if_incoming);
+
+ bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
+ "rebroadcast neighbor packet with direct link flag\n");
+ goto out_neigh;
+ }
+
+ /* multihop originator */
+ if (!is_bidirectional) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: not received via bidirectional link\n");
+ goto out_neigh;
+ }
+
+ if (is_duplicate) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: duplicate packet received\n");
+ goto out_neigh;
+ }
+
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Forwarding packet: rebroadcast originator packet\n");
+ bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 0, if_incoming);
+
+out_neigh:
+ if ((orig_neigh_node) && (!is_single_hop_neigh))
+ orig_node_free_ref(orig_neigh_node);
+out:
+ if (router)
+ neigh_node_free_ref(router);
+ if (router_router)
+ neigh_node_free_ref(router_router);
+ if (orig_neigh_router)
+ neigh_node_free_ref(orig_neigh_router);
+
+ orig_node_free_ref(orig_node);
+}
+
+void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
+ int packet_len, struct hard_iface *if_incoming)
+{
+ struct batman_ogm_packet *batman_ogm_packet;
+ int buff_pos = 0;
+ unsigned char *tt_buff;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
+
+ /* unpack the aggregated packets and process them one by one */
+ do {
+ /* network to host order for our 32bit seqno and the
+ orig_interval */
+ batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
+ batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
+
+ tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN;
+
+ bat_ogm_process(ethhdr, batman_ogm_packet,
+ tt_buff, if_incoming);
+
+ buff_pos += BATMAN_OGM_LEN +
+ tt_len(batman_ogm_packet->tt_num_changes);
+
+ batman_ogm_packet = (struct batman_ogm_packet *)
+ (packet_buff + buff_pos);
+ } while (bat_ogm_aggr_packet(buff_pos, packet_len,
+ batman_ogm_packet->tt_num_changes));
+}
diff --git a/net/batman-adv/bat_ogm.h b/net/batman-adv/bat_ogm.h
new file mode 100644
index 00000000000..69329c107e2
--- /dev/null
+++ b/net/batman-adv/bat_ogm.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#ifndef _NET_BATMAN_ADV_OGM_H_
+#define _NET_BATMAN_ADV_OGM_H_
+
+#include "main.h"
+
+void bat_ogm_init(struct hard_iface *hard_iface);
+void bat_ogm_init_primary(struct hard_iface *hard_iface);
+void bat_ogm_update_mac(struct hard_iface *hard_iface);
+void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes);
+void bat_ogm_emit(struct forw_packet *forw_packet);
+void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
+ int packet_len, struct hard_iface *if_incoming);
+
+#endif /* _NET_BATMAN_ADV_OGM_H_ */
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index cd15deba60a..b8a7414c357 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -380,6 +380,7 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,
BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
+BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
@@ -396,6 +397,7 @@ static struct bat_attribute *mesh_attrs[] = {
&bat_attr_aggregated_ogms,
&bat_attr_bonding,
&bat_attr_fragmentation,
+ &bat_attr_ap_isolation,
&bat_attr_vis_mode,
&bat_attr_gw_mode,
&bat_attr_orig_interval,
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index c1f4bfc09cc..0be9ff346fa 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -97,12 +97,12 @@ static void bit_shift(unsigned long *seq_bits, int32_t n)
(seq_bits[i - word_num - 1] >>
(WORD_BIT_SIZE-word_offset));
/* and the upper part of the right half and shift it left to
- * it's position */
+ * its position */
/* for our example that would be: word[0] = 9800 + 0076 =
* 9876 */
}
- /* now for our last word, i==word_num, we only have the it's "left"
- * half. that's the 1000 word in our example.*/
+ /* now for our last word, i==word_num, we only have its "left" half.
+ * that's the 1000 word in our example.*/
seq_bits[i] = (seq_bits[i - word_num] << word_offset);
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 056180ef9e1..619fb73b3b7 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -532,14 +532,14 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1;
/* Access the dhcp option lists. Each entry is made up by:
- * - octect 1: option type
- * - octect 2: option data len (only if type != 255 and 0)
- * - octect 3: option data */
+ * - octet 1: option type
+ * - octet 2: option data len (only if type != 255 and 0)
+ * - octet 3: option data */
while (*p != 255 && !ret) {
- /* p now points to the first octect: option type */
+ /* p now points to the first octet: option type */
if (*p == 53) {
/* type 53 is the message type option.
- * Jump the len octect and go to the data octect */
+ * Jump the len octet and go to the data octet */
if (pkt_len < 2)
goto out;
p += 2;
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index db7aacf1e09..7704df468e0 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -28,6 +28,7 @@
#include "bat_sysfs.h"
#include "originator.h"
#include "hash.h"
+#include "bat_ogm.h"
#include <linux/if_arp.h>
@@ -131,7 +132,6 @@ static void primary_if_select(struct bat_priv *bat_priv,
struct hard_iface *new_hard_iface)
{
struct hard_iface *curr_hard_iface;
- struct batman_packet *batman_packet;
ASSERT_RTNL();
@@ -147,10 +147,7 @@ static void primary_if_select(struct bat_priv *bat_priv,
if (!new_hard_iface)
return;
- batman_packet = (struct batman_packet *)(new_hard_iface->packet_buff);
- batman_packet->flags = PRIMARIES_FIRST_HOP;
- batman_packet->ttl = TTL;
-
+ bat_ogm_init_primary(new_hard_iface);
primary_if_update_addr(bat_priv);
}
@@ -162,14 +159,6 @@ static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
return false;
}
-static void update_mac_addresses(struct hard_iface *hard_iface)
-{
- memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
- hard_iface->net_dev->dev_addr, ETH_ALEN);
- memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
- hard_iface->net_dev->dev_addr, ETH_ALEN);
-}
-
static void check_known_mac_addr(const struct net_device *net_dev)
{
const struct hard_iface *hard_iface;
@@ -244,12 +233,12 @@ static void hardif_activate_interface(struct hard_iface *hard_iface)
bat_priv = netdev_priv(hard_iface->soft_iface);
- update_mac_addresses(hard_iface);
+ bat_ogm_update_mac(hard_iface);
hard_iface->if_status = IF_TO_BE_ACTIVATED;
/**
* the first active interface becomes our primary interface or
- * the next active interface after the old primay interface was removed
+ * the next active interface after the old primary interface was removed
*/
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if)
@@ -283,7 +272,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
const char *iface_name)
{
struct bat_priv *bat_priv;
- struct batman_packet *batman_packet;
struct net_device *soft_iface;
int ret;
@@ -318,8 +306,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
hard_iface->soft_iface = soft_iface;
bat_priv = netdev_priv(hard_iface->soft_iface);
- hard_iface->packet_len = BAT_PACKET_LEN;
- hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
+
+ bat_ogm_init(hard_iface);
if (!hard_iface->packet_buff) {
bat_err(hard_iface->soft_iface, "Can't add interface packet "
@@ -328,15 +316,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
goto err;
}
- batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
- batman_packet->packet_type = BAT_PACKET;
- batman_packet->version = COMPAT_VERSION;
- batman_packet->flags = NO_FLAGS;
- batman_packet->ttl = 2;
- batman_packet->tq = TQ_MAX_VALUE;
- batman_packet->tt_num_changes = 0;
- batman_packet->ttvn = 0;
-
hard_iface->if_num = bat_priv->num_ifaces;
bat_priv->num_ifaces++;
hard_iface->if_status = IF_INACTIVE;
@@ -381,7 +360,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
hard_iface->net_dev->name);
/* begin scheduling originator messages on that interface */
- schedule_own_packet(hard_iface);
+ schedule_bat_ogm(hard_iface);
out:
return 0;
@@ -455,11 +434,8 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
dev_hold(net_dev);
hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC);
- if (!hard_iface) {
- pr_err("Can't add interface (%s): out of memory\n",
- net_dev->name);
+ if (!hard_iface)
goto release_dev;
- }
ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
if (ret)
@@ -551,7 +527,7 @@ static int hard_if_event(struct notifier_block *this,
goto hardif_put;
check_known_mac_addr(hard_iface->net_dev);
- update_mac_addresses(hard_iface);
+ bat_ogm_update_mac(hard_iface);
bat_priv = netdev_priv(hard_iface->soft_iface);
primary_if = primary_if_get_selected(bat_priv);
@@ -573,14 +549,14 @@ out:
return NOTIFY_DONE;
}
-/* receive a packet with the batman ethertype coming on a hard
+/* incoming packets with the batman ethertype received on any active hard
* interface */
static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype,
struct net_device *orig_dev)
{
struct bat_priv *bat_priv;
- struct batman_packet *batman_packet;
+ struct batman_ogm_packet *batman_ogm_packet;
struct hard_iface *hard_iface;
int ret;
@@ -612,22 +588,22 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
if (hard_iface->if_status != IF_ACTIVE)
goto err_free;
- batman_packet = (struct batman_packet *)skb->data;
+ batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
- if (batman_packet->version != COMPAT_VERSION) {
+ if (batman_ogm_packet->version != COMPAT_VERSION) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: incompatible batman version (%i)\n",
- batman_packet->version);
+ batman_ogm_packet->version);
goto err_free;
}
/* all receive handlers return whether they received or reused
* the supplied skb. if not, we have to free the skb. */
- switch (batman_packet->packet_type) {
+ switch (batman_ogm_packet->packet_type) {
/* batman originator packet */
- case BAT_PACKET:
- ret = recv_bat_packet(skb, hard_iface);
+ case BAT_OGM:
+ ret = recv_bat_ogm_packet(skb, hard_iface);
break;
/* batman icmp packet */
@@ -681,6 +657,36 @@ err_out:
return NET_RX_DROP;
}
+/* This function returns true if the interface represented by ifindex is a
+ * 802.11 wireless device */
+bool is_wifi_iface(int ifindex)
+{
+ struct net_device *net_device = NULL;
+ bool ret = false;
+
+ if (ifindex == NULL_IFINDEX)
+ goto out;
+
+ net_device = dev_get_by_index(&init_net, ifindex);
+ if (!net_device)
+ goto out;
+
+#ifdef CONFIG_WIRELESS_EXT
+ /* pre-cfg80211 drivers have to implement WEXT, so it is possible to
+ * check for wireless_handlers != NULL */
+ if (net_device->wireless_handlers)
+ ret = true;
+ else
+#endif
+ /* cfg80211 drivers have to set ieee80211_ptr */
+ if (net_device->ieee80211_ptr)
+ ret = true;
+out:
+ if (net_device)
+ dev_put(net_device);
+ return ret;
+}
+
struct notifier_block hard_if_notifier = {
.notifier_call = hard_if_event,
};
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 442eacbc9e3..67f78d1a63b 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -42,6 +42,7 @@ void hardif_remove_interfaces(void);
int hardif_min_mtu(struct net_device *soft_iface);
void update_min_mtu(struct net_device *soft_iface);
void hardif_free_rcu(struct rcu_head *rcu);
+bool is_wifi_iface(int ifindex);
static inline void hardif_free_ref(struct hard_iface *hard_iface)
{
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index dd5c9fd7a90..d20aa71ba1e 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -76,19 +76,30 @@ static inline void hash_delete(struct hashtable_t *hash,
hash_destroy(hash);
}
-/* adds data to the hashtable. returns 0 on success, -1 on error */
+/**
+ * hash_add - adds data to the hashtable
+ * @hash: storage hash table
+ * @compare: callback to determine if 2 hash elements are identical
+ * @choose: callback calculating the hash index
+ * @data: data passed to the aforementioned callbacks as argument
+ * @data_node: to be added element
+ *
+ * Returns 0 on success, 1 if the element already is in the hash
+ * and -1 on error.
+ */
+
static inline int hash_add(struct hashtable_t *hash,
hashdata_compare_cb compare,
hashdata_choose_cb choose,
const void *data, struct hlist_node *data_node)
{
- int index;
+ int index, ret = -1;
struct hlist_head *head;
struct hlist_node *node;
spinlock_t *list_lock; /* spinlock to protect write access */
if (!hash)
- goto err;
+ goto out;
index = choose(data, hash->size);
head = &hash->table[index];
@@ -99,6 +110,7 @@ static inline int hash_add(struct hashtable_t *hash,
if (!compare(node, data))
continue;
+ ret = 1;
goto err_unlock;
}
rcu_read_unlock();
@@ -108,12 +120,13 @@ static inline int hash_add(struct hashtable_t *hash,
hlist_add_head_rcu(data_node, head);
spin_unlock_bh(list_lock);
- return 0;
+ ret = 0;
+ goto out;
err_unlock:
rcu_read_unlock();
-err:
- return -1;
+out:
+ return ret;
}
/* removes data from hash, if found. returns pointer do data on success, so you
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index b0f9068ade5..fb87bdc2ce9 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -107,7 +107,7 @@ int mesh_init(struct net_device *soft_iface)
if (tt_init(bat_priv) < 1)
goto err;
- tt_local_add(soft_iface, soft_iface->dev_addr);
+ tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX);
if (vis_init(bat_priv) < 1)
goto err;
@@ -117,8 +117,6 @@ int mesh_init(struct net_device *soft_iface)
goto end;
err:
- pr_err("Unable to allocate memory for mesh information structures: "
- "out of mem ?\n");
mesh_free(soft_iface);
return -1;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index a6df61a6933..964ad4d8ba3 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -28,7 +28,7 @@
#define DRIVER_DEVICE "batman-adv"
#ifndef SOURCE_VERSION
-#define SOURCE_VERSION "2011.3.0"
+#define SOURCE_VERSION "2011.4.0"
#endif
/* B.A.T.M.A.N. parameters */
@@ -44,7 +44,7 @@
#define PURGE_TIMEOUT 200
#define TT_LOCAL_TIMEOUT 3600 /* in seconds */
#define TT_CLIENT_ROAM_TIMEOUT 600
-/* sliding packet range of received originator messages in squence numbers
+/* sliding packet range of received originator messages in sequence numbers
* (should be a multiple of our word size) */
#define TQ_LOCAL_WINDOW_SIZE 64
#define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */
@@ -62,6 +62,8 @@
#define NO_FLAGS 0
+#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
+
#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
@@ -133,7 +135,7 @@ enum dbg_level {
#include <linux/mutex.h> /* mutex */
#include <linux/module.h> /* needed by all modules */
#include <linux/netdevice.h> /* netdevice */
-#include <linux/etherdevice.h> /* ethernet address classifaction */
+#include <linux/etherdevice.h> /* ethernet address classification */
#include <linux/if_ether.h> /* ethernet header */
#include <linux/poll.h> /* poll_table */
#include <linux/kthread.h> /* kernel threads */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index f3c3f620d19..0e5b77255d9 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -252,7 +252,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
hash_added = hash_add(bat_priv->orig_hash, compare_orig,
choose_orig, orig_node, &orig_node->hash_entry);
- if (hash_added < 0)
+ if (hash_added != 0)
goto free_bcast_own_sum;
return orig_node;
@@ -336,8 +336,7 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
} else {
if (purge_orig_neighbors(bat_priv, orig_node,
&best_neigh_node)) {
- update_routes(bat_priv, orig_node,
- best_neigh_node);
+ update_route(bat_priv, orig_node, best_neigh_node);
}
}
@@ -493,10 +492,8 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
GFP_ATOMIC);
- if (!data_ptr) {
- pr_err("Can't resize orig: out of memory\n");
+ if (!data_ptr)
return -1;
- }
memcpy(data_ptr, orig_node->bcast_own,
(max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
@@ -504,10 +501,8 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
orig_node->bcast_own = data_ptr;
data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
- if (!data_ptr) {
- pr_err("Can't resize orig: out of memory\n");
+ if (!data_ptr)
return -1;
- }
memcpy(data_ptr, orig_node->bcast_own_sum,
(max_if_num - 1) * sizeof(uint8_t));
@@ -562,10 +557,8 @@ static int orig_node_del_if(struct orig_node *orig_node,
chunk_size = sizeof(unsigned long) * NUM_WORDS;
data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
- if (!data_ptr) {
- pr_err("Can't resize orig: out of memory\n");
+ if (!data_ptr)
return -1;
- }
/* copy first part */
memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
@@ -583,10 +576,8 @@ free_bcast_own:
goto free_own_sum;
data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
- if (!data_ptr) {
- pr_err("Can't resize orig: out of memory\n");
+ if (!data_ptr)
return -1;
- }
memcpy(data_ptr, orig_node->bcast_own_sum,
del_if_num * sizeof(uint8_t));
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index b76b4be10b9..4d9e54c57a3 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -25,14 +25,14 @@
#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
enum bat_packettype {
- BAT_PACKET = 0x01,
- BAT_ICMP = 0x02,
- BAT_UNICAST = 0x03,
- BAT_BCAST = 0x04,
- BAT_VIS = 0x05,
+ BAT_OGM = 0x01,
+ BAT_ICMP = 0x02,
+ BAT_UNICAST = 0x03,
+ BAT_BCAST = 0x04,
+ BAT_VIS = 0x05,
BAT_UNICAST_FRAG = 0x06,
- BAT_TT_QUERY = 0x07,
- BAT_ROAM_ADV = 0x08
+ BAT_TT_QUERY = 0x07,
+ BAT_ROAM_ADV = 0x08
};
/* this file is included by batctl which needs these defines */
@@ -84,12 +84,13 @@ enum tt_query_flags {
enum tt_client_flags {
TT_CLIENT_DEL = 1 << 0,
TT_CLIENT_ROAM = 1 << 1,
+ TT_CLIENT_WIFI = 1 << 2,
TT_CLIENT_NOPURGE = 1 << 8,
TT_CLIENT_NEW = 1 << 9,
TT_CLIENT_PENDING = 1 << 10
};
-struct batman_packet {
+struct batman_ogm_packet {
uint8_t packet_type;
uint8_t version; /* batman version field */
uint8_t ttl;
@@ -104,7 +105,7 @@ struct batman_packet {
uint16_t tt_crc;
} __packed;
-#define BAT_PACKET_LEN sizeof(struct batman_packet)
+#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet)
struct icmp_packet {
uint8_t packet_type;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 0f32c818874..f961cc5eade 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -22,18 +22,14 @@
#include "main.h"
#include "routing.h"
#include "send.h"
-#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
-#include "ring_buffer.h"
#include "vis.h"
-#include "aggregation.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
#include "unicast.h"
+#include "bat_ogm.h"
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
@@ -64,69 +60,9 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
}
}
-static void update_transtable(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const unsigned char *tt_buff,
- uint8_t tt_num_changes, uint8_t ttvn,
- uint16_t tt_crc)
-{
- uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
- bool full_table = true;
-
- /* the ttvn increased by one -> we can apply the attached changes */
- if (ttvn - orig_ttvn == 1) {
- /* the OGM could not contain the changes because they were too
- * many to fit in one frame or because they have already been
- * sent TT_OGM_APPEND_MAX times. In this case send a tt
- * request */
- if (!tt_num_changes) {
- full_table = false;
- goto request_table;
- }
-
- tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
- (struct tt_change *)tt_buff);
-
- /* Even if we received the crc into the OGM, we prefer
- * to recompute it to spot any possible inconsistency
- * in the global table */
- orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
-
- /* The ttvn alone is not enough to guarantee consistency
- * because a single value could repesent different states
- * (due to the wrap around). Thus a node has to check whether
- * the resulting table (after applying the changes) is still
- * consistent or not. E.g. a node could disconnect while its
- * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
- * checking the CRC value is mandatory to detect the
- * inconsistency */
- if (orig_node->tt_crc != tt_crc)
- goto request_table;
-
- /* Roaming phase is over: tables are in sync again. I can
- * unset the flag */
- orig_node->tt_poss_change = false;
- } else {
- /* if we missed more than one change or our tables are not
- * in sync anymore -> request fresh tt data */
- if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
-request_table:
- bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
- "Need to retrieve the correct information "
- "(ttvn: %u last_ttvn: %u crc: %u last_crc: "
- "%u num_changes: %u)\n", orig_node->orig, ttvn,
- orig_ttvn, tt_crc, orig_node->tt_crc,
- tt_num_changes);
- send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
- full_table);
- return;
- }
- }
-}
-
-static void update_route(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+static void _update_route(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
+ struct neigh_node *neigh_node)
{
struct neigh_node *curr_router;
@@ -170,8 +106,8 @@ static void update_route(struct bat_priv *bat_priv,
neigh_node_free_ref(curr_router);
}
-void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ struct neigh_node *neigh_node)
{
struct neigh_node *router = NULL;
@@ -181,116 +117,13 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
router = orig_node_get_router(orig_node);
if (router != neigh_node)
- update_route(bat_priv, orig_node, neigh_node);
+ _update_route(bat_priv, orig_node, neigh_node);
out:
if (router)
neigh_node_free_ref(router);
}
-static int is_bidirectional_neigh(struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- struct batman_packet *batman_packet,
- struct hard_iface *if_incoming)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
- struct hlist_node *node;
- uint8_t total_count;
- uint8_t orig_eq_count, neigh_rq_count, tq_own;
- int tq_asym_penalty, ret = 0;
-
- /* find corresponding one hop neighbor */
- rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_neigh_node, node,
- &orig_neigh_node->neigh_list, list) {
-
- if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
- continue;
-
- if (tmp_neigh_node->if_incoming != if_incoming)
- continue;
-
- if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
- continue;
-
- neigh_node = tmp_neigh_node;
- break;
- }
- rcu_read_unlock();
-
- if (!neigh_node)
- neigh_node = create_neighbor(orig_neigh_node,
- orig_neigh_node,
- orig_neigh_node->orig,
- if_incoming);
-
- if (!neigh_node)
- goto out;
-
- /* if orig_node is direct neighbour update neigh_node last_valid */
- if (orig_node == orig_neigh_node)
- neigh_node->last_valid = jiffies;
-
- orig_node->last_valid = jiffies;
-
- /* find packet count of corresponding one hop neighbor */
- spin_lock_bh(&orig_node->ogm_cnt_lock);
- orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
- neigh_rq_count = neigh_node->real_packet_count;
- spin_unlock_bh(&orig_node->ogm_cnt_lock);
-
- /* pay attention to not get a value bigger than 100 % */
- total_count = (orig_eq_count > neigh_rq_count ?
- neigh_rq_count : orig_eq_count);
-
- /* if we have too few packets (too less data) we set tq_own to zero */
- /* if we receive too few packets it is not considered bidirectional */
- if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
- (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
- tq_own = 0;
- else
- /* neigh_node->real_packet_count is never zero as we
- * only purge old information when getting new
- * information */
- tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
-
- /*
- * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
- * affect the nearly-symmetric links only a little, but
- * punishes asymmetric links more. This will give a value
- * between 0 and TQ_MAX_VALUE
- */
- tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
- (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
- (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
- (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
- (TQ_LOCAL_WINDOW_SIZE *
- TQ_LOCAL_WINDOW_SIZE *
- TQ_LOCAL_WINDOW_SIZE);
-
- batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
- (TQ_MAX_VALUE * TQ_MAX_VALUE));
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "bidirectional: "
- "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
- "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
- "total tq: %3i\n",
- orig_node->orig, orig_neigh_node->orig, total_count,
- neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);
-
- /* if link has the minimum required transmission quality
- * consider it bidirectional */
- if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
- ret = 1;
-
-out:
- if (neigh_node)
- neigh_node_free_ref(neigh_node);
- return ret;
-}
-
/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
struct neigh_node *neigh_node)
@@ -308,8 +141,8 @@ out:
return;
}
-static void bonding_candidate_add(struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+void bonding_candidate_add(struct orig_node *orig_node,
+ struct neigh_node *neigh_node)
{
struct hlist_node *node;
struct neigh_node *tmp_neigh_node, *router = NULL;
@@ -379,162 +212,23 @@ out:
}
/* copy primary address for bonding */
-static void bonding_save_primary(const struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- const struct batman_packet *batman_packet)
+void bonding_save_primary(const struct orig_node *orig_node,
+ struct orig_node *orig_neigh_node,
+ const struct batman_ogm_packet *batman_ogm_packet)
{
- if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
+ if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
return;
memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}
-static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- const struct batman_packet *batman_packet,
- struct hard_iface *if_incoming,
- const unsigned char *tt_buff, int is_duplicate)
-{
- struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
- struct neigh_node *router = NULL;
- struct orig_node *orig_node_tmp;
- struct hlist_node *node;
- uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
-
- bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
- "Searching and updating originator entry of received packet\n");
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_neigh_node, node,
- &orig_node->neigh_list, list) {
- if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
- (tmp_neigh_node->if_incoming == if_incoming) &&
- atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
- if (neigh_node)
- neigh_node_free_ref(neigh_node);
- neigh_node = tmp_neigh_node;
- continue;
- }
-
- if (is_duplicate)
- continue;
-
- spin_lock_bh(&tmp_neigh_node->tq_lock);
- ring_buffer_set(tmp_neigh_node->tq_recv,
- &tmp_neigh_node->tq_index, 0);
- tmp_neigh_node->tq_avg =
- ring_buffer_avg(tmp_neigh_node->tq_recv);
- spin_unlock_bh(&tmp_neigh_node->tq_lock);
- }
-
- if (!neigh_node) {
- struct orig_node *orig_tmp;
-
- orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
- if (!orig_tmp)
- goto unlock;
-
- neigh_node = create_neighbor(orig_node, orig_tmp,
- ethhdr->h_source, if_incoming);
-
- orig_node_free_ref(orig_tmp);
- if (!neigh_node)
- goto unlock;
- } else
- bat_dbg(DBG_BATMAN, bat_priv,
- "Updating existing last-hop neighbor of originator\n");
-
- rcu_read_unlock();
-
- orig_node->flags = batman_packet->flags;
- neigh_node->last_valid = jiffies;
-
- spin_lock_bh(&neigh_node->tq_lock);
- ring_buffer_set(neigh_node->tq_recv,
- &neigh_node->tq_index,
- batman_packet->tq);
- neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
- spin_unlock_bh(&neigh_node->tq_lock);
-
- if (!is_duplicate) {
- orig_node->last_ttl = batman_packet->ttl;
- neigh_node->last_ttl = batman_packet->ttl;
- }
-
- bonding_candidate_add(orig_node, neigh_node);
-
- /* if this neighbor already is our next hop there is nothing
- * to change */
- router = orig_node_get_router(orig_node);
- if (router == neigh_node)
- goto update_tt;
-
- /* if this neighbor does not offer a better TQ we won't consider it */
- if (router && (router->tq_avg > neigh_node->tq_avg))
- goto update_tt;
-
- /* if the TQ is the same and the link not more symetric we
- * won't consider it either */
- if (router && (neigh_node->tq_avg == router->tq_avg)) {
- orig_node_tmp = router->orig_node;
- spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
- bcast_own_sum_orig =
- orig_node_tmp->bcast_own_sum[if_incoming->if_num];
- spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
-
- orig_node_tmp = neigh_node->orig_node;
- spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
- bcast_own_sum_neigh =
- orig_node_tmp->bcast_own_sum[if_incoming->if_num];
- spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
-
- if (bcast_own_sum_orig >= bcast_own_sum_neigh)
- goto update_tt;
- }
-
- update_routes(bat_priv, orig_node, neigh_node);
-
-update_tt:
- /* I have to check for transtable changes only if the OGM has been
- * sent through a primary interface */
- if (((batman_packet->orig != ethhdr->h_source) &&
- (batman_packet->ttl > 2)) ||
- (batman_packet->flags & PRIMARIES_FIRST_HOP))
- update_transtable(bat_priv, orig_node, tt_buff,
- batman_packet->tt_num_changes,
- batman_packet->ttvn,
- batman_packet->tt_crc);
-
- if (orig_node->gw_flags != batman_packet->gw_flags)
- gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
-
- orig_node->gw_flags = batman_packet->gw_flags;
-
- /* restart gateway selection if fast or late switching was enabled */
- if ((orig_node->gw_flags) &&
- (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
- (atomic_read(&bat_priv->gw_sel_class) > 2))
- gw_check_election(bat_priv, orig_node);
-
- goto out;
-
-unlock:
- rcu_read_unlock();
-out:
- if (neigh_node)
- neigh_node_free_ref(neigh_node);
- if (router)
- neigh_node_free_ref(router);
-}
-
/* checks whether the host restarted and is in the protection time.
* returns:
* 0 if the packet is to be accepted
* 1 if the packet is to be ignored.
*/
-static int window_protected(struct bat_priv *bat_priv,
- int32_t seq_num_diff,
- unsigned long *last_reset)
+int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
+ unsigned long *last_reset)
{
if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
@@ -552,330 +246,12 @@ static int window_protected(struct bat_priv *bat_priv,
return 0;
}
-/* processes a batman packet for all interfaces, adjusts the sequence number and
- * finds out whether it is a duplicate.
- * returns:
- * 1 the packet is a duplicate
- * 0 the packet has not yet been received
- * -1 the packet is old and has been received while the seqno window
- * was protected. Caller should drop it.
- */
-static int count_real_packets(const struct ethhdr *ethhdr,
- const struct batman_packet *batman_packet,
- const struct hard_iface *if_incoming)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct orig_node *orig_node;
- struct neigh_node *tmp_neigh_node;
- struct hlist_node *node;
- int is_duplicate = 0;
- int32_t seq_diff;
- int need_update = 0;
- int set_mark, ret = -1;
-
- orig_node = get_orig_node(bat_priv, batman_packet->orig);
- if (!orig_node)
- return 0;
-
- spin_lock_bh(&orig_node->ogm_cnt_lock);
- seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
-
- /* signalize caller that the packet is to be dropped. */
- if (window_protected(bat_priv, seq_diff,
- &orig_node->batman_seqno_reset))
- goto out;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_neigh_node, node,
- &orig_node->neigh_list, list) {
-
- is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
- orig_node->last_real_seqno,
- batman_packet->seqno);
-
- if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
- (tmp_neigh_node->if_incoming == if_incoming))
- set_mark = 1;
- else
- set_mark = 0;
-
- /* if the window moved, set the update flag. */
- need_update |= bit_get_packet(bat_priv,
- tmp_neigh_node->real_bits,
- seq_diff, set_mark);
-
- tmp_neigh_node->real_packet_count =
- bit_packet_count(tmp_neigh_node->real_bits);
- }
- rcu_read_unlock();
-
- if (need_update) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "updating last_seqno: old %d, new %d\n",
- orig_node->last_real_seqno, batman_packet->seqno);
- orig_node->last_real_seqno = batman_packet->seqno;
- }
-
- ret = is_duplicate;
-
-out:
- spin_unlock_bh(&orig_node->ogm_cnt_lock);
- orig_node_free_ref(orig_node);
- return ret;
-}
-
-void receive_bat_packet(const struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- const unsigned char *tt_buff,
- struct hard_iface *if_incoming)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct hard_iface *hard_iface;
- struct orig_node *orig_neigh_node, *orig_node;
- struct neigh_node *router = NULL, *router_router = NULL;
- struct neigh_node *orig_neigh_router = NULL;
- int has_directlink_flag;
- int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
- int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
- int is_duplicate;
- uint32_t if_incoming_seqno;
-
- /* Silently drop when the batman packet is actually not a
- * correct packet.
- *
- * This might happen if a packet is padded (e.g. Ethernet has a
- * minimum frame length of 64 byte) and the aggregation interprets
- * it as an additional length.
- *
- * TODO: A more sane solution would be to have a bit in the
- * batman_packet to detect whether the packet is the last
- * packet in an aggregation. Here we expect that the padding
- * is always zero (or not 0x01)
- */
- if (batman_packet->packet_type != BAT_PACKET)
- return;
-
- /* could be changed by schedule_own_packet() */
- if_incoming_seqno = atomic_read(&if_incoming->seqno);
-
- has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
-
- is_single_hop_neigh = (compare_eth(ethhdr->h_source,
- batman_packet->orig) ? 1 : 0);
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
- "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
- "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
- ethhdr->h_source, if_incoming->net_dev->name,
- if_incoming->net_dev->dev_addr, batman_packet->orig,
- batman_packet->prev_sender, batman_packet->seqno,
- batman_packet->ttvn, batman_packet->tt_crc,
- batman_packet->tt_num_changes, batman_packet->tq,
- batman_packet->ttl, batman_packet->version,
- has_directlink_flag);
-
- rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
- if (hard_iface->if_status != IF_ACTIVE)
- continue;
-
- if (hard_iface->soft_iface != if_incoming->soft_iface)
- continue;
-
- if (compare_eth(ethhdr->h_source,
- hard_iface->net_dev->dev_addr))
- is_my_addr = 1;
-
- if (compare_eth(batman_packet->orig,
- hard_iface->net_dev->dev_addr))
- is_my_orig = 1;
-
- if (compare_eth(batman_packet->prev_sender,
- hard_iface->net_dev->dev_addr))
- is_my_oldorig = 1;
-
- if (is_broadcast_ether_addr(ethhdr->h_source))
- is_broadcast = 1;
- }
- rcu_read_unlock();
-
- if (batman_packet->version != COMPAT_VERSION) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: incompatible batman version (%i)\n",
- batman_packet->version);
- return;
- }
-
- if (is_my_addr) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: received my own broadcast (sender: %pM"
- ")\n",
- ethhdr->h_source);
- return;
- }
-
- if (is_broadcast) {
- bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
- "ignoring all packets with broadcast source addr (sender: %pM"
- ")\n", ethhdr->h_source);
- return;
- }
-
- if (is_my_orig) {
- unsigned long *word;
- int offset;
-
- orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
- if (!orig_neigh_node)
- return;
-
- /* neighbor has to indicate direct link and it has to
- * come via the corresponding interface */
- /* save packet seqno for bidirectional check */
- if (has_directlink_flag &&
- compare_eth(if_incoming->net_dev->dev_addr,
- batman_packet->orig)) {
- offset = if_incoming->if_num * NUM_WORDS;
-
- spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
- word = &(orig_neigh_node->bcast_own[offset]);
- bit_mark(word,
- if_incoming_seqno - batman_packet->seqno - 2);
- orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
- bit_packet_count(word);
- spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
- }
-
- bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
- "originator packet from myself (via neighbor)\n");
- orig_node_free_ref(orig_neigh_node);
- return;
- }
-
- if (is_my_oldorig) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all rebroadcast echos (sender: "
- "%pM)\n", ethhdr->h_source);
- return;
- }
-
- orig_node = get_orig_node(bat_priv, batman_packet->orig);
- if (!orig_node)
- return;
-
- is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
-
- if (is_duplicate == -1) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: packet within seqno protection time "
- "(sender: %pM)\n", ethhdr->h_source);
- goto out;
- }
-
- if (batman_packet->tq == 0) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: originator packet with tq equal 0\n");
- goto out;
- }
-
- router = orig_node_get_router(orig_node);
- if (router)
- router_router = orig_node_get_router(router->orig_node);
-
- /* avoid temporary routing loops */
- if (router && router_router &&
- (compare_eth(router->addr, batman_packet->prev_sender)) &&
- !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
- (compare_eth(router->addr, router_router->addr))) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all rebroadcast packets that "
- "may make me loop (sender: %pM)\n", ethhdr->h_source);
- goto out;
- }
-
- /* if sender is a direct neighbor the sender mac equals
- * originator mac */
- orig_neigh_node = (is_single_hop_neigh ?
- orig_node :
- get_orig_node(bat_priv, ethhdr->h_source));
- if (!orig_neigh_node)
- goto out;
-
- orig_neigh_router = orig_node_get_router(orig_neigh_node);
-
- /* drop packet if sender is not a direct neighbor and if we
- * don't route towards it */
- if (!is_single_hop_neigh && (!orig_neigh_router)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: OGM via unknown neighbor!\n");
- goto out_neigh;
- }
-
- is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
- batman_packet, if_incoming);
-
- bonding_save_primary(orig_node, orig_neigh_node, batman_packet);
-
- /* update ranking if it is not a duplicate or has the same
- * seqno and similar ttl as the non-duplicate */
- if (is_bidirectional &&
- (!is_duplicate ||
- ((orig_node->last_real_seqno == batman_packet->seqno) &&
- (orig_node->last_ttl - 3 <= batman_packet->ttl))))
- update_orig(bat_priv, orig_node, ethhdr, batman_packet,
- if_incoming, tt_buff, is_duplicate);
-
- /* is single hop (direct) neighbor */
- if (is_single_hop_neigh) {
-
- /* mark direct link on incoming interface */
- schedule_forward_packet(orig_node, ethhdr, batman_packet,
- 1, if_incoming);
-
- bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
- "rebroadcast neighbor packet with direct link flag\n");
- goto out_neigh;
- }
-
- /* multihop originator */
- if (!is_bidirectional) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: not received via bidirectional link\n");
- goto out_neigh;
- }
-
- if (is_duplicate) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: duplicate packet received\n");
- goto out_neigh;
- }
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: rebroadcast originator packet\n");
- schedule_forward_packet(orig_node, ethhdr, batman_packet,
- 0, if_incoming);
-
-out_neigh:
- if ((orig_neigh_node) && (!is_single_hop_neigh))
- orig_node_free_ref(orig_neigh_node);
-out:
- if (router)
- neigh_node_free_ref(router);
- if (router_router)
- neigh_node_free_ref(router_router);
- if (orig_neigh_router)
- neigh_node_free_ref(orig_neigh_router);
-
- orig_node_free_ref(orig_node);
-}
-
-int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
+int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
{
struct ethhdr *ethhdr;
/* drop packet if it has not necessary minimum size */
- if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
+ if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN)))
return NET_RX_DROP;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -898,10 +274,7 @@ int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
ethhdr = (struct ethhdr *)skb_mac_header(skb);
- receive_aggr_bat_packet(ethhdr,
- skb->data,
- skb_headlen(skb),
- hard_iface);
+ bat_ogm_receive(ethhdr, skb->data, skb_headlen(skb), hard_iface);
kfree_skb(skb);
return NET_RX_SUCCESS;
@@ -1243,7 +616,7 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
}
break;
case TT_RESPONSE:
- /* packet needs to be linearised to access the TT changes */
+ /* packet needs to be linearized to access the TT changes */
if (skb_linearize(skb) < 0)
goto out;
@@ -1300,7 +673,7 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
roam_adv_packet->client);
tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
- atomic_read(&orig_node->last_ttvn) + 1, true);
+ atomic_read(&orig_node->last_ttvn) + 1, true, false);
/* Roaming phase starts: I have new information but the ttvn has not
* been incremented yet. This flag will make me check all the incoming
@@ -1536,7 +909,7 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv,
ethhdr = (struct ethhdr *)(skb->data +
sizeof(struct unicast_packet));
- orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+ orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);
if (!orig_node) {
if (!is_my_client(bat_priv, ethhdr->h_dest))
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index fb14e9579b1..7aaee0fb0fd 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -23,19 +23,15 @@
#define _NET_BATMAN_ADV_ROUTING_H_
void slide_own_bcast_window(struct hard_iface *hard_iface);
-void receive_bat_packet(const struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- const unsigned char *tt_buff,
- struct hard_iface *if_incoming);
-void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
- struct neigh_node *neigh_node);
+void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ struct neigh_node *neigh_node);
int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
struct neigh_node *find_router(struct bat_priv *bat_priv,
@@ -43,5 +39,12 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
const struct hard_iface *recv_if);
void bonding_candidate_del(struct orig_node *orig_node,
struct neigh_node *neigh_node);
+void bonding_candidate_add(struct orig_node *orig_node,
+ struct neigh_node *neigh_node);
+void bonding_save_primary(const struct orig_node *orig_node,
+ struct orig_node *orig_neigh_node,
+ const struct batman_ogm_packet *batman_ogm_packet);
+int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
+ unsigned long *last_reset);
#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 58d14472068..8a684eb738a 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -26,33 +26,12 @@
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
-#include "aggregation.h"
#include "gateway_common.h"
#include "originator.h"
+#include "bat_ogm.h"
static void send_outstanding_bcast_packet(struct work_struct *work);
-/* apply hop penalty for a normal link */
-static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
-{
- int hop_penalty = atomic_read(&bat_priv->hop_penalty);
- return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
-}
-
-/* when do we schedule our own packet to be sent */
-static unsigned long own_send_time(const struct bat_priv *bat_priv)
-{
- return jiffies + msecs_to_jiffies(
- atomic_read(&bat_priv->orig_interval) -
- JITTER + (random32() % 2*JITTER));
-}
-
-/* when do we schedule a forwarded packet to be sent */
-static unsigned long forward_send_time(void)
-{
- return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
-}
-
/* send out an already prepared packet to the given address via the
* specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
@@ -99,141 +78,17 @@ send_skb_err:
return NET_XMIT_DROP;
}
-/* Send a packet to a given interface */
-static void send_packet_to_if(struct forw_packet *forw_packet,
- struct hard_iface *hard_iface)
-{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- char *fwd_str;
- uint8_t packet_num;
- int16_t buff_pos;
- struct batman_packet *batman_packet;
- struct sk_buff *skb;
-
- if (hard_iface->if_status != IF_ACTIVE)
- return;
-
- packet_num = 0;
- buff_pos = 0;
- batman_packet = (struct batman_packet *)forw_packet->skb->data;
-
- /* adjust all flags and log packets */
- while (aggregated_packet(buff_pos,
- forw_packet->packet_len,
- batman_packet->tt_num_changes)) {
-
- /* we might have aggregated direct link packets with an
- * ordinary base packet */
- if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
- (forw_packet->if_incoming == hard_iface))
- batman_packet->flags |= DIRECTLINK;
- else
- batman_packet->flags &= ~DIRECTLINK;
-
- fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
- "Sending own" :
- "Forwarding"));
- bat_dbg(DBG_BATMAN, bat_priv,
- "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
- " IDF %s, hvn %d) on interface %s [%pM]\n",
- fwd_str, (packet_num > 0 ? "aggregated " : ""),
- batman_packet->orig, ntohl(batman_packet->seqno),
- batman_packet->tq, batman_packet->ttl,
- (batman_packet->flags & DIRECTLINK ?
- "on" : "off"),
- batman_packet->ttvn, hard_iface->net_dev->name,
- hard_iface->net_dev->dev_addr);
-
- buff_pos += sizeof(*batman_packet) +
- tt_len(batman_packet->tt_num_changes);
- packet_num++;
- batman_packet = (struct batman_packet *)
- (forw_packet->skb->data + buff_pos);
- }
-
- /* create clone because function is called more than once */
- skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
- if (skb)
- send_skb_packet(skb, hard_iface, broadcast_addr);
-}
-
-/* send a batman packet */
-static void send_packet(struct forw_packet *forw_packet)
-{
- struct hard_iface *hard_iface;
- struct net_device *soft_iface;
- struct bat_priv *bat_priv;
- struct hard_iface *primary_if = NULL;
- struct batman_packet *batman_packet =
- (struct batman_packet *)(forw_packet->skb->data);
- int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
-
- if (!forw_packet->if_incoming) {
- pr_err("Error - can't forward packet: incoming iface not "
- "specified\n");
- goto out;
- }
-
- soft_iface = forw_packet->if_incoming->soft_iface;
- bat_priv = netdev_priv(soft_iface);
-
- if (forw_packet->if_incoming->if_status != IF_ACTIVE)
- goto out;
-
- primary_if = primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- /* multihomed peer assumed */
- /* non-primary OGMs are only broadcasted on their interface */
- if ((directlink && (batman_packet->ttl == 1)) ||
- (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
-
- /* FIXME: what about aggregated packets ? */
- bat_dbg(DBG_BATMAN, bat_priv,
- "%s packet (originator %pM, seqno %d, TTL %d) "
- "on interface %s [%pM]\n",
- (forw_packet->own ? "Sending own" : "Forwarding"),
- batman_packet->orig, ntohl(batman_packet->seqno),
- batman_packet->ttl,
- forw_packet->if_incoming->net_dev->name,
- forw_packet->if_incoming->net_dev->dev_addr);
-
- /* skb is only used once and than forw_packet is free'd */
- send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
- broadcast_addr);
- forw_packet->skb = NULL;
-
- goto out;
- }
-
- /* broadcast on every interface */
- rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
- if (hard_iface->soft_iface != soft_iface)
- continue;
-
- send_packet_to_if(forw_packet, hard_iface);
- }
- rcu_read_unlock();
-
-out:
- if (primary_if)
- hardif_free_ref(primary_if);
-}
-
static void realloc_packet_buffer(struct hard_iface *hard_iface,
- int new_len)
+ int new_len)
{
unsigned char *new_buff;
- struct batman_packet *batman_packet;
new_buff = kmalloc(new_len, GFP_ATOMIC);
/* keep old buffer if kmalloc should fail */
if (new_buff) {
memcpy(new_buff, hard_iface->packet_buff,
- sizeof(*batman_packet));
+ BATMAN_OGM_LEN);
kfree(hard_iface->packet_buff);
hard_iface->packet_buff = new_buff;
@@ -242,60 +97,48 @@ static void realloc_packet_buffer(struct hard_iface *hard_iface,
}
/* when calling this function (hard_iface == primary_if) has to be true */
-static void prepare_packet_buffer(struct bat_priv *bat_priv,
+static int prepare_packet_buffer(struct bat_priv *bat_priv,
struct hard_iface *hard_iface)
{
int new_len;
- struct batman_packet *batman_packet;
- new_len = BAT_PACKET_LEN +
+ new_len = BATMAN_OGM_LEN +
tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
/* if we have too many changes for one packet don't send any
* and wait for the tt table request which will be fragmented */
if (new_len > hard_iface->soft_iface->mtu)
- new_len = BAT_PACKET_LEN;
+ new_len = BATMAN_OGM_LEN;
realloc_packet_buffer(hard_iface, new_len);
- batman_packet = (struct batman_packet *)hard_iface->packet_buff;
atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
/* reset the sending counter */
atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
- batman_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv,
- hard_iface->packet_buff + BAT_PACKET_LEN,
- hard_iface->packet_len - BAT_PACKET_LEN);
-
+ return tt_changes_fill_buffer(bat_priv,
+ hard_iface->packet_buff + BATMAN_OGM_LEN,
+ hard_iface->packet_len - BATMAN_OGM_LEN);
}
-static void reset_packet_buffer(struct bat_priv *bat_priv,
- struct hard_iface *hard_iface)
+static int reset_packet_buffer(struct bat_priv *bat_priv,
+ struct hard_iface *hard_iface)
{
- struct batman_packet *batman_packet;
-
- realloc_packet_buffer(hard_iface, BAT_PACKET_LEN);
-
- batman_packet = (struct batman_packet *)hard_iface->packet_buff;
- batman_packet->tt_num_changes = 0;
+ realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
+ return 0;
}
-void schedule_own_packet(struct hard_iface *hard_iface)
+void schedule_bat_ogm(struct hard_iface *hard_iface)
{
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct hard_iface *primary_if;
- unsigned long send_time;
- struct batman_packet *batman_packet;
- int vis_server;
+ int tt_num_changes = -1;
if ((hard_iface->if_status == IF_NOT_IN_USE) ||
(hard_iface->if_status == IF_TO_BE_REMOVED))
return;
- vis_server = atomic_read(&bat_priv->vis_mode);
- primary_if = primary_if_get_selected(bat_priv);
-
/**
* the interface gets activated here to avoid race conditions between
* the moment of activating the interface in
@@ -306,124 +149,26 @@ void schedule_own_packet(struct hard_iface *hard_iface)
if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
hard_iface->if_status = IF_ACTIVE;
+ primary_if = primary_if_get_selected(bat_priv);
+
if (hard_iface == primary_if) {
/* if at least one change happened */
if (atomic_read(&bat_priv->tt_local_changes) > 0) {
tt_commit_changes(bat_priv);
- prepare_packet_buffer(bat_priv, hard_iface);
+ tt_num_changes = prepare_packet_buffer(bat_priv,
+ hard_iface);
}
- /* if the changes have been sent enough times */
+ /* if the changes have been sent often enough */
if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
- reset_packet_buffer(bat_priv, hard_iface);
+ tt_num_changes = reset_packet_buffer(bat_priv,
+ hard_iface);
}
- /**
- * NOTE: packet_buff might just have been re-allocated in
- * prepare_packet_buffer() or in reset_packet_buffer()
- */
- batman_packet = (struct batman_packet *)hard_iface->packet_buff;
-
- /* change sequence number to network order */
- batman_packet->seqno =
- htonl((uint32_t)atomic_read(&hard_iface->seqno));
-
- batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
- batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
-
- if (vis_server == VIS_TYPE_SERVER_SYNC)
- batman_packet->flags |= VIS_SERVER;
- else
- batman_packet->flags &= ~VIS_SERVER;
-
- if ((hard_iface == primary_if) &&
- (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
- batman_packet->gw_flags =
- (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
- else
- batman_packet->gw_flags = NO_FLAGS;
-
- atomic_inc(&hard_iface->seqno);
-
- slide_own_bcast_window(hard_iface);
- send_time = own_send_time(bat_priv);
- add_bat_packet_to_list(bat_priv,
- hard_iface->packet_buff,
- hard_iface->packet_len,
- hard_iface, 1, send_time);
-
if (primary_if)
hardif_free_ref(primary_if);
-}
-
-void schedule_forward_packet(struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- int directlink,
- struct hard_iface *if_incoming)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct neigh_node *router;
- uint8_t in_tq, in_ttl, tq_avg = 0;
- unsigned long send_time;
- uint8_t tt_num_changes;
-
- if (batman_packet->ttl <= 1) {
- bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
- return;
- }
-
- router = orig_node_get_router(orig_node);
-
- in_tq = batman_packet->tq;
- in_ttl = batman_packet->ttl;
- tt_num_changes = batman_packet->tt_num_changes;
-
- batman_packet->ttl--;
- memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
-
- /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
- * of our best tq value */
- if (router && router->tq_avg != 0) {
-
- /* rebroadcast ogm of best ranking neighbor as is */
- if (!compare_eth(router->addr, ethhdr->h_source)) {
- batman_packet->tq = router->tq_avg;
-
- if (router->last_ttl)
- batman_packet->ttl = router->last_ttl - 1;
- }
-
- tq_avg = router->tq_avg;
- }
-
- if (router)
- neigh_node_free_ref(router);
-
- /* apply hop penalty */
- batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: tq_orig: %i, tq_avg: %i, "
- "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
- in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
- batman_packet->ttl);
-
- batman_packet->seqno = htonl(batman_packet->seqno);
- batman_packet->tt_crc = htons(batman_packet->tt_crc);
-
- /* switch of primaries first hop flag when forwarding */
- batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
- if (directlink)
- batman_packet->flags |= DIRECTLINK;
- else
- batman_packet->flags &= ~DIRECTLINK;
- send_time = forward_send_time();
- add_bat_packet_to_list(bat_priv,
- (unsigned char *)batman_packet,
- sizeof(*batman_packet) + tt_len(tt_num_changes),
- if_incoming, 0, send_time);
+ bat_ogm_schedule(hard_iface, tt_num_changes);
}
static void forw_packet_free(struct forw_packet *forw_packet)
@@ -454,7 +199,7 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
}
/* add a broadcast packet to the queue and setup timers. broadcast packets
- * are sent multiple times to increase probability for beeing received.
+ * are sent multiple times to increase probability for being received.
*
* This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
* errors.
@@ -557,7 +302,7 @@ out:
atomic_inc(&bat_priv->bcast_queue_left);
}
-void send_outstanding_bat_packet(struct work_struct *work)
+void send_outstanding_bat_ogm_packet(struct work_struct *work)
{
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
@@ -573,7 +318,7 @@ void send_outstanding_bat_packet(struct work_struct *work)
if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
goto out;
- send_packet(forw_packet);
+ bat_ogm_emit(forw_packet);
/**
* we have to have at least one packet in the queue
@@ -581,7 +326,7 @@ void send_outstanding_bat_packet(struct work_struct *work)
* shutting down
*/
if (forw_packet->own)
- schedule_own_packet(forw_packet->if_incoming);
+ schedule_bat_ogm(forw_packet->if_incoming);
out:
/* don't count own packet */
@@ -612,7 +357,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
&bat_priv->forw_bcast_list, list) {
/**
- * if purge_outstanding_packets() was called with an argmument
+ * if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
*/
if ((hard_iface) &&
@@ -641,7 +386,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
&bat_priv->forw_bat_list, list) {
/**
- * if purge_outstanding_packets() was called with an argmument
+ * if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
*/
if ((hard_iface) &&
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 1f2d1e87766..c8ca3ef7385 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -24,15 +24,10 @@
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
const uint8_t *dst_addr);
-void schedule_own_packet(struct hard_iface *hard_iface);
-void schedule_forward_packet(struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- int directlink,
- struct hard_iface *if_outgoing);
+void schedule_bat_ogm(struct hard_iface *hard_iface);
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
const struct sk_buff *skb, unsigned long delay);
-void send_outstanding_bat_packet(struct work_struct *work);
+void send_outstanding_bat_ogm_packet(struct work_struct *work);
void purge_outstanding_packets(struct bat_priv *bat_priv,
const struct hard_iface *hard_iface);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 05dd35114a2..f9cc9572898 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -445,30 +445,31 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
{
struct bat_priv *bat_priv = netdev_priv(dev);
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
- struct batman_packet *batman_packet;
+ struct batman_ogm_packet *batman_ogm_packet;
struct softif_neigh *softif_neigh = NULL;
struct hard_iface *primary_if = NULL;
struct softif_neigh *curr_softif_neigh = NULL;
if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
- batman_packet = (struct batman_packet *)
+ batman_ogm_packet = (struct batman_ogm_packet *)
(skb->data + ETH_HLEN + VLAN_HLEN);
else
- batman_packet = (struct batman_packet *)(skb->data + ETH_HLEN);
+ batman_ogm_packet = (struct batman_ogm_packet *)
+ (skb->data + ETH_HLEN);
- if (batman_packet->version != COMPAT_VERSION)
+ if (batman_ogm_packet->version != COMPAT_VERSION)
goto out;
- if (batman_packet->packet_type != BAT_PACKET)
+ if (batman_ogm_packet->packet_type != BAT_OGM)
goto out;
- if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
+ if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
goto out;
- if (is_my_mac(batman_packet->orig))
+ if (is_my_mac(batman_ogm_packet->orig))
goto out;
- softif_neigh = softif_neigh_get(bat_priv, batman_packet->orig, vid);
+ softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid);
if (!softif_neigh)
goto out;
@@ -532,11 +533,11 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- /* only modify transtable if it has been initialised before */
+ /* only modify transtable if it has been initialized before */
if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
tt_local_remove(bat_priv, dev->dev_addr,
"mac address changed", false);
- tt_local_add(dev, addr->sa_data);
+ tt_local_add(dev, addr->sa_data, NULL_IFINDEX);
}
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -595,11 +596,12 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
goto dropped;
/* Register the client MAC in the transtable */
- tt_local_add(soft_iface, ethhdr->h_source);
+ tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
- orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+ orig_node = transtable_search(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest);
do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
- if (do_bcast || (orig_node && orig_node->gw_flags)) {
+ if (do_bcast || (orig_node && orig_node->gw_flags)) {
ret = gw_is_target(bat_priv, skb, orig_node);
if (ret < 0)
@@ -739,6 +741,9 @@ void interface_rx(struct net_device *soft_iface,
soft_iface->last_rx = jiffies;
+ if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
+ goto dropped;
+
netif_rx(skb);
goto out;
@@ -796,10 +801,8 @@ struct net_device *softif_create(const char *name)
soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup);
- if (!soft_iface) {
- pr_err("Unable to allocate the batman interface: %s\n", name);
+ if (!soft_iface)
goto out;
- }
ret = register_netdevice(soft_iface);
if (ret < 0) {
@@ -812,6 +815,7 @@ struct net_device *softif_create(const char *name)
atomic_set(&bat_priv->aggregated_ogms, 1);
atomic_set(&bat_priv->bonding, 0);
+ atomic_set(&bat_priv->ap_isolation, 0);
atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
atomic_set(&bat_priv->gw_sel_class, 20);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index fb6931d00cd..873fb3d8e56 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -183,7 +183,8 @@ static int tt_local_init(struct bat_priv *bat_priv)
return 1;
}
-void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
+void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+ int ifindex)
{
struct bat_priv *bat_priv = netdev_priv(soft_iface);
struct tt_local_entry *tt_local_entry = NULL;
@@ -207,6 +208,8 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
memcpy(tt_local_entry->addr, addr, ETH_ALEN);
tt_local_entry->last_seen = jiffies;
tt_local_entry->flags = NO_FLAGS;
+ if (is_wifi_iface(ifindex))
+ tt_local_entry->flags |= TT_CLIENT_WIFI;
atomic_set(&tt_local_entry->refcount, 2);
/* the batman interface mac address should never be purged */
@@ -329,7 +332,7 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
__hlist_for_each_rcu(node, head)
- buf_size += 21;
+ buf_size += 29;
rcu_read_unlock();
}
@@ -348,8 +351,19 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
hlist_for_each_entry_rcu(tt_local_entry, node,
head, hash_entry) {
- pos += snprintf(buff + pos, 22, " * %pM\n",
- tt_local_entry->addr);
+ pos += snprintf(buff + pos, 30, " * %pM "
+ "[%c%c%c%c%c]\n",
+ tt_local_entry->addr,
+ (tt_local_entry->flags &
+ TT_CLIENT_ROAM ? 'R' : '.'),
+ (tt_local_entry->flags &
+ TT_CLIENT_NOPURGE ? 'P' : '.'),
+ (tt_local_entry->flags &
+ TT_CLIENT_NEW ? 'N' : '.'),
+ (tt_local_entry->flags &
+ TT_CLIENT_PENDING ? 'X' : '.'),
+ (tt_local_entry->flags &
+ TT_CLIENT_WIFI ? 'W' : '.'));
}
rcu_read_unlock();
}
@@ -369,8 +383,8 @@ static void tt_local_set_pending(struct bat_priv *bat_priv,
tt_local_event(bat_priv, tt_local_entry->addr,
tt_local_entry->flags | flags);
- /* The local client has to be merked as "pending to be removed" but has
- * to be kept in the table in order to send it in an full tables
+ /* The local client has to be marked as "pending to be removed" but has
+ * to be kept in the table in order to send it in a full table
* response issued before the net ttvn increment (consistency check) */
tt_local_entry->flags |= TT_CLIENT_PENDING;
}
@@ -495,7 +509,8 @@ static void tt_changes_list_free(struct bat_priv *bat_priv)
/* caller must hold orig_node refcount */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *tt_addr, uint8_t ttvn, bool roaming)
+ const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
+ bool wifi)
{
struct tt_global_entry *tt_global_entry;
struct orig_node *orig_node_tmp;
@@ -537,6 +552,9 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
tt_global_entry->roam_at = 0;
}
+ if (wifi)
+ tt_global_entry->flags |= TT_CLIENT_WIFI;
+
bat_dbg(DBG_TT, bat_priv,
"Creating new global tt entry: %pM (via %pM)\n",
tt_global_entry->addr, orig_node->orig);
@@ -582,8 +600,8 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq,
"Globally announced TT entries received via the mesh %s\n",
net_dev->name);
- seq_printf(seq, " %-13s %s %-15s %s\n",
- "Client", "(TTVN)", "Originator", "(Curr TTVN)");
+ seq_printf(seq, " %-13s %s %-15s %s %s\n",
+ "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
buf_size = 1;
/* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
@@ -593,7 +611,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
__hlist_for_each_rcu(node, head)
- buf_size += 59;
+ buf_size += 67;
rcu_read_unlock();
}
@@ -612,14 +630,20 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
hlist_for_each_entry_rcu(tt_global_entry, node,
head, hash_entry) {
- pos += snprintf(buff + pos, 61,
- " * %pM (%3u) via %pM (%3u)\n",
- tt_global_entry->addr,
+ pos += snprintf(buff + pos, 69,
+ " * %pM (%3u) via %pM (%3u) "
+ "[%c%c%c]\n", tt_global_entry->addr,
tt_global_entry->ttvn,
tt_global_entry->orig_node->orig,
(uint8_t) atomic_read(
&tt_global_entry->orig_node->
- last_ttvn));
+ last_ttvn),
+ (tt_global_entry->flags &
+ TT_CLIENT_ROAM ? 'R' : '.'),
+ (tt_global_entry->flags &
+ TT_CLIENT_PENDING ? 'X' : '.'),
+ (tt_global_entry->flags &
+ TT_CLIENT_WIFI ? 'W' : '.'));
}
rcu_read_unlock();
}
@@ -774,30 +798,56 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
bat_priv->tt_global_hash = NULL;
}
+static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
+ struct tt_global_entry *tt_global_entry)
+{
+ bool ret = false;
+
+ if (tt_local_entry->flags & TT_CLIENT_WIFI &&
+ tt_global_entry->flags & TT_CLIENT_WIFI)
+ ret = true;
+
+ return ret;
+}
+
struct orig_node *transtable_search(struct bat_priv *bat_priv,
- const uint8_t *addr)
+ const uint8_t *src, const uint8_t *addr)
{
- struct tt_global_entry *tt_global_entry;
+ struct tt_local_entry *tt_local_entry = NULL;
+ struct tt_global_entry *tt_global_entry = NULL;
struct orig_node *orig_node = NULL;
- tt_global_entry = tt_global_hash_find(bat_priv, addr);
+ if (src && atomic_read(&bat_priv->ap_isolation)) {
+ tt_local_entry = tt_local_hash_find(bat_priv, src);
+ if (!tt_local_entry)
+ goto out;
+ }
+ tt_global_entry = tt_global_hash_find(bat_priv, addr);
if (!tt_global_entry)
goto out;
+ /* check whether the clients should not communicate due to AP
+ * isolation */
+ if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
+ goto out;
+
if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
- goto free_tt;
+ goto out;
/* A global client marked as PENDING has already moved from that
* originator */
if (tt_global_entry->flags & TT_CLIENT_PENDING)
- goto free_tt;
+ goto out;
orig_node = tt_global_entry->orig_node;
-free_tt:
- tt_global_entry_free_ref(tt_global_entry);
out:
+ if (tt_global_entry)
+ tt_global_entry_free_ref(tt_global_entry);
+ if (tt_local_entry)
+ tt_local_entry_free_ref(tt_local_entry);
+
return orig_node;
}
@@ -999,7 +1049,6 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
tt_response = (struct tt_query_packet *)skb_put(skb,
tt_query_size + tt_len);
tt_response->ttvn = ttvn;
- tt_response->tt_data = htons(tt_tot);
tt_change = (struct tt_change *)(skb->data + tt_query_size);
tt_count = 0;
@@ -1025,12 +1074,17 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
}
rcu_read_unlock();
+ /* store in the message the number of entries we have successfully
+ * copied */
+ tt_response->tt_data = htons(tt_count);
+
out:
return skb;
}
-int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node,
- uint8_t ttvn, uint16_t tt_crc, bool full_table)
+static int send_tt_request(struct bat_priv *bat_priv,
+ struct orig_node *dst_orig_node,
+ uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
struct sk_buff *skb = NULL;
struct tt_query_packet *tt_request;
@@ -1137,12 +1191,12 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
req_ttvn = tt_request->ttvn;
- /* I have not the requested data */
+ /* I don't have the requested data */
if (orig_ttvn != req_ttvn ||
tt_request->tt_data != req_dst_orig_node->tt_crc)
goto out;
- /* If it has explicitly been requested the full table */
+ /* If the full table has been explicitly requested */
if (tt_request->flags & TT_FULL_TABLE ||
!req_dst_orig_node->tt_buff)
full_table = true;
@@ -1363,7 +1417,9 @@ static void _tt_update_changes(struct bat_priv *bat_priv,
(tt_change + i)->flags & TT_CLIENT_ROAM);
else
if (!tt_global_add(bat_priv, orig_node,
- (tt_change + i)->addr, ttvn, false))
+ (tt_change + i)->addr, ttvn, false,
+ (tt_change + i)->flags &
+ TT_CLIENT_WIFI))
/* In case of problem while storing a
* global_entry, we stop the updating
* procedure without committing the
@@ -1403,9 +1459,10 @@ out:
orig_node_free_ref(orig_node);
}
-void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
- uint16_t tt_num_changes, uint8_t ttvn,
- struct tt_change *tt_change)
+static void tt_update_changes(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
+ uint16_t tt_num_changes, uint8_t ttvn,
+ struct tt_change *tt_change)
{
_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
ttvn);
@@ -1668,6 +1725,8 @@ static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
rcu_read_lock();
hlist_for_each_entry_rcu(tt_local_entry, node,
head, hash_entry) {
+ if (!(tt_local_entry->flags & flags))
+ continue;
tt_local_entry->flags &= ~flags;
atomic_inc(&bat_priv->num_local_tt);
}
@@ -1720,3 +1779,90 @@ void tt_commit_changes(struct bat_priv *bat_priv)
atomic_inc(&bat_priv->ttvn);
bat_priv->tt_poss_change = false;
}
+
+bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
+{
+ struct tt_local_entry *tt_local_entry = NULL;
+ struct tt_global_entry *tt_global_entry = NULL;
+ bool ret = true;
+
+ if (!atomic_read(&bat_priv->ap_isolation))
+ return false;
+
+ tt_local_entry = tt_local_hash_find(bat_priv, dst);
+ if (!tt_local_entry)
+ goto out;
+
+ tt_global_entry = tt_global_hash_find(bat_priv, src);
+ if (!tt_global_entry)
+ goto out;
+
+ if (_is_ap_isolated(tt_local_entry, tt_global_entry))
+ goto out;
+
+ ret = false;
+
+out:
+ if (tt_global_entry)
+ tt_global_entry_free_ref(tt_global_entry);
+ if (tt_local_entry)
+ tt_local_entry_free_ref(tt_local_entry);
+ return ret;
+}
+
+void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ const unsigned char *tt_buff, uint8_t tt_num_changes,
+ uint8_t ttvn, uint16_t tt_crc)
+{
+ uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+ bool full_table = true;
+
+ /* the ttvn increased by one -> we can apply the attached changes */
+ if (ttvn - orig_ttvn == 1) {
+ /* the OGM could not contain the changes due to their size or
+ * because they have already been sent TT_OGM_APPEND_MAX times.
+ * In this case send a tt request */
+ if (!tt_num_changes) {
+ full_table = false;
+ goto request_table;
+ }
+
+ tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
+ (struct tt_change *)tt_buff);
+
+ /* Even if we received the precomputed crc with the OGM, we
+ * prefer to recompute it to spot any possible inconsistency
+ * in the global table */
+ orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
+
+ /* The ttvn alone is not enough to guarantee consistency
+ * because a single value could represent different states
+ * (due to the wrap around). Thus a node has to check whether
+ * the resulting table (after applying the changes) is still
+ * consistent or not. E.g. a node could disconnect while its
+ * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
+ * checking the CRC value is mandatory to detect the
+ * inconsistency */
+ if (orig_node->tt_crc != tt_crc)
+ goto request_table;
+
+ /* Roaming phase is over: tables are in sync again. I can
+ * unset the flag */
+ orig_node->tt_poss_change = false;
+ } else {
+ /* if we missed more than one change or our tables are not
+ * in sync anymore -> request fresh tt data */
+ if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
+request_table:
+ bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
+ "Need to retrieve the correct information "
+ "(ttvn: %u last_ttvn: %u crc: %u last_crc: "
+ "%u num_changes: %u)\n", orig_node->orig, ttvn,
+ orig_ttvn, tt_crc, orig_node->tt_crc,
+ tt_num_changes);
+ send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
+ full_table);
+ return;
+ }
+ }
+}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index d4122cba53b..30efd49881a 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -26,15 +26,16 @@ int tt_len(int changes_num);
int tt_changes_fill_buffer(struct bat_priv *bat_priv,
unsigned char *buff, int buff_len);
int tt_init(struct bat_priv *bat_priv);
-void tt_local_add(struct net_device *soft_iface, const uint8_t *addr);
+void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+ int ifindex);
void tt_local_remove(struct bat_priv *bat_priv,
const uint8_t *addr, const char *message, bool roaming);
int tt_local_seq_print_text(struct seq_file *seq, void *offset);
void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
const unsigned char *tt_buff, int tt_buff_len);
-int tt_global_add(struct bat_priv *bat_priv,
- struct orig_node *orig_node, const unsigned char *addr,
- uint8_t ttvn, bool roaming);
+int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ const unsigned char *addr, uint8_t ttvn, bool roaming,
+ bool wifi);
int tt_global_seq_print_text(struct seq_file *seq, void *offset);
void tt_global_del_orig(struct bat_priv *bat_priv,
struct orig_node *orig_node, const char *message);
@@ -42,25 +43,23 @@ void tt_global_del(struct bat_priv *bat_priv,
struct orig_node *orig_node, const unsigned char *addr,
const char *message, bool roaming);
struct orig_node *transtable_search(struct bat_priv *bat_priv,
- const uint8_t *addr);
+ const uint8_t *src, const uint8_t *addr);
void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
const unsigned char *tt_buff, uint8_t tt_num_changes);
uint16_t tt_local_crc(struct bat_priv *bat_priv);
uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
void tt_free(struct bat_priv *bat_priv);
-int send_tt_request(struct bat_priv *bat_priv,
- struct orig_node *dst_orig_node, uint8_t hvn,
- uint16_t tt_crc, bool full_table);
bool send_tt_response(struct bat_priv *bat_priv,
struct tt_query_packet *tt_request);
-void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
- uint16_t tt_num_changes, uint8_t ttvn,
- struct tt_change *tt_change);
bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
void handle_tt_response(struct bat_priv *bat_priv,
struct tt_query_packet *tt_response);
void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
struct orig_node *orig_node);
void tt_commit_changes(struct bat_priv *bat_priv);
+bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
+void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ const unsigned char *tt_buff, uint8_t tt_num_changes,
+ uint8_t ttvn, uint16_t tt_crc);
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 25bd1db3537..1ae35575051 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -57,7 +57,7 @@ struct hard_iface {
* @batman_seqno_reset: time when the batman seqno window was reset
* @gw_flags: flags related to gateway class
* @flags: for now only VIS_SERVER flag
- * @last_real_seqno: last and best known squence number
+ * @last_real_seqno: last and best known sequence number
* @last_ttl: ttl of last received packet
* @last_bcast_seqno: last broadcast sequence number received by this host
*
@@ -146,6 +146,7 @@ struct bat_priv {
atomic_t aggregated_ogms; /* boolean */
atomic_t bonding; /* boolean */
atomic_t fragmentation; /* boolean */
+ atomic_t ap_isolation; /* boolean */
atomic_t vis_mode; /* VIS_TYPE_* */
atomic_t gw_mode; /* GW_MODE_* */
atomic_t gw_sel_class; /* uint */
@@ -156,7 +157,7 @@ struct bat_priv {
atomic_t bcast_seqno;
atomic_t bcast_queue_left;
atomic_t batman_queue_left;
- atomic_t ttvn; /* tranlation table version number */
+ atomic_t ttvn; /* translation table version number */
atomic_t tt_ogm_append_cnt;
atomic_t tt_local_changes; /* changes registered in a OGM interval */
/* The tt_poss_change flag is used to detect an ongoing roaming phase.
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 32b125fb3d3..07d1c1da89d 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -299,8 +299,10 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
goto find_router;
}
- /* check for tt host - increases orig_node refcount */
- orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+ /* check for tt host - increases orig_node refcount.
+ * returns NULL in case of AP isolation */
+ orig_node = transtable_search(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest);
find_router:
/**
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index 62f54b95462..8fd5535544b 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -24,7 +24,7 @@
#include "packet.h"
-#define FRAG_TIMEOUT 10000 /* purge frag list entrys after time in ms */
+#define FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 8a1b98589d7..f81a6b668b0 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -131,7 +131,7 @@ static void vis_data_insert_interface(const uint8_t *interface,
return;
}
- /* its a new address, add it to the list */
+ /* it's a new address, add it to the list */
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return;
@@ -465,7 +465,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
/* try to add it */
hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
info, &info->hash_entry);
- if (hash_added < 0) {
+ if (hash_added != 0) {
/* did not work (for some reason) */
kref_put(&info->refcount, free_info);
info = NULL;
@@ -887,10 +887,8 @@ int vis_init(struct bat_priv *bat_priv)
}
bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
- if (!bat_priv->my_vis_info) {
- pr_err("Can't initialize vis packet\n");
+ if (!bat_priv->my_vis_info)
goto err;
- }
bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
MAX_VIS_PACKET_SIZE +
@@ -920,7 +918,7 @@ int vis_init(struct bat_priv *bat_priv)
hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
bat_priv->my_vis_info,
&bat_priv->my_vis_info->hash_entry);
- if (hash_added < 0) {
+ if (hash_added != 0) {
pr_err("Can't add own vis packet into hash\n");
/* not in hash, need to remove it manually. */
kref_put(&bat_priv->my_vis_info->refcount, free_info);
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 117e0d16178..062124cd89c 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -349,7 +349,7 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
}
chunk = min_t(unsigned int, skb->len, size);
- if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+ if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
if (!copied)
copied = -EFAULT;
@@ -361,7 +361,33 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
- skb_pull(skb, chunk);
+ int skb_len = skb_headlen(skb);
+
+ if (chunk <= skb_len) {
+ __skb_pull(skb, chunk);
+ } else {
+ struct sk_buff *frag;
+
+ __skb_pull(skb, skb_len);
+ chunk -= skb_len;
+
+ skb_walk_frags(skb, frag) {
+ if (chunk <= frag->len) {
+ /* Pulling partial data */
+ skb->len -= chunk;
+ skb->data_len -= chunk;
+ __skb_pull(frag, chunk);
+ break;
+ } else if (frag->len) {
+ /* Pulling all frag data */
+ chunk -= frag->len;
+ skb->len -= frag->len;
+ skb->data_len -= frag->len;
+ __skb_pull(frag, frag->len);
+ }
+ }
+ }
+
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
break;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index d9edfe8bf9d..91bcd3a961e 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -492,7 +492,10 @@ static int bnep_session(void *arg)
/* RX */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- bnep_rx_frame(s, skb);
+ if (!skb_linearize(skb))
+ bnep_rx_frame(s, skb);
+ else
+ kfree_skb(skb);
}
if (sk->sk_state != BT_CONNECTED)
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index d4f5dff7c95..bc4086480d9 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -217,7 +217,7 @@ static const struct net_device_ops bnep_netdev_ops = {
.ndo_stop = bnep_net_close,
.ndo_start_xmit = bnep_net_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = bnep_net_set_mc_list,
+ .ndo_set_rx_mode = bnep_net_set_mc_list,
.ndo_set_mac_address = bnep_net_set_mac_addr,
.ndo_tx_timeout = bnep_net_timeout,
.ndo_change_mtu = eth_change_mtu,
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 521baa4fe83..7d00ddf9e9d 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -302,7 +302,10 @@ static int cmtp_session(void *arg)
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- cmtp_recv_frame(session, skb);
+ if (!skb_linearize(skb))
+ cmtp_recv_frame(session, skb);
+ else
+ kfree_skb(skb);
}
cmtp_process_transmit(session);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ea7f031f3b0..c1c597e3e19 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -56,15 +56,15 @@ static void hci_le_connect(struct hci_conn *conn)
conn->sec_level = BT_SECURITY_LOW;
memset(&cp, 0, sizeof(cp));
- cp.scan_interval = cpu_to_le16(0x0004);
- cp.scan_window = cpu_to_le16(0x0004);
+ cp.scan_interval = cpu_to_le16(0x0060);
+ cp.scan_window = cpu_to_le16(0x0030);
bacpy(&cp.peer_addr, &conn->dst);
cp.peer_addr_type = conn->dst_type;
- cp.conn_interval_min = cpu_to_le16(0x0008);
- cp.conn_interval_max = cpu_to_le16(0x0100);
- cp.supervision_timeout = cpu_to_le16(0x0064);
- cp.min_ce_len = cpu_to_le16(0x0001);
- cp.max_ce_len = cpu_to_le16(0x0001);
+ cp.conn_interval_min = cpu_to_le16(0x0028);
+ cp.conn_interval_max = cpu_to_le16(0x0038);
+ cp.supervision_timeout = cpu_to_le16(0x002a);
+ cp.min_ce_len = cpu_to_le16(0x0000);
+ cp.max_ce_len = cpu_to_le16(0x0000);
hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}
@@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
cp.handle = cpu_to_le16(conn->handle);
memcpy(cp.ltk, ltk, sizeof(cp.ltk));
cp.ediv = ediv;
- memcpy(cp.rand, rand, sizeof(rand));
+ memcpy(cp.rand, rand, sizeof(cp.rand));
hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 56943add45c..b84458dcc22 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1312,59 +1312,41 @@ int hci_blacklist_clear(struct hci_dev *hdev)
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct bdaddr_list *entry;
- int err;
if (bacmp(bdaddr, BDADDR_ANY) == 0)
return -EBADF;
- hci_dev_lock_bh(hdev);
-
- if (hci_blacklist_lookup(hdev, bdaddr)) {
- err = -EEXIST;
- goto err;
- }
+ if (hci_blacklist_lookup(hdev, bdaddr))
+ return -EEXIST;
entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
- if (!entry) {
- err = -ENOMEM;
- goto err;
- }
+ if (!entry)
+ return -ENOMEM;
bacpy(&entry->bdaddr, bdaddr);
list_add(&entry->list, &hdev->blacklist);
- err = 0;
-
-err:
- hci_dev_unlock_bh(hdev);
- return err;
+ return mgmt_device_blocked(hdev->id, bdaddr);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct bdaddr_list *entry;
- int err = 0;
-
- hci_dev_lock_bh(hdev);
if (bacmp(bdaddr, BDADDR_ANY) == 0) {
- hci_blacklist_clear(hdev);
- goto done;
+ return hci_blacklist_clear(hdev);
}
entry = hci_blacklist_lookup(hdev, bdaddr);
if (!entry) {
- err = -ENOENT;
- goto done;
+ return -ENOENT;
}
list_del(&entry->list);
kfree(entry);
-done:
- hci_dev_unlock_bh(hdev);
- return err;
+ return mgmt_device_unblocked(hdev->id, bdaddr);
}
static void hci_clear_adv_cache(unsigned long arg)
@@ -1523,11 +1505,6 @@ int hci_register_dev(struct hci_dev *hdev)
if (!hdev->workqueue)
goto nomem;
- hdev->tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(hdev->tfm))
- BT_INFO("Failed to load transform for ecb(aes): %ld",
- PTR_ERR(hdev->tfm));
-
hci_register_sysfs(hdev);
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
@@ -1576,9 +1553,6 @@ int hci_unregister_dev(struct hci_dev *hdev)
!test_bit(HCI_SETUP, &hdev->flags))
mgmt_index_removed(hdev->id);
- if (!IS_ERR(hdev->tfm))
- crypto_free_blkcipher(hdev->tfm);
-
hci_notify(hdev, HCI_DEV_UNREG);
if (hdev->rfkill) {
@@ -2074,6 +2048,9 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
min = c->sent;
conn = c;
}
+
+ if (hci_conn_num(hdev, type) == num)
+ break;
}
if (conn) {
@@ -2131,6 +2108,9 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ if (!hci_conn_num(hdev, ACL_LINK))
+ return;
+
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
@@ -2162,6 +2142,9 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ if (!hci_conn_num(hdev, SCO_LINK))
+ return;
+
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
@@ -2182,6 +2165,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ if (!hci_conn_num(hdev, ESCO_LINK))
+ return;
+
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
@@ -2202,6 +2188,9 @@ static inline void hci_sched_le(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ if (!hci_conn_num(hdev, LE_LINK))
+ return;
+
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* LE tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 7ef4eb4435f..d7d96b6b1f0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -898,16 +898,15 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
if (!cp)
return;
- hci_dev_lock(hdev);
-
if (cp->enable == 0x01) {
del_timer(&hdev->adv_timer);
+
+ hci_dev_lock(hdev);
hci_adv_entries_clear(hdev);
+ hci_dev_unlock(hdev);
} else if (cp->enable == 0x00) {
mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
}
-
- hci_dev_unlock(hdev);
}
static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1103,9 +1102,10 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
return 0;
/* Only request authentication for SSP connections or non-SSP
- * devices with sec_level HIGH */
+ * devices with sec_level HIGH or if MITM protection is requested */
if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
- conn->pending_sec_level != BT_SECURITY_HIGH)
+ conn->pending_sec_level != BT_SECURITY_HIGH &&
+ !(conn->auth_type & 0x01))
return 0;
return 1;
@@ -1412,7 +1412,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
conn->state = BT_CONFIG;
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- mgmt_connected(hdev->id, &ev->bdaddr);
+ mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
} else
conn->state = BT_CONNECTED;
@@ -2174,7 +2174,10 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (conn && conn->state == BT_CONNECTED) {
+ if (!conn)
+ goto unlock;
+
+ if (conn->state == BT_CONNECTED) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_PAIRING_TIMEOUT;
hci_conn_put(conn);
@@ -2194,6 +2197,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
}
+unlock:
hci_dev_unlock(hdev);
}
@@ -2816,7 +2820,7 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
goto unlock;
}
- mgmt_connected(hdev->id, &ev->bdaddr);
+ mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
conn->sec_level = BT_SECURITY_LOW;
conn->handle = __le16_to_cpu(ev->handle);
@@ -2834,19 +2838,17 @@ unlock:
static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
- struct hci_ev_le_advertising_info *ev;
- u8 num_reports;
-
- num_reports = skb->data[0];
- ev = (void *) &skb->data[1];
+ u8 num_reports = skb->data[0];
+ void *ptr = &skb->data[1];
hci_dev_lock(hdev);
- hci_add_adv_entry(hdev, ev);
+ while (num_reports--) {
+ struct hci_ev_le_advertising_info *ev = ptr;
- while (--num_reports) {
- ev = (void *) (ev->data + ev->length + 1);
hci_add_adv_entry(hdev, ev);
+
+ ptr += sizeof(*ev) + ev->length + 1;
}
hci_dev_unlock(hdev);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index ff02cf5e77c..f6afe3d76a6 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -183,21 +183,35 @@ static int hci_sock_release(struct socket *sock)
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
bdaddr_t bdaddr;
+ int err;
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;
- return hci_blacklist_add(hdev, &bdaddr);
+ hci_dev_lock_bh(hdev);
+
+ err = hci_blacklist_add(hdev, &bdaddr);
+
+ hci_dev_unlock_bh(hdev);
+
+ return err;
}
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
bdaddr_t bdaddr;
+ int err;
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;
- return hci_blacklist_del(hdev, &bdaddr);
+ hci_dev_lock_bh(hdev);
+
+ err = hci_blacklist_del(hdev, &bdaddr);
+
+ hci_dev_unlock_bh(hdev);
+
+ return err;
}
/* Ioctls that require bound socket */
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index a6c3aa8be1f..22f1a6c8703 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -23,6 +23,8 @@ static inline char *link_typetostr(int type)
return "SCO";
case ESCO_LINK:
return "eSCO";
+ case LE_LINK:
+ return "LE";
default:
return "UNKNOWN";
}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index fb68f344c34..075a3e920ca 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -716,12 +716,18 @@ static int hidp_session(void *arg)
while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
skb_orphan(skb);
- hidp_recv_ctrl_frame(session, skb);
+ if (!skb_linearize(skb))
+ hidp_recv_ctrl_frame(session, skb);
+ else
+ kfree_skb(skb);
}
while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
skb_orphan(skb);
- hidp_recv_intr_frame(session, skb);
+ if (!skb_linearize(skb))
+ hidp_recv_intr_frame(session, skb);
+ else
+ kfree_skb(skb);
}
hidp_process_transmit(session);
@@ -872,6 +878,9 @@ static int hidp_start(struct hid_device *hid)
struct hidp_session *session = hid->driver_data;
struct hid_report *report;
+ if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS)
+ return 0;
+
list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].
report_list, list)
hidp_send_report(session, report);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b3bdb482bbe..8cd12917733 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -907,6 +907,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
if (!conn->hcon->out && conn->hcon->type == LE_LINK)
l2cap_le_conn_ready(conn);
+ if (conn->hcon->out && conn->hcon->type == LE_LINK)
+ smp_conn_security(conn, conn->hcon->pending_sec_level);
+
read_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
@@ -986,8 +989,10 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
del_timer_sync(&conn->info_timer);
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
+ if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
del_timer(&conn->security_timer);
+ smp_chan_destroy(conn);
+ }
hcon->l2cap_data = NULL;
kfree(conn);
@@ -1240,7 +1245,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
__clear_retrans_timer(chan);
}
-void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
+static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
struct hci_conn *hcon = chan->conn->hcon;
u16 flags;
@@ -1256,7 +1261,7 @@ void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
hci_send_acl(hcon, skb, flags);
}
-void l2cap_streaming_send(struct l2cap_chan *chan)
+static void l2cap_streaming_send(struct l2cap_chan *chan)
{
struct sk_buff *skb;
u16 control, fcs;
@@ -1322,7 +1327,7 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
l2cap_do_send(chan, tx_skb);
}
-int l2cap_ertm_send(struct l2cap_chan *chan)
+static int l2cap_ertm_send(struct l2cap_chan *chan)
{
struct sk_buff *skb, *tx_skb;
u16 control, fcs;
@@ -1460,7 +1465,7 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
return sent;
}
-struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
@@ -1490,7 +1495,7 @@ struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr
return skb;
}
-struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
@@ -1519,7 +1524,9 @@ struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *m
return skb;
}
-struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
+static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u16 control, u16 sdulen)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
@@ -1565,7 +1572,7 @@ struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *
return skb;
}
-int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
struct sk_buff *skb;
struct sk_buff_head sar_queue;
@@ -3121,102 +3128,104 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
return 0;
}
-static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
+static void append_skb_frag(struct sk_buff *skb,
+ struct sk_buff *new_frag, struct sk_buff **last_frag)
{
- struct sk_buff *_skb;
- int err;
+ /* skb->len reflects data in skb as well as all fragments
+ * skb->data_len reflects only data in fragments
+ */
+ if (!skb_has_frag_list(skb))
+ skb_shinfo(skb)->frag_list = new_frag;
+
+ new_frag->next = NULL;
+
+ (*last_frag)->next = new_frag;
+ *last_frag = new_frag;
+
+ skb->len += new_frag->len;
+ skb->data_len += new_frag->len;
+ skb->truesize += new_frag->truesize;
+}
+
+static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
+{
+ int err = -EINVAL;
switch (control & L2CAP_CTRL_SAR) {
case L2CAP_SDU_UNSEGMENTED:
- if (test_bit(CONN_SAR_SDU, &chan->conn_state))
- goto drop;
+ if (chan->sdu)
+ break;
- return chan->ops->recv(chan->data, skb);
+ err = chan->ops->recv(chan->data, skb);
+ break;
case L2CAP_SDU_START:
- if (test_bit(CONN_SAR_SDU, &chan->conn_state))
- goto drop;
+ if (chan->sdu)
+ break;
chan->sdu_len = get_unaligned_le16(skb->data);
+ skb_pull(skb, 2);
- if (chan->sdu_len > chan->imtu)
- goto disconnect;
-
- chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
- if (!chan->sdu)
- return -ENOMEM;
+ if (chan->sdu_len > chan->imtu) {
+ err = -EMSGSIZE;
+ break;
+ }
- /* pull sdu_len bytes only after alloc, because of Local Busy
- * condition we have to be sure that this will be executed
- * only once, i.e., when alloc does not fail */
- skb_pull(skb, 2);
+ if (skb->len >= chan->sdu_len)
+ break;
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
+ chan->sdu = skb;
+ chan->sdu_last_frag = skb;
- set_bit(CONN_SAR_SDU, &chan->conn_state);
- chan->partial_sdu_len = skb->len;
+ skb = NULL;
+ err = 0;
break;
case L2CAP_SDU_CONTINUE:
- if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
- goto disconnect;
-
if (!chan->sdu)
- goto disconnect;
+ break;
- chan->partial_sdu_len += skb->len;
- if (chan->partial_sdu_len > chan->sdu_len)
- goto drop;
+ append_skb_frag(chan->sdu, skb,
+ &chan->sdu_last_frag);
+ skb = NULL;
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
+ if (chan->sdu->len >= chan->sdu_len)
+ break;
+ err = 0;
break;
case L2CAP_SDU_END:
- if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
- goto disconnect;
-
if (!chan->sdu)
- goto disconnect;
-
- chan->partial_sdu_len += skb->len;
-
- if (chan->partial_sdu_len > chan->imtu)
- goto drop;
+ break;
- if (chan->partial_sdu_len != chan->sdu_len)
- goto drop;
+ append_skb_frag(chan->sdu, skb,
+ &chan->sdu_last_frag);
+ skb = NULL;
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
+ if (chan->sdu->len != chan->sdu_len)
+ break;
- _skb = skb_clone(chan->sdu, GFP_ATOMIC);
- if (!_skb) {
- return -ENOMEM;
- }
+ err = chan->ops->recv(chan->data, chan->sdu);
- err = chan->ops->recv(chan->data, _skb);
- if (err < 0) {
- kfree_skb(_skb);
- return err;
+ if (!err) {
+ /* Reassembly complete */
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
}
-
- clear_bit(CONN_SAR_SDU, &chan->conn_state);
-
- kfree_skb(chan->sdu);
break;
}
- kfree_skb(skb);
- return 0;
-
-drop:
- kfree_skb(chan->sdu);
- chan->sdu = NULL;
+ if (err) {
+ kfree_skb(skb);
+ kfree_skb(chan->sdu);
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+ }
-disconnect:
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- kfree_skb(skb);
- return 0;
+ return err;
}
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
@@ -3270,99 +3279,6 @@ void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
}
}
-static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
-{
- struct sk_buff *_skb;
- int err = -EINVAL;
-
- /*
- * TODO: We have to notify the userland if some data is lost with the
- * Streaming Mode.
- */
-
- switch (control & L2CAP_CTRL_SAR) {
- case L2CAP_SDU_UNSEGMENTED:
- if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
- kfree_skb(chan->sdu);
- break;
- }
-
- err = chan->ops->recv(chan->data, skb);
- if (!err)
- return 0;
-
- break;
-
- case L2CAP_SDU_START:
- if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
- kfree_skb(chan->sdu);
- break;
- }
-
- chan->sdu_len = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
-
- if (chan->sdu_len > chan->imtu) {
- err = -EMSGSIZE;
- break;
- }
-
- chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
- if (!chan->sdu) {
- err = -ENOMEM;
- break;
- }
-
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-
- set_bit(CONN_SAR_SDU, &chan->conn_state);
- chan->partial_sdu_len = skb->len;
- err = 0;
- break;
-
- case L2CAP_SDU_CONTINUE:
- if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
- break;
-
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-
- chan->partial_sdu_len += skb->len;
- if (chan->partial_sdu_len > chan->sdu_len)
- kfree_skb(chan->sdu);
- else
- err = 0;
-
- break;
-
- case L2CAP_SDU_END:
- if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
- break;
-
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-
- clear_bit(CONN_SAR_SDU, &chan->conn_state);
- chan->partial_sdu_len += skb->len;
-
- if (chan->partial_sdu_len > chan->imtu)
- goto drop;
-
- if (chan->partial_sdu_len == chan->sdu_len) {
- _skb = skb_clone(chan->sdu, GFP_ATOMIC);
- err = chan->ops->recv(chan->data, _skb);
- if (err < 0)
- kfree_skb(_skb);
- }
- err = 0;
-
-drop:
- kfree_skb(chan->sdu);
- break;
- }
-
- kfree_skb(skb);
- return err;
-}
-
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
{
struct sk_buff *skb;
@@ -3377,7 +3293,7 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
skb = skb_dequeue(&chan->srej_q);
control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
- err = l2cap_ertm_reassembly_sdu(chan, skb, control);
+ err = l2cap_reassemble_sdu(chan, skb, control);
if (err < 0) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3537,7 +3453,7 @@ expected:
return 0;
}
- err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
+ err = l2cap_reassemble_sdu(chan, skb, rx_control);
chan->buffer_seq = (chan->buffer_seq + 1) % 64;
if (err < 0) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3853,12 +3769,20 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
tx_seq = __get_txseq(control);
- if (chan->expected_tx_seq == tx_seq)
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
- else
- chan->expected_tx_seq = (tx_seq + 1) % 64;
+ if (chan->expected_tx_seq != tx_seq) {
+ /* Frame(s) missing - must discard partial SDU */
+ kfree_skb(chan->sdu);
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+
+ /* TODO: Notify userland of missing data */
+ }
+
+ chan->expected_tx_seq = (tx_seq + 1) % 64;
- l2cap_streaming_reassembly_sdu(chan, skb, control);
+ if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
goto done;
@@ -4093,6 +4017,11 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
BT_DBG("conn %p", conn);
+ if (hcon->type == LE_LINK) {
+ smp_distribute_keys(conn, 0);
+ del_timer(&conn->security_timer);
+ }
+
read_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
@@ -4105,9 +4034,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
if (chan->scid == L2CAP_CID_LE_DATA) {
if (!status && encrypt) {
chan->sec_level = hcon->sec_level;
- del_timer(&conn->security_timer);
l2cap_chan_ready(sk);
- smp_distribute_keys(conn, 0);
}
bh_unlock_sock(sk);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 61f1f623091..e8292369cdc 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -26,6 +26,8 @@
/* Bluetooth L2CAP sockets. */
+#include <linux/security.h>
+
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
@@ -933,6 +935,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
chan->force_reliable = pchan->force_reliable;
chan->flushable = pchan->flushable;
chan->force_active = pchan->force_active;
+
+ security_sk_clone(parent, sk);
} else {
switch (sk->sk_type) {
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 53e109eb043..5a94eec06ca 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -908,7 +908,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
struct hci_dev *hdev;
struct mgmt_cp_load_keys *cp;
u16 key_count, expected_len;
- int i, err;
+ int i;
cp = (void *) data;
@@ -918,9 +918,9 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
key_count = get_unaligned_le16(&cp->key_count);
expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
- if (expected_len > len) {
- BT_ERR("load_keys: expected at least %u bytes, got %u bytes",
- expected_len, len);
+ if (expected_len != len) {
+ BT_ERR("load_keys: expected %u bytes, got %u bytes",
+ len, expected_len);
return -EINVAL;
}
@@ -942,36 +942,17 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
else
clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
- len -= sizeof(*cp);
- i = 0;
-
- while (i < len) {
- struct mgmt_key_info *key = (void *) cp->keys + i;
-
- i += sizeof(*key) + key->dlen;
-
- if (key->type == HCI_LK_SMP_LTK) {
- struct key_master_id *id = (void *) key->data;
-
- if (key->dlen != sizeof(struct key_master_id))
- continue;
-
- hci_add_ltk(hdev, 0, &key->bdaddr, key->pin_len,
- id->ediv, id->rand, key->val);
-
- continue;
- }
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_key_info *key = &cp->keys[i];
hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
key->pin_len);
}
- err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0);
-
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
- return err;
+ return 0;
}
static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
@@ -1347,6 +1328,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
struct hci_dev *hdev;
struct mgmt_cp_pair_device *cp;
struct pending_cmd *cmd;
+ struct adv_entry *entry;
u8 sec_level, auth_type;
struct hci_conn *conn;
int err;
@@ -1364,15 +1346,20 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
hci_dev_lock_bh(hdev);
- if (cp->io_cap == 0x03) {
- sec_level = BT_SECURITY_MEDIUM;
+ sec_level = BT_SECURITY_MEDIUM;
+ if (cp->io_cap == 0x03)
auth_type = HCI_AT_DEDICATED_BONDING;
- } else {
- sec_level = BT_SECURITY_HIGH;
+ else
auth_type = HCI_AT_DEDICATED_BONDING_MITM;
- }
- conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type);
+ entry = hci_find_adv_entry(hdev, &cp->bdaddr);
+ if (entry)
+ conn = hci_connect(hdev, LE_LINK, &cp->bdaddr, sec_level,
+ auth_type);
+ else
+ conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level,
+ auth_type);
+
if (IS_ERR(conn)) {
err = PTR_ERR(conn);
goto unlock;
@@ -1391,7 +1378,10 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
goto unlock;
}
- conn->connect_cfm_cb = pairing_complete_cb;
+ /* For LE, just connecting isn't a proof that the pairing finished */
+ if (!entry)
+ conn->connect_cfm_cb = pairing_complete_cb;
+
conn->security_cfm_cb = pairing_complete_cb;
conn->disconn_cfm_cb = pairing_complete_cb;
conn->io_capability = cp->io_cap;
@@ -1689,13 +1679,12 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
struct hci_dev *hdev;
- struct mgmt_cp_block_device *cp;
+ struct pending_cmd *cmd;
+ struct mgmt_cp_block_device *cp = (void *) data;
int err;
BT_DBG("hci%u", index);
- cp = (void *) data;
-
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
EINVAL);
@@ -1705,6 +1694,14 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
ENODEV);
+ hci_dev_lock_bh(hdev);
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
err = hci_blacklist_add(hdev, &cp->bdaddr);
if (err < 0)
@@ -1712,6 +1709,11 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
else
err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
NULL, 0);
+
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
return err;
@@ -1721,13 +1723,12 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
struct hci_dev *hdev;
- struct mgmt_cp_unblock_device *cp;
+ struct pending_cmd *cmd;
+ struct mgmt_cp_unblock_device *cp = (void *) data;
int err;
BT_DBG("hci%u", index);
- cp = (void *) data;
-
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
EINVAL);
@@ -1737,6 +1738,14 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
ENODEV);
+ hci_dev_lock_bh(hdev);
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_UNBLOCK_DEVICE, index, NULL, 0);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
err = hci_blacklist_del(hdev, &cp->bdaddr);
if (err < 0)
@@ -1744,6 +1753,67 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
else
err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
NULL, 0);
+
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int set_fast_connectable(struct sock *sk, u16 index,
+ unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_set_fast_connectable *cp = (void *) data;
+ struct hci_cp_write_page_scan_activity acp;
+ u8 type;
+ int err;
+
+ BT_DBG("hci%u", index);
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
+ EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
+ ENODEV);
+
+ hci_dev_lock(hdev);
+
+ if (cp->enable) {
+ type = PAGE_SCAN_TYPE_INTERLACED;
+ acp.interval = 0x0024; /* 22.5 msec page scan interval */
+ } else {
+ type = PAGE_SCAN_TYPE_STANDARD; /* default */
+ acp.interval = 0x0800; /* default 1.28 sec page scan */
+ }
+
+ acp.window = 0x0012; /* default 11.25 msec page scan window */
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
+ sizeof(acp), &acp);
+ if (err < 0) {
+ err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
+ -err);
+ goto done;
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
+ if (err < 0) {
+ err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
+ -err);
+ goto done;
+ }
+
+ err = cmd_complete(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
+ NULL, 0);
+done:
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1869,6 +1939,10 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
case MGMT_OP_UNBLOCK_DEVICE:
err = unblock_device(sk, index, buf + sizeof(*hdr), len);
break;
+ case MGMT_OP_SET_FAST_CONNECTABLE:
+ err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
+ len);
+ break;
default:
BT_DBG("Unknown op %u", opcode);
err = cmd_status(sk, index, opcode, 0x01);
@@ -1977,35 +2051,25 @@ int mgmt_connectable(u16 index, u8 connectable)
int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
{
- struct mgmt_ev_new_key *ev;
- int err, total;
-
- total = sizeof(struct mgmt_ev_new_key) + key->dlen;
- ev = kzalloc(total, GFP_ATOMIC);
- if (!ev)
- return -ENOMEM;
-
- bacpy(&ev->key.bdaddr, &key->bdaddr);
- ev->key.type = key->type;
- memcpy(ev->key.val, key->val, 16);
- ev->key.pin_len = key->pin_len;
- ev->key.dlen = key->dlen;
- ev->store_hint = persistent;
+ struct mgmt_ev_new_key ev;
- memcpy(ev->key.data, key->data, key->dlen);
-
- err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL);
+ memset(&ev, 0, sizeof(ev));
- kfree(ev);
+ ev.store_hint = persistent;
+ bacpy(&ev.key.bdaddr, &key->bdaddr);
+ ev.key.type = key->type;
+ memcpy(ev.key.val, key->val, 16);
+ ev.key.pin_len = key->pin_len;
- return err;
+ return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
}
-int mgmt_connected(u16 index, bdaddr_t *bdaddr)
+int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type)
{
struct mgmt_ev_connected ev;
bacpy(&ev.bdaddr, bdaddr);
+ ev.link_type = link_type;
return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
}
@@ -2260,12 +2324,14 @@ int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
memset(&ev, 0, sizeof(ev));
bacpy(&ev.bdaddr, bdaddr);
- memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
ev.rssi = rssi;
if (eir)
memcpy(ev.eir, eir, sizeof(ev.eir));
+ if (dev_class)
+ memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
+
return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
}
@@ -2286,3 +2352,29 @@ int mgmt_discovering(u16 index, u8 discovering)
return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
sizeof(discovering), NULL);
}
+
+int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_ev_device_blocked ev;
+
+ cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index);
+
+ bacpy(&ev.bdaddr, bdaddr);
+
+ return mgmt_event(MGMT_EV_DEVICE_BLOCKED, index, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
+}
+
+int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_ev_device_unblocked ev;
+
+ cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, index);
+
+ bacpy(&ev.bdaddr, bdaddr);
+
+ return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, index, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
+}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 5ba3f6df665..38b618c96de 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1853,7 +1853,10 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
/* Get data directly from socket receive queue without copying it. */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- rfcomm_recv_frame(s, skb);
+ if (!skb_linearize(skb))
+ rfcomm_recv_frame(s, skb);
+ else
+ kfree_skb(skb);
}
if (sk->sk_state == BT_CLOSED) {
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 482722bbc7a..5417f612732 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -42,6 +42,7 @@
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/security.h>
#include <net/sock.h>
#include <asm/system.h>
@@ -264,6 +265,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
+
+ security_sk_clone(parent, sk);
} else {
pi->dlc->defer_setup = 0;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 8270f05e3f1..a324b009e34 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -41,6 +41,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
+#include <linux/security.h>
#include <net/sock.h>
#include <asm/system.h>
@@ -403,8 +404,10 @@ static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);
- if (parent)
+ if (parent) {
sk->sk_type = parent->sk_type;
+ security_sk_clone(parent, sk);
+ }
}
static struct proto sco_proto = {
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 391888b88a9..759b6357264 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -182,18 +182,9 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
return;
hci_send_acl(conn->hcon, skb, 0);
-}
-
-static __u8 seclevel_to_authreq(__u8 level)
-{
- switch (level) {
- case BT_SECURITY_HIGH:
- /* Right now we don't support bonding */
- return SMP_AUTH_MITM;
- default:
- return SMP_AUTH_NONE;
- }
+ mod_timer(&conn->security_timer, jiffies +
+ msecs_to_jiffies(SMP_TIMEOUT));
}
static void build_pairing_cmd(struct l2cap_conn *conn,
@@ -205,7 +196,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
dist_keys = 0;
if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) {
- dist_keys = SMP_DIST_ENC_KEY | SMP_DIST_ID_KEY | SMP_DIST_SIGN;
+ dist_keys = SMP_DIST_ENC_KEY;
authreq |= SMP_AUTH_BONDING;
}
@@ -229,24 +220,184 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
{
+ struct smp_chan *smp = conn->smp_chan;
+
if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
(max_key_size < SMP_MIN_ENC_KEY_SIZE))
return SMP_ENC_KEY_SIZE;
- conn->smp_key_size = max_key_size;
+ smp->smp_key_size = max_key_size;
return 0;
}
+static void confirm_work(struct work_struct *work)
+{
+ struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
+ struct l2cap_conn *conn = smp->conn;
+ struct crypto_blkcipher *tfm;
+ struct smp_cmd_pairing_confirm cp;
+ int ret;
+ u8 res[16], reason;
+
+ BT_DBG("conn %p", conn);
+
+ tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
+
+ smp->tfm = tfm;
+
+ if (conn->hcon->out)
+ ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0,
+ conn->src, conn->hcon->dst_type, conn->dst,
+ res);
+ else
+ ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
+ conn->hcon->dst_type, conn->dst, 0, conn->src,
+ res);
+ if (ret) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
+
+ swap128(res, cp.confirm_val);
+ smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
+
+ return;
+
+error:
+ smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
+ smp_chan_destroy(conn);
+}
+
+static void random_work(struct work_struct *work)
+{
+ struct smp_chan *smp = container_of(work, struct smp_chan, random);
+ struct l2cap_conn *conn = smp->conn;
+ struct hci_conn *hcon = conn->hcon;
+ struct crypto_blkcipher *tfm = smp->tfm;
+ u8 reason, confirm[16], res[16], key[16];
+ int ret;
+
+ if (IS_ERR_OR_NULL(tfm)) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
+
+ BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
+
+ if (hcon->out)
+ ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0,
+ conn->src, hcon->dst_type, conn->dst,
+ res);
+ else
+ ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
+ hcon->dst_type, conn->dst, 0, conn->src,
+ res);
+ if (ret) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
+
+ swap128(res, confirm);
+
+ if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
+ BT_ERR("Pairing failed (confirmation values mismatch)");
+ reason = SMP_CONFIRM_FAILED;
+ goto error;
+ }
+
+ if (hcon->out) {
+ u8 stk[16], rand[8];
+ __le16 ediv;
+
+ memset(rand, 0, sizeof(rand));
+ ediv = 0;
+
+ smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key);
+ swap128(key, stk);
+
+ memset(stk + smp->smp_key_size, 0,
+ SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size);
+
+ if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend)) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
+
+ hci_le_start_enc(hcon, ediv, rand, stk);
+ hcon->enc_key_size = smp->smp_key_size;
+ } else {
+ u8 stk[16], r[16], rand[8];
+ __le16 ediv;
+
+ memset(rand, 0, sizeof(rand));
+ ediv = 0;
+
+ swap128(smp->prnd, r);
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
+
+ smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, key);
+ swap128(key, stk);
+
+ memset(stk + smp->smp_key_size, 0,
+ SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size);
+
+ hci_add_ltk(hcon->hdev, 0, conn->dst, smp->smp_key_size,
+ ediv, rand, stk);
+ }
+
+ return;
+
+error:
+ smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
+ smp_chan_destroy(conn);
+}
+
+static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
+{
+ struct smp_chan *smp;
+
+ smp = kzalloc(sizeof(struct smp_chan), GFP_ATOMIC);
+ if (!smp)
+ return NULL;
+
+ INIT_WORK(&smp->confirm, confirm_work);
+ INIT_WORK(&smp->random, random_work);
+
+ smp->conn = conn;
+ conn->smp_chan = smp;
+
+ hci_conn_hold(conn->hcon);
+
+ return smp;
+}
+
+void smp_chan_destroy(struct l2cap_conn *conn)
+{
+ kfree(conn->smp_chan);
+ hci_conn_put(conn->hcon);
+}
+
static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_pairing rsp, *req = (void *) skb->data;
+ struct smp_chan *smp;
u8 key_size;
+ int ret;
BT_DBG("conn %p", conn);
- conn->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&conn->preq[1], req, sizeof(*req));
+ if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend))
+ smp = smp_chan_create(conn);
+
+ smp = conn->smp_chan;
+
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], req, sizeof(*req));
skb_pull(skb, sizeof(*req));
if (req->oob_flag)
@@ -260,32 +411,33 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
return SMP_ENC_KEY_SIZE;
/* Just works */
- memset(conn->tk, 0, sizeof(conn->tk));
+ memset(smp->tk, 0, sizeof(smp->tk));
+
+ ret = smp_rand(smp->prnd);
+ if (ret)
+ return SMP_UNSPECIFIED;
- conn->prsp[0] = SMP_CMD_PAIRING_RSP;
- memcpy(&conn->prsp[1], &rsp, sizeof(rsp));
+ smp->prsp[0] = SMP_CMD_PAIRING_RSP;
+ memcpy(&smp->prsp[1], &rsp, sizeof(rsp));
smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
- mod_timer(&conn->security_timer, jiffies +
- msecs_to_jiffies(SMP_TIMEOUT));
-
return 0;
}
static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
- struct smp_cmd_pairing_confirm cp;
- struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm;
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_dev *hdev = conn->hcon->hdev;
+ u8 key_size;
int ret;
- u8 res[16], key_size;
BT_DBG("conn %p", conn);
skb_pull(skb, sizeof(*rsp));
- req = (void *) &conn->preq[1];
+ req = (void *) &smp->preq[1];
key_size = min(req->max_key_size, rsp->max_key_size);
if (check_enc_key_size(conn, key_size))
@@ -295,222 +447,154 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
return SMP_OOB_NOT_AVAIL;
/* Just works */
- memset(conn->tk, 0, sizeof(conn->tk));
-
- conn->prsp[0] = SMP_CMD_PAIRING_RSP;
- memcpy(&conn->prsp[1], rsp, sizeof(*rsp));
-
- ret = smp_rand(conn->prnd);
- if (ret)
- return SMP_UNSPECIFIED;
+ memset(smp->tk, 0, sizeof(smp->tk));
- ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp, 0,
- conn->src, conn->hcon->dst_type, conn->dst, res);
+ ret = smp_rand(smp->prnd);
if (ret)
return SMP_UNSPECIFIED;
- swap128(res, cp.confirm_val);
+ smp->prsp[0] = SMP_CMD_PAIRING_RSP;
+ memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
- smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
+ queue_work(hdev->workqueue, &smp->confirm);
return 0;
}
static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm;
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_dev *hdev = conn->hcon->hdev;
BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
- memcpy(conn->pcnf, skb->data, sizeof(conn->pcnf));
- skb_pull(skb, sizeof(conn->pcnf));
+ memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
+ skb_pull(skb, sizeof(smp->pcnf));
if (conn->hcon->out) {
u8 random[16];
- swap128(conn->prnd, random);
+ swap128(smp->prnd, random);
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
random);
} else {
- struct smp_cmd_pairing_confirm cp;
- int ret;
- u8 res[16];
-
- ret = smp_rand(conn->prnd);
- if (ret)
- return SMP_UNSPECIFIED;
-
- ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp,
- conn->hcon->dst_type, conn->dst,
- 0, conn->src, res);
- if (ret)
- return SMP_CONFIRM_FAILED;
-
- swap128(res, cp.confirm_val);
-
- smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
+ queue_work(hdev->workqueue, &smp->confirm);
}
- mod_timer(&conn->security_timer, jiffies +
- msecs_to_jiffies(SMP_TIMEOUT));
-
return 0;
}
static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct hci_conn *hcon = conn->hcon;
- struct crypto_blkcipher *tfm = hcon->hdev->tfm;
- int ret;
- u8 key[16], res[16], random[16], confirm[16];
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_dev *hdev = conn->hcon->hdev;
- swap128(skb->data, random);
- skb_pull(skb, sizeof(random));
-
- if (conn->hcon->out)
- ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp, 0,
- conn->src, conn->hcon->dst_type, conn->dst,
- res);
- else
- ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp,
- conn->hcon->dst_type, conn->dst, 0, conn->src,
- res);
- if (ret)
- return SMP_UNSPECIFIED;
-
- BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
-
- swap128(res, confirm);
-
- if (memcmp(conn->pcnf, confirm, sizeof(conn->pcnf)) != 0) {
- BT_ERR("Pairing failed (confirmation values mismatch)");
- return SMP_CONFIRM_FAILED;
- }
-
- if (conn->hcon->out) {
- u8 stk[16], rand[8];
- __le16 ediv;
-
- memset(rand, 0, sizeof(rand));
- ediv = 0;
+ BT_DBG("conn %p", conn);
- smp_s1(tfm, conn->tk, random, conn->prnd, key);
- swap128(key, stk);
+ swap128(skb->data, smp->rrnd);
+ skb_pull(skb, sizeof(smp->rrnd));
- memset(stk + conn->smp_key_size, 0,
- SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
+ queue_work(hdev->workqueue, &smp->random);
- hci_le_start_enc(hcon, ediv, rand, stk);
- hcon->enc_key_size = conn->smp_key_size;
- } else {
- u8 stk[16], r[16], rand[8];
- __le16 ediv;
+ return 0;
+}
- memset(rand, 0, sizeof(rand));
- ediv = 0;
+static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
+{
+ struct link_key *key;
+ struct key_master_id *master;
+ struct hci_conn *hcon = conn->hcon;
- swap128(conn->prnd, r);
- smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
+ key = hci_find_link_key_type(hcon->hdev, conn->dst,
+ HCI_LK_SMP_LTK);
+ if (!key)
+ return 0;
- smp_s1(tfm, conn->tk, conn->prnd, random, key);
- swap128(key, stk);
+ if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND,
+ &hcon->pend))
+ return 1;
- memset(stk + conn->smp_key_size, 0,
- SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
+ master = (void *) key->data;
+ hci_le_start_enc(hcon, master->ediv, master->rand,
+ key->val);
+ hcon->enc_key_size = key->pin_len;
- hci_add_ltk(conn->hcon->hdev, 0, conn->dst, conn->smp_key_size,
- ediv, rand, stk);
- }
+ return 1;
- return 0;
}
-
static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_security_req *rp = (void *) skb->data;
struct smp_cmd_pairing cp;
struct hci_conn *hcon = conn->hcon;
+ struct smp_chan *smp;
BT_DBG("conn %p", conn);
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
+ hcon->pending_sec_level = BT_SECURITY_MEDIUM;
+
+ if (smp_ltk_encrypt(conn))
return 0;
+ if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend))
+ return 0;
+
+ smp = smp_chan_create(conn);
+
skb_pull(skb, sizeof(*rp));
memset(&cp, 0, sizeof(cp));
build_pairing_cmd(conn, &cp, NULL, rp->auth_req);
- conn->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&conn->preq[1], &cp, sizeof(cp));
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], &cp, sizeof(cp));
smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
- mod_timer(&conn->security_timer, jiffies +
- msecs_to_jiffies(SMP_TIMEOUT));
-
- set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
-
return 0;
}
int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
{
struct hci_conn *hcon = conn->hcon;
- __u8 authreq;
+ struct smp_chan *smp = conn->smp_chan;
BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
if (!lmp_host_le_capable(hcon->hdev))
return 1;
- if (IS_ERR(hcon->hdev->tfm))
- return 1;
-
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
- return 0;
-
if (sec_level == BT_SECURITY_LOW)
return 1;
if (hcon->sec_level >= sec_level)
return 1;
- authreq = seclevel_to_authreq(sec_level);
-
- if (hcon->link_mode & HCI_LM_MASTER) {
- struct smp_cmd_pairing cp;
- struct link_key *key;
+ if (hcon->link_mode & HCI_LM_MASTER)
+ if (smp_ltk_encrypt(conn))
+ goto done;
- key = hci_find_link_key_type(hcon->hdev, conn->dst,
- HCI_LK_SMP_LTK);
- if (key) {
- struct key_master_id *master = (void *) key->data;
+ if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend))
+ return 0;
- hci_le_start_enc(hcon, master->ediv, master->rand,
- key->val);
- hcon->enc_key_size = key->pin_len;
+ smp = smp_chan_create(conn);
- goto done;
- }
-
- build_pairing_cmd(conn, &cp, NULL, authreq);
- conn->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&conn->preq[1], &cp, sizeof(cp));
+ if (hcon->link_mode & HCI_LM_MASTER) {
+ struct smp_cmd_pairing cp;
- mod_timer(&conn->security_timer, jiffies +
- msecs_to_jiffies(SMP_TIMEOUT));
+ build_pairing_cmd(conn, &cp, NULL, SMP_AUTH_NONE);
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], &cp, sizeof(cp));
smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
} else {
struct smp_cmd_security_req cp;
- cp.auth_req = authreq;
+ cp.auth_req = SMP_AUTH_NONE;
smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
}
done:
hcon->pending_sec_level = sec_level;
- set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
return 0;
}
@@ -518,10 +602,11 @@ done:
static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_encrypt_info *rp = (void *) skb->data;
+ struct smp_chan *smp = conn->smp_chan;
skb_pull(skb, sizeof(*rp));
- memcpy(conn->tk, rp->ltk, sizeof(conn->tk));
+ memcpy(smp->tk, rp->ltk, sizeof(smp->tk));
return 0;
}
@@ -529,11 +614,12 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_master_ident *rp = (void *) skb->data;
+ struct smp_chan *smp = conn->smp_chan;
skb_pull(skb, sizeof(*rp));
- hci_add_ltk(conn->hcon->hdev, 1, conn->src, conn->smp_key_size,
- rp->ediv, rp->rand, conn->tk);
+ hci_add_ltk(conn->hcon->hdev, 1, conn->src, smp->smp_key_size,
+ rp->ediv, rp->rand, smp->tk);
smp_distribute_keys(conn, 1);
@@ -552,12 +638,6 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
goto done;
}
- if (IS_ERR(conn->hcon->hdev->tfm)) {
- err = PTR_ERR(conn->hcon->hdev->tfm);
- reason = SMP_PAIRING_NOTSUPP;
- goto done;
- }
-
skb_pull(skb, sizeof(code));
switch (code) {
@@ -621,20 +701,21 @@ done:
int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
{
struct smp_cmd_pairing *req, *rsp;
+ struct smp_chan *smp = conn->smp_chan;
__u8 *keydist;
BT_DBG("conn %p force %d", conn, force);
- if (IS_ERR(conn->hcon->hdev->tfm))
- return PTR_ERR(conn->hcon->hdev->tfm);
+ if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend))
+ return 0;
- rsp = (void *) &conn->prsp[1];
+ rsp = (void *) &smp->prsp[1];
/* The responder sends its keys first */
if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07))
return 0;
- req = (void *) &conn->preq[1];
+ req = (void *) &smp->preq[1];
if (conn->hcon->out) {
keydist = &rsp->init_key_dist;
@@ -658,7 +739,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
- hci_add_ltk(conn->hcon->hdev, 1, conn->dst, conn->smp_key_size,
+ hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size,
ediv, ident.rand, enc.ltk);
ident.ediv = cpu_to_le16(ediv);
@@ -698,5 +779,11 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
*keydist &= ~SMP_DIST_SIGN;
}
+ if (conn->hcon->out || force) {
+ clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend);
+ del_timer(&conn->security_timer);
+ smp_chan_destroy(conn);
+ }
+
return 0;
}
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 32b8f9f7f79..feb77ea7b58 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -91,7 +91,6 @@ static int br_dev_open(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- netif_carrier_off(dev);
netdev_update_features(dev);
netif_start_queue(dev);
br_stp_enable_bridge(br);
@@ -108,8 +107,6 @@ static int br_dev_stop(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- netif_carrier_off(dev);
-
br_stp_disable_bridge(br);
br_multicast_stop(br);
@@ -304,7 +301,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_start_xmit = br_dev_xmit,
.ndo_get_stats64 = br_get_stats64,
.ndo_set_mac_address = br_set_mac_address,
- .ndo_set_multicast_list = br_dev_set_multicast_list,
+ .ndo_set_rx_mode = br_dev_set_multicast_list,
.ndo_change_mtu = br_change_mtu,
.ndo_do_ioctl = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -361,6 +358,8 @@ void br_dev_setup(struct net_device *dev)
memcpy(br->group_addr, br_group_address, ETH_ALEN);
br->stp_enabled = BR_NO_STP;
+ br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
+
br->designated_root = br->bridge_id;
br->bridge_max_age = br->max_age = 20 * HZ;
br->bridge_hello_time = br->hello_time = 2 * HZ;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 68def3b7fb4..c8e7861b88b 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -558,19 +558,28 @@ skip:
/* Create new static fdb entry */
static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
- __u16 state)
+ __u16 state, __u16 flags)
{
struct net_bridge *br = source->br;
struct hlist_head *head = &br->hash[br_mac_hash(addr)];
struct net_bridge_fdb_entry *fdb;
fdb = fdb_find(head, addr);
- if (fdb)
- return -EEXIST;
+ if (fdb == NULL) {
+ if (!(flags & NLM_F_CREATE))
+ return -ENOENT;
- fdb = fdb_create(head, source, addr);
- if (!fdb)
- return -ENOMEM;
+ fdb = fdb_create(head, source, addr);
+ if (!fdb)
+ return -ENOMEM;
+ } else {
+ if (flags & NLM_F_EXCL)
+ return -EEXIST;
+
+ if (flags & NLM_F_REPLACE)
+ fdb->updated = fdb->used = jiffies;
+ fdb->is_local = fdb->is_static = 0;
+ }
if (state & NUD_PERMANENT)
fdb->is_local = fdb->is_static = 1;
@@ -626,7 +635,7 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
}
spin_lock_bh(&p->br->hash_lock);
- err = fdb_add_entry(p, addr, ndm->ndm_state);
+ err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
spin_unlock_bh(&p->br->hash_lock);
return err;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index e73815456ad..f603e5b0b93 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
@@ -33,20 +34,18 @@
*/
static int port_cost(struct net_device *dev)
{
- if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
- struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
-
- if (!dev_ethtool_get_settings(dev, &ecmd)) {
- switch (ethtool_cmd_speed(&ecmd)) {
- case SPEED_10000:
- return 2;
- case SPEED_1000:
- return 4;
- case SPEED_100:
- return 19;
- case SPEED_10:
- return 100;
- }
+ struct ethtool_cmd ecmd;
+
+ if (!__ethtool_get_settings(dev, &ecmd)) {
+ switch (ethtool_cmd_speed(&ecmd)) {
+ case SPEED_10000:
+ return 2;
+ case SPEED_1000:
+ return 4;
+ case SPEED_100:
+ return 19;
+ case SPEED_10:
+ return 100;
}
}
@@ -161,9 +160,10 @@ static void del_nbp(struct net_bridge_port *p)
call_rcu(&p->rcu, destroy_nbp_rcu);
}
-/* called with RTNL */
-static void del_br(struct net_bridge *br, struct list_head *head)
+/* Delete bridge device */
+void br_dev_delete(struct net_device *dev, struct list_head *head)
{
+ struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p, *n;
list_for_each_entry_safe(p, n, &br->port_list, list) {
@@ -268,7 +268,7 @@ int br_del_bridge(struct net *net, const char *name)
}
else
- del_br(netdev_priv(dev), NULL);
+ br_dev_delete(dev, NULL);
rtnl_unlock();
return ret;
@@ -324,7 +324,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
/* Don't allow bridging non-ethernet like devices */
if ((dev->flags & IFF_LOOPBACK) ||
- dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN)
+ dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
+ !is_valid_ether_addr(dev->dev_addr))
return -EINVAL;
/* No bridging of bridges */
@@ -352,10 +353,6 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
SYSFS_BRIDGE_PORT_ATTR);
if (err)
- goto err0;
-
- err = br_fdb_insert(br, p, dev->dev_addr);
- if (err)
goto err1;
err = br_sysfs_addif(p);
@@ -396,6 +393,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
dev_set_mtu(br->dev, br_min_mtu(br));
+ if (br_fdb_insert(br, p, dev->dev_addr))
+ netdev_err(dev, "failed insert local address bridge forwarding table\n");
+
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
@@ -405,11 +405,9 @@ err4:
err3:
sysfs_remove_link(br->ifobj, p->dev->name);
err2:
- br_fdb_delete_by_port(br, p, 1);
-err1:
kobject_put(&p->kobj);
p = NULL; /* kobject_put frees */
-err0:
+err1:
dev_set_promiscuity(dev, -1);
put_back:
dev_put(dev);
@@ -449,7 +447,7 @@ void __net_exit br_net_exit(struct net *net)
rtnl_lock();
for_each_netdev(net, dev)
if (dev->priv_flags & IFF_EBRIDGE)
- del_br(netdev_priv(dev), &list);
+ br_dev_delete(dev, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index f06ee39c73f..6f9f8c01472 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -162,14 +162,37 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
p = br_port_get_rcu(skb->dev);
if (unlikely(is_link_local(dest))) {
- /* Pause frames shouldn't be passed up by driver anyway */
- if (skb->protocol == htons(ETH_P_PAUSE))
+ /*
+ * See IEEE 802.1D Table 7-10 Reserved addresses
+ *
+ * Assignment Value
+ * Bridge Group Address 01-80-C2-00-00-00
+ * (MAC Control) 802.3 01-80-C2-00-00-01
+ * (Link Aggregation) 802.3 01-80-C2-00-00-02
+ * 802.1X PAE address 01-80-C2-00-00-03
+ *
+ * 802.1AB LLDP 01-80-C2-00-00-0E
+ *
+ * Others reserved for future standardization
+ */
+ switch (dest[5]) {
+ case 0x00: /* Bridge Group Address */
+ /* If STP is turned off,
+ then must forward to keep loop detection */
+ if (p->br->stp_enabled == BR_NO_STP)
+ goto forward;
+ break;
+
+ case 0x01: /* IEEE MAC (Pause) */
goto drop;
- /* If STP is turned off, then forward */
- if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
- goto forward;
+ default:
+ /* Allow selective forwarding for most other protocols */
+ if (p->br->group_fwd_mask & (1u << dest[5]))
+ goto forward;
+ }
+ /* Deliver packet to local host only */
if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
NULL, br_handle_local_finish)) {
return RX_HANDLER_CONSUMED; /* consumed by filter */
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 5b1ed1ba9aa..e5f9ece3c9a 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -210,6 +210,7 @@ static struct rtnl_link_ops br_link_ops __read_mostly = {
.priv_size = sizeof(struct net_bridge),
.setup = br_dev_setup,
.validate = br_validate,
+ .dellink = br_dev_delete,
};
int __init br_netlink_init(void)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 78cc364997d..d7d6fb05411 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -29,6 +29,11 @@
#define BR_VERSION "2.3"
+/* Control of forwarding link local multicast */
+#define BR_GROUPFWD_DEFAULT 0
+/* Don't allow forwarding control protocols like STP and LLDP */
+#define BR_GROUPFWD_RESTRICTED 0x4007u
+
/* Path to usermode spanning tree program */
#define BR_STP_PROG "/sbin/bridge-stp"
@@ -193,6 +198,8 @@ struct net_bridge
unsigned long flags;
#define BR_SET_MAC_ADDR 0x00000001
+ u16 group_fwd_mask;
+
/* STP */
bridge_id designated_root;
bridge_id bridge_id;
@@ -294,6 +301,7 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
/* br_device.c */
extern void br_dev_setup(struct net_device *dev);
+extern void br_dev_delete(struct net_device *dev, struct list_head *list);
extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 68b893ea8c3..c236c0e4398 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -149,6 +149,39 @@ static ssize_t store_stp_state(struct device *d,
static DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state,
store_stp_state);
+static ssize_t show_group_fwd_mask(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%#x\n", br->group_fwd_mask);
+}
+
+
+static ssize_t store_group_fwd_mask(struct device *d,
+ struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ struct net_bridge *br = to_bridge(d);
+ char *endp;
+ unsigned long val;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ return -EINVAL;
+
+ if (val & BR_GROUPFWD_RESTRICTED)
+ return -EINVAL;
+
+ br->group_fwd_mask = val;
+
+ return len;
+}
+static DEVICE_ATTR(group_fwd_mask, S_IRUGO | S_IWUSR, show_group_fwd_mask,
+ store_group_fwd_mask);
+
static ssize_t show_priority(struct device *d, struct device_attribute *attr,
char *buf)
{
@@ -652,6 +685,7 @@ static struct attribute *bridge_attrs[] = {
&dev_attr_max_age.attr,
&dev_attr_ageing_time.attr,
&dev_attr_stp_state.attr,
+ &dev_attr_group_fwd_mask.attr,
&dev_attr_priority.attr,
&dev_attr_bridge_id.attr,
&dev_attr_root_id.attr,
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 1bcaf36ad61..40d8258bf74 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -87,14 +87,14 @@ static int __init ebtable_broute_init(void)
if (ret < 0)
return ret;
/* see br_input.c */
- rcu_assign_pointer(br_should_route_hook,
+ RCU_INIT_POINTER(br_should_route_hook,
(br_should_route_hook_t *)ebt_broute);
return 0;
}
static void __exit ebtable_broute_fini(void)
{
- rcu_assign_pointer(br_should_route_hook, NULL);
+ RCU_INIT_POINTER(br_should_route_hook, NULL);
synchronize_net();
unregister_pernet_subsys(&broute_net_ops);
}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 7f9ac0742d1..47fc8f3a47c 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -212,8 +212,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
enum cfcnfg_phy_preference pref;
enum cfcnfg_phy_type phy_type;
struct cfcnfg *cfg;
- struct caif_device_entry_list *caifdevs =
- caif_device_list(dev_net(dev));
+ struct caif_device_entry_list *caifdevs;
if (dev->type != ARPHRD_CAIF)
return 0;
@@ -222,6 +221,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
if (cfg == NULL)
return 0;
+ caifdevs = caif_device_list(dev_net(dev));
+
switch (what) {
case NETDEV_REGISTER:
caifd = caif_device_alloc(dev);
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 52fe33bee02..00523ecc4ce 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -78,10 +78,8 @@ struct cfcnfg *cfcnfg_create(void)
/* Initiate this layer */
this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
- if (!this) {
- pr_warn("Out of memory\n");
+ if (!this)
return NULL;
- }
this->mux = cfmuxl_create();
if (!this->mux)
goto out_of_mem;
@@ -108,8 +106,6 @@ struct cfcnfg *cfcnfg_create(void)
return this;
out_of_mem:
- pr_warn("Out of memory\n");
-
synchronize_rcu();
kfree(this->mux);
@@ -448,10 +444,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
"- unknown channel type\n");
goto unlock;
}
- if (!servicel) {
- pr_warn("Out of memory\n");
+ if (!servicel)
goto unlock;
- }
layer_set_dn(servicel, cnfg->mux);
cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
layer_set_up(servicel, adapt_layer);
@@ -473,7 +467,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
{
struct cflayer *frml;
struct cflayer *phy_driver = NULL;
- struct cfcnfg_phyinfo *phyinfo;
+ struct cfcnfg_phyinfo *phyinfo = NULL;
int i;
u8 phyid;
@@ -488,25 +482,25 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
goto got_phyid;
}
pr_warn("Too many CAIF Link Layers (max 6)\n");
- goto out;
+ goto out_err;
got_phyid:
phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
+ if (!phyinfo)
+ goto out_err;
switch (phy_type) {
case CFPHYTYPE_FRAG:
phy_driver =
cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
- if (!phy_driver) {
- pr_warn("Out of memory\n");
- goto out;
- }
+ if (!phy_driver)
+ goto out_err;
break;
case CFPHYTYPE_CAIF:
phy_driver = NULL;
break;
default:
- goto out;
+ goto out_err;
}
phy_layer->id = phyid;
phyinfo->pref = pref;
@@ -520,11 +514,8 @@ got_phyid:
frml = cffrml_create(phyid, fcs);
- if (!frml) {
- pr_warn("Out of memory\n");
- kfree(phyinfo);
- goto out;
- }
+ if (!frml)
+ goto out_err;
phyinfo->frm_layer = frml;
layer_set_up(frml, cnfg->mux);
@@ -540,7 +531,12 @@ got_phyid:
}
list_add_rcu(&phyinfo->node, &cnfg->phys);
-out:
+ mutex_unlock(&cnfg->lock);
+ return;
+
+out_err:
+ kfree(phy_driver);
+ kfree(phyinfo);
mutex_unlock(&cnfg->lock);
}
EXPORT_SYMBOL(cfcnfg_add_phy_layer);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index e22671bed66..5cf52225692 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -35,15 +35,12 @@ struct cflayer *cfctrl_create(void)
{
struct dev_info dev_info;
struct cfctrl *this =
- kmalloc(sizeof(struct cfctrl), GFP_ATOMIC);
- if (!this) {
- pr_warn("Out of memory\n");
+ kzalloc(sizeof(struct cfctrl), GFP_ATOMIC);
+ if (!this)
return NULL;
- }
caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
memset(&dev_info, 0, sizeof(dev_info));
dev_info.id = 0xff;
- memset(this, 0, sizeof(*this));
cfsrvl_init(&this->serv, 0, &dev_info, false);
atomic_set(&this->req_seq_no, 1);
atomic_set(&this->rsp_seq_no, 1);
@@ -180,10 +177,8 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
struct cfctrl *cfctrl = container_obj(layer);
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
struct cflayer *dn = cfctrl->serv.layer.dn;
- if (!pkt) {
- pr_warn("Out of memory\n");
+ if (!pkt)
return;
- }
if (!dn) {
pr_debug("not able to send enum request\n");
return;
@@ -224,10 +219,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
}
pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
- if (!pkt) {
- pr_warn("Out of memory\n");
+ if (!pkt)
return -ENOMEM;
- }
cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
cfpkt_addbdy(pkt, (param->chtype << 4) | param->linktype);
cfpkt_addbdy(pkt, (param->priority << 3) | param->phyid);
@@ -275,10 +268,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
return -EINVAL;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req) {
- pr_warn("Out of memory\n");
+ if (!req)
return -ENOMEM;
- }
req->client_layer = user_layer;
req->cmd = CFCTRL_CMD_LINK_SETUP;
req->param = *param;
@@ -312,10 +303,8 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
struct cflayer *dn = cfctrl->serv.layer.dn;
- if (!pkt) {
- pr_warn("Out of memory\n");
+ if (!pkt)
return -ENOMEM;
- }
if (!dn) {
pr_debug("not able to send link-down request\n");
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 11a2af4c162..65d6ef3cf9a 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -19,13 +19,10 @@ static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
{
- struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
- if (!dbg) {
- pr_warn("Out of memory\n");
+ struct cfsrvl *dbg = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!dbg)
return NULL;
- }
caif_assert(offsetof(struct cfsrvl, layer) == 0);
- memset(dbg, 0, sizeof(struct cfsrvl));
cfsrvl_init(dbg, channel_id, dev_info, false);
dbg->layer.receive = cfdbgl_receive;
dbg->layer.transmit = cfdbgl_transmit;
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 0382dec84fd..0f5ff27aa41 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -26,13 +26,10 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
{
- struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
- if (!dgm) {
- pr_warn("Out of memory\n");
+ struct cfsrvl *dgm = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!dgm)
return NULL;
- }
caif_assert(offsetof(struct cfsrvl, layer) == 0);
- memset(dgm, 0, sizeof(struct cfsrvl));
cfsrvl_init(dgm, channel_id, dev_info, true);
dgm->layer.receive = cfdgml_receive;
dgm->layer.transmit = cfdgml_transmit;
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index 04204b20271..f39921171d0 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -34,11 +34,9 @@ static u32 cffrml_rcv_error;
static u32 cffrml_rcv_checsum_error;
struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
{
- struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC);
- if (!this) {
- pr_warn("Out of memory\n");
+ struct cffrml *this = kzalloc(sizeof(struct cffrml), GFP_ATOMIC);
+ if (!this)
return NULL;
- }
this->pcpu_refcnt = alloc_percpu(int);
if (this->pcpu_refcnt == NULL) {
kfree(this);
@@ -47,7 +45,6 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
caif_assert(offsetof(struct cffrml, layer) == 0);
- memset(this, 0, sizeof(struct cflayer));
this->layer.receive = cffrml_receive;
this->layer.transmit = cffrml_transmit;
this->layer.ctrlcmd = cffrml_ctrlcmd;
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index c23979e79df..b36f24a4c8e 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -108,7 +108,7 @@ struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
int idx = phyid % DN_CACHE_SIZE;
spin_lock_bh(&muxl->transmit_lock);
- rcu_assign_pointer(muxl->dn_cache[idx], NULL);
+ RCU_INIT_POINTER(muxl->dn_cache[idx], NULL);
dn = get_from_id(&muxl->frml_list, phyid);
if (dn == NULL)
goto out;
@@ -164,7 +164,7 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
if (up == NULL)
goto out;
- rcu_assign_pointer(muxl->up_cache[idx], NULL);
+ RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
list_del_rcu(&up->node);
out:
spin_unlock_bh(&muxl->receive_lock);
@@ -261,7 +261,7 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
idx = layer->id % UP_CACHE_SIZE;
spin_lock_bh(&muxl->receive_lock);
- rcu_assign_pointer(muxl->up_cache[idx], NULL);
+ RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
list_del_rcu(&layer->node);
spin_unlock_bh(&muxl->receive_lock);
}
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 0deabb44005..81660f80971 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -46,13 +46,10 @@ struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
int mtu_size)
{
int tmp;
- struct cfrfml *this =
- kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
+ struct cfrfml *this = kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
- if (!this) {
- pr_warn("Out of memory\n");
+ if (!this)
return NULL;
- }
cfsrvl_init(&this->serv, channel_id, dev_info, false);
this->serv.release = cfrfml_release;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 2715c84cfa8..797c8d16599 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -33,13 +33,10 @@ static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
struct cflayer *cfserl_create(int type, int instance, bool use_stx)
{
- struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC);
- if (!this) {
- pr_warn("Out of memory\n");
+ struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
+ if (!this)
return NULL;
- }
caif_assert(offsetof(struct cfserl, layer) == 0);
- memset(this, 0, sizeof(struct cfserl));
this->layer.receive = cfserl_receive;
this->layer.transmit = cfserl_transmit;
this->layer.ctrlcmd = cfserl_ctrlcmd;
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 535a1e72b36..b99f5b22689 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -108,10 +108,8 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
struct caif_payload_info *info;
u8 flow_on = SRVL_FLOW_ON;
pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
- if (!pkt) {
- pr_warn("Out of memory\n");
+ if (!pkt)
return -ENOMEM;
- }
if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
pr_err("Packet is erroneous!\n");
@@ -130,10 +128,8 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
struct caif_payload_info *info;
u8 flow_off = SRVL_FLOW_OFF;
pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
- if (!pkt) {
- pr_warn("Out of memory\n");
+ if (!pkt)
return -ENOMEM;
- }
if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
pr_err("Packet is erroneous!\n");
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 98e027db18e..53e49f3e3af 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -26,13 +26,10 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
{
- struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
- if (!util) {
- pr_warn("Out of memory\n");
+ struct cfsrvl *util = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!util)
return NULL;
- }
caif_assert(offsetof(struct cfsrvl, layer) == 0);
- memset(util, 0, sizeof(struct cfsrvl));
cfsrvl_init(util, channel_id, dev_info, true);
util->layer.receive = cfutill_receive;
util->layer.transmit = cfutill_transmit;
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 3ec83fbc288..910ab0661f6 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -25,13 +25,10 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt);
struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
{
- struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
- if (!vei) {
- pr_warn("Out of memory\n");
+ struct cfsrvl *vei = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!vei)
return NULL;
- }
caif_assert(offsetof(struct cfsrvl, layer) == 0);
- memset(vei, 0, sizeof(struct cfsrvl));
cfsrvl_init(vei, channel_id, dev_info, true);
vei->layer.receive = cfvei_receive;
vei->layer.transmit = cfvei_transmit;
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index b2f5989ad45..e3f37db40ac 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -21,14 +21,11 @@ static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt);
struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
{
- struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
- if (!vid) {
- pr_warn("Out of memory\n");
+ struct cfsrvl *vid = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!vid)
return NULL;
- }
caif_assert(offsetof(struct cfsrvl, layer) == 0);
- memset(vid, 0, sizeof(struct cfsrvl));
cfsrvl_init(vid, channel_id, dev_info, false);
vid->layer.receive = cfvidl_receive;
vid->layer.transmit = cfvidl_transmit;
diff --git a/net/can/Kconfig b/net/can/Kconfig
index 89395b2c8bc..03200699d27 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -40,5 +40,16 @@ config CAN_BCM
CAN messages are used on the bus (e.g. in automotive environments).
To use the Broadcast Manager, use AF_CAN with protocol CAN_BCM.
+config CAN_GW
+ tristate "CAN Gateway/Router (with netlink configuration)"
+ depends on CAN
+ default N
+ ---help---
+ The CAN Gateway/Router is used to route (and modify) CAN frames.
+ It is based on the PF_CAN core infrastructure for msg filtering and
+ msg sending and can optionally modify routed CAN frames on the fly.
+ CAN frames can be routed between CAN network interfaces (one hop).
+ They can be modified with AND/OR/XOR/SET operations as configured
+ by the netlink configuration interface known e.g. from iptables.
source "drivers/net/can/Kconfig"
diff --git a/net/can/Makefile b/net/can/Makefile
index 2d3894b3274..cef49eb1f5c 100644
--- a/net/can/Makefile
+++ b/net/can/Makefile
@@ -10,3 +10,6 @@ can-raw-y := raw.o
obj-$(CONFIG_CAN_BCM) += can-bcm.o
can-bcm-y := bcm.o
+
+obj-$(CONFIG_CAN_GW) += can-gw.o
+can-gw-y := gw.o
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 9b0c32a2690..0ce2ad0696d 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -38,8 +38,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
@@ -719,7 +717,7 @@ int can_proto_register(const struct can_proto *cp)
proto);
err = -EBUSY;
} else
- rcu_assign_pointer(proto_tab[proto], cp);
+ RCU_INIT_POINTER(proto_tab[proto], cp);
mutex_unlock(&proto_tab_lock);
@@ -740,7 +738,7 @@ void can_proto_unregister(const struct can_proto *cp)
mutex_lock(&proto_tab_lock);
BUG_ON(proto_tab[proto] != cp);
- rcu_assign_pointer(proto_tab[proto], NULL);
+ RCU_INIT_POINTER(proto_tab[proto], NULL);
mutex_unlock(&proto_tab_lock);
synchronize_rcu();
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 34253b84e30..fd882dbadad 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -35,8 +35,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef AF_CAN_H
diff --git a/net/can/bcm.c b/net/can/bcm.c
index c84963d2dee..151b7730c12 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -37,8 +37,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
diff --git a/net/can/gw.c b/net/can/gw.c
new file mode 100644
index 00000000000..3d79b127881
--- /dev/null
+++ b/net/can/gw.c
@@ -0,0 +1,957 @@
+/*
+ * gw.c - CAN frame Gateway/Router/Bridge with netlink interface
+ *
+ * Copyright (c) 2011 Volkswagen Group Electronic Research
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/skbuff.h>
+#include <linux/can.h>
+#include <linux/can/core.h>
+#include <linux/can/gw.h>
+#include <net/rtnetlink.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+#define CAN_GW_VERSION "20101209"
+static __initdata const char banner[] =
+ KERN_INFO "can: netlink gateway (rev " CAN_GW_VERSION ")\n";
+
+MODULE_DESCRIPTION("PF_CAN netlink gateway");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
+MODULE_ALIAS("can-gw");
+
+HLIST_HEAD(cgw_list);
+static struct notifier_block notifier;
+
+static struct kmem_cache *cgw_cache __read_mostly;
+
+/* structure that contains the (on-the-fly) CAN frame modifications */
+struct cf_mod {
+ struct {
+ struct can_frame and;
+ struct can_frame or;
+ struct can_frame xor;
+ struct can_frame set;
+ } modframe;
+ struct {
+ u8 and;
+ u8 or;
+ u8 xor;
+ u8 set;
+ } modtype;
+ void (*modfunc[MAX_MODFUNCTIONS])(struct can_frame *cf,
+ struct cf_mod *mod);
+
+ /* CAN frame checksum calculation after CAN frame modifications */
+ struct {
+ struct cgw_csum_xor xor;
+ struct cgw_csum_crc8 crc8;
+ } csum;
+ struct {
+ void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
+ void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
+ } csumfunc;
+};
+
+
+/*
+ * So far we just support CAN -> CAN routing and frame modifications.
+ *
+ * The internal can_can_gw structure contains data and attributes for
+ * a CAN -> CAN gateway job.
+ */
+struct can_can_gw {
+ struct can_filter filter;
+ int src_idx;
+ int dst_idx;
+};
+
+/* list entry for CAN gateways jobs */
+struct cgw_job {
+ struct hlist_node list;
+ struct rcu_head rcu;
+ u32 handled_frames;
+ u32 dropped_frames;
+ struct cf_mod mod;
+ union {
+ /* CAN frame data source */
+ struct net_device *dev;
+ } src;
+ union {
+ /* CAN frame data destination */
+ struct net_device *dev;
+ } dst;
+ union {
+ struct can_can_gw ccgw;
+ /* tbc */
+ };
+ u8 gwtype;
+ u16 flags;
+};
+
+/* modification functions that are invoked in the hot path in can_can_gw_rcv */
+
+#define MODFUNC(func, op) static void func(struct can_frame *cf, \
+ struct cf_mod *mod) { op ; }
+
+MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
+MODFUNC(mod_and_dlc, cf->can_dlc &= mod->modframe.and.can_dlc)
+MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data)
+MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id)
+MODFUNC(mod_or_dlc, cf->can_dlc |= mod->modframe.or.can_dlc)
+MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data)
+MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id)
+MODFUNC(mod_xor_dlc, cf->can_dlc ^= mod->modframe.xor.can_dlc)
+MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data)
+MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id)
+MODFUNC(mod_set_dlc, cf->can_dlc = mod->modframe.set.can_dlc)
+MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data)
+
+static inline void canframecpy(struct can_frame *dst, struct can_frame *src)
+{
+ /*
+ * Copy the struct members separately to ensure that no uninitialized
+ * data are copied in the 3 bytes hole of the struct. This is needed
+ * to make easy compares of the data in the struct cf_mod.
+ */
+
+ dst->can_id = src->can_id;
+ dst->can_dlc = src->can_dlc;
+ *(u64 *)dst->data = *(u64 *)src->data;
+}
+
+static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re)
+{
+ /*
+ * absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0]
+ * relative to received dlc -1 .. -8 :
+ * e.g. for received dlc = 8
+ * -1 => index = 7 (data[7])
+ * -3 => index = 5 (data[5])
+ * -8 => index = 0 (data[0])
+ */
+
+ if (fr > -9 && fr < 8 &&
+ to > -9 && to < 8 &&
+ re > -9 && re < 8)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static inline int calc_idx(int idx, int rx_dlc)
+{
+ if (idx < 0)
+ return rx_dlc + idx;
+ else
+ return idx;
+}
+
+static void cgw_csum_xor_rel(struct can_frame *cf, struct cgw_csum_xor *xor)
+{
+ int from = calc_idx(xor->from_idx, cf->can_dlc);
+ int to = calc_idx(xor->to_idx, cf->can_dlc);
+ int res = calc_idx(xor->result_idx, cf->can_dlc);
+ u8 val = xor->init_xor_val;
+ int i;
+
+ if (from < 0 || to < 0 || res < 0)
+ return;
+
+ if (from <= to) {
+ for (i = from; i <= to; i++)
+ val ^= cf->data[i];
+ } else {
+ for (i = from; i >= to; i--)
+ val ^= cf->data[i];
+ }
+
+ cf->data[res] = val;
+}
+
+static void cgw_csum_xor_pos(struct can_frame *cf, struct cgw_csum_xor *xor)
+{
+ u8 val = xor->init_xor_val;
+ int i;
+
+ for (i = xor->from_idx; i <= xor->to_idx; i++)
+ val ^= cf->data[i];
+
+ cf->data[xor->result_idx] = val;
+}
+
+static void cgw_csum_xor_neg(struct can_frame *cf, struct cgw_csum_xor *xor)
+{
+ u8 val = xor->init_xor_val;
+ int i;
+
+ for (i = xor->from_idx; i >= xor->to_idx; i--)
+ val ^= cf->data[i];
+
+ cf->data[xor->result_idx] = val;
+}
+
+static void cgw_csum_crc8_rel(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
+{
+ int from = calc_idx(crc8->from_idx, cf->can_dlc);
+ int to = calc_idx(crc8->to_idx, cf->can_dlc);
+ int res = calc_idx(crc8->result_idx, cf->can_dlc);
+ u8 crc = crc8->init_crc_val;
+ int i;
+
+ if (from < 0 || to < 0 || res < 0)
+ return;
+
+ if (from <= to) {
+ for (i = crc8->from_idx; i <= crc8->to_idx; i++)
+ crc = crc8->crctab[crc^cf->data[i]];
+ } else {
+ for (i = crc8->from_idx; i >= crc8->to_idx; i--)
+ crc = crc8->crctab[crc^cf->data[i]];
+ }
+
+ switch (crc8->profile) {
+
+ case CGW_CRC8PRF_1U8:
+ crc = crc8->crctab[crc^crc8->profile_data[0]];
+ break;
+
+ case CGW_CRC8PRF_16U8:
+ crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
+ break;
+
+ case CGW_CRC8PRF_SFFID_XOR:
+ crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
+ (cf->can_id >> 8 & 0xFF)];
+ break;
+
+ }
+
+ cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
+}
+
+static void cgw_csum_crc8_pos(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
+{
+ u8 crc = crc8->init_crc_val;
+ int i;
+
+ for (i = crc8->from_idx; i <= crc8->to_idx; i++)
+ crc = crc8->crctab[crc^cf->data[i]];
+
+ switch (crc8->profile) {
+
+ case CGW_CRC8PRF_1U8:
+ crc = crc8->crctab[crc^crc8->profile_data[0]];
+ break;
+
+ case CGW_CRC8PRF_16U8:
+ crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
+ break;
+
+ case CGW_CRC8PRF_SFFID_XOR:
+ crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
+ (cf->can_id >> 8 & 0xFF)];
+ break;
+ }
+
+ cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
+}
+
+static void cgw_csum_crc8_neg(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
+{
+ u8 crc = crc8->init_crc_val;
+ int i;
+
+ for (i = crc8->from_idx; i >= crc8->to_idx; i--)
+ crc = crc8->crctab[crc^cf->data[i]];
+
+ switch (crc8->profile) {
+
+ case CGW_CRC8PRF_1U8:
+ crc = crc8->crctab[crc^crc8->profile_data[0]];
+ break;
+
+ case CGW_CRC8PRF_16U8:
+ crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
+ break;
+
+ case CGW_CRC8PRF_SFFID_XOR:
+ crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
+ (cf->can_id >> 8 & 0xFF)];
+ break;
+ }
+
+ cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
+}
+
+/* the receive & process & send function */
+static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+{
+ struct cgw_job *gwj = (struct cgw_job *)data;
+ struct can_frame *cf;
+ struct sk_buff *nskb;
+ int modidx = 0;
+
+ /* do not handle already routed frames - see comment below */
+ if (skb_mac_header_was_set(skb))
+ return;
+
+ if (!(gwj->dst.dev->flags & IFF_UP)) {
+ gwj->dropped_frames++;
+ return;
+ }
+
+ /*
+ * clone the given skb, which has not been done in can_rcv()
+ *
+ * When there is at least one modification function activated,
+ * we need to copy the skb as we want to modify skb->data.
+ */
+ if (gwj->mod.modfunc[0])
+ nskb = skb_copy(skb, GFP_ATOMIC);
+ else
+ nskb = skb_clone(skb, GFP_ATOMIC);
+
+ if (!nskb) {
+ gwj->dropped_frames++;
+ return;
+ }
+
+ /*
+ * Mark routed frames by setting some mac header length which is
+ * not relevant for the CAN frames located in the skb->data section.
+ *
+ * As dev->header_ops is not set in CAN netdevices no one is ever
+ * accessing the various header offsets in the CAN skbuffs anyway.
+ * E.g. using the packet socket to read CAN frames is still working.
+ */
+ skb_set_mac_header(nskb, 8);
+ nskb->dev = gwj->dst.dev;
+
+ /* pointer to modifiable CAN frame */
+ cf = (struct can_frame *)nskb->data;
+
+ /* perform preprocessed modification functions if there are any */
+ while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
+ (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
+
+ /* check for checksum updates when the CAN frame has been modified */
+ if (modidx) {
+ if (gwj->mod.csumfunc.crc8)
+ (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
+
+ if (gwj->mod.csumfunc.xor)
+ (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
+ }
+
+ /* clear the skb timestamp if not configured the other way */
+ if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
+ nskb->tstamp.tv64 = 0;
+
+ /* send to netdevice */
+ if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
+ gwj->dropped_frames++;
+ else
+ gwj->handled_frames++;
+}
+
+static inline int cgw_register_filter(struct cgw_job *gwj)
+{
+ return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
+ gwj->ccgw.filter.can_mask, can_can_gw_rcv,
+ gwj, "gw");
+}
+
+static inline void cgw_unregister_filter(struct cgw_job *gwj)
+{
+ can_rx_unregister(gwj->src.dev, gwj->ccgw.filter.can_id,
+ gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
+}
+
+static int cgw_notifier(struct notifier_block *nb,
+ unsigned long msg, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+
+ if (!net_eq(dev_net(dev), &init_net))
+ return NOTIFY_DONE;
+ if (dev->type != ARPHRD_CAN)
+ return NOTIFY_DONE;
+
+ if (msg == NETDEV_UNREGISTER) {
+
+ struct cgw_job *gwj = NULL;
+ struct hlist_node *n, *nx;
+
+ ASSERT_RTNL();
+
+ hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+
+ if (gwj->src.dev == dev || gwj->dst.dev == dev) {
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(gwj);
+ kfree(gwj);
+ }
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
+{
+ struct cgw_frame_mod mb;
+ struct rtcanmsg *rtcan;
+ struct nlmsghdr *nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*rtcan), 0);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ rtcan = nlmsg_data(nlh);
+ rtcan->can_family = AF_CAN;
+ rtcan->gwtype = gwj->gwtype;
+ rtcan->flags = gwj->flags;
+
+ /* add statistics if available */
+
+ if (gwj->handled_frames) {
+ if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
+ }
+
+ if (gwj->dropped_frames) {
+ if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
+ }
+
+ /* check non default settings of attributes */
+
+ if (gwj->mod.modtype.and) {
+ memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
+ mb.modtype = gwj->mod.modtype.and;
+ if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
+ }
+
+ if (gwj->mod.modtype.or) {
+ memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
+ mb.modtype = gwj->mod.modtype.or;
+ if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
+ }
+
+ if (gwj->mod.modtype.xor) {
+ memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
+ mb.modtype = gwj->mod.modtype.xor;
+ if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
+ }
+
+ if (gwj->mod.modtype.set) {
+ memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
+ mb.modtype = gwj->mod.modtype.set;
+ if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
+ }
+
+ if (gwj->mod.csumfunc.crc8) {
+ if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
+ &gwj->mod.csum.crc8) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + \
+ NLA_ALIGN(CGW_CS_CRC8_LEN);
+ }
+
+ if (gwj->mod.csumfunc.xor) {
+ if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
+ &gwj->mod.csum.xor) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + \
+ NLA_ALIGN(CGW_CS_XOR_LEN);
+ }
+
+ if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
+
+ if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) {
+ if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
+ &gwj->ccgw.filter) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN +
+ NLA_ALIGN(sizeof(struct can_filter));
+ }
+
+ if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
+
+ if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
+ }
+
+ return skb->len;
+
+cancel:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+/* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
+static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct cgw_job *gwj = NULL;
+ struct hlist_node *n;
+ int idx = 0;
+ int s_idx = cb->args[0];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(gwj, n, &cgw_list, list) {
+ if (idx < s_idx)
+ goto cont;
+
+ if (cgw_put_job(skb, gwj) < 0)
+ break;
+cont:
+ idx++;
+ }
+ rcu_read_unlock();
+
+ cb->args[0] = idx;
+
+ return skb->len;
+}
+
+/* check for common and gwtype specific attributes */
+static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
+ u8 gwtype, void *gwtypeattr)
+{
+ struct nlattr *tb[CGW_MAX+1];
+ struct cgw_frame_mod mb;
+ int modidx = 0;
+ int err = 0;
+
+ /* initialize modification & checksum data space */
+ memset(mod, 0, sizeof(*mod));
+
+ err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, NULL);
+ if (err < 0)
+ return err;
+
+ /* check for AND/OR/XOR/SET modifications */
+
+ if (tb[CGW_MOD_AND] &&
+ nla_len(tb[CGW_MOD_AND]) == CGW_MODATTR_LEN) {
+ nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);
+
+ canframecpy(&mod->modframe.and, &mb.cf);
+ mod->modtype.and = mb.modtype;
+
+ if (mb.modtype & CGW_MOD_ID)
+ mod->modfunc[modidx++] = mod_and_id;
+
+ if (mb.modtype & CGW_MOD_DLC)
+ mod->modfunc[modidx++] = mod_and_dlc;
+
+ if (mb.modtype & CGW_MOD_DATA)
+ mod->modfunc[modidx++] = mod_and_data;
+ }
+
+ if (tb[CGW_MOD_OR] &&
+ nla_len(tb[CGW_MOD_OR]) == CGW_MODATTR_LEN) {
+ nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);
+
+ canframecpy(&mod->modframe.or, &mb.cf);
+ mod->modtype.or = mb.modtype;
+
+ if (mb.modtype & CGW_MOD_ID)
+ mod->modfunc[modidx++] = mod_or_id;
+
+ if (mb.modtype & CGW_MOD_DLC)
+ mod->modfunc[modidx++] = mod_or_dlc;
+
+ if (mb.modtype & CGW_MOD_DATA)
+ mod->modfunc[modidx++] = mod_or_data;
+ }
+
+ if (tb[CGW_MOD_XOR] &&
+ nla_len(tb[CGW_MOD_XOR]) == CGW_MODATTR_LEN) {
+ nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);
+
+ canframecpy(&mod->modframe.xor, &mb.cf);
+ mod->modtype.xor = mb.modtype;
+
+ if (mb.modtype & CGW_MOD_ID)
+ mod->modfunc[modidx++] = mod_xor_id;
+
+ if (mb.modtype & CGW_MOD_DLC)
+ mod->modfunc[modidx++] = mod_xor_dlc;
+
+ if (mb.modtype & CGW_MOD_DATA)
+ mod->modfunc[modidx++] = mod_xor_data;
+ }
+
+ if (tb[CGW_MOD_SET] &&
+ nla_len(tb[CGW_MOD_SET]) == CGW_MODATTR_LEN) {
+ nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);
+
+ canframecpy(&mod->modframe.set, &mb.cf);
+ mod->modtype.set = mb.modtype;
+
+ if (mb.modtype & CGW_MOD_ID)
+ mod->modfunc[modidx++] = mod_set_id;
+
+ if (mb.modtype & CGW_MOD_DLC)
+ mod->modfunc[modidx++] = mod_set_dlc;
+
+ if (mb.modtype & CGW_MOD_DATA)
+ mod->modfunc[modidx++] = mod_set_data;
+ }
+
+ /* check for checksum operations after CAN frame modifications */
+ if (modidx) {
+
+ if (tb[CGW_CS_CRC8] &&
+ nla_len(tb[CGW_CS_CRC8]) == CGW_CS_CRC8_LEN) {
+
+ struct cgw_csum_crc8 *c = (struct cgw_csum_crc8 *)\
+ nla_data(tb[CGW_CS_CRC8]);
+
+ err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
+ c->result_idx);
+ if (err)
+ return err;
+
+ nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
+ CGW_CS_CRC8_LEN);
+
+ /*
+ * select dedicated processing function to reduce
+ * runtime operations in receive hot path.
+ */
+ if (c->from_idx < 0 || c->to_idx < 0 ||
+ c->result_idx < 0)
+ mod->csumfunc.crc8 = cgw_csum_crc8_rel;
+ else if (c->from_idx <= c->to_idx)
+ mod->csumfunc.crc8 = cgw_csum_crc8_pos;
+ else
+ mod->csumfunc.crc8 = cgw_csum_crc8_neg;
+ }
+
+ if (tb[CGW_CS_XOR] &&
+ nla_len(tb[CGW_CS_XOR]) == CGW_CS_XOR_LEN) {
+
+ struct cgw_csum_xor *c = (struct cgw_csum_xor *)\
+ nla_data(tb[CGW_CS_XOR]);
+
+ err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
+ c->result_idx);
+ if (err)
+ return err;
+
+ nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
+ CGW_CS_XOR_LEN);
+
+ /*
+ * select dedicated processing function to reduce
+ * runtime operations in receive hot path.
+ */
+ if (c->from_idx < 0 || c->to_idx < 0 ||
+ c->result_idx < 0)
+ mod->csumfunc.xor = cgw_csum_xor_rel;
+ else if (c->from_idx <= c->to_idx)
+ mod->csumfunc.xor = cgw_csum_xor_pos;
+ else
+ mod->csumfunc.xor = cgw_csum_xor_neg;
+ }
+ }
+
+ if (gwtype == CGW_TYPE_CAN_CAN) {
+
+ /* check CGW_TYPE_CAN_CAN specific attributes */
+
+ struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;
+ memset(ccgw, 0, sizeof(*ccgw));
+
+ /* check for can_filter in attributes */
+ if (tb[CGW_FILTER] &&
+ nla_len(tb[CGW_FILTER]) == sizeof(struct can_filter))
+ nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
+ sizeof(struct can_filter));
+
+ err = -ENODEV;
+
+ /* specifying two interfaces is mandatory */
+ if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
+ return err;
+
+ if (nla_len(tb[CGW_SRC_IF]) == sizeof(u32))
+ nla_memcpy(&ccgw->src_idx, tb[CGW_SRC_IF],
+ sizeof(u32));
+
+ if (nla_len(tb[CGW_DST_IF]) == sizeof(u32))
+ nla_memcpy(&ccgw->dst_idx, tb[CGW_DST_IF],
+ sizeof(u32));
+
+ /* both indices set to 0 for flushing all routing entries */
+ if (!ccgw->src_idx && !ccgw->dst_idx)
+ return 0;
+
+ /* only one index set to 0 is an error */
+ if (!ccgw->src_idx || !ccgw->dst_idx)
+ return err;
+ }
+
+ /* add the checks for other gwtypes here */
+
+ return 0;
+}
+
+static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+ void *arg)
+{
+ struct rtcanmsg *r;
+ struct cgw_job *gwj;
+ int err = 0;
+
+ if (nlmsg_len(nlh) < sizeof(*r))
+ return -EINVAL;
+
+ r = nlmsg_data(nlh);
+ if (r->can_family != AF_CAN)
+ return -EPFNOSUPPORT;
+
+ /* so far we only support CAN -> CAN routings */
+ if (r->gwtype != CGW_TYPE_CAN_CAN)
+ return -EINVAL;
+
+ gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
+ if (!gwj)
+ return -ENOMEM;
+
+ gwj->handled_frames = 0;
+ gwj->dropped_frames = 0;
+ gwj->flags = r->flags;
+ gwj->gwtype = r->gwtype;
+
+ err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw);
+ if (err < 0)
+ goto out;
+
+ err = -ENODEV;
+
+ /* ifindex == 0 is not allowed for job creation */
+ if (!gwj->ccgw.src_idx || !gwj->ccgw.dst_idx)
+ goto out;
+
+ gwj->src.dev = dev_get_by_index(&init_net, gwj->ccgw.src_idx);
+
+ if (!gwj->src.dev)
+ goto out;
+
+ /* check for CAN netdev not using header_ops - see gw_rcv() */
+ if (gwj->src.dev->type != ARPHRD_CAN || gwj->src.dev->header_ops)
+ goto put_src_out;
+
+ gwj->dst.dev = dev_get_by_index(&init_net, gwj->ccgw.dst_idx);
+
+ if (!gwj->dst.dev)
+ goto put_src_out;
+
+ /* check for CAN netdev not using header_ops - see gw_rcv() */
+ if (gwj->dst.dev->type != ARPHRD_CAN || gwj->dst.dev->header_ops)
+ goto put_src_dst_out;
+
+ ASSERT_RTNL();
+
+ err = cgw_register_filter(gwj);
+ if (!err)
+ hlist_add_head_rcu(&gwj->list, &cgw_list);
+
+put_src_dst_out:
+ dev_put(gwj->dst.dev);
+put_src_out:
+ dev_put(gwj->src.dev);
+out:
+ if (err)
+ kmem_cache_free(cgw_cache, gwj);
+
+ return err;
+}
+
+static void cgw_remove_all_jobs(void)
+{
+ struct cgw_job *gwj = NULL;
+ struct hlist_node *n, *nx;
+
+ ASSERT_RTNL();
+
+ hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(gwj);
+ kfree(gwj);
+ }
+}
+
+static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct cgw_job *gwj = NULL;
+ struct hlist_node *n, *nx;
+ struct rtcanmsg *r;
+ struct cf_mod mod;
+ struct can_can_gw ccgw;
+ int err = 0;
+
+ if (nlmsg_len(nlh) < sizeof(*r))
+ return -EINVAL;
+
+ r = nlmsg_data(nlh);
+ if (r->can_family != AF_CAN)
+ return -EPFNOSUPPORT;
+
+ /* so far we only support CAN -> CAN routings */
+ if (r->gwtype != CGW_TYPE_CAN_CAN)
+ return -EINVAL;
+
+ err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw);
+ if (err < 0)
+ return err;
+
+ /* two interface indices both set to 0 => remove all entries */
+ if (!ccgw.src_idx && !ccgw.dst_idx) {
+ cgw_remove_all_jobs();
+ return 0;
+ }
+
+ err = -EINVAL;
+
+ ASSERT_RTNL();
+
+ /* remove only the first matching entry */
+ hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+
+ if (gwj->flags != r->flags)
+ continue;
+
+ if (memcmp(&gwj->mod, &mod, sizeof(mod)))
+ continue;
+
+ /* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */
+ if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
+ continue;
+
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(gwj);
+ kfree(gwj);
+ err = 0;
+ break;
+ }
+
+ return err;
+}
+
+static __init int cgw_module_init(void)
+{
+ printk(banner);
+
+ cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
+ 0, 0, NULL);
+
+ if (!cgw_cache)
+ return -ENOMEM;
+
+ /* set notifier */
+ notifier.notifier_call = cgw_notifier;
+ register_netdevice_notifier(&notifier);
+
+ if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
+ unregister_netdevice_notifier(&notifier);
+ kmem_cache_destroy(cgw_cache);
+ return -ENOBUFS;
+ }
+
+ /* Only the first call to __rtnl_register can fail */
+ __rtnl_register(PF_CAN, RTM_NEWROUTE, cgw_create_job, NULL, NULL);
+ __rtnl_register(PF_CAN, RTM_DELROUTE, cgw_remove_job, NULL, NULL);
+
+ return 0;
+}
+
+static __exit void cgw_module_exit(void)
+{
+ rtnl_unregister_all(PF_CAN);
+
+ unregister_netdevice_notifier(&notifier);
+
+ rtnl_lock();
+ cgw_remove_all_jobs();
+ rtnl_unlock();
+
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
+
+ kmem_cache_destroy(cgw_cache);
+}
+
+module_init(cgw_module_init);
+module_exit(cgw_module_exit);
diff --git a/net/can/proc.c b/net/can/proc.c
index 0016f733969..ba873c36d2f 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -37,8 +37,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
diff --git a/net/can/raw.c b/net/can/raw.c
index dea99a6e596..cde1b4a20f7 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -37,8 +37,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 18ac112ea7a..68bbf9f65cb 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -324,15 +324,15 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
int err;
u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- struct page *page = frag->page;
+ struct page *page = skb_frag_page(frag);
if (copy > len)
copy = len;
@@ -410,15 +410,15 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
int err;
u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- struct page *page = frag->page;
+ struct page *page = skb_frag_page(frag);
if (copy > len)
copy = len;
@@ -500,15 +500,15 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
int err;
u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- struct page *page = frag->page;
+ struct page *page = skb_frag_page(frag);
if (copy > len)
copy = len;
@@ -585,16 +585,16 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
__wsum csum2;
int err = 0;
u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- struct page *page = frag->page;
+ struct page *page = skb_frag_page(frag);
if (copy > len)
copy = len;
diff --git a/net/core/dev.c b/net/core/dev.c
index b10ff0a7185..edcf019c056 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -133,6 +133,10 @@
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
+#include <linux/if_tunnel.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <linux/net_tstamp.h>
#include "net-sysfs.h"
@@ -1474,6 +1478,57 @@ static inline void net_timestamp_check(struct sk_buff *skb)
__net_timestamp(skb);
}
+static int net_hwtstamp_validate(struct ifreq *ifr)
+{
+ struct hwtstamp_config cfg;
+ enum hwtstamp_tx_types tx_type;
+ enum hwtstamp_rx_filters rx_filter;
+ int tx_type_valid = 0;
+ int rx_filter_valid = 0;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ if (cfg.flags) /* reserved for future extensions */
+ return -EINVAL;
+
+ tx_type = cfg.tx_type;
+ rx_filter = cfg.rx_filter;
+
+ switch (tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ tx_type_valid = 1;
+ break;
+ }
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ rx_filter_valid = 1;
+ break;
+ }
+
+ if (!tx_type_valid || !rx_filter_valid)
+ return -ERANGE;
+
+ return 0;
+}
+
static inline bool is_skb_forwardable(struct net_device *dev,
struct sk_buff *skb)
{
@@ -1955,9 +2010,11 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
#ifdef CONFIG_HIGHMEM
int i;
if (!(dev->features & NETIF_F_HIGHDMA)) {
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- if (PageHighMem(skb_shinfo(skb)->frags[i].page))
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ if (PageHighMem(skb_frag_page(frag)))
return 1;
+ }
}
if (PCI_DMA_BUS_IS_PHYS) {
@@ -1966,7 +2023,8 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
if (!pdev)
return 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t addr = page_to_phys(skb_frag_page(frag));
if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
return 1;
}
@@ -2527,25 +2585,31 @@ static inline void ____napi_schedule(struct softnet_data *sd,
/*
* __skb_get_rxhash: calculate a flow hash based on src/dst addresses
- * and src/dst port numbers. Returns a non-zero hash number on success
- * and 0 on failure.
+ * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
+ * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
+ * if hash is a canonical 4-tuple hash over transport ports.
*/
-__u32 __skb_get_rxhash(struct sk_buff *skb)
+void __skb_get_rxhash(struct sk_buff *skb)
{
int nhoff, hash = 0, poff;
const struct ipv6hdr *ip6;
const struct iphdr *ip;
+ const struct vlan_hdr *vlan;
u8 ip_proto;
- u32 addr1, addr2, ihl;
+ u32 addr1, addr2;
+ u16 proto;
union {
u32 v32;
u16 v16[2];
} ports;
nhoff = skb_network_offset(skb);
+ proto = skb->protocol;
- switch (skb->protocol) {
+again:
+ switch (proto) {
case __constant_htons(ETH_P_IP):
+ip:
if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
goto done;
@@ -2556,9 +2620,10 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
ip_proto = ip->protocol;
addr1 = (__force u32) ip->saddr;
addr2 = (__force u32) ip->daddr;
- ihl = ip->ihl;
+ nhoff += ip->ihl * 4;
break;
case __constant_htons(ETH_P_IPV6):
+ipv6:
if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
goto done;
@@ -2566,20 +2631,71 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
ip_proto = ip6->nexthdr;
addr1 = (__force u32) ip6->saddr.s6_addr32[3];
addr2 = (__force u32) ip6->daddr.s6_addr32[3];
- ihl = (40 >> 2);
+ nhoff += 40;
break;
+ case __constant_htons(ETH_P_8021Q):
+ if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
+ goto done;
+ vlan = (const struct vlan_hdr *) (skb->data + nhoff);
+ proto = vlan->h_vlan_encapsulated_proto;
+ nhoff += sizeof(*vlan);
+ goto again;
+ case __constant_htons(ETH_P_PPP_SES):
+ if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
+ goto done;
+ proto = *((__be16 *) (skb->data + nhoff +
+ sizeof(struct pppoe_hdr)));
+ nhoff += PPPOE_SES_HLEN;
+ switch (proto) {
+ case __constant_htons(PPP_IP):
+ goto ip;
+ case __constant_htons(PPP_IPV6):
+ goto ipv6;
+ default:
+ goto done;
+ }
default:
goto done;
}
+ switch (ip_proto) {
+ case IPPROTO_GRE:
+ if (pskb_may_pull(skb, nhoff + 16)) {
+ u8 *h = skb->data + nhoff;
+ __be16 flags = *(__be16 *)h;
+
+ /*
+ * Only look inside GRE if version zero and no
+ * routing
+ */
+ if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
+ proto = *(__be16 *)(h + 2);
+ nhoff += 4;
+ if (flags & GRE_CSUM)
+ nhoff += 4;
+ if (flags & GRE_KEY)
+ nhoff += 4;
+ if (flags & GRE_SEQ)
+ nhoff += 4;
+ goto again;
+ }
+ }
+ break;
+ case IPPROTO_IPIP:
+ goto again;
+ default:
+ break;
+ }
+
ports.v32 = 0;
poff = proto_ports_offset(ip_proto);
if (poff >= 0) {
- nhoff += ihl * 4 + poff;
+ nhoff += poff;
if (pskb_may_pull(skb, nhoff + 4)) {
ports.v32 = * (__force u32 *) (skb->data + nhoff);
if (ports.v16[1] < ports.v16[0])
swap(ports.v16[0], ports.v16[1]);
+ skb->l4_rxhash = 1;
}
}
@@ -2592,7 +2708,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
hash = 1;
done:
- return hash;
+ skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
@@ -2606,10 +2722,7 @@ static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *rflow, u16 next_cpu)
{
- u16 tcpu;
-
- tcpu = rflow->cpu = next_cpu;
- if (tcpu != RPS_NO_CPU) {
+ if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
struct netdev_rx_queue *rxqueue;
struct rps_dev_flow_table *flow_table;
@@ -2637,16 +2750,16 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
goto out;
old_rflow = rflow;
rflow = &flow_table->flows[flow_id];
- rflow->cpu = next_cpu;
rflow->filter = rc;
if (old_rflow->filter == rflow->filter)
old_rflow->filter = RPS_NO_FILTER;
out:
#endif
rflow->last_qtail =
- per_cpu(softnet_data, tcpu).input_queue_head;
+ per_cpu(softnet_data, next_cpu).input_queue_head;
}
+ rflow->cpu = next_cpu;
return rflow;
}
@@ -2681,13 +2794,13 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
map = rcu_dereference(rxqueue->rps_map);
if (map) {
if (map->len == 1 &&
- !rcu_dereference_raw(rxqueue->rps_flow_table)) {
+ !rcu_access_pointer(rxqueue->rps_flow_table)) {
tcpu = map->cpus[0];
if (cpu_online(tcpu))
cpu = tcpu;
goto done;
}
- } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
+ } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
goto done;
}
@@ -3102,8 +3215,8 @@ void netdev_rx_handler_unregister(struct net_device *dev)
{
ASSERT_RTNL();
- rcu_assign_pointer(dev->rx_handler, NULL);
- rcu_assign_pointer(dev->rx_handler_data, NULL);
+ RCU_INIT_POINTER(dev->rx_handler, NULL);
+ RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
@@ -3170,6 +3283,17 @@ another_round:
ncls:
#endif
+ if (vlan_tx_tag_present(skb)) {
+ if (pt_prev) {
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = NULL;
+ }
+ if (vlan_do_receive(&skb))
+ goto another_round;
+ else if (unlikely(!skb))
+ goto out;
+ }
+
rx_handler = rcu_dereference(skb->dev->rx_handler);
if (rx_handler) {
if (pt_prev) {
@@ -3190,18 +3314,6 @@ ncls:
}
}
- if (vlan_tx_tag_present(skb)) {
- if (pt_prev) {
- ret = deliver_skb(skb, pt_prev, orig_dev);
- pt_prev = NULL;
- }
- if (vlan_do_receive(&skb)) {
- ret = __netif_receive_skb(skb);
- goto out;
- } else if (unlikely(!skb))
- goto out;
- }
-
/* deliver only exact match when indicated */
null_or_dev = deliver_exact ? skb->dev : NULL;
@@ -3429,10 +3541,10 @@ pull:
skb->data_len -= grow;
skb_shinfo(skb)->frags[0].page_offset += grow;
- skb_shinfo(skb)->frags[0].size -= grow;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
- if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
- put_page(skb_shinfo(skb)->frags[0].page);
+ if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
+ skb_frag_unref(skb, 0);
memmove(skb_shinfo(skb)->frags,
skb_shinfo(skb)->frags + 1,
--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
@@ -3496,11 +3608,10 @@ void skb_gro_reset_offset(struct sk_buff *skb)
NAPI_GRO_CB(skb)->frag0_len = 0;
if (skb->mac_header == skb->tail &&
- !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
+ !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
NAPI_GRO_CB(skb)->frag0 =
- page_address(skb_shinfo(skb)->frags[0].page) +
- skb_shinfo(skb)->frags[0].page_offset;
- NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
+ skb_frag_address(&skb_shinfo(skb)->frags[0]);
+ NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
}
}
EXPORT_SYMBOL(skb_gro_reset_offset);
@@ -3982,6 +4093,60 @@ static int dev_ifconf(struct net *net, char __user *arg)
}
#ifdef CONFIG_PROC_FS
+
+#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
+
+struct dev_iter_state {
+ struct seq_net_private p;
+ unsigned int pos; /* bucket << BUCKET_SPACE + offset */
+};
+
+#define get_bucket(x) ((x) >> BUCKET_SPACE)
+#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
+#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
+
+static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
+{
+ struct dev_iter_state *state = seq->private;
+ struct net *net = seq_file_net(seq);
+ struct net_device *dev;
+ struct hlist_node *p;
+ struct hlist_head *h;
+ unsigned int count, bucket, offset;
+
+ bucket = get_bucket(state->pos);
+ offset = get_offset(state->pos);
+ h = &net->dev_name_head[bucket];
+ count = 0;
+ hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
+ if (count++ == offset) {
+ state->pos = set_bucket_offset(bucket, count);
+ return dev;
+ }
+ }
+
+ return NULL;
+}
+
+static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
+{
+ struct dev_iter_state *state = seq->private;
+ struct net_device *dev;
+ unsigned int bucket;
+
+ bucket = get_bucket(state->pos);
+ do {
+ dev = dev_from_same_bucket(seq);
+ if (dev)
+ return dev;
+
+ bucket++;
+ state->pos = set_bucket_offset(bucket, 0);
+ } while (bucket < NETDEV_HASHENTRIES);
+
+ return NULL;
+}
+
/*
* This is invoked by the /proc filesystem handler to display a device
* in detail.
@@ -3989,33 +4154,33 @@ static int dev_ifconf(struct net *net, char __user *arg)
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
- struct net *net = seq_file_net(seq);
- loff_t off;
- struct net_device *dev;
+ struct dev_iter_state *state = seq->private;
rcu_read_lock();
if (!*pos)
return SEQ_START_TOKEN;
- off = 1;
- for_each_netdev_rcu(net, dev)
- if (off++ == *pos)
- return dev;
+ /* check for end of the hash */
+ if (state->pos == 0 && *pos > 1)
+ return NULL;
- return NULL;
+ return dev_from_new_bucket(seq);
}
void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct net_device *dev = v;
+ struct net_device *dev;
+
+ ++*pos;
if (v == SEQ_START_TOKEN)
- dev = first_net_device_rcu(seq_file_net(seq));
- else
- dev = next_net_device_rcu(dev);
+ return dev_from_new_bucket(seq);
- ++*pos;
- return dev;
+ dev = dev_from_same_bucket(seq);
+ if (dev)
+ return dev;
+
+ return dev_from_new_bucket(seq);
}
void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4114,7 +4279,7 @@ static const struct seq_operations dev_seq_ops = {
static int dev_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &dev_seq_ops,
- sizeof(struct seq_net_private));
+ sizeof(struct dev_iter_state));
}
static const struct file_operations dev_seq_fops = {
@@ -4497,9 +4662,7 @@ void __dev_set_rx_mode(struct net_device *dev)
if (!netif_device_present(dev))
return;
- if (ops->ndo_set_rx_mode)
- ops->ndo_set_rx_mode(dev);
- else {
+ if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
/* Unicast addresses changes may only happen under the rtnl,
* therefore calling __dev_set_promiscuity here is safe.
*/
@@ -4510,10 +4673,10 @@ void __dev_set_rx_mode(struct net_device *dev)
__dev_set_promiscuity(dev, -1);
dev->uc_promisc = false;
}
-
- if (ops->ndo_set_multicast_list)
- ops->ndo_set_multicast_list(dev);
}
+
+ if (ops->ndo_set_rx_mode)
+ ops->ndo_set_rx_mode(dev);
}
void dev_set_rx_mode(struct net_device *dev)
@@ -4524,30 +4687,6 @@ void dev_set_rx_mode(struct net_device *dev)
}
/**
- * dev_ethtool_get_settings - call device's ethtool_ops::get_settings()
- * @dev: device
- * @cmd: memory area for ethtool_ops::get_settings() result
- *
- * The cmd arg is initialized properly (cleared and
- * ethtool_cmd::cmd field set to ETHTOOL_GSET).
- *
- * Return device's ethtool_ops::get_settings() result value or
- * -EOPNOTSUPP when device doesn't expose
- * ethtool_ops::get_settings() operation.
- */
-int dev_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
- return -EOPNOTSUPP;
-
- memset(cmd, 0, sizeof(struct ethtool_cmd));
- cmd->cmd = ETHTOOL_GSET;
- return dev->ethtool_ops->get_settings(dev, cmd);
-}
-EXPORT_SYMBOL(dev_ethtool_get_settings);
-
-/**
* dev_get_flags - get flags reported to userspace
* @dev: device
*
@@ -4863,7 +5002,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
return -EOPNOTSUPP;
case SIOCADDMULTI:
- if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+ if (!ops->ndo_set_rx_mode ||
ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
return -EINVAL;
if (!netif_device_present(dev))
@@ -4871,7 +5010,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
case SIOCDELMULTI:
- if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+ if (!ops->ndo_set_rx_mode ||
ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
return -EINVAL;
if (!netif_device_present(dev))
@@ -4888,6 +5027,12 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
ifr->ifr_newname[IFNAMSIZ-1] = '\0';
return dev_change_name(dev, ifr->ifr_newname);
+ case SIOCSHWTSTAMP:
+ err = net_hwtstamp_validate(ifr);
+ if (err)
+ return err;
+ /* fall through */
+
/*
* Unknown or private ioctl
*/
@@ -5202,7 +5347,7 @@ static void rollback_registered_many(struct list_head *head)
dev = list_first_entry(head, struct net_device, unreg_list);
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
- rcu_barrier();
+ synchronize_net();
list_for_each_entry(dev, head, unreg_list)
dev_put(dev);
@@ -5715,6 +5860,12 @@ void netdev_run_todo(void)
__rtnl_unlock();
+ /* Wait for rcu callbacks to finish before attempting to drain
+ * the device list. This usually avoids a 250ms wait.
+ */
+ if (!list_empty(&list))
+ rcu_barrier();
+
while (!list_empty(&list)) {
struct net_device *dev
= list_first_entry(&list, struct net_device, todo_list);
@@ -5735,8 +5886,8 @@ void netdev_run_todo(void)
/* paranoia */
BUG_ON(netdev_refcnt_read(dev));
- WARN_ON(rcu_dereference_raw(dev->ip_ptr));
- WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
+ WARN_ON(rcu_access_pointer(dev->ip_ptr));
+ WARN_ON(rcu_access_pointer(dev->ip6_ptr));
WARN_ON(dev->dn_ptr);
if (dev->destructor)
@@ -5940,7 +6091,7 @@ void free_netdev(struct net_device *dev)
kfree(dev->_rx);
#endif
- kfree(rcu_dereference_raw(dev->ingress_queue));
+ kfree(rcu_dereference_protected(dev->ingress_queue, 1));
/* Flush device addresses */
dev_addr_flush(dev);
@@ -6115,6 +6266,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
+ rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
/*
* Flush the unicast and multicast chains
@@ -6298,7 +6450,7 @@ const char *netdev_drivername(const struct net_device *dev)
return empty;
}
-static int __netdev_printk(const char *level, const struct net_device *dev,
+int __netdev_printk(const char *level, const struct net_device *dev,
struct va_format *vaf)
{
int r;
@@ -6313,6 +6465,7 @@ static int __netdev_printk(const char *level, const struct net_device *dev,
return r;
}
+EXPORT_SYMBOL(__netdev_printk);
int netdev_printk(const char *level, const struct net_device *dev,
const char *format, ...)
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index e2e66939ed0..283d1b86387 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -591,8 +591,8 @@ EXPORT_SYMBOL(dev_mc_del_global);
* addresses that have no users left. The source device must be
* locked by netif_tx_lock_bh.
*
- * This function is intended to be called from the dev->set_multicast_list
- * or dev->set_rx_mode function of layered software devices.
+ * This function is intended to be called from the ndo_set_rx_mode
+ * function of layered software devices.
*/
int dev_mc_sync(struct net_device *to, struct net_device *from)
{
diff --git a/net/core/dst.c b/net/core/dst.c
index 14b33baf073..d5e2c4c0910 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -171,7 +171,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
dst_init_metrics(dst, dst_default_metrics, true);
dst->expires = 0UL;
dst->path = dst;
- dst->_neighbour = NULL;
+ RCU_INIT_POINTER(dst->_neighbour, NULL);
#ifdef CONFIG_XFRM
dst->xfrm = NULL;
#endif
@@ -229,11 +229,11 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
smp_rmb();
again:
- neigh = dst->_neighbour;
+ neigh = rcu_dereference_protected(dst->_neighbour, 1);
child = dst->child;
if (neigh) {
- dst->_neighbour = NULL;
+ RCU_INIT_POINTER(dst->_neighbour, NULL);
neigh_release(neigh);
}
@@ -360,14 +360,19 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
if (!unregister) {
dst->input = dst->output = dst_discard;
} else {
+ struct neighbour *neigh;
+
dst->dev = dev_net(dst->dev)->loopback_dev;
dev_hold(dst->dev);
dev_put(dev);
- if (dst->_neighbour && dst->_neighbour->dev == dev) {
- dst->_neighbour->dev = dst->dev;
+ rcu_read_lock();
+ neigh = dst_get_neighbour(dst);
+ if (neigh && neigh->dev == dev) {
+ neigh->dev = dst->dev;
dev_hold(dst->dev);
dev_put(dev);
}
+ rcu_read_unlock();
}
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6cdba5fc2be..f4448170712 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -569,15 +569,25 @@ int __ethtool_set_flags(struct net_device *dev, u32 data)
return 0;
}
-static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
+int __ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
- int err;
+ ASSERT_RTNL();
- if (!dev->ethtool_ops->get_settings)
+ if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
return -EOPNOTSUPP;
- err = dev->ethtool_ops->get_settings(dev, &cmd);
+ memset(cmd, 0, sizeof(struct ethtool_cmd));
+ cmd->cmd = ETHTOOL_GSET;
+ return dev->ethtool_ops->get_settings(dev, cmd);
+}
+EXPORT_SYMBOL(__ethtool_get_settings);
+
+static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
+{
+ int err;
+ struct ethtool_cmd cmd;
+
+ err = __ethtool_get_settings(dev, &cmd);
if (err < 0)
return err;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 3231b468bb7..57e8f95110e 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -475,8 +475,11 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
list_del_rcu(&rule->list);
- if (rule->action == FR_ACT_GOTO)
+ if (rule->action == FR_ACT_GOTO) {
ops->nr_goto_rules--;
+ if (rtnl_dereference(rule->ctarget) == NULL)
+ ops->unresolved_rules--;
+ }
/*
* Check if this rule is a target to any of them. If so,
@@ -487,7 +490,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
if (ops->nr_goto_rules > 0) {
list_for_each_entry(tmp, &ops->rules_list, list) {
if (rtnl_dereference(tmp->ctarget) == rule) {
- rcu_assign_pointer(tmp->ctarget, NULL);
+ RCU_INIT_POINTER(tmp->ctarget, NULL);
ops->unresolved_rules++;
}
}
@@ -545,7 +548,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
frh->flags = rule->flags;
if (rule->action == FR_ACT_GOTO &&
- rcu_dereference_raw(rule->ctarget) == NULL)
+ rcu_access_pointer(rule->ctarget) == NULL)
frh->flags |= FIB_RULE_UNRESOLVED;
if (rule->iifname[0]) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 36f975fa87c..5dea4527921 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -436,7 +436,7 @@ error:
*
* Returns 0 if the rule set is legal or -EINVAL if not.
*/
-int sk_chk_filter(struct sock_filter *filter, int flen)
+int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
/*
* Valid instructions are initialized to non-0.
@@ -645,7 +645,7 @@ int sk_detach_filter(struct sock *sk)
filter = rcu_dereference_protected(sk->sk_filter,
sock_owned_by_user(sk));
if (filter) {
- rcu_assign_pointer(sk->sk_filter, NULL);
+ RCU_INIT_POINTER(sk->sk_filter, NULL);
sk_filter_uncharge(sk, filter);
ret = 0;
}
diff --git a/net/core/flow.c b/net/core/flow.c
index 555a456efb0..8ae42de9c79 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -413,7 +413,7 @@ static int __init flow_cache_init(struct flow_cache *fc)
for_each_online_cpu(i) {
if (flow_cache_cpu_prepare(fc, i))
- return -ENOMEM;
+ goto err;
}
fc->hotcpu_notifier = (struct notifier_block){
.notifier_call = flow_cache_cpu,
@@ -426,6 +426,18 @@ static int __init flow_cache_init(struct flow_cache *fc)
add_timer(&fc->rnd_timer);
return 0;
+
+err:
+ for_each_possible_cpu(i) {
+ struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
+ kfree(fcp->hash_table);
+ fcp->hash_table = NULL;
+ }
+
+ free_percpu(fc->percpu);
+ fc->percpu = NULL;
+
+ return -ENOMEM;
}
static int __init flow_cache_init_global(void)
diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h
index 283c2b993fb..81e1ed7c838 100644
--- a/net/core/kmap_skb.h
+++ b/net/core/kmap_skb.h
@@ -7,7 +7,7 @@ static inline void *kmap_skb_frag(const skb_frag_t *frag)
local_bh_disable();
#endif
- return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
+ return kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ);
}
static inline void kunmap_skb_frag(void *vaddr)
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 357bd4ee4ba..c3519c6d1b1 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -78,8 +78,13 @@ static void rfc2863_policy(struct net_device *dev)
static bool linkwatch_urgent_event(struct net_device *dev)
{
- return netif_running(dev) && netif_carrier_ok(dev) &&
- qdisc_tx_changing(dev);
+ if (!netif_running(dev))
+ return false;
+
+ if (dev->ifindex != dev->iflink)
+ return true;
+
+ return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 1334d7e56f0..909ecb3c2a3 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -844,6 +844,19 @@ static void neigh_invalidate(struct neighbour *neigh)
skb_queue_purge(&neigh->arp_queue);
}
+static void neigh_probe(struct neighbour *neigh)
+ __releases(neigh->lock)
+{
+ struct sk_buff *skb = skb_peek(&neigh->arp_queue);
+ /* keep skb alive even if arp_queue overflows */
+ if (skb)
+ skb = skb_copy(skb, GFP_ATOMIC);
+ write_unlock(&neigh->lock);
+ neigh->ops->solicit(neigh, skb);
+ atomic_inc(&neigh->probes);
+ kfree_skb(skb);
+}
+
/* Called when a timer expires for a neighbour entry. */
static void neigh_timer_handler(unsigned long arg)
@@ -920,14 +933,7 @@ static void neigh_timer_handler(unsigned long arg)
neigh_hold(neigh);
}
if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
- struct sk_buff *skb = skb_peek(&neigh->arp_queue);
- /* keep skb alive even if arp_queue overflows */
- if (skb)
- skb = skb_copy(skb, GFP_ATOMIC);
- write_unlock(&neigh->lock);
- neigh->ops->solicit(neigh, skb);
- atomic_inc(&neigh->probes);
- kfree_skb(skb);
+ neigh_probe(neigh);
} else {
out:
write_unlock(&neigh->lock);
@@ -942,7 +948,7 @@ out:
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
int rc;
- unsigned long now;
+ bool immediate_probe = false;
write_lock_bh(&neigh->lock);
@@ -950,14 +956,16 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
goto out_unlock_bh;
- now = jiffies;
-
if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
+ unsigned long next, now = jiffies;
+
atomic_set(&neigh->probes, neigh->parms->ucast_probes);
neigh->nud_state = NUD_INCOMPLETE;
- neigh->updated = jiffies;
- neigh_add_timer(neigh, now + 1);
+ neigh->updated = now;
+ next = now + max(neigh->parms->retrans_time, HZ/2);
+ neigh_add_timer(neigh, next);
+ immediate_probe = true;
} else {
neigh->nud_state = NUD_FAILED;
neigh->updated = jiffies;
@@ -989,7 +997,11 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
rc = 1;
}
out_unlock_bh:
- write_unlock_bh(&neigh->lock);
+ if (immediate_probe)
+ neigh_probe(neigh);
+ else
+ write_unlock(&neigh->lock);
+ local_bh_enable();
return rc;
}
EXPORT_SYMBOL(__neigh_event_send);
@@ -1156,10 +1168,14 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
struct dst_entry *dst = skb_dst(skb);
struct neighbour *n2, *n1 = neigh;
write_unlock_bh(&neigh->lock);
+
+ rcu_read_lock();
/* On shaper/eql skb->dst->neighbour != neigh :( */
if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
n1 = n2;
n1->output(n1, skb);
+ rcu_read_unlock();
+
write_lock_bh(&neigh->lock);
}
skb_queue_purge(&neigh->arp_queue);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 1683e5db2f2..7604a635376 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -147,7 +147,7 @@ static ssize_t show_speed(struct device *dev,
if (netif_running(netdev)) {
struct ethtool_cmd cmd;
- if (!dev_ethtool_get_settings(netdev, &cmd))
+ if (!__ethtool_get_settings(netdev, &cmd))
ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
}
rtnl_unlock();
@@ -165,7 +165,7 @@ static ssize_t show_duplex(struct device *dev,
if (netif_running(netdev)) {
struct ethtool_cmd cmd;
- if (!dev_ethtool_get_settings(netdev, &cmd))
+ if (!__ethtool_get_settings(netdev, &cmd))
ret = sprintf(buf, "%s\n",
cmd.duplex ? "full" : "half");
}
@@ -712,13 +712,13 @@ static void rx_queue_release(struct kobject *kobj)
struct rps_dev_flow_table *flow_table;
- map = rcu_dereference_raw(queue->rps_map);
+ map = rcu_dereference_protected(queue->rps_map, 1);
if (map) {
RCU_INIT_POINTER(queue->rps_map, NULL);
kfree_rcu(map, rcu);
}
- flow_table = rcu_dereference_raw(queue->rps_flow_table);
+ flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
if (flow_table) {
RCU_INIT_POINTER(queue->rps_flow_table, NULL);
call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
@@ -987,10 +987,10 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
}
if (nonempty)
- rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+ RCU_INIT_POINTER(dev->xps_maps, new_dev_maps);
else {
kfree(new_dev_maps);
- rcu_assign_pointer(dev->xps_maps, NULL);
+ RCU_INIT_POINTER(dev->xps_maps, NULL);
}
if (dev_maps)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 52622517e0d..f57d94627a2 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -762,7 +762,7 @@ int __netpoll_setup(struct netpoll *np)
}
/* last thing to do is link it to the net device structure */
- rcu_assign_pointer(ndev->npinfo, npinfo);
+ RCU_INIT_POINTER(ndev->npinfo, npinfo);
return 0;
@@ -903,7 +903,7 @@ void __netpoll_cleanup(struct netpoll *np)
if (ops->ndo_netpoll_cleanup)
ops->ndo_netpoll_cleanup(np->dev);
- rcu_assign_pointer(np->dev->npinfo, NULL);
+ RCU_INIT_POINTER(np->dev->npinfo, NULL);
/* avoid racing with NAPI reading npinfo */
synchronize_rcu_bh();
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index e35a6fbb811..0001c243b35 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2145,9 +2145,12 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
}
start_time = ktime_now();
- if (remaining < 100000)
- ndelay(remaining); /* really small just spin */
- else {
+ if (remaining < 100000) {
+ /* for small delays (<100us), just loop until limit is reached */
+ do {
+ end_time = ktime_now();
+ } while (ktime_lt(end_time, spin_until));
+ } else {
/* see do_nanosleep */
hrtimer_init_sleeper(&t, current);
do {
@@ -2162,8 +2165,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
hrtimer_cancel(&t.timer);
} while (t.task && pkt_dev->running && !signal_pending(current));
__set_current_state(TASK_RUNNING);
+ end_time = ktime_now();
}
- end_time = ktime_now();
pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
@@ -2602,18 +2605,18 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
if (!pkt_dev->page)
break;
}
- skb_shinfo(skb)->frags[i].page = pkt_dev->page;
get_page(pkt_dev->page);
+ skb_frag_set_page(skb, i, pkt_dev->page);
skb_shinfo(skb)->frags[i].page_offset = 0;
/*last fragment, fill rest of data*/
if (i == (frags - 1))
- skb_shinfo(skb)->frags[i].size =
- (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
+ skb_frag_size_set(&skb_shinfo(skb)->frags[i],
+ (datalen < PAGE_SIZE ? datalen : PAGE_SIZE));
else
- skb_shinfo(skb)->frags[i].size = frag_len;
- datalen -= skb_shinfo(skb)->frags[i].size;
- skb->len += skb_shinfo(skb)->frags[i].size;
- skb->data_len += skb_shinfo(skb)->frags[i].size;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[i], frag_len);
+ datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
i++;
skb_shinfo(skb)->nr_frags = i;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 99d9e953fe3..9083e82bdae 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -731,7 +731,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev)
size += num_vfs *
(nla_total_size(sizeof(struct ifla_vf_mac)) +
nla_total_size(sizeof(struct ifla_vf_vlan)) +
- nla_total_size(sizeof(struct ifla_vf_tx_rate)));
+ nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
+ nla_total_size(sizeof(struct ifla_vf_spoofchk)));
return size;
} else
return 0;
@@ -954,13 +955,27 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
struct ifla_vf_mac vf_mac;
struct ifla_vf_vlan vf_vlan;
struct ifla_vf_tx_rate vf_tx_rate;
+ struct ifla_vf_spoofchk vf_spoofchk;
+
+ /*
+ * Not all SR-IOV capable drivers support the
+ * spoofcheck query. Preset to -1 so the user
+ * space tool can detect that the driver didn't
+ * report anything.
+ */
+ ivi.spoofchk = -1;
if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
break;
- vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf;
+ vf_mac.vf =
+ vf_vlan.vf =
+ vf_tx_rate.vf =
+ vf_spoofchk.vf = ivi.vf;
+
memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
vf_vlan.vlan = ivi.vlan;
vf_vlan.qos = ivi.qos;
vf_tx_rate.rate = ivi.tx_rate;
+ vf_spoofchk.setting = ivi.spoofchk;
vf = nla_nest_start(skb, IFLA_VF_INFO);
if (!vf) {
nla_nest_cancel(skb, vfinfo);
@@ -968,7 +983,10 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
}
NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
- NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate);
+ NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
+ &vf_tx_rate);
+ NLA_PUT(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
+ &vf_spoofchk);
nla_nest_end(skb, vf);
}
nla_nest_end(skb, vfinfo);
@@ -1202,6 +1220,15 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
ivt->rate);
break;
}
+ case IFLA_VF_SPOOFCHK: {
+ struct ifla_vf_spoofchk *ivs;
+ ivs = nla_data(vf);
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_spoofchk)
+ err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
+ ivs->setting);
+ break;
+ }
default:
err = -EINVAL;
break;
@@ -1604,7 +1631,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
dev_net_set(dev, net);
dev->rtnl_link_ops = ops;
dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
- dev->real_num_tx_queues = real_num_queues;
if (tb[IFLA_MTU])
dev->mtu = nla_get_u32(tb[IFLA_MTU]);
diff --git a/net/core/scm.c b/net/core/scm.c
index 811b53fb330..ff52ad0a515 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -173,7 +173,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
if (err)
goto error;
- if (pid_vnr(p->pid) != p->creds.pid) {
+ if (!p->pid || pid_vnr(p->pid) != p->creds.pid) {
struct pid *pid;
err = -ESRCH;
pid = find_get_pid(p->creds.pid);
@@ -183,8 +183,9 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
p->pid = pid;
}
- if ((p->cred->euid != p->creds.uid) ||
- (p->cred->egid != p->creds.gid)) {
+ if (!p->cred ||
+ (p->cred->euid != p->creds.uid) ||
+ (p->cred->egid != p->creds.gid)) {
struct cred *cred;
err = -ENOMEM;
cred = prepare_creds();
@@ -193,7 +194,8 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
cred->uid = cred->euid = p->creds.uid;
cred->gid = cred->egid = p->creds.gid;
- put_cred(p->cred);
+ if (p->cred)
+ put_cred(p->cred);
p->cred = cred;
}
break;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 45329d7c9dd..025233de25f 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -35,7 +35,7 @@ static u32 seq_scale(u32 seq)
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
__be16 sport, __be16 dport)
{
u32 secret[MD5_MESSAGE_BYTES / 4];
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 387703f56fc..ca4db40e75b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -184,11 +184,20 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
goto out;
prefetchw(skb);
- size = SKB_DATA_ALIGN(size);
- data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
- gfp_mask, node);
+ /* We do our best to align skb_shared_info on a separate cache
+ * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
+ * aligned memory blocks, unless SLUB/SLAB debug is enabled.
+ * Both skb->head and skb_shared_info are cache line aligned.
+ */
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ data = kmalloc_node_track_caller(size, gfp_mask, node);
if (!data)
goto nodata;
+ /* kmalloc(size) might give us more room than requested.
+ * Put skb_shared_info exactly at the end of allocated zone,
+ * to allow max possible filling before reallocation.
+ */
+ size = SKB_WITH_OVERHEAD(ksize(data));
prefetchw(data + size);
/*
@@ -197,7 +206,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
- skb->truesize = size + sizeof(struct sk_buff);
+ /* Account for allocated memory : skb + skb->head */
+ skb->truesize = SKB_TRUESIZE(size);
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
@@ -326,7 +336,7 @@ static void skb_release_data(struct sk_buff *skb)
if (skb_shinfo(skb)->nr_frags) {
int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- put_page(skb_shinfo(skb)->frags[i].page);
+ skb_frag_unref(skb, i);
}
/*
@@ -475,6 +485,30 @@ void consume_skb(struct sk_buff *skb)
EXPORT_SYMBOL(consume_skb);
/**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo;
+
+ skb_release_head_state(skb);
+
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->data = skb->head + NET_SKB_PAD;
+ skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
+/**
* skb_recycle_check - check if skb can be reused for receive
* @skb: buffer
* @skb_size: minimum receive buffer size
@@ -488,33 +522,10 @@ EXPORT_SYMBOL(consume_skb);
*/
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
- struct skb_shared_info *shinfo;
-
- if (irqs_disabled())
- return false;
-
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
- return false;
-
- if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+ if (!skb_is_recycleable(skb, skb_size))
return false;
- skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
- if (skb_end_pointer(skb) - skb->head < skb_size)
- return false;
-
- if (skb_shared(skb) || skb_cloned(skb))
- return false;
-
- skb_release_head_state(skb);
-
- shinfo = skb_shinfo(skb);
- memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
- atomic_set(&shinfo->dataref, 1);
-
- memset(skb, 0, offsetof(struct sk_buff, tail));
- skb->data = skb->head + NET_SKB_PAD;
- skb_reset_tail_pointer(skb);
+ skb_recycle(skb);
return true;
}
@@ -529,6 +540,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->mac_header = old->mac_header;
skb_dst_copy(new, old);
new->rxhash = old->rxhash;
+ new->ooo_okay = old->ooo_okay;
+ new->l4_rxhash = old->l4_rxhash;
#ifdef CONFIG_XFRM
new->sp = secpath_get(old->sp);
#endif
@@ -647,7 +660,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
}
vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
memcpy(page_address(page),
- vaddr + f->page_offset, f->size);
+ vaddr + f->page_offset, skb_frag_size(f));
kunmap_skb_frag(vaddr);
page->private = (unsigned long)head;
head = page;
@@ -655,14 +668,14 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
/* skb frags release userspace buffers */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- put_page(skb_shinfo(skb)->frags[i].page);
+ skb_frag_unref(skb, i);
uarg->callback(uarg);
/* skb frags point to kernel buffers */
for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
- skb_shinfo(skb)->frags[i - 1].page_offset = 0;
- skb_shinfo(skb)->frags[i - 1].page = head;
+ __skb_fill_page_desc(skb, i-1, head, 0,
+ skb_shinfo(skb)->frags[i - 1].size);
head = (struct page *)head->private;
}
@@ -820,7 +833,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
- get_page(skb_shinfo(n)->frags[i].page);
+ skb_frag_ref(skb, i);
}
skb_shinfo(n)->nr_frags = i;
}
@@ -911,7 +924,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
goto nofrags;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- get_page(skb_shinfo(skb)->frags[i].page);
+ skb_frag_ref(skb, i);
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
@@ -1178,20 +1191,20 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
goto drop_pages;
for (; i < nfrags; i++) {
- int end = offset + skb_shinfo(skb)->frags[i].size;
+ int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (end < len) {
offset = end;
continue;
}
- skb_shinfo(skb)->frags[i++].size = len - offset;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
drop_pages:
skb_shinfo(skb)->nr_frags = i;
for (; i < nfrags; i++)
- put_page(skb_shinfo(skb)->frags[i].page);
+ skb_frag_unref(skb, i);
if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
@@ -1294,9 +1307,11 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
/* Estimate size of pulled pages. */
eat = delta;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- if (skb_shinfo(skb)->frags[i].size >= eat)
+ int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ if (size >= eat)
goto pull_pages;
- eat -= skb_shinfo(skb)->frags[i].size;
+ eat -= size;
}
/* If we need update frag list, we are in troubles.
@@ -1359,14 +1374,16 @@ pull_pages:
eat = delta;
k = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- if (skb_shinfo(skb)->frags[i].size <= eat) {
- put_page(skb_shinfo(skb)->frags[i].page);
- eat -= skb_shinfo(skb)->frags[i].size;
+ int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ if (size <= eat) {
+ skb_frag_unref(skb, i);
+ eat -= size;
} else {
skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
if (eat) {
skb_shinfo(skb)->frags[k].page_offset += eat;
- skb_shinfo(skb)->frags[k].size -= eat;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
eat = 0;
}
k++;
@@ -1421,7 +1438,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
u8 *vaddr;
@@ -1619,7 +1636,8 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
- if (__splice_segment(f->page, f->page_offset, f->size,
+ if (__splice_segment(skb_frag_page(f),
+ f->page_offset, skb_frag_size(f),
offset, len, skb, spd, 0, sk, pipe))
return 1;
}
@@ -1729,7 +1747,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
WARN_ON(start > offset + len);
- end = start + frag->size;
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
u8 *vaddr;
@@ -1802,7 +1820,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
__wsum csum2;
u8 *vaddr;
@@ -1877,7 +1895,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
__wsum csum2;
u8 *vaddr;
@@ -2150,7 +2168,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
skb->data_len = len - pos;
for (i = 0; i < nfrags; i++) {
- int size = skb_shinfo(skb)->frags[i].size;
+ int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (pos + size > len) {
skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
@@ -2164,10 +2182,10 @@ static inline void skb_split_no_header(struct sk_buff *skb,
* where splitting is expensive.
* 2. Split is accurately. We make this.
*/
- get_page(skb_shinfo(skb)->frags[i].page);
+ skb_frag_ref(skb, i);
skb_shinfo(skb1)->frags[0].page_offset += len - pos;
- skb_shinfo(skb1)->frags[0].size -= len - pos;
- skb_shinfo(skb)->frags[i].size = len - pos;
+ skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
+ skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
skb_shinfo(skb)->nr_frags++;
}
k++;
@@ -2239,12 +2257,13 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
* commit all, so that we don't have to undo partial changes
*/
if (!to ||
- !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
+ !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
+ fragfrom->page_offset)) {
merge = -1;
} else {
merge = to - 1;
- todo -= fragfrom->size;
+ todo -= skb_frag_size(fragfrom);
if (todo < 0) {
if (skb_prepare_for_shift(skb) ||
skb_prepare_for_shift(tgt))
@@ -2254,8 +2273,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
fragfrom = &skb_shinfo(skb)->frags[from];
fragto = &skb_shinfo(tgt)->frags[merge];
- fragto->size += shiftlen;
- fragfrom->size -= shiftlen;
+ skb_frag_size_add(fragto, shiftlen);
+ skb_frag_size_sub(fragfrom, shiftlen);
fragfrom->page_offset += shiftlen;
goto onlymerged;
@@ -2279,20 +2298,20 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
fragfrom = &skb_shinfo(skb)->frags[from];
fragto = &skb_shinfo(tgt)->frags[to];
- if (todo >= fragfrom->size) {
+ if (todo >= skb_frag_size(fragfrom)) {
*fragto = *fragfrom;
- todo -= fragfrom->size;
+ todo -= skb_frag_size(fragfrom);
from++;
to++;
} else {
- get_page(fragfrom->page);
+ __skb_frag_ref(fragfrom);
fragto->page = fragfrom->page;
fragto->page_offset = fragfrom->page_offset;
- fragto->size = todo;
+ skb_frag_size_set(fragto, todo);
fragfrom->page_offset += todo;
- fragfrom->size -= todo;
+ skb_frag_size_sub(fragfrom, todo);
todo = 0;
to++;
@@ -2307,8 +2326,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
fragfrom = &skb_shinfo(skb)->frags[0];
fragto = &skb_shinfo(tgt)->frags[merge];
- fragto->size += fragfrom->size;
- put_page(fragfrom->page);
+ skb_frag_size_add(fragto, skb_frag_size(fragfrom));
+ __skb_frag_unref(fragfrom);
}
/* Reposition in the original skb */
@@ -2405,7 +2424,7 @@ next_skb:
while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
- block_limit = frag->size + st->stepped_offset;
+ block_limit = skb_frag_size(frag) + st->stepped_offset;
if (abs_offset < block_limit) {
if (!st->frag_data)
@@ -2423,7 +2442,7 @@ next_skb:
}
st->frag_idx++;
- st->stepped_offset += frag->size;
+ st->stepped_offset += skb_frag_size(frag);
}
if (st->frag_data) {
@@ -2553,14 +2572,13 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
left = PAGE_SIZE - frag->page_offset;
copy = (length > left)? left : length;
- ret = getfrag(from, (page_address(frag->page) +
- frag->page_offset + frag->size),
+ ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
offset, copy, 0, skb);
if (ret < 0)
return -EFAULT;
/* copy was successful so update the size parameters */
- frag->size += copy;
+ skb_frag_size_add(frag, copy);
skb->len += copy;
skb->data_len += copy;
offset += copy;
@@ -2706,12 +2724,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
while (pos < offset + len && i < nfrags) {
*frag = skb_shinfo(skb)->frags[i];
- get_page(frag->page);
- size = frag->size;
+ __skb_frag_ref(frag);
+ size = skb_frag_size(frag);
if (pos < offset) {
frag->page_offset += offset - pos;
- frag->size -= offset - pos;
+ skb_frag_size_sub(frag, offset - pos);
}
skb_shinfo(nskb)->nr_frags++;
@@ -2720,7 +2738,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
i++;
pos += size;
} else {
- frag->size -= pos + size - (offset + len);
+ skb_frag_size_sub(frag, pos + size - (offset + len));
goto skip_fraglist;
}
@@ -2800,7 +2818,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
} while (--i);
frag->page_offset += offset;
- frag->size -= offset;
+ skb_frag_size_sub(frag, offset);
skb->truesize -= skb->data_len;
skb->len -= skb->data_len;
@@ -2852,7 +2870,7 @@ merge:
unsigned int eat = offset - headlen;
skbinfo->frags[0].page_offset += eat;
- skbinfo->frags[0].size -= eat;
+ skb_frag_size_sub(&skbinfo->frags[0], eat);
skb->data_len -= eat;
skb->len -= eat;
offset = headlen;
@@ -2923,13 +2941,13 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
- sg_set_page(&sg[elt], frag->page, copy,
+ sg_set_page(&sg[elt], skb_frag_page(frag), copy,
frag->page_offset+offset-start);
elt++;
if (!(len -= copy))
diff --git a/net/core/sock.c b/net/core/sock.c
index bc745d00ea4..4ed7b1d12f5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -207,7 +207,7 @@ static struct lock_class_key af_callback_keys[AF_MAX];
* not depend upon such differences.
*/
#define _SK_MEM_PACKETS 256
-#define _SK_MEM_OVERHEAD (sizeof(struct sk_buff) + 256)
+#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
@@ -387,7 +387,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
sk_tx_queue_clear(sk);
- rcu_assign_pointer(sk->sk_dst_cache, NULL);
+ RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
dst_release(dst);
return NULL;
}
@@ -738,10 +738,7 @@ set_rcvbuf:
/* We implement the SO_SNDLOWAT etc to
not be settable (1003.1g 5.3) */
case SO_RXQ_OVFL:
- if (valbool)
- sock_set_flag(sk, SOCK_RXQ_OVFL);
- else
- sock_reset_flag(sk, SOCK_RXQ_OVFL);
+ sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
break;
default:
ret = -ENOPROTOOPT;
@@ -1158,7 +1155,7 @@ static void __sk_free(struct sock *sk)
atomic_read(&sk->sk_wmem_alloc) == 0);
if (filter) {
sk_filter_uncharge(sk, filter);
- rcu_assign_pointer(sk->sk_filter, NULL);
+ RCU_INIT_POINTER(sk->sk_filter, NULL);
}
sock_disable_timestamp(sk, SOCK_TIMESTAMP);
@@ -1260,6 +1257,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free() */
newsk->sk_destruct = NULL;
+ bh_unlock_sock(newsk);
sk_free(newsk);
newsk = NULL;
goto out;
@@ -1533,7 +1531,6 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
skb_shinfo(skb)->nr_frags = npages;
for (i = 0; i < npages; i++) {
struct page *page;
- skb_frag_t *frag;
page = alloc_pages(sk->sk_allocation, 0);
if (!page) {
@@ -1543,12 +1540,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
goto failure;
}
- frag = &skb_shinfo(skb)->frags[i];
- frag->page = page;
- frag->page_offset = 0;
- frag->size = (data_len >= PAGE_SIZE ?
- PAGE_SIZE :
- data_len);
+ __skb_fill_page_desc(skb, i,
+ page, 0,
+ (data_len >= PAGE_SIZE ?
+ PAGE_SIZE :
+ data_len));
data_len -= PAGE_SIZE;
}
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 98a52640e7c..82fb28857b6 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -57,9 +57,13 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
case PTP_CLASS_V2_VLAN:
phydev = skb->dev->phydev;
if (likely(phydev->drv->txtstamp)) {
+ if (!atomic_inc_not_zero(&sk->sk_refcnt))
+ return;
clone = skb_clone(skb, GFP_ATOMIC);
- if (!clone)
+ if (!clone) {
+ sock_put(sk);
return;
+ }
clone->sk = sk;
phydev->drv->txtstamp(phydev, clone, type);
}
@@ -77,8 +81,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
struct sock_exterr_skb *serr;
int err;
- if (!hwtstamps)
+ if (!hwtstamps) {
+ sock_put(sk);
+ kfree_skb(skb);
return;
+ }
*skb_hwtstamps(skb) = *hwtstamps;
serr = SKB_EXT_ERR(skb);
@@ -87,6 +94,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
skb->sk = NULL;
err = sock_queue_err_skb(sk, skb);
+ sock_put(sk);
if (err)
kfree_skb(skb);
}
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 25d717ebc92..2d7cf3d52b4 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -71,14 +71,14 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(frag);
copy = end - offset;
if (copy > 0) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- struct page *page = frag->page;
+ struct page *page = skb_frag_page(frag);
if (copy > len)
copy = len;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 3cb56af4e13..9bfbc1d1b50 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1255,7 +1255,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
spin_lock(&dcb_lock);
list_for_each_entry(itr, &dcb_app_list, list) {
- if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
+ if (itr->ifindex == netdev->ifindex) {
err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
&itr->app);
if (err) {
@@ -1412,7 +1412,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
goto dcb_unlock;
list_for_each_entry(itr, &dcb_app_list, list) {
- if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
+ if (itr->ifindex == netdev->ifindex) {
struct nlattr *app_nest = nla_nest_start(skb,
DCB_ATTR_APP);
if (!app_nest)
@@ -2050,7 +2050,7 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->app.selector == app->selector &&
itr->app.protocol == app->protocol &&
- (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ itr->ifindex == dev->ifindex) {
prio = itr->app.priority;
break;
}
@@ -2073,15 +2073,17 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
struct dcb_app_type *itr;
struct dcb_app_type event;
- memcpy(&event.name, dev->name, sizeof(event.name));
+ event.ifindex = dev->ifindex;
memcpy(&event.app, new, sizeof(event.app));
+ if (dev->dcbnl_ops->getdcbx)
+ event.dcbx = dev->dcbnl_ops->getdcbx(dev);
spin_lock(&dcb_lock);
/* Search for existing match and replace */
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->app.selector == new->selector &&
itr->app.protocol == new->protocol &&
- (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ itr->ifindex == dev->ifindex) {
if (new->priority)
itr->app.priority = new->priority;
else {
@@ -2101,7 +2103,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
}
memcpy(&entry->app, new, sizeof(*new));
- strncpy(entry->name, dev->name, IFNAMSIZ);
+ entry->ifindex = dev->ifindex;
list_add(&entry->list, &dcb_app_list);
}
out:
@@ -2127,7 +2129,7 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->app.selector == app->selector &&
itr->app.protocol == app->protocol &&
- (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ itr->ifindex == dev->ifindex) {
prio |= 1 << itr->app.priority;
}
}
@@ -2150,8 +2152,10 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
struct dcb_app_type event;
int err = 0;
- memcpy(&event.name, dev->name, sizeof(event.name));
+ event.ifindex = dev->ifindex;
memcpy(&event.app, new, sizeof(event.app));
+ if (dev->dcbnl_ops->getdcbx)
+ event.dcbx = dev->dcbnl_ops->getdcbx(dev);
spin_lock(&dcb_lock);
/* Search for existing match and abort if found */
@@ -2159,7 +2163,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
if (itr->app.selector == new->selector &&
itr->app.protocol == new->protocol &&
itr->app.priority == new->priority &&
- (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ itr->ifindex == dev->ifindex) {
err = -EEXIST;
goto out;
}
@@ -2173,7 +2177,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
}
memcpy(&entry->app, new, sizeof(*new));
- strncpy(entry->name, dev->name, IFNAMSIZ);
+ entry->ifindex = dev->ifindex;
list_add(&entry->list, &dcb_app_list);
out:
spin_unlock(&dcb_lock);
@@ -2194,8 +2198,10 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
struct dcb_app_type event;
int err = -ENOENT;
- memcpy(&event.name, dev->name, sizeof(event.name));
+ event.ifindex = dev->ifindex;
memcpy(&event.app, del, sizeof(event.app));
+ if (dev->dcbnl_ops->getdcbx)
+ event.dcbx = dev->dcbnl_ops->getdcbx(dev);
spin_lock(&dcb_lock);
/* Search for existing match and remove it. */
@@ -2203,7 +2209,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
if (itr->app.selector == del->selector &&
itr->app.protocol == del->protocol &&
itr->app.priority == del->priority &&
- (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ itr->ifindex == dev->ifindex) {
list_del(&itr->list);
kfree(itr);
err = 0;
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 0462040fc81..67164bb6ae4 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -85,7 +85,6 @@ static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
{
- struct dccp_sock *dp = dccp_sk(sk);
u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);
/*
@@ -98,14 +97,33 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
val = max_ratio;
}
- if (val > DCCPF_ACK_RATIO_MAX)
- val = DCCPF_ACK_RATIO_MAX;
+ dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
+ min_t(u32, val, DCCPF_ACK_RATIO_MAX));
+}
- if (val == dp->dccps_l_ack_ratio)
- return;
+static void ccid2_check_l_ack_ratio(struct sock *sk)
+{
+ struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
- ccid2_pr_debug("changing local ack ratio to %u\n", val);
- dp->dccps_l_ack_ratio = val;
+ /*
+ * After a loss, idle period, application limited period, or RTO we
+ * need to check that the ack ratio is still less than the congestion
+ * window. Otherwise, we will send an entire congestion window of
+ * packets and got no response because we haven't sent ack ratio
+ * packets yet.
+ * If the ack ratio does need to be reduced, we reduce it to half of
+ * the congestion window (or 1 if that's zero) instead of to the
+ * congestion window. This prevents problems if one ack is lost.
+ */
+ if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
+ ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U);
+}
+
+static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
+{
+ dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
+ clamp_val(val, DCCPF_SEQ_WMIN,
+ DCCPF_SEQ_WMAX));
}
static void ccid2_hc_tx_rto_expire(unsigned long data)
@@ -187,6 +205,8 @@ static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
}
hc->tx_cwnd_used = 0;
hc->tx_cwnd_stamp = now;
+
+ ccid2_check_l_ack_ratio(sk);
}
/* This borrows the code of tcp_cwnd_restart() */
@@ -205,6 +225,8 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
hc->tx_cwnd_stamp = now;
hc->tx_cwnd_used = 0;
+
+ ccid2_check_l_ack_ratio(sk);
}
static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
@@ -405,17 +427,37 @@ static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
unsigned int *maxincr)
{
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
- if (hc->tx_cwnd < hc->tx_ssthresh) {
- if (*maxincr > 0 && ++hc->tx_packets_acked == 2) {
+ struct dccp_sock *dp = dccp_sk(sk);
+ int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio;
+
+ if (hc->tx_cwnd < dp->dccps_l_seq_win &&
+ r_seq_used < dp->dccps_r_seq_win) {
+ if (hc->tx_cwnd < hc->tx_ssthresh) {
+ if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) {
+ hc->tx_cwnd += 1;
+ *maxincr -= 1;
+ hc->tx_packets_acked = 0;
+ }
+ } else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
hc->tx_cwnd += 1;
- *maxincr -= 1;
hc->tx_packets_acked = 0;
}
- } else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
- hc->tx_cwnd += 1;
- hc->tx_packets_acked = 0;
}
+
+ /*
+ * Adjust the local sequence window and the ack ratio to allow about
+ * 5 times the number of packets in the network (RFC 4340 7.5.2)
+ */
+ if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win)
+ ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2);
+ else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win/2)
+ ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U);
+
+ if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win)
+ ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2);
+ else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2)
+ ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2);
+
/*
* FIXME: RTT is sampled several times per acknowledgment (for each
* entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
@@ -441,9 +483,7 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
- /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
- if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd)
- ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
+ ccid2_check_l_ack_ratio(sk);
}
static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
@@ -494,8 +534,16 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
if (hc->tx_rpdupack >= NUMDUPACK) {
hc->tx_rpdupack = -1; /* XXX lame */
hc->tx_rpseq = 0;
-
+#ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__
+ /*
+ * FIXME: Ack Congestion Control is broken; in
+ * the current state instabilities occurred with
+ * Ack Ratios greater than 1; causing hang-ups
+ * and long RTO timeouts. This needs to be fixed
+ * before opening up dynamic changes. -- gerrit
+ */
ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
+#endif
}
}
}
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index f585d330e1e..18c97543e52 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -43,6 +43,12 @@ struct ccid2_seq {
#define CCID2_SEQBUF_LEN 1024
#define CCID2_SEQBUF_MAX 128
+/*
+ * Multiple of congestion window to keep the sequence window at
+ * (RFC 4340 7.5.2)
+ */
+#define CCID2_WIN_CHANGE_FACTOR 5
+
/**
* struct ccid2_hc_tx_sock - CCID2 TX half connection
* @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 5fdb0722901..583490aaf56 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -474,6 +474,7 @@ static inline int dccp_ack_pending(const struct sock *sk)
return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
}
+extern int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
extern int dccp_feat_finalise_settings(struct dccp_sock *dp);
extern int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
extern int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 568def95272..23cea0ee310 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -12,6 +12,7 @@
* -----------
* o Feature negotiation is coordinated with connection setup (as in TCP), wild
* changes of parameters of an established connection are not supported.
+ * o Changing non-negotiable (NN) values is supported in state OPEN/PARTOPEN.
* o All currently known SP features have 1-byte quantities. If in the future
* extensions of RFCs 4340..42 define features with item lengths larger than
* one byte, a feature-specific extension of the code will be required.
@@ -343,6 +344,20 @@ static int __dccp_feat_activate(struct sock *sk, const int idx,
return dccp_feat_table[idx].activation_hdlr(sk, val, rx);
}
+/**
+ * dccp_feat_activate - Activate feature value on socket
+ * @sk: fully connected DCCP socket (after handshake is complete)
+ * @feat_num: feature to activate, one of %dccp_feature_numbers
+ * @local: whether local (1) or remote (0) @feat_num is meant
+ * @fval: the value (SP or NN) to activate, or NULL to use the default value
+ * For general use this function is preferable over __dccp_feat_activate().
+ */
+static int dccp_feat_activate(struct sock *sk, u8 feat_num, bool local,
+ dccp_feat_val const *fval)
+{
+ return __dccp_feat_activate(sk, dccp_feat_index(feat_num), local, fval);
+}
+
/* Test for "Req'd" feature (RFC 4340, 6.4) */
static inline int dccp_feat_must_be_understood(u8 feat_num)
{
@@ -650,11 +665,22 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,
return -1;
if (pos->needs_mandatory && dccp_insert_option_mandatory(skb))
return -1;
- /*
- * Enter CHANGING after transmitting the Change option (6.6.2).
- */
- if (pos->state == FEAT_INITIALISING)
- pos->state = FEAT_CHANGING;
+
+ if (skb->sk->sk_state == DCCP_OPEN &&
+ (opt == DCCPO_CONFIRM_R || opt == DCCPO_CONFIRM_L)) {
+ /*
+ * Confirms don't get retransmitted (6.6.3) once the
+ * connection is in state OPEN
+ */
+ dccp_feat_list_pop(pos);
+ } else {
+ /*
+ * Enter CHANGING after transmitting the Change
+ * option (6.6.2).
+ */
+ if (pos->state == FEAT_INITIALISING)
+ pos->state = FEAT_CHANGING;
+ }
}
return 0;
}
@@ -730,6 +756,70 @@ int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
0, list, len);
}
+/**
+ * dccp_feat_nn_get - Query current/pending value of NN feature
+ * @sk: DCCP socket of an established connection
+ * @feat: NN feature number from %dccp_feature_numbers
+ * For a known NN feature, returns value currently being negotiated, or
+ * current (confirmed) value if no negotiation is going on.
+ */
+u64 dccp_feat_nn_get(struct sock *sk, u8 feat)
+{
+ if (dccp_feat_type(feat) == FEAT_NN) {
+ struct dccp_sock *dp = dccp_sk(sk);
+ struct dccp_feat_entry *entry;
+
+ entry = dccp_feat_list_lookup(&dp->dccps_featneg, feat, 1);
+ if (entry != NULL)
+ return entry->val.nn;
+
+ switch (feat) {
+ case DCCPF_ACK_RATIO:
+ return dp->dccps_l_ack_ratio;
+ case DCCPF_SEQUENCE_WINDOW:
+ return dp->dccps_l_seq_win;
+ }
+ }
+ DCCP_BUG("attempt to look up unsupported feature %u", feat);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dccp_feat_nn_get);
+
+/**
+ * dccp_feat_signal_nn_change - Update NN values for an established connection
+ * @sk: DCCP socket of an established connection
+ * @feat: NN feature number from %dccp_feature_numbers
+ * @nn_val: the new value to use
+ * This function is used to communicate NN updates out-of-band.
+ */
+int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val)
+{
+ struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
+ dccp_feat_val fval = { .nn = nn_val };
+ struct dccp_feat_entry *entry;
+
+ if (sk->sk_state != DCCP_OPEN && sk->sk_state != DCCP_PARTOPEN)
+ return 0;
+
+ if (dccp_feat_type(feat) != FEAT_NN ||
+ !dccp_feat_is_valid_nn_val(feat, nn_val))
+ return -EINVAL;
+
+ if (nn_val == dccp_feat_nn_get(sk, feat))
+ return 0; /* already set or negotiation under way */
+
+ entry = dccp_feat_list_lookup(fn, feat, 1);
+ if (entry != NULL) {
+ dccp_pr_debug("Clobbering existing NN entry %llu -> %llu\n",
+ (unsigned long long)entry->val.nn,
+ (unsigned long long)nn_val);
+ dccp_feat_list_pop(entry);
+ }
+
+ inet_csk_schedule_ack(sk);
+ return dccp_feat_push_change(fn, feat, 1, 0, &fval);
+}
+EXPORT_SYMBOL_GPL(dccp_feat_signal_nn_change);
/*
* Tracking features whose value depend on the choice of CCID
@@ -1187,6 +1277,100 @@ confirmation_failed:
}
/**
+ * dccp_feat_handle_nn_established - Fast-path reception of NN options
+ * @sk: socket of an established DCCP connection
+ * @mandatory: whether @opt was preceded by a Mandatory option
+ * @opt: %DCCPO_CHANGE_L | %DCCPO_CONFIRM_R (NN only)
+ * @feat: NN number, one of %dccp_feature_numbers
+ * @val: NN value
+ * @len: length of @val in bytes
+ * This function combines the functionality of change_recv/confirm_recv, with
+ * the following differences (reset codes are the same):
+ * - cleanup after receiving the Confirm;
+ * - values are directly activated after successful parsing;
+ * - deliberately restricted to NN features.
+ * The restriction to NN features is essential since SP features can have non-
+ * predictable outcomes (depending on the remote configuration), and are inter-
+ * dependent (CCIDs for instance cause further dependencies).
+ */
+static u8 dccp_feat_handle_nn_established(struct sock *sk, u8 mandatory, u8 opt,
+ u8 feat, u8 *val, u8 len)
+{
+ struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
+ const bool local = (opt == DCCPO_CONFIRM_R);
+ struct dccp_feat_entry *entry;
+ u8 type = dccp_feat_type(feat);
+ dccp_feat_val fval;
+
+ dccp_feat_print_opt(opt, feat, val, len, mandatory);
+
+ /* Ignore non-mandatory unknown and non-NN features */
+ if (type == FEAT_UNKNOWN) {
+ if (local && !mandatory)
+ return 0;
+ goto fast_path_unknown;
+ } else if (type != FEAT_NN) {
+ return 0;
+ }
+
+ /*
+ * We don't accept empty Confirms, since in fast-path feature
+ * negotiation the values are enabled immediately after sending
+ * the Change option.
+ * Empty Changes on the other hand are invalid (RFC 4340, 6.1).
+ */
+ if (len == 0 || len > sizeof(fval.nn))
+ goto fast_path_unknown;
+
+ if (opt == DCCPO_CHANGE_L) {
+ fval.nn = dccp_decode_value_var(val, len);
+ if (!dccp_feat_is_valid_nn_val(feat, fval.nn))
+ goto fast_path_unknown;
+
+ if (dccp_feat_push_confirm(fn, feat, local, &fval) ||
+ dccp_feat_activate(sk, feat, local, &fval))
+ return DCCP_RESET_CODE_TOO_BUSY;
+
+ /* set the `Ack Pending' flag to piggyback a Confirm */
+ inet_csk_schedule_ack(sk);
+
+ } else if (opt == DCCPO_CONFIRM_R) {
+ entry = dccp_feat_list_lookup(fn, feat, local);
+ if (entry == NULL || entry->state != FEAT_CHANGING)
+ return 0;
+
+ fval.nn = dccp_decode_value_var(val, len);
+ /*
+ * Just ignore a value that doesn't match our current value.
+ * If the option changes twice within two RTTs, then at least
+ * one CONFIRM will be received for the old value after a
+ * new CHANGE was sent.
+ */
+ if (fval.nn != entry->val.nn)
+ return 0;
+
+ /* Only activate after receiving the Confirm option (6.6.1). */
+ dccp_feat_activate(sk, feat, local, &fval);
+
+ /* It has been confirmed - so remove the entry */
+ dccp_feat_list_pop(entry);
+
+ } else {
+ DCCP_WARN("Received illegal option %u\n", opt);
+ goto fast_path_failed;
+ }
+ return 0;
+
+fast_path_unknown:
+ if (!mandatory)
+ return dccp_push_empty_confirm(fn, feat, local);
+
+fast_path_failed:
+ return mandatory ? DCCP_RESET_CODE_MANDATORY_ERROR
+ : DCCP_RESET_CODE_OPTION_ERROR;
+}
+
+/**
* dccp_feat_parse_options - Process Feature-Negotiation Options
* @sk: for general use and used by the client during connection setup
* @dreq: used by the server during connection setup
@@ -1221,6 +1405,14 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
return dccp_feat_confirm_recv(fn, mandatory, opt, feat,
val, len, server);
}
+ break;
+ /*
+ * Support for exchanging NN options on an established connection.
+ */
+ case DCCP_OPEN:
+ case DCCP_PARTOPEN:
+ return dccp_feat_handle_nn_established(sk, mandatory, opt, feat,
+ val, len);
}
return 0; /* ignore FN options in all other states */
}
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index e56a4e5e634..90b957d34d2 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -129,6 +129,7 @@ extern int dccp_feat_clone_list(struct list_head const *, struct list_head *);
extern void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
extern u64 dccp_decode_value_var(const u8 *bf, const u8 len);
+extern u64 dccp_feat_nn_get(struct sock *sk, u8 feat);
extern int dccp_insert_option_mandatory(struct sk_buff *skb);
extern int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 152975d942d..e742f90a685 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -184,7 +184,6 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
dp->dccps_rate_last = jiffies;
dp->dccps_role = DCCP_ROLE_UNDEFINED;
dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT;
- dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;
dp->dccps_tx_qlen = sysctl_dccp_tx_qlen;
dccp_init_xmit_timers(sk);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index ba4faceec40..2ab16e12520 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -388,7 +388,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
}
ifa->ifa_next = dn_db->ifa_list;
- rcu_assign_pointer(dn_db->ifa_list, ifa);
+ RCU_INIT_POINTER(dn_db->ifa_list, ifa);
dn_ifaddr_notify(RTM_NEWADDR, ifa);
blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
@@ -1093,7 +1093,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
- rcu_assign_pointer(dev->dn_ptr, dn_db);
+ RCU_INIT_POINTER(dev->dn_ptr, dn_db);
dn_db->dev = dev;
init_timer(&dn_db->timer);
@@ -1101,7 +1101,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
if (!dn_db->neigh_parms) {
- rcu_assign_pointer(dev->dn_ptr, NULL);
+ RCU_INIT_POINTER(dev->dn_ptr, NULL);
kfree(dn_db);
return NULL;
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 0a47b6c3703..56cf9b8e1c7 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -301,7 +301,6 @@ static const struct net_device_ops dsa_netdev_ops = {
.ndo_start_xmit = dsa_xmit,
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
- .ndo_set_multicast_list = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
.ndo_do_ioctl = dsa_slave_ioctl,
};
@@ -314,7 +313,6 @@ static const struct net_device_ops edsa_netdev_ops = {
.ndo_start_xmit = edsa_xmit,
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
- .ndo_set_multicast_list = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
.ndo_do_ioctl = dsa_slave_ioctl,
};
@@ -327,7 +325,6 @@ static const struct net_device_ops trailer_netdev_ops = {
.ndo_start_xmit = trailer_xmit,
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
- .ndo_set_multicast_list = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
.ndo_do_ioctl = dsa_slave_ioctl,
};
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
new file mode 100644
index 00000000000..19d6aefe97d
--- /dev/null
+++ b/net/ieee802154/6lowpan.c
@@ -0,0 +1,891 @@
+/*
+ * Copyright 2011, Siemens AG
+ * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+/*
+ * Based on patches from Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* Jon's code is based on 6lowpan implementation for Contiki which is:
+ * Copyright (c) 2008, Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Institute nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define DEBUG
+
+#include <linux/bitops.h>
+#include <linux/if_arp.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <net/af_ieee802154.h>
+#include <net/ieee802154.h>
+#include <net/ieee802154_netdev.h>
+#include <net/ipv6.h>
+
+#include "6lowpan.h"
+
+/* TTL uncompression values */
+static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};
+
+static LIST_HEAD(lowpan_devices);
+
+/*
+ * Uncompression of linklocal:
+ * 0 -> 16 bytes from packet
+ * 1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet
+ * 2 -> 2 bytes from prefix - zeroes + 2 from packet
+ * 3 -> 2 bytes from prefix - infer 8 bytes from lladdr
+ *
+ * NOTE: => the uncompress function does change 0xf to 0x10
+ * NOTE: 0x00 => no-autoconfig => unspecified
+ */
+static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20};
+
+/*
+ * Uncompression of ctx-based:
+ * 0 -> 0 bits from packet [unspecified / reserved]
+ * 1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet
+ * 2 -> 8 bytes from prefix - zeroes + 2 from packet
+ * 3 -> 8 bytes from prefix - infer 8 bytes from lladdr
+ */
+static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80};
+
+/*
+ * Uncompression of ctx-base
+ * 0 -> 0 bits from packet
+ * 1 -> 2 bytes from prefix - bunch of zeroes 5 from packet
+ * 2 -> 2 bytes from prefix - zeroes + 3 from packet
+ * 3 -> 2 bytes from prefix - infer 1 bytes from lladdr
+ */
+static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21};
+
+/* Link local prefix */
+static const u8 lowpan_llprefix[] = {0xfe, 0x80};
+
+/* private device info */
+struct lowpan_dev_info {
+ struct net_device *real_dev; /* real WPAN device ptr */
+ struct mutex dev_list_mtx; /* mutex for list ops */
+};
+
+struct lowpan_dev_record {
+ struct net_device *ldev;
+ struct list_head list;
+};
+
+static inline struct
+lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+static inline void lowpan_address_flip(u8 *src, u8 *dest)
+{
+ int i;
+ for (i = 0; i < IEEE802154_ADDR_LEN; i++)
+ (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
+}
+
+/* list of all 6lowpan devices, uses for package delivering */
+/* print data in line */
+static inline void lowpan_raw_dump_inline(const char *caller, char *msg,
+ unsigned char *buf, int len)
+{
+#ifdef DEBUG
+ if (msg)
+ pr_debug("(%s) %s: ", caller, msg);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE,
+ 16, 1, buf, len, false);
+#endif /* DEBUG */
+}
+
+/*
+ * print data in a table format:
+ *
+ * addr: xx xx xx xx xx xx
+ * addr: xx xx xx xx xx xx
+ * ...
+ */
+static inline void lowpan_raw_dump_table(const char *caller, char *msg,
+ unsigned char *buf, int len)
+{
+#ifdef DEBUG
+ if (msg)
+ pr_debug("(%s) %s:\n", caller, msg);
+ print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
+#endif /* DEBUG */
+}
+
+static u8
+lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
+ const unsigned char *lladdr)
+{
+ u8 val = 0;
+
+ if (is_addr_mac_addr_based(ipaddr, lladdr))
+ val = 3; /* 0-bits */
+ else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
+ /* compress IID to 16 bits xxxx::XXXX */
+ memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2);
+ *hc06_ptr += 2;
+ val = 2; /* 16-bits */
+ } else {
+ /* do not compress IID => xxxx::IID */
+ memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
+ *hc06_ptr += 8;
+ val = 1; /* 64-bits */
+ }
+
+ return rol8(val, shift);
+}
+
+static void
+lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
+{
+ memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN);
+ /* second bit-flip (Universe/Local) is done according RFC2464 */
+ ipaddr->s6_addr[8] ^= 0x02;
+}
+
+/*
+ * Uncompress addresses based on a prefix and a postfix with zeroes in
+ * between. If the postfix is zero in length it will use the link address
+ * to configure the IP address (autoconf style).
+ * pref_post_count takes a byte where the first nibble specify prefix count
+ * and the second postfix count (NOTE: 15/0xf => 16 bytes copy).
+ */
+static int
+lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
+ u8 const *prefix, u8 pref_post_count, unsigned char *lladdr)
+{
+ u8 prefcount = pref_post_count >> 4;
+ u8 postcount = pref_post_count & 0x0f;
+
+ /* full nibble 15 => 16 */
+ prefcount = (prefcount == 15 ? 16 : prefcount);
+ postcount = (postcount == 15 ? 16 : postcount);
+
+ if (lladdr)
+ lowpan_raw_dump_inline(__func__, "linklocal address",
+ lladdr, IEEE802154_ALEN);
+ if (prefcount > 0)
+ memcpy(ipaddr, prefix, prefcount);
+
+ if (prefcount + postcount < 16)
+ memset(&ipaddr->s6_addr[prefcount], 0,
+ 16 - (prefcount + postcount));
+
+ if (postcount > 0) {
+ memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount);
+ skb_pull(skb, postcount);
+ } else if (prefcount > 0) {
+ if (lladdr == NULL)
+ return -EINVAL;
+
+ /* no IID based configuration if no prefix and no data */
+ lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
+ }
+
+ pr_debug("(%s): uncompressing %d + %d => ", __func__, prefcount,
+ postcount);
+ lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);
+
+ return 0;
+}
+
+static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
+{
+ u8 ret;
+
+ ret = skb->data[0];
+ skb_pull(skb, 1);
+
+ return ret;
+}
+
+static int lowpan_header_create(struct sk_buff *skb,
+ struct net_device *dev,
+ unsigned short type, const void *_daddr,
+ const void *_saddr, unsigned len)
+{
+ u8 tmp, iphc0, iphc1, *hc06_ptr;
+ struct ipv6hdr *hdr;
+ const u8 *saddr = _saddr;
+ const u8 *daddr = _daddr;
+ u8 *head;
+ struct ieee802154_addr sa, da;
+
+ if (type != ETH_P_IPV6)
+ return 0;
+ /* TODO:
+ * if this package isn't ipv6 one, where should it be routed?
+ */
+ head = kzalloc(100, GFP_KERNEL);
+ if (head == NULL)
+ return -ENOMEM;
+
+ hdr = ipv6_hdr(skb);
+ hc06_ptr = head + 2;
+
+ pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
+ "\tnexthdr = 0x%02x\n\thop_lim = %d\n", __func__,
+ hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
+ hdr->hop_limit);
+
+ lowpan_raw_dump_table(__func__, "raw skb network header dump",
+ skb_network_header(skb), sizeof(struct ipv6hdr));
+
+ if (!saddr)
+ saddr = dev->dev_addr;
+
+ lowpan_raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
+
+ /*
+ * As we copy some bit-length fields, in the IPHC encoding bytes,
+ * we sometimes use |=
+ * If the field is 0, and the current bit value in memory is 1,
+ * this does not work. We therefore reset the IPHC encoding here
+ */
+ iphc0 = LOWPAN_DISPATCH_IPHC;
+ iphc1 = 0;
+
+ /* TODO: context lookup */
+
+ lowpan_raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
+
+ /*
+ * Traffic class, flow label
+ * If flow label is 0, compress it. If traffic class is 0, compress it
+ * We have to process both in the same time as the offset of traffic
+ * class depends on the presence of version and flow label
+ */
+
+ /* hc06 format of TC is ECN | DSCP , original one is DSCP | ECN */
+ tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
+ tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
+
+ if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
+ (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
+ /* flow label can be compressed */
+ iphc0 |= LOWPAN_IPHC_FL_C;
+ if ((hdr->priority == 0) &&
+ ((hdr->flow_lbl[0] & 0xF0) == 0)) {
+ /* compress (elide) all */
+ iphc0 |= LOWPAN_IPHC_TC_C;
+ } else {
+ /* compress only the flow label */
+ *hc06_ptr = tmp;
+ hc06_ptr += 1;
+ }
+ } else {
+ /* Flow label cannot be compressed */
+ if ((hdr->priority == 0) &&
+ ((hdr->flow_lbl[0] & 0xF0) == 0)) {
+ /* compress only traffic class */
+ iphc0 |= LOWPAN_IPHC_TC_C;
+ *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
+ memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2);
+ hc06_ptr += 3;
+ } else {
+ /* compress nothing */
+ memcpy(hc06_ptr, &hdr, 4);
+ /* replace the top byte with new ECN | DSCP format */
+ *hc06_ptr = tmp;
+ hc06_ptr += 4;
+ }
+ }
+
+ /* NOTE: payload length is always compressed */
+
+ /* Next Header is compress if UDP */
+ if (hdr->nexthdr == UIP_PROTO_UDP)
+ iphc0 |= LOWPAN_IPHC_NH_C;
+
+/* TODO: next header compression */
+
+ if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
+ *hc06_ptr = hdr->nexthdr;
+ hc06_ptr += 1;
+ }
+
+ /*
+ * Hop limit
+ * if 1: compress, encoding is 01
+ * if 64: compress, encoding is 10
+ * if 255: compress, encoding is 11
+ * else do not compress
+ */
+ switch (hdr->hop_limit) {
+ case 1:
+ iphc0 |= LOWPAN_IPHC_TTL_1;
+ break;
+ case 64:
+ iphc0 |= LOWPAN_IPHC_TTL_64;
+ break;
+ case 255:
+ iphc0 |= LOWPAN_IPHC_TTL_255;
+ break;
+ default:
+ *hc06_ptr = hdr->hop_limit;
+ break;
+ }
+
+ /* source address compression */
+ if (is_addr_unspecified(&hdr->saddr)) {
+ pr_debug("(%s): source address is unspecified, setting SAC\n",
+ __func__);
+ iphc1 |= LOWPAN_IPHC_SAC;
+ /* TODO: context lookup */
+ } else if (is_addr_link_local(&hdr->saddr)) {
+ pr_debug("(%s): source address is link-local\n", __func__);
+ iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
+ LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
+ } else {
+ pr_debug("(%s): send the full source address\n", __func__);
+ memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
+ hc06_ptr += 16;
+ }
+
+ /* destination address compression */
+ if (is_addr_mcast(&hdr->daddr)) {
+ pr_debug("(%s): destination address is multicast", __func__);
+ iphc1 |= LOWPAN_IPHC_M;
+ if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
+ pr_debug("compressed to 1 octet\n");
+ iphc1 |= LOWPAN_IPHC_DAM_11;
+ /* use last byte */
+ *hc06_ptr = hdr->daddr.s6_addr[15];
+ hc06_ptr += 1;
+ } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
+ pr_debug("compressed to 4 octets\n");
+ iphc1 |= LOWPAN_IPHC_DAM_10;
+ /* second byte + the last three */
+ *hc06_ptr = hdr->daddr.s6_addr[1];
+ memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3);
+ hc06_ptr += 4;
+ } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
+ pr_debug("compressed to 6 octets\n");
+ iphc1 |= LOWPAN_IPHC_DAM_01;
+ /* second byte + the last five */
+ *hc06_ptr = hdr->daddr.s6_addr[1];
+ memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5);
+ hc06_ptr += 6;
+ } else {
+ pr_debug("using full address\n");
+ iphc1 |= LOWPAN_IPHC_DAM_00;
+ memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16);
+ hc06_ptr += 16;
+ }
+ } else {
+ pr_debug("(%s): destination address is unicast: ", __func__);
+ /* TODO: context lookup */
+ if (is_addr_link_local(&hdr->daddr)) {
+ pr_debug("destination address is link-local\n");
+ iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
+ LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
+ } else {
+ pr_debug("using full address\n");
+ memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
+ hc06_ptr += 16;
+ }
+ }
+
+ /* TODO: UDP header compression */
+ /* TODO: Next Header compression */
+
+ head[0] = iphc0;
+ head[1] = iphc1;
+
+ skb_pull(skb, sizeof(struct ipv6hdr));
+ memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
+
+ kfree(head);
+
+ lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
+ skb->len);
+
+ /*
+ * NOTE1: I'm still unsure about the fact that compression and WPAN
+ * header are created here and not later in the xmit. So wait for
+ * an opinion of net maintainers.
+ */
+ /*
+ * NOTE2: to be absolutely correct, we must derive PANid information
+ * from MAC subif of the 'dev' and 'real_dev' network devices, but
+ * this isn't implemented in mainline yet, so currently we assign 0xff
+ */
+ {
+ /* prepare wpan address data */
+ sa.addr_type = IEEE802154_ADDR_LONG;
+ sa.pan_id = 0xff;
+
+ da.addr_type = IEEE802154_ADDR_LONG;
+ da.pan_id = 0xff;
+
+ memcpy(&(da.hwaddr), daddr, 8);
+ memcpy(&(sa.hwaddr), saddr, 8);
+
+ mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
+ return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
+ type, (void *)&da, (void *)&sa, skb->len);
+ }
+}
+
+static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
+{
+ struct sk_buff *new;
+ struct lowpan_dev_record *entry;
+ int stat = NET_RX_SUCCESS;
+
+ new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
+ GFP_ATOMIC);
+ kfree_skb(skb);
+
+ if (!new)
+ return -ENOMEM;
+
+ skb_push(new, sizeof(struct ipv6hdr));
+ skb_reset_network_header(new);
+ skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));
+
+ new->protocol = htons(ETH_P_IPV6);
+ new->pkt_type = PACKET_HOST;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &lowpan_devices, list)
+ if (lowpan_dev_info(entry->ldev)->real_dev == new->dev) {
+ skb = skb_copy(new, GFP_ATOMIC);
+ if (!skb) {
+ stat = -ENOMEM;
+ break;
+ }
+
+ skb->dev = entry->ldev;
+ stat = netif_rx(skb);
+ }
+ rcu_read_unlock();
+
+ kfree_skb(new);
+
+ return stat;
+}
+
+static int
+lowpan_process_data(struct sk_buff *skb)
+{
+ struct ipv6hdr hdr;
+ u8 tmp, iphc0, iphc1, num_context = 0;
+ u8 *_saddr, *_daddr;
+ int err;
+
+ lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
+ skb->len);
+ /* at least two bytes will be used for the encoding */
+ if (skb->len < 2)
+ goto drop;
+ iphc0 = lowpan_fetch_skb_u8(skb);
+ iphc1 = lowpan_fetch_skb_u8(skb);
+
+ _saddr = mac_cb(skb)->sa.hwaddr;
+ _daddr = mac_cb(skb)->da.hwaddr;
+
+ pr_debug("(%s): iphc0 = %02x, iphc1 = %02x\n", __func__, iphc0, iphc1);
+
+ /* another if the CID flag is set */
+ if (iphc1 & LOWPAN_IPHC_CID) {
+ pr_debug("(%s): CID flag is set, increase header with one\n",
+ __func__);
+ if (!skb->len)
+ goto drop;
+ num_context = lowpan_fetch_skb_u8(skb);
+ }
+
+ hdr.version = 6;
+
+ /* Traffic Class and Flow Label */
+ switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
+ /*
+ * Traffic Class and FLow Label carried in-line
+ * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
+ */
+ case 0: /* 00b */
+ if (!skb->len)
+ goto drop;
+ tmp = lowpan_fetch_skb_u8(skb);
+ memcpy(&hdr.flow_lbl, &skb->data[0], 3);
+ skb_pull(skb, 3);
+ hdr.priority = ((tmp >> 2) & 0x0f);
+ hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
+ (hdr.flow_lbl[0] & 0x0f);
+ break;
+ /*
+ * Traffic class carried in-line
+ * ECN + DSCP (1 byte), Flow Label is elided
+ */
+ case 1: /* 10b */
+ if (!skb->len)
+ goto drop;
+ tmp = lowpan_fetch_skb_u8(skb);
+ hdr.priority = ((tmp >> 2) & 0x0f);
+ hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
+ hdr.flow_lbl[1] = 0;
+ hdr.flow_lbl[2] = 0;
+ break;
+ /*
+ * Flow Label carried in-line
+ * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
+ */
+ case 2: /* 01b */
+ if (!skb->len)
+ goto drop;
+ tmp = lowpan_fetch_skb_u8(skb);
+ hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
+ memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
+ skb_pull(skb, 2);
+ break;
+ /* Traffic Class and Flow Label are elided */
+ case 3: /* 11b */
+ hdr.priority = 0;
+ hdr.flow_lbl[0] = 0;
+ hdr.flow_lbl[1] = 0;
+ hdr.flow_lbl[2] = 0;
+ break;
+ default:
+ break;
+ }
+
+ /* Next Header */
+ if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
+ /* Next header is carried inline */
+ if (!skb->len)
+ goto drop;
+ hdr.nexthdr = lowpan_fetch_skb_u8(skb);
+ pr_debug("(%s): NH flag is set, next header is carried "
+ "inline: %02x\n", __func__, hdr.nexthdr);
+ }
+
+ /* Hop Limit */
+ if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
+ hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
+ else {
+ if (!skb->len)
+ goto drop;
+ hdr.hop_limit = lowpan_fetch_skb_u8(skb);
+ }
+
+ /* Extract SAM to the tmp variable */
+ tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
+
+ /* Source address uncompression */
+ pr_debug("(%s): source address stateless compression\n", __func__);
+ err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
+ lowpan_unc_llconf[tmp], skb->data);
+ if (err)
+ goto drop;
+
+ /* Extract DAM to the tmp variable */
+ tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
+
+ /* check for Multicast Compression */
+ if (iphc1 & LOWPAN_IPHC_M) {
+ if (iphc1 & LOWPAN_IPHC_DAC) {
+ pr_debug("(%s): destination address context-based "
+ "multicast compression\n", __func__);
+ /* TODO: implement this */
+ } else {
+ u8 prefix[] = {0xff, 0x02};
+
+ pr_debug("(%s): destination address non-context-based"
+ " multicast compression\n", __func__);
+ if (0 < tmp && tmp < 3) {
+ if (!skb->len)
+ goto drop;
+ else
+ prefix[1] = lowpan_fetch_skb_u8(skb);
+ }
+
+ err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
+ lowpan_unc_mxconf[tmp], NULL);
+ if (err)
+ goto drop;
+ }
+ } else {
+ pr_debug("(%s): destination address stateless compression\n",
+ __func__);
+ err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
+ lowpan_unc_llconf[tmp], skb->data);
+ if (err)
+ goto drop;
+ }
+
+ /* TODO: UDP header parse */
+
+ /* Not fragmented package */
+ hdr.payload_len = htons(skb->len);
+
+ pr_debug("(%s): skb headroom size = %d, data length = %d\n", __func__,
+ skb_headroom(skb), skb->len);
+
+ pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
+ "nexthdr = 0x%02x\n\thop_lim = %d\n", __func__, hdr.version,
+ ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);
+
+ lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
+ sizeof(hdr));
+ return lowpan_skb_deliver(skb, &hdr);
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static int lowpan_set_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *sa = p;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* TODO: validate addr */
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+ return 0;
+}
+
+static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int err = 0;
+
+ pr_debug("(%s): package xmit\n", __func__);
+
+ skb->dev = lowpan_dev_info(dev)->real_dev;
+ if (skb->dev == NULL) {
+ pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
+ dev_kfree_skb(skb);
+ } else
+ err = dev_queue_xmit(skb);
+
+ return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
+}
+
+static void lowpan_dev_free(struct net_device *dev)
+{
+ dev_put(lowpan_dev_info(dev)->real_dev);
+ free_netdev(dev);
+}
+
+static struct header_ops lowpan_header_ops = {
+ .create = lowpan_header_create,
+};
+
+static const struct net_device_ops lowpan_netdev_ops = {
+ .ndo_start_xmit = lowpan_xmit,
+ .ndo_set_mac_address = lowpan_set_address,
+};
+
+static void lowpan_setup(struct net_device *dev)
+{
+ pr_debug("(%s)\n", __func__);
+
+ dev->addr_len = IEEE802154_ADDR_LEN;
+ memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
+ dev->type = ARPHRD_IEEE802154;
+ dev->features = NETIF_F_NO_CSUM;
+ /* Frame Control + Sequence Number + Address fields + Security Header */
+ dev->hard_header_len = 2 + 1 + 20 + 14;
+ dev->needed_tailroom = 2; /* FCS */
+ dev->mtu = 1281;
+ dev->tx_queue_len = 0;
+ dev->flags = IFF_NOARP | IFF_BROADCAST;
+ dev->watchdog_timeo = 0;
+
+ dev->netdev_ops = &lowpan_netdev_ops;
+ dev->header_ops = &lowpan_header_ops;
+ dev->destructor = lowpan_dev_free;
+}
+
+static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ pr_debug("(%s)\n", __func__);
+
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ if (!netif_running(dev))
+ goto drop;
+
+ if (dev->type != ARPHRD_IEEE802154)
+ goto drop;
+
+ /* check that it's our buffer */
+ if ((skb->data[0] & 0xe0) == 0x60)
+ lowpan_process_data(skb);
+
+ return NET_RX_SUCCESS;
+
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static int lowpan_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct net_device *real_dev;
+ struct lowpan_dev_record *entry;
+
+ pr_debug("(%s)\n", __func__);
+
+ if (!tb[IFLA_LINK])
+ return -EINVAL;
+ /* find and hold real wpan device */
+ real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ if (!real_dev)
+ return -ENODEV;
+
+ lowpan_dev_info(dev)->real_dev = real_dev;
+ mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
+
+ entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
+ if (!entry) {
+ dev_put(real_dev);
+ lowpan_dev_info(dev)->real_dev = NULL;
+ return -ENOMEM;
+ }
+
+ entry->ldev = dev;
+
+ mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
+ INIT_LIST_HEAD(&entry->list);
+ list_add_tail(&entry->list, &lowpan_devices);
+ mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+
+ register_netdevice(dev);
+
+ return 0;
+}
+
+static void lowpan_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
+ struct net_device *real_dev = lowpan_dev->real_dev;
+ struct lowpan_dev_record *entry;
+ struct lowpan_dev_record *tmp;
+
+ ASSERT_RTNL();
+
+ mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
+ list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
+ if (entry->ldev == dev) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+ mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+
+ mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
+
+ unregister_netdevice_queue(dev, head);
+
+ dev_put(real_dev);
+}
+
+static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
+ .kind = "lowpan",
+ .priv_size = sizeof(struct lowpan_dev_info),
+ .setup = lowpan_setup,
+ .newlink = lowpan_newlink,
+ .dellink = lowpan_dellink,
+ .validate = lowpan_validate,
+};
+
+static inline int __init lowpan_netlink_init(void)
+{
+ return rtnl_link_register(&lowpan_link_ops);
+}
+
+static inline void __init lowpan_netlink_fini(void)
+{
+ rtnl_link_unregister(&lowpan_link_ops);
+}
+
+static struct packet_type lowpan_packet_type = {
+ .type = __constant_htons(ETH_P_IEEE802154),
+ .func = lowpan_rcv,
+};
+
+static int __init lowpan_init_module(void)
+{
+ int err = 0;
+
+ pr_debug("(%s)\n", __func__);
+
+ err = lowpan_netlink_init();
+ if (err < 0)
+ goto out;
+
+ dev_add_pack(&lowpan_packet_type);
+out:
+ return err;
+}
+
+static void __exit lowpan_cleanup_module(void)
+{
+ pr_debug("(%s)\n", __func__);
+
+ lowpan_netlink_fini();
+
+ dev_remove_pack(&lowpan_packet_type);
+}
+
+module_init(lowpan_init_module);
+module_exit(lowpan_cleanup_module);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("lowpan");
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
new file mode 100644
index 00000000000..5d8cf80b930
--- /dev/null
+++ b/net/ieee802154/6lowpan.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2011, Siemens AG
+ * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+/*
+ * Based on patches from Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* Jon's code is based on 6lowpan implementation for Contiki which is:
+ * Copyright (c) 2008, Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Institute nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __6LOWPAN_H__
+#define __6LOWPAN_H__
+
+/* need to know address length to manipulate with it */
+#define IEEE802154_ALEN 8
+
+#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */
+#define UIP_IPH_LEN 40 /* ipv6 fixed header size */
+#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */
+#define UIP_FRAGH_LEN 8 /* ipv6 fragment header size */
+
+/*
+ * ipv6 address based on mac
+ * second bit-flip (Universe/Local) is done according RFC2464
+ */
+#define is_addr_mac_addr_based(a, m) \
+ ((((a)->s6_addr[8]) == (((m)[0]) ^ 0x02)) && \
+ (((a)->s6_addr[9]) == (m)[1]) && \
+ (((a)->s6_addr[10]) == (m)[2]) && \
+ (((a)->s6_addr[11]) == (m)[3]) && \
+ (((a)->s6_addr[12]) == (m)[4]) && \
+ (((a)->s6_addr[13]) == (m)[5]) && \
+ (((a)->s6_addr[14]) == (m)[6]) && \
+ (((a)->s6_addr[15]) == (m)[7]))
+
+/* ipv6 address is unspecified */
+#define is_addr_unspecified(a) \
+ ((((a)->s6_addr32[0]) == 0) && \
+ (((a)->s6_addr32[1]) == 0) && \
+ (((a)->s6_addr32[2]) == 0) && \
+ (((a)->s6_addr32[3]) == 0))
+
+/* compare ipv6 addresses prefixes */
+#define ipaddr_prefixcmp(addr1, addr2, length) \
+ (memcmp(addr1, addr2, length >> 3) == 0)
+
+/* local link, i.e. FE80::/10 */
+#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE)
+
+/*
+ * check whether we can compress the IID to 16 bits,
+ * it's possible for unicast adresses with first 49 bits are zero only.
+ */
+#define lowpan_is_iid_16_bit_compressable(a) \
+ ((((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr16[5]) == 0) && \
+ (((a)->s6_addr16[6]) == 0) && \
+ ((((a)->s6_addr[14]) & 0x80) == 0))
+
+/* multicast address */
+#define is_addr_mcast(a) (((a)->s6_addr[0]) == 0xFF)
+
+/* check whether the 112-bit gid of the multicast address is mappable to: */
+
+/* 9 bits, for FF02::1 (all nodes) and FF02::2 (all routers) addresses only. */
+#define lowpan_is_mcast_addr_compressable(a) \
+ ((((a)->s6_addr16[1]) == 0) && \
+ (((a)->s6_addr16[2]) == 0) && \
+ (((a)->s6_addr16[3]) == 0) && \
+ (((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr16[5]) == 0) && \
+ (((a)->s6_addr16[6]) == 0) && \
+ (((a)->s6_addr[14]) == 0) && \
+ ((((a)->s6_addr[15]) == 1) || (((a)->s6_addr[15]) == 2)))
+
+/* 48 bits, FFXX::00XX:XXXX:XXXX */
+#define lowpan_is_mcast_addr_compressable48(a) \
+ ((((a)->s6_addr16[1]) == 0) && \
+ (((a)->s6_addr16[2]) == 0) && \
+ (((a)->s6_addr16[3]) == 0) && \
+ (((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr[10]) == 0))
+
+/* 32 bits, FFXX::00XX:XXXX */
+#define lowpan_is_mcast_addr_compressable32(a) \
+ ((((a)->s6_addr16[1]) == 0) && \
+ (((a)->s6_addr16[2]) == 0) && \
+ (((a)->s6_addr16[3]) == 0) && \
+ (((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr16[5]) == 0) && \
+ (((a)->s6_addr[12]) == 0))
+
+/* 8 bits, FF02::00XX */
+#define lowpan_is_mcast_addr_compressable8(a) \
+ ((((a)->s6_addr[1]) == 2) && \
+ (((a)->s6_addr16[1]) == 0) && \
+ (((a)->s6_addr16[2]) == 0) && \
+ (((a)->s6_addr16[3]) == 0) && \
+ (((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr16[5]) == 0) && \
+ (((a)->s6_addr16[6]) == 0) && \
+ (((a)->s6_addr[14]) == 0))
+
+#define lowpan_is_addr_broadcast(a) \
+ ((((a)[0]) == 0xFF) && \
+ (((a)[1]) == 0xFF) && \
+ (((a)[2]) == 0xFF) && \
+ (((a)[3]) == 0xFF) && \
+ (((a)[4]) == 0xFF) && \
+ (((a)[5]) == 0xFF) && \
+ (((a)[6]) == 0xFF) && \
+ (((a)[7]) == 0xFF))
+
+#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */
+#define LOWPAN_DISPATCH_HC1 0x42 /* 01000010 = 66 */
+#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... */
+#define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */
+#define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */
+
+/*
+ * Values of fields within the IPHC encoding first byte
+ * (C stands for compressed and I for inline)
+ */
+#define LOWPAN_IPHC_TF 0x18
+
+#define LOWPAN_IPHC_FL_C 0x10
+#define LOWPAN_IPHC_TC_C 0x08
+#define LOWPAN_IPHC_NH_C 0x04
+#define LOWPAN_IPHC_TTL_1 0x01
+#define LOWPAN_IPHC_TTL_64 0x02
+#define LOWPAN_IPHC_TTL_255 0x03
+#define LOWPAN_IPHC_TTL_I 0x00
+
+
+/* Values of fields within the IPHC encoding second byte */
+#define LOWPAN_IPHC_CID 0x80
+
+#define LOWPAN_IPHC_SAC 0x40
+#define LOWPAN_IPHC_SAM_00 0x00
+#define LOWPAN_IPHC_SAM_01 0x10
+#define LOWPAN_IPHC_SAM_10 0x20
+#define LOWPAN_IPHC_SAM 0x30
+
+#define LOWPAN_IPHC_SAM_BIT 4
+
+#define LOWPAN_IPHC_M 0x08
+#define LOWPAN_IPHC_DAC 0x04
+#define LOWPAN_IPHC_DAM_00 0x00
+#define LOWPAN_IPHC_DAM_01 0x01
+#define LOWPAN_IPHC_DAM_10 0x02
+#define LOWPAN_IPHC_DAM_11 0x03
+
+#define LOWPAN_IPHC_DAM_BIT 0
+/*
+ * LOWPAN_UDP encoding (works together with IPHC)
+ */
+#define LOWPAN_NHC_UDP_MASK 0xF8
+#define LOWPAN_NHC_UDP_ID 0xF0
+#define LOWPAN_NHC_UDP_CHECKSUMC 0x04
+#define LOWPAN_NHC_UDP_CHECKSUMI 0x00
+
+/* values for port compression, _with checksum_ ie bit 5 set to 0 */
+#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
+#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
+ dest = 0xF0 + 8 bit inline */
+#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline,
+ dest = 16 bit inline */
+#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
+
+#endif /* __6LOWPAN_H__ */
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig
index 1c1de97d264..7dee6505292 100644
--- a/net/ieee802154/Kconfig
+++ b/net/ieee802154/Kconfig
@@ -10,3 +10,9 @@ config IEEE802154
Say Y here to compile LR-WPAN support into the kernel or say M to
compile it as modules.
+
+config IEEE802154_6LOWPAN
+ tristate "6lowpan support over IEEE 802.15.4"
+ depends on IEEE802154 && IPV6
+ ---help---
+ IPv6 compression over IEEE 802.15.4.
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index 5761185f884..d7716d64c6b 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,3 +1,5 @@
-obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
-ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
-af_802154-y := af_ieee802154.o raw.o dgram.o
+obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
+obj-$(CONFIG_IEEE802154_6LOWPAN) += 6lowpan.o
+
+ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
+af_802154-y := af_ieee802154.o raw.o dgram.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index dd2b9478ddd..1b5096a9875 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -893,7 +893,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
EXPORT_SYMBOL(inet_ioctl);
#ifdef CONFIG_COMPAT
-int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
int err = -ENOIOCTLCMD;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2c2a98e402e..86f3b885b4f 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -476,7 +476,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
doi = doi_def->doi;
doi_type = doi_def->type;
- if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
+ if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
goto doi_add_return;
for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
switch (doi_def->tags[iter]) {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index bc19bd06dd0..c6b5092f29a 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -258,7 +258,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
ip_mc_up(in_dev);
/* we can receive as soon as ip_ptr is set -- do this last */
- rcu_assign_pointer(dev->ip_ptr, in_dev);
+ RCU_INIT_POINTER(dev->ip_ptr, in_dev);
out:
return in_dev;
out_kfree:
@@ -291,7 +291,7 @@ static void inetdev_destroy(struct in_device *in_dev)
inet_free_ifa(ifa);
}
- rcu_assign_pointer(dev->ip_ptr, NULL);
+ RCU_INIT_POINTER(dev->ip_ptr, NULL);
devinet_sysctl_unregister(in_dev);
neigh_parms_release(&arp_tbl, in_dev->arp_parms);
@@ -1175,7 +1175,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_REGISTER:
printk(KERN_DEBUG "inetdev_event: bug\n");
- rcu_assign_pointer(dev->ip_ptr, NULL);
+ RCU_INIT_POINTER(dev->ip_ptr, NULL);
break;
case NETDEV_UP:
if (!inetdev_valid_mtu(dev->mtu))
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index de9e2978476..89d6f71a6a9 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -204,7 +204,7 @@ static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
return (struct tnode *)(parent & ~NODE_TYPE_MASK);
}
-/* Same as rcu_assign_pointer
+/* Same as RCU_INIT_POINTER
* but that macro() assumes that value is a pointer.
*/
static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
@@ -528,7 +528,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *
if (n)
node_set_parent(n, tn);
- rcu_assign_pointer(tn->child[i], n);
+ RCU_INIT_POINTER(tn->child[i], n);
}
#define MAX_WORK 10
@@ -1014,7 +1014,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
tp = node_parent((struct rt_trie_node *) tn);
if (!tp)
- rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+ RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
tnode_free_flush();
if (!tp)
@@ -1026,7 +1026,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
if (IS_TNODE(tn))
tn = (struct tnode *)resize(t, (struct tnode *)tn);
- rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+ RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
tnode_free_flush();
}
@@ -1163,7 +1163,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
put_child(t, (struct tnode *)tp, cindex,
(struct rt_trie_node *)tn);
} else {
- rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+ RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
tp = tn;
}
}
@@ -1621,7 +1621,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l)
put_child(t, (struct tnode *)tp, cindex, NULL);
trie_rebalance(t, tp);
} else
- rcu_assign_pointer(t->trie, NULL);
+ RCU_INIT_POINTER(t->trie, NULL);
free_leaf(l);
}
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index dbfc21de347..8cb1ebb7cd7 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -34,7 +34,7 @@ int gre_add_protocol(const struct gre_protocol *proto, u8 version)
if (gre_proto[version])
goto err_out_unlock;
- rcu_assign_pointer(gre_proto[version], proto);
+ RCU_INIT_POINTER(gre_proto[version], proto);
spin_unlock(&gre_proto_lock);
return 0;
@@ -54,7 +54,7 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
if (rcu_dereference_protected(gre_proto[version],
lockdep_is_held(&gre_proto_lock)) != proto)
goto err_out_unlock;
- rcu_assign_pointer(gre_proto[version], NULL);
+ RCU_INIT_POINTER(gre_proto[version], NULL);
spin_unlock(&gre_proto_lock);
synchronize_rcu();
return 0;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 23ef31baa1a..ab188ae12fd 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1152,10 +1152,9 @@ static int __net_init icmp_sk_init(struct net *net)
net->ipv4.icmp_sk[i] = sk;
/* Enough space for 2 64K ICMP packets, including
- * sk_buff struct overhead.
+ * sk_buff/skb_shared_info struct overhead.
*/
- sk->sk_sndbuf =
- (2 * ((64 * 1024) + sizeof(struct sk_buff)));
+ sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
/*
* Speedup sock_wfree()
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d577199eabd..c7472eff2d5 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1009,7 +1009,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
We will get multicast token leakage, when IFF_MULTICAST
- is changed. This check should be done in dev->set_multicast_list
+ is changed. This check should be done in ndo_set_rx_mode
routine. Something sort of:
if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
--ANK
@@ -1242,7 +1242,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
im->next_rcu = in_dev->mc_list;
in_dev->mc_count++;
- rcu_assign_pointer(in_dev->mc_list, im);
+ RCU_INIT_POINTER(in_dev->mc_list, im);
#ifdef CONFIG_IP_MULTICAST
igmpv3_del_delrec(in_dev, im->multiaddr);
@@ -1813,7 +1813,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
iml->next_rcu = inet->mc_list;
iml->sflist = NULL;
iml->sfmode = MCAST_EXCLUDE;
- rcu_assign_pointer(inet->mc_list, iml);
+ RCU_INIT_POINTER(inet->mc_list, iml);
ip_mc_inc_group(in_dev, addr);
err = 0;
done:
@@ -1835,7 +1835,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
}
err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
iml->sfmode, psf->sl_count, psf->sl_addr, 0);
- rcu_assign_pointer(iml->sflist, NULL);
+ RCU_INIT_POINTER(iml->sflist, NULL);
/* decrease mem now to avoid the memleak warning */
atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
kfree_rcu(psf, rcu);
@@ -2000,7 +2000,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
kfree_rcu(psl, rcu);
}
- rcu_assign_pointer(pmc->sflist, newpsl);
+ RCU_INIT_POINTER(pmc->sflist, newpsl);
psl = newpsl;
}
rv = 1; /* > 0 for insert logic below if sl_count is 0 */
@@ -2103,7 +2103,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
} else
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
0, NULL, 0);
- rcu_assign_pointer(pmc->sflist, newpsl);
+ RCU_INIT_POINTER(pmc->sflist, newpsl);
pmc->sfmode = msf->imsf_fmode;
err = 0;
done:
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 389a2e6a17f..f5e2bdaef94 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -108,6 +108,9 @@ static int inet_csk_diag_fill(struct sock *sk,
icsk->icsk_ca_ops->name);
}
+ if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
+ RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+
r->idiag_family = sk->sk_family;
r->idiag_state = sk->sk_state;
r->idiag_timer = 0;
@@ -130,6 +133,8 @@ static int inet_csk_diag_fill(struct sock *sk,
&np->rcv_saddr);
ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
&np->daddr);
+ if (ext & (1 << (INET_DIAG_TOS - 1)))
+ RTA_PUT_U8(skb, INET_DIAG_TOS, np->tclass);
}
#endif
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index ef7ae6049a5..cc280a3f4f9 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -244,11 +244,11 @@ static void lro_add_frags(struct net_lro_desc *lro_desc,
skb->truesize += truesize;
skb_frags[0].page_offset += hlen;
- skb_frags[0].size -= hlen;
+ skb_frag_size_sub(&skb_frags[0], hlen);
while (tcp_data_len > 0) {
*(lro_desc->next_frag) = *skb_frags;
- tcp_data_len -= skb_frags->size;
+ tcp_data_len -= skb_frag_size(skb_frags);
lro_desc->next_frag++;
skb_frags++;
skb_shinfo(skb)->nr_frags++;
@@ -400,14 +400,14 @@ static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
skb_frags = skb_shinfo(skb)->frags;
while (data_len > 0) {
*skb_frags = *frags;
- data_len -= frags->size;
+ data_len -= skb_frag_size(frags);
skb_frags++;
frags++;
skb_shinfo(skb)->nr_frags++;
}
skb_shinfo(skb)->frags[0].page_offset += hdr_len;
- skb_shinfo(skb)->frags[0].size -= hdr_len;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hdr_len);
skb->ip_summed = ip_summed;
skb->csum = sum;
@@ -433,7 +433,7 @@ static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
if (!lro_mgr->get_frag_header ||
lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
(void *)&tcph, &flags, priv)) {
- mac_hdr = page_address(frags->page) + frags->page_offset;
+ mac_hdr = skb_frag_address(frags);
goto out1;
}
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 3c8dfa16614..44d65d546e3 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -183,6 +183,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
tw->tw_daddr = inet->inet_daddr;
tw->tw_rcv_saddr = inet->inet_rcv_saddr;
tw->tw_bound_dev_if = sk->sk_bound_dev_if;
+ tw->tw_tos = inet->tos;
tw->tw_num = inet->inet_num;
tw->tw_state = TCP_TIME_WAIT;
tw->tw_substate = state;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 0e0ab98abc6..fdaabf2f2b6 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -599,8 +599,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
head->next = clone;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
- for (i=0; i<skb_shinfo(head)->nr_frags; i++)
- plen += skb_shinfo(head)->frags[i].size;
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->len = clone->data_len = head->data_len - plen;
head->data_len -= clone->len;
head->len -= clone->len;
@@ -682,6 +682,42 @@ int ip_defrag(struct sk_buff *skb, u32 user)
}
EXPORT_SYMBOL(ip_defrag);
+struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+{
+ const struct iphdr *iph;
+ u32 len;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return skb;
+
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return skb;
+
+ iph = ip_hdr(skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ return skb;
+ if (!pskb_may_pull(skb, iph->ihl*4))
+ return skb;
+ iph = ip_hdr(skb);
+ len = ntohs(iph->tot_len);
+ if (skb->len < len || len < (iph->ihl * 4))
+ return skb;
+
+ if (ip_is_fragment(ip_hdr(skb))) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (skb) {
+ if (pskb_trim_rcsum(skb, len))
+ return skb;
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ if (ip_defrag(skb, user))
+ return NULL;
+ skb->rxhash = 0;
+ }
+ }
+ return skb;
+}
+EXPORT_SYMBOL(ip_check_defrag);
+
#ifdef CONFIG_SYSCTL
static int zero;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d7bb94c4834..d55110e9312 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -835,8 +835,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
(skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
- if (max_headroom > dev->needed_headroom)
- dev->needed_headroom = max_headroom;
if (!new_skb) {
ip_rt_put(rt);
dev->stats.tx_dropped++;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8c6563361ab..0bc95f3977d 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -989,13 +989,13 @@ alloc_new_skb:
if (page && (left = PAGE_SIZE - off) > 0) {
if (copy >= left)
copy = left;
- if (page != frag->page) {
+ if (page != skb_frag_page(frag)) {
if (i == MAX_SKB_FRAGS) {
err = -EMSGSIZE;
goto error;
}
- get_page(page);
skb_fill_page_desc(skb, i, page, off, 0);
+ skb_frag_ref(skb, i);
frag = &skb_shinfo(skb)->frags[i];
}
} else if (i < MAX_SKB_FRAGS) {
@@ -1015,12 +1015,13 @@ alloc_new_skb:
err = -EMSGSIZE;
goto error;
}
- if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
+ if (getfrag(from, skb_frag_address(frag)+skb_frag_size(frag),
+ offset, copy, skb->len, skb) < 0) {
err = -EFAULT;
goto error;
}
cork->off += copy;
- frag->size += copy;
+ skb_frag_size_add(frag, copy);
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
@@ -1229,7 +1230,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
if (len > size)
len = size;
if (skb_can_coalesce(skb, i, page, offset)) {
- skb_shinfo(skb)->frags[i-1].size += len;
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
} else if (i < MAX_SKB_FRAGS) {
get_page(page);
skb_fill_page_desc(skb, i, page, offset, len);
@@ -1465,7 +1466,7 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
* structure to pass arguments.
*/
void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
- struct ip_reply_arg *arg, unsigned int len)
+ const struct ip_reply_arg *arg, unsigned int len)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_options_data replyopts;
@@ -1488,7 +1489,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
}
flowi4_init_output(&fl4, arg->bound_dev_if, 0,
- RT_TOS(ip_hdr(skb)->tos),
+ RT_TOS(arg->tos),
RT_SCOPE_UNIVERSE, sk->sk_protocol,
ip_reply_arg_flowi_flags(arg),
daddr, rt->rt_spec_dst,
@@ -1505,7 +1506,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
with locally disabled BH and that sk cannot be already spinlocked.
*/
bh_lock_sock(sk);
- inet->tos = ip_hdr(skb)->tos;
+ inet->tos = arg->tos;
sk->sk_priority = skb->priority;
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 8905e92f896..09ff51bf16a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -33,6 +33,7 @@
#include <linux/netfilter.h>
#include <linux/route.h>
#include <linux/mroute.h>
+#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/compat.h>
@@ -578,8 +579,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
break;
case IP_TOS: /* This sets both TOS and Precedence */
if (sk->sk_type == SOCK_STREAM) {
- val &= ~3;
- val |= inet->tos & 3;
+ val &= ~INET_ECN_MASK;
+ val |= inet->tos & INET_ECN_MASK;
}
if (inet->tos != val) {
inet->tos = val;
@@ -961,7 +962,7 @@ mc_msf_out:
break;
case IP_TRANSPARENT:
- if (!capable(CAP_NET_ADMIN)) {
+ if (!!val && !capable(CAP_NET_RAW) && !capable(CAP_NET_ADMIN)) {
err = -EPERM;
break;
}
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 378b20b7ca6..065effd8349 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -231,7 +231,7 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
if (t == iter) {
- rcu_assign_pointer(*tp, t->next);
+ RCU_INIT_POINTER(*tp, t->next);
break;
}
}
@@ -241,8 +241,8 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
{
struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
- rcu_assign_pointer(t->next, rtnl_dereference(*tp));
- rcu_assign_pointer(*tp, t);
+ RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
+ RCU_INIT_POINTER(*tp, t);
}
static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
@@ -301,7 +301,7 @@ static void ipip_tunnel_uninit(struct net_device *dev)
struct ipip_net *ipn = net_generic(net, ipip_net_id);
if (dev == ipn->fb_tunnel_dev)
- rcu_assign_pointer(ipn->tunnels_wc[0], NULL);
+ RCU_INIT_POINTER(ipn->tunnels_wc[0], NULL);
else
ipip_tunnel_unlink(ipn, netdev_priv(dev));
dev_put(dev);
@@ -791,7 +791,7 @@ static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
return -ENOMEM;
dev_hold(dev);
- rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
+ RCU_INIT_POINTER(ipn->tunnels_wc[0], tunnel);
return 0;
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 58e87915797..6164e982e0e 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1176,7 +1176,7 @@ static void mrtsock_destruct(struct sock *sk)
ipmr_for_each_table(mrt, net) {
if (sk == rtnl_dereference(mrt->mroute_sk)) {
IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
- rcu_assign_pointer(mrt->mroute_sk, NULL);
+ RCU_INIT_POINTER(mrt->mroute_sk, NULL);
mroute_clean_tables(mrt);
}
}
@@ -1203,7 +1203,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -ENOENT;
if (optname != MRT_INIT) {
- if (sk != rcu_dereference_raw(mrt->mroute_sk) &&
+ if (sk != rcu_access_pointer(mrt->mroute_sk) &&
!capable(CAP_NET_ADMIN))
return -EACCES;
}
@@ -1224,13 +1224,13 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
ret = ip_ra_control(sk, 1, mrtsock_destruct);
if (ret == 0) {
- rcu_assign_pointer(mrt->mroute_sk, sk);
+ RCU_INIT_POINTER(mrt->mroute_sk, sk);
IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
}
rtnl_unlock();
return ret;
case MRT_DONE:
- if (sk != rcu_dereference_raw(mrt->mroute_sk))
+ if (sk != rcu_access_pointer(mrt->mroute_sk))
return -EACCES;
return ip_ra_control(sk, 0, NULL);
case MRT_ADD_VIF:
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 703f366fd23..7b22382ff0e 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -70,14 +70,14 @@ static unsigned int help(struct sk_buff *skb,
static void __exit nf_nat_amanda_fini(void)
{
- rcu_assign_pointer(nf_nat_amanda_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_amanda_hook, NULL);
synchronize_rcu();
}
static int __init nf_nat_amanda_init(void)
{
BUG_ON(nf_nat_amanda_hook != NULL);
- rcu_assign_pointer(nf_nat_amanda_hook, help);
+ RCU_INIT_POINTER(nf_nat_amanda_hook, help);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 3346de5d94d..447bc5cfdc6 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -514,7 +514,7 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
ret = -EBUSY;
goto out;
}
- rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
+ RCU_INIT_POINTER(nf_nat_protos[proto->protonum], proto);
out:
spin_unlock_bh(&nf_nat_lock);
return ret;
@@ -525,7 +525,7 @@ EXPORT_SYMBOL(nf_nat_protocol_register);
void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
{
spin_lock_bh(&nf_nat_lock);
- rcu_assign_pointer(nf_nat_protos[proto->protonum],
+ RCU_INIT_POINTER(nf_nat_protos[proto->protonum],
&nf_nat_unknown_protocol);
spin_unlock_bh(&nf_nat_lock);
synchronize_rcu();
@@ -736,10 +736,10 @@ static int __init nf_nat_init(void)
/* Sew in builtin protocols. */
spin_lock_bh(&nf_nat_lock);
for (i = 0; i < MAX_IP_NAT_PROTO; i++)
- rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
- rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
- rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
- rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
+ RCU_INIT_POINTER(nf_nat_protos[i], &nf_nat_unknown_protocol);
+ RCU_INIT_POINTER(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
+ RCU_INIT_POINTER(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
+ RCU_INIT_POINTER(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
spin_unlock_bh(&nf_nat_lock);
/* Initialize fake conntrack so that NAT will skip it */
@@ -748,12 +748,12 @@ static int __init nf_nat_init(void)
l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
BUG_ON(nf_nat_seq_adjust_hook != NULL);
- rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
+ RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
- rcu_assign_pointer(nfnetlink_parse_nat_setup_hook,
+ RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
nfnetlink_parse_nat_setup);
BUG_ON(nf_ct_nat_offset != NULL);
- rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset);
+ RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
return 0;
cleanup_extend:
@@ -766,9 +766,9 @@ static void __exit nf_nat_cleanup(void)
unregister_pernet_subsys(&nf_nat_net_ops);
nf_ct_l3proto_put(l3proto);
nf_ct_extend_unregister(&nat_extend);
- rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
- rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL);
- rcu_assign_pointer(nf_ct_nat_offset, NULL);
+ RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
+ RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
+ RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
synchronize_net();
}
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
index dc73abb3fe2..e462a957d08 100644
--- a/net/ipv4/netfilter/nf_nat_ftp.c
+++ b/net/ipv4/netfilter/nf_nat_ftp.c
@@ -113,14 +113,14 @@ out:
static void __exit nf_nat_ftp_fini(void)
{
- rcu_assign_pointer(nf_nat_ftp_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_ftp_hook, NULL);
synchronize_rcu();
}
static int __init nf_nat_ftp_init(void)
{
BUG_ON(nf_nat_ftp_hook != NULL);
- rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp);
+ RCU_INIT_POINTER(nf_nat_ftp_hook, nf_nat_ftp);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 790f3160e01..b9a1136addb 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -581,30 +581,30 @@ static int __init init(void)
BUG_ON(nat_callforwarding_hook != NULL);
BUG_ON(nat_q931_hook != NULL);
- rcu_assign_pointer(set_h245_addr_hook, set_h245_addr);
- rcu_assign_pointer(set_h225_addr_hook, set_h225_addr);
- rcu_assign_pointer(set_sig_addr_hook, set_sig_addr);
- rcu_assign_pointer(set_ras_addr_hook, set_ras_addr);
- rcu_assign_pointer(nat_rtp_rtcp_hook, nat_rtp_rtcp);
- rcu_assign_pointer(nat_t120_hook, nat_t120);
- rcu_assign_pointer(nat_h245_hook, nat_h245);
- rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding);
- rcu_assign_pointer(nat_q931_hook, nat_q931);
+ RCU_INIT_POINTER(set_h245_addr_hook, set_h245_addr);
+ RCU_INIT_POINTER(set_h225_addr_hook, set_h225_addr);
+ RCU_INIT_POINTER(set_sig_addr_hook, set_sig_addr);
+ RCU_INIT_POINTER(set_ras_addr_hook, set_ras_addr);
+ RCU_INIT_POINTER(nat_rtp_rtcp_hook, nat_rtp_rtcp);
+ RCU_INIT_POINTER(nat_t120_hook, nat_t120);
+ RCU_INIT_POINTER(nat_h245_hook, nat_h245);
+ RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding);
+ RCU_INIT_POINTER(nat_q931_hook, nat_q931);
return 0;
}
/****************************************************************************/
static void __exit fini(void)
{
- rcu_assign_pointer(set_h245_addr_hook, NULL);
- rcu_assign_pointer(set_h225_addr_hook, NULL);
- rcu_assign_pointer(set_sig_addr_hook, NULL);
- rcu_assign_pointer(set_ras_addr_hook, NULL);
- rcu_assign_pointer(nat_rtp_rtcp_hook, NULL);
- rcu_assign_pointer(nat_t120_hook, NULL);
- rcu_assign_pointer(nat_h245_hook, NULL);
- rcu_assign_pointer(nat_callforwarding_hook, NULL);
- rcu_assign_pointer(nat_q931_hook, NULL);
+ RCU_INIT_POINTER(set_h245_addr_hook, NULL);
+ RCU_INIT_POINTER(set_h225_addr_hook, NULL);
+ RCU_INIT_POINTER(set_sig_addr_hook, NULL);
+ RCU_INIT_POINTER(set_ras_addr_hook, NULL);
+ RCU_INIT_POINTER(nat_rtp_rtcp_hook, NULL);
+ RCU_INIT_POINTER(nat_t120_hook, NULL);
+ RCU_INIT_POINTER(nat_h245_hook, NULL);
+ RCU_INIT_POINTER(nat_callforwarding_hook, NULL);
+ RCU_INIT_POINTER(nat_q931_hook, NULL);
synchronize_rcu();
}
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c
index 535e1a80235..979ae165f4e 100644
--- a/net/ipv4/netfilter/nf_nat_irc.c
+++ b/net/ipv4/netfilter/nf_nat_irc.c
@@ -75,14 +75,14 @@ static unsigned int help(struct sk_buff *skb,
static void __exit nf_nat_irc_fini(void)
{
- rcu_assign_pointer(nf_nat_irc_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_irc_hook, NULL);
synchronize_rcu();
}
static int __init nf_nat_irc_init(void)
{
BUG_ON(nf_nat_irc_hook != NULL);
- rcu_assign_pointer(nf_nat_irc_hook, help);
+ RCU_INIT_POINTER(nf_nat_irc_hook, help);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 4c060038d29..3e8284ba46b 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -282,25 +282,25 @@ static int __init nf_nat_helper_pptp_init(void)
nf_nat_need_gre();
BUG_ON(nf_nat_pptp_hook_outbound != NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);
BUG_ON(nf_nat_pptp_hook_inbound != NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, pptp_inbound_pkt);
BUG_ON(nf_nat_pptp_hook_exp_gre != NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, pptp_exp_gre);
BUG_ON(nf_nat_pptp_hook_expectfn != NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, pptp_nat_expected);
return 0;
}
static void __exit nf_nat_helper_pptp_fini(void)
{
- rcu_assign_pointer(nf_nat_pptp_hook_expectfn, NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_inbound, NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_outbound, NULL);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, NULL);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, NULL);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, NULL);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, NULL);
synchronize_rcu();
}
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index e40cf7816fd..78844d9208f 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -528,13 +528,13 @@ err1:
static void __exit nf_nat_sip_fini(void)
{
- rcu_assign_pointer(nf_nat_sip_hook, NULL);
- rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, NULL);
- rcu_assign_pointer(nf_nat_sip_expect_hook, NULL);
- rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL);
- rcu_assign_pointer(nf_nat_sdp_port_hook, NULL);
- rcu_assign_pointer(nf_nat_sdp_session_hook, NULL);
- rcu_assign_pointer(nf_nat_sdp_media_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sip_expect_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sdp_addr_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
synchronize_rcu();
}
@@ -547,13 +547,13 @@ static int __init nf_nat_sip_init(void)
BUG_ON(nf_nat_sdp_port_hook != NULL);
BUG_ON(nf_nat_sdp_session_hook != NULL);
BUG_ON(nf_nat_sdp_media_hook != NULL);
- rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip);
- rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
- rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect);
- rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
- rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port);
- rcu_assign_pointer(nf_nat_sdp_session_hook, ip_nat_sdp_session);
- rcu_assign_pointer(nf_nat_sdp_media_hook, ip_nat_sdp_media);
+ RCU_INIT_POINTER(nf_nat_sip_hook, ip_nat_sip);
+ RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
+ RCU_INIT_POINTER(nf_nat_sip_expect_hook, ip_nat_sip_expect);
+ RCU_INIT_POINTER(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
+ RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port);
+ RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session);
+ RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 076b7c8c4aa..d1cb412c18e 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1310,7 +1310,7 @@ static int __init nf_nat_snmp_basic_init(void)
int ret = 0;
BUG_ON(nf_nat_snmp_hook != NULL);
- rcu_assign_pointer(nf_nat_snmp_hook, help);
+ RCU_INIT_POINTER(nf_nat_snmp_hook, help);
ret = nf_conntrack_helper_register(&snmp_trap_helper);
if (ret < 0) {
@@ -1322,7 +1322,7 @@ static int __init nf_nat_snmp_basic_init(void)
static void __exit nf_nat_snmp_basic_fini(void)
{
- rcu_assign_pointer(nf_nat_snmp_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
nf_conntrack_helper_unregister(&snmp_trap_helper);
}
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index a6e606e8482..92900482ede 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -284,7 +284,7 @@ static int __init nf_nat_standalone_init(void)
#ifdef CONFIG_XFRM
BUG_ON(ip_nat_decode_session != NULL);
- rcu_assign_pointer(ip_nat_decode_session, nat_decode_session);
+ RCU_INIT_POINTER(ip_nat_decode_session, nat_decode_session);
#endif
ret = nf_nat_rule_init();
if (ret < 0) {
@@ -302,7 +302,7 @@ static int __init nf_nat_standalone_init(void)
nf_nat_rule_cleanup();
cleanup_decode_session:
#ifdef CONFIG_XFRM
- rcu_assign_pointer(ip_nat_decode_session, NULL);
+ RCU_INIT_POINTER(ip_nat_decode_session, NULL);
synchronize_net();
#endif
return ret;
@@ -313,7 +313,7 @@ static void __exit nf_nat_standalone_fini(void)
nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
nf_nat_rule_cleanup();
#ifdef CONFIG_XFRM
- rcu_assign_pointer(ip_nat_decode_session, NULL);
+ RCU_INIT_POINTER(ip_nat_decode_session, NULL);
synchronize_net();
#endif
/* Conntrack caches are unregistered in nf_conntrack_cleanup */
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index 7274a43c7a1..a2901bf829c 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -36,14 +36,14 @@ static unsigned int help(struct sk_buff *skb,
static void __exit nf_nat_tftp_fini(void)
{
- rcu_assign_pointer(nf_nat_tftp_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_tftp_hook, NULL);
synchronize_rcu();
}
static int __init nf_nat_tftp_init(void)
{
BUG_ON(nf_nat_tftp_hook != NULL);
- rcu_assign_pointer(nf_nat_tftp_hook, help);
+ RCU_INIT_POINTER(nf_nat_tftp_hook, help);
return 0;
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 075212e41b8..155138d8ec8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -120,7 +120,6 @@
static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
-static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
@@ -324,7 +323,7 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
struct rtable *r = NULL;
for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
- if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
+ if (!rcu_access_pointer(rt_hash_table[st->bucket].chain))
continue;
rcu_read_lock_bh();
r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
@@ -350,7 +349,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
do {
if (--st->bucket < 0)
return NULL;
- } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
+ } while (!rcu_access_pointer(rt_hash_table[st->bucket].chain));
rcu_read_lock_bh();
r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
}
@@ -761,7 +760,7 @@ static void rt_do_flush(struct net *net, int process_context)
if (process_context && need_resched())
cond_resched();
- rth = rcu_dereference_raw(rt_hash_table[i].chain);
+ rth = rcu_access_pointer(rt_hash_table[i].chain);
if (!rth)
continue;
@@ -1309,7 +1308,12 @@ static void rt_del(unsigned hash, struct rtable *rt)
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
__be32 saddr, struct net_device *dev)
{
+ int s, i;
struct in_device *in_dev = __in_dev_get_rcu(dev);
+ struct rtable *rt;
+ __be32 skeys[2] = { saddr, 0 };
+ int ikeys[2] = { dev->ifindex, 0 };
+ struct flowi4 fl4;
struct inet_peer *peer;
struct net *net;
@@ -1332,13 +1336,34 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
goto reject_redirect;
}
- peer = inet_getpeer_v4(daddr, 1);
- if (peer) {
- peer->redirect_learned.a4 = new_gw;
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.daddr = daddr;
+ for (s = 0; s < 2; s++) {
+ for (i = 0; i < 2; i++) {
+ fl4.flowi4_oif = ikeys[i];
+ fl4.saddr = skeys[s];
+ rt = __ip_route_output_key(net, &fl4);
+ if (IS_ERR(rt))
+ continue;
- inet_putpeer(peer);
+ if (rt->dst.error || rt->dst.dev != dev ||
+ rt->rt_gateway != old_gw) {
+ ip_rt_put(rt);
+ continue;
+ }
+
+ if (!rt->peer)
+ rt_bind_peer(rt, rt->rt_dst, 1);
+
+ peer = rt->peer;
+ if (peer) {
+ peer->redirect_learned.a4 = new_gw;
+ atomic_inc(&__rt_peer_genid);
+ }
- atomic_inc(&__rt_peer_genid);
+ ip_rt_put(rt);
+ return;
+ }
}
return;
@@ -1568,11 +1593,10 @@ unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
est_mtu = mtu;
peer->pmtu_learned = mtu;
peer->pmtu_expires = pmtu_expires;
+ atomic_inc(&__rt_peer_genid);
}
inet_putpeer(peer);
-
- atomic_inc(&__rt_peer_genid);
}
return est_mtu ? : new_mtu;
}
@@ -3121,13 +3145,6 @@ static ctl_table ipv4_route_table[] = {
.proc_handler = proc_dointvec_jiffies,
},
{
- .procname = "gc_interval",
- .data = &ip_rt_gc_interval,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
.procname = "redirect_load",
.data = &ip_rt_redirect_load,
.maxlen = sizeof(int),
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 3bc5c8f7c71..d7b89b12f6d 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -265,7 +265,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt)
{
struct tcp_options_received tcp_opt;
- u8 *hash_location;
+ const u8 *hash_location;
struct inet_request_sock *ireq;
struct tcp_request_sock *treq;
struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 46febcacb72..34f5db1e1c8 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -374,7 +374,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
unsigned int mask;
struct sock *sk = sock->sk;
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == TCP_LISTEN)
@@ -524,11 +524,11 @@ EXPORT_SYMBOL(tcp_ioctl);
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
- TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
tp->pushed_seq = tp->write_seq;
}
-static inline int forced_push(struct tcp_sock *tp)
+static inline int forced_push(const struct tcp_sock *tp)
{
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
@@ -540,7 +540,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
skb->csum = 0;
tcb->seq = tcb->end_seq = tp->write_seq;
- tcb->flags = TCPHDR_ACK;
+ tcb->tcp_flags = TCPHDR_ACK;
tcb->sacked = 0;
skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
@@ -813,7 +813,7 @@ new_segment:
goto wait_for_memory;
if (can_coalesce) {
- skb_shinfo(skb)->frags[i - 1].size += copy;
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
} else {
get_page(page);
skb_fill_page_desc(skb, i, page, offset, copy);
@@ -830,7 +830,7 @@ new_segment:
skb_shinfo(skb)->gso_segs = 0;
if (!copied)
- TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
copied += copy;
poffset += copy;
@@ -891,9 +891,9 @@ EXPORT_SYMBOL(tcp_sendpage);
#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
#define TCP_OFF(sk) (sk->sk_sndmsg_off)
-static inline int select_size(struct sock *sk, int sg)
+static inline int select_size(const struct sock *sk, int sg)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
int tmp = tp->mss_cache;
if (sg) {
@@ -1058,8 +1058,7 @@ new_segment:
/* Update the skb. */
if (merge) {
- skb_shinfo(skb)->frags[i - 1].size +=
- copy;
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
} else {
skb_fill_page_desc(skb, i, page, off, copy);
if (TCP_PAGE(sk)) {
@@ -1074,7 +1073,7 @@ new_segment:
}
if (!copied)
- TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
@@ -1194,13 +1193,11 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
struct tcp_sock *tp = tcp_sk(sk);
int time_to_ack = 0;
-#if TCP_DEBUG
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
"cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
-#endif
if (inet_csk_ack_scheduled(sk)) {
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2409,7 +2406,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
unsigned int optlen)
{
- struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
if (level != SOL_TCP)
return icsk->icsk_af_ops->setsockopt(sk, level, optname,
@@ -2431,9 +2428,9 @@ EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif
/* Return information about state of tcp endpoint in API format. */
-void tcp_get_info(struct sock *sk, struct tcp_info *info)
+void tcp_get_info(const struct sock *sk, struct tcp_info *info)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 now = tcp_time_stamp;
@@ -2455,8 +2452,10 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
}
- if (tp->ecn_flags&TCP_ECN_OK)
+ if (tp->ecn_flags & TCP_ECN_OK)
info->tcpi_options |= TCPI_OPT_ECN;
+ if (tp->ecn_flags & TCP_ECN_SEEN)
+ info->tcpi_options |= TCPI_OPT_ECN_SEEN;
info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
@@ -2857,26 +2856,25 @@ EXPORT_SYMBOL(tcp_gro_complete);
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
-static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
+static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
{
int cpu;
+
for_each_possible_cpu(cpu) {
- struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
- if (p) {
- if (p->md5_desc.tfm)
- crypto_free_hash(p->md5_desc.tfm);
- kfree(p);
- }
+ struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
+
+ if (p->md5_desc.tfm)
+ crypto_free_hash(p->md5_desc.tfm);
}
free_percpu(pool);
}
void tcp_free_md5sig_pool(void)
{
- struct tcp_md5sig_pool * __percpu *pool = NULL;
+ struct tcp_md5sig_pool __percpu *pool = NULL;
spin_lock_bh(&tcp_md5sig_pool_lock);
if (--tcp_md5sig_users == 0) {
@@ -2889,30 +2887,24 @@ void tcp_free_md5sig_pool(void)
}
EXPORT_SYMBOL(tcp_free_md5sig_pool);
-static struct tcp_md5sig_pool * __percpu *
+static struct tcp_md5sig_pool __percpu *
__tcp_alloc_md5sig_pool(struct sock *sk)
{
int cpu;
- struct tcp_md5sig_pool * __percpu *pool;
+ struct tcp_md5sig_pool __percpu *pool;
- pool = alloc_percpu(struct tcp_md5sig_pool *);
+ pool = alloc_percpu(struct tcp_md5sig_pool);
if (!pool)
return NULL;
for_each_possible_cpu(cpu) {
- struct tcp_md5sig_pool *p;
struct crypto_hash *hash;
- p = kzalloc(sizeof(*p), sk->sk_allocation);
- if (!p)
- goto out_free;
- *per_cpu_ptr(pool, cpu) = p;
-
hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
if (!hash || IS_ERR(hash))
goto out_free;
- p->md5_desc.tfm = hash;
+ per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
}
return pool;
out_free:
@@ -2920,9 +2912,9 @@ out_free:
return NULL;
}
-struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
+struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
{
- struct tcp_md5sig_pool * __percpu *pool;
+ struct tcp_md5sig_pool __percpu *pool;
int alloc = 0;
retry:
@@ -2941,7 +2933,7 @@ retry:
if (alloc) {
/* we cannot hold spinlock here because this may sleep. */
- struct tcp_md5sig_pool * __percpu *p;
+ struct tcp_md5sig_pool __percpu *p;
p = __tcp_alloc_md5sig_pool(sk);
spin_lock_bh(&tcp_md5sig_pool_lock);
@@ -2974,7 +2966,7 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
*/
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
- struct tcp_md5sig_pool * __percpu *p;
+ struct tcp_md5sig_pool __percpu *p;
local_bh_disable();
@@ -2985,7 +2977,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
spin_unlock(&tcp_md5sig_pool_lock);
if (p)
- return *this_cpu_ptr(p);
+ return this_cpu_ptr(p);
local_bh_enable();
return NULL;
@@ -3000,23 +2992,25 @@ void tcp_put_md5sig_pool(void)
EXPORT_SYMBOL(tcp_put_md5sig_pool);
int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
- struct tcphdr *th)
+ const struct tcphdr *th)
{
struct scatterlist sg;
+ struct tcphdr hdr;
int err;
- __sum16 old_checksum = th->check;
- th->check = 0;
+ /* We are not allowed to change tcphdr, make a local copy */
+ memcpy(&hdr, th, sizeof(hdr));
+ hdr.check = 0;
+
/* options aren't included in the hash */
- sg_init_one(&sg, th, sizeof(struct tcphdr));
- err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
- th->check = old_checksum;
+ sg_init_one(&sg, &hdr, sizeof(hdr));
+ err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
return err;
}
EXPORT_SYMBOL(tcp_md5_hash_header);
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
- struct sk_buff *skb, unsigned header_len)
+ const struct sk_buff *skb, unsigned int header_len)
{
struct scatterlist sg;
const struct tcphdr *tp = tcp_hdr(skb);
@@ -3035,8 +3029,9 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
for (i = 0; i < shi->nr_frags; ++i) {
const struct skb_frag_struct *f = &shi->frags[i];
- sg_set_page(&sg, f->page, f->size, f->page_offset);
- if (crypto_hash_update(desc, &sg, f->size))
+ struct page *page = skb_frag_page(f);
+ sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
+ if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
return 1;
}
@@ -3048,7 +3043,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
}
EXPORT_SYMBOL(tcp_md5_hash_skb_data);
-int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
{
struct scatterlist sg;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 21fab3edb92..52b5c2d0ecd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -206,7 +206,7 @@ static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}
-static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
{
if (tcp_hdr(skb)->cwr)
tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
@@ -217,32 +217,41 @@ static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}
-static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
{
- if (tp->ecn_flags & TCP_ECN_OK) {
- if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
- tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ if (!(tp->ecn_flags & TCP_ECN_OK))
+ return;
+
+ switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
+ case INET_ECN_NOT_ECT:
/* Funny extension: if ECT is not set on a segment,
- * it is surely retransmit. It is not in ECN RFC,
- * but Linux follows this rule. */
- else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
+ * and we already seen ECT on a previous segment,
+ * it is probably a retransmit.
+ */
+ if (tp->ecn_flags & TCP_ECN_SEEN)
tcp_enter_quickack_mode((struct sock *)tp);
+ break;
+ case INET_ECN_CE:
+ tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ /* fallinto */
+ default:
+ tp->ecn_flags |= TCP_ECN_SEEN;
}
}
-static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th)
+static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
tp->ecn_flags &= ~TCP_ECN_OK;
}
-static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th)
+static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
tp->ecn_flags &= ~TCP_ECN_OK;
}
-static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
+static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
return 1;
@@ -256,14 +265,11 @@ static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
static void tcp_fixup_sndbuf(struct sock *sk)
{
- int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
- sizeof(struct sk_buff);
+ int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);
- if (sk->sk_sndbuf < 3 * sndmem) {
- sk->sk_sndbuf = 3 * sndmem;
- if (sk->sk_sndbuf > sysctl_tcp_wmem[2])
- sk->sk_sndbuf = sysctl_tcp_wmem[2];
- }
+ sndmem *= TCP_INIT_CWND;
+ if (sk->sk_sndbuf < sndmem)
+ sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
}
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -309,7 +315,7 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
return 0;
}
-static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
+static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -339,17 +345,24 @@ static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
static void tcp_fixup_rcvbuf(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
- int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
+ u32 mss = tcp_sk(sk)->advmss;
+ u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
+ int rcvmem;
- /* Try to select rcvbuf so that 4 mss-sized segments
- * will fit to window and corresponding skbs will fit to our rcvbuf.
- * (was 3; 4 is minimum to allow fast retransmit to work.)
+ /* Limit to 10 segments if mss <= 1460,
+ * or 14600/mss segments, with a minimum of two segments.
*/
- while (tcp_win_from_space(rcvmem) < tp->advmss)
+ if (mss > 1460)
+ icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+
+ rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
+ while (tcp_win_from_space(rcvmem) < mss)
rcvmem += 128;
- if (sk->sk_rcvbuf < 4 * rcvmem)
- sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
+
+ rcvmem *= icwnd;
+
+ if (sk->sk_rcvbuf < rcvmem)
+ sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
}
/* 4. Try to fixup all. It is made immediately after connection enters
@@ -416,7 +429,7 @@ static void tcp_clamp_window(struct sock *sk)
*/
void tcp_initialize_rcv_mss(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
hint = min(hint, tp->rcv_wnd / 2);
@@ -531,8 +544,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
space /= tp->advmss;
if (!space)
space = 1;
- rcvmem = (tp->advmss + MAX_TCP_HEADER +
- 16 + sizeof(struct sk_buff));
+ rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
while (tcp_win_from_space(rcvmem) < tp->advmss)
rcvmem += 128;
space *= rcvmem;
@@ -812,7 +824,7 @@ void tcp_update_metrics(struct sock *sk)
}
}
-__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
+__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
@@ -1204,7 +1216,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
tp->lost_retrans_low = new_low_seq;
}
-static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
+static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
struct tcp_sack_block_wire *sp, int num_sacks,
u32 prior_snd_una)
{
@@ -1298,7 +1310,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
return in_sack;
}
-static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
+static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
struct tcp_sacktag_state *state,
int dup_sack, int pcount)
{
@@ -1389,9 +1401,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
BUG_ON(!pcount);
- /* Tweak before seqno plays */
- if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
- !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
+ if (skb == tp->lost_skb_hint)
tp->lost_cnt_hint += pcount;
TCP_SKB_CB(prev)->end_seq += shifted;
@@ -1440,7 +1450,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
tp->lost_cnt_hint -= tcp_skb_pcount(prev);
}
- TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(prev)->flags;
+ TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
if (skb == tcp_highest_sack(sk))
tcp_advance_highest_sack(sk, skb);
@@ -1455,13 +1465,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
/* I wish gso_size would have a bit more sane initialization than
* something-or-zero which complicates things
*/
-static int tcp_skb_seglen(struct sk_buff *skb)
+static int tcp_skb_seglen(const struct sk_buff *skb)
{
return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
}
/* Shifting pages past head area doesn't work */
-static int skb_can_shift(struct sk_buff *skb)
+static int skb_can_shift(const struct sk_buff *skb)
{
return !skb_headlen(skb) && skb_is_nonlinear(skb);
}
@@ -1710,19 +1720,19 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
return skb;
}
-static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache)
+static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
{
return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
}
static int
-tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
+tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
u32 prior_snd_una)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- unsigned char *ptr = (skb_transport_header(ack_skb) +
- TCP_SKB_CB(ack_skb)->sacked);
+ const unsigned char *ptr = (skb_transport_header(ack_skb) +
+ TCP_SKB_CB(ack_skb)->sacked);
struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
struct tcp_sack_block sp[TCP_NUM_SACKS];
struct tcp_sack_block *cache;
@@ -2286,7 +2296,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
return 0;
}
-static inline int tcp_fackets_out(struct tcp_sock *tp)
+static inline int tcp_fackets_out(const struct tcp_sock *tp)
{
return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
}
@@ -2306,19 +2316,20 @@ static inline int tcp_fackets_out(struct tcp_sock *tp)
* they differ. Since neither occurs due to loss, TCP should really
* ignore them.
*/
-static inline int tcp_dupack_heuristics(struct tcp_sock *tp)
+static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
{
return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
}
-static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_skb_timedout(const struct sock *sk,
+ const struct sk_buff *skb)
{
return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
}
-static inline int tcp_head_timedout(struct sock *sk)
+static inline int tcp_head_timedout(const struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
return tp->packets_out &&
tcp_skb_timedout(sk, tcp_write_queue_head(sk));
@@ -2629,7 +2640,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
/* Nothing was retransmitted or returned timestamp is less
* than timestamp of the first retransmission.
*/
-static inline int tcp_packet_delayed(struct tcp_sock *tp)
+static inline int tcp_packet_delayed(const struct tcp_sock *tp)
{
return !tp->retrans_stamp ||
(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -2690,7 +2701,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
tp->snd_cwnd_stamp = tcp_time_stamp;
}
-static inline int tcp_may_undo(struct tcp_sock *tp)
+static inline int tcp_may_undo(const struct tcp_sock *tp)
{
return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}
@@ -2754,9 +2765,9 @@ static void tcp_try_undo_dsack(struct sock *sk)
* that successive retransmissions of a segment must not advance
* retrans_stamp under any conditions.
*/
-static int tcp_any_retrans_done(struct sock *sk)
+static int tcp_any_retrans_done(const struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
if (tp->retrans_out)
@@ -2830,9 +2841,13 @@ static int tcp_try_undo_loss(struct sock *sk)
static inline void tcp_complete_cwr(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- /* Do not moderate cwnd if it's already undone in cwr or recovery */
- if (tp->undo_marker && tp->snd_cwnd > tp->snd_ssthresh) {
- tp->snd_cwnd = tp->snd_ssthresh;
+
+ /* Do not moderate cwnd if it's already undone in cwr or recovery. */
+ if (tp->undo_marker) {
+ if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
+ tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+ else /* PRR */
+ tp->snd_cwnd = tp->snd_ssthresh;
tp->snd_cwnd_stamp = tcp_time_stamp;
}
tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
@@ -2950,6 +2965,38 @@ void tcp_simple_retransmit(struct sock *sk)
}
EXPORT_SYMBOL(tcp_simple_retransmit);
+/* This function implements the PRR algorithm, specifcally the PRR-SSRB
+ * (proportional rate reduction with slow start reduction bound) as described in
+ * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
+ * It computes the number of packets to send (sndcnt) based on packets newly
+ * delivered:
+ * 1) If the packets in flight is larger than ssthresh, PRR spreads the
+ * cwnd reductions across a full RTT.
+ * 2) If packets in flight is lower than ssthresh (such as due to excess
+ * losses and/or application stalls), do not perform any further cwnd
+ * reductions, but instead slow start up to ssthresh.
+ */
+static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
+ int fast_rexmit, int flag)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int sndcnt = 0;
+ int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
+
+ if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+ u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
+ tp->prior_cwnd - 1;
+ sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
+ } else {
+ sndcnt = min_t(int, delta,
+ max_t(int, tp->prr_delivered - tp->prr_out,
+ newly_acked_sacked) + 1);
+ }
+
+ sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
+ tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
+}
+
/* Process an event, which can update packets-in-flight not trivially.
* Main goal of this function is to calculate new estimate for left_out,
* taking into account both packets sitting in receiver's buffer and
@@ -2961,7 +3008,8 @@ EXPORT_SYMBOL(tcp_simple_retransmit);
* It does _not_ decide what to send, it is made in function
* tcp_xmit_retransmit_queue().
*/
-static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
+static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
+ int newly_acked_sacked, int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -3111,13 +3159,17 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
tp->bytes_acked = 0;
tp->snd_cwnd_cnt = 0;
+ tp->prior_cwnd = tp->snd_cwnd;
+ tp->prr_delivered = 0;
+ tp->prr_out = 0;
tcp_set_ca_state(sk, TCP_CA_Recovery);
fast_rexmit = 1;
}
if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
tcp_update_scoreboard(sk, fast_rexmit);
- tcp_cwnd_down(sk, flag);
+ tp->prr_delivered += newly_acked_sacked;
+ tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag);
tcp_xmit_retransmit_queue(sk);
}
@@ -3194,7 +3246,7 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
*/
static void tcp_rearm_rto(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
if (!tp->packets_out) {
inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
@@ -3298,7 +3350,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
* connection startup slow start one packet too
* quickly. This is severely frowned upon behavior.
*/
- if (!(scb->flags & TCPHDR_SYN)) {
+ if (!(scb->tcp_flags & TCPHDR_SYN)) {
flag |= FLAG_DATA_ACKED;
} else {
flag |= FLAG_SYN_ACKED;
@@ -3446,7 +3498,7 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp,
* Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
* and in FreeBSD. NetBSD's one is even worse.) is wrong.
*/
-static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
+static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
u32 ack_seq)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -3622,7 +3674,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
}
/* This routine deals with incoming acks, but not outgoing ones. */
-static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
+static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -3632,6 +3684,8 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
u32 prior_in_flight;
u32 prior_fackets;
int prior_packets;
+ int prior_sacked = tp->sacked_out;
+ int newly_acked_sacked = 0;
int frto_cwnd = 0;
/* If the ack is older than previous acks
@@ -3703,6 +3757,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
/* See if we can take anything off of the retransmit queue. */
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
+ newly_acked_sacked = (prior_packets - prior_sacked) -
+ (tp->packets_out - tp->sacked_out);
+
if (tp->frto_counter)
frto_cwnd = tcp_process_frto(sk, flag);
/* Guarantee sacktag reordering detection against wrap-arounds */
@@ -3715,7 +3772,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, prior_in_flight);
tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
- flag);
+ newly_acked_sacked, flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
tcp_cong_avoid(sk, ack, prior_in_flight);
@@ -3754,14 +3811,14 @@ old_ack:
* But, this can also be called on packets in the established flow when
* the fast version below fails.
*/
-void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
- u8 **hvpp, int estab)
+void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
+ const u8 **hvpp, int estab)
{
- unsigned char *ptr;
- struct tcphdr *th = tcp_hdr(skb);
+ const unsigned char *ptr;
+ const struct tcphdr *th = tcp_hdr(skb);
int length = (th->doff * 4) - sizeof(struct tcphdr);
- ptr = (unsigned char *)(th + 1);
+ ptr = (const unsigned char *)(th + 1);
opt_rx->saw_tstamp = 0;
while (length > 0) {
@@ -3872,9 +3929,9 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
}
EXPORT_SYMBOL(tcp_parse_options);
-static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
+static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
{
- __be32 *ptr = (__be32 *)(th + 1);
+ const __be32 *ptr = (const __be32 *)(th + 1);
if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
@@ -3891,8 +3948,9 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
/* Fast parse options. This hopes to only see timestamps.
* If it is wrong it falls back on tcp_parse_options().
*/
-static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
- struct tcp_sock *tp, u8 **hvpp)
+static int tcp_fast_parse_options(const struct sk_buff *skb,
+ const struct tcphdr *th,
+ struct tcp_sock *tp, const u8 **hvpp)
{
/* In the spirit of fast parsing, compare doff directly to constant
* values. Because equality is used, short doff can be ignored here.
@@ -3913,10 +3971,10 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
/*
* Parse MD5 Signature option
*/
-u8 *tcp_parse_md5sig_option(struct tcphdr *th)
+const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
{
- int length = (th->doff << 2) - sizeof (*th);
- u8 *ptr = (u8*)(th + 1);
+ int length = (th->doff << 2) - sizeof(*th);
+ const u8 *ptr = (const u8 *)(th + 1);
/* If the TCP option is too short, we can short cut */
if (length < TCPOLEN_MD5SIG)
@@ -3993,8 +4051,8 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
{
- struct tcp_sock *tp = tcp_sk(sk);
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcphdr *th = tcp_hdr(skb);
u32 seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
@@ -4033,7 +4091,7 @@ static inline int tcp_paws_discard(const struct sock *sk,
* (borrowed from freebsd)
*/
-static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
{
return !before(end_seq, tp->rcv_wup) &&
!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
@@ -4078,7 +4136,7 @@ static void tcp_reset(struct sock *sk)
*
* If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
*/
-static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
+static void tcp_fin(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -4190,7 +4248,7 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
}
-static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
+static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -4349,7 +4407,7 @@ static void tcp_ofo_queue(struct sock *sk)
__skb_queue_tail(&sk->sk_receive_queue, skb);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if (tcp_hdr(skb)->fin)
- tcp_fin(skb, sk, tcp_hdr(skb));
+ tcp_fin(sk);
}
}
@@ -4377,7 +4435,7 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
struct tcp_sock *tp = tcp_sk(sk);
int eaten = -1;
@@ -4431,7 +4489,7 @@ queue_and_out:
if (skb->len)
tcp_event_data_recv(sk, skb);
if (th->fin)
- tcp_fin(skb, sk, th);
+ tcp_fin(sk);
if (!skb_queue_empty(&tp->out_of_order_queue)) {
tcp_ofo_queue(sk);
@@ -4861,9 +4919,9 @@ void tcp_cwnd_application_limited(struct sock *sk)
tp->snd_cwnd_stamp = tcp_time_stamp;
}
-static int tcp_should_expand_sndbuf(struct sock *sk)
+static int tcp_should_expand_sndbuf(const struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
/* If the user specified a specific send buffer setting, do
* not modify it.
@@ -4897,8 +4955,10 @@ static void tcp_new_space(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
if (tcp_should_expand_sndbuf(sk)) {
- int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
- MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
+ int sndmem = SKB_TRUESIZE(max_t(u32,
+ tp->rx_opt.mss_clamp,
+ tp->mss_cache) +
+ MAX_TCP_HEADER);
int demanded = max_t(unsigned int, tp->snd_cwnd,
tp->reordering + 1);
sndmem *= 2 * demanded;
@@ -4970,7 +5030,7 @@ static inline void tcp_ack_snd_check(struct sock *sk)
* either form (or just set the sysctl tcp_stdurg).
*/
-static void tcp_check_urg(struct sock *sk, struct tcphdr *th)
+static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 ptr = ntohs(th->urg_ptr);
@@ -5036,7 +5096,7 @@ static void tcp_check_urg(struct sock *sk, struct tcphdr *th)
}
/* This is the 'fast' part of urgent handling. */
-static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
+static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -5157,9 +5217,9 @@ out:
* play significant role here.
*/
static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, int syn_inerr)
+ const struct tcphdr *th, int syn_inerr)
{
- u8 *hash_location;
+ const u8 *hash_location;
struct tcp_sock *tp = tcp_sk(sk);
/* RFC1323: H1. Apply PAWS check first. */
@@ -5240,7 +5300,7 @@ discard:
* tcp_data_queue when everything is OK.
*/
int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len)
+ const struct tcphdr *th, unsigned int len)
{
struct tcp_sock *tp = tcp_sk(sk);
int res;
@@ -5451,9 +5511,9 @@ discard:
EXPORT_SYMBOL(tcp_rcv_established);
static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len)
+ const struct tcphdr *th, unsigned int len)
{
- u8 *hash_location;
+ const u8 *hash_location;
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
@@ -5728,7 +5788,7 @@ reset_and_undo:
*/
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len)
+ const struct tcphdr *th, unsigned int len)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c34f0151394..0ea10eefa60 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -92,7 +92,7 @@ EXPORT_SYMBOL(sysctl_tcp_low_latency);
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
__be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
- __be32 daddr, __be32 saddr, struct tcphdr *th);
+ __be32 daddr, __be32 saddr, const struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
@@ -104,7 +104,7 @@ struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
-static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
+static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
ip_hdr(skb)->saddr,
@@ -552,7 +552,7 @@ static void __tcp_v4_send_check(struct sk_buff *skb,
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
- struct inet_sock *inet = inet_sk(sk);
+ const struct inet_sock *inet = inet_sk(sk);
__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
@@ -590,7 +590,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
struct {
struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
@@ -652,6 +652,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
net = dev_net(skb_dst(skb)->dev);
+ arg.tos = ip_hdr(skb)->tos;
ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
&arg, arg.iov[0].iov_len);
@@ -666,9 +667,9 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
u32 win, u32 ts, int oif,
struct tcp_md5sig_key *key,
- int reply_flags)
+ int reply_flags, u8 tos)
{
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
struct {
struct tcphdr th;
__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
@@ -726,7 +727,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
if (oif)
arg.bound_dev_if = oif;
-
+ arg.tos = tos;
ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
&arg, arg.iov[0].iov_len);
@@ -743,7 +744,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
tcptw->tw_ts_recent,
tw->tw_bound_dev_if,
tcp_twsk_md5_key(tcptw),
- tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
+ tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
+ tw->tw_tos
);
inet_twsk_put(tw);
@@ -757,7 +759,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
req->ts_recent,
0,
tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
- inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
+ inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
+ ip_hdr(skb)->tos);
}
/*
@@ -927,18 +930,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
}
sk_nocaps_add(sk, NETIF_F_GSO_MASK);
}
- if (tcp_alloc_md5sig_pool(sk) == NULL) {
+
+ md5sig = tp->md5sig_info;
+ if (md5sig->entries4 == 0 &&
+ tcp_alloc_md5sig_pool(sk) == NULL) {
kfree(newkey);
return -ENOMEM;
}
- md5sig = tp->md5sig_info;
if (md5sig->alloced4 == md5sig->entries4) {
keys = kmalloc((sizeof(*keys) *
(md5sig->entries4 + 1)), GFP_ATOMIC);
if (!keys) {
kfree(newkey);
- tcp_free_md5sig_pool();
+ if (md5sig->entries4 == 0)
+ tcp_free_md5sig_pool();
return -ENOMEM;
}
@@ -982,6 +988,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
kfree(tp->md5sig_info->keys4);
tp->md5sig_info->keys4 = NULL;
tp->md5sig_info->alloced4 = 0;
+ tcp_free_md5sig_pool();
} else if (tp->md5sig_info->entries4 != i) {
/* Need to do some manipulation */
memmove(&tp->md5sig_info->keys4[i],
@@ -989,7 +996,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
(tp->md5sig_info->entries4 - i) *
sizeof(struct tcp4_md5sig_key));
}
- tcp_free_md5sig_pool();
return 0;
}
}
@@ -1087,7 +1093,7 @@ static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
- __be32 daddr, __be32 saddr, struct tcphdr *th)
+ __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
struct tcp_md5sig_pool *hp;
struct hash_desc *desc;
@@ -1119,12 +1125,12 @@ clear_hash_noput:
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
- struct sock *sk, struct request_sock *req,
- struct sk_buff *skb)
+ const struct sock *sk, const struct request_sock *req,
+ const struct sk_buff *skb)
{
struct tcp_md5sig_pool *hp;
struct hash_desc *desc;
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
__be32 saddr, daddr;
if (sk) {
@@ -1169,7 +1175,7 @@ clear_hash_noput:
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
-static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
+static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
/*
* This gets called for each TCP segment that arrives
@@ -1179,10 +1185,10 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
* o MD5 hash and we're not expecting one.
* o MD5 hash and its wrong.
*/
- __u8 *hash_location = NULL;
+ const __u8 *hash_location = NULL;
struct tcp_md5sig_key *hash_expected;
const struct iphdr *iph = ip_hdr(skb);
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
int genhash;
unsigned char newhash[16];
@@ -1245,7 +1251,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct tcp_extend_values tmp_ext;
struct tcp_options_received tmp_opt;
- u8 *hash_location;
+ const u8 *hash_location;
struct request_sock *req;
struct inet_request_sock *ireq;
struct tcp_sock *tp = tcp_sk(sk);
@@ -1585,7 +1591,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
#endif
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
- sock_rps_save_rxhash(sk, skb->rxhash);
+ sock_rps_save_rxhash(sk, skb);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
goto reset;
@@ -1602,7 +1608,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
goto discard;
if (nsk != sk) {
- sock_rps_save_rxhash(nsk, skb->rxhash);
+ sock_rps_save_rxhash(nsk, skb);
if (tcp_child_process(sk, nsk, skb)) {
rsk = nsk;
goto reset;
@@ -1610,7 +1616,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
} else
- sock_rps_save_rxhash(sk, skb->rxhash);
+ sock_rps_save_rxhash(sk, skb);
if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
@@ -1642,7 +1648,7 @@ EXPORT_SYMBOL(tcp_v4_do_rcv);
int tcp_v4_rcv(struct sk_buff *skb)
{
const struct iphdr *iph;
- struct tcphdr *th;
+ const struct tcphdr *th;
struct sock *sk;
int ret;
struct net *net = dev_net(skb->dev);
@@ -1677,7 +1683,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
skb->len - th->doff * 4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->when = 0;
- TCP_SKB_CB(skb)->flags = iph->tos;
+ TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
TCP_SKB_CB(skb)->sacked = 0;
sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1806,7 +1812,7 @@ EXPORT_SYMBOL(tcp_v4_get_peer);
void *tcp_v4_tw_get_peer(struct sock *sk)
{
- struct inet_timewait_sock *tw = inet_twsk(sk);
+ const struct inet_timewait_sock *tw = inet_twsk(sk);
return inet_getpeer_v4(tw->tw_daddr, 1);
}
@@ -2378,7 +2384,7 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
}
EXPORT_SYMBOL(tcp_proc_unregister);
-static void get_openreq4(struct sock *sk, struct request_sock *req,
+static void get_openreq4(const struct sock *sk, const struct request_sock *req,
struct seq_file *f, int i, int uid, int *len)
{
const struct inet_request_sock *ireq = inet_rsk(req);
@@ -2408,9 +2414,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
int timer_active;
unsigned long timer_expires;
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
- struct inet_sock *inet = inet_sk(sk);
+ const struct inet_sock *inet = inet_sk(sk);
__be32 dest = inet->inet_daddr;
__be32 src = inet->inet_rcv_saddr;
__u16 destp = ntohs(inet->inet_dport);
@@ -2459,7 +2465,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
len);
}
-static void get_timewait4_sock(struct inet_timewait_sock *tw,
+static void get_timewait4_sock(const struct inet_timewait_sock *tw,
struct seq_file *f, int i, int *len)
{
__be32 dest, src;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index d2fe4e06b47..85a2fbebd7e 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -141,7 +141,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
const struct tcphdr *th)
{
struct tcp_options_received tmp_opt;
- u8 *hash_location;
+ const u8 *hash_location;
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
int paws_reject = 0;
@@ -328,6 +328,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
+ tw->tw_transparent = inet_sk(sk)->transparent;
tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
tcptw->tw_rcv_nxt = tp->rcv_nxt;
tcptw->tw_snd_nxt = tp->snd_nxt;
@@ -566,7 +567,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock **prev)
{
struct tcp_options_received tmp_opt;
- u8 *hash_location;
+ const u8 *hash_location;
struct sock *child;
const struct tcphdr *th = tcp_hdr(skb);
__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 882e0b0964d..980b98f6288 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
/* Account for new data that has been sent to the network. */
-static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
+static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
unsigned int prior_packets = tp->packets_out;
@@ -89,9 +89,9 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
* Anything in between SND.UNA...SND.UNA+SND.WND also can be already
* invalid. OK, let's make this for now:
*/
-static inline __u32 tcp_acceptable_seq(struct sock *sk)
+static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
if (!before(tcp_wnd_end(tp), tp->snd_nxt))
return tp->snd_nxt;
@@ -116,7 +116,7 @@ static inline __u32 tcp_acceptable_seq(struct sock *sk)
static __u16 tcp_advertise_mss(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
+ const struct dst_entry *dst = __sk_dst_get(sk);
int mss = tp->advmss;
if (dst) {
@@ -133,7 +133,7 @@ static __u16 tcp_advertise_mss(struct sock *sk)
/* RFC2861. Reset CWND after idle period longer RTO to "restart window".
* This is the first part of cwnd validation mechanism. */
-static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
+static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
struct tcp_sock *tp = tcp_sk(sk);
s32 delta = tcp_time_stamp - tp->lsndtime;
@@ -154,7 +154,7 @@ static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
- struct sk_buff *skb, struct sock *sk)
+ struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
const u32 now = tcp_time_stamp;
@@ -295,11 +295,11 @@ static u16 tcp_select_window(struct sock *sk)
}
/* Packet ECN state for a SYN-ACK */
-static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
{
- TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
if (!(tp->ecn_flags & TCP_ECN_OK))
- TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
}
/* Packet ECN state for a SYN. */
@@ -309,13 +309,13 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
tp->ecn_flags = 0;
if (sysctl_tcp_ecn == 1) {
- TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
tp->ecn_flags = TCP_ECN_OK;
}
}
static __inline__ void
-TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
+TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
{
if (inet_rsk(req)->ecn_ok)
th->ece = 1;
@@ -356,7 +356,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- TCP_SKB_CB(skb)->flags = flags;
+ TCP_SKB_CB(skb)->tcp_flags = flags;
TCP_SKB_CB(skb)->sacked = 0;
skb_shinfo(skb)->gso_segs = 1;
@@ -565,7 +565,8 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
*/
static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
struct tcp_out_options *opts,
- struct tcp_md5sig_key **md5) {
+ struct tcp_md5sig_key **md5)
+{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
unsigned remaining = MAX_TCP_OPTION_SPACE;
@@ -743,7 +744,8 @@ static unsigned tcp_synack_options(struct sock *sk,
*/
static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
struct tcp_out_options *opts,
- struct tcp_md5sig_key **md5) {
+ struct tcp_md5sig_key **md5)
+{
struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
struct tcp_sock *tp = tcp_sk(sk);
unsigned size = 0;
@@ -826,7 +828,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
tcb = TCP_SKB_CB(skb);
memset(&opts, 0, sizeof(opts));
- if (unlikely(tcb->flags & TCPHDR_SYN))
+ if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
else
tcp_options_size = tcp_established_options(sk, skb, &opts,
@@ -850,9 +852,9 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
th->seq = htonl(tcb->seq);
th->ack_seq = htonl(tp->rcv_nxt);
*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
- tcb->flags);
+ tcb->tcp_flags);
- if (unlikely(tcb->flags & TCPHDR_SYN)) {
+ if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
/* RFC1323: The window in SYN & SYN/ACK segments
* is never scaled.
*/
@@ -875,7 +877,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
}
tcp_options_write((__be32 *)(th + 1), tp, &opts);
- if (likely((tcb->flags & TCPHDR_SYN) == 0))
+ if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
TCP_ECN_send(sk, skb, tcp_header_size);
#ifdef CONFIG_TCP_MD5SIG
@@ -889,11 +891,11 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
icsk->icsk_af_ops->send_check(sk, skb);
- if (likely(tcb->flags & TCPHDR_ACK))
+ if (likely(tcb->tcp_flags & TCPHDR_ACK))
tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
if (skb->len != tcp_header_size)
- tcp_event_data_sent(tp, skb, sk);
+ tcp_event_data_sent(tp, sk);
if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
@@ -926,7 +928,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
}
/* Initialize TSO segments for a packet. */
-static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
+static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
if (skb->len <= mss_now || !sk_can_gso(sk) ||
@@ -947,7 +949,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
/* When a modification to fackets out becomes necessary, we need to check
* skb is counted to fackets_out or not.
*/
-static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
+static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
int decr)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -962,7 +964,7 @@ static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
/* Pcount in the middle of the write queue got changed, we need to do various
* tweaks to fix counters
*/
-static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
+static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -1032,9 +1034,9 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
/* PSH and FIN should only be set in the second packet. */
- flags = TCP_SKB_CB(skb)->flags;
- TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
- TCP_SKB_CB(buff)->flags = flags;
+ flags = TCP_SKB_CB(skb)->tcp_flags;
+ TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
+ TCP_SKB_CB(buff)->tcp_flags = flags;
TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -1094,14 +1096,16 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
eat = len;
k = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- if (skb_shinfo(skb)->frags[i].size <= eat) {
- put_page(skb_shinfo(skb)->frags[i].page);
- eat -= skb_shinfo(skb)->frags[i].size;
+ int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ if (size <= eat) {
+ skb_frag_unref(skb, i);
+ eat -= size;
} else {
skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
if (eat) {
skb_shinfo(skb)->frags[k].page_offset += eat;
- skb_shinfo(skb)->frags[k].size -= eat;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
eat = 0;
}
k++;
@@ -1144,10 +1148,10 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
}
/* Calculate MSS. Not accounting for SACKs here. */
-int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
{
- struct tcp_sock *tp = tcp_sk(sk);
- struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
int mss_now;
/* Calculate base mss without TCP options:
@@ -1173,10 +1177,10 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
}
/* Inverse of above */
-int tcp_mss_to_mtu(struct sock *sk, int mss)
+int tcp_mss_to_mtu(const struct sock *sk, int mss)
{
- struct tcp_sock *tp = tcp_sk(sk);
- struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
int mtu;
mtu = mss +
@@ -1250,8 +1254,8 @@ EXPORT_SYMBOL(tcp_sync_mss);
*/
unsigned int tcp_current_mss(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
+ const struct dst_entry *dst = __sk_dst_get(sk);
u32 mss_now;
unsigned header_len;
struct tcp_out_options opts;
@@ -1311,10 +1315,10 @@ static void tcp_cwnd_validate(struct sock *sk)
* modulo only when the receiver window alone is the limiting factor or
* when we would be allowed to send the split-due-to-Nagle skb fully.
*/
-static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
+static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
unsigned int mss_now, unsigned int cwnd)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
u32 needed, window, cwnd_len;
window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
@@ -1334,13 +1338,14 @@ static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
/* Can at least one segment of SKB be sent right now, according to the
* congestion window rules? If so, return how many segments are allowed.
*/
-static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
- struct sk_buff *skb)
+static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
+ const struct sk_buff *skb)
{
u32 in_flight, cwnd;
/* Don't be strict about the congestion window for the final FIN. */
- if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
+ if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
+ tcp_skb_pcount(skb) == 1)
return 1;
in_flight = tcp_packets_in_flight(tp);
@@ -1355,7 +1360,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
* This must be invoked the first time we consider transmitting
* SKB onto the wire.
*/
-static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
+static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
int tso_segs = tcp_skb_pcount(skb);
@@ -1393,7 +1398,7 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
/* Return non-zero if the Nagle test allows this packet to be
* sent now.
*/
-static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
+static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
unsigned int cur_mss, int nonagle)
{
/* Nagle rule does not apply to frames, which sit in the middle of the
@@ -1409,7 +1414,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
* Nagle can be ignored during F-RTO too (see RFC4138).
*/
if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
- (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
+ (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
return 1;
if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
@@ -1419,7 +1424,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
}
/* Does at least the first segment of SKB fit into the send window? */
-static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
+static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
unsigned int cur_mss)
{
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -1434,10 +1439,10 @@ static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
* should be put on the wire right now. If so, it returns the number of
* packets allowed by the congestion window.
*/
-static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
+static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
unsigned int cur_mss, int nonagle)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
unsigned int cwnd_quota;
tcp_init_tso_segs(sk, skb, cur_mss);
@@ -1455,7 +1460,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
/* Test if sending is allowed right now. */
int tcp_may_send_now(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk);
return skb &&
@@ -1497,9 +1502,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
/* PSH and FIN should only be set in the second packet. */
- flags = TCP_SKB_CB(skb)->flags;
- TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
- TCP_SKB_CB(buff)->flags = flags;
+ flags = TCP_SKB_CB(skb)->tcp_flags;
+ TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
+ TCP_SKB_CB(buff)->tcp_flags = flags;
/* This packet was never sent out yet, so no SACK bits. */
TCP_SKB_CB(buff)->sacked = 0;
@@ -1530,7 +1535,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
u32 send_win, cong_win, limit, in_flight;
int win_divisor;
- if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto send_now;
if (icsk->icsk_ca_state != TCP_CA_Open)
@@ -1657,7 +1662,7 @@ static int tcp_mtu_probe(struct sock *sk)
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
- TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
+ TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
TCP_SKB_CB(nskb)->sacked = 0;
nskb->csum = 0;
nskb->ip_summed = skb->ip_summed;
@@ -1677,11 +1682,11 @@ static int tcp_mtu_probe(struct sock *sk)
if (skb->len <= copy) {
/* We've eaten all the data from this skb.
* Throw it away. */
- TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
+ TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);
} else {
- TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
+ TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
~(TCPHDR_FIN|TCPHDR_PSH);
if (!skb_shinfo(skb)->nr_frags) {
skb_pull(skb, copy);
@@ -1796,11 +1801,13 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
tcp_event_new_data_sent(sk, skb);
tcp_minshall_update(tp, mss_now, skb);
- sent_pkts++;
+ sent_pkts += tcp_skb_pcount(skb);
if (push_one)
break;
}
+ if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
+ tp->prr_out += sent_pkts;
if (likely(sent_pkts)) {
tcp_cwnd_validate(sk);
@@ -1985,7 +1992,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
/* Merge over control information. This moves PSH/FIN etc. over */
- TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags;
+ TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
/* All done, get rid of second SKB and account for it so
* packet counting does not break.
@@ -2003,7 +2010,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
}
/* Check if coalescing SKBs is legal. */
-static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb)
+static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
{
if (tcp_skb_pcount(skb) > 1)
return 0;
@@ -2033,7 +2040,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
if (!sysctl_tcp_retrans_collapse)
return;
- if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN)
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
return;
tcp_for_write_queue_from_safe(skb, tmp, sk) {
@@ -2125,12 +2132,12 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
* since it is cheap to do so and saves bytes on the network.
*/
if (skb->len > 0 &&
- (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
+ (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
if (!pskb_trim(skb, 0)) {
/* Reuse, even though it does some unnecessary work */
tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
- TCP_SKB_CB(skb)->flags);
+ TCP_SKB_CB(skb)->tcp_flags);
skb->ip_summed = CHECKSUM_NONE;
}
}
@@ -2179,7 +2186,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
static int tcp_can_forward_retransmit(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
/* Forward retransmissions are possible only during Recovery. */
if (icsk->icsk_ca_state != TCP_CA_Recovery)
@@ -2294,6 +2301,9 @@ begin_fwd:
return;
NET_INC_STATS_BH(sock_net(sk), mib_idx);
+ if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
+ tp->prr_out += tcp_skb_pcount(skb);
+
if (skb == tcp_write_queue_head(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
@@ -2317,7 +2327,7 @@ void tcp_send_fin(struct sock *sk)
mss_now = tcp_current_mss(sk);
if (tcp_send_head(sk) != NULL) {
- TCP_SKB_CB(skb)->flags |= TCPHDR_FIN;
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
TCP_SKB_CB(skb)->end_seq++;
tp->write_seq++;
} else {
@@ -2379,11 +2389,11 @@ int tcp_send_synack(struct sock *sk)
struct sk_buff *skb;
skb = tcp_write_queue_head(sk);
- if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
+ if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
return -EFAULT;
}
- if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
+ if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
if (nskb == NULL)
@@ -2397,7 +2407,7 @@ int tcp_send_synack(struct sock *sk)
skb = nskb;
}
- TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
TCP_ECN_send_synack(tcp_sk(sk), skb);
}
TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2542,7 +2552,7 @@ EXPORT_SYMBOL(tcp_make_synack);
/* Do all connect socket setups that can be done AF independent. */
static void tcp_connect_init(struct sock *sk)
{
- struct dst_entry *dst = __sk_dst_get(sk);
+ const struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_sock *tp = tcp_sk(sk);
__u8 rcv_wscale;
@@ -2794,13 +2804,13 @@ int tcp_write_wakeup(struct sock *sk)
if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
skb->len > mss) {
seg_size = min(seg_size, mss);
- TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
if (tcp_fragment(sk, skb, seg_size, mss))
return -1;
} else if (!tcp_skb_pcount(skb))
tcp_set_skb_tso_segs(sk, skb, mss);
- TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
TCP_SKB_CB(skb)->when = tcp_time_stamp;
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
if (!err)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index ecd44b0c45f..2e0f0af76c1 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -334,7 +334,6 @@ void tcp_retransmit_timer(struct sock *sk)
* connection. If the socket is an orphan, time it out,
* we cannot allow such beasts to hang infinitely.
*/
-#ifdef TCP_DEBUG
struct inet_sock *inet = inet_sk(sk);
if (sk->sk_family == AF_INET) {
LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
@@ -349,7 +348,6 @@ void tcp_retransmit_timer(struct sock *sk)
inet->inet_num, tp->snd_una, tp->snd_nxt);
}
#endif
-#endif
if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
tcp_write_err(sk);
goto out;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1b5a19340a9..ebaa96bd346 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1267,7 +1267,7 @@ int udp_disconnect(struct sock *sk, int flags)
sk->sk_state = TCP_CLOSE;
inet->inet_daddr = 0;
inet->inet_dport = 0;
- sock_rps_save_rxhash(sk, 0);
+ sock_rps_reset_rxhash(sk);
sk->sk_bound_dev_if = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
@@ -1355,7 +1355,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
int rc;
if (inet_sk(sk)->inet_daddr)
- sock_rps_save_rxhash(sk, skb->rxhash);
+ sock_rps_save_rxhash(sk, skb);
rc = ip_queue_rcv_skb(sk, skb);
if (rc < 0) {
@@ -1461,10 +1461,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
}
- if (rcu_dereference_raw(sk->sk_filter)) {
- if (udp_lib_checksum_complete(skb))
- goto drop;
- }
+ if (rcu_access_pointer(sk->sk_filter) &&
+ udp_lib_checksum_complete(skb))
+ goto drop;
if (sk_rcvqueues_full(sk, skb))
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index fc5368ad2b0..a0b4c5da8d4 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -79,13 +79,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
struct rtable *rt = (struct rtable *)xdst->route;
const struct flowi4 *fl4 = &fl->u.ip4;
- rt->rt_key_dst = fl4->daddr;
- rt->rt_key_src = fl4->saddr;
- rt->rt_key_tos = fl4->flowi4_tos;
- rt->rt_route_iif = fl4->flowi4_iif;
- rt->rt_iif = fl4->flowi4_iif;
- rt->rt_oif = fl4->flowi4_oif;
- rt->rt_mark = fl4->flowi4_mark;
+ xdst->u.rt.rt_key_dst = fl4->daddr;
+ xdst->u.rt.rt_key_src = fl4->saddr;
+ xdst->u.rt.rt_key_tos = fl4->flowi4_tos;
+ xdst->u.rt.rt_route_iif = fl4->flowi4_iif;
+ xdst->u.rt.rt_iif = fl4->flowi4_iif;
+ xdst->u.rt.rt_oif = fl4->flowi4_oif;
+ xdst->u.rt.rt_mark = fl4->flowi4_mark;
xdst->u.dst.dev = dev;
dev_hold(dev);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 12368c58606..e39239e6426 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -428,7 +428,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
ndev->tstamp = jiffies;
addrconf_sysctl_register(ndev);
/* protected by rtnl_lock */
- rcu_assign_pointer(dev->ip6_ptr, ndev);
+ RCU_INIT_POINTER(dev->ip6_ptr, ndev);
/* Join all-node multicast group */
ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
@@ -824,12 +824,13 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
{
struct inet6_dev *idev = ifp->idev;
struct in6_addr addr, *tmpaddr;
- unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age;
+ unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
unsigned long regen_advance;
int tmp_plen;
int ret = 0;
int max_addresses;
u32 addr_flags;
+ unsigned long now = jiffies;
write_lock(&idev->lock);
if (ift) {
@@ -874,7 +875,7 @@ retry:
goto out;
}
memcpy(&addr.s6_addr[8], idev->rndid, 8);
- age = (jiffies - ifp->tstamp) / HZ;
+ age = (now - ifp->tstamp) / HZ;
tmp_valid_lft = min_t(__u32,
ifp->valid_lft,
idev->cnf.temp_valid_lft + age);
@@ -884,7 +885,6 @@ retry:
idev->cnf.max_desync_factor);
tmp_plen = ifp->prefix_len;
max_addresses = idev->cnf.max_addresses;
- tmp_cstamp = ifp->cstamp;
tmp_tstamp = ifp->tstamp;
spin_unlock_bh(&ifp->lock);
@@ -929,7 +929,7 @@ retry:
ift->ifpub = ifp;
ift->valid_lft = tmp_valid_lft;
ift->prefered_lft = tmp_prefered_lft;
- ift->cstamp = tmp_cstamp;
+ ift->cstamp = now;
ift->tstamp = tmp_tstamp;
spin_unlock_bh(&ift->lock);
@@ -1999,25 +1999,50 @@ ok:
#ifdef CONFIG_IPV6_PRIVACY
read_lock_bh(&in6_dev->lock);
/* update all temporary addresses in the list */
- list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) {
- /*
- * When adjusting the lifetimes of an existing
- * temporary address, only lower the lifetimes.
- * Implementations must not increase the
- * lifetimes of an existing temporary address
- * when processing a Prefix Information Option.
- */
+ list_for_each_entry(ift, &in6_dev->tempaddr_list,
+ tmp_list) {
+ int age, max_valid, max_prefered;
+
if (ifp != ift->ifpub)
continue;
+ /*
+ * RFC 4941 section 3.3:
+ * If a received option will extend the lifetime
+ * of a public address, the lifetimes of
+ * temporary addresses should be extended,
+ * subject to the overall constraint that no
+ * temporary addresses should ever remain
+ * "valid" or "preferred" for a time longer than
+ * (TEMP_VALID_LIFETIME) or
+ * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR),
+ * respectively.
+ */
+ age = (now - ift->cstamp) / HZ;
+ max_valid = in6_dev->cnf.temp_valid_lft - age;
+ if (max_valid < 0)
+ max_valid = 0;
+
+ max_prefered = in6_dev->cnf.temp_prefered_lft -
+ in6_dev->cnf.max_desync_factor -
+ age;
+ if (max_prefered < 0)
+ max_prefered = 0;
+
+ if (valid_lft > max_valid)
+ valid_lft = max_valid;
+
+ if (prefered_lft > max_prefered)
+ prefered_lft = max_prefered;
+
spin_lock(&ift->lock);
flags = ift->flags;
- if (ift->valid_lft > valid_lft &&
- ift->valid_lft - valid_lft > (jiffies - ift->tstamp) / HZ)
- ift->valid_lft = valid_lft + (jiffies - ift->tstamp) / HZ;
- if (ift->prefered_lft > prefered_lft &&
- ift->prefered_lft - prefered_lft > (jiffies - ift->tstamp) / HZ)
- ift->prefered_lft = prefered_lft + (jiffies - ift->tstamp) / HZ;
+ ift->valid_lft = valid_lft;
+ ift->prefered_lft = prefered_lft;
+ ift->tstamp = now;
+ if (prefered_lft > 0)
+ ift->flags &= ~IFA_F_DEPRECATED;
+
spin_unlock(&ift->lock);
if (!(flags&IFA_F_TENTATIVE))
ipv6_ifa_notify(0, ift);
@@ -2025,9 +2050,11 @@ ok:
if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
/*
- * When a new public address is created as described in [ADDRCONF],
- * also create a new temporary address. Also create a temporary
- * address if it's enabled but no temporary address currently exists.
+ * When a new public address is created as
+ * described in [ADDRCONF], also create a new
+ * temporary address. Also create a temporary
+ * address if it's enabled but no temporary
+ * address currently exists.
*/
read_unlock_bh(&in6_dev->lock);
ipv6_create_tempaddr(ifp, NULL);
@@ -2706,7 +2733,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
idev->dead = 1;
/* protected by rtnl_lock */
- rcu_assign_pointer(dev->ip6_ptr, NULL);
+ RCU_INIT_POINTER(dev->ip6_ptr, NULL);
/* Step 1.5: remove snmp6 entry */
snmp6_unregister_dev(idev);
@@ -2969,12 +2996,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
ipv6_ifa_notify(RTM_NEWADDR, ifp);
- /* If added prefix is link local and forwarding is off,
- start sending router solicitations.
+ /* If added prefix is link local and we are prepared to process
+ router advertisements, start sending router solicitations.
*/
- if ((ifp->idev->cnf.forwarding == 0 ||
- ifp->idev->cnf.forwarding == 2) &&
+ if (((ifp->idev->cnf.accept_ra == 1 && !ifp->idev->cnf.forwarding) ||
+ ifp->idev->cnf.accept_ra == 2) &&
ifp->idev->cnf.rtr_solicits > 0 &&
(dev->flags&IFF_LOOPBACK) == 0 &&
(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3b5669a2582..d27c797f9f0 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -875,6 +875,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
skb_reset_transport_header(skb);
__skb_push(skb, skb_gro_offset(skb));
+ ops = rcu_dereference(inet6_protos[proto]);
if (!ops || !ops->gro_receive)
goto out_unlock;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b46e9f88ce3..e2480691c22 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -297,10 +297,6 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
ipv6_addr_copy(&iph->daddr, &fl6->daddr);
mtu_info = IP6CBMTU(skb);
- if (!mtu_info) {
- kfree_skb(skb);
- return;
- }
mtu_info->ip6m_mtu = mtu;
mtu_info->ip6m_addr.sin6_family = AF_INET6;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 79a485e8a70..1318de4c3e8 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -273,12 +273,12 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
__u16 dstbuf;
#endif
- struct dst_entry *dst;
+ struct dst_entry *dst = skb_dst(skb);
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
((skb_transport_header(skb)[1] + 1) << 3)))) {
- IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
+ IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -1;
@@ -289,9 +289,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
dstbuf = opt->dst1;
#endif
- dst = dst_clone(skb_dst(skb));
if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
- dst_release(dst);
skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
opt = IP6CB(skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
@@ -304,7 +302,6 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
IP6_INC_STATS_BH(dev_net(dst->dev),
ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
- dst_release(dst);
return -1;
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 11900417b1c..90868fb4275 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -490,7 +490,8 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
goto out_dst_release;
}
- idev = in6_dev_get(skb->dev);
+ rcu_read_lock();
+ idev = __in6_dev_get(skb->dev);
err = ip6_append_data(sk, icmpv6_getfrag, &msg,
len + sizeof(struct icmp6hdr),
@@ -500,19 +501,16 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
if (err) {
ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
- goto out_put;
+ } else {
+ err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+ len + sizeof(struct icmp6hdr));
}
- err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, len + sizeof(struct icmp6hdr));
-
-out_put:
- if (likely(idev != NULL))
- in6_dev_put(idev);
+ rcu_read_unlock();
out_dst_release:
dst_release(dst);
out:
icmpv6_xmit_unlock(sk);
}
-
EXPORT_SYMBOL(icmpv6_send);
static void icmpv6_echo_reply(struct sk_buff *skb)
@@ -569,7 +567,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
- idev = in6_dev_get(skb->dev);
+ idev = __in6_dev_get(skb->dev);
msg.skb = skb;
msg.offset = 0;
@@ -583,13 +581,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
if (err) {
ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
- goto out_put;
+ } else {
+ err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+ skb->len + sizeof(struct icmp6hdr));
}
- err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
-
-out_put:
- if (likely(idev != NULL))
- in6_dev_put(idev);
dst_release(dst);
out:
icmpv6_xmit_unlock(sk);
@@ -840,8 +835,7 @@ static int __net_init icmpv6_sk_init(struct net *net)
/* Enough space for 2 64K ICMP packets, including
* sk_buff struct overhead.
*/
- sk->sk_sndbuf =
- (2 * ((64 * 1024) + sizeof(struct sk_buff)));
+ sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
}
return 0;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 8a58e8cf664..2916200f90c 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -211,6 +211,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
struct flowi6 fl6;
struct dst_entry *dst;
struct in6_addr *final_p, final;
+ int res;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = sk->sk_protocol;
@@ -241,12 +242,14 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
__inet6_csk_dst_store(sk, dst, NULL, NULL);
}
- skb_dst_set(skb, dst_clone(dst));
+ rcu_read_lock();
+ skb_dst_set_noref(skb, dst);
/* Restore final destination back after routing done */
ipv6_addr_copy(&fl6.daddr, &np->daddr);
- return ip6_xmit(sk, skb, &fl6, np->opt);
+ res = ip6_xmit(sk, skb, &fl6, np->opt);
+ rcu_read_unlock();
+ return res;
}
-
EXPORT_SYMBOL_GPL(inet6_csk_xmit);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 320d91d20ad..93718f3db79 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -28,10 +28,6 @@
#include <linux/list.h>
#include <linux/slab.h>
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#endif
-
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 4c882cf4e8a..1c9bf8b5c30 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1193,6 +1193,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
struct sk_buff *skb;
unsigned int maxfraglen, fragheaderlen;
int exthdrlen;
+ int dst_exthdrlen;
int hh_len;
int mtu;
int copy;
@@ -1248,7 +1249,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
np->cork.hop_limit = hlimit;
np->cork.tclass = tclass;
mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
- rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+ rt->dst.dev->mtu : dst_mtu(&rt->dst);
if (np->frag_size < mtu) {
if (np->frag_size)
mtu = np->frag_size;
@@ -1259,16 +1260,17 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
cork->length = 0;
sk->sk_sndmsg_page = NULL;
sk->sk_sndmsg_off = 0;
- exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
- rt->rt6i_nfheader_len;
+ exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
length += exthdrlen;
transhdrlen += exthdrlen;
+ dst_exthdrlen = rt->dst.header_len;
} else {
rt = (struct rt6_info *)cork->dst;
fl6 = &inet->cork.fl.u.ip6;
opt = np->cork.opt;
transhdrlen = 0;
exthdrlen = 0;
+ dst_exthdrlen = 0;
mtu = cork->fragsize;
}
@@ -1368,6 +1370,8 @@ alloc_new_skb:
else
alloclen = datalen + fragheaderlen;
+ alloclen += dst_exthdrlen;
+
/*
* The last fragment gets additional space at tail.
* Note: we overallocate on fragments with MSG_MODE
@@ -1419,9 +1423,9 @@ alloc_new_skb:
/*
* Find where to start putting bytes
*/
- data = skb_put(skb, fraglen);
- skb_set_network_header(skb, exthdrlen);
- data += fragheaderlen;
+ data = skb_put(skb, fraglen + dst_exthdrlen);
+ skb_set_network_header(skb, exthdrlen + dst_exthdrlen);
+ data += fragheaderlen + dst_exthdrlen;
skb->transport_header = (skb->network_header +
fragheaderlen);
if (fraggap) {
@@ -1434,6 +1438,7 @@ alloc_new_skb:
pskb_trim_unique(skb_prev, maxfraglen);
}
copy = datalen - transhdrlen - fraggap;
+
if (copy < 0) {
err = -EINVAL;
kfree_skb(skb);
@@ -1448,6 +1453,7 @@ alloc_new_skb:
length -= datalen - fraggap;
transhdrlen = 0;
exthdrlen = 0;
+ dst_exthdrlen = 0;
csummode = CHECKSUM_NONE;
/*
@@ -1480,13 +1486,13 @@ alloc_new_skb:
if (page && (left = PAGE_SIZE - off) > 0) {
if (copy >= left)
copy = left;
- if (page != frag->page) {
+ if (page != skb_frag_page(frag)) {
if (i == MAX_SKB_FRAGS) {
err = -EMSGSIZE;
goto error;
}
- get_page(page);
skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+ skb_frag_ref(skb, i);
frag = &skb_shinfo(skb)->frags[i];
}
} else if(i < MAX_SKB_FRAGS) {
@@ -1506,12 +1512,14 @@ alloc_new_skb:
err = -EMSGSIZE;
goto error;
}
- if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
+ if (getfrag(from,
+ skb_frag_address(frag) + skb_frag_size(frag),
+ offset, copy, skb->len, skb) < 0) {
err = -EFAULT;
goto error;
}
sk->sk_sndmsg_off += copy;
- frag->size += copy;
+ skb_frag_size_add(frag, copy);
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0bc98886c38..bdc15c9003d 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -218,8 +218,8 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
- rcu_assign_pointer(t->next , rtnl_dereference(*tp));
- rcu_assign_pointer(*tp, t);
+ RCU_INIT_POINTER(t->next , rtnl_dereference(*tp));
+ RCU_INIT_POINTER(*tp, t);
}
/**
@@ -237,7 +237,7 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
if (t == iter) {
- rcu_assign_pointer(*tp, t->next);
+ RCU_INIT_POINTER(*tp, t->next);
break;
}
}
@@ -350,7 +350,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
if (dev == ip6n->fb_tnl_dev)
- rcu_assign_pointer(ip6n->tnls_wc[0], NULL);
+ RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
else
ip6_tnl_unlink(ip6n, t);
ip6_tnl_dst_reset(t);
@@ -889,7 +889,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
struct net_device_stats *stats = &t->dev->stats;
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct ipv6_tel_txoption opt;
- struct dst_entry *dst;
+ struct dst_entry *dst = NULL, *ndst = NULL;
struct net_device *tdev;
int mtu;
unsigned int max_headroom = sizeof(struct ipv6hdr);
@@ -897,19 +897,20 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
int err = -1;
int pkt_len;
- if ((dst = ip6_tnl_dst_check(t)) != NULL)
- dst_hold(dst);
- else {
- dst = ip6_route_output(net, NULL, fl6);
+ if (!fl6->flowi6_mark)
+ dst = ip6_tnl_dst_check(t);
+ if (!dst) {
+ ndst = ip6_route_output(net, NULL, fl6);
- if (dst->error)
+ if (ndst->error)
goto tx_err_link_failure;
- dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
- if (IS_ERR(dst)) {
- err = PTR_ERR(dst);
- dst = NULL;
+ ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
+ if (IS_ERR(ndst)) {
+ err = PTR_ERR(ndst);
+ ndst = NULL;
goto tx_err_link_failure;
}
+ dst = ndst;
}
tdev = dst->dev;
@@ -955,8 +956,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
skb = new_skb;
}
skb_dst_drop(skb);
- skb_dst_set(skb, dst_clone(dst));
-
+ if (fl6->flowi6_mark) {
+ skb_dst_set(skb, dst);
+ ndst = NULL;
+ } else {
+ skb_dst_set_noref(skb, dst);
+ }
skb->transport_header = skb->network_header;
proto = fl6->flowi6_proto;
@@ -987,13 +992,14 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
stats->tx_errors++;
stats->tx_aborted_errors++;
}
- ip6_tnl_dst_store(t, dst);
+ if (ndst)
+ ip6_tnl_dst_store(t, ndst);
return 0;
tx_err_link_failure:
stats->tx_carrier_errors++;
dst_link_failure(skb);
tx_err_dst_release:
- dst_release(dst);
+ dst_release(ndst);
return err;
}
@@ -1020,9 +1026,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
dsfield = ipv4_get_dsfield(iph);
- if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
& IPV6_TCLASS_MASK;
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+ fl6.flowi6_mark = skb->mark;
err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
if (err != 0) {
@@ -1069,10 +1077,12 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
fl6.flowi6_proto = IPPROTO_IPV6;
dsfield = ipv6_get_dsfield(ipv6h);
- if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
- if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+ fl6.flowi6_mark = skb->mark;
err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
if (err != 0) {
@@ -1439,7 +1449,7 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
t->parms.proto = IPPROTO_IPV6;
dev_hold(dev);
- rcu_assign_pointer(ip6n->tnls_wc[0], t);
+ RCU_INIT_POINTER(ip6n->tnls_wc[0], t);
return 0;
}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 2fbda5fc4cc..c99e3ee9781 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -343,7 +343,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
break;
case IPV6_TRANSPARENT:
- if (!capable(CAP_NET_ADMIN)) {
+ if (valbool && !capable(CAP_NET_ADMIN) && !capable(CAP_NET_RAW)) {
retv = -EPERM;
break;
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 9da6e02eaae..44e5b7f2a6c 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -370,17 +370,14 @@ static int ndisc_constructor(struct neighbour *neigh)
struct neigh_parms *parms;
int is_multicast = ipv6_addr_is_multicast(addr);
- rcu_read_lock();
in6_dev = in6_dev_get(dev);
if (in6_dev == NULL) {
- rcu_read_unlock();
return -EINVAL;
}
parms = in6_dev->nd_parms;
__neigh_parms_put(neigh->parms);
neigh->parms = neigh_parms_clone(parms);
- rcu_read_unlock();
neigh->type = is_multicast ? RTN_MULTICAST : RTN_UNICAST;
if (!dev->header_ops) {
@@ -533,7 +530,8 @@ void ndisc_send_skb(struct sk_buff *skb,
skb_dst_set(skb, dst);
- idev = in6_dev_get(dst->dev);
+ rcu_read_lock();
+ idev = __in6_dev_get(dst->dev);
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
@@ -543,8 +541,7 @@ void ndisc_send_skb(struct sk_buff *skb,
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
}
- if (likely(idev != NULL))
- in6_dev_put(idev);
+ rcu_read_unlock();
}
EXPORT_SYMBOL(ndisc_send_skb);
@@ -1039,7 +1036,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
if (skb->len < sizeof(*rs_msg))
return;
- idev = in6_dev_get(skb->dev);
+ idev = __in6_dev_get(skb->dev);
if (!idev) {
if (net_ratelimit())
ND_PRINTK1("ICMP6 RS: can't find in6 device\n");
@@ -1080,7 +1077,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
neigh_release(neigh);
}
out:
- in6_dev_put(idev);
+ return;
}
static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
@@ -1179,7 +1176,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
* set the RA_RECV flag in the interface
*/
- in6_dev = in6_dev_get(skb->dev);
+ in6_dev = __in6_dev_get(skb->dev);
if (in6_dev == NULL) {
ND_PRINTK0(KERN_ERR
"ICMPv6 RA: can't find inet6 device for %s.\n",
@@ -1188,7 +1185,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
}
if (!ndisc_parse_options(opt, optlen, &ndopts)) {
- in6_dev_put(in6_dev);
ND_PRINTK2(KERN_WARNING
"ICMP6 RA: invalid ND options\n");
return;
@@ -1225,6 +1221,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
if (!in6_dev->cnf.accept_ra_defrtr)
goto skip_defrtr;
+ if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
+ goto skip_defrtr;
+
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
#ifdef CONFIG_IPV6_ROUTER_PREF
@@ -1255,7 +1254,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
ND_PRINTK0(KERN_ERR
"ICMPv6 RA: %s() failed to add default route.\n",
__func__);
- in6_dev_put(in6_dev);
return;
}
@@ -1265,7 +1263,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
"ICMPv6 RA: %s() got default router without neighbour.\n",
__func__);
dst_release(&rt->dst);
- in6_dev_put(in6_dev);
return;
}
neigh->flags |= NTF_ROUTER;
@@ -1349,6 +1346,9 @@ skip_linkparms:
goto out;
#ifdef CONFIG_IPV6_ROUTE_INFO
+ if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
+ goto skip_routeinfo;
+
if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_opts_ri;
@@ -1366,6 +1366,8 @@ skip_linkparms:
&ipv6_hdr(skb)->saddr);
}
}
+
+skip_routeinfo:
#endif
#ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -1422,7 +1424,6 @@ out:
dst_release(&rt->dst);
else if (neigh)
neigh_release(neigh);
- in6_dev_put(in6_dev);
}
static void ndisc_redirect_rcv(struct sk_buff *skb)
@@ -1481,13 +1482,11 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
return;
}
- in6_dev = in6_dev_get(skb->dev);
+ in6_dev = __in6_dev_get(skb->dev);
if (!in6_dev)
return;
- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects) {
- in6_dev_put(in6_dev);
+ if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
return;
- }
/* RFC2461 8.1:
* The IP source address of the Redirect MUST be the same as the current
@@ -1497,7 +1496,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
ND_PRINTK2(KERN_WARNING
"ICMPv6 Redirect: invalid ND options\n");
- in6_dev_put(in6_dev);
return;
}
if (ndopts.nd_opts_tgt_lladdr) {
@@ -1506,7 +1504,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
if (!lladdr) {
ND_PRINTK2(KERN_WARNING
"ICMPv6 Redirect: invalid link-layer address length\n");
- in6_dev_put(in6_dev);
return;
}
}
@@ -1518,7 +1515,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
on_link);
neigh_release(neigh);
}
- in6_dev_put(in6_dev);
}
void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
@@ -1651,7 +1647,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
csum_partial(icmph, len, 0));
skb_dst_set(buff, dst);
- idev = in6_dev_get(dst->dev);
+ rcu_read_lock();
+ idev = __in6_dev_get(dst->dev);
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
dst_output);
@@ -1660,8 +1657,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
}
- if (likely(idev != NULL))
- in6_dev_put(idev);
+ rcu_read_unlock();
return;
release:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 08572726381..e8762c73b17 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -378,8 +378,8 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
head->next = clone;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
- for (i=0; i<skb_shinfo(head)->nr_frags; i++)
- plen += skb_shinfo(head)->frags[i].size;
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->len = clone->data_len = head->data_len - plen;
head->data_len -= clone->len;
head->len -= clone->len;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 343852e5c70..6f7824e1cea 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -130,14 +130,14 @@ static mh_filter_t __rcu *mh_filter __read_mostly;
int rawv6_mh_filter_register(mh_filter_t filter)
{
- rcu_assign_pointer(mh_filter, filter);
+ RCU_INIT_POINTER(mh_filter, filter);
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);
int rawv6_mh_filter_unregister(mh_filter_t filter)
{
- rcu_assign_pointer(mh_filter, NULL);
+ RCU_INIT_POINTER(mh_filter, NULL);
synchronize_rcu();
return 0;
}
@@ -372,9 +372,9 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
read_unlock(&raw_v6_hashinfo.lock);
}
-static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
+static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
- if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
+ if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
@@ -542,8 +542,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
goto out;
offset = rp->offset;
- total_len = inet_sk(sk)->cork.base.length - (skb_network_header(skb) -
- skb->data);
+ total_len = inet_sk(sk)->cork.base.length;
if (offset >= total_len - 1) {
err = -EINVAL;
ip6_flush_pending_frames(sk);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 7b954e2539d..cc22099ac8b 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -464,8 +464,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
head->next = clone;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
- for (i=0; i<skb_shinfo(head)->nr_frags; i++)
- plen += skb_shinfo(head)->frags[i].size;
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->len = clone->data_len = head->data_len - plen;
head->data_len -= clone->len;
head->len -= clone->len;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 00b15ac7a70..a7a18602a04 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -182,7 +182,7 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
if (t == iter) {
- rcu_assign_pointer(*tp, t->next);
+ RCU_INIT_POINTER(*tp, t->next);
break;
}
}
@@ -192,8 +192,8 @@ static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
{
struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
- rcu_assign_pointer(t->next, rtnl_dereference(*tp));
- rcu_assign_pointer(*tp, t);
+ RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
+ RCU_INIT_POINTER(*tp, t);
}
static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
@@ -391,7 +391,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
p->addr = a->addr;
p->flags = a->flags;
t->prl_count++;
- rcu_assign_pointer(t->prl, p);
+ RCU_INIT_POINTER(t->prl, p);
out:
return err;
}
@@ -474,7 +474,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
struct sit_net *sitn = net_generic(net, sit_net_id);
if (dev == sitn->fb_tunnel_dev) {
- rcu_assign_pointer(sitn->tunnels_wc[0], NULL);
+ RCU_INIT_POINTER(sitn->tunnels_wc[0], NULL);
} else {
ipip6_tunnel_unlink(sitn, netdev_priv(dev));
ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
@@ -1176,7 +1176,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
if (!dev->tstats)
return -ENOMEM;
dev_hold(dev);
- rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
+ RCU_INIT_POINTER(sitn->tunnels_wc[0], tunnel);
return 0;
}
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index ac838965ff3..5a0d6648bbb 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -115,7 +115,7 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
& COOKIEMASK;
}
-__u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
+__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
const struct tcphdr *th = tcp_hdr(skb);
@@ -137,7 +137,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
jiffies / (HZ * 60), mssind);
}
-static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
+static inline int cookie_check(const struct sk_buff *skb, __u32 cookie)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
const struct tcphdr *th = tcp_hdr(skb);
@@ -152,7 +152,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
{
struct tcp_options_received tcp_opt;
- u8 *hash_location;
+ const u8 *hash_location;
struct inet_request_sock *ireq;
struct inet6_request_sock *ireq6;
struct tcp_request_sock *treq;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 79cc6469508..c8683fcc487 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -114,7 +114,7 @@ static __inline__ __sum16 tcp_v6_check(int len,
return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
-static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
+static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
ipv6_hdr(skb)->saddr.s6_addr32,
@@ -591,7 +591,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
}
sk_nocaps_add(sk, NETIF_F_GSO_MASK);
}
- if (tcp_alloc_md5sig_pool(sk) == NULL) {
+ if (tp->md5sig_info->entries6 == 0 &&
+ tcp_alloc_md5sig_pool(sk) == NULL) {
kfree(newkey);
return -ENOMEM;
}
@@ -600,8 +601,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
(tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
if (!keys) {
- tcp_free_md5sig_pool();
kfree(newkey);
+ if (tp->md5sig_info->entries6 == 0)
+ tcp_free_md5sig_pool();
return -ENOMEM;
}
@@ -647,6 +649,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
kfree(tp->md5sig_info->keys6);
tp->md5sig_info->keys6 = NULL;
tp->md5sig_info->alloced6 = 0;
+ tcp_free_md5sig_pool();
} else {
/* shrink the database */
if (tp->md5sig_info->entries6 != i)
@@ -655,7 +658,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
(tp->md5sig_info->entries6 - i)
* sizeof (tp->md5sig_info->keys6[0]));
}
- tcp_free_md5sig_pool();
return 0;
}
}
@@ -759,7 +761,7 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
const struct in6_addr *daddr, struct in6_addr *saddr,
- struct tcphdr *th)
+ const struct tcphdr *th)
{
struct tcp_md5sig_pool *hp;
struct hash_desc *desc;
@@ -791,13 +793,14 @@ clear_hash_noput:
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
- struct sock *sk, struct request_sock *req,
- struct sk_buff *skb)
+ const struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb)
{
const struct in6_addr *saddr, *daddr;
struct tcp_md5sig_pool *hp;
struct hash_desc *desc;
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
if (sk) {
saddr = &inet6_sk(sk)->saddr;
@@ -840,12 +843,12 @@ clear_hash_noput:
return 1;
}
-static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
+static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
- __u8 *hash_location = NULL;
+ const __u8 *hash_location = NULL;
struct tcp_md5sig_key *hash_expected;
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
int genhash;
u8 newhash[16];
@@ -978,7 +981,8 @@ static int tcp6_gro_complete(struct sk_buff *skb)
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
u32 ts, struct tcp_md5sig_key *key, int rst)
{
- struct tcphdr *th = tcp_hdr(skb), *t1;
+ const struct tcphdr *th = tcp_hdr(skb);
+ struct tcphdr *t1;
struct sk_buff *buff;
struct flowi6 fl6;
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -1068,7 +1072,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
u32 seq = 0, ack_seq = 0;
struct tcp_md5sig_key *key = NULL;
@@ -1158,7 +1162,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct tcp_extend_values tmp_ext;
struct tcp_options_received tmp_opt;
- u8 *hash_location;
+ const u8 *hash_location;
struct request_sock *req;
struct inet6_request_sock *treq;
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -1606,7 +1610,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
opt_skb = skb_clone(skb, GFP_ATOMIC);
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
- sock_rps_save_rxhash(sk, skb->rxhash);
+ sock_rps_save_rxhash(sk, skb);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
if (opt_skb)
@@ -1628,7 +1632,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
* the new socket..
*/
if(nsk != sk) {
- sock_rps_save_rxhash(nsk, skb->rxhash);
+ sock_rps_save_rxhash(nsk, skb);
if (tcp_child_process(sk, nsk, skb))
goto reset;
if (opt_skb)
@@ -1636,7 +1640,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
} else
- sock_rps_save_rxhash(sk, skb->rxhash);
+ sock_rps_save_rxhash(sk, skb);
if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
@@ -1686,7 +1690,7 @@ ipv6_pktoptions:
static int tcp_v6_rcv(struct sk_buff *skb)
{
- struct tcphdr *th;
+ const struct tcphdr *th;
const struct ipv6hdr *hdr;
struct sock *sk;
int ret;
@@ -1720,7 +1724,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
skb->len - th->doff*4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->when = 0;
- TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
+ TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
TCP_SKB_CB(skb)->sacked = 0;
sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1854,8 +1858,8 @@ static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
static void *tcp_v6_tw_get_peer(struct sock *sk)
{
- struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
- struct inet_timewait_sock *tw = inet_twsk(sk);
+ const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
+ const struct inet_timewait_sock *tw = inet_twsk(sk);
if (tw->tw_family == AF_INET)
return tcp_v4_tw_get_peer(sk);
@@ -2010,7 +2014,7 @@ static void tcp_v6_destroy_sock(struct sock *sk)
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
- struct sock *sk, struct request_sock *req, int i, int uid)
+ const struct sock *sk, struct request_sock *req, int i, int uid)
{
int ttd = req->expires - jiffies;
const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
@@ -2046,10 +2050,10 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
__u16 destp, srcp;
int timer_active;
unsigned long timer_expires;
- struct inet_sock *inet = inet_sk(sp);
- struct tcp_sock *tp = tcp_sk(sp);
+ const struct inet_sock *inet = inet_sk(sp);
+ const struct tcp_sock *tp = tcp_sk(sp);
const struct inet_connection_sock *icsk = inet_csk(sp);
- struct ipv6_pinfo *np = inet6_sk(sp);
+ const struct ipv6_pinfo *np = inet6_sk(sp);
dest = &np->daddr;
src = &np->rcv_saddr;
@@ -2101,7 +2105,7 @@ static void get_timewait6_sock(struct seq_file *seq,
{
const struct in6_addr *dest, *src;
__u16 destp, srcp;
- struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
+ const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
int ttd = tw->tw_ttd - jiffies;
if (ttd < 0)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index bb95e8e1c6f..f4ca0a5b345 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -509,7 +509,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
int is_udplite = IS_UDPLITE(sk);
if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
- sock_rps_save_rxhash(sk, skb->rxhash);
+ sock_rps_save_rxhash(sk, skb);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
@@ -533,7 +533,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
}
}
- if (rcu_dereference_raw(sk->sk_filter)) {
+ if (rcu_access_pointer(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto drop;
}
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 49a91c5f562..faae41737fc 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -28,6 +28,43 @@ int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
EXPORT_SYMBOL(xfrm6_find_1stfragopt);
+static int xfrm6_local_dontfrag(struct sk_buff *skb)
+{
+ int proto;
+ struct sock *sk = skb->sk;
+
+ if (sk) {
+ proto = sk->sk_protocol;
+
+ if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
+ return inet6_sk(sk)->dontfrag;
+ }
+
+ return 0;
+}
+
+static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
+{
+ struct flowi6 fl6;
+ struct sock *sk = skb->sk;
+
+ fl6.flowi6_oif = sk->sk_bound_dev_if;
+ ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+
+ ipv6_local_rxpmtu(sk, &fl6, mtu);
+}
+
+static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
+{
+ struct flowi6 fl6;
+ struct sock *sk = skb->sk;
+
+ fl6.fl6_dport = inet_sk(sk)->inet_dport;
+ ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+
+ ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
+}
+
static int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
int mtu, ret = 0;
@@ -39,7 +76,13 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
if (!skb->local_df && skb->len > mtu) {
skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+
+ if (xfrm6_local_dontfrag(skb))
+ xfrm6_local_rxpmtu(skb, mtu);
+ else if (skb->sk)
+ xfrm6_local_error(skb, mtu);
+ else
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ret = -EMSGSIZE;
}
@@ -93,9 +136,18 @@ static int __xfrm6_output(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;
+ int mtu = ip6_skb_dst_mtu(skb);
+
+ if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
+ xfrm6_local_rxpmtu(skb, mtu);
+ return -EMSGSIZE;
+ } else if (!skb->local_df && skb->len > mtu && skb->sk) {
+ xfrm6_local_error(skb, mtu);
+ return -EMSGSIZE;
+ }
if ((x && x->props.mode == XFRM_MODE_TUNNEL) &&
- ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+ ((skb->len > mtu && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb)))) {
return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
}
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index b3cc8b3989a..253695d43fd 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -551,7 +551,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
*/
tty->closing = 1;
if (self->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, self->closing_wait);
+ tty_wait_until_sent_from_close(tty, self->closing_wait);
ircomm_tty_shutdown(self);
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index e8d5f4405d6..d14152e866d 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -50,7 +50,7 @@ static const struct net_device_ops irlan_eth_netdev_ops = {
.ndo_open = irlan_eth_open,
.ndo_stop = irlan_eth_close,
.ndo_start_xmit = irlan_eth_xmit,
- .ndo_set_multicast_list = irlan_eth_set_multicast_list,
+ .ndo_set_rx_mode = irlan_eth_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig
index 16ce9cd4f39..497fbe732de 100644
--- a/net/iucv/Kconfig
+++ b/net/iucv/Kconfig
@@ -1,15 +1,17 @@
config IUCV
- tristate "IUCV support (S390 - z/VM only)"
depends on S390
+ def_tristate y if S390
+ prompt "IUCV support (S390 - z/VM only)"
help
Select this option if you want to use inter-user communication
under VM or VIF. If you run on z/VM, say "Y" to enable a fast
communication link between VM guests.
config AFIUCV
- tristate "AF_IUCV support (S390 - z/VM only)"
- depends on IUCV
+ depends on S390
+ def_tristate m if QETH_L3 || IUCV
+ prompt "AF_IUCV Socket support (S390 - z/VM and HiperSockets transport)"
help
- Select this option if you want to use inter-user communication under
- VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast
- communication link between VM guests.
+ Select this option if you want to use AF_IUCV socket applications
+ based on z/VM inter-user communication vehicle or based on
+ HiperSockets.
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index e2013e434d0..274d150320c 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -27,10 +27,9 @@
#include <asm/cpcmd.h>
#include <linux/kmod.h>
-#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>
-#define VERSION "1.1"
+#define VERSION "1.2"
static char iucv_userid[80];
@@ -42,6 +41,8 @@ static struct proto iucv_proto = {
.obj_size = sizeof(struct iucv_sock),
};
+static struct iucv_interface *pr_iucv;
+
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
@@ -90,6 +91,12 @@ do { \
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
+static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
+static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+ struct sk_buff *skb, u8 flags);
+static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
+
/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
@@ -165,7 +172,7 @@ static int afiucv_pm_freeze(struct device *dev)
case IUCV_CLOSING:
case IUCV_CONNECTED:
if (iucv->path) {
- err = iucv_path_sever(iucv->path, NULL);
+ err = pr_iucv->path_sever(iucv->path, NULL);
iucv_path_free(iucv->path);
iucv->path = NULL;
}
@@ -229,7 +236,7 @@ static const struct dev_pm_ops afiucv_pm_ops = {
static struct device_driver af_iucv_driver = {
.owner = THIS_MODULE,
.name = "afiucv",
- .bus = &iucv_bus,
+ .bus = NULL,
.pm = &afiucv_pm_ops,
};
@@ -294,7 +301,11 @@ static inline int iucv_below_msglim(struct sock *sk)
if (sk->sk_state != IUCV_CONNECTED)
return 1;
- return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+ if (iucv->transport == AF_IUCV_TRANS_IUCV)
+ return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+ else
+ return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
+ (atomic_read(&iucv->pendings) <= 0));
}
/**
@@ -312,6 +323,79 @@ static void iucv_sock_wake_msglim(struct sock *sk)
rcu_read_unlock();
}
+/**
+ * afiucv_hs_send() - send a message through HiperSockets transport
+ */
+static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+ struct sk_buff *skb, u8 flags)
+{
+ struct net *net = sock_net(sock);
+ struct iucv_sock *iucv = iucv_sk(sock);
+ struct af_iucv_trans_hdr *phs_hdr;
+ struct sk_buff *nskb;
+ int err, confirm_recv = 0;
+
+ memset(skb->head, 0, ETH_HLEN);
+ phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
+ sizeof(struct af_iucv_trans_hdr));
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_push(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
+
+ phs_hdr->magic = ETH_P_AF_IUCV;
+ phs_hdr->version = 1;
+ phs_hdr->flags = flags;
+ if (flags == AF_IUCV_FLAG_SYN)
+ phs_hdr->window = iucv->msglimit;
+ else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
+ confirm_recv = atomic_read(&iucv->msg_recv);
+ phs_hdr->window = confirm_recv;
+ if (confirm_recv)
+ phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
+ }
+ memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
+ memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
+ memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
+ memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
+ ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
+ ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
+ ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
+ ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
+ if (imsg)
+ memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
+
+ rcu_read_lock();
+ skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
+ rcu_read_unlock();
+ if (!skb->dev)
+ return -ENODEV;
+ if (!(skb->dev->flags & IFF_UP))
+ return -ENETDOWN;
+ if (skb->len > skb->dev->mtu) {
+ if (sock->sk_type == SOCK_SEQPACKET)
+ return -EMSGSIZE;
+ else
+ skb_trim(skb, skb->dev->mtu);
+ }
+ skb->protocol = ETH_P_AF_IUCV;
+ skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+ skb_queue_tail(&iucv->send_skb_q, nskb);
+ err = dev_queue_xmit(skb);
+ if (err) {
+ skb_unlink(nskb, &iucv->send_skb_q);
+ kfree_skb(nskb);
+ } else {
+ atomic_sub(confirm_recv, &iucv->msg_recv);
+ WARN_ON(atomic_read(&iucv->msg_recv) < 0);
+ }
+ return err;
+}
+
/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
@@ -380,6 +464,8 @@ static void iucv_sock_close(struct sock *sk)
unsigned char user_data[16];
struct iucv_sock *iucv = iucv_sk(sk);
unsigned long timeo;
+ int err, blen;
+ struct sk_buff *skb;
iucv_sock_clear_timer(sk);
lock_sock(sk);
@@ -390,6 +476,20 @@ static void iucv_sock_close(struct sock *sk)
break;
case IUCV_CONNECTED:
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+ /* send fin */
+ blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+ skb = sock_alloc_send_skb(sk, blen, 1, &err);
+ if (skb) {
+ skb_reserve(skb,
+ sizeof(struct af_iucv_trans_hdr) +
+ ETH_HLEN);
+ err = afiucv_hs_send(NULL, sk, skb,
+ AF_IUCV_FLAG_FIN);
+ }
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
case IUCV_DISCONN:
sk->sk_state = IUCV_CLOSING;
sk->sk_state_change(sk);
@@ -412,7 +512,7 @@ static void iucv_sock_close(struct sock *sk)
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
ASCEBC(user_data, sizeof(user_data));
- iucv_path_sever(iucv->path, user_data);
+ pr_iucv->path_sever(iucv->path, user_data);
iucv_path_free(iucv->path);
iucv->path = NULL;
}
@@ -444,23 +544,33 @@ static void iucv_sock_init(struct sock *sk, struct sock *parent)
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
+ struct iucv_sock *iucv;
sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
if (!sk)
return NULL;
+ iucv = iucv_sk(sk);
sock_init_data(sock, sk);
- INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
- spin_lock_init(&iucv_sk(sk)->accept_q_lock);
- skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
- INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
- spin_lock_init(&iucv_sk(sk)->message_q.lock);
- skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
- iucv_sk(sk)->send_tag = 0;
- iucv_sk(sk)->flags = 0;
- iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
- iucv_sk(sk)->path = NULL;
- memset(&iucv_sk(sk)->src_user_id , 0, 32);
+ INIT_LIST_HEAD(&iucv->accept_q);
+ spin_lock_init(&iucv->accept_q_lock);
+ skb_queue_head_init(&iucv->send_skb_q);
+ INIT_LIST_HEAD(&iucv->message_q.list);
+ spin_lock_init(&iucv->message_q.lock);
+ skb_queue_head_init(&iucv->backlog_skb_q);
+ iucv->send_tag = 0;
+ atomic_set(&iucv->pendings, 0);
+ iucv->flags = 0;
+ iucv->msglimit = 0;
+ atomic_set(&iucv->msg_sent, 0);
+ atomic_set(&iucv->msg_recv, 0);
+ iucv->path = NULL;
+ iucv->sk_txnotify = afiucv_hs_callback_txnotify;
+ memset(&iucv->src_user_id , 0, 32);
+ if (pr_iucv)
+ iucv->transport = AF_IUCV_TRANS_IUCV;
+ else
+ iucv->transport = AF_IUCV_TRANS_HIPER;
sk->sk_destruct = iucv_sock_destruct;
sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@@ -591,7 +701,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
struct sock *sk = sock->sk;
struct iucv_sock *iucv;
- int err;
+ int err = 0;
+ struct net_device *dev;
+ char uid[9];
/* Verify the input sockaddr */
if (!addr || addr->sa_family != AF_IUCV)
@@ -610,19 +722,46 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
err = -EADDRINUSE;
goto done_unlock;
}
- if (iucv->path) {
- err = 0;
+ if (iucv->path)
goto done_unlock;
- }
/* Bind the socket */
- memcpy(iucv->src_name, sa->siucv_name, 8);
- /* Copy the user id */
- memcpy(iucv->src_user_id, iucv_userid, 8);
- sk->sk_state = IUCV_BOUND;
- err = 0;
+ if (pr_iucv)
+ if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
+ goto vm_bind; /* VM IUCV transport */
+ /* try hiper transport */
+ memcpy(uid, sa->siucv_user_id, sizeof(uid));
+ ASCEBC(uid, 8);
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
+ if (!memcmp(dev->perm_addr, uid, 8)) {
+ memcpy(iucv->src_name, sa->siucv_name, 8);
+ memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
+ sock->sk->sk_bound_dev_if = dev->ifindex;
+ sk->sk_state = IUCV_BOUND;
+ iucv->transport = AF_IUCV_TRANS_HIPER;
+ if (!iucv->msglimit)
+ iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
+ rcu_read_unlock();
+ goto done_unlock;
+ }
+ }
+ rcu_read_unlock();
+vm_bind:
+ if (pr_iucv) {
+ /* use local userid for backward compat */
+ memcpy(iucv->src_name, sa->siucv_name, 8);
+ memcpy(iucv->src_user_id, iucv_userid, 8);
+ sk->sk_state = IUCV_BOUND;
+ iucv->transport = AF_IUCV_TRANS_IUCV;
+ if (!iucv->msglimit)
+ iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
+ goto done_unlock;
+ }
+ /* found no dev to bind */
+ err = -ENODEV;
done_unlock:
/* Release the socket list lock */
write_unlock_bh(&iucv_sk_list.lock);
@@ -658,45 +797,44 @@ static int iucv_sock_autobind(struct sock *sk)
memcpy(&iucv->src_name, name, 8);
+ if (!iucv->msglimit)
+ iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
+
return err;
}
-/* Connect an unconnected socket */
-static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
- int alen, int flags)
+static int afiucv_hs_connect(struct socket *sock)
{
- struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
struct sock *sk = sock->sk;
- struct iucv_sock *iucv;
- unsigned char user_data[16];
- int err;
-
- if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
- return -EINVAL;
-
- if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
- return -EBADFD;
-
- if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
- return -EINVAL;
+ struct sk_buff *skb;
+ int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+ int err = 0;
- if (sk->sk_state == IUCV_OPEN) {
- err = iucv_sock_autobind(sk);
- if (unlikely(err))
- return err;
+ /* send syn */
+ skb = sock_alloc_send_skb(sk, blen, 1, &err);
+ if (!skb) {
+ err = -ENOMEM;
+ goto done;
}
+ skb->dev = NULL;
+ skb_reserve(skb, blen);
+ err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
+done:
+ return err;
+}
- lock_sock(sk);
-
- /* Set the destination information */
- memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
- memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);
+static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
+{
+ struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
+ unsigned char user_data[16];
+ int err;
high_nmcpy(user_data, sa->siucv_name);
- low_nmcpy(user_data, iucv_sk(sk)->src_name);
+ low_nmcpy(user_data, iucv->src_name);
ASCEBC(user_data, sizeof(user_data));
- iucv = iucv_sk(sk);
/* Create path. */
iucv->path = iucv_path_alloc(iucv->msglimit,
IUCV_IPRMDATA, GFP_KERNEL);
@@ -704,8 +842,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
err = -ENOMEM;
goto done;
}
- err = iucv_path_connect(iucv->path, &af_iucv_handler,
- sa->siucv_user_id, NULL, user_data, sk);
+ err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
+ sa->siucv_user_id, NULL, user_data,
+ sk);
if (err) {
iucv_path_free(iucv->path);
iucv->path = NULL;
@@ -724,21 +863,62 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
err = -ECONNREFUSED;
break;
}
- goto done;
}
+done:
+ return err;
+}
- if (sk->sk_state != IUCV_CONNECTED) {
+/* Connect an unconnected socket */
+static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
+ int alen, int flags)
+{
+ struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
+ int err;
+
+ if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
+ return -EINVAL;
+
+ if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
+ return -EBADFD;
+
+ if (sk->sk_state == IUCV_OPEN &&
+ iucv->transport == AF_IUCV_TRANS_HIPER)
+ return -EBADFD; /* explicit bind required */
+
+ if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
+ return -EINVAL;
+
+ if (sk->sk_state == IUCV_OPEN) {
+ err = iucv_sock_autobind(sk);
+ if (unlikely(err))
+ return err;
+ }
+
+ lock_sock(sk);
+
+ /* Set the destination information */
+ memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
+ memcpy(iucv->dst_name, sa->siucv_name, 8);
+
+ if (iucv->transport == AF_IUCV_TRANS_HIPER)
+ err = afiucv_hs_connect(sock);
+ else
+ err = afiucv_path_connect(sock, addr);
+ if (err)
+ goto done;
+
+ if (sk->sk_state != IUCV_CONNECTED)
err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
IUCV_DISCONN),
sock_sndtimeo(sk, flags & O_NONBLOCK));
- }
- if (sk->sk_state == IUCV_DISCONN) {
+ if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
err = -ECONNREFUSED;
- }
- if (err) {
- iucv_path_sever(iucv->path, NULL);
+ if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
+ pr_iucv->path_sever(iucv->path, NULL);
iucv_path_free(iucv->path);
iucv->path = NULL;
}
@@ -833,20 +1013,21 @@ static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
{
struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
addr->sa_family = AF_IUCV;
*len = sizeof(struct sockaddr_iucv);
if (peer) {
- memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
- memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
+ memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
+ memcpy(siucv->siucv_name, iucv->dst_name, 8);
} else {
- memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
- memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
+ memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
+ memcpy(siucv->siucv_name, iucv->src_name, 8);
}
memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
- memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
+ memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
return 0;
}
@@ -871,7 +1052,7 @@ static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
memcpy(prmdata, (void *) skb->data, skb->len);
prmdata[7] = 0xff - (u8) skb->len;
- return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
+ return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
(void *) prmdata, 8);
}
@@ -960,9 +1141,16 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
* this is fine for SOCK_SEQPACKET (unless we want to support
* segmented records using the MSG_EOR flag), but
* for SOCK_STREAM we might want to improve it in future */
- skb = sock_alloc_send_skb(sk, len, noblock, &err);
+ if (iucv->transport == AF_IUCV_TRANS_HIPER)
+ skb = sock_alloc_send_skb(sk,
+ len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
+ noblock, &err);
+ else
+ skb = sock_alloc_send_skb(sk, len, noblock, &err);
if (!skb)
goto out;
+ if (iucv->transport == AF_IUCV_TRANS_HIPER)
+ skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
err = -EFAULT;
goto fail;
@@ -983,6 +1171,15 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
/* increment and save iucv message tag for msg_completion cbk */
txmsg.tag = iucv->send_tag++;
memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+ atomic_inc(&iucv->msg_sent);
+ err = afiucv_hs_send(&txmsg, sk, skb, 0);
+ if (err) {
+ atomic_dec(&iucv->msg_sent);
+ goto fail;
+ }
+ goto release;
+ }
skb_queue_tail(&iucv->send_skb_q, skb);
if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
@@ -999,13 +1196,13 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
/* this error should never happen since the
* IUCV_IPRMDATA path flag is set... sever path */
if (err == 0x15) {
- iucv_path_sever(iucv->path, NULL);
+ pr_iucv->path_sever(iucv->path, NULL);
skb_unlink(skb, &iucv->send_skb_q);
err = -EPIPE;
goto fail;
}
} else
- err = iucv_message_send(iucv->path, &txmsg, 0, 0,
+ err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
(void *) skb->data, skb->len);
if (err) {
if (err == 3) {
@@ -1023,6 +1220,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
goto fail;
}
+release:
release_sock(sk);
return len;
@@ -1095,8 +1293,9 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
skb->len = 0;
}
} else {
- rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
- skb->data, len, NULL);
+ rc = pr_iucv->message_receive(path, msg,
+ msg->flags & IUCV_IPRMDATA,
+ skb->data, len, NULL);
if (rc) {
kfree_skb(skb);
return;
@@ -1110,7 +1309,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
kfree_skb(skb);
skb = NULL;
if (rc) {
- iucv_path_sever(path, NULL);
+ pr_iucv->path_sever(path, NULL);
return;
}
skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
@@ -1154,7 +1353,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
unsigned int copied, rlen;
- struct sk_buff *skb, *rskb, *cskb;
+ struct sk_buff *skb, *rskb, *cskb, *sskb;
+ int blen;
int err = 0;
if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
@@ -1179,7 +1379,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
copied = min_t(unsigned int, rlen, len);
cskb = skb;
- if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
+ if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
return -EFAULT;
@@ -1217,6 +1417,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
}
kfree_skb(skb);
+ atomic_inc(&iucv->msg_recv);
/* Queue backlog skbs */
spin_lock_bh(&iucv->message_q.lock);
@@ -1233,6 +1434,24 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (skb_queue_empty(&iucv->backlog_skb_q)) {
if (!list_empty(&iucv->message_q.list))
iucv_process_message_q(sk);
+ if (atomic_read(&iucv->msg_recv) >=
+ iucv->msglimit / 2) {
+ /* send WIN to peer */
+ blen = sizeof(struct af_iucv_trans_hdr) +
+ ETH_HLEN;
+ sskb = sock_alloc_send_skb(sk, blen, 1, &err);
+ if (sskb) {
+ skb_reserve(sskb,
+ sizeof(struct af_iucv_trans_hdr)
+ + ETH_HLEN);
+ err = afiucv_hs_send(NULL, sk, sskb,
+ AF_IUCV_FLAG_WIN);
+ }
+ if (err) {
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
+ }
}
spin_unlock_bh(&iucv->message_q.lock);
}
@@ -1327,8 +1546,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
txmsg.class = 0;
txmsg.tag = 0;
- err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
- (void *) iprm_shutdown, 8);
+ err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
+ 0, (void *) iprm_shutdown, 8);
if (err) {
switch (err) {
case 1:
@@ -1345,7 +1564,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
}
if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
- err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
+ err = pr_iucv->path_quiesce(iucv->path, NULL);
if (err)
err = -ENOTCONN;
@@ -1372,7 +1591,7 @@ static int iucv_sock_release(struct socket *sock)
/* Unregister with IUCV base support */
if (iucv_sk(sk)->path) {
- iucv_path_sever(iucv_sk(sk)->path, NULL);
+ pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
iucv_path_free(iucv_sk(sk)->path);
iucv_sk(sk)->path = NULL;
}
@@ -1514,14 +1733,14 @@ static int iucv_callback_connreq(struct iucv_path *path,
high_nmcpy(user_data, iucv->dst_name);
ASCEBC(user_data, sizeof(user_data));
if (sk->sk_state != IUCV_LISTEN) {
- err = iucv_path_sever(path, user_data);
+ err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
goto fail;
}
/* Check for backlog size */
if (sk_acceptq_is_full(sk)) {
- err = iucv_path_sever(path, user_data);
+ err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
goto fail;
}
@@ -1529,7 +1748,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
/* Create the new socket */
nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
if (!nsk) {
- err = iucv_path_sever(path, user_data);
+ err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
goto fail;
}
@@ -1553,9 +1772,9 @@ static int iucv_callback_connreq(struct iucv_path *path,
/* set message limit for path based on msglimit of accepting socket */
niucv->msglimit = iucv->msglimit;
path->msglim = iucv->msglimit;
- err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
+ err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
if (err) {
- err = iucv_path_sever(path, user_data);
+ err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
iucv_sock_kill(nsk);
goto fail;
@@ -1589,7 +1808,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
int len;
if (sk->sk_shutdown & RCV_SHUTDOWN) {
- iucv_message_reject(path, msg);
+ pr_iucv->message_reject(path, msg);
return;
}
@@ -1600,7 +1819,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
goto save_message;
len = atomic_read(&sk->sk_rmem_alloc);
- len += iucv_msg_length(msg) + sizeof(struct sk_buff);
+ len += SKB_TRUESIZE(iucv_msg_length(msg));
if (len > sk->sk_rcvbuf)
goto save_message;
@@ -1692,6 +1911,389 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
bh_unlock_sock(sk);
}
+/***************** HiperSockets transport callbacks ********************/
+static void afiucv_swap_src_dest(struct sk_buff *skb)
+{
+ struct af_iucv_trans_hdr *trans_hdr =
+ (struct af_iucv_trans_hdr *)skb->data;
+ char tmpID[8];
+ char tmpName[8];
+
+ ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
+ ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
+ ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
+ ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
+ memcpy(tmpID, trans_hdr->srcUserID, 8);
+ memcpy(tmpName, trans_hdr->srcAppName, 8);
+ memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
+ memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
+ memcpy(trans_hdr->destUserID, tmpID, 8);
+ memcpy(trans_hdr->destAppName, tmpName, 8);
+ skb_push(skb, ETH_HLEN);
+ memset(skb->data, 0, ETH_HLEN);
+}
+
+/**
+ * afiucv_hs_callback_syn - react on received SYN
+ **/
+static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
+{
+ struct sock *nsk;
+ struct iucv_sock *iucv, *niucv;
+ struct af_iucv_trans_hdr *trans_hdr;
+ int err;
+
+ iucv = iucv_sk(sk);
+ trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
+ if (!iucv) {
+ /* no sock - connection refused */
+ afiucv_swap_src_dest(skb);
+ trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
+ err = dev_queue_xmit(skb);
+ goto out;
+ }
+
+ nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
+ bh_lock_sock(sk);
+ if ((sk->sk_state != IUCV_LISTEN) ||
+ sk_acceptq_is_full(sk) ||
+ !nsk) {
+ /* error on server socket - connection refused */
+ if (nsk)
+ sk_free(nsk);
+ afiucv_swap_src_dest(skb);
+ trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
+ err = dev_queue_xmit(skb);
+ bh_unlock_sock(sk);
+ goto out;
+ }
+
+ niucv = iucv_sk(nsk);
+ iucv_sock_init(nsk, sk);
+ niucv->transport = AF_IUCV_TRANS_HIPER;
+ niucv->msglimit = iucv->msglimit;
+ if (!trans_hdr->window)
+ niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
+ else
+ niucv->msglimit_peer = trans_hdr->window;
+ memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
+ memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
+ memcpy(niucv->src_name, iucv->src_name, 8);
+ memcpy(niucv->src_user_id, iucv->src_user_id, 8);
+ nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
+ afiucv_swap_src_dest(skb);
+ trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
+ trans_hdr->window = niucv->msglimit;
+ /* if receiver acks the xmit connection is established */
+ err = dev_queue_xmit(skb);
+ if (!err) {
+ iucv_accept_enqueue(sk, nsk);
+ nsk->sk_state = IUCV_CONNECTED;
+ sk->sk_data_ready(sk, 1);
+ } else
+ iucv_sock_kill(nsk);
+ bh_unlock_sock(sk);
+
+out:
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_synack() - react on received SYN-ACK
+ **/
+static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+ struct af_iucv_trans_hdr *trans_hdr =
+ (struct af_iucv_trans_hdr *)skb->data;
+
+ if (!iucv)
+ goto out;
+ if (sk->sk_state != IUCV_BOUND)
+ goto out;
+ bh_lock_sock(sk);
+ iucv->msglimit_peer = trans_hdr->window;
+ sk->sk_state = IUCV_CONNECTED;
+ sk->sk_state_change(sk);
+ bh_unlock_sock(sk);
+out:
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_synfin() - react on received SYN_FIN
+ **/
+static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+ if (!iucv)
+ goto out;
+ if (sk->sk_state != IUCV_BOUND)
+ goto out;
+ bh_lock_sock(sk);
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ bh_unlock_sock(sk);
+out:
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_fin() - react on received FIN
+ **/
+static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+ /* other end of connection closed */
+ if (iucv) {
+ bh_lock_sock(sk);
+ if (!list_empty(&iucv->accept_q))
+ sk->sk_state = IUCV_SEVERED;
+ else
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ bh_unlock_sock(sk);
+ }
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_win() - react on received WIN
+ **/
+static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+ struct af_iucv_trans_hdr *trans_hdr =
+ (struct af_iucv_trans_hdr *)skb->data;
+
+ if (!iucv)
+ return NET_RX_SUCCESS;
+
+ if (sk->sk_state != IUCV_CONNECTED)
+ return NET_RX_SUCCESS;
+
+ atomic_sub(trans_hdr->window, &iucv->msg_sent);
+ iucv_sock_wake_msglim(sk);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_rx() - react on received data
+ **/
+static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+ if (!iucv) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
+ if (sk->sk_state != IUCV_CONNECTED) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
+ /* write stuff from iucv_msg to skb cb */
+ if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+ skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+ spin_lock(&iucv->message_q.lock);
+ if (skb_queue_empty(&iucv->backlog_skb_q)) {
+ if (sock_queue_rcv_skb(sk, skb)) {
+ /* handle rcv queue full */
+ skb_queue_tail(&iucv->backlog_skb_q, skb);
+ }
+ } else
+ skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
+ spin_unlock(&iucv->message_q.lock);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_rcv() - base function for arriving data through HiperSockets
+ * transport
+ * called from netif RX softirq
+ **/
+static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct hlist_node *node;
+ struct sock *sk;
+ struct iucv_sock *iucv;
+ struct af_iucv_trans_hdr *trans_hdr;
+ char nullstring[8];
+ int err = 0;
+
+ skb_pull(skb, ETH_HLEN);
+ trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
+ EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
+ EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
+ EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
+ EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
+ memset(nullstring, 0, sizeof(nullstring));
+ iucv = NULL;
+ sk = NULL;
+ read_lock(&iucv_sk_list.lock);
+ sk_for_each(sk, node, &iucv_sk_list.head) {
+ if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
+ if ((!memcmp(&iucv_sk(sk)->src_name,
+ trans_hdr->destAppName, 8)) &&
+ (!memcmp(&iucv_sk(sk)->src_user_id,
+ trans_hdr->destUserID, 8)) &&
+ (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
+ (!memcmp(&iucv_sk(sk)->dst_user_id,
+ nullstring, 8))) {
+ iucv = iucv_sk(sk);
+ break;
+ }
+ } else {
+ if ((!memcmp(&iucv_sk(sk)->src_name,
+ trans_hdr->destAppName, 8)) &&
+ (!memcmp(&iucv_sk(sk)->src_user_id,
+ trans_hdr->destUserID, 8)) &&
+ (!memcmp(&iucv_sk(sk)->dst_name,
+ trans_hdr->srcAppName, 8)) &&
+ (!memcmp(&iucv_sk(sk)->dst_user_id,
+ trans_hdr->srcUserID, 8))) {
+ iucv = iucv_sk(sk);
+ break;
+ }
+ }
+ }
+ read_unlock(&iucv_sk_list.lock);
+ if (!iucv)
+ sk = NULL;
+
+ /* no sock
+ how should we send with no sock
+ 1) send without sock no send rc checking?
+ 2) introduce default sock to handle this cases
+
+ SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
+ data -> send FIN
+ SYN|ACK, SYN|FIN, FIN -> no action? */
+
+ switch (trans_hdr->flags) {
+ case AF_IUCV_FLAG_SYN:
+ /* connect request */
+ err = afiucv_hs_callback_syn(sk, skb);
+ break;
+ case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
+ /* connect request confirmed */
+ err = afiucv_hs_callback_synack(sk, skb);
+ break;
+ case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
+ /* connect request refused */
+ err = afiucv_hs_callback_synfin(sk, skb);
+ break;
+ case (AF_IUCV_FLAG_FIN):
+ /* close request */
+ err = afiucv_hs_callback_fin(sk, skb);
+ break;
+ case (AF_IUCV_FLAG_WIN):
+ err = afiucv_hs_callback_win(sk, skb);
+ if (skb->len > sizeof(struct af_iucv_trans_hdr))
+ err = afiucv_hs_callback_rx(sk, skb);
+ else
+ kfree(skb);
+ break;
+ case 0:
+ /* plain data frame */
+ err = afiucv_hs_callback_rx(sk, skb);
+ break;
+ default:
+ ;
+ }
+
+ return err;
+}
+
+/**
+ * afiucv_hs_callback_txnotify() - handle send notifcations from HiperSockets
+ * transport
+ **/
+static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
+ enum iucv_tx_notify n)
+{
+ struct sock *isk = skb->sk;
+ struct sock *sk = NULL;
+ struct iucv_sock *iucv = NULL;
+ struct sk_buff_head *list;
+ struct sk_buff *list_skb;
+ struct sk_buff *this = NULL;
+ unsigned long flags;
+ struct hlist_node *node;
+
+ read_lock(&iucv_sk_list.lock);
+ sk_for_each(sk, node, &iucv_sk_list.head)
+ if (sk == isk) {
+ iucv = iucv_sk(sk);
+ break;
+ }
+ read_unlock(&iucv_sk_list.lock);
+
+ if (!iucv)
+ return;
+
+ bh_lock_sock(sk);
+ list = &iucv->send_skb_q;
+ list_skb = list->next;
+ if (skb_queue_empty(list))
+ goto out_unlock;
+
+ spin_lock_irqsave(&list->lock, flags);
+ while (list_skb != (struct sk_buff *)list) {
+ if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
+ this = list_skb;
+ switch (n) {
+ case TX_NOTIFY_OK:
+ __skb_unlink(this, list);
+ iucv_sock_wake_msglim(sk);
+ kfree_skb(this);
+ break;
+ case TX_NOTIFY_PENDING:
+ atomic_inc(&iucv->pendings);
+ break;
+ case TX_NOTIFY_DELAYED_OK:
+ __skb_unlink(this, list);
+ atomic_dec(&iucv->pendings);
+ if (atomic_read(&iucv->pendings) <= 0)
+ iucv_sock_wake_msglim(sk);
+ kfree_skb(this);
+ break;
+ case TX_NOTIFY_UNREACHABLE:
+ case TX_NOTIFY_DELAYED_UNREACHABLE:
+ case TX_NOTIFY_TPQFULL: /* not yet used */
+ case TX_NOTIFY_GENERALERROR:
+ case TX_NOTIFY_DELAYED_GENERALERROR:
+ __skb_unlink(this, list);
+ kfree_skb(this);
+ if (!list_empty(&iucv->accept_q))
+ sk->sk_state = IUCV_SEVERED;
+ else
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ break;
+ }
+ break;
+ }
+ list_skb = list_skb->next;
+ }
+ spin_unlock_irqrestore(&list->lock, flags);
+
+out_unlock:
+ bh_unlock_sock(sk);
+}
static const struct proto_ops iucv_sock_ops = {
.family = PF_IUCV,
.owner = THIS_MODULE,
@@ -1718,71 +2320,104 @@ static const struct net_proto_family iucv_sock_family_ops = {
.create = iucv_sock_create,
};
-static int __init afiucv_init(void)
+static struct packet_type iucv_packet_type = {
+ .type = cpu_to_be16(ETH_P_AF_IUCV),
+ .func = afiucv_hs_rcv,
+};
+
+static int afiucv_iucv_init(void)
{
int err;
- if (!MACHINE_IS_VM) {
- pr_err("The af_iucv module cannot be loaded"
- " without z/VM\n");
- err = -EPROTONOSUPPORT;
- goto out;
- }
- cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
- if (unlikely(err)) {
- WARN_ON(err);
- err = -EPROTONOSUPPORT;
- goto out;
- }
-
- err = iucv_register(&af_iucv_handler, 0);
+ err = pr_iucv->iucv_register(&af_iucv_handler, 0);
if (err)
goto out;
- err = proto_register(&iucv_proto, 0);
- if (err)
- goto out_iucv;
- err = sock_register(&iucv_sock_family_ops);
- if (err)
- goto out_proto;
/* establish dummy device */
+ af_iucv_driver.bus = pr_iucv->bus;
err = driver_register(&af_iucv_driver);
if (err)
- goto out_sock;
+ goto out_iucv;
af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!af_iucv_dev) {
err = -ENOMEM;
goto out_driver;
}
dev_set_name(af_iucv_dev, "af_iucv");
- af_iucv_dev->bus = &iucv_bus;
- af_iucv_dev->parent = iucv_root;
+ af_iucv_dev->bus = pr_iucv->bus;
+ af_iucv_dev->parent = pr_iucv->root;
af_iucv_dev->release = (void (*)(struct device *))kfree;
af_iucv_dev->driver = &af_iucv_driver;
err = device_register(af_iucv_dev);
if (err)
goto out_driver;
-
return 0;
out_driver:
driver_unregister(&af_iucv_driver);
+out_iucv:
+ pr_iucv->iucv_unregister(&af_iucv_handler, 0);
+out:
+ return err;
+}
+
+static int __init afiucv_init(void)
+{
+ int err;
+
+ if (MACHINE_IS_VM) {
+ cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
+ if (unlikely(err)) {
+ WARN_ON(err);
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
+ if (!pr_iucv) {
+ printk(KERN_WARNING "iucv_if lookup failed\n");
+ memset(&iucv_userid, 0, sizeof(iucv_userid));
+ }
+ } else {
+ memset(&iucv_userid, 0, sizeof(iucv_userid));
+ pr_iucv = NULL;
+ }
+
+ err = proto_register(&iucv_proto, 0);
+ if (err)
+ goto out;
+ err = sock_register(&iucv_sock_family_ops);
+ if (err)
+ goto out_proto;
+
+ if (pr_iucv) {
+ err = afiucv_iucv_init();
+ if (err)
+ goto out_sock;
+ }
+ dev_add_pack(&iucv_packet_type);
+ return 0;
+
out_sock:
sock_unregister(PF_IUCV);
out_proto:
proto_unregister(&iucv_proto);
-out_iucv:
- iucv_unregister(&af_iucv_handler, 0);
out:
+ if (pr_iucv)
+ symbol_put(iucv_if);
return err;
}
static void __exit afiucv_exit(void)
{
- device_unregister(af_iucv_dev);
- driver_unregister(&af_iucv_driver);
+ if (pr_iucv) {
+ device_unregister(af_iucv_dev);
+ driver_unregister(&af_iucv_driver);
+ pr_iucv->iucv_unregister(&af_iucv_handler, 0);
+ symbol_put(iucv_if);
+ }
+ dev_remove_pack(&iucv_packet_type);
sock_unregister(PF_IUCV);
proto_unregister(&iucv_proto);
- iucv_unregister(&af_iucv_handler, 0);
}
module_init(afiucv_init);
@@ -1793,3 +2428,4 @@ MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);
+
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 075a3808aa4..403be43b793 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1974,6 +1974,27 @@ out:
return rc;
}
+struct iucv_interface iucv_if = {
+ .message_receive = iucv_message_receive,
+ .__message_receive = __iucv_message_receive,
+ .message_reply = iucv_message_reply,
+ .message_reject = iucv_message_reject,
+ .message_send = iucv_message_send,
+ .__message_send = __iucv_message_send,
+ .message_send2way = iucv_message_send2way,
+ .message_purge = iucv_message_purge,
+ .path_accept = iucv_path_accept,
+ .path_connect = iucv_path_connect,
+ .path_quiesce = iucv_path_quiesce,
+ .path_resume = iucv_path_resume,
+ .path_sever = iucv_path_sever,
+ .iucv_register = iucv_register,
+ .iucv_unregister = iucv_unregister,
+ .bus = NULL,
+ .root = NULL,
+};
+EXPORT_SYMBOL(iucv_if);
+
/**
* iucv_init
*
@@ -2038,6 +2059,8 @@ static int __init iucv_init(void)
rc = bus_register(&iucv_bus);
if (rc)
goto out_reboot;
+ iucv_if.root = iucv_root;
+ iucv_if.bus = &iucv_bus;
return 0;
out_reboot:
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ad4ac2601a5..34b2ddeacb6 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1045,8 +1045,10 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
headroom = NET_SKB_PAD + sizeof(struct iphdr) +
uhlen + hdr_len;
old_headroom = skb_headroom(skb);
- if (skb_cow_head(skb, headroom))
+ if (skb_cow_head(skb, headroom)) {
+ dev_kfree_skb(skb);
goto abort;
+ }
new_headroom = skb_headroom(skb);
skb_orphan(skb);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index f42cd091596..8a90d756c90 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -395,6 +395,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
struct pppol2tp_session *ps;
int old_headroom;
int new_headroom;
+ int uhlen, headroom;
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
goto abort;
@@ -413,7 +414,13 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
goto abort_put_sess;
old_headroom = skb_headroom(skb);
- if (skb_cow_head(skb, sizeof(ppph)))
+ uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
+ headroom = NET_SKB_PAD +
+ sizeof(struct iphdr) + /* IP header */
+ uhlen + /* UDP header (if L2TP_ENCAPTYPE_UDP) */
+ session->hdr_len + /* L2TP header */
+ sizeof(ppph); /* PPP header */
+ if (skb_cow_head(skb, headroom))
goto abort_put_sess_tun;
new_headroom = skb_headroom(skb);
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 956b7e47dc5..8d0324bac01 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -139,7 +139,8 @@ out:
return lapb;
}
-int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks)
+int lapb_register(struct net_device *dev,
+ const struct lapb_register_struct *callbacks)
{
struct lapb_cb *lapb;
int rc = LAPB_BADTOKEN;
@@ -158,7 +159,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
goto out;
lapb->dev = dev;
- lapb->callbacks = *callbacks;
+ lapb->callbacks = callbacks;
__lapb_insert_cb(lapb);
@@ -380,32 +381,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
{
- if (lapb->callbacks.connect_confirmation)
- lapb->callbacks.connect_confirmation(lapb->dev, reason);
+ if (lapb->callbacks->connect_confirmation)
+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
}
void lapb_connect_indication(struct lapb_cb *lapb, int reason)
{
- if (lapb->callbacks.connect_indication)
- lapb->callbacks.connect_indication(lapb->dev, reason);
+ if (lapb->callbacks->connect_indication)
+ lapb->callbacks->connect_indication(lapb->dev, reason);
}
void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
{
- if (lapb->callbacks.disconnect_confirmation)
- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
+ if (lapb->callbacks->disconnect_confirmation)
+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
}
void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
{
- if (lapb->callbacks.disconnect_indication)
- lapb->callbacks.disconnect_indication(lapb->dev, reason);
+ if (lapb->callbacks->disconnect_indication)
+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
}
int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
{
- if (lapb->callbacks.data_indication)
- return lapb->callbacks.data_indication(lapb->dev, skb);
+ if (lapb->callbacks->data_indication)
+ return lapb->callbacks->data_indication(lapb->dev, skb);
kfree_skb(skb);
return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
@@ -415,8 +416,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
{
int used = 0;
- if (lapb->callbacks.data_transmit) {
- lapb->callbacks.data_transmit(lapb->dev, skb);
+ if (lapb->callbacks->data_transmit) {
+ lapb->callbacks->data_transmit(lapb->dev, skb);
used = 1;
}
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index f5fdfcbf552..7d3b438755f 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -199,6 +199,19 @@ config MAC80211_VERBOSE_MPL_DEBUG
Do not select this option.
+config MAC80211_VERBOSE_MPATH_DEBUG
+ bool "Verbose mesh path debugging"
+ depends on MAC80211_DEBUG_MENU
+ depends on MAC80211_MESH
+ ---help---
+ Selecting this option causes mac80211 to print out very
+ verbose mesh path selection debugging messages (when mac80211
+ is taking part in a mesh network).
+ It should not be selected on production systems as those
+ messages are remotely triggerable.
+
+ Do not select this option.
+
config MAC80211_VERBOSE_MHWMP_DEBUG
bool "Verbose mesh HWMP routing debugging"
depends on MAC80211_DEBUG_MENU
@@ -212,6 +225,18 @@ config MAC80211_VERBOSE_MHWMP_DEBUG
Do not select this option.
+config MAC80211_VERBOSE_TDLS_DEBUG
+ bool "Verbose TDLS debugging"
+ depends on MAC80211_DEBUG_MENU
+ ---help---
+ Selecting this option causes mac80211 to print out very
+ verbose TDLS selection debugging messages (when mac80211
+ is a TDLS STA).
+ It should not be selected on production systems as those
+ messages are remotely triggerable.
+
+ Do not select this option.
+
config MAC80211_DEBUG_COUNTERS
bool "Extra statistics for TX/RX debugging"
depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index fd1aaf2a4a6..97f33588b65 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -69,7 +69,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
if (!tid_rx)
return;
- rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL);
+ RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
@@ -167,12 +167,8 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
u16 capab;
skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
-
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer "
- "for addba resp frame\n", sdata->name);
+ if (!skb)
return;
- }
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
@@ -227,7 +223,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
status = WLAN_STATUS_REQUEST_DECLINED;
- if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
+ if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Suspend in progress. "
"Denying ADDBA request\n");
@@ -279,14 +275,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
/* prepare A-MPDU MLME for Rx aggregation */
tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
- if (!tid_agg_rx) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
- tid);
-#endif
+ if (!tid_agg_rx)
goto end;
- }
spin_lock_init(&tid_agg_rx->reorder_lock);
@@ -306,11 +296,6 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
tid_agg_rx->reorder_time =
kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL);
if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_ERR "can not allocate reordering buffer "
- "to tid %d\n", tid);
-#endif
kfree(tid_agg_rx->reorder_buf);
kfree(tid_agg_rx->reorder_time);
kfree(tid_agg_rx);
@@ -340,7 +325,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
status = WLAN_STATUS_SUCCESS;
/* activate it for RX */
- rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
+ RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
if (timeout)
mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c8be8eff70d..2ac033989e0 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -68,11 +68,9 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
- if (!skb) {
- printk(KERN_ERR "%s: failed to allocate buffer "
- "for addba request frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
@@ -106,19 +104,18 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
ieee80211_tx_skb(sdata, skb);
}
-void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
+void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_bar *bar;
u16 bar_control = 0;
skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
- if (!skb) {
- printk(KERN_ERR "%s: failed to allocate buffer for "
- "bar frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
memset(bar, 0, sizeof(*bar));
@@ -128,13 +125,14 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
- bar_control |= (u16)(tid << 12);
+ bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
bar->control = cpu_to_le16(bar_control);
bar->start_seq_num = cpu_to_le16(ssn);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
}
+EXPORT_SYMBOL(ieee80211_send_bar);
void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
struct tid_ampdu_tx *tid_tx)
@@ -364,7 +362,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
return -EINVAL;
if ((tid >= STA_TID_NUM) ||
- !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION))
+ !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
+ (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
return -EINVAL;
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -383,7 +382,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
sdata->vif.type != NL80211_IFTYPE_AP)
return -EINVAL;
- if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
+ if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "BA sessions blocked. "
"Denying BA session request\n");
@@ -413,11 +412,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
/* prepare A-MPDU MLME for Tx aggregation */
tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
if (!tid_tx) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
- tid);
-#endif
ret = -ENOMEM;
goto err_unlock_sta;
}
@@ -574,14 +568,9 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
struct ieee80211_ra_tid *ra_tid;
struct sk_buff *skb = dev_alloc_skb(0);
- if (unlikely(!skb)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_WARNING "%s: Not enough memory, "
- "dropping start BA session", sdata->name);
-#endif
+ if (unlikely(!skb))
return;
- }
+
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
memcpy(&ra_tid->ra, ra, ETH_ALEN);
ra_tid->tid = tid;
@@ -727,14 +716,9 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
struct ieee80211_ra_tid *ra_tid;
struct sk_buff *skb = dev_alloc_skb(0);
- if (unlikely(!skb)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_WARNING "%s: Not enough memory, "
- "dropping stop BA session", sdata->name);
-#endif
+ if (unlikely(!skb))
return;
- }
+
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
memcpy(&ra_tid->ra, ra, ETH_ALEN);
ra_tid->tid = tid;
@@ -777,18 +761,14 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
#endif
-
+ /*
+ * IEEE 802.11-2007 7.3.1.14:
+ * In an ADDBA Response frame, when the Status Code field
+ * is set to 0, the Buffer Size subfield is set to a value
+ * of at least 1.
+ */
if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
- == WLAN_STATUS_SUCCESS) {
- /*
- * IEEE 802.11-2007 7.3.1.14:
- * In an ADDBA Response frame, when the Status Code field
- * is set to 0, the Buffer Size subfield is set to a value
- * of at least 1.
- */
- if (!buf_size)
- goto out;
-
+ == WLAN_STATUS_SUCCESS && buf_size) {
if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
&tid_tx->state)) {
/* ignore duplicate response */
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 3d1b091d9b2..ebd7fb101fb 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <linux/rcupdate.h>
+#include <linux/if_ether.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -62,7 +63,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
if (type == NL80211_IFTYPE_AP_VLAN &&
params && params->use_4addr == 0)
- rcu_assign_pointer(sdata->u.vlan.sta, NULL);
+ RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
else if (type == NL80211_IFTYPE_STATION &&
params && params->use_4addr >= 0)
sdata->u.mgd.use_4addr = params->use_4addr;
@@ -343,7 +344,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
STATION_INFO_RX_BITRATE |
STATION_INFO_RX_DROP_MISC |
STATION_INFO_BSS_PARAM |
- STATION_INFO_CONNECTED_TIME;
+ STATION_INFO_CONNECTED_TIME |
+ STATION_INFO_STA_FLAGS;
do_posix_clock_monotonic_gettime(&uptime);
sinfo->connected_time = uptime.tv_sec - sta->last_connected;
@@ -403,6 +405,23 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
+
+ sinfo->sta_flags.set = 0;
+ sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
+ BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
+ BIT(NL80211_STA_FLAG_WME) |
+ BIT(NL80211_STA_FLAG_MFP) |
+ BIT(NL80211_STA_FLAG_AUTHENTICATED);
+ if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
+ if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
+ if (test_sta_flag(sta, WLAN_STA_WME))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME);
+ if (test_sta_flag(sta, WLAN_STA_MFP))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
+ if (test_sta_flag(sta, WLAN_STA_AUTH))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
}
@@ -455,6 +474,20 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
+static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata,
+ struct beacon_parameters *params)
+{
+ struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+
+ bss_conf->ssid_len = params->ssid_len;
+
+ if (params->ssid_len)
+ memcpy(bss_conf->ssid, params->ssid, params->ssid_len);
+
+ bss_conf->hidden_ssid =
+ (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
+}
+
/*
* This handles both adding a beacon and setting new beacon info
*/
@@ -542,14 +575,17 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.dtim_period = new->dtim_period;
- rcu_assign_pointer(sdata->u.ap.beacon, new);
+ RCU_INIT_POINTER(sdata->u.ap.beacon, new);
synchronize_rcu();
kfree(old);
+ ieee80211_config_ap_ssid(sdata, params);
+
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
- BSS_CHANGED_BEACON);
+ BSS_CHANGED_BEACON |
+ BSS_CHANGED_SSID);
return 0;
}
@@ -594,7 +630,7 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
if (!old)
return -ENOENT;
- rcu_assign_pointer(sdata->u.ap.beacon, NULL);
+ RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
synchronize_rcu();
kfree(old);
@@ -650,7 +686,6 @@ static void sta_apply_parameters(struct ieee80211_local *local,
struct sta_info *sta,
struct station_parameters *params)
{
- unsigned long flags;
u32 rates;
int i, j;
struct ieee80211_supported_band *sband;
@@ -659,43 +694,58 @@ static void sta_apply_parameters(struct ieee80211_local *local,
sband = local->hw.wiphy->bands[local->oper_channel->band];
- spin_lock_irqsave(&sta->flaglock, flags);
mask = params->sta_flags_mask;
set = params->sta_flags_set;
if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
- sta->flags &= ~WLAN_STA_AUTHORIZED;
if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
- sta->flags |= WLAN_STA_AUTHORIZED;
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ else
+ clear_sta_flag(sta, WLAN_STA_AUTHORIZED);
}
if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) {
- sta->flags &= ~WLAN_STA_SHORT_PREAMBLE;
if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE))
- sta->flags |= WLAN_STA_SHORT_PREAMBLE;
+ set_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
+ else
+ clear_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
}
if (mask & BIT(NL80211_STA_FLAG_WME)) {
- sta->flags &= ~WLAN_STA_WME;
- sta->sta.wme = false;
if (set & BIT(NL80211_STA_FLAG_WME)) {
- sta->flags |= WLAN_STA_WME;
+ set_sta_flag(sta, WLAN_STA_WME);
sta->sta.wme = true;
+ } else {
+ clear_sta_flag(sta, WLAN_STA_WME);
+ sta->sta.wme = false;
}
}
if (mask & BIT(NL80211_STA_FLAG_MFP)) {
- sta->flags &= ~WLAN_STA_MFP;
if (set & BIT(NL80211_STA_FLAG_MFP))
- sta->flags |= WLAN_STA_MFP;
+ set_sta_flag(sta, WLAN_STA_MFP);
+ else
+ clear_sta_flag(sta, WLAN_STA_MFP);
}
if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
- sta->flags &= ~WLAN_STA_AUTH;
if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
- sta->flags |= WLAN_STA_AUTH;
+ set_sta_flag(sta, WLAN_STA_AUTH);
+ else
+ clear_sta_flag(sta, WLAN_STA_AUTH);
+ }
+
+ if (mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) {
+ if (set & BIT(NL80211_STA_FLAG_TDLS_PEER))
+ set_sta_flag(sta, WLAN_STA_TDLS_PEER);
+ else
+ clear_sta_flag(sta, WLAN_STA_TDLS_PEER);
+ }
+
+ if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) {
+ sta->sta.uapsd_queues = params->uapsd_queues;
+ sta->sta.max_sp = params->max_sp;
}
- spin_unlock_irqrestore(&sta->flaglock, flags);
/*
* cfg80211 validates this (1-2007) and allows setting the AID
@@ -786,10 +836,17 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
if (!sta)
return -ENOMEM;
- sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC;
+ set_sta_flag(sta, WLAN_STA_AUTH);
+ set_sta_flag(sta, WLAN_STA_ASSOC);
sta_apply_parameters(local, sta, params);
+ /* Only TDLS-supporting stations can add TDLS peers */
+ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+ !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+ sdata->vif.type == NL80211_IFTYPE_STATION))
+ return -ENOTSUPP;
+
rate_control_rate_init(sta);
layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
@@ -842,6 +899,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
return -ENOENT;
}
+ /* The TDLS bit cannot be toggled after the STA was added */
+ if ((params->sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+ !!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) !=
+ !!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
if (params->vlan && params->vlan != sta->sdata->dev) {
vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
@@ -857,7 +922,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
return -EBUSY;
}
- rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
+ RCU_INIT_POINTER(vlansdata->u.vlan.sta, sta);
}
sta->sdata = vlansdata;
@@ -918,7 +983,7 @@ static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
if (dst)
return mesh_path_del(dst, sdata);
- mesh_path_flush(sdata);
+ mesh_path_flush_by_iface(sdata);
return 0;
}
@@ -1137,6 +1202,22 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode;
ieee80211_mesh_root_setup(ifmsh);
}
+ if (_chg_mesh_attr(NL80211_MESHCONF_GATE_ANNOUNCEMENTS, mask)) {
+ /* our current gate announcement implementation rides on root
+ * announcements, so require this ifmsh to also be a root node
+ * */
+ if (nconf->dot11MeshGateAnnouncementProtocol &&
+ !conf->dot11MeshHWMPRootMode) {
+ conf->dot11MeshHWMPRootMode = 1;
+ ieee80211_mesh_root_setup(ifmsh);
+ }
+ conf->dot11MeshGateAnnouncementProtocol =
+ nconf->dot11MeshGateAnnouncementProtocol;
+ }
+ if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask)) {
+ conf->dot11MeshHWMPRannInterval =
+ nconf->dot11MeshHWMPRannInterval;
+ }
return 0;
}
@@ -1235,9 +1316,11 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
}
static int ieee80211_set_txq_params(struct wiphy *wiphy,
+ struct net_device *dev,
struct ieee80211_txq_params *params)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_tx_queue_params p;
if (!local->ops->conf_tx)
@@ -1258,8 +1341,8 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
if (params->queue >= local->hw.queues)
return -EINVAL;
- local->tx_conf[params->queue] = p;
- if (drv_conf_tx(local, params->queue, &p)) {
+ sdata->tx_conf[params->queue] = p;
+ if (drv_conf_tx(local, sdata, params->queue, &p)) {
wiphy_debug(local->hw.wiphy,
"failed to set TX queue parameters for queue %d\n",
params->queue);
@@ -1821,7 +1904,7 @@ ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
* so in that case userspace will have to deal with it.
*/
- if (wk->offchan_tx.wait && wk->offchan_tx.frame)
+ if (wk->offchan_tx.wait && !wk->offchan_tx.status)
cfg80211_mgmt_tx_status(wk->sdata->dev,
(unsigned long) wk->offchan_tx.frame,
wk->ie, wk->ie_len, false, GFP_KERNEL);
@@ -1833,7 +1916,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
- const u8 *buf, size_t len, u64 *cookie)
+ const u8 *buf, size_t len, bool no_cck,
+ u64 *cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
@@ -1860,6 +1944,9 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
}
+ if (no_cck)
+ flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
+
if (is_offchan && !offchan)
return -EBUSY;
@@ -1898,33 +1985,6 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
*cookie = (unsigned long) skb;
- if (is_offchan && local->ops->offchannel_tx) {
- int ret;
-
- IEEE80211_SKB_CB(skb)->band = chan->band;
-
- mutex_lock(&local->mtx);
-
- if (local->hw_offchan_tx_cookie) {
- mutex_unlock(&local->mtx);
- return -EBUSY;
- }
-
- /* TODO: bitrate control, TX processing? */
- ret = drv_offchannel_tx(local, skb, chan, channel_type, wait);
-
- if (ret == 0)
- local->hw_offchan_tx_cookie = *cookie;
- mutex_unlock(&local->mtx);
-
- /*
- * Allow driver to return 1 to indicate it wants to have the
- * frame transmitted with a remain_on_channel + regular TX.
- */
- if (ret != 1)
- return ret;
- }
-
if (is_offchan && local->ops->remain_on_channel) {
unsigned int duration;
int ret;
@@ -2011,18 +2071,6 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
mutex_lock(&local->mtx);
- if (local->ops->offchannel_tx_cancel_wait &&
- local->hw_offchan_tx_cookie == cookie) {
- ret = drv_offchannel_tx_cancel_wait(local);
-
- if (!ret)
- local->hw_offchan_tx_cookie = 0;
-
- mutex_unlock(&local->mtx);
-
- return ret;
- }
-
if (local->ops->cancel_remain_on_channel) {
cookie ^= 2;
ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
@@ -2123,6 +2171,323 @@ static int ieee80211_set_rekey_data(struct wiphy *wiphy,
return 0;
}
+static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb)
+{
+ u8 *pos = (void *)skb_put(skb, 7);
+
+ *pos++ = WLAN_EID_EXT_CAPABILITY;
+ *pos++ = 5; /* len */
+ *pos++ = 0x0;
+ *pos++ = 0x0;
+ *pos++ = 0x0;
+ *pos++ = 0x0;
+ *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
+}
+
+static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ u16 capab;
+
+ capab = 0;
+ if (local->oper_channel->band != IEEE80211_BAND_2GHZ)
+ return capab;
+
+ if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
+ capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
+ if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
+ capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
+
+ return capab;
+}
+
+static void ieee80211_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr,
+ u8 *peer, u8 *bssid)
+{
+ struct ieee80211_tdls_lnkie *lnkid;
+
+ lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
+
+ lnkid->ie_type = WLAN_EID_LINK_ID;
+ lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
+
+ memcpy(lnkid->bssid, bssid, ETH_ALEN);
+ memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
+ memcpy(lnkid->resp_sta, peer, ETH_ALEN);
+}
+
+static int
+ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_tdls_data *tf;
+
+ tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
+
+ memcpy(tf->da, peer, ETH_ALEN);
+ memcpy(tf->sa, sdata->vif.addr, ETH_ALEN);
+ tf->ether_type = cpu_to_be16(ETH_P_TDLS);
+ tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_REQUEST;
+
+ skb_put(skb, sizeof(tf->u.setup_req));
+ tf->u.setup_req.dialog_token = dialog_token;
+ tf->u.setup_req.capability =
+ cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
+
+ ieee80211_add_srates_ie(&sdata->vif, skb);
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+ ieee80211_tdls_add_ext_capab(skb);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
+
+ skb_put(skb, sizeof(tf->u.setup_resp));
+ tf->u.setup_resp.status_code = cpu_to_le16(status_code);
+ tf->u.setup_resp.dialog_token = dialog_token;
+ tf->u.setup_resp.capability =
+ cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
+
+ ieee80211_add_srates_ie(&sdata->vif, skb);
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+ ieee80211_tdls_add_ext_capab(skb);
+ break;
+ case WLAN_TDLS_SETUP_CONFIRM:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
+
+ skb_put(skb, sizeof(tf->u.setup_cfm));
+ tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
+ tf->u.setup_cfm.dialog_token = dialog_token;
+ break;
+ case WLAN_TDLS_TEARDOWN:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_TEARDOWN;
+
+ skb_put(skb, sizeof(tf->u.teardown));
+ tf->u.teardown.reason_code = cpu_to_le16(status_code);
+ break;
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
+
+ skb_put(skb, sizeof(tf->u.discover_req));
+ tf->u.discover_req.dialog_token = dialog_token;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_mgmt *mgmt;
+
+ mgmt = (void *)skb_put(skb, 24);
+ memset(mgmt, 0, 24);
+ memcpy(mgmt->da, peer, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+
+ switch (action_code) {
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
+ mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
+ mgmt->u.action.u.tdls_discover_resp.action_code =
+ WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
+ mgmt->u.action.u.tdls_discover_resp.dialog_token =
+ dialog_token;
+ mgmt->u.action.u.tdls_discover_resp.capability =
+ cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
+
+ ieee80211_add_srates_ie(&sdata->vif, skb);
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+ ieee80211_tdls_add_ext_capab(skb);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb = NULL;
+ bool send_direct;
+ int ret;
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+ return -ENOTSUPP;
+
+ /* make sure we are in managed mode, and associated */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION ||
+ !sdata->u.mgd.associated)
+ return -EINVAL;
+
+#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
+ printk(KERN_DEBUG "TDLS mgmt action %d peer %pM\n", action_code, peer);
+#endif
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+ max(sizeof(struct ieee80211_mgmt),
+ sizeof(struct ieee80211_tdls_data)) +
+ 50 + /* supported rates */
+ 7 + /* ext capab */
+ extra_ies_len +
+ sizeof(struct ieee80211_tdls_lnkie));
+ if (!skb)
+ return -ENOMEM;
+
+ info = IEEE80211_SKB_CB(skb);
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_RESPONSE:
+ case WLAN_TDLS_SETUP_CONFIRM:
+ case WLAN_TDLS_TEARDOWN:
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ ret = ieee80211_prep_tdls_encap_data(wiphy, dev, peer,
+ action_code, dialog_token,
+ status_code, skb);
+ send_direct = false;
+ break;
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ ret = ieee80211_prep_tdls_direct(wiphy, dev, peer, action_code,
+ dialog_token, status_code,
+ skb);
+ send_direct = true;
+ break;
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+
+ if (ret < 0)
+ goto fail;
+
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+
+ /* the TDLS link IE is always added last */
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_CONFIRM:
+ case WLAN_TDLS_TEARDOWN:
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ /* we are the initiator */
+ ieee80211_tdls_add_link_ie(skb, sdata->vif.addr, peer,
+ sdata->u.mgd.bssid);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ /* we are the responder */
+ ieee80211_tdls_add_link_ie(skb, peer, sdata->vif.addr,
+ sdata->u.mgd.bssid);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ goto fail;
+ }
+
+ if (send_direct) {
+ ieee80211_tx_skb(sdata, skb);
+ return 0;
+ }
+
+ /*
+ * According to 802.11z: Setup req/resp are sent in AC_BK, otherwise
+ * we should default to AC_VI.
+ */
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_RESPONSE:
+ skb_set_queue_mapping(skb, IEEE80211_AC_BK);
+ skb->priority = 2;
+ break;
+ default:
+ skb_set_queue_mapping(skb, IEEE80211_AC_VI);
+ skb->priority = 5;
+ break;
+ }
+
+ /* disable bottom halves when entering the Tx path */
+ local_bh_disable();
+ ret = ieee80211_subif_start_xmit(skb, dev);
+ local_bh_enable();
+
+ return ret;
+
+fail:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, enum nl80211_tdls_operation oper)
+{
+ struct sta_info *sta;
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+ return -ENOTSUPP;
+
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return -EINVAL;
+
+#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
+ printk(KERN_DEBUG "TDLS oper %d peer %pM\n", oper, peer);
+#endif
+
+ switch (oper) {
+ case NL80211_TDLS_ENABLE_LINK:
+ rcu_read_lock();
+ sta = sta_info_get(sdata, peer);
+ if (!sta) {
+ rcu_read_unlock();
+ return -ENOLINK;
+ }
+
+ set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
+ rcu_read_unlock();
+ break;
+ case NL80211_TDLS_DISABLE_LINK:
+ return sta_info_destroy_addr(sdata, peer);
+ case NL80211_TDLS_TEARDOWN:
+ case NL80211_TDLS_SETUP:
+ case NL80211_TDLS_DISCOVERY_REQ:
+ /* We don't support in-driver setup/teardown/discovery */
+ return -ENOTSUPP;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -2186,4 +2551,6 @@ struct cfg80211_ops mac80211_config_ops = {
.set_ringparam = ieee80211_set_ringparam,
.get_ringparam = ieee80211_get_ringparam,
.set_rekey_data = ieee80211_set_rekey_data,
+ .tdls_oper = ieee80211_tdls_oper,
+ .tdls_mgmt = ieee80211_tdls_mgmt,
};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 186e02f7cc3..883996b2f99 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -78,57 +78,6 @@ DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
-static ssize_t tsf_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_local *local = file->private_data;
- u64 tsf;
-
- tsf = drv_get_tsf(local);
-
- return mac80211_format_buffer(user_buf, count, ppos, "0x%016llx\n",
- (unsigned long long) tsf);
-}
-
-static ssize_t tsf_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_local *local = file->private_data;
- unsigned long long tsf;
- char buf[100];
- size_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
-
- if (strncmp(buf, "reset", 5) == 0) {
- if (local->ops->reset_tsf) {
- drv_reset_tsf(local);
- wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
- }
- } else {
- tsf = simple_strtoul(buf, NULL, 0);
- if (local->ops->set_tsf) {
- drv_set_tsf(local, tsf);
- wiphy_info(local->hw.wiphy,
- "debugfs set TSF to %#018llx\n", tsf);
-
- }
- }
-
- return count;
-}
-
-static const struct file_operations tsf_ops = {
- .read = tsf_read,
- .write = tsf_write,
- .open = mac80211_open_file_generic,
- .llseek = default_llseek,
-};
-
static ssize_t reset_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -195,20 +144,12 @@ static ssize_t uapsd_queues_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
- unsigned long val;
- char buf[10];
- size_t len;
+ u8 val;
int ret;
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
-
- ret = strict_strtoul(buf, 0, &val);
-
+ ret = kstrtou8_from_user(user_buf, count, 0, &val);
if (ret)
- return -EINVAL;
+ return ret;
if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
return -ERANGE;
@@ -305,6 +246,9 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
char *buf = kzalloc(mxln, GFP_KERNEL);
int sf = 0; /* how many written so far */
+ if (!buf)
+ return 0;
+
sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
@@ -355,6 +299,8 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
+ if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
+ sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
kfree(buf);
@@ -450,7 +396,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(frequency);
DEBUGFS_ADD(total_ps_buffered);
DEBUGFS_ADD(wep_iv);
- DEBUGFS_ADD(tsf);
DEBUGFS_ADD(queues);
DEBUGFS_ADD_MODE(reset, 0200);
DEBUGFS_ADD(noack);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 9ea7c0d0103..9352819a986 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -21,6 +21,7 @@
#include "rate.h"
#include "debugfs.h"
#include "debugfs_netdev.h"
+#include "driver-ops.h"
static ssize_t ieee80211_if_read(
struct ieee80211_sub_if_data *sdata,
@@ -331,6 +332,46 @@ static ssize_t ieee80211_if_fmt_num_buffered_multicast(
}
__IEEE80211_IF_FILE(num_buffered_multicast, NULL);
+/* IBSS attributes */
+static ssize_t ieee80211_if_fmt_tsf(
+ const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+ struct ieee80211_local *local = sdata->local;
+ u64 tsf;
+
+ tsf = drv_get_tsf(local, (struct ieee80211_sub_if_data *)sdata);
+
+ return scnprintf(buf, buflen, "0x%016llx\n", (unsigned long long) tsf);
+}
+
+static ssize_t ieee80211_if_parse_tsf(
+ struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
+{
+ struct ieee80211_local *local = sdata->local;
+ unsigned long long tsf;
+ int ret;
+
+ if (strncmp(buf, "reset", 5) == 0) {
+ if (local->ops->reset_tsf) {
+ drv_reset_tsf(local, sdata);
+ wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
+ }
+ } else {
+ ret = kstrtoull(buf, 10, &tsf);
+ if (ret < 0)
+ return -EINVAL;
+ if (local->ops->set_tsf) {
+ drv_set_tsf(local, sdata, tsf);
+ wiphy_info(local->hw.wiphy,
+ "debugfs set TSF to %#018llx\n", tsf);
+ }
+ }
+
+ return buflen;
+}
+__IEEE80211_IF_FILE_W(tsf);
+
+
/* WDS attributes */
IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
@@ -340,6 +381,8 @@ IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC);
IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
+IEEE80211_IF_FILE(dropped_frames_congestion,
+ u.mesh.mshstats.dropped_frames_congestion, DEC);
IEEE80211_IF_FILE(dropped_frames_no_route,
u.mesh.mshstats.dropped_frames_no_route, DEC);
IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
@@ -372,6 +415,10 @@ IEEE80211_IF_FILE(min_discovery_timeout,
u.mesh.mshcfg.min_discovery_timeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
+IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol,
+ u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
+ u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
#endif
@@ -415,6 +462,11 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
}
+static void add_ibss_files(struct ieee80211_sub_if_data *sdata)
+{
+ DEBUGFS_ADD_MODE(tsf, 0600);
+}
+
static void add_wds_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
@@ -459,6 +511,7 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
MESHSTATS_ADD(fwded_frames);
MESHSTATS_ADD(dropped_frames_ttl);
MESHSTATS_ADD(dropped_frames_no_route);
+ MESHSTATS_ADD(dropped_frames_congestion);
MESHSTATS_ADD(estab_plinks);
#undef MESHSTATS_ADD
}
@@ -485,7 +538,9 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
MESHPARAMS_ADD(path_refresh_time);
MESHPARAMS_ADD(min_discovery_timeout);
-
+ MESHPARAMS_ADD(dot11MeshHWMPRootMode);
+ MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
+ MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
#undef MESHPARAMS_ADD
}
#endif
@@ -506,7 +561,7 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
add_sta_files(sdata);
break;
case NL80211_IFTYPE_ADHOC:
- /* XXX */
+ add_ibss_files(sdata);
break;
case NL80211_IFTYPE_AP:
add_ap_files(sdata);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index a01d2137fdd..c5f341798c1 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -56,19 +56,22 @@ STA_FILE(last_signal, last_signal, D);
static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- char buf[100];
+ char buf[121];
struct sta_info *sta = file->private_data;
- u32 staflags = get_sta_flags(sta);
- int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s",
- staflags & WLAN_STA_AUTH ? "AUTH\n" : "",
- staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
- staflags & WLAN_STA_PS_STA ? "PS (sta)\n" : "",
- staflags & WLAN_STA_PS_DRIVER ? "PS (driver)\n" : "",
- staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
- staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
- staflags & WLAN_STA_WME ? "WME\n" : "",
- staflags & WLAN_STA_WDS ? "WDS\n" : "",
- staflags & WLAN_STA_MFP ? "MFP\n" : "");
+
+#define TEST(flg) \
+ test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
+
+ int res = scnprintf(buf, sizeof(buf),
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
+ TEST(PS_DRIVER), TEST(AUTHORIZED),
+ TEST(SHORT_PREAMBLE), TEST(ASSOC_AP),
+ TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
+ TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
+ TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
+ TEST(TDLS_PEER_AUTH));
+#undef TEST
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
STA_OPS(flags);
@@ -78,8 +81,14 @@ static ssize_t sta_num_ps_buf_frames_read(struct file *file,
size_t count, loff_t *ppos)
{
struct sta_info *sta = file->private_data;
- return mac80211_format_buffer(userbuf, count, ppos, "%u\n",
- skb_queue_len(&sta->ps_tx_buf));
+ char buf[17*IEEE80211_NUM_ACS], *p = buf;
+ int ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ p += scnprintf(p, sizeof(buf)+buf-p, "AC%d: %d\n", ac,
+ skb_queue_len(&sta->ps_tx_buf[ac]) +
+ skb_queue_len(&sta->tx_filtered[ac]));
+ return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
STA_OPS(num_ps_buf_frames);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 1425380983f..5f165d7eb2d 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -413,50 +413,56 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
trace_drv_return_void(local);
}
-static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
+static inline int drv_conf_tx(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
int ret = -EOPNOTSUPP;
might_sleep();
- trace_drv_conf_tx(local, queue, params);
+ trace_drv_conf_tx(local, sdata, queue, params);
if (local->ops->conf_tx)
- ret = local->ops->conf_tx(&local->hw, queue, params);
+ ret = local->ops->conf_tx(&local->hw, &sdata->vif,
+ queue, params);
trace_drv_return_int(local, ret);
return ret;
}
-static inline u64 drv_get_tsf(struct ieee80211_local *local)
+static inline u64 drv_get_tsf(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
{
u64 ret = -1ULL;
might_sleep();
- trace_drv_get_tsf(local);
+ trace_drv_get_tsf(local, sdata);
if (local->ops->get_tsf)
- ret = local->ops->get_tsf(&local->hw);
+ ret = local->ops->get_tsf(&local->hw, &sdata->vif);
trace_drv_return_u64(local, ret);
return ret;
}
-static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
+static inline void drv_set_tsf(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u64 tsf)
{
might_sleep();
- trace_drv_set_tsf(local, tsf);
+ trace_drv_set_tsf(local, sdata, tsf);
if (local->ops->set_tsf)
- local->ops->set_tsf(&local->hw, tsf);
+ local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
trace_drv_return_void(local);
}
-static inline void drv_reset_tsf(struct ieee80211_local *local)
+static inline void drv_reset_tsf(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
{
might_sleep();
- trace_drv_reset_tsf(local);
+ trace_drv_reset_tsf(local, sdata);
if (local->ops->reset_tsf)
- local->ops->reset_tsf(&local->hw);
+ local->ops->reset_tsf(&local->hw, &sdata->vif);
trace_drv_return_void(local);
}
@@ -590,37 +596,6 @@ static inline int drv_cancel_remain_on_channel(struct ieee80211_local *local)
return ret;
}
-static inline int drv_offchannel_tx(struct ieee80211_local *local,
- struct sk_buff *skb,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- unsigned int wait)
-{
- int ret;
-
- might_sleep();
-
- trace_drv_offchannel_tx(local, skb, chan, channel_type, wait);
- ret = local->ops->offchannel_tx(&local->hw, skb, chan,
- channel_type, wait);
- trace_drv_return_int(local, ret);
-
- return ret;
-}
-
-static inline int drv_offchannel_tx_cancel_wait(struct ieee80211_local *local)
-{
- int ret;
-
- might_sleep();
-
- trace_drv_offchannel_tx_cancel_wait(local);
- ret = local->ops->offchannel_tx_cancel_wait(&local->hw);
- trace_drv_return_int(local, ret);
-
- return ret;
-}
-
static inline int drv_set_ringparam(struct ieee80211_local *local,
u32 tx, u32 rx)
{
@@ -696,4 +671,34 @@ static inline void drv_rssi_callback(struct ieee80211_local *local,
local->ops->rssi_callback(&local->hw, event);
trace_drv_return_void(local);
}
+
+static inline void
+drv_release_buffered_frames(struct ieee80211_local *local,
+ struct sta_info *sta, u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ trace_drv_release_buffered_frames(local, &sta->sta, tids, num_frames,
+ reason, more_data);
+ if (local->ops->release_buffered_frames)
+ local->ops->release_buffered_frames(&local->hw, &sta->sta, tids,
+ num_frames, reason,
+ more_data);
+ trace_drv_return_void(local);
+}
+
+static inline void
+drv_allow_buffered_frames(struct ieee80211_local *local,
+ struct sta_info *sta, u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ trace_drv_allow_buffered_frames(local, &sta->sta, tids, num_frames,
+ reason, more_data);
+ if (local->ops->allow_buffered_frames)
+ local->ops->allow_buffered_frames(&local->hw, &sta->sta,
+ tids, num_frames, reason,
+ more_data);
+ trace_drv_return_void(local);
+}
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index f47b00dc7af..2af4fca5533 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -697,64 +697,76 @@ TRACE_EVENT(drv_sta_remove,
);
TRACE_EVENT(drv_conf_tx,
- TP_PROTO(struct ieee80211_local *local, u16 queue,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u16 queue,
const struct ieee80211_tx_queue_params *params),
- TP_ARGS(local, queue, params),
+ TP_ARGS(local, sdata, queue, params),
TP_STRUCT__entry(
LOCAL_ENTRY
+ VIF_ENTRY
__field(u16, queue)
__field(u16, txop)
__field(u16, cw_min)
__field(u16, cw_max)
__field(u8, aifs)
+ __field(bool, uapsd)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ VIF_ASSIGN;
__entry->queue = queue;
__entry->txop = params->txop;
__entry->cw_max = params->cw_max;
__entry->cw_min = params->cw_min;
__entry->aifs = params->aifs;
+ __entry->uapsd = params->uapsd;
),
TP_printk(
- LOCAL_PR_FMT " queue:%d",
- LOCAL_PR_ARG, __entry->queue
+ LOCAL_PR_FMT VIF_PR_FMT " queue:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->queue
)
);
-DEFINE_EVENT(local_only_evt, drv_get_tsf,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
+DEFINE_EVENT(local_sdata_evt, drv_get_tsf,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
);
TRACE_EVENT(drv_set_tsf,
- TP_PROTO(struct ieee80211_local *local, u64 tsf),
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u64 tsf),
- TP_ARGS(local, tsf),
+ TP_ARGS(local, sdata, tsf),
TP_STRUCT__entry(
LOCAL_ENTRY
+ VIF_ENTRY
__field(u64, tsf)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ VIF_ASSIGN;
__entry->tsf = tsf;
),
TP_printk(
- LOCAL_PR_FMT " tsf:%llu",
- LOCAL_PR_ARG, (unsigned long long)__entry->tsf
+ LOCAL_PR_FMT VIF_PR_FMT " tsf:%llu",
+ LOCAL_PR_ARG, VIF_PR_ARG, (unsigned long long)__entry->tsf
)
);
-DEFINE_EVENT(local_only_evt, drv_reset_tsf,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
+DEFINE_EVENT(local_sdata_evt, drv_reset_tsf,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
);
DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
@@ -1117,6 +1129,61 @@ TRACE_EVENT(drv_rssi_callback,
)
);
+DECLARE_EVENT_CLASS(release_evt,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data),
+
+ TP_ARGS(local, sta, tids, num_frames, reason, more_data),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ STA_ENTRY
+ __field(u16, tids)
+ __field(int, num_frames)
+ __field(int, reason)
+ __field(bool, more_data)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ __entry->tids = tids;
+ __entry->num_frames = num_frames;
+ __entry->reason = reason;
+ __entry->more_data = more_data;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT STA_PR_FMT
+ " TIDs:0x%.4x frames:%d reason:%d more:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->tids, __entry->num_frames,
+ __entry->reason, __entry->more_data
+ )
+);
+
+DEFINE_EVENT(release_evt, drv_release_buffered_frames,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data),
+
+ TP_ARGS(local, sta, tids, num_frames, reason, more_data)
+);
+
+DEFINE_EVENT(release_evt, drv_allow_buffered_frames,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data),
+
+ TP_ARGS(local, sta, tids, num_frames, reason, more_data)
+);
+
/*
* Tracing for API calls that drivers call.
*/
@@ -1431,6 +1498,28 @@ TRACE_EVENT(api_enable_rssi_reports,
)
);
+TRACE_EVENT(api_eosp,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta),
+
+ TP_ARGS(local, sta),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ STA_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT STA_PR_FMT,
+ LOCAL_PR_ARG, STA_PR_FMT
+ )
+);
+
/*
* Tracing for internal functions
* (which may also be called in response to driver calls)
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 7cfc286946c..f80a35c0d00 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -130,7 +130,7 @@ void ieee80211_ba_session_work(struct work_struct *work)
* down by the code that set the flag, so this
* need not run.
*/
- if (test_sta_flags(sta, WLAN_STA_BLOCK_BA))
+ if (test_sta_flag(sta, WLAN_STA_BLOCK_BA))
return;
mutex_lock(&sta->ampdu_mlme.mtx);
@@ -186,12 +186,8 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
u16 params;
skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
-
- if (!skb) {
- printk(KERN_ERR "%s: failed to allocate buffer "
- "for delba frame\n", sdata->name);
+ if (!skb)
return;
- }
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 56c24cabf26..ede9a8b341a 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -81,10 +81,10 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
lockdep_assert_held(&ifibss->mtx);
/* Reset own TSF to allow time synchronization work. */
- drv_reset_tsf(local);
+ drv_reset_tsf(local, sdata);
skb = ifibss->skb;
- rcu_assign_pointer(ifibss->presp, NULL);
+ RCU_INIT_POINTER(ifibss->presp, NULL);
synchronize_rcu();
skb->data = skb->head;
skb->len = 0;
@@ -184,7 +184,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
*pos++ = 0; /* U-APSD no in use */
}
- rcu_assign_pointer(ifibss->presp, skb);
+ RCU_INIT_POINTER(ifibss->presp, skb);
sdata->vif.bss_conf.beacon_int = beacon_int;
sdata->vif.bss_conf.basic_rates = basic_rates;
@@ -314,7 +314,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
}
if (sta && elems->wmm_info)
- set_sta_flags(sta, WLAN_STA_WME);
+ set_sta_flag(sta, WLAN_STA_WME);
rcu_read_unlock();
}
@@ -382,7 +382,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
* second best option: get current TSF
* (will return -1 if not supported)
*/
- rx_timestamp = drv_get_tsf(local);
+ rx_timestamp = drv_get_tsf(local, sdata);
}
#ifdef CONFIG_MAC80211_IBSS_DEBUG
@@ -417,7 +417,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
* must be callable in atomic context.
*/
struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
- u8 *bssid,u8 *addr, u32 supp_rates,
+ u8 *bssid, u8 *addr, u32 supp_rates,
gfp_t gfp)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
@@ -452,7 +452,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
return NULL;
sta->last_rx = jiffies;
- set_sta_flags(sta, WLAN_STA_AUTHORIZED);
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
/* make sure mandatory rates are always added */
sta->sta.supp_rates[band] = supp_rates |
@@ -995,7 +995,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
kfree(sdata->u.ibss.ie);
skb = rcu_dereference_protected(sdata->u.ibss.presp,
lockdep_is_held(&sdata->u.ibss.mtx));
- rcu_assign_pointer(sdata->u.ibss.presp, NULL);
+ RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
sdata->vif.bss_conf.ibss_joined = false;
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_IBSS);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 400c09bea63..4c3d1f591be 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -136,7 +136,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
#define TX_DROP ((__force ieee80211_tx_result) 1u)
#define TX_QUEUED ((__force ieee80211_tx_result) 2u)
-#define IEEE80211_TX_FRAGMENTED BIT(0)
#define IEEE80211_TX_UNICAST BIT(1)
#define IEEE80211_TX_PS_BUFFERED BIT(2)
@@ -149,7 +148,6 @@ struct ieee80211_tx_data {
struct ieee80211_channel *channel;
- u16 ethertype;
unsigned int flags;
};
@@ -261,6 +259,7 @@ struct mesh_stats {
__u32 fwded_frames; /* Mesh total forwarded frames */
__u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/
__u32 dropped_frames_no_route; /* Not transmitted, no route found */
+ __u32 dropped_frames_congestion;/* Not forwarded due to congestion */
atomic_t estab_plinks;
};
@@ -345,6 +344,7 @@ struct ieee80211_work {
struct {
struct sk_buff *frame;
u32 wait;
+ bool status;
} offchan_tx;
};
@@ -514,6 +514,7 @@ struct ieee80211_if_mesh {
struct mesh_config mshcfg;
u32 mesh_seqnum;
bool accepting_plinks;
+ int num_gates;
const u8 *ie;
u8 ie_len;
enum {
@@ -607,6 +608,8 @@ struct ieee80211_sub_if_data {
__be16 control_port_protocol;
bool control_port_no_encrypt;
+ struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES];
+
struct work_struct work;
struct sk_buff_head skb_queue;
@@ -660,6 +663,11 @@ enum sdata_queue_type {
enum {
IEEE80211_RX_MSG = 1,
IEEE80211_TX_STATUS_MSG = 2,
+ IEEE80211_EOSP_MSG = 3,
+};
+
+struct skb_eosp_msg_data {
+ u8 sta[ETH_ALEN], iface[ETH_ALEN];
};
enum queue_stop_reason {
@@ -669,6 +677,7 @@ enum queue_stop_reason {
IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
IEEE80211_QUEUE_STOP_REASON_SUSPEND,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
+ IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE,
};
#ifdef CONFIG_MAC80211_LEDS
@@ -748,7 +757,6 @@ struct ieee80211_local {
struct workqueue_struct *workqueue;
unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES];
- struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES];
/* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
spinlock_t queue_stop_reason_lock;
@@ -1002,7 +1010,6 @@ struct ieee80211_local {
unsigned int hw_roc_duration;
u32 hw_roc_cookie;
bool hw_roc_for_tx;
- unsigned long hw_offchan_tx_cookie;
/* dummy netdev for use w/ NAPI */
struct net_device napi_dev;
@@ -1022,69 +1029,6 @@ struct ieee80211_ra_tid {
u16 tid;
};
-/* Parsed Information Elements */
-struct ieee802_11_elems {
- u8 *ie_start;
- size_t total_len;
-
- /* pointers to IEs */
- u8 *ssid;
- u8 *supp_rates;
- u8 *fh_params;
- u8 *ds_params;
- u8 *cf_params;
- struct ieee80211_tim_ie *tim;
- u8 *ibss_params;
- u8 *challenge;
- u8 *wpa;
- u8 *rsn;
- u8 *erp_info;
- u8 *ext_supp_rates;
- u8 *wmm_info;
- u8 *wmm_param;
- struct ieee80211_ht_cap *ht_cap_elem;
- struct ieee80211_ht_info *ht_info_elem;
- struct ieee80211_meshconf_ie *mesh_config;
- u8 *mesh_id;
- u8 *peer_link;
- u8 *preq;
- u8 *prep;
- u8 *perr;
- struct ieee80211_rann_ie *rann;
- u8 *ch_switch_elem;
- u8 *country_elem;
- u8 *pwr_constr_elem;
- u8 *quiet_elem; /* first quite element */
- u8 *timeout_int;
-
- /* length of them, respectively */
- u8 ssid_len;
- u8 supp_rates_len;
- u8 fh_params_len;
- u8 ds_params_len;
- u8 cf_params_len;
- u8 tim_len;
- u8 ibss_params_len;
- u8 challenge_len;
- u8 wpa_len;
- u8 rsn_len;
- u8 erp_info_len;
- u8 ext_supp_rates_len;
- u8 wmm_info_len;
- u8 wmm_param_len;
- u8 mesh_id_len;
- u8 peer_link_len;
- u8 preq_len;
- u8 prep_len;
- u8 perr_len;
- u8 ch_switch_elem_len;
- u8 country_elem_len;
- u8 pwr_constr_elem_len;
- u8 quiet_elem_len;
- u8 num_of_quiet_elem; /* can be more the one */
- u8 timeout_int_len;
-};
-
static inline struct ieee80211_local *hw_to_local(
struct ieee80211_hw *hw)
{
@@ -1233,23 +1177,10 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev);
-/*
- * radiotap header for status frames
- */
-struct ieee80211_tx_status_rtap_hdr {
- struct ieee80211_radiotap_header hdr;
- u8 rate;
- u8 padding_for_rate;
- __le16 tx_flags;
- u8 data_retries;
-} __packed;
-
-
/* HT */
void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
struct ieee80211_ht_cap *ht_cap_ie,
struct ieee80211_sta_ht_cap *ht_cap);
-void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn);
void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
const u8 *da, u16 tid,
u16 initiator, u16 reason_code);
@@ -1333,6 +1264,7 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
struct ieee80211_hdr *hdr, const u8 *tsc,
gfp_t gfp);
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void ieee802_11_parse_elems(u8 *start, size_t len,
struct ieee802_11_elems *elems);
@@ -1364,11 +1296,11 @@ void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason);
void ieee80211_add_pending_skb(struct ieee80211_local *local,
struct sk_buff *skb);
-int ieee80211_add_pending_skbs(struct ieee80211_local *local,
- struct sk_buff_head *skbs);
-int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
- struct sk_buff_head *skbs,
- void (*fn)(void *data), void *data);
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+ struct sk_buff_head *skbs);
+void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
+ struct sk_buff_head *skbs,
+ void (*fn)(void *data), void *data);
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg,
@@ -1386,7 +1318,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
- u32 ratemask, bool directed);
+ u32 ratemask, bool directed, bool no_cck);
void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
const size_t supp_rates_len,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 556e7e6ddf0..30d73552e9a 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -299,8 +299,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
goto err_del_interface;
}
- /* no locking required since STA is not live yet */
- sta->flags |= WLAN_STA_AUTHORIZED;
+ /* no atomic bitop required since STA is not live yet */
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
res = sta_info_insert(sta);
if (res) {
@@ -456,21 +456,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
BSS_CHANGED_BEACON_ENABLED);
/* remove beacon */
- rcu_assign_pointer(sdata->u.ap.beacon, NULL);
+ RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
synchronize_rcu();
kfree(old_beacon);
- /* free all potentially still buffered bcast frames */
- while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
- local->total_ps_buffered--;
- dev_kfree_skb(skb);
- }
-
/* down all dependent devices, that is VLANs */
list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
u.vlan.list)
dev_close(vlan->dev);
WARN_ON(!list_empty(&sdata->u.ap.vlans));
+
+ /* free all potentially still buffered bcast frames */
+ local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
+ skb_queue_purge(&sdata->u.ap.ps_bc_buf);
}
if (going_down)
@@ -645,7 +643,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_stop = ieee80211_stop,
.ndo_uninit = ieee80211_teardown_sdata,
.ndo_start_xmit = ieee80211_subif_start_xmit,
- .ndo_set_multicast_list = ieee80211_set_multicast_list,
+ .ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = ieee80211_change_mac,
.ndo_select_queue = ieee80211_netdev_select_queue,
@@ -689,7 +687,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_stop = ieee80211_stop,
.ndo_uninit = ieee80211_teardown_sdata,
.ndo_start_xmit = ieee80211_monitor_start_xmit,
- .ndo_set_multicast_list = ieee80211_set_multicast_list,
+ .ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_select_queue = ieee80211_monitor_select_queue,
@@ -1214,6 +1212,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
list_del_rcu(&sdata->list);
mutex_unlock(&sdata->local->iflist_mtx);
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ mesh_path_flush_by_iface(sdata);
+
synchronize_rcu();
unregister_netdevice(sdata->dev);
}
@@ -1233,6 +1234,9 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
list_del(&sdata->list);
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ mesh_path_flush_by_iface(sdata);
+
unregister_netdevice_queue(sdata->dev, &unreg_list);
}
mutex_unlock(&local->iflist_mtx);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 5150c6d11b5..756b157c2ed 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -464,7 +464,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
* some hardware cannot handle TKIP with QoS, so
* we indicate whether QoS could be in use.
*/
- if (test_sta_flags(sta, WLAN_STA_WME))
+ if (test_sta_flag(sta, WLAN_STA_WME))
key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA;
} else {
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
@@ -478,7 +478,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
/* same here, the AP could be using QoS */
ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid);
if (ap) {
- if (test_sta_flags(ap, WLAN_STA_WME))
+ if (test_sta_flag(ap, WLAN_STA_WME))
key->conf.flags |=
IEEE80211_KEY_FLAG_WMM_STA;
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index acb44230b25..d999bf3b84e 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -19,7 +19,7 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/inetdevice.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
@@ -325,6 +325,8 @@ u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
static void ieee80211_tasklet_handler(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *) data;
+ struct sta_info *sta, *tmp;
+ struct skb_eosp_msg_data *eosp_data;
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -340,6 +342,18 @@ static void ieee80211_tasklet_handler(unsigned long data)
skb->pkt_type = 0;
ieee80211_tx_status(local_to_hw(local), skb);
break;
+ case IEEE80211_EOSP_MSG:
+ eosp_data = (void *)skb->cb;
+ for_each_sta_info(local, eosp_data->sta, sta, tmp) {
+ /* skip wrong virtual interface */
+ if (memcmp(eosp_data->iface,
+ sta->sdata->vif.addr, ETH_ALEN))
+ continue;
+ clear_sta_flag(sta, WLAN_STA_SP);
+ break;
+ }
+ dev_kfree_skb(skb);
+ break;
default:
WARN(1, "mac80211: Packet is of unknown type %d\n",
skb->pkt_type);
@@ -608,6 +622,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.max_rates = 1;
local->hw.max_report_rates = 0;
local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
local->user_power_level = -1;
@@ -862,6 +877,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (local->ops->sched_scan_start)
local->hw.wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ /* mac80211 based drivers don't support internal TDLS setup */
+ if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)
+ local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+
result = wiphy_register(local->hw.wiphy);
if (result < 0)
goto fail_wiphy_register;
@@ -885,12 +904,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
* and we need some headroom for passing the frame to monitor
* interfaces, but never both at the same time.
*/
-#ifndef __CHECKER__
- BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
- sizeof(struct ieee80211_tx_status_rtap_hdr));
-#endif
local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
- sizeof(struct ieee80211_tx_status_rtap_hdr));
+ IEEE80211_TX_STATUS_HEADROOM);
debugfs_hw_add(local);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 29e9980c8e6..a7078fdba8c 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -13,10 +13,6 @@
#include "ieee80211_i.h"
#include "mesh.h"
-#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
-#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
-#define IEEE80211_MESH_RANN_INTERVAL (1 * HZ)
-
#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01
#define MESHCONF_CAPAB_FORWARDING 0x08
@@ -27,6 +23,17 @@
int mesh_allocated;
static struct kmem_cache *rm_cache;
+#ifdef CONFIG_MAC80211_MESH
+bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
+{
+ return (mgmt->u.action.u.mesh_action.action_code ==
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION);
+}
+#else
+bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
+{ return false; }
+#endif
+
void ieee80211s_init(void)
{
mesh_pathtbl_init();
@@ -193,10 +200,9 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
}
p = kmem_cache_alloc(rm_cache, GFP_ATOMIC);
- if (!p) {
- printk(KERN_DEBUG "o11s: could not allocate RMC entry\n");
+ if (!p)
return 0;
- }
+
p->seqnum = seqnum;
p->exp_time = jiffies + RMC_TIMEOUT;
memcpy(p->sa, sa, ETH_ALEN);
@@ -204,89 +210,136 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
return 0;
}
-void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+int
+mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- u8 *pos;
- int len, i, rate;
- u8 neighbors;
-
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
- len = sband->n_bitrates;
- if (len > 8)
- len = 8;
- pos = skb_put(skb, len + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = len;
- for (i = 0; i < len; i++) {
- rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- }
-
- if (sband->n_bitrates > len) {
- pos = skb_put(skb, sband->n_bitrates - len + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = sband->n_bitrates - len;
- for (i = len; i < sband->n_bitrates; i++) {
- rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- }
- }
-
- if (sband->band == IEEE80211_BAND_2GHZ) {
- pos = skb_put(skb, 2 + 1);
- *pos++ = WLAN_EID_DS_PARAMS;
- *pos++ = 1;
- *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
- }
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u8 *pos, neighbors;
+ u8 meshconf_len = sizeof(struct ieee80211_meshconf_ie);
- pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len);
- *pos++ = WLAN_EID_MESH_ID;
- *pos++ = sdata->u.mesh.mesh_id_len;
- if (sdata->u.mesh.mesh_id_len)
- memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);
+ if (skb_tailroom(skb) < 2 + meshconf_len)
+ return -ENOMEM;
- pos = skb_put(skb, 2 + sizeof(struct ieee80211_meshconf_ie));
+ pos = skb_put(skb, 2 + meshconf_len);
*pos++ = WLAN_EID_MESH_CONFIG;
- *pos++ = sizeof(struct ieee80211_meshconf_ie);
+ *pos++ = meshconf_len;
/* Active path selection protocol ID */
- *pos++ = sdata->u.mesh.mesh_pp_id;
-
+ *pos++ = ifmsh->mesh_pp_id;
/* Active path selection metric ID */
- *pos++ = sdata->u.mesh.mesh_pm_id;
-
+ *pos++ = ifmsh->mesh_pm_id;
/* Congestion control mode identifier */
- *pos++ = sdata->u.mesh.mesh_cc_id;
-
+ *pos++ = ifmsh->mesh_cc_id;
/* Synchronization protocol identifier */
- *pos++ = sdata->u.mesh.mesh_sp_id;
-
+ *pos++ = ifmsh->mesh_sp_id;
/* Authentication Protocol identifier */
- *pos++ = sdata->u.mesh.mesh_auth_id;
-
+ *pos++ = ifmsh->mesh_auth_id;
/* Mesh Formation Info - number of neighbors */
- neighbors = atomic_read(&sdata->u.mesh.mshstats.estab_plinks);
+ neighbors = atomic_read(&ifmsh->mshstats.estab_plinks);
/* Number of neighbor mesh STAs or 15 whichever is smaller */
neighbors = (neighbors > 15) ? 15 : neighbors;
*pos++ = neighbors << 1;
-
/* Mesh capability */
- sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata);
+ ifmsh->accepting_plinks = mesh_plink_availables(sdata);
*pos = MESHCONF_CAPAB_FORWARDING;
- *pos++ |= sdata->u.mesh.accepting_plinks ?
+ *pos++ |= ifmsh->accepting_plinks ?
MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
*pos++ = 0x00;
- if (sdata->u.mesh.ie) {
- int len = sdata->u.mesh.ie_len;
- const u8 *data = sdata->u.mesh.ie;
- if (skb_tailroom(skb) > len)
- memcpy(skb_put(skb, len), data, len);
+ return 0;
+}
+
+int
+mesh_add_meshid_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 2 + ifmsh->mesh_id_len)
+ return -ENOMEM;
+
+ pos = skb_put(skb, 2 + ifmsh->mesh_id_len);
+ *pos++ = WLAN_EID_MESH_ID;
+ *pos++ = ifmsh->mesh_id_len;
+ if (ifmsh->mesh_id_len)
+ memcpy(pos, ifmsh->mesh_id, ifmsh->mesh_id_len);
+
+ return 0;
+}
+
+int
+mesh_add_vendor_ies(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u8 offset, len;
+ const u8 *data;
+
+ if (!ifmsh->ie || !ifmsh->ie_len)
+ return 0;
+
+ /* fast-forward to vendor IEs */
+ offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
+
+ if (offset) {
+ len = ifmsh->ie_len - offset;
+ data = ifmsh->ie + offset;
+ if (skb_tailroom(skb) < len)
+ return -ENOMEM;
+ memcpy(skb_put(skb, len), data, len);
}
+
+ return 0;
}
+int
+mesh_add_rsn_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u8 len = 0;
+ const u8 *data;
+
+ if (!ifmsh->ie || !ifmsh->ie_len)
+ return 0;
+
+ /* find RSN IE */
+ data = ifmsh->ie;
+ while (data < ifmsh->ie + ifmsh->ie_len) {
+ if (*data == WLAN_EID_RSN) {
+ len = data[1] + 2;
+ break;
+ }
+ data++;
+ }
+
+ if (len) {
+ if (skb_tailroom(skb) < len)
+ return -ENOMEM;
+ memcpy(skb_put(skb, len), data, len);
+ }
+
+ return 0;
+}
+
+int mesh_add_ds_params_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 3)
+ return -ENOMEM;
+
+ sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ if (sband->band == IEEE80211_BAND_2GHZ) {
+ pos = skb_put(skb, 2 + 1);
+ *pos++ = WLAN_EID_DS_PARAMS;
+ *pos++ = 1;
+ *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
+ }
+
+ return 0;
+}
static void ieee80211_mesh_path_timer(unsigned long data)
{
@@ -352,8 +405,7 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
memcpy(hdr->addr3, meshsa, ETH_ALEN);
return 24;
} else {
- *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
- IEEE80211_FCTL_TODS);
+ *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */
memcpy(hdr->addr2, meshsa, ETH_ALEN);
@@ -425,7 +477,8 @@ static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
mesh_path_tx_root_frame(sdata);
mod_timer(&ifmsh->mesh_path_root_timer,
- round_jiffies(jiffies + IEEE80211_MESH_RANN_INTERVAL));
+ round_jiffies(TU_TO_EXP_TIME(
+ ifmsh->mshcfg.dot11MeshHWMPRannInterval)));
}
#ifdef CONFIG_PM
@@ -433,7 +486,7 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- /* use atomic bitops in case both timers fire at the same time */
+ /* use atomic bitops in case all timers fire at the same time */
if (del_timer_sync(&ifmsh->housekeeping_timer))
set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
@@ -557,11 +610,18 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
struct ieee80211_rx_status *rx_status)
{
switch (mgmt->u.action.category) {
- case WLAN_CATEGORY_MESH_ACTION:
- mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
+ case WLAN_CATEGORY_SELF_PROTECTED:
+ switch (mgmt->u.action.u.self_prot.action_code) {
+ case WLAN_SP_MESH_PEERING_OPEN:
+ case WLAN_SP_MESH_PEERING_CLOSE:
+ case WLAN_SP_MESH_PEERING_CONFIRM:
+ mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
+ break;
+ }
break;
- case WLAN_CATEGORY_MESH_PATH_SEL:
- mesh_rx_path_sel_frame(sdata, mgmt, len);
+ case WLAN_CATEGORY_MESH_ACTION:
+ if (mesh_action_is_path_sel(mgmt))
+ mesh_rx_path_sel_frame(sdata, mgmt, len);
break;
}
}
@@ -633,6 +693,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
ifmsh->accepting_plinks = true;
ifmsh->preq_id = 0;
ifmsh->sn = 0;
+ ifmsh->num_gates = 0;
atomic_set(&ifmsh->mpaths, 0);
mesh_rmc_init(sdata);
ifmsh->last_preq = jiffies;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 249e733362e..8c00e2d1d63 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -80,7 +80,10 @@ enum mesh_deferred_task_flags {
* retry
* @discovery_retries: number of discovery retries
* @flags: mesh path flags, as specified on &enum mesh_path_flags
- * @state_lock: mesh path state lock
+ * @state_lock: mesh path state lock used to protect changes to the
+ * mpath itself. No need to take this lock when adding or removing
+ * an mpath to a hash bucket on a path table.
+ * @is_gate: the destination station of this path is a mesh gate
*
*
* The combination of dst and sdata is unique in the mesh path table. Since the
@@ -104,6 +107,7 @@ struct mesh_path {
u8 discovery_retries;
enum mesh_path_flags flags;
spinlock_t state_lock;
+ bool is_gate;
};
/**
@@ -120,6 +124,9 @@ struct mesh_path {
* buckets
* @mean_chain_len: maximum average length for the hash buckets' list, if it is
* reached, the table will grow
+ * @known_gates: list of known mesh gates and their mpaths by the station. The
+ * gate's mpath may or may not be resolved and active.
+ *
* rcu_head: RCU head to free the table
*/
struct mesh_table {
@@ -133,6 +140,8 @@ struct mesh_table {
int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
int size_order;
int mean_chain_len;
+ struct hlist_head *known_gates;
+ spinlock_t gates_lock;
struct rcu_head rcu_head;
};
@@ -166,6 +175,8 @@ struct mesh_rmc {
u32 idx_mask;
};
+#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
+#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */
@@ -177,14 +188,6 @@ struct mesh_rmc {
/* Maximum number of paths per interface */
#define MESH_MAX_MPATHS 1024
-/* Pending ANA approval */
-#define MESH_PATH_SEL_ACTION 0
-
-/* PERR reason codes */
-#define PEER_RCODE_UNSPECIFIED 11
-#define PERR_RCODE_NO_ROUTE 12
-#define PERR_RCODE_DEST_UNREACH 13
-
/* Public interfaces */
/* Various */
int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
@@ -199,6 +202,16 @@ bool mesh_matches_local(struct ieee802_11_elems *ie,
void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
void mesh_mgmt_ies_add(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
+int mesh_add_meshconf_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_meshid_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_rsn_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_vendor_ies(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_ds_params_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
void ieee80211s_init(void);
@@ -223,10 +236,13 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx,
struct ieee80211_sub_if_data *sdata);
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
-void mesh_path_flush(struct ieee80211_sub_if_data *sdata);
void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len);
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
+
+int mesh_path_add_gate(struct mesh_path *mpath);
+int mesh_path_send_to_gates(struct mesh_path *mpath);
+int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
/* Mesh plinks */
void mesh_neighbour_update(u8 *hw_addr, u32 rates,
struct ieee80211_sub_if_data *sdata,
@@ -256,12 +272,14 @@ void mesh_pathtbl_unregister(void);
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
void mesh_path_timer(unsigned long data);
void mesh_path_flush_by_nexthop(struct sta_info *sta);
+void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
void mesh_path_discard_frame(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
+bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
extern int mesh_paths_generation;
#ifdef CONFIG_MAC80211_MESH
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 3460108810d..174040a4288 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -8,10 +8,12 @@
*/
#include <linux/slab.h>
+#include "wme.h"
#include "mesh.h"
#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
-#define mhwmp_dbg(fmt, args...) printk(KERN_DEBUG "Mesh HWMP: " fmt, ##args)
+#define mhwmp_dbg(fmt, args...) \
+ printk(KERN_DEBUG "Mesh HWMP (%s): " fmt "\n", sdata->name, ##args)
#else
#define mhwmp_dbg(fmt, args...) do { (void)(0); } while (0)
#endif
@@ -68,12 +70,12 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x) PREQ_IE_TTL(x)
-#define PREP_IE_ORIG_ADDR(x) (x + 3)
-#define PREP_IE_ORIG_SN(x) u32_field_get(x, 9, 0)
+#define PREP_IE_ORIG_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
+#define PREP_IE_ORIG_SN(x) u32_field_get(x, 27, AE_F_SET(x))
#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x))
-#define PREP_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
-#define PREP_IE_TARGET_SN(x) u32_field_get(x, 27, AE_F_SET(x))
+#define PREP_IE_TARGET_ADDR(x) (x + 3)
+#define PREP_IE_TARGET_SN(x) u32_field_get(x, 9, 0)
#define PERR_IE_TTL(x) (*(x))
#define PERR_IE_TARGET_FLAGS(x) (*(x + 2))
@@ -132,24 +134,25 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
- mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
- mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
+ mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
+ mgmt->u.action.u.mesh_action.action_code =
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
switch (action) {
case MPATH_PREQ:
- mhwmp_dbg("sending PREQ to %pM\n", target);
+ mhwmp_dbg("sending PREQ to %pM", target);
ie_len = 37;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREQ;
break;
case MPATH_PREP:
- mhwmp_dbg("sending PREP to %pM\n", target);
+ mhwmp_dbg("sending PREP to %pM", target);
ie_len = 31;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREP;
break;
case MPATH_RANN:
- mhwmp_dbg("sending RANN from %pM\n", orig_addr);
+ mhwmp_dbg("sending RANN from %pM", orig_addr);
ie_len = sizeof(struct ieee80211_rann_ie);
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_RANN;
@@ -163,35 +166,63 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
*pos++ = flags;
*pos++ = hop_count;
*pos++ = ttl;
- if (action == MPATH_PREQ) {
- memcpy(pos, &preq_id, 4);
+ if (action == MPATH_PREP) {
+ memcpy(pos, target, ETH_ALEN);
+ pos += ETH_ALEN;
+ memcpy(pos, &target_sn, 4);
pos += 4;
- }
- memcpy(pos, orig_addr, ETH_ALEN);
- pos += ETH_ALEN;
- memcpy(pos, &orig_sn, 4);
- pos += 4;
- if (action != MPATH_RANN) {
- memcpy(pos, &lifetime, 4);
+ } else {
+ if (action == MPATH_PREQ) {
+ memcpy(pos, &preq_id, 4);
+ pos += 4;
+ }
+ memcpy(pos, orig_addr, ETH_ALEN);
+ pos += ETH_ALEN;
+ memcpy(pos, &orig_sn, 4);
pos += 4;
}
+ memcpy(pos, &lifetime, 4); /* interval for RANN */
+ pos += 4;
memcpy(pos, &metric, 4);
pos += 4;
if (action == MPATH_PREQ) {
- /* destination count */
- *pos++ = 1;
+ *pos++ = 1; /* destination count */
*pos++ = target_flags;
- }
- if (action != MPATH_RANN) {
memcpy(pos, target, ETH_ALEN);
pos += ETH_ALEN;
memcpy(pos, &target_sn, 4);
+ pos += 4;
+ } else if (action == MPATH_PREP) {
+ memcpy(pos, orig_addr, ETH_ALEN);
+ pos += ETH_ALEN;
+ memcpy(pos, &orig_sn, 4);
+ pos += 4;
}
ieee80211_tx_skb(sdata, skb);
return 0;
}
+
+/* Headroom is not adjusted. Caller should ensure that skb has sufficient
+ * headroom in case the frame is encrypted. */
+static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ skb_set_mac_header(skb, 0);
+ skb_set_network_header(skb, 0);
+ skb_set_transport_header(skb, 0);
+
+ /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
+ skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+ skb->priority = 7;
+
+ info->control.vif = &sdata->vif;
+ ieee80211_set_qos_hdr(sdata, skb);
+}
+
/**
* mesh_send_path error - Sends a PERR mesh management frame
*
@@ -199,6 +230,10 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
* @target_sn: SN of the broken destination
* @target_rcode: reason code for this PERR
* @ra: node this frame is addressed to
+ *
+ * Note: This function may be called with driver locks taken that the driver
+ * also acquires in the TX path. To avoid a deadlock we don't transmit the
+ * frame directly but add it to the pending queue instead.
*/
int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
__le16 target_rcode, const u8 *ra,
@@ -212,7 +247,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
if (!skb)
return -1;
- skb_reserve(skb, local->hw.extra_tx_headroom);
+ skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
/* 25 is the size of the common mgmt part (24) plus the size of the
* common action part (1)
*/
@@ -224,9 +259,11 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
memcpy(mgmt->da, ra, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
- /* BSSID is left zeroed, wildcard value */
- mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
- mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
+ /* BSSID == SA */
+ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
+ mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
+ mgmt->u.action.u.mesh_action.action_code =
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
ie_len = 15;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PERR;
@@ -251,7 +288,9 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
pos += 4;
memcpy(pos, &target_rcode, 2);
- ieee80211_tx_skb(sdata, skb);
+ /* see note in function header */
+ prepare_frame_for_deferred_tx(sdata, skb);
+ ieee80211_add_pending_skb(local, skb);
return 0;
}
@@ -449,7 +488,6 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
if (fresh_info) {
mesh_path_assign_nexthop(mpath, sta);
- mpath->flags &= ~MESH_PATH_SN_VALID;
mpath->metric = last_hop_metric;
mpath->exp_time = time_after(mpath->exp_time, exp_time)
? mpath->exp_time : exp_time;
@@ -484,10 +522,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
orig_sn = PREQ_IE_ORIG_SN(preq_elem);
target_flags = PREQ_IE_TARGET_F(preq_elem);
- mhwmp_dbg("received PREQ from %pM\n", orig_addr);
+ mhwmp_dbg("received PREQ from %pM", orig_addr);
if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) {
- mhwmp_dbg("PREQ is for us\n");
+ mhwmp_dbg("PREQ is for us");
forward = false;
reply = true;
metric = 0;
@@ -523,7 +561,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
lifetime = PREQ_IE_LIFETIME(preq_elem);
ttl = ifmsh->mshcfg.element_ttl;
if (ttl != 0) {
- mhwmp_dbg("replying to the PREQ\n");
+ mhwmp_dbg("replying to the PREQ");
mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr,
cpu_to_le32(target_sn), 0, orig_addr,
cpu_to_le32(orig_sn), mgmt->sa, 0, ttl,
@@ -543,7 +581,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
ifmsh->mshstats.dropped_frames_ttl++;
return;
}
- mhwmp_dbg("forwarding the PREQ from %pM\n", orig_addr);
+ mhwmp_dbg("forwarding the PREQ from %pM", orig_addr);
--ttl;
flags = PREQ_IE_FLAGS(preq_elem);
preq_id = PREQ_IE_PREQ_ID(preq_elem);
@@ -578,7 +616,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
u8 next_hop[ETH_ALEN];
u32 target_sn, orig_sn, lifetime;
- mhwmp_dbg("received PREP from %pM\n", PREP_IE_ORIG_ADDR(prep_elem));
+ mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
/* Note that we divert from the draft nomenclature and denominate
* destination to what the draft refers to as origininator. So in this
@@ -684,6 +722,8 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
u8 ttl, flags, hopcount;
u8 *orig_addr;
u32 orig_sn, metric;
+ u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
+ bool root_is_gate;
ttl = rann->rann_ttl;
if (ttl <= 1) {
@@ -692,12 +732,19 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
}
ttl--;
flags = rann->rann_flags;
+ root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
orig_addr = rann->rann_addr;
orig_sn = rann->rann_seq;
hopcount = rann->rann_hopcount;
hopcount++;
metric = rann->rann_metric;
- mhwmp_dbg("received RANN from %pM\n", orig_addr);
+
+ /* Ignore our own RANNs */
+ if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
+ return;
+
+ mhwmp_dbg("received RANN from %pM (is_gate=%d)", orig_addr,
+ root_is_gate);
rcu_read_lock();
mpath = mesh_path_lookup(orig_addr, sdata);
@@ -709,18 +756,28 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
sdata->u.mesh.mshstats.dropped_frames_no_route++;
return;
}
- mesh_queue_preq(mpath,
- PREQ_Q_F_START | PREQ_Q_F_REFRESH);
}
+
+ if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
+ time_after(jiffies, mpath->exp_time - 1*HZ)) &&
+ !(mpath->flags & MESH_PATH_FIXED)) {
+ mhwmp_dbg("%s time to refresh root mpath %pM", sdata->name,
+ orig_addr);
+ mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
+ }
+
if (mpath->sn < orig_sn) {
mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
cpu_to_le32(orig_sn),
0, NULL, 0, broadcast_addr,
- hopcount, ttl, 0,
+ hopcount, ttl, cpu_to_le32(interval),
cpu_to_le32(metric + mpath->metric),
0, sdata);
mpath->sn = orig_sn;
}
+ if (root_is_gate)
+ mesh_path_add_gate(mpath);
+
rcu_read_unlock();
}
@@ -732,11 +789,20 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems elems;
size_t baselen;
u32 last_hop_metric;
+ struct sta_info *sta;
/* need action_code */
if (len < IEEE80211_MIN_ACTION_SIZE + 1)
return;
+ rcu_read_lock();
+ sta = sta_info_get(sdata, mgmt->sa);
+ if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
len - baselen, &elems);
@@ -788,16 +854,16 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
if (!preq_node) {
- mhwmp_dbg("could not allocate PREQ node\n");
+ mhwmp_dbg("could not allocate PREQ node");
return;
}
- spin_lock(&ifmsh->mesh_preq_queue_lock);
+ spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
- spin_unlock(&ifmsh->mesh_preq_queue_lock);
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
kfree(preq_node);
if (printk_ratelimit())
- mhwmp_dbg("PREQ node queue full\n");
+ mhwmp_dbg("PREQ node queue full");
return;
}
@@ -806,7 +872,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
++ifmsh->preq_queue_len;
- spin_unlock(&ifmsh->mesh_preq_queue_lock);
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
@@ -982,35 +1048,46 @@ void mesh_path_timer(unsigned long data)
{
struct mesh_path *mpath = (void *) data;
struct ieee80211_sub_if_data *sdata = mpath->sdata;
+ int ret;
if (sdata->local->quiescing)
return;
spin_lock_bh(&mpath->state_lock);
if (mpath->flags & MESH_PATH_RESOLVED ||
- (!(mpath->flags & MESH_PATH_RESOLVING)))
+ (!(mpath->flags & MESH_PATH_RESOLVING))) {
mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
- else if (mpath->discovery_retries < max_preq_retries(sdata)) {
+ spin_unlock_bh(&mpath->state_lock);
+ } else if (mpath->discovery_retries < max_preq_retries(sdata)) {
++mpath->discovery_retries;
mpath->discovery_timeout *= 2;
+ spin_unlock_bh(&mpath->state_lock);
mesh_queue_preq(mpath, 0);
} else {
mpath->flags = 0;
mpath->exp_time = jiffies;
- mesh_path_flush_pending(mpath);
+ spin_unlock_bh(&mpath->state_lock);
+ if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
+ ret = mesh_path_send_to_gates(mpath);
+ if (ret)
+ mhwmp_dbg("no gate was reachable");
+ } else
+ mesh_path_flush_pending(mpath);
}
-
- spin_unlock_bh(&mpath->state_lock);
}
void
mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
+ u8 flags;
- mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->vif.addr,
+ flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
+ ? RANN_FLAG_IS_GATE : 0;
+ mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
cpu_to_le32(++ifmsh->sn),
0, NULL, 0, broadcast_addr,
0, sdata->u.mesh.mshcfg.element_ttl,
- 0, 0, 0, sdata);
+ cpu_to_le32(interval), 0, 0, sdata);
}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 068ee651825..7f54c504223 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -14,9 +14,16 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
+#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
+#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
+#define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
+#else
+#define mpath_dbg(fmt, args...) do { (void)(0); } while (0)
+#endif
+
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER 2
@@ -42,8 +49,10 @@ static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
- * as readers. When reading the table (i.e. doing lookups) we are well protected
- * by RCU
+ * as readers. RCU provides sufficient protection only when reading the table
+ * (i.e. doing lookups). Adding or adding or removing nodes requires we take
+ * the read lock or we risk operating on an old table. The write lock is only
+ * needed when modifying the number of buckets a table.
*/
static DEFINE_RWLOCK(pathtbl_resize_lock);
@@ -60,6 +69,8 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
lockdep_is_held(&pathtbl_resize_lock));
}
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);
+
/*
* CAREFUL -- "tbl" must not be an expression,
* in particular not an rcu_dereference(), since
@@ -103,6 +114,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
sizeof(newtbl->hash_rnd));
for (i = 0; i <= newtbl->hash_mask; i++)
spin_lock_init(&newtbl->hashwlock[i]);
+ spin_lock_init(&newtbl->gates_lock);
return newtbl;
}
@@ -118,6 +130,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
struct hlist_head *mesh_hash;
struct hlist_node *p, *q;
+ struct mpath_node *gate;
int i;
mesh_hash = tbl->hash_buckets;
@@ -129,6 +142,17 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
}
spin_unlock_bh(&tbl->hashwlock[i]);
}
+ if (free_leafs) {
+ spin_lock_bh(&tbl->gates_lock);
+ hlist_for_each_entry_safe(gate, p, q,
+ tbl->known_gates, list) {
+ hlist_del(&gate->list);
+ kfree(gate);
+ }
+ kfree(tbl->known_gates);
+ spin_unlock_bh(&tbl->gates_lock);
+ }
+
__mesh_table_free(tbl);
}
@@ -146,6 +170,7 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
newtbl->free_node = oldtbl->free_node;
newtbl->mean_chain_len = oldtbl->mean_chain_len;
newtbl->copy_node = oldtbl->copy_node;
+ newtbl->known_gates = oldtbl->known_gates;
atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
oldhash = oldtbl->hash_buckets;
@@ -188,6 +213,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
struct ieee80211_hdr *hdr;
struct sk_buff_head tmpq;
unsigned long flags;
+ struct ieee80211_sub_if_data *sdata = mpath->sdata;
rcu_assign_pointer(mpath->next_hop, sta);
@@ -198,6 +224,8 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
hdr = (struct ieee80211_hdr *) skb->data;
memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
+ skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
+ ieee80211_set_qos_hdr(sdata, skb);
__skb_queue_tail(&tmpq, skb);
}
@@ -205,62 +233,128 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
+static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
+ struct mesh_path *gate_mpath)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211s_hdr *mshdr;
+ int mesh_hdrlen, hdrlen;
+ char *next_hop;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+
+ if (!(mshdr->flags & MESH_FLAGS_AE)) {
+ /* size of the fixed part of the mesh header */
+ mesh_hdrlen = 6;
+
+ /* make room for the two extended addresses */
+ skb_push(skb, 2 * ETH_ALEN);
+ memmove(skb->data, hdr, hdrlen + mesh_hdrlen);
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ /* we preserve the previous mesh header and only add
+ * the new addreses */
+ mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+ mshdr->flags = MESH_FLAGS_AE_A5_A6;
+ memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
+ memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
+ }
+
+ /* update next hop */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ rcu_read_lock();
+ next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
+ memcpy(hdr->addr1, next_hop, ETH_ALEN);
+ rcu_read_unlock();
+ memcpy(hdr->addr3, dst_addr, ETH_ALEN);
+}
/**
- * mesh_path_lookup - look up a path in the mesh path table
- * @dst: hardware address (ETH_ALEN length) of destination
- * @sdata: local subif
*
- * Returns: pointer to the mesh path structure, or NULL if not found
+ * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
*
- * Locking: must be called within a read rcu section.
+ * This function is used to transfer or copy frames from an unresolved mpath to
+ * a gate mpath. The function also adds the Address Extension field and
+ * updates the next hop.
+ *
+ * If a frame already has an Address Extension field, only the next hop and
+ * destination addresses are updated.
+ *
+ * The gate mpath must be an active mpath with a valid mpath->next_hop.
+ *
+ * @mpath: An active mpath the frames will be sent to (i.e. the gate)
+ * @from_mpath: The failed mpath
+ * @copy: When true, copy all the frames to the new mpath queue. When false,
+ * move them.
*/
-struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
+ struct mesh_path *from_mpath,
+ bool copy)
{
- struct mesh_path *mpath;
- struct hlist_node *n;
- struct hlist_head *bucket;
- struct mesh_table *tbl;
- struct mpath_node *node;
+ struct sk_buff *skb, *cp_skb = NULL;
+ struct sk_buff_head gateq, failq;
+ unsigned long flags;
+ int num_skbs;
- tbl = rcu_dereference(mesh_paths);
+ BUG_ON(gate_mpath == from_mpath);
+ BUG_ON(!gate_mpath->next_hop);
- bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
- hlist_for_each_entry_rcu(node, n, bucket, list) {
- mpath = node->mpath;
- if (mpath->sdata == sdata &&
- memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
- if (MPATH_EXPIRED(mpath)) {
- spin_lock_bh(&mpath->state_lock);
- if (MPATH_EXPIRED(mpath))
- mpath->flags &= ~MESH_PATH_ACTIVE;
- spin_unlock_bh(&mpath->state_lock);
- }
- return mpath;
+ __skb_queue_head_init(&gateq);
+ __skb_queue_head_init(&failq);
+
+ spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+ skb_queue_splice_init(&from_mpath->frame_queue, &failq);
+ spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
+
+ num_skbs = skb_queue_len(&failq);
+
+ while (num_skbs--) {
+ skb = __skb_dequeue(&failq);
+ if (copy) {
+ cp_skb = skb_copy(skb, GFP_ATOMIC);
+ if (cp_skb)
+ __skb_queue_tail(&failq, cp_skb);
}
+
+ prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
+ __skb_queue_tail(&gateq, skb);
}
- return NULL;
+
+ spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
+ skb_queue_splice(&gateq, &gate_mpath->frame_queue);
+ mpath_dbg("Mpath queue for gate %pM has %d frames\n",
+ gate_mpath->dst,
+ skb_queue_len(&gate_mpath->frame_queue));
+ spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
+
+ if (!copy)
+ return;
+
+ spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+ skb_queue_splice(&failq, &from_mpath->frame_queue);
+ spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
-struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+
+static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
+ struct ieee80211_sub_if_data *sdata)
{
struct mesh_path *mpath;
struct hlist_node *n;
struct hlist_head *bucket;
- struct mesh_table *tbl;
struct mpath_node *node;
- tbl = rcu_dereference(mpp_paths);
-
bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
hlist_for_each_entry_rcu(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
+ memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
if (MPATH_EXPIRED(mpath)) {
spin_lock_bh(&mpath->state_lock);
- if (MPATH_EXPIRED(mpath))
- mpath->flags &= ~MESH_PATH_ACTIVE;
+ mpath->flags &= ~MESH_PATH_ACTIVE;
spin_unlock_bh(&mpath->state_lock);
}
return mpath;
@@ -269,6 +363,25 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
return NULL;
}
+/**
+ * mesh_path_lookup - look up a path in the mesh path table
+ * @dst: hardware address (ETH_ALEN length) of destination
+ * @sdata: local subif
+ *
+ * Returns: pointer to the mesh path structure, or NULL if not found
+ *
+ * Locking: must be called within a read rcu section.
+ */
+struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+ return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
+}
+
+struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+ return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
+}
+
/**
* mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
@@ -293,8 +406,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
if (j++ == idx) {
if (MPATH_EXPIRED(node->mpath)) {
spin_lock_bh(&node->mpath->state_lock);
- if (MPATH_EXPIRED(node->mpath))
- node->mpath->flags &= ~MESH_PATH_ACTIVE;
+ node->mpath->flags &= ~MESH_PATH_ACTIVE;
spin_unlock_bh(&node->mpath->state_lock);
}
return node->mpath;
@@ -304,6 +416,109 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
return NULL;
}
+static void mesh_gate_node_reclaim(struct rcu_head *rp)
+{
+ struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+ kfree(node);
+}
+
+/**
+ * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
+ * @mesh_tbl: table which contains known_gates list
+ * @mpath: mpath to known mesh gate
+ *
+ * Returns: 0 on success
+ *
+ */
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+ struct mpath_node *gate, *new_gate;
+ struct hlist_node *n;
+ int err;
+
+ rcu_read_lock();
+ tbl = rcu_dereference(tbl);
+
+ hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
+ if (gate->mpath == mpath) {
+ err = -EEXIST;
+ goto err_rcu;
+ }
+
+ new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
+ if (!new_gate) {
+ err = -ENOMEM;
+ goto err_rcu;
+ }
+
+ mpath->is_gate = true;
+ mpath->sdata->u.mesh.num_gates++;
+ new_gate->mpath = mpath;
+ spin_lock_bh(&tbl->gates_lock);
+ hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
+ spin_unlock_bh(&tbl->gates_lock);
+ rcu_read_unlock();
+ mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
+ mpath->sdata->name, mpath->dst,
+ mpath->sdata->u.mesh.num_gates);
+ return 0;
+err_rcu:
+ rcu_read_unlock();
+ return err;
+}
+
+/**
+ * mesh_gate_del - remove a mesh gate from the list of known gates
+ * @tbl: table which holds our list of known gates
+ * @mpath: gate mpath
+ *
+ * Returns: 0 on success
+ *
+ * Locking: must be called inside rcu_read_lock() section
+ */
+static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+ struct mpath_node *gate;
+ struct hlist_node *p, *q;
+
+ tbl = rcu_dereference(tbl);
+
+ hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
+ if (gate->mpath == mpath) {
+ spin_lock_bh(&tbl->gates_lock);
+ hlist_del_rcu(&gate->list);
+ call_rcu(&gate->rcu, mesh_gate_node_reclaim);
+ spin_unlock_bh(&tbl->gates_lock);
+ mpath->sdata->u.mesh.num_gates--;
+ mpath->is_gate = false;
+ mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
+ "%d known gates\n", mpath->sdata->name,
+ mpath->dst, mpath->sdata->u.mesh.num_gates);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ *
+ * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
+ * @mpath: gate path to add to table
+ */
+int mesh_path_add_gate(struct mesh_path *mpath)
+{
+ return mesh_gate_add(mesh_paths, mpath);
+}
+
+/**
+ * mesh_gate_num - number of gates known to this interface
+ * @sdata: subif data
+ */
+int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
+{
+ return sdata->u.mesh.num_gates;
+}
+
/**
* mesh_path_add - allocate and add a new path to the mesh path table
* @addr: destination address of the path (ETH_ALEN length)
@@ -481,6 +696,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
new_mpath->flags = 0;
skb_queue_head_init(&new_mpath->frame_queue);
new_node->mpath = new_mpath;
+ init_timer(&new_mpath->timer);
new_mpath->exp_time = jiffies;
spin_lock_init(&new_mpath->state_lock);
@@ -539,28 +755,53 @@ void mesh_plink_broken(struct sta_info *sta)
struct hlist_node *p;
struct ieee80211_sub_if_data *sdata = sta->sdata;
int i;
+ __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);
rcu_read_lock();
tbl = rcu_dereference(mesh_paths);
for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
- spin_lock_bh(&mpath->state_lock);
if (rcu_dereference(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) {
+ spin_lock_bh(&mpath->state_lock);
mpath->flags &= ~MESH_PATH_ACTIVE;
++mpath->sn;
spin_unlock_bh(&mpath->state_lock);
mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
mpath->dst, cpu_to_le32(mpath->sn),
- cpu_to_le16(PERR_RCODE_DEST_UNREACH),
- bcast, sdata);
- } else
- spin_unlock_bh(&mpath->state_lock);
+ reason, bcast, sdata);
+ }
}
rcu_read_unlock();
}
+static void mesh_path_node_reclaim(struct rcu_head *rp)
+{
+ struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+ struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+
+ del_timer_sync(&node->mpath->timer);
+ atomic_dec(&sdata->u.mesh.mpaths);
+ kfree(node->mpath);
+ kfree(node);
+}
+
+/* needs to be called with the corresponding hashwlock taken */
+static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
+{
+ struct mesh_path *mpath;
+ mpath = node->mpath;
+ spin_lock(&mpath->state_lock);
+ mpath->flags |= MESH_PATH_RESOLVING;
+ if (mpath->is_gate)
+ mesh_gate_del(tbl, mpath);
+ hlist_del_rcu(&node->list);
+ call_rcu(&node->rcu, mesh_path_node_reclaim);
+ spin_unlock(&mpath->state_lock);
+ atomic_dec(&tbl->entries);
+}
+
/**
* mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
*
@@ -581,42 +822,59 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
int i;
rcu_read_lock();
- tbl = rcu_dereference(mesh_paths);
+ read_lock_bh(&pathtbl_resize_lock);
+ tbl = resize_dereference_mesh_paths();
for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
- if (rcu_dereference(mpath->next_hop) == sta)
- mesh_path_del(mpath->dst, mpath->sdata);
+ if (rcu_dereference(mpath->next_hop) == sta) {
+ spin_lock_bh(&tbl->hashwlock[i]);
+ __mesh_path_del(tbl, node);
+ spin_unlock_bh(&tbl->hashwlock[i]);
+ }
}
+ read_unlock_bh(&pathtbl_resize_lock);
rcu_read_unlock();
}
-void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
+static void table_flush_by_iface(struct mesh_table *tbl,
+ struct ieee80211_sub_if_data *sdata)
{
- struct mesh_table *tbl;
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_node *p;
int i;
- rcu_read_lock();
- tbl = rcu_dereference(mesh_paths);
+ WARN_ON(!rcu_read_lock_held());
for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
- if (mpath->sdata == sdata)
- mesh_path_del(mpath->dst, mpath->sdata);
+ if (mpath->sdata != sdata)
+ continue;
+ spin_lock_bh(&tbl->hashwlock[i]);
+ __mesh_path_del(tbl, node);
+ spin_unlock_bh(&tbl->hashwlock[i]);
}
- rcu_read_unlock();
}
-static void mesh_path_node_reclaim(struct rcu_head *rp)
+/**
+ * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
+ *
+ * This function deletes both mesh paths as well as mesh portal paths.
+ *
+ * @sdata - interface data to match
+ *
+ */
+void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
- struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
- struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+ struct mesh_table *tbl;
- del_timer_sync(&node->mpath->timer);
- atomic_dec(&sdata->u.mesh.mpaths);
- kfree(node->mpath);
- kfree(node);
+ rcu_read_lock();
+ read_lock_bh(&pathtbl_resize_lock);
+ tbl = resize_dereference_mesh_paths();
+ table_flush_by_iface(tbl, sdata);
+ tbl = resize_dereference_mpp_paths();
+ table_flush_by_iface(tbl, sdata);
+ read_unlock_bh(&pathtbl_resize_lock);
+ rcu_read_unlock();
}
/**
@@ -647,12 +905,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
mpath = node->mpath;
if (mpath->sdata == sdata &&
memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
- spin_lock(&mpath->state_lock);
- mpath->flags |= MESH_PATH_RESOLVING;
- hlist_del_rcu(&node->list);
- call_rcu(&node->rcu, mesh_path_node_reclaim);
- atomic_dec(&tbl->entries);
- spin_unlock(&mpath->state_lock);
+ __mesh_path_del(tbl, node);
goto enddel;
}
}
@@ -681,6 +934,58 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
}
/**
+ * mesh_path_send_to_gates - sends pending frames to all known mesh gates
+ *
+ * @mpath: mesh path whose queue will be emptied
+ *
+ * If there is only one gate, the frames are transferred from the failed mpath
+ * queue to that gate's queue. If there are more than one gates, the frames
+ * are copied from each gate to the next. After frames are copied, the
+ * mpath queues are emptied onto the transmission queue.
+ */
+int mesh_path_send_to_gates(struct mesh_path *mpath)
+{
+ struct ieee80211_sub_if_data *sdata = mpath->sdata;
+ struct hlist_node *n;
+ struct mesh_table *tbl;
+ struct mesh_path *from_mpath = mpath;
+ struct mpath_node *gate = NULL;
+ bool copy = false;
+ struct hlist_head *known_gates;
+
+ rcu_read_lock();
+ tbl = rcu_dereference(mesh_paths);
+ known_gates = tbl->known_gates;
+ rcu_read_unlock();
+
+ if (!known_gates)
+ return -EHOSTUNREACH;
+
+ hlist_for_each_entry_rcu(gate, n, known_gates, list) {
+ if (gate->mpath->sdata != sdata)
+ continue;
+
+ if (gate->mpath->flags & MESH_PATH_ACTIVE) {
+ mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
+ mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
+ from_mpath = gate->mpath;
+ copy = true;
+ } else {
+ mpath_dbg("Not forwarding %p\n", gate->mpath);
+ mpath_dbg("flags %x\n", gate->mpath->flags);
+ }
+ }
+
+ hlist_for_each_entry_rcu(gate, n, known_gates, list)
+ if (gate->mpath->sdata == sdata) {
+ mpath_dbg("Sending to %pM\n", gate->mpath->dst);
+ mesh_path_tx_pending(gate->mpath);
+ }
+
+ return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
+}
+
+/**
* mesh_path_discard_frame - discard a frame whose path could not be resolved
*
* @skb: frame to discard
@@ -699,18 +1004,23 @@ void mesh_path_discard_frame(struct sk_buff *skb,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct mesh_path *mpath;
u32 sn = 0;
+ __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
u8 *ra, *da;
da = hdr->addr3;
ra = hdr->addr1;
+ rcu_read_lock();
mpath = mesh_path_lookup(da, sdata);
- if (mpath)
+ if (mpath) {
+ spin_lock_bh(&mpath->state_lock);
sn = ++mpath->sn;
+ spin_unlock_bh(&mpath->state_lock);
+ }
+ rcu_read_unlock();
mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
- cpu_to_le32(sn),
- cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
+ cpu_to_le32(sn), reason, ra, sdata);
}
kfree_skb(skb);
@@ -728,8 +1038,7 @@ void mesh_path_flush_pending(struct mesh_path *mpath)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(&mpath->frame_queue)) &&
- (mpath->flags & MESH_PATH_ACTIVE))
+ while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
mesh_path_discard_frame(skb, mpath->sdata);
}
@@ -790,6 +1099,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
int mesh_pathtbl_init(void)
{
struct mesh_table *tbl_path, *tbl_mpp;
+ int ret;
tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
if (!tbl_path)
@@ -797,21 +1107,40 @@ int mesh_pathtbl_init(void)
tbl_path->free_node = &mesh_path_node_free;
tbl_path->copy_node = &mesh_path_node_copy;
tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
+ tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+ if (!tbl_path->known_gates) {
+ ret = -ENOMEM;
+ goto free_path;
+ }
+ INIT_HLIST_HEAD(tbl_path->known_gates);
+
tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
if (!tbl_mpp) {
- mesh_table_free(tbl_path, true);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_path;
}
tbl_mpp->free_node = &mesh_path_node_free;
tbl_mpp->copy_node = &mesh_path_node_copy;
tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
+ tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+ if (!tbl_mpp->known_gates) {
+ ret = -ENOMEM;
+ goto free_mpp;
+ }
+ INIT_HLIST_HEAD(tbl_mpp->known_gates);
/* Need no locking since this is during init */
RCU_INIT_POINTER(mesh_paths, tbl_path);
RCU_INIT_POINTER(mpp_paths, tbl_mpp);
return 0;
+
+free_mpp:
+ mesh_table_free(tbl_mpp, true);
+free_path:
+ mesh_table_free(tbl_path, true);
+ return ret;
}
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
@@ -828,14 +1157,10 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
if (node->mpath->sdata != sdata)
continue;
mpath = node->mpath;
- spin_lock_bh(&mpath->state_lock);
if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
(!(mpath->flags & MESH_PATH_FIXED)) &&
- time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
- spin_unlock_bh(&mpath->state_lock);
+ time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
mesh_path_del(mpath->dst, mpath->sdata);
- } else
- spin_unlock_bh(&mpath->state_lock);
}
rcu_read_unlock();
}
@@ -843,6 +1168,6 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
void mesh_pathtbl_unregister(void)
{
/* no need for locking during exit path */
- mesh_table_free(rcu_dereference_raw(mesh_paths), true);
- mesh_table_free(rcu_dereference_raw(mpp_paths), true);
+ mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
+ mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index f4adc091788..7e57f5d07f6 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -19,35 +19,18 @@
#define mpl_dbg(fmt, args...) do { (void)(0); } while (0)
#endif
-#define PLINK_GET_LLID(p) (p + 4)
-#define PLINK_GET_PLID(p) (p + 6)
+#define PLINK_GET_LLID(p) (p + 2)
+#define PLINK_GET_PLID(p) (p + 4)
#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
jiffies + HZ * t / 1000))
-/* Peer link cancel reasons, all subject to ANA approval */
-#define MESH_LINK_CANCELLED 2
-#define MESH_MAX_NEIGHBORS 3
-#define MESH_CAPABILITY_POLICY_VIOLATION 4
-#define MESH_CLOSE_RCVD 5
-#define MESH_MAX_RETRIES 6
-#define MESH_CONFIRM_TIMEOUT 7
-#define MESH_SECURITY_ROLE_NEGOTIATION_DIFFERS 8
-#define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9
-#define MESH_SECURITY_FAILED_VERIFICATION 10
-
#define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries)
#define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout)
#define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout)
#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout)
#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
-enum plink_frame_type {
- PLINK_OPEN = 1,
- PLINK_CONFIRM,
- PLINK_CLOSE
-};
-
enum plink_event {
PLINK_UNDEFINED,
OPN_ACPT,
@@ -60,6 +43,10 @@ enum plink_event {
CLS_IGNR
};
+static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_self_protected_actioncode action,
+ u8 *da, __le16 llid, __le16 plid, __le16 reason);
+
static inline
void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
{
@@ -105,7 +92,9 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
if (!sta)
return NULL;
- sta->flags = WLAN_STA_AUTHORIZED | WLAN_STA_AUTH;
+ set_sta_flag(sta, WLAN_STA_AUTH);
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ set_sta_flag(sta, WLAN_STA_WME);
sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
rate_control_rate_init(sta);
@@ -150,6 +139,10 @@ void mesh_plink_deactivate(struct sta_info *sta)
spin_lock_bh(&sta->lock);
deactivated = __mesh_plink_deactivate(sta);
+ sta->reason = cpu_to_le16(WLAN_REASON_MESH_PEER_CANCELED);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, sta->llid, sta->plid,
+ sta->reason);
spin_unlock_bh(&sta->lock);
if (deactivated)
@@ -157,16 +150,16 @@ void mesh_plink_deactivate(struct sta_info *sta)
}
static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
- enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid,
- __le16 reason) {
+ enum ieee80211_self_protected_actioncode action,
+ u8 *da, __le16 llid, __le16 plid, __le16 reason) {
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
sdata->u.mesh.ie_len);
struct ieee80211_mgmt *mgmt;
bool include_plid = false;
- static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A };
+ int ie_len = 4;
+ u16 peering_proto = 0;
u8 *pos;
- int ie_len;
if (!skb)
return -1;
@@ -175,63 +168,75 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
* common action part (1)
*/
mgmt = (struct ieee80211_mgmt *)
- skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action));
- memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action));
+ skb_put(skb, 25 + sizeof(mgmt->u.action.u.self_prot));
+ memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.self_prot));
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
- mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
- mgmt->u.action.u.plink_action.action_code = action;
-
- if (action == PLINK_CLOSE)
- mgmt->u.action.u.plink_action.aux = reason;
- else {
- mgmt->u.action.u.plink_action.aux = cpu_to_le16(0x0);
- if (action == PLINK_CONFIRM) {
- pos = skb_put(skb, 4);
- /* two-byte status code followed by two-byte AID */
- memset(pos, 0, 2);
+ mgmt->u.action.category = WLAN_CATEGORY_SELF_PROTECTED;
+ mgmt->u.action.u.self_prot.action_code = action;
+
+ if (action != WLAN_SP_MESH_PEERING_CLOSE) {
+ /* capability info */
+ pos = skb_put(skb, 2);
+ memset(pos, 0, 2);
+ if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
+ /* AID */
+ pos = skb_put(skb, 2);
memcpy(pos + 2, &plid, 2);
}
- mesh_mgmt_ies_add(skb, sdata);
+ if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
+ mesh_add_rsn_ie(skb, sdata) ||
+ mesh_add_meshid_ie(skb, sdata) ||
+ mesh_add_meshconf_ie(skb, sdata))
+ return -1;
+ } else { /* WLAN_SP_MESH_PEERING_CLOSE */
+ if (mesh_add_meshid_ie(skb, sdata))
+ return -1;
}
- /* Add Peer Link Management element */
+ /* Add Mesh Peering Management element */
switch (action) {
- case PLINK_OPEN:
- ie_len = 6;
+ case WLAN_SP_MESH_PEERING_OPEN:
break;
- case PLINK_CONFIRM:
- ie_len = 8;
+ case WLAN_SP_MESH_PEERING_CONFIRM:
+ ie_len += 2;
include_plid = true;
break;
- case PLINK_CLOSE:
- default:
- if (!plid)
- ie_len = 8;
- else {
- ie_len = 10;
+ case WLAN_SP_MESH_PEERING_CLOSE:
+ if (plid) {
+ ie_len += 2;
include_plid = true;
}
+ ie_len += 2; /* reason code */
break;
+ default:
+ return -EINVAL;
}
+ if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
+ return -ENOMEM;
+
pos = skb_put(skb, 2 + ie_len);
- *pos++ = WLAN_EID_PEER_LINK;
+ *pos++ = WLAN_EID_PEER_MGMT;
*pos++ = ie_len;
- memcpy(pos, meshpeeringproto, sizeof(meshpeeringproto));
- pos += 4;
+ memcpy(pos, &peering_proto, 2);
+ pos += 2;
memcpy(pos, &llid, 2);
+ pos += 2;
if (include_plid) {
- pos += 2;
memcpy(pos, &plid, 2);
- }
- if (action == PLINK_CLOSE) {
pos += 2;
+ }
+ if (action == WLAN_SP_MESH_PEERING_CLOSE) {
memcpy(pos, &reason, 2);
+ pos += 2;
}
+ if (mesh_add_vendor_ies(skb, sdata))
+ return -1;
ieee80211_tx_skb(sdata, skb);
return 0;
@@ -322,21 +327,21 @@ static void mesh_plink_timer(unsigned long data)
++sta->plink_retries;
mod_plink_timer(sta, sta->plink_timeout);
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
- 0, 0);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
+ sta->sta.addr, llid, 0, 0);
break;
}
- reason = cpu_to_le16(MESH_MAX_RETRIES);
+ reason = cpu_to_le16(WLAN_REASON_MESH_MAX_RETRIES);
/* fall through on else */
case NL80211_PLINK_CNF_RCVD:
/* confirm timer */
if (!reason)
- reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIRM_TIMEOUT);
sta->plink_state = NL80211_PLINK_HOLDING;
mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid,
- reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case NL80211_PLINK_HOLDING:
/* holding timer */
@@ -380,7 +385,7 @@ int mesh_plink_open(struct sta_info *sta)
__le16 llid;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- if (!test_sta_flags(sta, WLAN_STA_AUTH))
+ if (!test_sta_flag(sta, WLAN_STA_AUTH))
return -EPERM;
spin_lock_bh(&sta->lock);
@@ -396,7 +401,7 @@ int mesh_plink_open(struct sta_info *sta)
mpl_dbg("Mesh plink: starting establishment with %pM\n",
sta->sta.addr);
- return mesh_plink_frame_tx(sdata, PLINK_OPEN,
+ return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
sta->sta.addr, llid, 0, 0);
}
@@ -422,7 +427,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
struct ieee802_11_elems elems;
struct sta_info *sta;
enum plink_event event;
- enum plink_frame_type ftype;
+ enum ieee80211_self_protected_actioncode ftype;
size_t baselen;
bool deactivated, matches_local = true;
u8 ie_len;
@@ -449,14 +454,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
return;
}
- baseaddr = mgmt->u.action.u.plink_action.variable;
- baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt;
- if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) {
+ baseaddr = mgmt->u.action.u.self_prot.variable;
+ baselen = (u8 *) mgmt->u.action.u.self_prot.variable - (u8 *) mgmt;
+ if (mgmt->u.action.u.self_prot.action_code ==
+ WLAN_SP_MESH_PEERING_CONFIRM) {
baseaddr += 4;
baselen += 4;
}
ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
- if (!elems.peer_link) {
+ if (!elems.peering) {
mpl_dbg("Mesh plink: missing necessary peer link ie\n");
return;
}
@@ -466,37 +472,40 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
return;
}
- ftype = mgmt->u.action.u.plink_action.action_code;
- ie_len = elems.peer_link_len;
- if ((ftype == PLINK_OPEN && ie_len != 6) ||
- (ftype == PLINK_CONFIRM && ie_len != 8) ||
- (ftype == PLINK_CLOSE && ie_len != 8 && ie_len != 10)) {
+ ftype = mgmt->u.action.u.self_prot.action_code;
+ ie_len = elems.peering_len;
+ if ((ftype == WLAN_SP_MESH_PEERING_OPEN && ie_len != 4) ||
+ (ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) ||
+ (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6
+ && ie_len != 8)) {
mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n",
ftype, ie_len);
return;
}
- if (ftype != PLINK_CLOSE && (!elems.mesh_id || !elems.mesh_config)) {
+ if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
+ (!elems.mesh_id || !elems.mesh_config)) {
mpl_dbg("Mesh plink: missing necessary ie\n");
return;
}
/* Note the lines below are correct, the llid in the frame is the plid
* from the point of view of this host.
*/
- memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2);
- if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 10))
- memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2);
+ memcpy(&plid, PLINK_GET_LLID(elems.peering), 2);
+ if (ftype == WLAN_SP_MESH_PEERING_CONFIRM ||
+ (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8))
+ memcpy(&llid, PLINK_GET_PLID(elems.peering), 2);
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
- if (!sta && ftype != PLINK_OPEN) {
+ if (!sta && ftype != WLAN_SP_MESH_PEERING_OPEN) {
mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
rcu_read_unlock();
return;
}
- if (sta && !test_sta_flags(sta, WLAN_STA_AUTH)) {
+ if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) {
mpl_dbg("Mesh plink: Action frame from non-authed peer\n");
rcu_read_unlock();
return;
@@ -509,30 +518,30 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
/* Now we will figure out the appropriate event... */
event = PLINK_UNDEFINED;
- if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) {
+ if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
+ (!mesh_matches_local(&elems, sdata))) {
matches_local = false;
switch (ftype) {
- case PLINK_OPEN:
+ case WLAN_SP_MESH_PEERING_OPEN:
event = OPN_RJCT;
break;
- case PLINK_CONFIRM:
+ case WLAN_SP_MESH_PEERING_CONFIRM:
event = CNF_RJCT;
break;
- case PLINK_CLOSE:
- /* avoid warning */
+ default:
break;
}
}
if (!sta && !matches_local) {
rcu_read_unlock();
- reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
llid = 0;
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, mgmt->sa, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ mgmt->sa, llid, plid, reason);
return;
} else if (!sta) {
- /* ftype == PLINK_OPEN */
+ /* ftype == WLAN_SP_MESH_PEERING_OPEN */
u32 rates;
rcu_read_unlock();
@@ -557,21 +566,21 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
} else if (matches_local) {
spin_lock_bh(&sta->lock);
switch (ftype) {
- case PLINK_OPEN:
+ case WLAN_SP_MESH_PEERING_OPEN:
if (!mesh_plink_free_count(sdata) ||
(sta->plid && sta->plid != plid))
event = OPN_IGNR;
else
event = OPN_ACPT;
break;
- case PLINK_CONFIRM:
+ case WLAN_SP_MESH_PEERING_CONFIRM:
if (!mesh_plink_free_count(sdata) ||
(sta->llid != llid || sta->plid != plid))
event = CNF_IGNR;
else
event = CNF_ACPT;
break;
- case PLINK_CLOSE:
+ case WLAN_SP_MESH_PEERING_CLOSE:
if (sta->plink_state == NL80211_PLINK_ESTAB)
/* Do not check for llid or plid. This does not
* follow the standard but since multiple plinks
@@ -620,10 +629,12 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
sta->llid = llid;
mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
- 0, 0);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr,
- llid, plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_OPEN,
+ sta->sta.addr, llid, 0, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
default:
spin_unlock_bh(&sta->lock);
@@ -635,10 +646,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
switch (event) {
case OPN_RJCT:
case CNF_RJCT:
- reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
case CLS_ACPT:
if (!reason)
- reason = cpu_to_le16(MESH_CLOSE_RCVD);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
sta->reason = reason;
sta->plink_state = NL80211_PLINK_HOLDING;
if (!mod_plink_timer(sta,
@@ -647,8 +658,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case OPN_ACPT:
/* retry timer is left untouched */
@@ -656,8 +668,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
sta->plid = plid;
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
- plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
case CNF_ACPT:
sta->plink_state = NL80211_PLINK_CNF_RCVD;
@@ -677,10 +690,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
switch (event) {
case OPN_RJCT:
case CNF_RJCT:
- reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
case CLS_ACPT:
if (!reason)
- reason = cpu_to_le16(MESH_CLOSE_RCVD);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
sta->reason = reason;
sta->plink_state = NL80211_PLINK_HOLDING;
if (!mod_plink_timer(sta,
@@ -689,14 +702,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case OPN_ACPT:
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
- plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
case CNF_ACPT:
del_timer(&sta->plink_timer);
@@ -717,10 +731,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
switch (event) {
case OPN_RJCT:
case CNF_RJCT:
- reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
case CLS_ACPT:
if (!reason)
- reason = cpu_to_le16(MESH_CLOSE_RCVD);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
sta->reason = reason;
sta->plink_state = NL80211_PLINK_HOLDING;
if (!mod_plink_timer(sta,
@@ -729,8 +743,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case OPN_ACPT:
del_timer(&sta->plink_timer);
@@ -740,8 +755,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
sta->sta.addr);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
- plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
default:
spin_unlock_bh(&sta->lock);
@@ -752,7 +768,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
case NL80211_PLINK_ESTAB:
switch (event) {
case CLS_ACPT:
- reason = cpu_to_le16(MESH_CLOSE_RCVD);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
sta->reason = reason;
deactivated = __mesh_plink_deactivate(sta);
sta->plink_state = NL80211_PLINK_HOLDING;
@@ -761,14 +777,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
spin_unlock_bh(&sta->lock);
if (deactivated)
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case OPN_ACPT:
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
- plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
default:
spin_unlock_bh(&sta->lock);
@@ -790,8 +807,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
llid = sta->llid;
reason = sta->reason;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr,
- llid, plid, reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
default:
spin_unlock_bh(&sta->lock);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d6470c7fd6c..ba2da11a997 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -17,7 +17,7 @@
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <net/mac80211.h>
@@ -160,7 +160,8 @@ static int ecw2cw(int ecw)
*/
static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
struct ieee80211_ht_info *hti,
- const u8 *bssid, u16 ap_ht_cap_flags)
+ const u8 *bssid, u16 ap_ht_cap_flags,
+ bool beacon_htcap_ie)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
@@ -232,6 +233,21 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
}
+ if (beacon_htcap_ie && (prev_chantype != channel_type)) {
+ /*
+ * Whenever the AP announces the HT mode change that can be
+ * 40MHz intolerant or etc., it would be safer to stop tx
+ * queues before doing hw config to avoid buffer overflow.
+ */
+ ieee80211_stop_queues_by_reason(&sdata->local->hw,
+ IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
+
+ /* flush out all packets */
+ synchronize_net();
+
+ drv_flush(local, false);
+ }
+
/* channel_type change automatically detected */
ieee80211_hw_config(local, 0);
@@ -243,6 +259,10 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
IEEE80211_RC_HT_CHANGED,
channel_type);
rcu_read_unlock();
+
+ if (beacon_htcap_ie)
+ ieee80211_wake_queues_by_reason(&sdata->local->hw,
+ IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
}
ht_opmode = le16_to_cpu(hti->operation_mode);
@@ -271,11 +291,9 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for "
- "deauth/disassoc frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
@@ -330,6 +348,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
{
struct sk_buff *skb;
struct ieee80211_hdr_3addr *nullfunc;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
if (!skb)
@@ -340,6 +359,10 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
+ IEEE80211_STA_CONNECTION_POLL))
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
+
ieee80211_tx_skb(sdata, skb);
}
@@ -354,11 +377,9 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
return;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr "
- "nullfunc frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30);
@@ -394,6 +415,9 @@ static void ieee80211_chswitch_work(struct work_struct *work)
/* call "hw_config" only if doing sw channel switch */
ieee80211_hw_config(sdata->local,
IEEE80211_CONF_CHANGE_CHANNEL);
+ } else {
+ /* update the device channel directly */
+ sdata->local->hw.conf.channel = sdata->local->oper_channel;
}
/* XXX: shouldn't really modify cfg80211-owned data! */
@@ -608,7 +632,7 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *mgd = &sdata->u.mgd;
struct sta_info *sta = NULL;
- u32 sta_flags = 0;
+ bool authorized = false;
if (!mgd->powersave)
return false;
@@ -626,13 +650,10 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
sta = sta_info_get(sdata, mgd->bssid);
if (sta)
- sta_flags = get_sta_flags(sta);
+ authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
rcu_read_unlock();
- if (!(sta_flags & WLAN_STA_AUTHORIZED))
- return false;
-
- return true;
+ return authorized;
}
/* need to hold RTNL or interface lock */
@@ -917,8 +938,8 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
params.aifs, params.cw_min, params.cw_max,
params.txop, params.uapsd);
#endif
- local->tx_conf[queue] = params;
- if (drv_conf_tx(local, queue, &params))
+ sdata->tx_conf[queue] = params;
+ if (drv_conf_tx(local, sdata, queue, &params))
wiphy_debug(local->hw.wiphy,
"failed to set TX queue parameters for queue %d\n",
queue);
@@ -1076,7 +1097,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, bssid);
if (sta) {
- set_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta, tx);
}
mutex_unlock(&local->sta_mtx);
@@ -1118,8 +1139,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
ieee80211_bss_info_change_notify(sdata, changed);
+ /* remove AP and TDLS peers */
if (remove_sta)
- sta_info_destroy_addr(sdata, bssid);
+ sta_info_flush(local, sdata);
del_timer_sync(&sdata->u.mgd.conn_mon_timer);
del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
@@ -1220,7 +1242,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
} else {
ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0,
- (u32) -1, true);
+ (u32) -1, true, false);
}
ifmgd->probe_send_count++;
@@ -1482,17 +1504,22 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
ifmgd->aid = aid;
- sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
- if (!sta) {
- printk(KERN_DEBUG "%s: failed to alloc STA entry for"
- " the AP\n", sdata->name);
+ mutex_lock(&sdata->local->sta_mtx);
+ /*
+ * station info was already allocated and inserted before
+ * the association and should be available to us
+ */
+ sta = sta_info_get_rx(sdata, cbss->bssid);
+ if (WARN_ON(!sta)) {
+ mutex_unlock(&sdata->local->sta_mtx);
return false;
}
- set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
- WLAN_STA_ASSOC_AP);
+ set_sta_flag(sta, WLAN_STA_AUTH);
+ set_sta_flag(sta, WLAN_STA_ASSOC);
+ set_sta_flag(sta, WLAN_STA_ASSOC_AP);
if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
- set_sta_flags(sta, WLAN_STA_AUTHORIZED);
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
rates = 0;
basic_rates = 0;
@@ -1551,12 +1578,13 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
rate_control_rate_init(sta);
if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
- set_sta_flags(sta, WLAN_STA_MFP);
+ set_sta_flag(sta, WLAN_STA_MFP);
if (elems.wmm_param)
- set_sta_flags(sta, WLAN_STA_WME);
+ set_sta_flag(sta, WLAN_STA_WME);
- err = sta_info_insert(sta);
+ /* sta_info_reinsert will also unlock the mutex lock */
+ err = sta_info_reinsert(sta);
sta = NULL;
if (err) {
printk(KERN_DEBUG "%s: failed to insert STA entry for"
@@ -1584,7 +1612,8 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
(sdata->local->hw.queues >= 4) &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
- cbss->bssid, ap_ht_cap_flags);
+ cbss->bssid, ap_ht_cap_flags,
+ false);
/* set AID and assoc capability,
* ieee80211_set_associated() will tell the driver */
@@ -1918,7 +1947,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
- bssid, ap_ht_cap_flags);
+ bssid, ap_ht_cap_flags, true);
}
/* Note: country IE parsing is done for us by cfg80211 */
@@ -2429,6 +2458,29 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
return 0;
}
+/* create and insert a dummy station entry */
+static int ieee80211_pre_assoc(struct ieee80211_sub_if_data *sdata,
+ u8 *bssid) {
+ struct sta_info *sta;
+ int err;
+
+ sta = sta_info_alloc(sdata, bssid, GFP_KERNEL);
+ if (!sta)
+ return -ENOMEM;
+
+ sta->dummy = true;
+
+ err = sta_info_insert(sta);
+ sta = NULL;
+ if (err) {
+ printk(KERN_DEBUG "%s: failed to insert Dummy STA entry for"
+ " the AP (error %d)\n", sdata->name, err);
+ return err;
+ }
+
+ return 0;
+}
+
static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
struct sk_buff *skb)
{
@@ -2436,9 +2488,11 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
struct ieee80211_mgmt *mgmt;
struct ieee80211_rx_status *rx_status;
struct ieee802_11_elems elems;
+ struct cfg80211_bss *cbss = wk->assoc.bss;
u16 status;
if (!skb) {
+ sta_info_destroy_addr(wk->sdata, cbss->bssid);
cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta);
goto destroy;
}
@@ -2468,12 +2522,16 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
if (!ieee80211_assoc_success(wk, mgmt, skb->len)) {
mutex_unlock(&wk->sdata->u.mgd.mtx);
/* oops -- internal error -- send timeout for now */
+ sta_info_destroy_addr(wk->sdata, cbss->bssid);
cfg80211_send_assoc_timeout(wk->sdata->dev,
wk->filter_ta);
return WORK_DONE_DESTROY;
}
mutex_unlock(&wk->sdata->u.mgd.mtx);
+ } else {
+ /* assoc failed - destroy the dummy station entry */
+ sta_info_destroy_addr(wk->sdata, cbss->bssid);
}
cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
@@ -2492,7 +2550,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss *bss = (void *)req->bss->priv;
struct ieee80211_work *wk;
const u8 *ssid;
- int i;
+ int i, err;
mutex_lock(&ifmgd->mtx);
if (ifmgd->associated) {
@@ -2517,6 +2575,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
if (!wk)
return -ENOMEM;
+ /*
+ * create a dummy station info entry in order
+ * to start accepting incoming EAPOL packets from the station
+ */
+ err = ieee80211_pre_assoc(sdata, req->bss->bssid);
+ if (err) {
+ kfree(wk);
+ return err;
+ }
+
ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
@@ -2674,7 +2742,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
req->reason_code, cookie,
!req->local_state_change);
if (assoc_bss)
- sta_info_destroy_addr(sdata, bssid);
+ sta_info_flush(sdata->local, sdata);
mutex_lock(&sdata->local->mtx);
ieee80211_recalc_idle(sdata->local);
@@ -2714,7 +2782,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_send_deauth_disassoc(sdata, req->bss->bssid,
IEEE80211_STYPE_DISASSOC, req->reason_code,
cookie, !req->local_state_change);
- sta_info_destroy_addr(sdata, bssid);
+ sta_info_flush(sdata->local, sdata);
mutex_lock(&sdata->local->mtx);
ieee80211_recalc_idle(sdata->local);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 6326d343986..9ee7164b207 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -42,7 +42,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
- set_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta, true);
}
mutex_unlock(&local->sta_mtx);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 3d5a2cb835c..ff5c3aa48a1 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -199,7 +199,7 @@ static void rate_control_release(struct kref *kref)
kfree(ctrl_ref);
}
-static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
+static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
{
struct sk_buff *skb = txrc->skb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -208,7 +208,9 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
fc = hdr->frame_control;
- return (info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc);
+ return (info->flags & (IEEE80211_TX_CTL_NO_ACK |
+ IEEE80211_TX_CTL_USE_MINRATE)) ||
+ !ieee80211_is_data(fc);
}
static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
@@ -233,6 +235,27 @@ static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
/* could not find a basic rate; use original selection */
}
+static inline s8
+rate_lowest_non_cck_index(struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ struct ieee80211_rate *srate = &sband->bitrates[i];
+ if ((srate->bitrate == 10) || (srate->bitrate == 20) ||
+ (srate->bitrate == 55) || (srate->bitrate == 110))
+ continue;
+
+ if (rate_supported(sta, sband->band, i))
+ return i;
+ }
+
+ /* No matching rate found */
+ return 0;
+}
+
+
bool rate_control_send_low(struct ieee80211_sta *sta,
void *priv_sta,
struct ieee80211_tx_rate_control *txrc)
@@ -241,8 +264,14 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband = txrc->sband;
int mcast_rate;
- if (!sta || !priv_sta || rc_no_data_or_no_ack(txrc)) {
- info->control.rates[0].idx = rate_lowest_index(txrc->sband, sta);
+ if (!sta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
+ if ((sband->band != IEEE80211_BAND_2GHZ) ||
+ !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
+ info->control.rates[0].idx =
+ rate_lowest_index(txrc->sband, sta);
+ else
+ info->control.rates[0].idx =
+ rate_lowest_non_cck_index(txrc->sband, sta);
info->control.rates[0].count =
(info->flags & IEEE80211_TX_CTL_NO_ACK) ?
1 : txrc->hw->max_rate_tries;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 66a1eeb279c..cdb28535716 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -281,6 +281,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
if (cur_tp < mr->cur_tp) {
+ mi->max_tp_rate2 = mi->max_tp_rate;
+ cur_tp2 = cur_tp;
mi->max_tp_rate = mg->max_tp_rate;
cur_tp = mr->cur_tp;
}
@@ -452,7 +454,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
minstrel_ht_update_stats(mp, mi);
- minstrel_aggr_check(mp, sta, skb);
+ if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
+ minstrel_aggr_check(mp, sta, skb);
}
}
@@ -608,7 +611,13 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
info->flags |= mi->tx_flags;
- sample_idx = minstrel_get_sample_rate(mp, mi);
+
+ /* Don't use EAPOL frames for sampling on non-mrr hw */
+ if (mp->hw->max_rates == 1 &&
+ txrc->skb->protocol == cpu_to_be16(ETH_P_PAE))
+ sample_idx = -1;
+ else
+ sample_idx = minstrel_get_sample_rate(mp, mi);
#ifdef CONFIG_MAC80211_DEBUGFS
/* use fixed index if set */
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index fe2c2a71779..b867bd55de7 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -476,7 +476,6 @@ static ieee80211_rx_result
ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
- unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
char *dev_addr = rx->sdata->vif.addr;
if (ieee80211_is_data(hdr->frame_control)) {
@@ -524,14 +523,6 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
}
-#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
-
- if (ieee80211_is_data(hdr->frame_control) &&
- is_multicast_ether_addr(hdr->addr1) &&
- mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
- return RX_DROP_MONITOR;
-#undef msh_h_get
-
return RX_CONTINUE;
}
@@ -850,8 +841,21 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
ieee80211_is_pspoll(hdr->frame_control)) &&
rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
- (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC))))
+ (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
+ if (rx->sta && rx->sta->dummy &&
+ ieee80211_is_data_present(hdr->frame_control)) {
+ u16 ethertype;
+ u8 *payload;
+
+ payload = rx->skb->data +
+ ieee80211_hdrlen(hdr->frame_control);
+ ethertype = (payload[6] << 8) | payload[7];
+ if (cpu_to_be16(ethertype) ==
+ rx->sdata->control_port_protocol)
+ return RX_CONTINUE;
+ }
return RX_DROP_MONITOR;
+ }
return RX_CONTINUE;
}
@@ -1106,7 +1110,7 @@ static void ap_sta_ps_start(struct sta_info *sta)
struct ieee80211_local *local = sdata->local;
atomic_inc(&sdata->bss->num_sta_ps);
- set_sta_flags(sta, WLAN_STA_PS_STA);
+ set_sta_flag(sta, WLAN_STA_PS_STA);
if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
@@ -1126,7 +1130,7 @@ static void ap_sta_ps_end(struct sta_info *sta)
sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
- if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
+ if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
sdata->name, sta->sta.addr, sta->sta.aid);
@@ -1145,7 +1149,7 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
/* Don't let the same PS state be set twice */
- in_ps = test_sta_flags(sta_inf, WLAN_STA_PS_STA);
+ in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
if ((start && in_ps) || (!start && !in_ps))
return -EINVAL;
@@ -1159,6 +1163,81 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
EXPORT_SYMBOL(ieee80211_sta_ps_transition);
static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
+{
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct ieee80211_hdr *hdr = (void *)rx->skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+ int tid, ac;
+
+ if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
+ return RX_CONTINUE;
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
+ return RX_CONTINUE;
+
+ /*
+ * The device handles station powersave, so don't do anything about
+ * uAPSD and PS-Poll frames (the latter shouldn't even come up from
+ * it to mac80211 since they're handled.)
+ */
+ if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
+ return RX_CONTINUE;
+
+ /*
+ * Don't do anything if the station isn't already asleep. In
+ * the uAPSD case, the station will probably be marked asleep,
+ * in the PS-Poll case the station must be confused ...
+ */
+ if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
+ return RX_CONTINUE;
+
+ if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
+ if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
+ if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
+ ieee80211_sta_ps_deliver_poll_response(rx->sta);
+ else
+ set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
+ }
+
+ /* Free PS Poll skb here instead of returning RX_DROP that would
+ * count as an dropped frame. */
+ dev_kfree_skb(rx->skb);
+
+ return RX_QUEUED;
+ } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
+ ieee80211_has_pm(hdr->frame_control) &&
+ (ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control))) {
+ tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ ac = ieee802_1d_to_ac[tid & 7];
+
+ /*
+ * If this AC is not trigger-enabled do nothing.
+ *
+ * NB: This could/should check a separate bitmap of trigger-
+ * enabled queues, but for now we only implement uAPSD w/o
+ * TSPEC changes to the ACs, so they're always the same.
+ */
+ if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
+ return RX_CONTINUE;
+
+ /* if we are in a service period, do nothing */
+ if (test_sta_flag(rx->sta, WLAN_STA_SP))
+ return RX_CONTINUE;
+
+ if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
+ ieee80211_sta_ps_deliver_uapsd(rx->sta);
+ else
+ set_sta_flag(rx->sta, WLAN_STA_UAPSD);
+ }
+
+ return RX_CONTINUE;
+}
+
+static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
struct sta_info *sta = rx->sta;
@@ -1216,7 +1295,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
- if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
+ if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
/*
* Ignore doze->wake transitions that are
* indicated by non-data frames, the standard
@@ -1469,33 +1548,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
}
static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
-{
- struct ieee80211_sub_if_data *sdata = rx->sdata;
- __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
-
- if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
- !(status->rx_flags & IEEE80211_RX_RA_MATCH)))
- return RX_CONTINUE;
-
- if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
- (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
- return RX_DROP_UNUSABLE;
-
- if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
- ieee80211_sta_ps_deliver_poll_response(rx->sta);
- else
- set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
-
- /* Free PS Poll skb here instead of returning RX_DROP that would
- * count as an dropped frame. */
- dev_kfree_skb(rx->skb);
-
- return RX_QUEUED;
-}
-
-static ieee80211_rx_result debug_noinline
ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
{
u8 *data = rx->skb->data;
@@ -1518,7 +1570,7 @@ static int
ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
{
if (unlikely(!rx->sta ||
- !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
+ !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
return -EACCES;
return 0;
@@ -1561,7 +1613,7 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
if (status->flag & RX_FLAG_DECRYPTED)
return 0;
- if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
+ if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
if (unlikely(!ieee80211_has_protected(fc) &&
ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
rx->key)) {
@@ -1827,6 +1879,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
hdrlen = ieee80211_hdrlen(hdr->frame_control);
mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+ /* frame is in RMC, don't forward */
+ if (ieee80211_is_data(hdr->frame_control) &&
+ is_multicast_ether_addr(hdr->addr1) &&
+ mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
+ return RX_DROP_MONITOR;
+
if (!ieee80211_is_data(hdr->frame_control))
return RX_CONTINUE;
@@ -1834,6 +1892,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
/* illegal frame */
return RX_DROP_MONITOR;
+ if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
+ IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
+ dropped_frames_congestion);
+ return RX_DROP_MONITOR;
+ }
+
if (mesh_hdr->flags & MESH_FLAGS_AE) {
struct mesh_path *mppath;
char *proxied_addr;
@@ -1889,13 +1953,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
memset(info, 0, sizeof(*info));
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->control.vif = &rx->sdata->vif;
- skb_set_queue_mapping(skb,
- ieee80211_select_queue(rx->sdata, fwd_skb));
- ieee80211_set_qos_hdr(local, skb);
- if (is_multicast_ether_addr(fwd_hdr->addr1))
+ if (is_multicast_ether_addr(fwd_hdr->addr1)) {
IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
fwded_mcast);
- else {
+ skb_set_queue_mapping(fwd_skb,
+ ieee80211_select_queue(sdata, fwd_skb));
+ ieee80211_set_qos_hdr(sdata, fwd_skb);
+ } else {
int err;
/*
* Save TA to addr1 to send TA a path error if a
@@ -2220,12 +2284,29 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
goto handled;
}
break;
+ case WLAN_CATEGORY_SELF_PROTECTED:
+ switch (mgmt->u.action.u.self_prot.action_code) {
+ case WLAN_SP_MESH_PEERING_OPEN:
+ case WLAN_SP_MESH_PEERING_CLOSE:
+ case WLAN_SP_MESH_PEERING_CONFIRM:
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ goto invalid;
+ if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
+ /* userspace handles this frame */
+ break;
+ goto queue;
+ case WLAN_SP_MGK_INFORM:
+ case WLAN_SP_MGK_ACK:
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ goto invalid;
+ break;
+ }
+ break;
case WLAN_CATEGORY_MESH_ACTION:
if (!ieee80211_vif_is_mesh(&sdata->vif))
break;
- goto queue;
- case WLAN_CATEGORY_MESH_PATH_SEL:
- if (!mesh_path_sel_is_hwmp(sdata))
+ if (mesh_action_is_path_sel(mgmt) &&
+ (!mesh_path_sel_is_hwmp(sdata)))
break;
goto queue;
}
@@ -2534,17 +2615,17 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
CALL_RXH(ieee80211_rx_h_decrypt)
CALL_RXH(ieee80211_rx_h_check_more_data)
+ CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
CALL_RXH(ieee80211_rx_h_sta_process)
CALL_RXH(ieee80211_rx_h_defragment)
- CALL_RXH(ieee80211_rx_h_ps_poll)
CALL_RXH(ieee80211_rx_h_michael_mic_verify)
/* must be after MMIC verify so header is counted in MPDU mic */
- CALL_RXH(ieee80211_rx_h_remove_qos_control)
- CALL_RXH(ieee80211_rx_h_amsdu)
#ifdef CONFIG_MAC80211_MESH
if (ieee80211_vif_is_mesh(&rx->sdata->vif))
CALL_RXH(ieee80211_rx_h_mesh_fwding);
#endif
+ CALL_RXH(ieee80211_rx_h_remove_qos_control)
+ CALL_RXH(ieee80211_rx_h_amsdu)
CALL_RXH(ieee80211_rx_h_data)
CALL_RXH(ieee80211_rx_h_ctrl);
CALL_RXH(ieee80211_rx_h_mgmt_check)
@@ -2686,7 +2767,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
} else if (!ieee80211_bssid_match(bssid,
sdata->vif.addr)) {
if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
- !ieee80211_is_beacon(hdr->frame_control))
+ !ieee80211_is_beacon(hdr->frame_control) &&
+ !(ieee80211_is_action(hdr->frame_control) &&
+ sdata->vif.p2p))
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
}
@@ -2791,7 +2874,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
if (ieee80211_is_data(fc)) {
prev_sta = NULL;
- for_each_sta_info(local, hdr->addr2, sta, tmp) {
+ for_each_sta_info_rx(local, hdr->addr2, sta, tmp) {
if (!prev_sta) {
prev_sta = sta;
continue;
@@ -2835,7 +2918,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
continue;
}
- rx.sta = sta_info_get_bss(prev, hdr->addr2);
+ rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
rx.sdata = prev;
ieee80211_prepare_and_rx_handle(&rx, skb, false);
@@ -2843,7 +2926,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
}
if (prev) {
- rx.sta = sta_info_get_bss(prev, hdr->addr2);
+ rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
rx.sdata = prev;
if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 6f09eca0111..83a0b050b37 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -14,7 +14,7 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <net/sch_generic.h>
#include <linux/slab.h>
#include <net/mac80211.h>
@@ -254,6 +254,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
req->ie, req->ie_len, band,
req->rates[band], 0);
local->hw_scan_req->ie_len = ielen;
+ local->hw_scan_req->no_cck = req->no_cck;
return true;
}
@@ -660,7 +661,8 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
local->scan_req->ssids[i].ssid,
local->scan_req->ssids[i].ssid_len,
local->scan_req->ie, local->scan_req->ie_len,
- local->scan_req->rates[band], false);
+ local->scan_req->rates[band], false,
+ local->scan_req->no_cck);
/*
* After sending probe requests, wait for probe responses
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 7733f66ee2c..578eea3fc04 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -32,12 +32,8 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
sizeof(struct ieee80211_msrment_ie));
-
- if (!skb) {
- printk(KERN_ERR "%s: failed to allocate buffer for "
- "measurement report frame\n", sdata->name);
+ if (!skb)
return;
- }
skb_reserve(skb, local->hw.extra_tx_headroom);
msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 21070e9bc8d..ce962d2c878 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -24,6 +24,7 @@
#include "sta_info.h"
#include "debugfs_sta.h"
#include "mesh.h"
+#include "wme.h"
/**
* DOC: STA information lifetime rules
@@ -72,7 +73,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
if (!s)
return -ENOENT;
if (s == sta) {
- rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)],
+ RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)],
s->hnext);
return 0;
}
@@ -82,7 +83,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
s = rcu_dereference_protected(s->hnext,
lockdep_is_held(&local->sta_lock));
if (rcu_access_pointer(s->hnext)) {
- rcu_assign_pointer(s->hnext, sta->hnext);
+ RCU_INIT_POINTER(s->hnext, sta->hnext);
return 0;
}
@@ -100,6 +101,27 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
while (sta) {
+ if (sta->sdata == sdata && !sta->dummy &&
+ memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+ break;
+ sta = rcu_dereference_check(sta->hnext,
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
+ }
+ return sta;
+}
+
+/* get a station info entry even if it is a dummy station*/
+struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
+ while (sta) {
if (sta->sdata == sdata &&
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
@@ -126,6 +148,32 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
while (sta) {
if ((sta->sdata == sdata ||
(sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
+ !sta->dummy &&
+ memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+ break;
+ sta = rcu_dereference_check(sta->hnext,
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
+ }
+ return sta;
+}
+
+/*
+ * Get sta info either from the specified interface
+ * or from one of its vlans (including dummy stations)
+ */
+struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
+ while (sta) {
+ if ((sta->sdata == sdata ||
+ (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
sta = rcu_dereference_check(sta->hnext,
@@ -184,7 +232,7 @@ static void sta_info_hash_add(struct ieee80211_local *local,
struct sta_info *sta)
{
sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
- rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
+ RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
}
static void sta_unblock(struct work_struct *wk)
@@ -196,13 +244,22 @@ static void sta_unblock(struct work_struct *wk)
if (sta->dead)
return;
- if (!test_sta_flags(sta, WLAN_STA_PS_STA))
+ if (!test_sta_flag(sta, WLAN_STA_PS_STA))
ieee80211_sta_ps_deliver_wakeup(sta);
- else if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) {
- clear_sta_flags(sta, WLAN_STA_PS_DRIVER);
+ else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) {
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+
+ local_bh_disable();
ieee80211_sta_ps_deliver_poll_response(sta);
+ local_bh_enable();
+ } else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD)) {
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+
+ local_bh_disable();
+ ieee80211_sta_ps_deliver_uapsd(sta);
+ local_bh_enable();
} else
- clear_sta_flags(sta, WLAN_STA_PS_DRIVER);
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
}
static int sta_prepare_rate_control(struct ieee80211_local *local,
@@ -235,7 +292,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
return NULL;
spin_lock_init(&sta->lock);
- spin_lock_init(&sta->flaglock);
INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
mutex_init(&sta->ampdu_mlme.mtx);
@@ -262,8 +318,10 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
*/
sta->timer_to_tid[i] = i;
}
- skb_queue_head_init(&sta->ps_tx_buf);
- skb_queue_head_init(&sta->tx_filtered);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ skb_queue_head_init(&sta->ps_tx_buf[i]);
+ skb_queue_head_init(&sta->tx_filtered[i]);
+ }
for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
@@ -280,7 +338,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
return sta;
}
-static int sta_info_finish_insert(struct sta_info *sta, bool async)
+static int sta_info_finish_insert(struct sta_info *sta,
+ bool async, bool dummy_reinsert)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -290,50 +349,58 @@ static int sta_info_finish_insert(struct sta_info *sta, bool async)
lockdep_assert_held(&local->sta_mtx);
- /* notify driver */
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
- err = drv_sta_add(local, sdata, &sta->sta);
- if (err) {
- if (!async)
- return err;
- printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to driver (%d)"
- " - keeping it anyway.\n",
- sdata->name, sta->sta.addr, err);
- } else {
- sta->uploaded = true;
+ if (!sta->dummy || dummy_reinsert) {
+ /* notify driver */
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data,
+ u.ap);
+ err = drv_sta_add(local, sdata, &sta->sta);
+ if (err) {
+ if (!async)
+ return err;
+ printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
+ "driver (%d) - keeping it anyway.\n",
+ sdata->name, sta->sta.addr, err);
+ } else {
+ sta->uploaded = true;
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (async)
- wiphy_debug(local->hw.wiphy,
- "Finished adding IBSS STA %pM\n",
- sta->sta.addr);
+ if (async)
+ wiphy_debug(local->hw.wiphy,
+ "Finished adding IBSS STA %pM\n",
+ sta->sta.addr);
#endif
+ }
+
+ sdata = sta->sdata;
}
- sdata = sta->sdata;
+ if (!dummy_reinsert) {
+ if (!async) {
+ local->num_sta++;
+ local->sta_generation++;
+ smp_mb();
- if (!async) {
- local->num_sta++;
- local->sta_generation++;
- smp_mb();
+ /* make the station visible */
+ spin_lock_irqsave(&local->sta_lock, flags);
+ sta_info_hash_add(local, sta);
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+ }
- /* make the station visible */
- spin_lock_irqsave(&local->sta_lock, flags);
- sta_info_hash_add(local, sta);
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ list_add(&sta->list, &local->sta_list);
+ } else {
+ sta->dummy = false;
}
- list_add(&sta->list, &local->sta_list);
-
- ieee80211_sta_debugfs_add(sta);
- rate_control_add_sta_debugfs(sta);
-
- sinfo.filled = 0;
- sinfo.generation = local->sta_generation;
- cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+ if (!sta->dummy) {
+ ieee80211_sta_debugfs_add(sta);
+ rate_control_add_sta_debugfs(sta);
+ memset(&sinfo, 0, sizeof(sinfo));
+ sinfo.filled = 0;
+ sinfo.generation = local->sta_generation;
+ cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+ }
return 0;
}
@@ -350,7 +417,7 @@ static void sta_info_finish_pending(struct ieee80211_local *local)
list_del(&sta->list);
spin_unlock_irqrestore(&local->sta_lock, flags);
- sta_info_finish_insert(sta, true);
+ sta_info_finish_insert(sta, true, false);
spin_lock_irqsave(&local->sta_lock, flags);
}
@@ -367,106 +434,117 @@ static void sta_info_finish_work(struct work_struct *work)
mutex_unlock(&local->sta_mtx);
}
-int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
+static int sta_info_insert_check(struct sta_info *sta)
{
- struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- unsigned long flags;
- int err = 0;
/*
* Can't be a WARN_ON because it can be triggered through a race:
* something inserts a STA (on one CPU) without holding the RTNL
* and another CPU turns off the net device.
*/
- if (unlikely(!ieee80211_sdata_running(sdata))) {
- err = -ENETDOWN;
- rcu_read_lock();
- goto out_free;
- }
+ if (unlikely(!ieee80211_sdata_running(sdata)))
+ return -ENETDOWN;
if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
- is_multicast_ether_addr(sta->sta.addr))) {
- err = -EINVAL;
+ is_multicast_ether_addr(sta->sta.addr)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sta_info_insert_ibss(struct sta_info *sta) __acquires(RCU)
+{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local->sta_lock, flags);
+ /* check if STA exists already */
+ if (sta_info_get_bss_rx(sdata, sta->sta.addr)) {
+ spin_unlock_irqrestore(&local->sta_lock, flags);
rcu_read_lock();
- goto out_free;
+ return -EEXIST;
}
- /*
- * In ad-hoc mode, we sometimes need to insert stations
- * from tasklet context from the RX path. To avoid races,
- * always do so in that case -- see the comment below.
- */
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
- spin_lock_irqsave(&local->sta_lock, flags);
- /* check if STA exists already */
- if (sta_info_get_bss(sdata, sta->sta.addr)) {
- spin_unlock_irqrestore(&local->sta_lock, flags);
- rcu_read_lock();
- err = -EEXIST;
- goto out_free;
- }
-
- local->num_sta++;
- local->sta_generation++;
- smp_mb();
- sta_info_hash_add(local, sta);
+ local->num_sta++;
+ local->sta_generation++;
+ smp_mb();
+ sta_info_hash_add(local, sta);
- list_add_tail(&sta->list, &local->sta_pending_list);
+ list_add_tail(&sta->list, &local->sta_pending_list);
- rcu_read_lock();
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ rcu_read_lock();
+ spin_unlock_irqrestore(&local->sta_lock, flags);
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
- sta->sta.addr);
+ wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
+ sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
- ieee80211_queue_work(&local->hw, &local->sta_finish_work);
+ ieee80211_queue_work(&local->hw, &local->sta_finish_work);
- return 0;
- }
+ return 0;
+}
+
+/*
+ * should be called with sta_mtx locked
+ * this function replaces the mutex lock
+ * with a RCU lock
+ */
+static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU)
+{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ unsigned long flags;
+ struct sta_info *exist_sta;
+ bool dummy_reinsert = false;
+ int err = 0;
+
+ lockdep_assert_held(&local->sta_mtx);
/*
* On first glance, this will look racy, because the code
- * below this point, which inserts a station with sleeping,
+ * in this function, which inserts a station with sleeping,
* unlocks the sta_lock between checking existence in the
* hash table and inserting into it.
*
* However, it is not racy against itself because it keeps
- * the mutex locked. It still seems to race against the
- * above code that atomically inserts the station... That,
- * however, is not true because the above code can only
- * be invoked for IBSS interfaces, and the below code will
- * not be -- and the two do not race against each other as
- * the hash table also keys off the interface.
+ * the mutex locked.
*/
- might_sleep();
-
- mutex_lock(&local->sta_mtx);
-
spin_lock_irqsave(&local->sta_lock, flags);
- /* check if STA exists already */
- if (sta_info_get_bss(sdata, sta->sta.addr)) {
- spin_unlock_irqrestore(&local->sta_lock, flags);
- mutex_unlock(&local->sta_mtx);
- rcu_read_lock();
- err = -EEXIST;
- goto out_free;
+ /*
+ * check if STA exists already.
+ * only accept a scenario of a second call to sta_info_insert_non_ibss
+ * with a dummy station entry that was inserted earlier
+ * in that case - assume that the dummy station flag should
+ * be removed.
+ */
+ exist_sta = sta_info_get_bss_rx(sdata, sta->sta.addr);
+ if (exist_sta) {
+ if (exist_sta == sta && sta->dummy) {
+ dummy_reinsert = true;
+ } else {
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+ mutex_unlock(&local->sta_mtx);
+ rcu_read_lock();
+ return -EEXIST;
+ }
}
spin_unlock_irqrestore(&local->sta_lock, flags);
- err = sta_info_finish_insert(sta, false);
+ err = sta_info_finish_insert(sta, false, dummy_reinsert);
if (err) {
mutex_unlock(&local->sta_mtx);
rcu_read_lock();
- goto out_free;
+ return err;
}
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
+ wiphy_debug(local->hw.wiphy, "Inserted %sSTA %pM\n",
+ sta->dummy ? "dummy " : "", sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
/* move reference to rcu-protected */
@@ -477,6 +555,51 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
mesh_accept_plinks_update(sdata);
return 0;
+}
+
+int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
+{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ int err = 0;
+
+ err = sta_info_insert_check(sta);
+ if (err) {
+ rcu_read_lock();
+ goto out_free;
+ }
+
+ /*
+ * In ad-hoc mode, we sometimes need to insert stations
+ * from tasklet context from the RX path. To avoid races,
+ * always do so in that case -- see the comment below.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ err = sta_info_insert_ibss(sta);
+ if (err)
+ goto out_free;
+
+ return 0;
+ }
+
+ /*
+ * It might seem that the function called below is in race against
+ * the function call above that atomically inserts the station... That,
+ * however, is not true because the above code can only
+ * be invoked for IBSS interfaces, and the below code will
+ * not be -- and the two do not race against each other as
+ * the hash table also keys off the interface.
+ */
+
+ might_sleep();
+
+ mutex_lock(&local->sta_mtx);
+
+ err = sta_info_insert_non_ibss(sta);
+ if (err)
+ goto out_free;
+
+ return 0;
out_free:
BUG_ON(!err);
__sta_info_free(local, sta);
@@ -492,6 +615,25 @@ int sta_info_insert(struct sta_info *sta)
return err;
}
+/* Caller must hold sta->local->sta_mtx */
+int sta_info_reinsert(struct sta_info *sta)
+{
+ struct ieee80211_local *local = sta->local;
+ int err = 0;
+
+ err = sta_info_insert_check(sta);
+ if (err) {
+ mutex_unlock(&local->sta_mtx);
+ return err;
+ }
+
+ might_sleep();
+
+ err = sta_info_insert_non_ibss(sta);
+ rcu_read_unlock();
+ return err;
+}
+
static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
{
/*
@@ -510,64 +652,93 @@ static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid)
bss->tim[aid / 8] &= ~(1 << (aid % 8));
}
-static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss,
- struct sta_info *sta)
+static unsigned long ieee80211_tids_for_ac(int ac)
{
- BUG_ON(!bss);
-
- __bss_tim_set(bss, sta->sta.aid);
-
- if (sta->local->ops->set_tim) {
- sta->local->tim_in_locked_section = true;
- drv_set_tim(sta->local, &sta->sta, true);
- sta->local->tim_in_locked_section = false;
+ /* If we ever support TIDs > 7, this obviously needs to be adjusted */
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ return BIT(6) | BIT(7);
+ case IEEE80211_AC_VI:
+ return BIT(4) | BIT(5);
+ case IEEE80211_AC_BE:
+ return BIT(0) | BIT(3);
+ case IEEE80211_AC_BK:
+ return BIT(1) | BIT(2);
+ default:
+ WARN_ON(1);
+ return 0;
}
}
-void sta_info_set_tim_bit(struct sta_info *sta)
+void sta_info_recalc_tim(struct sta_info *sta)
{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_if_ap *bss = sta->sdata->bss;
unsigned long flags;
+ bool indicate_tim = false;
+ u8 ignore_for_tim = sta->sta.uapsd_queues;
+ int ac;
- BUG_ON(!sta->sdata->bss);
+ if (WARN_ON_ONCE(!sta->sdata->bss))
+ return;
- spin_lock_irqsave(&sta->local->sta_lock, flags);
- __sta_info_set_tim_bit(sta->sdata->bss, sta);
- spin_unlock_irqrestore(&sta->local->sta_lock, flags);
-}
+ /* No need to do anything if the driver does all */
+ if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
+ return;
-static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss,
- struct sta_info *sta)
-{
- BUG_ON(!bss);
+ if (sta->dead)
+ goto done;
+
+ /*
+ * If all ACs are delivery-enabled then we should build
+ * the TIM bit for all ACs anyway; if only some are then
+ * we ignore those and build the TIM bit using only the
+ * non-enabled ones.
+ */
+ if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1)
+ ignore_for_tim = 0;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ unsigned long tids;
- __bss_tim_clear(bss, sta->sta.aid);
+ if (ignore_for_tim & BIT(ac))
+ continue;
+
+ indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) ||
+ !skb_queue_empty(&sta->ps_tx_buf[ac]);
+ if (indicate_tim)
+ break;
- if (sta->local->ops->set_tim) {
- sta->local->tim_in_locked_section = true;
- drv_set_tim(sta->local, &sta->sta, false);
- sta->local->tim_in_locked_section = false;
+ tids = ieee80211_tids_for_ac(ac);
+
+ indicate_tim |=
+ sta->driver_buffered_tids & tids;
}
-}
-void sta_info_clear_tim_bit(struct sta_info *sta)
-{
- unsigned long flags;
+ done:
+ spin_lock_irqsave(&local->sta_lock, flags);
- BUG_ON(!sta->sdata->bss);
+ if (indicate_tim)
+ __bss_tim_set(bss, sta->sta.aid);
+ else
+ __bss_tim_clear(bss, sta->sta.aid);
- spin_lock_irqsave(&sta->local->sta_lock, flags);
- __sta_info_clear_tim_bit(sta->sdata->bss, sta);
- spin_unlock_irqrestore(&sta->local->sta_lock, flags);
+ if (local->ops->set_tim) {
+ local->tim_in_locked_section = true;
+ drv_set_tim(local, &sta->sta, indicate_tim);
+ local->tim_in_locked_section = false;
+ }
+
+ spin_unlock_irqrestore(&local->sta_lock, flags);
}
-static int sta_info_buffer_expired(struct sta_info *sta,
- struct sk_buff *skb)
+static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_tx_info *info;
int timeout;
if (!skb)
- return 0;
+ return false;
info = IEEE80211_SKB_CB(skb);
@@ -581,24 +752,59 @@ static int sta_info_buffer_expired(struct sta_info *sta,
}
-static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
- struct sta_info *sta)
+static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
+ struct sta_info *sta, int ac)
{
unsigned long flags;
struct sk_buff *skb;
- if (skb_queue_empty(&sta->ps_tx_buf))
- return false;
+ /*
+ * First check for frames that should expire on the filtered
+ * queue. Frames here were rejected by the driver and are on
+ * a separate queue to avoid reordering with normal PS-buffered
+ * frames. They also aren't accounted for right now in the
+ * total_ps_buffered counter.
+ */
+ for (;;) {
+ spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
+ skb = skb_peek(&sta->tx_filtered[ac]);
+ if (sta_info_buffer_expired(sta, skb))
+ skb = __skb_dequeue(&sta->tx_filtered[ac]);
+ else
+ skb = NULL;
+ spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
+ /*
+ * Frames are queued in order, so if this one
+ * hasn't expired yet we can stop testing. If
+ * we actually reached the end of the queue we
+ * also need to stop, of course.
+ */
+ if (!skb)
+ break;
+ dev_kfree_skb(skb);
+ }
+
+ /*
+ * Now also check the normal PS-buffered queue, this will
+ * only find something if the filtered queue was emptied
+ * since the filtered frames are all before the normal PS
+ * buffered frames.
+ */
for (;;) {
- spin_lock_irqsave(&sta->ps_tx_buf.lock, flags);
- skb = skb_peek(&sta->ps_tx_buf);
+ spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
+ skb = skb_peek(&sta->ps_tx_buf[ac]);
if (sta_info_buffer_expired(sta, skb))
- skb = __skb_dequeue(&sta->ps_tx_buf);
+ skb = __skb_dequeue(&sta->ps_tx_buf[ac]);
else
skb = NULL;
- spin_unlock_irqrestore(&sta->ps_tx_buf.lock, flags);
+ spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
+ /*
+ * frames are queued in order, so if this one
+ * hasn't expired yet (or we reached the end of
+ * the queue) we can stop testing
+ */
if (!skb)
break;
@@ -608,22 +814,47 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
sta->sta.addr);
#endif
dev_kfree_skb(skb);
-
- if (skb_queue_empty(&sta->ps_tx_buf) &&
- !test_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF))
- sta_info_clear_tim_bit(sta);
}
- return true;
+ /*
+ * Finally, recalculate the TIM bit for this station -- it might
+ * now be clear because the station was too slow to retrieve its
+ * frames.
+ */
+ sta_info_recalc_tim(sta);
+
+ /*
+ * Return whether there are any frames still buffered, this is
+ * used to check whether the cleanup timer still needs to run,
+ * if there are no frames we don't need to rearm the timer.
+ */
+ return !(skb_queue_empty(&sta->ps_tx_buf[ac]) &&
+ skb_queue_empty(&sta->tx_filtered[ac]));
+}
+
+static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
+ struct sta_info *sta)
+{
+ bool have_buffered = false;
+ int ac;
+
+ /* This is only necessary for stations on BSS interfaces */
+ if (!sta->sdata->bss)
+ return false;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ have_buffered |=
+ sta_info_cleanup_expire_buffered_ac(local, sta, ac);
+
+ return have_buffered;
}
static int __must_check __sta_info_destroy(struct sta_info *sta)
{
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
- struct sk_buff *skb;
unsigned long flags;
- int ret, i;
+ int ret, i, ac;
might_sleep();
@@ -639,7 +870,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
* sessions -- block that to make sure the tear-down
* will be sufficient.
*/
- set_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta, true);
spin_lock_irqsave(&local->sta_lock, flags);
@@ -660,19 +891,22 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
sta->dead = true;
- if (test_and_clear_sta_flags(sta,
- WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
+ if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+ test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
BUG_ON(!sdata->bss);
+ clear_sta_flag(sta, WLAN_STA_PS_STA);
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+
atomic_dec(&sdata->bss->num_sta_ps);
- sta_info_clear_tim_bit(sta);
+ sta_info_recalc_tim(sta);
}
local->num_sta--;
local->sta_generation++;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- rcu_assign_pointer(sdata->u.vlan.sta, NULL);
+ RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
if (sta->uploaded) {
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -691,6 +925,12 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
*/
synchronize_rcu();
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
+ __skb_queue_purge(&sta->ps_tx_buf[ac]);
+ __skb_queue_purge(&sta->tx_filtered[ac]);
+ }
+
#ifdef CONFIG_MAC80211_MESH
if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_accept_plinks_update(sdata);
@@ -713,14 +953,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
}
#endif
- while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
- local->total_ps_buffered--;
- dev_kfree_skb_any(skb);
- }
-
- while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
- dev_kfree_skb_any(skb);
-
__sta_info_free(local, sta);
return 0;
@@ -732,7 +964,7 @@ int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
int ret;
mutex_lock(&sdata->local->sta_mtx);
- sta = sta_info_get(sdata, addr);
+ sta = sta_info_get_rx(sdata, addr);
ret = __sta_info_destroy(sta);
mutex_unlock(&sdata->local->sta_mtx);
@@ -746,7 +978,7 @@ int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
int ret;
mutex_lock(&sdata->local->sta_mtx);
- sta = sta_info_get_bss(sdata, addr);
+ sta = sta_info_get_bss_rx(sdata, addr);
ret = __sta_info_destroy(sta);
mutex_unlock(&sdata->local->sta_mtx);
@@ -886,7 +1118,8 @@ static void clear_sta_ps_flags(void *_sta)
{
struct sta_info *sta = _sta;
- clear_sta_flags(sta, WLAN_STA_PS_DRIVER | WLAN_STA_PS_STA);
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+ clear_sta_flag(sta, WLAN_STA_PS_STA);
}
/* powersave support code */
@@ -894,88 +1127,341 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
- int sent, buffered;
+ struct sk_buff_head pending;
+ int filtered = 0, buffered = 0, ac;
+
+ clear_sta_flag(sta, WLAN_STA_SP);
+
+ BUILD_BUG_ON(BITS_TO_LONGS(STA_TID_NUM) > 1);
+ sta->driver_buffered_tids = 0;
- clear_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
- if (!skb_queue_empty(&sta->ps_tx_buf))
- sta_info_clear_tim_bit(sta);
+ skb_queue_head_init(&pending);
/* Send all buffered frames to the station */
- sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
- buffered = ieee80211_add_pending_skbs_fn(local, &sta->ps_tx_buf,
- clear_sta_ps_flags, sta);
- sent += buffered;
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ int count = skb_queue_len(&pending), tmp;
+
+ skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
+ tmp = skb_queue_len(&pending);
+ filtered += tmp - count;
+ count = tmp;
+
+ skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
+ tmp = skb_queue_len(&pending);
+ buffered += tmp - count;
+ }
+
+ ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
+
local->total_ps_buffered -= buffered;
+ sta_info_recalc_tim(sta);
+
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
"since STA not sleeping anymore\n", sdata->name,
- sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
+ sta->sta.addr, sta->sta.aid, filtered, buffered);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
-void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
+static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, int tid,
+ enum ieee80211_frame_release_type reason)
{
- struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_qos_hdr *nullfunc;
struct sk_buff *skb;
- int no_pending_pkts;
+ int size = sizeof(*nullfunc);
+ __le16 fc;
+ bool qos = test_sta_flag(sta, WLAN_STA_WME);
+ struct ieee80211_tx_info *info;
- skb = skb_dequeue(&sta->tx_filtered);
- if (!skb) {
- skb = skb_dequeue(&sta->ps_tx_buf);
- if (skb)
- local->total_ps_buffered--;
+ if (qos) {
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+ } else {
+ size -= 2;
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+ }
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ nullfunc = (void *) skb_put(skb, size);
+ nullfunc->frame_control = fc;
+ nullfunc->duration_id = 0;
+ memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+ memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
+
+ skb->priority = tid;
+ skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
+ if (qos) {
+ nullfunc->qos_ctrl = cpu_to_le16(tid);
+
+ if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
+ nullfunc->qos_ctrl |=
+ cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
}
- no_pending_pkts = skb_queue_empty(&sta->tx_filtered) &&
- skb_queue_empty(&sta->ps_tx_buf);
- if (skb) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr =
- (struct ieee80211_hdr *) skb->data;
+ info = IEEE80211_SKB_CB(skb);
+
+ /*
+ * Tell TX path to send this frame even though the
+ * STA may still remain is PS mode after this frame
+ * exchange. Also set EOSP to indicate this packet
+ * ends the poll/service period.
+ */
+ info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE |
+ IEEE80211_TX_STATUS_EOSP |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+ drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false);
+
+ ieee80211_xmit(sdata, skb);
+}
+
+static void
+ieee80211_sta_ps_deliver_response(struct sta_info *sta,
+ int n_frames, u8 ignored_acs,
+ enum ieee80211_frame_release_type reason)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+ bool found = false;
+ bool more_data = false;
+ int ac;
+ unsigned long driver_release_tids = 0;
+ struct sk_buff_head frames;
+
+ /* Service or PS-Poll period starts */
+ set_sta_flag(sta, WLAN_STA_SP);
+
+ __skb_queue_head_init(&frames);
+
+ /*
+ * Get response frame(s) and more data bit for it.
+ */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ unsigned long tids;
+
+ if (ignored_acs & BIT(ac))
+ continue;
+
+ tids = ieee80211_tids_for_ac(ac);
+
+ if (!found) {
+ driver_release_tids = sta->driver_buffered_tids & tids;
+ if (driver_release_tids) {
+ found = true;
+ } else {
+ struct sk_buff *skb;
+
+ while (n_frames > 0) {
+ skb = skb_dequeue(&sta->tx_filtered[ac]);
+ if (!skb) {
+ skb = skb_dequeue(
+ &sta->ps_tx_buf[ac]);
+ if (skb)
+ local->total_ps_buffered--;
+ }
+ if (!skb)
+ break;
+ n_frames--;
+ found = true;
+ __skb_queue_tail(&frames, skb);
+ }
+ }
+
+ /*
+ * If the driver has data on more than one TID then
+ * certainly there's more data if we release just a
+ * single frame now (from a single TID).
+ */
+ if (reason == IEEE80211_FRAME_RELEASE_PSPOLL &&
+ hweight16(driver_release_tids) > 1) {
+ more_data = true;
+ driver_release_tids =
+ BIT(ffs(driver_release_tids) - 1);
+ break;
+ }
+ }
+
+ if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
+ !skb_queue_empty(&sta->ps_tx_buf[ac])) {
+ more_data = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ int tid;
/*
- * Tell TX path to send this frame even though the STA may
- * still remain is PS mode after this frame exchange.
+ * For PS-Poll, this can only happen due to a race condition
+ * when we set the TIM bit and the station notices it, but
+ * before it can poll for the frame we expire it.
+ *
+ * For uAPSD, this is said in the standard (11.2.1.5 h):
+ * At each unscheduled SP for a non-AP STA, the AP shall
+ * attempt to transmit at least one MSDU or MMPDU, but no
+ * more than the value specified in the Max SP Length field
+ * in the QoS Capability element from delivery-enabled ACs,
+ * that are destined for the non-AP STA.
+ *
+ * Since we have no other MSDU/MMPDU, transmit a QoS null frame.
*/
- info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE;
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
- sta->sta.addr, sta->sta.aid,
- skb_queue_len(&sta->ps_tx_buf));
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+ /* This will evaluate to 1, 3, 5 or 7. */
+ tid = 7 - ((ffs(~ignored_acs) - 1) << 1);
- /* Use MoreData flag to indicate whether there are more
- * buffered frames for this STA */
- if (no_pending_pkts)
- hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
- else
- hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ ieee80211_send_null_response(sdata, sta, tid, reason);
+ return;
+ }
+
+ if (!driver_release_tids) {
+ struct sk_buff_head pending;
+ struct sk_buff *skb;
+ int num = 0;
+ u16 tids = 0;
+
+ skb_queue_head_init(&pending);
+
+ while ((skb = __skb_dequeue(&frames))) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *) skb->data;
+ u8 *qoshdr = NULL;
+
+ num++;
+
+ /*
+ * Tell TX path to send this frame even though the
+ * STA may still remain is PS mode after this frame
+ * exchange.
+ */
+ info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE;
+
+ /*
+ * Use MoreData flag to indicate whether there are
+ * more buffered frames for this STA
+ */
+ if (!more_data)
+ hdr->frame_control &=
+ cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
+ else
+ hdr->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+
+ if (ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control))
+ qoshdr = ieee80211_get_qos_ctl(hdr);
+
+ /* set EOSP for the frame */
+ if (reason == IEEE80211_FRAME_RELEASE_UAPSD &&
+ qoshdr && skb_queue_empty(&frames))
+ *qoshdr |= IEEE80211_QOS_CTL_EOSP;
+
+ info->flags |= IEEE80211_TX_STATUS_EOSP |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+ if (qoshdr)
+ tids |= BIT(*qoshdr & IEEE80211_QOS_CTL_TID_MASK);
+ else
+ tids |= BIT(0);
+
+ __skb_queue_tail(&pending, skb);
+ }
- ieee80211_add_pending_skb(local, skb);
+ drv_allow_buffered_frames(local, sta, tids, num,
+ reason, more_data);
- if (no_pending_pkts)
- sta_info_clear_tim_bit(sta);
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
+ ieee80211_add_pending_skbs(local, &pending);
+
+ sta_info_recalc_tim(sta);
} else {
/*
- * FIXME: This can be the result of a race condition between
- * us expiring a frame and the station polling for it.
- * Should we send it a null-func frame indicating we
- * have nothing buffered for it?
+ * We need to release a frame that is buffered somewhere in the
+ * driver ... it'll have to handle that.
+ * Note that, as per the comment above, it'll also have to see
+ * if there is more than just one frame on the specific TID that
+ * we're releasing from, and it needs to set the more-data bit
+ * accordingly if we tell it that there's no more data. If we do
+ * tell it there's more data, then of course the more-data bit
+ * needs to be set anyway.
+ */
+ drv_release_buffered_frames(local, sta, driver_release_tids,
+ n_frames, reason, more_data);
+
+ /*
+ * Note that we don't recalculate the TIM bit here as it would
+ * most likely have no effect at all unless the driver told us
+ * that the TID became empty before returning here from the
+ * release function.
+ * Either way, however, when the driver tells us that the TID
+ * became empty we'll do the TIM recalculation.
*/
- printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
- "though there are no buffered frames for it\n",
- sdata->name, sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
}
+void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
+{
+ u8 ignore_for_response = sta->sta.uapsd_queues;
+
+ /*
+ * If all ACs are delivery-enabled then we should reply
+ * from any of them, if only some are enabled we reply
+ * only from the non-enabled ones.
+ */
+ if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1)
+ ignore_for_response = 0;
+
+ ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response,
+ IEEE80211_FRAME_RELEASE_PSPOLL);
+}
+
+void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta)
+{
+ int n_frames = sta->sta.max_sp;
+ u8 delivery_enabled = sta->sta.uapsd_queues;
+
+ /*
+ * If we ever grow support for TSPEC this might happen if
+ * the TSPEC update from hostapd comes in between a trigger
+ * frame setting WLAN_STA_UAPSD in the RX path and this
+ * actually getting called.
+ */
+ if (!delivery_enabled)
+ return;
+
+ switch (sta->sta.max_sp) {
+ case 1:
+ n_frames = 2;
+ break;
+ case 2:
+ n_frames = 4;
+ break;
+ case 3:
+ n_frames = 6;
+ break;
+ case 0:
+ /* XXX: what is a good value? */
+ n_frames = 8;
+ break;
+ }
+
+ ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled,
+ IEEE80211_FRAME_RELEASE_UAPSD);
+}
+
void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
struct ieee80211_sta *pubsta, bool block)
{
@@ -984,17 +1470,50 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
trace_api_sta_block_awake(sta->local, pubsta, block);
if (block)
- set_sta_flags(sta, WLAN_STA_PS_DRIVER);
- else if (test_sta_flags(sta, WLAN_STA_PS_DRIVER))
+ set_sta_flag(sta, WLAN_STA_PS_DRIVER);
+ else if (test_sta_flag(sta, WLAN_STA_PS_DRIVER))
ieee80211_queue_work(hw, &sta->drv_unblock_wk);
}
EXPORT_SYMBOL(ieee80211_sta_block_awake);
-void ieee80211_sta_set_tim(struct ieee80211_sta *pubsta)
+void ieee80211_sta_eosp_irqsafe(struct ieee80211_sta *pubsta)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ struct ieee80211_local *local = sta->local;
+ struct sk_buff *skb;
+ struct skb_eosp_msg_data *data;
+
+ trace_api_eosp(local, pubsta);
+
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb) {
+ /* too bad ... but race is better than loss */
+ clear_sta_flag(sta, WLAN_STA_SP);
+ return;
+ }
+
+ data = (void *)skb->cb;
+ memcpy(data->sta, pubsta->addr, ETH_ALEN);
+ memcpy(data->iface, sta->sdata->vif.addr, ETH_ALEN);
+ skb->pkt_type = IEEE80211_EOSP_MSG;
+ skb_queue_tail(&local->skb_queue, skb);
+ tasklet_schedule(&local->tasklet);
+}
+EXPORT_SYMBOL(ieee80211_sta_eosp_irqsafe);
+
+void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
+ u8 tid, bool buffered)
+{
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+
+ if (WARN_ON(tid >= STA_TID_NUM))
+ return;
+
+ if (buffered)
+ set_bit(tid, &sta->driver_buffered_tids);
+ else
+ clear_bit(tid, &sta->driver_buffered_tids);
- set_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
- sta_info_set_tim_bit(sta);
+ sta_info_recalc_tim(sta);
}
-EXPORT_SYMBOL(ieee80211_sta_set_tim);
+EXPORT_SYMBOL(ieee80211_sta_set_buffered);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 28beb78e601..8c8ce05ad26 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -19,7 +19,8 @@
/**
* enum ieee80211_sta_info_flags - Stations flags
*
- * These flags are used with &struct sta_info's @flags member.
+ * These flags are used with &struct sta_info's @flags member, but
+ * only indirectly with set_sta_flag() and friends.
*
* @WLAN_STA_AUTH: Station is authenticated.
* @WLAN_STA_ASSOC: Station is associated.
@@ -43,24 +44,33 @@
* be in the queues
* @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
* station in power-save mode, reply when the driver unblocks.
- * @WLAN_STA_PS_DRIVER_BUF: Station has frames pending in driver internal
- * buffers. Automatically cleared on station wake-up.
+ * @WLAN_STA_TDLS_PEER: Station is a TDLS peer.
+ * @WLAN_STA_TDLS_PEER_AUTH: This TDLS peer is authorized to send direct
+ * packets. This means the link is enabled.
+ * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was
+ * keeping station in power-save mode, reply when the driver
+ * unblocks the station.
+ * @WLAN_STA_SP: Station is in a service period, so don't try to
+ * reply to other uAPSD trigger frames or PS-Poll.
*/
enum ieee80211_sta_info_flags {
- WLAN_STA_AUTH = 1<<0,
- WLAN_STA_ASSOC = 1<<1,
- WLAN_STA_PS_STA = 1<<2,
- WLAN_STA_AUTHORIZED = 1<<3,
- WLAN_STA_SHORT_PREAMBLE = 1<<4,
- WLAN_STA_ASSOC_AP = 1<<5,
- WLAN_STA_WME = 1<<6,
- WLAN_STA_WDS = 1<<7,
- WLAN_STA_CLEAR_PS_FILT = 1<<9,
- WLAN_STA_MFP = 1<<10,
- WLAN_STA_BLOCK_BA = 1<<11,
- WLAN_STA_PS_DRIVER = 1<<12,
- WLAN_STA_PSPOLL = 1<<13,
- WLAN_STA_PS_DRIVER_BUF = 1<<14,
+ WLAN_STA_AUTH,
+ WLAN_STA_ASSOC,
+ WLAN_STA_PS_STA,
+ WLAN_STA_AUTHORIZED,
+ WLAN_STA_SHORT_PREAMBLE,
+ WLAN_STA_ASSOC_AP,
+ WLAN_STA_WME,
+ WLAN_STA_WDS,
+ WLAN_STA_CLEAR_PS_FILT,
+ WLAN_STA_MFP,
+ WLAN_STA_BLOCK_BA,
+ WLAN_STA_PS_DRIVER,
+ WLAN_STA_PSPOLL,
+ WLAN_STA_TDLS_PEER,
+ WLAN_STA_TDLS_PEER_AUTH,
+ WLAN_STA_UAPSD,
+ WLAN_STA_SP,
};
#define STA_TID_NUM 16
@@ -86,6 +96,8 @@ enum ieee80211_sta_info_flags {
* @stop_initiator: initiator of a session stop
* @tx_stop: TX DelBA frame when stopping
* @buf_size: reorder buffer size at receiver
+ * @failed_bar_ssn: ssn of the last failed BAR tx attempt
+ * @bar_pending: BAR needs to be re-sent
*
* This structure's lifetime is managed by RCU, assignments to
* the array holding it must hold the aggregation mutex.
@@ -106,6 +118,9 @@ struct tid_ampdu_tx {
u8 stop_initiator;
bool tx_stop;
u8 buf_size;
+
+ u16 failed_bar_ssn;
+ bool bar_pending;
};
/**
@@ -198,15 +213,16 @@ struct sta_ampdu_mlme {
* @last_rx_rate_flag: rx status flag of the last data packet
* @lock: used for locking all fields that require locking, see comments
* in the header file.
- * @flaglock: spinlock for flags accesses
* @drv_unblock_wk: used for driver PS unblocking
* @listen_interval: listen interval of this station, when we're acting as AP
- * @flags: STA flags, see &enum ieee80211_sta_info_flags
- * @ps_tx_buf: buffer of frames to transmit to this station
- * when it leaves power saving state
- * @tx_filtered: buffer of frames we already tried to transmit
- * but were filtered by hardware due to STA having entered
- * power saving state
+ * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
+ * @ps_tx_buf: buffers (per AC) of frames to transmit to this station
+ * when it leaves power saving state or polls
+ * @tx_filtered: buffers (per AC) of frames we already tried to
+ * transmit but were filtered by hardware due to STA having
+ * entered power saving state, these are also delivered to
+ * the station when it leaves powersave or polls for frames
+ * @driver_buffered_tids: bitmap of TIDs the driver has data buffered on
* @rx_packets: Number of MSDUs received from this STA
* @rx_bytes: Number of bytes received from this STA
* @wep_weak_iv_count: number of weak WEP IVs received from this station
@@ -238,10 +254,12 @@ struct sta_ampdu_mlme {
* @plink_timer: peer link watch timer
* @plink_timer_was_running: used by suspend/resume to restore timers
* @debugfs: debug filesystem info
- * @sta: station information we share with the driver
* @dead: set to true when sta is unlinked
* @uploaded: set to true when sta is uploaded to the driver
* @lost_packets: number of consecutive lost packets
+ * @dummy: indicate a dummy station created for receiving
+ * EAP frames before association
+ * @sta: station information we share with the driver
*/
struct sta_info {
/* General information, mostly static */
@@ -254,7 +272,6 @@ struct sta_info {
struct rate_control_ref *rate_ctrl;
void *rate_ctrl_priv;
spinlock_t lock;
- spinlock_t flaglock;
struct work_struct drv_unblock_wk;
@@ -264,18 +281,16 @@ struct sta_info {
bool uploaded;
- /*
- * frequently updated, locked with own spinlock (flaglock),
- * use the accessors defined below
- */
- u32 flags;
+ /* use the accessors defined below */
+ unsigned long _flags;
/*
* STA powersave frame queues, no more than the internal
* locking required.
*/
- struct sk_buff_head ps_tx_buf;
- struct sk_buff_head tx_filtered;
+ struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
+ struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
+ unsigned long driver_buffered_tids;
/* Updated from RX path only, no locking requirements */
unsigned long rx_packets, rx_bytes;
@@ -336,6 +351,9 @@ struct sta_info {
unsigned int lost_packets;
+ /* should be right in front of sta to be in the same cache line */
+ bool dummy;
+
/* keep last! */
struct ieee80211_sta sta;
};
@@ -348,60 +366,28 @@ static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
return NL80211_PLINK_LISTEN;
}
-static inline void set_sta_flags(struct sta_info *sta, const u32 flags)
+static inline void set_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
{
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- sta->flags |= flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
+ set_bit(flag, &sta->_flags);
}
-static inline void clear_sta_flags(struct sta_info *sta, const u32 flags)
+static inline void clear_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
{
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- sta->flags &= ~flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
+ clear_bit(flag, &sta->_flags);
}
-static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags)
+static inline int test_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
{
- u32 ret;
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- ret = sta->flags & flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
-
- return ret;
-}
-
-static inline u32 test_and_clear_sta_flags(struct sta_info *sta,
- const u32 flags)
-{
- u32 ret;
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- ret = sta->flags & flags;
- sta->flags &= ~flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
-
- return ret;
+ return test_bit(flag, &sta->_flags);
}
-static inline u32 get_sta_flags(struct sta_info *sta)
+static inline int test_and_clear_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
{
- u32 ret;
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- ret = sta->flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
-
- return ret;
+ return test_and_clear_bit(flag, &sta->_flags);
}
void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
@@ -419,8 +405,8 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
#define STA_HASH(sta) (sta[5])
-/* Maximum number of frames to buffer per power saving station */
-#define STA_MAX_TX_BUFFER 128
+/* Maximum number of frames to buffer per power saving station per AC */
+#define STA_MAX_TX_BUFFER 64
/* Minimum buffered frame expiry time. If STA uses listen interval that is
* smaller than this value, the minimum value here is used instead. */
@@ -436,9 +422,15 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
+struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
+
struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
+struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
+
static inline
void for_each_sta_info_type_check(struct ieee80211_local *local,
const u8 *addr,
@@ -459,6 +451,22 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
_sta = nxt, \
nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
) \
+ /* run code only if address matches and it's not a dummy sta */ \
+ if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0 && \
+ !_sta->dummy)
+
+#define for_each_sta_info_rx(local, _addr, _sta, nxt) \
+ for ( /* initialise loop */ \
+ _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
+ nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
+ /* typecheck */ \
+ for_each_sta_info_type_check(local, (_addr), _sta, nxt),\
+ /* continue condition */ \
+ _sta; \
+ /* advance loop */ \
+ _sta = nxt, \
+ nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
+ ) \
/* compare address and run code only if it matches */ \
if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0)
@@ -484,14 +492,14 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
int sta_info_insert(struct sta_info *sta);
int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
int sta_info_insert_atomic(struct sta_info *sta);
+int sta_info_reinsert(struct sta_info *sta);
int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
-void sta_info_set_tim_bit(struct sta_info *sta);
-void sta_info_clear_tim_bit(struct sta_info *sta);
+void sta_info_recalc_tim(struct sta_info *sta);
void sta_info_init(struct ieee80211_local *local);
void sta_info_stop(struct ieee80211_local *local);
@@ -502,5 +510,6 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
+void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
#endif /* STA_INFO_H */
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 1658efaa2e8..df643cedf9b 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -14,6 +14,7 @@
#include "rate.h"
#include "mesh.h"
#include "led.h"
+#include "wme.h"
void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
@@ -43,6 +44,8 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ int ac;
/*
* This skb 'survived' a round-trip through the driver, and
@@ -63,11 +66,37 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
sta->tx_filtered_count++;
/*
+ * Clear more-data bit on filtered frames, it might be set
+ * but later frames might time out so it might have to be
+ * clear again ... It's all rather unlikely (this frame
+ * should time out first, right?) but let's not confuse
+ * peers unnecessarily.
+ */
+ if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA))
+ hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *p = ieee80211_get_qos_ctl(hdr);
+ int tid = *p & IEEE80211_QOS_CTL_TID_MASK;
+
+ /*
+ * Clear EOSP if set, this could happen e.g.
+ * if an absence period (us being a P2P GO)
+ * shortens the SP.
+ */
+ if (*p & IEEE80211_QOS_CTL_EOSP)
+ *p &= ~IEEE80211_QOS_CTL_EOSP;
+ ac = ieee802_1d_to_ac[tid & 7];
+ } else {
+ ac = IEEE80211_AC_BE;
+ }
+
+ /*
* Clear the TX filter mask for this STA when sending the next
* packet. If the STA went to power save mode, this will happen
* when it wakes up for the next time.
*/
- set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
+ set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);
/*
* This code races in the following way:
@@ -103,13 +132,19 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
* changes before calling TX status events if ordering can be
* unknown.
*/
- if (test_sta_flags(sta, WLAN_STA_PS_STA) &&
- skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
- skb_queue_tail(&sta->tx_filtered, skb);
+ if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
+ skb_queue_len(&sta->tx_filtered[ac]) < STA_MAX_TX_BUFFER) {
+ skb_queue_tail(&sta->tx_filtered[ac], skb);
+ sta_info_recalc_tim(sta);
+
+ if (!timer_pending(&local->sta_cleanup))
+ mod_timer(&local->sta_cleanup,
+ round_jiffies(jiffies +
+ STA_INFO_CLEANUP_INTERVAL));
return;
}
- if (!test_sta_flags(sta, WLAN_STA_PS_STA) &&
+ if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
!(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
/* Software retry the packet once */
info->flags |= IEEE80211_TX_INTFL_RETRIED;
@@ -121,18 +156,38 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
if (net_ratelimit())
wiphy_debug(local->hw.wiphy,
"dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
- skb_queue_len(&sta->tx_filtered),
- !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies);
+ skb_queue_len(&sta->tx_filtered[ac]),
+ !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
#endif
dev_kfree_skb(skb);
}
+static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)
+{
+ struct tid_ampdu_tx *tid_tx;
+
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+ if (!tid_tx || !tid_tx->bar_pending)
+ return;
+
+ tid_tx->bar_pending = false;
+ ieee80211_send_bar(&sta->sdata->vif, addr, tid, tid_tx->failed_bar_ssn);
+}
+
static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (void *) skb->data;
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
+ if (ieee80211_is_data_qos(mgmt->frame_control)) {
+ struct ieee80211_hdr *hdr = (void *) skb->data;
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ u16 tid = qc[0] & 0xf;
+
+ ieee80211_check_pending_bar(sta, hdr->addr1, tid);
+ }
+
if (ieee80211_is_action(mgmt->frame_control) &&
sdata->vif.type == NL80211_IFTYPE_STATION &&
mgmt->u.action.category == WLAN_CATEGORY_HT &&
@@ -161,6 +216,114 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
}
}
+static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn)
+{
+ struct tid_ampdu_tx *tid_tx;
+
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+ if (!tid_tx)
+ return;
+
+ tid_tx->failed_bar_ssn = ssn;
+ tid_tx->bar_pending = true;
+}
+
+static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
+{
+ int len = sizeof(struct ieee80211_radiotap_header);
+
+ /* IEEE80211_RADIOTAP_RATE rate */
+ if (info->status.rates[0].idx >= 0 &&
+ !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
+ len += 2;
+
+ /* IEEE80211_RADIOTAP_TX_FLAGS */
+ len += 2;
+
+ /* IEEE80211_RADIOTAP_DATA_RETRIES */
+ len += 1;
+
+ /* IEEE80211_TX_RC_MCS */
+ if (info->status.rates[0].idx >= 0 &&
+ info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
+ len += 3;
+
+ return len;
+}
+
+static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
+ *sband, struct sk_buff *skb,
+ int retry_count, int rtap_len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_radiotap_header *rthdr;
+ unsigned char *pos;
+ __le16 txflags;
+
+ rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);
+
+ memset(rthdr, 0, rtap_len);
+ rthdr->it_len = cpu_to_le16(rtap_len);
+ rthdr->it_present =
+ cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
+ pos = (unsigned char *)(rthdr + 1);
+
+ /*
+ * XXX: Once radiotap gets the bitmap reset thing the vendor
+ * extensions proposal contains, we can actually report
+ * the whole set of tries we did.
+ */
+
+ /* IEEE80211_RADIOTAP_RATE */
+ if (info->status.rates[0].idx >= 0 &&
+ !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) {
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
+ *pos = sband->bitrates[info->status.rates[0].idx].bitrate / 5;
+ /* padding for tx flags */
+ pos += 2;
+ }
+
+ /* IEEE80211_RADIOTAP_TX_FLAGS */
+ txflags = 0;
+ if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
+ !is_multicast_ether_addr(hdr->addr1))
+ txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
+
+ if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
+ (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+ txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
+ else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
+
+ put_unaligned_le16(txflags, pos);
+ pos += 2;
+
+ /* IEEE80211_RADIOTAP_DATA_RETRIES */
+ /* for now report the total retry_count */
+ *pos = retry_count;
+ pos++;
+
+ /* IEEE80211_TX_RC_MCS */
+ if (info->status.rates[0].idx >= 0 &&
+ info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
+ pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
+ IEEE80211_RADIOTAP_MCS_HAVE_GI |
+ IEEE80211_RADIOTAP_MCS_HAVE_BW;
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ pos[1] |= IEEE80211_RADIOTAP_MCS_SGI;
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40;
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
+ pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF;
+ pos[2] = info->status.rates[0].idx;
+ pos += 3;
+ }
+
+}
+
/*
* Use a static threshold for now, best value to be determined
* by testing ...
@@ -179,7 +342,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
u16 frag, type;
__le16 fc;
struct ieee80211_supported_band *sband;
- struct ieee80211_tx_status_rtap_hdr *rthdr;
struct ieee80211_sub_if_data *sdata;
struct net_device *prev_dev = NULL;
struct sta_info *sta, *tmp;
@@ -187,6 +349,9 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
int rates_idx = -1;
bool send_to_cooked;
bool acked;
+ struct ieee80211_bar *bar;
+ u16 tid;
+ int rtap_len;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if (info->status.rates[i].idx < 0) {
@@ -215,8 +380,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
continue;
+ if (info->flags & IEEE80211_TX_STATUS_EOSP)
+ clear_sta_flag(sta, WLAN_STA_SP);
+
acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
- if (!acked && test_sta_flags(sta, WLAN_STA_PS_STA)) {
+ if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
/*
* The STA is in power save mode, so assume
* that this TX packet failed because of that.
@@ -239,10 +407,31 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
tid = qc[0] & 0xf;
ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
& IEEE80211_SCTL_SEQ);
- ieee80211_send_bar(sta->sdata, hdr->addr1,
+ ieee80211_send_bar(&sta->sdata->vif, hdr->addr1,
tid, ssn);
}
+ if (!acked && ieee80211_is_back_req(fc)) {
+ u16 control;
+
+ /*
+ * BAR failed, store the last SSN and retry sending
+ * the BAR when the next unicast transmission on the
+ * same TID succeeds.
+ */
+ bar = (struct ieee80211_bar *) skb->data;
+ control = le16_to_cpu(bar->control);
+ if (!(control & IEEE80211_BAR_CTRL_MULTI_TID)) {
+ u16 ssn = le16_to_cpu(bar->start_seq_num);
+
+ tid = (control &
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+
+ ieee80211_set_bar_pending(sta, tid, ssn);
+ }
+ }
+
if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
ieee80211_handle_filtered_frame(local, sta, skb);
rcu_read_unlock();
@@ -336,7 +525,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
continue;
if (wk->offchan_tx.frame != skb)
continue;
- wk->offchan_tx.frame = NULL;
+ wk->offchan_tx.status = true;
break;
}
rcu_read_unlock();
@@ -345,9 +534,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
local->hw_roc_skb_for_status = NULL;
}
- if (cookie == local->hw_offchan_tx_cookie)
- local->hw_offchan_tx_cookie = 0;
-
cfg80211_mgmt_tx_status(
skb->dev, cookie, skb->data, skb->len,
!!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
@@ -370,44 +556,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
}
/* send frame to monitor interfaces now */
-
- if (skb_headroom(skb) < sizeof(*rthdr)) {
+ rtap_len = ieee80211_tx_radiotap_len(info);
+ if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
dev_kfree_skb(skb);
return;
}
-
- rthdr = (struct ieee80211_tx_status_rtap_hdr *)
- skb_push(skb, sizeof(*rthdr));
-
- memset(rthdr, 0, sizeof(*rthdr));
- rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
- rthdr->hdr.it_present =
- cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
- (1 << IEEE80211_RADIOTAP_DATA_RETRIES) |
- (1 << IEEE80211_RADIOTAP_RATE));
-
- if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
- !is_multicast_ether_addr(hdr->addr1))
- rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
-
- /*
- * XXX: Once radiotap gets the bitmap reset thing the vendor
- * extensions proposal contains, we can actually report
- * the whole set of tries we did.
- */
- if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
- (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
- rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
- else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
- rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
- if (info->status.rates[0].idx >= 0 &&
- !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
- rthdr->rate = sband->bitrates[
- info->status.rates[0].idx].bitrate / 5;
-
- /* for now report the total retry_count */
- rthdr->data_retries = retry_count;
+ ieee80211_add_tx_radiotap_header(sband, skb, retry_count, rtap_len);
/* XXX: is this sufficient for BPF? */
skb_set_mac_header(skb, 0);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8cb0d2d0ac6..48bbb96d8ed 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -253,7 +253,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
- u32 sta_flags;
+ bool assoc = false;
if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
return TX_CONTINUE;
@@ -284,10 +284,11 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
if (tx->flags & IEEE80211_TX_PS_BUFFERED)
return TX_CONTINUE;
- sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
+ if (tx->sta)
+ assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
- if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
+ if (unlikely(!assoc &&
tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
ieee80211_is_data(hdr->frame_control))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -343,13 +344,22 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
total += skb_queue_len(&ap->ps_bc_buf);
}
+ /*
+ * Drop one frame from each station from the lowest-priority
+ * AC that has frames at all.
+ */
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- skb = skb_dequeue(&sta->ps_tx_buf);
- if (skb) {
- purged++;
- dev_kfree_skb(skb);
+ int ac;
+
+ for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) {
+ skb = skb_dequeue(&sta->ps_tx_buf[ac]);
+ total += skb_queue_len(&sta->ps_tx_buf[ac]);
+ if (skb) {
+ purged++;
+ dev_kfree_skb(skb);
+ break;
+ }
}
- total += skb_queue_len(&sta->ps_tx_buf);
}
rcu_read_unlock();
@@ -418,7 +428,7 @@ static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
if (!ieee80211_is_mgmt(fc))
return 0;
- if (sta == NULL || !test_sta_flags(sta, WLAN_STA_MFP))
+ if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
return 0;
if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
@@ -435,7 +445,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ieee80211_local *local = tx->local;
- u32 staflags;
if (unlikely(!sta ||
ieee80211_is_probe_resp(hdr->frame_control) ||
@@ -444,57 +453,52 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
ieee80211_is_reassoc_resp(hdr->frame_control)))
return TX_CONTINUE;
- staflags = get_sta_flags(sta);
+ if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
+ test_sta_flag(sta, WLAN_STA_PS_DRIVER)) &&
+ !(info->flags & IEEE80211_TX_CTL_POLL_RESPONSE))) {
+ int ac = skb_get_queue_mapping(tx->skb);
- if (unlikely((staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) &&
- !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries "
- "before %d)\n",
- sta->sta.addr, sta->sta.aid,
- skb_queue_len(&sta->ps_tx_buf));
+ printk(KERN_DEBUG "STA %pM aid %d: PS buffer for AC %d\n",
+ sta->sta.addr, sta->sta.aid, ac);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
- if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) {
- struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf);
+ if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
+ struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: STA %pM TX "
- "buffer full - dropping oldest frame\n",
- tx->sdata->name, sta->sta.addr);
- }
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: STA %pM TX buffer for "
+ "AC %d full - dropping oldest frame\n",
+ tx->sdata->name, sta->sta.addr, ac);
#endif
dev_kfree_skb(old);
} else
tx->local->total_ps_buffered++;
- /*
- * Queue frame to be sent after STA wakes up/polls,
- * but don't set the TIM bit if the driver is blocking
- * wakeup or poll response transmissions anyway.
- */
- if (skb_queue_empty(&sta->ps_tx_buf) &&
- !(staflags & WLAN_STA_PS_DRIVER))
- sta_info_set_tim_bit(sta);
-
info->control.jiffies = jiffies;
info->control.vif = &tx->sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
- skb_queue_tail(&sta->ps_tx_buf, tx->skb);
+ skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
if (!timer_pending(&local->sta_cleanup))
mod_timer(&local->sta_cleanup,
round_jiffies(jiffies +
STA_INFO_CLEANUP_INTERVAL));
+ /*
+ * We queued up some frames, so the TIM bit might
+ * need to be set, recalculate it.
+ */
+ sta_info_recalc_tim(sta);
+
return TX_QUEUED;
}
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- else if (unlikely(staflags & WLAN_STA_PS_STA)) {
- printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
- "set -> send frame\n", tx->sdata->name,
- sta->sta.addr);
+ else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
+ printk(KERN_DEBUG
+ "%s: STA %pM in PS mode, but polling/in SP -> send frame\n",
+ tx->sdata->name, sta->sta.addr);
}
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
@@ -552,7 +556,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
(!ieee80211_is_robust_mgmt_frame(hdr) ||
(ieee80211_is_action(hdr->frame_control) &&
- tx->sta && test_sta_flags(tx->sta, WLAN_STA_MFP)))) {
+ tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))) {
I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
return TX_DROP;
} else
@@ -611,7 +615,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
u32 len;
bool inval = false, rts = false, short_preamble = false;
struct ieee80211_tx_rate_control txrc;
- u32 sta_flags;
+ bool assoc = false;
memset(&txrc, 0, sizeof(txrc));
@@ -647,17 +651,17 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
*/
if (tx->sdata->vif.bss_conf.use_short_preamble &&
(ieee80211_is_data(hdr->frame_control) ||
- (tx->sta && test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
+ (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
txrc.short_preamble = short_preamble = true;
- sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
+ if (tx->sta)
+ assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
/*
* Lets not bother rate control if we're associated and cannot
* talk to the sta. This should not happen.
*/
- if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) &&
- (sta_flags & WLAN_STA_ASSOC) &&
+ if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc &&
!rate_usable_index_exists(sband, &tx->sta->sta),
"%s: Dropped data frame as no usable bitrate found while "
"scanning and associated. Target station: "
@@ -800,6 +804,9 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
if (ieee80211_hdrlen(hdr->frame_control) < 24)
return TX_CONTINUE;
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+ return TX_CONTINUE;
+
/*
* Anything but QoS data that has a sequence number field
* (is long enough) gets a sequence number from the global
@@ -891,7 +898,10 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
int hdrlen;
int fragnum;
- if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
+ if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
+ return TX_CONTINUE;
+
+ if (tx->local->ops->set_frag_threshold)
return TX_CONTINUE;
/*
@@ -904,7 +914,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
hdrlen = ieee80211_hdrlen(hdr->frame_control);
- /* internal error, why is TX_FRAGMENTED set? */
+ /* internal error, why isn't DONTFRAG set? */
if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
return TX_DROP;
@@ -1025,100 +1035,6 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
/* actual transmit path */
-/*
- * deal with packet injection down monitor interface
- * with Radiotap Header -- only called for monitor mode interface
- */
-static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
- struct sk_buff *skb)
-{
- /*
- * this is the moment to interpret and discard the radiotap header that
- * must be at the start of the packet injected in Monitor mode
- *
- * Need to take some care with endian-ness since radiotap
- * args are little-endian
- */
-
- struct ieee80211_radiotap_iterator iterator;
- struct ieee80211_radiotap_header *rthdr =
- (struct ieee80211_radiotap_header *) skb->data;
- bool hw_frag;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
- NULL);
-
- info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- tx->flags &= ~IEEE80211_TX_FRAGMENTED;
-
- /* packet is fragmented in HW if we have a non-NULL driver callback */
- hw_frag = (tx->local->ops->set_frag_threshold != NULL);
-
- /*
- * for every radiotap entry that is present
- * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
- * entries present, or -EINVAL on error)
- */
-
- while (!ret) {
- ret = ieee80211_radiotap_iterator_next(&iterator);
-
- if (ret)
- continue;
-
- /* see if this argument is something we can use */
- switch (iterator.this_arg_index) {
- /*
- * You must take care when dereferencing iterator.this_arg
- * for multibyte types... the pointer is not aligned. Use
- * get_unaligned((type *)iterator.this_arg) to dereference
- * iterator.this_arg for type "type" safely on all arches.
- */
- case IEEE80211_RADIOTAP_FLAGS:
- if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
- /*
- * this indicates that the skb we have been
- * handed has the 32-bit FCS CRC at the end...
- * we should react to that by snipping it off
- * because it will be recomputed and added
- * on transmission
- */
- if (skb->len < (iterator._max_length + FCS_LEN))
- return false;
-
- skb_trim(skb, skb->len - FCS_LEN);
- }
- if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
- info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
- if ((*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) &&
- !hw_frag)
- tx->flags |= IEEE80211_TX_FRAGMENTED;
- break;
-
- /*
- * Please update the file
- * Documentation/networking/mac80211-injection.txt
- * when parsing new fields here.
- */
-
- default:
- break;
- }
- }
-
- if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
- return false;
-
- /*
- * remove the radiotap header
- * iterator->_max_length was sanity-checked against
- * skb->len by iterator init
- */
- skb_pull(skb, iterator._max_length);
-
- return true;
-}
-
static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
struct sk_buff *skb,
struct ieee80211_tx_info *info,
@@ -1183,7 +1099,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int hdrlen, tid;
+ int tid;
u8 *qc;
memset(tx, 0, sizeof(*tx));
@@ -1191,26 +1107,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->local = local;
tx->sdata = sdata;
tx->channel = local->hw.conf.channel;
- /*
- * Set this flag (used below to indicate "automatic fragmentation"),
- * it will be cleared/left by radiotap as desired.
- * Only valid when fragmentation is done by the stack.
- */
- if (!local->ops->set_frag_threshold)
- tx->flags |= IEEE80211_TX_FRAGMENTED;
-
- /* process and remove the injection radiotap header */
- if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
- if (!__ieee80211_parse_tx_radiotap(tx, skb))
- return TX_DROP;
-
- /*
- * __ieee80211_parse_tx_radiotap has now removed
- * the radiotap header that was present and pre-filled
- * 'tx' with tx control information.
- */
- info->flags &= ~IEEE80211_TX_INTFL_HAS_RADIOTAP;
- }
/*
* If this flag is set to true anywhere, and we get here,
@@ -1232,7 +1128,9 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->sta = sta_info_get(sdata, hdr->addr1);
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
- (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
+ !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
+ (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) &&
+ !(local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) {
struct tid_ampdu_tx *tid_tx;
qc = ieee80211_get_qos_ctl(hdr);
@@ -1257,29 +1155,25 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->flags |= IEEE80211_TX_UNICAST;
if (unlikely(local->wifi_wme_noack_test))
info->flags |= IEEE80211_TX_CTL_NO_ACK;
- else
- info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
+ /*
+ * Flags are initialized to 0. Hence, no need to
+ * explicitly unset IEEE80211_TX_CTL_NO_ACK since
+ * it might already be set for injected frames.
+ */
}
- if (tx->flags & IEEE80211_TX_FRAGMENTED) {
- if ((tx->flags & IEEE80211_TX_UNICAST) &&
- skb->len + FCS_LEN > local->hw.wiphy->frag_threshold &&
- !(info->flags & IEEE80211_TX_CTL_AMPDU))
- tx->flags |= IEEE80211_TX_FRAGMENTED;
- else
- tx->flags &= ~IEEE80211_TX_FRAGMENTED;
+ if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
+ if (!(tx->flags & IEEE80211_TX_UNICAST) ||
+ skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
+ info->flags & IEEE80211_TX_CTL_AMPDU)
+ info->flags |= IEEE80211_TX_CTL_DONTFRAG;
}
if (!tx->sta)
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
- else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
+ else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT))
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
- if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
- u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
- tx->ethertype = (pos[0] << 8) | pos[1];
- }
info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
return TX_CONTINUE;
@@ -1490,11 +1384,6 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
tail_need = max_t(int, tail_need, 0);
}
- if (head_need || tail_need) {
- /* Sorry. Can't account for this any more */
- skb_orphan(skb);
- }
-
if (skb_cloned(skb))
I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
else if (head_need || tail_need)
@@ -1508,67 +1397,19 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
return -ENOMEM;
}
- /* update truesize too */
- skb->truesize += head_need + tail_need;
-
return 0;
}
-static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct ieee80211_sub_if_data *tmp_sdata;
int headroom;
bool may_encrypt;
rcu_read_lock();
- if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
- int hdrlen;
- u16 len_rthdr;
-
- info->flags |= IEEE80211_TX_CTL_INJECTED |
- IEEE80211_TX_INTFL_HAS_RADIOTAP;
-
- len_rthdr = ieee80211_get_radiotap_len(skb->data);
- hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
-
- /* check the header is complete in the frame */
- if (likely(skb->len >= len_rthdr + hdrlen)) {
- /*
- * We process outgoing injected frames that have a
- * local address we handle as though they are our
- * own frames.
- * This code here isn't entirely correct, the local
- * MAC address is not necessarily enough to find
- * the interface to use; for that proper VLAN/WDS
- * support we will need a different mechanism.
- */
-
- list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
- list) {
- if (!ieee80211_sdata_running(tmp_sdata))
- continue;
- if (tmp_sdata->vif.type ==
- NL80211_IFTYPE_MONITOR ||
- tmp_sdata->vif.type ==
- NL80211_IFTYPE_AP_VLAN ||
- tmp_sdata->vif.type ==
- NL80211_IFTYPE_WDS)
- continue;
- if (compare_ether_addr(tmp_sdata->vif.addr,
- hdr->addr2) == 0) {
- sdata = tmp_sdata;
- break;
- }
- }
- }
- }
-
may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
headroom = local->tx_headroom;
@@ -1595,11 +1436,94 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
return;
}
- ieee80211_set_qos_hdr(local, skb);
+ ieee80211_set_qos_hdr(sdata, skb);
ieee80211_tx(sdata, skb, false);
rcu_read_unlock();
}
+static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
+{
+ struct ieee80211_radiotap_iterator iterator;
+ struct ieee80211_radiotap_header *rthdr =
+ (struct ieee80211_radiotap_header *) skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
+ NULL);
+ u16 txflags;
+
+ info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
+ IEEE80211_TX_CTL_DONTFRAG;
+
+ /*
+ * for every radiotap entry that is present
+ * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
+ * entries present, or -EINVAL on error)
+ */
+
+ while (!ret) {
+ ret = ieee80211_radiotap_iterator_next(&iterator);
+
+ if (ret)
+ continue;
+
+ /* see if this argument is something we can use */
+ switch (iterator.this_arg_index) {
+ /*
+ * You must take care when dereferencing iterator.this_arg
+ * for multibyte types... the pointer is not aligned. Use
+ * get_unaligned((type *)iterator.this_arg) to dereference
+ * iterator.this_arg for type "type" safely on all arches.
+ */
+ case IEEE80211_RADIOTAP_FLAGS:
+ if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
+ /*
+ * this indicates that the skb we have been
+ * handed has the 32-bit FCS CRC at the end...
+ * we should react to that by snipping it off
+ * because it will be recomputed and added
+ * on transmission
+ */
+ if (skb->len < (iterator._max_length + FCS_LEN))
+ return false;
+
+ skb_trim(skb, skb->len - FCS_LEN);
+ }
+ if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
+ info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
+ info->flags &= ~IEEE80211_TX_CTL_DONTFRAG;
+ break;
+
+ case IEEE80211_RADIOTAP_TX_FLAGS:
+ txflags = get_unaligned_le16(iterator.this_arg);
+ if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ break;
+
+ /*
+ * Please update the file
+ * Documentation/networking/mac80211-injection.txt
+ * when parsing new fields here.
+ */
+
+ default:
+ break;
+ }
+ }
+
+ if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
+ return false;
+
+ /*
+ * remove the radiotap header
+ * iterator->_max_length was sanity-checked against
+ * skb->len by iterator init
+ */
+ skb_pull(skb, iterator._max_length);
+
+ return true;
+}
+
netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1608,7 +1532,10 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
struct ieee80211_radiotap_header *prthdr =
(struct ieee80211_radiotap_header *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_sub_if_data *tmp_sdata, *sdata;
u16 len_rthdr;
+ int hdrlen;
/*
* Frame injection is not allowed if beaconing is not allowed
@@ -1659,12 +1586,65 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
skb_set_network_header(skb, len_rthdr);
skb_set_transport_header(skb, len_rthdr);
+ if (skb->len < len_rthdr + 2)
+ goto fail;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+ if (skb->len < len_rthdr + hdrlen)
+ goto fail;
+
+ /*
+ * Initialize skb->protocol if the injected frame is a data frame
+ * carrying a rfc1042 header
+ */
+ if (ieee80211_is_data(hdr->frame_control) &&
+ skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
+ u8 *payload = (u8 *)hdr + hdrlen;
+
+ if (compare_ether_addr(payload, rfc1042_header) == 0)
+ skb->protocol = cpu_to_be16((payload[6] << 8) |
+ payload[7]);
+ }
+
memset(info, 0, sizeof(*info));
- info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_CTL_INJECTED;
+
+ /* process and remove the injection radiotap header */
+ if (!ieee80211_parse_tx_radiotap(skb))
+ goto fail;
+
+ rcu_read_lock();
+
+ /*
+ * We process outgoing injected frames that have a local address
+ * we handle as though they are non-injected frames.
+ * This code here isn't entirely correct, the local MAC address
+ * isn't always enough to find the interface to use; for proper
+ * VLAN/WDS support we will need a different mechanism (which
+ * likely isn't going to be monitor interfaces).
+ */
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(tmp_sdata))
+ continue;
+ if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
+ continue;
+ if (compare_ether_addr(tmp_sdata->vif.addr, hdr->addr2) == 0) {
+ sdata = tmp_sdata;
+ break;
+ }
+ }
+
+ ieee80211_xmit(sdata, skb);
+ rcu_read_unlock();
- /* pass the radiotap header up to xmit */
- ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb);
return NETDEV_TX_OK;
fail:
@@ -1703,8 +1683,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
int encaps_len, skip_header_bytes;
int nh_pos, h_pos;
struct sta_info *sta = NULL;
- u32 sta_flags = 0;
+ bool wme_sta = false, authorized = false, tdls_auth = false;
struct sk_buff *tmp_skb;
+ bool tdls_direct = false;
if (unlikely(skb->len < ETH_HLEN)) {
ret = NETDEV_TX_OK;
@@ -1728,7 +1709,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 30;
- sta_flags = get_sta_flags(sta);
+ authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ wme_sta = test_sta_flag(sta, WLAN_STA_WME);
}
rcu_read_unlock();
if (sta)
@@ -1816,11 +1798,50 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
break;
#endif
case NL80211_IFTYPE_STATION:
- memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
- if (sdata->u.mgd.use_4addr &&
- cpu_to_be16(ethertype) != sdata->control_port_protocol) {
- fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
+ if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
+ bool tdls_peer = false;
+
+ rcu_read_lock();
+ sta = sta_info_get(sdata, skb->data);
+ if (sta) {
+ authorized = test_sta_flag(sta,
+ WLAN_STA_AUTHORIZED);
+ wme_sta = test_sta_flag(sta, WLAN_STA_WME);
+ tdls_peer = test_sta_flag(sta,
+ WLAN_STA_TDLS_PEER);
+ tdls_auth = test_sta_flag(sta,
+ WLAN_STA_TDLS_PEER_AUTH);
+ }
+ rcu_read_unlock();
+
+ /*
+ * If the TDLS link is enabled, send everything
+ * directly. Otherwise, allow TDLS setup frames
+ * to be transmitted indirectly.
+ */
+ tdls_direct = tdls_peer && (tdls_auth ||
+ !(ethertype == ETH_P_TDLS && skb->len > 14 &&
+ skb->data[14] == WLAN_TDLS_SNAP_RFTYPE));
+ }
+
+ if (tdls_direct) {
+ /* link during setup - throw out frames to peer */
+ if (!tdls_auth) {
+ ret = NETDEV_TX_OK;
+ goto fail;
+ }
+
+ /* DA SA BSSID */
+ memcpy(hdr.addr1, skb->data, ETH_ALEN);
+ memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
+ memcpy(hdr.addr3, sdata->u.mgd.bssid, ETH_ALEN);
+ hdrlen = 24;
+ } else if (sdata->u.mgd.use_4addr &&
+ cpu_to_be16(ethertype) != sdata->control_port_protocol) {
+ fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
+ IEEE80211_FCTL_TODS);
/* RA TA DA SA */
+ memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
@@ -1828,6 +1849,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
} else {
fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
/* BSSID SA DA */
+ memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
hdrlen = 24;
@@ -1853,13 +1875,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (!is_multicast_ether_addr(hdr.addr1)) {
rcu_read_lock();
sta = sta_info_get(sdata, hdr.addr1);
- if (sta)
- sta_flags = get_sta_flags(sta);
+ if (sta) {
+ authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ wme_sta = test_sta_flag(sta, WLAN_STA_WME);
+ }
rcu_read_unlock();
}
+ /* For mesh, the use of the QoS header is mandatory */
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ wme_sta = true;
+
/* receiver and we are QoS enabled, use a QoS type frame */
- if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) {
+ if (wme_sta && local->hw.queues >= 4) {
fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
hdrlen += 2;
}
@@ -1868,12 +1896,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
* Drop unicast frames to unauthorised stations unless they are
* EAPOL frames from the local station.
*/
- if (!ieee80211_vif_is_mesh(&sdata->vif) &&
- unlikely(!is_multicast_ether_addr(hdr.addr1) &&
- !(sta_flags & WLAN_STA_AUTHORIZED) &&
- !(cpu_to_be16(ethertype) == sdata->control_port_protocol &&
- compare_ether_addr(sdata->vif.addr,
- skb->data + ETH_ALEN) == 0))) {
+ if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
+ !is_multicast_ether_addr(hdr.addr1) && !authorized &&
+ (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
+ compare_ether_addr(sdata->vif.addr, skb->data + ETH_ALEN)))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "%s: dropped frame to %pM"
@@ -2275,13 +2301,23 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.beacon.beacon_int =
cpu_to_le16(sdata->vif.bss_conf.beacon_int);
- mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
+ mgmt->u.beacon.capab_info |= cpu_to_le16(
+ sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0);
pos = skb_put(skb, 2);
*pos++ = WLAN_EID_SSID;
*pos++ = 0x0;
- mesh_mgmt_ies_add(skb, sdata);
+ if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
+ mesh_add_ds_params_ie(skb, sdata) ||
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
+ mesh_add_rsn_ie(skb, sdata) ||
+ mesh_add_meshid_ie(skb, sdata) ||
+ mesh_add_meshconf_ie(skb, sdata) ||
+ mesh_add_vendor_ies(skb, sdata)) {
+ pr_err("o11s: couldn't add ies!\n");
+ goto out;
+ }
} else {
WARN_ON(1);
goto out;
@@ -2335,11 +2371,9 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
local = sdata->local;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for "
- "pspoll template\n", sdata->name);
+ if (!skb)
return NULL;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
@@ -2375,11 +2409,9 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
local = sdata->local;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
- "template\n", sdata->name);
+ if (!skb)
return NULL;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
@@ -2414,11 +2446,8 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
ie_ssid_len + ie_len);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
- "request template\n", sdata->name);
+ if (!skb)
return NULL;
- }
skb_reserve(skb, local->hw.extra_tx_headroom);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index ddeb1b99838..7439d26bf5f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -19,7 +19,6 @@
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/bitmap.h>
-#include <linux/crc32.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
@@ -368,14 +367,14 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
-int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
- struct sk_buff_head *skbs,
- void (*fn)(void *data), void *data)
+void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
+ struct sk_buff_head *skbs,
+ void (*fn)(void *data), void *data)
{
struct ieee80211_hw *hw = &local->hw;
struct sk_buff *skb;
unsigned long flags;
- int queue, ret = 0, i;
+ int queue, i;
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
for (i = 0; i < hw->queues; i++)
@@ -390,7 +389,6 @@ int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
continue;
}
- ret++;
queue = skb_get_queue_mapping(skb);
__skb_queue_tail(&local->pending[queue], skb);
}
@@ -402,14 +400,12 @@ int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
__ieee80211_wake_queue(hw, i,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-
- return ret;
}
-int ieee80211_add_pending_skbs(struct ieee80211_local *local,
- struct sk_buff_head *skbs)
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+ struct sk_buff_head *skbs)
{
- return ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
+ ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
}
void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
@@ -573,172 +569,6 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
ieee802_11_parse_elems_crc(start, len, elems, 0, 0);
}
-u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
- struct ieee802_11_elems *elems,
- u64 filter, u32 crc)
-{
- size_t left = len;
- u8 *pos = start;
- bool calc_crc = filter != 0;
-
- memset(elems, 0, sizeof(*elems));
- elems->ie_start = start;
- elems->total_len = len;
-
- while (left >= 2) {
- u8 id, elen;
-
- id = *pos++;
- elen = *pos++;
- left -= 2;
-
- if (elen > left)
- break;
-
- if (calc_crc && id < 64 && (filter & (1ULL << id)))
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- switch (id) {
- case WLAN_EID_SSID:
- elems->ssid = pos;
- elems->ssid_len = elen;
- break;
- case WLAN_EID_SUPP_RATES:
- elems->supp_rates = pos;
- elems->supp_rates_len = elen;
- break;
- case WLAN_EID_FH_PARAMS:
- elems->fh_params = pos;
- elems->fh_params_len = elen;
- break;
- case WLAN_EID_DS_PARAMS:
- elems->ds_params = pos;
- elems->ds_params_len = elen;
- break;
- case WLAN_EID_CF_PARAMS:
- elems->cf_params = pos;
- elems->cf_params_len = elen;
- break;
- case WLAN_EID_TIM:
- if (elen >= sizeof(struct ieee80211_tim_ie)) {
- elems->tim = (void *)pos;
- elems->tim_len = elen;
- }
- break;
- case WLAN_EID_IBSS_PARAMS:
- elems->ibss_params = pos;
- elems->ibss_params_len = elen;
- break;
- case WLAN_EID_CHALLENGE:
- elems->challenge = pos;
- elems->challenge_len = elen;
- break;
- case WLAN_EID_VENDOR_SPECIFIC:
- if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
- pos[2] == 0xf2) {
- /* Microsoft OUI (00:50:F2) */
-
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- if (pos[3] == 1) {
- /* OUI Type 1 - WPA IE */
- elems->wpa = pos;
- elems->wpa_len = elen;
- } else if (elen >= 5 && pos[3] == 2) {
- /* OUI Type 2 - WMM IE */
- if (pos[4] == 0) {
- elems->wmm_info = pos;
- elems->wmm_info_len = elen;
- } else if (pos[4] == 1) {
- elems->wmm_param = pos;
- elems->wmm_param_len = elen;
- }
- }
- }
- break;
- case WLAN_EID_RSN:
- elems->rsn = pos;
- elems->rsn_len = elen;
- break;
- case WLAN_EID_ERP_INFO:
- elems->erp_info = pos;
- elems->erp_info_len = elen;
- break;
- case WLAN_EID_EXT_SUPP_RATES:
- elems->ext_supp_rates = pos;
- elems->ext_supp_rates_len = elen;
- break;
- case WLAN_EID_HT_CAPABILITY:
- if (elen >= sizeof(struct ieee80211_ht_cap))
- elems->ht_cap_elem = (void *)pos;
- break;
- case WLAN_EID_HT_INFORMATION:
- if (elen >= sizeof(struct ieee80211_ht_info))
- elems->ht_info_elem = (void *)pos;
- break;
- case WLAN_EID_MESH_ID:
- elems->mesh_id = pos;
- elems->mesh_id_len = elen;
- break;
- case WLAN_EID_MESH_CONFIG:
- if (elen >= sizeof(struct ieee80211_meshconf_ie))
- elems->mesh_config = (void *)pos;
- break;
- case WLAN_EID_PEER_LINK:
- elems->peer_link = pos;
- elems->peer_link_len = elen;
- break;
- case WLAN_EID_PREQ:
- elems->preq = pos;
- elems->preq_len = elen;
- break;
- case WLAN_EID_PREP:
- elems->prep = pos;
- elems->prep_len = elen;
- break;
- case WLAN_EID_PERR:
- elems->perr = pos;
- elems->perr_len = elen;
- break;
- case WLAN_EID_RANN:
- if (elen >= sizeof(struct ieee80211_rann_ie))
- elems->rann = (void *)pos;
- break;
- case WLAN_EID_CHANNEL_SWITCH:
- elems->ch_switch_elem = pos;
- elems->ch_switch_elem_len = elen;
- break;
- case WLAN_EID_QUIET:
- if (!elems->quiet_elem) {
- elems->quiet_elem = pos;
- elems->quiet_elem_len = elen;
- }
- elems->num_of_quiet_elem++;
- break;
- case WLAN_EID_COUNTRY:
- elems->country_elem = pos;
- elems->country_elem_len = elen;
- break;
- case WLAN_EID_PWR_CONSTRAINT:
- elems->pwr_constr_elem = pos;
- elems->pwr_constr_elem_len = elen;
- break;
- case WLAN_EID_TIMEOUT_INTERVAL:
- elems->timeout_int = pos;
- elems->timeout_int_len = elen;
- break;
- default:
- break;
- }
-
- left -= elen;
- pos += elen;
- }
-
- return crc;
-}
-
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
@@ -799,8 +629,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
qparam.uapsd = false;
- local->tx_conf[queue] = qparam;
- drv_conf_tx(local, queue, &qparam);
+ sdata->tx_conf[queue] = qparam;
+ drv_conf_tx(local, sdata, queue, &qparam);
}
/* after reinitialize QoS TX queues setting to default,
@@ -874,11 +704,9 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
sizeof(*mgmt) + 6 + extra_len);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
- "frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
@@ -1031,11 +859,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
/* FIXME: come up with a proper value */
buf = kmalloc(200 + ie_len, GFP_KERNEL);
- if (!buf) {
- printk(KERN_DEBUG "%s: failed to allocate temporary IE "
- "buffer\n", sdata->name);
+ if (!buf)
return NULL;
- }
/*
* Do not send DS Channel parameter for directed probe requests
@@ -1071,14 +896,18 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
- u32 ratemask, bool directed)
+ u32 ratemask, bool directed, bool no_cck)
{
struct sk_buff *skb;
skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len,
ie, ie_len, directed);
- if (skb)
+ if (skb) {
+ if (no_cck)
+ IEEE80211_SKB_CB(skb)->flags |=
+ IEEE80211_TX_CTL_NO_CCK_RATE;
ieee80211_tx_skb(sdata, skb);
+ }
}
u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1205,14 +1034,22 @@ int ieee80211_reconfig(struct ieee80211_local *local)
struct ieee80211_sub_if_data,
u.ap);
+ memset(&sta->sta.drv_priv, 0, hw->sta_data_size);
WARN_ON(drv_sta_add(local, sdata, &sta->sta));
}
}
mutex_unlock(&local->sta_mtx);
/* reconfigure tx conf */
- for (i = 0; i < hw->queues; i++)
- drv_conf_tx(local, i, &local->tx_conf[i]);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ !ieee80211_sdata_running(sdata))
+ continue;
+
+ for (i = 0; i < hw->queues; i++)
+ drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]);
+ }
/* reconfigure hardware */
ieee80211_hw_config(local, ~0);
@@ -1248,6 +1085,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
changed |= BSS_CHANGED_IBSS;
/* fall through */
case NL80211_IFTYPE_AP:
+ changed |= BSS_CHANGED_SSID;
+ /* fall through */
case NL80211_IFTYPE_MESH_POINT:
changed |= BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED;
@@ -1283,7 +1122,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
list_for_each_entry(sta, &local->sta_list, list) {
ieee80211_sta_tear_down_BA_sessions(sta, true);
- clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
}
mutex_unlock(&local->sta_mtx);
@@ -1522,3 +1361,60 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
_ieee80211_enable_rssi_reports(sdata, 0, 0);
}
EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
+
+int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ int rate;
+ u8 i, rates, *pos;
+
+ sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ rates = sband->n_bitrates;
+ if (rates > 8)
+ rates = 8;
+
+ if (skb_tailroom(skb) < rates + 2)
+ return -ENOMEM;
+
+ pos = skb_put(skb, rates + 2);
+ *pos++ = WLAN_EID_SUPP_RATES;
+ *pos++ = rates;
+ for (i = 0; i < rates; i++) {
+ rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ }
+
+ return 0;
+}
+
+int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ int rate;
+ u8 i, exrates, *pos;
+
+ sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ exrates = sband->n_bitrates;
+ if (exrates > 8)
+ exrates -= 8;
+ else
+ exrates = 0;
+
+ if (skb_tailroom(skb) < exrates + 2)
+ return -ENOMEM;
+
+ if (exrates) {
+ pos = skb_put(skb, exrates + 2);
+ *pos++ = WLAN_EID_EXT_SUPP_RATES;
+ *pos++ = exrates;
+ for (i = 8; i < sband->n_bitrates; i++) {
+ rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ }
+ }
+ return 0;
+}
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 7a49532f14c..fd52e695c07 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -72,7 +72,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_AP_VLAN:
sta = rcu_dereference(sdata->u.vlan.sta);
if (sta) {
- qos = get_sta_flags(sta) & WLAN_STA_WME;
+ qos = test_sta_flag(sta, WLAN_STA_WME);
break;
}
case NL80211_IFTYPE_AP:
@@ -83,11 +83,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
- /*
- * XXX: This is clearly broken ... but already was before,
- * because ieee80211_fill_mesh_addresses() would clear A1
- * except for multicast addresses.
- */
+ ra = skb->data;
break;
#endif
case NL80211_IFTYPE_STATION:
@@ -103,7 +99,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
if (!sta && ra && !is_multicast_ether_addr(ra)) {
sta = sta_info_get(sdata, ra);
if (sta)
- qos = get_sta_flags(sta) & WLAN_STA_WME;
+ qos = test_sta_flag(sta, WLAN_STA_WME);
}
rcu_read_unlock();
@@ -139,7 +135,8 @@ u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
return ieee802_1d_to_ac[skb->priority];
}
-void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
+void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -150,10 +147,11 @@ void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
- if (unlikely(local->wifi_wme_noack_test))
+ if (unlikely(sdata->local->wifi_wme_noack_test))
ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
- /* qos header is 2 bytes, second reserved */
+ /* qos header is 2 bytes */
*p++ = ack_policy | tid;
- *p = 0;
+ *p = ieee80211_vif_is_mesh(&sdata->vif) ?
+ (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0;
}
}
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index faead6d0202..34e166fbf4d 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -17,7 +17,8 @@ extern const int ieee802_1d_to_ac[8];
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
-void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb);
+void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
struct sk_buff *skb);
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 380b9a7462b..94472eb34d7 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -229,11 +229,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
wk->ie_len + /* extra IEs */
9, /* WMM */
GFP_KERNEL);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
- "frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
capab = WLAN_CAPABILITY_ESS;
@@ -460,7 +458,7 @@ ieee80211_direct_probe(struct ieee80211_work *wk)
*/
ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
wk->probe_auth.ssid_len, NULL, 0,
- (u32) -1, true);
+ (u32) -1, true, false);
wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
run_again(local, wk->timeout);
@@ -579,7 +577,7 @@ ieee80211_offchannel_tx(struct ieee80211_work *wk)
/*
* After this, offchan_tx.frame remains but now is no
* longer a valid pointer -- we still need it as the
- * cookie for canceling this work.
+ * cookie for canceling this work/status matching.
*/
ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame);
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 7bc8702808f..f614ce7bb6e 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -53,7 +53,8 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
}
if (info->control.hw_key &&
- !(tx->flags & IEEE80211_TX_FRAGMENTED) &&
+ (info->flags & IEEE80211_TX_CTL_DONTFRAG ||
+ tx->local->ops->set_frag_threshold) &&
!(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
/* hwaccel - with no need for SW-generated MMIC */
return TX_CONTINUE;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 32bff6d86cb..8260b13d93c 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -505,7 +505,7 @@ config NETFILTER_XT_TARGET_LED
echo netfilter-ssh > /sys/class/leds/<ledname>/trigger
For more information on the LEDs available on your system, see
- Documentation/leds-class.txt
+ Documentation/leds/leds-class.txt
config NETFILTER_XT_TARGET_MARK
tristate '"MARK" target support'
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 899b71c0ff5..3346829ea07 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -37,7 +37,7 @@ int nf_register_afinfo(const struct nf_afinfo *afinfo)
err = mutex_lock_interruptible(&afinfo_mutex);
if (err < 0)
return err;
- rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo);
+ RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
mutex_unlock(&afinfo_mutex);
return 0;
}
@@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(nf_register_afinfo);
void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
mutex_lock(&afinfo_mutex);
- rcu_assign_pointer(nf_afinfo[afinfo->family], NULL);
+ RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
mutex_unlock(&afinfo_mutex);
synchronize_rcu();
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 2b771dc708a..e3be48bf4dc 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2283,6 +2283,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
struct ip_vs_service *svc;
struct ip_vs_dest_user *udest_compat;
struct ip_vs_dest_user_kern udest;
+ struct netns_ipvs *ipvs = net_ipvs(net);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -2303,6 +2304,24 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
/* increase the module use count */
ip_vs_use_count_inc();
+ /* Handle daemons since they have another lock */
+ if (cmd == IP_VS_SO_SET_STARTDAEMON ||
+ cmd == IP_VS_SO_SET_STOPDAEMON) {
+ struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
+
+ if (mutex_lock_interruptible(&ipvs->sync_mutex)) {
+ ret = -ERESTARTSYS;
+ goto out_dec;
+ }
+ if (cmd == IP_VS_SO_SET_STARTDAEMON)
+ ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
+ dm->syncid);
+ else
+ ret = stop_sync_thread(net, dm->state);
+ mutex_unlock(&ipvs->sync_mutex);
+ goto out_dec;
+ }
+
if (mutex_lock_interruptible(&__ip_vs_mutex)) {
ret = -ERESTARTSYS;
goto out_dec;
@@ -2316,15 +2335,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
/* Set timeout values for (tcp tcpfin udp) */
ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
goto out_unlock;
- } else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
- struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
- ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
- dm->syncid);
- goto out_unlock;
- } else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
- struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
- ret = stop_sync_thread(net, dm->state);
- goto out_unlock;
}
usvc_compat = (struct ip_vs_service_user *)arg;
@@ -2584,6 +2594,33 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
if (copy_from_user(arg, user, copylen) != 0)
return -EFAULT;
+ /*
+ * Handle daemons first since it has its own locking
+ */
+ if (cmd == IP_VS_SO_GET_DAEMON) {
+ struct ip_vs_daemon_user d[2];
+
+ memset(&d, 0, sizeof(d));
+ if (mutex_lock_interruptible(&ipvs->sync_mutex))
+ return -ERESTARTSYS;
+
+ if (ipvs->sync_state & IP_VS_STATE_MASTER) {
+ d[0].state = IP_VS_STATE_MASTER;
+ strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
+ sizeof(d[0].mcast_ifn));
+ d[0].syncid = ipvs->master_syncid;
+ }
+ if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
+ d[1].state = IP_VS_STATE_BACKUP;
+ strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
+ sizeof(d[1].mcast_ifn));
+ d[1].syncid = ipvs->backup_syncid;
+ }
+ if (copy_to_user(user, &d, sizeof(d)) != 0)
+ ret = -EFAULT;
+ mutex_unlock(&ipvs->sync_mutex);
+ return ret;
+ }
if (mutex_lock_interruptible(&__ip_vs_mutex))
return -ERESTARTSYS;
@@ -2681,28 +2718,6 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
}
break;
- case IP_VS_SO_GET_DAEMON:
- {
- struct ip_vs_daemon_user d[2];
-
- memset(&d, 0, sizeof(d));
- if (ipvs->sync_state & IP_VS_STATE_MASTER) {
- d[0].state = IP_VS_STATE_MASTER;
- strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
- sizeof(d[0].mcast_ifn));
- d[0].syncid = ipvs->master_syncid;
- }
- if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
- d[1].state = IP_VS_STATE_BACKUP;
- strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
- sizeof(d[1].mcast_ifn));
- d[1].syncid = ipvs->backup_syncid;
- }
- if (copy_to_user(user, &d, sizeof(d)) != 0)
- ret = -EFAULT;
- }
- break;
-
default:
ret = -EINVAL;
}
@@ -3205,7 +3220,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
struct net *net = skb_sknet(skb);
struct netns_ipvs *ipvs = net_ipvs(net);
- mutex_lock(&__ip_vs_mutex);
+ mutex_lock(&ipvs->sync_mutex);
if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
ipvs->master_mcast_ifn,
@@ -3225,7 +3240,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
}
nla_put_failure:
- mutex_unlock(&__ip_vs_mutex);
+ mutex_unlock(&ipvs->sync_mutex);
return skb->len;
}
@@ -3271,13 +3286,9 @@ static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
return ip_vs_set_timeout(net, &t);
}
-static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
+static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
{
- struct ip_vs_service *svc = NULL;
- struct ip_vs_service_user_kern usvc;
- struct ip_vs_dest_user_kern udest;
int ret = 0, cmd;
- int need_full_svc = 0, need_full_dest = 0;
struct net *net;
struct netns_ipvs *ipvs;
@@ -3285,19 +3296,10 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
- mutex_lock(&__ip_vs_mutex);
-
- if (cmd == IPVS_CMD_FLUSH) {
- ret = ip_vs_flush(net);
- goto out;
- } else if (cmd == IPVS_CMD_SET_CONFIG) {
- ret = ip_vs_genl_set_config(net, info->attrs);
- goto out;
- } else if (cmd == IPVS_CMD_NEW_DAEMON ||
- cmd == IPVS_CMD_DEL_DAEMON) {
-
+ if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) {
struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
+ mutex_lock(&ipvs->sync_mutex);
if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
info->attrs[IPVS_CMD_ATTR_DAEMON],
@@ -3310,6 +3312,33 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
ret = ip_vs_genl_new_daemon(net, daemon_attrs);
else
ret = ip_vs_genl_del_daemon(net, daemon_attrs);
+out:
+ mutex_unlock(&ipvs->sync_mutex);
+ }
+ return ret;
+}
+
+static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+ struct ip_vs_service *svc = NULL;
+ struct ip_vs_service_user_kern usvc;
+ struct ip_vs_dest_user_kern udest;
+ int ret = 0, cmd;
+ int need_full_svc = 0, need_full_dest = 0;
+ struct net *net;
+ struct netns_ipvs *ipvs;
+
+ net = skb_sknet(skb);
+ ipvs = net_ipvs(net);
+ cmd = info->genlhdr->cmd;
+
+ mutex_lock(&__ip_vs_mutex);
+
+ if (cmd == IPVS_CMD_FLUSH) {
+ ret = ip_vs_flush(net);
+ goto out;
+ } else if (cmd == IPVS_CMD_SET_CONFIG) {
+ ret = ip_vs_genl_set_config(net, info->attrs);
goto out;
} else if (cmd == IPVS_CMD_ZERO &&
!info->attrs[IPVS_CMD_ATTR_SERVICE]) {
@@ -3536,13 +3565,13 @@ static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
.cmd = IPVS_CMD_NEW_DAEMON,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
- .doit = ip_vs_genl_set_cmd,
+ .doit = ip_vs_genl_set_daemon,
},
{
.cmd = IPVS_CMD_DEL_DAEMON,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
- .doit = ip_vs_genl_set_cmd,
+ .doit = ip_vs_genl_set_daemon,
},
{
.cmd = IPVS_CMD_GET_DAEMON,
@@ -3679,7 +3708,7 @@ int __net_init ip_vs_control_net_init(struct net *net)
int idx;
struct netns_ipvs *ipvs = net_ipvs(net);
- ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
+ rwlock_init(&ipvs->rs_lock);
/* Initialize rs_table */
for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 7ee7215b8ba..3cdd479f9b5 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -61,6 +61,7 @@
#define SYNC_PROTO_VER 1 /* Protocol version in header */
+static struct lock_class_key __ipvs_sync_key;
/*
* IPVS sync connection entry
* Version 0, i.e. original version.
@@ -1545,6 +1546,7 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
sizeof(struct ip_vs_sync_conn_v0));
+
if (state == IP_VS_STATE_MASTER) {
if (ipvs->master_thread)
return -EEXIST;
@@ -1667,6 +1669,7 @@ int __net_init ip_vs_sync_net_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
+ __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key);
INIT_LIST_HEAD(&ipvs->sync_queue);
spin_lock_init(&ipvs->sync_lock);
spin_lock_init(&ipvs->sync_buff_lock);
@@ -1680,7 +1683,9 @@ int __net_init ip_vs_sync_net_init(struct net *net)
void ip_vs_sync_net_cleanup(struct net *net)
{
int retc;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ mutex_lock(&ipvs->sync_mutex);
retc = stop_sync_thread(net, IP_VS_STATE_MASTER);
if (retc && retc != -ESRCH)
pr_err("Failed to stop Master Daemon\n");
@@ -1688,4 +1693,5 @@ void ip_vs_sync_net_cleanup(struct net *net)
retc = stop_sync_thread(net, IP_VS_STATE_BACKUP);
if (retc && retc != -ESRCH)
pr_err("Failed to stop Backup Daemon\n");
+ mutex_unlock(&ipvs->sync_mutex);
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f7af8b86601..5acfaf59a9c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -779,7 +779,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
if (exp->helper) {
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help)
- rcu_assign_pointer(help->helper, exp->helper);
+ RCU_INIT_POINTER(help->helper, exp->helper);
}
#ifdef CONFIG_NF_CONNTRACK_MARK
@@ -1317,7 +1317,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
void nf_conntrack_cleanup(struct net *net)
{
if (net_eq(net, &init_net))
- rcu_assign_pointer(ip_ct_attach, NULL);
+ RCU_INIT_POINTER(ip_ct_attach, NULL);
/* This makes sure all current packets have passed through
netfilter framework. Roll on, two-stage module
@@ -1327,7 +1327,7 @@ void nf_conntrack_cleanup(struct net *net)
nf_conntrack_cleanup_net(net);
if (net_eq(net, &init_net)) {
- rcu_assign_pointer(nf_ct_destroy, NULL);
+ RCU_INIT_POINTER(nf_ct_destroy, NULL);
nf_conntrack_cleanup_init_net();
}
}
@@ -1576,11 +1576,11 @@ int nf_conntrack_init(struct net *net)
if (net_eq(net, &init_net)) {
/* For use by REJECT target */
- rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
- rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
+ RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
+ RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
/* Howto get NAT offsets */
- rcu_assign_pointer(nf_ct_nat_offset, NULL);
+ RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
}
return 0;
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 63a1b915a7e..3add9943905 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -94,7 +94,7 @@ int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
ret = -EBUSY;
goto out_unlock;
}
- rcu_assign_pointer(nf_conntrack_event_cb, new);
+ RCU_INIT_POINTER(nf_conntrack_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
@@ -112,7 +112,7 @@ void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
notify = rcu_dereference_protected(nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
- rcu_assign_pointer(nf_conntrack_event_cb, NULL);
+ RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
@@ -129,7 +129,7 @@ int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
ret = -EBUSY;
goto out_unlock;
}
- rcu_assign_pointer(nf_expect_event_cb, new);
+ RCU_INIT_POINTER(nf_expect_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
@@ -147,7 +147,7 @@ void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
notify = rcu_dereference_protected(nf_expect_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
- rcu_assign_pointer(nf_expect_event_cb, NULL);
+ RCU_INIT_POINTER(nf_expect_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 05ecdc281a5..4605c947dcc 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -169,7 +169,7 @@ int nf_ct_extend_register(struct nf_ct_ext_type *type)
before updating alloc_size */
type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
+ type->len;
- rcu_assign_pointer(nf_ct_ext_types[type->id], type);
+ RCU_INIT_POINTER(nf_ct_ext_types[type->id], type);
update_alloc_size(type);
out:
mutex_unlock(&nf_ct_ext_type_mutex);
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(nf_ct_extend_register);
void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
{
mutex_lock(&nf_ct_ext_type_mutex);
- rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
+ RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
update_alloc_size(type);
mutex_unlock(&nf_ct_ext_type_mutex);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 1bdfea35795..93c4bdbfc1a 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -131,7 +131,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
if (helper == NULL) {
if (help)
- rcu_assign_pointer(help->helper, NULL);
+ RCU_INIT_POINTER(help->helper, NULL);
goto out;
}
@@ -145,7 +145,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
memset(&help->help, 0, sizeof(help->help));
}
- rcu_assign_pointer(help->helper, helper);
+ RCU_INIT_POINTER(help->helper, helper);
out:
return ret;
}
@@ -162,7 +162,7 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i,
lockdep_is_held(&nf_conntrack_lock)
) == me) {
nf_conntrack_event(IPCT_HELPER, ct);
- rcu_assign_pointer(help->helper, NULL);
+ RCU_INIT_POINTER(help->helper, NULL);
}
return 0;
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 7dec88a1755..e58aa9b1fe8 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1125,7 +1125,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
if (help && help->helper) {
/* we had a helper before ... */
nf_ct_remove_expectations(ct);
- rcu_assign_pointer(help->helper, NULL);
+ RCU_INIT_POINTER(help->helper, NULL);
}
return 0;
@@ -1163,7 +1163,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
return -EOPNOTSUPP;
}
- rcu_assign_pointer(help->helper, helper);
+ RCU_INIT_POINTER(help->helper, helper);
return 0;
}
@@ -1386,7 +1386,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
}
/* not in hash table yet so not strictly necessary */
- rcu_assign_pointer(help->helper, helper);
+ RCU_INIT_POINTER(help->helper, helper);
}
} else {
/* try an implicit helper assignation */
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index cf616e55ca4..d69facdd9a7 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -241,8 +241,8 @@ static int gre_packet(struct nf_conn *ct,
nf_ct_refresh_acct(ct, ctinfo, skb,
ct->proto.gre.stream_timeout);
/* Also, more likely to be important, and not a probe. */
- set_bit(IPS_ASSURED_BIT, &ct->status);
- nf_conntrack_event_cache(IPCT_ASSURED, ct);
+ if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
+ nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else
nf_ct_refresh_acct(ct, ctinfo, skb,
ct->proto.gre.timeout);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 20714edf6cd..ce0c406f58a 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -55,7 +55,7 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
llog = rcu_dereference_protected(nf_loggers[pf],
lockdep_is_held(&nf_log_mutex));
if (llog == NULL)
- rcu_assign_pointer(nf_loggers[pf], logger);
+ RCU_INIT_POINTER(nf_loggers[pf], logger);
}
mutex_unlock(&nf_log_mutex);
@@ -74,7 +74,7 @@ void nf_log_unregister(struct nf_logger *logger)
c_logger = rcu_dereference_protected(nf_loggers[i],
lockdep_is_held(&nf_log_mutex));
if (c_logger == logger)
- rcu_assign_pointer(nf_loggers[i], NULL);
+ RCU_INIT_POINTER(nf_loggers[i], NULL);
list_del(&logger->list[i]);
}
mutex_unlock(&nf_log_mutex);
@@ -92,7 +92,7 @@ int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
mutex_unlock(&nf_log_mutex);
return -ENOENT;
}
- rcu_assign_pointer(nf_loggers[pf], logger);
+ RCU_INIT_POINTER(nf_loggers[pf], logger);
mutex_unlock(&nf_log_mutex);
return 0;
}
@@ -103,7 +103,7 @@ void nf_log_unbind_pf(u_int8_t pf)
if (pf >= ARRAY_SIZE(nf_loggers))
return;
mutex_lock(&nf_log_mutex);
- rcu_assign_pointer(nf_loggers[pf], NULL);
+ RCU_INIT_POINTER(nf_loggers[pf], NULL);
mutex_unlock(&nf_log_mutex);
}
EXPORT_SYMBOL(nf_log_unbind_pf);
@@ -250,7 +250,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
mutex_unlock(&nf_log_mutex);
return -ENOENT;
}
- rcu_assign_pointer(nf_loggers[tindex], logger);
+ RCU_INIT_POINTER(nf_loggers[tindex], logger);
mutex_unlock(&nf_log_mutex);
} else {
mutex_lock(&nf_log_mutex);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 84d0fd47636..99ffd288508 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -40,7 +40,7 @@ int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
else if (old)
ret = -EBUSY;
else {
- rcu_assign_pointer(queue_handler[pf], qh);
+ RCU_INIT_POINTER(queue_handler[pf], qh);
ret = 0;
}
mutex_unlock(&queue_handler_mutex);
@@ -65,7 +65,7 @@ int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
return -EINVAL;
}
- rcu_assign_pointer(queue_handler[pf], NULL);
+ RCU_INIT_POINTER(queue_handler[pf], NULL);
mutex_unlock(&queue_handler_mutex);
synchronize_rcu();
@@ -84,7 +84,7 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
queue_handler[pf],
lockdep_is_held(&queue_handler_mutex)
) == qh)
- rcu_assign_pointer(queue_handler[pf], NULL);
+ RCU_INIT_POINTER(queue_handler[pf], NULL);
}
mutex_unlock(&queue_handler_mutex);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 1905976b513..c879c1a2370 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -59,7 +59,7 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
nfnl_unlock();
return -EBUSY;
}
- rcu_assign_pointer(subsys_table[n->subsys_id], n);
+ RCU_INIT_POINTER(subsys_table[n->subsys_id], n);
nfnl_unlock();
return 0;
@@ -210,7 +210,7 @@ static int __net_init nfnetlink_net_init(struct net *net)
if (!nfnl)
return -ENOMEM;
net->nfnl_stash = nfnl;
- rcu_assign_pointer(net->nfnl, nfnl);
+ RCU_INIT_POINTER(net->nfnl, nfnl);
return 0;
}
@@ -219,7 +219,7 @@ static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
struct net *net;
list_for_each_entry(net, net_exit_list, exit_list)
- rcu_assign_pointer(net->nfnl, NULL);
+ RCU_INIT_POINTER(net->nfnl, NULL);
synchronize_net();
list_for_each_entry(net, net_exit_list, exit_list)
netlink_kernel_release(net->nfnl_stash);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index b0869fe3633..71441b934ff 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -776,12 +776,11 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
size = sizeof(void **) * nr_cpu_ids;
if (size > PAGE_SIZE)
- i->jumpstack = vmalloc(size);
+ i->jumpstack = vzalloc(size);
else
- i->jumpstack = kmalloc(size, GFP_KERNEL);
+ i->jumpstack = kzalloc(size, GFP_KERNEL);
if (i->jumpstack == NULL)
return -ENOMEM;
- memset(i->jumpstack, 0, size);
i->stacksize *= xt_jumpstack_multiplier;
size = sizeof(void *) * i->stacksize;
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 7d8083cde34..3f905e5370c 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -282,7 +282,7 @@ int __init netlbl_domhsh_init(u32 size)
INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
spin_lock(&netlbl_domhsh_lock);
- rcu_assign_pointer(netlbl_domhsh, hsh_tbl);
+ RCU_INIT_POINTER(netlbl_domhsh, hsh_tbl);
spin_unlock(&netlbl_domhsh_lock);
return 0;
@@ -330,7 +330,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
&rcu_dereference(netlbl_domhsh)->tbl[bkt]);
} else {
INIT_LIST_HEAD(&entry->list);
- rcu_assign_pointer(netlbl_domhsh_def, entry);
+ RCU_INIT_POINTER(netlbl_domhsh_def, entry);
}
if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {
@@ -451,7 +451,7 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
if (entry != rcu_dereference(netlbl_domhsh_def))
list_del_rcu(&entry->list);
else
- rcu_assign_pointer(netlbl_domhsh_def, NULL);
+ RCU_INIT_POINTER(netlbl_domhsh_def, NULL);
} else
ret_val = -ENOENT;
spin_unlock(&netlbl_domhsh_lock);
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index e6e823656f9..e251c2c8852 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -354,7 +354,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
INIT_LIST_HEAD(&iface->list);
if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL)
goto add_iface_failure;
- rcu_assign_pointer(netlbl_unlhsh_def, iface);
+ RCU_INIT_POINTER(netlbl_unlhsh_def, iface);
}
spin_unlock(&netlbl_unlhsh_lock);
@@ -621,7 +621,7 @@ static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface)
if (iface->ifindex > 0)
list_del_rcu(&iface->list);
else
- rcu_assign_pointer(netlbl_unlhsh_def, NULL);
+ RCU_INIT_POINTER(netlbl_unlhsh_def, NULL);
spin_unlock(&netlbl_unlhsh_lock);
call_rcu(&iface->rcu, netlbl_unlhsh_free_iface);
@@ -1449,7 +1449,7 @@ int __init netlbl_unlabel_init(u32 size)
rcu_read_lock();
spin_lock(&netlbl_unlhsh_lock);
- rcu_assign_pointer(netlbl_unlhsh, hsh_tbl);
+ RCU_INIT_POINTER(netlbl_unlhsh, hsh_tbl);
spin_unlock(&netlbl_unlhsh_lock);
rcu_read_unlock();
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0a4db0211da..1201b6d4183 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1324,10 +1324,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (msg->msg_flags&MSG_OOB)
return -EOPNOTSUPP;
- if (NULL == siocb->scm) {
+ if (NULL == siocb->scm)
siocb->scm = &scm;
- memset(&scm, 0, sizeof(scm));
- }
+
err = scm_send(sock, msg, siocb->scm);
if (err < 0)
return err;
@@ -1578,7 +1577,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
if (!new)
return -ENOMEM;
- old = rcu_dereference_raw(tbl->listeners);
+ old = rcu_dereference_protected(tbl->listeners, 1);
memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
rcu_assign_pointer(tbl->listeners, new);
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 33e095b124b..58cddadf8e8 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -13,4 +13,6 @@ menuconfig NFC
To compile this support as a module, choose M here: the module will
be called nfc.
+source "net/nfc/nci/Kconfig"
+
source "drivers/nfc/Kconfig"
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
index 16250c35385..fbb550f2377 100644
--- a/net/nfc/Makefile
+++ b/net/nfc/Makefile
@@ -3,5 +3,6 @@
#
obj-$(CONFIG_NFC) += nfc.o
+obj-$(CONFIG_NFC_NCI) += nci/
nfc-objs := core.o netlink.o af_nfc.o rawsock.o
diff --git a/net/nfc/core.c b/net/nfc/core.c
index b6fd4e1f205..47e02c1b8c0 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -53,6 +53,80 @@ int nfc_printk(const char *level, const char *format, ...)
EXPORT_SYMBOL(nfc_printk);
/**
+ * nfc_dev_up - turn on the NFC device
+ *
+ * @dev: The nfc device to be turned on
+ *
+ * The device remains up until the nfc_dev_down function is called.
+ */
+int nfc_dev_up(struct nfc_dev *dev)
+{
+ int rc = 0;
+
+ nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+
+ device_lock(&dev->dev);
+
+ if (!device_is_registered(&dev->dev)) {
+ rc = -ENODEV;
+ goto error;
+ }
+
+ if (dev->dev_up) {
+ rc = -EALREADY;
+ goto error;
+ }
+
+ if (dev->ops->dev_up)
+ rc = dev->ops->dev_up(dev);
+
+ if (!rc)
+ dev->dev_up = true;
+
+error:
+ device_unlock(&dev->dev);
+ return rc;
+}
+
+/**
+ * nfc_dev_down - turn off the NFC device
+ *
+ * @dev: The nfc device to be turned off
+ */
+int nfc_dev_down(struct nfc_dev *dev)
+{
+ int rc = 0;
+
+ nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+
+ device_lock(&dev->dev);
+
+ if (!device_is_registered(&dev->dev)) {
+ rc = -ENODEV;
+ goto error;
+ }
+
+ if (!dev->dev_up) {
+ rc = -EALREADY;
+ goto error;
+ }
+
+ if (dev->polling || dev->remote_activated) {
+ rc = -EBUSY;
+ goto error;
+ }
+
+ if (dev->ops->dev_down)
+ dev->ops->dev_down(dev);
+
+ dev->dev_up = false;
+
+error:
+ device_unlock(&dev->dev);
+ return rc;
+}
+
+/**
* nfc_start_poll - start polling for nfc targets
*
* @dev: The nfc device that must start polling
@@ -144,6 +218,8 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
}
rc = dev->ops->activate_target(dev, target_idx, protocol);
+ if (!rc)
+ dev->remote_activated = true;
error:
device_unlock(&dev->dev);
@@ -170,6 +246,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
}
dev->ops->deactivate_target(dev, target_idx);
+ dev->remote_activated = false;
error:
device_unlock(&dev->dev);
@@ -322,7 +399,9 @@ struct nfc_dev *nfc_get_device(unsigned idx)
* @supported_protocols: NFC protocols supported by the device
*/
struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
- u32 supported_protocols)
+ u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom)
{
static atomic_t dev_no = ATOMIC_INIT(0);
struct nfc_dev *dev;
@@ -345,6 +424,8 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
dev->ops = ops;
dev->supported_protocols = supported_protocols;
+ dev->tx_headroom = tx_headroom;
+ dev->tx_tailroom = tx_tailroom;
spin_lock_init(&dev->targets_lock);
nfc_genl_data_init(&dev->genl_data);
diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig
new file mode 100644
index 00000000000..decdc49b26d
--- /dev/null
+++ b/net/nfc/nci/Kconfig
@@ -0,0 +1,10 @@
+config NFC_NCI
+ depends on NFC && EXPERIMENTAL
+ tristate "NCI protocol support (EXPERIMENTAL)"
+ default n
+ help
+ NCI (NFC Controller Interface) is a communication protocol between
+ an NFC Controller (NFCC) and a Device Host (DH).
+
+ Say Y here to compile NCI support into the kernel or say M to
+ compile it as module (nci).
diff --git a/net/nfc/nci/Makefile b/net/nfc/nci/Makefile
new file mode 100644
index 00000000000..cdb3a2e4447
--- /dev/null
+++ b/net/nfc/nci/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux NFC NCI layer.
+#
+
+obj-$(CONFIG_NFC_NCI) += nci.o
+
+nci-objs := core.o data.o lib.o ntf.o rsp.o \ No newline at end of file
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
new file mode 100644
index 00000000000..4047e29acb3
--- /dev/null
+++ b/net/nfc/nci/core.c
@@ -0,0 +1,797 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci_core.c, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/skbuff.h>
+
+#include "../nfc.h"
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/nfc.h>
+
+static void nci_cmd_work(struct work_struct *work);
+static void nci_rx_work(struct work_struct *work);
+static void nci_tx_work(struct work_struct *work);
+
+/* ---- NCI requests ---- */
+
+void nci_req_complete(struct nci_dev *ndev, int result)
+{
+ if (ndev->req_status == NCI_REQ_PEND) {
+ ndev->req_result = result;
+ ndev->req_status = NCI_REQ_DONE;
+ complete(&ndev->req_completion);
+ }
+}
+
+static void nci_req_cancel(struct nci_dev *ndev, int err)
+{
+ if (ndev->req_status == NCI_REQ_PEND) {
+ ndev->req_result = err;
+ ndev->req_status = NCI_REQ_CANCELED;
+ complete(&ndev->req_completion);
+ }
+}
+
+/* Execute request and wait for completion. */
+static int __nci_request(struct nci_dev *ndev,
+ void (*req)(struct nci_dev *ndev, unsigned long opt),
+ unsigned long opt,
+ __u32 timeout)
+{
+ int rc = 0;
+ unsigned long completion_rc;
+
+ ndev->req_status = NCI_REQ_PEND;
+
+ init_completion(&ndev->req_completion);
+ req(ndev, opt);
+ completion_rc = wait_for_completion_interruptible_timeout(
+ &ndev->req_completion,
+ timeout);
+
+ nfc_dbg("wait_for_completion return %ld", completion_rc);
+
+ if (completion_rc > 0) {
+ switch (ndev->req_status) {
+ case NCI_REQ_DONE:
+ rc = nci_to_errno(ndev->req_result);
+ break;
+
+ case NCI_REQ_CANCELED:
+ rc = -ndev->req_result;
+ break;
+
+ default:
+ rc = -ETIMEDOUT;
+ break;
+ }
+ } else {
+ nfc_err("wait_for_completion_interruptible_timeout failed %ld",
+ completion_rc);
+
+ rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
+ }
+
+ ndev->req_status = ndev->req_result = 0;
+
+ return rc;
+}
+
+static inline int nci_request(struct nci_dev *ndev,
+ void (*req)(struct nci_dev *ndev, unsigned long opt),
+ unsigned long opt, __u32 timeout)
+{
+ int rc;
+
+ if (!test_bit(NCI_UP, &ndev->flags))
+ return -ENETDOWN;
+
+ /* Serialize all requests */
+ mutex_lock(&ndev->req_lock);
+ rc = __nci_request(ndev, req, opt, timeout);
+ mutex_unlock(&ndev->req_lock);
+
+ return rc;
+}
+
+static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
+{
+ nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
+}
+
+static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
+{
+ nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
+}
+
+static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_core_conn_create_cmd conn_cmd;
+ struct nci_rf_disc_map_cmd cmd;
+ struct disc_map_config *cfg = cmd.mapping_configs;
+ __u8 *num = &cmd.num_mapping_configs;
+ int i;
+
+ /* create static rf connection */
+ conn_cmd.target_handle = 0;
+ conn_cmd.num_target_specific_params = 0;
+ nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);
+
+ /* set rf mapping configurations */
+ *num = 0;
+
+ /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
+ for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
+ if (ndev->supported_rf_interfaces[i] ==
+ NCI_RF_INTERFACE_ISO_DEP) {
+ cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
+ cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
+ cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
+ (*num)++;
+ } else if (ndev->supported_rf_interfaces[i] ==
+ NCI_RF_INTERFACE_NFC_DEP) {
+ cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
+ cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
+ cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
+ (*num)++;
+ }
+
+ if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
+ break;
+ }
+
+ nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
+ (1 + ((*num)*sizeof(struct disc_map_config))),
+ &cmd);
+}
+
+static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_rf_disc_cmd cmd;
+ __u32 protocols = opt;
+
+ cmd.num_disc_configs = 0;
+
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
+ (protocols & NFC_PROTO_JEWEL_MASK
+ || protocols & NFC_PROTO_MIFARE_MASK
+ || protocols & NFC_PROTO_ISO14443_MASK
+ || protocols & NFC_PROTO_NFC_DEP_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].type =
+ NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
+ (protocols & NFC_PROTO_ISO14443_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].type =
+ NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
+ (protocols & NFC_PROTO_FELICA_MASK
+ || protocols & NFC_PROTO_NFC_DEP_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].type =
+ NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
+ nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
+ (1 + (cmd.num_disc_configs*sizeof(struct disc_config))),
+ &cmd);
+}
+
+static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_rf_deactivate_cmd cmd;
+
+ cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
+
+ nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
+ sizeof(struct nci_rf_deactivate_cmd),
+ &cmd);
+}
+
+static int nci_open_device(struct nci_dev *ndev)
+{
+ int rc = 0;
+
+ mutex_lock(&ndev->req_lock);
+
+ if (test_bit(NCI_UP, &ndev->flags)) {
+ rc = -EALREADY;
+ goto done;
+ }
+
+ if (ndev->ops->open(ndev)) {
+ rc = -EIO;
+ goto done;
+ }
+
+ atomic_set(&ndev->cmd_cnt, 1);
+
+ set_bit(NCI_INIT, &ndev->flags);
+
+ rc = __nci_request(ndev, nci_reset_req, 0,
+ msecs_to_jiffies(NCI_RESET_TIMEOUT));
+
+ if (!rc) {
+ rc = __nci_request(ndev, nci_init_req, 0,
+ msecs_to_jiffies(NCI_INIT_TIMEOUT));
+ }
+
+ if (!rc) {
+ rc = __nci_request(ndev, nci_init_complete_req, 0,
+ msecs_to_jiffies(NCI_INIT_TIMEOUT));
+ }
+
+ clear_bit(NCI_INIT, &ndev->flags);
+
+ if (!rc) {
+ set_bit(NCI_UP, &ndev->flags);
+ } else {
+ /* Init failed, cleanup */
+ skb_queue_purge(&ndev->cmd_q);
+ skb_queue_purge(&ndev->rx_q);
+ skb_queue_purge(&ndev->tx_q);
+
+ ndev->ops->close(ndev);
+ ndev->flags = 0;
+ }
+
+done:
+ mutex_unlock(&ndev->req_lock);
+ return rc;
+}
+
+static int nci_close_device(struct nci_dev *ndev)
+{
+ nci_req_cancel(ndev, ENODEV);
+ mutex_lock(&ndev->req_lock);
+
+ if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
+ del_timer_sync(&ndev->cmd_timer);
+ mutex_unlock(&ndev->req_lock);
+ return 0;
+ }
+
+ /* Drop RX and TX queues */
+ skb_queue_purge(&ndev->rx_q);
+ skb_queue_purge(&ndev->tx_q);
+
+ /* Flush RX and TX wq */
+ flush_workqueue(ndev->rx_wq);
+ flush_workqueue(ndev->tx_wq);
+
+ /* Reset device */
+ skb_queue_purge(&ndev->cmd_q);
+ atomic_set(&ndev->cmd_cnt, 1);
+
+ set_bit(NCI_INIT, &ndev->flags);
+ __nci_request(ndev, nci_reset_req, 0,
+ msecs_to_jiffies(NCI_RESET_TIMEOUT));
+ clear_bit(NCI_INIT, &ndev->flags);
+
+ /* Flush cmd wq */
+ flush_workqueue(ndev->cmd_wq);
+
+ /* After this point our queues are empty
+ * and no works are scheduled. */
+ ndev->ops->close(ndev);
+
+ /* Clear flags */
+ ndev->flags = 0;
+
+ mutex_unlock(&ndev->req_lock);
+
+ return 0;
+}
+
+/* NCI command timer function */
+static void nci_cmd_timer(unsigned long arg)
+{
+ struct nci_dev *ndev = (void *) arg;
+
+ nfc_dbg("entry");
+
+ atomic_set(&ndev->cmd_cnt, 1);
+ queue_work(ndev->cmd_wq, &ndev->cmd_work);
+}
+
+static int nci_dev_up(struct nfc_dev *nfc_dev)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry");
+
+ return nci_open_device(ndev);
+}
+
+static int nci_dev_down(struct nfc_dev *nfc_dev)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry");
+
+ return nci_close_device(ndev);
+}
+
+static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+ int rc;
+
+ nfc_dbg("entry");
+
+ if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
+ nfc_err("unable to start poll, since poll is already active");
+ return -EBUSY;
+ }
+
+ if (ndev->target_active_prot) {
+ nfc_err("there is an active target");
+ return -EBUSY;
+ }
+
+ if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ nfc_dbg("target is active, implicitly deactivate...");
+
+ rc = nci_request(ndev, nci_rf_deactivate_req, 0,
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+ if (rc)
+ return -EBUSY;
+ }
+
+ rc = nci_request(ndev, nci_rf_discover_req, protocols,
+ msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
+
+ if (!rc)
+ ndev->poll_prots = protocols;
+
+ return rc;
+}
+
+static void nci_stop_poll(struct nfc_dev *nfc_dev)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry");
+
+ if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
+ nfc_err("unable to stop poll, since poll is not active");
+ return;
+ }
+
+ nci_request(ndev, nci_rf_deactivate_req, 0,
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+}
+
+static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
+ __u32 protocol)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);
+
+ if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ nfc_err("there is no available target to activate");
+ return -EINVAL;
+ }
+
+ if (ndev->target_active_prot) {
+ nfc_err("there is already an active target");
+ return -EBUSY;
+ }
+
+ if (!(ndev->target_available_prots & (1 << protocol))) {
+ nfc_err("target does not support the requested protocol 0x%x",
+ protocol);
+ return -EINVAL;
+ }
+
+ ndev->target_active_prot = protocol;
+ ndev->target_available_prots = 0;
+
+ return 0;
+}
+
+static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry, target_idx %d", target_idx);
+
+ if (!ndev->target_active_prot) {
+ nfc_err("unable to deactivate target, no active target");
+ return;
+ }
+
+ ndev->target_active_prot = 0;
+
+ if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ nci_request(ndev, nci_rf_deactivate_req, 0,
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+ }
+}
+
+static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
+ struct sk_buff *skb,
+ data_exchange_cb_t cb,
+ void *cb_context)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+ int rc;
+
+ nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);
+
+ if (!ndev->target_active_prot) {
+ nfc_err("unable to exchange data, no active target");
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
+ return -EBUSY;
+
+ /* store cb and context to be used on receiving data */
+ ndev->data_exchange_cb = cb;
+ ndev->data_exchange_cb_context = cb_context;
+
+ rc = nci_send_data(ndev, ndev->conn_id, skb);
+ if (rc)
+ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
+
+ return rc;
+}
+
+static struct nfc_ops nci_nfc_ops = {
+ .dev_up = nci_dev_up,
+ .dev_down = nci_dev_down,
+ .start_poll = nci_start_poll,
+ .stop_poll = nci_stop_poll,
+ .activate_target = nci_activate_target,
+ .deactivate_target = nci_deactivate_target,
+ .data_exchange = nci_data_exchange,
+};
+
+/* ---- Interface to NCI drivers ---- */
+
+/**
+ * nci_allocate_device - allocate a new nci device
+ *
+ * @ops: device operations
+ * @supported_protocols: NFC protocols supported by the device
+ */
+struct nci_dev *nci_allocate_device(struct nci_ops *ops,
+ __u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom)
+{
+ struct nci_dev *ndev;
+
+ nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);
+
+ if (!ops->open || !ops->close || !ops->send)
+ return NULL;
+
+ if (!supported_protocols)
+ return NULL;
+
+ ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
+ if (!ndev)
+ return NULL;
+
+ ndev->ops = ops;
+ ndev->tx_headroom = tx_headroom;
+ ndev->tx_tailroom = tx_tailroom;
+
+ ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
+ supported_protocols,
+ tx_headroom + NCI_DATA_HDR_SIZE,
+ tx_tailroom);
+ if (!ndev->nfc_dev)
+ goto free_exit;
+
+ nfc_set_drvdata(ndev->nfc_dev, ndev);
+
+ return ndev;
+
+free_exit:
+ kfree(ndev);
+ return NULL;
+}
+EXPORT_SYMBOL(nci_allocate_device);
+
+/**
+ * nci_free_device - deallocate nci device
+ *
+ * @ndev: The nci device to deallocate
+ */
+void nci_free_device(struct nci_dev *ndev)
+{
+ nfc_dbg("entry");
+
+ nfc_free_device(ndev->nfc_dev);
+ kfree(ndev);
+}
+EXPORT_SYMBOL(nci_free_device);
+
+/**
+ * nci_register_device - register a nci device in the nfc subsystem
+ *
+ * @dev: The nci device to register
+ */
+int nci_register_device(struct nci_dev *ndev)
+{
+ int rc;
+ struct device *dev = &ndev->nfc_dev->dev;
+ char name[32];
+
+ nfc_dbg("entry");
+
+ rc = nfc_register_device(ndev->nfc_dev);
+ if (rc)
+ goto exit;
+
+ ndev->flags = 0;
+
+ INIT_WORK(&ndev->cmd_work, nci_cmd_work);
+ snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
+ ndev->cmd_wq = create_singlethread_workqueue(name);
+ if (!ndev->cmd_wq) {
+ rc = -ENOMEM;
+ goto unreg_exit;
+ }
+
+ INIT_WORK(&ndev->rx_work, nci_rx_work);
+ snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
+ ndev->rx_wq = create_singlethread_workqueue(name);
+ if (!ndev->rx_wq) {
+ rc = -ENOMEM;
+ goto destroy_cmd_wq_exit;
+ }
+
+ INIT_WORK(&ndev->tx_work, nci_tx_work);
+ snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
+ ndev->tx_wq = create_singlethread_workqueue(name);
+ if (!ndev->tx_wq) {
+ rc = -ENOMEM;
+ goto destroy_rx_wq_exit;
+ }
+
+ skb_queue_head_init(&ndev->cmd_q);
+ skb_queue_head_init(&ndev->rx_q);
+ skb_queue_head_init(&ndev->tx_q);
+
+ setup_timer(&ndev->cmd_timer, nci_cmd_timer,
+ (unsigned long) ndev);
+
+ mutex_init(&ndev->req_lock);
+
+ goto exit;
+
+destroy_rx_wq_exit:
+ destroy_workqueue(ndev->rx_wq);
+
+destroy_cmd_wq_exit:
+ destroy_workqueue(ndev->cmd_wq);
+
+unreg_exit:
+ nfc_unregister_device(ndev->nfc_dev);
+
+exit:
+ return rc;
+}
+EXPORT_SYMBOL(nci_register_device);
+
+/**
+ * nci_unregister_device - unregister a nci device in the nfc subsystem
+ *
+ * @dev: The nci device to unregister
+ */
+void nci_unregister_device(struct nci_dev *ndev)
+{
+ nfc_dbg("entry");
+
+ nci_close_device(ndev);
+
+ destroy_workqueue(ndev->cmd_wq);
+ destroy_workqueue(ndev->rx_wq);
+ destroy_workqueue(ndev->tx_wq);
+
+ nfc_unregister_device(ndev->nfc_dev);
+}
+EXPORT_SYMBOL(nci_unregister_device);
+
+/**
+ * nci_recv_frame - receive frame from NCI drivers
+ *
+ * @skb: The sk_buff to receive
+ */
+int nci_recv_frame(struct sk_buff *skb)
+{
+ struct nci_dev *ndev = (struct nci_dev *) skb->dev;
+
+ nfc_dbg("entry, len %d", skb->len);
+
+ if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
+ && !test_bit(NCI_INIT, &ndev->flags))) {
+ kfree_skb(skb);
+ return -ENXIO;
+ }
+
+ /* Queue frame for rx worker thread */
+ skb_queue_tail(&ndev->rx_q, skb);
+ queue_work(ndev->rx_wq, &ndev->rx_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(nci_recv_frame);
+
+static int nci_send_frame(struct sk_buff *skb)
+{
+ struct nci_dev *ndev = (struct nci_dev *) skb->dev;
+
+ nfc_dbg("entry, len %d", skb->len);
+
+ if (!ndev) {
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ /* Get rid of skb owner, prior to sending to the driver. */
+ skb_orphan(skb);
+
+ return ndev->ops->send(skb);
+}
+
+/* Send NCI command */
+int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
+{
+ struct nci_ctrl_hdr *hdr;
+ struct sk_buff *skb;
+
+ nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);
+
+ skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
+ if (!skb) {
+ nfc_err("no memory for command");
+ return -ENOMEM;
+ }
+
+ hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
+ hdr->gid = nci_opcode_gid(opcode);
+ hdr->oid = nci_opcode_oid(opcode);
+ hdr->plen = plen;
+
+ nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
+ nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);
+
+ if (plen)
+ memcpy(skb_put(skb, plen), payload, plen);
+
+ skb->dev = (void *) ndev;
+
+ skb_queue_tail(&ndev->cmd_q, skb);
+ queue_work(ndev->cmd_wq, &ndev->cmd_work);
+
+ return 0;
+}
+
+/* ---- NCI TX Data worker thread ---- */
+
+static void nci_tx_work(struct work_struct *work)
+{
+ struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
+ struct sk_buff *skb;
+
+ nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));
+
+ /* Send queued tx data */
+ while (atomic_read(&ndev->credits_cnt)) {
+ skb = skb_dequeue(&ndev->tx_q);
+ if (!skb)
+ return;
+
+ atomic_dec(&ndev->credits_cnt);
+
+ nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
+ nci_pbf(skb->data),
+ nci_conn_id(skb->data),
+ nci_plen(skb->data));
+
+ nci_send_frame(skb);
+ }
+}
+
+/* ----- NCI RX worker thread (data & control) ----- */
+
+static void nci_rx_work(struct work_struct *work)
+{
+ struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ndev->rx_q))) {
+ /* Process frame */
+ switch (nci_mt(skb->data)) {
+ case NCI_MT_RSP_PKT:
+ nci_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_MT_NTF_PKT:
+ nci_ntf_packet(ndev, skb);
+ break;
+
+ case NCI_MT_DATA_PKT:
+ nci_rx_data_packet(ndev, skb);
+ break;
+
+ default:
+ nfc_err("unknown MT 0x%x", nci_mt(skb->data));
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
+/* ----- NCI TX CMD worker thread ----- */
+
+static void nci_cmd_work(struct work_struct *work)
+{
+ struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
+ struct sk_buff *skb;
+
+ nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));
+
+ /* Send queued command */
+ if (atomic_read(&ndev->cmd_cnt)) {
+ skb = skb_dequeue(&ndev->cmd_q);
+ if (!skb)
+ return;
+
+ atomic_dec(&ndev->cmd_cnt);
+
+ nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
+ nci_pbf(skb->data),
+ nci_opcode_gid(nci_opcode(skb->data)),
+ nci_opcode_oid(nci_opcode(skb->data)),
+ nci_plen(skb->data));
+
+ nci_send_frame(skb);
+
+ mod_timer(&ndev->cmd_timer,
+ jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
+ }
+}
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
new file mode 100644
index 00000000000..e5ed90fc1a9
--- /dev/null
+++ b/net/nfc/nci/data.c
@@ -0,0 +1,247 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/skbuff.h>
+
+#include "../nfc.h"
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/nfc.h>
+
+/* Complete data exchange transaction and forward skb to nfc core */
+void nci_data_exchange_complete(struct nci_dev *ndev,
+ struct sk_buff *skb,
+ int err)
+{
+ data_exchange_cb_t cb = ndev->data_exchange_cb;
+ void *cb_context = ndev->data_exchange_cb_context;
+
+ nfc_dbg("entry, len %d, err %d", ((skb) ? (skb->len) : (0)), err);
+
+ if (cb) {
+ ndev->data_exchange_cb = NULL;
+ ndev->data_exchange_cb_context = 0;
+
+ /* forward skb to nfc core */
+ cb(cb_context, skb, err);
+ } else if (skb) {
+ nfc_err("no rx callback, dropping rx data...");
+
+ /* no waiting callback, free skb */
+ kfree_skb(skb);
+ }
+
+ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
+}
+
+/* ----------------- NCI TX Data ----------------- */
+
+static inline void nci_push_data_hdr(struct nci_dev *ndev,
+ __u8 conn_id,
+ struct sk_buff *skb,
+ __u8 pbf)
+{
+ struct nci_data_hdr *hdr;
+ int plen = skb->len;
+
+ hdr = (struct nci_data_hdr *) skb_push(skb, NCI_DATA_HDR_SIZE);
+ hdr->conn_id = conn_id;
+ hdr->rfu = 0;
+ hdr->plen = plen;
+
+ nci_mt_set((__u8 *)hdr, NCI_MT_DATA_PKT);
+ nci_pbf_set((__u8 *)hdr, pbf);
+
+ skb->dev = (void *) ndev;
+}
+
+static int nci_queue_tx_data_frags(struct nci_dev *ndev,
+ __u8 conn_id,
+ struct sk_buff *skb) {
+ int total_len = skb->len;
+ unsigned char *data = skb->data;
+ unsigned long flags;
+ struct sk_buff_head frags_q;
+ struct sk_buff *skb_frag;
+ int frag_len;
+ int rc = 0;
+
+ nfc_dbg("entry, conn_id 0x%x, total_len %d", conn_id, total_len);
+
+ __skb_queue_head_init(&frags_q);
+
+ while (total_len) {
+ frag_len = min_t(int, total_len, ndev->max_pkt_payload_size);
+
+ skb_frag = nci_skb_alloc(ndev,
+ (NCI_DATA_HDR_SIZE + frag_len),
+ GFP_KERNEL);
+ if (skb_frag == NULL) {
+ rc = -ENOMEM;
+ goto free_exit;
+ }
+ skb_reserve(skb_frag, NCI_DATA_HDR_SIZE);
+
+ /* first, copy the data */
+ memcpy(skb_put(skb_frag, frag_len), data, frag_len);
+
+ /* second, set the header */
+ nci_push_data_hdr(ndev, conn_id, skb_frag,
+ ((total_len == frag_len) ? (NCI_PBF_LAST) : (NCI_PBF_CONT)));
+
+ __skb_queue_tail(&frags_q, skb_frag);
+
+ data += frag_len;
+ total_len -= frag_len;
+
+ nfc_dbg("frag_len %d, remaining total_len %d",
+ frag_len, total_len);
+ }
+
+ /* queue all fragments atomically */
+ spin_lock_irqsave(&ndev->tx_q.lock, flags);
+
+ while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
+ __skb_queue_tail(&ndev->tx_q, skb_frag);
+
+ spin_unlock_irqrestore(&ndev->tx_q.lock, flags);
+
+ /* free the original skb */
+ kfree_skb(skb);
+
+ goto exit;
+
+free_exit:
+ while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
+ kfree_skb(skb_frag);
+
+exit:
+ return rc;
+}
+
+/* Send NCI data */
+int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
+{
+ int rc = 0;
+
+ nfc_dbg("entry, conn_id 0x%x, plen %d", conn_id, skb->len);
+
+ /* check if the packet need to be fragmented */
+ if (skb->len <= ndev->max_pkt_payload_size) {
+ /* no need to fragment packet */
+ nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);
+
+ skb_queue_tail(&ndev->tx_q, skb);
+ } else {
+ /* fragment packet and queue the fragments */
+ rc = nci_queue_tx_data_frags(ndev, conn_id, skb);
+ if (rc) {
+ nfc_err("failed to fragment tx data packet");
+ goto free_exit;
+ }
+ }
+
+ queue_work(ndev->tx_wq, &ndev->tx_work);
+
+ goto exit;
+
+free_exit:
+ kfree_skb(skb);
+
+exit:
+ return rc;
+}
+
+/* ----------------- NCI RX Data ----------------- */
+
+static void nci_add_rx_data_frag(struct nci_dev *ndev,
+ struct sk_buff *skb,
+ __u8 pbf)
+{
+ int reassembly_len;
+ int err = 0;
+
+ if (ndev->rx_data_reassembly) {
+ reassembly_len = ndev->rx_data_reassembly->len;
+
+ /* first, make enough room for the already accumulated data */
+ if (skb_cow_head(skb, reassembly_len)) {
+ nfc_err("error adding room for accumulated rx data");
+
+ kfree_skb(skb);
+ skb = 0;
+
+ kfree_skb(ndev->rx_data_reassembly);
+ ndev->rx_data_reassembly = 0;
+
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* second, combine the two fragments */
+ memcpy(skb_push(skb, reassembly_len),
+ ndev->rx_data_reassembly->data,
+ reassembly_len);
+
+ /* third, free old reassembly */
+ kfree_skb(ndev->rx_data_reassembly);
+ ndev->rx_data_reassembly = 0;
+ }
+
+ if (pbf == NCI_PBF_CONT) {
+ /* need to wait for next fragment, store skb and exit */
+ ndev->rx_data_reassembly = skb;
+ return;
+ }
+
+exit:
+ nci_data_exchange_complete(ndev, skb, err);
+}
+
+/* Rx Data packet */
+void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ __u8 pbf = nci_pbf(skb->data);
+
+ nfc_dbg("entry, len %d", skb->len);
+
+ nfc_dbg("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d",
+ nci_pbf(skb->data),
+ nci_conn_id(skb->data),
+ nci_plen(skb->data));
+
+ /* strip the nci data header */
+ skb_pull(skb, NCI_DATA_HDR_SIZE);
+
+ if (ndev->target_active_prot == NFC_PROTO_MIFARE) {
+ /* frame I/F => remove the status byte */
+ nfc_dbg("NFC_PROTO_MIFARE => remove the status byte");
+ skb_trim(skb, (skb->len - 1));
+ }
+
+ nci_add_rx_data_frag(ndev, skb, pbf);
+}
diff --git a/net/nfc/nci/lib.c b/net/nfc/nci/lib.c
new file mode 100644
index 00000000000..b19dc2fa90e
--- /dev/null
+++ b/net/nfc/nci/lib.c
@@ -0,0 +1,94 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on lib.c, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#include <net/nfc/nci.h>
+
+/* NCI status codes to Unix errno mapping */
+int nci_to_errno(__u8 code)
+{
+ switch (code) {
+ case NCI_STATUS_OK:
+ return 0;
+
+ case NCI_STATUS_REJECTED:
+ return -EBUSY;
+
+ case NCI_STATUS_MESSAGE_CORRUPTED:
+ return -EBADMSG;
+
+ case NCI_STATUS_BUFFER_FULL:
+ return -ENOBUFS;
+
+ case NCI_STATUS_NOT_INITIALIZED:
+ return -EHOSTDOWN;
+
+ case NCI_STATUS_SYNTAX_ERROR:
+ case NCI_STATUS_SEMANTIC_ERROR:
+ case NCI_STATUS_INVALID_PARAM:
+ case NCI_STATUS_RF_PROTOCOL_ERROR:
+ case NCI_STATUS_NFCEE_PROTOCOL_ERROR:
+ return -EPROTO;
+
+ case NCI_STATUS_UNKNOWN_GID:
+ case NCI_STATUS_UNKNOWN_OID:
+ return -EBADRQC;
+
+ case NCI_STATUS_MESSAGE_SIZE_EXCEEDED:
+ return -EMSGSIZE;
+
+ case NCI_STATUS_DISCOVERY_ALREADY_STARTED:
+ return -EALREADY;
+
+ case NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED:
+ case NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED:
+ return -ECONNREFUSED;
+
+ case NCI_STATUS_RF_TRANSMISSION_ERROR:
+ case NCI_STATUS_NFCEE_TRANSMISSION_ERROR:
+ return -ECOMM;
+
+ case NCI_STATUS_RF_TIMEOUT_ERROR:
+ case NCI_STATUS_NFCEE_TIMEOUT_ERROR:
+ return -ETIMEDOUT;
+
+ case NCI_STATUS_RF_LINK_LOSS_ERROR:
+ return -ENOLINK;
+
+ case NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED:
+ return -EDQUOT;
+
+ case NCI_STATUS_FAILED:
+ default:
+ return -ENOSYS;
+ }
+}
+EXPORT_SYMBOL(nci_to_errno);
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
new file mode 100644
index 00000000000..96633f5cda4
--- /dev/null
+++ b/net/nfc/nci/ntf.c
@@ -0,0 +1,258 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci_event.c, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/skbuff.h>
+
+#include "../nfc.h"
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/nfc.h>
+
+/* Handle NCI Notification packets */
+
+static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
+ int i;
+
+ nfc_dbg("entry, num_entries %d", ntf->num_entries);
+
+ if (ntf->num_entries > NCI_MAX_NUM_CONN)
+ ntf->num_entries = NCI_MAX_NUM_CONN;
+
+ /* update the credits */
+ for (i = 0; i < ntf->num_entries; i++) {
+ nfc_dbg("entry[%d]: conn_id %d, credits %d", i,
+ ntf->conn_entries[i].conn_id,
+ ntf->conn_entries[i].credits);
+
+ if (ntf->conn_entries[i].conn_id == ndev->conn_id) {
+ /* found static rf connection */
+ atomic_add(ntf->conn_entries[i].credits,
+ &ndev->credits_cnt);
+ }
+ }
+
+ /* trigger the next tx */
+ if (!skb_queue_empty(&ndev->tx_q))
+ queue_work(ndev->tx_wq, &ndev->tx_work);
+}
+
+static void nci_rf_field_info_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ struct nci_rf_field_info_ntf *ntf = (void *) skb->data;
+
+ nfc_dbg("entry, rf_field_status %d", ntf->rf_field_status);
+}
+
+static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
+ struct nci_rf_activate_ntf *ntf, __u8 *data)
+{
+ struct rf_tech_specific_params_nfca_poll *nfca_poll;
+ struct activation_params_nfca_poll_iso_dep *nfca_poll_iso_dep;
+
+ nfca_poll = &ntf->rf_tech_specific_params.nfca_poll;
+ nfca_poll_iso_dep = &ntf->activation_params.nfca_poll_iso_dep;
+
+ nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
+ data += 2;
+
+ nfca_poll->nfcid1_len = *data++;
+
+ nfc_dbg("sens_res 0x%x, nfcid1_len %d",
+ nfca_poll->sens_res,
+ nfca_poll->nfcid1_len);
+
+ memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len);
+ data += nfca_poll->nfcid1_len;
+
+ nfca_poll->sel_res_len = *data++;
+
+ if (nfca_poll->sel_res_len != 0)
+ nfca_poll->sel_res = *data++;
+
+ ntf->rf_interface_type = *data++;
+ ntf->activation_params_len = *data++;
+
+ nfc_dbg("sel_res_len %d, sel_res 0x%x, rf_interface_type %d, activation_params_len %d",
+ nfca_poll->sel_res_len,
+ nfca_poll->sel_res,
+ ntf->rf_interface_type,
+ ntf->activation_params_len);
+
+ switch (ntf->rf_interface_type) {
+ case NCI_RF_INTERFACE_ISO_DEP:
+ nfca_poll_iso_dep->rats_res_len = *data++;
+ if (nfca_poll_iso_dep->rats_res_len > 0) {
+ memcpy(nfca_poll_iso_dep->rats_res,
+ data,
+ nfca_poll_iso_dep->rats_res_len);
+ }
+ break;
+
+ case NCI_RF_INTERFACE_FRAME:
+ /* no activation params */
+ break;
+
+ default:
+ nfc_err("unsupported rf_interface_type 0x%x",
+ ntf->rf_interface_type);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static void nci_target_found(struct nci_dev *ndev,
+ struct nci_rf_activate_ntf *ntf)
+{
+ struct nfc_target nfc_tgt;
+
+ if (ntf->rf_protocol == NCI_RF_PROTOCOL_T2T) /* T2T MifareUL */
+ nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK;
+ else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) /* 4A */
+ nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK;
+
+ nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res;
+ nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res;
+
+ if (!(nfc_tgt.supported_protocols & ndev->poll_prots)) {
+ nfc_dbg("the target found does not have the desired protocol");
+ return;
+ }
+
+ nfc_dbg("new target found, supported_protocols 0x%x",
+ nfc_tgt.supported_protocols);
+
+ ndev->target_available_prots = nfc_tgt.supported_protocols;
+
+ nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1);
+}
+
+static void nci_rf_activate_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ struct nci_rf_activate_ntf ntf;
+ __u8 *data = skb->data;
+ int rc = -1;
+
+ clear_bit(NCI_DISCOVERY, &ndev->flags);
+ set_bit(NCI_POLL_ACTIVE, &ndev->flags);
+
+ ntf.target_handle = *data++;
+ ntf.rf_protocol = *data++;
+ ntf.rf_tech_and_mode = *data++;
+ ntf.rf_tech_specific_params_len = *data++;
+
+ nfc_dbg("target_handle %d, rf_protocol 0x%x, rf_tech_and_mode 0x%x, rf_tech_specific_params_len %d",
+ ntf.target_handle,
+ ntf.rf_protocol,
+ ntf.rf_tech_and_mode,
+ ntf.rf_tech_specific_params_len);
+
+ switch (ntf.rf_tech_and_mode) {
+ case NCI_NFC_A_PASSIVE_POLL_MODE:
+ rc = nci_rf_activate_nfca_passive_poll(ndev, &ntf,
+ data);
+ break;
+
+ default:
+ nfc_err("unsupported rf_tech_and_mode 0x%x",
+ ntf.rf_tech_and_mode);
+ return;
+ }
+
+ if (!rc)
+ nci_target_found(ndev, &ntf);
+}
+
+static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ __u8 type = skb->data[0];
+
+ nfc_dbg("entry, type 0x%x", type);
+
+ clear_bit(NCI_POLL_ACTIVE, &ndev->flags);
+ ndev->target_active_prot = 0;
+
+ /* drop tx data queue */
+ skb_queue_purge(&ndev->tx_q);
+
+ /* drop partial rx data packet */
+ if (ndev->rx_data_reassembly) {
+ kfree_skb(ndev->rx_data_reassembly);
+ ndev->rx_data_reassembly = 0;
+ }
+
+ /* complete the data exchange transaction, if exists */
+ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
+ nci_data_exchange_complete(ndev, NULL, -EIO);
+}
+
+void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ __u16 ntf_opcode = nci_opcode(skb->data);
+
+ nfc_dbg("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
+ nci_pbf(skb->data),
+ nci_opcode_gid(ntf_opcode),
+ nci_opcode_oid(ntf_opcode),
+ nci_plen(skb->data));
+
+ /* strip the nci control header */
+ skb_pull(skb, NCI_CTRL_HDR_SIZE);
+
+ switch (ntf_opcode) {
+ case NCI_OP_CORE_CONN_CREDITS_NTF:
+ nci_core_conn_credits_ntf_packet(ndev, skb);
+ break;
+
+ case NCI_OP_RF_FIELD_INFO_NTF:
+ nci_rf_field_info_ntf_packet(ndev, skb);
+ break;
+
+ case NCI_OP_RF_ACTIVATE_NTF:
+ nci_rf_activate_ntf_packet(ndev, skb);
+ break;
+
+ case NCI_OP_RF_DEACTIVATE_NTF:
+ nci_rf_deactivate_ntf_packet(ndev, skb);
+ break;
+
+ default:
+ nfc_err("unknown ntf opcode 0x%x", ntf_opcode);
+ break;
+ }
+
+ kfree_skb(skb);
+}
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
new file mode 100644
index 00000000000..0403d4cd091
--- /dev/null
+++ b/net/nfc/nci/rsp.c
@@ -0,0 +1,226 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci_event.c, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/skbuff.h>
+
+#include "../nfc.h"
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+
+/* Handle NCI Response packets */
+
+static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ struct nci_core_reset_rsp *rsp = (void *) skb->data;
+
+ nfc_dbg("entry, status 0x%x", rsp->status);
+
+ if (rsp->status == NCI_STATUS_OK)
+ ndev->nci_ver = rsp->nci_ver;
+
+ nfc_dbg("nci_ver 0x%x", ndev->nci_ver);
+
+ nci_req_complete(ndev, rsp->status);
+}
+
+static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data;
+ struct nci_core_init_rsp_2 *rsp_2;
+
+ nfc_dbg("entry, status 0x%x", rsp_1->status);
+
+ if (rsp_1->status != NCI_STATUS_OK)
+ return;
+
+ ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features);
+ ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;
+
+ if (ndev->num_supported_rf_interfaces >
+ NCI_MAX_SUPPORTED_RF_INTERFACES) {
+ ndev->num_supported_rf_interfaces =
+ NCI_MAX_SUPPORTED_RF_INTERFACES;
+ }
+
+ memcpy(ndev->supported_rf_interfaces,
+ rsp_1->supported_rf_interfaces,
+ ndev->num_supported_rf_interfaces);
+
+ rsp_2 = (void *) (skb->data + 6 + ndev->num_supported_rf_interfaces);
+
+ ndev->max_logical_connections =
+ rsp_2->max_logical_connections;
+ ndev->max_routing_table_size =
+ __le16_to_cpu(rsp_2->max_routing_table_size);
+ ndev->max_control_packet_payload_length =
+ rsp_2->max_control_packet_payload_length;
+ ndev->rf_sending_buffer_size =
+ __le16_to_cpu(rsp_2->rf_sending_buffer_size);
+ ndev->rf_receiving_buffer_size =
+ __le16_to_cpu(rsp_2->rf_receiving_buffer_size);
+ ndev->manufacturer_id =
+ __le16_to_cpu(rsp_2->manufacturer_id);
+
+ nfc_dbg("nfcc_features 0x%x",
+ ndev->nfcc_features);
+ nfc_dbg("num_supported_rf_interfaces %d",
+ ndev->num_supported_rf_interfaces);
+ nfc_dbg("supported_rf_interfaces[0] 0x%x",
+ ndev->supported_rf_interfaces[0]);
+ nfc_dbg("supported_rf_interfaces[1] 0x%x",
+ ndev->supported_rf_interfaces[1]);
+ nfc_dbg("supported_rf_interfaces[2] 0x%x",
+ ndev->supported_rf_interfaces[2]);
+ nfc_dbg("supported_rf_interfaces[3] 0x%x",
+ ndev->supported_rf_interfaces[3]);
+ nfc_dbg("max_logical_connections %d",
+ ndev->max_logical_connections);
+ nfc_dbg("max_routing_table_size %d",
+ ndev->max_routing_table_size);
+ nfc_dbg("max_control_packet_payload_length %d",
+ ndev->max_control_packet_payload_length);
+ nfc_dbg("rf_sending_buffer_size %d",
+ ndev->rf_sending_buffer_size);
+ nfc_dbg("rf_receiving_buffer_size %d",
+ ndev->rf_receiving_buffer_size);
+ nfc_dbg("manufacturer_id 0x%x",
+ ndev->manufacturer_id);
+
+ nci_req_complete(ndev, rsp_1->status);
+}
+
+static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ struct nci_core_conn_create_rsp *rsp = (void *) skb->data;
+
+ nfc_dbg("entry, status 0x%x", rsp->status);
+
+ if (rsp->status != NCI_STATUS_OK)
+ return;
+
+ ndev->max_pkt_payload_size = rsp->max_pkt_payload_size;
+ ndev->initial_num_credits = rsp->initial_num_credits;
+ ndev->conn_id = rsp->conn_id;
+
+ atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
+
+ nfc_dbg("max_pkt_payload_size %d", ndev->max_pkt_payload_size);
+ nfc_dbg("initial_num_credits %d", ndev->initial_num_credits);
+ nfc_dbg("conn_id %d", ndev->conn_id);
+}
+
+static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ __u8 status = skb->data[0];
+
+ nfc_dbg("entry, status 0x%x", status);
+
+ nci_req_complete(ndev, status);
+}
+
+static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ __u8 status = skb->data[0];
+
+ nfc_dbg("entry, status 0x%x", status);
+
+ if (status == NCI_STATUS_OK)
+ set_bit(NCI_DISCOVERY, &ndev->flags);
+
+ nci_req_complete(ndev, status);
+}
+
+static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ __u8 status = skb->data[0];
+
+ nfc_dbg("entry, status 0x%x", status);
+
+ clear_bit(NCI_DISCOVERY, &ndev->flags);
+
+ nci_req_complete(ndev, status);
+}
+
+void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ __u16 rsp_opcode = nci_opcode(skb->data);
+
+ /* we got a rsp, stop the cmd timer */
+ del_timer(&ndev->cmd_timer);
+
+ nfc_dbg("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
+ nci_pbf(skb->data),
+ nci_opcode_gid(rsp_opcode),
+ nci_opcode_oid(rsp_opcode),
+ nci_plen(skb->data));
+
+ /* strip the nci control header */
+ skb_pull(skb, NCI_CTRL_HDR_SIZE);
+
+ switch (rsp_opcode) {
+ case NCI_OP_CORE_RESET_RSP:
+ nci_core_reset_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_OP_CORE_INIT_RSP:
+ nci_core_init_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_OP_CORE_CONN_CREATE_RSP:
+ nci_core_conn_create_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_OP_RF_DISCOVER_MAP_RSP:
+ nci_rf_disc_map_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_OP_RF_DISCOVER_RSP:
+ nci_rf_disc_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_OP_RF_DEACTIVATE_RSP:
+ nci_rf_deactivate_rsp_packet(ndev, skb);
+ break;
+
+ default:
+ nfc_err("unknown rsp opcode 0x%x", rsp_opcode);
+ break;
+ }
+
+ kfree_skb(skb);
+
+ /* trigger the next cmd */
+ atomic_set(&ndev->cmd_cnt, 1);
+ if (!skb_queue_empty(&ndev->cmd_q))
+ queue_work(ndev->cmd_wq, &ndev->cmd_work);
+}
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index ccdff7953f7..03f8818e1f1 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -367,6 +367,52 @@ out_putdev:
return rc;
}
+static int nfc_genl_dev_up(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nfc_dev *dev;
+ int rc;
+ u32 idx;
+
+ nfc_dbg("entry");
+
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+
+ dev = nfc_get_device(idx);
+ if (!dev)
+ return -ENODEV;
+
+ rc = nfc_dev_up(dev);
+
+ nfc_put_device(dev);
+ return rc;
+}
+
+static int nfc_genl_dev_down(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nfc_dev *dev;
+ int rc;
+ u32 idx;
+
+ nfc_dbg("entry");
+
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+
+ dev = nfc_get_device(idx);
+ if (!dev)
+ return -ENODEV;
+
+ rc = nfc_dev_down(dev);
+
+ nfc_put_device(dev);
+ return rc;
+}
+
static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
{
struct nfc_dev *dev;
@@ -441,6 +487,16 @@ static struct genl_ops nfc_genl_ops[] = {
.policy = nfc_genl_policy,
},
{
+ .cmd = NFC_CMD_DEV_UP,
+ .doit = nfc_genl_dev_up,
+ .policy = nfc_genl_policy,
+ },
+ {
+ .cmd = NFC_CMD_DEV_DOWN,
+ .doit = nfc_genl_dev_down,
+ .policy = nfc_genl_policy,
+ },
+ {
.cmd = NFC_CMD_START_POLL,
.doit = nfc_genl_start_poll,
.policy = nfc_genl_policy,
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index aaf9832298f..b6753f45624 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -24,7 +24,7 @@
#ifndef __LOCAL_NFC_H
#define __LOCAL_NFC_H
-#include <net/nfc.h>
+#include <net/nfc/nfc.h>
#include <net/sock.h>
__attribute__((format (printf, 2, 3)))
@@ -101,6 +101,10 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
class_dev_iter_exit(iter);
}
+int nfc_dev_up(struct nfc_dev *dev);
+
+int nfc_dev_down(struct nfc_dev *dev);
+
int nfc_start_poll(struct nfc_dev *dev, u32 protocols);
int nfc_stop_poll(struct nfc_dev *dev);
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 52de84a5511..9fd652a5142 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -123,11 +123,7 @@ error:
static int rawsock_add_header(struct sk_buff *skb)
{
-
- if (skb_cow_head(skb, 1))
- return -ENOMEM;
-
- *skb_push(skb, 1) = 0;
+ *skb_push(skb, NFC_HEADER_SIZE) = 0;
return 0;
}
@@ -197,6 +193,7 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
+ struct nfc_dev *dev = nfc_rawsock(sk)->dev;
struct sk_buff *skb;
int rc;
@@ -208,11 +205,13 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
- skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT,
- &rc);
+ skb = sock_alloc_send_skb(sk, len + dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE,
+ msg->msg_flags & MSG_DONTWAIT, &rc);
if (!skb)
return rc;
+ skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (rc < 0) {
kfree_skb(skb);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index fabb4fafa28..03bb45adf2f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -40,6 +40,10 @@
* byte arrays at the end of sockaddr_ll
* and packet_mreq.
* Johann Baudy : Added TX RING.
+ * Chetan Loke : Implemented TPACKET_V3 block abstraction
+ * layer.
+ * Copyright (C) 2011, <lokec@ccs.neu.edu>
+ *
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -161,9 +165,56 @@ struct packet_mreq_max {
unsigned char mr_address[MAX_ADDR_LEN];
};
-static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
+static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring);
+
+#define V3_ALIGNMENT (8)
+
+#define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
+
+#define BLK_PLUS_PRIV(sz_of_priv) \
+ (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
+
+/* kbdq - kernel block descriptor queue */
+struct tpacket_kbdq_core {
+ struct pgv *pkbdq;
+ unsigned int feature_req_word;
+ unsigned int hdrlen;
+ unsigned char reset_pending_on_curr_blk;
+ unsigned char delete_blk_timer;
+ unsigned short kactive_blk_num;
+ unsigned short blk_sizeof_priv;
+
+ /* last_kactive_blk_num:
+ * trick to see if user-space has caught up
+ * in order to avoid refreshing timer when every single pkt arrives.
+ */
+ unsigned short last_kactive_blk_num;
+
+ char *pkblk_start;
+ char *pkblk_end;
+ int kblk_size;
+ unsigned int knum_blocks;
+ uint64_t knxt_seq_num;
+ char *prev;
+ char *nxt_offset;
+ struct sk_buff *skb;
+
+ atomic_t blk_fill_in_prog;
+
+ /* Default is set to 8ms */
+#define DEFAULT_PRB_RETIRE_TOV (8)
+
+ unsigned short retire_blk_tov;
+ unsigned short version;
+ unsigned long tov_in_jiffies;
+
+ /* timer to retire an outstanding block */
+ struct timer_list retire_blk_timer;
+};
+
+#define PGV_FROM_VMALLOC 1
struct pgv {
char *buffer;
};
@@ -179,12 +230,44 @@ struct packet_ring_buffer {
unsigned int pg_vec_pages;
unsigned int pg_vec_len;
+ struct tpacket_kbdq_core prb_bdqc;
atomic_t pending;
};
+#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
+#define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
+#define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
+#define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
+#define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
+#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
+#define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
+
struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
+static void *packet_previous_frame(struct packet_sock *po,
+ struct packet_ring_buffer *rb,
+ int status);
+static void packet_increment_head(struct packet_ring_buffer *buff);
+static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
+ struct tpacket_block_desc *);
+static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
+ struct packet_sock *);
+static void prb_retire_current_block(struct tpacket_kbdq_core *,
+ struct packet_sock *, unsigned int status);
+static int prb_queue_frozen(struct tpacket_kbdq_core *);
+static void prb_open_block(struct tpacket_kbdq_core *,
+ struct tpacket_block_desc *);
+static void prb_retire_rx_blk_timer_expired(unsigned long);
+static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
+static void prb_init_blk_timer(struct packet_sock *,
+ struct tpacket_kbdq_core *,
+ void (*func) (unsigned long));
+static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
+static void prb_clear_rxhash(struct tpacket_kbdq_core *,
+ struct tpacket3_hdr *);
+static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
+ struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
struct packet_fanout;
@@ -193,6 +276,7 @@ struct packet_sock {
struct sock sk;
struct packet_fanout *fanout;
struct tpacket_stats stats;
+ union tpacket_stats_u stats_u;
struct packet_ring_buffer rx_ring;
struct packet_ring_buffer tx_ring;
int copy_thresh;
@@ -242,6 +326,15 @@ struct packet_skb_cb {
#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
+#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
+#define GET_PBLOCK_DESC(x, bid) \
+ ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
+#define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
+ ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
+#define GET_NEXT_PRB_BLK_NUM(x) \
+ (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
+ ((x)->kactive_blk_num+1) : 0)
+
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
return (struct packet_sock *)sk;
@@ -325,8 +418,9 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
h.h2->tp_status = status;
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
break;
+ case TPACKET_V3:
default:
- pr_err("TPACKET version not supported\n");
+ WARN(1, "TPACKET version not supported.\n");
BUG();
}
@@ -351,8 +445,9 @@ static int __packet_get_status(struct packet_sock *po, void *frame)
case TPACKET_V2:
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
return h.h2->tp_status;
+ case TPACKET_V3:
default:
- pr_err("TPACKET version not supported\n");
+ WARN(1, "TPACKET version not supported.\n");
BUG();
return 0;
}
@@ -389,6 +484,670 @@ static inline void *packet_current_frame(struct packet_sock *po,
return packet_lookup_frame(po, rb, rb->head, status);
}
+static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
+{
+ del_timer_sync(&pkc->retire_blk_timer);
+}
+
+static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
+ int tx_ring,
+ struct sk_buff_head *rb_queue)
+{
+ struct tpacket_kbdq_core *pkc;
+
+ pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
+
+ spin_lock(&rb_queue->lock);
+ pkc->delete_blk_timer = 1;
+ spin_unlock(&rb_queue->lock);
+
+ prb_del_retire_blk_timer(pkc);
+}
+
+static void prb_init_blk_timer(struct packet_sock *po,
+ struct tpacket_kbdq_core *pkc,
+ void (*func) (unsigned long))
+{
+ init_timer(&pkc->retire_blk_timer);
+ pkc->retire_blk_timer.data = (long)po;
+ pkc->retire_blk_timer.function = func;
+ pkc->retire_blk_timer.expires = jiffies;
+}
+
+static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
+{
+ struct tpacket_kbdq_core *pkc;
+
+ if (tx_ring)
+ BUG();
+
+ pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
+ prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
+}
+
+static int prb_calc_retire_blk_tmo(struct packet_sock *po,
+ int blk_size_in_bytes)
+{
+ struct net_device *dev;
+ unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
+ struct ethtool_cmd ecmd;
+ int err;
+
+ rtnl_lock();
+ dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
+ if (unlikely(!dev)) {
+ rtnl_unlock();
+ return DEFAULT_PRB_RETIRE_TOV;
+ }
+ err = __ethtool_get_settings(dev, &ecmd);
+ rtnl_unlock();
+ if (!err) {
+ switch (ecmd.speed) {
+ case SPEED_10000:
+ msec = 1;
+ div = 10000/1000;
+ break;
+ case SPEED_1000:
+ msec = 1;
+ div = 1000/1000;
+ break;
+ /*
+ * If the link speed is so slow you don't really
+ * need to worry about perf anyways
+ */
+ case SPEED_100:
+ case SPEED_10:
+ default:
+ return DEFAULT_PRB_RETIRE_TOV;
+ }
+ }
+
+ mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
+
+ if (div)
+ mbits /= div;
+
+ tmo = mbits * msec;
+
+ if (div)
+ return tmo+1;
+ return tmo;
+}
+
+static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
+ union tpacket_req_u *req_u)
+{
+ p1->feature_req_word = req_u->req3.tp_feature_req_word;
+}
+
+static void init_prb_bdqc(struct packet_sock *po,
+ struct packet_ring_buffer *rb,
+ struct pgv *pg_vec,
+ union tpacket_req_u *req_u, int tx_ring)
+{
+ struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
+ struct tpacket_block_desc *pbd;
+
+ memset(p1, 0x0, sizeof(*p1));
+
+ p1->knxt_seq_num = 1;
+ p1->pkbdq = pg_vec;
+ pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
+ p1->pkblk_start = (char *)pg_vec[0].buffer;
+ p1->kblk_size = req_u->req3.tp_block_size;
+ p1->knum_blocks = req_u->req3.tp_block_nr;
+ p1->hdrlen = po->tp_hdrlen;
+ p1->version = po->tp_version;
+ p1->last_kactive_blk_num = 0;
+ po->stats_u.stats3.tp_freeze_q_cnt = 0;
+ if (req_u->req3.tp_retire_blk_tov)
+ p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
+ else
+ p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
+ req_u->req3.tp_block_size);
+ p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
+ p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
+
+ prb_init_ft_ops(p1, req_u);
+ prb_setup_retire_blk_timer(po, tx_ring);
+ prb_open_block(p1, pbd);
+}
+
+/* Do NOT update the last_blk_num first.
+ * Assumes sk_buff_head lock is held.
+ */
+static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
+{
+ mod_timer(&pkc->retire_blk_timer,
+ jiffies + pkc->tov_in_jiffies);
+ pkc->last_kactive_blk_num = pkc->kactive_blk_num;
+}
+
+/*
+ * Timer logic:
+ * 1) We refresh the timer only when we open a block.
+ * By doing this we don't waste cycles refreshing the timer
+ * on packet-by-packet basis.
+ *
+ * With a 1MB block-size, on a 1Gbps line, it will take
+ * i) ~8 ms to fill a block + ii) memcpy etc.
+ * In this cut we are not accounting for the memcpy time.
+ *
+ * So, if the user sets the 'tmo' to 10ms then the timer
+ * will never fire while the block is still getting filled
+ * (which is what we want). However, the user could choose
+ * to close a block early and that's fine.
+ *
+ * But when the timer does fire, we check whether or not to refresh it.
+ * Since the tmo granularity is in msecs, it is not too expensive
+ * to refresh the timer, lets say every '8' msecs.
+ * Either the user can set the 'tmo' or we can derive it based on
+ * a) line-speed and b) block-size.
+ * prb_calc_retire_blk_tmo() calculates the tmo.
+ *
+ */
+static void prb_retire_rx_blk_timer_expired(unsigned long data)
+{
+ struct packet_sock *po = (struct packet_sock *)data;
+ struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
+ unsigned int frozen;
+ struct tpacket_block_desc *pbd;
+
+ spin_lock(&po->sk.sk_receive_queue.lock);
+
+ frozen = prb_queue_frozen(pkc);
+ pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
+
+ if (unlikely(pkc->delete_blk_timer))
+ goto out;
+
+ /* We only need to plug the race when the block is partially filled.
+ * tpacket_rcv:
+ * lock(); increment BLOCK_NUM_PKTS; unlock()
+ * copy_bits() is in progress ...
+ * timer fires on other cpu:
+ * we can't retire the current block because copy_bits
+ * is in progress.
+ *
+ */
+ if (BLOCK_NUM_PKTS(pbd)) {
+ while (atomic_read(&pkc->blk_fill_in_prog)) {
+ /* Waiting for skb_copy_bits to finish... */
+ cpu_relax();
+ }
+ }
+
+ if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
+ if (!frozen) {
+ prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
+ if (!prb_dispatch_next_block(pkc, po))
+ goto refresh_timer;
+ else
+ goto out;
+ } else {
+ /* Case 1. Queue was frozen because user-space was
+ * lagging behind.
+ */
+ if (prb_curr_blk_in_use(pkc, pbd)) {
+ /*
+ * Ok, user-space is still behind.
+ * So just refresh the timer.
+ */
+ goto refresh_timer;
+ } else {
+ /* Case 2. queue was frozen,user-space caught up,
+ * now the link went idle && the timer fired.
+ * We don't have a block to close.So we open this
+ * block and restart the timer.
+ * opening a block thaws the queue,restarts timer
+ * Thawing/timer-refresh is a side effect.
+ */
+ prb_open_block(pkc, pbd);
+ goto out;
+ }
+ }
+ }
+
+refresh_timer:
+ _prb_refresh_rx_retire_blk_timer(pkc);
+
+out:
+ spin_unlock(&po->sk.sk_receive_queue.lock);
+}
+
+static inline void prb_flush_block(struct tpacket_kbdq_core *pkc1,
+ struct tpacket_block_desc *pbd1, __u32 status)
+{
+ /* Flush everything minus the block header */
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
+ u8 *start, *end;
+
+ start = (u8 *)pbd1;
+
+ /* Skip the block header(we know header WILL fit in 4K) */
+ start += PAGE_SIZE;
+
+ end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
+ for (; start < end; start += PAGE_SIZE)
+ flush_dcache_page(pgv_to_page(start));
+
+ smp_wmb();
+#endif
+
+ /* Now update the block status. */
+
+ BLOCK_STATUS(pbd1) = status;
+
+ /* Flush the block header */
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
+ start = (u8 *)pbd1;
+ flush_dcache_page(pgv_to_page(start));
+
+ smp_wmb();
+#endif
+}
+
+/*
+ * Side effect:
+ *
+ * 1) flush the block
+ * 2) Increment active_blk_num
+ *
+ * Note:We DONT refresh the timer on purpose.
+ * Because almost always the next block will be opened.
+ */
+static void prb_close_block(struct tpacket_kbdq_core *pkc1,
+ struct tpacket_block_desc *pbd1,
+ struct packet_sock *po, unsigned int stat)
+{
+ __u32 status = TP_STATUS_USER | stat;
+
+ struct tpacket3_hdr *last_pkt;
+ struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
+
+ if (po->stats.tp_drops)
+ status |= TP_STATUS_LOSING;
+
+ last_pkt = (struct tpacket3_hdr *)pkc1->prev;
+ last_pkt->tp_next_offset = 0;
+
+ /* Get the ts of the last pkt */
+ if (BLOCK_NUM_PKTS(pbd1)) {
+ h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
+ h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
+ } else {
+ /* Ok, we tmo'd - so get the current time */
+ struct timespec ts;
+ getnstimeofday(&ts);
+ h1->ts_last_pkt.ts_sec = ts.tv_sec;
+ h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
+ }
+
+ smp_wmb();
+
+ /* Flush the block */
+ prb_flush_block(pkc1, pbd1, status);
+
+ pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
+}
+
+static inline void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
+{
+ pkc->reset_pending_on_curr_blk = 0;
+}
+
+/*
+ * Side effect of opening a block:
+ *
+ * 1) prb_queue is thawed.
+ * 2) retire_blk_timer is refreshed.
+ *
+ */
+static void prb_open_block(struct tpacket_kbdq_core *pkc1,
+ struct tpacket_block_desc *pbd1)
+{
+ struct timespec ts;
+ struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
+
+ smp_rmb();
+
+ if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
+
+ /* We could have just memset this but we will lose the
+ * flexibility of making the priv area sticky
+ */
+ BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
+ BLOCK_NUM_PKTS(pbd1) = 0;
+ BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+ getnstimeofday(&ts);
+ h1->ts_first_pkt.ts_sec = ts.tv_sec;
+ h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
+ pkc1->pkblk_start = (char *)pbd1;
+ pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
+ BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
+ BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+ BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
+ pbd1->version = pkc1->version;
+ pkc1->prev = pkc1->nxt_offset;
+ pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
+ prb_thaw_queue(pkc1);
+ _prb_refresh_rx_retire_blk_timer(pkc1);
+
+ smp_wmb();
+
+ return;
+ }
+
+ WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
+ pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
+ dump_stack();
+ BUG();
+}
+
+/*
+ * Queue freeze logic:
+ * 1) Assume tp_block_nr = 8 blocks.
+ * 2) At time 't0', user opens Rx ring.
+ * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
+ * 4) user-space is either sleeping or processing block '0'.
+ * 5) tpacket_rcv is currently filling block '7', since there is no space left,
+ * it will close block-7,loop around and try to fill block '0'.
+ * call-flow:
+ * __packet_lookup_frame_in_block
+ * prb_retire_current_block()
+ * prb_dispatch_next_block()
+ * |->(BLOCK_STATUS == USER) evaluates to true
+ * 5.1) Since block-0 is currently in-use, we just freeze the queue.
+ * 6) Now there are two cases:
+ * 6.1) Link goes idle right after the queue is frozen.
+ * But remember, the last open_block() refreshed the timer.
+ * When this timer expires,it will refresh itself so that we can
+ * re-open block-0 in near future.
+ * 6.2) Link is busy and keeps on receiving packets. This is a simple
+ * case and __packet_lookup_frame_in_block will check if block-0
+ * is free and can now be re-used.
+ */
+static inline void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
+ struct packet_sock *po)
+{
+ pkc->reset_pending_on_curr_blk = 1;
+ po->stats_u.stats3.tp_freeze_q_cnt++;
+}
+
+#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
+
+/*
+ * If the next block is free then we will dispatch it
+ * and return a good offset.
+ * Else, we will freeze the queue.
+ * So, caller must check the return value.
+ */
+static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
+ struct packet_sock *po)
+{
+ struct tpacket_block_desc *pbd;
+
+ smp_rmb();
+
+ /* 1. Get current block num */
+ pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
+
+ /* 2. If this block is currently in_use then freeze the queue */
+ if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
+ prb_freeze_queue(pkc, po);
+ return NULL;
+ }
+
+ /*
+ * 3.
+ * open this block and return the offset where the first packet
+ * needs to get stored.
+ */
+ prb_open_block(pkc, pbd);
+ return (void *)pkc->nxt_offset;
+}
+
+static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
+ struct packet_sock *po, unsigned int status)
+{
+ struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
+
+ /* retire/close the current block */
+ if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
+ /*
+ * Plug the case where copy_bits() is in progress on
+ * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
+ * have space to copy the pkt in the current block and
+ * called prb_retire_current_block()
+ *
+ * We don't need to worry about the TMO case because
+ * the timer-handler already handled this case.
+ */
+ if (!(status & TP_STATUS_BLK_TMO)) {
+ while (atomic_read(&pkc->blk_fill_in_prog)) {
+ /* Waiting for skb_copy_bits to finish... */
+ cpu_relax();
+ }
+ }
+ prb_close_block(pkc, pbd, po, status);
+ return;
+ }
+
+ WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
+ dump_stack();
+ BUG();
+}
+
+static inline int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
+ struct tpacket_block_desc *pbd)
+{
+ return TP_STATUS_USER & BLOCK_STATUS(pbd);
+}
+
+static inline int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
+{
+ return pkc->reset_pending_on_curr_blk;
+}
+
+static inline void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
+{
+ struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
+ atomic_dec(&pkc->blk_fill_in_prog);
+}
+
+static inline void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
+ struct tpacket3_hdr *ppd)
+{
+ ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
+}
+
+static inline void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
+ struct tpacket3_hdr *ppd)
+{
+ ppd->hv1.tp_rxhash = 0;
+}
+
+static inline void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
+ struct tpacket3_hdr *ppd)
+{
+ if (vlan_tx_tag_present(pkc->skb)) {
+ ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
+ ppd->tp_status = TP_STATUS_VLAN_VALID;
+ } else {
+ ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
+ }
+}
+
+static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
+ struct tpacket3_hdr *ppd)
+{
+ prb_fill_vlan_info(pkc, ppd);
+
+ if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
+ prb_fill_rxhash(pkc, ppd);
+ else
+ prb_clear_rxhash(pkc, ppd);
+}
+
+static inline void prb_fill_curr_block(char *curr,
+ struct tpacket_kbdq_core *pkc,
+ struct tpacket_block_desc *pbd,
+ unsigned int len)
+{
+ struct tpacket3_hdr *ppd;
+
+ ppd = (struct tpacket3_hdr *)curr;
+ ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
+ pkc->prev = curr;
+ pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
+ BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
+ BLOCK_NUM_PKTS(pbd) += 1;
+ atomic_inc(&pkc->blk_fill_in_prog);
+ prb_run_all_ft_ops(pkc, ppd);
+}
+
+/* Assumes caller has the sk->rx_queue.lock */
+static void *__packet_lookup_frame_in_block(struct packet_sock *po,
+ struct sk_buff *skb,
+ int status,
+ unsigned int len
+ )
+{
+ struct tpacket_kbdq_core *pkc;
+ struct tpacket_block_desc *pbd;
+ char *curr, *end;
+
+ pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
+ pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
+
+ /* Queue is frozen when user space is lagging behind */
+ if (prb_queue_frozen(pkc)) {
+ /*
+ * Check if that last block which caused the queue to freeze,
+ * is still in_use by user-space.
+ */
+ if (prb_curr_blk_in_use(pkc, pbd)) {
+ /* Can't record this packet */
+ return NULL;
+ } else {
+ /*
+ * Ok, the block was released by user-space.
+ * Now let's open that block.
+ * opening a block also thaws the queue.
+ * Thawing is a side effect.
+ */
+ prb_open_block(pkc, pbd);
+ }
+ }
+
+ smp_mb();
+ curr = pkc->nxt_offset;
+ pkc->skb = skb;
+ end = (char *) ((char *)pbd + pkc->kblk_size);
+
+ /* first try the current block */
+ if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
+ prb_fill_curr_block(curr, pkc, pbd, len);
+ return (void *)curr;
+ }
+
+ /* Ok, close the current block */
+ prb_retire_current_block(pkc, po, 0);
+
+ /* Now, try to dispatch the next block */
+ curr = (char *)prb_dispatch_next_block(pkc, po);
+ if (curr) {
+ pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
+ prb_fill_curr_block(curr, pkc, pbd, len);
+ return (void *)curr;
+ }
+
+ /*
+ * No free blocks are available.user_space hasn't caught up yet.
+ * Queue was just frozen and now this packet will get dropped.
+ */
+ return NULL;
+}
+
+static inline void *packet_current_rx_frame(struct packet_sock *po,
+ struct sk_buff *skb,
+ int status, unsigned int len)
+{
+ char *curr = NULL;
+ switch (po->tp_version) {
+ case TPACKET_V1:
+ case TPACKET_V2:
+ curr = packet_lookup_frame(po, &po->rx_ring,
+ po->rx_ring.head, status);
+ return curr;
+ case TPACKET_V3:
+ return __packet_lookup_frame_in_block(po, skb, status, len);
+ default:
+ WARN(1, "TPACKET version not supported\n");
+ BUG();
+ return 0;
+ }
+}
+
+static inline void *prb_lookup_block(struct packet_sock *po,
+ struct packet_ring_buffer *rb,
+ unsigned int previous,
+ int status)
+{
+ struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
+ struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);
+
+ if (status != BLOCK_STATUS(pbd))
+ return NULL;
+ return pbd;
+}
+
+static inline int prb_previous_blk_num(struct packet_ring_buffer *rb)
+{
+ unsigned int prev;
+ if (rb->prb_bdqc.kactive_blk_num)
+ prev = rb->prb_bdqc.kactive_blk_num-1;
+ else
+ prev = rb->prb_bdqc.knum_blocks-1;
+ return prev;
+}
+
+/* Assumes caller has held the rx_queue.lock */
+static inline void *__prb_previous_block(struct packet_sock *po,
+ struct packet_ring_buffer *rb,
+ int status)
+{
+ unsigned int previous = prb_previous_blk_num(rb);
+ return prb_lookup_block(po, rb, previous, status);
+}
+
+static inline void *packet_previous_rx_frame(struct packet_sock *po,
+ struct packet_ring_buffer *rb,
+ int status)
+{
+ if (po->tp_version <= TPACKET_V2)
+ return packet_previous_frame(po, rb, status);
+
+ return __prb_previous_block(po, rb, status);
+}
+
+static inline void packet_increment_rx_head(struct packet_sock *po,
+ struct packet_ring_buffer *rb)
+{
+ switch (po->tp_version) {
+ case TPACKET_V1:
+ case TPACKET_V2:
+ return packet_increment_head(rb);
+ case TPACKET_V3:
+ default:
+ WARN(1, "TPACKET version not supported.\n");
+ BUG();
+ return;
+ }
+}
+
static inline void *packet_previous_frame(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status)
@@ -454,43 +1213,6 @@ static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *sk
return f->arr[cpu % num];
}
-static struct sk_buff *fanout_check_defrag(struct sk_buff *skb)
-{
-#ifdef CONFIG_INET
- const struct iphdr *iph;
- u32 len;
-
- if (skb->protocol != htons(ETH_P_IP))
- return skb;
-
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- return skb;
-
- iph = ip_hdr(skb);
- if (iph->ihl < 5 || iph->version != 4)
- return skb;
- if (!pskb_may_pull(skb, iph->ihl*4))
- return skb;
- iph = ip_hdr(skb);
- len = ntohs(iph->tot_len);
- if (skb->len < len || len < (iph->ihl * 4))
- return skb;
-
- if (ip_is_fragment(ip_hdr(skb))) {
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (skb) {
- if (pskb_trim_rcsum(skb, len))
- return skb;
- memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
- if (ip_defrag(skb, IP_DEFRAG_AF_PACKET))
- return NULL;
- skb->rxhash = 0;
- }
- }
-#endif
- return skb;
-}
-
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
@@ -509,7 +1231,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
case PACKET_FANOUT_HASH:
default:
if (f->defrag) {
- skb = fanout_check_defrag(skb);
+ skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
if (!skb)
return 0;
}
@@ -985,12 +1707,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
union {
struct tpacket_hdr *h1;
struct tpacket2_hdr *h2;
+ struct tpacket3_hdr *h3;
void *raw;
} h;
u8 *skb_head = skb->data;
int skb_len = skb->len;
unsigned int snaplen, res;
- unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
+ unsigned long status = TP_STATUS_USER;
unsigned short macoff, netoff, hdrlen;
struct sk_buff *copy_skb = NULL;
struct timeval tv;
@@ -1036,37 +1759,46 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
po->tp_reserve;
macoff = netoff - maclen;
}
-
- if (macoff + snaplen > po->rx_ring.frame_size) {
- if (po->copy_thresh &&
- atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
- (unsigned)sk->sk_rcvbuf) {
- if (skb_shared(skb)) {
- copy_skb = skb_clone(skb, GFP_ATOMIC);
- } else {
- copy_skb = skb_get(skb);
- skb_head = skb->data;
+ if (po->tp_version <= TPACKET_V2) {
+ if (macoff + snaplen > po->rx_ring.frame_size) {
+ if (po->copy_thresh &&
+ atomic_read(&sk->sk_rmem_alloc) + skb->truesize
+ < (unsigned)sk->sk_rcvbuf) {
+ if (skb_shared(skb)) {
+ copy_skb = skb_clone(skb, GFP_ATOMIC);
+ } else {
+ copy_skb = skb_get(skb);
+ skb_head = skb->data;
+ }
+ if (copy_skb)
+ skb_set_owner_r(copy_skb, sk);
}
- if (copy_skb)
- skb_set_owner_r(copy_skb, sk);
+ snaplen = po->rx_ring.frame_size - macoff;
+ if ((int)snaplen < 0)
+ snaplen = 0;
}
- snaplen = po->rx_ring.frame_size - macoff;
- if ((int)snaplen < 0)
- snaplen = 0;
}
-
spin_lock(&sk->sk_receive_queue.lock);
- h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
+ h.raw = packet_current_rx_frame(po, skb,
+ TP_STATUS_KERNEL, (macoff+snaplen));
if (!h.raw)
goto ring_is_full;
- packet_increment_head(&po->rx_ring);
+ if (po->tp_version <= TPACKET_V2) {
+ packet_increment_rx_head(po, &po->rx_ring);
+ /*
+ * LOSING will be reported till you read the stats,
+ * because it's COR - Clear On Read.
+ * Anyways, moving it for V1/V2 only as V3 doesn't need this
+ * at packet level.
+ */
+ if (po->stats.tp_drops)
+ status |= TP_STATUS_LOSING;
+ }
po->stats.tp_packets++;
if (copy_skb) {
status |= TP_STATUS_COPY;
__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
}
- if (!po->stats.tp_drops)
- status &= ~TP_STATUS_LOSING;
spin_unlock(&sk->sk_receive_queue.lock);
skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
@@ -1117,6 +1849,29 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
h.h2->tp_padding = 0;
hdrlen = sizeof(*h.h2);
break;
+ case TPACKET_V3:
+ /* tp_nxt_offset,vlan are already populated above.
+ * So DONT clear those fields here
+ */
+ h.h3->tp_status |= status;
+ h.h3->tp_len = skb->len;
+ h.h3->tp_snaplen = snaplen;
+ h.h3->tp_mac = macoff;
+ h.h3->tp_net = netoff;
+ if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
+ && shhwtstamps->syststamp.tv64)
+ ts = ktime_to_timespec(shhwtstamps->syststamp);
+ else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
+ && shhwtstamps->hwtstamp.tv64)
+ ts = ktime_to_timespec(shhwtstamps->hwtstamp);
+ else if (skb->tstamp.tv64)
+ ts = ktime_to_timespec(skb->tstamp);
+ else
+ getnstimeofday(&ts);
+ h.h3->tp_sec = ts.tv_sec;
+ h.h3->tp_nsec = ts.tv_nsec;
+ hdrlen = sizeof(*h.h3);
+ break;
default:
BUG();
}
@@ -1137,13 +1892,19 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
{
u8 *start, *end;
- end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
- for (start = h.raw; start < end; start += PAGE_SIZE)
- flush_dcache_page(pgv_to_page(start));
+ if (po->tp_version <= TPACKET_V2) {
+ end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
+ + macoff + snaplen);
+ for (start = h.raw; start < end; start += PAGE_SIZE)
+ flush_dcache_page(pgv_to_page(start));
+ }
smp_wmb();
}
#endif
- __packet_set_status(po, h.raw, status);
+ if (po->tp_version <= TPACKET_V2)
+ __packet_set_status(po, h.raw, status);
+ else
+ prb_clear_blk_fill_status(&po->rx_ring);
sk->sk_data_ready(sk, 0);
@@ -1170,8 +1931,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
struct packet_sock *po = pkt_sk(skb->sk);
void *ph;
- BUG_ON(skb == NULL);
-
if (likely(po->tx_ring.pg_vec)) {
ph = skb_shinfo(skb)->destructor_arg;
BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
@@ -1634,7 +2393,7 @@ static int packet_release(struct socket *sock)
struct sock *sk = sock->sk;
struct packet_sock *po;
struct net *net;
- struct tpacket_req req;
+ union tpacket_req_u req_u;
if (!sk)
return 0;
@@ -1657,13 +2416,13 @@ static int packet_release(struct socket *sock)
packet_flush_mclist(sk);
- memset(&req, 0, sizeof(req));
+ memset(&req_u, 0, sizeof(req_u));
if (po->rx_ring.pg_vec)
- packet_set_ring(sk, &req, 1, 0);
+ packet_set_ring(sk, &req_u, 1, 0);
if (po->tx_ring.pg_vec)
- packet_set_ring(sk, &req, 1, 1);
+ packet_set_ring(sk, &req_u, 1, 1);
fanout_release(sk);
@@ -2283,15 +3042,27 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
case PACKET_RX_RING:
case PACKET_TX_RING:
{
- struct tpacket_req req;
+ union tpacket_req_u req_u;
+ int len;
- if (optlen < sizeof(req))
+ switch (po->tp_version) {
+ case TPACKET_V1:
+ case TPACKET_V2:
+ len = sizeof(req_u.req);
+ break;
+ case TPACKET_V3:
+ default:
+ len = sizeof(req_u.req3);
+ break;
+ }
+ if (optlen < len)
return -EINVAL;
if (pkt_sk(sk)->has_vnet_hdr)
return -EINVAL;
- if (copy_from_user(&req, optval, sizeof(req)))
+ if (copy_from_user(&req_u.req, optval, len))
return -EFAULT;
- return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
+ return packet_set_ring(sk, &req_u, 0,
+ optname == PACKET_TX_RING);
}
case PACKET_COPY_THRESH:
{
@@ -2318,6 +3089,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
switch (val) {
case TPACKET_V1:
case TPACKET_V2:
+ case TPACKET_V3:
po->tp_version = val;
return 0;
default:
@@ -2427,6 +3199,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
struct packet_sock *po = pkt_sk(sk);
void *data;
struct tpacket_stats st;
+ union tpacket_stats_u st_u;
if (level != SOL_PACKET)
return -ENOPROTOOPT;
@@ -2439,15 +3212,27 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
switch (optname) {
case PACKET_STATISTICS:
- if (len > sizeof(struct tpacket_stats))
- len = sizeof(struct tpacket_stats);
+ if (po->tp_version == TPACKET_V3) {
+ len = sizeof(struct tpacket_stats_v3);
+ } else {
+ if (len > sizeof(struct tpacket_stats))
+ len = sizeof(struct tpacket_stats);
+ }
spin_lock_bh(&sk->sk_receive_queue.lock);
- st = po->stats;
+ if (po->tp_version == TPACKET_V3) {
+ memcpy(&st_u.stats3, &po->stats,
+ sizeof(struct tpacket_stats));
+ st_u.stats3.tp_freeze_q_cnt =
+ po->stats_u.stats3.tp_freeze_q_cnt;
+ st_u.stats3.tp_packets += po->stats.tp_drops;
+ data = &st_u.stats3;
+ } else {
+ st = po->stats;
+ st.tp_packets += st.tp_drops;
+ data = &st;
+ }
memset(&po->stats, 0, sizeof(st));
spin_unlock_bh(&sk->sk_receive_queue.lock);
- st.tp_packets += st.tp_drops;
-
- data = &st;
break;
case PACKET_AUXDATA:
if (len > sizeof(int))
@@ -2488,6 +3273,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case TPACKET_V2:
val = sizeof(struct tpacket2_hdr);
break;
+ case TPACKET_V3:
+ val = sizeof(struct tpacket3_hdr);
+ break;
default:
return -EINVAL;
}
@@ -2644,7 +3432,8 @@ static unsigned int packet_poll(struct file *file, struct socket *sock,
spin_lock_bh(&sk->sk_receive_queue.lock);
if (po->rx_ring.pg_vec) {
- if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
+ if (!packet_previous_rx_frame(po, &po->rx_ring,
+ TP_STATUS_KERNEL))
mask |= POLLIN | POLLRDNORM;
}
spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -2763,7 +3552,7 @@ out_free_pgvec:
goto out;
}
-static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
+static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring)
{
struct pgv *pg_vec = NULL;
@@ -2772,7 +3561,15 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
struct packet_ring_buffer *rb;
struct sk_buff_head *rb_queue;
__be16 num;
- int err;
+ int err = -EINVAL;
+ /* Added to avoid minimal code churn */
+ struct tpacket_req *req = &req_u->req;
+
+ /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
+ if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
+ WARN(1, "Tx-ring is not supported.\n");
+ goto out;
+ }
rb = tx_ring ? &po->tx_ring : &po->rx_ring;
rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
@@ -2798,6 +3595,9 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
case TPACKET_V2:
po->tp_hdrlen = TPACKET2_HDRLEN;
break;
+ case TPACKET_V3:
+ po->tp_hdrlen = TPACKET3_HDRLEN;
+ break;
}
err = -EINVAL;
@@ -2823,6 +3623,17 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
pg_vec = alloc_pg_vec(req, order);
if (unlikely(!pg_vec))
goto out;
+ switch (po->tp_version) {
+ case TPACKET_V3:
+ /* Transmit path is not supported. We checked
+ * it above but just being paranoid
+ */
+ if (!tx_ring)
+ init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
+ break;
+ default:
+ break;
+ }
}
/* Done */
else {
@@ -2875,7 +3686,11 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
register_prot_hook(sk);
}
spin_unlock(&po->bind_lock);
-
+ if (closing && (po->tp_version > TPACKET_V2)) {
+ /* Because we don't support block-based V3 on tx-ring */
+ if (!tx_ring)
+ prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+ }
release_sock(sk);
if (pg_vec)
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index c6fffd946d4..bf10ea8fbbf 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -480,7 +480,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
if (proto_tab[protocol])
err = -EBUSY;
else
- rcu_assign_pointer(proto_tab[protocol], pp);
+ RCU_INIT_POINTER(proto_tab[protocol], pp);
mutex_unlock(&proto_tab_lock);
return err;
@@ -491,7 +491,7 @@ void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
{
mutex_lock(&proto_tab_lock);
BUG_ON(proto_tab[protocol] != pp);
- rcu_assign_pointer(proto_tab[protocol], NULL);
+ RCU_INIT_POINTER(proto_tab[protocol], NULL);
mutex_unlock(&proto_tab_lock);
synchronize_rcu();
proto_unregister(pp->prot);
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index d2df8f33160..c5827614376 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -276,7 +276,7 @@ static void phonet_route_autodel(struct net_device *dev)
mutex_lock(&pnn->routes.lock);
for (i = 0; i < 64; i++)
if (dev == pnn->routes.table[i]) {
- rcu_assign_pointer(pnn->routes.table[i], NULL);
+ RCU_INIT_POINTER(pnn->routes.table[i], NULL);
set_bit(i, deleted);
}
mutex_unlock(&pnn->routes.lock);
@@ -390,7 +390,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
daddr = daddr >> 2;
mutex_lock(&routes->lock);
if (routes->table[daddr] == NULL) {
- rcu_assign_pointer(routes->table[daddr], dev);
+ RCU_INIT_POINTER(routes->table[daddr], dev);
dev_hold(dev);
err = 0;
}
@@ -406,7 +406,7 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
daddr = daddr >> 2;
mutex_lock(&routes->lock);
if (dev == routes->table[daddr])
- rcu_assign_pointer(routes->table[daddr], NULL);
+ RCU_INIT_POINTER(routes->table[daddr], NULL);
else
dev = NULL;
mutex_unlock(&routes->lock);
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index ab07711cf2f..676d18dc75b 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -679,7 +679,7 @@ int pn_sock_bind_res(struct sock *sk, u8 res)
mutex_lock(&resource_mutex);
if (pnres.sk[res] == NULL) {
sock_hold(sk);
- rcu_assign_pointer(pnres.sk[res], sk);
+ RCU_INIT_POINTER(pnres.sk[res], sk);
ret = 0;
}
mutex_unlock(&resource_mutex);
@@ -695,7 +695,7 @@ int pn_sock_unbind_res(struct sock *sk, u8 res)
mutex_lock(&resource_mutex);
if (pnres.sk[res] == sk) {
- rcu_assign_pointer(pnres.sk[res], NULL);
+ RCU_INIT_POINTER(pnres.sk[res], NULL);
ret = 0;
}
mutex_unlock(&resource_mutex);
@@ -714,7 +714,7 @@ void pn_sock_unbind_all_res(struct sock *sk)
mutex_lock(&resource_mutex);
for (res = 0; res < 256; res++) {
if (pnres.sk[res] == sk) {
- rcu_assign_pointer(pnres.sk[res], NULL);
+ RCU_INIT_POINTER(pnres.sk[res], NULL);
match++;
}
}
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index ec753b3ae72..4cf6dc7910e 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -9,6 +9,7 @@ config RDS
config RDS_RDMA
tristate "RDS over Infiniband and iWARP"
+ select LLIST
depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
---help---
Allow RDS to use Infiniband and iWARP as a transport.
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index cd67026be2d..51c868923f6 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -375,23 +375,21 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
goto out;
}
- ic->i_sends = vmalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
+ ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
ibdev_to_node(dev));
if (!ic->i_sends) {
ret = -ENOMEM;
rdsdebug("send allocation failed\n");
goto out;
}
- memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
- ic->i_recvs = vmalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
+ ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
ibdev_to_node(dev));
if (!ic->i_recvs) {
ret = -ENOMEM;
rdsdebug("recv allocation failed\n");
goto out;
}
- memset(ic->i_recvs, 0, ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
rds_ib_recv_init_ack(ic);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 819c35a0d9c..e8fdb172adb 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -33,10 +33,10 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
+#include <linux/llist.h>
#include "rds.h"
#include "ib.h"
-#include "xlist.h"
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0
@@ -49,7 +49,7 @@ struct rds_ib_mr {
struct rds_ib_mr_pool *pool;
struct ib_fmr *fmr;
- struct xlist_head xlist;
+ struct llist_node llnode;
/* unmap_list is for freeing */
struct list_head unmap_list;
@@ -71,9 +71,9 @@ struct rds_ib_mr_pool {
atomic_t item_count; /* total # of MRs */
atomic_t dirty_count; /* # dirty of MRs */
- struct xlist_head drop_list; /* MRs that have reached their max_maps limit */
- struct xlist_head free_list; /* unused MRs */
- struct xlist_head clean_list; /* global unused & unamapped MRs */
+ struct llist_head drop_list; /* MRs that have reached their max_maps limit */
+ struct llist_head free_list; /* unused MRs */
+ struct llist_head clean_list; /* global unused & unamapped MRs */
wait_queue_head_t flush_wait;
atomic_t free_pinned; /* memory pinned by free MRs */
@@ -220,9 +220,9 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
if (!pool)
return ERR_PTR(-ENOMEM);
- INIT_XLIST_HEAD(&pool->free_list);
- INIT_XLIST_HEAD(&pool->drop_list);
- INIT_XLIST_HEAD(&pool->clean_list);
+ init_llist_head(&pool->free_list);
+ init_llist_head(&pool->drop_list);
+ init_llist_head(&pool->clean_list);
mutex_init(&pool->flush_lock);
init_waitqueue_head(&pool->flush_wait);
INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
@@ -260,26 +260,18 @@ void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
kfree(pool);
}
-static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
- struct rds_ib_mr **ibmr_ret)
-{
- struct xlist_head *ibmr_xl;
- ibmr_xl = xlist_del_head_fast(xl);
- *ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
-}
-
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
struct rds_ib_mr *ibmr = NULL;
- struct xlist_head *ret;
+ struct llist_node *ret;
unsigned long *flag;
preempt_disable();
flag = &__get_cpu_var(clean_list_grace);
set_bit(CLEAN_LIST_BUSY_BIT, flag);
- ret = xlist_del_head(&pool->clean_list);
+ ret = llist_del_first(&pool->clean_list);
if (ret)
- ibmr = list_entry(ret, struct rds_ib_mr, xlist);
+ ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
clear_bit(CLEAN_LIST_BUSY_BIT, flag);
preempt_enable();
@@ -529,46 +521,44 @@ static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int fr
}
/*
- * given an xlist of mrs, put them all into the list_head for more processing
+ * given an llist of mrs, put them all into the list_head for more processing
*/
-static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
+static void llist_append_to_list(struct llist_head *llist, struct list_head *list)
{
struct rds_ib_mr *ibmr;
- struct xlist_head splice;
- struct xlist_head *cur;
- struct xlist_head *next;
-
- splice.next = NULL;
- xlist_splice(xlist, &splice);
- cur = splice.next;
- while (cur) {
- next = cur->next;
- ibmr = list_entry(cur, struct rds_ib_mr, xlist);
+ struct llist_node *node;
+ struct llist_node *next;
+
+ node = llist_del_all(llist);
+ while (node) {
+ next = node->next;
+ ibmr = llist_entry(node, struct rds_ib_mr, llnode);
list_add_tail(&ibmr->unmap_list, list);
- cur = next;
+ node = next;
}
}
/*
- * this takes a list head of mrs and turns it into an xlist of clusters.
- * each cluster has an xlist of MR_CLUSTER_SIZE mrs that are ready for
- * reuse.
+ * this takes a list head of mrs and turns it into linked llist nodes
+ * of clusters. Each cluster has linked llist nodes of
+ * MR_CLUSTER_SIZE mrs that are ready for reuse.
*/
-static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
- struct list_head *list, struct xlist_head *xlist,
- struct xlist_head **tail_ret)
+static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
+ struct list_head *list,
+ struct llist_node **nodes_head,
+ struct llist_node **nodes_tail)
{
struct rds_ib_mr *ibmr;
- struct xlist_head *cur_mr = xlist;
- struct xlist_head *tail_mr = NULL;
+ struct llist_node *cur = NULL;
+ struct llist_node **next = nodes_head;
list_for_each_entry(ibmr, list, unmap_list) {
- tail_mr = &ibmr->xlist;
- tail_mr->next = NULL;
- cur_mr->next = tail_mr;
- cur_mr = tail_mr;
+ cur = &ibmr->llnode;
+ *next = cur;
+ next = &cur->next;
}
- *tail_ret = tail_mr;
+ *next = NULL;
+ *nodes_tail = cur;
}
/*
@@ -581,8 +571,8 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
int free_all, struct rds_ib_mr **ibmr_ret)
{
struct rds_ib_mr *ibmr, *next;
- struct xlist_head clean_xlist;
- struct xlist_head *clean_tail;
+ struct llist_node *clean_nodes;
+ struct llist_node *clean_tail;
LIST_HEAD(unmap_list);
LIST_HEAD(fmr_list);
unsigned long unpinned = 0;
@@ -603,7 +593,7 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
prepare_to_wait(&pool->flush_wait, &wait,
TASK_UNINTERRUPTIBLE);
- if (xlist_empty(&pool->clean_list))
+ if (llist_empty(&pool->clean_list))
schedule();
ibmr = rds_ib_reuse_fmr(pool);
@@ -628,10 +618,10 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
/* Get the list of all MRs to be dropped. Ordering matters -
* we want to put drop_list ahead of free_list.
*/
- xlist_append_to_list(&pool->drop_list, &unmap_list);
- xlist_append_to_list(&pool->free_list, &unmap_list);
+ llist_append_to_list(&pool->drop_list, &unmap_list);
+ llist_append_to_list(&pool->free_list, &unmap_list);
if (free_all)
- xlist_append_to_list(&pool->clean_list, &unmap_list);
+ llist_append_to_list(&pool->clean_list, &unmap_list);
free_goal = rds_ib_flush_goal(pool, free_all);
@@ -663,22 +653,22 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
if (!list_empty(&unmap_list)) {
/* we have to make sure that none of the things we're about
* to put on the clean list would race with other cpus trying
- * to pull items off. The xlist would explode if we managed to
+ * to pull items off. The llist would explode if we managed to
* remove something from the clean list and then add it back again
- * while another CPU was spinning on that same item in xlist_del_head.
+ * while another CPU was spinning on that same item in llist_del_first.
*
- * This is pretty unlikely, but just in case wait for an xlist grace period
+ * This is pretty unlikely, but just in case wait for an llist grace period
* here before adding anything back into the clean list.
*/
wait_clean_list_grace();
- list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
+ list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
if (ibmr_ret)
- refill_local(pool, &clean_xlist, ibmr_ret);
+ *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
- /* refill_local may have emptied our list */
- if (!xlist_empty(&clean_xlist))
- xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);
+ /* more than one entry in llist nodes */
+ if (clean_nodes->next)
+ llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
}
@@ -711,9 +701,9 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
/* Return it to the pool's free list */
if (ibmr->remap_count >= pool->fmr_attr.max_maps)
- xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
+ llist_add(&ibmr->llnode, &pool->drop_list);
else
- xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);
+ llist_add(&ibmr->llnode, &pool->free_list);
atomic_add(ibmr->sg_len, &pool->free_pinned);
atomic_inc(&pool->dirty_count);
diff --git a/net/rds/xlist.h b/net/rds/xlist.h
deleted file mode 100644
index e6b5190dadd..00000000000
--- a/net/rds/xlist.h
+++ /dev/null
@@ -1,80 +0,0 @@
-#ifndef _LINUX_XLIST_H
-#define _LINUX_XLIST_H
-
-#include <linux/stddef.h>
-#include <linux/poison.h>
-#include <linux/prefetch.h>
-#include <asm/system.h>
-
-struct xlist_head {
- struct xlist_head *next;
-};
-
-static inline void INIT_XLIST_HEAD(struct xlist_head *list)
-{
- list->next = NULL;
-}
-
-static inline int xlist_empty(struct xlist_head *head)
-{
- return head->next == NULL;
-}
-
-static inline void xlist_add(struct xlist_head *new, struct xlist_head *tail,
- struct xlist_head *head)
-{
- struct xlist_head *cur;
- struct xlist_head *check;
-
- while (1) {
- cur = head->next;
- tail->next = cur;
- check = cmpxchg(&head->next, cur, new);
- if (check == cur)
- break;
- }
-}
-
-static inline struct xlist_head *xlist_del_head(struct xlist_head *head)
-{
- struct xlist_head *cur;
- struct xlist_head *check;
- struct xlist_head *next;
-
- while (1) {
- cur = head->next;
- if (!cur)
- goto out;
-
- next = cur->next;
- check = cmpxchg(&head->next, cur, next);
- if (check == cur)
- goto out;
- }
-out:
- return cur;
-}
-
-static inline struct xlist_head *xlist_del_head_fast(struct xlist_head *head)
-{
- struct xlist_head *cur;
-
- cur = head->next;
- if (!cur)
- return NULL;
-
- head->next = cur->next;
- return cur;
-}
-
-static inline void xlist_splice(struct xlist_head *list,
- struct xlist_head *head)
-{
- struct xlist_head *cur;
-
- WARN_ON(head->next);
- cur = xchg(&list->next, NULL);
- head->next = cur;
-}
-
-#endif
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index be90640a277..5be19575c34 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -235,7 +235,7 @@ static bool __rfkill_set_hw_state(struct rfkill *rfkill,
else
rfkill->state &= ~RFKILL_BLOCK_HW;
*change = prev != blocked;
- any = rfkill->state & RFKILL_BLOCK_ANY;
+ any = !!(rfkill->state & RFKILL_BLOCK_ANY);
spin_unlock_irqrestore(&rfkill->lock, flags);
rfkill_led_trigger_event(rfkill);
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 256c5ddd2d7..128677d6905 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -101,6 +101,14 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
if (!rfkill)
return -ENOMEM;
+ if (pdata->gpio_runtime_setup) {
+ ret = pdata->gpio_runtime_setup(pdev);
+ if (ret) {
+ pr_warn("%s: can't set up gpio\n", __func__);
+ return ret;
+ }
+ }
+
rfkill->pdata = pdata;
len = strlen(pdata->name);
@@ -182,7 +190,10 @@ fail_alloc:
static int rfkill_gpio_remove(struct platform_device *pdev)
{
struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
+ struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
+ if (pdata->gpio_runtime_close)
+ pdata->gpio_runtime_close(pdev);
rfkill_unregister(rfkill->rfkill_dev);
rfkill_destroy(rfkill->rfkill_dev);
if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index 18dc512a10f..3ca7277a3c3 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -90,7 +90,6 @@ static int __devinit rfkill_regulator_probe(struct platform_device *pdev)
pdata->type,
&rfkill_regulator_ops, rfkill_data);
if (rf_kill == NULL) {
- dev_err(&pdev->dev, "Cannot alloc rfkill device\n");
ret = -ENOMEM;
goto err_rfkill_alloc;
}
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 6994214db8f..9e087d88567 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -65,132 +65,134 @@ static inline u32 addr_fold(void *addr)
return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}
-static u32 flow_get_src(struct sk_buff *skb)
+static u32 flow_get_src(const struct sk_buff *skb, int nhoff)
{
+ __be32 *data = NULL, hdata;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
- if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
- return ntohl(ip_hdr(skb)->saddr);
+ data = skb_header_pointer(skb,
+ nhoff + offsetof(struct iphdr,
+ saddr),
+ 4, &hdata);
break;
case htons(ETH_P_IPV6):
- if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
- return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
+ data = skb_header_pointer(skb,
+ nhoff + offsetof(struct ipv6hdr,
+ saddr.s6_addr32[3]),
+ 4, &hdata);
break;
}
+ if (data)
+ return ntohl(*data);
return addr_fold(skb->sk);
}
-static u32 flow_get_dst(struct sk_buff *skb)
+static u32 flow_get_dst(const struct sk_buff *skb, int nhoff)
{
+ __be32 *data = NULL, hdata;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
- if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
- return ntohl(ip_hdr(skb)->daddr);
+ data = skb_header_pointer(skb,
+ nhoff + offsetof(struct iphdr,
+ daddr),
+ 4, &hdata);
break;
case htons(ETH_P_IPV6):
- if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
- return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
+ data = skb_header_pointer(skb,
+ nhoff + offsetof(struct ipv6hdr,
+ daddr.s6_addr32[3]),
+ 4, &hdata);
break;
}
+ if (data)
+ return ntohl(*data);
return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}
-static u32 flow_get_proto(struct sk_buff *skb)
+static u32 flow_get_proto(const struct sk_buff *skb, int nhoff)
{
+ __u8 *data = NULL, hdata;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
- return pskb_network_may_pull(skb, sizeof(struct iphdr)) ?
- ip_hdr(skb)->protocol : 0;
+ data = skb_header_pointer(skb,
+ nhoff + offsetof(struct iphdr,
+ protocol),
+ 1, &hdata);
+ break;
case htons(ETH_P_IPV6):
- return pskb_network_may_pull(skb, sizeof(struct ipv6hdr)) ?
- ipv6_hdr(skb)->nexthdr : 0;
- default:
- return 0;
+ data = skb_header_pointer(skb,
+ nhoff + offsetof(struct ipv6hdr,
+ nexthdr),
+ 1, &hdata);
+ break;
}
+ if (data)
+ return *data;
+ return 0;
}
-static u32 flow_get_proto_src(struct sk_buff *skb)
+/* helper function to get either src or dst port */
+static __be16 *flow_get_proto_common(const struct sk_buff *skb, int nhoff,
+ __be16 *_port, int dst)
{
+ __be16 *port = NULL;
+ int poff;
+
switch (skb->protocol) {
case htons(ETH_P_IP): {
- struct iphdr *iph;
- int poff;
+ struct iphdr *iph, _iph;
- if (!pskb_network_may_pull(skb, sizeof(*iph)))
+ iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+ if (!iph)
break;
- iph = ip_hdr(skb);
if (ip_is_fragment(iph))
break;
poff = proto_ports_offset(iph->protocol);
- if (poff >= 0 &&
- pskb_network_may_pull(skb, iph->ihl * 4 + 2 + poff)) {
- iph = ip_hdr(skb);
- return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
- poff));
- }
+ if (poff >= 0)
+ port = skb_header_pointer(skb,
+ nhoff + iph->ihl * 4 + poff + dst,
+ sizeof(*_port), _port);
break;
}
case htons(ETH_P_IPV6): {
- struct ipv6hdr *iph;
- int poff;
+ struct ipv6hdr *iph, _iph;
- if (!pskb_network_may_pull(skb, sizeof(*iph)))
+ iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+ if (!iph)
break;
- iph = ipv6_hdr(skb);
poff = proto_ports_offset(iph->nexthdr);
- if (poff >= 0 &&
- pskb_network_may_pull(skb, sizeof(*iph) + poff + 2)) {
- iph = ipv6_hdr(skb);
- return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
- poff));
- }
+ if (poff >= 0)
+ port = skb_header_pointer(skb,
+ nhoff + sizeof(*iph) + poff + dst,
+ sizeof(*_port), _port);
break;
}
}
- return addr_fold(skb->sk);
+ return port;
}
-static u32 flow_get_proto_dst(struct sk_buff *skb)
+static u32 flow_get_proto_src(const struct sk_buff *skb, int nhoff)
{
- switch (skb->protocol) {
- case htons(ETH_P_IP): {
- struct iphdr *iph;
- int poff;
+ __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 0);
- if (!pskb_network_may_pull(skb, sizeof(*iph)))
- break;
- iph = ip_hdr(skb);
- if (ip_is_fragment(iph))
- break;
- poff = proto_ports_offset(iph->protocol);
- if (poff >= 0 &&
- pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
- iph = ip_hdr(skb);
- return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
- 2 + poff));
- }
- break;
- }
- case htons(ETH_P_IPV6): {
- struct ipv6hdr *iph;
- int poff;
+ if (port)
+ return ntohs(*port);
- if (!pskb_network_may_pull(skb, sizeof(*iph)))
- break;
- iph = ipv6_hdr(skb);
- poff = proto_ports_offset(iph->nexthdr);
- if (poff >= 0 &&
- pskb_network_may_pull(skb, sizeof(*iph) + poff + 4)) {
- iph = ipv6_hdr(skb);
- return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
- poff + 2));
- }
- break;
- }
- }
+ return addr_fold(skb->sk);
+}
+
+static u32 flow_get_proto_dst(const struct sk_buff *skb, int nhoff)
+{
+ __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 2);
+
+ if (port)
+ return ntohs(*port);
return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}
@@ -223,7 +225,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
#define CTTUPLE(skb, member) \
({ \
enum ip_conntrack_info ctinfo; \
- struct nf_conn *ct = nf_ct_get(skb, &ctinfo); \
+ const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); \
if (ct == NULL) \
goto fallback; \
ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member; \
@@ -236,7 +238,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
})
#endif
-static u32 flow_get_nfct_src(struct sk_buff *skb)
+static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff)
{
switch (skb->protocol) {
case htons(ETH_P_IP):
@@ -245,10 +247,10 @@ static u32 flow_get_nfct_src(struct sk_buff *skb)
return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
}
fallback:
- return flow_get_src(skb);
+ return flow_get_src(skb, nhoff);
}
-static u32 flow_get_nfct_dst(struct sk_buff *skb)
+static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff)
{
switch (skb->protocol) {
case htons(ETH_P_IP):
@@ -257,21 +259,21 @@ static u32 flow_get_nfct_dst(struct sk_buff *skb)
return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
}
fallback:
- return flow_get_dst(skb);
+ return flow_get_dst(skb, nhoff);
}
-static u32 flow_get_nfct_proto_src(struct sk_buff *skb)
+static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, int nhoff)
{
return ntohs(CTTUPLE(skb, src.u.all));
fallback:
- return flow_get_proto_src(skb);
+ return flow_get_proto_src(skb, nhoff);
}
-static u32 flow_get_nfct_proto_dst(struct sk_buff *skb)
+static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, int nhoff)
{
return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
- return flow_get_proto_dst(skb);
+ return flow_get_proto_dst(skb, nhoff);
}
static u32 flow_get_rtclassid(const struct sk_buff *skb)
@@ -313,17 +315,19 @@ static u32 flow_get_rxhash(struct sk_buff *skb)
static u32 flow_key_get(struct sk_buff *skb, int key)
{
+ int nhoff = skb_network_offset(skb);
+
switch (key) {
case FLOW_KEY_SRC:
- return flow_get_src(skb);
+ return flow_get_src(skb, nhoff);
case FLOW_KEY_DST:
- return flow_get_dst(skb);
+ return flow_get_dst(skb, nhoff);
case FLOW_KEY_PROTO:
- return flow_get_proto(skb);
+ return flow_get_proto(skb, nhoff);
case FLOW_KEY_PROTO_SRC:
- return flow_get_proto_src(skb);
+ return flow_get_proto_src(skb, nhoff);
case FLOW_KEY_PROTO_DST:
- return flow_get_proto_dst(skb);
+ return flow_get_proto_dst(skb, nhoff);
case FLOW_KEY_IIF:
return flow_get_iif(skb);
case FLOW_KEY_PRIORITY:
@@ -333,13 +337,13 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
case FLOW_KEY_NFCT:
return flow_get_nfct(skb);
case FLOW_KEY_NFCT_SRC:
- return flow_get_nfct_src(skb);
+ return flow_get_nfct_src(skb, nhoff);
case FLOW_KEY_NFCT_DST:
- return flow_get_nfct_dst(skb);
+ return flow_get_nfct_dst(skb, nhoff);
case FLOW_KEY_NFCT_PROTO_SRC:
- return flow_get_nfct_proto_src(skb);
+ return flow_get_nfct_proto_src(skb, nhoff);
case FLOW_KEY_NFCT_PROTO_DST:
- return flow_get_nfct_proto_dst(skb);
+ return flow_get_nfct_proto_dst(skb, nhoff);
case FLOW_KEY_RTCLASSID:
return flow_get_rtclassid(skb);
case FLOW_KEY_SKUID:
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 0a833d0c1f6..e83c272c032 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -287,6 +287,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
u32 r, slot, salt, sfbhash;
int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ if (unlikely(sch->q.qlen >= q->limit)) {
+ sch->qstats.overlimits++;
+ q->stats.queuedrop++;
+ goto drop;
+ }
+
if (q->rehash_interval > 0) {
unsigned long limit = q->rehash_time + q->rehash_interval;
@@ -332,12 +338,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
slot ^= 1;
sfb_skb_cb(skb)->hashes[slot] = 0;
- if (unlikely(minqlen >= q->max || sch->q.qlen >= q->limit)) {
+ if (unlikely(minqlen >= q->max)) {
sch->qstats.overlimits++;
- if (minqlen >= q->max)
- q->stats.bucketdrop++;
- else
- q->stats.queuedrop++;
+ q->stats.bucketdrop++;
goto drop;
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index dc16b90ddb6..152b5b3c3ff 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -282,6 +282,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->peer.asconf_capable = 1;
asoc->asconf_addr_del_pending = NULL;
asoc->src_out_of_asoc_ok = 0;
+ asoc->new_transport = NULL;
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index a6d27bf563a..14c2b06028f 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -917,6 +917,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
* current cwnd).
*/
if (!list_empty(&q->retransmit)) {
+ if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
+ goto sctp_flush_out;
if (transport == asoc->peer.retran_path)
goto retran;
@@ -989,6 +991,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
((new_transport->state == SCTP_INACTIVE) ||
(new_transport->state == SCTP_UNCONFIRMED)))
new_transport = asoc->peer.active_path;
+ if (new_transport->state == SCTP_UNCONFIRMED)
+ continue;
/* Change packets if necessary. */
if (new_transport != transport) {
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 91784f44a2e..61b9fca5a17 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1299,7 +1299,7 @@ SCTP_STATIC __init int sctp_init(void)
max_share = min(4UL*1024*1024, limit);
sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */
- sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1));
+ sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1);
sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);
sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 81db4e38535..0121e0ab035 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3015,6 +3015,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
/* Start the heartbeat timer. */
if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
sctp_transport_hold(peer);
+ asoc->new_transport = peer;
break;
case SCTP_PARAM_DEL_IP:
/* ADDIP 4.3 D7) If a request is received to delete the
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index a0f31e6c1c6..891f5db8cc3 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3618,6 +3618,11 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
*/
asconf_ack->dest = chunk->source;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
+ if (asoc->new_transport) {
+ sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport,
+ commands);
+ ((struct sctp_association *)asoc)->new_transport = NULL;
+ }
return SCTP_DISPOSITION_CONSUME;
}
diff --git a/net/socket.c b/net/socket.c
index ffe92ca32f2..2877647f347 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2472,7 +2472,7 @@ int sock_register(const struct net_proto_family *ops)
lockdep_is_held(&net_family_lock)))
err = -EEXIST;
else {
- rcu_assign_pointer(net_families[ops->family], ops);
+ RCU_INIT_POINTER(net_families[ops->family], ops);
err = 0;
}
spin_unlock(&net_family_lock);
@@ -2500,7 +2500,7 @@ void sock_unregister(int family)
BUG_ON(family < 0 || family >= NPROTO);
spin_lock(&net_family_lock);
- rcu_assign_pointer(net_families[family], NULL);
+ RCU_INIT_POINTER(net_families[family], NULL);
spin_unlock(&net_family_lock);
synchronize_rcu();
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 4195233c491..4548757c987 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -255,12 +255,13 @@ EXPORT_SYMBOL_GPL(rpc_pton);
/**
* rpc_sockaddr2uaddr - Construct a universal address string from @sap.
* @sap: socket address
+ * @gfp_flags: allocation mode
*
* Returns a %NUL-terminated string in dynamically allocated memory;
* otherwise NULL is returned if an error occurred. Caller must
* free the returned string.
*/
-char *rpc_sockaddr2uaddr(const struct sockaddr *sap)
+char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags)
{
char portbuf[RPCBIND_MAXUADDRPLEN];
char addrbuf[RPCBIND_MAXUADDRLEN];
@@ -288,9 +289,8 @@ char *rpc_sockaddr2uaddr(const struct sockaddr *sap)
if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf))
return NULL;
- return kstrdup(addrbuf, GFP_KERNEL);
+ return kstrdup(addrbuf, gfp_flags);
}
-EXPORT_SYMBOL_GPL(rpc_sockaddr2uaddr);
/**
* rpc_uaddr2sockaddr - convert a universal address to a socket address.
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 364eb45e989..afb56553dfe 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -122,7 +122,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
return;
gss_get_ctx(ctx);
- rcu_assign_pointer(gss_cred->gc_ctx, ctx);
+ RCU_INIT_POINTER(gss_cred->gc_ctx, ctx);
set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
smp_mb__before_clear_bit();
clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
@@ -603,26 +603,6 @@ out:
return err;
}
-static ssize_t
-gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
- char __user *dst, size_t buflen)
-{
- char *data = (char *)msg->data + msg->copied;
- size_t mlen = min(msg->len, buflen);
- unsigned long left;
-
- left = copy_to_user(dst, data, mlen);
- if (left == mlen) {
- msg->errno = -EFAULT;
- return -EFAULT;
- }
-
- mlen -= left;
- msg->copied += mlen;
- msg->errno = 0;
- return mlen;
-}
-
#define MSG_BUF_MAXSIZE 1024
static ssize_t
@@ -970,7 +950,7 @@ gss_destroy_nullcred(struct rpc_cred *cred)
struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
struct gss_cl_ctx *ctx = gss_cred->gc_ctx;
- rcu_assign_pointer(gss_cred->gc_ctx, NULL);
+ RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
call_rcu(&cred->cr_rcu, gss_free_cred_callback);
if (ctx)
gss_put_ctx(ctx);
@@ -1590,7 +1570,7 @@ static const struct rpc_credops gss_nullops = {
};
static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
- .upcall = gss_pipe_upcall,
+ .upcall = rpc_pipe_generic_upcall,
.downcall = gss_pipe_downcall,
.destroy_msg = gss_pipe_destroy_msg,
.open_pipe = gss_pipe_open_v0,
@@ -1598,7 +1578,7 @@ static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
};
static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
- .upcall = gss_pipe_upcall,
+ .upcall = rpc_pipe_generic_upcall,
.downcall = gss_pipe_downcall,
.destroy_msg = gss_pipe_destroy_msg,
.open_pipe = gss_pipe_open_v1,
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index c5347d29cfb..f0268ea7e71 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -850,7 +850,9 @@ rpc_restart_call_prepare(struct rpc_task *task)
{
if (RPC_ASSASSINATED(task))
return 0;
- task->tk_action = rpc_prepare_task;
+ task->tk_action = call_start;
+ if (task->tk_ops->rpc_call_prepare != NULL)
+ task->tk_action = rpc_prepare_task;
return 1;
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index b181e344132..bfddd68b31d 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -77,6 +77,26 @@ rpc_timeout_upcall_queue(struct work_struct *work)
rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}
+ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
+ char __user *dst, size_t buflen)
+{
+ char *data = (char *)msg->data + msg->copied;
+ size_t mlen = min(msg->len - msg->copied, buflen);
+ unsigned long left;
+
+ left = copy_to_user(dst, data, mlen);
+ if (left == mlen) {
+ msg->errno = -EFAULT;
+ return -EFAULT;
+ }
+
+ mlen -= left;
+ msg->copied += mlen;
+ msg->errno = 0;
+ return mlen;
+}
+EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
+
/**
* rpc_queue_upcall - queue an upcall message to userspace
* @inode: inode of upcall pipe on which to queue given message
@@ -1084,3 +1104,6 @@ void unregister_rpc_pipefs(void)
kmem_cache_destroy(rpc_inode_cachep);
unregister_filesystem(&rpc_pipe_fs_type);
}
+
+/* Make 'mount -t rpc_pipefs ...' autoload this module. */
+MODULE_ALIAS("rpc_pipefs");
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index e45d2fbbe5a..f588b852d41 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -410,7 +410,7 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
unsigned short port = ntohs(sin->sin_port);
int result;
- map->r_addr = rpc_sockaddr2uaddr(sap);
+ map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
"local rpcbind\n", (port ? "" : "un"),
@@ -437,7 +437,7 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
unsigned short port = ntohs(sin6->sin6_port);
int result;
- map->r_addr = rpc_sockaddr2uaddr(sap);
+ map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
"local rpcbind\n", (port ? "" : "un"),
@@ -686,7 +686,7 @@ void rpcb_getport_async(struct rpc_task *task)
case RPCBVERS_4:
case RPCBVERS_3:
map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID);
- map->r_addr = rpc_sockaddr2uaddr(sap);
+ map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
map->r_owner = "";
break;
case RPCBVERS_2:
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6a69a1131fb..30d70abb4e2 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -295,6 +295,18 @@ svc_pool_map_put(void)
}
+static int svc_pool_map_get_node(unsigned int pidx)
+{
+ const struct svc_pool_map *m = &svc_pool_map;
+
+ if (m->count) {
+ if (m->mode == SVC_POOL_PERCPU)
+ return cpu_to_node(m->pool_to[pidx]);
+ if (m->mode == SVC_POOL_PERNODE)
+ return m->pool_to[pidx];
+ }
+ return NUMA_NO_NODE;
+}
/*
* Set the given thread's cpus_allowed mask so that it
* will only run on cpus in the given pool.
@@ -499,7 +511,7 @@ EXPORT_SYMBOL_GPL(svc_destroy);
* We allocate pages and place them in rq_argpages.
*/
static int
-svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
+svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
unsigned int pages, arghi;
@@ -513,7 +525,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
arghi = 0;
BUG_ON(pages > RPCSVC_MAXPAGES);
while (pages) {
- struct page *p = alloc_page(GFP_KERNEL);
+ struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
if (!p)
break;
rqstp->rq_pages[arghi++] = p;
@@ -536,11 +548,11 @@ svc_release_buffer(struct svc_rqst *rqstp)
}
struct svc_rqst *
-svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
+svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
struct svc_rqst *rqstp;
- rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
+ rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
if (!rqstp)
goto out_enomem;
@@ -554,15 +566,15 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
rqstp->rq_server = serv;
rqstp->rq_pool = pool;
- rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
+ rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
if (!rqstp->rq_argp)
goto out_thread;
- rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
+ rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
if (!rqstp->rq_resp)
goto out_thread;
- if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
+ if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
goto out_thread;
return rqstp;
@@ -647,6 +659,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
struct svc_pool *chosen_pool;
int error = 0;
unsigned int state = serv->sv_nrthreads-1;
+ int node;
if (pool == NULL) {
/* The -1 assumes caller has done a svc_get() */
@@ -662,14 +675,16 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
nrservs--;
chosen_pool = choose_pool(serv, pool, &state);
- rqstp = svc_prepare_thread(serv, chosen_pool);
+ node = svc_pool_map_get_node(chosen_pool->sp_id);
+ rqstp = svc_prepare_thread(serv, chosen_pool, node);
if (IS_ERR(rqstp)) {
error = PTR_ERR(rqstp);
break;
}
__module_get(serv->sv_module);
- task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
+ task = kthread_create_on_node(serv->sv_function, rqstp,
+ node, serv->sv_name);
if (IS_ERR(task)) {
error = PTR_ERR(task);
module_put(serv->sv_module);
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index bd31208bbb6..d86bb673e1f 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -254,8 +254,6 @@ EXPORT_SYMBOL_GPL(svc_create_xprt);
*/
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
- struct sockaddr *sin;
-
memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
rqstp->rq_addrlen = xprt->xpt_remotelen;
@@ -263,15 +261,8 @@ void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
* Destination address in request is needed for binding the
* source address in RPC replies/callbacks later.
*/
- sin = (struct sockaddr *)&xprt->xpt_local;
- switch (sin->sa_family) {
- case AF_INET:
- rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
- break;
- case AF_INET6:
- rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
- break;
- }
+ memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
+ rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 767d494de7a..dfd686eb0b7 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -143,19 +143,20 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
cmh->cmsg_level = SOL_IP;
cmh->cmsg_type = IP_PKTINFO;
pki->ipi_ifindex = 0;
- pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
+ pki->ipi_spec_dst.s_addr =
+ svc_daddr_in(rqstp)->sin_addr.s_addr;
cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
}
break;
case AF_INET6: {
struct in6_pktinfo *pki = CMSG_DATA(cmh);
+ struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);
cmh->cmsg_level = SOL_IPV6;
cmh->cmsg_type = IPV6_PKTINFO;
- pki->ipi6_ifindex = 0;
- ipv6_addr_copy(&pki->ipi6_addr,
- &rqstp->rq_daddr.addr6);
+ pki->ipi6_ifindex = daddr->sin6_scope_id;
+ ipv6_addr_copy(&pki->ipi6_addr, &daddr->sin6_addr);
cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
}
break;
@@ -498,9 +499,13 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
struct cmsghdr *cmh)
{
struct in_pktinfo *pki = CMSG_DATA(cmh);
+ struct sockaddr_in *daddr = svc_daddr_in(rqstp);
+
if (cmh->cmsg_type != IP_PKTINFO)
return 0;
- rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
+
+ daddr->sin_family = AF_INET;
+ daddr->sin_addr.s_addr = pki->ipi_spec_dst.s_addr;
return 1;
}
@@ -511,9 +516,14 @@ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
struct cmsghdr *cmh)
{
struct in6_pktinfo *pki = CMSG_DATA(cmh);
+ struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);
+
if (cmh->cmsg_type != IPV6_PKTINFO)
return 0;
- ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
+
+ daddr->sin6_family = AF_INET6;
+ ipv6_addr_copy(&daddr->sin6_addr, &pki->ipi6_addr);
+ daddr->sin6_scope_id = pki->ipi6_ifindex;
return 1;
}
@@ -614,6 +624,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
skb_free_datagram_locked(svsk->sk_sk, skb);
return 0;
}
+ rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp));
if (skb_is_nonlinear(skb)) {
/* we have to copy */
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 759b318b5ff..28908f54459 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -39,6 +39,7 @@
#include "link.h"
#include "port.h"
#include "bcast.h"
+#include "name_distr.h"
#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
@@ -298,14 +299,9 @@ static void bclink_send_nack(struct tipc_node *n_ptr)
msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
msg_set_bcast_tag(msg, tipc_own_tag);
- if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
- bcl->stats.sent_nacks++;
- buf_discard(buf);
- } else {
- tipc_bearer_schedule(bcl->b_ptr, bcl);
- bcl->proto_msg_queue = buf;
- bcl->stats.bearer_congs++;
- }
+ tipc_bearer_send(&bcbearer->bearer, buf, NULL);
+ bcl->stats.sent_nacks++;
+ buf_discard(buf);
/*
* Ensure we doesn't send another NACK msg to the node
@@ -426,20 +422,28 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
- struct tipc_node *node = tipc_node_find(msg_prevnode(msg));
+ struct tipc_node *node;
u32 next_in;
u32 seqno;
struct sk_buff *deferred;
- if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
- (msg_mc_netid(msg) != tipc_net_id))) {
- buf_discard(buf);
- return;
- }
+ /* Screen out unwanted broadcast messages */
+
+ if (msg_mc_netid(msg) != tipc_net_id)
+ goto exit;
+
+ node = tipc_node_find(msg_prevnode(msg));
+ if (unlikely(!node))
+ goto exit;
+
+ tipc_node_lock(node);
+ if (unlikely(!node->bclink.supported))
+ goto unlock;
if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
+ if (msg_type(msg) != STATE_MSG)
+ goto unlock;
if (msg_destnode(msg) == tipc_own_addr) {
- tipc_node_lock(node);
tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
tipc_node_unlock(node);
spin_lock_bh(&bc_lock);
@@ -449,18 +453,18 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
msg_bcgap_to(msg));
spin_unlock_bh(&bc_lock);
} else {
+ tipc_node_unlock(node);
tipc_bclink_peek_nack(msg_destnode(msg),
msg_bcast_tag(msg),
msg_bcgap_after(msg),
msg_bcgap_to(msg));
}
- buf_discard(buf);
- return;
+ goto exit;
}
- tipc_node_lock(node);
+ /* Handle in-sequence broadcast message */
+
receive:
- deferred = node->bclink.deferred_head;
next_in = mod(node->bclink.last_in + 1);
seqno = msg_seqno(msg);
@@ -474,7 +478,10 @@ receive:
}
if (likely(msg_isdata(msg))) {
tipc_node_unlock(node);
- tipc_port_recv_mcast(buf, NULL);
+ if (likely(msg_mcast(msg)))
+ tipc_port_recv_mcast(buf, NULL);
+ else
+ buf_discard(buf);
} else if (msg_user(msg) == MSG_BUNDLER) {
bcl->stats.recv_bundles++;
bcl->stats.recv_bundled += msg_msgcnt(msg);
@@ -487,18 +494,22 @@ receive:
bcl->stats.recv_fragmented++;
tipc_node_unlock(node);
tipc_net_route_msg(buf);
+ } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
+ tipc_node_unlock(node);
+ tipc_named_recv(buf);
} else {
tipc_node_unlock(node);
- tipc_net_route_msg(buf);
+ buf_discard(buf);
}
+ buf = NULL;
+ tipc_node_lock(node);
+ deferred = node->bclink.deferred_head;
if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
- tipc_node_lock(node);
buf = deferred;
msg = buf_msg(buf);
node->bclink.deferred_head = deferred->next;
goto receive;
}
- return;
} else if (less(next_in, seqno)) {
u32 gap_after = node->bclink.gap_after;
u32 gap_to = node->bclink.gap_to;
@@ -513,6 +524,7 @@ receive:
else if (less(gap_after, seqno) && less(seqno, gap_to))
node->bclink.gap_to = seqno;
}
+ buf = NULL;
if (bclink_ack_allowed(node->bclink.nack_sync)) {
if (gap_to != gap_after)
bclink_send_nack(node);
@@ -520,9 +532,11 @@ receive:
}
} else {
bcl->stats.duplicates++;
- buf_discard(buf);
}
+unlock:
tipc_node_unlock(node);
+exit:
+ buf_discard(buf);
}
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
@@ -535,10 +549,11 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
/**
* tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
*
- * Send through as many bearers as necessary to reach all nodes
- * that support TIPC multicasting.
+ * Send packet over as many bearers as necessary to reach all nodes
+ * that have joined the broadcast link.
*
- * Returns 0 if packet sent successfully, non-zero if not
+ * Returns 0 (packet sent successfully) under all circumstances,
+ * since the broadcast link's pseudo-bearer never blocks
*/
static int tipc_bcbearer_send(struct sk_buff *buf,
@@ -547,7 +562,12 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
{
int bp_index;
- /* Prepare buffer for broadcasting (if first time trying to send it) */
+ /*
+ * Prepare broadcast link message for reliable transmission,
+ * if first time trying to send it;
+ * preparation is skipped for broadcast link protocol messages
+ * since they are sent in an unreliable manner and don't need it
+ */
if (likely(!msg_non_seq(buf_msg(buf)))) {
struct tipc_msg *msg;
@@ -596,18 +616,12 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
}
if (bcbearer->remains_new.count == 0)
- return 0;
+ break; /* all targets reached */
bcbearer->remains = bcbearer->remains_new;
}
- /*
- * Unable to reach all targets (indicate success, since currently
- * there isn't code in place to properly block & unblock the
- * pseudo-bearer used by the broadcast link)
- */
-
- return TIPC_OK;
+ return 0;
}
/**
@@ -667,27 +681,6 @@ void tipc_bcbearer_sort(void)
spin_unlock_bh(&bc_lock);
}
-/**
- * tipc_bcbearer_push - resolve bearer congestion
- *
- * Forces bclink to push out any unsent packets, until all packets are gone
- * or congestion reoccurs.
- * No locks set when function called
- */
-
-void tipc_bcbearer_push(void)
-{
- struct tipc_bearer *b_ptr;
-
- spin_lock_bh(&bc_lock);
- b_ptr = &bcbearer->bearer;
- if (b_ptr->blocked) {
- b_ptr->blocked = 0;
- tipc_bearer_lock_push(b_ptr);
- }
- spin_unlock_bh(&bc_lock);
-}
-
int tipc_bclink_stats(char *buf, const u32 buf_size)
{
@@ -764,7 +757,7 @@ int tipc_bclink_init(void)
bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
if (!bcbearer || !bclink) {
- warn("Multicast link creation failed, no memory\n");
+ warn("Broadcast link creation failed, no memory\n");
kfree(bcbearer);
bcbearer = NULL;
kfree(bclink);
@@ -775,7 +768,7 @@ int tipc_bclink_init(void)
INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
bcbearer->bearer.media = &bcbearer->media;
bcbearer->media.send_msg = tipc_bcbearer_send;
- sprintf(bcbearer->media.name, "tipc-multicast");
+ sprintf(bcbearer->media.name, "tipc-broadcast");
bcl = &bclink->link;
INIT_LIST_HEAD(&bcl->waiting_ports);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 500c97f1c85..06740da5ae6 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -101,6 +101,5 @@ int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
int tipc_bclink_reset_stats(void);
int tipc_bclink_set_queue_limits(u32 limit);
void tipc_bcbearer_sort(void);
-void tipc_bcbearer_push(void);
#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 85eba9c08ee..e2202de3d93 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -385,13 +385,9 @@ static int bearer_push(struct tipc_bearer *b_ptr)
void tipc_bearer_lock_push(struct tipc_bearer *b_ptr)
{
- int res;
-
spin_lock_bh(&b_ptr->lock);
- res = bearer_push(b_ptr);
+ bearer_push(b_ptr);
spin_unlock_bh(&b_ptr->lock);
- if (res)
- tipc_bcbearer_push();
}
@@ -608,6 +604,7 @@ int tipc_block_bearer(const char *name)
info("Blocking bearer <%s>\n", name);
spin_lock_bh(&b_ptr->lock);
b_ptr->blocked = 1;
+ list_splice_init(&b_ptr->cong_links, &b_ptr->links);
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
struct tipc_node *n_ptr = l_ptr->owner;
@@ -635,6 +632,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
spin_lock_bh(&b_ptr->lock);
b_ptr->blocked = 1;
b_ptr->media->disable_bearer(b_ptr);
+ list_splice_init(&b_ptr->cong_links, &b_ptr->links);
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
tipc_link_delete(l_ptr);
}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 5ad70eff1eb..d696f9e414e 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -39,8 +39,8 @@
#include "bcast.h"
-#define MAX_BEARERS 8
-#define MAX_MEDIA 4
+#define MAX_BEARERS 2
+#define MAX_MEDIA 2
/*
* Identifiers of supported TIPC media types
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 443159a166f..80da6ebc278 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -65,7 +65,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
const void *req_tlv_area, int req_tlv_space,
int headroom);
-void tipc_cfg_link_event(u32 addr, char *name, int up);
int tipc_cfg_init(void);
void tipc_cfg_stop(void);
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 0987933155b..f2fb96e86ee 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -159,12 +159,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
}
tipc_node_lock(n_ptr);
- /* Don't talk to neighbor during cleanup after last session */
- if (n_ptr->cleanup_required) {
- tipc_node_unlock(n_ptr);
- return;
- }
-
link = n_ptr->links[b_ptr->identity];
/* Create a link endpoint for this bearer, if necessary */
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index b69092eb95d..e728d4ce2a1 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -2,7 +2,7 @@
* net/tipc/eth_media.c: Ethernet bearer support for TIPC
*
* Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2008, 2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,7 +37,7 @@
#include "core.h"
#include "bearer.h"
-#define MAX_ETH_BEARERS 2
+#define MAX_ETH_BEARERS MAX_BEARERS
#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI
#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL
#define ETH_LINK_WINDOW TIPC_DEF_LINK_WIN
@@ -144,31 +144,27 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
/* Find device with specified name */
+ read_lock(&dev_base_lock);
for_each_netdev(&init_net, pdev) {
if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) {
dev = pdev;
+ dev_hold(dev);
break;
}
}
+ read_unlock(&dev_base_lock);
if (!dev)
return -ENODEV;
- /* Find Ethernet bearer for device (or create one) */
-
- while ((eb_ptr != stop) && eb_ptr->dev && (eb_ptr->dev != dev))
- eb_ptr++;
- if (eb_ptr == stop)
- return -EDQUOT;
- if (!eb_ptr->dev) {
- eb_ptr->dev = dev;
- eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
- eb_ptr->tipc_packet_type.dev = dev;
- eb_ptr->tipc_packet_type.func = recv_msg;
- eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
- INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
- dev_hold(dev);
- dev_add_pack(&eb_ptr->tipc_packet_type);
- }
+ /* Create Ethernet bearer for device */
+
+ eb_ptr->dev = dev;
+ eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
+ eb_ptr->tipc_packet_type.dev = dev;
+ eb_ptr->tipc_packet_type.func = recv_msg;
+ eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
+ INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
+ dev_add_pack(&eb_ptr->tipc_packet_type);
/* Associate TIPC bearer with Ethernet bearer */
diff --git a/net/tipc/link.c b/net/tipc/link.c
index f89570c54f5..ae98a72da11 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -332,15 +332,16 @@ struct link *tipc_link_create(struct tipc_node *n_ptr,
l_ptr->addr = peer;
if_name = strchr(b_ptr->name, ':') + 1;
- sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
+ sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
tipc_node(tipc_own_addr),
if_name,
tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
- /* note: peer i/f is appended to link name by reset/activate */
+ /* note: peer i/f name is updated by reset/activate message */
memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
l_ptr->owner = n_ptr;
l_ptr->checkpoint = 1;
+ l_ptr->peer_session = INVALID_SESSION;
l_ptr->b_ptr = b_ptr;
link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
l_ptr->state = RESET_UNKNOWN;
@@ -536,9 +537,6 @@ void tipc_link_stop(struct link *l_ptr)
l_ptr->proto_msg_queue = NULL;
}
-/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
-#define link_send_event(fcn, l_ptr, up) do { } while (0)
-
void tipc_link_reset(struct link *l_ptr)
{
struct sk_buff *buf;
@@ -596,10 +594,6 @@ void tipc_link_reset(struct link *l_ptr)
l_ptr->fsm_msg_cnt = 0;
l_ptr->stale_count = 0;
link_reset_statistics(l_ptr);
-
- link_send_event(tipc_cfg_link_event, l_ptr, 0);
- if (!in_own_cluster(l_ptr->addr))
- link_send_event(tipc_disc_link_event, l_ptr, 0);
}
@@ -608,9 +602,6 @@ static void link_activate(struct link *l_ptr)
l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
tipc_node_link_up(l_ptr->owner, l_ptr);
tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
- link_send_event(tipc_cfg_link_event, l_ptr, 1);
- if (!in_own_cluster(l_ptr->addr))
- link_send_event(tipc_disc_link_event, l_ptr, 1);
}
/**
@@ -985,6 +976,51 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
}
/*
+ * tipc_link_send_names - send name table entries to new neighbor
+ *
+ * Send routine for bulk delivery of name table messages when contact
+ * with a new neighbor occurs. No link congestion checking is performed
+ * because name table messages *must* be delivered. The messages must be
+ * small enough not to require fragmentation.
+ * Called without any locks held.
+ */
+
+void tipc_link_send_names(struct list_head *message_list, u32 dest)
+{
+ struct tipc_node *n_ptr;
+ struct link *l_ptr;
+ struct sk_buff *buf;
+ struct sk_buff *temp_buf;
+
+ if (list_empty(message_list))
+ return;
+
+ read_lock_bh(&tipc_net_lock);
+ n_ptr = tipc_node_find(dest);
+ if (n_ptr) {
+ tipc_node_lock(n_ptr);
+ l_ptr = n_ptr->active_links[0];
+ if (l_ptr) {
+ /* convert circular list to linear list */
+ ((struct sk_buff *)message_list->prev)->next = NULL;
+ link_add_chain_to_outqueue(l_ptr,
+ (struct sk_buff *)message_list->next, 0);
+ tipc_link_push_queue(l_ptr);
+ INIT_LIST_HEAD(message_list);
+ }
+ tipc_node_unlock(n_ptr);
+ }
+ read_unlock_bh(&tipc_net_lock);
+
+ /* discard the messages if they couldn't be sent */
+
+ list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
+ list_del((struct list_head *)buf);
+ buf_discard(buf);
+ }
+}
+
+/*
* link_send_buf_fast: Entry for data messages where the
* destination link is known and the header is complete,
* inclusive total message length. Very time critical.
@@ -1031,9 +1067,6 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
u32 selector = msg_origport(buf_msg(buf)) & 1;
u32 dummy;
- if (destnode == tipc_own_addr)
- return tipc_port_recv_msg(buf);
-
read_lock_bh(&tipc_net_lock);
n_ptr = tipc_node_find(destnode);
if (likely(n_ptr)) {
@@ -1658,19 +1691,12 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
continue;
}
+ /* Discard unicast link messages destined for another node */
+
if (unlikely(!msg_short(msg) &&
(msg_destnode(msg) != tipc_own_addr)))
goto cont;
- /* Discard non-routeable messages destined for another node */
-
- if (unlikely(!msg_isdata(msg) &&
- (msg_destnode(msg) != tipc_own_addr))) {
- if ((msg_user(msg) != CONN_MANAGER) &&
- (msg_user(msg) != MSG_FRAGMENTER))
- goto cont;
- }
-
/* Locate neighboring node that sent message */
n_ptr = tipc_node_find(msg_prevnode(msg));
@@ -1678,17 +1704,24 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
goto cont;
tipc_node_lock(n_ptr);
- /* Don't talk to neighbor during cleanup after last session */
+ /* Locate unicast link endpoint that should handle message */
- if (n_ptr->cleanup_required) {
+ l_ptr = n_ptr->links[b_ptr->identity];
+ if (unlikely(!l_ptr)) {
tipc_node_unlock(n_ptr);
goto cont;
}
- /* Locate unicast link endpoint that should handle message */
+ /* Verify that communication with node is currently allowed */
- l_ptr = n_ptr->links[b_ptr->identity];
- if (unlikely(!l_ptr)) {
+ if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
+ msg_user(msg) == LINK_PROTOCOL &&
+ (msg_type(msg) == RESET_MSG ||
+ msg_type(msg) == ACTIVATE_MSG) &&
+ !msg_redundant_link(msg))
+ n_ptr->block_setup &= ~WAIT_PEER_DOWN;
+
+ if (n_ptr->block_setup) {
tipc_node_unlock(n_ptr);
goto cont;
}
@@ -1923,6 +1956,12 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
if (link_blocked(l_ptr))
return;
+
+ /* Abort non-RESET send if communication with node is prohibited */
+
+ if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
+ return;
+
msg_set_type(msg, msg_typ);
msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
@@ -2051,9 +2090,19 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
case RESET_MSG:
if (!link_working_unknown(l_ptr) &&
(l_ptr->peer_session != INVALID_SESSION)) {
- if (msg_session(msg) == l_ptr->peer_session)
- break; /* duplicate: ignore */
+ if (less_eq(msg_session(msg), l_ptr->peer_session))
+ break; /* duplicate or old reset: ignore */
+ }
+
+ if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
+ link_working_unknown(l_ptr))) {
+ /*
+ * peer has lost contact -- don't allow peer's links
+ * to reactivate before we recognize loss & clean up
+ */
+ l_ptr->owner->block_setup = WAIT_NODE_DOWN;
}
+
/* fall thru' */
case ACTIVATE_MSG:
/* Update link settings according other endpoint's values */
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 74fbecab1ea..e56cb532913 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -223,6 +223,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
void tipc_link_reset(struct link *l_ptr);
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
+void tipc_link_send_names(struct list_head *message_list, u32 dest);
int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
int tipc_link_send_sections_fast(struct tipc_port *sender,
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index cd356e50433..b7ca1bd7b15 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -173,18 +173,40 @@ void tipc_named_withdraw(struct publication *publ)
* tipc_named_node_up - tell specified node about all publications by this node
*/
-void tipc_named_node_up(unsigned long node)
+void tipc_named_node_up(unsigned long nodearg)
{
+ struct tipc_node *n_ptr;
+ struct link *l_ptr;
struct publication *publ;
struct distr_item *item = NULL;
struct sk_buff *buf = NULL;
+ struct list_head message_list;
+ u32 node = (u32)nodearg;
u32 left = 0;
u32 rest;
- u32 max_item_buf;
+ u32 max_item_buf = 0;
+
+ /* compute maximum amount of publication data to send per message */
+
+ read_lock_bh(&tipc_net_lock);
+ n_ptr = tipc_node_find(node);
+ if (n_ptr) {
+ tipc_node_lock(n_ptr);
+ l_ptr = n_ptr->active_links[0];
+ if (l_ptr)
+ max_item_buf = ((l_ptr->max_pkt - INT_H_SIZE) /
+ ITEM_SIZE) * ITEM_SIZE;
+ tipc_node_unlock(n_ptr);
+ }
+ read_unlock_bh(&tipc_net_lock);
+ if (!max_item_buf)
+ return;
+
+ /* create list of publication messages, then send them as a unit */
+
+ INIT_LIST_HEAD(&message_list);
read_lock_bh(&tipc_nametbl_lock);
- max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
- max_item_buf *= ITEM_SIZE;
rest = publ_cnt * ITEM_SIZE;
list_for_each_entry(publ, &publ_root, local_list) {
@@ -202,13 +224,14 @@ void tipc_named_node_up(unsigned long node)
item++;
left -= ITEM_SIZE;
if (!left) {
- msg_set_link_selector(buf_msg(buf), node);
- tipc_link_send(buf, node, node);
+ list_add_tail((struct list_head *)buf, &message_list);
buf = NULL;
}
}
exit:
read_unlock_bh(&tipc_nametbl_lock);
+
+ tipc_link_send_names(&message_list, (u32)node);
}
/**
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 68b3dd63729..fafef6c3c0f 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -141,17 +141,6 @@ void tipc_net_route_msg(struct sk_buff *buf)
return;
msg = buf_msg(buf);
- msg_incr_reroute_cnt(msg);
- if (msg_reroute_cnt(msg) > 6) {
- if (msg_errcode(msg)) {
- buf_discard(buf);
- } else {
- tipc_reject_msg(buf, msg_destport(msg) ?
- TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
- }
- return;
- }
-
/* Handle message for this node */
dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
if (tipc_in_scope(dnode, tipc_own_addr)) {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2d106ef4fa4..27b4bb0cca6 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -112,6 +112,7 @@ struct tipc_node *tipc_node_create(u32 addr)
break;
}
list_add_tail(&n_ptr->list, &temp_node->list);
+ n_ptr->block_setup = WAIT_PEER_DOWN;
tipc_num_nodes++;
@@ -312,7 +313,7 @@ static void node_established_contact(struct tipc_node *n_ptr)
}
}
-static void node_cleanup_finished(unsigned long node_addr)
+static void node_name_purge_complete(unsigned long node_addr)
{
struct tipc_node *n_ptr;
@@ -320,7 +321,7 @@ static void node_cleanup_finished(unsigned long node_addr)
n_ptr = tipc_node_find(node_addr);
if (n_ptr) {
tipc_node_lock(n_ptr);
- n_ptr->cleanup_required = 0;
+ n_ptr->block_setup &= ~WAIT_NAMES_GONE;
tipc_node_unlock(n_ptr);
}
read_unlock_bh(&tipc_net_lock);
@@ -331,28 +332,32 @@ static void node_lost_contact(struct tipc_node *n_ptr)
char addr_string[16];
u32 i;
- /* Clean up broadcast reception remains */
- n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
- while (n_ptr->bclink.deferred_head) {
- struct sk_buff *buf = n_ptr->bclink.deferred_head;
- n_ptr->bclink.deferred_head = buf->next;
- buf_discard(buf);
- }
- if (n_ptr->bclink.defragm) {
- buf_discard(n_ptr->bclink.defragm);
- n_ptr->bclink.defragm = NULL;
- }
+ info("Lost contact with %s\n",
+ tipc_addr_string_fill(addr_string, n_ptr->addr));
+
+ /* Flush broadcast link info associated with lost node */
if (n_ptr->bclink.supported) {
+ n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
+ while (n_ptr->bclink.deferred_head) {
+ struct sk_buff *buf = n_ptr->bclink.deferred_head;
+ n_ptr->bclink.deferred_head = buf->next;
+ buf_discard(buf);
+ }
+
+ if (n_ptr->bclink.defragm) {
+ buf_discard(n_ptr->bclink.defragm);
+ n_ptr->bclink.defragm = NULL;
+ }
+
+ tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
tipc_bclink_acknowledge(n_ptr,
mod(n_ptr->bclink.acked + 10000));
- tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
if (n_ptr->addr < tipc_own_addr)
tipc_own_tag--;
- }
- info("Lost contact with %s\n",
- tipc_addr_string_fill(addr_string, n_ptr->addr));
+ n_ptr->bclink.supported = 0;
+ }
/* Abort link changeover */
for (i = 0; i < MAX_BEARERS; i++) {
@@ -367,10 +372,10 @@ static void node_lost_contact(struct tipc_node *n_ptr)
/* Notify subscribers */
tipc_nodesub_notify(n_ptr);
- /* Prevent re-contact with node until all cleanup is done */
+ /* Prevent re-contact with node until cleanup is done */
- n_ptr->cleanup_required = 1;
- tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr);
+ n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
+ tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
}
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 5c61afc7a0b..4f15cb40aaa 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -42,6 +42,12 @@
#include "net.h"
#include "bearer.h"
+/* Flags used to block (re)establishment of contact with a neighboring node */
+
+#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */
+#define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */
+#define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */
+
/**
* struct tipc_node - TIPC node structure
* @addr: network address of node
@@ -52,7 +58,7 @@
* @active_links: pointers to active links to node
* @links: pointers to all links to node
* @working_links: number of working links to node (both active and standby)
- * @cleanup_required: non-zero if cleaning up after a prior loss of contact
+ * @block_setup: bit mask of conditions preventing link establishment to node
* @link_cnt: number of links to node
* @permit_changeover: non-zero if node has redundant links to this system
* @bclink: broadcast-related info
@@ -77,7 +83,7 @@ struct tipc_node {
struct link *links[MAX_BEARERS];
int link_cnt;
int working_links;
- int cleanup_required;
+ int block_setup;
int permit_changeover;
struct {
int supported;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index adb2eff4a10..9440a3d48ca 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -49,7 +49,7 @@ struct tipc_sock {
struct sock sk;
struct tipc_port *p;
struct tipc_portid peer_name;
- long conn_timeout;
+ unsigned int conn_timeout;
};
#define tipc_sk(sk) ((struct tipc_sock *)(sk))
@@ -231,7 +231,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
sock_init_data(sock, sk);
sk->sk_backlog_rcv = backlog_rcv;
tipc_sk(sk)->p = tp_ptr;
- tipc_sk(sk)->conn_timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
+ tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
spin_unlock_bh(tp_ptr->lock);
@@ -525,6 +525,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
struct tipc_port *tport = tipc_sk_port(sk);
struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
int needs_conn;
+ long timeout_val;
int res = -EINVAL;
if (unlikely(!dest))
@@ -564,6 +565,8 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
reject_rx_queue(sk);
}
+ timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+
do {
if (dest->addrtype == TIPC_ADDR_NAME) {
res = dest_name_check(dest, m);
@@ -600,16 +603,14 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
sock->state = SS_CONNECTING;
break;
}
- if (m->msg_flags & MSG_DONTWAIT) {
- res = -EWOULDBLOCK;
+ if (timeout_val <= 0L) {
+ res = timeout_val ? timeout_val : -EWOULDBLOCK;
break;
}
release_sock(sk);
- res = wait_event_interruptible(*sk_sleep(sk),
- !tport->congested);
+ timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
+ !tport->congested, timeout_val);
lock_sock(sk);
- if (res)
- break;
} while (1);
exit:
@@ -636,6 +637,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
+ long timeout_val;
int res;
/* Handle implied connection establishment */
@@ -650,6 +652,8 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
if (iocb)
lock_sock(sk);
+ timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+
do {
if (unlikely(sock->state != SS_CONNECTED)) {
if (sock->state == SS_DISCONNECTING)
@@ -663,16 +667,14 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
total_len);
if (likely(res != -ELINKCONG))
break;
- if (m->msg_flags & MSG_DONTWAIT) {
- res = -EWOULDBLOCK;
+ if (timeout_val <= 0L) {
+ res = timeout_val ? timeout_val : -EWOULDBLOCK;
break;
}
release_sock(sk);
- res = wait_event_interruptible(*sk_sleep(sk),
- (!tport->congested || !tport->connected));
+ timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
+ (!tport->congested || !tport->connected), timeout_val);
lock_sock(sk);
- if (res)
- break;
} while (1);
if (iocb)
@@ -1369,7 +1371,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
struct msghdr m = {NULL,};
struct sk_buff *buf;
struct tipc_msg *msg;
- long timeout;
+ unsigned int timeout;
int res;
lock_sock(sk);
@@ -1434,7 +1436,8 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
res = wait_event_interruptible_timeout(*sk_sleep(sk),
(!skb_queue_empty(&sk->sk_receive_queue) ||
(sock->state != SS_CONNECTING)),
- timeout ? timeout : MAX_SCHEDULE_TIMEOUT);
+ timeout ? (long)msecs_to_jiffies(timeout)
+ : MAX_SCHEDULE_TIMEOUT);
lock_sock(sk);
if (res > 0) {
@@ -1480,9 +1483,7 @@ static int listen(struct socket *sock, int len)
lock_sock(sk);
- if (sock->state == SS_READY)
- res = -EOPNOTSUPP;
- else if (sock->state != SS_UNCONNECTED)
+ if (sock->state != SS_UNCONNECTED)
res = -EINVAL;
else {
sock->state = SS_LISTENING;
@@ -1510,10 +1511,6 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
lock_sock(sk);
- if (sock->state == SS_READY) {
- res = -EOPNOTSUPP;
- goto exit;
- }
if (sock->state != SS_LISTENING) {
res = -EINVAL;
goto exit;
@@ -1696,7 +1693,7 @@ static int setsockopt(struct socket *sock,
res = tipc_set_portunreturnable(tport->ref, value);
break;
case TIPC_CONN_TIMEOUT:
- tipc_sk(sk)->conn_timeout = msecs_to_jiffies(value);
+ tipc_sk(sk)->conn_timeout = value;
/* no need to set "res", since already 0 at this point */
break;
default:
@@ -1752,7 +1749,7 @@ static int getsockopt(struct socket *sock,
res = tipc_portunreturnable(tport->ref, &value);
break;
case TIPC_CONN_TIMEOUT:
- value = jiffies_to_msecs(tipc_sk(sk)->conn_timeout);
+ value = tipc_sk(sk)->conn_timeout;
/* no need to set "res", since already 0 at this point */
break;
case TIPC_NODE_RECVQ_DEPTH:
@@ -1790,11 +1787,11 @@ static const struct proto_ops msg_ops = {
.bind = bind,
.connect = connect,
.socketpair = sock_no_socketpair,
- .accept = accept,
+ .accept = sock_no_accept,
.getname = get_name,
.poll = poll,
.ioctl = sock_no_ioctl,
- .listen = listen,
+ .listen = sock_no_listen,
.shutdown = shutdown,
.setsockopt = setsockopt,
.getsockopt = getsockopt,
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 6cf72686348..198371723b4 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -151,7 +151,7 @@ void tipc_subscr_report_overlap(struct subscription *sub,
if (!must && !(sub->filter & TIPC_SUB_PORTS))
return;
- sub->event_cb(sub, found_lower, found_upper, event, port_ref, node);
+ subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
}
/**
@@ -365,7 +365,6 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
subscr_terminate(subscriber);
return NULL;
}
- sub->event_cb = subscr_send_event;
INIT_LIST_HEAD(&sub->nameseq_list);
list_add(&sub->subscription_list, &subscriber->subscription_list);
sub->server_ref = subscriber->port_ref;
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 45d89bf4d20..4b06ef6f840 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -39,16 +39,11 @@
struct subscription;
-typedef void (*tipc_subscr_event) (struct subscription *sub,
- u32 found_lower, u32 found_upper,
- u32 event, u32 port_ref, u32 node);
-
/**
* struct subscription - TIPC network topology subscription object
* @seq: name sequence associated with subscription
* @timeout: duration of subscription (in ms)
* @filter: event filtering to be done for subscription
- * @event_cb: routine invoked when a subscription event is detected
* @timer: timer governing subscription duration (optional)
* @nameseq_list: adjacent subscriptions in name sequence's subscription list
* @subscription_list: adjacent subscriptions in subscriber's subscription list
@@ -61,7 +56,6 @@ struct subscription {
struct tipc_name_seq seq;
u32 timeout;
u32 filter;
- tipc_subscr_event event_cb;
struct timer_list timer;
struct list_head nameseq_list;
struct list_head subscription_list;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ec68e1c05b8..466fbcc5cf7 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1381,8 +1381,10 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
int err = 0;
+
UNIXCB(skb).pid = get_pid(scm->pid);
- UNIXCB(skb).cred = get_cred(scm->cred);
+ if (scm->cred)
+ UNIXCB(skb).cred = get_cred(scm->cred);
UNIXCB(skb).fp = NULL;
if (scm->fp && send_fds)
err = unix_attach_fds(scm, skb);
@@ -1392,6 +1394,24 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
}
/*
+ * Some apps rely on write() giving SCM_CREDENTIALS
+ * We include credentials if source or destination socket
+ * asserted SOCK_PASSCRED.
+ */
+static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
+ const struct sock *other)
+{
+ if (UNIXCB(skb).cred)
+ return;
+ if (test_bit(SOCK_PASSCRED, &sock->flags) ||
+ !other->sk_socket ||
+ test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
+ UNIXCB(skb).pid = get_pid(task_tgid(current));
+ UNIXCB(skb).cred = get_current_cred();
+ }
+}
+
+/*
* Send AF_UNIX data.
*/
@@ -1538,6 +1558,7 @@ restart:
if (sock_flag(other, SOCK_RCVTSTAMP))
__net_timestamp(skb);
+ maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
if (max_level > unix_sk(other)->recursion_level)
unix_sk(other)->recursion_level = max_level;
@@ -1652,6 +1673,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
(other->sk_shutdown & RCV_SHUTDOWN))
goto pipe_err_free;
+ maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
if (max_level > unix_sk(other)->recursion_level)
unix_sk(other)->recursion_level = max_level;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index c14865172da..220f3bd176f 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -582,7 +582,7 @@ int wiphy_register(struct wiphy *wiphy)
}
/* set up regulatory info */
- wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
+ regulatory_update(wiphy, NL80211_REGDOM_SET_BY_CORE);
list_add_rcu(&rdev->list, &cfg80211_rdev_list);
cfg80211_rdev_list_generation++;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 8672e028022..b9ec3061ed7 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -279,8 +279,6 @@ extern int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
char *newname);
void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
-void wiphy_update_regulatory(struct wiphy *wiphy,
- enum nl80211_reg_initiator setby);
void cfg80211_bss_expire(struct cfg80211_registered_device *dev);
void cfg80211_bss_age(struct cfg80211_registered_device *dev,
@@ -377,7 +375,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
- const u8 *buf, size_t len, u64 *cookie);
+ const u8 *buf, size_t len, bool no_cck,
+ u64 *cookie);
/* SME */
int __cfg80211_connect(struct cfg80211_registered_device *rdev,
@@ -408,6 +407,7 @@ void cfg80211_sme_failed_assoc(struct wireless_dev *wdev);
bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev);
/* internal helpers */
+bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher);
int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
struct key_params *params, int key_idx,
bool pairwise, const u8 *mac_addr);
diff --git a/net/wireless/lib80211.c b/net/wireless/lib80211.c
index 3268fac5ab2..a55c27b75ee 100644
--- a/net/wireless/lib80211.c
+++ b/net/wireless/lib80211.c
@@ -41,6 +41,11 @@ struct lib80211_crypto_alg {
static LIST_HEAD(lib80211_crypto_algs);
static DEFINE_SPINLOCK(lib80211_crypto_lock);
+static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
+ int force);
+static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info);
+static void lib80211_crypt_deinit_handler(unsigned long data);
+
const char *print_ssid(char *buf, const char *ssid, u8 ssid_len)
{
const char *s = ssid;
@@ -111,7 +116,8 @@ void lib80211_crypt_info_free(struct lib80211_crypt_info *info)
}
EXPORT_SYMBOL(lib80211_crypt_info_free);
-void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force)
+static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
+ int force)
{
struct lib80211_crypt_data *entry, *next;
unsigned long flags;
@@ -131,10 +137,9 @@ void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force)
}
spin_unlock_irqrestore(info->lock, flags);
}
-EXPORT_SYMBOL(lib80211_crypt_deinit_entries);
/* After this, crypt_deinit_list won't accept new members */
-void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
+static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
{
unsigned long flags;
@@ -142,9 +147,8 @@ void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
info->crypt_quiesced = 1;
spin_unlock_irqrestore(info->lock, flags);
}
-EXPORT_SYMBOL(lib80211_crypt_quiescing);
-void lib80211_crypt_deinit_handler(unsigned long data)
+static void lib80211_crypt_deinit_handler(unsigned long data)
{
struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data;
unsigned long flags;
@@ -160,7 +164,6 @@ void lib80211_crypt_deinit_handler(unsigned long data)
}
spin_unlock_irqrestore(info->lock, flags);
}
-EXPORT_SYMBOL(lib80211_crypt_deinit_handler);
void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info,
struct lib80211_crypt_data **crypt)
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index dacb3b4b1bd..755738d26bb 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -77,8 +77,6 @@ static void *lib80211_ccmp_init(int key_idx)
priv->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tfm)) {
- printk(KERN_DEBUG "lib80211_crypt_ccmp: could not allocate "
- "crypto API aes\n");
priv->tfm = NULL;
goto fail;
}
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 7ea4f2b0770..38734846c19 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -101,7 +101,6 @@ static void *lib80211_tkip_init(int key_idx)
priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_arc4)) {
- printk(KERN_DEBUG pr_fmt("could not allocate crypto API arc4\n"));
priv->tx_tfm_arc4 = NULL;
goto fail;
}
@@ -109,7 +108,6 @@ static void *lib80211_tkip_init(int key_idx)
priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_michael)) {
- printk(KERN_DEBUG pr_fmt("could not allocate crypto API michael_mic\n"));
priv->tx_tfm_michael = NULL;
goto fail;
}
@@ -117,7 +115,6 @@ static void *lib80211_tkip_init(int key_idx)
priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_arc4)) {
- printk(KERN_DEBUG pr_fmt("could not allocate crypto API arc4\n"));
priv->rx_tfm_arc4 = NULL;
goto fail;
}
@@ -125,7 +122,6 @@ static void *lib80211_tkip_init(int key_idx)
priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_michael)) {
- printk(KERN_DEBUG pr_fmt("could not allocate crypto API michael_mic\n"));
priv->rx_tfm_michael = NULL;
goto fail;
}
diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c
index 2f265e033ae..c1304018fc1 100644
--- a/net/wireless/lib80211_crypt_wep.c
+++ b/net/wireless/lib80211_crypt_wep.c
@@ -50,16 +50,12 @@ static void *lib80211_wep_init(int keyidx)
priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm)) {
- printk(KERN_DEBUG "lib80211_crypt_wep: could not allocate "
- "crypto API arc4\n");
priv->tx_tfm = NULL;
goto fail;
}
priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm)) {
- printk(KERN_DEBUG "lib80211_crypt_wep: could not allocate "
- "crypto API arc4\n");
priv->rx_tfm = NULL;
goto fail;
}
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 5c116083eec..4423e64c7d9 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -12,6 +12,7 @@
#define MESH_HOLD_T 100
#define MESH_PATH_TIMEOUT 5000
+#define MESH_RANN_INTERVAL 5000
/*
* Minimum interval between two consecutive PREQs originated by the same
@@ -49,6 +50,8 @@ const struct mesh_config default_mesh_config = {
.dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES,
.path_refresh_time = MESH_PATH_REFRESH_TIME,
.min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT,
+ .dot11MeshHWMPRannInterval = MESH_RANN_INTERVAL,
+ .dot11MeshGateAnnouncementProtocol = false,
};
const struct mesh_setup default_mesh_setup = {
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 832f6574e4e..21fc9702f81 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -900,7 +900,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
- const u8 *buf, size_t len, u64 *cookie)
+ const u8 *buf, size_t len, bool no_cck,
+ u64 *cookie)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
const struct ieee80211_mgmt *mgmt;
@@ -991,7 +992,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
/* Transmit the Action frame as requested by user space */
return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan,
channel_type, channel_type_valid,
- wait, buf, len, cookie);
+ wait, buf, len, no_cck, cookie);
}
bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
@@ -1095,3 +1096,14 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
}
EXPORT_SYMBOL(cfg80211_gtk_rekey_notify);
+
+void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
+ const u8 *bssid, bool preauth, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
+}
+EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ea40d540a99..48260c2d092 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -23,6 +23,12 @@
#include "nl80211.h"
#include "reg.h"
+static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type);
+static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
+ struct genl_info *info,
+ struct cfg80211_crypto_settings *settings,
+ int cipher_limit);
+
static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
struct genl_info *info);
static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb,
@@ -178,6 +184,19 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
[NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED },
[NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED },
+ [NL80211_ATTR_HIDDEN_SSID] = { .type = NLA_U32 },
+ [NL80211_ATTR_IE_PROBE_RESP] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_ATTR_IE_ASSOC_RESP] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_ATTR_ROAM_SUPPORT] = { .type = NLA_FLAG },
+ [NL80211_ATTR_SCHED_SCAN_MATCH] = { .type = NLA_NESTED },
+ [NL80211_ATTR_TX_NO_CCK_RATE] = { .type = NLA_FLAG },
+ [NL80211_ATTR_TDLS_ACTION] = { .type = NLA_U8 },
+ [NL80211_ATTR_TDLS_DIALOG_TOKEN] = { .type = NLA_U8 },
+ [NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 },
+ [NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG },
+ [NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG },
};
/* policy for the key attributes */
@@ -220,6 +239,12 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
[NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN },
};
+static const struct nla_policy
+nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
+ [NL80211_ATTR_SCHED_SCAN_MATCH_SSID] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_SSID_LEN },
+};
+
/* ifidx get helper */
static int nl80211_get_ifidx(struct netlink_callback *cb)
{
@@ -703,11 +728,21 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
dev->wiphy.max_scan_ie_len);
NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
dev->wiphy.max_sched_scan_ie_len);
+ NLA_PUT_U8(msg, NL80211_ATTR_MAX_MATCH_SETS,
+ dev->wiphy.max_match_sets);
if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)
NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN);
if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH)
NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH);
+ if (dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_AP_UAPSD);
+ if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_ROAM_SUPPORT);
+ if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_SUPPORT);
+ if (dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP);
NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES,
sizeof(u32) * dev->wiphy.n_cipher_suites,
@@ -850,6 +885,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
}
CMD(set_channel, SET_CHANNEL);
CMD(set_wds_peer, SET_WDS_PEER);
+ if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
+ CMD(tdls_mgmt, TDLS_MGMT);
+ CMD(tdls_oper, TDLS_OPER);
+ }
if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
CMD(sched_scan_start, START_SCHED_SCAN);
@@ -871,8 +910,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
dev->wiphy.max_remain_on_channel_duration);
- /* for now at least assume all drivers have it */
- if (dev->ops->mgmt_tx)
+ if (dev->ops->mgmt_tx_cancel_wait)
NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
if (mgmt_stypes) {
@@ -1210,6 +1248,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
goto bad_res;
}
+ if (!netdev) {
+ result = -EINVAL;
+ goto bad_res;
+ }
+
nla_for_each_nested(nl_txq_params,
info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
rem_txq_params) {
@@ -1222,6 +1265,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
goto bad_res;
result = rdev->ops->set_txq_params(&rdev->wiphy,
+ netdev,
&txq_params);
if (result)
goto bad_res;
@@ -1985,7 +2029,10 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
struct beacon_parameters params;
int haveinfo = 0, err;
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]))
+ if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]) ||
+ !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]) ||
+ !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_PROBE_RESP]) ||
+ !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]))
return -EINVAL;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
@@ -2011,6 +2058,49 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
+ /*
+ * In theory, some of these attributes could be required for
+ * NEW_BEACON, but since they were not used when the command was
+ * originally added, keep them optional for old user space
+ * programs to work with drivers that do not need the additional
+ * information.
+ */
+ if (info->attrs[NL80211_ATTR_SSID]) {
+ params.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+ params.ssid_len =
+ nla_len(info->attrs[NL80211_ATTR_SSID]);
+ if (params.ssid_len == 0 ||
+ params.ssid_len > IEEE80211_MAX_SSID_LEN)
+ return -EINVAL;
+ }
+
+ if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) {
+ params.hidden_ssid = nla_get_u32(
+ info->attrs[NL80211_ATTR_HIDDEN_SSID]);
+ if (params.hidden_ssid !=
+ NL80211_HIDDEN_SSID_NOT_IN_USE &&
+ params.hidden_ssid !=
+ NL80211_HIDDEN_SSID_ZERO_LEN &&
+ params.hidden_ssid !=
+ NL80211_HIDDEN_SSID_ZERO_CONTENTS)
+ return -EINVAL;
+ }
+
+ params.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
+
+ if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
+ params.auth_type = nla_get_u32(
+ info->attrs[NL80211_ATTR_AUTH_TYPE]);
+ if (!nl80211_valid_auth_type(params.auth_type))
+ return -EINVAL;
+ } else
+ params.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
+
+ err = nl80211_crypto_settings(rdev, info, &params.crypto,
+ NL80211_MAX_NR_CIPHER_SUITES);
+ if (err)
+ return err;
+
call = rdev->ops->add_beacon;
break;
case NL80211_CMD_SET_BEACON:
@@ -2041,6 +2131,25 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
if (!haveinfo)
return -EINVAL;
+ if (info->attrs[NL80211_ATTR_IE]) {
+ params.beacon_ies = nla_data(info->attrs[NL80211_ATTR_IE]);
+ params.beacon_ies_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ }
+
+ if (info->attrs[NL80211_ATTR_IE_PROBE_RESP]) {
+ params.proberesp_ies =
+ nla_data(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
+ params.proberesp_ies_len =
+ nla_len(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
+ }
+
+ if (info->attrs[NL80211_ATTR_IE_ASSOC_RESP]) {
+ params.assocresp_ies =
+ nla_data(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
+ params.assocresp_ies_len =
+ nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
+ }
+
err = call(&rdev->wiphy, dev, &params);
if (!err && params.interval)
wdev->beacon_interval = params.interval;
@@ -2235,8 +2344,16 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
nla_nest_end(msg, bss_param);
}
+ if (sinfo->filled & STATION_INFO_STA_FLAGS)
+ NLA_PUT(msg, NL80211_STA_INFO_STA_FLAGS,
+ sizeof(struct nl80211_sta_flag_update),
+ &sinfo->sta_flags);
nla_nest_end(msg, sinfoattr);
+ if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES)
+ NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
+ sinfo->assoc_req_ies);
+
return genlmsg_end(msg, hdr);
nla_put_failure:
@@ -2264,6 +2381,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
}
while (1) {
+ memset(&sinfo, 0, sizeof(sinfo));
err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx,
mac_addr, &sinfo);
if (err == -ENOENT)
@@ -2416,18 +2534,25 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_STATION:
- /* disallow everything but AUTHORIZED flag */
+ /* disallow things sta doesn't support */
if (params.plink_action)
err = -EINVAL;
if (params.vlan)
err = -EINVAL;
- if (params.supported_rates)
+ if (params.supported_rates &&
+ !(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
err = -EINVAL;
if (params.ht_capa)
err = -EINVAL;
if (params.listen_interval >= 0)
err = -EINVAL;
- if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
+ if (params.sta_flags_mask &
+ ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
+ BIT(NL80211_STA_FLAG_TDLS_PEER)))
+ err = -EINVAL;
+ /* can't change the TDLS bit */
+ if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+ (params.sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)))
err = -EINVAL;
break;
case NL80211_IFTYPE_MESH_POINT:
@@ -2465,6 +2590,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static struct nla_policy
+nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
+ [NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 },
+ [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
+};
+
static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -2510,10 +2641,50 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
if (parse_station_flags(info, &params))
return -EINVAL;
+ /* parse WME attributes if sta is WME capable */
+ if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
+ (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) &&
+ info->attrs[NL80211_ATTR_STA_WME]) {
+ struct nlattr *tb[NL80211_STA_WME_MAX + 1];
+ struct nlattr *nla;
+
+ nla = info->attrs[NL80211_ATTR_STA_WME];
+ err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
+ nl80211_sta_wme_policy);
+ if (err)
+ return err;
+
+ if (tb[NL80211_STA_WME_UAPSD_QUEUES])
+ params.uapsd_queues =
+ nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]);
+ if (params.uapsd_queues & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+ return -EINVAL;
+
+ if (tb[NL80211_STA_WME_MAX_SP])
+ params.max_sp =
+ nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
+
+ if (params.max_sp & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
+ return -EINVAL;
+
+ params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
+ }
+
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
+ return -EINVAL;
+
+ /*
+ * Only managed stations can add TDLS peers, and only when the
+ * wiphy supports external TDLS setup.
+ */
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
+ !((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+ (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+ (rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)))
return -EINVAL;
err = get_vlan(info, rdev, &params.vlan);
@@ -2955,6 +3126,10 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
cur_params.dot11MeshHWMPnetDiameterTraversalTime);
NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
cur_params.dot11MeshHWMPRootMode);
+ NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+ cur_params.dot11MeshHWMPRannInterval);
+ NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+ cur_params.dot11MeshGateAnnouncementProtocol);
nla_nest_end(msg, pinfoattr);
genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
@@ -2982,6 +3157,9 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
[NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 },
[NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 },
[NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
+ [NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 },
+ [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
+ [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
};
static const struct nla_policy
@@ -3060,6 +3238,14 @@ do {\
dot11MeshHWMPRootMode, mask,
NL80211_MESHCONF_HWMP_ROOTMODE,
nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+ dot11MeshHWMPRannInterval, mask,
+ NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+ dot11MeshGateAnnouncementProtocol, mask,
+ NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+ nla_get_u8);
if (mask_out)
*mask_out = mask;
@@ -3477,6 +3663,9 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
}
}
+ request->no_cck =
+ nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
+
request->dev = dev;
request->wiphy = &rdev->wiphy;
@@ -3503,10 +3692,11 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
struct net_device *dev = info->user_ptr[1];
struct nlattr *attr;
struct wiphy *wiphy;
- int err, tmp, n_ssids = 0, n_channels, i;
+ int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i;
u32 interval;
enum ieee80211_band band;
size_t ie_len;
+ struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
!rdev->ops->sched_scan_start)
@@ -3545,6 +3735,15 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
if (n_ssids > wiphy->max_sched_scan_ssids)
return -EINVAL;
+ if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH])
+ nla_for_each_nested(attr,
+ info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+ tmp)
+ n_match_sets++;
+
+ if (n_match_sets > wiphy->max_match_sets)
+ return -EINVAL;
+
if (info->attrs[NL80211_ATTR_IE])
ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
else
@@ -3562,6 +3761,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ + sizeof(*request->match_sets) * n_match_sets
+ sizeof(*request->channels) * n_channels
+ ie_len, GFP_KERNEL);
if (!request) {
@@ -3579,6 +3779,18 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
request->ie = (void *)(request->channels + n_channels);
}
+ if (n_match_sets) {
+ if (request->ie)
+ request->match_sets = (void *)(request->ie + ie_len);
+ else if (request->ssids)
+ request->match_sets =
+ (void *)(request->ssids + n_ssids);
+ else
+ request->match_sets =
+ (void *)(request->channels + n_channels);
+ }
+ request->n_match_sets = n_match_sets;
+
i = 0;
if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
/* user specified, bail out if channel not found */
@@ -3643,6 +3855,31 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
}
}
+ i = 0;
+ if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
+ nla_for_each_nested(attr,
+ info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+ tmp) {
+ struct nlattr *ssid;
+
+ nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
+ nla_data(attr), nla_len(attr),
+ nl80211_match_policy);
+ ssid = tb[NL80211_ATTR_SCHED_SCAN_MATCH_SSID];
+ if (ssid) {
+ if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
+ err = -EINVAL;
+ goto out_free;
+ }
+ memcpy(request->match_sets[i].ssid.ssid,
+ nla_data(ssid), nla_len(ssid));
+ request->match_sets[i].ssid.ssid_len =
+ nla_len(ssid);
+ }
+ i++;
+ }
+ }
+
if (info->attrs[NL80211_ATTR_IE]) {
request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
memcpy((void *)request->ie,
@@ -3935,22 +4172,6 @@ static bool nl80211_valid_wpa_versions(u32 wpa_versions)
NL80211_WPA_VERSION_2));
}
-static bool nl80211_valid_akm_suite(u32 akm)
-{
- return akm == WLAN_AKM_SUITE_8021X ||
- akm == WLAN_AKM_SUITE_PSK;
-}
-
-static bool nl80211_valid_cipher_suite(u32 cipher)
-{
- return cipher == WLAN_CIPHER_SUITE_WEP40 ||
- cipher == WLAN_CIPHER_SUITE_WEP104 ||
- cipher == WLAN_CIPHER_SUITE_TKIP ||
- cipher == WLAN_CIPHER_SUITE_CCMP ||
- cipher == WLAN_CIPHER_SUITE_AES_CMAC;
-}
-
-
static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -4083,7 +4304,8 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
memcpy(settings->ciphers_pairwise, data, len);
for (i = 0; i < settings->n_ciphers_pairwise; i++)
- if (!nl80211_valid_cipher_suite(
+ if (!cfg80211_supported_cipher_suite(
+ &rdev->wiphy,
settings->ciphers_pairwise[i]))
return -EINVAL;
}
@@ -4091,7 +4313,8 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
if (info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]) {
settings->cipher_group =
nla_get_u32(info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]);
- if (!nl80211_valid_cipher_suite(settings->cipher_group))
+ if (!cfg80211_supported_cipher_suite(&rdev->wiphy,
+ settings->cipher_group))
return -EINVAL;
}
@@ -4104,7 +4327,7 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
if (info->attrs[NL80211_ATTR_AKM_SUITES]) {
void *data;
- int len, i;
+ int len;
data = nla_data(info->attrs[NL80211_ATTR_AKM_SUITES]);
len = nla_len(info->attrs[NL80211_ATTR_AKM_SUITES]);
@@ -4117,10 +4340,6 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
return -EINVAL;
memcpy(settings->akm_suites, data, len);
-
- for (i = 0; i < settings->n_akm_suites; i++)
- if (!nl80211_valid_akm_suite(settings->akm_suites[i]))
- return -EINVAL;
}
return 0;
@@ -4339,8 +4558,12 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
wiphy = &rdev->wiphy;
- if (info->attrs[NL80211_ATTR_MAC])
+ if (info->attrs[NL80211_ATTR_MAC]) {
ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ if (!is_valid_ether_addr(ibss.bssid))
+ return -EINVAL;
+ }
ibss.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
ibss.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
@@ -4777,6 +5000,57 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
return rdev->ops->flush_pmksa(&rdev->wiphy, dev);
}
+static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ u8 action_code, dialog_token;
+ u16 status_code;
+ u8 *peer;
+
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
+ !rdev->ops->tdls_mgmt)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_TDLS_ACTION] ||
+ !info->attrs[NL80211_ATTR_STATUS_CODE] ||
+ !info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN] ||
+ !info->attrs[NL80211_ATTR_IE] ||
+ !info->attrs[NL80211_ATTR_MAC])
+ return -EINVAL;
+
+ peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ action_code = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_ACTION]);
+ status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
+ dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]);
+
+ return rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
+ dialog_token, status_code,
+ nla_data(info->attrs[NL80211_ATTR_IE]),
+ nla_len(info->attrs[NL80211_ATTR_IE]));
+}
+
+static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ enum nl80211_tdls_operation operation;
+ u8 *peer;
+
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
+ !rdev->ops->tdls_oper)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_TDLS_OPERATION] ||
+ !info->attrs[NL80211_ATTR_MAC])
+ return -EINVAL;
+
+ operation = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_OPERATION]);
+ peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ return rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, operation);
+}
+
static int nl80211_remain_on_channel(struct sk_buff *skb,
struct genl_info *info)
{
@@ -4997,6 +5271,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *msg;
unsigned int wait = 0;
bool offchan;
+ bool no_cck;
if (!info->attrs[NL80211_ATTR_FRAME] ||
!info->attrs[NL80211_ATTR_WIPHY_FREQ])
@@ -5033,6 +5308,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK];
+ no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
+
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
chan = rdev_freq_to_chan(rdev, freq, channel_type);
if (chan == NULL)
@@ -5053,7 +5330,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
channel_type_valid, wait,
nla_data(info->attrs[NL80211_ATTR_FRAME]),
nla_len(info->attrs[NL80211_ATTR_FRAME]),
- &cookie);
+ no_cck, &cookie);
if (err)
goto free_msg;
@@ -6089,6 +6366,22 @@ static struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_TDLS_MGMT,
+ .doit = nl80211_tdls_mgmt,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_TDLS_OPER,
+ .doit = nl80211_tdls_oper,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -7078,6 +7371,52 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
nlmsg_free(msg);
}
+void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, int index,
+ const u8 *bssid, bool preauth, gfp_t gfp)
+{
+ struct sk_buff *msg;
+ struct nlattr *attr;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PMKSA_CANDIDATE);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+
+ attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE);
+ if (!attr)
+ goto nla_put_failure;
+
+ NLA_PUT_U32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index);
+ NLA_PUT(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid);
+ if (preauth)
+ NLA_PUT_FLAG(msg, NL80211_PMKSA_CANDIDATE_PREAUTH);
+
+ nla_nest_end(msg, attr);
+
+ if (genlmsg_end(msg, hdr) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+
void
nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *peer,
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 5d69c56400a..f24a1fbeaf1 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -113,4 +113,8 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
const u8 *replay_ctr, gfp_t gfp);
+void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, int index,
+ const u8 *bssid, bool preauth, gfp_t gfp);
+
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 68a471ba193..2520a1b7e7d 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -49,10 +49,8 @@
#include "nl80211.h"
#ifdef CONFIG_CFG80211_REG_DEBUG
-#define REG_DBG_PRINT(format, args...) \
- do { \
- printk(KERN_DEBUG pr_fmt(format), ##args); \
- } while (0)
+#define REG_DBG_PRINT(format, args...) \
+ printk(KERN_DEBUG pr_fmt(format), ##args)
#else
#define REG_DBG_PRINT(args...)
#endif
@@ -753,9 +751,10 @@ static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
chan->center_freq,
KHZ_TO_MHZ(desired_bw_khz));
- REG_DBG_PRINT("%d KHz - %d KHz @ KHz), (%s mBi, %d mBm)\n",
+ REG_DBG_PRINT("%d KHz - %d KHz @ %d KHz), (%s mBi, %d mBm)\n",
freq_range->start_freq_khz,
freq_range->end_freq_khz,
+ freq_range->max_bandwidth_khz,
max_antenna_gain,
power_rule->max_eirp);
}
@@ -891,7 +890,7 @@ static bool ignore_reg_update(struct wiphy *wiphy,
wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
REG_DBG_PRINT("Ignoring regulatory request %s "
"since the driver uses its own custom "
- "regulatory domain ",
+ "regulatory domain\n",
reg_initiator_name(initiator));
return true;
}
@@ -905,7 +904,7 @@ static bool ignore_reg_update(struct wiphy *wiphy,
!is_world_regdom(last_request->alpha2)) {
REG_DBG_PRINT("Ignoring regulatory request %s "
"since the driver requires its own regulatory "
- "domain to be set first",
+ "domain to be set first\n",
reg_initiator_name(initiator));
return true;
}
@@ -913,14 +912,6 @@ static bool ignore_reg_update(struct wiphy *wiphy,
return false;
}
-static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
-{
- struct cfg80211_registered_device *rdev;
-
- list_for_each_entry(rdev, &cfg80211_rdev_list, list)
- wiphy_update_regulatory(&rdev->wiphy, initiator);
-}
-
static void handle_reg_beacon(struct wiphy *wiphy,
unsigned int chan_idx,
struct reg_beacon *reg_beacon)
@@ -1120,11 +1111,13 @@ static void reg_process_ht_flags(struct wiphy *wiphy)
}
-void wiphy_update_regulatory(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
+static void wiphy_update_regulatory(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
{
enum ieee80211_band band;
+ assert_reg_lock();
+
if (ignore_reg_update(wiphy, initiator))
return;
@@ -1139,6 +1132,22 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
wiphy->reg_notifier(wiphy, last_request);
}
+void regulatory_update(struct wiphy *wiphy,
+ enum nl80211_reg_initiator setby)
+{
+ mutex_lock(&reg_mutex);
+ wiphy_update_regulatory(wiphy, setby);
+ mutex_unlock(&reg_mutex);
+}
+
+static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
+{
+ struct cfg80211_registered_device *rdev;
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+ wiphy_update_regulatory(&rdev->wiphy, initiator);
+}
+
static void handle_channel_custom(struct wiphy *wiphy,
enum ieee80211_band band,
unsigned int chan_idx,
@@ -1475,7 +1484,7 @@ static void reg_process_pending_hints(void)
/* When last_request->processed becomes true this will be rescheduled */
if (last_request && !last_request->processed) {
REG_DBG_PRINT("Pending regulatory request, waiting "
- "for it to be processed...");
+ "for it to be processed...\n");
goto out;
}
@@ -2188,7 +2197,7 @@ out:
static void reg_timeout_work(struct work_struct *work)
{
REG_DBG_PRINT("Timeout while waiting for CRDA to reply, "
- "restoring regulatory settings");
+ "restoring regulatory settings\n");
restore_regulatory_settings(true);
}
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index b67d1c3a2fb..4a56799d868 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -16,6 +16,8 @@ void regulatory_exit(void);
int set_regdom(const struct ieee80211_regdomain *rd);
+void regulatory_update(struct wiphy *wiphy, enum nl80211_reg_initiator setby);
+
/**
* regulatory_hint_found_beacon - hints a beacon was found on a channel
* @wiphy: the wireless device where the beacon was found on
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2936cb80915..0fb14241040 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -12,6 +12,7 @@
#include <linux/etherdevice.h>
#include <net/arp.h>
#include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
#include <net/iw_handler.h>
#include "core.h"
#include "nl80211.h"
@@ -227,6 +228,33 @@ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
}
EXPORT_SYMBOL(cfg80211_find_ie);
+const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
+ const u8 *ies, int len)
+{
+ struct ieee80211_vendor_ie *ie;
+ const u8 *pos = ies, *end = ies + len;
+ int ie_oui;
+
+ while (pos < end) {
+ pos = cfg80211_find_ie(WLAN_EID_VENDOR_SPECIFIC, pos,
+ end - pos);
+ if (!pos)
+ return NULL;
+
+ if (end - pos < sizeof(*ie))
+ return NULL;
+
+ ie = (struct ieee80211_vendor_ie *)pos;
+ ie_oui = ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2];
+ if (ie_oui == oui && ie->oui_type == oui_type)
+ return pos;
+
+ pos += 2 + ie->len;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(cfg80211_find_vendor_ie);
+
static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
{
const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index dec0fa28372..6e86d5acf14 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -110,17 +110,22 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
else {
int i = 0, j;
enum ieee80211_band band;
+ struct ieee80211_supported_band *bands;
+ struct ieee80211_channel *channel;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
- if (!wdev->wiphy->bands[band])
+ bands = wdev->wiphy->bands[band];
+ if (!bands)
continue;
- for (j = 0; j < wdev->wiphy->bands[band]->n_channels;
- i++, j++)
- request->channels[i] =
- &wdev->wiphy->bands[band]->channels[j];
- request->rates[band] =
- (1 << wdev->wiphy->bands[band]->n_bitrates) - 1;
+ for (j = 0; j < bands->n_channels; j++) {
+ channel = &bands->channels[j];
+ if (channel->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+ request->channels[i++] = channel;
+ }
+ request->rates[band] = (1 << bands->n_bitrates) - 1;
}
+ n_channels = i;
}
request->n_channels = n_channels;
request->ssids = (void *)&request->channels[n_channels];
diff --git a/net/wireless/util.c b/net/wireless/util.c
index be75a3a0424..2f178f73943 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
+#include <linux/crc32.h>
#include <net/cfg80211.h>
#include <net/ip.h>
#include "core.h"
@@ -150,12 +151,19 @@ void ieee80211_set_bitrate_flags(struct wiphy *wiphy)
set_mandatory_flags_band(wiphy->bands[band], band);
}
+bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher)
+{
+ int i;
+ for (i = 0; i < wiphy->n_cipher_suites; i++)
+ if (cipher == wiphy->cipher_suites[i])
+ return true;
+ return false;
+}
+
int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
struct key_params *params, int key_idx,
bool pairwise, const u8 *mac_addr)
{
- int i;
-
if (key_idx > 5)
return -EINVAL;
@@ -225,10 +233,7 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
}
}
- for (i = 0; i < rdev->wiphy.n_cipher_suites; i++)
- if (params->cipher == rdev->wiphy.cipher_suites[i])
- break;
- if (i == rdev->wiphy.n_cipher_suites)
+ if (!cfg80211_supported_cipher_suite(&rdev->wiphy, params->cipher))
return -EINVAL;
return 0;
@@ -391,8 +396,9 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
}
break;
case cpu_to_le16(0):
- if (iftype != NL80211_IFTYPE_ADHOC)
- return -1;
+ if (iftype != NL80211_IFTYPE_ADHOC &&
+ iftype != NL80211_IFTYPE_STATION)
+ return -1;
break;
}
@@ -512,10 +518,9 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
if (head_need)
skb_orphan(skb);
- if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC)) {
- pr_err("failed to reallocate Tx buffer\n");
+ if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC))
return -ENOMEM;
- }
+
skb->truesize += head_need;
}
@@ -1044,3 +1049,170 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
return 0;
}
+
+u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
+ struct ieee802_11_elems *elems,
+ u64 filter, u32 crc)
+{
+ size_t left = len;
+ u8 *pos = start;
+ bool calc_crc = filter != 0;
+
+ memset(elems, 0, sizeof(*elems));
+ elems->ie_start = start;
+ elems->total_len = len;
+
+ while (left >= 2) {
+ u8 id, elen;
+
+ id = *pos++;
+ elen = *pos++;
+ left -= 2;
+
+ if (elen > left)
+ break;
+
+ if (calc_crc && id < 64 && (filter & (1ULL << id)))
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ switch (id) {
+ case WLAN_EID_SSID:
+ elems->ssid = pos;
+ elems->ssid_len = elen;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ elems->supp_rates = pos;
+ elems->supp_rates_len = elen;
+ break;
+ case WLAN_EID_FH_PARAMS:
+ elems->fh_params = pos;
+ elems->fh_params_len = elen;
+ break;
+ case WLAN_EID_DS_PARAMS:
+ elems->ds_params = pos;
+ elems->ds_params_len = elen;
+ break;
+ case WLAN_EID_CF_PARAMS:
+ elems->cf_params = pos;
+ elems->cf_params_len = elen;
+ break;
+ case WLAN_EID_TIM:
+ if (elen >= sizeof(struct ieee80211_tim_ie)) {
+ elems->tim = (void *)pos;
+ elems->tim_len = elen;
+ }
+ break;
+ case WLAN_EID_IBSS_PARAMS:
+ elems->ibss_params = pos;
+ elems->ibss_params_len = elen;
+ break;
+ case WLAN_EID_CHALLENGE:
+ elems->challenge = pos;
+ elems->challenge_len = elen;
+ break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
+ pos[2] == 0xf2) {
+ /* Microsoft OUI (00:50:F2) */
+
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ if (pos[3] == 1) {
+ /* OUI Type 1 - WPA IE */
+ elems->wpa = pos;
+ elems->wpa_len = elen;
+ } else if (elen >= 5 && pos[3] == 2) {
+ /* OUI Type 2 - WMM IE */
+ if (pos[4] == 0) {
+ elems->wmm_info = pos;
+ elems->wmm_info_len = elen;
+ } else if (pos[4] == 1) {
+ elems->wmm_param = pos;
+ elems->wmm_param_len = elen;
+ }
+ }
+ }
+ break;
+ case WLAN_EID_RSN:
+ elems->rsn = pos;
+ elems->rsn_len = elen;
+ break;
+ case WLAN_EID_ERP_INFO:
+ elems->erp_info = pos;
+ elems->erp_info_len = elen;
+ break;
+ case WLAN_EID_EXT_SUPP_RATES:
+ elems->ext_supp_rates = pos;
+ elems->ext_supp_rates_len = elen;
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+ if (elen >= sizeof(struct ieee80211_ht_cap))
+ elems->ht_cap_elem = (void *)pos;
+ break;
+ case WLAN_EID_HT_INFORMATION:
+ if (elen >= sizeof(struct ieee80211_ht_info))
+ elems->ht_info_elem = (void *)pos;
+ break;
+ case WLAN_EID_MESH_ID:
+ elems->mesh_id = pos;
+ elems->mesh_id_len = elen;
+ break;
+ case WLAN_EID_MESH_CONFIG:
+ if (elen >= sizeof(struct ieee80211_meshconf_ie))
+ elems->mesh_config = (void *)pos;
+ break;
+ case WLAN_EID_PEER_MGMT:
+ elems->peering = pos;
+ elems->peering_len = elen;
+ break;
+ case WLAN_EID_PREQ:
+ elems->preq = pos;
+ elems->preq_len = elen;
+ break;
+ case WLAN_EID_PREP:
+ elems->prep = pos;
+ elems->prep_len = elen;
+ break;
+ case WLAN_EID_PERR:
+ elems->perr = pos;
+ elems->perr_len = elen;
+ break;
+ case WLAN_EID_RANN:
+ if (elen >= sizeof(struct ieee80211_rann_ie))
+ elems->rann = (void *)pos;
+ break;
+ case WLAN_EID_CHANNEL_SWITCH:
+ elems->ch_switch_elem = pos;
+ elems->ch_switch_elem_len = elen;
+ break;
+ case WLAN_EID_QUIET:
+ if (!elems->quiet_elem) {
+ elems->quiet_elem = pos;
+ elems->quiet_elem_len = elen;
+ }
+ elems->num_of_quiet_elem++;
+ break;
+ case WLAN_EID_COUNTRY:
+ elems->country_elem = pos;
+ elems->country_elem_len = elen;
+ break;
+ case WLAN_EID_PWR_CONSTRAINT:
+ elems->pwr_constr_elem = pos;
+ elems->pwr_constr_elem_len = elen;
+ break;
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ elems->timeout_int = pos;
+ elems->timeout_int_len = elen;
+ break;
+ default:
+ break;
+ }
+
+ left -= elen;
+ pos += elen;
+ }
+
+ return crc;
+}
+EXPORT_SYMBOL(ieee802_11_parse_elems_crc);
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 0bf169bb770..62f121d1d9c 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <net/iw_handler.h>
#include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
#include "wext-compat.h"
#include "core.h"
@@ -363,9 +364,9 @@ int cfg80211_wext_giwfrag(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(cfg80211_wext_giwfrag);
-int cfg80211_wext_siwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *retry, char *extra)
+static int cfg80211_wext_siwretry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *retry, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -402,7 +403,6 @@ int cfg80211_wext_siwretry(struct net_device *dev,
return err;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwretry);
int cfg80211_wext_giwretry(struct net_device *dev,
struct iw_request_info *info,
@@ -593,9 +593,9 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
return err;
}
-int cfg80211_wext_siwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *keybuf)
+static int cfg80211_wext_siwencode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *keybuf)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -652,11 +652,10 @@ int cfg80211_wext_siwencode(struct net_device *dev,
wdev->wext.default_key == -1,
idx, &params);
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwencode);
-int cfg80211_wext_siwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *extra)
+static int cfg80211_wext_siwencodeext(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -744,11 +743,10 @@ int cfg80211_wext_siwencodeext(struct net_device *dev,
ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
idx, &params);
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwencodeext);
-int cfg80211_wext_giwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *keybuf)
+static int cfg80211_wext_giwencode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *keybuf)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int idx;
@@ -782,11 +780,10 @@ int cfg80211_wext_giwencode(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode);
-int cfg80211_wext_siwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *wextfreq, char *extra)
+static int cfg80211_wext_siwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *wextfreq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -815,11 +812,10 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq);
-int cfg80211_wext_giwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *freq, char *extra)
+static int cfg80211_wext_giwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -836,11 +832,10 @@ int cfg80211_wext_giwfreq(struct net_device *dev,
return 0;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwfreq);
-int cfg80211_wext_siwtxpower(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
+static int cfg80211_wext_siwtxpower(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -889,11 +884,10 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,
return rdev->ops->set_tx_power(wdev->wiphy, type, DBM_TO_MBM(dbm));
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower);
-int cfg80211_wext_giwtxpower(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
+static int cfg80211_wext_giwtxpower(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -919,7 +913,6 @@ int cfg80211_wext_giwtxpower(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwtxpower);
static int cfg80211_set_auth_alg(struct wireless_dev *wdev,
s32 auth_alg)
@@ -1070,9 +1063,9 @@ static int cfg80211_set_key_mgt(struct wireless_dev *wdev, u32 key_mgt)
return 0;
}
-int cfg80211_wext_siwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *data, char *extra)
+static int cfg80211_wext_siwauth(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -1102,21 +1095,19 @@ int cfg80211_wext_siwauth(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwauth);
-int cfg80211_wext_giwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *data, char *extra)
+static int cfg80211_wext_giwauth(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
{
/* XXX: what do we need? */
return -EOPNOTSUPP;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwauth);
-int cfg80211_wext_siwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq, char *extra)
+static int cfg80211_wext_siwpower(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *wrq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1160,11 +1151,10 @@ int cfg80211_wext_siwpower(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwpower);
-int cfg80211_wext_giwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq, char *extra)
+static int cfg80211_wext_giwpower(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *wrq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -1172,7 +1162,6 @@ int cfg80211_wext_giwpower(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwpower);
static int cfg80211_wds_wext_siwap(struct net_device *dev,
struct iw_request_info *info,
@@ -1218,9 +1207,9 @@ static int cfg80211_wds_wext_giwap(struct net_device *dev,
return 0;
}
-int cfg80211_wext_siwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rate, char *extra)
+static int cfg80211_wext_siwrate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rate, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1268,11 +1257,10 @@ int cfg80211_wext_siwrate(struct net_device *dev,
return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask);
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate);
-int cfg80211_wext_giwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rate, char *extra)
+static int cfg80211_wext_giwrate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rate, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1308,10 +1296,9 @@ int cfg80211_wext_giwrate(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwrate);
/* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */
-struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
+static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1376,11 +1363,10 @@ struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
return &wstats;
}
-EXPORT_SYMBOL_GPL(cfg80211_wireless_stats);
-int cfg80211_wext_siwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra)
+static int cfg80211_wext_siwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -1395,11 +1381,10 @@ int cfg80211_wext_siwap(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwap);
-int cfg80211_wext_giwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra)
+static int cfg80211_wext_giwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -1414,11 +1399,10 @@ int cfg80211_wext_giwap(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwap);
-int cfg80211_wext_siwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *ssid)
+static int cfg80211_wext_siwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -1431,11 +1415,10 @@ int cfg80211_wext_siwessid(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwessid);
-int cfg80211_wext_giwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *ssid)
+static int cfg80211_wext_giwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -1451,11 +1434,10 @@ int cfg80211_wext_giwessid(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwessid);
-int cfg80211_wext_siwpmksa(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra)
+static int cfg80211_wext_siwpmksa(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1493,7 +1475,6 @@ int cfg80211_wext_siwpmksa(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwpmksa);
static const iw_handler cfg80211_handlers[] = {
[IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname,
diff --git a/net/wireless/wext-compat.h b/net/wireless/wext-compat.h
index 20b3daef696..5d766b0118e 100644
--- a/net/wireless/wext-compat.h
+++ b/net/wireless/wext-compat.h
@@ -42,6 +42,14 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *ssid);
+int cfg80211_wext_siwmlme(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra);
+int cfg80211_wext_siwgenie(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra);
+
+
int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq);
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 6fffe62d7c2..0d4b8c3033f 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -9,6 +9,7 @@
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
#include "wext-compat.h"
#include "nl80211.h"
@@ -365,7 +366,6 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
wdev_unlock(wdev);
return err;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwgenie);
int cfg80211_wext_siwmlme(struct net_device *dev,
struct iw_request_info *info,
@@ -402,4 +402,3 @@ int cfg80211_wext_siwmlme(struct net_device *dev,
return err;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwmlme);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index d30615419b4..5f03e4ea65b 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -91,7 +91,7 @@ int x25_parse_address_block(struct sk_buff *skb,
int needed;
int rc;
- if (skb->len < 1) {
+ if (!pskb_may_pull(skb, 1)) {
/* packet has no address block */
rc = 0;
goto empty;
@@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb,
len = *skb->data;
needed = 1 + (len >> 4) + (len & 0x0f);
- if (skb->len < needed) {
+ if (!pskb_may_pull(skb, needed)) {
/* packet is too short to hold the addresses it claims
to hold */
rc = -1;
@@ -295,7 +295,8 @@ static struct sock *x25_find_listener(struct x25_address *addr,
* Found a listening socket, now check the incoming
* call user data vs this sockets call user data
*/
- if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
+ if (x25_sk(s)->cudmatchlength > 0 &&
+ skb->len >= x25_sk(s)->cudmatchlength) {
if((memcmp(x25_sk(s)->calluserdata.cuddata,
skb->data,
x25_sk(s)->cudmatchlength)) == 0) {
@@ -951,14 +952,27 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
*
* Facilities length is mandatory in call request packets
*/
- if (skb->len < 1)
+ if (!pskb_may_pull(skb, 1))
goto out_clear_request;
len = skb->data[0] + 1;
- if (skb->len < len)
+ if (!pskb_may_pull(skb, len))
goto out_clear_request;
skb_pull(skb,len);
/*
+ * Ensure that the amount of call user data is valid.
+ */
+ if (skb->len > X25_MAX_CUD_LEN)
+ goto out_clear_request;
+
+ /*
+ * Get all the call user data so it can be used in
+ * x25_find_listener and skb_copy_from_linear_data up ahead.
+ */
+ if (!pskb_may_pull(skb, skb->len))
+ goto out_clear_request;
+
+ /*
* Find a listener for the particular address/cud pair.
*/
sk = x25_find_listener(&source_addr,skb);
@@ -1166,6 +1180,9 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
* byte of the user data is the logical value of the Q Bit.
*/
if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
+ if (!pskb_may_pull(skb, 1))
+ goto out_kfree_skb;
+
qbit = skb->data[0];
skb_pull(skb, 1);
}
@@ -1244,7 +1261,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
struct x25_sock *x25 = x25_sk(sk);
struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
size_t copied;
- int qbit;
+ int qbit, header_len = x25->neighbour->extended ?
+ X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
+
struct sk_buff *skb;
unsigned char *asmptr;
int rc = -ENOTCONN;
@@ -1265,6 +1284,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
skb = skb_dequeue(&x25->interrupt_in_queue);
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+ goto out_free_dgram;
+
skb_pull(skb, X25_STD_MIN_LEN);
/*
@@ -1285,10 +1307,12 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!skb)
goto out;
+ if (!pskb_may_pull(skb, header_len))
+ goto out_free_dgram;
+
qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;
- skb_pull(skb, x25->neighbour->extended ?
- X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
+ skb_pull(skb, header_len);
if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
asmptr = skb_push(skb, 1);
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index e547ca1578c..fa2b41888bd 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -32,6 +32,9 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
unsigned short frametype;
unsigned int lci;
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+ return 0;
+
frametype = skb->data[2];
lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
@@ -115,6 +118,9 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
goto drop;
}
+ if (!pskb_may_pull(skb, 1))
+ return 0;
+
switch (skb->data[0]) {
case X25_IFACE_DATA:
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index f77e4e75f91..36384a1fa9f 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -44,7 +44,7 @@
int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
{
- unsigned char *p = skb->data;
+ unsigned char *p;
unsigned int len;
*vc_fac_mask = 0;
@@ -60,14 +60,16 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
- if (skb->len < 1)
+ if (!pskb_may_pull(skb, 1))
return 0;
- len = *p++;
+ len = skb->data[0];
- if (len >= skb->len)
+ if (!pskb_may_pull(skb, 1 + len))
return -1;
+ p = skb->data + 1;
+
while (len > 0) {
switch (*p & X25_FAC_CLASS_MASK) {
case X25_FAC_CLASS_A:
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 0b073b51b18..a49cd4ec551 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -107,6 +107,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
/*
* Parse the data in the frame.
*/
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+ goto out_clear;
skb_pull(skb, X25_STD_MIN_LEN);
len = x25_parse_address_block(skb, &source_addr,
@@ -127,9 +129,11 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
* Copy any Call User Data.
*/
if (skb->len > 0) {
- skb_copy_from_linear_data(skb,
- x25->calluserdata.cuddata,
- skb->len);
+ if (skb->len > X25_MAX_CUD_LEN)
+ goto out_clear;
+
+ skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
+ skb->len);
x25->calluserdata.cudlength = skb->len;
}
if (!sock_flag(sk, SOCK_DEAD))
@@ -137,6 +141,9 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
break;
}
case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+ goto out_clear;
+
x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
break;
@@ -164,6 +171,9 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp
switch (frametype) {
case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+ goto out_clear;
+
x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
break;
@@ -177,6 +187,11 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp
}
return 0;
+
+out_clear:
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25_start_t23timer(sk);
+ return 0;
}
/*
@@ -206,6 +221,9 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
break;
case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+ goto out_clear;
+
x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
break;
@@ -304,6 +322,12 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
}
return queued;
+
+out_clear:
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25->state = X25_STATE_2;
+ x25_start_t23timer(sk);
+ return 0;
}
/*
@@ -313,13 +337,13 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
*/
static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
+ struct x25_sock *x25 = x25_sk(sk);
+
switch (frametype) {
case X25_RESET_REQUEST:
x25_write_internal(sk, X25_RESET_CONFIRMATION);
case X25_RESET_CONFIRMATION: {
- struct x25_sock *x25 = x25_sk(sk);
-
x25_stop_timer(sk);
x25->condition = 0x00;
x25->va = 0;
@@ -331,6 +355,9 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp
break;
}
case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+ goto out_clear;
+
x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
break;
@@ -340,6 +367,12 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp
}
return 0;
+
+out_clear:
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25->state = X25_STATE_2;
+ x25_start_t23timer(sk);
+ return 0;
}
/* Higher level upcall for a LAPB frame */
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 037958ff8ee..4acacf3c661 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -90,6 +90,9 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
break;
case X25_DIAGNOSTIC:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
+ break;
+
printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n",
skb->data[3], skb->data[4],
skb->data[5], skb->data[6]);
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 24a342ebc7f..5170d52bfd9 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -269,7 +269,11 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
int *d, int *m)
{
struct x25_sock *x25 = x25_sk(sk);
- unsigned char *frame = skb->data;
+ unsigned char *frame;
+
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+ return X25_ILLEGAL;
+ frame = skb->data;
*ns = *nr = *q = *d = *m = 0;
@@ -294,6 +298,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
if (frame[2] == X25_RR ||
frame[2] == X25_RNR ||
frame[2] == X25_REJ) {
+ if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
+ return X25_ILLEGAL;
+ frame = skb->data;
+
*nr = (frame[3] >> 1) & 0x7F;
return frame[2];
}
@@ -308,6 +316,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
if (x25->neighbour->extended) {
if ((frame[2] & 0x01) == X25_DATA) {
+ if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
+ return X25_ILLEGAL;
+ frame = skb->data;
+
*q = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
*d = (frame[0] & X25_D_BIT) == X25_D_BIT;
*m = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT;
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index fc91ad7ee26..e5246fbe36c 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -70,26 +70,29 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
while ((scratch += len, dlen -= len) > 0) {
skb_frag_t *frag;
+ struct page *page;
err = -EMSGSIZE;
if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
goto out;
frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
- frag->page = alloc_page(GFP_ATOMIC);
+ page = alloc_page(GFP_ATOMIC);
err = -ENOMEM;
- if (!frag->page)
+ if (!page)
goto out;
+ __skb_frag_set_page(frag, page);
+
len = PAGE_SIZE;
if (dlen < len)
len = dlen;
- memcpy(page_address(frag->page), scratch, len);
-
frag->page_offset = 0;
- frag->size = len;
+ skb_frag_size_set(frag, len);
+ memcpy(skb_frag_address(frag), scratch, len);
+
skb->truesize += len;
skb->data_len += len;
skb->len += len;
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index b11ea692bd7..6ca357406ea 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -203,8 +203,6 @@ static int xfrm_replay_check_bmp(struct xfrm_state *x,
if (!replay_esn->replay_window)
return 0;
- pos = (replay_esn->seq - 1) % replay_esn->replay_window;
-
if (unlikely(seq == 0))
goto err;
@@ -216,19 +214,18 @@ static int xfrm_replay_check_bmp(struct xfrm_state *x,
goto err;
}
- if (pos >= diff) {
+ pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
+ if (pos >= diff)
bitnr = (pos - diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- if (replay_esn->bmp[nr] & (1U << bitnr))
- goto err_replay;
- } else {
+ else
bitnr = replay_esn->replay_window - (diff - pos);
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- if (replay_esn->bmp[nr] & (1U << bitnr))
- goto err_replay;
- }
+
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ if (replay_esn->bmp[nr] & (1U << bitnr))
+ goto err_replay;
+
return 0;
err_replay:
@@ -259,39 +256,27 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
bitnr = bitnr & 0x1F;
replay_esn->bmp[nr] &= ~(1U << bitnr);
}
-
- bitnr = (pos + diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
} else {
nr = (replay_esn->replay_window - 1) >> 5;
for (i = 0; i <= nr; i++)
replay_esn->bmp[i] = 0;
-
- bitnr = (pos + diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
}
+ bitnr = (pos + diff) % replay_esn->replay_window;
replay_esn->seq = seq;
} else {
diff = replay_esn->seq - seq;
- if (pos >= diff) {
+ if (pos >= diff)
bitnr = (pos - diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
- } else {
+ else
bitnr = replay_esn->replay_window - (diff - pos);
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
- }
}
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] |= (1U << bitnr);
+
if (xfrm_aevent_is_on(xs_net(x)))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
@@ -390,8 +375,6 @@ static int xfrm_replay_check_esn(struct xfrm_state *x,
if (!wsize)
return 0;
- pos = (replay_esn->seq - 1) % replay_esn->replay_window;
-
if (unlikely(seq == 0 && replay_esn->seq_hi == 0 &&
(replay_esn->seq < replay_esn->replay_window - 1)))
goto err;
@@ -415,19 +398,18 @@ static int xfrm_replay_check_esn(struct xfrm_state *x,
goto err;
}
- if (pos >= diff) {
+ pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
+ if (pos >= diff)
bitnr = (pos - diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- if (replay_esn->bmp[nr] & (1U << bitnr))
- goto err_replay;
- } else {
+ else
bitnr = replay_esn->replay_window - (diff - pos);
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- if (replay_esn->bmp[nr] & (1U << bitnr))
- goto err_replay;
- }
+
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ if (replay_esn->bmp[nr] & (1U << bitnr))
+ goto err_replay;
+
return 0;
err_replay:
@@ -465,22 +447,13 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
bitnr = bitnr & 0x1F;
replay_esn->bmp[nr] &= ~(1U << bitnr);
}
-
- bitnr = (pos + diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
} else {
nr = (replay_esn->replay_window - 1) >> 5;
for (i = 0; i <= nr; i++)
replay_esn->bmp[i] = 0;
-
- bitnr = (pos + diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
}
+ bitnr = (pos + diff) % replay_esn->replay_window;
replay_esn->seq = seq;
if (unlikely(wrap > 0))
@@ -488,19 +461,16 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
} else {
diff = replay_esn->seq - seq;
- if (pos >= diff) {
+ if (pos >= diff)
bitnr = (pos - diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
- } else {
+ else
bitnr = replay_esn->replay_window - (diff - pos);
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
- }
}
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] |= (1U << bitnr);
+
if (xfrm_aevent_is_on(xs_net(x)))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 0256b8a0a7c..d0a42df5160 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2927,7 +2927,7 @@ static int __net_init xfrm_user_net_init(struct net *net)
if (nlsk == NULL)
return -ENOMEM;
net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
- rcu_assign_pointer(net->xfrm.nlsk, nlsk);
+ RCU_INIT_POINTER(net->xfrm.nlsk, nlsk);
return 0;
}
@@ -2935,7 +2935,7 @@ static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
struct net *net;
list_for_each_entry(net, net_exit_list, exit_list)
- rcu_assign_pointer(net->xfrm.nlsk, NULL);
+ RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
synchronize_net();
list_for_each_entry(net, net_exit_list, exit_list)
netlink_kernel_release(net->xfrm.nlsk_stash);
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 3dfc47134e5..0b3e35c9ef0 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2932,11 +2932,11 @@ sub process {
}
}
if ($level == 0 && $block =~ /^\s*\{/ && !$allowed) {
- my $herectx = $here . "\n";;
+ my $herectx = $here . "\n";
my $cnt = statement_rawlines($block);
for (my $n = 0; $n < $cnt; $n++) {
- $herectx .= raw_line($linenr, $n) . "\n";;
+ $herectx .= raw_line($linenr, $n) . "\n";
}
WARN("BRACES",
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index e26e2fb462d..f936d1fa969 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -735,6 +735,27 @@ static int do_virtio_entry(const char *filename, struct virtio_device_id *id,
return 1;
}
+/*
+ * Looks like: vmbus:guid
+ * Each byte of the guid will be represented by two hex characters
+ * in the name.
+ */
+
+static int do_vmbus_entry(const char *filename, struct hv_vmbus_device_id *id,
+ char *alias)
+{
+ int i;
+ char guid_name[((sizeof(id->guid) + 1)) * 2];
+
+ for (i = 0; i < (sizeof(id->guid) * 2); i += 2)
+ sprintf(&guid_name[i], "%02x", id->guid[i/2]);
+
+ strcpy(alias, "vmbus:");
+ strcat(alias, guid_name);
+
+ return 1;
+}
+
/* Looks like: i2c:S */
static int do_i2c_entry(const char *filename, struct i2c_device_id *id,
char *alias)
@@ -994,6 +1015,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
do_table(symval, sym->st_size,
sizeof(struct virtio_device_id), "virtio",
do_virtio_entry, mod);
+ else if (sym_is(symname, "__mod_vmbus_device_table"))
+ do_table(symval, sym->st_size,
+ sizeof(struct hv_vmbus_device_id), "vmbus",
+ do_vmbus_entry, mod);
else if (sym_is(symname, "__mod_i2c_device_table"))
do_table(symval, sym->st_size,
sizeof(struct i2c_device_id), "i2c",
diff --git a/security/Kconfig b/security/Kconfig
index e0f08b52e4a..51bd5a0b69a 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -38,7 +38,9 @@ config TRUSTED_KEYS
config ENCRYPTED_KEYS
tristate "ENCRYPTED KEYS"
- depends on KEYS && TRUSTED_KEYS
+ depends on KEYS
+ select CRYPTO
+ select CRYPTO_HMAC
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_SHA256
@@ -186,7 +188,7 @@ source security/smack/Kconfig
source security/tomoyo/Kconfig
source security/apparmor/Kconfig
-source security/integrity/ima/Kconfig
+source security/integrity/Kconfig
choice
prompt "Default security module"
diff --git a/security/Makefile b/security/Makefile
index 8bb0fe9e1ca..a5e502f8a05 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -24,5 +24,5 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/built-in.o
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
-subdir-$(CONFIG_IMA) += integrity/ima
-obj-$(CONFIG_IMA) += integrity/ima/built-in.o
+subdir-$(CONFIG_INTEGRITY) += integrity
+obj-$(CONFIG_INTEGRITY) += integrity/built-in.o
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 0848292982a..69ddb47787b 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -200,7 +200,7 @@ void __init aa_destroy_aafs(void)
*
* Returns: error on failure
*/
-int __init aa_create_aafs(void)
+static int __init aa_create_aafs(void)
{
int error;
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 649fad88869..7ee05c6f3c6 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -19,6 +19,7 @@
#include "include/capability.h"
#include "include/context.h"
#include "include/policy.h"
+#include "include/ipc.h"
/* call back to audit ptrace fields */
static void audit_cb(struct audit_buffer *ab, void *va)
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index b82e383beb7..9516948041a 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -18,6 +18,7 @@
#include <linux/vmalloc.h>
#include "include/audit.h"
+#include "include/apparmor.h"
/**
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index d6d9a57b565..741dd13e089 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -381,11 +381,11 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
profile->file.trans.size = size;
for (i = 0; i < size; i++) {
char *str;
- int c, j, size = unpack_strdup(e, &str, NULL);
+ int c, j, size2 = unpack_strdup(e, &str, NULL);
/* unpack_strdup verifies that the last character is
* null termination byte.
*/
- if (!size)
+ if (!size2)
goto fail;
profile->file.trans.table[i] = str;
/* verify that name doesn't start with space */
@@ -393,7 +393,7 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
goto fail;
/* count internal # of internal \0 */
- for (c = j = 0; j < size - 2; j++) {
+ for (c = j = 0; j < size2 - 2; j++) {
if (!str[j])
c++;
}
@@ -440,11 +440,11 @@ static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
if (size > RLIM_NLIMITS)
goto fail;
for (i = 0; i < size; i++) {
- u64 tmp = 0;
+ u64 tmp2 = 0;
int a = aa_map_resource(i);
- if (!unpack_u64(e, &tmp, NULL))
+ if (!unpack_u64(e, &tmp2, NULL))
goto fail;
- profile->rlimits.limits[a].rlim_max = tmp;
+ profile->rlimits.limits[a].rlim_max = tmp2;
}
if (!unpack_nameX(e, AA_ARRAYEND, NULL))
goto fail;
diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c
index 04a2cf8d1b6..1b41c542d37 100644
--- a/security/apparmor/procattr.c
+++ b/security/apparmor/procattr.c
@@ -16,6 +16,7 @@
#include "include/context.h"
#include "include/policy.h"
#include "include/domain.h"
+#include "include/procattr.h"
/**
diff --git a/security/commoncap.c b/security/commoncap.c
index a93b3b73307..ee4f8486e5f 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -332,7 +332,8 @@ int cap_inode_killpriv(struct dentry *dentry)
*/
static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
struct linux_binprm *bprm,
- bool *effective)
+ bool *effective,
+ bool *has_cap)
{
struct cred *new = bprm->cred;
unsigned i;
@@ -341,6 +342,9 @@ static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
if (caps->magic_etc & VFS_CAP_FLAGS_EFFECTIVE)
*effective = true;
+ if (caps->magic_etc & VFS_CAP_REVISION_MASK)
+ *has_cap = true;
+
CAP_FOR_EACH_U32(i) {
__u32 permitted = caps->permitted.cap[i];
__u32 inheritable = caps->inheritable.cap[i];
@@ -424,7 +428,7 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
* its xattrs and, if present, apply them to the proposed credentials being
* constructed by execve().
*/
-static int get_file_caps(struct linux_binprm *bprm, bool *effective)
+static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_cap)
{
struct dentry *dentry;
int rc = 0;
@@ -450,7 +454,7 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective)
goto out;
}
- rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective);
+ rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_cap);
if (rc == -EINVAL)
printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
__func__, rc, bprm->filename);
@@ -475,11 +479,11 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
{
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
- bool effective;
+ bool effective, has_cap = false;
int ret;
effective = false;
- ret = get_file_caps(bprm, &effective);
+ ret = get_file_caps(bprm, &effective, &has_cap);
if (ret < 0)
return ret;
@@ -489,7 +493,7 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
* for a setuid root binary run by a non-root user. Do set it
* for a root user just to cause least surprise to an admin.
*/
- if (effective && new->uid != 0 && new->euid == 0) {
+ if (has_cap && new->uid != 0 && new->euid == 0) {
warn_setuid_and_fcaps_mixed(bprm->filename);
goto skip;
}
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
new file mode 100644
index 00000000000..4bf00acf793
--- /dev/null
+++ b/security/integrity/Kconfig
@@ -0,0 +1,7 @@
+#
+config INTEGRITY
+ def_bool y
+ depends on IMA || EVM
+
+source security/integrity/ima/Kconfig
+source security/integrity/evm/Kconfig
diff --git a/security/integrity/Makefile b/security/integrity/Makefile
new file mode 100644
index 00000000000..0ae44aea651
--- /dev/null
+++ b/security/integrity/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for caching inode integrity data (iint)
+#
+
+obj-$(CONFIG_INTEGRITY) += integrity.o
+
+integrity-y := iint.o
+
+subdir-$(CONFIG_IMA) += ima
+obj-$(CONFIG_IMA) += ima/built-in.o
+subdir-$(CONFIG_EVM) += evm
+obj-$(CONFIG_EVM) += evm/built-in.o
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
new file mode 100644
index 00000000000..afbb59dd262
--- /dev/null
+++ b/security/integrity/evm/Kconfig
@@ -0,0 +1,13 @@
+config EVM
+ boolean "EVM support"
+ depends on SECURITY && KEYS && (TRUSTED_KEYS=y || TRUSTED_KEYS=n)
+ select CRYPTO_HMAC
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select ENCRYPTED_KEYS
+ default n
+ help
+ EVM protects a file's security extended attributes against
+ integrity attacks.
+
+ If you are unsure how to answer this question, answer N.
diff --git a/security/integrity/evm/Makefile b/security/integrity/evm/Makefile
new file mode 100644
index 00000000000..7393c415a06
--- /dev/null
+++ b/security/integrity/evm/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for building the Extended Verification Module(EVM)
+#
+obj-$(CONFIG_EVM) += evm.o
+
+evm-y := evm_main.o evm_crypto.o evm_secfs.o
+evm-$(CONFIG_FS_POSIX_ACL) += evm_posix_acl.o
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
new file mode 100644
index 00000000000..d320f519743
--- /dev/null
+++ b/security/integrity/evm/evm.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm.h
+ *
+ */
+#include <linux/xattr.h>
+#include <linux/security.h>
+#include "../integrity.h"
+
+extern int evm_initialized;
+extern char *evm_hmac;
+
+extern struct crypto_shash *hmac_tfm;
+
+/* List of EVM protected security xattrs */
+extern char *evm_config_xattrnames[];
+
+extern int evm_init_key(void);
+extern int evm_update_evmxattr(struct dentry *dentry,
+ const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len);
+extern int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len, char *digest);
+extern int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
+ char *hmac_val);
+extern int evm_init_secfs(void);
+extern void evm_cleanup_secfs(void);
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
new file mode 100644
index 00000000000..5dd5b140242
--- /dev/null
+++ b/security/integrity/evm/evm_crypto.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_crypto.c
+ * Using root's kernel master key (kmk), calculate the HMAC
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/xattr.h>
+#include <keys/encrypted-type.h>
+#include <crypto/hash.h>
+#include "evm.h"
+
+#define EVMKEY "evm-key"
+#define MAX_KEY_SIZE 128
+static unsigned char evmkey[MAX_KEY_SIZE];
+static int evmkey_len = MAX_KEY_SIZE;
+
+struct crypto_shash *hmac_tfm;
+
+static struct shash_desc *init_desc(void)
+{
+ int rc;
+ struct shash_desc *desc;
+
+ if (hmac_tfm == NULL) {
+ hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hmac_tfm)) {
+ pr_err("Can not allocate %s (reason: %ld)\n",
+ evm_hmac, PTR_ERR(hmac_tfm));
+ rc = PTR_ERR(hmac_tfm);
+ hmac_tfm = NULL;
+ return ERR_PTR(rc);
+ }
+ }
+
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
+ GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ desc->tfm = hmac_tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
+ if (rc)
+ goto out;
+ rc = crypto_shash_init(desc);
+out:
+ if (rc) {
+ kfree(desc);
+ return ERR_PTR(rc);
+ }
+ return desc;
+}
+
+/* Protect against 'cutting & pasting' security.evm xattr, include inode
+ * specific info.
+ *
+ * (Additional directory/file metadata needs to be added for more complete
+ * protection.)
+ */
+static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
+ char *digest)
+{
+ struct h_misc {
+ unsigned long ino;
+ __u32 generation;
+ uid_t uid;
+ gid_t gid;
+ umode_t mode;
+ } hmac_misc;
+
+ memset(&hmac_misc, 0, sizeof hmac_misc);
+ hmac_misc.ino = inode->i_ino;
+ hmac_misc.generation = inode->i_generation;
+ hmac_misc.uid = inode->i_uid;
+ hmac_misc.gid = inode->i_gid;
+ hmac_misc.mode = inode->i_mode;
+ crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof hmac_misc);
+ crypto_shash_final(desc, digest);
+}
+
+/*
+ * Calculate the HMAC value across the set of protected security xattrs.
+ *
+ * Instead of retrieving the requested xattr, for performance, calculate
+ * the hmac using the requested xattr value. Don't alloc/free memory for
+ * each xattr, but attempt to re-use the previously allocated memory.
+ */
+int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value, size_t req_xattr_value_len,
+ char *digest)
+{
+ struct inode *inode = dentry->d_inode;
+ struct shash_desc *desc;
+ char **xattrname;
+ size_t xattr_size = 0;
+ char *xattr_value = NULL;
+ int error;
+ int size;
+
+ if (!inode->i_op || !inode->i_op->getxattr)
+ return -EOPNOTSUPP;
+ desc = init_desc();
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ error = -ENODATA;
+ for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+ if ((req_xattr_name && req_xattr_value)
+ && !strcmp(*xattrname, req_xattr_name)) {
+ error = 0;
+ crypto_shash_update(desc, (const u8 *)req_xattr_value,
+ req_xattr_value_len);
+ continue;
+ }
+ size = vfs_getxattr_alloc(dentry, *xattrname,
+ &xattr_value, xattr_size, GFP_NOFS);
+ if (size == -ENOMEM) {
+ error = -ENOMEM;
+ goto out;
+ }
+ if (size < 0)
+ continue;
+
+ error = 0;
+ xattr_size = size;
+ crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
+ }
+ hmac_add_misc(desc, inode, digest);
+
+out:
+ kfree(xattr_value);
+ kfree(desc);
+ return error;
+}
+
+/*
+ * Calculate the hmac and update security.evm xattr
+ *
+ * Expects to be called with i_mutex locked.
+ */
+int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
+ const char *xattr_value, size_t xattr_value_len)
+{
+ struct inode *inode = dentry->d_inode;
+ struct evm_ima_xattr_data xattr_data;
+ int rc = 0;
+
+ rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, xattr_data.digest);
+ if (rc == 0) {
+ xattr_data.type = EVM_XATTR_HMAC;
+ rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
+ &xattr_data,
+ sizeof(xattr_data), 0);
+ }
+ else if (rc == -ENODATA)
+ rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
+ return rc;
+}
+
+int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
+ char *hmac_val)
+{
+ struct shash_desc *desc;
+
+ desc = init_desc();
+ if (IS_ERR(desc)) {
+ printk(KERN_INFO "init_desc failed\n");
+ return PTR_ERR(desc);
+ }
+
+ crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len);
+ hmac_add_misc(desc, inode, hmac_val);
+ kfree(desc);
+ return 0;
+}
+
+/*
+ * Get the key from the TPM for the SHA1-HMAC
+ */
+int evm_init_key(void)
+{
+ struct key *evm_key;
+ struct encrypted_key_payload *ekp;
+ int rc = 0;
+
+ evm_key = request_key(&key_type_encrypted, EVMKEY, NULL);
+ if (IS_ERR(evm_key))
+ return -ENOENT;
+
+ down_read(&evm_key->sem);
+ ekp = evm_key->payload.data;
+ if (ekp->decrypted_datalen > MAX_KEY_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(evmkey, ekp->decrypted_data, ekp->decrypted_datalen);
+out:
+ /* burn the original key contents */
+ memset(ekp->decrypted_data, 0, ekp->decrypted_datalen);
+ up_read(&evm_key->sem);
+ key_put(evm_key);
+ return rc;
+}
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
new file mode 100644
index 00000000000..92d3d99a9f7
--- /dev/null
+++ b/security/integrity/evm/evm_main.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_main.c
+ * implements evm_inode_setxattr, evm_inode_post_setxattr,
+ * evm_inode_removexattr, and evm_verifyxattr
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/xattr.h>
+#include <linux/integrity.h>
+#include <linux/evm.h>
+#include <crypto/hash.h>
+#include "evm.h"
+
+int evm_initialized;
+
+char *evm_hmac = "hmac(sha1)";
+
+char *evm_config_xattrnames[] = {
+#ifdef CONFIG_SECURITY_SELINUX
+ XATTR_NAME_SELINUX,
+#endif
+#ifdef CONFIG_SECURITY_SMACK
+ XATTR_NAME_SMACK,
+#endif
+ XATTR_NAME_CAPS,
+ NULL
+};
+
+static int evm_fixmode;
+static int __init evm_set_fixmode(char *str)
+{
+ if (strncmp(str, "fix", 3) == 0)
+ evm_fixmode = 1;
+ return 0;
+}
+__setup("evm=", evm_set_fixmode);
+
+/*
+ * evm_verify_hmac - calculate and compare the HMAC with the EVM xattr
+ *
+ * Compute the HMAC on the dentry's protected set of extended attributes
+ * and compare it against the stored security.evm xattr.
+ *
+ * For performance:
+ * - use the previoulsy retrieved xattr value and length to calculate the
+ * HMAC.)
+ * - cache the verification result in the iint, when available.
+ *
+ * Returns integrity status
+ */
+static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+ const char *xattr_name,
+ char *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ struct evm_ima_xattr_data xattr_data;
+ enum integrity_status evm_status = INTEGRITY_PASS;
+ int rc;
+
+ if (iint && iint->evm_status == INTEGRITY_PASS)
+ return iint->evm_status;
+
+ /* if status is not PASS, try to check again - against -ENOMEM */
+
+ rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, xattr_data.digest);
+ if (rc < 0) {
+ evm_status = (rc == -ENODATA)
+ ? INTEGRITY_NOXATTRS : INTEGRITY_FAIL;
+ goto out;
+ }
+
+ xattr_data.type = EVM_XATTR_HMAC;
+ rc = vfs_xattr_cmp(dentry, XATTR_NAME_EVM, (u8 *)&xattr_data,
+ sizeof xattr_data, GFP_NOFS);
+ if (rc < 0)
+ evm_status = (rc == -ENODATA)
+ ? INTEGRITY_NOLABEL : INTEGRITY_FAIL;
+out:
+ if (iint)
+ iint->evm_status = evm_status;
+ return evm_status;
+}
+
+static int evm_protected_xattr(const char *req_xattr_name)
+{
+ char **xattrname;
+ int namelen;
+ int found = 0;
+
+ namelen = strlen(req_xattr_name);
+ for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+ if ((strlen(*xattrname) == namelen)
+ && (strncmp(req_xattr_name, *xattrname, namelen) == 0)) {
+ found = 1;
+ break;
+ }
+ if (strncmp(req_xattr_name,
+ *xattrname + XATTR_SECURITY_PREFIX_LEN,
+ strlen(req_xattr_name)) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ return found;
+}
+
+/**
+ * evm_verifyxattr - verify the integrity of the requested xattr
+ * @dentry: object of the verify xattr
+ * @xattr_name: requested xattr
+ * @xattr_value: requested xattr value
+ * @xattr_value_len: requested xattr value length
+ *
+ * Calculate the HMAC for the given dentry and verify it against the stored
+ * security.evm xattr. For performance, use the xattr value and length
+ * previously retrieved to calculate the HMAC.
+ *
+ * Returns the xattr integrity status.
+ *
+ * This function requires the caller to lock the inode's i_mutex before it
+ * is executed.
+ */
+enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value, size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ if (!evm_initialized || !evm_protected_xattr(xattr_name))
+ return INTEGRITY_UNKNOWN;
+
+ if (!iint) {
+ iint = integrity_iint_find(dentry->d_inode);
+ if (!iint)
+ return INTEGRITY_UNKNOWN;
+ }
+ return evm_verify_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, iint);
+}
+EXPORT_SYMBOL_GPL(evm_verifyxattr);
+
+/*
+ * evm_verify_current_integrity - verify the dentry's metadata integrity
+ * @dentry: pointer to the affected dentry
+ *
+ * Verify and return the dentry's metadata integrity. The exceptions are
+ * before EVM is initialized or in 'fix' mode.
+ */
+static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (!evm_initialized || !S_ISREG(inode->i_mode) || evm_fixmode)
+ return 0;
+ return evm_verify_hmac(dentry, NULL, NULL, 0, NULL);
+}
+
+/*
+ * evm_protect_xattr - protect the EVM extended attribute
+ *
+ * Prevent security.evm from being modified or removed without the
+ * necessary permissions or when the existing value is invalid.
+ *
+ * The posix xattr acls are 'system' prefixed, which normally would not
+ * affect security.evm. An interesting side affect of writing posix xattr
+ * acls is their modifying of the i_mode, which is included in security.evm.
+ * For posix xattr acls only, permit security.evm, even if it currently
+ * doesn't exist, to be updated.
+ */
+static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ enum integrity_status evm_status;
+
+ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ } else if (!evm_protected_xattr(xattr_name)) {
+ if (!posix_xattr_acl(xattr_name))
+ return 0;
+ evm_status = evm_verify_current_integrity(dentry);
+ if ((evm_status == INTEGRITY_PASS) ||
+ (evm_status == INTEGRITY_NOXATTRS))
+ return 0;
+ return -EPERM;
+ }
+ evm_status = evm_verify_current_integrity(dentry);
+ return evm_status == INTEGRITY_PASS ? 0 : -EPERM;
+}
+
+/**
+ * evm_inode_setxattr - protect the EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: pointer to the new extended attribute value length
+ *
+ * Updating 'security.evm' requires CAP_SYS_ADMIN privileges and that
+ * the current value is valid.
+ */
+int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ return evm_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+}
+
+/**
+ * evm_inode_removexattr - protect the EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Removing 'security.evm' requires CAP_SYS_ADMIN privileges and that
+ * the current value is valid.
+ */
+int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ return evm_protect_xattr(dentry, xattr_name, NULL, 0);
+}
+
+/**
+ * evm_inode_post_setxattr - update 'security.evm' to reflect the changes
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: pointer to the new extended attribute value length
+ *
+ * Update the HMAC stored in 'security.evm' to reflect the change.
+ *
+ * No need to take the i_mutex lock here, as this function is called from
+ * __vfs_setxattr_noperm(). The caller of which has taken the inode's
+ * i_mutex lock.
+ */
+void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ if (!evm_initialized || (!evm_protected_xattr(xattr_name)
+ && !posix_xattr_acl(xattr_name)))
+ return;
+
+ evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len);
+ return;
+}
+
+/**
+ * evm_inode_post_removexattr - update 'security.evm' after removing the xattr
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Update the HMAC stored in 'security.evm' to reflect removal of the xattr.
+ */
+void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (!evm_initialized || !evm_protected_xattr(xattr_name))
+ return;
+
+ mutex_lock(&inode->i_mutex);
+ evm_update_evmxattr(dentry, xattr_name, NULL, 0);
+ mutex_unlock(&inode->i_mutex);
+ return;
+}
+
+/**
+ * evm_inode_setattr - prevent updating an invalid EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ */
+int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ unsigned int ia_valid = attr->ia_valid;
+ enum integrity_status evm_status;
+
+ if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
+ return 0;
+ evm_status = evm_verify_current_integrity(dentry);
+ if ((evm_status == INTEGRITY_PASS) ||
+ (evm_status == INTEGRITY_NOXATTRS))
+ return 0;
+ return -EPERM;
+}
+
+/**
+ * evm_inode_post_setattr - update 'security.evm' after modifying metadata
+ * @dentry: pointer to the affected dentry
+ * @ia_valid: for the UID and GID status
+ *
+ * For now, update the HMAC stored in 'security.evm' to reflect UID/GID
+ * changes.
+ *
+ * This function is called from notify_change(), which expects the caller
+ * to lock the inode's i_mutex.
+ */
+void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+{
+ if (!evm_initialized)
+ return;
+
+ if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
+ evm_update_evmxattr(dentry, NULL, NULL, 0);
+ return;
+}
+
+/*
+ * evm_inode_init_security - initializes security.evm
+ */
+int evm_inode_init_security(struct inode *inode,
+ const struct xattr *lsm_xattr,
+ struct xattr *evm_xattr)
+{
+ struct evm_ima_xattr_data *xattr_data;
+ int rc;
+
+ if (!evm_initialized || !evm_protected_xattr(lsm_xattr->name))
+ return 0;
+
+ xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS);
+ if (!xattr_data)
+ return -ENOMEM;
+
+ xattr_data->type = EVM_XATTR_HMAC;
+ rc = evm_init_hmac(inode, lsm_xattr, xattr_data->digest);
+ if (rc < 0)
+ goto out;
+
+ evm_xattr->value = xattr_data;
+ evm_xattr->value_len = sizeof(*xattr_data);
+ evm_xattr->name = kstrdup(XATTR_EVM_SUFFIX, GFP_NOFS);
+ return 0;
+out:
+ kfree(xattr_data);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(evm_inode_init_security);
+
+static int __init init_evm(void)
+{
+ int error;
+
+ error = evm_init_secfs();
+ if (error < 0) {
+ printk(KERN_INFO "EVM: Error registering secfs\n");
+ goto err;
+ }
+err:
+ return error;
+}
+
+static void __exit cleanup_evm(void)
+{
+ evm_cleanup_secfs();
+ if (hmac_tfm)
+ crypto_free_shash(hmac_tfm);
+}
+
+/*
+ * evm_display_config - list the EVM protected security extended attributes
+ */
+static int __init evm_display_config(void)
+{
+ char **xattrname;
+
+ for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++)
+ printk(KERN_INFO "EVM: %s\n", *xattrname);
+ return 0;
+}
+
+pure_initcall(evm_display_config);
+late_initcall(init_evm);
+
+MODULE_DESCRIPTION("Extended Verification Module");
+MODULE_LICENSE("GPL");
diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c
new file mode 100644
index 00000000000..b1753e98bf9
--- /dev/null
+++ b/security/integrity/evm/evm_posix_acl.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2011 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/xattr.h>
+
+int posix_xattr_acl(char *xattr)
+{
+ int xattr_len = strlen(xattr);
+
+ if ((strlen(XATTR_NAME_POSIX_ACL_ACCESS) == xattr_len)
+ && (strncmp(XATTR_NAME_POSIX_ACL_ACCESS, xattr, xattr_len) == 0))
+ return 1;
+ if ((strlen(XATTR_NAME_POSIX_ACL_DEFAULT) == xattr_len)
+ && (strncmp(XATTR_NAME_POSIX_ACL_DEFAULT, xattr, xattr_len) == 0))
+ return 1;
+ return 0;
+}
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
new file mode 100644
index 00000000000..ac762995057
--- /dev/null
+++ b/security/integrity/evm/evm_secfs.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_secfs.c
+ * - Used to signal when key is on keyring
+ * - Get the key and enable EVM
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include "evm.h"
+
+static struct dentry *evm_init_tpm;
+
+/**
+ * evm_read_key - read() for <securityfs>/evm
+ *
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t evm_read_key(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d", evm_initialized);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ return rc;
+}
+
+/**
+ * evm_write_key - write() for <securityfs>/evm
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Used to signal that key is on the kernel key ring.
+ * - get the integrity hmac key from the kernel key ring
+ * - create list of hmac protected extended attributes
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t evm_write_key(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ int i, error;
+
+ if (!capable(CAP_SYS_ADMIN) || evm_initialized)
+ return -EPERM;
+
+ if (count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if ((sscanf(temp, "%d", &i) != 1) || (i != 1))
+ return -EINVAL;
+
+ error = evm_init_key();
+ if (!error) {
+ evm_initialized = 1;
+ pr_info("EVM: initialized\n");
+ } else
+ pr_err("EVM: initialization failed\n");
+ return count;
+}
+
+static const struct file_operations evm_key_ops = {
+ .read = evm_read_key,
+ .write = evm_write_key,
+};
+
+int __init evm_init_secfs(void)
+{
+ int error = 0;
+
+ evm_init_tpm = securityfs_create_file("evm", S_IRUSR | S_IRGRP,
+ NULL, NULL, &evm_key_ops);
+ if (!evm_init_tpm || IS_ERR(evm_init_tpm))
+ error = -EFAULT;
+ return error;
+}
+
+void __exit evm_cleanup_secfs(void)
+{
+ if (evm_init_tpm)
+ securityfs_remove(evm_init_tpm);
+}
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
new file mode 100644
index 00000000000..399641c3e84
--- /dev/null
+++ b/security/integrity/iint.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: integrity_iint.c
+ * - implements the integrity hooks: integrity_inode_alloc,
+ * integrity_inode_free
+ * - cache integrity information associated with an inode
+ * using a rbtree tree.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/rbtree.h>
+#include "integrity.h"
+
+static struct rb_root integrity_iint_tree = RB_ROOT;
+static DEFINE_SPINLOCK(integrity_iint_lock);
+static struct kmem_cache *iint_cache __read_mostly;
+
+int iint_initialized;
+
+/*
+ * __integrity_iint_find - return the iint associated with an inode
+ */
+static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+ struct rb_node *n = integrity_iint_tree.rb_node;
+
+ assert_spin_locked(&integrity_iint_lock);
+
+ while (n) {
+ iint = rb_entry(n, struct integrity_iint_cache, rb_node);
+
+ if (inode < iint->inode)
+ n = n->rb_left;
+ else if (inode > iint->inode)
+ n = n->rb_right;
+ else
+ break;
+ }
+ if (!n)
+ return NULL;
+
+ return iint;
+}
+
+/*
+ * integrity_iint_find - return the iint associated with an inode
+ */
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!IS_IMA(inode))
+ return NULL;
+
+ spin_lock(&integrity_iint_lock);
+ iint = __integrity_iint_find(inode);
+ spin_unlock(&integrity_iint_lock);
+
+ return iint;
+}
+
+static void iint_free(struct integrity_iint_cache *iint)
+{
+ iint->version = 0;
+ iint->flags = 0UL;
+ iint->evm_status = INTEGRITY_UNKNOWN;
+ kmem_cache_free(iint_cache, iint);
+}
+
+/**
+ * integrity_inode_alloc - allocate an iint associated with an inode
+ * @inode: pointer to the inode
+ */
+int integrity_inode_alloc(struct inode *inode)
+{
+ struct rb_node **p;
+ struct rb_node *new_node, *parent = NULL;
+ struct integrity_iint_cache *new_iint, *test_iint;
+ int rc;
+
+ new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
+ if (!new_iint)
+ return -ENOMEM;
+
+ new_iint->inode = inode;
+ new_node = &new_iint->rb_node;
+
+ mutex_lock(&inode->i_mutex); /* i_flags */
+ spin_lock(&integrity_iint_lock);
+
+ p = &integrity_iint_tree.rb_node;
+ while (*p) {
+ parent = *p;
+ test_iint = rb_entry(parent, struct integrity_iint_cache,
+ rb_node);
+ rc = -EEXIST;
+ if (inode < test_iint->inode)
+ p = &(*p)->rb_left;
+ else if (inode > test_iint->inode)
+ p = &(*p)->rb_right;
+ else
+ goto out_err;
+ }
+
+ inode->i_flags |= S_IMA;
+ rb_link_node(new_node, parent, p);
+ rb_insert_color(new_node, &integrity_iint_tree);
+
+ spin_unlock(&integrity_iint_lock);
+ mutex_unlock(&inode->i_mutex); /* i_flags */
+
+ return 0;
+out_err:
+ spin_unlock(&integrity_iint_lock);
+ mutex_unlock(&inode->i_mutex); /* i_flags */
+ iint_free(new_iint);
+
+ return rc;
+}
+
+/**
+ * integrity_inode_free - called on security_inode_free
+ * @inode: pointer to the inode
+ *
+ * Free the integrity information(iint) associated with an inode.
+ */
+void integrity_inode_free(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!IS_IMA(inode))
+ return;
+
+ spin_lock(&integrity_iint_lock);
+ iint = __integrity_iint_find(inode);
+ rb_erase(&iint->rb_node, &integrity_iint_tree);
+ spin_unlock(&integrity_iint_lock);
+
+ iint_free(iint);
+}
+
+static void init_once(void *foo)
+{
+ struct integrity_iint_cache *iint = foo;
+
+ memset(iint, 0, sizeof *iint);
+ iint->version = 0;
+ iint->flags = 0UL;
+ mutex_init(&iint->mutex);
+ iint->evm_status = INTEGRITY_UNKNOWN;
+}
+
+static int __init integrity_iintcache_init(void)
+{
+ iint_cache =
+ kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
+ 0, SLAB_PANIC, init_once);
+ iint_initialized = 1;
+ return 0;
+}
+security_initcall(integrity_iintcache_init);
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index b6ecfd4d8d7..19c053b8230 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -3,6 +3,7 @@
config IMA
bool "Integrity Measurement Architecture(IMA)"
depends on SECURITY
+ select INTEGRITY
select SECURITYFS
select CRYPTO
select CRYPTO_HMAC
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index 787c4cb916c..5690c021de8 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -6,4 +6,4 @@
obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
- ima_policy.o ima_iint.o ima_audit.o
+ ima_policy.o ima_audit.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 08408bd7146..3ccf7acac6d 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -24,18 +24,19 @@
#include <linux/tpm.h>
#include <linux/audit.h>
+#include "../integrity.h"
+
enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_ASCII };
enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
/* digest size for IMA, fits SHA1 or MD5 */
-#define IMA_DIGEST_SIZE 20
+#define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
#define IMA_EVENT_NAME_LEN_MAX 255
#define IMA_HASH_BITS 9
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
/* set during initialization */
-extern int iint_initialized;
extern int ima_initialized;
extern int ima_used_chip;
extern char *ima_hash;
@@ -96,34 +97,21 @@ static inline unsigned long ima_hash_key(u8 *digest)
return hash_long(*digest, IMA_HASH_BITS);
}
-/* iint cache flags */
-#define IMA_MEASURED 0x01
-
-/* integrity data associated with an inode */
-struct ima_iint_cache {
- struct rb_node rb_node; /* rooted in ima_iint_tree */
- struct inode *inode; /* back pointer to inode in question */
- u64 version; /* track inode changes */
- unsigned char flags;
- u8 digest[IMA_DIGEST_SIZE];
- struct mutex mutex; /* protects: version, flags, digest */
-};
-
/* LIM API function definitions */
int ima_must_measure(struct inode *inode, int mask, int function);
-int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file);
-void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+ struct file *file);
+void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
const unsigned char *filename);
int ima_store_template(struct ima_template_entry *entry, int violation,
struct inode *inode);
-void ima_template_show(struct seq_file *m, void *e,
- enum ima_show_type show);
+void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show);
/* rbtree tree calls to lookup, insert, delete
* integrity data associated with an inode.
*/
-struct ima_iint_cache *ima_iint_insert(struct inode *inode);
-struct ima_iint_cache *ima_iint_find(struct inode *inode);
+struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
/* IMA policy related functions */
enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index da36d2c085a..0d50df04ccc 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -126,7 +126,8 @@ int ima_must_measure(struct inode *inode, int mask, int function)
*
* Return 0 on success, error code otherwise
*/
-int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file)
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+ struct file *file)
{
int result = -EEXIST;
@@ -156,8 +157,8 @@ int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file)
*
* Must be called with iint->mutex held.
*/
-void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
- const unsigned char *filename)
+void ima_store_measurement(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename)
{
const char *op = "add_template_measure";
const char *audit_cause = "ENOMEM";
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index ef21b96a0b4..e1aa2b482dd 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -287,7 +287,7 @@ static atomic_t policy_opencount = ATOMIC_INIT(1);
/*
* ima_open_policy: sequentialize access to the policy file
*/
-int ima_open_policy(struct inode * inode, struct file * filp)
+static int ima_open_policy(struct inode * inode, struct file * filp)
{
/* No point in being allowed to open it if you aren't going to write */
if (!(filp->f_flags & O_WRONLY))
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
deleted file mode 100644
index 4ae73040ab7..00000000000
--- a/security/integrity/ima/ima_iint.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (C) 2008 IBM Corporation
- *
- * Authors:
- * Mimi Zohar <zohar@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- *
- * File: ima_iint.c
- * - implements the IMA hooks: ima_inode_alloc, ima_inode_free
- * - cache integrity information associated with an inode
- * using a rbtree tree.
- */
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/rbtree.h>
-#include "ima.h"
-
-static struct rb_root ima_iint_tree = RB_ROOT;
-static DEFINE_SPINLOCK(ima_iint_lock);
-static struct kmem_cache *iint_cache __read_mostly;
-
-int iint_initialized = 0;
-
-/*
- * __ima_iint_find - return the iint associated with an inode
- */
-static struct ima_iint_cache *__ima_iint_find(struct inode *inode)
-{
- struct ima_iint_cache *iint;
- struct rb_node *n = ima_iint_tree.rb_node;
-
- assert_spin_locked(&ima_iint_lock);
-
- while (n) {
- iint = rb_entry(n, struct ima_iint_cache, rb_node);
-
- if (inode < iint->inode)
- n = n->rb_left;
- else if (inode > iint->inode)
- n = n->rb_right;
- else
- break;
- }
- if (!n)
- return NULL;
-
- return iint;
-}
-
-/*
- * ima_iint_find - return the iint associated with an inode
- */
-struct ima_iint_cache *ima_iint_find(struct inode *inode)
-{
- struct ima_iint_cache *iint;
-
- if (!IS_IMA(inode))
- return NULL;
-
- spin_lock(&ima_iint_lock);
- iint = __ima_iint_find(inode);
- spin_unlock(&ima_iint_lock);
-
- return iint;
-}
-
-static void iint_free(struct ima_iint_cache *iint)
-{
- iint->version = 0;
- iint->flags = 0UL;
- kmem_cache_free(iint_cache, iint);
-}
-
-/**
- * ima_inode_alloc - allocate an iint associated with an inode
- * @inode: pointer to the inode
- */
-int ima_inode_alloc(struct inode *inode)
-{
- struct rb_node **p;
- struct rb_node *new_node, *parent = NULL;
- struct ima_iint_cache *new_iint, *test_iint;
- int rc;
-
- new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
- if (!new_iint)
- return -ENOMEM;
-
- new_iint->inode = inode;
- new_node = &new_iint->rb_node;
-
- mutex_lock(&inode->i_mutex); /* i_flags */
- spin_lock(&ima_iint_lock);
-
- p = &ima_iint_tree.rb_node;
- while (*p) {
- parent = *p;
- test_iint = rb_entry(parent, struct ima_iint_cache, rb_node);
-
- rc = -EEXIST;
- if (inode < test_iint->inode)
- p = &(*p)->rb_left;
- else if (inode > test_iint->inode)
- p = &(*p)->rb_right;
- else
- goto out_err;
- }
-
- inode->i_flags |= S_IMA;
- rb_link_node(new_node, parent, p);
- rb_insert_color(new_node, &ima_iint_tree);
-
- spin_unlock(&ima_iint_lock);
- mutex_unlock(&inode->i_mutex); /* i_flags */
-
- return 0;
-out_err:
- spin_unlock(&ima_iint_lock);
- mutex_unlock(&inode->i_mutex); /* i_flags */
- iint_free(new_iint);
-
- return rc;
-}
-
-/**
- * ima_inode_free - called on security_inode_free
- * @inode: pointer to the inode
- *
- * Free the integrity information(iint) associated with an inode.
- */
-void ima_inode_free(struct inode *inode)
-{
- struct ima_iint_cache *iint;
-
- if (!IS_IMA(inode))
- return;
-
- spin_lock(&ima_iint_lock);
- iint = __ima_iint_find(inode);
- rb_erase(&iint->rb_node, &ima_iint_tree);
- spin_unlock(&ima_iint_lock);
-
- iint_free(iint);
-}
-
-static void init_once(void *foo)
-{
- struct ima_iint_cache *iint = foo;
-
- memset(iint, 0, sizeof *iint);
- iint->version = 0;
- iint->flags = 0UL;
- mutex_init(&iint->mutex);
-}
-
-static int __init ima_iintcache_init(void)
-{
- iint_cache =
- kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
- SLAB_PANIC, init_once);
- iint_initialized = 1;
- return 0;
-}
-security_initcall(ima_iintcache_init);
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 26b46ff7466..1eff5cb001e 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -22,6 +22,7 @@
#include <linux/mount.h>
#include <linux/mman.h>
#include <linux/slab.h>
+#include <linux/ima.h>
#include "ima.h"
@@ -82,7 +83,7 @@ out:
"open_writers");
}
-static void ima_check_last_writer(struct ima_iint_cache *iint,
+static void ima_check_last_writer(struct integrity_iint_cache *iint,
struct inode *inode,
struct file *file)
{
@@ -105,12 +106,12 @@ static void ima_check_last_writer(struct ima_iint_cache *iint,
void ima_file_free(struct file *file)
{
struct inode *inode = file->f_dentry->d_inode;
- struct ima_iint_cache *iint;
+ struct integrity_iint_cache *iint;
if (!iint_initialized || !S_ISREG(inode->i_mode))
return;
- iint = ima_iint_find(inode);
+ iint = integrity_iint_find(inode);
if (!iint)
return;
@@ -121,7 +122,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
int mask, int function)
{
struct inode *inode = file->f_dentry->d_inode;
- struct ima_iint_cache *iint;
+ struct integrity_iint_cache *iint;
int rc = 0;
if (!ima_initialized || !S_ISREG(inode->i_mode))
@@ -131,9 +132,9 @@ static int process_measurement(struct file *file, const unsigned char *filename,
if (rc != 0)
return rc;
retry:
- iint = ima_iint_find(inode);
+ iint = integrity_iint_find(inode);
if (!iint) {
- rc = ima_inode_alloc(inode);
+ rc = integrity_inode_alloc(inode);
if (!rc || rc == -EEXIST)
goto retry;
return rc;
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
new file mode 100644
index 00000000000..3143a3c3986
--- /dev/null
+++ b/security/integrity/integrity.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2009-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/integrity.h>
+#include <crypto/sha.h>
+
+/* iint cache flags */
+#define IMA_MEASURED 0x01
+
+enum evm_ima_xattr_type {
+ IMA_XATTR_DIGEST = 0x01,
+ EVM_XATTR_HMAC,
+ EVM_IMA_XATTR_DIGSIG,
+};
+
+struct evm_ima_xattr_data {
+ u8 type;
+ u8 digest[SHA1_DIGEST_SIZE];
+} __attribute__((packed));
+
+/* integrity data associated with an inode */
+struct integrity_iint_cache {
+ struct rb_node rb_node; /* rooted in integrity_iint_tree */
+ struct inode *inode; /* back pointer to inode in question */
+ u64 version; /* track inode changes */
+ unsigned char flags;
+ u8 digest[SHA1_DIGEST_SIZE];
+ struct mutex mutex; /* protects: version, flags, digest */
+ enum integrity_status evm_status;
+};
+
+/* rbtree tree calls to lookup, insert, delete
+ * integrity data associated with an inode.
+ */
+struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
+
+/* set during initialization */
+extern int iint_initialized;
diff --git a/security/keys/Makefile b/security/keys/Makefile
index b34cc6ee690..a56f1ffdc64 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -14,7 +14,7 @@ obj-y := \
user_defined.o
obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
-obj-$(CONFIG_ENCRYPTED_KEYS) += ecryptfs_format.o encrypted.o
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
obj-$(CONFIG_KEYS_COMPAT) += compat.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/security/keys/encrypted-keys/Makefile b/security/keys/encrypted-keys/Makefile
new file mode 100644
index 00000000000..6bc7a86d102
--- /dev/null
+++ b/security/keys/encrypted-keys/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for encrypted keys
+#
+
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o ecryptfs_format.o
+obj-$(CONFIG_TRUSTED_KEYS) += masterkey_trusted.o
diff --git a/security/keys/ecryptfs_format.c b/security/keys/encrypted-keys/ecryptfs_format.c
index 6daa3b6ff9e..6daa3b6ff9e 100644
--- a/security/keys/ecryptfs_format.c
+++ b/security/keys/encrypted-keys/ecryptfs_format.c
diff --git a/security/keys/ecryptfs_format.h b/security/keys/encrypted-keys/ecryptfs_format.h
index 40294de238b..40294de238b 100644
--- a/security/keys/ecryptfs_format.h
+++ b/security/keys/encrypted-keys/ecryptfs_format.h
diff --git a/security/keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index e7eca9ec4c6..f33804c1b4c 100644
--- a/security/keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -299,31 +299,6 @@ out:
}
/*
- * request_trusted_key - request the trusted key
- *
- * Trusted keys are sealed to PCRs and other metadata. Although userspace
- * manages both trusted/encrypted key-types, like the encrypted key type
- * data, trusted key type data is not visible decrypted from userspace.
- */
-static struct key *request_trusted_key(const char *trusted_desc,
- u8 **master_key, size_t *master_keylen)
-{
- struct trusted_key_payload *tpayload;
- struct key *tkey;
-
- tkey = request_key(&key_type_trusted, trusted_desc, NULL);
- if (IS_ERR(tkey))
- goto error;
-
- down_read(&tkey->sem);
- tpayload = rcu_dereference(tkey->payload.data);
- *master_key = tpayload->key;
- *master_keylen = tpayload->key_len;
-error:
- return tkey;
-}
-
-/*
* request_user_key - request the user key
*
* Use a user provided key to encrypt/decrypt an encrypted-key.
@@ -469,8 +444,14 @@ static struct key *request_master_key(struct encrypted_key_payload *epayload,
goto out;
if (IS_ERR(mkey)) {
- pr_info("encrypted_key: key %s not found",
- epayload->master_desc);
+ int ret = PTR_ERR(epayload);
+
+ if (ret == -ENOTSUPP)
+ pr_info("encrypted_key: key %s not supported",
+ epayload->master_desc);
+ else
+ pr_info("encrypted_key: key %s not found",
+ epayload->master_desc);
goto out;
}
@@ -686,11 +667,19 @@ static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
return -EINVAL;
hex_encoded_data = hex_encoded_iv + (2 * ivsize) + 2;
- hex2bin(epayload->iv, hex_encoded_iv, ivsize);
- hex2bin(epayload->encrypted_data, hex_encoded_data, encrypted_datalen);
+ ret = hex2bin(epayload->iv, hex_encoded_iv, ivsize);
+ if (ret < 0)
+ return -EINVAL;
+ ret = hex2bin(epayload->encrypted_data, hex_encoded_data,
+ encrypted_datalen);
+ if (ret < 0)
+ return -EINVAL;
hmac = epayload->format + epayload->datablob_len;
- hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2), HASH_SIZE);
+ ret = hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2),
+ HASH_SIZE);
+ if (ret < 0)
+ return -EINVAL;
mkey = request_master_key(epayload, &master_key, &master_keylen);
if (IS_ERR(mkey))
diff --git a/security/keys/encrypted.h b/security/keys/encrypted-keys/encrypted.h
index cef5e2f2b7d..b6ade894525 100644
--- a/security/keys/encrypted.h
+++ b/security/keys/encrypted-keys/encrypted.h
@@ -2,6 +2,17 @@
#define __ENCRYPTED_KEY_H
#define ENCRYPTED_DEBUG 0
+#ifdef CONFIG_TRUSTED_KEYS
+extern struct key *request_trusted_key(const char *trusted_desc,
+ u8 **master_key, size_t *master_keylen);
+#else
+static inline struct key *request_trusted_key(const char *trusted_desc,
+ u8 **master_key,
+ size_t *master_keylen)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif
#if ENCRYPTED_DEBUG
static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
diff --git a/security/keys/encrypted-keys/masterkey_trusted.c b/security/keys/encrypted-keys/masterkey_trusted.c
new file mode 100644
index 00000000000..df87272e3f5
--- /dev/null
+++ b/security/keys/encrypted-keys/masterkey_trusted.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * See Documentation/security/keys-trusted-encrypted.txt
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <keys/trusted-type.h>
+
+/*
+ * request_trusted_key - request the trusted key
+ *
+ * Trusted keys are sealed to PCRs and other metadata. Although userspace
+ * manages both trusted/encrypted key-types, like the encrypted key type
+ * data, trusted key type data is not visible decrypted from userspace.
+ */
+struct key *request_trusted_key(const char *trusted_desc,
+ u8 **master_key, size_t *master_keylen)
+{
+ struct trusted_key_payload *tpayload;
+ struct key *tkey;
+
+ tkey = request_key(&key_type_trusted, trusted_desc, NULL);
+ if (IS_ERR(tkey))
+ goto error;
+
+ down_read(&tkey->sem);
+ tpayload = rcu_dereference(tkey->payload.data);
+ *master_key = tpayload->key;
+ *master_keylen = tpayload->key_len;
+error:
+ return tkey;
+}
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 89df6b5f203..bf4d8da5a79 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -1,6 +1,6 @@
/* Key garbage collector
*
- * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -10,6 +10,8 @@
*/
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/security.h>
#include <keys/keyring-type.h>
#include "internal.h"
@@ -19,17 +21,33 @@
unsigned key_gc_delay = 5 * 60;
/*
- * Reaper
+ * Reaper for unused keys.
+ */
+static void key_garbage_collector(struct work_struct *work);
+DECLARE_WORK(key_gc_work, key_garbage_collector);
+
+/*
+ * Reaper for links from keyrings to dead keys.
*/
static void key_gc_timer_func(unsigned long);
-static void key_garbage_collector(struct work_struct *);
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
-static DECLARE_WORK(key_gc_work, key_garbage_collector);
-static key_serial_t key_gc_cursor; /* the last key the gc considered */
-static bool key_gc_again;
-static unsigned long key_gc_executing;
+
static time_t key_gc_next_run = LONG_MAX;
-static time_t key_gc_new_timer;
+static struct key_type *key_gc_dead_keytype;
+
+static unsigned long key_gc_flags;
+#define KEY_GC_KEY_EXPIRED 0 /* A key expired and needs unlinking */
+#define KEY_GC_REAP_KEYTYPE 1 /* A keytype is being unregistered */
+#define KEY_GC_REAPING_KEYTYPE 2 /* Cleared when keytype reaped */
+
+
+/*
+ * Any key whose type gets unregistered will be re-typed to this if it can't be
+ * immediately unlinked.
+ */
+struct key_type key_type_dead = {
+ .name = "dead",
+};
/*
* Schedule a garbage collection run.
@@ -42,31 +60,75 @@ void key_schedule_gc(time_t gc_at)
kenter("%ld", gc_at - now);
- if (gc_at <= now) {
- schedule_work(&key_gc_work);
+ if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
+ kdebug("IMMEDIATE");
+ queue_work(system_nrt_wq, &key_gc_work);
} else if (gc_at < key_gc_next_run) {
+ kdebug("DEFERRED");
+ key_gc_next_run = gc_at;
expires = jiffies + (gc_at - now) * HZ;
mod_timer(&key_gc_timer, expires);
}
}
/*
- * The garbage collector timer kicked off
+ * Some key's cleanup time was met after it expired, so we need to get the
+ * reaper to go through a cycle finding expired keys.
*/
static void key_gc_timer_func(unsigned long data)
{
kenter("");
key_gc_next_run = LONG_MAX;
- schedule_work(&key_gc_work);
+ set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
+ queue_work(system_nrt_wq, &key_gc_work);
+}
+
+/*
+ * wait_on_bit() sleep function for uninterruptible waiting
+ */
+static int key_gc_wait_bit(void *flags)
+{
+ schedule();
+ return 0;
+}
+
+/*
+ * Reap keys of dead type.
+ *
+ * We use three flags to make sure we see three complete cycles of the garbage
+ * collector: the first to mark keys of that type as being dead, the second to
+ * collect dead links and the third to clean up the dead keys. We have to be
+ * careful as there may already be a cycle in progress.
+ *
+ * The caller must be holding key_types_sem.
+ */
+void key_gc_keytype(struct key_type *ktype)
+{
+ kenter("%s", ktype->name);
+
+ key_gc_dead_keytype = ktype;
+ set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+ smp_mb();
+ set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
+
+ kdebug("schedule");
+ queue_work(system_nrt_wq, &key_gc_work);
+
+ kdebug("sleep");
+ wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
+ TASK_UNINTERRUPTIBLE);
+
+ key_gc_dead_keytype = NULL;
+ kleave("");
}
/*
* Garbage collect pointers from a keyring.
*
- * Return true if we altered the keyring.
+ * Not called with any locks held. The keyring's key struct will not be
+ * deallocated under us as only our caller may deallocate it.
*/
-static bool key_gc_keyring(struct key *keyring, time_t limit)
- __releases(key_serial_lock)
+static void key_gc_keyring(struct key *keyring, time_t limit)
{
struct keyring_list *klist;
struct key *key;
@@ -93,130 +155,234 @@ static bool key_gc_keyring(struct key *keyring, time_t limit)
unlock_dont_gc:
rcu_read_unlock();
dont_gc:
- kleave(" = false");
- return false;
+ kleave(" [no gc]");
+ return;
do_gc:
rcu_read_unlock();
- key_gc_cursor = keyring->serial;
- key_get(keyring);
- spin_unlock(&key_serial_lock);
+
keyring_gc(keyring, limit);
- key_put(keyring);
- kleave(" = true");
- return true;
+ kleave(" [gc]");
}
/*
- * Garbage collector for keys. This involves scanning the keyrings for dead,
- * expired and revoked keys that have overstayed their welcome
+ * Garbage collect an unreferenced, detached key
*/
-static void key_garbage_collector(struct work_struct *work)
+static noinline void key_gc_unused_key(struct key *key)
{
- struct rb_node *rb;
- key_serial_t cursor;
- struct key *key, *xkey;
- time_t new_timer = LONG_MAX, limit, now;
-
- now = current_kernel_time().tv_sec;
- kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now);
-
- if (test_and_set_bit(0, &key_gc_executing)) {
- key_schedule_gc(current_kernel_time().tv_sec + 1);
- kleave(" [busy; deferring]");
- return;
+ key_check(key);
+
+ security_key_free(key);
+
+ /* deal with the user's key tracking and quota */
+ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+ spin_lock(&key->user->lock);
+ key->user->qnkeys--;
+ key->user->qnbytes -= key->quotalen;
+ spin_unlock(&key->user->lock);
}
- limit = now;
+ atomic_dec(&key->user->nkeys);
+ if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ atomic_dec(&key->user->nikeys);
+
+ key_user_put(key->user);
+
+ /* now throw away the key memory */
+ if (key->type->destroy)
+ key->type->destroy(key);
+
+ kfree(key->description);
+
+#ifdef KEY_DEBUGGING
+ key->magic = KEY_DEBUG_MAGIC_X;
+#endif
+ kmem_cache_free(key_jar, key);
+}
+
+/*
+ * Garbage collector for unused keys.
+ *
+ * This is done in process context so that we don't have to disable interrupts
+ * all over the place. key_put() schedules this rather than trying to do the
+ * cleanup itself, which means key_put() doesn't have to sleep.
+ */
+static void key_garbage_collector(struct work_struct *work)
+{
+ static u8 gc_state; /* Internal persistent state */
+#define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */
+#define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */
+#define KEY_GC_SET_TIMER 0x04 /* - We need to restart the timer */
+#define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */
+#define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */
+#define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */
+#define KEY_GC_FOUND_DEAD_KEY 0x80 /* - We found at least one dead key */
+
+ struct rb_node *cursor;
+ struct key *key;
+ time_t new_timer, limit;
+
+ kenter("[%lx,%x]", key_gc_flags, gc_state);
+
+ limit = current_kernel_time().tv_sec;
if (limit > key_gc_delay)
limit -= key_gc_delay;
else
limit = key_gc_delay;
+ /* Work out what we're going to be doing in this pass */
+ gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
+ gc_state <<= 1;
+ if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;
+
+ if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_DEAD_1;
+ kdebug("new pass %x", gc_state);
+
+ new_timer = LONG_MAX;
+
+ /* As only this function is permitted to remove things from the key
+ * serial tree, if cursor is non-NULL then it will always point to a
+ * valid node in the tree - even if lock got dropped.
+ */
spin_lock(&key_serial_lock);
+ cursor = rb_first(&key_serial_tree);
- if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) {
- spin_unlock(&key_serial_lock);
- clear_bit(0, &key_gc_executing);
- return;
- }
+continue_scanning:
+ while (cursor) {
+ key = rb_entry(cursor, struct key, serial_node);
+ cursor = rb_next(cursor);
- cursor = key_gc_cursor;
- if (cursor < 0)
- cursor = 0;
- if (cursor > 0)
- new_timer = key_gc_new_timer;
- else
- key_gc_again = false;
-
- /* find the first key above the cursor */
- key = NULL;
- rb = key_serial_tree.rb_node;
- while (rb) {
- xkey = rb_entry(rb, struct key, serial_node);
- if (cursor < xkey->serial) {
- key = xkey;
- rb = rb->rb_left;
- } else if (cursor > xkey->serial) {
- rb = rb->rb_right;
- } else {
- rb = rb_next(rb);
- if (!rb)
- goto reached_the_end;
- key = rb_entry(rb, struct key, serial_node);
- break;
+ if (atomic_read(&key->usage) == 0)
+ goto found_unreferenced_key;
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
+ if (key->type == key_gc_dead_keytype) {
+ gc_state |= KEY_GC_FOUND_DEAD_KEY;
+ set_bit(KEY_FLAG_DEAD, &key->flags);
+ key->perm = 0;
+ goto skip_dead_key;
+ }
+ }
+
+ if (gc_state & KEY_GC_SET_TIMER) {
+ if (key->expiry > limit && key->expiry < new_timer) {
+ kdebug("will expire %x in %ld",
+ key_serial(key), key->expiry - limit);
+ new_timer = key->expiry;
+ }
}
- }
- if (!key)
- goto reached_the_end;
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
+ if (key->type == key_gc_dead_keytype)
+ gc_state |= KEY_GC_FOUND_DEAD_KEY;
- /* trawl through the keys looking for keyrings */
- for (;;) {
- if (key->expiry > limit && key->expiry < new_timer) {
- kdebug("will expire %x in %ld",
- key_serial(key), key->expiry - limit);
- new_timer = key->expiry;
+ if ((gc_state & KEY_GC_REAPING_LINKS) ||
+ unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
+ if (key->type == &key_type_keyring)
+ goto found_keyring;
}
- if (key->type == &key_type_keyring &&
- key_gc_keyring(key, limit))
- /* the gc had to release our lock so that the keyring
- * could be modified, so we have to get it again */
- goto gc_released_our_lock;
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
+ if (key->type == key_gc_dead_keytype)
+ goto destroy_dead_key;
- rb = rb_next(&key->serial_node);
- if (!rb)
- goto reached_the_end;
- key = rb_entry(rb, struct key, serial_node);
+ skip_dead_key:
+ if (spin_is_contended(&key_serial_lock) || need_resched())
+ goto contended;
}
-gc_released_our_lock:
- kdebug("gc_released_our_lock");
- key_gc_new_timer = new_timer;
- key_gc_again = true;
- clear_bit(0, &key_gc_executing);
- schedule_work(&key_gc_work);
- kleave(" [continue]");
- return;
-
- /* when we reach the end of the run, we set the timer for the next one */
-reached_the_end:
- kdebug("reached_the_end");
+contended:
spin_unlock(&key_serial_lock);
- key_gc_new_timer = new_timer;
- key_gc_cursor = 0;
- clear_bit(0, &key_gc_executing);
-
- if (key_gc_again) {
- /* there may have been a key that expired whilst we were
- * scanning, so if we discarded any links we should do another
- * scan */
- new_timer = now + 1;
- key_schedule_gc(new_timer);
- } else if (new_timer < LONG_MAX) {
+
+maybe_resched:
+ if (cursor) {
+ cond_resched();
+ spin_lock(&key_serial_lock);
+ goto continue_scanning;
+ }
+
+ /* We've completed the pass. Set the timer if we need to and queue a
+ * new cycle if necessary. We keep executing cycles until we find one
+ * where we didn't reap any keys.
+ */
+ kdebug("pass complete");
+
+ if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
new_timer += key_gc_delay;
key_schedule_gc(new_timer);
}
- kleave(" [end]");
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
+ /* Make sure everyone revalidates their keys if we marked a
+ * bunch as being dead and make sure all keyring ex-payloads
+ * are destroyed.
+ */
+ kdebug("dead sync");
+ synchronize_rcu();
+ }
+
+ if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
+ KEY_GC_REAPING_DEAD_2))) {
+ if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
+ /* No remaining dead keys: short circuit the remaining
+ * keytype reap cycles.
+ */
+ kdebug("dead short");
+ gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
+ gc_state |= KEY_GC_REAPING_DEAD_3;
+ } else {
+ gc_state |= KEY_GC_REAP_AGAIN;
+ }
+ }
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
+ kdebug("dead wake");
+ smp_mb();
+ clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+ wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
+ }
+
+ if (gc_state & KEY_GC_REAP_AGAIN)
+ queue_work(system_nrt_wq, &key_gc_work);
+ kleave(" [end %x]", gc_state);
+ return;
+
+ /* We found an unreferenced key - once we've removed it from the tree,
+ * we can safely drop the lock.
+ */
+found_unreferenced_key:
+ kdebug("unrefd key %d", key->serial);
+ rb_erase(&key->serial_node, &key_serial_tree);
+ spin_unlock(&key_serial_lock);
+
+ key_gc_unused_key(key);
+ gc_state |= KEY_GC_REAP_AGAIN;
+ goto maybe_resched;
+
+ /* We found a keyring and we need to check the payload for links to
+ * dead or expired keys. We don't flag another reap immediately as we
+ * have to wait for the old payload to be destroyed by RCU before we
+ * can reap the keys to which it refers.
+ */
+found_keyring:
+ spin_unlock(&key_serial_lock);
+ kdebug("scan keyring %d", key->serial);
+ key_gc_keyring(key, limit);
+ goto maybe_resched;
+
+ /* We found a dead key that is still referenced. Reset its type and
+ * destroy its payload with its semaphore held.
+ */
+destroy_dead_key:
+ spin_unlock(&key_serial_lock);
+ kdebug("destroy key %d", key->serial);
+ down_write(&key->sem);
+ key->type = &key_type_dead;
+ if (key_gc_dead_keytype->destroy)
+ key_gc_dead_keytype->destroy(key);
+ memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
+ up_write(&key->sem);
+ goto maybe_resched;
}
diff --git a/security/keys/internal.h b/security/keys/internal.h
index f375152a250..c7a7caec483 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -31,6 +31,7 @@
no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif
+extern struct key_type key_type_dead;
extern struct key_type key_type_user;
/*****************************************************************************/
@@ -75,6 +76,7 @@ extern unsigned key_quota_maxbytes;
#define KEYQUOTA_LINK_BYTES 4 /* a link in a keyring is worth 4 bytes */
+extern struct kmem_cache *key_jar;
extern struct rb_root key_serial_tree;
extern spinlock_t key_serial_lock;
extern struct mutex key_construction_mutex;
@@ -146,9 +148,11 @@ extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
extern long join_session_keyring(const char *name);
+extern struct work_struct key_gc_work;
extern unsigned key_gc_delay;
extern void keyring_gc(struct key *keyring, time_t limit);
extern void key_schedule_gc(time_t expiry_at);
+extern void key_gc_keytype(struct key_type *ktype);
extern int key_task_permission(const key_ref_t key_ref,
const struct cred *cred,
diff --git a/security/keys/key.c b/security/keys/key.c
index f7f9d93f08d..4414abddcb5 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -21,7 +21,7 @@
#include <linux/user_namespace.h>
#include "internal.h"
-static struct kmem_cache *key_jar;
+struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);
@@ -36,17 +36,9 @@ unsigned int key_quota_maxbytes = 20000; /* general key space quota */
static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);
-static void key_cleanup(struct work_struct *work);
-static DECLARE_WORK(key_cleanup_task, key_cleanup);
-
/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);
-/* Any key who's type gets unegistered will be re-typed to this */
-static struct key_type key_type_dead = {
- .name = "dead",
-};
-
#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
@@ -591,71 +583,6 @@ int key_reject_and_link(struct key *key,
}
EXPORT_SYMBOL(key_reject_and_link);
-/*
- * Garbage collect keys in process context so that we don't have to disable
- * interrupts all over the place.
- *
- * key_put() schedules this rather than trying to do the cleanup itself, which
- * means key_put() doesn't have to sleep.
- */
-static void key_cleanup(struct work_struct *work)
-{
- struct rb_node *_n;
- struct key *key;
-
-go_again:
- /* look for a dead key in the tree */
- spin_lock(&key_serial_lock);
-
- for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
- key = rb_entry(_n, struct key, serial_node);
-
- if (atomic_read(&key->usage) == 0)
- goto found_dead_key;
- }
-
- spin_unlock(&key_serial_lock);
- return;
-
-found_dead_key:
- /* we found a dead key - once we've removed it from the tree, we can
- * drop the lock */
- rb_erase(&key->serial_node, &key_serial_tree);
- spin_unlock(&key_serial_lock);
-
- key_check(key);
-
- security_key_free(key);
-
- /* deal with the user's key tracking and quota */
- if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
- spin_lock(&key->user->lock);
- key->user->qnkeys--;
- key->user->qnbytes -= key->quotalen;
- spin_unlock(&key->user->lock);
- }
-
- atomic_dec(&key->user->nkeys);
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
- atomic_dec(&key->user->nikeys);
-
- key_user_put(key->user);
-
- /* now throw away the key memory */
- if (key->type->destroy)
- key->type->destroy(key);
-
- kfree(key->description);
-
-#ifdef KEY_DEBUGGING
- key->magic = KEY_DEBUG_MAGIC_X;
-#endif
- kmem_cache_free(key_jar, key);
-
- /* there may, of course, be more than one key to destroy */
- goto go_again;
-}
-
/**
* key_put - Discard a reference to a key.
* @key: The key to discard a reference from.
@@ -670,7 +597,7 @@ void key_put(struct key *key)
key_check(key);
if (atomic_dec_and_test(&key->usage))
- schedule_work(&key_cleanup_task);
+ queue_work(system_nrt_wq, &key_gc_work);
}
}
EXPORT_SYMBOL(key_put);
@@ -1048,49 +975,11 @@ EXPORT_SYMBOL(register_key_type);
*/
void unregister_key_type(struct key_type *ktype)
{
- struct rb_node *_n;
- struct key *key;
-
down_write(&key_types_sem);
-
- /* withdraw the key type */
list_del_init(&ktype->link);
-
- /* mark all the keys of this type dead */
- spin_lock(&key_serial_lock);
-
- for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
- key = rb_entry(_n, struct key, serial_node);
-
- if (key->type == ktype) {
- key->type = &key_type_dead;
- set_bit(KEY_FLAG_DEAD, &key->flags);
- }
- }
-
- spin_unlock(&key_serial_lock);
-
- /* make sure everyone revalidates their keys */
- synchronize_rcu();
-
- /* we should now be able to destroy the payloads of all the keys of
- * this type with impunity */
- spin_lock(&key_serial_lock);
-
- for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
- key = rb_entry(_n, struct key, serial_node);
-
- if (key->type == ktype) {
- if (ktype->destroy)
- ktype->destroy(key);
- memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
- }
- }
-
- spin_unlock(&key_serial_lock);
- up_write(&key_types_sem);
-
- key_schedule_gc(0);
+ downgrade_write(&key_types_sem);
+ key_gc_keytype(ktype);
+ up_read(&key_types_sem);
}
EXPORT_SYMBOL(unregister_key_type);
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 30e242f7bd0..37a7f3b2885 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -860,8 +860,7 @@ void __key_link(struct key *keyring, struct key *key,
kenter("%d,%d,%p", keyring->serial, key->serial, nklist);
- klist = rcu_dereference_protected(keyring->payload.subscriptions,
- rwsem_is_locked(&keyring->sem));
+ klist = rcu_dereference_locked_keyring(keyring);
atomic_inc(&key->usage);
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index a3063eb3dc2..1068cb1939b 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -270,7 +270,7 @@ static int install_session_keyring(struct key *keyring)
if (!new)
return -ENOMEM;
- ret = install_session_keyring_to_cred(new, NULL);
+ ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0) {
abort_creds(new);
return ret;
@@ -589,12 +589,22 @@ try_again:
ret = install_user_keyrings();
if (ret < 0)
goto error;
- ret = install_session_keyring(
- cred->user->session_keyring);
+ if (lflags & KEY_LOOKUP_CREATE)
+ ret = join_session_keyring(NULL);
+ else
+ ret = install_session_keyring(
+ cred->user->session_keyring);
if (ret < 0)
goto error;
goto reget_creds;
+ } else if (cred->tgcred->session_keyring ==
+ cred->user->session_keyring &&
+ lflags & KEY_LOOKUP_CREATE) {
+ ret = join_session_keyring(NULL);
+ if (ret < 0)
+ goto error;
+ goto reget_creds;
}
rcu_read_lock();
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 0c33e2ea1f3..0964fc23694 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -779,7 +779,10 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
opt->pcrinfo_len = strlen(args[0].from) / 2;
if (opt->pcrinfo_len > MAX_PCRINFO_SIZE)
return -EINVAL;
- hex2bin(opt->pcrinfo, args[0].from, opt->pcrinfo_len);
+ res = hex2bin(opt->pcrinfo, args[0].from,
+ opt->pcrinfo_len);
+ if (res < 0)
+ return -EINVAL;
break;
case Opt_keyhandle:
res = strict_strtoul(args[0].from, 16, &handle);
@@ -791,12 +794,18 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
case Opt_keyauth:
if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
return -EINVAL;
- hex2bin(opt->keyauth, args[0].from, SHA1_DIGEST_SIZE);
+ res = hex2bin(opt->keyauth, args[0].from,
+ SHA1_DIGEST_SIZE);
+ if (res < 0)
+ return -EINVAL;
break;
case Opt_blobauth:
if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
return -EINVAL;
- hex2bin(opt->blobauth, args[0].from, SHA1_DIGEST_SIZE);
+ res = hex2bin(opt->blobauth, args[0].from,
+ SHA1_DIGEST_SIZE);
+ if (res < 0)
+ return -EINVAL;
break;
case Opt_migratable:
if (*args[0].from == '0')
@@ -860,7 +869,9 @@ static int datablob_parse(char *datablob, struct trusted_key_payload *p,
p->blob_len = strlen(c) / 2;
if (p->blob_len > MAX_BLOB_SIZE)
return -EINVAL;
- hex2bin(p->blob, c, p->blob_len);
+ ret = hex2bin(p->blob, c, p->blob_len);
+ if (ret < 0)
+ return -EINVAL;
ret = getoptions(datablob, p, o);
if (ret < 0)
return ret;
diff --git a/security/security.c b/security/security.c
index 0e4fccfef12..0c6cc69c8f8 100644
--- a/security/security.c
+++ b/security/security.c
@@ -16,15 +16,16 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/security.h>
+#include <linux/integrity.h>
#include <linux/ima.h>
+#include <linux/evm.h>
+
+#define MAX_LSM_EVM_XATTR 2
/* Boot-time LSM user choice */
static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
CONFIG_DEFAULT_SECURITY;
-/* things that live in capability.c */
-extern void __init security_fixup_ops(struct security_operations *ops);
-
static struct security_operations *security_ops;
static struct security_operations default_security_ops = {
.name = "default",
@@ -334,20 +335,57 @@ int security_inode_alloc(struct inode *inode)
void security_inode_free(struct inode *inode)
{
- ima_inode_free(inode);
+ integrity_inode_free(inode);
security_ops->inode_free_security(inode);
}
int security_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, char **name,
- void **value, size_t *len)
+ const struct qstr *qstr,
+ const initxattrs initxattrs, void *fs_data)
{
+ struct xattr new_xattrs[MAX_LSM_EVM_XATTR + 1];
+ struct xattr *lsm_xattr, *evm_xattr, *xattr;
+ int ret;
+
if (unlikely(IS_PRIVATE(inode)))
- return -EOPNOTSUPP;
+ return 0;
+
+ memset(new_xattrs, 0, sizeof new_xattrs);
+ if (!initxattrs)
+ return security_ops->inode_init_security(inode, dir, qstr,
+ NULL, NULL, NULL);
+ lsm_xattr = new_xattrs;
+ ret = security_ops->inode_init_security(inode, dir, qstr,
+ &lsm_xattr->name,
+ &lsm_xattr->value,
+ &lsm_xattr->value_len);
+ if (ret)
+ goto out;
+
+ evm_xattr = lsm_xattr + 1;
+ ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr);
+ if (ret)
+ goto out;
+ ret = initxattrs(inode, new_xattrs, fs_data);
+out:
+ for (xattr = new_xattrs; xattr->name != NULL; xattr++) {
+ kfree(xattr->name);
+ kfree(xattr->value);
+ }
+ return (ret == -EOPNOTSUPP) ? 0 : ret;
+}
+EXPORT_SYMBOL(security_inode_init_security);
+
+int security_old_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, char **name,
+ void **value, size_t *len)
+{
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
return security_ops->inode_init_security(inode, dir, qstr, name, value,
len);
}
-EXPORT_SYMBOL(security_inode_init_security);
+EXPORT_SYMBOL(security_old_inode_init_security);
#ifdef CONFIG_SECURITY_PATH
int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
@@ -523,9 +561,14 @@ int security_inode_permission(struct inode *inode, int mask)
int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
+ int ret;
+
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
- return security_ops->inode_setattr(dentry, attr);
+ ret = security_ops->inode_setattr(dentry, attr);
+ if (ret)
+ return ret;
+ return evm_inode_setattr(dentry, attr);
}
EXPORT_SYMBOL_GPL(security_inode_setattr);
@@ -539,9 +582,14 @@ int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
int security_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
+ int ret;
+
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
- return security_ops->inode_setxattr(dentry, name, value, size, flags);
+ ret = security_ops->inode_setxattr(dentry, name, value, size, flags);
+ if (ret)
+ return ret;
+ return evm_inode_setxattr(dentry, name, value, size);
}
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
@@ -550,6 +598,7 @@ void security_inode_post_setxattr(struct dentry *dentry, const char *name,
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return;
security_ops->inode_post_setxattr(dentry, name, value, size, flags);
+ evm_inode_post_setxattr(dentry, name, value, size);
}
int security_inode_getxattr(struct dentry *dentry, const char *name)
@@ -568,9 +617,14 @@ int security_inode_listxattr(struct dentry *dentry)
int security_inode_removexattr(struct dentry *dentry, const char *name)
{
+ int ret;
+
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
- return security_ops->inode_removexattr(dentry, name);
+ ret = security_ops->inode_removexattr(dentry, name);
+ if (ret)
+ return ret;
+ return evm_inode_removexattr(dentry, name);
}
int security_inode_need_killpriv(struct dentry *dentry)
@@ -1097,6 +1151,7 @@ void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
security_ops->sk_clone_security(sk, newsk);
}
+EXPORT_SYMBOL(security_sk_clone);
void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
{
diff --git a/security/selinux/exports.c b/security/selinux/exports.c
index 90664385dea..e75dd94e2d2 100644
--- a/security/selinux/exports.c
+++ b/security/selinux/exports.c
@@ -12,6 +12,7 @@
* as published by the Free Software Foundation.
*/
#include <linux/module.h>
+#include <linux/selinux.h>
#include "security.h"
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 266a2292451..e545b9f6707 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -89,14 +89,14 @@
#include "xfrm.h"
#include "netlabel.h"
#include "audit.h"
+#include "avc_ss.h"
#define NUM_SEL_MNT_OPTS 5
-extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
extern struct security_operations *security_ops;
/* SECMARK reference count */
-atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
+static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
int selinux_enforcing;
@@ -279,10 +279,6 @@ static void superblock_free_security(struct super_block *sb)
kfree(sbsec);
}
-/* The security server must be initialized before
- any labeling or access decisions can be provided. */
-extern int ss_initialized;
-
/* The file system's label must be initialized prior to use. */
static const char *labeling_behaviors[6] = {
@@ -2097,9 +2093,6 @@ static int selinux_bprm_secureexec(struct linux_binprm *bprm)
return (atsecure || cap_bprm_secureexec(bprm));
}
-extern struct vfsmount *selinuxfs_mount;
-extern struct dentry *selinux_null;
-
/* Derived from fs/exec.c:flush_old_files. */
static inline void flush_unauthorized_files(const struct cred *cred,
struct files_struct *files)
@@ -5803,8 +5796,6 @@ static int selinux_disabled;
int selinux_disable(void)
{
- extern void exit_sel_fs(void);
-
if (ss_initialized) {
/* Not permitted after initial policy load. */
return -EINVAL;
diff --git a/security/selinux/include/avc_ss.h b/security/selinux/include/avc_ss.h
index 4677aa519b0..d5c328452df 100644
--- a/security/selinux/include/avc_ss.h
+++ b/security/selinux/include/avc_ss.h
@@ -18,5 +18,11 @@ struct security_class_mapping {
extern struct security_class_mapping secclass_map[];
+/*
+ * The security server must be initialized before
+ * any labeling or access decisions can be provided.
+ */
+extern int ss_initialized;
+
#endif /* _SELINUX_AVC_SS_H_ */
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 3ba4feba048..d871e8ad210 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -216,6 +216,14 @@ struct selinux_kernel_status {
extern void selinux_status_update_setenforce(int enforcing);
extern void selinux_status_update_policyload(int seqno);
+extern void selinux_complete_init(void);
+extern int selinux_disable(void);
+extern void exit_sel_fs(void);
+extern struct dentry *selinux_null;
+extern struct vfsmount *selinuxfs_mount;
+extern void selnl_notify_setenforce(int val);
+extern void selnl_notify_policyload(u32 seqno);
+extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
#endif /* _SELINUX_SECURITY_H_ */
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index 36ac257cec9..ce3f481558d 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -19,6 +19,8 @@
#include <linux/selinux_netlink.h>
#include <net/net_namespace.h>
+#include "security.h"
+
static struct sock *selnl;
static int selnl_msglen(int msgtype)
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 8b02b2137da..0920ea3bf59 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -21,6 +21,7 @@
#include "flask.h"
#include "av_permissions.h"
+#include "security.h"
struct nlmsg_perm {
u16 nlmsg_type;
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 55d92cbb177..f46658722c7 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -75,8 +75,6 @@ static char policy_opened;
/* global data for policy capabilities */
static struct dentry *policycap_dir;
-extern void selnl_notify_setenforce(int val);
-
/* Check whether a task is allowed to use a security operation. */
static int task_has_security(struct task_struct *tsk,
u32 perms)
@@ -278,7 +276,6 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf,
char *page = NULL;
ssize_t length;
int new_value;
- extern int selinux_disable(void);
length = -ENOMEM;
if (count >= PAGE_SIZE)
@@ -478,7 +475,7 @@ static struct vm_operations_struct sel_mmap_policy_ops = {
.page_mkwrite = sel_mmap_policy_fault,
};
-int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
+static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_SHARED) {
/* do not allow mprotect to make mapping writable */
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index a53373207fb..2ec904177fe 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -555,7 +555,7 @@ static int cond_write_av_list(struct policydb *p,
return 0;
}
-int cond_write_node(struct policydb *p, struct cond_node *node,
+static int cond_write_node(struct policydb *p, struct cond_node *node,
struct policy_file *fp)
{
struct cond_expr *cur_expr;
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h
index 3f209c63529..4d1f8746650 100644
--- a/security/selinux/ss/conditional.h
+++ b/security/selinux/ss/conditional.h
@@ -13,6 +13,7 @@
#include "avtab.h"
#include "symtab.h"
#include "policydb.h"
+#include "../include/conditional.h"
#define COND_EXPR_MAXDEPTH 10
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 2381d0ded22..a7f61d52f05 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -1743,8 +1743,6 @@ static int policydb_bounds_sanity_check(struct policydb *p)
return 0;
}
-extern int ss_initialized;
-
u16 string_to_security_class(struct policydb *p, const char *name)
{
struct class_datum *cladatum;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index f6917bc0aa0..185f849a26f 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -70,8 +70,6 @@
#include "ebitmap.h"
#include "audit.h"
-extern void selnl_notify_policyload(u32 seqno);
-
int selinux_policycap_netpeer;
int selinux_policycap_openperm;
@@ -1790,7 +1788,6 @@ static void security_load_policycaps(void)
POLICYDB_CAPABILITY_OPENPERM);
}
-extern void selinux_complete_init(void);
static int security_preserve_bools(struct policydb *p);
/**
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 2b6c6a51612..2ad00657b80 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -41,9 +41,9 @@ struct superblock_smack {
};
struct socket_smack {
- char *smk_out; /* outbound label */
- char *smk_in; /* inbound label */
- char smk_packet[SMK_LABELLEN]; /* TCP peer label */
+ char *smk_out; /* outbound label */
+ char *smk_in; /* inbound label */
+ char *smk_packet; /* TCP peer label */
};
/*
@@ -116,13 +116,19 @@ struct smk_netlbladdr {
* If there is a cipso value associated with the label it
* gets stored here, too. This will most likely be rare as
* the cipso direct mapping in used internally.
+ *
+ * Keep the access rules for this subject label here so that
+ * the entire set of rules does not need to be examined every
+ * time.
*/
struct smack_known {
struct list_head list;
char smk_known[SMK_LABELLEN];
u32 smk_secid;
struct smack_cipso *smk_cipso;
- spinlock_t smk_cipsolock; /* for changing cipso map */
+ spinlock_t smk_cipsolock; /* for changing cipso map */
+ struct list_head smk_rules; /* access rules */
+ struct mutex smk_rules_lock; /* lock for the rules */
};
/*
@@ -150,7 +156,6 @@ struct smack_known {
/*
* smackfs magic number
- * smackfs macic number
*/
#define SMACK_MAGIC 0x43415d53 /* "SMAC" */
@@ -176,9 +181,9 @@ struct smack_known {
#define MAY_NOT 0
/*
- * Number of access types used by Smack (rwxa)
+ * Number of access types used by Smack (rwxat)
*/
-#define SMK_NUM_ACCESS_TYPE 4
+#define SMK_NUM_ACCESS_TYPE 5
/*
* Smack audit data; is empty if CONFIG_AUDIT not set
@@ -201,10 +206,12 @@ int smk_access_entry(char *, char *, struct list_head *);
int smk_access(char *, char *, int, struct smk_audit_info *);
int smk_curacc(char *, u32, struct smk_audit_info *);
int smack_to_cipso(const char *, struct smack_cipso *);
-void smack_from_cipso(u32, char *, char *);
+char *smack_from_cipso(u32, char *);
char *smack_from_secid(const u32);
+void smk_parse_smack(const char *string, int len, char *smack);
char *smk_import(const char *, int);
struct smack_known *smk_import_entry(const char *, int);
+struct smack_known *smk_find_entry(const char *);
u32 smack_to_secid(const char *);
/*
@@ -223,7 +230,6 @@ extern struct smack_known smack_known_star;
extern struct smack_known smack_known_web;
extern struct list_head smack_known_list;
-extern struct list_head smack_rule_list;
extern struct list_head smk_netlbladdr_list;
extern struct security_operations smack_ops;
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 9637e107f7e..cc7cb6edba0 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -77,14 +77,19 @@ int log_policy = SMACK_AUDIT_DENIED;
* entry is found returns -ENOENT.
*
* NOTE:
- * Even though Smack labels are usually shared on smack_list
- * labels that come in off the network can't be imported
- * and added to the list for locking reasons.
*
- * Therefore, it is necessary to check the contents of the labels,
- * not just the pointer values. Of course, in most cases the labels
- * will be on the list, so checking the pointers may be a worthwhile
- * optimization.
+ * Earlier versions of this function allowed for labels that
+ * were not on the label list. This was done to allow for
+ * labels to come over the network that had never been seen
+ * before on this host. Unless the receiving socket has the
+ * star label this will always result in a failure check. The
+ * star labeled socket case is now handled in the networking
+ * hooks so there is no case where the label is not on the
+ * label list. Checking to see if the address of two labels
+ * is the same is now a reliable test.
+ *
+ * Do the object check first because that is more
+ * likely to differ.
*/
int smk_access_entry(char *subject_label, char *object_label,
struct list_head *rule_list)
@@ -93,13 +98,10 @@ int smk_access_entry(char *subject_label, char *object_label,
struct smack_rule *srp;
list_for_each_entry_rcu(srp, rule_list, list) {
- if (srp->smk_subject == subject_label ||
- strcmp(srp->smk_subject, subject_label) == 0) {
- if (srp->smk_object == object_label ||
- strcmp(srp->smk_object, object_label) == 0) {
- may = srp->smk_access;
- break;
- }
+ if (srp->smk_object == object_label &&
+ srp->smk_subject == subject_label) {
+ may = srp->smk_access;
+ break;
}
}
@@ -117,18 +119,12 @@ int smk_access_entry(char *subject_label, char *object_label,
* access rule list and returns 0 if the access is permitted,
* non zero otherwise.
*
- * Even though Smack labels are usually shared on smack_list
- * labels that come in off the network can't be imported
- * and added to the list for locking reasons.
- *
- * Therefore, it is necessary to check the contents of the labels,
- * not just the pointer values. Of course, in most cases the labels
- * will be on the list, so checking the pointers may be a worthwhile
- * optimization.
+ * Smack labels are shared on smack_list
*/
int smk_access(char *subject_label, char *object_label, int request,
struct smk_audit_info *a)
{
+ struct smack_known *skp;
int may = MAY_NOT;
int rc = 0;
@@ -137,8 +133,7 @@ int smk_access(char *subject_label, char *object_label, int request,
*
* A star subject can't access any object.
*/
- if (subject_label == smack_known_star.smk_known ||
- strcmp(subject_label, smack_known_star.smk_known) == 0) {
+ if (subject_label == smack_known_star.smk_known) {
rc = -EACCES;
goto out_audit;
}
@@ -148,33 +143,27 @@ int smk_access(char *subject_label, char *object_label, int request,
* An internet subject can access any object.
*/
if (object_label == smack_known_web.smk_known ||
- subject_label == smack_known_web.smk_known ||
- strcmp(object_label, smack_known_web.smk_known) == 0 ||
- strcmp(subject_label, smack_known_web.smk_known) == 0)
+ subject_label == smack_known_web.smk_known)
goto out_audit;
/*
* A star object can be accessed by any subject.
*/
- if (object_label == smack_known_star.smk_known ||
- strcmp(object_label, smack_known_star.smk_known) == 0)
+ if (object_label == smack_known_star.smk_known)
goto out_audit;
/*
* An object can be accessed in any way by a subject
* with the same label.
*/
- if (subject_label == object_label ||
- strcmp(subject_label, object_label) == 0)
+ if (subject_label == object_label)
goto out_audit;
/*
* A hat subject can read any object.
* A floor object can be read by any subject.
*/
if ((request & MAY_ANYREAD) == request) {
- if (object_label == smack_known_floor.smk_known ||
- strcmp(object_label, smack_known_floor.smk_known) == 0)
+ if (object_label == smack_known_floor.smk_known)
goto out_audit;
- if (subject_label == smack_known_hat.smk_known ||
- strcmp(subject_label, smack_known_hat.smk_known) == 0)
+ if (subject_label == smack_known_hat.smk_known)
goto out_audit;
}
/*
@@ -184,8 +173,9 @@ int smk_access(char *subject_label, char *object_label, int request,
* good. A negative response from smk_access_entry()
* indicates there is no entry for this pair.
*/
+ skp = smk_find_entry(subject_label);
rcu_read_lock();
- may = smk_access_entry(subject_label, object_label, &smack_rule_list);
+ may = smk_access_entry(subject_label, object_label, &skp->smk_rules);
rcu_read_unlock();
if (may > 0 && (request & may) == request)
@@ -344,17 +334,32 @@ void smack_log(char *subject_label, char *object_label, int request,
static DEFINE_MUTEX(smack_known_lock);
/**
- * smk_import_entry - import a label, return the list entry
+ * smk_find_entry - find a label on the list, return the list entry
* @string: a text string that might be a Smack label
- * @len: the maximum size, or zero if it is NULL terminated.
*
* Returns a pointer to the entry in the label list that
- * matches the passed string, adding it if necessary.
+ * matches the passed string.
*/
-struct smack_known *smk_import_entry(const char *string, int len)
+struct smack_known *smk_find_entry(const char *string)
{
struct smack_known *skp;
- char smack[SMK_LABELLEN];
+
+ list_for_each_entry_rcu(skp, &smack_known_list, list) {
+ if (strncmp(skp->smk_known, string, SMK_MAXLEN) == 0)
+ return skp;
+ }
+
+ return NULL;
+}
+
+/**
+ * smk_parse_smack - parse smack label from a text string
+ * @string: a text string that might contain a Smack label
+ * @len: the maximum size, or zero if it is NULL terminated.
+ * @smack: parsed smack label, or NULL if parse error
+ */
+void smk_parse_smack(const char *string, int len, char *smack)
+{
int found;
int i;
@@ -372,27 +377,38 @@ struct smack_known *smk_import_entry(const char *string, int len)
} else
smack[i] = string[i];
}
+}
+
+/**
+ * smk_import_entry - import a label, return the list entry
+ * @string: a text string that might be a Smack label
+ * @len: the maximum size, or zero if it is NULL terminated.
+ *
+ * Returns a pointer to the entry in the label list that
+ * matches the passed string, adding it if necessary.
+ */
+struct smack_known *smk_import_entry(const char *string, int len)
+{
+ struct smack_known *skp;
+ char smack[SMK_LABELLEN];
+ smk_parse_smack(string, len, smack);
if (smack[0] == '\0')
return NULL;
mutex_lock(&smack_known_lock);
- found = 0;
- list_for_each_entry_rcu(skp, &smack_known_list, list) {
- if (strncmp(skp->smk_known, smack, SMK_MAXLEN) == 0) {
- found = 1;
- break;
- }
- }
+ skp = smk_find_entry(smack);
- if (found == 0) {
+ if (skp == NULL) {
skp = kzalloc(sizeof(struct smack_known), GFP_KERNEL);
if (skp != NULL) {
strncpy(skp->smk_known, smack, SMK_MAXLEN);
skp->smk_secid = smack_next_secid++;
skp->smk_cipso = NULL;
+ INIT_LIST_HEAD(&skp->smk_rules);
spin_lock_init(&skp->smk_cipsolock);
+ mutex_init(&skp->smk_rules_lock);
/*
* Make sure that the entry is actually
* filled before putting it on the list.
@@ -480,19 +496,12 @@ u32 smack_to_secid(const char *smack)
* smack_from_cipso - find the Smack label associated with a CIPSO option
* @level: Bell & LaPadula level from the network
* @cp: Bell & LaPadula categories from the network
- * @result: where to put the Smack value
*
* This is a simple lookup in the label table.
*
- * This is an odd duck as far as smack handling goes in that
- * it sends back a copy of the smack label rather than a pointer
- * to the master list. This is done because it is possible for
- * a foreign host to send a smack label that is new to this
- * machine and hence not on the list. That would not be an
- * issue except that adding an entry to the master list can't
- * be done at that point.
+ * Return the matching label from the label list or NULL.
*/
-void smack_from_cipso(u32 level, char *cp, char *result)
+char *smack_from_cipso(u32 level, char *cp)
{
struct smack_known *kp;
char *final = NULL;
@@ -509,12 +518,13 @@ void smack_from_cipso(u32 level, char *cp, char *result)
final = kp->smk_known;
spin_unlock_bh(&kp->smk_cipsolock);
+
+ if (final != NULL)
+ break;
}
rcu_read_unlock();
- if (final == NULL)
- final = smack_known_huh.smk_known;
- strncpy(result, final, SMK_MAXLEN);
- return;
+
+ return final;
}
/**
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index b9c5e149903..7db62b48eb4 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -5,12 +5,13 @@
*
* Authors:
* Casey Schaufler <casey@schaufler-ca.com>
- * Jarkko Sakkinen <ext-jarkko.2.sakkinen@nokia.com>
+ * Jarkko Sakkinen <jarkko.sakkinen@intel.com>
*
* Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* Paul Moore <paul@paul-moore.com>
* Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2011 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
@@ -34,6 +35,7 @@
#include <linux/audit.h>
#include <linux/magic.h>
#include <linux/dcache.h>
+#include <linux/personality.h>
#include "smack.h"
#define task_security(task) (task_cred_xxx((task), security))
@@ -441,11 +443,17 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
* BPRM hooks
*/
+/**
+ * smack_bprm_set_creds - set creds for exec
+ * @bprm: the exec information
+ *
+ * Returns 0 if it gets a blob, -ENOMEM otherwise
+ */
static int smack_bprm_set_creds(struct linux_binprm *bprm)
{
- struct task_smack *tsp = bprm->cred->security;
+ struct inode *inode = bprm->file->f_path.dentry->d_inode;
+ struct task_smack *bsp = bprm->cred->security;
struct inode_smack *isp;
- struct dentry *dp;
int rc;
rc = cap_bprm_set_creds(bprm);
@@ -455,20 +463,48 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
if (bprm->cred_prepared)
return 0;
- if (bprm->file == NULL || bprm->file->f_dentry == NULL)
+ isp = inode->i_security;
+ if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task)
return 0;
- dp = bprm->file->f_dentry;
+ if (bprm->unsafe)
+ return -EPERM;
- if (dp->d_inode == NULL)
- return 0;
+ bsp->smk_task = isp->smk_task;
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
- isp = dp->d_inode->i_security;
+ return 0;
+}
- if (isp->smk_task != NULL)
- tsp->smk_task = isp->smk_task;
+/**
+ * smack_bprm_committing_creds - Prepare to install the new credentials
+ * from bprm.
+ *
+ * @bprm: binprm for exec
+ */
+static void smack_bprm_committing_creds(struct linux_binprm *bprm)
+{
+ struct task_smack *bsp = bprm->cred->security;
- return 0;
+ if (bsp->smk_task != bsp->smk_forked)
+ current->pdeath_signal = 0;
+}
+
+/**
+ * smack_bprm_secureexec - Return the decision to use secureexec.
+ * @bprm: binprm for exec
+ *
+ * Returns 0 on success.
+ */
+static int smack_bprm_secureexec(struct linux_binprm *bprm)
+{
+ struct task_smack *tsp = current_security();
+ int ret = cap_bprm_secureexec(bprm);
+
+ if (!ret && (tsp->smk_task != tsp->smk_forked))
+ ret = 1;
+
+ return ret;
}
/*
@@ -516,6 +552,8 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, char **name,
void **value, size_t *len)
{
+ struct smack_known *skp;
+ char *csp = smk_of_current();
char *isp = smk_of_inode(inode);
char *dsp = smk_of_inode(dir);
int may;
@@ -527,8 +565,9 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
}
if (value) {
+ skp = smk_find_entry(csp);
rcu_read_lock();
- may = smk_access_entry(smk_of_current(), dsp, &smack_rule_list);
+ may = smk_access_entry(csp, dsp, &skp->smk_rules);
rcu_read_unlock();
/*
@@ -841,7 +880,7 @@ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
return;
}
-/*
+/**
* smack_inode_getxattr - Smack check on getxattr
* @dentry: the object
* @name: unused
@@ -858,7 +897,7 @@ static int smack_inode_getxattr(struct dentry *dentry, const char *name)
return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
}
-/*
+/**
* smack_inode_removexattr - Smack check on removexattr
* @dentry: the object
* @name: name of the attribute
@@ -1088,36 +1127,31 @@ static int smack_file_lock(struct file *file, unsigned int cmd)
* @cmd: what action to check
* @arg: unused
*
+ * Generally these operations are harmless.
+ * File locking operations present an obvious mechanism
+ * for passing information, so they require write access.
+ *
* Returns 0 if current has access, error code otherwise
*/
static int smack_file_fcntl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct smk_audit_info ad;
- int rc;
+ int rc = 0;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
- smk_ad_setfield_u_fs_path(&ad, file->f_path);
switch (cmd) {
- case F_DUPFD:
- case F_GETFD:
- case F_GETFL:
case F_GETLK:
- case F_GETOWN:
- case F_GETSIG:
- rc = smk_curacc(file->f_security, MAY_READ, &ad);
- break;
- case F_SETFD:
- case F_SETFL:
case F_SETLK:
case F_SETLKW:
case F_SETOWN:
case F_SETSIG:
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
rc = smk_curacc(file->f_security, MAY_WRITE, &ad);
break;
default:
- rc = smk_curacc(file->f_security, MAY_READWRITE, &ad);
+ break;
}
return rc;
@@ -1138,6 +1172,7 @@ static int smack_file_mmap(struct file *file,
unsigned long flags, unsigned long addr,
unsigned long addr_only)
{
+ struct smack_known *skp;
struct smack_rule *srp;
struct task_smack *tsp;
char *sp;
@@ -1170,6 +1205,7 @@ static int smack_file_mmap(struct file *file,
tsp = current_security();
sp = smk_of_current();
+ skp = smk_find_entry(sp);
rc = 0;
rcu_read_lock();
@@ -1177,15 +1213,8 @@ static int smack_file_mmap(struct file *file,
* For each Smack rule associated with the subject
* label verify that the SMACK64MMAP also has access
* to that rule's object label.
- *
- * Because neither of the labels comes
- * from the networking code it is sufficient
- * to compare pointers.
*/
- list_for_each_entry_rcu(srp, &smack_rule_list, list) {
- if (srp->smk_subject != sp)
- continue;
-
+ list_for_each_entry_rcu(srp, &skp->smk_rules, list) {
osmack = srp->smk_object;
/*
* Matching labels always allows access.
@@ -1214,7 +1243,8 @@ static int smack_file_mmap(struct file *file,
* If there isn't one a SMACK64MMAP subject
* can't have as much access as current.
*/
- mmay = smk_access_entry(msmack, osmack, &smack_rule_list);
+ skp = smk_find_entry(msmack);
+ mmay = smk_access_entry(msmack, osmack, &skp->smk_rules);
if (mmay == -ENOENT) {
rc = -EACCES;
break;
@@ -1315,6 +1345,24 @@ static int smack_file_receive(struct file *file)
return smk_curacc(file->f_security, may, &ad);
}
+/**
+ * smack_dentry_open - Smack dentry open processing
+ * @file: the object
+ * @cred: unused
+ *
+ * Set the security blob in the file structure.
+ *
+ * Returns 0
+ */
+static int smack_dentry_open(struct file *file, const struct cred *cred)
+{
+ struct inode_smack *isp = file->f_path.dentry->d_inode->i_security;
+
+ file->f_security = isp->smk_inode;
+
+ return 0;
+}
+
/*
* Task hooks
*/
@@ -1455,15 +1503,17 @@ static int smack_kernel_create_files_as(struct cred *new,
/**
* smk_curacc_on_task - helper to log task related access
* @p: the task object
- * @access : the access requested
+ * @access: the access requested
+ * @caller: name of the calling function for audit
*
* Return 0 if access is permitted
*/
-static int smk_curacc_on_task(struct task_struct *p, int access)
+static int smk_curacc_on_task(struct task_struct *p, int access,
+ const char *caller)
{
struct smk_audit_info ad;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
+ smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, p);
return smk_curacc(smk_of_task(task_security(p)), access, &ad);
}
@@ -1477,7 +1527,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access)
*/
static int smack_task_setpgid(struct task_struct *p, pid_t pgid)
{
- return smk_curacc_on_task(p, MAY_WRITE);
+ return smk_curacc_on_task(p, MAY_WRITE, __func__);
}
/**
@@ -1488,7 +1538,7 @@ static int smack_task_setpgid(struct task_struct *p, pid_t pgid)
*/
static int smack_task_getpgid(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1499,7 +1549,7 @@ static int smack_task_getpgid(struct task_struct *p)
*/
static int smack_task_getsid(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1527,7 +1577,7 @@ static int smack_task_setnice(struct task_struct *p, int nice)
rc = cap_task_setnice(p, nice);
if (rc == 0)
- rc = smk_curacc_on_task(p, MAY_WRITE);
+ rc = smk_curacc_on_task(p, MAY_WRITE, __func__);
return rc;
}
@@ -1544,7 +1594,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio)
rc = cap_task_setioprio(p, ioprio);
if (rc == 0)
- rc = smk_curacc_on_task(p, MAY_WRITE);
+ rc = smk_curacc_on_task(p, MAY_WRITE, __func__);
return rc;
}
@@ -1556,7 +1606,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio)
*/
static int smack_task_getioprio(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1573,7 +1623,7 @@ static int smack_task_setscheduler(struct task_struct *p)
rc = cap_task_setscheduler(p);
if (rc == 0)
- rc = smk_curacc_on_task(p, MAY_WRITE);
+ rc = smk_curacc_on_task(p, MAY_WRITE, __func__);
return rc;
}
@@ -1585,7 +1635,7 @@ static int smack_task_setscheduler(struct task_struct *p)
*/
static int smack_task_getscheduler(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1596,7 +1646,7 @@ static int smack_task_getscheduler(struct task_struct *p)
*/
static int smack_task_movememory(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_WRITE);
+ return smk_curacc_on_task(p, MAY_WRITE, __func__);
}
/**
@@ -1711,7 +1761,7 @@ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
ssp->smk_in = csp;
ssp->smk_out = csp;
- ssp->smk_packet[0] = '\0';
+ ssp->smk_packet = NULL;
sk->sk_security = ssp;
@@ -2753,6 +2803,7 @@ static int smack_unix_stream_connect(struct sock *sock,
{
struct socket_smack *ssp = sock->sk_security;
struct socket_smack *osp = other->sk_security;
+ struct socket_smack *nsp = newsk->sk_security;
struct smk_audit_info ad;
int rc = 0;
@@ -2762,6 +2813,14 @@ static int smack_unix_stream_connect(struct sock *sock,
if (!capable(CAP_MAC_OVERRIDE))
rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
+ /*
+ * Cross reference the peer labels for SO_PEERSEC.
+ */
+ if (rc == 0) {
+ nsp->smk_packet = ssp->smk_out;
+ ssp->smk_packet = osp->smk_out;
+ }
+
return rc;
}
@@ -2813,16 +2872,17 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
return smack_netlabel_send(sock->sk, sip);
}
-
/**
* smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat pair to smack
* @sap: netlabel secattr
- * @sip: where to put the result
+ * @ssp: socket security information
*
- * Copies a smack label into sip
+ * Returns a pointer to a Smack label found on the label list.
*/
-static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
+static char *smack_from_secattr(struct netlbl_lsm_secattr *sap,
+ struct socket_smack *ssp)
{
+ struct smack_known *skp;
char smack[SMK_LABELLEN];
char *sp;
int pcat;
@@ -2852,15 +2912,43 @@ static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
* we are already done. WeeHee.
*/
if (sap->attr.mls.lvl == smack_cipso_direct) {
- memcpy(sip, smack, SMK_MAXLEN);
- return;
+ /*
+ * The label sent is usually on the label list.
+ *
+ * If it is not we may still want to allow the
+ * delivery.
+ *
+ * If the recipient is accepting all packets
+ * because it is using the star ("*") label
+ * for SMACK64IPIN provide the web ("@") label
+ * so that a directed response will succeed.
+ * This is not very correct from a MAC point
+ * of view, but gets around the problem that
+ * locking prevents adding the newly discovered
+ * label to the list.
+ * The case where the recipient is not using
+ * the star label should obviously fail.
+ * The easy way to do this is to provide the
+ * star label as the subject label.
+ */
+ skp = smk_find_entry(smack);
+ if (skp != NULL)
+ return skp->smk_known;
+ if (ssp != NULL &&
+ ssp->smk_in == smack_known_star.smk_known)
+ return smack_known_web.smk_known;
+ return smack_known_star.smk_known;
}
/*
* Look it up in the supplied table if it is not
* a direct mapping.
*/
- smack_from_cipso(sap->attr.mls.lvl, smack, sip);
- return;
+ sp = smack_from_cipso(sap->attr.mls.lvl, smack);
+ if (sp != NULL)
+ return sp;
+ if (ssp != NULL && ssp->smk_in == smack_known_star.smk_known)
+ return smack_known_web.smk_known;
+ return smack_known_star.smk_known;
}
if ((sap->flags & NETLBL_SECATTR_SECID) != 0) {
/*
@@ -2875,16 +2963,14 @@ static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
* secid is from a fallback.
*/
BUG_ON(sp == NULL);
- strncpy(sip, sp, SMK_MAXLEN);
- return;
+ return sp;
}
/*
* Without guidance regarding the smack value
* for the packet fall back on the network
* ambient value.
*/
- strncpy(sip, smack_net_ambient, SMK_MAXLEN);
- return;
+ return smack_net_ambient;
}
/**
@@ -2898,7 +2984,6 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct netlbl_lsm_secattr secattr;
struct socket_smack *ssp = sk->sk_security;
- char smack[SMK_LABELLEN];
char *csp;
int rc;
struct smk_audit_info ad;
@@ -2911,10 +2996,9 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr);
- if (rc == 0) {
- smack_from_secattr(&secattr, smack);
- csp = smack;
- } else
+ if (rc == 0)
+ csp = smack_from_secattr(&secattr, ssp);
+ else
csp = smack_net_ambient;
netlbl_secattr_destroy(&secattr);
@@ -2951,15 +3035,19 @@ static int smack_socket_getpeersec_stream(struct socket *sock,
int __user *optlen, unsigned len)
{
struct socket_smack *ssp;
- int slen;
+ char *rcp = "";
+ int slen = 1;
int rc = 0;
ssp = sock->sk->sk_security;
- slen = strlen(ssp->smk_packet) + 1;
+ if (ssp->smk_packet != NULL) {
+ rcp = ssp->smk_packet;
+ slen = strlen(rcp) + 1;
+ }
if (slen > len)
rc = -ERANGE;
- else if (copy_to_user(optval, ssp->smk_packet, slen) != 0)
+ else if (copy_to_user(optval, rcp, slen) != 0)
rc = -EFAULT;
if (put_user(slen, optlen) != 0)
@@ -2982,8 +3070,8 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
{
struct netlbl_lsm_secattr secattr;
- struct socket_smack *sp;
- char smack[SMK_LABELLEN];
+ struct socket_smack *ssp = NULL;
+ char *sp;
int family = PF_UNSPEC;
u32 s = 0; /* 0 is the invalid secid */
int rc;
@@ -2998,17 +3086,19 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
family = sock->sk->sk_family;
if (family == PF_UNIX) {
- sp = sock->sk->sk_security;
- s = smack_to_secid(sp->smk_out);
+ ssp = sock->sk->sk_security;
+ s = smack_to_secid(ssp->smk_out);
} else if (family == PF_INET || family == PF_INET6) {
/*
* Translate what netlabel gave us.
*/
+ if (sock != NULL && sock->sk != NULL)
+ ssp = sock->sk->sk_security;
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, family, &secattr);
if (rc == 0) {
- smack_from_secattr(&secattr, smack);
- s = smack_to_secid(smack);
+ sp = smack_from_secattr(&secattr, ssp);
+ s = smack_to_secid(sp);
}
netlbl_secattr_destroy(&secattr);
}
@@ -3056,7 +3146,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
struct netlbl_lsm_secattr secattr;
struct sockaddr_in addr;
struct iphdr *hdr;
- char smack[SMK_LABELLEN];
+ char *sp;
int rc;
struct smk_audit_info ad;
@@ -3067,9 +3157,9 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, family, &secattr);
if (rc == 0)
- smack_from_secattr(&secattr, smack);
+ sp = smack_from_secattr(&secattr, ssp);
else
- strncpy(smack, smack_known_huh.smk_known, SMK_MAXLEN);
+ sp = smack_known_huh.smk_known;
netlbl_secattr_destroy(&secattr);
#ifdef CONFIG_AUDIT
@@ -3082,7 +3172,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
* Receiving a packet requires that the other end be able to write
* here. Read access is not required.
*/
- rc = smk_access(smack, ssp->smk_in, MAY_WRITE, &ad);
+ rc = smk_access(sp, ssp->smk_in, MAY_WRITE, &ad);
if (rc != 0)
return rc;
@@ -3090,7 +3180,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
* Save the peer's label in the request_sock so we can later setup
* smk_packet in the child socket so that SO_PEERCRED can report it.
*/
- req->peer_secid = smack_to_secid(smack);
+ req->peer_secid = smack_to_secid(sp);
/*
* We need to decide if we want to label the incoming connection here
@@ -3103,7 +3193,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
if (smack_host_label(&addr) == NULL) {
rcu_read_unlock();
netlbl_secattr_init(&secattr);
- smack_to_secattr(smack, &secattr);
+ smack_to_secattr(sp, &secattr);
rc = netlbl_req_setattr(req, &secattr);
netlbl_secattr_destroy(&secattr);
} else {
@@ -3125,13 +3215,11 @@ static void smack_inet_csk_clone(struct sock *sk,
const struct request_sock *req)
{
struct socket_smack *ssp = sk->sk_security;
- char *smack;
- if (req->peer_secid != 0) {
- smack = smack_from_secid(req->peer_secid);
- strncpy(ssp->smk_packet, smack, SMK_MAXLEN);
- } else
- ssp->smk_packet[0] = '\0';
+ if (req->peer_secid != 0)
+ ssp->smk_packet = smack_from_secid(req->peer_secid);
+ else
+ ssp->smk_packet = NULL;
}
/*
@@ -3409,6 +3497,8 @@ struct security_operations smack_ops = {
.sb_umount = smack_sb_umount,
.bprm_set_creds = smack_bprm_set_creds,
+ .bprm_committing_creds = smack_bprm_committing_creds,
+ .bprm_secureexec = smack_bprm_secureexec,
.inode_alloc_security = smack_inode_alloc_security,
.inode_free_security = smack_inode_free_security,
@@ -3440,6 +3530,8 @@ struct security_operations smack_ops = {
.file_send_sigiotask = smack_file_send_sigiotask,
.file_receive = smack_file_receive,
+ .dentry_open = smack_dentry_open,
+
.cred_alloc_blank = smack_cred_alloc_blank,
.cred_free = smack_cred_free,
.cred_prepare = smack_cred_prepare,
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index f93460156dc..6aceef518a4 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -44,6 +44,7 @@ enum smk_inos {
SMK_ONLYCAP = 9, /* the only "capable" label */
SMK_LOGGING = 10, /* logging */
SMK_LOAD_SELF = 11, /* task specific rules */
+ SMK_ACCESSES = 12, /* access policy */
};
/*
@@ -85,6 +86,16 @@ char *smack_onlycap;
*/
LIST_HEAD(smk_netlbladdr_list);
+
+/*
+ * Rule lists are maintained for each label.
+ * This master list is just for reading /smack/load.
+ */
+struct smack_master_list {
+ struct list_head list;
+ struct smack_rule *smk_rule;
+};
+
LIST_HEAD(smack_rule_list);
static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
@@ -92,7 +103,7 @@ static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
const char *smack_cipso_option = SMACK_CIPSO_OPTION;
-#define SEQ_READ_FINISHED 1
+#define SEQ_READ_FINISHED ((loff_t)-1)
/*
* Values for parsing cipso rules
@@ -159,9 +170,13 @@ static int smk_set_access(struct smack_rule *srp, struct list_head *rule_list,
mutex_lock(rule_lock);
+ /*
+ * Because the object label is less likely to match
+ * than the subject label check it first
+ */
list_for_each_entry_rcu(sp, rule_list, list) {
- if (sp->smk_subject == srp->smk_subject &&
- sp->smk_object == srp->smk_object) {
+ if (sp->smk_object == srp->smk_object &&
+ sp->smk_subject == srp->smk_subject) {
found = 1;
sp->smk_access = srp->smk_access;
break;
@@ -176,6 +191,99 @@ static int smk_set_access(struct smack_rule *srp, struct list_head *rule_list,
}
/**
+ * smk_parse_rule - parse Smack rule from load string
+ * @data: string to be parsed whose size is SMK_LOADLEN
+ * @rule: Smack rule
+ * @import: if non-zero, import labels
+ */
+static int smk_parse_rule(const char *data, struct smack_rule *rule, int import)
+{
+ char smack[SMK_LABELLEN];
+ struct smack_known *skp;
+
+ if (import) {
+ rule->smk_subject = smk_import(data, 0);
+ if (rule->smk_subject == NULL)
+ return -1;
+
+ rule->smk_object = smk_import(data + SMK_LABELLEN, 0);
+ if (rule->smk_object == NULL)
+ return -1;
+ } else {
+ smk_parse_smack(data, 0, smack);
+ skp = smk_find_entry(smack);
+ if (skp == NULL)
+ return -1;
+ rule->smk_subject = skp->smk_known;
+
+ smk_parse_smack(data + SMK_LABELLEN, 0, smack);
+ skp = smk_find_entry(smack);
+ if (skp == NULL)
+ return -1;
+ rule->smk_object = skp->smk_known;
+ }
+
+ rule->smk_access = 0;
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN]) {
+ case '-':
+ break;
+ case 'r':
+ case 'R':
+ rule->smk_access |= MAY_READ;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 1]) {
+ case '-':
+ break;
+ case 'w':
+ case 'W':
+ rule->smk_access |= MAY_WRITE;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 2]) {
+ case '-':
+ break;
+ case 'x':
+ case 'X':
+ rule->smk_access |= MAY_EXEC;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 3]) {
+ case '-':
+ break;
+ case 'a':
+ case 'A':
+ rule->smk_access |= MAY_APPEND;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 4]) {
+ case '-':
+ break;
+ case 't':
+ case 'T':
+ rule->smk_access |= MAY_TRANSMUTE;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
* smk_write_load_list - write() for any /smack/load
* @file: file pointer, not actually used
* @buf: where to get the data from
@@ -197,9 +305,12 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
struct list_head *rule_list,
struct mutex *rule_lock)
{
+ struct smack_master_list *smlp;
+ struct smack_known *skp;
struct smack_rule *rule;
char *data;
int rc = -EINVAL;
+ int load = 0;
/*
* No partial writes.
@@ -234,69 +345,14 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
goto out;
}
- rule->smk_subject = smk_import(data, 0);
- if (rule->smk_subject == NULL)
- goto out_free_rule;
-
- rule->smk_object = smk_import(data + SMK_LABELLEN, 0);
- if (rule->smk_object == NULL)
+ if (smk_parse_rule(data, rule, 1))
goto out_free_rule;
- rule->smk_access = 0;
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN]) {
- case '-':
- break;
- case 'r':
- case 'R':
- rule->smk_access |= MAY_READ;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 1]) {
- case '-':
- break;
- case 'w':
- case 'W':
- rule->smk_access |= MAY_WRITE;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 2]) {
- case '-':
- break;
- case 'x':
- case 'X':
- rule->smk_access |= MAY_EXEC;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 3]) {
- case '-':
- break;
- case 'a':
- case 'A':
- rule->smk_access |= MAY_APPEND;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 4]) {
- case '-':
- break;
- case 't':
- case 'T':
- rule->smk_access |= MAY_TRANSMUTE;
- break;
- default:
- goto out_free_rule;
+ if (rule_list == NULL) {
+ load = 1;
+ skp = smk_find_entry(rule->smk_subject);
+ rule_list = &skp->smk_rules;
+ rule_lock = &skp->smk_rules_lock;
}
rc = count;
@@ -304,8 +360,15 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
* smk_set_access returns true if there was already a rule
* for the subject/object pair, and false if it was new.
*/
- if (!smk_set_access(rule, rule_list, rule_lock))
+ if (!smk_set_access(rule, rule_list, rule_lock)) {
+ smlp = kzalloc(sizeof(*smlp), GFP_KERNEL);
+ if (smlp != NULL) {
+ smlp->smk_rule = rule;
+ list_add_rcu(&smlp->list, &smack_rule_list);
+ } else
+ rc = -ENOMEM;
goto out;
+ }
out_free_rule:
kfree(rule);
@@ -321,11 +384,24 @@ out:
static void *load_seq_start(struct seq_file *s, loff_t *pos)
{
- if (*pos == SEQ_READ_FINISHED)
+ struct list_head *list;
+
+ /*
+ * This is 0 the first time through.
+ */
+ if (s->index == 0)
+ s->private = &smack_rule_list;
+
+ if (s->private == NULL)
return NULL;
- if (list_empty(&smack_rule_list))
+
+ list = s->private;
+ if (list_empty(list))
return NULL;
- return smack_rule_list.next;
+
+ if (s->index == 0)
+ return list->next;
+ return list;
}
static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
@@ -333,17 +409,19 @@ static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
struct list_head *list = v;
if (list_is_last(list, &smack_rule_list)) {
- *pos = SEQ_READ_FINISHED;
+ s->private = NULL;
return NULL;
}
+ s->private = list->next;
return list->next;
}
static int load_seq_show(struct seq_file *s, void *v)
{
struct list_head *list = v;
- struct smack_rule *srp =
- list_entry(list, struct smack_rule, list);
+ struct smack_master_list *smlp =
+ list_entry(list, struct smack_master_list, list);
+ struct smack_rule *srp = smlp->smk_rule;
seq_printf(s, "%s %s", (char *)srp->smk_subject,
(char *)srp->smk_object);
@@ -412,8 +490,7 @@ static ssize_t smk_write_load(struct file *file, const char __user *buf,
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
- return smk_write_load_list(file, buf, count, ppos, &smack_rule_list,
- &smack_list_lock);
+ return smk_write_load_list(file, buf, count, ppos, NULL, NULL);
}
static const struct file_operations smk_load_ops = {
@@ -1425,6 +1502,44 @@ static const struct file_operations smk_load_self_ops = {
.write = smk_write_load_self,
.release = seq_release,
};
+
+/**
+ * smk_write_access - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_access(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smack_rule rule;
+ char *data;
+ int res;
+
+ data = simple_transaction_get(file, buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (count < SMK_LOADLEN || smk_parse_rule(data, &rule, 0))
+ return -EINVAL;
+
+ res = smk_access(rule.smk_subject, rule.smk_object, rule.smk_access,
+ NULL);
+ data[0] = res == 0 ? '1' : '0';
+ data[1] = '\0';
+
+ simple_transaction_set(file, 2);
+ return SMK_LOADLEN;
+}
+
+static const struct file_operations smk_access_ops = {
+ .write = smk_write_access,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
+
/**
* smk_fill_super - fill the /smackfs superblock
* @sb: the empty superblock
@@ -1459,6 +1574,8 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
"logging", &smk_logging_ops, S_IRUGO|S_IWUSR},
[SMK_LOAD_SELF] = {
"load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO},
+ [SMK_ACCESSES] = {
+ "access", &smk_access_ops, S_IRUGO|S_IWUGO},
/* last one */
{""}
};
@@ -1534,6 +1651,20 @@ static int __init init_smk_fs(void)
smk_cipso_doi();
smk_unlbl_ambient(NULL);
+ mutex_init(&smack_known_floor.smk_rules_lock);
+ mutex_init(&smack_known_hat.smk_rules_lock);
+ mutex_init(&smack_known_huh.smk_rules_lock);
+ mutex_init(&smack_known_invalid.smk_rules_lock);
+ mutex_init(&smack_known_star.smk_rules_lock);
+ mutex_init(&smack_known_web.smk_rules_lock);
+
+ INIT_LIST_HEAD(&smack_known_floor.smk_rules);
+ INIT_LIST_HEAD(&smack_known_hat.smk_rules);
+ INIT_LIST_HEAD(&smack_known_huh.smk_rules);
+ INIT_LIST_HEAD(&smack_known_invalid.smk_rules);
+ INIT_LIST_HEAD(&smack_known_star.smk_rules);
+ INIT_LIST_HEAD(&smack_known_web.smk_rules);
+
return err;
}
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
index 7c7f8c16c10..8eb779b9d77 100644
--- a/security/tomoyo/Kconfig
+++ b/security/tomoyo/Kconfig
@@ -1,8 +1,10 @@
config SECURITY_TOMOYO
bool "TOMOYO Linux Support"
depends on SECURITY
+ depends on NET
select SECURITYFS
select SECURITY_PATH
+ select SECURITY_NETWORK
default n
help
This selects TOMOYO Linux, pathname-based access control.
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
index 95278b71fc2..56a0c7be409 100644
--- a/security/tomoyo/Makefile
+++ b/security/tomoyo/Makefile
@@ -1,4 +1,4 @@
-obj-y = audit.o common.o condition.o domain.o file.o gc.o group.o load_policy.o memory.o mount.o realpath.o securityfs_if.o tomoyo.o util.o
+obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o
$(obj)/policy/profile.conf:
@mkdir -p $(obj)/policy/
@@ -27,7 +27,7 @@ $(obj)/policy/stat.conf:
@touch $@
$(obj)/builtin-policy.h: $(obj)/policy/profile.conf $(obj)/policy/exception_policy.conf $(obj)/policy/domain_policy.conf $(obj)/policy/manager.conf $(obj)/policy/stat.conf
- @echo Generating built-in policy for TOMOYO 2.4.x.
+ @echo Generating built-in policy for TOMOYO 2.5.x.
@echo "static char tomoyo_builtin_profile[] __initdata =" > $@.tmp
@sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/profile.conf >> $@.tmp
@echo "\"\";" >> $@.tmp
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index 5dbb1f7617c..075c3a6d164 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -313,6 +313,7 @@ static unsigned int tomoyo_log_count;
*/
static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns,
const u8 profile, const u8 index,
+ const struct tomoyo_acl_info *matched_acl,
const bool is_granted)
{
u8 mode;
@@ -324,6 +325,9 @@ static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns,
p = tomoyo_profile(ns, profile);
if (tomoyo_log_count >= p->pref[TOMOYO_PREF_MAX_AUDIT_LOG])
return false;
+ if (is_granted && matched_acl && matched_acl->cond &&
+ matched_acl->cond->grant_log != TOMOYO_GRANTLOG_AUTO)
+ return matched_acl->cond->grant_log == TOMOYO_GRANTLOG_YES;
mode = p->config[index];
if (mode == TOMOYO_CONFIG_USE_DEFAULT)
mode = p->config[category];
@@ -350,7 +354,8 @@ void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt,
char *buf;
struct tomoyo_log *entry;
bool quota_exceeded = false;
- if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type, r->granted))
+ if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type,
+ r->matched_acl, r->granted))
goto out;
buf = tomoyo_init_log(r, len, fmt, args);
if (!buf)
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 2e43aec1c36..150911c7ff0 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -20,6 +20,7 @@ const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE] = {
/* String table for /sys/kernel/security/tomoyo/profile */
const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
+ TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
+ /* CONFIG::file group */
[TOMOYO_MAC_FILE_EXECUTE] = "execute",
[TOMOYO_MAC_FILE_OPEN] = "open",
[TOMOYO_MAC_FILE_CREATE] = "create",
@@ -43,7 +44,28 @@ const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
[TOMOYO_MAC_FILE_MOUNT] = "mount",
[TOMOYO_MAC_FILE_UMOUNT] = "unmount",
[TOMOYO_MAC_FILE_PIVOT_ROOT] = "pivot_root",
+ /* CONFIG::network group */
+ [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] = "inet_stream_bind",
+ [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] = "inet_stream_listen",
+ [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] = "inet_stream_connect",
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] = "inet_dgram_bind",
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] = "inet_dgram_send",
+ [TOMOYO_MAC_NETWORK_INET_RAW_BIND] = "inet_raw_bind",
+ [TOMOYO_MAC_NETWORK_INET_RAW_SEND] = "inet_raw_send",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] = "unix_stream_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] = "unix_stream_listen",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] = "unix_stream_connect",
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] = "unix_dgram_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] = "unix_dgram_send",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] = "unix_seqpacket_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] = "unix_seqpacket_listen",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] = "unix_seqpacket_connect",
+ /* CONFIG::misc group */
+ [TOMOYO_MAC_ENVIRON] = "env",
+ /* CONFIG group */
[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_FILE] = "file",
+ [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_NETWORK] = "network",
+ [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_MISC] = "misc",
};
/* String table for conditions. */
@@ -130,10 +152,20 @@ const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = {
[TOMOYO_TYPE_UMOUNT] = "unmount",
};
+/* String table for socket's operation. */
+const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION] = {
+ [TOMOYO_NETWORK_BIND] = "bind",
+ [TOMOYO_NETWORK_LISTEN] = "listen",
+ [TOMOYO_NETWORK_CONNECT] = "connect",
+ [TOMOYO_NETWORK_SEND] = "send",
+};
+
/* String table for categories. */
static const char * const tomoyo_category_keywords
[TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
- [TOMOYO_MAC_CATEGORY_FILE] = "file",
+ [TOMOYO_MAC_CATEGORY_FILE] = "file",
+ [TOMOYO_MAC_CATEGORY_NETWORK] = "network",
+ [TOMOYO_MAC_CATEGORY_MISC] = "misc",
};
/* Permit policy management by non-root user? */
@@ -230,13 +262,17 @@ static void tomoyo_set_string(struct tomoyo_io_buffer *head, const char *string)
WARN_ON(1);
}
+static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
+ ...) __printf(2, 3);
+
/**
* tomoyo_io_printf - printf() to "struct tomoyo_io_buffer" structure.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @fmt: The printf()'s format string, followed by parameters.
*/
-void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
+static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
+ ...)
{
va_list args;
size_t len;
@@ -313,7 +349,7 @@ void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns)
INIT_LIST_HEAD(&ns->group_list[idx]);
for (idx = 0; idx < TOMOYO_MAX_POLICY; idx++)
INIT_LIST_HEAD(&ns->policy_list[idx]);
- ns->profile_version = 20100903;
+ ns->profile_version = 20110903;
tomoyo_namespace_enabled = !list_empty(&tomoyo_namespace_list);
list_add_tail_rcu(&ns->namespace_list, &tomoyo_namespace_list);
}
@@ -466,8 +502,10 @@ static struct tomoyo_profile *tomoyo_assign_profile
TOMOYO_CONFIG_WANT_REJECT_LOG;
memset(ptr->config, TOMOYO_CONFIG_USE_DEFAULT,
sizeof(ptr->config));
- ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] = 1024;
- ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] = 2048;
+ ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] =
+ CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG;
+ ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] =
+ CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY;
mb(); /* Avoid out-of-order execution. */
ns->profile_ptr[profile] = ptr;
entry = NULL;
@@ -951,14 +989,12 @@ static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
(global_pid = true, sscanf(data, "global-pid=%u", &pid) == 1)) {
struct task_struct *p;
rcu_read_lock();
- read_lock(&tasklist_lock);
if (global_pid)
p = find_task_by_pid_ns(pid, &init_pid_ns);
else
p = find_task_by_vpid(pid);
if (p)
domain = tomoyo_real_domain(p);
- read_unlock(&tasklist_lock);
rcu_read_unlock();
} else if (!strncmp(data, "domain=", 7)) {
if (tomoyo_domain_def(data + 7))
@@ -982,6 +1018,48 @@ static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
}
/**
+ * tomoyo_same_task_acl - Check for duplicated "struct tomoyo_task_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_task_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_task_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_task_acl *p2 = container_of(b, typeof(*p2), head);
+ return p1->domainname == p2->domainname;
+}
+
+/**
+ * tomoyo_write_task - Update task related list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_task(struct tomoyo_acl_param *param)
+{
+ int error = -EINVAL;
+ if (tomoyo_str_starts(&param->data, "manual_domain_transition ")) {
+ struct tomoyo_task_acl e = {
+ .head.type = TOMOYO_TYPE_MANUAL_TASK_ACL,
+ .domainname = tomoyo_get_domainname(param),
+ };
+ if (e.domainname)
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_task_acl,
+ NULL);
+ tomoyo_put_name(e.domainname);
+ }
+ return error;
+}
+
+/**
* tomoyo_delete_domain - Delete a domain.
*
* @domainname: The name of domain.
@@ -1039,11 +1117,16 @@ static int tomoyo_write_domain2(struct tomoyo_policy_namespace *ns,
static const struct {
const char *keyword;
int (*write) (struct tomoyo_acl_param *);
- } tomoyo_callback[1] = {
+ } tomoyo_callback[5] = {
{ "file ", tomoyo_write_file },
+ { "network inet ", tomoyo_write_inet_network },
+ { "network unix ", tomoyo_write_unix_network },
+ { "misc ", tomoyo_write_misc },
+ { "task ", tomoyo_write_task },
};
u8 i;
- for (i = 0; i < 1; i++) {
+
+ for (i = 0; i < ARRAY_SIZE(tomoyo_callback); i++) {
if (!tomoyo_str_starts(&param.data,
tomoyo_callback[i].keyword))
continue;
@@ -1127,6 +1210,10 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
case 0:
head->r.cond_index = 0;
head->r.cond_step++;
+ if (cond->transit) {
+ tomoyo_set_space(head);
+ tomoyo_set_string(head, cond->transit->name);
+ }
/* fall through */
case 1:
{
@@ -1239,6 +1326,10 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
head->r.cond_step++;
/* fall through */
case 3:
+ if (cond->grant_log != TOMOYO_GRANTLOG_AUTO)
+ tomoyo_io_printf(head, " grant_log=%s",
+ tomoyo_yesno(cond->grant_log ==
+ TOMOYO_GRANTLOG_YES));
tomoyo_set_lf(head);
return true;
}
@@ -1306,6 +1397,12 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
if (first)
return true;
tomoyo_print_name_union(head, &ptr->name);
+ } else if (acl_type == TOMOYO_TYPE_MANUAL_TASK_ACL) {
+ struct tomoyo_task_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ tomoyo_set_group(head, "task ");
+ tomoyo_set_string(head, "manual_domain_transition ");
+ tomoyo_set_string(head, ptr->domainname->name);
} else if (head->r.print_transition_related_only) {
return true;
} else if (acl_type == TOMOYO_TYPE_PATH2_ACL) {
@@ -1370,6 +1467,60 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
tomoyo_print_number_union(head, &ptr->mode);
tomoyo_print_number_union(head, &ptr->major);
tomoyo_print_number_union(head, &ptr->minor);
+ } else if (acl_type == TOMOYO_TYPE_INET_ACL) {
+ struct tomoyo_inet_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "network inet ");
+ tomoyo_set_string(head, tomoyo_proto_keyword
+ [ptr->protocol]);
+ tomoyo_set_space(head);
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
+ }
+ if (first)
+ return true;
+ tomoyo_set_space(head);
+ if (ptr->address.group) {
+ tomoyo_set_string(head, "@");
+ tomoyo_set_string(head, ptr->address.group->group_name
+ ->name);
+ } else {
+ char buf[128];
+ tomoyo_print_ip(buf, sizeof(buf), &ptr->address);
+ tomoyo_io_printf(head, "%s", buf);
+ }
+ tomoyo_print_number_union(head, &ptr->port);
+ } else if (acl_type == TOMOYO_TYPE_UNIX_ACL) {
+ struct tomoyo_unix_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "network unix ");
+ tomoyo_set_string(head, tomoyo_proto_keyword
+ [ptr->protocol]);
+ tomoyo_set_space(head);
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
+ }
+ if (first)
+ return true;
+ tomoyo_print_name_union(head, &ptr->name);
} else if (acl_type == TOMOYO_TYPE_MOUNT_ACL) {
struct tomoyo_mount_acl *ptr =
container_of(acl, typeof(*ptr), head);
@@ -1378,6 +1529,12 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
tomoyo_print_name_union(head, &ptr->dir_name);
tomoyo_print_name_union(head, &ptr->fs_type);
tomoyo_print_number_union(head, &ptr->flags);
+ } else if (acl_type == TOMOYO_TYPE_ENV_ACL) {
+ struct tomoyo_env_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+
+ tomoyo_set_group(head, "misc env ");
+ tomoyo_set_string(head, ptr->env->name);
}
if (acl->cond) {
head->r.print_cond_part = true;
@@ -1510,14 +1667,12 @@ static void tomoyo_read_pid(struct tomoyo_io_buffer *head)
global_pid = true;
pid = (unsigned int) simple_strtoul(buf, NULL, 10);
rcu_read_lock();
- read_lock(&tasklist_lock);
if (global_pid)
p = find_task_by_pid_ns(pid, &init_pid_ns);
else
p = find_task_by_vpid(pid);
if (p)
domain = tomoyo_real_domain(p);
- read_unlock(&tasklist_lock);
rcu_read_unlock();
if (!domain)
return;
@@ -1537,8 +1692,9 @@ static const char *tomoyo_transition_type[TOMOYO_MAX_TRANSITION_TYPE] = {
/* String table for grouping keywords. */
static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = {
- [TOMOYO_PATH_GROUP] = "path_group ",
- [TOMOYO_NUMBER_GROUP] = "number_group ",
+ [TOMOYO_PATH_GROUP] = "path_group ",
+ [TOMOYO_NUMBER_GROUP] = "number_group ",
+ [TOMOYO_ADDRESS_GROUP] = "address_group ",
};
/**
@@ -1580,7 +1736,7 @@ static int tomoyo_write_exception(struct tomoyo_io_buffer *head)
}
/**
- * tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group" list.
+ * tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @idx: Index number.
@@ -1617,6 +1773,15 @@ static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx)
(ptr,
struct tomoyo_number_group,
head)->number);
+ } else if (idx == TOMOYO_ADDRESS_GROUP) {
+ char buffer[128];
+
+ struct tomoyo_address_group *member =
+ container_of(ptr, typeof(*member),
+ head);
+ tomoyo_print_ip(buffer, sizeof(buffer),
+ &member->address);
+ tomoyo_io_printf(head, " %s", buffer);
}
tomoyo_set_lf(head);
}
@@ -2066,27 +2231,7 @@ static int tomoyo_write_answer(struct tomoyo_io_buffer *head)
static void tomoyo_read_version(struct tomoyo_io_buffer *head)
{
if (!head->r.eof) {
- tomoyo_io_printf(head, "2.4.0");
- head->r.eof = true;
- }
-}
-
-/**
- * tomoyo_read_self_domain - Get the current process's domainname.
- *
- * @head: Pointer to "struct tomoyo_io_buffer".
- *
- * Returns the current process's domainname.
- */
-static void tomoyo_read_self_domain(struct tomoyo_io_buffer *head)
-{
- if (!head->r.eof) {
- /*
- * tomoyo_domain()->domainname != NULL
- * because every process belongs to a domain and
- * the domain's name cannot be NULL.
- */
- tomoyo_io_printf(head, "%s", tomoyo_domain()->domainname->name);
+ tomoyo_io_printf(head, "2.5.0");
head->r.eof = true;
}
}
@@ -2221,10 +2366,6 @@ int tomoyo_open_control(const u8 type, struct file *file)
head->poll = tomoyo_poll_log;
head->read = tomoyo_read_log;
break;
- case TOMOYO_SELFDOMAIN:
- /* /sys/kernel/security/tomoyo/self_domain */
- head->read = tomoyo_read_self_domain;
- break;
case TOMOYO_PROCESS_STATUS:
/* /sys/kernel/security/tomoyo/.process_status */
head->write = tomoyo_write_pid;
@@ -2453,6 +2594,7 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
return -EFAULT;
if (mutex_lock_interruptible(&head->io_sem))
return -EINTR;
+ head->read_user_buf_avail = 0;
idx = tomoyo_read_lock();
/* Read a line and dispatch it to the policy handler. */
while (avail_len > 0) {
@@ -2562,11 +2704,11 @@ void tomoyo_check_profile(void)
struct tomoyo_domain_info *domain;
const int idx = tomoyo_read_lock();
tomoyo_policy_loaded = true;
- printk(KERN_INFO "TOMOYO: 2.4.0\n");
+ printk(KERN_INFO "TOMOYO: 2.5.0\n");
list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
const u8 profile = domain->profile;
const struct tomoyo_policy_namespace *ns = domain->ns;
- if (ns->profile_version != 20100903)
+ if (ns->profile_version != 20110903)
printk(KERN_ERR
"Profile version %u is not supported.\n",
ns->profile_version);
@@ -2577,9 +2719,9 @@ void tomoyo_check_profile(void)
else
continue;
printk(KERN_ERR
- "Userland tools for TOMOYO 2.4 must be installed and "
+ "Userland tools for TOMOYO 2.5 must be installed and "
"policy must be initialized.\n");
- printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.4/ "
+ printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.5/ "
"for more information.\n");
panic("STOP!");
}
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index f7fbaa66e44..ed311d7a8ce 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -3,7 +3,7 @@
*
* Header file for TOMOYO.
*
- * Copyright (C) 2005-2010 NTT DATA CORPORATION
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#ifndef _SECURITY_TOMOYO_COMMON_H
@@ -23,6 +23,16 @@
#include <linux/poll.h>
#include <linux/binfmts.h>
#include <linux/highmem.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/un.h>
+#include <net/sock.h>
+#include <net/af_unix.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
/********** Constants definitions. **********/
@@ -34,8 +44,17 @@
#define TOMOYO_HASH_BITS 8
#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
+/*
+ * TOMOYO checks only SOCK_STREAM, SOCK_DGRAM, SOCK_RAW, SOCK_SEQPACKET.
+ * Therefore, we don't need SOCK_MAX.
+ */
+#define TOMOYO_SOCK_MAX 6
+
#define TOMOYO_EXEC_TMPSIZE 4096
+/* Garbage collector is trying to kfree() this element. */
+#define TOMOYO_GC_IN_PROGRESS -1
+
/* Profile number is an integer between 0 and 255. */
#define TOMOYO_MAX_PROFILES 256
@@ -136,6 +155,7 @@ enum tomoyo_mode_index {
/* Index numbers for entry type. */
enum tomoyo_policy_id {
TOMOYO_ID_GROUP,
+ TOMOYO_ID_ADDRESS_GROUP,
TOMOYO_ID_PATH_GROUP,
TOMOYO_ID_NUMBER_GROUP,
TOMOYO_ID_TRANSITION_CONTROL,
@@ -162,10 +182,21 @@ enum tomoyo_domain_info_flags_index {
TOMOYO_MAX_DOMAIN_INFO_FLAGS
};
+/* Index numbers for audit type. */
+enum tomoyo_grant_log {
+ /* Follow profile's configuration. */
+ TOMOYO_GRANTLOG_AUTO,
+ /* Do not generate grant log. */
+ TOMOYO_GRANTLOG_NO,
+ /* Generate grant_log. */
+ TOMOYO_GRANTLOG_YES,
+};
+
/* Index numbers for group entries. */
enum tomoyo_group_id {
TOMOYO_PATH_GROUP,
TOMOYO_NUMBER_GROUP,
+ TOMOYO_ADDRESS_GROUP,
TOMOYO_MAX_GROUP
};
@@ -196,6 +227,10 @@ enum tomoyo_acl_entry_type_index {
TOMOYO_TYPE_PATH_NUMBER_ACL,
TOMOYO_TYPE_MKDEV_ACL,
TOMOYO_TYPE_MOUNT_ACL,
+ TOMOYO_TYPE_INET_ACL,
+ TOMOYO_TYPE_UNIX_ACL,
+ TOMOYO_TYPE_ENV_ACL,
+ TOMOYO_TYPE_MANUAL_TASK_ACL,
};
/* Index numbers for access controls with one pathname. */
@@ -228,6 +263,15 @@ enum tomoyo_mkdev_acl_index {
TOMOYO_MAX_MKDEV_OPERATION
};
+/* Index numbers for socket operations. */
+enum tomoyo_network_acl_index {
+ TOMOYO_NETWORK_BIND, /* bind() operation. */
+ TOMOYO_NETWORK_LISTEN, /* listen() operation. */
+ TOMOYO_NETWORK_CONNECT, /* connect() operation. */
+ TOMOYO_NETWORK_SEND, /* send() operation. */
+ TOMOYO_MAX_NETWORK_OPERATION
+};
+
/* Index numbers for access controls with two pathnames. */
enum tomoyo_path2_acl_index {
TOMOYO_TYPE_LINK,
@@ -255,7 +299,6 @@ enum tomoyo_securityfs_interface_index {
TOMOYO_EXCEPTIONPOLICY,
TOMOYO_PROCESS_STATUS,
TOMOYO_STAT,
- TOMOYO_SELFDOMAIN,
TOMOYO_AUDIT,
TOMOYO_VERSION,
TOMOYO_PROFILE,
@@ -300,12 +343,30 @@ enum tomoyo_mac_index {
TOMOYO_MAC_FILE_MOUNT,
TOMOYO_MAC_FILE_UMOUNT,
TOMOYO_MAC_FILE_PIVOT_ROOT,
+ TOMOYO_MAC_NETWORK_INET_STREAM_BIND,
+ TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN,
+ TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT,
+ TOMOYO_MAC_NETWORK_INET_DGRAM_BIND,
+ TOMOYO_MAC_NETWORK_INET_DGRAM_SEND,
+ TOMOYO_MAC_NETWORK_INET_RAW_BIND,
+ TOMOYO_MAC_NETWORK_INET_RAW_SEND,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT,
+ TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT,
+ TOMOYO_MAC_ENVIRON,
TOMOYO_MAX_MAC_INDEX
};
/* Index numbers for category of functionality. */
enum tomoyo_mac_category_index {
TOMOYO_MAC_CATEGORY_FILE,
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ TOMOYO_MAC_CATEGORY_MISC,
TOMOYO_MAX_MAC_CATEGORY_INDEX
};
@@ -340,7 +401,7 @@ enum tomoyo_pref_index {
/* Common header for holding ACL entries. */
struct tomoyo_acl_head {
struct list_head list;
- bool is_deleted;
+ s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
} __packed;
/* Common header for shared entries. */
@@ -397,13 +458,36 @@ struct tomoyo_request_info {
u8 operation;
} path_number;
struct {
+ const struct tomoyo_path_info *name;
+ } environ;
+ struct {
+ const __be32 *address;
+ u16 port;
+ /* One of values smaller than TOMOYO_SOCK_MAX. */
+ u8 protocol;
+ /* One of values in "enum tomoyo_network_acl_index". */
+ u8 operation;
+ bool is_ipv6;
+ } inet_network;
+ struct {
+ const struct tomoyo_path_info *address;
+ /* One of values smaller than TOMOYO_SOCK_MAX. */
+ u8 protocol;
+ /* One of values in "enum tomoyo_network_acl_index". */
+ u8 operation;
+ } unix_network;
+ struct {
const struct tomoyo_path_info *type;
const struct tomoyo_path_info *dir;
const struct tomoyo_path_info *dev;
unsigned long flags;
int need_dev;
} mount;
+ struct {
+ const struct tomoyo_path_info *domainname;
+ } task;
} param;
+ struct tomoyo_acl_info *matched_acl;
u8 param_type;
bool granted;
u8 retry;
@@ -442,7 +526,14 @@ struct tomoyo_number_union {
u8 value_type[2];
};
-/* Structure for "path_group"/"number_group" directive. */
+/* Structure for holding an IP address. */
+struct tomoyo_ipaddr_union {
+ struct in6_addr ip[2]; /* Big endian. */
+ struct tomoyo_group *group; /* Pointer to address group. */
+ bool is_ipv6; /* Valid only if @group == NULL. */
+};
+
+/* Structure for "path_group"/"number_group"/"address_group" directive. */
struct tomoyo_group {
struct tomoyo_shared_acl_head head;
const struct tomoyo_path_info *group_name;
@@ -461,6 +552,13 @@ struct tomoyo_number_group {
struct tomoyo_number_union number;
};
+/* Structure for "address_group" directive. */
+struct tomoyo_address_group {
+ struct tomoyo_acl_head head;
+ /* Structure for holding an IP address. */
+ struct tomoyo_ipaddr_union address;
+};
+
/* Subset of "struct stat". Used by conditional ACL and audit logs. */
struct tomoyo_mini_stat {
uid_t uid;
@@ -520,6 +618,7 @@ struct tomoyo_execve {
struct tomoyo_request_info r;
struct tomoyo_obj_info obj;
struct linux_binprm *bprm;
+ const struct tomoyo_path_info *transition;
/* For dumping argv[] and envp[]. */
struct tomoyo_page_dump dump;
/* For temporary use. */
@@ -554,6 +653,8 @@ struct tomoyo_condition {
u16 names_count; /* Number of "struct tomoyo_name_union names". */
u16 argc; /* Number of "struct tomoyo_argv". */
u16 envc; /* Number of "struct tomoyo_envp". */
+ u8 grant_log; /* One of values in "enum tomoyo_grant_log". */
+ const struct tomoyo_path_info *transit; /* Maybe NULL. */
/*
* struct tomoyo_condition_element condition[condc];
* struct tomoyo_number_union values[numbers_count];
@@ -567,7 +668,7 @@ struct tomoyo_condition {
struct tomoyo_acl_info {
struct list_head list;
struct tomoyo_condition *cond; /* Maybe NULL. */
- bool is_deleted;
+ s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
u8 type; /* One of values in "enum tomoyo_acl_entry_type_index". */
} __packed;
@@ -587,6 +688,15 @@ struct tomoyo_domain_info {
};
/*
+ * Structure for "task manual_domain_transition" directive.
+ */
+struct tomoyo_task_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MANUAL_TASK_ACL */
+ /* Pointer to domainname. */
+ const struct tomoyo_path_info *domainname;
+};
+
+/*
* Structure for "file execute", "file read", "file write", "file append",
* "file unlink", "file getattr", "file rmdir", "file truncate",
* "file symlink", "file chroot" and "file unmount" directive.
@@ -638,6 +748,29 @@ struct tomoyo_mount_acl {
struct tomoyo_number_union flags;
};
+/* Structure for "misc env" directive in domain policy. */
+struct tomoyo_env_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_ENV_ACL */
+ const struct tomoyo_path_info *env; /* environment variable */
+};
+
+/* Structure for "network inet" directive. */
+struct tomoyo_inet_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_INET_ACL */
+ u8 protocol;
+ u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */
+ struct tomoyo_ipaddr_union address;
+ struct tomoyo_number_union port;
+};
+
+/* Structure for "network unix" directive. */
+struct tomoyo_unix_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_UNIX_ACL */
+ u8 protocol;
+ u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */
+ struct tomoyo_name_union name;
+};
+
/* Structure for holding a line from /sys/kernel/security/tomoyo/ interface. */
struct tomoyo_acl_param {
char *data;
@@ -773,7 +906,7 @@ struct tomoyo_policy_namespace {
struct list_head acl_group[TOMOYO_MAX_ACL_GROUPS];
/* List for connecting to tomoyo_namespace_list list. */
struct list_head namespace_list;
- /* Profile version. Currently only 20100903 is defined. */
+ /* Profile version. Currently only 20110903 is defined. */
unsigned int profile_version;
/* Name of this namespace (e.g. "<kernel>", "</usr/sbin/httpd>" ). */
const char *name;
@@ -781,6 +914,8 @@ struct tomoyo_policy_namespace {
/********** Function prototypes. **********/
+bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+ const struct tomoyo_group *group);
bool tomoyo_compare_number_union(const unsigned long value,
const struct tomoyo_number_union *ptr);
bool tomoyo_condition(struct tomoyo_request_info *r,
@@ -796,6 +931,8 @@ bool tomoyo_memory_ok(void *ptr);
bool tomoyo_number_matches_group(const unsigned long min,
const unsigned long max,
const struct tomoyo_group *group);
+bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param,
+ struct tomoyo_ipaddr_union *ptr);
bool tomoyo_parse_name_union(struct tomoyo_acl_param *param,
struct tomoyo_name_union *ptr);
bool tomoyo_parse_number_union(struct tomoyo_acl_param *param,
@@ -805,6 +942,7 @@ bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
bool tomoyo_permstr(const char *string, const char *keyword);
bool tomoyo_str_starts(char **src, const char *find);
char *tomoyo_encode(const char *str);
+char *tomoyo_encode2(const char *str, int str_len);
char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
va_list args);
char *tomoyo_read_token(struct tomoyo_acl_param *param);
@@ -814,12 +952,17 @@ const char *tomoyo_get_exe(void);
const char *tomoyo_yesno(const unsigned int value);
const struct tomoyo_path_info *tomoyo_compare_name_union
(const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr);
+const struct tomoyo_path_info *tomoyo_get_domainname
+(struct tomoyo_acl_param *param);
const struct tomoyo_path_info *tomoyo_get_name(const char *name);
const struct tomoyo_path_info *tomoyo_path_matches_group
(const struct tomoyo_path_info *pathname, const struct tomoyo_group *group);
int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
struct path *path, const int flag);
int tomoyo_close_control(struct tomoyo_io_buffer *head);
+int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env);
+int tomoyo_execute_permission(struct tomoyo_request_info *r,
+ const struct tomoyo_path_info *filename);
int tomoyo_find_next_domain(struct linux_binprm *bprm);
int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
const u8 index);
@@ -838,10 +981,15 @@ int tomoyo_path_number_perm(const u8 operation, struct path *path,
unsigned long number);
int tomoyo_path_perm(const u8 operation, struct path *path,
const char *target);
-int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
- const struct tomoyo_path_info *filename);
int tomoyo_poll_control(struct file *file, poll_table *wait);
int tomoyo_poll_log(struct file *file, poll_table *wait);
+int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
+ int addr_len);
+int tomoyo_socket_connect_permission(struct socket *sock,
+ struct sockaddr *addr, int addr_len);
+int tomoyo_socket_listen_permission(struct socket *sock);
+int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg,
+ int size);
int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...)
__printf(2, 3);
int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
@@ -860,8 +1008,11 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
int tomoyo_write_aggregator(struct tomoyo_acl_param *param);
int tomoyo_write_file(struct tomoyo_acl_param *param);
int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type);
+int tomoyo_write_misc(struct tomoyo_acl_param *param);
+int tomoyo_write_inet_network(struct tomoyo_acl_param *param);
int tomoyo_write_transition_control(struct tomoyo_acl_param *param,
const u8 type);
+int tomoyo_write_unix_network(struct tomoyo_acl_param *param);
ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
const int buffer_len);
ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
@@ -891,12 +1042,11 @@ void tomoyo_del_condition(struct list_head *element);
void tomoyo_fill_path_info(struct tomoyo_path_info *ptr);
void tomoyo_get_attributes(struct tomoyo_obj_info *obj);
void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns);
-void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
- __printf(2, 3);
void tomoyo_load_policy(const char *filename);
-void tomoyo_memory_free(void *ptr);
void tomoyo_normalize_line(unsigned char *buffer);
void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register);
+void tomoyo_print_ip(char *buf, const unsigned int size,
+ const struct tomoyo_ipaddr_union *ptr);
void tomoyo_print_ulong(char *buffer, const int buffer_len,
const unsigned long value, const u8 type);
void tomoyo_put_name_union(struct tomoyo_name_union *ptr);
@@ -919,6 +1069,8 @@ extern const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
+ TOMOYO_MAX_MAC_CATEGORY_INDEX];
extern const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE];
extern const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION];
+extern const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX];
+extern const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION];
extern const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX];
extern const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION];
extern const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION];
@@ -1098,6 +1250,21 @@ static inline bool tomoyo_same_number_union
}
/**
+ * tomoyo_same_ipaddr_union - Check for duplicated "struct tomoyo_ipaddr_union" entry.
+ *
+ * @a: Pointer to "struct tomoyo_ipaddr_union".
+ * @b: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_ipaddr_union
+(const struct tomoyo_ipaddr_union *a, const struct tomoyo_ipaddr_union *b)
+{
+ return !memcmp(a->ip, b->ip, sizeof(a->ip)) && a->group == b->group &&
+ a->is_ipv6 == b->is_ipv6;
+}
+
+/**
* tomoyo_current_namespace - Get "struct tomoyo_policy_namespace" for current thread.
*
* Returns pointer to "struct tomoyo_policy_namespace" for current thread.
diff --git a/security/tomoyo/condition.c b/security/tomoyo/condition.c
index 8a05f71eaf6..986330b8c73 100644
--- a/security/tomoyo/condition.c
+++ b/security/tomoyo/condition.c
@@ -348,6 +348,7 @@ static inline bool tomoyo_same_condition(const struct tomoyo_condition *a,
a->numbers_count == b->numbers_count &&
a->names_count == b->names_count &&
a->argc == b->argc && a->envc == b->envc &&
+ a->grant_log == b->grant_log && a->transit == b->transit &&
!memcmp(a + 1, b + 1, a->size - sizeof(*a));
}
@@ -399,8 +400,9 @@ static struct tomoyo_condition *tomoyo_commit_condition
found = true;
goto out;
}
- list_for_each_entry_rcu(ptr, &tomoyo_condition_list, head.list) {
- if (!tomoyo_same_condition(ptr, entry))
+ list_for_each_entry(ptr, &tomoyo_condition_list, head.list) {
+ if (!tomoyo_same_condition(ptr, entry) ||
+ atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
/* Same entry found. Share this entry. */
atomic_inc(&ptr->head.users);
@@ -410,8 +412,7 @@ static struct tomoyo_condition *tomoyo_commit_condition
if (!found) {
if (tomoyo_memory_ok(entry)) {
atomic_set(&entry->head.users, 1);
- list_add_rcu(&entry->head.list,
- &tomoyo_condition_list);
+ list_add(&entry->head.list, &tomoyo_condition_list);
} else {
found = true;
ptr = NULL;
@@ -428,6 +429,46 @@ out:
}
/**
+ * tomoyo_get_transit_preference - Parse domain transition preference for execve().
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @e: Pointer to "struct tomoyo_condition".
+ *
+ * Returns the condition string part.
+ */
+static char *tomoyo_get_transit_preference(struct tomoyo_acl_param *param,
+ struct tomoyo_condition *e)
+{
+ char * const pos = param->data;
+ bool flag;
+ if (*pos == '<') {
+ e->transit = tomoyo_get_domainname(param);
+ goto done;
+ }
+ {
+ char *cp = strchr(pos, ' ');
+ if (cp)
+ *cp = '\0';
+ flag = tomoyo_correct_path(pos) || !strcmp(pos, "keep") ||
+ !strcmp(pos, "initialize") || !strcmp(pos, "reset") ||
+ !strcmp(pos, "child") || !strcmp(pos, "parent");
+ if (cp)
+ *cp = ' ';
+ }
+ if (!flag)
+ return pos;
+ e->transit = tomoyo_get_name(tomoyo_read_token(param));
+done:
+ if (e->transit)
+ return param->data;
+ /*
+ * Return a bad read-only condition string that will let
+ * tomoyo_get_condition() return NULL.
+ */
+ return "/";
+}
+
+/**
* tomoyo_get_condition - Parse condition part.
*
* @param: Pointer to "struct tomoyo_acl_param".
@@ -443,7 +484,8 @@ struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param)
struct tomoyo_argv *argv = NULL;
struct tomoyo_envp *envp = NULL;
struct tomoyo_condition e = { };
- char * const start_of_string = param->data;
+ char * const start_of_string =
+ tomoyo_get_transit_preference(param, &e);
char * const end_of_string = start_of_string + strlen(start_of_string);
char *pos;
rerun:
@@ -486,6 +528,20 @@ rerun:
goto out;
dprintk(KERN_WARNING "%u: <%s>%s=<%s>\n", __LINE__, left_word,
is_not ? "!" : "", right_word);
+ if (!strcmp(left_word, "grant_log")) {
+ if (entry) {
+ if (is_not ||
+ entry->grant_log != TOMOYO_GRANTLOG_AUTO)
+ goto out;
+ else if (!strcmp(right_word, "yes"))
+ entry->grant_log = TOMOYO_GRANTLOG_YES;
+ else if (!strcmp(right_word, "no"))
+ entry->grant_log = TOMOYO_GRANTLOG_NO;
+ else
+ goto out;
+ }
+ continue;
+ }
if (!strncmp(left_word, "exec.argv[", 10)) {
if (!argv) {
e.argc++;
@@ -593,8 +649,9 @@ store_value:
+ e.envc * sizeof(struct tomoyo_envp);
entry = kzalloc(e.size, GFP_NOFS);
if (!entry)
- return NULL;
+ goto out2;
*entry = e;
+ e.transit = NULL;
condp = (struct tomoyo_condition_element *) (entry + 1);
numbers_p = (struct tomoyo_number_union *) (condp + e.condc);
names_p = (struct tomoyo_name_union *) (numbers_p + e.numbers_count);
@@ -621,6 +678,8 @@ out:
tomoyo_del_condition(&entry->head.list);
kfree(entry);
}
+out2:
+ tomoyo_put_name(e.transit);
return NULL;
}
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index cd0f92d88bb..9027ac1534a 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -39,6 +39,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return -ENOMEM;
list_for_each_entry_rcu(entry, list, list) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
if (!check_duplicate(entry, new_entry))
continue;
entry->is_deleted = param->is_delete;
@@ -102,10 +104,21 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
new_entry->cond = tomoyo_get_condition(param);
if (!new_entry->cond)
return -EINVAL;
+ /*
+ * Domain transition preference is allowed for only
+ * "file execute" entries.
+ */
+ if (new_entry->cond->transit &&
+ !(new_entry->type == TOMOYO_TYPE_PATH_ACL &&
+ container_of(new_entry, struct tomoyo_path_acl, head)
+ ->perm == 1 << TOMOYO_TYPE_EXECUTE))
+ goto out;
}
if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
list_for_each_entry_rcu(entry, list, list) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
if (!tomoyo_same_acl_head(entry, new_entry) ||
!check_duplicate(entry, new_entry))
continue;
@@ -157,6 +170,7 @@ retry:
continue;
if (!tomoyo_condition(r, ptr->cond))
continue;
+ r->matched_acl = ptr;
r->granted = true;
return;
}
@@ -501,7 +515,8 @@ struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname,
* that domain. Do not perform domain transition if
* profile for that domain is not yet created.
*/
- if (!entry->ns->profile_ptr[entry->profile])
+ if (tomoyo_policy_loaded &&
+ !entry->ns->profile_ptr[entry->profile])
return NULL;
}
return entry;
@@ -557,12 +572,99 @@ out:
tomoyo_write_log(&r, "use_profile %u\n",
entry->profile);
tomoyo_write_log(&r, "use_group %u\n", entry->group);
+ tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
}
}
return entry;
}
/**
+ * tomoyo_environ - Check permission for environment variable names.
+ *
+ * @ee: Pointer to "struct tomoyo_execve".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_environ(struct tomoyo_execve *ee)
+{
+ struct tomoyo_request_info *r = &ee->r;
+ struct linux_binprm *bprm = ee->bprm;
+ /* env_page.data is allocated by tomoyo_dump_page(). */
+ struct tomoyo_page_dump env_page = { };
+ char *arg_ptr; /* Size is TOMOYO_EXEC_TMPSIZE bytes */
+ int arg_len = 0;
+ unsigned long pos = bprm->p;
+ int offset = pos % PAGE_SIZE;
+ int argv_count = bprm->argc;
+ int envp_count = bprm->envc;
+ int error = -ENOMEM;
+
+ ee->r.type = TOMOYO_MAC_ENVIRON;
+ ee->r.profile = r->domain->profile;
+ ee->r.mode = tomoyo_get_mode(r->domain->ns, ee->r.profile,
+ TOMOYO_MAC_ENVIRON);
+ if (!r->mode || !envp_count)
+ return 0;
+ arg_ptr = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS);
+ if (!arg_ptr)
+ goto out;
+ while (error == -ENOMEM) {
+ if (!tomoyo_dump_page(bprm, pos, &env_page))
+ goto out;
+ pos += PAGE_SIZE - offset;
+ /* Read. */
+ while (argv_count && offset < PAGE_SIZE) {
+ if (!env_page.data[offset++])
+ argv_count--;
+ }
+ if (argv_count) {
+ offset = 0;
+ continue;
+ }
+ while (offset < PAGE_SIZE) {
+ const unsigned char c = env_page.data[offset++];
+
+ if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) {
+ if (c == '=') {
+ arg_ptr[arg_len++] = '\0';
+ } else if (c == '\\') {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = '\\';
+ } else if (c > ' ' && c < 127) {
+ arg_ptr[arg_len++] = c;
+ } else {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = (c >> 6) + '0';
+ arg_ptr[arg_len++]
+ = ((c >> 3) & 7) + '0';
+ arg_ptr[arg_len++] = (c & 7) + '0';
+ }
+ } else {
+ arg_ptr[arg_len] = '\0';
+ }
+ if (c)
+ continue;
+ if (tomoyo_env_perm(r, arg_ptr)) {
+ error = -EPERM;
+ break;
+ }
+ if (!--envp_count) {
+ error = 0;
+ break;
+ }
+ arg_len = 0;
+ }
+ offset = 0;
+ }
+out:
+ if (r->mode != TOMOYO_CONFIG_ENFORCING)
+ error = 0;
+ kfree(env_page.data);
+ kfree(arg_ptr);
+ return error;
+}
+
+/**
* tomoyo_find_next_domain - Find a domain.
*
* @bprm: Pointer to "struct linux_binprm".
@@ -577,10 +679,11 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
struct tomoyo_domain_info *domain = NULL;
const char *original_name = bprm->filename;
int retval = -ENOMEM;
- bool need_kfree = false;
bool reject_on_transition_failure = false;
- struct tomoyo_path_info rn = { }; /* real name */
+ const struct tomoyo_path_info *candidate;
+ struct tomoyo_path_info exename;
struct tomoyo_execve *ee = kzalloc(sizeof(*ee), GFP_NOFS);
+
if (!ee)
return -ENOMEM;
ee->tmp = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS);
@@ -594,40 +697,32 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
ee->bprm = bprm;
ee->r.obj = &ee->obj;
ee->obj.path1 = bprm->file->f_path;
- retry:
- if (need_kfree) {
- kfree(rn.name);
- need_kfree = false;
- }
/* Get symlink's pathname of program. */
retval = -ENOENT;
- rn.name = tomoyo_realpath_nofollow(original_name);
- if (!rn.name)
+ exename.name = tomoyo_realpath_nofollow(original_name);
+ if (!exename.name)
goto out;
- tomoyo_fill_path_info(&rn);
- need_kfree = true;
-
+ tomoyo_fill_path_info(&exename);
+retry:
/* Check 'aggregator' directive. */
{
struct tomoyo_aggregator *ptr;
struct list_head *list =
&old_domain->ns->policy_list[TOMOYO_ID_AGGREGATOR];
/* Check 'aggregator' directive. */
+ candidate = &exename;
list_for_each_entry_rcu(ptr, list, head.list) {
if (ptr->head.is_deleted ||
- !tomoyo_path_matches_pattern(&rn,
+ !tomoyo_path_matches_pattern(&exename,
ptr->original_name))
continue;
- kfree(rn.name);
- need_kfree = false;
- /* This is OK because it is read only. */
- rn = *ptr->aggregated_name;
+ candidate = ptr->aggregated_name;
break;
}
}
/* Check execute permission. */
- retval = tomoyo_path_permission(&ee->r, TOMOYO_TYPE_EXECUTE, &rn);
+ retval = tomoyo_execute_permission(&ee->r, candidate);
if (retval == TOMOYO_RETRY_REQUEST)
goto retry;
if (retval < 0)
@@ -638,20 +733,51 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
* wildcard) rather than the pathname passed to execve()
* (which never contains wildcard).
*/
- if (ee->r.param.path.matched_path) {
- if (need_kfree)
- kfree(rn.name);
- need_kfree = false;
- /* This is OK because it is read only. */
- rn = *ee->r.param.path.matched_path;
- }
+ if (ee->r.param.path.matched_path)
+ candidate = ee->r.param.path.matched_path;
- /* Calculate domain to transit to. */
+ /*
+ * Check for domain transition preference if "file execute" matched.
+ * If preference is given, make do_execve() fail if domain transition
+ * has failed, for domain transition preference should be used with
+ * destination domain defined.
+ */
+ if (ee->transition) {
+ const char *domainname = ee->transition->name;
+ reject_on_transition_failure = true;
+ if (!strcmp(domainname, "keep"))
+ goto force_keep_domain;
+ if (!strcmp(domainname, "child"))
+ goto force_child_domain;
+ if (!strcmp(domainname, "reset"))
+ goto force_reset_domain;
+ if (!strcmp(domainname, "initialize"))
+ goto force_initialize_domain;
+ if (!strcmp(domainname, "parent")) {
+ char *cp;
+ strncpy(ee->tmp, old_domain->domainname->name,
+ TOMOYO_EXEC_TMPSIZE - 1);
+ cp = strrchr(ee->tmp, ' ');
+ if (cp)
+ *cp = '\0';
+ } else if (*domainname == '<')
+ strncpy(ee->tmp, domainname, TOMOYO_EXEC_TMPSIZE - 1);
+ else
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+ old_domain->domainname->name, domainname);
+ goto force_jump_domain;
+ }
+ /*
+ * No domain transition preference specified.
+ * Calculate domain to transit to.
+ */
switch (tomoyo_transition_type(old_domain->ns, old_domain->domainname,
- &rn)) {
+ candidate)) {
case TOMOYO_TRANSITION_CONTROL_RESET:
+force_reset_domain:
/* Transit to the root of specified namespace. */
- snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>", rn.name);
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>",
+ candidate->name);
/*
* Make do_execve() fail if domain transition across namespaces
* has failed.
@@ -659,11 +785,13 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
reject_on_transition_failure = true;
break;
case TOMOYO_TRANSITION_CONTROL_INITIALIZE:
+force_initialize_domain:
/* Transit to the child of current namespace's root. */
snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
- old_domain->ns->name, rn.name);
+ old_domain->ns->name, candidate->name);
break;
case TOMOYO_TRANSITION_CONTROL_KEEP:
+force_keep_domain:
/* Keep current domain. */
domain = old_domain;
break;
@@ -677,13 +805,15 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
* before /sbin/init.
*/
domain = old_domain;
- } else {
- /* Normal domain transition. */
- snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
- old_domain->domainname->name, rn.name);
+ break;
}
+force_child_domain:
+ /* Normal domain transition. */
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+ old_domain->domainname->name, candidate->name);
break;
}
+force_jump_domain:
if (!domain)
domain = tomoyo_assign_domain(ee->tmp, true);
if (domain)
@@ -711,8 +841,11 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
/* Update reference count on "struct tomoyo_domain_info". */
atomic_inc(&domain->users);
bprm->cred->security = domain;
- if (need_kfree)
- kfree(rn.name);
+ kfree(exename.name);
+ if (!retval) {
+ ee->r.domain = domain;
+ retval = tomoyo_environ(ee);
+ }
kfree(ee->tmp);
kfree(ee->dump.data);
kfree(ee);
@@ -732,7 +865,8 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
struct tomoyo_page_dump *dump)
{
struct page *page;
- /* dump->data is released by tomoyo_finish_execve(). */
+
+ /* dump->data is released by tomoyo_find_next_domain(). */
if (!dump->data) {
dump->data = kzalloc(PAGE_SIZE, GFP_NOFS);
if (!dump->data)
@@ -753,6 +887,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
* So do I.
*/
char *kaddr = kmap_atomic(page, KM_USER0);
+
dump->page = page;
memcpy(dump->data + offset, kaddr + offset,
PAGE_SIZE - offset);
diff --git a/security/tomoyo/environ.c b/security/tomoyo/environ.c
new file mode 100644
index 00000000000..ad4c6e18a43
--- /dev/null
+++ b/security/tomoyo/environ.c
@@ -0,0 +1,122 @@
+/*
+ * security/tomoyo/environ.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+
+/**
+ * tomoyo_check_env_acl - Check permission for environment variable's name.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_env_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_env_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return tomoyo_path_matches_pattern(r->param.environ.name, acl->env);
+}
+
+/**
+ * tomoyo_audit_env_log - Audit environment variable name log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_env_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_supervisor(r, "misc env %s\n",
+ r->param.environ.name->name);
+}
+
+/**
+ * tomoyo_env_perm - Check permission for environment variable's name.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @env: The name of environment variable.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env)
+{
+ struct tomoyo_path_info environ;
+ int error;
+
+ if (!env || !*env)
+ return 0;
+ environ.name = env;
+ tomoyo_fill_path_info(&environ);
+ r->param_type = TOMOYO_TYPE_ENV_ACL;
+ r->param.environ.name = &environ;
+ do {
+ tomoyo_check_acl(r, tomoyo_check_env_acl);
+ error = tomoyo_audit_env_log(r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ return error;
+}
+
+/**
+ * tomoyo_same_env_acl - Check for duplicated "struct tomoyo_env_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_env_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->env == p2->env;
+}
+
+/**
+ * tomoyo_write_env - Write "struct tomoyo_env_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_env(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL };
+ int error = -ENOMEM;
+ const char *data = tomoyo_read_token(param);
+
+ if (!tomoyo_correct_word(data) || strchr(data, '='))
+ return -EINVAL;
+ e.env = tomoyo_get_name(data);
+ if (!e.env)
+ return error;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_env_acl, NULL);
+ tomoyo_put_name(e.env);
+ return error;
+}
+
+/**
+ * tomoyo_write_misc - Update environment variable list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_misc(struct tomoyo_acl_param *param)
+{
+ if (tomoyo_str_starts(&param->data, "env "))
+ return tomoyo_write_env(param);
+ return -EINVAL;
+}
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 743c35f5084..40039079074 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -555,8 +555,8 @@ static int tomoyo_update_path2_acl(const u8 perm,
*
* Caller holds tomoyo_read_lock().
*/
-int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
- const struct tomoyo_path_info *filename)
+static int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
+ const struct tomoyo_path_info *filename)
{
int error;
@@ -570,16 +570,42 @@ int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
do {
tomoyo_check_acl(r, tomoyo_check_path_acl);
error = tomoyo_audit_path_log(r);
- /*
- * Do not retry for execute request, for alias may have
- * changed.
- */
- } while (error == TOMOYO_RETRY_REQUEST &&
- operation != TOMOYO_TYPE_EXECUTE);
+ } while (error == TOMOYO_RETRY_REQUEST);
return error;
}
/**
+ * tomoyo_execute_permission - Check permission for execute operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @filename: Filename to check.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_execute_permission(struct tomoyo_request_info *r,
+ const struct tomoyo_path_info *filename)
+{
+ /*
+ * Unlike other permission checks, this check is done regardless of
+ * profile mode settings in order to check for domain transition
+ * preference.
+ */
+ r->type = TOMOYO_MAC_FILE_EXECUTE;
+ r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type);
+ r->param_type = TOMOYO_TYPE_PATH_ACL;
+ r->param.path.filename = filename;
+ r->param.path.operation = TOMOYO_TYPE_EXECUTE;
+ tomoyo_check_acl(r, tomoyo_check_path_acl);
+ r->ee->transition = r->matched_acl && r->matched_acl->cond ?
+ r->matched_acl->cond->transit : NULL;
+ if (r->mode != TOMOYO_CONFIG_DISABLED)
+ return tomoyo_audit_path_log(r);
+ return 0;
+}
+
+/**
* tomoyo_same_path_number_acl - Check for duplicated "struct tomoyo_path_number_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c
index ae135fbbbe9..986a6a75686 100644
--- a/security/tomoyo/gc.c
+++ b/security/tomoyo/gc.c
@@ -8,36 +8,26 @@
#include <linux/kthread.h>
#include <linux/slab.h>
+/**
+ * tomoyo_memory_free - Free memory for elements.
+ *
+ * @ptr: Pointer to allocated memory.
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static inline void tomoyo_memory_free(void *ptr)
+{
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= ksize(ptr);
+ kfree(ptr);
+}
+
/* The list for "struct tomoyo_io_buffer". */
static LIST_HEAD(tomoyo_io_buffer_list);
/* Lock for protecting tomoyo_io_buffer_list. */
static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock);
-/* Size of an element. */
-static const u8 tomoyo_element_size[TOMOYO_MAX_POLICY] = {
- [TOMOYO_ID_GROUP] = sizeof(struct tomoyo_group),
- [TOMOYO_ID_PATH_GROUP] = sizeof(struct tomoyo_path_group),
- [TOMOYO_ID_NUMBER_GROUP] = sizeof(struct tomoyo_number_group),
- [TOMOYO_ID_AGGREGATOR] = sizeof(struct tomoyo_aggregator),
- [TOMOYO_ID_TRANSITION_CONTROL] =
- sizeof(struct tomoyo_transition_control),
- [TOMOYO_ID_MANAGER] = sizeof(struct tomoyo_manager),
- /* [TOMOYO_ID_CONDITION] = "struct tomoyo_condition"->size, */
- /* [TOMOYO_ID_NAME] = "struct tomoyo_name"->size, */
- /* [TOMOYO_ID_ACL] =
- tomoyo_acl_size["struct tomoyo_acl_info"->type], */
- [TOMOYO_ID_DOMAIN] = sizeof(struct tomoyo_domain_info),
-};
-
-/* Size of a domain ACL element. */
-static const u8 tomoyo_acl_size[] = {
- [TOMOYO_TYPE_PATH_ACL] = sizeof(struct tomoyo_path_acl),
- [TOMOYO_TYPE_PATH2_ACL] = sizeof(struct tomoyo_path2_acl),
- [TOMOYO_TYPE_PATH_NUMBER_ACL] = sizeof(struct tomoyo_path_number_acl),
- [TOMOYO_TYPE_MKDEV_ACL] = sizeof(struct tomoyo_mkdev_acl),
- [TOMOYO_TYPE_MOUNT_ACL] = sizeof(struct tomoyo_mount_acl),
-};
-
/**
* tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not.
*
@@ -55,15 +45,11 @@ static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element)
list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
head->users++;
spin_unlock(&tomoyo_io_buffer_list_lock);
- if (mutex_lock_interruptible(&head->io_sem)) {
- in_use = true;
- goto out;
- }
+ mutex_lock(&head->io_sem);
if (head->r.domain == element || head->r.group == element ||
head->r.acl == element || &head->w.domain->list == element)
in_use = true;
mutex_unlock(&head->io_sem);
-out:
spin_lock(&tomoyo_io_buffer_list_lock);
head->users--;
if (in_use)
@@ -77,15 +63,14 @@ out:
* tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not.
*
* @string: String to check.
- * @size: Memory allocated for @string .
*
* Returns true if @string is used by /sys/kernel/security/tomoyo/ users,
* false otherwise.
*/
-static bool tomoyo_name_used_by_io_buffer(const char *string,
- const size_t size)
+static bool tomoyo_name_used_by_io_buffer(const char *string)
{
struct tomoyo_io_buffer *head;
+ const size_t size = strlen(string) + 1;
bool in_use = false;
spin_lock(&tomoyo_io_buffer_list_lock);
@@ -93,10 +78,7 @@ static bool tomoyo_name_used_by_io_buffer(const char *string,
int i;
head->users++;
spin_unlock(&tomoyo_io_buffer_list_lock);
- if (mutex_lock_interruptible(&head->io_sem)) {
- in_use = true;
- goto out;
- }
+ mutex_lock(&head->io_sem);
for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) {
const char *w = head->r.w[i];
if (w < string || w > string + size)
@@ -105,7 +87,6 @@ static bool tomoyo_name_used_by_io_buffer(const char *string,
break;
}
mutex_unlock(&head->io_sem);
-out:
spin_lock(&tomoyo_io_buffer_list_lock);
head->users--;
if (in_use)
@@ -115,84 +96,6 @@ out:
return in_use;
}
-/* Structure for garbage collection. */
-struct tomoyo_gc {
- struct list_head list;
- enum tomoyo_policy_id type;
- size_t size;
- struct list_head *element;
-};
-/* List of entries to be deleted. */
-static LIST_HEAD(tomoyo_gc_list);
-/* Length of tomoyo_gc_list. */
-static int tomoyo_gc_list_len;
-
-/**
- * tomoyo_add_to_gc - Add an entry to to be deleted list.
- *
- * @type: One of values in "enum tomoyo_policy_id".
- * @element: Pointer to "struct list_head".
- *
- * Returns true on success, false otherwise.
- *
- * Caller holds tomoyo_policy_lock mutex.
- *
- * Adding an entry needs kmalloc(). Thus, if we try to add thousands of
- * entries at once, it will take too long time. Thus, do not add more than 128
- * entries per a scan. But to be able to handle worst case where all entries
- * are in-use, we accept one more entry per a scan.
- *
- * If we use singly linked list using "struct list_head"->prev (which is
- * LIST_POISON2), we can avoid kmalloc().
- */
-static bool tomoyo_add_to_gc(const int type, struct list_head *element)
-{
- struct tomoyo_gc *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
- return false;
- entry->type = type;
- if (type == TOMOYO_ID_ACL)
- entry->size = tomoyo_acl_size[
- container_of(element,
- typeof(struct tomoyo_acl_info),
- list)->type];
- else if (type == TOMOYO_ID_NAME)
- entry->size = strlen(container_of(element,
- typeof(struct tomoyo_name),
- head.list)->entry.name) + 1;
- else if (type == TOMOYO_ID_CONDITION)
- entry->size =
- container_of(element, typeof(struct tomoyo_condition),
- head.list)->size;
- else
- entry->size = tomoyo_element_size[type];
- entry->element = element;
- list_add(&entry->list, &tomoyo_gc_list);
- list_del_rcu(element);
- return tomoyo_gc_list_len++ < 128;
-}
-
-/**
- * tomoyo_element_linked_by_gc - Validate next element of an entry.
- *
- * @element: Pointer to an element.
- * @size: Size of @element in byte.
- *
- * Returns true if @element is linked by other elements in the garbage
- * collector's queue, false otherwise.
- */
-static bool tomoyo_element_linked_by_gc(const u8 *element, const size_t size)
-{
- struct tomoyo_gc *p;
- list_for_each_entry(p, &tomoyo_gc_list, list) {
- const u8 *ptr = (const u8 *) p->element->next;
- if (ptr < element || element + size < ptr)
- continue;
- return true;
- }
- return false;
-}
-
/**
* tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control".
*
@@ -200,7 +103,7 @@ static bool tomoyo_element_linked_by_gc(const u8 *element, const size_t size)
*
* Returns nothing.
*/
-static void tomoyo_del_transition_control(struct list_head *element)
+static inline void tomoyo_del_transition_control(struct list_head *element)
{
struct tomoyo_transition_control *ptr =
container_of(element, typeof(*ptr), head.list);
@@ -215,7 +118,7 @@ static void tomoyo_del_transition_control(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_aggregator(struct list_head *element)
+static inline void tomoyo_del_aggregator(struct list_head *element)
{
struct tomoyo_aggregator *ptr =
container_of(element, typeof(*ptr), head.list);
@@ -230,7 +133,7 @@ static void tomoyo_del_aggregator(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_manager(struct list_head *element)
+static inline void tomoyo_del_manager(struct list_head *element)
{
struct tomoyo_manager *ptr =
container_of(element, typeof(*ptr), head.list);
@@ -293,6 +196,38 @@ static void tomoyo_del_acl(struct list_head *element)
tomoyo_put_number_union(&entry->flags);
}
break;
+ case TOMOYO_TYPE_ENV_ACL:
+ {
+ struct tomoyo_env_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_name(entry->env);
+ }
+ break;
+ case TOMOYO_TYPE_INET_ACL:
+ {
+ struct tomoyo_inet_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_group(entry->address.group);
+ tomoyo_put_number_union(&entry->port);
+ }
+ break;
+ case TOMOYO_TYPE_UNIX_ACL:
+ {
+ struct tomoyo_unix_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_name_union(&entry->name);
+ }
+ break;
+ case TOMOYO_TYPE_MANUAL_TASK_ACL:
+ {
+ struct tomoyo_task_acl *entry =
+ container_of(acl, typeof(*entry), head);
+ tomoyo_put_name(entry->domainname);
+ }
+ break;
}
}
@@ -301,44 +236,26 @@ static void tomoyo_del_acl(struct list_head *element)
*
* @element: Pointer to "struct list_head".
*
- * Returns true if deleted, false otherwise.
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
*/
-static bool tomoyo_del_domain(struct list_head *element)
+static inline void tomoyo_del_domain(struct list_head *element)
{
struct tomoyo_domain_info *domain =
container_of(element, typeof(*domain), list);
struct tomoyo_acl_info *acl;
struct tomoyo_acl_info *tmp;
/*
- * Since we don't protect whole execve() operation using SRCU,
- * we need to recheck domain->users at this point.
- *
- * (1) Reader starts SRCU section upon execve().
- * (2) Reader traverses tomoyo_domain_list and finds this domain.
- * (3) Writer marks this domain as deleted.
- * (4) Garbage collector removes this domain from tomoyo_domain_list
- * because this domain is marked as deleted and used by nobody.
- * (5) Reader saves reference to this domain into
- * "struct linux_binprm"->cred->security .
- * (6) Reader finishes SRCU section, although execve() operation has
- * not finished yet.
- * (7) Garbage collector waits for SRCU synchronization.
- * (8) Garbage collector kfree() this domain because this domain is
- * used by nobody.
- * (9) Reader finishes execve() operation and restores this domain from
- * "struct linux_binprm"->cred->security.
- *
- * By updating domain->users at (5), we can solve this race problem
- * by rechecking domain->users at (8).
+ * Since this domain is referenced from neither
+ * "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete
+ * elements without checking for is_deleted flag.
*/
- if (atomic_read(&domain->users))
- return false;
list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
tomoyo_del_acl(&acl->list);
tomoyo_memory_free(acl);
}
tomoyo_put_name(domain->domainname);
- return true;
}
/**
@@ -387,10 +304,9 @@ void tomoyo_del_condition(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_name(struct list_head *element)
+static inline void tomoyo_del_name(struct list_head *element)
{
- const struct tomoyo_name *ptr =
- container_of(element, typeof(*ptr), head.list);
+ /* Nothing to do. */
}
/**
@@ -400,7 +316,7 @@ static void tomoyo_del_name(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_path_group(struct list_head *element)
+static inline void tomoyo_del_path_group(struct list_head *element)
{
struct tomoyo_path_group *member =
container_of(element, typeof(*member), head.list);
@@ -414,7 +330,7 @@ static void tomoyo_del_path_group(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_group(struct list_head *element)
+static inline void tomoyo_del_group(struct list_head *element)
{
struct tomoyo_group *group =
container_of(element, typeof(*group), head.list);
@@ -422,16 +338,128 @@ static void tomoyo_del_group(struct list_head *element)
}
/**
+ * tomoyo_del_address_group - Delete members in "struct tomoyo_address_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_address_group(struct list_head *element)
+{
+ /* Nothing to do. */
+}
+
+/**
* tomoyo_del_number_group - Delete members in "struct tomoyo_number_group".
*
* @element: Pointer to "struct list_head".
*
* Returns nothing.
*/
-static void tomoyo_del_number_group(struct list_head *element)
+static inline void tomoyo_del_number_group(struct list_head *element)
{
- struct tomoyo_number_group *member =
- container_of(element, typeof(*member), head.list);
+ /* Nothing to do. */
+}
+
+/**
+ * tomoyo_try_to_gc - Try to kfree() an entry.
+ *
+ * @type: One of values in "enum tomoyo_policy_id".
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static void tomoyo_try_to_gc(const enum tomoyo_policy_id type,
+ struct list_head *element)
+{
+ /*
+ * __list_del_entry() guarantees that the list element became no longer
+ * reachable from the list which the element was originally on (e.g.
+ * tomoyo_domain_list). Also, synchronize_srcu() guarantees that the
+ * list element became no longer referenced by syscall users.
+ */
+ __list_del_entry(element);
+ mutex_unlock(&tomoyo_policy_lock);
+ synchronize_srcu(&tomoyo_ss);
+ /*
+ * However, there are two users which may still be using the list
+ * element. We need to defer until both users forget this element.
+ *
+ * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl}
+ * and "struct tomoyo_io_buffer"->w.domain forget this element.
+ */
+ if (tomoyo_struct_used_by_io_buffer(element))
+ goto reinject;
+ switch (type) {
+ case TOMOYO_ID_TRANSITION_CONTROL:
+ tomoyo_del_transition_control(element);
+ break;
+ case TOMOYO_ID_MANAGER:
+ tomoyo_del_manager(element);
+ break;
+ case TOMOYO_ID_AGGREGATOR:
+ tomoyo_del_aggregator(element);
+ break;
+ case TOMOYO_ID_GROUP:
+ tomoyo_del_group(element);
+ break;
+ case TOMOYO_ID_PATH_GROUP:
+ tomoyo_del_path_group(element);
+ break;
+ case TOMOYO_ID_ADDRESS_GROUP:
+ tomoyo_del_address_group(element);
+ break;
+ case TOMOYO_ID_NUMBER_GROUP:
+ tomoyo_del_number_group(element);
+ break;
+ case TOMOYO_ID_CONDITION:
+ tomoyo_del_condition(element);
+ break;
+ case TOMOYO_ID_NAME:
+ /*
+ * Don't kfree() until all "struct tomoyo_io_buffer"->r.w[]
+ * forget this element.
+ */
+ if (tomoyo_name_used_by_io_buffer
+ (container_of(element, typeof(struct tomoyo_name),
+ head.list)->entry.name))
+ goto reinject;
+ tomoyo_del_name(element);
+ break;
+ case TOMOYO_ID_ACL:
+ tomoyo_del_acl(element);
+ break;
+ case TOMOYO_ID_DOMAIN:
+ /*
+ * Don't kfree() until all "struct cred"->security forget this
+ * element.
+ */
+ if (atomic_read(&container_of
+ (element, typeof(struct tomoyo_domain_info),
+ list)->users))
+ goto reinject;
+ break;
+ case TOMOYO_MAX_POLICY:
+ break;
+ }
+ mutex_lock(&tomoyo_policy_lock);
+ if (type == TOMOYO_ID_DOMAIN)
+ tomoyo_del_domain(element);
+ tomoyo_memory_free(element);
+ return;
+reinject:
+ /*
+ * We can safely reinject this element here bacause
+ * (1) Appending list elements and removing list elements are protected
+ * by tomoyo_policy_lock mutex.
+ * (2) Only this function removes list elements and this function is
+ * exclusively executed by tomoyo_gc_mutex mutex.
+ * are true.
+ */
+ mutex_lock(&tomoyo_policy_lock);
+ list_add_rcu(element, element->prev);
}
/**
@@ -440,19 +468,19 @@ static void tomoyo_del_number_group(struct list_head *element)
* @id: One of values in "enum tomoyo_policy_id".
* @member_list: Pointer to "struct list_head".
*
- * Returns true if some elements are deleted, false otherwise.
+ * Returns nothing.
*/
-static bool tomoyo_collect_member(const enum tomoyo_policy_id id,
+static void tomoyo_collect_member(const enum tomoyo_policy_id id,
struct list_head *member_list)
{
struct tomoyo_acl_head *member;
- list_for_each_entry(member, member_list, list) {
+ struct tomoyo_acl_head *tmp;
+ list_for_each_entry_safe(member, tmp, member_list, list) {
if (!member->is_deleted)
continue;
- if (!tomoyo_add_to_gc(id, &member->list))
- return false;
+ member->is_deleted = TOMOYO_GC_IN_PROGRESS;
+ tomoyo_try_to_gc(id, &member->list);
}
- return true;
}
/**
@@ -460,22 +488,22 @@ static bool tomoyo_collect_member(const enum tomoyo_policy_id id,
*
* @list: Pointer to "struct list_head".
*
- * Returns true if some elements are deleted, false otherwise.
+ * Returns nothing.
*/
-static bool tomoyo_collect_acl(struct list_head *list)
+static void tomoyo_collect_acl(struct list_head *list)
{
struct tomoyo_acl_info *acl;
- list_for_each_entry(acl, list, list) {
+ struct tomoyo_acl_info *tmp;
+ list_for_each_entry_safe(acl, tmp, list, list) {
if (!acl->is_deleted)
continue;
- if (!tomoyo_add_to_gc(TOMOYO_ID_ACL, &acl->list))
- return false;
+ acl->is_deleted = TOMOYO_GC_IN_PROGRESS;
+ tomoyo_try_to_gc(TOMOYO_ID_ACL, &acl->list);
}
- return true;
}
/**
- * tomoyo_collect_entry - Scan lists for deleted elements.
+ * tomoyo_collect_entry - Try to kfree() deleted elements.
*
* Returns nothing.
*/
@@ -484,174 +512,82 @@ static void tomoyo_collect_entry(void)
int i;
enum tomoyo_policy_id id;
struct tomoyo_policy_namespace *ns;
- int idx;
- if (mutex_lock_interruptible(&tomoyo_policy_lock))
- return;
- idx = tomoyo_read_lock();
+ mutex_lock(&tomoyo_policy_lock);
{
struct tomoyo_domain_info *domain;
- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
- if (!tomoyo_collect_acl(&domain->acl_info_list))
- goto unlock;
+ struct tomoyo_domain_info *tmp;
+ list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list,
+ list) {
+ tomoyo_collect_acl(&domain->acl_info_list);
if (!domain->is_deleted || atomic_read(&domain->users))
continue;
- /*
- * Nobody is referring this domain. But somebody may
- * refer this domain after successful execve().
- * We recheck domain->users after SRCU synchronization.
- */
- if (!tomoyo_add_to_gc(TOMOYO_ID_DOMAIN, &domain->list))
- goto unlock;
+ tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list);
}
}
- list_for_each_entry_rcu(ns, &tomoyo_namespace_list, namespace_list) {
+ list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
for (id = 0; id < TOMOYO_MAX_POLICY; id++)
- if (!tomoyo_collect_member(id, &ns->policy_list[id]))
- goto unlock;
+ tomoyo_collect_member(id, &ns->policy_list[id]);
for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++)
- if (!tomoyo_collect_acl(&ns->acl_group[i]))
- goto unlock;
+ tomoyo_collect_acl(&ns->acl_group[i]);
+ }
+ {
+ struct tomoyo_shared_acl_head *ptr;
+ struct tomoyo_shared_acl_head *tmp;
+ list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list,
+ list) {
+ if (atomic_read(&ptr->users) > 0)
+ continue;
+ atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_CONDITION, &ptr->list);
+ }
+ }
+ list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
for (i = 0; i < TOMOYO_MAX_GROUP; i++) {
struct list_head *list = &ns->group_list[i];
struct tomoyo_group *group;
+ struct tomoyo_group *tmp;
switch (i) {
case 0:
id = TOMOYO_ID_PATH_GROUP;
break;
- default:
+ case 1:
id = TOMOYO_ID_NUMBER_GROUP;
break;
+ default:
+ id = TOMOYO_ID_ADDRESS_GROUP;
+ break;
}
- list_for_each_entry(group, list, head.list) {
- if (!tomoyo_collect_member
- (id, &group->member_list))
- goto unlock;
+ list_for_each_entry_safe(group, tmp, list, head.list) {
+ tomoyo_collect_member(id, &group->member_list);
if (!list_empty(&group->member_list) ||
- atomic_read(&group->head.users))
+ atomic_read(&group->head.users) > 0)
continue;
- if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP,
- &group->head.list))
- goto unlock;
+ atomic_set(&group->head.users,
+ TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_GROUP,
+ &group->head.list);
}
}
}
- id = TOMOYO_ID_CONDITION;
- for (i = 0; i < TOMOYO_MAX_HASH + 1; i++) {
- struct list_head *list = !i ?
- &tomoyo_condition_list : &tomoyo_name_list[i - 1];
+ for (i = 0; i < TOMOYO_MAX_HASH; i++) {
+ struct list_head *list = &tomoyo_name_list[i];
struct tomoyo_shared_acl_head *ptr;
- list_for_each_entry(ptr, list, list) {
- if (atomic_read(&ptr->users))
+ struct tomoyo_shared_acl_head *tmp;
+ list_for_each_entry_safe(ptr, tmp, list, list) {
+ if (atomic_read(&ptr->users) > 0)
continue;
- if (!tomoyo_add_to_gc(id, &ptr->list))
- goto unlock;
+ atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_NAME, &ptr->list);
}
- id = TOMOYO_ID_NAME;
}
-unlock:
- tomoyo_read_unlock(idx);
mutex_unlock(&tomoyo_policy_lock);
}
/**
- * tomoyo_kfree_entry - Delete entries in tomoyo_gc_list.
- *
- * Returns true if some entries were kfree()d, false otherwise.
- */
-static bool tomoyo_kfree_entry(void)
-{
- struct tomoyo_gc *p;
- struct tomoyo_gc *tmp;
- bool result = false;
-
- list_for_each_entry_safe(p, tmp, &tomoyo_gc_list, list) {
- struct list_head *element = p->element;
-
- /*
- * list_del_rcu() in tomoyo_add_to_gc() guarantees that the
- * list element became no longer reachable from the list which
- * the element was originally on (e.g. tomoyo_domain_list).
- * Also, synchronize_srcu() in tomoyo_gc_thread() guarantees
- * that the list element became no longer referenced by syscall
- * users.
- *
- * However, there are three users which may still be using the
- * list element. We need to defer until all of these users
- * forget the list element.
- *
- * Firstly, defer until "struct tomoyo_io_buffer"->r.{domain,
- * group,acl} and "struct tomoyo_io_buffer"->w.domain forget
- * the list element.
- */
- if (tomoyo_struct_used_by_io_buffer(element))
- continue;
- /*
- * Secondly, defer until all other elements in the
- * tomoyo_gc_list list forget the list element.
- */
- if (tomoyo_element_linked_by_gc((const u8 *) element, p->size))
- continue;
- switch (p->type) {
- case TOMOYO_ID_TRANSITION_CONTROL:
- tomoyo_del_transition_control(element);
- break;
- case TOMOYO_ID_AGGREGATOR:
- tomoyo_del_aggregator(element);
- break;
- case TOMOYO_ID_MANAGER:
- tomoyo_del_manager(element);
- break;
- case TOMOYO_ID_CONDITION:
- tomoyo_del_condition(element);
- break;
- case TOMOYO_ID_NAME:
- /*
- * Thirdly, defer until all "struct tomoyo_io_buffer"
- * ->r.w[] forget the list element.
- */
- if (tomoyo_name_used_by_io_buffer(
- container_of(element, typeof(struct tomoyo_name),
- head.list)->entry.name, p->size))
- continue;
- tomoyo_del_name(element);
- break;
- case TOMOYO_ID_ACL:
- tomoyo_del_acl(element);
- break;
- case TOMOYO_ID_DOMAIN:
- if (!tomoyo_del_domain(element))
- continue;
- break;
- case TOMOYO_ID_PATH_GROUP:
- tomoyo_del_path_group(element);
- break;
- case TOMOYO_ID_GROUP:
- tomoyo_del_group(element);
- break;
- case TOMOYO_ID_NUMBER_GROUP:
- tomoyo_del_number_group(element);
- break;
- case TOMOYO_MAX_POLICY:
- break;
- }
- tomoyo_memory_free(element);
- list_del(&p->list);
- kfree(p);
- tomoyo_gc_list_len--;
- result = true;
- }
- return result;
-}
-
-/**
* tomoyo_gc_thread - Garbage collector thread function.
*
* @unused: Unused.
*
- * In case OOM-killer choose this thread for termination, we create this thread
- * as a short live thread whenever /sys/kernel/security/tomoyo/ interface was
- * close()d.
- *
* Returns 0.
*/
static int tomoyo_gc_thread(void *unused)
@@ -660,13 +596,7 @@ static int tomoyo_gc_thread(void *unused)
static DEFINE_MUTEX(tomoyo_gc_mutex);
if (!mutex_trylock(&tomoyo_gc_mutex))
goto out;
- daemonize("GC for TOMOYO");
- do {
- tomoyo_collect_entry();
- if (list_empty(&tomoyo_gc_list))
- break;
- synchronize_srcu(&tomoyo_ss);
- } while (tomoyo_kfree_entry());
+ tomoyo_collect_entry();
{
struct tomoyo_io_buffer *head;
struct tomoyo_io_buffer *tmp;
diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c
index 5fb0e129840..50092534ec5 100644
--- a/security/tomoyo/group.c
+++ b/security/tomoyo/group.c
@@ -42,7 +42,26 @@ static bool tomoyo_same_number_group(const struct tomoyo_acl_head *a,
}
/**
- * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group" list.
+ * tomoyo_same_address_group - Check for duplicated "struct tomoyo_address_group" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_address_group(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
+{
+ const struct tomoyo_address_group *p1 = container_of(a, typeof(*p1),
+ head);
+ const struct tomoyo_address_group *p2 = container_of(b, typeof(*p2),
+ head);
+
+ return tomoyo_same_ipaddr_union(&p1->address, &p2->address);
+}
+
+/**
+ * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
*
* @param: Pointer to "struct tomoyo_acl_param".
* @type: Type of this group.
@@ -77,6 +96,14 @@ int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type)
* tomoyo_put_number_union() is not needed because
* param->data[0] != '@'.
*/
+ } else {
+ struct tomoyo_address_group e = { };
+
+ if (param->data[0] == '@' ||
+ !tomoyo_parse_ipaddr_union(param, &e.address))
+ goto out;
+ error = tomoyo_update_policy(&e.head, sizeof(e), param,
+ tomoyo_same_address_group);
}
out:
tomoyo_put_group(group);
@@ -137,3 +164,35 @@ bool tomoyo_number_matches_group(const unsigned long min,
}
return matched;
}
+
+/**
+ * tomoyo_address_matches_group - Check whether the given address matches members of the given address group.
+ *
+ * @is_ipv6: True if @address is an IPv6 address.
+ * @address: An IPv4 or IPv6 address.
+ * @group: Pointer to "struct tomoyo_address_group".
+ *
+ * Returns true if @address matches addresses in @group group, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+ const struct tomoyo_group *group)
+{
+ struct tomoyo_address_group *member;
+ bool matched = false;
+ const u8 size = is_ipv6 ? 16 : 4;
+
+ list_for_each_entry_rcu(member, &group->member_list, head.list) {
+ if (member->head.is_deleted)
+ continue;
+ if (member->address.is_ipv6 != is_ipv6)
+ continue;
+ if (memcmp(&member->address.ip[0], address, size) > 0 ||
+ memcmp(address, &member->address.ip[1], size) > 0)
+ continue;
+ matched = true;
+ break;
+ }
+ return matched;
+}
diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c
index 7a56051146c..0e995716cc2 100644
--- a/security/tomoyo/memory.c
+++ b/security/tomoyo/memory.c
@@ -27,8 +27,6 @@ void tomoyo_warn_oom(const char *function)
panic("MAC Initialization failed.\n");
}
-/* Lock for protecting tomoyo_memory_used. */
-static DEFINE_SPINLOCK(tomoyo_policy_memory_lock);
/* Memoy currently used by policy/audit log/query. */
unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT];
/* Memory quota for "policy"/"audit log"/"query". */
@@ -42,22 +40,19 @@ unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT];
* Returns true on success, false otherwise.
*
* Returns true if @ptr is not NULL and quota not exceeded, false otherwise.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
*/
bool tomoyo_memory_ok(void *ptr)
{
if (ptr) {
const size_t s = ksize(ptr);
- bool result;
- spin_lock(&tomoyo_policy_memory_lock);
tomoyo_memory_used[TOMOYO_MEMORY_POLICY] += s;
- result = !tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] ||
- tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <=
- tomoyo_memory_quota[TOMOYO_MEMORY_POLICY];
- if (!result)
- tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s;
- spin_unlock(&tomoyo_policy_memory_lock);
- if (result)
+ if (!tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] ||
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <=
+ tomoyo_memory_quota[TOMOYO_MEMORY_POLICY])
return true;
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s;
}
tomoyo_warn_oom(__func__);
return false;
@@ -71,6 +66,8 @@ bool tomoyo_memory_ok(void *ptr)
*
* Returns pointer to allocated memory on success, NULL otherwise.
* @data is zero-cleared on success.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
*/
void *tomoyo_commit_ok(void *data, const unsigned int size)
{
@@ -85,20 +82,6 @@ void *tomoyo_commit_ok(void *data, const unsigned int size)
}
/**
- * tomoyo_memory_free - Free memory for elements.
- *
- * @ptr: Pointer to allocated memory.
- */
-void tomoyo_memory_free(void *ptr)
-{
- size_t s = ksize(ptr);
- spin_lock(&tomoyo_policy_memory_lock);
- tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s;
- spin_unlock(&tomoyo_policy_memory_lock);
- kfree(ptr);
-}
-
-/**
* tomoyo_get_group - Allocate memory for "struct tomoyo_path_group"/"struct tomoyo_number_group".
*
* @param: Pointer to "struct tomoyo_acl_param".
@@ -123,7 +106,8 @@ struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
goto out;
list = &param->ns->group_list[idx];
list_for_each_entry(group, list, head.list) {
- if (e.group_name != group->group_name)
+ if (e.group_name != group->group_name ||
+ atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
atomic_inc(&group->head.users);
found = true;
@@ -175,7 +159,8 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name)
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return NULL;
list_for_each_entry(ptr, head, head.list) {
- if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name))
+ if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) ||
+ atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
atomic_inc(&ptr->head.users);
goto out;
diff --git a/security/tomoyo/network.c b/security/tomoyo/network.c
new file mode 100644
index 00000000000..97527710a72
--- /dev/null
+++ b/security/tomoyo/network.c
@@ -0,0 +1,771 @@
+/*
+ * security/tomoyo/network.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/* Structure for holding inet domain socket's address. */
+struct tomoyo_inet_addr_info {
+ __be16 port; /* In network byte order. */
+ const __be32 *address; /* In network byte order. */
+ bool is_ipv6;
+};
+
+/* Structure for holding unix domain socket's address. */
+struct tomoyo_unix_addr_info {
+ u8 *addr; /* This may not be '\0' terminated string. */
+ unsigned int addr_len;
+};
+
+/* Structure for holding socket address. */
+struct tomoyo_addr_info {
+ u8 protocol;
+ u8 operation;
+ struct tomoyo_inet_addr_info inet;
+ struct tomoyo_unix_addr_info unix0;
+};
+
+/* String table for socket's protocols. */
+const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX] = {
+ [SOCK_STREAM] = "stream",
+ [SOCK_DGRAM] = "dgram",
+ [SOCK_RAW] = "raw",
+ [SOCK_SEQPACKET] = "seqpacket",
+ [0] = " ", /* Dummy for avoiding NULL pointer dereference. */
+ [4] = " ", /* Dummy for avoiding NULL pointer dereference. */
+};
+
+/**
+ * tomoyo_parse_ipaddr_union - Parse an IP address.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param,
+ struct tomoyo_ipaddr_union *ptr)
+{
+ u8 * const min = ptr->ip[0].in6_u.u6_addr8;
+ u8 * const max = ptr->ip[1].in6_u.u6_addr8;
+ char *address = tomoyo_read_token(param);
+ const char *end;
+
+ if (!strchr(address, ':') &&
+ in4_pton(address, -1, min, '-', &end) > 0) {
+ ptr->is_ipv6 = false;
+ if (!*end)
+ ptr->ip[1].s6_addr32[0] = ptr->ip[0].s6_addr32[0];
+ else if (*end++ != '-' ||
+ in4_pton(end, -1, max, '\0', &end) <= 0 || *end)
+ return false;
+ return true;
+ }
+ if (in6_pton(address, -1, min, '-', &end) > 0) {
+ ptr->is_ipv6 = true;
+ if (!*end)
+ memmove(max, min, sizeof(u16) * 8);
+ else if (*end++ != '-' ||
+ in6_pton(end, -1, max, '\0', &end) <= 0 || *end)
+ return false;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * tomoyo_print_ipv4 - Print an IPv4 address.
+ *
+ * @buffer: Buffer to write to.
+ * @buffer_len: Size of @buffer.
+ * @min_ip: Pointer to __be32.
+ * @max_ip: Pointer to __be32.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_ipv4(char *buffer, const unsigned int buffer_len,
+ const __be32 *min_ip, const __be32 *max_ip)
+{
+ snprintf(buffer, buffer_len, "%pI4%c%pI4", min_ip,
+ *min_ip == *max_ip ? '\0' : '-', max_ip);
+}
+
+/**
+ * tomoyo_print_ipv6 - Print an IPv6 address.
+ *
+ * @buffer: Buffer to write to.
+ * @buffer_len: Size of @buffer.
+ * @min_ip: Pointer to "struct in6_addr".
+ * @max_ip: Pointer to "struct in6_addr".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_ipv6(char *buffer, const unsigned int buffer_len,
+ const struct in6_addr *min_ip,
+ const struct in6_addr *max_ip)
+{
+ snprintf(buffer, buffer_len, "%pI6c%c%pI6c", min_ip,
+ !memcmp(min_ip, max_ip, 16) ? '\0' : '-', max_ip);
+}
+
+/**
+ * tomoyo_print_ip - Print an IP address.
+ *
+ * @buf: Buffer to write to.
+ * @size: Size of @buf.
+ * @ptr: Pointer to "struct ipaddr_union".
+ *
+ * Returns nothing.
+ */
+void tomoyo_print_ip(char *buf, const unsigned int size,
+ const struct tomoyo_ipaddr_union *ptr)
+{
+ if (ptr->is_ipv6)
+ tomoyo_print_ipv6(buf, size, &ptr->ip[0], &ptr->ip[1]);
+ else
+ tomoyo_print_ipv4(buf, size, &ptr->ip[0].s6_addr32[0],
+ &ptr->ip[1].s6_addr32[0]);
+}
+
+/*
+ * Mapping table from "enum tomoyo_network_acl_index" to
+ * "enum tomoyo_mac_index" for inet domain socket.
+ */
+static const u8 tomoyo_inet2mac
+[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
+ [SOCK_STREAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_STREAM_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT,
+ },
+ [SOCK_DGRAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_DGRAM_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_DGRAM_SEND,
+ },
+ [SOCK_RAW] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_RAW_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_RAW_SEND,
+ },
+};
+
+/*
+ * Mapping table from "enum tomoyo_network_acl_index" to
+ * "enum tomoyo_mac_index" for unix domain socket.
+ */
+static const u8 tomoyo_unix2mac
+[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
+ [SOCK_STREAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT,
+ },
+ [SOCK_DGRAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND,
+ },
+ [SOCK_SEQPACKET] = {
+ [TOMOYO_NETWORK_BIND] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT,
+ },
+};
+
+/**
+ * tomoyo_same_inet_acl - Check for duplicated "struct tomoyo_inet_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_inet_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_inet_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_inet_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->protocol == p2->protocol &&
+ tomoyo_same_ipaddr_union(&p1->address, &p2->address) &&
+ tomoyo_same_number_union(&p1->port, &p2->port);
+}
+
+/**
+ * tomoyo_same_unix_acl - Check for duplicated "struct tomoyo_unix_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_unix_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_unix_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_unix_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->protocol == p2->protocol &&
+ tomoyo_same_name_union(&p1->name, &p2->name);
+}
+
+/**
+ * tomoyo_merge_inet_acl - Merge duplicated "struct tomoyo_inet_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_inet_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u8 * const a_perm =
+ &container_of(a, struct tomoyo_inet_acl, head)->perm;
+ u8 perm = *a_perm;
+ const u8 b_perm = container_of(b, struct tomoyo_inet_acl, head)->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ *a_perm = perm;
+ return !perm;
+}
+
+/**
+ * tomoyo_merge_unix_acl - Merge duplicated "struct tomoyo_unix_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_unix_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u8 * const a_perm =
+ &container_of(a, struct tomoyo_unix_acl, head)->perm;
+ u8 perm = *a_perm;
+ const u8 b_perm = container_of(b, struct tomoyo_unix_acl, head)->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ *a_perm = perm;
+ return !perm;
+}
+
+/**
+ * tomoyo_write_inet_network - Write "struct tomoyo_inet_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_write_inet_network(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_inet_acl e = { .head.type = TOMOYO_TYPE_INET_ACL };
+ int error = -EINVAL;
+ u8 type;
+ const char *protocol = tomoyo_read_token(param);
+ const char *operation = tomoyo_read_token(param);
+
+ for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
+ if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
+ break;
+ for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
+ if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
+ e.perm |= 1 << type;
+ if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
+ return -EINVAL;
+ if (param->data[0] == '@') {
+ param->data++;
+ e.address.group =
+ tomoyo_get_group(param, TOMOYO_ADDRESS_GROUP);
+ if (!e.address.group)
+ return -ENOMEM;
+ } else {
+ if (!tomoyo_parse_ipaddr_union(param, &e.address))
+ goto out;
+ }
+ if (!tomoyo_parse_number_union(param, &e.port) ||
+ e.port.values[1] > 65535)
+ goto out;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_inet_acl,
+ tomoyo_merge_inet_acl);
+out:
+ tomoyo_put_group(e.address.group);
+ tomoyo_put_number_union(&e.port);
+ return error;
+}
+
+/**
+ * tomoyo_write_unix_network - Write "struct tomoyo_unix_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_unix_network(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_unix_acl e = { .head.type = TOMOYO_TYPE_UNIX_ACL };
+ int error;
+ u8 type;
+ const char *protocol = tomoyo_read_token(param);
+ const char *operation = tomoyo_read_token(param);
+
+ for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
+ if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
+ break;
+ for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
+ if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
+ e.perm |= 1 << type;
+ if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
+ return -EINVAL;
+ if (!tomoyo_parse_name_union(param, &e.name))
+ return -EINVAL;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_unix_acl,
+ tomoyo_merge_unix_acl);
+ tomoyo_put_name_union(&e.name);
+ return error;
+}
+
+/**
+ * tomoyo_audit_net_log - Audit network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @family: Name of socket family ("inet" or "unix").
+ * @protocol: Name of protocol in @family.
+ * @operation: Name of socket operation.
+ * @address: Name of address.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_net_log(struct tomoyo_request_info *r,
+ const char *family, const u8 protocol,
+ const u8 operation, const char *address)
+{
+ return tomoyo_supervisor(r, "network %s %s %s %s\n", family,
+ tomoyo_proto_keyword[protocol],
+ tomoyo_socket_keyword[operation], address);
+}
+
+/**
+ * tomoyo_audit_inet_log - Audit INET network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_inet_log(struct tomoyo_request_info *r)
+{
+ char buf[128];
+ int len;
+ const __be32 *address = r->param.inet_network.address;
+
+ if (r->param.inet_network.is_ipv6)
+ tomoyo_print_ipv6(buf, sizeof(buf), (const struct in6_addr *)
+ address, (const struct in6_addr *) address);
+ else
+ tomoyo_print_ipv4(buf, sizeof(buf), address, address);
+ len = strlen(buf);
+ snprintf(buf + len, sizeof(buf) - len, " %u",
+ r->param.inet_network.port);
+ return tomoyo_audit_net_log(r, "inet", r->param.inet_network.protocol,
+ r->param.inet_network.operation, buf);
+}
+
+/**
+ * tomoyo_audit_unix_log - Audit UNIX network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_unix_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_audit_net_log(r, "unix", r->param.unix_network.protocol,
+ r->param.unix_network.operation,
+ r->param.unix_network.address->name);
+}
+
+/**
+ * tomoyo_check_inet_acl - Check permission for inet domain socket operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_inet_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_inet_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+ const u8 size = r->param.inet_network.is_ipv6 ? 16 : 4;
+
+ if (!(acl->perm & (1 << r->param.inet_network.operation)) ||
+ !tomoyo_compare_number_union(r->param.inet_network.port,
+ &acl->port))
+ return false;
+ if (acl->address.group)
+ return tomoyo_address_matches_group
+ (r->param.inet_network.is_ipv6,
+ r->param.inet_network.address, acl->address.group);
+ return acl->address.is_ipv6 == r->param.inet_network.is_ipv6 &&
+ memcmp(&acl->address.ip[0],
+ r->param.inet_network.address, size) <= 0 &&
+ memcmp(r->param.inet_network.address,
+ &acl->address.ip[1], size) <= 0;
+}
+
+/**
+ * tomoyo_check_unix_acl - Check permission for unix domain socket operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_unix_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_unix_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return (acl->perm & (1 << r->param.unix_network.operation)) &&
+ tomoyo_compare_name_union(r->param.unix_network.address,
+ &acl->name);
+}
+
+/**
+ * tomoyo_inet_entry - Check permission for INET network operation.
+ *
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_inet_entry(const struct tomoyo_addr_info *address)
+{
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_request_info r;
+ int error = 0;
+ const u8 type = tomoyo_inet2mac[address->protocol][address->operation];
+
+ if (type && tomoyo_init_request_info(&r, NULL, type)
+ != TOMOYO_CONFIG_DISABLED) {
+ r.param_type = TOMOYO_TYPE_INET_ACL;
+ r.param.inet_network.protocol = address->protocol;
+ r.param.inet_network.operation = address->operation;
+ r.param.inet_network.is_ipv6 = address->inet.is_ipv6;
+ r.param.inet_network.address = address->inet.address;
+ r.param.inet_network.port = ntohs(address->inet.port);
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_inet_acl);
+ error = tomoyo_audit_inet_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ }
+ tomoyo_read_unlock(idx);
+ return error;
+}
+
+/**
+ * tomoyo_check_inet_address - Check permission for inet domain socket's operation.
+ *
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ * @port: Port number.
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_check_inet_address(const struct sockaddr *addr,
+ const unsigned int addr_len,
+ const u16 port,
+ struct tomoyo_addr_info *address)
+{
+ struct tomoyo_inet_addr_info *i = &address->inet;
+
+ switch (addr->sa_family) {
+ case AF_INET6:
+ if (addr_len < SIN6_LEN_RFC2133)
+ goto skip;
+ i->is_ipv6 = true;
+ i->address = (__be32 *)
+ ((struct sockaddr_in6 *) addr)->sin6_addr.s6_addr;
+ i->port = ((struct sockaddr_in6 *) addr)->sin6_port;
+ break;
+ case AF_INET:
+ if (addr_len < sizeof(struct sockaddr_in))
+ goto skip;
+ i->is_ipv6 = false;
+ i->address = (__be32 *)
+ &((struct sockaddr_in *) addr)->sin_addr;
+ i->port = ((struct sockaddr_in *) addr)->sin_port;
+ break;
+ default:
+ goto skip;
+ }
+ if (address->protocol == SOCK_RAW)
+ i->port = htons(port);
+ return tomoyo_inet_entry(address);
+skip:
+ return 0;
+}
+
+/**
+ * tomoyo_unix_entry - Check permission for UNIX network operation.
+ *
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_unix_entry(const struct tomoyo_addr_info *address)
+{
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_request_info r;
+ int error = 0;
+ const u8 type = tomoyo_unix2mac[address->protocol][address->operation];
+
+ if (type && tomoyo_init_request_info(&r, NULL, type)
+ != TOMOYO_CONFIG_DISABLED) {
+ char *buf = address->unix0.addr;
+ int len = address->unix0.addr_len - sizeof(sa_family_t);
+
+ if (len <= 0) {
+ buf = "anonymous";
+ len = 9;
+ } else if (buf[0]) {
+ len = strnlen(buf, len);
+ }
+ buf = tomoyo_encode2(buf, len);
+ if (buf) {
+ struct tomoyo_path_info addr;
+
+ addr.name = buf;
+ tomoyo_fill_path_info(&addr);
+ r.param_type = TOMOYO_TYPE_UNIX_ACL;
+ r.param.unix_network.protocol = address->protocol;
+ r.param.unix_network.operation = address->operation;
+ r.param.unix_network.address = &addr;
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_unix_acl);
+ error = tomoyo_audit_unix_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ kfree(buf);
+ } else
+ error = -ENOMEM;
+ }
+ tomoyo_read_unlock(idx);
+ return error;
+}
+
+/**
+ * tomoyo_check_unix_address - Check permission for unix domain socket's operation.
+ *
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_check_unix_address(struct sockaddr *addr,
+ const unsigned int addr_len,
+ struct tomoyo_addr_info *address)
+{
+ struct tomoyo_unix_addr_info *u = &address->unix0;
+
+ if (addr->sa_family != AF_UNIX)
+ return 0;
+ u->addr = ((struct sockaddr_un *) addr)->sun_path;
+ u->addr_len = addr_len;
+ return tomoyo_unix_entry(address);
+}
+
+/**
+ * tomoyo_kernel_service - Check whether I'm kernel service or not.
+ *
+ * Returns true if I'm kernel service, false otherwise.
+ */
+static bool tomoyo_kernel_service(void)
+{
+ /* Nothing to do if I am a kernel service. */
+ return segment_eq(get_fs(), KERNEL_DS);
+}
+
+/**
+ * tomoyo_sock_family - Get socket's family.
+ *
+ * @sk: Pointer to "struct sock".
+ *
+ * Returns one of PF_INET, PF_INET6, PF_UNIX or 0.
+ */
+static u8 tomoyo_sock_family(struct sock *sk)
+{
+ u8 family;
+
+ if (tomoyo_kernel_service())
+ return 0;
+ family = sk->sk_family;
+ switch (family) {
+ case PF_INET:
+ case PF_INET6:
+ case PF_UNIX:
+ return family;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * tomoyo_socket_listen_permission - Check permission for listening a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_listen_permission(struct socket *sock)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+ struct sockaddr_storage addr;
+ int addr_len;
+
+ if (!family || (type != SOCK_STREAM && type != SOCK_SEQPACKET))
+ return 0;
+ {
+ const int error = sock->ops->getname(sock, (struct sockaddr *)
+ &addr, &addr_len, 0);
+
+ if (error)
+ return error;
+ }
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_LISTEN;
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address((struct sockaddr *) &addr,
+ addr_len, &address);
+ return tomoyo_check_inet_address((struct sockaddr *) &addr, addr_len,
+ 0, &address);
+}
+
+/**
+ * tomoyo_socket_connect_permission - Check permission for setting the remote address of a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_connect_permission(struct socket *sock,
+ struct sockaddr *addr, int addr_len)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!family)
+ return 0;
+ address.protocol = type;
+ switch (type) {
+ case SOCK_DGRAM:
+ case SOCK_RAW:
+ address.operation = TOMOYO_NETWORK_SEND;
+ break;
+ case SOCK_STREAM:
+ case SOCK_SEQPACKET:
+ address.operation = TOMOYO_NETWORK_CONNECT;
+ break;
+ default:
+ return 0;
+ }
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address(addr, addr_len, &address);
+ return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
+ &address);
+}
+
+/**
+ * tomoyo_socket_bind_permission - Check permission for setting the local address of a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!family)
+ return 0;
+ switch (type) {
+ case SOCK_STREAM:
+ case SOCK_DGRAM:
+ case SOCK_RAW:
+ case SOCK_SEQPACKET:
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_BIND;
+ break;
+ default:
+ return 0;
+ }
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address(addr, addr_len, &address);
+ return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
+ &address);
+}
+
+/**
+ * tomoyo_socket_sendmsg_permission - Check permission for sending a datagram.
+ *
+ * @sock: Pointer to "struct socket".
+ * @msg: Pointer to "struct msghdr".
+ * @size: Unused.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg,
+ int size)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!msg->msg_name || !family ||
+ (type != SOCK_DGRAM && type != SOCK_RAW))
+ return 0;
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_SEND;
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address((struct sockaddr *)
+ msg->msg_name,
+ msg->msg_namelen, &address);
+ return tomoyo_check_inet_address((struct sockaddr *) msg->msg_name,
+ msg->msg_namelen,
+ sock->sk->sk_protocol, &address);
+}
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 6c601bd300f..738bbdf8d4c 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -15,17 +15,19 @@
#include "../../fs/internal.h"
/**
- * tomoyo_encode: Convert binary string to ascii string.
+ * tomoyo_encode2 - Encode binary string to ascii string.
*
- * @str: String in binary format.
+ * @str: String in binary format.
+ * @str_len: Size of @str in byte.
*
* Returns pointer to @str in ascii format on success, NULL otherwise.
*
* This function uses kzalloc(), so caller must kfree() if this function
* didn't return NULL.
*/
-char *tomoyo_encode(const char *str)
+char *tomoyo_encode2(const char *str, int str_len)
{
+ int i;
int len = 0;
const char *p = str;
char *cp;
@@ -33,8 +35,9 @@ char *tomoyo_encode(const char *str)
if (!p)
return NULL;
- while (*p) {
- const unsigned char c = *p++;
+ for (i = 0; i < str_len; i++) {
+ const unsigned char c = p[i];
+
if (c == '\\')
len += 2;
else if (c > ' ' && c < 127)
@@ -49,8 +52,8 @@ char *tomoyo_encode(const char *str)
return NULL;
cp0 = cp;
p = str;
- while (*p) {
- const unsigned char c = *p++;
+ for (i = 0; i < str_len; i++) {
+ const unsigned char c = p[i];
if (c == '\\') {
*cp++ = '\\';
@@ -68,6 +71,21 @@ char *tomoyo_encode(const char *str)
}
/**
+ * tomoyo_encode - Encode binary string to ascii string.
+ *
+ * @str: String in binary format.
+ *
+ * Returns pointer to @str in ascii format on success, NULL otherwise.
+ *
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+char *tomoyo_encode(const char *str)
+{
+ return str ? tomoyo_encode2(str, strlen(str)) : NULL;
+}
+
+/**
* tomoyo_get_absolute_path - Get the path of a dentry but ignores chroot'ed root.
*
* @path: Pointer to "struct path".
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index a49c3bfd4dd..2672ac4f3be 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -8,6 +8,124 @@
#include "common.h"
/**
+ * tomoyo_check_task_acl - Check permission for task operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_task_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_task_acl *acl = container_of(ptr, typeof(*acl),
+ head);
+ return !tomoyo_pathcmp(r->param.task.domainname, acl->domainname);
+}
+
+/**
+ * tomoyo_write_self - write() for /sys/kernel/security/tomoyo/self_domain interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Domainname to transit to.
+ * @count: Size of @buf.
+ * @ppos: Unused.
+ *
+ * Returns @count on success, negative value otherwise.
+ *
+ * If domain transition was permitted but the domain transition failed, this
+ * function returns error rather than terminating current thread with SIGKILL.
+ */
+static ssize_t tomoyo_write_self(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data;
+ int error;
+ if (!count || count >= TOMOYO_EXEC_TMPSIZE - 10)
+ return -ENOMEM;
+ data = kzalloc(count + 1, GFP_NOFS);
+ if (!data)
+ return -ENOMEM;
+ if (copy_from_user(data, buf, count)) {
+ error = -EFAULT;
+ goto out;
+ }
+ tomoyo_normalize_line(data);
+ if (tomoyo_correct_domain(data)) {
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_path_info name;
+ struct tomoyo_request_info r;
+ name.name = data;
+ tomoyo_fill_path_info(&name);
+ /* Check "task manual_domain_transition" permission. */
+ tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_EXECUTE);
+ r.param_type = TOMOYO_TYPE_MANUAL_TASK_ACL;
+ r.param.task.domainname = &name;
+ tomoyo_check_acl(&r, tomoyo_check_task_acl);
+ if (!r.granted)
+ error = -EPERM;
+ else {
+ struct tomoyo_domain_info *new_domain =
+ tomoyo_assign_domain(data, true);
+ if (!new_domain) {
+ error = -ENOENT;
+ } else {
+ struct cred *cred = prepare_creds();
+ if (!cred) {
+ error = -ENOMEM;
+ } else {
+ struct tomoyo_domain_info *old_domain =
+ cred->security;
+ cred->security = new_domain;
+ atomic_inc(&new_domain->users);
+ atomic_dec(&old_domain->users);
+ commit_creds(cred);
+ error = 0;
+ }
+ }
+ }
+ tomoyo_read_unlock(idx);
+ } else
+ error = -EINVAL;
+out:
+ kfree(data);
+ return error ? error : count;
+}
+
+/**
+ * tomoyo_read_self - read() for /sys/kernel/security/tomoyo/self_domain interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Domainname which current thread belongs to.
+ * @count: Size of @buf.
+ * @ppos: Bytes read by now.
+ *
+ * Returns read size on success, negative value otherwise.
+ */
+static ssize_t tomoyo_read_self(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ const char *domain = tomoyo_domain()->domainname->name;
+ loff_t len = strlen(domain);
+ loff_t pos = *ppos;
+ if (pos >= len || !count)
+ return 0;
+ len -= pos;
+ if (count < len)
+ len = count;
+ if (copy_to_user(buf, domain + pos, len))
+ return -EFAULT;
+ *ppos += len;
+ return len;
+}
+
+/* Operations for /sys/kernel/security/tomoyo/self_domain interface. */
+static const struct file_operations tomoyo_self_operations = {
+ .write = tomoyo_write_self,
+ .read = tomoyo_read_self,
+};
+
+/**
* tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface.
*
* @inode: Pointer to "struct inode".
@@ -135,8 +253,6 @@ static int __init tomoyo_initerface_init(void)
TOMOYO_EXCEPTIONPOLICY);
tomoyo_create_entry("audit", 0400, tomoyo_dir,
TOMOYO_AUDIT);
- tomoyo_create_entry("self_domain", 0400, tomoyo_dir,
- TOMOYO_SELFDOMAIN);
tomoyo_create_entry(".process_status", 0600, tomoyo_dir,
TOMOYO_PROCESS_STATUS);
tomoyo_create_entry("stat", 0644, tomoyo_dir,
@@ -147,6 +263,9 @@ static int __init tomoyo_initerface_init(void)
TOMOYO_MANAGER);
tomoyo_create_entry("version", 0400, tomoyo_dir,
TOMOYO_VERSION);
+ securityfs_create_file("self_domain", 0666, tomoyo_dir, NULL,
+ &tomoyo_self_operations);
+ tomoyo_load_builtin_policy();
return 0;
}
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index f776400a8f3..4b327b69174 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -442,6 +442,64 @@ static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path);
}
+/**
+ * tomoyo_socket_listen - Check permission for listen().
+ *
+ * @sock: Pointer to "struct socket".
+ * @backlog: Backlog parameter.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_listen(struct socket *sock, int backlog)
+{
+ return tomoyo_socket_listen_permission(sock);
+}
+
+/**
+ * tomoyo_socket_connect - Check permission for connect().
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ return tomoyo_socket_connect_permission(sock, addr, addr_len);
+}
+
+/**
+ * tomoyo_socket_bind - Check permission for bind().
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ return tomoyo_socket_bind_permission(sock, addr, addr_len);
+}
+
+/**
+ * tomoyo_socket_sendmsg - Check permission for sendmsg().
+ *
+ * @sock: Pointer to "struct socket".
+ * @msg: Pointer to "struct msghdr".
+ * @size: Size of message.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+ int size)
+{
+ return tomoyo_socket_sendmsg_permission(sock, msg, size);
+}
+
/*
* tomoyo_security_ops is a "struct security_operations" which is used for
* registering TOMOYO.
@@ -472,6 +530,10 @@ static struct security_operations tomoyo_security_ops = {
.sb_mount = tomoyo_sb_mount,
.sb_umount = tomoyo_sb_umount,
.sb_pivotroot = tomoyo_sb_pivotroot,
+ .socket_bind = tomoyo_socket_bind,
+ .socket_connect = tomoyo_socket_connect,
+ .socket_listen = tomoyo_socket_listen,
+ .socket_sendmsg = tomoyo_socket_sendmsg,
};
/* Lock for GC. */
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index c36bd1107fc..4a9b4b2eb75 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -42,6 +42,39 @@ const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = {
[TOMOYO_MAC_FILE_MOUNT] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_UMOUNT] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_PIVOT_ROOT] = TOMOYO_MAC_CATEGORY_FILE,
+ /* CONFIG::network group */
+ [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_RAW_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_RAW_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ /* CONFIG::misc group */
+ [TOMOYO_MAC_ENVIRON] = TOMOYO_MAC_CATEGORY_MISC,
};
/**
@@ -126,6 +159,31 @@ char *tomoyo_read_token(struct tomoyo_acl_param *param)
}
/**
+ * tomoyo_get_domainname - Read a domainname from a line.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns a domainname on success, NULL otherwise.
+ */
+const struct tomoyo_path_info *tomoyo_get_domainname
+(struct tomoyo_acl_param *param)
+{
+ char *start = param->data;
+ char *pos = start;
+ while (*pos) {
+ if (*pos++ != ' ' || *pos++ == '/')
+ continue;
+ pos -= 2;
+ *pos++ = '\0';
+ break;
+ }
+ param->data = pos;
+ if (tomoyo_correct_domain(start))
+ return tomoyo_get_name(start);
+ return NULL;
+}
+
+/**
* tomoyo_parse_ulong - Parse an "unsigned long" value.
*
* @result: Pointer to "unsigned long".
@@ -920,14 +978,17 @@ int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
const u8 index)
{
u8 mode;
- const u8 category = TOMOYO_MAC_CATEGORY_FILE;
+ struct tomoyo_profile *p;
+
if (!tomoyo_policy_loaded)
return TOMOYO_CONFIG_DISABLED;
- mode = tomoyo_profile(ns, profile)->config[index];
+ p = tomoyo_profile(ns, profile);
+ mode = p->config[index];
if (mode == TOMOYO_CONFIG_USE_DEFAULT)
- mode = tomoyo_profile(ns, profile)->config[category];
+ mode = p->config[tomoyo_index2category[index]
+ + TOMOYO_MAX_MAC_INDEX];
if (mode == TOMOYO_CONFIG_USE_DEFAULT)
- mode = tomoyo_profile(ns, profile)->default_config;
+ mode = p->default_config;
return mode & 3;
}
@@ -996,6 +1057,17 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
perm = container_of(ptr, struct tomoyo_mkdev_acl,
head)->perm;
break;
+ case TOMOYO_TYPE_INET_ACL:
+ perm = container_of(ptr, struct tomoyo_inet_acl,
+ head)->perm;
+ break;
+ case TOMOYO_TYPE_UNIX_ACL:
+ perm = container_of(ptr, struct tomoyo_unix_acl,
+ head)->perm;
+ break;
+ case TOMOYO_TYPE_MANUAL_TASK_ACL:
+ perm = 0;
+ break;
default:
perm = 1;
}
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 16bd9c03679..69156923843 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -176,7 +176,7 @@ static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
* Calls the memory-allocator function for the corresponding
* buffer type.
*
- * Returns zero if the buffer with the given size is allocated successfuly,
+ * Returns zero if the buffer with the given size is allocated successfully,
* other a negative value at error.
*/
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
@@ -230,7 +230,7 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
* tries to allocate again. The size actually allocated is stored in
* res_size argument.
*
- * Returns zero if the buffer with the given size is allocated successfuly,
+ * Returns zero if the buffer with the given size is allocated successfully,
* other a negative value at error.
*/
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 1c6be91dfb9..c74e228731e 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -23,7 +23,7 @@
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/uio.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 6c93e051f9a..6c9e8e8f45f 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -432,9 +432,7 @@ config SOUND_SB
ALS-007 and ALS-1X0 chips (read <file:Documentation/sound/oss/ALS>) and
for cards based on ESS chips (read
<file:Documentation/sound/oss/ESS1868> and
- <file:Documentation/sound/oss/ESS>). If you have an SB AWE 32 or SB AWE
- 64, say Y here and also to "AWE32 synth" below and read
- <file:Documentation/sound/oss/INSTALL.awe>. If you have an IBM Mwave
+ <file:Documentation/sound/oss/ESS>). If you have an IBM Mwave
card, say Y here and read <file:Documentation/sound/oss/mwave>.
If you compile the driver into the kernel and don't want to use
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index e9a2a8795d1..191284a1c0a 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2370,6 +2370,7 @@ static int azx_dev_free(struct snd_device *device)
static struct snd_pci_quirk position_fix_list[] __devinitdata = {
SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1028, 0x02c6, "Dell Inspiron 1010", POS_FIX_LPIB),
SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 7696d05b935..76752d8ea73 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3110,6 +3110,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
+ SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig
index 6bbf001f659..9e11a14d1b4 100644
--- a/sound/soc/davinci/Kconfig
+++ b/sound/soc/davinci/Kconfig
@@ -29,7 +29,6 @@ choice
prompt "DM365 codec select"
depends on SND_DAVINCI_SOC_EVM
depends on MACH_DAVINCI_DM365_EVM
- default SND_DM365_EXTERNAL_CODEC
config SND_DM365_AIC3X_CODEC
bool "Audio Codec - AIC3101"
diff --git a/sound/usb/card.c b/sound/usb/card.c
index d8f2bf40145..3068f043099 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -631,7 +631,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
if (chip == (void *)-1L)
return 0;
- if (!(message.event & PM_EVENT_AUTO)) {
+ if (!PMSG_IS_AUTO(message)) {
snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
if (!chip->num_suspended_intf++) {
list_for_each(p, &chip->pcm_list) {
diff --git a/drivers/staging/hv/tools/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index a4a407f7052..11224eddcdc 100644
--- a/drivers/staging/hv/tools/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -116,7 +116,16 @@ void kvp_get_os_info(void)
uname(&uts_buf);
os_build = uts_buf.release;
- processor_arch= uts_buf.machine;
+ processor_arch = uts_buf.machine;
+
+ /*
+ * The current windows host (win7) expects the build
+ * string to be of the form: x.y.z
+ * Strip additional information we may have.
+ */
+ p = strchr(os_build, '-');
+ if (p)
+ *p = '\0';
file = fopen("/etc/SuSE-release", "r");
if (file != NULL)
@@ -264,22 +273,20 @@ static int
kvp_get_domain_name(char *buffer, int length)
{
struct addrinfo hints, *info ;
- gethostname(buffer, length);
int error = 0;
+ gethostname(buffer, length);
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_INET; /*Get only ipv4 addrinfo. */
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_CANONNAME;
- error = getaddrinfo(buffer, "http", &hints, &info);
+ error = getaddrinfo(buffer, NULL, &hints, &info);
if (error != 0) {
strcpy(buffer, "getaddrinfo failed\n");
- error = 1;
- goto get_domain_done;
+ return error;
}
strcpy(buffer, info->ai_canonname);
-get_domain_done:
freeaddrinfo(info);
return error;
}
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index fe02903f7d0..80d9598db31 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -341,7 +341,7 @@ const char *perf_config_dirname(const char *name, const char *value)
static int perf_default_core_config(const char *var __used, const char *value __used)
{
- /* Add other config variables here and to Documentation/config.txt. */
+ /* Add other config variables here. */
return 0;
}
@@ -350,7 +350,7 @@ int perf_default_config(const char *var, const char *value, void *dummy __used)
if (!prefixcmp(var, "core."))
return perf_default_core_config(var, value);
- /* Add other config variables here and to Documentation/config.txt. */
+ /* Add other config variables here. */
return 0;
}
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 4e9eaeb518c..eaf3a50f976 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -205,6 +205,8 @@ static void kvm_free_assigned_device(struct kvm *kvm,
else
pci_restore_state(assigned_dev->dev);
+ assigned_dev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+
pci_release_regions(assigned_dev->dev);
pci_disable_device(assigned_dev->dev);
pci_dev_put(assigned_dev->dev);
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 78c80f67f53..967aba133a6 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -187,6 +187,8 @@ int kvm_assign_device(struct kvm *kvm,
goto out_unmap;
}
+ pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
+
printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
assigned_dev->host_segnr,
assigned_dev->host_busnr,
@@ -215,6 +217,8 @@ int kvm_deassign_device(struct kvm *kvm,
iommu_detach_device(domain, &pdev->dev);
+ pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+
printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
assigned_dev->host_segnr,
assigned_dev->host_busnr,